From 97a86503c01291b0ce21ed862116ec31093e2727 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Fri, 15 Dec 2023 11:47:49 +0800 Subject: [PATCH 0001/2138] anolis: configs: add default anolis_defconfig These configs is imported from ANCK 5.10-016.1, and they are refreshed by: ARCH=${arch} CROSS_COMPILE=scripts/dummy-tools/ make olddefconfig Signed-off-by: Qiao Ma --- arch/arm64/configs/anolis-debug_defconfig | 7168 +++++++++++++++++++ arch/arm64/configs/anolis_defconfig | 7124 +++++++++++++++++++ arch/x86/configs/anolis-debug_defconfig | 7783 +++++++++++++++++++++ arch/x86/configs/anolis_defconfig | 7710 ++++++++++++++++++++ 4 files changed, 29785 insertions(+) create mode 100644 arch/arm64/configs/anolis-debug_defconfig create mode 100644 arch/arm64/configs/anolis_defconfig create mode 100644 arch/x86/configs/anolis-debug_defconfig create mode 100644 arch/x86/configs/anolis_defconfig diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig new file mode 100644 index 000000000000..247a3d434dab --- /dev/null +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -0,0 +1,7168 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/arm64 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_CONSTRUCTORS=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y +CONFIG_GENERIC_IRQ_IPI=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +CONFIG_GENERIC_IRQ_DEBUGFS=y +# end of IRQ subsystem + +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# 
+CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_CONTEXT_TRACKING_USER=y +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y + +# +# BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +CONFIG_BPF_UNPRIV_DEFAULT_OFF=y +# CONFIG_BPF_PRELOAD is not set +CONFIG_BPF_LSM=y +# end of BPF subsystem + +CONFIG_PREEMPT_VOLUNTARY_BUILD=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +CONFIG_PREEMPT_COUNT=y +# CONFIG_PREEMPT_DYNAMIC is not set +CONFIG_SCHED_CORE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_SCHED_THERMAL_PRESSURE=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_RCU_NOCB_CPU=y +# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set +# CONFIG_RCU_LAZY is not set +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y 
+CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +CONFIG_CGROUP_DEBUG=y +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y 
+CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_GUEST_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HAVE_IMA_KEXEC=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_KEXEC_SIG=y +CONFIG_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_CRASH_DUMP=y +# end of Kexec and crash features +# end of General setup + +CONFIG_ARM64=y +CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_CONT_PTE_SHIFT=4 +CONFIG_ARM64_CONT_PMD_SHIFT=4 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y +CONFIG_KASAN_SHADOW_OFFSET=0xdfff800000000000 + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_APPLE is not set +# CONFIG_ARCH_BCM is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BITMAIN is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_SPARX5 is not set +# CONFIG_ARCH_K3 is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_KEEMBAY is not set +# CONFIG_ARCH_MEDIATEK is not set +# 
CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +# CONFIG_ARCH_NXP is not set +# CONFIG_ARCH_MA35 is not set +# CONFIG_ARCH_NPCM is not set +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_ROCKCHIP is not set +CONFIG_ARCH_SEATTLE=y +# CONFIG_ARCH_INTEL_SOCFPGA is not set +# CONFIG_ARCH_STM32 is not set +# CONFIG_ARCH_SYNQUACER is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_THUNDER2=y +# CONFIG_ARCH_UNIPHIER is not set +CONFIG_ARCH_VEXPRESS=y +# CONFIG_ARCH_VISCONTI is not set +CONFIG_ARCH_XGENE=y +# CONFIG_ARCH_ZYNQMP is not set +# end of Platform selection + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y +CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_ARM64_ERRATUM_1742098=y +CONFIG_ARM64_ERRATUM_845719=y +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y +CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1418040=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y +CONFIG_ARM64_ERRATUM_1165522=y +CONFIG_ARM64_ERRATUM_1319367=y +CONFIG_ARM64_ERRATUM_1530923=y +CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y +CONFIG_ARM64_ERRATUM_2441007=y +CONFIG_ARM64_ERRATUM_1286807=y +CONFIG_ARM64_ERRATUM_1463225=y +CONFIG_ARM64_ERRATUM_1542419=y +CONFIG_ARM64_ERRATUM_1508412=y +CONFIG_ARM64_ERRATUM_2051678=y +CONFIG_ARM64_ERRATUM_2077057=y +CONFIG_ARM64_ERRATUM_2658417=y +CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y +CONFIG_ARM64_ERRATUM_2054223=y +CONFIG_ARM64_ERRATUM_2067961=y +CONFIG_ARM64_ERRATUM_2441009=y +CONFIG_ARM64_ERRATUM_2645198=y +CONFIG_ARM64_ERRATUM_2966298=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23144=y +CONFIG_CAVIUM_ERRATUM_23154=y 
+CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_CAVIUM_TX2_ERRATUM_219=y +CONFIG_FUJITSU_ERRATUM_010001=y +CONFIG_HISILICON_ERRATUM_161600802=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y +CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y +CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y +CONFIG_ROCKCHIP_ERRATUM_3588001=y +CONFIG_SOCIONEXT_SYNQUACER_PREITS=y +# end of ARM errata workarounds via the alternatives framework + +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_16K_PAGES is not set +# CONFIG_ARM64_64K_PAGES is not set +# CONFIG_ARM64_VA_BITS_39 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_SCHED_MC=y +# CONFIG_SCHED_CLUSTER is not set +CONFIG_SCHED_SMT=y +CONFIG_NR_CPUS=1024 +CONFIG_HOTPLUG_CPU=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=6 +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_CC_HAVE_SHADOW_CALL_STACK=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y +CONFIG_ARCH_SELECTS_KEXEC_FILE=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_TRANS_TABLE=y +# CONFIG_XEN is not set +CONFIG_ARCH_FORCE_MAX_ORDER=10 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y +# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set +# CONFIG_ARM64_SW_TTBR0_PAN is not set +CONFIG_ARM64_TAGGED_ADDR_ABI=y +CONFIG_COMPAT=y +CONFIG_KUSER_HELPERS=y +# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set +# CONFIG_ARMV8_DEPRECATED is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y 
+CONFIG_AS_HAS_LSE_ATOMICS=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_USE_LSE_ATOMICS=y +# end of ARMv8.1 architectural features + +# +# ARMv8.2 architectural features +# +CONFIG_AS_HAS_ARMV8_2=y +CONFIG_AS_HAS_SHA3=y +CONFIG_ARM64_PMEM=y +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_CNP=y +# end of ARMv8.2 architectural features + +# +# ARMv8.3 architectural features +# +# CONFIG_ARM64_PTR_AUTH is not set +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y +CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y +CONFIG_AS_HAS_ARMV8_3=y +CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y +CONFIG_AS_HAS_LDAPR=y +# end of ARMv8.3 architectural features + +# +# ARMv8.4 architectural features +# +# CONFIG_ARM64_AMU_EXTN is not set +CONFIG_AS_HAS_ARMV8_4=y +CONFIG_ARM64_TLB_RANGE=y +# end of ARMv8.4 architectural features + +# +# ARMv8.5 architectural features +# +CONFIG_AS_HAS_ARMV8_5=y +# CONFIG_ARM64_BTI is not set +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y +CONFIG_ARM64_E0PD=y +CONFIG_ARM64_AS_HAS_MTE=y +CONFIG_ARM64_MTE=y +# end of ARMv8.5 architectural features + +# +# ARMv8.7 architectural features +# +CONFIG_ARM64_EPAN=y +# end of ARMv8.7 architectural features + +CONFIG_ARM64_SVE=y +CONFIG_ARM64_SME=y +CONFIG_ARM64_PSEUDO_NMI=y +# CONFIG_ARM64_DEBUG_PRIORITY_MASKING is not set +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_MODULE_REGION_FULL=y +CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y +CONFIG_STACKPROTECTOR_PER_TASK=y +# end of Kernel Features + +# +# Boot options +# +CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y +CONFIG_CMDLINE="console=ttyAMA0" +CONFIG_CMDLINE_FROM_BOOTLOADER=y +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +# end of Boot options + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# 
CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +CONFIG_PM_ADVANCED_DEBUG=y +CONFIG_PM_TEST_SUSPEND=y +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +# end of Power management options + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set + +# +# ARM CPU Idle Drivers +# +# CONFIG_ARM_PSCI_CPUIDLE is not set +# end of ARM CPU Idle Drivers +# end of CPU Idle + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_CPUFREQ_DT_PLATDEV is not set +CONFIG_ACPI_CPPC_CPUFREQ=y +CONFIG_ACPI_CPPC_CPUFREQ_FIE=y +CONFIG_ARM_SCPI_CPUFREQ=m +# CONFIG_ARM_QCOM_CPUFREQ_HW is not set +# end of CPU Frequency scaling +# end of CPU Power Management + +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +CONFIG_ACPI_DEBUGGER=y +CONFIG_ACPI_DEBUGGER_USER=m +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_FPDT is not set +# CONFIG_ACPI_EC_DEBUGFS is not set 
+CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_DEBUG=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HED=y +CONFIG_ACPI_CUSTOM_METHOD=m +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +CONFIG_ACPI_NFIT=m +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_SEA=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +CONFIG_ACPI_APEI_ERST_DEBUG=m +CONFIG_ACPI_CONFIGFS=m +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_AGDI=y +CONFIG_ACPI_APMT=y +CONFIG_ACPI_PPTT=y +CONFIG_ACPI_PCC=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +CONFIG_ACPI_PRMT=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +# CONFIG_NVHE_EL2_DEBUG is not set + +# +# General architecture-dependent options +# +CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y +CONFIG_HOTPLUG_CORE_SYNC=y +CONFIG_HOTPLUG_CORE_SYNC_DEAD=y +CONFIG_KPROBES=y 
+CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_KEEPINITRD=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK=y +# CONFIG_SHADOW_CALL_STACK is not set +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y +# CONFIG_CFI_CLANG is not set +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y 
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PUD=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y +CONFIG_RANDOMIZE_KSTACK_OFFSET=y +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_COMPILER_H=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +CONFIG_LOCK_EVENT_COUNTS=y +CONFIG_ARCH_HAS_RELR=y +CONFIG_RELR=y +CONFIG_HAVE_PREEMPT_DYNAMIC=y +CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y +CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT_4B=y +CONFIG_FUNCTION_ALIGNMENT_8B=y +CONFIG_FUNCTION_ALIGNMENT=8 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not 
set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +# CONFIG_MODULE_SIG_ALL is not set +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_FC_APPID is not set +CONFIG_BLK_CGROUP_IOCOST=y +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# 
CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# 
+CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y +CONFIG_ARCH_HAVE_ELF_PROT=y +CONFIG_ARCH_USE_GNU_PROPERTY=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_EXCLUSIVE_SYSTEM_RAM=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_MHP_MEMMAP_ON_MEMORY=y 
+CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_DEVICE_MIGRATION=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_THP_SWAP=y +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=19 +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_PAGE_IDLE_FLAG=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DMA=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DEVICE=y +CONFIG_HMM_MIRROR=y +# CONFIG_DEVICE_PRIVATE is not set +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_USES_PG_ARCH_X=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MEMFD_CREATE=y +CONFIG_SECRETMEM=y +# CONFIG_ANON_VMA_NAME is not set +CONFIG_USERFAULTFD=y +CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y +CONFIG_LRU_GEN=y +# CONFIG_LRU_GEN_ENABLED is not set +# CONFIG_LRU_GEN_STATS is not set +CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y +CONFIG_PER_VMA_LOCK=y +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# 
+CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +# CONFIG_DAMON_SYSFS is not set +CONFIG_DAMON_DBGFS=y +# CONFIG_DAMON_RECLAIM is not set +# CONFIG_DAMON_LRU_SORT is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +# CONFIG_TLS_TOE is not set +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m 
+CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +# CONFIG_INET6_ESPINTCP is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +# CONFIG_IPV6_RPL_LWTUNNEL is not set +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_MPTCP_IPV6=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m 
+CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set +CONFIG_NETFILTER_XTABLES=y +# CONFIG_NETFILTER_XTABLES_COMPAT is not set + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m 
+CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m 
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# 
+CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m 
+CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NFT_BRIDGE_REJECT=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +# CONFIG_BRIDGE_MRP is not set +# CONFIG_BRIDGE_CFM is not set +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# 
CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_MQPRIO_LIB=m +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +# CONFIG_NET_SCH_FQ_PIE is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_ETS is not set +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m 
+CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +# CONFIG_NET_ACT_GATE is not set +CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set 
+CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +CONFIG_CEPH_LIB_PRETTYDEBUG=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +# CONFIG_PCI_P2PDMA is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCI_DYNAMIC_OF_NODES is not set +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# 
CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# +# CONFIG_PCIE_ALTERA is not set +CONFIG_PCI_HOST_THUNDER_PEM=y +CONFIG_PCI_HOST_THUNDER_ECAM=y +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_HISI_ERR is not set +# CONFIG_PCIE_MICROCHIP_HOST is not set +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +# CONFIG_PCIE_XILINX is not set + +# +# Cadence-based PCIe controllers +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence-based PCIe controllers + +# +# DesignWare-based PCIe controllers +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_AL is not set +# CONFIG_PCI_MESON is not set +CONFIG_PCI_HISI=y +# CONFIG_PCIE_KIRIN is not set +# CONFIG_PCIE_HISI_STB is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCIE_QCOM is not set +# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# end of Mobiveil-based PCIe controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_CXL_BUS is not set +CONFIG_PCCARD=y +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_CACHE=y +# CONFIG_FW_UPLOAD is not set +# end of Firmware loader + 
+CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +CONFIG_HMEM_REPORTING=y +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_SOC_BUS=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +CONFIG_GENERIC_ARCH_TOPOLOGY=y +CONFIG_GENERIC_ARCH_NUMA=y +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_BRCMSTB_GISB_ARB is not set +# CONFIG_MOXTET is not set +CONFIG_HISILICON_LPC=y +# CONFIG_QCOM_EBI2 is not set +# CONFIG_QCOM_SSC_BLOCK_BUS is not set +CONFIG_VEXPRESS_CONFIG=y +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# CONFIG_ARM_SCMI_PROTOCOL is not set +# end of ARM System Control and Management Interface Protocol + +CONFIG_ARM_SCPI_PROTOCOL=m +CONFIG_ARM_SCPI_POWER_DOMAIN=m +CONFIG_ARM_SDE_INTERFACE=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +# CONFIG_ISCSI_IBFT is not set +CONFIG_FW_CFG_SYSFS=y +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_QCOM_SCM=y +# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +# CONFIG_ARM_FFA_TRANSPORT is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_SOFT_RESERVE=y +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_GENERIC_STUB=y +# CONFIG_EFI_ZBOOT is not set +CONFIG_EFI_ARMSTUB_DTB_LOADER=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set 
+# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# CONFIG_EFI_DISABLE_RUNTIME is not set +# CONFIG_EFI_COCO_SECRET is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_ARM=y +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +CONFIG_HAVE_ARM_SMCCC=y +CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y +CONFIG_ARM_SMCCC_SOC_ID=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
+# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=m +# CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_PHYSMAP_OF is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +# 
CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y +CONFIG_OF_NUMA=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_LZORLE=y +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +CONFIG_ZRAM_DEF_COMP="lzo-rle" +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_ZRAM_MULTI_COMP is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_UBLK=m +CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_VERBOSE_ERRORS is not set +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +# CONFIG_NVME_AUTH is not set +CONFIG_NVME_TARGET=m +# CONFIG_NVME_TARGET_PASSTHRU is not set +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +# 
CONFIG_NVME_TARGET_AUTH is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +CONFIG_TIFM_CORE=m +# CONFIG_TIFM_7XX1 is not set +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_DW_XDATA_PCIE is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_HISI_HIKEY_USB is not set +# CONFIG_OPEN_DICE is not set +# CONFIG_VCPU_STALL_DETECTOR is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module (requires I2C) +# +# CONFIG_ALTERA_STAPL is not set +# CONFIG_VMWARE_VMCI is not set +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_BCM_VK is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_UACCE is not set +CONFIG_PVPANIC=y +# CONFIG_PVPANIC_MMIO is not set +# CONFIG_PVPANIC_PCI is not set +# CONFIG_GP_PCI1XXXX is not set +# end of Misc devices + +# +# SCSI device support +# 
+CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI_COMMON=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_BLK_DEV_BSG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_HISI_SAS_PCI=m +# CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +# CONFIG_FCOE is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D 
is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_EFCT is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=y +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_AHCI_DWC is not set +# CONFIG_AHCI_CEVA is not set +CONFIG_AHCI_XGENE=m +CONFIG_SATA_AHCI_SEATTLE=m +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE 
is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_OF_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_BITMAP_FILE=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +# CONFIG_MD_MULTIPATH is not set +CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# 
CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +# CONFIG_DM_EBS is not set +CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_MULTIPATH_HST is not set +# CONFIG_DM_MULTIPATH_IOA is not set +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_DM_AUDIT=y +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_REMOTE_TARGET is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set +# CONFIG_AMT is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m 
+CONFIG_NETCONSOLE_DYNAMIC=y +# CONFIG_NETCONSOLE_EXTENDED_LOG is not set +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +CONFIG_AMD_XGBE=m +# CONFIG_AMD_XGBE_DCB is not set +# CONFIG_PDS_CORE is not set +CONFIG_NET_XGENE=m +CONFIG_NET_XGENE_V2=m +CONFIG_NET_VENDOR_AQUANTIA=y +# CONFIG_AQTION is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ASIX=y +# CONFIG_SPI_AX88796C is not set +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +# CONFIG_CNIC is not set +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +# CONFIG_NET_VENDOR_CADENCE is not set +CONFIG_NET_VENDOR_CAVIUM=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_THUNDER_NIC_BGX=m +CONFIG_THUNDER_NIC_RGX=m +CONFIG_CAVIUM_PTP=y +CONFIG_LIQUIDIO_CORE=m +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +CONFIG_CHELSIO_IPSEC_INLINE=m +# 
CONFIG_CHELSIO_TLS_DEVICE is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +CONFIG_NET_VENDOR_ENGLEDER=y +# CONFIG_TSNEP is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_FUNGIBLE=y +# CONFIG_FUN_ETH is not set +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_GVE=m +CONFIG_NET_VENDOR_HISILICON=y +# CONFIG_HIX5HD2_GMAC is not set +# CONFIG_HISI_FEMAC is not set +# CONFIG_HIP04_ETH is not set +CONFIG_HNS_MDIO=m +CONFIG_HNS=m +CONFIG_HNS_DSAF=m +CONFIG_HNS_ENET=m +CONFIG_HNS3=m +CONFIG_HNS3_HCLGE=m +CONFIG_HNS3_DCB=y +CONFIG_HNS3_HCLGEVF=m +CONFIG_HNS3_ENET=m +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +# CONFIG_E1000 is not set +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y +CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_ICE_SWITCHDEV=y +CONFIG_FM10K=m +CONFIG_IGC=m +# CONFIG_JME is not set +CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set +CONFIG_NET_VENDOR_LITEX=y +# CONFIG_LITEX_LITEETH is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_BRIDGE=y +CONFIG_MLX5_CLS_ACT=y +CONFIG_MLX5_TC_CT=y +CONFIG_MLX5_TC_SAMPLE=y +CONFIG_MLX5_CORE_EN_DCB=y +CONFIG_MLX5_CORE_IPOIB=y +# CONFIG_MLX5_MACSEC is not set +# CONFIG_MLX5_EN_IPSEC is not set +# 
CONFIG_MLX5_EN_TLS is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLX5_SF is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_MLXBF_GIGE is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +CONFIG_NET_VENDOR_MICROSOFT=y +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +CONFIG_NFP_NET_IPSEC=y +CONFIG_NFP_DEBUG=y +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +# CONFIG_QLCNIC is not set +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +# CONFIG_NET_VENDOR_BROCADE is not set +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCA7000_SPI is not set +CONFIG_QCOM_EMAC=m +# CONFIG_RMNET is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_SFC_SIENA is not 
set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_LIBWX=m +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_EMACLITE is not set +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLINK=m +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y +CONFIG_PHYLIB_LEDS=y +CONFIG_FIXED_PHY=y +CONFIG_SFP=m + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +# CONFIG_ADIN_PHY is not set +# CONFIG_ADIN1100_PHY is not set +CONFIG_AQUANTIA_PHY=m +CONFIG_AX88796B_PHY=m +CONFIG_BROADCOM_PHY=m +# CONFIG_BCM54140_PHY is not set +CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM84881_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BCM_NET_PHYPTP=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set +CONFIG_AT803X_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m 
+CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +# CONFIG_PSE_CONTROLLER is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_FWNODE_MDIO=y +CONFIG_OF_MDIO=y +CONFIG_ACPI_MDIO=y +CONFIG_MDIO_DEVRES=y +CONFIG_MDIO_XGENE=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_CAVIUM=m +CONFIG_MDIO_GPIO=m +CONFIG_MDIO_HISI_FEMAC=m +CONFIG_MDIO_I2C=m +# CONFIG_MDIO_MVUSB is not set +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_OCTEON=m +# CONFIG_MDIO_IPQ4019 is not set +# CONFIG_MDIO_IPQ8064 is not set +CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=m +# end of PCS device drivers + +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +# CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# CONFIG_PPPOE_HASH_BITS_8 is not set +CONFIG_PPPOE_HASH_BITS=4 +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +# CONFIG_USB_NET_SR9800 is not set 
+CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set +CONFIG_USB_RTL8153_ECM=m +# CONFIG_WLAN is not set +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + +# +# X.25/LAPB support is disabled +# +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +CONFIG_IEEE802154_DRIVERS=m +# CONFIG_IEEE802154_FAKELB is not set +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# 
CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +CONFIG_KEYBOARD_GPIO=m +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_PINEPHONE is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +# CONFIG_MOUSE_PS2 is not set +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +CONFIG_RMI4_F34=y +# CONFIG_RMI4_F3A is 
not set +CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=y +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_LEGACY_TIOCSTI=y +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_16550A_VARIANTS=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCILIB=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_PCI1XXXX is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_RT288X=y +CONFIG_SERIAL_8250_PERICOM=y +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# 
CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_MSM is not set +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_SPRD is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_NOZOMI is not set +# CONFIG_NULL_TTY is not set +CONFIG_HVC_DRIVER=y +# CONFIG_HVC_DCC is not set +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +# CONFIG_IPMI_IPMB is not set +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +# CONFIG_SSIF_IPMI_BMC is not set +# CONFIG_IPMB_DEVICE_INTERFACE is not set +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +# CONFIG_HW_RANDOM_BA431 is not set +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_HISI=y +CONFIG_HW_RANDOM_HISTB=y +CONFIG_HW_RANDOM_XGENE=m +CONFIG_HW_RANDOM_CAVIUM=m +# CONFIG_HW_RANDOM_CCTRNG is not set +# CONFIG_HW_RANDOM_XIPHERA is not set +CONFIG_HW_RANDOM_ARM_SMCCC_TRNG=y +CONFIG_HW_RANDOM_CN10K=y +# CONFIG_APPLICOM is not set +CONFIG_DEVMEM=y +# CONFIG_DEVPORT is not set +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +CONFIG_TCG_TIS_SPI=m +# CONFIG_TCG_TIS_SPI_CR50 is not set +# CONFIG_TCG_TIS_I2C is not set +# CONFIG_TCG_TIS_I2C_CR50 is not set +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not 
set +CONFIG_TCG_ATMEL=m +# CONFIG_TCG_INFINEON is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +# CONFIG_XILLYBUS is not set +# CONFIG_XILLYUSB is not set +# end of Character devices + +# +# I2C support +# +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_ARB_GPIO_CHALLENGE=m +CONFIG_I2C_MUX_GPIO=m +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_PINCTRL=m +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +CONFIG_I2C_MUX_MLXCPLD=m +# end of Multiplexer I2C Chip support + +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_SMBUS=m + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCF=m +CONFIG_I2C_ALGOPCA=m +# end of I2C Algorithms + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=m +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=m +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_GPIO=m 
+CONFIG_I2C_GPIO_FAULT_INJECTOR=y +# CONFIG_I2C_HISI is not set +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_QCOM_CCI is not set +CONFIG_I2C_QUP=m +# CONFIG_I2C_RK3X is not set +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_VERSATILE=m +CONFIG_I2C_THUNDERX=m +# CONFIG_I2C_XILINX is not set +CONFIG_I2C_XLP9XX=m + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set +# CONFIG_I2C_PCI1XXXX is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_MLXCPLD is not set +CONFIG_I2C_XGENE_SLIMPRO=m +# CONFIG_I2C_VIRTIO is not set +# end of I2C Hardware Bus support + +CONFIG_I2C_STUB=m +CONFIG_I2C_SLAVE=y +CONFIG_I2C_SLAVE_EEPROM=m +# CONFIG_I2C_SLAVE_TESTUNIT is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +CONFIG_SPI=y +CONFIG_SPI_DEBUG=y +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +CONFIG_SPI_CADENCE=m +# CONFIG_SPI_CADENCE_QUADSPI is not set +CONFIG_SPI_DESIGNWARE=m +# CONFIG_SPI_DW_DMA is not set +# CONFIG_SPI_DW_PCI is not set +CONFIG_SPI_DW_MMIO=m +# CONFIG_SPI_HISI_KUNPENG is not set +# CONFIG_SPI_HISI_SFC_V3XX is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PCI1XXXX is not set +CONFIG_SPI_PL022=m +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_QCOM_QSPI is not set +CONFIG_SPI_QUP=y +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_THUNDERX is not set +# CONFIG_SPI_XCOMM is not set +# 
CONFIG_SPI_XILINX is not set +CONFIG_SPI_XLP=m +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y +CONFIG_DP83640_PHY=m +# CONFIG_PTP_1588_CLOCK_INES is not set +CONFIG_PTP_1588_CLOCK_KVM=y +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_CY8C95X0 is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set +# CONFIG_PINCTRL_OCELOT is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_STMFX is not set +CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_IPQ5018 is not set +# CONFIG_PINCTRL_IPQ5332 is not set +# CONFIG_PINCTRL_IPQ8074 is not set +# CONFIG_PINCTRL_IPQ6018 is not set +# CONFIG_PINCTRL_IPQ9574 is not set +# CONFIG_PINCTRL_MDM9607 is not set +# CONFIG_PINCTRL_MSM8916 is not set +# CONFIG_PINCTRL_MSM8953 is not set +# CONFIG_PINCTRL_MSM8976 is not set +# CONFIG_PINCTRL_MSM8994 is not set +# CONFIG_PINCTRL_MSM8996 is not set +# CONFIG_PINCTRL_MSM8998 is not set +# CONFIG_PINCTRL_QCM2290 is not set +# CONFIG_PINCTRL_QCS404 is not set +CONFIG_PINCTRL_QDF2XXX=y +# CONFIG_PINCTRL_QDU1000 is not set +# 
CONFIG_PINCTRL_SA8775P is not set +# CONFIG_PINCTRL_SC7180 is not set +# CONFIG_PINCTRL_SC7280 is not set +# CONFIG_PINCTRL_SC8180X is not set +# CONFIG_PINCTRL_SC8280XP is not set +# CONFIG_PINCTRL_SDM660 is not set +# CONFIG_PINCTRL_SDM670 is not set +# CONFIG_PINCTRL_SDM845 is not set +# CONFIG_PINCTRL_SDX75 is not set +# CONFIG_PINCTRL_SM6115 is not set +# CONFIG_PINCTRL_SM6125 is not set +# CONFIG_PINCTRL_SM6350 is not set +# CONFIG_PINCTRL_SM6375 is not set +# CONFIG_PINCTRL_SM7150 is not set +# CONFIG_PINCTRL_SM8150 is not set +# CONFIG_PINCTRL_SM8250 is not set +# CONFIG_PINCTRL_SM8350 is not set +# CONFIG_PINCTRL_SM8450 is not set +# CONFIG_PINCTRL_SM8550 is not set +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +# CONFIG_PINCTRL_LPASS_LPI is not set + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=m + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_CADENCE is not set +CONFIG_GPIO_DWAPB=m +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +CONFIG_GPIO_GENERIC_PLATFORM=m +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HISI is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_LOGICVC is not set +# CONFIG_GPIO_MB86S7X is not set +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_SIFIVE is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_THUNDERX is not set +CONFIG_GPIO_XGENE=y +CONFIG_GPIO_XGENE_SB=m +# CONFIG_GPIO_XILINX is not set +CONFIG_GPIO_XLP=m +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# 
CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# +# end of USB GPIO expanders + +# +# Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +# CONFIG_W1 is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_GPIO_RESTART=y +CONFIG_POWER_RESET_HISI=y +# CONFIG_POWER_RESET_MSM is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_REGULATOR is not set +CONFIG_POWER_RESET_RESTART=y +# CONFIG_POWER_RESET_VEXPRESS is not set +# CONFIG_POWER_RESET_XGENE is not set +CONFIG_POWER_RESET_SYSCON=y +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +# CONFIG_BATTERY_SBS is not set +# 
CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_MAX77976 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not set +# CONFIG_CHARGER_BQ256XX is not set +CONFIG_CHARGER_SMB347=m +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_RT9467 is not set +# CONFIG_CHARGER_RT9471 is not set +# CONFIG_CHARGER_UCS1002 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_AD7314=m +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM1177 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set 
+CONFIG_SENSORS_ARM_SCPI=m +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FTSTEUTATES is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +CONFIG_SENSORS_G762=m +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set +# CONFIG_SENSORS_IBMAEM is not set +# CONFIG_SENSORS_IBMPEX is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +CONFIG_SENSORS_POWR1220=m +# CONFIG_SENSORS_LINEAGE is not set +CONFIG_SENSORS_LTC2945=m +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +CONFIG_SENSORS_LTC4222=m +# CONFIG_SENSORS_LTC4245 is not set +CONFIG_SENSORS_LTC4260=m +# CONFIG_SENSORS_LTC4261 is not set +CONFIG_SENSORS_MAX1111=m +# CONFIG_SENSORS_MAX127 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +CONFIG_SENSORS_MAX31790=m +# CONFIG_SENSORS_MC34VR500 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# 
CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +CONFIG_SENSORS_ADCXX=m +# CONFIG_SENSORS_LM63 is not set +CONFIG_SENSORS_LM70=m +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +CONFIG_SENSORS_NCT6683=m +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT6775_I2C is not set +CONFIG_SENSORS_NCT7802=m +CONFIG_SENSORS_NCT7904=m +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set +# CONFIG_SENSORS_PCF8591 is not set +CONFIG_PMBUS=m +# CONFIG_SENSORS_PMBUS is not set +# CONFIG_SENSORS_ACBEL_FSG032 is not set +# CONFIG_SENSORS_ADM1266 is not set +# CONFIG_SENSORS_ADM1275 is not set +# CONFIG_SENSORS_BEL_PFE is not set +# CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set +# CONFIG_SENSORS_FSP_3Y is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_DPS920AB is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR36021 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set +# CONFIG_SENSORS_LM25066 is not set +# CONFIG_SENSORS_LT7182S is not set +# CONFIG_SENSORS_LTC2978 is not set +CONFIG_SENSORS_LTC3815=m +# CONFIG_SENSORS_MAX15301 is not set +# CONFIG_SENSORS_MAX16064 is not set +# CONFIG_SENSORS_MAX16601 is not set +# CONFIG_SENSORS_MAX20730 is not set 
+CONFIG_SENSORS_MAX20751=m +# CONFIG_SENSORS_MAX31785 is not set +# CONFIG_SENSORS_MAX34440 is not set +# CONFIG_SENSORS_MAX8688 is not set +# CONFIG_SENSORS_MP2888 is not set +# CONFIG_SENSORS_MP2975 is not set +# CONFIG_SENSORS_MP5023 is not set +# CONFIG_SENSORS_MPQ7932 is not set +# CONFIG_SENSORS_PIM4328 is not set +# CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +CONFIG_SENSORS_TPS40422=m +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set +# CONFIG_SENSORS_UCD9000 is not set +# CONFIG_SENSORS_UCD9200 is not set +# CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set +# CONFIG_SENSORS_ZL6100 is not set +CONFIG_SENSORS_PWM_FAN=m +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +CONFIG_SENSORS_SHTC1=m +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set +# CONFIG_SENSORS_STTS751 is not set +CONFIG_SENSORS_ADC128D818=m +# CONFIG_SENSORS_ADS7828 is not set +CONFIG_SENSORS_ADS7871=m +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +CONFIG_SENSORS_TC74=m +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +CONFIG_SENSORS_TMP103=m +# CONFIG_SENSORS_TMP108 is not set +# 
CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set +CONFIG_SENSORS_VEXPRESS=m +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +CONFIG_SENSORS_XGENE=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=y +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_CPU_THERMAL=y +CONFIG_CPU_FREQ_THERMAL=y +# CONFIG_THERMAL_EMULATION is not set +# CONFIG_THERMAL_MMIO is not set +CONFIG_HISI_THERMAL=m + +# +# Qualcomm thermal drivers +# +# CONFIG_QCOM_LMH is not set +# end of Qualcomm thermal drivers + +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 +CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +# CONFIG_WDAT_WDT is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_XILINX_WINDOW_WATCHDOG is not set 
+# CONFIG_ZIIRAVE_WATCHDOG is not set +CONFIG_ARM_SP805_WATCHDOG=m +CONFIG_ARM_SBSA_WATCHDOG=m +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_QCOM_WDT is not set +# CONFIG_ARM_SMC_WATCHDOG is not set +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +# CONFIG_HP_WATCHDOG is not set +CONFIG_MARVELL_GTI_WDT=y +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=m +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_MAX5970 is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_GATEWORKS_GSC is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI655X_PMIC is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# 
CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_NTXEC is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_QCOM_RPM is not set +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RK8XX_I2C is not set +# CONFIG_MFD_RK8XX_SPI is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS65219 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_STMFX is not set +# CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_QCOM_PM8008 is not set +# CONFIG_MFD_VEXPRESS_SYSREG is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set +# CONFIG_MFD_RSMU_I2C is not set +# CONFIG_MFD_RSMU_SPI is not set +# end of Multifunction device drivers + 
+CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_88PG86X is not set +# CONFIG_REGULATOR_ACT8865 is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_AW37503 is not set +# CONFIG_REGULATOR_DA9121 is not set +# CONFIG_REGULATOR_DA9210 is not set +# CONFIG_REGULATOR_DA9211 is not set +# CONFIG_REGULATOR_FAN53555 is not set +# CONFIG_REGULATOR_FAN53880 is not set +# CONFIG_REGULATOR_GPIO is not set +# CONFIG_REGULATOR_ISL9305 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_LP872X is not set +# CONFIG_REGULATOR_LP8755 is not set +# CONFIG_REGULATOR_LTC3589 is not set +# CONFIG_REGULATOR_LTC3676 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX77857 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8893 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_MAX8973 is not set +# CONFIG_REGULATOR_MAX20086 is not set +# CONFIG_REGULATOR_MAX20411 is not set +# CONFIG_REGULATOR_MAX77826 is not set +# CONFIG_REGULATOR_MCP16502 is not set +# CONFIG_REGULATOR_MP5416 is not set +# CONFIG_REGULATOR_MP8859 is not set +# CONFIG_REGULATOR_MP886X is not set +# CONFIG_REGULATOR_MPQ7920 is not set +# CONFIG_REGULATOR_MT6311 is not set +# CONFIG_REGULATOR_PCA9450 is not set +# CONFIG_REGULATOR_PF8X00 is not set +# CONFIG_REGULATOR_PFUZE100 is not set +# CONFIG_REGULATOR_PV88060 is not set +# CONFIG_REGULATOR_PV88080 is not set +# CONFIG_REGULATOR_PV88090 is not set +# CONFIG_REGULATOR_PWM is not set +# CONFIG_REGULATOR_QCOM_REFGEN is not set +# CONFIG_REGULATOR_RAA215300 is not set +# CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set +# CONFIG_REGULATOR_RT4801 is not set +# CONFIG_REGULATOR_RT4803 
is not set +# CONFIG_REGULATOR_RT5190A is not set +# CONFIG_REGULATOR_RT5739 is not set +# CONFIG_REGULATOR_RT5759 is not set +# CONFIG_REGULATOR_RT6160 is not set +# CONFIG_REGULATOR_RT6190 is not set +# CONFIG_REGULATOR_RT6245 is not set +# CONFIG_REGULATOR_RTQ2134 is not set +# CONFIG_REGULATOR_RTMV20 is not set +# CONFIG_REGULATOR_RTQ6752 is not set +# CONFIG_REGULATOR_RTQ2208 is not set +# CONFIG_REGULATOR_SLG51000 is not set +# CONFIG_REGULATOR_SY8106A is not set +# CONFIG_REGULATOR_SY8824X is not set +# CONFIG_REGULATOR_SY8827N is not set +# CONFIG_REGULATOR_TPS51632 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS6286X is not set +# CONFIG_REGULATOR_TPS6287X is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS65132 is not set +# CONFIG_REGULATOR_TPS6524X is not set +# CONFIG_REGULATOR_VCTRL is not set +# CONFIG_REGULATOR_VEXPRESS is not set +# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set +# CONFIG_RC_CORE is not set +CONFIG_CEC_CORE=m + +# +# CEC support +# +# CONFIG_MEDIA_CEC_SUPPORT is not set +# end of CEC support + +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +CONFIG_DRM=m +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y +CONFIG_DRM_DP_AUX_CHARDEV=y +CONFIG_DRM_DP_CEC=y +CONFIG_DRM_TTM=m +CONFIG_DRM_EXEC=m +CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_SHMEM_HELPER=m +CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_I2C_NXP_TDA998X=m +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or 
helper chips + +# +# ARM devices +# +# CONFIG_DRM_HDLCD is not set +# CONFIG_DRM_MALI_DISPLAY is not set +# CONFIG_DRM_KOMEDA is not set +# end of ARM devices + +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y + +# +# ACP (Audio CoProcessor) Configuration +# +CONFIG_DRM_AMD_ACP=y +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# CONFIG_DEBUG_KERNEL_DC is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + +CONFIG_HSA_AMD=y +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +CONFIG_NOUVEAU_DEBUG_MMU=y +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set +CONFIG_DRM_VKMS=m +# CONFIG_DRM_VMWGFX is not set +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +# CONFIG_DRM_MSM is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_EDP is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set +# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LB035Q02 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set +# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set +# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set +# 
CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set +# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set +# CONFIG_DRM_PANEL_TPO_TPG110 is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_CHIPONE_ICN6211 is not set +# CONFIG_DRM_CHRONTEL_CH7033 is not set +# CONFIG_DRM_DISPLAY_CONNECTOR is not set +# CONFIG_DRM_ITE_IT6505 is not set +# CONFIG_DRM_LONTIUM_LT8912B is not set +# CONFIG_DRM_LONTIUM_LT9211 is not set +# CONFIG_DRM_LONTIUM_LT9611 is not set +# CONFIG_DRM_LONTIUM_LT9611UXC is not set +# CONFIG_DRM_ITE_IT66121 is not set +# CONFIG_DRM_LVDS_CODEC is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NWL_MIPI_DSI is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_PARADE_PS8640 is not set +# CONFIG_DRM_SAMSUNG_DSIM is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_SIMPLE_BRIDGE is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358762 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TOSHIBA_TC358768 is not set +# CONFIG_DRM_TOSHIBA_TC358775 is not set +# CONFIG_DRM_TI_DLPC3433 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TI_SN65DSI83 is not set +# CONFIG_DRM_TI_SN65DSI86 is not set +# CONFIG_DRM_TI_TPD12S015 is not set +# CONFIG_DRM_ANALOGIX_ANX6345 is not set +# 
CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_ANALOGIX_ANX7625 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_CDNS_MHDP8546 is not set +# end of Display Interface Bridges + +# CONFIG_DRM_LOONGSON is not set +# CONFIG_DRM_ETNAVIV is not set +CONFIG_DRM_HISI_HIBMC=m +# CONFIG_DRM_HISI_KIRIN is not set +# CONFIG_DRM_LOGICVC is not set +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_PL111 is not set +# CONFIG_DRM_LIMA is not set +# CONFIG_DRM_PANFROST is not set +# CONFIG_DRM_TIDSS is not set +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_SSD130X is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_ARMCLCD is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set 
+# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +CONFIG_FB_SIMPLE=y +CONFIG_FB_SSD1307=m +# CONFIG_FB_SM712 is not set +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_DEVICE=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_IOMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +CONFIG_FB_BACKLIGHT=m +# CONFIG_FB_MODE_HELPERS is not set +CONFIG_FB_TILEBLITTING=y +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set +CONFIG_BACKLIGHT_PWM=m +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +CONFIG_BACKLIGHT_GPIO=m +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_BACKLIGHT_LED is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support 
+# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_DRM_ACCEL is not set +CONFIG_SOUND=m +# CONFIG_SND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +# CONFIG_HID_ASUS is not set +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +# CONFIG_HID_BIGBEN_FF is not set +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CP2112 is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +# CONFIG_HID_EVISION is not set +CONFIG_HID_EZKEY=m +# CONFIG_HID_FT260 is not set +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +# CONFIG_HID_GLORIOUS is not set +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_STADIA_FF is not set +# CONFIG_HID_VIVALDI is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_VRC2 is not set +# CONFIG_HID_XIAOMI is not set +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m 
+CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +# CONFIG_HID_LETSKETCH is not set +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MEGAWORLD_FF is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NINTENDO is not set +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PLANTRONICS=m +# CONFIG_HID_PXRC is not set +# CONFIG_HID_RAZER is not set +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +# CONFIG_HID_SEMITEK is not set +# CONFIG_HID_SIGMAMICRO is not set +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +# CONFIG_HID_TOPRE is not set +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_U2FZERO is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set +# CONFIG_HID_ALPS is not set +# CONFIG_HID_MCP2221 is not set +# end of 
Special HID drivers + +# +# HID-BPF support +# +# CONFIG_HID_BPF is not set +# end of HID-BPF support + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +# end of USB HID support + +CONFIG_I2C_HID=m +# CONFIG_I2C_HID_ACPI is not set +# CONFIG_I2C_HID_OF is not set +# CONFIG_I2C_HID_OF_ELAN is not set +# CONFIG_I2C_HID_OF_GOODIX is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_LED_TRIG=y +CONFIG_USB_ULPI_BUS=m +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_PRODUCTLIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 +CONFIG_USB_MON=y + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PCI_RENESAS is not set +CONFIG_USB_XHCI_PLATFORM=y +# CONFIG_USB_XHCI_HISTB is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +CONFIG_USB_EHCI_HCD_PLATFORM=m +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# 
+CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set + +# +# USB dual-mode controller drivers +# +# CONFIG_USB_CDNS_SUPPORT is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +# CONFIG_USB_SERIAL_CONSOLE is not set +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m 
+CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +# CONFIG_USB_SERIAL_XR is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +# CONFIG_USB_QCOM_EUD is not set +# CONFIG_APPLE_MFI_FASTCHARGE is not set +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +CONFIG_USB_CHAOSKEY=m +# CONFIG_USB_ONBOARD_HUB is not set +CONFIG_USB_ATM=m +# CONFIG_USB_SPEEDTOUCH is not set +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_ULPI is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +CONFIG_TYPEC=y +CONFIG_TYPEC_TCPM=y +# CONFIG_TYPEC_TCPCI is not set +# CONFIG_TYPEC_FUSB302 is not set +# CONFIG_TYPEC_QCOM_PMIC is not set +CONFIG_TYPEC_UCSI=y +# CONFIG_UCSI_CCG is not set +CONFIG_UCSI_ACPI=y +# CONFIG_UCSI_STM32G0 is not set +CONFIG_TYPEC_TPS6598X=m +# 
CONFIG_TYPEC_ANX7411 is not set +# CONFIG_TYPEC_RT1719 is not set +# CONFIG_TYPEC_HD3SS3220 is not set +# CONFIG_TYPEC_STUSB160X is not set +# CONFIG_TYPEC_WUSB3801 is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_FSA4480 is not set +# CONFIG_TYPEC_MUX_GPIO_SBU is not set +CONFIG_TYPEC_MUX_PI3USB30532=m +# CONFIG_TYPEC_MUX_NB7VPQ904M is not set +# end of USB Type-C Multiplexer/DeMultiplexer Switch support + +# +# USB Type-C Alternate Mode drivers +# +CONFIG_TYPEC_DP_ALTMODE=m +# CONFIG_TYPEC_NVIDIA_ALTMODE is not set +# end of USB Type-C Alternate Mode drivers + +CONFIG_USB_ROLE_SWITCH=y +CONFIG_MMC=m +# CONFIG_PWRSEQ_EMMC is not set +# CONFIG_PWRSEQ_SIMPLE is not set +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_ARMMMCI=m +CONFIG_MMC_STM32_SDMMC=y +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_OF_AT91 is not set +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +# CONFIG_MMC_SDHCI_CADENCE is not set +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_SDHCI_MILBEAUT is not set +# CONFIG_MMC_SDHCI_MSM is not set +CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_DW=m +CONFIG_MMC_DW_PLTFM=m +CONFIG_MMC_DW_BLUEFIELD=m +# CONFIG_MMC_DW_EXYNOS is not set +# CONFIG_MMC_DW_HI3798CV200 is not set +# CONFIG_MMC_DW_K3 is not set +# CONFIG_MMC_DW_PCI is not set +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_HSQ is not set +CONFIG_MMC_TOSHIBA_PCI=m +CONFIG_MMC_MTK=m +# CONFIG_MMC_SDHCI_XENON is not set +# CONFIG_MMC_SDHCI_OMAP is not set +# CONFIG_MMC_SDHCI_AM654 is not set +# CONFIG_SCSI_UFSHCD is not set 
+CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS_FLASH=m +# CONFIG_LEDS_CLASS_MULTICOLOR is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AN30259A is not set +# CONFIG_LEDS_AW200XX is not set +# CONFIG_LEDS_AW2013 is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +# CONFIG_LEDS_EL15203000 is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP50XX is not set +# CONFIG_LEDS_LP55XX_COMMON is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2606MVV is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_LT3593=m +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_SPI_BYTE is not set +# CONFIG_LEDS_LM3697 is not set + +# +# Flash and Torch LED drivers +# +# CONFIG_LEDS_AAT1290 is not set +# CONFIG_LEDS_AS3645A is not set +# CONFIG_LEDS_KTD2692 is not set +# CONFIG_LEDS_LM3601X is 
not set +# CONFIG_LEDS_RT4505 is not set +# CONFIG_LEDS_RT8515 is not set +# CONFIG_LEDS_SGM3140 is not set + +# +# RGB LED drivers +# + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +# CONFIG_LEDS_TRIGGER_DISK is not set +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set +# CONFIG_LEDS_TRIGGER_AUDIO is not set +# CONFIG_LEDS_TRIGGER_TTY is not set + +# +# Simple LED drivers +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +CONFIG_INFINIBAND_ERDMA=m +CONFIG_INFINIBAND_HNS=m +# CONFIG_INFINIBAND_HNS_HIP08 is not set +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_QEDR=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +CONFIG_EDAC_DEBUG=y 
+CONFIG_EDAC_GHES=y +CONFIG_EDAC_THUNDERX=m +CONFIG_EDAC_XGENE=m +# CONFIG_EDAC_DMC520 is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +CONFIG_RTC_DRV_ABB5ZES3=m +# CONFIG_RTC_DRV_ABEOZ9 is not set +CONFIG_RTC_DRV_ABX80X=m +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1374_WDT=y +CONFIG_RTC_DRV_DS1672=m +# CONFIG_RTC_DRV_HYM8563 is not set +CONFIG_RTC_DRV_MAX6900=m +# CONFIG_RTC_DRV_NCT3018Y is not set +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +# CONFIG_RTC_DRV_ISL12026 is not set +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF85063=m +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8010=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_DRV_M41T93=m +CONFIG_RTC_DRV_M41T94=m +# CONFIG_RTC_DRV_DS1302 is not set +CONFIG_RTC_DRV_DS1305=m +CONFIG_RTC_DRV_DS1343=m +CONFIG_RTC_DRV_DS1347=m +CONFIG_RTC_DRV_DS1390=m +# CONFIG_RTC_DRV_MAX6916 is not set +CONFIG_RTC_DRV_R9701=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_RS5C348=m +CONFIG_RTC_DRV_MAX6902=m +CONFIG_RTC_DRV_PCF2123=m +CONFIG_RTC_DRV_MCP795=m +CONFIG_RTC_I2C_AND_SPI=m + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m 
+CONFIG_RTC_DRV_DS3232_HWMON=y +CONFIG_RTC_DRV_PCF2127=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +# CONFIG_RTC_DRV_RX6110 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1685_FAMILY=m +CONFIG_RTC_DRV_DS1685=y +# CONFIG_RTC_DRV_DS1689 is not set +# CONFIG_RTC_DRV_DS17285 is not set +# CONFIG_RTC_DRV_DS17485 is not set +# CONFIG_RTC_DRV_DS17885 is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_XGENE is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_GOLDFISH is not set +CONFIG_DMADEVICES=y +CONFIG_DMADEVICES_DEBUG=y +CONFIG_DMADEVICES_VDEBUG=y + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_BCM_SBA_RAID is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_FSL_QDMA is not set +# CONFIG_HISI_DMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_K3_DMA is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_PLX_DMA is not set +# CONFIG_XGENE_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_XILINX_ZYNQMP_DPDMA is not set +# CONFIG_QCOM_BAM_DMA is not set +# CONFIG_QCOM_GPI_DMA is not set +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +CONFIG_DW_DMAC_CORE=m +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=m +# CONFIG_DW_EDMA is not set +# CONFIG_SF_PDMA is not set + 
+# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +CONFIG_DMATEST=m +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +CONFIG_DMABUF_DEBUG=y +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# CONFIG_DMABUF_SYSFS_STATS is not set +# end of DMABUF options + +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI=m +# CONFIG_MLX5_VFIO_PCI is not set +# end of VFIO support for PCI devices + +# +# VFIO support for platform devices +# +# CONFIG_VFIO_PLATFORM is not set +# CONFIG_VFIO_AMBA is not set +# end of VFIO support for platform devices + +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO_ANCHOR=y +CONFIG_VIRTIO=m +CONFIG_VIRTIO_PCI_LIB=m +CONFIG_VIRTIO_PCI_LIB_LEGACY=m +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_MEM=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +# CONFIG_COMEDI is not set +CONFIG_STAGING=y +# CONFIG_RTS5208 is not set +# CONFIG_VT6655 is 
not set +# CONFIG_FB_SM750 is not set +# CONFIG_STAGING_MEDIA is not set +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_FB_TFT is not set +# CONFIG_KS7010 is not set +# CONFIG_PI433 is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_QLGE is not set +# CONFIG_VME_BUS is not set +# CONFIG_GOLDFISH is not set +CONFIG_CHROME_PLATFORMS=y +# CONFIG_CHROMEOS_ACPI is not set +# CONFIG_CHROMEOS_TBMC is not set +# CONFIG_CROS_EC is not set +# CONFIG_CROS_KBD_LED_BACKLIGHT is not set +# CONFIG_CROS_HPS_I2C is not set +# CONFIG_CHROMEOS_PRIVACY_SCREEN is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE_3_POWER_OPREGION is not set +# CONFIG_SURFACE_GPE is not set +# CONFIG_SURFACE_HOTPLUG is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Clock driver for ARM Reference designs +# +# CONFIG_CLK_ICST is not set +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# end of Clock driver for ARM Reference designs + +# CONFIG_LMK04832 is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +CONFIG_COMMON_CLK_SCPI=m +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_AXI_CLKGEN is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_RS9_PCIE is not set +# CONFIG_COMMON_CLK_SI521XX is not set +# CONFIG_COMMON_CLK_VC3 is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_VC7 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3559A=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3670=y 
+CONFIG_COMMON_CLK_HI3798CV200=y +# CONFIG_COMMON_CLK_HI6220 is not set +CONFIG_RESET_HISI=y +CONFIG_STUB_CLK_HI3660=y +# CONFIG_COMMON_CLK_QCOM is not set +# CONFIG_XILINX_VCU is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +CONFIG_HWSPINLOCK=y +# CONFIG_HWSPINLOCK_QCOM is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_ARM_MHU=m +# CONFIG_ARM_MHU_V2 is not set +# CONFIG_PLATFORM_MHU is not set +# CONFIG_PL320_MBOX is not set +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_HI3660_MBOX=y +CONFIG_HI6220_MBOX=y +CONFIG_MAILBOX_TEST=m +# CONFIG_QCOM_APCS_IPC is not set +CONFIG_XGENE_SLIMPRO_MBOX=m +# CONFIG_QCOM_IPCC is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set +# CONFIG_IOMMU_IO_PGTABLE_DART is not set +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +CONFIG_IOMMU_DEFAULT_DMA_STRICT=y +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +# CONFIG_IOMMUFD is not set +CONFIG_ARM_SMMU=y +# CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set +CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y +CONFIG_ARM_SMMU_QCOM=y +# CONFIG_ARM_SMMU_QCOM_DEBUG is not set +CONFIG_ARM_SMMU_V3=y +# CONFIG_ARM_SMMU_V3_SVA is not set +# CONFIG_QCOM_IOMMU is not set +# CONFIG_VIRTIO_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg 
drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# CONFIG_QUICC_ENGINE is not set +# CONFIG_FSL_RCPM is not set +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# CONFIG_A64FX_DIAG is not set +# end of fujitsu SoC drivers + +# +# Hisilicon SoC drivers +# +# CONFIG_KUNPENG_HCCS is not set +# end of Hisilicon SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# CONFIG_LITEX_SOC_CONTROLLER is not set +# end of Enable LiteX SoC Builder specific drivers + +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# CONFIG_QCOM_AOSS_QMP is not set +# CONFIG_QCOM_COMMAND_DB is not set +# CONFIG_QCOM_CPR is not set +# CONFIG_QCOM_GENI_SE is not set +# CONFIG_QCOM_GSBI is not set +# CONFIG_QCOM_LLCC is not set +CONFIG_QCOM_KRYO_L2_ACCESSORS=y +# CONFIG_QCOM_OCMEM is not set +# CONFIG_QCOM_RAMP_CTRL is not set +# CONFIG_QCOM_RMTFS_MEM is not set +# CONFIG_QCOM_RPM_MASTER_STATS is not set +# CONFIG_QCOM_RPMH is not set +# CONFIG_QCOM_SMEM is not set +# CONFIG_QCOM_SPM is not set +# CONFIG_QCOM_ICC_BWMON is not set +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +# CONFIG_EXTCON_FSA9480 is not set +CONFIG_EXTCON_GPIO=m +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_PTN5150 is not set +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set 
+# CONFIG_EXTCON_USBC_TUSB320 is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_ATMEL_TCB is not set +# CONFIG_PWM_CLK is not set +# CONFIG_PWM_DWC is not set +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_HIBVT is not set +# CONFIG_PWM_PCA9685 is not set +# CONFIG_PWM_XILINX is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +# CONFIG_AL_FIC is not set +CONFIG_HISILICON_IRQ_MBIGEN=y +# CONFIG_XILINX_INTC is not set +CONFIG_PARTITION_PERCPU=y +CONFIG_QCOM_IRQ_COMBINER=y +# CONFIG_QCOM_PDC is not set +# CONFIG_QCOM_MPM is not set +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_QCOM_AOSS is not set +# CONFIG_RESET_QCOM_PDC is not set +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set +# CONFIG_COMMON_RESET_HI3660 is not set +CONFIG_COMMON_RESET_HI6220=m + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +CONFIG_PHY_XGENE=y +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_CADENCE_TORRENT is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_DPHY_RX is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_CADENCE_SALVO is not set +CONFIG_PHY_HI6220_USB=m +# CONFIG_PHY_HI3660_USB is not set +# CONFIG_PHY_HI3670_USB is not set +# CONFIG_PHY_HI3670_PCIE is not set +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_LAN966X_SERDES is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_OCELOT_SERDES is not set +# CONFIG_PHY_QCOM_APQ8064_SATA is not set 
+# CONFIG_PHY_QCOM_EDP is not set +# CONFIG_PHY_QCOM_IPQ4019_USB is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +# CONFIG_PHY_QCOM_PCIE2 is not set +# CONFIG_PHY_QCOM_QMP is not set +# CONFIG_PHY_QCOM_QUSB2 is not set +# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set +# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set +# CONFIG_PHY_QCOM_M31_USB is not set +# CONFIG_PHY_QCOM_USB_HS is not set +# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set +# CONFIG_PHY_QCOM_USB_HSIC is not set +# CONFIG_PHY_QCOM_USB_HS_28NM is not set +# CONFIG_PHY_QCOM_USB_SS is not set +# CONFIG_PHY_QCOM_IPQ806X_USB is not set +# CONFIG_PHY_QCOM_SGMII_ETH is not set +# CONFIG_PHY_TUSB1210 is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_ARM_CCI_PMU is not set +CONFIG_ARM_CCN=y +CONFIG_ARM_CMN=y +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +CONFIG_ARM_SMMU_V3_PMU=y +CONFIG_ARM_PMUV3=y +CONFIG_ARM_DSU_PMU=y +CONFIG_QCOM_L2_PMU=y +CONFIG_QCOM_L3_PMU=y +CONFIG_THUNDERX2_PMU=m +CONFIG_XGENE_PMU=y +CONFIG_ARM_SPE_PMU=m +# CONFIG_ARM_DMC620_PMU is not set +# CONFIG_MARVELL_CN10K_TAD_PMU is not set +CONFIG_ALIBABA_UNCORE_DRW_PMU=m +CONFIG_HISI_PMU=y +# CONFIG_HISI_PCIE_PMU is not set +# CONFIG_HNS3_PMU is not set +# CONFIG_MARVELL_CN10K_DDR_PMU is not set +# CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set +# end of Performance monitor support + +CONFIG_RAS=y +# CONFIG_USB4 is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_ND_PFN=m +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_OF_PMEM=m +CONFIG_NVDIMM_KEYS=y +# CONFIG_NVDIMM_SECURITY_TEST is not set +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_PMEM=m +CONFIG_DEV_DAX_HMEM=m +CONFIG_DEV_DAX_HMEM_DEVICES=y +# CONFIG_DEV_DAX_KMEM is not set +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD 
is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_QCOM_QFPROM is not set +# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set +# CONFIG_NVMEM_RMEM is not set +# CONFIG_NVMEM_U_BOOT_ENV is not set + +# +# HW tracing support +# +CONFIG_STM=m +# CONFIG_STM_PROTO_BASIC is not set +# CONFIG_STM_PROTO_SYS_T is not set +# CONFIG_STM_DUMMY is not set +# CONFIG_STM_SOURCE_CONSOLE is not set +# CONFIG_STM_SOURCE_HEARTBEAT is not set +# CONFIG_STM_SOURCE_FTRACE is not set +# CONFIG_INTEL_TH is not set +# CONFIG_HISI_PTT is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_TEE=m +# CONFIG_OPTEE is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# CONFIG_CDX_BUS is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=m +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_JBD2=m +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=m +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +CONFIG_XFS_WARN=y +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# 
CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QUOTA_DEBUG=y +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_FUSE_DAX=y +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +# CONFIG_OVERLAY_FS_NFS_EXPORT is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=m +CONFIG_NETFS_STATS=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +CONFIG_CACHEFILES_ONDEMAND=y +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_EXFAT_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_NTFS3_FS is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_ARCH_SUPPORTS_HUGETLBFS=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + 
+CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_CONSOLE=y +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=y +# CONFIG_PSTORE_BLK is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_EROFS_FS=m +CONFIG_EROFS_FS_DEBUG=y +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y +CONFIG_EROFS_FS_ZIP_LZMA=y +# CONFIG_EROFS_FS_ZIP_DEFLATE is not set +CONFIG_EROFS_FS_ONDEMAND=y +# CONFIG_EROFS_FS_PCPU_KTHREAD is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y 
+CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=m +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m 
+CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_NLS_UCS2_UTILS=m +CONFIG_DLM=m +# CONFIG_DLM_DEBUG is not set +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=m +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_TRUSTED_KEYS_TEE=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 
+CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +# CONFIG_IMA_KEXEC is not set +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_NG_TEMPLATE is not set +CONFIG_IMA_SIG_TEMPLATE=y +CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +CONFIG_IMA_APPRAISE_BUILD_POLICY=y +# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y +CONFIG_IMA_BLACKLIST_KEYRING=y +CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set +# CONFIG_IMA_DISABLE_HTABLE is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_EVM_LOAD_X509=y 
+CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_STACK_ALL_PATTERN is not set +# CONFIG_INIT_STACK_ALL_ZERO is not set +# CONFIG_GCC_PLUGIN_STACKLEAK is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Hardening of kernel data structures + +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y 
+CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +# CONFIG_CRYPTO_ECDH is not set +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +CONFIG_CRYPTO_SM2=y +# CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=y +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=m +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=m +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m 
+CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=m +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=y +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +# end of Compression + +# +# Random number generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y +# CONFIG_CRYPTO_NHPOLY1305_NEON is not set +CONFIG_CRYPTO_CHACHA20_NEON=m + +# +# Accelerated Cryptographic Algorithms for CPU (arm64) +# +CONFIG_CRYPTO_GHASH_ARM64_CE=m +CONFIG_CRYPTO_POLY1305_NEON=m +CONFIG_CRYPTO_SHA1_ARM64_CE=m +CONFIG_CRYPTO_SHA256_ARM64=m +CONFIG_CRYPTO_SHA2_ARM64_CE=m +# CONFIG_CRYPTO_SHA512_ARM64 is not set +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set +# CONFIG_CRYPTO_SHA3_ARM64 is not set +CONFIG_CRYPTO_SM3_NEON=m +CONFIG_CRYPTO_SM3_ARM64_CE=m +# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_CRYPTO_AES_ARM64_CE=y 
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_CRYPTO_AES_ARM64_BS=m +CONFIG_CRYPTO_SM4_ARM64_CE=m +CONFIG_CRYPTO_SM4_ARM64_CE_BLK=m +CONFIG_CRYPTO_SM4_ARM64_NEON_BLK=m +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_SM4_ARM64_CE_CCM=m +CONFIG_CRYPTO_SM4_ARM64_CE_GCM=m +CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m +# end of Accelerated Cryptographic Algorithms for CPU (arm64) + +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set +CONFIG_CRYPTO_DEV_CPT=m +CONFIG_CAVIUM_CPT=m +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +# CONFIG_CRYPTO_DEV_OCTEONTX_CPT is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m +# CONFIG_CRYPTO_DEV_QCE is not set +# CONFIG_CRYPTO_DEV_QCOM_RNG is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +# CONFIG_CRYPTO_DEV_VIRTIO is not set +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_HISI_SEC is not set +# CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +# CONFIG_CRYPTO_DEV_HISI_ZIP is not set +# CONFIG_CRYPTO_DEV_HISI_HPRE is not set +# CONFIG_CRYPTO_DEV_HISI_TRNG is not set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# 
+CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# CONFIG_SYSTEM_REVOCATION_LIST is not set +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_LINEAR_RANGES=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y +CONFIG_INDIRECT_PIO=y +# CONFIG_TRACE_MMIO_ACCESS is not set + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# 
CONFIG_CRC32_BIT is not set +CONFIG_CRC64=y +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_OPS=y +CONFIG_NEED_SG_DMA_FLAGS=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_ARCH_HAS_SETUP_DMA_OPS=y +CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y +CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y +# CONFIG_DMA_RESTRICTED_POOL is not set +CONFIG_DMA_NONCOHERENT_MMAP=y +CONFIG_DMA_COHERENT_POOL=y +CONFIG_DMA_DIRECT_REMAP=y +CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=64 
+CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +CONFIG_DMA_API_DEBUG=y +CONFIG_DMA_API_DEBUG_SG=y +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +# CONFIG_CPUMASK_OFFSTACK is not set +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_MEMREGION=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_STACKDEPOT_ALWAYS_INIT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of Library routines + +CONFIG_GENERIC_IOREMAP=y +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=m + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# 
CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_HONOUR_BLOCKLIST=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x0 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +CONFIG_UBSAN=y +# CONFIG_UBSAN_TRAP is not set +CONFIG_CC_HAS_UBSAN_BOUNDS_STRICT=y +CONFIG_CC_HAS_UBSAN_ARRAY_BOUNDS=y +CONFIG_UBSAN_BOUNDS=y +CONFIG_UBSAN_BOUNDS_STRICT=y +CONFIG_UBSAN_ARRAY_BOUNDS=y +CONFIG_UBSAN_SHIFT=y +# CONFIG_UBSAN_DIV_ZERO is not set +# CONFIG_UBSAN_UNREACHABLE is not set +CONFIG_UBSAN_BOOL=y +CONFIG_UBSAN_ENUM=y +# CONFIG_UBSAN_ALIGNMENT is not set +CONFIG_UBSAN_SANITIZE_ALL=y +# CONFIG_TEST_UBSAN is not set +CONFIG_HAVE_KCSAN_COMPILER=y +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +CONFIG_PAGE_EXTENSION=y +CONFIG_DEBUG_PAGEALLOC=y +# CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT is not set 
+CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_TABLE_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_DEBUG_PAGE_REF=y +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_ARCH_HAS_DEBUG_WX=y +# CONFIG_DEBUG_WX is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y +# CONFIG_PER_VMA_LOCK_STATS is not set +CONFIG_DEBUG_OBJECTS=y +# CONFIG_DEBUG_OBJECTS_SELFTEST is not set +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 +# CONFIG_SHRINKER_DEBUG is not set +CONFIG_DEBUG_STACK_USAGE=y +# CONFIG_SCHED_STACK_END_CHECK is not set +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_VM_PGTABLE is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_PER_CPU_MAPS=y +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y +CONFIG_HAVE_ARCH_KASAN_HW_TAGS=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +CONFIG_KASAN=y +CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y +CONFIG_KASAN_GENERIC=y +# CONFIG_KASAN_SW_TAGS is not set +# CONFIG_KASAN_HW_TAGS is not set +# CONFIG_KASAN_OUTLINE is not set +CONFIG_KASAN_INLINE=y +CONFIG_KASAN_STACK=y +CONFIG_KASAN_VMALLOC=y +# CONFIG_KASAN_MODULE_TEST is not set +CONFIG_HAVE_ARCH_KFENCE=y +CONFIG_KFENCE=y +CONFIG_KFENCE_SAMPLE_INTERVAL=0 +CONFIG_KFENCE_NUM_OBJECTS=255 +# CONFIG_KFENCE_DEFERRABLE is not set +CONFIG_KFENCE_STRESS_TEST_FAULTS=0 +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y 
+CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=1 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_WQ_WATCHDOG=y +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +CONFIG_PROVE_LOCKING=y +# CONFIG_PROVE_RAW_LOCK_NESTING is not set +# CONFIG_LOCK_STAT is not set +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y +CONFIG_DEBUG_RWSEMS=y +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_LOCKDEP=y +CONFIG_LOCKDEP_BITS=15 +CONFIG_LOCKDEP_CHAINS_BITS=16 +CONFIG_LOCKDEP_STACK_TRACE_BITS=19 +CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS=14 +CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS=12 +# CONFIG_DEBUG_LOCKDEP is not set +CONFIG_DEBUG_ATOMIC_SLEEP=y +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_LOCK_TORTURE_TEST=m +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ +CONFIG_TRACE_IRQFLAGS=y +CONFIG_TRACE_IRQFLAGS_NMI=y +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +# CONFIG_DEBUG_KOBJECT_RELEASE is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +CONFIG_DEBUG_SG=y +CONFIG_DEBUG_NOTIFIERS=y +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +CONFIG_DEBUG_CREDENTIALS=y + +# +# RCU Debugging +# +CONFIG_PROVE_RCU=y +CONFIG_TORTURE_TEST=m +# CONFIG_RCU_SCALE_TEST is not set +CONFIG_RCU_TORTURE_TEST=m +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +CONFIG_LATENCYTOP=y +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_PREEMPTIRQ_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_STACK_TRACER=y +# 
CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_OSNOISE_TRACER=y +CONFIG_TIMERLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y +CONFIG_TRACING_MAP=y +CONFIG_SYNTH_EVENTS=y +# CONFIG_USER_EVENTS is not set +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_SYNTH_EVENT_GEN_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_HIST_TRIGGERS_DEBUG is not set +# CONFIG_RV is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# arm64 Debugging +# +CONFIG_PID_IN_CONTEXTIDR=y +# CONFIG_DEBUG_EFI is not set +# CONFIG_ARM64_RELOC_TEST is not set +CONFIG_CORESIGHT=m +CONFIG_CORESIGHT_LINKS_AND_SINKS=m +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=m +CONFIG_CORESIGHT_CATU=m +CONFIG_CORESIGHT_SINK_TPIU=m +CONFIG_CORESIGHT_SINK_ETBV10=m +CONFIG_CORESIGHT_SOURCE_ETM4X=m +CONFIG_ETM4X_IMPDEF_FEATURE=y +CONFIG_CORESIGHT_STM=m +CONFIG_CORESIGHT_CPU_DEBUG=m +# CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON is not set +CONFIG_CORESIGHT_CTI=m 
+CONFIG_CORESIGHT_CTI_INTEGRATION_REGS=y +# CONFIG_CORESIGHT_TRBE is not set +# CONFIG_ULTRASOC_SMB is not set +# CONFIG_CORESIGHT_TPDM is not set +# CONFIG_CORESIGHT_TPDA is not set +# CONFIG_CORESIGHT_DUMMY is not set +# end of arm64 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAILSLAB=y +CONFIG_FAIL_PAGE_ALLOC=y +# CONFIG_FAULT_INJECTION_USERCOPY is not set +CONFIG_FAIL_MAKE_REQUEST=y +CONFIG_FAIL_IO_TIMEOUT=y +# CONFIG_FAIL_FUTEX is not set +CONFIG_FAULT_INJECTION_DEBUG_FS=y +# CONFIG_FAIL_FUNCTION is not set +CONFIG_FAIL_MMC_REQUEST=y +# CONFIG_FAIL_SUNRPC is not set +# CONFIG_FAULT_INJECTION_CONFIGFS is not set +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_TEST_DHRY is not set +# CONFIG_LKDTM is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_TEST_REF_TRACKER is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +CONFIG_PERCPU_TEST=m +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_STRING_SELFTEST is not set +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_SCANF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_MAPLE_TREE is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# 
CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_DYNAMIC_DEBUG is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_FREE_PAGES is not set +CONFIG_ARCH_USE_MEMTEST=y +# CONFIG_MEMTEST is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig new file mode 100644 index 000000000000..ffd410167da4 --- /dev/null +++ b/arch/arm64/configs/anolis_defconfig @@ -0,0 +1,7124 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/arm64 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y 
+CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y +CONFIG_GENERIC_IRQ_IPI=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_CONTEXT_TRACKING_USER=y +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y + +# +# BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +CONFIG_BPF_UNPRIV_DEFAULT_OFF=y +# CONFIG_BPF_PRELOAD is not set +CONFIG_BPF_LSM=y +# end of BPF subsystem + +CONFIG_PREEMPT_VOLUNTARY_BUILD=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +# CONFIG_PREEMPT_DYNAMIC is not set +CONFIG_SCHED_CORE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_SCHED_THERMAL_PRESSURE=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +# end of CPU/Task time and 
stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_RCU_NOCB_CPU=y +# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set +# CONFIG_RCU_LAZY is not set +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set 
+CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_GUEST_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HAVE_IMA_KEXEC=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_KEXEC_SIG=y +CONFIG_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_CRASH_DUMP=y +# end of Kexec and crash features +# end of General setup + +CONFIG_ARM64=y +CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_CONT_PTE_SHIFT=4 +CONFIG_ARM64_CONT_PMD_SHIFT=4 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y 
+CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_APPLE is not set +# CONFIG_ARCH_BCM is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BITMAIN is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_SPARX5 is not set +# CONFIG_ARCH_K3 is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_KEEMBAY is not set +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +# CONFIG_ARCH_NXP is not set +# CONFIG_ARCH_MA35 is not set +# CONFIG_ARCH_NPCM is not set +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_ROCKCHIP is not set +CONFIG_ARCH_SEATTLE=y +# CONFIG_ARCH_INTEL_SOCFPGA is not set +# CONFIG_ARCH_STM32 is not set +# CONFIG_ARCH_SYNQUACER is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_THUNDER2=y +# CONFIG_ARCH_UNIPHIER is not set +CONFIG_ARCH_VEXPRESS=y +# CONFIG_ARCH_VISCONTI is not set +CONFIG_ARCH_XGENE=y +# CONFIG_ARCH_ZYNQMP is not set +# end of Platform selection + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y +CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_ARM64_ERRATUM_1742098=y +CONFIG_ARM64_ERRATUM_845719=y +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y +CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1418040=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y 
+CONFIG_ARM64_ERRATUM_1165522=y +CONFIG_ARM64_ERRATUM_1319367=y +CONFIG_ARM64_ERRATUM_1530923=y +CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y +CONFIG_ARM64_ERRATUM_2441007=y +CONFIG_ARM64_ERRATUM_1286807=y +CONFIG_ARM64_ERRATUM_1463225=y +CONFIG_ARM64_ERRATUM_1542419=y +CONFIG_ARM64_ERRATUM_1508412=y +CONFIG_ARM64_ERRATUM_2051678=y +CONFIG_ARM64_ERRATUM_2077057=y +CONFIG_ARM64_ERRATUM_2658417=y +CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y +CONFIG_ARM64_ERRATUM_2054223=y +CONFIG_ARM64_ERRATUM_2067961=y +CONFIG_ARM64_ERRATUM_2441009=y +CONFIG_ARM64_ERRATUM_2645198=y +CONFIG_ARM64_ERRATUM_2966298=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23144=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_CAVIUM_TX2_ERRATUM_219=y +CONFIG_FUJITSU_ERRATUM_010001=y +CONFIG_HISILICON_ERRATUM_161600802=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y +CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y +CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y +CONFIG_ROCKCHIP_ERRATUM_3588001=y +CONFIG_SOCIONEXT_SYNQUACER_PREITS=y +# end of ARM errata workarounds via the alternatives framework + +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_16K_PAGES is not set +# CONFIG_ARM64_64K_PAGES is not set +# CONFIG_ARM64_VA_BITS_39 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_SCHED_MC=y +# CONFIG_SCHED_CLUSTER is not set +CONFIG_SCHED_SMT=y +CONFIG_NR_CPUS=1024 +CONFIG_HOTPLUG_CPU=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=6 +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_CC_HAVE_SHADOW_CALL_STACK=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y 
+CONFIG_ARCH_SELECTS_KEXEC_FILE=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_TRANS_TABLE=y +# CONFIG_XEN is not set +CONFIG_ARCH_FORCE_MAX_ORDER=10 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y +# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set +# CONFIG_ARM64_SW_TTBR0_PAN is not set +CONFIG_ARM64_TAGGED_ADDR_ABI=y +CONFIG_COMPAT=y +CONFIG_KUSER_HELPERS=y +# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set +# CONFIG_ARMV8_DEPRECATED is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_AS_HAS_LSE_ATOMICS=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_USE_LSE_ATOMICS=y +# end of ARMv8.1 architectural features + +# +# ARMv8.2 architectural features +# +CONFIG_AS_HAS_ARMV8_2=y +CONFIG_AS_HAS_SHA3=y +CONFIG_ARM64_PMEM=y +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_CNP=y +# end of ARMv8.2 architectural features + +# +# ARMv8.3 architectural features +# +# CONFIG_ARM64_PTR_AUTH is not set +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y +CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y +CONFIG_AS_HAS_ARMV8_3=y +CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y +CONFIG_AS_HAS_LDAPR=y +# end of ARMv8.3 architectural features + +# +# ARMv8.4 architectural features +# +# CONFIG_ARM64_AMU_EXTN is not set +CONFIG_AS_HAS_ARMV8_4=y +CONFIG_ARM64_TLB_RANGE=y +# end of ARMv8.4 architectural features + +# +# ARMv8.5 architectural features +# +CONFIG_AS_HAS_ARMV8_5=y +# CONFIG_ARM64_BTI is not set +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y +CONFIG_ARM64_E0PD=y +CONFIG_ARM64_AS_HAS_MTE=y +CONFIG_ARM64_MTE=y +# end of ARMv8.5 architectural features + +# +# ARMv8.7 architectural features +# +CONFIG_ARM64_EPAN=y +# end of ARMv8.7 architectural features + +CONFIG_ARM64_SVE=y +CONFIG_ARM64_SME=y +CONFIG_ARM64_PSEUDO_NMI=y +# CONFIG_ARM64_DEBUG_PRIORITY_MASKING is not set +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y 
+CONFIG_RANDOMIZE_MODULE_REGION_FULL=y +CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y +CONFIG_STACKPROTECTOR_PER_TASK=y +# end of Kernel Features + +# +# Boot options +# +CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y +CONFIG_CMDLINE="console=ttyAMA0" +CONFIG_CMDLINE_FROM_BOOTLOADER=y +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +# end of Boot options + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +# CONFIG_PM_ADVANCED_DEBUG is not set +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +# end of Power management options + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set + +# +# ARM CPU Idle Drivers +# +# CONFIG_ARM_PSCI_CPUIDLE is not set +# end of ARM CPU Idle Drivers +# end of CPU Idle + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y 
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_CPUFREQ_DT_PLATDEV is not set +CONFIG_ACPI_CPPC_CPUFREQ=y +CONFIG_ACPI_CPPC_CPUFREQ_FIE=y +CONFIG_ARM_SCPI_CPUFREQ=m +# CONFIG_ARM_QCOM_CPUFREQ_HW is not set +# end of CPU Frequency scaling +# end of CPU Power Management + +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_FPDT is not set +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +CONFIG_ACPI_NFIT=m +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_SEA=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_AGDI=y +CONFIG_ACPI_APMT=y +CONFIG_ACPI_PPTT=y +CONFIG_ACPI_PCC=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +CONFIG_ACPI_PRMT=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_HAVE_KVM=y 
+CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +# CONFIG_NVHE_EL2_DEBUG is not set + +# +# General architecture-dependent options +# +CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y +CONFIG_HOTPLUG_CORE_SYNC=y +CONFIG_HOTPLUG_CORE_SYNC_DEAD=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_KEEPINITRD=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y 
+CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK=y +# CONFIG_SHADOW_CALL_STACK is not set +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y +# CONFIG_CFI_CLANG is not set +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PUD=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y +CONFIG_RANDOMIZE_KSTACK_OFFSET=y +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_COMPILER_H=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not 
set +CONFIG_ARCH_HAS_RELR=y +CONFIG_RELR=y +CONFIG_HAVE_PREEMPT_DYNAMIC=y +CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y +CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT_4B=y +CONFIG_FUNCTION_ALIGNMENT_8B=y +CONFIG_FUNCTION_ALIGNMENT=8 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +# CONFIG_MODULE_SIG_ALL is not set +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# 
CONFIG_BLK_CGROUP_FC_APPID is not set +CONFIG_BLK_CGROUP_IOCOST=y +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y 
+CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_TRYLOCK=y +CONFIG_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_INLINE_SPIN_LOCK=y +CONFIG_INLINE_SPIN_LOCK_BH=y +CONFIG_INLINE_SPIN_LOCK_IRQ=y +CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_LOCK=y +CONFIG_INLINE_READ_LOCK_BH=y +CONFIG_INLINE_READ_LOCK_IRQ=y +CONFIG_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_LOCK=y +CONFIG_INLINE_WRITE_LOCK_BH=y +CONFIG_INLINE_WRITE_LOCK_IRQ=y +CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y +CONFIG_ARCH_HAVE_ELF_PROT=y +CONFIG_ARCH_USE_GNU_PROPERTY=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is 
not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_EXCLUSIVE_SYSTEM_RAM=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_MHP_MEMMAP_ON_MEMORY=y +CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_DEVICE_MIGRATION=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y 
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_THP_SWAP=y +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=19 +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_PAGE_IDLE_FLAG=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DMA=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DEVICE=y +CONFIG_HMM_MIRROR=y +# CONFIG_DEVICE_PRIVATE is not set +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_USES_PG_ARCH_X=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MEMFD_CREATE=y +CONFIG_SECRETMEM=y +# CONFIG_ANON_VMA_NAME is not set +CONFIG_USERFAULTFD=y +CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y +CONFIG_LRU_GEN=y +# CONFIG_LRU_GEN_ENABLED is not set +# CONFIG_LRU_GEN_STATS is not set +CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y +CONFIG_PER_VMA_LOCK=y +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +# CONFIG_DAMON_SYSFS is not set +CONFIG_DAMON_DBGFS=y +# CONFIG_DAMON_RECLAIM is not set +# CONFIG_DAMON_LRU_SORT is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m 
+CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +# CONFIG_TLS_TOE is not set +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set 
+CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +# CONFIG_INET6_ESPINTCP is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +# CONFIG_IPV6_RPL_LWTUNNEL is not set +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_MPTCP_IPV6=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m 
+CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set +CONFIG_NETFILTER_XTABLES=y +# CONFIG_NETFILTER_XTABLES_COMPAT is not set + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m 
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m 
+CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m 
+CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NFT_BRIDGE_REJECT=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m 
+CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +# CONFIG_BRIDGE_MRP is not set +# CONFIG_BRIDGE_CFM is not set +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m 
+CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_MQPRIO_LIB=m +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +# CONFIG_NET_SCH_FQ_PIE is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_ETS is not set +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +# CONFIG_NET_ACT_GATE is not set +CONFIG_NET_TC_SKB_EXT=y 
+CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +# 
CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +# CONFIG_PCI_P2PDMA is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCI_DYNAMIC_OF_NODES is not set +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# +# CONFIG_PCIE_ALTERA is not set +CONFIG_PCI_HOST_THUNDER_PEM=y +CONFIG_PCI_HOST_THUNDER_ECAM=y +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_HISI_ERR is not set +# CONFIG_PCIE_MICROCHIP_HOST is not set +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +# CONFIG_PCIE_XILINX is not set + +# +# Cadence-based PCIe controllers +# +# 
CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence-based PCIe controllers + +# +# DesignWare-based PCIe controllers +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_AL is not set +# CONFIG_PCI_MESON is not set +CONFIG_PCI_HISI=y +# CONFIG_PCIE_KIRIN is not set +# CONFIG_PCIE_HISI_STB is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCIE_QCOM is not set +# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# end of Mobiveil-based PCIe controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_CXL_BUS is not set +CONFIG_PCCARD=y +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_CACHE=y +# CONFIG_FW_UPLOAD is not set +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +CONFIG_HMEM_REPORTING=y +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_SOC_BUS=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set 
+CONFIG_GENERIC_ARCH_TOPOLOGY=y +CONFIG_GENERIC_ARCH_NUMA=y +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_BRCMSTB_GISB_ARB is not set +# CONFIG_MOXTET is not set +CONFIG_HISILICON_LPC=y +# CONFIG_QCOM_EBI2 is not set +# CONFIG_QCOM_SSC_BLOCK_BUS is not set +CONFIG_VEXPRESS_CONFIG=y +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# CONFIG_ARM_SCMI_PROTOCOL is not set +# end of ARM System Control and Management Interface Protocol + +CONFIG_ARM_SCPI_PROTOCOL=m +CONFIG_ARM_SCPI_POWER_DOMAIN=m +CONFIG_ARM_SDE_INTERFACE=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +# CONFIG_ISCSI_IBFT is not set +CONFIG_FW_CFG_SYSFS=y +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_QCOM_SCM=y +# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +# CONFIG_ARM_FFA_TRANSPORT is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_SOFT_RESERVE=y +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_GENERIC_STUB=y +# CONFIG_EFI_ZBOOT is not set +CONFIG_EFI_ARMSTUB_DTB_LOADER=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# CONFIG_EFI_DISABLE_RUNTIME is not set +# CONFIG_EFI_COCO_SECRET is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_ARM=y +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +CONFIG_HAVE_ARM_SMCCC=y 
+CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y +CONFIG_ARM_SMCCC_SOC_ID=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. +# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=m +# CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_PHYSMAP_OF is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# 
CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y +CONFIG_OF_NUMA=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_LZORLE=y +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +CONFIG_ZRAM_DEF_COMP="lzo-rle" +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_ZRAM_MULTI_COMP is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# 
CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_UBLK=m +CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_VERBOSE_ERRORS is not set +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +# CONFIG_NVME_AUTH is not set +CONFIG_NVME_TARGET=m +# CONFIG_NVME_TARGET_PASSTHRU is not set +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +# CONFIG_NVME_TARGET_AUTH is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +CONFIG_TIFM_CORE=m +# CONFIG_TIFM_7XX1 is not set +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_DW_XDATA_PCIE is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_HISI_HIKEY_USB is not set +# CONFIG_OPEN_DICE is not set +# CONFIG_VCPU_STALL_DETECTOR is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# 
CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module (requires I2C) +# +# CONFIG_ALTERA_STAPL is not set +# CONFIG_VMWARE_VMCI is not set +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_BCM_VK is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +# CONFIG_UACCE is not set +CONFIG_PVPANIC=y +# CONFIG_PVPANIC_MMIO is not set +# CONFIG_PVPANIC_PCI is not set +# CONFIG_GP_PCI1XXXX is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI_COMMON=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_BLK_DEV_BSG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_HISI_SAS_PCI=m +# CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE is not set +# CONFIG_SCSI_MVSAS is not set +# 
CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +# CONFIG_FCOE is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_EFCT is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=y +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_AHCI_DWC is not set +# CONFIG_AHCI_CEVA is not set +CONFIG_AHCI_XGENE=m 
+CONFIG_SATA_AHCI_SEATTLE=m +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=y +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# 
CONFIG_PATA_OF_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_BITMAP_FILE=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +# CONFIG_MD_MULTIPATH is not set +CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +# CONFIG_DM_EBS is not set +CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_MULTIPATH_HST is not set +# CONFIG_DM_MULTIPATH_IOA is not set +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_DM_AUDIT=y +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_REMOTE_TARGET is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# end 
of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set +# CONFIG_AMT is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +# CONFIG_NETCONSOLE_EXTENDED_LOG is not set +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +CONFIG_AMD_XGBE=m +# CONFIG_AMD_XGBE_DCB is not set +# CONFIG_PDS_CORE is not set +CONFIG_NET_XGENE=m +CONFIG_NET_XGENE_V2=m +CONFIG_NET_VENDOR_AQUANTIA=y +# CONFIG_AQTION is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ASIX=y +# CONFIG_SPI_AX88796C is not set +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +# CONFIG_CNIC is not set +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m 
+CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +# CONFIG_NET_VENDOR_CADENCE is not set +CONFIG_NET_VENDOR_CAVIUM=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_THUNDER_NIC_BGX=m +CONFIG_THUNDER_NIC_RGX=m +CONFIG_CAVIUM_PTP=y +CONFIG_LIQUIDIO_CORE=m +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_CHELSIO_TLS_DEVICE is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +CONFIG_NET_VENDOR_ENGLEDER=y +# CONFIG_TSNEP is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_FUNGIBLE=y +# CONFIG_FUN_ETH is not set +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_GVE=m +CONFIG_NET_VENDOR_HISILICON=y +# CONFIG_HIX5HD2_GMAC is not set +# CONFIG_HISI_FEMAC is not set +# CONFIG_HIP04_ETH is not set +CONFIG_HNS_MDIO=m +CONFIG_HNS=m +CONFIG_HNS_DSAF=m +CONFIG_HNS_ENET=m +CONFIG_HNS3=m +CONFIG_HNS3_HCLGE=m +CONFIG_HNS3_DCB=y +CONFIG_HNS3_HCLGEVF=m +CONFIG_HNS3_ENET=m +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +# CONFIG_E1000 is not set +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y +CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_ICE_SWITCHDEV=y +CONFIG_FM10K=m +CONFIG_IGC=m +# CONFIG_JME is not set 
+CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set +CONFIG_NET_VENDOR_LITEX=y +# CONFIG_LITEX_LITEETH is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_BRIDGE=y +CONFIG_MLX5_CLS_ACT=y +CONFIG_MLX5_TC_CT=y +CONFIG_MLX5_TC_SAMPLE=y +CONFIG_MLX5_CORE_EN_DCB=y +CONFIG_MLX5_CORE_IPOIB=y +# CONFIG_MLX5_MACSEC is not set +# CONFIG_MLX5_EN_IPSEC is not set +# CONFIG_MLX5_EN_TLS is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLX5_SF is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_MLXBF_GIGE is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +CONFIG_NET_VENDOR_MICROSOFT=y +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +CONFIG_NFP_NET_IPSEC=y +# CONFIG_NFP_DEBUG is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +# CONFIG_QLCNIC is not set +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +# CONFIG_NET_VENDOR_BROCADE is not set +CONFIG_NET_VENDOR_QUALCOMM=y 
+# CONFIG_QCA7000_SPI is not set +CONFIG_QCOM_EMAC=m +# CONFIG_RMNET is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_SFC_SIENA is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_LIBWX=m +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_EMACLITE is not set +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLINK=m +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y +CONFIG_PHYLIB_LEDS=y +CONFIG_FIXED_PHY=y +CONFIG_SFP=m + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +# CONFIG_ADIN_PHY is not set +# CONFIG_ADIN1100_PHY is not set +CONFIG_AQUANTIA_PHY=m +CONFIG_AX88796B_PHY=m +CONFIG_BROADCOM_PHY=m +# CONFIG_BCM54140_PHY is not set +CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM84881_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BCM_NET_PHYPTP=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m 
+CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set +CONFIG_AT803X_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +# CONFIG_PSE_CONTROLLER is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_FWNODE_MDIO=y +CONFIG_OF_MDIO=y +CONFIG_ACPI_MDIO=y +CONFIG_MDIO_DEVRES=y +CONFIG_MDIO_XGENE=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_CAVIUM=m +CONFIG_MDIO_GPIO=m +CONFIG_MDIO_HISI_FEMAC=m +CONFIG_MDIO_I2C=m +# CONFIG_MDIO_MVUSB is not set +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_OCTEON=m +# CONFIG_MDIO_IPQ4019 is not set +# CONFIG_MDIO_IPQ8064 is not set +CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=m +# end of PCS device drivers + +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +# CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# 
CONFIG_PPPOE_HASH_BITS_8 is not set +CONFIG_PPPOE_HASH_BITS=4 +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set +CONFIG_USB_RTL8153_ECM=m +# CONFIG_WLAN is not set +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + +# +# X.25/LAPB support is disabled +# +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +CONFIG_IEEE802154_DRIVERS=m +# CONFIG_IEEE802154_FAKELB is not set +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# 
CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +CONFIG_KEYBOARD_GPIO=m +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_PINEPHONE is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +# CONFIG_MOUSE_PS2 is not set +# CONFIG_MOUSE_SERIAL is 
not set +# CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +CONFIG_RMI4_F34=y +# CONFIG_RMI4_F3A is not set +CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=y +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_LEGACY_TIOCSTI=y +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_16550A_VARIANTS=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCILIB=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# 
CONFIG_SERIAL_8250_PCI1XXXX is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_RT288X=y +CONFIG_SERIAL_8250_PERICOM=y +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_MSM is not set +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_SPRD is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_NOZOMI is not set +# CONFIG_NULL_TTY is not set +CONFIG_HVC_DRIVER=y +# CONFIG_HVC_DCC is not set +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +# CONFIG_IPMI_IPMB is not set +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +# CONFIG_SSIF_IPMI_BMC is not set +# CONFIG_IPMB_DEVICE_INTERFACE is not set +CONFIG_HW_RANDOM=y 
+CONFIG_HW_RANDOM_TIMERIOMEM=m +# CONFIG_HW_RANDOM_BA431 is not set +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_HISI=y +CONFIG_HW_RANDOM_HISTB=y +CONFIG_HW_RANDOM_XGENE=m +CONFIG_HW_RANDOM_CAVIUM=m +# CONFIG_HW_RANDOM_CCTRNG is not set +# CONFIG_HW_RANDOM_XIPHERA is not set +CONFIG_HW_RANDOM_ARM_SMCCC_TRNG=y +CONFIG_HW_RANDOM_CN10K=y +# CONFIG_APPLICOM is not set +CONFIG_DEVMEM=y +# CONFIG_DEVPORT is not set +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +CONFIG_TCG_TIS_SPI=m +# CONFIG_TCG_TIS_SPI_CR50 is not set +# CONFIG_TCG_TIS_I2C is not set +# CONFIG_TCG_TIS_I2C_CR50 is not set +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +CONFIG_TCG_ATMEL=m +# CONFIG_TCG_INFINEON is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +# CONFIG_XILLYBUS is not set +# CONFIG_XILLYUSB is not set +# end of Character devices + +# +# I2C support +# +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_ARB_GPIO_CHALLENGE=m +CONFIG_I2C_MUX_GPIO=m +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_PINCTRL=m +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +CONFIG_I2C_MUX_MLXCPLD=m +# end of Multiplexer I2C Chip support + +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_SMBUS=m + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCF=m +CONFIG_I2C_ALGOPCA=m +# end of I2C Algorithms + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_AMD_MP2 is not set +# 
CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=m +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=m +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_GPIO=m +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set +# CONFIG_I2C_HISI is not set +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_QCOM_CCI is not set +CONFIG_I2C_QUP=m +# CONFIG_I2C_RK3X is not set +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_VERSATILE=m +CONFIG_I2C_THUNDERX=m +# CONFIG_I2C_XILINX is not set +CONFIG_I2C_XLP9XX=m + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set +# CONFIG_I2C_PCI1XXXX is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_MLXCPLD is not set +CONFIG_I2C_XGENE_SLIMPRO=m +# CONFIG_I2C_VIRTIO is not set +# end of I2C Hardware Bus support + +CONFIG_I2C_STUB=m +CONFIG_I2C_SLAVE=y +CONFIG_I2C_SLAVE_EEPROM=m +# CONFIG_I2C_SLAVE_TESTUNIT is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set 
+CONFIG_SPI_CADENCE=m +# CONFIG_SPI_CADENCE_QUADSPI is not set +CONFIG_SPI_DESIGNWARE=m +# CONFIG_SPI_DW_DMA is not set +# CONFIG_SPI_DW_PCI is not set +CONFIG_SPI_DW_MMIO=m +# CONFIG_SPI_HISI_KUNPENG is not set +# CONFIG_SPI_HISI_SFC_V3XX is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PCI1XXXX is not set +CONFIG_SPI_PL022=m +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_QCOM_QSPI is not set +CONFIG_SPI_QUP=y +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_THUNDERX is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +CONFIG_SPI_XLP=m +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y +CONFIG_DP83640_PHY=m +# CONFIG_PTP_1588_CLOCK_INES is not set +CONFIG_PTP_1588_CLOCK_KVM=y +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_CY8C95X0 is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_MICROCHIP_SGPIO is not 
set +# CONFIG_PINCTRL_OCELOT is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_STMFX is not set +CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_IPQ5018 is not set +# CONFIG_PINCTRL_IPQ5332 is not set +# CONFIG_PINCTRL_IPQ8074 is not set +# CONFIG_PINCTRL_IPQ6018 is not set +# CONFIG_PINCTRL_IPQ9574 is not set +# CONFIG_PINCTRL_MDM9607 is not set +# CONFIG_PINCTRL_MSM8916 is not set +# CONFIG_PINCTRL_MSM8953 is not set +# CONFIG_PINCTRL_MSM8976 is not set +# CONFIG_PINCTRL_MSM8994 is not set +# CONFIG_PINCTRL_MSM8996 is not set +# CONFIG_PINCTRL_MSM8998 is not set +# CONFIG_PINCTRL_QCM2290 is not set +# CONFIG_PINCTRL_QCS404 is not set +CONFIG_PINCTRL_QDF2XXX=y +# CONFIG_PINCTRL_QDU1000 is not set +# CONFIG_PINCTRL_SA8775P is not set +# CONFIG_PINCTRL_SC7180 is not set +# CONFIG_PINCTRL_SC7280 is not set +# CONFIG_PINCTRL_SC8180X is not set +# CONFIG_PINCTRL_SC8280XP is not set +# CONFIG_PINCTRL_SDM660 is not set +# CONFIG_PINCTRL_SDM670 is not set +# CONFIG_PINCTRL_SDM845 is not set +# CONFIG_PINCTRL_SDX75 is not set +# CONFIG_PINCTRL_SM6115 is not set +# CONFIG_PINCTRL_SM6125 is not set +# CONFIG_PINCTRL_SM6350 is not set +# CONFIG_PINCTRL_SM6375 is not set +# CONFIG_PINCTRL_SM7150 is not set +# CONFIG_PINCTRL_SM8150 is not set +# CONFIG_PINCTRL_SM8250 is not set +# CONFIG_PINCTRL_SM8350 is not set +# CONFIG_PINCTRL_SM8450 is not set +# CONFIG_PINCTRL_SM8550 is not set +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +# CONFIG_PINCTRL_LPASS_LPI is not set + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=m + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_CADENCE is not set +CONFIG_GPIO_DWAPB=m +# CONFIG_GPIO_EXAR is not set +# 
CONFIG_GPIO_FTGPIO010 is not set +CONFIG_GPIO_GENERIC_PLATFORM=m +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HISI is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_LOGICVC is not set +# CONFIG_GPIO_MB86S7X is not set +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_SIFIVE is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_THUNDERX is not set +CONFIG_GPIO_XGENE=y +CONFIG_GPIO_XGENE_SB=m +# CONFIG_GPIO_XILINX is not set +CONFIG_GPIO_XLP=m +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# +# end of USB GPIO expanders + +# +# Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +# CONFIG_W1 is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_GPIO_RESTART=y +CONFIG_POWER_RESET_HISI=y +# CONFIG_POWER_RESET_MSM is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# 
CONFIG_POWER_RESET_REGULATOR is not set +CONFIG_POWER_RESET_RESTART=y +# CONFIG_POWER_RESET_VEXPRESS is not set +# CONFIG_POWER_RESET_XGENE is not set +CONFIG_POWER_RESET_SYSCON=y +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_MAX77976 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not set +# CONFIG_CHARGER_BQ256XX is not set +CONFIG_CHARGER_SMB347=m +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_RT9467 is not set +# CONFIG_CHARGER_RT9471 is not set +# CONFIG_CHARGER_UCS1002 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_AD7314=m +# CONFIG_SENSORS_AD7414 is not set 
+# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM1177 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set +CONFIG_SENSORS_ARM_SCPI=m +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FTSTEUTATES is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +CONFIG_SENSORS_G762=m +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set +# CONFIG_SENSORS_IBMAEM is not set +# CONFIG_SENSORS_IBMPEX is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +CONFIG_SENSORS_POWR1220=m +# CONFIG_SENSORS_LINEAGE is not set +CONFIG_SENSORS_LTC2945=m +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +CONFIG_SENSORS_LTC4222=m +# CONFIG_SENSORS_LTC4245 is not set +CONFIG_SENSORS_LTC4260=m +# CONFIG_SENSORS_LTC4261 is not set 
+CONFIG_SENSORS_MAX1111=m +# CONFIG_SENSORS_MAX127 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +CONFIG_SENSORS_MAX31790=m +# CONFIG_SENSORS_MC34VR500 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +CONFIG_SENSORS_ADCXX=m +# CONFIG_SENSORS_LM63 is not set +CONFIG_SENSORS_LM70=m +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +CONFIG_SENSORS_NCT6683=m +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT6775_I2C is not set +CONFIG_SENSORS_NCT7802=m +CONFIG_SENSORS_NCT7904=m +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set +# CONFIG_SENSORS_PCF8591 is not set +CONFIG_PMBUS=m +# CONFIG_SENSORS_PMBUS is not set +# CONFIG_SENSORS_ACBEL_FSG032 is not set +# CONFIG_SENSORS_ADM1266 is not set +# CONFIG_SENSORS_ADM1275 is not set +# CONFIG_SENSORS_BEL_PFE is not set +# 
CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set +# CONFIG_SENSORS_FSP_3Y is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_DPS920AB is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR36021 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set +# CONFIG_SENSORS_LM25066 is not set +# CONFIG_SENSORS_LT7182S is not set +# CONFIG_SENSORS_LTC2978 is not set +CONFIG_SENSORS_LTC3815=m +# CONFIG_SENSORS_MAX15301 is not set +# CONFIG_SENSORS_MAX16064 is not set +# CONFIG_SENSORS_MAX16601 is not set +# CONFIG_SENSORS_MAX20730 is not set +CONFIG_SENSORS_MAX20751=m +# CONFIG_SENSORS_MAX31785 is not set +# CONFIG_SENSORS_MAX34440 is not set +# CONFIG_SENSORS_MAX8688 is not set +# CONFIG_SENSORS_MP2888 is not set +# CONFIG_SENSORS_MP2975 is not set +# CONFIG_SENSORS_MP5023 is not set +# CONFIG_SENSORS_MPQ7932 is not set +# CONFIG_SENSORS_PIM4328 is not set +# CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +CONFIG_SENSORS_TPS40422=m +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set +# CONFIG_SENSORS_UCD9000 is not set +# CONFIG_SENSORS_UCD9200 is not set +# CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set +# CONFIG_SENSORS_ZL6100 is not set +CONFIG_SENSORS_PWM_FAN=m +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +CONFIG_SENSORS_SHTC1=m +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set 
+# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set +# CONFIG_SENSORS_STTS751 is not set +CONFIG_SENSORS_ADC128D818=m +# CONFIG_SENSORS_ADS7828 is not set +CONFIG_SENSORS_ADS7871=m +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +CONFIG_SENSORS_TC74=m +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +CONFIG_SENSORS_TMP103=m +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set +CONFIG_SENSORS_VEXPRESS=m +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +CONFIG_SENSORS_XGENE=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=y +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_CPU_THERMAL=y +CONFIG_CPU_FREQ_THERMAL=y +# 
CONFIG_THERMAL_EMULATION is not set +# CONFIG_THERMAL_MMIO is not set +CONFIG_HISI_THERMAL=m + +# +# Qualcomm thermal drivers +# +# CONFIG_QCOM_LMH is not set +# end of Qualcomm thermal drivers + +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 +CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +# CONFIG_WDAT_WDT is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_XILINX_WINDOW_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +CONFIG_ARM_SP805_WATCHDOG=m +CONFIG_ARM_SBSA_WATCHDOG=m +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_QCOM_WDT is not set +# CONFIG_ARM_SMC_WATCHDOG is not set +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +# CONFIG_HP_WATCHDOG is not set +CONFIG_MARVELL_GTI_WDT=y +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=m +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_MAX5970 is not set +# CONFIG_MFD_DA9052_SPI is not set +# 
CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_GATEWORKS_GSC is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI655X_PMIC is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_NTXEC is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_QCOM_RPM is not set +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RK8XX_I2C is not set +# CONFIG_MFD_RK8XX_SPI is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS65219 is not set +# 
CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_STMFX is not set +# CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_QCOM_PM8008 is not set +# CONFIG_MFD_VEXPRESS_SYSREG is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set +# CONFIG_MFD_RSMU_I2C is not set +# CONFIG_MFD_RSMU_SPI is not set +# end of Multifunction device drivers + +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_88PG86X is not set +# CONFIG_REGULATOR_ACT8865 is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_AW37503 is not set +# CONFIG_REGULATOR_DA9121 is not set +# CONFIG_REGULATOR_DA9210 is not set +# CONFIG_REGULATOR_DA9211 is not set +# CONFIG_REGULATOR_FAN53555 is not set +# CONFIG_REGULATOR_FAN53880 is not set +# CONFIG_REGULATOR_GPIO is not set +# CONFIG_REGULATOR_ISL9305 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_LP872X is not set +# CONFIG_REGULATOR_LP8755 is not set +# CONFIG_REGULATOR_LTC3589 is not set +# CONFIG_REGULATOR_LTC3676 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX77857 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8893 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_MAX8973 is not set +# CONFIG_REGULATOR_MAX20086 is not set +# CONFIG_REGULATOR_MAX20411 is not set +# CONFIG_REGULATOR_MAX77826 is 
not set +# CONFIG_REGULATOR_MCP16502 is not set +# CONFIG_REGULATOR_MP5416 is not set +# CONFIG_REGULATOR_MP8859 is not set +# CONFIG_REGULATOR_MP886X is not set +# CONFIG_REGULATOR_MPQ7920 is not set +# CONFIG_REGULATOR_MT6311 is not set +# CONFIG_REGULATOR_PCA9450 is not set +# CONFIG_REGULATOR_PF8X00 is not set +# CONFIG_REGULATOR_PFUZE100 is not set +# CONFIG_REGULATOR_PV88060 is not set +# CONFIG_REGULATOR_PV88080 is not set +# CONFIG_REGULATOR_PV88090 is not set +# CONFIG_REGULATOR_PWM is not set +# CONFIG_REGULATOR_QCOM_REFGEN is not set +# CONFIG_REGULATOR_RAA215300 is not set +# CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set +# CONFIG_REGULATOR_RT4801 is not set +# CONFIG_REGULATOR_RT4803 is not set +# CONFIG_REGULATOR_RT5190A is not set +# CONFIG_REGULATOR_RT5739 is not set +# CONFIG_REGULATOR_RT5759 is not set +# CONFIG_REGULATOR_RT6160 is not set +# CONFIG_REGULATOR_RT6190 is not set +# CONFIG_REGULATOR_RT6245 is not set +# CONFIG_REGULATOR_RTQ2134 is not set +# CONFIG_REGULATOR_RTMV20 is not set +# CONFIG_REGULATOR_RTQ6752 is not set +# CONFIG_REGULATOR_RTQ2208 is not set +# CONFIG_REGULATOR_SLG51000 is not set +# CONFIG_REGULATOR_SY8106A is not set +# CONFIG_REGULATOR_SY8824X is not set +# CONFIG_REGULATOR_SY8827N is not set +# CONFIG_REGULATOR_TPS51632 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS6286X is not set +# CONFIG_REGULATOR_TPS6287X is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS65132 is not set +# CONFIG_REGULATOR_TPS6524X is not set +# CONFIG_REGULATOR_VCTRL is not set +# CONFIG_REGULATOR_VEXPRESS is not set +# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set +# CONFIG_RC_CORE is not set +CONFIG_CEC_CORE=m + +# +# CEC support +# +# CONFIG_MEDIA_CEC_SUPPORT is not set +# end of CEC support + +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# 
CONFIG_AUXDISPLAY is not set +CONFIG_DRM=m +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y +CONFIG_DRM_DP_AUX_CHARDEV=y +CONFIG_DRM_DP_CEC=y +CONFIG_DRM_TTM=m +CONFIG_DRM_EXEC=m +CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_SHMEM_HELPER=m +CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_I2C_NXP_TDA998X=m +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# CONFIG_DRM_HDLCD is not set +# CONFIG_DRM_MALI_DISPLAY is not set +# CONFIG_DRM_KOMEDA is not set +# end of ARM devices + +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y + +# +# ACP (Audio CoProcessor) Configuration +# +CONFIG_DRM_AMD_ACP=y +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# CONFIG_DEBUG_KERNEL_DC is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + +CONFIG_HSA_AMD=y +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set +CONFIG_DRM_VKMS=m +# CONFIG_DRM_VMWGFX is not set +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +# CONFIG_DRM_MSM is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not 
set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_EDP is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set +# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LB035Q02 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set +# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set +# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set +# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set +# CONFIG_DRM_PANEL_TPO_TPG110 is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_CHIPONE_ICN6211 is not set +# CONFIG_DRM_CHRONTEL_CH7033 is not set +# CONFIG_DRM_DISPLAY_CONNECTOR is not set +# CONFIG_DRM_ITE_IT6505 is not set +# CONFIG_DRM_LONTIUM_LT8912B is not set +# CONFIG_DRM_LONTIUM_LT9211 is not set +# CONFIG_DRM_LONTIUM_LT9611 is not set +# CONFIG_DRM_LONTIUM_LT9611UXC is not set +# CONFIG_DRM_ITE_IT66121 is not set +# CONFIG_DRM_LVDS_CODEC is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NWL_MIPI_DSI is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# 
CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_PARADE_PS8640 is not set +# CONFIG_DRM_SAMSUNG_DSIM is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_SIMPLE_BRIDGE is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358762 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TOSHIBA_TC358768 is not set +# CONFIG_DRM_TOSHIBA_TC358775 is not set +# CONFIG_DRM_TI_DLPC3433 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TI_SN65DSI83 is not set +# CONFIG_DRM_TI_SN65DSI86 is not set +# CONFIG_DRM_TI_TPD12S015 is not set +# CONFIG_DRM_ANALOGIX_ANX6345 is not set +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_ANALOGIX_ANX7625 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_CDNS_MHDP8546 is not set +# end of Display Interface Bridges + +# CONFIG_DRM_LOONGSON is not set +# CONFIG_DRM_ETNAVIV is not set +CONFIG_DRM_HISI_HIBMC=m +# CONFIG_DRM_HISI_KIRIN is not set +# CONFIG_DRM_LOGICVC is not set +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_PL111 is not set +# CONFIG_DRM_LIMA is not set +# CONFIG_DRM_PANFROST is not set +# CONFIG_DRM_TIDSS is not set +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_SSD130X is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# 
CONFIG_FB_PM2 is not set +# CONFIG_FB_ARMCLCD is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +CONFIG_FB_SIMPLE=y +CONFIG_FB_SSD1307=m +# CONFIG_FB_SM712 is not set +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_DEVICE=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_IOMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +CONFIG_FB_BACKLIGHT=m +# CONFIG_FB_MODE_HELPERS is not set +CONFIG_FB_TILEBLITTING=y +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# 
CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set +CONFIG_BACKLIGHT_PWM=m +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +CONFIG_BACKLIGHT_GPIO=m +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_BACKLIGHT_LED is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_DRM_ACCEL is not set +CONFIG_SOUND=m +# CONFIG_SND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +# CONFIG_HID_ASUS is not set +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +# CONFIG_HID_BIGBEN_FF is not set +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CP2112 is not set +# 
CONFIG_HID_CREATIVE_SB0540 is not set +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +# CONFIG_HID_EVISION is not set +CONFIG_HID_EZKEY=m +# CONFIG_HID_FT260 is not set +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +# CONFIG_HID_GLORIOUS is not set +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_STADIA_FF is not set +# CONFIG_HID_VIVALDI is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_VRC2 is not set +# CONFIG_HID_XIAOMI is not set +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +# CONFIG_HID_LETSKETCH is not set +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MEGAWORLD_FF is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NINTENDO is not set +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PLANTRONICS=m +# CONFIG_HID_PXRC is not set +# CONFIG_HID_RAZER is not set +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +# CONFIG_HID_SEMITEK is not set +# CONFIG_HID_SIGMAMICRO is not set 
+CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +# CONFIG_HID_TOPRE is not set +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_U2FZERO is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set +# CONFIG_HID_ALPS is not set +# CONFIG_HID_MCP2221 is not set +# end of Special HID drivers + +# +# HID-BPF support +# +# CONFIG_HID_BPF is not set +# end of HID-BPF support + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +# end of USB HID support + +CONFIG_I2C_HID=m +# CONFIG_I2C_HID_ACPI is not set +# CONFIG_I2C_HID_OF is not set +# CONFIG_I2C_HID_OF_ELAN is not set +# CONFIG_I2C_HID_OF_GOODIX is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_LED_TRIG=y +CONFIG_USB_ULPI_BUS=m +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_PRODUCTLIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 +CONFIG_USB_MON=y + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PCI_RENESAS is not set +CONFIG_USB_XHCI_PLATFORM=y +# CONFIG_USB_XHCI_HISTB is not set 
+CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +CONFIG_USB_EHCI_HCD_PLATFORM=m +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set + +# +# USB dual-mode controller drivers +# +# CONFIG_USB_CDNS_SUPPORT is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +# CONFIG_USB_SERIAL_CONSOLE is not set +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m 
+CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +# CONFIG_USB_SERIAL_XR is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +# CONFIG_USB_QCOM_EUD is not set +# CONFIG_APPLE_MFI_FASTCHARGE is not set +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set 
+CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +CONFIG_USB_CHAOSKEY=m +# CONFIG_USB_ONBOARD_HUB is not set +CONFIG_USB_ATM=m +# CONFIG_USB_SPEEDTOUCH is not set +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_ULPI is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +CONFIG_TYPEC=y +CONFIG_TYPEC_TCPM=y +# CONFIG_TYPEC_TCPCI is not set +# CONFIG_TYPEC_FUSB302 is not set +# CONFIG_TYPEC_QCOM_PMIC is not set +CONFIG_TYPEC_UCSI=y +# CONFIG_UCSI_CCG is not set +CONFIG_UCSI_ACPI=y +# CONFIG_UCSI_STM32G0 is not set +CONFIG_TYPEC_TPS6598X=m +# CONFIG_TYPEC_ANX7411 is not set +# CONFIG_TYPEC_RT1719 is not set +# CONFIG_TYPEC_HD3SS3220 is not set +# CONFIG_TYPEC_STUSB160X is not set +# CONFIG_TYPEC_WUSB3801 is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_FSA4480 is not set +# CONFIG_TYPEC_MUX_GPIO_SBU is not set +CONFIG_TYPEC_MUX_PI3USB30532=m +# CONFIG_TYPEC_MUX_NB7VPQ904M is not set +# end of USB Type-C Multiplexer/DeMultiplexer Switch support + +# +# USB Type-C Alternate Mode drivers +# +CONFIG_TYPEC_DP_ALTMODE=m +# CONFIG_TYPEC_NVIDIA_ALTMODE is not set +# end of USB Type-C Alternate Mode drivers + +CONFIG_USB_ROLE_SWITCH=y +CONFIG_MMC=m +# CONFIG_PWRSEQ_EMMC is not set +# CONFIG_PWRSEQ_SIMPLE is not set +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_ARMMMCI=m +CONFIG_MMC_STM32_SDMMC=y +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_OF_AT91 is not set +# 
CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +# CONFIG_MMC_SDHCI_CADENCE is not set +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_SDHCI_MILBEAUT is not set +# CONFIG_MMC_SDHCI_MSM is not set +CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_DW=m +CONFIG_MMC_DW_PLTFM=m +CONFIG_MMC_DW_BLUEFIELD=m +# CONFIG_MMC_DW_EXYNOS is not set +# CONFIG_MMC_DW_HI3798CV200 is not set +# CONFIG_MMC_DW_K3 is not set +# CONFIG_MMC_DW_PCI is not set +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_HSQ is not set +CONFIG_MMC_TOSHIBA_PCI=m +CONFIG_MMC_MTK=m +# CONFIG_MMC_SDHCI_XENON is not set +# CONFIG_MMC_SDHCI_OMAP is not set +# CONFIG_MMC_SDHCI_AM654 is not set +# CONFIG_SCSI_UFSHCD is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS_FLASH=m +# CONFIG_LEDS_CLASS_MULTICOLOR is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AN30259A is not set +# CONFIG_LEDS_AW200XX is not set +# CONFIG_LEDS_AW2013 is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +# CONFIG_LEDS_EL15203000 is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP50XX is not set +# CONFIG_LEDS_LP55XX_COMMON is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set +# 
CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2606MVV is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_LT3593=m +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_SPI_BYTE is not set +# CONFIG_LEDS_LM3697 is not set + +# +# Flash and Torch LED drivers +# +# CONFIG_LEDS_AAT1290 is not set +# CONFIG_LEDS_AS3645A is not set +# CONFIG_LEDS_KTD2692 is not set +# CONFIG_LEDS_LM3601X is not set +# CONFIG_LEDS_RT4505 is not set +# CONFIG_LEDS_RT8515 is not set +# CONFIG_LEDS_SGM3140 is not set + +# +# RGB LED drivers +# + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +# CONFIG_LEDS_TRIGGER_DISK is not set +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set +# CONFIG_LEDS_TRIGGER_AUDIO is not set +# CONFIG_LEDS_TRIGGER_TTY is not set + +# +# Simple LED drivers +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_BNXT_RE=m 
+CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +CONFIG_INFINIBAND_ERDMA=m +CONFIG_INFINIBAND_HNS=m +# CONFIG_INFINIBAND_HNS_HIP08 is not set +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_QEDR=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_GHES=y +CONFIG_EDAC_THUNDERX=m +CONFIG_EDAC_XGENE=m +# CONFIG_EDAC_DMC520 is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +CONFIG_RTC_DRV_ABB5ZES3=m +# CONFIG_RTC_DRV_ABEOZ9 is not set +CONFIG_RTC_DRV_ABX80X=m +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1374_WDT=y +CONFIG_RTC_DRV_DS1672=m +# CONFIG_RTC_DRV_HYM8563 is not set +CONFIG_RTC_DRV_MAX6900=m +# CONFIG_RTC_DRV_NCT3018Y is not set +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +# CONFIG_RTC_DRV_ISL12026 is not set +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF85063=m +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# 
CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8010=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_DRV_M41T93=m +CONFIG_RTC_DRV_M41T94=m +# CONFIG_RTC_DRV_DS1302 is not set +CONFIG_RTC_DRV_DS1305=m +CONFIG_RTC_DRV_DS1343=m +CONFIG_RTC_DRV_DS1347=m +CONFIG_RTC_DRV_DS1390=m +# CONFIG_RTC_DRV_MAX6916 is not set +CONFIG_RTC_DRV_R9701=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_RS5C348=m +CONFIG_RTC_DRV_MAX6902=m +CONFIG_RTC_DRV_PCF2123=m +CONFIG_RTC_DRV_MCP795=m +CONFIG_RTC_I2C_AND_SPI=m + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +CONFIG_RTC_DRV_PCF2127=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +# CONFIG_RTC_DRV_RX6110 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1685_FAMILY=m +CONFIG_RTC_DRV_DS1685=y +# CONFIG_RTC_DRV_DS1689 is not set +# CONFIG_RTC_DRV_DS17285 is not set +# CONFIG_RTC_DRV_DS17485 is not set +# CONFIG_RTC_DRV_DS17885 is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_XGENE is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_GOLDFISH is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# 
CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_BCM_SBA_RAID is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_FSL_QDMA is not set +# CONFIG_HISI_DMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_K3_DMA is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_PLX_DMA is not set +# CONFIG_XGENE_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_XILINX_ZYNQMP_DPDMA is not set +# CONFIG_QCOM_BAM_DMA is not set +# CONFIG_QCOM_GPI_DMA is not set +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +CONFIG_DW_DMAC_CORE=m +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=m +# CONFIG_DW_EDMA is not set +# CONFIG_SF_PDMA is not set + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +CONFIG_DMATEST=m +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +# CONFIG_DMABUF_DEBUG is not set +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# CONFIG_DMABUF_SYSFS_STATS is not set +# end of DMABUF options + +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI=m +# CONFIG_MLX5_VFIO_PCI is not set +# end of VFIO support for PCI devices + +# +# VFIO support for platform devices +# +# CONFIG_VFIO_PLATFORM is not set +# CONFIG_VFIO_AMBA is not set +# end of VFIO support for platform devices + +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO_ANCHOR=y 
+CONFIG_VIRTIO=m +CONFIG_VIRTIO_PCI_LIB=m +CONFIG_VIRTIO_PCI_LIB_LEGACY=m +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_MEM=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +# CONFIG_COMEDI is not set +CONFIG_STAGING=y +# CONFIG_RTS5208 is not set +# CONFIG_VT6655 is not set +# CONFIG_FB_SM750 is not set +# CONFIG_STAGING_MEDIA is not set +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_FB_TFT is not set +# CONFIG_KS7010 is not set +# CONFIG_PI433 is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_QLGE is not set +# CONFIG_VME_BUS is not set +# CONFIG_GOLDFISH is not set +CONFIG_CHROME_PLATFORMS=y +# CONFIG_CHROMEOS_ACPI is not set +# CONFIG_CHROMEOS_TBMC is not set +# CONFIG_CROS_EC is not set +# CONFIG_CROS_KBD_LED_BACKLIGHT is not set +# CONFIG_CROS_HPS_I2C is not set +# CONFIG_CHROMEOS_PRIVACY_SCREEN is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE_3_POWER_OPREGION is not set +# CONFIG_SURFACE_GPE is not set +# CONFIG_SURFACE_HOTPLUG is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Clock driver for ARM Reference designs +# +# CONFIG_CLK_ICST is not set +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# end of Clock driver for ARM Reference designs + +# CONFIG_LMK04832 is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +CONFIG_COMMON_CLK_SCPI=m +# 
CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_AXI_CLKGEN is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_RS9_PCIE is not set +# CONFIG_COMMON_CLK_SI521XX is not set +# CONFIG_COMMON_CLK_VC3 is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_VC7 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3559A=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3670=y +CONFIG_COMMON_CLK_HI3798CV200=y +# CONFIG_COMMON_CLK_HI6220 is not set +CONFIG_RESET_HISI=y +CONFIG_STUB_CLK_HI3660=y +# CONFIG_COMMON_CLK_QCOM is not set +# CONFIG_XILINX_VCU is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +CONFIG_HWSPINLOCK=y +# CONFIG_HWSPINLOCK_QCOM is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_ARM_MHU=m +# CONFIG_ARM_MHU_V2 is not set +# CONFIG_PLATFORM_MHU is not set +# CONFIG_PL320_MBOX is not set +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_HI3660_MBOX=y +CONFIG_HI6220_MBOX=y +# CONFIG_MAILBOX_TEST is not set +# CONFIG_QCOM_APCS_IPC is not set +CONFIG_XGENE_SLIMPRO_MBOX=m +# CONFIG_QCOM_IPCC is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# 
CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set +# CONFIG_IOMMU_IO_PGTABLE_DART is not set +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +CONFIG_IOMMU_DEFAULT_DMA_STRICT=y +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +# CONFIG_IOMMUFD is not set +CONFIG_ARM_SMMU=y +# CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set +CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y +CONFIG_ARM_SMMU_QCOM=y +# CONFIG_ARM_SMMU_QCOM_DEBUG is not set +CONFIG_ARM_SMMU_V3=y +# CONFIG_ARM_SMMU_V3_SVA is not set +# CONFIG_QCOM_IOMMU is not set +# CONFIG_VIRTIO_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# CONFIG_QUICC_ENGINE is not set +# CONFIG_FSL_RCPM is not set +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# CONFIG_A64FX_DIAG is not set +# end of fujitsu SoC drivers + +# +# Hisilicon SoC drivers +# +# CONFIG_KUNPENG_HCCS is not set +# end of Hisilicon SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# CONFIG_LITEX_SOC_CONTROLLER is not set +# end of Enable LiteX SoC Builder specific drivers + +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# CONFIG_QCOM_AOSS_QMP is not set +# CONFIG_QCOM_COMMAND_DB is not set +# CONFIG_QCOM_CPR is not set +# CONFIG_QCOM_GENI_SE is not set +# CONFIG_QCOM_GSBI is not set +# CONFIG_QCOM_LLCC is not set 
+CONFIG_QCOM_KRYO_L2_ACCESSORS=y +# CONFIG_QCOM_OCMEM is not set +# CONFIG_QCOM_RAMP_CTRL is not set +# CONFIG_QCOM_RMTFS_MEM is not set +# CONFIG_QCOM_RPM_MASTER_STATS is not set +# CONFIG_QCOM_RPMH is not set +# CONFIG_QCOM_SMEM is not set +# CONFIG_QCOM_SPM is not set +# CONFIG_QCOM_ICC_BWMON is not set +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +# CONFIG_EXTCON_FSA9480 is not set +CONFIG_EXTCON_GPIO=m +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_PTN5150 is not set +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set +# CONFIG_EXTCON_USBC_TUSB320 is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +# CONFIG_NTB is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_ATMEL_TCB is not set +# CONFIG_PWM_CLK is not set +# CONFIG_PWM_DWC is not set +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_HIBVT is not set +# CONFIG_PWM_PCA9685 is not set +# CONFIG_PWM_XILINX is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +# CONFIG_AL_FIC is not set +CONFIG_HISILICON_IRQ_MBIGEN=y +# CONFIG_XILINX_INTC is not set +CONFIG_PARTITION_PERCPU=y +CONFIG_QCOM_IRQ_COMBINER=y +# CONFIG_QCOM_PDC is not set +# CONFIG_QCOM_MPM is not set +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_QCOM_AOSS is not set +# CONFIG_RESET_QCOM_PDC is not set +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set +# CONFIG_COMMON_RESET_HI3660 is not set +CONFIG_COMMON_RESET_HI6220=m + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y 
+CONFIG_PHY_XGENE=y +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_CADENCE_TORRENT is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_DPHY_RX is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_CADENCE_SALVO is not set +CONFIG_PHY_HI6220_USB=m +# CONFIG_PHY_HI3660_USB is not set +# CONFIG_PHY_HI3670_USB is not set +# CONFIG_PHY_HI3670_PCIE is not set +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_LAN966X_SERDES is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_OCELOT_SERDES is not set +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +# CONFIG_PHY_QCOM_EDP is not set +# CONFIG_PHY_QCOM_IPQ4019_USB is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +# CONFIG_PHY_QCOM_PCIE2 is not set +# CONFIG_PHY_QCOM_QMP is not set +# CONFIG_PHY_QCOM_QUSB2 is not set +# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set +# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set +# CONFIG_PHY_QCOM_M31_USB is not set +# CONFIG_PHY_QCOM_USB_HS is not set +# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set +# CONFIG_PHY_QCOM_USB_HSIC is not set +# CONFIG_PHY_QCOM_USB_HS_28NM is not set +# CONFIG_PHY_QCOM_USB_SS is not set +# CONFIG_PHY_QCOM_IPQ806X_USB is not set +# CONFIG_PHY_QCOM_SGMII_ETH is not set +# CONFIG_PHY_TUSB1210 is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_ARM_CCI_PMU is not set +CONFIG_ARM_CCN=y +CONFIG_ARM_CMN=y +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +CONFIG_ARM_SMMU_V3_PMU=y +CONFIG_ARM_PMUV3=y +CONFIG_ARM_DSU_PMU=y +CONFIG_QCOM_L2_PMU=y +CONFIG_QCOM_L3_PMU=y +CONFIG_THUNDERX2_PMU=m +CONFIG_XGENE_PMU=y +CONFIG_ARM_SPE_PMU=m +# CONFIG_ARM_DMC620_PMU is not set +# CONFIG_MARVELL_CN10K_TAD_PMU is not 
set +CONFIG_ALIBABA_UNCORE_DRW_PMU=m +CONFIG_HISI_PMU=y +# CONFIG_HISI_PCIE_PMU is not set +# CONFIG_HNS3_PMU is not set +# CONFIG_MARVELL_CN10K_DDR_PMU is not set +# CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set +# end of Performance monitor support + +CONFIG_RAS=y +# CONFIG_USB4 is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_ND_PFN=m +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_OF_PMEM=m +CONFIG_NVDIMM_KEYS=y +# CONFIG_NVDIMM_SECURITY_TEST is not set +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_PMEM=m +CONFIG_DEV_DAX_HMEM=m +CONFIG_DEV_DAX_HMEM_DEVICES=y +# CONFIG_DEV_DAX_KMEM is not set +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_QCOM_QFPROM is not set +# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set +# CONFIG_NVMEM_RMEM is not set +# CONFIG_NVMEM_U_BOOT_ENV is not set + +# +# HW tracing support +# +CONFIG_STM=m +# CONFIG_STM_PROTO_BASIC is not set +# CONFIG_STM_PROTO_SYS_T is not set +# CONFIG_STM_DUMMY is not set +# CONFIG_STM_SOURCE_CONSOLE is not set +# CONFIG_STM_SOURCE_HEARTBEAT is not set +# CONFIG_STM_SOURCE_FTRACE is not set +# CONFIG_INTEL_TH is not set +# CONFIG_HISI_PTT is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_TEE=m +# CONFIG_OPTEE is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# CONFIG_CDX_BUS is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not 
set +CONFIG_EXT4_FS=m +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=m +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=m +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_FUSE_DAX=y +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +# CONFIG_OVERLAY_FS_NFS_EXPORT is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=m +CONFIG_NETFS_STATS=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +CONFIG_CACHEFILES_ONDEMAND=y +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# 
DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_EXFAT_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_NTFS3_FS is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_ARCH_SUPPORTS_HUGETLBFS=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# 
CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_CONSOLE=y +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=y +# CONFIG_PSTORE_BLK is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_EROFS_FS=m +# CONFIG_EROFS_FS_DEBUG is not set +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y +CONFIG_EROFS_FS_ZIP_LZMA=y +CONFIG_EROFS_FS_ZIP_DEFLATE=y +CONFIG_EROFS_FS_ONDEMAND=y +# CONFIG_EROFS_FS_PCPU_KTHREAD is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m 
+CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=m +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_NLS_UCS2_UTILS=m +CONFIG_DLM=m +# CONFIG_DLM_DEBUG is not set +# CONFIG_UNICODE is not set 
+CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=m +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_TRUSTED_KEYS_TEE=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +# CONFIG_IMA_KEXEC is not set +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_NG_TEMPLATE is not set +CONFIG_IMA_SIG_TEMPLATE=y +CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set 
+CONFIG_IMA_APPRAISE_BUILD_POLICY=y +# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y +CONFIG_IMA_BLACKLIST_KEYRING=y +CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set +# CONFIG_IMA_DISABLE_HTABLE is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_STACK_ALL_PATTERN is not set +# CONFIG_INIT_STACK_ALL_ZERO is not set +# CONFIG_GCC_PLUGIN_STACKLEAK is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Hardening of kernel data structures + +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m 
+CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +# CONFIG_CRYPTO_ECDH is not set +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +CONFIG_CRYPTO_SM2=y +# CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y 
+CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=y +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=m +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=m +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=m +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=y +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +# end of Compression + +# +# Random number generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y 
+CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y +# CONFIG_CRYPTO_NHPOLY1305_NEON is not set +CONFIG_CRYPTO_CHACHA20_NEON=m + +# +# Accelerated Cryptographic Algorithms for CPU (arm64) +# +CONFIG_CRYPTO_GHASH_ARM64_CE=m +CONFIG_CRYPTO_POLY1305_NEON=m +CONFIG_CRYPTO_SHA1_ARM64_CE=m +CONFIG_CRYPTO_SHA256_ARM64=m +CONFIG_CRYPTO_SHA2_ARM64_CE=m +# CONFIG_CRYPTO_SHA512_ARM64 is not set +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set +# CONFIG_CRYPTO_SHA3_ARM64 is not set +CONFIG_CRYPTO_SM3_NEON=m +CONFIG_CRYPTO_SM3_ARM64_CE=m +# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_CRYPTO_AES_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_CRYPTO_AES_ARM64_BS=m +CONFIG_CRYPTO_SM4_ARM64_CE=m +CONFIG_CRYPTO_SM4_ARM64_CE_BLK=m +CONFIG_CRYPTO_SM4_ARM64_NEON_BLK=m +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_SM4_ARM64_CE_CCM=m +CONFIG_CRYPTO_SM4_ARM64_CE_GCM=m +CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m +# end of Accelerated Cryptographic Algorithms for CPU (arm64) + +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set +CONFIG_CRYPTO_DEV_CPT=m +CONFIG_CAVIUM_CPT=m +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +# CONFIG_CRYPTO_DEV_OCTEONTX_CPT is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set 
+CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m +# CONFIG_CRYPTO_DEV_QCE is not set +# CONFIG_CRYPTO_DEV_QCOM_RNG is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +# CONFIG_CRYPTO_DEV_VIRTIO is not set +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_HISI_SEC is not set +# CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +# CONFIG_CRYPTO_DEV_HISI_ZIP is not set +# CONFIG_CRYPTO_DEV_HISI_HPRE is not set +# CONFIG_CRYPTO_DEV_HISI_TRNG is not set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# CONFIG_SYSTEM_REVOCATION_LIST is not set +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_LINEAR_RANGES=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y +CONFIG_INDIRECT_PIO=y +# CONFIG_TRACE_MMIO_ACCESS is not set + +# +# Crypto library routines +# 
+CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=y +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m 
+CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_OPS=y +CONFIG_NEED_SG_DMA_FLAGS=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_ARCH_HAS_SETUP_DMA_OPS=y +CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y +CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y +# CONFIG_DMA_RESTRICTED_POOL is not set +CONFIG_DMA_NONCOHERENT_MMAP=y +CONFIG_DMA_COHERENT_POOL=y +CONFIG_DMA_DIRECT_REMAP=y +CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=64 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_MEMREGION=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of Library routines + +CONFIG_GENERIC_IOREMAP=y +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=m + +# +# Kernel hacking +# + +# +# printk and dmesg options +# 
+CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_HONOUR_BLOCKLIST=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x0 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_HAVE_KCSAN_COMPILER=y +# end of Generic 
Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_TABLE_CHECK is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_ARCH_HAS_DEBUG_WX=y +# CONFIG_DEBUG_WX is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_PER_VMA_LOCK_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SHRINKER_DEBUG is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_VM_PGTABLE is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y +CONFIG_HAVE_ARCH_KASAN_HW_TAGS=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +CONFIG_HAVE_ARCH_KFENCE=y +CONFIG_KFENCE=y +CONFIG_KFENCE_SAMPLE_INTERVAL=0 +CONFIG_KFENCE_NUM_OBJECTS=255 +# CONFIG_KFENCE_DEFERRABLE is not set +CONFIG_KFENCE_STRESS_TEST_FAULTS=0 +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=1 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY 
is not set +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_STACK_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_OSNOISE_TRACER=y +CONFIG_TIMERLAT_TRACER=y 
+CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y +CONFIG_TRACING_MAP=y +CONFIG_SYNTH_EVENTS=y +# CONFIG_USER_EVENTS is not set +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_SYNTH_EVENT_GEN_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_HIST_TRIGGERS_DEBUG is not set +# CONFIG_RV is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# arm64 Debugging +# +CONFIG_PID_IN_CONTEXTIDR=y +# CONFIG_DEBUG_EFI is not set +# CONFIG_ARM64_RELOC_TEST is not set +CONFIG_CORESIGHT=m +CONFIG_CORESIGHT_LINKS_AND_SINKS=m +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=m +CONFIG_CORESIGHT_CATU=m +CONFIG_CORESIGHT_SINK_TPIU=m +CONFIG_CORESIGHT_SINK_ETBV10=m +CONFIG_CORESIGHT_SOURCE_ETM4X=m +CONFIG_ETM4X_IMPDEF_FEATURE=y +CONFIG_CORESIGHT_STM=m +CONFIG_CORESIGHT_CPU_DEBUG=m +# CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON is not set +CONFIG_CORESIGHT_CTI=m +CONFIG_CORESIGHT_CTI_INTEGRATION_REGS=y +# CONFIG_CORESIGHT_TRBE is not set +# CONFIG_ULTRASOC_SMB is not set +# CONFIG_CORESIGHT_TPDM is not set +# 
CONFIG_CORESIGHT_TPDA is not set +# CONFIG_CORESIGHT_DUMMY is not set +# end of arm64 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_TEST_DHRY is not set +# CONFIG_LKDTM is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_TEST_REF_TRACKER is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_STRING_SELFTEST is not set +# CONFIG_TEST_STRING_HELPERS is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_SCANF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_MAPLE_TREE is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_DYNAMIC_DEBUG is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_FREE_PAGES is not set +CONFIG_ARCH_USE_MEMTEST=y +# CONFIG_MEMTEST is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git 
a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig new file mode 100644 index 000000000000..714c90743cba --- /dev/null +++ b/arch/x86/configs/anolis-debug_defconfig @@ -0,0 +1,7783 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/x86 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_CONSTRUCTORS=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +# CONFIG_KERNEL_ZSTD is not set +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y 
+CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +CONFIG_GENERIC_IRQ_DEBUGFS=y +# end of IRQ subsystem + +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_INIT=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_CONTEXT_TRACKING_USER=y +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US=125 +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y + +# +# BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +CONFIG_BPF_UNPRIV_DEFAULT_OFF=y +# CONFIG_BPF_PRELOAD is not set +CONFIG_BPF_LSM=y +# end of BPF subsystem + +CONFIG_PREEMPT_BUILD=y +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTION=y +CONFIG_PREEMPT_DYNAMIC=y +CONFIG_SCHED_CORE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y 
+CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RCU=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_RCU_NOCB_CPU=y +# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set +# CONFIG_RCU_LAZY is not set +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=21 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y + +# +# Scheduler features +# +# CONFIG_UCLAMP_TASK is not set +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +CONFIG_CGROUP_DEBUG=y +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" 
+CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_GUEST_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HAVE_IMA_KEXEC=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_KEXEC_SIG=y +# CONFIG_KEXEC_SIG_FORCE is not set +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_KEXEC_JUMP=y +CONFIG_CRASH_DUMP=y +CONFIG_CRASH_HOTPLUG=y +CONFIG_CRASH_MAX_MEMORY_RANGES=8192 +# end of Kexec and crash features +# end of General setup + +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y 
+CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_AUDIT_ARCH=y +CONFIG_KASAN_SHADOW_OFFSET=0xdffffc0000000000 +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_SMP=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +CONFIG_X86_CPU_RESCTRL=y +CONFIG_X86_EXTENDED_PLATFORM=y +# CONFIG_X86_NUMACHIP is not set +# CONFIG_X86_VSMP is not set +CONFIG_X86_UV=y +# CONFIG_X86_GOLDFISH is not set +# CONFIG_X86_INTEL_MID is not set +CONFIG_X86_INTEL_LPSS=y +CONFIG_X86_AMD_PLATFORM_DEVICE=y +CONFIG_IOSF_MBI=y +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +# CONFIG_PARAVIRT_SPINLOCKS is not set +CONFIG_X86_HV_CALLBACK_VECTOR=y +CONFIG_XEN=y +# CONFIG_XEN_PV is not set +CONFIG_XEN_PVHVM=y +CONFIG_XEN_PVHVM_SMP=y +CONFIG_XEN_PVHVM_GUEST=y +CONFIG_XEN_SAVE_RESTORE=y +# CONFIG_XEN_DEBUG_FS is not set +# CONFIG_XEN_PVH is not set +CONFIG_KVM_GUEST=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +# CONFIG_PVH is not set +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +# CONFIG_JAILHOUSE_GUEST is not set +# CONFIG_ACRN_GUEST is not set +CONFIG_INTEL_TDX_GUEST=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 
+CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +CONFIG_IA32_FEAT_CTL=y +CONFIG_X86_VMX_FEATURE_NAMES=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +# CONFIG_GART_IOMMU is not set +CONFIG_BOOT_VESA_SUPPORT=y +# CONFIG_MAXSMP is not set +CONFIG_NR_CPUS_RANGE_BEGIN=2 +CONFIG_NR_CPUS_RANGE_END=8192 +CONFIG_NR_CPUS_DEFAULT=64 +CONFIG_NR_CPUS=1024 +CONFIG_SCHED_CLUSTER=y +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +CONFIG_X86_MCELOG_LEGACY=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +CONFIG_X86_MCE_INJECT=m + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=m +CONFIG_PERF_EVENTS_INTEL_RAPL=m +CONFIG_PERF_EVENTS_INTEL_CSTATE=m +CONFIG_PERF_EVENTS_AMD_POWER=m +CONFIG_PERF_EVENTS_AMD_UNCORE=y +CONFIG_PERF_EVENTS_AMD_BRS=y +# end of Performance monitoring + +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +CONFIG_X86_IOPL_IOPERM=y +CONFIG_MICROCODE=y +# CONFIG_MICROCODE_LATE_LOADING is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +# CONFIG_X86_5LEVEL is not set +CONFIG_X86_DIRECT_GBPAGES=y +CONFIG_X86_CPA_STATISTICS=y +CONFIG_X86_MEM_ENCRYPT=y +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NUMA_EMU=y +CONFIG_NODES_SHIFT=6 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +# CONFIG_ARCH_MEMORY_PROBE is not set +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_X86_PMEM_LEGACY_DEVICE=y +CONFIG_X86_PMEM_LEGACY=m +CONFIG_X86_CHECK_BIOS_CORRUPTION=y 
+CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_X86_UMIP=y +CONFIG_CC_HAS_IBT=y +CONFIG_X86_CET=y +CONFIG_X86_KERNEL_IBT=y +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +# CONFIG_X86_INTEL_TSX_MODE_OFF is not set +# CONFIG_X86_INTEL_TSX_MODE_ON is not set +CONFIG_X86_INTEL_TSX_MODE_AUTO=y +CONFIG_X86_SGX=y +# CONFIG_X86_USER_SHADOW_STACK is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_HANDOVER_PROTOCOL=y +CONFIG_EFI_MIXED=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_MAP=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y +CONFIG_ARCH_SELECTS_KEXEC_FILE=y +CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE=y +CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_JUMP=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_X86_NEED_RELOCS=y +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y +CONFIG_RANDOMIZE_MEMORY=y +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa +# CONFIG_ADDRESS_MASKING is not set +CONFIG_HOTPLUG_CPU=y +# CONFIG_COMPAT_VDSO is not set +CONFIG_LEGACY_VSYSCALL_XONLY=y +# CONFIG_LEGACY_VSYSCALL_NONE is not set +# CONFIG_CMDLINE_BOOL is not set +CONFIG_MODIFY_LDT_SYSCALL=y +# CONFIG_STRICT_SIGALTSTACK_SIZE is not set +CONFIG_HAVE_LIVEPATCH=y +CONFIG_LIVEPATCH=y +# end of Processor type and features + +CONFIG_CC_HAS_SLS=y +CONFIG_CC_HAS_RETURN_THUNK=y +CONFIG_CC_HAS_ENTRY_PADDING=y +CONFIG_FUNCTION_PADDING_CFI=11 +CONFIG_FUNCTION_PADDING_BYTES=16 +CONFIG_CALL_PADDING=y +CONFIG_HAVE_CALL_THUNKS=y 
+CONFIG_CALL_THUNKS=y +CONFIG_PREFIX_SYMBOLS=y +CONFIG_SPECULATION_MITIGATIONS=y +CONFIG_PAGE_TABLE_ISOLATION=y +CONFIG_RETPOLINE=y +CONFIG_RETHUNK=y +CONFIG_CPU_UNRET_ENTRY=y +CONFIG_CALL_DEPTH_TRACKING=y +# CONFIG_CALL_THUNKS_DEBUG is not set +CONFIG_CPU_IBPB_ENTRY=y +CONFIG_CPU_IBRS_ENTRY=y +CONFIG_CPU_SRSO=y +# CONFIG_SLS is not set +# CONFIG_GDS_FORCE_MITIGATION is not set +CONFIG_ARCH_HAS_ADD_PAGES=y + +# +# Power management and ACPI options +# +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +CONFIG_PM_ADVANCED_DEBUG=y +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_TRACE=y +CONFIG_PM_TRACE_RTC=y +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +CONFIG_ACPI_DEBUGGER=y +CONFIG_ACPI_DEBUGGER_USER=m +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_FPDT is not set +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y +CONFIG_ACPI_EC_DEBUGFS=m +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +CONFIG_ACPI_TAD=m +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_PLATFORM_PROFILE=m +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_DEBUG=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y 
+CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +CONFIG_ACPI_SBS=m +CONFIG_ACPI_HED=y +CONFIG_ACPI_CUSTOM_METHOD=m +CONFIG_ACPI_BGRT=y +CONFIG_ACPI_NFIT=m +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +CONFIG_ACPI_APEI_ERST_DEBUG=m +# CONFIG_ACPI_DPTF is not set +CONFIG_ACPI_WATCHDOG=y +CONFIG_ACPI_EXTLOG=m +CONFIG_ACPI_ADXL=y +CONFIG_ACPI_CONFIGFS=m +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_PCC=y +# CONFIG_ACPI_FFH is not set +CONFIG_PMIC_OPREGION=y +CONFIG_ACPI_PRMT=y +CONFIG_X86_PM_TIMER=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +# CONFIG_X86_PCC_CPUFREQ is not set +CONFIG_X86_AMD_PSTATE=y +CONFIG_X86_AMD_PSTATE_DEFAULT_MODE=3 +# CONFIG_X86_AMD_PSTATE_UT is not set +CONFIG_X86_ACPI_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ_CPB=y +CONFIG_X86_POWERNOW_K8=m +CONFIG_X86_AMD_FREQ_SENSITIVITY=m +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +CONFIG_X86_P4_CLOCKMOD=m + +# +# shared options +# +CONFIG_X86_SPEEDSTEP_LIB=m +# end of CPU Frequency scaling + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set +CONFIG_CPU_IDLE_GOV_HALTPOLL=y +CONFIG_HALTPOLL_CPUIDLE=y +# end of CPU Idle + 
+CONFIG_INTEL_IDLE=y +# end of Power management and ACPI options + +# +# Bus options (PCI etc.) +# +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_XEN=y +CONFIG_MMCONF_FAM10H=y +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +# end of Bus options (PCI etc.) + +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_X86_X32_ABI is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +# end of Binary Emulations + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_PFNCACHE=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_TSO=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_NO_POLL=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_HAVE_KVM_PM_NOTIFIER=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_X86_SGX_KVM=y +CONFIG_KVM_AMD=m +CONFIG_KVM_AMD_SEV=y +CONFIG_KVM_SMM=y +# CONFIG_KVM_XEN is not set +CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y +CONFIG_AS_AVX512=y +CONFIG_AS_SHA1_NI=y +CONFIG_AS_SHA256_NI=y +CONFIG_AS_TPAUSE=y +CONFIG_AS_GFNI=y +CONFIG_AS_WRUSS=y + +# +# General architecture-dependent options +# +CONFIG_HOTPLUG_SMT=y +CONFIG_HOTPLUG_CORE_SYNC=y +CONFIG_HOTPLUG_CORE_SYNC_DEAD=y +CONFIG_HOTPLUG_CORE_SYNC_FULL=y +CONFIG_HOTPLUG_SPLIT_STARTUP=y +CONFIG_HOTPLUG_PARALLEL=y +CONFIG_GENERIC_ENTRY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +# CONFIG_STATIC_CALL_SELFTEST is not set +CONFIG_OPTPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_KRETPROBE_ON_RETHOOK=y +CONFIG_USER_RETURN_NOTIFIER=y 
+CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_RUST=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_MMU_GATHER_MERGE_VMAS=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +# CONFIG_STACKPROTECTOR_STRONG is not set +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y +# CONFIG_CFI_CLANG is not set +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y 
+CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PUD=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_HAVE_OBJTOOL=y +CONFIG_HAVE_JUMP_LABEL_HACK=y +CONFIG_HAVE_NOINSTR_HACK=y +CONFIG_HAVE_NOINSTR_VALIDATION=y +CONFIG_HAVE_UACCESS_VALIDATION=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y +CONFIG_RANDOMIZE_KSTACK_OFFSET=y +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +CONFIG_LOCK_EVENT_COUNTS=y +CONFIG_ARCH_HAS_MEM_ENCRYPT=y +CONFIG_ARCH_HAS_CC_PLATFORM=y +CONFIG_HAVE_STATIC_CALL=y +CONFIG_HAVE_STATIC_CALL_INLINE=y +CONFIG_HAVE_PREEMPT_DYNAMIC=y +CONFIG_HAVE_PREEMPT_DYNAMIC_CALL=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y +CONFIG_ARCH_HAS_ELFCORE_COMPAT=y 
+CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH=y +CONFIG_DYNAMIC_SIGFRAME=y +CONFIG_HAVE_ARCH_NODE_DEV_GROUP=y +CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT_4B=y +CONFIG_FUNCTION_ALIGNMENT_16B=y +CONFIG_FUNCTION_ALIGNMENT=16 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +# CONFIG_MODULE_SIG_ALL is not set +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=m +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_WBT is not set +CONFIG_BLK_CGROUP_IOLATENCY=y +# CONFIG_BLK_CGROUP_FC_APPID is not set +CONFIG_BLK_CGROUP_IOCOST=y +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y 
+CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +CONFIG_SGI_PARTITION=y +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_BFQ_CGROUP_DEBUG=y +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# 
CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP=y +CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_EXCLUSIVE_SYSTEM_RAM=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_MHP_MEMMAP_ON_MEMORY=y +CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_DEVICE_MIGRATION=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y 
+CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_THP_SWAP=y +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=19 +CONFIG_MEM_SOFT_DIRTY=y +CONFIG_GENERIC_EARLY_IOREMAP=y +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y +CONFIG_PAGE_IDLE_FLAG=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DMA=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DEVICE=y +CONFIG_HMM_MIRROR=y +CONFIG_GET_FREE_REGION=y +CONFIG_DEVICE_PRIVATE=y +CONFIG_VMAP_PFN=y +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MAPPING_DIRTY_HELPERS=y +CONFIG_MEMFD_CREATE=y +CONFIG_SECRETMEM=y +# CONFIG_ANON_VMA_NAME is not set +CONFIG_USERFAULTFD=y +CONFIG_HAVE_ARCH_USERFAULTFD_WP=y +CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y +CONFIG_PTE_MARKER_UFFD_WP=y +CONFIG_LRU_GEN=y +# CONFIG_LRU_GEN_ENABLED is not set +# CONFIG_LRU_GEN_STATS is not set +CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y +CONFIG_PER_VMA_LOCK=y +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +# CONFIG_DAMON_SYSFS is not set +CONFIG_DAMON_DBGFS=y +# CONFIG_DAMON_RECLAIM is not set +# CONFIG_DAMON_LRU_SORT is not set +# end of Data Access Monitoring +# end of Memory Management 
options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +# CONFIG_TLS_TOE is not set +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_USER_COMPAT is not set +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m 
+CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +# CONFIG_INET6_ESPINTCP is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +# CONFIG_IPV6_RPL_LWTUNNEL is not set +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_MPTCP_IPV6=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y 
+CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set +CONFIG_NETFILTER_XTABLES=y +# CONFIG_NETFILTER_XTABLES_COMPAT is not set + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m 
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m 
+CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m 
+CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NFT_BRIDGE_REJECT=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m 
+CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +# CONFIG_BRIDGE_MRP is not set +# CONFIG_BRIDGE_CFM is not set +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# 
+CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_MQPRIO_LIB=m +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +# CONFIG_NET_SCH_FQ_PIE is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_ETS is not set +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set 
+CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +# CONFIG_NET_ACT_GATE is not set +CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_HYPERV_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +CONFIG_BT_LE_L2CAP_ECRED=y +# CONFIG_BT_6LOWPAN is not set +# CONFIG_BT_LEDS is not set +# CONFIG_BT_MSFTEXT is not set +# CONFIG_BT_AOSPEXT is not set +CONFIG_BT_DEBUGFS=y +# CONFIG_BT_SELFTEST is not set + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_POLL_SYNC=y +CONFIG_BT_HCIBTUSB_BCM=y +# CONFIG_BT_HCIBTUSB_MTK is not set +CONFIG_BT_HCIBTUSB_RTL=y +CONFIG_BT_HCIBTSDIO=m 
+CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +# CONFIG_BT_HCIUART_INTEL is not set +# CONFIG_BT_HCIUART_AG6XX is not set +CONFIG_BT_HCIBCM203X=m +# CONFIG_BT_HCIBCM4377 is not set +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +# CONFIG_BT_MTKSDIO is not set +# CONFIG_BT_VIRTIO is not set +# end of Bluetooth device drivers + +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +CONFIG_CFG80211_DEBUGFS=y +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +CONFIG_MAC80211_MESSAGE_TRACING=y +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +CONFIG_CEPH_LIB_PRETTYDEBUG=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_HAVE_EISA=y +# CONFIG_EISA is not set +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y 
+CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=y +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +# CONFIG_PCI_P2PDMA is not set +CONFIG_PCI_LABEL=y +CONFIG_PCI_HYPERV=m +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# +CONFIG_VMD=y +CONFIG_PCI_HYPERV_INTERFACE=m + +# +# Cadence-based PCIe controllers +# +# end of Cadence-based PCIe controllers + +# +# DesignWare-based PCIe controllers +# +# CONFIG_PCI_MESON is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# end of Mobiveil-based PCIe controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_CXL_BUS is not set +CONFIG_PCCARD=y +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set 
+CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_FW_LOADER_SYSFS=y +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_CACHE=y +# CONFIG_FW_UPLOAD is not set +# end of Firmware loader + +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +CONFIG_HMEM_REPORTING=y +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_SYS_HYPERVISOR=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# end of ARM System Control and Management Interface Protocol + +CONFIG_EDD=m +# CONFIG_EDD_OFF is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=m +CONFIG_FW_CFG_SYSFS=y +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_SOFT_RESERVE=y +CONFIG_EFI_DXE_MEM_ATTRIBUTES=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST 
is not set +# CONFIG_APPLE_PROPERTIES is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_EFI_RCI2_TABLE=y +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# CONFIG_EFI_DISABLE_RUNTIME is not set +CONFIG_EFI_COCO_SECRET=y +CONFIG_UNACCEPTED_MEMORY=y +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. +# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# 
Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set + +# +# ECC engine support +# +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_MTD_HYPERBUS is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +# CONFIG_PARPORT_PC_FIFO is not set +# CONFIG_PARPORT_PC_SUPERIO is not set +CONFIG_PARPORT_1284=y +CONFIG_PARPORT_NOT_PC=y +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +# CONFIG_BLK_DEV_FD is not set +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_LZORLE=y +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +CONFIG_ZRAM_DEF_COMP="lzo-rle" +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_ZRAM_MULTI_COMP is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_XEN_BLKDEV_FRONTEND=m +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_UBLK=m 
+CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_VERBOSE_ERRORS is not set +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +# CONFIG_NVME_AUTH is not set +CONFIG_NVME_TARGET=m +# CONFIG_NVME_TARGET_PASSTHRU is not set +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +# CONFIG_NVME_TARGET_AUTH is not set +# end of NVME Support + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_SGI_XP=m +CONFIG_HP_ILO=m +CONFIG_SGI_GRU=m +# CONFIG_SGI_GRU_DEBUG is not set +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +CONFIG_VMWARE_BALLOON=m +# CONFIG_SRAM is not set +# CONFIG_DW_XDATA_PCIE is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +CONFIG_MISC_RTSX=m +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + +CONFIG_SENSORS_LIS3_I2C=m + +# +# Altera FPGA firmware download module (requires I2C) +# +CONFIG_ALTERA_STAPL=m +CONFIG_INTEL_MEI=m +CONFIG_INTEL_MEI_ME=m +# CONFIG_INTEL_MEI_TXE is not set 
+# CONFIG_INTEL_MEI_GSC is not set +# CONFIG_INTEL_MEI_HDCP is not set +# CONFIG_INTEL_MEI_PXP is not set +# CONFIG_INTEL_MEI_GSC_PROXY is not set +CONFIG_VMWARE_VMCI=m +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_BCM_VK is not set +# CONFIG_MISC_ALCOR_PCI is not set +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +# CONFIG_UACCE is not set +CONFIG_PVPANIC=y +# CONFIG_PVPANIC_MMIO is not set +# CONFIG_PVPANIC_PCI is not set +# CONFIG_GP_PCI1XXXX is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI_COMMON=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_BLK_DEV_BSG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +CONFIG_SCSI_AACRAID=m +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m 
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_VMWARE_PVSCSI=m +# CONFIG_XEN_SCSI_FRONTEND is not set +CONFIG_HYPERV_STORAGE=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +# CONFIG_FCOE is not set +CONFIG_FCOE_FNIC=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +CONFIG_SCSI_ISCI=m +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_PPA is not set +# CONFIG_SCSI_IMM is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_EFCT is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=m +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=m +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_AHCI_DWC is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# 
CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=m +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_RZ1000 is not set +# CONFIG_PATA_PARPORT is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y 
+CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_BITMAP_FILE=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +# CONFIG_MD_MULTIPATH is not set +CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +# CONFIG_DM_EBS is not set +CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_MULTIPATH_HST is not set +# CONFIG_DM_MULTIPATH_IOA is not set +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_DM_AUDIT=y +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_SBP_TARGET is not set +# CONFIG_REMOTE_TARGET is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_MACINTOSH_DRIVERS=y +CONFIG_MAC_EMUMOUSEBTN=y +CONFIG_NETDEVICES=y 
+CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set +# CONFIG_AMT is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +# CONFIG_NETCONSOLE_EXTENDED_LOG is not set +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +# CONFIG_NET_VENDOR_AMD is not set +CONFIG_NET_VENDOR_AQUANTIA=y +CONFIG_AQTION=m +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ASIX=y +CONFIG_NET_VENDOR_ATHEROS=y +CONFIG_ATL2=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_CX_ECAT is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +# CONFIG_NET_VENDOR_CADENCE is not set +CONFIG_NET_VENDOR_CAVIUM=y +# CONFIG_THUNDER_NIC_PF is not set +# 
CONFIG_THUNDER_NIC_VF is not set +# CONFIG_THUNDER_NIC_BGX is not set +# CONFIG_THUNDER_NIC_RGX is not set +CONFIG_CAVIUM_PTP=y +CONFIG_LIQUIDIO_CORE=m +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_CHELSIO_TLS_DEVICE is not set +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_NET_VENDOR_DAVICOM=y +CONFIG_DNET=m +CONFIG_NET_VENDOR_DEC=y +# CONFIG_NET_TULIP is not set +# CONFIG_NET_VENDOR_DLINK is not set +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_HWMON=y +# CONFIG_BE2NET_BE2 is not set +# CONFIG_BE2NET_BE3 is not set +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +CONFIG_NET_VENDOR_ENGLEDER=y +# CONFIG_TSNEP is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_FUNGIBLE=y +# CONFIG_FUN_ETH is not set +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_GVE=m +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGB_DCA=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCA=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y +CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_ICE_SWITCHDEV=y +CONFIG_ICE_HWTS=y +CONFIG_FM10K=m +CONFIG_IGC=m +# CONFIG_JME is not set +CONFIG_NET_VENDOR_LITEX=y +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y 
+CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_BRIDGE=y +CONFIG_MLX5_CLS_ACT=y +CONFIG_MLX5_TC_CT=y +CONFIG_MLX5_TC_SAMPLE=y +CONFIG_MLX5_CORE_EN_DCB=y +# CONFIG_MLX5_CORE_IPOIB is not set +# CONFIG_MLX5_MACSEC is not set +# CONFIG_MLX5_EN_IPSEC is not set +# CONFIG_MLX5_EN_TLS is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLX5_SF is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +CONFIG_NET_VENDOR_MICROSOFT=y +# CONFIG_MICROSOFT_MANA is not set +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +CONFIG_NFP_NET_IPSEC=y +CONFIG_NFP_DEBUG=y +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +# CONFIG_QLCNIC is not set +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +CONFIG_NET_VENDOR_BROCADE=y +# CONFIG_BNA is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +# CONFIG_ATP is not set +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# 
CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_SFC=m +CONFIG_SFC_MTD=y +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_SRIOV=y +CONFIG_SFC_MCDI_LOGGING=y +# CONFIG_SFC_FALCON is not set +# CONFIG_SFC_SIENA is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_LIBWX=m +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_EMACLITE is not set +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLINK=m +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y +CONFIG_FIXED_PHY=y +CONFIG_SFP=m + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +# CONFIG_ADIN_PHY is not set +# CONFIG_ADIN1100_PHY is not set +CONFIG_AQUANTIA_PHY=m +CONFIG_AX88796B_PHY=m +CONFIG_BROADCOM_PHY=m +# CONFIG_BCM54140_PHY is not set +CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM84881_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BCM_NET_PHYPTP=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is 
not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +# CONFIG_PSE_CONTROLLER is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_FWNODE_MDIO=y +CONFIG_ACPI_MDIO=y +CONFIG_MDIO_DEVRES=y +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_CAVIUM=m +# CONFIG_MDIO_GPIO is not set +CONFIG_MDIO_I2C=m +# CONFIG_MDIO_MVUSB is not set +CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=m +# end of PCS device drivers + +# CONFIG_PLIP is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +# CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# CONFIG_PPPOE_HASH_BITS_8 is not set +CONFIG_PPPOE_HASH_BITS=4 +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m 
+CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set +CONFIG_USB_RTL8153_ECM=m +CONFIG_WLAN=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +CONFIG_ATH_DEBUG=y +# CONFIG_ATH_TRACEPOINTS is not set +# CONFIG_ATH5K is not set +# CONFIG_ATH5K_PCI is not set +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_COMMON_DEBUG=y +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_DEBUGFS=y +# CONFIG_ATH9K_STATION_STATISTICS is not set +# CONFIG_ATH9K_DYNACK is not set +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_RFKILL=y +# CONFIG_ATH9K_CHANNEL_CONTEXT is not set +CONFIG_ATH9K_PCOEM=y +# CONFIG_ATH9K_PCI_NO_EEPROM is not set +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +# CONFIG_ATH9K_HWRNG is not set +# CONFIG_ATH9K_COMMON_SPECTRAL is not set +# CONFIG_CARL9170 is not set +# CONFIG_ATH6KL is not set +# CONFIG_AR5523 is not set +# CONFIG_WIL6210 is not set +CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y +CONFIG_ATH10K_PCI=m +# CONFIG_ATH10K_SDIO is not set +# CONFIG_ATH10K_USB is not set +CONFIG_ATH10K_DEBUG=y +# CONFIG_ATH10K_DEBUGFS is not set +CONFIG_ATH10K_TRACING=y +# CONFIG_WCN36XX is not set +# CONFIG_ATH11K is not set +# CONFIG_ATH12K is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +CONFIG_BRCMUTIL=m +CONFIG_BRCMSMAC=m 
+CONFIG_BRCMSMAC_LEDS=y +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y + +# +# Debugging Options +# +CONFIG_IWLWIFI_DEBUG=y +CONFIG_IWLWIFI_DEBUGFS=y +CONFIG_IWLWIFI_DEVICE_TRACING=y +# end of Debugging Options + +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_WLAN_VENDOR_MARVELL=y +# CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +# CONFIG_MWL8K is not set +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_MT7601U=m +CONFIG_MT76_CORE=m +CONFIG_MT76_LEDS=y +CONFIG_MT76_USB=m +CONFIG_MT76x02_LIB=m +CONFIG_MT76x02_USB=m +CONFIG_MT76x0_COMMON=m +CONFIG_MT76x0U=m +# CONFIG_MT76x0E is not set +CONFIG_MT76x2_COMMON=m +# CONFIG_MT76x2E is not set +CONFIG_MT76x2U=m +# CONFIG_MT7603E is not set +# CONFIG_MT7615E is not set +# CONFIG_MT7663U is not set +# CONFIG_MT7663S is not set +# CONFIG_MT7915E is not set +# CONFIG_MT7921E is not set +# CONFIG_MT7921S is not set +# CONFIG_MT7921U is not set +# CONFIG_MT7996E is not set +CONFIG_WLAN_VENDOR_MICROCHIP=y +# CONFIG_WILC1000_SDIO is not set +CONFIG_WLAN_VENDOR_PURELIFI=y +# CONFIG_PLFXLC is not set +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_RT2X00=m +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +# CONFIG_RT2500USB is not set +# CONFIG_RT73USB is not set +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y 
+CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +CONFIG_RT2X00_LIB_DEBUGFS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WLAN_VENDOR_REALTEK=y +# CONFIG_RTL8180 is not set +# CONFIG_RTL8187 is not set +CONFIG_RTL_CARDS=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m +CONFIG_RTLWIFI_DEBUG=y +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m +CONFIG_RTL8XXXU=m +# CONFIG_RTL8XXXU_UNTESTED is not set +CONFIG_RTW88=m +CONFIG_RTW88_CORE=m +CONFIG_RTW88_PCI=m +CONFIG_RTW88_8822B=m +CONFIG_RTW88_8822C=m +CONFIG_RTW88_8822BE=m +# CONFIG_RTW88_8822BS is not set +# CONFIG_RTW88_8822BU is not set +CONFIG_RTW88_8822CE=m +# CONFIG_RTW88_8822CS is not set +# CONFIG_RTW88_8822CU is not set +# CONFIG_RTW88_8723DE is not set +# CONFIG_RTW88_8723DS is not set +# CONFIG_RTW88_8723DU is not set +# CONFIG_RTW88_8821CE is not set +# CONFIG_RTW88_8821CS is not set +# CONFIG_RTW88_8821CU is not set +CONFIG_RTW88_DEBUG=y +CONFIG_RTW88_DEBUGFS=y +# CONFIG_RTW89 is not set +# CONFIG_WLAN_VENDOR_RSI is not set +CONFIG_WLAN_VENDOR_SILABS=y +# CONFIG_WFX is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_QTNFMAC_PCIE is not set +# CONFIG_USB_NET_RNDIS_WLAN is not set +CONFIG_MAC80211_HWSIM=m +# CONFIG_VIRT_WIFI is not set +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + 
+# +# X.25/LAPB support is disabled +# +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_HWSIM is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + +CONFIG_XEN_NETDEV_FRONTEND=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_HYPERV_NET=m +CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m +CONFIG_ISDN=y +CONFIG_ISDN_CAPI=y +CONFIG_CAPI_TRACE=y +CONFIG_ISDN_CAPI_MIDDLEWARE=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m + +# +# mISDN hardware drivers +# +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m +CONFIG_MISDN_HDLC=m +CONFIG_MISDN_IPAC=m +CONFIG_MISDN_ISAR=m + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set +CONFIG_INPUT_VIVALDIFMAP=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=m +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# 
CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=m +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_LIFEBOOK=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y +CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_VMMOUSE=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set +CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +# CONFIG_TABLET_USB_HANWANG is not set +CONFIG_TABLET_USB_KBTAB=m +# CONFIG_TABLET_USB_PEGASUS is not set +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE 
is not set +# CONFIG_TOUCHSCREEN_CYTTSP5 is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set +# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_ILITEK is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MSG2638 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set +# CONFIG_TOUCHSCREEN_IMAGIS is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# 
CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +# CONFIG_TOUCHSCREEN_IQS5XX is not set +# CONFIG_TOUCHSCREEN_IQS7211 is not set +# CONFIG_TOUCHSCREEN_ZINITIX is not set +# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +CONFIG_INPUT_PCSPKR=m +# CONFIG_INPUT_MMA8450 is not set +CONFIG_INPUT_APANEL=m +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_GPIO_VIBRA is not set +CONFIG_INPUT_ATLAS_BTNS=m +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +# CONFIG_INPUT_DA7280_HAPTICS is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_IQS269A is not set +# CONFIG_INPUT_IQS626A is not set +# CONFIG_INPUT_IQS7222 is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +# CONFIG_RMI4_F34 is not set +# CONFIG_RMI4_F3A is not set +CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m 
+CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +CONFIG_HYPERV_KEYBOARD=m +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_LEGACY_TIOCSTI=y +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_16550A_VARIANTS is not set +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCILIB=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_PCI1XXXX is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_LPSS=y +CONFIG_SERIAL_8250_MID=y +CONFIG_SERIAL_8250_PERICOM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_LANTIQ is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_SPRD is not set +# end 
of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_NOZOMI=m +# CONFIG_NULL_TTY is not set +CONFIG_HVC_DRIVER=y +CONFIG_HVC_IRQ=y +CONFIG_HVC_XEN=y +CONFIG_HVC_XEN_FRONTEND=y +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set +CONFIG_PPDEV=m +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_AMD=m +# CONFIG_HW_RANDOM_BA431 is not set +CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_VIRTIO=y +# CONFIG_HW_RANDOM_XIPHERA is not set +# CONFIG_APPLICOM is not set +# CONFIG_MWAVE is not set +CONFIG_DEVMEM=y +CONFIG_NVRAM=y +CONFIG_DEVPORT=y +CONFIG_HPET=y +CONFIG_HPET_MMAP=y +# CONFIG_HPET_MMAP_DEFAULT is not set +CONFIG_HANGCHECK_TIMER=m +CONFIG_UV_MMTIMER=m +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +# CONFIG_TCG_TIS_I2C is not set +# CONFIG_TCG_TIS_I2C_CR50 is not set +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +# CONFIG_TCG_XEN is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +CONFIG_TCG_TIS_ST33ZP24=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TELCLOCK=m +# CONFIG_XILLYBUS is not set +# CONFIG_XILLYUSB is not set +# end of Character devices + +# +# I2C support +# +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set 
+# CONFIG_I2C_MUX_REG is not set +CONFIG_I2C_MUX_MLXCPLD=m +# end of Multiplexer I2C Chip support + +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD756_S4882=m +CONFIG_I2C_AMD8111=m +# CONFIG_I2C_AMD_MP2 is not set +CONFIG_I2C_I801=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_ISMT=m +CONFIG_I2C_PIIX4=m +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_NFORCE2_S4985=m +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m + +# +# ACPI drivers +# +CONFIG_I2C_SCMI=m + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=m +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=m +# CONFIG_I2C_DESIGNWARE_AMDPSP is not set +CONFIG_I2C_DESIGNWARE_BAYTRAIL=y +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set +CONFIG_I2C_PARPORT=m +# CONFIG_I2C_PCI1XXXX is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_MLXCPLD=m +# CONFIG_I2C_VIRTIO is not set +# end of I2C Hardware Bus support + +CONFIG_I2C_STUB=m +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +# CONFIG_SPI is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set 
+CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y +CONFIG_DP83640_PHY=m +# CONFIG_PTP_1588_CLOCK_INES is not set +CONFIG_PTP_1588_CLOCK_KVM=m +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_VMW is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_CY8C95X0 is not set +# CONFIG_PINCTRL_MCP23S08 is not set + +# +# Intel pinctrl drivers +# +CONFIG_PINCTRL_BAYTRAIL=y +# CONFIG_PINCTRL_CHERRYVIEW is not set +# CONFIG_PINCTRL_LYNXPOINT is not set +CONFIG_PINCTRL_INTEL=y +# CONFIG_PINCTRL_ALDERLAKE is not set +CONFIG_PINCTRL_BROXTON=m +CONFIG_PINCTRL_CANNONLAKE=m +CONFIG_PINCTRL_CEDARFORK=m +CONFIG_PINCTRL_DENVERTON=m +# CONFIG_PINCTRL_ELKHARTLAKE is not set +# CONFIG_PINCTRL_EMMITSBURG is not set +CONFIG_PINCTRL_GEMINILAKE=m +CONFIG_PINCTRL_ICELAKE=m +# CONFIG_PINCTRL_JASPERLAKE is not set +# CONFIG_PINCTRL_LAKEFIELD is not set +CONFIG_PINCTRL_LEWISBURG=m +# CONFIG_PINCTRL_METEORLAKE is not set +CONFIG_PINCTRL_SUNRISEPOINT=m +# CONFIG_PINCTRL_TIGERLAKE is not set +# end of Intel pinctrl drivers + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=m + +# +# Memory mapped GPIO drivers +# +CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not 
set +CONFIG_GPIO_ICH=m +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# Port-mapped I/O GPIO drivers +# +# CONFIG_GPIO_VX855 is not set +# CONFIG_GPIO_F7188X is not set +# CONFIG_GPIO_IT87 is not set +# CONFIG_GPIO_SCH is not set +# CONFIG_GPIO_SCH311X is not set +# CONFIG_GPIO_WINBOND is not set +# CONFIG_GPIO_WS16C48 is not set +# end of Port-mapped I/O GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# CONFIG_GPIO_ELKHARTLAKE is not set +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_AMD8111 is not set +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_ML_IOH is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# USB GPIO expanders +# +CONFIG_GPIO_VIPERBOARD=m +# end of USB GPIO expanders + +# +# Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +# CONFIG_W1 is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +# CONFIG_BATTERY_SBS is not set +# 
CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_MAX77976 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not set +# CONFIG_CHARGER_BQ256XX is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_ABITUGURU=m +CONFIG_SENSORS_ABITUGURU3=m +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +# CONFIG_SENSORS_ADM1177 is not set +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set +CONFIG_SENSORS_ASC7621=m +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set +CONFIG_SENSORS_K8TEMP=m +CONFIG_SENSORS_K10TEMP=m +CONFIG_SENSORS_FAM15H_POWER=m +CONFIG_SENSORS_APPLESMC=m +CONFIG_SENSORS_ASB100=m +CONFIG_SENSORS_ATXP1=m +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_DELL_SMM=m +# CONFIG_I8K is not set 
+CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_FSCHMD=m +# CONFIG_SENSORS_FTSTEUTATES is not set +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_IIO_HWMON is not set +CONFIG_SENSORS_I5500=m +CONFIG_SENSORS_CORETEMP=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +# CONFIG_SENSORS_POWR1220 is not set +CONFIG_SENSORS_LINEAGE=m +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +# CONFIG_SENSORS_LTC4222 is not set +CONFIG_SENSORS_LTC4245=m +# CONFIG_SENSORS_LTC4260 is not set +CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX127 is not set +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MC34VR500 is not set +CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_MLXREG_FAN is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m 
+CONFIG_SENSORS_NTC_THERMISTOR=m +# CONFIG_SENSORS_NCT6683 is not set +CONFIG_SENSORS_NCT6775_CORE=m +CONFIG_SENSORS_NCT6775=m +# CONFIG_SENSORS_NCT6775_I2C is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set +# CONFIG_SENSORS_OXP is not set +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +# CONFIG_SENSORS_ACBEL_FSG032 is not set +# CONFIG_SENSORS_ADM1266 is not set +CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_BEL_PFE is not set +# CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set +# CONFIG_SENSORS_FSP_3Y is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_DPS920AB is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR36021 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set +CONFIG_SENSORS_LM25066=m +# CONFIG_SENSORS_LT7182S is not set +CONFIG_SENSORS_LTC2978=m +# CONFIG_SENSORS_LTC3815 is not set +# CONFIG_SENSORS_MAX15301 is not set +CONFIG_SENSORS_MAX16064=m +# CONFIG_SENSORS_MAX16601 is not set +# CONFIG_SENSORS_MAX20730 is not set +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX31785 is not set +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +# CONFIG_SENSORS_MP2888 is not set +# CONFIG_SENSORS_MP2975 is not set +# CONFIG_SENSORS_MP5023 is not set +# CONFIG_SENSORS_MPQ7932 is not set +# CONFIG_SENSORS_PIM4328 is not set +# CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set 
+CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +# CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set +CONFIG_SENSORS_ZL6100=m +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +# CONFIG_SENSORS_SHTC1 is not set +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_ADC128D818 is not set +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set +CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_SENSORS_ATK0110=m +# CONFIG_SENSORS_ASUS_WMI is not set +# CONFIG_SENSORS_ASUS_EC is not set +# CONFIG_SENSORS_HP_WMI is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set 
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_ACPI=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_BANG_BANG=y +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_EMULATION is not set + +# +# Intel thermal drivers +# +CONFIG_INTEL_POWERCLAMP=m +CONFIG_X86_THERMAL_VECTOR=y +CONFIG_INTEL_TCC=y +CONFIG_X86_PKG_TEMP_THERMAL=m +CONFIG_INTEL_SOC_DTS_IOSF_CORE=m +# CONFIG_INTEL_SOC_DTS_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# +CONFIG_INT340X_THERMAL=m +CONFIG_ACPI_THERMAL_REL=m +# CONFIG_INT3406_THERMAL is not set +CONFIG_PROC_THERMAL_MMIO_RAPL=m +# end of ACPI INT340X thermal drivers + +CONFIG_INTEL_PCH_THERMAL=m +# CONFIG_INTEL_TCC_COOLING is not set +# CONFIG_INTEL_HFI_THERMAL is not set +# end of Intel thermal drivers + +# CONFIG_GENERIC_ADC_THERMAL is not set +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 +CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_WDAT_WDT=m +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_MLX_WDT is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_ACQUIRE_WDT is not set +# CONFIG_ADVANTECH_WDT is not set +# CONFIG_ADVANTECH_EC_WDT is not set +CONFIG_ALIM1535_WDT=m +CONFIG_ALIM7101_WDT=m +# CONFIG_EBC_C384_WDT is not set +# CONFIG_EXAR_WDT is not set +CONFIG_F71808E_WDT=m +CONFIG_SP5100_TCO=m +CONFIG_SBC_FITPC2_WATCHDOG=m +# 
CONFIG_EUROTECH_WDT is not set +CONFIG_IB700_WDT=m +CONFIG_IBMASR=m +# CONFIG_WAFER_WDT is not set +CONFIG_I6300ESB_WDT=m +CONFIG_IE6XX_WDT=m +CONFIG_ITCO_WDT=m +CONFIG_ITCO_VENDOR_SUPPORT=y +CONFIG_IT8712F_WDT=m +CONFIG_IT87_WDT=m +CONFIG_HP_WATCHDOG=m +CONFIG_HPWDT_NMI_DECODING=y +# CONFIG_SC1200_WDT is not set +# CONFIG_PC87413_WDT is not set +CONFIG_NV_TCO=m +# CONFIG_60XX_WDT is not set +# CONFIG_CPU5_WDT is not set +CONFIG_SMSC_SCH311X_WDT=m +# CONFIG_SMSC37B787_WDT is not set +# CONFIG_TQMX86_WDT is not set +CONFIG_VIA_WDT=m +CONFIG_W83627HF_WDT=m +CONFIG_W83877F_WDT=m +CONFIG_W83977F_WDT=m +CONFIG_MACHZ_WDT=m +# CONFIG_SBC_EPX_C3_WATCHDOG is not set +CONFIG_INTEL_MEI_WDT=m +# CONFIG_NI903X_WDT is not set +# CONFIG_NIC7018_WDT is not set +# CONFIG_MEN_A21_WDT is not set +CONFIG_XEN_WDT=m + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +CONFIG_LPC_ICH=m +CONFIG_LPC_SCH=m +CONFIG_MFD_INTEL_LPSS=m +CONFIG_MFD_INTEL_LPSS_ACPI=m +CONFIG_MFD_INTEL_LPSS_PCI=m +# CONFIG_MFD_INTEL_PMC_BXT is not set +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not 
set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +CONFIG_MFD_VIPERBOARD=m +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_SI476X_CORE is not set +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TQMX86 is not set +CONFIG_MFD_VX855=m +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ATC260X_I2C is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set +CONFIG_RC_CORE=m +# CONFIG_LIRC is not set +CONFIG_RC_MAP=m +CONFIG_RC_DECODERS=y +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +# CONFIG_IR_RCMM_DECODER is not set +CONFIG_IR_SANYO_DECODER=m +# CONFIG_IR_SHARP_DECODER is not set +CONFIG_IR_SONY_DECODER=m +# CONFIG_IR_XMP_DECODER is not set +CONFIG_RC_DEVICES=y +CONFIG_IR_ENE=m +CONFIG_IR_FINTEK=m +# CONFIG_IR_IGORPLUGUSB is not set +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_SERIAL=m 
+# CONFIG_IR_SERIAL_TRANSMITTER is not set +CONFIG_IR_STREAMZAP=m +# CONFIG_IR_TOY is not set +CONFIG_IR_TTUSBIR=m +CONFIG_IR_WINBOND_CIR=m +CONFIG_RC_ATI_REMOTE=m +CONFIG_RC_LOOPBACK=m +# CONFIG_RC_XBOX_DVD is not set +CONFIG_CEC_CORE=m + +# +# CEC support +# +# CONFIG_MEDIA_CEC_RC is not set +CONFIG_MEDIA_CEC_SUPPORT=y +# CONFIG_CEC_CH7322 is not set +# CONFIG_CEC_GPIO is not set +# CONFIG_CEC_SECO is not set +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +# end of CEC support + +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_SUPPORT_FILTER=y +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y + +# +# Media device types +# +# CONFIG_MEDIA_CAMERA_SUPPORT is not set +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set +# CONFIG_MEDIA_RADIO_SUPPORT is not set +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_PLATFORM_SUPPORT is not set +# CONFIG_MEDIA_TEST_SUPPORT is not set +# end of Media device types + +# +# Media drivers +# + +# +# Drivers filtered as selected at 'Filter media drivers' +# + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_MEDIA_PCI_SUPPORT=y +# CONFIG_IPU_BRIDGE is not set +# end of Media drivers + +CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV=y + +# +# Media ancillary drivers +# +# end of Media ancillary drivers + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set +# CONFIG_AGP is not set +CONFIG_INTEL_GTT=m +CONFIG_VGA_SWITCHEROO=y +CONFIG_DRM=m +CONFIG_DRM_MIPI_DSI=y +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=m +CONFIG_DRM_EXEC=m +CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_TTM_HELPER=m 
+CONFIG_DRM_GEM_SHMEM_HELPER=m +CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_I2C_SIL164=m +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# end of ARM devices + +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +# CONFIG_DRM_AMDGPU_CIK is not set +# CONFIG_DRM_AMDGPU_USERPTR is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# CONFIG_DEBUG_KERNEL_DC is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + +# CONFIG_HSA_AMD is not set +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +CONFIG_NOUVEAU_DEBUG_MMU=y +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +CONFIG_DRM_I915=m +CONFIG_DRM_I915_FORCE_PROBE="" +CONFIG_DRM_I915_CAPTURE_ERROR=y +CONFIG_DRM_I915_COMPRESS_ERROR=y +CONFIG_DRM_I915_USERPTR=y +CONFIG_DRM_I915_GVT_KVMGT=m +CONFIG_DRM_I915_REQUEST_TIMEOUT=20000 +CONFIG_DRM_I915_FENCE_TIMEOUT=10000 +CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250 +CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500 +CONFIG_DRM_I915_PREEMPT_TIMEOUT=640 +CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE=7500 +CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000 +CONFIG_DRM_I915_STOP_TIMEOUT=100 +CONFIG_DRM_I915_TIMESLICE_DURATION=1 +CONFIG_DRM_I915_GVT=y +# CONFIG_DRM_VGEM is not set +CONFIG_DRM_VKMS=m +CONFIG_DRM_VMWGFX=m +# CONFIG_DRM_VMWGFX_MKSSTATS is not set +CONFIG_DRM_GMA500=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +# end of Display 
Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# end of Display Interface Bridges + +# CONFIG_DRM_LOONGSON is not set +# CONFIG_DRM_ETNAVIV is not set +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_DRM_XEN_FRONTEND is not set +# CONFIG_DRM_VBOXVIDEO is not set +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_SSD130X is not set +# CONFIG_DRM_HYPERV is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y +CONFIG_DRM_PRIVACY_SCREEN=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_VESA=y +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_VIA is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SM501 is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_XEN_FBDEV_FRONTEND is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +CONFIG_FB_HYPERV=m +# 
CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_DEVICE=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_IOMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +# CONFIG_FB_MODE_HELPERS is not set +CONFIG_FB_TILEBLITTING=y +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_LCD_PLATFORM=m +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set +# CONFIG_BACKLIGHT_PWM is not set +CONFIG_BACKLIGHT_APPLE=m +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_SAHARA is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_DRM_ACCEL is not set +CONFIG_SOUND=m +# CONFIG_SND is not set 
+CONFIG_HID_SUPPORT=y +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +# CONFIG_HID_BIGBEN_FF is not set +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +CONFIG_HID_CMEDIA=m +# CONFIG_HID_CP2112 is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +# CONFIG_HID_EVISION is not set +CONFIG_HID_EZKEY=m +# CONFIG_HID_FT260 is not set +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +# CONFIG_HID_GLORIOUS is not set +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_STADIA_FF is not set +# CONFIG_HID_VIVALDI is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_VRC2 is not set +# CONFIG_HID_XIAOMI is not set +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +# CONFIG_HID_LETSKETCH is not set +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MEGAWORLD_FF is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +# 
CONFIG_HID_NINTENDO is not set +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +# CONFIG_HID_NVIDIA_SHIELD is not set +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PICOLCD_CIR=y +CONFIG_HID_PLANTRONICS=m +# CONFIG_HID_PXRC is not set +# CONFIG_HID_RAZER is not set +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +# CONFIG_HID_SEMITEK is not set +# CONFIG_HID_SIGMAMICRO is not set +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_HYPERV_MOUSE=m +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +# CONFIG_HID_TOPRE is not set +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_U2FZERO is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +# CONFIG_HID_MCP2221 is not set +# end of Special HID drivers + +# +# HID-BPF support +# +# CONFIG_HID_BPF is not set +# end of HID-BPF support + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +# end of USB HID support + +CONFIG_I2C_HID=m +# CONFIG_I2C_HID_ACPI is not set +# CONFIG_I2C_HID_OF is not set + +# +# Intel ISH HID support +# +CONFIG_INTEL_ISH_HID=m +# CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER is not set +# end of Intel ISH HID support + +# +# AMD SFH HID Support +# +# CONFIG_AMD_SFH_HID is not set +# end of AMD SFH HID Support + 
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_LED_TRIG=y +# CONFIG_USB_ULPI_BUS is not set +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_PRODUCTLIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 +CONFIG_USB_MON=y + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PCI_RENESAS is not set +# CONFIG_USB_XHCI_PLATFORM is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +# CONFIG_USB_UHCI_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set +# CONFIG_USB_XEN_HCD is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m 
+CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set + +# +# USB dual-mode controller drivers +# +# CONFIG_USB_CDNS_SUPPORT is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_SIMPLE is not set +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m 
+# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +# CONFIG_USB_SERIAL_XR is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_USS720=m +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +# CONFIG_APPLE_MFI_FASTCHARGE is not set +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +CONFIG_TYPEC=y +CONFIG_TYPEC_TCPM=y +# CONFIG_TYPEC_TCPCI is not set +CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_UCSI=y +# CONFIG_UCSI_CCG is not set +CONFIG_UCSI_ACPI=y +# CONFIG_UCSI_STM32G0 is not set +CONFIG_TYPEC_TPS6598X=m +# CONFIG_TYPEC_ANX7411 is not set +# CONFIG_TYPEC_RT1719 is not set +# CONFIG_TYPEC_HD3SS3220 is not set +# CONFIG_TYPEC_STUSB160X is not set +# CONFIG_TYPEC_WUSB3801 is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_FSA4480 is not set +# CONFIG_TYPEC_MUX_GPIO_SBU is not set +CONFIG_TYPEC_MUX_PI3USB30532=m +# CONFIG_TYPEC_MUX_NB7VPQ904M is not set +# end of USB Type-C Multiplexer/DeMultiplexer Switch support + +# +# USB Type-C 
Alternate Mode drivers +# +CONFIG_TYPEC_DP_ALTMODE=m +# CONFIG_TYPEC_NVIDIA_ALTMODE is not set +# end of USB Type-C Alternate Mode drivers + +CONFIG_USB_ROLE_SWITCH=y +CONFIG_USB_ROLES_INTEL_XHCI=y +CONFIG_MMC=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_WBSD is not set +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_HSQ is not set +# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set +# CONFIG_MMC_SDHCI_XENON is not set +# CONFIG_SCSI_UFSHCD is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_CLASS_MULTICOLOR is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_APU is not set +# CONFIG_LEDS_AW200XX is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP50XX is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_PWM is not set +# 
CONFIG_LEDS_BD2606MVV is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_INTEL_SS4200=m +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_MLXCPLD=m +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_NIC78BX is not set + +# +# Flash and Torch LED drivers +# + +# +# RGB LED drivers +# + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set +CONFIG_LEDS_TRIGGER_AUDIO=m +# CONFIG_LEDS_TRIGGER_TTY is not set + +# +# Simple LED drivers +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +CONFIG_INFINIBAND_ERDMA=m +CONFIG_INFINIBAND_HFI1=m +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set +# CONFIG_SDMA_VERBOSITY is not set +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_QEDR=m +# 
CONFIG_INFINIBAND_QIB is not set +CONFIG_INFINIBAND_USNIC=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_INFINIBAND_OPA_VNIC=m +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +CONFIG_EDAC_DEBUG=y +CONFIG_EDAC_DECODE_MCE=m +CONFIG_EDAC_GHES=y +CONFIG_EDAC_AMD64=m +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_IE31200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_SKX=m +CONFIG_EDAC_I10NM=m +CONFIG_EDAC_PND2=m +# CONFIG_EDAC_IGEN6 is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +# CONFIG_RTC_DRV_DS1374_WDT is not set +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m 
+CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +# CONFIG_RTC_DRV_RX8010 is not set +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_I2C_AND_SPI=m + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +# CONFIG_RTC_DRV_PCF2127 is not set +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +# CONFIG_RTC_DRV_RX6110 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +# CONFIG_RTC_DRV_GOLDFISH is not set +CONFIG_DMADEVICES=y +CONFIG_DMADEVICES_DEBUG=y +CONFIG_DMADEVICES_VDEBUG=y + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +# CONFIG_ALTERA_MSGDMA is not set +CONFIG_INTEL_IDMA64=m +CONFIG_INTEL_IDXD_BUS=m +CONFIG_INTEL_IDXD=m +# CONFIG_INTEL_IDXD_COMPAT is not set +CONFIG_INTEL_IDXD_SVM=y +# CONFIG_INTEL_IDXD_PERFMON is not set +CONFIG_INTEL_IOATDMA=m +# CONFIG_PLX_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set +CONFIG_AMD_PTDMA=y +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=y +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=y +# CONFIG_DW_EDMA is not set +CONFIG_HSU_DMA=y +# CONFIG_SF_PDMA is not set +# CONFIG_INTEL_LDMA 
is not set + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +CONFIG_DMATEST=m +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +CONFIG_DMABUF_DEBUG=y +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# CONFIG_DMABUF_SYSFS_STATS is not set +# end of DMABUF options + +CONFIG_DCA=m +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_UIO_HV_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI=m +# CONFIG_VFIO_PCI_VGA is not set +# CONFIG_VFIO_PCI_IGD is not set +# CONFIG_MLX5_VFIO_PCI is not set +# end of VFIO support for PCI devices + +CONFIG_VFIO_MDEV=m +CONFIG_IRQ_BYPASS_MANAGER=m +CONFIG_VIRT_DRIVERS=y +CONFIG_VMGENID=y +# CONFIG_VBOXGUEST is not set +# CONFIG_NITRO_ENCLAVES is not set +CONFIG_EFI_SECRET=m +CONFIG_SEV_GUEST=m +CONFIG_TDX_GUEST_DRIVER=m +CONFIG_VIRTIO_ANCHOR=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_PCI_LIB=y +CONFIG_VIRTIO_PCI_LIB_LEGACY=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_MEM=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +CONFIG_HYPERV=m +# CONFIG_HYPERV_VTL_MODE is not set 
+CONFIG_HYPERV_TIMER=y +CONFIG_HYPERV_UTILS=m +CONFIG_HYPERV_BALLOON=m +# end of Microsoft Hyper-V guest support + +# +# Xen driver support +# +# CONFIG_XEN_BALLOON is not set +CONFIG_XEN_DEV_EVTCHN=m +# CONFIG_XEN_BACKEND is not set +CONFIG_XENFS=m +CONFIG_XEN_COMPAT_XENFS=y +CONFIG_XEN_SYS_HYPERVISOR=y +CONFIG_XEN_XENBUS_FRONTEND=y +# CONFIG_XEN_GNTDEV is not set +# CONFIG_XEN_GRANT_DEV_ALLOC is not set +# CONFIG_XEN_GRANT_DMA_ALLOC is not set +# CONFIG_XEN_PVCALLS_FRONTEND is not set +CONFIG_XEN_PRIVCMD=m +CONFIG_XEN_EFI=y +CONFIG_XEN_AUTO_XLATE=y +CONFIG_XEN_ACPI=y +# CONFIG_XEN_UNPOPULATED_ALLOC is not set +# CONFIG_XEN_VIRTIO is not set +# end of Xen driver support + +# CONFIG_GREYBUS is not set +# CONFIG_COMEDI is not set +# CONFIG_STAGING is not set +# CONFIG_CHROME_PLATFORMS is not set +CONFIG_MELLANOX_PLATFORM=y +CONFIG_MLXREG_HOTPLUG=m +# CONFIG_MLXREG_IO is not set +# CONFIG_MLXREG_LC is not set +# CONFIG_NVSW_SN2201 is not set +CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE_3_POWER_OPREGION is not set +# CONFIG_SURFACE_GPE is not set +# CONFIG_SURFACE_HOTPLUG is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +CONFIG_X86_PLATFORM_DEVICES=y +CONFIG_ACPI_WMI=m +CONFIG_WMI_BMOF=m +# CONFIG_HUAWEI_WMI is not set +# CONFIG_UV_SYSFS is not set +CONFIG_MXM_WMI=m +# CONFIG_NVIDIA_WMI_EC_BACKLIGHT is not set +# CONFIG_XIAOMI_WMI is not set +# CONFIG_GIGABYTE_WMI is not set +# CONFIG_YOGABOOK is not set +CONFIG_ACERHDF=m +# CONFIG_ACER_WIRELESS is not set +CONFIG_ACER_WMI=m +# CONFIG_AMD_PMF is not set +# CONFIG_AMD_PMC is not set +# CONFIG_AMD_HSMP is not set +# CONFIG_ADV_SWBUTTON is not set +CONFIG_APPLE_GMUX=m +CONFIG_ASUS_LAPTOP=m +# CONFIG_ASUS_WIRELESS is not set +CONFIG_ASUS_WMI=m +CONFIG_ASUS_NB_WMI=m +# CONFIG_ASUS_TF103C_DOCK is not set +# CONFIG_MERAKI_MX100 is not set +CONFIG_EEEPC_LAPTOP=m +CONFIG_EEEPC_WMI=m +# CONFIG_X86_PLATFORM_DRIVERS_DELL is not set +CONFIG_AMILO_RFKILL=m +CONFIG_FUJITSU_LAPTOP=m +CONFIG_FUJITSU_TABLET=m +# 
CONFIG_GPD_POCKET_FAN is not set +# CONFIG_X86_PLATFORM_DRIVERS_HP is not set +# CONFIG_WIRELESS_HOTKEY is not set +# CONFIG_IBM_RTL is not set +CONFIG_IDEAPAD_LAPTOP=m +# CONFIG_LENOVO_YMC is not set +CONFIG_SENSORS_HDAPS=m +CONFIG_THINKPAD_ACPI=m +# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set +# CONFIG_THINKPAD_ACPI_DEBUG is not set +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set +CONFIG_THINKPAD_ACPI_VIDEO=y +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y +# CONFIG_THINKPAD_LMI is not set +# CONFIG_INTEL_ATOMISP2_PM is not set +# CONFIG_INTEL_IFS is not set +# CONFIG_INTEL_SAR_INT1092 is not set +CONFIG_INTEL_PMC_CORE=m + +# +# Intel Speed Select Technology interface support +# +CONFIG_INTEL_SPEED_SELECT_INTERFACE=m +# end of Intel Speed Select Technology interface support + +CONFIG_INTEL_WMI=y +# CONFIG_INTEL_WMI_SBL_FW_UPDATE is not set +CONFIG_INTEL_WMI_THUNDERBOLT=m + +# +# Intel Uncore Frequency Control +# +# CONFIG_INTEL_UNCORE_FREQ_CONTROL is not set +# end of Intel Uncore Frequency Control + +CONFIG_INTEL_HID_EVENT=m +CONFIG_INTEL_VBTN=m +# CONFIG_INTEL_INT0002_VGPIO is not set +CONFIG_INTEL_OAKTRAIL=m +# CONFIG_INTEL_ISHTP_ECLITE is not set +# CONFIG_INTEL_PUNIT_IPC is not set +CONFIG_INTEL_RST=m +# CONFIG_INTEL_SMARTCONNECT is not set +CONFIG_INTEL_TURBO_MAX_3=y +# CONFIG_INTEL_VSEC is not set +# CONFIG_MSI_EC is not set +CONFIG_MSI_LAPTOP=m +CONFIG_MSI_WMI=m +# CONFIG_PCENGINES_APU2 is not set +# CONFIG_BARCO_P50_GPIO is not set +CONFIG_SAMSUNG_LAPTOP=m +CONFIG_SAMSUNG_Q10=m +# CONFIG_ACPI_TOSHIBA is not set +CONFIG_TOSHIBA_BT_RFKILL=m +# CONFIG_TOSHIBA_HAPS is not set +# CONFIG_TOSHIBA_WMI is not set +CONFIG_ACPI_CMPC=m +CONFIG_COMPAL_LAPTOP=m +# CONFIG_LG_LAPTOP is not set +CONFIG_PANASONIC_LAPTOP=m +CONFIG_SONY_LAPTOP=m +CONFIG_SONYPI_COMPAT=y +# CONFIG_SYSTEM76_ACPI is not set +CONFIG_TOPSTAR_LAPTOP=m +CONFIG_MLX_PLATFORM=m +CONFIG_INTEL_IPS=m +# CONFIG_INTEL_SCU_PCI is not set +# CONFIG_INTEL_SCU_PLATFORM is not set +# CONFIG_SIEMENS_SIMATIC_IPC is not 
set +# CONFIG_WINMATE_FM07_KEYS is not set +# CONFIG_SEL3350_PLATFORM is not set +CONFIG_P2SB=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_XILINX_VCU is not set +CONFIG_HWSPINLOCK=y + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +# end of Generic IOMMU Pagetable Support + +CONFIG_IOMMU_DEBUGFS=y +# CONFIG_IOMMU_DEFAULT_DMA_STRICT is not set +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_SVA=y +CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_V2=m +# CONFIG_AMD_IOMMU_DEBUGFS is not set +CONFIG_DMAR_TABLE=y +CONFIG_DMAR_PERF=y +CONFIG_DMAR_DEBUG=y +CONFIG_INTEL_IOMMU=y +CONFIG_INTEL_IOMMU_DEBUGFS=y +CONFIG_INTEL_IOMMU_SVM=y +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y +CONFIG_INTEL_IOMMU_PERF_EVENTS=y +# CONFIG_IOMMUFD is not set +CONFIG_IRQ_REMAP=y +CONFIG_HYPERV_IOMMU=y +# CONFIG_VIRTIO_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC 
drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# end of fujitsu SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# end of Enable LiteX SoC Builder specific drivers + +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# CONFIG_IIO_BUFFER_DMA is not set +# CONFIG_IIO_BUFFER_DMAENGINE is not set +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set +# CONFIG_IIO_TRIGGERED_EVENT is not set + +# +# Accelerometers +# +# CONFIG_ADXL313_I2C is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL355_I2C is not set +# CONFIG_ADXL367_I2C is not set +# CONFIG_ADXL372_I2C is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA400 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD06 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +# CONFIG_FXLS8962AF_I2C is not set +CONFIG_HID_SENSOR_ACCEL_3D=m +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_IIO_KX022A_I2C is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MSA311 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set +# 
end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD7091R5 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7606_IFACE_PARALLEL is not set +# CONFIG_AD799X is not set +# CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP3422 is not set +# CONFIG_NAU7802 is not set +# CONFIG_RICHTEK_RTQ6056 is not set +# CONFIG_SD_ADC_MODULATOR is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7924 is not set +# CONFIG_TI_ADS1100 is not set +# CONFIG_VF610_ADC is not set +# CONFIG_VIPERBOARD_ADC is not set +# CONFIG_XILINX_XADC is not set +# end of Analog to digital converters + +# +# Analog to digital and digital to analog converters +# +# end of Analog to digital and digital to analog converters + +# +# Analog Front Ends +# +# CONFIG_IIO_RESCALE is not set +# end of Analog Front Ends + +# +# Amplifiers +# +# CONFIG_HMC425 is not set +# end of Amplifiers + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# CONFIG_AD7746 is not set +# end of Capacitance to digital converters + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_ATLAS_EZO_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_SCD30_CORE is not set +# CONFIG_SCD4X is not set +# CONFIG_SENSIRION_SGP30 is not set +# CONFIG_SENSIRION_SGP40 is not set +# CONFIG_SPS30_I2C is not set +# CONFIG_SENSEAIR_SUNRISE_CO2 is not set +# CONFIG_VZ89X is not set +# end of Chemical Sensors + +# +# Hid Sensor IIO Common +# +CONFIG_HID_SENSOR_IIO_COMMON=m +CONFIG_HID_SENSOR_IIO_TRIGGER=m +# end of Hid Sensor IIO Common + +# +# IIO SCMI Sensors +# +# end of IIO SCMI Sensors + +# +# SSP Sensor Common +# +# end of SSP Sensor Common + +# +# Digital to analog converters +# +# 
CONFIG_AD5064 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_DPOT_DAC is not set +# CONFIG_DS4424 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MAX5821 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4728 is not set +# CONFIG_TI_DAC5571 is not set +# CONFIG_VF610_DAC is not set +# end of Digital to analog converters + +# +# IIO dummy driver +# +# end of IIO dummy driver + +# +# Filters +# +# end of Filters + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# end of Clock Generator/Distribution + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# end of Phase-Locked Loop (PLL) frequency synthesizers +# end of Frequency Synthesizers DDS/PLL + +# +# Digital gyroscope sensors +# +# CONFIG_BMG160 is not set +# CONFIG_FXAS21002C is not set +CONFIG_HID_SENSOR_GYRO_3D=m +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set +# end of Digital gyroscope sensors + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set +# end of Heart Rate Monitors +# end of Health Sensors + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# CONFIG_HDC2010 is not set +CONFIG_HID_SENSOR_HUMIDITY=m +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set +# end of Humidity sensors + +# +# Inertial measurement units +# +# CONFIG_BMI160_I2C is not set +# CONFIG_BOSCH_BNO055_I2C is not set +# CONFIG_FXOS8700_I2C is not set +# CONFIG_KMX61 is not set +# CONFIG_INV_ICM42600_I2C is not set +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_IIO_ST_LSM6DSX is not set +# CONFIG_IIO_ST_LSM9DS0 is not set +# end of Inertial measurement units + +# +# Light sensors +# +# CONFIG_ACPI_ALS is not set +# 
CONFIG_ADJD_S311 is not set +# CONFIG_ADUX1020 is not set +# CONFIG_AL3010 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_AS73211 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM3605 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP002 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +# CONFIG_JSA1212 is not set +# CONFIG_ROHM_BU27008 is not set +# CONFIG_ROHM_BU27034 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LTRF216A is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_MAX44009 is not set +# CONFIG_NOA1305 is not set +# CONFIG_OPT3001 is not set +# CONFIG_OPT4001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2591 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VCNL4035 is not set +# CONFIG_VEML6030 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set +# end of Light sensors + +# +# Magnetometer sensors +# +# CONFIG_AK8974 is not set +# CONFIG_AK8975 is not set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_MAG3110 is not set +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_TI_TMAG5273 is not set +# 
CONFIG_YAMAHA_YAS530 is not set +# end of Magnetometer sensors + +# +# Multiplexers +# +# CONFIG_IIO_MUX is not set +# end of Multiplexers + +# +# Inclinometer sensors +# +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +# end of Inclinometer sensors + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set +# end of Triggers - standalone + +# +# Linear and angular position sensors +# +# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set +# end of Linear and angular position sensors + +# +# Digital potentiometers +# +# CONFIG_AD5110 is not set +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5432 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4531 is not set +# CONFIG_TPL0102 is not set +# end of Digital potentiometers + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set +# end of Digital potentiostats + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +# CONFIG_DLHL60D is not set +# CONFIG_DPS310 is not set +CONFIG_HID_SENSOR_PRESS=m +# CONFIG_HP03 is not set +# CONFIG_ICP10100 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL3115 is not set +# CONFIG_MPRLS0025PA is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set +# end of Pressure sensors + +# +# Lightning sensors +# +# end of Lightning sensors + +# +# Proximity and distance sensors +# +# CONFIG_IRSD200 is not set +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_MB1232 is not set +# CONFIG_PING is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9310 is not set +# CONFIG_SX9324 is not set +# CONFIG_SX9360 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set +# CONFIG_VCNL3020 is not set +# CONFIG_VL53L0X_I2C is not set +# end of Proximity and distance sensors + 
+# +# Resolver to digital converters +# +# end of Resolver to digital converters + +# +# Temperature sensors +# +CONFIG_HID_SENSOR_TEMP=m +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TMP117 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_MAX30208 is not set +# end of Temperature sensors + +CONFIG_NTB=m +# CONFIG_NTB_MSI is not set +# CONFIG_NTB_AMD is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_INTEL is not set +# CONFIG_NTB_EPF is not set +# CONFIG_NTB_SWITCHTEC is not set +# CONFIG_NTB_PINGPONG is not set +# CONFIG_NTB_TOOL is not set +# CONFIG_NTB_PERF is not set +# CONFIG_NTB_TRANSPORT is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_CLK is not set +# CONFIG_PWM_DWC is not set +CONFIG_PWM_LPSS=m +CONFIG_PWM_LPSS_PCI=m +CONFIG_PWM_LPSS_PLATFORM=m +# CONFIG_PWM_PCA9685 is not set + +# +# IRQ chip support +# +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_USB_LGM_PHY is not set +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_CPCAP_USB is not set +# CONFIG_PHY_INTEL_LGM_EMMC is not set +# end of PHY Subsystem + +CONFIG_POWERCAP=y +CONFIG_INTEL_RAPL_CORE=m +CONFIG_INTEL_RAPL=m +CONFIG_IDLE_INJECT=y +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# end of Performance monitor support + +CONFIG_RAS=y +CONFIG_RAS_CEC=y +# CONFIG_RAS_CEC_DEBUG is not set +# CONFIG_USB4 is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +CONFIG_LIBNVDIMM=y +CONFIG_BLK_DEV_PMEM=y +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=y +CONFIG_BTT=y +CONFIG_ND_PFN=y 
+CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_NVDIMM_KEYS=y +# CONFIG_NVDIMM_SECURITY_TEST is not set +CONFIG_DAX=y +CONFIG_DEV_DAX=y +CONFIG_DEV_DAX_PMEM=y +CONFIG_DEV_DAX_HMEM=y +CONFIG_DEV_DAX_HMEM_DEVICES=y +CONFIG_DEV_DAX_KMEM=y +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_RMEM is not set + +# +# HW tracing support +# +CONFIG_STM=m +CONFIG_STM_PROTO_BASIC=m +CONFIG_STM_PROTO_SYS_T=m +CONFIG_STM_DUMMY=m +CONFIG_STM_SOURCE_CONSOLE=m +CONFIG_STM_SOURCE_HEARTBEAT=m +CONFIG_STM_SOURCE_FTRACE=m +CONFIG_INTEL_TH=m +CONFIG_INTEL_TH_PCI=m +CONFIG_INTEL_TH_ACPI=m +CONFIG_INTEL_TH_GTH=m +CONFIG_INTEL_TH_STH=m +CONFIG_INTEL_TH_MSU=m +CONFIG_INTEL_TH_PTI=m +# CONFIG_INTEL_TH_DEBUG is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +CONFIG_TEE=m +CONFIG_AMDTEE=m +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +CONFIG_XFS_WARN=y +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# 
CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QUOTA_DEBUG=y +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_FUSE_DAX=y +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +# CONFIG_OVERLAY_FS_NFS_EXPORT is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=y +CONFIG_NETFS_STATS=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +CONFIG_CACHEFILES_ONDEMAND=y +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_EXFAT_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_NTFS3_FS is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_PROC_PID_ARCH_STATUS=y +CONFIG_PROC_CPU_RESCTRL=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y 
+CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y +CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +# CONFIG_SQUASHFS_LZ4 is not set +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_CONSOLE=y +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=y +# CONFIG_PSTORE_BLK is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_EROFS_FS=m +# CONFIG_EROFS_FS_DEBUG is not set +CONFIG_EROFS_FS_XATTR=y 
+CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y +CONFIG_EROFS_FS_ZIP_LZMA=y +# CONFIG_EROFS_FS_ZIP_DEFLATE is not set +CONFIG_EROFS_FS_ONDEMAND=y +# CONFIG_EROFS_FS_PCPU_KTHREAD is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_SMB_SERVER is not set 
+CONFIG_SMBFS=y +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_NLS_UCS2_UTILS=y +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_INTEL_TXT=y 
+CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +# CONFIG_IMA_KEXEC is not set +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_NG_TEMPLATE is not set +CONFIG_IMA_SIG_TEMPLATE=y +CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +CONFIG_IMA_APPRAISE_BUILD_POLICY=y +# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y +CONFIG_IMA_BLACKLIST_KEYRING=y +CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not 
set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set +# CONFIG_IMA_DISABLE_HTABLE is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_STACK_ALL_PATTERN is not set +# CONFIG_INIT_STACK_ALL_ZERO is not set +# CONFIG_GCC_PLUGIN_STACKLEAK is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Hardening of kernel data structures + +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y 
+CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=y +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +CONFIG_CRYPTO_SM2=y +# CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=y +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=m +CONFIG_CRYPTO_SEQIV=m 
+CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=m +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=m +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=m +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +# end of Compression + +# +# Random number generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y + +# +# Accelerated Cryptographic Algorithms for CPU (x86) +# +CONFIG_CRYPTO_CURVE25519_X86=m +CONFIG_CRYPTO_AES_NI_INTEL=y +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_CAMELLIA_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m 
+CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_CAST5_AVX_X86_64=m +CONFIG_CRYPTO_CAST6_AVX_X86_64=m +CONFIG_CRYPTO_DES3_EDE_X86_64=m +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m +CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=y +CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=y +CONFIG_CRYPTO_TWOFISH_X86_64=m +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m +# CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 is not set +# CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 is not set +# CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 is not set +CONFIG_CRYPTO_CHACHA20_X86_64=m +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set +# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set +CONFIG_CRYPTO_BLAKE2S_X86=y +# CONFIG_CRYPTO_POLYVAL_CLMUL_NI is not set +CONFIG_CRYPTO_POLY1305_X86_64=m +CONFIG_CRYPTO_SHA1_SSSE3=y +CONFIG_CRYPTO_SHA256_SSSE3=y +CONFIG_CRYPTO_SHA512_SSSE3=y +CONFIG_CRYPTO_SM3_AVX_X86_64=y +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m +CONFIG_CRYPTO_CRC32C_INTEL=m +CONFIG_CRYPTO_CRC32_PCLMUL=m +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m +# end of Accelerated Cryptographic Algorithms for CPU (x86) + +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_PADLOCK=m +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +CONFIG_CRYPTO_DEV_SP_PSP=y +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +CONFIG_CRYPTO_DEV_QAT=m +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m +CONFIG_CRYPTO_DEV_QAT_C3XXX=m +CONFIG_CRYPTO_DEV_QAT_C62X=m +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m +CONFIG_CRYPTO_DEV_QAT_C62XVF=m +CONFIG_CRYPTO_DEV_CHELSIO=m +# CONFIG_CRYPTO_DEV_VIRTIO is not set +# CONFIG_CRYPTO_DEV_SAFEXCEL is not 
set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# CONFIG_SYSTEM_REVOCATION_LIST is not set +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m 
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=m +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_XXHASH=y +CONFIG_RANDOM32_SELFTEST=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_OPS=y +CONFIG_NEED_SG_DMA_FLAGS=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +CONFIG_DMA_COHERENT_POOL=y +CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=0 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# 
CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +CONFIG_DMA_API_DEBUG=y +CONFIG_DMA_API_DEBUG_SG=y +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPUMASK_OFFSTACK=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_MEMREGION=y +CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_COPY_MC=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_STACKDEPOT_ALWAYS_INIT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of Library routines + +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# 
CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_OBJTOOL=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_HONOUR_BLOCKLIST=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_LOW_LEVEL_TRAP=y +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x1 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_EARLY_DEBUG=y +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +CONFIG_UBSAN=y +# CONFIG_UBSAN_TRAP is not set +CONFIG_CC_HAS_UBSAN_BOUNDS_STRICT=y +CONFIG_CC_HAS_UBSAN_ARRAY_BOUNDS=y +CONFIG_UBSAN_BOUNDS=y +CONFIG_UBSAN_BOUNDS_STRICT=y +CONFIG_UBSAN_ARRAY_BOUNDS=y +CONFIG_UBSAN_SHIFT=y +# CONFIG_UBSAN_DIV_ZERO is not set +CONFIG_UBSAN_BOOL=y +CONFIG_UBSAN_ENUM=y +# CONFIG_UBSAN_ALIGNMENT is not set +CONFIG_UBSAN_SANITIZE_ALL=y +# CONFIG_TEST_UBSAN is not set +CONFIG_HAVE_ARCH_KCSAN=y +CONFIG_HAVE_KCSAN_COMPILER=y +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +CONFIG_PAGE_EXTENSION=y +CONFIG_DEBUG_PAGEALLOC=y +# CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set 
+# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_TABLE_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_DEBUG_PAGE_REF=y +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_ARCH_HAS_DEBUG_WX=y +# CONFIG_DEBUG_WX is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y +# CONFIG_PER_VMA_LOCK_STATS is not set +CONFIG_DEBUG_OBJECTS=y +# CONFIG_DEBUG_OBJECTS_SELFTEST is not set +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 +# CONFIG_SHRINKER_DEBUG is not set +CONFIG_DEBUG_STACK_USAGE=y +# CONFIG_SCHED_STACK_END_CHECK is not set +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +CONFIG_DEBUG_VM_IRQSOFF=y +CONFIG_DEBUG_VM=y +# CONFIG_DEBUG_VM_MAPLE_TREE is not set +# CONFIG_DEBUG_VM_RB is not set +# CONFIG_DEBUG_VM_PGFLAGS is not set +CONFIG_DEBUG_VM_PGTABLE=y +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_PER_CPU_MAPS=y +CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y +# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +CONFIG_KASAN=y +CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y +CONFIG_KASAN_GENERIC=y +# CONFIG_KASAN_OUTLINE is not set +CONFIG_KASAN_INLINE=y +CONFIG_KASAN_STACK=y +CONFIG_KASAN_VMALLOC=y +# CONFIG_KASAN_MODULE_TEST is not set +CONFIG_HAVE_ARCH_KFENCE=y +CONFIG_KFENCE=y +CONFIG_KFENCE_SAMPLE_INTERVAL=100 +CONFIG_KFENCE_NUM_OBJECTS=255 +# CONFIG_KFENCE_DEFERRABLE is not set +CONFIG_KFENCE_STRESS_TEST_FAULTS=0 +CONFIG_HAVE_ARCH_KMSAN=y +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and 
Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=1 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +CONFIG_PROVE_LOCKING=y +# CONFIG_PROVE_RAW_LOCK_NESTING is not set +CONFIG_LOCK_STAT=y +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y +CONFIG_DEBUG_RWSEMS=y +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_LOCKDEP=y +CONFIG_LOCKDEP_BITS=15 +CONFIG_LOCKDEP_CHAINS_BITS=16 +CONFIG_LOCKDEP_STACK_TRACE_BITS=19 +CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS=14 +CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS=12 +# CONFIG_DEBUG_LOCKDEP is not set +CONFIG_DEBUG_ATOMIC_SLEEP=y +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_LOCK_TORTURE_TEST=m +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ +CONFIG_TRACE_IRQFLAGS=y +CONFIG_TRACE_IRQFLAGS_NMI=y +# CONFIG_NMI_CHECK_CPU is not set +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +# CONFIG_DEBUG_KOBJECT_RELEASE is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +CONFIG_DEBUG_SG=y +CONFIG_DEBUG_NOTIFIERS=y +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +CONFIG_DEBUG_CREDENTIALS=y + +# +# RCU Debugging +# +CONFIG_PROVE_RCU=y +CONFIG_TORTURE_TEST=m +# CONFIG_RCU_SCALE_TEST is not set +CONFIG_RCU_TORTURE_TEST=m +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +CONFIG_LATENCYTOP=y +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_RETHOOK=y +CONFIG_RETHOOK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_OBJTOOL_MCOUNT=y +CONFIG_HAVE_OBJTOOL_NOP_MCOUNT=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_BUILDTIME_MCOUNT_SORT=y +CONFIG_BUILDTIME_MCOUNT_SORT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_PREEMPTIRQ_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set 
+CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FPROBE is not set +CONFIG_FUNCTION_PROFILER=y +CONFIG_STACK_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_OSNOISE_TRACER=y +CONFIG_TIMERLAT_TRACER=y +CONFIG_MMIOTRACE=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_CC=y +CONFIG_TRACING_MAP=y +CONFIG_SYNTH_EVENTS=y +# CONFIG_USER_EVENTS is not set +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_FTRACE_SORT_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_MMIOTRACE_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_SYNTH_EVENT_GEN_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_HIST_TRIGGERS_DEBUG is not set +# CONFIG_RV is not set +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# x86 Debugging +# 
+CONFIG_EARLY_PRINTK_USB=y +# CONFIG_X86_VERBOSE_BOOTUP is not set +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_USB_XDBC=y +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_TLBFLUSH is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_X86_DECODER_SELFTEST=y +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +CONFIG_X86_DEBUG_FPU=y +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set +# end of x86 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAILSLAB=y +CONFIG_FAIL_PAGE_ALLOC=y +# CONFIG_FAULT_INJECTION_USERCOPY is not set +CONFIG_FAIL_MAKE_REQUEST=y +CONFIG_FAIL_IO_TIMEOUT=y +# CONFIG_FAIL_FUTEX is not set +CONFIG_FAULT_INJECTION_DEBUG_FS=y +# CONFIG_FAIL_FUNCTION is not set +CONFIG_FAIL_MMC_REQUEST=y +# CONFIG_FAIL_SUNRPC is not set +# CONFIG_FAULT_INJECTION_CONFIGFS is not set +# CONFIG_FAULT_INJECTION_STACKTRACE_FILTER is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_TEST_DHRY is not set +# CONFIG_LKDTM is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_TEST_REF_TRACKER is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_STRING_SELFTEST is not set +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_SCANF is not set +# 
CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_MAPLE_TREE is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_DYNAMIC_DEBUG is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +CONFIG_TEST_LIVEPATCH=m +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_HMM is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_TEST_FPU is not set +# CONFIG_TEST_CLOCKSOURCE_WATCHDOG is not set +CONFIG_ARCH_USE_MEMTEST=y +# CONFIG_MEMTEST is not set +# CONFIG_HYPERV_TESTING is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig new file mode 100644 index 000000000000..3c8b51687fb7 --- /dev/null +++ b/arch/x86/configs/anolis_defconfig @@ -0,0 +1,7710 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/x86 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +# CONFIG_KERNEL_ZSTD is not set +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y 
+CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_INIT=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_CONTEXT_TRACKING_USER=y +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US=125 +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y + +# +# BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +CONFIG_BPF_UNPRIV_DEFAULT_OFF=y +# CONFIG_BPF_PRELOAD is not set +CONFIG_BPF_LSM=y +# end of BPF subsystem + +CONFIG_PREEMPT_BUILD=y +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTION=y +CONFIG_PREEMPT_DYNAMIC=y +CONFIG_SCHED_CORE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y 
+CONFIG_TASKS_RCU=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_RCU_NOCB_CPU=y +# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set +# CONFIG_RCU_LAZY is not set +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=21 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y + +# +# Scheduler features +# +# CONFIG_UCLAMP_TASK is not set +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is 
not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_GUEST_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HAVE_IMA_KEXEC=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_KEXEC_SIG=y +# CONFIG_KEXEC_SIG_FORCE is not set +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_KEXEC_JUMP=y +CONFIG_CRASH_DUMP=y +CONFIG_CRASH_HOTPLUG=y +CONFIG_CRASH_MAX_MEMORY_RANGES=8192 +# end of Kexec and crash features +# end of General setup + +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y 
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_AUDIT_ARCH=y +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_SMP=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +CONFIG_X86_CPU_RESCTRL=y +CONFIG_X86_EXTENDED_PLATFORM=y +# CONFIG_X86_NUMACHIP is not set +# CONFIG_X86_VSMP is not set +CONFIG_X86_UV=y +# CONFIG_X86_GOLDFISH is not set +# CONFIG_X86_INTEL_MID is not set +CONFIG_X86_INTEL_LPSS=y +CONFIG_X86_AMD_PLATFORM_DEVICE=y +CONFIG_IOSF_MBI=y +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +# CONFIG_PARAVIRT_SPINLOCKS is not set +CONFIG_X86_HV_CALLBACK_VECTOR=y +CONFIG_XEN=y +# CONFIG_XEN_PV is not set +CONFIG_XEN_PVHVM=y +CONFIG_XEN_PVHVM_SMP=y +CONFIG_XEN_PVHVM_GUEST=y +CONFIG_XEN_SAVE_RESTORE=y +# CONFIG_XEN_DEBUG_FS is not set +# CONFIG_XEN_PVH is not set +CONFIG_KVM_GUEST=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +# CONFIG_PVH is not set +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +# CONFIG_JAILHOUSE_GUEST is not set +# CONFIG_ACRN_GUEST is not set +CONFIG_INTEL_TDX_GUEST=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +CONFIG_IA32_FEAT_CTL=y +CONFIG_X86_VMX_FEATURE_NAMES=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y 
+CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +# CONFIG_GART_IOMMU is not set +CONFIG_BOOT_VESA_SUPPORT=y +# CONFIG_MAXSMP is not set +CONFIG_NR_CPUS_RANGE_BEGIN=2 +CONFIG_NR_CPUS_RANGE_END=512 +CONFIG_NR_CPUS_DEFAULT=64 +CONFIG_NR_CPUS=64 +CONFIG_SCHED_CLUSTER=y +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +CONFIG_X86_MCELOG_LEGACY=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +CONFIG_X86_MCE_INJECT=m + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=m +CONFIG_PERF_EVENTS_INTEL_RAPL=m +CONFIG_PERF_EVENTS_INTEL_CSTATE=m +CONFIG_PERF_EVENTS_AMD_POWER=m +CONFIG_PERF_EVENTS_AMD_UNCORE=y +CONFIG_PERF_EVENTS_AMD_BRS=y +# end of Performance monitoring + +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +CONFIG_X86_IOPL_IOPERM=y +CONFIG_MICROCODE=y +# CONFIG_MICROCODE_LATE_LOADING is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +# CONFIG_X86_5LEVEL is not set +CONFIG_X86_DIRECT_GBPAGES=y +CONFIG_X86_CPA_STATISTICS=y +CONFIG_X86_MEM_ENCRYPT=y +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NUMA_EMU=y +CONFIG_NODES_SHIFT=6 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +# CONFIG_ARCH_MEMORY_PROBE is not set +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_X86_PMEM_LEGACY_DEVICE=y +CONFIG_X86_PMEM_LEGACY=m +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_X86_UMIP=y +CONFIG_CC_HAS_IBT=y +CONFIG_X86_CET=y +CONFIG_X86_KERNEL_IBT=y +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +# 
CONFIG_X86_INTEL_TSX_MODE_OFF is not set +# CONFIG_X86_INTEL_TSX_MODE_ON is not set +CONFIG_X86_INTEL_TSX_MODE_AUTO=y +CONFIG_X86_SGX=y +# CONFIG_X86_USER_SHADOW_STACK is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_HANDOVER_PROTOCOL=y +CONFIG_EFI_MIXED=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_MAP=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y +CONFIG_ARCH_SELECTS_KEXEC_FILE=y +CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE=y +CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_JUMP=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_X86_NEED_RELOCS=y +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y +CONFIG_RANDOMIZE_MEMORY=y +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa +# CONFIG_ADDRESS_MASKING is not set +CONFIG_HOTPLUG_CPU=y +# CONFIG_COMPAT_VDSO is not set +CONFIG_LEGACY_VSYSCALL_XONLY=y +# CONFIG_LEGACY_VSYSCALL_NONE is not set +# CONFIG_CMDLINE_BOOL is not set +CONFIG_MODIFY_LDT_SYSCALL=y +# CONFIG_STRICT_SIGALTSTACK_SIZE is not set +CONFIG_HAVE_LIVEPATCH=y +CONFIG_LIVEPATCH=y +# end of Processor type and features + +CONFIG_CC_HAS_SLS=y +CONFIG_CC_HAS_RETURN_THUNK=y +CONFIG_CC_HAS_ENTRY_PADDING=y +CONFIG_FUNCTION_PADDING_CFI=11 +CONFIG_FUNCTION_PADDING_BYTES=16 +CONFIG_CALL_PADDING=y +CONFIG_HAVE_CALL_THUNKS=y +CONFIG_CALL_THUNKS=y +CONFIG_PREFIX_SYMBOLS=y +CONFIG_SPECULATION_MITIGATIONS=y +CONFIG_PAGE_TABLE_ISOLATION=y +CONFIG_RETPOLINE=y +CONFIG_RETHUNK=y +CONFIG_CPU_UNRET_ENTRY=y +CONFIG_CALL_DEPTH_TRACKING=y +# CONFIG_CALL_THUNKS_DEBUG is not set +CONFIG_CPU_IBPB_ENTRY=y +CONFIG_CPU_IBRS_ENTRY=y +CONFIG_CPU_SRSO=y +# CONFIG_SLS is not set +# 
CONFIG_GDS_FORCE_MITIGATION is not set +CONFIG_ARCH_HAS_ADD_PAGES=y + +# +# Power management and ACPI options +# +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +# CONFIG_PM_ADVANCED_DEBUG is not set +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +# CONFIG_PM_TRACE_RTC is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_FPDT is not set +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y +CONFIG_ACPI_EC_DEBUGFS=m +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +CONFIG_ACPI_TAD=m +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_PLATFORM_PROFILE=m +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +CONFIG_ACPI_SBS=m +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +CONFIG_ACPI_BGRT=y +CONFIG_ACPI_NFIT=m +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y 
+CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +# CONFIG_ACPI_DPTF is not set +CONFIG_ACPI_WATCHDOG=y +CONFIG_ACPI_EXTLOG=m +CONFIG_ACPI_ADXL=y +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_PCC=y +# CONFIG_ACPI_FFH is not set +CONFIG_PMIC_OPREGION=y +CONFIG_ACPI_PRMT=y +CONFIG_X86_PM_TIMER=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +# CONFIG_X86_PCC_CPUFREQ is not set +CONFIG_X86_AMD_PSTATE=y +CONFIG_X86_AMD_PSTATE_DEFAULT_MODE=3 +# CONFIG_X86_AMD_PSTATE_UT is not set +CONFIG_X86_ACPI_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ_CPB=y +CONFIG_X86_POWERNOW_K8=m +CONFIG_X86_AMD_FREQ_SENSITIVITY=m +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +CONFIG_X86_P4_CLOCKMOD=m + +# +# shared options +# +CONFIG_X86_SPEEDSTEP_LIB=m +# end of CPU Frequency scaling + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set +CONFIG_CPU_IDLE_GOV_HALTPOLL=y +CONFIG_HALTPOLL_CPUIDLE=y +# end of CPU Idle + +CONFIG_INTEL_IDLE=y +# end of Power management and ACPI options + +# +# Bus options (PCI etc.) +# +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_XEN=y +CONFIG_MMCONF_FAM10H=y +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +# end of Bus options (PCI etc.) 
+ +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_X86_X32_ABI is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +# end of Binary Emulations + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_PFNCACHE=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_TSO=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_NO_POLL=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_HAVE_KVM_PM_NOTIFIER=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_X86_SGX_KVM=y +CONFIG_KVM_AMD=m +CONFIG_KVM_AMD_SEV=y +CONFIG_KVM_SMM=y +# CONFIG_KVM_XEN is not set +CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y +CONFIG_AS_AVX512=y +CONFIG_AS_SHA1_NI=y +CONFIG_AS_SHA256_NI=y +CONFIG_AS_TPAUSE=y +CONFIG_AS_GFNI=y +CONFIG_AS_WRUSS=y + +# +# General architecture-dependent options +# +CONFIG_HOTPLUG_SMT=y +CONFIG_HOTPLUG_CORE_SYNC=y +CONFIG_HOTPLUG_CORE_SYNC_DEAD=y +CONFIG_HOTPLUG_CORE_SYNC_FULL=y +CONFIG_HOTPLUG_SPLIT_STARTUP=y +CONFIG_HOTPLUG_PARALLEL=y +CONFIG_GENERIC_ENTRY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +# CONFIG_STATIC_CALL_SELFTEST is not set +CONFIG_OPTPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_KRETPROBE_ON_RETHOOK=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y 
+CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_RUST=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_MMU_GATHER_MERGE_VMAS=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +# CONFIG_STACKPROTECTOR_STRONG is not set +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y +# CONFIG_CFI_CLANG is not set +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PUD=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y 
+CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_HAVE_OBJTOOL=y +CONFIG_HAVE_JUMP_LABEL_HACK=y +CONFIG_HAVE_NOINSTR_HACK=y +CONFIG_HAVE_NOINSTR_VALIDATION=y +CONFIG_HAVE_UACCESS_VALIDATION=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y +CONFIG_RANDOMIZE_KSTACK_OFFSET=y +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_MEM_ENCRYPT=y +CONFIG_ARCH_HAS_CC_PLATFORM=y +CONFIG_HAVE_STATIC_CALL=y +CONFIG_HAVE_STATIC_CALL_INLINE=y +CONFIG_HAVE_PREEMPT_DYNAMIC=y +CONFIG_HAVE_PREEMPT_DYNAMIC_CALL=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y +CONFIG_ARCH_HAS_ELFCORE_COMPAT=y +CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH=y +CONFIG_DYNAMIC_SIGFRAME=y +CONFIG_HAVE_ARCH_NODE_DEV_GROUP=y +CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + 
+CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT_4B=y +CONFIG_FUNCTION_ALIGNMENT_16B=y +CONFIG_FUNCTION_ALIGNMENT=16 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +# CONFIG_MODULE_SIG_ALL is not set +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=m +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_WBT is not set +CONFIG_BLK_CGROUP_IOLATENCY=y +# CONFIG_BLK_CGROUP_FC_APPID is not set +CONFIG_BLK_CGROUP_IOCOST=y +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +# 
CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +CONFIG_SGI_PARTITION=y +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set 
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP=y +CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_EXCLUSIVE_SYSTEM_RAM=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_MHP_MEMMAP_ON_MEMORY=y +CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_DEVICE_MIGRATION=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# 
CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_THP_SWAP=y +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=19 +CONFIG_MEM_SOFT_DIRTY=y +CONFIG_GENERIC_EARLY_IOREMAP=y +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y +CONFIG_PAGE_IDLE_FLAG=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DMA=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DEVICE=y +CONFIG_HMM_MIRROR=y +CONFIG_GET_FREE_REGION=y +CONFIG_DEVICE_PRIVATE=y +CONFIG_VMAP_PFN=y +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MAPPING_DIRTY_HELPERS=y +CONFIG_MEMFD_CREATE=y +CONFIG_SECRETMEM=y +# CONFIG_ANON_VMA_NAME is not set +CONFIG_USERFAULTFD=y +CONFIG_HAVE_ARCH_USERFAULTFD_WP=y +CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y +CONFIG_PTE_MARKER_UFFD_WP=y +CONFIG_LRU_GEN=y +# CONFIG_LRU_GEN_ENABLED is not set +# CONFIG_LRU_GEN_STATS is not set +CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y +CONFIG_PER_VMA_LOCK=y +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +# CONFIG_DAMON_SYSFS is not set +CONFIG_DAMON_DBGFS=y +# CONFIG_DAMON_RECLAIM is not set +# CONFIG_DAMON_LRU_SORT is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m 
+CONFIG_TLS_DEVICE=y +# CONFIG_TLS_TOE is not set +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_USER_COMPAT is not set +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y 
+CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +# CONFIG_INET6_ESPINTCP is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +# CONFIG_IPV6_RPL_LWTUNNEL is not set +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_MPTCP_IPV6=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m 
+CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set +CONFIG_NETFILTER_XTABLES=y +# CONFIG_NETFILTER_XTABLES_COMPAT is not set + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m 
+CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m 
+CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m 
+CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NFT_BRIDGE_REJECT=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m 
+CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +# CONFIG_BRIDGE_MRP is not set +# CONFIG_BRIDGE_CFM is not set +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# 
CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_MQPRIO_LIB=m +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +# CONFIG_NET_SCH_FQ_PIE is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_ETS is not set +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +# CONFIG_NET_ACT_GATE is not set +CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +# CONFIG_BATMAN_ADV is 
not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_HYPERV_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +CONFIG_BT_LE_L2CAP_ECRED=y +# CONFIG_BT_6LOWPAN is not set +# CONFIG_BT_LEDS is not set +# CONFIG_BT_MSFTEXT is not set +# CONFIG_BT_AOSPEXT is not set +CONFIG_BT_DEBUGFS=y +# CONFIG_BT_SELFTEST is not set + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_POLL_SYNC=y +CONFIG_BT_HCIBTUSB_BCM=y +# CONFIG_BT_HCIBTUSB_MTK is not set +CONFIG_BT_HCIBTUSB_RTL=y +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +# CONFIG_BT_HCIUART_INTEL is not set +# CONFIG_BT_HCIUART_AG6XX is not set +CONFIG_BT_HCIBCM203X=m +# CONFIG_BT_HCIBCM4377 is not set +CONFIG_BT_HCIBPA10X=m 
+CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +# CONFIG_BT_MTKSDIO is not set +# CONFIG_BT_VIRTIO is not set +# end of Bluetooth device drivers + +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_HAVE_EISA=y +# CONFIG_EISA is not set +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE 
is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=y +CONFIG_PCI_ATS=y +CONFIG_PCI_LOCKLESS_CONFIG=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +# CONFIG_PCI_P2PDMA is not set +CONFIG_PCI_LABEL=y +CONFIG_PCI_HYPERV=m +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# +CONFIG_VMD=y +CONFIG_PCI_HYPERV_INTERFACE=m + +# +# Cadence-based PCIe controllers +# +# end of Cadence-based PCIe controllers + +# +# DesignWare-based PCIe controllers +# +# CONFIG_PCI_MESON is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# end of Mobiveil-based PCIe controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_CXL_BUS is not set +CONFIG_PCCARD=y +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_FW_LOADER_SYSFS=y +CONFIG_EXTRA_FIRMWARE="" +CONFIG_FW_LOADER_USER_HELPER=y +# 
CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set +# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_CACHE=y +# CONFIG_FW_UPLOAD is not set +# end of Firmware loader + +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +CONFIG_HMEM_REPORTING=y +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_SYS_HYPERVISOR=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# end of ARM System Control and Management Interface Protocol + +CONFIG_EDD=m +# CONFIG_EDD_OFF is not set +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_ISCSI_IBFT=m +CONFIG_FW_CFG_SYSFS=y +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_SOFT_RESERVE=y +CONFIG_EFI_DXE_MEM_ATTRIBUTES=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_APPLE_PROPERTIES is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +CONFIG_EFI_RCI2_TABLE=y +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# 
CONFIG_EFI_DISABLE_RUNTIME is not set +CONFIG_EFI_COCO_SECRET=y +CONFIG_UNACCEPTED_MEMORY=y +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_X86=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. +# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set + +# +# ECC engine support +# +# 
CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_MTD_HYPERBUS is not set +# CONFIG_OF is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +# CONFIG_PARPORT_PC_FIFO is not set +# CONFIG_PARPORT_PC_SUPERIO is not set +CONFIG_PARPORT_1284=y +CONFIG_PARPORT_NOT_PC=y +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +# CONFIG_BLK_DEV_FD is not set +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_LZORLE=y +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +CONFIG_ZRAM_DEF_COMP="lzo-rle" +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_ZRAM_MULTI_COMP is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_XEN_BLKDEV_FRONTEND=m +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_UBLK=m +CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_VERBOSE_ERRORS is not set +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=m 
+CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +# CONFIG_NVME_AUTH is not set +CONFIG_NVME_TARGET=m +# CONFIG_NVME_TARGET_PASSTHRU is not set +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +# CONFIG_NVME_TARGET_AUTH is not set +# end of NVME Support + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_IBM_ASM is not set +# CONFIG_PHANTOM is not set +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_SGI_XP=m +CONFIG_HP_ILO=m +CONFIG_SGI_GRU=m +# CONFIG_SGI_GRU_DEBUG is not set +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +CONFIG_VMWARE_BALLOON=m +# CONFIG_SRAM is not set +# CONFIG_DW_XDATA_PCIE is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +CONFIG_MISC_RTSX=m +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + +CONFIG_SENSORS_LIS3_I2C=m + +# +# Altera FPGA firmware download module (requires I2C) +# +CONFIG_ALTERA_STAPL=m +CONFIG_INTEL_MEI=m +CONFIG_INTEL_MEI_ME=m +# CONFIG_INTEL_MEI_TXE is not set +# CONFIG_INTEL_MEI_GSC is not set +# CONFIG_INTEL_MEI_HDCP is not set +# CONFIG_INTEL_MEI_PXP is not set +# CONFIG_INTEL_MEI_GSC_PROXY is not set +CONFIG_VMWARE_VMCI=m +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set 
+# CONFIG_BCM_VK is not set +# CONFIG_MISC_ALCOR_PCI is not set +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +# CONFIG_UACCE is not set +CONFIG_PVPANIC=y +# CONFIG_PVPANIC_MMIO is not set +# CONFIG_PVPANIC_PCI is not set +# CONFIG_GP_PCI1XXXX is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI_COMMON=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_BLK_DEV_BSG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +CONFIG_SCSI_AACRAID=m +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB 
is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_VMWARE_PVSCSI=m +# CONFIG_XEN_SCSI_FRONTEND is not set +CONFIG_HYPERV_STORAGE=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +# CONFIG_FCOE is not set +CONFIG_FCOE_FNIC=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +CONFIG_SCSI_ISCI=m +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_PPA is not set +# CONFIG_SCSI_IMM is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_EFCT is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=m +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=m +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_AHCI_DWC is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=m +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# 
CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_RZ1000 is not set +# CONFIG_PATA_PARPORT is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_BITMAP_FILE=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +# CONFIG_MD_MULTIPATH is not set +CONFIG_MD_FAULTY=m 
+CONFIG_MD_CLUSTER=m +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +# CONFIG_DM_EBS is not set +CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_MULTIPATH_HST is not set +# CONFIG_DM_MULTIPATH_IOA is not set +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_DM_AUDIT=y +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_SBP_TARGET is not set +# CONFIG_REMOTE_TARGET is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_MACINTOSH_DRIVERS=y +CONFIG_MAC_EMUMOUSEBTN=y +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m 
+CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set +# CONFIG_AMT is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +# CONFIG_NETCONSOLE_EXTENDED_LOG is not set +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +# CONFIG_NET_VENDOR_AMD is not set +CONFIG_NET_VENDOR_AQUANTIA=y +CONFIG_AQTION=m +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ASIX=y +CONFIG_NET_VENDOR_ATHEROS=y +CONFIG_ATL2=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +# CONFIG_CX_ECAT is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +# CONFIG_NET_VENDOR_CADENCE is not set +CONFIG_NET_VENDOR_CAVIUM=y +# CONFIG_THUNDER_NIC_PF is not set +# CONFIG_THUNDER_NIC_VF is not set +# CONFIG_THUNDER_NIC_BGX is not set +# CONFIG_THUNDER_NIC_RGX is not set +CONFIG_CAVIUM_PTP=y +CONFIG_LIQUIDIO_CORE=m +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m 
+CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_CHELSIO_TLS_DEVICE is not set +CONFIG_NET_VENDOR_CISCO=y +CONFIG_ENIC=m +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_NET_VENDOR_DAVICOM=y +CONFIG_DNET=m +CONFIG_NET_VENDOR_DEC=y +# CONFIG_NET_TULIP is not set +# CONFIG_NET_VENDOR_DLINK is not set +CONFIG_NET_VENDOR_EMULEX=y +CONFIG_BE2NET=m +CONFIG_BE2NET_HWMON=y +# CONFIG_BE2NET_BE2 is not set +# CONFIG_BE2NET_BE3 is not set +CONFIG_BE2NET_LANCER=y +CONFIG_BE2NET_SKYHAWK=y +CONFIG_NET_VENDOR_ENGLEDER=y +# CONFIG_TSNEP is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_FUNGIBLE=y +# CONFIG_FUN_ETH is not set +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_GVE=m +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_E1000E_HWTS=y +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGB_DCA=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCA=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y +CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_ICE_SWITCHDEV=y +CONFIG_ICE_HWTS=y +CONFIG_FM10K=m +CONFIG_IGC=m +# CONFIG_JME is not set +CONFIG_NET_VENDOR_LITEX=y +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_BRIDGE=y +CONFIG_MLX5_CLS_ACT=y +CONFIG_MLX5_TC_CT=y +CONFIG_MLX5_TC_SAMPLE=y +CONFIG_MLX5_CORE_EN_DCB=y +# CONFIG_MLX5_CORE_IPOIB is not set +# 
CONFIG_MLX5_MACSEC is not set +# CONFIG_MLX5_EN_IPSEC is not set +# CONFIG_MLX5_EN_TLS is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLX5_SF is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +CONFIG_NET_VENDOR_MICROSOFT=y +# CONFIG_MICROSOFT_MANA is not set +CONFIG_NET_VENDOR_MYRI=y +CONFIG_MYRI10GE=m +CONFIG_MYRI10GE_DCA=y +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +CONFIG_NFP_NET_IPSEC=y +# CONFIG_NFP_DEBUG is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +# CONFIG_QLCNIC is not set +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +CONFIG_NET_VENDOR_BROCADE=y +# CONFIG_BNA is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +# CONFIG_ATP is not set +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +CONFIG_SFC=m 
+CONFIG_SFC_MTD=y +CONFIG_SFC_MCDI_MON=y +CONFIG_SFC_SRIOV=y +CONFIG_SFC_MCDI_LOGGING=y +# CONFIG_SFC_FALCON is not set +# CONFIG_SFC_SIENA is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_LIBWX=m +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_EMACLITE is not set +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLINK=m +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y +CONFIG_FIXED_PHY=y +CONFIG_SFP=m + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +# CONFIG_ADIN_PHY is not set +# CONFIG_ADIN1100_PHY is not set +CONFIG_AQUANTIA_PHY=m +CONFIG_AX88796B_PHY=m +CONFIG_BROADCOM_PHY=m +# CONFIG_BCM54140_PHY is not set +CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM84881_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BCM_NET_PHYPTP=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set 
+CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +# CONFIG_PSE_CONTROLLER is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_FWNODE_MDIO=y +CONFIG_ACPI_MDIO=y +CONFIG_MDIO_DEVRES=y +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_CAVIUM=m +# CONFIG_MDIO_GPIO is not set +CONFIG_MDIO_I2C=m +# CONFIG_MDIO_MVUSB is not set +CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=m +# end of PCS device drivers + +# CONFIG_PLIP is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +# CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# CONFIG_PPPOE_HASH_BITS_8 is not set +CONFIG_PPPOE_HASH_BITS=4 +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m 
+CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set +CONFIG_USB_RTL8153_ECM=m +CONFIG_WLAN=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K is not set +# CONFIG_ATH5K_PCI is not set +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_COMMON_DEBUG=y +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_DEBUGFS=y +# CONFIG_ATH9K_STATION_STATISTICS is not set +# CONFIG_ATH9K_DYNACK is not set +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_RFKILL=y +# CONFIG_ATH9K_CHANNEL_CONTEXT is not set +CONFIG_ATH9K_PCOEM=y +# CONFIG_ATH9K_PCI_NO_EEPROM is not set +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +# CONFIG_ATH9K_HWRNG is not set +# CONFIG_ATH9K_COMMON_SPECTRAL is not set +# CONFIG_CARL9170 is not set +# CONFIG_ATH6KL is not set +# CONFIG_AR5523 is not set +# CONFIG_WIL6210 is not set +CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y +CONFIG_ATH10K_PCI=m +# CONFIG_ATH10K_SDIO is not set +# CONFIG_ATH10K_USB is not set +# CONFIG_ATH10K_DEBUG is not set +# CONFIG_ATH10K_DEBUGFS is not set +# CONFIG_ATH10K_TRACING is not set +# CONFIG_WCN36XX is not set +# CONFIG_ATH11K is not set +# CONFIG_ATH12K is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +CONFIG_BRCMUTIL=m +CONFIG_BRCMSMAC=m +CONFIG_BRCMSMAC_LEDS=y +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y +CONFIG_BRCMFMAC_USB=y 
+CONFIG_BRCMFMAC_PCIE=y +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y + +# +# Debugging Options +# +# CONFIG_IWLWIFI_DEBUG is not set +CONFIG_IWLWIFI_DEBUGFS=y +# CONFIG_IWLWIFI_DEVICE_TRACING is not set +# end of Debugging Options + +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_WLAN_VENDOR_MARVELL=y +# CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +# CONFIG_MWL8K is not set +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_MT7601U=m +CONFIG_MT76_CORE=m +CONFIG_MT76_LEDS=y +CONFIG_MT76_USB=m +CONFIG_MT76x02_LIB=m +CONFIG_MT76x02_USB=m +CONFIG_MT76x0_COMMON=m +CONFIG_MT76x0U=m +# CONFIG_MT76x0E is not set +CONFIG_MT76x2_COMMON=m +# CONFIG_MT76x2E is not set +CONFIG_MT76x2U=m +# CONFIG_MT7603E is not set +# CONFIG_MT7615E is not set +# CONFIG_MT7663U is not set +# CONFIG_MT7663S is not set +# CONFIG_MT7915E is not set +# CONFIG_MT7921E is not set +# CONFIG_MT7921S is not set +# CONFIG_MT7921U is not set +# CONFIG_MT7996E is not set +CONFIG_WLAN_VENDOR_MICROCHIP=y +# CONFIG_WILC1000_SDIO is not set +CONFIG_WLAN_VENDOR_PURELIFI=y +# CONFIG_PLFXLC is not set +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_RT2X00=m +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +# CONFIG_RT2500USB is not set +# CONFIG_RT73USB is not set +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y 
+CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +CONFIG_RT2X00_LIB_DEBUGFS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WLAN_VENDOR_REALTEK=y +# CONFIG_RTL8180 is not set +# CONFIG_RTL8187 is not set +CONFIG_RTL_CARDS=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m +CONFIG_RTL8XXXU=m +# CONFIG_RTL8XXXU_UNTESTED is not set +CONFIG_RTW88=m +CONFIG_RTW88_CORE=m +CONFIG_RTW88_PCI=m +CONFIG_RTW88_8822B=m +CONFIG_RTW88_8822C=m +CONFIG_RTW88_8822BE=m +# CONFIG_RTW88_8822BS is not set +# CONFIG_RTW88_8822BU is not set +CONFIG_RTW88_8822CE=m +# CONFIG_RTW88_8822CS is not set +# CONFIG_RTW88_8822CU is not set +# CONFIG_RTW88_8723DE is not set +# CONFIG_RTW88_8723DS is not set +# CONFIG_RTW88_8723DU is not set +# CONFIG_RTW88_8821CE is not set +# CONFIG_RTW88_8821CS is not set +# CONFIG_RTW88_8821CU is not set +# CONFIG_RTW88_DEBUG is not set +# CONFIG_RTW88_DEBUGFS is not set +# CONFIG_RTW89 is not set +# CONFIG_WLAN_VENDOR_RSI is not set +CONFIG_WLAN_VENDOR_SILABS=y +# CONFIG_WFX is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_QTNFMAC_PCIE is not set +# CONFIG_USB_NET_RNDIS_WLAN is not set +CONFIG_MAC80211_HWSIM=m +# CONFIG_VIRT_WIFI is not set +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + +# +# X.25/LAPB support is disabled +# +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# 
CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_HWSIM is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + +CONFIG_XEN_NETDEV_FRONTEND=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_HYPERV_NET=m +CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m +CONFIG_ISDN=y +CONFIG_ISDN_CAPI=y +CONFIG_CAPI_TRACE=y +CONFIG_ISDN_CAPI_MIDDLEWARE=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m + +# +# mISDN hardware drivers +# +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m +CONFIG_MISDN_HDLC=m +CONFIG_MISDN_IPAC=m +CONFIG_MISDN_ISAR=m + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set +CONFIG_INPUT_VIVALDIFMAP=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=m +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# 
CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=m +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_LIFEBOOK=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y +CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_VMMOUSE=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set +CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +# CONFIG_TABLET_USB_HANWANG is not set +CONFIG_TABLET_USB_KBTAB=m +# CONFIG_TABLET_USB_PEGASUS is not set +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP5 is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# 
CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set +# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_ILITEK is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MSG2638 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set +# CONFIG_TOUCHSCREEN_IMAGIS is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set +# 
CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +# CONFIG_TOUCHSCREEN_IQS5XX is not set +# CONFIG_TOUCHSCREEN_IQS7211 is not set +# CONFIG_TOUCHSCREEN_ZINITIX is not set +# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +CONFIG_INPUT_PCSPKR=m +# CONFIG_INPUT_MMA8450 is not set +CONFIG_INPUT_APANEL=m +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_GPIO_VIBRA is not set +CONFIG_INPUT_ATLAS_BTNS=m +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +# CONFIG_INPUT_DA7280_HAPTICS is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_IQS269A is not set +# CONFIG_INPUT_IQS626A is not set +# CONFIG_INPUT_IQS7222 is not set +# CONFIG_INPUT_CMA3000 is not set +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +# CONFIG_RMI4_F34 is not set +# CONFIG_RMI4_F3A is not set +CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m 
+CONFIG_HYPERV_KEYBOARD=m +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_LEGACY_TIOCSTI=y +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +# CONFIG_SERIAL_8250_16550A_VARIANTS is not set +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCILIB=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_PCI1XXXX is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_LPSS=y +CONFIG_SERIAL_8250_MID=y +CONFIG_SERIAL_8250_PERICOM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_LANTIQ is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_SPRD is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_NONSTANDARD=y +# 
CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_NOZOMI=m +# CONFIG_NULL_TTY is not set +CONFIG_HVC_DRIVER=y +CONFIG_HVC_IRQ=y +CONFIG_HVC_XEN=y +CONFIG_HVC_XEN_FRONTEND=y +# CONFIG_SERIAL_DEV_BUS is not set +CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set +CONFIG_PPDEV=m +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_INTEL=m +CONFIG_HW_RANDOM_AMD=m +# CONFIG_HW_RANDOM_BA431 is not set +CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_VIRTIO=y +# CONFIG_HW_RANDOM_XIPHERA is not set +# CONFIG_APPLICOM is not set +# CONFIG_MWAVE is not set +CONFIG_DEVMEM=y +CONFIG_NVRAM=y +CONFIG_DEVPORT=y +CONFIG_HPET=y +CONFIG_HPET_MMAP=y +# CONFIG_HPET_MMAP_DEFAULT is not set +CONFIG_HANGCHECK_TIMER=m +CONFIG_UV_MMTIMER=m +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +# CONFIG_TCG_TIS_I2C is not set +# CONFIG_TCG_TIS_I2C_CR50 is not set +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_NSC=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +# CONFIG_TCG_XEN is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +CONFIG_TCG_TIS_ST33ZP24=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TELCLOCK=m +# CONFIG_XILLYBUS is not set +# CONFIG_XILLYUSB is not set +# end of Character devices + +# +# I2C support +# +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +# CONFIG_I2C_MUX_GPIO is not set +# CONFIG_I2C_MUX_LTC4306 is not set +# CONFIG_I2C_MUX_PCA9541 is not set +# CONFIG_I2C_MUX_PCA954x is not set +# CONFIG_I2C_MUX_REG is not set +CONFIG_I2C_MUX_MLXCPLD=m +# end of 
Multiplexer I2C Chip support + +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD756_S4882=m +CONFIG_I2C_AMD8111=m +# CONFIG_I2C_AMD_MP2 is not set +CONFIG_I2C_I801=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_ISMT=m +CONFIG_I2C_PIIX4=m +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_NFORCE2_S4985=m +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m + +# +# ACPI drivers +# +CONFIG_I2C_SCMI=m + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=m +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=m +# CONFIG_I2C_DESIGNWARE_AMDPSP is not set +CONFIG_I2C_DESIGNWARE_BAYTRAIL=y +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set +CONFIG_I2C_PARPORT=m +# CONFIG_I2C_PCI1XXXX is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m + +# +# Other I2C/SMBus bus drivers +# +CONFIG_I2C_MLXCPLD=m +# CONFIG_I2C_VIRTIO is not set +# end of I2C Hardware Bus support + +CONFIG_I2C_STUB=m +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +# CONFIG_SPI is not set +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support 
+# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y +CONFIG_DP83640_PHY=m +# CONFIG_PTP_1588_CLOCK_INES is not set +CONFIG_PTP_1588_CLOCK_KVM=m +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_VMW is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_CY8C95X0 is not set +# CONFIG_PINCTRL_MCP23S08 is not set + +# +# Intel pinctrl drivers +# +CONFIG_PINCTRL_BAYTRAIL=y +# CONFIG_PINCTRL_CHERRYVIEW is not set +# CONFIG_PINCTRL_LYNXPOINT is not set +CONFIG_PINCTRL_INTEL=y +# CONFIG_PINCTRL_ALDERLAKE is not set +CONFIG_PINCTRL_BROXTON=m +CONFIG_PINCTRL_CANNONLAKE=m +CONFIG_PINCTRL_CEDARFORK=m +CONFIG_PINCTRL_DENVERTON=m +# CONFIG_PINCTRL_ELKHARTLAKE is not set +# CONFIG_PINCTRL_EMMITSBURG is not set +CONFIG_PINCTRL_GEMINILAKE=m +CONFIG_PINCTRL_ICELAKE=m +# CONFIG_PINCTRL_JASPERLAKE is not set +# CONFIG_PINCTRL_LAKEFIELD is not set +CONFIG_PINCTRL_LEWISBURG=m +# CONFIG_PINCTRL_METEORLAKE is not set +CONFIG_PINCTRL_SUNRISEPOINT=m +# CONFIG_PINCTRL_TIGERLAKE is not set +# end of Intel pinctrl drivers + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=m + +# +# Memory mapped GPIO drivers +# +CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +CONFIG_GPIO_ICH=m +# CONFIG_GPIO_MB86S7X is not set +# 
CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# Port-mapped I/O GPIO drivers +# +# CONFIG_GPIO_VX855 is not set +# CONFIG_GPIO_F7188X is not set +# CONFIG_GPIO_IT87 is not set +# CONFIG_GPIO_SCH is not set +# CONFIG_GPIO_SCH311X is not set +# CONFIG_GPIO_WINBOND is not set +# CONFIG_GPIO_WS16C48 is not set +# end of Port-mapped I/O GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# CONFIG_GPIO_ELKHARTLAKE is not set +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_AMD8111 is not set +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_ML_IOH is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# USB GPIO expanders +# +CONFIG_GPIO_VIPERBOARD=m +# end of USB GPIO expanders + +# +# Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +# CONFIG_W1 is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_RESTART is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# 
CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_MAX77976 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not set +# CONFIG_CHARGER_BQ256XX is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_ABITUGURU=m +CONFIG_SENSORS_ABITUGURU3=m +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +# CONFIG_SENSORS_ADM1177 is not set +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set +CONFIG_SENSORS_ASC7621=m +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set +CONFIG_SENSORS_K8TEMP=m +CONFIG_SENSORS_K10TEMP=m +CONFIG_SENSORS_FAM15H_POWER=m +CONFIG_SENSORS_APPLESMC=m +CONFIG_SENSORS_ASB100=m +CONFIG_SENSORS_ATXP1=m +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_DELL_SMM=m +# CONFIG_I8K is not set +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m 
+CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_FSCHMD=m +# CONFIG_SENSORS_FTSTEUTATES is not set +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_IIO_HWMON is not set +CONFIG_SENSORS_I5500=m +CONFIG_SENSORS_CORETEMP=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +# CONFIG_SENSORS_POWR1220 is not set +CONFIG_SENSORS_LINEAGE=m +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +# CONFIG_SENSORS_LTC4222 is not set +CONFIG_SENSORS_LTC4245=m +# CONFIG_SENSORS_LTC4260 is not set +CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX127 is not set +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MC34VR500 is not set +CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_MLXREG_FAN is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +# CONFIG_SENSORS_NCT6683 is not set 
+CONFIG_SENSORS_NCT6775_CORE=m +CONFIG_SENSORS_NCT6775=m +# CONFIG_SENSORS_NCT6775_I2C is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set +# CONFIG_SENSORS_OXP is not set +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +# CONFIG_SENSORS_ACBEL_FSG032 is not set +# CONFIG_SENSORS_ADM1266 is not set +CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_BEL_PFE is not set +# CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set +# CONFIG_SENSORS_FSP_3Y is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_DPS920AB is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR36021 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set +CONFIG_SENSORS_LM25066=m +# CONFIG_SENSORS_LT7182S is not set +CONFIG_SENSORS_LTC2978=m +# CONFIG_SENSORS_LTC3815 is not set +# CONFIG_SENSORS_MAX15301 is not set +CONFIG_SENSORS_MAX16064=m +# CONFIG_SENSORS_MAX16601 is not set +# CONFIG_SENSORS_MAX20730 is not set +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX31785 is not set +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +# CONFIG_SENSORS_MP2888 is not set +# CONFIG_SENSORS_MP2975 is not set +# CONFIG_SENSORS_MP5023 is not set +# CONFIG_SENSORS_MPQ7932 is not set +# CONFIG_SENSORS_PIM4328 is not set +# CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +# 
CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set +CONFIG_SENSORS_ZL6100=m +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +# CONFIG_SENSORS_SHTC1 is not set +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_ADC128D818 is not set +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set +CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_SENSORS_ATK0110=m +# CONFIG_SENSORS_ASUS_WMI is not set +# CONFIG_SENSORS_ASUS_EC is not set +# CONFIG_SENSORS_HP_WMI is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 
+CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_ACPI=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_BANG_BANG=y +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_EMULATION is not set + +# +# Intel thermal drivers +# +CONFIG_INTEL_POWERCLAMP=m +CONFIG_X86_THERMAL_VECTOR=y +CONFIG_INTEL_TCC=y +CONFIG_X86_PKG_TEMP_THERMAL=m +CONFIG_INTEL_SOC_DTS_IOSF_CORE=m +# CONFIG_INTEL_SOC_DTS_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# +CONFIG_INT340X_THERMAL=m +CONFIG_ACPI_THERMAL_REL=m +# CONFIG_INT3406_THERMAL is not set +CONFIG_PROC_THERMAL_MMIO_RAPL=m +# end of ACPI INT340X thermal drivers + +CONFIG_INTEL_PCH_THERMAL=m +# CONFIG_INTEL_TCC_COOLING is not set +# CONFIG_INTEL_HFI_THERMAL is not set +# end of Intel thermal drivers + +# CONFIG_GENERIC_ADC_THERMAL is not set +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 +CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_WDAT_WDT=m +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_MLX_WDT is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_ACQUIRE_WDT is not set +# CONFIG_ADVANTECH_WDT is not set +# CONFIG_ADVANTECH_EC_WDT is not set +CONFIG_ALIM1535_WDT=m +CONFIG_ALIM7101_WDT=m +# CONFIG_EBC_C384_WDT is not set +# CONFIG_EXAR_WDT is not set +CONFIG_F71808E_WDT=m +CONFIG_SP5100_TCO=m +CONFIG_SBC_FITPC2_WATCHDOG=m +# CONFIG_EUROTECH_WDT is not set +CONFIG_IB700_WDT=m 
+CONFIG_IBMASR=m +# CONFIG_WAFER_WDT is not set +CONFIG_I6300ESB_WDT=m +CONFIG_IE6XX_WDT=m +CONFIG_ITCO_WDT=m +CONFIG_ITCO_VENDOR_SUPPORT=y +CONFIG_IT8712F_WDT=m +CONFIG_IT87_WDT=m +CONFIG_HP_WATCHDOG=m +CONFIG_HPWDT_NMI_DECODING=y +# CONFIG_SC1200_WDT is not set +# CONFIG_PC87413_WDT is not set +CONFIG_NV_TCO=m +# CONFIG_60XX_WDT is not set +# CONFIG_CPU5_WDT is not set +CONFIG_SMSC_SCH311X_WDT=m +# CONFIG_SMSC37B787_WDT is not set +# CONFIG_TQMX86_WDT is not set +CONFIG_VIA_WDT=m +CONFIG_W83627HF_WDT=m +CONFIG_W83877F_WDT=m +CONFIG_W83977F_WDT=m +CONFIG_MACHZ_WDT=m +# CONFIG_SBC_EPX_C3_WATCHDOG is not set +CONFIG_INTEL_MEI_WDT=m +# CONFIG_NI903X_WDT is not set +# CONFIG_NIC7018_WDT is not set +# CONFIG_MEN_A21_WDT is not set +CONFIG_XEN_WDT=m + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +CONFIG_LPC_ICH=m +CONFIG_LPC_SCH=m +CONFIG_MFD_INTEL_LPSS=m +CONFIG_MFD_INTEL_LPSS_ACPI=m +CONFIG_MFD_INTEL_LPSS_PCI=m +# CONFIG_MFD_INTEL_PMC_BXT is not set +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# 
CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +CONFIG_MFD_VIPERBOARD=m +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_SI476X_CORE is not set +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TQMX86 is not set +CONFIG_MFD_VX855=m +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ATC260X_I2C is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set +CONFIG_RC_CORE=m +# CONFIG_LIRC is not set +CONFIG_RC_MAP=m +CONFIG_RC_DECODERS=y +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +# CONFIG_IR_RCMM_DECODER is not set +CONFIG_IR_SANYO_DECODER=m +# CONFIG_IR_SHARP_DECODER is not set +CONFIG_IR_SONY_DECODER=m +# CONFIG_IR_XMP_DECODER is not set +CONFIG_RC_DEVICES=y +CONFIG_IR_ENE=m +CONFIG_IR_FINTEK=m +# CONFIG_IR_IGORPLUGUSB is not set +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_SERIAL=m +# CONFIG_IR_SERIAL_TRANSMITTER is not 
set +CONFIG_IR_STREAMZAP=m +# CONFIG_IR_TOY is not set +CONFIG_IR_TTUSBIR=m +CONFIG_IR_WINBOND_CIR=m +CONFIG_RC_ATI_REMOTE=m +# CONFIG_RC_LOOPBACK is not set +# CONFIG_RC_XBOX_DVD is not set +CONFIG_CEC_CORE=m + +# +# CEC support +# +# CONFIG_MEDIA_CEC_RC is not set +CONFIG_MEDIA_CEC_SUPPORT=y +# CONFIG_CEC_CH7322 is not set +# CONFIG_CEC_GPIO is not set +# CONFIG_CEC_SECO is not set +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +# end of CEC support + +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_SUPPORT_FILTER=y +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y + +# +# Media device types +# +# CONFIG_MEDIA_CAMERA_SUPPORT is not set +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set +# CONFIG_MEDIA_RADIO_SUPPORT is not set +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_PLATFORM_SUPPORT is not set +# CONFIG_MEDIA_TEST_SUPPORT is not set +# end of Media device types + +# +# Media drivers +# + +# +# Drivers filtered as selected at 'Filter media drivers' +# + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_MEDIA_PCI_SUPPORT=y +# CONFIG_IPU_BRIDGE is not set +# end of Media drivers + +CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV=y + +# +# Media ancillary drivers +# +# end of Media ancillary drivers + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set +# CONFIG_AGP is not set +CONFIG_INTEL_GTT=m +CONFIG_VGA_SWITCHEROO=y +CONFIG_DRM=m +CONFIG_DRM_MIPI_DSI=y +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=m +CONFIG_DRM_EXEC=m +CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_SHMEM_HELPER=m 
+CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_I2C_SIL164=m +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# end of ARM devices + +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +# CONFIG_DRM_AMDGPU_CIK is not set +# CONFIG_DRM_AMDGPU_USERPTR is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# CONFIG_DEBUG_KERNEL_DC is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + +# CONFIG_HSA_AMD is not set +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +CONFIG_DRM_I915=m +CONFIG_DRM_I915_FORCE_PROBE="" +CONFIG_DRM_I915_CAPTURE_ERROR=y +CONFIG_DRM_I915_COMPRESS_ERROR=y +CONFIG_DRM_I915_USERPTR=y +CONFIG_DRM_I915_GVT_KVMGT=m +CONFIG_DRM_I915_REQUEST_TIMEOUT=20000 +CONFIG_DRM_I915_FENCE_TIMEOUT=10000 +CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250 +CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500 +CONFIG_DRM_I915_PREEMPT_TIMEOUT=640 +CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE=7500 +CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000 +CONFIG_DRM_I915_STOP_TIMEOUT=100 +CONFIG_DRM_I915_TIMESLICE_DURATION=1 +CONFIG_DRM_I915_GVT=y +# CONFIG_DRM_VGEM is not set +CONFIG_DRM_VKMS=m +CONFIG_DRM_VMWGFX=m +# CONFIG_DRM_VMWGFX_MKSSTATS is not set +CONFIG_DRM_GMA500=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +# end of Display Panels + 
+CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# end of Display Interface Bridges + +# CONFIG_DRM_LOONGSON is not set +# CONFIG_DRM_ETNAVIV is not set +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_DRM_XEN_FRONTEND is not set +# CONFIG_DRM_VBOXVIDEO is not set +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_SSD130X is not set +# CONFIG_DRM_HYPERV is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y +CONFIG_DRM_PRIVACY_SCREEN=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_VESA=y +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_VIA is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SM501 is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_XEN_FBDEV_FRONTEND is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +CONFIG_FB_HYPERV=m +# CONFIG_FB_SIMPLE is 
not set +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_DEVICE=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_IOMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +# CONFIG_FB_MODE_HELPERS is not set +CONFIG_FB_TILEBLITTING=y +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_LCD_PLATFORM=m +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set +# CONFIG_BACKLIGHT_PWM is not set +CONFIG_BACKLIGHT_APPLE=m +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_SAHARA is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_DRM_ACCEL is not set +CONFIG_SOUND=m +# CONFIG_SND is not set +CONFIG_HID_SUPPORT=y 
+CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +# CONFIG_HID_BIGBEN_FF is not set +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +CONFIG_HID_CMEDIA=m +# CONFIG_HID_CP2112 is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +# CONFIG_HID_EVISION is not set +CONFIG_HID_EZKEY=m +# CONFIG_HID_FT260 is not set +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +# CONFIG_HID_GLORIOUS is not set +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_STADIA_FF is not set +# CONFIG_HID_VIVALDI is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_VRC2 is not set +# CONFIG_HID_XIAOMI is not set +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +# CONFIG_HID_LETSKETCH is not set +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MEGAWORLD_FF is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NINTENDO is not set 
+CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +# CONFIG_HID_NVIDIA_SHIELD is not set +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PICOLCD_CIR=y +CONFIG_HID_PLANTRONICS=m +# CONFIG_HID_PXRC is not set +# CONFIG_HID_RAZER is not set +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +# CONFIG_HID_SEMITEK is not set +# CONFIG_HID_SIGMAMICRO is not set +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_HYPERV_MOUSE=m +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +# CONFIG_HID_TOPRE is not set +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_U2FZERO is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +# CONFIG_HID_MCP2221 is not set +# end of Special HID drivers + +# +# HID-BPF support +# +# CONFIG_HID_BPF is not set +# end of HID-BPF support + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +# end of USB HID support + +CONFIG_I2C_HID=m +# CONFIG_I2C_HID_ACPI is not set +# CONFIG_I2C_HID_OF is not set + +# +# Intel ISH HID support +# +CONFIG_INTEL_ISH_HID=m +# CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER is not set +# end of Intel ISH HID support + +# +# AMD SFH HID Support +# +# CONFIG_AMD_SFH_HID is not set +# end of AMD SFH HID Support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y 
+CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_LED_TRIG=y +# CONFIG_USB_ULPI_BUS is not set +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_PRODUCTLIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 +CONFIG_USB_MON=y + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PCI_RENESAS is not set +# CONFIG_USB_XHCI_PLATFORM is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +# CONFIG_USB_UHCI_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set +# CONFIG_USB_XEN_HCD is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m 
+CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set + +# +# USB dual-mode controller drivers +# +# CONFIG_USB_CDNS_SUPPORT is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_SIMPLE is not set +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE 
is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +# CONFIG_USB_SERIAL_XR is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_USS720=m +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +# CONFIG_APPLE_MFI_FASTCHARGE is not set +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +CONFIG_TYPEC=y +CONFIG_TYPEC_TCPM=y +# CONFIG_TYPEC_TCPCI is not set +CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_UCSI=y +# CONFIG_UCSI_CCG is not set +CONFIG_UCSI_ACPI=y +# CONFIG_UCSI_STM32G0 is not set +CONFIG_TYPEC_TPS6598X=m +# CONFIG_TYPEC_ANX7411 is not set +# CONFIG_TYPEC_RT1719 is not set +# CONFIG_TYPEC_HD3SS3220 is not set +# CONFIG_TYPEC_STUSB160X is not set +# CONFIG_TYPEC_WUSB3801 is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_FSA4480 is not set +# CONFIG_TYPEC_MUX_GPIO_SBU is not set +CONFIG_TYPEC_MUX_PI3USB30532=m +# CONFIG_TYPEC_MUX_NB7VPQ904M is not set +# end of USB Type-C Multiplexer/DeMultiplexer Switch support + +# +# USB Type-C Alternate Mode drivers +# 
+CONFIG_TYPEC_DP_ALTMODE=m +# CONFIG_TYPEC_NVIDIA_ALTMODE is not set +# end of USB Type-C Alternate Mode drivers + +CONFIG_USB_ROLE_SWITCH=y +CONFIG_USB_ROLES_INTEL_XHCI=y +CONFIG_MMC=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_WBSD is not set +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_HSQ is not set +# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set +# CONFIG_MMC_SDHCI_XENON is not set +# CONFIG_SCSI_UFSHCD is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_CLASS_MULTICOLOR is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_APU is not set +# CONFIG_LEDS_AW200XX is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP50XX is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2606MVV is not set 
+# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_INTEL_SS4200=m +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_MLXCPLD=m +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_NIC78BX is not set + +# +# Flash and Torch LED drivers +# + +# +# RGB LED drivers +# + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set +CONFIG_LEDS_TRIGGER_AUDIO=m +# CONFIG_LEDS_TRIGGER_TTY is not set + +# +# Simple LED drivers +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +CONFIG_INFINIBAND_ERDMA=m +CONFIG_INFINIBAND_HFI1=m +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set +# CONFIG_SDMA_VERBOSITY is not set +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_QEDR=m +# CONFIG_INFINIBAND_QIB is not set 
+CONFIG_INFINIBAND_USNIC=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_INFINIBAND_OPA_VNIC=m +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_DECODE_MCE=m +CONFIG_EDAC_GHES=y +CONFIG_EDAC_AMD64=m +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_IE31200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_SKX=m +CONFIG_EDAC_I10NM=m +CONFIG_EDAC_PND2=m +# CONFIG_EDAC_IGEN6 is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +# CONFIG_RTC_SYSTOHC is not set +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +# CONFIG_RTC_DRV_DS1374_WDT is not set +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m 
+CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +# CONFIG_RTC_DRV_RX8010 is not set +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_I2C_AND_SPI=m + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +# CONFIG_RTC_DRV_PCF2127 is not set +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +# CONFIG_RTC_DRV_RX6110 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +# CONFIG_RTC_DRV_GOLDFISH is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +# CONFIG_ALTERA_MSGDMA is not set +CONFIG_INTEL_IDMA64=m +CONFIG_INTEL_IDXD_BUS=m +CONFIG_INTEL_IDXD=m +# CONFIG_INTEL_IDXD_COMPAT is not set +CONFIG_INTEL_IDXD_SVM=y +# CONFIG_INTEL_IDXD_PERFMON is not set +CONFIG_INTEL_IOATDMA=m +# CONFIG_PLX_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set +CONFIG_AMD_PTDMA=m +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=y +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=y +# CONFIG_DW_EDMA is not set +CONFIG_HSU_DMA=y +# CONFIG_SF_PDMA is not set +# CONFIG_INTEL_LDMA is not set + +# +# DMA Clients +# 
+CONFIG_ASYNC_TX_DMA=y +CONFIG_DMATEST=m +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +# CONFIG_DMABUF_DEBUG is not set +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# CONFIG_DMABUF_SYSFS_STATS is not set +# end of DMABUF options + +CONFIG_DCA=m +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_UIO_HV_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI=m +# CONFIG_VFIO_PCI_VGA is not set +# CONFIG_VFIO_PCI_IGD is not set +# CONFIG_MLX5_VFIO_PCI is not set +# end of VFIO support for PCI devices + +CONFIG_VFIO_MDEV=m +CONFIG_IRQ_BYPASS_MANAGER=m +CONFIG_VIRT_DRIVERS=y +CONFIG_VMGENID=y +# CONFIG_VBOXGUEST is not set +# CONFIG_NITRO_ENCLAVES is not set +CONFIG_EFI_SECRET=m +CONFIG_SEV_GUEST=m +CONFIG_TDX_GUEST_DRIVER=m +CONFIG_VIRTIO_ANCHOR=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_PCI_LIB=y +CONFIG_VIRTIO_PCI_LIB_LEGACY=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_MEM=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +CONFIG_HYPERV=m +# CONFIG_HYPERV_VTL_MODE is not set +CONFIG_HYPERV_TIMER=y 
+CONFIG_HYPERV_UTILS=m +CONFIG_HYPERV_BALLOON=m +# end of Microsoft Hyper-V guest support + +# +# Xen driver support +# +# CONFIG_XEN_BALLOON is not set +CONFIG_XEN_DEV_EVTCHN=m +# CONFIG_XEN_BACKEND is not set +CONFIG_XENFS=m +CONFIG_XEN_COMPAT_XENFS=y +CONFIG_XEN_SYS_HYPERVISOR=y +CONFIG_XEN_XENBUS_FRONTEND=y +# CONFIG_XEN_GNTDEV is not set +# CONFIG_XEN_GRANT_DEV_ALLOC is not set +# CONFIG_XEN_GRANT_DMA_ALLOC is not set +# CONFIG_XEN_PVCALLS_FRONTEND is not set +CONFIG_XEN_PRIVCMD=m +CONFIG_XEN_EFI=y +CONFIG_XEN_AUTO_XLATE=y +CONFIG_XEN_ACPI=y +# CONFIG_XEN_UNPOPULATED_ALLOC is not set +# CONFIG_XEN_VIRTIO is not set +# end of Xen driver support + +# CONFIG_GREYBUS is not set +# CONFIG_COMEDI is not set +# CONFIG_STAGING is not set +# CONFIG_CHROME_PLATFORMS is not set +CONFIG_MELLANOX_PLATFORM=y +CONFIG_MLXREG_HOTPLUG=m +# CONFIG_MLXREG_IO is not set +# CONFIG_MLXREG_LC is not set +# CONFIG_NVSW_SN2201 is not set +CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE_3_POWER_OPREGION is not set +# CONFIG_SURFACE_GPE is not set +# CONFIG_SURFACE_HOTPLUG is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +CONFIG_X86_PLATFORM_DEVICES=y +CONFIG_ACPI_WMI=m +CONFIG_WMI_BMOF=m +# CONFIG_HUAWEI_WMI is not set +# CONFIG_UV_SYSFS is not set +CONFIG_MXM_WMI=m +# CONFIG_NVIDIA_WMI_EC_BACKLIGHT is not set +# CONFIG_XIAOMI_WMI is not set +# CONFIG_GIGABYTE_WMI is not set +# CONFIG_YOGABOOK is not set +CONFIG_ACERHDF=m +# CONFIG_ACER_WIRELESS is not set +CONFIG_ACER_WMI=m +# CONFIG_AMD_PMF is not set +# CONFIG_AMD_PMC is not set +# CONFIG_AMD_HSMP is not set +# CONFIG_ADV_SWBUTTON is not set +CONFIG_APPLE_GMUX=m +CONFIG_ASUS_LAPTOP=m +# CONFIG_ASUS_WIRELESS is not set +CONFIG_ASUS_WMI=m +CONFIG_ASUS_NB_WMI=m +# CONFIG_ASUS_TF103C_DOCK is not set +# CONFIG_MERAKI_MX100 is not set +CONFIG_EEEPC_LAPTOP=m +CONFIG_EEEPC_WMI=m +# CONFIG_X86_PLATFORM_DRIVERS_DELL is not set +CONFIG_AMILO_RFKILL=m +CONFIG_FUJITSU_LAPTOP=m +CONFIG_FUJITSU_TABLET=m +# CONFIG_GPD_POCKET_FAN is not set +# 
CONFIG_X86_PLATFORM_DRIVERS_HP is not set +# CONFIG_WIRELESS_HOTKEY is not set +# CONFIG_IBM_RTL is not set +CONFIG_IDEAPAD_LAPTOP=m +# CONFIG_LENOVO_YMC is not set +CONFIG_SENSORS_HDAPS=m +CONFIG_THINKPAD_ACPI=m +# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set +# CONFIG_THINKPAD_ACPI_DEBUG is not set +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set +CONFIG_THINKPAD_ACPI_VIDEO=y +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y +# CONFIG_THINKPAD_LMI is not set +# CONFIG_INTEL_ATOMISP2_PM is not set +# CONFIG_INTEL_IFS is not set +# CONFIG_INTEL_SAR_INT1092 is not set +CONFIG_INTEL_PMC_CORE=m + +# +# Intel Speed Select Technology interface support +# +CONFIG_INTEL_SPEED_SELECT_INTERFACE=m +# end of Intel Speed Select Technology interface support + +CONFIG_INTEL_WMI=y +# CONFIG_INTEL_WMI_SBL_FW_UPDATE is not set +CONFIG_INTEL_WMI_THUNDERBOLT=m + +# +# Intel Uncore Frequency Control +# +# CONFIG_INTEL_UNCORE_FREQ_CONTROL is not set +# end of Intel Uncore Frequency Control + +CONFIG_INTEL_HID_EVENT=m +CONFIG_INTEL_VBTN=m +# CONFIG_INTEL_INT0002_VGPIO is not set +CONFIG_INTEL_OAKTRAIL=m +# CONFIG_INTEL_ISHTP_ECLITE is not set +# CONFIG_INTEL_PUNIT_IPC is not set +CONFIG_INTEL_RST=m +# CONFIG_INTEL_SMARTCONNECT is not set +CONFIG_INTEL_TURBO_MAX_3=y +# CONFIG_INTEL_VSEC is not set +# CONFIG_MSI_EC is not set +CONFIG_MSI_LAPTOP=m +CONFIG_MSI_WMI=m +# CONFIG_PCENGINES_APU2 is not set +# CONFIG_BARCO_P50_GPIO is not set +CONFIG_SAMSUNG_LAPTOP=m +CONFIG_SAMSUNG_Q10=m +# CONFIG_ACPI_TOSHIBA is not set +CONFIG_TOSHIBA_BT_RFKILL=m +# CONFIG_TOSHIBA_HAPS is not set +# CONFIG_TOSHIBA_WMI is not set +CONFIG_ACPI_CMPC=m +CONFIG_COMPAL_LAPTOP=m +# CONFIG_LG_LAPTOP is not set +CONFIG_PANASONIC_LAPTOP=m +CONFIG_SONY_LAPTOP=m +CONFIG_SONYPI_COMPAT=y +# CONFIG_SYSTEM76_ACPI is not set +CONFIG_TOPSTAR_LAPTOP=m +CONFIG_MLX_PLATFORM=m +CONFIG_INTEL_IPS=m +# CONFIG_INTEL_SCU_PCI is not set +# CONFIG_INTEL_SCU_PLATFORM is not set +# CONFIG_SIEMENS_SIMATIC_IPC is not set +# CONFIG_WINMATE_FM07_KEYS is not 
set +# CONFIG_SEL3350_PLATFORM is not set +CONFIG_P2SB=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_XILINX_VCU is not set +CONFIG_HWSPINLOCK=y + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +# CONFIG_IOMMU_DEFAULT_DMA_STRICT is not set +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_SVA=y +CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_V2=m +CONFIG_DMAR_TABLE=y +CONFIG_INTEL_IOMMU=y +CONFIG_INTEL_IOMMU_SVM=y +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y +CONFIG_INTEL_IOMMU_PERF_EVENTS=y +# CONFIG_IOMMUFD is not set +CONFIG_IRQ_REMAP=y +CONFIG_HYPERV_IOMMU=y +# CONFIG_VIRTIO_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# end of fujitsu SoC drivers + +# +# i.MX SoC drivers +# 
+# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# end of Enable LiteX SoC Builder specific drivers + +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# CONFIG_IIO_BUFFER_DMA is not set +# CONFIG_IIO_BUFFER_DMAENGINE is not set +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set +# CONFIG_IIO_TRIGGERED_EVENT is not set + +# +# Accelerometers +# +# CONFIG_ADXL313_I2C is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL355_I2C is not set +# CONFIG_ADXL367_I2C is not set +# CONFIG_ADXL372_I2C is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA400 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD06 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +# CONFIG_FXLS8962AF_I2C is not set +CONFIG_HID_SENSOR_ACCEL_3D=m +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_IIO_KX022A_I2C is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MSA311 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD7091R5 is not set +# CONFIG_AD7291 is not set +# 
CONFIG_AD7606_IFACE_PARALLEL is not set +# CONFIG_AD799X is not set +# CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP3422 is not set +# CONFIG_NAU7802 is not set +# CONFIG_RICHTEK_RTQ6056 is not set +# CONFIG_SD_ADC_MODULATOR is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7924 is not set +# CONFIG_TI_ADS1100 is not set +# CONFIG_VF610_ADC is not set +# CONFIG_VIPERBOARD_ADC is not set +# CONFIG_XILINX_XADC is not set +# end of Analog to digital converters + +# +# Analog to digital and digital to analog converters +# +# end of Analog to digital and digital to analog converters + +# +# Analog Front Ends +# +# CONFIG_IIO_RESCALE is not set +# end of Analog Front Ends + +# +# Amplifiers +# +# CONFIG_HMC425 is not set +# end of Amplifiers + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# CONFIG_AD7746 is not set +# end of Capacitance to digital converters + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_ATLAS_EZO_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_SCD30_CORE is not set +# CONFIG_SCD4X is not set +# CONFIG_SENSIRION_SGP30 is not set +# CONFIG_SENSIRION_SGP40 is not set +# CONFIG_SPS30_I2C is not set +# CONFIG_SENSEAIR_SUNRISE_CO2 is not set +# CONFIG_VZ89X is not set +# end of Chemical Sensors + +# +# Hid Sensor IIO Common +# +CONFIG_HID_SENSOR_IIO_COMMON=m +CONFIG_HID_SENSOR_IIO_TRIGGER=m +# end of Hid Sensor IIO Common + +# +# IIO SCMI Sensors +# +# end of IIO SCMI Sensors + +# +# SSP Sensor Common +# +# end of SSP Sensor Common + +# +# Digital to analog converters +# +# CONFIG_AD5064 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5593R is not set +# 
CONFIG_AD5696_I2C is not set +# CONFIG_DPOT_DAC is not set +# CONFIG_DS4424 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MAX5821 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4728 is not set +# CONFIG_TI_DAC5571 is not set +# CONFIG_VF610_DAC is not set +# end of Digital to analog converters + +# +# IIO dummy driver +# +# end of IIO dummy driver + +# +# Filters +# +# end of Filters + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# end of Clock Generator/Distribution + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# end of Phase-Locked Loop (PLL) frequency synthesizers +# end of Frequency Synthesizers DDS/PLL + +# +# Digital gyroscope sensors +# +# CONFIG_BMG160 is not set +# CONFIG_FXAS21002C is not set +CONFIG_HID_SENSOR_GYRO_3D=m +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set +# end of Digital gyroscope sensors + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set +# end of Heart Rate Monitors +# end of Health Sensors + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# CONFIG_HDC2010 is not set +CONFIG_HID_SENSOR_HUMIDITY=m +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set +# end of Humidity sensors + +# +# Inertial measurement units +# +# CONFIG_BMI160_I2C is not set +# CONFIG_BOSCH_BNO055_I2C is not set +# CONFIG_FXOS8700_I2C is not set +# CONFIG_KMX61 is not set +# CONFIG_INV_ICM42600_I2C is not set +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_IIO_ST_LSM6DSX is not set +# CONFIG_IIO_ST_LSM9DS0 is not set +# end of Inertial measurement units + +# +# Light sensors +# +# CONFIG_ACPI_ALS is not set +# CONFIG_ADJD_S311 is not set +# CONFIG_ADUX1020 is not set +# CONFIG_AL3010 is not set +# CONFIG_AL3320A is not set 
+# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_AS73211 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM3605 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP002 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +# CONFIG_JSA1212 is not set +# CONFIG_ROHM_BU27008 is not set +# CONFIG_ROHM_BU27034 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LTRF216A is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_MAX44009 is not set +# CONFIG_NOA1305 is not set +# CONFIG_OPT3001 is not set +# CONFIG_OPT4001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2591 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VCNL4035 is not set +# CONFIG_VEML6030 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set +# end of Light sensors + +# +# Magnetometer sensors +# +# CONFIG_AK8974 is not set +# CONFIG_AK8975 is not set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_MAG3110 is not set +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_TI_TMAG5273 is not set +# CONFIG_YAMAHA_YAS530 is not set +# end of Magnetometer sensors + +# +# Multiplexers +# +# CONFIG_IIO_MUX is not set +# end of 
Multiplexers + +# +# Inclinometer sensors +# +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +# end of Inclinometer sensors + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set +# end of Triggers - standalone + +# +# Linear and angular position sensors +# +# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set +# end of Linear and angular position sensors + +# +# Digital potentiometers +# +# CONFIG_AD5110 is not set +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5432 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4531 is not set +# CONFIG_TPL0102 is not set +# end of Digital potentiometers + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set +# end of Digital potentiostats + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +# CONFIG_DLHL60D is not set +# CONFIG_DPS310 is not set +CONFIG_HID_SENSOR_PRESS=m +# CONFIG_HP03 is not set +# CONFIG_ICP10100 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL3115 is not set +# CONFIG_MPRLS0025PA is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set +# end of Pressure sensors + +# +# Lightning sensors +# +# end of Lightning sensors + +# +# Proximity and distance sensors +# +# CONFIG_IRSD200 is not set +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_MB1232 is not set +# CONFIG_PING is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9310 is not set +# CONFIG_SX9324 is not set +# CONFIG_SX9360 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set +# CONFIG_VCNL3020 is not set +# CONFIG_VL53L0X_I2C is not set +# end of Proximity and distance sensors + +# +# Resolver to digital converters +# +# end of Resolver to digital converters + +# +# Temperature sensors +# 
+CONFIG_HID_SENSOR_TEMP=m +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TMP117 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_MAX30208 is not set +# end of Temperature sensors + +CONFIG_NTB=m +# CONFIG_NTB_MSI is not set +# CONFIG_NTB_AMD is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_INTEL is not set +# CONFIG_NTB_EPF is not set +# CONFIG_NTB_SWITCHTEC is not set +# CONFIG_NTB_PINGPONG is not set +# CONFIG_NTB_TOOL is not set +# CONFIG_NTB_PERF is not set +# CONFIG_NTB_TRANSPORT is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_CLK is not set +# CONFIG_PWM_DWC is not set +CONFIG_PWM_LPSS=m +CONFIG_PWM_LPSS_PCI=m +CONFIG_PWM_LPSS_PLATFORM=m +# CONFIG_PWM_PCA9685 is not set + +# +# IRQ chip support +# +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +# CONFIG_RESET_CONTROLLER is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_USB_LGM_PHY is not set +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_CPCAP_USB is not set +# CONFIG_PHY_INTEL_LGM_EMMC is not set +# end of PHY Subsystem + +CONFIG_POWERCAP=y +CONFIG_INTEL_RAPL_CORE=m +CONFIG_INTEL_RAPL=m +CONFIG_IDLE_INJECT=y +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# end of Performance monitor support + +CONFIG_RAS=y +CONFIG_RAS_CEC=y +# CONFIG_RAS_CEC_DEBUG is not set +# CONFIG_USB4 is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +CONFIG_LIBNVDIMM=y +CONFIG_BLK_DEV_PMEM=y +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=y +CONFIG_BTT=y +CONFIG_ND_PFN=y +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_NVDIMM_KEYS=y +# CONFIG_NVDIMM_SECURITY_TEST is not set 
+CONFIG_DAX=y +CONFIG_DEV_DAX=y +CONFIG_DEV_DAX_PMEM=y +CONFIG_DEV_DAX_HMEM=y +CONFIG_DEV_DAX_HMEM_DEVICES=y +CONFIG_DEV_DAX_KMEM=y +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_RMEM is not set + +# +# HW tracing support +# +CONFIG_STM=m +CONFIG_STM_PROTO_BASIC=m +CONFIG_STM_PROTO_SYS_T=m +CONFIG_STM_DUMMY=m +CONFIG_STM_SOURCE_CONSOLE=m +CONFIG_STM_SOURCE_HEARTBEAT=m +CONFIG_STM_SOURCE_FTRACE=m +CONFIG_INTEL_TH=m +CONFIG_INTEL_TH_PCI=m +CONFIG_INTEL_TH_ACPI=m +CONFIG_INTEL_TH_GTH=m +CONFIG_INTEL_TH_STH=m +CONFIG_INTEL_TH_MSU=m +CONFIG_INTEL_TH_PTI=m +# CONFIG_INTEL_TH_DEBUG is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +CONFIG_TEE=m +CONFIG_AMDTEE=m +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_DAX=y 
+CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_FUSE_DAX=y +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +# CONFIG_OVERLAY_FS_NFS_EXPORT is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=m +CONFIG_NETFS_STATS=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +CONFIG_CACHEFILES_ONDEMAND=y +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_EXFAT_FS is not set +# CONFIG_NTFS_FS is not set +# CONFIG_NTFS3_FS is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_PROC_PID_ARCH_STATUS=y +CONFIG_PROC_CPU_RESCTRL=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not 
set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y +CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +# CONFIG_SQUASHFS_LZ4 is not set +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_CONSOLE=y +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=y +# CONFIG_PSTORE_BLK is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_EROFS_FS=m +# CONFIG_EROFS_FS_DEBUG is not set +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y 
+CONFIG_EROFS_FS_ZIP_LZMA=y +CONFIG_EROFS_FS_ZIP_DEFLATE=y +CONFIG_EROFS_FS_ONDEMAND=y +# CONFIG_EROFS_FS_PCPU_KTHREAD is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=m +# 
CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_NLS_UCS2_UTILS=m +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_INTEL_TXT=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 
+CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +# CONFIG_IMA_KEXEC is not set +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_NG_TEMPLATE is not set +CONFIG_IMA_SIG_TEMPLATE=y +CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +CONFIG_IMA_APPRAISE_BUILD_POLICY=y +# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y +CONFIG_IMA_BLACKLIST_KEYRING=y +CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set 
+CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set +# CONFIG_IMA_DISABLE_HTABLE is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_STACK_ALL_PATTERN is not set +# CONFIG_INIT_STACK_ALL_ZERO is not set +# CONFIG_GCC_PLUGIN_STACKLEAK is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Hardening of kernel data structures + +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y 
+CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=y +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +CONFIG_CRYPTO_SM2=y +# CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=y +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=m +CONFIG_CRYPTO_SEQIV=m 
+CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=m +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=m +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=m +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +# end of Compression + +# +# Random number generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y + +# +# Accelerated Cryptographic Algorithms for CPU (x86) +# +CONFIG_CRYPTO_CURVE25519_X86=m +CONFIG_CRYPTO_AES_NI_INTEL=y +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_CAMELLIA_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m 
+CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_CAST5_AVX_X86_64=m +CONFIG_CRYPTO_CAST6_AVX_X86_64=m +CONFIG_CRYPTO_DES3_EDE_X86_64=m +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m +CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=y +CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=y +CONFIG_CRYPTO_TWOFISH_X86_64=m +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m +# CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 is not set +# CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 is not set +# CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 is not set +CONFIG_CRYPTO_CHACHA20_X86_64=m +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set +# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set +CONFIG_CRYPTO_BLAKE2S_X86=y +# CONFIG_CRYPTO_POLYVAL_CLMUL_NI is not set +CONFIG_CRYPTO_POLY1305_X86_64=m +CONFIG_CRYPTO_SHA1_SSSE3=y +CONFIG_CRYPTO_SHA256_SSSE3=y +CONFIG_CRYPTO_SHA512_SSSE3=y +CONFIG_CRYPTO_SM3_AVX_X86_64=y +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m +CONFIG_CRYPTO_CRC32C_INTEL=m +CONFIG_CRYPTO_CRC32_PCLMUL=m +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m +# end of Accelerated Cryptographic Algorithms for CPU (x86) + +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_PADLOCK=m +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +CONFIG_CRYPTO_DEV_SP_PSP=y +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +CONFIG_CRYPTO_DEV_QAT=m +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m +CONFIG_CRYPTO_DEV_QAT_C3XXX=m +CONFIG_CRYPTO_DEV_QAT_C62X=m +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m +CONFIG_CRYPTO_DEV_QAT_C62XVF=m +CONFIG_CRYPTO_DEV_CHELSIO=m +# CONFIG_CRYPTO_DEV_VIRTIO is not set +# CONFIG_CRYPTO_DEV_SAFEXCEL is not 
set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# CONFIG_SYSTEM_REVOCATION_LIST is not set +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m 
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=m +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_XXHASH=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_OPS=y +CONFIG_NEED_SG_DMA_FLAGS=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +CONFIG_DMA_COHERENT_POOL=y +CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=0 
+CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_MEMREGION=y +CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_COPY_MC=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of Library routines + +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set +CONFIG_DEBUG_INFO_DWARF4=y +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# 
CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_OBJTOOL=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_HONOUR_BLOCKLIST=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_LOW_LEVEL_TRAP=y +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x1 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_EARLY_DEBUG=y +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_HAVE_ARCH_KCSAN=y +CONFIG_HAVE_KCSAN_COMPILER=y +# CONFIG_KCSAN is not set +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_TABLE_CHECK is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_ARCH_HAS_DEBUG_WX=y +# CONFIG_DEBUG_WX is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_PER_VMA_LOCK_STATS is not set +# 
CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SHRINKER_DEBUG is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_VM_PGTABLE is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y +# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +CONFIG_HAVE_ARCH_KFENCE=y +CONFIG_KFENCE=y +CONFIG_KFENCE_SAMPLE_INTERVAL=0 +CONFIG_KFENCE_NUM_OBJECTS=255 +# CONFIG_KFENCE_DEFERRABLE is not set +CONFIG_KFENCE_STRESS_TEST_FAULTS=0 +CONFIG_HAVE_ARCH_KMSAN=y +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=1 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set + +# 
+# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +# CONFIG_NMI_CHECK_CPU is not set +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_RETHOOK=y +CONFIG_RETHOOK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y 
+CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_OBJTOOL_MCOUNT=y +CONFIG_HAVE_OBJTOOL_NOP_MCOUNT=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_BUILDTIME_MCOUNT_SORT=y +CONFIG_BUILDTIME_MCOUNT_SORT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FPROBE is not set +CONFIG_FUNCTION_PROFILER=y +CONFIG_STACK_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_OSNOISE_TRACER=y +CONFIG_TIMERLAT_TRACER=y +# CONFIG_MMIOTRACE is not set +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_CC=y +CONFIG_TRACING_MAP=y +CONFIG_SYNTH_EVENTS=y +# CONFIG_USER_EVENTS is not set +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_FTRACE_SORT_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is 
not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_SYNTH_EVENT_GEN_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_HIST_TRIGGERS_DEBUG is not set +# CONFIG_RV is not set +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# x86 Debugging +# +CONFIG_EARLY_PRINTK_USB=y +# CONFIG_X86_VERBOSE_BOOTUP is not set +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_USB_XDBC=y +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_TLBFLUSH is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_X86_DECODER_SELFTEST=y +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +# CONFIG_X86_DEBUG_FPU is not set +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set +# end of x86 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_TEST_DHRY is not set +# CONFIG_LKDTM is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_TEST_REF_TRACKER is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_STRING_SELFTEST is 
not set +# CONFIG_TEST_STRING_HELPERS is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_SCANF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_MAPLE_TREE is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_DYNAMIC_DEBUG is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +CONFIG_TEST_LIVEPATCH=m +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_HMM is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_TEST_FPU is not set +# CONFIG_TEST_CLOCKSOURCE_WATCHDOG is not set +CONFIG_ARCH_USE_MEMTEST=y +# CONFIG_MEMTEST is not set +# CONFIG_HYPERV_TESTING is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking -- Gitee From f51d5db54f719746163afd5bb0475d1444642c35 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Fri, 15 Dec 2023 11:41:31 +0800 Subject: [PATCH 0002/2138] anolis: spec: add basic framework to generate rpm tree Signed-off-by: Qiao Ma --- anolis/.gitignore | 1 + anolis/Makefile | 87 ++ anolis/Makefile.variables | 80 ++ anolis/buildpkg.sh | 89 ++ anolis/changelog/000-changelog.base | 2 + anolis/genlog.sh | 65 ++ anolis/genrpmtree.sh | 34 + anolis/genspec.sh | 16 + anolis/rpm/cpupower.config | 3 + anolis/rpm/cpupower.service | 13 + anolis/rpm/generate_bls_conf.sh | 35 + anolis/rpm/kernel.spec.template | 1581 +++++++++++++++++++++++++++ 12 files changed, 2006 insertions(+) create mode 100644 
anolis/.gitignore create mode 100644 anolis/Makefile create mode 100644 anolis/Makefile.variables create mode 100644 anolis/buildpkg.sh create mode 100644 anolis/changelog/000-changelog.base create mode 100644 anolis/genlog.sh create mode 100644 anolis/genrpmtree.sh create mode 100644 anolis/genspec.sh create mode 100644 anolis/rpm/cpupower.config create mode 100644 anolis/rpm/cpupower.service create mode 100755 anolis/rpm/generate_bls_conf.sh create mode 100644 anolis/rpm/kernel.spec.template diff --git a/anolis/.gitignore b/anolis/.gitignore new file mode 100644 index 000000000000..9b1960e711fc --- /dev/null +++ b/anolis/.gitignore @@ -0,0 +1 @@ +output/ \ No newline at end of file diff --git a/anolis/Makefile b/anolis/Makefile new file mode 100644 index 000000000000..069ceab2c498 --- /dev/null +++ b/anolis/Makefile @@ -0,0 +1,87 @@ +include Makefile.variables + +all: help examples + +dist-check: + @if [ "${DIST_BUILD_MODE}" == "official" ]; then \ + if [ "$(shell git describe --tags --exact-match HEAD 2>/dev/null)" != "${DIST_ANOLIS_VERSION}" ]; then \ + echo "Error: For official build, the tag ${DIST_ANOLIS_VERSION} should point to HEAD"; \ + exit 1; \ + fi \ + fi + @if [ "${DIST_BUILD_MODE}" == "diy" ] && [ -z "${DIST_DIY}" ]; then \ + echo "Error: For diy build, the variable DIST_DIY should not be empty"; \ + exit 1; \ + fi + +dist-genlog: + sh genlog.sh + +dist-genspec: dist-check + sh genspec.sh + +dist-genrpmtree: dist-check + sh genrpmtree.sh + +dist-rpms: dist-genrpmtree dist-check + sh buildpkg.sh + +clean: + rm -rf $(DIST_OUTPUT) + +dist-version: + @echo $(DIST_ANOLIS_VERSION) + +examples: + @echo '' + @echo 'Build Examples:' + @echo '- *RECOMMEND* devel build with basic rpm packages' + @echo ' DIST_BUILD_MODE=devel DIST_BUILD_EXTRA=base make dist-rpms' + @echo '- *RECOMMEND* devel build with full rpm packages' + @echo ' DIST_BUILD_MODE=devel make dist-rpms' + @echo '- *RECOMMEND* nightly build with full rpm packages' + @echo ' DIST_BUILD_MODE=nightly 
make dist-rpms' + @echo '- diy build with basic rpm packages' + @echo ' DIST_BUILD_MODE=diy BUILD_DIY="your_diy_name" DIST_BUILD_EXTRA=base make dist-rpms' + @echo '' + @echo 'Kernel Version Examples:' + @echo '- show the kernel version of devel mode' + @echo ' DIST_BUILD_MODE=devel make dist-version' + @echo '- show the kernel version of diy mode' + @echo ' DIST_BUILD_MODE=diy BUILD_DIY="your_diy_name" make dist-version' + @echo '' + @echo 'Other Examples:' + @echo '- only generate rpm tree in devel mode, but do not build rpm packages' + @echo ' DIST_BUILD_MODE=devel make dist-genrpmtree' + @echo '- cleanup' + @echo ' make clean' + +help: + @echo 'For anolis release' + @echo '' + @echo 'RUN `make examples` for some examples' + @echo '--------------------------------' + @echo 'generic commands:' + @echo ' dist-genspec - generate kernel spec file through kernel.spec.template and changelog files' + @echo ' dist-genlog - generate changelogs' + @echo ' dist-genrpmtree - generate rpm tree' + @echo ' dist-rpms - build kernel rpm package, it will auto generated in $(DIST_SHORT_OUTPUT)' + @echo ' dist-version - show dist version' + @echo ' clean - cleanup output dir' + @echo ' examples - show some examples' + @echo '' + @echo '-------------------------------' + @echo 'the environment variables that could override:' + @echo ' DIST - the distribution suffix, eg: .an7, .an8, .an23' + @echo ' DIST_OUTPUT - the output directory, default: $(DIST_SHORT_OUTPUT)' + @echo ' DIST_BUILD_MODE - the build mode. optional: official/nightly/devel/diy' + @echo ' !!! NOTE: BE CAUTIOUS ABOUT USING official BUILD !!!' + @echo ' - official build. kernel version: $(DIST_KERNELVERSION)-$(DIST_OFFICIAL_PKGRELEASEVERION), with srpm' + @echo ' - nightly build. kernel version: $(DIST_KERNELVERSION)-$(DIST_UNOFFICIAL_PKGRELEASEVERION), with srpm' + @echo ' - devel build. kernel version: $(DIST_KERNELVERSION)-$(DIST_UNOFFICIAL_PKGRELEASEVERION), without srpm' + @echo ' - diy build. 
kernel version: $(DIST_KERNELVERSION)-$(DIST_DIY_PKGRELEASEVERION), with srpm' + @echo ' DIST_BUILD_NUMBER - the build number for unofficial build, eg: 1/2' + @echo ' DIST_DIY - the kernel version for diy build' + @echo ' DIST_BUILD_VARIANT & DIST_BUILD_EXTRA - see comments in buildpkg.sh' + +export \ No newline at end of file diff --git a/anolis/Makefile.variables b/anolis/Makefile.variables new file mode 100644 index 000000000000..d806e7988230 --- /dev/null +++ b/anolis/Makefile.variables @@ -0,0 +1,80 @@ +# the global environment variables, which will be passed to shell scripts +# all variables are start with DIST_, to avoid influences kernel build + +# the dist suffix, eg: an7, an8, an23 +DIST ?= .an23 + +# build mode: +# - official build, the kernel version looks like: 5.10.134-15.1_rc1, and also generate source rpm +# - nightly build, the kernel version looks like: 5.10.134-1.git.6235a991a61d, and also generate source rpm +# - devel build, same as nightly build, without source rpm +DIST_BUILD_MODE ?= devel + +# the package release version. +# eg: for ANCK 5.10-015.1, the major version is 15, the minor version is 1 +DIST_RELEASE_MAJOR_VERSION = 1 +DIST_RELEASE_MINOR_VERSION = + +# testing stage. 
+# eg: alpha, beta, rc +DIST_TESTING_STAGE = rc +DIST_TESTING_STAGE_MAJOR_VERSION = 1 +DIST_TESTING_STAGE_MINOR_VERSION = + +# special versions, eg: the pgo version +DIST_SPECIAL_VERSION_NAME = +DIST_SPECIAL_VERSION_MAJOR = +DIST_SPECIAL_VERSION_MINOR = + +# build number +DIST_BUILD_NUMBER ?= 1 + +# the kernel root +DIST_SRCROOT = $(shell realpath ..)/ +DIST_SOURCES = $(DIST_SRCROOT)anolis/ +DIST_RPM = $(DIST_SOURCES)rpm/ +DIST_CHANGELOG = $(DIST_SOURCES)changelog/ + +# the output directory +DIST_OUTPUT ?= $(DIST_SOURCES)output/ +DIST_RPMBUILDDIR_OUTPUT = ${DIST_OUTPUT}/rpmbuild +DIST_SHORT_OUTPUT=$(subst $(DIST_SRCROOT),,$(DIST_OUTPUT)) + +DIST_SPEC_TEMPLATE = kernel.spec.template +DIST_SPEC_FILE = kernel.spec + +# generate anolis kernel version + +# kernel version for offical build +DIST_RELEASE_VERSION = $(DIST_RELEASE_MAJOR_VERSION)$(if $(DIST_RELEASE_MINOR_VERSION),.$(DIST_RELEASE_MINOR_VERSION)) +DIST_SPECIAL_VERSION = $(if $(DIST_SPECIAL_VERSION_NAME),.$(DIST_SPECIAL_VERSION_NAME)$(if $(DIST_SPECIAL_VERSION_MAJOR),.$(DIST_SPECIAL_VERSION_MAJOR))$(if $(DIST_SPECIAL_VERSION_MINOR),.$(DIST_SPECIAL_VERSION_MINOR))) +DIST_TESTING_VERSION = $(if $(DIST_TESTING_STAGE),_$(DIST_TESTING_STAGE)$(if $(DIST_TESTING_STAGE_MAJOR_VERSION),$(DIST_TESTING_STAGE_MAJOR_VERSION))$(if $(DIST_TESTING_STAGE_MINOR_VERSION),.$(DIST_TESTING_STAGE_MINOR_VERSION))) +DIST_LINUXVERSION:=$(shell cat $(DIST_SRCROOT)/Makefile | sed -ne '/^VERSION\ =\ /{s///;p;q}') +DIST_LINUXKPATCHLEVEL:=$(shell cat $(DIST_SRCROOT)/Makefile | sed -ne '/^PATCHLEVEL\ =\ /{s///;p;q}') +DIST_LINUXKSUBLEVEL:=$(shell cat $(DIST_SRCROOT)/Makefile | sed -ne '/^SUBLEVEL\ =\ /{s///;p;q}') +DIST_KERNELVERSION = $(DIST_LINUXVERSION).$(DIST_LINUXKPATCHLEVEL).$(DIST_LINUXKSUBLEVEL) +DIST_OFFICIAL_PKGRELEASEVERION = $(DIST_RELEASE_VERSION)$(DIST_SPECIAL_VERSION)$(DIST_TESTING_VERSION) + +# kernel version for unoffical build +DIST_GIT_HEAD_SHORT_COMMIT_ID = $(shell git rev-parse --short HEAD) +DIST_GIT_HEAD_FULL_COMMIT_ID = 
$(shell git rev-parse HEAD) +DIST_UNOFFICIAL_PKGRELEASEVERION = ${DIST_BUILD_NUMBER}.git.$(DIST_GIT_HEAD_SHORT_COMMIT_ID) + +# kernel version for diy build +DIST_DIY_PKGRELEASEVERION = ${DIST_DIY}.diy + +# final kernel version +ifeq ("${DIST_BUILD_MODE}", "official") +DIST_PKGRELEASEVERION = $(DIST_OFFICIAL_PKGRELEASEVERION) +else ifeq ("${DIST_BUILD_MODE}", "diy") +DIST_PKGRELEASEVERION = $(DIST_DIY_PKGRELEASEVERION) +else +DIST_PKGRELEASEVERION = $(DIST_UNOFFICIAL_PKGRELEASEVERION) +endif +DIST_ANOLIS_VERSION = $(DIST_KERNELVERSION)-$(DIST_PKGRELEASEVERION) + +# the package id used for compress kernel tarball: +# for official build, we compress tarball from tag +# for unofficial build, we compress tarball from git HEAD +DIST_PKG_COMMIT_ID = $(if $(DIST_OFFICIAL_BUILD),$(DIST_ANOLIS_VERSION),$(DIST_GIT_HEAD_FULL_COMMIT_ID)) + diff --git a/anolis/buildpkg.sh b/anolis/buildpkg.sh new file mode 100644 index 000000000000..34ff6a929f1d --- /dev/null +++ b/anolis/buildpkg.sh @@ -0,0 +1,89 @@ +set -xe + +function do_rpmbuild() { + if [ "$DIST_BUILD_MODE" == "official" ] || \ + [ "$DIST_BUILD_MODE" == "nightly" ] || \ + [ "$DIST_BUILD_MODE" == "diy" ]; then + CMD="-ba" + else + CMD="-bb" + fi + + # Now we have: + # + variants: default, only-debug, with-debug + # + extras: base, with-debuginfo, full + # + modes: official, nightly, dev + #TODO: add with-gcov + # + # Matrix + # + # | BuildMode | KernelName | GenerateSrpm | + # |-----------|-----------------|--------------| + # | official | without sha id | Yes | + # | nightly | with git sha id | Yes | + # | devel | with git sha id | No | + # + # | Extra\Var | Default | Only-debug | With-debug | + # |-----------|----------|------------|------------| + # | Base | +default | -default | +default | + # | | -debug | +debug | +debug | + # | | +headers | + # |-----------|------------------------------------| + # | debuginfo | +debuginfo | + # |-----------|------------------------------------| + # | full | +tools +doc +perf | + # + # 
Note: pre-release mode will always be "full" and "with-debug" by default + + build_opts="--with headers --without bpftool --without signmodules" + + if [ "_${DIST_BUILD_VARIANT}" == "_only-debug" ]; then + build_opts="$build_opts --without default --with debug" + elif [ "_${DIST_BUILD_VARIANT}" == "_with-debug" ]; then + build_opts="$build_opts --with default --with debug" + else # assume default + build_opts="$build_opts --with default --without debug" + fi + + if [ "_${DIST_BUILD_EXTRA}" == "_debuginfo" ]; then + build_opts="$build_opts --with debuginfo --without tools --without doc --without perf" + elif [ "_${DIST_BUILD_EXTRA}" == "_base" ]; then + build_opts="$build_opts --without debuginfo --without tools --without doc --without perf" + else # assume full + build_opts="$build_opts --with debuginfo --with tools --with doc --with perf" + fi + + # launch a new shell to clear current environment variables passed by Makefile + rpmbuild \ + --define "%_smp_mflags -j$(nproc)" \ + --define "%packager " \ + --define "%_topdir ${DIST_RPMBUILDDIR_OUTPUT}" \ + ${build_opts} \ + ${CMD} ${DIST_RPMBUILDDIR_OUTPUT}/SPECS/kernel.spec \ + --target=$(uname -m) || exit 1 +} + +function output() { + if [ -z "$DIST_OFFICIAL_BUILD" ]; then + targetdir=${DIST_BUILD_NUMBER} + else + targetdir=${DIST_ANOLIS_VERSION} + fi + + mkdir -p ${DIST_OUTPUT}/${targetdir} + + cp ${DIST_RPMBUILDDIR_OUTPUT}/RPMS/$(uname -m)/*.rpm ${DIST_OUTPUT}/${targetdir}/ + + # copy srpm packages if and only if they exist. + if [ -f ${DIST_RPMBUILDDIR_OUTPUT}/SRPMS/*.rpm ]; then + cp ${DIST_RPMBUILDDIR_OUTPUT}/SRPMS/*.rpm ${DIST_OUTPUT}/${targetdir} + fi + + ls ${DIST_OUTPUT}/${targetdir}/*.rpm + + rpm_num=$(ls ${DIST_OUTPUT}/${targetdir}/*.rpm | wc -l) + echo "${rpm_num} rpm(s) copied." 
+} + +do_rpmbuild +output \ No newline at end of file diff --git a/anolis/changelog/000-changelog.base b/anolis/changelog/000-changelog.base new file mode 100644 index 000000000000..7bf7526d60c1 --- /dev/null +++ b/anolis/changelog/000-changelog.base @@ -0,0 +1,2 @@ +* Fri Dec 15 2023 Qiao Ma [6.6.7-1_rc1%%DIST%%] +- anolis: bump kernel to 6.6.7 (Qiao Ma) \ No newline at end of file diff --git a/anolis/genlog.sh b/anolis/genlog.sh new file mode 100644 index 000000000000..a11293855fa5 --- /dev/null +++ b/anolis/genlog.sh @@ -0,0 +1,65 @@ +# by default, it generates changlogs from latest-tag to HEAD +function get_changelog_start_end() { + if [ -z "$CHANGELOG_START" ]; then + CHANGELOG_START=$(git describe --tags --abbrev=0) + fi + if [ -z "$CHANGELOG_START" ]; then + echo "cannot decide CHANGELOG_START" + exit 1 + fi + + if [ -z "$CHANGELOG_END" ]; then + CHANGELOG_END=$(git log --format="%H" -1 HEAD) + fi +} + +function get_author_sign() { + if [ -z "$AUTHOR_SIGN" ]; then + AUTHOR_SIGN=$(git var GIT_COMMITTER_IDENT |sed 's/>.*/>/') + fi + if [ -z "$AUTHOR_SIGN" ]; then + echo "unkonwn AUTHOR_SIGN" + exit 1 + fi +} + +function get_changelog_file_name() { + local file_base_name="changelog.${DIST_ANOLIS_VERSION}" + local files_num=$(ls ${DIST_CHANGELOG} | grep -E '[0-9]+-changelog.*' | wc -l) + local file_name=$(printf "%03d-${file_base_name}\n" ${files_num}) + CHANGELOG_FILE=${DIST_CHANGELOG}/${file_name} +} + +function generate_changelog() { + get_changelog_start_end + get_author_sign + get_changelog_file_name + + touch ${CHANGELOG_FILE} + echo "* $(date +"%a %b %d %Y") ${AUTHOR_SIGN} [${DIST_ANOLIS_VERSION}%%DIST%%]" > ${CHANGELOG_FILE} + + # TODO: + # 1. if config changes, add kernel config refresh log + # 2. 
if linux upstream kernel version updated, add related log + + local commits=$(git rev-list ${CHANGELOG_START}..${CHANGELOG_END}) + for commit in $commits + do + ## eg: - anolis: net/netfilter: rename nft_expr_info (Kangjie Xu) + local log=$(git log --format='- %s (%an)' -1 ${commit}) + + ## eg: {CVE-2022-32250} + ## xargs is used to strip space + local cve_list=$(git log --format='%b' -1 ${commit} | grep -Eio '^[[:blank:]]*Fixes:[[:blank:]]*CVE-.*[[:blank:]]*$' | sed 's/fixes://ig' | xargs | sed 's/[[:blank:]]/,/') + local cve_fmt="" + if [ -n "${cve_list}" ]; then + cve_fmt=$(cat <<< "${cve_list}" | paste -sd "," -) + cve_fmt=" {${cve_fmt}}" + fi + ## merge them together, eg: - anolis: net/netfilter: rename nft_expr_info (Kangjie Xu) {CVE-2022-32250} + echo "${log}${cve_fmt}" >> ${CHANGELOG_FILE} + done + echo "" >> ${CHANGELOG_FILE} +} + +generate_changelog \ No newline at end of file diff --git a/anolis/genrpmtree.sh b/anolis/genrpmtree.sh new file mode 100644 index 000000000000..dd19f111f649 --- /dev/null +++ b/anolis/genrpmtree.sh @@ -0,0 +1,34 @@ +#! /bin/bash + +set -xe + +function do_prep() { + mkdir -p ${DIST_RPMBUILDDIR_OUTPUT} + mkdir -p ${DIST_RPMBUILDDIR_OUTPUT}/{BUILD,RPMS,SOURCES,SPECS,SRPMS} + + cp ${DIST_RPM}/cpupower* ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/ + cp ${DIST_RPM}/generate_bls_conf.sh ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/ + + # for official build, the corresponding tag should exist + if [ -n "$DIST_OFFICIAL_BUILD" ]; then + if ! 
git tag | grep -q -x "${DIST_PKG_COMMIT_ID}"; then + echo "cannot find official build tag: ${DIST_PKG_COMMIT_ID}" + exit 1 + fi + fi + + pkgname="linux-${DIST_ANOLIS_VERSION}${DIST}" + pushd ${DIST_SRCROOT} > /dev/null + git archive --format=tar --prefix="${pkgname}/" ${DIST_PKG_COMMIT_ID} | xz -T$(nproc) > ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/${pkgname}.tar.xz + md5sum ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/${pkgname}.tar.xz > ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/download + popd > /dev/null + DIST_OUTPUT=${DIST_RPMBUILDDIR_OUTPUT}/SPECS/ sh genspec.sh + + cp ${DIST_SRCROOT}/arch/x86/configs/anolis_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-x86_64.config + cp ${DIST_SRCROOT}/arch/x86/configs/anolis-debug_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-x86_64-debug.config + cp ${DIST_SRCROOT}/arch/arm64/configs/anolis_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64.config + cp ${DIST_SRCROOT}/arch/arm64/configs/anolis-debug_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64-debug.config + +} + +do_prep \ No newline at end of file diff --git a/anolis/genspec.sh b/anolis/genspec.sh new file mode 100644 index 000000000000..cfd9f58c2a4e --- /dev/null +++ b/anolis/genspec.sh @@ -0,0 +1,16 @@ +#! /bin/bash +# generate kernel spec through spec template and changelog files. +# it it call from Makefile, do not run it directly. 
+ +mkdir -p ${DIST_OUTPUT} +cp -f ${DIST_RPM}/${DIST_SPEC_TEMPLATE} ${DIST_OUTPUT}/${DIST_SPEC_FILE} + +for changelog_file in $(ls ${DIST_CHANGELOG} | sort) +do + sed -i "/%changelog/r ${DIST_CHANGELOG}/${changelog_file}" ${DIST_OUTPUT}/${DIST_SPEC_FILE} +done + +sed -i -e " + s/%%DIST%%/$DIST/ + s/%%DIST_KERNELVERSION%%/$DIST_KERNELVERSION/ + s/%%DIST_PKGRELEASEVERION%%/$DIST_PKGRELEASEVERION/" ${DIST_OUTPUT}/${DIST_SPEC_FILE} \ No newline at end of file diff --git a/anolis/rpm/cpupower.config b/anolis/rpm/cpupower.config new file mode 100644 index 000000000000..8629a4a3ede7 --- /dev/null +++ b/anolis/rpm/cpupower.config @@ -0,0 +1,3 @@ +# See 'cpupower help' and cpupower(1) for more info +CPUPOWER_START_OPTS="frequency-set -g performance" +CPUPOWER_STOP_OPTS="frequency-set -g ondemand" diff --git a/anolis/rpm/cpupower.service b/anolis/rpm/cpupower.service new file mode 100644 index 000000000000..5f10ab7ee39a --- /dev/null +++ b/anolis/rpm/cpupower.service @@ -0,0 +1,13 @@ +[Unit] +Description=Configure CPU power related settings +After=syslog.target + +[Service] +Type=oneshot +RemainAfterExit=yes +EnvironmentFile=/etc/sysconfig/cpupower +ExecStart=/usr/bin/cpupower $CPUPOWER_START_OPTS +ExecStop=/usr/bin/cpupower $CPUPOWER_STOP_OPTS + +[Install] +WantedBy=multi-user.target diff --git a/anolis/rpm/generate_bls_conf.sh b/anolis/rpm/generate_bls_conf.sh new file mode 100755 index 000000000000..878696c12f33 --- /dev/null +++ b/anolis/rpm/generate_bls_conf.sh @@ -0,0 +1,35 @@ +#!/bin/bash +set -e + +. /etc/os-release + +if [ "${ID}" == "anolis" ]; then + VERSION=${VERSION%%.*} +fi + +kernelver=$1 && shift +rootfs=$1 && shift +variant=$1 && shift + +output="${rootfs}/lib/modules/${kernelver}/bls.conf" +date=$(date -u +%Y%m%d%H%M%S) + +if [ "${variant:-5}" = "debug" ]; then + debugname=" with debugging" + debugid="-debug" +else + debugname="" + debugid="" +fi + +cat >${output} < in your rpmbuild command or force values +# to 0 in here to disable them. 
+# +# standard kernel +%define with_up %{?_without_up: 0} %{?!_without_up: 1} +# kernel-debug +%define with_debug %{?_without_debug: 0} %{?!_without_debug: 1} +# kernel-doc +%define with_doc %{?_without_doc: 0} %{?!_without_doc: 1} +# kernel-headers +%define with_headers %{?_without_headers: 0} %{?!_without_headers: 1} +# perf +%define with_perf %{?_without_perf: 0} %{?!_without_perf: 1} +# tools +%define with_tools %{?_without_tools: 0} %{?!_without_tools: 1} +# bpf tool +%define with_bpftool %{?_without_bpftool: 0} %{?!_without_bpftool: 1} +# kernel-debuginfo +%define with_debuginfo %{?_without_debuginfo: 0} %{?!_without_debuginfo: 1} +# +# Additional options for user-friendly one-off kernel building: +# +# Only build the base kernel (--with baseonly): +%define with_baseonly %{?_with_baseonly: 1} %{?!_with_baseonly: 0} +# Only build the debug kernel (--with dbgonly): +%define with_dbgonly %{?_with_dbgonly: 1} %{?!_with_dbgonly: 0} +# +# should we do C=1 builds with sparse +%define with_sparse %{?_with_sparse: 1} %{?!_with_sparse: 0} + +# The kernel tarball/base version +%define kversion 5.10 + +%define with_gcov %{?_with_gcov: 1} %{?!_with_gcov: 0} + +# turn off debug kernel for gcov builds +%if %{with_gcov} +%define with_debug 0 +%endif + +%define make_target bzImage +%define image_install_path boot + +%define KVERREL %{version}-%{release}.%{_target_cpu} +%define KVERREL_RE %(echo %KVERREL | sed 's/+/[+]/g') +%define hdrarch %_target_cpu +%define asmarch %_target_cpu + +%if !%{with_debuginfo} +%define _enable_debug_packages 0 +%endif +%define debuginfodir /usr/lib/debug +# Needed because we override almost everything involving build-ids +# and debuginfo generation. Currently we rely on the old alldebug setting. 
+%global _build_id_links alldebug + +# if requested, only build base kernel +%if %{with_baseonly} +%define with_debug 0 +%define with_perf 0 +%define with_tools 0 +%define with_bpftool 0 +%endif + +# if requested, only build debug kernel +%if %{with_dbgonly} +%define with_up 0 +%define with_tools 0 +%define with_perf 0 +%define with_bpftool 0 +%endif + +# Overrides for generic default options + +# only package docs noarch +%ifnarch noarch +%define with_doc 0 +%define doc_build_fail true +%endif + +# don't build noarch kernels or headers (duh) +%ifarch noarch +%define with_up 0 +%define with_headers 0 +%define with_tools 0 +%define with_perf 0 +%define with_bpftool 0 +%define with_debug 0 +%define with_doc 0 +%define all_arch_configs %{name}-%{version}-*.config +%endif + +# Per-arch tweaks + +%ifarch x86_64 +%define asmarch x86 +%define all_arch_configs %{name}-%{version}-x86_64*.config +%define kernel_image arch/x86/boot/bzImage +%endif + +%ifarch aarch64 +%define all_arch_configs %{name}-%{version}-aarch64*.config +%define asmarch arm64 +%define hdrarch arm64 +%define make_target Image.gz +%define kernel_image arch/arm64/boot/Image.gz +%endif + +# To temporarily exclude an architecture from being built, add it to +# %%nobuildarches. Do _NOT_ use the ExclusiveArch: line, because if we +# don't build kernel-headers then the new build system will no longer let +# us use the previous build of that package -- it'll just be completely AWOL. +# Which is a BadThing(tm). + +# We only build kernel-headers on the following... +%define nobuildarches i386 i686 + +%ifarch %nobuildarches +%define with_up 0 +%define with_debug 0 +%define with_debuginfo 0 +%define with_perf 0 +%define with_tools 0 +%define with_bpftool 0 +%define _enable_debug_packages 0 +%endif + +# Architectures we build tools/cpupower on +%define cpupowerarchs x86_64 aarch64 + + +# +# Packages that need to be installed before the kernel is, because the %%post +# scripts use them. 
+# +%define kernel_prereq coreutils, systemd >= 203-2, /usr/bin/kernel-install +%define initrd_prereq dracut >= 027 + + +Name: kernel%{?variant} +Group: System Environment/Kernel +License: GPLv2 and Redistributable, no modification permitted +URL: http://www.kernel.org/ +Version: %{rpmversion} +Release: %{pkg_release} +Summary: The Linux kernel, based on version %{version}, heavily modified with backports +# DO NOT CHANGE THE 'ExclusiveArch' LINE TO TEMPORARILY EXCLUDE AN ARCHITECTURE BUILD. +# SET %%nobuildarches (ABOVE) INSTEAD +ExclusiveArch: noarch i686 x86_64 aarch64 +ExclusiveOS: Linux + + +# +# List the packages used during the kernel build +# +BuildRequires: kmod, patch, bash, coreutils, tar, git, which +BuildRequires: bzip2, xz, findutils, gzip, m4, perl-interpreter, perl-Carp, perl-devel, perl-generators, make, diffutils, gawk +BuildRequires: gcc, binutils, system-rpm-config, hmaccalc, python3-devel +BuildRequires: net-tools, hostname, bc, bison, flex, elfutils-devel, dwarves +BuildRequires: libnl3-devel +%if %{with_doc} +BuildRequires: xmlto, asciidoc, python3-sphinx +%endif +%if %{with_headers} +BuildRequires: rsync +%endif +%if %{with_sparse} +BuildRequires: sparse +%endif +%if %{with_perf} +BuildRequires: zlib-devel binutils-devel newt-devel perl(ExtUtils::Embed) bison flex xz-devel +BuildRequires: audit-libs-devel +BuildRequires: java-devel +BuildRequires: libbpf-devel +BuildRequires: libbabeltrace-devel +BuildRequires: libtraceevent-devel +BuildRequires: numactl-devel +%ifarch aarch64 +BuildRequires: opencsd-devel >= 1.0.0 +%endif +%endif +%if %{with_tools} +BuildRequires: gettext ncurses-devel +BuildRequires: libcap-devel libcap-ng-devel +BuildRequires: pciutils-devel +BuildRequires: openssl-devel +%endif +%if %{with_bpftool} +BuildRequires: python3-docutils +BuildRequires: zlib-devel binutils-devel +%endif +BuildConflicts: rhbuildsys(DiskFree) < 500Mb +%if %{with_debuginfo} +BuildRequires: rpm-build, elfutils +#BuildConflicts: rpm < 4.13.0.1-19 +# 
Most of these should be enabled after more investigation +%undefine _include_minidebuginfo +%undefine _find_debuginfo_dwz_opts +%undefine _unique_build_ids +%undefine _unique_debug_names +%undefine _unique_debug_srcs +%undefine _debugsource_packages +%undefine _debuginfo_subpackages +%global _find_debuginfo_opts -r --keep-section .BTF* +%global _missing_build_ids_terminate_build 1 +%global _no_recompute_build_ids 1 +%endif + +BuildRequires: openssl openssl-devel + +# These below are required to build man pages +%if %{with_perf} +BuildRequires: xmlto +%endif +%if %{with_perf} || %{with_tools} +BuildRequires: asciidoc +%endif + +Source0: linux-%{rpmversion}-%{pkg_release}.tar.xz + +%define modsign_cmd %{SOURCE18} + +Source20: kernel-%{version}-aarch64.config +Source21: kernel-%{version}-aarch64-debug.config +Source39: kernel-%{version}-x86_64.config +Source40: kernel-%{version}-x86_64-debug.config +Source43: generate_bls_conf.sh + + + +# Sources for kernel-tools +Source2000: cpupower.service +Source2001: cpupower.config + +## Patches needed for building this package + +# %%PATCH_LIST%% + +# END OF PATCH DEFINITIONS + +BuildRoot: %{_tmppath}/%{name}-%{KVERREL}-root + +%description +This is the package which provides the Linux kernel for Alibaba Cloud Linux. +It is based on upstream Linux at version %{version} and maintains kABI +compatibility of a set of approved symbols, however it is heavily modified with +backports and fixes pulled from newer upstream Linux %{name} releases. This means +this is not a %{version} kernel anymore: it includes several components which come +from newer upstream linux versions, while maintaining a well tested and stable +core. 
Some of the components/backports that may be pulled in are: changes like
+updates to the core kernel (e.g.: scheduler, cgroups, memory management, security
+fixes and features), updates to block layer, supported filesystems, major driver
+updates for supported hardware in Alibaba Cloud Linux, enhancements for
+enterprise customers, etc.
+
+#
+# This macro does requires, provides, conflicts, obsoletes for a kernel package.
+# %%kernel_reqprovconf
+# It uses any kernel__conflicts and kernel__obsoletes
+# macros defined above.
+#
+%define kernel_reqprovconf \
+Provides: %{name} = %{rpmversion}-%{pkg_release}\
+Provides: %{name}-%{_target_cpu} = %{rpmversion}-%{pkg_release}%{?1:+%{1}}\
+Provides: kernel-drm-nouveau = 16\
+Provides: %{name}-uname-r = %{KVERREL}%{?variant}%{?1:+%{1}}\
+Requires(pre): %{kernel_prereq}\
+Requires(pre): %{initrd_prereq}\
+Requires(pre): linux-firmware >= 20190516-94.git711d3297\
+Requires(preun): systemd >= 200\
+Conflicts: xfsprogs < 4.3.0-1\
+Conflicts: xorg-x11-drv-vmmouse < 13.0.99\
+%{expand:%%{?kernel%{?1:_%{1}}_conflicts:Conflicts: %%{kernel%{?1:_%{1}}_conflicts}}}\
+%{expand:%%{?kernel%{?1:_%{1}}_obsoletes:Obsoletes: %%{kernel%{?1:_%{1}}_obsoletes}}}\
+%{expand:%%{?kernel%{?1:_%{1}}_provides:Provides: %%{kernel%{?1:_%{1}}_provides}}}\
+# We can't let RPM do the dependencies automatically because it'll then pick up\
+# a correct but undesirable perl dependency from the module headers which\
+# isn't required for the kernel proper to function\
+AutoReq: no\
+AutoProv: yes\
+%{nil}
+
+
+%package doc
+Summary: Various documentation bits found in the kernel source
+Group: Documentation
+%description doc
+This package contains documentation files from the kernel
+source. Various bits of information about the Linux kernel and the
+device drivers shipped with it are documented in these files.
+
+You'll want to install this package if you need a reference to the
+options that can be passed to Linux kernel modules at load time.
+ + +%package headers +Summary: Header files for the Linux kernel for use by glibc +Group: Development/System +Obsoletes: glibc-kernheaders < 3.0-46 +Provides: glibc-kernheaders = 3.0-46 +%if "0%{?variant}" +Obsoletes: kernel-headers < %{rpmversion}-%{pkg_release} +Provides: kernel-headers = %{rpmversion}-%{pkg_release} +%endif +%description headers +Kernel-headers includes the C header files that specify the interface +between the Linux kernel and userspace libraries and programs. The +header files define structures and constants that are needed for +building most standard programs and are also needed for rebuilding the +glibc package. + +%package debuginfo-common-%{_target_cpu} +Summary: Kernel source files used by %{name}-debuginfo packages +Group: Development/Debug +Provides: installonlypkg(kernel) +%description debuginfo-common-%{_target_cpu} +This package is required by %{name}-debuginfo subpackages. +It provides the kernel source files common to all builds. + +%if %{with_perf} +%package -n perf +Summary: Performance monitoring for the Linux kernel +Group: Development/System +Requires: bzip2 +License: GPLv2 +%description -n perf +This package contains the perf tool, which enables performance monitoring +of the Linux kernel. + +%package -n perf-debuginfo +Summary: Debug information for package perf +Group: Development/Debug +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n perf-debuginfo +This package provides debug information for the perf package. + +# Note that this pattern only works right to match the .build-id +# symlinks because of the trailing nonmatching alternation and +# the leading .*, because of find-debuginfo.sh's buggy handling +# of matching the pattern against the symlinks file. 
+%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{_bindir}/perf(\.debug)?|.*%%{_libexecdir}/perf-core/.*|.*%%{_libdir}/libperf-jvmti.so(\.debug)?|XXX' -o perf-debuginfo.list} + +%package -n python3-perf +Summary: Python bindings for apps which will manipulate perf events +Group: Development/Libraries +%description -n python3-perf +The python3-perf package contains a module that permits applications +written in the Python programming language to use the interface +to manipulate perf events. + +%package -n python3-perf-debuginfo +Summary: Debug information for package perf python bindings +Group: Development/Debug +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n python3-perf-debuginfo +This package provides debug information for the perf python bindings. + +# the python_sitearch macro should already be defined from above +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{python3_sitearch}/perf.*so(\.debug)?|XXX' -o python3-perf-debuginfo.list} + +# with_perf +%endif + +%if %{with_tools} +%package -n %{name}-tools +Summary: Assortment of tools for the Linux kernel +Group: Development/System +License: GPLv2 +%ifarch %{cpupowerarchs} +Provides: cpupowerutils = 1:009-0.6.p1 +Obsoletes: cpupowerutils < 1:009-0.6.p1 +Provides: cpufreq-utils = 1:009-0.6.p1 +Provides: cpufrequtils = 1:009-0.6.p1 +Obsoletes: cpufreq-utils < 1:009-0.6.p1 +Obsoletes: cpufrequtils < 1:009-0.6.p1 +Obsoletes: cpuspeed < 1:1.5-16 +Requires: %{name}-tools-libs = %{version}-%{release} +%endif +%define __requires_exclude ^%{_bindir}/python +%description -n %{name}-tools +This package contains the tools/ directory from the kernel source +and the supporting documentation. 
+ +%package -n %{name}-tools-libs +Summary: Libraries for the %{name}-tools +Group: Development/System +License: GPLv2 +%description -n %{name}-tools-libs +This package contains the libraries built from the tools/ directory +from the kernel source. + +%package -n %{name}-tools-libs-devel +Summary: Assortment of tools for the Linux kernel +Group: Development/System +License: GPLv2 +Requires: %{name}-tools = %{version}-%{release} +%ifarch %{cpupowerarchs} +Provides: cpupowerutils-devel = 1:009-0.6.p1 +Obsoletes: cpupowerutils-devel < 1:009-0.6.p1 +%endif +Requires: %{name}-tools-libs = %{version}-%{release} +Provides: %{name}-tools-devel +%description -n %{name}-tools-libs-devel +This package contains the development files for the tools/ directory from +the kernel source. + +%package -n %{name}-tools-debuginfo +Summary: Debug information for package %{name}-tools +Group: Development/Debug +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n %{name}-tools-debuginfo +This package provides debug information for package %{name}-tools. + +# Note that this pattern only works right to match the .build-id +# symlinks because of the trailing nonmatching alternation and +# the leading .*, because of find-debuginfo.sh's buggy handling +# of matching the pattern against the symlinks file. 
+%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{_bindir}/centrino-decode(\.debug)?|.*%%{_bindir}/powernow-k8-decode(\.debug)?|.*%%{_bindir}/cpupower(\.debug)?|.*%%{_libdir}/libcpupower.*|.*%%{_bindir}/turbostat(\.debug)?|.*%%{_bindir}/x86_energy_perf_policy(\.debug)?|.*%%{_bindir}/tmon(\.debug)?|.*%%{_bindir}/lsgpio(\.debug)?|.*%%{_bindir}/gpio-hammer(\.debug)?|.*%%{_bindir}/gpio-event-mon(\.debug)?|.*%%{_bindir}/iio_event_monitor(\.debug)?|.*%%{_bindir}/iio_generic_buffer(\.debug)?|.*%%{_bindir}/lsiio(\.debug)?|XXX' -o %{name}-tools-debuginfo.list} + +# with_tools +%endif + +%if %{with_bpftool} + +%package -n bpftool +Summary: Inspection and simple manipulation of eBPF programs and maps +License: GPLv2 +%description -n bpftool +This package contains the bpftool, which allows inspection and simple +manipulation of eBPF programs and maps. + +%package -n bpftool-debuginfo +Summary: Debug information for package bpftool +Group: Development/Debug +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n bpftool-debuginfo +This package provides debug information for the bpftool package. + +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{_sbindir}/bpftool(\.debug)?|XXX' -o bpftool-debuginfo.list} + +# with_bpftool +%endif + +%if %{with_gcov} +%package gcov +Summary: gcov graph and source files for coverage data collection. +Group: Development/System +%description gcov +kernel-gcov includes the gcov graph and source files for gcov coverage collection. +%endif + +# +# This macro creates a kernel--debuginfo package. 
+# %%kernel_debuginfo_package +# +%define kernel_debuginfo_package() \ +%package %{?1:%{1}-}debuginfo\ +Summary: Debug information for package %{name}%{?1:-%{1}}\ +Group: Development/Debug\ +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release}\ +Provides: %{name}%{?1:-%{1}}-debuginfo-%{_target_cpu} = %{version}-%{release}\ +Provides: installonlypkg(kernel)\ +AutoReqProv: no\ +%description %{?1:%{1}-}debuginfo\ +This package provides debug information for package %{name}%{?1:-%{1}}.\ +This is required to use SystemTap with %{name}%{?1:-%{1}}-%{KVERREL}.\ +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '/.*/%%{KVERREL_RE}%{?1:[+]%{1}}/.*|/.*%%{KVERREL_RE}%{?1:\+%{1}}(\.debug)?' -o debuginfo%{?1}.list}\ +%{nil} + +# +# This macro creates a kernel--devel package. +# %%kernel_devel_package +# +%define kernel_devel_package() \ +%package %{?1:%{1}-}devel\ +Summary: Development package for building kernel modules to match the %{?2:%{2} }kernel\ +Group: System Environment/Kernel\ +Provides: %{name}%{?1:-%{1}}-devel-%{_target_cpu} = %{version}-%{release}\ +Provides: %{name}-devel-%{_target_cpu} = %{version}-%{release}%{?1:+%{1}}\ +Provides: %{name}-devel-uname-r = %{KVERREL}%{?variant}%{?1:+%{1}}\ +Provides: installonlypkg(kernel)\ +AutoReqProv: no\ +Requires(pre): findutils\ +Requires: findutils\ +Requires: perl-interpreter\ +%description %{?1:%{1}-}devel\ +This package provides kernel headers and makefiles sufficient to build modules\ +against the %{?2:%{2} }kernel package.\ +%{nil} + +# +# This macro creates a %%{name}- and its -devel and -debuginfo too. 
+# %%define variant_summary The Linux kernel compiled for +# %%kernel_variant_package [-n ] +# +%define kernel_variant_package(n:) \ +%package %{?1:%{1}}\ +Summary: %{variant_summary}\ +Group: System Environment/Kernel\ +Provides: installonlypkg(kernel)\ +Requires: grubby \ +%{expand:%%kernel_reqprovconf}\ +%{expand:%%kernel_devel_package %{?1:%{1}} %{!?-n:%{?1:%{1}}}%{?-n:%{-n*}}}\ +%{expand:%%kernel_debuginfo_package %{?1:%{1}}}\ +%{nil} + +# First the auxiliary packages of the main kernel package. +%kernel_devel_package +%kernel_debuginfo_package + +# Now, each variant package. + +%define variant_summary The Linux kernel compiled with extra debugging enabled +%kernel_variant_package debug +%description debug +The kernel package contains the Linux kernel (vmlinuz), the core of any +Linux operating system. The kernel handles the basic functions +of the operating system: memory allocation, process allocation, device +input and output, etc. + +This variant of the kernel has numerous debugging options enabled. +It should only be installed when trying to gather additional information +on kernel bugs, as some of these options impact performance noticably. + +%prep +# do a few sanity-checks for --with *only builds +%if %{with_baseonly} +%if !%{with_up} +echo "Cannot build --with baseonly, up build is disabled" +exit 1 +%endif +%endif + +# more sanity checking; do it quietly +if [ "%{patches}" != "%%{patches}" ] ; then + for patch in %{patches} ; do + if [ ! -f $patch ] ; then + echo "ERROR: Patch ${patch##/*/} listed in specfile but is missing" + exit 1 + fi + done +fi 2>/dev/null + +patch_command='patch -p1 -F1 -s' +ApplyPatch() +{ + local patch=$1 + shift + if [ ! -f $RPM_SOURCE_DIR/$patch ]; then + exit 1 + fi + if ! grep -E "^Patch[0-9]+: $patch\$" %{_specdir}/${RPM_PACKAGE_NAME%%%%%{?variant}}.spec ; then + if [ "${patch:0:8}" != "patch-4." 
] ; then + echo "ERROR: Patch $patch not listed as a source patch in specfile" + exit 1 + fi + fi 2>/dev/null + case "$patch" in + *.bz2) bunzip2 < "$RPM_SOURCE_DIR/$patch" | sed -n '/^---$/,$p' | $patch_command ${1+"$@"} ;; + *.gz) gunzip < "$RPM_SOURCE_DIR/$patch" | sed -n '/^---$/,$p' | $patch_command ${1+"$@"} ;; + *.xz) unxz < "$RPM_SOURCE_DIR/$patch" | sed -n '/^---$/,$p' | $patch_command ${1+"$@"} ;; + *) sed -n '/^---$/,$p' "$RPM_SOURCE_DIR/$patch" | $patch_command ${1+"$@"} ;; + esac +} + +# don't apply patch if it's empty +ApplyOptionalPatch() +{ + local patch=$1 + shift + if [ ! -f $RPM_SOURCE_DIR/$patch ]; then + exit 1 + fi + local C=$(wc -l $RPM_SOURCE_DIR/$patch | awk '{print $1}') + if [ "$C" -gt 9 ]; then + ApplyPatch $patch ${1+"$@"} + fi +} + +%setup -q -n %{name}-%{rpmversion}-%{pkg_release} -c +mv linux-%{rpmversion}-%{pkg_release} linux-%{KVERREL} + +cd linux-%{KVERREL} + +# Drop some necessary files from the source dir into the buildroot +cp $RPM_SOURCE_DIR/kernel-%{version}-*.config . + +# %%PATCH_APPLICATION%% + +# END OF PATCH APPLICATIONS + +# Any further pre-build tree manipulations happen here. + +chmod +x scripts/checkpatch.pl +mv COPYING COPYING-%{version} + +# This Prevents scripts/setlocalversion from mucking with our version numbers. +touch .scmversion + +# Do not use "ambiguous" python shebangs. RHEL 8 now has a new script +# (/usr/lib/rpm/redhat/brp-mangle-shebangs), which forces us to specify a +# "non-ambiguous" python shebang for scripts we ship in buildroot. This +# script throws an error like below: +# *** ERROR: ambiguous python shebang in /usr/bin/kvm_stat: #!/usr/bin/python. Change it to python3 (or python2) explicitly. +# We patch all sources below for which we got a report/error. 
+pathfix.py -i "%{__python3} %{py3_shbang_opts}" -p -n \ + tools/kvm/kvm_stat/kvm_stat \ + scripts/show_delta \ + scripts/diffconfig \ + scripts/bloat-o-meter \ + scripts/jobserver-exec \ + tools \ + Documentation \ + scripts/clang-tools + +%define make make HOSTCFLAGS="%{?build_hostcflags}" HOSTLDFLAGS="%{?build_hostldflags}" + +# only deal with configs if we are going to build for the arch +%ifnarch %nobuildarches + +rm -rf configs +mkdir configs + +# Remove configs not for the buildarch +for cfg in kernel-%{version}-*.config; do + if [ `echo %{all_arch_configs} | grep -c $cfg` -eq 0 ]; then + rm -f $cfg + fi +done + +# enable GCOV kernel config options if gcov is on +%if %{with_gcov} +for i in *.config +do + sed -i 's/# CONFIG_GCOV_KERNEL is not set/CONFIG_GCOV_KERNEL=y\nCONFIG_GCOV_PROFILE_ALL=y\n/' $i +done +%endif + +# now run oldconfig over all the config files +for i in *.config +do + mv $i .config + Arch=`sed -n 3p .config | cut -d' ' -f2 | cut -d'/' -f2` + make ARCH=$Arch listnewconfig | grep -E '^CONFIG_' >.newoptions || true + if [ -s .newoptions ]; then + cat .newoptions + #exit 1 + fi + rm -f .newoptions + make ARCH=$Arch olddefconfig + echo "# $Arch" > configs/$i + cat .config >> configs/$i +done +# end of kernel config +%endif + +# # End of Configs stuff + +# get rid of unwanted files resulting from patch fuzz +find . \( -name "*.orig" -o -name "*~" \) -exec rm -f {} \; >/dev/null + +# remove unnecessary SCM files +find . -name .gitignore -exec rm -f {} \; >/dev/null + +cd .. 
+ +### +### build +### +%build + +%if %{with_sparse} +%define sparse_mflags C=1 +%endif + +cp_vmlinux() +{ + eu-strip --remove-comment -o "$2" "$1" +} + +BuildKernel() { + MakeTarget=$1 + KernelImage=$2 + Flavour=$3 + Flav=${Flavour:++${Flavour}} + InstallName=${5:-vmlinuz} + + DoModules=1 + + # Pick the right config file for the kernel we're building + Config=kernel-%{version}-%{_target_cpu}${Flavour:+-${Flavour}}.config + DevelDir=/usr/src/kernels/%{KVERREL}${Flav} + + # When the bootable image is just the ELF kernel, strip it. + # We already copy the unstripped file into the debuginfo package. + if [ "$KernelImage" = vmlinux ]; then + CopyKernel=cp_vmlinux + else + CopyKernel=cp + fi + + KernelVer=%{version}-%{release}.%{_target_cpu}${Flav} + echo BUILDING A KERNEL FOR ${Flavour} %{_target_cpu}... + + # make sure EXTRAVERSION says what we want it to say + perl -p -i -e "s/^EXTRAVERSION.*/EXTRAVERSION = -%{release}.%{_target_cpu}${Flav}/" Makefile + + # and now to start the build process + + %{make} -s %{?_smp_mflags} mrproper + cp configs/$Config .config + + %if %{signmodules} + cp %{SOURCE11} certs/. + cp %{SOURCE12} certs/. 
+ %endif + + Arch=`head -1 .config | cut -b 3-` + echo USING ARCH=$Arch + + KCFLAGS="%{?kcflags}" + + # add kpatch flags for base kernel + if [ "$Flavour" == "" ]; then + KCFLAGS="$KCFLAGS %{?kpatch_kcflags}" + fi + + %{make} -s ARCH=$Arch olddefconfig >/dev/null + %{make} -s ARCH=$Arch V=1 %{?_smp_mflags} KCFLAGS="$KCFLAGS" WITH_GCOV="%{?with_gcov}" $MakeTarget %{?sparse_mflags} %{?kernel_mflags} + if [ $DoModules -eq 1 ]; then + %{make} -s ARCH=$Arch V=1 %{?_smp_mflags} KCFLAGS="$KCFLAGS" WITH_GCOV="%{?with_gcov}" modules %{?sparse_mflags} || exit 1 + fi + + mkdir -p $RPM_BUILD_ROOT/%{image_install_path} + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer +%if %{with_debuginfo} + mkdir -p $RPM_BUILD_ROOT%{debuginfodir}/%{image_install_path} +%endif + +%ifarch aarch64 + %{make} -s ARCH=$Arch V=1 dtbs dtbs_install INSTALL_DTBS_PATH=$RPM_BUILD_ROOT/%{image_install_path}/dtb-$KernelVer + cp -r $RPM_BUILD_ROOT/%{image_install_path}/dtb-$KernelVer $RPM_BUILD_ROOT/lib/modules/$KernelVer/dtb + find arch/$Arch/boot/dts -name '*.dtb' -type f | xargs rm -f +%endif + + # Start installing the results + install -m 644 .config $RPM_BUILD_ROOT/boot/config-$KernelVer + install -m 644 .config $RPM_BUILD_ROOT/lib/modules/$KernelVer/config + install -m 644 System.map $RPM_BUILD_ROOT/boot/System.map-$KernelVer + install -m 644 System.map $RPM_BUILD_ROOT/lib/modules/$KernelVer/System.map + + # We estimate the size of the initramfs because rpm needs to take this size + # into consideration when performing disk space calculations. 
(See bz #530778) + dd if=/dev/zero of=$RPM_BUILD_ROOT/boot/initramfs-$KernelVer.img bs=1M count=20 + + if [ -f arch/$Arch/boot/zImage.stub ]; then + cp arch/$Arch/boot/zImage.stub $RPM_BUILD_ROOT/%{image_install_path}/zImage.stub-$KernelVer || : + cp arch/$Arch/boot/zImage.stub $RPM_BUILD_ROOT/lib/modules/$KernelVer/zImage.stub-$KernelVer || : + fi + + $CopyKernel $KernelImage \ + $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer + chmod 755 $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer + cp $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer $RPM_BUILD_ROOT/lib/modules/$KernelVer/$InstallName + + # hmac sign the kernel for FIPS + echo "Creating hmac file: $RPM_BUILD_ROOT/%{image_install_path}/.vmlinuz-$KernelVer.hmac" + ls -l $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer + sha512hmac $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer | sed -e "s,$RPM_BUILD_ROOT,," > $RPM_BUILD_ROOT/%{image_install_path}/.vmlinuz-$KernelVer.hmac; + cp $RPM_BUILD_ROOT/%{image_install_path}/.vmlinuz-$KernelVer.hmac $RPM_BUILD_ROOT/lib/modules/$KernelVer/.vmlinuz.hmac + + if [ $DoModules -eq 1 ]; then + # Override $(mod-fw) because we don't want it to install any firmware + # we'll get it from the linux-firmware package and we don't want conflicts + %{make} -s %{?_smp_mflags} ARCH=$Arch INSTALL_MOD_PATH=$RPM_BUILD_ROOT modules_install KERNELRELEASE=$KernelVer mod-fw= + fi + +%if %{with_gcov} + # install gcov-needed files to $BUILDROOT/$BUILD/...: + # gcov_info->filename is absolute path + # gcno references to sources can use absolute paths (e.g. in out-of-tree builds) + # sysfs symlink targets (set up at compile time) use absolute paths to BUILD dir + find . \( -name '*.gcno' -o -name '*.[chS]' \) -exec install -D '{}' "$RPM_BUILD_ROOT/$(pwd)/{}" \; +%endif + + if [ $DoVDSO -ne 0 ]; then + %{make} -s ARCH=$Arch INSTALL_MOD_PATH=$RPM_BUILD_ROOT vdso_install KERNELRELEASE=$KernelVer + if [ ! 
-s ldconfig-kernel.conf ]; then + echo > ldconfig-kernel.conf "\ + # Placeholder file, no vDSO hwcap entries used in this kernel." + fi + %{__install} -D -m 444 ldconfig-kernel.conf \ + $RPM_BUILD_ROOT/etc/ld.so.conf.d/%{name}-$KernelVer.conf + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/vdso/.build-id + fi + + # And save the headers/makefiles etc for building modules against + # + # This all looks scary, but the end result is supposed to be: + # * all arch relevant include/ files + # * all Makefile/Kconfig files + # * all script/ files + + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/source + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + (cd $RPM_BUILD_ROOT/lib/modules/$KernelVer ; ln -s build source) + # dirs for additional modules per module-init-tools, kbuild/modules.txt + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/updates + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/weak-updates + # first copy everything + cp --parents `find -type f -name "Makefile*" -o -name "Kconfig*"` $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + cp Module.symvers $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + cp System.map $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + if [ -s Module.markers ]; then + cp Module.markers $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + fi + + # create the kABI metadata for use in packaging + # NOTENOTE: the name symvers is used by the rpm backend + # NOTENOTE: to discover and run the /usr/lib/rpm/fileattrs/kabi.attr + # NOTENOTE: script which dynamically adds exported kernel symbol + # NOTENOTE: checksums to the rpm metadata provides list. 
+ # NOTENOTE: if you change the symvers name, update the backend too + echo "**** GENERATING kernel ABI metadata ****" + gzip -c9 < Module.symvers > $RPM_BUILD_ROOT/boot/symvers-$KernelVer.gz + cp $RPM_BUILD_ROOT/boot/symvers-$KernelVer.gz $RPM_BUILD_ROOT/lib/modules/$KernelVer/symvers.gz + + # then drop all but the needed Makefiles/Kconfig files + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/Documentation + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include + cp .config $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + cp -a scripts $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/tracing + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/spdxcheck.py + if [ -f tools/objtool/objtool ]; then + cp -a tools/objtool/objtool $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/tools/objtool/ || : + fi + if [ -d arch/$Arch/scripts ]; then + cp -a arch/$Arch/scripts $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/arch/%{_arch} || : + fi + if [ -f arch/$Arch/*lds ]; then + cp -a arch/$Arch/*lds $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/arch/%{_arch}/ || : + fi + if [ -f arch/%{asmarch}/kernel/module.lds ]; then + cp -a --parents arch/%{asmarch}/kernel/module.lds $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + fi + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/*.o + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/*/*.o + if [ -d arch/%{asmarch}/include ]; then + cp -a --parents arch/%{asmarch}/include $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + fi +%ifarch aarch64 + # arch/arm64/include/asm/xen references arch/arm + cp -a --parents arch/arm/include/asm/xen $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + # arch/arm64/include/asm/opcodes.h references arch/arm + cp -a --parents arch/arm/include/asm/opcodes.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ +%endif + cp -a include 
$RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include +%ifarch x86_64 + # files for 'make prepare' to succeed with kernel-devel + cp -a --parents arch/x86/entry/syscalls/syscall_32.tbl $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/entry/syscalls/syscall_64.tbl $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs_32.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs_64.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs_common.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents tools/include/tools/le_byteshift.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/purgatory.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/stack.S $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/setup-x86_64.S $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/entry64.S $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/boot/string.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/boot/string.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/boot/ctype.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + + cp -a --parents scripts/syscalltbl.sh $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents scripts/syscallhdr.sh $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ +%endif + # Make sure the Makefile, version.h, and auto.conf have a matching + # timestamp so that external modules can be built + touch -r $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/Makefile \ + $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/generated/uapi/linux/version.h \ + 
$RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/config/auto.conf + +%if %{with_debuginfo} + eu-readelf -n vmlinux | grep "Build ID" | awk '{print $NF}' > vmlinux.id + cp vmlinux.id $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/vmlinux.id + + # + # save the vmlinux file for kernel debugging into the kernel-debuginfo rpm + # + mkdir -p $RPM_BUILD_ROOT%{debuginfodir}/lib/modules/$KernelVer + cp vmlinux $RPM_BUILD_ROOT%{debuginfodir}/lib/modules/$KernelVer +%endif + + find $RPM_BUILD_ROOT/lib/modules/$KernelVer -name "*.ko" -type f >modnames + + # mark modules executable so that strip-to-file can strip them + xargs --no-run-if-empty chmod u+x < modnames + + # Generate a list of modules for block and networking. + + grep -F /drivers/ modnames | xargs --no-run-if-empty nm -upA | + sed -n 's,^.*/\([^/]*\.ko\): *U \(.*\)$,\1 \2,p' > drivers.undef + + collect_modules_list() + { + sed -r -n -e "s/^([^ ]+) \\.?($2)\$/\\1/p" drivers.undef | + LC_ALL=C sort -u > $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 + if [ ! 
-z "$3" ]; then + sed -r -e "/^($3)\$/d" -i $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 + fi + } + + collect_modules_list networking \ + 'register_netdev|ieee80211_register_hw|usbnet_probe|phy_driver_register|rt(l_|2x00)(pci|usb)_probe|register_netdevice' + collect_modules_list block \ + 'ata_scsi_ioctl|scsi_add_host|scsi_add_host_with_dma|blk_alloc_queue|blk_init_queue|register_mtd_blktrans|scsi_esp_register|scsi_register_device_handler|blk_queue_physical_block_size' 'pktcdvd.ko|dm-mod.ko' + collect_modules_list drm \ + 'drm_open|drm_init' + collect_modules_list modesetting \ + 'drm_crtc_init' + + # detect missing or incorrect license tags + ( find $RPM_BUILD_ROOT/lib/modules/$KernelVer -name '*.ko' | xargs /sbin/modinfo -l | \ + grep -E -v 'GPL( v2)?$|Dual BSD/GPL$|Dual MPL/GPL$|GPL and additional rights$' ) && exit 1 + + # remove files that will be auto generated by depmod at rpm -i time + pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/ + rm -f modules.{alias*,builtin.bin,dep*,*map,symbols*,devname,softdep} + popd + + # Copy the System.map file for depmod to use, and create a backup of the + # full module tree so we can restore it after we're done filtering + cp System.map $RPM_BUILD_ROOT/. + pushd $RPM_BUILD_ROOT + + if [ $DoModules -eq 1 ]; then + + # Run depmod on the resulting module tree and make sure it isn't broken + depmod -b . 
-aeF ./System.map $KernelVer &> depmod.out + if [ -s depmod.out ]; then + echo "Depmod failure" + cat depmod.out + exit 1 + else + rm depmod.out + fi + else + # Ensure important files/directories exist to let the packaging succeed + mkdir -p lib/modules/$KernelVer/kernel + # Add files usually created by make modules, needed to prevent errors + # thrown by depmod during package installation + touch lib/modules/$KernelVer/modules.order + touch lib/modules/$KernelVer/modules.builtin + fi + + # remove files that will be auto generated by depmod at rpm -i time + pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/ + rm -f modules.{alias*,builtin.bin,dep*,*map,symbols*,devname,softdep} + popd + + # Cleanup + rm System.map + popd + +%if %{signmodules} + if [ $DoModules -eq 1 ]; then + # Save the signing keys so we can sign the modules in __modsign_install_post + cp certs/signing_key.pem certs/signing_key.pem.sign${Flav} + cp certs/signing_key.x509 certs/signing_key.x509.sign${Flav} + fi +%endif + + # Move the devel headers out of the root file system + mkdir -p $RPM_BUILD_ROOT/usr/src/kernels + mv $RPM_BUILD_ROOT/lib/modules/$KernelVer/build $RPM_BUILD_ROOT/$DevelDir + + # This is going to create a broken link during the build, but we don't use + # it after this point. We need the link to actually point to something + # when kernel-devel is installed, and a relative link doesn't work across + # the F17 UsrMove feature. + ln -sf $DevelDir $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + + # prune junk from kernel-devel + find $RPM_BUILD_ROOT/usr/src/kernels -name ".*.cmd" -exec rm -f {} \; + + # build a BLS config for this kernel + %{SOURCE43} "$KernelVer" "$RPM_BUILD_ROOT" "%{?variant}" +} + +### +# DO it... 
+### + +# prepare directories +rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT/boot +mkdir -p $RPM_BUILD_ROOT%{_libexecdir} + +cd linux-%{KVERREL} + + +%if %{with_debug} +BuildKernel %make_target %kernel_image debug +%endif + +%if %{with_up} +BuildKernel %make_target %kernel_image +%endif + +%global perf_make \ + make EXTRA_CFLAGS="${RPM_OPT_FLAGS}" LDFLAGS="%{__global_ldflags}" -C tools/perf V=1 NO_PERF_READ_VDSO32=1 NO_PERF_READ_VDSOX32=1 WERROR=0 NO_LIBUNWIND=1 HAVE_CPLUS_DEMANGLE=1 NO_GTK2=1 NO_STRLCPY=1 NO_BIONIC=1 LIBBPF_DYNAMIC=1 LIBTRACEEVENT_DYNAMIC=1 %{?perf_build_extra_opts} prefix=%{_prefix} PYTHON=%{__python3} +%if %{with_perf} +# perf +# make sure check-headers.sh is executable +chmod +x tools/perf/check-headers.sh +%{perf_make} DESTDIR=$RPM_BUILD_ROOT all +%endif + +%global tools_make \ + %{make} V=1 CFLAGS="${RPM_OPT_FLAGS}" LDFLAGS="%{__global_ldflags}" + +%if %{with_tools} +%ifarch %{cpupowerarchs} +# cpupower +# make sure version-gen.sh is executable. +chmod +x tools/power/cpupower/utils/version-gen.sh +%{tools_make} -C tools/power/cpupower CPUFREQ_BENCH=false DEBUG=false +%ifarch x86_64 + pushd tools/power/cpupower/debug/x86_64 + %{tools_make} centrino-decode powernow-k8-decode + popd +%endif +%ifarch x86_64 + pushd tools/power/x86/x86_energy_perf_policy/ + %{tools_make} + popd + pushd tools/power/x86/turbostat + %{tools_make} + popd + pushd tools/power/x86/intel-speed-select + %{make} + popd +%endif +%endif +pushd tools/thermal/tmon/ +%{make} V=1 +popd +pushd tools/iio/ +%{make} V=1 +popd +pushd tools/gpio/ +%{make} V=1 +popd +# build MM tools +pushd tools/mm/ +%{make} V=1 slabinfo page_owner_sort page-types +popd +%endif + +%global bpftool_make \ + make EXTRA_CFLAGS="${RPM_OPT_FLAGS}" EXTRA_LDFLAGS="%{__global_ldflags}" DESTDIR=$RPM_BUILD_ROOT V=1 +%if %{with_bpftool} +pushd tools/bpf/bpftool +%{bpftool_make} +popd +%endif + +%if %{with_doc} +# Make the HTML pages. 
+make htmldocs || %{doc_build_fail} + +# sometimes non-world-readable files sneak into the kernel source tree +chmod -R a=rX Documentation +find Documentation -type d | xargs chmod u+w +%endif + +# In the modsign case, we do 3 things. 1) We check the "flavour" and hard +# code the value in the following invocations. This is somewhat sub-optimal +# but we're doing this inside of an RPM macro and it isn't as easy as it +# could be because of that. 2) We restore the .tmp_versions/ directory from +# the one we saved off in BuildKernel above. This is to make sure we're +# signing the modules we actually built/installed in that flavour. 3) We +# grab the arch and invoke mod-sign.sh command to actually sign the modules. +# +# We have to do all of those things _after_ find-debuginfo runs, otherwise +# that will strip the signature off of the modules. + +%define __modsign_install_post \ + if [ "%{signmodules}" -eq "1" ]; then \ + if [ "%{with_debug}" -ne "0" ]; then \ + %{modsign_cmd} certs/signing_key.pem.sign+debug certs/signing_key.x509.sign+debug $RPM_BUILD_ROOT/lib/modules/%{KVERREL}+debug/ \ + fi \ + if [ "%{with_up}" -ne "0" ]; then \ + %{modsign_cmd} certs/signing_key.pem.sign certs/signing_key.x509.sign $RPM_BUILD_ROOT/lib/modules/%{KVERREL}/ \ + fi \ + fi \ +%{nil} + +### +### Special hacks for debuginfo subpackages. +### + +# This macro is used by %%install, so we must redefine it before that. +%define debug_package %{nil} + +%if %{with_debuginfo} + +%ifnarch noarch +%global __debug_package 1 +%files -f debugfiles.list debuginfo-common-%{_target_cpu} +%defattr(-,root,root) +%endif + +%endif + +# +# Disgusting hack alert! We need to ensure we sign modules *after* all +# invocations of strip occur, which is in __debug_install_post if +# find-debuginfo.sh runs, and __os_install_post if not. 
+# +%define __spec_install_post \ + %{?__debug_package:%{__debug_install_post}}\ + %{__arch_install_post}\ + %{__os_install_post}\ + %{__modsign_install_post} + +### +### install +### + +%install + +cd linux-%{KVERREL} + +%if %{with_doc} +docdir=$RPM_BUILD_ROOT%{_datadir}/doc/kernel-doc-%{rpmversion} + +# copy the source over +mkdir -p $docdir +tar -h -f - --exclude=man --exclude='.*' -c Documentation | tar xf - -C $docdir + +# with_doc +%endif + +# We have to do the headers install before the tools install because the +# kernel headers_install will remove any header files in /usr/include that +# it doesn't install itself. + +%if %{with_headers} +# Install kernel headers +%{make} ARCH=%{hdrarch} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install + +find $RPM_BUILD_ROOT/usr/include \ + \( -name .install -o -name .check -o \ + -name ..install.cmd -o -name ..check.cmd \) -delete + +%endif + +%if %{with_perf} +# perf tool binary and supporting scripts/binaries +%{perf_make} DESTDIR=$RPM_BUILD_ROOT lib=%{_lib} install-bin +# remove the 'trace' symlink. +rm -f %{buildroot}%{_bindir}/trace + +# For both of the below, yes, this should be using a macro but right now +# it's hard coded and we don't actually want it anyway right now. +# Whoever wants examples can fix it up! + +# remove examples +rm -rf %{buildroot}/usr/lib/perf/examples +rm -rf %{buildroot}/usr/lib/perf/include + +# python-perf extension +%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-python_ext + +# perf man pages (note: implicit rpm magic compresses them later) +mkdir -p %{buildroot}/%{_mandir}/man1 +%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-man + +# remove any tracevent files, eg. its plugins still gets built and installed, +# even if we build against system's libtracevent during perf build (by setting +# LIBTRACEEVENT_DYNAMIC=1 above in perf_make macro). Those files should already +# ship with libtraceevent package. 
+rm -rf %{buildroot}%{_libdir}/traceevent +%endif + +%if %{with_tools} +%ifarch %{cpupowerarchs} +%{make} -C tools/power/cpupower DESTDIR=$RPM_BUILD_ROOT libdir=%{_libdir} mandir=%{_mandir} CPUFREQ_BENCH=false install +rm -f %{buildroot}%{_libdir}/*.{a,la} +%find_lang cpupower +mv cpupower.lang ../ +%ifarch x86_64 + pushd tools/power/cpupower/debug/x86_64 + install -m755 centrino-decode %{buildroot}%{_bindir}/centrino-decode + install -m755 powernow-k8-decode %{buildroot}%{_bindir}/powernow-k8-decode + popd +%endif +chmod 0755 %{buildroot}%{_libdir}/libcpupower.so* +mkdir -p %{buildroot}%{_unitdir} %{buildroot}%{_sysconfdir}/sysconfig +install -m644 %{SOURCE2000} %{buildroot}%{_unitdir}/cpupower.service +install -m644 %{SOURCE2001} %{buildroot}%{_sysconfdir}/sysconfig/cpupower +%endif +%ifarch x86_64 + mkdir -p %{buildroot}%{_mandir}/man8 + pushd tools/power/x86/x86_energy_perf_policy + %{tools_make} DESTDIR=%{buildroot} install + popd + pushd tools/power/x86/turbostat + %{tools_make} DESTDIR=%{buildroot} install + popd + pushd tools/power/x86/intel-speed-select + %{make} DESTDIR=%{buildroot} install + popd +%endif +pushd tools/thermal/tmon +%{make} V=1 INSTALL_ROOT=%{buildroot} install +popd +pushd tools/iio +%{make} V=1 DESTDIR=%{buildroot} install +popd +pushd tools/gpio +%{make} V=1 DESTDIR=%{buildroot} install +popd +pushd tools/kvm/kvm_stat +make INSTALL_ROOT=%{buildroot} install-tools +make INSTALL_ROOT=%{buildroot} install-man +popd +# install MM tools +pushd tools/mm/ +install -m755 slabinfo %{buildroot}%{_bindir}/slabinfo +install -m755 page_owner_sort %{buildroot}%{_bindir}/page_owner_sort +install -m755 page-types %{buildroot}%{_bindir}/page-types +popd +%endif + +%if %{with_bpftool} +pushd tools/bpf/bpftool +%{bpftool_make} prefix=%{_prefix} bash_compdir=%{_sysconfdir}/bash_completion.d/ mandir=%{_mandir} install doc-install +popd +%endif + +# We have to do the headers checksum calculation after the tools install because +# these might end up 
installing their own set of headers on top of kernel's +%if %{with_headers} +# compute a content hash to export as Provides: kernel-headers-checksum +HEADERS_CHKSUM=$(export LC_ALL=C; find $RPM_BUILD_ROOT/usr/include -type f -name "*.h" \ + ! -path $RPM_BUILD_ROOT/usr/include/linux/version.h | \ + sort | xargs cat | sha1sum - | cut -f 1 -d ' '); +# export the checksum via usr/include/linux/version.h, so the dynamic +# find-provides can grab the hash to update it accordingly +echo "#define KERNEL_HEADERS_CHECKSUM \"$HEADERS_CHKSUM\"" >> $RPM_BUILD_ROOT/usr/include/linux/version.h +%endif + +### +### clean +### + +%clean +rm -rf $RPM_BUILD_ROOT + +### +### scripts +### + +%if %{with_tools} +%post -n %{name}-tools-libs +/sbin/ldconfig + +%postun -n %{name}-tools-libs +/sbin/ldconfig +%endif + +# +# This macro defines a %%post script for a kernel*-devel package. +# %%kernel_devel_post [] +# +%define kernel_devel_post() \ +%{expand:%%post %{?1:%{1}-}devel}\ +if [ -f /etc/sysconfig/kernel ]\ +then\ + . /etc/sysconfig/kernel || exit $?\ +fi\ +if [ "$HARDLINK" != "no" -a -x /usr/sbin/hardlink ]\ +then\ + (cd /usr/src/kernels/%{KVERREL}%{?1:+%{1}} &&\ + /usr/bin/find . -type f | while read f; do\ + hardlink -c /usr/src/kernels/*%{?dist}.*/$f $f\ + done)\ +fi\ +%{nil} + +# This macro defines a %%posttrans script for a kernel package. +# %%kernel_variant_posttrans [] +# More text can follow to go at the end of this variant's %%post. +# +%define kernel_variant_posttrans() \ +%{expand:%%posttrans %{?1:%{1}}}\ +if [ -x %{_sbindir}/weak-modules ]\ +then\ + %{_sbindir}/weak-modules --add-kernel %{KVERREL}%{?1:+%{1}} || exit $?\ +fi\ +/bin/kernel-install add %{KVERREL}%{?1:+%{1}} /lib/modules/%{KVERREL}%{?1:+%{1}}/vmlinuz || exit $?\ +%{nil} + +# +# This macro defines a %%post script for a kernel package and its devel package. +# %%kernel_variant_post [-v ] [-r ] +# More text can follow to go at the end of this variant's %%post. 
+# +%define kernel_variant_post(v:r:) \ +%{expand:%%kernel_devel_post %{?-v*}}\ +%{expand:%%kernel_variant_posttrans %{?-v*}}\ +%{expand:%%post %{?-v*}}\ +%{-r:\ +if [ `uname -i` == "x86_64" -o `uname -i` == "i386" ] &&\ + [ -f /etc/sysconfig/kernel ]; then\ + /bin/sed -r -i -e 's/^DEFAULTKERNEL=%{-r*}$/DEFAULTKERNEL=kernel%{?-v:-%{-v*}}/' /etc/sysconfig/kernel || exit $?\ +fi}\ +%{nil} + +# +# This macro defines a %%preun script for a kernel package. +# %%kernel_variant_preun +# +%define kernel_variant_preun() \ +%{expand:%%preun %{?1}}\ +/bin/kernel-install remove %{KVERREL}%{?1:+%{1}} /lib/modules/%{KVERREL}%{?1:+%{1}}/vmlinuz || exit $?\ +if [ -x %{_sbindir}/weak-modules ]\ +then\ + %{_sbindir}/weak-modules --remove-kernel %{KVERREL}%{?1:+%{1}} || exit $?\ +fi\ +%{nil} + +%kernel_variant_preun +%kernel_variant_post -r kernel-smp + +%kernel_variant_preun debug +%kernel_variant_post -v debug + +if [ -x /sbin/ldconfig ] +then + /sbin/ldconfig -X || exit $? +fi + +### +### file lists +### + +%if %{with_headers} +%files headers +%defattr(-,root,root) +/usr/include/* +%endif + +# only some architecture builds need kernel-doc +%if %{with_doc} +%files doc +%defattr(-,root,root) +%{_datadir}/doc/kernel-doc-%{rpmversion}/Documentation/* +%dir %{_datadir}/doc/kernel-doc-%{rpmversion}/Documentation +%dir %{_datadir}/doc/kernel-doc-%{rpmversion} +%endif + +%if %{with_perf} +%files -n perf +%{_bindir}/perf +%{_libdir}/libperf-jvmti.so +%dir %{_libexecdir}/perf-core +%{_libexecdir}/perf-core/* +%{_datadir}/perf-core/* +%{_mandir}/man[1-8]/perf* +%{_sysconfdir}/bash_completion.d/perf +%doc linux-%{KVERREL}/tools/perf/Documentation/examples.txt +%{_docdir}/perf-tip/tips.txt + +%files -n python3-perf +%{python3_sitearch}/* + +%if %{with_debuginfo} +%files -f perf-debuginfo.list -n perf-debuginfo + +%files -f python3-perf-debuginfo.list -n python3-perf-debuginfo +%endif +# with_perf +%endif + +%if %{with_tools} +%ifarch %{cpupowerarchs} +%defattr(-,root,root) +%files -n 
%{name}-tools -f cpupower.lang +%{_bindir}/cpupower +%{_datadir}/bash-completion/completions/cpupower +%ifarch x86_64 +%{_bindir}/centrino-decode +%{_bindir}/powernow-k8-decode +%endif +%{_unitdir}/cpupower.service +%{_mandir}/man[1-8]/cpupower* +%config(noreplace) %{_sysconfdir}/sysconfig/cpupower +%ifarch x86_64 +%{_bindir}/x86_energy_perf_policy +%{_mandir}/man8/x86_energy_perf_policy* +%{_bindir}/turbostat +%{_mandir}/man8/turbostat* +%{_bindir}/intel-speed-select +%endif +# !cpupowerarchs +%else +%files -n %{name}-tools +%defattr(-,root,root) +# cpupowerarchs +%endif +%{_bindir}/tmon +%{_bindir}/iio_event_monitor +%{_bindir}/iio_generic_buffer +%{_bindir}/lsiio +%{_bindir}/lsgpio +%{_bindir}/gpio-hammer +%{_bindir}/gpio-event-mon +%{_bindir}/gpio-watch +%{_mandir}/man1/kvm_stat* +%{_bindir}/kvm_stat +%{_bindir}/page_owner_sort +%{_bindir}/slabinfo +%{_bindir}/page-types + +%if %{with_debuginfo} +%files -f %{name}-tools-debuginfo.list -n %{name}-tools-debuginfo +%defattr(-,root,root) +%endif + +%ifarch %{cpupowerarchs} +%files -n %{name}-tools-libs +%{_libdir}/libcpupower.so.1 +%{_libdir}/libcpupower.so.0.0.1 + +%files -n %{name}-tools-libs-devel +%{_libdir}/libcpupower.so +%{_includedir}/cpufreq.h +%endif +# with_tools +%endif + +%if %{with_bpftool} +%files -n bpftool +%{_sbindir}/bpftool +%{_sysconfdir}/bash_completion.d/bpftool +%{_mandir}/man8/bpftool-cgroup.8.* +%{_mandir}/man8/bpftool-map.8.* +%{_mandir}/man8/bpftool-prog.8.* +%{_mandir}/man8/bpftool-perf.8.* +%{_mandir}/man8/bpftool.8.* +%{_mandir}/man8/bpftool-btf.8.* +%{_mandir}/man8/bpftool-feature.8.* +%{_mandir}/man8/bpftool-gen.8.* +%{_mandir}/man8/bpftool-iter.8.* +%{_mandir}/man8/bpftool-link.8.* +%{_mandir}/man8/bpftool-net.8.* +%{_mandir}/man8/bpftool-struct_ops.8.* + +%if %{with_debuginfo} +%files -f bpftool-debuginfo.list -n bpftool-debuginfo +%defattr(-,root,root) +%endif +%endif + +# empty meta-package +%ifnarch %nobuildarches noarch +%files +%defattr(-,root,root) +%endif + +%if 
%{with_gcov} +%ifarch x86_64 aarch64 +%files gcov +%defattr(-,root,root) +%{_builddir} +%endif +%endif + +# This is %%{image_install_path} on an arch where that includes ELF files, +# or empty otherwise. +%define elf_image_install_path %{?kernel_image_elf:%{image_install_path}} + +# +# This macro defines the %%files sections for a kernel package +# and its devel and debuginfo packages. +# %%kernel_variant_files [-k vmlinux] +# +%define kernel_variant_files(k:) \ +%if %{1}\ +%{expand:%%files %{?2}}\ +%defattr(-,root,root)\ +%{!?_licensedir:%global license %%doc}\ +%license linux-%{KVERREL}/COPYING-%{version}\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/%{?-k:%{-k*}}%{!?-k:vmlinuz}\ +%ghost /%{image_install_path}/%{?-k:%{-k*}}%{!?-k:vmlinuz}-%{KVERREL}%{?2:+%{2}}\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/.vmlinuz.hmac \ +%ghost /%{image_install_path}/.vmlinuz-%{KVERREL}%{?2:+%{2}}.hmac \ +%ifarch aarch64\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/dtb \ +%ghost /%{image_install_path}/dtb-%{KVERREL}%{?2:+%{2}} \ +%endif\ +%attr(0600, root, root) /lib/modules/%{KVERREL}%{?2:+%{2}}/System.map\ +%attr(0600, root, root) /boot/System.map-%{KVERREL}%{?2:+%{2}}\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/symvers.gz\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/config\ +%attr(0600, root, root) /boot/symvers-%{KVERREL}%{?2:+%{2}}.gz\ +%attr(0600, root, root) /boot/initramfs-%{KVERREL}%{?2:+%{2}}.img\ +%attr(0644, root, root) /boot/config-%{KVERREL}%{?2:+%{2}}\ +%dir /lib/modules\ +%dir /lib/modules/%{KVERREL}%{?2:+%{2}}\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/kernel\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/build\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/source\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/updates\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/weak-updates\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/bls.conf\ +/lib/modules/%{KVERREL}%{?2:+%{2}}/modules.*\ +%{expand:%%files %{?2:%{2}-}devel}\ +%defattr(-,root,root)\ +%defverify(not mtime)\ +/usr/src/kernels/%{KVERREL}%{?2:+%{2}}\ +%if %{with_debuginfo}\ +%ifnarch noarch\ 
+%{expand:%%files -f debuginfo%{?2}.list %{?2:%{2}-}debuginfo}\ +%defattr(-,root,root)\ +%endif\ +%endif\ +%endif\ +%{nil} + +%kernel_variant_files %{with_up} +%kernel_variant_files %{with_debug} debug + +# plz don't put in a version string unless you're going to tag +# and build. +# +# +%changelog + -- Gitee From 37de1eb8eba2c3377da482c6ee3ed9dda24be0c3 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Fri, 29 Dec 2023 14:33:05 +0800 Subject: [PATCH 0003/2138] anolis: spec: avoid override system %rpmversion definition ANBZ: #7810 It will report such error on Anolis 23 baseos: > # yum-builddep -y kernel.spec > Last metadata expiration check: 0:21:45 ago on Thu Dec 28 09:02:26 2023. > RPM: error: kernel.spec: line 11: Macro %rpmversion is a built-in (%define) > Failed to open: 'kernel.spec', not a valid spec file: can't parse specfile > > Error: Some packages could not be found. The root cause is that higher version rpmbuild has built-in the variable %rpmversion, which is confilict with kernel.spec. So replace variable %rpmversion to %kernelversion. 
Fixes: 94293ca20de2("anolis: spec: add basic framework to generate rpm tree") Signed-off-by: Qiao Ma --- anolis/rpm/kernel.spec.template | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template index 0c270e7981fa..ff6c6285960b 100644 --- a/anolis/rpm/kernel.spec.template +++ b/anolis/rpm/kernel.spec.template @@ -14,7 +14,7 @@ # define buildid .local %global dist %%DIST%% -%define rpmversion %%DIST_KERNELVERSION%% +%define kernelversion %%DIST_KERNELVERSION%% %define pkgrelease %%DIST_PKGRELEASEVERION%% # allow pkg_release to have configurable %%{?dist} tag @@ -58,9 +58,6 @@ # should we do C=1 builds with sparse %define with_sparse %{?_with_sparse: 1} %{?!_with_sparse: 0} -# The kernel tarball/base version -%define kversion 5.10 - %define with_gcov %{?_with_gcov: 1} %{?!_with_gcov: 0} # turn off debug kernel for gcov builds @@ -171,7 +168,7 @@ Name: kernel%{?variant} Group: System Environment/Kernel License: GPLv2 and Redistributable, no modification permitted URL: http://www.kernel.org/ -Version: %{rpmversion} +Version: %{kernelversion} Release: %{pkg_release} Summary: The Linux kernel, based on version %{version}, heavily modified with backports # DO NOT CHANGE THE 'ExclusiveArch' LINE TO TEMPORARILY EXCLUDE AN ARCHITECTURE BUILD. @@ -246,7 +243,7 @@ BuildRequires: xmlto BuildRequires: asciidoc %endif -Source0: linux-%{rpmversion}-%{pkg_release}.tar.xz +Source0: linux-%{kernelversion}-%{pkg_release}.tar.xz %define modsign_cmd %{SOURCE18} @@ -290,8 +287,8 @@ enterprise customers, etc. # macros defined above. 
# %define kernel_reqprovconf \ -Provides: %{name} = %{rpmversion}-%{pkg_release}\ -Provides: %{name}-%{_target_cpu} = %{rpmversion}-%{pkg_release}%{?1:+%{1}}\ +Provides: %{name} = %{kernelversion}-%{pkg_release}\ +Provides: %{name}-%{_target_cpu} = %{kernelversion}-%{pkg_release}%{?1:+%{1}}\ Provides: kernel-drm-nouveau = 16\ Provides: %{name}-uname-r = %{KVERREL}%{?variant}%{?1:+%{1}}\ Requires(pre): %{kernel_prereq}\ @@ -329,8 +326,8 @@ Group: Development/System Obsoletes: glibc-kernheaders < 3.0-46 Provides: glibc-kernheaders = 3.0-46 %if "0%{?variant}" -Obsoletes: kernel-headers < %{rpmversion}-%{pkg_release} -Provides: kernel-headers = %{rpmversion}-%{pkg_release} +Obsoletes: kernel-headers < %{kernelversion}-%{pkg_release} +Provides: kernel-headers = %{kernelversion}-%{pkg_release} %endif %description headers Kernel-headers includes the C header files that specify the interface @@ -611,8 +608,8 @@ ApplyOptionalPatch() fi } -%setup -q -n %{name}-%{rpmversion}-%{pkg_release} -c -mv linux-%{rpmversion}-%{pkg_release} linux-%{KVERREL} +%setup -q -n %{name}-%{kernelversion}-%{pkg_release} -c +mv linux-%{kernelversion}-%{pkg_release} linux-%{KVERREL} cd linux-%{KVERREL} @@ -1172,7 +1169,7 @@ find Documentation -type d | xargs chmod u+w cd linux-%{KVERREL} %if %{with_doc} -docdir=$RPM_BUILD_ROOT%{_datadir}/doc/kernel-doc-%{rpmversion} +docdir=$RPM_BUILD_ROOT%{_datadir}/doc/kernel-doc-%{kernelversion} # copy the source over mkdir -p $docdir @@ -1396,9 +1393,9 @@ fi %if %{with_doc} %files doc %defattr(-,root,root) -%{_datadir}/doc/kernel-doc-%{rpmversion}/Documentation/* -%dir %{_datadir}/doc/kernel-doc-%{rpmversion}/Documentation -%dir %{_datadir}/doc/kernel-doc-%{rpmversion} +%{_datadir}/doc/kernel-doc-%{kernelversion}/Documentation/* +%dir %{_datadir}/doc/kernel-doc-%{kernelversion}/Documentation +%dir %{_datadir}/doc/kernel-doc-%{kernelversion} %endif %if %{with_perf} -- Gitee From 3b914d1637640617ec69638c625d0bfcf46eb1f1 Mon Sep 17 00:00:00 2001 From: Pu Wen 
Date: Sun, 24 Dec 2023 16:01:10 +0800 Subject: [PATCH 0004/2138] anolis: x86/cpu/hygon: Fix __max_die_per_package for Hygon family 18h model 4h ANBZ: #5455 From model 4h, Hygon processors use CPUID leaf 0xB to derive the core ID, socket ID and APIC ID with the SMT and CORE level types. But still set __max_die_per_package to nodes_per_socket because of lacking the DIE level type. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/kernel/cpu/hygon.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 6e738759779e..f0482c9d49fd 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -80,12 +80,14 @@ static void hygon_get_topology(struct cpuinfo_x86 *c) c->x86_max_cores /= smp_num_siblings; /* - * In case leaf B is available, use it to derive + * From model 0x4, leaf B is available, so use it to derive * topology information. */ err = detect_extended_topology(c); - if (!err) + if (!err) { c->x86_coreid_bits = get_count_order(c->x86_max_cores); + __max_die_per_package = nodes_per_socket; + } /* * Socket ID is ApicId[6] for the processors with model <= 0x3 -- Gitee From 6ef84a8a28b175ec6ec23d2ff0d662ce39f9dc6d Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:02:06 +0800 Subject: [PATCH 0005/2138] anolis: x86/microcode/hygon: Add microcode loading support for Hygon processors ANBZ: #5455 Add support for loading Hygon microcode, which is compatible with AMD one. 
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- Documentation/arch/x86/microcode.rst | 13 +++++- arch/x86/Kconfig | 2 +- arch/x86/kernel/cpu/microcode/amd.c | 52 ++++++++++++++++++++---- arch/x86/kernel/cpu/microcode/core.c | 22 +++++++++- arch/x86/kernel/cpu/microcode/internal.h | 12 ++++++ 5 files changed, 90 insertions(+), 11 deletions(-) diff --git a/Documentation/arch/x86/microcode.rst b/Documentation/arch/x86/microcode.rst index b627c6f36bcf..69c04052861d 100644 --- a/Documentation/arch/x86/microcode.rst +++ b/Documentation/arch/x86/microcode.rst @@ -35,6 +35,8 @@ on Intel: kernel/x86/microcode/GenuineIntel.bin on AMD : kernel/x86/microcode/AuthenticAMD.bin +on Hygon: + kernel/x86/microcode/HygonGenuine.bin During BSP (BootStrapping Processor) boot (pre-SMP), the kernel scans the microcode file in the initrd. If microcode matching the @@ -69,6 +71,10 @@ here for future reference only). cd $TMPDIR mkdir -p $DSTDIR + if [ -d /lib/firmware/hygon-ucode ]; then + cat /lib/firmware/hygon-ucode/microcode_hygon*.bin > $DSTDIR/HygonGenuine.bin + fi + if [ -d /lib/firmware/amd-ucode ]; then cat /lib/firmware/amd-ucode/microcode_amd*.bin > $DSTDIR/AuthenticAMD.bin fi @@ -217,7 +223,8 @@ currently supported. Here's an example:: - CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 amd-ucode/microcode_amd_fam15h.bin" + CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 \ + amd-ucode/microcode_amd_fam15h.bin hygon-ucode/microcode_hygon_fam18h.bin" CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware" This basically means, you have the following tree structure locally:: @@ -227,6 +234,10 @@ This basically means, you have the following tree structure locally:: ... | |-- microcode_amd_fam15h.bin ... + |-- hygon-ucode + ... + | |-- microcode_hygon_fam18h.bin + ... |-- intel-ucode ... 
| |-- 06-3a-09 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 05c82fd5d0f6..a0d11cd0de55 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1312,7 +1312,7 @@ config X86_REBOOTFIXUPS config MICROCODE def_bool y - depends on CPU_SUP_AMD || CPU_SUP_INTEL + depends on CPU_SUP_AMD || CPU_SUP_INTEL || CPU_SUP_HYGON config MICROCODE_LATE_LOADING bool "Late microcode loading (DANGEROUS)" diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index bbd1dc38ea03..296b1f327d24 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -493,15 +493,18 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) { - char fw_name[36] = "amd-ucode/microcode_amd.bin"; + char fw_name[40] = "amd-ucode/microcode_amd.bin"; struct firmware fw; if (IS_ENABLED(CONFIG_X86_32)) return false; - if (family >= 0x15) + if (x86_cpuid_vendor() == X86_VENDOR_AMD && family >= 0x15) snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", family); + else if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + snprintf(fw_name, sizeof(fw_name), + "hygon-ucode/microcode_hygon_fam%.2xh.bin", family); if (firmware_request_builtin(&fw, fw_name)) { cp->size = fw.size; @@ -521,11 +524,18 @@ static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data if (IS_ENABLED(CONFIG_X86_32)) { uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info); - path = (const char *)__pa_nodebug(ucode_path); + if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + path = (const char *)__pa_nodebug( + "kernel/x86/microcode/HygonGenuine.bin"); + else + path = (const char *)__pa_nodebug(ucode_path); use_pa = true; } else { uci = ucode_cpu_info; - path = ucode_path; + if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + path = "kernel/x86/microcode/HygonGenuine.bin"; + else + path = ucode_path; use_pa = false; } @@ -561,8 +571,14 @@ int 
__init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) struct cont_desc desc = { 0 }; enum ucode_state ret; struct cpio_data cp; + const char *path; - cp = find_microcode_in_initrd(ucode_path, false); + if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + path = "kernel/x86/microcode/HygonGenuine.bin"; + else + path = ucode_path; + + cp = find_microcode_in_initrd(path, false); if (!(cp.data && cp.size)) return -EINVAL; @@ -904,13 +920,17 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz */ static enum ucode_state request_microcode_amd(int cpu, struct device *device) { - char fw_name[36] = "amd-ucode/microcode_amd.bin"; + char fw_name[40] = "amd-ucode/microcode_amd.bin"; struct cpuinfo_x86 *c = &cpu_data(cpu); enum ucode_state ret = UCODE_NFOUND; const struct firmware *fw; - if (c->x86 >= 0x15) - snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); + if (x86_cpuid_vendor() == X86_VENDOR_AMD && c->x86 >= 0x15) + snprintf(fw_name, sizeof(fw_name), + "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); + else if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + snprintf(fw_name, sizeof(fw_name), + "hygon-ucode/microcode_hygon_fam%.2xh.bin", c->x86); if (request_firmware_direct(&fw, (const char *)fw_name, device)) { pr_debug("failed to load file %s\n", fw_name); @@ -960,6 +980,22 @@ struct microcode_ops * __init init_amd_microcode(void) return µcode_amd_ops; } +#ifdef CONFIG_CPU_SUP_HYGON +const struct microcode_ops * __init init_hygon_microcode(void) +{ + struct cpuinfo_x86 *c = &boot_cpu_data; + + if (c->x86_vendor != X86_VENDOR_HYGON) + return NULL; + + if (ucode_new_rev) + pr_info_once("microcode updated early to new patch_level=0x%08x\n", + ucode_new_rev); + + return µcode_amd_ops; +} +#endif + void __exit exit_amd_microcode(void) { cleanup(); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index a4ebd5e0ae82..98245c19a90d 100644 --- a/arch/x86/kernel/cpu/microcode/core.c 
+++ b/arch/x86/kernel/cpu/microcode/core.c @@ -41,7 +41,11 @@ #define DRIVER_VERSION "2.2" +#ifdef CONFIG_CPU_SUP_HYGON +static const struct microcode_ops *microcode_ops; +#else static struct microcode_ops *microcode_ops; +#endif static bool dis_ucode_ldr = true; bool initrd_gone; @@ -125,7 +129,8 @@ static bool __init check_loader_disabled_bsp(void) if (native_cpuid_ecx(1) & BIT(31)) return *res; - if (x86_cpuid_vendor() == X86_VENDOR_AMD) { + if (x86_cpuid_vendor() == X86_VENDOR_AMD || + x86_cpuid_vendor() == X86_VENDOR_HYGON) { if (amd_check_current_patch_level()) return *res; } @@ -158,6 +163,10 @@ void __init load_ucode_bsp(void) intel = false; break; + case X86_VENDOR_HYGON: + intel = false; + break; + default: return; } @@ -198,6 +207,9 @@ void load_ucode_ap(void) if (x86_family(cpuid_1_eax) >= 0x10) load_ucode_amd_early(cpuid_1_eax); break; + case X86_VENDOR_HYGON: + load_ucode_amd_early(cpuid_1_eax); + break; default: break; } @@ -222,6 +234,9 @@ static int __init save_microcode_in_initrd(void) if (c->x86 >= 0x10) ret = save_microcode_in_initrd_amd(cpuid_eax(1)); break; + case X86_VENDOR_HYGON: + ret = save_microcode_in_initrd_amd(cpuid_eax(1)); + break; default: break; } @@ -316,6 +331,9 @@ static void reload_early_microcode(unsigned int cpu) if (family >= 0x10) reload_ucode_amd(cpu); break; + case X86_VENDOR_HYGON: + reload_ucode_amd(cpu); + break; default: break; } @@ -642,6 +660,8 @@ static int __init microcode_init(void) microcode_ops = init_intel_microcode(); else if (c->x86_vendor == X86_VENDOR_AMD) microcode_ops = init_amd_microcode(); + else if (c->x86_vendor == X86_VENDOR_HYGON) + microcode_ops = init_hygon_microcode(); else pr_err("no support for this CPU vendor\n"); diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index bf883aa71233..9e76fe430812 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -55,6 +55,9 @@ struct cpio_data 
find_microcode_in_initrd(const char *path, bool use_pa); #define CPUID_AMD1 QCHAR('A', 'u', 't', 'h') #define CPUID_AMD2 QCHAR('e', 'n', 't', 'i') #define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D') +#define CPUID_HYGON1 QCHAR('H', 'y', 'g', 'o') +#define CPUID_HYGON2 QCHAR('n', 'G', 'e', 'n') +#define CPUID_HYGON3 QCHAR('u', 'i', 'n', 'e') #define CPUID_IS(a, b, c, ebx, ecx, edx) \ (!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c)))) @@ -81,6 +84,9 @@ static inline int x86_cpuid_vendor(void) if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx)) return X86_VENDOR_AMD; + if (CPUID_IS(CPUID_HYGON1, CPUID_HYGON2, CPUID_HYGON3, ebx, ecx, edx)) + return X86_VENDOR_HYGON; + return X86_VENDOR_UNKNOWN; } @@ -114,6 +120,12 @@ static inline struct microcode_ops *init_amd_microcode(void) { return NULL; } static inline void exit_amd_microcode(void) { } #endif /* !CONFIG_CPU_SUP_AMD */ +#ifdef CONFIG_CPU_SUP_HYGON +const struct microcode_ops *init_hygon_microcode(void); +#else /* CONFIG_CPU_SUP_HYGON */ +static const inline struct microcode_ops *init_hygon_microcode(void) { return NULL; } +#endif /* !CONFIG_CPU_SUP_HYGON */ + #ifdef CONFIG_CPU_SUP_INTEL void load_ucode_intel_bsp(void); void load_ucode_intel_ap(void); -- Gitee From 226af8ce724c633c3c2c3fe2e7055c69005f7c3c Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:02:41 +0800 Subject: [PATCH 0006/2138] anolis: x86/amd_nb: Add Hygon family 18h model 4h PCI IDs ANBZ: #5455 Add the PCI device IDs for Hygon family 18h model 4h processors. 
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/kernel/amd_nb.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 6dabb53f58a4..77a5b9616217 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -127,16 +127,19 @@ static const struct pci_device_id amd_nb_link_ids[] = { static const struct pci_device_id hygon_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, {} }; static const struct pci_device_id hygon_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, {} }; static const struct pci_device_id hygon_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, {} }; -- Gitee From 2cb63e4f5d838e511782c133fa631e4332776756 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:03:10 +0800 Subject: [PATCH 0007/2138] anolis: x86/amd_nb: Add northbridge support for Hygon family 18h model 4h ANBZ: #5455 Add dedicated functions to initialize the northbridge for Hygon family 18h model 4h processors. 
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/include/asm/amd_nb.h | 8 ++ arch/x86/kernel/amd_nb.c | 193 ++++++++++++++++++++++++++++++++++ 2 files changed, 201 insertions(+) diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index c8cdc69aae09..497ad86ef225 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -82,6 +82,10 @@ u16 amd_nb_num(void); bool amd_nb_has_feature(unsigned int feature); struct amd_northbridge *node_to_amd_nb(int node); +bool hygon_f18h_m4h(void); +u16 hygon_nb_num(void); +int get_df_id(struct pci_dev *misc, u8 *id); + static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev) { struct pci_dev *misc; @@ -122,6 +126,10 @@ static inline struct amd_northbridge *node_to_amd_nb(int node) } #define amd_gart_present(x) false +#define hygon_f18h_m4h false +#define hygon_nb_num(x) 0 +#define get_df_id(x, y) NULL + #endif diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 77a5b9616217..36fbf7ae4388 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -45,10 +45,13 @@ #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4 0x12c4 #define PCI_DEVICE_ID_AMD_MI200_DF_F4 0x14d4 +#define PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1 0x1491 + /* Protect the PCI config register pairs used for SMN. 
*/ static DEFINE_MUTEX(smn_mutex); static u32 *flush_words; +static u16 nb_num; static const struct pci_device_id amd_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, @@ -233,6 +236,191 @@ int amd_smn_write(u16 node, u32 address, u32 value) } EXPORT_SYMBOL_GPL(amd_smn_write); +bool hygon_f18h_m4h(void) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return false; + + if (boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(hygon_f18h_m4h); + +u16 hygon_nb_num(void) +{ + return nb_num; +} +EXPORT_SYMBOL_GPL(hygon_nb_num); + +static int get_df1_register(struct pci_dev *misc, int offset, u32 *value) +{ + struct pci_dev *df_f1 = NULL; + int err; + + while ((df_f1 = pci_get_device(misc->vendor, + PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1, df_f1))) + if (pci_domain_nr(df_f1->bus) == pci_domain_nr(misc->bus) && + df_f1->bus->number == misc->bus->number && + PCI_SLOT(df_f1->devfn) == PCI_SLOT(misc->devfn)) + break; + + if (!df_f1) { + pr_warn("Error getting DF F1 device.\n"); + return -ENODEV; + } + + err = pci_read_config_dword(df_f1, offset, value); + if (err) + pr_warn("Error reading DF F1 register.\n"); + + return err; +} + +int get_df_id(struct pci_dev *misc, u8 *id) +{ + u32 value; + int ret; + + /* F1x200[23:20]: DF ID */ + ret = get_df1_register(misc, 0x200, &value); + *id = (value >> 20) & 0xf; + + return ret; +} +EXPORT_SYMBOL_GPL(get_df_id); + +static u8 get_socket_num(struct pci_dev *misc) +{ + u32 value; + int ret; + + /* F1x200[7:0]: Which socket is present. */ + ret = get_df1_register(misc, 0x200, &value); + + return ret ? 
0 : hweight8(value & 0xff); +} + +static int northbridge_init_f18h_m4h(const struct pci_device_id *root_ids, + const struct pci_device_id *misc_ids, + const struct pci_device_id *link_ids) +{ + struct pci_dev *root, *misc, *link; + struct pci_dev *root_first = NULL; + struct amd_northbridge *nb; + u16 roots_per_socket = 0; + u16 miscs_per_socket = 0; + u16 socket_num = 0; + u16 root_count = 0; + u16 misc_count = 0; + int err = -ENODEV; + u8 i, j, m, n; + u8 id; + + pr_info("Hygon Fam%xh Model%xh NB driver.\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); + + misc = next_northbridge(NULL, misc_ids); + if (misc != NULL) { + socket_num = get_socket_num(misc); + pr_info("Socket number: %d\n", socket_num); + if (!socket_num) { + err = -ENODEV; + goto ret; + } + } else { + err = -ENODEV; + goto ret; + } + + misc = NULL; + while ((misc = next_northbridge(misc, misc_ids)) != NULL) + misc_count++; + + root = NULL; + while ((root = next_northbridge(root, root_ids)) != NULL) + root_count++; + + if (!root_count || !misc_count) { + err = -ENODEV; + goto ret; + } + + /* + * There should be _exactly_ N roots for each DF/SMN + * interface, and M DF/SMN interfaces in one socket. + */ + roots_per_socket = root_count / socket_num; + miscs_per_socket = misc_count / socket_num; + + if (!roots_per_socket || !miscs_per_socket) { + err = -ENODEV; + goto ret; + } + + nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL); + if (!nb) { + err = -ENOMEM; + goto ret; + } + + amd_northbridges.nb = nb; + amd_northbridges.num = misc_count; + + link = misc = root = NULL; + j = m = n = 0; + for (i = 0; i < amd_northbridges.num; i++) { + misc = next_northbridge(misc, misc_ids); + link = next_northbridge(link, link_ids); + + /* Only save the first PCI root device for each socket. 
*/ + if (!(i % miscs_per_socket)) { + root_first = next_northbridge(root, root_ids); + root = root_first; + j = 1; + } + + if (get_df_id(misc, &id)) { + err = -ENODEV; + goto err; + } + pr_info("DF ID: %d\n", id); + + if (id < 4) { + /* Add the devices with id<4 from the tail. */ + node_to_amd_nb(misc_count - m - 1)->misc = misc; + node_to_amd_nb(misc_count - m - 1)->link = link; + node_to_amd_nb(misc_count - m - 1)->root = root_first; + m++; + } else { + node_to_amd_nb(n)->misc = misc; + node_to_amd_nb(n)->link = link; + node_to_amd_nb(n)->root = root_first; + n++; + } + + /* Skip the redundant PCI root devices per socket. */ + while (j < roots_per_socket) { + root = next_northbridge(root, root_ids); + j++; + } + } + nb_num = n; + + return 0; + +err: + kfree(nb); + amd_northbridges.nb = NULL; + +ret: + pr_err("Hygon Fam%xh Model%xh northbridge init failed(%d)!\n", + boot_cpu_data.x86, boot_cpu_data.x86_model, err); + return err; +} static int amd_cache_northbridges(void) { @@ -253,6 +441,11 @@ static int amd_cache_northbridges(void) root_ids = hygon_root_ids; misc_ids = hygon_nb_misc_ids; link_ids = hygon_nb_link_ids; + + if (boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) + return northbridge_init_f18h_m4h(root_ids, + misc_ids, link_ids); } misc = NULL; -- Gitee From 569fe3c75e117640f09b3e38650b9352b68dc136 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:03:39 +0800 Subject: [PATCH 0008/2138] anolis: iommu/hygon: Add support for Hygon family 18h model 4h IOAPIC ANBZ: #5455 The SB IOAPIC is on the device 0xb from Hygon family 18h model 4h. 
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/iommu/amd/init.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index ef3fae113dd6..f6c1f7e04d47 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3013,6 +3013,9 @@ static void __init free_iommu_resources(void) /* SB IOAPIC is always on this device in AMD systems */ #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) +/* SB IOAPIC for Hygon family 18h model 4h is on the device 0xb */ +#define IOAPIC_SB_DEVID_FAM18H_M4H ((0x00 << 8) | PCI_DEVFN(0xb, 0)) + static bool __init check_ioapic_information(void) { const char *fw_bug = FW_BUG; @@ -3038,7 +3041,12 @@ static bool __init check_ioapic_information(void) pr_err("%s: IOAPIC[%d] not in IVRS table\n", fw_bug, id); ret = false; - } else if (devid == IOAPIC_SB_DEVID) { + } else if (devid == IOAPIC_SB_DEVID || + (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf && + devid == IOAPIC_SB_DEVID_FAM18H_M4H)) { has_sb_ioapic = true; ret = true; } -- Gitee From bbc5f632237e8e36964e0d76a2eadd75e92e4b18 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:04:15 +0800 Subject: [PATCH 0009/2138] anolis: EDAC/amd64: Get UMC channel from the 6th nibble for Hygon ANBZ: #5455 On Hygon family 18h platforms, we look at the 6th nibble(bit 20~23) in the instance_id to derive the channel number. 
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/edac/amd64_edac.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 9cd86390a167..67dfc0e47d04 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3059,7 +3059,11 @@ static inline void decode_bus_error(int node_id, struct mce *m) */ static void umc_get_err_info(struct mce *m, struct err_info *err) { - err->channel = (m->ipid & GENMASK(31, 0)) >> 20; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) + err->channel = (m->ipid & GENMASK(23, 0)) >> 20; + else + err->channel = (m->ipid & GENMASK(31, 0)) >> 20; err->csrow = m->synd & 0x7; } -- Gitee From 83ce395acf8447a4fbbc04d01092b0b737503c55 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:04:42 +0800 Subject: [PATCH 0010/2138] anolis: EDAC/amd64: Add support for Hygon family 18h model 4h ANBZ: #5455 Add support for Hygon family 18h model 4h to get UMC base, instance number and determine DDR memory types. 
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/edac/amd64_edac.c | 61 ++++++++++++++++++++++++++++++++------- 1 file changed, 51 insertions(+), 10 deletions(-) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 67dfc0e47d04..3b7d658045ef 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -96,6 +96,17 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, return pcibios_err_to_errno(err); } +static u32 get_umc_base_f18h_m4h(u16 node, u8 channel) +{ + struct pci_dev *f3 = node_to_amd_nb(node)->misc; + u8 df_id; + + get_df_id(f3, &df_id); + df_id -= 4; + + return get_umc_base(channel) + (0x80000000 + (0x10000000 * df_id)); +} + /* * Select DCT to which PCI cfg accesses are routed */ @@ -1610,7 +1621,10 @@ static void umc_dump_misc_regs(struct amd64_pvt *pvt) u32 i, tmp, umc_base; for_each_umc(i) { - umc_base = get_umc_base(i); + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + else + umc_base = get_umc_base(i); umc = &pvt->umc[i]; edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg); @@ -1719,11 +1733,17 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) u32 mask_reg, mask_reg_sec; u32 *base, *base_sec; u32 *mask, *mask_sec; + u32 umc_base; int cs, umc; for_each_umc(umc) { - umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR; - umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC; + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, umc); + else + umc_base = get_umc_base(umc); + + umc_base_reg = umc_base + UMCCH_BASE_ADDR; + umc_base_reg_sec = umc_base + UMCCH_BASE_ADDR_SEC; for_each_chip_select(cs, umc, pvt) { base = &pvt->csels[umc].csbases[cs]; @@ -1741,8 +1761,8 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) umc, cs, *base_sec, base_reg_sec); } - umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK; - umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, 
UMCCH_ADDR_MASK_SEC); + umc_mask_reg = umc_base + UMCCH_ADDR_MASK; + umc_mask_reg_sec = umc_base + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC); for_each_chip_select_mask(cs, umc, pvt) { mask = &pvt->csels[umc].csmasks[cs]; @@ -1825,7 +1845,8 @@ static void umc_determine_memory_type(struct amd64_pvt *pvt) * Check if the system supports the "DDR Type" field in UMC Config * and has DDR5 DIMMs in use. */ - if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { + if ((pvt->flags.zn_regs_v2 || hygon_f18h_m4h()) && + ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { if (umc->dimm_cfg & BIT(5)) umc->dram_type = MEM_LRDDR5; else if (umc->dimm_cfg & BIT(4)) @@ -3178,8 +3199,11 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt) /* Read registers from each UMC */ for_each_umc(i) { + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + else + umc_base = get_umc_base(i); - umc_base = get_umc_base(i); umc = &pvt->umc[i]; amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg); @@ -4098,6 +4122,11 @@ static int per_family_init(struct amd64_pvt *pvt) break; case 0x18: + if (pvt->model == 0x4) { + pvt->ctl_name = "F18h_M04h"; + pvt->max_mcs = 3; + break; + } pvt->ctl_name = "F18h"; break; @@ -4361,6 +4390,7 @@ static int __init amd64_edac_init(void) { const char *owner; int err = -ENODEV; + u16 instance_num; int i; if (ghes_get_devices()) @@ -4378,8 +4408,13 @@ static int __init amd64_edac_init(void) opstate_init(); + if (hygon_f18h_m4h()) + instance_num = hygon_nb_num(); + else + instance_num = amd_nb_num(); + err = -ENOMEM; - ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL); + ecc_stngs = kcalloc(instance_num, sizeof(ecc_stngs[0]), GFP_KERNEL); if (!ecc_stngs) goto err_free; @@ -4387,7 +4422,7 @@ static int __init amd64_edac_init(void) if (!msrs) goto err_free; - for (i = 0; i < amd_nb_num(); i++) { + for (i = 0; i < instance_num; i++) { err = probe_one_instance(i); if (err) { /* unwind properly */ @@ 
-4432,6 +4467,7 @@ static int __init amd64_edac_init(void) static void __exit amd64_edac_exit(void) { + u16 instance_num; int i; if (pci_ctl) @@ -4443,7 +4479,12 @@ static void __exit amd64_edac_exit(void) else amd_unregister_ecc_decoder(decode_bus_error); - for (i = 0; i < amd_nb_num(); i++) + if (hygon_f18h_m4h()) + instance_num = hygon_nb_num(); + else + instance_num = amd_nb_num(); + + for (i = 0; i < instance_num; i++) remove_one_instance(i); kfree(ecc_stngs); -- Gitee From 6fd01237dfaaa62c442096a377f88d273c62605d Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:04:58 +0800 Subject: [PATCH 0011/2138] anolis: EDAC/amd64: Adjust address translation for Hygon family 18h model 4h ANBZ: #5455 Add Hygon family 18h model 4h processor support for DramOffset and HiAddrOffset, and get the socket interleaving number from DramBase- Address(D18F0x110). Update intlv_num_chan and num_intlv_bits support for Hygon family 18h model 4h processor. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/edac/amd64_edac.c | 40 ++++++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 3b7d658045ef..7583b0071eec 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -1148,8 +1148,11 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr ctx.nid = nid; ctx.inst_id = umc; - /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */ - if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp)) + /* Read DramOffset, check if base 1 is used. 
*/ + if (hygon_f18h_m4h() && + df_indirect_read_instance(nid, 0, 0x214, umc, &ctx.tmp)) + goto out_err; + else if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp)) goto out_err; /* Remove HiAddrOffset from normalized address, if enabled: */ @@ -1173,6 +1176,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr goto out_err; } + intlv_num_sockets = 0; + if (hygon_f18h_m4h()) + intlv_num_sockets = (ctx.tmp >> 2) & 0x3; lgcy_mmio_hole_en = ctx.tmp & BIT(1); intlv_num_chan = (ctx.tmp >> 4) & 0xF; intlv_addr_sel = (ctx.tmp >> 8) & 0x7; @@ -1189,7 +1195,8 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp)) goto out_err; - intlv_num_sockets = (ctx.tmp >> 8) & 0x1; + if (!hygon_f18h_m4h()) + intlv_num_sockets = (ctx.tmp >> 8) & 0x1; intlv_num_dies = (ctx.tmp >> 10) & 0x3; dram_limit_addr = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0); @@ -1207,6 +1214,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr hash_enabled = true; break; default: + if (hygon_f18h_m4h() && boot_cpu_data.x86_model == 0x4 && + intlv_num_chan == 2) + break; pr_err("%s: Invalid number of interleaved channels %d.\n", __func__, intlv_num_chan); goto out_err; @@ -1225,8 +1235,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr /* Add a bit if sockets are interleaved. */ num_intlv_bits += intlv_num_sockets; - /* Assert num_intlv_bits <= 4 */ - if (num_intlv_bits > 4) { + /* Assert num_intlv_bits in the correct range. 
*/ + if ((hygon_f18h_m4h() && num_intlv_bits > 7) || + (!hygon_f18h_m4h() && num_intlv_bits > 4)) { pr_err("%s: Invalid interleave bits %d.\n", __func__, num_intlv_bits); goto out_err; @@ -1245,7 +1256,10 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp)) goto out_err; - cs_fabric_id = (ctx.tmp >> 8) & 0xFF; + if (hygon_f18h_m4h()) + cs_fabric_id = (ctx.tmp >> 8) & 0x7FF; + else + cs_fabric_id = (ctx.tmp >> 8) & 0xFF; die_id_bit = 0; /* If interleaved over more than 1 channel: */ @@ -1265,8 +1279,13 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr /* If interleaved over more than 1 die. */ if (intlv_num_dies) { sock_id_bit = die_id_bit + intlv_num_dies; - die_id_shift = (ctx.tmp >> 24) & 0xF; - die_id_mask = (ctx.tmp >> 8) & 0xFF; + if (hygon_f18h_m4h()) { + die_id_shift = (ctx.tmp >> 12) & 0xF; + die_id_mask = ctx.tmp & 0x7FF; + } else { + die_id_shift = (ctx.tmp >> 24) & 0xF; + die_id_mask = (ctx.tmp >> 8) & 0xFF; + } cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit; } @@ -1274,7 +1293,10 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr /* If interleaved over more than 1 socket. */ if (intlv_num_sockets) { socket_id_shift = (ctx.tmp >> 28) & 0xF; - socket_id_mask = (ctx.tmp >> 16) & 0xFF; + if (hygon_f18h_m4h()) + socket_id_mask = (ctx.tmp >> 16) & 0x7FF; + else + socket_id_mask = (ctx.tmp >> 16) & 0xFF; cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit; } -- Gitee From e8d61af7a97bb026a083826d290649bc34e24666 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:05:40 +0800 Subject: [PATCH 0012/2138] anolis: EDAC/mce_amd: Use struct cpuinfo_x86.logical_die_id for Hygon NodeId ANBZ: #5455 The cpuinfo_x86.cpu_die_id is get from CPUID or MSR in the commit 028c221ed190 ("x86/CPU/AMD: Save AMD NodeId as cpu_die_id"). 
But the value may not be continuous for Hygon model 4h~6h processors. Use cpuinfo_x86.logical_die_id will always format continuous die (or node) IDs, because it will convert the physical die ID to logical die ID. So use topology_logical_die_id() instead of topology_die_id() to decode UMC ECC errors for Hygon processors. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/edac/mce_amd.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 9215c06783df..06e29d2b51d1 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -1187,8 +1187,13 @@ static void decode_smca_error(struct mce *m) pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]); if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) && - xec == 0 && decode_dram_ecc) - decode_dram_ecc(topology_die_id(m->extcpu), m); + xec == 0 && decode_dram_ecc) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) + decode_dram_ecc(topology_logical_die_id(m->extcpu), m); + else + decode_dram_ecc(topology_die_id(m->extcpu), m); + } } static inline void amd_decode_err_code(u16 ec) -- Gitee From 76bb7a5bbc7b2b66cf7811c7e982ad1831bdd447 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:06:12 +0800 Subject: [PATCH 0013/2138] anolis: hwmon/k10temp: Add support for Hygon family 18h model 4h ANBZ: #5455 The DF F3 device ID used to get the temperature for Hygon family 18h model 4h processor is the same as 17H_M30H, but with different offsets, which may span two distributed ranges. The second offset range can be considered as private for Hygon, so use struct hygon_private to describe it. Add a pointer priv in k10temp_data to point to the private data. Add functions k10temp_get_ccd_support_2nd() and hygon_read_temp() to support reading the second offset range. 
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 [Fixes conflicts] Signed-off-by: Qinyun Tan --- drivers/hwmon/k10temp.c | 95 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 87 insertions(+), 8 deletions(-) diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index c906731c6c2d..47d0cc541def 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -84,6 +84,11 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); */ #define AMD_I3255_STR "3255" +struct hygon_private { + u32 index_2nd; + u32 offset_2nd; +}; + struct k10temp_data { struct pci_dev *pdev; void (*read_htcreg)(struct pci_dev *pdev, u32 *regval); @@ -94,6 +99,7 @@ struct k10temp_data { bool is_zen; u32 ccd_offset; bool disp_negative; + void *priv; }; #define TCTL_BIT 0 @@ -202,6 +208,23 @@ static int k10temp_read_labels(struct device *dev, return 0; } +static void hygon_read_temp(struct k10temp_data *data, int channel, + u32 *regval) +{ + struct hygon_private *h_priv; + + h_priv = (struct hygon_private *)data->priv; + if ((channel - 2) < h_priv->index_2nd) + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + ZEN_CCD_TEMP(data->ccd_offset, channel - 2), + regval); + else + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + ZEN_CCD_TEMP(h_priv->offset_2nd, + channel - 2 - h_priv->index_2nd), + regval); +} + static int k10temp_read_temp(struct device *dev, u32 attr, int channel, long *val) { @@ -223,13 +246,15 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, *val = 0; break; case 2 ... 
13: /* Tccd{1-12} */ - ret = amd_smn_read(amd_pci_dev_to_node_id(data->pdev), - ZEN_CCD_TEMP(data->ccd_offset, channel - 2), - ®val); - - if (ret) - return ret; - + if (hygon_f18h_m4h()) + hygon_read_temp(data, channel, ®val); + else { + ret = amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + ZEN_CCD_TEMP(data->ccd_offset, channel - 2), + ®val); + if (ret) + return ret; + } *val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000; break; default: @@ -406,14 +431,48 @@ static void k10temp_get_ccd_support(struct pci_dev *pdev, } } +static void k10temp_get_ccd_support_2nd(struct pci_dev *pdev, + struct k10temp_data *data, int limit) +{ + struct hygon_private *h_priv; + u32 regval; + int i; + + h_priv = (struct hygon_private *)data->priv; + for (i = h_priv->index_2nd; i < limit; i++) { + amd_smn_read(amd_pci_dev_to_node_id(pdev), + ZEN_CCD_TEMP(h_priv->offset_2nd, + i - h_priv->index_2nd), + ®val); + if (regval & ZEN_CCD_TEMP_VALID) + data->show_temp |= BIT(TCCD_BIT(i)); + } +} + static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int unreliable = has_erratum_319(pdev); struct device *dev = &pdev->dev; + struct hygon_private *h_priv; struct k10temp_data *data; struct device *hwmon_dev; + u8 df_id; int i; + if (hygon_f18h_m4h()) { + if (get_df_id(pdev, &df_id)) { + pr_err("Get DF ID failed.\n"); + return -ENODEV; + } + + /* + * The temperature should be get from the devices + * with id < 4. 
+ */ + if (df_id >= 4) + return 0; + } + if (unreliable) { if (!force) { dev_err(dev, @@ -441,7 +500,7 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) (boot_cpu_data.x86_model & 0xf0) == 0x70)) { data->read_htcreg = read_htcreg_nb_f15; data->read_tempreg = read_tempreg_nb_f15; - } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { + } else if (boot_cpu_data.x86 == 0x17) { data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_zen; data->is_zen = true; @@ -466,6 +525,25 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) k10temp_get_ccd_support(pdev, data, 8); break; } + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; + data->read_tempreg = read_tempreg_nb_zen; + data->is_zen = true; + + if (boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) { + data->ccd_offset = 0x154; + data->priv = devm_kzalloc(dev, sizeof(*h_priv), + GFP_KERNEL); + if (!data->priv) + return -ENOMEM; + h_priv = (struct hygon_private *)data->priv; + h_priv->offset_2nd = 0x2f8; + h_priv->index_2nd = 3; + k10temp_get_ccd_support(pdev, data, h_priv->index_2nd); + k10temp_get_ccd_support_2nd(pdev, data, 8); + } } else if (boot_cpu_data.x86 == 0x19) { data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_zen; @@ -547,6 +625,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); -- Gitee From ad5c1e11cc81bee86d7b83ee5016e5fbb53464dd Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:06:38 +0800 Subject: [PATCH 0014/2138] anolis: i2c-piix4: Remove the 
IMC detecting for Hygon SMBus ANBZ: #5455 Remove IMC detecting path for Hygon processors. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/i2c/busses/i2c-piix4.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 809fbd014cd6..cc170c114e10 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -1043,8 +1043,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) bool notify_imc = false; is_sb800 = true; - if ((dev->vendor == PCI_VENDOR_ID_AMD || - dev->vendor == PCI_VENDOR_ID_HYGON) && + if (dev->vendor == PCI_VENDOR_ID_AMD && dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) { u8 imc; -- Gitee From 17138879a5e64e63ee447a1a4b6786bc672de36b Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:07:05 +0800 Subject: [PATCH 0015/2138] anolis: x86/cpu: Get LLC ID for Hygon family 18h model 5h ANBZ: #5455 Add support to calculate LLC ID from the number of threads sharing the cache for Hygon family 18h model 5h processor. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/kernel/cpu/cacheinfo.c | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index 8f86eacf69f7..7c4ce361c728 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -708,11 +708,30 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu) if (!cpuid_edx(0x80000006)) return; - /* - * LLC is at the core complex level. - * Core complex ID is ApicId[3] for these processors. - */ - per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; + if (c->x86_model < 0x5) { + /* + * LLC is at the core complex level. + * Core complex ID is ApicId[3] for these processors. 
+ */ + per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; + } else { + /* + * LLC ID is calculated from the number of threads + * sharing the cache. + */ + u32 eax, ebx, ecx, edx, num_sharing_cache = 0; + u32 llc_index = find_num_cache_leaves(c) - 1; + + cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx); + if (eax) + num_sharing_cache = ((eax >> 14) & 0xfff) + 1; + + if (num_sharing_cache) { + int bits = get_count_order(num_sharing_cache); + + per_cpu(cpu_llc_id, cpu) = c->apicid >> bits; + } + } } void init_amd_cacheinfo(struct cpuinfo_x86 *c) -- Gitee From 2c35517bf79024ab2bb6d5642b0fde77d6fe7aa1 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:07:27 +0800 Subject: [PATCH 0016/2138] anolis: x86/amd_nb: Add support for Hygon family 18h model 5h ANBZ: #5455 Add root and DF F1/F3/F4 device IDs for Hygon family 18h model 5h processors. But some model 5h processors have the legacy(M04H) DF devices, so add a if conditional to read the df1 register. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/kernel/amd_nb.c | 24 ++++++++++++++++++++++-- include/linux/pci_ids.h | 1 + 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 36fbf7ae4388..e15af0db8dcb 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -45,7 +45,10 @@ #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4 0x12c4 #define PCI_DEVICE_ID_AMD_MI200_DF_F4 0x14d4 +#define PCI_DEVICE_ID_HYGON_18H_M05H_ROOT 0x14a0 #define PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1 0x1491 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1 0x14b1 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4 0x14b4 /* Protect the PCI config register pairs used for SMN. 
*/ static DEFINE_MUTEX(smn_mutex); @@ -131,18 +134,21 @@ static const struct pci_device_id amd_nb_link_ids[] = { static const struct pci_device_id hygon_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_ROOT) }, {} }; static const struct pci_device_id hygon_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, {} }; static const struct pci_device_id hygon_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4) }, {} }; @@ -259,10 +265,24 @@ EXPORT_SYMBOL_GPL(hygon_nb_num); static int get_df1_register(struct pci_dev *misc, int offset, u32 *value) { struct pci_dev *df_f1 = NULL; + u32 device; int err; - while ((df_f1 = pci_get_device(misc->vendor, - PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1, df_f1))) + switch (boot_cpu_data.x86_model) { + case 0x4: + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + case 0x5: + if (misc->device == PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + else + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + default: + return -ENODEV; + } + + while ((df_f1 = pci_get_device(misc->vendor, device, df_f1))) if (pci_domain_nr(df_f1->bus) == pci_domain_nr(misc->bus) && df_f1->bus->number == misc->bus->number && PCI_SLOT(df_f1->devfn) == PCI_SLOT(misc->devfn)) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 3dce2be622e7..74bfe88f3a9e 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2600,6 +2600,7 @@ #define PCI_VENDOR_ID_ZHAOXIN 0x1d17 #define PCI_VENDOR_ID_HYGON 
0x1d94 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3 0x14b3 #define PCI_VENDOR_ID_FUNGIBLE 0x1dad -- Gitee From 4435a5305df965713f8912fcadf7fb294e111573 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:07:50 +0800 Subject: [PATCH 0017/2138] anolis: EDAC/amd64: Add support for Hygon family 18h model 5h ANBZ: #5455 Add Hygon family 18h model 5h processor support for amd64_edac. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/edac/amd64_edac.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 7583b0071eec..09debab4415a 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -4148,6 +4148,10 @@ static int per_family_init(struct amd64_pvt *pvt) pvt->ctl_name = "F18h_M04h"; pvt->max_mcs = 3; break; + } else if (pvt->model == 0x5) { + pvt->ctl_name = "F18h_M05h"; + pvt->max_mcs = 1; + break; } pvt->ctl_name = "F18h"; break; -- Gitee From c218dedc00d73d687e5dc8b881d2a8efa209f4ad Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:08:20 +0800 Subject: [PATCH 0018/2138] anolis: hwmon/k10temp: Add support for Hygon family 18h model 5h ANBZ: #5455 Add 18H_M05H DF F3 device ID to get the temperature for Hygon family 18h model 5h processor. 
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/hwmon/k10temp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 47d0cc541def..6dbcb8cd0951 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -626,6 +626,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); -- Gitee From f43efa6e43ad0dfa09347c06fb5c0f2f14b65509 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:08:52 +0800 Subject: [PATCH 0019/2138] anolis: x86/amd_nb: Add support for Hygon family 18h model 6h ANBZ: #5455 Hygon family 18h model 6h processor has the same DF F1 device ID as M05H_DF_F1, but should get DF ID from DF F5 device. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/kernel/amd_nb.c | 70 ++++++++++++++++++++++++++-------------- 1 file changed, 46 insertions(+), 24 deletions(-) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index e15af0db8dcb..e7dbd486ef16 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -49,6 +49,7 @@ #define PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1 0x1491 #define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1 0x14b1 #define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4 0x14b4 +#define PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5 0x14b5 /* Protect the PCI config register pairs used for SMN. 
*/ static DEFINE_MUTEX(smn_mutex); @@ -262,40 +263,55 @@ u16 hygon_nb_num(void) } EXPORT_SYMBOL_GPL(hygon_nb_num); -static int get_df1_register(struct pci_dev *misc, int offset, u32 *value) +static int get_df_register(struct pci_dev *misc, u8 func, int offset, u32 *value) { - struct pci_dev *df_f1 = NULL; + struct pci_dev *df_func = NULL; u32 device; int err; - switch (boot_cpu_data.x86_model) { - case 0x4: - device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; - break; - case 0x5: - if (misc->device == PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) - device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; - else + if (func == 1) { + switch (boot_cpu_data.x86_model) { + case 0x4: device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; - break; - default: + break; + case 0x5: + if (misc->device == PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + else + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + case 0x6: + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + break; + default: + return -ENODEV; + } + } else if (func == 5) { + switch (boot_cpu_data.x86_model) { + case 0x6: + device = PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5; + break; + default: + return -ENODEV; + } + } else { return -ENODEV; } - while ((df_f1 = pci_get_device(misc->vendor, device, df_f1))) - if (pci_domain_nr(df_f1->bus) == pci_domain_nr(misc->bus) && - df_f1->bus->number == misc->bus->number && - PCI_SLOT(df_f1->devfn) == PCI_SLOT(misc->devfn)) + while ((df_func = pci_get_device(misc->vendor, device, df_func))) + if (pci_domain_nr(df_func->bus) == pci_domain_nr(misc->bus) && + df_func->bus->number == misc->bus->number && + PCI_SLOT(df_func->devfn) == PCI_SLOT(misc->devfn)) break; - if (!df_f1) { - pr_warn("Error getting DF F1 device.\n"); + if (!df_func) { + pr_warn("Error getting DF F%d device.\n", func); return -ENODEV; } - err = pci_read_config_dword(df_f1, offset, value); + err = pci_read_config_dword(df_func, offset, value); if (err) - pr_warn("Error reading DF F1 register.\n"); + pr_warn("Error 
reading DF F%d register.\n", func); return err; } @@ -305,9 +321,15 @@ int get_df_id(struct pci_dev *misc, u8 *id) u32 value; int ret; - /* F1x200[23:20]: DF ID */ - ret = get_df1_register(misc, 0x200, &value); - *id = (value >> 20) & 0xf; + if (boot_cpu_data.x86_model == 0x6) { + /* F5x180[19:16]: DF ID */ + ret = get_df_register(misc, 5, 0x180, &value); + *id = (value >> 16) & 0xf; + } else { + /* F1x200[23:20]: DF ID */ + ret = get_df_register(misc, 1, 0x200, &value); + *id = (value >> 20) & 0xf; + } return ret; } @@ -319,7 +341,7 @@ static u8 get_socket_num(struct pci_dev *misc) int ret; /* F1x200[7:0]: Which socket is present. */ - ret = get_df1_register(misc, 0x200, &value); + ret = get_df_register(misc, 1, 0x200, &value); return ret ? 0 : hweight8(value & 0xff); } -- Gitee From ec209f593af425a7f79d862cc1c915379f820daf Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:09:21 +0800 Subject: [PATCH 0020/2138] anolis: EDAC/amd64: Add support for Hygon family 18h model 6h ANBZ: #5455 Add Hygon family 18h model 6h processor support for amd64_edac. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/edac/amd64_edac.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 09debab4415a..699925e63aa9 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -4152,6 +4152,9 @@ static int per_family_init(struct amd64_pvt *pvt) pvt->ctl_name = "F18h_M05h"; pvt->max_mcs = 1; break; + } else if (pvt->model == 0x6) { + pvt->ctl_name = "F18h_M06h"; + break; } pvt->ctl_name = "F18h"; break; -- Gitee From 9ec15fa3e6dcaaf081b93cc0621ca421d7d78fef Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:09:47 +0800 Subject: [PATCH 0021/2138] anolis: EDAC/amd64: Adjust UMC channel for Hygon family 18h model 6h ANBZ: #5455 Hygon family 18h model 6h has 2 cs mapped to 1 umc, so adjust for it. 
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- drivers/edac/amd64_edac.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 699925e63aa9..196a060e3928 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3117,6 +3117,7 @@ static void decode_umc_error(int node_id, struct mce *m) struct amd64_pvt *pvt; struct err_info err; u64 sys_addr; + u8 umc; node_id = fixup_node_id(node_id, m); @@ -3147,7 +3148,12 @@ static void decode_umc_error(int node_id, struct mce *m) pvt->ops->get_err_info(m, &err); - if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) { + if (hygon_f18h_m4h() && boot_cpu_data.x86_model == 0x6) + umc = err.channel << 1; + else + umc = err.channel; + + if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, umc, &sys_addr)) { err.err_code = ERR_NORM_ADDR; goto log_error; } -- Gitee From 8d91660b5a9816eb74b062fefe86a72798210d51 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:10:11 +0800 Subject: [PATCH 0022/2138] anolis: perf/x86/uncore: Add L3 PMU support for Hygon family 18h model 6h ANBZ: #5455 Adjust the L3 PMU slicemask and threadmask for Hygon family 18h model 6h processor. 
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/events/amd/uncore.c | 52 +++++++++++++++++++++++++++++-- arch/x86/include/asm/perf_event.h | 8 +++++ 2 files changed, 58 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 83f15fe411b3..5100469fef32 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -196,10 +196,21 @@ static void amd_uncore_del(struct perf_event *event, int flags) */ static u64 l3_thread_slice_mask(u64 config) { - if (boot_cpu_data.x86 <= 0x18) + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 <= 0x18) return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) | ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + if (boot_cpu_data.x86_model == 0x6) + return ((config & HYGON_L3_SLICE_MASK) ? : HYGON_L3_SLICE_MASK) | + ((config & HYGON_L3_THREAD_MASK) ? : HYGON_L3_THREAD_MASK); + else + return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) | + ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK); + } + /* * If the user doesn't specify a threadmask, they're not trying to * count core 0, so we enable all cores & threads. @@ -268,6 +279,13 @@ amd_f17h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) attr->mode : 0; } +static umode_t +hygon_f18h_m6h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + return boot_cpu_data.x86 == 0x18 && boot_cpu_data.x86_model == 0x6 ? 
+ attr->mode : 0; +} + static umode_t amd_f19h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) { @@ -325,6 +343,8 @@ DEFINE_UNCORE_FORMAT_ATTR(threadmask2, threadmask, "config:56-57"); /* F19h L DEFINE_UNCORE_FORMAT_ATTR(enallslices, enallslices, "config:46"); /* F19h L3 */ DEFINE_UNCORE_FORMAT_ATTR(enallcores, enallcores, "config:47"); /* F19h L3 */ DEFINE_UNCORE_FORMAT_ATTR(sliceid, sliceid, "config:48-50"); /* F19h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(slicemask4, slicemask, "config:28-31"); /* F18h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(threadmask32, threadmask, "config:32-63"); /* F18h L3 */ /* Common DF and NB attributes */ static struct attribute *amd_uncore_df_format_attr[] = { @@ -347,6 +367,12 @@ static struct attribute *amd_f17h_uncore_l3_format_attr[] = { NULL, }; +/* F18h M06h unique L3 attributes */ +static struct attribute *hygon_f18h_m6h_uncore_l3_format_attr[] = { + &format_attr_slicemask4.attr, /* slicemask */ + NULL, +}; + /* F19h unique L3 attributes */ static struct attribute *amd_f19h_uncore_l3_format_attr[] = { &format_attr_coreid.attr, /* coreid */ @@ -372,6 +398,12 @@ static struct attribute_group amd_f17h_uncore_l3_format_group = { .is_visible = amd_f17h_uncore_is_visible, }; +static struct attribute_group hygon_f18h_m6h_uncore_l3_format_group = { + .name = "format", + .attrs = hygon_f18h_m6h_uncore_l3_format_attr, + .is_visible = hygon_f18h_m6h_uncore_is_visible, +}; + static struct attribute_group amd_f19h_uncore_l3_format_group = { .name = "format", .attrs = amd_f19h_uncore_l3_format_attr, @@ -396,6 +428,11 @@ static const struct attribute_group *amd_uncore_l3_attr_update[] = { NULL, }; +static const struct attribute_group *hygon_uncore_l3_attr_update[] = { + &hygon_f18h_m6h_uncore_l3_format_group, + NULL, +}; + static struct pmu amd_nb_pmu = { .task_ctx_nr = perf_invalid_context, .attr_groups = amd_uncore_df_attr_groups, @@ -709,10 +746,21 @@ static int __init amd_uncore_init(void) *l3_attr++ = &format_attr_event8.attr; 
*l3_attr++ = &format_attr_umask8.attr; *l3_attr++ = &format_attr_threadmask2.attr; - } else if (boot_cpu_data.x86 >= 0x17) { + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 >= 0x17) { *l3_attr++ = &format_attr_event8.attr; *l3_attr++ = &format_attr_umask8.attr; *l3_attr++ = &format_attr_threadmask8.attr; + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + *l3_attr++ = &format_attr_event8.attr; + *l3_attr++ = &format_attr_umask8.attr; + if (boot_cpu_data.x86_model == 0x6) { + *l3_attr++ = &format_attr_threadmask32.attr; + amd_llc_pmu.attr_update = hygon_uncore_l3_attr_update; + } else { + *l3_attr++ = &format_attr_threadmask8.attr; + } } amd_uncore_llc = alloc_percpu(struct amd_uncore *); diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 85a9fd5a3ec3..0c4a93712ef5 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -60,6 +60,14 @@ #define INTEL_ARCH_EVENT_MASK \ (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT) +#define HYGON_L3_SLICE_SHIFT 28 +#define HYGON_L3_SLICE_MASK \ + (0xFULL << HYGON_L3_SLICE_SHIFT) + +#define HYGON_L3_THREAD_SHIFT 32 +#define HYGON_L3_THREAD_MASK \ + (0xFFFFFFFFULL << HYGON_L3_THREAD_SHIFT) + #define AMD64_L3_SLICE_SHIFT 48 #define AMD64_L3_SLICE_MASK \ (0xFULL << AMD64_L3_SLICE_SHIFT) -- Gitee From e2f81f316139fd42e199841223e7caede787dda7 Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:11:01 +0800 Subject: [PATCH 0023/2138] anolis: x86/resctrl: Add Hygon QoS support ANBZ: #5455 Add support for Hygon QoS feature. 
Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- arch/x86/kernel/cpu/hygon.c | 2 ++ arch/x86/kernel/cpu/resctrl/core.c | 10 +++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index f0482c9d49fd..b6f932d2d6aa 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "cpu.h" @@ -242,6 +243,7 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) x86_amd_ls_cfg_ssbd_mask = 1ULL << 10; } } + resctrl_cpu_detect(c); } static void early_init_hygon(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 10830995eada..e3c6d6552ffc 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -755,7 +755,8 @@ static __init bool get_mem_config(void) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) return __get_mem_config_intel(&hw_res->r_resctrl); - else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) return __rdt_get_mem_config_amd(&hw_res->r_resctrl); return false; @@ -907,7 +908,8 @@ static __init void rdt_init_res_defs(void) { if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) rdt_init_res_defs_intel(); - else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) rdt_init_res_defs_amd(); } @@ -938,7 +940,9 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c) c->x86_cache_occ_scale = ebx; c->x86_cache_mbm_width_offset = eax & 0xff; - if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset) + if ((c->x86_vendor == X86_VENDOR_AMD || + c->x86_vendor == X86_VENDOR_HYGON) && + !c->x86_cache_mbm_width_offset) c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD; } } -- 
Gitee From 9089b29024e036a3ff04d96b1fe72f278000e37f Mon Sep 17 00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:11:52 +0800 Subject: [PATCH 0024/2138] anolis: ALSA: hda: Add support for Hygon family 18h model 5h HD-Audio ANBZ: #5455 Add the new PCI ID 0x1d94 0x14a9 for Hygon family 18h model 5h HDA controller. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- include/linux/pci_ids.h | 1 + sound/pci/hda/hda_intel.c | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 74bfe88f3a9e..9e9feaedbd2b 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2600,6 +2600,7 @@ #define PCI_VENDOR_ID_ZHAOXIN 0x1d17 #define PCI_VENDOR_ID_HYGON 0x1d94 +#define PCI_DEVICE_ID_HYGON_18H_M05H_HDA 0x14a9 #define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3 0x14b3 #define PCI_VENDOR_ID_FUNGIBLE 0x1dad diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 134c6f6e0959..84cc8d952045 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -238,6 +238,7 @@ enum { AZX_DRIVER_CMEDIA, AZX_DRIVER_ZHAOXIN, AZX_DRIVER_LOONGSON, + AZX_DRIVER_HYGON, AZX_DRIVER_GENERIC, AZX_NUM_DRIVERS, /* keep this as last entry */ }; @@ -350,6 +351,7 @@ static const char * const driver_short_names[] = { [AZX_DRIVER_CMEDIA] = "HDA C-Media", [AZX_DRIVER_ZHAOXIN] = "HDA Zhaoxin", [AZX_DRIVER_LOONGSON] = "HDA Loongson", + [AZX_DRIVER_HYGON] = "HDA Hygon", [AZX_DRIVER_GENERIC] = "HD-Audio Generic", }; @@ -2753,6 +2755,9 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_LOONGSON }, { PCI_VDEVICE(LOONGSON, PCI_DEVICE_ID_LOONGSON_HDMI), .driver_data = AZX_DRIVER_LOONGSON }, + /* Hygon HDAudio */ + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_HDA), + .driver_data = AZX_DRIVER_HYGON | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_NO_MSI }, { 0, } }; MODULE_DEVICE_TABLE(pci, azx_ids); -- Gitee From 25367feb93ee396265633b3b9ea666be06020904 Mon Sep 17 
00:00:00 2001 From: Pu Wen Date: Sun, 24 Dec 2023 16:12:21 +0800 Subject: [PATCH 0025/2138] anolis: ALSA: hda: Fix single byte writing issue for Hygon family 18h model 5h ANBZ: #5455 On Hygon family 18h model 5h controller, some registers such as GCTL, SD_CTL and SD_CTL_3B should be accessed in dword, or the writing will fail. Signed-off-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/11 --- include/sound/hdaudio.h | 1 + sound/hda/hdac_controller.c | 10 ++++++++-- sound/hda/hdac_stream.c | 39 ++++++++++++++++++++++++++++--------- sound/pci/hda/hda_intel.c | 4 ++++ 4 files changed, 43 insertions(+), 11 deletions(-) diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 32c59053b48e..101183b8d3bc 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -350,6 +350,7 @@ struct hdac_bus { bool needs_damn_long_delay:1; bool not_use_interrupts:1; /* prohibiting the RIRB IRQ */ bool access_sdnctl_in_dword:1; /* accessing the sdnctl register by dword */ + bool hygon_dword_access:1; int poll_count; diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c index 7f3a000fab0c..df37a85cf27c 100644 --- a/sound/hda/hdac_controller.c +++ b/sound/hda/hdac_controller.c @@ -410,7 +410,10 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus) { unsigned long timeout; - snd_hdac_chip_updateb(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET); + if (bus->hygon_dword_access) + snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET); + else + snd_hdac_chip_updateb(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET); timeout = jiffies + msecs_to_jiffies(100); while (!snd_hdac_chip_readb(bus, GCTL) && time_before(jiffies, timeout)) @@ -475,7 +478,10 @@ static void azx_int_disable(struct hdac_bus *bus) /* disable interrupts in stream descriptor */ list_for_each_entry(azx_dev, &bus->stream_list, list) - snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0); + if (bus->hygon_dword_access) + 
snd_hdac_stream_updatel(azx_dev, SD_CTL, SD_INT_MASK, 0); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0); /* disable SIE for all streams & disable controller CIE and GIE */ snd_hdac_chip_writel(bus, INTCTL, 0); diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c index fe0958f9969c..2312266939b2 100644 --- a/sound/hda/hdac_stream.c +++ b/sound/hda/hdac_stream.c @@ -146,11 +146,15 @@ void snd_hdac_stream_start(struct hdac_stream *azx_dev) stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream); else stripe_ctl = 0; - snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, + stripe_ctl); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, stripe_ctl); } /* set DMA start and interrupt mask */ - if (bus->access_sdnctl_in_dword) + if (bus->access_sdnctl_in_dword || bus->hygon_dword_access) snd_hdac_stream_updatel(azx_dev, SD_CTL, 0, SD_CTL_DMA_START | SD_INT_MASK); else @@ -166,11 +170,21 @@ EXPORT_SYMBOL_GPL(snd_hdac_stream_start); */ static void snd_hdac_stream_clear(struct hdac_stream *azx_dev) { - snd_hdac_stream_updateb(azx_dev, SD_CTL, - SD_CTL_DMA_START | SD_INT_MASK, 0); - snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */ - if (azx_dev->stripe) - snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0); + struct hdac_bus *bus = azx_dev->bus; + + if (bus->hygon_dword_access) { + snd_hdac_stream_updatel(azx_dev, SD_CTL, + SD_CTL_DMA_START | SD_INT_MASK, 0); + snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */ + if (azx_dev->stripe) + snd_hdac_stream_updatel(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0); + } else { + snd_hdac_stream_updateb(azx_dev, SD_CTL, + SD_CTL_DMA_START | SD_INT_MASK, 0); + snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */ + if (azx_dev->stripe) + snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 
0); + } azx_dev->running = false; } @@ -225,12 +239,16 @@ void snd_hdac_stream_reset(struct hdac_stream *azx_dev) { unsigned char val; int dma_run_state; + struct hdac_bus *bus = azx_dev->bus; snd_hdac_stream_clear(azx_dev); dma_run_state = snd_hdac_stream_readb(azx_dev, SD_CTL) & SD_CTL_DMA_START; - snd_hdac_stream_updateb(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET); + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET); /* wait for hardware to report that the stream entered reset */ snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val, (val & SD_CTL_STREAM_RESET), 3, 300); @@ -238,7 +256,10 @@ void snd_hdac_stream_reset(struct hdac_stream *azx_dev) if (azx_dev->bus->dma_stop_delay && dma_run_state) udelay(azx_dev->bus->dma_stop_delay); - snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0); + if (bus->hygon_dword_access) + snd_hdac_stream_updatel(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0); + else + snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0); /* wait for hardware to report that the stream is out of reset */ snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val, !(val & SD_CTL_STREAM_RESET), 3, 300); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 84cc8d952045..8ffe22e53909 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1880,6 +1880,10 @@ static int azx_first_init(struct azx *chip) bus->access_sdnctl_in_dword = 1; } + if (chip->driver_type == AZX_DRIVER_HYGON && + chip->pci->device == PCI_DEVICE_ID_HYGON_18H_M05H_HDA) + bus->hygon_dword_access = 1; + err = pcim_iomap_regions(pci, 1 << 0, "ICH HD audio"); if (err < 0) return err; -- Gitee From efdd13b6a2da2dac43727afd477b171c083d4a53 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Wed, 27 Dec 2023 21:05:02 +0800 Subject: [PATCH 0026/2138] anolis: ata: libata: disabling PhyRdy Change Interrupt based on actual LPM capability ANBZ: 
#7809 The ahci spec mentions that PhyRdy Change Interrupt and Link Power Management (LPM) do not coexist. However, before enabling LPM, the driver did not check whether the host supports LPM, but directly disabled PhyRdy Change Interrupt. Increase the judgment on the actual support of LPM, and disable PhyRdy Change Interrupt only when it is supported. Signed-off-by: leoliu-oc Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/anck-next/pulls/23 --- drivers/ata/libata-eh.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 9cc022522184..dca0e73300da 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -3383,6 +3383,8 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, struct ata_device **r_failed_dev) { struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; + struct device *device = ap ? ap->host->dev : NULL; + struct pci_dev *pdev = (!device || !dev_is_pci(device)) ? NULL : to_pci_dev(device); struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; enum ata_lpm_policy old_policy = link->lpm_policy; @@ -3391,6 +3393,11 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, unsigned int err_mask; int rc; + /* if controller does not support lpm, then sets no LPM flags*/ + if ((pdev && pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) && + !(~ap->host->flags & (ATA_HOST_NO_PART | ATA_HOST_NO_SSC | ATA_HOST_NO_DEVSLP))) + link->flags |= ATA_LFLAG_NO_LPM; + /* if the link or host doesn't do LPM, noop */ if (!IS_ENABLED(CONFIG_SATA_HOST) || (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) -- Gitee From 8369c6b20a44a099df6f7dd185062595096d24db Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Wed, 27 Dec 2023 21:05:09 +0800 Subject: [PATCH 0027/2138] anolis: Add support for Zhaoxin I2C controller ANBZ: #7809 Zhaoxin I2C Linux driver support all bidirectional bus protocols speed 
specified in the I2C Specification 7.0. Signed-off-by: leoliu-oc Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/anck-next/pulls/27 --- drivers/i2c/busses/Kconfig | 11 + drivers/i2c/busses/Makefile | 1 + drivers/i2c/busses/i2c-zhaoxin.c | 561 +++++++++++++++++++++++++++++++ 3 files changed, 573 insertions(+) create mode 100644 drivers/i2c/busses/i2c-zhaoxin.c diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 982007a112c2..b9ee0e451e97 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -336,6 +336,17 @@ config I2C_VIAPRO This driver can also be built as a module. If so, the module will be called i2c-viapro. +config I2C_ZHAOXIN + tristate "Zhaoxin I2C controller driver" + depends on PCI + select I2C_ALGOBIT + help + If you say yes to this option, support will be included for the + Zhaoxin I2C interface + + This driver can also be built as a module. If so, the module + will be called i2c-zhaoxin. + if ACPI comment "ACPI drivers" diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 9be9fdb07f3d..bef7c205433b 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o obj-$(CONFIG_I2C_VIA) += i2c-via.o obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o +obj-$(CONFIG_I2C_ZHAOXIN) += i2c-zhaoxin.o # Mac SMBus host controller drivers obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o diff --git a/drivers/i2c/busses/i2c-zhaoxin.c b/drivers/i2c/busses/i2c-zhaoxin.c new file mode 100644 index 000000000000..3d4cb36c1f17 --- /dev/null +++ b/drivers/i2c/busses/i2c-zhaoxin.c @@ -0,0 +1,561 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright(c) 2021 Shanghai Zhaoxin Semiconductor Corporation. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "1.5.1" + +#define ZX_I2C_NAME "i2c_zhaoxin" + +/* REG_CR Bit fields */ +#define ZXI2C_REG_CR 0x00 +#define ZXI2C_CR_ENABLE BIT(0) +#define ZXI2C_CR_RX_END BIT(1) +#define ZXI2C_CR_TX_END BIT(2) +#define ZXI2C_CR_END_MASK GENMASK(2, 1) +#define ZXI2C_CR_CPU_RDY BIT(3) +#define ZXI2C_CR_MST_RST BIT(7) +#define ZXI2C_CR_FIFO_MODE BIT(14) + +/* REG_TCR Bit fields */ +#define ZXI2C_REG_TCR 0x02 +#define ZXI2C_TCR_HS_MODE BIT(13) +#define ZXI2C_TCR_MASTER_READ BIT(14) +#define ZXI2C_TCR_FAST BIT(15) + +/* REG_CSR Bit fields */ +#define ZXI2C_REG_CSR 0x04 +#define ZXI2C_CSR_RCV_NOT_ACK BIT(0) +#define ZXI2C_CSR_READY_MASK BIT(1) + +/* REG_ISR Bit fields */ +#define ZXI2C_REG_ISR 0x06 +#define ZXI2C_ISR_NACK_ADDR BIT(0) +#define ZXI2C_ISR_BYTE_END BIT(1) +#define ZXI2C_ISR_SCL_TIMEOUT BIT(2) +#define ZXI2C_ISR_MASK_ALL GENMASK(2, 0) +#define ZXI2C_IRQ_FIFOEND BIT(3) +#define ZXI2C_IRQ_FIFONACK BIT(4) +#define ZXI2C_IRQ_MASK (ZXI2C_ISR_MASK_ALL | ZXI2C_IRQ_FIFOEND | ZXI2C_IRQ_FIFONACK) + +/* REG_IMR Bit fields */ +#define ZXI2C_REG_IMR 0x08 +#define ZXI2C_IMR_ADDRNACK BIT(0) +#define ZXI2C_IMR_BYTE BIT(1) +#define ZXI2C_IMR_SCL_TIMEOUT BIT(2) +#define ZXI2C_IMR_ENABLE_ALL GENMASK(2, 0) + +#define ZXI2C_REG_CLK 0x10 +#define ZXI2C_CLK_50M BIT(0) +#define ZXI2C_REG_REV 0x11 +#define ZXI2C_REG_HCR 0x12 +#define ZXI2C_HCR_RST_FIFO GENMASK(1, 0) +#define ZXI2C_REG_HTDR 0x13 +#define ZXI2C_REG_HRDR 0x14 +#define ZXI2C_REG_HTLR 0x15 +#define ZXI2C_REG_HRLR 0x16 +#define ZXI2C_REG_HWCNTR 0x18 +#define ZXI2C_REG_HRCNTR 0x19 + +#define ZXI2C_REG_CDR 0x0A +#define ZXI2C_REG_TR 0x0C +#define ZXI2C_REG_MCR 0x0E + +struct zxi2c { + struct i2c_adapter adapter; + struct completion complete; + struct device *dev; + void __iomem *base; + struct clk *clk; + u16 tcr; + int irq; + u16 cmd_status; + u16 tr; + u16 mcr; + u16 csr; + u8 fstp; + u8 hrv; +}; + +/* parameters 
Constants */ +#define ZXI2C_GOLD_FSTP_100K 0xF3 +#define ZXI2C_GOLD_FSTP_400K 0x38 +#define ZXI2C_GOLD_FSTP_1M 0x13 +#define ZXI2C_GOLD_FSTP_3400K 0x37 + +#define ZXI2C_HS_MASTER_CODE (0x08 << 8) +#define ZXI2C_FIFO_SIZE 32 + +static int zxi2c_wait_bus_ready(struct zxi2c *i2c) +{ + unsigned long timeout; + void __iomem *base = i2c->base; + u16 tmp; + + timeout = jiffies + msecs_to_jiffies(200); + while (!(readw(base + ZXI2C_REG_CSR) & ZXI2C_CSR_READY_MASK)) { + if (time_after(jiffies, timeout)) { + dev_warn(i2c->dev, "timeout waiting for bus ready\n"); + return -EBUSY; + } + tmp = ioread16(i2c->base + ZXI2C_REG_CR); + iowrite16(tmp | ZXI2C_CR_END_MASK, i2c->base + ZXI2C_REG_CR); + + msleep(20); + } + + return 0; +} + +static int zxi2c_wait_status(struct zxi2c *i2c, u8 status) +{ + unsigned long time_left; + + time_left = wait_for_completion_timeout(&i2c->complete, msecs_to_jiffies(500)); + if (time_left <= 1) + return -ETIMEDOUT; + + if (i2c->cmd_status & status) + return 0; + + return -EIO; +} + +static irqreturn_t zxi2c_isr(int irq, void *data) +{ + struct zxi2c *i2c = data; + + /* save the status and write-clear it */ + i2c->cmd_status = readw(i2c->base + ZXI2C_REG_ISR); + if (!i2c->cmd_status) + return IRQ_NONE; + + writew(i2c->cmd_status, i2c->base + ZXI2C_REG_ISR); + + complete(&i2c->complete); + + return IRQ_HANDLED; +} + +static int zxi2c_write(struct zxi2c *i2c, struct i2c_msg *msg, bool last) +{ + u16 val, tcr_val = i2c->tcr; + int xfer_len = 0; + void __iomem *base = i2c->base; + + writew(msg->buf[0] & 0xFF, base + ZXI2C_REG_CDR); + reinit_completion(&i2c->complete); + writew(tcr_val | msg->addr, base + ZXI2C_REG_TCR); + + while (xfer_len < msg->len) { + int err; + + err = zxi2c_wait_status(i2c, ZXI2C_ISR_BYTE_END); + if (err) + return err; + + xfer_len++; + + val = readw(base + ZXI2C_REG_CSR); + if (val & ZXI2C_CSR_RCV_NOT_ACK) { + dev_dbg(i2c->dev, "write RCV NACK error\n"); + return -EIO; + } + + if (msg->len == 0) { + val = ZXI2C_CR_TX_END | 
ZXI2C_CR_CPU_RDY | ZXI2C_CR_ENABLE; + writew(val, base + ZXI2C_REG_CR); + break; + } + + if (xfer_len == msg->len) { + if (last) + writeb(ZXI2C_CR_TX_END, base + ZXI2C_REG_CR); + } else { + writew(msg->buf[xfer_len] & 0xFF, base + ZXI2C_REG_CDR); + writew(ZXI2C_CR_CPU_RDY | ZXI2C_CR_ENABLE, base + ZXI2C_REG_CR); + } + } + + return 0; +} + +static int zxi2c_read(struct zxi2c *i2c, struct i2c_msg *msg, bool first) +{ + u16 val, tcr_val = i2c->tcr; + u32 xfer_len = 0; + void __iomem *base = i2c->base; + + val = readw(base + ZXI2C_REG_CR); + val &= ~(ZXI2C_CR_TX_END | ZXI2C_CR_RX_END); + + if (msg->len == 1) + val |= ZXI2C_CR_RX_END; + + writew(val, base + ZXI2C_REG_CR); + + reinit_completion(&i2c->complete); + + tcr_val |= ZXI2C_TCR_MASTER_READ | msg->addr; + + writew(tcr_val, base + ZXI2C_REG_TCR); + + if (!first) { + val = readw(base + ZXI2C_REG_CR); + val |= ZXI2C_CR_CPU_RDY; + writew(val, base + ZXI2C_REG_CR); + } + + while (xfer_len < msg->len) { + int err; + + err = zxi2c_wait_status(i2c, ZXI2C_ISR_BYTE_END); + if (err) + return err; + + msg->buf[xfer_len] = readw(base + ZXI2C_REG_CDR) >> 8; + xfer_len++; + + val = readw(base + ZXI2C_REG_CR) | ZXI2C_CR_CPU_RDY; + if (xfer_len == msg->len - 1) + val |= ZXI2C_CR_RX_END; + writew(val, base + ZXI2C_REG_CR); + } + + return 0; +} + +static int zxi2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) +{ + struct i2c_msg *msg; + int i; + int ret = 0; + struct zxi2c *i2c = i2c_get_adapdata(adap); + + for (i = 0; ret >= 0 && i < num; i++) { + msg = &msgs[i]; + if (msg->len == 0) { + dev_dbg(i2c->dev, "zero len unsupported\n"); + return -ENODEV; + } + if (msg->flags & I2C_M_RD) + ret = zxi2c_read(i2c, msg, i == 0); + else + ret = zxi2c_write(i2c, msg, i == (num - 1)); + } + + return (ret < 0) ? 
ret : i; +} + +static int zxi2c_fifo_xfer(struct zxi2c *i2c, struct i2c_msg *msg) +{ + u16 xfered_len = 0; + u16 byte_left = msg->len; + u16 tcr_val = i2c->tcr; + void __iomem *base = i2c->base; + bool read = !!(msg->flags & I2C_M_RD); + + while (byte_left) { + u16 i; + u8 tmp; + int error; + u16 xfer_len = min_t(u16, byte_left, ZXI2C_FIFO_SIZE); + + byte_left -= xfer_len; + + /* reset fifo buffer */ + tmp = ioread8(base + ZXI2C_REG_HCR); + iowrite8(tmp | ZXI2C_HCR_RST_FIFO, base + ZXI2C_REG_HCR); + + /* set xfer len */ + if (read) { + iowrite8(xfer_len - 1, base + ZXI2C_REG_HRLR); + } else { + iowrite8(xfer_len - 1, base + ZXI2C_REG_HTLR); + /* set write data */ + for (i = 0; i < xfer_len; i++) + iowrite8(msg->buf[xfered_len + i], base + ZXI2C_REG_HTDR); + } + + /* prepare to stop transmission */ + if (i2c->hrv && !byte_left) { + tmp = ioread8(i2c->base + ZXI2C_REG_CR); + tmp |= read ? ZXI2C_CR_RX_END : ZXI2C_CR_TX_END; + iowrite8(tmp, base + ZXI2C_REG_CR); + } + + reinit_completion(&i2c->complete); + + if (xfered_len) { + /* continue transmission */ + tmp = ioread8(i2c->base + ZXI2C_REG_CR); + iowrite8(tmp |= ZXI2C_CR_CPU_RDY, i2c->base + ZXI2C_REG_CR); + } else { + /* start transmission */ + tcr_val |= (read ? 
ZXI2C_TCR_MASTER_READ : 0); + writew(tcr_val | msg->addr, base + ZXI2C_REG_TCR); + } + + error = zxi2c_wait_status(i2c, ZXI2C_IRQ_FIFOEND); + if (error) + return error; + + /* get the received data */ + if (read) + for (i = 0; i < xfer_len; i++) + msg->buf[xfered_len + i] = ioread8(base + ZXI2C_REG_HRDR); + + xfered_len += xfer_len; + } + + return 1; +} + +static int zxi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) +{ + u8 tmp; + int ret; + struct zxi2c *i2c = (struct zxi2c *)i2c_get_adapdata(adap); + + ret = zxi2c_wait_bus_ready(i2c); + if (ret) + return ret; + + tmp = ioread8(i2c->base + ZXI2C_REG_CR); + tmp &= ~(ZXI2C_CR_RX_END | ZXI2C_CR_TX_END); + + if (num == 1 && msgs->len >= 2 && (i2c->hrv || msgs->len <= ZXI2C_FIFO_SIZE)) { + /* enable fifo mode */ + iowrite16(ZXI2C_CR_FIFO_MODE | tmp, i2c->base + ZXI2C_REG_CR); + /* clear irq status */ + iowrite8(ZXI2C_IRQ_MASK, i2c->base + ZXI2C_REG_ISR); + /* enable fifo irq */ + iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IRQ_FIFOEND, i2c->base + ZXI2C_REG_IMR); + ret = zxi2c_fifo_xfer(i2c, msgs); + } else { + /* enable byte mode */ + iowrite16(tmp, i2c->base + ZXI2C_REG_CR); + /* clear irq status */ + iowrite8(ZXI2C_IRQ_MASK, i2c->base + ZXI2C_REG_ISR); + /* enable byte irq */ + iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IMR_BYTE, i2c->base + ZXI2C_REG_IMR); + ret = zxi2c_xfer(adap, msgs, num); + if (ret < 0) + iowrite16(tmp | ZXI2C_CR_END_MASK, i2c->base + ZXI2C_REG_CR); + /* make sure the state machine is stopped */ + usleep_range(1, 2); + } + /* dis interrupt */ + iowrite8(0, i2c->base + ZXI2C_REG_IMR); + + /* timeout may caused by another high-priority process, try again */ + if (ret == -ETIMEDOUT) + ret = -EAGAIN; + + return ret; +} + +static u32 zxi2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm zxi2c_algorithm = { + .master_xfer = zxi2c_master_xfer, + .functionality = zxi2c_func, +}; + +static const struct i2c_adapter_quirks 
zxi2c_quirks = { + .flags = I2C_AQ_NO_ZERO_LEN | I2C_AQ_COMB_WRITE_THEN_READ, +}; + +static const u32 zxi2c_speed_params_table[][3] = { + /* speed, ZXI2C_TCR, ZXI2C_FSTP */ + { I2C_MAX_STANDARD_MODE_FREQ, 0, ZXI2C_GOLD_FSTP_100K }, + { I2C_MAX_FAST_MODE_FREQ, ZXI2C_TCR_FAST, ZXI2C_GOLD_FSTP_400K }, + { I2C_MAX_FAST_MODE_PLUS_FREQ, ZXI2C_TCR_FAST, ZXI2C_GOLD_FSTP_1M }, + { I2C_MAX_HIGH_SPEED_MODE_FREQ, ZXI2C_TCR_HS_MODE | ZXI2C_TCR_FAST, + ZXI2C_GOLD_FSTP_3400K }, + /* never reached, keep for debug. freq src is 27M mode */ + { I2C_MAX_STANDARD_MODE_FREQ, 0, 0x83 }, + { I2C_MAX_FAST_MODE_FREQ, ZXI2C_TCR_FAST, 0x1e }, + { I2C_MAX_FAST_MODE_PLUS_FREQ, ZXI2C_TCR_FAST, 10 } +}; + +static void zxi2c_set_bus_speed(struct zxi2c *i2c) +{ + iowrite16(i2c->tr, i2c->base + ZXI2C_REG_TR); + iowrite8(ZXI2C_CLK_50M, i2c->base + ZXI2C_REG_CLK); + iowrite16(i2c->mcr, i2c->base + ZXI2C_REG_MCR); +} + +static void zxi2c_get_bus_speed(struct zxi2c *i2c) +{ + u8 i, count; + u8 fstp; + const u32 *params; + + u32 acpi_speed = i2c_acpi_find_bus_speed(i2c->dev); + + count = ARRAY_SIZE(zxi2c_speed_params_table); + for (i = 0; i < count; i++) + if (acpi_speed == zxi2c_speed_params_table[i][0]) + break; + /* if not found, use 400k as default */ + i = i < count ? 
i : 1; + + params = zxi2c_speed_params_table[i]; + fstp = ioread8(i2c->base + ZXI2C_REG_TR); + if (abs(fstp - params[2]) > 0x10) { + /* + * if BIOS setting value far from golden value, + * use golden value and warn user + */ + dev_warn(i2c->dev, "speed:%d, fstp:0x%x, golden:0x%x\n", + params[0], fstp, params[2]); + i2c->tr = params[2] | 0xff00; + } else + i2c->tr = fstp | 0xff00; + + i2c->tcr = params[1]; + i2c->mcr = ioread16(i2c->base + ZXI2C_REG_MCR); + /* for Hs-mode, use 0000 1000 as master code */ + if (params[0] == I2C_MAX_HIGH_SPEED_MODE_FREQ) + i2c->mcr |= ZXI2C_HS_MASTER_CODE; + + dev_info(i2c->dev, "speed mode is %s\n", i2c_freq_mode_string(params[0])); +} + +static int zxi2c_init(struct platform_device *pdev, struct zxi2c **pi2c) +{ + int err; + struct zxi2c *i2c; + struct resource *res; + + i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); + if (!i2c) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (IS_ERR(res)) { + dev_err(&pdev->dev, "IORESOURCE_MEM failed\n"); + return -ENODEV; + } + i2c->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(i2c->base)) + return PTR_ERR(i2c->base); + + i2c->irq = platform_get_irq(pdev, 0); + if (i2c->irq < 0) + return i2c->irq; + + err = devm_request_irq(&pdev->dev, i2c->irq, zxi2c_isr, IRQF_SHARED, pdev->name, i2c); + if (err) + return dev_err_probe(&pdev->dev, err, "failed to request irq %i\n", i2c->irq); + + i2c->dev = &pdev->dev; + init_completion(&i2c->complete); + platform_set_drvdata(pdev, i2c); + + *pi2c = i2c; + return 0; +} + +static int zxi2c_probe(struct platform_device *pdev) +{ + int error; + struct zxi2c *i2c; + struct i2c_adapter *adap; + + error = zxi2c_init(pdev, &i2c); + if (error) + return error; + + zxi2c_get_bus_speed(i2c); + zxi2c_set_bus_speed(i2c); + i2c->hrv = ioread8(i2c->base + ZXI2C_REG_REV); + + adap = &i2c->adapter; + adap->owner = THIS_MODULE; + adap->algo = &zxi2c_algorithm; + adap->retries = 2; + adap->quirks = &zxi2c_quirks; + + 
adap->dev.parent = &pdev->dev; + ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); + snprintf(adap->name, sizeof(adap->name), "zhaoxin-%s-%s", dev_name(pdev->dev.parent), + dev_name(i2c->dev)); + i2c_set_adapdata(adap, i2c); + + error = i2c_add_adapter(adap); + if (error) + return error; + + dev_info(i2c->dev, "adapter /dev/i2c-%d registered. version %s\n", + adap->nr, DRIVER_VERSION); + + return 0; +} + +static int zxi2c_remove(struct platform_device *pdev) +{ + struct zxi2c *i2c = platform_get_drvdata(pdev); + + devm_free_irq(&pdev->dev, i2c->irq, i2c); + + i2c_del_adapter(&i2c->adapter); + + platform_set_drvdata(pdev, NULL); + + devm_kfree(&pdev->dev, i2c); + + return 0; +} + +static int zxi2c_resume(struct device *dev) +{ + struct zxi2c *i2c = dev_get_drvdata(dev); + + iowrite8(ZXI2C_CR_MST_RST, i2c->base + ZXI2C_REG_CR); + zxi2c_set_bus_speed(i2c); + + return 0; +} + +static const struct dev_pm_ops zxi2c_pm = { + SET_SYSTEM_SLEEP_PM_OPS(NULL, zxi2c_resume) +}; + +static const struct acpi_device_id zxi2c_acpi_match[] = { + {"IIC1D17", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, zxi2c_acpi_match); + +static struct platform_driver zxi2c_driver = { + .probe = zxi2c_probe, + .remove = zxi2c_remove, + .driver = { + .name = ZX_I2C_NAME, + .acpi_match_table = ACPI_PTR(zxi2c_acpi_match), + .pm = &zxi2c_pm, + }, +}; + +module_platform_driver(zxi2c_driver); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_AUTHOR("HansHu@zhaoxin.com"); +MODULE_DESCRIPTION("Shanghai Zhaoxin IIC driver"); +MODULE_LICENSE("GPL"); -- Gitee From 43c58b6fd8b9be03d8a03bdf1a3c5441b099146d Mon Sep 17 00:00:00 2001 From: Xingrui Yi Date: Tue, 9 Jan 2024 11:11:13 +0800 Subject: [PATCH 0028/2138] anolis: config: change nr cpus to 1024 ANBZ: #7888 On X86 platform, the config NR_CPUS is 64, which is too small for server. NR_CPUS should be change to 1024 which is same in 5.10. 
Signed-off-by: Xingrui Yi Reviewed-by: Qiao Ma Reviewed-by: Artie Ding Link: https://gitee.com/anolis/anck-next/pulls/35 --- arch/x86/configs/anolis_defconfig | 5 +++-- lib/Kconfig | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 3c8b51687fb7..4fb6c9d9d8b7 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -408,9 +408,9 @@ CONFIG_DMI=y CONFIG_BOOT_VESA_SUPPORT=y # CONFIG_MAXSMP is not set CONFIG_NR_CPUS_RANGE_BEGIN=2 -CONFIG_NR_CPUS_RANGE_END=512 +CONFIG_NR_CPUS_RANGE_END=8192 CONFIG_NR_CPUS_DEFAULT=64 -CONFIG_NR_CPUS=64 +CONFIG_NR_CPUS=1024 CONFIG_SCHED_CLUSTER=y CONFIG_SCHED_SMT=y CONFIG_SCHED_MC=y @@ -7263,6 +7263,7 @@ CONFIG_CMA_ALIGNMENT=8 # CONFIG_DMA_MAP_BENCHMARK is not set CONFIG_SGL_ALLOC=y CONFIG_CHECK_SIGNATURE=y +CONFIG_CPUMASK_OFFSTACK=y CONFIG_CPU_RMAP=y CONFIG_DQL=y CONFIG_GLOB=y diff --git a/lib/Kconfig b/lib/Kconfig index ee365b7402f1..c8e32e86b848 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -532,7 +532,7 @@ config CHECK_SIGNATURE bool config CPUMASK_OFFSTACK - bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS + bool "Force CPU masks off stack" help Use dynamic allocation for cpumask_var_t, instead of putting them on the stack. This is a bit more expensive, but avoids -- Gitee From 26ea42aabb0a4009734d4cb5267d7475ec3b949f Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 16 Nov 2023 06:22:42 -0800 Subject: [PATCH 0029/2138] perf/x86/intel/cstate: Cleanup duplicate attr_groups ANBZ: #8007 commit 243218ca93037631f0224fdbefea045912cb761a upstream. The events of the cstate_core and cstate_pkg PMU have the same format. They both need to create a "events" group (with empty attrs). The attr_groups can be shared. Remove the dedicated attr_groups for each cstate PMU. Use the shared cstate_attr_groups to replace. 
Intel-SIG: commit 243218ca9303 perf/x86/intel/cstate: Cleanup duplicate attr_groups Backport Sierra Forrest(SRF) perf cstate support to kernel v6.6. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20231116142245.1233485-1-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2681 --- arch/x86/events/intel/cstate.c | 44 +++++++++------------------------- 1 file changed, 11 insertions(+), 33 deletions(-) diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index cc6609cbfc8d..a7dc33623595 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -188,20 +188,20 @@ static struct attribute *attrs_empty[] = { * "events" group (with empty attrs) before updating * it with detected events. */ -static struct attribute_group core_events_attr_group = { +static struct attribute_group cstate_events_attr_group = { .name = "events", .attrs = attrs_empty, }; -DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63"); -static struct attribute *core_format_attrs[] = { - &format_attr_core_event.attr, +DEFINE_CSTATE_FORMAT_ATTR(cstate_event, event, "config:0-63"); +static struct attribute *cstate_format_attrs[] = { + &format_attr_cstate_event.attr, NULL, }; -static struct attribute_group core_format_attr_group = { +static struct attribute_group cstate_format_attr_group = { .name = "format", - .attrs = core_format_attrs, + .attrs = cstate_format_attrs, }; static cpumask_t cstate_core_cpu_mask; @@ -216,9 +216,9 @@ static struct attribute_group cpumask_attr_group = { .attrs = cstate_cpumask_attrs, }; -static const struct attribute_group *core_attr_groups[] = { - &core_events_attr_group, - &core_format_attr_group, +static const struct attribute_group *cstate_attr_groups[] = { + &cstate_events_attr_group, + &cstate_format_attr_group, &cpumask_attr_group, NULL, }; @@ -267,30 +267,8 @@ 
static struct perf_msr pkg_msr[] = { [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &group_cstate_pkg_c10, test_msr }, }; -static struct attribute_group pkg_events_attr_group = { - .name = "events", - .attrs = attrs_empty, -}; - -DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63"); -static struct attribute *pkg_format_attrs[] = { - &format_attr_pkg_event.attr, - NULL, -}; -static struct attribute_group pkg_format_attr_group = { - .name = "format", - .attrs = pkg_format_attrs, -}; - static cpumask_t cstate_pkg_cpu_mask; -static const struct attribute_group *pkg_attr_groups[] = { - &pkg_events_attr_group, - &pkg_format_attr_group, - &cpumask_attr_group, - NULL, -}; - static ssize_t cstate_get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf) @@ -474,7 +452,7 @@ static const struct attribute_group *pkg_attr_update[] = { }; static struct pmu cstate_core_pmu = { - .attr_groups = core_attr_groups, + .attr_groups = cstate_attr_groups, .attr_update = core_attr_update, .name = "cstate_core", .task_ctx_nr = perf_invalid_context, @@ -489,7 +467,7 @@ static struct pmu cstate_core_pmu = { }; static struct pmu cstate_pkg_pmu = { - .attr_groups = pkg_attr_groups, + .attr_groups = cstate_attr_groups, .attr_update = pkg_attr_update, .name = "cstate_pkg", .task_ctx_nr = perf_invalid_context, -- Gitee From f6c3958226dd997fb1d2f0ab191bf500aefe09da Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 16 Nov 2023 06:22:43 -0800 Subject: [PATCH 0030/2138] x86/smp: Export symbol cpu_clustergroup_mask() ANBZ: #8007 commit c3dd1995620cdcd65cf4944c4164b0dbc16e557c upstream. Intel cstate PMU driver will invoke the topology_cluster_cpumask() to retrieve the CPU mask of a cluster. A modpost error is triggered since the symbol cpu_clustergroup_mask is not exported. Intel-SIG: commit c3dd1995620c x86/smp: Export symbol cpu_clustergroup_mask() Backport Sierra Forrest(SRF) perf cstate support to kernel v6.6. 
Signed-off-by: Kan Liang
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20231116142245.1233485-2-kan.liang@linux.intel.com
[ Yunying Sun: amend commit log ]
Signed-off-by: Yunying Sun
Reviewed-by: Peng Wang
Link: https://gitee.com/anolis/cloud-kernel/pulls/2681
---
 arch/x86/kernel/smpboot.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ce77dac9a020..19246d32a8ae 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -748,6 +748,7 @@ const struct cpumask *cpu_clustergroup_mask(int cpu)
 {
 	return cpu_l2c_shared_mask(cpu);
 }
+EXPORT_SYMBOL_GPL(cpu_clustergroup_mask);
 
 static void impress_friends(void)
 {
-- 
Gitee


From b3eb7c2c72d16d15aa181892387a05c480645a81 Mon Sep 17 00:00:00 2001
From: Kan Liang
Date: Thu, 16 Nov 2023 06:22:44 -0800
Subject: [PATCH 0031/2138] perf/x86/intel/cstate: Add Sierra Forest support

ANBZ: #8007

commit 3877d55a0db2688c2e4ab8a319614a0c81f8e2d2 upstream.

A new module C6 Residency Counter is introduced in the Sierra Forest.
The scope of the new counter is module (a cluster of cores sharing the
L2 cache). Create a brand new cstate_module PMU to profile the new
counter.

The only differences between the new cstate_module PMU and the existing
cstate PMU are the scope and events.

Regarding the choice of the new cstate_module PMU name, the current
naming rule of a cstate PMU is "cstate_" + the scope of the PMU. The
scope of the PMU is the cores shared L2. On SRF, Intel calls it
"module", while the internal Linux sched code calls it "cluster". The
"cstate_module" is used as the new PMU name, because
- The Cstate PMU driver is an Intel-specific driver. It doesn't impact
  other ARCHs. The name makes it consistent with the documentation.
- The "cluster" is mainly used by the scheduler developer, while the
  user of cstate PMU is more likely a researcher reading HW docs and
  optimizing power.
- In the Intel's SDM, the "cluster" has a different meaning/scope for topology. Using it will mislead the end users. Besides the module C6, the core C1/C6 and pkg C6 residency counters are supported in the Sierra Forest as well. Intel-SIG: commit 3877d55a0db2 perf/x86/intel/cstate: Add Sierra Forest support Backport Sierra Forrest(SRF) perf cstate support to kernel v6.6. Suggested-by: Artem Bityutskiy Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20231116142245.1233485-3-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2681 --- arch/x86/events/intel/cstate.c | 113 +++++++++++++++++++++++++++++++-- 1 file changed, 109 insertions(+), 4 deletions(-) diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index a7dc33623595..a179c2e27648 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -41,7 +41,7 @@ * MSR_CORE_C1_RES: CORE C1 Residency Counter * perf code: 0x00 * Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL - * MTL + * MTL,SRF * Scope: Core (each processor core has a MSR) * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter * perf code: 0x01 @@ -52,7 +52,7 @@ * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX, - * TGL,TNT,RKL,ADL,RPL,SPR,MTL + * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF * Scope: Core * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter * perf code: 0x03 @@ -75,7 +75,7 @@ * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX, - * TGL,TNT,RKL,ADL,RPL,SPR,MTL + * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF * Scope: Package (physical package) * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter. 
* perf code: 0x03 @@ -96,6 +96,10 @@ * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL, * TNT,RKL,ADL,RPL,MTL * Scope: Package (physical package) + * MSR_MODULE_C6_RES_MS: Module C6 Residency Counter. + * perf code: 0x00 + * Available model: SRF + * Scope: A cluster of cores shared L2 cache * */ @@ -129,6 +133,7 @@ static ssize_t cstate_get_attr_cpumask(struct device *dev, struct cstate_model { unsigned long core_events; unsigned long pkg_events; + unsigned long module_events; unsigned long quirks; }; @@ -269,6 +274,28 @@ static struct perf_msr pkg_msr[] = { static cpumask_t cstate_pkg_cpu_mask; +/* cstate_module PMU */ +static struct pmu cstate_module_pmu; +static bool has_cstate_module; + +enum perf_cstate_module_events { + PERF_CSTATE_MODULE_C6_RES = 0, + + PERF_CSTATE_MODULE_EVENT_MAX, +}; + +PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_module_c6, "event=0x00"); + +static unsigned long module_msr_mask; + +PMU_EVENT_GROUP(events, cstate_module_c6); + +static struct perf_msr module_msr[] = { + [PERF_CSTATE_MODULE_C6_RES] = { MSR_MODULE_C6_RES_MS, &group_cstate_module_c6, test_msr }, +}; + +static cpumask_t cstate_module_cpu_mask; + static ssize_t cstate_get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf) @@ -279,6 +306,8 @@ static ssize_t cstate_get_attr_cpumask(struct device *dev, return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask); else if (pmu == &cstate_pkg_pmu) return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask); + else if (pmu == &cstate_module_pmu) + return cpumap_print_to_pagebuf(true, buf, &cstate_module_cpu_mask); else return 0; } @@ -316,6 +345,15 @@ static int cstate_pmu_event_init(struct perf_event *event) event->hw.event_base = pkg_msr[cfg].msr; cpu = cpumask_any_and(&cstate_pkg_cpu_mask, topology_die_cpumask(event->cpu)); + } else if (event->pmu == &cstate_module_pmu) { + if (cfg >= PERF_CSTATE_MODULE_EVENT_MAX) + return -EINVAL; + cfg = array_index_nospec((unsigned long)cfg, 
PERF_CSTATE_MODULE_EVENT_MAX); + if (!(module_msr_mask & (1 << cfg))) + return -EINVAL; + event->hw.event_base = module_msr[cfg].msr; + cpu = cpumask_any_and(&cstate_module_cpu_mask, + topology_cluster_cpumask(event->cpu)); } else { return -ENOENT; } @@ -403,6 +441,17 @@ static int cstate_cpu_exit(unsigned int cpu) perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target); } } + + if (has_cstate_module && + cpumask_test_and_clear_cpu(cpu, &cstate_module_cpu_mask)) { + + target = cpumask_any_but(topology_cluster_cpumask(cpu), cpu); + /* Migrate events if there is a valid target */ + if (target < nr_cpu_ids) { + cpumask_set_cpu(target, &cstate_module_cpu_mask); + perf_pmu_migrate_context(&cstate_module_pmu, cpu, target); + } + } return 0; } @@ -429,6 +478,15 @@ static int cstate_cpu_init(unsigned int cpu) if (has_cstate_pkg && target >= nr_cpu_ids) cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask); + /* + * If this is the first online thread of that cluster, set it + * in the cluster cpu mask as the designated reader. 
+ */ + target = cpumask_any_and(&cstate_module_cpu_mask, + topology_cluster_cpumask(cpu)); + if (has_cstate_module && target >= nr_cpu_ids) + cpumask_set_cpu(cpu, &cstate_module_cpu_mask); + return 0; } @@ -451,6 +509,11 @@ static const struct attribute_group *pkg_attr_update[] = { NULL, }; +static const struct attribute_group *module_attr_update[] = { + &group_cstate_module_c6, + NULL +}; + static struct pmu cstate_core_pmu = { .attr_groups = cstate_attr_groups, .attr_update = core_attr_update, @@ -481,6 +544,21 @@ static struct pmu cstate_pkg_pmu = { .module = THIS_MODULE, }; +static struct pmu cstate_module_pmu = { + .attr_groups = cstate_attr_groups, + .attr_update = module_attr_update, + .name = "cstate_module", + .task_ctx_nr = perf_invalid_context, + .event_init = cstate_pmu_event_init, + .add = cstate_pmu_event_add, + .del = cstate_pmu_event_del, + .start = cstate_pmu_event_start, + .stop = cstate_pmu_event_stop, + .read = cstate_pmu_event_update, + .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, + .module = THIS_MODULE, +}; + static const struct cstate_model nhm_cstates __initconst = { .core_events = BIT(PERF_CSTATE_CORE_C3_RES) | BIT(PERF_CSTATE_CORE_C6_RES), @@ -593,6 +671,15 @@ static const struct cstate_model glm_cstates __initconst = { BIT(PERF_CSTATE_PKG_C10_RES), }; +static const struct cstate_model srf_cstates __initconst = { + .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | + BIT(PERF_CSTATE_CORE_C6_RES), + + .pkg_events = BIT(PERF_CSTATE_PKG_C6_RES), + + .module_events = BIT(PERF_CSTATE_MODULE_C6_RES), +}; + static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_cstates), @@ -645,6 +732,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &glm_cstates), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &glm_cstates), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_cstates), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, 
&srf_cstates), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_cstates), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates), @@ -686,10 +774,14 @@ static int __init cstate_probe(const struct cstate_model *cm) pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX, true, (void *) &cm->pkg_events); + module_msr_mask = perf_msr_probe(module_msr, PERF_CSTATE_MODULE_EVENT_MAX, + true, (void *) &cm->module_events); + has_cstate_core = !!core_msr_mask; has_cstate_pkg = !!pkg_msr_mask; + has_cstate_module = !!module_msr_mask; - return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV; + return (has_cstate_core || has_cstate_pkg || has_cstate_module) ? 0 : -ENODEV; } static inline void cstate_cleanup(void) @@ -702,6 +794,9 @@ static inline void cstate_cleanup(void) if (has_cstate_pkg) perf_pmu_unregister(&cstate_pkg_pmu); + + if (has_cstate_module) + perf_pmu_unregister(&cstate_module_pmu); } static int __init cstate_init(void) @@ -738,6 +833,16 @@ static int __init cstate_init(void) return err; } } + + if (has_cstate_module) { + err = perf_pmu_register(&cstate_module_pmu, cstate_module_pmu.name, -1); + if (err) { + has_cstate_module = false; + pr_info("Failed to register cstate cluster pmu\n"); + cstate_cleanup(); + return err; + } + } return 0; } -- Gitee From 2d3d740ada80b7d8dfae096254056cf53e409483 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 16 Nov 2023 06:22:45 -0800 Subject: [PATCH 0032/2138] perf/x86/intel/cstate: Add Grand Ridge support ANBZ: #8007 commit bbb968696d0f3442ab823598def3b756cf4735c6 upstream. The same as the Sierra Forest, the Grand Ridge supports core C1/C6 and module C6. But it doesn't support pkg C6 residency counter. Intel-SIG: commit bbb968696d0f perf/x86/intel/cstate: Add Grand Ridge support Backport Sierra Forrest(SRF) perf cstate support to kernel v6.6. 
Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20231116142245.1233485-4-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2681 --- arch/x86/events/intel/cstate.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index a179c2e27648..d7ce7eec0cd2 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -41,7 +41,7 @@ * MSR_CORE_C1_RES: CORE C1 Residency Counter * perf code: 0x00 * Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL - * MTL,SRF + * MTL,SRF,GRR * Scope: Core (each processor core has a MSR) * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter * perf code: 0x01 @@ -52,7 +52,8 @@ * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX, - * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF + * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF, + * GRR * Scope: Core * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter * perf code: 0x03 @@ -98,7 +99,7 @@ * Scope: Package (physical package) * MSR_MODULE_C6_RES_MS: Module C6 Residency Counter. 
  * perf code: 0x00
- * Available model: SRF
+ * Available model: SRF,GRR
  * Scope: A cluster of cores shared L2 cache
  *
  */
@@ -671,6 +672,13 @@ static const struct cstate_model glm_cstates __initconst = {
 					  BIT(PERF_CSTATE_PKG_C10_RES),
 };
 
+static const struct cstate_model grr_cstates __initconst = {
+	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
+				  BIT(PERF_CSTATE_CORE_C6_RES),
+
+	.module_events		= BIT(PERF_CSTATE_MODULE_C6_RES),
+};
+
 static const struct cstate_model srf_cstates __initconst = {
 	.core_events		= BIT(PERF_CSTATE_CORE_C1_RES) |
 				  BIT(PERF_CSTATE_CORE_C6_RES),
@@ -733,6 +741,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	&glm_cstates),
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT,	&adl_cstates),
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X,	&srf_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT,	&grr_cstates),
 
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		&icl_cstates),
 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,		&icl_cstates),
-- 
Gitee


From 71e994adc1d869d063137dbb66e197cf7ae2e525 Mon Sep 17 00:00:00 2001
From: Barry Song
Date: Sat, 23 Dec 2023 11:40:56 +0800
Subject: [PATCH 0033/2138] sched: Add cpus_share_resources API

ANBZ: #8001

commit b95303e0aeaf446b65169dd4142cacdaeb7d4c8b upstream.

Add cpus_share_resources() API. This is the preparation for the
optimization of select_idle_cpu() on platforms with cluster scheduler
level.

On a machine with clusters cpus_share_resources() will test whether
two cpus are within the same cluster. On a non-cluster machine it
will behave the same as cpus_share_cache(). So we use "resources"
here for cache resources.

Intel-SIG: commit b95303e0aeaf sched: Add cpus_share_resources API.
Cluster based task wakeup optimization backport.

Signed-off-by: Barry Song
Signed-off-by: Yicong Yang
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Gautham R.
Shenoy Reviewed-by: Tim Chen Reviewed-by: Vincent Guittot Tested-and-reviewed-by: Chen Yu Tested-by: K Prateek Nayak Link: https://lkml.kernel.org/r/20231019033323.54147-2-yangyicong@huawei.com [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2678 --- include/linux/sched/sd_flags.h | 7 +++++++ include/linux/sched/topology.h | 8 +++++++- kernel/sched/core.c | 12 ++++++++++++ kernel/sched/sched.h | 1 + kernel/sched/topology.c | 13 +++++++++++++ 5 files changed, 40 insertions(+), 1 deletion(-) diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h index fad77b5172e2..a8b28647aafc 100644 --- a/include/linux/sched/sd_flags.h +++ b/include/linux/sched/sd_flags.h @@ -109,6 +109,13 @@ SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS) */ SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS) +/* + * Domain members share CPU cluster (LLC tags or L2 cache) + * + * NEEDS_GROUPS: Clusters are shared between groups. + */ +SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS) + /* * Domain members share CPU package resources (i.e. 
caches) * diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 67b573d5bf28..4c14fe127223 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -45,7 +45,7 @@ static inline int cpu_smt_flags(void) #ifdef CONFIG_SCHED_CLUSTER static inline int cpu_cluster_flags(void) { - return SD_SHARE_PKG_RESOURCES; + return SD_CLUSTER | SD_SHARE_PKG_RESOURCES; } #endif @@ -179,6 +179,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms); void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); bool cpus_share_cache(int this_cpu, int that_cpu); +bool cpus_share_resources(int this_cpu, int that_cpu); typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); typedef int (*sched_domain_flags_f)(void); @@ -232,6 +233,11 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu) return true; } +static inline bool cpus_share_resources(int this_cpu, int that_cpu) +{ + return true; +} + #endif /* !CONFIG_SMP */ #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 86606fb9e6bc..f02dbe357801 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3956,6 +3956,18 @@ bool cpus_share_cache(int this_cpu, int that_cpu) return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); } +/* + * Whether CPUs are share cache resources, which means LLC on non-cluster + * machines and LLC tag or L2 on machines with clusters. 
+ */ +bool cpus_share_resources(int this_cpu, int that_cpu) +{ + if (this_cpu == that_cpu) + return true; + + return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu); +} + static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) { /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d48c6a292a83..1c94253c242e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1869,6 +1869,7 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); DECLARE_PER_CPU(int, sd_llc_size); DECLARE_PER_CPU(int, sd_llc_id); +DECLARE_PER_CPU(int, sd_share_id); DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 3a13cecf1774..1a6e0485018e 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -666,6 +666,7 @@ static void destroy_sched_domains(struct sched_domain *sd) DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc); DEFINE_PER_CPU(int, sd_llc_size); DEFINE_PER_CPU(int, sd_llc_id); +DEFINE_PER_CPU(int, sd_share_id); DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); @@ -691,6 +692,17 @@ static void update_top_cache_domain(int cpu) per_cpu(sd_llc_id, cpu) = id; rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); + sd = lowest_flag_domain(cpu, SD_CLUSTER); + if (sd) + id = cpumask_first(sched_domain_span(sd)); + + /* + * This assignment should be placed after the sd_llc_id as + * we want this id equals to cluster id on cluster machines + * but equals to LLC id on non-Cluster machines. 
+ */ + per_cpu(sd_share_id, cpu) = id; + sd = lowest_flag_domain(cpu, SD_NUMA); rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); @@ -1548,6 +1560,7 @@ static struct cpumask ***sched_domains_numa_masks; */ #define TOPOLOGY_SD_FLAGS \ (SD_SHARE_CPUCAPACITY | \ + SD_CLUSTER | \ SD_SHARE_PKG_RESOURCES | \ SD_NUMA | \ SD_ASYM_PACKING) -- Gitee From 7217bba277bdae819e88a6f288a8d0456f91ef5c Mon Sep 17 00:00:00 2001 From: Barry Song Date: Sat, 23 Dec 2023 11:51:13 +0800 Subject: [PATCH 0034/2138] sched/fair: Scan cluster before scanning LLC in wake-up path ANBZ: #8001 commit 8881e1639f1f899b64e9bccf6cc14d51c1d3c822 upstream. For platforms having clusters like Kunpeng920, CPUs within the same cluster have lower latency when synchronizing and accessing shared resources like cache. Thus, this patch tries to find an idle cpu within the cluster of the target CPU before scanning the whole LLC to gain lower latency. This will be implemented in 2 steps in select_idle_sibling(): 1. When the prev_cpu/recent_used_cpu are good wakeup candidates, use them if they're sharing cluster with the target CPU. Otherwise trying to scan for an idle CPU in the target's cluster. 2. Scanning the cluster prior to the LLC of the target CPU for an idle CPU to wakeup. Testing has been done on Kunpeng920 by pinning tasks to one numa and two numa. On Kunpeng920, Each numa has 8 clusters and each cluster has 4 CPUs. 
With this patch, We noticed enhancement on tbench and netperf within one numa or cross two numa on top of tip-sched-core commit 9b46f1abc6d4 ("sched/debug: Print 'tgid' in sched_show_task()") tbench results (node 0): baseline patched 1: 327.2833 372.4623 ( 13.80%) 4: 1320.5933 1479.8833 ( 12.06%) 8: 2638.4867 2921.5267 ( 10.73%) 16: 5282.7133 5891.5633 ( 11.53%) 32: 9810.6733 9877.3400 ( 0.68%) 64: 7408.9367 7447.9900 ( 0.53%) 128: 6203.2600 6191.6500 ( -0.19%) tbench results (node 0-1): baseline patched 1: 332.0433 372.7223 ( 12.25%) 4: 1325.4667 1477.6733 ( 11.48%) 8: 2622.9433 2897.9967 ( 10.49%) 16: 5218.6100 5878.2967 ( 12.64%) 32: 10211.7000 11494.4000 ( 12.56%) 64: 13313.7333 16740.0333 ( 25.74%) 128: 13959.1000 14533.9000 ( 4.12%) netperf results TCP_RR (node 0): baseline patched 1: 76546.5033 90649.9867 ( 18.42%) 4: 77292.4450 90932.7175 ( 17.65%) 8: 77367.7254 90882.3467 ( 17.47%) 16: 78519.9048 90938.8344 ( 15.82%) 32: 72169.5035 72851.6730 ( 0.95%) 64: 25911.2457 25882.2315 ( -0.11%) 128: 10752.6572 10768.6038 ( 0.15%) netperf results TCP_RR (node 0-1): baseline patched 1: 76857.6667 90892.2767 ( 18.26%) 4: 78236.6475 90767.3017 ( 16.02%) 8: 77929.6096 90684.1633 ( 16.37%) 16: 77438.5873 90502.5787 ( 16.87%) 32: 74205.6635 88301.5612 ( 19.00%) 64: 69827.8535 71787.6706 ( 2.81%) 128: 25281.4366 25771.3023 ( 1.94%) netperf results UDP_RR (node 0): baseline patched 1: 96869.8400 110800.8467 ( 14.38%) 4: 97744.9750 109680.5425 ( 12.21%) 8: 98783.9863 110409.9637 ( 11.77%) 16: 99575.0235 110636.2435 ( 11.11%) 32: 95044.7250 97622.8887 ( 2.71%) 64: 32925.2146 32644.4991 ( -0.85%) 128: 12859.2343 12824.0051 ( -0.27%) netperf results UDP_RR (node 0-1): baseline patched 1: 97202.4733 110190.1200 ( 13.36%) 4: 95954.0558 106245.7258 ( 10.73%) 8: 96277.1958 105206.5304 ( 9.27%) 16: 97692.7810 107927.2125 ( 10.48%) 32: 79999.6702 103550.2999 ( 29.44%) 64: 80592.7413 87284.0856 ( 8.30%) 128: 27701.5770 29914.5820 ( 7.99%) Note neither Kunpeng920 nor x86 Jacobsville 
supports SMT, so the SMT branch in the code has not been tested but it supposed to work. Chen Yu also noticed this will improve the performance of tbench and netperf on a 24 CPUs Jacobsville machine, there are 4 CPUs in one cluster sharing L2 Cache. Intel-SIG: commit 8881e1639f1f sched/fair: Scan cluster before scanning LLC in wake-up path. Cluster based task wakeup optimization backport. [https://lore.kernel.org/lkml/Ytfjs+m1kUs0ScSn@worktop.programming.kicks-ass.net] Suggested-by: Peter Zijlstra Signed-off-by: Barry Song Signed-off-by: Yicong Yang Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Tim Chen Reviewed-by: Chen Yu Reviewed-by: Gautham R. Shenoy Reviewed-by: Vincent Guittot Tested-and-reviewed-by: Chen Yu Tested-by: Yicong Yang Link: https://lkml.kernel.org/r/20231019033323.54147-3-yangyicong@huawei.com [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2678 --- kernel/sched/fair.c | 40 ++++++++++++++++++++++++++++++++++++---- kernel/sched/sched.h | 1 + kernel/sched/topology.c | 12 ++++++++++++ 3 files changed, 49 insertions(+), 4 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3b2cfdb8d788..4d4fdf0023f5 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7444,6 +7444,30 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool } } + if (static_branch_unlikely(&sched_cluster_active)) { + struct sched_group *sg = sd->groups; + + if (sg->flags & SD_CLUSTER) { + for_each_cpu_wrap(cpu, sched_group_span(sg), target + 1) { + if (!cpumask_test_cpu(cpu, cpus)) + continue; + + if (has_idle_core) { + i = select_idle_core(p, cpu, cpus, &idle_cpu); + if ((unsigned int)i < nr_cpumask_bits) + return i; + } else { + if (--nr <= 0) + return -1; + idle_cpu = __select_idle_cpu(cpu, p); + if ((unsigned int)idle_cpu < nr_cpumask_bits) + return idle_cpu; + } + } + cpumask_andnot(cpus, cpus, sched_group_span(sg)); + } + } + 
for_each_cpu_wrap(cpu, cpus, target + 1) { if (has_idle_core) { i = select_idle_core(p, cpu, cpus, &idle_cpu); @@ -7451,7 +7475,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool return i; } else { - if (!--nr) + if (--nr <= 0) return -1; idle_cpu = __select_idle_cpu(cpu, p); if ((unsigned int)idle_cpu < nr_cpumask_bits) @@ -7580,8 +7604,12 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) */ if (prev != target && cpus_share_cache(prev, target) && (available_idle_cpu(prev) || sched_idle_cpu(prev)) && - asym_fits_cpu(task_util, util_min, util_max, prev)) - return prev; + asym_fits_cpu(task_util, util_min, util_max, prev)) { + + if (!static_branch_unlikely(&sched_cluster_active) || + cpus_share_resources(prev, target)) + return prev; + } /* * Allow a per-cpu kthread to stack with the wakee if the @@ -7608,7 +7636,11 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) && cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) && asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) { - return recent_used_cpu; + + if (!static_branch_unlikely(&sched_cluster_active) || + cpus_share_resources(recent_used_cpu, target)) + return recent_used_cpu; + } /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 1c94253c242e..ec7dd031f1ab 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1875,6 +1875,7 @@ DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); extern struct static_key_false sched_asym_cpucapacity; +extern struct static_key_false sched_cluster_active; static __always_inline bool sched_asym_cpucap_active(void) { diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 1a6e0485018e..bc4625de1136 100644 --- a/kernel/sched/topology.c +++ 
b/kernel/sched/topology.c @@ -671,7 +671,9 @@ DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); + DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity); +DEFINE_STATIC_KEY_FALSE(sched_cluster_active); static void update_top_cache_domain(int cpu) { @@ -2382,6 +2384,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att struct rq *rq = NULL; int i, ret = -ENOMEM; bool has_asym = false; + bool has_cluster = false; if (WARN_ON(cpumask_empty(cpu_map))) goto error; @@ -2507,12 +2510,18 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig); cpu_attach_domain(sd, d.rd, i); + + if (lowest_flag_domain(i, SD_CLUSTER)) + has_cluster = true; } rcu_read_unlock(); if (has_asym) static_branch_inc_cpuslocked(&sched_asym_cpucapacity); + if (has_cluster) + static_branch_inc_cpuslocked(&sched_cluster_active); + if (rq && sched_debug_verbose) { pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n", cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); @@ -2612,6 +2621,9 @@ static void detach_destroy_domains(const struct cpumask *cpu_map) if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu))) static_branch_dec_cpuslocked(&sched_asym_cpucapacity); + if (static_branch_unlikely(&sched_cluster_active)) + static_branch_dec_cpuslocked(&sched_cluster_active); + rcu_read_lock(); for_each_cpu(i, cpu_map) cpu_attach_domain(NULL, &def_root_domain, i); -- Gitee From a434634eef6a701b099cd3f1ceaf7fa207990df6 Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Thu, 19 Oct 2023 11:33:23 +0800 Subject: [PATCH 0035/2138] sched/fair: Use candidate prev/recent_used CPU if scanning failed for cluster wakeup ANBZ: #8001 commit 22165f61d0c4092adf40f967c899e5d8b8a0d703 upstream. 
Chen Yu reports a hackbench regression of cluster wakeup when hackbench threads equal to the CPU number [1]. Analysis shows it's because we wake up more on the target CPU even if the prev_cpu is a good wakeup candidate and leads to the decrease of the CPU utilization. Generally if the task's prev_cpu is idle we'll wake up the task on it without scanning. On cluster machines we'll try to wake up the task in the same cluster of the target for better cache affinity, so if the prev_cpu is idle but not sharing the same cluster with the target we'll still try to find an idle CPU within the cluster. This will improve the performance at low loads on cluster machines. But in the issue above, if the prev_cpu is idle but not in the cluster with the target CPU, we'll try to scan an idle one in the cluster. But since the system is busy, we're likely to fail the scanning and use target instead, even if the prev_cpu is idle. Then leads to the regression. This patch solves this in 2 steps: o record the prev_cpu/recent_used_cpu if they're good wakeup candidates but not sharing the cluster with the target. o on scanning failure use the prev_cpu/recent_used_cpu if they're recorded as idle [1] https://lore.kernel.org/all/ZGzDLuVaHR1PAYDt@chenyu5-mobl1/ Intel-SIG: commit 22165f61d0c4 Use candidate prev/recent_used CPU if scanning failed for cluster wakeup. 
Cluster based task wakeup optimization backport Closes: https://lore.kernel.org/all/ZGsLy83wPIpamy6x@chenyu5-mobl1/ Reported-by: Chen Yu Signed-off-by: Yicong Yang Tested-and-reviewed-by: Chen Yu Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Vincent Guittot Link: https://lkml.kernel.org/r/20231019033323.54147-4-yangyicong@huawei.com [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2678 --- kernel/sched/fair.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 4d4fdf0023f5..42d825758e30 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7577,7 +7577,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) bool has_idle_core = false; struct sched_domain *sd; unsigned long task_util, util_min, util_max; - int i, recent_used_cpu; + int i, recent_used_cpu, prev_aff = -1; /* * On asymmetric system, update task utilization because we will check @@ -7609,6 +7609,8 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) if (!static_branch_unlikely(&sched_cluster_active) || cpus_share_resources(prev, target)) return prev; + + prev_aff = prev; } /* @@ -7641,6 +7643,8 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) cpus_share_resources(recent_used_cpu, target)) return recent_used_cpu; + } else { + recent_used_cpu = -1; } /* @@ -7681,6 +7685,17 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) if ((unsigned)i < nr_cpumask_bits) return i; + /* + * For cluster machines which have lower sharing cache like L2 or + * LLC Tag, we tend to find an idle CPU in the target's cluster + * first. But prev_cpu or recent_used_cpu may also be a good candidate, + * use them if possible when no idle CPU found in select_idle_cpu(). 
+ */ + if ((unsigned int)prev_aff < nr_cpumask_bits) + return prev_aff; + if ((unsigned int)recent_used_cpu < nr_cpumask_bits) + return recent_used_cpu; + return target; } -- Gitee From 4ee3629b39af51a6b8d35f77ef409cf3afb4f808 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Wed, 25 Oct 2023 13:16:19 -0700 Subject: [PATCH 0036/2138] perf: Add branch stack counters ANBZ: #8006 commit 571d91dcadfa3cef499010b4eddb9b58b0da4d24 upstream. Currently, the additional information of a branch entry is stored in a u64 space. With more and more information added, the space is running out. For example, the information of occurrences of events will be added for each branch. Two places were suggested to append the counters. https://lore.kernel.org/lkml/20230802215814.GH231007@hirez.programming.kicks-ass.net/ One place is right after the flags of each branch entry. It changes the existing struct perf_branch_entry. The later ARCH specific implementation has to be really careful to consistently pick the right struct. The other place is right after the entire struct perf_branch_stack. The disadvantage is that the pointer of the extra space has to be recorded. The common interface perf_sample_save_brstack() has to be updated. The latter is much straightforward, and should be easily understood and maintained. It is implemented in the patch. Add a new branch sample type, PERF_SAMPLE_BRANCH_COUNTERS, to indicate the event which is recorded in the branch info. The "u64 counters" may store the occurrences of several events. The information regarding the number of events/counters and the width of each counter should be exposed via sysfs as a reference for the perf tool. Define the branch_counter_nr and branch_counter_width ABI here. The support will be implemented later in the Intel-specific patch. Intel-SIG: commit 571d91dcadfa perf: Add branch stack counters Backport LBR branch counter support to kernel v6.6. 
Suggested-by: Peter Zijlstra (Intel) Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20231025201626.3000228-1-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2680 --- .../testing/sysfs-bus-event_source-devices-caps | 6 ++++++ arch/powerpc/perf/core-book3s.c | 2 +- arch/x86/events/amd/core.c | 2 +- arch/x86/events/core.c | 2 +- arch/x86/events/intel/core.c | 2 +- arch/x86/events/intel/ds.c | 4 ++-- include/linux/perf_event.h | 17 ++++++++++++++++- include/uapi/linux/perf_event.h | 10 ++++++++++ kernel/events/core.c | 8 ++++++++ 9 files changed, 46 insertions(+), 7 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps b/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps index 8757dcf41c08..a5f506f7d481 100644 --- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps +++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps @@ -16,3 +16,9 @@ Description: Example output in powerpc: grep . /sys/bus/event_source/devices/cpu/caps/* /sys/bus/event_source/devices/cpu/caps/pmu_name:POWER9 + + The "branch_counter_nr" in the supported platform exposes the + maximum number of counters which can be shown in the u64 counters + of PERF_SAMPLE_BRANCH_COUNTERS, while the "branch_counter_width" + exposes the width of each counter. Both of them can be used by + the perf tool to parse the logged counters in each branch. 
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 10b946e9c6e7..b7ff680cde96 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2312,7 +2312,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, struct cpu_hw_events *cpuhw; cpuhw = this_cpu_ptr(&cpu_hw_events); power_pmu_bhrb_read(event, cpuhw); - perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack); + perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack, NULL); } if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC && diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index aa8fc2cf1bde..8411b91e1307 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -954,7 +954,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs) continue; if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 150a365b4fbc..9cbffdda5d85 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1705,7 +1705,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs) perf_sample_data_init(&data, 0, event->hw.last_period); if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 37c8badd2701..c59a0699f3dc 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3058,7 +3058,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) perf_sample_data_init(&data, 0, event->hw.last_period); if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); + 
perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index d9a51b638931..b8c7a1e02dfa 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1755,7 +1755,7 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event, setup_pebs_time(event, data, pebs->tsc); if (has_branch_stack(event)) - perf_sample_save_brstack(data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); } static void adaptive_pebs_save_regs(struct pt_regs *regs, @@ -1916,7 +1916,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, if (has_branch_stack(event)) { intel_pmu_store_pebs_lbrs(lbr); - perf_sample_save_brstack(data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); } } diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 7a5563ffe61b..3243444d5429 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1144,6 +1144,10 @@ static inline bool branch_sample_priv(const struct perf_event *event) return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE; } +static inline bool branch_sample_counters(const struct perf_event *event) +{ + return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS; +} struct perf_sample_data { /* @@ -1178,6 +1182,7 @@ struct perf_sample_data { struct perf_callchain_entry *callchain; struct perf_raw_record *raw; struct perf_branch_stack *br_stack; + u64 *br_stack_cntr; union perf_sample_weight weight; union perf_mem_data_src data_src; u64 txn; @@ -1255,7 +1260,8 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data, static inline void perf_sample_save_brstack(struct perf_sample_data *data, struct perf_event *event, - struct perf_branch_stack *brs) + struct perf_branch_stack *brs, + u64 *brs_cntr) { int size 
= sizeof(u64); /* nr */ @@ -1263,7 +1269,16 @@ static inline void perf_sample_save_brstack(struct perf_sample_data *data, size += sizeof(u64); size += brs->nr * sizeof(struct perf_branch_entry); + /* + * The extension space for counters is appended after the + * struct perf_branch_stack. It is used to store the occurrences + * of events of each branch. + */ + if (brs_cntr) + size += brs->nr * sizeof(u64); + data->br_stack = brs; + data->br_stack_cntr = brs_cntr; data->dyn_size += size; data->sample_flags |= PERF_SAMPLE_BRANCH_STACK; } diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 39c6a250dd1b..4461f380425b 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -204,6 +204,8 @@ enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */ + PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */ + PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ }; @@ -235,6 +237,8 @@ enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; @@ -982,6 +986,12 @@ enum perf_event_type { * { u64 nr; * { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX * { u64 from, to, flags } lbr[nr]; + * # + * # The format of the counters is decided by the + * # "branch_counter_nr" and "branch_counter_width", + * # which are defined in the ABI. 
+ * # + * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS * } && PERF_SAMPLE_BRANCH_STACK * * { u64 abi; # enum perf_sample_regs_abi diff --git a/kernel/events/core.c b/kernel/events/core.c index ec0fae49a0dd..264e3bcda783 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7440,6 +7440,14 @@ void perf_output_sample(struct perf_output_handle *handle, if (branch_sample_hw_index(event)) perf_output_put(handle, data->br_stack->hw_idx); perf_output_copy(handle, data->br_stack->entries, size); + /* + * Add the extension space which is appended + * right after the struct perf_branch_stack. + */ + if (data->br_stack_cntr) { + size = data->br_stack->nr * sizeof(u64); + perf_output_copy(handle, data->br_stack_cntr, size); + } } else { /* * we always store at least the value of nr -- Gitee From 81aa8fd808e7200ac7e1fe5805e6ef7c719bcd4a Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Wed, 25 Oct 2023 13:16:20 -0700 Subject: [PATCH 0037/2138] perf/x86: Add PERF_X86_EVENT_NEEDS_BRANCH_STACK flag ANBZ: #8006 commit 85846b27072defc7ab3dcee7ff36563a040079dc upstream. Currently, branch_sample_type !=0 is used to check whether a branch stack setup is required. But it doesn't check the sample type, unnecessary branch stack setup may be done for a counting event. E.g., perf record -e "{branch-instructions,branch-misses}:S" -j any Also, the event only with the new PERF_SAMPLE_BRANCH_COUNTERS branch sample type may not require a branch stack setup either. Add a new flag NEEDS_BRANCH_STACK to indicate whether the event requires a branch stack setup. Replace the needs_branch_stack() by checking the new flag. The counting event check is implemented here. The later patch will take the new PERF_SAMPLE_BRANCH_COUNTERS into account. Intel-SIG: commit 85846b27072d perf/x86: Add PERF_X86_EVENT_NEEDS_BRANCH_STACK flag Backport LBR branch counter support to kernel v6.6. 
Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20231025201626.3000228-2-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2680 --- arch/x86/events/intel/core.c | 14 +++++++++++--- arch/x86/events/perf_event_flags.h | 1 + 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index c59a0699f3dc..58374e7788a8 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2518,9 +2518,14 @@ static void intel_pmu_assign_event(struct perf_event *event, int idx) perf_report_aux_output_id(event, idx); } +static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event) +{ + return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK; +} + static void intel_pmu_del_event(struct perf_event *event) { - if (needs_branch_stack(event)) + if (intel_pmu_needs_branch_stack(event)) intel_pmu_lbr_del(event); if (event->attr.precise_ip) intel_pmu_pebs_del(event); @@ -2831,7 +2836,7 @@ static void intel_pmu_add_event(struct perf_event *event) { if (event->attr.precise_ip) intel_pmu_pebs_add(event); - if (needs_branch_stack(event)) + if (intel_pmu_needs_branch_stack(event)) intel_pmu_lbr_add(event); } @@ -3908,7 +3913,10 @@ static int intel_pmu_hw_config(struct perf_event *event) x86_pmu.pebs_aliases(event); } - if (needs_branch_stack(event)) { + if (needs_branch_stack(event) && is_sampling_event(event)) + event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK; + + if (intel_pmu_needs_branch_stack(event)) { ret = intel_pmu_setup_lbr_filter(event); if (ret) return ret; diff --git a/arch/x86/events/perf_event_flags.h b/arch/x86/events/perf_event_flags.h index 1dc19b9b4426..a1685981c520 100644 --- a/arch/x86/events/perf_event_flags.h +++ b/arch/x86/events/perf_event_flags.h @@ -20,3 +20,4 @@ PERF_ARCH(TOPDOWN, 0x04000) /* Count Topdown 
slots/metrics events */ PERF_ARCH(PEBS_STLAT, 0x08000) /* st+stlat data address sampling */ PERF_ARCH(AMD_BRS, 0x10000) /* AMD Branch Sampling */ PERF_ARCH(PEBS_LAT_HYBRID, 0x20000) /* ld and st lat for hybrid */ +PERF_ARCH(NEEDS_BRANCH_STACK, 0x40000) /* require branch stack setup */ -- Gitee From 462dc5431e76fadbb368bcace16bae99ff6b1455 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Wed, 25 Oct 2023 13:16:21 -0700 Subject: [PATCH 0038/2138] perf: Add branch_sample_call_stack ANBZ: #8006 commit 1f2376cd03dd3b965d130ed46a7c92769d614ba1 upstream. Add a helper function to check call stack sample type. The later patch will invoke the function in several places. Intel-SIG: commit 1f2376cd03dd perf: Add branch_sample_call_stack Backport LBR branch counter support to kernel v6.6. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20231025201626.3000228-3-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2680 --- arch/x86/events/core.c | 2 +- include/linux/perf_event.h | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 9cbffdda5d85..9c3ccd975858 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -603,7 +603,7 @@ int x86_pmu_hw_config(struct perf_event *event) } } - if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK) + if (branch_sample_call_stack(event)) event->attach_state |= PERF_ATTACH_TASK_DATA; /* diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 3243444d5429..43538ac32380 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1149,6 +1149,11 @@ static inline bool branch_sample_counters(const struct perf_event *event) return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS; } +static inline bool branch_sample_call_stack(const struct perf_event *event) +{ 
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK; +} + struct perf_sample_data { /* * Fields set by perf_sample_data_init() unconditionally, -- Gitee From b5fe2d256171fd00bbf0deba356864217070d477 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Wed, 25 Oct 2023 13:16:22 -0700 Subject: [PATCH 0039/2138] perf/x86/intel: Reorganize attrs and is_visible ANBZ: #8006 commit 318c4985911245508f7e0bab5265e208a38b5f18 upstream. Some attrs and is_visible implementations are rather far away from one another which makes the whole thing hard to interpret. There are only two attribute groups which have both .attrs and .is_visible, group_default and group_caps_lbr. Move them together. No functional changes. Intel-SIG: commit 318c49859112 perf/x86/intel: Reorganize attrs and is_visible Backport LBR branch counter support to kernel v6.6. Suggested-by: Peter Zijlstra (Intel) Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20231025201626.3000228-4-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2680 --- arch/x86/events/intel/core.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 58374e7788a8..887f4f2b3f48 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -5499,6 +5499,12 @@ static struct attribute *lbr_attrs[] = { NULL }; +static umode_t +lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + return x86_pmu.lbr_nr ? 
attr->mode : 0; +} + static char pmu_name_str[30]; static ssize_t pmu_name_show(struct device *cdev, @@ -5525,6 +5531,15 @@ static struct attribute *intel_pmu_attrs[] = { NULL, }; +static umode_t +default_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + if (attr == &dev_attr_allow_tsx_force_abort.attr) + return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0; + + return attr->mode; +} + static umode_t tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i) { @@ -5546,27 +5561,12 @@ mem_is_visible(struct kobject *kobj, struct attribute *attr, int i) return pebs_is_visible(kobj, attr, i); } -static umode_t -lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) -{ - return x86_pmu.lbr_nr ? attr->mode : 0; -} - static umode_t exra_is_visible(struct kobject *kobj, struct attribute *attr, int i) { return x86_pmu.version >= 2 ? attr->mode : 0; } -static umode_t -default_is_visible(struct kobject *kobj, struct attribute *attr, int i) -{ - if (attr == &dev_attr_allow_tsx_force_abort.attr) - return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0; - - return attr->mode; -} - static umode_t td_is_visible(struct kobject *kobj, struct attribute *attr, int i) { -- Gitee From 58784584b06b241cb31ebef095ec4c1c81219950 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Wed, 25 Oct 2023 13:16:23 -0700 Subject: [PATCH 0040/2138] perf/x86/intel: Support branch counters logging ANBZ: #8006 commit 33744916196b4ed7a50f6f47af7c3ad46b730ce6 upstream. The branch counters logging (A.K.A LBR event logging) introduces a per-counter indication of precise event occurrences in LBRs. It can provide a means to attribute exposed retirement latency to combinations of events across a block of instructions. It also provides a means of attributing Timed LBR latencies to events. The feature is first introduced on SRF/GRR. It is an enhancement of the ARCH LBR. It adds new fields in the LBR_INFO MSRs to log the occurrences of events on the GP counters. 
The information is displayed by the order of counters. The design proposed in this patch requires that the events which are logged must be in a group with the event that has LBR. If there are more than one LBR group, the counters logging information only from the current group (overflowed) are stored for the perf tool, otherwise the perf tool cannot know which and when other groups are scheduled especially when multiplexing is triggered. The user can ensure it uses the maximum number of counters that support LBR info (4 by now) by making the group large enough. The HW only logs events by the order of counters. The order may be different from the order of enabling which the perf tool can understand. When parsing the information of each branch entry, convert the counter order to the enabled order, and store the enabled order in the extension space. Unconditionally reset LBRs for an LBR event group when it's deleted. The logged counter information is only valid for the current LBR group. If another LBR group is scheduled later, the information from the stale LBRs would be otherwise wrongly interpreted. Add a sanity check in intel_pmu_hw_config(). Disable the feature if other counter filters (inv, cmask, edge, in_tx) are set or LBR call stack mode is enabled. (For the LBR call stack mode, we cannot simply flush the LBR, since it will break the call stack. Also, there is no obvious usage with the call stack mode for now.) Only applying the PERF_SAMPLE_BRANCH_COUNTERS doesn't require any branch stack setup. Expose the maximum number of supported counters and the width of the counters into the sysfs. The perf tool can use the information to parse the logged counters in each branch. Intel-SIG: commit 33744916196b perf/x86/intel: Support branch counters logging Backport LBR branch counter support to kernel v6.6. 
Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20231025201626.3000228-5-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2680 --- arch/x86/events/intel/core.c | 103 +++++++++++++++++++++++++++-- arch/x86/events/intel/ds.c | 2 +- arch/x86/events/intel/lbr.c | 85 +++++++++++++++++++++++- arch/x86/events/perf_event.h | 12 ++++ arch/x86/events/perf_event_flags.h | 1 + arch/x86/include/asm/msr-index.h | 5 ++ arch/x86/include/asm/perf_event.h | 4 ++ include/uapi/linux/perf_event.h | 3 + 8 files changed, 207 insertions(+), 8 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 887f4f2b3f48..d84979a01902 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2803,6 +2803,7 @@ static void intel_pmu_enable_fixed(struct perf_event *event) static void intel_pmu_enable_event(struct perf_event *event) { + u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE; struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; @@ -2811,8 +2812,10 @@ static void intel_pmu_enable_event(struct perf_event *event) switch (idx) { case 0 ... INTEL_PMC_IDX_FIXED - 1: + if (branch_sample_counters(event)) + enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR; intel_set_masks(event, idx); - __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); + __x86_pmu_enable_event(hwc, enable_mask); break; case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: case INTEL_PMC_IDX_METRIC_BASE ... 
INTEL_PMC_IDX_METRIC_END: @@ -3063,7 +3066,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) perf_sample_data_init(&data, 0, event->hw.last_period); if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); + intel_pmu_lbr_save_brstack(&data, cpuc, event); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); @@ -3628,6 +3631,13 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, if (cpuc->excl_cntrs) return intel_get_excl_constraints(cpuc, event, idx, c2); + /* Not all counters support the branch counter feature. */ + if (branch_sample_counters(event)) { + c2 = dyn_constraint(cpuc, c2, idx); + c2->idxmsk64 &= x86_pmu.lbr_counters; + c2->weight = hweight64(c2->idxmsk64); + } + return c2; } @@ -3916,6 +3926,58 @@ static int intel_pmu_hw_config(struct perf_event *event) if (needs_branch_stack(event) && is_sampling_event(event)) event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK; + if (branch_sample_counters(event)) { + struct perf_event *leader, *sibling; + int num = 0; + + if (!(x86_pmu.flags & PMU_FL_BR_CNTR) || + (event->attr.config & ~INTEL_ARCH_EVENT_MASK)) + return -EINVAL; + + /* + * The branch counter logging is not supported in the call stack + * mode yet, since we cannot simply flush the LBR during e.g., + * multiplexing. Also, there is no obvious usage with the call + * stack mode. Simply forbids it for now. + * + * If any events in the group enable the branch counter logging + * feature, the group is treated as a branch counter logging + * group, which requires the extra space to store the counters. 
+ */ + leader = event->group_leader; + if (branch_sample_call_stack(leader)) + return -EINVAL; + if (branch_sample_counters(leader)) + num++; + leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS; + + for_each_sibling_event(sibling, leader) { + if (branch_sample_call_stack(sibling)) + return -EINVAL; + if (branch_sample_counters(sibling)) + num++; + } + + if (num > fls(x86_pmu.lbr_counters)) + return -EINVAL; + /* + * Only applying the PERF_SAMPLE_BRANCH_COUNTERS doesn't + * require any branch stack setup. + * Clear the bit to avoid unnecessary branch stack setup. + */ + if (0 == (event->attr.branch_sample_type & + ~(PERF_SAMPLE_BRANCH_PLM_ALL | + PERF_SAMPLE_BRANCH_COUNTERS))) + event->hw.flags &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK; + + /* + * Force the leader to be a LBR event. So LBRs can be reset + * with the leader event. See intel_pmu_lbr_del() for details. + */ + if (!intel_pmu_needs_branch_stack(leader)) + return -EINVAL; + } + if (intel_pmu_needs_branch_stack(event)) { ret = intel_pmu_setup_lbr_filter(event); if (ret) @@ -4399,8 +4461,13 @@ cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, */ if (event->attr.precise_ip == 3) { /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */ - if (constraint_match(&fixed0_constraint, event->hw.config)) - return &fixed0_counter0_1_constraint; + if (constraint_match(&fixed0_constraint, event->hw.config)) { + /* The fixed counter 0 doesn't support LBR event logging. 
*/ + if (branch_sample_counters(event)) + return &counter0_1_constraint; + else + return &fixed0_counter0_1_constraint; + } switch (c->idxmsk64 & 0x3ull) { case 0x1: @@ -4597,7 +4664,7 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) goto err; } - if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) { + if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_BR_CNTR)) { size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); @@ -5494,15 +5561,39 @@ static ssize_t branches_show(struct device *cdev, static DEVICE_ATTR_RO(branches); +static ssize_t branch_counter_nr_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters)); +} + +static DEVICE_ATTR_RO(branch_counter_nr); + +static ssize_t branch_counter_width_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS); +} + +static DEVICE_ATTR_RO(branch_counter_width); + static struct attribute *lbr_attrs[] = { &dev_attr_branches.attr, + &dev_attr_branch_counter_nr.attr, + &dev_attr_branch_counter_width.attr, NULL }; static umode_t lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { - return x86_pmu.lbr_nr ? attr->mode : 0; + /* branches */ + if (i == 0) + return x86_pmu.lbr_nr ? attr->mode : 0; + + return (x86_pmu.flags & PMU_FL_BR_CNTR) ? 
attr->mode : 0; } static char pmu_name_str[30]; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index b8c7a1e02dfa..299ee85b253d 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1916,7 +1916,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, if (has_branch_stack(event)) { intel_pmu_store_pebs_lbrs(lbr); - perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); + intel_pmu_lbr_save_brstack(data, cpuc, event); } } diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index c3b0d15a9841..78cd5084104e 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -676,6 +676,25 @@ void intel_pmu_lbr_del(struct perf_event *event) WARN_ON_ONCE(cpuc->lbr_users < 0); WARN_ON_ONCE(cpuc->lbr_pebs_users < 0); perf_sched_cb_dec(event->pmu); + + /* + * The logged occurrences information is only valid for the + * current LBR group. If another LBR group is scheduled in + * later, the information from the stale LBRs will be wrongly + * interpreted. Reset the LBRs here. + * + * Only clear once for a branch counter group with the leader + * event. Because + * - Cannot simply reset the LBRs with the !cpuc->lbr_users. + * Because it's possible that the last LBR user is not in a + * branch counter group, e.g., a branch_counters group + + * several normal LBR events. + * - The LBR reset can be done with any one of the events in a + * branch counter group, since they are always scheduled together. + * It's easy to force the leader event an LBR event. 
+ */ + if (is_branch_counters_group(event) && event == event->group_leader) + intel_pmu_lbr_reset(); } static inline bool vlbr_exclude_host(void) @@ -866,6 +885,8 @@ static __always_inline u16 get_lbr_cycles(u64 info) return cycles; } +static_assert((64 - PERF_BRANCH_ENTRY_INFO_BITS_MAX) > LBR_INFO_BR_CNTR_NUM * LBR_INFO_BR_CNTR_BITS); + static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc, struct lbr_entry *entries) { @@ -898,11 +919,67 @@ static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc, e->abort = !!(info & LBR_INFO_ABORT); e->cycles = get_lbr_cycles(info); e->type = get_lbr_br_type(info); + + /* + * Leverage the reserved field of cpuc->lbr_entries[i] to + * temporarily store the branch counters information. + * The later code will decide what content can be disclosed + * to the perf tool. Pleae see intel_pmu_lbr_counters_reorder(). + */ + e->reserved = (info >> LBR_INFO_BR_CNTR_OFFSET) & LBR_INFO_BR_CNTR_FULL_MASK; } cpuc->lbr_stack.nr = i; } +/* + * The enabled order may be different from the counter order. + * Update the lbr_counters with the enabled order. 
+ */ +static void intel_pmu_lbr_counters_reorder(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + int i, j, pos = 0, order[X86_PMC_IDX_MAX]; + struct perf_event *leader, *sibling; + u64 src, dst, cnt; + + leader = event->group_leader; + if (branch_sample_counters(leader)) + order[pos++] = leader->hw.idx; + + for_each_sibling_event(sibling, leader) { + if (!branch_sample_counters(sibling)) + continue; + order[pos++] = sibling->hw.idx; + } + + WARN_ON_ONCE(!pos); + + for (i = 0; i < cpuc->lbr_stack.nr; i++) { + src = cpuc->lbr_entries[i].reserved; + dst = 0; + for (j = 0; j < pos; j++) { + cnt = (src >> (order[j] * LBR_INFO_BR_CNTR_BITS)) & LBR_INFO_BR_CNTR_MASK; + dst |= cnt << j * LBR_INFO_BR_CNTR_BITS; + } + cpuc->lbr_counters[i] = dst; + cpuc->lbr_entries[i].reserved = 0; + } +} + +void intel_pmu_lbr_save_brstack(struct perf_sample_data *data, + struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + if (is_branch_counters_group(event)) { + intel_pmu_lbr_counters_reorder(cpuc, event); + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, cpuc->lbr_counters); + return; + } + + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); +} + static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc) { intel_pmu_store_lbr(cpuc, NULL); @@ -1173,8 +1250,10 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc) for (i = 0; i < cpuc->lbr_stack.nr; ) { if (!cpuc->lbr_entries[i].from) { j = i; - while (++j < cpuc->lbr_stack.nr) + while (++j < cpuc->lbr_stack.nr) { cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j]; + cpuc->lbr_counters[j-1] = cpuc->lbr_counters[j]; + } cpuc->lbr_stack.nr--; if (!cpuc->lbr_entries[i].from) continue; @@ -1525,8 +1604,12 @@ void __init intel_pmu_arch_lbr_init(void) x86_pmu.lbr_mispred = ecx.split.lbr_mispred; x86_pmu.lbr_timed_lbr = ecx.split.lbr_timed_lbr; x86_pmu.lbr_br_type = ecx.split.lbr_br_type; + x86_pmu.lbr_counters = ecx.split.lbr_counters; x86_pmu.lbr_nr = lbr_nr; + if (!!x86_pmu.lbr_counters) + x86_pmu.flags 
|= PMU_FL_BR_CNTR; + if (x86_pmu.lbr_mispred) static_branch_enable(&x86_lbr_mispred); if (x86_pmu.lbr_timed_lbr) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index c8ba2be7585d..b8a2d3ba4ccd 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -110,6 +110,11 @@ static inline bool is_topdown_event(struct perf_event *event) return is_metric_event(event) || is_slots_event(event); } +static inline bool is_branch_counters_group(struct perf_event *event) +{ + return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS; +} + struct amd_nb { int nb_id; /* NorthBridge id */ int refcnt; /* reference count */ @@ -283,6 +288,7 @@ struct cpu_hw_events { int lbr_pebs_users; struct perf_branch_stack lbr_stack; struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; + u64 lbr_counters[MAX_LBR_ENTRIES]; /* branch stack extra */ union { struct er_account *lbr_sel; struct er_account *lbr_ctl; @@ -881,6 +887,7 @@ struct x86_pmu { unsigned int lbr_mispred:1; unsigned int lbr_timed_lbr:1; unsigned int lbr_br_type:1; + unsigned int lbr_counters:4; void (*lbr_reset)(void); void (*lbr_read)(struct cpu_hw_events *cpuc); @@ -1005,6 +1012,7 @@ do { \ #define PMU_FL_INSTR_LATENCY 0x80 /* Support Instruction Latency in PEBS Memory Info Record */ #define PMU_FL_MEM_LOADS_AUX 0x100 /* Require an auxiliary event for the complete memory info */ #define PMU_FL_RETIRE_LATENCY 0x200 /* Support Retire Latency in PEBS */ +#define PMU_FL_BR_CNTR 0x400 /* Support branch counter logging */ #define EVENT_VAR(_id) event_attr_##_id #define EVENT_PTR(_id) &event_attr_##_id.attr.attr @@ -1545,6 +1553,10 @@ void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr); void intel_ds_init(void); +void intel_pmu_lbr_save_brstack(struct perf_sample_data *data, + struct cpu_hw_events *cpuc, + struct perf_event *event); + void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc, struct perf_event_pmu_context *next_epc); diff --git 
a/arch/x86/events/perf_event_flags.h b/arch/x86/events/perf_event_flags.h index a1685981c520..6c977c19f2cd 100644 --- a/arch/x86/events/perf_event_flags.h +++ b/arch/x86/events/perf_event_flags.h @@ -21,3 +21,4 @@ PERF_ARCH(PEBS_STLAT, 0x08000) /* st+stlat data address sampling */ PERF_ARCH(AMD_BRS, 0x10000) /* AMD Branch Sampling */ PERF_ARCH(PEBS_LAT_HYBRID, 0x20000) /* ld and st lat for hybrid */ PERF_ARCH(NEEDS_BRANCH_STACK, 0x40000) /* require branch stack setup */ +PERF_ARCH(BRANCH_COUNTERS, 0x80000) /* logs the counters in the extra space of each branch */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 24b7bd255e98..8a07cfa720eb 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -252,6 +252,11 @@ #define LBR_INFO_CYCLES 0xffff #define LBR_INFO_BR_TYPE_OFFSET 56 #define LBR_INFO_BR_TYPE (0xfull << LBR_INFO_BR_TYPE_OFFSET) +#define LBR_INFO_BR_CNTR_OFFSET 32 +#define LBR_INFO_BR_CNTR_NUM 4 +#define LBR_INFO_BR_CNTR_BITS 2 +#define LBR_INFO_BR_CNTR_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_BITS - 1, 0) +#define LBR_INFO_BR_CNTR_FULL_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_NUM * LBR_INFO_BR_CNTR_BITS - 1, 0) #define MSR_ARCH_LBR_CTL 0x000014ce #define ARCH_LBR_CTL_LBREN BIT(0) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 0c4a93712ef5..9450594f1709 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -31,6 +31,7 @@ #define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22) #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL +#define ARCH_PERFMON_EVENTSEL_BR_CNTR (1ULL << 35) #define INTEL_FIXED_BITS_MASK 0xFULL #define INTEL_FIXED_BITS_STRIDE 4 @@ -224,6 +225,9 @@ union cpuid28_ecx { unsigned int lbr_timed_lbr:1; /* Branch Type Field Supported */ unsigned int lbr_br_type:1; + unsigned int reserved:13; + /* Branch counters (Event Logging) Supported */ + unsigned int lbr_counters:4; } 
split; unsigned int full; }; diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 4461f380425b..3a64499b0f5d 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -1437,6 +1437,9 @@ struct perf_branch_entry { reserved:31; }; +/* Size of used info bits in struct perf_branch_entry */ +#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33 + union perf_sample_weight { __u64 full; #if defined(__LITTLE_ENDIAN_BITFIELD) -- Gitee From 7746860a01268014ec5fa0a4c3e0272a02da5246 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 17 Nov 2023 08:39:35 -0800 Subject: [PATCH 0041/2138] perf/x86/intel/uncore: Generic uncore_get_uncores and MMIO format of SPR ANBZ: #8008 commit cf35791476fcb3230b98a42241a56242d60ebdd3 upstream. Factor out SPR_UNCORE_MMIO_COMMON_FORMAT which can be reused by Granite Rapids in the following patch. Granite Rapids have more uncore units than Sapphire Rapids. Add new parameters to support adjustable uncore units. No functional change. 
Intel-SIG: commit cf35791476fc perf/x86/intel/uncore: Generic uncore_get_uncores and MMIO format of SPR Backport GNR/SRF uncore PMU support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Ammy Yi Link: https://lore.kernel.org/r/20231117163939.2468007-1-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2682 --- arch/x86/events/intel/uncore_snbep.c | 34 +++++++++++++++++++--------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index a8f11e60b987..eaa221bd22bc 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -6087,13 +6087,16 @@ static struct uncore_event_desc spr_uncore_imc_events[] = { { /* end: all zeroes */ }, }; +#define SPR_UNCORE_MMIO_COMMON_FORMAT() \ + SPR_UNCORE_COMMON_FORMAT(), \ + .ops = &spr_uncore_mmio_ops + static struct intel_uncore_type spr_uncore_imc = { - SPR_UNCORE_COMMON_FORMAT(), + SPR_UNCORE_MMIO_COMMON_FORMAT(), .name = "imc", .fixed_ctr_bits = 48, .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR, .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL, - .ops = &spr_uncore_mmio_ops, .event_descs = spr_uncore_imc_events, }; @@ -6420,7 +6423,8 @@ static void uncore_type_customized_copy(struct intel_uncore_type *to_type, static struct intel_uncore_type ** uncore_get_uncores(enum uncore_access_type type_id, int num_extra, - struct intel_uncore_type **extra) + struct intel_uncore_type **extra, int max_num_types, + struct intel_uncore_type **uncores) { struct intel_uncore_type **types, **start_types; int i; @@ -6429,9 +6433,9 @@ uncore_get_uncores(enum uncore_access_type type_id, int num_extra, /* Only copy the customized features */ for (; *types; types++) { - if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES) + if ((*types)->type_id >= max_num_types) continue; - 
uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]); + uncore_type_customized_copy(*types, uncores[(*types)->type_id]); } for (i = 0; i < num_extra; i++, types++) @@ -6478,7 +6482,9 @@ void spr_uncore_cpu_init(void) uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR, UNCORE_SPR_MSR_EXTRA_UNCORES, - spr_msr_uncores); + spr_msr_uncores, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA); if (type) { @@ -6560,7 +6566,9 @@ int spr_uncore_pci_init(void) spr_update_device_location(UNCORE_SPR_M3UPI); uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, UNCORE_SPR_PCI_EXTRA_UNCORES, - spr_pci_uncores); + spr_pci_uncores, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); return 0; } @@ -6568,12 +6576,16 @@ void spr_uncore_mmio_init(void) { int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true); - if (ret) - uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL); - else { + if (ret) { + uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); + } else { uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, UNCORE_SPR_MMIO_EXTRA_UNCORES, - spr_mmio_uncores); + spr_mmio_uncores, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2; } -- Gitee From 719c076d30a13ec7c2517febce6d4b0fa6d68b27 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 17 Nov 2023 08:39:36 -0800 Subject: [PATCH 0042/2138] perf/x86/uncore: Use u64 to replace unsigned for the uncore offsets array ANBZ: #8008 commit b560e0cd882b11921c84307efe139f1247434c5e upstream. The current perf doesn't save the complete address of an uncore unit. The complete address of each unit is calculated by the base address + offset. The type of the base address is u64, while the type of offset is unsigned. 
In the old platforms (without the discovery table method), the base address and offset are hard coded in the driver. Perf can always use the lowest address as the base address. Everything works well. In the new platforms (starting from SPR), the discovery table provides a complete address for all uncore units. To follow the current framework/codes, when parsing the discovery table, the complete address of the first box is stored as a base address. The offset of the following units is calculated by the complete address of the unit minus the base address (the address of the first unit). On GNR, the latter units may have a lower address compared to the first unit. So the offset is a negative value. The upper 32 bits are lost when casting a negative u64 to an unsigned type. Use u64 to replace unsigned for the uncore offsets array to correct the above case. There is no functional change. Intel-SIG: commit b560e0cd882b perf/x86/uncore: Use u64 to replace unsigned for the uncore offsets array Backport GNR/SRF uncore PMU support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Ammy Yi Link: https://lore.kernel.org/r/20231117163939.2468007-2-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2682 --- arch/x86/events/intel/uncore.h | 6 +++--- arch/x86/events/intel/uncore_discovery.c | 5 +++-- arch/x86/events/intel/uncore_discovery.h | 2 +- arch/x86/events/intel/uncore_nhmex.c | 2 +- arch/x86/events/intel/uncore_snbep.c | 6 +++--- 5 files changed, 11 insertions(+), 10 deletions(-) diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index c30fb5bb1222..7428ecaddf72 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -72,9 +72,9 @@ struct intel_uncore_type { unsigned single_fixed:1; unsigned pair_ctr_ctl:1; union { - unsigned *msr_offsets; - unsigned *pci_offsets; - unsigned 
*mmio_offsets; + u64 *msr_offsets; + u64 *pci_offsets; + u64 *mmio_offsets; }; unsigned *box_ids; struct event_constraint unconstrainted; diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c index cb488e41807c..9a698a92962a 100644 --- a/arch/x86/events/intel/uncore_discovery.c +++ b/arch/x86/events/intel/uncore_discovery.c @@ -125,7 +125,8 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit, int die, bool parsed) { struct intel_uncore_discovery_type *type; - unsigned int *box_offset, *ids; + unsigned int *ids; + u64 *box_offset; int i; if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) { @@ -153,7 +154,7 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit, if (!type) return; - box_offset = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL); + box_offset = kcalloc(type->num_boxes + 1, sizeof(u64), GFP_KERNEL); if (!box_offset) return; diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h index 6ee80ad3423e..22e769a81103 100644 --- a/arch/x86/events/intel/uncore_discovery.h +++ b/arch/x86/events/intel/uncore_discovery.h @@ -125,7 +125,7 @@ struct intel_uncore_discovery_type { u8 ctr_offset; /* Counter 0 offset */ u16 num_boxes; /* number of boxes for the uncore block */ unsigned int *ids; /* Box IDs */ - unsigned int *box_offset; /* Box offset */ + u64 *box_offset; /* Box offset */ }; bool intel_uncore_has_discovery_tables(int *ignore); diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c index 173e2674be6e..56eea2c66cfb 100644 --- a/arch/x86/events/intel/uncore_nhmex.c +++ b/arch/x86/events/intel/uncore_nhmex.c @@ -306,7 +306,7 @@ static const struct attribute_group nhmex_uncore_cbox_format_group = { }; /* msr offset for each instance of cbox */ -static unsigned nhmex_cbox_msr_offsets[] = { +static u64 nhmex_cbox_msr_offsets[] = { 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0, }; diff --git 
a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index eaa221bd22bc..c4015a78c035 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -5280,7 +5280,7 @@ void snr_uncore_mmio_init(void) /* ICX uncore support */ -static unsigned icx_cha_msr_offsets[] = { +static u64 icx_cha_msr_offsets[] = { 0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310, 0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e, 0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a, @@ -5328,7 +5328,7 @@ static struct intel_uncore_type icx_uncore_chabox = { .format_group = &snr_uncore_chabox_format_group, }; -static unsigned icx_msr_offsets[] = { +static u64 icx_msr_offsets[] = { 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0, }; @@ -6192,7 +6192,7 @@ static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = { */ #define SPR_UNCORE_UPI_NUM_BOXES 4 -static unsigned int spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = { +static u64 spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = { 0, 0x8000, 0x10000, 0x18000 }; -- Gitee From 1b3c6a4e30b6a6e57f1856075394cfc15afa65ce Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 17 Nov 2023 08:39:37 -0800 Subject: [PATCH 0043/2138] perf/x86/intel/uncore: Support Granite Rapids ANBZ: #8008 commit 632c4bf6d007862307440b177d9fee829857e8bb upstream. The same as Sapphire Rapids, Granite Rapids also supports the discovery table feature. All the basic uncore PMON information can be retrieved from the discovery table which resides in the BIOS. There are 4 new units added on Granite Rapids, b2cmi, b2cxl, ubox, and mdf_sbo. The layout of the counters is exactly the same as the generic uncore counters. Only add a name for the new units. All the details can be retrieved from the discovery table. 
The description of the new units can be found at https://www.intel.com/content/www/us/en/secure/content-details/772943/content-details.html The other units, e.g., cha, iio, irp, pcu, and imc, are the same as Sapphire Rapids. Ignore the upi and b2upi units in the discovery table, which are broken for now. Intel-SIG: commit 632c4bf6d007 perf/x86/intel/uncore: Support Granite Rapids Backport GNR/SRF uncore PMU support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Ammy Yi Link: https://lore.kernel.org/r/20231117163939.2468007-3-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2682 --- arch/x86/events/intel/uncore.c | 10 ++++ arch/x86/events/intel/uncore.h | 4 ++ arch/x86/events/intel/uncore_snbep.c | 87 ++++++++++++++++++++++++++++ 3 files changed, 101 insertions(+) diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 69043e02e8a7..01c01cae82ef 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1814,6 +1814,14 @@ static const struct intel_uncore_init_fun spr_uncore_init __initconst = { .uncore_units_ignore = spr_uncore_units_ignore, }; +static const struct intel_uncore_init_fun gnr_uncore_init __initconst = { + .cpu_init = gnr_uncore_cpu_init, + .pci_init = gnr_uncore_pci_init, + .mmio_init = gnr_uncore_mmio_init, + .use_discovery = true, + .uncore_units_ignore = gnr_uncore_units_ignore, +}; + static const struct intel_uncore_init_fun generic_uncore_init __initconst = { .cpu_init = intel_uncore_generic_uncore_cpu_init, .pci_init = intel_uncore_generic_uncore_pci_init, @@ -1865,6 +1873,8 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &mtl_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init), + 
X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &gnr_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &gnr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_uncore_init), {}, diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index 7428ecaddf72..4838502d89ae 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -593,6 +593,7 @@ extern struct list_head pci2phy_map_head; extern struct pci_extra_dev *uncore_extra_pci_dev; extern struct event_constraint uncore_constraint_empty; extern int spr_uncore_units_ignore[]; +extern int gnr_uncore_units_ignore[]; /* uncore_snb.c */ int snb_uncore_pci_init(void); @@ -634,6 +635,9 @@ void icx_uncore_mmio_init(void); int spr_uncore_pci_init(void); void spr_uncore_cpu_init(void); void spr_uncore_mmio_init(void); +int gnr_uncore_pci_init(void); +void gnr_uncore_cpu_init(void); +void gnr_uncore_mmio_init(void); /* uncore_nhmex.c */ void nhmex_uncore_cpu_init(void); diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index c4015a78c035..1cf944324b86 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -6592,3 +6592,90 @@ void spr_uncore_mmio_init(void) } /* end of SPR uncore support */ + +/* GNR uncore support */ + +#define UNCORE_GNR_NUM_UNCORE_TYPES 23 +#define UNCORE_GNR_TYPE_15 15 +#define UNCORE_GNR_B2UPI 18 +#define UNCORE_GNR_TYPE_21 21 +#define UNCORE_GNR_TYPE_22 22 + +int gnr_uncore_units_ignore[] = { + UNCORE_SPR_UPI, + UNCORE_GNR_TYPE_15, + UNCORE_GNR_B2UPI, + UNCORE_GNR_TYPE_21, + UNCORE_GNR_TYPE_22, + UNCORE_IGNORE_END +}; + +static struct intel_uncore_type gnr_uncore_ubox = { + .name = "ubox", + .attr_update = uncore_alias_groups, +}; + +static struct intel_uncore_type gnr_uncore_b2cmi = { + SPR_UNCORE_PCI_COMMON_FORMAT(), + .name = "b2cmi", +}; + +static struct intel_uncore_type gnr_uncore_b2cxl = { + 
SPR_UNCORE_MMIO_COMMON_FORMAT(), + .name = "b2cxl", +}; + +static struct intel_uncore_type gnr_uncore_mdf_sbo = { + .name = "mdf_sbo", + .attr_update = uncore_alias_groups, +}; + +static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = { + &spr_uncore_chabox, + &spr_uncore_iio, + &spr_uncore_irp, + NULL, + &spr_uncore_pcu, + &gnr_uncore_ubox, + &spr_uncore_imc, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + &gnr_uncore_b2cmi, + &gnr_uncore_b2cxl, + NULL, + NULL, + &gnr_uncore_mdf_sbo, + NULL, + NULL, +}; + +void gnr_uncore_cpu_init(void) +{ + uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR, 0, NULL, + UNCORE_GNR_NUM_UNCORE_TYPES, + gnr_uncores); +} + +int gnr_uncore_pci_init(void) +{ + uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL, + UNCORE_GNR_NUM_UNCORE_TYPES, + gnr_uncores); + return 0; +} + +void gnr_uncore_mmio_init(void) +{ + uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL, + UNCORE_GNR_NUM_UNCORE_TYPES, + gnr_uncores); +} + +/* end of GNR uncore support */ -- Gitee From 23fa975c93e27f0a560566540dced7c5175600a3 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 17 Nov 2023 08:39:38 -0800 Subject: [PATCH 0044/2138] perf/x86/intel/uncore: Support IIO free-running counters on GNR ANBZ: #8008 commit 388d76175bd9bbad52bbff25c88361d9e5c6615e upstream. The free-running counters for IIO uncore blocks on Granite Rapids are similar to Sapphire Rapids. The key difference is the offset of the registers. The number of the IIO uncore blocks can also be retrieved from the discovery table. 
Intel-SIG: commit 388d76175bd9 perf/x86/intel/uncore: Support IIO free-running counters on GNR Backport GNR/SRF uncore PMU support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Ammy Yi Link: https://lore.kernel.org/r/20231117163939.2468007-4-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2682 --- arch/x86/events/intel/uncore_snbep.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 1cf944324b86..94c903b385ae 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -6656,11 +6656,21 @@ static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = { NULL, }; +static struct freerunning_counters gnr_iio_freerunning[] = { + [SPR_IIO_MSR_IOCLK] = { 0x290e, 0x01, 0x10, 1, 48 }, + [SPR_IIO_MSR_BW_IN] = { 0x360e, 0x10, 0x80, 8, 48 }, + [SPR_IIO_MSR_BW_OUT] = { 0x2e0e, 0x10, 0x80, 8, 48 }, +}; + void gnr_uncore_cpu_init(void) { - uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR, 0, NULL, + uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR, + UNCORE_SPR_MSR_EXTRA_UNCORES, + spr_msr_uncores, UNCORE_GNR_NUM_UNCORE_TYPES, gnr_uncores); + spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO); + spr_uncore_iio_free_running.freerunning = gnr_iio_freerunning; } int gnr_uncore_pci_init(void) -- Gitee From 394cdb26877850255da816653b6b7b991cca63d9 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 17 Nov 2023 08:39:39 -0800 Subject: [PATCH 0045/2138] perf/x86/intel/uncore: Support Sierra Forest and Grand Ridge ANBZ: #8008 commit cb4a6ccf35839895da63fcf6134d6fbd13224805 upstream. 
The same as Granite Rapids, the Sierra Forest and Grand Ridge also supports the discovery table feature and the same type of the uncore units. The difference of the available units and counters can be retrieved from the discovery table automatically. Just add the CPU model ID. Intel-SIG: commit cb4a6ccf3583 perf/x86/intel/uncore: Support Sierra Forest and Grand Ridge Backport GNR/SRF uncore PMU support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Ammy Yi Link: https://lore.kernel.org/r/20231117163939.2468007-5-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2682 --- arch/x86/events/intel/uncore.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 01c01cae82ef..4e26a28536de 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1877,6 +1877,8 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &gnr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &gnr_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &gnr_uncore_init), {}, }; MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match); -- Gitee From 8e38fe9cc7d19e58a504bed15a1a5b075114ac2c Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:30 -0700 Subject: [PATCH 0046/2138] platform/x86/intel/ifs: Refactor image loading code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit a138ac2656d1329c3994a227769b7ba3926818a7 upstream. 
Intel-SIG: commit a138ac2656d1 platform/x86/intel/ifs: Refactor image loading code Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF IFS image loading flow is slightly different for newer IFS generations. In preparation for adding support for newer IFS generations, refactor portions of existing image loading code for reuse. Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Tested-by: Pengfei Xu Link: https://lore.kernel.org/r/20231005195137.3117166-3-jithu.joseph@intel.com Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- drivers/platform/x86/intel/ifs/load.c | 31 ++++++++++++++++----------- 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 53d957d4eea4..31d4c044c2f6 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -80,6 +80,23 @@ static struct metadata_header *find_meta_data(void *ucode, unsigned int meta_typ return NULL; } +static void hashcopy_err_message(struct device *dev, u32 err_code) +{ + if (err_code >= ARRAY_SIZE(scan_hash_status)) + dev_err(dev, "invalid error code 0x%x for hash copy\n", err_code); + else + dev_err(dev, "Hash copy error : %s\n", scan_hash_status[err_code]); +} + +static void auth_err_message(struct device *dev, u32 err_code) +{ + if (err_code >= ARRAY_SIZE(scan_authentication_status)) + dev_err(dev, "invalid error code 0x%x for authentication\n", err_code); + else + dev_err(dev, "Chunk authentication error : %s\n", + scan_authentication_status[err_code]); +} + /* * To copy scan hashes and authenticate test chunks, the initiating cpu must point * to the EDX:EAX to the test image in linear address. 
@@ -109,11 +126,7 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) if (!hashes_status.valid) { ifsd->loading_error = true; - if (err_code >= ARRAY_SIZE(scan_hash_status)) { - dev_err(dev, "invalid error code 0x%x for hash copy\n", err_code); - goto done; - } - dev_err(dev, "Hash copy error : %s", scan_hash_status[err_code]); + hashcopy_err_message(dev, err_code); goto done; } @@ -133,13 +146,7 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) if (err_code) { ifsd->loading_error = true; - if (err_code >= ARRAY_SIZE(scan_authentication_status)) { - dev_err(dev, - "invalid error code 0x%x for authentication\n", err_code); - goto done; - } - dev_err(dev, "Chunk authentication error %s\n", - scan_authentication_status[err_code]); + auth_err_message(dev, err_code); goto done; } } -- Gitee From f12c1806df6ad0ef6efec3db1fc6fcfd034c17a3 Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:31 -0700 Subject: [PATCH 0047/2138] platform/x86/intel/ifs: Gen2 scan image loading MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit 07f47c01b3bc2a42c4d4da35831edab10aa60449 upstream. Intel-SIG: commit 07f47c01b3bc platform/x86/intel/ifs: Gen2 scan image loading Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF Scan image loading flow for newer IFS generations are slightly different from that of current generation. In newer schemes, loading need not be done once for each socket as was done in gen0. Also the width of NUM_CHUNKS bitfield in SCAN_HASHES_STATUS MSR has increased from 8 -> 16 bits. Similarly there are width differences for CHUNK_AUTHENTICATION_STATUS too. Further the parameter to AUTHENTICATE_AND_COPY_CHUNK is passed differently in newer generations. 
Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Tested-by: Pengfei Xu Link: https://lore.kernel.org/r/20231005195137.3117166-4-jithu.joseph@intel.com Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- drivers/platform/x86/intel/ifs/ifs.h | 27 +++++++ drivers/platform/x86/intel/ifs/load.c | 112 +++++++++++++++++++++++++- 2 files changed, 137 insertions(+), 2 deletions(-) diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index 6bc63ab70517..f0dd849b3400 100644 --- a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -137,6 +137,8 @@ #define MSR_CHUNKS_AUTHENTICATION_STATUS 0x000002c5 #define MSR_ACTIVATE_SCAN 0x000002c6 #define MSR_SCAN_STATUS 0x000002c7 +#define MSR_SAF_CTRL 0x000004f0 + #define SCAN_NOT_TESTED 0 #define SCAN_TEST_PASS 1 #define SCAN_TEST_FAIL 2 @@ -158,6 +160,19 @@ union ifs_scan_hashes_status { }; }; +union ifs_scan_hashes_status_gen2 { + u64 data; + struct { + u16 chunk_size; + u16 num_chunks; + u32 error_code :8; + u32 chunks_in_stride :9; + u32 rsvd :2; + u32 max_core_limit :12; + u32 valid :1; + }; +}; + /* MSR_CHUNKS_AUTH_STATUS bit fields */ union ifs_chunks_auth_status { u64 data; @@ -170,6 +185,16 @@ union ifs_chunks_auth_status { }; }; +union ifs_chunks_auth_status_gen2 { + u64 data; + struct { + u16 valid_chunks; + u16 total_chunks; + u32 error_code :8; + u32 rsvd2 :24; + }; +}; + /* MSR_ACTIVATE_SCAN bit fields */ union ifs_scan { u64 data; @@ -246,6 +271,7 @@ struct ifs_test_caps { * @scan_details: opaque scan status code from h/w * @cur_batch: number indicating the currently loaded test file * @generation: IFS test generation enumerated by hardware + * @chunk_size: size of a test chunk */ struct ifs_data { int loaded_version; @@ -256,6 +282,7 @@ struct ifs_data { u64 scan_details; u32 cur_batch; u32 generation; + u32 
chunk_size; }; struct ifs_work { diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 31d4c044c2f6..538cff1d5d3e 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -2,6 +2,7 @@ /* Copyright(c) 2022 Intel Corporation. */ #include +#include #include #include @@ -26,6 +27,11 @@ union meta_data { #define IFS_HEADER_SIZE (sizeof(struct microcode_header_intel)) #define META_TYPE_IFS 1 +#define INVALIDATE_STRIDE 0x1UL +#define IFS_GEN_STRIDE_AWARE 2 +#define AUTH_INTERRUPTED_ERROR 5 +#define IFS_AUTH_RETRY_CT 10 + static struct microcode_header_intel *ifs_header_ptr; /* pointer to the ifs image header */ static u64 ifs_hash_ptr; /* Address of ifs metadata (hash) */ static u64 ifs_test_image_ptr; /* 256B aligned address of test pattern */ @@ -44,7 +50,10 @@ static const char * const scan_hash_status[] = { static const char * const scan_authentication_status[] = { [0] = "No error reported", [1] = "Attempt to authenticate a chunk which is already marked as authentic", - [2] = "Chunk authentication error. The hash of chunk did not match expected value" + [2] = "Chunk authentication error. The hash of chunk did not match expected value", + [3] = "Reserved", + [4] = "Chunk outside the current stride", + [5] = "Authentication flow interrupted", }; #define MC_HEADER_META_TYPE_END (0) @@ -154,6 +163,102 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) complete(&ifs_done); } +static int get_num_chunks(int gen, union ifs_scan_hashes_status_gen2 status) +{ + return gen >= IFS_GEN_STRIDE_AWARE ? 
status.chunks_in_stride : status.num_chunks; +} + +static bool need_copy_scan_hashes(struct ifs_data *ifsd) +{ + return !ifsd->loaded || + ifsd->generation < IFS_GEN_STRIDE_AWARE || + ifsd->loaded_version != ifs_header_ptr->rev; +} + +static int copy_hashes_authenticate_chunks_gen2(struct device *dev) +{ + union ifs_scan_hashes_status_gen2 hashes_status; + union ifs_chunks_auth_status_gen2 chunk_status; + u32 err_code, valid_chunks, total_chunks; + int i, num_chunks, chunk_size; + union meta_data *ifs_meta; + int starting_chunk_nr; + struct ifs_data *ifsd; + u64 linear_addr, base; + u64 chunk_table[2]; + int retry_count; + + ifsd = ifs_get_data(dev); + + if (need_copy_scan_hashes(ifsd)) { + wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr); + rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data); + + /* enumerate the scan image information */ + chunk_size = hashes_status.chunk_size * SZ_1K; + err_code = hashes_status.error_code; + + num_chunks = get_num_chunks(ifsd->generation, hashes_status); + + if (!hashes_status.valid) { + hashcopy_err_message(dev, err_code); + return -EIO; + } + ifsd->loaded_version = ifs_header_ptr->rev; + ifsd->chunk_size = chunk_size; + } else { + num_chunks = ifsd->valid_chunks; + chunk_size = ifsd->chunk_size; + } + + if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) { + wrmsrl(MSR_SAF_CTRL, INVALIDATE_STRIDE); + rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + if (chunk_status.valid_chunks != 0) { + dev_err(dev, "Couldn't invalidate installed stride - %d\n", + chunk_status.valid_chunks); + return -EIO; + } + } + + base = ifs_test_image_ptr; + ifs_meta = (union meta_data *)find_meta_data(ifs_header_ptr, META_TYPE_IFS); + starting_chunk_nr = ifs_meta->starting_chunk; + + /* scan data authentication and copy chunks to secured memory */ + for (i = 0; i < num_chunks; i++) { + retry_count = IFS_AUTH_RETRY_CT; + linear_addr = base + i * chunk_size; + + chunk_table[0] = starting_chunk_nr + i; + chunk_table[1] = linear_addr; + do { + 
wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, (u64)chunk_table); + rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + err_code = chunk_status.error_code; + } while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count); + + if (err_code) { + ifsd->loading_error = true; + auth_err_message(dev, err_code); + return -EIO; + } + } + + valid_chunks = chunk_status.valid_chunks; + total_chunks = chunk_status.total_chunks; + + if (valid_chunks != total_chunks) { + ifsd->loading_error = true; + dev_err(dev, "Couldn't authenticate all the chunks. Authenticated %d total %d.\n", + valid_chunks, total_chunks); + return -EIO; + } + ifsd->valid_chunks = valid_chunks; + + return 0; +} + static int validate_ifs_metadata(struct device *dev) { struct ifs_data *ifsd = ifs_get_data(dev); @@ -206,7 +311,9 @@ static int scan_chunks_sanity_check(struct device *dev) return ret; ifsd->loading_error = false; - ifsd->loaded_version = ifs_header_ptr->rev; + + if (ifsd->generation > 0) + return copy_hashes_authenticate_chunks_gen2(dev); /* copy the scan hash and authenticate per package */ cpus_read_lock(); @@ -226,6 +333,7 @@ static int scan_chunks_sanity_check(struct device *dev) ifs_pkg_auth[curr_pkg] = 1; } ret = 0; + ifsd->loaded_version = ifs_header_ptr->rev; out: cpus_read_unlock(); -- Gitee From 65669f3f26ac810a8eb5fa3acebddff9cfcc80a4 Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:34 -0700 Subject: [PATCH 0048/2138] platform/x86/intel/ifs: Metadata validation for start_chunk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit 60d2e1b37d530d6b1f8b7773cebaf8bbc1536b28 upstream. Intel-SIG: commit 60d2e1b37d53 platform/x86/intel/ifs: Metadata validation for start_chunk Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF Add an additional check to validate IFS image metadata field prior to loading the test image. If start_chunk is not a multiple of chunks_per_stride error out. 
Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Tested-by: Pengfei Xu Link: https://lore.kernel.org/r/20231005195137.3117166-7-jithu.joseph@intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- drivers/platform/x86/intel/ifs/load.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 538cff1d5d3e..cf156c4a8024 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -291,6 +291,13 @@ static int validate_ifs_metadata(struct device *dev) return ret; } + if (ifs_meta->chunks_per_stride && + (ifs_meta->starting_chunk % ifs_meta->chunks_per_stride != 0)) { + dev_warn(dev, "Starting chunk num %u not a multiple of chunks_per_stride %u\n", + ifs_meta->starting_chunk, ifs_meta->chunks_per_stride); + return ret; + } + return 0; } -- Gitee From a3b17c0a0ac7a1c49a0d882bb97d241acda2752e Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:35 -0700 Subject: [PATCH 0049/2138] platform/x86/intel/ifs: Add new CPU support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit e6483a0b59026ded36a6f5eba1425a6b0965984a upstream. Intel-SIG: commit e6483a0b5902 platform/x86/intel/ifs: Add new CPU support Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF Add Granite Rapids(GNR) and Sierra Forest(SRF) cpuids to x86 match table so that IFS driver can be loaded for those. 
Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Tested-by: Pengfei Xu Link: https://lore.kernel.org/r/20231005195137.3117166-8-jithu.joseph@intel.com Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- drivers/platform/x86/intel/ifs/core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 4ff2aa4b484b..0c8927916373 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -18,6 +18,9 @@ static const struct x86_cpu_id ifs_cpu_ids[] __initconst = { X86_MATCH(SAPPHIRERAPIDS_X), X86_MATCH(EMERALDRAPIDS_X), + X86_MATCH(GRANITERAPIDS_X), + X86_MATCH(GRANITERAPIDS_D), + X86_MATCH(ATOM_CRESTMONT_X), {} }; MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids); -- Gitee From 4a38ecc03edf2765a5be50746118e392424b8ac3 Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:36 -0700 Subject: [PATCH 0050/2138] platform/x86/intel/ifs: Add new error code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit b9aa9e4c8b4e52b6f2f5986b27e97f4b6163f0bf upstream. Intel-SIG: commit b9aa9e4c8b4e platform/x86/intel/ifs: Add new error code Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF Make driver aware of a newly added error code so that it can provide a more appropriate error message. 
Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Tested-by: Pengfei Xu Link: https://lore.kernel.org/r/20231005195137.3117166-9-jithu.joseph@intel.com Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- drivers/platform/x86/intel/ifs/runtest.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index c7a5bf24bef3..5aa76bf25b3e 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -40,6 +40,8 @@ enum ifs_status_err_code { IFS_UNASSIGNED_ERROR_CODE = 7, IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT = 8, IFS_INTERRUPTED_DURING_EXECUTION = 9, + IFS_UNASSIGNED_ERROR_CODE_0xA = 0xA, + IFS_CORRUPTED_CHUNK = 0xB, }; static const char * const scan_test_status[] = { @@ -55,6 +57,8 @@ static const char * const scan_test_status[] = { [IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT] = "Exceeded number of Logical Processors (LP) allowed to run Scan-At-Field concurrently", [IFS_INTERRUPTED_DURING_EXECUTION] = "Interrupt occurred prior to SCAN start", + [IFS_UNASSIGNED_ERROR_CODE_0xA] = "Unassigned error code 0xA", + [IFS_CORRUPTED_CHUNK] = "Scan operation aborted due to corrupted image. 
Try reloading", }; static void message_not_tested(struct device *dev, int cpu, union ifs_status status) @@ -123,6 +127,8 @@ static bool can_restart(union ifs_status status) case IFS_MISMATCH_ARGUMENTS_BETWEEN_THREADS: case IFS_CORE_NOT_CAPABLE_CURRENTLY: case IFS_UNASSIGNED_ERROR_CODE: + case IFS_UNASSIGNED_ERROR_CODE_0xA: + case IFS_CORRUPTED_CHUNK: break; } return false; -- Gitee From 321c2316e7d12551e9a8f2e774ccbf3de6b3dcc6 Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 5 Oct 2023 12:51:37 -0700 Subject: [PATCH 0051/2138] platform/x86/intel/ifs: ARRAY BIST for Sierra Forest MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8013 commit 06d65b2bc532fc9af1c55aa7a18cfd237ce46588 upstream. Intel-SIG: commit 06d65b2bc532 platform/x86/intel/ifs: ARRAY BIST for Sierra Forest Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF Array BIST MSR addresses, bit definition and semantics are different for Sierra Forest. Branch into a separate Array BIST flow on Sierra Forest when user invokes Array Test. 
Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Tested-by: Pengfei Xu Link: https://lore.kernel.org/r/20231005195137.3117166-10-jithu.joseph@intel.com [ij: ARRAY_GEN_* -> ARRAY_GEN* for consistency] Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- drivers/platform/x86/intel/ifs/core.c | 15 +++++----- drivers/platform/x86/intel/ifs/ifs.h | 7 +++++ drivers/platform/x86/intel/ifs/runtest.c | 37 +++++++++++++++++++++++- 3 files changed, 51 insertions(+), 8 deletions(-) diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 0c8927916373..7b11198d85a1 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -11,16 +11,16 @@ #include "ifs.h" -#define X86_MATCH(model) \ +#define X86_MATCH(model, array_gen) \ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \ - INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, NULL) + INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, array_gen) static const struct x86_cpu_id ifs_cpu_ids[] __initconst = { - X86_MATCH(SAPPHIRERAPIDS_X), - X86_MATCH(EMERALDRAPIDS_X), - X86_MATCH(GRANITERAPIDS_X), - X86_MATCH(GRANITERAPIDS_D), - X86_MATCH(ATOM_CRESTMONT_X), + X86_MATCH(SAPPHIRERAPIDS_X, ARRAY_GEN0), + X86_MATCH(EMERALDRAPIDS_X, ARRAY_GEN0), + X86_MATCH(GRANITERAPIDS_X, ARRAY_GEN0), + X86_MATCH(GRANITERAPIDS_D, ARRAY_GEN0), + X86_MATCH(ATOM_CRESTMONT_X, ARRAY_GEN1), {} }; MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids); @@ -100,6 +100,7 @@ static int __init ifs_init(void) continue; ifs_devices[i].rw_data.generation = FIELD_GET(MSR_INTEGRITY_CAPS_SAF_GEN_MASK, msrval); + ifs_devices[i].rw_data.array_gen = (u32)m->driver_data; ret = misc_register(&ifs_devices[i].misc); if (ret) goto err_exit; diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index f0dd849b3400..56b9f3e3cf76 100644 --- 
a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -137,6 +137,8 @@ #define MSR_CHUNKS_AUTHENTICATION_STATUS 0x000002c5 #define MSR_ACTIVATE_SCAN 0x000002c6 #define MSR_SCAN_STATUS 0x000002c7 +#define MSR_ARRAY_TRIGGER 0x000002d6 +#define MSR_ARRAY_STATUS 0x000002d7 #define MSR_SAF_CTRL 0x000004f0 #define SCAN_NOT_TESTED 0 @@ -146,6 +148,9 @@ #define IFS_TYPE_SAF 0 #define IFS_TYPE_ARRAY_BIST 1 +#define ARRAY_GEN0 0 +#define ARRAY_GEN1 1 + /* MSR_SCAN_HASHES_STATUS bit fields */ union ifs_scan_hashes_status { u64 data; @@ -272,6 +277,7 @@ struct ifs_test_caps { * @cur_batch: number indicating the currently loaded test file * @generation: IFS test generation enumerated by hardware * @chunk_size: size of a test chunk + * @array_gen: test generation of array test */ struct ifs_data { int loaded_version; @@ -283,6 +289,7 @@ struct ifs_data { u32 cur_batch; u32 generation; u32 chunk_size; + u32 array_gen; }; struct ifs_work { diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index 5aa76bf25b3e..e9d7a8c84e05 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -329,6 +329,38 @@ static void ifs_array_test_core(int cpu, struct device *dev) ifsd->status = SCAN_TEST_PASS; } +#define ARRAY_GEN1_TEST_ALL_ARRAYS 0x0ULL +#define ARRAY_GEN1_STATUS_FAIL 0x1ULL + +static int do_array_test_gen1(void *status) +{ + int cpu = smp_processor_id(); + int first; + + first = cpumask_first(cpu_smt_mask(cpu)); + + if (cpu == first) { + wrmsrl(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS); + rdmsrl(MSR_ARRAY_STATUS, *((u64 *)status)); + } + + return 0; +} + +static void ifs_array_test_gen1(int cpu, struct device *dev) +{ + struct ifs_data *ifsd = ifs_get_data(dev); + u64 status = 0; + + stop_core_cpuslocked(cpu, do_array_test_gen1, &status); + ifsd->scan_details = status; + + if (status & ARRAY_GEN1_STATUS_FAIL) + ifsd->status = SCAN_TEST_FAIL; + else + 
ifsd->status = SCAN_TEST_PASS; +} + /* * Initiate per core test. It wakes up work queue threads on the target cpu and * its sibling cpu. Once all sibling threads wake up, the scan test gets executed and @@ -357,7 +389,10 @@ int do_core_test(int cpu, struct device *dev) ifs_test_core(cpu, dev); break; case IFS_TYPE_ARRAY_BIST: - ifs_array_test_core(cpu, dev); + if (ifsd->array_gen == ARRAY_GEN0) + ifs_array_test_core(cpu, dev); + else + ifs_array_test_gen1(cpu, dev); break; default: ret = -EINVAL; -- Gitee From 951136e7f098b5b2ea9c53e815348aa5034756b2 Mon Sep 17 00:00:00 2001 From: Aichun Shi Date: Wed, 24 Jan 2024 16:10:22 +0800 Subject: [PATCH 0052/2138] x86: configs: Add Intel In Field Scan(IFS) kernel config ANBZ: #8013 Intel-SIG: no upstream x86: configs: Add Intel In Field Scan(IFS) kernel config Backport Intel In Field Scan(IFS) SAF & Array BIST support for GNR & SRF Signed-off-by: Aichun Shi Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2688 --- arch/x86/configs/anolis-debug_defconfig | 2 +- arch/x86/configs/anolis_defconfig | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 714c90743cba..a61c59967e02 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -5695,7 +5695,7 @@ CONFIG_THINKPAD_ACPI_VIDEO=y CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y # CONFIG_THINKPAD_LMI is not set # CONFIG_INTEL_ATOMISP2_PM is not set -# CONFIG_INTEL_IFS is not set +CONFIG_INTEL_IFS=m # CONFIG_INTEL_SAR_INT1092 is not set CONFIG_INTEL_PMC_CORE=m diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 4fb6c9d9d8b7..937a54d025e9 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -5688,7 +5688,7 @@ CONFIG_THINKPAD_ACPI_VIDEO=y CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y # CONFIG_THINKPAD_LMI is not set # CONFIG_INTEL_ATOMISP2_PM is not set -# 
CONFIG_INTEL_IFS is not set +CONFIG_INTEL_IFS=m # CONFIG_INTEL_SAR_INT1092 is not set CONFIG_INTEL_PMC_CORE=m -- Gitee From 4cf6acecadbdb36de5ec735980436a0faeeec650 Mon Sep 17 00:00:00 2001 From: Serge Hallyn Date: Tue, 5 Jan 2016 20:12:21 +0000 Subject: [PATCH 0053/2138] anolis: userns: add a sysctl to disable unprivileged user namespace unsharing ANBZ: #8322 commit 5758824b20fa2308ebb5c460874d0ffd73d0d8e4 Ubuntu groovy. It is turned on by default, but can be turned off if admins prefer or, more importantly, if a security vulnerability is found. The intent is to use this as mitigation so long as Ubuntu is on the cutting edge of enablement for things like unprivileged filesystem mounting. (This patch is tweaked from the one currently still in Debian sid, which in turn came from the patch we had in saucy) Signed-off-by: Serge Hallyn [bwh: Remove unneeded binary sysctl bits] [ saf: move extern unprivileged_userns_clone declaration to include/linux/user_namespace.h to conform with 2374c09b1c8a "sysctl: remove all extern declaration from sysctl.c" ] Signed-off-by: Tim Gardner [jingbo: add documentation for the sysctl] Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2781 --- Documentation/admin-guide/sysctl/kernel.rst | 10 ++++++++++ kernel/fork.c | 15 +++++++++++++++ kernel/sysctl.c | 12 ++++++++++++ kernel/user_namespace.c | 6 ++++++ 4 files changed, 43 insertions(+) diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index cf33de56da27..4408fe2f97e7 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -1603,6 +1603,16 @@ entry will default to 2 instead of 0. = ============================================================= +unprivileged_userns_clone +========================= + +This value controls if unprivileged users could unshare a new user +namespace. 
When the value is zero, unprivileged users are not allowed +to unshare a new user namespace. Privileged users (with CAP_SYS_ADMIN) +are not affected and are always capable of unsharing a new user +namespace. + + warn_limit ========== diff --git a/kernel/fork.c b/kernel/fork.c index 23efaa2c42e4..feedc398e854 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -111,6 +111,11 @@ #define CREATE_TRACE_POINTS #include +#ifdef CONFIG_USER_NS +extern int unprivileged_userns_clone; +#else +#define unprivileged_userns_clone 0 +#endif /* * Minimum number of threads to boot the kernel @@ -2259,6 +2264,10 @@ __latent_entropy struct task_struct *copy_process( if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) return ERR_PTR(-EINVAL); + if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) + if (!capable(CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. @@ -3412,6 +3421,12 @@ int ksys_unshare(unsigned long unshare_flags) if (unshare_flags & CLONE_NEWNS) unshare_flags |= CLONE_FS; + if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) { + err = -EPERM; + if (!capable(CAP_SYS_ADMIN)) + goto bad_unshare_out; + } + err = check_unshare_flags(unshare_flags); if (err) goto bad_unshare_out; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 354a2d294f52..ecb22a814e16 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -96,6 +96,9 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals); static const int six_hundred_forty_kb = 640 * 1024; #endif +#ifdef CONFIG_USER_NS +extern int unprivileged_userns_clone; +#endif static const int ngroups_max = NGROUPS_MAX; static const int cap_last_cap = CAP_LAST_CAP; @@ -2042,6 +2045,15 @@ static struct ctl_table kern_table[] = { .extra1 = SYSCTL_ONE, .extra2 = SYSCTL_INT_MAX, }, +#endif +#ifdef CONFIG_USER_NS + { + .procname = "unprivileged_userns_clone", + .data = &unprivileged_userns_clone, + .maxlen = 
sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, #endif { } }; diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 1d8e47bed3f1..a470898ff62b 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -22,6 +22,12 @@ #include #include +/* + * sysctl determining whether unprivileged users may unshare a new + * userns. Allowed by default + */ +int unprivileged_userns_clone = 1; + static struct kmem_cache *user_ns_cachep __read_mostly; static DEFINE_MUTEX(userns_state_mutex); -- Gitee From cd2ecc033d6334985d5e2301674cfeeef061d961 Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Sun, 24 Apr 2022 09:54:17 +0800 Subject: [PATCH 0054/2138] anolis: userns: add a sysctl to control the max depth ANBZ: #8322 Add "sysctl.kernel.userns_max_level" to control the maximum nested level of user namespace. The valid configuration values are 0-33. When configured to zero, user namespace is effectively disabled. Originally the check is "if (parent_ns->level > 32)" and init_user_ns.level is zero, so the actually maximum level is 33 instead of 32. Signed-off-by: Jiang Liu Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2781 --- Documentation/admin-guide/sysctl/kernel.rst | 8 ++++++++ kernel/sysctl.c | 11 +++++++++++ kernel/user_namespace.c | 9 ++++++++- 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index 4408fe2f97e7..af3fe24e938e 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -1613,6 +1613,14 @@ are not affected and are always capable of unsharing a new user namespace. +userns_max_level +================ + +This value indicates the maximum nested level of user namespace. The +valid configuration values are 0-33. When configured to zero, user +namespace is effectively disabled. 
+ + warn_limit ========== diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ecb22a814e16..0c11d319fa01 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -98,6 +98,8 @@ static const int six_hundred_forty_kb = 640 * 1024; #ifdef CONFIG_USER_NS extern int unprivileged_userns_clone; +extern int userns_max_level; +extern int userns_max_level_max; #endif static const int ngroups_max = NGROUPS_MAX; @@ -2054,6 +2056,15 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "userns_max_level", + .data = &userns_max_level, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &userns_max_level_max, + }, #endif { } }; diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index a470898ff62b..8846049c8fa3 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -28,6 +28,13 @@ */ int unprivileged_userns_clone = 1; +/* + * sysctl determining the maximum of nested level. + * Default to 33 to keep compatible with upstream. + */ +int userns_max_level = 33; +int userns_max_level_max = 33; + static struct kmem_cache *user_ns_cachep __read_mostly; static DEFINE_MUTEX(userns_state_mutex); @@ -94,7 +101,7 @@ int create_user_ns(struct cred *new) int ret, i; ret = -ENOSPC; - if (parent_ns->level > 32) + if (parent_ns->level >= userns_max_level) goto fail; ucounts = inc_user_namespaces(parent_ns, owner); -- Gitee From 2ae49ba0c93c97c0f659ce4498f04fb46841a995 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 22 Jan 2024 12:28:50 +0800 Subject: [PATCH 0055/2138] x86/microcode/amd: Fix snprintf() format string warning in W=1 build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8003 commit 2e9064faccd1a5b9de8c6f4b23d9f4948901cbe9 upstream. 
Building with GCC 11.x results in the following warning: arch/x86/kernel/cpu/microcode/amd.c: In function ‘find_blobs_in_containers’: arch/x86/kernel/cpu/microcode/amd.c:504:58: error: ‘h.bin’ directive output may be truncated writing 5 bytes into a region of size between 1 and 7 [-Werror=format-truncation=] arch/x86/kernel/cpu/microcode/amd.c:503:17: note: ‘snprintf’ output between 35 and 41 bytes into a destination of size 36 The issue is that GCC does not know that the family can only be a byte (it ultimately comes from CPUID). Suggest the right size to the compiler by marking the argument as char-size ("hh"). While at it, instead of using the slightly more obscure precision specifier use the width with zero padding (over 23000 occurrences in kernel sources, vs 500 for the idiom using the precision). Intel-SIG: commit 2e9064faccd1 x86/microcode/amd: Fix snprintf() format string warning in W=1 build. Microcode restructuring backport. Reported-by: kernel test robot Signed-off-by: Paolo Bonzini Signed-off-by: Ingo Molnar Signed-off-by: Borislav Petkov (AMD) Closes: https://lore.kernel.org/oe-kbuild-all/202308252255.2HPJ6x5Q-lkp@intel.com/ Link: https://lore.kernel.org/r/20231016224858.2829248-1-pbonzini@redhat.com [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/amd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 296b1f327d24..e884a7894be8 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -501,10 +501,10 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) if (x86_cpuid_vendor() == X86_VENDOR_AMD && family >= 0x15) snprintf(fw_name, sizeof(fw_name), - "amd-ucode/microcode_amd_fam%.2xh.bin", family); + "amd-ucode/microcode_amd_fam%02hhxh.bin", family); 
else if (x86_cpuid_vendor() == X86_VENDOR_HYGON) snprintf(fw_name, sizeof(fw_name), - "hygon-ucode/microcode_hygon_fam%.2xh.bin", family); + "hygon-ucode/microcode_hygon_fam%02hhxh.bin", family); if (firmware_request_builtin(&fw, fw_name)) { cp->size = fw.size; -- Gitee From 67ccafe617c1cd50c763ed4423b5b8ee86d5f5fe Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:36 +0200 Subject: [PATCH 0056/2138] x86/boot/32: Disable stackprotector and tracing for mk_early_pgtbl_32() ANBZ: #8003 commit 242db7589460ca94e28c51ffbddd621756f97e11 upstream. Stackprotector cannot work before paging is enabled. The read from the per CPU variable __stack_chk_guard is always accessing the virtual address either directly on UP or via FS on SMP. In physical address mode this results in an access to memory above 3GB. So this works by chance as the hardware returns the same value when there is no RAM at this physical address. When there is RAM populated above 3G then the read is by chance the same as nothing changes that memory during the very early boot stage. Stop relying on pure luck and disable the stack protector for the only C function which is called during early boot before paging is enabled. Remove function tracing from the whole source file as there is no way to trace this at all, but in case of CONFIG_DYNAMIC_FTRACE=n mk_early_pgtbl_32() would access global function tracer variables in physical address mode which again might work by chance. Intel-SIG: commit 242db7589460 x86/boot/32: Disable stackprotector and tracing for mk_early_pgtbl_32(). Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.156063939@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/Makefile | 1 + arch/x86/kernel/head32.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 3269a0e23d3a..0000325ab98f 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -16,6 +16,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_early_printk.o = -pg CFLAGS_REMOVE_head64.o = -pg +CFLAGS_REMOVE_head32.o = -pg CFLAGS_REMOVE_sev.o = -pg CFLAGS_REMOVE_rethook.o = -pg endif diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 246a609f889b..bf678d6f4359 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -70,7 +70,8 @@ asmlinkage __visible void __init __noreturn i386_start_kernel(void) * always zero at this stage. */ void __init mk_early_pgtbl_32(void); -void __init mk_early_pgtbl_32(void) + +void __init __no_stack_protector mk_early_pgtbl_32(void) { #ifdef __pa #undef __pa -- Gitee From fb269ddf44ba79ebfff012dc38f54dadb046a8b7 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:25 +0200 Subject: [PATCH 0057/2138] x86/boot: Use __pa_nodebug() in mk_early_pgtbl_32() ANBZ: #8003 commit 1e2dd572d2b773b5b8882aae66e5f0328d562aa9 upstream. Use the existing macro instead of undefining and redefining __pa(). No functional change. Intel-SIG: commit 1e2dd572d2b7 x86/boot: Use __pa_nodebug() in mk_early_pgtbl_32(). Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.051625827@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/head32.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index bf678d6f4359..8fe0dd38fff0 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -73,25 +73,21 @@ void __init mk_early_pgtbl_32(void); void __init __no_stack_protector mk_early_pgtbl_32(void) { -#ifdef __pa -#undef __pa -#endif -#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) pte_t pte, *ptep; int i; unsigned long *ptr; /* Enough space to fit pagetables for the low memory linear map */ - const unsigned long limit = __pa(_end) + + const unsigned long limit = __pa_nodebug(_end) + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); #ifdef CONFIG_X86_PAE - pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd); + pmd_t pl2, *pl2p = (pmd_t *)__pa_nodebug(initial_pg_pmd); #define SET_PL2(pl2, val) { (pl2).pmd = (val); } #else - pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table); + pgd_t pl2, *pl2p = (pgd_t *)__pa_nodebug(initial_page_table); #define SET_PL2(pl2, val) { (pl2).pgd = (val); } #endif - ptep = (pte_t *)__pa(__brk_base); + ptep = (pte_t *)__pa_nodebug(__brk_base); pte.pte = PTE_IDENT_ATTR; while ((pte.pte & PTE_PFN_MASK) < limit) { @@ -111,11 +107,11 @@ void __init __no_stack_protector mk_early_pgtbl_32(void) pl2p++; } - ptr = (unsigned long *)__pa(&max_pfn_mapped); + ptr = (unsigned long *)__pa_nodebug(&max_pfn_mapped); /* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */ *ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT; - ptr = (unsigned long *)__pa(&_brk_end); + ptr = (unsigned long *)__pa_nodebug(&_brk_end); *ptr = (unsigned long)ptep + PAGE_OFFSET; } -- Gitee From 
c8bf4700080fa59a6f9853a0459ccf0cc7b7c6cf Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:26 +0200 Subject: [PATCH 0058/2138] x86/boot/32: De-uglify the 2/3 level paging difference in mk_early_pgtbl_32() ANBZ: #8003 commit a62f4ca106fd250e9247decd100f3905131fc1fe upstream. Move the ifdeffery out of the function and use proper typedefs to make it work for both 2 and 3 level paging. No functional change. [ bp: Move mk_early_pgtbl_32() declaration into a header. ] Intel-SIG: commit a62f4ca106fd x86/boot/32: De-uglify the 2/3 level paging difference in mk_early_pgtbl_32(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.111059491@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/setup.h | 1 + arch/x86/kernel/head32.c | 38 +++++++++++++++++++----------------- 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index f3495623ac99..bf483fcb4e57 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -126,6 +126,7 @@ void clear_bss(void); #ifdef __i386__ asmlinkage void __init __noreturn i386_start_kernel(void); +void __init mk_early_pgtbl_32(void); #else asmlinkage void __init __noreturn x86_64_start_kernel(char *real_mode); diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 8fe0dd38fff0..2b6599807026 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -69,41 +69,43 @@ asmlinkage __visible void __init __noreturn i386_start_kernel(void) * to the first kernel PMD. Note the upper half of each PMD or PTE are * always zero at this stage. 
*/ -void __init mk_early_pgtbl_32(void); +#ifdef CONFIG_X86_PAE +typedef pmd_t pl2_t; +#define pl2_base initial_pg_pmd +#define SET_PL2(val) { .pmd = (val), } +#else +typedef pgd_t pl2_t; +#define pl2_base initial_page_table +#define SET_PL2(val) { .pgd = (val), } +#endif void __init __no_stack_protector mk_early_pgtbl_32(void) { - pte_t pte, *ptep; - int i; - unsigned long *ptr; /* Enough space to fit pagetables for the low memory linear map */ const unsigned long limit = __pa_nodebug(_end) + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); -#ifdef CONFIG_X86_PAE - pmd_t pl2, *pl2p = (pmd_t *)__pa_nodebug(initial_pg_pmd); -#define SET_PL2(pl2, val) { (pl2).pmd = (val); } -#else - pgd_t pl2, *pl2p = (pgd_t *)__pa_nodebug(initial_page_table); -#define SET_PL2(pl2, val) { (pl2).pgd = (val); } -#endif + pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base); + pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base); + unsigned long *ptr; + int i; - ptep = (pte_t *)__pa_nodebug(__brk_base); pte.pte = PTE_IDENT_ATTR; while ((pte.pte & PTE_PFN_MASK) < limit) { + pl2_t pl2 = SET_PL2((unsigned long)ptep | PDE_IDENT_ATTR); - SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR); *pl2p = pl2; -#ifndef CONFIG_X86_PAE - /* Kernel PDE entry */ - *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2; -#endif + + if (!IS_ENABLED(CONFIG_X86_PAE)) { + /* Kernel PDE entry */ + *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2; + } + for (i = 0; i < PTRS_PER_PTE; i++) { *ptep = pte; pte.pte += PAGE_SIZE; ptep++; } - pl2p++; } -- Gitee From 7344bfaa481476f22153d4ee4856f7e06e3d7259 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:28 +0200 Subject: [PATCH 0059/2138] x86/boot/32: Restructure mk_early_pgtbl_32() ANBZ: #8003 commit 69ba866db281c768d5ecca909361ea4c4e71d57e upstream. Prepare it for adding a temporary initrd mapping by splitting out the actual map loop. No functional change. Intel-SIG: commit 69ba866db281 x86/boot/32: Restructure mk_early_pgtbl_32(). 
Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.175910753@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/head32.c | 42 ++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 2b6599807026..bdce6321fabd 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -79,35 +79,40 @@ typedef pgd_t pl2_t; #define SET_PL2(val) { .pgd = (val), } #endif -void __init __no_stack_protector mk_early_pgtbl_32(void) +static __init __no_stack_protector pte_t init_map(pte_t pte, pte_t **ptep, pl2_t **pl2p, + const unsigned long limit) { - /* Enough space to fit pagetables for the low memory linear map */ - const unsigned long limit = __pa_nodebug(_end) + - (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); - pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base); - pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base); - unsigned long *ptr; - int i; - - pte.pte = PTE_IDENT_ATTR; - while ((pte.pte & PTE_PFN_MASK) < limit) { - pl2_t pl2 = SET_PL2((unsigned long)ptep | PDE_IDENT_ATTR); - - *pl2p = pl2; + pl2_t pl2 = SET_PL2((unsigned long)*ptep | PDE_IDENT_ATTR); + int i; + **pl2p = pl2; if (!IS_ENABLED(CONFIG_X86_PAE)) { /* Kernel PDE entry */ - *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2; + *(*pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2; } for (i = 0; i < PTRS_PER_PTE; i++) { - *ptep = pte; + **ptep = pte; pte.pte += PAGE_SIZE; - ptep++; + (*ptep)++; } - pl2p++; + (*pl2p)++; } + return pte; +} + +void __init __no_stack_protector mk_early_pgtbl_32(void) +{ + /* Enough space to fit pagetables for the low memory linear map */ + const unsigned long limit = __pa_nodebug(_end) + + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); + pte_t pte, 
*ptep = (pte_t *)__pa_nodebug(__brk_base); + pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base); + unsigned long *ptr; + + pte.pte = PTE_IDENT_ATTR; + pte = init_map(pte, &ptep, &pl2p, limit); ptr = (unsigned long *)__pa_nodebug(&max_pfn_mapped); /* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */ @@ -116,4 +121,3 @@ void __init __no_stack_protector mk_early_pgtbl_32(void) ptr = (unsigned long *)__pa_nodebug(&_brk_end); *ptr = (unsigned long)ptep + PAGE_OFFSET; } - -- Gitee From 021241e42ccea56faf306b1de8b3f07c607334d9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 12:31:50 +0800 Subject: [PATCH 0060/2138] x86/microcode: Provide CONFIG_MICROCODE_INITRD32 ANBZ: #8003 commit fdbd43819400e74c1c20a646969ea8f71706eb2b upstream. Create an aggregate config switch which covers X86_32, MICROCODE and BLK_DEV_INITRD to avoid lengthy #ifdeffery in upcoming code. Intel-SIG: commit fdbd43819400 x86/microcode: Provide CONFIG_MICROCODE_INITRD32. Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.236208250@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/Kconfig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a0d11cd0de55..3ef3238c6e95 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1314,6 +1314,10 @@ config MICROCODE def_bool y depends on CPU_SUP_AMD || CPU_SUP_INTEL || CPU_SUP_HYGON +config MICROCODE_INITRD32 + def_bool y + depends on MICROCODE && X86_32 && BLK_DEV_INITRD + config MICROCODE_LATE_LOADING bool "Late microcode loading (DANGEROUS)" default n -- Gitee From 6b820d613ad229f2be82bce4ada00c05c8869d71 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:31 +0200 Subject: [PATCH 0061/2138] x86/boot/32: Temporarily map initrd for microcode loading ANBZ: #8003 commit 4c585af7180c147062c636a927a2fc2b6a7072f5 upstream. Early microcode loading on 32-bit runs in physical address mode because the initrd is not covered by the initial page tables. That results in a horrible mess all over the microcode loader code. Provide a temporary mapping for the initrd in the initial page tables by appending it to the actual initial mapping starting with a new PGD or PMD depending on the configured page table levels ([non-]PAE). The page table entries are located after _brk_end so they are not permanently using memory space. The mapping is invalidated right away in i386_start_kernel() after the early microcode loader has run. This prepares for removing the physical address mode oddities from all over the microcode loader code, which in turn allows further cleanups. Provide the map and unmap code and document the place where the microcode loader needs to be invoked with a comment. 
Intel-SIG: commit 4c585af7180c x86/boot/32: Temporarily map initrd for microcode loading. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.292291436@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/microcode.h | 2 ++ arch/x86/kernel/head32.c | 54 ++++++++++++++++++++++++++++++-- 2 files changed, 54 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index bbbe9d744977..5216bf1acc3b 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -23,6 +23,8 @@ static inline void load_ucode_ap(void) { } static inline void microcode_bsp_resume(void) { } #endif +extern unsigned long initrd_start_early; + #ifdef CONFIG_CPU_SUP_INTEL /* Intel specific microcode defines. 
Public for IFS */ struct microcode_header_intel { diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index bdce6321fabd..abdbfd335e13 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -29,11 +29,33 @@ static void __init i386_default_early_setup(void) x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc; } +#ifdef CONFIG_MICROCODE_INITRD32 +unsigned long __initdata initrd_start_early; +static pte_t __initdata *initrd_pl2p_start, *initrd_pl2p_end; + +static void zap_early_initrd_mapping(void) +{ + pte_t *pl2p = initrd_pl2p_start; + + for (; pl2p < initrd_pl2p_end; pl2p++) { + *pl2p = (pte_t){ .pte = 0 }; + + if (!IS_ENABLED(CONFIG_X86_PAE)) + *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = (pte_t) {.pte = 0}; + } +} +#else +static inline void zap_early_initrd_mapping(void) { } +#endif + asmlinkage __visible void __init __noreturn i386_start_kernel(void) { /* Make sure IDT is set up before any exception happens */ idt_setup_early_handler(); + /* load_ucode_bsp() */ + zap_early_initrd_mapping(); + cr4_init_shadow(); sanitize_boot_params(&boot_params); @@ -105,9 +127,9 @@ static __init __no_stack_protector pte_t init_map(pte_t pte, pte_t **ptep, pl2_t void __init __no_stack_protector mk_early_pgtbl_32(void) { /* Enough space to fit pagetables for the low memory linear map */ - const unsigned long limit = __pa_nodebug(_end) + - (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); + unsigned long limit = __pa_nodebug(_end) + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base); + struct boot_params __maybe_unused *params; pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base); unsigned long *ptr; @@ -120,4 +142,32 @@ void __init __no_stack_protector mk_early_pgtbl_32(void) ptr = (unsigned long *)__pa_nodebug(&_brk_end); *ptr = (unsigned long)ptep + PAGE_OFFSET; + +#ifdef CONFIG_MICROCODE_INITRD32 + /* Running on a hypervisor? 
*/ + if (native_cpuid_ecx(1) & BIT(31)) + return; + + params = (struct boot_params *)__pa_nodebug(&boot_params); + if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image) + return; + + /* Save the virtual start address */ + ptr = (unsigned long *)__pa_nodebug(&initrd_start_early); + *ptr = (pte.pte & PTE_PFN_MASK) + PAGE_OFFSET; + *ptr += ((unsigned long)params->hdr.ramdisk_image) & ~PAGE_MASK; + + /* Save PLP2 for cleanup */ + ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_start); + *ptr = (unsigned long)pl2p + PAGE_OFFSET; + + limit = (unsigned long)params->hdr.ramdisk_image; + pte.pte = PTE_IDENT_ATTR | PFN_ALIGN(limit); + limit = (unsigned long)params->hdr.ramdisk_image + params->hdr.ramdisk_size; + + init_map(pte, &ptep, &pl2p, limit); + + ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_end); + *ptr = (unsigned long)pl2p + PAGE_OFFSET; +#endif } -- Gitee From 69ed1665f6f45f3b80f79c9b57e5b1a15e05dbe8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 12:42:27 +0800 Subject: [PATCH 0062/2138] x86/microcode/32: Move early loading after paging enable ANBZ: #8003 commit 0b62f6cb07738d7211d926c39f6946b87f72e792 upstream. 32-bit loads microcode before paging is enabled. The commit which introduced that has zero justification in the changelog. The cover letter has slightly more content, but it does not give any technical justification either: "The problem in current microcode loading method is that we load a microcode way, way too late; ideally we should load it before turning paging on. This may only be practical on 32 bits since we can't get to 64-bit mode without paging on, but we should still do it as early as at all possible." Handwaving word salad with zero technical content. Someone claimed in an offlist conversation that this is required for curing the ATOM erratum AAE44/AAF40/AAG38/AAH41. That erratum requires an microcode update in order to make the usage of PSE safe. 
But during early boot, PSE is completely irrelevant and it is evaluated way later. Neither is it relevant for the AP on single core HT enabled CPUs as the microcode loading on the AP is not doing anything. On dual core CPUs there is a theoretical problem if a split of an executable large page between enabling paging including PSE and loading the microcode happens. But that's only theoretical, it's practically irrelevant because the affected dual core CPUs are 64bit enabled and therefore have paging and PSE enabled before loading the microcode on the second core. So why would it work on 64-bit but not on 32-bit? The erratum: "AAG38 Code Fetch May Occur to Incorrect Address After a Large Page is Split Into 4-Kbyte Pages Problem: If software clears the PS (page size) bit in a present PDE (page directory entry), that will cause linear addresses mapped through this PDE to use 4-KByte pages instead of using a large page after old TLB entries are invalidated. Due to this erratum, if a code fetch uses this PDE before the TLB entry for the large page is invalidated then it may fetch from a different physical address than specified by either the old large page translation or the new 4-KByte page translation. This erratum may also cause speculative code fetches from incorrect addresses." The practical relevance for this is exactly zero because there is no splitting of large text pages during early boot-time, i.e. between paging enable and microcode loading, and neither during CPU hotplug. IOW, this load microcode before paging enable is yet another voodoo programming solution in search of a problem. What's worse is that it causes at least two serious problems: 1) When stackprotector is enabled, the microcode loader code has the stackprotector mechanics enabled. The read from the per CPU variable __stack_chk_guard is always accessing the virtual address either directly on UP or via %fs on SMP. In physical address mode this results in an access to memory above 3GB. 
So this works by chance as the hardware returns the same value when there is no RAM at this physical address. When there is RAM populated above 3G then the read is by chance the same as nothing changes that memory during the very early boot stage. That's not necessarily true during runtime CPU hotplug. 2) When function tracing is enabled, the relevant microcode loader functions and the functions invoked from there will call into the tracing code and evaluate global and per CPU variables in physical address mode. What could potentially go wrong? Cure this and move the microcode loading after the early paging enable, use the new temporary initrd mapping and remove the gunk in the microcode loader which is required to handle physical address mode. Intel-SIG: commit 0b62f6cb0773 x86/microcode/32: Move early loading after paging enable. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.348298216@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/microcode.h | 5 - arch/x86/kernel/cpu/common.c | 12 --- arch/x86/kernel/cpu/microcode/amd.c | 110 +++++++-------------- arch/x86/kernel/cpu/microcode/core.c | 78 ++++----------- arch/x86/kernel/cpu/microcode/intel.c | 116 ++++------------------- arch/x86/kernel/cpu/microcode/internal.h | 2 +- arch/x86/kernel/head32.c | 3 +- arch/x86/kernel/head_32.S | 10 -- arch/x86/kernel/smpboot.c | 12 +-- 9 files changed, 71 insertions(+), 277 deletions(-) diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 5216bf1acc3b..78f1eb2532dc 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -70,11 +70,6 @@ static inline u32 intel_get_microcode_revision(void) return rev; } - -void show_ucode_info_early(void); - -#else /* CONFIG_CPU_SUP_INTEL 
*/ -static inline void show_ucode_info_early(void) { } #endif /* !CONFIG_CPU_SUP_INTEL */ #endif /* _ASM_X86_MICROCODE_H */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 8bc90a501e7b..a844110691f9 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -2224,8 +2224,6 @@ static inline void setup_getcpu(int cpu) } #ifdef CONFIG_X86_64 -static inline void ucode_cpu_init(int cpu) { } - static inline void tss_setup_ist(struct tss_struct *tss) { /* Set up the per-CPU TSS IST stacks */ @@ -2236,16 +2234,8 @@ static inline void tss_setup_ist(struct tss_struct *tss) /* Only mapped when SEV-ES is active */ tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC); } - #else /* CONFIG_X86_64 */ - -static inline void ucode_cpu_init(int cpu) -{ - show_ucode_info_early(); -} - static inline void tss_setup_ist(struct tss_struct *tss) { } - #endif /* !CONFIG_X86_64 */ static inline void tss_setup_io_bitmap(struct tss_struct *tss) @@ -2301,8 +2291,6 @@ void cpu_init(void) struct task_struct *cur = current; int cpu = raw_smp_processor_id(); - ucode_cpu_init(cpu); - #ifdef CONFIG_NUMA if (this_cpu_read(numa_node) == 0 && early_cpu_to_node(cpu) != NUMA_NO_NODE) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index e884a7894be8..3a5b64d19f76 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -121,24 +121,20 @@ static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig) /* * Check whether there is a valid microcode container file at the beginning - * of @buf of size @buf_size. Set @early to use this function in the early path. + * of @buf of size @buf_size. 
*/ -static bool verify_container(const u8 *buf, size_t buf_size, bool early) +static bool verify_container(const u8 *buf, size_t buf_size) { u32 cont_magic; if (buf_size <= CONTAINER_HDR_SZ) { - if (!early) - pr_debug("Truncated microcode container header.\n"); - + pr_debug("Truncated microcode container header.\n"); return false; } cont_magic = *(const u32 *)buf; if (cont_magic != UCODE_MAGIC) { - if (!early) - pr_debug("Invalid magic value (0x%08x).\n", cont_magic); - + pr_debug("Invalid magic value (0x%08x).\n", cont_magic); return false; } @@ -147,23 +143,20 @@ static bool verify_container(const u8 *buf, size_t buf_size, bool early) /* * Check whether there is a valid, non-truncated CPU equivalence table at the - * beginning of @buf of size @buf_size. Set @early to use this function in the - * early path. + * beginning of @buf of size @buf_size. */ -static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) +static bool verify_equivalence_table(const u8 *buf, size_t buf_size) { const u32 *hdr = (const u32 *)buf; u32 cont_type, equiv_tbl_len; - if (!verify_container(buf, buf_size, early)) + if (!verify_container(buf, buf_size)) return false; cont_type = hdr[1]; if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) { - if (!early) - pr_debug("Wrong microcode container equivalence table type: %u.\n", - cont_type); - + pr_debug("Wrong microcode container equivalence table type: %u.\n", + cont_type); return false; } @@ -172,9 +165,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) equiv_tbl_len = hdr[2]; if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) || buf_size < equiv_tbl_len) { - if (!early) - pr_debug("Truncated equivalence table.\n"); - + pr_debug("Truncated equivalence table.\n"); return false; } @@ -183,22 +174,19 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) /* * Check whether there is a valid, non-truncated microcode patch section at the - * beginning of @buf of size 
@buf_size. Set @early to use this function in the - * early path. + * beginning of @buf of size @buf_size. * * On success, @sh_psize returns the patch size according to the section header, * to the caller. */ static bool -__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early) +__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize) { u32 p_type, p_size; const u32 *hdr; if (buf_size < SECTION_HDR_SIZE) { - if (!early) - pr_debug("Truncated patch section.\n"); - + pr_debug("Truncated patch section.\n"); return false; } @@ -207,17 +195,13 @@ __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early p_size = hdr[1]; if (p_type != UCODE_UCODE_TYPE) { - if (!early) - pr_debug("Invalid type field (0x%x) in container file section header.\n", - p_type); - + pr_debug("Invalid type field (0x%x) in container file section header.\n", + p_type); return false; } if (p_size < sizeof(struct microcode_header_amd)) { - if (!early) - pr_debug("Patch of size %u too short.\n", p_size); - + pr_debug("Patch of size %u too short.\n", p_size); return false; } @@ -269,7 +253,7 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size * 0: success */ static int -verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early) +verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size) { struct microcode_header_amd *mc_hdr; unsigned int ret; @@ -277,7 +261,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea u16 proc_id; u8 patch_fam; - if (!__verify_patch_section(buf, buf_size, &sh_psize, early)) + if (!__verify_patch_section(buf, buf_size, &sh_psize)) return -1; /* @@ -292,16 +276,13 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea * size sh_psize, as the section claims. 
*/ if (buf_size < sh_psize) { - if (!early) - pr_debug("Patch of size %u truncated.\n", sh_psize); - + pr_debug("Patch of size %u truncated.\n", sh_psize); return -1; } ret = __verify_patch_size(family, sh_psize, buf_size); if (!ret) { - if (!early) - pr_debug("Per-family patch size mismatch.\n"); + pr_debug("Per-family patch size mismatch.\n"); return -1; } @@ -309,8 +290,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE); if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { - if (!early) - pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id); + pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id); return -1; } @@ -337,7 +317,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) u16 eq_id; u8 *buf; - if (!verify_equivalence_table(ucode, size, true)) + if (!verify_equivalence_table(ucode, size)) return 0; buf = ucode; @@ -364,7 +344,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) u32 patch_size; int ret; - ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true); + ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size); if (ret < 0) { /* * Patch verification failed, skip to the next container, if @@ -456,14 +436,8 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) { struct cont_desc desc = { 0 }; struct microcode_amd *mc; - u32 rev, dummy, *new_rev; bool ret = false; - -#ifdef CONFIG_X86_32 - new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); -#else - new_rev = &ucode_new_rev; -#endif + u32 rev, dummy; desc.cpuid_1_eax = cpuid_1_eax; @@ -484,8 +458,8 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) return ret; if (!__apply_microcode_amd(mc)) { - *new_rev = mc->hdr.patch_id; - ret = true; + ucode_new_rev = mc->hdr.patch_id; + ret = true; } return ret; @@ 
-517,33 +491,13 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret) { - struct ucode_cpu_info *uci; struct cpio_data cp; - const char *path; - bool use_pa; - - if (IS_ENABLED(CONFIG_X86_32)) { - uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info); - if (x86_cpuid_vendor() == X86_VENDOR_HYGON) - path = (const char *)__pa_nodebug( - "kernel/x86/microcode/HygonGenuine.bin"); - else - path = (const char *)__pa_nodebug(ucode_path); - use_pa = true; - } else { - uci = ucode_cpu_info; - if (x86_cpuid_vendor() == X86_VENDOR_HYGON) - path = "kernel/x86/microcode/HygonGenuine.bin"; - else - path = ucode_path; - use_pa = false; - } if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) - cp = find_microcode_in_initrd(path, use_pa); + cp = find_microcode_in_initrd(ucode_path); /* Needed in load_microcode_amd() */ - uci->cpu_sig.sig = cpuid_1_eax; + ucode_cpu_info->cpu_sig.sig = cpuid_1_eax; *ret = cp; } @@ -578,7 +532,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) else path = ucode_path; - cp = find_microcode_in_initrd(path, false); + cp = find_microcode_in_initrd(path); if (!(cp.data && cp.size)) return -EINVAL; @@ -754,7 +708,7 @@ static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size) u32 equiv_tbl_len; const u32 *hdr; - if (!verify_equivalence_table(buf, buf_size, false)) + if (!verify_equivalence_table(buf, buf_size)) return 0; hdr = (const u32 *)buf; @@ -800,7 +754,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, u16 proc_id; int ret; - ret = verify_patch(family, fw, leftover, patch_size, false); + ret = verify_patch(family, fw, leftover, patch_size); if (ret) return ret; @@ -938,7 +892,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device) } ret = UCODE_ERROR; - if (!verify_container(fw->data, fw->size, false)) + if (!verify_container(fw->data, 
fw->size)) goto fw_release; ret = load_microcode_amd(c->x86, fw->data, fw->size); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 98245c19a90d..d755684c2580 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -94,10 +94,7 @@ static bool amd_check_current_patch_level(void) native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy); - if (IS_ENABLED(CONFIG_X86_32)) - levels = (u32 *)__pa_nodebug(&final_levels); - else - levels = final_levels; + levels = final_levels; for (i = 0; levels[i]; i++) { if (lvl == levels[i]) @@ -109,17 +106,8 @@ static bool amd_check_current_patch_level(void) static bool __init check_loader_disabled_bsp(void) { static const char *__dis_opt_str = "dis_ucode_ldr"; - -#ifdef CONFIG_X86_32 - const char *cmdline = (const char *)__pa_nodebug(boot_command_line); - const char *option = (const char *)__pa_nodebug(__dis_opt_str); - bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr); - -#else /* CONFIG_X86_64 */ const char *cmdline = boot_command_line; const char *option = __dis_opt_str; - bool *res = &dis_ucode_ldr; -#endif /* * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not @@ -127,18 +115,18 @@ static bool __init check_loader_disabled_bsp(void) * that's good enough as they don't land on the BSP path anyway. 
*/ if (native_cpuid_ecx(1) & BIT(31)) - return *res; + return true; if (x86_cpuid_vendor() == X86_VENDOR_AMD || x86_cpuid_vendor() == X86_VENDOR_HYGON) { if (amd_check_current_patch_level()) - return *res; + return true; } if (cmdline_find_option_bool(cmdline, option) <= 0) - *res = false; + dis_ucode_ldr = false; - return *res; + return dis_ucode_ldr; } void __init load_ucode_bsp(void) @@ -180,20 +168,11 @@ void __init load_ucode_bsp(void) load_ucode_amd_early(cpuid_1_eax); } -static bool check_loader_disabled_ap(void) -{ -#ifdef CONFIG_X86_32 - return *((bool *)__pa_nodebug(&dis_ucode_ldr)); -#else - return dis_ucode_ldr; -#endif -} - void load_ucode_ap(void) { unsigned int cpuid_1_eax; - if (check_loader_disabled_ap()) + if (dis_ucode_ldr) return; cpuid_1_eax = native_cpuid_eax(1); @@ -247,40 +226,28 @@ static int __init save_microcode_in_initrd(void) return ret; } -struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) +struct cpio_data find_microcode_in_initrd(const char *path) { #ifdef CONFIG_BLK_DEV_INITRD unsigned long start = 0; size_t size; #ifdef CONFIG_X86_32 - struct boot_params *params; - - if (use_pa) - params = (struct boot_params *)__pa_nodebug(&boot_params); - else - params = &boot_params; - - size = params->hdr.ramdisk_size; - - /* - * Set start only if we have an initrd image. We cannot use initrd_start - * because it is not set that early yet. - */ + size = boot_params.hdr.ramdisk_size; + /* Early load on BSP has a temporary mapping. 
*/ if (size) - start = params->hdr.ramdisk_image; + start = initrd_start_early; -# else /* CONFIG_X86_64 */ +#else /* CONFIG_X86_64 */ size = (unsigned long)boot_params.ext_ramdisk_size << 32; size |= boot_params.hdr.ramdisk_size; if (size) { start = (unsigned long)boot_params.ext_ramdisk_image << 32; start |= boot_params.hdr.ramdisk_image; - start += PAGE_OFFSET; } -# endif +#endif /* * Fixup the start address: after reserve_initrd() runs, initrd_start @@ -291,23 +258,10 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) * initrd_gone is for the hotplug case where we've thrown out initrd * already. */ - if (!use_pa) { - if (initrd_gone) - return (struct cpio_data){ NULL, 0, "" }; - if (initrd_start) - start = initrd_start; - } else { - /* - * The picture with physical addresses is a bit different: we - * need to get the *physical* address to which the ramdisk was - * relocated, i.e., relocated_ramdisk (not initrd_start) and - * since we're running from physical addresses, we need to access - * relocated_ramdisk through its *physical* address too. - */ - u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk); - if (*rr) - start = *rr; - } + if (initrd_gone) + return (struct cpio_data){ NULL, 0, "" }; + if (initrd_start) + start = initrd_start; return find_cpio_data(path, (void *)start, size, NULL); #else /* !CONFIG_BLK_DEV_INITRD */ diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 94dd6af9c963..24a5c8b594c6 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -319,15 +319,8 @@ static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigne if (!intel_find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf)) return; - /* - * Save for early loading. On 32-bit, that needs to be a physical - * address as the APs are running from physical addresses, before - * paging has been enabled. 
- */ - if (IS_ENABLED(CONFIG_X86_32)) - intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data); - else - intel_ucode_patch = p->data; + /* Save for early loading */ + intel_ucode_patch = p->data; } /* @@ -420,66 +413,10 @@ static bool load_builtin_intel_microcode(struct cpio_data *cp) return false; } -static void print_ucode_info(int old_rev, int new_rev, unsigned int date) -{ - pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", - old_rev, - new_rev, - date & 0xffff, - date >> 24, - (date >> 16) & 0xff); -} - -#ifdef CONFIG_X86_32 - -static int delay_ucode_info; -static int current_mc_date; -static int early_old_rev; - -/* - * Print early updated ucode info after printk works. This is delayed info dump. - */ -void show_ucode_info_early(void) -{ - struct ucode_cpu_info uci; - - if (delay_ucode_info) { - intel_cpu_collect_info(&uci); - print_ucode_info(early_old_rev, uci.cpu_sig.rev, current_mc_date); - delay_ucode_info = 0; - } -} - -/* - * At this point, we can not call printk() yet. Delay printing microcode info in - * show_ucode_info_early() until printk() works. 
- */ -static void print_ucode(int old_rev, int new_rev, int date) -{ - int *delay_ucode_info_p; - int *current_mc_date_p; - int *early_old_rev_p; - - delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info); - current_mc_date_p = (int *)__pa_nodebug(¤t_mc_date); - early_old_rev_p = (int *)__pa_nodebug(&early_old_rev); - - *delay_ucode_info_p = 1; - *current_mc_date_p = date; - *early_old_rev_p = old_rev; -} -#else - -static inline void print_ucode(int old_rev, int new_rev, int date) -{ - print_ucode_info(old_rev, new_rev, date); -} -#endif - -static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) +static int apply_microcode_early(struct ucode_cpu_info *uci) { struct microcode_intel *mc; - u32 rev, old_rev; + u32 rev, old_rev, date; mc = uci->mc; if (!mc) @@ -513,11 +450,9 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) uci->cpu_sig.rev = rev; - if (early) - print_ucode(old_rev, uci->cpu_sig.rev, mc->hdr.date); - else - print_ucode_info(old_rev, uci->cpu_sig.rev, mc->hdr.date); - + date = mc->hdr.date; + pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", + old_rev, rev, date & 0xffff, date >> 24, (date >> 16) & 0xff); return 0; } @@ -535,7 +470,7 @@ int __init save_microcode_in_initrd_intel(void) intel_ucode_patch = NULL; if (!load_builtin_intel_microcode(&cp)) - cp = find_microcode_in_initrd(ucode_path, false); + cp = find_microcode_in_initrd(ucode_path); if (!(cp.data && cp.size)) return 0; @@ -551,21 +486,11 @@ int __init save_microcode_in_initrd_intel(void) */ static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci) { - static const char *path; struct cpio_data cp; - bool use_pa; - - if (IS_ENABLED(CONFIG_X86_32)) { - path = (const char *)__pa_nodebug(ucode_path); - use_pa = true; - } else { - path = ucode_path; - use_pa = false; - } /* try built-in microcode first */ if (!load_builtin_intel_microcode(&cp)) - cp = find_microcode_in_initrd(path, use_pa); + cp = 
find_microcode_in_initrd(ucode_path); if (!(cp.data && cp.size)) return NULL; @@ -586,30 +511,21 @@ void __init load_ucode_intel_bsp(void) uci.mc = patch; - apply_microcode_early(&uci, true); + apply_microcode_early(&uci); } void load_ucode_intel_ap(void) { - struct microcode_intel *patch, **iup; struct ucode_cpu_info uci; - if (IS_ENABLED(CONFIG_X86_32)) - iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch); - else - iup = &intel_ucode_patch; - - if (!*iup) { - patch = __load_ucode_intel(&uci); - if (!patch) + if (!intel_ucode_patch) { + intel_ucode_patch = __load_ucode_intel(&uci); + if (!intel_ucode_patch) return; - - *iup = patch; } - uci.mc = *iup; - - apply_microcode_early(&uci, true); + uci.mc = intel_ucode_patch; + apply_microcode_early(&uci); } static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) @@ -647,7 +563,7 @@ void reload_ucode_intel(void) uci.mc = p; - apply_microcode_early(&uci, false); + apply_microcode_early(&uci); } static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 9e76fe430812..96df3da32346 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -44,7 +44,7 @@ struct microcode_ops { }; extern struct ucode_cpu_info ucode_cpu_info[]; -struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa); +struct cpio_data find_microcode_in_initrd(const char *path); #define MAX_UCODE_COUNT 128 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index abdbfd335e13..de001b2146ab 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -53,7 +54,7 @@ asmlinkage __visible void __init __noreturn i386_start_kernel(void) /* Make sure IDT is set up before any exception happens */ idt_setup_early_handler(); - /* load_ucode_bsp() */ + load_ucode_bsp(); 
zap_early_initrd_mapping(); cr4_init_shadow(); diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index c9318993f959..63f6ff4b28eb 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -118,11 +118,6 @@ SYM_CODE_START(startup_32) movl %eax, pa(olpc_ofw_pgd) #endif -#ifdef CONFIG_MICROCODE - /* Early load ucode on BSP. */ - call load_ucode_bsp -#endif - /* Create early pagetables. */ call mk_early_pgtbl_32 @@ -157,11 +152,6 @@ SYM_FUNC_START(startup_32_smp) movl %eax,%ss leal -__PAGE_OFFSET(%ecx),%esp -#ifdef CONFIG_MICROCODE - /* Early load ucode on AP. */ - call load_ucode_ap -#endif - .Ldefault_entry: movl $(CR0_STATE & ~X86_CR0_PG),%eax movl %eax,%cr0 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 19246d32a8ae..d28920971d80 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -259,12 +259,9 @@ static void notrace start_secondary(void *unused) cpu_init_exception_handling(); /* - * 32-bit systems load the microcode from the ASM startup code for - * historical reasons. - * - * On 64-bit systems load it before reaching the AP alive - * synchronization point below so it is not part of the full per - * CPU serialized bringup part when "parallel" bringup is enabled. + * Load the microcode before reaching the AP alive synchronization + * point below so it is not part of the full per CPU serialized + * bringup part when "parallel" bringup is enabled. * * That's even safe when hyperthreading is enabled in the CPU as * the core code starts the primary threads first and leaves the @@ -277,8 +274,7 @@ static void notrace start_secondary(void *unused) * CPUID, MSRs etc. must be strictly serialized to maintain * software state correctness. */ - if (IS_ENABLED(CONFIG_X86_64)) - load_ucode_ap(); + load_ucode_ap(); /* * Synchronization point with the hotplug core. 
Sets this CPUs -- Gitee From a2de2d567da8cbdbcc9ec40cdefbb9cea50e7f59 Mon Sep 17 00:00:00 2001 From: Ashok Raj Date: Tue, 17 Oct 2023 23:23:33 +0200 Subject: [PATCH 0063/2138] x86/microcode/intel: Rip out mixed stepping support for Intel CPUs ANBZ: #8003 commit ae76d951f6537001bdf77894d19cd4a446de337e upstream. Mixed steppings aren't supported on Intel CPUs. Only one microcode patch is required for the entire system. The caching of microcode blobs which match the family and model is therefore pointless and in fact is dysfunctional as CPU hotplug updates use only a single microcode blob, i.e. the one where *intel_ucode_patch points to. Remove the microcode cache and make it an AMD local feature. [ tglx: - save only at the end. Otherwise random microcode ends up in the pointer for early loading - free the ucode patch pointer in save_microcode_patch() only after kmemdup() has succeeded, as reported by Andrew Cooper ] Intel-SIG: commit ae76d951f653 x86/microcode/intel: Rip out mixed stepping support for Intel CPUs. Microcode restructuring backport. 
Originally-by: Thomas Gleixner Signed-off-by: Ashok Raj Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.404362809@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/amd.c | 10 ++ arch/x86/kernel/cpu/microcode/core.c | 2 - arch/x86/kernel/cpu/microcode/intel.c | 133 ++++------------------- arch/x86/kernel/cpu/microcode/internal.h | 10 -- 4 files changed, 34 insertions(+), 121 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 3a5b64d19f76..93156848df12 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -37,6 +37,16 @@ #include "internal.h" +struct ucode_patch { + struct list_head plist; + void *data; + unsigned int size; + u32 patch_id; + u16 equiv_cpu; +}; + +static LIST_HEAD(microcode_cache); + #define UCODE_MAGIC 0x00414d44 #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 #define UCODE_UCODE_TYPE 0x00000001 diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index d755684c2580..95bd2b43d720 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -50,8 +50,6 @@ static bool dis_ucode_ldr = true; bool initrd_gone; -LIST_HEAD(microcode_cache); - /* * Synchronization. * diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 24a5c8b594c6..03a55bfa88c5 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -33,10 +33,10 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; /* Current microcode patch used in early patching on the APs. 
*/ -static struct microcode_intel *intel_ucode_patch; +static struct microcode_intel *intel_ucode_patch __read_mostly; /* last level cache size per core */ -static int llc_size_per_core; +static int llc_size_per_core __ro_after_init; /* microcode format is extended from prescott processors */ struct extended_signature { @@ -253,74 +253,17 @@ static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev return intel_find_matching_signature(mc, csig, cpf); } -static struct ucode_patch *memdup_patch(void *data, unsigned int size) +static void save_microcode_patch(void *data, unsigned int size) { - struct ucode_patch *p; - - p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL); - if (!p) - return NULL; - - p->data = kmemdup(data, size, GFP_KERNEL); - if (!p->data) { - kfree(p); - return NULL; - } - - return p; -} - -static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size) -{ - struct microcode_header_intel *mc_hdr, *mc_saved_hdr; - struct ucode_patch *iter, *tmp, *p = NULL; - bool prev_found = false; - unsigned int sig, pf; - - mc_hdr = (struct microcode_header_intel *)data; - - list_for_each_entry_safe(iter, tmp, µcode_cache, plist) { - mc_saved_hdr = (struct microcode_header_intel *)iter->data; - sig = mc_saved_hdr->sig; - pf = mc_saved_hdr->pf; - - if (intel_find_matching_signature(data, sig, pf)) { - prev_found = true; - - if (mc_hdr->rev <= mc_saved_hdr->rev) - continue; - - p = memdup_patch(data, size); - if (!p) - pr_err("Error allocating buffer %p\n", data); - else { - list_replace(&iter->plist, &p->plist); - kfree(iter->data); - kfree(iter); - } - } - } - - /* - * There weren't any previous patches found in the list cache; save the - * newly found. 
- */ - if (!prev_found) { - p = memdup_patch(data, size); - if (!p) - pr_err("Error allocating buffer for %p\n", data); - else - list_add_tail(&p->plist, µcode_cache); - } + struct microcode_header_intel *p; + p = kmemdup(data, size, GFP_KERNEL); if (!p) return; - if (!intel_find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf)) - return; - + kfree(intel_ucode_patch); /* Save for early loading */ - intel_ucode_patch = p->data; + intel_ucode_patch = (struct microcode_intel *)p; } /* @@ -332,6 +275,7 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) { struct microcode_header_intel *mc_header; struct microcode_intel *patch = NULL; + u32 cur_rev = uci->cpu_sig.rev; unsigned int mc_size; while (size) { @@ -341,8 +285,7 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) mc_header = (struct microcode_header_intel *)data; mc_size = get_totalsize(mc_header); - if (!mc_size || - mc_size > size || + if (!mc_size || mc_size > size || intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0) break; @@ -354,31 +297,16 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) continue; } - if (save) { - save_microcode_patch(uci, data, mc_size); + /* BSP scan: Check whether there is newer microcode */ + if (!save && cur_rev >= mc_header->rev) goto next; - } - - - if (!patch) { - if (!has_newer_microcode(data, - uci->cpu_sig.sig, - uci->cpu_sig.pf, - uci->cpu_sig.rev)) - goto next; - } else { - struct microcode_header_intel *phdr = &patch->hdr; - - if (!has_newer_microcode(data, - phdr->sig, - phdr->pf, - phdr->rev)) - goto next; - } + /* Save scan: Check whether there is newer or matching microcode */ + if (save && cur_rev != mc_header->rev) + goto next; - /* We have a newer patch, save it. 
*/ patch = data; + cur_rev = mc_header->rev; next: data += mc_size; @@ -387,6 +315,9 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) if (size) return NULL; + if (save && patch) + save_microcode_patch(patch, mc_size); + return patch; } @@ -528,26 +459,10 @@ void load_ucode_intel_ap(void) apply_microcode_early(&uci); } -static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) +/* Accessor for microcode pointer */ +static struct microcode_intel *ucode_get_patch(void) { - struct microcode_header_intel *phdr; - struct ucode_patch *iter, *tmp; - - list_for_each_entry_safe(iter, tmp, µcode_cache, plist) { - - phdr = (struct microcode_header_intel *)iter->data; - - if (phdr->rev <= uci->cpu_sig.rev) - continue; - - if (!intel_find_matching_signature(phdr, - uci->cpu_sig.sig, - uci->cpu_sig.pf)) - continue; - - return iter->data; - } - return NULL; + return intel_ucode_patch; } void reload_ucode_intel(void) @@ -557,7 +472,7 @@ void reload_ucode_intel(void) intel_cpu_collect_info(&uci); - p = find_patch(&uci); + p = ucode_get_patch(); if (!p) return; @@ -601,7 +516,7 @@ static enum ucode_state apply_microcode_intel(int cpu) return UCODE_ERROR; /* Look for a newer patch in our cache: */ - mc = find_patch(uci); + mc = ucode_get_patch(); if (!mc) { mc = uci->mc; if (!mc) @@ -730,7 +645,7 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) uci->mc = (struct microcode_intel *)new_mc; /* Save for CPU hotplug */ - save_microcode_patch(uci, new_mc, new_mc_size); + save_microcode_patch(new_mc, new_mc_size); pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", cpu, new_rev, uci->cpu_sig.rev); diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 96df3da32346..12eb95557bdf 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -8,16 +8,6 @@ #include #include -struct ucode_patch { 
- struct list_head plist; - void *data; /* Intel uses only this one */ - unsigned int size; - u32 patch_id; - u16 equiv_cpu; -}; - -extern struct list_head microcode_cache; - struct device; enum ucode_state { -- Gitee From acbd7213040e6b0aadf40b690e9b8f8147f41b04 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:39 +0200 Subject: [PATCH 0064/2138] x86/microcode/intel: Simplify scan_microcode() ANBZ: #8003 commit b0f0bf5eef5fac6ba30b7cac15ca4cb01f8a6ca9 upstream. Make it readable and comprehensible. Intel-SIG: commit b0f0bf5eef5f x86/microcode/intel: Simplify scan_microcode(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.271940980@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/intel.c | 28 +++++++-------------------- 1 file changed, 7 insertions(+), 21 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 03a55bfa88c5..26f759b7b7fa 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -266,22 +266,16 @@ static void save_microcode_patch(void *data, unsigned int size) intel_ucode_patch = (struct microcode_intel *)p; } -/* - * Get microcode matching with BSP's model. Only CPUs with the same model as - * BSP can stay in the platform. 
- */ -static struct microcode_intel * -scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) +/* Scan CPIO for microcode matching the boot CPU's family, model, stepping */ +static struct microcode_intel *scan_microcode(void *data, size_t size, + struct ucode_cpu_info *uci, bool save) { struct microcode_header_intel *mc_header; struct microcode_intel *patch = NULL; u32 cur_rev = uci->cpu_sig.rev; unsigned int mc_size; - while (size) { - if (size < sizeof(struct microcode_header_intel)) - break; - + for (; size >= sizeof(struct microcode_header_intel); size -= mc_size, data += mc_size) { mc_header = (struct microcode_header_intel *)data; mc_size = get_totalsize(mc_header); @@ -289,27 +283,19 @@ scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0) break; - size -= mc_size; - - if (!intel_find_matching_signature(data, uci->cpu_sig.sig, - uci->cpu_sig.pf)) { - data += mc_size; + if (!intel_find_matching_signature(data, uci->cpu_sig.sig, uci->cpu_sig.pf)) continue; - } /* BSP scan: Check whether there is newer microcode */ if (!save && cur_rev >= mc_header->rev) - goto next; + continue; /* Save scan: Check whether there is newer or matching microcode */ if (save && cur_rev != mc_header->rev) - goto next; + continue; patch = data; cur_rev = mc_header->rev; - -next: - data += mc_size; } if (size) -- Gitee From 74a4f48eeff36066dbc4d1370268e2f6b9e2bd76 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:40 +0200 Subject: [PATCH 0065/2138] x86/microcode/intel: Simplify and rename generic_load_microcode() ANBZ: #8003 commit 6b072022ab2e1e83b7588144ee0080f7197b71da upstream. so it becomes less obfuscated and rename it because there is nothing generic about it. Intel-SIG: commit 6b072022ab2e x86/microcode/intel: Simplify and rename generic_load_microcode(). Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.330295409@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/intel.c | 47 ++++++++++----------------- 1 file changed, 17 insertions(+), 30 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 26f759b7b7fa..9463b148e1c6 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -240,19 +240,6 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type) } EXPORT_SYMBOL_GPL(intel_microcode_sanity_check); -/* - * Returns 1 if update has been found, 0 otherwise. - */ -static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev) -{ - struct microcode_header_intel *mc_hdr = mc; - - if (mc_hdr->rev <= new_rev) - return 0; - - return intel_find_matching_signature(mc, csig, cpf); -} - static void save_microcode_patch(void *data, unsigned int size) { struct microcode_header_intel *p; @@ -559,14 +546,12 @@ static enum ucode_state apply_microcode_intel(int cpu) return ret; } -static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) +static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; unsigned int curr_mc_size = 0, new_mc_size = 0; - enum ucode_state ret = UCODE_OK; - int new_rev = uci->cpu_sig.rev; + int cur_rev = uci->cpu_sig.rev; u8 *new_mc = NULL, *mc = NULL; - unsigned int csig, cpf; while (iov_iter_count(iter)) { struct microcode_header_intel mc_header; @@ -583,6 +568,7 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) pr_err("error! 
Bad data in microcode data file (totalsize too small)\n"); break; } + data_size = mc_size - sizeof(mc_header); if (data_size > iov_iter_count(iter)) { pr_err("error! Bad data in microcode data file (truncated file?)\n"); @@ -605,16 +591,17 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) break; } - csig = uci->cpu_sig.sig; - cpf = uci->cpu_sig.pf; - if (has_newer_microcode(mc, csig, cpf, new_rev)) { - vfree(new_mc); - new_rev = mc_header.rev; - new_mc = mc; - new_mc_size = mc_size; - mc = NULL; /* trigger new vmalloc */ - ret = UCODE_NEW; - } + if (cur_rev >= mc_header.rev) + continue; + + if (!intel_find_matching_signature(mc, uci->cpu_sig.sig, uci->cpu_sig.pf)) + continue; + + vfree(new_mc); + cur_rev = mc_header.rev; + new_mc = mc; + new_mc_size = mc_size; + mc = NULL; } vfree(mc); @@ -634,9 +621,9 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) save_microcode_patch(new_mc, new_mc_size); pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", - cpu, new_rev, uci->cpu_sig.rev); + cpu, cur_rev, uci->cpu_sig.rev); - return ret; + return UCODE_NEW; } static bool is_blacklisted(unsigned int cpu) @@ -685,7 +672,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) kvec.iov_base = (void *)firmware->data; kvec.iov_len = firmware->size; iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size); - ret = generic_load_microcode(cpu, &iter); + ret = parse_microcode_blobs(cpu, &iter); release_firmware(firmware); -- Gitee From 3f331247a2e4ec4e1f63cf129da2e298f5f3bbb0 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:41 +0200 Subject: [PATCH 0066/2138] x86/microcode/intel: Cleanup code further ANBZ: #8003 commit 0177669ee61de4dc641f9ad86a3df6f22327cf6c upstream. Sanitize the microcode scan loop, fixup printks and move the loading function for builtin microcode next to the place where it is used and mark it __init. 
Intel-SIG: commit 0177669ee61d x86/microcode/intel: Cleanup code further. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.389400871@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/intel.c | 76 +++++++++++---------------- 1 file changed, 32 insertions(+), 44 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 9463b148e1c6..d6ff6ebc624b 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -36,7 +36,7 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; static struct microcode_intel *intel_ucode_patch __read_mostly; /* last level cache size per core */ -static int llc_size_per_core __ro_after_init; +static unsigned int llc_size_per_core __ro_after_init; /* microcode format is extended from prescott processors */ struct extended_signature { @@ -294,29 +294,6 @@ static struct microcode_intel *scan_microcode(void *data, size_t size, return patch; } -static bool load_builtin_intel_microcode(struct cpio_data *cp) -{ - unsigned int eax = 1, ebx, ecx = 0, edx; - struct firmware fw; - char name[30]; - - if (IS_ENABLED(CONFIG_X86_32)) - return false; - - native_cpuid(&eax, &ebx, &ecx, &edx); - - sprintf(name, "intel-ucode/%02x-%02x-%02x", - x86_family(eax), x86_model(eax), x86_stepping(eax)); - - if (firmware_request_builtin(&fw, name)) { - cp->size = fw.size; - cp->data = (void *)fw.data; - return true; - } - - return false; -} - static int apply_microcode_early(struct ucode_cpu_info *uci) { struct microcode_intel *mc; @@ -360,6 +337,28 @@ static int apply_microcode_early(struct ucode_cpu_info *uci) return 0; } +static bool load_builtin_intel_microcode(struct cpio_data *cp) +{ + unsigned int eax = 
1, ebx, ecx = 0, edx; + struct firmware fw; + char name[30]; + + if (IS_ENABLED(CONFIG_X86_32)) + return false; + + native_cpuid(&eax, &ebx, &ecx, &edx); + + sprintf(name, "intel-ucode/%02x-%02x-%02x", + x86_family(eax), x86_model(eax), x86_stepping(eax)); + + if (firmware_request_builtin(&fw, name)) { + cp->size = fw.size; + cp->data = (void *)fw.data; + return true; + } + return false; +} + int __init save_microcode_in_initrd_intel(void) { struct ucode_cpu_info uci; @@ -432,25 +431,16 @@ void load_ucode_intel_ap(void) apply_microcode_early(&uci); } -/* Accessor for microcode pointer */ -static struct microcode_intel *ucode_get_patch(void) -{ - return intel_ucode_patch; -} - void reload_ucode_intel(void) { - struct microcode_intel *p; struct ucode_cpu_info uci; intel_cpu_collect_info(&uci); - p = ucode_get_patch(); - if (!p) + uci.mc = intel_ucode_patch; + if (!uci.mc) return; - uci.mc = p; - apply_microcode_early(&uci); } @@ -488,8 +478,7 @@ static enum ucode_state apply_microcode_intel(int cpu) if (WARN_ON(raw_smp_processor_id() != cpu)) return UCODE_ERROR; - /* Look for a newer patch in our cache: */ - mc = ucode_get_patch(); + mc = intel_ucode_patch; if (!mc) { mc = uci->mc; if (!mc) @@ -680,18 +669,17 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) } static struct microcode_ops microcode_intel_ops = { - .request_microcode_fw = request_microcode_fw, - .collect_cpu_info = collect_cpu_info, - .apply_microcode = apply_microcode_intel, + .request_microcode_fw = request_microcode_fw, + .collect_cpu_info = collect_cpu_info, + .apply_microcode = apply_microcode_intel, }; -static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c) +static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c) { u64 llc_size = c->x86_cache_size * 1024ULL; do_div(llc_size, c->x86_max_cores); - - return (int)llc_size; + llc_size_per_core = (unsigned int)llc_size; } struct microcode_ops * __init init_intel_microcode(void) @@ -704,7 +692,7 @@ struct 
microcode_ops * __init init_intel_microcode(void) return NULL; } - llc_size_per_core = calc_llc_size_per_core(c); + calc_llc_size_per_core(c); return µcode_intel_ops; } -- Gitee From 4c931e469456e57394466338cd246010c367dead Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 12:50:13 +0800 Subject: [PATCH 0067/2138] x86/microcode/intel: Simplify early loading ANBZ: #8003 commit dd5e3e3ca6ac011582a9f3f987493bf6741568c0 upstream. The early loading code is overly complicated: - It scans the builtin/initrd for microcode not only on the BSP, but also on all APs during early boot and then later in the boot process it scans again to duplicate and save the microcode before initrd goes away. That's a pointless exercise because this can be simply done before bringing up the APs when the memory allocator is up and running. - Saving the microcode from within the scan loop is completely non-obvious and a left over of the microcode cache. This can be done at the call site now which makes it obvious. Rework the code so that only the BSP scans the builtin/initrd microcode once during early boot and save it away in an early initcall for later use. [ bp: Test and fold in a fix from tglx ontop which handles the need to distinguish what save_microcode() does depending on when it is called: - when on the BSP during early load, it needs to find a newer revision than the one currently loaded on the BSP - later, before SMP init, it still runs on the BSP and gets the BSP revision just loaded and uses that revision to know which patch to save for the APs. For that it needs to find the exact one as on the BSP. ] Intel-SIG: commit dd5e3e3ca6ac x86/microcode/intel: Simplify early loading. Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.629085215@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 6 +- arch/x86/kernel/cpu/microcode/intel.c | 163 +++++++++++------------ arch/x86/kernel/cpu/microcode/internal.h | 3 +- 3 files changed, 79 insertions(+), 93 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 95bd2b43d720..f6e28d033f6e 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -46,7 +46,7 @@ static const struct microcode_ops *microcode_ops; #else static struct microcode_ops *microcode_ops; #endif -static bool dis_ucode_ldr = true; +bool dis_ucode_ldr = true; bool initrd_gone; @@ -203,10 +203,6 @@ static int __init save_microcode_in_initrd(void) } switch (c->x86_vendor) { - case X86_VENDOR_INTEL: - if (c->x86 >= 6) - ret = save_microcode_in_initrd_intel(); - break; case X86_VENDOR_AMD: if (c->x86 >= 0x10) ret = save_microcode_in_initrd_amd(cpuid_eax(1)); diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index d6ff6ebc624b..9b6614490113 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -32,8 +32,10 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; +#define UCODE_BSP_LOADED ((struct microcode_intel *)0x1UL) + /* Current microcode patch used in early patching on the APs. 
*/ -static struct microcode_intel *intel_ucode_patch __read_mostly; +static struct microcode_intel *ucode_patch_va __read_mostly; /* last level cache size per core */ static unsigned int llc_size_per_core __ro_after_init; @@ -240,22 +242,30 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type) } EXPORT_SYMBOL_GPL(intel_microcode_sanity_check); -static void save_microcode_patch(void *data, unsigned int size) +static void update_ucode_pointer(struct microcode_intel *mc) { - struct microcode_header_intel *p; + kfree(ucode_patch_va); + + /* + * Save the virtual address for early loading and for eventual free + * on late loading. + */ + ucode_patch_va = mc; +} - p = kmemdup(data, size, GFP_KERNEL); - if (!p) - return; +static void save_microcode_patch(struct microcode_intel *patch) +{ + struct microcode_intel *mc; - kfree(intel_ucode_patch); - /* Save for early loading */ - intel_ucode_patch = (struct microcode_intel *)p; + mc = kmemdup(patch, get_totalsize(&patch->hdr), GFP_KERNEL); + if (mc) + update_ucode_pointer(mc); } -/* Scan CPIO for microcode matching the boot CPU's family, model, stepping */ -static struct microcode_intel *scan_microcode(void *data, size_t size, - struct ucode_cpu_info *uci, bool save) +/* Scan blob for microcode matching the boot CPUs family, model, stepping */ +static __init struct microcode_intel *scan_microcode(void *data, size_t size, + struct ucode_cpu_info *uci, + bool save) { struct microcode_header_intel *mc_header; struct microcode_intel *patch = NULL; @@ -273,35 +283,35 @@ static struct microcode_intel *scan_microcode(void *data, size_t size, if (!intel_find_matching_signature(data, uci->cpu_sig.sig, uci->cpu_sig.pf)) continue; - /* BSP scan: Check whether there is newer microcode */ - if (!save && cur_rev >= mc_header->rev) - continue; - - /* Save scan: Check whether there is newer or matching microcode */ - if (save && cur_rev != mc_header->rev) + /* + * For saving the early microcode, find the matching 
revision which + * was loaded on the BSP. + * + * On the BSP during early boot, find a newer revision than + * actually loaded in the CPU. + */ + if (save) { + if (cur_rev != mc_header->rev) + continue; + } else if (cur_rev >= mc_header->rev) { continue; + } patch = data; cur_rev = mc_header->rev; } - if (size) - return NULL; - - if (save && patch) - save_microcode_patch(patch, mc_size); - - return patch; + return size ? NULL : patch; } -static int apply_microcode_early(struct ucode_cpu_info *uci) +static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) { struct microcode_intel *mc; u32 rev, old_rev, date; mc = uci->mc; if (!mc) - return 0; + return UCODE_NFOUND; /* * Save us the MSR write below - which is a particular expensive @@ -327,17 +337,17 @@ static int apply_microcode_early(struct ucode_cpu_info *uci) rev = intel_get_microcode_revision(); if (rev != mc->hdr.rev) - return -1; + return UCODE_ERROR; uci->cpu_sig.rev = rev; date = mc->hdr.date; pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", old_rev, rev, date & 0xffff, date >> 24, (date >> 16) & 0xff); - return 0; + return UCODE_UPDATED; } -static bool load_builtin_intel_microcode(struct cpio_data *cp) +static __init bool load_builtin_intel_microcode(struct cpio_data *cp) { unsigned int eax = 1, ebx, ecx = 0, edx; struct firmware fw; @@ -359,89 +369,71 @@ static bool load_builtin_intel_microcode(struct cpio_data *cp) return false; } -int __init save_microcode_in_initrd_intel(void) +static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info *uci, bool save) { - struct ucode_cpu_info uci; struct cpio_data cp; - /* - * initrd is going away, clear patch ptr. We will scan the microcode one - * last time before jettisoning and save a patch, if found. Then we will - * update that pointer too, with a stable patch address to use when - * resuming the cores. 
- */ - intel_ucode_patch = NULL; - if (!load_builtin_intel_microcode(&cp)) cp = find_microcode_in_initrd(ucode_path); if (!(cp.data && cp.size)) - return 0; + return NULL; - intel_cpu_collect_info(&uci); + intel_cpu_collect_info(uci); - scan_microcode(cp.data, cp.size, &uci, true); - return 0; + return scan_microcode(cp.data, cp.size, uci, save); } /* - * @res_patch, output: a pointer to the patch we found. + * Invoked from an early init call to save the microcode blob which was + * selected during early boot when mm was not usable. The microcode must be + * saved because initrd is going away. It's an early init call so the APs + * just can use the pointer and do not have to scan initrd/builtin firmware + * again. */ -static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci) +static int __init save_builtin_microcode(void) { - struct cpio_data cp; - - /* try built-in microcode first */ - if (!load_builtin_intel_microcode(&cp)) - cp = find_microcode_in_initrd(ucode_path); + struct ucode_cpu_info uci; - if (!(cp.data && cp.size)) - return NULL; + if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED) + return 0; - intel_cpu_collect_info(uci); + if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return 0; - return scan_microcode(cp.data, cp.size, uci, false); + uci.mc = get_microcode_blob(&uci, true); + if (uci.mc) + save_microcode_patch(uci.mc); + return 0; } +early_initcall(save_builtin_microcode); +/* Load microcode on BSP from initrd or builtin blobs */ void __init load_ucode_intel_bsp(void) { - struct microcode_intel *patch; struct ucode_cpu_info uci; - patch = __load_ucode_intel(&uci); - if (!patch) - return; - - uci.mc = patch; - - apply_microcode_early(&uci); + uci.mc = get_microcode_blob(&uci, false); + if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED) + ucode_patch_va = UCODE_BSP_LOADED; } void load_ucode_intel_ap(void) { struct ucode_cpu_info uci; - if (!intel_ucode_patch) { - intel_ucode_patch = 
__load_ucode_intel(&uci); - if (!intel_ucode_patch) - return; - } - - uci.mc = intel_ucode_patch; - apply_microcode_early(&uci); + uci.mc = ucode_patch_va; + if (uci.mc) + apply_microcode_early(&uci); } +/* Reload microcode on resume */ void reload_ucode_intel(void) { - struct ucode_cpu_info uci; - - intel_cpu_collect_info(&uci); - - uci.mc = intel_ucode_patch; - if (!uci.mc) - return; + struct ucode_cpu_info uci = { .mc = ucode_patch_va, }; - apply_microcode_early(&uci); + if (uci.mc) + apply_microcode_early(&uci); } static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) @@ -478,7 +470,7 @@ static enum ucode_state apply_microcode_intel(int cpu) if (WARN_ON(raw_smp_processor_id() != cpu)) return UCODE_ERROR; - mc = intel_ucode_patch; + mc = ucode_patch_va; if (!mc) { mc = uci->mc; if (!mc) @@ -538,8 +530,8 @@ static enum ucode_state apply_microcode_intel(int cpu) static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - unsigned int curr_mc_size = 0, new_mc_size = 0; int cur_rev = uci->cpu_sig.rev; + unsigned int curr_mc_size = 0; u8 *new_mc = NULL, *mc = NULL; while (iov_iter_count(iter)) { @@ -589,7 +581,6 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) vfree(new_mc); cur_rev = mc_header.rev; new_mc = mc; - new_mc_size = mc_size; mc = NULL; } @@ -603,11 +594,11 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) if (!new_mc) return UCODE_NFOUND; - vfree(uci->mc); - uci->mc = (struct microcode_intel *)new_mc; - /* Save for CPU hotplug */ - save_microcode_patch(new_mc, new_mc_size); + save_microcode_patch((struct microcode_intel *)new_mc); + uci->mc = ucode_patch_va; + + vfree(new_mc); pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", cpu, cur_rev, uci->cpu_sig.rev); diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 
12eb95557bdf..9428ffcd7d79 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -90,6 +90,7 @@ static inline unsigned int x86_cpuid_family(void) return x86_family(eax); } +extern bool dis_ucode_ldr; extern bool initrd_gone; #ifdef CONFIG_CPU_SUP_AMD @@ -119,13 +120,11 @@ static const inline struct microcode_ops *init_hygon_microcode(void) { return NU #ifdef CONFIG_CPU_SUP_INTEL void load_ucode_intel_bsp(void); void load_ucode_intel_ap(void); -int save_microcode_in_initrd_intel(void); void reload_ucode_intel(void); struct microcode_ops *init_intel_microcode(void); #else /* CONFIG_CPU_SUP_INTEL */ static inline void load_ucode_intel_bsp(void) { } static inline void load_ucode_intel_ap(void) { } -static inline int save_microcode_in_initrd_intel(void) { return -EINVAL; } static inline void reload_ucode_intel(void) { } static inline struct microcode_ops *init_intel_microcode(void) { return NULL; } #endif /* !CONFIG_CPU_SUP_INTEL */ -- Gitee From 4ac67ac9129ee09f57a593eb949ff0bbf5dffe35 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:44 +0200 Subject: [PATCH 0068/2138] x86/microcode/intel: Save the microcode only after a successful late-load ANBZ: #8003 commit 2a1dada3d1cf8f80a27663653a371d99dbf5d540 upstream. There are situations where the late microcode is loaded into memory but is not applied: 1) The rendezvous fails 2) The microcode is rejected by the CPUs If any of this happens then the pointer which was updated at firmware load time is stale and subsequent CPU hotplug operations either fail to update or create inconsistent microcode state. Save the loaded microcode in a separate pointer before the late load is attempted and when successful, update the hotplug pointer accordingly via a new microcode_ops callback. Remove the pointless fallback in the loader to a microcode pointer which is never populated. 
Intel-SIG: commit 2a1dada3d1cf x86/microcode/intel: Save the microcode only after a successful late-load. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.505491309@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 4 ++++ arch/x86/kernel/cpu/microcode/intel.c | 30 ++++++++++++------------ arch/x86/kernel/cpu/microcode/internal.h | 1 + 3 files changed, 20 insertions(+), 15 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index f6e28d033f6e..a0751b0491b1 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -421,6 +421,10 @@ static int microcode_reload_late(void) store_cpu_caps(&prev_info); ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask); + + if (microcode_ops->finalize_late_load) + microcode_ops->finalize_late_load(ret); + if (!ret) { pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n", old, boot_cpu_data.microcode); diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 9b6614490113..076133b09cc7 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -36,6 +36,7 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; /* Current microcode patch used in early patching on the APs. 
*/ static struct microcode_intel *ucode_patch_va __read_mostly; +static struct microcode_intel *ucode_patch_late __read_mostly; /* last level cache size per core */ static unsigned int llc_size_per_core __ro_after_init; @@ -470,12 +471,9 @@ static enum ucode_state apply_microcode_intel(int cpu) if (WARN_ON(raw_smp_processor_id() != cpu)) return UCODE_ERROR; - mc = ucode_patch_va; - if (!mc) { - mc = uci->mc; - if (!mc) - return UCODE_NFOUND; - } + mc = ucode_patch_late; + if (!mc) + return UCODE_NFOUND; /* * Save us the MSR write below - which is a particular expensive @@ -594,15 +592,7 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) if (!new_mc) return UCODE_NFOUND; - /* Save for CPU hotplug */ - save_microcode_patch((struct microcode_intel *)new_mc); - uci->mc = ucode_patch_va; - - vfree(new_mc); - - pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", - cpu, cur_rev, uci->cpu_sig.rev); - + ucode_patch_late = (struct microcode_intel *)new_mc; return UCODE_NEW; } @@ -659,10 +649,20 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) return ret; } +static void finalize_late_load(int result) +{ + if (!result) + save_microcode_patch(ucode_patch_late); + + vfree(ucode_patch_late); + ucode_patch_late = NULL; +} + static struct microcode_ops microcode_intel_ops = { .request_microcode_fw = request_microcode_fw, .collect_cpu_info = collect_cpu_info, .apply_microcode = apply_microcode_intel, + .finalize_late_load = finalize_late_load, }; static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 9428ffcd7d79..86a249f69bef 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -31,6 +31,7 @@ struct microcode_ops { */ enum ucode_state (*apply_microcode)(int cpu); int (*collect_cpu_info)(int cpu, struct cpu_signature *csig); + 
void (*finalize_late_load)(int result); }; extern struct ucode_cpu_info ucode_cpu_info[]; -- Gitee From 7ad63b80647ee1aea6c4954be1bebb89262ae92e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:45 +0200 Subject: [PATCH 0069/2138] x86/microcode/intel: Switch to kvmalloc() ANBZ: #8003 commit f24f204405f9875bc539c6e88553fd5ac913c867 upstream. Microcode blobs are getting larger and might soon reach the kmalloc() limit. Switch over kvmalloc(). Intel-SIG: commit f24f204405f9 x86/microcode/intel: Switch to kvmalloc(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.564323243@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/intel.c | 48 ++++++++++++++------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 076133b09cc7..dd2d3fde8d06 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -245,7 +244,7 @@ EXPORT_SYMBOL_GPL(intel_microcode_sanity_check); static void update_ucode_pointer(struct microcode_intel *mc) { - kfree(ucode_patch_va); + kvfree(ucode_patch_va); /* * Save the virtual address for early loading and for eventual free @@ -256,11 +255,14 @@ static void update_ucode_pointer(struct microcode_intel *mc) static void save_microcode_patch(struct microcode_intel *patch) { + unsigned int size = get_totalsize(&patch->hdr); struct microcode_intel *mc; - mc = kmemdup(patch, get_totalsize(&patch->hdr), GFP_KERNEL); + mc = kvmemdup(patch, size, GFP_KERNEL); if (mc) update_ucode_pointer(mc); + else + pr_err("Unable to allocate microcode memory size: %u\n", size); } /* Scan 
blob for microcode matching the boot CPUs family, model, stepping */ @@ -539,36 +541,34 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) { pr_err("error! Truncated or inaccessible header in microcode data file\n"); - break; + goto fail; } mc_size = get_totalsize(&mc_header); if (mc_size < sizeof(mc_header)) { pr_err("error! Bad data in microcode data file (totalsize too small)\n"); - break; + goto fail; } - data_size = mc_size - sizeof(mc_header); if (data_size > iov_iter_count(iter)) { pr_err("error! Bad data in microcode data file (truncated file?)\n"); - break; + goto fail; } /* For performance reasons, reuse mc area when possible */ if (!mc || mc_size > curr_mc_size) { - vfree(mc); - mc = vmalloc(mc_size); + kvfree(mc); + mc = kvmalloc(mc_size, GFP_KERNEL); if (!mc) - break; + goto fail; curr_mc_size = mc_size; } memcpy(mc, &mc_header, sizeof(mc_header)); data = mc + sizeof(mc_header); if (!copy_from_iter_full(data, data_size, iter) || - intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) { - break; - } + intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) + goto fail; if (cur_rev >= mc_header.rev) continue; @@ -576,24 +576,26 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) if (!intel_find_matching_signature(mc, uci->cpu_sig.sig, uci->cpu_sig.pf)) continue; - vfree(new_mc); + kvfree(new_mc); cur_rev = mc_header.rev; new_mc = mc; mc = NULL; } - vfree(mc); - - if (iov_iter_count(iter)) { - vfree(new_mc); - return UCODE_ERROR; - } + if (iov_iter_count(iter)) + goto fail; + kvfree(mc); if (!new_mc) return UCODE_NFOUND; ucode_patch_late = (struct microcode_intel *)new_mc; return UCODE_NEW; + +fail: + kvfree(mc); + kvfree(new_mc); + return UCODE_ERROR; } static bool is_blacklisted(unsigned int cpu) @@ -652,9 +654,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) static void 
finalize_late_load(int result) { if (!result) - save_microcode_patch(ucode_patch_late); - - vfree(ucode_patch_late); + update_ucode_pointer(ucode_patch_late); + else + kvfree(ucode_patch_late); ucode_patch_late = NULL; } -- Gitee From 9c67f6d0ef4655db7b034fc18445605e486d5534 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:44 +0200 Subject: [PATCH 0070/2138] x86/microcode/intel: Unify microcode apply() functions ANBZ: #8003 commit 3973718cff1e3a5d88ea78ec28ecca2afa60b30b upstream. Deduplicate the early and late apply() functions. [ bp: Rename the function which does the actual application to __apply_microcode() to differentiate it from microcode_ops.apply_microcode(). ] Intel-SIG: commit 3973718cff1e x86/microcode/intel: Unify microcode apply() functions. Microcode restructuring backport. Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20231017211722.795508212@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/intel.c | 104 +++++++++----------------- 1 file changed, 36 insertions(+), 68 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index dd2d3fde8d06..4235c95f17cf 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -307,12 +307,12 @@ static __init struct microcode_intel *scan_microcode(void *data, size_t size, return size ? 
NULL : patch; } -static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) +static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci, + struct microcode_intel *mc, + u32 *cur_rev) { - struct microcode_intel *mc; - u32 rev, old_rev, date; + u32 rev; - mc = uci->mc; if (!mc) return UCODE_NFOUND; @@ -321,14 +321,12 @@ static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) * operation - when the other hyperthread has updated the microcode * already. */ - rev = intel_get_microcode_revision(); - if (rev >= mc->hdr.rev) { - uci->cpu_sig.rev = rev; + *cur_rev = intel_get_microcode_revision(); + if (*cur_rev >= mc->hdr.rev) { + uci->cpu_sig.rev = *cur_rev; return UCODE_OK; } - old_rev = rev; - /* * Writeback and invalidate caches before updating microcode to avoid * internal issues depending on what the microcode is updating. @@ -343,13 +341,24 @@ static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) return UCODE_ERROR; uci->cpu_sig.rev = rev; - - date = mc->hdr.date; - pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", - old_rev, rev, date & 0xffff, date >> 24, (date >> 16) & 0xff); return UCODE_UPDATED; } +static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) +{ + struct microcode_intel *mc = uci->mc; + enum ucode_state ret; + u32 cur_rev, date; + + ret = __apply_microcode(uci, mc, &cur_rev); + if (ret == UCODE_UPDATED) { + date = mc->hdr.date; + pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", + cur_rev, mc->hdr.rev, date & 0xffff, date >> 24, (date >> 16) & 0xff); + } + return ret; +} + static __init bool load_builtin_intel_microcode(struct cpio_data *cp) { unsigned int eax = 1, ebx, ecx = 0, edx; @@ -459,70 +468,29 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) return 0; } -static enum ucode_state apply_microcode_intel(int cpu) +static enum ucode_state apply_microcode_late(int cpu) { struct ucode_cpu_info *uci = 
ucode_cpu_info + cpu; - struct cpuinfo_x86 *c = &cpu_data(cpu); - bool bsp = c->cpu_index == boot_cpu_data.cpu_index; - struct microcode_intel *mc; + struct microcode_intel *mc = ucode_patch_late; enum ucode_state ret; - static int prev_rev; - u32 rev; + u32 cur_rev; - /* We should bind the task to the CPU */ - if (WARN_ON(raw_smp_processor_id() != cpu)) + if (WARN_ON_ONCE(smp_processor_id() != cpu)) return UCODE_ERROR; - mc = ucode_patch_late; - if (!mc) - return UCODE_NFOUND; + ret = __apply_microcode(uci, mc, &cur_rev); + if (ret != UCODE_UPDATED && ret != UCODE_OK) + return ret; - /* - * Save us the MSR write below - which is a particular expensive - * operation - when the other hyperthread has updated the microcode - * already. - */ - rev = intel_get_microcode_revision(); - if (rev >= mc->hdr.rev) { - ret = UCODE_OK; - goto out; - } - - /* - * Writeback and invalidate caches before updating microcode to avoid - * internal issues depending on what the microcode is updating. - */ - native_wbinvd(); - - /* write microcode via MSR 0x79 */ - wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); - - rev = intel_get_microcode_revision(); - - if (rev != mc->hdr.rev) { - pr_err("CPU%d update to revision 0x%x failed\n", - cpu, mc->hdr.rev); - return UCODE_ERROR; - } - - if (bsp && rev != prev_rev) { - pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n", - rev, - mc->hdr.date & 0xffff, - mc->hdr.date >> 24, + if (!cpu && uci->cpu_sig.rev != cur_rev) { + pr_info("Updated to revision 0x%x, date = %04x-%02x-%02x\n", + uci->cpu_sig.rev, mc->hdr.date & 0xffff, mc->hdr.date >> 24, (mc->hdr.date >> 16) & 0xff); - prev_rev = rev; } - ret = UCODE_UPDATED; - -out: - uci->cpu_sig.rev = rev; - c->microcode = rev; - - /* Update boot_cpu_data's revision too, if we're on the BSP: */ - if (bsp) - boot_cpu_data.microcode = rev; + cpu_data(cpu).microcode = uci->cpu_sig.rev; + if (!cpu) + boot_cpu_data.microcode = uci->cpu_sig.rev; return ret; } @@ -663,7 +631,7 @@ static void 
finalize_late_load(int result) static struct microcode_ops microcode_intel_ops = { .request_microcode_fw = request_microcode_fw, .collect_cpu_info = collect_cpu_info, - .apply_microcode = apply_microcode_intel, + .apply_microcode = apply_microcode_late, .finalize_late_load = finalize_late_load, }; -- Gitee From abd11759edc682a574bdfc57f57d34dad0cf2d5f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:45 +0200 Subject: [PATCH 0071/2138] x86/microcode/intel: Rework intel_cpu_collect_info() ANBZ: #8003 commit 164aa1ca537238c46923ccacd8995b4265aee47b upstream. Nothing needs struct ucode_cpu_info. Make it take struct cpu_signature, let it return a boolean and simplify the implementation. Rename it now that the silly name clash with collect_cpu_info() is gone. Intel-SIG: commit 164aa1ca5372 x86/microcode/intel: Rework intel_cpu_collect_info(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211722.851573238@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/cpu.h | 4 ++-- arch/x86/kernel/cpu/microcode/intel.c | 33 ++++++++------------------- drivers/platform/x86/intel/ifs/load.c | 8 +++---- 3 files changed, 14 insertions(+), 31 deletions(-) diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 25050d953eee..068a07ed6165 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -71,9 +71,9 @@ static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {} extern __noendbr void cet_disable(void); -struct ucode_cpu_info; +struct cpu_signature; -int intel_cpu_collect_info(struct ucode_cpu_info *uci); +void intel_collect_cpu_info(struct cpu_signature *sig); static inline bool intel_cpu_signatures_match(unsigned int s1, unsigned int p1, unsigned int s2, unsigned int p2) diff 
--git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 4235c95f17cf..5aa7f5efc440 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -68,36 +68,21 @@ static inline unsigned int exttable_size(struct extended_sigtable *et) return et->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE; } -int intel_cpu_collect_info(struct ucode_cpu_info *uci) +void intel_collect_cpu_info(struct cpu_signature *sig) { - unsigned int val[2]; - unsigned int family, model; - struct cpu_signature csig = { 0 }; - unsigned int eax, ebx, ecx, edx; - - memset(uci, 0, sizeof(*uci)); - - eax = 0x00000001; - ecx = 0; - native_cpuid(&eax, &ebx, &ecx, &edx); - csig.sig = eax; + sig->sig = cpuid_eax(1); + sig->pf = 0; + sig->rev = intel_get_microcode_revision(); - family = x86_family(eax); - model = x86_model(eax); + if (x86_model(sig->sig) >= 5 || x86_family(sig->sig) > 6) { + unsigned int val[2]; - if (model >= 5 || family > 6) { /* get processor flags from MSR 0x17 */ native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); - csig.pf = 1 << ((val[1] >> 18) & 7); + sig->pf = 1 << ((val[1] >> 18) & 7); } - - csig.rev = intel_get_microcode_revision(); - - uci->cpu_sig = csig; - - return 0; } -EXPORT_SYMBOL_GPL(intel_cpu_collect_info); +EXPORT_SYMBOL_GPL(intel_collect_cpu_info); /* * Returns 1 if update has been found, 0 otherwise. 
@@ -391,7 +376,7 @@ static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info * if (!(cp.data && cp.size)) return NULL; - intel_cpu_collect_info(uci); + intel_collect_cpu_info(&uci->cpu_sig); return scan_microcode(cp.data, cp.size, uci, save); } diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index cf156c4a8024..d97c129e97ff 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -349,7 +349,7 @@ static int scan_chunks_sanity_check(struct device *dev) static int image_sanity_check(struct device *dev, const struct microcode_header_intel *data) { - struct ucode_cpu_info uci; + struct cpu_signature sig; /* Provide a specific error message when loading an older/unsupported image */ if (data->hdrver != MC_HEADER_TYPE_IFS) { @@ -362,11 +362,9 @@ static int image_sanity_check(struct device *dev, const struct microcode_header_ return -EINVAL; } - intel_cpu_collect_info(&uci); + intel_collect_cpu_info(&sig); - if (!intel_find_matching_signature((void *)data, - uci.cpu_sig.sig, - uci.cpu_sig.pf)) { + if (!intel_find_matching_signature((void *)data, sig.sig, sig.pf)) { dev_err(dev, "cpu signature, processor flags not matching\n"); return -EINVAL; } -- Gitee From ee3a6d53138f0aa401f89d6e99a985b5d8adc744 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:49 +0200 Subject: [PATCH 0072/2138] x86/microcode/intel: Reuse intel_cpu_collect_info() ANBZ: #8003 commit 11f96ac4c21e701650c7d8349b252973185ac6ce upstream. No point for an almost duplicate function. Intel-SIG: commit 11f96ac4c21e x86/microcode/intel: Reuse intel_cpu_collect_info(). Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.741173606@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/intel.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 5aa7f5efc440..47a96c0ef65f 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -435,21 +435,7 @@ void reload_ucode_intel(void) static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) { - struct cpuinfo_x86 *c = &cpu_data(cpu_num); - unsigned int val[2]; - - memset(csig, 0, sizeof(*csig)); - - csig->sig = cpuid_eax(0x00000001); - - if ((c->x86_model >= 5) || (c->x86 > 6)) { - /* get processor flags from MSR 0x17 */ - rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); - csig->pf = 1 << ((val[1] >> 18) & 7); - } - - csig->rev = c->microcode; - + intel_collect_cpu_info(csig); return 0; } -- Gitee From b3d368b3cd354c48fc91c52aea90db5816dba7bb Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:50 +0200 Subject: [PATCH 0073/2138] x86/microcode/intel: Rework intel_find_matching_signature() ANBZ: #8003 commit b7fcd995b261c9976e05f47554529c98a0f1cbb0 upstream. Take a cpu_signature argument and work from there. Move the match() helper next to the callsite as there is no point for having it in a header. Intel-SIG: commit b7fcd995b261 x86/microcode/intel: Rework intel_find_matching_signature(). Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115902.797820205@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/cpu.h | 16 +------------- arch/x86/kernel/cpu/microcode/intel.c | 31 ++++++++++++++++----------- drivers/platform/x86/intel/ifs/load.c | 2 +- 3 files changed, 21 insertions(+), 28 deletions(-) diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 068a07ed6165..fecc4fe1d68a 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -75,22 +75,8 @@ struct cpu_signature; void intel_collect_cpu_info(struct cpu_signature *sig); -static inline bool intel_cpu_signatures_match(unsigned int s1, unsigned int p1, - unsigned int s2, unsigned int p2) -{ - if (s1 != s2) - return false; - - /* Processor flags are either both 0 ... */ - if (!p1 && !p2) - return true; - - /* ... or they intersect. */ - return p1 & p2; -} - extern u64 x86_read_arch_cap_msr(void); -int intel_find_matching_signature(void *mc, unsigned int csig, int cpf); +bool intel_find_matching_signature(void *mc, struct cpu_signature *sig); int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type); extern struct cpumask cpus_stop_mask; diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 47a96c0ef65f..e5c5ddfd6831 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -84,29 +84,36 @@ void intel_collect_cpu_info(struct cpu_signature *sig) } EXPORT_SYMBOL_GPL(intel_collect_cpu_info); -/* - * Returns 1 if update has been found, 0 otherwise. 
- */ -int intel_find_matching_signature(void *mc, unsigned int csig, int cpf) +static inline bool cpu_signatures_match(struct cpu_signature *s1, unsigned int sig2, + unsigned int pf2) +{ + if (s1->sig != sig2) + return false; + + /* Processor flags are either both 0 or they intersect. */ + return ((!s1->pf && !pf2) || (s1->pf & pf2)); +} + +bool intel_find_matching_signature(void *mc, struct cpu_signature *sig) { struct microcode_header_intel *mc_hdr = mc; - struct extended_sigtable *ext_hdr; struct extended_signature *ext_sig; + struct extended_sigtable *ext_hdr; int i; - if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf)) - return 1; + if (cpu_signatures_match(sig, mc_hdr->sig, mc_hdr->pf)) + return true; /* Look for ext. headers: */ if (get_totalsize(mc_hdr) <= intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE) - return 0; + return false; ext_hdr = mc + intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE; ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE; for (i = 0; i < ext_hdr->count; i++) { - if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf)) - return 1; + if (cpu_signatures_match(sig, ext_sig->sig, ext_sig->pf)) + return true; ext_sig++; } return 0; @@ -268,7 +275,7 @@ static __init struct microcode_intel *scan_microcode(void *data, size_t size, intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0) break; - if (!intel_find_matching_signature(data, uci->cpu_sig.sig, uci->cpu_sig.pf)) + if (!intel_find_matching_signature(data, &uci->cpu_sig)) continue; /* @@ -512,7 +519,7 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) if (cur_rev >= mc_header.rev) continue; - if (!intel_find_matching_signature(mc, uci->cpu_sig.sig, uci->cpu_sig.pf)) + if (!intel_find_matching_signature(mc, &uci->cpu_sig)) continue; kvfree(new_mc); diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index d97c129e97ff..2cf3b4a8813f 100644 --- 
a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -364,7 +364,7 @@ static int image_sanity_check(struct device *dev, const struct microcode_header_ intel_collect_cpu_info(&sig); - if (!intel_find_matching_signature((void *)data, sig.sig, sig.pf)) { + if (!intel_find_matching_signature((void *)data, &sig)) { dev_err(dev, "cpu signature, processor flags not matching\n"); return -EINVAL; } -- Gitee From b138fbb929bb718ff80cd848fd8c6318a596bdbb Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:49 +0200 Subject: [PATCH 0074/2138] x86/microcode: Remove pointless apply() invocation ANBZ: #8003 commit b48b26f992a3828b4ae274669f99ce68451d4904 upstream. Microcode is applied on the APs during early bringup. There is no point in trying to apply the microcode again during the hotplug operations and neither at the point where the microcode device is initialized. Collect CPU info and microcode revision in setup_online_cpu() for now. This will move to the CPU hotplug callback later. [ bp: Leave the starting notifier for the following scenario: - boot, late load, suspend to disk, resume without the starting notifier, only the last core manages to update the microcode upon resume: # rdmsr -a 0x8b 10000bf 10000bf 10000bf 10000bf 10000bf 10000dc <---- This is on an AMD F10h machine. For the future, one should check whether potential unification of the CPU init path could cover the resume path too so that this can be simplified even more. tglx: This is caused by the odd handling of APs which try to find the microcode blob in builtin or initrd instead of caching the microcode blob during early init before the APs are brought up. Will be cleaned up in a later step. ] Intel-SIG: commit b48b26f992a3 x86/microcode: Remove pointless apply() invocation. Microcode restructuring backport. 
Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20231017211723.018821624@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index a0751b0491b1..b7c0b462919a 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -511,17 +511,6 @@ static void microcode_fini_cpu(int cpu) microcode_ops->microcode_fini_cpu(cpu); } -static enum ucode_state microcode_init_cpu(int cpu) -{ - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - - memset(uci, 0, sizeof(*uci)); - - microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); - - return microcode_ops->apply_microcode(cpu); -} - /** * microcode_bsp_resume - Update boot CPU microcode during resume. */ @@ -576,14 +565,14 @@ static int mc_cpu_down_prep(unsigned int cpu) static void setup_online_cpu(struct work_struct *work) { int cpu = smp_processor_id(); - enum ucode_state err; + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - err = microcode_init_cpu(cpu); - if (err == UCODE_ERROR) { - pr_err("Error applying microcode on CPU%d\n", cpu); - return; - } + memset(uci, 0, sizeof(*uci)); + microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); + cpu_data(cpu).microcode = uci->cpu_sig.rev; + if (!cpu) + boot_cpu_data.microcode = uci->cpu_sig.rev; mc_cpu_online(cpu); } -- Gitee From 2835af1c66e23942e94b63a7fda5ac9a753b28a9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 10 Oct 2023 17:08:41 +0200 Subject: [PATCH 0075/2138] x86/microcode/amd: Use correct per CPU ucode_cpu_info ANBZ: #8003 commit ecfd41089348fa4cc767dc588367e9fdf8cb6b9d upstream. 
find_blobs_in_containers() is invoked on every CPU but overwrites unconditionally ucode_cpu_info of CPU0. Fix this by using the proper CPU data and move the assignment into the call site apply_ucode_from_containers() so that the function can be reused. Intel-SIG: commit ecfd41089348 x86/microcode/amd: Use correct per CPU ucode_cpu_info. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231010150702.433454320@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/amd.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 93156848df12..2bc1a85ddd18 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -506,9 +506,6 @@ static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) cp = find_microcode_in_initrd(ucode_path); - /* Needed in load_microcode_amd() */ - ucode_cpu_info->cpu_sig.sig = cpuid_1_eax; - *ret = cp; } @@ -516,6 +513,9 @@ static void apply_ucode_from_containers(unsigned int cpuid_1_eax) { struct cpio_data cp = { }; + /* Needed in load_microcode_amd() */ + ucode_cpu_info[smp_processor_id()].cpu_sig.sig = cpuid_1_eax; + find_blobs_in_containers(cpuid_1_eax, &cp); if (!(cp.data && cp.size)) return; -- Gitee From 0b428302e38316fc14647e8dff367093d8773b79 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 13:11:31 +0800 Subject: [PATCH 0076/2138] x86/microcode/amd: Cache builtin microcode too ANBZ: #8003 commit d419d28261e72e1c9ec418711b3da41df2265139 upstream. save_microcode_in_initrd_amd() fails to cache builtin microcode and only scans initrd. 
Use find_blobs_in_containers() instead which covers both. Intel-SIG: commit d419d28261e7 x86/microcode/amd: Cache builtin microcode too. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231010150702.495139089@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/amd.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 2bc1a85ddd18..7253641e4184 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -535,14 +535,8 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) struct cont_desc desc = { 0 }; enum ucode_state ret; struct cpio_data cp; - const char *path; - if (x86_cpuid_vendor() == X86_VENDOR_HYGON) - path = "kernel/x86/microcode/HygonGenuine.bin"; - else - path = ucode_path; - - cp = find_microcode_in_initrd(path); + find_blobs_in_containers(cpuid_1_eax, &cp); if (!(cp.data && cp.size)) return -EINVAL; -- Gitee From f2270854382a46ed31247b71675064fbd82aa428 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 16:15:42 +0800 Subject: [PATCH 0077/2138] x86/microcode/amd: Cache builtin/initrd microcode early ANBZ: #8003 commit a7939f01672034a58ad3fdbce69bb6c665ce0024 upstream. There is no reason to scan builtin/initrd microcode on each AP. Cache the builtin/initrd microcode in an early initcall so that the early AP loader can utilize the cache. The existing fs initcall which invoked save_microcode_in_initrd_amd() is still required to maintain the initrd_gone flag. Rename it accordingly. This will be removed once the AP loader code is converted to use the cache. Intel-SIG: commit a7939f016720 x86/microcode/amd: Cache builtin/initrd microcode early. 
Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211723.187566507@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/amd.c | 9 ++++++++- arch/x86/kernel/cpu/microcode/core.c | 28 ++++------------------------ 2 files changed, 12 insertions(+), 25 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 7253641e4184..81dd20652dee 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -530,12 +530,18 @@ void load_ucode_amd_early(unsigned int cpuid_1_eax) static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); -int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) +static int __init save_microcode_in_initrd(void) { + unsigned int cpuid_1_eax = native_cpuid_eax(1); + struct cpuinfo_x86 *c = &boot_cpu_data; struct cont_desc desc = { 0 }; enum ucode_state ret; struct cpio_data cp; + if (dis_ucode_ldr || ((c->x86_vendor != X86_VENDOR_AMD || + c->x86 < 0x10) && (c->x86_vendor != X86_VENDOR_HYGON))) + return 0; + find_blobs_in_containers(cpuid_1_eax, &cp); if (!(cp.data && cp.size)) return -EINVAL; @@ -552,6 +558,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) return 0; } +early_initcall(save_microcode_in_initrd); /* * a small, trivial cache of per-family ucode patches diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index b7c0b462919a..b0175ced6f1e 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -192,33 +192,14 @@ void load_ucode_ap(void) } } -static int __init save_microcode_in_initrd(void) +/* Temporary workaround until find_microcode_in_initrd() is __init */ +static int __init 
mark_initrd_gone(void) { - struct cpuinfo_x86 *c = &boot_cpu_data; - int ret = -EINVAL; - - if (dis_ucode_ldr) { - ret = 0; - goto out; - } - - switch (c->x86_vendor) { - case X86_VENDOR_AMD: - if (c->x86 >= 0x10) - ret = save_microcode_in_initrd_amd(cpuid_eax(1)); - break; - case X86_VENDOR_HYGON: - ret = save_microcode_in_initrd_amd(cpuid_eax(1)); - break; - default: - break; - } - -out: initrd_gone = true; - return ret; + return 0; } +fs_initcall(mark_initrd_gone); struct cpio_data find_microcode_in_initrd(const char *path) { @@ -641,5 +622,4 @@ static int __init microcode_init(void) return error; } -fs_initcall(save_microcode_in_initrd); late_initcall(microcode_init); -- Gitee From f47dd865a19df8d05e9108caa5140952ea33c670 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 13:39:30 +0800 Subject: [PATCH 0078/2138] x86/microcode/amd: Use cached microcode for AP load ANBZ: #8003 commit 5af05b8d51a8e3ff5905663655c0f46d1aaae44a upstream. Now that the microcode cache is initialized before the APs are brought up, there is no point in scanning builtin/initrd microcode during AP loading. Convert the AP loader to utilize the cache, which in turn makes the CPU hotplug callback which applies the microcode after initrd/builtin is gone, obsolete as the early loading during late hotplug operations including the resume path depends now only on the cache. Intel-SIG: commit 5af05b8d51a8 x86/microcode/amd: Use cached microcode for AP load. Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211723.243426023@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/amd.c | 20 +++++++++++--------- arch/x86/kernel/cpu/microcode/core.c | 17 +++-------------- arch/x86/kernel/cpu/microcode/internal.h | 2 -- 3 files changed, 14 insertions(+), 25 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 81dd20652dee..cd157a8d43eb 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -499,7 +499,7 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) return false; } -static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret) +static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret) { struct cpio_data cp; @@ -509,12 +509,12 @@ static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret = cp; } -static void apply_ucode_from_containers(unsigned int cpuid_1_eax) +void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax) { struct cpio_data cp = { }; /* Needed in load_microcode_amd() */ - ucode_cpu_info[smp_processor_id()].cpu_sig.sig = cpuid_1_eax; + ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax; find_blobs_in_containers(cpuid_1_eax, &cp); if (!(cp.data && cp.size)) @@ -523,11 +523,6 @@ static void apply_ucode_from_containers(unsigned int cpuid_1_eax) early_apply_microcode(cpuid_1_eax, cp.data, cp.size); } -void load_ucode_amd_early(unsigned int cpuid_1_eax) -{ - return apply_ucode_from_containers(cpuid_1_eax); -} - static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); static int __init save_microcode_in_initrd(void) @@ -612,7 +607,6 @@ static struct ucode_patch *find_patch(unsigned int cpu) 
struct ucode_cpu_info *uci = ucode_cpu_info + cpu; u16 equiv_id; - equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig); if (!equiv_id) return NULL; @@ -714,6 +708,14 @@ static enum ucode_state apply_microcode_amd(int cpu) return ret; } +void load_ucode_amd_ap(unsigned int cpuid_1_eax) +{ + unsigned int cpu = smp_processor_id(); + + ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax; + apply_microcode_amd(cpu); +} + static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size) { u32 equiv_tbl_len; diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index b0175ced6f1e..2b6e93cb9b73 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -163,7 +163,7 @@ void __init load_ucode_bsp(void) if (intel) load_ucode_intel_bsp(); else - load_ucode_amd_early(cpuid_1_eax); + load_ucode_amd_bsp(cpuid_1_eax); } void load_ucode_ap(void) @@ -182,10 +182,10 @@ void load_ucode_ap(void) break; case X86_VENDOR_AMD: if (x86_family(cpuid_1_eax) >= 0x10) - load_ucode_amd_early(cpuid_1_eax); + load_ucode_amd_ap(cpuid_1_eax); break; case X86_VENDOR_HYGON: - load_ucode_amd_early(cpuid_1_eax); + load_ucode_amd_ap(cpuid_1_eax); break; default: break; @@ -510,15 +510,6 @@ static struct syscore_ops mc_syscore_ops = { .resume = microcode_bsp_resume, }; -static int mc_cpu_starting(unsigned int cpu) -{ - enum ucode_state err = microcode_ops->apply_microcode(cpu); - - pr_debug("%s: CPU%d, err: %d\n", __func__, cpu, err); - - return err == UCODE_ERROR; -} - static int mc_cpu_online(unsigned int cpu) { struct device *dev = get_cpu_device(cpu); @@ -608,8 +599,6 @@ static int __init microcode_init(void) schedule_on_each_cpu(setup_online_cpu); register_syscore_ops(&mc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting", - mc_cpu_starting, NULL); cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", mc_cpu_online, mc_cpu_down_prep); diff --git 
a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 86a249f69bef..1a3a26ea5a3e 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -97,7 +97,6 @@ extern bool initrd_gone; #ifdef CONFIG_CPU_SUP_AMD void load_ucode_amd_bsp(unsigned int family); void load_ucode_amd_ap(unsigned int family); -void load_ucode_amd_early(unsigned int cpuid_1_eax); int save_microcode_in_initrd_amd(unsigned int family); void reload_ucode_amd(unsigned int cpu); struct microcode_ops *init_amd_microcode(void); @@ -105,7 +104,6 @@ void exit_amd_microcode(void); #else /* CONFIG_CPU_SUP_AMD */ static inline void load_ucode_amd_bsp(unsigned int family) { } static inline void load_ucode_amd_ap(unsigned int family) { } -static inline void load_ucode_amd_early(unsigned int family) { } static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } static inline void reload_ucode_amd(unsigned int cpu) { } static inline struct microcode_ops *init_amd_microcode(void) { return NULL; } -- Gitee From 50eda43cb2510b2375300da57785c3b6def33e57 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 13:23:57 +0800 Subject: [PATCH 0079/2138] x86/microcode: Mop up early loading leftovers ANBZ: #8003 commit 8529e8ab6c6fab8ebf06ead98e77d7646b42fc48 upstream. Get rid of the initrd_gone hack which was required to keep find_microcode_in_initrd() functional after init. As find_microcode_in_initrd() is now only used during init, mark it accordingly. Intel-SIG: commit 8529e8ab6c6f x86/microcode: Mop up early loading leftovers. Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211723.298854846@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 18 +----------------- arch/x86/kernel/cpu/microcode/internal.h | 1 - 2 files changed, 1 insertion(+), 18 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 2b6e93cb9b73..10e822e88c0e 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -48,8 +48,6 @@ static struct microcode_ops *microcode_ops; #endif bool dis_ucode_ldr = true; -bool initrd_gone; - /* * Synchronization. * @@ -192,16 +190,7 @@ void load_ucode_ap(void) } } -/* Temporary workaround until find_microcode_in_initrd() is __init */ -static int __init mark_initrd_gone(void) -{ - initrd_gone = true; - - return 0; -} -fs_initcall(mark_initrd_gone); - -struct cpio_data find_microcode_in_initrd(const char *path) +struct cpio_data __init find_microcode_in_initrd(const char *path) { #ifdef CONFIG_BLK_DEV_INITRD unsigned long start = 0; @@ -229,12 +218,7 @@ struct cpio_data find_microcode_in_initrd(const char *path) * has the virtual address of the beginning of the initrd. It also * possibly relocates the ramdisk. In either case, initrd_start contains * the updated address so use that instead. - * - * initrd_gone is for the hotplug case where we've thrown out initrd - * already. 
*/ - if (initrd_gone) - return (struct cpio_data){ NULL, 0, "" }; if (initrd_start) start = initrd_start; diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 1a3a26ea5a3e..6ed522d5b942 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -92,7 +92,6 @@ static inline unsigned int x86_cpuid_family(void) } extern bool dis_ucode_ldr; -extern bool initrd_gone; #ifdef CONFIG_CPU_SUP_AMD void load_ucode_amd_bsp(unsigned int family); -- Gitee From e5849d90178cc1d469914f786759a6f0b65948db Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:23:58 +0200 Subject: [PATCH 0080/2138] x86/microcode: Get rid of the schedule work indirection ANBZ: #8003 commit 2e1997335ceb6fc819862804f51d4fe83593c138 upstream. Scheduling work on all CPUs to collect the microcode information is just another extra step for no value. Let the CPU hotplug callback registration do it. Intel-SIG: commit 2e1997335ceb x86/microcode: Get rid of the schedule work indirection. Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211723.354748138@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 29 ++++++++++------------------ 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 10e822e88c0e..17372282bb92 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -496,8 +496,16 @@ static struct syscore_ops mc_syscore_ops = { static int mc_cpu_online(unsigned int cpu) { + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct device *dev = get_cpu_device(cpu); + memset(uci, 0, sizeof(*uci)); + + microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); + cpu_data(cpu).microcode = uci->cpu_sig.rev; + if (!cpu) + boot_cpu_data.microcode = uci->cpu_sig.rev; + if (sysfs_create_group(&dev->kobj, &mc_attr_group)) pr_err("Failed to create group for CPU%d\n", cpu); return 0; @@ -518,20 +526,6 @@ static int mc_cpu_down_prep(unsigned int cpu) return 0; } -static void setup_online_cpu(struct work_struct *work) -{ - int cpu = smp_processor_id(); - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - - memset(uci, 0, sizeof(*uci)); - - microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); - cpu_data(cpu).microcode = uci->cpu_sig.rev; - if (!cpu) - boot_cpu_data.microcode = uci->cpu_sig.rev; - mc_cpu_online(cpu); -} - static struct attribute *cpu_root_microcode_attrs[] = { #ifdef CONFIG_MICROCODE_LATE_LOADING &dev_attr_reload.attr, @@ -579,12 +573,9 @@ static int __init microcode_init(void) } } - /* Do per-CPU setup */ - schedule_on_each_cpu(setup_online_cpu); - register_syscore_ops(&mc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", - mc_cpu_online, mc_cpu_down_prep); + 
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", + mc_cpu_online, mc_cpu_down_prep); pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION); -- Gitee From 133e336ee9f6b2880a3b3d35925272cbd64c5560 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:55 +0200 Subject: [PATCH 0081/2138] x86/microcode: Clean up mc_cpu_down_prep() ANBZ: #8003 commit ba48aa32388ac652256baa8d0a6092d350160da0 upstream. This function has nothing to do with suspend. It's a hotplug callback. Remove the bogus comment. Drop the pointless debug printk. The hotplug core provides tracepoints which track the invocation of those callbacks. Intel-SIG: commit ba48aa32388a x86/microcode: Clean up mc_cpu_down_prep(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.028651784@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 17372282bb92..3b4987510650 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -513,16 +513,10 @@ static int mc_cpu_online(unsigned int cpu) static int mc_cpu_down_prep(unsigned int cpu) { - struct device *dev; - - dev = get_cpu_device(cpu); + struct device *dev = get_cpu_device(cpu); microcode_fini_cpu(cpu); - - /* Suspend is in progress, only remove the interface */ sysfs_remove_group(&dev->kobj, &mc_attr_group); - pr_debug("%s: CPU%d\n", __func__, cpu); - return 0; } -- Gitee From 8a7cf0bd398e3641249bd5bad1fcb84cfc7c0b5e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:56 +0200 Subject: [PATCH 0082/2138] x86/microcode: Handle "nosmt" correctly ANBZ: #8003 commit 
634ac23ad609b3ddd9e0e478bd5afbf49d3a2556 upstream. On CPUs where microcode loading is not NMI-safe the SMT siblings which are parked in one of the play_dead() variants still react to NMIs. So if an NMI hits while the primary thread updates the microcode the resulting behaviour is undefined. The default play_dead() implementation on modern CPUs is using MWAIT which is not guaranteed to be safe against a microcode update which affects MWAIT. Take the cpus_booted_once_mask into account to detect this case and refuse to load late if the vendor specific driver does not advertise that late loading is NMI safe. AMD stated that this is safe, so mark the AMD driver accordingly. This requirement will be partially lifted in later changes. Intel-SIG: commit 634ac23ad609 x86/microcode: Handle "nosmt" correctly. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.087472735@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/Kconfig | 2 +- arch/x86/kernel/cpu/microcode/amd.c | 9 +++-- arch/x86/kernel/cpu/microcode/core.c | 51 +++++++++++++++--------- arch/x86/kernel/cpu/microcode/internal.h | 13 +++--- 4 files changed, 44 insertions(+), 31 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 3ef3238c6e95..59aa22a1e062 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1321,7 +1321,7 @@ config MICROCODE_INITRD32 config MICROCODE_LATE_LOADING bool "Late microcode loading (DANGEROUS)" default n - depends on MICROCODE + depends on MICROCODE && SMP help Loading microcode late, when the system is up and executing instructions is a tricky business and should be avoided if possible. 
Just the sequence diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index cd157a8d43eb..f0b246eda09c 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -925,10 +925,11 @@ static void microcode_fini_cpu_amd(int cpu) } static struct microcode_ops microcode_amd_ops = { - .request_microcode_fw = request_microcode_amd, - .collect_cpu_info = collect_cpu_info_amd, - .apply_microcode = apply_microcode_amd, - .microcode_fini_cpu = microcode_fini_cpu_amd, + .request_microcode_fw = request_microcode_amd, + .collect_cpu_info = collect_cpu_info_amd, + .apply_microcode = apply_microcode_amd, + .microcode_fini_cpu = microcode_fini_cpu_amd, + .nmi_safe = true, }; struct microcode_ops * __init init_amd_microcode(void) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 3b4987510650..03a0c6af1033 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -269,23 +269,6 @@ static struct platform_device *microcode_pdev; */ #define SPINUNIT 100 /* 100 nsec */ -static int check_online_cpus(void) -{ - unsigned int cpu; - - /* - * Make sure all CPUs are online. It's fine for SMT to be disabled if - * all the primary threads are still online. - */ - for_each_present_cpu(cpu) { - if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) { - pr_err("Not all CPUs online, aborting microcode update.\n"); - return -EINVAL; - } - } - - return 0; -} static atomic_t late_cpus_in; static atomic_t late_cpus_out; @@ -402,6 +385,35 @@ static int microcode_reload_late(void) return ret; } +/* + * Ensure that all required CPUs which are present and have been booted + * once are online. + * + * To pass this check, all primary threads must be online. 
+ * + * If the microcode load is not safe against NMI then all SMT threads + * must be online as well because they still react to NMIs when they are + * soft-offlined and parked in one of the play_dead() variants. So if a + * NMI hits while the primary thread updates the microcode the resulting + * behaviour is undefined. The default play_dead() implementation on + * modern CPUs uses MWAIT, which is also not guaranteed to be safe + * against a microcode update which affects MWAIT. + */ +static bool ensure_cpus_are_online(void) +{ + unsigned int cpu; + + for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { + if (!cpu_online(cpu)) { + if (topology_is_primary_thread(cpu) || !microcode_ops->nmi_safe) { + pr_err("CPU %u not online\n", cpu); + return false; + } + } + } + return true; +} + static ssize_t reload_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) @@ -417,9 +429,10 @@ static ssize_t reload_store(struct device *dev, cpus_read_lock(); - ret = check_online_cpus(); - if (ret) + if (!ensure_cpus_are_online()) { + ret = -EBUSY; goto put; + } tmp_ret = microcode_ops->request_microcode_fw(bsp, µcode_pdev->dev); if (tmp_ret != UCODE_NEW) diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 6ed522d5b942..cec418225e75 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -20,18 +20,17 @@ enum ucode_state { struct microcode_ops { enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev); - void (*microcode_fini_cpu)(int cpu); /* - * The generic 'microcode_core' part guarantees that - * the callbacks below run on a target cpu when they - * are being called. + * The generic 'microcode_core' part guarantees that the callbacks + * below run on a target CPU when they are being called. * See also the "Synchronization" section in microcode_core.c. 
*/ - enum ucode_state (*apply_microcode)(int cpu); - int (*collect_cpu_info)(int cpu, struct cpu_signature *csig); - void (*finalize_late_load)(int result); + enum ucode_state (*apply_microcode)(int cpu); + int (*collect_cpu_info)(int cpu, struct cpu_signature *csig); + void (*finalize_late_load)(int result); + unsigned int nmi_safe : 1; }; extern struct ucode_cpu_info ucode_cpu_info[]; -- Gitee From c8ea641cb556c6c040c3a380cdd3157570e6c53b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:57 +0200 Subject: [PATCH 0083/2138] x86/microcode: Clarify the late load logic ANBZ: #8003 commit 6f059e634dcd0d725854514c94c114bbdd83950d upstream. reload_store() is way too complicated. Split the inner workings out and make the following enhancements: - Taint the kernel only when the microcode was actually updated. If. e.g. the rendezvous fails, then nothing happened and there is no reason for tainting. - Return useful error codes Intel-SIG: commit 6f059e634dcd x86/microcode: Clarify the late load logic. Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Nikolay Borisov Link: https://lore.kernel.org/r/20231002115903.145048840@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 41 +++++++++++++--------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 03a0c6af1033..acced35aa200 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -377,11 +377,11 @@ static int microcode_reload_late(void) pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n", old, boot_cpu_data.microcode); microcode_check(&prev_info); + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); } else { pr_info("Reload failed, current microcode revision: 0x%x\n", boot_cpu_data.microcode); } - return ret; } @@ -414,40 +414,37 @@ static bool ensure_cpus_are_online(void) return true; } +static int ucode_load_late_locked(void) +{ + if (!ensure_cpus_are_online()) + return -EBUSY; + + switch (microcode_ops->request_microcode_fw(0, µcode_pdev->dev)) { + case UCODE_NEW: + return microcode_reload_late(); + case UCODE_NFOUND: + return -ENOENT; + default: + return -EBADFD; + } +} + static ssize_t reload_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { - enum ucode_state tmp_ret = UCODE_OK; - int bsp = boot_cpu_data.cpu_index; unsigned long val; - ssize_t ret = 0; + ssize_t ret; ret = kstrtoul(buf, 0, &val); if (ret || val != 1) return -EINVAL; cpus_read_lock(); - - if (!ensure_cpus_are_online()) { - ret = -EBUSY; - goto put; - } - - tmp_ret = microcode_ops->request_microcode_fw(bsp, µcode_pdev->dev); - if (tmp_ret != UCODE_NEW) - goto put; - - ret = microcode_reload_late(); -put: + ret = ucode_load_late_locked(); cpus_read_unlock(); - if 
(ret == 0) - ret = size; - - add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); - - return ret; + return ret ? : size; } static DEVICE_ATTR_WO(reload); -- Gitee From 3886012483573cc078ca098f9c0d82dcd0930679 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 13:59:59 +0200 Subject: [PATCH 0084/2138] x86/microcode: Sanitize __wait_for_cpus() ANBZ: #8003 commit 0772b9aa1a8f7322dce8588c231cff8b57298a53 upstream. The code is too complicated for no reason: - The return value is pointless as this is a strict boolean. - It's way simpler to count down from num_online_cpus() and check for zero. - The timeout argument is pointless as this is always one second. - Touching the NMI watchdog every 100ns does not make any sense, neither does checking every 100ns. This is really not a hotpath operation. Preload the atomic counter with the number of online CPUs and simplify the whole timeout logic. Delay for one microsecond and touch the NMI watchdog once per millisecond. Intel-SIG: commit 0772b9aa1a8f x86/microcode: Sanitize __wait_for_cpus(). Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.204251527@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 39 ++++++++++++---------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index acced35aa200..c9b25f180d01 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -267,31 +267,26 @@ static struct platform_device *microcode_pdev; * requirement can be relaxed in the future. Right now, this is conservative * and good. 
*/ -#define SPINUNIT 100 /* 100 nsec */ +static atomic_t late_cpus_in, late_cpus_out; - -static atomic_t late_cpus_in; -static atomic_t late_cpus_out; - -static int __wait_for_cpus(atomic_t *t, long long timeout) +static bool wait_for_cpus(atomic_t *cnt) { - int all_cpus = num_online_cpus(); + unsigned int timeout; - atomic_inc(t); + WARN_ON_ONCE(atomic_dec_return(cnt) < 0); - while (atomic_read(t) < all_cpus) { - if (timeout < SPINUNIT) { - pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n", - all_cpus - atomic_read(t)); - return 1; - } + for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { + if (!atomic_read(cnt)) + return true; - ndelay(SPINUNIT); - timeout -= SPINUNIT; + udelay(1); - touch_nmi_watchdog(); + if (!(timeout % USEC_PER_MSEC)) + touch_nmi_watchdog(); } - return 0; + /* Prevent the late comers from making progress and let them time out */ + atomic_inc(cnt); + return false; } /* @@ -309,7 +304,7 @@ static int __reload_late(void *info) * Wait for all CPUs to arrive. A load will not be attempted unless all * CPUs show up. 
* */ - if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC)) + if (!wait_for_cpus(&late_cpus_in)) return -1; /* @@ -332,7 +327,7 @@ static int __reload_late(void *info) } wait_for_siblings: - if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC)) + if (!wait_for_cpus(&late_cpus_out)) panic("Timeout during microcode update!\n"); /* @@ -359,8 +354,8 @@ static int microcode_reload_late(void) pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n"); pr_err("You should switch to early loading, if possible.\n"); - atomic_set(&late_cpus_in, 0); - atomic_set(&late_cpus_out, 0); + atomic_set(&late_cpus_in, num_online_cpus()); + atomic_set(&late_cpus_out, num_online_cpus()); /* * Take a snapshot before the microcode update in order to compare and -- Gitee From f2a88050a5f3527674a4f527aed8b91ce304ea71 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Oct 2023 23:24:05 +0200 Subject: [PATCH 0085/2138] x86/microcode: Add per CPU result state ANBZ: #8003 commit 4b753955e9151ad2f722137a7bcbafda756186b3 upstream. The microcode rendezvous is purely acting on global state, which does not allow to analyze fails in a coherent way. Introduce per CPU state where the results are written into, which allows to analyze the return codes of the individual CPUs. Initialize the state when walking the cpu_present_mask in the online check to avoid another for_each_cpu() loop. Enhance the result print out with that. The structure is intentionally named ucode_ctrl as it will gain control fields in subsequent changes. Intel-SIG: commit 4b753955e915 x86/microcode: Add per CPU result state. Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211723.632681010@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 114 +++++++++++++---------- arch/x86/kernel/cpu/microcode/internal.h | 1 + 2 files changed, 68 insertions(+), 47 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index c9b25f180d01..a82e825ff7d6 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -267,6 +267,11 @@ static struct platform_device *microcode_pdev; * requirement can be relaxed in the future. Right now, this is conservative * and good. */ +struct microcode_ctrl { + enum ucode_state result; +}; + +static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); static atomic_t late_cpus_in, late_cpus_out; static bool wait_for_cpus(atomic_t *cnt) @@ -289,23 +294,19 @@ static bool wait_for_cpus(atomic_t *cnt) return false; } -/* - * Returns: - * < 0 - on error - * 0 - success (no update done or microcode was updated) - */ -static int __reload_late(void *info) +static int load_cpus_stopped(void *unused) { int cpu = smp_processor_id(); - enum ucode_state err; - int ret = 0; + enum ucode_state ret; /* * Wait for all CPUs to arrive. A load will not be attempted unless all * CPUs show up. * */ - if (!wait_for_cpus(&late_cpus_in)) - return -1; + if (!wait_for_cpus(&late_cpus_in)) { + this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); + return 0; + } /* * On an SMT system, it suffices to load the microcode on one sibling of @@ -314,17 +315,11 @@ static int __reload_late(void *info) * loading attempts happen on multiple threads of an SMT core. See * below. 
*/ - if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu) - err = microcode_ops->apply_microcode(cpu); - else + if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu) goto wait_for_siblings; - if (err >= UCODE_NFOUND) { - if (err == UCODE_ERROR) { - pr_warn("Error reloading microcode on CPU %d\n", cpu); - ret = -1; - } - } + ret = microcode_ops->apply_microcode(cpu); + this_cpu_write(ucode_ctrl.result, ret); wait_for_siblings: if (!wait_for_cpus(&late_cpus_out)) @@ -336,19 +331,18 @@ static int __reload_late(void *info) * per-cpu cpuinfo can be updated with right microcode * revision. */ - if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu) - err = microcode_ops->apply_microcode(cpu); + if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu) + return 0; - return ret; + ret = microcode_ops->apply_microcode(cpu); + this_cpu_write(ucode_ctrl.result, ret); + return 0; } -/* - * Reload microcode late on all CPUs. Wait for a sec until they - * all gather together. - */ -static int microcode_reload_late(void) +static int load_late_stop_cpus(void) { - int old = boot_cpu_data.microcode, ret; + unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0; + int old_rev = boot_cpu_data.microcode; struct cpuinfo_x86 prev_info; pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n"); @@ -363,26 +357,47 @@ static int microcode_reload_late(void) */ store_cpu_caps(&prev_info); - ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask); + stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask); + + /* Analyze the results */ + for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { + switch (per_cpu(ucode_ctrl.result, cpu)) { + case UCODE_UPDATED: updated++; break; + case UCODE_TIMEOUT: timedout++; break; + case UCODE_OK: siblings++; break; + default: failed++; break; + } + } if (microcode_ops->finalize_late_load) - microcode_ops->finalize_late_load(ret); - - if (!ret) { - pr_info("Reload succeeded, 
microcode revision: 0x%x -> 0x%x\n", - old, boot_cpu_data.microcode); - microcode_check(&prev_info); - add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); - } else { - pr_info("Reload failed, current microcode revision: 0x%x\n", - boot_cpu_data.microcode); + microcode_ops->finalize_late_load(!updated); + + if (!updated) { + /* Nothing changed. */ + if (!failed && !timedout) + return 0; + pr_err("update failed: %u CPUs failed %u CPUs timed out\n", + failed, timedout); + return -EIO; + } + + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings); + if (failed || timedout) { + pr_err("load incomplete. %u CPUs timed out or failed\n", + num_online_cpus() - (updated + siblings)); } - return ret; + pr_info("revision: 0x%x -> 0x%x\n", old_rev, boot_cpu_data.microcode); + microcode_check(&prev_info); + + return updated + siblings == num_online_cpus() ? 0 : -EIO; } /* - * Ensure that all required CPUs which are present and have been booted - * once are online. + * This function does two things: + * + * 1) Ensure that all required CPUs which are present and have been booted + * once are online. * * To pass this check, all primary threads must be online. * @@ -393,9 +408,12 @@ static int microcode_reload_late(void) * behaviour is undefined. The default play_dead() implementation on * modern CPUs uses MWAIT, which is also not guaranteed to be safe * against a microcode update which affects MWAIT. 
+ * + * 2) Initialize the per CPU control structure */ -static bool ensure_cpus_are_online(void) +static bool setup_cpus(void) { + struct microcode_ctrl ctrl = { .result = -1, }; unsigned int cpu; for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { @@ -405,18 +423,20 @@ static bool ensure_cpus_are_online(void) return false; } } + /* Initialize the per CPU state */ + per_cpu(ucode_ctrl, cpu) = ctrl; } return true; } -static int ucode_load_late_locked(void) +static int load_late_locked(void) { - if (!ensure_cpus_are_online()) + if (!setup_cpus()) return -EBUSY; switch (microcode_ops->request_microcode_fw(0, µcode_pdev->dev)) { case UCODE_NEW: - return microcode_reload_late(); + return load_late_stop_cpus(); case UCODE_NFOUND: return -ENOENT; default: @@ -436,7 +456,7 @@ static ssize_t reload_store(struct device *dev, return -EINVAL; cpus_read_lock(); - ret = ucode_load_late_locked(); + ret = load_late_locked(); cpus_read_unlock(); return ret ? : size; diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index cec418225e75..2db13aeb707b 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -16,6 +16,7 @@ enum ucode_state { UCODE_UPDATED, UCODE_NFOUND, UCODE_ERROR, + UCODE_TIMEOUT, }; struct microcode_ops { -- Gitee From d4b7d49fcff29a2dda281be0f8db66a7b8843fa5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 14:00:01 +0200 Subject: [PATCH 0086/2138] x86/microcode: Add per CPU control field ANBZ: #8003 commit ba3aeb97cb2c53025356f31c5a0a294385194115 upstream. Add a per CPU control field to ucode_ctrl and define constants for it which are going to be used to control the loading state machine. 
In theory this could be a global control field, but a global control does not cover the following case: 15 primary CPUs load microcode successfully 1 primary CPU fails and returns with an error code With global control the sibling of the failed CPU would either try again or the whole operation would be aborted with the consequence that the 15 siblings do not invoke the apply path and end up with inconsistent software state. The result in dmesg would be inconsistent too. There are two additional fields added and initialized: ctrl_cpu and secondaries. ctrl_cpu is the CPU number of the primary thread for now, but with the upcoming uniform loading at package or system scope this will be one CPU per package or just one CPU. Secondaries hands the control CPU a CPU mask which will be required to release the secondary CPUs out of the wait loop. Preparatory change for implementing a properly split control flow for primary and secondary CPUs. Intel-SIG: commit ba3aeb97cb2c x86/microcode: Add per CPU control field. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.319959519@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index a82e825ff7d6..f27f78c274d7 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -267,8 +267,19 @@ static struct platform_device *microcode_pdev; * requirement can be relaxed in the future. Right now, this is conservative * and good. 
*/ +enum sibling_ctrl { + /* Spinwait with timeout */ + SCTRL_WAIT, + /* Invoke the microcode_apply() callback */ + SCTRL_APPLY, + /* Proceed without invoking the microcode_apply() callback */ + SCTRL_DONE, +}; + struct microcode_ctrl { + enum sibling_ctrl ctrl; enum ucode_state result; + unsigned int ctrl_cpu; }; static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); @@ -413,7 +424,7 @@ static int load_late_stop_cpus(void) */ static bool setup_cpus(void) { - struct microcode_ctrl ctrl = { .result = -1, }; + struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, }; unsigned int cpu; for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { @@ -423,7 +434,12 @@ static bool setup_cpus(void) return false; } } - /* Initialize the per CPU state */ + + /* + * Initialize the per CPU state. This is core scope for now, + * but prepared to take package or system scope into account. + */ + ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu)); per_cpu(ucode_ctrl, cpu) = ctrl; } return true; -- Gitee From 3a85fad343778f56a13f4505311389e7621d1713 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 14:00:02 +0200 Subject: [PATCH 0087/2138] x86/microcode: Provide new control functions ANBZ: #8003 commit 6067788f04b1020b316344fe34746f96d594a042 upstream. The current all in one code is unreadable and really not suited for adding future features like uniform loading with package or system scope. Provide a set of new control functions which split the handling of the primary and secondary CPUs. These will replace the current rendezvous all in one function in the next step. This is intentionally a separate change because diff makes an complete unreadable mess otherwise. So the flow separates the primary and the secondary CPUs into their own functions which use the control field in the per CPU ucode_ctrl struct. 
primary() secondary() wait_for_all() wait_for_all() apply_ucode() wait_for_release() release() apply_ucode() Intel-SIG: commit 6067788f04b1 x86/microcode: Provide new control functions. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.377922731@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 84 ++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index f27f78c274d7..0e43d2e97a56 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -305,6 +305,90 @@ static bool wait_for_cpus(atomic_t *cnt) return false; } +static bool wait_for_ctrl(void) +{ + unsigned int timeout; + + for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { + if (this_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT) + return true; + udelay(1); + if (!(timeout % 1000)) + touch_nmi_watchdog(); + } + return false; +} + +static __maybe_unused void load_secondary(unsigned int cpu) +{ + unsigned int ctrl_cpu = this_cpu_read(ucode_ctrl.ctrl_cpu); + enum ucode_state ret; + + /* Initial rendezvous to ensure that all CPUs have arrived */ + if (!wait_for_cpus(&late_cpus_in)) { + pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1); + this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); + return; + } + + /* + * Wait for primary threads to complete. If one of them hangs due + * to the update, there is no way out. This is non-recoverable + * because the CPU might hold locks or resources and confuse the + * scheduler, watchdogs etc. There is no way to safely evacuate the + * machine. 
+ */ + if (!wait_for_ctrl()) + panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu); + + /* + * If the primary succeeded then invoke the apply() callback, + * otherwise copy the state from the primary thread. + */ + if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY) + ret = microcode_ops->apply_microcode(cpu); + else + ret = per_cpu(ucode_ctrl.result, ctrl_cpu); + + this_cpu_write(ucode_ctrl.result, ret); + this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE); +} + +static __maybe_unused void load_primary(unsigned int cpu) +{ + struct cpumask *secondaries = topology_sibling_cpumask(cpu); + enum sibling_ctrl ctrl; + enum ucode_state ret; + unsigned int sibling; + + /* Initial rendezvous to ensure that all CPUs have arrived */ + if (!wait_for_cpus(&late_cpus_in)) { + this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); + pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1); + return; + } + + ret = microcode_ops->apply_microcode(cpu); + this_cpu_write(ucode_ctrl.result, ret); + this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE); + + /* + * If the update was successful, let the siblings run the apply() + * callback. If not, tell them it's done. This also covers the + * case where the CPU has uniform loading at package or system + * scope implemented but does not advertise it. + */ + if (ret == UCODE_UPDATED || ret == UCODE_OK) + ctrl = SCTRL_APPLY; + else + ctrl = SCTRL_DONE; + + for_each_cpu(sibling, secondaries) { + if (sibling != cpu) + per_cpu(ucode_ctrl.ctrl, sibling) = ctrl; + } +} + static int load_cpus_stopped(void *unused) { int cpu = smp_processor_id(); -- Gitee From 428282bfaaab4ca44eb27cacebe9171461a14db8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 14:00:03 +0200 Subject: [PATCH 0088/2138] x86/microcode: Replace the all-in-one rendevous handler ANBZ: #8003 commit 0bf871651211b58c7b19f40b746b646d5311e2ec upstream. with a new handler which just separates the control flow of primary and secondary CPUs. 
Intel-SIG: commit 0bf871651211 x86/microcode: Replace the all-in-one rendevous handler. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.433704135@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 51 +++++----------------------- 1 file changed, 9 insertions(+), 42 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 0e43d2e97a56..f717b2440186 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -283,7 +283,7 @@ struct microcode_ctrl { }; static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); -static atomic_t late_cpus_in, late_cpus_out; +static atomic_t late_cpus_in; static bool wait_for_cpus(atomic_t *cnt) { @@ -319,7 +319,7 @@ static bool wait_for_ctrl(void) return false; } -static __maybe_unused void load_secondary(unsigned int cpu) +static void load_secondary(unsigned int cpu) { unsigned int ctrl_cpu = this_cpu_read(ucode_ctrl.ctrl_cpu); enum ucode_state ret; @@ -354,7 +354,7 @@ static __maybe_unused void load_secondary(unsigned int cpu) this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE); } -static __maybe_unused void load_primary(unsigned int cpu) +static void load_primary(unsigned int cpu) { struct cpumask *secondaries = topology_sibling_cpumask(cpu); enum sibling_ctrl ctrl; @@ -391,46 +391,14 @@ static __maybe_unused void load_primary(unsigned int cpu) static int load_cpus_stopped(void *unused) { - int cpu = smp_processor_id(); - enum ucode_state ret; - - /* - * Wait for all CPUs to arrive. A load will not be attempted unless all - * CPUs show up. 
- * */ - if (!wait_for_cpus(&late_cpus_in)) { - this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); - return 0; - } - - /* - * On an SMT system, it suffices to load the microcode on one sibling of - * the core because the microcode engine is shared between the threads. - * Synchronization still needs to take place so that no concurrent - * loading attempts happen on multiple threads of an SMT core. See - * below. - */ - if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu) - goto wait_for_siblings; + unsigned int cpu = smp_processor_id(); - ret = microcode_ops->apply_microcode(cpu); - this_cpu_write(ucode_ctrl.result, ret); - -wait_for_siblings: - if (!wait_for_cpus(&late_cpus_out)) - panic("Timeout during microcode update!\n"); - - /* - * At least one thread has completed update on each core. - * For others, simply call the update to make sure the - * per-cpu cpuinfo can be updated with right microcode - * revision. - */ - if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu) - return 0; + if (this_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) + load_primary(cpu); + else + load_secondary(cpu); - ret = microcode_ops->apply_microcode(cpu); - this_cpu_write(ucode_ctrl.result, ret); + /* No point to wait here. The CPUs will all wait in stop_machine(). */ return 0; } @@ -444,7 +412,6 @@ static int load_late_stop_cpus(void) pr_err("You should switch to early loading, if possible.\n"); atomic_set(&late_cpus_in, num_online_cpus()); - atomic_set(&late_cpus_out, num_online_cpus()); /* * Take a snapshot before the microcode update in order to compare and -- Gitee From 4dcedfee85161738c0faab86d7bb1a5127914464 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 14:00:05 +0200 Subject: [PATCH 0089/2138] x86/microcode: Rendezvous and load in NMI ANBZ: #8003 commit 7eb314a22800457396f541c655697dabd71e44a7 upstream. stop_machine() does not prevent the spin-waiting sibling from handling an NMI, which is obviously violating the whole concept of rendezvous. 
Implement a static branch right in the beginning of the NMI handler which is nopped out except when enabled by the late loading mechanism. The late loader enables the static branch before stop_machine() is invoked. Each CPU has an nmi_enable in its control structure which indicates whether the CPU should go into the update routine. This is required to bridge the gap between enabling the branch and actually being at the point where it is required to enter the loader wait loop. Each CPU which arrives in the stopper thread function sets that flag and issues a self NMI right after that. If the NMI function sees the flag clear, it returns. If it's set it clears the flag and enters the rendezvous. This is safe against a real NMI which hits in between setting the flag and sending the NMI to itself. The real NMI will be swallowed by the microcode update and the self NMI will then let stuff continue. Otherwise this would end up with a spurious NMI. Intel-SIG: commit 7eb314a22800 x86/microcode: Rendezvous and load in NMI. Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.489900814@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/microcode.h | 12 +++++++ arch/x86/kernel/cpu/microcode/core.c | 42 +++++++++++++++++++++--- arch/x86/kernel/cpu/microcode/intel.c | 1 + arch/x86/kernel/cpu/microcode/internal.h | 3 +- arch/x86/kernel/nmi.c | 4 +++ 5 files changed, 57 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 78f1eb2532dc..82924828a94b 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -72,4 +72,16 @@ static inline u32 intel_get_microcode_revision(void) } #endif /* !CONFIG_CPU_SUP_INTEL */ +bool microcode_nmi_handler(void); + +#ifdef CONFIG_MICROCODE_LATE_LOADING +DECLARE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); +static __always_inline bool microcode_nmi_handler_enabled(void) +{ + return static_branch_unlikely(µcode_nmi_handler_enable); +} +#else +static __always_inline bool microcode_nmi_handler_enabled(void) { return false; } +#endif + #endif /* _ASM_X86_MICROCODE_H */ diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index f717b2440186..0437cb115952 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -31,6 +32,7 @@ #include #include +#include #include #include #include @@ -280,8 +282,10 @@ struct microcode_ctrl { enum sibling_ctrl ctrl; enum ucode_state result; unsigned int ctrl_cpu; + bool nmi_enabled; }; +DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); static atomic_t late_cpus_in; @@ -297,7 +301,8 @@ static bool wait_for_cpus(atomic_t 
*cnt) udelay(1); - if (!(timeout % USEC_PER_MSEC)) + /* If invoked directly, tickle the NMI watchdog */ + if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) touch_nmi_watchdog(); } /* Prevent the late comers from making progress and let them time out */ @@ -313,7 +318,8 @@ static bool wait_for_ctrl(void) if (this_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT) return true; udelay(1); - if (!(timeout % 1000)) + /* If invoked directly, tickle the NMI watchdog */ + if (!microcode_ops->use_nmi && !(timeout % 1000)) touch_nmi_watchdog(); } return false; @@ -389,7 +395,7 @@ static void load_primary(unsigned int cpu) } } -static int load_cpus_stopped(void *unused) +static bool microcode_update_handler(void) { unsigned int cpu = smp_processor_id(); @@ -398,7 +404,29 @@ static int load_cpus_stopped(void *unused) else load_secondary(cpu); - /* No point to wait here. The CPUs will all wait in stop_machine(). */ + touch_nmi_watchdog(); + return true; +} + +bool microcode_nmi_handler(void) +{ + if (!this_cpu_read(ucode_ctrl.nmi_enabled)) + return false; + + this_cpu_write(ucode_ctrl.nmi_enabled, false); + return microcode_update_handler(); +} + +static int load_cpus_stopped(void *unused) +{ + if (microcode_ops->use_nmi) { + /* Enable the NMI handler and raise NMI */ + this_cpu_write(ucode_ctrl.nmi_enabled, true); + apic->send_IPI(smp_processor_id(), NMI_VECTOR); + } else { + /* Just invoke the handler directly */ + microcode_update_handler(); + } return 0; } @@ -419,8 +447,14 @@ static int load_late_stop_cpus(void) */ store_cpu_caps(&prev_info); + if (microcode_ops->use_nmi) + static_branch_enable_cpuslocked(µcode_nmi_handler_enable); + stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask); + if (microcode_ops->use_nmi) + static_branch_disable_cpuslocked(µcode_nmi_handler_enable); + /* Analyze the results */ for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { switch (per_cpu(ucode_ctrl.result, cpu)) { diff --git a/arch/x86/kernel/cpu/microcode/intel.c 
b/arch/x86/kernel/cpu/microcode/intel.c index e5c5ddfd6831..905ed3b557fb 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -611,6 +611,7 @@ static struct microcode_ops microcode_intel_ops = { .collect_cpu_info = collect_cpu_info, .apply_microcode = apply_microcode_late, .finalize_late_load = finalize_late_load, + .use_nmi = IS_ENABLED(CONFIG_X86_64), }; static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 2db13aeb707b..a1fdfb6bd015 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -31,7 +31,8 @@ struct microcode_ops { enum ucode_state (*apply_microcode)(int cpu); int (*collect_cpu_info)(int cpu, struct cpu_signature *csig); void (*finalize_late_load)(int result); - unsigned int nmi_safe : 1; + unsigned int nmi_safe : 1, + use_nmi : 1; }; extern struct ucode_cpu_info ucode_cpu_info[]; diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 87aee638e1a5..cdca650af532 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -343,6 +344,9 @@ static noinstr void default_do_nmi(struct pt_regs *regs) instrumentation_begin(); + if (microcode_nmi_handler_enabled() && microcode_nmi_handler()) + goto out; + handled = nmi_handle(NMI_LOCAL, regs); __this_cpu_add(nmi_stats.normal, handled); if (handled) { -- Gitee From 86b360d0e471b02dd0acc7a90bcc0adf8a21cf44 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 14:00:06 +0200 Subject: [PATCH 0090/2138] x86/microcode: Protect against instrumentation ANBZ: #8003 commit 1582c0f4a21303792f523fe2839dd8433ee630c0 upstream. 
The wait for control loop in which the siblings are waiting for the microcode update on the primary thread must be protected against instrumentation as instrumentation can end up in #INT3, #DB or #PF, which then returns with IRET. That IRET reenables NMI which is the opposite of what the NMI rendezvous is trying to achieve. Intel-SIG: commit 1582c0f4a213 x86/microcode: Protect against instrumentation. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.545969323@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/kernel/cpu/microcode/core.c | 111 ++++++++++++++++++++------- 1 file changed, 83 insertions(+), 28 deletions(-) diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 0437cb115952..48f8c3c29f1f 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -287,54 +287,65 @@ struct microcode_ctrl { DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); +static unsigned int loops_per_usec; static atomic_t late_cpus_in; -static bool wait_for_cpus(atomic_t *cnt) +static noinstr bool wait_for_cpus(atomic_t *cnt) { - unsigned int timeout; + unsigned int timeout, loops; - WARN_ON_ONCE(atomic_dec_return(cnt) < 0); + WARN_ON_ONCE(raw_atomic_dec_return(cnt) < 0); for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { - if (!atomic_read(cnt)) + if (!raw_atomic_read(cnt)) return true; - udelay(1); + for (loops = 0; loops < loops_per_usec; loops++) + cpu_relax(); /* If invoked directly, tickle the NMI watchdog */ - if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) + if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) { + instrumentation_begin(); touch_nmi_watchdog(); + instrumentation_end(); + } } /* 
Prevent the late comers from making progress and let them time out */ - atomic_inc(cnt); + raw_atomic_inc(cnt); return false; } -static bool wait_for_ctrl(void) +static noinstr bool wait_for_ctrl(void) { - unsigned int timeout; + unsigned int timeout, loops; for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { - if (this_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT) + if (raw_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT) return true; - udelay(1); + + for (loops = 0; loops < loops_per_usec; loops++) + cpu_relax(); + /* If invoked directly, tickle the NMI watchdog */ - if (!microcode_ops->use_nmi && !(timeout % 1000)) + if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) { + instrumentation_begin(); touch_nmi_watchdog(); + instrumentation_end(); + } } return false; } -static void load_secondary(unsigned int cpu) +/* + * Protected against instrumentation up to the point where the primary + * thread completed the update. See microcode_nmi_handler() for details. + */ +static noinstr bool load_secondary_wait(unsigned int ctrl_cpu) { - unsigned int ctrl_cpu = this_cpu_read(ucode_ctrl.ctrl_cpu); - enum ucode_state ret; - /* Initial rendezvous to ensure that all CPUs have arrived */ if (!wait_for_cpus(&late_cpus_in)) { - pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1); - this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); - return; + raw_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); + return false; } /* @@ -344,9 +355,33 @@ static void load_secondary(unsigned int cpu) * scheduler, watchdogs etc. There is no way to safely evacuate the * machine. */ - if (!wait_for_ctrl()) - panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu); + if (wait_for_ctrl()) + return true; + + instrumentation_begin(); + panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu); + instrumentation_end(); +} +/* + * Protected against instrumentation up to the point where the primary + * thread completed the update. See microcode_nmi_handler() for details. 
+ */ +static noinstr void load_secondary(unsigned int cpu) +{ + unsigned int ctrl_cpu = raw_cpu_read(ucode_ctrl.ctrl_cpu); + enum ucode_state ret; + + if (!load_secondary_wait(ctrl_cpu)) { + instrumentation_begin(); + pr_err_once("load: %d CPUs timed out\n", + atomic_read(&late_cpus_in) - 1); + instrumentation_end(); + return; + } + + /* Primary thread completed. Allow to invoke instrumentable code */ + instrumentation_begin(); /* * If the primary succeeded then invoke the apply() callback, * otherwise copy the state from the primary thread. @@ -358,6 +393,7 @@ static void load_secondary(unsigned int cpu) this_cpu_write(ucode_ctrl.result, ret); this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE); + instrumentation_end(); } static void load_primary(unsigned int cpu) @@ -395,25 +431,43 @@ static void load_primary(unsigned int cpu) } } -static bool microcode_update_handler(void) +static noinstr bool microcode_update_handler(void) { - unsigned int cpu = smp_processor_id(); + unsigned int cpu = raw_smp_processor_id(); - if (this_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) + if (raw_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) { + instrumentation_begin(); load_primary(cpu); - else + instrumentation_end(); + } else { load_secondary(cpu); + } + instrumentation_begin(); touch_nmi_watchdog(); + instrumentation_end(); + return true; } -bool microcode_nmi_handler(void) +/* + * Protection against instrumentation is required for CPUs which are not + * safe against an NMI which is delivered to the secondary SMT sibling + * while the primary thread updates the microcode. Instrumentation can end + * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI + * which is the opposite of what the NMI rendezvous is trying to achieve. + * + * The primary thread is safe versus instrumentation as the actual + * microcode update handles this correctly. It's only the sibling code + * path which must be NMI safe until the primary thread completed the + * update. 
+ */ +bool noinstr microcode_nmi_handler(void) { - if (!this_cpu_read(ucode_ctrl.nmi_enabled)) + if (!raw_cpu_read(ucode_ctrl.nmi_enabled)) return false; - this_cpu_write(ucode_ctrl.nmi_enabled, false); + raw_cpu_write(ucode_ctrl.nmi_enabled, false); return microcode_update_handler(); } @@ -440,6 +494,7 @@ static int load_late_stop_cpus(void) pr_err("You should switch to early loading, if possible.\n"); atomic_set(&late_cpus_in, num_online_cpus()); + loops_per_usec = loops_per_jiffy / (TICK_NSEC / 1000); /* * Take a snapshot before the microcode update in order to compare and -- Gitee From cde9aae7e0c7c040a734718ab23a3d01b3575639 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 14:00:07 +0200 Subject: [PATCH 0091/2138] x86/apic: Provide apic_force_nmi_on_cpu() ANBZ: #8003 commit 9cab5fb776d4367e26950cf759211e948335288e upstream. When SMT siblings are soft-offlined and parked in one of the play_dead() variants they still react on NMI, which is problematic on affected Intel CPUs. The default play_dead() variant uses MWAIT on modern CPUs, which is not guaranteed to be safe when updated concurrently. Right now late loading is prevented when not all SMT siblings are online, but as they still react on NMI, it is possible to bring them out of their park position into a trivial rendezvous handler. Provide a function which allows to do that. I does sanity checks whether the target is in the cpus_booted_once_mask and whether the APIC driver supports it. Mark X2APIC and XAPIC as capable, but exclude 32bit and the UV and NUMACHIP variants as that needs feedback from the relevant experts. Intel-SIG: commit 9cab5fb776d4 x86/apic: Provide apic_force_nmi_on_cpu(). Microcode restructuring backport. 
Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.603100036@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/apic.h | 5 ++++- arch/x86/kernel/apic/apic_flat_64.c | 2 ++ arch/x86/kernel/apic/ipi.c | 8 ++++++++ arch/x86/kernel/apic/x2apic_cluster.c | 1 + arch/x86/kernel/apic/x2apic_phys.c | 1 + 5 files changed, 16 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 33aa0c31c21c..a2258c894244 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -277,7 +277,8 @@ struct apic { u32 disable_esr : 1, dest_mode_logical : 1, - x2apic_set_max_apicid : 1; + x2apic_set_max_apicid : 1, + nmi_to_offline_cpu : 1; u32 (*calc_dest_apicid)(unsigned int cpu); @@ -543,6 +544,8 @@ extern bool default_check_apicid_used(physid_mask_t *map, int apicid); extern void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap); extern int default_cpu_present_to_apicid(int mps_cpu); +void apic_send_nmi_to_offline_cpu(unsigned int cpu); + #else /* CONFIG_X86_LOCAL_APIC */ static inline unsigned int read_apic_id(void) { return 0; } diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 032a84e2c3cc..cd16228611ce 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -103,6 +103,7 @@ static struct apic apic_flat __ro_after_init = { .send_IPI_allbutself = default_send_IPI_allbutself, .send_IPI_all = default_send_IPI_all, .send_IPI_self = default_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_mem_read, .write = native_apic_mem_write, @@ -175,6 +176,7 @@ static struct apic apic_physflat __ro_after_init = { .send_IPI_allbutself = default_send_IPI_allbutself, .send_IPI_all = default_send_IPI_all, 
.send_IPI_self = default_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_mem_read, .write = native_apic_mem_write, diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index a44ba7209ef3..edad86f32e38 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c @@ -97,6 +97,14 @@ void native_send_call_func_ipi(const struct cpumask *mask) __apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR); } +void apic_send_nmi_to_offline_cpu(unsigned int cpu) +{ + if (WARN_ON_ONCE(!apic->nmi_to_offline_cpu)) + return; + if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, &cpus_booted_once_mask))) + return; + apic->send_IPI(cpu, NMI_VECTOR); +} #endif /* CONFIG_SMP */ static inline int __prepare_ICR2(unsigned int mask) diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index affbff65e497..a8306089c91b 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -251,6 +251,7 @@ static struct apic apic_x2apic_cluster __ro_after_init = { .send_IPI_allbutself = x2apic_send_IPI_allbutself, .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_msr_read, .write = native_apic_msr_write, diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 788cdb4ee394..c8ac1b12b8ac 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -166,6 +166,7 @@ static struct apic apic_x2apic_phys __ro_after_init = { .send_IPI_allbutself = x2apic_send_IPI_allbutself, .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_msr_read, .write = native_apic_msr_write, -- Gitee From 810b47699cbef17d7568d4c13ad37116b878f240 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 2 Oct 2023 14:00:08 +0200 Subject: [PATCH 0092/2138] x86/microcode: Handle "offline" CPUs correctly ANBZ: #8003 commit 
8f849ff63bcbc77670da03cb8f2b78b06257f455 upstream. Offline CPUs need to be parked in a safe loop when microcode update is in progress on the primary CPU. Currently, offline CPUs are parked in mwait_play_dead(), and for Intel CPUs, its not a safe instruction, because the MWAIT instruction can be patched in the new microcode update that can cause instability. - Add a new microcode state 'UCODE_OFFLINE' to report status on per-CPU basis. - Force NMI on the offline CPUs. Wake up offline CPUs while the update is in progress and then return them back to mwait_play_dead() after microcode update is complete. Intel-SIG: commit 8f849ff63bcb x86/microcode: Handle "offline" CPUs correctly. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.660850472@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/microcode.h | 1 + arch/x86/kernel/cpu/microcode/core.c | 112 ++++++++++++++++++++++- arch/x86/kernel/cpu/microcode/internal.h | 1 + arch/x86/kernel/nmi.c | 5 +- 4 files changed, 113 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 82924828a94b..0ee6ed0ff2bf 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -73,6 +73,7 @@ static inline u32 intel_get_microcode_revision(void) #endif /* !CONFIG_CPU_SUP_INTEL */ bool microcode_nmi_handler(void); +void microcode_offline_nmi_handler(void); #ifdef CONFIG_MICROCODE_LATE_LOADING DECLARE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 48f8c3c29f1f..bd8f7ffab96c 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -287,8 +287,9 @@ struct microcode_ctrl { 
DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); +static atomic_t late_cpus_in, offline_in_nmi; static unsigned int loops_per_usec; -static atomic_t late_cpus_in; +static cpumask_t cpu_offline_mask; static noinstr bool wait_for_cpus(atomic_t *cnt) { @@ -396,7 +397,7 @@ static noinstr void load_secondary(unsigned int cpu) instrumentation_end(); } -static void load_primary(unsigned int cpu) +static void __load_primary(unsigned int cpu) { struct cpumask *secondaries = topology_sibling_cpumask(cpu); enum sibling_ctrl ctrl; @@ -431,6 +432,67 @@ static void load_primary(unsigned int cpu) } } +static bool kick_offline_cpus(unsigned int nr_offl) +{ + unsigned int cpu, timeout; + + for_each_cpu(cpu, &cpu_offline_mask) { + /* Enable the rendezvous handler and send NMI */ + per_cpu(ucode_ctrl.nmi_enabled, cpu) = true; + apic_send_nmi_to_offline_cpu(cpu); + } + + /* Wait for them to arrive */ + for (timeout = 0; timeout < (USEC_PER_SEC / 2); timeout++) { + if (atomic_read(&offline_in_nmi) == nr_offl) + return true; + udelay(1); + } + /* Let the others time out */ + return false; +} + +static void release_offline_cpus(void) +{ + unsigned int cpu; + + for_each_cpu(cpu, &cpu_offline_mask) + per_cpu(ucode_ctrl.ctrl, cpu) = SCTRL_DONE; +} + +static void load_primary(unsigned int cpu) +{ + unsigned int nr_offl = cpumask_weight(&cpu_offline_mask); + bool proceed = true; + + /* Kick soft-offlined SMT siblings if required */ + if (!cpu && nr_offl) + proceed = kick_offline_cpus(nr_offl); + + /* If the soft-offlined CPUs did not respond, abort */ + if (proceed) + __load_primary(cpu); + + /* Unconditionally release soft-offlined SMT siblings if required */ + if (!cpu && nr_offl) + release_offline_cpus(); +} + +/* + * Minimal stub rendezvous handler for soft-offlined CPUs which participate + * in the NMI rendezvous to protect against a concurrent NMI on affected + * CPUs. 
+ */ +void noinstr microcode_offline_nmi_handler(void) +{ + if (!raw_cpu_read(ucode_ctrl.nmi_enabled)) + return; + raw_cpu_write(ucode_ctrl.nmi_enabled, false); + raw_cpu_write(ucode_ctrl.result, UCODE_OFFLINE); + raw_atomic_inc(&offline_in_nmi); + wait_for_ctrl(); +} + static noinstr bool microcode_update_handler(void) { unsigned int cpu = raw_smp_processor_id(); @@ -487,6 +549,7 @@ static int load_cpus_stopped(void *unused) static int load_late_stop_cpus(void) { unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0; + unsigned int nr_offl, offline = 0; int old_rev = boot_cpu_data.microcode; struct cpuinfo_x86 prev_info; @@ -494,6 +557,7 @@ static int load_late_stop_cpus(void) pr_err("You should switch to early loading, if possible.\n"); atomic_set(&late_cpus_in, num_online_cpus()); + atomic_set(&offline_in_nmi, 0); loops_per_usec = loops_per_jiffy / (TICK_NSEC / 1000); /* @@ -516,6 +580,7 @@ static int load_late_stop_cpus(void) case UCODE_UPDATED: updated++; break; case UCODE_TIMEOUT: timedout++; break; case UCODE_OK: siblings++; break; + case UCODE_OFFLINE: offline++; break; default: failed++; break; } } @@ -527,6 +592,13 @@ static int load_late_stop_cpus(void) /* Nothing changed. */ if (!failed && !timedout) return 0; + + nr_offl = cpumask_weight(&cpu_offline_mask); + if (offline < nr_offl) { + pr_warn("%u offline siblings did not respond.\n", + nr_offl - atomic_read(&offline_in_nmi)); + return -EIO; + } pr_err("update failed: %u CPUs failed %u CPUs timed out\n", failed, timedout); return -EIO; @@ -560,19 +632,49 @@ static int load_late_stop_cpus(void) * modern CPUs uses MWAIT, which is also not guaranteed to be safe * against a microcode update which affects MWAIT. * - * 2) Initialize the per CPU control structure + * As soft-offlined CPUs still react on NMIs, the SMT sibling + * restriction can be lifted when the vendor driver signals to use NMI + * for rendezvous and the APIC provides a mechanism to send an NMI to a + * soft-offlined CPU. 
The soft-offlined CPUs are then able to + * participate in the rendezvous in a trivial stub handler. + * + * 2) Initialize the per CPU control structure and create a cpumask + * which contains "offline"; secondary threads, so they can be handled + * correctly by a control CPU. */ static bool setup_cpus(void) { struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, }; + bool allow_smt_offline; unsigned int cpu; + allow_smt_offline = microcode_ops->nmi_safe || + (microcode_ops->use_nmi && apic->nmi_to_offline_cpu); + + cpumask_clear(&cpu_offline_mask); + for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { + /* + * Offline CPUs sit in one of the play_dead() functions + * with interrupts disabled, but they still react on NMIs + * and execute arbitrary code. Also MWAIT being updated + * while the offline CPU sits there is not necessarily safe + * on all CPU variants. + * + * Mark them in the offline_cpus mask which will be handled + * by CPU0 later in the update process. + * + * Ensure that the primary thread is online so that it is + * guaranteed that all cores are updated. 
+ */ if (!cpu_online(cpu)) { - if (topology_is_primary_thread(cpu) || !microcode_ops->nmi_safe) { - pr_err("CPU %u not online\n", cpu); + if (topology_is_primary_thread(cpu) || !allow_smt_offline) { + pr_err("CPU %u not online, loading aborted\n", cpu); return false; } + cpumask_set_cpu(cpu, &cpu_offline_mask); + per_cpu(ucode_ctrl, cpu) = ctrl; + continue; } /* diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index a1fdfb6bd015..aaebbe7ef126 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -17,6 +17,7 @@ enum ucode_state { UCODE_NFOUND, UCODE_ERROR, UCODE_TIMEOUT, + UCODE_OFFLINE, }; struct microcode_ops { diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index cdca650af532..6da2cfa23c29 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -502,8 +502,11 @@ DEFINE_IDTENTRY_RAW(exc_nmi) if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) raw_atomic_long_inc(&nsp->idt_calls); - if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) + if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) { + if (microcode_nmi_handler_enabled()) + microcode_offline_nmi_handler(); return; + } if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { this_cpu_write(nmi_state, NMI_LATCHED); -- Gitee From fa059abdd97f6bd2da6afda13e452e996483ed75 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 22 Jan 2024 13:27:11 +0800 Subject: [PATCH 0093/2138] x86/microcode: Prepare for minimal revision check ANBZ: #8003 commit 9407bda845dd19756e276d4f3abc15a20777ba45 upstream. Applying microcode late can be fatal for the running kernel when the update changes functionality which is in use already in a non-compatible way, e.g. by removing a CPUID bit. There is no way for admins which do not have access to the vendors deep technical support to decide whether late loading of such a microcode is safe or not. 
Intel has added a new field to the microcode header which tells the minimal microcode revision which is required to be active in the CPU in order to be safe. Provide infrastructure for handling this in the core code and a command line switch which allows to enforce it. If the update is considered safe the kernel is not tainted and the annoying warning message not emitted. If it's enforced and the currently loaded microcode revision is not safe for late loading then the load is aborted. Intel-SIG: commit 9407bda845dd x86/microcode: Prepare for minimal revision check. Microcode restructuring backport. Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231017211724.079611170@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- .../admin-guide/kernel-parameters.txt | 5 ++++ arch/x86/Kconfig | 23 ++++++++++++++++++- arch/x86/kernel/cpu/microcode/amd.c | 3 +++ arch/x86/kernel/cpu/microcode/core.c | 19 +++++++++++---- arch/x86/kernel/cpu/microcode/intel.c | 3 +++ arch/x86/kernel/cpu/microcode/internal.h | 2 ++ 6 files changed, 49 insertions(+), 6 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index d83a3f47e200..184f2f96f6a5 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3287,6 +3287,11 @@ mga= [HW,DRM] + microcode.force_minrev= [X86] + Format: + Enable or disable the microcode minimal revision + enforcement for the runtime microcode loader. + min_addr=nn[KMG] [KNL,BOOT,IA-64] All physical memory below this physical address is ignored. 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 59aa22a1e062..45562660bd52 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1327,7 +1327,28 @@ config MICROCODE_LATE_LOADING is a tricky business and should be avoided if possible. Just the sequence of synchronizing all cores and SMT threads is one fragile dance which does not guarantee that cores might not softlock after the loading. Therefore, - use this at your own risk. Late loading taints the kernel too. + use this at your own risk. Late loading taints the kernel unless the + microcode header indicates that it is safe for late loading via the + minimal revision check. This minimal revision check can be enforced on + the kernel command line with "microcode.minrev=Y". + +config MICROCODE_LATE_FORCE_MINREV + bool "Enforce late microcode loading minimal revision check" + default n + depends on MICROCODE_LATE_LOADING + help + To prevent that users load microcode late which modifies already + in use features, newer microcode patches have a minimum revision field + in the microcode header, which tells the kernel which minimum + revision must be active in the CPU to safely load that new microcode + late into the running system. If disabled the check will not + be enforced but the kernel will be tainted when the minimal + revision check fails. + + This minimal revision check can also be controlled via the + "microcode.minrev" parameter on the kernel command line. + + If unsure say Y. 
config X86_MSR tristate "/dev/cpu/*/msr - Model-specific register support" diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index f0b246eda09c..2ba4f7dd445a 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -892,6 +892,9 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device) enum ucode_state ret = UCODE_NFOUND; const struct firmware *fw; + if (force_minrev) + return UCODE_NFOUND; + if (x86_cpuid_vendor() == X86_VENDOR_AMD && c->x86 >= 0x15) snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index bd8f7ffab96c..7196ad323c4b 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -50,6 +50,9 @@ static struct microcode_ops *microcode_ops; #endif bool dis_ucode_ldr = true; +bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV); +module_param(force_minrev, bool, S_IRUSR | S_IWUSR); + /* * Synchronization. 
* @@ -546,15 +549,17 @@ static int load_cpus_stopped(void *unused) return 0; } -static int load_late_stop_cpus(void) +static int load_late_stop_cpus(bool is_safe) { unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0; unsigned int nr_offl, offline = 0; int old_rev = boot_cpu_data.microcode; struct cpuinfo_x86 prev_info; - pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n"); - pr_err("You should switch to early loading, if possible.\n"); + if (!is_safe) { + pr_err("Late microcode loading without minimal revision check.\n"); + pr_err("You should switch to early loading, if possible.\n"); + } atomic_set(&late_cpus_in, num_online_cpus()); atomic_set(&offline_in_nmi, 0); @@ -604,7 +609,9 @@ static int load_late_stop_cpus(void) return -EIO; } - add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + if (!is_safe || failed || timedout) + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings); if (failed || timedout) { pr_err("load incomplete. 
%u CPUs timed out or failed\n", @@ -694,7 +701,9 @@ static int load_late_locked(void) switch (microcode_ops->request_microcode_fw(0, µcode_pdev->dev)) { case UCODE_NEW: - return load_late_stop_cpus(); + return load_late_stop_cpus(false); + case UCODE_NEW_SAFE: + return load_late_stop_cpus(true); case UCODE_NFOUND: return -ENOENT; default: diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 905ed3b557fb..14aa4c6d4c14 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -480,6 +480,9 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) unsigned int curr_mc_size = 0; u8 *new_mc = NULL, *mc = NULL; + if (force_minrev) + return UCODE_NFOUND; + while (iov_iter_count(iter)) { struct microcode_header_intel mc_header; unsigned int mc_size, data_size; diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index aaebbe7ef126..980ef806b377 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -13,6 +13,7 @@ struct device; enum ucode_state { UCODE_OK = 0, UCODE_NEW, + UCODE_NEW_SAFE, UCODE_UPDATED, UCODE_NFOUND, UCODE_ERROR, @@ -94,6 +95,7 @@ static inline unsigned int x86_cpuid_family(void) } extern bool dis_ucode_ldr; +extern bool force_minrev; #ifdef CONFIG_CPU_SUP_AMD void load_ucode_amd_bsp(unsigned int family); -- Gitee From 7c5962225f85b86797bada791a4ec784199cdecc Mon Sep 17 00:00:00 2001 From: Ashok Raj Date: Mon, 2 Oct 2023 14:00:11 +0200 Subject: [PATCH 0094/2138] x86/microcode/intel: Add a minimum required revision for late loading ANBZ: #8003 commit cf5ab01c87030a085e211a0a327535932ec6f719 upstream. In general users, don't have the necessary information to determine whether late loading of a new microcode version is safe and does not modify anything which the currently running kernel uses already, e.g. removal of CPUID bits or behavioural changes of MSRs. 
To address this issue, Intel has added a "minimum required version" field to a previously reserved field in the microcode header. Microcode updates should only be applied if the current microcode version is equal to, or greater than this minimum required version. Thomas made some suggestions on how meta-data in the microcode file could provide Linux with information to decide if the new microcode is suitable candidate for late loading. But even the "simpler" option requires a lot of metadata and corresponding kernel code to parse it, so the final suggestion was to add the 'minimum required version' field in the header. When microcode changes visible features, microcode will set the minimum required version to its own revision which prevents late loading. Old microcode blobs have the minimum revision field always set to 0, which indicates that there is no information and the kernel considers it unsafe. This is a pure OS software mechanism. The hardware/firmware ignores this header field. For early loading there is no restriction because OS visible features are enumerated after the early load and therefore a change has no effect. The check is always enabled, but by default not enforced. It can be enforced via Kconfig or kernel command line. If enforced, the kernel refuses to late load microcode with a minimum required version field which is zero or when the currently loaded microcode revision is smaller than the minimum required revision. If not enforced the load happens independent of the revision check to stay compatible with the existing behaviour, but it influences the decision whether the kernel is tainted or not. If the check signals that the late load is safe, then the kernel is not tainted. Early loading is not affected by this. [ tglx: Massaged changelog and fixed up the implementation ] Intel-SIG: commit cf5ab01c8703 x86/microcode/intel: Add a minimum required revision for late loading. Microcode restructuring backport. 
Suggested-by: Thomas Gleixner Signed-off-by: Ashok Raj Signed-off-by: Thomas Gleixner Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20231002115903.776467264@linutronix.de [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/microcode.h | 3 ++- arch/x86/kernel/cpu/microcode/intel.c | 37 ++++++++++++++++++++++++--- 2 files changed, 35 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 0ee6ed0ff2bf..695e569159c1 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -38,7 +38,8 @@ struct microcode_header_intel { unsigned int datasize; unsigned int totalsize; unsigned int metasize; - unsigned int reserved[2]; + unsigned int min_req_ver; + unsigned int reserved; }; struct microcode_intel { diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 14aa4c6d4c14..6024feb98d29 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -473,16 +473,40 @@ static enum ucode_state apply_microcode_late(int cpu) return ret; } +static bool ucode_validate_minrev(struct microcode_header_intel *mc_header) +{ + int cur_rev = boot_cpu_data.microcode; + + /* + * When late-loading, ensure the header declares a minimum revision + * required to perform a late-load. The previously reserved field + * is 0 in older microcode blobs. + */ + if (!mc_header->min_req_ver) { + pr_info("Unsafe microcode update: Microcode header does not specify a required min version\n"); + return false; + } + + /* + * Check whether the current revision is either greater or equal + * to the minimum revision specified in the header. 
+ */ + if (cur_rev < mc_header->min_req_ver) { + pr_info("Unsafe microcode update: Current revision 0x%x too old\n", cur_rev); + pr_info("Current should be at 0x%x or higher. Use early loading instead\n", mc_header->min_req_ver); + return false; + } + return true; +} + static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; + bool is_safe, new_is_safe = false; int cur_rev = uci->cpu_sig.rev; unsigned int curr_mc_size = 0; u8 *new_mc = NULL, *mc = NULL; - if (force_minrev) - return UCODE_NFOUND; - while (iov_iter_count(iter)) { struct microcode_header_intel mc_header; unsigned int mc_size, data_size; @@ -525,9 +549,14 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) if (!intel_find_matching_signature(mc, &uci->cpu_sig)) continue; + is_safe = ucode_validate_minrev(&mc_header); + if (force_minrev && !is_safe) + continue; + kvfree(new_mc); cur_rev = mc_header.rev; new_mc = mc; + new_is_safe = is_safe; mc = NULL; } @@ -539,7 +568,7 @@ static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) return UCODE_NFOUND; ucode_patch_late = (struct microcode_intel *)new_mc; - return UCODE_NEW; + return new_is_safe ? UCODE_NEW_SAFE : UCODE_NEW; fail: kvfree(mc); -- Gitee From 2af3b11a44ed09f4986bd79ebca6f32e4e0f835d Mon Sep 17 00:00:00 2001 From: Yuntao Wang Date: Mon, 13 Nov 2023 11:40:26 +0800 Subject: [PATCH 0095/2138] x86/setup: Make relocated_ramdisk a local variable of relocate_initrd() ANBZ: #8003 commit f7a25cf1d4707da39b80df96a3be8a8abd07c35b upstream. After 0b62f6cb0773 ("x86/microcode/32: Move early loading after paging enable"), the global variable relocated_ramdisk is no longer used anywhere except for the relocate_initrd() function. Make it a local variable of that function. Intel-SIG: commit f7a25cf1d470 Make relocated_ramdisk a local variable of relocate_initrd(). Microcode restructuring backport. 
Signed-off-by: Yuntao Wang Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Baoquan He Link: https://lore.kernel.org/r/20231113034026.130679-1-ytcoode@gmail.com [ Aubrey Li: amend commit log ] Signed-off-by: Aubrey Li Reviewed-by: Pu Wen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2679 --- arch/x86/include/asm/setup.h | 2 -- arch/x86/kernel/setup.c | 4 +--- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index bf483fcb4e57..5c83729c8e71 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -31,8 +31,6 @@ #include #include -extern u64 relocated_ramdisk; - /* Interrupt control for vSMPowered x86_64 systems */ #ifdef CONFIG_X86_64 void vsmp_init(void); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index eb129277dcdd..3993353af472 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -226,8 +226,6 @@ static void __init reserve_brk(void) _brk_start = 0; } -u64 relocated_ramdisk; - #ifdef CONFIG_BLK_DEV_INITRD static u64 __init get_ramdisk_image(void) @@ -261,7 +259,7 @@ static void __init relocate_initrd(void) u64 area_size = PAGE_ALIGN(ramdisk_size); /* We need to move the initrd down into directly mapped mem */ - relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0, + u64 relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0, PFN_PHYS(max_pfn_mapped)); if (!relocated_ramdisk) panic("Cannot find place for new RAMDISK of size %lld\n", -- Gitee From 3db8b4aac165aad7ca61110d3acc236c5ff84457 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 24 Dec 2018 16:18:53 +0800 Subject: [PATCH 0096/2138] anolis: drivers/virtio: add vring_force_dma_api boot param ANBZ: #8326 Prior to xdragon platform 20181230 release (e.g. 0930 release), vring_use_dma_api() is required to return 'true' unconditionally. 
Introduce a new kernel boot parameter called "vring_force_dma_api" to control the behavior, boot xdragon host with "vring_force_dma_api" command line to make ENI hotplug work, so that normal ECS hosts keep the original behavior. Also supports virtio_ring.vring_force_dma_api=1/0 when virtio_ring is built as module. Signed-off-by: Eryu Guan Signed-off-by: Shannon Zhao Signed-off-by: Joseph Qi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2783 --- .../admin-guide/kernel-parameters.txt | 5 ++++ drivers/virtio/virtio_ring.c | 23 +++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 184f2f96f6a5..1a6639eb4b6c 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -7043,6 +7043,11 @@ vmpoff= [KNL,S390] Perform z/VM CP command after power off. Format: + vring_force_dma_api + Force virtio vring to use dma api. This is only needed + on xdragon platform (prior to 20181230 release, e.g. + 0930 release). + vsyscall= [X86-64] Controls the behavior of vsyscalls (i.e. 
calls to fixed addresses of 0xffffffffff600x00 from legacy diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 80669e05bf0e..e4e0829eac46 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #ifdef DEBUG @@ -251,6 +252,21 @@ static bool virtqueue_use_indirect(const struct vring_virtqueue *vq, return (vq->indirect && total_sg > 1 && vq->vq.num_free); } +static bool vring_force_dma_api; + +#ifdef MODULE +module_param(vring_force_dma_api, bool, 0640); +#else +static int __init vring_dma_api_setup(char *str) +{ + vring_force_dma_api = true; + printk(KERN_INFO "Force vring dma api enabled\n"); + + return 0; +} +__setup("vring_force_dma_api", vring_dma_api_setup); +#endif + /* * Modern virtio devices have feature bits to specify whether they need a * quirk and bypass the IOMMU. If not there, just use the DMA API. @@ -279,6 +295,13 @@ static bool virtqueue_use_indirect(const struct vring_virtqueue *vq, static bool vring_use_dma_api(const struct virtio_device *vdev) { + /* + * Prior to xdragon platform 20181230 release (e.g. 0930 release), we + * need this hack to get ENI hotplug to work. + */ + if (vring_force_dma_api) + return true; + if (!virtio_has_dma_quirk(vdev)) return true; -- Gitee From 34c2e410c48dd22054021ca81e56387fa5fdca15 Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Tue, 27 Feb 2024 14:51:39 +0800 Subject: [PATCH 0097/2138] anolis: kabi: Introduce kabi macro for anolis cloud-kernel ANBZ: #3879 Add kabi generic series of macros head file, this series of macros should be used for keeping related struct size not change. And also introduce CONFIG_CK_KABI_RESERVE and CONFIG_CK_KABI_SIZE_ALIGN_CHECKS configs. 
Signed-off-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2792 --- arch/arm64/configs/anolis-debug_defconfig | 2 + arch/arm64/configs/anolis_defconfig | 2 + arch/x86/configs/anolis-debug_defconfig | 2 + arch/x86/configs/anolis_defconfig | 2 + include/linux/ck_kabi.h | 532 ++++++++++++++++++++++ init/Kconfig | 18 + 6 files changed, 558 insertions(+) create mode 100644 include/linux/ck_kabi.h diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 247a3d434dab..b3c3c3da6168 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -957,6 +957,8 @@ CONFIG_ARCH_USE_QUEUED_RWLOCKS=y CONFIG_QUEUED_RWLOCKS=y CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_CK_KABI_RESERVE=y +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y CONFIG_FREEZER=y # diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index ffd410167da4..dadd27949f52 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -977,6 +977,8 @@ CONFIG_ARCH_USE_QUEUED_RWLOCKS=y CONFIG_QUEUED_RWLOCKS=y CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_CK_KABI_RESERVE=y +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y CONFIG_FREEZER=y # diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index a61c59967e02..b5adf870d839 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -1016,6 +1016,8 @@ CONFIG_QUEUED_RWLOCKS=y CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_CK_KABI_RESERVE=y +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y CONFIG_FREEZER=y # diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 937a54d025e9..be00c2ce6add 100644 --- a/arch/x86/configs/anolis_defconfig +++ 
b/arch/x86/configs/anolis_defconfig @@ -1011,6 +1011,8 @@ CONFIG_QUEUED_RWLOCKS=y CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_CK_KABI_RESERVE=y +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y CONFIG_FREEZER=y # diff --git a/include/linux/ck_kabi.h b/include/linux/ck_kabi.h new file mode 100644 index 000000000000..a2ecc950c93a --- /dev/null +++ b/include/linux/ck_kabi.h @@ -0,0 +1,532 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ck_kabi.h - Anolis Cloud-Kernel kABI abstraction header + * + * Copyright (c) 2014 Don Zickus + * Copyright (c) 2015-2018 Jiri Benc + * Copyright (c) 2015 Sabrina Dubroca, Hannes Frederic Sowa + * Copyright (c) 2016-2018 Prarit Bhargava + * Copyright (c) 2017 Paolo Abeni, Larry Woodman + * Copyright (c) 2023 Guixin Liu + * + * This file is released under the GPLv2. + * See the file COPYING for more details. + * + * These kabi macros hide the changes from the kabi checker and from the + * process that computes the exported symbols' checksums. + * They have 2 variants: one (defined under __GENKSYMS__) used when + * generating the checksums, and the other used when building the kernel's + * binaries. + * + * The use of these macros does not guarantee that the usage and modification + * of code is correct. As with all Anolis only changes, an engineer must + * explain why the use of the macro is valid in the patch containing the + * changes. + * + */ + +#ifndef _LINUX_CK_KABI_H +#define _LINUX_CK_KABI_H + +#include +#include +#include + +/* + * NOTE + * Unless indicated otherwise, don't use ';' after these macros as it + * messes up the kABI checker by changing what the resulting token string + * looks like. Instead let the macros add the ';' so it can be properly + * hidden from the kABI checker (mainly for CK_KABI_EXTEND, but applied to + * most macros for uniformity). 
+ * + * + * CK_KABI_CONST + * Adds a new const modifier to a function parameter preserving the old + * checksum. + * + * CK_KABI_ADD_MODIFIER + * Adds a new modifier to a function parameter or a typedef, preserving + * the old checksum. Useful e.g. for adding rcu annotations or changing + * int to unsigned. Beware that this may change the semantics; if you're + * sure this is safe, always explain why binary compatibility with 3rd + * party modules is retained. + * + * CK_KABI_DEPRECATE + * Marks the element as deprecated and make it unusable by modules while + * keeping a hole in its place to preserve binary compatibility. + * + * CK_KABI_DEPRECATE_FN + * Marks the function pointer as deprecated and make it unusable by modules + * while keeping a hole in its place to preserve binary compatibility. + * + * CK_KABI_EXTEND + * Adds a new field to a struct. This must always be added to the end of + * the struct. Before using this macro, make sure this is actually safe + * to do - there is a number of conditions under which it is *not* safe. + * In particular (but not limited to), this macro cannot be used: + * - if the struct in question is embedded in another struct, or + * - if the struct is allocated by drivers either statically or + * dynamically, or + * - if the struct is allocated together with driver data (an example of + * such behavior is struct net_device or struct request). + * + * CK_KABI_EXTEND_WITH_SIZE + * Adds a new element (usually a struct) to a struct and reserves extra + * space for the new element. The provided 'size' is the total space to + * be added in longs (i.e. it's 8 * 'size' bytes), including the size of + * the added element. It is automatically checked that the new element + * does not overflow the reserved space, now nor in the future. However, + * no attempt is done to check the content of the added element (struct) + * for kABI conformance - kABI checking inside the added element is + * effectively switched off. 
+ * For any struct being added by CK_KABI_EXTEND_WITH_SIZE, it is + * recommended its content to be documented as not covered by kABI + * guarantee. + * + * CK_KABI_FILL_HOLE + * Fills a hole in a struct. + * + * Warning: only use if a hole exists for _all_ arches. Use pahole to verify. + * + * CK_KABI_RENAME + * Renames an element without changing its type. This macro can be used in + * bitfields, for example. + * + * NOTE: this macro does not add the final ';' + * + * CK_KABI_REPLACE + * Replaces the _orig field by the _new field. The size of the occupied + * space is preserved, it's fine if the _new field is smaller than the + * _orig field. If a _new field is larger or has a different alignment, + * compilation will abort. + * + * CK_KABI_REPLACE_SPLIT + * Works the same as CK_KABI_REPLACE but replaces a single _orig field by + * multiple new fields. The checks for size and alignment done by + * CK_KABI_REPLACE are still applied. + * + * CK_KABI_HIDE_INCLUDE + * Hides the given include file from kABI checksum computations. This is + * used when a newly added #include makes a previously opaque struct + * visible. + * + * Example usage: + * #include CK_KABI_HIDE_INCLUDE() + * + * CK_KABI_FAKE_INCLUDE + * Pretends inclusion of the given file for kABI checksum computations. + * This is used when upstream removed a particular #include but that made + * some structures opaque that were previously visible and is causing kABI + * checker failures. + * + * Example usage: + * #include CK_KABI_FAKE_INCLUDE() + * + * CK_KABI_RESERVE + * Adds a reserved field to a struct. This is done prior to kABI freeze + * for structs that cannot be expanded later using CK_KABI_EXTEND (for + * example because they are embedded in another struct or because they are + * allocated by drivers or because they use unusual memory layout). The + * size of the reserved field is 'unsigned long' and is assumed to be + * 8 bytes. 
+ * + * The argument is a number unique for the given struct; usually, multiple + * CK_KABI_RESERVE macros are added to a struct with numbers starting from + * one. + * + * Example usage: + * struct foo { + * int a; + * CK_KABI_RESERVE(1) + * CK_KABI_RESERVE(2) + * CK_KABI_RESERVE(3) + * CK_KABI_RESERVE(4) + * }; + * + * CK_KABI_USE + * Uses a previously reserved field or multiple fields. The arguments are + * one or more numbers assigned to CK_KABI_RESERVE, followed by a field to + * be put in their place. The compiler ensures that the new field is not + * larger than the reserved area. + * + * Example usage: + * struct foo { + * int a; + * CK_KABI_USE(1, int b) + * CK_KABI_USE(2, 3, int c[3]) + * CK_KABI_RESERVE(4) + * }; + * + * CK_KABI_USE_SPLIT + * Works the same as CK_KABI_USE but replaces a single reserved field by + * multiple new fields. + * + * CK_KABI_AUX_EMBED + * CK_KABI_AUX_PTR + * Adds an extension of a struct in the form of "auxiliary structure". + * This is done prior to kABI freeze for structs that cannot be expanded + * later using CK_KABI_EXTEND. See also CK_KABI_RESERVED, these two + * approaches can (and often are) combined. + * + * To use this for 'struct foo' (the "base structure"), define a new + * structure called 'struct foo_ck_reserved'; this new struct is called "auxiliary + * structure". Then add CK_KABI_AUX_EMBED or CK_KABI_AUX_PTR to the end + * of the base structure. The argument is the name of the base structure, + * without the 'struct' keyword. + * + * CK_KABI_AUX_PTR stores a pointer to the aux structure in the base + * struct. The lifecycle of the aux struct needs to be properly taken + * care of. + * + * CK_KABI_AUX_EMBED embeds the aux struct into the base struct. This + * cannot be used when the base struct is itself embedded into another + * struct, allocated in an array, etc. + * + * Both approaches (ptr and embed) work correctly even when the aux struct + * is allocated by modules. 
To ensure this, the code responsible for + * allocation/assignment of the aux struct has to properly set the size of + * the aux struct; see the CK_KABI_AUX_SET_SIZE and CK_KABI_AUX_INIT_SIZE + * macros. + * + * New fields can be later added to the auxiliary structure, always to its + * end. Note the auxiliary structure cannot be shrunk in size later (i.e., + * fields cannot be removed, only deprecated). Any code accessing fields + * from the aux struct must guard the access using the CK_KABI_AUX macro. + * The access itself is then done via a '_ck_reserved' field in the base struct. + * + * The auxiliary structure is not guaranteed for access by modules unless + * explicitly commented as such in the declaration of the aux struct + * itself or some of its elements. + * + * Example: + * + * struct foo_ck_reserved { + * int newly_added; + * }; + * + * struct foo { + * bool big_hammer; + * CK_KABI_AUX_PTR(foo) + * }; + * + * void use(struct foo *f) + * { + * if (CK_KABI_AUX(f, foo, newly_added)) + * f->_ck_reserved->newly_added = 123; + * else + * // the field 'newly_added' is not present in the passed + * // struct, fall back to old behavior + * f->big_hammer = true; + * } + * + * static struct foo_ck_reserved my_foo_ck_reserved { + * .newly_added = 0; + * } + * + * static struct foo my_foo = { + * .big_hammer = false, + * ._ck_reserved = &my_foo_ck_reserved, + * CK_KABI_AUX_INIT_SIZE(foo) + * }; + * + * CK_KABI_USE_AUX_PTR + * Creates an auxiliary structure post kABI freeze. This works by using + * two reserved fields (thus there has to be two reserved fields still + * available) and converting them to CK_KABI_AUX_PTR. + * + * Example: + * + * struct foo_ck_reserved { + * }; + * + * struct foo { + * int a; + * CK_KABI_RESERVE(1) + * CK_KABI_USE_AUX_PTR(2, 3, foo) + * }; + * + * CK_KABI_AUX_SET_SIZE + * CK_KABI_AUX_INIT_SIZE + * Calculates and stores the size of the auxiliary structure. 
+ * + * CK_KABI_AUX_SET_SIZE is for dynamically allocated base structs, + * CK_KABI_AUX_INIT_SIZE is for statically allocated case structs. + * + * These macros must be called from the allocation (CK_KABI_AUX_SET_SIZE) + * or declaration (CK_KABI_AUX_INIT_SIZE) site, regardless of whether + * that happens in the kernel or in a module. Without calling one of + * these macros, the aux struct will appear to have no fields to the + * kernel. + * + * Note: since CK_KABI_AUX_SET_SIZE is intended to be invoked outside of + * a struct definition, it does not add the semicolon and must be + * terminated by semicolon by the caller. + * + * CK_KABI_AUX + * Verifies that the given field exists in the given auxiliary structure. + * This MUST be called prior to accessing that field; failing to do that + * may lead to invalid memory access. + * + * The first argument is a pointer to the base struct, the second argument + * is the name of the base struct (without the 'struct' keyword), the + * third argument is the field name. + * + * This macro works for structs extended by either of CK_KABI_AUX_EMBED, + * CK_KABI_AUX_PTR and CK_KABI_USE_AUX_PTR. + * + * CK_KABI_FORCE_CHANGE + * Force change of the symbol checksum. The argument of the macro is a + * version for cases we need to do this more than once. + * + * This macro does the opposite: it changes the symbol checksum without + * actually changing anything about the exported symbol. It is useful for + * symbols that are not whitelisted, we're changing them in an + * incompatible way and want to prevent 3rd party modules to silently + * corrupt memory. Instead, by changing the symbol checksum, such modules + * won't be loaded by the kernel. This macro should only be used as a + * last resort when all other KABI workarounds have failed. + * + * CK_KABI_EXCLUDE + * !!! WARNING: DANGEROUS, DO NOT USE unless you are aware of all the !!! + * !!! implications. This should be used ONLY EXCEPTIONALLY and only !!! + * !!! 
under specific circumstances. Very likely, this macro does not !!! + * !!! do what you expect it to do. Note that any usage of this macro !!! + * !!! MUST be paired with a CK_KABI_FORCE_CHANGE annotation of !!! + * !!! a suitable symbol (or an equivalent safeguard) and the commit !!! + * !!! log MUST explain why the chosen solution is appropriate. !!! + * + * Exclude the element from checksum generation. Any such element is + * considered not to be part of the kABI whitelist and may be changed at + * will. Note however that it's the responsibility of the developer + * changing the element to ensure 3rd party drivers using this element + * won't panic, for example by not allowing them to be loaded. That can + * be achieved by changing another, non-whitelisted symbol they use, + * either by nature of the change or by using CK_KABI_FORCE_CHANGE. + * + * Also note that any change to the element must preserve its size. Change + * of the size is not allowed and would constitute a silent kABI breakage. + * Beware that the CK_KABI_EXCLUDE macro does not do any size checks. + * + * CK_KABI_BROKEN_INSERT + * CK_KABI_BROKEN_REMOVE + * Insert a field to the middle of a struct / delete a field from a struct. + * Note that this breaks kABI! It can be done only when it's certain that + * no 3rd party driver can validly reach into the struct. A typical + * example is a struct that is: both (a) referenced only through a long + * chain of pointers from another struct that is part of a whitelisted + * symbol and (b) kernel internal only, it should have never been visible + * to genksyms in the first place. + * + * Another example are structs that are explicitly exempt from kABI + * guarantee but we did not have enough foresight to use CK_KABI_EXCLUDE. + * In this case, the warning for CK_KABI_EXCLUDE applies. + * + * A detailed explanation of correctness of every CK_KABI_BROKEN_* macro + * use is especially important. 
+ * + * CK_KABI_BROKEN_INSERT_BLOCK + * CK_KABI_BROKEN_REMOVE_BLOCK + * A version of CK_KABI_BROKEN_INSERT / REMOVE that allows multiple fields + * to be inserted or removed together. All fields need to be terminated + * by ';' inside(!) the macro parameter. The macro itself must not be + * terminated by ';'. + * + * CK_KABI_BROKEN_REPLACE + * Replace a field by a different one without doing any checking. This + * allows replacing a field by another with a different size. Similarly + * to other CK_KABI_BROKEN macros, use of this indicates a kABI breakage. + * + * CK_KABI_BROKEN_INSERT_ENUM + * CK_KABI_BROKEN_REMOVE_ENUM + * Insert a field to the middle of an enumaration type / delete a field from + * an enumaration type. Note that this can break kABI especially if the + * number of enum fields is used in an array within a structure. It can be + * done only when it is certain that no 3rd party driver will use the + * enumeration type or a structure that embeds an array with size determined + * by an enumeration type. + * + * CK_KABI_EXTEND_ENUM + * Adds a new field to an enumeration type. This must always be added to + * the end of the enum. Before using this macro, make sure this is actually + * safe to do. 
+ */ + +#ifdef __GENKSYMS__ + +# define CK_KABI_CONST +# define CK_KABI_ADD_MODIFIER(_new) +# define CK_KABI_EXTEND(_new) +# define CK_KABI_FILL_HOLE(_new) +# define CK_KABI_FORCE_CHANGE(ver) __attribute__((ck_kabi_change ## ver)) +# define CK_KABI_RENAME(_orig, _new) _orig +# define CK_KABI_HIDE_INCLUDE(_file) +# define CK_KABI_FAKE_INCLUDE(_file) _file +# define CK_KABI_BROKEN_INSERT(_new) +# define CK_KABI_BROKEN_REMOVE(_orig) _orig; +# define CK_KABI_BROKEN_INSERT_BLOCK(_new) +# define CK_KABI_BROKEN_REMOVE_BLOCK(_orig) _orig +# define CK_KABI_BROKEN_REPLACE(_orig, _new) _orig; +# define CK_KABI_BROKEN_INSERT_ENUM(_new) +# define CK_KABI_BROKEN_REMOVE_ENUM(_orig) _orig, +# define CK_KABI_EXTEND_ENUM(_new) + +# define _CK_KABI_DEPRECATE(_type, _orig) _type _orig +# define _CK_KABI_DEPRECATE_FN(_type, _orig, _args...) _type (*_orig)(_args) +# define _CK_KABI_REPLACE(_orig, _new) _orig +# define _CK_KABI_EXCLUDE(_elem) + +#else + +# define CK_KABI_ALIGN_WARNING ". Disable CONFIG_CK_KABI_SIZE_ALIGN_CHECKS if debugging." 
+ +# define CK_KABI_CONST const +# define CK_KABI_ADD_MODIFIER(_new) _new +# define CK_KABI_EXTEND(_new) _new; +# define CK_KABI_FILL_HOLE(_new) _new; +# define CK_KABI_FORCE_CHANGE(ver) +# define CK_KABI_RENAME(_orig, _new) _new +# define CK_KABI_HIDE_INCLUDE(_file) _file +# define CK_KABI_FAKE_INCLUDE(_file) +# define CK_KABI_BROKEN_INSERT(_new) _new; +# define CK_KABI_BROKEN_REMOVE(_orig) +# define CK_KABI_BROKEN_INSERT_BLOCK(_new) _new +# define CK_KABI_BROKEN_REMOVE_BLOCK(_orig) +# define CK_KABI_BROKEN_REPLACE(_orig, _new) _new; +# define CK_KABI_BROKEN_INSERT_ENUM(_new) _new, +# define CK_KABI_BROKEN_REMOVE_ENUM(_orig) +# define CK_KABI_EXTEND_ENUM(_new) _new, + +#if IS_BUILTIN(CONFIG_CK_KABI_SIZE_ALIGN_CHECKS) +# define __CK_KABI_CHECK_SIZE_ALIGN(_orig, _new) \ + union { \ + _Static_assert(sizeof(struct{_new;}) <= sizeof(struct{_orig;}), \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_new) " is larger than " __stringify(_orig) CK_KABI_ALIGN_WARNING); \ + _Static_assert(__alignof__(struct{_new;}) <= __alignof__(struct{_orig;}), \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_orig) " is not aligned the same as " __stringify(_new) CK_KABI_ALIGN_WARNING); \ + } +# define __CK_KABI_CHECK_SIZE(_item, _size) \ + _Static_assert(sizeof(struct{_item;}) <= _size, \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_item) " is larger than the reserved size (" __stringify(_size) " bytes)" CK_KABI_ALIGN_WARNING) +#else +# define __CK_KABI_CHECK_SIZE_ALIGN(_orig, _new) +# define __CK_KABI_CHECK_SIZE(_item, _size) +#endif + +#define CK_KABI_UNIQUE_ID __PASTE(ck_kabi_hidden_, __LINE__) + +# define _CK_KABI_DEPRECATE(_type, _orig) _type ck_reserved_##_orig +# define _CK_KABI_DEPRECATE_FN(_type, _orig, _args...) 
\ + _type (* ck_reserved_##_orig)(_args) + +#ifdef CONFIG_CK_KABI_RESERVE +# define _CK_KABI_REPLACE(_orig, _new) \ + union { \ + _new; \ + struct { \ + _orig; \ + } CK_KABI_UNIQUE_ID; \ + __CK_KABI_CHECK_SIZE_ALIGN(_orig, _new); \ + } +#else +# define _CK_KABI_REPLACE(_orig, _new) CK_KABI_BROKEN_REPLACE(_orig, _new) +#endif + +# define _CK_KABI_EXCLUDE(_elem) _elem + +#endif /* __GENKSYMS__ */ + +# define CK_KABI_DEPRECATE(_type, _orig) _CK_KABI_DEPRECATE(_type, _orig); +# define CK_KABI_DEPRECATE_FN(_type, _orig, _args...) \ + _CK_KABI_DEPRECATE_FN(_type, _orig, _args); +# define CK_KABI_REPLACE(_orig, _new) _CK_KABI_REPLACE(_orig, _new); + +#define _CK_KABI_REPLACE1(_new) _new; +#define _CK_KABI_REPLACE2(_new, ...) _new; _CK_KABI_REPLACE1(__VA_ARGS__) +#define _CK_KABI_REPLACE3(_new, ...) _new; _CK_KABI_REPLACE2(__VA_ARGS__) +#define _CK_KABI_REPLACE4(_new, ...) _new; _CK_KABI_REPLACE3(__VA_ARGS__) +#define _CK_KABI_REPLACE5(_new, ...) _new; _CK_KABI_REPLACE4(__VA_ARGS__) +#define _CK_KABI_REPLACE6(_new, ...) _new; _CK_KABI_REPLACE5(__VA_ARGS__) +#define _CK_KABI_REPLACE7(_new, ...) _new; _CK_KABI_REPLACE6(__VA_ARGS__) +#define _CK_KABI_REPLACE8(_new, ...) _new; _CK_KABI_REPLACE7(__VA_ARGS__) +#define _CK_KABI_REPLACE9(_new, ...) _new; _CK_KABI_REPLACE8(__VA_ARGS__) +#define _CK_KABI_REPLACE10(_new, ...) _new; _CK_KABI_REPLACE9(__VA_ARGS__) +#define _CK_KABI_REPLACE11(_new, ...) _new; _CK_KABI_REPLACE10(__VA_ARGS__) +#define _CK_KABI_REPLACE12(_new, ...) _new; _CK_KABI_REPLACE11(__VA_ARGS__) + +#define CK_KABI_REPLACE_SPLIT(_orig, ...) _CK_KABI_REPLACE(_orig, \ + struct { __PASTE(_CK_KABI_REPLACE, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__) }); + +# define CK_KABI_RESERVE(n) _CK_KABI_RESERVE(n); + +#define _CK_KABI_USE1(n, _new) _CK_KABI_RESERVE(n), _new +#define _CK_KABI_USE2(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE1(__VA_ARGS__) +#define _CK_KABI_USE3(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE2(__VA_ARGS__) +#define _CK_KABI_USE4(n, ...) 
_CK_KABI_RESERVE(n); _CK_KABI_USE3(__VA_ARGS__) +#define _CK_KABI_USE5(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE4(__VA_ARGS__) +#define _CK_KABI_USE6(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE5(__VA_ARGS__) +#define _CK_KABI_USE7(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE6(__VA_ARGS__) +#define _CK_KABI_USE8(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE7(__VA_ARGS__) +#define _CK_KABI_USE9(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE8(__VA_ARGS__) +#define _CK_KABI_USE10(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE9(__VA_ARGS__) +#define _CK_KABI_USE11(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE10(__VA_ARGS__) +#define _CK_KABI_USE12(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE11(__VA_ARGS__) + +#define _CK_KABI_USE(...) _CK_KABI_REPLACE(__VA_ARGS__) +#define CK_KABI_USE(n, ...) _CK_KABI_USE(__PASTE(_CK_KABI_USE, COUNT_ARGS(__VA_ARGS__))(n, __VA_ARGS__)); + +# define CK_KABI_USE_SPLIT(n, ...) CK_KABI_REPLACE_SPLIT(_CK_KABI_RESERVE(n), __VA_ARGS__) + +#ifdef CONFIG_CK_KABI_RESERVE +# define _CK_KABI_RESERVE(n) unsigned long ck_reserved##n +#else +# define _CK_KABI_RESERVE(n) +#endif + +#define CK_KABI_EXCLUDE(_elem) _CK_KABI_EXCLUDE(_elem); + +#define CK_KABI_EXTEND_WITH_SIZE(_new, _size) \ + CK_KABI_EXTEND(union { \ + _new; \ + unsigned long CK_KABI_UNIQUE_ID[_size]; \ + __CK_KABI_CHECK_SIZE(_new, 8 * (_size)); \ + }) + +#ifdef CONFIG_CK_KABI_RESERVE +#define _CK_KABI_AUX_PTR(_struct) \ + size_t _struct##_size_ck_reserved; \ + _CK_KABI_EXCLUDE(struct _struct##_ck_reserved *_ck_reserved) +#define CK_KABI_AUX_PTR(_struct) \ + _CK_KABI_AUX_PTR(_struct); + +#define _CK_KABI_AUX_EMBED(_struct) \ + size_t _struct##_size_ck_reserved; \ + _CK_KABI_EXCLUDE(struct _struct##_ck_reserved _ck_reserved) +#define CK_KABI_AUX_EMBED(_struct) \ + _CK_KABI_AUX_EMBED(_struct); + +#define CK_KABI_USE_AUX_PTR(n1, n2, _struct) \ + CK_KABI_USE(n1, n2, \ + struct { CK_KABI_AUX_PTR(_struct) }) + +#define CK_KABI_AUX_SET_SIZE(_name, _struct) ({ \ + (_name)->_struct##_size_ck_reserved = sizeof(struct 
_struct##_ck_reserved); \ +}) + +#define CK_KABI_AUX_INIT_SIZE(_struct) \ + ._struct##_size_ck_reserved = sizeof(struct _struct##_ck_reserved), + +#define CK_KABI_AUX(_ptr, _struct, _field) ({ \ + size_t __off = offsetof(struct _struct##_ck_reserved, _field); \ + (_ptr)->_struct##_size_ck_reserved > __off ? true : false; \ +}) +#else +#define CK_KABI_AUX_PTR(_struct) +#define CK_KABI_AUX_EMBED(_struct) +#define CK_KABI_USE_AUX_PTR(n1, n2, _struct) +#define CK_KABI_AUX_SET_SIZE(_name, _struct) +#define CK_KABI_AUX_INIT_SIZE(_struct) +#define CK_KABI_AUX(_ptr, _struct, _field) (false) +#endif /* CONFIG_CK_KABI_RESERVE */ + +#endif /* _LINUX_CK_KABI_H */ diff --git a/init/Kconfig b/init/Kconfig index 60ed7713b5ee..ccebe67eed59 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -2006,3 +2006,21 @@ config ARCH_HAS_SYNC_CORE_BEFORE_USERMODE # . config ARCH_HAS_SYSCALL_WRAPPER def_bool n + +config CK_KABI_RESERVE + bool "Enables KABI and hotfix RESERVE" + default y + help + This option enables KABI and hotfix reserve. + For Anolis Cloud Kernel, the KABI reserve macros and hotfix reserve + macros are the same. + For some embedded systems, KABI and hotfix reserve may be not necessary. + Disable it on demand. + +config CK_KABI_SIZE_ALIGN_CHECKS + bool "Enables more stringent kabi checks in the macros" + default y + depends on CK_KABI_RESERVE + help + This option enables more stringent kabi checks. Those must be disabled + in case of a debug-build because they allow to change struct sizes. \ No newline at end of file -- Gitee From bb56bb2012db73fbb61e8c29c60ab287a19626a2 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Sun, 26 Mar 2023 11:42:50 +0800 Subject: [PATCH 0098/2138] anolis: fs: export mount_lock ANBZ: #8323 Export mount_lock so that [un]lock_mount_hash() could be called from modules. 
Signed-off-by: Jingbo Xu Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2788 --- fs/namespace.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/namespace.c b/fs/namespace.c index b4385e2413d5..45463bc55428 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -98,6 +98,7 @@ EXPORT_SYMBOL_GPL(fs_kobj); * tree or hash is modified or when a vfsmount structure is modified. */ __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock); +EXPORT_SYMBOL_GPL(mount_lock); static inline void lock_mount_hash(void) { -- Gitee From 310c57aafed6bdb43054ef517bdb324126be4cf4 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Mon, 27 Mar 2023 11:54:52 +0800 Subject: [PATCH 0099/2138] anolis: fs: export fc_drop_locked ANBZ: #8323 Export fc_drop_locked. Signed-off-by: Jingbo Xu Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2788 --- fs/fs_context.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/fs_context.c b/fs/fs_context.c index 98589aae5208..8cc839a46f45 100644 --- a/fs/fs_context.c +++ b/fs/fs_context.c @@ -378,6 +378,7 @@ void fc_drop_locked(struct fs_context *fc) fc->root = NULL; deactivate_locked_super(sb); } +EXPORT_SYMBOL_GPL(fc_drop_locked); static void legacy_fs_context_free(struct fs_context *fc); -- Gitee From e44dab2a66a9b19972af245a6f581329a22bf0b1 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Thu, 7 Apr 2022 14:58:02 +0800 Subject: [PATCH 0100/2138] anolis: fuse: bind sb to init_user_ns for virtfuse ANBZ: #8323 Bind the superblock to init_user_ns even when it's mounted from a user namespace other than init_user_ns. 
Co-developed-by: Jiang Liu Signed-off-by: Jiang Liu [ jingbo: bind vfuse's sb to init_user_ns through fsc->global] Signed-off-by: Jingbo Xu Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2788 --- fs/fuse/fuse_i.h | 2 ++ fs/fuse/inode.c | 15 +++++++++++---- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 4ce1a6fdc94f..28ef136d3598 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -1355,4 +1355,6 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, void fuse_file_release(struct inode *inode, struct fuse_file *ff, unsigned int open_flags, fl_owner_t id, bool isdir); +static inline bool is_virtfuse_device(struct file *file) { return false; } + #endif /* _FS_FUSE_I_H */ diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 735abf426a06..46067699a2e1 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -1753,10 +1753,12 @@ static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc) /* * Require mount to happen from the same user namespace which - * opened /dev/fuse to prevent potential attacks. + * opened /dev/fuse to prevent potential attacks. While for + * virtual fuse, the mount is always bound to init_user_ns. 
*/ - if ((ctx->file->f_op != &fuse_dev_operations) || - (ctx->file->f_cred->user_ns != sb->s_user_ns)) + if (!is_virtfuse_device(ctx->file) && + ((ctx->file->f_op != &fuse_dev_operations) || + (ctx->file->f_cred->user_ns != sb->s_user_ns))) return -EINVAL; ctx->fudptr = &ctx->file->private_data; @@ -1791,6 +1793,7 @@ static int fuse_get_tree(struct fs_context *fsc) struct fuse_conn *fc; struct fuse_mount *fm; struct super_block *sb; + bool is_virtfuse; int err; fc = kmalloc(sizeof(*fc), GFP_KERNEL); @@ -1827,14 +1830,18 @@ static int fuse_get_tree(struct fs_context *fsc) * Allow creating a fuse mount with an already initialized fuse * connection */ + is_virtfuse = is_virtfuse_device(ctx->file); fud = READ_ONCE(ctx->file->private_data); - if (ctx->file->f_op == &fuse_dev_operations && fud) { + if ((ctx->file->f_op == &fuse_dev_operations || is_virtfuse) && fud) { fsc->sget_key = fud->fc; sb = sget_fc(fsc, fuse_test_super, fuse_set_no_super); err = PTR_ERR_OR_ZERO(sb); if (!IS_ERR(sb)) fsc->root = dget(sb->s_root); } else { + /* bind sb to init_user_ns for virtfuse */ + if (is_virtfuse) + fsc->global = true; err = get_tree_nodev(fsc, fuse_fill_super); } out: -- Gitee From e40edc7d46e7926c0d2c522cb626060c89da3ae5 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Sun, 26 Mar 2023 12:02:51 +0800 Subject: [PATCH 0101/2138] anolis: fuse: add fuse_mount_callback hook ANBZ: #8323 Add a hook which can be called when mounting fuse. 
Signed-off-by: Jingbo Xu Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2788 --- fs/fuse/fuse_i.h | 3 +++ fs/fuse/inode.c | 12 ++++++++++++ 2 files changed, 15 insertions(+) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 28ef136d3598..3f8f6dc2f8cd 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -1355,6 +1355,9 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, void fuse_file_release(struct inode *inode, struct fuse_file *ff, unsigned int open_flags, fl_owner_t id, bool isdir); +typedef int (*fuse_mount_cb_t)(struct file *file); +extern fuse_mount_cb_t fuse_mount_callback; + static inline bool is_virtfuse_device(struct file *file) { return false; } #endif /* _FS_FUSE_I_H */ diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 46067699a2e1..943e584c9672 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -63,6 +63,9 @@ MODULE_PARM_DESC(max_user_congthresh, static struct file_system_type fuseblk_fs_type; #endif +fuse_mount_cb_t fuse_mount_callback; +EXPORT_SYMBOL_GPL(fuse_mount_callback); + struct fuse_forget_link *fuse_alloc_forget(void) { return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT); @@ -1844,6 +1847,15 @@ static int fuse_get_tree(struct fs_context *fsc) fsc->global = true; err = get_tree_nodev(fsc, fuse_fill_super); } + + if (is_virtfuse && !err) { + if (WARN_ON(!fuse_mount_callback)) + err = -EINVAL; + else + err = fuse_mount_callback(ctx->file); + if (err) + fc_drop_locked(fsc); + } out: if (fsc->s_fs_info) fuse_mount_destroy(fm); -- Gitee From 0d4b179fdbc23cd69b0e8250952e25501a70a209 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Fri, 3 Mar 2023 11:59:11 +0800 Subject: [PATCH 0102/2138] anolis: virtfuse: add a driver support FUSE device virtualization ANBZ: #8323 For container workloads, there are use cases for storage sidecar containers to provide FUSE filesystems for other containers. 
With the default FUSE driver, we are facing several issues: 1) The protection mechanism in FUSE is designed for generic use cases, it's too restrict for container usage cases. 2) Multiple FUSE filesystems may be created from FUSE char device. So introduce the virtfuse driver, which provides: 1) a mechanism to run FUSE server inside containers without CAP_SYS_ADMIN capability. 2) a one FUSE filesystem instance per virtfuse device working mode. 3) a mechanism to mount the same FUSE filesystem instance in different user namespaces. 4) a communication protocol for container orchestrators to cooperate with FUSE server running in containers. The above goals are achieved by relaxing the protection constraints, and rely on the container orchestrators to manage access permissions. Co-developed-by: Jiang Liu Signed-off-by: Jingbo Xu Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2788 --- fs/fuse/Kconfig | 11 ++ fs/fuse/Makefile | 1 + fs/fuse/fuse_i.h | 8 + fs/fuse/virtfuse.c | 283 ++++++++++++++++++++++++++++++++++ include/uapi/linux/virtfuse.h | 20 +++ 5 files changed, 323 insertions(+) create mode 100644 fs/fuse/virtfuse.c create mode 100644 include/uapi/linux/virtfuse.h diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig index 038ed0b9aaa5..ad36e8915364 100644 --- a/fs/fuse/Kconfig +++ b/fs/fuse/Kconfig @@ -52,3 +52,14 @@ config FUSE_DAX If you want to allow mounting a Virtio Filesystem with the "dax" option, answer Y. + +config VIRT_FUSE + tristate "FUSE device virtualization extension" + depends on FUSE_FS + help + This FUSE extension provides virtualized FUSE devices for container + workloads. Each virtualized FUSE device only supports one instance + of FUSE filesystem with special treatments for user namespace. + + If you want to support FUSE device virtualization for containers, + answer Y or M. 
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile index 0c48b35c058d..8dfb7f9c1f58 100644 --- a/fs/fuse/Makefile +++ b/fs/fuse/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_FUSE_FS) += fuse.o obj-$(CONFIG_CUSE) += cuse.o obj-$(CONFIG_VIRTIO_FS) += virtiofs.o +obj-$(CONFIG_VIRT_FUSE) += virtfuse.o fuse-y := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o ioctl.o fuse-$(CONFIG_FUSE_DAX) += dax.o diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 3f8f6dc2f8cd..29523925ff81 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -31,6 +31,7 @@ #include #include #include +#include /** Default max number of pages that can be used in a single read request */ #define FUSE_DEFAULT_MAX_PAGES_PER_REQ 32 @@ -1358,6 +1359,13 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff, typedef int (*fuse_mount_cb_t)(struct file *file); extern fuse_mount_cb_t fuse_mount_callback; +#if IS_ENABLED(CONFIG_VIRT_FUSE) +static inline bool is_virtfuse_device(struct file *file) +{ + return iminor(file_inode(file)) != FUSE_MINOR; +} +#else static inline bool is_virtfuse_device(struct file *file) { return false; } +#endif #endif /* _FS_FUSE_I_H */ diff --git a/fs/fuse/virtfuse.c b/fs/fuse/virtfuse.c new file mode 100644 index 000000000000..6764945f705b --- /dev/null +++ b/fs/fuse/virtfuse.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2022, Alibaba Cloud + * + * Virtual FUSE Device + */ + +#include +#include +#include +#include +#include +#include +#include "fuse_i.h" + +static uint virtfuse_dev_count = 64; +module_param_named(max_devices, virtfuse_dev_count, uint, 0644); +MODULE_PARM_DESC(max_devices, "Maximum number of devices supported"); + +struct virtfuse_dev { + char name[16]; /* adequate space for "virtfuse%d" */ + struct miscdevice dev; + atomic_t refcount; + spinlock_t lock; + struct fuse_conn *fc; +}; + +static struct virtfuse_dev *virtfuse_devices; +static struct file_operations virtfuse_fops; + +static inline struct 
virtfuse_dev *virtfuse_dev_get(struct file *file) +{ + dev_t devt = file_inode(file)->i_rdev; + struct virtfuse_dev *vfud; + int i; + + for (i = 0; i < virtfuse_dev_count; i++) { + vfud = &virtfuse_devices[i]; + if (vfud->dev.this_device->devt == devt) + return vfud; + } + + pr_err("virtfuse: failed to find virtfuse for minor %d\n", MINOR(devt)); + return NULL; +} + +static int virtfuse_dev_release(struct inode *inode, struct file *file) +{ + struct fuse_dev *fud = READ_ONCE(file->private_data); + struct virtfuse_dev *vfud; + + if (!fud) + return 0; + + vfud = virtfuse_dev_get(file); + if (!vfud) + return -EUCLEAN; + + /* + * 1. For the initial fuse mount after RESET, the mount may fail + * halfway and thus virtfuse_dev_alloc() is not called yet. + * + * 2. When the old fuse daemon has exited and RESET has not been + * done yet, refcount is zero while vfud->fc is still there. In + * this case, if a new fuse daemon tries to mount, the mount + * will fail and virtfuse_dev_release() will be called then. 
+ */ + spin_lock(&vfud->lock); + if (vfud->fc && vfud->fc == fud->fc) + WARN_ON(atomic_dec_if_positive(&vfud->refcount) < 0); + spin_unlock(&vfud->lock); + + return fuse_dev_release(inode, file); +} + +static int virtfuse_dev_alloc(struct file *file) +{ + struct virtfuse_dev *vfud = virtfuse_dev_get(file); + struct fuse_dev *fud = READ_ONCE(file->private_data); + int ret = 0; + + if (!vfud) + return -EUCLEAN; + + spin_lock(&vfud->lock); + if (!vfud->fc) { + /* the initial fuse mount after RESET */ + WARN_ON(atomic_read(&vfud->refcount) != 0); + atomic_set(&vfud->refcount, 1); + vfud->fc = fuse_conn_get(fud->fc); + } else if (atomic_read(&vfud->refcount) == 0) { + pr_err_ratelimited("%s: please reset before mount\n", vfud->dev.name); + ret = -EBUSY; + } else if (fud->fc != vfud->fc) { + pr_err_ratelimited("%s: can't be mounted multiple times\n", vfud->dev.name); + ret = -EBUSY; + } + spin_unlock(&vfud->lock); + return ret; +} + +static int virtfuse_dev_clone(struct file *file, unsigned long arg) +{ + int fd, ret; + struct file *old; + + if (get_user(fd, (__u32 __user *)arg)) + return -EFAULT; + + old = fget(fd); + if (!old) + return -EINVAL; + /* + * Don't clone fuse_conn between normal fuse device and virtfuse, + * or different virtfuse. 
+ */ + if (file_inode(old)->i_rdev != file_inode(file)->i_rdev) { + fput(old); + return -EINVAL; + } + + ret = fuse_dev_operations.unlocked_ioctl(file, FUSE_DEV_IOC_CLONE, arg); + if (!ret) + atomic_inc(&virtfuse_dev_get(file)->refcount); + fput(old); + return ret; +} + +static int virtfuse_clone(struct file *file) +{ + struct virtfuse_dev *vfud; + struct fuse_conn *fc; + struct fuse_dev *fud; + int err; + + if (file->private_data) + return -EEXIST; + + vfud = virtfuse_dev_get(file); + if (!vfud) + return -EUCLEAN; + + spin_lock(&vfud->lock); + if (!vfud->fc) { + spin_unlock(&vfud->lock); + return -ENODATA; + } + + /* acquire temporary refcount */ + fc = fuse_conn_get(vfud->fc); + atomic_inc(&vfud->refcount); + spin_unlock(&vfud->lock); + + /* follow fuse_device_clone() to clone the connection */ + fud = fuse_dev_alloc_install(fc); + if (fud) { + atomic_inc(&vfud->refcount); + file->private_data = fud; + atomic_inc(&fc->dev_count); + err = 0; + } else { + err = -ENOMEM; + } + + /* drop temporary refcount */ + atomic_dec(&vfud->refcount); + fuse_conn_put(fc); + return err; +} + +static int virtfuse_reset(struct file *file) +{ + struct virtfuse_dev *vfud = virtfuse_dev_get(file); + struct fuse_conn *fc = NULL; + + if (!vfud) + return -EUCLEAN; + + if (atomic_read(&vfud->refcount)) + return -EBUSY; + + spin_lock(&vfud->lock); + if (vfud->fc) { + fc = vfud->fc; + vfud->fc = NULL; + } + spin_unlock(&vfud->lock); + + if (fc) + fuse_conn_put(fc); + return 0; +} + +static long virtfuse_dev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case FUSE_DEV_IOC_CLONE: + return virtfuse_dev_clone(file, arg); + case VIRTFUSE_IOC_CLONE: + return virtfuse_clone(file); + case VIRTFUSE_IOC_RESET: + return virtfuse_reset(file); + default: + return fuse_dev_operations.unlocked_ioctl(file, cmd, arg); + } +} + +static void virtfuse_free_devices(void) +{ + struct virtfuse_dev *vfud; + int i; + + for (i = 0; i < virtfuse_dev_count; i++) { + vfud = 
&virtfuse_devices[i]; + if (vfud->dev.this_device) + misc_deregister(&vfud->dev); + WARN_ON(atomic_read(&vfud->refcount) != 0); + } + kfree(virtfuse_devices); + virtfuse_devices = NULL; +} + +static int __init virtfuse_init(void) +{ + struct virtfuse_dev *vfud; + int i, ret; + + if (virtfuse_dev_count == 0) { + pr_err("virtfuse: max_devices is zero\n"); + return -EINVAL; + } else if (virtfuse_dev_count > VIRT_FUSE_MAX_DEVICES) { + pr_err("virtfuse: max_devices is too big, max %d\n", + VIRT_FUSE_MAX_DEVICES); + return -EINVAL; + } + + virtfuse_fops = fuse_dev_operations; + virtfuse_fops.owner = THIS_MODULE; + virtfuse_fops.compat_ioctl = virtfuse_dev_ioctl; + virtfuse_fops.unlocked_ioctl = virtfuse_dev_ioctl; + virtfuse_fops.release = virtfuse_dev_release; + + virtfuse_devices = kcalloc(virtfuse_dev_count, + sizeof(struct virtfuse_dev), GFP_KERNEL); + if (virtfuse_devices == NULL) + return -ENOMEM; + + for (i = 0; i < virtfuse_dev_count; i++) { + vfud = &virtfuse_devices[i]; + spin_lock_init(&vfud->lock); + snprintf(vfud->name, sizeof(vfud->name), "virtfuse%d", i); + + vfud->dev.name = vfud->name; + vfud->dev.minor = MISC_DYNAMIC_MINOR; + vfud->dev.fops = &virtfuse_fops; + + ret = misc_register(&vfud->dev); + if (ret) { + pr_err("virtfuse: failed to create virtfuse%d\n", i); + vfud->dev.this_device = NULL; + virtfuse_free_devices(); + return ret; + } + } + + fuse_mount_callback = virtfuse_dev_alloc; + return 0; +} + +static void __exit virtfuse_exit(void) +{ + fuse_mount_callback = NULL; + virtfuse_free_devices(); +} + +module_init(virtfuse_init); +module_exit(virtfuse_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Virtual FUSE Device"); +MODULE_AUTHOR("Jingbo Xu "); +MODULE_AUTHOR("Jiang Liu "); diff --git a/include/uapi/linux/virtfuse.h b/include/uapi/linux/virtfuse.h new file mode 100644 index 000000000000..00c3c883f2c8 --- /dev/null +++ b/include/uapi/linux/virtfuse.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 
+#ifndef _LINUX_VIRTFUSE_H +#define _LINUX_VIRTFUSE_H + +#include +#include + +/* Maximum number of devices supported. */ +#define VIRT_FUSE_MAX_DEVICES 1024 + +/* + * Clone a fuse device sharing the fuse connection bound to the specified + * virtual device. + */ +#define VIRTFUSE_IOC_CLONE _IO(0x99, 1) + +/* Reset the specified virtual device */ +#define VIRTFUSE_IOC_RESET _IO(0x99, 2) + +#endif -- Gitee From 8a511e5e1871ac228a98acea3a69b1f7f4be5b92 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Tue, 28 Jun 2022 17:24:36 +0800 Subject: [PATCH 0103/2138] anolis: virtfuse: add VIRTFUSE_IOC_GET_MOUNTS ioctl ANBZ: #8323 Add VIRTFUSE_IOC_GET_MOUNTS ioctl to print all mountinfo of the specified virtual device. [jingbo's backport notes to ANCK 6.6] Open coded [un]lock_mount_hash() as they are moved into fs/namespace.c rather than fs/mount.h in v6.6. Signed-off-by: Jingbo Xu Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2788 --- fs/fuse/virtfuse.c | 145 ++++++++++++++++++++++++++++++++++ include/uapi/linux/virtfuse.h | 13 +++ 2 files changed, 158 insertions(+) diff --git a/fs/fuse/virtfuse.c b/fs/fuse/virtfuse.c index 6764945f705b..cc7f25701dd4 100644 --- a/fs/fuse/virtfuse.c +++ b/fs/fuse/virtfuse.c @@ -11,7 +11,9 @@ #include #include #include +#include #include "fuse_i.h" +#include "../mount.h" static uint virtfuse_dev_count = 64; module_param_named(max_devices, virtfuse_dev_count, uint, 0644); @@ -191,6 +193,147 @@ static int virtfuse_reset(struct file *file) return 0; } +static int fillbuf(char *buf, unsigned int len, unsigned int *pcount, + const char *fmt, ...) 
+{ + va_list args; + unsigned int count = *pcount; + int step; + + va_start(args, fmt); + step = vsnprintf(buf + count, len - count, fmt, args); + va_end(args); + if (step >= len - count) + return -EMSGSIZE; + + *pcount += step; + return 0; +} + +static int virtfuse_get_mounts(struct file *file, unsigned long arg) +{ + struct virtfuse_mounts_buf vbuf, __user *u_vbuf; + struct virtfuse_dev *vfud = virtfuse_dev_get(file); + struct fuse_conn *fc = NULL; + struct fuse_mount *fm; + struct super_block *sb; + struct mount *mnt; + unsigned int count = 0, len; + int order, step, ret = 0; + char *buf, *name, *p; + void __user *u_buf; + + if (!vfud) + return -EUCLEAN; + + u_vbuf = (struct virtfuse_mounts_buf __user *)arg; + u_buf = (void __user *)u_vbuf->buf; + if (copy_from_user(&vbuf, u_vbuf, sizeof(vbuf)) != 0) + return -EFAULT; + + len = vbuf.len; + if (len <= 1) + return -EMSGSIZE; + + /* init the user buffer as an empty string */ + if (clear_user(u_buf, 1) != 0) + return -EFAULT; + + spin_lock(&vfud->lock); + if (vfud->fc) + fc = fuse_conn_get(vfud->fc); + spin_unlock(&vfud->lock); + if (!fc) + return 0; + + down_read(&fc->killsb); + fm = list_first_entry_or_null(&fc->mounts, struct fuse_mount, fc_entry); + if (!fm || !fm->sb) + goto out_up_killsb; + sb = fm->sb; + + name = __getname(); + if (!name) { + ret = -ENOMEM; + goto out_up_killsb; + } + + order = get_order(len); + buf = (void *)__get_free_pages(GFP_KERNEL, order); + if (!buf) { + ret = -ENOMEM; + goto out_putname; + } + + /* connection state */ + ret = fillbuf(buf, len, &count, "%s\n", + fc->connected ? "Connected" : "Aborted"); + if (ret) + goto out_free_pages; + + /* open coded lock_mount_hash() */ + write_seqlock(&mount_lock); + + list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { + /* skip slave mounts */ + if (mnt->mnt_master) + continue; + + /* skip private mounts, e.g. 
from clone_private_mount() */ + if (!mnt->mnt_ns) + continue; + + /* mountpoint */ + p = dentry_path_raw(mnt->mnt_mountpoint, name, PATH_MAX); + if (IS_ERR(p)) { + ret = PTR_ERR(p); + break; + } + ret = fillbuf(buf, len, &count, "%s %s", + mnt->mnt_devname ? : "none", p); + if (ret) + break; + + /* fstype */ + if (sb->s_subtype && sb->s_subtype[0]) + sprintf(name, "%s.%s", sb->s_type->name, sb->s_subtype); + else + sprintf(name, "%s", sb->s_type->name); + ret = fillbuf(buf, len, &count, " %s", name); + if (ret) + break; + + /* mount options */ + step = sprintf(name, "%s,user_id=%u,group_id=%u", + __mnt_is_readonly(&mnt->mnt) ? "ro" : "rw", + from_kuid_munged(fc->user_ns, fc->user_id), + from_kgid_munged(fc->user_ns, fc->group_id)); + if (fc->default_permissions) + step += sprintf(name + step, ",default_permissions"); + if (fc->allow_other) + step += sprintf(name + step, ",allow_other"); + ret = fillbuf(buf, len, &count, " %s\n", name); + if (ret) + break; + } + + /* open coded unlock_mount_hash() */ + write_sequnlock(&mount_lock); + + /* also copy the trailing null (ensured by vsnprintf) */ + if (!ret && (copy_to_user(u_buf, buf, count + 1) != 0)) + ret = -EFAULT; + +out_free_pages: + free_pages((unsigned long)buf, order); +out_putname: + __putname(name); +out_up_killsb: + up_read(&fc->killsb); + fuse_conn_put(fc); + return ret; +} + static long virtfuse_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -201,6 +344,8 @@ static long virtfuse_dev_ioctl(struct file *file, unsigned int cmd, return virtfuse_clone(file); case VIRTFUSE_IOC_RESET: return virtfuse_reset(file); + case VIRTFUSE_IOC_GET_MOUNTS: + return virtfuse_get_mounts(file, arg); default: return fuse_dev_operations.unlocked_ioctl(file, cmd, arg); } diff --git a/include/uapi/linux/virtfuse.h b/include/uapi/linux/virtfuse.h index 00c3c883f2c8..93b7ab200b32 100644 --- a/include/uapi/linux/virtfuse.h +++ b/include/uapi/linux/virtfuse.h @@ -17,4 +17,17 @@ /* Reset the specified virtual 
device */ #define VIRTFUSE_IOC_RESET _IO(0x99, 2) +/* Print all mountinfo of the specified virtual device. */ +#define VIRTFUSE_IOC_GET_MOUNTS _IO(0x99, 3) + +/* + * @len indicates the size of the buffer indicated by @buf + * @buf indicates a buffer to contain the output mountinfo of the specified + * virtual device. + */ +struct virtfuse_mounts_buf { + __u32 len; + __u8 buf[]; +}; + #endif -- Gitee From 8f074077dd51b090cae69d4ab34969df4f0bb9e9 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Wed, 20 Dec 2023 11:21:25 +0800 Subject: [PATCH 0104/2138] anolis: virtfuse: improve mntpoint printing ANBZ: #8323 Introduce d_absolute_path_locked() to improve the mntpoint printing: 1. print the full path of the mntpoint 1) mount ext4 on /mnt 2) mount fuse on /mnt/dir through virtfuse 3) prior this patch, VIRTFUSE_IOC_GET_MOUNTS ioctl prints the mntpoint as "/dir", as dentry_path_raw() only prints the relative path of the dentry in which mount it resides in. 4) with this patch, VIRTFUSE_IOC_GET_MOUNTS ioctl prints the absolute path of the mntpoint as "/mnt/dir" 2. print the right path of the mntpoint if it resides in a bind mount 1) bind mount /foo/bar to /mnt 2) mount fuse on /mnt/dir through virtfuse 3) prior this patch, VIRTFUSE_IOC_GET_MOUNTS ioctl prints the mntpoint as "/foo/bar/dir" 4) with this patch, VIRTFUSE_IOC_GET_MOUNTS ioctl prints the right path as "/mnt/dir" [jingbo's backport notes to ANCK 6.6] Replace prepend_path_locked() with d_absolute_path_locked(), as we have __prepend_path() helper now. 
Signed-off-by: Jingbo Xu Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2788 --- fs/d_path.c | 52 ++++++++++++++++++++++++++++++++++++++++++ fs/fuse/virtfuse.c | 7 +++++- include/linux/dcache.h | 1 + 3 files changed, 59 insertions(+), 1 deletion(-) diff --git a/fs/d_path.c b/fs/d_path.c index 5f4da5c8d5db..df50090b6a0f 100644 --- a/fs/d_path.c +++ b/fs/d_path.c @@ -196,6 +196,58 @@ static int prepend_path(const struct path *path, return error; } +static int prepend_path_locked(const struct path *path, + const struct path *root, + struct prepend_buffer *p) +{ + struct prepend_buffer b; + unsigned seq = 0; + int error; + + rcu_read_lock(); +restart: + b = *p; + read_seqbegin_or_lock(&rename_lock, &seq); + error = __prepend_path(path->dentry, real_mount(path->mnt), root, &b); + if (!(seq & 1)) + rcu_read_unlock(); + if (need_seqretry(&rename_lock, seq)) { + seq = 1; + goto restart; + } + done_seqretry(&rename_lock, seq); + + if (unlikely(error == 3)) + b = *p; + + if (b.len == p->len) + prepend_char(&b, '/'); + + *p = b; + return error; +} + +/* + * d_absolute_path_locked - return the absolute path of a dentry + * + * @path: path to report + * @buf: buffer to return value in + * @buflen: buffer length + * + * Write absolute pathname like d_absolute_path() except with mount_lock held. 
+ */ +char *d_absolute_path_locked(const struct path *path, char *buf, int buflen) +{ + struct path root = {}; + DECLARE_BUFFER(b, buf, buflen); + + prepend_char(&b, 0); + if (unlikely(prepend_path_locked(path, &root, &b) > 1)) + return ERR_PTR(-EINVAL); + return extract_string(&b); +} +EXPORT_SYMBOL(d_absolute_path_locked); + /** * __d_path - return the path of a dentry * @path: the dentry/vfsmount to report diff --git a/fs/fuse/virtfuse.c b/fs/fuse/virtfuse.c index cc7f25701dd4..b6ef6fbf4490 100644 --- a/fs/fuse/virtfuse.c +++ b/fs/fuse/virtfuse.c @@ -275,6 +275,11 @@ static int virtfuse_get_mounts(struct file *file, unsigned long arg) write_seqlock(&mount_lock); list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { + struct path path = { + .dentry = mnt->mnt.mnt_root, + .mnt = &mnt->mnt + }; + /* skip slave mounts */ if (mnt->mnt_master) continue; @@ -284,7 +289,7 @@ static int virtfuse_get_mounts(struct file *file, unsigned long arg) continue; /* mountpoint */ - p = dentry_path_raw(mnt->mnt_mountpoint, name, PATH_MAX); + p = d_absolute_path_locked(&path, name, PATH_MAX); if (IS_ERR(p)) { ret = PTR_ERR(p); break; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 6b351e009f59..344f41a3e052 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -296,6 +296,7 @@ extern char *d_absolute_path(const struct path *, char *, int); extern char *d_path(const struct path *, char *, int); extern char *dentry_path_raw(const struct dentry *, char *, int); extern char *dentry_path(const struct dentry *, char *, int); +extern char *d_absolute_path_locked(const struct path *, char *, int); /* Allocation counts.. */ -- Gitee From e8ee163bff478d58466a70d6d151661fdc0a3b37 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Fri, 5 May 2023 13:36:27 +0800 Subject: [PATCH 0105/2138] anolis: configs: x86_64, arm64: enable virtfuse as module ANBZ: #8323 Enable fuse device virtualization as module in x86_64 and arm64. 
Signed-off-by: Jingbo Xu Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2788 --- arch/arm64/configs/anolis-debug_defconfig | 1 + arch/arm64/configs/anolis_defconfig | 1 + arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + 4 files changed, 4 insertions(+) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index b3c3c3da6168..419072dae8e2 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -5889,6 +5889,7 @@ CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m CONFIG_FUSE_DAX=y +CONFIG_VIRT_FUSE=m CONFIG_OVERLAY_FS=m CONFIG_OVERLAY_FS_REDIRECT_DIR=y CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index dadd27949f52..27ba33178302 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -5908,6 +5908,7 @@ CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m CONFIG_FUSE_DAX=y +CONFIG_VIRT_FUSE=m CONFIG_OVERLAY_FS=m CONFIG_OVERLAY_FS_REDIRECT_DIR=y CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index b5adf870d839..2931e96d0ba8 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -6484,6 +6484,7 @@ CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m CONFIG_FUSE_DAX=y +CONFIG_VIRT_FUSE=m CONFIG_OVERLAY_FS=m CONFIG_OVERLAY_FS_REDIRECT_DIR=y CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index be00c2ce6add..b031bb4585c3 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -6473,6 +6473,7 @@ CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m CONFIG_FUSE_DAX=y +CONFIG_VIRT_FUSE=m CONFIG_OVERLAY_FS=m CONFIG_OVERLAY_FS_REDIRECT_DIR=y 
CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y -- Gitee From a186e952fd90e3259a69bf1bf75ab0003151a7df Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 23 Feb 2024 14:44:51 +0800 Subject: [PATCH 0106/2138] anolis: iocost: add legacy interface file ANBZ: #8329 To support cgroup v1. Signed-off-by: Jiufei Xue Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2785 --- block/blk-iocost.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 129732a8d0dd..a213d38c1627 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -3532,8 +3532,31 @@ static struct cftype ioc_files[] = { {} }; +static struct cftype ioc_legacy_files[] = { + { + .name = "cost.weight", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = ioc_weight_show, + .write = ioc_weight_write, + }, + { + .name = "cost.qos", + .flags = CFTYPE_ONLY_ON_ROOT, + .seq_show = ioc_qos_show, + .write = ioc_qos_write, + }, + { + .name = "cost.model", + .flags = CFTYPE_ONLY_ON_ROOT, + .seq_show = ioc_cost_model_show, + .write = ioc_cost_model_write, + }, + {} +}; + static struct blkcg_policy blkcg_policy_iocost = { .dfl_cftypes = ioc_files, + .legacy_cftypes = ioc_legacy_files, .cpd_alloc_fn = ioc_cpd_alloc, .cpd_free_fn = ioc_cpd_free, .pd_alloc_fn = ioc_pd_alloc, -- Gitee From 89523ecf33ef2b0658d03c779136a1582417f211 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 23 Feb 2024 14:45:10 +0800 Subject: [PATCH 0107/2138] anolis: iocost: add ioc_gq stat ANBZ: #8329 Add a stat file to monitor the ioc_gq stat. 
Signed-off-by: Jiufei Xue Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2785 --- block/blk-iocost.c | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/block/blk-iocost.c b/block/blk-iocost.c index a213d38c1627..2c2c82007a54 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -3510,6 +3510,36 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input, return ret; } +static u64 ioc_stat_prfill(struct seq_file *sf, struct blkg_policy_data *pd, + int off) +{ + struct blkcg_gq *blkg = pd->blkg; + const char *dname = blkg_dev_name(blkg); + struct ioc_gq *iocg = blkg_to_iocg(blkg); + struct ioc *ioc = iocg->ioc; + + if (!dname) + return 0; + + seq_printf(sf, "%s is_active=%d active=%u inuse=%u " + "hweight_active=%u hweight_inuse=%u vrate=%llu\n", + dname, !list_empty(&iocg->active_list), + iocg->active, iocg->inuse, + iocg->hweight_active, iocg->hweight_inuse, + (unsigned long long)atomic64_read(&ioc->vtime_rate)); + + return 0; +} + +static int ioc_cost_print_stat(struct seq_file *sf, void *v) +{ + struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); + + blkcg_print_blkgs(sf, blkcg, ioc_stat_prfill, + &blkcg_policy_iocost, seq_cft(sf)->private, false); + return 0; +} + static struct cftype ioc_files[] = { { .name = "weight", @@ -3551,6 +3581,11 @@ static struct cftype ioc_legacy_files[] = { .seq_show = ioc_cost_model_show, .write = ioc_cost_model_write, }, + { + .name = "cost.stat", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = ioc_cost_print_stat, + }, {} }; -- Gitee From 828c18acc8b1e3a537359721e06b4c8146755233 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 23 Feb 2024 14:51:36 +0800 Subject: [PATCH 0108/2138] anolis: mm: add proc interface to control context readahead ANBZ: #8333 For some workloads whose io activities are mostly random, context readahead feature can introduce unnecessary io read operations, which will impact app's performance. 
Context readahead's algorithm is straightforward and not that smart. This patch adds "/proc/sys/vm/enable_context_readahead" to control whether to disable or enable this feature. Currently we enable context readahead default, user can echo 0 to /proc/sys/vm/enable_context_readahead to disable context readahead. We also have tested mongodb's performance in 'random point select' case, With context readahead enabled: mongodb eps 12409 With context readahead disabled: mongodb eps 14443 About 16% performance improvement. Signed-off-by: Xiaoguang Wang Signed-off-by: Joseph Qi Acked-by: Jingbo Xu Link: https://gitee.com/anolis/cloud-kernel/pulls/2786 --- Documentation/admin-guide/sysctl/vm.rst | 17 +++++++++++++++++ kernel/sysctl.c | 11 +++++++++++ mm/readahead.c | 9 +++++++-- 3 files changed, 35 insertions(+), 2 deletions(-) diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index 45ba1f4dc004..beabacb0fcba 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -75,6 +75,7 @@ Currently, these files are in /proc/sys/vm: - watermark_boost_factor - watermark_scale_factor - zone_reclaim_mode +- enable_context_readahead admin_reserve_kbytes @@ -1044,3 +1045,19 @@ of other processes running on other nodes will not be affected. Allowing regular swap effectively restricts allocations to the local node unless explicitly overridden by memory policies or cpuset configurations. + + +enable_context_readahead +======================== + +Specific workloads whose io activities are mostly random, context readahead +feature may introduce unnecessary io read operations, which will impact app's +performance. + +Default it is enabled. 
+ +To disable context readahead: + echo 0 > /proc/sys/vm/enable_context_readahead + +To enable context readahead again: + echo 1 > /proc/sys/vm/enable_context_readahead diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 0c11d319fa01..204528a81b43 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -135,6 +135,8 @@ enum sysctl_writes_mode { static enum sysctl_writes_mode sysctl_writes_strict = SYSCTL_WRITES_STRICT; #endif /* CONFIG_PROC_SYSCTL */ +extern int sysctl_enable_context_readahead; + #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) int sysctl_legacy_va_layout; @@ -2272,6 +2274,15 @@ static struct ctl_table vm_table[] = { .extra2 = (void *)&mmap_rnd_compat_bits_max, }, #endif + { + .procname = "enable_context_readahead", + .data = &sysctl_enable_context_readahead, + .maxlen = sizeof(sysctl_enable_context_readahead), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, { } }; diff --git a/mm/readahead.c b/mm/readahead.c index f1595c032ce7..5ea79401c31d 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -131,6 +131,9 @@ #include "internal.h" +/* enable context readahead default */ +int sysctl_enable_context_readahead = 1; + /* * Initialise a struct file's readahead state. Assumes that the caller has * memset *ra to zero. @@ -633,9 +636,11 @@ static void ondemand_readahead(struct readahead_control *ractl, * Query the page cache and look for the traces(cached history pages) * that a sequential stream would leave behind. 
*/ - if (try_context_readahead(ractl->mapping, ra, index, req_size, - max_pages)) + if (sysctl_enable_context_readahead && + try_context_readahead(ractl->mapping, ra, index, req_size, + max_pages)) { goto readit; + } /* * standalone, small random read -- Gitee From 134314ec775d81c740295eb359f2cc58ad6b321c Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Wed, 6 Dec 2017 17:40:35 +0800 Subject: [PATCH 0109/2138] anolis: writeback: add memcg_blkcg_link tree ANBZ: #8327 Here we add a global radix tree to link memcg and blkcg that the user attach the tasks to when using cgroup v1, which is used for writeback cgroup. Signed-off-by: Jiufei Xue Signed-off-by: Joseph Qi Acked-by: Jingbo Xu Link: https://gitee.com/anolis/cloud-kernel/pulls/2784 --- include/linux/backing-dev.h | 31 +++++- kernel/cgroup/cgroup-internal.h | 2 + kernel/cgroup/cgroup.c | 16 +++- mm/backing-dev.c | 162 +++++++++++++++++++++++++++++++- mm/memcontrol.c | 2 +- 5 files changed, 206 insertions(+), 7 deletions(-) diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 1a97277f99b1..a28d2248ed82 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -174,9 +174,7 @@ static inline bool inode_cgwb_enabled(struct inode *inode) { struct backing_dev_info *bdi = inode_to_bdi(inode); - return cgroup_subsys_on_dfl(memory_cgrp_subsys) && - cgroup_subsys_on_dfl(io_cgrp_subsys) && - (bdi->capabilities & BDI_CAP_WRITEBACK) && + return (bdi->capabilities & BDI_CAP_WRITEBACK) && (inode->i_sb->s_iflags & SB_I_CGROUPWB); } @@ -318,6 +316,13 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, rcu_read_unlock(); } +void insert_memcg_blkcg_link(struct cgroup_subsys *ss, + struct list_head *tmp_links, + struct css_set *cset); +int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links); +void free_memcg_blkcg_links(struct list_head *links_to_free); +void delete_memcg_blkcg_link(struct cgroup_subsys *ss, + struct cgroup_subsys_state *css); #else /* 
CONFIG_CGROUP_WRITEBACK */ static inline bool inode_cgwb_enabled(struct inode *inode) @@ -368,6 +373,26 @@ static inline void wb_blkcg_offline(struct cgroup_subsys_state *css) { } +static inline void insert_memcg_blkcg_link(struct cgroup_subsys *ss, + struct list_head *tmp_links, + struct css_set *cset) +{ +} + +static inline int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links) +{ + return 0; +} + +static inline void free_memcg_blkcg_links(struct list_head *links_to_free) +{ +} + +static inline void delete_memcg_blkcg_link(struct cgroup_subsys *ss, + struct cgroup_subsys_state *css) +{ +} + #endif /* CONFIG_CGROUP_WRITEBACK */ const char *bdi_dev_name(struct backing_dev_info *bdi); diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h index 5e17f01ced9f..b5394a68fb4f 100644 --- a/kernel/cgroup/cgroup-internal.h +++ b/kernel/cgroup/cgroup-internal.h @@ -108,6 +108,7 @@ struct cgroup_taskset { /* the src and dst cset list running through cset->mg_node */ struct list_head src_csets; struct list_head dst_csets; + int dst_count; /* the number of tasks in the set */ int nr_tasks; @@ -152,6 +153,7 @@ struct cgroup_mgctx { .src_csets = LIST_HEAD_INIT(tset.src_csets), \ .dst_csets = LIST_HEAD_INIT(tset.dst_csets), \ .csets = &tset.src_csets, \ + .dst_count = 0, \ } #define CGROUP_MGCTX_INIT(name) \ diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 36097e8c904f..1e8078e73762 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -59,6 +59,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -2438,9 +2439,11 @@ static void cgroup_migrate_add_task(struct task_struct *task, if (list_empty(&cset->mg_node)) list_add_tail(&cset->mg_node, &mgctx->tset.src_csets); - if (list_empty(&cset->mg_dst_cset->mg_node)) + if (list_empty(&cset->mg_dst_cset->mg_node)) { list_add_tail(&cset->mg_dst_cset->mg_node, &mgctx->tset.dst_csets); + mgctx->tset.dst_count++; + } } /** @@ -2521,9 +2524,14 
@@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) struct task_struct *task, *tmp_task; struct css_set *cset, *tmp_cset; int ssid, failed_ssid, ret; + LIST_HEAD(tmp_links); /* check that we can legitimately attach to the cgroup */ if (tset->nr_tasks) { + ret = allocate_memcg_blkcg_links(tset->dst_count*2, &tmp_links); + if (ret) + goto out_release_tset; + do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { if (ss->can_attach) { tset->ssid = ssid; @@ -2576,6 +2584,8 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) tset->ssid = ssid; ss->attach(tset); } + list_for_each_entry(cset, &tset->dst_csets, mg_node) + insert_memcg_blkcg_link(ss, &tmp_links, cset); } while_each_subsys_mask(); } @@ -2602,6 +2612,8 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) } spin_unlock_irq(&css_set_lock); + free_memcg_blkcg_links(&tmp_links); + /* * Re-initialize the cgroup_taskset structure in case it is reused * again in another cgroup_migrate_add_task()/cgroup_migrate_execute() @@ -5352,6 +5364,8 @@ static void css_free_rwork_fn(struct work_struct *work) struct cgroup_subsys_state *parent = css->parent; int id = css->id; + delete_memcg_blkcg_link(ss, css); + ss->css_free(css); cgroup_idr_remove(&ss->css_idr, id); cgroup_put(cgrp); diff --git a/mm/backing-dev.c b/mm/backing-dev.c index e039d05304dd..2fed2a533ed7 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -499,6 +499,158 @@ static void wb_exit(struct bdi_writeback *wb) #include +struct memcg_blkcg_link { + struct list_head list; + struct rcu_head rcu; + struct cgroup_subsys_state *memcg_css; + struct cgroup_subsys_state *blkcg_css; +}; + +static RADIX_TREE(memcg_blkcg_tree, GFP_ATOMIC); +static DEFINE_SPINLOCK(memcg_blkcg_tree_lock); + +int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links) +{ + struct memcg_blkcg_link *link; + int i; + + for (i = 0; i < count; i++) { + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + free_memcg_blkcg_links(tmp_links); + 
return -ENOMEM; + } + list_add(&link->list, tmp_links); + } + return 0; +} + +static void link_free(struct rcu_head *head) +{ + struct memcg_blkcg_link *link = container_of(head, + struct memcg_blkcg_link, rcu); + kfree(link); +} + +void insert_memcg_blkcg_link(struct cgroup_subsys *ss, + struct list_head *tmp_links, + struct css_set *cset) +{ + struct memcg_blkcg_link *link; + struct cgroup_subsys_state *blkcg_css; + struct cgroup_subsys_state *memcg_css; + int err; + + if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id) + return; + + WARN_ON(list_empty(tmp_links)); + + memcg_css = cset->subsys[memory_cgrp_id]; + blkcg_css = cset->subsys[io_cgrp_id]; + + if ((memcg_css == &root_mem_cgroup->css) || + (blkcg_css == blkcg_root_css)) + return; + + rcu_read_lock(); + link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id); + if (link && ((link->blkcg_css == blkcg_css) || + (link->blkcg_css == blkcg_root_css))) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + spin_lock(&memcg_blkcg_tree_lock); + if (link) { + radix_tree_delete(&memcg_blkcg_tree, memcg_css->id); + call_rcu(&link->rcu, link_free); + blkcg_css = blkcg_root_css; + } + + link = list_first_entry(tmp_links, struct memcg_blkcg_link, list); + list_del_init(&link->list); + + link->memcg_css = memcg_css; + link->blkcg_css = blkcg_css; + err = radix_tree_insert(&memcg_blkcg_tree, memcg_css->id, link); + WARN_ON(err); + + spin_unlock(&memcg_blkcg_tree_lock); +} + +void free_memcg_blkcg_links(struct list_head *links_to_free) +{ + struct memcg_blkcg_link *link, *tmp_link; + + list_for_each_entry_safe(link, tmp_link, links_to_free, list) { + list_del(&link->list); + kfree(link); + } +} + +static void delete_memcg_link(struct cgroup_subsys_state *memcg_css) +{ + struct memcg_blkcg_link *link; + + spin_lock(&memcg_blkcg_tree_lock); + link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id); + if (link) { + radix_tree_delete(&memcg_blkcg_tree, memcg_css->id); + call_rcu(&link->rcu, link_free); + } + 
spin_unlock(&memcg_blkcg_tree_lock); +} + +static void delete_blkcg_link(struct cgroup_subsys_state *blkcg_css) +{ + struct memcg_blkcg_link *link; + struct radix_tree_iter iter; + void **slot; + + spin_lock(&memcg_blkcg_tree_lock); + radix_tree_for_each_slot(slot, &memcg_blkcg_tree, &iter, 0) { + link = *slot; + if (link->blkcg_css == blkcg_css) { + radix_tree_delete(&memcg_blkcg_tree, link->memcg_css->id); + call_rcu(&link->rcu, link_free); + } + } + spin_unlock(&memcg_blkcg_tree_lock); +} + +void delete_memcg_blkcg_link(struct cgroup_subsys *ss, + struct cgroup_subsys_state *css) +{ + if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id) + return; + + if (ss->id == io_cgrp_id) + delete_blkcg_link(css); + if (ss->id == memory_cgrp_id) + delete_memcg_link(css); +} + +static struct cgroup_subsys_state *find_blkcg_css(struct cgroup_subsys_state *memcg_css) +{ + struct memcg_blkcg_link *link; + struct cgroup_subsys_state *blkcg_css; + + rcu_read_lock(); + link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id); + if (link) + blkcg_css = link->blkcg_css; + else + blkcg_css = blkcg_root_css; + + css_get(blkcg_css); + + rcu_read_unlock(); + + return blkcg_css; +} + /* * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and * memcg->cgwb_list. bdi->cgwb_tree is also RCU protected. 
@@ -583,7 +735,10 @@ static int cgwb_create(struct backing_dev_info *bdi, int ret = 0; memcg = mem_cgroup_from_css(memcg_css); - blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + else + blkcg_css = find_blkcg_css(memcg_css); memcg_cgwb_list = &memcg->cgwb_list; blkcg_cgwb_list = blkcg_get_cgwb_list(blkcg_css); @@ -704,7 +859,10 @@ struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, struct cgroup_subsys_state *blkcg_css; /* see whether the blkcg association has changed */ - blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + else + blkcg_css = find_blkcg_css(memcg_css); if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb))) wb = NULL; css_put(blkcg_css); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d2ceadd11b10..a198f53860c7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -371,7 +371,7 @@ struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio) { struct mem_cgroup *memcg = folio_memcg(folio); - if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) + if (!memcg) memcg = root_mem_cgroup; return &memcg->css; -- Gitee From 3b96f6264d3d1756bca487ac93f0330ae0a0f483 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 23 Feb 2024 10:29:14 +0800 Subject: [PATCH 0110/2138] anolis: writeback: add debug info for memcg-blkcg link ANBZ: #8327 Signed-off-by: Jiufei Xue Signed-off-by: Joseph Qi Acked-by: Jingbo Xu Link: https://gitee.com/anolis/cloud-kernel/pulls/2784 --- include/trace/events/writeback.h | 21 ++++++++++++++ mm/backing-dev.c | 50 ++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+) diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 54e353c9f919..acd558657cf3 100644 --- a/include/trace/events/writeback.h 
+++ b/include/trace/events/writeback.h @@ -441,6 +441,27 @@ TRACE_EVENT(writeback_bdi_register, ) ); +TRACE_EVENT(insert_memcg_blkcg_link, + TP_PROTO(struct cgroup_subsys_state *memcg_css, + struct cgroup_subsys_state *blkcg_css, + struct cgroup_subsys_state *old_blkcg_css), + TP_ARGS(memcg_css, blkcg_css, old_blkcg_css), + TP_STRUCT__entry( + __field(unsigned int, memcg_ino) + __field(unsigned int, blkcg_ino) + __field(unsigned int, old_blkcg_ino) + ), + TP_fast_assign( + __entry->memcg_ino = kernfs_ino(memcg_css->cgroup->kn); + __entry->blkcg_ino = kernfs_ino(blkcg_css->cgroup->kn); + __entry->old_blkcg_ino = old_blkcg_css ? + kernfs_ino(old_blkcg_css->cgroup->kn) : 0; + ), + TP_printk("memcg_ino=%u blkcg_ino=%u old_blkcg_ino=%u", + __entry->memcg_ino, __entry->blkcg_ino, __entry->old_blkcg_ino + ) +); + DECLARE_EVENT_CLASS(wbc_class, TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), TP_ARGS(wbc, bdi), diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 2fed2a533ed7..6fff90c68c77 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -41,9 +41,22 @@ struct workqueue_struct *bdi_wq; static struct dentry *bdi_debug_root; +#ifdef CONFIG_CGROUP_WRITEBACK +static struct dentry *memcg_blkcg_file; +static const struct file_operations memcg_blkcg_debug_fops; +#endif + static void bdi_debug_init(void) { bdi_debug_root = debugfs_create_dir("bdi", NULL); + +#ifdef CONFIG_CGROUP_WRITEBACK + if (!bdi_debug_root) + return; + + memcg_blkcg_file = debugfs_create_file("bdi_wb_link", 0444, bdi_debug_root, + NULL, &memcg_blkcg_debug_fops); +#endif } static int bdi_debug_stats_show(struct seq_file *m, void *v) @@ -509,6 +522,40 @@ struct memcg_blkcg_link { static RADIX_TREE(memcg_blkcg_tree, GFP_ATOMIC); static DEFINE_SPINLOCK(memcg_blkcg_tree_lock); +static int memcg_blkcg_link_show(struct seq_file *m, void *v) +{ + struct memcg_blkcg_link *link; + struct radix_tree_iter iter; + void **slot; + + seq_puts(m, "memory <---> blkio\n"); + rcu_read_lock(); + 
radix_tree_for_each_slot(slot, &memcg_blkcg_tree, &iter, 0) { + link = *slot; + seq_printf(m, "%s:%5lu <---> %s:%5lu\n", + link->memcg_css->cgroup->kn->name, + kernfs_ino(link->memcg_css->cgroup->kn), + (link->blkcg_css == blkcg_root_css) ? + "root" : link->blkcg_css->cgroup->kn->name, + kernfs_ino(link->blkcg_css->cgroup->kn)); + } + rcu_read_unlock(); + + return 0; +} + +static int memcg_blkcg_link_open(struct inode *inode, struct file *file) +{ + return single_open(file, memcg_blkcg_link_show, inode->i_private); +} + +static const struct file_operations memcg_blkcg_debug_fops = { + .open = memcg_blkcg_link_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links) { struct memcg_blkcg_link *link; @@ -562,6 +609,9 @@ void insert_memcg_blkcg_link(struct cgroup_subsys *ss, } rcu_read_unlock(); + trace_insert_memcg_blkcg_link(memcg_css, blkcg_css, + link ? link->blkcg_css : NULL); + spin_lock(&memcg_blkcg_tree_lock); if (link) { radix_tree_delete(&memcg_blkcg_tree, memcg_css->id); -- Gitee From 221258833936c2ac8e54f9b9c8298092a9ec18ac Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 23 Feb 2024 10:30:11 +0800 Subject: [PATCH 0111/2138] anolis: fs/writeback: fix double free of blkcg_css MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8327 We have gotten a WARNNING when releasing blkcg_css: [332489.681635] WARNING: CPU: 55 PID: 14859 at lib/list_debug.c:56 __list_del_entry+0x81/0xc0 [332489.682191] list_del corruption, ffff883e6b94d450->prev is LIST_POISON2 (dead000000000200) ...... 
[332489.683895] CPU: 55 PID: 14859 Comm: kworker/55:2 Tainted: G [332489.684477] Hardware name: Inspur SA5248M4/X10DRT-PS, BIOS 4.05A 10/11/2016 [332489.685061] Workqueue: cgroup_destroy css_release_work_fn [332489.685654]  ffffc9001d92bd28 ffffffff81380042 ffffc9001d92bd78 0000000000000000 [332489.686269]  ffffc9001d92bd68 ffffffff81088f8b 0000003800000000 ffff883e6b94d4a0 [332489.686867]  ffff883e6b94d400 ffffffff81ce8fe0 ffff88375b24f400 ffff883e6b94d4a0 [332489.687479] Call Trace: [332489.688078]  [] dump_stack+0x63/0x81 [332489.688681]  [] __warn+0xcb/0xf0 [332489.689276]  [] warn_slowpath_fmt+0x5f/0x80 [332489.689877]  [] __list_del_entry+0x81/0xc0 [332489.690481]  [] css_release_work_fn+0x42/0x140 [332489.691090]  [] process_one_work+0x189/0x420 [332489.691693]  [] worker_thread+0x4e/0x4b0 [332489.692293]  [] ? process_one_work+0x420/0x420 [332489.692905]  [] kthread+0xe6/0x100 [332489.693504]  [] ? kthread_park+0x60/0x60 [332489.694099]  [] ret_from_fork+0x41/0x50 [332489.694722] ---[ end trace 0cf869c4a5cfba87 ]--- ...... This is caused by calling css_get after the css is killed by another thread described below: Thread 1 Thread 2 cgroup_rmdir -> kill_css -> percpu_ref_kill_and_confirm -> css_killed_ref_fn css_killed_work_fn -> css_put -> css_release wb_get_create -> find_blkcg_css -> css_get -> css_put -> css_release (double free) -> css_release_workfn -> css_free_work_fn -> blkcg_css_free When doublefree happened, it may free the memory still used by other threads and cause a kernel panic. Fix this by using css_tryget_online in find_blkcg_css while will return false if the css is killed. 
Signed-off-by: Jiufei Xue Signed-off-by: Joseph Qi Acked-by: Jingbo Xu Link: https://gitee.com/anolis/cloud-kernel/pulls/2784 --- mm/backing-dev.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 6fff90c68c77..6c269c011181 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -689,15 +689,21 @@ static struct cgroup_subsys_state *find_blkcg_css(struct cgroup_subsys_state *me rcu_read_lock(); link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id); - if (link) + if (link) { blkcg_css = link->blkcg_css; - else - blkcg_css = blkcg_root_css; + if (css_tryget_online(blkcg_css)) + goto out; + } + /* + * If not blkcg_root_css and tryget failed, + * get a reference of blkcg_root_css and return. + */ + blkcg_css = blkcg_root_css; css_get(blkcg_css); +out: rcu_read_unlock(); - return blkcg_css; } -- Gitee From a93927eda760343240c58be4b3a4edc0bcff45a8 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 23 Feb 2024 10:30:53 +0800 Subject: [PATCH 0112/2138] anolis: fs/writeback: Attach inode's wb to root if needed ANBZ: #8327 There might have tons of files queued in the writeback, awaiting for writing back. Unfortunately, the writeback's cgroup has been dead. In this case, we reassociate the inode with another writeback, but we possibly can't because the writeback associated with the dead cgroup is the only valid one. In this case, the new writeback is allocated, initialized and associated with the inode in the non-stopping fashion until all data resident in the inode's page cache are flushed to disk. It causes unnecessary high system load. This fixes the issue by enforce moving the inode to root cgroup when the previous binding cgroup becomes dead. 
With it, no more unnecessary writebacks are created, populated and the system load decreased by about 6x in the test case we carried out: Without the patch: 30% system load With the patch: 5% system load Signed-off-by: luanshi Signed-off-by: Jiufei Xue Signed-off-by: Joseph Qi Acked-by: Jingbo Xu Link: https://gitee.com/anolis/cloud-kernel/pulls/2784 --- fs/fs-writeback.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 0a498bc60f55..4e9c22e10b96 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -858,6 +858,16 @@ void wbc_detach_inode(struct writeback_control *wbc) inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX); inode->i_wb_frn_history = history; + /* + * Without wb list lock i_wb can switch at any point, so it can + * judge on the wrong wb anyway. + * + * The wb is switched to the root memcg unconditionally. We expect + * the correct wb (best candidate) is picked up in next round. + */ + if (wb == inode->i_wb && wb_dying(wb) && !(inode->i_state & I_DIRTY_ALL)) + inode_switch_wbs(inode, root_mem_cgroup->css.id); + wb_put(wbc->wb); wbc->wb = NULL; } -- Gitee From 18e32cffed8491814c6680043ed56bb4a6a54fd0 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 23 Feb 2024 10:34:36 +0800 Subject: [PATCH 0113/2138] anolis: writeback: introduce cgwb_v1 boot param ANBZ: #8327 So far writeback control is supported for cgroup v1 interface. However it also has some restrictions, so introduce a new kernel boot parameter to control the behavior which is disabled by default. Users can enable the writeback control for cgroup v1 with the command line "cgwb_v1". 
Signed-off-by: Jiufei Xue Signed-off-by: Joseph Qi Acked-by: Jingbo Xu Link: https://gitee.com/anolis/cloud-kernel/pulls/2784 --- .../admin-guide/kernel-parameters.txt | 3 +++ include/linux/backing-dev.h | 19 ++++++++++++++++++- mm/backing-dev.c | 12 ++++++++++++ mm/memcontrol.c | 13 ++++++++++++- 4 files changed, 45 insertions(+), 2 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 1a6639eb4b6c..302697c9397b 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -586,6 +586,9 @@ nokmem -- Disable kernel memory accounting. nobpf -- Disable BPF memory accounting. + cgwb_v1 Enable writeback control for cgroup for cgroup v1 + interface. + checkreqprot= [SELINUX] Set initial checkreqprot flag value. Format: { "0" | "1" } See security/selinux/Kconfig help text. diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index a28d2248ed82..fc826261b21f 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -159,6 +159,21 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, void wb_memcg_offline(struct mem_cgroup *memcg); void wb_blkcg_offline(struct cgroup_subsys_state *css); +extern bool cgwb_v1; + +static inline bool memcg_blkcg_on_dfl(void) +{ + return cgroup_subsys_on_dfl(memory_cgrp_subsys) && + cgroup_subsys_on_dfl(io_cgrp_subsys); +} + +static inline bool cgroup_writeback_support_v1(void) +{ + return cgwb_v1 && + !cgroup_subsys_on_dfl(memory_cgrp_subsys) && + !cgroup_subsys_on_dfl(io_cgrp_subsys); +} + /** * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode * @inode: inode of interest @@ -174,7 +189,9 @@ static inline bool inode_cgwb_enabled(struct inode *inode) { struct backing_dev_info *bdi = inode_to_bdi(inode); - return (bdi->capabilities & BDI_CAP_WRITEBACK) && + return (memcg_blkcg_on_dfl() || + cgroup_writeback_support_v1()) && + 
(bdi->capabilities & BDI_CAP_WRITEBACK) && (inode->i_sb->s_iflags & SB_I_CGROUPWB); } diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 6c269c011181..f032314fcbf2 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -54,6 +54,9 @@ static void bdi_debug_init(void) if (!bdi_debug_root) return; + if (!cgwb_v1) + return; + memcg_blkcg_file = debugfs_create_file("bdi_wb_link", 0444, bdi_debug_root, NULL, &memcg_blkcg_debug_fops); #endif @@ -561,6 +564,9 @@ int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links) struct memcg_blkcg_link *link; int i; + if (!cgwb_v1) + return 0; + for (i = 0; i < count; i++) { link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) { @@ -588,6 +594,9 @@ void insert_memcg_blkcg_link(struct cgroup_subsys *ss, struct cgroup_subsys_state *memcg_css; int err; + if (!cgwb_v1) + return; + if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id) return; @@ -673,6 +682,9 @@ static void delete_blkcg_link(struct cgroup_subsys_state *blkcg_css) void delete_memcg_blkcg_link(struct cgroup_subsys *ss, struct cgroup_subsys_state *css) { + if (!cgwb_v1) + return; + if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id) return; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a198f53860c7..fe9230e6096a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -371,7 +371,8 @@ struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio) { struct mem_cgroup *memcg = folio_memcg(folio); - if (!memcg) + if (!memcg || + (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgwb_v1)) memcg = root_mem_cgroup; return &memcg->css; @@ -7416,6 +7417,16 @@ static int __init cgroup_memory(char *s) } __setup("cgroup.memory=", cgroup_memory); +bool cgwb_v1; + +static int __init enable_cgroup_writeback_v1(char *s) +{ + cgwb_v1 = true; + + return 0; +} +__setup("cgwb_v1", enable_cgroup_writeback_v1); + /* * subsys_initcall() for memory controller. 
* -- Gitee From 304f2a8790f94099f7da1b99a0ebef418dff2a14 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 23 Feb 2024 10:39:04 +0800 Subject: [PATCH 0114/2138] anolis: fs/writeback: wrap cgroup writeback v1 logic ANBZ: #8327 Wrap cgroup writeback v1 logic to prevent build errors without CONFIG_CGROUPS or CONFIG_CGROUP_WRITEBACK. Signed-off-by: Hao Xu Signed-off-by: Joseph Qi Acked-by: Jingbo Xu Link: https://gitee.com/anolis/cloud-kernel/pulls/2784 --- include/linux/backing-dev.h | 2 ++ include/trace/events/writeback.h | 2 ++ mm/memcontrol.c | 8 +++++++- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index fc826261b21f..506f89a99a6c 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -390,6 +390,7 @@ static inline void wb_blkcg_offline(struct cgroup_subsys_state *css) { } +#ifdef CONFIG_CGROUPS static inline void insert_memcg_blkcg_link(struct cgroup_subsys *ss, struct list_head *tmp_links, struct css_set *cset) @@ -409,6 +410,7 @@ static inline void delete_memcg_blkcg_link(struct cgroup_subsys *ss, struct cgroup_subsys_state *css) { } +#endif #endif /* CONFIG_CGROUP_WRITEBACK */ diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index acd558657cf3..0e190e112dc4 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -441,6 +441,7 @@ TRACE_EVENT(writeback_bdi_register, ) ); +#ifdef CONFIG_CGROUP_WRITEBACK TRACE_EVENT(insert_memcg_blkcg_link, TP_PROTO(struct cgroup_subsys_state *memcg_css, struct cgroup_subsys_state *blkcg_css, @@ -461,6 +462,7 @@ TRACE_EVENT(insert_memcg_blkcg_link, __entry->memcg_ino, __entry->blkcg_ino, __entry->old_blkcg_ino ) ); +#endif DECLARE_EVENT_CLASS(wbc_class, TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), diff --git a/mm/memcontrol.c b/mm/memcontrol.c index fe9230e6096a..8adc1af822c4 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -371,8 +371,12 
@@ struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio) { struct mem_cgroup *memcg = folio_memcg(folio); +#ifdef CONFIG_CGROUP_WRITEBACK if (!memcg || - (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgwb_v1)) + (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgwb_v1)) +#else + if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) +#endif memcg = root_mem_cgroup; return &memcg->css; @@ -7417,6 +7421,7 @@ static int __init cgroup_memory(char *s) } __setup("cgroup.memory=", cgroup_memory); +#ifdef CONFIG_CGROUP_WRITEBACK bool cgwb_v1; static int __init enable_cgroup_writeback_v1(char *s) @@ -7426,6 +7431,7 @@ static int __init enable_cgroup_writeback_v1(char *s) return 0; } __setup("cgwb_v1", enable_cgroup_writeback_v1); +#endif /* * subsys_initcall() for memory controller. -- Gitee From 5bdbf82b2ba7731ab2e3855bcec45205a888650c Mon Sep 17 00:00:00 2001 From: "Carrie.Cai" Date: Tue, 6 Feb 2024 18:20:08 +0800 Subject: [PATCH 0115/2138] anolis: config: modify defconfig files for Mont-TSSE Driver ANBZ: #8156 Signed-off-by: Carrie.Cai Reviewed-by: Tianjia Zhang Reviewed-by: Xuchun Shang Link: https://gitee.com/anolis/cloud-kernel/pulls/2746 --- arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 2931e96d0ba8..ecdf002bc8a9 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -7113,6 +7113,7 @@ CONFIG_CRYPTO_DEV_QAT_C62X=m CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m CONFIG_CRYPTO_DEV_QAT_C62XVF=m +CONFIG_CRYPTO_DEV_TSSE=m CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_VIRTIO is not set # CONFIG_CRYPTO_DEV_SAFEXCEL is not set diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index b031bb4585c3..3bafc6bbc1fe 100644 --- a/arch/x86/configs/anolis_defconfig +++ 
b/arch/x86/configs/anolis_defconfig @@ -7104,6 +7104,7 @@ CONFIG_CRYPTO_DEV_QAT_C62X=m CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m CONFIG_CRYPTO_DEV_QAT_C62XVF=m +CONFIG_CRYPTO_DEV_TSSE=m CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_VIRTIO is not set # CONFIG_CRYPTO_DEV_SAFEXCEL is not set -- Gitee From 54b29e38247b87242994ffe9c0d60b03383b9bf7 Mon Sep 17 00:00:00 2001 From: "Carrie.Cai" Date: Tue, 6 Feb 2024 18:23:13 +0800 Subject: [PATCH 0116/2138] anolis: crypto: add support for Mont-TSSE driver ANBZ: #8156 Mont-TSSE(TM) is a high speed crypto algorithm accelerator, it supports SM2/3/4, AES and SHA algorithms. Mont-TSSE(TM) has 32 symmetric and 6 asymmetric crypto acceleration engines. The interface of Mont-TSSE(TM) system is PCIe 5.0x8. A total processing throughput is up to 200Gbps. Signed-off-by: Carrie.Cai Reviewed-by: Tianjia Zhang Reviewed-by: Xuchun Shang Link: https://gitee.com/anolis/cloud-kernel/pulls/2746 --- drivers/crypto/Kconfig | 1 + drivers/crypto/Makefile | 1 + drivers/crypto/montage/Kconfig | 3 + drivers/crypto/montage/Makefile | 3 + drivers/crypto/montage/tsse/Kconfig | 9 + drivers/crypto/montage/tsse/Makefile | 15 + drivers/crypto/montage/tsse/tsse_dev.h | 101 +++ drivers/crypto/montage/tsse/tsse_dev_drv.c | 328 ++++++++++ drivers/crypto/montage/tsse/tsse_dev_drv.h | 25 + drivers/crypto/montage/tsse/tsse_dev_mgr.c | 229 +++++++ drivers/crypto/montage/tsse/tsse_fw_service.c | 163 +++++ drivers/crypto/montage/tsse/tsse_fw_service.h | 17 + drivers/crypto/montage/tsse/tsse_ipc.c | 221 +++++++ drivers/crypto/montage/tsse/tsse_ipc.h | 112 ++++ drivers/crypto/montage/tsse/tsse_irq.c | 30 + drivers/crypto/montage/tsse/tsse_irq.h | 30 + drivers/crypto/montage/tsse/tsse_log.h | 24 + drivers/crypto/montage/tsse/tsse_service.c | 30 + drivers/crypto/montage/tsse/tsse_service.h | 16 + drivers/crypto/montage/tsse/tsse_vuart.c | 596 ++++++++++++++++++ drivers/crypto/montage/tsse/tsse_vuart.h | 23 + 
drivers/crypto/montage/tsse/tsse_vuart_regs.h | 72 +++ 22 files changed, 2049 insertions(+) create mode 100644 drivers/crypto/montage/Kconfig create mode 100644 drivers/crypto/montage/Makefile create mode 100644 drivers/crypto/montage/tsse/Kconfig create mode 100644 drivers/crypto/montage/tsse/Makefile create mode 100644 drivers/crypto/montage/tsse/tsse_dev.h create mode 100644 drivers/crypto/montage/tsse/tsse_dev_drv.c create mode 100644 drivers/crypto/montage/tsse/tsse_dev_drv.h create mode 100644 drivers/crypto/montage/tsse/tsse_dev_mgr.c create mode 100644 drivers/crypto/montage/tsse/tsse_fw_service.c create mode 100644 drivers/crypto/montage/tsse/tsse_fw_service.h create mode 100644 drivers/crypto/montage/tsse/tsse_ipc.c create mode 100644 drivers/crypto/montage/tsse/tsse_ipc.h create mode 100644 drivers/crypto/montage/tsse/tsse_irq.c create mode 100644 drivers/crypto/montage/tsse/tsse_irq.h create mode 100644 drivers/crypto/montage/tsse/tsse_log.h create mode 100644 drivers/crypto/montage/tsse/tsse_service.c create mode 100644 drivers/crypto/montage/tsse/tsse_service.h create mode 100644 drivers/crypto/montage/tsse/tsse_vuart.c create mode 100644 drivers/crypto/montage/tsse/tsse_vuart.h create mode 100644 drivers/crypto/montage/tsse/tsse_vuart_regs.h diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index c761952f0dc6..b03f7ed92793 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -796,5 +796,6 @@ config CRYPTO_DEV_SA2UL source "drivers/crypto/aspeed/Kconfig" source "drivers/crypto/starfive/Kconfig" +source "drivers/crypto/montage/Kconfig" endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index d859d6a5f3a4..94c8b187f739 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -51,3 +51,4 @@ obj-y += hisilicon/ obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/ obj-y += intel/ obj-y += starfive/ +obj-y += montage/ diff --git a/drivers/crypto/montage/Kconfig b/drivers/crypto/montage/Kconfig 
new file mode 100644 index 000000000000..e8e4b287a792 --- /dev/null +++ b/drivers/crypto/montage/Kconfig @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +source "drivers/crypto/montage/tsse/Kconfig" diff --git a/drivers/crypto/montage/Makefile b/drivers/crypto/montage/Makefile new file mode 100644 index 000000000000..a50415fe10c7 --- /dev/null +++ b/drivers/crypto/montage/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CRYPTO_DEV_TSSE) += tsse/ diff --git a/drivers/crypto/montage/tsse/Kconfig b/drivers/crypto/montage/tsse/Kconfig new file mode 100644 index 000000000000..5854f8e4525c --- /dev/null +++ b/drivers/crypto/montage/tsse/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +config CRYPTO_DEV_TSSE + tristate "Support for Montage(R) TSSE" + depends on X86 && PCI + select FW_LOADER + help + Support for Montage(R) TSSE for accelerating crypto workloads. + + To compile this as a module, choose M here. \ No newline at end of file diff --git a/drivers/crypto/montage/tsse/Makefile b/drivers/crypto/montage/tsse/Makefile new file mode 100644 index 000000000000..d67ffde3a5b0 --- /dev/null +++ b/drivers/crypto/montage/tsse/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# This file is part of tsse driver for Linux +# +# Copyright © 2023 Montage Technology. All rights reserved. + +obj-m += tsse.o + +tsse-objs := tsse_dev_mgr.o \ + tsse_ipc.o \ + tsse_fw_service.o \ + tsse_service.o \ + tsse_irq.o \ + tsse_dev_drv.o \ + tsse_vuart.o diff --git a/drivers/crypto/montage/tsse/tsse_dev.h b/drivers/crypto/montage/tsse/tsse_dev.h new file mode 100644 index 000000000000..d1dafee61300 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
+ */ + +#ifndef __TSSE_DEV_H__ +#define __TSSE_DEV_H__ +#include +#include +#include +#include +#include +#include +#include +#include "tsse_ipc.h" + +#define TSSE_PCI_MAX_BARS 4 +#define TSSE_FW_VERSION_LEN 32 +struct tsse_bar { + void __iomem *virt_addr; + resource_size_t addr; + resource_size_t size; +}; +struct tsse_dev_pci { + struct pci_dev *pci_dev; + struct tsse_bar bars[TSSE_PCI_MAX_BARS]; + u8 revid; +}; +enum tsse_dev_status_bit { + TSSE_DEV_STATUS_STARTING = 0, + TSSE_DEV_STATUS_STARTED = 1 + +}; +struct tsse_qpairs_bank { + struct tsse_dev *tsse_dev; + void __iomem *reg_base; + + u32 num_qparis; + u32 irq_vec; +}; +struct tsse_dev { + struct module *owner; + struct dentry *debugfs_dir; + unsigned long status; + struct list_head list; + struct tsse_dev_pci tsse_pci_dev; + struct tsse_qpairs_bank qpairs_bank; + atomic_t ref_count; + bool is_vf; + int id; + u32 num_irqs; + u32 num_vfs; + struct uart_port *port; + struct tsse_ipc *ipc; + void *adi; + void *mbx_hw; + const struct firmware *fw; + char fw_version[TSSE_FW_VERSION_LEN]; +}; +#define TSSEDEV_TO_DEV(tssedev) (&((tssedev)->tsse_pci_dev.pci_dev->dev)) +#define TSSE_DEV_BARS(tssedev) ((tssedev)->tsse_pci_dev.bars) + +#include "tsse_log.h" + +struct list_head *tsse_devmgr_get_head(void); + +int tsse_dev_get(struct tsse_dev *tsse_dev); +void tsse_dev_put(struct tsse_dev *tsse_dev); +int tsse_devmgr_add_dev(struct tsse_dev *tsse_dev); +void tsse_devmgr_rm_dev(struct tsse_dev *tdev); +int tsse_prepare_restart_dev(struct tsse_dev *tdev); +int tsse_start_dev(struct tsse_dev *tdev); +struct tsse_dev *get_tssedev(int id); + +static inline struct tsse_dev *pci_to_tsse_dev(struct pci_dev *pci_dev) +{ + return (struct tsse_dev *)pci_get_drvdata(pci_dev); +} + +static inline int tsse_get_cur_node(void) +{ + int cpu, node; + + cpu = get_cpu(); + node = topology_physical_package_id(cpu); + put_cpu(); + + return node; +} + +static inline int tsse_dev_started(struct tsse_dev *tdev) +{ + return 
test_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); +} +static inline int tsse_dev_in_use(struct tsse_dev *tdev) +{ + return atomic_read(&tdev->ref_count) != 0; +} +#endif diff --git a/drivers/crypto/montage/tsse/tsse_dev_drv.c b/drivers/crypto/montage/tsse/tsse_dev_drv.c new file mode 100644 index 000000000000..9e914576a129 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_drv.c @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "tsse_dev_drv.h" +#include "tsse_vuart.h" +#include "tsse_ipc.h" +#include "tsse_fw_service.h" + +static DEFINE_IDA(tsse_ida); + +static inline void tsse_qpair_enable_pf(struct tsse_dev *tdev, bool enable) +{ + writel(enable ? 1 : 0, + TSSE_DEV_BARS(tdev)[2].virt_addr + 0x5780000 + 0x50000); +} +static int tsse_sriov_disable(struct tsse_dev *tdev) +{ + pci_disable_sriov(tdev->tsse_pci_dev.pci_dev); + tsse_qpair_enable_pf(tdev, true); + + return 0; +} + +static int tsse_sriov_configure(struct pci_dev *pdev, int num_vfs_param) +{ + int totalvfs = pci_sriov_get_totalvfs(pdev); + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + int ret = 0; + + if ((!tdev) || (num_vfs_param < 0) || (totalvfs <= 0)) { + dev_err(&pdev->dev, + "%s %d: failed to config sriov, tdev=%p totalvfs=%d num_vfs_param=%d\n", + __func__, __LINE__, tdev, totalvfs, num_vfs_param); + return -EBADE; + } + + if (num_vfs_param > totalvfs) + num_vfs_param = totalvfs; + + dev_info(&pdev->dev, "%s %d: has total %d vfs, and enable %d vfs\n", + __func__, __LINE__, totalvfs, num_vfs_param); + + if ((num_vfs_param > TSSE_PF_MAX_IRQ_NUM) || + (num_vfs_param > TSSE_PF_MAX_QPAIR_NUM)) { + tsse_dev_err( + tdev, + "vfs number is greater than pf's \"max_irq_num=%d or max_qpairs_num=%d\"\n", + TSSE_PF_MAX_IRQ_NUM, TSSE_PF_MAX_QPAIR_NUM); + return -EBADE; + } + + if 
(!tsse_dev_started(tdev)) { + dev_err(&pdev->dev, "%s %d: device is not started\n", __func__, + __LINE__); + return -EBADE; + } + + if (tsse_dev_in_use(tdev)) { + dev_err(&pdev->dev, "%s %d: device is busy\n", __func__, + __LINE__); + return -EBUSY; + } + + tsse_sriov_disable(tdev); + + tsse_prepare_restart_dev(tdev); + + tdev->num_vfs = num_vfs_param; + + if (tdev->num_vfs > 0) { + tdev->num_irqs = TSSE_SRIOV_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_SRIOV_PF_MAX_QPAIR_NUM; + } else { + tdev->num_irqs = TSSE_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_PF_MAX_QPAIR_NUM; + } + + tsse_dev_info( + tdev, + "num_irqs:%u num_qparis:%u qpairs' start irq vector index:%u qpairs' reg base:0x%lx\n", + tdev->num_irqs, tdev->qpairs_bank.num_qparis, + tdev->qpairs_bank.irq_vec, (ulong)tdev->qpairs_bank.reg_base); + + ret = tsse_start_dev(tdev); + if (ret) { + dev_err(&pdev->dev, "%s %d: failed to start the device\n", + __func__, __LINE__); + return ret; + } + + if (num_vfs_param > 0) { + tsse_qpair_enable_pf(tdev, false); + pci_enable_sriov(pdev, num_vfs_param); + } + + return num_vfs_param; +} + +static int device_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int status = 0; + int bar; + u32 tmp_val; + struct tsse_dev *tdev; + + if (!pdev->is_physfn) { + dev_err(&pdev->dev, "%s %d: this is not Physical fn\n", + __func__, __LINE__); + return -EPERM; + } + + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + dev_err(&pdev->dev, + "%s %d: invalid numa configuration for tsse\n", + __func__, __LINE__); + return -EINVAL; + } + + tdev = kzalloc_node(sizeof(*tdev), GFP_KERNEL, dev_to_node(&pdev->dev)); + + if (!tdev) + return -ENOMEM; + + status = pcim_enable_device(pdev); + + if (status) { + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + goto out_err; + } + + pci_set_master(pdev); + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(48))) { + if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, + "failed to 
set tsse dma address width\n"); + status = -EFAULT; + goto out_err; + } else { + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + } + + } else { + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(48)); + } + + dma_set_max_seg_size(&pdev->dev, UINT_MAX); + + status = pcim_iomap_regions(pdev, BIT(0) | BIT(2), TSSE_DEV_NAME); + if (status) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + goto out_err; + } + + for (bar = 2; bar < 4;) { + TSSE_DEV_BARS(tdev)[bar].addr = pci_resource_start(pdev, bar); + TSSE_DEV_BARS(tdev)[bar].size = pci_resource_len(pdev, bar); + TSSE_DEV_BARS(tdev) + [bar].virt_addr = pcim_iomap_table(pdev)[bar]; + + dev_info(&pdev->dev, + "bar[%d]: addr=0x%llx, size=0x%llx, virt_addr=0x%lx\n", + bar, TSSE_DEV_BARS(tdev)[bar].addr, + TSSE_DEV_BARS(tdev)[bar].size, + (ulong)TSSE_DEV_BARS(tdev)[bar].virt_addr); + + bar += 2; + } + + tdev->owner = THIS_MODULE; + tdev->is_vf = false; + tdev->tsse_pci_dev.pci_dev = pdev; + tdev->id = ida_alloc(&tsse_ida, GFP_KERNEL); + if (tdev->id < 0) { + dev_err(&pdev->dev, "Unable to get id\n"); + status = tdev->id; + goto out_err; + } + + pci_set_drvdata(pdev, tdev); + + tdev->num_irqs = TSSE_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_PF_MAX_QPAIR_NUM; + tdev->qpairs_bank.irq_vec = TSSE_PF_QPAIR_START_IRQ_VECTOR; + tdev->qpairs_bank.reg_base = + TSSE_DEV_BARS(tdev)[2].virt_addr + TSSE_PF_QPAIR_REG_BASE; + + tsse_qpair_enable_pf(tdev, true); + + tsse_dev_info( + tdev, + "num_irqs:%u num_qparis:%u qpairs' start irq vector index:%u qpairs' reg base:0x%lx\n", + tdev->num_irqs, tdev->qpairs_bank.num_qparis, + tdev->qpairs_bank.irq_vec, (ulong)tdev->qpairs_bank.reg_base); + + if (tsse_devmgr_add_dev(tdev)) { + dev_err(&pdev->dev, + "%s %d: tsse_devmgr failed to add new device\n", + __func__, __LINE__); + status = -EFAULT; + goto out_err_ida_free; + } + + if (vuart_init_port(pdev)) { + dev_err(&pdev->dev, + "%s %d: vuart_init_port failed to init vuart.\n", + __func__, __LINE__); + status = -EFAULT; 
+ goto out_err_port_init; + } + /* Its result not break driver init process */ + if (!tsse_fw_load(pdev)) + get_firmware_version((char *)tdev->fw->data, tdev->fw->size, tdev->fw_version); + + if (tsse_ipc_init(pdev)) { + dev_err(&pdev->dev, + "%s %d: tsse_ipc_init failed to tsse_ipc.\n", __func__, + __LINE__); + status = -EFAULT; + goto out_err_ipc; + } + + tsse_dev_info(tdev, "successful\n"); + + pci_read_config_dword(pdev, 0x720, &tmp_val); + tsse_dev_dbg(tdev, "the value of FILTER_MASK_2_REG is 0x%x\n", tmp_val); + + return 0; + +out_err_ipc: + vuart_uninit_port(pdev); +out_err_port_init: + tsse_devmgr_rm_dev(tdev); +out_err_ida_free: + ida_free(&tsse_ida, tdev->id); +out_err: + kfree(tdev); + return status; +} + +static void device_remove(struct pci_dev *pdev) +{ + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + + pr_info("%s %d: pci_dev 0x%lx tsse_dev 0x%lx\n", __func__, __LINE__, + (ulong)pdev, (ulong)tdev); + + tsse_sriov_disable(tdev); + if (tdev->fw) { + release_firmware(tdev->fw); + tdev->fw = NULL; + } + tsse_ipc_deinit(tdev); + vuart_uninit_port(pdev); + tsse_devmgr_rm_dev(tdev); + ida_free(&tsse_ida, tdev->id); + kfree(tdev); + dev_info(&pdev->dev, "%s %d: successful\n", __func__, __LINE__); +} + +static const struct pci_device_id pci_ids[] = { + { + PCI_DEVICE(0x1b00, 0xc011), + }, + { + PCI_DEVICE(0x1b00, 0xd011), + }, + { 0 } +}; + +static struct pci_driver pci_driver = { + .name = TSSE_DEV_NAME, + .id_table = pci_ids, + .probe = device_probe, + .remove = device_remove, + .sriov_configure = tsse_sriov_configure, +}; + +MODULE_DEVICE_TABLE(pci, pci_ids); + +static int __init tsse_init(void) +{ + int status; + + status = vuart_register(); + if (status) { + pr_err("vuart_register failed[%d].\n", status); + return status; + } + + status = pci_register_driver(&pci_driver); + if (status) { + vuart_unregister(); + return status; + } + + pr_info(KBUILD_MODNAME ": loaded.\n"); + + return 0; +} + +static void __exit tsse_exit(void) +{ + 
pci_unregister_driver(&pci_driver); + vuart_unregister(); + + pr_info(KBUILD_MODNAME ": unloaded.\n"); +} + +module_init(tsse_init); +module_exit(tsse_exit); + +MODULE_AUTHOR("montage-tech.com"); +MODULE_DESCRIPTION("TSSE device driver"); +MODULE_VERSION("1.0.0"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(TSSE_FIRMWARE); diff --git a/drivers/crypto/montage/tsse/tsse_dev_drv.h b/drivers/crypto/montage/tsse/tsse_dev_drv.h new file mode 100644 index 000000000000..6a05572a3849 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_drv.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_DEV_DRV_H__ +#define __TSSE_DEV_DRV_H__ +#define TSSE_DEV_NAME "tsse" + +// TODO: need to support full qpairs +#define TSSE_PF_MAX_QPAIR_NUM 16 + +#define TSSE_PF_MAX_IRQ_NUM 96 +#define TSSE_PF_QPAIR_START_IRQ_VECTOR 32 + +#define TSSE_SRIOV_PF_MAX_QPAIR_NUM 0 +#define TSSE_SRIOV_PF_MAX_IRQ_NUM 16 + +#define TSSE_PF_QPAIR_REG_BASE 0x5700000 + +#include "tsse_dev.h" + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_dev_mgr.c b/drivers/crypto/montage/tsse/tsse_dev_mgr.c new file mode 100644 index 000000000000..159f75c8f46f --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_mgr.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "tsse_dev.h" +#include "tsse_irq.h" +static DEFINE_MUTEX(tsse_dev_table_lock); +static LIST_HEAD(tsse_dev_table); + +static DEFINE_MUTEX(algs_lock); + +static inline void tsse_list_del(struct list_head *entry) +{ + WRITE_ONCE(entry->next->prev, entry->prev); + WRITE_ONCE(entry->prev->next, entry->next); +} +static inline void tsse_list_add(struct list_head *new, struct list_head *prev, + struct list_head *next) +{ + WRITE_ONCE(new->next, next); + WRITE_ONCE(new->prev, prev); + mb(); /* Make sure new node updates first */ + WRITE_ONCE(next->prev, new); + WRITE_ONCE(prev->next, new); +} + +static inline void tsse_list_add_tail(struct list_head *new, + struct list_head *head) +{ + tsse_list_add(new, head->prev, head); +} + +static int tsse_dev_pf_get(struct tsse_dev *vf_tsse_dev) +{ + int ret = 0; + struct tsse_dev *pf_tsse_dev = NULL; + struct pci_dev *pf_pci_dev = NULL; + + pf_pci_dev = vf_tsse_dev->tsse_pci_dev.pci_dev->physfn; + + if (!pf_pci_dev) + return 0; + + pf_tsse_dev = pci_to_tsse_dev(pf_pci_dev); + if (pf_tsse_dev) { + if (atomic_add_return(1, &pf_tsse_dev->ref_count) == 1) { + if (!try_module_get(pf_tsse_dev->owner)) + ret = -EFAULT; + } + } + return ret; +} + +static void tsse_dev_pf_put(struct tsse_dev *vf_tsse_dev) +{ + struct tsse_dev *pf_tsse_dev = NULL; + struct pci_dev *pf_pci_dev = NULL; + + pf_pci_dev = vf_tsse_dev->tsse_pci_dev.pci_dev->physfn; + + if (!pf_pci_dev) + return; + + pf_tsse_dev = pci_to_tsse_dev(pf_pci_dev); + if (pf_tsse_dev) { + if (atomic_sub_return(1, &pf_tsse_dev->ref_count) == 0) + module_put(pf_tsse_dev->owner); + } +} + +int tsse_dev_get(struct tsse_dev *tdev) +{ + int ref_count = atomic_add_return(1, &tdev->ref_count); + + if (!tsse_dev_started(tdev)) { + atomic_sub(1, &tdev->ref_count); + return -EAGAIN; + } + + if (ref_count == 1) { + if (!try_module_get(tdev->owner)) + return -EFAULT; + if (tdev->is_vf) + return tsse_dev_pf_get(tdev); 
+ } + return 0; +} +void tsse_dev_put(struct tsse_dev *tdev) +{ + if (atomic_sub_return(1, &tdev->ref_count) == 0) { + module_put(tdev->owner); + if (tdev->is_vf) + tsse_dev_pf_put(tdev); + } +} + +int tsse_stop_dev(struct tsse_dev *tdev, bool busy_exit) +{ + int times, max_retry = 150; + + clear_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + clear_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + + for (times = 0; times < max_retry; times++) { + if (!tsse_dev_in_use(tdev)) + break; + msleep(100); + } + + if (times >= max_retry) { + tsse_dev_err(tdev, "Failed to stop busy device\n"); + if (busy_exit) + return -EBUSY; + } + if (tdev->qpairs_bank.num_qparis != 0) { + mutex_lock(&tsse_dev_table_lock); + tsse_list_del(&tdev->list); + mutex_unlock(&tsse_dev_table_lock); + tsse_dev_info(tdev, "removed from active dev table list\n"); + } + + tsse_dev_info(tdev, "device stopped\n"); + + return 0; +} + +int tsse_start_dev(struct tsse_dev *tdev) +{ + struct tsse_dev *tmp_dev; + struct list_head *prev_node = &tsse_dev_table; + int ret = 0; + + if (tdev->qpairs_bank.num_qparis == 0) { + set_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + tsse_dev_info(tdev, "device started\n"); + return 0; + } + + set_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + + mutex_lock(&tsse_dev_table_lock); + + list_for_each_entry(tmp_dev, &tsse_dev_table, list) { + if (tmp_dev == tdev) { + ret = -EEXIST; + tsse_dev_err(tdev, + "The device cannot be added repeatedly\n"); + goto clear_status; + } + } + + set_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + tsse_list_add(&tdev->list, prev_node, prev_node->next); + + tsse_dev_info(tdev, "device started\n"); + mutex_unlock(&tsse_dev_table_lock); + + return 0; +clear_status: + mutex_unlock(&tsse_dev_table_lock); + clear_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + clear_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + return ret; +} +EXPORT_SYMBOL_GPL(tsse_start_dev); + +int tsse_prepare_restart_dev(struct tsse_dev *tdev) +{ + return tsse_stop_dev(tdev, 
false); +} +EXPORT_SYMBOL_GPL(tsse_prepare_restart_dev); + +void tsse_devmgr_rm_dev(struct tsse_dev *tdev) +{ + tsse_stop_dev(tdev, false); + tsse_dev_free_irq_vectors(tdev); + msleep(300); +} +EXPORT_SYMBOL_GPL(tsse_devmgr_rm_dev); + +int tsse_devmgr_add_dev(struct tsse_dev *tdev) +{ + int ret; + + ret = tsse_dev_alloc_irq_vectors(tdev); + if (ret == 0) { + atomic_set(&tdev->ref_count, 0); + tdev->status = 0; + ret = tsse_start_dev(tdev); + + if (ret != 0) + tsse_dev_free_irq_vectors(tdev); + } + return ret; +} +EXPORT_SYMBOL_GPL(tsse_devmgr_add_dev); + +struct list_head *tsse_devmgr_get_head(void) +{ + return &tsse_dev_table; +} + +struct tsse_dev *get_tssedev(int id) +{ + struct list_head *itr; + struct tsse_dev *ptr; + + mutex_lock(&tsse_dev_table_lock); + + list_for_each(itr, &tsse_dev_table) { + ptr = list_entry(itr, struct tsse_dev, list); + break; + } + + mutex_unlock(&tsse_dev_table_lock); + + return ptr; +} +EXPORT_SYMBOL_GPL(get_tssedev); diff --git a/drivers/crypto/montage/tsse/tsse_fw_service.c b/drivers/crypto/montage/tsse/tsse_fw_service.c new file mode 100644 index 000000000000..fc3907a7c503 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_fw_service.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tsse_dev.h" +#include "tsse_service.h" + +#define SEARCH_PATTERN "MT_CFG_BUILD_VERSION_DETAIL" +#define SEARCH_PATTERN_LEN 28 + +int fw_send_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg) +{ + u8 *h2d; + u32 int_reg; + u32 rc; + + mutex_lock(&tsseipc->list_lock); + + int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + if ((int_reg & IPC_REGISTER_INT_SET) != 0) { + rc = -1; + mutex_unlock(&tsseipc->list_lock); + return rc; + } + h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET); + memcpy_toio(h2d, msg, sizeof(struct ipc_header)); + memcpy_toio(h2d + sizeof(struct ipc_header), (u32 *)msg->i_data, + msg->header.i_len - sizeof(struct ipc_header)); + writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + + dev_info(tsseipc->dev, "notify device to get firmware\n"); + mutex_unlock(&tsseipc->list_lock); + return 0; +} + +void fw_free(void *msg_t) +{ + struct tsse_msg *tssemsg; + struct ipc_msg *payload; + + payload = (struct ipc_msg *)msg_t; + tssemsg = container_of(payload, struct tsse_msg, ipc_payload); + + kvfree(tssemsg); +} + +int get_firmware_version(char *fw_buffer, uint32_t buffer_len, char *fw_version) +{ + char *pattern; + char *space_ch = " "; + uint32_t pattern_i = 0, buffer_i = 0; + uint32_t pattern_len = SEARCH_PATTERN_LEN - 1; // Not include "\0" + uint32_t version_start = 0; + uint32_t version_len = 0; + + pattern = kzalloc(SEARCH_PATTERN_LEN, GFP_KERNEL); + if (!pattern) + return -1; + + snprintf(pattern, SEARCH_PATTERN_LEN, SEARCH_PATTERN); + + while (buffer_i < buffer_len) { + if (pattern[pattern_i] == fw_buffer[buffer_i]) { + buffer_i++; + pattern_i++; + } + if (pattern_i == pattern_len) { + break; // pattern found + } else if ((buffer_i < buffer_len) && + (pattern[pattern_i] != fw_buffer[buffer_i])) { + // mismatch after pattern_i matches + if (pattern_i != 0) { + // since the pattern has no 
common prefix, when mismatch, + // the next compare should start from pattern beginning + pattern_i = 0; + } else { + buffer_i++; + } + } + } + kfree(pattern); + if (pattern_i == pattern_len) { + buffer_i++; + version_start = buffer_i; + while (buffer_i < buffer_len) { + if (fw_buffer[buffer_i] == space_ch[0]) { + version_len = buffer_i - version_start; + strscpy(fw_version, fw_buffer + version_start, version_len + 1); + return 0; + } + buffer_i++; + } + } + return -1; +} + +void fw_service(void *tsseipc_t, void *msg_t) +{ + void __iomem *fw; + uint32_t size; + uint32_t task_offset; + struct fw_load *fw_task; + struct tsse_dev *tdev; + struct tsse_ipc *tsseipc = (struct tsse_ipc *)tsseipc_t; + struct ipc_msg *msg = (struct ipc_msg *)msg_t; + + task_offset = sizeof(struct msg_info); + fw_task = (struct fw_load *)(msg->i_data + + task_offset / sizeof(uint32_t)); + + tdev = pci_to_tsse_dev(tsseipc->pdev); + if (!tdev || !tdev->fw) { + fw_task->result = 1; + fw_task->size = 0; + dev_info(tsseipc->dev, "firmware loading failed\n"); + fw_send_msg(tsseipc, msg); + fw_free(msg); + return; + } + + fw_task->result = 0; + fw_task->size = tdev->fw->size; + size = tdev->fw->size; + fw = tsseipc->virt_addr + fw_task->offset + FW_BASE; + + memcpy_toio((u8 *)fw, tdev->fw->data, size); + dev_info(tsseipc->dev, "firmware loading done\n"); + fw_send_msg(tsseipc, msg); + fw_free(msg); + + dev_info(tsseipc->dev, "firmware version: %s\n", tdev->fw_version); + + if (tdev->fw) { + release_firmware(tdev->fw); + tdev->fw = NULL; + } +} + +int tsse_fw_load(struct pci_dev *pdev) +{ + int result; + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + + result = request_firmware(&tdev->fw, TSSE_FIRMWARE, &pdev->dev); + if (result) + dev_err(&pdev->dev, "%s failed\n", __func__); + return result; +} diff --git a/drivers/crypto/montage/tsse/tsse_fw_service.h b/drivers/crypto/montage/tsse/tsse_fw_service.h new file mode 100644 index 000000000000..973ca6a0bce9 --- /dev/null +++ 
b/drivers/crypto/montage/tsse/tsse_fw_service.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_FW_SERVICE_H__ +#define __TSSE_FW_SERVICE_H__ + +#define FW_BASE 0x7000000 +#define TSSE_FIRMWARE "tsse_firmware.bin" + +void fw_service(void *tsseipc_t, void *msg_t); +int tsse_fw_load(struct pci_dev *pdev); +int get_firmware_version(char *fw_buffer, uint32_t buffer_len, char *fw_version); +#endif diff --git a/drivers/crypto/montage/tsse/tsse_ipc.c b/drivers/crypto/montage/tsse/tsse_ipc.c new file mode 100644 index 000000000000..0f92c096f211 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_ipc.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include + +#include "tsse_ipc.h" +#include "tsse_dev.h" +#include "tsse_service.h" + +struct tsse_msg *get_msginf(void __iomem *d2h) +{ + uint32_t u_len; + struct tsse_msg *tssemsg; + + struct ipc_header *ipc_info = (struct ipc_header *)d2h; + + u_len = ipc_info->i_len - sizeof(struct ipc_header); + + tssemsg = (struct tsse_msg *)(kzalloc(sizeof(struct tsse_msg) + u_len, + GFP_ATOMIC)); + + if (!tssemsg) { + pr_info("%s(): tssemsg kzalloc failed\n", __func__); + return NULL; + } + + tssemsg->ipc_payload.header.inst_id = ipc_info->inst_id; + tssemsg->ipc_payload.header.tgid = ipc_info->tgid; + tssemsg->ipc_payload.header.i_len = ipc_info->i_len; + + return tssemsg; +} + +void ipc_recieve_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg) +{ + uint32_t u_len = msg->header.i_len - sizeof(struct ipc_header); + uint32_t *msg_data = NULL; + void __iomem *d2h = tsseipc->virt_addr + MAIN2HOST_IPC_OFFSET; + + msg_data = (uint32_t *)(d2h + sizeof(struct ipc_header)); + memcpy_fromio(msg->i_data, msg_data, u_len); 
+ return; + +} + +int msg_rout(struct tsse_ipc *tsseipc, struct tsse_msg *tssemsg) +{ + int ret = 0; + struct ipc_msg *msg; + struct msg_info *info; + uint32_t msg_class; + + msg = &tssemsg->ipc_payload; + + ipc_recieve_msg(tsseipc, msg); + info = (struct msg_info *)msg->i_data; + msg_class = info->msg_class; + if (msg_class == IPC_MESSAGE_BOOT) { + service_rout(tsseipc, msg); + return 0; + } + + return ret; +} + +static irqreturn_t tsse_ipc_d2h_irqhandler(int irq, void *dev_id) +{ + struct tsse_ipc *tsseipc = (struct tsse_ipc *)dev_id; + + writel(0x0, tsseipc->virt_addr + MAIN2HOST_INTR_SET_OFFSET); + tasklet_hi_schedule(&tsseipc->ipc_handle); + dev_err(tsseipc->dev, "irq%d\n", irq); + return IRQ_HANDLED; +} + +bool check_send_enbit(struct tsse_ipc *tsseipc) +{ + u32 int_reg; + + int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + if ((int_reg & IPC_REGISTER_INT_SET) == 0) + return true; + else + return false; +} +EXPORT_SYMBOL(check_send_enbit); + +void notify_device(struct tsse_ipc *tsseipc) +{ + writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + return; + +} +EXPORT_SYMBOL(notify_device); + +void ipc_send_msg(struct tsse_ipc *tsseipc, struct ipc_data *msg) +{ + u8 *h2d = NULL; + + h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET); + memcpy_toio(h2d, msg, sizeof(struct ipc_header)); + memcpy_toio(h2d + sizeof(struct ipc_header), (u32 *)msg->i_ptr, + msg->header.i_len - sizeof(struct ipc_header)); + return; + +} + +void ipc_hw_init(struct tsse_ipc *hw_ipc) +{ + writel(0x1, hw_ipc->virt_addr + MAIN2HOST_INTR_ENABLE_OFFSET); + writel(0x0, hw_ipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + writel(0x0, hw_ipc->virt_addr + MAIN2HOST_INTR_SET_OFFSET); +} + +int ipc_init_msg(struct tsse_ipc *tsseipc) +{ + u8 *h2d; + u32 int_reg; + u32 rc; + u32 cmd_len; + struct ipc_msg *msg; + struct msg_info *info_msg; + + msg = (struct ipc_msg *)(kzalloc( + sizeof(struct ipc_msg) + sizeof(struct msg_info), GFP_ATOMIC)); + + if (!msg) { + pr_info("%s(): 
msg kzalloc failed\n", __func__); + return -1; + } + cmd_len = sizeof(uint32_t); + msg->header.i_len = + sizeof(struct ipc_header) + sizeof(struct msg_info) + cmd_len; + info_msg = (struct msg_info *)msg->i_data; + info_msg->msg_class = IPC_MESSAGE_BASIC; + *(msg->i_data + sizeof(struct msg_info) / 4) = IPC_BASIC_CMD_HOST_INIT; + + mutex_lock(&tsseipc->list_lock); + int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + if ((int_reg & IPC_REGISTER_INT_SET) != 0) { + rc = -1; + mutex_unlock(&tsseipc->list_lock); + kfree(msg); + return rc; + } + h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET); + + memcpy_toio(h2d, msg, sizeof(struct ipc_header)); + memcpy_toio(h2d + sizeof(struct ipc_header), (u32 *)msg->i_data, + sizeof(struct msg_info) + sizeof(uint32_t)); + + writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); + mutex_unlock(&tsseipc->list_lock); + kfree(msg); + + return 0; +} + +static void tsse_ipc_bh_handler(unsigned long data) +{ + struct tsse_ipc *tsseipc = (struct tsse_ipc *)data; + + void __iomem *d2h_payload = tsseipc->virt_addr + MAIN2HOST_IPC_OFFSET; + struct tsse_msg *msg_tsse = get_msginf(d2h_payload); + + if (!msg_tsse) { + dev_err(tsseipc->dev, "get_msginf is NULL\n"); + return; + } + msg_rout(tsseipc, msg_tsse); +} + +int tsse_ipc_init(struct pci_dev *pdev) +{ + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + struct tsse_ipc *ipc; + int rc; + + ipc = devm_kzalloc(&pdev->dev, sizeof(*ipc), GFP_KERNEL); + if (ipc == NULL) + return -ENOMEM; + tdev->ipc = ipc; + ipc->pdev = pdev; + ipc->dev = &pdev->dev; + ipc->virt_addr = TSSE_DEV_BARS(tdev)[2].virt_addr; + + mutex_init(&ipc->list_lock); + tasklet_init(&(ipc->ipc_handle), tsse_ipc_bh_handler, + (ulong)(ipc)); + + rc = request_threaded_irq(pci_irq_vector(pdev, 0), NULL, + tsse_ipc_d2h_irqhandler, IRQF_SHARED, + "pf-ipc", ipc); + ipc_hw_init(ipc); + ipc_init_msg(ipc); + + return rc; +} +EXPORT_SYMBOL_GPL(tsse_ipc_init); + +void tsse_ipc_deinit(void *tdev_t) +{ + struct 
tsse_ipc *tsseipc; + struct pci_dev *pdev; + struct tsse_dev *tdev; + + tdev = tdev_t; + tsseipc = tdev->ipc; + pdev = tsseipc->pdev; + free_irq(pci_irq_vector(pdev, 0), tdev->ipc); + return; + +} +EXPORT_SYMBOL_GPL(tsse_ipc_deinit); diff --git a/drivers/crypto/montage/tsse/tsse_ipc.h b/drivers/crypto/montage/tsse/tsse_ipc.h new file mode 100644 index 000000000000..59dcbf6eafc4 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_ipc.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TM_HOST_IPC_H__ +#define __TM_HOST_IPC_H__ + +#include +#include +#include + +#define TSSE_PASID_SVA + +#define HOST2MAIN_INTR_SET_OFFSET 0x2000 +#define HOST2MAIN_INTR_ENABLE_OFFSET 0x2004 +#define HOST2MAIN_ACK_INTR_CLR_OFFSET 0x2008 +#define HOST2MAIN_ACK_INTR_ENABLE_OFFSET 0x200c +#define HOST2MAIN_VLD_INTR_STATUS_OFFSET 0x2010 +#define HOST2MAIN_ACK_INTR_STATUS_OFFSET 0x2014 +#define MSIX_MASK_EN_REG_OFFSET 0x2020 +#define INTR_MASK_BIT_OFFSET 0x2024 +#define INTR_PENDING_BIT_OFFSET 0x2028 +#define HOST2MAIN_IPC_OFFSET 0x2400 + +#define MAIN2HOST_INTR_SET_OFFSET 0x3000 +#define MAIN2HOST_INTR_ENABLE_OFFSET 0x3004 +#define MAIN2HOST_ACK_INTR_CLR_OFFSET 0x3008 +#define MAIN2HOST_ACK_INTR_ENABLE_OFFSET 0x300c +#define MAIN2HOST_VEN_MSI_FUNC_NUM_OFFSET 0x3010 +#define MAIN2HOST_VEN_MSI_VFUNC_ACTIVE_OFFSET 0x3014 +#define MAIN2HOST_IPC_OFFSET 0x3400 + +#define IPC_REGISTER_INT_SET BIT(0) +#define IPC_REGISTER_INT_MASK BIT(1) + +enum IPC_BASIC_CMD { + IPC_BASIC_CMD_HOST_INIT = 0x1, + IPC_BASIC_CMD_PING = 0x2, +}; + +enum IPC_BOOT_CMD { + IPC_BOOT_CMD_GET_FIRMWARE = 0x1, +}; + +enum IPC_MESSAGE_CLASS { + IPC_MESSAGE_BASIC = 1, + IPC_MESSAGE_BOOT, + IPC_MESSAGE_CLASS_NUM, +}; + +struct ipc_header { + uint32_t inst_id; + pid_t tgid; + uint32_t i_len; + uint32_t pasid : 20; + uint32_t reserved_1 : 4; + uint32_t pasid_en : 8; + + uint32_t 
reserved[2]; +}; + +struct ipc_data { + struct ipc_header header; + void *i_ptr; +}; + +struct ipc_msg { + struct ipc_header header; + uint32_t i_data[]; +}; + +struct fw_load { + uint32_t command; + uint32_t result; + uint8_t name[32]; + uint32_t offset; + uint32_t size; +}; + +struct msg_info { + uint32_t host_id; + uint32_t msg_class; + uint32_t flags; + uint32_t reserved[3]; +}; + +struct ipc_layout { + struct ipc_header header; + struct msg_info info; +}; + +struct tsse_msg { + struct list_head list; + struct ipc_msg ipc_payload; +}; + +struct tsse_ipc { + struct device *dev; + struct pci_dev *pdev; + void __iomem *virt_addr; + struct mutex list_lock; + struct tasklet_struct ipc_handle; +}; + +int tsse_ipc_init(struct pci_dev *pdev); +void tsse_ipc_deinit(void *tdev); +bool check_send_enbit(struct tsse_ipc *tsseipc); +void notify_device(struct tsse_ipc *tsseipc); +#endif diff --git a/drivers/crypto/montage/tsse/tsse_irq.c b/drivers/crypto/montage/tsse/tsse_irq.c new file mode 100644 index 000000000000..8cb94fea3da4 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_irq.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
+ */ + +#include +#include +#include "tsse_dev.h" +#include "tsse_irq.h" + +#undef TSSE_IRQ_DBG + +int tsse_dev_alloc_irq_vectors(struct tsse_dev *tdev) +{ + int request_num = tdev->num_irqs; + int irq_num = pci_alloc_irq_vectors(tdev->tsse_pci_dev.pci_dev, + request_num, request_num, + PCI_IRQ_MSIX); + + if (irq_num < 0) { + dev_err(TSSEDEV_TO_DEV(tdev), + "%s %d :failed to alloc MSIX interrupt vectors\n", + __func__, __LINE__); + return irq_num; + } + + return 0; +} diff --git a/drivers/crypto/montage/tsse/tsse_irq.h b/drivers/crypto/montage/tsse/tsse_irq.h new file mode 100644 index 000000000000..09bed4e6d58a --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_irq.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_IRQ_H__ +#define __TSSE_IRQ_H__ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "tsse_dev.h" + +static inline void tsse_dev_free_irq_vectors(struct tsse_dev *tdev) +{ + pci_free_irq_vectors(tdev->tsse_pci_dev.pci_dev); +} + +int tsse_dev_alloc_irq_vectors(struct tsse_dev *tdev); + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_log.h b/drivers/crypto/montage/tsse/tsse_log.h new file mode 100644 index 000000000000..153cbe16374e --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_log.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_LOG_H__ +#define __TSSE_LOG_H__ + +#define tsse_dev_err(tssedev, fmt, ...) \ + dev_err(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) +#define tsse_dev_warn(tssedev, fmt, ...) \ + dev_warn(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) +#define tsse_dev_info(tssedev, fmt, ...) 
\ + dev_info(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) +#define tsse_dev_dbg(tssedev, fmt, ...) \ + dev_dbg(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_service.c b/drivers/crypto/montage/tsse/tsse_service.c new file mode 100644 index 000000000000..64121a655803 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_service.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ +#include +#include "tsse_ipc.h" +#include "tsse_fw_service.h" + +int service_rout(struct tsse_ipc *tsseipc, struct ipc_msg *msg) +{ + struct msg_info *info; + uint32_t msg_class; + int ret; + + info = (struct msg_info *)msg->i_data; + msg_class = info->msg_class; + switch (msg_class) { + case IPC_MESSAGE_BOOT: + fw_service(tsseipc, msg); + break; + + default: + ret = -EINVAL; + break; + } + return 0; + +} diff --git a/drivers/crypto/montage/tsse/tsse_service.h b/drivers/crypto/montage/tsse/tsse_service.h new file mode 100644 index 000000000000..d5fd87ee7dce --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_service.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
+ */ + +#ifndef __TSSE_SERVICE_H__ +#define __TSSE_SERVICE_H__ + +#include "tsse_ipc.h" +#include "tsse_fw_service.h" + +int service_rout(struct tsse_ipc *tsseipc, struct ipc_msg *msg); + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_vuart.c b/drivers/crypto/montage/tsse/tsse_vuart.c new file mode 100644 index 000000000000..f49d4ffc9f3c --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_vuart.c @@ -0,0 +1,596 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tsse_dev.h" +#include "tsse_vuart_regs.h" +#include "tsse_vuart.h" + +#ifdef DEBUG +#define VUART_PRINT(fmt, ...) pr_info(fmt, ##__VA_ARGS__) +#else +#define VUART_PRINT(fmt, ...) +#endif + +#define TSSE_VUART_BAUD (38400) +#define TSSE_VUART_MAX_RX_COUNT (256) +#define BOTH_EMPTY (VUART_FSR_TXFIFOE | VUART_FSR_RXFIFO) +struct tsse_vuart { + struct uart_port port; + unsigned int tx_threshold; + unsigned int rx_threshold; + unsigned int tx_loadsz; + unsigned char shutdown; + unsigned char confige_done; +}; + +#define SERIAL_LSR_NAME "tsse_vuart" + +static struct uart_driver g_vuart_reg = { + .owner = THIS_MODULE, + .driver_name = SERIAL_LSR_NAME, + .dev_name = "ttyTSSE", + .nr = TSSE_VUART_MAX_DEV, +}; + +static unsigned int g_trigger_level[4] = { 0, 31, 63, 111 }; +static unsigned long g_line[TSSE_VUART_BITMAP_SIZE]; + +static unsigned int vuart_serial_in(struct uart_port *port, int offset) +{ + unsigned int ret = le32_to_cpu(readl(port->membase + offset)); +#ifdef DEBUG + pr_debug("%s offset 0x%x, v 0x%x\n", __func__, offset, ret); +#endif + return ret; +} + +static void vuart_serial_out(struct uart_port *port, int offset, int value) +{ +#ifdef DEBUG + pr_debug("%s offset 0x%x, v 0x%x\n", __func__, offset, 
value); +#endif + value = cpu_to_le32(value); + writel(value, port->membase + offset); +} + +static void vuart_wait_for_xmitr(struct uart_port *port) +{ + unsigned int status, tmout = 10000; + + for (;;) { + status = vuart_serial_in(port, VUART_FSR); + if (FIELD_GET(VUART_FSR_TXFIFOE, status)) + break; + if (--tmout == 0) { + pr_err("%s:timeout(10ms), TX is not empty.\n", + __func__); + break; + } + udelay(1); + touch_nmi_watchdog(); + } +} + +static unsigned int vuart_tx_empty(struct uart_port *port) +{ + unsigned long flags; + unsigned int lsr; + + spin_lock_irqsave(&port->lock, flags); + lsr = vuart_serial_in(port, VUART_FSR); + spin_unlock_irqrestore(&port->lock, flags); + + return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0; +} + +static void vuart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +static unsigned int vuart_get_mctrl(struct uart_port *port) +{ + return 0; +} + +static void vuart_stop_tx(struct uart_port *port) +{ + unsigned int ier; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (!vuart->confige_done) + return; + + ier = vuart_serial_in(port, VUART_IER); + ier &= ~VUART_IER_HETXEI; + vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + int count; + + if (port->x_char) { + pr_err("x_char %d\n", port->x_char); + return; + } + + if (uart_tx_stopped(port) || uart_circ_empty(xmit)) { + vuart_stop_tx(port); + return; + } + + count = vuart->tx_loadsz; + do { + vuart_serial_out(port, VUART_TX, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static void vuart_start_tx(struct uart_port *port) +{ + unsigned int ier, fsr; + struct tsse_vuart *vuart = (struct tsse_vuart 
*)port; + + if (!vuart->confige_done) + return; + + if (uart_tx_stopped(port)) { + vuart_stop_tx(port); + return; + } + + fsr = vuart_serial_in(port, VUART_FSR); + VUART_PRINT("==>Existing Data number in TX FIFO %ld\n", + FIELD_GET(VUART_FSR_TFIFODN, fsr)); + VUART_PRINT("==>Existing Data number in RX FIFO %ld\n", + FIELD_GET(VUART_FSR_RFIFODN, fsr)); + if (fsr & VUART_FSR_TXFIFOE) + vuart_tx_chars(port); + ier = vuart_serial_in(port, VUART_IER); + ier |= VUART_IER_HETXEI | VUART_IER_HETXUI; + vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_throttle(struct uart_port *port) +{ +} + +static void vuart_unthrottle(struct uart_port *port) +{ +} + +static void vuart_stop_rx(struct uart_port *port) +{ + unsigned int ier; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (!vuart->confige_done) + return; + + ier = vuart_serial_in(port, VUART_IER); + ier &= ~(VUART_IER_HERXTOI | VUART_IER_HETXDRI | VUART_IER_HERXOI); + vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_enable_ms(struct uart_port *port) +{ +} + +static void vuart_break_ctl(struct uart_port *port, int ctl) +{ +} + +static irqreturn_t vuart_interrupt(int irq, void *port) +{ + int handled = 0; + struct uart_port *p = (struct uart_port *)port; + + if (p->handle_irq(p)) + handled = 1; + + return IRQ_RETVAL(handled); +} + +static void vuart_check_config_done(struct uart_port *port) +{ + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (vuart_serial_in(port, VUART_CFG) == 1) + vuart->confige_done = 1; +} + +static int vuart_startup(struct uart_port *port) +{ + unsigned int ret, hcr, ier, fcr = 0; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (port->flags & UPF_SHARE_IRQ) + port->irqflags |= IRQF_SHARED; + ret = request_irq(port->irq, vuart_interrupt, port->irqflags, + "tsse_uart", port); + if (ret) + return ret; + + hcr = vuart_serial_in(port, VUART_HCR); + vuart->rx_threshold = FIELD_GET(VUART_HCR_RFIFOT, hcr); + vuart->tx_threshold = 
FIELD_GET(VUART_HCR_TFIFOT, hcr); + fcr |= FIELD_PREP(VUART_FCR_RFIFOT, vuart->rx_threshold); + fcr |= FIELD_PREP(VUART_FCR_TFIFOT, vuart->tx_threshold); + fcr |= FIELD_PREP(VUART_FCR_TFIFORST, 1); + fcr |= FIELD_PREP(VUART_FCR_RFIFORST, 1); + vuart_serial_out(port, VUART_FCR, fcr); + + vuart->rx_threshold = g_trigger_level[vuart->rx_threshold]; + vuart->tx_threshold = g_trigger_level[vuart->tx_threshold]; + + vuart_check_config_done(port); + ier = vuart_serial_in(port, VUART_IER); + ier |= VUART_IER_CCFGDI | VUART_IER_HETXDRI | VUART_IER_HERXTOI; + vuart_serial_out(port, VUART_IER, ier); + + vuart_serial_out(port, VUART_SCR, FIELD_PREP(VUART_SCR_SCR, 1)); + + vuart->shutdown = 0; + + return 0; +} + +static void vuart_shutdown(struct uart_port *port) +{ + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + vuart->shutdown = 1; + vuart_stop_rx(port); + vuart_stop_tx(port); + free_irq(port->irq, port); + vuart_serial_out(port, VUART_SCR, 0); +} + +static void vuart_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud; + unsigned long flags; + + if ((termios->c_cflag & CSIZE) != CS8) + pr_err("Warning:termios is not CS8.\n"); + + baud = uart_get_baud_rate(port, termios, old, 0, TSSE_VUART_BAUD); + + spin_lock_irqsave(&port->lock, flags); + uart_update_timeout(port, termios->c_cflag, baud); + + port->read_status_mask = + VUART_FSR_TXFIFOE | VUART_FSR_TXOE | VUART_FSR_RXDR; + if (termios->c_iflag & INPCK) + port->read_status_mask |= VUART_FSR_RXUE; + + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= VUART_FSR_RXUE; + if (termios->c_iflag & (IGNBRK | IGNPAR)) + port->ignore_status_mask |= VUART_FSR_TXFIFOE; + + if ((termios->c_cflag & CREAD) == 0) { + port->ignore_status_mask |= VUART_FSR_RXDR; + pr_err("Warning:termios is not set CREAD.\n"); + } + + spin_unlock_irqrestore(&port->lock, flags); + + if (tty_termios_baud_rate(termios)) + 
tty_termios_encode_baud_rate(termios, baud, baud); +} + +static void vuart_set_ldisc(struct uart_port *port, struct ktermios *ktermios) +{ +} + +static void vuart_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ +} + +static void vuart_release_port(struct uart_port *port) +{ +} + +static int vuart_request_port(struct uart_port *port) +{ + return 0; +} + +static void vuart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_16550A; +} + +static int vuart_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + if (port->type != PORT_16550A) + return -EINVAL; + return 0; +} + +#ifdef CONFIG_CONSOLE_POLL +static void vuart_poll_put_char(struct uart_port *port, unsigned char c) +{ + unsigned int ier_save; + + ier_save = vuart_serial_in(port, VUART_IER); + vuart_wait_for_xmitr(port); + vuart_serial_out(port, VUART_TX, c); + + vuart_wait_for_xmitr(port); + vuart_serial_out(port, VUART_IER, ier_save); +} + +static int vuart_poll_get_char(struct uart_port *port) +{ + int status; + + status = vuart_serial_in(port, VUART_FSR); + if (!FIELD_GET(VUART_FSR_RXDR, status)) + return NO_POLL_CHAR; + + return vuart_serial_in(port, VUART_RX); +} + +#endif + +static const char *vuart_type(struct uart_port *port) +{ + return "tsse_vuart"; +} + +static const struct uart_ops vuart_ops = { + .tx_empty = vuart_tx_empty, + .set_mctrl = vuart_set_mctrl, + .get_mctrl = vuart_get_mctrl, + .stop_tx = vuart_stop_tx, + .start_tx = vuart_start_tx, + .throttle = vuart_throttle, + .unthrottle = vuart_unthrottle, + .stop_rx = vuart_stop_rx, + .enable_ms = vuart_enable_ms, + .break_ctl = vuart_break_ctl, + .startup = vuart_startup, + .shutdown = vuart_shutdown, + .set_termios = vuart_set_termios, + .set_ldisc = vuart_set_ldisc, + .pm = vuart_pm, + .type = vuart_type, + .release_port = vuart_release_port, + .request_port = vuart_request_port, + .config_port = vuart_config_port, + .verify_port = vuart_verify_port, 
+#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = vuart_poll_get_char, + .poll_put_char = vuart_poll_put_char, +#endif +}; + +static unsigned int vuart_rx_chars(struct uart_port *port, unsigned int lsr) +{ + int max_count = TSSE_VUART_MAX_RX_COUNT; + unsigned char ch; + struct tty_port *tport = &port->state->port; + + do { + if (lsr & VUART_FSR_RXDR) + ch = vuart_serial_in(port, VUART_RX); + else + ch = 0; + port->icount.rx++; + if (lsr & VUART_FSR_RXUE) { + port->icount.overrun++; + pr_err("income byte underflow, record and clear int.\n"); + vuart_serial_out(port, VUART_IIR, VUART_IIR_RXUE); + } + + if (!uart_prepare_sysrq_char(port, ch)) { + if (tty_insert_flip_char(tport, ch, TTY_NORMAL) == 0) + ++port->icount.buf_overrun; + } + + if (--max_count == 0) + break; + lsr = vuart_serial_in(port, VUART_FSR); + } while (lsr & VUART_FSR_RXDR); + + tty_flip_buffer_push(&port->state->port); + return lsr; +} + +static int vuart_deal_irq(struct uart_port *port, unsigned int iir) +{ + unsigned char status; + unsigned int ier; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (iir & VUART_IIR_CPUCD) + vuart->confige_done = 1; + + status = vuart_serial_in(port, VUART_FSR); + if (port->read_status_mask & VUART_FSR_RXDR) + vuart_rx_chars(port, status); + else + pr_err("read_status_mask not set VUART_FSR_RXDR, ignor rx.\n"); + + ier = vuart_serial_in(port, VUART_IER); + if (!(status & VUART_FSR_TXOE) && (status & VUART_FSR_TXFIFOE) && + (ier & VUART_IER_HETXEI)) + vuart_tx_chars(port); + + return 1; +} + +#ifdef DEBUG +static void vuart_debug_iir(unsigned int iir) +{ + VUART_PRINT("%s called iir %u.\n", __func__, iir); + if (iir & VUART_IIR_TXEI) + pr_err("TX FIFO empty interrupt.\n"); + + if (iir & VUART_IIR_RXTOI) + pr_err("Host RX FIFO character timeout interrupt.\n"); + + if (iir & VUART_IIR_RXDAI) + pr_err("Host RX FIFO data available interrupt.\n"); + + if (iir & VUART_IIR_RXUE) + pr_err("HOST RX FIFO Underflow error.\n"); + + if (iir & VUART_IIR_TXOE) + 
pr_err("HOST TX FIFO Overrun error.\n"); + + if (iir & VUART_IIR_CPUCD) + pr_err("CPU has finished configuration for virtual UART"); + + if (iir & VUART_IIR_TXFI) + pr_err("Host TX FIFO full interrupt.\n"); +} +#endif + +static int vuart_handle_irq(struct uart_port *port) +{ + unsigned int iir; + unsigned long flags; + int ret; + + iir = vuart_serial_in(port, VUART_IIR); + vuart_serial_out(port, VUART_IIR, iir); +#ifdef DEBUG + vuart_debug_iir(iir); +#endif + spin_lock_irqsave(&port->lock, flags); + ret = vuart_deal_irq(port, iir); + + uart_unlock_and_check_sysrq_irqrestore(port, flags); + + return ret; +} + +static int vuart_get_line(void) +{ + int bit = 0; + + bit = find_first_zero_bit(&g_line[0], TSSE_VUART_MAX_DEV); + if (bit >= TSSE_VUART_MAX_DEV) + return -ENOSPC; + set_bit(bit, &g_line[0]); + return bit; +} + +static void vuart_free_line(int line) +{ + clear_bit(line, &g_line[0]); +} + +int vuart_init_port(struct pci_dev *pdev) +{ + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + struct tsse_vuart *vuart = NULL; + struct uart_port *p = NULL; + int ret = 0; + int line = vuart_get_line(); + + if (line == -ENOSPC) { + dev_err(&pdev->dev, "device too more, max is 64.\n"); + return -ENOMEM; + } + + vuart = kzalloc_node(sizeof(struct tsse_vuart), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!vuart) { + ret = -ENOMEM; + goto zalloc_fail; + } + vuart->shutdown = 1; + p = &(vuart->port); + p->mapbase = 0; + p->mapsize = 0; + p->membase = TSSE_DEV_BARS(tdev)[2].virt_addr + RLS_VUART_OFFSET; + p->irq = pci_irq_vector(pdev, RLS_VUART_IRQ_NUM); + p->handle_irq = vuart_handle_irq; + spin_lock_init(&p->lock); + p->line = line; + p->type = PORT_16550A; + p->uartclk = TSSE_VUART_BAUD * 16; + p->iotype = UPIO_MEM; + p->ops = &vuart_ops; + p->fifosize = 128; + vuart->tx_loadsz = 128; + p->flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE | UPF_FIXED_PORT | + UPF_SHARE_IRQ; + p->dev = &pdev->dev; + p->private_data = tdev; + + tdev->port = (struct uart_port *)vuart; + ret = 
uart_add_one_port(&g_vuart_reg, p); + if (ret != 0) { + dev_err(&pdev->dev, "add port fialed.[%d]\n", ret); + goto add_port_fail; + } + return 0; +add_port_fail: + kfree(vuart); +zalloc_fail: + vuart_free_line(line); + + return ret; +} + +void vuart_uninit_port(struct pci_dev *pdev) +{ + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + struct tsse_vuart *vuart = (struct tsse_vuart *)(tdev->port); + + if (tdev->port) { + if (!vuart->shutdown) + free_irq(tdev->port->irq, tdev->port); + vuart_free_line(tdev->port->line); + uart_remove_one_port(&g_vuart_reg, tdev->port); + kfree(vuart); + } +} + +int vuart_register(void) +{ + return uart_register_driver(&g_vuart_reg); +} + +void vuart_unregister(void) +{ + uart_unregister_driver(&g_vuart_reg); +} diff --git a/drivers/crypto/montage/tsse/tsse_vuart.h b/drivers/crypto/montage/tsse/tsse_vuart.h new file mode 100644 index 000000000000..1ed43368751a --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_vuart.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_VUART_H__ +#define __TSSE_VUART_H__ + +#include + +#define RLS_VUART_OFFSET (0x680000) +#define RLS_VUART_IRQ_NUM (10) +#define TSSE_VUART_MAX_DEV (64) +#define TSSE_VUART_BITMAP_SIZE (ALIGN(TSSE_VUART_MAX_DEV, 64) / 64) + +int vuart_register(void); +void vuart_unregister(void); +int vuart_init_port(struct pci_dev *pdev); +void vuart_uninit_port(struct pci_dev *pdev); + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_vuart_regs.h b/drivers/crypto/montage/tsse/tsse_vuart_regs.h new file mode 100644 index 000000000000..26fa62f5014a --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_vuart_regs.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
+ */ + +#ifndef __TSSE_VUART_REGS_H__ +#define __TSSE_VUART_REGS_H__ + +#include +#include + +#define VUART_ID 0x0 +#define VUART_ID_MASK GENMASK(31, 0) + +#define VUART_HCR 0x10 +#define VUART_HCR_RFIFOT GENMASK(3, 2) +#define VUART_HCR_TFIFOT GENMASK(5, 4) + +#define INTRID_NONE BIT(0) +#define INTRID_CPU_LSR (BIT(2) | BIT(1)) +#define INTRID_TRIGGER_LEVEL BIT(2) +#define INTRID_RX_TIMEOUT (BIT(2) | BIT(3)) +#define INTRID_TX_EMPTY BIT(1) + +#define VUART_IIR 0x28 +#define VUART_IIR_TXEI GENMASK(0, 0) +#define VUART_IIR_RXTOI GENMASK(1, 1) +#define VUART_IIR_RXDAI GENMASK(2, 2) +#define VUART_IIR_CPUCD GENMASK(3, 3) +#define VUART_IIR_TXFI GENMASK(4, 4) +#define VUART_IIR_RXUE GENMASK(5, 5) +#define VUART_IIR_TXOE GENMASK(6, 6) + +#define VUART_FCR 0x30 +#define VUART_FCR_TFIFORST GENMASK(0, 0) +#define VUART_FCR_RFIFORST GENMASK(1, 1) +#define VUART_FCR_RFIFOT GENMASK(3, 2) +#define VUART_FCR_TFIFOT GENMASK(5, 4) + +#define VUART_FSR 0x34 +#define VUART_FSR_TXDR GENMASK(0, 0) +#define VUART_FSR_RXDR GENMASK(1, 1) +#define VUART_FSR_RXFIFO GENMASK(2, 2) +#define VUART_FSR_TXFIFOE GENMASK(3, 3) +#define VUART_FSR_RXFIFOF GENMASK(4, 4) +#define VUART_FSR_TXFIFOF GENMASK(5, 5) +#define VUART_FSR_TFIFODN GENMASK(13, 6) +#define VUART_FSR_RFIFODN GENMASK(21, 14) +#define VUART_FSR_TXOE GENMASK(23, 23) +#define VUART_FSR_RXUE GENMASK(24, 24) + +#define VUART_SCR 0x3c +#define VUART_SCR_SCR GENMASK(7, 0) + +#define VUART_TX 0x40 +#define VUART_RX 0x40 + +#define VUART_IER 0x48 +#define VUART_IER_HETXEI GENMASK(0, 0) +#define VUART_IER_HERXTOI GENMASK(1, 1) +#define VUART_IER_HETXDRI GENMASK(2, 2) +#define VUART_IER_CCFGDI GENMASK(3, 3) +#define VUART_IER_HETXFI GENMASK(4, 4) +#define VUART_IER_HETXUI GENMASK(5, 5) +#define VUART_IER_HERXOI GENMASK(6, 6) + +#define VUART_CFG 0x4c +#define VUART_CFG_CCFGD GENMASK(0, 0) + +#endif -- Gitee From c8db9164c310b9c788fa62226e4d9518adc688ec Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Thu, 28 Dec 2023 15:14:10 +0800 Subject: 
[PATCH 0117/2138] anolis: x86/mce: Add Centaur MCA support ANBZ: #7809 Add MCA support for some Zhaoxin CPUs which use X86_VENDOR_CENTAUR as vendor ID. Signed-off-by: leoliu-oc Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2676 --- arch/x86/kernel/cpu/mce/core.c | 39 ++++++++++++--------------------- arch/x86/kernel/cpu/mce/intel.c | 3 ++- 2 files changed, 16 insertions(+), 26 deletions(-) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index e103c227acd3..2cafc35f3b7b 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -482,7 +482,8 @@ int mce_usable_address(struct mce *m) /* Checks after this one are Intel/Zhaoxin-specific: */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && - boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR && + boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) return 1; if (!(m->status & MCI_STATUS_MISCV)) @@ -506,6 +507,7 @@ bool mce_is_memory_error(struct mce *m) return amd_mce_is_memory_error(m); case X86_VENDOR_INTEL: + case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: /* * Intel SDM Volume 3B - 15.9.2 Compound Error Codes @@ -1231,7 +1233,8 @@ static noinstr bool mce_check_crashing_cpu(void) mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS); - if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { if (mcgstatus & MCG_STATUS_LMCES) return false; } @@ -1505,7 +1508,8 @@ noinstr void do_machine_check(struct pt_regs *regs) * on Intel, Zhaoxin only. 
*/ if (m.cpuvendor == X86_VENDOR_INTEL || - m.cpuvendor == X86_VENDOR_ZHAOXIN) + m.cpuvendor == X86_VENDOR_CENTAUR || + m.cpuvendor == X86_VENDOR_ZHAOXIN) lmce = m.mcgstatus & MCG_STATUS_LMCES; /* @@ -1932,7 +1936,8 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) mce_flags.skx_repmov_quirk = 1; } - if (c->x86_vendor == X86_VENDOR_ZHAOXIN) { + if (c->x86_vendor == X86_VENDOR_CENTAUR || + c->x86_vendor == X86_VENDOR_ZHAOXIN) { /* * All newer Zhaoxin CPUs support MCE broadcasting. Enable * synchronization with a one second timeout. @@ -1985,21 +1990,6 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) } } -static void mce_centaur_feature_init(struct cpuinfo_x86 *c) -{ - struct mca_config *cfg = &mca_cfg; - - /* - * All newer Centaur CPUs support MCE broadcasting. Enable - * synchronization with a one second timeout. - */ - if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) || - c->x86 > 6) { - if (cfg->monarch_timeout < 0) - cfg->monarch_timeout = USEC_PER_SEC; - } -} - static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c) { struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); @@ -2047,9 +2037,6 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) break; case X86_VENDOR_CENTAUR: - mce_centaur_feature_init(c); - break; - case X86_VENDOR_ZHAOXIN: mce_zhaoxin_feature_init(c); break; @@ -2066,6 +2053,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) mce_intel_feature_clear(c); break; + case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: mce_zhaoxin_feature_clear(c); break; @@ -2349,9 +2337,10 @@ static void vendor_disable_error_reporting(void) * controller (iMC), etc. 
*/ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || - boot_cpu_data.x86_vendor == X86_VENDOR_AMD || - boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || + boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) return; mce_disable_error_reporting(); diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index f5323551c1a9..e013dd5162fc 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -93,7 +93,8 @@ static int cmci_supported(int *banks) * makes sure none of the backdoors are entered otherwise. */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && - boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR && + boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) return 0; if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6) -- Gitee From 018ada23ad13bbf5c8f48fc3d254dc246fc5316f Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 2 Jan 2024 19:30:57 +0800 Subject: [PATCH 0118/2138] anolis: x86/cpufeatures: Add Zhaoxin feature bits ANBZ: #7809 Add Zhaoxin feature bits on Zhaoxin CPUs. 
Signed-off-by: leoliu-oc Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2675 --- arch/x86/include/asm/cpufeatures.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 55d18eef6775..3e210b517953 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -156,6 +156,23 @@ #define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ #define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ #define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ +#define X86_FEATURE_ZX_FMA (5*32+15) /* FMA supported */ +#define X86_FEATURE_PARALLAX (5*32+16) /* Adaptive P-state control present */ +#define X86_FEATURE_PARALLAX_EN (5*32+17) /* Adaptive P-state control enabled */ +#define X86_FEATURE_OVERSTRESS (5*32+18) /* Overstress Feature for auto overclock present */ +#define X86_FEATURE_OVERSTRESS_EN (5*32+19) /* Overstress Feature for auto overclock enabled */ +#define X86_FEATURE_TM3 (5*32+20) /* Thermal Monitor 3 present */ +#define X86_FEATURE_TM3_EN (5*32+21) /* Thermal Monitor 3 enabled */ +#define X86_FEATURE_RNG2 (5*32+22) /* 2nd generation of RNG present */ +#define X86_FEATURE_RNG2_EN (5*32+23) /* 2nd generation of RNG enabled */ +#define X86_FEATURE_SEM (5*32+24) /* SME feature present */ +#define X86_FEATURE_PHE2 (5*32+25) /* SHA384 and SHA 512 present */ +#define X86_FEATURE_PHE2_EN (5*32+26) /* SHA384 and SHA 512 enabled */ +#define X86_FEATURE_XMODX (5*32+27) /* "rsa" XMODEXP and MONTMUL2 are present */ +#define X86_FEATURE_XMODX_EN (5*32+28) /* "rsa_en" XMODEXP and MONTMUL2 are enabled */ +#define X86_FEATURE_VEX (5*32+29) /* VEX instructions are present */ +#define X86_FEATURE_VEX_EN (5*32+30) /* VEX instructions are enabled */ +#define X86_FEATURE_STK (5*32+31) /* STK are present */ /* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */ #define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long 
mode */ -- Gitee From 4677698723f6f1b127a030778a47729e092ec9c9 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Thu, 28 Dec 2023 15:11:35 +0800 Subject: [PATCH 0119/2138] anolis: x86/cpu: Add detect extended topology for Zhaoxin CPUs ANBZ: #7809 Detect the extended topology information of Zhaoxin CPUs if available. Signed-off-by: leoliu-oc Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2675 --- arch/x86/kernel/cpu/centaur.c | 8 +++++++- arch/x86/kernel/cpu/zhaoxin.c | 7 ++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 345f7d905db6..a5c01c8f8824 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -109,6 +109,9 @@ static void early_init_centaur(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); } + + if (detect_extended_topology_early(c) < 0) + detect_ht_early(c); } static void init_centaur(struct cpuinfo_x86 *c) @@ -127,11 +130,14 @@ static void init_centaur(struct cpuinfo_x86 *c) clear_cpu_cap(c, 0*32+31); #endif early_init_centaur(c); + detect_extended_topology(c); init_intel_cacheinfo(c); - detect_num_cpu_cores(c); + if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { + detect_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif + } if (c->cpuid_level > 9) { unsigned int eax = cpuid_eax(10); diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c index 05fa4ef63490..2126b10de796 100644 --- a/arch/x86/kernel/cpu/zhaoxin.c +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -79,16 +79,21 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c) c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); } + if (detect_extended_topology_early(c) < 0) + detect_ht_early(c); } static void init_zhaoxin(struct cpuinfo_x86 *c) { early_init_zhaoxin(c); + detect_extended_topology(c); init_intel_cacheinfo(c); - detect_num_cpu_cores(c); + if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { + 
detect_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif + } if (c->cpuid_level > 9) { unsigned int eax = cpuid_eax(10); -- Gitee From 0d009f8d4bd141062b6a8abef8781ad52f8e3b43 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 14 Dec 2023 18:56:22 +0200 Subject: [PATCH 0120/2138] intel_idle: add Sierra Forest SoC support ANBZ: #8417 commit 92813fd5b1562e547120c8489137b040892fe1bc upstream. Add Sierra Forest SoC C-states, which are C1, C1E, C6S, and C6SP. Sierra Forest SoC is built with modules, each module includes 4 cores (Crestmont microarchitecture). There is one L2 cache per module, shared between the 4 cores. There is no core C6 state, but there is C6S state, which has module scope: when all 4 cores request C6S, the entire module (4 cores + L2 cache) enters the low power state. C6SP state has package scope - when all modules in the package enter C6S, the package enters the power state mode. Intel-SIG: commit 92813fd5b156 intel_idle: add Sierra Forest SoC support. Backport Intel_idle Sierra Forest SoC support. Signed-off-by: Artem Bityutskiy Signed-off-by: Rafael J. 
Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Xuchun Shang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2808 --- drivers/idle/intel_idle.c | 44 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 45500d2d5b4b..670a041eb910 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1237,6 +1237,43 @@ static struct cpuidle_state snr_cstates[] __initdata = { .enter = NULL } }; +static struct cpuidle_state srf_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 2, + .target_residency = 10, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6S", + .desc = "MWAIT 0x22", + .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 270, + .target_residency = 700, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6SP", + .desc = "MWAIT 0x23", + .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 310, + .target_residency = 900, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static const struct idle_cpu idle_cpu_nehalem __initconst = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, @@ -1382,6 +1419,12 @@ static const struct idle_cpu idle_cpu_snr __initconst = { .use_acpi = true, }; +static const struct idle_cpu idle_cpu_srf __initconst = { + .state_table = srf_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct x86_cpu_id intel_idle_ids[] __initconst = { 
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx), X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem), @@ -1427,6 +1470,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &idle_cpu_srf), {} }; -- Gitee From ab84b1d5768f72b841c7d9a6712701900a9a9587 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 23 Feb 2024 16:04:55 +0800 Subject: [PATCH 0121/2138] anolis: ext4: remove projid limit when create hard link ANBZ: #8366 This is a temporary workaround to avoid the limitation when creating hard links across two projids. Signed-off-by: zhangliguang Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2795 --- Documentation/admin-guide/sysctl/fs.rst | 9 +++++++++ fs/ext4/namei.c | 5 ++++- fs/namei.c | 13 +++++++++++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/sysctl/fs.rst b/Documentation/admin-guide/sysctl/fs.rst index a321b84eccaa..11b2dd4ef5ae 100644 --- a/Documentation/admin-guide/sysctl/fs.rst +++ b/Documentation/admin-guide/sysctl/fs.rst @@ -205,6 +205,15 @@ already own the source file, or do not have read/write access to it. This protection is based on the restrictions in Openwall and grsecurity. +hardlink_cross_projid +--------------------- + +This is a temporary workaround to avoid the limitation when creating +hard links across two projids. When set to "0", hardlink creation across +two projids is restricted. When set to "1", hardlinks can be created +across two projids. 
+ + protected_regular ----------------- diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 96a048d3f51b..3839b6057027 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -50,6 +50,8 @@ #define NAMEI_RA_BLOCKS 4 #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS) +extern int sysctl_hardlink_cross_projid __read_mostly; + static struct buffer_head *ext4_append(handle_t *handle, struct inode *inode, ext4_lblk_t *block) @@ -3553,7 +3555,8 @@ static int ext4_link(struct dentry *old_dentry, if (err) return err; - if ((ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) && + if (!sysctl_hardlink_cross_projid && + (ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) && (!projid_eq(EXT4_I(dir)->i_projid, EXT4_I(old_dentry->d_inode)->i_projid))) return -EXDEV; diff --git a/fs/namei.c b/fs/namei.c index beffbb02a24e..ef117f0adce7 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1033,6 +1033,9 @@ static int sysctl_protected_hardlinks __read_mostly; static int sysctl_protected_fifos __read_mostly; static int sysctl_protected_regular __read_mostly; +int sysctl_hardlink_cross_projid __read_mostly; +EXPORT_SYMBOL_GPL(sysctl_hardlink_cross_projid); + #ifdef CONFIG_SYSCTL static struct ctl_table namei_sysctls[] = { { @@ -1071,6 +1074,16 @@ static struct ctl_table namei_sysctls[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_TWO, }, + { + .procname = "hardlink_cross_projid", + .data = &sysctl_hardlink_cross_projid, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + + }, { } }; -- Gitee From 3c616f10e903b1df904087272e1d860f220e7260 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Wed, 7 Mar 2018 17:12:11 +0800 Subject: [PATCH 0122/2138] anolis: jbd2: create jbd2-ckpt thread for journal checkpoint ANBZ: #8366 This is trying to do jbd2 checkpoint in a specific kernel thread, then checkpoint won't be under io throttle control. 
Signed-off-by: Joseph Qi Signed-off-by: Jiufei Xue Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2795 --- fs/ext4/super.c | 2 ++ fs/jbd2/checkpoint.c | 13 +++++++++- fs/jbd2/journal.c | 57 +++++++++++++++++++++++++++++++++++++++++++- include/linux/jbd2.h | 17 +++++++++++++ 4 files changed, 87 insertions(+), 2 deletions(-) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 71ced0ada9a2..58b3e5a9c832 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4976,6 +4976,7 @@ static int ext4_load_and_init_journal(struct super_block *sb, } set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); + set_task_ioprio(sbi->s_journal->j_checkpoint_task, ctx->journal_ioprio); sbi->s_journal->j_submit_inode_data_buffers = ext4_journal_submit_inode_data_buffers; @@ -6559,6 +6560,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb) if (sbi->s_journal) { ext4_init_journal_params(sb, sbi->s_journal); set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio); + set_task_ioprio(sbi->s_journal->j_checkpoint_task, ctx->journal_ioprio); } /* Flush outstanding errors before changing fs state */ diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 8fda66c98a61..64965c4d4e43 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -88,7 +88,18 @@ __releases(&journal->j_state_lock) spin_unlock(&journal->j_list_lock); write_unlock(&journal->j_state_lock); if (chkpt) { - jbd2_log_do_checkpoint(journal); + DEFINE_WAIT(wait); + + prepare_to_wait( + &journal->j_wait_done_checkpoint, &wait, + TASK_UNINTERRUPTIBLE); + mutex_unlock(&journal->j_checkpoint_mutex); + wake_up(&journal->j_wait_checkpoint); + schedule(); + mutex_lock(&journal->j_checkpoint_mutex); + finish_wait(&journal->j_wait_done_checkpoint, + &wait); + jbd2_debug(1, "wake up checkpoint thread.\n"); } else if (jbd2_cleanup_journal_tail(journal) <= 0) { /* * We were able to recover space or the diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 
dfbb8f73861f..94577fe217bd 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -191,6 +191,9 @@ static int kjournald2(void *arg) if (journal->j_flags & JBD2_UNMOUNT) goto end_loop; + if (kthread_should_stop()) + goto end_loop; + jbd2_debug(1, "commit_sequence=%u, commit_request=%u\n", journal->j_commit_sequence, journal->j_commit_request); @@ -261,9 +264,40 @@ static int kjournald2(void *arg) return 0; } +static int jbd2_checkpoint_thread(void *arg) +{ + journal_t *journal = arg; + DEFINE_WAIT(wait); + + jbd2_debug(1, "%s\n", __func__); + journal->j_checkpoint_task = current; + +loop: + prepare_to_wait(&journal->j_wait_checkpoint, &wait, + TASK_INTERRUPTIBLE); + wake_up_all(&journal->j_wait_done_checkpoint); + schedule(); + finish_wait(&journal->j_wait_checkpoint, &wait); + + if (journal->j_flags & JBD2_UNMOUNT) + goto end_loop; + + mutex_lock(&journal->j_checkpoint_mutex); + jbd2_log_do_checkpoint(journal); + mutex_unlock(&journal->j_checkpoint_mutex); + + goto loop; + +end_loop: + journal->j_checkpoint_task = NULL; + wake_up_all(&journal->j_wait_done_checkpoint); + jbd2_debug(1, "%s exiting.\n", __func__); + return 0; +} + static int jbd2_journal_start_thread(journal_t *journal) { - struct task_struct *t; + struct task_struct *t, *t_ckpt; t = kthread_run(kjournald2, journal, "jbd2/%s", journal->j_devname); @@ -271,6 +305,17 @@ static int jbd2_journal_start_thread(journal_t *journal) return PTR_ERR(t); wait_event(journal->j_wait_done_commit, journal->j_task != NULL); + + t_ckpt = kthread_run(jbd2_checkpoint_thread, journal, "jbd2-ckpt/%s", + journal->j_devname); + if (IS_ERR(t_ckpt)) { + kthread_stop(t); + return PTR_ERR(t_ckpt); + } + + wait_event(journal->j_wait_done_checkpoint, + journal->j_checkpoint_task != NULL); + return 0; } @@ -286,6 +331,14 @@ static void journal_kill_thread(journal_t *journal) write_lock(&journal->j_state_lock); } write_unlock(&journal->j_state_lock); + + while (journal->j_checkpoint_task) { + 
mutex_lock(&journal->j_checkpoint_mutex); + wake_up(&journal->j_wait_checkpoint); + wait_event(journal->j_wait_done_checkpoint, + journal->j_checkpoint_task == NULL); + mutex_unlock(&journal->j_checkpoint_mutex); + } } /* @@ -1584,6 +1637,8 @@ static journal_t *journal_init_common(struct block_device *bdev, init_waitqueue_head(&journal->j_wait_transaction_locked); init_waitqueue_head(&journal->j_wait_done_commit); + init_waitqueue_head(&journal->j_wait_checkpoint); + init_waitqueue_head(&journal->j_wait_done_checkpoint); init_waitqueue_head(&journal->j_wait_commit); init_waitqueue_head(&journal->j_wait_updates); init_waitqueue_head(&journal->j_wait_reserved); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index f0bc9aa5aed3..b54db4a9b214 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -844,6 +844,16 @@ struct journal_s */ wait_queue_head_t j_wait_commit; + /** + * @j_wait_done_checkpoint: Wait queue for waiting for checkpoint to complete. + */ + wait_queue_head_t j_wait_done_checkpoint; + + /** + * @j_wait_checkpoint: Wait queue to trigger checkpointing. + */ + wait_queue_head_t j_wait_checkpoint; + /** * @j_wait_updates: Wait queue to wait for updates to complete. */ @@ -1207,6 +1217,13 @@ struct journal_s int (*j_finish_inode_data_buffers) (struct jbd2_inode *); + /** + * @j_checkpoint_task: + * + * Pointer to the current checkpoint thread for this journal. + */ + struct task_struct *j_checkpoint_task; + /* * Journal statistics */ -- Gitee From 183e8ad2c8b3e8e9b838641b81705460918671c9 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 26 Feb 2024 14:53:30 +0800 Subject: [PATCH 0123/2138] anolis: jbd2: add new "stats" proc file ANBZ: #8366 /proc/fs/jbd2/${device}/info only shows whole average statistical info about jbd2's life cycle, but it can not show jbd2 info in specified time interval and sometimes this capability is very useful for trouble shooting. 
For example, we can not see how rs_locked and rs_flushing grows in specified time interval, but these two indexes can explain some reasons for app's behaviours. Here we add a new "stats" proc file like /proc/diskstats, then we can implement a simple tool jbd2_stats which'll display detailed jbd2 info in specified time interval. Like below(time interval 5s): [lege@localhost ~]$ cat /proc/fs/jbd2/vdb1-8/stats 51 30 8192 0 1 241616 0 0 22 0 47158 891 942 1000 1000 [lege@localhost ~]$ gcc -o jbd2_stat jbd2_stat.c ; ./jbd2_stat Device tid trans handles locked flushing logging vdb1-8 1861 158 359 13.00 0.00 2.00 Device tid trans handles locked flushing logging vdb1-8 1974 113 389 26.00 0.00 5.00 Device tid trans handles locked flushing logging vdb1-8 2188 214 308 10.00 0.00 7.00 Device tid trans handles locked flushing logging vdb1-8 2344 156 332 19.00 0.00 4.00 Signed-off-by: Xiaoguang Wang Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2795 --- fs/jbd2/journal.c | 103 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 98 insertions(+), 5 deletions(-) diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 94577fe217bd..925edbf63c39 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1257,25 +1257,78 @@ static const struct seq_operations jbd2_seq_info_ops = { .show = jbd2_seq_info_show, }; -static int jbd2_seq_info_open(struct inode *inode, struct file *file) +static void *jbd2_seq_stats_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? 
NULL : SEQ_START_TOKEN; +} + +static void *jbd2_seq_stats_next(struct seq_file *seq, void *v, loff_t *pos) +{ + (*pos)++; + return NULL; +} + +static int jbd2_seq_stats_show(struct seq_file *seq, void *v) +{ + struct jbd2_stats_proc_session *s = seq->private; + + if (v != SEQ_START_TOKEN) + return 0; + + seq_printf(seq, "%lu %lu %d %lu %lu %lu %lu %lu %lu %llu %u %u %u %d %d\n", + s->stats->ts_tid, s->stats->ts_requested, + s->journal->j_max_transaction_buffers, s->stats->run.rs_wait, + s->stats->run.rs_request_delay, s->stats->run.rs_running, + s->stats->run.rs_locked, s->stats->run.rs_flushing, + s->stats->run.rs_logging, + div_u64(s->journal->j_average_commit_time, NSEC_PER_MSEC), + s->stats->run.rs_handle_count, s->stats->run.rs_blocks, + s->stats->run.rs_blocks_logged, HZ, jiffies_to_msecs(HZ)); + return 0; +} + +static void jbd2_seq_stats_stop(struct seq_file *seq, void *v) +{ +} + +static const struct seq_operations jbd2_seq_stats_ops = { + .start = jbd2_seq_stats_start, + .next = jbd2_seq_stats_next, + .stop = jbd2_seq_stats_stop, + .show = jbd2_seq_stats_show, +}; + +static struct jbd2_stats_proc_session *__jbd2_seq_open(struct inode *inode, + struct file *file) { journal_t *journal = pde_data(inode); struct jbd2_stats_proc_session *s; - int rc, size; + int size; s = kmalloc(sizeof(*s), GFP_KERNEL); if (s == NULL) - return -ENOMEM; + return ERR_PTR(-ENOMEM); size = sizeof(struct transaction_stats_s); s->stats = kmalloc(size, GFP_KERNEL); if (s->stats == NULL) { kfree(s); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } spin_lock(&journal->j_history_lock); memcpy(s->stats, &journal->j_stats, size); s->journal = journal; spin_unlock(&journal->j_history_lock); + return s; +} + +static int jbd2_seq_info_open(struct inode *inode, struct file *file) +{ + struct jbd2_stats_proc_session *s; + int rc; + + s = __jbd2_seq_open(inode, file); + if (IS_ERR(s)) + return PTR_ERR(s); rc = seq_open(file, &jbd2_seq_info_ops); if (rc == 0) { @@ -1286,7 +1339,6 @@ static int 
jbd2_seq_info_open(struct inode *inode, struct file *file) kfree(s); } return rc; - } static int jbd2_seq_info_release(struct inode *inode, struct file *file) @@ -1305,6 +1357,44 @@ static const struct proc_ops jbd2_info_proc_ops = { .proc_release = jbd2_seq_info_release, }; +static int jbd2_seq_stats_open(struct inode *inode, struct file *file) +{ + struct jbd2_stats_proc_session *s; + int rc; + + s = __jbd2_seq_open(inode, file); + if (IS_ERR(s)) + return PTR_ERR(s); + + rc = seq_open(file, &jbd2_seq_stats_ops); + if (rc == 0) { + struct seq_file *m = file->private_data; + + m->private = s; + } else { + kfree(s->stats); + kfree(s); + } + return rc; +} + +static int jbd2_seq_stats_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct jbd2_stats_proc_session *s = seq->private; + + kfree(s->stats); + kfree(s); + return seq_release(inode, file); +} + +static const struct proc_ops jbd2_stats_proc_ops = { + .proc_open = jbd2_seq_stats_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = jbd2_seq_stats_release, +}; + static struct proc_dir_entry *proc_jbd2_stats; static void jbd2_stats_proc_init(journal_t *journal) @@ -1313,12 +1403,15 @@ static void jbd2_stats_proc_init(journal_t *journal) if (journal->j_proc_entry) { proc_create_data("info", S_IRUGO, journal->j_proc_entry, &jbd2_info_proc_ops, journal); + proc_create_data("stats", 0444, journal->j_proc_entry, + &jbd2_stats_proc_ops, journal); } } static void jbd2_stats_proc_exit(journal_t *journal) { remove_proc_entry("info", journal->j_proc_entry); + remove_proc_entry("stats", journal->j_proc_entry); remove_proc_entry(journal->j_devname, proc_jbd2_stats); } -- Gitee From 2069f58fc54836de18de70d643d7441665ec4085 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 26 Feb 2024 15:01:48 +0800 Subject: [PATCH 0124/2138] anolis: jbd2: add proc entry to control whether doing buffer copy-out ANBZ: #8366 When jbd2 tries to get write access to one 
buffer, and if this buffer is under writeback with BH_Shadow flag, jbd2 will wait until this buffer has been written to disk, but sometimes the time taken to wait may be quite long, especially when disk capacity is almost full. Here we add a proc entry "force_copy": if its value is not zero, jbd2 will always do meta buffer copy-out, then we can eliminate the unnecessary waiting time here, and reduce long tail latency for buffered-write. I construct such test case below: $cat offline.fio ; fio-rand-RW.job for fiotest [global] name=fio-rand-RW filename=fio-rand-RW rw=randrw rwmixread=60 rwmixwrite=40 bs=4K direct=0 numjobs=4 time_based=1 runtime=900 [file1] size=60G ioengine=sync iodepth=16 $cat online.fio ; fio-seq-write.job for fiotest [global] name=fio-seq-write filename=fio-seq-write rw=write bs=256K direct=0 numjobs=1 time_based=1 runtime=60 [file1] rate=50m size=10G ioengine=sync iodepth=16 With this patch: $cat /proc/fs/jbd2/sda5-8/force_copy 0 online fio almost always gets such long tail latency: Jobs: 1 (f=1), 0B/s-0B/s: [W(1)][100.0%][w=50.0MiB/s][w=200 IOPS][eta 00m:00s] file1: (groupid=0, jobs=1): err= 0: pid=17855: Thu Nov 15 09:45:57 2018 write: IOPS=200, BW=50.0MiB/s (52.4MB/s)(3000MiB/60001msec) clat (usec): min=135, max=4086.6k, avg=867.21, stdev=50338.22 lat (usec): min=139, max=4086.6k, avg=871.16, stdev=50338.22 clat percentiles (usec): | 1.00th=[ 141], 5.00th=[ 143], 10.00th=[ 145], | 20.00th=[ 147], 30.00th=[ 147], 40.00th=[ 149], | 50.00th=[ 149], 60.00th=[ 151], 70.00th=[ 153], | 80.00th=[ 155], 90.00th=[ 159], 95.00th=[ 163], | 99.00th=[ 255], 99.50th=[ 273], 99.90th=[ 429], | 99.95th=[ 441], 99.99th=[3640656] $cat /proc/fs/jbd2/sda5-8/force_copy 1 online fio latency is much better. 
Jobs: 1 (f=1), 0B/s-0B/s: [W(1)][100.0%][w=50.0MiB/s][w=200 IOPS][eta 00m:00s] file1: (groupid=0, jobs=1): err= 0: pid=8084: Thu Nov 15 09:31:15 2018 write: IOPS=200, BW=50.0MiB/s (52.4MB/s)(3000MiB/60001msec) clat (usec): min=137, max=545, avg=151.35, stdev=16.22 lat (usec): min=140, max=548, avg=155.31, stdev=16.65 clat percentiles (usec): | 1.00th=[ 143], 5.00th=[ 145], 10.00th=[ 145], 20.00th=[ 147], | 30.00th=[ 147], 40.00th=[ 147], 50.00th=[ 149], 60.00th=[ 149], | 70.00th=[ 151], 80.00th=[ 155], 90.00th=[ 157], 95.00th=[ 161], | 99.00th=[ 239], 99.50th=[ 269], 99.90th=[ 420], 99.95th=[ 429], | 99.99th=[ 537] As to the cost: because we'll always need to copy meta buffer, will consume minor cpu time and some memory (at most 32MB for 128MB journal size). Signed-off-by: Xiaoguang Wang Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2795 --- fs/jbd2/journal.c | 57 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/jbd2.h | 5 ++++ 2 files changed, 62 insertions(+) diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 925edbf63c39..02cd958992bf 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -442,6 +442,9 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, } kunmap_local(mapped_data); + /* force copy-out */ + if (need_copy_out == 0 && journal->j_force_copy) + need_copy_out = 1; /* * Do we need to do a data copy? */ @@ -1395,6 +1398,57 @@ static const struct proc_ops jbd2_stats_proc_ops = { .proc_release = jbd2_seq_stats_release, }; +static int jbd2_seq_force_copy_show(struct seq_file *m, void *v) +{ + journal_t *journal = m->private; + + seq_printf(m, "%u\n", journal->j_force_copy); + return 0; +} + +static int jbd2_seq_force_copy_open(struct inode *inode, struct file *filp) +{ + journal_t *journal = pde_data(inode); + + return single_open(filp, jbd2_seq_force_copy_show, journal); +} + +/* Worst case buffer size needed for holding an integer. 
*/ +#define PROC_NUMBUF 13 + +static ssize_t jbd2_seq_force_copy_write(struct file *file, + const char __user *buf, size_t count, loff_t *offset) +{ + struct inode *inode = file_inode(file); + journal_t *journal = pde_data(inode); + char buffer[PROC_NUMBUF]; + unsigned int force_copy; + int err; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + err = -EFAULT; + goto out; + } + + err = kstrtouint(strstrip(buffer), 0, &force_copy); + if (err) + goto out; + journal->j_force_copy = force_copy; +out: + return err < 0 ? err : count; +} + +static const struct proc_ops jbd2_force_copy_proc_ops = { + .proc_open = jbd2_seq_force_copy_open, + .proc_read = seq_read, + .proc_write = jbd2_seq_force_copy_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + static struct proc_dir_entry *proc_jbd2_stats; static void jbd2_stats_proc_init(journal_t *journal) @@ -1403,6 +1457,8 @@ static void jbd2_stats_proc_init(journal_t *journal) if (journal->j_proc_entry) { proc_create_data("info", S_IRUGO, journal->j_proc_entry, &jbd2_info_proc_ops, journal); + proc_create_data("force_copy", 0644, journal->j_proc_entry, + &jbd2_force_copy_proc_ops, journal); proc_create_data("stats", 0444, journal->j_proc_entry, &jbd2_stats_proc_ops, journal); } @@ -1411,6 +1467,7 @@ static void jbd2_stats_proc_init(journal_t *journal) static void jbd2_stats_proc_exit(journal_t *journal) { remove_proc_entry("info", journal->j_proc_entry); + remove_proc_entry("force_copy", journal->j_proc_entry); remove_proc_entry("stats", journal->j_proc_entry); remove_proc_entry(journal->j_devname, proc_jbd2_stats); } diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index b54db4a9b214..19921658c660 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1243,6 +1243,11 @@ struct journal_s */ struct transaction_stats_s j_stats; + /** + * @j_force_copy: if not zero, force to do buffer copy-out. 
+ + */ + unsigned int j_force_copy; + /** * @j_failed_commit: Failed journal commit ID. */ -- Gitee From 4688ce79305743c0ee929b5c777151adcb616f12 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 26 Feb 2024 15:51:33 +0800 Subject: [PATCH 0125/2138] anolis: jbd2: track slow handle which is preventing transaction committing ANBZ: #8366 While a transaction is going to commit, it first sets its state to be T_LOCKED and waits for all outstanding handles to complete, and the committing transaction will always be in locked state so long as it has outstanding handles; also the whole fs will be locked and all later fs modification operations will be stuck in wait_transaction_locked(). It's hard to tell why handles are that slow, so here we add a new static tracepoint to track such slow handles, and show io wait time and sched wait time; output looks like below: fsstress-20347 [024] .... 1570.305454: jbd2_slow_handle_stats: dev 254,17 tid 15853 type 4 line_no 3101 interval 126 sync 0 requested_blocks 24 dirtied_blocks 0 trans_wait 122 space_wait 0 sched_wait 0 io_wait 126 "trans_wait 122" means that the current committing transaction has been locked for 122ms, because this handle is not completed quickly. From "io_wait 126", we can see that io is the major reason. In this patch, we also add a per-fs control file used to determine whether a handle can be considered to be slow. /proc/fs/jbd2/vdb1-8/stall_thresh default value is 100ms; users can set a new threshold by echoing a new value to this file. Later I also plan to add a proc file per fs to record this info. 
Signed-off-by: Xiaoguang Wang Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2795 --- fs/jbd2/commit.c | 1 + fs/jbd2/journal.c | 52 +++++++++++++++++++++++++++++++++++++ fs/jbd2/transaction.c | 39 ++++++++++++++++++++++++++++ include/linux/jbd2.h | 15 +++++++++++ include/trace/events/jbd2.h | 50 +++++++++++++++++++++++++++++++++++ 5 files changed, 157 insertions(+) diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 0cd7439470fc..97c2da3758a4 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -438,6 +438,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) journal->j_fc_off = 0; J_ASSERT(commit_transaction->t_state == T_RUNNING); commit_transaction->t_state = T_LOCKED; + WRITE_ONCE(commit_transaction->t_locked_time, jiffies); trace_jbd2_commit_locking(journal, commit_transaction); stats.run.rs_wait = commit_transaction->t_max_wait; diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 02cd958992bf..735828ef1e7b 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1449,6 +1449,54 @@ static const struct proc_ops jbd2_force_copy_proc_ops = { .proc_release = single_release, }; +static int jbd2_seq_stall_thresh_show(struct seq_file *m, void *v) +{ + journal_t *journal = m->private; + + seq_printf(m, "%lu\n", journal->j_stall_thresh); + return 0; +} + +static int jbd2_seq_stall_thresh_open(struct inode *inode, struct file *filp) +{ + journal_t *journal = pde_data(inode); + + return single_open(filp, jbd2_seq_stall_thresh_show, journal); +} + +static ssize_t jbd2_seq_stall_thresh_write(struct file *file, + const char __user *buf, size_t count, loff_t *offset) +{ + struct inode *inode = file_inode(file); + journal_t *journal = pde_data(inode); + char buffer[PROC_NUMBUF]; + unsigned long long stall_thresh; + int err; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + err = -EFAULT; + goto out; + } + + 
err = kstrtoull(strstrip(buffer), 0, &stall_thresh); + if (err) + goto out; + WRITE_ONCE(journal->j_stall_thresh, stall_thresh); +out: + return err < 0 ? err : count; +} + +static const struct proc_ops jbd2_stall_thresh_proc_ops = { + .proc_open = jbd2_seq_stall_thresh_open, + .proc_read = seq_read, + .proc_write = jbd2_seq_stall_thresh_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + static struct proc_dir_entry *proc_jbd2_stats; static void jbd2_stats_proc_init(journal_t *journal) @@ -1461,6 +1509,8 @@ static void jbd2_stats_proc_init(journal_t *journal) &jbd2_force_copy_proc_ops, journal); proc_create_data("stats", 0444, journal->j_proc_entry, &jbd2_stats_proc_ops, journal); + proc_create_data("stall_thresh", 0644, journal->j_proc_entry, + &jbd2_stall_thresh_proc_ops, journal); } } @@ -1469,6 +1519,7 @@ static void jbd2_stats_proc_exit(journal_t *journal) remove_proc_entry("info", journal->j_proc_entry); remove_proc_entry("force_copy", journal->j_proc_entry); remove_proc_entry("stats", journal->j_proc_entry); + remove_proc_entry("stall_thresh", journal->j_proc_entry); remove_proc_entry(journal->j_devname, proc_jbd2_stats); } @@ -1804,6 +1855,7 @@ static journal_t *journal_init_common(struct block_device *bdev, journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE); journal->j_min_batch_time = 0; journal->j_max_batch_time = 15000; /* 15ms */ + journal->j_stall_thresh = JBD2_DEFAULT_TRANS_STALL_THRESH; atomic_set(&journal->j_reserved_credits, 0); lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle", &jbd2_trans_commit_key, 0); diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 76adab83cac3..9346d5592d1b 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -463,6 +463,11 @@ static handle_t *new_handle(int nblocks) return NULL; handle->h_total_credits = nblocks; handle->h_ref = 1; + handle->h_pre_start_jiffies = jiffies; +#ifdef CONFIG_SCHEDSTATS + handle->h_sched_wait_sum = 
current->stats.wait_sum; + handle->h_io_wait_sum = current->stats.iowait_sum; +#endif return handle; } @@ -1927,6 +1932,40 @@ int jbd2_journal_stop(handle_t *handle) wait_for_commit = 1; } + do { + unsigned long transaction_locked_time, delta; + unsigned long journal_space_wait; + u64 sched_wait, io_wait; + + transaction_locked_time = READ_ONCE(transaction->t_locked_time); + if (!transaction_locked_time) + break; + + delta = jiffies_to_msecs(jiffies - transaction_locked_time); + if (delta < READ_ONCE(journal->j_stall_thresh)) + break; + + journal_space_wait = handle->h_start_jiffies - + handle->h_pre_start_jiffies; +#ifdef CONFIG_SCHEDSTATS + sched_wait = current->stats.wait_sum - + handle->h_sched_wait_sum; + io_wait = current->stats.iowait_sum - + handle->h_io_wait_sum; +#else + sched_wait = 0; + io_wait = 0; +#endif + trace_jbd2_slow_handle_stats(journal->j_fs_dev->bd_dev, + transaction->t_tid, handle->h_type, handle->h_line_no, + jiffies - handle->h_start_jiffies, handle->h_sync, + handle->h_requested_credits, + handle->h_requested_credits - handle->h_total_credits, + delta, jiffies_to_msecs(journal_space_wait), + div_u64(sched_wait, NSEC_PER_MSEC), + div_u64(io_wait, NSEC_PER_MSEC)); + } while (0); + /* * Once stop_this_handle() drops t_updates, the transaction could start * committing on us and eventually disappear. 
So we must not diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 19921658c660..53b3123f8c2a 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -497,7 +497,10 @@ struct jbd2_journal_handle unsigned int h_type: 8; unsigned int h_line_no: 16; + unsigned long h_pre_start_jiffies; unsigned long h_start_jiffies; + u64 h_sched_wait_sum; + u64 h_io_wait_sum; unsigned int h_requested_credits; unsigned int saved_alloc_context; @@ -706,6 +709,9 @@ struct transaction_s * structures associated with the transaction */ struct list_head t_private_list; + + /* When this transaction is locked */ + unsigned long t_locked_time; }; struct transaction_run_stats_s { @@ -1248,6 +1254,15 @@ struct journal_s */ unsigned int j_force_copy; + /** + * @j_stall_thresh: when transaction is locked and there are still + * outstanding handles, such handles will prevent transaction + * committing, trace these handles if they have stalled the transaction + * for @j_stall_thresh time, unit is millisecond, default 100ms. + */ +#define JBD2_DEFAULT_TRANS_STALL_THRESH 100 + unsigned long j_stall_thresh; + /** * @j_failed_commit: Failed journal commit ID. 
*/ diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index 5646ae15a957..5779ac0df039 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -245,6 +245,56 @@ TRACE_EVENT(jbd2_handle_stats, __entry->dirtied_blocks) ); +TRACE_EVENT(jbd2_slow_handle_stats, + TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + unsigned int line_no, int interval, int sync, + int requested_blocks, int dirtied_blocks, + unsigned long trans_wait, unsigned long space_wait, + u64 sched_wait, u64 io_wait), + + TP_ARGS(dev, tid, type, line_no, interval, sync, + requested_blocks, dirtied_blocks, trans_wait, space_wait, + sched_wait, io_wait), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned long, tid) + __field(unsigned int, type) + __field(unsigned int, line_no) + __field(int, interval) + __field(int, sync) + __field(int, requested_blocks) + __field(int, dirtied_blocks) + __field(unsigned long, trans_wait) + __field(unsigned long, space_wait) + __field(u64, sched_wait) + __field(u64, io_wait) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->tid = tid; + __entry->type = type; + __entry->line_no = line_no; + __entry->interval = interval; + __entry->sync = sync; + __entry->requested_blocks = requested_blocks; + __entry->dirtied_blocks = dirtied_blocks; + __entry->trans_wait = trans_wait; + __entry->space_wait = space_wait; + __entry->sched_wait = sched_wait; + __entry->io_wait = io_wait; + ), + + TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d " + "sync %d requested_blocks %d dirtied_blocks %d " + "trans_wait %lu space_wait %lu sched_wait %llu io_wait %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, + __entry->type, __entry->line_no, __entry->interval, + __entry->sync, __entry->requested_blocks, + __entry->dirtied_blocks, __entry->trans_wait, + __entry->space_wait, __entry->sched_wait, __entry->io_wait) +); TRACE_EVENT(jbd2_run_stats, TP_PROTO(dev_t dev, tid_t tid, struct 
transaction_run_stats_s *stats), -- Gitee From d3af7751829895d6092c2b7fb960616a72a79dcd Mon Sep 17 00:00:00 2001 From: liuyun Date: Sat, 2 Dec 2023 10:08:30 +0800 Subject: [PATCH 0126/2138] anolis: LoongArch: add kernel setvirtmap for runtime ANBZ: #8435 Signed-off-by: liuyun Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- arch/loongarch/kernel/efi.c | 163 +++++++++++++++++++++++++++++++++++- 1 file changed, 162 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index 4ae77e9300d5..bb53be4ce1d5 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -23,13 +23,16 @@ #include #include +#include #include +#include static unsigned long efi_nr_tables; static unsigned long efi_config_table; static unsigned long __initdata boot_memmap = EFI_INVALID_TABLE_ADDR; static unsigned long __initdata fdt_pointer = EFI_INVALID_TABLE_ADDR; +static __initdata pgd_t *pgd_efi; static efi_system_table_t *efi_systab; static efi_config_table_type_t arch_tables[] __initdata = { @@ -49,8 +52,162 @@ void __init *efi_fdt_pointer(void) return early_memremap_ro(fdt_pointer, SZ_64K); } +static int __init efimap_populate_hugepages( + unsigned long start, unsigned long end, + pgprot_t prot) +{ + unsigned long addr; + unsigned long next; + pmd_t entry; + pud_t *pud; + pmd_t *pmd; + + for (addr = start; addr < end; addr = next) { + next = pmd_addr_end(addr, end); + pud = pud_offset((p4d_t *)pgd_efi + pgd_index(addr), addr); + if (pud_none(*pud)) { + void *p = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + + if (!p) + return -1; + pmd_init(p); + pud_populate(&init_mm, pud, p); + } + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) { + entry = pfn_pmd((addr >> PAGE_SHIFT), prot); + entry = pmd_mkhuge(entry); + set_pmd_at(&init_mm, addr, pmd, entry); + } + } + return 0; +} + +static void __init efi_map_pgt(void) +{ + unsigned long node; + unsigned long start, end; + unsigned 
long start_pfn, end_pfn; + + pgd_efi = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); + if (!pgd_efi) { + pr_err("alloc efi pgd failed!\n"); + return; + } + pgd_init(pgd_efi); + csr_write64((long)pgd_efi, LOONGARCH_CSR_PGDL); + + /* Low Memory, Cached */ + efimap_populate_hugepages(0, SZ_256M, PAGE_KERNEL); + + for_each_node_mask(node, node_possible_map) { + /* MMIO Registers, Uncached */ + efimap_populate_hugepages(SZ_256M | (node << 44), + SZ_512M | (node << 44), PAGE_KERNEL_SUC); + + get_pfn_range_for_nid(node, &start_pfn, &end_pfn); + start = ALIGN_DOWN(start_pfn << PAGE_SHIFT, PMD_SIZE); + end = ALIGN(end_pfn << PAGE_SHIFT, PMD_SIZE); + + /* System memory, Cached */ + efimap_populate_hugepages(node ? start : SZ_512M, end, PAGE_KERNEL); + } +} + +static int __init efimap_free_pgt(unsigned long start, unsigned long end) +{ + unsigned long addr; + unsigned long next; + pud_t *pud; + pmd_t *pmd; + + for (addr = start; addr < end; addr = next) { + next = pmd_addr_end(addr, end); + + pud = pud_offset((p4d_t *)pgd_efi + pgd_index(addr), addr); + if (!pud_present(*pud)) + continue; + pmd = pmd_offset(pud, addr); + memblock_free(pmd, PAGE_SIZE); + pud_clear(pud); + } + return 0; +} + +static void __init efi_unmap_pgt(void) +{ + unsigned long node; + unsigned long start, end; + unsigned long start_pfn, end_pfn; + + for_each_node_mask(node, node_possible_map) { + get_pfn_range_for_nid(node, &start_pfn, &end_pfn); + start = ALIGN_DOWN(start_pfn << PAGE_SHIFT, PMD_SIZE); + end = ALIGN(end_pfn << PAGE_SHIFT, PMD_SIZE); + + /* Free pagetable memory */ + efimap_free_pgt(start, end); + } + + memblock_free(pgd_efi, PAGE_SIZE); + csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL); + local_flush_tlb_all(); +} + +/* + * set_virtual_map() - create a virtual mapping for the EFI memory map and call + * efi_set_virtual_address_map enter virtual for runtime service + * + * This function populates the virt_addr fields of all memory region descriptors + * in @memory_map whose 
EFI_MEMORY_RUNTIME attribute is set. Those descriptors + * are also copied to @runtime_map, and their total count is returned in @count. + */ +static int __init set_virtual_map(void) +{ + efi_status_t status; + int count = 0; + unsigned int size; + unsigned long attr; + efi_runtime_services_t *rt; + efi_set_virtual_address_map_t *svam; + efi_memory_desc_t *in, runtime_map[32]; + + size = sizeof(efi_memory_desc_t); + + for_each_efi_memory_desc(in) { + attr = in->attribute; + if (!(attr & EFI_MEMORY_RUNTIME)) + continue; + + if (attr & (EFI_MEMORY_WB | EFI_MEMORY_WT)) + in->virt_addr = TO_CACHE(in->phys_addr); + else + in->virt_addr = TO_UNCACHE(in->phys_addr); + + memcpy(&runtime_map[count++], in, size); + } + + rt = early_memremap_ro((unsigned long)efi_systab->runtime, sizeof(*rt)); + + /* Install the new virtual address map */ + svam = rt->set_virtual_address_map; + + efi_map_pgt(); + + status = svam(size * count, size, efi.memmap.desc_version, + (efi_memory_desc_t *)TO_PHYS((unsigned long)runtime_map)); + + efi_unmap_pgt(); + if (status != EFI_SUCCESS) + return -1; + + return 0; +} + void __init efi_runtime_init(void) { + efi_status_t status; + if (!efi_enabled(EFI_BOOT) || !efi_systab->runtime) return; @@ -59,7 +216,11 @@ void __init efi_runtime_init(void) return; } - efi.runtime = (efi_runtime_services_t *)efi_systab->runtime; + status = set_virtual_map(); + if (status < 0) + return; + + efi.runtime = READ_ONCE(efi_systab->runtime); efi.runtime_version = (unsigned int)efi.runtime->hdr.revision; efi_native_runtime_setup(); -- Gitee From 894194904508189864779ed9b0702e075661044b Mon Sep 17 00:00:00 2001 From: liuyun Date: Sat, 2 Dec 2023 10:08:31 +0800 Subject: [PATCH 0127/2138] anolis: LoongArch: Old BPI compatibility ANBZ: #8435 Signed-off-by: liuyun Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- arch/loongarch/include/asm/addrspace.h | 1 + arch/loongarch/include/asm/efi.h | 1 + arch/loongarch/kernel/Makefile | 1 + 
arch/loongarch/kernel/acpi.c | 7 +- arch/loongarch/kernel/efi.c | 12 +- arch/loongarch/kernel/env.c | 6 + arch/loongarch/kernel/irq.c | 25 +- arch/loongarch/kernel/legacy_boot.c | 468 +++++++++++++++++++++++++ arch/loongarch/kernel/legacy_boot.h | 90 +++++ arch/loongarch/kernel/mem.c | 26 +- arch/loongarch/kernel/numa.c | 39 ++- arch/loongarch/kernel/reset.c | 3 +- arch/loongarch/kernel/setup.c | 18 +- arch/loongarch/kernel/smp.c | 6 +- arch/loongarch/pci/acpi.c | 148 +++++++- drivers/firmware/efi/Makefile | 1 + drivers/irqchip/irq-loongarch-cpu.c | 7 +- drivers/irqchip/irq-loongson-eiointc.c | 4 +- drivers/irqchip/irq-loongson-pch-pic.c | 5 + 19 files changed, 842 insertions(+), 26 deletions(-) create mode 100644 arch/loongarch/kernel/legacy_boot.c create mode 100644 arch/loongarch/kernel/legacy_boot.h diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h index b24437e28c6e..60a2ce1a6531 100644 --- a/arch/loongarch/include/asm/addrspace.h +++ b/arch/loongarch/include/asm/addrspace.h @@ -124,6 +124,7 @@ extern unsigned long vm_map_base; #define PCI_IOSIZE SZ_32M #define ISA_IOSIZE SZ_16K #define IO_SPACE_LIMIT (PCI_IOSIZE - 1) +#define ISA_PHY_IOBASE LOONGSON_LIO_BASE #define PHYS_LINK_KADDR PHYSADDR(VMLINUX_LOAD_ADDRESS) diff --git a/arch/loongarch/include/asm/efi.h b/arch/loongarch/include/asm/efi.h index eddc8e79b3fa..54b538d7b7c0 100644 --- a/arch/loongarch/include/asm/efi.h +++ b/arch/loongarch/include/asm/efi.h @@ -6,6 +6,7 @@ #define _ASM_LOONGARCH_EFI_H #include +#include void __init efi_init(void); void __init efi_runtime_init(void); diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index 4fcc168f0732..10ee5fc7ac3e 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -9,6 +9,7 @@ obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \ traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \ elf.o syscall.o signal.o time.o topology.o 
inst.o ptrace.o vdso.o \ alternative.o unwind.o +obj-y += legacy_boot.o obj-$(CONFIG_ACPI) += acpi.o obj-$(CONFIG_EFI) += efi.o diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 55d6a48c76a8..58819b017ba8 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -17,6 +17,7 @@ #include #include #include +#include "legacy_boot.h" int acpi_disabled; EXPORT_SYMBOL(acpi_disabled); @@ -58,7 +59,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) } #ifdef CONFIG_SMP -static int set_processor_mask(u32 id, u32 flags) +int set_processor_mask(u32 id, u32 flags) { int cpu, cpuid = id; @@ -132,6 +133,10 @@ static void __init acpi_process_madt(void) __cpu_logical_map[i] = -1; } #endif + + if (efi_bp && bpi_version <= BPI_VERSION_V1) + legacy_madt_table_init(); + acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC, acpi_parse_processor, MAX_CORE_PIC); diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index bb53be4ce1d5..c0fad2d75460 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -26,6 +26,7 @@ #include #include #include +#include "legacy_boot.h" static unsigned long efi_nr_tables; static unsigned long efi_config_table; @@ -172,6 +173,9 @@ static int __init set_virtual_map(void) efi_set_virtual_address_map_t *svam; efi_memory_desc_t *in, runtime_map[32]; + if (efi_bp) + return EFI_SUCCESS; + size = sizeof(efi_memory_desc_t); for_each_efi_memory_desc(in) { @@ -260,10 +264,12 @@ void __init efi_init(void) void *config_tables; struct efi_boot_memmap *tbl; - if (!efi_system_table) - return; + if (efi_system_table) + efi_systab = (efi_system_table_t *)early_memremap_ro(efi_system_table, + sizeof(*efi_systab)); + else + efi_systab = (efi_system_table_t *)efi_bp->systemtable; - efi_systab = (efi_system_table_t *)early_memremap_ro(efi_system_table, sizeof(*efi_systab)); if (!efi_systab) { pr_err("Can't find EFI system table.\n"); return; diff --git 
a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 6b3bfb0092e6..85dbfb1256eb 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -12,6 +12,7 @@ #include #include #include +#include "legacy_boot.h" u64 efi_system_table; struct loongson_system_configuration loongson_sysconf; @@ -22,6 +23,11 @@ void __init init_environ(void) int efi_boot = fw_arg0; char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); + legacy_boot_init(fw_arg0, fw_arg1, fw_arg2); + + if (efi_bp) + return; + if (efi_boot) set_bit(EFI_BOOT, &efi.flags); else diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index df42c063f6c4..57b4720ddd87 100644 --- a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -20,6 +20,7 @@ #include #include #include +#include "legacy_boot.h" DEFINE_PER_CPU(unsigned long, irq_stack); DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); @@ -61,6 +62,12 @@ static int __init early_pci_mcfg_parse(struct acpi_table_header *header) if (header->length < sizeof(struct acpi_table_mcfg)) return -EINVAL; + for (i = 0; i < MAX_IO_PICS; i++) { + msi_group[i].pci_segment = -1; + msi_group[i].node = -1; + pch_group[i].node = -1; + } + n = (header->length - sizeof(struct acpi_table_mcfg)) / sizeof(struct acpi_mcfg_allocation); mcfg = (struct acpi_table_mcfg *)header; @@ -76,14 +83,6 @@ static int __init early_pci_mcfg_parse(struct acpi_table_header *header) static void __init init_vec_parent_group(void) { - int i; - - for (i = 0; i < MAX_IO_PICS; i++) { - msi_group[i].pci_segment = -1; - msi_group[i].node = -1; - pch_group[i].node = -1; - } - acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse); } @@ -99,7 +98,7 @@ static int __init get_ipi_irq(void) void __init init_IRQ(void) { - int i; + int i, ret; #ifdef CONFIG_SMP int r, ipi_irq; static int ipi_dummy_dev; @@ -111,7 +110,13 @@ void __init init_IRQ(void) clear_csr_estat(ESTATF_IP); init_vec_parent_group(); - irqchip_init(); + if (efi_bp && 
bpi_version <= BPI_VERSION_V1) { + ret = setup_legacy_IRQ(); + if (ret) + panic("IRQ domain init error!\n"); + } else { + irqchip_init(); + } #ifdef CONFIG_SMP ipi_irq = get_ipi_irq(); if (ipi_irq < 0) diff --git a/arch/loongarch/kernel/legacy_boot.c b/arch/loongarch/kernel/legacy_boot.c new file mode 100644 index 000000000000..4b9ee3320897 --- /dev/null +++ b/arch/loongarch/kernel/legacy_boot.c @@ -0,0 +1,468 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: Yun Liu, liuyun@loongson.cn + * Copyright (C) 2020 Loongson Technology Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "legacy_boot.h" + +#define MAX_CORE_PIC 256 +#define PREFIX "ACPI: " + +#define MSI_MSG_ADDRESS 0x2FF00000 +#define MSI_MSG_DEFAULT_COUNT 0xC0 + +struct boot_params *efi_bp; +struct loongsonlist_mem_map *g_mmap; +struct acpi_madt_lio_pic *acpi_liointc; +struct acpi_madt_eio_pic *acpi_eiointc[MAX_IO_PICS]; + +struct acpi_madt_ht_pic *acpi_htintc; +struct acpi_madt_lpc_pic *acpi_pchlpc; +struct acpi_madt_msi_pic *acpi_pchmsi[MAX_IO_PICS]; +struct acpi_madt_bio_pic *acpi_pchpic[MAX_IO_PICS]; + +struct irq_domain *cpu_domain; +struct irq_domain *liointc_domain; +struct irq_domain *pch_lpc_domain; +struct irq_domain *pch_msi_domain[MAX_IO_PICS]; +struct irq_domain *pch_pic_domain[MAX_IO_PICS]; + +char arcs_cmdline[COMMAND_LINE_SIZE]; +int nr_io_pics; +int bpi_version; + +struct acpi_madt_lio_pic liointc_default = { + .address = LOONGSON_REG_BASE + 0x1400, + .size = 256, + .cascade = {2, 3}, + .cascade_map = {0x00FFFFFF, 0xff000000}, +}; + +struct acpi_madt_lpc_pic pchlpc_default = { + .address = LS7A_LPC_REG_BASE, + .size = SZ_4K, + .cascade = 19, +}; + +struct 
acpi_madt_eio_pic eiointc_default[MAX_IO_PICS]; +struct acpi_madt_msi_pic pchmsi_default[MAX_IO_PICS]; +struct acpi_madt_bio_pic pchpic_default[MAX_IO_PICS]; + +static int +acpi_parse_lapic(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_local_apic *processor = NULL; + + processor = (struct acpi_madt_local_apic *)header; + if (BAD_MADT_ENTRY(processor, end)) + return -EINVAL; + + acpi_table_print_madt_entry(&header->common); + set_processor_mask(processor->id, processor->lapic_flags); + + return 0; +} + +static int bad_pch_pic(unsigned long address) +{ + if (nr_io_pics >= MAX_IO_PICS) { + pr_warn("WARNING: Max # of I/O PCH_PICs (%d) exceeded (found %d), skipping\n", + MAX_IO_PICS, nr_io_pics); + return 1; + } + if (!address) { + pr_warn("WARNING: Bogus (zero) I/O PCH_PIC address found in table, skipping!\n"); + return 1; + } + return 0; +} + +void register_default_pic(int id, u32 address, u32 irq_base) +{ + int idx, entries; + unsigned long addr; + + if (bad_pch_pic(address)) + return; + + idx = nr_io_pics; + + pchpic_default[idx].address = address; + if (idx) + pchpic_default[idx].address |= nid_to_addrbase(id) | HT1LO_OFFSET; + pchpic_default[idx].id = id; + pchpic_default[idx].version = 0; + pchpic_default[idx].size = 0x1000; + pchpic_default[idx].gsi_base = irq_base; + + msi_group[nr_io_pics].pci_segment = nr_io_pics; + pch_group[nr_io_pics].node = msi_group[nr_io_pics].node = id; + + addr = pchpic_default[idx].address; + /* Read INT_ID.int_num */ + entries = (((unsigned long)ls7a_readq(addr) >> 48) & 0xff) + 1; + pchmsi_default[idx].msg_address = MSI_MSG_ADDRESS; + pchmsi_default[idx].start = entries; + pchmsi_default[idx].count = MSI_MSG_DEFAULT_COUNT; + + eiointc_default[idx].cascade = 3; + eiointc_default[idx].node = id; + eiointc_default[idx].node_map = 1; + + if (idx) { + eiointc_default[idx].cascade = 0x4; + eiointc_default[0].node_map = 0x1DF; + eiointc_default[idx].node_map = 0xFE20; + } + + acpi_pchpic[idx] = 
&pchpic_default[idx]; + acpi_pchmsi[idx] = &pchmsi_default[idx]; + acpi_eiointc[idx] = &eiointc_default[idx]; + + nr_io_pics++; +} + +static int +acpi_parse_legacy_pch_pic(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_io_apic *pch_pic = NULL; + + pch_pic = (struct acpi_madt_io_apic *)header; + + if (BAD_MADT_ENTRY(pch_pic, end)) + return -EINVAL; + + acpi_table_print_madt_entry(&header->common); + + register_default_pic(pch_pic->id, pch_pic->address, + pch_pic->global_irq_base); + + return 0; +} + +__init int legacy_madt_table_init(void) +{ + /* Parse MADT LAPIC entries */ + acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic, MAX_CORE_PIC); + acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_legacy_pch_pic, MAX_IO_PICS); + + acpi_liointc = &liointc_default; + acpi_pchlpc = &pchlpc_default; + + return 0; +} + +int setup_legacy_IRQ(void) +{ + int i, ret; + struct irq_domain *pic_domain; + + if (!acpi_eiointc[0]) + cpu_data[0].options &= ~LOONGARCH_CPU_EXTIOI; + + ret = cpuintc_acpi_init(NULL, 0); + if (ret) { + pr_err("CPU domain init error!\n"); + return -1; + } + cpu_domain = get_cpudomain(); + ret = liointc_acpi_init(cpu_domain, acpi_liointc); + if (ret) { + pr_err("Liointc domain init error!\n"); + return -1; + } + liointc_domain = irq_find_matching_fwnode(liointc_handle, DOMAIN_BUS_ANY); + if (cpu_has_extioi) { + pr_info("Using EIOINTC interrupt mode\n"); + for (i = 0; i < nr_io_pics; i++) { + ret = eiointc_acpi_init(cpu_domain, acpi_eiointc[i]); + if (ret) { + pr_err("Eiointc domain init error!\n"); + return -1; + } + + pch_pic_parse_madt((union acpi_subtable_headers *)acpi_pchpic[i], 0); + pch_msi_parse_madt((union acpi_subtable_headers *)acpi_pchmsi[i], 0); + } + /* HTVECINTC maybe not use */ + } else { + pr_info("Using HTVECINTC interrupt mode\n"); + ret = htvec_acpi_init(liointc_domain, acpi_htintc); + if (ret) { + pr_err("HTVECintc domain init error!\n"); + return -1; + } + 
pch_pic_parse_madt((union acpi_subtable_headers *)acpi_pchpic[0], 0); + pch_msi_parse_madt((union acpi_subtable_headers *)acpi_pchmsi[0], 0); + } + + pic_domain = get_pchpic_irq_domain(); + if (pic_domain) + pch_lpc_acpi_init(pic_domain, acpi_pchlpc); + + return 0; +} + +/* + * Manage initrd + */ +#ifdef CONFIG_BLK_DEV_INITRD +static __init int rd_start_early(char *p) +{ + phys_initrd_start = __pa(memparse(p, NULL)); + + return 0; +} +early_param("rd_start", rd_start_early); + +static __init int rd_size_early(char *p) +{ + phys_initrd_size = memparse(p, NULL); + + return 0; +} +early_param("rd_size", rd_size_early); + +#endif + +__init void fw_init_cmdline(unsigned long argc, unsigned long cmdp) +{ + int i; + char **_fw_argv; + + _fw_argv = (char **)cmdp; + + arcs_cmdline[0] = '\0'; + for (i = 1; i < argc; i++) { + strlcat(arcs_cmdline, _fw_argv[i], COMMAND_LINE_SIZE); + if (i < (argc - 1)) + strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE); + } + strscpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE); +} + +static u8 ext_listhdr_checksum(u8 *buffer, u32 length) +{ + u8 sum = 0; + u8 *end = buffer + length; + + while (buffer < end) + sum = (u8)(sum + *(buffer++)); + + return sum; +} + +static int parse_mem(struct _extention_list_hdr *head) +{ + g_mmap = (struct loongsonlist_mem_map *)head; + if (ext_listhdr_checksum((u8 *)g_mmap, head->length)) { + pr_err("mem checksum error\n"); + return -EPERM; + } + return 0; +} + +/* legacy firmware passed, add use this info if need vbios */ +static int parse_vbios(struct _extention_list_hdr *head) +{ + struct loongsonlist_vbios *pvbios; + + pvbios = (struct loongsonlist_vbios *)head; + + if (ext_listhdr_checksum((u8 *)pvbios, head->length)) { + pr_err("vbios_addr checksum error\n"); + return -EPERM; + } + return 0; +} + +/* legacy firmware passed, add use this info if need screeninfo KVM? 
*/ +static int parse_screeninfo(struct _extention_list_hdr *head) +{ + struct loongsonlist_screeninfo *pscreeninfo; + + pscreeninfo = (struct loongsonlist_screeninfo *)head; + if (ext_listhdr_checksum((u8 *)pscreeninfo, head->length)) { + pr_err("screeninfo_addr checksum error\n"); + return -EPERM; + } + + memcpy(&screen_info, &pscreeninfo->si, sizeof(screen_info)); + return 0; +} + +static int list_find(struct boot_params *bp) +{ + struct _extention_list_hdr *fhead = NULL; + unsigned long index; + + fhead = bp->extlist; + if (!fhead) { + pr_err("the bp ext struct empty!\n"); + return -1; + } + do { + if (memcmp(&(fhead->signature), LOONGSON_MEM_SIGNATURE, 3) == 0) { + if (parse_mem(fhead) != 0) { + pr_err("parse mem failed\n"); + return -EPERM; + } + } else if (memcmp(&(fhead->signature), LOONGSON_VBIOS_SIGNATURE, 5) == 0) { + if (parse_vbios(fhead) != 0) { + pr_err("parse vbios failed\n"); + return -EPERM; + } + } else if (memcmp(&(fhead->signature), LOONGSON_SCREENINFO_SIGNATURE, 5) == 0) { + if (parse_screeninfo(fhead) != 0) { + pr_err("parse screeninfo failed\n"); + return -EPERM; + } + } + fhead = (struct _extention_list_hdr *)fhead->next; + index = (unsigned long)fhead; + } while (index); + return 0; +} + +unsigned int bpi_init(void) +{ + return list_find(efi_bp); +} + +static int get_bpi_version(u64 *signature) +{ + u8 data[9]; + int version = BPI_VERSION_NONE; + + data[8] = 0; + + memcpy(data, signature, sizeof(*signature)); + if (kstrtoint(&data[3], 10, &version)) + return BPI_VERSION_NONE; + return version; +} + +static void __init parse_bpi_flags(void) +{ + if (efi_bp->flags & BPI_FLAGS_UEFI_SUPPORTED) + set_bit(EFI_BOOT, &efi.flags); + else + clear_bit(EFI_BOOT, &efi.flags); +} + +__init unsigned long legacy_boot_init(unsigned long argc, unsigned long cmdptr, unsigned long bpi) +{ + int ret; + + if (!bpi || argc < 2) + return -1; + efi_bp = (struct boot_params *)bpi; + bpi_version = get_bpi_version(&efi_bp->signature); + pr_info("BPI%d with boot flags 
%llx.\n", bpi_version, efi_bp->flags); + if (bpi_version == BPI_VERSION_NONE) + panic("Fatal error, bpi ver BONE!\n"); + else if (bpi_version == BPI_VERSION_V2) + parse_bpi_flags(); + + fw_init_cmdline(argc, cmdptr); + ret = bpi_init(); + if (ret) { + pr_err("init legacy firmware error!\n"); + return -1; + } + + return 0; +} + +static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, unsigned long isa_base) +{ + int ret = 0; + unsigned long vaddr; + struct logic_pio_hwaddr *range; + + range = kzalloc(sizeof(*range), GFP_ATOMIC); + if (!range) + return -ENOMEM; + + range->fwnode = fwnode; + range->size = ISA_IOSIZE; + range->hw_start = isa_base; + range->flags = LOGIC_PIO_CPU_MMIO; + + ret = logic_pio_register_range(range); + if (ret) { + kfree(range); + return ret; + } + + if (range->io_start != 0) { + logic_pio_unregister_range(range); + kfree(range); + return -EINVAL; + } + + vaddr = (unsigned long)(PCI_IOBASE + range->io_start); + ret = ioremap_page_range(vaddr, vaddr + range->size, range->hw_start, + pgprot_device(PAGE_KERNEL)); + return ret; +} + +static struct fwnode_handle * __init parse_isa_base(u64 *cpu_addr) +{ + struct device_node *np; + const __be32 *ranges = NULL; + int len; + struct device_node *node; + + for_each_node_by_name(np, "isa") { + node = of_node_get(np); + + if (!node) + break; + + ranges = of_get_property(node, "ranges", &len); + + if (!ranges || (ranges && len > 0)) + break; + } + if (ranges) { + ranges += 2; + *cpu_addr = of_translate_address(np, ranges); + return &np->fwnode; + } + + return NULL; +} + +static int __init register_legacy_isa_io(void) +{ + struct fwnode_handle *fwnode; + u64 cpu_addr; + + if (!acpi_disabled) { + cpu_addr = ISA_PHY_IOBASE; + fwnode = kzalloc(sizeof(*fwnode), GFP_ATOMIC); + } else { + fwnode = parse_isa_base(&cpu_addr); + } + + if (fwnode) + add_legacy_isa_io(fwnode, cpu_addr); + + return 0; +} +arch_initcall(register_legacy_isa_io); diff --git a/arch/loongarch/kernel/legacy_boot.h 
b/arch/loongarch/kernel/legacy_boot.h new file mode 100644 index 000000000000..982bf9b1de72 --- /dev/null +++ b/arch/loongarch/kernel/legacy_boot.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LEGACY_BOOT_H_ +#define __LEGACY_BOOT_H_ +#include +#include +#define ADDRESS_TYPE_SYSRAM 1 +#define ADDRESS_TYPE_RESERVED 2 +#define ADDRESS_TYPE_ACPI 3 +#define ADDRESS_TYPE_NVS 4 +#define ADDRESS_TYPE_PMEM 5 + +#define LOONGSON3_BOOT_MEM_MAP_MAX 128 +#define RT_MAP_START 100 +#define FIX_MAP_ENTRY 32 + +/* mask of the flags in bootparamsinterface */ +#define BPI_FLAGS_UEFI_SUPPORTED BIT(0) +#define BPI_FLAGS_SOC_CPU BIT(1) + +#define LOONGSON_DMA_MASK_BIT 64 +#define LOONGSON_MEM_SIGNATURE "MEM" +#define LOONGSON_VBIOS_SIGNATURE "VBIOS" +#define LOONGSON_EFIBOOT_SIGNATURE "BPI" +#define LOONGSON_SCREENINFO_SIGNATURE "SINFO" +#define LOONGSON_EFIBOOT_VERSION 1000 + +/* Values for Version firmware */ + +enum bpi_vers { + BPI_VERSION_NONE = 0, + BPI_VERSION_V1 = 1000, + BPI_VERSION_V2 = 1001, +}; + +struct boot_params { + u64 signature; /* {"BPIXXXXX"} */ + void *systemtable; + struct _extention_list_hdr *extlist; + u64 flags; +} __packed; + +struct _extention_list_hdr { + u64 signature; + u32 length; + u8 revision; + u8 checksum; + struct _extention_list_hdr *next; +} __packed; + +struct loongsonlist_mem_map { + struct _extention_list_hdr header; /*{"M", "E", "M"}*/ + u8 map_count; + struct _loongson_mem_map { + u32 mem_type; + u64 mem_start; + u64 mem_size; + } __packed map[LOONGSON3_BOOT_MEM_MAP_MAX]; +} __packed; + +struct loongsonlist_vbios { + struct _extention_list_hdr header; /* {VBIOS} */ + u64 vbios_addr; +} __packed; + +struct loongsonlist_screeninfo { + struct _extention_list_hdr header; + struct screen_info si; +}; +unsigned long legacy_boot_init(unsigned long argc, + unsigned long cmdptr, unsigned long bpi); +extern int bpi_version; +extern struct boot_params *efi_bp; +extern struct loongsonlist_mem_map *g_mmap; +extern int 
set_processor_mask(u32 id, u32 flags); +extern int __init setup_legacy_IRQ(void); +extern struct loongson_system_configuration loongson_sysconf; +extern unsigned long long smp_group[MAX_PACKAGES]; +extern int legacy_madt_table_init(void); +extern struct pch_pic *pch_pic_priv[MAX_IO_PICS]; +extern struct irq_domain *get_cpudomain(void); +extern int __init cpuintc_acpi_init(union acpi_subtable_headers *header, + const unsigned long end); +extern int __init +pch_pic_parse_madt(union acpi_subtable_headers *header, + const unsigned long end); +extern int __init +pch_msi_parse_madt(union acpi_subtable_headers *header, + const unsigned long end); +extern struct irq_domain *get_pchpic_irq_domain(void); +#endif diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c index aed901c57fb4..5fd1bc3333bc 100644 --- a/arch/loongarch/kernel/mem.c +++ b/arch/loongarch/kernel/mem.c @@ -9,13 +9,35 @@ #include #include #include - +#include "legacy_boot.h" void __init memblock_init(void) { - u32 mem_type; + u32 i, mem_type; u64 mem_start, mem_end, mem_size; efi_memory_desc_t *md; + if (g_mmap) { + /* parse memory information */ + for (i = 0; i < g_mmap->map_count; i++) { + mem_type = g_mmap->map[i].mem_type; + mem_start = g_mmap->map[i].mem_start; + mem_size = g_mmap->map[i].mem_size; + mem_end = mem_start + mem_size; + + switch (mem_type) { + case ADDRESS_TYPE_SYSRAM: + pr_info("add memory region memblock - base: 0x%llx size: 0x%llx\n", mem_start, mem_size); + memblock_add(mem_start, mem_size); + if (max_low_pfn < (mem_end >> PAGE_SHIFT)) + max_low_pfn = mem_end >> PAGE_SHIFT; + break; + } + } + memblock_set_current_limit(PFN_PHYS(max_low_pfn)); + memblock_reserve(__pa_symbol(&_text), + __pa_symbol(&_end) - __pa_symbol(&_text)); + return; + } /* Parse memory information */ for_each_efi_memory_desc(md) { mem_type = md->type; diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c index 8fe21f868f72..97fcbf7678f6 100644 --- a/arch/loongarch/kernel/numa.c 
+++ b/arch/loongarch/kernel/numa.c @@ -25,6 +25,7 @@ #include #include #include +#include "legacy_boot.h" int numa_off; struct pglist_data *node_data[MAX_NUMNODES]; @@ -37,7 +38,6 @@ static struct numa_meminfo numa_meminfo; cpumask_t cpus_on_node[MAX_NUMNODES]; cpumask_t phys_cpus_on_node[MAX_NUMNODES]; EXPORT_SYMBOL(cpus_on_node); - /* * apicid, cpu, node mappings */ @@ -275,10 +275,45 @@ static void __init add_numamem_region(u64 start, u64 end, u32 type) static void __init init_node_memblock(void) { - u32 mem_type; + u32 i, mem_type; u64 mem_end, mem_start, mem_size; efi_memory_desc_t *md; + if (g_mmap) { + for (i = 0; i < g_mmap->map_count; i++) { + mem_type = g_mmap->map[i].mem_type; + mem_start = g_mmap->map[i].mem_start; + mem_size = g_mmap->map[i].mem_size; + mem_end = g_mmap->map[i].mem_start + mem_size; + + switch (mem_type) { + case ADDRESS_TYPE_SYSRAM: + mem_start = PFN_ALIGN(mem_start); + mem_end = PFN_ALIGN(mem_end - PAGE_SIZE + 1); + if (mem_start >= mem_end) + break; + add_numamem_region(mem_start, mem_end, EFI_PERSISTENT_MEMORY); + break; + + case ADDRESS_TYPE_ACPI: + mem_start = PFN_ALIGN(mem_start - PAGE_SIZE + 1); + mem_end = PFN_ALIGN(mem_end); + mem_size = mem_end - mem_start; + memblock_add(mem_start, mem_size); + memblock_mark_nomap(mem_start, mem_size); + memblock_set_node(mem_start, mem_size, + &memblock.memory, 0); + memblock_reserve(mem_start, mem_size); + break; + + case ADDRESS_TYPE_RESERVED: + memblock_reserve(mem_start, mem_size); + break; + } + } + return; + } + /* Parse memory information and activate */ for_each_efi_memory_desc(md) { mem_type = md->type; diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c index 1ef8c6383535..e7282e8de1cd 100644 --- a/arch/loongarch/kernel/reset.c +++ b/arch/loongarch/kernel/reset.c @@ -49,7 +49,8 @@ void machine_power_off(void) #endif do_kernel_power_off(); #ifdef CONFIG_EFI - efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); + if (efi.reset_system) + 
efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); #endif while (true) { diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 7ef1c1ff1fc4..83d8e7662b06 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -48,6 +48,7 @@ #include #include #include +#include "legacy_boot.h" #define SMBIOS_BIOSSIZE_OFFSET 0x09 #define SMBIOS_BIOSEXTERN_OFFSET 0x13 @@ -137,9 +138,22 @@ static void __init parse_cpu_table(const struct dmi_header *dm) static void __init parse_bios_table(const struct dmi_header *dm) { + int bios_extern; char *dmi_data = (char *)dm; + bios_extern = *(dmi_data + SMBIOS_BIOSEXTERN_OFFSET); b_info.bios_size = (*(dmi_data + SMBIOS_BIOSSIZE_OFFSET) + 1) << 6; + + if (bpi_version == BPI_VERSION_V2) { + if ((!!(efi_bp->flags & BPI_FLAGS_UEFI_SUPPORTED)) != (!!(bios_extern & LOONGSON_EFI_ENABLE))) + pr_err("There is a conflict of definitions between efi_bp->flags and smbios\n"); + return; + } + + if (bios_extern & LOONGSON_EFI_ENABLE) + set_bit(EFI_BOOT, &efi.flags); + else + clear_bit(EFI_BOOT, &efi.flags); } static void __init find_tokens(const struct dmi_header *dm, void *dummy) @@ -617,7 +631,9 @@ void __init setup_arch(char **cmdline_p) pagetable_init(); bootcmdline_init(cmdline_p); parse_early_param(); - reserve_initrd_mem(); + /* The small fdt method should be skipped directly to avoid two reserved operations. 
*/ + if (fw_arg2) + reserve_initrd_mem(); platform_init(); arch_mem_init(cmdline_p); diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 9dbe7907a961..fc716e15415f 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -32,6 +32,7 @@ #include #include #include +#include "legacy_boot.h" int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ EXPORT_SYMBOL(__cpu_number_map); @@ -312,11 +313,12 @@ void __init loongson_prepare_cpus(unsigned int max_cpus) */ void loongson_boot_secondary(int cpu, struct task_struct *idle) { - unsigned long entry; + unsigned long entry = (unsigned long)&smpboot_entry; pr_info("Booting CPU#%d...\n", cpu); - entry = __pa_symbol((unsigned long)&smpboot_entry); + if (!efi_bp) + entry = __pa_symbol((unsigned long)&smpboot_entry); cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle); cpuboot_data.thread_info = (unsigned long)task_thread_info(idle); diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c index 1da4dc46df43..486dba309ac1 100644 --- a/arch/loongarch/pci/acpi.c +++ b/arch/loongarch/pci/acpi.c @@ -58,13 +58,159 @@ static void acpi_release_root_info(struct acpi_pci_root_info *ci) kfree(info); } +static void arch_pci_root_validate_resources(struct device *dev, + struct list_head *resources, + unsigned long type) +{ + LIST_HEAD(list); + struct resource *res1, *res2, *root = NULL; + struct resource_entry *tmp, *entry, *entry2; + + WARN_ON((type & (IORESOURCE_MEM | IORESOURCE_IO)) == 0); + root = (type & IORESOURCE_MEM) ? 
&iomem_resource : &ioport_resource; + + list_splice_init(resources, &list); + resource_list_for_each_entry_safe(entry, tmp, &list) { + bool free = false; + resource_size_t end; + + res1 = entry->res; + if (!(res1->flags & type)) + goto next; + + /* Exclude non-addressable range or non-addressable portion */ + end = min(res1->end, root->end); + if (end <= res1->start) { + dev_info(dev, "host bridge window %pR (ignored, not CPU addressable)\n", + res1); + free = true; + goto next; + } else if (res1->end != end) { + dev_info(dev, "host bridge window %pR ([%#llx-%#llx] ignored, not CPU addressable)\n", + res1, (unsigned long long)end + 1, + (unsigned long long)res1->end); + res1->end = end; + } + + resource_list_for_each_entry(entry2, resources) { + res2 = entry2->res; + if (!(res2->flags & type)) + continue; + + /* + * I don't like throwing away windows because then + * our resources no longer match the ACPI _CRS, but + * the kernel resource tree doesn't allow overlaps. + */ + if (resource_overlaps(res1, res2)) { + res2->start = min(res1->start, res2->start); + res2->end = max(res1->end, res2->end); + dev_info(dev, "host bridge window expanded to %pR; %pR ignored\n", + res2, res1); + free = true; + goto next; + } + } + +next: + resource_list_del(entry); + if (free) + resource_list_free_entry(entry); + else + resource_list_add_tail(entry, resources); + } +} +static void arch_pci_root_remap_iospace(struct fwnode_handle *fwnode, + struct resource_entry *entry) +{ + struct resource *res = entry->res; + resource_size_t cpu_addr = res->start; + resource_size_t pci_addr = cpu_addr - entry->offset; + resource_size_t length = resource_size(res); + unsigned long port; + + if (pci_register_io_range(fwnode, cpu_addr, length)) { + res->start += ISA_IOSIZE; + cpu_addr = res->start; + pci_addr = cpu_addr - entry->offset; + length = resource_size(res); + if (pci_register_io_range(fwnode, cpu_addr, length)) + goto err; + } + + port = pci_address_to_pio(cpu_addr); + if (port == 
(unsigned long)-1) + goto err; + + res->start = port; + res->end = port + length - 1; + entry->offset = port - pci_addr; + + if (pci_remap_iospace(res, cpu_addr) < 0) + goto err; + + pr_info("Remapped I/O %pa to %pR\n", &cpu_addr, res); + return; +err: + res->flags |= IORESOURCE_DISABLED; +} + +static int arch_pci_probe_root_resources(struct acpi_pci_root_info *info) +{ + int ret; + struct list_head *list = &info->resources; + struct acpi_device *device = info->bridge; + struct resource_entry *entry, *tmp; + unsigned long flags; + struct resource *res; + + flags = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT; + ret = acpi_dev_get_resources(device, list, + acpi_dev_filter_resource_type_cb, + (void *)flags); + if (ret < 0) + dev_warn(&device->dev, + "failed to parse _CRS method, error code %d\n", ret); + else if (ret == 0) + dev_dbg(&device->dev, + "no IO and memory resources present in _CRS\n"); + else { + resource_list_for_each_entry_safe(entry, tmp, list) { + if (entry->res->flags & IORESOURCE_IO) { + res = entry->res; + res->start = PFN_ALIGN(res->start); + res->end += 1; + res->end = PFN_ALIGN(res->end); + res->end -= 1; + if (!entry->offset) { + entry->offset = LOONGSON_LIO_BASE; + res->start |= LOONGSON_LIO_BASE; + res->end |= LOONGSON_LIO_BASE; + } + arch_pci_root_remap_iospace(&device->fwnode, + entry); + } + if (entry->res->flags & IORESOURCE_DISABLED) + resource_list_destroy_entry(entry); + else + entry->res->name = info->name; + } + arch_pci_root_validate_resources(&device->dev, list, + IORESOURCE_MEM); + arch_pci_root_validate_resources(&device->dev, list, + IORESOURCE_IO); + } + + return ret; +} + static int acpi_prepare_root_resources(struct acpi_pci_root_info *ci) { int status; struct resource_entry *entry, *tmp; struct acpi_device *device = ci->bridge; - status = acpi_pci_probe_root_resources(ci); + status = arch_pci_probe_root_resources(ci); if (status > 0) { resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { if 
(entry->res->flags & IORESOURCE_MEM) { diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index e489fefd23da..b4528af86517 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -37,6 +37,7 @@ obj-$(CONFIG_ARM) += $(arm-obj-y) obj-$(CONFIG_ARM64) += $(arm-obj-y) riscv-obj-$(CONFIG_EFI) := efi-init.o riscv-runtime.o obj-$(CONFIG_RISCV) += $(riscv-obj-y) +#obj-$(CONFIG_LOONGARCH) += efi-init.o obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o obj-$(CONFIG_EFI_EARLYCON) += earlycon.o obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c index b35903a06902..4380b4d8dd20 100644 --- a/drivers/irqchip/irq-loongarch-cpu.c +++ b/drivers/irqchip/irq-loongarch-cpu.c @@ -143,7 +143,12 @@ static int __init acpi_cascade_irqdomain_init(void) return 0; } -static int __init cpuintc_acpi_init(union acpi_subtable_headers *header, +struct irq_domain *get_cpudomain(void) +{ + return irq_domain; +} + +int __init cpuintc_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { int ret; diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index 08e95fad5b12..c11a6676a82d 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -329,7 +329,7 @@ static struct syscore_ops eiointc_syscore_ops = { .resume = eiointc_resume, }; -static int __init pch_pic_parse_madt(union acpi_subtable_headers *header, +int __init pch_pic_parse_madt(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header; @@ -342,7 +342,7 @@ static int __init pch_pic_parse_madt(union acpi_subtable_headers *header, return 0; } -static int __init pch_msi_parse_madt(union acpi_subtable_headers *header, +int __init pch_msi_parse_madt(union acpi_subtable_headers *header, const unsigned long end) { struct irq_domain *parent; 
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index 63db8e2172e0..372215f2b9ed 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -52,6 +52,11 @@ static struct pch_pic *pch_pic_priv[MAX_IO_PICS]; struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; +struct irq_domain *get_pchpic_irq_domain(void) +{ + return pch_pic_priv[0]->pic_domain; +} + static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit) { u32 reg; -- Gitee From aea6e650775353c14b8db87148aec5a8a01bfbff Mon Sep 17 00:00:00 2001 From: liuyun Date: Sat, 2 Dec 2023 10:08:32 +0800 Subject: [PATCH 0128/2138] anolis: LoongArch: Fix virtual machine startup error ANBZ: #8435 Signed-off-by: liuyun Signed-off-by: maobibo Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- arch/loongarch/include/asm/irq.h | 1 + arch/loongarch/include/asm/loongarch.h | 1 + arch/loongarch/kernel/legacy_boot.c | 11 ++++--- drivers/irqchip/irq-loongson-eiointc.c | 40 +++++++++++++++++--------- 4 files changed, 36 insertions(+), 17 deletions(-) diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index 218b4da0ea90..722eb1aa726f 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -53,6 +53,7 @@ struct acpi_vector_group { extern struct acpi_vector_group pch_group[MAX_IO_PICS]; extern struct acpi_vector_group msi_group[MAX_IO_PICS]; +#define MAX_CORES_PER_EIO_NODE 256 #define CORES_PER_EIO_NODE 4 #define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */ diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 23232c7bdb9f..badb065f8383 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -171,6 +171,7 @@ /* IOCSR */ #define iocsr_read32(reg) __iocsrrd_w(reg) #define iocsr_read64(reg) __iocsrrd_d(reg) +#define iocsr_write8(val, reg) 
__iocsrwr_b(val, reg) #define iocsr_write32(val, reg) __iocsrwr_w(val, reg) #define iocsr_write64(val, reg) __iocsrwr_d(val, reg) diff --git a/arch/loongarch/kernel/legacy_boot.c b/arch/loongarch/kernel/legacy_boot.c index 4b9ee3320897..214e7e0b04af 100644 --- a/arch/loongarch/kernel/legacy_boot.c +++ b/arch/loongarch/kernel/legacy_boot.c @@ -211,7 +211,7 @@ int setup_legacy_IRQ(void) } pic_domain = get_pchpic_irq_domain(); - if (pic_domain) + if (pic_domain && !cpu_has_hypervisor) pch_lpc_acpi_init(pic_domain, acpi_pchlpc); return 0; @@ -373,9 +373,12 @@ __init unsigned long legacy_boot_init(unsigned long argc, unsigned long cmdptr, efi_bp = (struct boot_params *)bpi; bpi_version = get_bpi_version(&efi_bp->signature); pr_info("BPI%d with boot flags %llx.\n", bpi_version, efi_bp->flags); - if (bpi_version == BPI_VERSION_NONE) - panic("Fatal error, bpi ver BONE!\n"); - else if (bpi_version == BPI_VERSION_V2) + if (bpi_version == BPI_VERSION_NONE) { + if (cpu_has_hypervisor) + pr_err(FW_BUG "Fatal error, bpi ver NONE!\n"); + else + panic(FW_BUG "Fatal error, bpi ver NONE!\n"); + } else if (bpi_version == BPI_VERSION_V2) parse_bpi_flags(); fw_init_cmdline(argc, cmdptr); diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index c11a6676a82d..503870c7c1cb 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -57,7 +57,9 @@ static void eiointc_enable(void) static int cpu_to_eio_node(int cpu) { - return cpu_logical_map(cpu) / CORES_PER_EIO_NODE; + int cores = (cpu_has_hypervisor ? 
MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE); + + return cpu_logical_map(cpu) / cores; } static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map) @@ -88,6 +90,11 @@ static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, static DEFINE_RAW_SPINLOCK(affinity_lock); +static void virt_extioi_set_irq_route(int irq, unsigned int cpu) +{ + iocsr_write8(cpu_logical_map(cpu), EIOINTC_REG_ROUTE + irq); +} + static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) { unsigned int cpu; @@ -110,16 +117,22 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af vector = d->hwirq; regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2); - /* Mask target vector */ - csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), - 0x0, priv->node * CORES_PER_EIO_NODE); - - /* Set route for target vector */ - eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); - - /* Unmask target vector */ - csr_any_send(regaddr, EIOINTC_ALL_ENABLE, - 0x0, priv->node * CORES_PER_EIO_NODE); + if (cpu_has_hypervisor) { + iocsr_write32(EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1F), regaddr); + virt_extioi_set_irq_route(vector, cpu); + iocsr_write32(EIOINTC_ALL_ENABLE, regaddr); + } else { + /* Mask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), + 0x0, priv->node * CORES_PER_EIO_NODE); + + /* Set route for target vector */ + eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); + + /* Unmask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE, + 0x0, priv->node * CORES_PER_EIO_NODE); + } irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -146,13 +159,14 @@ static int eiointc_router_init(unsigned int cpu) uint32_t data; uint32_t node = cpu_to_eio_node(cpu); int index = eiointc_index(node); + int cores = (cpu_has_hypervisor ? 
MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE); if (index < 0) { pr_err("Error: invalid nodemap!\n"); return -1; } - if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) { + if ((cpu_logical_map(cpu) % cores) == 0) { eiointc_enable(); for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) { @@ -169,7 +183,7 @@ static int eiointc_router_init(unsigned int cpu) for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) { /* Route to Node-0 Core-0 */ if (index == 0) - bit = BIT(cpu_logical_map(0)); + bit = (cpu_has_hypervisor ? cpu_logical_map(0) : BIT(cpu_logical_map(0))); else bit = (eiointc_priv[index]->node << 4) | 1; -- Gitee From d34237b4a32605e4183952d7cf23f00ec5cf8217 Mon Sep 17 00:00:00 2001 From: yangqiming Date: Sat, 2 Dec 2023 10:08:33 +0800 Subject: [PATCH 0129/2138] anolis: LoongArch: Fixed EIOINTC structure members ANBZ: #8435 Resolve the problem that the multi-node cpus fail to boot. Signed-off-by: yangqiming Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- arch/loongarch/kernel/legacy_boot.c | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/arch/loongarch/kernel/legacy_boot.c b/arch/loongarch/kernel/legacy_boot.c index 214e7e0b04af..35a0a118486f 100644 --- a/arch/loongarch/kernel/legacy_boot.c +++ b/arch/loongarch/kernel/legacy_boot.c @@ -93,13 +93,15 @@ static int bad_pch_pic(unsigned long address) void register_default_pic(int id, u32 address, u32 irq_base) { - int idx, entries; + int j, idx, entries, cores; unsigned long addr; + u64 node_map = 0; if (bad_pch_pic(address)) return; idx = nr_io_pics; + cores = (cpu_has_hypervisor ? 
MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE); pchpic_default[idx].address = address; if (idx) @@ -119,14 +121,29 @@ void register_default_pic(int id, u32 address, u32 irq_base) pchmsi_default[idx].start = entries; pchmsi_default[idx].count = MSI_MSG_DEFAULT_COUNT; - eiointc_default[idx].cascade = 3; + for_each_possible_cpu(j) { + int node = cpu_logical_map(j) / cores; + + node_map |= (1 << node); + } + eiointc_default[idx].cascade = 3 + idx; eiointc_default[idx].node = id; - eiointc_default[idx].node_map = 1; + eiointc_default[idx].node_map = node_map; if (idx) { - eiointc_default[idx].cascade = 0x4; - eiointc_default[0].node_map = 0x1DF; - eiointc_default[idx].node_map = 0xFE20; + int i; + + for (i = 0; i < idx + 1; i++) { + node_map = 0; + + for_each_possible_cpu(j) { + int node = cpu_logical_map(j) / cores; + + if (((node & 7) < 4) ? !i : i) + node_map |= (1 << node); + } + eiointc_default[i].node_map = node_map; + } } acpi_pchpic[idx] = &pchpic_default[idx]; -- Gitee From 0913ea98f65361562448a37cac11e517bd3f2815 Mon Sep 17 00:00:00 2001 From: Hongchen Zhang Date: Sat, 2 Dec 2023 10:08:34 +0800 Subject: [PATCH 0130/2138] anolis: LoongArch: use arch specific phys_to_dma ANBZ: #8435 To be compatible with OLD firmware which has no _DMA method, we should use arch specific phys_to_dma. 
Signed-off-by: Hongchen Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- arch/loongarch/Kconfig | 1 + arch/loongarch/kernel/dma.c | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 9fd8644a9a4c..edbe1915e122 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -144,6 +144,7 @@ config LOONGARCH select HAVE_SAMPLE_FTRACE_DIRECT_MULTI select HAVE_SETUP_PER_CPU_AREA if NUMA select HAVE_STACKPROTECTOR + select ARCH_HAS_PHYS_TO_DMA select HAVE_SYSCALL_TRACEPOINTS select HAVE_TIF_NOHZ select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c index 7a9c6a9dd2d0..cc0ccde58db8 100644 --- a/arch/loongarch/kernel/dma.c +++ b/arch/loongarch/kernel/dma.c @@ -4,6 +4,28 @@ */ #include #include +#include + +/* + * We extract 4bit node id (bit 44~47) from Loongson-3's + * 48bit physical address space and embed it into 40bit. 
+ */ + +static int node_id_offset; + +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + long nid = (paddr >> 44) & 0xf; + + return ((nid << 44) ^ paddr) | (nid << node_id_offset); +} + +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) +{ + long nid = (daddr >> node_id_offset) & 0xf; + + return ((nid << node_id_offset) ^ daddr) | (nid << 44); +} void acpi_arch_dma_setup(struct device *dev) { @@ -11,6 +33,11 @@ void acpi_arch_dma_setup(struct device *dev) u64 mask, end = 0; const struct bus_dma_region *map = NULL; + if (node_id_offset == 0) { + node_id_offset = ((readl(LS7A_DMA_CFG) & LS7A_DMA_NODE_MASK) >> LS7A_DMA_NODE_SHF); + node_id_offset += 36; + } + ret = acpi_dma_get_range(dev, &map); if (!ret && map) { const struct bus_dma_region *r = map; -- Gitee From 465689284aecc2132a3f06027282b631ead3c6a1 Mon Sep 17 00:00:00 2001 From: liuyun Date: Mon, 11 Dec 2023 10:03:09 +0800 Subject: [PATCH 0131/2138] anolis: cpufreq:loongarch: Add cpufreq driver for LoongArch ANBZ: #8435 This patch dd cpufreq driver support for LoongArch. 
Signed-off-by: zhangtianyang Signed-off-by: liuyun Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- arch/loongarch/Kconfig | 1 + arch/loongarch/configs/loongson3_defconfig | 4 + arch/loongarch/include/asm/fpu.h | 13 +- drivers/cpufreq/Kconfig | 11 + drivers/cpufreq/Makefile | 1 + drivers/cpufreq/loongson3-acpi-cpufreq.c | 1527 ++++++++++++++++++++ 6 files changed, 1556 insertions(+), 1 deletion(-) create mode 100644 drivers/cpufreq/loongson3-acpi-cpufreq.c diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index edbe1915e122..1463213f3315 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -654,6 +654,7 @@ config ARCH_SUSPEND_POSSIBLE config ARCH_HIBERNATION_POSSIBLE def_bool y +source "drivers/cpufreq/Kconfig" source "kernel/power/Kconfig" source "drivers/acpi/Kconfig" diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index e5f70642ed20..a96c5bb1b130 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -61,6 +61,10 @@ CONFIG_ACPI_DOCK=y CONFIG_ACPI_IPMI=m CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_PCI_SLOT=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_LOONGSON3_ACPI_CPUFREQ=y CONFIG_ACPI_HOTPLUG_MEMORY=y CONFIG_EFI_ZBOOT=y CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h index c2d8962fda00..4d635b8e3245 100644 --- a/arch/loongarch/include/asm/fpu.h +++ b/arch/loongarch/include/asm/fpu.h @@ -48,6 +48,10 @@ static inline void disable_lasx(void); static inline void save_lasx(struct task_struct *t); static inline void restore_lasx(struct task_struct *t); +#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ +DECLARE_PER_CPU(unsigned long, msa_count); +DECLARE_PER_CPU(unsigned long, lasx_count); +#endif /* * Mask the FCSR Cause bits according to the Enable bits, observing * that 
Unimplemented is always enabled. @@ -210,6 +214,9 @@ static inline void enable_lsx(void) { if (cpu_has_lsx) csr_xchg32(CSR_EUEN_LSXEN, CSR_EUEN_LSXEN, LOONGARCH_CSR_EUEN); +#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ + per_cpu(msa_count, raw_smp_processor_id())++; +#endif } static inline void disable_lsx(void) @@ -256,8 +263,12 @@ static inline void restore_lsx_upper(struct task_struct *t) {} static inline void enable_lasx(void) { - if (cpu_has_lasx) + if (cpu_has_lasx) { csr_xchg32(CSR_EUEN_LASXEN, CSR_EUEN_LASXEN, LOONGARCH_CSR_EUEN); +#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ + per_cpu(lasx_count, raw_smp_processor_id())++; +#endif + } } static inline void disable_lasx(void) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index f429b9b37b76..b14584bfdf3f 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -273,6 +273,17 @@ config LOONGSON2_CPUFREQ If in doubt, say N. endif +if LOONGARCH +config LOONGSON3_ACPI_CPUFREQ + bool "Loongson3 ACPI cpufreq driver" + depends on ACPI_PROCESSOR + help + This driver adds a CPUFreq driver which utilizes the ACPI + Processor Performance States. + This driver supports Loongson 3A5000 compatible CPUs. + If in doubt, say N. 
+endif + if SPARC64 config SPARC_US3_CPUFREQ tristate "UltraSPARC-III CPU Frequency driver" diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index ef8510774913..076ea3ac1b56 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -104,6 +104,7 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o +obj-$(CONFIG_LOONGSON3_ACPI_CPUFREQ) += loongson3-acpi-cpufreq.o obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o diff --git a/drivers/cpufreq/loongson3-acpi-cpufreq.c b/drivers/cpufreq/loongson3-acpi-cpufreq.c new file mode 100644 index 000000000000..018b529a0cf9 --- /dev/null +++ b/drivers/cpufreq/loongson3-acpi-cpufreq.c @@ -0,0 +1,1527 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * loongson3-acpi-cpufreq.c - Loongson ACPI Processor P-States Driver + * + * Copyright (C) 2020 lvjianmin + * Yijun + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "cpufreq_governor.h" + +#include +#define CPU_ID_FIELD 0xf + +#define COMPLETE_STATUS 0x80000000 +#define VOLTAGE_COMMAND 0x21 + +#define DVFS_INFO 0x22 +#define DVFS_INFO_BOOST_LEVEL 0x23 +#define DVFS_INFO_MIN_FREQ 0xf +#define DVFS_INFO_MAX_FREQ 0xf0 +#define DVFS_INFO_BOOST_CORE_FREQ 0xff00 +#define DVFS_INFO_NORMAL_CORE_UPPER_LIMIT 0xf0000 +#define DVFS_INFO_BOOST_CORES 0xf00000 + +#define BOOST_MODE 0x80000 +#define NORMAL_MODE 0x40000 + +MODULE_DESCRIPTION("Loongson 3A5000 ACPI Processor P-States Driver"); + +MODULE_LICENSE("GPL"); + +#define CPUFREQ_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC) +#define 
LOONGSON_CONTROL_MASK (0xFF) +#define FACTOR (0xeac0c6e8) +#define BOOST_THRESHOLD (900) +#define MAX_CORES_PER_PACKAGE 64 +#define CPU_ID_FIELD 0xf +#define VOLTAGE_COMMAND 0x21 +#define MAX_READY_TIMEOUT 300000000 +#define RESERVED_FREQ 3 + +#define LOONGSON_BOOST_FREQ_MASK (0x7 << 8) +#define FREQ_STEP (25) + +static struct mutex boost_mutex[MAX_PACKAGES]; +static bool cpufreq_has_boost_freq; +static int max_boost_cores; +static int boost_gears; +static int boost_freqs[NR_CPUS + 1]; +struct package_data; +struct core_data; +static struct acpi_processor_performance __percpu *acpi_perf_data; +static struct cpufreq_driver loongson3_cpufreq_driver; +static struct freq_attr *loongson3_cpufreq_attr[]; +DECLARE_PER_CPU(struct clock_event_device, stable_clockevent_device); +static inline struct core_data *get_core_data(int cpu); + +static int min_freq_level; +static int max_freq_level; +static int max_upper_index; +static int max_boost_freq; + +/* threshold of core's get into msa */ +static int msa_count_threshold = 200; +/* threshold of core's get into lasx */ +static int lasx_count_threshold = 200; +/* other cores' upper load threshold when 1 core get into boost mode and enable msa/lasx */ +static int load_threshold = 60; + +DEFINE_PER_CPU(unsigned long, msa_count); +EXPORT_PER_CPU_SYMBOL(msa_count); + +#if defined(CONFIG_CPU_HAS_LASX) +DEFINE_PER_CPU(unsigned long, lasx_count); +EXPORT_PER_CPU_SYMBOL(lasx_count); +#endif + +struct ce_update_data { + struct clock_event_device *cd; + unsigned int new_freq; +}; + +static struct kthread_worker cpufreq_worker; +static struct task_struct *cpufreq_thread; +/** + * struct core_data - Store core related information + * @in_boost: the core is boosting to boost_freq + * @cpu: logical cpu of the core + * @update_util The update_util_data pointer of @cpu, is passed to the callback + * function, which will be called by cpufreq_update_util() + * @package The package_data structure the core belonged to + * @work_in_progress @work is 
busy + * @irq_work to enqueue callback handling on irq workqueue + * @work to enqueue work from irq workqueue on system workqueue + * @perf store frequency table related information from ACPI table + * @max_freq max normal freq of cpu + * @boost_freq max boost freq of cpu + * @clock_scale clock scale to calculate cpu_data[cpu].udelay_val in boost mode + * @package_id package id of core + * @shift clock shift to calculate cpu_data[cpu].udelay_val in boost mode + * @update_util_set if callback has been set for cpufreq_update_util() + * @load current load of the core + * @last_freq_update_time last freq update time + * @freq_update_delay_ns min interval of freq update, which is + * transition_latency configured in ACPI table + * + * following elements are used to calculate load of the core + * @prev_update_time + * @prev_cpu_idle + * @prev_load + * @sampling_rate + * + */ +struct core_data { + bool in_boost; + int cpu; + struct update_util_data update_util; + struct package_data *package; + bool work_in_progress; + struct irq_work irq_work; + struct kthread_work work; + struct acpi_processor_performance *perf; + unsigned int normal_max_freq; + unsigned int *boost_freq; + unsigned int *clock_scale; + unsigned int package_id; + unsigned int *shift; + bool update_util_set; + unsigned long long load; + + u64 last_freq_update_time; + s64 freq_update_delay_ns; + u64 prev_update_time; + u64 prev_cpu_idle; + u32 prev_load; + u32 sampling_rate; +}; + +struct package_data { + int boost_cores; + int max_boost_cores; + int nr_cores; + char in_boost; + int nr_full_load_cores; + struct core_data core[MAX_CORES_PER_PACKAGE]; +} all_package_data[MAX_PACKAGES]; + +static bool boost_supported(void) +{ + return loongson3_cpufreq_driver.set_boost; +} + +/* + * Check if target_freq is a boost freq + * + * target_freq must be a freq in freq table when + * calling the function. 
+ */ +static int boost_level(struct acpi_processor_performance *perf, unsigned int target_freq) +{ + int i; + + for (i = 0; i < perf->state_count; i++) { + if (target_freq == (perf->states[i].core_frequency * 1000)) + return (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) >> 8; + } + return 0; +} + +#ifdef CONFIG_SMP +static int loongson3_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freqs; + struct clock_event_device __maybe_unused *cd; + struct core_data *core; + unsigned int __maybe_unused new_freq; + unsigned long cpu; + struct ce_update_data __maybe_unused ce_data; + int cur_boost_level; + + if (val == CPUFREQ_POSTCHANGE) { + freqs = (struct cpufreq_freqs *)data; + cpu = freqs->policy->cpu; + core = get_core_data(cpu); + cur_boost_level = boost_level(core->perf, freqs->new); + if (cur_boost_level != 0) { + lpj_fine = (unsigned int) (((int64_t)core->clock_scale[cur_boost_level] * + cpufreq_scale(loops_per_jiffy, boost_freqs[cur_boost_level] * 1000, + freqs->new)) / core->shift[cur_boost_level]); + } else { + lpj_fine = + cpufreq_scale(loops_per_jiffy, core->normal_max_freq * 1000, freqs->new); + } + } + + return 0; +} +#else +static int loongson3_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freqs; + struct clock_event_device __maybe_unused *cd; + struct core_data *core; + unsigned int __maybe_unused new_freq; + unsigned long cpu; + int cur_boost_level; + + if (val == CPUFREQ_POSTCHANGE) { + + freqs = (struct cpufreq_freqs *)data; + cpu = freqs->cpu; + core = get_core_data(cpu); + cur_boost_level = boost_level(core->perf, target_freq); + + if (cur_boost_level != 0) { + lpj_fine = (unsigned int) (((int64_t)core->clock_scale[cur_boost_level] * + loops_per_jiffy) / core->shift[cur_boost_level]); + } else { + lpj_fine = loops_per_jiffy; + } + } + + return 0; +} +#endif +static struct notifier_block loongson3_cpufreq_notifier_block = { + 
	.notifier_call = loongson3_cpu_freq_notifier
};

/*
 * Map @target_freq (kHz) onto the hardware level encoding stored in the
 * ACPI _PSS "control" value.  When @boost_level is non-zero only boost
 * states are considered and the result is masked to LOONGSON_CONTROL_MASK;
 * otherwise only normal states match.  Returns 0 when no state matches.
 * Note: perf->states[].core_frequency is in MHz, hence the "* 1000".
 */
static int cpufreq_perf_find_level(struct acpi_processor_performance *perf,
				   unsigned int target_freq,
				   unsigned int boost_level)
{
	int i;

	for (i = 0; i < perf->state_count; i++) {
		if (boost_level) {
			if (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) {
				if (target_freq == (perf->states[i].core_frequency * 1000))
					return perf->states[i].control & LOONGSON_CONTROL_MASK;
			}
		} else {
			if (!(perf->states[i].control & LOONGSON_BOOST_FREQ_MASK))
				if (target_freq == (perf->states[i].core_frequency * 1000))
					return perf->states[i].control;
		}
	}
	/* 0 doubles as "not found"; callers only pr_debug() on it */
	return 0;
}

/*
 * Inverse of cpufreq_perf_find_level(): map a level encoding
 * (@target_index) back to the state's core frequency in MHz.
 * Returns 0 when no state matches.
 */
static int cpufreq_perf_find_freq(struct acpi_processor_performance *perf,
				  unsigned int target_index,
				  unsigned int boost_level)
{
	int i;

	for (i = 0; i < perf->state_count; i++) {
		if (boost_level) {
			if (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK)
				if (target_index == (perf->states[i].control & LOONGSON_CONTROL_MASK))
					return perf->states[i].core_frequency;
		} else {
			if (!(perf->states[i].control & LOONGSON_BOOST_FREQ_MASK))
				if (target_index == perf->states[i].control)
					return perf->states[i].core_frequency;
		}
	}

	return 0;
}


/*
 * Per-cpu bookkeeping: translate a Linux CPU number into this driver's
 * per-package core slot.  The core index is derived from the physical
 * (logical-map) id modulo the number of cores per package.
 */
static inline struct core_data *get_core_data(int cpu)
{
	int package_id = cpu_data[cpu].package;
	struct package_data *package = &all_package_data[package_id];
	int core_id = cpu_logical_map(cpu) % package->nr_cores;

	return &package->core[core_id];
}

/*
 * Decide whether the whole package should run in boost mode.
 *
 * Boost is vetoed when more than one core is executing vector (LASX/MSA)
 * workloads, or when a vector user coexists with more than one fully
 * loaded core — presumably a thermal/power-budget constraint of the part
 * (NOTE(review): exact rationale not visible here; confirm with HW docs).
 * The per-cpu lasx_count/msa_count counters are reset once they cross
 * their thresholds so stale activity eventually ages out.
 *
 * Returns true when at least one core (but no more than
 * package->max_boost_cores) is fully loaded.
 */
static bool package_boost(struct package_data *package)
{
	int i;
	int cur_full_load = 0;

#if defined(CONFIG_CPU_HAS_LASX)
	int lasx_enable_count = 0;
	unsigned long lasx_num;
	bool clear_lasx = false;
#endif

	int msa_enable_count = 0;
	unsigned long msa_num;
	bool clear_msa = false;

	for (i = 0; i < package->nr_cores; i++) {

#if defined(CONFIG_CPU_HAS_LASX)
		lasx_num = per_cpu(lasx_count, package->core[i].cpu);

		if (lasx_num)
			lasx_enable_count++;

		if (lasx_num >= lasx_count_threshold)
			clear_lasx = true;

		pr_debug("%s: lasx enabled, i %d, cpu %d, lasx_num %lu\n",
			 __func__, i, package->core[i].cpu, lasx_num);
#endif
		msa_num = per_cpu(msa_count, package->core[i].cpu);

		if (msa_num)
			msa_enable_count++;

		if (msa_num >= msa_count_threshold)
			clear_msa = true;

		pr_debug("%s: msa enabled, i %d, cpu %d, msa_num %lu\n",
			 __func__, i, package->core[i].cpu, msa_num);

		/* prev_load: load computed by the last sampling pass */
		if (package->core[i].prev_load >= load_threshold)
			cur_full_load++;
	}

#if defined(CONFIG_CPU_HAS_LASX)
	if (clear_lasx) {
		for (i = 0; i < package->nr_cores; i++)
			per_cpu(lasx_count, package->core[i].cpu) = 0;
	}
#endif

	if (clear_msa) {
		for (i = 0; i < package->nr_cores; i++)
			per_cpu(msa_count, package->core[i].cpu) = 0;
	}

#if defined(CONFIG_CPU_HAS_LASX)
	if (lasx_enable_count > 1
	    || (lasx_enable_count && package->nr_full_load_cores > 1)
	    || (lasx_enable_count && cur_full_load > 1)) {
		return false;
	}
#endif

	if (msa_enable_count > 1
	    || (msa_enable_count && package->nr_full_load_cores > 1)
	    || (msa_enable_count && cur_full_load > 1)) {
		return false;
	}

	if (package->nr_full_load_cores &&
	    package->nr_full_load_cores <= package->max_boost_cores)
		return true;

	return false;
}

/*
 * check if the cpu can be boosted.
 *
 * call the function after load of cpu updated.
 */
static bool cpu_can_boost(int cpu)
{
	struct core_data *core = get_core_data(cpu);
	struct package_data *package = core->package;

	/* package-wide budget: no more than max_boost_cores in boost */
	if (package->boost_cores >= package->max_boost_cores)
		return false;
	if (core->load > BOOST_THRESHOLD)
		return true;

	return false;
}

/*
 * Issue a VOLTAGE_COMMAND mailbox request that switches the physical cpu
 * @cpu to @freq_level, then ring the doorbell (bit 10 of iocsr 0x420).
 * Completion must be polled separately via wait_for_ready_timeout().
 */
static void do_set_freq_level(int cpu, int freq_level)
{
	uint32_t message;
	uint32_t val;

	message = (0 << 31) | (VOLTAGE_COMMAND << 24)
		| ((uint32_t)freq_level << 4)
		| (cpu & CPU_ID_FIELD);
	iocsr_write32(message, 0x51c);
	val = iocsr_read32(0x420);

	val |= 1 << 10;
	iocsr_write32(val, 0x420);
}

/*
 * Poll iocsr 0x51c for COMPLETE_STATUS for at most @timeout nanoseconds,
 * sleeping ~100ns between polls.  Returns 0 on completion, -EPERM on
 * timeout.  Sleeps, so callers must be in process context.
 */
static int wait_for_ready_timeout(int64_t timeout)
{
	int ret;
	struct timespec64 prev_ts;
	struct timespec64 curr_ts;
	ktime_t delay = ktime_set(0, 100);

	ktime_get_ts64(&prev_ts);
	ktime_get_ts64(&curr_ts);

	ret = -EPERM;
	while (((curr_ts.tv_sec - prev_ts.tv_sec) * 1000000000 + (curr_ts.tv_nsec - prev_ts.tv_nsec)) < timeout) {
		ktime_get_ts64(&curr_ts);

		if (iocsr_read32(0x51c) & COMPLETE_STATUS) {
			ret = 0;
			break;
		}

		__set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
	}
	return ret;
}

/*
 * Find closest freq to target in a table in ascending order.
 * Only entries whose driver_data equals @boost_level and which lie inside
 * [policy->min, policy->max] are considered.  Returns 0 when nothing fits.
 */
static int cpufreq_table_find_freq_ac(struct cpufreq_policy *policy,
				      unsigned int target_freq,
				      int boost_level)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	unsigned int best_freq = 0;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (pos->driver_data != boost_level)
			continue;
		if (freq > policy->max || freq < policy->min)
			continue;
		if (freq == target_freq)
			return freq;

		if (freq < target_freq) {
			best = idx;
			best_freq = freq;
			continue;
		}

		/* No freq found below target_freq, return freq above target_freq */
		if (best == -1)
			return freq;

		/* Choose the closest freq */
		if (target_freq - table[best].frequency > freq - target_freq)
			return freq;

		return best_freq;
	}

	return best_freq;
}

/*
 * Find closest freq to target in a table in descending order.
 * Mirror image of cpufreq_table_find_freq_ac().
 */
static int cpufreq_table_find_freq_dc(struct cpufreq_policy *policy,
				      unsigned int target_freq,
				      int boost_level)
{
	struct cpufreq_frequency_table *table = policy->freq_table;
	struct cpufreq_frequency_table *pos;
	unsigned int freq;
	unsigned int best_freq = 0;
	int idx, best = -1;

	cpufreq_for_each_valid_entry_idx(pos, table, idx) {
		freq = pos->frequency;

		if (pos->driver_data != boost_level)
			continue;
		if (freq > policy->max || freq < policy->min)
			continue;

		if (freq == target_freq)
			return freq;

		if (freq > target_freq) {
			best = idx;
			best_freq = freq;
			continue;
		}

		/* No freq found above target_freq, return freq below target_freq */
		if (best == -1)
			return freq;

		/* Choose the closest freq */
		if (table[best].frequency - target_freq > target_freq - freq)
			return freq;
		return best_freq;
	}

	return best_freq;
}

/* Works only on sorted freq-tables */
static int cpufreq_table_find_freq(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   int boost_level)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
		return cpufreq_table_find_freq_ac(policy, target_freq, boost_level);
	else
		return cpufreq_table_find_freq_dc(policy, target_freq, boost_level);
}

/*
 * Wrappers around cpufreq_freq_transition_begin/end() that tolerate
 * unbalanced calls: end() is a no-op when no transition is in flight, and
 * begin() force-fails any transition still marked ongoing first.
 */
static void transition_end(struct cpufreq_policy *policy,
			   struct cpufreq_freqs *freqs, bool failed)
{
	if (unlikely(!policy->transition_ongoing))
		return;
	cpufreq_freq_transition_end(policy, freqs, failed);
}
static void transition_begin(struct cpufreq_policy *policy,
			     struct cpufreq_freqs *freqs)
{
	if (unlikely(policy->transition_ongoing))
		cpufreq_freq_transition_end(policy, freqs, true);

	cpufreq_freq_transition_begin(policy, freqs);
}

static void
update_core_boost_info(struct core_data *core, bool boost_set)
{
	/* keep the per-core flag and the package-wide counter in sync */
	core->in_boost = boost_set;
	if (boost_set)
		core->package->boost_cores++;
	else
		core->package->boost_cores--;
}

/*
 * Send cpufreq transition begin/end notifications for every core of
 * @package except those in @skip_cpumask, recomputing each core's
 * current level/frequency for the (@find_level, @find_freq) gear.
 * Returns the per-core levels packed 4 bits per core.
 *
 * NOTE(review): the skip test uses "1 << i" (per-package core index) but
 * the callers pass "1 << core->cpu" (Linux CPU number); these only agree
 * when cpu_logical_map(cpu) % nr_cores == cpu — confirm on multi-package
 * systems.
 */
static unsigned int cores_freq_trans_notify(struct package_data *package,
					    bool before_trans,
					    bool trans_failed,
					    int find_level,
					    int find_freq,
					    unsigned int skip_cpumask)
{
	int i;
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned int cores_level = 0;
	unsigned int core_level;

	for (i = 0; i < package->nr_cores; i++) {
		struct core_data *core = &package->core[i];

		policy = cpufreq_cpu_get_raw(core->cpu);
		if (((1 << i) & skip_cpumask) || !policy)
			continue;
		freqs.old = policy->cur;
		freqs.flags = 0;

		/* find level from normal levels */
		core_level = cpufreq_perf_find_level(core->perf, policy->cur, find_level);
		if (!core_level) {
			pr_debug("cpu%d policy->cur=%d find_level=%d freq=%d skip_cpumask=%x \n",
				 policy->cpu, policy->cur,
				 find_level, find_freq, skip_cpumask);
		}
		freqs.new = cpufreq_perf_find_freq(core->perf, core_level, find_freq) * 1000;
		if (!freqs.new)
			pr_debug("%s: find freq error\n", __func__);

		pr_debug("%s: cpu %d, old freq %d, new freq %d, find_level %d, find_freq %d\n",
			 __func__, policy->cpu, freqs.old, freqs.new, find_level, find_freq);
		cores_level |= (core_level << (i << 2));

		if (before_trans)
			transition_begin(policy, &freqs);
		else
			transition_end(policy, &freqs, trans_failed);
	}
	return cores_level;
}
/*
 * Program @core to the table frequency closest to @freq within gear
 * @boost_level, with full transition notification.  Returns 0 on success,
 * a negative errno from the mailbox wait, or -1 when no distinct target
 * frequency exists (already at target, or nothing matched).
 */
static int loongson3_set_freq(struct core_data *core, unsigned long freq, int boost_level)
{
	int ret = 0;
	int freq_level;
	int phy_cpu;
	int target_freq;
	struct cpufreq_freqs freqs;
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(core->cpu);

	if (!policy)
		return -EINVAL;

	/* make sure the previous mailbox command has drained */
	ret = wait_for_ready_timeout(MAX_READY_TIMEOUT);
	if (ret)
		return ret;

	phy_cpu = cpu_logical_map(core->cpu);
	target_freq = cpufreq_table_find_freq(policy, freq, boost_level);
	if (!target_freq)
		return -1;
	if (target_freq == policy->cur)
		return -1;

	freqs.flags = 0;
	freqs.old = policy->cur;
	freqs.new = target_freq;
	freq_level = cpufreq_perf_find_level(core->perf, target_freq, boost_level);
	if (!freq_level) {
		pr_debug("%s: cpu%d freq=%lu targetfreq=%d boost_level=%d find level error\n",
			 __func__, core->cpu, freq, target_freq, boost_level);
	}

	transition_begin(policy, &freqs);
	do_set_freq_level(phy_cpu, freq_level);
	ret = wait_for_ready_timeout(MAX_READY_TIMEOUT);
	transition_end(policy, &freqs, !!ret);

	return ret;
}

/*
 * Switch the whole package between NORMAL_MODE and BOOST_MODE.
 * @freq_level carries the packed per-core levels from
 * cores_freq_trans_notify().  Returns 0 on success.
 */
int loongson3_set_mode(int mode, int freq_level)
{
	uint32_t val;
	int ret = 0;
	uint32_t message;

	ret = wait_for_ready_timeout(MAX_READY_TIMEOUT);
	if (ret)
		return ret;

	message = mode | (VOLTAGE_COMMAND << 24) | freq_level;
	iocsr_write32(message, 0x51c);
	val = iocsr_read32(0x420);
	val |= 1 << 10;
	iocsr_write32(val, 0x420);
	return wait_for_ready_timeout(MAX_READY_TIMEOUT);
}

/*
 * Action selected in cpufreq_update() as (in_boost << 1) | should_boost:
 * NORMAL = stay normal, N2B = enter boost, B2N = leave boost,
 * BOOST = stay in boost.
 */
enum freq_adjust_action {
	FAA_NORMAL,
	FAA_N2B,
	FAA_B2N,
	FAA_BOOST,
};

/* Normal (non-boost) governor step: scale linearly with load. */
static int faa_normal(struct cpufreq_policy *policy, int load)
{
	int ret;
	unsigned int freq_next, min_f, max_f;
	struct core_data *core = get_core_data(policy->cpu);

	if (!core)
		return -1;

	min_f = policy->min;
	max_f = policy->max;
	freq_next = min_f + load * (max_f - min_f) / 100;
	ret = loongson3_set_freq(core, freq_next, 0);
	return ret;
}

/*
 * Re-target one core while the package is already in boost mode,
 * optionally moving it in (@inc_boost=true) or out of the boosted set and
 * renotifying the sibling cores of their derived frequency change.
 */
static void handle_boost_cores(struct core_data *core, struct package_data *package,
			       unsigned long target_freq, bool skip_update_and_notify, bool update_core, bool inc_boost)
{
	int boost_level;
	int find_level;
	int find_freq;
	int ret;
	int inc_core = inc_boost ?
		1 : -1;

	/* boost_gears == 1: single shared boost gear, level 0 lookup */
	if (boost_gears == 1) {
		find_level = 0;
		boost_level = boost_gears;
	} else {
		find_level = package->boost_cores;
		if (update_core)
			boost_level = package->boost_cores + inc_core;
		else
			boost_level = package->boost_cores;
	}
	find_freq = boost_level;
	ret = loongson3_set_freq(core, target_freq, boost_level);
	if (ret)
		return;

	if (skip_update_and_notify) {
		if (update_core)
			update_core_boost_info(core, inc_boost);
		return;
	}

	/* multi-gear boost shifts the sibling cores too — tell cpufreq */
	if (boost_gears != 1) {
		cores_freq_trans_notify(package, true, false,
					find_level, find_freq, 1 << core->cpu);
		cores_freq_trans_notify(package, false, false,
					find_level, find_freq, 1 << core->cpu);
	}
	if (update_core)
		update_core_boost_info(core, inc_boost);
}

/*
 * Governor step while the package is in boost mode: promote this core
 * into the boosted set when it is busy, otherwise demote it / track load.
 */
static void faa_boost(struct cpufreq_policy *policy, int load)
{
	unsigned int min_f, max_f;
	struct core_data *core = get_core_data(policy->cpu);
	struct package_data *package = core->package;
	unsigned long target_freq;

	/* boost cores from n to n + 1 */
	if (core->load > BOOST_THRESHOLD) {
		if (package->boost_cores < package->max_boost_cores
		    && !core->in_boost) {
			if (boost_gears == 1) {
				target_freq = policy->max;
			} else {
				target_freq = cpufreq_table_find_freq(policy, policy->max,
								      package->boost_cores + 1);
				if (!target_freq) {
					pr_debug("%s: find freq error ,boost_level %d, cur freq %d\n",
						 __func__, package->boost_cores, policy->max);
				}
			}
			handle_boost_cores(core, package, target_freq, false, true, true);
		}
	} else {
		/* 1. core not in boost, level up but not change pll
		 * 2. core in boost, boost cores from n to n - 1
		 */
		min_f = policy->min;
		max_f = policy->max;
		target_freq = min_f + load * (max_f - min_f) / 100;
		handle_boost_cores(core, package, target_freq, !core->in_boost, core->in_boost, false);
	}


}

/*
 * Collect which cores of @package currently qualify for boost.
 * @boost_cores (optional) gets a bitmask by core index, @boost_count the
 * number of qualifying cores.
 */
static void get_boost_cores(struct package_data *package, int *boost_cores, int *boost_count)
{
	struct core_data *core;
	struct cpufreq_policy *policy;
	int i;

	/* count boost cores */
	for (i = 0; i < package->nr_cores; i++) {
		core = &package->core[i];
		policy = cpufreq_cpu_get_raw(core->cpu);
		if (!policy)
			continue;

		if (cpu_can_boost(core->cpu)) {
			if (boost_cores)
				*boost_cores |= (1 << i);

			(*boost_count)++;
		}
	}
}

/* Normal -> boost transition for the whole package. */
static void faa_n2b(struct package_data *package, struct core_data *core)
{
	int boost_cores = 0;
	int boost_count = 0;
	int freq_level;
	int i;

	get_boost_cores(package, &boost_cores, &boost_count);

	if (boost_gears == 1)
		boost_count = 1;

	freq_level = cores_freq_trans_notify(package, true, false,
					     0, boost_count, 0);
	if (!loongson3_set_mode(BOOST_MODE, freq_level)) {
		cores_freq_trans_notify(package, false, false,
					0, boost_count, 0);
		package->in_boost = true;
		for (i = 0; i < package->nr_cores; i++) {
			if (boost_cores & (1 << i))
				update_core_boost_info(&package->core[i], true);
		}
	} else
		cores_freq_trans_notify(package, false, true,
					0, boost_count, 0);
}

/* Boost -> normal transition for the whole package. */
static void faa_b2n(struct package_data *package)
{
	int i;
	int boost_count = package->boost_cores;

	if (boost_gears == 1)
		boost_count = 1;

	cores_freq_trans_notify(package, true, false,
				boost_count, 0, 0);
	if (!loongson3_set_mode(NORMAL_MODE, 0)) {
		cores_freq_trans_notify(package, false, false,
					boost_count, 0, 0);
		for (i = 0; i < package->nr_cores; i++) {
			if (package->core[i].in_boost)
				update_core_boost_info(&package->core[i], false);
		}
		package->in_boost = false;
	} else
		cores_freq_trans_notify(package, false, true,
					boost_count, 0, 0);
}

/*
 * Sample this core's load (percent, 0-100) from the idle-time counters,
 * mirroring the classic ondemand computation, and refresh the package's
 * count of fully loaded cores as a side effect.
 */
unsigned int load_update(struct core_data *core)
{
	int i;
	u64 update_time, cur_idle_time;
	unsigned int idle_time, time_elapsed;
	unsigned int load = 0;
	struct package_data *package = core->package;

	cur_idle_time = get_cpu_idle_time(core->cpu, &update_time, true);

	time_elapsed = update_time - core->prev_update_time;
	core->prev_update_time = update_time;

	idle_time = cur_idle_time - core->prev_cpu_idle;
	core->prev_cpu_idle = cur_idle_time;

	if (unlikely(!time_elapsed)) {
		/*
		 * That can only happen when this function is called
		 * twice in a row with a very short interval between the
		 * calls, so the previous load value can be used then.
		 */
		load = core->prev_load;
	} else if (unlikely((int)idle_time > 2 * core->sampling_rate &&
			    core->prev_load)) {
		/* long idle gap: reuse previous load once, then decay */
		load = core->prev_load;
		core->prev_load = 0;
	} else {
		if (time_elapsed >= idle_time)
			load = 100 * (time_elapsed - idle_time) / time_elapsed;
		else
			/* counter wrap: negative idle delta means fully busy */
			load = (int)idle_time < 0 ? 100 : 0;
		core->prev_load = load;
	}

	package->nr_full_load_cores = 0;
	for (i = 0; i < package->nr_cores; i++) {
		if (package->core[i].load > BOOST_THRESHOLD)
			package->nr_full_load_cores++;
	}

	return load;
}

/* Rate-limit freq re-evaluation to one per freq_update_delay_ns. */
static bool cpufreq_should_update_freq(struct core_data *core, u64 time)
{
	s64 delta_ns;

	delta_ns = time - core->last_freq_update_time;
	return delta_ns >= core->freq_update_delay_ns;
}

/*
 * Main governor step, run from the driver kthread under the per-package
 * boost mutex.  Picks one of the four freq_adjust_action transitions from
 * the (package in boost?, package should boost?) pair.
 */
static void cpufreq_update(struct cpufreq_policy *policy)
{
	int action;
	struct core_data *core;
	struct package_data *package;
	unsigned long int load;
	bool should_be_boost = 0;

	core = get_core_data(policy->cpu);
	package = core->package;

	mutex_lock(&boost_mutex[core->package_id]);

	if (!core->update_util_set) {
		mutex_unlock(&boost_mutex[core->package_id]);
		return;
	}

	load = load_update(core);
	/* exponentially weighted load; FACTOR/2^32 is the decay weight */
	core->load = (u64)load + ((core->load * FACTOR) >> 32);

	if (cpufreq_boost_enabled()) {
		should_be_boost = package_boost(package);
	} else {
		if (package->in_boost)
			should_be_boost = false;
	}

	action = (package->in_boost << 1) | should_be_boost;
	switch (action) {
	case FAA_NORMAL:
		faa_normal(policy, load);
		break;
	case FAA_B2N:
		faa_b2n(package);
		break;
	case FAA_N2B:
		faa_n2b(package, core);
		break;
	case FAA_BOOST:
		faa_boost(policy, load);
		break;
	}
	mutex_unlock(&boost_mutex[core->package_id]);
}

static void set_max_within_limits(struct cpufreq_policy *policy)
{
	struct core_data *core = get_core_data(policy->cpu);
	/*
	 * policy->max <= cpu->pstate.max_freq indecates that
	 * the boost is disabled, so max freq is in normal range
	 *
	 * Skip performance policy with boost enabled!!!
	 *
	 */
	if (policy->max <= (core->normal_max_freq * 1000)) {
		mutex_lock(&boost_mutex[core->package_id]);
		if (!loongson3_set_freq(core, policy->max, 0))
			pr_debug("Set cpu %d to performance mode under normal range.\n",
				 policy->cpu);
		mutex_unlock(&boost_mutex[core->package_id]);
	}
}

/* Detach the scheduler util hook and wait out in-flight callbacks. */
static void clear_update_util_hook(unsigned int cpu)
{
	struct core_data *core = get_core_data(cpu);

	if (!core->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	core->update_util_set = false;
	synchronize_rcu();
}

/*
 * Scheduler util callback (runs in scheduler context): defer the real
 * work to irq_work -> kthread, guarded by work_in_progress so at most one
 * update is in flight per core.
 */
static void update_util_handler(struct update_util_data *data, u64 time,
				unsigned int flags)
{
	struct core_data *core = container_of(data, struct core_data, update_util);

	if (!cpufreq_should_update_freq(core, time))
		return;
	if (!core->work_in_progress) {
		core->last_freq_update_time = time;
		core->work_in_progress = true;
		irq_work_queue(&core->irq_work);
	}
}
static void set_update_util_hook(unsigned int cpu)
{
	struct core_data *core = get_core_data(cpu);

	if (core->update_util_set)
		return;

	cpufreq_add_update_util_hook(cpu, &core->update_util,
				     update_util_handler);
	core->update_util_set = true;
}
/*
 * ->setpolicy: PERFORMANCE pins the max frequency and stops sampling;
 * anything else (re)arms the util hook so cpufreq_update() keeps running.
 */
static int loongson3_cpufreq_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return
-ENODEV; + + if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { + clear_update_util_hook(policy->cpu); + set_max_within_limits(policy); + } else { + set_update_util_hook(policy->cpu); + } + + return 0; +} + +static int loongson3_cpufreq_verify_policy(struct cpufreq_policy_data *policy) +{ + cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); + + return 0; +} + +static void set_boost_freq(bool has) +{ + cpufreq_has_boost_freq = has; +} + +static bool has_boost_freq(void) +{ + return cpufreq_has_boost_freq; +} + +static int compute_scale(int *shift, int dividor, int dividee) +{ + int i; + int result = 0; + int remainder = 0; + int scale_resolution = 8; + + result = dividor / dividee; + remainder = (dividor % dividee) * 10; + + for (i = 0; i < scale_resolution; i++) { + result = result * 10 + remainder / dividee; + remainder = (remainder % dividee) * 10; + *shift *= 10; + } + + return result; +} + +static void cpufreq_work_handler(struct kthread_work *work) +{ + struct core_data *core; + struct cpufreq_policy *policy; + + core = container_of(work, struct core_data, work); + policy = cpufreq_cpu_get_raw(core->cpu); + + if (policy) { + cpufreq_update(policy); + core->work_in_progress = false; + } +} + +static void cpufreq_irq_work(struct irq_work *irq_work) +{ + struct core_data *core = container_of(irq_work, struct core_data, irq_work); + + kthread_queue_work(&cpufreq_worker, &core->work); +} + +static void cpufreq_kthread_stop(void) +{ + kthread_flush_worker(&cpufreq_worker); + kthread_stop(cpufreq_thread); +} +static int cpufreq_kthread_create(void) +{ + struct sched_attr attr = { + .size = sizeof(struct sched_attr), + .sched_policy = SCHED_DEADLINE, + .sched_flags = 0x10000000, + .sched_nice = 0, + .sched_priority = 0, + .sched_runtime = 1000000, + .sched_deadline = 10000000, + .sched_period = 10000000, + }; + int ret; + + kthread_init_worker(&cpufreq_worker); + cpufreq_thread = kthread_create(kthread_worker_fn, 
&cpufreq_worker, "lsfrq:%d", 0); + if (IS_ERR(cpufreq_thread)) + return PTR_ERR(cpufreq_thread); + + ret = sched_setattr_nocheck(cpufreq_thread, &attr); + if (ret) { + kthread_stop(cpufreq_thread); + pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__); + return ret; + } + + wake_up_process(cpufreq_thread); + + return 0; +} + +static int init_acpi(struct acpi_processor_performance *perf) +{ + int result = 0; + int i; + + perf->shared_type = 0; + perf->state_count = (max_freq_level - min_freq_level + 1) * (boost_gears + 1); + + perf->states = + kmalloc_array(perf->state_count, + sizeof(struct acpi_processor_px), + GFP_KERNEL); + + if (!perf->states) { + result = -ENOMEM; + return result; + } + + for (i = 0; i < perf->state_count; i++) { + perf->states[i].power = 0x3A98; + perf->states[i].transition_latency = 10000; + perf->states[i].bus_master_latency = 10000; + perf->states[i].status = (RESERVED_FREQ + i / (boost_gears + 1)); + perf->states[i].control = (RESERVED_FREQ + i / (boost_gears + 1)); + + switch (i % (boost_gears + 1)) { + case 0: + perf->states[i].core_frequency = (cpu_clock_freq / 1000000) * (8 - i / (boost_gears + 1)) / 8; + break; + case 1: + case 2: + case 3: + case 4: + perf->states[i].core_frequency = + boost_freqs[i % (boost_gears + 1)] * (8 - i / (boost_gears + 1)) / 8; + perf->states[i].control |= ((i % (boost_gears + 1)) << 8); + break; + default: + pr_info("%s: i %d freq table error\n", __func__, i); + } + } + + return result; +} + +static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + unsigned int i; + struct acpi_processor_performance *perf; + struct cpufreq_frequency_table *freq_table; + struct core_data *core; + int package_id; + unsigned int cpu = policy->cpu; + unsigned int result = 0; + + perf = per_cpu_ptr(acpi_perf_data, cpu); + package_id = cpu_data[cpu].package; + core = get_core_data(cpu); + all_package_data[package_id].nr_cores = loongson_sysconf.cores_per_package; + 
all_package_data[package_id].max_boost_cores = max_boost_cores; + core->normal_max_freq = 0; + all_package_data[package_id].nr_full_load_cores = 0; + core->cpu = cpu; + core->work_in_progress = false; + core->last_freq_update_time = 0; + core->perf = perf; + core->package_id = package_id; + core->package = &all_package_data[package_id]; + + core->boost_freq = kmalloc_array(boost_gears + 1, sizeof(typeof(core->boost_freq)), GFP_KERNEL); + core->clock_scale = kmalloc_array(boost_gears + 1, sizeof(typeof(core->clock_scale)), GFP_KERNEL); + core->shift = kmalloc_array(boost_gears + 1, sizeof(typeof(core->shift)), GFP_KERNEL); + + for (i = 0; i < boost_gears + 1; i++) { + core->boost_freq[i] = boost_freqs[i]; + core->shift[i] = 1; + } + + if (!acpi_disabled) + result = acpi_processor_register_performance(perf, cpu); + else { + result = init_acpi(perf); + policy->shared_type = perf->shared_type; + } + + if (result) { + pr_info("CPU%d acpi_processor_register_performance failed.\n", cpu); + return result; + } + + for (i = 0; i < MAX_PACKAGES; i++) + mutex_init(&boost_mutex[i]); + + /* capability check */ + if (perf->state_count <= 1) { + pr_debug("No P-States\n"); + result = -ENODEV; + goto err_unreg; + } + + freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table), + GFP_KERNEL); + if (!freq_table) { + result = -ENOMEM; + goto err_unreg; + } + + /* detect transition latency */ + policy->cpuinfo.transition_latency = 0; + for (i = 0; i < perf->state_count; i++) { + if ((perf->states[i].transition_latency * 1000) > + policy->cpuinfo.transition_latency) + policy->cpuinfo.transition_latency = + perf->states[i].transition_latency * 1000; + if (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) { + set_boost_freq(true); + } else { + if (perf->states[i].core_frequency > core->normal_max_freq) + core->normal_max_freq = perf->states[i].core_frequency; + } + } + + core->freq_update_delay_ns = policy->cpuinfo.transition_latency; + + for (i = 0; i < boost_gears + 1; i++) { + 
core->clock_scale[i] = compute_scale(&core->shift[i], boost_freqs[i], core->normal_max_freq); + pr_debug("%s: boost_freqs[%d] %d, normal_max_freq %d, scale %d, shift %d\n", + __func__, i, boost_freqs[i], core->normal_max_freq, + core->clock_scale[i], core->shift[i]); + } + + /* table init */ + for (i = 0; i < perf->state_count; i++) { + freq_table[i].driver_data = (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) >> 8; + if (freq_table[i].driver_data) + freq_table[i].flags |= CPUFREQ_BOOST_FREQ; + freq_table[i].frequency = + perf->states[i].core_frequency * 1000; + } + freq_table[i].frequency = CPUFREQ_TABLE_END; + policy->freq_table = freq_table; + perf->state = 0; + + /* add boost-attr if supported. */ + if (has_boost_freq() && boost_supported()) + loongson3_cpufreq_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; + + pr_info("CPU%u - ACPI performance management activated.\n", cpu); + for (i = 0; i < perf->state_count; i++) + pr_debug(" %cP%d: %d MHz, %d mW, %d uS %d level\n", + (i == perf->state ? '*' : ' '), i, + (u32) perf->states[i].core_frequency, + (u32) perf->states[i].power, + (u32) perf->states[i].transition_latency, + (u32) perf->states[i].control); + + /* + * the first call to ->target() should result in us actually + * writing something to the appropriate registers. 
+ */ + policy->fast_switch_possible = false; + + init_irq_work(&core->irq_work, cpufreq_irq_work); + kthread_init_work(&core->work, cpufreq_work_handler); + core->sampling_rate = max_t(unsigned int, + CPUFREQ_SAMPLING_INTERVAL, + cpufreq_policy_transition_delay_us(policy)); + return result; + +err_unreg: + if (!acpi_disabled) + acpi_processor_unregister_performance(cpu); + + return result; +} + +static int loongson3_cpufreq_cpu_exit(struct cpufreq_policy *policy) +{ + struct core_data *core = get_core_data(policy->cpu); + + clear_update_util_hook(policy->cpu); + irq_work_sync(&core->irq_work); + kthread_cancel_work_sync(&core->work); + core->work_in_progress = false; + policy->fast_switch_possible = false; + if (!acpi_disabled) + acpi_processor_unregister_performance(policy->cpu); + kfree(policy->freq_table); + kfree(core->boost_freq); + kfree(core->clock_scale); + kfree(core->shift); + return 0; +} + +static struct freq_attr *loongson3_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, /* Extra space for boost-attr if supported */ + NULL, +}; + +static struct cpufreq_driver loongson3_cpufreq_driver = { + .verify = loongson3_cpufreq_verify_policy, + .setpolicy = loongson3_cpufreq_set_policy, + .init = loongson3_cpufreq_cpu_init, + .exit = loongson3_cpufreq_cpu_exit, + .name = "acpi-cpufreq", + .attr = loongson3_cpufreq_attr, +}; + +static void free_acpi_perf_data(void) +{ + unsigned int i; + + /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. 
 */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

/* Allocate the per-cpu ACPI performance data and its cpumasks. */
static int __init loongson3_cpufreq_early_init(void)
{
	unsigned int i;

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data)
		return -ENOMEM;
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			    &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			    GFP_KERNEL, cpu_to_node(i))) {
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}
	return 0;
}

/*
 * Query the firmware mailbox (DVFS_INFO / DVFS_INFO_BOOST_LEVEL commands)
 * for boost capability.  Fills in the module-wide min/max freq levels,
 * max_boost_cores, boost_gears and the boost_freqs[] table (index 0 is
 * the measured base frequency).  Returns false when boost is unsupported.
 */
static bool support_boost(void)
{
	int message;
	int val;
	int i;

	if (wait_for_ready_timeout(MAX_READY_TIMEOUT))
		return false;
	message = DVFS_INFO << 24;
	iocsr_write32(message, 0x51c);
	val = iocsr_read32(0x420);

	val |= 1 << 10;
	iocsr_write32(val, 0x420);
	if (wait_for_ready_timeout(MAX_READY_TIMEOUT)) {
		pr_info("%s: not support boost\n", __func__);
		return false;
	}

	val = iocsr_read32(0x51c);

	min_freq_level = val & DVFS_INFO_MIN_FREQ;
	max_freq_level = (val & DVFS_INFO_MAX_FREQ) >> 4;

	if ((val & DVFS_INFO_BOOST_CORE_FREQ) && ((val & DVFS_INFO_BOOST_CORES) >> 20)) {
		max_boost_cores = (val & DVFS_INFO_BOOST_CORES) >> 20;
		/* frequency field is in units of 25 MHz */
		max_boost_freq = ((val & DVFS_INFO_BOOST_CORE_FREQ) >> 8) * 25;
		max_upper_index = (val & DVFS_INFO_NORMAL_CORE_UPPER_LIMIT) >> 16;
	} else {
		boost_gears = 0;
		return false;
	}

	/* Read boost levels */
	if (wait_for_ready_timeout(MAX_READY_TIMEOUT))
		return false;

	/* for version 1, single boost freq boost */
	message = DVFS_INFO_BOOST_LEVEL << 24;
	iocsr_write32(message, 0x51c);
	val = iocsr_read32(0x420);

	val |= 1 << 10;
	iocsr_write32(val, 0x420);

	if (wait_for_ready_timeout(MAX_READY_TIMEOUT)) {
		/* command unanswered: firmware only knows one boost gear */
		pr_info("%s: single boost mode\n", __func__);
		boost_gears = 1;
		boost_freqs[0] = calc_const_freq() / 1000000;
		for (i = 1; i < boost_gears + 1; i++)
			boost_freqs[i] = max_boost_freq;

		/* set 0x51c complete */
		iocsr_write32(COMPLETE_STATUS, 0x51c);
	} else {
		pr_info("%s: multi boost mode\n", __func__);
		boost_gears = max_boost_cores;
		val = iocsr_read32(0x51c);

		boost_freqs[0] = calc_const_freq() / 1000000;
		boost_freqs[1] = max_boost_freq;

		/* gears 2..n step down from max in FREQ_STEP units */
		if (boost_gears > 1) {
			for (i = 2; i < boost_gears + 1; i++)
				boost_freqs[i] = max_boost_freq - (((val >> ((i-2) * 4)) & 0xf) * FREQ_STEP);
		}
	}

	pr_info("%s: min_freq_level %d, max_freq_level %d, max_boost_cores %d, boost_gears %d\n",
		__func__, min_freq_level, max_freq_level, max_boost_cores, boost_gears);

	return true;
}

/*
 * Recompute policy->min/max (and cpuinfo) from the freq table, including
 * or excluding boost entries depending on @boost.
 */
static int cpufreq_table_cpuinfo(struct cpufreq_policy *policy,
				 struct cpufreq_frequency_table *table,
				 bool boost)
{
	struct cpufreq_frequency_table *pos;
	unsigned int min_freq = ~0;
	unsigned int max_freq = 0;
	unsigned int freq;

	cpufreq_for_each_valid_entry(pos, table) {
		freq = pos->frequency;

		if (!boost) {
			/* driver_data != 0 marks a boost entry */
			if (pos->driver_data)
				continue;
		}
		if (freq < min_freq)
			min_freq = freq;
		if (freq > max_freq)
			max_freq = freq;
	}

	policy->min = policy->cpuinfo.min_freq = min_freq;
	policy->max = policy->cpuinfo.max_freq = max_freq;
	if (policy->min == ~0)
		return -EINVAL;
	else
		return 0;
}

/* ->set_boost: widen/narrow the policy limits when boost is toggled. */
static int set_boost(struct cpufreq_policy *policy, int state)
{
	if (!has_boost_freq())
		return -EINVAL;

	if (!policy)
		return -EINVAL;

	if (!state) {
		if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
			cpufreq_update(policy);
	}
	if (!policy->freq_table)
		return -EINVAL;

	cpufreq_table_cpuinfo(policy, policy->freq_table, state);
	/* NOTE(review): empty down_write/up_write pair — looks like a
	 * barrier against concurrent policy updates; confirm intent. */
	down_write(&policy->rwsem);
	up_write(&policy->rwsem);

	if (!state)
		set_max_within_limits(policy);

	return 0;
}

static void __init loongson3_cpufreq_boost_init(void)
{
	if (!support_boost()) {
		pr_info("Boost capabilities not present in the processor\n");
		return;
	}

	/* only expose ->set_boost when the firmware supports it */
	loongson3_cpufreq_driver.set_boost = set_boost;
}

/* A responsive mailbox is the minimum requirement for this driver. */
static int cpufreq_supported_detect(void)
{
	return wait_for_ready_timeout(MAX_READY_TIMEOUT);
}

static int
__init loongson3_cpufreq_init(void) +{ + int ret; + + if (!cpu_has_csr || !cpu_has_scalefreq) + return -ENODEV; + + /* don't keep reloading if cpufreq_driver exists */ + if (cpufreq_get_current_driver()) + return -EEXIST; + + if (cpufreq_supported_detect()) { + pr_info("%s failed!\n", __func__); + return -ENODEV; + } + + ret = loongson3_cpufreq_early_init(); + if (ret) + return ret; + loongson3_cpufreq_boost_init(); + + cpufreq_register_notifier(&loongson3_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + ret = cpufreq_register_driver(&loongson3_cpufreq_driver); + cpufreq_kthread_create(); + if (ret) + free_acpi_perf_data(); + + return ret; +} + +static void __exit loongson3_cpufreq_exit(void) +{ + cpufreq_unregister_driver(&loongson3_cpufreq_driver); + free_acpi_perf_data(); + cpufreq_kthread_stop(); +} + +late_initcall(loongson3_cpufreq_init); +module_exit(loongson3_cpufreq_exit); + +static const struct acpi_device_id processor_device_ids[] = { + {ACPI_PROCESSOR_OBJECT_HID, }, + {ACPI_PROCESSOR_DEVICE_HID, }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, processor_device_ids); + +MODULE_ALIAS("acpi"); -- Gitee From f72136119354fde2f0b4d4818539730e86d6481b Mon Sep 17 00:00:00 2001 From: Chong Qiao Date: Mon, 11 Dec 2023 10:03:10 +0800 Subject: [PATCH 0132/2138] anolis: fbdev: add ls2k500sfb driver for ls2k500 bmc. ANBZ: #8435 Signed-off-by: Chong Qiao Signed-off-by: Hongchen Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/video/fbdev/Kconfig | 13 + drivers/video/fbdev/Makefile | 1 + drivers/video/fbdev/ls2k500sfb.c | 788 +++++++++++++++++++++++++++++++ 3 files changed, 802 insertions(+) create mode 100644 drivers/video/fbdev/ls2k500sfb.c diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 325298573e12..2ada2b100c51 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1950,6 +1950,19 @@ config FB_SM712 called sm712fb. 
	  If you want to compile it as a module, say M here and read
	  <file:Documentation/kbuild/modules.rst>.

config FB_LS2K500
	tristate "Loongson LS2K500 frame buffer support"
	depends on FB && PCI
	select FB_CFB_FILLRECT
	select FB_CFB_COPYAREA
	select FB_CFB_IMAGEBLIT
	help
	  Frame buffer driver for the BMC display of the Loongson LS2K500
	  bridge chip.

	  This driver is also available as a module.
	  If you want to compile it as a module, say M here and read
	  <file:Documentation/kbuild/modules.rst>.

source "drivers/video/fbdev/omap/Kconfig"
source "drivers/video/fbdev/omap2/Kconfig"
source "drivers/video/fbdev/mmp/Kconfig"
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index 70569f7027ed..d3fbb185daa3 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -128,3 +128,4 @@ obj-$(CONFIG_FB_SIMPLE) += simplefb.o
 # the test framebuffer is last
 obj-$(CONFIG_FB_VIRTUAL) += vfb.o
obj-$(CONFIG_FB_LS2K500) += ls2k500sfb.o
diff --git a/drivers/video/fbdev/ls2k500sfb.c b/drivers/video/fbdev/ls2k500sfb.c
new file mode 100644
index 000000000000..a3722dcaada0
--- /dev/null
+++ b/drivers/video/fbdev/ls2k500sfb.c
@@ -0,0 +1,788 @@
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * linux/drivers/video/ls2k500sfb.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive for
 * more details.
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static char mode_option[32] = "1280x1024-32@2M"; +module_param_string(mode, mode_option, sizeof(mode_option), 0444); +static int useshell; +module_param(useshell, int, 0664); +static int totty = 18; +module_param(totty, int, 0664); +static int resetdelay = 60; +module_param(resetdelay, int, 0664); +static int resetbootwait = 10; +module_param(resetbootwait, int, 0664); +static int GPIO = 14; +module_param(GPIO, int, 0664); +struct ls2k500sfb_struct { + struct pci_dev *dev; + struct platform_device *pd; + struct workqueue_struct *wq; + struct work_struct work; + struct delayed_work redraw_work; + int running; + unsigned long reset_time; + char *penv; + char saved_env[16]; +}; + +static int saved_console; +static unsigned long mscycles; +static atomic_t waiting_for_pciebreak_ipi; + +static int switch_console(int console) +{ + struct file *filp; + + filp = filp_open("/dev/tty1", O_RDWR, 0); + if (IS_ERR(filp)) + return -ENODEV; + + vfs_ioctl(filp, VT_ACTIVATE, console + 1); + filp_close(filp, NULL); + return 0; +} +static void ls2k500sfb_pciebreak_func(void *unused) +{ + atomic_dec(&waiting_for_pciebreak_ipi); + + while (atomic_read(&waiting_for_pciebreak_ipi)) + cpu_relax(); +} + +static void pciebreak_smp_send_stop(int ms) +{ + /* Wait at most 100 msecond for the other cpus to stop */ + unsigned long max_cycles = mscycles * ms; + unsigned long start_time = get_cycles(); + + atomic_set(&waiting_for_pciebreak_ipi, num_online_cpus()); + smp_call_function(ls2k500sfb_pciebreak_func, NULL, false); + while ((atomic_read(&waiting_for_pciebreak_ipi) > 1) + && get_cycles() - start_time < max_cycles) { + cpu_relax(); + } + if 
(atomic_read(&waiting_for_pciebreak_ipi) > 1) + pr_emerg("Non-pciebreaking CPUs did not react to IPI\n"); +} +static void ls2k500sfb_redraw_fn(struct work_struct *work) +{ + struct ls2k500sfb_struct *priv = + container_of(work, struct ls2k500sfb_struct, redraw_work.work); + /*restore resolution info */ + if (memcmp(priv->penv, priv->saved_env, sizeof(priv->saved_env))) + memcpy(priv->penv, priv->saved_env, sizeof(priv->saved_env)); + switch_console(saved_console); +} + +static void ls2k500sfb_events_fn(struct work_struct *work) +{ + struct ls2k500sfb_struct *priv = container_of(work, struct ls2k500sfb_struct, work); + struct pci_dev *pdev = priv->dev; + struct pci_dev *ppdev = pdev->bus->self; + uint32_t i, d, timeout, retry = 0; + static const uint32_t index[] = { + 0x10, 0x14, 0x18, 0x1c, 0x20, 0x24, 0x30, 0x3c, 0x54, 0x58, 0x78, 0x7c, 0x80, 4 + }; + + static uint32_t data[sizeof(index) / 4]; + static const uint32_t cindex[] = { 0x10, 0x3c, 4 }; + + static uint32_t cdata[sizeof(cindex) / 4]; + static uint32_t d80c, d71c, ctrl; + static void *p; + + if (!priv->running) { + for (i = 0; i < ARRAY_SIZE(index); i++) + pci_read_config_dword(ppdev, index[i], &data[i]); + for (i = 0; i < ARRAY_SIZE(cindex); i++) + pci_read_config_dword(pdev, cindex[i], &cdata[i]); + if (ppdev->vendor == 0x14) { + pci_read_config_dword(ppdev, 0x80c, &d80c); + d80c = (d80c & ~(3 << 17)) | (1 << 17); + + pci_read_config_dword(ppdev, 0x71c, &d71c); + d71c |= 1 << 26; + + p = pci_iomap(ppdev, 0, 0x100); + } + ctrl = readl(p); + return; + } + local_bh_disable(); + pciebreak_smp_send_stop(100); + wmb(); /* flush all write before we disable pcie window */ + pci_write_config_dword(ppdev, 0x18, 0); + pci_write_config_dword(ppdev, 0x1c, 0); + pci_write_config_dword(ppdev, 0x20, 0); + atomic_set(&waiting_for_pciebreak_ipi, 0); + wmb(); /* flush all write after change pcie window */ + local_bh_enable(); + if (ppdev->vendor == 0x14) { + timeout = 10000; + while (timeout) { + 
pci_read_config_dword(ppdev, 0x10, &d); + d &= ~0xf; + if (!d) + break; + mdelay(1); + timeout--; + }; + if (!timeout) + pr_info("bar not clear 0\n"); + + pci_read_config_dword(ppdev, 0x0, &d); + pr_info("pcie port deviceid=0x%x recover begin\n", d); +retrain: + while (1) { + pci_write_config_dword(ppdev, index[0], data[0]); + pci_read_config_dword(ppdev, index[0], &d); + d &= ~0xf; + if (d) + break; + mdelay(1); + } + + while (1) { + for (i = 0; i < ARRAY_SIZE(index); i++) { + if (index[i] != 0x18 && index[i] != 0x1c && index[i] != 0x20) + pci_write_config_dword(ppdev, index[i], data[i]); + } + pci_write_config_dword(ppdev, 0x80c, d80c); + pci_write_config_dword(ppdev, 0x71c, d71c); + + pci_read_config_dword(ppdev, 0x10, &d); + d &= ~0xf; + if (d) + break; + mdelay(1); + } + + timeout = 10000; + + writel(ctrl | 0x8, p); + while (1) { + d = readl(p + 0xc); + if ((d & 0x11) == 0x11) { + break; + } else if (!timeout) { + pr_info("pcie train failed status=0x%x\n", d); + goto out; + } + mdelay(1); + timeout--; + } + + + pr_info("pcie recovered done\n"); + + if (!retry) { + /*wait u-boot ddr config */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ*resetbootwait); + set_current_state(TASK_RUNNING); + pci_read_config_dword(ppdev, 0x10, &d); + d &= ~0xf; + if (!d) { + retry = 1; + goto retrain; + } + } + } else { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ*resetbootwait); + set_current_state(TASK_RUNNING); + } + local_bh_disable(); + pciebreak_smp_send_stop(10000); + wmb(); /* flush all write before we update pcie window */ + for (i = 0; i < ARRAY_SIZE(index); i++) + pci_write_config_dword(ppdev, index[i], data[i]); + + for (i = 0; i < ARRAY_SIZE(cindex); i++) + pci_write_config_dword(pdev, cindex[i], cdata[i]); + atomic_set(&waiting_for_pciebreak_ipi, 0); + wmb(); /* flush all write after we update pcie window */ + local_bh_enable(); + + + pr_info("redraw console\n"); + + saved_console = fg_console; + switch_console(fg_console > 
0?fg_console - 1 : fg_console + 1); + queue_delayed_work(priv->wq, &priv->redraw_work, HZ); +out: + priv->running = 0; +} + +irqreturn_t ls2k500sfb_interrupt(int irq, void *arg) +{ + struct ls2k500sfb_struct *priv = arg; + struct pci_dev *pdev = priv->dev; + + if (irq == pdev->irq) + pr_info("ls2k500sfb pcie interrupt\n"); + else + pr_info("ls2k500sfb gpio interrupt\n"); + if (system_state != SYSTEM_RUNNING) + return IRQ_HANDLED; + + if (!priv->running) { + if (!resetdelay || time_after(jiffies, priv->reset_time + resetdelay * HZ)) { + priv->running = 1; + queue_work(priv->wq, &priv->work); + } + priv->reset_time = jiffies; + } + return IRQ_HANDLED; +} + +#ifdef CONFIG_LOONGARCH +#define GPIO_OEN ((void *)IO_BASE+0x1fe00000+0x500) +#define GPIO_FUNCEN ((void *)IO_BASE+0x1fe00000+0x504) +#define GPIO_OUT ((void *)IO_BASE+0x1fe00000+0x508) +#define GPIO_IN ((void *)IO_BASE+0x1fe00000+0x50c) +#define GPIO_INTPOL ((void *)IO_BASE+0x1fe00000+0x510) +#define GPIO_INTEN ((void *)IO_BASE+0x1fe00000+0x514) + +static int gpiochip_match_name(struct gpio_chip *chip, void *data) +{ + const char *name = data; + + return !strcmp(chip->label, name); +} +static int get_gpio_irq_from_acpi_table(int gpio) +{ + struct gpio_chip *chip; + struct gpio_desc *desc; + + chip = gpiochip_find("LOON0007:00", gpiochip_match_name); + if (!chip) + return -ENOENT; + desc = gpiochip_request_own_desc(chip, gpio, "reboot", GPIO_LOOKUP_FLAGS_DEFAULT, GPIOD_IN); + if (!desc) + return -ENOENT; + return gpiod_to_irq(desc); +} + +static int get_gpio_irq_from_acpi_gsi(int gpio) +{ + int gsi = 16 + (gpio & 7); + + return acpi_register_gsi(NULL, gsi, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW); +} + +static int register_gpio_reboot_handler(struct ls2k500sfb_struct *priv) +{ + int irq = get_gpio_irq_from_acpi_table(GPIO); + + if (irq < 0) { + irq = get_gpio_irq_from_acpi_gsi(GPIO); + pr_notice("gsi gpio irq %d\n", irq); + } else + pr_notice("acpi gpio irq %d\n", irq); + writel(readl(GPIO_OEN) | (0x1 << GPIO), 
GPIO_OEN); + writel(readl(GPIO_FUNCEN) & ~(0x1 << GPIO), GPIO_FUNCEN); + writel(readl(GPIO_INTPOL) & ~(0x1 << GPIO), GPIO_INTPOL); + writel(readl(GPIO_INTEN) | (0x1 << GPIO), GPIO_INTEN); + if (request_irq(irq, ls2k500sfb_interrupt, IRQF_SHARED | IRQF_TRIGGER_FALLING, + "ls2k500sfb", priv)) + pr_err("request_irq(%d) failed\n", irq); + return 0; +} +#endif + +static const struct fb_fix_screeninfo simplefb_fix = { + .id = "simple", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_TRUECOLOR, + .accel = FB_ACCEL_NONE, +}; + +static const struct fb_var_screeninfo simplefb_var = { + .height = -1, + .width = -1, + .activate = FB_ACTIVATE_NOW, + .vmode = FB_VMODE_NONINTERLACED, +}; + +#define PSEUDO_PALETTE_SIZE 16 +struct simplefb_par { + char *penv; + char *preg; + u32 palette[PSEUDO_PALETTE_SIZE]; +}; + +static u_long get_line_length(int xres_virtual, int bpp) +{ + u_long length; + + length = xres_virtual * bpp; + length = (length + 31) & ~31; + length >>= 3; + return length; +} + +static int simplefb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + u_long line_length; + + /* + * FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal! 
+ * as FB_VMODE_SMOOTH_XPAN is only used internally + */ + + if (var->vmode & FB_VMODE_CONUPDATE) { + var->vmode |= FB_VMODE_YWRAP; + var->xoffset = info->var.xoffset; + var->yoffset = info->var.yoffset; + } + + /* + * Some very basic checks + */ + if (!var->xres) + var->xres = 1; + if (!var->yres) + var->yres = 1; + if (var->xres > var->xres_virtual) + var->xres_virtual = var->xres; + if (var->yres > var->yres_virtual) + var->yres_virtual = var->yres; + if (var->bits_per_pixel <= 16) + var->bits_per_pixel = 16; + else if (var->bits_per_pixel <= 32) + var->bits_per_pixel = 32; + else + return -EINVAL; + + if (var->xres_virtual < var->xoffset + var->xres) + var->xres_virtual = var->xoffset + var->xres; + if (var->yres_virtual < var->yoffset + var->yres) + var->yres_virtual = var->yoffset + var->yres; + + /* + * Memory limit + */ + line_length = + get_line_length(var->xres_virtual, var->bits_per_pixel); + if (line_length * var->yres_virtual > info->fix.smem_len) + return -ENOMEM; + + /* + * Now that we checked it we alter var. The reason being is that the video + * mode passed in might not work but slight changes to it might make it + * work. This way we let the user know what is acceptable. 
+ */ + switch (var->bits_per_pixel) { + case 16: /* BGR 565 */ + var->red.offset = 11; + var->red.length = 5; + var->green.offset = 5; + var->green.length = 6; + var->blue.offset = 0; + var->blue.length = 5; + var->transp.offset = 0; + var->transp.length = 0; + break; + case 32: /* BGRA 8888 */ + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.offset = 24; + var->transp.length = 8; + break; + } + var->red.msb_right = 0; + var->green.msb_right = 0; + var->blue.msb_right = 0; + var->transp.msb_right = 0; + + return 0; +} + +static int simplefb_set_par(struct fb_info *info) +{ + struct simplefb_par *par = info->par; + int reg_val; + + info->fix.line_length = get_line_length(info->var.xres_virtual, + info->var.bits_per_pixel); + sprintf(par->penv, "video=%dx%d-%d@2M", + info->var.xres_virtual, + info->var.yres_virtual, + info->var.bits_per_pixel); + + reg_val = readl(par->preg); + writel(reg_val + 1, par->preg); + + return 0; +} + +static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, + u_int transp, struct fb_info *info) +{ + u32 *pal = info->pseudo_palette; + u32 cr = red >> (16 - info->var.red.length); + u32 cg = green >> (16 - info->var.green.length); + u32 cb = blue >> (16 - info->var.blue.length); + u32 value; + + if (regno >= PSEUDO_PALETTE_SIZE) + return -EINVAL; + + value = (cr << info->var.red.offset) | + (cg << info->var.green.offset) | + (cb << info->var.blue.offset); + if (info->var.transp.length > 0) { + u32 mask = (1 << info->var.transp.length) - 1; + + mask <<= info->var.transp.offset; + value |= mask; + } + pal[regno] = value; + + return 0; +} + + +static void simplefb_destroy(struct fb_info *info) +{ + if (info->screen_base) + iounmap(info->screen_base); +} + +static const struct fb_ops simplefb_ops = { + .owner = THIS_MODULE, + .fb_destroy = simplefb_destroy, + .fb_setcolreg = simplefb_setcolreg, + .fb_fillrect = 
cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_check_var = simplefb_check_var, + .fb_set_par = simplefb_set_par, +}; + +static struct simplefb_format simplefb_formats[] = SIMPLEFB_FORMATS; + +struct simplefb_params { + u32 width; + u32 height; + u32 stride; + struct simplefb_format *format; +}; + +static int simplefb_parse_pd(struct platform_device *pdev, + struct simplefb_params *params) +{ + struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev); + int i; + + params->width = pd->width; + params->height = pd->height; + params->stride = pd->stride; + + params->format = NULL; + for (i = 0; i < ARRAY_SIZE(simplefb_formats); i++) { + if (strcmp(pd->format, simplefb_formats[i].name)) + continue; + + params->format = &simplefb_formats[i]; + break; + } + + if (!params->format) { + dev_err(&pdev->dev, "Invalid format value\n"); + return -EINVAL; + } + + return 0; +} + +static int simplefb_probe(struct platform_device *pdev) +{ + int ret; + struct simplefb_params params; + struct fb_info *info; + struct simplefb_par *par; + struct resource *mem, *envmem, *regmem; + + ret = simplefb_parse_pd(pdev, ¶ms); + + if (ret) + return ret; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + envmem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + regmem = platform_get_resource(pdev, IORESOURCE_MEM, 2); + if (!mem || !envmem || !regmem) { + dev_err(&pdev->dev, "No memory resource\n"); + return -EINVAL; + } + + info = framebuffer_alloc(sizeof(struct simplefb_par), &pdev->dev); + if (!info) + return -ENOMEM; + platform_set_drvdata(pdev, info); + + par = info->par; + par->penv = ioremap(envmem->start, resource_size(envmem)); + par->preg = ioremap(regmem->start, resource_size(regmem)); + + info->fix = simplefb_fix; + info->fix.smem_start = mem->start; + info->fix.smem_len = resource_size(mem); + info->fix.line_length = params.stride; + + info->var = simplefb_var; + info->var.xres = params.width; + info->var.yres = params.height; + 
info->var.xres_virtual = params.width; + info->var.yres_virtual = params.height; + info->var.bits_per_pixel = params.format->bits_per_pixel; + info->var.red = params.format->red; + info->var.green = params.format->green; + info->var.blue = params.format->blue; + info->var.transp = params.format->transp; + + ret = devm_aperture_acquire_for_platform_device(pdev, + info->fix.smem_start, + info->fix.smem_len); + if (ret) { + dev_info(&pdev->dev, "cannot acquire aperture\n"); + goto error_fb_release; + } + + info->fbops = &simplefb_ops; + info->flags = 0; + info->screen_base = ioremap_wc(info->fix.smem_start, + info->fix.smem_len); + if (!info->screen_base) { + ret = -ENOMEM; + goto error_fb_release; + } + info->pseudo_palette = par->palette; + + dev_info(&pdev->dev, "framebuffer at 0x%lx, 0x%x bytes, mapped to 0x%p\n", + info->fix.smem_start, info->fix.smem_len, + info->screen_base); + dev_info(&pdev->dev, "format=%s, mode=%dx%dx%d, linelength=%d\n", + params.format->name, + info->var.xres, info->var.yres, + info->var.bits_per_pixel, info->fix.line_length); + + ret = register_framebuffer(info); + if (ret < 0) { + dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret); + goto error_fb_release; + } else + dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node); + + local_irq_disable(); + mscycles = get_cycles(); + mdelay(1); + mscycles = get_cycles() - mscycles; + local_irq_enable(); + + return ret; +error_fb_release: + framebuffer_release(info); + return ret; +} + +static int simplefb_remove(struct platform_device *pdev) +{ + struct fb_info *info = platform_get_drvdata(pdev); + + unregister_framebuffer(info); + framebuffer_release(info); + + return 0; +} + +static struct platform_driver simplefb_driver = { + .driver = { + .name = "virt-framebuffer", + }, + .probe = simplefb_probe, + .remove = simplefb_remove, +}; + +static int ls2k500sfb_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct simplefb_platform_data mode; + struct resource 
res[3]; + struct platform_device *pd; + struct ls2k500sfb_struct *priv; + long phybase, videooffset, videomemorysize; + char *pmode = mode_option; + int depth; + char *penv; + int ret, i; + + if (!dev->bus->number || pci_enable_device(dev)) + return -ENODEV; + priv = kzalloc(sizeof(struct ls2k500sfb_struct), GFP_KERNEL); + priv->dev = dev; + + /* pcimem bar last 16M free, 2MB offset from free for framebuffer */ + phybase = pci_resource_start(dev, 0); + phybase += pci_resource_len(dev, 0) - 0x1000000; + penv = ioremap(phybase, 0x100000); + /*env at last 16M's beginning, first env is video */ + if (!strncmp(penv, "video=", 6)) + pmode = penv + 6; + + priv->penv = penv + 6; + memcpy(priv->saved_env, priv->penv, sizeof(priv->saved_env)); + + mode.width = simple_strtoul(pmode, &pmode, 0); + pmode++; + mode.height = simple_strtoul(pmode, &pmode, 0); + pmode++; + depth = simple_strtoul(pmode, &pmode, 0); + if (pmode && pmode[0]) { + pmode++; + videooffset = simple_strtoul(pmode, &pmode, 0); + if (pmode && pmode[0]) { + switch (pmode[0]) { + case 'M': + case 'm': + videooffset *= 0x100000; + break; + case 'K': + case 'k': + videooffset *= 1024; + break; + } + } + } else + videooffset = 0x200000; + mode.stride = mode.width * depth / 8; + mode.format = depth == 32 ? 
"a8r8g8b8" : "r5g6b5"; + + videomemorysize = 0x400000; + + memset(res, 0, sizeof(res)); + res[0].start = phybase + videooffset; + res[0].end = phybase + videooffset + videomemorysize - 1; + res[0].flags = IORESOURCE_MEM; + res[0].parent = &dev->resource[0]; + + res[1].start = phybase; + res[1].end = phybase + 64 - 1; + res[1].flags = IORESOURCE_MEM; + res[1].parent = &dev->resource[0]; + + res[2].start = phybase + 0x00f00014; + res[2].end = phybase + 0x00f0001c - 1; + res[2].flags = IORESOURCE_MEM; + res[2].parent = &dev->resource[0]; + + priv->pd = pd = platform_device_register_resndata(NULL, "virt-framebuffer", 0, + res, 3, &mode, sizeof(mode)); + + ret = platform_driver_register(&simplefb_driver); + if (ret) + return ret; + priv->wq = create_singlethread_workqueue("ls2k500sfb wq"); + INIT_WORK(&priv->work, ls2k500sfb_events_fn); + INIT_DELAYED_WORK(&priv->redraw_work, ls2k500sfb_redraw_fn); + + ls2k500sfb_events_fn(&priv->work); + if (request_irq(dev->irq, ls2k500sfb_interrupt, IRQF_SHARED | IRQF_TRIGGER_RISING, + "ls2k500sfb", priv)) + pr_err("request_irq(%d) failed\n", dev->irq); + #ifdef CONFIG_LOONGARCH + register_gpio_reboot_handler(priv); + #endif + pci_set_drvdata(dev, priv); + for (i = 0; i < 5; i++) { + res[0].start = phybase + 0x00f00000 + 0x1c*i; + res[0].end = phybase + 0x00f00000 + 0x1c*(i+1) - 1; + platform_device_register_simple("ipmi_ls2k500_si", i, res, 1); + } + + return PTR_ERR_OR_ZERO(pd); +} + +static void ls2k500sfb_remove(struct pci_dev *dev) +{ + struct ls2k500sfb_struct *priv = pci_get_drvdata(dev); + + platform_device_del(priv->pd); +} + +static struct pci_device_id ls2k500sfb_devices[] = { + {PCI_DEVICE(0x14, 0x1a05)}, + {0, 0, 0, 0, 0, 0, 0} +}; +MODULE_DEVICE_TABLE(pci, ls2k500sfb_devices); + +static struct pci_driver ls2k500sfb_driver = { + .name = "ls2k500sfb", + .id_table = ls2k500sfb_devices, + .probe = ls2k500sfb_probe, + .remove = ls2k500sfb_remove, + .driver = { + .name = "ls2k500sfb", + }, +}; + +static int __init 
ls2k500sfb_init(void) +{ + return pci_register_driver(&ls2k500sfb_driver); +} + +module_init(ls2k500sfb_init); + +#ifdef MODULE +static void __exit ls2k500sfb_exit(void) +{ + pci_unregister_driver(&ls2k500sfb_driver); +} + +module_exit(ls2k500sfb_exit); +#endif + +MODULE_LICENSE("GPL"); -- Gitee From 0ded91ba1d79922d0d644452fa1b4307d0016d7e Mon Sep 17 00:00:00 2001 From: Chong Qiao Date: Mon, 11 Dec 2023 10:03:11 +0800 Subject: [PATCH 0133/2138] anolis: ipmi: add ls2k500 bmc ipmi support. ANBZ: #8435 Signed-off-by: Chong Qiao Signed-off-by: Hongchen Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/char/ipmi/Makefile | 4 + drivers/char/ipmi/btlock.h | 92 +++++++++++++++ drivers/char/ipmi/ipmi_si.h | 11 ++ drivers/char/ipmi/ipmi_si_intf.c | 4 + drivers/char/ipmi/ipmi_si_ls2k500.c | 173 ++++++++++++++++++++++++++++ drivers/char/ipmi/kcs_bmc_ls2k500.h | 67 +++++++++++ drivers/video/fbdev/ls2k500sfb.c | 6 +- 7 files changed, 356 insertions(+), 1 deletion(-) create mode 100644 drivers/char/ipmi/btlock.h create mode 100644 drivers/char/ipmi/ipmi_si_ls2k500.c create mode 100644 drivers/char/ipmi/kcs_bmc_ls2k500.h diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile index cb6138b8ded9..bc9c6506fd59 100644 --- a/drivers/char/ipmi/Makefile +++ b/drivers/char/ipmi/Makefile @@ -13,6 +13,10 @@ ifdef CONFIG_PARISC ipmi_si-y += ipmi_si_parisc.o endif +ifdef CONFIG_LOONGARCH +ipmi_si-y += ipmi_si_ls2k500.o +endif + obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o obj-$(CONFIG_IPMI_SI) += ipmi_si.o diff --git a/drivers/char/ipmi/btlock.h b/drivers/char/ipmi/btlock.h new file mode 100644 index 000000000000..cf585e42d42d --- /dev/null +++ b/drivers/char/ipmi/btlock.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __BTLOCK_H__ +#define __BTLOCK_H__ + +#include +#include + +union btlock { + char b[2]; + unsigned int u; +}; + +/* 
+ *wait delay us if lock failed. + *lock fail if another one get lock or both try get lock. + *c must compile b with byte access. + */ +static inline int btlock_lock(volatile union btlock *p, int n, unsigned char delay) +{ + union btlock t, t1; + unsigned long flags; + unsigned long c0 = get_cycles(), c1; + + if (n > 1) + return -1; + delay |= 0x80; + t1.u = 0; + t1.b[n] = delay; + + while (1) { + local_irq_save(flags); + p->b[n] = delay; + t.u = p->u; + if (t.u == t1.u) { + wmb(); /* flush write out immediately */ + local_irq_restore(flags); + return 0; + } + p->b[n] = 0; + t.u = p->u; + wmb(); /* flush write out immediately */ + local_irq_restore(flags); + c1 = get_cycles(); + if (c1 - c0 > *mscycles * 1000) + return -1; + ndelay(((t.b[1 - n] & 0x7f) + (c1 & 1)) * 100); + } + return 0; +} + +static inline int btlock_trylock(volatile union btlock *p, int n, unsigned char delay) +{ + union btlock t, t1; + unsigned long flags; + + if (n > 1) + return -1; + delay |= 0x80; + t1.u = 0; + t1.b[n] = delay; + + local_irq_save(flags); + p->b[n] = delay; + t.u = p->u; + if (t.u == t1.u) { + wmb(); /* flush write out immediately */ + local_irq_restore(flags); + return 0; + } + p->b[n] = 0; + t.u = p->u; + wmb(); /* flush write out immediately */ + local_irq_restore(flags); + ndelay(((t.b[1 - n] & 0x7f) + (get_cycles() & 1)) * 100); + return -1; +} + +static inline int btlock_unlock(volatile union btlock *p, int n) +{ + p->b[n] = 0; + wmb(); /* flush write out immediately */ + return p->u; +} + +static inline int btlock_islocked(volatile union btlock *p, int n) +{ + union btlock t; + + t.u = p->u; + return t.b[n] && !t.b[1 - n]; +} +#endif diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h index a7ead2a4c753..aa2f81472ce5 100644 --- a/drivers/char/ipmi/ipmi_si.h +++ b/drivers/char/ipmi/ipmi_si.h @@ -51,6 +51,9 @@ struct si_sm_io { unsigned int regshift; enum ipmi_addr_space addr_space; unsigned long addr_data; +#ifdef CONFIG_LOONGARCH + void 
*addr_source_data; +#endif enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */ union ipmi_smi_info_union addr_info; @@ -101,6 +104,14 @@ static inline void ipmi_si_parisc_init(void) { } static inline void ipmi_si_parisc_shutdown(void) { } #endif +#ifdef CONFIG_LOONGARCH +int ipmi_si_ls2k500_init(void); +void ipmi_si_ls2k500_shutdown(void); +#else +static inline void ipmi_si_ls2k500_init(void) { } +static inline void ipmi_si_ls2k500_shutdown(void) { } +#endif + int ipmi_si_port_setup(struct si_sm_io *io); int ipmi_si_mem_setup(struct si_sm_io *io); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 5cd031f3fc97..373ee71811e3 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2104,6 +2104,8 @@ static int __init init_ipmi_si(void) ipmi_si_platform_init(); + ipmi_si_ls2k500_init(); + ipmi_si_pci_init(); ipmi_si_parisc_init(); @@ -2289,6 +2291,8 @@ static void cleanup_ipmi_si(void) ipmi_si_parisc_shutdown(); + ipmi_si_ls2k500_shutdown(); + ipmi_si_platform_shutdown(); mutex_lock(&smi_infos_lock); diff --git a/drivers/char/ipmi/ipmi_si_ls2k500.c b/drivers/char/ipmi/ipmi_si_ls2k500.c new file mode 100644 index 000000000000..7e259d85729f --- /dev/null +++ b/drivers/char/ipmi/ipmi_si_ls2k500.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * ipmi_si_pci.c + * + * Handling for IPMI devices on the PCI bus. 
+ */ + +#define pr_fmt(fmt) "ipmi_pci: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipmi_si.h" +static unsigned long *mscycles; +static unsigned long *event_jiffies; +#include "kcs_bmc_ls2k500.h" +static int resetbootwait = 60; +module_param(resetbootwait, int, 0664); + +#define KCS_STATUS_CMD_DAT BIT(3) + +static int pcie_busy(void) +{ + if (time_before(jiffies, *event_jiffies + resetbootwait*HZ)) + return -1; + return 0; +} + +static unsigned char intf_sim_inb(const struct si_sm_io *io, + unsigned int offset) +{ + IPMIKCS *ik = io->addr_source_data; + uint32_t ret; + + if (pcie_busy()) + return 0; + if (btlock_lock(&ik->lock, 0, 1) < 0) + return 0; + switch (offset & 1) { + case 0: + ret = ik->data_out_reg; + IPMI_KCS_SET_OBF(ik->status_reg, 0); + break; + case 1: + ret = ik->status_reg; + break; + } + btlock_unlock(&ik->lock, 0); + return ret; +} + +static void intf_sim_outb(const struct si_sm_io *io, unsigned int offset, + unsigned char val) +{ + IPMIKCS *ik = io->addr_source_data; + + if (pcie_busy()) + return; + if (btlock_lock(&ik->lock, 0, 1) < 0) + return; + if (IPMI_KCS_GET_IBF(ik->status_reg)) + goto out; + + switch (offset & 1) { + case 0: + ik->data_in_reg = val; + ik->status_reg &= ~KCS_STATUS_CMD_DAT; + break; + + case 1: + ik->cmd_reg = val; + ik->status_reg |= KCS_STATUS_CMD_DAT; + break; + } + IPMI_KCS_SET_IBF(ik->status_reg, 1); + ik->write_req++; +out: + btlock_unlock(&ik->lock, 0); +} + +static void ipmi_ls2k500_cleanup(struct si_sm_io *io) +{ +} + +int ipmi_si_sim_setup(struct si_sm_io *io) +{ + io->inputb = intf_sim_inb; + io->outputb = intf_sim_outb; + io->io_cleanup = ipmi_ls2k500_cleanup; + return 0; +} + +#define platform_resource_start(dev, bar) ((dev)->resource[(bar)].start) +#define platform_resource_end(dev, bar) ((dev)->resource[(bar)].end) +static int of_ipmi_ls2k500_probe(struct platform_device *pdev) +{ + int rv; + struct si_sm_io io; + void **kcs_data; + + 
memset(&io, 0, sizeof(io)); + io.addr_source = SI_PLATFORM; + dev_info(&pdev->dev, "probing via ls2k500 platform"); + io.si_type = SI_KCS; + + io.addr_space = IPMI_MEM_ADDR_SPACE; + io.io_setup = ipmi_si_sim_setup; + io.addr_data = pdev->resource[0].start; + io.addr_source_data = ioremap(pdev->resource[0].start, + pdev->resource[0].end - + pdev->resource[0].start + 1); + kcs_data = dev_get_platdata(&pdev->dev); + event_jiffies = kcs_data[0]; + mscycles = kcs_data[1]; + io.dev = &pdev->dev; + io.regspacing = 4; + io.regsize = DEFAULT_REGSIZE; + io.regshift = 0; + io.irq = 0; + if (io.irq) + io.irq_setup = ipmi_std_irq_setup; + + dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", + &pdev->resource[0], io.regsize, io.regspacing, io.irq); + + rv = ipmi_si_add_smi(&io); + if (rv) + ipmi_si_remove_by_dev(&pdev->dev); + + return rv; +} + +static int ipmi_ls2k500_remove(struct platform_device *pdev) +{ + ipmi_si_remove_by_dev(&pdev->dev); + + return 0; +} + +#define LS2K500_SI_DEVICE_NAME "ipmi_ls2k500_si" +struct platform_driver ipmi_ls2k500_platform_driver = { + .driver = { + .name = LS2K500_SI_DEVICE_NAME, + }, + .probe = of_ipmi_ls2k500_probe, + .remove = ipmi_ls2k500_remove, +}; + +static bool platform_registered; +int ipmi_si_ls2k500_init(void) +{ + int rv; + + rv = platform_driver_register(&ipmi_ls2k500_platform_driver); + if (rv) + pr_err("Unable to register driver: %d\n", rv); + else + platform_registered = true; + return rv; +} + +void ipmi_si_ls2k500_shutdown(void) +{ + if (platform_registered) + platform_driver_unregister(&ipmi_ls2k500_platform_driver); +} diff --git a/drivers/char/ipmi/kcs_bmc_ls2k500.h b/drivers/char/ipmi/kcs_bmc_ls2k500.h new file mode 100644 index 000000000000..86e08a08d41a --- /dev/null +++ b/drivers/char/ipmi/kcs_bmc_ls2k500.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KCS_BMC_LS2K500__ +#define __KCS_BMC_LS2K500__ 1 +#include +#include "btlock.h" +#define IPMI_KCS_OBF_BIT 0 +#define IPMI_KCS_IBF_BIT 1 
+#define IPMI_KCS_SMS_ATN_BIT 2 +#define IPMI_KCS_CD_BIT 3 + +#define IPMI_KCS_OBF_MASK (1 << IPMI_KCS_OBF_BIT) +#define IPMI_KCS_GET_OBF(d) (((d) >> IPMI_KCS_OBF_BIT) & 0x1) +#define IPMI_KCS_SET_OBF(d, v) ((d) = (((d) & ~IPMI_KCS_OBF_MASK) | \ + (((v) & 1) << IPMI_KCS_OBF_BIT))) +#define IPMI_KCS_IBF_MASK (1 << IPMI_KCS_IBF_BIT) +#define IPMI_KCS_GET_IBF(d) (((d) >> IPMI_KCS_IBF_BIT) & 0x1) +#define IPMI_KCS_SET_IBF(d, v) ((d) = (((d) & ~IPMI_KCS_IBF_MASK) | \ + (((v) & 1) << IPMI_KCS_IBF_BIT))) +#define IPMI_KCS_SMS_ATN_MASK (1 << IPMI_KCS_SMS_ATN_BIT) +#define IPMI_KCS_GET_SMS_ATN(d) (((d) >> IPMI_KCS_SMS_ATN_BIT) & 0x1) +#define IPMI_KCS_SET_SMS_ATN(d, v) ((d) = (((d) & ~IPMI_KCS_SMS_ATN_MASK) | \ + ((v) & 1) << IPMI_KCS_SMS_ATN_BIT)) +#define IPMI_KCS_CD_MASK (1 << IPMI_KCS_CD_BIT) +#define IPMI_KCS_GET_CD(d) (((d) >> IPMI_KCS_CD_BIT) & 0x1) +#define IPMI_KCS_SET_CD(d, v) ((d) = (((d) & ~IPMI_KCS_CD_MASK) | \ + (((v) & 1) << IPMI_KCS_CD_BIT))) + +#define IPMI_KCS_IDLE_STATE 0 +#define IPMI_KCS_READ_STATE 1 +#define IPMI_KCS_WRITE_STATE 2 +#define IPMI_KCS_ERROR_STATE 3 + +#define IPMI_KCS_GET_STATE(d) (((d) >> 6) & 0x3) +#define IPMI_KCS_SET_STATE(d, v) ((d) = ((d) & ~0xc0) | (((v) & 0x3) << 6)) + +#define IPMI_KCS_ABORT_STATUS_CMD 0x60 +#define IPMI_KCS_WRITE_START_CMD 0x61 +#define IPMI_KCS_WRITE_END_CMD 0x62 +#define IPMI_KCS_READ_CMD 0x68 +#define IPMI_KCS_STATUS_NO_ERR 0x00 +#define IPMI_KCS_STATUS_ABORTED_ERR 0x01 +#define IPMI_KCS_STATUS_BAD_CC_ERR 0x02 +#define IPMI_KCS_STATUS_LENGTH_ERR 0x06 +#define KCS_STATUS_CMD_DAT BIT(3) + +typedef struct IPMIKCS { + union btlock lock; + uint8_t status_reg; + uint8_t data_out_reg; + + int16_t data_in_reg; + int16_t cmd_reg; + int16_t reserved2; + + uint32_t write_req; + uint32_t write_ack; + + uint32_t reserved3; + uint32_t reserved4; +} IPMIKCS; + +struct loongson_kcs_bmc { + struct list_head next; + IPMIKCS *kcs; + struct kcs_bmc *bmc; +}; +#endif diff --git a/drivers/video/fbdev/ls2k500sfb.c 
b/drivers/video/fbdev/ls2k500sfb.c index a3722dcaada0..00a83ea7c1e3 100644 --- a/drivers/video/fbdev/ls2k500sfb.c +++ b/drivers/video/fbdev/ls2k500sfb.c @@ -111,6 +111,7 @@ static void ls2k500sfb_redraw_fn(struct work_struct *work) switch_console(saved_console); } +static unsigned long event_jiffies; static void ls2k500sfb_events_fn(struct work_struct *work) { struct ls2k500sfb_struct *priv = container_of(work, struct ls2k500sfb_struct, work); @@ -151,6 +152,7 @@ static void ls2k500sfb_events_fn(struct work_struct *work) pci_write_config_dword(ppdev, 0x18, 0); pci_write_config_dword(ppdev, 0x1c, 0); pci_write_config_dword(ppdev, 0x20, 0); + event_jiffies = jiffies; atomic_set(&waiting_for_pciebreak_ipi, 0); wmb(); /* flush all write after change pcie window */ local_bh_enable(); @@ -648,6 +650,7 @@ static struct platform_driver simplefb_driver = { .remove = simplefb_remove, }; +static void *kcs_data[2] = {&event_jiffies, &mscycles}; static int ls2k500sfb_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct simplefb_platform_data mode; @@ -740,7 +743,8 @@ static int ls2k500sfb_probe(struct pci_dev *dev, const struct pci_device_id *id) for (i = 0; i < 5; i++) { res[0].start = phybase + 0x00f00000 + 0x1c*i; res[0].end = phybase + 0x00f00000 + 0x1c*(i+1) - 1; - platform_device_register_simple("ipmi_ls2k500_si", i, res, 1); + platform_device_register_resndata(NULL, "ipmi_ls2k500_si", i, res, 1, + kcs_data, sizeof(kcs_data)); } return PTR_ERR_OR_ZERO(pd); -- Gitee From f05a83a5080202c24f1b175edb2e93532030d701 Mon Sep 17 00:00:00 2001 From: Chong Qiao Date: Mon, 11 Dec 2023 10:03:12 +0800 Subject: [PATCH 0134/2138] anolis: LoongArch: defconfig: enable CONFIG_FB_LS2K500=m. 
ANBZ: #8435 Signed-off-by: Chong Qiao Signed-off-by: Hongchen Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- arch/loongarch/configs/loongson3_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index a96c5bb1b130..0187730896f4 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -673,6 +673,7 @@ CONFIG_DRM_LOONGSON=y CONFIG_FB=y CONFIG_FB_EFI=y CONFIG_FB_RADEON=y +CONFIG_FB_LS2K500=m CONFIG_LCD_CLASS_DEVICE=y CONFIG_LCD_PLATFORM=m # CONFIG_VGA_CONSOLE is not set -- Gitee From 6223b6aa5885d1826e64c507317fdcc6d1b96912 Mon Sep 17 00:00:00 2001 From: Chong Qiao Date: Mon, 11 Dec 2023 10:03:13 +0800 Subject: [PATCH 0135/2138] anolis: LoongArch: fix ls2k500 bmc not work when installing iso ANBZ: #8435 Signed-off-by: Chong Qiao Signed-off-by: Hongchen Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/gpu/drm/loongson/loongson_module.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/loongson/loongson_module.c b/drivers/gpu/drm/loongson/loongson_module.c index d2a51bd395f6..37b7d97c4e70 100644 --- a/drivers/gpu/drm/loongson/loongson_module.c +++ b/drivers/gpu/drm/loongson/loongson_module.c @@ -19,6 +19,21 @@ module_param_named(vblank, loongson_vblank, int, 0400); static int __init loongson_module_init(void) { + struct pci_dev *pdev = NULL; + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) { + /* + * Multiple video card workaround + * + * This integrated video card will always be selected as + * default boot device by vgaarb subsystem. 
+ */ + if (pdev->vendor != PCI_VENDOR_ID_LOONGSON || pdev->device == 0x1a05) { + pr_info("Discrete graphic card detected, abort\n"); + return 0; + } + } + if (!loongson_modeset || video_firmware_drivers_only()) return -ENODEV; -- Gitee From 79eabfe5498a826845b7e6152b5b3cf52457c5ba Mon Sep 17 00:00:00 2001 From: Baoqi Zhang Date: Mon, 18 Dec 2023 10:20:11 +0800 Subject: [PATCH 0136/2138] anolis: LS7A2000: Add quirk for OHCI device rev 0x02 ANBZ: #8435 Signed-off-by: Baoqi Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/pci/controller/pci-loongson.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index bc630ab8a283..9078511271fd 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -32,6 +32,7 @@ #define DEV_LS7A_CONF 0x7a10 #define DEV_LS7A_GNET 0x7a13 #define DEV_LS7A_EHCI 0x7a14 +#define DEV_LS7A_OHCI 0x7a24 #define DEV_LS7A_DC2 0x7a36 #define DEV_LS7A_HDMI 0x7a37 @@ -175,6 +176,12 @@ static void loongson_pci_msi_quirk(struct pci_dev *dev) pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, val); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_PCIE_PORT5, loongson_pci_msi_quirk); +static void loongson_ohci_quirk(struct pci_dev *dev) +{ + if (dev->revision == 0x2) + dev->resource[0].start += 0x1000; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_OHCI, loongson_ohci_quirk); static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus) { -- Gitee From b9a6d42649d91b976937f2bc545d8f6ce83f1b0d Mon Sep 17 00:00:00 2001 From: Tianli Xiong Date: Mon, 18 Dec 2023 10:20:12 +0800 Subject: [PATCH 0137/2138] anolis: PCI: Check if entry->offset already exist for mem resource ANBZ: #8435 Fix patch "LoongArch: Add PCI controller support" Signed-off-by: Tianli Xiong Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: 
https://gitee.com/anolis/cloud-kernel/pulls/2812 --- arch/loongarch/pci/acpi.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c index 486dba309ac1..b4a4fe253b43 100644 --- a/arch/loongarch/pci/acpi.c +++ b/arch/loongarch/pci/acpi.c @@ -214,9 +214,11 @@ static int acpi_prepare_root_resources(struct acpi_pci_root_info *ci) if (status > 0) { resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { if (entry->res->flags & IORESOURCE_MEM) { - entry->offset = ci->root->mcfg_addr & GENMASK_ULL(63, 40); - entry->res->start |= entry->offset; - entry->res->end |= entry->offset; + if (!entry->offset) { + entry->offset = ci->root->mcfg_addr & GENMASK_ULL(63, 40); + entry->res->start |= entry->offset; + entry->res->end |= entry->offset; + } } } return status; -- Gitee From 001648ec34775a0fc31defd8a8695548a7e8b1e3 Mon Sep 17 00:00:00 2001 From: Tianli Xiong Date: Mon, 18 Dec 2023 10:20:13 +0800 Subject: [PATCH 0138/2138] anolis: PCI: Check if the pci controller can use both CFG0 and CFG1 mode to access configuration space ANBZ: #8435 Fix patch "PCI: loongson: Use generic 8/16/32-bit config ops on LS2K/LS7A" Signed-off-by: Tianli Xiong Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/pci/controller/pci-loongson.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 9078511271fd..5f422cdde349 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -328,6 +328,7 @@ static int loongson_pci_probe(struct platform_device *pdev) struct device_node *node = dev->of_node; struct pci_host_bridge *bridge; struct resource *regs; + unsigned int num = 0; if (!node) return -ENODEV; @@ -352,7 +353,9 @@ static int loongson_pci_probe(struct platform_device *pdev) } if (priv->data->flags & FLAG_CFG1) { - regs 
= platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (priv->cfg0_base) + num = 1; + regs = platform_get_resource(pdev, IORESOURCE_MEM, num); if (!regs) dev_info(dev, "missing mem resource for cfg1\n"); else { -- Gitee From 10889e0cda0771a4f78afa5a5cb2fa3eeedf9082 Mon Sep 17 00:00:00 2001 From: Jianmin Lv Date: Mon, 18 Dec 2023 10:20:14 +0800 Subject: [PATCH 0139/2138] anolis: PCI: PM: fix pcie mrrs restoring ANBZ: #8435 Don't limit mrrs during resume, so that saved value can be restored. Fix patch "PCI: loongson: Improve the MRRS quirk for LS7A" Signed-off-by: Jianmin Lv Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/pci/pci.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 095fa1910d36..b0a87919ac0a 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "pci.h" DEFINE_MUTEX(pci_slot_mutex); @@ -172,6 +173,11 @@ static bool pci_bridge_d3_disable; /* Force bridge_d3 for all PCIe ports */ static bool pci_bridge_d3_force; +#ifndef CONFIG_PM_SLEEP +suspend_state_t pm_suspend_target_state; +#define pm_suspend_target_state (PM_SUSPEND_ON) +#endif + static int __init pcie_port_pm_setup(char *str) { if (!strcmp(str, "off")) @@ -6242,7 +6248,8 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) v = (ffs(rq) - 8) << 12; - if (bridge->no_inc_mrrs) { + if (pm_suspend_target_state == PM_SUSPEND_ON && + bridge->no_inc_mrrs) { int max_mrrs = pcie_get_readrq(dev); if (rq > max_mrrs) { -- Gitee From b059c56f595d4c8af8ef305c2f72993ac8c976fa Mon Sep 17 00:00:00 2001 From: Hongchen Zhang Date: Mon, 18 Dec 2023 10:20:15 +0800 Subject: [PATCH 0140/2138] anolis: PCI: fix kabi error caused by pm_suspend_target_state ANBZ: #8435 fix kabi error caused by pm_suspend_target_state,used only by loongson devices. 
Signed-off-by: Hongchen Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/pci/pci.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b0a87919ac0a..ac59f602cf51 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -32,7 +32,9 @@ #include #include #include +#ifdef CONFIG_MACH_LOONGSON64 #include +#endif #include "pci.h" DEFINE_MUTEX(pci_slot_mutex); @@ -173,11 +175,15 @@ static bool pci_bridge_d3_disable; /* Force bridge_d3 for all PCIe ports */ static bool pci_bridge_d3_force; +#ifdef CONFIG_MACH_LOONGSON64 + #ifndef CONFIG_PM_SLEEP suspend_state_t pm_suspend_target_state; #define pm_suspend_target_state (PM_SUSPEND_ON) #endif +#endif + static int __init pcie_port_pm_setup(char *str) { if (!strcmp(str, "off")) @@ -6229,8 +6235,9 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) { u16 v; int ret; +#ifdef CONFIG_MACH_LOONGSON64 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); - +#endif if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) return -EINVAL; @@ -6248,6 +6255,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) v = (ffs(rq) - 8) << 12; +#ifdef CONFIG_MACH_LOONGSON64 if (pm_suspend_target_state == PM_SUSPEND_ON && bridge->no_inc_mrrs) { int max_mrrs = pcie_get_readrq(dev); @@ -6257,6 +6265,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) return -EINVAL; } } +#endif ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, v); -- Gitee From b59b9b6454dc93a674b704ccdedbf55e24029140 Mon Sep 17 00:00:00 2001 From: Tianli Xiong Date: Mon, 18 Dec 2023 10:20:16 +0800 Subject: [PATCH 0141/2138] anolis: LoongArch: fix some PCIE card not scanning properly ANBZ: #8435 Fix some pcie card not scanning properly when bus number is inconsistent during firmware and kernel scan phases. 
Signed-off-by: liuyun Signed-off-by: Tianli Xiong Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/pci/controller/pci-loongson.c | 34 +++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 5f422cdde349..7c0265d3c1db 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -262,6 +262,36 @@ static void __iomem *pci_loongson_map_bus(struct pci_bus *bus, return NULL; } +static int pci_loongson_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + void __iomem *addr; + + addr = bus->ops->map_bus(bus, devfn, where); + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (size == 1) + *val = readb(addr); + else if (size == 2) + *val = readw(addr); + else + *val = readl(addr); + /* + * fix some pcie card not scanning properly when bus number is + * inconsistent during firmware and kernel scan phases. 
+ */ + if (*val == 0x0 && where == PCI_VENDOR_ID) { + writel(*val, addr); + *val = readl(addr); + } + + + return PCIBIOS_SUCCESSFUL; +} + #ifdef CONFIG_OF static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) @@ -285,7 +315,7 @@ static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) /* LS2K/LS7A accept 8/16/32-bit PCI config operations */ static struct pci_ops loongson_pci_ops = { .map_bus = pci_loongson_map_bus, - .read = pci_generic_config_read, + .read = pci_loongson_config_read, .write = pci_generic_config_write, }; @@ -412,7 +442,7 @@ const struct pci_ecam_ops loongson_pci_ecam_ops = { .init = loongson_pci_ecam_init, .pci_ops = { .map_bus = pci_loongson_map_bus, - .read = pci_generic_config_read, + .read = pci_loongson_config_read, .write = pci_generic_config_write, } }; -- Gitee From 12155f39743fd6dea4212fd0f3882e50d680de23 Mon Sep 17 00:00:00 2001 From: Jianmin Lv Date: Mon, 18 Dec 2023 10:20:17 +0800 Subject: [PATCH 0142/2138] anolis: PCI: LS7A2000: fix pm transition of devices under pcie port ANBZ: #8435 Signed-off-by: Jianmin Lv Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/pci/controller/pci-loongson.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 7c0265d3c1db..a7dfd31c4eb6 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -81,6 +81,20 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_LPC, system_bus_quirk); +static void loongson_d3_quirk(struct pci_dev *pdev) +{ + pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; + pdev->no_d1d2 = 1; +} +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT3, loongson_d3_quirk); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT4, loongson_d3_quirk); 
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT5, loongson_d3_quirk); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT6, loongson_d3_quirk); + /* * Some Loongson PCIe ports have hardware limitations on their Maximum Read * Request Size. They can't handle anything larger than this. Sane -- Gitee From 6e7f5182473fabfe54ec7f80db0e3094ae5dd6ce Mon Sep 17 00:00:00 2001 From: Baoqi Zhang Date: Mon, 18 Dec 2023 10:20:18 +0800 Subject: [PATCH 0143/2138] anolis: PCI: LS7A2000: fix GPU card error ANBZ: #8435 Add window to solve GPU access error Signed-off-by: Baoqi Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/pci/controller/pci-loongson.c | 38 +++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index a7dfd31c4eb6..75649975c78e 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -197,6 +197,44 @@ static void loongson_ohci_quirk(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_OHCI, loongson_ohci_quirk); +static void loongson_display_quirk(struct pci_dev *dev) +{ + u32 val; + u64 mask, size; + u64 max_size = 0; + int i, num; + struct pci_bus *bus = dev->bus; + + if (!dev->bus->number) { + if (!(dev->vendor == PCI_VENDOR_ID_LOONGSON && dev->device == 0x7a25)) + return; + } else { + while (!pci_is_root_bus(bus->parent)) + bus = bus->parent; + + /* ensure slot is 7a2000 */ + if (bus->self->vendor != PCI_VENDOR_ID_LOONGSON || bus->self->device < 0x7a39) + return; + } + max_size = 0; + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if (dev->resource[i].flags & IORESOURCE_MEM) { + size = dev->resource[i].end - dev->resource[i].start; + if (size > max_size) { + max_size = size; + num = i; + } + } + } + mask = ~(dev->resource[num].end - dev->resource[num].start); + val = 
(dev->resource[num].start >> (24 - 16)) | ((mask >> 24) & 0xffff); + writel(val, (volatile void *)0x80000efdfb000174UL); + writel(0x80000000, (volatile void *)0x80000efdfb000170UL); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, 0x7a25, loongson_display_quirk); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, + PCI_BASE_CLASS_DISPLAY, 16, loongson_display_quirk); + static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus) { struct pci_config_window *cfg; -- Gitee From fcdd5362d0d54c8fbc17fadfa21dcde7354f185b Mon Sep 17 00:00:00 2001 From: suijingfeng Date: Mon, 18 Dec 2023 10:20:19 +0800 Subject: [PATCH 0144/2138] anolis: PCI: fix X server auto probe fail when both ast and etnaviv drm present ANBZ: #8435 According to PCI-to-PCI bridge spec, bit 3 of Bridge Control Register is VGA Enable bit which modifies the response by the bridge to VGA compatible addresses. The Bridge Control register provides extensions to the Command register that are specific to a bridge. The Bridge Control register provides many of the same controls for the secondary interface that are provided by the Command register for the primary interface. There are some bits that affect the operation of both interfaces of the bridge. If the VGA Enable bit is set, the bridge will positively decode and forward the following accesses on the primary interface to the secondary interface (and, conversely, block the forwarding of these addresses from the secondary to primary interface) Forwarding of these accesses is qualified by the I/O Enable and Memory Enable bits in the Command register.) The default state of this bit after reset must be 0. Bit 3 of Bridge Control Register is VGA Enable bit which modifies the response by the bridge to VGA compatible addresses. 
when 0: do not forward VGA compatible memory and I/O addresses from the primary to secondary interface (addresses defined below) unless they are enabled for forwarding by the defined I/O when 1: forward VGA compatible memory and I/O addresses (addresses defined below) from the primary interface to the secondary interface (if the I/O Enable and Memory Enable bits are set) independent of the I/O and memory address ranges and independent of the ISA Enable bit * memory accesses in the range 000A 0000h to 000B FFFFh * I/O addresses in the first 64 KB of the I/O address space (AD[31:16] are 0000h) where AD[9::0] are in the ranges 3B0h to 3BBh and 3C0h to 3DFh (inclusive of ISA address aliases - AD[15::10] are not decoded) If the VGA Enable bit is set, forwarding of these accesses is independent of the I/O address range and memory address ranges defined by the I/O Base and Limit registers, the Memory Base and Limit registers, and the Prefetchable Memory Base and Limit registers of the bridge. Forwarding of these accesses is also independent of the settings of the ISA Enable bit (in the Bridge Control register) or VGA Palette Snoop bits (in the Command register). The AST2500 hardware we are using does not set the VGA Enable bit in its bridge control register; this causes the vgaarb subsystem to not regard the VGA card behind this bridge as a valid boot vga device, which makes the X server choose the wrong video card to use when multiple video cards are present in the system. It seems to be more vgaarb's fault than the ast2500 bmc's: even though bit 3 of the Bridge Control Register is 0, forwarding of the accesses should still be allowed when the addresses are in the range of the IO/MEM Base and Limit registers. Nevertheless, in order to support the loongson CPU product line, we provide a workaround for this bug on the Sugon L620-G30 and Sugon L820-G30 servers.
see similar bug: https://patchwork.kernel.org/project/linux-pci/patch/20170619023528.11532-1-dja@axtens.net/ Signed-off-by: suijingfeng Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/pci/controller/pci-loongson.c | 48 +++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 75649975c78e..9349d65b9dbc 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "../pci.h" @@ -235,6 +236,53 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, 0x7a25, loongson_display_quirk); DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY, 16, loongson_display_quirk); +static void pci_fixup_aspeed(struct pci_dev *pdev) +{ + struct pci_dev *bridge; + struct pci_bus *bus; + struct pci_dev *vdevp = NULL; + u16 config; + + bus = pdev->bus; + bridge = bus->self; + + /* Is VGA routed to us? */ + if (bridge && (pci_is_bridge(bridge))) { + pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &config); + + /* Yes, this bridge is PCI bridge-to-bridge spec compliant, + * just return! 
+ */ + if (config & PCI_BRIDGE_CTL_VGA) + return; + + dev_warn(&pdev->dev, "VGA bridge control is not enabled\n"); + } + + /* Just return if the system already have a default device */ + if (vga_default_device()) + return; + + /* No default vga device */ + while ((vdevp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, vdevp))) { + if (vdevp->vendor != 0x1a03) { + /* Have other vga devcie in the system, do nothing */ + dev_info(&pdev->dev, + "Another boot vga device: 0x%x:0x%x\n", + vdevp->vendor, vdevp->device); + return; + } + } + + vga_set_default_device(pdev); + + dev_info(&pdev->dev, + "Boot vga device set as 0x%x:0x%x\n", + pdev->vendor, pdev->device); +} +DECLARE_PCI_FIXUP_CLASS_FINAL(0x1a03, 0x2000, + PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_aspeed); + static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus) { struct pci_config_window *cfg; -- Gitee From 4d4f3965eb0d4af677c3d689359b27a2197c8836 Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Mon, 18 Dec 2023 10:20:20 +0800 Subject: [PATCH 0145/2138] anolis: PCI: irq: Add early_param pci_irq_limit to limit pci irq numbers ANBZ: #8435 Signed-off-by: Juxin Gao Signed-off-by: Hongchen Zhang Signed-off-by: Ming Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- drivers/pci/msi/msi.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c index 053bb9fac6e3..c429f9cce441 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -408,12 +408,32 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, return ret; } +#ifdef CONFIG_LOONGARCH +static unsigned int pci_irq_numbers = 32; + +static int __init pci_irq_limit(char *str) +{ + get_option(&str, &pci_irq_numbers); + + if (pci_irq_numbers == 0) + pci_irq_numbers = 32; + return 0; +} + +early_param("pci_irq_limit", pci_irq_limit); +#endif + int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, struct irq_affinity *affd) { int nvec; int rc; +#ifdef 
CONFIG_LOONGARCH + if (maxvec > 32) + maxvec = pci_irq_numbers; +#endif + if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0) return -EINVAL; @@ -788,6 +808,11 @@ int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int { int hwsize, rc, nvec = maxvec; +#ifdef CONFIG_LOONGARCH + if (maxvec > 32) + nvec = pci_irq_numbers; +#endif + if (maxvec < minvec) return -ERANGE; -- Gitee From 749bc34301562e111ab6e387ecfc935d5d06f073 Mon Sep 17 00:00:00 2001 From: Tianli Xiong Date: Mon, 18 Dec 2023 10:20:21 +0800 Subject: [PATCH 0146/2138] anolis: LoongArch: pci root bridige set acpi companion only when not acpi_disabled. ANBZ: #8435 Fix patch "LoongArch: Add PCI controller support" Signed-off-by: Tianli Xiong Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2812 --- arch/loongarch/pci/acpi.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c index b4a4fe253b43..5ba4d3a169b2 100644 --- a/arch/loongarch/pci/acpi.c +++ b/arch/loongarch/pci/acpi.c @@ -26,16 +26,17 @@ void pcibios_add_bus(struct pci_bus *bus) int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { - struct acpi_device *adev = NULL; - struct device *bus_dev = &bridge->bus->dev; - struct pci_config_window *cfg = bridge->bus->sysdata; - if (!acpi_disabled) - adev = to_acpi_device(cfg->parent); + if (!acpi_disabled) { + struct acpi_device *adev = NULL; + struct device *bus_dev = &bridge->bus->dev; + struct pci_config_window *cfg = bridge->bus->sysdata; - ACPI_COMPANION_SET(&bridge->dev, adev); - set_dev_node(bus_dev, pa_to_nid(cfg->res.start)); + adev = to_acpi_device(cfg->parent); + ACPI_COMPANION_SET(&bridge->dev, adev); + set_dev_node(bus_dev, pa_to_nid(cfg->res.start)); + } return 0; } -- Gitee From 9c6a81f72fd926cb975c50c403d39a981ce07902 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:20 +0800 Subject: 
[PATCH 0147/2138] LoongArch: KVM: Add kvm related header files ANBZ: #8436 commit b37e6b680e3a4fad40d8c7b92cfe9b2806c6248e upstream. Add LoongArch KVM related header files, including kvm.h, kvm_host.h and kvm_types.h. All of those are about LoongArch virtualization features and kvm interfaces. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/include/asm/kvm_host.h | 237 +++++++++++++++++++++++++ arch/loongarch/include/asm/kvm_types.h | 11 ++ arch/loongarch/include/uapi/asm/kvm.h | 108 +++++++++++ include/uapi/linux/kvm.h | 9 + 4 files changed, 365 insertions(+) create mode 100644 arch/loongarch/include/asm/kvm_host.h create mode 100644 arch/loongarch/include/asm/kvm_types.h create mode 100644 arch/loongarch/include/uapi/asm/kvm.h diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h new file mode 100644 index 000000000000..11328700d4fa --- /dev/null +++ b/arch/loongarch/include/asm/kvm_host.h @@ -0,0 +1,237 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_KVM_HOST_H__ +#define __ASM_LOONGARCH_KVM_HOST_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* Loongarch KVM register ids */ +#define KVM_GET_IOC_CSR_IDX(id) ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT) +#define KVM_GET_IOC_CPUCFG_IDX(id) ((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT) + +#define KVM_MAX_VCPUS 256 +#define KVM_MAX_CPUCFG_REGS 21 +/* memory slots that does not exposed to userspace */ +#define KVM_PRIVATE_MEM_SLOTS 0 + +#define KVM_HALT_POLL_NS_DEFAULT 500000 + +struct kvm_vm_stat { + struct kvm_vm_stat_generic generic; + u64 pages; + u64 hugepages; +}; + +struct kvm_vcpu_stat { + struct 
kvm_vcpu_stat_generic generic; + u64 int_exits; + u64 idle_exits; + u64 cpucfg_exits; + u64 signal_exits; +}; + +struct kvm_arch_memory_slot { +}; + +struct kvm_context { + unsigned long vpid_cache; + struct kvm_vcpu *last_vcpu; +}; + +struct kvm_world_switch { + int (*exc_entry)(void); + int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu); + unsigned long page_order; +}; + +#define MAX_PGTABLE_LEVELS 4 + +struct kvm_arch { + /* Guest physical mm */ + kvm_pte_t *pgd; + unsigned long gpa_size; + unsigned long invalid_ptes[MAX_PGTABLE_LEVELS]; + unsigned int pte_shifts[MAX_PGTABLE_LEVELS]; + unsigned int root_level; + + s64 time_offset; + struct kvm_context __percpu *vmcs; +}; + +#define CSR_MAX_NUMS 0x800 + +struct loongarch_csrs { + unsigned long csrs[CSR_MAX_NUMS]; +}; + +/* Resume Flags */ +#define RESUME_HOST 0 +#define RESUME_GUEST 1 + +enum emulation_result { + EMULATE_DONE, /* no further processing */ + EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ + EMULATE_DO_IOCSR, /* handle IOCSR request */ + EMULATE_FAIL, /* can't emulate this instruction */ + EMULATE_EXCEPT, /* A guest exception has been generated */ +}; + +#define KVM_LARCH_FPU (0x1 << 0) +#define KVM_LARCH_SWCSR_LATEST (0x1 << 1) +#define KVM_LARCH_HWCSR_USABLE (0x1 << 2) + +struct kvm_vcpu_arch { + /* + * Switch pointer-to-function type to unsigned long + * for loading the value into register directly. 
+ */ + unsigned long host_eentry; + unsigned long guest_eentry; + + /* Pointers stored here for easy accessing from assembly code */ + int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu); + + /* Host registers preserved across guest mode execution */ + unsigned long host_sp; + unsigned long host_tp; + unsigned long host_pgd; + + /* Host CSRs are used when handling exits from guest */ + unsigned long badi; + unsigned long badv; + unsigned long host_ecfg; + unsigned long host_estat; + unsigned long host_percpu; + + /* GPRs */ + unsigned long gprs[32]; + unsigned long pc; + + /* Which auxiliary state is loaded (KVM_LARCH_*) */ + unsigned int aux_inuse; + + /* FPU state */ + struct loongarch_fpu fpu FPU_ALIGN; + + /* CSR state */ + struct loongarch_csrs *csr; + + /* GPR used as IO source/target */ + u32 io_gpr; + + /* KVM register to control count timer */ + u32 count_ctl; + struct hrtimer swtimer; + + /* Bitmask of intr that are pending */ + unsigned long irq_pending; + /* Bitmask of pending intr to be cleared */ + unsigned long irq_clear; + + /* Bitmask of exceptions that are pending */ + unsigned long exception_pending; + unsigned int esubcode; + + /* Cache for pages needed inside spinlock regions */ + struct kvm_mmu_memory_cache mmu_page_cache; + + /* vcpu's vpid */ + u64 vpid; + + /* Frequency of stable timer in Hz */ + u64 timer_mhz; + ktime_t expire; + + /* Last CPU the vCPU state was loaded on */ + int last_sched_cpu; + /* mp state */ + struct kvm_mp_state mp_state; + /* cpucfg */ + u32 cpucfg[KVM_MAX_CPUCFG_REGS]; +}; + +static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg) +{ + return csr->csrs[reg]; +} + +static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val) +{ + csr->csrs[reg] = val; +} + +/* Debug: dump vcpu state */ +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); + +/* MMU handling */ +void kvm_flush_tlb_all(void); +void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long 
gpa); +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write); + +#define KVM_ARCH_WANT_MMU_NOTIFIER +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); + +static inline void update_pc(struct kvm_vcpu_arch *arch) +{ + arch->pc += 4; +} + +/* + * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault. + * @vcpu: Virtual CPU. + * + * Returns: Whether the TLBL exception was likely due to an instruction + * fetch fault rather than a data load fault. + */ +static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch) +{ + return arch->pc == arch->badv; +} + +/* Misc */ +static inline void kvm_arch_hardware_unsetup(void) {} +static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {} +void kvm_check_vpid(struct kvm_vcpu *vcpu); +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer); +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot); +void kvm_init_vmcs(struct kvm *kvm); +void kvm_exc_entry(void); +int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu); + +extern unsigned long vpid_mask; +extern const unsigned long kvm_exception_size; +extern const unsigned long kvm_enter_guest_size; +extern struct kvm_world_switch *kvm_loongarch_ops; + +#define SW_GCSR (1 << 0) 
+#define HW_GCSR (1 << 1) +#define INVALID_GCSR (1 << 2) + +int get_gcsr_flag(int csr); +void set_hw_gcsr(int csr_id, unsigned long val); + +#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */ diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h new file mode 100644 index 000000000000..2fe1d4bdff66 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_types.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_LOONGARCH_KVM_TYPES_H +#define _ASM_LOONGARCH_KVM_TYPES_H + +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40 + +#endif /* _ASM_LOONGARCH_KVM_TYPES_H */ diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h new file mode 100644 index 000000000000..c6ad2ee6106c --- /dev/null +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __UAPI_ASM_LOONGARCH_KVM_H +#define __UAPI_ASM_LOONGARCH_KVM_H + +#include + +/* + * KVM LoongArch specific structures and definitions. + * + * Some parts derived from the x86 version of this file. + */ + +#define __KVM_HAVE_READONLY_MEM + +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_DIRTY_LOG_PAGE_OFFSET 64 + +/* + * for KVM_GET_REGS and KVM_SET_REGS + */ +struct kvm_regs { + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ + __u64 gpr[32]; + __u64 pc; +}; + +/* + * for KVM_GET_FPU and KVM_SET_FPU + */ +struct kvm_fpu { + __u32 fcsr; + __u64 fcc; /* 8x8 */ + struct kvm_fpureg { + __u64 val64[4]; + } fpr[32]; +}; + +/* + * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various + * registers. The id field is broken down as follows: + * + * bits[63..52] - As per linux/kvm.h + * bits[51..32] - Must be zero. + * bits[31..16] - Register set. 
+ * + * Register set = 0: GP registers from kvm_regs (see definitions below). + * + * Register set = 1: CSR registers. + * + * Register set = 2: KVM specific registers (see definitions below). + * + * Register set = 3: FPU / SIMD registers (see definitions below). + * + * Other sets registers may be added in the future. Each set would + * have its own identifier in bits[31..16]. + */ + +#define KVM_REG_LOONGARCH_GPR (KVM_REG_LOONGARCH | 0x00000ULL) +#define KVM_REG_LOONGARCH_CSR (KVM_REG_LOONGARCH | 0x10000ULL) +#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL) +#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL) +#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL) +#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL) +#define KVM_CSR_IDX_MASK 0x7fff +#define KVM_CPUCFG_IDX_MASK 0x7fff + +/* + * KVM_REG_LOONGARCH_KVM - KVM specific control registers. + */ + +#define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2) + +#define LOONGARCH_REG_SHIFT 3 +#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) +#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) +#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) + +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +struct kvm_iocsr_entry { + __u32 addr; + __u32 pad; + __u64 data; +}; + +#define KVM_NR_IRQCHIPS 1 +#define KVM_IRQCHIP_NUM_PINS 64 +#define KVM_MAX_CORES 256 + +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 13065dd96132..863f84619a15 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -264,6 +264,7 @@ struct 
kvm_xen_exit { #define KVM_EXIT_RISCV_SBI 35 #define KVM_EXIT_RISCV_CSR 36 #define KVM_EXIT_NOTIFY 37 +#define KVM_EXIT_LOONGARCH_IOCSR 38 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -336,6 +337,13 @@ struct kvm_run { __u32 len; __u8 is_write; } mmio; + /* KVM_EXIT_LOONGARCH_IOCSR */ + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } iocsr_io; /* KVM_EXIT_HYPERCALL */ struct { __u64 nr; @@ -1362,6 +1370,7 @@ struct kvm_dirty_tlb { #define KVM_REG_ARM64 0x6000000000000000ULL #define KVM_REG_MIPS 0x7000000000000000ULL #define KVM_REG_RISCV 0x8000000000000000ULL +#define KVM_REG_LOONGARCH 0x9000000000000000ULL #define KVM_REG_SIZE_SHIFT 52 #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL -- Gitee From f169298b48a3a321b85eae8f25514a6a9101decf Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:20 +0800 Subject: [PATCH 0148/2138] LoongArch: KVM: Implement kvm module related interface ANBZ: #8436 commit 2bd6ac68726131da32ace9717aa63ff68cf6605c upstream. Implement LoongArch kvm module init, module exit interface, using kvm context to save the vpid info and vcpu world switch interface pointer. 
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/main.c | 358 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 358 insertions(+) create mode 100644 arch/loongarch/kvm/main.c diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c new file mode 100644 index 000000000000..267c0505ea89 --- /dev/null +++ b/arch/loongarch/kvm/main.c @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include +#include "trace.h" + +unsigned long vpid_mask; +struct kvm_world_switch *kvm_loongarch_ops; +static int gcsr_flag[CSR_MAX_NUMS]; +static struct kvm_context __percpu *vmcs; + +int get_gcsr_flag(int csr) +{ + if (csr < CSR_MAX_NUMS) + return gcsr_flag[csr]; + + return INVALID_GCSR; +} + +static inline void set_gcsr_sw_flag(int csr) +{ + if (csr < CSR_MAX_NUMS) + gcsr_flag[csr] |= SW_GCSR; +} + +static inline void set_gcsr_hw_flag(int csr) +{ + if (csr < CSR_MAX_NUMS) + gcsr_flag[csr] |= HW_GCSR; +} + +/* + * The default value of gcsr_flag[CSR] is 0, and we use this + * function to set the flag to 1 (SW_GCSR) or 2 (HW_GCSR) if the + * gcsr is software or hardware. It will be used by get/set_gcsr, + * if gcsr_flag is HW we should use gcsrrd/gcsrwr to access it, + * else use software csr to emulate it. 
+ */ +static void kvm_init_gcsr_flag(void) +{ + set_gcsr_hw_flag(LOONGARCH_CSR_CRMD); + set_gcsr_hw_flag(LOONGARCH_CSR_PRMD); + set_gcsr_hw_flag(LOONGARCH_CSR_EUEN); + set_gcsr_hw_flag(LOONGARCH_CSR_MISC); + set_gcsr_hw_flag(LOONGARCH_CSR_ECFG); + set_gcsr_hw_flag(LOONGARCH_CSR_ESTAT); + set_gcsr_hw_flag(LOONGARCH_CSR_ERA); + set_gcsr_hw_flag(LOONGARCH_CSR_BADV); + set_gcsr_hw_flag(LOONGARCH_CSR_BADI); + set_gcsr_hw_flag(LOONGARCH_CSR_EENTRY); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBIDX); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBEHI); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO0); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO1); + set_gcsr_hw_flag(LOONGARCH_CSR_ASID); + set_gcsr_hw_flag(LOONGARCH_CSR_PGDL); + set_gcsr_hw_flag(LOONGARCH_CSR_PGDH); + set_gcsr_hw_flag(LOONGARCH_CSR_PGD); + set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL0); + set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL1); + set_gcsr_hw_flag(LOONGARCH_CSR_STLBPGSIZE); + set_gcsr_hw_flag(LOONGARCH_CSR_RVACFG); + set_gcsr_hw_flag(LOONGARCH_CSR_CPUID); + set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG1); + set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG2); + set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG3); + set_gcsr_hw_flag(LOONGARCH_CSR_KS0); + set_gcsr_hw_flag(LOONGARCH_CSR_KS1); + set_gcsr_hw_flag(LOONGARCH_CSR_KS2); + set_gcsr_hw_flag(LOONGARCH_CSR_KS3); + set_gcsr_hw_flag(LOONGARCH_CSR_KS4); + set_gcsr_hw_flag(LOONGARCH_CSR_KS5); + set_gcsr_hw_flag(LOONGARCH_CSR_KS6); + set_gcsr_hw_flag(LOONGARCH_CSR_KS7); + set_gcsr_hw_flag(LOONGARCH_CSR_TMID); + set_gcsr_hw_flag(LOONGARCH_CSR_TCFG); + set_gcsr_hw_flag(LOONGARCH_CSR_TVAL); + set_gcsr_hw_flag(LOONGARCH_CSR_TINTCLR); + set_gcsr_hw_flag(LOONGARCH_CSR_CNTC); + set_gcsr_hw_flag(LOONGARCH_CSR_LLBCTL); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRENTRY); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRBADV); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRERA); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRSAVE); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO0); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO1); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBREHI); + 
set_gcsr_hw_flag(LOONGARCH_CSR_TLBRPRMD); + set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN0); + set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN1); + set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN2); + set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN3); + + set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL1); + set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL2); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRCTL); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO1); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO2); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRENTRY); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRERA); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRSAVE); + set_gcsr_sw_flag(LOONGARCH_CSR_CTAG); + set_gcsr_sw_flag(LOONGARCH_CSR_DEBUG); + set_gcsr_sw_flag(LOONGARCH_CSR_DERA); + set_gcsr_sw_flag(LOONGARCH_CSR_DESAVE); + + set_gcsr_sw_flag(LOONGARCH_CSR_FWPC); + set_gcsr_sw_flag(LOONGARCH_CSR_FWPS); + set_gcsr_sw_flag(LOONGARCH_CSR_MWPC); + set_gcsr_sw_flag(LOONGARCH_CSR_MWPS); + + set_gcsr_sw_flag(LOONGARCH_CSR_DB0ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB0MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB0CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB0ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6ADDR); + 
set_gcsr_sw_flag(LOONGARCH_CSR_DB6MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7ASID); + + set_gcsr_sw_flag(LOONGARCH_CSR_IB0ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB0MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB0CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB0ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB7ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB7MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB7CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB7ASID); + + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL0); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR0); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL1); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR1); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL2); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR2); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL3); + 
set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3); +} + +static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu) +{ + unsigned long vpid; + struct kvm_context *context; + + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + vpid = context->vpid_cache + 1; + if (!(vpid & vpid_mask)) { + /* finish round of vpid loop */ + if (unlikely(!vpid)) + vpid = vpid_mask + 1; + + ++vpid; /* vpid 0 reserved for root */ + + /* start new vpid cycle */ + kvm_flush_tlb_all(); + } + + context->vpid_cache = vpid; + vcpu->arch.vpid = vpid; +} + +void kvm_check_vpid(struct kvm_vcpu *vcpu) +{ + int cpu; + bool migrated; + unsigned long ver, old, vpid; + struct kvm_context *context; + + cpu = smp_processor_id(); + /* + * Are we entering guest context on a different CPU to last time? + * If so, the vCPU's guest TLB state on this CPU may be stale. + */ + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + migrated = (vcpu->cpu != cpu); + + /* + * Check if our vpid is of an older version + * + * We also discard the stored vpid if we've executed on + * another CPU, as the guest mappings may have changed without + * hypervisor knowledge. + */ + ver = vcpu->arch.vpid & ~vpid_mask; + old = context->vpid_cache & ~vpid_mask; + if (migrated || (ver != old)) { + kvm_update_vpid(vcpu, cpu); + trace_kvm_vpid_change(vcpu, vcpu->arch.vpid); + vcpu->cpu = cpu; + } + + /* Restore GSTAT(0x50).vpid */ + vpid = (vcpu->arch.vpid & vpid_mask) << CSR_GSTAT_GID_SHIFT; + change_csr_gstat(vpid_mask << CSR_GSTAT_GID_SHIFT, vpid); +} + +static int kvm_loongarch_env_init(void) +{ + int cpu, order; + void *addr; + struct kvm_context *context; + + vmcs = alloc_percpu(struct kvm_context); + if (!vmcs) { + pr_err("kvm: failed to allocate percpu kvm_context\n"); + return -ENOMEM; + } + + kvm_loongarch_ops = kzalloc(sizeof(*kvm_loongarch_ops), GFP_KERNEL); + if (!kvm_loongarch_ops) { + free_percpu(vmcs); + vmcs = NULL; + return -ENOMEM; + } + + /* + * PGD register is shared between root kernel and kvm hypervisor. 
+ * So world switch entry should be in DMW area rather than TLB area + * to avoid page fault reenter. + * + * In future if hardware pagetable walking is supported, we won't + * need to copy world switch code to DMW area. + */ + order = get_order(kvm_exception_size + kvm_enter_guest_size); + addr = (void *)__get_free_pages(GFP_KERNEL, order); + if (!addr) { + free_percpu(vmcs); + vmcs = NULL; + kfree(kvm_loongarch_ops); + kvm_loongarch_ops = NULL; + return -ENOMEM; + } + + memcpy(addr, kvm_exc_entry, kvm_exception_size); + memcpy(addr + kvm_exception_size, kvm_enter_guest, kvm_enter_guest_size); + flush_icache_range((unsigned long)addr, (unsigned long)addr + kvm_exception_size + kvm_enter_guest_size); + kvm_loongarch_ops->exc_entry = addr; + kvm_loongarch_ops->enter_guest = addr + kvm_exception_size; + kvm_loongarch_ops->page_order = order; + + vpid_mask = read_csr_gstat(); + vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT; + if (vpid_mask) + vpid_mask = GENMASK(vpid_mask - 1, 0); + + for_each_possible_cpu(cpu) { + context = per_cpu_ptr(vmcs, cpu); + context->vpid_cache = vpid_mask + 1; + context->last_vcpu = NULL; + } + + kvm_init_gcsr_flag(); + + return 0; +} + +static void kvm_loongarch_env_exit(void) +{ + unsigned long addr; + + if (vmcs) + free_percpu(vmcs); + + if (kvm_loongarch_ops) { + if (kvm_loongarch_ops->exc_entry) { + addr = (unsigned long)kvm_loongarch_ops->exc_entry; + free_pages(addr, kvm_loongarch_ops->page_order); + } + kfree(kvm_loongarch_ops); + } +} + +static int kvm_loongarch_init(void) +{ + int r; + + if (!cpu_has_lvz) { + kvm_info("Hardware virtualization not available\n"); + return -ENODEV; + } + r = kvm_loongarch_env_init(); + if (r) + return r; + + return kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE); +} + +static void kvm_loongarch_exit(void) +{ + kvm_exit(); + kvm_loongarch_env_exit(); +} + +module_init(kvm_loongarch_init); +module_exit(kvm_loongarch_exit); + +#ifdef MODULE +static const struct cpu_feature 
kvm_feature[] = { + { .feature = cpu_feature(LOONGARCH_LVZ) }, + {}, +}; +MODULE_DEVICE_TABLE(cpu, kvm_feature); +#endif -- Gitee From 48be2e220263c58d040a28f5ead4ba762b862024 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:20 +0800 Subject: [PATCH 0149/2138] LoongArch: KVM: Implement kvm hardware enable, disable interface ANBZ: #8436 commit 0d0df3c99d4fbc6561b0addb094e52f19f3c7baa upstream. Implement kvm hardware enable, disable interface, setting the guest config register to enable virtualization features when called the interface. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/main.c | 62 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index 267c0505ea89..1c1d5199500e 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -249,6 +249,68 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu) change_csr_gstat(vpid_mask << CSR_GSTAT_GID_SHIFT, vpid); } +void kvm_init_vmcs(struct kvm *kvm) +{ + kvm->arch.vmcs = vmcs; +} + +long kvm_arch_dev_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_hardware_enable(void) +{ + unsigned long env, gcfg = 0; + + env = read_csr_gcfg(); + + /* First init gcfg, gstat, gintc, gtlbc. All guest use the same config */ + write_csr_gcfg(0); + write_csr_gstat(0); + write_csr_gintc(0); + clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI); + + /* + * Enable virtualization features granting guest direct control of + * certain features: + * GCI=2: Trap on init or unimplement cache instruction. + * TORU=0: Trap on Root Unimplement. + * CACTRL=1: Root control cache. + * TOP=0: Trap on Previlege. + * TOE=0: Trap on Exception. + * TIT=0: Trap on Timer. 
+ */ + if (env & CSR_GCFG_GCIP_ALL) + gcfg |= CSR_GCFG_GCI_SECURE; + if (env & CSR_GCFG_MATC_ROOT) + gcfg |= CSR_GCFG_MATC_ROOT; + + gcfg |= CSR_GCFG_TIT; + write_csr_gcfg(gcfg); + + kvm_flush_tlb_all(); + + /* Enable using TGID */ + set_csr_gtlbc(CSR_GTLBC_USETGID); + kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx", + read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc()); + + return 0; +} + +void kvm_arch_hardware_disable(void) +{ + write_csr_gcfg(0); + write_csr_gstat(0); + write_csr_gintc(0); + clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI); + + /* Flush any remaining guest TLB entries */ + kvm_flush_tlb_all(); +} + static int kvm_loongarch_env_init(void) { int cpu, order; -- Gitee From b420d8caad97b651aebf6defc12b34108e8bd0c0 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:27 +0800 Subject: [PATCH 0150/2138] LoongArch: KVM: Implement VM related functions ANBZ: #8436 commit 482795cb62aa63bbba3a1265fa5b5601be9d13df upstream. Implement LoongArch VM operations: Init and destroy vm interface, allocating memory page to save the vm pgd when init vm. Implement vm check extension, such as getting vcpu number info, memory slots info, and fpu info. And implement vm status description. 
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/vm.c | 94 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 arch/loongarch/kvm/vm.c diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c new file mode 100644 index 000000000000..0a37f6fa8f2d --- /dev/null +++ b/arch/loongarch/kvm/vm.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include + +const struct _kvm_stats_desc kvm_vm_stats_desc[] = { + KVM_GENERIC_VM_STATS(), + STATS_DESC_ICOUNTER(VM, pages), + STATS_DESC_ICOUNTER(VM, hugepages), +}; + +const struct kvm_stats_header kvm_vm_stats_header = { + .name_size = KVM_STATS_NAME_SIZE, + .num_desc = ARRAY_SIZE(kvm_vm_stats_desc), + .id_offset = sizeof(struct kvm_stats_header), + .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, + .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + + sizeof(kvm_vm_stats_desc), +}; + +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) +{ + int i; + + /* Allocate page table to map GPA -> RPA */ + kvm->arch.pgd = kvm_pgd_alloc(); + if (!kvm->arch.pgd) + return -ENOMEM; + + kvm_init_vmcs(kvm); + kvm->arch.gpa_size = BIT(cpu_vabits - 1); + kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1; + kvm->arch.invalid_ptes[0] = 0; + kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table; +#if CONFIG_PGTABLE_LEVELS > 2 + kvm->arch.invalid_ptes[2] = (unsigned long)invalid_pmd_table; +#endif +#if CONFIG_PGTABLE_LEVELS > 3 + kvm->arch.invalid_ptes[3] = (unsigned long)invalid_pud_table; +#endif + for (i = 0; i <= kvm->arch.root_level; i++) + kvm->arch.pte_shifts[i] = PAGE_SHIFT + i * (PAGE_SHIFT - 3); + + return 0; +} + +void kvm_arch_destroy_vm(struct kvm *kvm) +{ + 
kvm_destroy_vcpus(kvm); + free_page((unsigned long)kvm->arch.pgd); + kvm->arch.pgd = NULL; +} + +int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) +{ + int r; + + switch (ext) { + case KVM_CAP_ONE_REG: + case KVM_CAP_ENABLE_CAP: + case KVM_CAP_READONLY_MEM: + case KVM_CAP_SYNC_MMU: + case KVM_CAP_IMMEDIATE_EXIT: + case KVM_CAP_IOEVENTFD: + case KVM_CAP_MP_STATE: + r = 1; + break; + case KVM_CAP_NR_VCPUS: + r = num_online_cpus(); + break; + case KVM_CAP_MAX_VCPUS: + r = KVM_MAX_VCPUS; + break; + case KVM_CAP_MAX_VCPU_ID: + r = KVM_MAX_VCPU_IDS; + break; + case KVM_CAP_NR_MEMSLOTS: + r = KVM_USER_MEM_SLOTS; + break; + default: + r = 0; + break; + } + + return r; +} + +int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + return -ENOIOCTLCMD; +} -- Gitee From 0501ef3960d3f64cd9db84dd202ab072193a035d Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:27 +0800 Subject: [PATCH 0151/2138] LoongArch: KVM: Add vcpu related header files ANBZ: #8436 commit dfe3dc07fa68f2be1bf8af98656e674e9636d965 upstream. Add LoongArch vcpu related header files, including vcpu csr information, irq number definitions, and some vcpu interfaces. 
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/include/asm/kvm_csr.h | 211 +++++++++++++++++++++++++ arch/loongarch/include/asm/kvm_vcpu.h | 93 +++++++++++ arch/loongarch/include/asm/loongarch.h | 19 ++- arch/loongarch/kvm/trace.h | 162 +++++++++++++++++++ 4 files changed, 480 insertions(+), 5 deletions(-) create mode 100644 arch/loongarch/include/asm/kvm_csr.h create mode 100644 arch/loongarch/include/asm/kvm_vcpu.h create mode 100644 arch/loongarch/kvm/trace.h diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h new file mode 100644 index 000000000000..724ca8b7b401 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_csr.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_KVM_CSR_H__ +#define __ASM_LOONGARCH_KVM_CSR_H__ + +#include +#include +#include +#include + +#define gcsr_read(csr) \ +({ \ + register unsigned long __v; \ + __asm__ __volatile__( \ + " gcsrrd %[val], %[reg]\n\t" \ + : [val] "=r" (__v) \ + : [reg] "i" (csr) \ + : "memory"); \ + __v; \ +}) + +#define gcsr_write(v, csr) \ +({ \ + register unsigned long __v = v; \ + __asm__ __volatile__ ( \ + " gcsrwr %[val], %[reg]\n\t" \ + : [val] "+r" (__v) \ + : [reg] "i" (csr) \ + : "memory"); \ +}) + +#define gcsr_xchg(v, m, csr) \ +({ \ + register unsigned long __v = v; \ + __asm__ __volatile__( \ + " gcsrxchg %[val], %[mask], %[reg]\n\t" \ + : [val] "+r" (__v) \ + : [mask] "r" (m), [reg] "i" (csr) \ + : "memory"); \ + __v; \ +}) + +/* Guest CSRS read and write */ +#define read_gcsr_crmd() gcsr_read(LOONGARCH_CSR_CRMD) +#define write_gcsr_crmd(val) gcsr_write(val, LOONGARCH_CSR_CRMD) +#define read_gcsr_prmd() gcsr_read(LOONGARCH_CSR_PRMD) +#define write_gcsr_prmd(val) gcsr_write(val, 
LOONGARCH_CSR_PRMD) +#define read_gcsr_euen() gcsr_read(LOONGARCH_CSR_EUEN) +#define write_gcsr_euen(val) gcsr_write(val, LOONGARCH_CSR_EUEN) +#define read_gcsr_misc() gcsr_read(LOONGARCH_CSR_MISC) +#define write_gcsr_misc(val) gcsr_write(val, LOONGARCH_CSR_MISC) +#define read_gcsr_ecfg() gcsr_read(LOONGARCH_CSR_ECFG) +#define write_gcsr_ecfg(val) gcsr_write(val, LOONGARCH_CSR_ECFG) +#define read_gcsr_estat() gcsr_read(LOONGARCH_CSR_ESTAT) +#define write_gcsr_estat(val) gcsr_write(val, LOONGARCH_CSR_ESTAT) +#define read_gcsr_era() gcsr_read(LOONGARCH_CSR_ERA) +#define write_gcsr_era(val) gcsr_write(val, LOONGARCH_CSR_ERA) +#define read_gcsr_badv() gcsr_read(LOONGARCH_CSR_BADV) +#define write_gcsr_badv(val) gcsr_write(val, LOONGARCH_CSR_BADV) +#define read_gcsr_badi() gcsr_read(LOONGARCH_CSR_BADI) +#define write_gcsr_badi(val) gcsr_write(val, LOONGARCH_CSR_BADI) +#define read_gcsr_eentry() gcsr_read(LOONGARCH_CSR_EENTRY) +#define write_gcsr_eentry(val) gcsr_write(val, LOONGARCH_CSR_EENTRY) + +#define read_gcsr_asid() gcsr_read(LOONGARCH_CSR_ASID) +#define write_gcsr_asid(val) gcsr_write(val, LOONGARCH_CSR_ASID) +#define read_gcsr_pgdl() gcsr_read(LOONGARCH_CSR_PGDL) +#define write_gcsr_pgdl(val) gcsr_write(val, LOONGARCH_CSR_PGDL) +#define read_gcsr_pgdh() gcsr_read(LOONGARCH_CSR_PGDH) +#define write_gcsr_pgdh(val) gcsr_write(val, LOONGARCH_CSR_PGDH) +#define write_gcsr_pgd(val) gcsr_write(val, LOONGARCH_CSR_PGD) +#define read_gcsr_pgd() gcsr_read(LOONGARCH_CSR_PGD) +#define read_gcsr_pwctl0() gcsr_read(LOONGARCH_CSR_PWCTL0) +#define write_gcsr_pwctl0(val) gcsr_write(val, LOONGARCH_CSR_PWCTL0) +#define read_gcsr_pwctl1() gcsr_read(LOONGARCH_CSR_PWCTL1) +#define write_gcsr_pwctl1(val) gcsr_write(val, LOONGARCH_CSR_PWCTL1) +#define read_gcsr_stlbpgsize() gcsr_read(LOONGARCH_CSR_STLBPGSIZE) +#define write_gcsr_stlbpgsize(val) gcsr_write(val, LOONGARCH_CSR_STLBPGSIZE) +#define read_gcsr_rvacfg() gcsr_read(LOONGARCH_CSR_RVACFG) +#define write_gcsr_rvacfg(val) 
gcsr_write(val, LOONGARCH_CSR_RVACFG) + +#define read_gcsr_cpuid() gcsr_read(LOONGARCH_CSR_CPUID) +#define write_gcsr_cpuid(val) gcsr_write(val, LOONGARCH_CSR_CPUID) +#define read_gcsr_prcfg1() gcsr_read(LOONGARCH_CSR_PRCFG1) +#define write_gcsr_prcfg1(val) gcsr_write(val, LOONGARCH_CSR_PRCFG1) +#define read_gcsr_prcfg2() gcsr_read(LOONGARCH_CSR_PRCFG2) +#define write_gcsr_prcfg2(val) gcsr_write(val, LOONGARCH_CSR_PRCFG2) +#define read_gcsr_prcfg3() gcsr_read(LOONGARCH_CSR_PRCFG3) +#define write_gcsr_prcfg3(val) gcsr_write(val, LOONGARCH_CSR_PRCFG3) + +#define read_gcsr_kscratch0() gcsr_read(LOONGARCH_CSR_KS0) +#define write_gcsr_kscratch0(val) gcsr_write(val, LOONGARCH_CSR_KS0) +#define read_gcsr_kscratch1() gcsr_read(LOONGARCH_CSR_KS1) +#define write_gcsr_kscratch1(val) gcsr_write(val, LOONGARCH_CSR_KS1) +#define read_gcsr_kscratch2() gcsr_read(LOONGARCH_CSR_KS2) +#define write_gcsr_kscratch2(val) gcsr_write(val, LOONGARCH_CSR_KS2) +#define read_gcsr_kscratch3() gcsr_read(LOONGARCH_CSR_KS3) +#define write_gcsr_kscratch3(val) gcsr_write(val, LOONGARCH_CSR_KS3) +#define read_gcsr_kscratch4() gcsr_read(LOONGARCH_CSR_KS4) +#define write_gcsr_kscratch4(val) gcsr_write(val, LOONGARCH_CSR_KS4) +#define read_gcsr_kscratch5() gcsr_read(LOONGARCH_CSR_KS5) +#define write_gcsr_kscratch5(val) gcsr_write(val, LOONGARCH_CSR_KS5) +#define read_gcsr_kscratch6() gcsr_read(LOONGARCH_CSR_KS6) +#define write_gcsr_kscratch6(val) gcsr_write(val, LOONGARCH_CSR_KS6) +#define read_gcsr_kscratch7() gcsr_read(LOONGARCH_CSR_KS7) +#define write_gcsr_kscratch7(val) gcsr_write(val, LOONGARCH_CSR_KS7) + +#define read_gcsr_timerid() gcsr_read(LOONGARCH_CSR_TMID) +#define write_gcsr_timerid(val) gcsr_write(val, LOONGARCH_CSR_TMID) +#define read_gcsr_timercfg() gcsr_read(LOONGARCH_CSR_TCFG) +#define write_gcsr_timercfg(val) gcsr_write(val, LOONGARCH_CSR_TCFG) +#define read_gcsr_timertick() gcsr_read(LOONGARCH_CSR_TVAL) +#define write_gcsr_timertick(val) gcsr_write(val, LOONGARCH_CSR_TVAL) +#define 
read_gcsr_timeroffset() gcsr_read(LOONGARCH_CSR_CNTC) +#define write_gcsr_timeroffset(val) gcsr_write(val, LOONGARCH_CSR_CNTC) + +#define read_gcsr_llbctl() gcsr_read(LOONGARCH_CSR_LLBCTL) +#define write_gcsr_llbctl(val) gcsr_write(val, LOONGARCH_CSR_LLBCTL) + +#define read_gcsr_tlbidx() gcsr_read(LOONGARCH_CSR_TLBIDX) +#define write_gcsr_tlbidx(val) gcsr_write(val, LOONGARCH_CSR_TLBIDX) +#define read_gcsr_tlbrentry() gcsr_read(LOONGARCH_CSR_TLBRENTRY) +#define write_gcsr_tlbrentry(val) gcsr_write(val, LOONGARCH_CSR_TLBRENTRY) +#define read_gcsr_tlbrbadv() gcsr_read(LOONGARCH_CSR_TLBRBADV) +#define write_gcsr_tlbrbadv(val) gcsr_write(val, LOONGARCH_CSR_TLBRBADV) +#define read_gcsr_tlbrera() gcsr_read(LOONGARCH_CSR_TLBRERA) +#define write_gcsr_tlbrera(val) gcsr_write(val, LOONGARCH_CSR_TLBRERA) +#define read_gcsr_tlbrsave() gcsr_read(LOONGARCH_CSR_TLBRSAVE) +#define write_gcsr_tlbrsave(val) gcsr_write(val, LOONGARCH_CSR_TLBRSAVE) +#define read_gcsr_tlbrelo0() gcsr_read(LOONGARCH_CSR_TLBRELO0) +#define write_gcsr_tlbrelo0(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO0) +#define read_gcsr_tlbrelo1() gcsr_read(LOONGARCH_CSR_TLBRELO1) +#define write_gcsr_tlbrelo1(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO1) +#define read_gcsr_tlbrehi() gcsr_read(LOONGARCH_CSR_TLBREHI) +#define write_gcsr_tlbrehi(val) gcsr_write(val, LOONGARCH_CSR_TLBREHI) +#define read_gcsr_tlbrprmd() gcsr_read(LOONGARCH_CSR_TLBRPRMD) +#define write_gcsr_tlbrprmd(val) gcsr_write(val, LOONGARCH_CSR_TLBRPRMD) + +#define read_gcsr_directwin0() gcsr_read(LOONGARCH_CSR_DMWIN0) +#define write_gcsr_directwin0(val) gcsr_write(val, LOONGARCH_CSR_DMWIN0) +#define read_gcsr_directwin1() gcsr_read(LOONGARCH_CSR_DMWIN1) +#define write_gcsr_directwin1(val) gcsr_write(val, LOONGARCH_CSR_DMWIN1) +#define read_gcsr_directwin2() gcsr_read(LOONGARCH_CSR_DMWIN2) +#define write_gcsr_directwin2(val) gcsr_write(val, LOONGARCH_CSR_DMWIN2) +#define read_gcsr_directwin3() gcsr_read(LOONGARCH_CSR_DMWIN3) +#define 
write_gcsr_directwin3(val) gcsr_write(val, LOONGARCH_CSR_DMWIN3) + +/* Guest related CSRs */ +#define read_csr_gtlbc() csr_read64(LOONGARCH_CSR_GTLBC) +#define write_csr_gtlbc(val) csr_write64(val, LOONGARCH_CSR_GTLBC) +#define read_csr_trgp() csr_read64(LOONGARCH_CSR_TRGP) +#define read_csr_gcfg() csr_read64(LOONGARCH_CSR_GCFG) +#define write_csr_gcfg(val) csr_write64(val, LOONGARCH_CSR_GCFG) +#define read_csr_gstat() csr_read64(LOONGARCH_CSR_GSTAT) +#define write_csr_gstat(val) csr_write64(val, LOONGARCH_CSR_GSTAT) +#define read_csr_gintc() csr_read64(LOONGARCH_CSR_GINTC) +#define write_csr_gintc(val) csr_write64(val, LOONGARCH_CSR_GINTC) +#define read_csr_gcntc() csr_read64(LOONGARCH_CSR_GCNTC) +#define write_csr_gcntc(val) csr_write64(val, LOONGARCH_CSR_GCNTC) + +#define __BUILD_GCSR_OP(name) __BUILD_CSR_COMMON(gcsr_##name) + +__BUILD_CSR_OP(gcfg) +__BUILD_CSR_OP(gstat) +__BUILD_CSR_OP(gtlbc) +__BUILD_CSR_OP(gintc) +__BUILD_GCSR_OP(llbctl) +__BUILD_GCSR_OP(tlbidx) + +#define set_gcsr_estat(val) \ + gcsr_xchg(val, val, LOONGARCH_CSR_ESTAT) +#define clear_gcsr_estat(val) \ + gcsr_xchg(~(val), val, LOONGARCH_CSR_ESTAT) + +#define kvm_read_hw_gcsr(id) gcsr_read(id) +#define kvm_write_hw_gcsr(id, val) gcsr_write(val, id) + +#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid)) +#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid)) + +int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu); + +static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid) +{ + return csr->csrs[gid]; +} + +static __always_inline void kvm_write_sw_gcsr(struct loongarch_csrs *csr, int gid, unsigned long val) +{ + csr->csrs[gid] = val; +} + +static __always_inline void kvm_set_sw_gcsr(struct loongarch_csrs *csr, + int gid, unsigned long val) +{ + csr->csrs[gid] |= val; +} + +static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr, + int gid, unsigned long mask, unsigned long val) +{ + 
unsigned long _mask = mask; + + csr->csrs[gid] &= ~_mask; + csr->csrs[gid] |= val & _mask; +} + +#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */ diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h new file mode 100644 index 000000000000..553cfa2b2b1c --- /dev/null +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_KVM_VCPU_H__ +#define __ASM_LOONGARCH_KVM_VCPU_H__ + +#include +#include + +/* Controlled by 0x5 guest estat */ +#define CPU_SIP0 (_ULCAST_(1)) +#define CPU_SIP1 (_ULCAST_(1) << 1) +#define CPU_PMU (_ULCAST_(1) << 10) +#define CPU_TIMER (_ULCAST_(1) << 11) +#define CPU_IPI (_ULCAST_(1) << 12) + +/* Controlled by 0x52 guest exception VIP aligned to estat bit 5~12 */ +#define CPU_IP0 (_ULCAST_(1)) +#define CPU_IP1 (_ULCAST_(1) << 1) +#define CPU_IP2 (_ULCAST_(1) << 2) +#define CPU_IP3 (_ULCAST_(1) << 3) +#define CPU_IP4 (_ULCAST_(1) << 4) +#define CPU_IP5 (_ULCAST_(1) << 5) +#define CPU_IP6 (_ULCAST_(1) << 6) +#define CPU_IP7 (_ULCAST_(1) << 7) + +#define MNSEC_PER_SEC (NSEC_PER_SEC >> 20) + +/* KVM_IRQ_LINE irq field index values */ +#define KVM_LOONGSON_IRQ_TYPE_SHIFT 24 +#define KVM_LOONGSON_IRQ_TYPE_MASK 0xff +#define KVM_LOONGSON_IRQ_VCPU_SHIFT 16 +#define KVM_LOONGSON_IRQ_VCPU_MASK 0xff +#define KVM_LOONGSON_IRQ_NUM_SHIFT 0 +#define KVM_LOONGSON_IRQ_NUM_MASK 0xffff + +typedef union loongarch_instruction larch_inst; +typedef int (*exit_handle_fn)(struct kvm_vcpu *); + +int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst); +int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst); +int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_emu_idle(struct kvm_vcpu *vcpu); +int kvm_pending_timer(struct kvm_vcpu *vcpu); +int kvm_handle_fault(struct kvm_vcpu *vcpu, 
int fault); +void kvm_deliver_intr(struct kvm_vcpu *vcpu); +void kvm_deliver_exception(struct kvm_vcpu *vcpu); + +void kvm_own_fpu(struct kvm_vcpu *vcpu); +void kvm_lose_fpu(struct kvm_vcpu *vcpu); +void kvm_save_fpu(struct loongarch_fpu *fpu); +void kvm_restore_fpu(struct loongarch_fpu *fpu); +void kvm_restore_fcsr(struct loongarch_fpu *fpu); + +void kvm_acquire_timer(struct kvm_vcpu *vcpu); +void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); +void kvm_reset_timer(struct kvm_vcpu *vcpu); +void kvm_save_timer(struct kvm_vcpu *vcpu); +void kvm_restore_timer(struct kvm_vcpu *vcpu); + +int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq); + +/* + * Loongarch KVM guest interrupt handling + */ +static inline void kvm_queue_irq(struct kvm_vcpu *vcpu, unsigned int irq) +{ + set_bit(irq, &vcpu->arch.irq_pending); + clear_bit(irq, &vcpu->arch.irq_clear); +} + +static inline void kvm_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int irq) +{ + clear_bit(irq, &vcpu->arch.irq_pending); + set_bit(irq, &vcpu->arch.irq_clear); +} + +static inline int kvm_queue_exception(struct kvm_vcpu *vcpu, + unsigned int code, unsigned int subcode) +{ + /* only one exception can be injected */ + if (!vcpu->arch.exception_pending) { + set_bit(code, &vcpu->arch.exception_pending); + vcpu->arch.esubcode = subcode; + return 0; + } else + return -1; +} + +#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */ diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index badb065f8383..a1774ba0167b 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -227,6 +227,7 @@ #define LOONGARCH_CSR_ECFG 0x4 /* Exception config */ #define CSR_ECFG_VS_SHIFT 16 #define CSR_ECFG_VS_WIDTH 3 +#define CSR_ECFG_VS_SHIFT_END (CSR_ECFG_VS_SHIFT + CSR_ECFG_VS_WIDTH - 1) #define CSR_ECFG_VS (_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT) #define CSR_ECFG_IM_SHIFT 0 #define CSR_ECFG_IM_WIDTH 14 @@ -315,13 +316,14 @@ #define 
CSR_TLBLO1_V (_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT) #define LOONGARCH_CSR_GTLBC 0x15 /* Guest TLB control */ -#define CSR_GTLBC_RID_SHIFT 16 -#define CSR_GTLBC_RID_WIDTH 8 -#define CSR_GTLBC_RID (_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT) +#define CSR_GTLBC_TGID_SHIFT 16 +#define CSR_GTLBC_TGID_WIDTH 8 +#define CSR_GTLBC_TGID_SHIFT_END (CSR_GTLBC_TGID_SHIFT + CSR_GTLBC_TGID_WIDTH - 1) +#define CSR_GTLBC_TGID (_ULCAST_(0xff) << CSR_GTLBC_TGID_SHIFT) #define CSR_GTLBC_TOTI_SHIFT 13 #define CSR_GTLBC_TOTI (_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT) -#define CSR_GTLBC_USERID_SHIFT 12 -#define CSR_GTLBC_USERID (_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT) +#define CSR_GTLBC_USETGID_SHIFT 12 +#define CSR_GTLBC_USETGID (_ULCAST_(0x1) << CSR_GTLBC_USETGID_SHIFT) #define CSR_GTLBC_GMTLBSZ_SHIFT 0 #define CSR_GTLBC_GMTLBSZ_WIDTH 6 #define CSR_GTLBC_GMTLBSZ (_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT) @@ -476,6 +478,7 @@ #define LOONGARCH_CSR_GSTAT 0x50 /* Guest status */ #define CSR_GSTAT_GID_SHIFT 16 #define CSR_GSTAT_GID_WIDTH 8 +#define CSR_GSTAT_GID_SHIFT_END (CSR_GSTAT_GID_SHIFT + CSR_GSTAT_GID_WIDTH - 1) #define CSR_GSTAT_GID (_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT) #define CSR_GSTAT_GIDBIT_SHIFT 4 #define CSR_GSTAT_GIDBIT_WIDTH 6 @@ -526,6 +529,12 @@ #define CSR_GCFG_MATC_GUEST (_ULCAST_(0x0) << CSR_GCFG_MATC_SHITF) #define CSR_GCFG_MATC_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATC_SHITF) #define CSR_GCFG_MATC_NEST (_ULCAST_(0x2) << CSR_GCFG_MATC_SHITF) +#define CSR_GCFG_MATP_NEST_SHIFT 2 +#define CSR_GCFG_MATP_NEST (_ULCAST_(0x1) << CSR_GCFG_MATP_NEST_SHIFT) +#define CSR_GCFG_MATP_ROOT_SHIFT 1 +#define CSR_GCFG_MATP_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATP_ROOT_SHIFT) +#define CSR_GCFG_MATP_GUEST_SHIFT 0 +#define CSR_GCFG_MATP_GUEST (_ULCAST_(0x1) << CSR_GCFG_MATP_GUEST_SHIFT) #define LOONGARCH_CSR_GINTC 0x52 /* Guest interrupt control */ #define CSR_GINTC_HC_SHIFT 16 diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h new file mode 100644 index 000000000000..a1e35d655418 --- 
/dev/null +++ b/arch/loongarch/kvm/trace.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_H + +#include +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm + +/* + * Tracepoints for VM enters + */ +DECLARE_EVENT_CLASS(kvm_transition, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu), + TP_STRUCT__entry( + __field(unsigned long, pc) + ), + + TP_fast_assign( + __entry->pc = vcpu->arch.pc; + ), + + TP_printk("PC: 0x%08lx", __entry->pc) +); + +DEFINE_EVENT(kvm_transition, kvm_enter, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu)); + +DEFINE_EVENT(kvm_transition, kvm_reenter, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu)); + +DEFINE_EVENT(kvm_transition, kvm_out, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu)); + +/* Further exit reasons */ +#define KVM_TRACE_EXIT_IDLE 64 +#define KVM_TRACE_EXIT_CACHE 65 + +/* Tracepoints for VM exits */ +#define kvm_trace_symbol_exit_types \ + { KVM_TRACE_EXIT_IDLE, "IDLE" }, \ + { KVM_TRACE_EXIT_CACHE, "CACHE" } + +DECLARE_EVENT_CLASS(kvm_exit, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason), + TP_STRUCT__entry( + __field(unsigned long, pc) + __field(unsigned int, reason) + ), + + TP_fast_assign( + __entry->pc = vcpu->arch.pc; + __entry->reason = reason; + ), + + TP_printk("[%s]PC: 0x%08lx", + __print_symbolic(__entry->reason, + kvm_trace_symbol_exit_types), + __entry->pc) +); + +DEFINE_EVENT(kvm_exit, kvm_exit_idle, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason)); + +DEFINE_EVENT(kvm_exit, kvm_exit_cache, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason)); + +DEFINE_EVENT(kvm_exit, kvm_exit, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason)); + +TRACE_EVENT(kvm_exit_gspr, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int 
inst_word), + TP_ARGS(vcpu, inst_word), + TP_STRUCT__entry( + __field(unsigned int, inst_word) + ), + + TP_fast_assign( + __entry->inst_word = inst_word; + ), + + TP_printk("Inst word: 0x%08x", __entry->inst_word) +); + +#define KVM_TRACE_AUX_SAVE 0 +#define KVM_TRACE_AUX_RESTORE 1 +#define KVM_TRACE_AUX_ENABLE 2 +#define KVM_TRACE_AUX_DISABLE 3 +#define KVM_TRACE_AUX_DISCARD 4 + +#define KVM_TRACE_AUX_FPU 1 + +#define kvm_trace_symbol_aux_op \ + { KVM_TRACE_AUX_SAVE, "save" }, \ + { KVM_TRACE_AUX_RESTORE, "restore" }, \ + { KVM_TRACE_AUX_ENABLE, "enable" }, \ + { KVM_TRACE_AUX_DISABLE, "disable" }, \ + { KVM_TRACE_AUX_DISCARD, "discard" } + +#define kvm_trace_symbol_aux_state \ + { KVM_TRACE_AUX_FPU, "FPU" } + +TRACE_EVENT(kvm_aux, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, + unsigned int state), + TP_ARGS(vcpu, op, state), + TP_STRUCT__entry( + __field(unsigned long, pc) + __field(u8, op) + __field(u8, state) + ), + + TP_fast_assign( + __entry->pc = vcpu->arch.pc; + __entry->op = op; + __entry->state = state; + ), + + TP_printk("%s %s PC: 0x%08lx", + __print_symbolic(__entry->op, + kvm_trace_symbol_aux_op), + __print_symbolic(__entry->state, + kvm_trace_symbol_aux_state), + __entry->pc) +); + +TRACE_EVENT(kvm_vpid_change, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned long vpid), + TP_ARGS(vcpu, vpid), + TP_STRUCT__entry( + __field(unsigned long, vpid) + ), + + TP_fast_assign( + __entry->vpid = vpid; + ), + + TP_printk("VPID: 0x%08lx", __entry->vpid) +); + +#endif /* _TRACE_KVM_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/loongarch/kvm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include -- Gitee From 75795017549637b1cd7562020e62203f8bfcadc2 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:27 +0800 Subject: [PATCH 0152/2138] LoongArch: KVM: Implement basic vcpu interfaces ANBZ: #8436 commit 2fc3bd86db4b6f6992d4b459879a17b2ae6b2b3d upstream. 
Implement basic vcpu interfaces, including: 1, vcpu create and destroy interface, saving info into vcpu arch structure such as vcpu exception entrance, vcpu enter guest pointer, etc. Init vcpu timer and set address translation mode when vcpu create. 2, vcpu run interface, handling mmio, iocsr reading fault and deliver interrupt, lose fpu before vcpu enter guest. 3, vcpu handle exit interface, getting the exit code by ESTAT register and using kvm exception vector to handle it. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/vcpu.c | 261 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 261 insertions(+) create mode 100644 arch/loongarch/kvm/vcpu.c diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c new file mode 100644 index 000000000000..349cecca1e62 --- /dev/null +++ b/arch/loongarch/kvm/vcpu.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include "trace.h" + +/* + * kvm_check_requests - check and handle pending vCPU requests + * + * Return: RESUME_GUEST if we should enter the guest + * RESUME_HOST if we should exit to userspace + */ +static int kvm_check_requests(struct kvm_vcpu *vcpu) +{ + if (!kvm_request_pending(vcpu)) + return RESUME_GUEST; + + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) + vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */ + + if (kvm_dirty_ring_check_request(vcpu)) + return RESUME_HOST; + + return RESUME_GUEST; +} + +/* + * Check and handle pending signal and vCPU requests etc + * Run with irq enabled and preempt enabled + * + * Return: RESUME_GUEST if we should enter the guest + * RESUME_HOST if we should exit to userspace + * < 0 if we should exit to userspace, 
where the return value + * indicates an error + */ +static int kvm_enter_guest_check(struct kvm_vcpu *vcpu) +{ + int ret; + + /* + * Check conditions before entering the guest + */ + ret = xfer_to_guest_mode_handle_work(vcpu); + if (ret < 0) + return ret; + + ret = kvm_check_requests(vcpu); + + return ret; +} + +/* + * Called with irq enabled + * + * Return: RESUME_GUEST if we should enter the guest, and irq disabled + * Others if we should exit to userspace + */ +static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) +{ + int ret; + + do { + ret = kvm_enter_guest_check(vcpu); + if (ret != RESUME_GUEST) + break; + + /* + * Handle vcpu timer, interrupts, check requests and + * check vmid before vcpu enter guest + */ + local_irq_disable(); + kvm_acquire_timer(vcpu); + kvm_deliver_intr(vcpu); + kvm_deliver_exception(vcpu); + /* Make sure the vcpu mode has been written */ + smp_store_mb(vcpu->mode, IN_GUEST_MODE); + kvm_check_vpid(vcpu); + vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); + /* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */ + vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; + + if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) { + /* make sure the vcpu mode has been written */ + smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); + local_irq_enable(); + ret = -EAGAIN; + } + } while (ret != RESUME_GUEST); + + return ret; +} + +/* + * Return 1 for resume guest and "<= 0" for resume host. 
+ */ +static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + int ret = RESUME_GUEST; + unsigned long estat = vcpu->arch.host_estat; + u32 intr = estat & 0x1fff; /* Ignore NMI */ + u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; + + vcpu->mode = OUTSIDE_GUEST_MODE; + + /* Set a default exit reason */ + run->exit_reason = KVM_EXIT_UNKNOWN; + + guest_timing_exit_irqoff(); + guest_state_exit_irqoff(); + local_irq_enable(); + + trace_kvm_exit(vcpu, ecode); + if (ecode) { + ret = kvm_handle_fault(vcpu, ecode); + } else { + WARN(!intr, "vm exiting with suspicious irq\n"); + ++vcpu->stat.int_exits; + } + + if (ret == RESUME_GUEST) + ret = kvm_pre_enter_guest(vcpu); + + if (ret != RESUME_GUEST) { + local_irq_disable(); + return ret; + } + + guest_timing_enter_irqoff(); + guest_state_enter_irqoff(); + trace_kvm_reenter(vcpu); + + return RESUME_GUEST; +} + +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) +{ + return 0; +} + +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) +{ + unsigned long timer_hz; + struct loongarch_csrs *csr; + + vcpu->arch.vpid = 0; + + hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); + vcpu->arch.swtimer.function = kvm_swtimer_wakeup; + + vcpu->arch.handle_exit = kvm_handle_exit; + vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; + vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); + if (!vcpu->arch.csr) + return -ENOMEM; + + /* + * All kvm exceptions share one exception entry, and host <-> guest + * switch also switch ECFG.VS field, keep host ECFG.VS info here. + */ + vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS); + + /* Init */ + vcpu->arch.last_sched_cpu = -1; + + /* + * Initialize guest register state to valid architectural reset state. 
+ */ + timer_hz = calc_const_freq(); + kvm_init_timer(vcpu, timer_hz); + + /* Set Initialize mode for guest */ + csr = vcpu->arch.csr; + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA); + + /* Set cpuid */ + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id); + + /* Start with no pending virtual guest interrupts */ + csr->csrs[LOONGARCH_CSR_GINTC] = 0; + + return 0; +} + +void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) +{ +} + +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + int cpu; + struct kvm_context *context; + + hrtimer_cancel(&vcpu->arch.swtimer); + kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); + kfree(vcpu->arch.csr); + + /* + * If the vCPU is freed and reused as another vCPU, we don't want the + * matching pointer wrongly hanging around in last_vcpu. + */ + for_each_possible_cpu(cpu) { + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + if (context->last_vcpu == vcpu) + context->last_vcpu = NULL; + } +} + +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +{ + int r = -EINTR; + struct kvm_run *run = vcpu->run; + + if (vcpu->mmio_needed) { + if (!vcpu->mmio_is_write) + kvm_complete_mmio_read(vcpu, run); + vcpu->mmio_needed = 0; + } + + if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) { + if (!run->iocsr_io.is_write) + kvm_complete_iocsr_read(vcpu, run); + } + + if (run->immediate_exit) + return r; + + /* Clear exit_reason */ + run->exit_reason = KVM_EXIT_UNKNOWN; + lose_fpu(1); + vcpu_load(vcpu); + kvm_sigset_activate(vcpu); + r = kvm_pre_enter_guest(vcpu); + if (r != RESUME_GUEST) + goto out; + + guest_timing_enter_irqoff(); + guest_state_enter_irqoff(); + trace_kvm_enter(vcpu); + r = kvm_loongarch_ops->enter_guest(run, vcpu); + + trace_kvm_out(vcpu); + /* + * Guest exit is already recorded at kvm_handle_exit() + * return value must not be RESUME_GUEST + */ + local_irq_enable(); +out: + kvm_sigset_deactivate(vcpu); + vcpu_put(vcpu); + + return r; +} -- Gitee From 15913609ed99a1eb4ac237e21b02cf1af8acec06 Mon Sep 17 
00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:27 +0800 Subject: [PATCH 0153/2138] LoongArch: KVM: Implement basic vcpu ioctl interfaces ANBZ: #8436 commit f6deff355b5c7072a05232f0861cfdfe372c6bfd upstream. Implement basic vcpu ioctl interfaces, including: 1, vcpu KVM_ENABLE_CAP ioctl interface. 2, vcpu get registers and set registers operations, it is called when user space use the ioctl interface to get or set regs. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/vcpu.c | 261 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 261 insertions(+) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 349cecca1e62..487065565909 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -141,6 +141,267 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) return RESUME_GUEST; } +static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) +{ + unsigned long gintc; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(id) & INVALID_GCSR) + return -EINVAL; + + if (id == LOONGARCH_CSR_ESTAT) { + /* ESTAT IP0~IP7 get from GINTC */ + gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff; + *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2); + return 0; + } + + /* + * Get software CSR state since software state is consistent + * with hardware for synchronous ioctl + */ + *val = kvm_read_sw_gcsr(csr, id); + + return 0; +} + +static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) +{ + int ret = 0, gintc; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(id) & INVALID_GCSR) + return -EINVAL; + + if (id == LOONGARCH_CSR_ESTAT) { + /* ESTAT IP0~IP7 inject through GINTC */ + gintc = (val >> 2) & 0xff; + kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc); + 
+ gintc = val & ~(0xffUL << 2); + kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc); + + return ret; + } + + kvm_write_sw_gcsr(csr, id, val); + + return ret; +} + +static int kvm_get_one_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, u64 *v) +{ + int id, ret = 0; + u64 type = reg->id & KVM_REG_LOONGARCH_MASK; + + switch (type) { + case KVM_REG_LOONGARCH_CSR: + id = KVM_GET_IOC_CSR_IDX(reg->id); + ret = _kvm_getcsr(vcpu, id, v); + break; + case KVM_REG_LOONGARCH_CPUCFG: + id = KVM_GET_IOC_CPUCFG_IDX(reg->id); + if (id >= 0 && id < KVM_MAX_CPUCFG_REGS) + *v = vcpu->arch.cpucfg[id]; + else + ret = -EINVAL; + break; + case KVM_REG_LOONGARCH_KVM: + switch (reg->id) { + case KVM_REG_LOONGARCH_COUNTER: + *v = drdtime() + vcpu->kvm->arch.time_offset; + break; + default: + ret = -EINVAL; + break; + } + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + int ret = 0; + u64 v, size = reg->id & KVM_REG_SIZE_MASK; + + switch (size) { + case KVM_REG_SIZE_U64: + ret = kvm_get_one_reg(vcpu, reg, &v); + if (ret) + return ret; + ret = put_user(v, (u64 __user *)(long)reg->addr); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int kvm_set_one_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, u64 v) +{ + int id, ret = 0; + u64 type = reg->id & KVM_REG_LOONGARCH_MASK; + + switch (type) { + case KVM_REG_LOONGARCH_CSR: + id = KVM_GET_IOC_CSR_IDX(reg->id); + ret = _kvm_setcsr(vcpu, id, v); + break; + case KVM_REG_LOONGARCH_CPUCFG: + id = KVM_GET_IOC_CPUCFG_IDX(reg->id); + if (id >= 0 && id < KVM_MAX_CPUCFG_REGS) + vcpu->arch.cpucfg[id] = (u32)v; + else + ret = -EINVAL; + break; + case KVM_REG_LOONGARCH_KVM: + switch (reg->id) { + case KVM_REG_LOONGARCH_COUNTER: + /* + * gftoffset is relative with board, not vcpu + * only set for the first time for smp system + */ + if (vcpu->vcpu_id == 0) + vcpu->kvm->arch.time_offset = (signed long)(v - 
drdtime()); + break; + case KVM_REG_LOONGARCH_VCPU_RESET: + kvm_reset_timer(vcpu); + memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); + memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); + break; + default: + ret = -EINVAL; + break; + } + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + int ret = 0; + u64 v, size = reg->id & KVM_REG_SIZE_MASK; + + switch (size) { + case KVM_REG_SIZE_U64: + ret = get_user(v, (u64 __user *)(long)reg->addr); + if (ret) + return ret; + break; + default: + return -EINVAL; + } + + return kvm_set_one_reg(vcpu, reg, v); +} + +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) + regs->gpr[i] = vcpu->arch.gprs[i]; + + regs->pc = vcpu->arch.pc; + + return 0; +} + +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + int i; + + for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) + vcpu->arch.gprs[i] = regs->gpr[i]; + + vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ + vcpu->arch.pc = regs->pc; + + return 0; +} + +static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, + struct kvm_enable_cap *cap) +{ + /* FPU is enabled by default, will support LSX/LASX later. */ + return -EINVAL; +} + +long kvm_arch_vcpu_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + long r; + void __user *argp = (void __user *)arg; + struct kvm_vcpu *vcpu = filp->private_data; + + /* + * Only software CSR should be modified + * + * If any hardware CSR register is modified, vcpu_load/vcpu_put pair + * should be used. 
Since CSR registers owns by this vcpu, if switch + * to other vcpus, other vcpus need reload CSR registers. + * + * If software CSR is modified, bit KVM_LARCH_HWCSR_USABLE should + * be clear in vcpu->arch.aux_inuse, and vcpu_load will check + * aux_inuse flag and reload CSR registers form software. + */ + + switch (ioctl) { + case KVM_SET_ONE_REG: + case KVM_GET_ONE_REG: { + struct kvm_one_reg reg; + + r = -EFAULT; + if (copy_from_user(®, argp, sizeof(reg))) + break; + if (ioctl == KVM_SET_ONE_REG) { + r = kvm_set_reg(vcpu, ®); + vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; + } else + r = kvm_get_reg(vcpu, ®); + break; + } + case KVM_ENABLE_CAP: { + struct kvm_enable_cap cap; + + r = -EFAULT; + if (copy_from_user(&cap, argp, sizeof(cap))) + break; + r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); + break; + } + default: + r = -ENOIOCTLCMD; + break; + } + + return r; +} + int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) { return 0; -- Gitee From 43255e69c90c7219238ce6f90b6820dba4e63a87 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 0154/2138] LoongArch: KVM: Implement fpu operations for vcpu ANBZ: #8436 commit 84be4212dcda361b52fc5a071044e5fa237a58d7 upstream. Implement LoongArch fpu related interface for vcpu, such as get fpu, set fpu, own fpu and lose fpu, etc. 
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/vcpu.c | 56 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 487065565909..0f19c8b0c028 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -402,6 +402,62 @@ long kvm_arch_vcpu_ioctl(struct file *filp, return r; } +int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + int i = 0; + + fpu->fcc = vcpu->arch.fpu.fcc; + fpu->fcsr = vcpu->arch.fpu.fcsr; + for (i = 0; i < NUM_FPU_REGS; i++) + memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64); + + return 0; +} + +int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + int i = 0; + + vcpu->arch.fpu.fcc = fpu->fcc; + vcpu->arch.fpu.fcsr = fpu->fcsr; + for (i = 0; i < NUM_FPU_REGS; i++) + memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64); + + return 0; +} + +/* Enable FPU and restore context */ +void kvm_own_fpu(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + + /* Enable FPU */ + set_csr_euen(CSR_EUEN_FPEN); + + kvm_restore_fpu(&vcpu->arch.fpu); + vcpu->arch.aux_inuse |= KVM_LARCH_FPU; + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); + + preempt_enable(); +} + +/* Save context and disable FPU */ +void kvm_lose_fpu(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + + if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { + kvm_save_fpu(&vcpu->arch.fpu); + vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); + + /* Disable FPU */ + clear_csr_euen(CSR_EUEN_FPEN); + } + + preempt_enable(); +} + int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) { return 0; -- Gitee From 7156332032e9f93cab84b530b9dff9eba2698478 Mon Sep 17 00:00:00 2001 From: 
Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 0155/2138] LoongArch: KVM: Implement vcpu interrupt operations ANBZ: #8436 commit f45ad5b8aa9335bc6b30331b739e778f2f730b35 upstream. Implement vcpu interrupt operations such as vcpu set irq and vcpu clear irq, using set_gcsr_estat() to set irq which is parsed by the irq bitmap. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/interrupt.c | 183 +++++++++++++++++++++++++++++++++ arch/loongarch/kvm/vcpu.c | 38 +++++++ 2 files changed, 221 insertions(+) create mode 100644 arch/loongarch/kvm/interrupt.c diff --git a/arch/loongarch/kvm/interrupt.c b/arch/loongarch/kvm/interrupt.c new file mode 100644 index 000000000000..4c3f22de4b40 --- /dev/null +++ b/arch/loongarch/kvm/interrupt.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include + +static unsigned int priority_to_irq[EXCCODE_INT_NUM] = { + [INT_TI] = CPU_TIMER, + [INT_IPI] = CPU_IPI, + [INT_SWI0] = CPU_SIP0, + [INT_SWI1] = CPU_SIP1, + [INT_HWI0] = CPU_IP0, + [INT_HWI1] = CPU_IP1, + [INT_HWI2] = CPU_IP2, + [INT_HWI3] = CPU_IP3, + [INT_HWI4] = CPU_IP4, + [INT_HWI5] = CPU_IP5, + [INT_HWI6] = CPU_IP6, + [INT_HWI7] = CPU_IP7, +}; + +static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority) +{ + unsigned int irq = 0; + + clear_bit(priority, &vcpu->arch.irq_pending); + if (priority < EXCCODE_INT_NUM) + irq = priority_to_irq[priority]; + + switch (priority) { + case INT_TI: + case INT_IPI: + case INT_SWI0: + case INT_SWI1: + set_gcsr_estat(irq); + break; + + case INT_HWI0 ... 
INT_HWI7: + set_csr_gintc(irq); + break; + + default: + break; + } + + return 1; +} + +static int kvm_irq_clear(struct kvm_vcpu *vcpu, unsigned int priority) +{ + unsigned int irq = 0; + + clear_bit(priority, &vcpu->arch.irq_clear); + if (priority < EXCCODE_INT_NUM) + irq = priority_to_irq[priority]; + + switch (priority) { + case INT_TI: + case INT_IPI: + case INT_SWI0: + case INT_SWI1: + clear_gcsr_estat(irq); + break; + + case INT_HWI0 ... INT_HWI7: + clear_csr_gintc(irq); + break; + + default: + break; + } + + return 1; +} + +void kvm_deliver_intr(struct kvm_vcpu *vcpu) +{ + unsigned int priority; + unsigned long *pending = &vcpu->arch.irq_pending; + unsigned long *pending_clr = &vcpu->arch.irq_clear; + + if (!(*pending) && !(*pending_clr)) + return; + + if (*pending_clr) { + priority = __ffs(*pending_clr); + while (priority <= INT_IPI) { + kvm_irq_clear(vcpu, priority); + priority = find_next_bit(pending_clr, + BITS_PER_BYTE * sizeof(*pending_clr), + priority + 1); + } + } + + if (*pending) { + priority = __ffs(*pending); + while (priority <= INT_IPI) { + kvm_irq_deliver(vcpu, priority); + priority = find_next_bit(pending, + BITS_PER_BYTE * sizeof(*pending), + priority + 1); + } + } +} + +int kvm_pending_timer(struct kvm_vcpu *vcpu) +{ + return test_bit(INT_TI, &vcpu->arch.irq_pending); +} + +/* + * Only support illegal instruction or illegal Address Error exception, + * Other exceptions are injected by hardware in kvm mode + */ +static void _kvm_deliver_exception(struct kvm_vcpu *vcpu, + unsigned int code, unsigned int subcode) +{ + unsigned long val, vec_size; + + /* + * BADV is added for EXCCODE_ADE exception + * Use PC register (GVA address) if it is instruction exeception + * Else use BADV from host side (GPA address) for data exeception + */ + if (code == EXCCODE_ADE) { + if (subcode == EXSUBCODE_ADEF) + val = vcpu->arch.pc; + else + val = vcpu->arch.badv; + kvm_write_hw_gcsr(LOONGARCH_CSR_BADV, val); + } + + /* Set exception instruction */ + 
kvm_write_hw_gcsr(LOONGARCH_CSR_BADI, vcpu->arch.badi); + + /* + * Save CRMD in PRMD + * Set IRQ disabled and PLV0 with CRMD + */ + val = kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD); + kvm_write_hw_gcsr(LOONGARCH_CSR_PRMD, val); + val = val & ~(CSR_CRMD_PLV | CSR_CRMD_IE); + kvm_write_hw_gcsr(LOONGARCH_CSR_CRMD, val); + + /* Set exception PC address */ + kvm_write_hw_gcsr(LOONGARCH_CSR_ERA, vcpu->arch.pc); + + /* + * Set exception code + * Exception and interrupt can be inject at the same time + * Hardware will handle exception first and then extern interrupt + * Exception code is Ecode in ESTAT[16:21] + * Interrupt code in ESTAT[0:12] + */ + val = kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT); + val = (val & ~CSR_ESTAT_EXC) | code; + kvm_write_hw_gcsr(LOONGARCH_CSR_ESTAT, val); + + /* Calculate expcetion entry address */ + val = kvm_read_hw_gcsr(LOONGARCH_CSR_ECFG); + vec_size = (val & CSR_ECFG_VS) >> CSR_ECFG_VS_SHIFT; + if (vec_size) + vec_size = (1 << vec_size) * 4; + val = kvm_read_hw_gcsr(LOONGARCH_CSR_EENTRY); + vcpu->arch.pc = val + code * vec_size; +} + +void kvm_deliver_exception(struct kvm_vcpu *vcpu) +{ + unsigned int code; + unsigned long *pending = &vcpu->arch.exception_pending; + + if (*pending) { + code = __ffs(*pending); + _kvm_deliver_exception(vcpu, code, vcpu->arch.esubcode); + *pending = 0; + vcpu->arch.esubcode = 0; + } +} diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 0f19c8b0c028..7576f5a735ea 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -458,6 +458,44 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) preempt_enable(); } +int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) +{ + int intr = (int)irq->irq; + + if (intr > 0) + kvm_queue_irq(vcpu, intr); + else if (intr < 0) + kvm_dequeue_irq(vcpu, -intr); + else { + kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq); + return -EINVAL; + } + + kvm_vcpu_kick(vcpu); + + return 0; +} + +long kvm_arch_vcpu_async_ioctl(struct 
file *filp, + unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct kvm_vcpu *vcpu = filp->private_data; + + if (ioctl == KVM_INTERRUPT) { + struct kvm_interrupt irq; + + if (copy_from_user(&irq, argp, sizeof(irq))) + return -EFAULT; + + kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); + + return kvm_vcpu_ioctl_interrupt(vcpu, &irq); + } + + return -ENOIOCTLCMD; +} + int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) { return 0; -- Gitee From 04d5bcd27964eac7297424272f7eded2026e2a1f Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 0156/2138] LoongArch: KVM: Implement vcpu load and vcpu put operations ANBZ: #8436 commit 1f4c39b9892e12385e075efa9cb3f014b700204d upstream. Implement LoongArch vcpu load and vcpu put operations, including load csr value into hardware and save csr value into vcpu structure. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/vcpu.c | 203 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 203 insertions(+) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 7576f5a735ea..b16fe2913e11 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -569,6 +569,209 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) } } +static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + bool migrated; + struct kvm_context *context; + struct loongarch_csrs *csr = vcpu->arch.csr; + + /* + * Have we migrated to a different CPU? + * If so, any old guest TLB state may be stale. + */ + migrated = (vcpu->arch.last_sched_cpu != cpu); + + /* + * Was this the last vCPU to run on this CPU? + * If not, any old guest state from this vCPU will have been clobbered. 
+ */ + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + if (migrated || (context->last_vcpu != vcpu)) + vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; + context->last_vcpu = vcpu; + + /* Restore timer state regardless */ + kvm_restore_timer(vcpu); + + /* Control guest page CCA attribute */ + change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT); + + /* Don't bother restoring registers multiple times unless necessary */ + if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) + return 0; + + write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset); + + /* Restore guest CSR registers */ + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7); + 
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); + + /* Restore Root.GINTC from unused Guest.GINTC register */ + write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]); + + /* + * We should clear linked load bit to break interrupted atomics. This + * prevents a SC on the next vCPU from succeeding by matching a LL on + * the previous vCPU. + */ + if (vcpu->kvm->created_vcpus > 1) + set_gcsr_llbctl(CSR_LLBCTL_WCLLB); + + vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE; + + return 0; +} + +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + if (vcpu->arch.last_sched_cpu != cpu) { + kvm_debug("[%d->%d]KVM vCPU[%d] switch\n", + vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); + /* + * Migrate the timer interrupt to the current CPU so that it + * always interrupts the guest and synchronously triggers a + * guest timer interrupt. 
+ */ + kvm_migrate_count(vcpu); + } + + /* Restore guest state to registers */ + _kvm_vcpu_load(vcpu, cpu); + local_irq_restore(flags); +} + +static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + kvm_lose_fpu(vcpu); + + /* + * Update CSR state from hardware if software CSR state is stale, + * most CSR registers are kept unchanged during process context + * switch except CSR registers like remaining timer tick value and + * injected interrupt state. + */ + if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST) + goto out; + + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6); 
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); + + vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST; + +out: + kvm_save_timer(vcpu); + /* Save Root.GINTC into unused Guest.GINTC register */ + csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc(); + + return 0; +} + +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) +{ + int cpu; + unsigned long flags; + + local_irq_save(flags); + cpu = smp_processor_id(); + vcpu->arch.last_sched_cpu = cpu; + + /* Save guest state in registers */ + _kvm_vcpu_put(vcpu, cpu); + local_irq_restore(flags); +} + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) { int r = -EINTR; -- Gitee From a6e2573da6de15dfa8d401307d34aeafa895fe6d Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 0157/2138] LoongArch: KVM: Implement misc vcpu related interfaces ANBZ: #8436 commit 93a9a197b680dd6b98afb629d2bfb3bd51a83d84 upstream. 1, Implement LoongArch vcpu status description such as idle exits counter, signal exits counter, cpucfg exits counter, etc. 2, Implement some misc vcpu relaterd interfaces, such as vcpu runnable, vcpu should kick, vcpu dump regs, etc. 
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/vcpu.c | 120 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 120 insertions(+) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index b16fe2913e11..73d0c2b9c1a5 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -13,6 +13,23 @@ #define CREATE_TRACE_POINTS #include "trace.h" +const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { + KVM_GENERIC_VCPU_STATS(), + STATS_DESC_COUNTER(VCPU, int_exits), + STATS_DESC_COUNTER(VCPU, idle_exits), + STATS_DESC_COUNTER(VCPU, cpucfg_exits), + STATS_DESC_COUNTER(VCPU, signal_exits), +}; + +const struct kvm_stats_header kvm_vcpu_stats_header = { + .name_size = KVM_STATS_NAME_SIZE, + .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc), + .id_offset = sizeof(struct kvm_stats_header), + .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, + .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + + sizeof(kvm_vcpu_stats_desc), +}; + /* * kvm_check_requests - check and handle pending vCPU requests * @@ -141,6 +158,109 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) return RESUME_GUEST; } +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) +{ + return !!(vcpu->arch.irq_pending) && + vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE; +} + +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; +} + +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) +{ + return false; +} + +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ + return VM_FAULT_SIGBUS; +} + +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr) +{ + return -EINVAL; +} + +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) +{ + 
return kvm_pending_timer(vcpu) || + kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI); +} + +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) +{ + int i; + + kvm_debug("vCPU Register Dump:\n"); + kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc); + kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending); + + for (i = 0; i < 32; i += 4) { + kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i, + vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], + vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); + } + + kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n", + kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD), + kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT)); + + kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA)); + + return 0; +} + +int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + *mp_state = vcpu->arch.mp_state; + + return 0; +} + +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + int ret = 0; + + switch (mp_state->mp_state) { + case KVM_MP_STATE_RUNNABLE: + vcpu->arch.mp_state = *mp_state; + break; + default: + ret = -EINVAL; + } + + return ret; +} + +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) +{ + return -EINVAL; +} + +/** + * kvm_migrate_count() - Migrate timer. + * @vcpu: Virtual CPU. + * + * Migrate hrtimer to the current CPU by cancelling and restarting it + * if the hrtimer is active. + * + * Must be called when the vCPU is migrated to a different CPU, so that + * the timer can interrupt the guest at the new CPU, and the timer irq can + * be delivered to the vCPU. 
+ */ +static void kvm_migrate_count(struct kvm_vcpu *vcpu) +{ + if (hrtimer_cancel(&vcpu->arch.swtimer)) + hrtimer_restart(&vcpu->arch.swtimer); +} + static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) { unsigned long gintc; -- Gitee From 0c6675d2bc1e7438c279dd3c974d8b7aa76855ac Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 0158/2138] LoongArch: KVM: Implement vcpu timer operations ANBZ: #8436 commit a5857b9ff6e06cac4adc8d671a74b7739a88623e upstream. Implement LoongArch vcpu timer operations such as init kvm timer, acquire kvm timer, save kvm timer and restore kvm timer. When vcpu exit, we use kvm soft timer to emulate hardware timer. If timeout happens, the vcpu timer interrupt will be set and it is going to be handled at vcpu next entrance. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/timer.c | 197 +++++++++++++++++++++++++++++++++++++ 1 file changed, 197 insertions(+) create mode 100644 arch/loongarch/kvm/timer.c diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c new file mode 100644 index 000000000000..284bf553fefe --- /dev/null +++ b/arch/loongarch/kvm/timer.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include + +/* + * ktime_to_tick() - Scale ktime_t to timer tick value. + */ +static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now) +{ + u64 delta; + + delta = ktime_to_ns(now); + return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC); +} + +static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick) +{ + return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz); +} + +/* + * Push timer forward on timeout. 
+ * Handle an hrtimer event by push the hrtimer forward a period. + */ +static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu) +{ + unsigned long cfg, period; + + /* Add periodic tick to current expire time */ + cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG); + if (cfg & CSR_TCFG_PERIOD) { + period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL); + hrtimer_add_expires_ns(&vcpu->arch.swtimer, period); + return HRTIMER_RESTART; + } else + return HRTIMER_NORESTART; +} + +/* Low level hrtimer wake routine */ +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer) +{ + struct kvm_vcpu *vcpu; + + vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer); + kvm_queue_irq(vcpu, INT_TI); + rcuwait_wake_up(&vcpu->wait); + + return kvm_count_timeout(vcpu); +} + +/* + * Initialise the timer to the specified frequency, zero it + */ +void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz) +{ + vcpu->arch.timer_mhz = timer_hz >> 20; + + /* Starting at 0 */ + kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0); +} + +/* + * Restore hard timer state and enable guest to access timer registers + * without trap, should be called with irq disabled + */ +void kvm_acquire_timer(struct kvm_vcpu *vcpu) +{ + unsigned long cfg; + + cfg = read_csr_gcfg(); + if (!(cfg & CSR_GCFG_TIT)) + return; + + /* Enable guest access to hard timer */ + write_csr_gcfg(cfg & ~CSR_GCFG_TIT); + + /* + * Freeze the soft-timer and sync the guest stable timer with it. We do + * this with interrupts disabled to avoid latency. + */ + hrtimer_cancel(&vcpu->arch.swtimer); +} + +/* + * Restore soft timer state from saved context. 
+ */ +void kvm_restore_timer(struct kvm_vcpu *vcpu) +{ + unsigned long cfg, delta, period; + ktime_t expire, now; + struct loongarch_csrs *csr = vcpu->arch.csr; + + /* + * Set guest stable timer cfg csr + */ + cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG); + if (!(cfg & CSR_TCFG_EN)) { + /* Guest timer is disabled, just restore timer registers */ + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL); + return; + } + + /* + * Set remainder tick value if not expired + */ + now = ktime_get(); + expire = vcpu->arch.expire; + if (ktime_before(now, expire)) + delta = ktime_to_tick(vcpu, ktime_sub(expire, now)); + else { + if (cfg & CSR_TCFG_PERIOD) { + period = cfg & CSR_TCFG_VAL; + delta = ktime_to_tick(vcpu, ktime_sub(now, expire)); + delta = period - (delta % period); + } else + delta = 0; + /* + * Inject timer here though sw timer should inject timer + * interrupt async already, since sw timer may be cancelled + * during injecting intr async in function kvm_acquire_timer + */ + kvm_queue_irq(vcpu, INT_TI); + } + + write_gcsr_timertick(delta); +} + +/* + * Save guest timer state and switch to software emulation of guest + * timer. The hard timer must already be in use, so preemption should be + * disabled. 
+ */ +static void _kvm_save_timer(struct kvm_vcpu *vcpu) +{ + unsigned long ticks, delta; + ktime_t expire; + struct loongarch_csrs *csr = vcpu->arch.csr; + + ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL); + delta = tick_to_ns(vcpu, ticks); + expire = ktime_add_ns(ktime_get(), delta); + vcpu->arch.expire = expire; + if (ticks) { + /* + * Update hrtimer to use new timeout + * HRTIMER_MODE_PINNED is suggested since vcpu may run in + * the same physical cpu in next time + */ + hrtimer_cancel(&vcpu->arch.swtimer); + hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED); + } else + /* + * Inject timer interrupt so that hall polling can dectect and exit + */ + kvm_queue_irq(vcpu, INT_TI); +} + +/* + * Save guest timer state and switch to soft guest timer if hard timer was in + * use. + */ +void kvm_save_timer(struct kvm_vcpu *vcpu) +{ + unsigned long cfg; + struct loongarch_csrs *csr = vcpu->arch.csr; + + preempt_disable(); + cfg = read_csr_gcfg(); + if (!(cfg & CSR_GCFG_TIT)) { + /* Disable guest use of hard timer */ + write_csr_gcfg(cfg | CSR_GCFG_TIT); + + /* Save hard timer state */ + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL); + if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN) + _kvm_save_timer(vcpu); + } + + /* Save timer-related state to vCPU context */ + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT); + preempt_enable(); +} + +void kvm_reset_timer(struct kvm_vcpu *vcpu) +{ + write_gcsr_timercfg(0); + kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0); + hrtimer_cancel(&vcpu->arch.swtimer); +} -- Gitee From 8a43bd4a51344bc0eff9e43ec0d9d6b3a6a8abd0 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 0159/2138] LoongArch: KVM: Implement virtual machine tlb operations ANBZ: #8436 commit d7f4ed4b22908077bd219dd172b27b51927aff6d upstream. 
Implement LoongArch virtual machine tlb operations such as flush tlb by specific gpa parameter and flush all of the virtual machine's tlbs. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/tlb.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 arch/loongarch/kvm/tlb.c diff --git a/arch/loongarch/kvm/tlb.c b/arch/loongarch/kvm/tlb.c new file mode 100644 index 000000000000..02535df6b51f --- /dev/null +++ b/arch/loongarch/kvm/tlb.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include + +/* + * kvm_flush_tlb_all() - Flush all root TLB entries for guests. + * + * Invalidate all entries including GVA-->GPA and GPA-->HPA mappings. + */ +void kvm_flush_tlb_all(void) +{ + unsigned long flags; + + local_irq_save(flags); + invtlb_all(INVTLB_ALLGID, 0, 0); + local_irq_restore(flags); +} + +void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa) +{ + unsigned long flags; + + local_irq_save(flags); + gpa &= (PAGE_MASK << 1); + invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa); + local_irq_restore(flags); +} -- Gitee From 813adbbe632cf40307aca6f70fc87c22ffc3efcd Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 0160/2138] LoongArch: KVM: Implement kvm mmu operations ANBZ: #8436 commit 752e2cd7b4fb412f3e008493e0195e357bab9773 upstream. Implement LoongArch kvm mmu, it is used to switch gpa to hpa when guest exit because of address translation exception. This patch implement: allocating gpa page table, searching gpa from it, and flushing guest gpa in the table. 
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/include/asm/kvm_mmu.h | 139 ++++ arch/loongarch/kvm/mmu.c | 914 +++++++++++++++++++++++++++ 2 files changed, 1053 insertions(+) create mode 100644 arch/loongarch/include/asm/kvm_mmu.h create mode 100644 arch/loongarch/kvm/mmu.c diff --git a/arch/loongarch/include/asm/kvm_mmu.h b/arch/loongarch/include/asm/kvm_mmu.h new file mode 100644 index 000000000000..099bafc6f797 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_mmu.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_KVM_MMU_H__ +#define __ASM_LOONGARCH_KVM_MMU_H__ + +#include +#include +#include + +/* + * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels + * for which pages need to be cached. 
+ */ +#define KVM_MMU_CACHE_MIN_PAGES (CONFIG_PGTABLE_LEVELS - 1) + +#define _KVM_FLUSH_PGTABLE 0x1 +#define _KVM_HAS_PGMASK 0x2 +#define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot)) +#define kvm_pte_pfn(x) ((phys_addr_t)((x & _PFN_MASK) >> PFN_PTE_SHIFT)) + +typedef unsigned long kvm_pte_t; +typedef struct kvm_ptw_ctx kvm_ptw_ctx; +typedef int (*kvm_pte_ops)(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx); + +struct kvm_ptw_ctx { + kvm_pte_ops ops; + unsigned long flag; + + /* for kvm_arch_mmu_enable_log_dirty_pt_masked use */ + unsigned long mask; + unsigned long gfn; + + /* page walk mmu info */ + unsigned int level; + unsigned long pgtable_shift; + unsigned long invalid_entry; + unsigned long *invalid_ptes; + unsigned int *pte_shifts; + void *opaque; + + /* free pte table page list */ + struct list_head list; +}; + +kvm_pte_t *kvm_pgd_alloc(void); + +static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val) +{ + WRITE_ONCE(*ptep, val); +} + +static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; } +static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; } +static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; } +static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; } + +static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte) +{ + return pte | _PAGE_ACCESSED; +} + +static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte) +{ + return pte & ~_PAGE_ACCESSED; +} + +static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte) +{ + return pte | _PAGE_DIRTY; +} + +static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte) +{ + return pte & ~_PAGE_DIRTY; +} + +static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte) +{ + return pte | _PAGE_HUGE; +} + +static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte) +{ + return pte & ~_PAGE_HUGE; +} + +static inline int kvm_need_flush(kvm_ptw_ctx *ctx) +{ + return ctx->flag & _KVM_FLUSH_PGTABLE; +} + +static inline kvm_pte_t 
*kvm_pgtable_offset(kvm_ptw_ctx *ctx, kvm_pte_t *table, + phys_addr_t addr) +{ + + return table + ((addr >> ctx->pgtable_shift) & (PTRS_PER_PTE - 1)); +} + +static inline phys_addr_t kvm_pgtable_addr_end(kvm_ptw_ctx *ctx, + phys_addr_t addr, phys_addr_t end) +{ + phys_addr_t boundary, size; + + size = 0x1UL << ctx->pgtable_shift; + boundary = (addr + size) & ~(size - 1); + return (boundary - 1 < end - 1) ? boundary : end; +} + +static inline int kvm_pte_present(kvm_ptw_ctx *ctx, kvm_pte_t *entry) +{ + if (!ctx || ctx->level == 0) + return !!(*entry & _PAGE_PRESENT); + + return *entry != ctx->invalid_entry; +} + +static inline int kvm_pte_none(kvm_ptw_ctx *ctx, kvm_pte_t *entry) +{ + return *entry == ctx->invalid_entry; +} + +static inline void kvm_ptw_enter(kvm_ptw_ctx *ctx) +{ + ctx->level--; + ctx->pgtable_shift = ctx->pte_shifts[ctx->level]; + ctx->invalid_entry = ctx->invalid_ptes[ctx->level]; +} + +static inline void kvm_ptw_exit(kvm_ptw_ctx *ctx) +{ + ctx->level++; + ctx->pgtable_shift = ctx->pte_shifts[ctx->level]; + ctx->invalid_entry = ctx->invalid_ptes[ctx->level]; +} + +#endif /* __ASM_LOONGARCH_KVM_MMU_H__ */ diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c new file mode 100644 index 000000000000..80480df5f550 --- /dev/null +++ b/arch/loongarch/kvm/mmu.c @@ -0,0 +1,914 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static inline void kvm_ptw_prepare(struct kvm *kvm, kvm_ptw_ctx *ctx) +{ + ctx->level = kvm->arch.root_level; + /* pte table */ + ctx->invalid_ptes = kvm->arch.invalid_ptes; + ctx->pte_shifts = kvm->arch.pte_shifts; + ctx->pgtable_shift = ctx->pte_shifts[ctx->level]; + ctx->invalid_entry = ctx->invalid_ptes[ctx->level]; + ctx->opaque = kvm; +} + +/* + * Mark a range of guest physical address space old (all accesses fault) in the + * VM's GPA page table to allow 
detection of commonly used pages. + */ +static int kvm_mkold_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx) +{ + if (kvm_pte_young(*pte)) { + *pte = kvm_pte_mkold(*pte); + return 1; + } + + return 0; +} + +/* + * Mark a range of guest physical address space clean (writes fault) in the VM's + * GPA page table to allow dirty page tracking. + */ +static int kvm_mkclean_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx) +{ + gfn_t offset; + kvm_pte_t val; + + val = *pte; + /* + * For kvm_arch_mmu_enable_log_dirty_pt_masked with mask, start and end + * may cross hugepage, for first huge page parameter addr is equal to + * start, however for the second huge page addr is base address of + * this huge page, rather than start or end address + */ + if ((ctx->flag & _KVM_HAS_PGMASK) && !kvm_pte_huge(val)) { + offset = (addr >> PAGE_SHIFT) - ctx->gfn; + if (!(BIT(offset) & ctx->mask)) + return 0; + } + + /* + * Need not split huge page now, just set write-proect pte bit + * Split huge page until next write fault + */ + if (kvm_pte_dirty(val)) { + *pte = kvm_pte_mkclean(val); + return 1; + } + + return 0; +} + +/* + * Clear pte entry + */ +static int kvm_flush_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx) +{ + struct kvm *kvm; + + kvm = ctx->opaque; + if (ctx->level) + kvm->stat.hugepages--; + else + kvm->stat.pages--; + + *pte = ctx->invalid_entry; + + return 1; +} + +/* + * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory. + * + * Allocate a blank KVM GPA page directory (PGD) for representing guest physical + * to host physical page mappings. + * + * Returns: Pointer to new KVM GPA page directory. + * NULL on allocation failure. 
+ */ +kvm_pte_t *kvm_pgd_alloc(void) +{ + kvm_pte_t *pgd; + + pgd = (kvm_pte_t *)__get_free_pages(GFP_KERNEL, 0); + if (pgd) + pgd_init((void *)pgd); + + return pgd; +} + +static void _kvm_pte_init(void *addr, unsigned long val) +{ + unsigned long *p, *end; + + p = (unsigned long *)addr; + end = p + PTRS_PER_PTE; + do { + p[0] = val; + p[1] = val; + p[2] = val; + p[3] = val; + p[4] = val; + p += 8; + p[-3] = val; + p[-2] = val; + p[-1] = val; + } while (p != end); +} + +/* + * Caller must hold kvm->mm_lock + * + * Walk the page tables of kvm to find the PTE corresponding to the + * address @addr. If page tables don't exist for @addr, they will be created + * from the MMU cache if @cache is not NULL. + */ +static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm, + struct kvm_mmu_memory_cache *cache, + unsigned long addr, int level) +{ + kvm_ptw_ctx ctx; + kvm_pte_t *entry, *child; + + kvm_ptw_prepare(kvm, &ctx); + child = kvm->arch.pgd; + while (ctx.level > level) { + entry = kvm_pgtable_offset(&ctx, child, addr); + if (kvm_pte_none(&ctx, entry)) { + if (!cache) + return NULL; + + child = kvm_mmu_memory_cache_alloc(cache); + _kvm_pte_init(child, ctx.invalid_ptes[ctx.level - 1]); + kvm_set_pte(entry, __pa(child)); + } else if (kvm_pte_huge(*entry)) { + return entry; + } else + child = (kvm_pte_t *)__va(PHYSADDR(*entry)); + kvm_ptw_enter(&ctx); + } + + entry = kvm_pgtable_offset(&ctx, child, addr); + + return entry; +} + +/* + * Page walker for VM shadow mmu at last level + * The last level is small pte page or huge pmd page + */ +static int kvm_ptw_leaf(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx) +{ + int ret; + phys_addr_t next, start, size; + struct list_head *list; + kvm_pte_t *entry, *child; + + ret = 0; + start = addr; + child = (kvm_pte_t *)__va(PHYSADDR(*dir)); + entry = kvm_pgtable_offset(ctx, child, addr); + do { + next = addr + (0x1UL << ctx->pgtable_shift); + if (!kvm_pte_present(ctx, entry)) + continue; + + ret |= ctx->ops(entry, addr, 
ctx); + } while (entry++, addr = next, addr < end); + + if (kvm_need_flush(ctx)) { + size = 0x1UL << (ctx->pgtable_shift + PAGE_SHIFT - 3); + if (start + size == end) { + list = (struct list_head *)child; + list_add_tail(list, &ctx->list); + *dir = ctx->invalid_ptes[ctx->level + 1]; + } + } + + return ret; +} + +/* + * Page walker for VM shadow mmu at page table dir level + */ +static int kvm_ptw_dir(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx) +{ + int ret; + phys_addr_t next, start, size; + struct list_head *list; + kvm_pte_t *entry, *child; + + ret = 0; + start = addr; + child = (kvm_pte_t *)__va(PHYSADDR(*dir)); + entry = kvm_pgtable_offset(ctx, child, addr); + do { + next = kvm_pgtable_addr_end(ctx, addr, end); + if (!kvm_pte_present(ctx, entry)) + continue; + + if (kvm_pte_huge(*entry)) { + ret |= ctx->ops(entry, addr, ctx); + continue; + } + + kvm_ptw_enter(ctx); + if (ctx->level == 0) + ret |= kvm_ptw_leaf(entry, addr, next, ctx); + else + ret |= kvm_ptw_dir(entry, addr, next, ctx); + kvm_ptw_exit(ctx); + } while (entry++, addr = next, addr < end); + + if (kvm_need_flush(ctx)) { + size = 0x1UL << (ctx->pgtable_shift + PAGE_SHIFT - 3); + if (start + size == end) { + list = (struct list_head *)child; + list_add_tail(list, &ctx->list); + *dir = ctx->invalid_ptes[ctx->level + 1]; + } + } + + return ret; +} + +/* + * Page walker for VM shadow mmu at page root table + */ +static int kvm_ptw_top(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx) +{ + int ret; + phys_addr_t next; + kvm_pte_t *entry; + + ret = 0; + entry = kvm_pgtable_offset(ctx, dir, addr); + do { + next = kvm_pgtable_addr_end(ctx, addr, end); + if (!kvm_pte_present(ctx, entry)) + continue; + + kvm_ptw_enter(ctx); + ret |= kvm_ptw_dir(entry, addr, next, ctx); + kvm_ptw_exit(ctx); + } while (entry++, addr = next, addr < end); + + return ret; +} + +/* + * kvm_flush_range() - Flush a range of guest physical addresses. + * @kvm: KVM pointer. 
+ * @start_gfn: Guest frame number of first page in GPA range to flush. + * @end_gfn: Guest frame number of last page in GPA range to flush. + * @lock: Whether to hold mmu_lock or not + * + * Flushes a range of GPA mappings from the GPA page tables. + */ +static void kvm_flush_range(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn, int lock) +{ + int ret; + kvm_ptw_ctx ctx; + struct list_head *pos, *temp; + + ctx.ops = kvm_flush_pte; + ctx.flag = _KVM_FLUSH_PGTABLE; + kvm_ptw_prepare(kvm, &ctx); + INIT_LIST_HEAD(&ctx.list); + + if (lock) { + spin_lock(&kvm->mmu_lock); + ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, + end_gfn << PAGE_SHIFT, &ctx); + spin_unlock(&kvm->mmu_lock); + } else + ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, + end_gfn << PAGE_SHIFT, &ctx); + + /* Flush vpid for each vCPU individually */ + if (ret) + kvm_flush_remote_tlbs(kvm); + + /* + * free pte table page after mmu_lock + * the pte table page is linked together with ctx.list + */ + list_for_each_safe(pos, temp, &ctx.list) { + list_del(pos); + free_page((unsigned long)pos); + } +} + +/* + * kvm_mkclean_gpa_pt() - Make a range of guest physical addresses clean. + * @kvm: KVM pointer. + * @start_gfn: Guest frame number of first page in GPA range to flush. + * @end_gfn: Guest frame number of last page in GPA range to flush. + * + * Make a range of GPA mappings clean so that guest writes will fault and + * trigger dirty page logging. + * + * The caller must hold the @kvm->mmu_lock spinlock. + * + * Returns: Whether any GPA mappings were modified, which would require + * derived mappings (GVA page tables & TLB enties) to be + * invalidated. 
+ */ +static int kvm_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn) +{ + kvm_ptw_ctx ctx; + + ctx.ops = kvm_mkclean_pte; + ctx.flag = 0; + kvm_ptw_prepare(kvm, &ctx); + return kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, end_gfn << PAGE_SHIFT, &ctx); +} + +/* + * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages + * @kvm: The KVM pointer + * @slot: The memory slot associated with mask + * @gfn_offset: The gfn offset in memory slot + * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory + * slot to be write protected + * + * Walks bits set in mask write protects the associated pte's. Caller must + * acquire @kvm->mmu_lock. + */ +void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) +{ + kvm_ptw_ctx ctx; + gfn_t base_gfn = slot->base_gfn + gfn_offset; + gfn_t start = base_gfn + __ffs(mask); + gfn_t end = base_gfn + __fls(mask) + 1; + + ctx.ops = kvm_mkclean_pte; + ctx.flag = _KVM_HAS_PGMASK; + ctx.mask = mask; + ctx.gfn = base_gfn; + kvm_ptw_prepare(kvm, &ctx); + + kvm_ptw_top(kvm->arch.pgd, start << PAGE_SHIFT, end << PAGE_SHIFT, &ctx); +} + +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + int needs_flush; + + /* + * If dirty page logging is enabled, write protect all pages in the slot + * ready for dirty logging. + * + * There is no need to do this in any of the following cases: + * CREATE: No dirty mappings will already exist. 
+ * MOVE/DELETE: The old mappings will already have been cleaned up by + * kvm_arch_flush_shadow_memslot() + */ + if (change == KVM_MR_FLAGS_ONLY && + (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) && + new->flags & KVM_MEM_LOG_DIRTY_PAGES)) { + spin_lock(&kvm->mmu_lock); + /* Write protect GPA page table entries */ + needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn, + new->base_gfn + new->npages); + spin_unlock(&kvm->mmu_lock); + if (needs_flush) + kvm_flush_remote_tlbs(kvm); + } +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ + kvm_flush_range(kvm, 0, kvm->arch.gpa_size >> PAGE_SHIFT, 0); +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) +{ + /* + * The slot has been made invalid (ready for moving or deletion), so we + * need to ensure that it can no longer be accessed by any guest vCPUs. + */ + kvm_flush_range(kvm, slot->base_gfn, slot->base_gfn + slot->npages, 1); +} + +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) +{ + kvm_ptw_ctx ctx; + + ctx.flag = 0; + ctx.ops = kvm_flush_pte; + kvm_ptw_prepare(kvm, &ctx); + INIT_LIST_HEAD(&ctx.list); + + return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT, + range->end << PAGE_SHIFT, &ctx); +} + +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + unsigned long prot_bits; + kvm_pte_t *ptep; + kvm_pfn_t pfn = pte_pfn(range->arg.pte); + gpa_t gpa = range->start << PAGE_SHIFT; + + ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + if (!ptep) + return false; + + /* Replacing an absent or old page doesn't need flushes */ + if (!kvm_pte_present(NULL, ptep) || !kvm_pte_young(*ptep)) { + kvm_set_pte(ptep, 0); + return false; + } + + /* Fill new pte if write protected or page migrated */ + prot_bits = _PAGE_PRESENT | __READABLE; + prot_bits |= _CACHE_MASK & pte_val(range->arg.pte); + + /* + * Set _PAGE_WRITE or _PAGE_DIRTY iff old and new pte both support + * _PAGE_WRITE for map_page_fast if next page write fault + * _PAGE_DIRTY since gpa 
has already recorded as dirty page + */ + prot_bits |= __WRITEABLE & *ptep & pte_val(range->arg.pte); + kvm_set_pte(ptep, kvm_pfn_pte(pfn, __pgprot(prot_bits))); + + return true; +} + +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + kvm_ptw_ctx ctx; + + ctx.flag = 0; + ctx.ops = kvm_mkold_pte; + kvm_ptw_prepare(kvm, &ctx); + + return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT, + range->end << PAGE_SHIFT, &ctx); +} + +bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + gpa_t gpa = range->start << PAGE_SHIFT; + kvm_pte_t *ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + + if (ptep && kvm_pte_present(NULL, ptep) && kvm_pte_young(*ptep)) + return true; + + return false; +} + +/* + * kvm_map_page_fast() - Fast path GPA fault handler. + * @vcpu: vCPU pointer. + * @gpa: Guest physical address of fault. + * @write: Whether the fault was due to a write. + * + * Perform fast path GPA fault handling, doing all that can be done without + * calling into KVM. This handles marking old pages young (for idle page + * tracking), and dirtying of clean pages (for dirty page logging). + * + * Returns: 0 on success, in which case we can update derived mappings and + * resume guest execution. + * -EFAULT on failure due to absent GPA mapping or write to + * read-only page, in which case KVM must be consulted. 
+ */ +static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) +{ + int ret = 0; + kvm_pfn_t pfn = 0; + kvm_pte_t *ptep, changed, new; + gfn_t gfn = gpa >> PAGE_SHIFT; + struct kvm *kvm = vcpu->kvm; + struct kvm_memory_slot *slot; + + spin_lock(&kvm->mmu_lock); + + /* Fast path - just check GPA page table for an existing entry */ + ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + if (!ptep || !kvm_pte_present(NULL, ptep)) { + ret = -EFAULT; + goto out; + } + + /* Track access to pages marked old */ + new = *ptep; + if (!kvm_pte_young(new)) + new = kvm_pte_mkyoung(new); + /* call kvm_set_pfn_accessed() after unlock */ + + if (write && !kvm_pte_dirty(new)) { + if (!kvm_pte_write(new)) { + ret = -EFAULT; + goto out; + } + + if (kvm_pte_huge(new)) { + /* + * Do not set write permission when dirty logging is + * enabled for HugePages + */ + slot = gfn_to_memslot(kvm, gfn); + if (kvm_slot_dirty_track_enabled(slot)) { + ret = -EFAULT; + goto out; + } + } + + /* Track dirtying of writeable pages */ + new = kvm_pte_mkdirty(new); + } + + changed = new ^ (*ptep); + if (changed) { + kvm_set_pte(ptep, new); + pfn = kvm_pte_pfn(new); + } + spin_unlock(&kvm->mmu_lock); + + /* + * Fixme: pfn may be freed after mmu_lock + * kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this? 
+ */ + if (kvm_pte_young(changed)) + kvm_set_pfn_accessed(pfn); + + if (kvm_pte_dirty(changed)) { + mark_page_dirty(kvm, gfn); + kvm_set_pfn_dirty(pfn); + } + return ret; +out: + spin_unlock(&kvm->mmu_lock); + return ret; +} + +static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot, + unsigned long hva, unsigned long map_size, bool write) +{ + size_t size; + gpa_t gpa_start; + hva_t uaddr_start, uaddr_end; + + /* Disable dirty logging on HugePages */ + if (kvm_slot_dirty_track_enabled(memslot) && write) + return false; + + size = memslot->npages * PAGE_SIZE; + gpa_start = memslot->base_gfn << PAGE_SHIFT; + uaddr_start = memslot->userspace_addr; + uaddr_end = uaddr_start + size; + + /* + * Pages belonging to memslots that don't have the same alignment + * within a PMD for userspace and GPA cannot be mapped with stage-2 + * PMD entries, because we'll end up mapping the wrong pages. + * + * Consider a layout like the following: + * + * memslot->userspace_addr: + * +-----+--------------------+--------------------+---+ + * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| + * +-----+--------------------+--------------------+---+ + * + * memslot->base_gfn << PAGE_SIZE: + * +---+--------------------+--------------------+-----+ + * |abc|def Stage-2 block | Stage-2 block |tvxyz| + * +---+--------------------+--------------------+-----+ + * + * If we create those stage-2 blocks, we'll end up with this incorrect + * mapping: + * d -> f + * e -> g + * f -> h + */ + if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))) + return false; + + /* + * Next, let's make sure we're not trying to map anything not covered + * by the memslot. This means we have to prohibit block size mappings + * for the beginning and end of a non-block aligned and non-block sized + * memory slot (illustrated by the head and tail parts of the + * userspace view above containing pages 'abcde' and 'xyz', + * respectively). 
+ * + * Note that it doesn't matter if we do the check using the + * userspace_addr or the base_gfn, as both are equally aligned (per + * the check above) and equally sized. + */ + return (hva & ~(map_size - 1)) >= uaddr_start && + (hva & ~(map_size - 1)) + map_size <= uaddr_end; +} + +/* + * Lookup the mapping level for @gfn in the current mm. + * + * WARNING! Use of host_pfn_mapping_level() requires the caller and the end + * consumer to be tied into KVM's handlers for MMU notifier events! + * + * There are several ways to safely use this helper: + * + * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before + * consuming it. In this case, mmu_lock doesn't need to be held during the + * lookup, but it does need to be held while checking the MMU notifier. + * + * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation + * event for the hva. This can be done by explicit checking the MMU notifier + * or by ensuring that KVM already has a valid mapping that covers the hva. + * + * - Do not use the result to install new mappings, e.g. use the host mapping + * level only to decide whether or not to zap an entry. In this case, it's + * not required to hold mmu_lock (though it's highly likely the caller will + * want to hold mmu_lock anyways, e.g. to modify SPTEs). + * + * Note! The lookup can still race with modifications to host page tables, but + * the above "rules" ensure KVM will not _consume_ the result of the walk if a + * race with the primary MMU occurs. 
+ */ +static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, + const struct kvm_memory_slot *slot) +{ + int level = 0; + unsigned long hva; + unsigned long flags; + pgd_t pgd; + p4d_t p4d; + pud_t pud; + pmd_t pmd; + + /* + * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() + * is not solely for performance, it's also necessary to avoid the + * "writable" check in __gfn_to_hva_many(), which will always fail on + * read-only memslots due to gfn_to_hva() assuming writes. Earlier + * page fault steps have already verified the guest isn't writing a + * read-only memslot. + */ + hva = __gfn_to_hva_memslot(slot, gfn); + + /* + * Disable IRQs to prevent concurrent tear down of host page tables, + * e.g. if the primary MMU promotes a P*D to a huge page and then frees + * the original page table. + */ + local_irq_save(flags); + + /* + * Read each entry once. As above, a non-leaf entry can be promoted to + * a huge page _during_ this walk. Re-reading the entry could send the + * walk into the weeds, e.g. p*d_large() returns false (sees the old + * value) and then p*d_offset() walks into the target huge page instead + * of the old page table (sees the new value). 
+ */ + pgd = READ_ONCE(*pgd_offset(kvm->mm, hva)); + if (pgd_none(pgd)) + goto out; + + p4d = READ_ONCE(*p4d_offset(&pgd, hva)); + if (p4d_none(p4d) || !p4d_present(p4d)) + goto out; + + pud = READ_ONCE(*pud_offset(&p4d, hva)); + if (pud_none(pud) || !pud_present(pud)) + goto out; + + pmd = READ_ONCE(*pmd_offset(&pud, hva)); + if (pmd_none(pmd) || !pmd_present(pmd)) + goto out; + + if (kvm_pte_huge(pmd_val(pmd))) + level = 1; + +out: + local_irq_restore(flags); + return level; +} + +/* + * Split huge page + */ +static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t gfn) +{ + int i; + kvm_pte_t val, *child; + struct kvm *kvm = vcpu->kvm; + struct kvm_mmu_memory_cache *memcache; + + memcache = &vcpu->arch.mmu_page_cache; + child = kvm_mmu_memory_cache_alloc(memcache); + val = kvm_pte_mksmall(*ptep); + for (i = 0; i < PTRS_PER_PTE; i++) { + kvm_set_pte(child + i, val); + val += PAGE_SIZE; + } + + /* The later kvm_flush_tlb_gpa() will flush hugepage tlb */ + kvm_set_pte(ptep, __pa(child)); + + kvm->stat.hugepages--; + kvm->stat.pages += PTRS_PER_PTE; + + return child + (gfn & (PTRS_PER_PTE - 1)); +} + +/* + * kvm_map_page() - Map a guest physical page. + * @vcpu: vCPU pointer. + * @gpa: Guest physical address of fault. + * @write: Whether the fault was due to a write. + * + * Handle GPA faults by creating a new GPA mapping (or updating an existing + * one). + * + * This takes care of marking pages young or dirty (idle/dirty page tracking), + * asking KVM for the corresponding PFN, and creating a mapping in the GPA page + * tables. Derived mappings (GVA page tables and TLBs) must be handled by the + * caller. + * + * Returns: 0 on success + * -EFAULT if there is no memory region at @gpa or a write was + * attempted to a read-only memory region. This is usually handled + * as an MMIO access. 
+ */ +static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) +{ + bool writeable; + int srcu_idx, err, retry_no = 0, level; + unsigned long hva, mmu_seq, prot_bits; + kvm_pfn_t pfn; + kvm_pte_t *ptep, new_pte; + gfn_t gfn = gpa >> PAGE_SHIFT; + struct kvm *kvm = vcpu->kvm; + struct kvm_memory_slot *memslot; + struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; + + /* Try the fast path to handle old / clean pages */ + srcu_idx = srcu_read_lock(&kvm->srcu); + err = kvm_map_page_fast(vcpu, gpa, write); + if (!err) + goto out; + + memslot = gfn_to_memslot(kvm, gfn); + hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable); + if (kvm_is_error_hva(hva) || (write && !writeable)) { + err = -EFAULT; + goto out; + } + + /* We need a minimum of cached pages ready for page table creation */ + err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES); + if (err) + goto out; + +retry: + /* + * Used to check for invalidations in progress, of the pfn that is + * returned by gfn_to_pfn_prot below. + */ + mmu_seq = kvm->mmu_invalidate_seq; + /* + * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads in + * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't + * risk the page we get a reference to getting unmapped before we have a + * chance to grab the mmu_lock without mmu_invalidate_retry() noticing. + * + * This smp_rmb() pairs with the effective smp_wmb() of the combination + * of the pte_unmap_unlock() after the PTE is zapped, and the + * spin_lock() in kvm_mmu_invalidate_end() before + * mmu_invalidate_seq is incremented. 
+ */ + smp_rmb(); + + /* Slow path - ask KVM core whether we can access this GPA */ + pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable); + if (is_error_noslot_pfn(pfn)) { + err = -EFAULT; + goto out; + } + + /* Check if an invalidation has taken place since we got pfn */ + spin_lock(&kvm->mmu_lock); + if (mmu_invalidate_retry_hva(kvm, mmu_seq, hva)) { + /* + * This can happen when mappings are changed asynchronously, but + * also synchronously if a COW is triggered by + * gfn_to_pfn_prot(). + */ + spin_unlock(&kvm->mmu_lock); + kvm_release_pfn_clean(pfn); + if (retry_no > 100) { + retry_no = 0; + schedule(); + } + retry_no++; + goto retry; + } + + /* + * For emulated devices such virtio device, actual cache attribute is + * determined by physical machine. + * For pass through physical device, it should be uncachable + */ + prot_bits = _PAGE_PRESENT | __READABLE; + if (pfn_valid(pfn)) + prot_bits |= _CACHE_CC; + else + prot_bits |= _CACHE_SUC; + + if (writeable) { + prot_bits |= _PAGE_WRITE; + if (write) + prot_bits |= __WRITEABLE; + } + + /* Disable dirty logging on HugePages */ + level = 0; + if (!fault_supports_huge_mapping(memslot, hva, PMD_SIZE, write)) { + level = 0; + } else { + level = host_pfn_mapping_level(kvm, gfn, memslot); + if (level == 1) { + gfn = gfn & ~(PTRS_PER_PTE - 1); + pfn = pfn & ~(PTRS_PER_PTE - 1); + } + } + + /* Ensure page tables are allocated */ + ptep = kvm_populate_gpa(kvm, memcache, gpa, level); + new_pte = kvm_pfn_pte(pfn, __pgprot(prot_bits)); + if (level == 1) { + new_pte = kvm_pte_mkhuge(new_pte); + /* + * previous pmd entry is invalid_pte_table + * there is invalid tlb with small page + * need flush these invalid tlbs for current vcpu + */ + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + ++kvm->stat.hugepages; + } else if (kvm_pte_huge(*ptep) && write) + ptep = kvm_split_huge(vcpu, ptep, gfn); + else + ++kvm->stat.pages; + kvm_set_pte(ptep, new_pte); + spin_unlock(&kvm->mmu_lock); + + if (prot_bits & _PAGE_DIRTY) { + 
mark_page_dirty_in_slot(kvm, memslot, gfn); + kvm_set_pfn_dirty(pfn); + } + + kvm_set_pfn_accessed(pfn); + kvm_release_pfn_clean(pfn); +out: + srcu_read_unlock(&kvm->srcu, srcu_idx); + return err; +} + +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) +{ + int ret; + + ret = kvm_map_page(vcpu, gpa, write); + if (ret) + return ret; + + /* Invalidate this entry in the TLB */ + kvm_flush_tlb_gpa(vcpu, gpa); + + return 0; +} + +void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ +} + +int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) +{ + return 0; +} + +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, + const struct kvm_memory_slot *memslot) +{ + kvm_flush_remote_tlbs(kvm); +} -- Gitee From 3086bdfa719086aed4c2837d7bc598307a6ec377 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 0161/2138] LoongArch: KVM: Implement handle csr exception ANBZ: #8436 commit da50f5a693ff55ef367a4c5c9145f0bfea3e476d upstream. Implement kvm handle LoongArch vcpu exit caused by reading, writing and exchanging csr. Use kvm_vcpu_arch::csr structure to emulate the software registers. 
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/exit.c | 105 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 arch/loongarch/kvm/exit.c diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c new file mode 100644 index 000000000000..37bc8a4209bd --- /dev/null +++ b/arch/loongarch/kvm/exit.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "trace.h" + +static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid) +{ + unsigned long val = 0; + struct loongarch_csrs *csr = vcpu->arch.csr; + + /* + * From LoongArch Reference Manual Volume 1 Chapter 4.2.1 + * For undefined CSR id, return value is 0 + */ + if (get_gcsr_flag(csrid) & SW_GCSR) + val = kvm_read_sw_gcsr(csr, csrid); + else + pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc); + + return val; +} + +static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val) +{ + unsigned long old = 0; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(csrid) & SW_GCSR) { + old = kvm_read_sw_gcsr(csr, csrid); + kvm_write_sw_gcsr(csr, csrid, val); + } else + pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc); + + return old; +} + +static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid, + unsigned long csr_mask, unsigned long val) +{ + unsigned long old = 0; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(csrid) & SW_GCSR) { + old = kvm_read_sw_gcsr(csr, csrid); + val = (old & ~csr_mask) | (val & csr_mask); + 
kvm_write_sw_gcsr(csr, csrid, val); + old = old & csr_mask; + } else + pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc); + + return old; +} + +static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) +{ + unsigned int rd, rj, csrid; + unsigned long csr_mask, val = 0; + + /* + * CSR value mask imm + * rj = 0 means csrrd + * rj = 1 means csrwr + * rj != 0,1 means csrxchg + */ + rd = inst.reg2csr_format.rd; + rj = inst.reg2csr_format.rj; + csrid = inst.reg2csr_format.csr; + + /* Process CSR ops */ + switch (rj) { + case 0: /* process csrrd */ + val = kvm_emu_read_csr(vcpu, csrid); + vcpu->arch.gprs[rd] = val; + break; + case 1: /* process csrwr */ + val = vcpu->arch.gprs[rd]; + val = kvm_emu_write_csr(vcpu, csrid, val); + vcpu->arch.gprs[rd] = val; + break; + default: /* process csrxchg */ + val = vcpu->arch.gprs[rd]; + csr_mask = vcpu->arch.gprs[rj]; + val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val); + vcpu->arch.gprs[rd] = val; + } + + return EMULATE_DONE; +} -- Gitee From eff76425f30045dea2d4ba1398df385c68c4922d Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 0162/2138] LoongArch: KVM: Implement handle iocsr exception ANBZ: #8436 commit 81efe043a35113ec1352f6eb2b954d02aac368db upstream. Implement kvm handle vcpu iocsr exception, setting the iocsr info into vcpu_run and return to user space to handle it. 
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/include/asm/inst.h | 16 ++++++ arch/loongarch/kvm/exit.c | 91 +++++++++++++++++++++++++++++++ 2 files changed, 107 insertions(+) diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index 4fa53ad82efb..77a9fcf8e879 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -65,6 +65,14 @@ enum reg2_op { revbd_op = 0x0f, revh2w_op = 0x10, revhd_op = 0x11, + iocsrrdb_op = 0x19200, + iocsrrdh_op = 0x19201, + iocsrrdw_op = 0x19202, + iocsrrdd_op = 0x19203, + iocsrwrb_op = 0x19204, + iocsrwrh_op = 0x19205, + iocsrwrw_op = 0x19206, + iocsrwrd_op = 0x19207, }; enum reg2i5_op { @@ -318,6 +326,13 @@ struct reg2bstrd_format { unsigned int opcode : 10; }; +struct reg2csr_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int csr : 14; + unsigned int opcode : 8; +}; + struct reg3_format { unsigned int rd : 5; unsigned int rj : 5; @@ -346,6 +361,7 @@ union loongarch_instruction { struct reg2i14_format reg2i14_format; struct reg2i16_format reg2i16_format; struct reg2bstrd_format reg2bstrd_format; + struct reg2csr_format reg2csr_format; struct reg3_format reg3_format; struct reg3sa2_format reg3sa2_format; }; diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 37bc8a4209bd..7e729dd9e915 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -103,3 +103,94 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) return EMULATE_DONE; } + +int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + int ret; + unsigned long val; + u32 addr, rd, rj, opcode; + + /* + * Each IOCSR with different opcode + */ + rd = inst.reg2_format.rd; + rj = inst.reg2_format.rj; + opcode = inst.reg2_format.opcode; + addr = 
vcpu->arch.gprs[rj]; + ret = EMULATE_DO_IOCSR; + run->iocsr_io.phys_addr = addr; + run->iocsr_io.is_write = 0; + + /* LoongArch is Little endian */ + switch (opcode) { + case iocsrrdb_op: + run->iocsr_io.len = 1; + break; + case iocsrrdh_op: + run->iocsr_io.len = 2; + break; + case iocsrrdw_op: + run->iocsr_io.len = 4; + break; + case iocsrrdd_op: + run->iocsr_io.len = 8; + break; + case iocsrwrb_op: + run->iocsr_io.len = 1; + run->iocsr_io.is_write = 1; + break; + case iocsrwrh_op: + run->iocsr_io.len = 2; + run->iocsr_io.is_write = 1; + break; + case iocsrwrw_op: + run->iocsr_io.len = 4; + run->iocsr_io.is_write = 1; + break; + case iocsrwrd_op: + run->iocsr_io.len = 8; + run->iocsr_io.is_write = 1; + break; + default: + ret = EMULATE_FAIL; + break; + } + + if (ret == EMULATE_DO_IOCSR) { + if (run->iocsr_io.is_write) { + val = vcpu->arch.gprs[rd]; + memcpy(run->iocsr_io.data, &val, run->iocsr_io.len); + } + vcpu->arch.io_gpr = rd; + } + + return ret; +} + +int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + enum emulation_result er = EMULATE_DONE; + unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; + + switch (run->iocsr_io.len) { + case 1: + *gpr = *(s8 *)run->iocsr_io.data; + break; + case 2: + *gpr = *(s16 *)run->iocsr_io.data; + break; + case 4: + *gpr = *(s32 *)run->iocsr_io.data; + break; + case 8: + *gpr = *(s64 *)run->iocsr_io.data; + break; + default: + kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n", + run->iocsr_io.len, vcpu->arch.badv); + er = EMULATE_FAIL; + break; + } + + return er; +} -- Gitee From babedda486030cc1b56a2b0b819b89d711e23965 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 0163/2138] LoongArch: KVM: Implement handle idle exception ANBZ: #8436 commit f41c8bdbbdbe73343d4842e580c6ab9db9d84171 upstream. Implement kvm handle LoongArch vcpu idle exception, using kvm_vcpu_block to emulate it. 
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/exit.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 7e729dd9e915..d4d7e74f72af 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -194,3 +194,23 @@ int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run) return er; } + +int kvm_emu_idle(struct kvm_vcpu *vcpu) +{ + ++vcpu->stat.idle_exits; + trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE); + + if (!kvm_arch_vcpu_runnable(vcpu)) { + /* + * Switch to the software timer before halt-polling/blocking as + * the guest's timer may be a break event for the vCPU, and the + * hypervisor timer runs only when the CPU is in guest mode. + * Switch before halt-polling so that KVM recognizes an expired + * timer before blocking. + */ + kvm_save_timer(vcpu); + kvm_vcpu_block(vcpu); + } + + return EMULATE_DONE; +} -- Gitee From cfc8fe90db97b803f66cb1832e4a4f83b0291d25 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 0164/2138] LoongArch: KVM: Implement handle gspr exception ANBZ: #8436 commit 13c82f5e6e5088a2998036714239cf00e48f5c10 upstream. Implement kvm handle gspr exception interface, including emulate the reading and writing of cpucfg, csr, iocsr resource. 
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/exit.c | 107 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index d4d7e74f72af..33d1b4190a62 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -214,3 +214,110 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu) return EMULATE_DONE; } + +static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) +{ + int rd, rj; + unsigned int index; + unsigned long curr_pc; + larch_inst inst; + enum emulation_result er = EMULATE_DONE; + struct kvm_run *run = vcpu->run; + + /* Fetch the instruction */ + inst.word = vcpu->arch.badi; + curr_pc = vcpu->arch.pc; + update_pc(&vcpu->arch); + + trace_kvm_exit_gspr(vcpu, inst.word); + er = EMULATE_FAIL; + switch (((inst.word >> 24) & 0xff)) { + case 0x0: /* CPUCFG GSPR */ + if (inst.reg2_format.opcode == 0x1B) { + rd = inst.reg2_format.rd; + rj = inst.reg2_format.rj; + ++vcpu->stat.cpucfg_exits; + index = vcpu->arch.gprs[rj]; + er = EMULATE_DONE; + /* + * By LoongArch Reference Manual 2.2.10.5 + * return value is 0 for undefined cpucfg index + */ + if (index < KVM_MAX_CPUCFG_REGS) + vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index]; + else + vcpu->arch.gprs[rd] = 0; + } + break; + case 0x4: /* CSR{RD,WR,XCHG} GSPR */ + er = kvm_handle_csr(vcpu, inst); + break; + case 0x6: /* Cache, Idle and IOCSR GSPR */ + switch (((inst.word >> 22) & 0x3ff)) { + case 0x18: /* Cache GSPR */ + er = EMULATE_DONE; + trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE); + break; + case 0x19: /* Idle/IOCSR GSPR */ + switch (((inst.word >> 15) & 0x1ffff)) { + case 0xc90: /* IOCSR GSPR */ + er = kvm_emu_iocsr(inst, run, vcpu); + break; + case 0xc91: /* Idle GSPR */ + er = kvm_emu_idle(vcpu); + break; + default: + er = EMULATE_FAIL; + break; + } 
+ break; + default: + er = EMULATE_FAIL; + break; + } + break; + default: + er = EMULATE_FAIL; + break; + } + + /* Rollback PC only if emulation was unsuccessful */ + if (er == EMULATE_FAIL) { + kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n", + curr_pc, __func__, inst.word); + + kvm_arch_vcpu_dump_regs(vcpu); + vcpu->arch.pc = curr_pc; + } + + return er; +} + +/* + * Trigger GSPR: + * 1) Execute CPUCFG instruction; + * 2) Execute CACOP/IDLE instructions; + * 3) Access to unimplemented CSRs/IOCSRs. + */ +static int kvm_handle_gspr(struct kvm_vcpu *vcpu) +{ + int ret = RESUME_GUEST; + enum emulation_result er = EMULATE_DONE; + + er = kvm_trap_handle_gspr(vcpu); + + if (er == EMULATE_DONE) { + ret = RESUME_GUEST; + } else if (er == EMULATE_DO_MMIO) { + vcpu->run->exit_reason = KVM_EXIT_MMIO; + ret = RESUME_HOST; + } else if (er == EMULATE_DO_IOCSR) { + vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR; + ret = RESUME_HOST; + } else { + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + ret = RESUME_GUEST; + } + + return ret; +} -- Gitee From 6c113971f4d2005d87a62bdc2a134e951780008d Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:28 +0800 Subject: [PATCH 0165/2138] LoongArch: KVM: Implement handle mmio exception ANBZ: #8436 commit d5b65882d57c91e5fec8c4c0b0a0a88f343b4525 upstream. Implement handle mmio exception, setting the mmio info into vcpu_run and return to user space to handle it. 
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/exit.c | 310 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 310 insertions(+) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 33d1b4190a62..c31894b75b07 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -321,3 +321,313 @@ static int kvm_handle_gspr(struct kvm_vcpu *vcpu) return ret; } + +int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst) +{ + int ret; + unsigned int op8, opcode, rd; + struct kvm_run *run = vcpu->run; + + run->mmio.phys_addr = vcpu->arch.badv; + vcpu->mmio_needed = 2; /* signed */ + op8 = (inst.word >> 24) & 0xff; + ret = EMULATE_DO_MMIO; + + switch (op8) { + case 0x24 ... 0x27: /* ldptr.w/d process */ + rd = inst.reg2i14_format.rd; + opcode = inst.reg2i14_format.opcode; + + switch (opcode) { + case ldptrw_op: + run->mmio.len = 4; + break; + case ldptrd_op: + run->mmio.len = 8; + break; + default: + break; + } + break; + case 0x28 ... 
0x2e: /* ld.b/h/w/d, ld.bu/hu/wu process */ + rd = inst.reg2i12_format.rd; + opcode = inst.reg2i12_format.opcode; + + switch (opcode) { + case ldb_op: + run->mmio.len = 1; + break; + case ldbu_op: + vcpu->mmio_needed = 1; /* unsigned */ + run->mmio.len = 1; + break; + case ldh_op: + run->mmio.len = 2; + break; + case ldhu_op: + vcpu->mmio_needed = 1; /* unsigned */ + run->mmio.len = 2; + break; + case ldw_op: + run->mmio.len = 4; + break; + case ldwu_op: + vcpu->mmio_needed = 1; /* unsigned */ + run->mmio.len = 4; + break; + case ldd_op: + run->mmio.len = 8; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + case 0x38: /* ldx.b/h/w/d, ldx.bu/hu/wu process */ + rd = inst.reg3_format.rd; + opcode = inst.reg3_format.opcode; + + switch (opcode) { + case ldxb_op: + run->mmio.len = 1; + break; + case ldxbu_op: + run->mmio.len = 1; + vcpu->mmio_needed = 1; /* unsigned */ + break; + case ldxh_op: + run->mmio.len = 2; + break; + case ldxhu_op: + run->mmio.len = 2; + vcpu->mmio_needed = 1; /* unsigned */ + break; + case ldxw_op: + run->mmio.len = 4; + break; + case ldxwu_op: + run->mmio.len = 4; + vcpu->mmio_needed = 1; /* unsigned */ + break; + case ldxd_op: + run->mmio.len = 8; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + default: + ret = EMULATE_FAIL; + } + + if (ret == EMULATE_DO_MMIO) { + /* Set for kvm_complete_mmio_read() use */ + vcpu->arch.io_gpr = rd; + run->mmio.is_write = 0; + vcpu->mmio_is_write = 0; + } else { + kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", + inst.word, vcpu->arch.pc, vcpu->arch.badv); + kvm_arch_vcpu_dump_regs(vcpu); + vcpu->mmio_needed = 0; + } + + return ret; +} + +int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + enum emulation_result er = EMULATE_DONE; + unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; + + /* Update with new PC */ + update_pc(&vcpu->arch); + switch (run->mmio.len) { + case 1: + if (vcpu->mmio_needed == 2) + *gpr = *(s8 *)run->mmio.data; 
+ else + *gpr = *(u8 *)run->mmio.data; + break; + case 2: + if (vcpu->mmio_needed == 2) + *gpr = *(s16 *)run->mmio.data; + else + *gpr = *(u16 *)run->mmio.data; + break; + case 4: + if (vcpu->mmio_needed == 2) + *gpr = *(s32 *)run->mmio.data; + else + *gpr = *(u32 *)run->mmio.data; + break; + case 8: + *gpr = *(s64 *)run->mmio.data; + break; + default: + kvm_err("Bad MMIO length: %d, addr is 0x%lx\n", + run->mmio.len, vcpu->arch.badv); + er = EMULATE_FAIL; + break; + } + + return er; +} + +int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) +{ + int ret; + unsigned int rd, op8, opcode; + unsigned long curr_pc, rd_val = 0; + struct kvm_run *run = vcpu->run; + void *data = run->mmio.data; + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + update_pc(&vcpu->arch); + + op8 = (inst.word >> 24) & 0xff; + run->mmio.phys_addr = vcpu->arch.badv; + ret = EMULATE_DO_MMIO; + switch (op8) { + case 0x24 ... 0x27: /* stptr.w/d process */ + rd = inst.reg2i14_format.rd; + opcode = inst.reg2i14_format.opcode; + + switch (opcode) { + case stptrw_op: + run->mmio.len = 4; + *(unsigned int *)data = vcpu->arch.gprs[rd]; + break; + case stptrd_op: + run->mmio.len = 8; + *(unsigned long *)data = vcpu->arch.gprs[rd]; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + case 0x28 ... 
0x2e: /* st.b/h/w/d process */ + rd = inst.reg2i12_format.rd; + opcode = inst.reg2i12_format.opcode; + rd_val = vcpu->arch.gprs[rd]; + + switch (opcode) { + case stb_op: + run->mmio.len = 1; + *(unsigned char *)data = rd_val; + break; + case sth_op: + run->mmio.len = 2; + *(unsigned short *)data = rd_val; + break; + case stw_op: + run->mmio.len = 4; + *(unsigned int *)data = rd_val; + break; + case std_op: + run->mmio.len = 8; + *(unsigned long *)data = rd_val; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + case 0x38: /* stx.b/h/w/d process */ + rd = inst.reg3_format.rd; + opcode = inst.reg3_format.opcode; + + switch (opcode) { + case stxb_op: + run->mmio.len = 1; + *(unsigned char *)data = vcpu->arch.gprs[rd]; + break; + case stxh_op: + run->mmio.len = 2; + *(unsigned short *)data = vcpu->arch.gprs[rd]; + break; + case stxw_op: + run->mmio.len = 4; + *(unsigned int *)data = vcpu->arch.gprs[rd]; + break; + case stxd_op: + run->mmio.len = 8; + *(unsigned long *)data = vcpu->arch.gprs[rd]; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + default: + ret = EMULATE_FAIL; + } + + if (ret == EMULATE_DO_MMIO) { + run->mmio.is_write = 1; + vcpu->mmio_needed = 1; + vcpu->mmio_is_write = 1; + } else { + vcpu->arch.pc = curr_pc; + kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", + inst.word, vcpu->arch.pc, vcpu->arch.badv); + kvm_arch_vcpu_dump_regs(vcpu); + /* Rollback PC if emulation was unsuccessful */ + } + + return ret; +} + +static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) +{ + int ret; + larch_inst inst; + enum emulation_result er = EMULATE_DONE; + struct kvm_run *run = vcpu->run; + unsigned long badv = vcpu->arch.badv; + + ret = kvm_handle_mm_fault(vcpu, badv, write); + if (ret) { + /* Treat as MMIO */ + inst.word = vcpu->arch.badi; + if (write) { + er = kvm_emu_mmio_write(vcpu, inst); + } else { + /* A code fetch fault doesn't count as an MMIO */ + if (kvm_is_ifetch_fault(&vcpu->arch)) { + 
kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF); + return RESUME_GUEST; + } + + er = kvm_emu_mmio_read(vcpu, inst); + } + } + + if (er == EMULATE_DONE) { + ret = RESUME_GUEST; + } else if (er == EMULATE_DO_MMIO) { + run->exit_reason = KVM_EXIT_MMIO; + ret = RESUME_HOST; + } else { + kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM); + ret = RESUME_GUEST; + } + + return ret; +} + +static int kvm_handle_read_fault(struct kvm_vcpu *vcpu) +{ + return kvm_handle_rdwr_fault(vcpu, false); +} + +static int kvm_handle_write_fault(struct kvm_vcpu *vcpu) +{ + return kvm_handle_rdwr_fault(vcpu, true); +} -- Gitee From 1066241f4967dba033f7aa71ad47c4a0d4f35875 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:29 +0800 Subject: [PATCH 0166/2138] LoongArch: KVM: Implement handle fpu exception ANBZ: #8436 commit 37cdfc6dbf04169310a24f3a79b554c363260562 upstream. Implement handle fpu exception, using kvm_own_fpu() to enable fpu for guest. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/exit.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index c31894b75b07..e855ab9099b2 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -631,3 +631,30 @@ static int kvm_handle_write_fault(struct kvm_vcpu *vcpu) { return kvm_handle_rdwr_fault(vcpu, true); } + +/** + * kvm_handle_fpu_disabled() - Guest used fpu however it is disabled at host + * @vcpu: Virtual CPU context. + * + * Handle when the guest attempts to use fpu which hasn't been allowed + * by the root context. + */ +static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + + /* + * If guest FPU not present, the FPU operation should have been + * treated as a reserved instruction! 
+ * If FPU already in use, we shouldn't get this at all. + */ + if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) { + kvm_err("%s internal error\n", __func__); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + return RESUME_HOST; + } + + kvm_own_fpu(vcpu); + + return RESUME_GUEST; +} -- Gitee From acc2ac1fe124a704faa21a59914347c4944280f6 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:29 +0800 Subject: [PATCH 0167/2138] LoongArch: KVM: Implement kvm exception vectors ANBZ: #8436 commit 71f4fb845874c3c54527e2e5afd687493db9d4d4 upstream. Implement kvm exception vectors, using kvm_fault_tables array to save the handle function pointers and it is used when vcpu handle guest exit. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kvm/exit.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index e855ab9099b2..ce8de3fa472c 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -658,3 +658,39 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } + +/* + * LoongArch KVM callback handling for unimplemented guest exiting + */ +static int kvm_fault_ni(struct kvm_vcpu *vcpu) +{ + unsigned int ecode, inst; + unsigned long estat, badv; + + /* Fetch the instruction */ + inst = vcpu->arch.badi; + badv = vcpu->arch.badv; + estat = vcpu->arch.host_estat; + ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; + kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n", + ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat()); + kvm_arch_vcpu_dump_regs(vcpu); + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return RESUME_GUEST; +} + +static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { + [0 ... 
EXCCODE_INT_START - 1] = kvm_fault_ni, + [EXCCODE_TLBI] = kvm_handle_read_fault, + [EXCCODE_TLBL] = kvm_handle_read_fault, + [EXCCODE_TLBS] = kvm_handle_write_fault, + [EXCCODE_TLBM] = kvm_handle_write_fault, + [EXCCODE_FPDIS] = kvm_handle_fpu_disabled, + [EXCCODE_GSPR] = kvm_handle_gspr, +}; + +int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault) +{ + return kvm_fault_tables[fault](vcpu); +} -- Gitee From 318e1fbdb5348908f75233259a52d4b3a312842f Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:29 +0800 Subject: [PATCH 0168/2138] LoongArch: KVM: Implement vcpu world switch ANBZ: #8436 commit 39fdf4be72f2b81238acbd4da48c75c135a6f1e0 upstream. Implement LoongArch vcpu world switch, including vcpu enter guest and vcpu exit from guest, both operations need to save or restore the host and guest registers. Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/kernel/asm-offsets.c | 32 ++++ arch/loongarch/kvm/switch.S | 250 ++++++++++++++++++++++++++++ 2 files changed, 282 insertions(+) create mode 100644 arch/loongarch/kvm/switch.S diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index 8da0726777ed..173fe514fc9e 100644 --- a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -289,3 +290,34 @@ void output_fgraph_ret_regs_defines(void) BLANK(); } #endif + +void output_kvm_defines(void) +{ + COMMENT("KVM/LoongArch Specific offsets."); + + OFFSET(VCPU_FCC, kvm_vcpu_arch, fpu.fcc); + OFFSET(VCPU_FCSR0, kvm_vcpu_arch, fpu.fcsr); + BLANK(); + + OFFSET(KVM_VCPU_ARCH, kvm_vcpu, arch); + OFFSET(KVM_VCPU_KVM, kvm_vcpu, kvm); + OFFSET(KVM_VCPU_RUN, kvm_vcpu, run); + BLANK(); + + OFFSET(KVM_ARCH_HSP, kvm_vcpu_arch, host_sp); + OFFSET(KVM_ARCH_HTP, 
kvm_vcpu_arch, host_tp); + OFFSET(KVM_ARCH_HPGD, kvm_vcpu_arch, host_pgd); + OFFSET(KVM_ARCH_HANDLE_EXIT, kvm_vcpu_arch, handle_exit); + OFFSET(KVM_ARCH_HEENTRY, kvm_vcpu_arch, host_eentry); + OFFSET(KVM_ARCH_GEENTRY, kvm_vcpu_arch, guest_eentry); + OFFSET(KVM_ARCH_GPC, kvm_vcpu_arch, pc); + OFFSET(KVM_ARCH_GGPR, kvm_vcpu_arch, gprs); + OFFSET(KVM_ARCH_HBADI, kvm_vcpu_arch, badi); + OFFSET(KVM_ARCH_HBADV, kvm_vcpu_arch, badv); + OFFSET(KVM_ARCH_HECFG, kvm_vcpu_arch, host_ecfg); + OFFSET(KVM_ARCH_HESTAT, kvm_vcpu_arch, host_estat); + OFFSET(KVM_ARCH_HPERCPU, kvm_vcpu_arch, host_percpu); + + OFFSET(KVM_GPGD, kvm, arch.pgd); + BLANK(); +} diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S new file mode 100644 index 000000000000..0ed9040307b7 --- /dev/null +++ b/arch/loongarch/kvm/switch.S @@ -0,0 +1,250 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include + +#define HGPR_OFFSET(x) (PT_R0 + 8*x) +#define GGPR_OFFSET(x) (KVM_ARCH_GGPR + 8*x) + +.macro kvm_save_host_gpr base + .irp n,1,2,3,22,23,24,25,26,27,28,29,30,31 + st.d $r\n, \base, HGPR_OFFSET(\n) + .endr +.endm + +.macro kvm_restore_host_gpr base + .irp n,1,2,3,22,23,24,25,26,27,28,29,30,31 + ld.d $r\n, \base, HGPR_OFFSET(\n) + .endr +.endm + +/* + * Save and restore all GPRs except base register, + * and default value of base register is a2. + */ +.macro kvm_save_guest_gprs base + .irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + st.d $r\n, \base, GGPR_OFFSET(\n) + .endr +.endm + +.macro kvm_restore_guest_gprs base + .irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + ld.d $r\n, \base, GGPR_OFFSET(\n) + .endr +.endm + +/* + * Prepare switch to guest, save host regs and restore guest regs. 
+ * a2: kvm_vcpu_arch, don't touch it until 'ertn' + * t0, t1: temp register + */ +.macro kvm_switch_to_guest + /* Set host ECFG.VS=0, all exceptions share one exception entry */ + csrrd t0, LOONGARCH_CSR_ECFG + bstrins.w t0, zero, CSR_ECFG_VS_SHIFT_END, CSR_ECFG_VS_SHIFT + csrwr t0, LOONGARCH_CSR_ECFG + + /* Load up the new EENTRY */ + ld.d t0, a2, KVM_ARCH_GEENTRY + csrwr t0, LOONGARCH_CSR_EENTRY + + /* Set Guest ERA */ + ld.d t0, a2, KVM_ARCH_GPC + csrwr t0, LOONGARCH_CSR_ERA + + /* Save host PGDL */ + csrrd t0, LOONGARCH_CSR_PGDL + st.d t0, a2, KVM_ARCH_HPGD + + /* Switch to kvm */ + ld.d t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH + + /* Load guest PGDL */ + li.w t0, KVM_GPGD + ldx.d t0, t1, t0 + csrwr t0, LOONGARCH_CSR_PGDL + + /* Mix GID and RID */ + csrrd t1, LOONGARCH_CSR_GSTAT + bstrpick.w t1, t1, CSR_GSTAT_GID_SHIFT_END, CSR_GSTAT_GID_SHIFT + csrrd t0, LOONGARCH_CSR_GTLBC + bstrins.w t0, t1, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT + csrwr t0, LOONGARCH_CSR_GTLBC + + /* + * Enable intr in root mode with future ertn so that host interrupt + * can be responsed during VM runs + * Guest CRMD comes from separate GCSR_CRMD register + */ + ori t0, zero, CSR_PRMD_PIE + csrxchg t0, t0, LOONGARCH_CSR_PRMD + + /* Set PVM bit to setup ertn to guest context */ + ori t0, zero, CSR_GSTAT_PVM + csrxchg t0, t0, LOONGARCH_CSR_GSTAT + + /* Load Guest GPRs */ + kvm_restore_guest_gprs a2 + /* Load KVM_ARCH register */ + ld.d a2, a2, (KVM_ARCH_GGPR + 8 * REG_A2) + + ertn /* Switch to guest: GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0 */ +.endm + + /* + * Exception entry for general exception from guest mode + * - IRQ is disabled + * - kernel privilege in root mode + * - page mode keep unchanged from previous PRMD in root mode + * - Fixme: tlb exception cannot happen since registers relative with TLB + * - is still in guest mode, such as pgd table/vmid registers etc, + * - will fix with hw page walk enabled in future + * load kvm_vcpu from reserved CSR KVM_VCPU_KS, and 
save a2 to KVM_TEMP_KS + */ + .text + .cfi_sections .debug_frame +SYM_CODE_START(kvm_exc_entry) + csrwr a2, KVM_TEMP_KS + csrrd a2, KVM_VCPU_KS + addi.d a2, a2, KVM_VCPU_ARCH + + /* After save GPRs, free to use any GPR */ + kvm_save_guest_gprs a2 + /* Save guest A2 */ + csrrd t0, KVM_TEMP_KS + st.d t0, a2, (KVM_ARCH_GGPR + 8 * REG_A2) + + /* A2 is kvm_vcpu_arch, A1 is free to use */ + csrrd s1, KVM_VCPU_KS + ld.d s0, s1, KVM_VCPU_RUN + + csrrd t0, LOONGARCH_CSR_ESTAT + st.d t0, a2, KVM_ARCH_HESTAT + csrrd t0, LOONGARCH_CSR_ERA + st.d t0, a2, KVM_ARCH_GPC + csrrd t0, LOONGARCH_CSR_BADV + st.d t0, a2, KVM_ARCH_HBADV + csrrd t0, LOONGARCH_CSR_BADI + st.d t0, a2, KVM_ARCH_HBADI + + /* Restore host ECFG.VS */ + csrrd t0, LOONGARCH_CSR_ECFG + ld.d t1, a2, KVM_ARCH_HECFG + or t0, t0, t1 + csrwr t0, LOONGARCH_CSR_ECFG + + /* Restore host EENTRY */ + ld.d t0, a2, KVM_ARCH_HEENTRY + csrwr t0, LOONGARCH_CSR_EENTRY + + /* Restore host pgd table */ + ld.d t0, a2, KVM_ARCH_HPGD + csrwr t0, LOONGARCH_CSR_PGDL + + /* + * Disable PGM bit to enter root mode by default with next ertn + */ + ori t0, zero, CSR_GSTAT_PVM + csrxchg zero, t0, LOONGARCH_CSR_GSTAT + + /* + * Clear GTLBC.TGID field + * 0: for root tlb update in future tlb instr + * others: for guest tlb update like gpa to hpa in future tlb instr + */ + csrrd t0, LOONGARCH_CSR_GTLBC + bstrins.w t0, zero, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT + csrwr t0, LOONGARCH_CSR_GTLBC + ld.d tp, a2, KVM_ARCH_HTP + ld.d sp, a2, KVM_ARCH_HSP + /* restore per cpu register */ + ld.d u0, a2, KVM_ARCH_HPERCPU + addi.d sp, sp, -PT_SIZE + + /* Prepare handle exception */ + or a0, s0, zero + or a1, s1, zero + ld.d t8, a2, KVM_ARCH_HANDLE_EXIT + jirl ra, t8, 0 + + or a2, s1, zero + addi.d a2, a2, KVM_VCPU_ARCH + + /* Resume host when ret <= 0 */ + blez a0, ret_to_host + + /* + * Return to guest + * Save per cpu register again, maybe switched to another cpu + */ + st.d u0, a2, KVM_ARCH_HPERCPU + + /* Save kvm_vcpu to kscratch */ + csrwr 
s1, KVM_VCPU_KS + kvm_switch_to_guest + +ret_to_host: + ld.d a2, a2, KVM_ARCH_HSP + addi.d a2, a2, -PT_SIZE + kvm_restore_host_gpr a2 + jr ra + +SYM_INNER_LABEL(kvm_exc_entry_end, SYM_L_LOCAL) +SYM_CODE_END(kvm_exc_entry) + +/* + * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu) + * + * @register_param: + * a0: kvm_run* run + * a1: kvm_vcpu* vcpu + */ +SYM_FUNC_START(kvm_enter_guest) + /* Allocate space in stack bottom */ + addi.d a2, sp, -PT_SIZE + /* Save host GPRs */ + kvm_save_host_gpr a2 + + /* Save host CRMD, PRMD to stack */ + csrrd a3, LOONGARCH_CSR_CRMD + st.d a3, a2, PT_CRMD + csrrd a3, LOONGARCH_CSR_PRMD + st.d a3, a2, PT_PRMD + + addi.d a2, a1, KVM_VCPU_ARCH + st.d sp, a2, KVM_ARCH_HSP + st.d tp, a2, KVM_ARCH_HTP + /* Save per cpu register */ + st.d u0, a2, KVM_ARCH_HPERCPU + + /* Save kvm_vcpu to kscratch */ + csrwr a1, KVM_VCPU_KS + kvm_switch_to_guest +SYM_INNER_LABEL(kvm_enter_guest_end, SYM_L_LOCAL) +SYM_FUNC_END(kvm_enter_guest) + +SYM_FUNC_START(kvm_save_fpu) + fpu_save_csr a0 t1 + fpu_save_double a0 t1 + fpu_save_cc a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_save_fpu) + +SYM_FUNC_START(kvm_restore_fpu) + fpu_restore_double a0 t1 + fpu_restore_csr a0 t1 t2 + fpu_restore_cc a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_restore_fpu) + + .section ".rodata" +SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry) +SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest) -- Gitee From 6ba0430c491851243850fa7e57acd0d0e24cfb9c Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:29 +0800 Subject: [PATCH 0169/2138] LoongArch: KVM: Enable kvm config and add the makefile ANBZ: #8436 commit c1fc48aad14dbe7654f5986afb906332b528d54b upstream. Enable LoongArch kvm config and add the makefile to support build kvm module. 
Reviewed-by: Bibo Mao Tested-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- arch/loongarch/Kbuild | 2 ++ arch/loongarch/Kconfig | 6 ++++ arch/loongarch/configs/loongson3_defconfig | 2 ++ arch/loongarch/kvm/Kconfig | 40 ++++++++++++++++++++++ arch/loongarch/kvm/Makefile | 22 ++++++++++++ 5 files changed, 72 insertions(+) create mode 100644 arch/loongarch/kvm/Kconfig create mode 100644 arch/loongarch/kvm/Makefile diff --git a/arch/loongarch/Kbuild b/arch/loongarch/Kbuild index b01f5cdb27e0..beb8499dd8ed 100644 --- a/arch/loongarch/Kbuild +++ b/arch/loongarch/Kbuild @@ -3,5 +3,7 @@ obj-y += mm/ obj-y += net/ obj-y += vdso/ +obj-$(CONFIG_KVM) += kvm/ + # for cleaning subdir- += boot diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 1463213f3315..54a169dee80f 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -131,6 +131,7 @@ config LOONGARCH select HAVE_KPROBES select HAVE_KPROBES_ON_FTRACE select HAVE_KRETPROBES + select HAVE_KVM select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI select HAVE_PCI @@ -266,6 +267,9 @@ config AS_HAS_LASX_EXTENSION config AS_HAS_LBT_EXTENSION def_bool $(as-instr,movscr2gr \$a0$(comma)\$scr0) +config AS_HAS_LVZ_EXTENSION + def_bool $(as-instr,hvcl 0) + menu "Kernel type and options" source "kernel/Kconfig.hz" @@ -659,3 +663,5 @@ source "kernel/power/Kconfig" source "drivers/acpi/Kconfig" endmenu + +source "arch/loongarch/kvm/Kconfig" diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 0187730896f4..8b974a34bcc1 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -70,6 +70,8 @@ CONFIG_EFI_ZBOOT=y CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y CONFIG_EFI_CAPSULE_LOADER=m CONFIG_EFI_TEST=m +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m CONFIG_JUMP_LABEL=y CONFIG_MODULES=y 
CONFIG_MODULE_FORCE_LOAD=y diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig new file mode 100644 index 000000000000..fda425babfb2 --- /dev/null +++ b/arch/loongarch/kvm/Kconfig @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# KVM configuration +# + +source "virt/kvm/Kconfig" + +menuconfig VIRTUALIZATION + bool "Virtualization" + help + Say Y here to get to see options for using your Linux host to run + other operating systems inside virtual machines (guests). + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and + disabled. + +if VIRTUALIZATION + +config KVM + tristate "Kernel-based Virtual Machine (KVM) support" + depends on AS_HAS_LVZ_EXTENSION + depends on HAVE_KVM + select HAVE_KVM_DIRTY_RING_ACQ_REL + select HAVE_KVM_EVENTFD + select HAVE_KVM_VCPU_ASYNC_IOCTL + select KVM_GENERIC_DIRTYLOG_READ_PROTECT + select KVM_GENERIC_HARDWARE_ENABLING + select KVM_MMIO + select KVM_XFER_TO_GUEST_WORK + select MMU_NOTIFIER + select PREEMPT_NOTIFIERS + help + Support hosting virtualized guest machines using + hardware virtualization extensions. You will need + a processor equipped with virtualization extensions. + + If unsure, say N. 
+ +endif # VIRTUALIZATION diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile new file mode 100644 index 000000000000..244467d7792a --- /dev/null +++ b/arch/loongarch/kvm/Makefile @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for LoongArch KVM support +# + +ccflags-y += -I $(srctree)/$(src) + +include $(srctree)/virt/kvm/Makefile.kvm + +obj-$(CONFIG_KVM) += kvm.o + +kvm-y += exit.o +kvm-y += interrupt.o +kvm-y += main.o +kvm-y += mmu.o +kvm-y += switch.o +kvm-y += timer.o +kvm-y += tlb.o +kvm-y += vcpu.o +kvm-y += vm.o + +CFLAGS_exit.o += $(call cc-option,-Wno-override-init,) -- Gitee From 88ff527ac5cd1e60acd868aac4d0344b48c9a79f Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:29 +0800 Subject: [PATCH 0170/2138] LoongArch: KVM: Supplement kvm document about LoongArch-specific part ANBZ: #8436 commit 6f0257a03212d4f66954ce14402adb5c68fed075 upstream. Supplement kvm document about LoongArch-specific part, such as add api introduction for GET/SET_ONE_REG, GET/SET_FPU, GET/SET_MP_STATE, etc. Reviewed-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- Documentation/virt/kvm/api.rst | 70 +++++++++++++++++++++++++++++----- 1 file changed, 61 insertions(+), 9 deletions(-) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 21a7578142a1..edc682a94ca4 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -416,6 +416,13 @@ Reads the general purpose registers from the vcpu. __u64 pc; }; + /* LoongArch */ + struct kvm_regs { + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ + unsigned long gpr[32]; + unsigned long pc; + }; + 4.12 KVM_SET_REGS ----------------- @@ -506,7 +513,7 @@ translation mode. 
------------------ :Capability: basic -:Architectures: x86, ppc, mips, riscv +:Architectures: x86, ppc, mips, riscv, loongarch :Type: vcpu ioctl :Parameters: struct kvm_interrupt (in) :Returns: 0 on success, negative on failure. @@ -592,6 +599,14 @@ b) KVM_INTERRUPT_UNSET This is an asynchronous vcpu ioctl and can be invoked from any thread. +LOONGARCH: +^^^^^^^^^^ + +Queues an external interrupt to be injected into the virtual CPU. A negative +interrupt number dequeues the interrupt. + +This is an asynchronous vcpu ioctl and can be invoked from any thread. + 4.17 KVM_DEBUG_GUEST -------------------- @@ -737,7 +752,7 @@ signal mask. ---------------- :Capability: basic -:Architectures: x86 +:Architectures: x86, loongarch :Type: vcpu ioctl :Parameters: struct kvm_fpu (out) :Returns: 0 on success, -1 on error @@ -746,7 +761,7 @@ Reads the floating point state from the vcpu. :: - /* for KVM_GET_FPU and KVM_SET_FPU */ + /* x86: for KVM_GET_FPU and KVM_SET_FPU */ struct kvm_fpu { __u8 fpr[8][16]; __u16 fcw; @@ -761,12 +776,21 @@ Reads the floating point state from the vcpu. __u32 pad2; }; + /* LoongArch: for KVM_GET_FPU and KVM_SET_FPU */ + struct kvm_fpu { + __u32 fcsr; + __u64 fcc; + struct kvm_fpureg { + __u64 val64[4]; + }fpr[32]; + }; + 4.23 KVM_SET_FPU ---------------- :Capability: basic -:Architectures: x86 +:Architectures: x86, loongarch :Type: vcpu ioctl :Parameters: struct kvm_fpu (in) :Returns: 0 on success, -1 on error @@ -775,7 +799,7 @@ Writes the floating point state to the vcpu. :: - /* for KVM_GET_FPU and KVM_SET_FPU */ + /* x86: for KVM_GET_FPU and KVM_SET_FPU */ struct kvm_fpu { __u8 fpr[8][16]; __u16 fcw; @@ -790,6 +814,15 @@ Writes the floating point state to the vcpu. __u32 pad2; }; + /* LoongArch: for KVM_GET_FPU and KVM_SET_FPU */ + struct kvm_fpu { + __u32 fcsr; + __u64 fcc; + struct kvm_fpureg { + __u64 val64[4]; + }fpr[32]; + }; + 4.24 KVM_CREATE_IRQCHIP ----------------------- @@ -1387,7 +1420,7 @@ documentation when it pops into existence). 
------------------- :Capability: KVM_CAP_ENABLE_CAP -:Architectures: mips, ppc, s390, x86 +:Architectures: mips, ppc, s390, x86, loongarch :Type: vcpu ioctl :Parameters: struct kvm_enable_cap (in) :Returns: 0 on success; -1 on error @@ -1442,7 +1475,7 @@ for vm-wide capabilities. --------------------- :Capability: KVM_CAP_MP_STATE -:Architectures: x86, s390, arm64, riscv +:Architectures: x86, s390, arm64, riscv, loongarch :Type: vcpu ioctl :Parameters: struct kvm_mp_state (out) :Returns: 0 on success; -1 on error @@ -1460,7 +1493,7 @@ Possible values are: ========================== =============================================== KVM_MP_STATE_RUNNABLE the vcpu is currently running - [x86,arm64,riscv] + [x86,arm64,riscv,loongarch] KVM_MP_STATE_UNINITIALIZED the vcpu is an application processor (AP) which has not yet received an INIT signal [x86] KVM_MP_STATE_INIT_RECEIVED the vcpu has received an INIT signal, and is @@ -1516,11 +1549,14 @@ For riscv: The only states that are valid are KVM_MP_STATE_STOPPED and KVM_MP_STATE_RUNNABLE which reflect if the vcpu is paused or not. +On LoongArch, only the KVM_MP_STATE_RUNNABLE state is used to reflect +whether the vcpu is runnable. + 4.39 KVM_SET_MP_STATE --------------------- :Capability: KVM_CAP_MP_STATE -:Architectures: x86, s390, arm64, riscv +:Architectures: x86, s390, arm64, riscv, loongarch :Type: vcpu ioctl :Parameters: struct kvm_mp_state (in) :Returns: 0 on success; -1 on error @@ -1538,6 +1574,9 @@ For arm64/riscv: The only states that are valid are KVM_MP_STATE_STOPPED and KVM_MP_STATE_RUNNABLE which reflect if the vcpu should be paused or not. +On LoongArch, only the KVM_MP_STATE_RUNNABLE state is used to reflect +whether the vcpu is runnable. 
+ 4.40 KVM_SET_IDENTITY_MAP_ADDR ------------------------------ @@ -2841,6 +2880,19 @@ Following are the RISC-V D-extension registers: 0x8020 0000 0600 0020 fcsr Floating point control and status register ======================= ========= ============================================= +LoongArch registers are mapped using the lower 32 bits. The upper 16 bits of +that is the register group type. + +LoongArch csr registers are used to control guest cpu or get status of guest +cpu, and they have the following id bit patterns:: + + 0x9030 0000 0001 00 (64-bit) + +LoongArch KVM control registers are used to implement some new defined functions +such as set vcpu counter or reset vcpu, and they have the following id bit patterns:: + + 0x9030 0000 0002 + 4.69 KVM_GET_ONE_REG -------------------- -- Gitee From 66b16ca808c2e326a92d622096a7693026587296 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Mon, 2 Oct 2023 10:01:29 +0800 Subject: [PATCH 0171/2138] LoongArch: KVM: Add maintainers for LoongArch KVM ANBZ: #8436 commit 2c10cda4b777be4be9d9e69e4f70c818dbb15e21 upstream. Add maintainers for LoongArch KVM. 
Acked-by: Huacai Chen Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2814 --- MAINTAINERS | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index ae4c0cec5073..2b1759633adb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11530,6 +11530,18 @@ F: include/kvm/arm_* F: tools/testing/selftests/kvm/*/aarch64/ F: tools/testing/selftests/kvm/aarch64/ +KERNEL VIRTUAL MACHINE FOR LOONGARCH (KVM/LoongArch) +M: Tianrui Zhao +M: Bibo Mao +M: Huacai Chen +L: kvm@vger.kernel.org +L: loongarch@lists.linux.dev +S: Maintained +T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git +F: arch/loongarch/include/asm/kvm* +F: arch/loongarch/include/uapi/asm/kvm* +F: arch/loongarch/kvm/ + KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips) M: Huacai Chen L: linux-mips@vger.kernel.org -- Gitee From 8f2c30540780cade35f172501597de4f7fc13078 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 19 Dec 2023 10:48:27 +0800 Subject: [PATCH 0172/2138] LoongArch: KVM: Optimization for memslot hugepage checking ANBZ: #8436 commit 7ab6fb505b2a7447c4a7237a12c59e3ad0c7298c upstream. During shadow mmu page fault, there is checking for huge page for specified memslot. Page fault is hot path, check logic can be done when memslot is created. Here two flags are added for huge page checking, KVM_MEM_HUGEPAGE_CAPABLE and KVM_MEM_HUGEPAGE_INCAPABLE. Indeed for an optimized qemu, memslot for DRAM is always huge page aligned. The flag is firstly checked during hot page fault path. Now only huge page flag is supported, there is a long way for super page support in LoongArch system. Since super page size is 64G for 16K pagesize and 1G for 4K pagesize, 64G physical address is rarely used and LoongArch kernel needs support super page for 4K. Also memory layout of LoongArch qemu VM should be 1G aligned. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/include/asm/kvm_host.h | 3 + arch/loongarch/kvm/mmu.c | 124 +++++++++++++++++--------- 2 files changed, 86 insertions(+), 41 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 11328700d4fa..0e89db020481 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -45,7 +45,10 @@ struct kvm_vcpu_stat { u64 signal_exits; }; +#define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0) +#define KVM_MEM_HUGEPAGE_INCAPABLE (1UL << 1) struct kvm_arch_memory_slot { + unsigned long flags; }; struct kvm_context { diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index 80480df5f550..915f17527893 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -13,6 +13,16 @@ #include #include +static inline bool kvm_hugepage_capable(struct kvm_memory_slot *slot) +{ + return slot->arch.flags & KVM_MEM_HUGEPAGE_CAPABLE; +} + +static inline bool kvm_hugepage_incapable(struct kvm_memory_slot *slot) +{ + return slot->arch.flags & KVM_MEM_HUGEPAGE_INCAPABLE; +} + static inline void kvm_ptw_prepare(struct kvm *kvm, kvm_ptw_ctx *ctx) { ctx->level = kvm->arch.root_level; @@ -365,6 +375,69 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, kvm_ptw_top(kvm->arch.pgd, start << PAGE_SHIFT, end << PAGE_SHIFT, &ctx); } +int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) +{ + gpa_t gpa_start; + hva_t hva_start; + size_t size, gpa_offset, hva_offset; + + if ((change != KVM_MR_MOVE) && (change != KVM_MR_CREATE)) + return 0; + /* + * Prevent userspace from creating a memory region outside of the + * VM GPA address space + */ + if ((new->base_gfn + new->npages) > (kvm->arch.gpa_size >> PAGE_SHIFT)) + return -ENOMEM; + 
+ new->arch.flags = 0; + size = new->npages * PAGE_SIZE; + gpa_start = new->base_gfn << PAGE_SHIFT; + hva_start = new->userspace_addr; + if (IS_ALIGNED(size, PMD_SIZE) && IS_ALIGNED(gpa_start, PMD_SIZE) + && IS_ALIGNED(hva_start, PMD_SIZE)) + new->arch.flags |= KVM_MEM_HUGEPAGE_CAPABLE; + else { + /* + * Pages belonging to memslots that don't have the same + * alignment within a PMD for userspace and GPA cannot be + * mapped with PMD entries, because we'll end up mapping + * the wrong pages. + * + * Consider a layout like the following: + * + * memslot->userspace_addr: + * +-----+--------------------+--------------------+---+ + * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| + * +-----+--------------------+--------------------+---+ + * + * memslot->base_gfn << PAGE_SIZE: + * +---+--------------------+--------------------+-----+ + * |abc|def Stage-2 block | Stage-2 block |tvxyz| + * +---+--------------------+--------------------+-----+ + * + * If we create those stage-2 blocks, we'll end up with this + * incorrect mapping: + * d -> f + * e -> g + * f -> h + */ + gpa_offset = gpa_start & (PMD_SIZE - 1); + hva_offset = hva_start & (PMD_SIZE - 1); + if (gpa_offset != hva_offset) { + new->arch.flags |= KVM_MEM_HUGEPAGE_INCAPABLE; + } else { + if (gpa_offset == 0) + gpa_offset = PMD_SIZE; + if ((size + gpa_offset) < (PMD_SIZE * 2)) + new->arch.flags |= KVM_MEM_HUGEPAGE_INCAPABLE; + } + } + + return 0; +} + void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_memory_slot *old, const struct kvm_memory_slot *new, @@ -562,47 +635,23 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ } static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot, - unsigned long hva, unsigned long map_size, bool write) + unsigned long hva, bool write) { - size_t size; - gpa_t gpa_start; - hva_t uaddr_start, uaddr_end; + hva_t start, end; /* Disable dirty logging on HugePages */ if (kvm_slot_dirty_track_enabled(memslot) && write) return 
false; - size = memslot->npages * PAGE_SIZE; - gpa_start = memslot->base_gfn << PAGE_SHIFT; - uaddr_start = memslot->userspace_addr; - uaddr_end = uaddr_start + size; + if (kvm_hugepage_capable(memslot)) + return true; - /* - * Pages belonging to memslots that don't have the same alignment - * within a PMD for userspace and GPA cannot be mapped with stage-2 - * PMD entries, because we'll end up mapping the wrong pages. - * - * Consider a layout like the following: - * - * memslot->userspace_addr: - * +-----+--------------------+--------------------+---+ - * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| - * +-----+--------------------+--------------------+---+ - * - * memslot->base_gfn << PAGE_SIZE: - * +---+--------------------+--------------------+-----+ - * |abc|def Stage-2 block | Stage-2 block |tvxyz| - * +---+--------------------+--------------------+-----+ - * - * If we create those stage-2 blocks, we'll end up with this incorrect - * mapping: - * d -> f - * e -> g - * f -> h - */ - if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))) + if (kvm_hugepage_incapable(memslot)) return false; + start = memslot->userspace_addr; + end = start + memslot->npages * PAGE_SIZE; + /* * Next, let's make sure we're not trying to map anything not covered * by the memslot. This means we have to prohibit block size mappings @@ -615,8 +664,7 @@ static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot, * userspace_addr or the base_gfn, as both are equally aligned (per * the check above) and equally sized. 
*/ - return (hva & ~(map_size - 1)) >= uaddr_start && - (hva & ~(map_size - 1)) + map_size <= uaddr_end; + return (hva >= ALIGN(start, PMD_SIZE)) && (hva < ALIGN_DOWN(end, PMD_SIZE)); } /* @@ -842,7 +890,7 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) /* Disable dirty logging on HugePages */ level = 0; - if (!fault_supports_huge_mapping(memslot, hva, PMD_SIZE, write)) { + if (!fault_supports_huge_mapping(memslot, hva, write)) { level = 0; } else { level = host_pfn_mapping_level(kvm, gfn, memslot); @@ -901,12 +949,6 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) { } -int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old, - struct kvm_memory_slot *new, enum kvm_mr_change change) -{ - return 0; -} - void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot) { -- Gitee From e30c88351a61d76742f201ee322177ffcf247f1e Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 19 Dec 2023 10:48:27 +0800 Subject: [PATCH 0173/2138] LoongArch: KVM: Remove SW timer switch when vcpu is halt polling ANBZ: #8436 commit 161267320158920a601e40d83fdac60bcaa2acb5 upstream. With halt-polling supported, there is checking for pending events or interrupts when vcpu executes idle instruction. Pending interrupts include injected SW interrupts and passthrough HW interrupts, such as HW timer interrupts, since HW timer works still even if vcpu exists from VM mode. Since HW timer pending interrupt can be set directly with CSR status register, and pending HW timer interrupt checking is used in vcpu block checking function, it is not necessary to switch to SW timer during halt-polling. This patch adds preemption disabling in function kvm_cpu_has_pending_timer(), and removes SW timer switching in idle instruction emulation function. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/kvm/exit.c | 13 ++----------- arch/loongarch/kvm/timer.c | 12 +++++++++--- arch/loongarch/kvm/vcpu.c | 9 ++++++++- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index ce8de3fa472c..e708a1786d6b 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -200,17 +200,8 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu) ++vcpu->stat.idle_exits; trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE); - if (!kvm_arch_vcpu_runnable(vcpu)) { - /* - * Switch to the software timer before halt-polling/blocking as - * the guest's timer may be a break event for the vCPU, and the - * hypervisor timer runs only when the CPU is in guest mode. - * Switch before halt-polling so that KVM recognizes an expired - * timer before blocking. - */ - kvm_save_timer(vcpu); - kvm_vcpu_block(vcpu); - } + if (!kvm_arch_vcpu_runnable(vcpu)) + kvm_vcpu_halt(vcpu); return EMULATE_DONE; } diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index 284bf553fefe..12d58040122d 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -155,11 +155,17 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu) */ hrtimer_cancel(&vcpu->arch.swtimer); hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED); - } else + } else if (vcpu->stat.generic.blocking) { /* - * Inject timer interrupt so that hall polling can dectect and exit + * Inject timer interrupt so that halt polling can dectect and exit. + * VCPU is scheduled out already and sleeps in rcuwait queue and + * will not poll pending events again. kvm_queue_irq() is not enough, + * hrtimer swtimer should be used here. 
*/ - kvm_queue_irq(vcpu, INT_TI); + expire = ktime_add_ns(ktime_get(), 10); + vcpu->arch.expire = expire; + hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED); + } } /* diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 73d0c2b9c1a5..54f544b30f32 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -187,8 +187,15 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { - return kvm_pending_timer(vcpu) || + int ret; + + /* Protect from TOD sync and vcpu_load/put() */ + preempt_disable(); + ret = kvm_pending_timer(vcpu) || kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI); + preempt_enable(); + + return ret; } int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) -- Gitee From a6473509b9551405bca9b435a717512adcf4a2bf Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 19 Dec 2023 10:48:27 +0800 Subject: [PATCH 0174/2138] LoongArch: KVM: Allow to access HW timer CSR registers always ANBZ: #8436 commit 0d2abe67029644741bf7400b0d00c2faa3e1c455 upstream. Currently HW timer CSR registers are allowed to access before entering to vm and disabled if switch to SW timer in host mode, instead it is not necessary to do so. HW timer CSR registers can be accessed always, it is nothing to do with whether it is in vm mode or host mode. This patch removes the limitation. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/kvm/main.c | 1 - arch/loongarch/kvm/timer.c | 27 ++++++--------------------- 2 files changed, 6 insertions(+), 22 deletions(-) diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index 1c1d5199500e..86a2f2d0cb27 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -287,7 +287,6 @@ int kvm_arch_hardware_enable(void) if (env & CSR_GCFG_MATC_ROOT) gcfg |= CSR_GCFG_MATC_ROOT; - gcfg |= CSR_GCFG_TIT; write_csr_gcfg(gcfg); kvm_flush_tlb_all(); diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index 12d58040122d..d6d5bcea349b 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -70,15 +70,6 @@ void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz) */ void kvm_acquire_timer(struct kvm_vcpu *vcpu) { - unsigned long cfg; - - cfg = read_csr_gcfg(); - if (!(cfg & CSR_GCFG_TIT)) - return; - - /* Enable guest access to hard timer */ - write_csr_gcfg(cfg & ~CSR_GCFG_TIT); - /* * Freeze the soft-timer and sync the guest stable timer with it. We do * this with interrupts disabled to avoid latency. 
@@ -174,21 +165,15 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu) */ void kvm_save_timer(struct kvm_vcpu *vcpu) { - unsigned long cfg; struct loongarch_csrs *csr = vcpu->arch.csr; preempt_disable(); - cfg = read_csr_gcfg(); - if (!(cfg & CSR_GCFG_TIT)) { - /* Disable guest use of hard timer */ - write_csr_gcfg(cfg | CSR_GCFG_TIT); - - /* Save hard timer state */ - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG); - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL); - if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN) - _kvm_save_timer(vcpu); - } + + /* Save hard timer state */ + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL); + if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN) + _kvm_save_timer(vcpu); /* Save timer-related state to vCPU context */ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT); -- Gitee From 54028d7910a766c5db01765fc6d6deaafb03f852 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 19 Dec 2023 10:48:28 +0800 Subject: [PATCH 0175/2138] LoongArch: KVM: Remove kvm_acquire_timer() before entering guest ANBZ: #8436 commit 1ab9c6099495f79bfbcd6058d02d7556034a89b0 upstream. Timer emulation method in VM is switch to SW timer, there are two places where timer emulation is needed. One is during vcpu thread context switch, the other is halt-polling with idle instruction emulation. SW timer switching is removed during halt-polling mode, so it is not necessary to disable SW timer before entering to guest. This patch removes SW timer handling before entering guest mode, and put it in HW timer restoring flow when vcpu thread is sched-in. With this patch, vm timer emulation is simpler, there is SW/HW timer switch only in vcpu thread context switch scenario. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/include/asm/kvm_vcpu.h | 1 - arch/loongarch/kvm/timer.c | 22 ++++++-------------- arch/loongarch/kvm/vcpu.c | 29 --------------------------- 3 files changed, 6 insertions(+), 46 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 553cfa2b2b1c..0e87652f780a 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -55,7 +55,6 @@ void kvm_save_fpu(struct loongarch_fpu *fpu); void kvm_restore_fpu(struct loongarch_fpu *fpu); void kvm_restore_fcsr(struct loongarch_fpu *fpu); -void kvm_acquire_timer(struct kvm_vcpu *vcpu); void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); void kvm_reset_timer(struct kvm_vcpu *vcpu); void kvm_save_timer(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index d6d5bcea349b..d362d87a54aa 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -64,19 +64,6 @@ void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz) kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0); } -/* - * Restore hard timer state and enable guest to access timer registers - * without trap, should be called with irq disabled - */ -void kvm_acquire_timer(struct kvm_vcpu *vcpu) -{ - /* - * Freeze the soft-timer and sync the guest stable timer with it. We do - * this with interrupts disabled to avoid latency. - */ - hrtimer_cancel(&vcpu->arch.swtimer); -} - /* * Restore soft timer state from saved context. */ @@ -98,6 +85,11 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu) return; } + /* + * Freeze the soft-timer and sync the guest stable timer with it. 
+ */ + hrtimer_cancel(&vcpu->arch.swtimer); + /* * Set remainder tick value if not expired */ @@ -115,7 +107,7 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu) /* * Inject timer here though sw timer should inject timer * interrupt async already, since sw timer may be cancelled - * during injecting intr async in function kvm_acquire_timer + * during injecting intr async */ kvm_queue_irq(vcpu, INT_TI); } @@ -140,11 +132,9 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu) vcpu->arch.expire = expire; if (ticks) { /* - * Update hrtimer to use new timeout * HRTIMER_MODE_PINNED is suggested since vcpu may run in * the same physical cpu in next time */ - hrtimer_cancel(&vcpu->arch.swtimer); hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED); } else if (vcpu->stat.generic.blocking) { /* diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 54f544b30f32..53fcef8b24a1 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -95,7 +95,6 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) * check vmid before vcpu enter guest */ local_irq_disable(); - kvm_acquire_timer(vcpu); kvm_deliver_intr(vcpu); kvm_deliver_exception(vcpu); /* Make sure the vcpu mode has been written */ @@ -251,23 +250,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, return -EINVAL; } -/** - * kvm_migrate_count() - Migrate timer. - * @vcpu: Virtual CPU. - * - * Migrate hrtimer to the current CPU by cancelling and restarting it - * if the hrtimer is active. - * - * Must be called when the vCPU is migrated to a different CPU, so that - * the timer can interrupt the guest at the new CPU, and the timer irq can - * be delivered to the vCPU. 
- */ -static void kvm_migrate_count(struct kvm_vcpu *vcpu) -{ - if (hrtimer_cancel(&vcpu->arch.swtimer)) - hrtimer_restart(&vcpu->arch.swtimer); -} - static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) { unsigned long gintc; @@ -796,17 +778,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) unsigned long flags; local_irq_save(flags); - if (vcpu->arch.last_sched_cpu != cpu) { - kvm_debug("[%d->%d]KVM vCPU[%d] switch\n", - vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); - /* - * Migrate the timer interrupt to the current CPU so that it - * always interrupts the guest and synchronously triggers a - * guest timer interrupt. - */ - kvm_migrate_count(vcpu); - } - /* Restore guest state to registers */ _kvm_vcpu_load(vcpu, cpu); local_irq_restore(flags); -- Gitee From cc129732e9cbba0ff076a8c29462265ecb1c6b3e Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 19 Dec 2023 10:48:28 +0800 Subject: [PATCH 0176/2138] LoongArch: KVM: Fix timer emulation with oneshot mode ANBZ: #8436 commit 5b3d524993ff1fb36089be850ccb121ac3296bcf upstream. When timer is fired in oneshot mode, CSR TVAL will be -1 rather than 0. There needs special handing for this situation. There are two scenarios when oneshot timer is fired. One scenario is that time is fired after exiting to host, CSR TVAL is set with 0 in order to inject hw interrupt, and -1 will assigned to CSR TVAL soon. The other situation is that timer is fired in VM and guest kernel is hanlding timer IRQ, IRQ is acked and is ready to set next expired timer value, then vm exits to host. Timer interrupt should not be inject at this point, else there will be spurious timer interrupt. Here hw timer irq status in CSR ESTAT is used to judge these two scenarios. If CSR TVAL is -1, the oneshot timer is fired; and if timer hw irq is on in CSR ESTAT register, it happens after exiting to host; else if timer hw irq is off, we think that it happens in vm and timer IRQ handler has already acked IRQ. 
With this patch, runltp with version ltp20230516 passes to run in vm. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/kvm/timer.c | 68 ++++++++++++++++++++++++++++++-------- 1 file changed, 55 insertions(+), 13 deletions(-) diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index d362d87a54aa..111328f60872 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -69,14 +69,19 @@ void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz) */ void kvm_restore_timer(struct kvm_vcpu *vcpu) { - unsigned long cfg, delta, period; + unsigned long cfg, estat; + unsigned long ticks, delta, period; ktime_t expire, now; struct loongarch_csrs *csr = vcpu->arch.csr; /* * Set guest stable timer cfg csr + * Disable timer before restore estat CSR register, avoid to + * get invalid timer interrupt for old timer cfg */ cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG); + + write_gcsr_timercfg(0); kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT); kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG); if (!(cfg & CSR_TCFG_EN)) { @@ -90,20 +95,47 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu) */ hrtimer_cancel(&vcpu->arch.swtimer); + /* + * From LoongArch Reference Manual Volume 1 Chapter 7.6.2 + * If oneshot timer is fired, CSR TVAL will be -1, there are two + * conditions: + * 1) timer is fired during exiting to host + * 2) timer is fired and vm is doing timer irq, and then exiting to + * host. 
Host should not inject timer irq to avoid spurious + * timer interrupt again + */ + ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL); + estat = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT); + if (!(cfg & CSR_TCFG_PERIOD) && (ticks > cfg)) { + /* + * Writing 0 to LOONGARCH_CSR_TVAL will inject timer irq + * and set CSR TVAL with -1 + */ + write_gcsr_timertick(0); + + /* + * Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR will clear + * timer interrupt, and CSR TVAL keeps unchanged with -1, it + * avoids spurious timer interrupt + */ + if (!(estat & CPU_TIMER)) + gcsr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR); + return; + } + /* * Set remainder tick value if not expired */ + delta = 0; now = ktime_get(); expire = vcpu->arch.expire; if (ktime_before(now, expire)) delta = ktime_to_tick(vcpu, ktime_sub(expire, now)); - else { - if (cfg & CSR_TCFG_PERIOD) { - period = cfg & CSR_TCFG_VAL; - delta = ktime_to_tick(vcpu, ktime_sub(now, expire)); - delta = period - (delta % period); - } else - delta = 0; + else if (cfg & CSR_TCFG_PERIOD) { + period = cfg & CSR_TCFG_VAL; + delta = ktime_to_tick(vcpu, ktime_sub(now, expire)); + delta = period - (delta % period); + /* * Inject timer here though sw timer should inject timer * interrupt async already, since sw timer may be cancelled @@ -122,15 +154,25 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu) */ static void _kvm_save_timer(struct kvm_vcpu *vcpu) { - unsigned long ticks, delta; + unsigned long ticks, delta, cfg; ktime_t expire; struct loongarch_csrs *csr = vcpu->arch.csr; + cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG); ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL); - delta = tick_to_ns(vcpu, ticks); - expire = ktime_add_ns(ktime_get(), delta); - vcpu->arch.expire = expire; - if (ticks) { + + /* + * From LoongArch Reference Manual Volume 1 Chapter 7.6.2 + * If period timer is fired, CSR TVAL will be reloaded from CSR TCFG + * If oneshot timer is fired, CSR TVAL will be -1 + * Here judge one-shot timer fired by 
checking whether TVAL is larger + * than TCFG + */ + if (ticks < cfg) { + delta = tick_to_ns(vcpu, ticks); + expire = ktime_add_ns(ktime_get(), delta); + vcpu->arch.expire = expire; + /* * HRTIMER_MODE_PINNED is suggested since vcpu may run in * the same physical cpu in next time -- Gitee From 7aabd0082a1d714694df3385781126c3778e2009 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Tue, 19 Dec 2023 10:48:28 +0800 Subject: [PATCH 0177/2138] LoongArch: KVM: Add LSX (128bit SIMD) support ANBZ: #8436 commit db1ecca22edf27c5a3dd66af406c88b5b5ac7cc1 upstream. This patch adds LSX (128bit SIMD) support for LoongArch KVM. There will be LSX exception in KVM when guest use the LSX instructions. KVM will enable LSX and restore the vector registers for guest and then return to guest to continue running. Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/include/asm/kvm_host.h | 15 +- arch/loongarch/include/asm/kvm_vcpu.h | 10 ++ arch/loongarch/include/uapi/asm/kvm.h | 1 + arch/loongarch/kernel/fpu.S | 1 + arch/loongarch/kvm/exit.c | 21 +++ arch/loongarch/kvm/switch.S | 16 ++ arch/loongarch/kvm/trace.h | 4 +- arch/loongarch/kvm/vcpu.c | 220 +++++++++++++++++++++++++- 8 files changed, 280 insertions(+), 8 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 0e89db020481..b0c5cdd8014c 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -95,8 +95,9 @@ enum emulation_result { }; #define KVM_LARCH_FPU (0x1 << 0) -#define KVM_LARCH_SWCSR_LATEST (0x1 << 1) -#define KVM_LARCH_HWCSR_USABLE (0x1 << 2) +#define KVM_LARCH_LSX (0x1 << 1) +#define KVM_LARCH_SWCSR_LATEST (0x1 << 2) +#define KVM_LARCH_HWCSR_USABLE (0x1 << 3) struct kvm_vcpu_arch { /* @@ -178,6 +179,16 @@ static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned csr->csrs[reg] = 
val; } +static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & CPUCFG2_FP; +} + +static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & CPUCFG2_LSX; +} + /* Debug: dump vcpu state */ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 0e87652f780a..db08dd46b525 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -55,6 +55,16 @@ void kvm_save_fpu(struct loongarch_fpu *fpu); void kvm_restore_fpu(struct loongarch_fpu *fpu); void kvm_restore_fcsr(struct loongarch_fpu *fpu); +#ifdef CONFIG_CPU_HAS_LSX +int kvm_own_lsx(struct kvm_vcpu *vcpu); +void kvm_save_lsx(struct loongarch_fpu *fpu); +void kvm_restore_lsx(struct loongarch_fpu *fpu); +#else +static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { } +static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { } +static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { } +#endif + void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); void kvm_reset_timer(struct kvm_vcpu *vcpu); void kvm_save_timer(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index c6ad2ee6106c..923d0bd38294 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -79,6 +79,7 @@ struct kvm_fpu { #define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) #define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) #define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) +#define KVM_LOONGARCH_VCPU_CPUCFG 0 struct kvm_debug_exit_arch { }; diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index d53ab10f4644..a400924c0348 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -349,6 +349,7 @@ 
SYM_FUNC_START(_restore_lsx_upper) lsx_restore_all_upper a0 t0 t1 jr ra SYM_FUNC_END(_restore_lsx_upper) +EXPORT_SYMBOL(_restore_lsx_upper) SYM_FUNC_START(_init_lsx_upper) lsx_init_all_upper t1 diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index e708a1786d6b..676f7a3a335c 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -634,6 +634,11 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; + if (!kvm_guest_has_fpu(&vcpu->arch)) { + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + return RESUME_GUEST; + } + /* * If guest FPU not present, the FPU operation should have been * treated as a reserved instruction! @@ -650,6 +655,21 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +/* + * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root. + * @vcpu: Virtual CPU context. + * + * Handle when the guest attempts to use LSX when it is disabled in the root + * context. + */ +static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu) +{ + if (kvm_own_lsx(vcpu)) + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return RESUME_GUEST; +} + /* * LoongArch KVM callback handling for unimplemented guest exiting */ @@ -678,6 +698,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { [EXCCODE_TLBS] = kvm_handle_write_fault, [EXCCODE_TLBM] = kvm_handle_write_fault, [EXCCODE_FPDIS] = kvm_handle_fpu_disabled, + [EXCCODE_LSXDIS] = kvm_handle_lsx_disabled, [EXCCODE_GSPR] = kvm_handle_gspr, }; diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S index 0ed9040307b7..00fbf772d16f 100644 --- a/arch/loongarch/kvm/switch.S +++ b/arch/loongarch/kvm/switch.S @@ -245,6 +245,22 @@ SYM_FUNC_START(kvm_restore_fpu) jr ra SYM_FUNC_END(kvm_restore_fpu) +#ifdef CONFIG_CPU_HAS_LSX +SYM_FUNC_START(kvm_save_lsx) + fpu_save_csr a0 t1 + fpu_save_cc a0 t1 t2 + lsx_save_data a0 t1 + jr ra +SYM_FUNC_END(kvm_save_lsx) + +SYM_FUNC_START(kvm_restore_lsx) + 
lsx_restore_data a0 t1 + fpu_restore_cc a0 t1 t2 + fpu_restore_csr a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_restore_lsx) +#endif + .section ".rodata" SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry) SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest) diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h index a1e35d655418..7da4e230e896 100644 --- a/arch/loongarch/kvm/trace.h +++ b/arch/loongarch/kvm/trace.h @@ -102,6 +102,7 @@ TRACE_EVENT(kvm_exit_gspr, #define KVM_TRACE_AUX_DISCARD 4 #define KVM_TRACE_AUX_FPU 1 +#define KVM_TRACE_AUX_LSX 2 #define kvm_trace_symbol_aux_op \ { KVM_TRACE_AUX_SAVE, "save" }, \ @@ -111,7 +112,8 @@ TRACE_EVENT(kvm_exit_gspr, { KVM_TRACE_AUX_DISCARD, "discard" } #define kvm_trace_symbol_aux_state \ - { KVM_TRACE_AUX_FPU, "FPU" } + { KVM_TRACE_AUX_FPU, "FPU" }, \ + { KVM_TRACE_AUX_LSX, "LSX" } TRACE_EVENT(kvm_aux, TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 53fcef8b24a1..80487d177ca4 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -298,6 +298,69 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) return ret; } +static int _kvm_get_cpucfg(int id, u64 *v) +{ + int ret = 0; + + if (id < 0 && id >= KVM_MAX_CPUCFG_REGS) + return -EINVAL; + + switch (id) { + case 2: + /* Return CPUCFG2 features which have been supported by KVM */ + *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | + CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV | + CPUCFG2_LAM; + /* + * If LSX is supported by CPU, it is also supported by KVM, + * as we implement it. 
+ */ + if (cpu_has_lsx) + *v |= CPUCFG2_LSX; + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int kvm_check_cpucfg(int id, u64 val) +{ + u64 mask; + int ret = 0; + + if (id < 0 && id >= KVM_MAX_CPUCFG_REGS) + return -EINVAL; + + if (_kvm_get_cpucfg(id, &mask)) + return ret; + + switch (id) { + case 2: + /* CPUCFG2 features checking */ + if (val & ~mask) + /* The unsupported features should not be set */ + ret = -EINVAL; + else if (!(val & CPUCFG2_LLFTP)) + /* The LLFTP must be set, as guest must has a constant timer */ + ret = -EINVAL; + else if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP))) + /* Single and double float point must both be set when enable FP */ + ret = -EINVAL; + else if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP)) + /* FP should be set when enable LSX */ + ret = -EINVAL; + else if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX)) + /* LSX, FP should be set when enable LASX, and FP has been checked before. */ + ret = -EINVAL; + break; + default: + break; + } + return ret; +} + static int kvm_get_one_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg, u64 *v) { @@ -367,10 +430,10 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu, break; case KVM_REG_LOONGARCH_CPUCFG: id = KVM_GET_IOC_CPUCFG_IDX(reg->id); - if (id >= 0 && id < KVM_MAX_CPUCFG_REGS) - vcpu->arch.cpucfg[id] = (u32)v; - else - ret = -EINVAL; + ret = kvm_check_cpucfg(id, v); + if (ret) + break; + vcpu->arch.cpucfg[id] = (u32)v; break; case KVM_REG_LOONGARCH_KVM: switch (reg->id) { @@ -460,10 +523,94 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, return -EINVAL; } +static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case 2: + return 0; + default: + return -ENXIO; + } + + return -ENXIO; +} + +static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->group) { + case 
KVM_LOONGARCH_VCPU_CPUCFG: + ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr); + break; + default: + break; + } + + return ret; +} + +static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = 0; + uint64_t val; + uint64_t __user *uaddr = (uint64_t __user *)attr->addr; + + ret = _kvm_get_cpucfg(attr->attr, &val); + if (ret) + return ret; + + put_user(val, uaddr); + + return ret; +} + +static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->group) { + case KVM_LOONGARCH_VCPU_CPUCFG: + ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr); + break; + default: + break; + } + + return ret; +} + +static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + return -ENXIO; +} + +static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int ret = -ENXIO; + + switch (attr->group) { + case KVM_LOONGARCH_VCPU_CPUCFG: + ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr); + break; + default: + break; + } + + return ret; +} + long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { long r; + struct kvm_device_attr attr; void __user *argp = (void __user *)arg; struct kvm_vcpu *vcpu = filp->private_data; @@ -503,6 +650,27 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); break; } + case KVM_HAS_DEVICE_ATTR: { + r = -EFAULT; + if (copy_from_user(&attr, argp, sizeof(attr))) + break; + r = kvm_loongarch_vcpu_has_attr(vcpu, &attr); + break; + } + case KVM_GET_DEVICE_ATTR: { + r = -EFAULT; + if (copy_from_user(&attr, argp, sizeof(attr))) + break; + r = kvm_loongarch_vcpu_get_attr(vcpu, &attr); + break; + } + case KVM_SET_DEVICE_ATTR: { + r = -EFAULT; + if (copy_from_user(&attr, argp, sizeof(attr))) + break; + r = kvm_loongarch_vcpu_set_attr(vcpu, &attr); + break; + } default: r = -ENOIOCTLCMD; break; @@ -550,12 +718,54 
@@ void kvm_own_fpu(struct kvm_vcpu *vcpu) preempt_enable(); } +#ifdef CONFIG_CPU_HAS_LSX +/* Enable LSX and restore context */ +int kvm_own_lsx(struct kvm_vcpu *vcpu) +{ + if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) + return -EINVAL; + + preempt_disable(); + + /* Enable LSX for guest */ + set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN); + switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { + case KVM_LARCH_FPU: + /* + * Guest FPU state already loaded, + * only restore upper LSX state + */ + _restore_lsx_upper(&vcpu->arch.fpu); + break; + default: + /* Neither FP or LSX already active, + * restore full LSX state + */ + kvm_restore_lsx(&vcpu->arch.fpu); + break; + } + + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX); + vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; + preempt_enable(); + + return 0; +} +#endif + /* Save context and disable FPU */ void kvm_lose_fpu(struct kvm_vcpu *vcpu) { preempt_disable(); - if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { + if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { + kvm_save_lsx(&vcpu->arch.fpu); + vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX); + + /* Disable LSX & FPU */ + clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN); + } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { kvm_save_fpu(&vcpu->arch.fpu); vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); -- Gitee From 7b333c04857046ce869a35f76897c015e2cac014 Mon Sep 17 00:00:00 2001 From: Tianrui Zhao Date: Tue, 19 Dec 2023 10:48:28 +0800 Subject: [PATCH 0178/2138] LoongArch: KVM: Add LASX (256bit SIMD) support ANBZ: #8436 commit 118e10cd893d57df55b3302dfd188a981b6e6d1c upstream. This patch adds LASX (256bit SIMD) support for LoongArch KVM. There will be LASX exception in KVM when guest use the LASX instructions. KVM will enable LASX and restore the vector registers for guest and then return to guest to continue running. 
Reviewed-by: Bibo Mao Signed-off-by: Tianrui Zhao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/include/asm/kvm_host.h | 10 ++++-- arch/loongarch/include/asm/kvm_vcpu.h | 10 ++++++ arch/loongarch/kernel/fpu.S | 1 + arch/loongarch/kvm/exit.c | 16 +++++++++ arch/loongarch/kvm/switch.S | 15 ++++++++ arch/loongarch/kvm/trace.h | 4 ++- arch/loongarch/kvm/vcpu.c | 51 ++++++++++++++++++++++++++- 7 files changed, 103 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index b0c5cdd8014c..5bdb34b2c5d6 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -96,8 +96,9 @@ enum emulation_result { #define KVM_LARCH_FPU (0x1 << 0) #define KVM_LARCH_LSX (0x1 << 1) -#define KVM_LARCH_SWCSR_LATEST (0x1 << 2) -#define KVM_LARCH_HWCSR_USABLE (0x1 << 3) +#define KVM_LARCH_LASX (0x1 << 2) +#define KVM_LARCH_SWCSR_LATEST (0x1 << 3) +#define KVM_LARCH_HWCSR_USABLE (0x1 << 4) struct kvm_vcpu_arch { /* @@ -189,6 +190,11 @@ static inline bool kvm_guest_has_lsx(struct kvm_vcpu_arch *arch) return arch->cpucfg[2] & CPUCFG2_LSX; } +static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & CPUCFG2_LASX; +} + /* Debug: dump vcpu state */ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index db08dd46b525..e71ceb88f29e 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -65,6 +65,16 @@ static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { } static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { } #endif +#ifdef CONFIG_CPU_HAS_LASX +int kvm_own_lasx(struct kvm_vcpu *vcpu); +void kvm_save_lasx(struct loongarch_fpu *fpu); +void kvm_restore_lasx(struct loongarch_fpu *fpu); +#else +static inline int 
kvm_own_lasx(struct kvm_vcpu *vcpu) { } +static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { } +static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { } +#endif + void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); void kvm_reset_timer(struct kvm_vcpu *vcpu); void kvm_save_timer(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index a400924c0348..4382e36ae3d4 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -385,6 +385,7 @@ SYM_FUNC_START(_restore_lasx_upper) lasx_restore_all_upper a0 t0 t1 jr ra SYM_FUNC_END(_restore_lasx_upper) +EXPORT_SYMBOL(_restore_lasx_upper) SYM_FUNC_START(_init_lasx_upper) lasx_init_all_upper t1 diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 676f7a3a335c..ed1d89d53e2e 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -670,6 +670,21 @@ static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +/* + * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root. + * @vcpu: Virtual CPU context. + * + * Handle when the guest attempts to use LASX when it is disabled in the root + * context. 
+ */ +static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) +{ + if (kvm_own_lasx(vcpu)) + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return RESUME_GUEST; +} + /* * LoongArch KVM callback handling for unimplemented guest exiting */ @@ -699,6 +714,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { [EXCCODE_TLBM] = kvm_handle_write_fault, [EXCCODE_FPDIS] = kvm_handle_fpu_disabled, [EXCCODE_LSXDIS] = kvm_handle_lsx_disabled, + [EXCCODE_LASXDIS] = kvm_handle_lasx_disabled, [EXCCODE_GSPR] = kvm_handle_gspr, }; diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S index 00fbf772d16f..ba976509bfe8 100644 --- a/arch/loongarch/kvm/switch.S +++ b/arch/loongarch/kvm/switch.S @@ -261,6 +261,21 @@ SYM_FUNC_START(kvm_restore_lsx) SYM_FUNC_END(kvm_restore_lsx) #endif +#ifdef CONFIG_CPU_HAS_LASX +SYM_FUNC_START(kvm_save_lasx) + fpu_save_csr a0 t1 + fpu_save_cc a0 t1 t2 + lasx_save_data a0 t1 + jr ra +SYM_FUNC_END(kvm_save_lasx) + +SYM_FUNC_START(kvm_restore_lasx) + lasx_restore_data a0 t1 + fpu_restore_cc a0 t1 t2 + fpu_restore_csr a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_restore_lasx) +#endif .section ".rodata" SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry) SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest) diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h index 7da4e230e896..c2484ad4cffa 100644 --- a/arch/loongarch/kvm/trace.h +++ b/arch/loongarch/kvm/trace.h @@ -103,6 +103,7 @@ TRACE_EVENT(kvm_exit_gspr, #define KVM_TRACE_AUX_FPU 1 #define KVM_TRACE_AUX_LSX 2 +#define KVM_TRACE_AUX_LASX 3 #define kvm_trace_symbol_aux_op \ { KVM_TRACE_AUX_SAVE, "save" }, \ @@ -113,7 +114,8 @@ TRACE_EVENT(kvm_exit_gspr, #define kvm_trace_symbol_aux_state \ { KVM_TRACE_AUX_FPU, "FPU" }, \ - { KVM_TRACE_AUX_LSX, "LSX" } + { KVM_TRACE_AUX_LSX, "LSX" }, \ + { KVM_TRACE_AUX_LASX, "LASX" } TRACE_EVENT(kvm_aux, TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, diff --git a/arch/loongarch/kvm/vcpu.c 
b/arch/loongarch/kvm/vcpu.c index 80487d177ca4..27701991886d 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -317,6 +317,13 @@ static int _kvm_get_cpucfg(int id, u64 *v) */ if (cpu_has_lsx) *v |= CPUCFG2_LSX; + /* + * if LASX is supported by CPU, it is also supported by KVM, + * as we implement it. + */ + if (cpu_has_lasx) + *v |= CPUCFG2_LASX; + break; default: ret = -EINVAL; @@ -753,12 +760,54 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu) } #endif +#ifdef CONFIG_CPU_HAS_LASX +/* Enable LASX and restore context */ +int kvm_own_lasx(struct kvm_vcpu *vcpu) +{ + if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch)) + return -EINVAL; + + preempt_disable(); + + set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); + switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { + case KVM_LARCH_LSX: + case KVM_LARCH_LSX | KVM_LARCH_FPU: + /* Guest LSX state already loaded, only restore upper LASX state */ + _restore_lasx_upper(&vcpu->arch.fpu); + break; + case KVM_LARCH_FPU: + /* Guest FP state already loaded, only restore upper LSX & LASX state */ + _restore_lsx_upper(&vcpu->arch.fpu); + _restore_lasx_upper(&vcpu->arch.fpu); + break; + default: + /* Neither FP or LSX already active, restore full LASX state */ + kvm_restore_lasx(&vcpu->arch.fpu); + break; + } + + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX); + vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; + preempt_enable(); + + return 0; +} +#endif + /* Save context and disable FPU */ void kvm_lose_fpu(struct kvm_vcpu *vcpu) { preempt_disable(); - if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { + if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { + kvm_save_lasx(&vcpu->arch.fpu); + vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX); + + /* Disable LASX & LSX & FPU */ + clear_csr_euen(CSR_EUEN_FPEN | 
CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); + } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { kvm_save_lsx(&vcpu->arch.fpu); vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX); -- Gitee From 7f4456359c432b9d87cc846148eed2c1fd64689a Mon Sep 17 00:00:00 2001 From: WANG Xuerui Date: Fri, 23 Feb 2024 14:36:31 +0800 Subject: [PATCH 0179/2138] LoongArch: KVM: Fix input validation of _kvm_get_cpucfg() & kvm_check_cpucfg() ANBZ: #8436 commit 179af5751af59100305358ee0ee51eec9a7f3953 upstream. The range check for the CPUCFG ID is wrong (should have been a || instead of &&) and useless in effect, so fix the obvious mistake. Furthermore, the juggling of the temp return value is unnecessary, because it is semantically equivalent and more readable to just return at every switch case's end. This is done too to avoid potential bugs in the future related to the unwanted complexity. Also, the return value of _kvm_get_cpucfg is meant to be checked, but this was not done, so bad CPUCFG IDs wrongly fall back to the default case and 0 is incorrectly returned; check the return value to fix the UAPI behavior. While at it, also remove the redundant range check in kvm_check_cpucfg, because out-of-range CPUCFG IDs are already rejected by the -EINVAL as returned by _kvm_get_cpucfg(). 
Fixes: db1ecca22edf ("LoongArch: KVM: Add LSX (128bit SIMD) support") Fixes: 118e10cd893d ("LoongArch: KVM: Add LASX (256bit SIMD) support") Reviewed-by: Bibo Mao Signed-off-by: WANG Xuerui Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/kvm/vcpu.c | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 27701991886d..c8452aa5c11a 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -300,9 +300,7 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) static int _kvm_get_cpucfg(int id, u64 *v) { - int ret = 0; - - if (id < 0 && id >= KVM_MAX_CPUCFG_REGS) + if (id < 0 || id >= KVM_MAX_CPUCFG_REGS) return -EINVAL; switch (id) { @@ -324,32 +322,35 @@ static int _kvm_get_cpucfg(int id, u64 *v) if (cpu_has_lasx) *v |= CPUCFG2_LASX; - break; + return 0; default: - ret = -EINVAL; - break; + /* + * No restrictions on other valid CPUCFG IDs' values, but + * CPUCFG data is limited to 32 bits as the LoongArch ISA + * manual says (Volume 1, Section 2.2.10.5 "CPUCFG"). 
+ */ + *v = U32_MAX; + return 0; } - return ret; } static int kvm_check_cpucfg(int id, u64 val) { - u64 mask; - int ret = 0; - - if (id < 0 && id >= KVM_MAX_CPUCFG_REGS) - return -EINVAL; + int ret; + u64 mask = 0; - if (_kvm_get_cpucfg(id, &mask)) + ret = _kvm_get_cpucfg(id, &mask); + if (ret) return ret; + if (val & ~mask) + /* Unsupported features and/or the higher 32 bits should not be set */ + return -EINVAL; + switch (id) { case 2: /* CPUCFG2 features checking */ - if (val & ~mask) - /* The unsupported features should not be set */ - ret = -EINVAL; - else if (!(val & CPUCFG2_LLFTP)) + if (!(val & CPUCFG2_LLFTP)) /* The LLFTP must be set, as guest must has a constant timer */ ret = -EINVAL; else if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP))) -- Gitee From 5fbd6706b9e44be2b565c7c4c62a10019f1c3cdb Mon Sep 17 00:00:00 2001 From: WANG Xuerui Date: Fri, 23 Feb 2024 14:36:31 +0800 Subject: [PATCH 0180/2138] LoongArch: KVM: Rename _kvm_get_cpucfg() to _kvm_get_cpucfg_mask() ANBZ: #8436 commit ec83f39d2b078d6dd029bbde601835b5368fc886 upstream. The function is not actually a getter of guest CPUCFG, but rather validation of the input CPUCFG ID plus information about the supported bit flags of that CPUCFG leaf. So rename it to avoid confusion. 
Reviewed-by: Bibo Mao Signed-off-by: WANG Xuerui Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/kvm/vcpu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index c8452aa5c11a..98c4290af9c4 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -298,7 +298,7 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) return ret; } -static int _kvm_get_cpucfg(int id, u64 *v) +static int _kvm_get_cpucfg_mask(int id, u64 *v) { if (id < 0 || id >= KVM_MAX_CPUCFG_REGS) return -EINVAL; @@ -339,7 +339,7 @@ static int kvm_check_cpucfg(int id, u64 val) int ret; u64 mask = 0; - ret = _kvm_get_cpucfg(id, &mask); + ret = _kvm_get_cpucfg_mask(id, &mask); if (ret) return ret; @@ -567,7 +567,7 @@ static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu, uint64_t val; uint64_t __user *uaddr = (uint64_t __user *)attr->addr; - ret = _kvm_get_cpucfg(attr->attr, &val); + ret = _kvm_get_cpucfg_mask(attr->attr, &val); if (ret) return ret; -- Gitee From 2e9bc09d764590e23d4f2b2257c5176eab0b20c8 Mon Sep 17 00:00:00 2001 From: WANG Xuerui Date: Fri, 23 Feb 2024 14:36:31 +0800 Subject: [PATCH 0181/2138] LoongArch: KVM: Streamline kvm_check_cpucfg() and improve comments ANBZ: #8436 commit f0f5c4894f89bac9074b45bccc447c3659a0fa6f upstream. All the checks currently done in kvm_check_cpucfg can be realized with early returns, so just do that to avoid extra cognitive burden related to the return value handling. While at it, clean up comments of _kvm_get_cpucfg_mask() and kvm_check_cpucfg(), by removing comments that are merely restatement of the code nearby, and paraphrasing the rest so they read more natural for English speakers (that likely are not familiar with the actual Chinese- influenced grammar). No functional changes intended. 
Reviewed-by: Bibo Mao Signed-off-by: WANG Xuerui Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/kvm/vcpu.c | 42 +++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 98c4290af9c4..36106922b5d7 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -305,20 +305,16 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) switch (id) { case 2: - /* Return CPUCFG2 features which have been supported by KVM */ + /* CPUCFG2 features unconditionally supported by KVM */ *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV | CPUCFG2_LAM; /* - * If LSX is supported by CPU, it is also supported by KVM, - * as we implement it. + * For the ISA extensions listed below, if one is supported + * by the host, then it is also supported by KVM. */ if (cpu_has_lsx) *v |= CPUCFG2_LSX; - /* - * if LASX is supported by CPU, it is also supported by KVM, - * as we implement it. - */ if (cpu_has_lasx) *v |= CPUCFG2_LASX; @@ -349,24 +345,26 @@ static int kvm_check_cpucfg(int id, u64 val) switch (id) { case 2: - /* CPUCFG2 features checking */ if (!(val & CPUCFG2_LLFTP)) - /* The LLFTP must be set, as guest must has a constant timer */ - ret = -EINVAL; - else if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP))) - /* Single and double float point must both be set when enable FP */ - ret = -EINVAL; - else if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP)) - /* FP should be set when enable LSX */ - ret = -EINVAL; - else if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX)) - /* LSX, FP should be set when enable LASX, and FP has been checked before. 
*/ - ret = -EINVAL; - break; + /* Guests must have a constant timer */ + return -EINVAL; + if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP))) + /* Single and double float point must both be set when FP is enabled */ + return -EINVAL; + if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP)) + /* LSX architecturally implies FP but val does not satisfy that */ + return -EINVAL; + if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX)) + /* LASX architecturally implies LSX and FP but val does not satisfy that */ + return -EINVAL; + return 0; default: - break; + /* + * Values for the other CPUCFG IDs are not being further validated + * besides the mask check above. + */ + return 0; } - return ret; } static int kvm_get_one_reg(struct kvm_vcpu *vcpu, -- Gitee From 32df6c31416857493553433893016e6e154b286e Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 26 Jan 2024 16:22:07 +0800 Subject: [PATCH 0182/2138] LoongArch: KVM: Add returns to SIMD stubs ANBZ: #8436 commit 48ef9e87b407f89f230f804815af7ac2031ec17a upstream. The stubs for kvm_own/lsx()/kvm_own_lasx() when CONFIG_CPU_HAS_LSX or CONFIG_CPU_HAS_LASX is not defined should have a return value since they return an int, so add "return -EINVAL;" to the stubs. 
Fixes the build error: In file included from ../arch/loongarch/include/asm/kvm_csr.h:12, from ../arch/loongarch/kvm/interrupt.c:8: ../arch/loongarch/include/asm/kvm_vcpu.h: In function 'kvm_own_lasx': ../arch/loongarch/include/asm/kvm_vcpu.h:73:39: error: no return statement in function returning non-void [-Werror=return-type] 73 | static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { } Fixes: db1ecca22edf ("LoongArch: KVM: Add LSX (128bit SIMD) support") Fixes: 118e10cd893d ("LoongArch: KVM: Add LASX (256bit SIMD) support") Signed-off-by: Randy Dunlap Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2818 --- arch/loongarch/include/asm/kvm_vcpu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index e71ceb88f29e..0cb4fdb8a9b5 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -60,7 +60,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu); void kvm_save_lsx(struct loongarch_fpu *fpu); void kvm_restore_lsx(struct loongarch_fpu *fpu); #else -static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { } +static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; } static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { } static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { } #endif @@ -70,7 +70,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu); void kvm_save_lasx(struct loongarch_fpu *fpu); void kvm_restore_lasx(struct loongarch_fpu *fpu); #else -static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { } +static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; } static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { } static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { } #endif -- Gitee From 4d5ac3553e45eff22bc80a25a0cc8a5a7e03623f Mon Sep 17 00:00:00 2001 From: zhangtianyang Date: Sat, 2 Mar 2024 
11:48:22 +0800 Subject: [PATCH 0183/2138] anolis: LoongArch: Adapted SECTION_SIZE_BITS with page size ANBZ: #8435 Signed-off-by: zhangtianyang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- arch/loongarch/include/asm/sparsemem.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/loongarch/include/asm/sparsemem.h b/arch/loongarch/include/asm/sparsemem.h index 8d4af6aff8a8..1f331ee584ef 100644 --- a/arch/loongarch/include/asm/sparsemem.h +++ b/arch/loongarch/include/asm/sparsemem.h @@ -8,7 +8,7 @@ * SECTION_SIZE_BITS 2^N: how big each section will be * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space */ -#define SECTION_SIZE_BITS 29 /* 2^29 = Largest Huge Page Size */ +#define SECTION_SIZE_BITS 28 #define MAX_PHYSMEM_BITS 48 #ifdef CONFIG_SPARSEMEM_VMEMMAP -- Gitee From 3b02cfdc7e896f2c0de70aebef9271cc406b864c Mon Sep 17 00:00:00 2001 From: Jianmin Lv Date: Sat, 2 Mar 2024 11:48:23 +0800 Subject: [PATCH 0184/2138] anolis: LoongArch: Remove generic irq migration ANBZ: #8435 Signed-off-by: Jianmin Lv Signed-off-by: Juxin Gao Signed-off-by: Ming Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- arch/loongarch/Kconfig | 1 - arch/loongarch/include/asm/irq.h | 1 + arch/loongarch/kernel/irq.c | 36 ++++++++++++++++++++++++++++++++ arch/loongarch/kernel/smp.c | 3 +-- kernel/irq/Kconfig | 4 ++-- 5 files changed, 40 insertions(+), 5 deletions(-) diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 54a169dee80f..bad326ae58f2 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -428,7 +428,6 @@ config SMP config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" depends on SMP - select GENERIC_IRQ_MIGRATION help Say Y here to allow turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. 
diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index 722eb1aa726f..ed8e72db0dba 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -119,6 +119,7 @@ extern struct fwnode_handle *pch_lpc_handle; extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; extern irqreturn_t loongson_ipi_interrupt(int irq, void *dev); +extern void fixup_irqs(void); #include diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index 57b4720ddd87..ebcdb573104a 100644 --- a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -96,6 +96,42 @@ static int __init get_ipi_irq(void) return -EINVAL; } +#ifdef CONFIG_HOTPLUG_CPU +static void handle_irq_affinity(void) +{ + struct irq_desc *desc; + struct irq_chip *chip; + unsigned int irq; + unsigned long flags; + struct cpumask *affinity; + + for_each_active_irq(irq) { + desc = irq_to_desc(irq); + if (!desc) + continue; + + raw_spin_lock_irqsave(&desc->lock, flags); + + affinity = desc->irq_data.common->affinity; + if (!cpumask_intersects(affinity, cpu_online_mask)) + cpumask_copy(affinity, cpu_online_mask); + + chip = irq_data_get_irq_chip(&desc->irq_data); + if (chip && chip->irq_set_affinity) + chip->irq_set_affinity(&desc->irq_data, + desc->irq_data.common->affinity, true); + raw_spin_unlock_irqrestore(&desc->lock, flags); + } +} + +void fixup_irqs(void) +{ + handle_irq_affinity(); + irq_cpu_offline(); + clear_csr_ecfg(ECFG0_IM); +} +#endif + void __init init_IRQ(void) { int i, ret; diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index fc716e15415f..b6cdde7aad69 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -374,8 +374,7 @@ int loongson_cpu_disable(void) clear_cpu_sibling_map(cpu); calculate_cpu_foreign_map(); local_irq_save(flags); - irq_migrate_all_off_this_cpu(); - clear_csr_ecfg(ECFG0_IM); + fixup_irqs(); local_irq_restore(flags); local_flush_tlb_all(); diff --git a/kernel/irq/Kconfig 
b/kernel/irq/Kconfig index 2531f3496ab6..8a65b0c34b5a 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -148,5 +148,5 @@ config GENERIC_IRQ_MULTI_HANDLER # Do not even think of enabling this on any new platform config DEPRECATED_IRQ_CPU_ONOFFLINE bool - depends on CAVIUM_OCTEON_SOC - default CAVIUM_OCTEON_SOC + depends on CAVIUM_OCTEON_SOC || LOONGARCH + default CAVIUM_OCTEON_SOC || LOONGARCH -- Gitee From 9c714301cccc9568f0199a1f51a72f65b13499bb Mon Sep 17 00:00:00 2001 From: Chong Qiao Date: Sat, 2 Mar 2024 11:48:24 +0800 Subject: [PATCH 0185/2138] anolis: irqchip/loongson-pch-pic: 7a1000 int_clear reg must use 64bit write. ANBZ: #8435 Signed-off-by: Chong Qiao Signed-off-by: Hongchen Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- drivers/irqchip/irq-loongson-pch-pic.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index 372215f2b9ed..3b150b6121fc 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -33,6 +33,10 @@ #define PIC_COUNT (PIC_COUNT_PER_REG * PIC_REG_COUNT) #define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG) #define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG) +#define PIC_COUNT_PER_REG64 64 +#define PIC_REG64_COUNT 1 +#define PIC_REG64_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG64) +#define PIC_REG64_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG64) static int nr_pics; @@ -93,8 +97,8 @@ static void pch_pic_unmask_irq(struct irq_data *d) { struct pch_pic *priv = irq_data_get_irq_chip_data(d); - writel(BIT(PIC_REG_BIT(d->hwirq)), - priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4); + writeq(BIT(PIC_REG64_BIT(d->hwirq)), + priv->base + PCH_PIC_CLR + PIC_REG64_IDX(d->hwirq) * 8); irq_chip_unmask_parent(d); pch_pic_bitclr(priv, PCH_PIC_MASK, d->hwirq); @@ -141,8 +145,8 @@ static void pch_pic_ack_irq(struct 
irq_data *d) reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4); if (reg & BIT(PIC_REG_BIT(d->hwirq))) { - writel(BIT(PIC_REG_BIT(d->hwirq)), - priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4); + writeq(BIT(PIC_REG64_BIT(d->hwirq)), + priv->base + PCH_PIC_CLR + PIC_REG64_IDX(d->hwirq) * 8); } irq_chip_ack_parent(d); } @@ -235,13 +239,15 @@ static void pch_pic_reset(struct pch_pic *priv) for (i = 0; i < PIC_REG_COUNT; i++) { /* Clear IRQ cause registers, mask all interrupts */ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_MASK + 4 * i); - writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_CLR + 4 * i); /* Clear auto bounce, we don't need that */ writel_relaxed(0, priv->base + PCH_PIC_AUTO0 + 4 * i); writel_relaxed(0, priv->base + PCH_PIC_AUTO1 + 4 * i); /* Enable HTMSI transformer */ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_HTMSI_EN + 4 * i); } + + for (i = 0; i < PIC_REG64_COUNT; i++) + writeq_relaxed((u64)-1, priv->base + PCH_PIC_CLR + 8 * i); } static int pch_pic_suspend(void) -- Gitee From 24d5adf7ffd62d49d3cc1fe815e815b6db759a9f Mon Sep 17 00:00:00 2001 From: Ming Wang Date: Mon, 4 Mar 2024 13:57:13 +0800 Subject: [PATCH 0186/2138] anolis: LoongArch: kdump: Add memory reservation for old kernel ANBZ: #8435 After moving the old kernel memory reservation to elfcorehdr operation, avoid the elfcorehdr space from being destroyed. 
Signed-off-by: Youling Tang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- arch/loongarch/kernel/setup.c | 45 +++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 83d8e7662b06..7c2622ce69ea 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -72,6 +72,8 @@ EXPORT_SYMBOL(cpu_data); struct loongson_board_info b_info; static const char dmi_empty_string[] = " "; +static phys_addr_t crashmem_start, crashmem_size; + /* * Setup information * @@ -209,16 +211,6 @@ static int __init early_parse_mem(char *p) return -EINVAL; } - /* - * If a user specifies memory size, we - * blow away any automatically generated - * size. - */ - if (usermem == 0) { - usermem = 1; - memblock_remove(memblock_start_of_DRAM(), - memblock_end_of_DRAM() - memblock_start_of_DRAM()); - } start = 0; size = memparse(p, &p); if (*p == '@') @@ -228,6 +220,23 @@ static int __init early_parse_mem(char *p) return -EINVAL; } + /* + * If a user specifies memory size, we + * blow away any automatically generated + * size. + */ + if (usermem == 0) { + usermem = 1; + if (!strstr(boot_command_line, "elfcorehdr")) { + memblock_remove(memblock_start_of_DRAM(), + memblock_end_of_DRAM() - memblock_start_of_DRAM()); + } else { + crashmem_start = start; + crashmem_size = size; + return 0; + } + } + if (!IS_ENABLED(CONFIG_NUMA)) memblock_add(start, size); else @@ -372,10 +381,26 @@ static void __init bootcmdline_init(char **cmdline_p) *cmdline_p = boot_command_line; } +/* + * After the kdump operation is performed to enter the capture kernel, the + * memory area used by the previous production kernel should be reserved to + * avoid destroy to the captured data. 
+ */ +static void reserve_oldmem_region(void) +{ +#ifdef CONFIG_CRASH_DUMP + if (!is_kdump_kernel()) + return; + + memblock_cap_memory_range(crashmem_start, crashmem_size); +#endif +} + void __init platform_init(void) { arch_reserve_vmcore(); arch_parse_crashkernel(); + reserve_oldmem_region(); #ifdef CONFIG_ACPI_TABLE_UPGRADE acpi_table_upgrade(); -- Gitee From 3ea0bb8a7e887c6ddc9e340c623d5fd460f06499 Mon Sep 17 00:00:00 2001 From: Ming Wang Date: Mon, 4 Mar 2024 13:51:10 +0800 Subject: [PATCH 0187/2138] anolis: LoongArch: kexec: Add compatibility with old interfaces ANBZ: #8435 Old interface: a0 = argc, a1 = argv, a2 = bootparam New interface: a0 = efi flag, a1 = cmdline, a2 = systemtab The following interfaces are not supported: a0 = efi flag, a1 = fdt pointer, a2 = 0 Signed-off-by: Youling Tang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- arch/loongarch/kernel/machine_kexec.c | 45 +++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c index 2dcb9e003657..561706cb1e6d 100644 --- a/arch/loongarch/kernel/machine_kexec.c +++ b/arch/loongarch/kernel/machine_kexec.c @@ -59,6 +59,9 @@ static void kexec_image_info(const struct kimage *kimage) } } +#define MAX_ARGS 64 +#define KEXEC_CMDLINE_SIZE (COMMAND_LINE_SIZE * 2) + int machine_kexec_prepare(struct kimage *kimage) { int i; @@ -70,11 +73,49 @@ int machine_kexec_prepare(struct kimage *kimage) kimage->arch.efi_boot = fw_arg0; kimage->arch.systable_ptr = fw_arg2; + if (!fw_arg2) + pr_err("Small fdt mode is not supported!\n"); + /* Find the command line */ for (i = 0; i < kimage->nr_segments; i++) { if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) { - if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE)) - kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr; + if (fw_arg0 < 2) { + /* New 
firmware */ + if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE)) + kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr; + } else { + /* Old firmware */ + int argc = 0; + long offt; + char *ptr, *str; + unsigned long *argv; + + /* + * convert command line string to array + * of parameters (as bootloader does). + */ + argv = (unsigned long *)kmalloc(KEXEC_CMDLINE_SIZE, GFP_KERNEL); + argv[argc++] = (unsigned long)(KEXEC_CMDLINE_ADDR + KEXEC_CMDLINE_SIZE/2); + str = (char *)argv + KEXEC_CMDLINE_SIZE/2; + + if (copy_from_user(str, kimage->segment[i].buf, KEXEC_CMDLINE_SIZE/2)) + return -EINVAL; + + ptr = strchr(str, ' '); + + while (ptr && (argc < MAX_ARGS)) { + *ptr = '\0'; + if (ptr[1] != ' ') { + offt = (long)(ptr - str + 1); + argv[argc++] = (unsigned long)argv + KEXEC_CMDLINE_SIZE/2 + offt; + } + ptr = strchr(ptr + 1, ' '); + } + + kimage->arch.efi_boot = argc; + kimage->arch.cmdline_ptr = (unsigned long)argv; + break; + } break; } } -- Gitee From 44ed03c941f36f5be6b61840d0b5f474e00134b8 Mon Sep 17 00:00:00 2001 From: Ming Wang Date: Mon, 4 Mar 2024 13:51:45 +0800 Subject: [PATCH 0188/2138] anolis: LoongArch: Fix kdump failure on v40 interface specification ANBZ: #8435 The old memory should be reserved after efi_runtime_init() to avoid destroying the EFI space and causing failure when executing svam(). 
Fix the following problems when executing kdump: [ 0.000000] The BIOS Version: Loongson-UDK2018-V2.0.04082-beta7 [ 0.000000] CPU 0 Unable to handle kernel paging request at virtual address 00000000fdeb0e7c, era == 00000000fdeb0e7c, ra == 90000000dae6585c [ 0.000000] Oops[#1]: [ 0.000000] CPU: 0 PID: 0 Comm: swapper Not tainted 5.10.137+ #86 [ 0.000000] Hardware name: Loongson Loongson-3A5000-7A1000-1w-A2101/Loongson-LS3A5000-7A1000-1w-A2101, BIOS vUDK2018-LoongArch-V2.0.pre-beta8 06/15/2022 [ 0.000000] $ 0 : 0000000000000000 90000000dae6585c 90000000db200000 90000000db203840 [ 0.000000] $ 4 : 0000000000000078 0000000000000028 0000000000000001 00000000db203860 [ 0.000000] $ 8 : 0000000000000000 0000000000000040 90000000db203680 0000000000000000 [ 0.000000] $12 : 00000000fdeb0e7c ffffffffffffffc0 00000000fbffffff 0000000020000000 [ 0.000000] $16 : 000000000003e780 0000000020000000 90000000dad8c348 0000000000003fff [ 0.000000] $20 : 0000000000000018 90000000dad8bdd0 90000000db203850 0000000000000040 [ 0.000000] $24 : 000000000000000f 90000000db21a570 90000000daeb07a0 90000000db217000 [ 0.000000] $28 : 90000000db203858 0000000001ffffff 90000000db2171b0 0000000000000040 [ 0.000000] era : 00000000fdeb0e7c 0xfdeb0e7c [ 0.000000] ra : 90000000dae6585c set_virtual_map.isra.0+0x23c/0x394 [ 0.000000] CSR crmd: 90000000db21a570 [ 0.000000] CSR prmd: 00000000 [ 0.000000] CSR euen: 00000000 [ 0.000000] CSR ecfg: 90000000db203850 [ 0.000000] CSR estat: 90000000dae65800 [ 0.000000] ExcCode : 26 (SubCode 16b) [ 0.000000] PrId : 0014c012 (Loongson-64bit) [ 0.000000] Modules linked in: [ 0.000000] Process swapper (pid: 0, threadinfo=(____ptrval____), task=(____ptrval____)) [ 0.000000] Stack : 0000000000000001 00000000fdeb0e7c 0000000000036780 000000000003e780 [ 0.000000] 0000000000000006 0000000010000000 8000000010000000 0000000000010000 [ 0.000000] 8000000000000001 0000000000000005 00000000fde40000 90000000fde40000 [ 0.000000] 0000000000000100 800000000000000f 0000000000000006 
00000000fdf40000 [ 0.000000] 90000000fdf40000 0000000000000300 800000000000000f 00000000000000b0 [ 0.000000] 0000000000000001 90000000da094cf0 0000000000000000 ffffffffffffffea [ 0.000000] 90000000db2039b8 ffff0a1000000609 0000000000000035 0000000000000030 [ 0.000000] 90000000dad7b258 0000000000000400 00000000000000b0 ffff0a1000000609 [ 0.000000] 90000000db2039a8 90000000db095730 000000007fffffff ffff0a1000000609 [ 0.000000] 90000000db203a90 90000000db203a30 90000000db2039d8 90000000db09570b [ 0.000000] ... [ 0.000000] Call Trace: [ 0.000000] [ 0.000000] Code: (Bad address in era) [ 0.000000] [ 0.000000] Signed-off-by: Youling Tang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- arch/loongarch/kernel/setup.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 7c2622ce69ea..23b248f24695 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -398,10 +398,6 @@ static void reserve_oldmem_region(void) void __init platform_init(void) { - arch_reserve_vmcore(); - arch_parse_crashkernel(); - reserve_oldmem_region(); - #ifdef CONFIG_ACPI_TABLE_UPGRADE acpi_table_upgrade(); #endif @@ -439,6 +435,10 @@ static void __init check_kernel_sections_mem(void) */ static void __init arch_mem_init(char **cmdline_p) { + arch_reserve_vmcore(); + arch_parse_crashkernel(); + reserve_oldmem_region(); + if (usermem) pr_info("User-defined physical RAM map overwrite\n"); -- Gitee From 5586a1654f1d14e22b905363a70f6f1d074f7597 Mon Sep 17 00:00:00 2001 From: Ming Wang Date: Mon, 4 Mar 2024 13:52:31 +0800 Subject: [PATCH 0189/2138] anolis: LoongArch: kdump: Add high memory reservation ANBZ: #8435 Reserve high memory for the capture kernel to avoid kdump operation failure on 3C5000 machines with old firmware. 
Signed-off-by: Youling Tang Signed-off-by: Hongchen Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- arch/loongarch/kernel/setup.c | 49 ++++++++++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 3 deletions(-) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 23b248f24695..0f07b41f9e61 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -386,13 +386,49 @@ static void __init bootcmdline_init(char **cmdline_p) * memory area used by the previous production kernel should be reserved to * avoid destroy to the captured data. */ -static void reserve_oldmem_region(void) +static void reserve_oldmem_region(int node, unsigned long s0, unsigned long e0) { #ifdef CONFIG_CRASH_DUMP + unsigned long s1, e1; + if (!is_kdump_kernel()) return; - memblock_cap_memory_range(crashmem_start, crashmem_size); + if ((e0 - s0) > (SZ_1G >> PAGE_SHIFT)) + e0 = e0 - (SZ_512M >> PAGE_SHIFT); + + /* crashmem_start is crashk_res reserved by primary production kernel */ + s1 = PFN_UP(crashmem_start); + e1 = PFN_DOWN(crashmem_start + crashmem_size); + + if (s1 == 0) + return; + + if (node == 0) { + memblock_reserve(PFN_PHYS(s0), (s1 - s0) << PAGE_SHIFT); + memblock_reserve(PFN_PHYS(e1), (e0 - e1) << PAGE_SHIFT); + } else { + memblock_reserve(PFN_PHYS(s0), (e0 - s0) << PAGE_SHIFT); + } +#endif +} + +/* Traditionally, LoongArch's contiguous low memory is 256M, so crashkernel=X@Y is + * unable to be large enough in some cases. 
Thus, if the total memory of a node + * is more than 1GB, we reserve the top 512MB for the capture kernel + */ +static void reserve_crashm_region(int node, unsigned long s0, unsigned long e0) +{ +#ifdef CONFIG_KEXEC + if (crashk_res.start == crashk_res.end) + return; + + if ((e0 - s0) <= (SZ_1G >> PAGE_SHIFT)) + return; + + s0 = e0 - (SZ_512M >> PAGE_SHIFT); + + memblock_reserve(PFN_PHYS(s0), (e0 - s0) << PAGE_SHIFT); #endif } @@ -435,9 +471,16 @@ static void __init check_kernel_sections_mem(void) */ static void __init arch_mem_init(char **cmdline_p) { + unsigned int node; + unsigned long start_pfn, end_pfn; + arch_reserve_vmcore(); arch_parse_crashkernel(); - reserve_oldmem_region(); + for_each_online_node(node) { + get_pfn_range_for_nid(node, &start_pfn, &end_pfn); + reserve_crashm_region(node, start_pfn, end_pfn); + reserve_oldmem_region(node, start_pfn, end_pfn); + } if (usermem) pr_info("User-defined physical RAM map overwrite\n"); -- Gitee From 574ec1ffce50a303a026fb837fe5faf3c811ba5c Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sat, 2 Mar 2024 11:48:31 +0800 Subject: [PATCH 0190/2138] anolis: drm/radeon: Workaround radeon driver bug for Loongson ANBZ: #8435 Radeon driver can not handle the interrupt is faster than DMA data, so irq handler must update an old ih.rptr value in IH_RB_RPTR register to enable interrupt again when interrupt is faster than DMA data. 
Signed-off-by: Huacai Chen Signed-off-by: Zhijie Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- drivers/gpu/drm/radeon/cik.c | 3 +++ drivers/gpu/drm/radeon/evergreen.c | 3 +++ drivers/gpu/drm/radeon/r600.c | 3 +++ drivers/gpu/drm/radeon/si.c | 3 +++ 4 files changed, 12 insertions(+) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 341441b24183..c2d6b723aea8 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -8093,6 +8093,9 @@ int cik_irq_process(struct radeon_device *rdev) if (queue_thermal) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index a7f9fc2b5239..ca2cc4c6a5ba 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4922,6 +4922,9 @@ int evergreen_irq_process(struct radeon_device *rdev) if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 98d075c540e5..c7a9956a410d 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -4328,6 +4328,9 @@ int r600_irq_process(struct radeon_device *rdev) if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 312fe76944a9..5bf7e40bf354 100644 --- 
a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6442,6 +6442,9 @@ int si_irq_process(struct radeon_device *rdev) if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ -- Gitee From a995f2947a02a2196f8e5e6f6d2b60ca08e8f04b Mon Sep 17 00:00:00 2001 From: Hongchen Zhang Date: Sat, 2 Mar 2024 11:48:32 +0800 Subject: [PATCH 0191/2138] anolis: usb: xhci: add XHCI_NO_SOFT_RETRY quirk for EJ188 ANBZ: #8435 EJ188 has similar problems as the upstream commit a4a251f8c235 ("usb: xhci: do not perform Soft Retry for some xHCI hosts") so we add XHCI_NO_SOFT_RETRY quirk for it. Signed-off-by: Hongchen Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- drivers/usb/host/xhci-pci.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 340d9597d1ab..5e6521323c3f 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -463,6 +463,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_BROKEN_STREAMS; } + if (pdev->vendor == PCI_VENDOR_ID_ETRON && + pdev->device == PCI_DEVICE_ID_EJ188) + xhci->quirks |= XHCI_NO_SOFT_RETRY; + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0014) { xhci->quirks |= XHCI_ZERO_64B_REGS; -- Gitee From b900a339fa5732a478527cfbd94da70ad44fc112 Mon Sep 17 00:00:00 2001 From: Hongchen Zhang Date: Sat, 2 Mar 2024 11:48:33 +0800 Subject: [PATCH 0192/2138] anolis: net: stmmac: fix potential double free of dma descriptor resources ANBZ: #8435 Reset the dma descriptor related resource's pointer to NULL, otherwise a potential double free problem may be triggered: stmmac_open alloc_dma_desc_resources init_dma_desc_rings stmmac_hw_setup (Failed)
goto init_error; free_dma_desc_resources(priv); (DMA related resource pointer not reset to NULL) ... stmmac_open alloc_dma_desc_resources alloc_dma_tx_desc_resources (Failed) free_dma_tx_desc_resources (Double free of tx_q->tx_skbuff_dma tx_q->tx_skbuff) Signed-off-by: Hongchen Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d6ee90fef2ec..f649d1bd25c5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1926,13 +1926,18 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, dma_free_coherent(priv->device, dma_conf->dma_rx_size * sizeof(struct dma_extended_desc), rx_q->dma_erx, rx_q->dma_rx_phy); + rx_q->dma_rx = NULL; + rx_q->dma_erx = NULL; if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) xdp_rxq_info_unreg(&rx_q->xdp_rxq); kfree(rx_q->buf_pool); + rx_q->buf_pool = NULL; + if (rx_q->page_pool) page_pool_destroy(rx_q->page_pool); + rx_q->page_pool = NULL; } static void free_dma_rx_desc_resources(struct stmmac_priv *priv, @@ -1978,8 +1983,14 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); + tx_q->dma_etx = NULL; + tx_q->dma_entx = NULL; + tx_q->dma_tx = NULL; + kfree(tx_q->tx_skbuff_dma); + tx_q->tx_skbuff_dma = NULL; kfree(tx_q->tx_skbuff); + tx_q->tx_skbuff = NULL; } static void free_dma_tx_desc_resources(struct stmmac_priv *priv, -- Gitee From de10b8686029abbe3b2947fde098ffe694336bad Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Tue, 5 Mar 2024 14:28:47 +0800 Subject: [PATCH 0193/2138] anolis: Update config for loongarch ANBZ: #8435 Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2819 --- 
arch/loongarch/configs/loongson3_defconfig | 1569 ++++++++++++++++++-- 1 file changed, 1420 insertions(+), 149 deletions(-) diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 8b974a34bcc1..4e2867185627 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -4,8 +4,9 @@ CONFIG_POSIX_MQUEUE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BPF_SYSCALL=y -CONFIG_BPF_JIT=y -CONFIG_PREEMPT=y +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_IRQ_TIME_ACCOUNTING=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -19,6 +20,7 @@ CONFIG_BLK_CGROUP=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y @@ -34,42 +36,25 @@ CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_KALLSYMS_ALL=y -CONFIG_PERF_EVENTS=y -CONFIG_LOONGARCH=y -CONFIG_64BIT=y -CONFIG_MACH_LOONGSON64=y -CONFIG_PAGE_SIZE_16KB=y -CONFIG_HZ_250=y -CONFIG_DMI=y -CONFIG_EFI=y -CONFIG_SMP=y -CONFIG_HOTPLUG_CPU=y -CONFIG_NR_CPUS=64 +CONFIG_PROFILING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_NR_CPUS=256 CONFIG_NUMA=y -CONFIG_CPU_HAS_FPU=y CONFIG_CPU_HAS_LSX=y CONFIG_CPU_HAS_LASX=y -CONFIG_KEXEC=y -CONFIG_CRASH_DUMP=y CONFIG_RANDOMIZE_BASE=y -CONFIG_SUSPEND=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_LOONGSON3_ACPI_CPUFREQ=y CONFIG_HIBERNATION=y -CONFIG_ACPI=y CONFIG_ACPI_SPCR_TABLE=y CONFIG_ACPI_TAD=y CONFIG_ACPI_DOCK=y CONFIG_ACPI_IPMI=m -CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_PCI_SLOT=y -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_LOONGSON3_ACPI_CPUFREQ=y CONFIG_ACPI_HOTPLUG_MEMORY=y -CONFIG_EFI_ZBOOT=y -CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y -CONFIG_EFI_CAPSULE_LOADER=m -CONFIG_EFI_TEST=m CONFIG_VIRTUALIZATION=y CONFIG_KVM=m CONFIG_JUMP_LABEL=y @@ 
-78,36 +63,53 @@ CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG_SHA256=y +CONFIG_BLK_DEV_ZONED=y CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_WBT=y CONFIG_PARTITION_ADVANCED=y CONFIG_BSD_DISKLABEL=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_BINFMT_MISC=m -CONFIG_ZPOOL=y CONFIG_ZSWAP=y CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y CONFIG_ZBUD=y CONFIG_ZSMALLOC=m +CONFIG_Z3FOLD=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set # CONFIG_COMPAT_BRK is not set CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA=y +CONFIG_IDLE_PAGE_TRACKING=y CONFIG_USERFAULTFD=y CONFIG_NET=y CONFIG_PACKET=y -CONFIG_UNIX=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX_DIAG=m CONFIG_TLS=m CONFIG_TLS_DEVICE=y +CONFIG_TLS_TOE=y CONFIG_XFRM_USER=y -CONFIG_NET_KEY=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m CONFIG_XDP_SOCKETS=y -CONFIG_INET=y +CONFIG_XDP_SOCKETS_DIAG=m CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_MULTIPATH=y CONFIG_IP_ROUTE_VERBOSE=y @@ -123,27 +125,83 @@ CONFIG_IP_MROUTE=y CONFIG_IP_MROUTE_MULTIPLE_TABLES=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y +CONFIG_NET_IPVTI=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m CONFIG_INET_ESP=m -CONFIG_INET_UDP_DIAG=y +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_ESPINTCP=y +CONFIG_INET_IPCOMP=m +CONFIG_INET_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m 
+CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_ESPINTCP=y +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_RPL_LWTUNNEL=y +CONFIG_NETLABEL=y +CONFIG_MPTCP=y CONFIG_NETWORK_PHY_TIMESTAMPING=y CONFIG_NETFILTER=y CONFIG_BRIDGE_NETFILTER=m -CONFIG_NETFILTER_NETLINK_LOG=m CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m CONFIG_NFT_CONNLIMIT=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -156,22 +214,35 @@ CONFIG_NFT_QUOTA=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m CONFIG_NFT_OSF=m CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m 
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m CONFIG_NETFILTER_XT_TARGET_HMARK=m CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m CONFIG_NETFILTER_XT_TARGET_LED=m CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_SECMARK=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m @@ -187,7 +258,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m @@ -196,6 +266,7 @@ CONFIG_NETFILTER_XT_MATCH_HELPER=m CONFIG_NETFILTER_XT_MATCH_IPCOMP=m CONFIG_NETFILTER_XT_MATCH_IPRANGE=m CONFIG_NETFILTER_XT_MATCH_IPVS=m +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set CONFIG_NETFILTER_XT_MATCH_LENGTH=m CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m @@ -205,10 +276,12 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m @@ -217,8 +290,25 @@ CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m 
+CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m CONFIG_IP_VS=m CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y CONFIG_IP_VS_PROTO_TCP=y CONFIG_IP_VS_PROTO_UDP=y CONFIG_IP_VS_PROTO_ESP=y @@ -226,11 +316,24 @@ CONFIG_IP_VS_PROTO_AH=y CONFIG_IP_VS_PROTO_SCTP=y CONFIG_IP_VS_RR=m CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_NFCT=y -CONFIG_NF_TABLES_IPV4=y +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -251,18 +354,21 @@ CONFIG_IP_NF_SECURITY=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_TABLES_IPV6=y -CONFIG_IP6_NF_IPTABLES=y +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m CONFIG_IP6_NF_MATCH_FRAG=m CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_MATCH_SRH=m -CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m @@ -272,76 +378,212 @@ CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_CONNTRACK_BRIDGE=m 
CONFIG_BRIDGE_NF_EBTABLES=m CONFIG_BRIDGE_EBT_BROUTE=m CONFIG_BRIDGE_EBT_T_FILTER=m CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m CONFIG_BRIDGE_EBT_ARP=m CONFIG_BRIDGE_EBT_IP=m CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_BPFILTER=y -CONFIG_IP_SCTP=m -CONFIG_RDS=y +CONFIG_IP_DCCP=m +CONFIG_IP_DCCP_CCID2_DEBUG=y +CONFIG_IP_DCCP_CCID3_DEBUG=y +CONFIG_IP_DCCP_DEBUG=y +CONFIG_SCTP_DBG_OBJCNT=y +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +CONFIG_RDS_DEBUG=y +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_CLIP_NO_ICMP=y +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_ATM_BR2684_IPFILTER=y CONFIG_L2TP=m CONFIG_L2TP_V3=y CONFIG_L2TP_IP=m CONFIG_L2TP_ETH=m CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BRIDGE_MRP=y +CONFIG_NET_DSA=m +CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM=m +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +CONFIG_NET_DSA_TAG_MTK=m +CONFIG_NET_DSA_TAG_KSZ=m +CONFIG_NET_DSA_TAG_OCELOT=m +CONFIG_NET_DSA_TAG_QCA=m +CONFIG_NET_DSA_TAG_RTL4_A=m +CONFIG_NET_DSA_TAG_LAN9303=m +CONFIG_NET_DSA_TAG_SJA1105=m +CONFIG_NET_DSA_TAG_TRAILER=m CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m 
CONFIG_NET_SCHED=y CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_ETS=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_DEFAULT_FQ_CODEL=y CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m CONFIG_NET_CLS_FW=m CONFIG_NET_CLS_U32=m -CONFIG_NET_CLS_CGROUP=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_POLICE=m CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m CONFIG_NET_ACT_IPT=m CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_MPLS=m +CONFIG_NET_ACT_VLAN=m CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_CTINFO=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_ACT_GATE=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_DEBUG=y CONFIG_OPENVSWITCH=m CONFIG_VSOCKETS=m 
CONFIG_VIRTIO_VSOCKETS=m -CONFIG_NETLINK_DIAG=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_HSR=m +CONFIG_QRTR=m +CONFIG_QRTR_TUN=m +CONFIG_NET_NCSI=y +CONFIG_NCSI_OEM_CMD_GET_MAC=y CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_PKTGEN=m +CONFIG_CAN=m CONFIG_BT=m CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m CONFIG_BT_BNEP_MC_FILTER=y CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m CONFIG_BT_HIDP=m CONFIG_BT_HS=y CONFIG_BT_HCIBTUSB=m CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y -CONFIG_BT_HCIBTUSB_MTK=y +# CONFIG_BT_HCIBTUSB_BCM is not set +CONFIG_BT_HCIBTSDIO=m CONFIG_BT_HCIUART=m CONFIG_BT_HCIUART_BCSP=y CONFIG_BT_HCIUART_ATH3K=y -CONFIG_BT_HCIUART_INTEL=y -CONFIG_BT_HCIUART_AG6XX=y CONFIG_BT_HCIBCM203X=m CONFIG_BT_HCIBPA10X=m CONFIG_BT_HCIBFUSB=m -CONFIG_BT_HCIDTL1=m -CONFIG_BT_HCIBT3C=m -CONFIG_BT_HCIBLUECARD=m CONFIG_BT_HCIVHCI=m CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m CONFIG_BT_ATH3K=m -CONFIG_BT_VIRTIO=m CONFIG_CFG80211=m CONFIG_CFG80211_WEXT=y CONFIG_MAC80211=m @@ -349,15 +591,19 @@ CONFIG_RFKILL=m CONFIG_RFKILL_INPUT=y CONFIG_NET_9P=y CONFIG_NET_9P_VIRTIO=y -CONFIG_CEPH_LIB=m -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y CONFIG_PCIEAER=y -# CONFIG_PCIEASPM is not set +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIE_DPC=y +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=m CONFIG_PCI_IOV=y -CONFIG_HOTPLUG_PCI=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI_ACPI=y CONFIG_HOTPLUG_PCI_SHPC=y CONFIG_PCCARD=m +# CONFIG_PCMCIA is not set CONFIG_YENTA=m CONFIG_RAPIDIO=y CONFIG_RAPIDIO_TSI721=y @@ -369,7 +615,12 @@ CONFIG_UEVENT_HELPER=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_FW_LOADER_COMPRESS=y -CONFIG_FW_LOADER_COMPRESS_ZSTD=y +CONFIG_CONNECTOR=y +CONFIG_DMI_SYSFS=y +CONFIG_ISCSI_IBFT=m +CONFIG_EFI_ZBOOT=y +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m CONFIG_MTD=m CONFIG_MTD_BLOCK=m CONFIG_MTD_CFI=m @@ -379,22 +630,31 @@ 
CONFIG_MTD_CFI_AMDSTD=m CONFIG_MTD_CFI_STAA=m CONFIG_MTD_RAM=m CONFIG_MTD_ROM=m +CONFIG_MTD_BLOCK2MTD=m +CONFIG_MTD_SPI_NOR=m CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_GLUEBI=m CONFIG_MTD_UBI_BLOCK=y -CONFIG_PARPORT=y -CONFIG_PARPORT_PC=y -CONFIG_PARPORT_SERIAL=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m CONFIG_PARPORT_PC_FIFO=y +CONFIG_PARPORT_1284=y +# CONFIG_PNP_DEBUG_MESSAGES is not set +CONFIG_BLK_DEV_NULL_BLK=m CONFIG_ZRAM=m CONFIG_ZRAM_DEF_COMP_ZSTD=y -CONFIG_BLK_DEV_LOOP=y +CONFIG_ZRAM_WRITEBACK=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM=m CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_VIRTIO_BLK=y +CONFIG_CDROM_PKTCDVD=m +CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_RBD=m -CONFIG_BLK_DEV_NVME=y +CONFIG_BLK_DEV_NVME=m CONFIG_NVME_MULTIPATH=y CONFIG_NVME_RDMA=m CONFIG_NVME_FC=m @@ -404,18 +664,40 @@ CONFIG_NVME_TARGET_PASSTHRU=y CONFIG_NVME_TARGET_LOOP=m CONFIG_NVME_TARGET_RDMA=m CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m CONFIG_NVME_TARGET_TCP=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m CONFIG_EEPROM_AT24=m -CONFIG_BLK_DEV_SD=y -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=y +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +CONFIG_UACCE=m +CONFIG_PVPANIC=y +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_SAS_ATA=y CONFIG_ISCSI_TCP=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_AACRAID=m CONFIG_SCSI_MVSAS=y # CONFIG_SCSI_MVSAS_DEBUG is not set CONFIG_SCSI_MVSAS_TASKLET=y 
@@ -424,8 +706,10 @@ CONFIG_MEGARAID_NEWGEN=y CONFIG_MEGARAID_MM=y CONFIG_MEGARAID_MAILBOX=y CONFIG_MEGARAID_LEGACY=y -CONFIG_MEGARAID_SAS=y -CONFIG_SCSI_MPT2SAS=y +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=y +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m CONFIG_LIBFC=m CONFIG_LIBFCOE=m CONFIG_FCOE=m @@ -433,35 +717,46 @@ CONFIG_SCSI_QLOGIC_1280=m CONFIG_SCSI_QLA_FC=m CONFIG_TCM_QLA2XXX=m CONFIG_SCSI_QLA_ISCSI=m -CONFIG_SCSI_LPFC=m CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y CONFIG_ATA=y CONFIG_SATA_AHCI=y CONFIG_SATA_AHCI_PLATFORM=y -CONFIG_AHCI_DWC=y +CONFIG_ATA_PIIX=m CONFIG_PATA_ATIIXP=y -CONFIG_PATA_PCMCIA=m +CONFIG_ATA_GENERIC=m CONFIG_MD=y -CONFIG_BLK_DEV_MD=m +CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m CONFIG_BCACHE=m -CONFIG_BLK_DEV_DM=y +CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m CONFIG_TARGET_CORE=m CONFIG_TCM_IBLOCK=m CONFIG_TCM_FILEIO=m @@ -469,18 +764,45 @@ CONFIG_TCM_PSCSI=m CONFIG_TCM_USER2=m CONFIG_LOOPBACK_TARGET=m CONFIG_ISCSI_TARGET=m -CONFIG_NETDEVICES=y +CONFIG_ISCSI_TARGET_CXGB4=m +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +CONFIG_FUSION_SAS=m +CONFIG_FUSION_CTL=m +CONFIG_FUSION_LOGGING=y +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m CONFIG_BONDING=m -CONFIG_DUMMY=y +CONFIG_DUMMY=m CONFIG_WIREGUARD=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m 
+CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m -CONFIG_VXLAN=y +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NTB_NETDEV=m CONFIG_RIONET=m CONFIG_TUN=m CONFIG_VETH=m CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ATM_DRIVERS is not set # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_ADAPTEC is not set # CONFIG_NET_VENDOR_AGERE is not set @@ -492,36 +814,63 @@ CONFIG_VIRTIO_NET=m # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_VENDOR_ATHEROS is not set CONFIG_BNX2=y +CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BNXT_DCB=y # CONFIG_NET_VENDOR_CAVIUM is not set CONFIG_CHELSIO_T1=m CONFIG_CHELSIO_T1_1G=y CONFIG_CHELSIO_T3=m -CONFIG_CHELSIO_T4=m +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_IPSEC_INLINE=m # CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_DNET=m # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_I825XX is not set -CONFIG_E1000=y -CONFIG_E1000E=y -CONFIG_IGB=y -CONFIG_IXGBE=y +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m # CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MELLANOX is not set +CONFIG_MLX4_EN=m +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLXSW_CORE=m # CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set # CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NI is not set # 
CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set +CONFIG_ETHOC=m # CONFIG_NET_VENDOR_QLOGIC is not set # CONFIG_NET_VENDOR_BROCADE is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RDC is not set CONFIG_8139CP=m CONFIG_8139TOO=m -CONFIG_R8169=y +# CONFIG_8139TOO_PIO is not set +CONFIG_8139TOO_8129=y +CONFIG_R8169=m # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set @@ -530,46 +879,150 @@ CONFIG_R8169=y # CONFIG_NET_VENDOR_SIS is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set CONFIG_STMMAC_ETH=y # CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TI is not set # CONFIG_NET_VENDOR_VIA is not set -CONFIG_NGBE=y -CONFIG_TXGBE=y +CONFIG_NGBE=m +CONFIG_TXGBE=m # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_XILINX is not set +CONFIG_LED_TRIGGER_PHY=y +CONFIG_SFP=y +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_BCM7XXX_PHY=m +CONFIG_BCM87XX_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=y +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +CONFIG_CAN_VCAN=m +CONFIG_CAN_SLCAN=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +CONFIG_CAN_CC770_PLATFORM=m +CONFIG_CAN_SJA1000=m 
+CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_KVASER_USB=m +CONFIG_CAN_PEAK_USB=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_THUNDER=m CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y CONFIG_PPP_MPPE=m CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m CONFIG_PPPOE=m CONFIG_PPTP=m CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m CONFIG_USB_RTL8150=m CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m # CONFIG_USB_NET_AX8817X is not set # CONFIG_USB_NET_AX88179_178A is not set CONFIG_USB_NET_CDC_EEM=m CONFIG_USB_NET_HUAWEI_CDC_NCM=m CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m # CONFIG_USB_NET_NET1080 is not set +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y # CONFIG_USB_BELKIN is not set # CONFIG_USB_ARMLINUX is not set +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y # CONFIG_USB_NET_ZAURUS is not set +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_WLAN_VENDOR_ADMTEK is not set CONFIG_ATH9K=m +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_WOW=y CONFIG_ATH9K_HTC=m +CONFIG_ATH10K=m +CONFIG_ATH10K_PCI=m +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_WLAN_VENDOR_CISCO is not set CONFIG_IWLWIFI=m CONFIG_IWLDVM=m CONFIG_IWLMVM=m -CONFIG_HOSTAP=m +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m 
CONFIG_MT7601U=m +CONFIG_MT76x0U=m +CONFIG_MT76x2U=m CONFIG_RT2X00=m +CONFIG_RT2800PCI=m CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y CONFIG_RTL8192CE=m CONFIG_RTL8192SE=m CONFIG_RTL8192DE=m @@ -581,29 +1034,80 @@ CONFIG_RTL8821AE=m CONFIG_RTL8192CU=m # CONFIG_RTLWIFI_DEBUG is not set CONFIG_RTL8XXXU=m -CONFIG_RTW88=m -CONFIG_RTW88_8822BE=m -CONFIG_RTW88_8822CE=m -CONFIG_RTW88_8723DE=m -CONFIG_RTW88_8821CE=m -CONFIG_RTW89=m -CONFIG_RTW89_8852AE=m -CONFIG_RTW89_8852CE=m +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set CONFIG_ZD1211RW=m CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_MAC80211_HWSIM=m +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m +CONFIG_IEEE802154_FAKELB=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_USB4_NET=m +CONFIG_NETDEVSIM=m +CONFIG_ISDN=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m CONFIG_INPUT_MOUSEDEV=y CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_XTKBD=m CONFIG_MOUSE_PS2_ELANTECH=y CONFIG_MOUSE_PS2_SENTELIC=y CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +CONFIG_MOUSE_VSXXXAA=m +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_KBTAB=m +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m CONFIG_INPUT_MISC=y +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +CONFIG_INPUT_POWERMATE=m 
+CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m CONFIG_INPUT_UINPUT=m +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F34=y +CONFIG_RMI4_F55=y CONFIG_SERIO_SERPORT=m CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_ARC_PS2=m CONFIG_LEGACY_PTY_COUNT=16 CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=16 CONFIG_SERIAL_8250_RUNTIME_UARTS=16 @@ -611,37 +1115,190 @@ CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_8250_RSA=y -CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_JSM=m +CONFIG_SERIAL_ARC=m CONFIG_SERIAL_NONSTANDARD=y +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_NOZOMI=m CONFIG_PRINTER=m +CONFIG_PPDEV=m CONFIG_VIRTIO_CONSOLE=y CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_TCG_TIS_SPI=m +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TCG_TIS_ST33ZP24_SPI=m CONFIG_I2C_CHARDEV=y +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_ISCH=m CONFIG_I2C_PIIX4=y +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m +CONFIG_I2C_SCMI=m +CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_GPIO=y -CONFIG_I2C_LS2X=y +CONFIG_I2C_LS2X=m +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT=m +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m +CONFIG_I2C_STUB=m CONFIG_SPI=y -CONFIG_SPI_LOONGSON_PCI=m +CONFIG_SPI_LOONGSON_PCI=y CONFIG_SPI_LOONGSON_PLATFORM=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m 
+CONFIG_DP83640_PHY=m CONFIG_PINCTRL=y CONFIG_PINCTRL_LOONGSON2=y CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_LOONGSON=y +CONFIG_GPIO_AMDPT=m CONFIG_GPIO_LOONGSON_64BIT=y +CONFIG_GPIO_VIPERBOARD=m CONFIG_POWER_RESET=y -CONFIG_POWER_RESET_RESTART=y -CONFIG_POWER_RESET_SYSCON=y -CONFIG_POWER_RESET_SYSCON_POWEROFF=y -CONFIG_SYSCON_REBOOT_MODE=y +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MCP3021=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM73=m CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_ADM1275=m +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX34440=m 
+CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m CONFIG_SENSORS_W83795=m +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_EMULATION=y CONFIG_LOONGSON2_THERMAL=m +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +CONFIG_WDAT_WDT=m +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m +CONFIG_USBPCWATCHDOG=m +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +CONFIG_MFD_VIPERBOARD=m +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +CONFIG_MFD_VX855=m CONFIG_RC_CORE=m CONFIG_LIRC=y CONFIG_RC_DECODERS=y @@ -655,100 +1312,580 @@ CONFIG_IR_SANYO_DECODER=m CONFIG_IR_SHARP_DECODER=m CONFIG_IR_SONY_DECODER=m CONFIG_IR_XMP_DECODER=m +CONFIG_RC_DEVICES=y +CONFIG_IR_ENE=m +CONFIG_IR_FINTEK=m +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_SERIAL=m +CONFIG_IR_SERIAL_TRANSMITTER=y +CONFIG_IR_STREAMZAP=m +CONFIG_IR_TTUSBIR=m +CONFIG_RC_ATI_REMOTE=m +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m CONFIG_MEDIA_SUPPORT=m 
+CONFIG_DVB_MAX_ADAPTERS=8 CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_GSPCA=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_GL860=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_PWC=m +CONFIG_USB_S2255=m CONFIG_USB_VIDEO_CLASS=m +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_AU0828=m +CONFIG_DVB_B2C2_FLEXCOP_USB=m +CONFIG_DVB_USB_V2=m +CONFIG_DVB_USB_AF9035=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_AZ6007=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_MXL111SF=m +CONFIG_DVB_USB=m +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_CXUSB=m +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIBUSB_MB=m +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_DTT200U=m 
+CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_PCTV452E=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +CONFIG_DVB_USB_TTUSB2=m +CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_VP7045=m +CONFIG_SMS_USB_DRV=m +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m +CONFIG_VIDEO_EM28XX=m +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m CONFIG_MEDIA_PCI_SUPPORT=y +CONFIG_VIDEO_IVTV=m +CONFIG_VIDEO_FB_IVTV=m CONFIG_VIDEO_BT848=m CONFIG_DVB_BT8XX=m +CONFIG_VIDEO_CX18=m +CONFIG_VIDEO_CX23885=m +CONFIG_MEDIA_ALTERA_CI=m +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +# CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_SAA7164=m +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +CONFIG_DVB_DDBRIDGE=m +CONFIG_DVB_DM1105=m +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m +CONFIG_DVB_NGENE=m +CONFIG_DVB_PLUTO2=m +CONFIG_DVB_PT1=m +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_AV=m +CONFIG_SMS_SDIO_DRV=m +CONFIG_DVB_FIREDTV=m CONFIG_DRM=y +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DP_AUX_CHARDEV=y +CONFIG_DRM_DP_CEC=y +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set CONFIG_DRM_RADEON=m CONFIG_DRM_RADEON_USERPTR=y CONFIG_DRM_AMDGPU=m CONFIG_DRM_AMDGPU_SI=y CONFIG_DRM_AMDGPU_CIK=y CONFIG_DRM_AMDGPU_USERPTR=y +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_VKMS=m +CONFIG_DRM_UDL=m CONFIG_DRM_AST=y +CONFIG_DRM_MGAG200=m CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m CONFIG_DRM_LOONGSON=y +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m CONFIG_FB=y CONFIG_FB_EFI=y CONFIG_FB_RADEON=y CONFIG_FB_LS2K500=m -CONFIG_LCD_CLASS_DEVICE=y +CONFIG_FB_TILEBLITTING=y +CONFIG_LCD_CLASS_DEVICE=m CONFIG_LCD_PLATFORM=m +CONFIG_BACKLIGHT_LP855X=m # CONFIG_VGA_CONSOLE is not set 
-CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_SOUND=y CONFIG_SND=y +CONFIG_SND_OSSEMUL=y +CONFIG_SND_HRTIMER=m +# CONFIG_SND_SUPPORT_OLD_API is not set CONFIG_SND_SEQUENCER=m CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +CONFIG_SND_MPU401=m +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 +CONFIG_SND_AD1889=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m CONFIG_SND_BT87X=m CONFIG_SND_BT87X_OVERCLOCK=y -CONFIG_SND_HDA_INTEL=y +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN=m +CONFIG_SND_CS46XX=m +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MIXART=m +CONFIG_SND_PCXHR=m +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +CONFIG_SND_HDA_INTEL=m CONFIG_SND_HDA_HWDEP=y CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=0 CONFIG_SND_HDA_PATCH_LOADER=y -CONFIG_SND_HDA_CODEC_REALTEK=y -CONFIG_SND_HDA_CODEC_SIGMATEL=y -CONFIG_SND_HDA_CODEC_HDMI=y -CONFIG_SND_HDA_CODEC_CONEXANT=y +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m 
+CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_PREALLOC_SIZE=512 +# CONFIG_SND_SPI is not set CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_DICE=m +CONFIG_SND_OXFW=m +CONFIG_SND_ISIGHT=m +CONFIG_SND_FIREWORKS=m +CONFIG_SND_BEBOB=m +CONFIG_SND_FIREWIRE_DIGI00X=m +CONFIG_SND_FIREWIRE_TASCAM=m +CONFIG_SND_FIREWIRE_MOTU=m +CONFIG_SND_FIREFACE=m +CONFIG_SND_SOC=m +CONFIG_HID_BATTERY_STRENGTH=y CONFIG_HIDRAW=y CONFIG_UHID=m CONFIG_HID_A4TECH=m +CONFIG_HID_ACRUX=m +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LENOVO=m CONFIG_HID_LOGITECH=m CONFIG_HID_LOGITECH_DJ=m CONFIG_LOGITECH_FF=y CONFIG_LOGIRUMBLEPAD2_FF=y CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m 
+CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_STEELSERIES=m CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +CONFIG_HID_SMARTJOYPLUS=m +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +CONFIG_HID_PID=y CONFIG_USB_HIDDEV=y +CONFIG_I2C_HID=m +CONFIG_USB_LED_TRIG=y CONFIG_USB=y -CONFIG_USB_OTG=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_LEDS_TRIGGER_USBPORT=m CONFIG_USB_MON=y CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PLATFORM=m CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y -CONFIG_USB_UHCI_HCD=m -CONFIG_USB_ACM=m +CONFIG_USB_UHCI_HCD=y CONFIG_USB_PRINTER=m +CONFIG_USB_TMC=m CONFIG_USB_STORAGE=m CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m CONFIG_USB_UAS=m +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m CONFIG_USB_DWC2=y CONFIG_USB_DWC2_HOST=y CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F8153X=m 
+CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +CONFIG_USB_SERIAL_DEBUG=m +CONFIG_USB_USS720=m +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m CONFIG_USB_GADGET=y CONFIG_TYPEC=m CONFIG_TYPEC_TCPM=m CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_RT1711H=m +CONFIG_TYPEC_FUSB302=m CONFIG_TYPEC_UCSI=m CONFIG_UCSI_ACPI=m +CONFIG_TYPEC_TPS6598X=m +CONFIG_TYPEC_MUX_PI3USB30532=m +CONFIG_TYPEC_DP_ALTMODE=m +CONFIG_MMC=m +CONFIG_SDIO_UART=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_SDHCI_XENON=m +CONFIG_MEMSTICK=m +CONFIG_MSPRO_BLOCK=m +CONFIG_MEMSTICK_TIFM_MS=m 
+CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_LM3530=m +CONFIG_LEDS_LP3944=m +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +CONFIG_LEDS_TRIGGER_AUDIO=y CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_EFI=y +# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_RV8803=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=m +CONFIG_RTC_DRV_STK17TA8=m +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m CONFIG_RTC_DRV_LOONGSON=y CONFIG_DMADEVICES=y -CONFIG_UIO=m +CONFIG_DW_DMAC=m +CONFIG_ASYNC_TX_DMA=y +CONFIG_UIO_CIF=m CONFIG_UIO_PDRV_GENIRQ=m 
CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m CONFIG_UIO_PCI_GENERIC=m CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y CONFIG_VFIO_PCI=m CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=m @@ -784,7 +1921,27 @@ CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_PERFORMANCE=y CONFIG_DEVFREQ_GOV_POWERSAVE=y CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_IIO=m +CONFIG_HID_SENSOR_ACCEL_3D=m +CONFIG_HID_SENSOR_GYRO_3D=m +CONFIG_HID_SENSOR_HUMIDITY=m +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +CONFIG_HID_SENSOR_PRESS=m +CONFIG_HID_SENSOR_TEMP=m +CONFIG_NTB=m +CONFIG_NTB_PINGPONG=m +CONFIG_NTB_TOOL=m +CONFIG_NTB_PERF=m +CONFIG_NTB_TRANSPORT=m CONFIG_PWM=y +CONFIG_POWERCAP=y +CONFIG_USB4=m +CONFIG_DAX=y +CONFIG_DEV_DAX=m CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -805,37 +1962,41 @@ CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_FANOTIFY=y CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -CONFIG_QUOTA=y -# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QFMT_V1=m -CONFIG_QFMT_V2=m +CONFIG_QFMT_V2=y CONFIG_AUTOFS_FS=y CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set CONFIG_OVERLAY_FS_INDEX=y CONFIG_OVERLAY_FS_XINO_AUTO=y CONFIG_OVERLAY_FS_METACOPY=y -CONFIG_FSCACHE=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y CONFIG_CACHEFILES=m -CONFIG_ISO9660_FS=y +CONFIG_ISO9660_FS=m CONFIG_JOLIET=y CONFIG_ZISOFS=y -CONFIG_UDF_FS=y +CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_FAT_DEFAULT_CODEPAGE=936 CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" CONFIG_EXFAT_FS=m +CONFIG_NTFS_FS=m CONFIG_NTFS3_FS=m CONFIG_NTFS3_64BIT_CLUSTER=y CONFIG_NTFS3_LZX_XPRESS=y CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y CONFIG_ORANGEFS_FS=m CONFIG_ECRYPT_FS=m 
CONFIG_ECRYPT_FS_MESSAGING=y @@ -844,7 +2005,8 @@ CONFIG_HFSPLUS_FS=m CONFIG_UBIFS_FS=m CONFIG_UBIFS_FS_ADVANCED_COMPR=y CONFIG_CRAMFS=m -CONFIG_SQUASHFS=y +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_DIRECT=y CONFIG_SQUASHFS_XATTR=y CONFIG_SQUASHFS_LZ4=y CONFIG_SQUASHFS_LZO=y @@ -852,79 +2014,188 @@ CONFIG_SQUASHFS_XZ=y CONFIG_MINIX_FS=m CONFIG_ROMFS_FS=m CONFIG_PSTORE=m -CONFIG_PSTORE_LZO_COMPRESS=m -CONFIG_PSTORE_LZ4_COMPRESS=m -CONFIG_PSTORE_LZ4HC_COMPRESS=m -CONFIG_PSTORE_842_COMPRESS=y -CONFIG_PSTORE_ZSTD_COMPRESS=y -CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT=y CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_EROFS_FS=m CONFIG_EROFS_FS_ZIP_LZMA=y CONFIG_EROFS_FS_PCPU_KTHREAD=y CONFIG_NFS_FS=y +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_V4_1=y CONFIG_NFS_V4_2=y -CONFIG_ROOT_NFS=y +# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set CONFIG_NFSD=y CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_NFSD_V4_2_INTER_SSC=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_SUNRPC_DEBUG=y CONFIG_CEPH_FS=m CONFIG_CEPH_FSCACHE=y CONFIG_CEPH_FS_POSIX_ACL=y CONFIG_CEPH_FS_SECURITY_LABEL=y CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y # CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y CONFIG_9P_FS=y +CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m 
+CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m CONFIG_NLS_UTF8=y CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y CONFIG_KEY_DH_OPERATIONS=y CONFIG_SECURITY=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_APPARMOR=y CONFIG_SECURITY_YAMA=y +CONFIG_SECURITY_LOCKDOWN_LSM=y +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +CONFIG_IMA=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_LOAD_X509=y +CONFIG_EVM=y +CONFIG_EVM_LOAD_X509=y CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" +CONFIG_CRYPTO_FIPS=y CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SM2=y CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m 
+CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m -CONFIG_CRYPTO_USER_API_RNG=m -CONFIG_CRYPTO_USER_API_AEAD=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y CONFIG_CRYPTO_CRC32_LOONGARCH=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +CONFIG_CRYPTO_DEV_CHELSIO=m CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_REVOCATION_LIST=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC7=m +CONFIG_DMA_CMA=y CONFIG_PRINTK_TIME=y +CONFIG_PRINTK_CALLER=y +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_FRAME_WARN=4096 CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_FS=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_PANIC_ON_OOPS=y # CONFIG_SCHED_DEBUG is not set CONFIG_SCHEDSTATS=y -# CONFIG_DEBUG_PREEMPT is not set -# CONFIG_FTRACE is not set +CONFIG_DEBUG_LIST=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_RUNTIME_TESTING_MENU is not set -- Gitee From 5fbca358c367579923f2bcf3e608339abee78cbc Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 25 Dec 2023 18:19:33 +0800 Subject: [PATCH 0194/2138] anolis: ALSA: hda: Add support of Zhaoxin SB HDAC ANBZ: #7809 Add some special initialization for Zhaoxin SB HDAC. 
Signed-off-by: leoliu-oc Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2683 --- sound/pci/hda/hda_intel.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 8ffe22e53909..cb50f14265e6 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1549,7 +1549,8 @@ static int check_position_fix(struct azx *chip, int fix) } /* Check VIA/ATI HD Audio Controller exist */ - if (chip->driver_type == AZX_DRIVER_VIA) { + if (chip->driver_type == AZX_DRIVER_VIA || + chip->driver_type == AZX_DRIVER_ZHAOXIN) { dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n"); return POS_FIX_VIACOMBO; } @@ -1703,7 +1704,7 @@ static void azx_check_snoop_available(struct azx *chip) snoop = true; if (azx_get_snoop_type(chip) == AZX_SNOOP_TYPE_NONE && - chip->driver_type == AZX_DRIVER_VIA) { + (chip->driver_type == AZX_DRIVER_VIA || chip->driver_type == AZX_DRIVER_ZHAOXIN)) { /* force to non-snoop mode for a new VIA controller * when BIOS is set */ -- Gitee From 4a0369666cbf0e07ba4117f5071739f4f2c63cf9 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 26 Jan 2024 15:38:59 +0800 Subject: [PATCH 0195/2138] anolis: ALSA: hda: Add support of Zhaoxin NB HDAC ANBZ: #7809 Add the new PCI ID 0x1d17 0x9141/0x9142/0x9144 Zhaoxin NB HDAC support. And add some special initialization for Zhaoxin NB HDAC. 
Signed-off-by: leoliu-oc Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2683 --- sound/pci/hda/hda_controller.c | 17 ++++++++++- sound/pci/hda/hda_controller.h | 3 ++ sound/pci/hda/hda_intel.c | 56 ++++++++++++++++++++++++++++++++++ 3 files changed, 75 insertions(+), 1 deletion(-) diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 406779625fb5..b69e7b94673c 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1061,6 +1061,16 @@ static void stream_update(struct hdac_bus *bus, struct hdac_stream *s) } } +static void azx_rirb_zxdelay(struct azx *chip, int enable) +{ + if (chip->remap_diu_addr) { + if (!enable) + writel(0x0, (char *)chip->remap_diu_addr + 0x490a8); + else + writel(0x1000000, (char *)chip->remap_diu_addr + 0x490a8); + } +} + irqreturn_t azx_interrupt(int irq, void *dev_id) { struct azx *chip = dev_id; @@ -1103,9 +1113,14 @@ irqreturn_t azx_interrupt(int irq, void *dev_id) azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); active = true; if (status & RIRB_INT_RESPONSE) { - if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) + if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) || + (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)) { + azx_rirb_zxdelay(chip, 1); udelay(80); + } snd_hdac_bus_update_rirb(bus); + if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY) + azx_rirb_zxdelay(chip, 0); } } } while (active && ++repeat < 10); diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h index 8556031bcd68..9db89f4c7b3f 100644 --- a/sound/pci/hda/hda_controller.h +++ b/sound/pci/hda/hda_controller.h @@ -45,6 +45,7 @@ #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ #define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */ #define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */ +#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 31) /* Put a delay before read */ enum { AZX_SNOOP_TYPE_NONE, @@ 
-143,6 +144,8 @@ struct azx { unsigned int disabled:1; /* disabled by vga_switcheroo */ unsigned int pm_prepared:1; + void __iomem *remap_diu_addr; + /* GTS present */ unsigned int gts_present:1; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index cb50f14265e6..f0f5c6ccc3e1 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -237,6 +237,7 @@ enum { AZX_DRIVER_CTHDA, AZX_DRIVER_CMEDIA, AZX_DRIVER_ZHAOXIN, + AZX_DRIVER_ZXHDMI, AZX_DRIVER_LOONGSON, AZX_DRIVER_HYGON, AZX_DRIVER_GENERIC, @@ -350,6 +351,7 @@ static const char * const driver_short_names[] = { [AZX_DRIVER_CTHDA] = "HDA Creative", [AZX_DRIVER_CMEDIA] = "HDA C-Media", [AZX_DRIVER_ZHAOXIN] = "HDA Zhaoxin", + [AZX_DRIVER_ZXHDMI] = "HDA Zhaoxin HDMI", [AZX_DRIVER_LOONGSON] = "HDA Loongson", [AZX_DRIVER_HYGON] = "HDA Hygon", [AZX_DRIVER_GENERIC] = "HD-Audio Generic", @@ -373,6 +375,31 @@ static void update_pci_byte(struct pci_dev *pci, unsigned int reg, pci_write_config_byte(pci, reg, data); } +static int azx_init_pci_zx(struct azx *chip) +{ + struct snd_card *card = chip->card; + unsigned int diu_reg; + struct pci_dev *diu_pci = NULL; + + azx_bus(chip)->polling_mode = 1; + diu_pci = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x3a03, NULL); + if (!diu_pci) { + dev_info(card->dev, "zx_hda no KX-5000 device.\n"); + return -ENXIO; + } + pci_read_config_dword(diu_pci, PCI_BASE_ADDRESS_0, &diu_reg); + chip->remap_diu_addr = ioremap(diu_reg, 0x50000); + pci_dev_put(diu_pci); + dev_info(card->dev, "zx_hda %x %p\n", diu_reg, chip->remap_diu_addr); + return 0; +} + +static void azx_free_pci_zx(struct azx *chip) +{ + if (chip->remap_diu_addr) + iounmap(chip->remap_diu_addr); +} + static void azx_init_pci(struct azx *chip) { int snoop_type = azx_get_snoop_type(chip); @@ -1362,6 +1389,9 @@ static void azx_free(struct azx *chip) hda->init_failed = 1; /* to be sure */ complete_all(&hda->probe_wait); + if (chip->driver_type == AZX_DRIVER_ZXHDMI) + azx_free_pci_zx(chip); + if 
(use_vga_switcheroo(hda)) { if (chip->disabled && hda->probe_continued) snd_hda_unlock_devices(&chip->bus); @@ -1756,6 +1786,8 @@ static int default_bdl_pos_adj(struct azx *chip) case AZX_DRIVER_ICH: case AZX_DRIVER_PCH: return 1; + case AZX_DRIVER_ZXHDMI: + return 128; default: return 32; } @@ -1885,6 +1917,11 @@ static int azx_first_init(struct azx *chip) chip->pci->device == PCI_DEVICE_ID_HYGON_18H_M05H_HDA) bus->hygon_dword_access = 1; + chip->remap_diu_addr = NULL; + + if (chip->driver_type == AZX_DRIVER_ZXHDMI) + azx_init_pci_zx(chip); + err = pcim_iomap_regions(pci, 1 << 0, "ICH HD audio"); if (err < 0) return err; @@ -1986,6 +2023,7 @@ static int azx_first_init(struct azx *chip) chip->capture_streams = ATIHDMI_NUM_CAPTURE; break; case AZX_DRIVER_GFHDMI: + case AZX_DRIVER_ZXHDMI: case AZX_DRIVER_GENERIC: default: chip->playback_streams = ICH6_NUM_PLAYBACK; @@ -2700,6 +2738,15 @@ static const struct pci_device_id azx_ids[] = { { PCI_VDEVICE(VIA, 0x9170), .driver_data = AZX_DRIVER_GENERIC }, /* VIA GFX VT6122/VX11 */ { PCI_VDEVICE(VIA, 0x9140), .driver_data = AZX_DRIVER_GENERIC }, + { PCI_VDEVICE(VIA, 0x9141), .driver_data = AZX_DRIVER_GENERIC }, + { PCI_VDEVICE(VIA, 0x9142), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(VIA, 0x9144), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(VIA, 0x9145), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(VIA, 0x9146), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, /* SIS966 */ { PCI_VDEVICE(SI, 0x7502), .driver_data = AZX_DRIVER_SIS }, /* ULI M5461 */ @@ -2755,6 +2802,15 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_GENERIC 
| AZX_DCAPS_PRESET_ATI_HDMI }, /* Zhaoxin */ { PCI_VDEVICE(ZHAOXIN, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN }, + { PCI_VDEVICE(ZHAOXIN, 0x9141), .driver_data = AZX_DRIVER_GENERIC }, + { PCI_VDEVICE(ZHAOXIN, 0x9142), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(ZHAOXIN, 0x9144), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(ZHAOXIN, 0x9145), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, + { PCI_VDEVICE(ZHAOXIN, 0x9146), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB | + AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT }, /* Loongson HDAudio*/ { PCI_VDEVICE(LOONGSON, PCI_DEVICE_ID_LOONGSON_HDA), .driver_data = AZX_DRIVER_LOONGSON }, -- Gitee From 85edd7edfb17f6e70d63e15d2bd518a08d562e0e Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 25 Dec 2023 18:19:35 +0800 Subject: [PATCH 0196/2138] anolis: ALSA: hda: Add support of Zhaoxin NB HDAC codec ANBZ: #7809 Add Zhaoxin NB HDAC codec support. 
Signed-off-by: leoliu-oc Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2683 --- sound/pci/hda/patch_hdmi.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index f030700cd60d..81c3a7ed3482 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -4496,6 +4496,20 @@ static int patch_via_hdmi(struct hda_codec *codec) return patch_simple_hdmi(codec, VIAHDMI_CVT_NID, VIAHDMI_PIN_NID); } +/* Zhaoxin HDMI Implementation */ +static int patch_zhaoxin_hdmi(struct hda_codec *codec) +{ + int err; + + err = patch_generic_hdmi(codec); + codec->no_sticky_stream = 1; + + if (err) + return err; + + return 0; +} + static int patch_gf_hdmi(struct hda_codec *codec) { int err; @@ -4618,6 +4632,15 @@ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi), HDA_CODEC_ENTRY(0x11069f85, "VX11 HDMI/DP", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x11069f88, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f89, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8a, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8b, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8c, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8d, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8e, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f8f, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x11069f90, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), HDA_CODEC_ENTRY(0x80860054, "IbexPeak HDMI", patch_i915_cpt_hdmi), HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI", patch_i915_glk_hdmi), HDA_CODEC_ENTRY(0x80862801, "Bearlake HDMI", patch_generic_hdmi), @@ -4651,6 +4674,15 @@ HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), 
HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x808629fb, "Crestline HDMI", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x1d179f88, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f89, "KX-5000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8a, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8b, "KX-6000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8c, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8d, "KX-6000G HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8e, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f8f, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), +HDA_CODEC_ENTRY(0x1d179f90, "KX-7000 HDMI/DP", patch_zhaoxin_hdmi), /* special ID for generic HDMI */ HDA_CODEC_ENTRY(HDA_CODEC_ID_GENERIC_HDMI, "Generic HDMI", patch_generic_hdmi), {} /* terminator */ -- Gitee From 0d63c96a26322b9a23053d216cae8ed0f7c3ab35 Mon Sep 17 00:00:00 2001 From: Maciej Wieczor-Retman Date: Tue, 10 Oct 2023 12:42:36 +0200 Subject: [PATCH 0197/2138] x86/resctrl: Rename arch_has_sparse_bitmaps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8085 commit 39c6eed1f61594f737160e498d29673edbd9eefd upstream. Rename arch_has_sparse_bitmaps to arch_has_sparse_bitmasks to ensure consistent terminology throughout resctrl. Intel-SIG: commit 39c6eed1f615 x86/resctrl: Rename arch_has_sparse_bitmaps. Incremental backporting patches for Intel RDT on Intel Xeon platform. 
Suggested-by: Reinette Chatre Signed-off-by: Maciej Wieczor-Retman Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Ilpo Järvinen Reviewed-by: Peter Newman Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Peter Newman Link: https://lore.kernel.org/r/e330fcdae873ef1a831e707025a4b70fa346666e.1696934091.git.maciej.wieczor-retman@intel.com [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Kun(llfl) Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2740 --- arch/x86/kernel/cpu/resctrl/core.c | 4 ++-- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 4 ++-- include/linux/resctrl.h | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index e3c6d6552ffc..6050ae8b5dd6 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -871,7 +871,7 @@ static __init void rdt_init_res_defs_intel(void) if (r->rid == RDT_RESOURCE_L3 || r->rid == RDT_RESOURCE_L2) { - r->cache.arch_has_sparse_bitmaps = false; + r->cache.arch_has_sparse_bitmasks = false; r->cache.arch_has_per_cpu_cfg = false; r->cache.min_cbm_bits = 1; } else if (r->rid == RDT_RESOURCE_MBA) { @@ -891,7 +891,7 @@ static __init void rdt_init_res_defs_amd(void) if (r->rid == RDT_RESOURCE_L3 || r->rid == RDT_RESOURCE_L2) { - r->cache.arch_has_sparse_bitmaps = true; + r->cache.arch_has_sparse_bitmasks = true; r->cache.arch_has_per_cpu_cfg = true; r->cache.min_cbm_bits = 0; } else if (r->rid == RDT_RESOURCE_MBA) { diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index a701e7921ea5..affe6f5f47a3 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -118,8 +118,8 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) first_bit = find_first_bit(&val, cbm_len); zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); - /* Are 
non-contiguous bitmaps allowed? */ - if (!r->cache.arch_has_sparse_bitmaps && + /* Are non-contiguous bitmasks allowed? */ + if (!r->cache.arch_has_sparse_bitmasks && (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) { rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val); return false; diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 8334eeacfec5..66942d7fba7f 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -94,7 +94,7 @@ struct rdt_domain { * zero CBM. * @shareable_bits: Bitmask of shareable resource with other * executing entities - * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid. + * @arch_has_sparse_bitmasks: True if a bitmask like f00f is valid. * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache * level has CPU scope. */ @@ -102,7 +102,7 @@ struct resctrl_cache { unsigned int cbm_len; unsigned int min_cbm_bits; unsigned int shareable_bits; - bool arch_has_sparse_bitmaps; + bool arch_has_sparse_bitmasks; bool arch_has_per_cpu_cfg; }; -- Gitee From f62c972ab8a48b86f583af622e4c819298da29c2 Mon Sep 17 00:00:00 2001 From: Maciej Wieczor-Retman Date: Tue, 10 Oct 2023 12:42:37 +0200 Subject: [PATCH 0198/2138] x86/resctrl: Enable non-contiguous CBMs in Intel CAT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8085 commit 0e3cd31f6e9074886dea5a999bfcc563d144e7de upstream. The setting for non-contiguous 1s support in Intel CAT is hardcoded to false. On these systems, writing non-contiguous 1s into the schemata file will fail before resctrl passes the value to the hardware. In Intel CAT CPUID.0x10.1:ECX[3] and CPUID.0x10.2:ECX[3] stopped being reserved and now carry information about non-contiguous 1s value support for L3 and L2 cache respectively. The CAT capacity bitmask (CBM) supports a non-contiguous 1s value if the bit is set. 
The exception are Haswell systems where non-contiguous 1s value support needs to stay disabled since they can't make use of CPUID for Cache allocation. Intel-SIG: commit 0e3cd31f6e90 x86/resctrl: Enable non-contiguous CBMs in Intel CAT. Incremental backporting patches for Intel RDT on Intel Xeon platform. Originally-by: Fenghua Yu Signed-off-by: Maciej Wieczor-Retman Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Ilpo Järvinen Reviewed-by: Peter Newman Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Peter Newman Link: https://lore.kernel.org/r/1849b487256fe4de40b30f88450cba3d9abc9171.1696934091.git.maciej.wieczor-retman@intel.com [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Kun(llfl) Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2740 --- arch/x86/kernel/cpu/resctrl/core.c | 9 ++++++--- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 10 ++++++---- arch/x86/kernel/cpu/resctrl/internal.h | 9 +++++++++ 3 files changed, 21 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 6050ae8b5dd6..fbdaa9307138 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -152,6 +152,7 @@ static inline void cache_alloc_hsw_probe(void) r->cache.cbm_len = 20; r->cache.shareable_bits = 0xc0000; r->cache.min_cbm_bits = 2; + r->cache.arch_has_sparse_bitmasks = false; r->alloc_capable = true; rdt_alloc_capable = true; @@ -265,15 +266,18 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); union cpuid_0x10_1_eax eax; + union cpuid_0x10_x_ecx ecx; union cpuid_0x10_x_edx edx; - u32 ebx, ecx; + u32 ebx; - cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full); + cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx.full, &edx.full); hw_res->num_closid = edx.split.cos_max + 1; r->cache.cbm_len = eax.split.cbm_len + 1; r->default_ctrl = 
BIT_MASK(eax.split.cbm_len + 1) - 1; r->cache.shareable_bits = ebx & r->default_ctrl; r->data_width = (r->cache.cbm_len + 3) / 4; + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + r->cache.arch_has_sparse_bitmasks = ecx.split.noncont; r->alloc_capable = true; } @@ -871,7 +875,6 @@ static __init void rdt_init_res_defs_intel(void) if (r->rid == RDT_RESOURCE_L3 || r->rid == RDT_RESOURCE_L2) { - r->cache.arch_has_sparse_bitmasks = false; r->cache.arch_has_per_cpu_cfg = false; r->cache.min_cbm_bits = 1; } else if (r->rid == RDT_RESOURCE_MBA) { diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index affe6f5f47a3..e004ecbe3553 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -92,10 +92,12 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, /* * Check whether a cache bit mask is valid. - * For Intel the SDM says: - * Please note that all (and only) contiguous '1' combinations - * are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.). - * Additionally Haswell requires at least two bits set. + * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID: + * - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1 + * - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1 + * + * Haswell does not support a non-contiguous 1s value and additionally + * requires at least two bits set. * AMD allows non-contiguous bitmasks. 
*/ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 566386abb877..ca86a96e80c2 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -490,6 +490,15 @@ union cpuid_0x10_3_eax { unsigned int full; }; +/* CPUID.(EAX=10H, ECX=ResID).ECX */ +union cpuid_0x10_x_ecx { + struct { + unsigned int reserved:3; + unsigned int noncont:1; + } split; + unsigned int full; +}; + /* CPUID.(EAX=10H, ECX=ResID).EDX */ union cpuid_0x10_x_edx { struct { -- Gitee From d56828de3b68ff4cea48aae7c80f928db78627e0 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 10 Oct 2023 12:42:38 +0200 Subject: [PATCH 0199/2138] x86/resctrl: Add sparse_masks file in info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8085 commit 4dba8f10b8fef9c5b0f9ed83dd1af91a1795ead1 upstream. Add the interface in resctrl FS to show if sparse cache allocation bit masks are supported on the platform. Reading the file returns either a "1" if non-contiguous 1s are supported and "0" otherwise. The file path is /sys/fs/resctrl/info/{resource}/sparse_masks, where {resource} can be either "L2" or "L3". Intel-SIG: commit 4dba8f10b8fe x86/resctrl: Add sparse_masks file in info. Incremental backporting patches for Intel RDT on Intel Xeon platform. 
Signed-off-by: Fenghua Yu Signed-off-by: Maciej Wieczor-Retman Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Ilpo Järvinen Reviewed-by: Peter Newman Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Peter Newman Link: https://lore.kernel.org/r/7300535160beba41fd8aa073749ec1ee29b4621f.1696934091.git.maciej.wieczor-retman@intel.com [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Kun(llfl) Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2740 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index d82d5de183b1..1c0f00cd212d 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1117,6 +1117,17 @@ static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type) } } +static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks); + + return 0; +} + /** * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other * @r: Resource to which domain instance @d belongs. 
@@ -1841,6 +1852,13 @@ static struct rftype res_common_files[] = { .seq_show = rdtgroup_size_show, .fflags = RF_CTRL_BASE, }, + { + .name = "sparse_masks", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_has_sparse_bitmasks_show, + .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + }, }; -- Gitee From a94a97de76bf4211e4d30c80283f2a964714aa20 Mon Sep 17 00:00:00 2001 From: Fenghua Yu Date: Tue, 10 Oct 2023 12:42:39 +0200 Subject: [PATCH 0200/2138] Documentation/x86: Document resctrl's new sparse_masks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8085 commit aaa5fa35743ab9f0726568611a85e3e15349b9bf upstream. The documentation mentions that non-contiguous bit masks are not supported in Intel Cache Allocation Technology (CAT). Update the documentation on how to determine if sparse bit masks are allowed in L2 and L3 CAT. Intel-SIG: commit aaa5fa35743a Documentation/x86: Document resctrl's new sparse_masks. Incremental backporting patches for Intel RDT on Intel Xeon platform. Signed-off-by: Fenghua Yu Signed-off-by: Maciej Wieczor-Retman Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Ilpo Järvinen Reviewed-by: Peter Newman Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Peter Newman Link: https://lore.kernel.org/r/3e9610997164f648e15c5c2e90d4944ce36504fe.1696934091.git.maciej.wieczor-retman@intel.com [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Kun(llfl) Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2740 --- Documentation/arch/x86/resctrl.rst | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst index cb05d90111b4..4c6421e2aa31 100644 --- a/Documentation/arch/x86/resctrl.rst +++ b/Documentation/arch/x86/resctrl.rst @@ -124,6 +124,13 @@ related to allocation: "P": Corresponding region is pseudo-locked. No sharing allowed. 
+"sparse_masks": + Indicates if non-contiguous 1s value in CBM is supported. + + "0": + Only contiguous 1s value in CBM is supported. + "1": + Non-contiguous 1s value in CBM is supported. Memory bandwidth(MB) subdirectory contains the following files with respect to allocation: @@ -445,12 +452,13 @@ For cache resources we describe the portion of the cache that is available for allocation using a bitmask. The maximum value of the mask is defined by each cpu model (and may be different for different cache levels). It is found using CPUID, but is also provided in the "info" directory of -the resctrl file system in "info/{resource}/cbm_mask". Intel hardware +the resctrl file system in "info/{resource}/cbm_mask". Some Intel hardware requires that these masks have all the '1' bits in a contiguous block. So 0x3, 0x6 and 0xC are legal 4-bit masks with two bits set, but 0x5, 0x9 -and 0xA are not. On a system with a 20-bit mask each bit represents 5% -of the capacity of the cache. You could partition the cache into four -equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000. +and 0xA are not. Check /sys/fs/resctrl/info/{resource}/sparse_masks +if non-contiguous 1s value is supported. On a system with a 20-bit mask +each bit represents 5% of the capacity of the cache. You could partition +the cache into four equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000. Memory bandwidth Allocation and monitoring ========================================== -- Gitee From f916af94cea95ea6b5d1c5799c011f55ab99e4bf Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 26 Feb 2024 17:08:46 +0800 Subject: [PATCH 0201/2138] anolis: fs: record page or bio info while process is waitting on it ANBZ: #8419 If one process context is stucked in wait_on_buffer(), lock_buffer(), lock_page() and wait_on_page_writeback() and wait_on_bit_io(), it's hard to tell true reason, for example, whether this page is under io, or this page is just locked too long by other process context. 
Normally io request has multiple bios, and every bio contains multiple pages which will hold data to be read from or written to device, so here we record page info or bio info in task_struct while process calls lock_page(), lock_buffer(), wait_on_page_writeback(), wait_on_buffer() and wait_on_bit_io(), we add a new proce interface: [lege@localhost linux]$ cat /proc/4516/wait_res 1 ffffd0969f95d3c0 4295369599 4295381596 Above info means that thread 4516 is waitting on a page, address is ffffd0969f95d3c0, and has waited for 11997ms. First field denotes the page address process is waitting on. Second field denotes the wait moment and the third denotes current moment. In practice, if we found a process waitting on one page for too long time, we can get page's address by reading /proc/$pid/wait_page, and search this page address in all block dev' /sys/kernel/debug/block/${devname}/rq_hang, if search operation hits one, we can get the request and know why this io request hangs that long. Signed-off-by: Xiaoguang Wang Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2806 --- block/bio.c | 3 +++ fs/jbd2/transaction.c | 2 ++ fs/proc/base.c | 11 +++++++++++ include/linux/buffer_head.h | 10 ++++++++-- include/linux/sched.h | 37 +++++++++++++++++++++++++++++++++++++ mm/filemap.c | 2 ++ 6 files changed, 63 insertions(+), 2 deletions(-) diff --git a/block/bio.c b/block/bio.c index 62419aa09d73..c87160fc8974 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1376,12 +1376,15 @@ int submit_bio_wait(struct bio *bio) /* Prevent hang_check timer from firing at us during very long I/O */ hang_check = sysctl_hung_task_timeout_secs; + + task_set_wait_res(TASK_WAIT_BIO, bio); if (hang_check) while (!wait_for_completion_io_timeout(&done, hang_check * (HZ/2))) ; else wait_for_completion_io(&done); + task_clear_wait_res(); return blk_status_to_errno(bio->bi_status); } diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 
9346d5592d1b..8594f28c96af 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1098,7 +1098,9 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, if (buffer_shadow(bh)) { JBUFFER_TRACE(jh, "on shadow: sleep"); spin_unlock(&jh->b_state_lock); + task_set_wait_res(TASK_WAIT_FOLIO, bh->b_folio); wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE); + task_clear_wait_res(); goto repeat; } diff --git a/fs/proc/base.c b/fs/proc/base.c index 699f085d4de7..ac4a9d53a13d 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -603,6 +603,15 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns, return 0; } +static int proc_wait_res(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) +{ + seq_printf(m, "%d %px %lu %lu\n", task->wait_res_type, task->wait_folio, + task->wait_moment, jiffies); + + return 0; +} + struct limit_names { const char *name; const char *unit; @@ -3411,6 +3420,7 @@ static const struct pid_entry tgid_base_stuff[] = { ONE("ksm_merging_pages", S_IRUSR, proc_pid_ksm_merging_pages), ONE("ksm_stat", S_IRUSR, proc_pid_ksm_stat), #endif + ONE("wait_res", 0444, proc_wait_res), }; static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) @@ -3750,6 +3760,7 @@ static const struct pid_entry tid_base_stuff[] = { ONE("ksm_merging_pages", S_IRUSR, proc_pid_ksm_merging_pages), ONE("ksm_stat", S_IRUSR, proc_pid_ksm_stat), #endif + ONE("wait_res", 0444, proc_wait_res), }; static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx) diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 44e9de51eedf..9711ae81d988 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -369,8 +369,11 @@ map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block) static inline void wait_on_buffer(struct buffer_head *bh) { might_sleep(); - if (buffer_locked(bh)) + if (buffer_locked(bh)) { + 
task_set_wait_res(TASK_WAIT_FOLIO, bh->b_folio); __wait_on_buffer(bh); + task_clear_wait_res(); + } } static inline int trylock_buffer(struct buffer_head *bh) @@ -381,8 +384,11 @@ static inline int trylock_buffer(struct buffer_head *bh) static inline void lock_buffer(struct buffer_head *bh) { might_sleep(); - if (!trylock_buffer(bh)) + if (!trylock_buffer(bh)) { + task_set_wait_res(TASK_WAIT_FOLIO, bh->b_folio); __lock_buffer(bh); + task_clear_wait_res(); + } } static inline struct buffer_head *getblk_unmovable(struct block_device *bdev, diff --git a/include/linux/sched.h b/include/linux/sched.h index d4f9d82c69e0..c598b36dabd5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1541,6 +1541,13 @@ struct task_struct { struct user_event_mm *user_event_mm; #endif + int wait_res_type; + union { + struct folio *wait_folio; + struct bio *wait_bio; + }; + unsigned long wait_moment; + /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct. 
@@ -1558,6 +1565,36 @@ struct task_struct { */ }; +enum { + TASK_WAIT_FOLIO = 1, + TASK_WAIT_BIO, +}; + +static inline void task_set_wait_res(int type, void *res) +{ + switch (type) { + case TASK_WAIT_FOLIO: + current->wait_folio = (struct folio *)res; + break; + case TASK_WAIT_BIO: + current->wait_bio = (struct bio *)res; + break; + default: + current->wait_folio = NULL; + break; + } + + current->wait_res_type = type; + current->wait_moment = jiffies; +} + +static inline void task_clear_wait_res(void) +{ + current->wait_folio = NULL; + current->wait_res_type = 0; + current->wait_moment = 0; +} + static inline struct pid *task_pid(struct task_struct *task) { return task->thread_pid; diff --git a/mm/filemap.c b/mm/filemap.c index 2c308413387f..0440e04ecdee 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1654,8 +1654,10 @@ EXPORT_SYMBOL(folio_end_writeback); */ void __folio_lock(struct folio *folio) { + task_set_wait_res(TASK_WAIT_FOLIO, folio); folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE, EXCLUSIVE); + task_clear_wait_res(); } EXPORT_SYMBOL(__folio_lock); -- Gitee From e5dfc0ae1cb36828d29f87d12c261bec03d7967c Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 26 Feb 2024 17:21:45 +0800 Subject: [PATCH 0202/2138] anolis: blk: add iohang check function ANBZ: #8419 Background: We do not have a dependable block layer interface to determine whether block device has io requests which have not been completed for somewhat long time. Currently we have 'in_flight' interface, it counts the number of I/O requests that have been issued to the device driver but have not yet completed, and it does not include I/O requests that are in the queue but not yet issued to the device driver, which means it will not count io requests that have been stucked in block layer. 
Also, say that there are steady io requests issued to device driver, 'in_flight' may be always non-zero, but you could not determine whether there is one io request which has not been completed for too long. Solution: To find io requests which have not been completed for too long, here add 3 new interfaces: /sys/block/vdb/queue/hang_threshold If one io request's running time has been greater than this value, count this io as hang. /sys/block/vdb/hang Show read/write io requests' hang counter. /sys/kernel/debug/block/vdb/rq_hang Show all hang io requests' detailed info, like below: ffff97db96301200 {.op=WRITE, .cmd_flags=SYNC, .rq_flags=STARTED| ELVPRIV|IO_STAT|STATS, .state=in_flight, .tag=30, .internal_tag=169, .start_time_ns=140634088407, .io_start_time_ns=140634102958, .current_time=146497371953, .bio = ffff97db91e8e000, .bio_pages = { ffffd096a0602540 }, .bio = ffff97db91e8ec00, .bio_pages = { ffffd096a070eec0 }, .bio = ffff97db91e8f600, .bio_pages = { ffffd096a0424cc0 }, .bio = ffff97db91e8f300, .bio_pages = { ffffd096a0600a80 }} With above info, we can easily see this request's latency distribution, and see next patch for bio_pages' usage. Note this feature needs CONFIG_BLK_DEBUG_FS to be enabled. 
Signed-off-by: Xiaoguang Wang Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2806 --- block/blk-core.c | 1 + block/blk-mq-debugfs.c | 81 +++++++++++++++++++++++++++++++++++++++++ block/blk-mq.c | 28 ++++++++++++++ block/blk-mq.h | 2 + block/blk-settings.c | 7 ++++ block/blk-sysfs.c | 22 +++++++++++ block/blk.h | 2 + block/genhd.c | 19 ++++++++++ block/partitions/core.c | 2 + include/linux/blkdev.h | 9 +++++ 10 files changed, 173 insertions(+) diff --git a/block/blk-core.c b/block/blk-core.c index 4f25d2c4bc70..f49a8f2f3d17 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -404,6 +404,7 @@ struct request_queue *blk_alloc_queue(int node_id) return NULL; q->last_merge = NULL; + q->rq_hang_threshold = BLK_REQ_HANG_THRESHOLD; q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL); if (q->id < 0) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index c3b5930106b2..271535f56bd2 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -155,12 +155,47 @@ static ssize_t queue_state_write(void *data, const char __user *buf, return count; } +static void blk_mq_debugfs_rq_hang_show(struct seq_file *m, struct request *rq); + +static bool blk_mq_check_rq_hang(struct request *rq, void *priv) +{ + struct seq_file *m = priv; + u64 now = ktime_get_ns(); + u64 duration; + + duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC); + if (duration < rq->q->rq_hang_threshold) + return true; + + /* See comments in blk_mq_check_expired() */ + if (!req_ref_inc_not_zero(rq)) + return true; + + duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC); + if (duration >= rq->q->rq_hang_threshold) + blk_mq_debugfs_rq_hang_show(m, rq); + + blk_mq_put_rq_ref(rq); + + return true; + +} + +static int queue_rq_hang_show(void *data, struct seq_file *m) +{ + struct request_queue *q = data; + + blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, m); + return 0; +} + static const struct blk_mq_debugfs_attr 
blk_mq_debugfs_queue_attrs[] = { { "poll_stat", 0400, queue_poll_stat_show }, { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops }, { "pm_only", 0600, queue_pm_only_show, NULL }, { "state", 0600, queue_state_show, queue_state_write }, { "zone_wlock", 0400, queue_zone_wlock_show, NULL }, + { "rq_hang", 0400, queue_rq_hang_show, NULL }, { }, }; @@ -310,6 +345,52 @@ int blk_mq_debugfs_rq_show(struct seq_file *m, void *v) } EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show); +static void blk_mq_debugfs_rq_hang_show(struct seq_file *m, struct request *rq) +{ + const struct blk_mq_ops *const mq_ops = rq->q->mq_ops; + const unsigned int op = req_op(rq); + const char *op_str = blk_op_str(op); + struct bio *bio; + struct bio_vec *bvec; + struct bvec_iter_all iter_all; + + seq_printf(m, "%p {.op=", rq); + if (strcmp(op_str, "UNKNOWN") == 0) + seq_printf(m, "%u", op); + else + seq_printf(m, "%s", op_str); + seq_puts(m, ", .cmd_flags="); + blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name, + ARRAY_SIZE(cmd_flag_name)); + seq_puts(m, ", .rq_flags="); + blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name, + ARRAY_SIZE(rqf_name)); + seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq))); + seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag, + rq->internal_tag); + seq_printf(m, ", .start_time_ns=%llu", rq->start_time_ns); + seq_printf(m, ", .io_start_time_ns=%llu", rq->io_start_time_ns); + seq_printf(m, ", .current_time=%llu", ktime_get_ns()); + + __rq_for_each_bio(bio, rq) { + seq_printf(m, ", .bio = %px", bio); + seq_printf(m, ", .sector = %llu, .len=%u", + bio->bi_iter.bi_sector, bio->bi_iter.bi_size); + seq_puts(m, ", .bio_pages = { "); + bio_for_each_segment_all(bvec, bio, iter_all) { + struct page *page = bvec->bv_page; + + if (!page) + continue; + seq_printf(m, "%px ", page); + } + seq_puts(m, "}"); + } + if (mq_ops->show_rq) + mq_ops->show_rq(m, rq); + seq_puts(m, "}\n"); +} + static void *hctx_dispatch_start(struct seq_file *m, 
loff_t *pos) __acquires(&hctx->lock) { diff --git a/block/blk-mq.c b/block/blk-mq.c index 5da948b07058..0900ac500c0b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -89,6 +89,11 @@ struct mq_inflight { unsigned int inflight[2]; }; +struct mq_hang { + struct block_device *part; + unsigned int hang[2]; +}; + static bool blk_mq_check_inflight(struct request *rq, void *priv) { struct mq_inflight *mi = priv; @@ -121,6 +126,29 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part, inflight[1] = mi.inflight[1]; } +static bool blk_mq_check_hang(struct request *rq, void *priv) +{ + struct mq_hang *mh = priv; + u64 now = ktime_get_ns(), duration; + + duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC); + if ((duration >= rq->q->rq_hang_threshold) && + (!mh->part->bd_partno || rq->part == mh->part)) + mh->hang[rq_data_dir(rq)]++; + + return true; +} + +void blk_mq_hang_rw(struct request_queue *q, struct block_device *part, + unsigned int hang[2]) +{ + struct mq_hang mh = { .part = part }; + + blk_mq_queue_tag_busy_iter(q, blk_mq_check_hang, &mh); + hang[0] = mh.hang[0]; + hang[1] = mh.hang[1]; +} + void blk_freeze_queue_start(struct request_queue *q) { mutex_lock(&q->mq_freeze_lock); diff --git a/block/blk-mq.h b/block/blk-mq.h index cf9f21772ddc..49e672e0211f 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -253,6 +253,8 @@ unsigned int blk_mq_in_flight(struct request_queue *q, struct block_device *part); void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part, unsigned int inflight[2]); +void blk_mq_hang_rw(struct request_queue *q, struct block_device *part, + unsigned int hang[2]); static inline void blk_mq_put_dispatch_budget(struct request_queue *q, int budget_token) diff --git a/block/blk-settings.c b/block/blk-settings.c index 7019b8e204d9..52fa777d2998 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -25,6 +25,13 @@ void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) } 
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout); +void blk_queue_rq_hang_threshold(struct request_queue *q, + unsigned int hang_threshold) +{ + q->rq_hang_threshold = hang_threshold; +} +EXPORT_SYMBOL_GPL(blk_queue_rq_hang_threshold); + /** * blk_set_default_limits - reset limits to default values * @lim: the queue_limits structure to reset diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 63e481262336..f852ce8b40a4 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -438,6 +438,26 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page, return count; } +static ssize_t queue_hang_threshold_show(struct request_queue *q, char *page) +{ + return sprintf(page, "%u\n", q->rq_hang_threshold); +} + +static ssize_t queue_hang_threshold_store(struct request_queue *q, const char *page, + size_t count) +{ + unsigned int hang_threshold; + int err; + + err = kstrtou32(page, 10, &hang_threshold); + if (err || hang_threshold == 0) + return -EINVAL; + + blk_queue_rq_hang_threshold(q, hang_threshold); + + return count; +} + static ssize_t queue_wc_show(struct request_queue *q, char *page) { if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) @@ -527,6 +547,7 @@ QUEUE_RO_ENTRY(queue_dax, "dax"); QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout"); QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask"); QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment"); +QUEUE_RW_ENTRY(queue_hang_threshold, "hang_threshold"); #ifdef CONFIG_BLK_DEV_THROTTLING_LOW QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time"); @@ -656,6 +677,7 @@ static struct attribute *queue_attrs[] = { #endif &queue_virt_boundary_mask_entry.attr, &queue_dma_alignment_entry.attr, + &queue_hang_threshold_entry.attr, NULL, }; diff --git a/block/blk.h b/block/blk.h index 67915b04b3c1..475bbb40bb83 100644 --- a/block/blk.h +++ b/block/blk.h @@ -285,6 +285,8 @@ ssize_t part_stat_show(struct device *dev, struct device_attribute *attr, char *buf); ssize_t part_inflight_show(struct device 
*dev, struct device_attribute *attr, char *buf); +ssize_t part_hang_show(struct device *dev, struct device_attribute *attr, + char *buf); ssize_t part_fail_show(struct device *dev, struct device_attribute *attr, char *buf); ssize_t part_fail_store(struct device *dev, struct device_attribute *attr, diff --git a/block/genhd.c b/block/genhd.c index 203c880c3e1c..4e5eaec73da4 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1004,6 +1004,23 @@ ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]); } +ssize_t part_hang_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct block_device *bdev = dev_to_bdev(dev); + struct request_queue *q = bdev_get_queue(bdev); + unsigned int hang[2] = {0, 0}; + + /* + * For now, we only support mq device, since don't find a generic method + * to track reqs in single queue device. + */ + if (queue_is_mq(q)) + blk_mq_hang_rw(q, bdev, hang); + + return sprintf(buf, "%8u %8u\n", hang[0], hang[1]); +} + static ssize_t disk_capability_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1054,6 +1071,7 @@ static DEVICE_ATTR(discard_alignment, 0444, disk_discard_alignment_show, NULL); static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL); static DEVICE_ATTR(stat, 0444, part_stat_show, NULL); static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL); +static DEVICE_ATTR(hang, 0444, part_hang_show, NULL); static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store); static DEVICE_ATTR(diskseq, 0444, diskseq_show, NULL); static DEVICE_ATTR(partscan, 0444, partscan_show, NULL); @@ -1098,6 +1116,7 @@ static struct attribute *disk_attrs[] = { &dev_attr_capability.attr, &dev_attr_stat.attr, &dev_attr_inflight.attr, + &dev_attr_hang.attr, &dev_attr_badblocks.attr, &dev_attr_events.attr, &dev_attr_events_async.attr, diff --git a/block/partitions/core.c b/block/partitions/core.c index 
fc0ab5d8ab70..549ce89a657b 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -208,6 +208,7 @@ static DEVICE_ATTR(alignment_offset, 0444, part_alignment_offset_show, NULL); static DEVICE_ATTR(discard_alignment, 0444, part_discard_alignment_show, NULL); static DEVICE_ATTR(stat, 0444, part_stat_show, NULL); static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL); +static DEVICE_ATTR(hang, 0444, part_hang_show, NULL); #ifdef CONFIG_FAIL_MAKE_REQUEST static struct device_attribute dev_attr_fail = __ATTR(make-it-fail, 0644, part_fail_show, part_fail_store); @@ -222,6 +223,7 @@ static struct attribute *part_attrs[] = { &dev_attr_discard_alignment.attr, &dev_attr_stat.attr, &dev_attr_inflight.attr, + &dev_attr_hang.attr, #ifdef CONFIG_FAIL_MAKE_REQUEST &dev_attr_fail.attr, #endif diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ef35e9a9878c..53ad9ddcc776 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -388,6 +388,12 @@ struct blk_independent_access_ranges { struct blk_independent_access_range ia_range[]; }; +/* + * default request hang threshold, unit is millisecond. If one request does + * not complete in this threshold time, consider this request as hang. 
+ */ +#define BLK_REQ_HANG_THRESHOLD 5000 + struct request_queue { struct request *last_merge; struct elevator_queue *elevator; @@ -464,6 +470,7 @@ struct request_queue { #endif unsigned int rq_timeout; + unsigned int rq_hang_threshold; struct timer_list timeout; struct work_struct timeout_work; @@ -955,6 +962,8 @@ extern void blk_queue_required_elevator_features(struct request_queue *q, unsigned int features); extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, struct device *dev); +extern void blk_queue_rq_hang_threshold(struct request_queue *q, + unsigned int hang_threshold); bool __must_check blk_get_queue(struct request_queue *); extern void blk_put_queue(struct request_queue *); -- Gitee From 0385e7d8575c4a57fc392e9ba5208618e92f5357 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 26 Feb 2024 19:41:23 +0800 Subject: [PATCH 0203/2138] anolis: block: add counter to track io request's d2c time ANBZ: #8419 Indeed tool iostat's await is not good enough, which is somewhat sketchy and could not show request's latency on device driver's side. Here we add a new counter to track io request's d2c time, also with this patch, we can extend iostat to show this value easily. Note: I had checked how iostat is implemented, it just reads fields it needs, so iostat won't be affected by this change, so does tsar. 
Signed-off-by: Xiaoguang Wang Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2806 --- Documentation/admin-guide/iostats.rst | 6 ++++++ block/blk-mq.c | 4 ++++ block/genhd.c | 18 +++++++++++++++--- include/linux/part_stat.h | 1 + 4 files changed, 26 insertions(+), 3 deletions(-) diff --git a/Documentation/admin-guide/iostats.rst b/Documentation/admin-guide/iostats.rst index 609a3201fd4e..f9af03371cc1 100644 --- a/Documentation/admin-guide/iostats.rst +++ b/Documentation/admin-guide/iostats.rst @@ -131,6 +131,12 @@ Field 16 -- # of flush requests completed Field 17 -- # of milliseconds spent flushing This is the total number of milliseconds spent by all flush requests. +Field 18 -- # of milliseconds spent reading on device driver's side + +Field 19 -- # of milliseconds spent writing on device driver's side + +Field 20 -- # of milliseconds spent discarding on device driver's side + To avoid introducing performance bottlenecks, no locks are held while modifying these counters. 
This implies that minor inaccuracies may be introduced when changes collide, so (for instance) adding up all the diff --git a/block/blk-mq.c b/block/blk-mq.c index 0900ac500c0b..93324d9fb176 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1025,6 +1025,10 @@ static inline void blk_account_io_done(struct request *req, u64 now) part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns); part_stat_local_dec(req->part, in_flight[op_is_write(req_op(req))]); + if (req->rq_flags & RQF_STATS) { + part_stat_add(req->part, d2c_nsecs[sgrp], + now - req->io_start_time_ns); + } part_stat_unlock(); } } diff --git a/block/genhd.c b/block/genhd.c index 4e5eaec73da4..40f3a35e5883 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -109,6 +109,7 @@ static void part_stat_read_all(struct block_device *part, for (group = 0; group < NR_STAT_GROUPS; group++) { stat->nsecs[group] += ptr->nsecs[group]; + stat->d2c_nsecs[group] += ptr->d2c_nsecs[group]; stat->sectors[group] += ptr->sectors[group]; stat->ios[group] += ptr->ios[group]; stat->merges[group] += ptr->merges[group]; @@ -964,7 +965,8 @@ ssize_t part_stat_show(struct device *dev, "%8lu %8lu %8llu %8u " "%8u %8u %8u " "%8lu %8lu %8llu %8u " - "%8lu %8u" + "%8lu %8u " + "%8u %8u %8u" "\n", stat.ios[STAT_READ], stat.merges[STAT_READ], @@ -986,7 +988,10 @@ ssize_t part_stat_show(struct device *dev, (unsigned long long)stat.sectors[STAT_DISCARD], (unsigned int)div_u64(stat.nsecs[STAT_DISCARD], NSEC_PER_MSEC), stat.ios[STAT_FLUSH], - (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC)); + (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_READ], NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_WRITE], NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_DISCARD], NSEC_PER_MSEC)); } ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, @@ -1284,7 +1289,8 @@ static int diskstats_show(struct seq_file *seqf, void *v) "%lu %lu %lu 
%u " "%u %u %u " "%lu %lu %lu %u " - "%lu %u" + "%lu %u " + "%u %u %u" "\n", MAJOR(hd->bd_dev), MINOR(hd->bd_dev), hd, stat.ios[STAT_READ], @@ -1311,6 +1317,12 @@ static int diskstats_show(struct seq_file *seqf, void *v) NSEC_PER_MSEC), stat.ios[STAT_FLUSH], (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], + NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_READ], + NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_WRITE], + NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_DISCARD], NSEC_PER_MSEC) ); } diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index abeba356bc3f..f03f0c0735de 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -7,6 +7,7 @@ struct disk_stats { u64 nsecs[NR_STAT_GROUPS]; + u64 d2c_nsecs[NR_STAT_GROUPS]; unsigned long sectors[NR_STAT_GROUPS]; unsigned long ios[NR_STAT_GROUPS]; unsigned long merges[NR_STAT_GROUPS]; -- Gitee From 743cdecd57045d8fe134f215507c0b380f6a2dc8 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Fri, 26 Aug 2022 15:49:58 +0800 Subject: [PATCH 0204/2138] anolis: ovl: change pr_warn() to pr_warn_ratelimited() ANBZ: #8457 Change pr_warn() to pr_warn_ratelimited() to avoid softlockup caused by high frequent printed messages in abnormal case. 
Signed-off-by: Joseph Qi Signed-off-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2830 --- fs/overlayfs/super.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 2c056d737c27..a86de37f18c2 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -403,7 +403,8 @@ static int ovl_lower_dir(const char *name, struct path *path, (ofs->config.index && ofs->config.upperdir)) && !fh_type) { ofs->config.index = false; ofs->config.nfs_export = false; - pr_warn("fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n", + pr_warn_ratelimited("fs on '%s' does not support file handles, " + "falling back to index=off,nfs_export=off.\n", name); } ofs->nofh |= !fh_type; @@ -526,11 +527,15 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir, static int ovl_report_in_use(struct ovl_fs *ofs, const char *name) { if (ofs->config.index) { - pr_err("%s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n", + pr_err("%s is in-use as upperdir/workdir of another mount, " + "mount with '-o index=off' to override exclusive " + "upperdir protection.\n", name); return -EBUSY; } else { - pr_warn("%s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n", + pr_warn_ratelimited("%s is in-use as upperdir/workdir of " + "another mount, accessing files from both mounts will " + "result in undefined behavior.\n", name); return 0; } -- Gitee From f8422f21080a715aef32437270fe0e8199632804 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:49:08 +0800 Subject: [PATCH 0205/2138] anolis: io_uring: re-add sqthread percpu polling support ANBZ: #8410 add sqthread percpu polling feature to acnk-6.6. 
---- we should deal with sqthread creation method 'create_io_thread' when applying SQTHREAD_PERCPU feature in ANCK, which used to be 'kthread_create'. There are some cases we found and solved in this patch: 1. avoid premature destruction in sqthread_percpu_polling We found a corner case in sqthread_percpu_polling usage: There are two processes (not in the same thread group) sharing the same sqthread. Besides, the first process created the sqthread. If the first process ends its execution, the sqthread ends its life and exits too, causing the second process's halt. This behaviour is not accepted. And the reason is: we want to make 'sqd->refs' to be the trigger for sqthread exiting, but when the first process ends, it will also let sqthread end. Thus, we should check sqd->refs to make sure there exist no others using this sqthread. 2. modify create_io_thread to adapt sqthread_percpu_poll. In contrast to original kthread_create sqthread, create_io_thread is more suitable for threads in the same group sharing one sqthread. But this is not perfect for io_thread sharing between processes (like sqthread_percpu usage). This patch originates from a corner case: There are two processes sharing one sqthread, and the process0 creates the sqthread. If we send a SIGKILL to the process0, sqthread can't EOL normally. The reason is that sqthread shared io_uring_files and mm with process0 by using 'CLONE_VM' 'CLONE_FILES' etc. As we 'kill -9 process0', due to reference count, the io_uring_file finally will be released by sqthread, not process0. What we want is: process0..1..2 -> -> sqd->refs-- && sqd->refs == 0 ? -> io_sq_thread EOL. But now it changes to: io_sq_thread should get out of its loop, and release io_uring file in . But we use 'sqd->refs' judgement to avoid its premature exit, which causes a 'deadlock'. In fact, if we make a sqthread sharing between processes, it had better not share only one special process's FILES or VM. 
By delete these flags, sqthread's runtime is more like an independent kernel thread. By the way, in the original kthread_create sqthread creation mode, as it's generated by kthreadd, it has no mm, sharing 'init_fs' and 'init_files', this is quite different from current create_io_thread. Signed-off-by: Ferry Meng Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2822 --- include/linux/sched/task.h | 3 +- include/uapi/linux/io_uring.h | 1 + io_uring/io-wq.c | 4 +- io_uring/io_uring.c | 14 ++++-- io_uring/sqpoll.c | 81 ++++++++++++++++++++++++++++++----- io_uring/sqpoll.h | 2 +- kernel/fork.c | 21 ++++++--- 7 files changed, 101 insertions(+), 25 deletions(-) diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index a23af225c898..d2d46728da3e 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -94,7 +94,8 @@ extern void exit_itimers(struct task_struct *); extern pid_t kernel_clone(struct kernel_clone_args *kargs); struct task_struct *copy_process(struct pid *pid, int trace, int node, struct kernel_clone_args *args); -struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node); +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node, + bool unshare); struct task_struct *fork_idle(int); extern pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name, unsigned long flags); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 8e61f8b7c2ce..46d129dd5d4f 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -165,6 +165,7 @@ enum { * Only one task is allowed to submit requests */ #define IORING_SETUP_SINGLE_ISSUER (1U << 12) +#define IORING_SETUP_SQPOLL_PERCPU (1U << 31) /* percpu SQ poll thread */ /* * Defer running task work to get events. 
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index a1e31723c9ed..eb426621603e 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -780,7 +780,7 @@ static void create_worker_cont(struct callback_head *cb) worker = container_of(cb, struct io_worker, create_work); clear_bit_unlock(0, &worker->create_state); wq = worker->wq; - tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE); + tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE, false); if (!IS_ERR(tsk)) { io_init_new_worker(wq, worker, tsk); io_worker_release(worker); @@ -849,7 +849,7 @@ static bool create_io_worker(struct io_wq *wq, int index) if (index == IO_WQ_ACCT_BOUND) set_bit(IO_WORKER_F_BOUND, &worker->flags); - tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE); + tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE, false); if (!IS_ERR(tsk)) { io_init_new_worker(wq, worker, tsk); } else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) { diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 0122f220ef0d..524d9ddf15e4 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -137,6 +137,8 @@ struct io_defer_entry { u32 seq; }; +extern struct io_sq_data __percpu **percpu_sqd; + /* requests with any of those set should undergo io_disarm_next() */ #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL) #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK) @@ -4076,7 +4078,7 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params) IORING_SETUP_SQE128 | IORING_SETUP_CQE32 | IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN | IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY | - IORING_SETUP_NO_SQARRAY)) + IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQPOLL_PERCPU)) return -EINVAL; return io_uring_create(entries, &p, params); @@ -4365,7 +4367,7 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, if (sqd) { mutex_unlock(&ctx->uring_lock); mutex_unlock(&sqd->lock); - io_put_sq_data(sqd); + 
io_put_sq_data(ctx, sqd); mutex_lock(&ctx->uring_lock); } @@ -4393,9 +4395,8 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, if (sqd) { mutex_unlock(&ctx->uring_lock); mutex_unlock(&sqd->lock); - io_put_sq_data(sqd); + io_put_sq_data(ctx, sqd); mutex_lock(&ctx->uring_lock); - } return ret; } @@ -4616,6 +4617,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, static int __init io_uring_init(void) { + int cpu; + #define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \ BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \ BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \ @@ -4705,6 +4708,9 @@ static int __init io_uring_init(void) sizeof_field(struct io_kiocb, cmd.data), NULL); iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64); + percpu_sqd = alloc_percpu(struct io_sq_data *); + for_each_possible_cpu(cpu) + *per_cpu_ptr(percpu_sqd, cpu) = NULL; #ifdef CONFIG_SYSCTL register_sysctl_init("kernel", kernel_io_uring_disabled_table); diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index 489e66647e07..adc0dde11498 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -25,6 +25,9 @@ enum { IO_SQ_THREAD_SHOULD_PARK, }; +DEFINE_MUTEX(percpu_sqd_lock); +struct io_sq_data __percpu **percpu_sqd; + void io_sq_thread_unpark(struct io_sq_data *sqd) __releases(&sqd->lock) { @@ -65,14 +68,28 @@ void io_sq_thread_stop(struct io_sq_data *sqd) wait_for_completion(&sqd->exited); } -void io_put_sq_data(struct io_sq_data *sqd) +void io_put_sq_data(struct io_ring_ctx *ctx, struct io_sq_data *sqd) { + int percpu = 0; + + if ((ctx->flags & IORING_SETUP_SQ_AFF) && + (ctx->flags & IORING_SETUP_SQPOLL_PERCPU)) + percpu = 1; + + if (percpu) + mutex_lock(&percpu_sqd_lock); + if (refcount_dec_and_test(&sqd->refs)) { WARN_ON_ONCE(atomic_read(&sqd->park_pending)); io_sq_thread_stop(sqd); + if (percpu) + *per_cpu_ptr(percpu_sqd, sqd->sq_cpu) = NULL; kfree(sqd); } + + if (percpu) + mutex_unlock(&percpu_sqd_lock); } static 
__cold void io_sqd_update_thread_idle(struct io_sq_data *sqd) @@ -95,7 +112,7 @@ void io_sq_thread_finish(struct io_ring_ctx *ctx) io_sqd_update_thread_idle(sqd); io_sq_thread_unpark(sqd); - io_put_sq_data(sqd); + io_put_sq_data(ctx, sqd); ctx->sq_data = NULL; } } @@ -131,11 +148,11 @@ static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p) } static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, - bool *attached) + bool *attached, bool *percpu_found) { struct io_sq_data *sqd; - *attached = false; + *attached = *percpu_found = false; if (p->flags & IORING_SETUP_ATTACH_WQ) { sqd = io_attach_sq_data(p); if (!IS_ERR(sqd)) { @@ -147,6 +164,19 @@ static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, return sqd; } + if ((p->flags & IORING_SETUP_SQ_AFF) && + (p->flags & IORING_SETUP_SQPOLL_PERCPU)) { + mutex_lock(&percpu_sqd_lock); + sqd = *per_cpu_ptr(percpu_sqd, p->sq_thread_cpu); + if (sqd) { + refcount_inc(&sqd->refs); + mutex_unlock(&percpu_sqd_lock); + *percpu_found = true; + return sqd; + } + mutex_unlock(&percpu_sqd_lock); + } + sqd = kzalloc(sizeof(*sqd), GFP_KERNEL); if (!sqd) return ERR_PTR(-ENOMEM); @@ -211,8 +241,16 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd) if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) || signal_pending(current)) { mutex_unlock(&sqd->lock); - if (signal_pending(current)) + if (signal_pending(current)) { did_sig = get_signal(&ksig); + if (did_sig && sqd->sq_cpu != -1 && + refcount_read(&sqd->refs) != 0) { + mutex_lock(&percpu_sqd_lock); + if (*per_cpu_ptr(percpu_sqd, sqd->sq_cpu) == sqd) + did_sig = false; + mutex_unlock(&percpu_sqd_lock); + } + } cond_resched(); mutex_lock(&sqd->lock); sqd->sq_cpu = raw_smp_processor_id(); @@ -372,13 +410,26 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, if (ctx->flags & IORING_SETUP_SQPOLL) { struct task_struct *tsk; struct io_sq_data *sqd; - bool attached; + bool attached, percpu_found; ret = security_uring_sqpoll(); if (ret) 
return ret; - sqd = io_get_sq_data(p, &attached); + if ((ctx->flags & IORING_SETUP_ATTACH_WQ) && + (ctx->flags & IORING_SETUP_SQPOLL_PERCPU)) { + /* ATTACH_WQ and SQPOLL_PERCPU are mutual exclusive */ + ret = -EINVAL; + goto err; + } + if ((ctx->flags & IORING_SETUP_SQPOLL_PERCPU) && + !(ctx->flags & IORING_SETUP_SQ_AFF)) { + /* SQPOLL_PERCPU and SQ_AFF should both exist */ + ret = -EINVAL; + goto err; + } + + sqd = io_get_sq_data(p, &attached, &percpu_found); if (IS_ERR(sqd)) { ret = PTR_ERR(sqd); goto err; @@ -399,7 +450,7 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, if (ret < 0) goto err; - if (attached) + if (attached || percpu_found) return 0; if (p->flags & IORING_SETUP_SQ_AFF) { @@ -426,7 +477,8 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, sqd->task_pid = current->pid; sqd->task_tgid = current->tgid; - tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE); + tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE, + !!(ctx->flags & IORING_SETUP_SQPOLL_PERCPU)); if (IS_ERR(tsk)) { ret = PTR_ERR(tsk); goto err_sqpoll; @@ -434,12 +486,19 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, sqd->thread = tsk; task_to_put = get_task_struct(tsk); + if ((p->flags & IORING_SETUP_SQ_AFF) && + (p->flags & IORING_SETUP_SQPOLL_PERCPU)) { + mutex_lock(&percpu_sqd_lock); + *per_cpu_ptr(percpu_sqd, sqd->sq_cpu) = sqd; + mutex_unlock(&percpu_sqd_lock); + } ret = io_uring_alloc_task_context(tsk, ctx); wake_up_new_task(tsk); if (ret) goto err; - } else if (p->flags & IORING_SETUP_SQ_AFF) { - /* Can't have SQ_AFF without SQPOLL */ + } else if (p->flags & (IORING_SETUP_SQ_AFF | + IORING_SETUP_SQPOLL_PERCPU)) { + /* Can't have SQ_AFF or SQPOLL_PERCPU without SQPOLL */ ret = -EINVAL; goto err; } diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h index 8df37e8c9149..1579a0a2bdc3 100644 --- a/io_uring/sqpoll.h +++ b/io_uring/sqpoll.h @@ -25,6 +25,6 @@ void io_sq_thread_finish(struct io_ring_ctx *ctx); void io_sq_thread_stop(struct io_sq_data 
*sqd); void io_sq_thread_park(struct io_sq_data *sqd); void io_sq_thread_unpark(struct io_sq_data *sqd); -void io_put_sq_data(struct io_sq_data *sqd); +void io_put_sq_data(struct io_ring_ctx *ctx, struct io_sq_data *sqd); void io_sqpoll_wait_sq(struct io_ring_ctx *ctx); int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask); diff --git a/kernel/fork.c b/kernel/fork.c index feedc398e854..f31cd315aad5 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2849,13 +2849,22 @@ struct task_struct * __init fork_idle(int cpu) * The returned task is inactive, and the caller must fire it up through * wake_up_new_task(p). All signals are blocked in the created task. */ -struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node) -{ - unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| - CLONE_IO; +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node, + bool unshare) +{ + unsigned long flags = unshare ? 0 : (CLONE_FS|CLONE_FILES| + CLONE_SIGHAND|CLONE_THREAD| + CLONE_IO|CLONE_VM); + /* we use 'unshare' flag to try to create an independent io_thread, + * 'unshare' describes whether child share parent's mm directly (with + * refcount add one), or it should copy mm/files when copy_process(). + * By setting this flag, the io_thread won't share parent's mm + * directly, but can be shared among different tasks, and looks more + * reasonably. 
+ */ struct kernel_clone_args args = { - .flags = ((lower_32_bits(flags) | CLONE_VM | - CLONE_UNTRACED) & ~CSIGNAL), + .flags = ((lower_32_bits(flags) | CLONE_UNTRACED) + & ~CSIGNAL), .exit_signal = (lower_32_bits(flags) & CSIGNAL), .fn = fn, .fn_arg = arg, -- Gitee From 5481eba22d42ce0dd2da6cf6f39446ac59e41703 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:49:31 +0800 Subject: [PATCH 0206/2138] anolis: io_uring: add support for us granularity of io_sq_thread_idle ANBZ: #8410 currently unit of io_sq_thread_idle is millisecond, the smallest value is 1ms, which means for IOPS > 1000, sqthread will very likely take 100% cpu usage. This is not necessary in some cases, like users may don't care about latency much in low IO pressure (like 1000 < IOPS < 20000), but cpu resource does matter. So we offer an option of microsecond granularity of io_sq_thread_idle. Signed-off-by: Hao Xu Signed-off-by: Ferry Meng Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2822 --- include/uapi/linux/io_uring.h | 1 + io_uring/io_uring.c | 3 +- io_uring/sqpoll.c | 64 ++++++++++++++++++++++++++++------- io_uring/sqpoll.h | 2 ++ 4 files changed, 57 insertions(+), 13 deletions(-) diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 46d129dd5d4f..08dad474a82b 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -165,6 +165,7 @@ enum { * Only one task is allowed to submit requests */ #define IORING_SETUP_SINGLE_ISSUER (1U << 12) +#define IORING_SETUP_IDLE_US (1U << 30) /* unit of thread_idle is microsecond */ #define IORING_SETUP_SQPOLL_PERCPU (1U << 31) /* percpu SQ poll thread */ /* diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 524d9ddf15e4..b8c435e36072 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -4078,7 +4078,8 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params) IORING_SETUP_SQE128 | IORING_SETUP_CQE32 | IORING_SETUP_SINGLE_ISSUER 
| IORING_SETUP_DEFER_TASKRUN | IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY | - IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQPOLL_PERCPU)) + IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQPOLL_PERCPU | + IORING_SETUP_IDLE_US)) return -EINVAL; return io_uring_create(entries, &p, params); diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index adc0dde11498..ff6d7d847dfe 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -97,11 +97,36 @@ static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd) struct io_ring_ctx *ctx; unsigned sq_thread_idle = 0; - list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) - sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle); + sqd->idle_mode_us = false; + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { + bool idle_mode_us = ctx->flags & IORING_SETUP_IDLE_US; + unsigned int tmp_idle = idle_mode_us ? ctx->sq_thread_idle : + jiffies_to_usecs(ctx->sq_thread_idle); + + if (idle_mode_us && !sqd->idle_mode_us) + sqd->idle_mode_us = true; + + if (sq_thread_idle < tmp_idle) + sq_thread_idle = tmp_idle; + } + + if (!sqd->idle_mode_us) + sq_thread_idle = usecs_to_jiffies(sq_thread_idle); sqd->sq_thread_idle = sq_thread_idle; } +static inline u64 io_current_time(bool idle_mode_us) +{ + return idle_mode_us ? 
(ktime_get_ns() >> 10) : get_jiffies_64(); +} + +static inline bool io_time_after(bool idle_mode_us, u64 timeout) +{ + u64 now = io_current_time(idle_mode_us); + + return time_after64(now, timeout); +} + void io_sq_thread_finish(struct io_ring_ctx *ctx) { struct io_sq_data *sqd = ctx->sq_data; @@ -262,7 +287,7 @@ static int io_sq_thread(void *data) { struct io_sq_data *sqd = data; struct io_ring_ctx *ctx; - unsigned long timeout = 0; + u64 timeout = 0; char buf[TASK_COMM_LEN]; DEFINE_WAIT(wait); @@ -294,7 +319,7 @@ static int io_sq_thread(void *data) if (io_sqd_events_pending(sqd) || signal_pending(current)) { if (io_sqd_handle_event(sqd)) break; - timeout = jiffies + sqd->sq_thread_idle; + timeout = io_current_time(sqd->idle_mode_us) + sqd->sq_thread_idle; } cap_entries = !list_is_singular(&sqd->ctx_list); @@ -307,9 +332,9 @@ static int io_sq_thread(void *data) if (io_run_task_work()) sqt_spin = true; - if (sqt_spin || !time_after(jiffies, timeout)) { + if (sqt_spin || !io_time_after(sqd->idle_mode_us, timeout)) { if (sqt_spin) - timeout = jiffies + sqd->sq_thread_idle; + timeout = io_current_time(sqd->idle_mode_us) + sqd->sq_thread_idle; if (unlikely(need_resched())) { mutex_unlock(&sqd->lock); cond_resched(); @@ -356,7 +381,7 @@ static int io_sq_thread(void *data) } finish_wait(&sqd->wait, &wait); - timeout = jiffies + sqd->sq_thread_idle; + timeout = io_current_time(sqd->idle_mode_us) + sqd->sq_thread_idle; } io_uring_cancel_generic(true, sqd); @@ -387,6 +412,8 @@ void io_sqpoll_wait_sq(struct io_ring_ctx *ctx) finish_wait(&ctx->sqo_sq_wait, &wait); } +#define DEFAULT_SQ_IDLE_US 10 + __cold int io_sq_offload_create(struct io_ring_ctx *ctx, struct io_uring_params *p) { @@ -437,9 +464,22 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, ctx->sq_creds = get_current_cred(); ctx->sq_data = sqd; - ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle); - if (!ctx->sq_thread_idle) - ctx->sq_thread_idle = HZ; + if ((ctx->flags & IORING_SETUP_IDLE_US) && + 
!(ctx->flags & IORING_SETUP_SQPOLL_PERCPU)) { + ret = -EINVAL; + goto err; + } + + /* + * for ms mode: ctx->sq_thread_idle is jiffies + * for us mode: ctx->sq_thread_idle is time in terms of microsecond + */ + if (ctx->flags & IORING_SETUP_IDLE_US) + ctx->sq_thread_idle = p->sq_thread_idle ? + p->sq_thread_idle : DEFAULT_SQ_IDLE_US; + else + ctx->sq_thread_idle = p->sq_thread_idle ? + msecs_to_jiffies(p->sq_thread_idle) : HZ; io_sq_thread_park(sqd); list_add(&ctx->sqd_list, &sqd->ctx_list); @@ -496,9 +536,9 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, wake_up_new_task(tsk); if (ret) goto err; - } else if (p->flags & (IORING_SETUP_SQ_AFF | + } else if (p->flags & (IORING_SETUP_SQ_AFF | IORING_SETUP_IDLE_US | IORING_SETUP_SQPOLL_PERCPU)) { - /* Can't have SQ_AFF or SQPOLL_PERCPU without SQPOLL */ + /* Can't have SQ_AFF or IDLE_US or SQPOLL_PERCPU without SQPOLL */ ret = -EINVAL; goto err; } diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h index 1579a0a2bdc3..60eec0c97864 100644 --- a/io_uring/sqpoll.h +++ b/io_uring/sqpoll.h @@ -18,6 +18,8 @@ struct io_sq_data { unsigned long state; struct completion exited; + + bool idle_mode_us; }; int io_sq_offload_create(struct io_ring_ctx *ctx, struct io_uring_params *p); -- Gitee From b3ee48b483c1a8cd962ac3defb98ac79976002a9 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:49:44 +0800 Subject: [PATCH 0207/2138] anolis: io_uring: submit sqes in the original context when waking up sqthread ANBZ: #8410 sqes are submitted by sqthread when it is leveraged, which means there is IO latency when waking up sqthread. To wipe it out, submit limited number of sqes in the original task context. [Rebase note:] 1. As sqthread using create_io_thread func and member var 'sqo_task' not used anymore, we shouldn't use same_thread_group to judge the relationship between usermode app and sqthread. 2. 
Now notify type is specified at io_req creation time, and the lowest level is TWA_SIGNAL_NO_IPI (in older version its TWA_NONE), there is no need to additionally specify notify type for submit_on_idle. Refers to patch (9f010507bb io_uring: set task_work notify method at init time). Signed-off-by: Hao Xu Signed-off-by: Shile Zhang Signed-off-by: Ferry Meng Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2822 --- include/uapi/linux/io_uring.h | 1 + io_uring/io_uring.c | 20 ++++++++++++++++---- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 08dad474a82b..fdea50f53da3 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -468,6 +468,7 @@ struct io_cqring_offsets { #define IORING_ENTER_SQ_WAIT (1U << 2) #define IORING_ENTER_EXT_ARG (1U << 3) #define IORING_ENTER_REGISTERED_RING (1U << 4) +#define IORING_ENTER_SQ_SUBMIT_ON_IDLE (1U << 31) /* * Passed in for io_uring_setup(2). Copied back with updated info on success diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index b8c435e36072..5087203699b0 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -1738,11 +1738,12 @@ static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) if (unlikely(needs_lock)) { /* * If IORING_SETUP_SQPOLL is enabled, sqes are either handle - * in sq thread task context or in io worker task context. If - * current task context is sq thread, we don't need to check - * whether should wake up sq thread. + * in sq thread task context or in io worker task context or + * in original context. If current task context is sq thread, + * we don't need to check whether should wake up sq thread. 
*/ if ((ctx->flags & IORING_SETUP_SQPOLL) && + (current != ctx->sq_data->thread) && wq_has_sleeper(&ctx->sq_data->wait)) wake_up(&ctx->sq_data->wait); @@ -3627,6 +3628,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG | + IORING_ENTER_SQ_SUBMIT_ON_IDLE | IORING_ENTER_REGISTERED_RING))) return -EINVAL; @@ -3670,8 +3672,18 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, ret = -EOWNERDEAD; goto out; } - if (flags & IORING_ENTER_SQ_WAKEUP) + if (flags & IORING_ENTER_SQ_WAKEUP) { wake_up(&ctx->sq_data->wait); + if (flags & IORING_ENTER_SQ_SUBMIT_ON_IDLE) { + bool has_lock; + + has_lock = mutex_trylock(&ctx->uring_lock); + if (has_lock) { + io_submit_sqes(ctx, min(to_submit, 8U)); + mutex_unlock(&ctx->uring_lock); + } + } + } if (flags & IORING_ENTER_SQ_WAIT) io_sqpoll_wait_sq(ctx); -- Gitee From f6eba3310532b14745679ca5ba67a9aab2ce63b3 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:51:12 +0800 Subject: [PATCH 0208/2138] anolis: block-throttle: enable hierarchical throttling even on traditional hierarchy ANBZ: #8411 ECI may have an use case that configuring each device mapper disk throttling policy just under root blkio cgroup, but actually using them in different containers. Since hierarchical throttling is now only supported on cgroup v2 and ECI uses cgroup v1, so we have to enable hierarchical throttling on cgroup v1. This is ported from redhat 7u, and a year ago Jiufei already ported it to alikernel 4.9 as well. So I think this change should be acceptable. 
Signed-off-by: Joseph Qi Signed-off-by: Hao Xu Signed-off-by: Ferry Meng Link: https://gitee.com/anolis/cloud-kernel/pulls/2823 --- block/blk-throttle.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 16f5766620a4..824569637fca 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -405,7 +405,9 @@ static void throtl_pd_init(struct blkg_policy_data *pd) * regardless of the position of the group in the hierarchy. */ sq->parent_sq = &td->service_queue; - if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent) + + /* Enable hierarchical throttling even on traditional hierarchy */ + if (blkg->parent) sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue; tg->td = td; } -- Gitee From 616f7f167e5a1642f048164389d9f837591e2e6a Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:51:21 +0800 Subject: [PATCH 0209/2138] anolis: blk-throttle: support io delay stats ANBZ: #8411 Add blkio.throttle.io_service_time and blkio.throttle.io_wait_time to get per-cgroup io delay statistics. io_service_time represents the time spent after io throttle to io completion, while io_wait_time represents the time spent on throttle queue. [Merge note:] As bio_init() has been changed, we should initialize new 'struct bio' member variables. 
Also squash patch "anolis: blk-throttle: fix tg NULL pointer dereference" Also squash patch "anolis: block: replace reserved field with extended bio_flags" Signed-off-by: Joseph Qi Signed-off-by: Hao Xu Signed-off-by: zhongjiang-ali Signed-off-by: Ferry Meng Link: https://gitee.com/anolis/cloud-kernel/pulls/2823 --- block/bio.c | 10 ++++ block/blk-throttle.c | 107 +++++++++++++++++++++++++++++++++++--- block/blk-throttle.h | 4 ++ include/linux/bio.h | 15 ++++++ include/linux/blk_types.h | 40 ++++++++++++++ 5 files changed, 169 insertions(+), 7 deletions(-) diff --git a/block/bio.c b/block/bio.c index c87160fc8974..6784bbe44d16 100644 --- a/block/bio.c +++ b/block/bio.c @@ -263,6 +263,12 @@ void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, bio->bi_issue.value = 0; if (bdev) bio_associate_blkg(bio); +#ifdef CONFIG_BLK_DEV_THROTTLING + bio->start_time_ns = 0; + bio->io_start_time_ns = 0; + bio->bi_tg_end_io = NULL; + bio->bi_tg_private = NULL; +#endif #ifdef CONFIG_BLK_CGROUP_IOCOST bio->bi_iocost_cost = 0; #endif @@ -1605,6 +1611,10 @@ void bio_endio(struct bio *bio) blk_throtl_bio_endio(bio); /* release cgroup info */ bio_uninit(bio); +#ifdef CONFIG_BLK_DEV_THROTTLING + if (bio->bi_tg_end_io) + bio->bi_tg_end_io(bio); +#endif if (bio->bi_end_io) bio->bi_end_io(bio); } diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 824569637fca..d1952cf949cc 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -345,11 +345,11 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, if (!tg) return NULL; - if (blkg_rwstat_init(&tg->stat_bytes, gfp)) - goto err_free_tg; - - if (blkg_rwstat_init(&tg->stat_ios, gfp)) - goto err_exit_stat_bytes; + if (blkg_rwstat_init(&tg->stat_bytes, gfp) || + blkg_rwstat_init(&tg->stat_ios, gfp) || + blkg_rwstat_init(&tg->service_time, gfp) || + blkg_rwstat_init(&tg->wait_time, gfp)) + goto err; throtl_service_queue_init(&tg->service_queue); @@ -376,9 +376,11 @@ static struct 
blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, return &tg->pd; -err_exit_stat_bytes: +err: blkg_rwstat_exit(&tg->stat_bytes); -err_free_tg: + blkg_rwstat_exit(&tg->stat_ios); + blkg_rwstat_exit(&tg->service_time); + blkg_rwstat_exit(&tg->wait_time); kfree(tg); return NULL; } @@ -476,6 +478,8 @@ static void throtl_upgrade_state(struct throtl_data *td); static void throtl_pd_offline(struct blkg_policy_data *pd) { struct throtl_grp *tg = pd_to_tg(pd); + struct blkcg_gq *blkg = pd_to_blkg(pd); + struct blkcg_gq *parent = blkg->parent; tg->bps[READ][LIMIT_LOW] = 0; tg->bps[WRITE][LIMIT_LOW] = 0; @@ -486,6 +490,12 @@ static void throtl_pd_offline(struct blkg_policy_data *pd) if (!tg->td->limit_valid[tg->td->limit_index]) throtl_upgrade_state(tg->td); + if (parent) { + blkg_rwstat_add_aux(&blkg_to_tg(parent)->service_time, + &tg->service_time); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->wait_time, + &tg->wait_time); + } } static void throtl_pd_free(struct blkg_policy_data *pd) @@ -495,9 +505,19 @@ static void throtl_pd_free(struct blkg_policy_data *pd) del_timer_sync(&tg->service_queue.pending_timer); blkg_rwstat_exit(&tg->stat_bytes); blkg_rwstat_exit(&tg->stat_ios); + blkg_rwstat_exit(&tg->service_time); + blkg_rwstat_exit(&tg->wait_time); kfree(tg); } +static void throtl_pd_reset(struct blkg_policy_data *pd) +{ + struct throtl_grp *tg = pd_to_tg(pd); + + blkg_rwstat_reset(&tg->service_time); + blkg_rwstat_reset(&tg->wait_time); +} + static struct throtl_grp * throtl_rb_first(struct throtl_service_queue *parent_sq) { @@ -960,6 +980,64 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, return false; } +static void throtl_stats_update_completion(struct throtl_grp *tg, + uint64_t start_time, + uint64_t io_start_time, + int op) +{ + unsigned long flags; + uint64_t now = sched_clock(); + + local_irq_save(flags); + if (time_after64(now, io_start_time)) + blkg_rwstat_add(&tg->service_time, op, now - io_start_time); + if (time_after64(io_start_time, 
start_time)) + blkg_rwstat_add(&tg->wait_time, op, io_start_time - start_time); + local_irq_restore(flags); +} + +static void throtl_bio_end_io(struct bio *bio) +{ + struct throtl_grp *tg; + + rcu_read_lock(); + /* see comments in throtl_bio_stats_start() */ + if (!bio_ext_flagged(bio, BIO_THROTL_STATED)) + goto out; + + tg = (struct throtl_grp *)bio->bi_tg_private; + if (!tg) + goto out; + + throtl_stats_update_completion(tg, bio_start_time_ns(bio), + bio_io_start_time_ns(bio), + bio_op(bio)); + blkg_put(tg_to_blkg(tg)); + bio_clear_ext_flag(bio, BIO_THROTL_STATED); +out: + rcu_read_unlock(); +} + +static inline void throtl_bio_stats_start(struct bio *bio, struct throtl_grp *tg) +{ + int op = bio_op(bio); + + /* + * It may happen that end_io will be called twice like dm-thin, + * which will save origin end_io first, and call its overwrite + * end_io and then the saved end_io. We use bio flag + * BIO_THROTL_STATED to do only once statistics. + */ + if ((op == REQ_OP_READ || op == REQ_OP_WRITE) && + !bio_ext_flagged(bio, BIO_THROTL_STATED)) { + blkg_get(tg_to_blkg(tg)); + bio_set_ext_flag(bio, BIO_THROTL_STATED); + bio->bi_tg_end_io = throtl_bio_end_io; + bio->bi_tg_private = tg; + bio_set_start_time_ns(bio); + } +} + static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) { bool rw = bio_data_dir(bio); @@ -1488,6 +1566,16 @@ static struct cftype throtl_legacy_files[] = { .private = offsetof(struct throtl_grp, stat_ios), .seq_show = tg_print_rwstat_recursive, }, + { + .name = "throttle.io_service_time", + .private = offsetof(struct throtl_grp, service_time), + .seq_show = tg_print_rwstat, + }, + { + .name = "throttle.io_wait_time", + .private = offsetof(struct throtl_grp, wait_time), + .seq_show = tg_print_rwstat, + }, { } /* terminate */ }; @@ -1716,6 +1804,7 @@ struct blkcg_policy blkcg_policy_throtl = { .pd_online_fn = throtl_pd_online, .pd_offline_fn = throtl_pd_offline, .pd_free_fn = throtl_pd_free, + .pd_reset_stats_fn = throtl_pd_reset, }; void 
blk_throtl_cancel_bios(struct gendisk *disk) @@ -2188,6 +2277,8 @@ bool __blk_throtl_bio(struct bio *bio) rcu_read_lock(); + throtl_bio_stats_start(bio, tg); + spin_lock_irq(&q->queue_lock); throtl_update_latency_buckets(td); @@ -2277,6 +2368,8 @@ bool __blk_throtl_bio(struct bio *bio) bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY; #endif spin_unlock_irq(&q->queue_lock); + if (!throttled) + bio_set_io_start_time_ns(bio); rcu_read_unlock(); return throttled; diff --git a/block/blk-throttle.h b/block/blk-throttle.h index bffbc9cfc8ab..1e1f6a858571 100644 --- a/block/blk-throttle.h +++ b/block/blk-throttle.h @@ -150,6 +150,10 @@ struct throtl_grp { struct blkg_rwstat stat_bytes; struct blkg_rwstat stat_ios; + /* total time spent on lower layer: scheduler, device and others */ + struct blkg_rwstat service_time; + /* total time spent on block throttle */ + struct blkg_rwstat wait_time; }; extern struct blkcg_policy blkcg_policy_throtl; diff --git a/include/linux/bio.h b/include/linux/bio.h index 0286bada25ce..efb40c3282ca 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -242,6 +242,21 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit) bio->bi_flags &= ~(1U << bit); } +static inline bool bio_ext_flagged(struct bio *bio, unsigned int bit) +{ + return (bio->bi_ext_flags & (1U << bit)) != 0; +} + +static inline void bio_set_ext_flag(struct bio *bio, unsigned int bit) +{ + bio->bi_ext_flags |= (1U << bit); +} + +static inline void bio_clear_ext_flag(struct bio *bio, unsigned int bit) +{ + bio->bi_ext_flags &= ~(1U << bit); +} + static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) { WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 92c8997b1938..ce14257bbf74 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -10,6 +10,7 @@ #include #include #include +#include struct bio_set; struct bio; @@ -287,6 +288,12 @@ struct bio { */ struct 
blkcg_gq *bi_blkg; struct bio_issue bi_issue; +#ifdef CONFIG_BLK_DEV_THROTTLING + unsigned long long start_time_ns; /* when passed to block throttle */ + unsigned long long io_start_time_ns; /* when no more throttle */ + bio_end_io_t *bi_tg_end_io; + void *bi_tg_private; +#endif #ifdef CONFIG_BLK_CGROUP_IOCOST u64 bi_iocost_cost; #endif @@ -316,6 +323,8 @@ struct bio { struct bio_set *bi_pool; + unsigned long bi_ext_flags; /* extend the bi_flags */ + /* * We can inline a number of vecs at the end of the bio, to avoid * double allocations for a small number of bio_vecs. This member @@ -349,6 +358,37 @@ enum { BIO_FLAG_LAST }; +/* + * Extend bio flags should be added in here + */ +#define BIO_THROTL_STATED 0 /* bio already stated */ + +#ifdef CONFIG_BLK_DEV_THROTTLING +static inline void bio_set_start_time_ns(struct bio *bio) +{ + preempt_disable(); + bio->start_time_ns = sched_clock(); + preempt_enable(); +} + +static inline void bio_set_io_start_time_ns(struct bio *bio) +{ + preempt_disable(); + bio->io_start_time_ns = sched_clock(); + preempt_enable(); +} + +static inline uint64_t bio_start_time_ns(struct bio *bio) +{ + return bio->start_time_ns; +} + +static inline uint64_t bio_io_start_time_ns(struct bio *bio) +{ + return bio->io_start_time_ns; +} +#endif + typedef __u32 __bitwise blk_mq_req_flags_t; #define REQ_OP_BITS 8 -- Gitee From 237c7c1bc45c1a5ffd95a8fa5d9de041ccc21d78 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:51:39 +0800 Subject: [PATCH 0210/2138] anolis: blk-throttle: add throttled io/bytes counter ANBZ: #8411 Add 2 interfaces to stat io throttle information: blkio.throttle.total_io_queued blkio.throttle.total_bytes_queued These interfaces are used for monitoring throttled io/bytes and analyzing if delay has relation with io throttle. 
Signed-off-by: Joseph Qi Signed-off-by: Hao Xu Signed-off-by: Ferry Meng Link: https://gitee.com/anolis/cloud-kernel/pulls/2823 --- block/blk-throttle.c | 27 ++++++++++++++++++++++++++- block/blk-throttle.h | 4 ++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/block/blk-throttle.c b/block/blk-throttle.c index d1952cf949cc..8c2380c46f8a 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -348,7 +348,9 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, if (blkg_rwstat_init(&tg->stat_bytes, gfp) || blkg_rwstat_init(&tg->stat_ios, gfp) || blkg_rwstat_init(&tg->service_time, gfp) || - blkg_rwstat_init(&tg->wait_time, gfp)) + blkg_rwstat_init(&tg->wait_time, gfp) || + blkg_rwstat_init(&tg->total_bytes_queued, gfp) || + blkg_rwstat_init(&tg->total_io_queued, gfp)) goto err; throtl_service_queue_init(&tg->service_queue); @@ -381,6 +383,8 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, blkg_rwstat_exit(&tg->stat_ios); blkg_rwstat_exit(&tg->service_time); blkg_rwstat_exit(&tg->wait_time); + blkg_rwstat_exit(&tg->total_bytes_queued); + blkg_rwstat_exit(&tg->total_io_queued); kfree(tg); return NULL; } @@ -495,6 +499,10 @@ static void throtl_pd_offline(struct blkg_policy_data *pd) &tg->service_time); blkg_rwstat_add_aux(&blkg_to_tg(parent)->wait_time, &tg->wait_time); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->total_bytes_queued, + &tg->total_bytes_queued); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->total_io_queued, + &tg->total_io_queued); } } @@ -507,6 +515,8 @@ static void throtl_pd_free(struct blkg_policy_data *pd) blkg_rwstat_exit(&tg->stat_ios); blkg_rwstat_exit(&tg->service_time); blkg_rwstat_exit(&tg->wait_time); + blkg_rwstat_exit(&tg->total_bytes_queued); + blkg_rwstat_exit(&tg->total_io_queued); kfree(tg); } @@ -516,6 +526,8 @@ static void throtl_pd_reset(struct blkg_policy_data *pd) blkg_rwstat_reset(&tg->service_time); blkg_rwstat_reset(&tg->wait_time); + 
blkg_rwstat_reset(&tg->total_bytes_queued); + blkg_rwstat_reset(&tg->total_io_queued); } static struct throtl_grp * @@ -1083,6 +1095,9 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn, throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); sq->nr_queued[rw]++; + blkg_rwstat_add(&tg->total_bytes_queued, bio_op(bio), + throtl_bio_data_size(bio)); + blkg_rwstat_add(&tg->total_io_queued, bio_op(bio), 1); throtl_enqueue_tg(tg); } @@ -1576,6 +1591,16 @@ static struct cftype throtl_legacy_files[] = { .private = offsetof(struct throtl_grp, wait_time), .seq_show = tg_print_rwstat, }, + { + .name = "throttle.total_bytes_queued", + .private = offsetof(struct throtl_grp, total_bytes_queued), + .seq_show = tg_print_rwstat, + }, + { + .name = "throttle.total_io_queued", + .private = offsetof(struct throtl_grp, total_io_queued), + .seq_show = tg_print_rwstat, + }, { } /* terminate */ }; diff --git a/block/blk-throttle.h b/block/blk-throttle.h index 1e1f6a858571..2db648bafe76 100644 --- a/block/blk-throttle.h +++ b/block/blk-throttle.h @@ -154,6 +154,10 @@ struct throtl_grp { struct blkg_rwstat service_time; /* total time spent on block throttle */ struct blkg_rwstat wait_time; + /* total bytes throttled */ + struct blkg_rwstat total_bytes_queued; + /* total IOs throttled */ + struct blkg_rwstat total_io_queued; }; extern struct blkcg_policy blkcg_policy_throtl; -- Gitee From c15c12e4a8f5f1404ac947e84d5f9905357ec75c Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:51:53 +0800 Subject: [PATCH 0211/2138] anolis: block-throttle: add counters for completed io ANBZ: #8411 Now we have counters for wait_time and service_time, but no completed ios, so the average latency can not be measured. 
Signed-off-by: Jiufei Xue Signed-off-by: Joseph Qi Signed-off-by: Hao Xu Signed-off-by: Ferry Meng Link: https://gitee.com/anolis/cloud-kernel/pulls/2823 --- block/blk-throttle.c | 12 ++++++++++++ block/blk-throttle.h | 2 ++ 2 files changed, 14 insertions(+) diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 8c2380c46f8a..e322512de001 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -349,6 +349,7 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, blkg_rwstat_init(&tg->stat_ios, gfp) || blkg_rwstat_init(&tg->service_time, gfp) || blkg_rwstat_init(&tg->wait_time, gfp) || + blkg_rwstat_init(&tg->completed, gfp) || blkg_rwstat_init(&tg->total_bytes_queued, gfp) || blkg_rwstat_init(&tg->total_io_queued, gfp)) goto err; @@ -383,6 +384,7 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, blkg_rwstat_exit(&tg->stat_ios); blkg_rwstat_exit(&tg->service_time); blkg_rwstat_exit(&tg->wait_time); + blkg_rwstat_exit(&tg->completed); blkg_rwstat_exit(&tg->total_bytes_queued); blkg_rwstat_exit(&tg->total_io_queued); kfree(tg); @@ -499,6 +501,8 @@ static void throtl_pd_offline(struct blkg_policy_data *pd) &tg->service_time); blkg_rwstat_add_aux(&blkg_to_tg(parent)->wait_time, &tg->wait_time); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->completed, + &tg->completed); blkg_rwstat_add_aux(&blkg_to_tg(parent)->total_bytes_queued, &tg->total_bytes_queued); blkg_rwstat_add_aux(&blkg_to_tg(parent)->total_io_queued, @@ -515,6 +519,7 @@ static void throtl_pd_free(struct blkg_policy_data *pd) blkg_rwstat_exit(&tg->stat_ios); blkg_rwstat_exit(&tg->service_time); blkg_rwstat_exit(&tg->wait_time); + blkg_rwstat_exit(&tg->completed); blkg_rwstat_exit(&tg->total_bytes_queued); blkg_rwstat_exit(&tg->total_io_queued); kfree(tg); @@ -526,6 +531,7 @@ static void throtl_pd_reset(struct blkg_policy_data *pd) blkg_rwstat_reset(&tg->service_time); blkg_rwstat_reset(&tg->wait_time); + blkg_rwstat_reset(&tg->completed); 
blkg_rwstat_reset(&tg->total_bytes_queued); blkg_rwstat_reset(&tg->total_io_queued); } @@ -1005,6 +1011,7 @@ static void throtl_stats_update_completion(struct throtl_grp *tg, blkg_rwstat_add(&tg->service_time, op, now - io_start_time); if (time_after64(io_start_time, start_time)) blkg_rwstat_add(&tg->wait_time, op, io_start_time - start_time); + blkg_rwstat_add(&tg->completed, op, 1); local_irq_restore(flags); } @@ -1591,6 +1598,11 @@ static struct cftype throtl_legacy_files[] = { .private = offsetof(struct throtl_grp, wait_time), .seq_show = tg_print_rwstat, }, + { + .name = "throttle.io_completed", + .private = offsetof(struct throtl_grp, completed), + .seq_show = tg_print_rwstat, + }, { .name = "throttle.total_bytes_queued", .private = offsetof(struct throtl_grp, total_bytes_queued), diff --git a/block/blk-throttle.h b/block/blk-throttle.h index 2db648bafe76..a65cdb0cad83 100644 --- a/block/blk-throttle.h +++ b/block/blk-throttle.h @@ -154,6 +154,8 @@ struct throtl_grp { struct blkg_rwstat service_time; /* total time spent on block throttle */ struct blkg_rwstat wait_time; + /* total IOs completed */ + struct blkg_rwstat completed; /* total bytes throttled */ struct blkg_rwstat total_bytes_queued; /* total IOs throttled */ -- Gitee From dcb4f21cc26d33788999cf3aad4fab4790efbf38 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:51:59 +0800 Subject: [PATCH 0212/2138] anolis: blk-throttle: limit bios to fix amount of pages entering writeback prematurely ANBZ: #8411 Currently in blk_throtl_bio(), if one bio exceeds its throtl_grp's bps or iops limit, this bio will be queued throtl_grp's throtl_service_queue, then obviously mm subsys will submit more pages, even underlying device can not handle these io requests, also this will make large amount of pages entering writeback prematurely, later if some process writes some of these pages, it will wait for long time. 
I have done some tests: one process does buffered writes on a 1GB file, and make this process's blkcg max bps limit be 10MB/s, I observe this: #cat /proc/meminfo | grep -i back Writeback: 900024 kB WritebackTmp: 0 kB I think this Writeback value is just too big, indeed many bios have been queued in throtl_grp's throtl_service_queue, if one process try to write the last bio's page in this queue, it will call wait_on_page_writeback(page), which must wait the previous bios to finish and will take long time, we have also see 120s hung task warning in our server. INFO: task kworker/u128:0:30072 blocked for more than 120 seconds. Tainted: G E 4.9.147-013.ck3000_015_test.cl7.x86_64 #1 "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. kworker/u128:0 D 0 30072 2 0x00000000 Workqueue: writeback wb_workfn (flush-8:16) ffff882ddd066b40 0000000000000000 ffff882e5cad3400 ffff882fbe959e80 ffff882fa50b1a00 ffffc9003a5a3768 ffffffff8173325d ffffc9003a5a3780 00ff882e5cad3400 ffff882fbe959e80 ffffffff81360b49 ffff882e5cad3400 Call Trace: [] ? __schedule+0x23d/0x6d0 [] ? alloc_request_struct+0x19/0x20 [] schedule+0x36/0x80 [] schedule_timeout+0x206/0x4b0 [] ? sched_clock+0x9/0x10 [] ? get_request+0x403/0x810 [] ? ktime_get+0x40/0xb0 [] io_schedule_timeout+0xda/0x170 [] ? bit_wait+0x60/0x60 [] bit_wait_io+0x1b/0x60 [] __wait_on_bit+0x58/0x90 [] ? find_get_pages_tag+0x161/0x2e0 [] wait_on_page_bit+0x82/0xa0 [] ? wake_atomic_t_function+0x60/0x60 [] mpage_prepare_extent_to_map+0x2d1/0x310 [ext4] [] ? kmem_cache_alloc+0x185/0x1a0 [] ? ext4_init_io_end+0x1f/0x40 [ext4] [] ext4_writepages+0x404/0xef0 [ext4] [] ? scsi_init_io+0x44/0x200 [] ? fprop_fraction_percpu+0x2f/0x80 [] do_writepages+0x1e/0x30 [] __writeback_single_inode+0x45/0x320 [] writeback_sb_inodes+0x272/0x600 [] wb_writeback+0x10b/0x300 [] wb_workfn+0xb4/0x380 [] ? try_to_wake_up+0x59/0x3e0 [] process_one_work+0x189/0x420 [] worker_thread+0x4e/0x4b0 [] ? 
process_one_work+0x420/0x420 [] kthread+0xe6/0x100 [] ? kthread_park+0x60/0x60 [] ret_from_fork+0x39/0x50 To fix this issue, we can simply limit throtl_service_queue's max queued bios, currently we limit it to throtl_grp's bps_limit or iops limit, if it still exteeds, we just sleep for a while. [merge note] Also includes commit "anolis: blk-throttle: fix race bug that loses wakeup event" Signed-off-by: Xiaoguang Wang Signed-off-by: Joseph Qi Signed-off-by: Hao Xu Signed-off-by: Shile Zhang Signed-off-by: Ferry Meng Link: https://gitee.com/anolis/cloud-kernel/pulls/2823 --- block/blk-core.c | 11 ++++++++++- block/blk-throttle.c | 29 ++++++++++++++++++++++++++++- block/blk-throttle.h | 16 ++++++++++++---- 3 files changed, 50 insertions(+), 6 deletions(-) diff --git a/block/blk-core.c b/block/blk-core.c index f49a8f2f3d17..ad2b3ab4a246 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -735,6 +735,9 @@ void submit_bio_noacct(struct bio *bio) struct block_device *bdev = bio->bi_bdev; struct request_queue *q = bdev_get_queue(bdev); blk_status_t status = BLK_STS_IOERR; + DEFINE_WAIT(wait); + wait_queue_head_t *wait_head = NULL; + bool throtl; might_sleep(); @@ -808,7 +811,13 @@ void submit_bio_noacct(struct bio *bio) break; } - if (blk_throtl_bio(bio)) + throtl = blk_throtl_bio(bio, &wait_head, &wait); + if (wait_head) { + io_schedule(); + finish_wait(wait_head, &wait); + } + + if (throtl) return; submit_bio_noacct_nocheck(bio); return; diff --git a/block/blk-throttle.c b/block/blk-throttle.c index e322512de001..a78657b07ed4 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -331,6 +331,10 @@ static void throtl_service_queue_init(struct throtl_service_queue *sq) { INIT_LIST_HEAD(&sq->queued[READ]); INIT_LIST_HEAD(&sq->queued[WRITE]); + sq->nr_queued_bytes[READ] = 0; + sq->nr_queued_bytes[WRITE] = 0; + init_waitqueue_head(&sq->wait[READ]); + init_waitqueue_head(&sq->wait[WRITE]); sq->pending_tree = RB_ROOT_CACHED; timer_setup(&sq->pending_timer, 
throtl_pending_timer_fn, 0); } @@ -1102,6 +1106,7 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn, throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); sq->nr_queued[rw]++; + sq->nr_queued_bytes[rw] += throtl_bio_data_size(bio); blkg_rwstat_add(&tg->total_bytes_queued, bio_op(bio), throtl_bio_data_size(bio)); blkg_rwstat_add(&tg->total_io_queued, bio_op(bio), 1); @@ -1160,6 +1165,15 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) */ bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); sq->nr_queued[rw]--; + sq->nr_queued_bytes[rw] -= throtl_bio_data_size(bio); + WARN_ON_ONCE(sq->nr_queued_bytes[rw] < 0); + + if (wq_has_sleeper(&sq->wait[rw])) { + if (sq->nr_queued_bytes[rw] > 0) + wake_up(&sq->wait[rw]); + else + wake_up_all(&sq->wait[rw]); + } throtl_charge_bio(tg, bio); @@ -2301,7 +2315,8 @@ static void throtl_upgrade_state(struct throtl_data *td) } #endif -bool __blk_throtl_bio(struct bio *bio) +bool __blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct blkcg_gq *blkg = bio->bi_blkg; @@ -2385,6 +2400,18 @@ bool __blk_throtl_bio(struct bio *bio) tg->last_low_overflow_time[rw] = jiffies; td->nr_queued[rw]++; + + if (rw == WRITE) { + u64 bps_limit = tg_bps_limit(tg, rw); + + if (bps_limit != U64_MAX && + (wq_has_sleeper(&sq->wait[rw]) || + sq->nr_queued_bytes[rw] > div_u64(bps_limit, 2))) { + *waitq = &sq->wait[rw]; + prepare_to_wait_exclusive(*waitq, wait, TASK_UNINTERRUPTIBLE); + } + } + throtl_add_bio_tg(bio, qn, tg); throttled = true; diff --git a/block/blk-throttle.h b/block/blk-throttle.h index a65cdb0cad83..4b5ce538ca5b 100644 --- a/block/blk-throttle.h +++ b/block/blk-throttle.h @@ -41,6 +41,8 @@ struct throtl_service_queue { */ struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */ unsigned int nr_queued[2]; /* number of queued bios */ + long nr_queued_bytes[2]; /* number of queued bytes */ + wait_queue_head_t 
wait[2]; /* * RB tree of active children throtl_grp's, which are sorted by @@ -181,13 +183,18 @@ static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg) static inline int blk_throtl_init(struct gendisk *disk) { return 0; } static inline void blk_throtl_exit(struct gendisk *disk) { } static inline void blk_throtl_register(struct gendisk *disk) { } -static inline bool blk_throtl_bio(struct bio *bio) { return false; } +static inline bool blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait) +{ + return false; +} static inline void blk_throtl_cancel_bios(struct gendisk *disk) { } #else /* CONFIG_BLK_DEV_THROTTLING */ int blk_throtl_init(struct gendisk *disk); void blk_throtl_exit(struct gendisk *disk); void blk_throtl_register(struct gendisk *disk); -bool __blk_throtl_bio(struct bio *bio); +bool __blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait); void blk_throtl_cancel_bios(struct gendisk *disk); static inline bool blk_should_throtl(struct bio *bio) @@ -214,13 +221,14 @@ static inline bool blk_should_throtl(struct bio *bio) return false; } -static inline bool blk_throtl_bio(struct bio *bio) +static inline bool blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait) { if (!blk_should_throtl(bio)) return false; - return __blk_throtl_bio(bio); + return __blk_throtl_bio(bio, waitq, wait); } #endif /* CONFIG_BLK_DEV_THROTTLING */ -- Gitee From 265e04f570402601a6f3b5e2aa3f28807b532b63 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Wed, 6 Mar 2024 17:52:18 +0800 Subject: [PATCH 0213/2138] anolis: blk-throttle: add io latency indicators in cgroupV2 ANBZ: #8411 Currently we have already supported io_{wait_time/completed/service_time} and total_{bytes_queued/io_queued} counters in cgroupV1 (blkio cgroup). Now we offer the same interface in cgroupV2 (under io cgroup). Integrate all indicators into one file, named "io.exstat". 
Before you read it in subcgroup, remember to enable "io" in ancestor's "cgroup.subtree_control". Signed-off-by: Ferry Meng Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2823 --- Documentation/admin-guide/cgroup-v2.rst | 24 ++++++++++ block/blk-throttle.c | 61 +++++++++++++++++++++++++ 2 files changed, 85 insertions(+) diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index b26b5274eaaf..8238711ee842 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1717,6 +1717,30 @@ IO Interface Files 8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353 dbytes=0 dios=0 8:0 rbytes=90430464 wbytes=299008000 rios=8950 wios=1252 dbytes=50331648 dios=3021 + io.extstat + A read-only nested-keyed file. + + Lines are keyed by $MAJ:$MIN device numbers and not ordered. + The following nested keys are defined. + + ======== ============================= + rwait IO read wait time + wwait IO write wait time + rserv IO read service time + wserv IO write service time + rcomp Number of completed read IOs + wcomp Number of completed write IOs + rbytesq Bytes of queued read IOs + wbytesq Bytes of queued write IOs + riosq Number of queued read IOs + wiosq Number of queued write IOs + ======== ============================= + + An example read output follows:: + + 253:16 rwait=0 wwait=3300 rserv=0 wserv=414366321956 rcomp=0 wcomp=12 rbytesq=0 wbytesq=40960000 riosq=0 wiosq=12 + 253:0 rwait=0 wwait=0 rserv=0 wserv=0 rcomp=0 wcomp=0 rbytesq=0 wbytesq=0 riosq=0 wiosq=0 + io.cost.qos A read-write nested-keyed file which exists only on the root cgroup. 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index a78657b07ed4..8921f61d257a 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1693,6 +1693,56 @@ static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd, return 0; } +static u64 tg_prfill_extstat(struct seq_file *sf, struct blkg_policy_data *pd, + int off) +{ + struct throtl_grp *tg = pd_to_tg(pd); + const char *dname = blkg_dev_name(pd->blkg); + char bufs[10][21] = { "0", "0", "0", "0", "0", "0", "0", "0", "0", "0" }; + struct blkg_rwstat_sample tmp = { }; + + if (!dname) + return 0; + + /* read/write IOs wait time */ + blkg_rwstat_read(&tg->wait_time, &tmp); + snprintf(bufs[0], sizeof(bufs[0]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[1], sizeof(bufs[1]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write IOs service time */ + blkg_rwstat_read(&tg->service_time, &tmp); + snprintf(bufs[2], sizeof(bufs[2]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[3], sizeof(bufs[3]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write completed IOs */ + blkg_rwstat_read(&tg->completed, &tmp); + snprintf(bufs[4], sizeof(bufs[4]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[5], sizeof(bufs[5]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write queued bytes */ + blkg_rwstat_read(&tg->total_bytes_queued, &tmp); + snprintf(bufs[6], sizeof(bufs[6]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[7], sizeof(bufs[7]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write queued IOs */ + blkg_rwstat_read(&tg->total_io_queued, &tmp); + snprintf(bufs[8], sizeof(bufs[8]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[9], sizeof(bufs[9]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + + seq_printf(sf, "%s rwait=%s wwait=%s rserv=%s wserv=%s rcomp=%s wcomp=%s " + "rbytesq=%s wbytesq=%s riosq=%s wiosq=%s\n", + dname, bufs[0], bufs[1], bufs[2], bufs[3], bufs[4], + bufs[5], bufs[6], bufs[7], bufs[8], bufs[9]); + + return 0; +} + static int 
tg_print_limit(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit, @@ -1700,6 +1750,13 @@ static int tg_print_limit(struct seq_file *sf, void *v) return 0; } +static int tg_print_extstat(struct seq_file *sf, void *v) +{ + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_extstat, + &blkcg_policy_throtl, 0, false); + return 0; +} + static ssize_t tg_set_limit(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { @@ -1836,6 +1893,10 @@ static struct cftype throtl_files[] = { .write = tg_set_limit, .private = LIMIT_MAX, }, + { + .name = "extstat", + .seq_show = tg_print_extstat, + }, { } /* terminate */ }; -- Gitee From 7c713a2efba0796e6fe1ac2c918b62e62e2f4f4e Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 11 Mar 2024 14:41:03 +0800 Subject: [PATCH 0214/2138] anolis: x86/perf: Add PMU uncore support for Zhaoxin CPU ANBZ: #7809 Add performance monitoring unit support for Zhaoxin processors. Signed-off-by: leoliu-oc Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2695 --- MAINTAINERS | 7 + arch/x86/events/zhaoxin/Makefile | 1 + arch/x86/events/zhaoxin/core.c | 53 +- arch/x86/events/zhaoxin/uncore.c | 2900 ++++++++++++++++++++++++++++++ arch/x86/events/zhaoxin/uncore.h | 371 ++++ 5 files changed, 3317 insertions(+), 15 deletions(-) create mode 100644 arch/x86/events/zhaoxin/uncore.c create mode 100644 arch/x86/events/zhaoxin/uncore.h diff --git a/MAINTAINERS b/MAINTAINERS index 2b1759633adb..24bc580c9cee 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23885,6 +23885,13 @@ L: linux-kernel@vger.kernel.org S: Maintained F: arch/x86/kernel/cpu/zhaoxin.c +ZHAOXIN PMU UNCORE SUPPORT +M: Leoliu-oc +S: Maintained +F: arch/x86/events/zhaoxin/core.c +F: arch/x86/events/zhaoxin/uncore.c +F: arch/x86/events/zhaoxin/uncore.h + ZONEFS FILESYSTEM M: Damien Le Moal M: Naohiro Aota diff --git a/arch/x86/events/zhaoxin/Makefile b/arch/x86/events/zhaoxin/Makefile index 
642c1174d662..767d6212bac1 100644 --- a/arch/x86/events/zhaoxin/Makefile +++ b/arch/x86/events/zhaoxin/Makefile @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += core.o +obj-y += uncore.o diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c index 3e9acdaeed1e..2957b416a6db 100644 --- a/arch/x86/events/zhaoxin/core.c +++ b/arch/x86/events/zhaoxin/core.c @@ -19,15 +19,15 @@ #include "../perf_event.h" /* - * Zhaoxin PerfMon, used on zxc and later. + * Zhaoxin PerfMon, used on Lujiazui and later. */ static u64 zx_pmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = { [PERF_COUNT_HW_CPU_CYCLES] = 0x0082, [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515, - [PERF_COUNT_HW_CACHE_MISSES] = 0x051a, [PERF_COUNT_HW_BUS_CYCLES] = 0x0083, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0028, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x0029, }; static struct event_constraint zxc_event_constraints[] __read_mostly = { @@ -36,7 +36,7 @@ static struct event_constraint zxc_event_constraints[] __read_mostly = { EVENT_CONSTRAINT_END }; -static struct event_constraint zxd_event_constraints[] __read_mostly = { +static struct event_constraint wudaokou_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* retired instructions */ FIXED_EVENT_CONSTRAINT(0x0082, 1), /* unhalted core clock cycles */ @@ -44,7 +44,7 @@ static struct event_constraint zxd_event_constraints[] __read_mostly = { EVENT_CONSTRAINT_END }; -static __initconst const u64 zxd_hw_cache_event_ids +static __initconst const u64 wudaokou_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { @@ -148,7 +148,7 @@ static __initconst const u64 zxd_hw_cache_event_ids }, }; -static __initconst const u64 zxe_hw_cache_event_ids +static __initconst const u64 lujiazui_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { @@ -471,7 +471,7 @@ static const 
struct x86_pmu zhaoxin_pmu __initconst = { .max_events = ARRAY_SIZE(zx_pmon_event_map), .apic = 1, /* - * For zxd/zxe, read/write operation for PMCx MSR is 48 bits. + * For wudaokou/lujiazui, read/write operation for PMCx MSR is 48 bits. */ .max_period = (1ULL << 47) - 1, .get_event_constraints = zhaoxin_get_event_constraints, @@ -559,6 +559,8 @@ __init int zhaoxin_pmu_init(void) zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0; zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0; zx_pmon_event_map[PERF_COUNT_HW_BUS_CYCLES] = 0; + zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0; + zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0; pr_cont("ZXC events, "); break; @@ -574,26 +576,47 @@ __init int zhaoxin_pmu_init(void) switch (boot_cpu_data.x86_model) { case 0x1b: - memcpy(hw_cache_event_ids, zxd_hw_cache_event_ids, + memcpy(hw_cache_event_ids, wudaokou_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - x86_pmu.event_constraints = zxd_event_constraints; + x86_pmu.event_constraints = wudaokou_event_constraints; + + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0x051a; zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0700; zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0709; - pr_cont("ZXD events, "); + pr_cont("Wudaokou events, "); break; case 0x3b: - memcpy(hw_cache_event_ids, zxe_hw_cache_event_ids, + memcpy(hw_cache_event_ids, lujiazui_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - x86_pmu.event_constraints = zxd_event_constraints; + x86_pmu.event_constraints = wudaokou_event_constraints; + + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0x051a; + + pr_cont("Lujiazui events, "); + break; + case 0x5b: + case 0x6b: + zx_pmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = + X86_CONFIG(.event = 0x02, .umask = 0x01, .inv = 0x01, + .cmask = 0x01); + + memcpy(hw_cache_event_ids, lujiazui_hw_cache_event_ids, + 
sizeof(hw_cache_event_ids)); + + x86_pmu.event_constraints = wudaokou_event_constraints; + + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x051a; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0; - zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0028; - zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0029; + if (boot_cpu_data.x86_model == 0x5b) + pr_cont("Yongfeng events, "); - pr_cont("ZXE events, "); break; default: return -ENODEV; diff --git a/arch/x86/events/zhaoxin/uncore.c b/arch/x86/events/zhaoxin/uncore.c new file mode 100644 index 000000000000..8d898a10d953 --- /dev/null +++ b/arch/x86/events/zhaoxin/uncore.c @@ -0,0 +1,2900 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include "uncore.h" + +static struct zhaoxin_uncore_type *empty_uncore[] = { NULL, }; +static struct zhaoxin_uncore_type **uncore_msr_uncores = empty_uncore; +static struct zhaoxin_uncore_type **uncore_pci_uncores = empty_uncore; +static struct zhaoxin_uncore_type **uncore_mmio_uncores = empty_uncore; + + +static bool pcidrv_registered; +static struct pci_driver *uncore_pci_driver; + +/* mask of cpus that collect uncore events */ +static cpumask_t uncore_cpu_mask; +static cpumask_t uncore_cpu_subnode_mask; +static cpumask_t uncore_cpu_cluster_mask; + +/* constraint for the fixed counter */ +static struct event_constraint uncore_constraint_fixed = + EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL); + +static int max_packages, max_subnodes, max_clusters; +static int clusters_per_subnode; +static int subnodes_per_die; +static int dies_per_socket; + +#define KH40000_MAX_SUBNODE_NUMBER 8 +static int kh40000_pcibus_limit[KH40000_MAX_SUBNODE_NUMBER]; + +/* get CPU topology register */ +#define BJ_GLOBAL_STATUS_MSR 0x1610 +#define BJ_HDW_CONFIG_MSR 0X1628 + +/* KX5000/KX6000 event control */ +#define KX5000_UNC_CTL_EV_SEL_MASK 0x000000ff +#define KX5000_UNC_CTL_UMASK_MASK 0x0000ff00 +#define KX5000_UNC_CTL_EDGE_DET (1 << 18) +#define KX5000_UNC_CTL_EN (1 << 
22) +#define KX5000_UNC_CTL_INVERT (1 << 23) +#define KX5000_UNC_CTL_CMASK_MASK 0x7000000 +#define KX5000_UNC_FIXED_CTR_CTL_EN (1 << 0) + +#define KX5000_UNC_RAW_EVENT_MASK (KX5000_UNC_CTL_EV_SEL_MASK | \ + KX5000_UNC_CTL_UMASK_MASK | \ + KX5000_UNC_CTL_EDGE_DET | \ + KX5000_UNC_CTL_INVERT | \ + KX5000_UNC_CTL_CMASK_MASK) + +/* KX5000/KX6000 uncore global register */ +#define KX5000_UNC_PERF_GLOBAL_CTL 0x391 +#define KX5000_UNC_FIXED_CTR 0x394 +#define KX5000_UNC_FIXED_CTR_CTRL 0x395 + +/* KX5000/KX6000 uncore global control */ +#define KX5000_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 4) - 1) +#define KX5000_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) + +/* KX5000/KX6000 uncore register */ +#define KX5000_UNC_PERFEVTSEL0 0x3c0 +#define KX5000_UNC_UNCORE_PMC0 0x3b0 + +/* KH40000 event control */ +#define KH40000_PMON_CTL_EV_SEL_MASK 0x000000ff +#define KH40000_PMON_CTL_UMASK_MASK 0x0000ff00 +#define KH40000_PMON_CTL_RST (1 << 17) +#define KH40000_PMON_CTL_EDGE_DET (1 << 18) +#define KH40000_PMON_CTL_EV_SEL_EXT (1 << 21) +#define KH40000_PMON_CTL_EN (1 << 22) +#define KH40000_PMON_CTL_INVERT (1 << 23) +#define KH40000_PMON_CTL_TRESH_MASK 0xff000000 +#define KH40000_PMON_RAW_EVENT_MASK (KH40000_PMON_CTL_EV_SEL_MASK | \ + KH40000_PMON_CTL_UMASK_MASK | \ + KH40000_PMON_CTL_EDGE_DET | \ + KH40000_PMON_CTL_INVERT | \ + KH40000_PMON_CTL_TRESH_MASK) + +/* KH40000 LLC register*/ +#define KH40000_LLC_MSR_PMON_CTL0 0x1660 +#define KH40000_LLC_MSR_PMON_CTR0 0x165c +#define KH40000_LLC_MSR_PMON_BLK_CTL 0x1665 + +/* KH40000 HIF register*/ +#define KH40000_HIF_MSR_PMON_CTL0 0x1656 +#define KH40000_HIF_MSR_PMON_CTR0 0x1651 +#define KH40000_HIF_MSR_PMON_FIXED_CTL 0x1655 +#define KH40000_HIF_MSR_PMON_FIXED_CTR 0x1650 +#define KH40000_HIF_MSR_PMON_BLK_CTL 0x165b + +/* KH40000 ZZI(ZPI+ZOI+INI) register*/ +#define KH40000_ZZI_MSR_PMON_CTL0 0x166A +#define KH40000_ZZI_MSR_PMON_CTR0 0x1666 +#define KH40000_ZZI_MSR_PMON_BLK_CTL 0x166f + +/* KH40000 MC register*/ +#define KH40000_MC0_CHy_PMON_FIXED_CTL 
0xf40 +#define KH40000_MC0_CHy_PMON_FIXED_CTR 0xf20 +#define KH40000_MC0_CHy_PMON_CTR0 0xf00 +#define KH40000_MC0_CHy_PMON_CTL0 0xf28 +#define KH40000_MC0_CHy_PMON_BLK_CTL 0xf44 + +#define KH40000_MC1_CHy_PMON_FIXED_CTL 0xf90 +#define KH40000_MC1_CHy_PMON_FIXED_CTR 0xf70 +#define KH40000_MC1_CHy_PMON_CTR0 0xf50 +#define KH40000_MC1_CHy_PMON_CTL0 0xf78 +#define KH40000_MC1_CHy_PMON_BLK_CTL 0xf94 + +/* KH40000 PCI register*/ +#define KH40000_PCI_PMON_CTR0 0xf00 +#define KH40000_PCI_PMON_CTL0 0xf28 +#define KH40000_PCI_PMON_BLK_CTL 0xf44 + +/* KH40000 ZPI_DLL register*/ +#define KH40000_ZPI_DLL_PMON_FIXED_CTL 0xf40 +#define KH40000_ZPI_DLL_PMON_FIXED_CTR 0xf20 +#define KH40000_ZPI_DLL_PMON_CTR0 0xf00 +#define KH40000_ZPI_DLL_PMON_CTL0 0xf28 +#define KH40000_ZPI_DLL_PMON_BLK_CTL 0xf44 + +/* KH40000 ZDI_DLL register*/ +#define KH40000_ZDI_DLL_PMON_FIXED_CTL 0xf40 +#define KH40000_ZDI_DLL_PMON_FIXED_CTR 0xf20 +#define KH40000_ZDI_DLL_PMON_CTR0 0xf00 +#define KH40000_ZDI_DLL_PMON_CTL0 0xf28 +#define KH40000_ZDI_DLL_PMON_BLK_CTL 0xf44 + +/* KH40000 PXPTRF register*/ +#define KH40000_PXPTRF_PMON_CTR0 0xf00 +#define KH40000_PXPTRF_PMON_CTL0 0xf28 +#define KH40000_PXPTRF_PMON_BLK_CTL 0xf44 + +/* KH40000 Box level control */ +#define KH40000_PMON_BOX_CTL_RST_CTRL (1 << 0) +#define KH40000_PMON_BOX_CTL_RST_CTRS (1 << 1) +#define KH40000_PMON_BOX_CTL_FRZ (1 << 8) +#define KH40000_PMON_PCI_BOX_PMON_EN (1 << 31) + +#define KH40000_PMON_BOX_CTL_INT (KH40000_PMON_BOX_CTL_RST_CTRL | \ + KH40000_PMON_BOX_CTL_RST_CTRS) + +#define KH40000_PMON_PCI_BOX_CTL_INT (KH40000_PMON_BOX_CTL_RST_CTRL | \ + KH40000_PMON_BOX_CTL_RST_CTRS | \ + KH40000_PMON_PCI_BOX_PMON_EN) + +/* KX8000 LLC register*/ +#define KX8000_LLC_MSR_PMON_CTL0 0x1979 +#define KX8000_LLC_MSR_PMON_CTR0 0x1975 +#define KX8000_LLC_MSR_PMON_BLK_CTL 0x197e + +/* KX8000 MESH register*/ +#define KX8000_MESH_MSR_PMON_CTL0 0x1983 +#define KX8000_MESH_MSR_PMON_CTR0 0x197f +#define KX8000_MESH_MSR_PMON_BLK_CTL 0x1987 + +/* KX8000 
HOMESTOP register*/ +#define KX8000_HOMESTOP_MSR_PMON_CTL0 0x196a +#define KX8000_HOMESTOP_MSR_PMON_CTR0 0x1966 +#define KX8000_HOMESTOP_MSR_PMON_BLK_CTL 0x196e +#define KX8000_HOMESTOP_MSR_PMON_FIXED_CTR 0x1970 +#define KX8000_HOMESTOP_MSR_PMON_FIXED_CTL 0x1971 + +/* KX8000 CCDie ZDI_PL register*/ +#define KX8000_CCD_ZDI_PL_MSR_PMON_CTL0 0x1960 +#define KX8000_CCD_ZDI_PL_MSR_PMON_CTR0 0x195c +#define KX8000_CCD_ZDI_PL_MSR_PMON_BLK_CTL 0x1964 + +/* KX8000 cIODie ZDI_PL register*/ +#define KX8000_IOD_ZDI_PL_MSR_PMON_CTL0 0x1894 +#define KX8000_IOD_ZDI_PL_MSR_PMON_CTR0 0x1890 +#define KX8000_IOD_ZDI_PL_MSR_PMON_BLK_CTL 0x1898 +#define KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR 0x189A +#define KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL 0x189B + +/* KX8000 MC register*/ +#define KX8000_MC_A0_CHy_PMON_FIXED_CTL 0xe30 +#define KX8000_MC_A0_CHy_PMON_FIXED_CTR 0xe08 +#define KX8000_MC_A0_CHy_PMON_CTR0 0xe00 +#define KX8000_MC_A0_CHy_PMON_CTL0 0xe20 +#define KX8000_MC_A0_CHy_PMON_BLK_CTL 0xe34 + +#define KX8000_MC_A1_CHy_PMON_FIXED_CTL 0xe70 +#define KX8000_MC_A1_CHy_PMON_FIXED_CTR 0xe48 +#define KX8000_MC_A1_CHy_PMON_CTR0 0xe40 +#define KX8000_MC_A1_CHy_PMON_CTL0 0xe60 +#define KX8000_MC_A1_CHy_PMON_BLK_CTL 0xe74 + +#define KX8000_MC_B0_CHy_PMON_FIXED_CTL 0xeb0 +#define KX8000_MC_B0_CHy_PMON_FIXED_CTR 0xe88 +#define KX8000_MC_B0_CHy_PMON_CTR0 0xe80 +#define KX8000_MC_B0_CHy_PMON_CTL0 0xea0 +#define KX8000_MC_B0_CHy_PMON_BLK_CTL 0xeb4 + +#define KX8000_MC_B1_CHy_PMON_FIXED_CTL 0xef0 +#define KX8000_MC_B1_CHy_PMON_FIXED_CTR 0xec8 +#define KX8000_MC_B1_CHy_PMON_CTR0 0xec0 +#define KX8000_MC_B1_CHy_PMON_CTL0 0xee0 +#define KX8000_MC_B1_CHy_PMON_BLK_CTL 0xef4 + +#define KX8000_ZDI_DL_MMIO_PMON_CTR0 0xf00 +#define KX8000_ZDI_DL_MMIO_PMON_CTL0 0xf28 +#define KX8000_ZDI_DL_MMIO_PMON_BLK_CTL 0xf44 +#define KX8000_IOD_ZDI_DL_MMIO_BASE_OFFSET 0x168 +#define KX8000_CCD_ZDI_DL_MMIO_BASE_OFFSET 0x170 +#define KX8000_ZDI_DL_MMIO_BASE_MASK 0x3fff +#define KX8000_ZDI_DL_MMIO_BASE_MASK 0x3fff +#define 
KX8000_ZDI_DL_MMIO_MEM0_MASK 0xfffff000 +#define KX8000_ZDI_DL_MMIO_SIZE 0x1000 + + + + +DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); +DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); +DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); +DEFINE_UNCORE_FORMAT_ATTR(cmask3, cmask, "config:24-26"); +DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); + +static void get_hdw_config_msr(void *config) +{ + u64 *data = (u64 *)config; + + rdmsrl(BJ_HDW_CONFIG_MSR, *data); +} + +static void get_global_status_msr(void *status) +{ + u64 *data = (u64 *)status; + + rdmsrl(BJ_GLOBAL_STATUS_MSR, *data); +} + +/*topology number : get max packages/subnode/clusters number*/ +static void get_topology_number(void) +{ + int clusters; + int subnodes; + int dies; + int packages; + u64 data; + + rdmsrl(BJ_GLOBAL_STATUS_MSR, data); + + /* check packages number */ + packages = data & 0x1; + if (packages) + max_packages = 2; + else + max_packages = 1; + + /* only Yongfeng needs die/subnode/cluster info */ + if (boot_cpu_data.x86_model != ZHAOXIN_FAM7_KH40000) + return; + + /* check dies_per_socket */ + dies = (data >> 12) & 0x1; + if (dies) + dies_per_socket = 2; + else + dies_per_socket = 1; + + /* check subnodes_per_die */ + subnodes = (data >> 32) & 0x3; + if (subnodes == 0x3) + subnodes_per_die = 2; + else + subnodes_per_die = 1; + + /* check clusters_per_subnode */ + clusters = (data >> 6) & 0x3; + if (clusters == 0x3) + clusters_per_subnode = 2; + else + clusters_per_subnode = 1; + + max_subnodes = max_packages * dies_per_socket * subnodes_per_die; + max_clusters = clusters_per_subnode * max_subnodes; +} + +static int get_pcibus_limit(void) +{ + struct pci_dev *dev; + u32 val; + int i = 0; + + dev = pci_get_device(0x1D17, 0x31B1, NULL); + if (dev == NULL) + return -ENODEV; + + pci_read_config_dword(dev, 0x94, &val); + kh40000_pcibus_limit[i++] = (val & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 8 & 
0x1f) << 3 | 0x7; + if (dies_per_socket == 2) { + kh40000_pcibus_limit[i++] = (val >> 16 & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 24 & 0x1f) << 3 | 0x7; + } + + if (max_packages == 2) { + pci_read_config_dword(dev, 0x9c, &val); + kh40000_pcibus_limit[i++] = (val & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 8 & 0x1f) << 3 | 0x7; + if (dies_per_socket == 2) { + kh40000_pcibus_limit[i++] = (val >> 16 & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 24 & 0x1f) << 3 | 0x7; + } + } + + return 0; +} + +static int uncore_pcibus_to_subnodeid(struct pci_bus *bus) +{ + int i; + + for (i = 0; i < KH40000_MAX_SUBNODE_NUMBER; i++) { + if (bus->number < kh40000_pcibus_limit[i]) + break; + } + + return i; +} + +DEFINE_PER_CPU(int, zx_package_id); +DEFINE_PER_CPU(int, zx_subnode_id); +DEFINE_PER_CPU(int, zx_cluster_id); + +static void get_topology_info(void) +{ + int cpu; + int cluster_id; + int socket_id; + int die_id; + int subnode_id; + + int die_info; + int subnode_info; + int cluster_info; + + u64 config; + + for_each_present_cpu(cpu) { + smp_call_function_single(cpu, get_global_status_msr, &config, 1); + socket_id = (int)((config >> 3) & 0x1); + per_cpu(zx_package_id, cpu) = socket_id; + + /* only kh40000 needs cluster and subnode info */ + if (boot_cpu_data.x86_model != ZHAOXIN_FAM7_KH40000) + continue; + + smp_call_function_single(cpu, get_hdw_config_msr, &config, 1); + + die_info = (int)((config >> 21) & 0x3); + die_id = socket_id * dies_per_socket + die_info; + + subnode_info = (int)((config >> 20) & 0x1); + subnode_id = die_id * subnodes_per_die + subnode_info; + per_cpu(zx_subnode_id, cpu) = subnode_id; + + cluster_info = (int)((config >> 18) & 0x3); + cluster_id = subnode_id * clusters_per_subnode + cluster_info; + per_cpu(zx_cluster_id, cpu) = cluster_id; + } +} + +static int zx_topology_cluster_id(int cpu) +{ + return per_cpu(zx_cluster_id, cpu); +} + +static int zx_topology_subnode_id(int cpu) +{ + return per_cpu(zx_subnode_id, 
cpu); +} + +static int zx_topology_package_id(int cpu) +{ + return per_cpu(zx_package_id, cpu); +} + +DEFINE_PER_CPU(cpumask_t, zx_cluster_core_bits); +DEFINE_PER_CPU(cpumask_t, zx_subnode_core_bits); + +static void zx_gen_core_map(void) +{ + int i, nr, cpu; + int cluster_id, subnode_id; + + for_each_present_cpu(cpu) { + cluster_id = zx_topology_cluster_id(cpu); + + for (i = 0; i < 4; i++) { + nr = (cluster_id << 2) + i; + cpumask_set_cpu(nr, &per_cpu(zx_cluster_core_bits, cpu)); + } + } + + for_each_present_cpu(cpu) { + subnode_id = zx_topology_subnode_id(cpu); + + for (i = 0; i < 8; i++) { + nr = (subnode_id << 3) + i; + cpumask_set_cpu(nr, &per_cpu(zx_subnode_core_bits, cpu)); + } + } +} + +static struct cpumask *topology_cluster_core_cpumask(int cpu) +{ + return &per_cpu(zx_cluster_core_bits, cpu); +} + +static struct cpumask *topology_subnode_core_cpumask(int cpu) +{ + return &per_cpu(zx_subnode_core_bits, cpu); +} + +static void uncore_free_pcibus_map(void) +{ + +} + +static int kh40000_pci2node_map_init(void) +{ + return 0; +} + +ssize_t zx_uncore_event_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct uncore_event_desc *event = + container_of(attr, struct uncore_event_desc, attr); + return sprintf(buf, "%s", event->config); +} + +static struct zhaoxin_uncore_box *uncore_pmu_to_box(struct zhaoxin_uncore_pmu *pmu, int cpu) +{ + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(pmu->type->name, "llc")) + return pmu->boxes[zx_topology_cluster_id(cpu)]; + else + return pmu->boxes[zx_topology_subnode_id(cpu)]; + } else { + return pmu->boxes[zx_topology_package_id(cpu)]; + } +} + +static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + u64 count; + + WARN_ON_ONCE(box->cpu != smp_processor_id()); + rdmsrl(event->hw.event_base, count); + return count; +} + +static void uncore_assign_hw_event(struct zhaoxin_uncore_box *box, + struct perf_event *event, int idx) +{ + struct 
hw_perf_event *hwc = &event->hw; + + hwc->idx = idx; + hwc->last_tag = ++box->tags[idx]; + + if (uncore_pmc_fixed(hwc->idx)) { + hwc->event_base = uncore_fixed_ctr(box); + hwc->config_base = uncore_fixed_ctl(box); + return; + } + + hwc->config_base = uncore_event_ctl(box, hwc->idx); + hwc->event_base = uncore_perf_ctr(box, hwc->idx); +} + +void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + u64 prev_count, new_count, delta; + int shift; + + if (uncore_pmc_fixed(event->hw.idx)) + shift = 64 - uncore_fixed_ctr_bits(box); + else + shift = 64 - uncore_perf_ctr_bits(box); + + /* the hrtimer might modify the previous event value */ +again: + prev_count = local64_read(&event->hw.prev_count); + new_count = uncore_read_counter(box, event); + if (local64_xchg(&event->hw.prev_count, new_count) != prev_count) + goto again; + + delta = (new_count << shift) - (prev_count << shift); + delta >>= shift; + + local64_add(delta, &event->count); +} + +/*KX5000/KX6000 uncore ops start*/ +static void kx5000_uncore_msr_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + wrmsrl(event->hw.config_base, 0); +} + +static void kx5000_uncore_msr_disable_box(struct zhaoxin_uncore_box *box) +{ + wrmsrl(KX5000_UNC_PERF_GLOBAL_CTL, 0); +} + +static void kx5000_uncore_msr_enable_box(struct zhaoxin_uncore_box *box) +{ + wrmsrl(KX5000_UNC_PERF_GLOBAL_CTL, + KX5000_UNC_GLOBAL_CTL_EN_PC_ALL | KX5000_UNC_GLOBAL_CTL_EN_FC); +} + +static void kx5000_uncore_msr_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (hwc->idx < UNCORE_PMC_IDX_FIXED) + wrmsrl(hwc->config_base, hwc->config | KX5000_UNC_CTL_EN); + else + wrmsrl(hwc->config_base, KX5000_UNC_FIXED_CTR_CTL_EN); +} + +static struct attribute *kx5000_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_cmask3.attr, + NULL, 
+}; + +static struct attribute_group kx5000_uncore_format_group = { + .name = "format", + .attrs = kx5000_uncore_formats_attr, +}; + +static struct uncore_event_desc kx5000_uncore_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kx5000_uncore_msr_ops = { + .disable_box = kx5000_uncore_msr_disable_box, + .enable_box = kx5000_uncore_msr_enable_box, + .disable_event = kx5000_uncore_msr_disable_event, + .enable_event = kx5000_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct zhaoxin_uncore_type kx5000_uncore_box = { + .name = "", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX5000_UNC_PERFEVTSEL0, + .perf_ctr = KX5000_UNC_UNCORE_PMC0, + .fixed_ctr = KX5000_UNC_FIXED_CTR, + .fixed_ctl = KX5000_UNC_FIXED_CTR_CTRL, + .event_mask = KX5000_UNC_RAW_EVENT_MASK, + .event_descs = kx5000_uncore_events, + .ops = &kx5000_uncore_msr_ops, + .format_group = &kx5000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kx5000_msr_uncores[] = { + &kx5000_uncore_box, + NULL, +}; +/*KX5000/KX6000 uncore ops end*/ + +/*KH40000 msr ops start*/ +static void kh40000_uncore_msr_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + wrmsrl(hwc->config_base, hwc->config); +} + +static void kh40000_uncore_msr_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + wrmsrl(hwc->config_base, hwc->config | KH40000_PMON_CTL_EN); +} + +static void kh40000_uncore_msr_disable_box(struct zhaoxin_uncore_box *box) +{ + u64 config; + unsigned int msr; + + msr = uncore_msr_box_ctl(box); + if (msr) { + rdmsrl(msr, config); + config |= KH40000_PMON_BOX_CTL_FRZ; + wrmsrl(msr, config); + } +} + +static void kh40000_uncore_msr_enable_box(struct zhaoxin_uncore_box *box) +{ + u64 config; + unsigned int msr; + + msr = uncore_msr_box_ctl(box); + if 
(msr) { + rdmsrl(msr, config); + config &= ~KH40000_PMON_BOX_CTL_FRZ; + wrmsrl(msr, config); + } +} + +static void kh40000_uncore_msr_init_box(struct zhaoxin_uncore_box *box) +{ + unsigned int msr = uncore_msr_box_ctl(box); + + if (msr) { + wrmsrl(msr, KH40000_PMON_BOX_CTL_INT); + wrmsrl(msr, 0); + } +} + +static struct attribute *kh40000_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static struct attribute_group kh40000_uncore_format_group = { + .name = "format", + .attrs = kh40000_uncore_formats_attr, +}; + +static struct uncore_event_desc kh40000_uncore_llc_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_hif_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_zzi_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kh40000_uncore_msr_ops = { + .init_box = kh40000_uncore_msr_init_box, + .disable_box = kh40000_uncore_msr_disable_box, + .enable_box = kh40000_uncore_msr_enable_box, + .disable_event = kh40000_uncore_msr_disable_event, + .enable_event = kh40000_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_llc_box = { + .name = "llc", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = KH40000_LLC_MSR_PMON_CTL0, + .perf_ctr = KH40000_LLC_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_LLC_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_llc_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_hif_box = { + .name = "hif", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KH40000_HIF_MSR_PMON_CTL0, + .perf_ctr = 
KH40000_HIF_MSR_PMON_CTR0, + .fixed_ctr = KH40000_HIF_MSR_PMON_FIXED_CTR, + .fixed_ctl = KH40000_HIF_MSR_PMON_FIXED_CTL, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_HIF_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_hif_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_zzi_box = { + .name = "zzi", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = KH40000_ZZI_MSR_PMON_CTL0, + .perf_ctr = KH40000_ZZI_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZZI_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_zzi_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kh40000_msr_uncores[] = { + &kh40000_uncore_llc_box, + &kh40000_uncore_hif_box, + &kh40000_uncore_zzi_box, + NULL, +}; +/*KH40000 msr ops end*/ + +/*KH40000 pci ops start*/ +static void kh40000_uncore_pci_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, hwc->config); +} + +static void kh40000_uncore_pci_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, hwc->config | KH40000_PMON_CTL_EN); +} + +static void kh40000_uncore_pci_disable_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + u32 config = 0; + + if (!pci_read_config_dword(pdev, box_ctl, &config)) { + config |= KH40000_PMON_BOX_CTL_FRZ; + pci_write_config_dword(pdev, box_ctl, config); + } +} + +static void kh40000_uncore_pci_enable_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int 
box_ctl = uncore_pci_box_ctl(box); + u32 config = 0; + + if (!pci_read_config_dword(pdev, box_ctl, &config)) { + config &= ~KH40000_PMON_BOX_CTL_FRZ; + pci_write_config_dword(pdev, box_ctl, config); + } +} + +static u64 kh40000_uncore_pci_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count + 1); + pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count); + + return count; +} + +static void kh40000_uncore_pci_init_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + + pci_write_config_dword(pdev, box_ctl, KH40000_PMON_PCI_BOX_CTL_INT); +} + +static struct uncore_event_desc kh40000_uncore_imc_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_pci_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_zpi_dll_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_zdi_dll_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_pxptrf_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kh40000_uncore_pci_ops = { + .init_box = kh40000_uncore_pci_init_box, + .disable_box = kh40000_uncore_pci_disable_box, + .enable_box = kh40000_uncore_pci_enable_box, + .disable_event = kh40000_uncore_pci_disable_event, + .enable_event = kh40000_uncore_pci_enable_event, + .read_counter = kh40000_uncore_pci_read_counter +}; + +static struct zhaoxin_uncore_type kh40000_uncore_mc0 = { + .name = "mc0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KH40000_MC0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KH40000_MC0_CHy_PMON_FIXED_CTL, + .event_descs = kh40000_uncore_imc_events, + .perf_ctr = 
KH40000_MC0_CHy_PMON_CTR0, + .event_ctl = KH40000_MC0_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_MC0_CHy_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_mc1 = { + .name = "mc1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KH40000_MC1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KH40000_MC1_CHy_PMON_FIXED_CTL, + .event_descs = kh40000_uncore_imc_events, + .perf_ctr = KH40000_MC1_CHy_PMON_CTR0, + .event_ctl = KH40000_MC1_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_MC1_CHy_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_pci = { + .name = "pci", + .num_counters = 4, + .num_boxes = 10, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pci_events, + .perf_ctr = KH40000_PCI_PMON_CTR0, + .event_ctl = KH40000_PCI_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PCI_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_zpi_dll = { + .name = "zpi_dll", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_zpi_dll_events, + .perf_ctr = KH40000_ZPI_DLL_PMON_CTR0, + .event_ctl = KH40000_ZPI_DLL_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZPI_DLL_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_zdi_dll = { + .name = "zdi_dll", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_zdi_dll_events, + .perf_ctr = KH40000_ZDI_DLL_PMON_CTR0, + .event_ctl = KH40000_ZDI_DLL_PMON_CTL0, + .event_mask = 
KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZDI_DLL_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_pxptrf = { + .name = "pxptrf", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pxptrf_events, + .perf_ctr = KH40000_PXPTRF_PMON_CTR0, + .event_ctl = KH40000_PXPTRF_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PXPTRF_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +enum { + KH40000_PCI_UNCORE_MC0, + KH40000_PCI_UNCORE_MC1, + KH40000_PCI_UNCORE_PCI, + KH40000_PCI_UNCORE_ZPI_DLL, + KH40000_PCI_UNCORE_ZDI_DLL, + KH40000_PCI_UNCORE_PXPTRF, +}; + +static struct zhaoxin_uncore_type *kh40000_pci_uncores[] = { + [KH40000_PCI_UNCORE_MC0] = &kh40000_uncore_mc0, + [KH40000_PCI_UNCORE_MC1] = &kh40000_uncore_mc1, + [KH40000_PCI_UNCORE_PCI] = &kh40000_uncore_pci, + [KH40000_PCI_UNCORE_ZPI_DLL] = &kh40000_uncore_zpi_dll, + [KH40000_PCI_UNCORE_ZDI_DLL] = &kh40000_uncore_zdi_dll, + [KH40000_PCI_UNCORE_PXPTRF] = &kh40000_uncore_pxptrf, + NULL, +}; + +static const struct pci_device_id kh40000_uncore_pci_ids[] = { + { /* MC Channe0/1 */ + PCI_DEVICE(0x1D17, 0x31b2), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_MC0, 0), + }, + + { /* PCIE D2F0 */ + PCI_DEVICE(0x1D17, 0x0717), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 0), + }, + + { /* PCIE D2F1 */ + PCI_DEVICE(0x1D17, 0x0718), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 1), + }, + + { /* PCIE D3F0 */ + PCI_DEVICE(0x1D17, 0x0719), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 2), + }, + + { /* PCIE D3F1 */ + PCI_DEVICE(0x1D17, 0x071A), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 3), + }, + + { /* PCIE D3F2 */ + PCI_DEVICE(0x1D17, 0x071B), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 4), + }, + + { /* PCIE D4F0 
*/ + PCI_DEVICE(0x1D17, 0x071C), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 5), + }, + + { /* PCIE D4F1 */ + PCI_DEVICE(0x1D17, 0x071D), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 6), + }, + + { /* PCIE D5F0 */ + PCI_DEVICE(0x1D17, 0x071E), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 7), + }, + + { /* PCIE D5F1 */ + PCI_DEVICE(0x1D17, 0x0731), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 8), + }, + + { /* PCIE D5F2 */ + PCI_DEVICE(0x1D17, 0x0732), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 9), + }, + + { /* ZPI_DLL */ + PCI_DEVICE(0x1D17, 0x91c1), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_ZPI_DLL, 0), + }, + + { /* ZDI_DLL */ + PCI_DEVICE(0x1D17, 0x3b03), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_ZDI_DLL, 0), + }, + + { /* PXPTRF */ + PCI_DEVICE(0x1D17, 0x31B4), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PXPTRF, 0), + }, + + { /* end: all zeroes */ } +}; + +static struct pci_driver kh40000_uncore_pci_driver = { + .name = "kh40000_uncore", + .id_table = kh40000_uncore_pci_ids, +}; +/*KH40000 pci ops end*/ + + +/*KX8000 msr ops start*/ +static unsigned int kx8000_uncore_msr_offsets[] = { + 0x0, 0x13, 0x27, 0x3b, 0x4f, 0x63, 0x77, 0x8b +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mesh_box = { + .name = "mesh", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = KX8000_MESH_MSR_PMON_CTL0, + .perf_ctr = KX8000_MESH_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_MESH_MSR_PMON_BLK_CTL, + .msr_offsets = kx8000_uncore_msr_offsets, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_llc_box = { + .name = "llc", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = KX8000_LLC_MSR_PMON_CTL0, + .perf_ctr = KX8000_LLC_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + 
.box_ctl = KX8000_LLC_MSR_PMON_BLK_CTL, + .msr_offsets = kx8000_uncore_msr_offsets, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_homestop = { + .name = "homestop", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX8000_HOMESTOP_MSR_PMON_CTL0, + .perf_ctr = KX8000_HOMESTOP_MSR_PMON_CTR0, + .fixed_ctr = KX8000_HOMESTOP_MSR_PMON_FIXED_CTR, + .fixed_ctl = KX8000_HOMESTOP_MSR_PMON_FIXED_CTL, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_HOMESTOP_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_ccd_zdi_pl = { + .name = "ccd_zdi_pl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX8000_CCD_ZDI_PL_MSR_PMON_CTL0, + .perf_ctr = KX8000_CCD_ZDI_PL_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_CCD_ZDI_PL_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_iod_zdi_pl = { + .name = "iod_zdi_pl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX8000_IOD_ZDI_PL_MSR_PMON_CTL0, + .perf_ctr = KX8000_IOD_ZDI_PL_MSR_PMON_CTR0, + .fixed_ctr = KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR, + .fixed_ctl = KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_IOD_ZDI_PL_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + + +static struct zhaoxin_uncore_type *kx8000_msr_uncores[] = { + &kx8000_uncore_llc_box, + &kx8000_uncore_mesh_box, + &kh40000_uncore_hif_box, + &kx8000_uncore_homestop, + &kx8000_uncore_ccd_zdi_pl, + &kx8000_uncore_iod_zdi_pl, + NULL, +}; +/*KX8000 msr ops end*/ + +/*KX8000 
pci ops start*/ +static unsigned int kx8000_mc_ctr_lh_offsets[] = { + 0xc, 0xe, 0x10, 0x12, 0x14 +}; + +static u64 kx8000_uncore_pci_mc_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_word(pdev, hwc->event_base, (u16 *)&count + 3); + pci_read_config_dword(pdev, hwc->event_base + kx8000_mc_ctr_lh_offsets[hwc->idx], + (u32 *)&count); + + return count; +} + +static struct zhaoxin_uncore_ops kx8000_uncore_pci_mc_ops = { + .init_box = kh40000_uncore_pci_init_box, + .disable_box = kh40000_uncore_pci_disable_box, + .enable_box = kh40000_uncore_pci_enable_box, + .disable_event = kh40000_uncore_pci_disable_event, + .enable_event = kh40000_uncore_pci_enable_event, + .read_counter = kx8000_uncore_pci_mc_read_counter +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mc_a0 = { + .name = "mc_a0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX8000_MC_A0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX8000_MC_A0_CHy_PMON_FIXED_CTL, + .perf_ctr = KX8000_MC_A0_CHy_PMON_CTR0, + .event_ctl = KX8000_MC_A0_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_MC_A0_CHy_PMON_BLK_CTL, + .ops = &kx8000_uncore_pci_mc_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mc_a1 = { + .name = "mc_a1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX8000_MC_A1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX8000_MC_A1_CHy_PMON_FIXED_CTL, + .perf_ctr = KX8000_MC_A1_CHy_PMON_CTR0, + .event_ctl = KX8000_MC_A1_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_MC_A1_CHy_PMON_BLK_CTL, + .ops = &kx8000_uncore_pci_mc_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mc_b0 = { + .name = "mc_b0", + 
.num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX8000_MC_B0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX8000_MC_B0_CHy_PMON_FIXED_CTL, + .perf_ctr = KX8000_MC_B0_CHy_PMON_CTR0, + .event_ctl = KX8000_MC_B0_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_MC_B0_CHy_PMON_BLK_CTL, + .ops = &kx8000_uncore_pci_mc_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mc_b1 = { + .name = "mc_b1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX8000_MC_B1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX8000_MC_B1_CHy_PMON_FIXED_CTL, + .perf_ctr = KX8000_MC_B1_CHy_PMON_CTR0, + .event_ctl = KX8000_MC_B1_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_MC_B1_CHy_PMON_BLK_CTL, + .ops = &kx8000_uncore_pci_mc_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kx8000_uncore_pci = { + .name = "pci", + .num_counters = 4, + .num_boxes = 17, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pci_events, + .perf_ctr = KH40000_PCI_PMON_CTR0, + .event_ctl = KH40000_PCI_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PCI_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + + +enum { + KX8000_PCI_UNCORE_MC_A0, + KX8000_PCI_UNCORE_MC_A1, + KX8000_PCI_UNCORE_MC_B0, + KX8000_PCI_UNCORE_MC_B1, + KX8000_PCI_UNCORE_PCI, + KX8000_PCI_UNCORE_PXPTRF, +}; + +static struct zhaoxin_uncore_type *kx8000_pci_uncores[] = { + [KX8000_PCI_UNCORE_MC_A0] = &kx8000_uncore_mc_a0, + [KX8000_PCI_UNCORE_MC_A1] = &kx8000_uncore_mc_a1, + [KX8000_PCI_UNCORE_MC_B0] = &kx8000_uncore_mc_b0, + [KX8000_PCI_UNCORE_MC_B1] = &kx8000_uncore_mc_b1, + [KX8000_PCI_UNCORE_PCI] = &kx8000_uncore_pci, + [KX8000_PCI_UNCORE_PXPTRF] = &kh40000_uncore_pxptrf, + NULL, +}; + +static const struct pci_device_id 
kx8000_uncore_pci_ids[] = { + { /* MC Channe A0/A1/B0/B1 */ + PCI_DEVICE(0x1D17, 0x31B2), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_MC_A0, 0), + }, + + { /* PCIE D2F0 */ + PCI_DEVICE(0x1D17, 0x0717), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 0), + }, + + { /* PCIE D2F1 */ + PCI_DEVICE(0x1D17, 0x0718), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 1), + }, + + { /* PCIE D2F2 */ + PCI_DEVICE(0x1D17, 0x0733), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 2), + }, + + { /* PCIE D2F3 */ + PCI_DEVICE(0x1D17, 0x0734), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 3), + }, + + { /* PCIE D3F0 */ + PCI_DEVICE(0x1D17, 0x0719), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 4), + }, + + { /* PCIE D3F1 */ + PCI_DEVICE(0x1D17, 0x0735), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 5), + }, + + { /* PCIE D3F2 */ + PCI_DEVICE(0x1D17, 0x0739), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 6), + }, + + { /* PCIE D3F3 */ + PCI_DEVICE(0x1D17, 0x073A), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 7), + }, + + { /* PCIE D4F0 */ + PCI_DEVICE(0x1D17, 0x071B), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 8), + }, + + { /* PCIE D4F1 */ + PCI_DEVICE(0x1D17, 0x071C), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 9), + }, + + { /* PCIE D4F2 */ + PCI_DEVICE(0x1D17, 0x0736), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 10), + }, + + { /* PCIE D4F3 */ + PCI_DEVICE(0x1D17, 0x0737), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 11), + }, + + { /* PCIE D4F4 */ + PCI_DEVICE(0x1D17, 0x0738), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 12), + }, + + { /* PCIE D5F0 */ + PCI_DEVICE(0x1D17, 0x071D), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 13), + }, + + { /* PCIE D5F1 */ + PCI_DEVICE(0x1D17, 0x071E), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 14), + }, + + { /* PCIE D5F2 
*/ + PCI_DEVICE(0x1D17, 0x0732), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 15), + }, + + { /* PCIE D5F3 */ + PCI_DEVICE(0x1D17, 0x073B), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 16), + }, + + { /* PXPTRF */ + PCI_DEVICE(0x1D17, 0x31B4), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PXPTRF, 0), + }, + + { /* end: all zeroes */ } +}; + + +static struct pci_driver kx8000_uncore_pci_driver = { + .name = "kx8000_uncore", + .id_table = kx8000_uncore_pci_ids, +}; +/*KX8000 pci ops end*/ + +/*KX8000 mmio ops start*/ +static void kx8000_uncore_mmio_init_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = NULL; + unsigned int box_ctl = uncore_mmio_box_ctl(box); + resource_size_t addr; + u32 pci_dword; + int mmio_base_offset; + + pdev = pci_get_device(0x1d17, 0x31b1, pdev); + if (!pdev) + return; + + if (!strcmp(box->pmu->name, "iod_zdi_dl")) + mmio_base_offset = KX8000_IOD_ZDI_DL_MMIO_BASE_OFFSET; + else + mmio_base_offset = KX8000_CCD_ZDI_DL_MMIO_BASE_OFFSET; + + pci_read_config_dword(pdev, mmio_base_offset, &pci_dword); + addr = (u64)(pci_dword & KX8000_ZDI_DL_MMIO_BASE_MASK) << 32; + + pci_read_config_dword(pdev, mmio_base_offset + 4, &pci_dword); + addr |= pci_dword & KX8000_ZDI_DL_MMIO_MEM0_MASK; + + box->io_addr = ioremap(addr, KX8000_ZDI_DL_MMIO_SIZE); + if (!box->io_addr) + return; + + writel(KH40000_PMON_PCI_BOX_CTL_INT, box->io_addr + box_ctl); +} + +static void kx8000_uncore_mmio_disable_box(struct zhaoxin_uncore_box *box) +{ + u32 config; + unsigned int box_ctl = uncore_mmio_box_ctl(box); + + if (!box->io_addr) + return; + + config = readl(box->io_addr + box_ctl); + config |= KH40000_PMON_BOX_CTL_FRZ; + writel(config, box->io_addr + box_ctl); +} + +static void kx8000_uncore_mmio_enable_box(struct zhaoxin_uncore_box *box) +{ + u32 config; + unsigned int box_ctl = uncore_mmio_box_ctl(box); + + if (!box->io_addr) + return; + + config = readl(box->io_addr + box_ctl); + config &= ~KH40000_PMON_BOX_CTL_FRZ; + 
writel(config, box->io_addr + box_ctl); +} + +static void kx8000_uncore_mmio_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!box->io_addr) + return; + + writel(hwc->config | KH40000_PMON_CTL_EN, box->io_addr + hwc->config_base); +} + +static void kx8000_uncore_mmio_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!box->io_addr) + return; + + writel(hwc->config, box->io_addr + hwc->config_base); +} + +static void uncore_mmio_exit_box(struct zhaoxin_uncore_box *box) +{ + if (box->io_addr) + iounmap(box->io_addr); +} + +static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + u64 count = 0; + u64 count_low = 0; + u64 count_high = 0; + + if (!box->io_addr) + return 0; + + count_high = readl(box->io_addr + event->hw.event_base) & 0xffff; + count_low = readl(box->io_addr + event->hw.event_base + 4); + count = (count_high << 32) + count_low; + + return count; +} + +static struct zhaoxin_uncore_ops kx8000_uncore_mmio_ops = { + .init_box = kx8000_uncore_mmio_init_box, + .exit_box = uncore_mmio_exit_box, + .disable_box = kx8000_uncore_mmio_disable_box, + .enable_box = kx8000_uncore_mmio_enable_box, + .disable_event = kx8000_uncore_mmio_disable_event, + .enable_event = kx8000_uncore_mmio_enable_event, + .read_counter = uncore_mmio_read_counter, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_iod_zdi_dl = { + .name = "iod_zdi_dl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .perf_ctr = KX8000_ZDI_DL_MMIO_PMON_CTR0, + .event_ctl = KX8000_ZDI_DL_MMIO_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_ZDI_DL_MMIO_PMON_BLK_CTL, + .ops = &kx8000_uncore_mmio_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_ccd_zdi_dl = { + .name = "ccd_zdi_dl", + 
.num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .perf_ctr = KX8000_ZDI_DL_MMIO_PMON_CTR0, + .event_ctl = KX8000_ZDI_DL_MMIO_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_ZDI_DL_MMIO_PMON_BLK_CTL, + .ops = &kx8000_uncore_mmio_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kx8000_mmio_uncores[] = { + &kx8000_uncore_iod_zdi_dl, + &kx8000_uncore_ccd_zdi_dl, + NULL, +}; + +/*KX8000 mmio ops end*/ + + + +static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer) +{ + struct zhaoxin_uncore_box *box; + struct perf_event *event; + unsigned long flags; + int bit; + + box = container_of(hrtimer, struct zhaoxin_uncore_box, hrtimer); + if (!box->n_active || box->cpu != smp_processor_id()) + return HRTIMER_NORESTART; + /* + * disable local interrupt to prevent uncore_pmu_event_start/stop + * to interrupt the update process + */ + local_irq_save(flags); + + /* + * handle boxes with an active event list as opposed to active + * counters + */ + list_for_each_entry(event, &box->active_list, active_entry) { + uncore_perf_event_update(box, event); + } + + for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX) + uncore_perf_event_update(box, box->events[bit]); + + local_irq_restore(flags); + + hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration)); + return HRTIMER_RESTART; +} + +static void uncore_pmu_start_hrtimer(struct zhaoxin_uncore_box *box) +{ + hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration), + HRTIMER_MODE_REL_PINNED); +} + +static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box) +{ + hrtimer_cancel(&box->hrtimer); +} + +static void uncore_pmu_init_hrtimer(struct zhaoxin_uncore_box *box) +{ + hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + box->hrtimer.function = uncore_pmu_hrtimer; +} + +static struct zhaoxin_uncore_box *uncore_alloc_box(struct zhaoxin_uncore_type *type, + int node) +{ + 
int i, size, numshared = type->num_shared_regs; + struct zhaoxin_uncore_box *box; + + size = sizeof(*box) + numshared * sizeof(struct zhaoxin_uncore_extra_reg); + + box = kzalloc_node(size, GFP_KERNEL, node); + if (!box) + return NULL; + + for (i = 0; i < numshared; i++) + raw_spin_lock_init(&box->shared_regs[i].lock); + + uncore_pmu_init_hrtimer(box); + box->cpu = -1; + box->package_id = -1; + box->cluster_id = -1; + box->subnode_id = -1; + + /* set default hrtimer timeout */ + box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL; + + INIT_LIST_HEAD(&box->active_list); + + return box; +} + +static bool is_box_event(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + return &box->pmu->pmu == event->pmu; +} + +static int +uncore_collect_events(struct zhaoxin_uncore_box *box, struct perf_event *leader, + bool dogrp) +{ + struct perf_event *event; + int n, max_count; + + max_count = box->pmu->type->num_counters; + if (box->pmu->type->fixed_ctl) + max_count++; + + if (box->n_events >= max_count) + return -EINVAL; + + n = box->n_events; + + if (is_box_event(box, leader)) { + box->event_list[n] = leader; + n++; + } + + if (!dogrp) + return n; + + for_each_sibling_event(event, leader) { + if (!is_box_event(box, event) || + event->state <= PERF_EVENT_STATE_OFF) + continue; + + if (n >= max_count) + return -EINVAL; + + box->event_list[n] = event; + n++; + } + return n; +} + +static struct event_constraint * +uncore_get_event_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + struct zhaoxin_uncore_type *type = box->pmu->type; + struct event_constraint *c; + + if (type->ops->get_constraint) { + c = type->ops->get_constraint(box, event); + if (c) + return c; + } + + if (event->attr.config == UNCORE_FIXED_EVENT) + return &uncore_constraint_fixed; + + if (type->constraints) { + for_each_event_constraint(c, type->constraints) { + if ((event->hw.config & c->cmask) == c->code) + return c; + } + } + + return &type->unconstrainted; +} + +static void 
uncore_put_event_constraint(struct zhaoxin_uncore_box *box,
		struct perf_event *event)
{
	/* give the type a chance to release a dynamically taken constraint */
	if (box->pmu->type->ops->put_constraint)
		box->pmu->type->ops->put_constraint(box, event);
}

/*
 * Schedule the first n events of box->event_list onto hardware
 * counters.  On success the chosen counter index for event i is written
 * to assign[i] (when assign != NULL) and 0 is returned; -EINVAL means
 * the set is unschedulable.  Mirrors the core-PMU fast/slow scheduling:
 * first try to keep every event on the counter it already occupies,
 * and only fall back to the generic constraint solver when that fails.
 */
static int uncore_assign_events(struct zhaoxin_uncore_box *box, int assign[], int n)
{
	unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	struct event_constraint *c;
	int i, wmin, wmax, ret = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);

	/* collect constraints and track min/max constraint weight */
	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		c = uncore_get_event_constraint(box, box->event_list[i]);
		box->event_constraint[i] = c;
		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/* fastpath, try to reuse previous register */
	for (i = 0; i < n; i++) {
		hwc = &box->event_list[i]->hw;
		c = box->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	/* slow path: full constraint-weight-ordered assignment */
	if (i != n)
		ret = perf_assign_events(box->event_constraint, n,
				wmin, wmax, n, assign);

	/* dry run (assign == NULL) or failure: release taken constraints */
	if (!assign || ret) {
		for (i = 0; i < n; i++)
			uncore_put_event_constraint(box, box->event_list[i]);
	}
	return ret ?
-EINVAL : 0;
}

/*
 * pmu::start — arm an already-added event on its assigned counter,
 * snapshot the raw count as the new baseline and kick the polling
 * hrtimer when this is the first active event on the box.
 */
static void uncore_pmu_event_start(struct perf_event *event, int flags)
{
	struct zhaoxin_uncore_box *box = uncore_event_to_box(event);
	int idx = event->hw.idx;


	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
		return;

	/* must currently be stopped; starting twice is a caller bug */
	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->events[idx] = event;
	box->n_active++;
	__set_bit(idx, box->active_mask);

	/* baseline before enabling so the first delta is accurate */
	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
	uncore_enable_event(box, event);

	/* first active event: start periodic readout to avoid wraparound */
	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}

/*
 * pmu::stop — disable the counter, mark the event stopped, and on
 * PERF_EF_UPDATE fold the final delta into event->count.
 */
static void uncore_pmu_event_stop(struct perf_event *event, int flags)
{
	struct zhaoxin_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
		uncore_disable_event(box, event);
		box->n_active--;
		box->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;

		/* last active event gone: the polling timer is no longer needed */
		if (box->n_active == 0)
			uncore_pmu_cancel_hrtimer(box);
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		uncore_perf_event_update(box, event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

/*
 * pmu::add — collect the event into the box's list, run the scheduler,
 * and (re)start events whose counter assignment changed.  Returns 0 on
 * success or a negative errno when the event set cannot be scheduled.
 */
static int uncore_pmu_event_add(struct perf_event *event, int flags)
{
	struct zhaoxin_uncore_box *box = uncore_event_to_box(event);
	struct hw_perf_event *hwc = &event->hw;
	int assign[UNCORE_PMC_IDX_MAX];
	int i, n, ret;

	if (!box)
		return -ENODEV;

	ret = n = uncore_collect_events(box, event, false);
	if (ret < 0)
		return ret;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* without PERF_EF_START the event stays stopped after add */
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	ret = uncore_assign_events(box, assign, n);
	if (ret)
		return ret;

	/* save events moving to new counters */
	for (i = 0; i < box->n_events; i++) {
		event = box->event_list[i];
		hwc =
&event->hw; + + if (hwc->idx == assign[i] && + hwc->last_tag == box->tags[assign[i]]) + continue; + /* + * Ensure we don't accidentally enable a stopped + * counter simply because we rescheduled. + */ + if (hwc->state & PERF_HES_STOPPED) + hwc->state |= PERF_HES_ARCH; + + uncore_pmu_event_stop(event, PERF_EF_UPDATE); + } + + /* reprogram moved events into new counters */ + for (i = 0; i < n; i++) { + event = box->event_list[i]; + hwc = &event->hw; + + if (hwc->idx != assign[i] || + hwc->last_tag != box->tags[assign[i]]) + uncore_assign_hw_event(box, event, assign[i]); + else if (i < box->n_events) + continue; + + if (hwc->state & PERF_HES_ARCH) + continue; + + uncore_pmu_event_start(event, 0); + } + box->n_events = n; + + return 0; +} + +static void uncore_pmu_event_del(struct perf_event *event, int flags) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + int i; + + uncore_pmu_event_stop(event, PERF_EF_UPDATE); + + for (i = 0; i < box->n_events; i++) { + if (event == box->event_list[i]) { + uncore_put_event_constraint(box, event); + + for (++i; i < box->n_events; i++) + box->event_list[i - 1] = box->event_list[i]; + + --box->n_events; + break; + } + } + + event->hw.idx = -1; + event->hw.last_tag = ~0ULL; +} + +static void uncore_pmu_event_read(struct perf_event *event) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + + uncore_perf_event_update(box, event); +} + +static int uncore_validate_group(struct zhaoxin_uncore_pmu *pmu, + struct perf_event *event) +{ + struct perf_event *leader = event->group_leader; + struct zhaoxin_uncore_box *fake_box; + int ret = -EINVAL, n; + + fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE); + if (!fake_box) + return -ENOMEM; + + fake_box->pmu = pmu; + /* + * the event is not yet connected with its + * siblings therefore we must first collect + * existing siblings, then add the new event + * before we can simulate the scheduling + */ + n = uncore_collect_events(fake_box, leader, true); + if (n < 
0) + goto out; + + fake_box->n_events = n; + n = uncore_collect_events(fake_box, event, false); + if (n < 0) + goto out; + + fake_box->n_events = n; + + ret = uncore_assign_events(fake_box, NULL, n); +out: + kfree(fake_box); + return ret; +} + +static int uncore_pmu_event_init(struct perf_event *event) +{ + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + struct hw_perf_event *hwc = &event->hw; + int ret; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + pmu = uncore_event_to_pmu(event); + /* no device found for this pmu */ + if (pmu->func_id < 0) + return -ENOENT; + + /* Sampling not supported yet */ + if (hwc->sample_period) + return -EINVAL; + + /* + * Place all uncore events for a particular physical package + * onto a single cpu + */ + if (event->cpu < 0) + return -EINVAL; + box = uncore_pmu_to_box(pmu, event->cpu); + if (!box || box->cpu < 0) + return -EINVAL; + event->cpu = box->cpu; + event->pmu_private = box; + + //event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; + + event->hw.idx = -1; + event->hw.last_tag = ~0ULL; + event->hw.extra_reg.idx = EXTRA_REG_NONE; + event->hw.branch_reg.idx = EXTRA_REG_NONE; + + if (event->attr.config == UNCORE_FIXED_EVENT) { + /* no fixed counter */ + if (!pmu->type->fixed_ctl) + return -EINVAL; + /* + * if there is only one fixed counter, only the first pmu + * can access the fixed counter + */ + if (pmu->type->single_fixed && pmu->pmu_idx > 0) + return -EINVAL; + + /* fixed counters have event field hardcoded to zero */ + hwc->config = 0ULL; + } else { + hwc->config = event->attr.config & + (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32)); + if (pmu->type->ops->hw_config) { + ret = pmu->type->ops->hw_config(box, event); + if (ret) + return ret; + } + } + + if (event->group_leader != event) + ret = uncore_validate_group(pmu, event); + else + ret = 0; + + return ret; +} + +static void uncore_pmu_enable(struct pmu *pmu) +{ + struct zhaoxin_uncore_pmu *uncore_pmu; + struct 
zhaoxin_uncore_box *box; + + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + if (!uncore_pmu) + return; + + box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); + if (!box) + return; + + if (uncore_pmu->type->ops->enable_box) + uncore_pmu->type->ops->enable_box(box); +} + +static void uncore_pmu_disable(struct pmu *pmu) +{ + struct zhaoxin_uncore_pmu *uncore_pmu; + struct zhaoxin_uncore_box *box; + + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + if (!uncore_pmu) + return; + + box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); + if (!box) + return; + + if (uncore_pmu->type->ops->disable_box) + uncore_pmu->type->ops->disable_box(box); +} + +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + cpumask_t *active_mask; + struct pmu *pmu; + struct zhaoxin_uncore_pmu *uncore_pmu; + + pmu = dev_get_drvdata(dev); + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(uncore_pmu->type->name, "llc")) + active_mask = &uncore_cpu_cluster_mask; + else + active_mask = &uncore_cpu_subnode_mask; + } else { + active_mask = &uncore_cpu_mask; + } + return cpumap_print_to_pagebuf(true, buf, active_mask); +} +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *uncore_pmu_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group uncore_pmu_attr_group = { + .attrs = uncore_pmu_attrs, +}; + +static int uncore_pmu_register(struct zhaoxin_uncore_pmu *pmu) +{ + int ret; + + if (!pmu->type->pmu) { + pmu->pmu = (struct pmu) { + .attr_groups = pmu->type->attr_groups, + .task_ctx_nr = perf_invalid_context, + .pmu_enable = uncore_pmu_enable, + .pmu_disable = uncore_pmu_disable, + .event_init = uncore_pmu_event_init, + .add = uncore_pmu_event_add, + .del = uncore_pmu_event_del, + .start = uncore_pmu_event_start, + .stop = uncore_pmu_event_stop, + .read = uncore_pmu_event_read, + .module 
= THIS_MODULE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + } else { + pmu->pmu = *pmu->type->pmu; + pmu->pmu.attr_groups = pmu->type->attr_groups; + } + + if (pmu->type->num_boxes == 1) { + if (strlen(pmu->type->name) > 0) + sprintf(pmu->name, "uncore_%s", pmu->type->name); + else + sprintf(pmu->name, "uncore"); + } else { + sprintf(pmu->name, "uncore_%s_%d", pmu->type->name, + pmu->pmu_idx); + } + + ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); + if (!ret) + pmu->registered = true; + return ret; +} + +static void uncore_pmu_unregister(struct zhaoxin_uncore_pmu *pmu) +{ + if (!pmu->registered) + return; + perf_pmu_unregister(&pmu->pmu); + pmu->registered = false; +} + +static void uncore_free_boxes(struct zhaoxin_uncore_pmu *pmu) +{ + int i, max; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(pmu->type->name, "llc")) + max = max_clusters; + else + max = max_subnodes; + } else { + max = max_packages; + } + + for (i = 0; i < max; i++) + kfree(pmu->boxes[i]); + kfree(pmu->boxes); +} + +static void uncore_type_exit(struct zhaoxin_uncore_type *type) +{ + struct zhaoxin_uncore_pmu *pmu = type->pmus; + int i; + + if (pmu) { + for (i = 0; i < type->num_boxes; i++, pmu++) { + uncore_pmu_unregister(pmu); + uncore_free_boxes(pmu); + } + kfree(type->pmus); + type->pmus = NULL; + } + kfree(type->events_group); + type->events_group = NULL; +} + +static void uncore_types_exit(struct zhaoxin_uncore_type **types) +{ + for (; *types; types++) + uncore_type_exit(*types); +} + +static int __init uncore_type_init(struct zhaoxin_uncore_type *type, bool setid) +{ + struct zhaoxin_uncore_pmu *pmus; + size_t size; + int i, j; + + pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL); + if (!pmus) + return -ENOMEM; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(type->name, "llc")) + size = max_clusters * sizeof(struct zhaoxin_uncore_box *); + else + size = max_subnodes * sizeof(struct zhaoxin_uncore_box *); + } else { + 
size = max_packages * sizeof(struct zhaoxin_uncore_box *); + } + + for (i = 0; i < type->num_boxes; i++) { + pmus[i].func_id = setid ? i : -1; + pmus[i].pmu_idx = i; + pmus[i].type = type; + pmus[i].boxes = kzalloc(size, GFP_KERNEL); + if (!pmus[i].boxes) + goto err; + } + + type->pmus = pmus; + type->unconstrainted = (struct event_constraint) + __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, + 0, type->num_counters, 0, 0); + + if (type->event_descs) { + struct { + struct attribute_group group; + struct attribute *attrs[]; + } *attr_group; + for (i = 0; type->event_descs[i].attr.attr.name; i++) + ; + + attr_group = kzalloc(struct_size(attr_group, attrs, i + 1), GFP_KERNEL); + if (!attr_group) + goto err; + + attr_group->group.name = "events"; + attr_group->group.attrs = attr_group->attrs; + + for (j = 0; j < i; j++) + attr_group->attrs[j] = &type->event_descs[j].attr.attr; + + type->events_group = &attr_group->group; + } + + type->pmu_group = &uncore_pmu_attr_group; + + return 0; + +err: + for (i = 0; i < type->num_boxes; i++) + kfree(pmus[i].boxes); + kfree(pmus); + + return -ENOMEM; +} + +static int __init +uncore_types_init(struct zhaoxin_uncore_type **types, bool setid) +{ + int ret; + + for (; *types; types++) { + ret = uncore_type_init(*types, setid); + if (ret) + return ret; + } + return 0; +} + +/* + * add a pci uncore device + */ +static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + struct zhaoxin_uncore_box **boxes; + char mc_dev[10]; + int loop = 1; + int i, j = 0; + int subnode_id = 0; + int ret = 0; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) + subnode_id = uncore_pcibus_to_subnodeid(pdev->bus); + + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + strscpy(mc_dev, "mc0", sizeof("mc0")); + if (!strcmp(type->name, mc_dev)) 
+ loop = 2; + } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX8000) { + strscpy(mc_dev, "mc_a0", sizeof("mc_a0")); + if (!strcmp(type->name, mc_dev)) + loop = 4; + } + + boxes = kcalloc(loop, sizeof(struct zhaoxin_uncore_box *), GFP_KERNEL); + if (!boxes) + return -ENOMEM; + + for (i = 0; i < loop; i++) { + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data) + j]; + + if (!type) + continue; + /* + * for performance monitoring unit with multiple boxes, + * each box has a different function id. + */ + pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)]; + + if (WARN_ON_ONCE(pmu->boxes[subnode_id] != NULL)) + return -EINVAL; + + box = uncore_alloc_box(type, NUMA_NO_NODE); + if (!box) + return -ENOMEM; + + if (pmu->func_id < 0) + pmu->func_id = pdev->devfn; + else + WARN_ON_ONCE(pmu->func_id != pdev->devfn); + + atomic_inc(&box->refcnt); + box->subnode_id = subnode_id; + box->pci_dev = pdev; + box->pmu = pmu; + uncore_box_init(box); + boxes[i] = box; + + pci_set_drvdata(pdev, boxes); + pmu->boxes[subnode_id] = box; + if (atomic_inc_return(&pmu->activeboxes) > 1) { + if (!strcmp(type->name, mc_dev)) + goto next_loop; + else + return 0; + } + /* First active box registers the pmu */ + ret = uncore_pmu_register(pmu); + if (ret) { + pci_set_drvdata(pdev, NULL); + pmu->boxes[subnode_id] = NULL; + uncore_box_exit(box); + kfree(box); + } +next_loop: + j++; + } + + return ret; +} + +static void uncore_pci_remove(struct pci_dev *pdev) +{ + struct zhaoxin_uncore_box **boxes; + struct zhaoxin_uncore_box *box; + struct zhaoxin_uncore_pmu *pmu; + int subnode_id = 0; + int i = 0; + int loop = 1; + + boxes = pci_get_drvdata(pdev); + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(boxes[0]->pmu->type->name, "mc0")) + loop = 2; + else + loop = 1; + } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX8000) { + if (!strcmp(boxes[0]->pmu->type->name, "mc_a0")) + loop = 4; + else + loop = 1; + } + + + for (i = 0; i < loop; i++) { + box = boxes[i]; 
+ pmu = box->pmu; + if (WARN_ON_ONCE(subnode_id != box->subnode_id)) + return; + + pci_set_drvdata(pdev, NULL); + pmu->boxes[subnode_id] = NULL; + if (atomic_dec_return(&pmu->activeboxes) == 0) + uncore_pmu_unregister(pmu); + + uncore_box_exit(box); + kfree(box); + } + + kfree(boxes); +} + +static int __init uncore_pci_init(void) +{ + int ret; + + ret = uncore_types_init(uncore_pci_uncores, false); + if (ret) + goto errtype; + + uncore_pci_driver->probe = uncore_pci_probe; + uncore_pci_driver->remove = uncore_pci_remove; + + ret = pci_register_driver(uncore_pci_driver); + if (ret) + goto errtype; + + pcidrv_registered = true; + return 0; + +errtype: + uncore_types_exit(uncore_pci_uncores); + uncore_free_pcibus_map(); + uncore_pci_uncores = empty_uncore; + return ret; +} + +static void uncore_pci_exit(void) +{ + if (pcidrv_registered) { + pcidrv_registered = false; + pci_unregister_driver(uncore_pci_driver); + uncore_types_exit(uncore_pci_uncores); + uncore_free_pcibus_map(); + } +} + +static void uncore_change_type_ctx(struct zhaoxin_uncore_type *type, int old_cpu, + int new_cpu) +{ + struct zhaoxin_uncore_pmu *pmu = type->pmus; + struct zhaoxin_uncore_box *box; + int i, package_id, cluster_id = 0, subnode_id = 0; + + package_id = zx_topology_package_id(old_cpu < 0 ? new_cpu : old_cpu); + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + cluster_id = zx_topology_cluster_id(old_cpu < 0 ? new_cpu : old_cpu); + subnode_id = zx_topology_subnode_id(old_cpu < 0 ? 
new_cpu : old_cpu); + } + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(type->name, "llc")) { + box = pmu->boxes[cluster_id]; + if (!box) + continue; + } else { + box = pmu->boxes[subnode_id]; + if (!box) + continue; + } + } else { + box = pmu->boxes[package_id]; + if (!box) + continue; + } + + if (old_cpu < 0) { + + WARN_ON_ONCE(box->cpu != -1); + box->cpu = new_cpu; + continue; + } + WARN_ON_ONCE(box->cpu != old_cpu); + box->cpu = -1; + if (new_cpu < 0) + continue; + + uncore_pmu_cancel_hrtimer(box); + perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu); + box->cpu = new_cpu; + } +} + +static void uncore_change_context(struct zhaoxin_uncore_type **uncores, + int old_cpu, int new_cpu) +{ + for (; *uncores; uncores++) + uncore_change_type_ctx(*uncores, old_cpu, new_cpu); +} + +static void uncore_box_unref(struct zhaoxin_uncore_type **types, int id) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + int i; + + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[id]; + if (box && atomic_dec_return(&box->refcnt) == 0) + uncore_box_exit(box); + } + } +} + +struct zhaoxin_uncore_type *uncore_msr_cluster_uncores[] = { + &kh40000_uncore_llc_box, + NULL, +}; + +struct zhaoxin_uncore_type *uncore_msr_subnode_uncores[] = { + &kh40000_uncore_hif_box, + &kh40000_uncore_zzi_box, + NULL, +}; + +struct zhaoxin_uncore_type *uncore_pci_subnode_uncores[] = { + &kh40000_uncore_mc0, + &kh40000_uncore_mc1, + &kh40000_uncore_pci, + &kh40000_uncore_zpi_dll, + &kh40000_uncore_zdi_dll, + &kh40000_uncore_pxptrf, + NULL, +}; + +static void kx5000_event_cpu_offline(int cpu) +{ + int package, target; + + /* Check if exiting cpu is used for collecting uncore events */ + + if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) + goto unref_cpu_mask; + + /* Find a new cpu to collect 
uncore events */ + target = cpumask_any_but(topology_core_cpumask(cpu), cpu); + + /* Migrate uncore events to the new target */ + if (target < nr_cpu_ids) + cpumask_set_cpu(target, &uncore_cpu_mask); + else + target = -1; + + uncore_change_context(uncore_msr_uncores, cpu, target); + uncore_change_context(uncore_mmio_uncores, cpu, target); + uncore_change_context(uncore_pci_uncores, cpu, target); + +unref_cpu_mask: + /*clear the references*/ + package = zx_topology_package_id(cpu); + uncore_box_unref(uncore_msr_uncores, package); + uncore_box_unref(uncore_mmio_uncores, package); +} + +static void kh40000_event_cpu_offline(int cpu) +{ + int cluster_target, subnode_target; + int cluster_id, subnode_id; + + cluster_id = zx_topology_cluster_id(cpu); + subnode_id = zx_topology_subnode_id(cpu); + + /* Check if exiting cpu is used for collecting uncore events */ + + if (cpumask_test_and_clear_cpu(cpu, &uncore_cpu_cluster_mask)) { + cluster_target = cpumask_any_but(topology_cluster_core_cpumask(cpu), cpu); + if (cluster_target < nr_cpu_ids) + cpumask_set_cpu(cluster_target, &uncore_cpu_cluster_mask); + else + cluster_target = -1; + uncore_change_context(uncore_msr_cluster_uncores, cpu, cluster_target); + } else { + uncore_box_unref(uncore_msr_cluster_uncores, cluster_id); + } + + if (cpumask_test_and_clear_cpu(cpu, &uncore_cpu_subnode_mask)) { + subnode_target = cpumask_any_but(topology_subnode_core_cpumask(cpu), cpu); + if (subnode_target < nr_cpu_ids) + cpumask_set_cpu(subnode_target, &uncore_cpu_subnode_mask); + else + subnode_target = -1; + uncore_change_context(uncore_msr_subnode_uncores, cpu, subnode_target); + uncore_change_context(uncore_pci_subnode_uncores, cpu, subnode_target); + } else { + uncore_box_unref(uncore_msr_subnode_uncores, subnode_id); + } + +} + +static int uncore_event_cpu_offline(unsigned int cpu) +{ + unsigned int x86_model; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + kh40000_event_cpu_offline(cpu); + else 
+ kx5000_event_cpu_offline(cpu); + + return 0; +} + +static int kx5000_allocate_boxes(struct zhaoxin_uncore_type **types, + unsigned int id, unsigned int cpu) +{ + struct zhaoxin_uncore_box *box, *tmp; + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + LIST_HEAD(allocated); + int i; + + /* Try to allocate all required boxes */ + for (; *types; types++) { + type = *types; + pmu = type->pmus; + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (pmu->boxes[id]) + continue; + box = uncore_alloc_box(type, cpu_to_node(cpu)); + if (!box) + goto cleanup; + box->pmu = pmu; + box->package_id = id; + list_add(&box->active_list, &allocated); + } + } + + /* Install them in the pmus */ + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + box->pmu->boxes[id] = box; + } + return 0; + +cleanup: + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + kfree(box); + } + return -ENOMEM; +} + +static int kh40000_allocate_boxes(struct zhaoxin_uncore_type **types, + unsigned int id, unsigned int cpu) +{ + struct zhaoxin_uncore_box *box, *tmp; + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + LIST_HEAD(allocated); + int i; + + /* Try to allocate all required boxes */ + for (; *types; types++) { + type = *types; + pmu = type->pmus; + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (pmu->boxes[id]) + continue; + box = uncore_alloc_box(type, cpu_to_node(cpu)); + if (!box) + goto cleanup; + box->pmu = pmu; + if (!strcmp(type->name, "llc")) + box->cluster_id = id; + else + box->subnode_id = id; + list_add(&box->active_list, &allocated); + } + } + /* Install them in the pmus */ + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + box->pmu->boxes[id] = box; + } + return 0; + +cleanup: + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + kfree(box); + } + 
return -ENOMEM; +} + +static int uncore_box_ref(struct zhaoxin_uncore_type **types, + int id, unsigned int cpu) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + int i, ret = 0; + + int x86_model; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + ret = kh40000_allocate_boxes(types, id, cpu); + else + ret = kx5000_allocate_boxes(types, id, cpu); + + if (ret) + return ret; + + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[id]; + if (box && atomic_inc_return(&box->refcnt) == 1) + uncore_box_init(box); + } + } + return 0; +} + +static int kx5000_event_cpu_online(unsigned int cpu) +{ + int package, target, msr_ret, mmio_ret; + + package = zx_topology_package_id(cpu); + msr_ret = uncore_box_ref(uncore_msr_uncores, package, cpu); + mmio_ret = uncore_box_ref(uncore_mmio_uncores, package, cpu); + if (msr_ret && mmio_ret) + return -ENOMEM; + + /* + * Check if there is an online cpu in the package + * which collects uncore events already. 
+ */ + target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu)); + if (target < nr_cpu_ids) + return 0; + + cpumask_set_cpu(cpu, &uncore_cpu_mask); + + if (!msr_ret) + uncore_change_context(uncore_msr_uncores, -1, cpu); + if (!mmio_ret) + uncore_change_context(uncore_mmio_uncores, -1, cpu); + uncore_change_context(uncore_pci_uncores, -1, cpu); + + return 0; +} + +static int kh40000_event_cpu_online(unsigned int cpu) +{ + int cluster_target, subnode_target; + int cluster_id, subnode_id; + int cluster_ret, subnode_ret; + + cluster_id = zx_topology_cluster_id(cpu); + subnode_id = zx_topology_subnode_id(cpu); + + cluster_ret = uncore_box_ref(uncore_msr_cluster_uncores, cluster_id, cpu); + subnode_ret = uncore_box_ref(uncore_msr_subnode_uncores, subnode_id, cpu); + + if (cluster_ret && subnode_ret) + return -ENOMEM; + + /* + * Check if there is an online cpu in the cluster or subnode + * which collects uncore events already. + */ + + cluster_target = + cpumask_any_and(&uncore_cpu_cluster_mask, topology_cluster_core_cpumask(cpu)); + subnode_target = + cpumask_any_and(&uncore_cpu_subnode_mask, topology_subnode_core_cpumask(cpu)); + + if (cluster_target < nr_cpu_ids && subnode_target < nr_cpu_ids) + return 0; + + if (!cluster_ret && cluster_target >= nr_cpu_ids) { + cpumask_set_cpu(cpu, &uncore_cpu_cluster_mask); + uncore_change_context(uncore_msr_cluster_uncores, -1, cpu); + } + + if (!subnode_ret && subnode_target >= nr_cpu_ids) { + cpumask_set_cpu(cpu, &uncore_cpu_subnode_mask); + uncore_change_context(uncore_msr_subnode_uncores, -1, cpu); + uncore_change_context(uncore_pci_subnode_uncores, -1, cpu); + } + + return 0; +} + +static int uncore_event_cpu_online(unsigned int cpu) +{ + int x86_model; + int kx5000_ret = 0, kh40000_ret = 0; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + kh40000_ret = kh40000_event_cpu_online(cpu); + else + kx5000_ret = kx5000_event_cpu_online(cpu); + + if (kx5000_ret || kh40000_ret) + return 
-ENOMEM; + + return 0; +} + +static int __init type_pmu_register(struct zhaoxin_uncore_type *type) +{ + int i, ret; + + for (i = 0; i < type->num_boxes; i++) { + ret = uncore_pmu_register(&type->pmus[i]); + if (ret) + return ret; + } + return 0; +} + +static int __init uncore_msr_pmus_register(void) +{ + struct zhaoxin_uncore_type **types = uncore_msr_uncores; + int ret; + + for (; *types; types++) { + ret = type_pmu_register(*types); + if (ret) + return ret; + } + return 0; +} + +static int __init uncore_cpu_init(void) +{ + int ret; + + ret = uncore_types_init(uncore_msr_uncores, true); + if (ret) + goto err; + + ret = uncore_msr_pmus_register(); + if (ret) + goto err; + return 0; +err: + uncore_types_exit(uncore_msr_uncores); + uncore_msr_uncores = empty_uncore; + return ret; +} + +static int __init uncore_mmio_init(void) +{ + struct zhaoxin_uncore_type **types = uncore_mmio_uncores; + int ret; + + ret = uncore_types_init(types, true); + if (ret) + goto err; + + for (; *types; types++) { + ret = type_pmu_register(*types); + if (ret) + goto err; + } + return 0; +err: + uncore_types_exit(uncore_mmio_uncores); + uncore_mmio_uncores = empty_uncore; + return ret; +} + +struct zhaoxin_uncore_init_fun { + void (*cpu_init)(void); + int (*pci_init)(void); + void (*mmio_init)(void); +}; + +void kx5000_uncore_cpu_init(void) +{ + uncore_msr_uncores = kx5000_msr_uncores; +} + +static const struct zhaoxin_uncore_init_fun kx5000_uncore_init __initconst = { + .cpu_init = kx5000_uncore_cpu_init, +}; + +void kh40000_uncore_cpu_init(void) +{ + uncore_msr_uncores = kh40000_msr_uncores; +} + +int kh40000_uncore_pci_init(void) +{ + int ret = kh40000_pci2node_map_init();/*pci_bus to package mapping, do nothing*/ + + if (ret) + return ret; + uncore_pci_uncores = kh40000_pci_uncores; + uncore_pci_driver = &kh40000_uncore_pci_driver; + return 0; +} + +static const struct zhaoxin_uncore_init_fun kh40000_uncore_init __initconst = { + .cpu_init = kh40000_uncore_cpu_init, + .pci_init = 
kh40000_uncore_pci_init, +}; + +void kx8000_uncore_cpu_init(void) +{ + uncore_msr_uncores = kx8000_msr_uncores; +} + +int kx8000_uncore_pci_init(void) +{ + uncore_pci_uncores = kx8000_pci_uncores; + uncore_pci_driver = &kx8000_uncore_pci_driver; + + return 0; +} + +void kx8000_uncore_mmio_init(void) +{ + uncore_mmio_uncores = kx8000_mmio_uncores; +} + +static const struct zhaoxin_uncore_init_fun kx8000_uncore_init __initconst = { + .cpu_init = kx8000_uncore_cpu_init, + .pci_init = kx8000_uncore_pci_init, + .mmio_init = kx8000_uncore_mmio_init, +}; + +static const struct x86_cpu_id zhaoxin_uncore_match[] __initconst = { + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX5000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX6000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KH40000, &kh40000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX8000, &kx8000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX5000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX6000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KH40000, &kh40000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX8000, &kx8000_uncore_init), + {}, +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_uncore_match); + +static int __init zhaoxin_uncore_init(void) +{ + const struct x86_cpu_id *id = NULL; + struct zhaoxin_uncore_init_fun *uncore_init; + int pret = 0, cret = 0, mret = 0, ret; + + id = x86_match_cpu(zhaoxin_uncore_match); + if (!id) + return -ENODEV; + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return -ENODEV; + + pr_info("welcome to uncore.\n"); + + get_topology_number(); + get_topology_info(); + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + zx_gen_core_map(); + get_pcibus_limit(); + } + + uncore_init = (struct zhaoxin_uncore_init_fun *)id->driver_data; + + if (uncore_init->pci_init) { + pret = 
uncore_init->pci_init(); + if (!pret) + pret = uncore_pci_init(); + } + + if (uncore_init->cpu_init) { + uncore_init->cpu_init(); + cret = uncore_cpu_init(); + } + + if (uncore_init->mmio_init) { + uncore_init->mmio_init(); + mret = uncore_mmio_init(); + } + + if (cret && pret && mret) + return -ENODEV; + + ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, + "perf/x86/zhaoxin/uncore:online", + uncore_event_cpu_online, + uncore_event_cpu_offline); + if (ret) + goto err; + pr_info("uncore init success!\n"); + + return 0; + +err: + uncore_types_exit(uncore_msr_uncores); + uncore_types_exit(uncore_mmio_uncores); + uncore_pci_exit(); + pr_info("uncore init fail!\n"); + + return ret; +} +module_init(zhaoxin_uncore_init); + +static void __exit zhaoxin_uncore_exit(void) +{ + cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE); + uncore_types_exit(uncore_msr_uncores); + uncore_types_exit(uncore_mmio_uncores); + uncore_pci_exit(); +} +module_exit(zhaoxin_uncore_exit); diff --git a/arch/x86/events/zhaoxin/uncore.h b/arch/x86/events/zhaoxin/uncore.h new file mode 100644 index 000000000000..5d09696f8bc7 --- /dev/null +++ b/arch/x86/events/zhaoxin/uncore.h @@ -0,0 +1,371 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#include +#include +#include +#include + +#include +#include "../perf_event.h" + +#define ZHAOXIN_FAM7_KX5000 0x1b +#define ZHAOXIN_FAM7_KX6000 0x3b +#define ZHAOXIN_FAM7_KH40000 0x5b +#define ZHAOXIN_FAM7_KX8000 0x6b + + + +#define UNCORE_PMU_NAME_LEN 32 +#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC) + +#define UNCORE_FIXED_EVENT 0xff +#define UNCORE_PMC_IDX_MAX_GENERIC 4 +#define UNCORE_PMC_IDX_MAX_FIXED 1 +#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC + +#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1) + +#define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx) +#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) +#define UNCORE_PCI_DEV_IDX(data) (data & 0xff) + +struct zhaoxin_uncore_ops; +struct zhaoxin_uncore_pmu; +struct 
zhaoxin_uncore_box; +struct uncore_event_desc; + +struct zhaoxin_uncore_type { + const char *name; + int num_counters; + int num_boxes; + int perf_ctr_bits; + int fixed_ctr_bits; + unsigned int perf_ctr; + unsigned int event_ctl; + unsigned int event_mask; + unsigned int event_mask_ext; + unsigned int fixed_ctr; + unsigned int fixed_ctl; + unsigned int box_ctl; + union { + unsigned int msr_offset; + unsigned int mmio_offset; + }; + unsigned int num_shared_regs:8; + unsigned int single_fixed:1; + unsigned int pair_ctr_ctl:1; + unsigned int *msr_offsets; + struct event_constraint unconstrainted; + struct event_constraint *constraints; + struct zhaoxin_uncore_pmu *pmus; + struct zhaoxin_uncore_ops *ops; + struct uncore_event_desc *event_descs; + const struct attribute_group *attr_groups[4]; + struct pmu *pmu; /* for custom pmu ops */ +}; + +#define pmu_group attr_groups[0] +#define format_group attr_groups[1] +#define events_group attr_groups[2] + +struct zhaoxin_uncore_ops { + void (*init_box)(struct zhaoxin_uncore_box *box); + void (*exit_box)(struct zhaoxin_uncore_box *box); + void (*disable_box)(struct zhaoxin_uncore_box *box); + void (*enable_box)(struct zhaoxin_uncore_box *box); + void (*disable_event)(struct zhaoxin_uncore_box *box, struct perf_event *event); + void (*enable_event)(struct zhaoxin_uncore_box *box, struct perf_event *event); + u64 (*read_counter)(struct zhaoxin_uncore_box *box, struct perf_event *event); + int (*hw_config)(struct zhaoxin_uncore_box *box, struct perf_event *event); + struct event_constraint *(*get_constraint)(struct zhaoxin_uncore_box *box, + struct perf_event *event); + void (*put_constraint)(struct zhaoxin_uncore_box *box, struct perf_event *event); +}; + +struct zhaoxin_uncore_pmu { + struct pmu pmu; + char name[UNCORE_PMU_NAME_LEN]; + int pmu_idx; + int func_id; + bool registered; + atomic_t activeboxes; + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_box **boxes; +}; + +struct zhaoxin_uncore_extra_reg { + 
raw_spinlock_t lock; + u64 config, config1, config2; + atomic_t ref; +}; + +struct zhaoxin_uncore_box { + int pci_phys_id; + int package_id; /*Package ID */ + int cluster_id; + int subnode_id; + int n_active; /* number of active events */ + int n_events; + int cpu; /* cpu to collect events */ + unsigned long flags; + atomic_t refcnt; + struct perf_event *events[UNCORE_PMC_IDX_MAX]; + struct perf_event *event_list[UNCORE_PMC_IDX_MAX]; + struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX]; + unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)]; + u64 tags[UNCORE_PMC_IDX_MAX]; + struct pci_dev *pci_dev; + struct zhaoxin_uncore_pmu *pmu; + u64 hrtimer_duration; /* hrtimer timeout for this box */ + struct hrtimer hrtimer; + struct list_head list; + struct list_head active_list; + void __iomem *io_addr; + struct zhaoxin_uncore_extra_reg shared_regs[]; +}; + +#define UNCORE_BOX_FLAG_INITIATED 0 + +struct uncore_event_desc { + struct device_attribute attr; + const char *config; +}; + +struct hw_info { + u64 config_info; + u64 active_state; +}; + +ssize_t zx_uncore_event_show(struct device *dev, + struct device_attribute *attr, char *buf); + +#define ZHAOXIN_UNCORE_EVENT_DESC(_name, _config) \ +{ \ + .attr = __ATTR(_name, 0444, zx_uncore_event_show, NULL), \ + .config = _config, \ +} + +#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ +static ssize_t __uncore_##_var##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ +static struct device_attribute format_attr_##_var = \ + __ATTR(_name, 0444, __uncore_##_var##_show, NULL) + +static inline bool uncore_pmc_fixed(int idx) +{ + return idx == UNCORE_PMC_IDX_FIXED; +} + +static inline +unsigned int uncore_mmio_box_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->box_ctl + + box->pmu->type->mmio_offset * box->pmu->pmu_idx; +} + +static inline unsigned int 
uncore_pci_box_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->box_ctl; +} + +static inline unsigned int uncore_pci_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctl; +} + +static inline unsigned int uncore_pci_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr; +} + +static inline +unsigned int uncore_pci_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ + return idx * 4 + box->pmu->type->event_ctl; +} + +static inline +unsigned int uncore_pci_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ + if (!strncmp(box->pmu->type->name, "mc_", 3)) + return idx * 2 + box->pmu->type->perf_ctr; + else + return idx * 8 + box->pmu->type->perf_ctr; +} + +static inline unsigned int uncore_msr_box_offset(struct zhaoxin_uncore_box *box) +{ + struct zhaoxin_uncore_pmu *pmu = box->pmu; + + return pmu->type->msr_offsets ? + pmu->type->msr_offsets[pmu->pmu_idx] : + pmu->type->msr_offset * pmu->pmu_idx; +} + +static inline unsigned int uncore_msr_box_ctl(struct zhaoxin_uncore_box *box) +{ + if (!box->pmu->type->box_ctl) + return 0; + return box->pmu->type->box_ctl + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + if (!box->pmu->type->fixed_ctl) + return 0; + return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); +} + +static inline +unsigned int uncore_msr_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ + return box->pmu->type->event_ctl + + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + + uncore_msr_box_offset(box); +} + +static inline +unsigned int uncore_msr_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ + return box->pmu->type->perf_ctr + + (box->pmu->type->pair_ctr_ctl ? 
2 * idx : idx) + + uncore_msr_box_offset(box); +} + +static inline +unsigned int uncore_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + if (box->pci_dev) + return uncore_pci_fixed_ctl(box); + else + return uncore_msr_fixed_ctl(box); +} + +static inline +unsigned int uncore_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + if (box->pci_dev) + return uncore_pci_fixed_ctr(box); + else + return uncore_msr_fixed_ctr(box); +} + +static inline +unsigned int uncore_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ if (box->pci_dev || box->io_addr) + return uncore_pci_event_ctl(box, idx); + else + return uncore_msr_event_ctl(box, idx); +} + +static inline +unsigned int uncore_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ if (box->pci_dev || box->io_addr) + return uncore_pci_perf_ctr(box, idx); + else + return uncore_msr_perf_ctr(box, idx); +} + +static inline int uncore_perf_ctr_bits(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->perf_ctr_bits; +} + +static inline int uncore_fixed_ctr_bits(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr_bits; +} + +static inline int uncore_num_counters(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->num_counters; +} + +static inline void uncore_disable_box(struct zhaoxin_uncore_box *box) +{ + if (box->pmu->type->ops->disable_box) + box->pmu->type->ops->disable_box(box); +} + +static inline void uncore_enable_box(struct zhaoxin_uncore_box *box) +{ + if (box->pmu->type->ops->enable_box) + box->pmu->type->ops->enable_box(box); +} + +static inline void uncore_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + box->pmu->type->ops->disable_event(box, event); +} + +static inline void uncore_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + box->pmu->type->ops->enable_event(box, event); +} + +static inline u64 uncore_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + return box->pmu->type->ops->read_counter(box, 
event); +} + +static inline void uncore_box_init(struct zhaoxin_uncore_box *box) +{ + if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { + if (box->pmu->type->ops->init_box) + box->pmu->type->ops->init_box(box); + } +} + +static inline void uncore_box_exit(struct zhaoxin_uncore_box *box) +{ + if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { + if (box->pmu->type->ops->exit_box) + box->pmu->type->ops->exit_box(box); + } +} + +static inline bool uncore_box_is_fake(struct zhaoxin_uncore_box *box) +{ + return (box->package_id < 0); +} + +static inline struct zhaoxin_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) +{ + return container_of(event->pmu, struct zhaoxin_uncore_pmu, pmu); +} + +static inline struct zhaoxin_uncore_box *uncore_event_to_box(struct perf_event *event) +{ + return event->pmu_private; +} + + +static struct zhaoxin_uncore_box *uncore_pmu_to_box(struct zhaoxin_uncore_pmu *pmu, int cpu); +static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event); +static void uncore_mmio_exit_box(struct zhaoxin_uncore_box *box); +static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event); +static void uncore_pmu_start_hrtimer(struct zhaoxin_uncore_box *box); +static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box); +static void uncore_pmu_event_start(struct perf_event *event, int flags); +static void uncore_pmu_event_stop(struct perf_event *event, int flags); +static int uncore_pmu_event_add(struct perf_event *event, int flags); +static void uncore_pmu_event_del(struct perf_event *event, int flags); +static void uncore_pmu_event_read(struct perf_event *event); +static void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event); +struct event_constraint * +uncore_get_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event); +void uncore_put_constraint(struct zhaoxin_uncore_box *box, struct perf_event 
*event); +u64 uncore_shared_reg_config(struct zhaoxin_uncore_box *box, int idx); -- Gitee From 1894b1f34ded5b103e92ca71492cc9d333d173b6 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Tue, 12 Mar 2024 15:05:43 +0800 Subject: [PATCH 0215/2138] anolis: cgroup: fix compile error when only config CONFIG_CGROUPS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8517 When CONFIG_CGROUPS is enabled without CONFIG_CGROUP_WRITEBACK, it will output the following errors when compile: ./include/linux/backing-dev.h:394:51: error: ‘struct cgroup_subsys’ declared inside parameter list will not be visible outside of this definition or declaration [-Werror] 394 | static inline void insert_memcg_blkcg_link(struct cgroup_subsys *ss, | ^~~~~~~~~~~~~ ./include/linux/backing-dev.h:409:51: error: ‘struct cgroup_subsys’ declared inside parameter list will not be visible outside of this definition or declaration [-Werror] 409 | static inline void delete_memcg_blkcg_link(struct cgroup_subsys *ss, | ^~~~~~~~~~~~~ Fix it by explicitly declaring "struct cgroup_subsys" in this case. 
Signed-off-by: Joseph Qi Reviewed-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/2875 --- include/linux/backing-dev.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 506f89a99a6c..81adf07c9637 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -391,6 +391,8 @@ static inline void wb_blkcg_offline(struct cgroup_subsys_state *css) } #ifdef CONFIG_CGROUPS +struct cgroup_subsys; + static inline void insert_memcg_blkcg_link(struct cgroup_subsys *ss, struct list_head *tmp_links, struct css_set *cset) -- Gitee From bda926262743550abea2c0d53d40b10c2cf5930a Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Sat, 21 Oct 2023 10:01:37 +0800 Subject: [PATCH 0216/2138] erofs: don't warn MicroLZMA format anymore ANBZ: #8524 commit 798eecaea0f0366306cbc76986a83041a7e8669f upstream. The LZMA algorithm support has been landed for more than one year since Linux 5.16. Besides, the new XZ Utils 5.4 has been available in most Linux distributions. Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20231021020137.1646959-1-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/Kconfig | 7 ++----- fs/erofs/decompressor_lzma.c | 2 -- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig index f6dc961e6c2b..e540648dedc2 100644 --- a/fs/erofs/Kconfig +++ b/fs/erofs/Kconfig @@ -91,13 +91,10 @@ config EROFS_FS_ZIP_LZMA select XZ_DEC_MICROLZMA help Saying Y here includes support for reading EROFS file systems - containing LZMA compressed data, specifically called microLZMA. it - gives better compression ratios than the LZ4 algorithm, at the + containing LZMA compressed data, specifically called microLZMA. It + gives better compression ratios than the default LZ4 format, at the expense of more CPU overhead. 
- LZMA support is an experimental feature for now and so most file - systems will be readable without selecting this option. - If unsure, say N. config EROFS_FS_ZIP_DEFLATE diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c index ba4ec73f4aae..852dd8eac5df 100644 --- a/fs/erofs/decompressor_lzma.c +++ b/fs/erofs/decompressor_lzma.c @@ -96,8 +96,6 @@ int z_erofs_load_lzma_config(struct super_block *sb, return -EINVAL; } - erofs_info(sb, "EXPERIMENTAL MicroLZMA in use. Use at your own risk!"); - /* in case 2 z_erofs_load_lzma_config() race to avoid deadlock */ mutex_lock(&lzma_resize_mutex); -- Gitee From 0c9a65eb68afb560a6846dfe0efe662cf9b38846 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Thu, 26 Oct 2023 10:16:26 +0800 Subject: [PATCH 0217/2138] erofs: get rid of ROOT_NID() ANBZ: #8524 commit 6b8a113cae6cc517579a33ad484355c3e4b3d8e7 upstream. Let's open code this helper for simplicity. Signed-off-by: Ferry Meng Reviewed-by: Gao Xiang Reviewed-by: Yue Hu Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20231026021627.23284-1-mengferry@linux.alibaba.com Signed-off-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/internal.h | 2 -- fs/erofs/super.c | 6 +++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 787cc9ff9029..e1f0d6b0def4 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -221,8 +221,6 @@ struct erofs_buf { }; #define __EROFS_BUF_INITIALIZER ((struct erofs_buf){ .page = NULL }) -#define ROOT_NID(sb) ((sb)->root_nid) - #define erofs_blknr(sb, addr) ((addr) >> (sb)->s_blocksize_bits) #define erofs_blkoff(sb, addr) ((addr) & ((sb)->s_blocksize - 1)) #define erofs_pos(sb, blk) ((erofs_off_t)(blk) << (sb)->s_blocksize_bits) diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 113414e6f35b..a384d3cf7d2f 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -647,13 +647,13 @@ static int 
erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) xa_init(&sbi->managed_pslots); #endif - inode = erofs_iget(sb, ROOT_NID(sbi)); + inode = erofs_iget(sb, sbi->root_nid); if (IS_ERR(inode)) return PTR_ERR(inode); if (!S_ISDIR(inode->i_mode)) { erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)", - ROOT_NID(sbi), inode->i_mode); + sbi->root_nid, inode->i_mode); iput(inode); return -EINVAL; } @@ -683,7 +683,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) if (err) return err; - erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi)); + erofs_info(sb, "mounted with root inode @ nid %llu.", sbi->root_nid); return 0; } -- Gitee From c62c7de131d5273d52ae04ef7abcad29f902ef3c Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Thu, 26 Oct 2023 10:16:27 +0800 Subject: [PATCH 0218/2138] erofs: tidy up redundant includes ANBZ: #8524 commit f5deddce60b50b55bcafeebaab1408d203b0f204 upstream. - Remove unused includes like and ; - Move common includes into "internal.h". 
Signed-off-by: Ferry Meng Reviewed-by: Gao Xiang Reviewed-by: Yue Hu Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20231026021627.23284-2-mengferry@linux.alibaba.com Signed-off-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/data.c | 2 -- fs/erofs/decompressor.c | 1 - fs/erofs/decompressor_deflate.c | 1 - fs/erofs/decompressor_lzma.c | 1 - fs/erofs/internal.h | 2 ++ fs/erofs/super.c | 3 --- 6 files changed, 2 insertions(+), 8 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 19ab9bb3a9a0..3d9721b3faa8 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -5,9 +5,7 @@ * Copyright (C) 2021, Alibaba Cloud */ #include "internal.h" -#include #include -#include #include void erofs_unmap_metabuf(struct erofs_buf *buf) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index aa59788a61e6..15346683ee22 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -4,7 +4,6 @@ * https://www.huawei.com/ */ #include "compress.h" -#include #include #ifndef LZ4_DISTANCE_MAX /* history window size */ diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c index aac2c837ef35..b7064a0ed3b6 100644 --- a/fs/erofs/decompressor_deflate.c +++ b/fs/erofs/decompressor_deflate.c @@ -1,5 +1,4 @@ // SPDX-License-Identifier: GPL-2.0-or-later -#include #include #include "compress.h" diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c index 852dd8eac5df..2dd14f99c1dc 100644 --- a/fs/erofs/decompressor_lzma.c +++ b/fs/erofs/decompressor_lzma.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include -#include #include "compress.h" struct z_erofs_lzma { diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index e1f0d6b0def4..c69174675caf 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -8,8 +8,10 @@ #define __EROFS_INTERNAL_H #include +#include #include #include +#include #include #include #include diff --git a/fs/erofs/super.c 
b/fs/erofs/super.c index a384d3cf7d2f..6bdd4bb0ddc8 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -4,14 +4,11 @@ * https://www.huawei.com/ * Copyright (C) 2021, Alibaba Cloud */ -#include #include -#include #include #include #include #include -#include #include #include "xattr.h" -- Gitee From 477bcd659f84d98f2b42bbf0ef03dd7a0ad7e501 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Thu, 9 Nov 2023 19:18:22 +0800 Subject: [PATCH 0219/2138] erofs: simplify erofs_read_inode() ANBZ: #8524 commit 914fa861e3d7803c9bbafc229652c2a69edb8b60 upstream. After commit 1c7f49a76773 ("erofs: tidy up EROFS on-disk naming"), there is a unique `union erofs_inode_i_u` so that we could parse the union directly. Besides, it also replaces `inode->i_sb` with `sb` for simplicity. Signed-off-by: Ferry Meng Reviewed-by: Gao Xiang Reviewed-by: Yue Hu Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20231109111822.17944-1-mengferry@linux.alibaba.com Signed-off-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/inode.c | 98 +++++++++++++++++------------------------------- 1 file changed, 35 insertions(+), 63 deletions(-) diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 9e40bee3682f..4c9f84493421 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -15,11 +15,11 @@ static void *erofs_read_inode(struct erofs_buf *buf, struct erofs_sb_info *sbi = EROFS_SB(sb); struct erofs_inode *vi = EROFS_I(inode); const erofs_off_t inode_loc = erofs_iloc(inode); - erofs_blk_t blkaddr, nblks = 0; void *kaddr; struct erofs_inode_compact *dic; struct erofs_inode_extended *die, *copied = NULL; + union erofs_inode_i_u iu; unsigned int ifmt; int err; @@ -35,9 +35,8 @@ static void *erofs_read_inode(struct erofs_buf *buf, dic = kaddr + *ofs; ifmt = le16_to_cpu(dic->i_format); - if (ifmt & ~EROFS_I_ALL) { - erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu", + erofs_err(sb, "unsupported i_format %u of nid %llu", ifmt, vi->nid); err = 
-EOPNOTSUPP; goto err_out; @@ -45,7 +44,7 @@ static void *erofs_read_inode(struct erofs_buf *buf, vi->datalayout = erofs_inode_datalayout(ifmt); if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) { - erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu", + erofs_err(sb, "unsupported datalayout %u of nid %llu", vi->datalayout, vi->nid); err = -EOPNOTSUPP; goto err_out; @@ -82,40 +81,15 @@ static void *erofs_read_inode(struct erofs_buf *buf, vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount); inode->i_mode = le16_to_cpu(die->i_mode); - switch (inode->i_mode & S_IFMT) { - case S_IFREG: - case S_IFDIR: - case S_IFLNK: - vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr); - break; - case S_IFCHR: - case S_IFBLK: - inode->i_rdev = - new_decode_dev(le32_to_cpu(die->i_u.rdev)); - break; - case S_IFIFO: - case S_IFSOCK: - inode->i_rdev = 0; - break; - default: - goto bogusimode; - } + iu = die->i_u; i_uid_write(inode, le32_to_cpu(die->i_uid)); i_gid_write(inode, le32_to_cpu(die->i_gid)); set_nlink(inode, le32_to_cpu(die->i_nlink)); - - /* extended inode has its own timestamp */ + /* each extended inode has its own timestamp */ inode_set_ctime(inode, le64_to_cpu(die->i_mtime), le32_to_cpu(die->i_mtime_nsec)); inode->i_size = le64_to_cpu(die->i_size); - - /* total blocks for compressed files */ - if (erofs_inode_is_data_compressed(vi->datalayout)) - nblks = le32_to_cpu(die->i_u.compressed_blocks); - else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) - /* fill chunked inode summary info */ - vi->chunkformat = le16_to_cpu(die->i_u.c.format); kfree(copied); copied = NULL; break; @@ -125,49 +99,51 @@ static void *erofs_read_inode(struct erofs_buf *buf, vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount); inode->i_mode = le16_to_cpu(dic->i_mode); - switch (inode->i_mode & S_IFMT) { - case S_IFREG: - case S_IFDIR: - case S_IFLNK: - vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr); - break; - case S_IFCHR: - case S_IFBLK: - inode->i_rdev = - 
new_decode_dev(le32_to_cpu(dic->i_u.rdev)); - break; - case S_IFIFO: - case S_IFSOCK: - inode->i_rdev = 0; - break; - default: - goto bogusimode; - } + iu = dic->i_u; i_uid_write(inode, le16_to_cpu(dic->i_uid)); i_gid_write(inode, le16_to_cpu(dic->i_gid)); set_nlink(inode, le16_to_cpu(dic->i_nlink)); - /* use build time for compact inodes */ inode_set_ctime(inode, sbi->build_time, sbi->build_time_nsec); inode->i_size = le32_to_cpu(dic->i_size); - if (erofs_inode_is_data_compressed(vi->datalayout)) - nblks = le32_to_cpu(dic->i_u.compressed_blocks); - else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) - vi->chunkformat = le16_to_cpu(dic->i_u.c.format); break; default: - erofs_err(inode->i_sb, - "unsupported on-disk inode version %u of nid %llu", + erofs_err(sb, "unsupported on-disk inode version %u of nid %llu", erofs_inode_version(ifmt), vi->nid); err = -EOPNOTSUPP; goto err_out; } - if (vi->datalayout == EROFS_INODE_CHUNK_BASED) { + switch (inode->i_mode & S_IFMT) { + case S_IFREG: + case S_IFDIR: + case S_IFLNK: + vi->raw_blkaddr = le32_to_cpu(iu.raw_blkaddr); + break; + case S_IFCHR: + case S_IFBLK: + inode->i_rdev = new_decode_dev(le32_to_cpu(iu.rdev)); + break; + case S_IFIFO: + case S_IFSOCK: + inode->i_rdev = 0; + break; + default: + erofs_err(sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode, + vi->nid); + err = -EFSCORRUPTED; + goto err_out; + } + + /* total blocks for compressed files */ + if (erofs_inode_is_data_compressed(vi->datalayout)) { + nblks = le32_to_cpu(iu.compressed_blocks); + } else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) { + /* fill chunked inode summary info */ + vi->chunkformat = le16_to_cpu(iu.c.format); if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) { - erofs_err(inode->i_sb, - "unsupported chunk format %x of nid %llu", + erofs_err(sb, "unsupported chunk format %x of nid %llu", vi->chunkformat, vi->nid); err = -EOPNOTSUPP; goto err_out; @@ -190,10 +166,6 @@ static void *erofs_read_inode(struct erofs_buf *buf, inode->i_blocks = 
nblks << (sb->s_blocksize_bits - 9); return kaddr; -bogusimode: - erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu", - inode->i_mode, vi->nid); - err = -EFSCORRUPTED; err_out: DBG_BUGON(1); kfree(copied); -- Gitee From 7a92dcf9601c18f7660d9d5d9858ed380d0e6e70 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 17 Nov 2023 16:53:29 +0800 Subject: [PATCH 0220/2138] MAINTAINERS: erofs: add EROFS webpage ANBZ: #8524 commit 62b241efff99fc4d88a86f1c67c7516e31f432a3 upstream. Add a new `W:` field of the EROFS entry points to the documentation site at . In addition, update the in-tree documentation and Kconfig too. Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20231117085329.1624223-1-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- Documentation/filesystems/erofs.rst | 4 ++++ MAINTAINERS | 1 + fs/erofs/Kconfig | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/Documentation/filesystems/erofs.rst b/Documentation/filesystems/erofs.rst index f200d7874495..445224817823 100644 --- a/Documentation/filesystems/erofs.rst +++ b/Documentation/filesystems/erofs.rst @@ -91,6 +91,10 @@ compatibility checking tool (fsck.erofs), and a debugging tool (dump.erofs): - git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git +For more information, please also refer to the documentation site: + +- https://erofs.docs.kernel.org + Bugs and patches are welcome, please kindly help us and send to the following linux-erofs mailing list: diff --git a/MAINTAINERS b/MAINTAINERS index 24bc580c9cee..2fe7c02c3545 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7741,6 +7741,7 @@ R: Yue Hu R: Jeffle Xu L: linux-erofs@lists.ozlabs.org S: Maintained +W: https://erofs.docs.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git F: Documentation/ABI/testing/sysfs-fs-erofs F: Documentation/filesystems/erofs.rst diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig index 
e540648dedc2..1d318f85232d 100644 --- a/fs/erofs/Kconfig +++ b/fs/erofs/Kconfig @@ -21,7 +21,7 @@ config EROFS_FS performance under extremely memory pressure without extra cost. See the documentation at - for more details. + and the web pages at for more details. If unsure, say N. -- Gitee From bc228c1f5dc3bc28e0c8b66b4976edaba808e04e Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 6 Dec 2023 17:10:53 +0800 Subject: [PATCH 0221/2138] erofs: support I/O submission for sub-page compressed blocks ANBZ: #8524 commit 192351616a9dde686492bcb9d1e4895a1411a527 upstream. Add a basic I/O submission path first to support sub-page blocks: - Temporary short-lived pages will be used entirely; - In-place I/O pages can be used partially, but compressed pages need to be able to be mapped in contiguous virtual memory. As a start, currently cache decompression is explicitly disabled for sub-page blocks, which will be supported in the future. Reviewed-by: Yue Hu Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20231206091057.87027-2-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/zdata.c | 156 ++++++++++++++++++++++------------------------- 1 file changed, 74 insertions(+), 82 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 1c0e6167d8e7..1bbd76e5220c 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1435,86 +1435,85 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, z_erofs_decompressqueue_work(&io->u.work); } -static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, - unsigned int nr, - struct page **pagepool, - struct address_space *mc) +static void z_erofs_fill_bio_vec(struct bio_vec *bvec, + struct z_erofs_decompress_frontend *f, + struct z_erofs_pcluster *pcl, + unsigned int nr, + struct address_space *mc) { - const pgoff_t index = pcl->obj.index; gfp_t gfp = mapping_gfp_mask(mc); bool tocache = false; - + 
struct z_erofs_bvec *zbv = pcl->compressed_bvecs + nr; struct address_space *mapping; - struct page *oldpage, *page; - int justfound; + struct page *page, *oldpage; + int justfound, bs = i_blocksize(f->inode); + /* Except for inplace pages, the entire page can be used for I/Os */ + bvec->bv_offset = 0; + bvec->bv_len = PAGE_SIZE; repeat: - page = READ_ONCE(pcl->compressed_bvecs[nr].page); - oldpage = page; - - if (!page) + oldpage = READ_ONCE(zbv->page); + if (!oldpage) goto out_allocpage; - justfound = (unsigned long)page & 1UL; - page = (struct page *)((unsigned long)page & ~1UL); + justfound = (unsigned long)oldpage & 1UL; + page = (struct page *)((unsigned long)oldpage & ~1UL); + bvec->bv_page = page; + DBG_BUGON(z_erofs_is_shortlived_page(page)); /* - * preallocated cached pages, which is used to avoid direct reclaim - * otherwise, it will go inplace I/O path instead. + * Handle preallocated cached pages. We tried to allocate such pages + * without triggering direct reclaim. If allocation failed, inplace + * file-backed pages will be used instead. */ if (page->private == Z_EROFS_PREALLOCATED_PAGE) { - WRITE_ONCE(pcl->compressed_bvecs[nr].page, page); set_page_private(page, 0); + WRITE_ONCE(zbv->page, page); tocache = true; goto out_tocache; } - mapping = READ_ONCE(page->mapping); + mapping = READ_ONCE(page->mapping); /* - * file-backed online pages in plcuster are all locked steady, - * therefore it is impossible for `mapping' to be NULL. + * File-backed pages for inplace I/Os are all locked steady, + * therefore it is impossible for `mapping` to be NULL. 
*/ - if (mapping && mapping != mc) - /* ought to be unmanaged pages */ - goto out; - - /* directly return for shortlived page as well */ - if (z_erofs_is_shortlived_page(page)) - goto out; + if (mapping && mapping != mc) { + if (zbv->offset < 0) + bvec->bv_offset = round_up(-zbv->offset, bs); + bvec->bv_len = round_up(zbv->end, bs) - bvec->bv_offset; + return; + } lock_page(page); - /* only true if page reclaim goes wrong, should never happen */ DBG_BUGON(justfound && PagePrivate(page)); - /* the page is still in manage cache */ + /* the cached page is still in managed cache */ if (page->mapping == mc) { - WRITE_ONCE(pcl->compressed_bvecs[nr].page, page); - + WRITE_ONCE(zbv->page, page); + /* + * The cached page is still available but without a valid + * `->private` pcluster hint. Let's reconnect them. + */ if (!PagePrivate(page)) { - /* - * impossible to be !PagePrivate(page) for - * the current restriction as well if - * the page is already in compressed_bvecs[]. - */ DBG_BUGON(!justfound); - - justfound = 0; - set_page_private(page, (unsigned long)pcl); - SetPagePrivate(page); + /* compressed_bvecs[] already takes a ref */ + attach_page_private(page, pcl); + put_page(page); } - /* no need to submit io if it is already up-to-date */ + /* no need to submit if it is already up-to-date */ if (PageUptodate(page)) { unlock_page(page); - page = NULL; + bvec->bv_page = NULL; } - goto out; + return; } /* - * the managed page has been truncated, it's unsafe to - * reuse this one, let's allocate a new cache-managed page. + * It has been truncated, so it's unsafe to reuse this one. Let's + * allocate a new page for compressed data. 
*/ DBG_BUGON(page->mapping); DBG_BUGON(!justfound); @@ -1523,25 +1522,23 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, unlock_page(page); put_page(page); out_allocpage: - page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL); - if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page, - oldpage, page)) { - erofs_pagepool_add(pagepool, page); + page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); + if (oldpage != cmpxchg(&zbv->page, oldpage, page)) { + erofs_pagepool_add(&f->pagepool, page); cond_resched(); goto repeat; } + bvec->bv_page = page; out_tocache: - if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) { - /* turn into temporary page if fails (1 ref) */ + if (!tocache || bs != PAGE_SIZE || + add_to_page_cache_lru(page, mc, pcl->obj.index + nr, gfp)) { + /* turn into a temporary shortlived page (1 ref) */ set_page_private(page, Z_EROFS_SHORTLIVED_PAGE); - goto out; + return; } attach_page_private(page, pcl); - /* drop a refcount added by allocpage (then we have 2 refs here) */ + /* drop a refcount added by allocpage (then 2 refs in total here) */ put_page(page); - -out: /* the only exit (for tracing and debugging) */ - return page; } static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb, @@ -1596,7 +1593,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, qtail[JQ_BYPASS] = &pcl->next; } -static void z_erofs_decompressqueue_endio(struct bio *bio) +static void z_erofs_submissionqueue_endio(struct bio *bio) { struct z_erofs_decompressqueue *q = bio->bi_private; blk_status_t err = bio->bi_status; @@ -1608,7 +1605,6 @@ static void z_erofs_decompressqueue_endio(struct bio *bio) DBG_BUGON(PageUptodate(page)); DBG_BUGON(z_erofs_page_is_invalidated(page)); - if (erofs_page_is_managed(EROFS_SB(q->sb), page)) { if (!err) SetPageUptodate(page); @@ -1631,17 +1627,14 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, struct z_erofs_decompressqueue 
*q[NR_JOBQUEUES]; z_erofs_next_pcluster_t owned_head = f->owned_head; /* bio is NULL initially, so no need to initialize last_{index,bdev} */ - pgoff_t last_index; + erofs_off_t last_pa; struct block_device *last_bdev; unsigned int nr_bios = 0; struct bio *bio = NULL; unsigned long pflags; int memstall = 0; - /* - * if managed cache is enabled, bypass jobqueue is needed, - * no need to read from device for all pclusters in this queue. - */ + /* No need to read from device for pclusters in the bypass queue. */ q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL); q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg); @@ -1654,7 +1647,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, do { struct erofs_map_dev mdev; struct z_erofs_pcluster *pcl; - pgoff_t cur, end; + erofs_off_t cur, end; + struct bio_vec bvec; unsigned int i = 0; bool bypass = true; @@ -1673,18 +1667,14 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, }; (void)erofs_map_dev(sb, &mdev); - cur = erofs_blknr(sb, mdev.m_pa); - end = cur + pcl->pclusterpages; - + cur = mdev.m_pa; + end = cur + (pcl->pclusterpages << PAGE_SHIFT); do { - struct page *page; - - page = pickup_page_for_submission(pcl, i++, - &f->pagepool, mc); - if (!page) + z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc); + if (!bvec.bv_page) continue; - if (bio && (cur != last_index + 1 || + if (bio && (cur != last_pa || last_bdev != mdev.m_bdev)) { submit_bio_retry: submit_bio(bio); @@ -1695,7 +1685,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, bio = NULL; } - if (unlikely(PageWorkingset(page)) && !memstall) { + if (unlikely(PageWorkingset(bvec.bv_page)) && + !memstall) { psi_memstall_enter(&pflags); memstall = 1; } @@ -1703,23 +1694,24 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, if (!bio) { bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOIO); - bio->bi_end_io = z_erofs_decompressqueue_endio; - - last_bdev = 
mdev.m_bdev; - bio->bi_iter.bi_sector = (sector_t)cur << - (sb->s_blocksize_bits - 9); + bio->bi_end_io = z_erofs_submissionqueue_endio; + bio->bi_iter.bi_sector = cur >> 9; bio->bi_private = q[JQ_SUBMIT]; if (readahead) bio->bi_opf |= REQ_RAHEAD; ++nr_bios; + last_bdev = mdev.m_bdev; } - if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) + if (cur + bvec.bv_len > end) + bvec.bv_len = end - cur; + if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len, + bvec.bv_offset)) goto submit_bio_retry; - last_index = cur; + last_pa = cur + bvec.bv_len; bypass = false; - } while (++cur < end); + } while ((cur += bvec.bv_len) < end); if (!bypass) qtail[JQ_SUBMIT] = &pcl->next; -- Gitee From 7e5a6b5e26b06383a34441abec5fa056be677468 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 6 Dec 2023 17:10:54 +0800 Subject: [PATCH 0222/2138] erofs: record `pclustersize` in bytes instead of pages ANBZ: #8524 commit 54ed3fdd66055d073cb1cd2c6c65bbc0683c40cf upstream. Currently, compressed sizes are recorded in pages using `pclusterpages`, However, for tailpacking pclusters, `tailpacking_size` is used instead. This approach doesn't work when dealing with sub-page blocks. To address this, let's switch them to the unified `pclustersize` in bytes. 
Reviewed-by: Yue Hu Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20231206091057.87027-3-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/zdata.c | 64 ++++++++++++++++++++---------------------------- 1 file changed, 26 insertions(+), 38 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 1bbd76e5220c..5d5640173412 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -56,6 +56,9 @@ struct z_erofs_pcluster { /* L: total number of bvecs */ unsigned int vcnt; + /* I: pcluster size (compressed size) in bytes */ + unsigned int pclustersize; + /* I: page offset of start position of decompression */ unsigned short pageofs_out; @@ -70,14 +73,6 @@ struct z_erofs_pcluster { struct rcu_head rcu; }; - union { - /* I: physical cluster size in pages */ - unsigned short pclusterpages; - - /* I: tailpacking inline compressed size */ - unsigned short tailpacking_size; - }; - /* I: compression algorithm format */ unsigned char algorithmformat; @@ -115,9 +110,7 @@ static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl) static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl) { - if (z_erofs_is_inline_pcluster(pcl)) - return 1; - return pcl->pclusterpages; + return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT; } /* @@ -298,12 +291,12 @@ static int z_erofs_create_pcluster_pool(void) return 0; } -static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages) +static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size) { - int i; + unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT; + struct z_erofs_pcluster_slab *pcs = pcluster_pool; - for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) { - struct z_erofs_pcluster_slab *pcs = pcluster_pool + i; + for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) { struct z_erofs_pcluster *pcl; if (nrpages > pcs->maxpages) @@ -312,7 +305,7 @@ static struct 
z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages) pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS); if (!pcl) return ERR_PTR(-ENOMEM); - pcl->pclusterpages = nrpages; + pcl->pclustersize = size; return pcl; } return ERR_PTR(-EINVAL); @@ -559,6 +552,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) { struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode)); struct z_erofs_pcluster *pcl = fe->pcl; + unsigned int pclusterpages = z_erofs_pclusterpages(pcl); bool shouldalloc = z_erofs_should_alloc_cache(fe); bool standalone = true; /* @@ -572,10 +566,9 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED) return; - for (i = 0; i < pcl->pclusterpages; ++i) { - struct page *page; + for (i = 0; i < pclusterpages; ++i) { + struct page *page, *newpage; void *t; /* mark pages just found for debugging */ - struct page *newpage = NULL; /* the compressed page was loaded before */ if (READ_ONCE(pcl->compressed_bvecs[i].page)) @@ -585,6 +578,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) if (page) { t = (void *)((unsigned long)page | 1); + newpage = NULL; } else { /* I/O is needed, no possible to decompress directly */ standalone = false; @@ -592,9 +586,8 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) continue; /* - * try to use cached I/O if page allocation - * succeeds or fallback to in-place I/O instead - * to avoid any direct reclaim. + * Try cached I/O if allocation succeeds or fallback to + * in-place I/O instead to avoid any direct reclaim. 
*/ newpage = erofs_allocpage(&fe->pagepool, gfp); if (!newpage) @@ -626,6 +619,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, { struct z_erofs_pcluster *const pcl = container_of(grp, struct z_erofs_pcluster, obj); + unsigned int pclusterpages = z_erofs_pclusterpages(pcl); int i; DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); @@ -633,7 +627,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, * refcount of workgroup is now freezed as 0, * therefore no need to worry about available decompression users. */ - for (i = 0; i < pcl->pclusterpages; ++i) { + for (i = 0; i < pclusterpages; ++i) { struct page *page = pcl->compressed_bvecs[i].page; if (!page) @@ -657,6 +651,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp) { struct z_erofs_pcluster *pcl = folio_get_private(folio); + unsigned int pclusterpages = z_erofs_pclusterpages(pcl); bool ret; int i; @@ -669,7 +664,7 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp) goto out; DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); - for (i = 0; i < pcl->pclusterpages; ++i) { + for (i = 0; i < pclusterpages; ++i) { if (pcl->compressed_bvecs[i].page == &folio->page) { WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); ret = true; @@ -778,20 +773,20 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f) static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) { struct erofs_map_blocks *map = &fe->map; + struct super_block *sb = fe->inode->i_sb; bool ztailpacking = map->m_flags & EROFS_MAP_META; struct z_erofs_pcluster *pcl; struct erofs_workgroup *grp; int err; if (!(map->m_flags & EROFS_MAP_ENCODED) || - (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) { + (!ztailpacking && !erofs_blknr(sb, map->m_pa))) { DBG_BUGON(1); return -EFSCORRUPTED; } /* no available pcluster, let's allocate one */ - pcl = z_erofs_alloc_pcluster(ztailpacking ? 
1 : - map->m_plen >> PAGE_SHIFT); + pcl = z_erofs_alloc_pcluster(map->m_plen); if (IS_ERR(pcl)) return PTR_ERR(pcl); @@ -815,9 +810,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) if (ztailpacking) { pcl->obj.index = 0; /* which indicates ztailpacking */ - pcl->tailpacking_size = map->m_plen; } else { - pcl->obj.index = map->m_pa >> PAGE_SHIFT; + pcl->obj.index = erofs_blknr(sb, map->m_pa); grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj); if (IS_ERR(grp)) { @@ -1244,8 +1238,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, unsigned int pclusterpages = z_erofs_pclusterpages(pcl); const struct z_erofs_decompressor *decompressor = &erofs_decompressors[pcl->algorithmformat]; - unsigned int i, inputsize; - int err2; + int i, err2; struct page *page; bool overlapped; @@ -1282,18 +1275,13 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, if (err) goto out; - if (z_erofs_is_inline_pcluster(pcl)) - inputsize = pcl->tailpacking_size; - else - inputsize = pclusterpages * PAGE_SIZE; - err = decompressor->decompress(&(struct z_erofs_decompress_req) { .sb = be->sb, .in = be->compressed_pages, .out = be->decompressed_pages, .pageofs_in = pcl->pageofs_in, .pageofs_out = pcl->pageofs_out, - .inputsize = inputsize, + .inputsize = pcl->pclustersize, .outputsize = pcl->length, .alg = pcl->algorithmformat, .inplace_io = overlapped, @@ -1668,7 +1656,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, (void)erofs_map_dev(sb, &mdev); cur = mdev.m_pa; - end = cur + (pcl->pclusterpages << PAGE_SHIFT); + end = cur + pcl->pclustersize; do { z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc); if (!bvec.bv_page) -- Gitee From d2d47ce4e2a97d0e0a8d3bdb85ce7fb8ea261b93 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 6 Dec 2023 17:10:56 +0800 Subject: [PATCH 0223/2138] erofs: refine z_erofs_transform_plain() for sub-page block support ANBZ: #8524 commit 
1ca01520148af399899ed66af5c78330bb9ecaf2 upstream. Sub-page block support is still unusable even with previous commits if interlaced PLAIN pclusters exist. Such pclusters can be found if the fragment feature is enabled. This commit tries to handle "the head part" of interlaced PLAIN pclusters first: it was once explained in commit fdffc091e6f9 ("erofs: support interlaced uncompressed data for compressed files"). It uses a unique way for both shifted and interlaced PLAIN pclusters. As an added bonus, PLAIN pclusters larger than the block size is also supported now for the upcoming large lclusters. Reviewed-by: Yue Hu Reviewed-by: Chao Yu [ Gao Xiang: min_t() will be used instead of min() for 6.6 LTS. ] Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20231206091057.87027-5-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/decompressor.c | 81 ++++++++++++++++++++++++----------------- 1 file changed, 48 insertions(+), 33 deletions(-) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 15346683ee22..662d550256cf 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -314,43 +314,58 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq, struct page **pagepool) { - const unsigned int inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; - const unsigned int outpages = + const unsigned int nrpages_in = + PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT; + const unsigned int nrpages_out = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const unsigned int righthalf = min_t(unsigned int, rq->outputsize, - PAGE_SIZE - rq->pageofs_out); - const unsigned int lefthalf = rq->outputsize - righthalf; - const unsigned int interlaced_offset = - rq->alg == Z_EROFS_COMPRESSION_SHIFTED ? 
0 : rq->pageofs_out; - u8 *src; - - if (outpages > 2 && rq->alg == Z_EROFS_COMPRESSION_SHIFTED) { - DBG_BUGON(1); - return -EFSCORRUPTED; - } - - if (rq->out[0] == *rq->in) { - DBG_BUGON(rq->pageofs_out); - return 0; + const unsigned int bs = rq->sb->s_blocksize; + unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt; + u8 *kin; + + DBG_BUGON(rq->outputsize > rq->inputsize); + if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) { + cur = bs - (rq->pageofs_out & (bs - 1)); + pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK; + cur = min(cur, rq->outputsize); + if (cur && rq->out[0]) { + kin = kmap_local_page(rq->in[nrpages_in - 1]); + if (rq->out[0] == rq->in[nrpages_in - 1]) { + memmove(kin + rq->pageofs_out, kin + pi, cur); + flush_dcache_page(rq->out[0]); + } else { + memcpy_to_page(rq->out[0], rq->pageofs_out, + kin + pi, cur); + } + kunmap_local(kin); + } + rq->outputsize -= cur; } - src = kmap_local_page(rq->in[inpages - 1]) + rq->pageofs_in; - if (rq->out[0]) - memcpy_to_page(rq->out[0], rq->pageofs_out, - src + interlaced_offset, righthalf); - - if (outpages > inpages) { - DBG_BUGON(!rq->out[outpages - 1]); - if (rq->out[outpages - 1] != rq->in[inpages - 1]) { - memcpy_to_page(rq->out[outpages - 1], 0, src + - (interlaced_offset ? 
0 : righthalf), - lefthalf); - } else if (!interlaced_offset) { - memmove(src, src + righthalf, lefthalf); - flush_dcache_page(rq->in[inpages - 1]); - } + for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) { + insz = min_t(unsigned int, PAGE_SIZE - rq->pageofs_in, rq->outputsize); + rq->outputsize -= insz; + if (!rq->in[ni]) + continue; + kin = kmap_local_page(rq->in[ni]); + pi = 0; + do { + no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT; + po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK; + DBG_BUGON(no >= nrpages_out); + cnt = min_t(unsigned int, insz - pi, PAGE_SIZE - po); + if (rq->out[no] == rq->in[ni]) { + memmove(kin + po, + kin + rq->pageofs_in + pi, cnt); + flush_dcache_page(rq->out[no]); + } else if (rq->out[no]) { + memcpy_to_page(rq->out[no], po, + kin + rq->pageofs_in + pi, cnt); + } + pi += cnt; + } while (pi < insz); + kunmap_local(kin); } - kunmap_local(src); + DBG_BUGON(ni > nrpages_in); return 0; } -- Gitee From 7f5ebd6a2b6dcf11619d0e78d1c869d720f896df Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 6 Dec 2023 17:10:57 +0800 Subject: [PATCH 0224/2138] erofs: enable sub-page compressed block support ANBZ: #8524 commit 0ee3a0d59e007320167a2e9f4b8bf1304ada7771 upstream. Let's just disable cached decompression and inplace I/Os for partial pages as the first step in order to enable sub-page block initial support. In other words, currently it works primarily based on temporary short-lived pages. Don't expect too much in terms of performance. 
Reviewed-by: Yue Hu Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20231206091057.87027-6-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/inode.c | 6 ++++-- fs/erofs/zdata.c | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 4c9f84493421..9243a0cb6daf 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -250,8 +250,10 @@ static int erofs_fill_inode(struct inode *inode) if (erofs_inode_is_data_compressed(vi->datalayout)) { #ifdef CONFIG_EROFS_FS_ZIP - if (!erofs_is_fscache_mode(inode->i_sb) && - inode->i_sb->s_blocksize_bits == PAGE_SHIFT) { + if (!erofs_is_fscache_mode(inode->i_sb)) { + DO_ONCE_LITE_IF(inode->i_sb->s_blocksize != PAGE_SIZE, + erofs_info, inode->i_sb, + "EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!"); inode->i_mapping->a_ops = &z_erofs_aops; err = 0; goto out_unlock; diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 5d5640173412..8264936b8612 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -563,6 +563,8 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; unsigned int i; + if (i_blocksize(fe->inode) != PAGE_SIZE) + return; if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED) return; @@ -967,12 +969,12 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, struct inode *const inode = fe->inode; struct erofs_map_blocks *const map = &fe->map; const loff_t offset = page_offset(page); + const unsigned int bs = i_blocksize(inode); bool tight = true, exclusive; unsigned int cur, end, len, split; int err = 0; z_erofs_onlinepage_init(page); - split = 0; end = PAGE_SIZE; repeat: @@ -1021,7 +1023,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, * for inplace I/O or bvpage (should be processed in a strict order.) 
*/ tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE); - exclusive = (!cur && ((split <= 1) || tight)); + exclusive = (!cur && ((split <= 1) || (tight && bs == PAGE_SIZE))); if (cur) tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED); -- Gitee From faedbb68e01abbfb7f8fe21a8c8fe717dec0e565 Mon Sep 17 00:00:00 2001 From: Yue Hu Date: Thu, 21 Dec 2023 14:23:41 +0800 Subject: [PATCH 0225/2138] erofs: allow partially filled compressed bvecs ANBZ: #8524 commit 652cdaa886e3ad1d051e5aef733c5a546171362f upstream. In order to reduce memory footprints even further, let's allow partially filled compressed bvecs for readahead to bail out later. Signed-off-by: Yue Hu Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20231221062341.23901-1-zbestahu@gmail.com Signed-off-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/zdata.c | 36 +++++++++++++----------------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 8264936b8612..692c0c39be63 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1202,34 +1202,27 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be, struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i]; struct page *page = bvec->page; - /* compressed pages ought to be present before decompressing */ + /* compressed data ought to be valid before decompressing */ if (!page) { - DBG_BUGON(1); + err = -EIO; continue; } be->compressed_pages[i] = page; - if (z_erofs_is_inline_pcluster(pcl)) { + if (z_erofs_is_inline_pcluster(pcl) || + erofs_page_is_managed(EROFS_SB(be->sb), page)) { if (!PageUptodate(page)) err = -EIO; continue; } DBG_BUGON(z_erofs_page_is_invalidated(page)); - if (!z_erofs_is_shortlived_page(page)) { - if (erofs_page_is_managed(EROFS_SB(be->sb), page)) { - if (!PageUptodate(page)) - err = -EIO; - continue; - } - z_erofs_do_decompressed_bvec(be, bvec); - *overlapped = true; - } + if 
(z_erofs_is_shortlived_page(page)) + continue; + z_erofs_do_decompressed_bvec(be, bvec); + *overlapped = true; } - - if (err) - return err; - return 0; + return err; } static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, @@ -1238,7 +1231,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, struct erofs_sb_info *const sbi = EROFS_SB(be->sb); struct z_erofs_pcluster *pcl = be->pcl; unsigned int pclusterpages = z_erofs_pclusterpages(pcl); - const struct z_erofs_decompressor *decompressor = + const struct z_erofs_decompressor *decomp = &erofs_decompressors[pcl->algorithmformat]; int i, err2; struct page *page; @@ -1274,10 +1267,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, err2 = z_erofs_parse_in_bvecs(be, &overlapped); if (err2) err = err2; - if (err) - goto out; - - err = decompressor->decompress(&(struct z_erofs_decompress_req) { + if (!err) + err = decomp->decompress(&(struct z_erofs_decompress_req) { .sb = be->sb, .in = be->compressed_pages, .out = be->decompressed_pages, @@ -1291,7 +1282,6 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, .fillgaps = pcl->multibases, }, be->pagepool); -out: /* must handle all compressed pages before actual file pages */ if (z_erofs_is_inline_pcluster(pcl)) { page = pcl->compressed_bvecs[0].page; @@ -1302,7 +1292,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, /* consider shortlived pages added when decompressing */ page = be->compressed_pages[i]; - if (erofs_page_is_managed(sbi, page)) + if (!page || erofs_page_is_managed(sbi, page)) continue; (void)z_erofs_put_shortlivedpage(be->pagepool, page); WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); -- Gitee From 0b35f3bb4ce6fbf7e05de99fee12a72a476adeed Mon Sep 17 00:00:00 2001 From: Chunhai Guo Date: Wed, 3 Jan 2024 05:32:02 -0700 Subject: [PATCH 0226/2138] erofs: make erofs_{err,info}() support NULL sb parameter ANBZ: #8524 
commit aa12a790d31be14b289d5a2c6f41ca535fcc7841 upstream. Make erofs_err() and erofs_info() support NULL sb parameter for more general usage. Suggested-by: Gao Xiang Signed-off-by: Chunhai Guo Link: https://lore.kernel.org/r/20240103123202.3054718-1-guochunhai@vivo.com Reviewed-by: Jingbo Xu Reviewed-by: Gao Xiang Signed-off-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/super.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 6bdd4bb0ddc8..649f5a2d9a85 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -27,7 +27,10 @@ void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...) vaf.fmt = fmt; vaf.va = &args; - pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf); + if (sb) + pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf); + else + pr_err("%s: %pV", func, &vaf); va_end(args); } @@ -41,7 +44,10 @@ void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...) vaf.fmt = fmt; vaf.va = &args; - pr_info("(device %s): %pV", sb->s_id, &vaf); + if (sb) + pr_info("(device %s): %pV", sb->s_id, &vaf); + else + pr_info("%pV", &vaf); va_end(args); } -- Gitee From 462530424d73825f549af513311b7d73b3c6bb70 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 15 Jan 2024 22:46:35 +0800 Subject: [PATCH 0227/2138] erofs: Don't use certain unnecessary folio_*() functions ANBZ: #8524 commit 2b872b0f466d2acb4491da845c66b49246d5cdf9 upstream. Filesystems should use folio->index and folio->mapping, instead of folio_index(folio), folio_mapping() and folio_file_mapping() since they know that it's in the pagecache. 
Change this automagically with: perl -p -i -e 's/folio_mapping[(]([^)]*)[)]/\1->mapping/g' fs/erofs/*.c perl -p -i -e 's/folio_file_mapping[(]([^)]*)[)]/\1->mapping/g' fs/erofs/*.c perl -p -i -e 's/folio_index[(]([^)]*)[)]/\1->index/g' fs/erofs/*.c Reported-by: Matthew Wilcox Signed-off-by: David Howells Reviewed-by: Jeff Layton Cc: Chao Yu Cc: Yue Hu Cc: Jeffle Xu Cc: linux-erofs@lists.ozlabs.org Cc: linux-fsdevel@vger.kernel.org Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240115144635.1931422-1-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/fscache.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index afc37c9029ce..943ce796c0fc 100644 --- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -178,10 +178,10 @@ static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie, static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio) { int ret; - struct erofs_fscache *ctx = folio_mapping(folio)->host->i_private; + struct erofs_fscache *ctx = folio->mapping->host->i_private; struct erofs_fscache_request *req; - req = erofs_fscache_req_alloc(folio_mapping(folio), + req = erofs_fscache_req_alloc(folio->mapping, folio_pos(folio), folio_size(folio)); if (IS_ERR(req)) { folio_unlock(folio); @@ -289,7 +289,7 @@ static int erofs_fscache_read_folio(struct file *file, struct folio *folio) struct erofs_fscache_request *req; int ret; - req = erofs_fscache_req_alloc(folio_mapping(folio), + req = erofs_fscache_req_alloc(folio->mapping, folio_pos(folio), folio_size(folio)); if (IS_ERR(req)) { folio_unlock(folio); -- Gitee From 190a834c43d2ca6f68ba80023d431ece1a773769 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Wed, 24 Jan 2024 11:19:45 +0800 Subject: [PATCH 0228/2138] erofs: get rid of unneeded GFP_NOFS ANBZ: #8524 commit 97cf5d53b4812dcb52c13fda700dad5aa8d3446c upstream. 
Clean up some leftovers since there is no way for EROFS to be called again from a reclaim context. Signed-off-by: Jingbo Xu Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20240124031945.130782-1-jefflexu@linux.alibaba.com Signed-off-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/fscache.c | 2 +- fs/erofs/inode.c | 2 +- fs/erofs/utils.c | 2 +- fs/erofs/zdata.c | 8 ++++---- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index 943ce796c0fc..122a4753ecea 100644 --- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -473,7 +473,7 @@ static struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb inode->i_size = OFFSET_MAX; inode->i_mapping->a_ops = &erofs_fscache_meta_aops; - mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); + mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL); inode->i_blkbits = EROFS_SB(sb)->blkszbits; inode->i_private = ctx; diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 9243a0cb6daf..5372dbc27e2c 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -60,7 +60,7 @@ static void *erofs_read_inode(struct erofs_buf *buf, } else { const unsigned int gotten = sb->s_blocksize - *ofs; - copied = kmalloc(vi->inode_isize, GFP_NOFS); + copied = kmalloc(vi->inode_isize, GFP_KERNEL); if (!copied) { err = -ENOMEM; goto err_out; diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c index 4256a85719a1..603ded4db58e 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/utils.c @@ -81,7 +81,7 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb, repeat: xa_lock(&sbi->managed_pslots); pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index, - NULL, grp, GFP_NOFS); + NULL, grp, GFP_KERNEL); if (pre) { if (xa_is_err(pre)) { pre = ERR_PTR(xa_err(pre)); diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 692c0c39be63..583c062cd0e4 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -230,7 +230,7 @@ static int 
z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter, struct page *nextpage = *candidate_bvpage; if (!nextpage) { - nextpage = erofs_allocpage(pagepool, GFP_NOFS); + nextpage = erofs_allocpage(pagepool, GFP_KERNEL); if (!nextpage) return -ENOMEM; set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE); @@ -302,7 +302,7 @@ static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size) if (nrpages > pcs->maxpages) continue; - pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS); + pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL); if (!pcl) return ERR_PTR(-ENOMEM); pcl->pclustersize = size; @@ -694,7 +694,7 @@ static void z_erofs_cache_invalidate_folio(struct folio *folio, DBG_BUGON(stop > folio_size(folio) || stop < length); if (offset == 0 && stop == folio_size(folio)) - while (!z_erofs_cache_release_folio(folio, GFP_NOFS)) + while (!z_erofs_cache_release_folio(folio, 0)) cond_resched(); } @@ -713,7 +713,7 @@ int erofs_init_managed_cache(struct super_block *sb) set_nlink(inode, 1); inode->i_size = OFFSET_MAX; inode->i_mapping->a_ops = &z_erofs_cache_aops; - mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); + mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL); EROFS_SB(sb)->managed_cache = inode; return 0; } -- Gitee From d435b3f33a2ff394946fe4d852d41d97bb62d65c Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 25 Jan 2024 20:00:39 +0800 Subject: [PATCH 0229/2138] erofs: fix infinite loop due to a race of filling compressed_bvecs ANBZ: #8524 commit cc4b2dd95f0d1eba8c691b36e8f4d1795582f1ff upstream. I encountered a race issue after lengthy (~594647 secs) stress tests on a 64k-page arm64 VM with several 4k-block EROFS images. The timing is like below: z_erofs_try_inplace_io z_erofs_fill_bio_vec cmpxchg(&compressed_bvecs[].page, NULL, ..) [access bufvec] compressed_bvecs[] = *bvec; Previously, z_erofs_submit_queue() just accessed bufvec->page only, so other fields in bufvec didn't matter. 
After the subpage block support is landed, .offset and .end can be used too, but filling bufvec isn't an atomic operation which can cause inconsistency. Let's use a spinlock to keep the atomicity of each bufvec. More specifically, just reuse the existing spinlock `pcl->obj.lockref.lock` since it's rarely used (also it takes a short time if even used) as long as the pcluster has a reference. Fixes: 192351616a9d ("erofs: support I/O submission for sub-page compressed blocks") Signed-off-by: Gao Xiang Reviewed-by: Yue Hu Reviewed-by: Sandeep Dhavale Link: https://lore.kernel.org/r/20240125120039.3228103-1-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/zdata.c | 74 +++++++++++++++++++++++++----------------------- 1 file changed, 38 insertions(+), 36 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 583c062cd0e4..c1c77166b30f 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -563,21 +563,19 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; unsigned int i; - if (i_blocksize(fe->inode) != PAGE_SIZE) - return; - if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED) + if (i_blocksize(fe->inode) != PAGE_SIZE || + fe->mode < Z_EROFS_PCLUSTER_FOLLOWED) return; for (i = 0; i < pclusterpages; ++i) { struct page *page, *newpage; void *t; /* mark pages just found for debugging */ - /* the compressed page was loaded before */ + /* Inaccurate check w/o locking to avoid unneeded lookups */ if (READ_ONCE(pcl->compressed_bvecs[i].page)) continue; page = find_get_page(mc, pcl->obj.index + i); - if (page) { t = (void *)((unsigned long)page | 1); newpage = NULL; @@ -597,9 +595,13 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE); t = (void *)((unsigned long)newpage | 1); } - - if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t)) + 
spin_lock(&pcl->obj.lockref.lock); + if (!pcl->compressed_bvecs[i].page) { + pcl->compressed_bvecs[i].page = t; + spin_unlock(&pcl->obj.lockref.lock); continue; + } + spin_unlock(&pcl->obj.lockref.lock); if (page) put_page(page); @@ -718,31 +720,25 @@ int erofs_init_managed_cache(struct super_block *sb) return 0; } -static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe, - struct z_erofs_bvec *bvec) -{ - struct z_erofs_pcluster *const pcl = fe->pcl; - - while (fe->icur > 0) { - if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page, - NULL, bvec->page)) { - pcl->compressed_bvecs[fe->icur] = *bvec; - return true; - } - } - return false; -} - /* callers must be with pcluster lock held */ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe, struct z_erofs_bvec *bvec, bool exclusive) { + struct z_erofs_pcluster *pcl = fe->pcl; int ret; if (exclusive) { /* give priority for inplaceio to use file pages first */ - if (z_erofs_try_inplace_io(fe, bvec)) + spin_lock(&pcl->obj.lockref.lock); + while (fe->icur > 0) { + if (pcl->compressed_bvecs[--fe->icur].page) + continue; + pcl->compressed_bvecs[fe->icur] = *bvec; + spin_unlock(&pcl->obj.lockref.lock); return 0; + } + spin_unlock(&pcl->obj.lockref.lock); + /* otherwise, check if it can be used as a bvpage */ if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED && !fe->candidate_bvpage) @@ -1423,23 +1419,26 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, { gfp_t gfp = mapping_gfp_mask(mc); bool tocache = false; - struct z_erofs_bvec *zbv = pcl->compressed_bvecs + nr; + struct z_erofs_bvec zbv; struct address_space *mapping; - struct page *page, *oldpage; + struct page *page; int justfound, bs = i_blocksize(f->inode); /* Except for inplace pages, the entire page can be used for I/Os */ bvec->bv_offset = 0; bvec->bv_len = PAGE_SIZE; repeat: - oldpage = READ_ONCE(zbv->page); - if (!oldpage) + spin_lock(&pcl->obj.lockref.lock); + zbv = pcl->compressed_bvecs[nr]; + page = zbv.page; + justfound = 
(unsigned long)page & 1UL; + page = (struct page *)((unsigned long)page & ~1UL); + pcl->compressed_bvecs[nr].page = page; + spin_unlock(&pcl->obj.lockref.lock); + if (!page) goto out_allocpage; - justfound = (unsigned long)oldpage & 1UL; - page = (struct page *)((unsigned long)oldpage & ~1UL); bvec->bv_page = page; - DBG_BUGON(z_erofs_is_shortlived_page(page)); /* * Handle preallocated cached pages. We tried to allocate such pages @@ -1448,7 +1447,6 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, */ if (page->private == Z_EROFS_PREALLOCATED_PAGE) { set_page_private(page, 0); - WRITE_ONCE(zbv->page, page); tocache = true; goto out_tocache; } @@ -1459,9 +1457,9 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, * therefore it is impossible for `mapping` to be NULL. */ if (mapping && mapping != mc) { - if (zbv->offset < 0) - bvec->bv_offset = round_up(-zbv->offset, bs); - bvec->bv_len = round_up(zbv->end, bs) - bvec->bv_offset; + if (zbv.offset < 0) + bvec->bv_offset = round_up(-zbv.offset, bs); + bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset; return; } @@ -1471,7 +1469,6 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, /* the cached page is still in managed cache */ if (page->mapping == mc) { - WRITE_ONCE(zbv->page, page); /* * The cached page is still available but without a valid * `->private` pcluster hint. Let's reconnect them. 
@@ -1503,11 +1500,15 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, put_page(page); out_allocpage: page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); - if (oldpage != cmpxchg(&zbv->page, oldpage, page)) { + spin_lock(&pcl->obj.lockref.lock); + if (pcl->compressed_bvecs[nr].page) { erofs_pagepool_add(&f->pagepool, page); + spin_unlock(&pcl->obj.lockref.lock); cond_resched(); goto repeat; } + pcl->compressed_bvecs[nr].page = page; + spin_unlock(&pcl->obj.lockref.lock); bvec->bv_page = page; out_tocache: if (!tocache || bs != PAGE_SIZE || @@ -1685,6 +1686,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, if (cur + bvec.bv_len > end) bvec.bv_len = end - cur; + DBG_BUGON(bvec.bv_len < sb->s_blocksize); if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len, bvec.bv_offset)) goto submit_bio_retry; -- Gitee From c9e4fb223be6bfe30052fd40f33716b98f0c28b7 Mon Sep 17 00:00:00 2001 From: Chunhai Guo Date: Fri, 26 Jan 2024 22:01:42 +0800 Subject: [PATCH 0230/2138] erofs: relaxed temporary buffers allocation on readahead ANBZ: #8524 commit d9281660ff3ffb4a05302b485cc59a87e709aefc upstream. Even with inplace decompression, sometimes very few temporary buffers may be still needed for a single decompression shot (e.g. 16 pages for 64k sliding window or 4 pages for 16k sliding window). In low-memory scenarios, it would be better to try to allocate with GFP_NOWAIT on readahead first. That can help reduce the time spent on page allocation under durative memory pressure. 
Here are detailed performance numbers under multi-app launch benchmark workload [1] on ARM64 Android devices (8-core CPU and 8GB of memory) running a 5.15 LTS kernel with EROFS of 4k pclusters: +----------------------------------------------+ | LZ4 | vanilla | patched | diff | |----------------+---------+---------+---------| | Average (ms) | 3364 | 2684 | -20.21% | [64k sliding window] |----------------+---------+---------+---------| | Average (ms) | 2079 | 1610 | -22.56% | [16k sliding window] +----------------------------------------------+ The total size of system images for 4k pclusters is almost unchanged: (64k sliding window) 9,117,044 KB (16k sliding window) 9,113,096 KB Therefore, in addition to switch the sliding window from 64k to 16k, after applying this patch, it can eventually save 52.14% (3364 -> 1610) on average with no memory reservation. That is particularly useful for embedded devices with limited resources. [1] https://lore.kernel.org/r/20240109074143.4138783-1-guochunhai@vivo.com Suggested-by: Gao Xiang Signed-off-by: Chunhai Guo Signed-off-by: Gao Xiang Reviewed-by: Yue Hu Link: https://lore.kernel.org/r/20240126140142.201718-1-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/compress.h | 5 ++--- fs/erofs/decompressor.c | 5 +++-- fs/erofs/decompressor_deflate.c | 19 +++++++++++++------ fs/erofs/decompressor_lzma.c | 17 ++++++++++++----- fs/erofs/zdata.c | 16 ++++++++++++---- 5 files changed, 42 insertions(+), 20 deletions(-) diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h index 279933e007d2..7cc5841577b2 100644 --- a/fs/erofs/compress.h +++ b/fs/erofs/compress.h @@ -11,13 +11,12 @@ struct z_erofs_decompress_req { struct super_block *sb; struct page **in, **out; - unsigned short pageofs_in, pageofs_out; unsigned int inputsize, outputsize; - /* indicate the algorithm will be used for decompression */ - unsigned int alg; + unsigned int alg; /* the algorithm for decompression 
*/ bool inplace_io, partial_decoding, fillgaps; + gfp_t gfp; /* allocation flags for extra temporary buffers */ }; struct z_erofs_decompressor { diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 662d550256cf..5203f399bb4e 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -111,8 +111,9 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx, victim = availables[--top]; get_page(victim); } else { - victim = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + victim = erofs_allocpage(pagepool, rq->gfp); + if (!victim) + return -ENOMEM; set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE); } rq->out[i] = victim; diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c index b7064a0ed3b6..26350c5b040e 100644 --- a/fs/erofs/decompressor_deflate.c +++ b/fs/erofs/decompressor_deflate.c @@ -98,7 +98,7 @@ int z_erofs_load_deflate_config(struct super_block *sb, } int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, - struct page **pagepool) + struct page **pgpl) { const unsigned int nrpages_out = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; @@ -161,8 +161,12 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs); outsz -= strm->z.avail_out; if (!rq->out[no]) { - rq->out[no] = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + rq->out[no] = erofs_allocpage(pgpl, rq->gfp); + if (!rq->out[no]) { + kout = NULL; + err = -ENOMEM; + break; + } set_page_private(rq->out[no], Z_EROFS_SHORTLIVED_PAGE); } @@ -214,8 +218,11 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb), rq->in[j])); - tmppage = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + tmppage = erofs_allocpage(pgpl, rq->gfp); + if (!tmppage) { + err = -ENOMEM; + goto failed; + } set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); copy_highpage(tmppage, rq->in[j]); 
rq->in[j] = tmppage; @@ -233,7 +240,7 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, break; } } - +failed: if (zlib_inflateEnd(&strm->z) != Z_OK && !err) err = -EIO; if (kout) diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c index 2dd14f99c1dc..6ca357d83cfa 100644 --- a/fs/erofs/decompressor_lzma.c +++ b/fs/erofs/decompressor_lzma.c @@ -148,7 +148,7 @@ int z_erofs_load_lzma_config(struct super_block *sb, } int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, - struct page **pagepool) + struct page **pgpl) { const unsigned int nrpages_out = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; @@ -215,8 +215,11 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, PAGE_SIZE - pageofs); outlen -= strm->buf.out_size; if (!rq->out[no] && rq->fillgaps) { /* deduped */ - rq->out[no] = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + rq->out[no] = erofs_allocpage(pgpl, rq->gfp); + if (!rq->out[no]) { + err = -ENOMEM; + break; + } set_page_private(rq->out[no], Z_EROFS_SHORTLIVED_PAGE); } @@ -258,8 +261,11 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb), rq->in[j])); - tmppage = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + tmppage = erofs_allocpage(pgpl, rq->gfp); + if (!tmppage) { + err = -ENOMEM; + goto failed; + } set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); copy_highpage(tmppage, rq->in[j]); rq->in[j] = tmppage; @@ -277,6 +283,7 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, break; } } +failed: if (no < nrpages_out && strm->buf.out) kunmap(rq->out[no]); if (ni < nrpages_in) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index c1c77166b30f..ff0aa72b0db3 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -82,6 +82,9 @@ struct z_erofs_pcluster { /* L: indicate several pageofs_outs or not */ bool multibases; + /* L: whether extra buffer allocations are best-effort */ + bool 
besteffort; + /* A: compressed bvecs (can be cached or inplaced pages) */ struct z_erofs_bvec compressed_bvecs[]; }; @@ -960,7 +963,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page, } static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, - struct page *page) + struct page *page, bool ra) { struct inode *const inode = fe->inode; struct erofs_map_blocks *const map = &fe->map; @@ -1010,6 +1013,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, err = z_erofs_pcluster_begin(fe); if (err) goto out; + fe->pcl->besteffort |= !ra; } /* @@ -1276,6 +1280,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, .inplace_io = overlapped, .partial_decoding = pcl->partial, .fillgaps = pcl->multibases, + .gfp = pcl->besteffort ? + GFP_KERNEL | __GFP_NOFAIL : + GFP_NOWAIT | __GFP_NORETRY }, be->pagepool); /* must handle all compressed pages before actual file pages */ @@ -1318,6 +1325,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, pcl->length = 0; pcl->partial = true; pcl->multibases = false; + pcl->besteffort = false; pcl->bvset.nextpage = NULL; pcl->vcnt = 0; @@ -1787,7 +1795,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, if (PageUptodate(page)) unlock_page(page); else - (void)z_erofs_do_read_page(f, page); + (void)z_erofs_do_read_page(f, page, !!rac); put_page(page); } @@ -1808,7 +1816,7 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio) f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT; z_erofs_pcluster_readmore(&f, NULL, true); - err = z_erofs_do_read_page(&f, &folio->page); + err = z_erofs_do_read_page(&f, &folio->page, false); z_erofs_pcluster_readmore(&f, NULL, false); z_erofs_pcluster_end(&f); @@ -1849,7 +1857,7 @@ static void z_erofs_readahead(struct readahead_control *rac) folio = head; head = folio_get_private(folio); - err = z_erofs_do_read_page(&f, &folio->page); + err = 
z_erofs_do_read_page(&f, &folio->page, true); if (err && err != -EINTR) erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu", folio->index, EROFS_I(inode)->nid); -- Gitee From 947829a64f644196f4fac339492d75dcff3d9bf0 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Mon, 4 Mar 2024 11:53:39 +0800 Subject: [PATCH 0231/2138] erofs: fix uninitialized page cache reported by KMSAN ANBZ: #8524 commit 893e5e9b7369a02e7ceaa6d98db6739162005b03 upstream. syzbot reports a KMSAN reproducer [1] which generates a crafted filesystem image and causes IMA to read uninitialized page cache. Later, (rq->outputsize > rq->inputsize) will be formally supported after either large uncompressed pclusters (> block size) or big lclusters are landed. However, currently there is no way to generate such filesystems by using mkfs.erofs. Thus, let's mark this condition as unsupported for now. [1] https://lore.kernel.org/r/0000000000002be12a0611ca7ff8@google.com Reported-and-tested-by: syzbot+7bc44a489f0ef0670bd5@syzkaller.appspotmail.com Fixes: 1ca01520148a ("erofs: refine z_erofs_transform_plain() for sub-page block support") Reviewed-by: Sandeep Dhavale Reviewed-by: Yue Hu Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240304035339.425857-1-hsiangkao@linux.alibaba.com Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2880 --- fs/erofs/decompressor.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 5203f399bb4e..fce41d4875bf 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -323,7 +323,8 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq, unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt; u8 *kin; - DBG_BUGON(rq->outputsize > rq->inputsize); + if (rq->outputsize > rq->inputsize) + return -EOPNOTSUPP; if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) { cur = bs - (rq->pageofs_out & (bs - 1)); pi = (rq->pageofs_in + 
rq->inputsize - cur) & ~PAGE_MASK; -- Gitee From 09f21ba60107fc6587c0365e28bd3b7b2aa51dc1 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Tue, 31 Jan 2023 17:33:28 +0800 Subject: [PATCH 0232/2138] anolis: mm: introduce vm_insert_page(s)_mkspecial ANBZ: #8525 This adds the ability to insert anonymous pages or file pages, used for direct IO or buffer IO respectively, to a user VM. The intention behind this is to facilitate mapping pages in IO requests to user space, which is usually the backend of remote block device. This integrates the advantage of vm_insert_pages (batching the pmd lock), and eliminates the overhead of remap_pfn_range (track_pfn_remap), since the pages to be inserted should always be ram. NOTE that it is the caller's responsibility to ensure the validity of pages to be inserted, i.e., that such pages are used for IO requests. Depending on this premise, such pages can be inserted as special PTE, without increasing the page refcount and mapcount. On the other hand, the special mapping should be carefully managed (e.g., zapped) when the IO request is done. 
Signed-off-by: Xu Yu Reviewed-by: Gang Deng Acked-by: Joseph Qi Signed-off-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2876 --- include/linux/mm.h | 4 ++ mm/memory.c | 170 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 174 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index b6a4d6471b4a..45d605c57bad 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3467,6 +3467,10 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num); +int vm_insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page *page); +int vm_insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num); int vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num); int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, diff --git a/mm/memory.c b/mm/memory.c index 742c2f65c2c8..bef28bd53831 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2575,6 +2575,176 @@ int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long } EXPORT_SYMBOL(vm_iomap_memory); +#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL +static int insert_page_into_pte_locked_mkspecial(struct mm_struct *mm, pte_t *pte, + unsigned long addr, struct page *page, pgprot_t prot) +{ + /* + * The page to be inserted should be either anonymous page or file page. + * + * In general, the anonymous page used in dio should be pinned, while + * the file page used in buffer IO is either locked (read) or writeback + * (sync). On the other hand, file page used in IO metadata read (e.g., + * ext4_get_inode_loc) can be unlocked, and the buffer_head is locked + * instead. 
+ * + * Finally, it is the caller's responsibility to ensure the validity of + * pages to be inserted, i.e., such pages are used for IO requests. + */ + if (!PageAnon(page) && !page_is_file_lru(page)) + return -EINVAL; + + flush_dcache_page(page); + + if (!pte_none(*pte)) + return -EBUSY; + set_pte_at(mm, addr, pte, pte_mkspecial(mk_pte(page, prot))); + return 0; +} + +static int insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page *page, pgprot_t prot) +{ + struct mm_struct *mm = vma->vm_mm; + int retval; + pte_t *pte; + spinlock_t *ptl; + + retval = -ENOMEM; + pte = get_locked_pte(mm, addr, &ptl); + if (!pte) + goto out; + retval = insert_page_into_pte_locked_mkspecial(mm, pte, addr, page, prot); + pte_unmap_unlock(pte, ptl); +out: + return retval; +} + +int vm_insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, struct page *page) +{ + if (addr < vma->vm_start || addr >= vma->vm_end) + return -EFAULT; + if (!(vma->vm_flags & VM_MIXEDMAP)) { + BUG_ON(mmap_read_trylock(vma->vm_mm)); + BUG_ON(vma->vm_flags & VM_PFNMAP); + vm_flags_set(vma, VM_MIXEDMAP); + } + return insert_page_mkspecial(vma, addr, page, vma->vm_page_prot); +} +EXPORT_SYMBOL(vm_insert_page_mkspecial); + +#ifdef pte_index +/* + * insert_pages_mkspecial() amortizes the cost of spinlock operations + * when inserting pages in a loop. Arch *must* define pte_index. 
+ */ +static int insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num, pgprot_t prot) +{ + pmd_t *pmd = NULL; + pte_t *start_pte, *pte; + spinlock_t *pte_lock; + struct mm_struct *const mm = vma->vm_mm; + unsigned long curr_page_idx = 0; + unsigned long remaining_pages_total = *num; + unsigned long pages_to_write_in_pmd; + int ret; +more: + ret = -EFAULT; + pmd = walk_to_pmd(mm, addr); + if (!pmd) + goto out; + + pages_to_write_in_pmd = min_t(unsigned long, + remaining_pages_total, PTRS_PER_PTE - pte_index(addr)); + + /* Allocate the PTE if necessary; takes PMD lock once only. */ + ret = -ENOMEM; + if (pte_alloc(mm, pmd)) + goto out; + + while (pages_to_write_in_pmd) { + int pte_idx = 0; + const int batch_size = min_t(int, pages_to_write_in_pmd, 8); + + start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); + for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { + int err = insert_page_into_pte_locked_mkspecial(mm, pte, + addr, pages[curr_page_idx], prot); + if (unlikely(err)) { + pte_unmap_unlock(start_pte, pte_lock); + ret = err; + remaining_pages_total -= pte_idx; + goto out; + } + addr += PAGE_SIZE; + ++curr_page_idx; + } + pte_unmap_unlock(start_pte, pte_lock); + pages_to_write_in_pmd -= batch_size; + remaining_pages_total -= batch_size; + } + if (remaining_pages_total) + goto more; + ret = 0; +out: + *num = remaining_pages_total; + return ret; +} +#endif /* pte_index */ + +/* + * vm_insert_pages_mkspecial - variant of vm_insert_pages using insert_pfn. + * + * The main purpose of vm_insert_pages_mkspecial is to combine the advantages of + * vm_insert_pages (batching the pmd lock) and remap_pfn_range_notrack (skipping + * track_pfn_insert). + * + * The caller should ensure the isolation (refcounted, PG_locked, PG_writeback, etc.) + * of @pages, and account for error case where a subset of @pages are mapped. 
+ */ +int vm_insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num) +{ +#ifdef pte_index + const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1; + + if (addr < vma->vm_start || end_addr >= vma->vm_end) + return -EFAULT; + if (!(vma->vm_flags & VM_MIXEDMAP)) { + BUG_ON(mmap_read_trylock(vma->vm_mm)); + BUG_ON(vma->vm_flags & VM_PFNMAP); + vm_flags_set(vma, VM_MIXEDMAP); + } + return insert_pages_mkspecial(vma, addr, pages, num, vma->vm_page_prot); +#else + unsigned long idx = 0, pgcount = *num; + int err = -EINVAL; + + for (; idx < pgcount; ++idx) { + err = vm_insert_page_mkspecial(vma, addr + (PAGE_SIZE * idx), pages[idx]); + if (err) + break; + } + *num = pgcount - idx; + return err; +#endif /* pte_index */ +} +EXPORT_SYMBOL(vm_insert_pages_mkspecial); +#else +int vm_insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, struct page *page) +{ + return -EINVAL; +} +EXPORT_SYMBOL(vm_insert_page_mkspecial); +int vm_insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num) +{ + return -EINVAL; +} +EXPORT_SYMBOL(vm_insert_pages_mkspecial); +#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ + static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data, bool create, -- Gitee From 2ca2937b615d713ec981f9eec0115209b047cd02 Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Mon, 14 Feb 2022 15:38:29 +0800 Subject: [PATCH 0233/2138] anolis: uio: add ioctl to uio ANBZ: #8525 In TCMU, if backstore holds its own userspace buffer, for read cmd, the data needs to be copied from userspace buffer to tcmu data area first, and then needs to be copied from tcmu data area to scsi sgl pages again. To solve this problem, add ioctl to uio to let userspace backstore can copy data between scsi sgl pages and its own buffer directly. 
Reviewed-by: Joseph Qi Reviewed-by: Xiaoguang Wang Signed-off-by: Guixin Liu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2876 --- drivers/uio/uio.c | 21 +++++++++++++++++++++ include/linux/uio_driver.h | 1 + 2 files changed, 22 insertions(+) diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 2d572f6c8ec8..caf9caa4ee73 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -815,6 +815,25 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma) return ret; } +static long uio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct uio_listener *listener = filp->private_data; + struct uio_device *idev = listener->dev; + long retval = 0; + + mutex_lock(&idev->info_lock); + + if (!idev->info || !idev->info->ioctl) { + retval = -EINVAL; + goto out; + } + + retval = idev->info->ioctl(idev->info, cmd, arg); +out: + mutex_unlock(&idev->info_lock); + return retval; +} + static const struct file_operations uio_fops = { .owner = THIS_MODULE, .open = uio_open, @@ -825,6 +844,8 @@ static const struct file_operations uio_fops = { .poll = uio_poll, .fasync = uio_fasync, .llseek = noop_llseek, + .unlocked_ioctl = uio_ioctl, + .compat_ioctl = uio_ioctl, }; static int uio_major_init(void) diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 47c5962b876b..971d172b442f 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -109,6 +109,7 @@ struct uio_info { int (*open)(struct uio_info *info, struct inode *inode); int (*release)(struct uio_info *info, struct inode *inode); int (*irqcontrol)(struct uio_info *info, s32 irq_on); + long (*ioctl)(struct uio_info *info, unsigned int cmd, unsigned long arg); }; extern int __must_check -- Gitee From 273eeb3f56c0dd16f701591ecf5747ebb4fb8ff5 Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Mon, 14 Feb 2022 15:59:11 +0800 Subject: [PATCH 0234/2138] anolis: scsi:target: reduce one copy by using uio ioctl ANBZ: #8525 Currently there are two 
copies between sg, tcmu data area and userspace buffer, if the backstore holds its own userspace buffer, we can use uio ioctl to copy between sg and userspace buffer directly to improve performance. Use tcm_loop and tcmu(backstore is file) to evaluate performance, fio job: fio -filename=/dev/sdb -ioengine=libaio -direct=1 -size=2G -name=1 -thread -runtime=60 -time_based -rw=randread -numjobs=16 -iodepth=16 -bs=128k Without this patch: READ: bw=2511MiB/s (2633MB/s), 154MiB/s-158MiB/s (162MB/s-166MB/s), io=147GiB (158GB), run=60006-60008msec With this patch: READ: bw=2965MiB/s (3110MB/s), 183MiB/s-188MiB/s (192MB/s-197MB/s), io=174GiB (187GB), run=60005-60007msec There is about a 20% performance improvement in this case. Reviewed-by: Joseph Qi Reviewed-by: Xiaoguang Wang Signed-off-by: Guixin Liu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2876 --- drivers/target/target_core_user.c | 189 +++++++++++++++++++++++--- include/uapi/linux/target_core_user.h | 10 ++ 2 files changed, 177 insertions(+), 22 deletions(-) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 2e100b76914a..20f83a4f6dd4 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -123,6 +123,7 @@ struct tcmu_dev { #define TCMU_DEV_BIT_BLOCKED 2 #define TCMU_DEV_BIT_TMR_NOTIFY 3 #define TCMU_DEV_BIT_PLUGGED 4 +#define TCMU_DEV_BIT_BYPASS_DATA_AREA 5 unsigned long flags; struct uio_info uio_info; @@ -644,12 +645,17 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) tcmu_cmd->se_cmd = se_cmd; tcmu_cmd->tcmu_dev = udev; - tcmu_cmd_set_block_cnts(tcmu_cmd); - tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), - GFP_NOIO); - if (!tcmu_cmd->dbi) { - kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); - return NULL; + if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { + tcmu_cmd_set_block_cnts(tcmu_cmd); + tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), + GFP_NOIO); + if 
(!tcmu_cmd->dbi) { + kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); + return NULL; + } + } else { + tcmu_cmd->dbi_cnt = 0; + tcmu_cmd->dbi = NULL; } return tcmu_cmd; @@ -1095,16 +1101,19 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) tcmu_cmd_reset_dbi_cur(tcmu_cmd); iov = &entry->req.iov[0]; - if (se_cmd->data_direction == DMA_TO_DEVICE || - se_cmd->se_cmd_flags & SCF_BIDI) - scatter_data_area(udev, tcmu_cmd, &iov); - else - tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); + if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { + if (se_cmd->data_direction == DMA_TO_DEVICE || + se_cmd->se_cmd_flags & SCF_BIDI) + scatter_data_area(udev, tcmu_cmd, &iov); + else + tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); + } entry->req.iov_cnt = iov_cnt - iov_bidi_cnt; /* Handle BIDI commands */ - if (se_cmd->se_cmd_flags & SCF_BIDI) { + if ((se_cmd->se_cmd_flags & SCF_BIDI) + && !test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { iov++; tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi); entry->req.iov_bidi_cnt = iov_bidi_cnt; @@ -1368,16 +1377,18 @@ static bool tcmu_handle_completion(struct tcmu_cmd *cmd, else se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; } - if (se_cmd->se_cmd_flags & SCF_BIDI) { - /* Get Data-In buffer before clean up */ - gather_data_area(udev, cmd, true, read_len); - } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { - gather_data_area(udev, cmd, false, read_len); - } else if (se_cmd->data_direction == DMA_TO_DEVICE) { - /* TODO: */ - } else if (se_cmd->data_direction != DMA_NONE) { - pr_warn("TCMU: data direction was %d!\n", - se_cmd->data_direction); + if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { + if (se_cmd->se_cmd_flags & SCF_BIDI) { + /* Get Data-In buffer before clean up */ + gather_data_area(udev, cmd, true, read_len); + } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { + gather_data_area(udev, cmd, false, read_len); + } else if 
(se_cmd->data_direction == DMA_TO_DEVICE) { + /* TODO: */ + } else if (se_cmd->data_direction != DMA_NONE) { + pr_warn("TCMU: data direction was %d!\n", + se_cmd->data_direction); + } } done: @@ -2000,6 +2011,104 @@ static int tcmu_release(struct uio_info *info, struct inode *inode) return 0; } +static long tcmu_do_copy_data(struct tcmu_cmd *tcmu_cmd, + struct iovec __user *uiovec, + unsigned int vcnt, + bool is_copy_to_sgl) +{ + struct iovec iovstack[UIO_FASTIOV]; + struct iovec *iov = iovstack; + struct iov_iter iter; + ssize_t ret; + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + struct scatterlist *data_sg, *sg; + int i; + unsigned int data_nents; + + if (se_cmd->se_cmd_flags & SCF_BIDI) { + data_sg = se_cmd->t_bidi_data_sg; + data_nents = se_cmd->t_bidi_data_nents; + } else { + data_sg = se_cmd->t_data_sg; + data_nents = se_cmd->t_data_nents; + } + + ret = import_iovec(is_copy_to_sgl ? ITER_SOURCE : ITER_DEST, + uiovec, vcnt, ARRAY_SIZE(iovstack), &iov, &iter); + if (ret < 0) { + pr_err("import iovec failed.\n"); + return -EFAULT; + } + + for_each_sg(data_sg, sg, data_nents, i) { + if (is_copy_to_sgl) + ret = copy_page_from_iter(sg_page(sg), sg->offset, sg->length, &iter); + else + ret = copy_page_to_iter(sg_page(sg), sg->offset, sg->length, &iter); + if (ret < 0) { + pr_err("copy failed.\n"); + break; + } + } + kfree(iov); + return ret < 0 ? 
-EFAULT : 0; +} + +static long tcmu_bypass_data_area_copy_data(struct tcmu_dev *udev, + unsigned long arg, + bool is_copy_to_sgl) +{ + struct tcmu_data_xfer __user *uxfer = (struct tcmu_data_xfer __user *)arg; + struct tcmu_data_xfer xfer; + struct tcmu_cmd *tcmu_cmd; + long ret; + + if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) + return -EINVAL; + + if (copy_from_user(&xfer, uxfer, sizeof(xfer))) + return -EFAULT; + + mutex_lock(&udev->cmdr_lock); + tcmu_cmd = xa_load(&udev->commands, xfer.cmd_id); + if (!tcmu_cmd) { + pr_err("Can not find tcmu command, cmd_id:%d\n", xfer.cmd_id); + set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); + ret = -EFAULT; + goto out; + } + + if (test_bit(TCMU_CMD_BIT_EXPIRED, &tcmu_cmd->flags)) { + pr_err("Command is expired, cmd_id:%d\n", xfer.cmd_id); + ret = -EFAULT; + goto out; + } + + ret = tcmu_do_copy_data(tcmu_cmd, xfer.iovec, + xfer.iov_cnt, is_copy_to_sgl); +out: + mutex_unlock(&udev->cmdr_lock); + return ret; +} + +static long tcmu_ioctl(struct uio_info *info, unsigned int cmd, unsigned long arg) +{ + struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); + long ret; + + switch (cmd) { + case TCMU_IOCTL_CMD_COPY_TO_SGL: + ret = tcmu_bypass_data_area_copy_data(udev, arg, true); + break; + case TCMU_IOCTL_CMD_COPY_FROM_SGL: + ret = tcmu_bypass_data_area_copy_data(udev, arg, false); + break; + default: + ret = -EINVAL; + } + return ret; +} + static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) { struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; @@ -2256,6 +2365,7 @@ static int tcmu_configure_device(struct se_device *dev) info->mmap = tcmu_mmap; info->open = tcmu_open; info->release = tcmu_release; + info->ioctl = tcmu_ioctl; ret = uio_register_device(tcmu_root_device, info); if (ret) @@ -3137,6 +3247,40 @@ static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *pa } CONFIGFS_ATTR_WO(tcmu_, free_kept_buf); +static ssize_t tcmu_bypass_data_area_show(struct config_item 
*item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + if (test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) + return snprintf(page, PAGE_SIZE, "%s\n", "true"); + else + return snprintf(page, PAGE_SIZE, "%s\n", "false"); +} + +static ssize_t tcmu_bypass_data_area_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + bool bypass_data_area; + int ret; + + ret = strtobool(page, &bypass_data_area); + if (ret < 0) + return ret; + + if (bypass_data_area) + set_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags); + else + clear_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags); + + return count; +} +CONFIGFS_ATTR(tcmu_, bypass_data_area); + static struct configfs_attribute *tcmu_attrib_attrs[] = { &tcmu_attr_cmd_time_out, &tcmu_attr_qfull_time_out, @@ -3148,6 +3292,7 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = { &tcmu_attr_emulate_write_cache, &tcmu_attr_tmr_notification, &tcmu_attr_nl_reply_supported, + &tcmu_attr_bypass_data_area, NULL, }; diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index f925a77f19ed..2ce13568f196 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -185,4 +185,14 @@ enum tcmu_genl_attr { }; #define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1) +struct tcmu_data_xfer { + __u16 cmd_id; + __u16 __pad1; + __u32 iov_cnt; + struct iovec __user *iovec; +}; + +#define TCMU_IOCTL_CMD_COPY_TO_SGL _IOW('T', 0xe0, struct tcmu_data_xfer) +#define TCMU_IOCTL_CMD_COPY_FROM_SGL _IOR('T', 0xe1, struct tcmu_data_xfer) + #endif -- Gitee From d9ecb6677fab7d35b98551961656a9eb40a2b4e9 Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Tue, 31 May 2022 14:33:16 +0800 Subject: [PATCH 0235/2138] anolis: 
scsi: target: tcmu: Introduce cmd_lock to tcmu_cmd ANBZ: #8525 Currently uio framework's info_lock and tcmu's cmdr_lock force tcmu bypass data area ioctl commands to copy data sequentially, which impacts io throughput greatly, but because tcmu timeout handler may also run in, handle and set cmd expired, then the sg pages in se_cmd can not be accessed any more, we must hold tcmu's cmdr_lock to avoid race between tcmu timeout handler and bypass data area ioctls. To improve this a bit, introduce a cmd_lock per tcmu_cmd, then multiple bypass data area ioctl commands can run concurrently(of course, uio`s info_lock needs to disappear too), also ensure there isn`t race against tcmu timeout handler too. Reviewed-by: Joseph Qi Reviewed-by: Xiaoguang Wang Signed-off-by: Guixin Liu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2876 --- drivers/target/target_core_user.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 20f83a4f6dd4..fd13ce3ee870 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -195,6 +195,8 @@ struct tcmu_cmd { #define TCMU_CMD_BIT_EXPIRED 0 #define TCMU_CMD_BIT_KEEP_BUF 1 unsigned long flags; + + struct mutex cmd_lock; }; struct tcmu_tmr { @@ -644,6 +646,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) INIT_LIST_HEAD(&tcmu_cmd->queue_entry); tcmu_cmd->se_cmd = se_cmd; tcmu_cmd->tcmu_dev = udev; + mutex_init(&tcmu_cmd->cmd_lock); if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { tcmu_cmd_set_block_cnts(tcmu_cmd); @@ -1533,11 +1536,13 @@ static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) if (!time_after_eq(jiffies, cmd->deadline)) return; + mutex_lock(&cmd->cmd_lock); set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); list_del_init(&cmd->queue_entry); se_cmd = cmd->se_cmd; se_cmd->priv = NULL; cmd->se_cmd = NULL; + mutex_unlock(&cmd->cmd_lock); pr_debug("Timing 
out inflight cmd %u on dev %s.\n", cmd->cmd_id, cmd->tcmu_dev->name); @@ -2069,15 +2074,14 @@ static long tcmu_bypass_data_area_copy_data(struct tcmu_dev *udev, if (copy_from_user(&xfer, uxfer, sizeof(xfer))) return -EFAULT; - mutex_lock(&udev->cmdr_lock); tcmu_cmd = xa_load(&udev->commands, xfer.cmd_id); if (!tcmu_cmd) { pr_err("Can not find tcmu command, cmd_id:%d\n", xfer.cmd_id); set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); - ret = -EFAULT; - goto out; + return -EFAULT; } + mutex_lock(&tcmu_cmd->cmd_lock); if (test_bit(TCMU_CMD_BIT_EXPIRED, &tcmu_cmd->flags)) { pr_err("Command is expired, cmd_id:%d\n", xfer.cmd_id); ret = -EFAULT; @@ -2087,7 +2091,7 @@ static long tcmu_bypass_data_area_copy_data(struct tcmu_dev *udev, ret = tcmu_do_copy_data(tcmu_cmd, xfer.iovec, xfer.iov_cnt, is_copy_to_sgl); out: - mutex_unlock(&udev->cmdr_lock); + mutex_unlock(&tcmu_cmd->cmd_lock); return ret; } -- Gitee From 35e81694a1d8b02abb72ba5d3fc011b2e16497a9 Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Mon, 14 Feb 2022 17:15:07 +0800 Subject: [PATCH 0236/2138] anolis: uio: Replace mutex info_lock with percpu_ref to improve performance ANBZ: #8525 The mutex info_lock was introduced to fix crash after the device is unregistered in commit 57c5f4df0a5a ("uio: fix crash after the device is unregistered"), we can replace it with more powerful percpu-ref to improve performance. Use tcm_loop and tcmu(backstore is file) to evaluate performance, fio job: fio -filename=/dev/sdb -ioengine=libaio -direct=1 -size=2G -name=1 -thread -runtime=60 -time_based -rw=randread -numjobs=16 -iodepth=16 -bs=128k Without this patch: READ: bw=2965MiB/s (3110MB/s), 183MiB/s-188MiB/s (192MB/s-197MB/s), io=174GiB (187GB), run=60005-60007msec With this patch: READ: bw=5823MiB/s (6106MB/s), 338MiB/s-379MiB/s (354MB/s-397MB/s), io=341GiB (366GB), run=60002-60005msec There is about a 100% performance improvement in this case. 
Reviewed-by: Joseph Qi Reviewed-by: Xiaoguang Wang Signed-off-by: Guixin Liu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2876 --- drivers/uio/uio.c | 101 +++++++++++++++++++++++++++---------- include/linux/uio_driver.h | 5 +- 2 files changed, 79 insertions(+), 27 deletions(-) diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index caf9caa4ee73..ed942097ee33 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -12,6 +12,8 @@ * Base Functions */ +#include +#include #include #include #include @@ -218,7 +220,9 @@ static ssize_t name_show(struct device *dev, struct uio_device *idev = dev_get_drvdata(dev); int ret; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { ret = -EINVAL; dev_err(dev, "the device has been unregistered\n"); @@ -228,7 +232,7 @@ static ssize_t name_show(struct device *dev, ret = sprintf(buf, "%s\n", idev->info->name); out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return ret; } static DEVICE_ATTR_RO(name); @@ -239,7 +243,9 @@ static ssize_t version_show(struct device *dev, struct uio_device *idev = dev_get_drvdata(dev); int ret; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { ret = -EINVAL; dev_err(dev, "the device has been unregistered\n"); @@ -249,7 +255,7 @@ static ssize_t version_show(struct device *dev, ret = sprintf(buf, "%s\n", idev->info->version); out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return ret; } static DEVICE_ATTR_RO(version); @@ -489,16 +495,20 @@ static int uio_open(struct inode *inode, struct file *filep) listener->event_count = atomic_read(&idev->event); filep->private_data = listener; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) { + ret = -EINVAL; + goto err_infoopen; + } + if (!idev->info) { - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); ret = 
-EINVAL; goto err_infoopen; } if (idev->info->open) ret = idev->info->open(idev->info, inode); - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); if (ret) goto err_infoopen; @@ -531,10 +541,12 @@ static int uio_release(struct inode *inode, struct file *filep) struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (idev->info && idev->info->release) ret = idev->info->release(idev->info, inode); - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); module_put(idev->owner); kfree(listener); @@ -548,10 +560,12 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait) struct uio_device *idev = listener->dev; __poll_t ret = 0; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info || !idev->info->irq) ret = -EIO; - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); if (ret) return ret; @@ -577,13 +591,17 @@ static ssize_t uio_read(struct file *filep, char __user *buf, add_wait_queue(&idev->wait, &wait); do { - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) { + retval = -EINVAL; + break; + } + if (!idev->info || !idev->info->irq) { retval = -EIO; - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); break; } - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); set_current_state(TASK_INTERRUPTIBLE); @@ -631,7 +649,9 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, if (copy_from_user(&irq_on, buf, count)) return -EFAULT; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { retval = -EINVAL; goto out; @@ -650,7 +670,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, retval = idev->info->irqcontrol(idev->info, irq_on); out: - 
mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return retval ? retval : sizeof(s32); } @@ -675,7 +695,9 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf) vm_fault_t ret = 0; int mi; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return VM_FAULT_SIGBUS; + if (!idev->info) { ret = VM_FAULT_SIGBUS; goto out; @@ -702,8 +724,7 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf) vmf->page = page; out: - mutex_unlock(&idev->info_lock); - + percpu_ref_put(&idev->info_ref); return ret; } @@ -772,7 +793,9 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma) vma->vm_private_data = idev; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { ret = -EINVAL; goto out; @@ -811,7 +834,7 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma) } out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return ret; } @@ -821,7 +844,8 @@ static long uio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) struct uio_device *idev = listener->dev; long retval = 0; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; if (!idev->info || !idev->info->ioctl) { retval = -EINVAL; @@ -830,7 +854,7 @@ static long uio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retval = idev->info->ioctl(idev->info, cmd, arg); out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return retval; } @@ -928,6 +952,14 @@ static void uio_device_release(struct device *dev) kfree(idev); } +static void uio_info_free(struct percpu_ref *ref) +{ + struct uio_device *idev = container_of(ref, struct uio_device, info_ref); + + complete(&idev->free_done); +} + + /** * __uio_register_device - register a new userspace IO device * @owner: module that creates the new device @@ -958,10 +990,18 @@ int __uio_register_device(struct module *owner, idev->owner = owner; 
idev->info = info; - mutex_init(&idev->info_lock); init_waitqueue_head(&idev->wait); atomic_set(&idev->event, 0); + ret = percpu_ref_init(&idev->info_ref, uio_info_free, 0, GFP_KERNEL); + if (ret) { + pr_err("percpu_ref init failed!\n"); + kfree(idev); + return ret; + } + init_completion(&idev->confirm_done); + init_completion(&idev->free_done); + ret = uio_get_minor(idev); if (ret) { kfree(idev); @@ -1057,6 +1097,13 @@ int __devm_uio_register_device(struct module *owner, } EXPORT_SYMBOL_GPL(__devm_uio_register_device); +static void uio_confirm_info(struct percpu_ref *ref) +{ + struct uio_device *idev = container_of(ref, struct uio_device, info_ref); + + complete(&idev->confirm_done); +} + /** * uio_unregister_device - unregister a industrial IO device * @info: UIO device capabilities @@ -1073,14 +1120,16 @@ void uio_unregister_device(struct uio_info *info) idev = info->uio_dev; minor = idev->minor; - mutex_lock(&idev->info_lock); + percpu_ref_kill_and_confirm(&idev->info_ref, uio_confirm_info); + wait_for_completion(&idev->confirm_done); + wait_for_completion(&idev->free_done); + /* now, we can set info to NULL */ uio_dev_del_attributes(idev); if (info->irq && info->irq != UIO_IRQ_CUSTOM) free_irq(info->irq, idev); idev->info = NULL; - mutex_unlock(&idev->info_lock); wake_up_interruptible(&idev->wait); kill_fasync(&idev->async_queue, SIGIO, POLL_HUP); diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 971d172b442f..46e2710985e6 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -16,6 +16,7 @@ #include #include #include +#include struct module; struct uio_map; @@ -74,9 +75,11 @@ struct uio_device { struct fasync_struct *async_queue; wait_queue_head_t wait; struct uio_info *info; - struct mutex info_lock; struct kobject *map_dir; struct kobject *portio_dir; + struct percpu_ref info_ref; + struct completion confirm_done; + struct completion free_done; }; /** -- Gitee From e75f875c36ff6c98ac44940224daac2b1694846d Mon 
Sep 17 00:00:00 2001 From: Xiaoguang Wang Date: Wed, 16 Mar 2022 13:40:00 +0800 Subject: [PATCH 0237/2138] anolis: mm: export zap_page_range_single() ANBZ: #8525 Module target_core_user will use it to implement zero copy feature. Reviewed-by: Guixin Liu Reviewed-by: Joseph Qi Signed-off-by: Xiaoguang Wang Signed-off-by: Guixin Liu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2876 --- mm/memory.c | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/memory.c b/mm/memory.c index bef28bd53831..2ac7d0a62c74 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1770,6 +1770,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, tlb_finish_mmu(&tlb); hugetlb_zap_end(vma, details); } +EXPORT_SYMBOL_GPL(zap_page_range_single); /** * zap_vma_ptes - remove ptes mapping the vma -- Gitee From 1179492ef8dc1a2df1325653008ded625df5c6b0 Mon Sep 17 00:00:00 2001 From: Xiaoguang Wang Date: Tue, 15 Mar 2022 14:40:19 +0800 Subject: [PATCH 0238/2138] anolis: scsi: target: tcmu: Support zero copy ANBZ: #8525 Currently in tcmu, for READ commands, it copies user space backstore's data buffer to tcmu internal data area, then copies data in data area to READ commands sgl pages. For WRITE commands, tcmu copies sgl pages to tcmu internal data area, then copies data in data area to user space backstore. For both cases, there are obvious copy overhead, which impact io throughput, especially for large io size. To mitigate this issue, we implement zero copy feature to tcmu, which map sgl pages to user space backstore's address space. Currently only sgl pages's offset and length are both aligned to page size, can this command go into tcmu zero copy path. 
Reviewed-by: Guixin Liu Reviewed-by: Joseph Qi Signed-off-by: Xiaoguang Wang Signed-off-by: Guixin Liu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2876 --- drivers/target/target_core_user.c | 285 ++++++++++++++++++++++++-- include/uapi/linux/target_core_user.h | 8 + 2 files changed, 271 insertions(+), 22 deletions(-) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index fd13ce3ee870..206f2991b533 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include #include #include @@ -73,6 +75,7 @@ */ #define DATA_PAGES_PER_BLK_DEF 1 #define DATA_AREA_PAGES_DEF (256 * 1024) +#define ZC_DATA_AREA_PAGES_DEF (256 * 1024) #define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT)) #define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT)) @@ -140,6 +143,7 @@ struct tcmu_dev { /* Must add data_off and mb_addr to get the address */ size_t data_off; int data_area_mb; + uint32_t zc_max_blocks; uint32_t max_blocks; size_t mmap_pages; @@ -154,6 +158,10 @@ struct tcmu_dev { uint32_t data_pages_per_blk; uint32_t data_blk_size; + uint32_t zc_dbi_max; + uint32_t zc_dbi_thresh; + unsigned long *zc_data_bitmap; + struct xarray commands; struct timer_list cmd_timer; @@ -179,6 +187,12 @@ struct tcmu_cmd { struct tcmu_dev *tcmu_dev; struct list_head queue_entry; + /* for zero_copy */ + struct mm_struct *vma_vm_mm; + struct vm_area_struct *vma; + struct iovec *iov; + int iov_cnt; + uint16_t cmd_id; /* Can't use se_cmd when cleaning up expired cmds, because if @@ -194,6 +208,7 @@ struct tcmu_cmd { #define TCMU_CMD_BIT_EXPIRED 0 #define TCMU_CMD_BIT_KEEP_BUF 1 +#define TCMU_CMD_BIT_ZEROCOPY 2 unsigned long flags; struct mutex cmd_lock; @@ -500,10 +515,38 @@ static struct genl_family tcmu_genl_family __ro_after_init = { static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len) { struct tcmu_dev *udev = 
tcmu_cmd->tcmu_dev; + unsigned long *data_bitmap; uint32_t i; + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags)) + data_bitmap = udev->zc_data_bitmap; + else + data_bitmap = udev->data_bitmap; + for (i = 0; i < len; i++) - clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap); + clear_bit(tcmu_cmd->dbi[i], data_bitmap); +} + +static inline int tcmu_get_zc_empty_block(struct tcmu_dev *udev, + struct tcmu_cmd *tcmu_cmd, + int prev_dbi, int *iov_cnt) +{ + int dbi; + + dbi = find_first_zero_bit(udev->zc_data_bitmap, udev->zc_dbi_thresh); + if (dbi == udev->zc_dbi_thresh) + return -1; + + if (dbi > udev->zc_dbi_max) + udev->zc_dbi_max = dbi; + + set_bit(dbi, udev->zc_data_bitmap); + tcmu_cmd_set_dbi(tcmu_cmd, dbi); + + if (dbi != prev_dbi + 1) + *iov_cnt += 1; + + return dbi; } static inline int tcmu_get_empty_block(struct tcmu_dev *udev, @@ -555,7 +598,8 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev, } static int tcmu_get_empty_blocks(struct tcmu_dev *udev, - struct tcmu_cmd *tcmu_cmd, int length) + struct tcmu_cmd *tcmu_cmd, int length, + bool zero_copy) { /* start value of dbi + 1 must not be a valid dbi */ int dbi = -2; @@ -564,7 +608,10 @@ static int tcmu_get_empty_blocks(struct tcmu_dev *udev, for (; length > 0; length -= blk_size) { blk_data_len = min_t(uint32_t, length, blk_size); - dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len, + if (zero_copy) + dbi = tcmu_get_zc_empty_block(udev, tcmu_cmd, dbi, &iov_cnt); + else + dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len, &iov_cnt); if (dbi < 0) return -1; @@ -572,8 +619,40 @@ static int tcmu_get_empty_blocks(struct tcmu_dev *udev, return iov_cnt; } +static void tcmu_cmd_zerocopy_unmap(struct tcmu_cmd *cmd) +{ + struct mm_struct *mm; + struct vm_area_struct *vma; + struct iovec *iov = cmd->iov; + unsigned long address; + int i; + + mm = cmd->vma_vm_mm; + vma = cmd->vma; + if (!mm) + return; + + if (mmget_not_zero(mm)) { + mmap_read_lock(mm); + for (i = 0; i < cmd->iov_cnt; 
i++) { + address = (unsigned long)iov->iov_base; + zap_page_range_single(vma, address, iov->iov_len, NULL); + iov++; + } + mmap_read_unlock(mm); + mmput(mm); + } + + cmd->vma_vm_mm = NULL; + cmd->vma = NULL; + mmdrop(mm); + kfree(cmd->iov); +} + static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd) { + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags)) + tcmu_cmd_zerocopy_unmap(tcmu_cmd); kfree(tcmu_cmd->dbi); kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); } @@ -861,37 +940,51 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size) * Called with ring lock held. */ static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd, - int *iov_bidi_cnt) + int *iov_bidi_cnt, bool zero_copy) { int space, iov_cnt = 0, ret = 0; + unsigned long *data_bitmap; + uint32_t *dbi_thresh, max_blocks; if (!cmd->dbi_cnt) goto wr_iov_cnts; + if (zero_copy) { + data_bitmap = udev->zc_data_bitmap; + dbi_thresh = &udev->zc_dbi_thresh; + max_blocks = udev->zc_max_blocks; + } else { + data_bitmap = udev->data_bitmap; + dbi_thresh = &udev->dbi_thresh; + max_blocks = udev->max_blocks; + } + /* try to check and get the data blocks as needed */ - space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); + space = spc_bitmap_free(data_bitmap, *dbi_thresh); if (space < cmd->dbi_cnt) { - unsigned long blocks_left = - (udev->max_blocks - udev->dbi_thresh) + space; + unsigned long blocks_left = max_blocks - *dbi_thresh + space; if (blocks_left < cmd->dbi_cnt) { - pr_debug("no data space: only %lu available, but ask for %u\n", + pr_debug("no data space[%s]: only %lu available, but ask for %u\n", ++ zero_copy ? 
"zero copy" : "non zero copy", blocks_left * udev->data_blk_size, cmd->dbi_cnt * udev->data_blk_size); return -1; } - udev->dbi_thresh += cmd->dbi_cnt; - if (udev->dbi_thresh > udev->max_blocks) - udev->dbi_thresh = udev->max_blocks; + *dbi_thresh += cmd->dbi_cnt; + if (*dbi_thresh > max_blocks) + *dbi_thresh = max_blocks; } - iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length); + iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length, + zero_copy); if (iov_cnt < 0) return -1; if (cmd->dbi_bidi_cnt) { - ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi); + ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi, + zero_copy); if (ret < 0) return -1; } @@ -1032,6 +1125,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) uint32_t blk_size = udev->data_blk_size; /* size of data buffer needed */ size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size; + bool zero_copy = false; *scsi_err = TCM_NO_SENSE; @@ -1055,7 +1149,22 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) return -1; } - iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt); + if (!(se_cmd->se_cmd_flags & SCF_BIDI) && se_cmd->data_length && + IS_ALIGNED(se_cmd->data_length, PAGE_SIZE)) { + struct scatterlist *data_sg = se_cmd->t_data_sg, *sg; + unsigned int data_nents = se_cmd->t_data_nents; + int i; + + for_each_sg(data_sg, sg, data_nents, i) { + if ((sg->offset && !IS_ALIGNED(sg->offset, PAGE_SIZE)) || + !IS_ALIGNED(sg->length, PAGE_SIZE)) + break; + } + if (i == data_nents) + zero_copy = true; + } + + iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt, zero_copy); if (iov_cnt < 0) goto free_and_queue; @@ -1105,7 +1214,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) iov = &entry->req.iov[0]; if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { - if (se_cmd->data_direction == DMA_TO_DEVICE || + if (((se_cmd->data_direction == 
DMA_TO_DEVICE) && !zero_copy) || se_cmd->se_cmd_flags & SCF_BIDI) scatter_data_area(udev, tcmu_cmd, &iov); else @@ -1125,6 +1234,19 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); entry->hdr.cmd_id = tcmu_cmd->cmd_id; + if (zero_copy) { + int i; + struct iovec *tiov; + + tiov = &entry->req.iov[0]; + for (i = 0; i < entry->req.iov_cnt; i++) { + tiov->iov_base = tiov->iov_base + + (TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT); + tiov++; + } + entry->hdr.kflags |= TCMU_KFLAG_ZERO_COPY; + set_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); + } tcmu_hdr_set_len(&entry->hdr.len_op, command_size); @@ -1381,7 +1503,9 @@ static bool tcmu_handle_completion(struct tcmu_cmd *cmd, se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; } if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { - if (se_cmd->se_cmd_flags & SCF_BIDI) { + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &cmd->flags)) { + tcmu_cmd_zerocopy_unmap(cmd); + } else if (se_cmd->se_cmd_flags & SCF_BIDI) { /* Get Data-In buffer before clean up */ gather_data_area(udev, cmd, true, read_len); } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { @@ -1537,6 +1661,8 @@ static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) return; mutex_lock(&cmd->cmd_lock); + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &cmd->flags)) + tcmu_cmd_zerocopy_unmap(cmd); set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); list_del_init(&cmd->queue_entry); se_cmd = cmd->se_cmd; @@ -1635,6 +1761,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF; udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk; + udev->zc_max_blocks = ZC_DATA_AREA_PAGES_DEF / udev->data_pages_per_blk; udev->cmdr_size = CMDR_SIZE_DEF; udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF); @@ -1756,6 +1883,7 @@ static void tcmu_dev_kref_release(struct kref *kref) 
tcmu_blocks_release(udev, 0, udev->dbi_max); bitmap_free(udev->data_bitmap); + bitmap_free(udev->zc_data_bitmap); mutex_unlock(&udev->cmdr_lock); pr_debug("dev_kref_release\n"); @@ -1944,7 +2072,7 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) { struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); - vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); + vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP); vma->vm_ops = &tcmu_vm_ops; vma->vm_private_data = udev; @@ -1958,6 +2086,109 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) return 0; } +#define TCMU_ZEROCOPY_PAGE_BATCH 32 + +static inline int tcmu_zerocopy_one_seg(struct iovec *iov, + struct vm_area_struct *vma, + struct sg_page_iter *sgiter) +{ + struct page *pages[TCMU_ZEROCOPY_PAGE_BATCH]; + unsigned int len = iov->iov_len; + unsigned long address = (unsigned long)iov->iov_base; + unsigned long pages_remaining, pg_index = 0; + struct page *page; + int ret; + + while (len > 0) { + __sg_page_iter_next(sgiter); + page = sg_page_iter_page(sgiter); + pages[pg_index++] = page; + len -= PAGE_SIZE; + if (pg_index == TCMU_ZEROCOPY_PAGE_BATCH || !len) { + pages_remaining = pg_index; + ret = vm_insert_pages_mkspecial(vma, address, pages, + &pages_remaining); + if (ret < 0) { + pr_err("vm insert pages failed, error code: %d\n", ret); + return ret; + } + address = address + pg_index * PAGE_SIZE; + pg_index = 0; + } + } + + return 0; +} + +long tcmu_ioctl_cmd_zerocopy(struct tcmu_dev *udev, unsigned long arg) +{ + struct tcmu_cmd *cmd; + struct se_cmd *se_cmd; + struct scatterlist *data_sg; + unsigned int data_nents; + struct tcmu_cmd_zerocopy zc; + struct iovec *iov, *tiov; + struct sg_page_iter sgiter; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int i, ret = 0; + + if (copy_from_user(&zc, (struct tcmu_cmd_zerocopy __user *)arg, sizeof(zc))) + return -EFAULT; + + if (zc.iov_cnt <= 0) + return -EINVAL; + + iov = 
kmalloc_array(zc.iov_cnt, sizeof(struct iovec), GFP_KERNEL); + if (!iov) + return -ENOMEM; + if (copy_from_user(iov, zc.iov, sizeof(struct iovec) * zc.iov_cnt)) { + kfree(iov); + return -EFAULT; + } + + mutex_lock(&udev->cmdr_lock); + mmap_read_lock(mm); + cmd = xa_load(&udev->commands, zc.cmd_id); + if (!cmd) { + ret = -EINVAL; + kfree(iov); + pr_err("tcmu zero copy: cmd_id %d not found\n", zc.cmd_id); + goto out; + } + se_cmd = cmd->se_cmd; + + vma = find_vma(current->mm, (unsigned long)iov->iov_base); + if (!vma) { + ret = -EINVAL; + kfree(iov); + pr_err("tcmu zero copy: invalid iov_base\n"); + goto out; + } + data_sg = se_cmd->t_data_sg; + data_nents = se_cmd->t_data_nents; + __sg_page_iter_start(&sgiter, data_sg, data_nents, 0); + tiov = iov; + for (i = 0; i < zc.iov_cnt; i++) { + ret = tcmu_zerocopy_one_seg(tiov, vma, &sgiter); + if (ret < 0) { + kfree(iov); + goto out; + } + tiov++; + } + + cmd->iov = iov; + cmd->iov_cnt = zc.iov_cnt; + cmd->vma_vm_mm = vma->vm_mm; + cmd->vma = vma; + mmgrab(cmd->vma_vm_mm); +out: + mmap_read_unlock(mm); + mutex_unlock(&udev->cmdr_lock); + return ret; +} + static int tcmu_open(struct uio_info *info, struct inode *inode) { struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); @@ -2107,6 +2338,9 @@ static long tcmu_ioctl(struct uio_info *info, unsigned int cmd, unsigned long ar case TCMU_IOCTL_CMD_COPY_FROM_SGL: ret = tcmu_bypass_data_area_copy_data(udev, arg, false); break; + case TCMU_IOCTL_CMD_ZEROCOPY: + ret = tcmu_ioctl_cmd_zerocopy(udev, arg); + break; default: ret = -EINVAL; } @@ -2313,6 +2547,7 @@ static int tcmu_configure_device(struct se_device *dev) struct uio_info *info; struct tcmu_mailbox *mb; size_t data_size; + size_t zc_data_size; int ret = 0; ret = tcmu_update_uio_info(udev); @@ -2323,10 +2558,11 @@ static int tcmu_configure_device(struct se_device *dev) mutex_lock(&udev->cmdr_lock); udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL); + udev->zc_data_bitmap = 
bitmap_zalloc(udev->zc_max_blocks, GFP_KERNEL); mutex_unlock(&udev->cmdr_lock); - if (!udev->data_bitmap) { + if (!udev->data_bitmap || !udev->zc_data_bitmap) { ret = -ENOMEM; - goto err_bitmap_alloc; + goto err_vzalloc; } mb = vzalloc(udev->cmdr_size + CMDR_OFF); @@ -2340,9 +2576,12 @@ static int tcmu_configure_device(struct se_device *dev) udev->cmdr = (void *)mb + CMDR_OFF; udev->data_off = udev->cmdr_size + CMDR_OFF; data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT; - udev->mmap_pages = (data_size + udev->cmdr_size + CMDR_OFF) >> PAGE_SHIFT; + zc_data_size = (udev->zc_max_blocks * udev->data_pages_per_blk) << PAGE_SHIFT; + udev->mmap_pages = (data_size + zc_data_size + udev->cmdr_size + + CMDR_OFF) >> PAGE_SHIFT; udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE; udev->dbi_thresh = 0; /* Default in Idle state */ + udev->zc_dbi_thresh = 0; /* Default in Idle state */ /* Initialise the mailbox of the ring buffer */ mb->version = TCMU_MAILBOX_VERSION; @@ -2360,7 +2599,8 @@ static int tcmu_configure_device(struct se_device *dev) info->mem[0].name = "tcm-user command & data buffer"; info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; - info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF; + info->mem[0].size = data_size + zc_data_size + + udev->cmdr_size + CMDR_OFF; info->mem[0].memtype = UIO_MEM_NONE; info->irqcontrol = tcmu_irqcontrol; @@ -2416,7 +2656,8 @@ static int tcmu_configure_device(struct se_device *dev) err_vzalloc: bitmap_free(udev->data_bitmap); udev->data_bitmap = NULL; -err_bitmap_alloc: + kfree(udev->zc_data_bitmap); + udev->zc_data_bitmap = NULL; kfree(info->name); info->name = NULL; diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index 2ce13568f196..eba0cac0c8d2 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -73,6 +73,7 @@ enum tcmu_opcode { struct tcmu_cmd_entry_hdr { __u32 len_op; __u16 cmd_id; +#define 
TCMU_KFLAG_ZERO_COPY 0x1 __u8 kflags; #define TCMU_UFLAG_UNKNOWN_OP 0x1 #define TCMU_UFLAG_READ_LEN 0x2 @@ -194,5 +195,12 @@ struct tcmu_data_xfer { #define TCMU_IOCTL_CMD_COPY_TO_SGL _IOW('T', 0xe0, struct tcmu_data_xfer) #define TCMU_IOCTL_CMD_COPY_FROM_SGL _IOR('T', 0xe1, struct tcmu_data_xfer) +#define TCMU_IOCTL_CMD_ZEROCOPY _IOW('T', 0xe2, struct tcmu_cmd_zerocopy) + +struct tcmu_cmd_zerocopy { + struct iovec __user *iov; + __u32 iov_cnt; + __u16 cmd_id; +}; #endif -- Gitee From fce4de09ea968496440388647e9d281f444db7a8 Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Wed, 30 Mar 2022 13:42:06 +0800 Subject: [PATCH 0239/2138] anolis: scsi: target: tcmu: make zero copy and bypass data area configurable ANBZ: #8525 Add configfs file read_zc_size, write_zc_size, read_bypass_data_area, and write_bypass_data_area to control which cmd to bypass data area or zero copy. Reviewed-by: Joseph Qi Reviewed-by: Xiaoguang Wang Signed-off-by: Guixin Liu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2876 --- drivers/target/target_core_user.c | 248 +++++++++++++++++++++----- include/uapi/linux/target_core_user.h | 1 + 2 files changed, 202 insertions(+), 47 deletions(-) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 206f2991b533..1134384ad72f 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -126,7 +126,8 @@ struct tcmu_dev { #define TCMU_DEV_BIT_BLOCKED 2 #define TCMU_DEV_BIT_TMR_NOTIFY 3 #define TCMU_DEV_BIT_PLUGGED 4 -#define TCMU_DEV_BIT_BYPASS_DATA_AREA 5 +#define TCMU_DEV_BIT_READ_BYPASS_DATA_AREA 5 +#define TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA 6 unsigned long flags; struct uio_info uio_info; @@ -161,6 +162,8 @@ struct tcmu_dev { uint32_t zc_dbi_max; uint32_t zc_dbi_thresh; unsigned long *zc_data_bitmap; + uint32_t read_zc_size; + uint32_t write_zc_size; struct xarray commands; @@ -209,6 +212,7 @@ struct tcmu_cmd { #define TCMU_CMD_BIT_EXPIRED 0 #define 
TCMU_CMD_BIT_KEEP_BUF 1 #define TCMU_CMD_BIT_ZEROCOPY 2 +#define TCMU_CMD_BIT_BYPASS_DATA_AREA 3 unsigned long flags; struct mutex cmd_lock; @@ -712,11 +716,67 @@ static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd, dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length); } +static void tcmu_set_cmd_bypass_data_area(struct tcmu_cmd *tcmu_cmd) +{ + struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + + /* + * Zero copy is map sg pages to userspace, and bypass data area + * is copy data between sg pages and userspace buffer, so they + * are completely different. + */ + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags)) + return; + + if (se_cmd->data_direction == DMA_FROM_DEVICE && + test_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags)) + set_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags); + + if (se_cmd->data_direction == DMA_TO_DEVICE && + test_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags)) + set_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags); +} + +static void tcmu_set_cmd_do_zero_copy(struct tcmu_cmd *tcmu_cmd) +{ + struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + struct scatterlist *data_sg = se_cmd->t_data_sg, *sg; + unsigned int data_nents = se_cmd->t_data_nents; + int i; + + if ((se_cmd->se_cmd_flags & SCF_BIDI) || !se_cmd->data_length || + !IS_ALIGNED(se_cmd->data_length, PAGE_SIZE)) + return; + + if ((se_cmd->data_direction == DMA_FROM_DEVICE) && + (!udev->read_zc_size || + se_cmd->data_length < (udev->read_zc_size << 10))) + return; + + if ((se_cmd->data_direction == DMA_TO_DEVICE) && + (!udev->write_zc_size || + se_cmd->data_length < (udev->write_zc_size << 10))) + return; + + /* Now, check every sg pages is aligned. 
*/ + for_each_sg(data_sg, sg, data_nents, i) { + if ((sg->offset && !IS_ALIGNED(sg->offset, PAGE_SIZE)) || + !IS_ALIGNED(sg->length, PAGE_SIZE)) + break; + } + if (i == data_nents) + set_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); +} + static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) { struct se_device *se_dev = se_cmd->se_dev; struct tcmu_dev *udev = TCMU_DEV(se_dev); struct tcmu_cmd *tcmu_cmd; + bool zero_copy; + bool bypass_data_area; tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO); if (!tcmu_cmd) @@ -727,7 +787,12 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) tcmu_cmd->tcmu_dev = udev; mutex_init(&tcmu_cmd->cmd_lock); - if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { + tcmu_set_cmd_do_zero_copy(tcmu_cmd); + tcmu_set_cmd_bypass_data_area(tcmu_cmd); + + zero_copy = test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); + bypass_data_area = test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags); + if (zero_copy || !bypass_data_area) { tcmu_cmd_set_block_cnts(tcmu_cmd); tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), GFP_NOIO); @@ -946,7 +1011,7 @@ static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd, unsigned long *data_bitmap; uint32_t *dbi_thresh, max_blocks; - if (!cmd->dbi_cnt) + if (test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &cmd->flags)) goto wr_iov_cnts; if (zero_copy) { @@ -1125,7 +1190,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) uint32_t blk_size = udev->data_blk_size; /* size of data buffer needed */ size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size; - bool zero_copy = false; + bool zero_copy = test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); + bool bypass_data_area = test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags); *scsi_err = TCM_NO_SENSE; @@ -1149,21 +1215,6 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) return -1; } - if (!(se_cmd->se_cmd_flags & SCF_BIDI) && 
se_cmd->data_length && - IS_ALIGNED(se_cmd->data_length, PAGE_SIZE)) { - struct scatterlist *data_sg = se_cmd->t_data_sg, *sg; - unsigned int data_nents = se_cmd->t_data_nents; - int i; - - for_each_sg(data_sg, sg, data_nents, i) { - if ((sg->offset && !IS_ALIGNED(sg->offset, PAGE_SIZE)) || - !IS_ALIGNED(sg->length, PAGE_SIZE)) - break; - } - if (i == data_nents) - zero_copy = true; - } - iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt, zero_copy); if (iov_cnt < 0) goto free_and_queue; @@ -1213,7 +1264,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) tcmu_cmd_reset_dbi_cur(tcmu_cmd); iov = &entry->req.iov[0]; - if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { + if (zero_copy || !bypass_data_area) { if (((se_cmd->data_direction == DMA_TO_DEVICE) && !zero_copy) || se_cmd->se_cmd_flags & SCF_BIDI) scatter_data_area(udev, tcmu_cmd, &iov); @@ -1224,8 +1275,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) entry->req.iov_cnt = iov_cnt - iov_bidi_cnt; /* Handle BIDI commands */ - if ((se_cmd->se_cmd_flags & SCF_BIDI) - && !test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { + if ((se_cmd->se_cmd_flags & SCF_BIDI) && !bypass_data_area) { iov++; tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi); entry->req.iov_bidi_cnt = iov_bidi_cnt; @@ -1245,9 +1295,11 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) tiov++; } entry->hdr.kflags |= TCMU_KFLAG_ZERO_COPY; - set_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); } + if (bypass_data_area) + entry->hdr.kflags |= TCMU_KFLAG_BYPASS_DATA_AREA; + tcmu_hdr_set_len(&entry->hdr.len_op, command_size); /* All offsets relative to mb_addr, not start of entry! 
*/ @@ -1502,20 +1554,25 @@ static bool tcmu_handle_completion(struct tcmu_cmd *cmd, else se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; } - if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) { - if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &cmd->flags)) { - tcmu_cmd_zerocopy_unmap(cmd); - } else if (se_cmd->se_cmd_flags & SCF_BIDI) { - /* Get Data-In buffer before clean up */ - gather_data_area(udev, cmd, true, read_len); - } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { - gather_data_area(udev, cmd, false, read_len); - } else if (se_cmd->data_direction == DMA_TO_DEVICE) { - /* TODO: */ - } else if (se_cmd->data_direction != DMA_NONE) { - pr_warn("TCMU: data direction was %d!\n", - se_cmd->data_direction); - } + + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &cmd->flags)) { + tcmu_cmd_zerocopy_unmap(cmd); + goto done; + } + + if (test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &cmd->flags)) + goto done; + + if (se_cmd->se_cmd_flags & SCF_BIDI) { + /* Get Data-In buffer before clean up */ + gather_data_area(udev, cmd, true, read_len); + } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { + gather_data_area(udev, cmd, false, read_len); + } else if (se_cmd->data_direction == DMA_TO_DEVICE) { + /* TODO: */ + } else if (se_cmd->data_direction != DMA_NONE) { + pr_warn("TCMU: data direction was %d!\n", + se_cmd->data_direction); } done: @@ -1766,6 +1823,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF); mutex_init(&udev->cmdr_lock); + udev->read_zc_size = 0; + udev->write_zc_size = 0; INIT_LIST_HEAD(&udev->node); INIT_LIST_HEAD(&udev->timedout_entry); @@ -2299,9 +2358,6 @@ static long tcmu_bypass_data_area_copy_data(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd; long ret; - if (!test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) - return -EINVAL; - if (copy_from_user(&xfer, uxfer, sizeof(xfer))) return -EFAULT; @@ -2313,6 +2369,11 @@ static long 
tcmu_bypass_data_area_copy_data(struct tcmu_dev *udev, } mutex_lock(&tcmu_cmd->cmd_lock); + if (!test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags)) { + ret = -EINVAL; + goto out; + } + if (test_bit(TCMU_CMD_BIT_EXPIRED, &tcmu_cmd->flags)) { pr_err("Command is expired, cmd_id:%d\n", xfer.cmd_id); ret = -EFAULT; @@ -3492,19 +3553,19 @@ static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *pa } CONFIGFS_ATTR_WO(tcmu_, free_kept_buf); -static ssize_t tcmu_bypass_data_area_show(struct config_item *item, char *page) +static ssize_t tcmu_read_bypass_data_area_show(struct config_item *item, char *page) { struct se_dev_attrib *da = container_of(to_config_group(item), struct se_dev_attrib, da_group); struct tcmu_dev *udev = TCMU_DEV(da->da_dev); - if (test_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags)) + if (test_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags)) return snprintf(page, PAGE_SIZE, "%s\n", "true"); else return snprintf(page, PAGE_SIZE, "%s\n", "false"); } -static ssize_t tcmu_bypass_data_area_store(struct config_item *item, const char *page, +static ssize_t tcmu_read_bypass_data_area_store(struct config_item *item, const char *page, size_t count) { struct se_dev_attrib *da = container_of(to_config_group(item), @@ -3518,13 +3579,103 @@ static ssize_t tcmu_bypass_data_area_store(struct config_item *item, const char return ret; if (bypass_data_area) - set_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags); + set_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags); else - clear_bit(TCMU_DEV_BIT_BYPASS_DATA_AREA, &udev->flags); + clear_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags); + + return count; +} +CONFIGFS_ATTR(tcmu_, read_bypass_data_area); + +static ssize_t tcmu_write_bypass_data_area_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + if 
(test_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags)) + return snprintf(page, PAGE_SIZE, "%s\n", "true"); + else + return snprintf(page, PAGE_SIZE, "%s\n", "false"); +} + +static ssize_t tcmu_write_bypass_data_area_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + bool bypass_data_area; + int ret; + + ret = strtobool(page, &bypass_data_area); + if (ret < 0) + return ret; + + if (bypass_data_area) + set_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags); + else + clear_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags); + + return count; +} +CONFIGFS_ATTR(tcmu_, write_bypass_data_area); + +static ssize_t tcmu_read_zc_size_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%ukb\n", udev->read_zc_size); +} + +static ssize_t tcmu_read_zc_size_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + uint32_t read_zc_size; + int ret; + + ret = kstrtou32(page, 0, &read_zc_size); + if (ret < 0) + return ret; + + udev->read_zc_size = read_zc_size; + + return count; +} +CONFIGFS_ATTR(tcmu_, read_zc_size); + +static ssize_t tcmu_write_zc_size_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%ukb\n", udev->write_zc_size); +} + +static ssize_t tcmu_write_zc_size_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = 
container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + uint32_t write_zc_size; + int ret; + + ret = kstrtou32(page, 0, &write_zc_size); + if (ret < 0) + return ret; + + udev->write_zc_size = write_zc_size; return count; } -CONFIGFS_ATTR(tcmu_, bypass_data_area); +CONFIGFS_ATTR(tcmu_, write_zc_size); static struct configfs_attribute *tcmu_attrib_attrs[] = { &tcmu_attr_cmd_time_out, @@ -3537,7 +3688,10 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = { &tcmu_attr_emulate_write_cache, &tcmu_attr_tmr_notification, &tcmu_attr_nl_reply_supported, - &tcmu_attr_bypass_data_area, + &tcmu_attr_read_bypass_data_area, + &tcmu_attr_write_bypass_data_area, + &tcmu_attr_read_zc_size, + &tcmu_attr_write_zc_size, NULL, }; diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index eba0cac0c8d2..8931c2bb0afe 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -74,6 +74,7 @@ struct tcmu_cmd_entry_hdr { __u32 len_op; __u16 cmd_id; #define TCMU_KFLAG_ZERO_COPY 0x1 +#define TCMU_KFLAG_BYPASS_DATA_AREA 0x2 __u8 kflags; #define TCMU_UFLAG_UNKNOWN_OP 0x1 #define TCMU_UFLAG_READ_LEN 0x2 -- Gitee From 5ad6a732b86394b2fadc388a74b36452628a01f9 Mon Sep 17 00:00:00 2001 From: Xiaoguang Wang Date: Tue, 15 Mar 2022 15:57:57 +0800 Subject: [PATCH 0240/2138] anolis: scsi: target: tcmu: use new rw_semaphore to protect truncate ANBZ: #8525 Currently tcmu_vma_fault() uses udev->cmdr_lock to avoid concurrent find_free_blocks(), which unmaps idle pages and truncates them. This work is really like many filesystem's truncate operations, but they use inode's i_mmap_sem to protect race normally. This patch replaces cmdr_lock with a new rw_semaphore in tcmu fault procedure, which will also make page-fault have concurrency. 
Reviewed-by: Guixin Liu Reviewed-by: Joseph Qi Signed-off-by: Xiaoguang Wang Signed-off-by: Guixin Liu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2876 --- drivers/target/target_core_user.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 1134384ad72f..31d3bd1e3ebf 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -149,6 +149,7 @@ struct tcmu_dev { size_t mmap_pages; struct mutex cmdr_lock; + struct rw_semaphore i_mmap_sem; struct list_head qfull_queue; struct list_head tmr_queue; @@ -1825,6 +1826,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) mutex_init(&udev->cmdr_lock); udev->read_zc_size = 0; udev->write_zc_size = 0; + init_rwsem(&udev->i_mmap_sem); INIT_LIST_HEAD(&udev->node); INIT_LIST_HEAD(&udev->timedout_entry); @@ -2043,12 +2045,12 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi) { struct page *page; - mutex_lock(&udev->cmdr_lock); + down_read(&udev->i_mmap_sem); page = xa_load(&udev->data_pages, dpi); if (likely(page)) { get_page(page); lock_page(page); - mutex_unlock(&udev->cmdr_lock); + up_read(&udev->i_mmap_sem); return page; } @@ -2058,7 +2060,7 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi) */ pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n", dpi, udev->name); - mutex_unlock(&udev->cmdr_lock); + up_read(&udev->i_mmap_sem); return NULL; } @@ -3756,6 +3758,7 @@ static void find_free_blocks(void) continue; } + down_write(&udev->i_mmap_sem); end = udev->dbi_max + 1; block = find_last_bit(udev->data_bitmap, end); if (block == udev->dbi_max) { @@ -3763,6 +3766,7 @@ static void find_free_blocks(void) * The last bit is dbi_max, so it is not possible * reclaim any blocks. 
*/ + up_write(&udev->i_mmap_sem); mutex_unlock(&udev->cmdr_lock); continue; } else if (block == end) { @@ -3790,6 +3794,7 @@ static void find_free_blocks(void) off = udev->data_off + (loff_t)start * udev->data_blk_size; unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); + up_write(&udev->i_mmap_sem); mutex_unlock(&udev->cmdr_lock); total_pages_freed += pages_freed; -- Gitee From c169383045543385ae11b1d36c41c471789a2ab3 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Wed, 21 Feb 2024 17:24:49 +0800 Subject: [PATCH 0241/2138] anolis: tcm_loop: allow sg_tablesize to be settable ANBZ: #8523 Currently tcm_loop default sg_tablesize is 256 and can only support maximum 1M io in the worst case, e.g. each segment takes only 4k. This won't fulfill requirements in some user scenarios. Just like "scsi: tcm_loop: Allow queues, can_queue and cmd_per_lun to be settable", make 'sg_tablesize' also settable by user. Signed-off-by: Joseph Qi Reviewed-by: Jingbo Xu Reviewed-by: Gao Xiang Signed-off-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2879 --- drivers/target/loopback/tcm_loop.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 4ec99a55ac30..8ec4667c48d4 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -54,6 +54,9 @@ module_param_named(can_queue, tcm_loop_can_queue, uint, 0644); static unsigned int tcm_loop_cmd_per_lun = 1024; module_param_named(cmd_per_lun, tcm_loop_cmd_per_lun, uint, 0644); +static unsigned short tcm_loop_sg_tablesize = 256; +module_param_named(sg_tablesize, tcm_loop_sg_tablesize, ushort, 0644); + /* * Called from struct target_core_fabric_ops->check_stop_free() */ @@ -301,7 +304,6 @@ static const struct scsi_host_template tcm_loop_driver_template = { .eh_device_reset_handler = tcm_loop_device_reset, .eh_target_reset_handler = tcm_loop_target_reset, .this_id = -1, - .sg_tablesize = 256, 
.max_sectors = 0xFFFF, .dma_boundary = PAGE_SIZE - 1, .module = THIS_MODULE, @@ -339,6 +341,7 @@ static int tcm_loop_driver_probe(struct device *dev) sh->nr_hw_queues = tcm_loop_nr_hw_queues; sh->can_queue = tcm_loop_can_queue; sh->cmd_per_lun = tcm_loop_cmd_per_lun; + sh->sg_tablesize = tcm_loop_sg_tablesize; host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | -- Gitee From dbc5380055b9021004797785bddf05111c7f1e31 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Wed, 17 Jan 2024 11:14:39 +0800 Subject: [PATCH 0242/2138] anolis: Revert "block: always define BIO_MAX_PAGES as 256" ANBZ: #8523 This reverts commit 6861428921b51113520cd47897be6c2774e4fc58. In some user scenarios, we want to write a big IO (e.g. 2M) and also expect it not to split. But in the worst case, each bio_vec only contains single page, so it can only support maximum 1M bio actually. Revert BIO_MAX_PAGES back to 512 to fulfill the above user scenarios. This is also to keep consistent with kernel 4.19. Signed-off-by: Joseph Qi Reviewed-by: Jingbo Xu Signed-off-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2879 --- include/linux/bio.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/include/linux/bio.h b/include/linux/bio.h index efb40c3282ca..797e17573e71 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -10,7 +10,15 @@ #include #include +#ifdef CONFIG_THP_SWAP +#if HPAGE_PMD_NR > 256 +#define BIO_MAX_VECS (HPAGE_PMD_NR * 1U) +#else #define BIO_MAX_VECS 256U +#endif +#else +#define BIO_MAX_VECS 256U +#endif struct queue_limits; -- Gitee From 6aaf0225f594850ad65be8ca603876c7ee8b0237 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Thu, 7 Mar 2024 16:10:15 +0800 Subject: [PATCH 0243/2138] anolis: Add support for Zhaoxin Serial ATA IDE. ANBZ: #7809 With this driver, Serial ATA device can run in IDE mode on Zhaoxin CPUs. 
Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2684 --- drivers/ata/Kconfig | 9 + drivers/ata/Makefile | 1 + drivers/ata/sata_zhaoxin.c | 390 +++++++++++++++++++++++++++++++++++++ 3 files changed, 400 insertions(+) create mode 100644 drivers/ata/sata_zhaoxin.c diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 42b51c9812a0..0fd5a5bce3e4 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -553,6 +553,15 @@ config SATA_VITESSE If unsure, say N. +config SATA_ZHAOXIN + tristate "ZhaoXin SATA support" + depends on PCI + select SATA_HOST + help + This option enables support for ZhaoXin Serial ATA. + + If unsure, say N. + comment "PATA SFF controllers with BMDMA" config PATA_ALI diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 20e6645ab737..4b846692e365 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_SATA_SIL) += sata_sil.o obj-$(CONFIG_SATA_SIS) += sata_sis.o obj-$(CONFIG_SATA_SVW) += sata_svw.o obj-$(CONFIG_SATA_ULI) += sata_uli.o +obj-$(CONFIG_SATA_ZHAOXIN) += sata_zhaoxin.o obj-$(CONFIG_SATA_VIA) += sata_via.o obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o diff --git a/drivers/ata/sata_zhaoxin.c b/drivers/ata/sata_zhaoxin.c new file mode 100644 index 000000000000..53c3e2ab6095 --- /dev/null +++ b/drivers/ata/sata_zhaoxin.c @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * sata_zhaoxin.c - ZhaoXin Serial ATA controllers + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "sata_zx" +#define DRV_VERSION "2.6.1" + +#define PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL 9002 +#define PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL 9003 + +enum board_ids_enum { + zx100s, +}; + +enum { + SATA_CHAN_ENAB = 0x40, /* SATA channel enable */ + SATA_INT_GATE = 0x41, /* SATA interrupt gating */ + SATA_NATIVE_MODE = 0x42, /* Native mode enable */ + PATA_UDMA_TIMING = 0xB3, /* PATA timing for 
DMA/ cable detect */ + PATA_PIO_TIMING = 0xAB, /* PATA timing register */ + + PORT0 = (1 << 1), + PORT1 = (1 << 0), + ALL_PORTS = PORT0 | PORT1, + + NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4), + + SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */ +}; + +static int zx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +static int zx_scr_read(struct ata_link *link, unsigned int scr, u32 *val); +static int zx_scr_write(struct ata_link *link, unsigned int scr, u32 val); +static int zx_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); + +static void zx_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); + +static const struct pci_device_id zx_pci_tbl[] = { + { PCI_VDEVICE(ZHAOXIN, PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL), zx100s }, + { PCI_VDEVICE(ZHAOXIN, PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL), zx100s }, + + { } /* terminate list */ +}; + +static struct pci_driver zx_pci_driver = { + .name = DRV_NAME, + .id_table = zx_pci_tbl, + .probe = zx_init_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif + .remove = ata_pci_remove_one, +}; + +static struct scsi_host_template zx_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct ata_port_operations zx_base_ops = { + .inherits = &ata_bmdma_port_ops, + .sff_tf_load = zx_tf_load, +}; + +static struct ata_port_operations zx_ops = { + .inherits = &zx_base_ops, + .hardreset = zx_hardreset, + .scr_read = zx_scr_read, + .scr_write = zx_scr_write, +}; + +static struct ata_port_info zx100s_port_info = { + .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &zx_ops, +}; + + +static int zx_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + int rc; + + rc = sata_std_hardreset(link, class, deadline); + if (!rc || rc == -EAGAIN) { + struct ata_port *ap = link->ap; + int pmp = link->pmp; + int 
tmprc; + + if (pmp) { + ap->ops->sff_dev_select(ap, pmp); + tmprc = ata_sff_wait_ready(&ap->link, deadline); + } else { + tmprc = ata_sff_wait_ready(link, deadline); + } + if (tmprc) + ata_link_err(link, "COMRESET failed for wait (errno=%d)\n", + rc); + else + ata_link_err(link, "wait for bsy success\n"); + + ata_link_err(link, "COMRESET success (errno=%d) ap=%d link %d\n", + rc, link->ap->port_no, link->pmp); + } else { + ata_link_err(link, "COMRESET failed (errno=%d) ap=%d link %d\n", + rc, link->ap->port_no, link->pmp); + } + return rc; +} + +static int zx_scr_read(struct ata_link *link, unsigned int scr, u32 *val) +{ + static const u8 ipm_tbl[] = { 1, 2, 6, 0 }; + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + int slot = 2 * link->ap->port_no + link->pmp; + u32 v = 0; + u8 raw; + + switch (scr) { + case SCR_STATUS: + pci_read_config_byte(pdev, 0xA0 + slot, &raw); + + /* read the DET field, bit0 and 1 of the config byte */ + v |= raw & 0x03; + + /* read the SPD field, bit4 of the configure byte */ + v |= raw & 0x30; + + /* read the IPM field, bit2 and 3 of the config byte */ + v |= ((ipm_tbl[(raw >> 2) & 0x3])<<8); + break; + + case SCR_ERROR: + /* devices other than 5287 uses 0xA8 as base */ + WARN_ON(pdev->device != PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL && + pdev->device != PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL); + pci_write_config_byte(pdev, 0x42, slot); + pci_read_config_dword(pdev, 0xA8, &v); + break; + + case SCR_CONTROL: + pci_read_config_byte(pdev, 0xA4 + slot, &raw); + + /* read the DET field, bit0 and bit1 */ + v |= ((raw & 0x02) << 1) | (raw & 0x01); + + /* read the IPM field, bit2 and bit3 */ + v |= ((raw >> 2) & 0x03) << 8; + + break; + + default: + return -EINVAL; + } + + *val = v; + return 0; +} + +static int zx_scr_write(struct ata_link *link, unsigned int scr, u32 val) +{ + struct pci_dev *pdev = to_pci_dev(link->ap->host->dev); + int slot = 2 * link->ap->port_no + link->pmp; + u32 v = 0; + + WARN_ON(pdev == NULL); + + switch (scr) { + case 
SCR_ERROR: + /* devices PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL uses 0xA8 as base */ + WARN_ON(pdev->device != PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL && + pdev->device != PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL); + pci_write_config_byte(pdev, 0x42, slot); + pci_write_config_dword(pdev, 0xA8, val); + return 0; + + case SCR_CONTROL: + /* set the DET field */ + v |= ((val & 0x4) >> 1) | (val & 0x1); + + /* set the IPM field */ + v |= ((val >> 8) & 0x3) << 2; + + + pci_write_config_byte(pdev, 0xA4 + slot, v); + + + return 0; + + default: + return -EINVAL; + } +} + + +/** + * zx_tf_load - send taskfile registers to host controller + * @ap: Port to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller. + * + * This is to fix the internal bug of zx chipsets, which will + * reset the device register after changing the IEN bit on ctl + * register. + */ +static void zx_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) +{ + struct ata_taskfile ttf; + + if (tf->ctl != ap->last_ctl) { + ttf = *tf; + ttf.flags |= ATA_TFLAG_DEVICE; + tf = &ttf; + } + ata_sff_tf_load(ap, tf); +} + +static const unsigned int zx_bar_sizes[] = { + 8, 4, 8, 4, 16, 256 +}; + +static const unsigned int zx100s_bar_sizes0[] = { + 8, 4, 8, 4, 16, 0 +}; + +static const unsigned int zx100s_bar_sizes1[] = { + 8, 4, 0, 0, 16, 0 +}; + +static int zx_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) +{ + const struct ata_port_info *ppi0[] = { + &zx100s_port_info, NULL + }; + const struct ata_port_info *ppi1[] = { + &zx100s_port_info, &ata_dummy_port_info + }; + struct ata_host *host; + int i, rc; + + if (pdev->device == PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL) + rc = ata_pci_bmdma_prepare_host(pdev, ppi0, &host); + else if (pdev->device == PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL) + rc = ata_pci_bmdma_prepare_host(pdev, ppi1, &host); + else + rc = -EINVAL; + + if (rc) + return rc; + + *r_host = host; + + /* 9002 hosts four sata ports as M/S of the two 
channels */ + /* 9003 hosts two sata ports as M/S of the one channel */ + for (i = 0; i < host->n_ports; i++) + ata_slave_link_init(host->ports[i]); + + return 0; +} + +static void zx_configure(struct pci_dev *pdev, int board_id) +{ + u8 tmp8; + + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8); + dev_info(&pdev->dev, "routed to hard irq line %d\n", + (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f); + + /* make sure SATA channels are enabled */ + pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8); + if ((tmp8 & ALL_PORTS) != ALL_PORTS) { + dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n", + (int)tmp8); + tmp8 |= ALL_PORTS; + pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8); + } + + /* make sure interrupts for each channel sent to us */ + pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8); + if ((tmp8 & ALL_PORTS) != ALL_PORTS) { + dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n", + (int) tmp8); + tmp8 |= ALL_PORTS; + pci_write_config_byte(pdev, SATA_INT_GATE, tmp8); + } + + /* make sure native mode is enabled */ + pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8); + if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) { + dev_dbg(&pdev->dev, + "enabling SATA channel native mode (0x%x)\n", + (int) tmp8); + tmp8 |= NATIVE_MODE_ALL; + pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); + } +} + +static int zx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned int i; + int rc; + struct ata_host *host = NULL; + int board_id = (int) ent->driver_data; + const unsigned int *bar_sizes; + int legacy_mode = 0; + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + if (pdev->device == PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL || + pdev->device == PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL) { + if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { + u8 tmp8, mask; + + /* TODO: What if one channel is in native mode ... 
*/ + pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); + mask = (1 << 2) | (1 << 0); + if ((tmp8 & mask) != mask) + legacy_mode = 1; + } + if (legacy_mode) + return -EINVAL; + } + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + if (board_id == zx100s && pdev->device == PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL) + bar_sizes = &zx100s_bar_sizes0[0]; + else if (board_id == zx100s && pdev->device == PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL) + bar_sizes = &zx100s_bar_sizes1[0]; + else + bar_sizes = &zx_bar_sizes[0]; + + for (i = 0; i < ARRAY_SIZE(zx_bar_sizes); i++) { + if ((pci_resource_start(pdev, i) == 0) || + (pci_resource_len(pdev, i) < bar_sizes[i])) { + if (bar_sizes[i] == 0) + continue; + + dev_err(&pdev->dev, + "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n", + i, + (unsigned long long)pci_resource_start(pdev, i), + (unsigned long long)pci_resource_len(pdev, i)); + + return -ENODEV; + } + } + + switch (board_id) { + case zx100s: + rc = zx_prepare_host(pdev, &host); + break; + default: + rc = -EINVAL; + } + if (rc) + return rc; + + zx_configure(pdev, board_id); + + pci_set_master(pdev); + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, + IRQF_SHARED, &zx_sht); +} + +module_pci_driver(zx_pci_driver); + +MODULE_AUTHOR("Yanchen:YanchenSun@zhaoxin.com"); +MODULE_DESCRIPTION("SCSI low-level driver for ZX SATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, zx_pci_tbl); +MODULE_VERSION(DRV_VERSION); -- Gitee From c5ab66ec22bbbc6c9e1e6c3ca0550d918aab648a Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Thu, 7 Mar 2024 16:58:15 +0800 Subject: [PATCH 0244/2138] anolis: Add support for Zhaoxin HW Random Number Generator ANBZ: #7809 This driver provides kernel-side support for the Random Number Generator hardware found on Zhaoxin based motherboards. 
Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2685 --- drivers/char/hw_random/Kconfig | 13 ++++ drivers/char/hw_random/Makefile | 1 + drivers/char/hw_random/via-rng.c | 10 +-- drivers/char/hw_random/zhaoxin-rng.c | 98 ++++++++++++++++++++++++++++ 4 files changed, 117 insertions(+), 5 deletions(-) create mode 100644 drivers/char/hw_random/zhaoxin-rng.c diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 8de74dcfa18c..7c486989dd04 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -152,6 +152,19 @@ config HW_RANDOM_VIA If unsure, say Y. +config HW_RANDOM_ZHAOXIN + tristate "Zhaoxin HW Random Number Generator support" + depends on X86 + default HW_RANDOM + help + This driver provides kernel-side support for the Random Number + Generator hardware found on Zhaoxin based motherboards. + + To compile this driver as a module, choose M here: the + module will be called zhaoxin-rng. + + If unsure, say Y. 
+ config HW_RANDOM_IXP4XX tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support" depends on ARCH_IXP4XX || COMPILE_TEST diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 32549a1186dc..ef5b3ae0794d 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o n2-rng-y := n2-drv.o n2-asm.o obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o +obj-$(CONFIG_HW_RANDOM_ZHAOXIN) += zhaoxin-rng.o obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c index a9a0a3b09c8b..4288e1114fc9 100644 --- a/drivers/char/hw_random/via-rng.c +++ b/drivers/char/hw_random/via-rng.c @@ -35,7 +35,7 @@ #include #include - +static struct x86_cpu_id via_rng_cpu_id[]; enum { @@ -135,7 +135,7 @@ static int via_rng_init(struct hwrng *rng) * is always enabled if CPUID rng_en is set. 
There is no * RNG configuration like it used to be the case in this * register */ - if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || (c->x86 > 6)){ + if ((c->x86 == 6) && (c->x86_model >= 0x0f)) { if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) { pr_err(PFX "can't enable hardware RNG " "if XSTORE is not enabled\n"); @@ -196,7 +196,7 @@ static int __init via_rng_mod_init(void) { int err; - if (!boot_cpu_has(X86_FEATURE_XSTORE)) + if (!x86_match_cpu(via_rng_cpu_id)) return -ENODEV; pr_info("VIA RNG detected\n"); @@ -217,8 +217,8 @@ static void __exit via_rng_mod_exit(void) } module_exit(via_rng_mod_exit); -static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = { - X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL), +static struct x86_cpu_id via_rng_cpu_id[] = { + X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_XSTORE, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id); diff --git a/drivers/char/hw_random/zhaoxin-rng.c b/drivers/char/hw_random/zhaoxin-rng.c new file mode 100644 index 000000000000..f0bfda78fea1 --- /dev/null +++ b/drivers/char/hw_random/zhaoxin-rng.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RNG driver for Zhaoxin RNGs + * + * Copyright 2023 (c) Zhaoxin Semiconductor Co., Ltd + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "2.0.0" + +enum { + ZHAOXIN_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */ + ZHAOXIN_RNG_CHUNK_4 = 0x01, /* 32 rand bits, 32 stored bits */ + ZHAOXIN_RNG_CHUNK_2 = 0x02, /* 16 rand bits, 32 stored bits */ + ZHAOXIN_RNG_CHUNK_1 = 0x03, /* 8 rand bits, 32 stored bits */ + ZHAOXIN_RNG_MAX_SIZE = (128 * 1024), +}; + +static int zhaoxin_rng_init(struct hwrng *rng) +{ + if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) { + pr_err(PFX "can't enable hardware RNG if XSTORE is not enabled\n"); + return -ENODEV; + } + + return 0; +} + +static inline int rep_xstore(size_t size, size_t factor, void *result) +{ + asm(".byte 0xf3, 0x0f, 0xa7, 0xc0" + : 
"=m"(*(size_t *)result), "+c"(size), "+d"(factor), "+D"(result)); + + return 0; +} + +static int zhaoxin_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + if (max > ZHAOXIN_RNG_MAX_SIZE) + max = ZHAOXIN_RNG_MAX_SIZE; + + rep_xstore(max, ZHAOXIN_RNG_CHUNK_1, data); + + return max; +} + +static struct hwrng zhaoxin_rng = { + .name = "zhaoxin", + .init = zhaoxin_rng_init, + .read = zhaoxin_rng_read, +}; + +static struct x86_cpu_id zhaoxin_rng_cpu_ids[] = { + X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 6, X86_FEATURE_XSTORE, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 7, X86_FEATURE_XSTORE, NULL), + X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 7, X86_FEATURE_XSTORE, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_rng_cpu_ids); + +static int __init zhaoxin_rng_mod_init(void) +{ + int err; + + if (!x86_match_cpu(zhaoxin_rng_cpu_ids)) { + pr_err(PFX "The CPU isn't support XSTORE.\n"); + return -ENODEV; + } + + pr_info("Zhaoxin RNG detected\n"); + + err = hwrng_register(&zhaoxin_rng); + if (err) + pr_err(PFX "RNG registering failed (%d)\n", err); + + return err; +} +module_init(zhaoxin_rng_mod_init); + +static void __exit zhaoxin_rng_mod_exit(void) +{ + hwrng_unregister(&zhaoxin_rng); +} +module_exit(zhaoxin_rng_mod_exit); + +MODULE_DESCRIPTION("H/W RNG driver for Zhaoxin CPUs"); +MODULE_AUTHOR("YunShen@zhaoxin.com"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRIVER_VERSION); -- Gitee From 551af6ca66516529db1906cbe2bfd2c3b6cbba22 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 15 Mar 2024 11:10:04 +0800 Subject: [PATCH 0245/2138] anolis: hwmon: Add support for Zhaoxin core temperature monitoring ANBZ: #8437 Add support for the temperature sensor inside CPU. Supported are all known variants of the Zhaoxin processors. 
Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2821 --- MAINTAINERS | 6 + drivers/hwmon/Kconfig | 13 ++ drivers/hwmon/Makefile | 1 + drivers/hwmon/via-cputemp.c | 1 - drivers/hwmon/zhaoxin-cputemp.c | 305 ++++++++++++++++++++++++++++++++ 5 files changed, 325 insertions(+), 1 deletion(-) create mode 100644 drivers/hwmon/zhaoxin-cputemp.c diff --git a/MAINTAINERS b/MAINTAINERS index 2fe7c02c3545..d9a486e6587b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23893,6 +23893,12 @@ F: arch/x86/events/zhaoxin/core.c F: arch/x86/events/zhaoxin/uncore.c F: arch/x86/events/zhaoxin/uncore.h +ZHAOXIN TEMPERATURE MONITORING DRIVERS +M: Leoliu-oc +L: linux-hwmon@vger.kernel.org +S: Maintained +F: drivers/hwmon/zhaoxin-cputemp.c + ZONEFS FILESYSTEM M: Damien Le Moal M: Naohiro Aota diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index a4c361b6619c..d6c5eead770a 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -2165,6 +2165,19 @@ config SENSORS_VIA_CPUTEMP sensor inside your CPU. Supported are all known variants of the VIA C7 and Nano. +config SENSORS_ZHAOXIN_CPUTEMP + tristate "Zhaoxin CPU temperature sensor" + depends on X86 + default m + select HWMON_VID + help + If you say yes here you get support for the temperature + sensor inside your CPU. Supported are all known variants of + the Zhaoxin processors. + + This driver can also be built as a module. If so, the module + will be called zhaoxin-cputemp. 
+ config SENSORS_VIA686A tristate "VIA686A" depends on PCI diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 4ac9452b5430..cab312e74d3c 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -211,6 +211,7 @@ obj-$(CONFIG_SENSORS_TMP464) += tmp464.o obj-$(CONFIG_SENSORS_TMP513) += tmp513.o obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o +obj-$(CONFIG_SENSORS_ZHAOXIN_CPUTEMP)+= zhaoxin-cputemp.o obj-$(CONFIG_SENSORS_VIA686A) += via686a.o obj-$(CONFIG_SENSORS_VT1211) += vt1211.o obj-$(CONFIG_SENSORS_VT8231) += vt8231.o diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c index e5d18dac8ee7..0a5057dbe51a 100644 --- a/drivers/hwmon/via-cputemp.c +++ b/drivers/hwmon/via-cputemp.c @@ -273,7 +273,6 @@ static const struct x86_cpu_id __initconst cputemp_ids[] = { X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_A, NULL), X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_D, NULL), X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_NANO, NULL), - X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, X86_MODEL_ANY, NULL), {} }; MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); diff --git a/drivers/hwmon/zhaoxin-cputemp.c b/drivers/hwmon/zhaoxin-cputemp.c new file mode 100644 index 000000000000..751d2c5a868a --- /dev/null +++ b/drivers/hwmon/zhaoxin-cputemp.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * zhaoxin-cputemp.c - Driver for Zhaoxin CPU core temperature monitoring + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRVNAME "zhaoxin_cputemp" + +enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME, SHOW_CRIT, SHOW_MAX }; + +/* Functions declaration */ + +struct zhaoxin_cputemp_data { + struct device *hwmon_dev; + const char *name; + u32 id; + u32 msr_temp; + u32 msr_crit; + u32 msr_max; +}; + +/* Sysfs stuff */ 
+ +static ssize_t name_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + int ret; + struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + + if (attr->index == SHOW_NAME) + ret = sprintf(buf, "%s\n", data->name); + else /* show label */ + ret = sprintf(buf, "Core %d\n", data->id); + return ret; +} + +static ssize_t temp_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000); +} + +static ssize_t crit_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_crit, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xff) * 1000); +} + +static ssize_t max_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_max, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xff) * 1000); +} + +static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, SHOW_TEMP); +static SENSOR_DEVICE_ATTR_RO(temp1_label, name, SHOW_LABEL); +static SENSOR_DEVICE_ATTR_RO(name, name, SHOW_NAME); +static SENSOR_DEVICE_ATTR_RO(temp1_crit, crit, SHOW_CRIT); +static SENSOR_DEVICE_ATTR_RO(temp1_max, max, SHOW_MAX); + +static struct attribute *zhaoxin_cputemp_attributes[] = { + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + 
&sensor_dev_attr_temp1_max.dev_attr.attr, + NULL +}; + +static const struct attribute_group zhaoxin_cputemp_group = { + .attrs = zhaoxin_cputemp_attributes, +}; + +static int zhaoxin_cputemp_probe(struct platform_device *pdev) +{ + struct zhaoxin_cputemp_data *data; + int err; + u32 eax, edx; + struct cpuinfo_x86 *c = &cpu_data(pdev->id); + + data = devm_kzalloc(&pdev->dev, sizeof(struct zhaoxin_cputemp_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->id = pdev->id; + data->name = "zhaoxin_cputemp"; + data->msr_temp = 0x1423; + if (c->x86_model == 0x6b) { + data->msr_crit = 0x175b; + data->msr_max = 0x175a; + } else { + data->msr_crit = 0x1416; + data->msr_max = 0x1415; + } + + /* test if we can access the TEMPERATURE MSR */ + err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx); + if (err) { + dev_err(&pdev->dev, "Unable to access TEMPERATURE MSR, giving up\n"); + return err; + } + + platform_set_drvdata(pdev, data); + + err = sysfs_create_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + if (err) + return err; + + data->hwmon_dev = hwmon_device_register_for_thermal(&pdev->dev, data->name, data); + if (IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + dev_err(&pdev->dev, "Class registration failed (%d)\n", err); + goto exit_remove; + } + + return 0; + +exit_remove: + sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + return err; +} + +static int zhaoxin_cputemp_remove(struct platform_device *pdev) +{ + struct zhaoxin_cputemp_data *data = platform_get_drvdata(pdev); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + return 0; +} + +static struct platform_driver zhaoxin_cputemp_driver = { + .driver = { + .name = DRVNAME, + }, + .probe = zhaoxin_cputemp_probe, + .remove = zhaoxin_cputemp_remove, +}; + +struct pdev_entry { + struct list_head list; + struct platform_device *pdev; + unsigned int cpu; +}; + +static LIST_HEAD(pdev_list); +static 
DEFINE_MUTEX(pdev_list_mutex); + +static int zhaoxin_cputemp_online(unsigned int cpu) +{ + int err; + struct platform_device *pdev; + struct pdev_entry *pdev_entry; + + pdev = platform_device_alloc(DRVNAME, cpu); + if (!pdev) { + err = -ENOMEM; + pr_err("Device allocation failed\n"); + goto exit; + } + + pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL); + if (!pdev_entry) { + err = -ENOMEM; + goto exit_device_put; + } + + err = platform_device_add(pdev); + if (err) { + pr_err("Device addition failed (%d)\n", err); + goto exit_device_free; + } + + pdev_entry->pdev = pdev; + pdev_entry->cpu = cpu; + mutex_lock(&pdev_list_mutex); + list_add_tail(&pdev_entry->list, &pdev_list); + mutex_unlock(&pdev_list_mutex); + + return 0; + +exit_device_free: + kfree(pdev_entry); +exit_device_put: + platform_device_put(pdev); +exit: + return err; +} + +static int zhaoxin_cputemp_down_prep(unsigned int cpu) +{ + struct pdev_entry *p; + + mutex_lock(&pdev_list_mutex); + list_for_each_entry(p, &pdev_list, list) { + if (p->cpu == cpu) { + platform_device_unregister(p->pdev); + list_del(&p->list); + mutex_unlock(&pdev_list_mutex); + kfree(p); + return 0; + } + } + mutex_unlock(&pdev_list_mutex); + return 0; +} + +static const struct x86_cpu_id cputemp_ids[] __initconst = { + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x3b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x3b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x5b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x5b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x6b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x6b, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); + +static enum cpuhp_state zhaoxin_temp_online; + +static int __init zhaoxin_cputemp_init(void) +{ + int err; + + if (!x86_match_cpu(cputemp_ids)) + return -ENODEV; + + err = platform_driver_register(&zhaoxin_cputemp_driver); + if (err) + goto exit; + + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/zhaoxin:online", + 
zhaoxin_cputemp_online, zhaoxin_cputemp_down_prep); + if (err < 0) + goto exit_driver_unreg; + + zhaoxin_temp_online = err; + +#ifndef CONFIG_HOTPLUG_CPU + if (list_empty(&pdev_list)) { + err = -ENODEV; + goto exit_hp_unreg; + } +#endif + return 0; + +#ifndef CONFIG_HOTPLUG_CPU +exit_hp_unreg: + cpuhp_remove_state_nocalls(zhaoxin_temp_online); +#endif +exit_driver_unreg: + platform_driver_unregister(&zhaoxin_cputemp_driver); +exit: + return err; +} + +static void __exit zhaoxin_cputemp_exit(void) +{ + cpuhp_remove_state(zhaoxin_temp_online); + platform_driver_unregister(&zhaoxin_cputemp_driver); +} + +MODULE_DESCRIPTION("Zhaoxin CPU temperature monitor"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(HWMON_THERMAL); + +module_init(zhaoxin_cputemp_init) +module_exit(zhaoxin_cputemp_exit) -- Gitee From df1b3194aaad61c6895c8036e5a79284042eb823 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 5 Mar 2024 18:21:30 +0800 Subject: [PATCH 0246/2138] anolis: configs: enabled zhaoxin-cputemp as module ANBZ: #8437 Set zhaoxin cputemp driver as module: CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2821 --- arch/x86/configs/anolis_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 3bafc6bbc1fe..38b641e976d0 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -4232,6 +4232,7 @@ CONFIG_SENSORS_TMP421=m # CONFIG_SENSORS_TMP464 is not set # CONFIG_SENSORS_TMP513 is not set CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m CONFIG_SENSORS_VIA686A=m CONFIG_SENSORS_VT1211=m CONFIG_SENSORS_VT8231=m -- Gitee From 4ebedd2c326eaf3ecbdc33cf78ff2d159076c6f8 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 4 Mar 2024 10:12:17 +0800 Subject: [PATCH 0247/2138] anolis: USB:Fix kernel NULL pointer when unbind UHCI form vfio-pci ANBZ: #7809 This bug is found in Zhaoxin platform, 
but it's a commom code bug. Fail sequence: step1: Unbind UHCI controller from native driver; step2: Bind UHCI controller to vfio-pci, which will put UHCI controller in one vfio group's device list and set UHCI's dev->driver_data to struct vfio-pci(for UHCI) step3: Unbind EHCI controller from native driver, will try to tell UHCI native driver that "I'm removed by set companion_hcd->self.hs_companion to NULL. However, companion_hcd get from UHCI's dev->driver_data that has modified by vfio-pci already. So, the vfio-pci structure will be damaged! step4: Bind EHCI controller to vfio-pci driver, which will put EHCI controller in the same vfio group as UHCI controller; ... ... step5: Unbind UHCI controller from vfio-pci, which will delete UHCI from vfio group device list that has been damaged in step 3. So, delete operation can random result into a NULL pointer dereference with the below stack dump. step6: Bind UHCI controller to native driver; step7: Unbind EHCI controller from vfio-pci, which will try to remove EHCI controller from the vfio group; step8: Bind EHCI controller to native driver; [ 929.114641] uhci_hcd 0000:00:10.0: remove, state 1 [ 929.114652] usb usb1: USB disconnect, device number 1 [ 929.114655] usb 1-1: USB disconnect, device number 2 [ 929.270313] usb 1-2: USB disconnect, device number 3 [ 929.318404] uhci_hcd 0000:00:10.0: USB bus 1 deregistered [ 929.343029] uhci_hcd 0000:00:10.1: remove, state 4 [ 929.343045] usb usb3: USB disconnect, device number 1 [ 929.343685] uhci_hcd 0000:00:10.1: USB bus 3 deregistered [ 929.369087] ehci-pci 0000:00:10.7: remove, state 4 [ 929.369102] usb usb4: USB disconnect, device number 1 [ 929.370325] ehci-pci 0000:00:10.7: USB bus 4 deregistered [ 932.398494] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000 [ 932.398496] PGD 42a67d067 P4D 42a67d067 PUD 42a65f067 PMD 0 [ 932.398502] Oops: 0002 [#2] SMP NOPTI [ 932.398505] CPU: 2 PID: 7824 Comm: vfio_unbind.sh Tainted: P D 
4.19.65-2020051917-rainos #1 [ 932.398506] Hardware name: Shanghai Zhaoxin Semiconductor Co., Ltd. HX002EH/HX002EH, BIOS HX002EH0_01_R480_R_200408 04/08/2020 [ 932.398513] RIP: 0010:vfio_device_put+0x31/0xa0 [vfio] [ 932.398515] Code: 89 e5 41 54 53 4c 8b 67 18 48 89 fb 49 8d 74 24 30 e8 e3 0e f3 de 84 c0 74 67 48 8b 53 20 48 8b 43 28 48 8b 7b 18 48 89 42 08 <48> 89 10 48 b8+G26 00 01 00 00 00 00 ad de 48 89 43 20 48 b8 00 02 00 [ 932.398516] RSP: 0018:ffffbbfd04cffc18 EFLAGS: 00010202 [ 932.398518] RAX: 0000000000000000 RBX: ffff92c7ea717880 RCX: 0000000000000000 [ 932.398519] RDX: ffff92c7ea713620 RSI: ffff92c7ea713630 RDI: ffff92c7ea713600 [ 932.398521] RBP: ffffbbfd04cffc28 R08: ffff92c7f02a8080 R09: ffff92c7efc03980 [ 932.398522] R10: ffffbbfd04cff9a8 R11: 0000000000000000 R12: ffff92c7ea713600 [ 932.398523] R13: ffff92c7ed8bb0a8 R14: ffff92c7ea717880 R15: 0000000000000000 [ 932.398525] FS: 00007f3031500740(0000) GS:ffff92c7f0280000(0000) knlGS:0000000000000000 [ 932.398526] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 932.398527] CR2: 0000000000000000 CR3: 0000000428626004 CR4: 0000000000160ee0 [ 932.398528] Call Trace: [ 932.398534] vfio_del_group_dev+0xe8/0x2a0 [vfio] [ 932.398539] ? __blocking_notifier_call_chain+0x52/0x60 [ 932.398542] ? do_wait_intr_irq+0x90/0x90 [ 932.398546] ? iommu_bus_notifier+0x75/0x100 [ 932.398551] vfio_pci_remove+0x20/0xa0 [vfio_pci] [ 932.398554] pci_device_remove+0x3e/0xc0 [ 932.398557] device_release_driver_internal+0x17a/0x240 [ 932.398560] device_release_driver+0x12/0x20 [ 932.398561] unbind_store+0xee/0x180 [ 932.398564] drv_attr_store+0x27/0x40 [ 932.398567] sysfs_kf_write+0x3c/0x50 [ 932.398568] kernfs_fop_write+0x125/0x1a0 [ 932.398572] __vfs_write+0x3a/0x190 [ 932.398575] ? apparmor_file_permission+0x1a/0x20 [ 932.398577] ? security_file_permission+0x3b/0xc0 [ 932.398581] ? 
_cond_resched+0x1a/0x50 [ 932.398582] vfs_write+0xb8/0x1b0 [ 932.398584] ksys_write+0x5c/0xe0 [ 932.398586] __x64_sys_write+0x1a/0x20 [ 932.398589] do_syscall_64+0x5a/0x110 [ 932.398592] entry_SYSCALL_64_after_hwframe+0x44/0xa9 Using virt-manager/qemu to boot guest os, we can see the same fail sequence! Fix this by determine whether the PCI Driver of the USB controller is a kernel native driver. If not, do not let it modify UHCI's dev->driver_data. Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2693 --- drivers/usb/core/hcd-pci.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 990280688b25..df8f91e6a2c7 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -48,6 +48,9 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd, struct pci_dev *companion; struct usb_hcd *companion_hcd; unsigned int slot = PCI_SLOT(pdev->devfn); +#if IS_ENABLED(CONFIG_X86) + struct pci_driver *drv; +#endif /* * Iterate through other PCI functions in the same slot. @@ -60,6 +63,18 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd, PCI_SLOT(companion->devfn) != slot) continue; +#if IS_ENABLED(CONFIG_X86) + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + drv = companion->driver; + if (drv && + strncmp(drv->name, "uhci_hcd", sizeof("uhci_hcd") - 1) && + strncmp(drv->name, "ohci-pci", sizeof("ohci-pci") - 1) && + strncmp(drv->name, "ehci-pci", sizeof("ehci-pci") - 1)) + continue; + } +#endif + /* * Companion device should be either UHCI,OHCI or EHCI host * controller, otherwise skip. 
-- Gitee From e3d9f44af5c2999481be355897a9cb9915bd1912 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 2 Jan 2024 15:09:56 +0800 Subject: [PATCH 0248/2138] anolis: rtc: Fix set RTC time delay 500ms on some Zhaoxin SOCs ANBZ: #7809 When the RTC divider is changed from reset to an operating time base, the first update cycle should be 500ms later. But on some Zhaoxin SOCs, this first update cycle is one second later. So set RTC time on these Zhaoxin SOCs will causing 500ms delay. Skip setup RTC divider on these SOCs in mc146818_set_time to fix it. Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2696 --- drivers/rtc/rtc-mc146818-lib.c | 31 ++++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index 651bf3c279c7..6b5947ec6e55 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -11,6 +11,21 @@ #define UIP_RECHECK_DELAY 100 /* usec */ #define UIP_RECHECK_DELAY_MS (USEC_PER_MSEC / UIP_RECHECK_DELAY) #define UIP_RECHECK_LOOPS_MS(x) (x / UIP_RECHECK_DELAY_MS) +#ifdef CONFIG_X86 +static inline bool follow_mc146818_divider_reset(void) +{ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) && + (boot_cpu_data.x86 <= 7 && boot_cpu_data.x86_model <= 59)) + return false; + return true; +} +#else +static inline bool follow_mc146818_divider_reset(void) +{ + return true; +} +#endif /* * Execute a function while the UIP (Update-in-progress) bit of the RTC is @@ -280,12 +295,13 @@ int mc146818_set_time(struct rtc_time *time) spin_lock_irqsave(&rtc_lock, flags); save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); - save_freq_select = CMOS_READ(RTC_FREQ_SELECT); - if (apply_amd_register_a_behavior()) - CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT); - else - 
CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); - + if (follow_mc146818_divider_reset()) { + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); + if (apply_amd_register_a_behavior()) + CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT); + else + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + } #ifdef CONFIG_MACH_DECSTATION CMOS_WRITE(real_yrs, RTC_DEC_YEAR); #endif @@ -302,7 +318,8 @@ int mc146818_set_time(struct rtc_time *time) #endif CMOS_WRITE(save_control, RTC_CONTROL); - CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + if (follow_mc146818_divider_reset()) + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); spin_unlock_irqrestore(&rtc_lock, flags); -- Gitee From bf7ebcf1206a04f2dd48438e0ad232b5a58dbdde Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Wed, 27 Dec 2023 21:05:05 +0800 Subject: [PATCH 0249/2138] anolis: Turning off Zhaoxin ahci controller runtime pm from sysfs ANBZ: #7809 There exits some problems with enabling ahci controller runtime pm on various Zhaoxin platforms. For some Zhaoxin CPUs, link into listen mode, and PHY into P2, which will cause the device to continuously send comminit. When the controller is in D3, the comminit will continuously wake up the controller, resulting in the controller being unable to stabilize at D3; For another Zhaoxin CPUs, after entering and exiting runtime pm for a certain number of times, ahci controller reset accumulates for many times, which will make ahci and PXPTRF P2CW think that the other party can no longer receive requests and block the P2CW path, resulting in failure to recognize the device; There are alse some Zhaoxin CPUs, the test is normal, but considering that the server platform is not sensitive to power consumption, and this part has very little impact on power consumption; In summary, it is recommended to close the zx ahci controller runtime pm. 
Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2698 --- drivers/base/power/sysfs.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index a1474fb67db9..525574c312d3 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "power.h" /* @@ -108,7 +109,19 @@ static ssize_t control_show(struct device *dev, struct device_attribute *attr, static ssize_t control_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t n) { + struct pci_dev *pdev = (!dev || !dev_is_pci(dev)) ? NULL : to_pci_dev(dev); + device_lock(dev); + + /* Zhaoxin sata controller may occur error when resume from runtime pm, so disable it */ + if (pdev && + pdev->vendor == PCI_VENDOR_ID_ZHAOXIN && + pdev->device == 0x9083 && + pdev->revision <= 0x20) { + device_unlock(dev); + return -EPERM; + } + if (sysfs_streq(buf, ctrl_auto)) pm_runtime_allow(dev); else if (sysfs_streq(buf, ctrl_on)) -- Gitee From 3c2ca6561d9fb6031e13a261d34458646be4d502 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 5 Feb 2024 16:09:33 +0800 Subject: [PATCH 0250/2138] anolis: Add support Zhaoxin GPIO pinctrl ANBZ: #7809 Implements gpio interrupt and gpio management functions and provides standard pinctrl and gpio interfaces. 
Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2709 --- drivers/pinctrl/Kconfig | 1 + drivers/pinctrl/Makefile | 1 + drivers/pinctrl/zhaoxin/Kconfig | 28 + drivers/pinctrl/zhaoxin/Makefile | 4 + drivers/pinctrl/zhaoxin/pinctrl-kx7000.c | 354 ++++++++++ drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c | 758 ++++++++++++++++++++++ drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h | 136 ++++ 7 files changed, 1282 insertions(+) create mode 100644 drivers/pinctrl/zhaoxin/Kconfig create mode 100644 drivers/pinctrl/zhaoxin/Makefile create mode 100644 drivers/pinctrl/zhaoxin/pinctrl-kx7000.c create mode 100644 drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c create mode 100644 drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 7dfb7190580e..79753411b778 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -512,6 +512,7 @@ source "drivers/pinctrl/berlin/Kconfig" source "drivers/pinctrl/cirrus/Kconfig" source "drivers/pinctrl/freescale/Kconfig" source "drivers/pinctrl/intel/Kconfig" +source "drivers/pinctrl/zhaoxin/Kconfig" source "drivers/pinctrl/mediatek/Kconfig" source "drivers/pinctrl/meson/Kconfig" source "drivers/pinctrl/mvebu/Kconfig" diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index dd6cda270294..4275eca92488 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -58,6 +58,7 @@ obj-$(CONFIG_PINCTRL_BERLIN) += berlin/ obj-y += cirrus/ obj-y += freescale/ obj-$(CONFIG_X86) += intel/ +obj-$(CONFIG_X86) += zhaoxin/ obj-y += mediatek/ obj-$(CONFIG_PINCTRL_MESON) += meson/ obj-y += mvebu/ diff --git a/drivers/pinctrl/zhaoxin/Kconfig b/drivers/pinctrl/zhaoxin/Kconfig new file mode 100644 index 000000000000..65f95ca80d5c --- /dev/null +++ b/drivers/pinctrl/zhaoxin/Kconfig @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0 +# Intel pin control drivers + +if (X86 || COMPILE_TEST) + +config PINCTRL_ZHAOXIN + tristate + select PINMUX 
+ select PINCONF + select GENERIC_PINCONF + select GPIOLIB + select GPIOLIB_IRQCHIP + +config PINCTRL_KX7000 + tristate "Zhaoxin KX7000 pinctrl and GPIO driver" + depends on ACPI && X86 + default m + select PINCTRL_ZHAOXIN + help + This pinctrl driver provides an interface that allows configuring + of Zhaoxin KX7000 chipset pins and using them as GPIOs. + + To compile this driver as a module, choose M here: the + module will be called pinctrl-kx7000. + + If unsure, say Y. + +endif diff --git a/drivers/pinctrl/zhaoxin/Makefile b/drivers/pinctrl/zhaoxin/Makefile new file mode 100644 index 000000000000..a3acfa66f196 --- /dev/null +++ b/drivers/pinctrl/zhaoxin/Makefile @@ -0,0 +1,4 @@ +# zhaoxin pin control drivers + +obj-$(CONFIG_PINCTRL_ZHAOXIN) += pinctrl-zhaoxin.o +obj-$(CONFIG_PINCTRL_KX7000) += pinctrl-kx7000.o diff --git a/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c b/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c new file mode 100644 index 000000000000..f249dd369e7c --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * zhaoxin KX7000 pinctrl/GPIO driver + * + * Copyright(c) 2023 Shanghai Zhaoxin Corporation. All rights reserved. 
+ * + */ + +#define DRIVER_VERSION "1.0.0" + +#include +#include +#include + +#include + +#include "pinctrl-zhaoxin.h" + +#define ZX_CAL_ARRAY(a, b) \ +{ \ + .pmio_offset = (a), \ + .size = (b), \ +} + +#define PMIO_RX90 100 +#define PMIO_RX8C 200 + +#define ZX_CAL_INDEX_ARRAY(a, b, c) \ +{ \ + .reg_port_base = (PMIO_RX90), \ + .reg_data_base = (PMIO_RX8C), \ + .index = (a), \ + .cal_array = (b), \ + .size = (c), \ +} + +/* kx7000 pin define */ +static const struct pinctrl_pin_desc kx7000_pins[] = { + + PINCTRL_PIN(0, "IOD_CPUTCK"), + PINCTRL_PIN(1, "IOD_CPUTMS"), + PINCTRL_PIN(2, "IOD_CPUTRST"), + PINCTRL_PIN(3, "IOD_CPUTDO"), + PINCTRL_PIN(4, "IOD_CPUTDI"), + PINCTRL_PIN(5, "IOD_ZLSCLK0"), + PINCTRL_PIN(6, "IOD_ZLDATA0"), + PINCTRL_PIN(7, "IOD_ZLSCLK1"), + PINCTRL_PIN(8, "IOD_ZLDATA1"), + PINCTRL_PIN(9, "IOD_CLK27M"), + PINCTRL_PIN(10, "IOD_CPURST"), + PINCTRL_PIN(11, "IOD_PWORK"), + PINCTRL_PIN(12, "IOD_RSMRST"), + PINCTRL_PIN(13, "IOD_THRMTRIP"), + //GPIO range 0 + PINCTRL_PIN(14, "USBHOC0"), + PINCTRL_PIN(15, "USBHOC1"), + PINCTRL_PIN(16, "USBHOC2"), + PINCTRL_PIN(17, "USBHOC3"), + PINCTRL_PIN(18, "USBHOC4"), + PINCTRL_PIN(19, "USBHOC5"), + PINCTRL_PIN(20, "USBHOC6"), + PINCTRL_PIN(21, "USBHOC7"), + //gpio range 1 + PINCTRL_PIN(22, "USB4SBTX0"), + PINCTRL_PIN(23, "USB4SBRX0"), + PINCTRL_PIN(24, "USB4SBTX1"), + PINCTRL_PIN(25, "USB4SBRX1"), + //gpio range 2 + PINCTRL_PIN(26, "I2C1DT"), + PINCTRL_PIN(27, "I2C1CK"), + PINCTRL_PIN(28, "I2C1INT"), + //gpio range 3 + PINCTRL_PIN(29, "I2C2DT"), + PINCTRL_PIN(30, "I2C2CK"), + //gpio range 4 + PINCTRL_PIN(31, "I2C2INT"), + //gpio range 5 + PINCTRL_PIN(32, "SMBDT1"), + PINCTRL_PIN(33, "SMBCK1"), + PINCTRL_PIN(34, "SMBDT2"), + PINCTRL_PIN(35, "SMBCK2"), + PINCTRL_PIN(36, "SMBALRT"), + //gpio range 6 + PINCTRL_PIN(37, "SME_I2CDT"), + PINCTRL_PIN(38, "SME_I2CCK"), + //gpio range 7 + PINCTRL_PIN(39, "PWM"), + PINCTRL_PIN(40, "TACH"), + //gpio range 8 + PINCTRL_PIN(41, "GPIO0"), + PINCTRL_PIN(42, "GPIO1"), + PINCTRL_PIN(43, 
"GPIO2"), + PINCTRL_PIN(44, "GPIO3"), + PINCTRL_PIN(45, "GPIO4"), + PINCTRL_PIN(46, "GPIO5"), + PINCTRL_PIN(47, "GPIO6"), + PINCTRL_PIN(48, "GPIO7"), + PINCTRL_PIN(49, "GPIO8"), + PINCTRL_PIN(50, "GPIO9"), + PINCTRL_PIN(51, "LPCCLK"), + PINCTRL_PIN(52, "LPCDRQ1"), + //gpio range 9 + PINCTRL_PIN(53, "LPCDRQ0"), + PINCTRL_PIN(54, "LPCFRAME"), + PINCTRL_PIN(55, "LPCAD3"), + PINCTRL_PIN(56, "LPCAD2"), + PINCTRL_PIN(57, "LPCAD1"), + PINCTRL_PIN(58, "LPCAD0"), + //gpio range 10 + PINCTRL_PIN(59, "SERIRQ"), + PINCTRL_PIN(60, "AZRST"), + PINCTRL_PIN(61, "AZBITCLK"), + PINCTRL_PIN(62, "AZSDIN0"), + PINCTRL_PIN(63, "AZSDIN1"), + PINCTRL_PIN(64, "AZSDOUT"), + PINCTRL_PIN(65, "AZSYNC"), + //gpio range 11 + PINCTRL_PIN(66, "I2S1_SCLK"), + PINCTRL_PIN(67, "I2S1_TXD"), + PINCTRL_PIN(68, "I2S1_WS"), + PINCTRL_PIN(69, "I2S1_MCLK"), + //gpio range 12 + PINCTRL_PIN(70, "I2S1_RXD"), + //gpio range 13 + PINCTRL_PIN(71, "I2S1_INT"), + PINCTRL_PIN(72, "MSPIDI"), + PINCTRL_PIN(73, "MSPIDO"), + PINCTRL_PIN(74, "MSPIIO2"), + PINCTRL_PIN(75, "MSPIIO3"), + PINCTRL_PIN(76, "MSPICLK"), + PINCTRL_PIN(77, "MSPISS0"), + //gpio range 14 + PINCTRL_PIN(78, "MSPISS1"), + PINCTRL_PIN(79, "MSPISS2"), + //gpio range 15 + PINCTRL_PIN(80, "SPIDEVINT"), + PINCTRL_PIN(81, "BIOSSEL"), + //gpio range 16 + PINCTRL_PIN(82, "THRM"), + PINCTRL_PIN(83, "PEXWAKE"), + PINCTRL_PIN(84, "PWRBTN"), + //gpio range 17 + PINCTRL_PIN(85, "SPKR"), + PINCTRL_PIN(86, "PME"), + //gpio range 18 + PINCTRL_PIN(87, "BATLOW"), + PINCTRL_PIN(88, "EXTSMI"), + PINCTRL_PIN(89, "SUSA"), + PINCTRL_PIN(90, "SUSB"), + PINCTRL_PIN(91, "SUSC"), + PINCTRL_PIN(92, "GPWAKE"), + PINCTRL_PIN(93, "RING"), + PINCTRL_PIN(94, "LID"), + PINCTRL_PIN(95, "SLPS0"), + PINCTRL_PIN(96, "PCIRST"), + PINCTRL_PIN(97, "SVID_VREN"), + //gpio range 19 + PINCTRL_PIN(98, "INTRUDER"), + //gpio range 20 + PINCTRL_PIN(99, "GFX_I2CCLK0"), + PINCTRL_PIN(100, "GFX_I2CDAT0"), + PINCTRL_PIN(101, "GFX_I2CCLK1"), + PINCTRL_PIN(102, "GFX_I2CDAT1"), + PINCTRL_PIN(103, 
"GFX_I2CCLK2"), + PINCTRL_PIN(104, "GFX_I2CDAT2"), + PINCTRL_PIN(105, "GFX_I2CCLK3"), + PINCTRL_PIN(106, "GFX_I2CDAT3"), + PINCTRL_PIN(107, "GFX_GPIO0"), + PINCTRL_PIN(108, "GFX_GPIO1"), + PINCTRL_PIN(109, "GFX_GPIO2"), + PINCTRL_PIN(110, "GFX_GPIO3"), + PINCTRL_PIN(111, "CRTHSYNC"), + PINCTRL_PIN(112, "CRTVSYNC"), +}; + +#define NOT_DEFINE -30000 + +static int calibrate_int[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 63, 64, 65, 66, 67, 68, + 69, 70, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 +}; + +static int calibrate_sattus[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 63, 64, 65, 66, 67, 68, + 69, 70, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 +}; + +static const struct reg_cal_array kx7000_int_cal[] = { + ZX_CAL_ARRAY(0x58, 16), + ZX_CAL_ARRAY(0x5A, 2), + ZX_CAL_ARRAY(0xDA, 16), + ZX_CAL_ARRAY(0xDE, 16), +}; + +static const struct reg_calibrate int_cal[] = { + { + .reg = kx7000_int_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_int_cal), + .cal_array = calibrate_int, + .size = ARRAY_SIZE(calibrate_int), + } +}; + +static const struct reg_cal_array kx7000_status_cal[] = { + ZX_CAL_ARRAY((0x8), 16), + ZX_CAL_ARRAY((0xE), 2), + ZX_CAL_ARRAY((0xA), 16), + ZX_CAL_ARRAY((0xC), 16), +}; + +static const struct reg_calibrate status_cal[] = { + { + .reg = kx7000_status_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_status_cal), + .cal_array = calibrate_sattus, + .size = ARRAY_SIZE(calibrate_sattus), + } +}; + +static const struct reg_cal_array kx7000_mod_sel_cal[] = { + ZX_CAL_ARRAY((0x0), 16), + ZX_CAL_ARRAY((0x6), 2), + ZX_CAL_ARRAY((0x2), 16), + ZX_CAL_ARRAY((0x4), 16), +}; + +static const struct reg_calibrate mod_sel_cal[] = { + { + .reg = kx7000_mod_sel_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_mod_sel_cal), + .cal_array = calibrate_sattus, + .size = ARRAY_SIZE(calibrate_sattus), + } +}; + +static const struct 
index_cal_array kx7000_gpio_in_cal[] = { + ZX_CAL_INDEX_ARRAY(0x98, NULL, 71), +}; + +static const struct index_cal_array kx7000_gpio_out_cal[] = { + ZX_CAL_INDEX_ARRAY(0x90, NULL, 71), +}; + +static int calibrate_trigger[] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 18, 19, + 20, 21, 22, 23, + 24, 25, 26, 27, + 28, 29, 30, 31, + 32, 33, 34, 35, + 36, 50, 51, 52, + 53, 54, 55, 56, + 57, 58, 59, 60, + 61, 62, 63, 64, + 65, 66, 67, 68, + 69, 70 +}; + +static const struct index_cal_array kx7000_trigger_cal[] = { + ZX_CAL_INDEX_ARRAY(0xA0, calibrate_trigger, 50), +}; + +static const struct zhaoxin_pin_topology kx7000_pin_topologys[] = { + { + .int_cal = int_cal, + .status_cal = status_cal, + .mod_sel_cal = mod_sel_cal, + .gpio_in_cal = kx7000_gpio_in_cal, + .gpio_out_cal = kx7000_gpio_out_cal, + .trigger_cal = kx7000_trigger_cal, + } +}; + +#define KX7000_GPP(s, e, g) \ +{ \ + .zhaoxin_range_pin_base = (s), \ + .zhaoxin_range_pin_size = ((e) - (s) + 1), \ + .zhaoxin_range_gpio_base = (g), \ +} + +static const struct zhaoxin_pin_map2_gpio kx7000_pinmap_gpps[] = { + KX7000_GPP(0, 13, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(14, 19, 10), + KX7000_GPP(20, 21, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(22, 25, 65), + KX7000_GPP(26, 28, 43), + KX7000_GPP(29, 30, 41), + KX7000_GPP(31, 31, 49), + KX7000_GPP(32, 36, 16), + KX7000_GPP(37, 38, 69), + KX7000_GPP(39, 40, 67), + KX7000_GPP(41, 50, 0), + KX7000_GPP(51, 52, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(53, 53, 39), + KX7000_GPP(54, 58, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(59, 59, 40), + KX7000_GPP(60, 65, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(66, 69, 35), + KX7000_GPP(70, 70, 46), + KX7000_GPP(71, 71, 64), + KX7000_GPP(72, 77, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(78, 78, 50), + KX7000_GPP(79, 79, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(80, 80, 51), + KX7000_GPP(81, 81, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(82, 82, 52), + KX7000_GPP(83, 84, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(85, 85, 53), + KX7000_GPP(86, 86, 
ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(87, 95, 54), + KX7000_GPP(96, 97, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(98, 98, 63), + KX7000_GPP(99, 112, 21), +}; + +static const struct zhaoxin_pinctrl_soc_data kx7000_soc_data = { + .pins = kx7000_pins, + .npins = ARRAY_SIZE(kx7000_pins), + .pin_topologys = kx7000_pin_topologys, + .zhaoxin_pin_maps = kx7000_pinmap_gpps, + .pin_map_size = ARRAY_SIZE(kx7000_pinmap_gpps), +}; + +static const struct acpi_device_id kx7000_pinctrl_acpi_match[] = { + { "KX8344B", (kernel_ulong_t)&kx7000_soc_data }, + { } +}; +MODULE_DEVICE_TABLE(acpi, kx7000_pinctrl_acpi_match); + +static const struct dev_pm_ops kx7000_pinctrl_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(zhaoxin_pinctrl_suspend_noirq, zhaoxin_pinctrl_resume_noirq) +}; + +static struct platform_driver kx7000_pinctrl_driver = { + .probe = zhaoxin_pinctrl_probe_by_hid, + .driver = { + .name = "kx7000-pinctrl", + .acpi_match_table = kx7000_pinctrl_acpi_match, + .pm = &kx7000_pinctrl_pm_ops, + }, +}; + +module_platform_driver(kx7000_pinctrl_driver); + +MODULE_AUTHOR("www.zhaoxin.com"); +MODULE_DESCRIPTION("Shanghai Zhaoxin pinctrl driver"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c new file mode 100644 index 000000000000..1e434869d3dd --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c @@ -0,0 +1,758 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * zhaoxin pinctrl common code + * + * Copyright(c) 2021 Shanghai Zhaoxin Corporation. All rights reserved. 
+ * + */ + +#define DRIVER_VERSION "1.0.0" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "../core.h" +#include "pinctrl-zhaoxin.h" + +static int pin_to_hwgpio(struct pinctrl_gpio_range *range, unsigned int pin) +{ + int offset = 0; + + if (range->pins) { + for (offset = 0; offset < range->npins; offset++) + if (pin == range->pins[offset]) + break; + return range->base+offset-range->gc->base; + } else + return pin-range->pin_base+range->base-range->gc->base; +} + +static u16 zx_pad_read16(struct zhaoxin_pinctrl *pctrl, u8 index) +{ + outb(index, pctrl->pmio_rx90+pctrl->pmio_base); + return inw(pctrl->pmio_rx8c+pctrl->pmio_base); +} + +static void zx_pad_write16(struct zhaoxin_pinctrl *pctrl, u8 index, u16 value) +{ + outb(index, pctrl->pmio_rx90+pctrl->pmio_base); + outw(value, pctrl->pmio_rx8c+pctrl->pmio_base); +} + +static int zhaoxin_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->ngroups; +} + +static const char *zhaoxin_get_group_name(struct pinctrl_dev *pctldev, unsigned int group) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->groups[group].name; +} + +static int zhaoxin_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group, + const unsigned int **pins, unsigned int *npins) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + *pins = pctrl->soc->groups[group].pins; + *npins = pctrl->soc->groups[group].npins; + + return 0; +} + +static void zhaoxin_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned int pin) +{ + +} + +static const struct pinctrl_ops zhaoxin_pinctrl_ops = { + .get_groups_count = zhaoxin_get_groups_count, + .get_group_name = zhaoxin_get_group_name, + .get_group_pins = zhaoxin_get_group_pins, + .pin_dbg_show = zhaoxin_pin_dbg_show, +}; + +static int 
zhaoxin_get_functions_count(struct pinctrl_dev *pctldev) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->nfunctions; +} + +static const char *zhaoxin_get_function_name(struct pinctrl_dev *pctldev, unsigned int function) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->functions[function].name; +} + +static int zhaoxin_get_function_groups(struct pinctrl_dev *pctldev, unsigned int function, + const char * const **groups, unsigned int *const ngroups) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + *groups = pctrl->soc->functions[function].groups; + *ngroups = pctrl->soc->functions[function].ngroups; + + return 0; +} + +static int zhaoxin_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, + unsigned int group) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + dev_dbg(pctrl->dev, "%s,group=%d,func=%d\n", __func__, group, function); + return 0; +} + +#define ZHAOXIN_PULL_UP_20K 0x80 +#define ZHAOXIN_PULL_UP_10K 0x40 +#define ZHAOXIN_PULL_UP_47K 0x20 +#define ZHAOXIN_PULL_DOWN 0x10 + +#define ZHAOXIN_PULL_UP 0xe0 + +static void zhaoxin_gpio_set_gpio_mode_and_pull(struct zhaoxin_pinctrl *pctrl, unsigned int pin, + bool isup) +{ + u16 tmp = 0; + u16 value; + u16 value_back = 0; + + if (isup) + tmp = ZHAOXIN_PULL_UP_10K|1; + else + tmp = ZHAOXIN_PULL_DOWN|1; + value = zx_pad_read16(pctrl, pin); + + /* for gpio */ + if (pin <= 0x32 && pin >= 0x29) { + if (isup) { + value &= (~(ZHAOXIN_PULL_DOWN)); + value |= tmp; + } else { + value &= (~(ZHAOXIN_PULL_UP)); + value |= tmp; + } + value &= ~(0x1); + zx_pad_write16(pctrl, pin, value); + value_back = zx_pad_read16(pctrl, pin); + } else {/* for pgpio */ + if (isup) { + value &= (~(ZHAOXIN_PULL_DOWN)); + value |= tmp; + } else { + value &= (~(ZHAOXIN_PULL_UP)); + value |= tmp; + } + value |= 0x1; + zx_pad_write16(pctrl, pin, value); + value_back = zx_pad_read16(pctrl, pin); + 
} +} + +static int zhaoxin_gpio_request_enable(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, unsigned int pin) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + int hwgpio = pin_to_hwgpio(range, pin); + + dev_dbg(pctrl->dev, "%s, hwgpio=%d, pin=%d\n", __func__, hwgpio, pin); + zhaoxin_gpio_set_gpio_mode_and_pull(pctrl, pin, true); + return 0; +} + +static const struct pinmux_ops zhaoxin_pinmux_ops = { + .get_functions_count = zhaoxin_get_functions_count, + .get_function_name = zhaoxin_get_function_name, + .get_function_groups = zhaoxin_get_function_groups, + .set_mux = zhaoxin_pinmux_set_mux, + .gpio_request_enable = zhaoxin_gpio_request_enable, +}; + +static int zhaoxin_config_get(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *config) +{ + return 0; +} + +static int zhaoxin_config_set(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *configs, + unsigned int nconfigs) +{ + return 0; +} + +static const struct pinconf_ops zhaoxin_pinconf_ops = { + .is_generic = true, + .pin_config_get = zhaoxin_config_get, + .pin_config_set = zhaoxin_config_set, +}; + +static const struct pinctrl_desc zhaoxin_pinctrl_desc = { + .pctlops = &zhaoxin_pinctrl_ops, + .pmxops = &zhaoxin_pinmux_ops, + .confops = &zhaoxin_pinconf_ops, + .owner = THIS_MODULE, +}; + +static int zhaoxin_gpio_to_pin(struct zhaoxin_pinctrl *pctrl, unsigned int offset, + const struct zhaoxin_pin_topology **community, + const struct zhaoxin_pin_map2_gpio **padgrp) +{ + int i; + + for (i = 0; i < pctrl->pin_map_size; i++) { + const struct zhaoxin_pin_map2_gpio *map = &pctrl->pin_maps[i]; + + if (map->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP) + continue; + if (offset >= map->zhaoxin_range_gpio_base && + offset < map->zhaoxin_range_gpio_base + map->zhaoxin_range_pin_size) { + int pin; + + pin = map->zhaoxin_range_pin_base + offset - map->zhaoxin_range_gpio_base; + if (padgrp) + *padgrp = map; + return pin; + } + } + return -EINVAL; +} + 
+static __maybe_unused int zhaoxin_pin_to_gpio(struct zhaoxin_pinctrl *pctrl, int pin) +{ + const struct zhaoxin_pin_map2_gpio *pin_maps; + + pin_maps = pctrl->pin_maps; + if (!pin_maps) + return -EINVAL; + + return pin - pin_maps->zhaoxin_range_pin_base + pin_maps->zhaoxin_range_gpio_base; +} + +static int zhaoxin_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(chip); + const struct index_cal_array *gpio_in_cal; + int gap = offset/16; + int bit = offset%16; + int pin; + int value; + + gpio_in_cal = pctrl->pin_topologys->gpio_in_cal; + pin = zhaoxin_gpio_to_pin(pctrl, offset, NULL, NULL); + value = zx_pad_read16(pctrl, gpio_in_cal->index+gap); + value &= (1<pin_topologys->gpio_out_cal; + pin = zhaoxin_gpio_to_pin(pctrl, offset, NULL, NULL); + + raw_spin_lock_irqsave(&pctrl->lock, flags); + + org = zx_pad_read16(pctrl, gpio_out_cal->index+gap); + if (value) + org |= (1<index+gap, org); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); +} + +static int zhaoxin_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) +{ + return pinctrl_gpio_direction_input(chip->base + offset); +} + +static int zhaoxin_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) +{ + return pinctrl_gpio_direction_output(chip->base + offset); +} + +static int zhaoxin_gpio_request(struct gpio_chip *gc, unsigned int offset) +{ + return gpiochip_generic_request(gc, offset); +} + +static void zhaoxin_gpio_free(struct gpio_chip *gc, unsigned int offset) +{ + gpiochip_generic_free(gc, offset); +} + +static int zhaoxin_gpio_config(struct gpio_chip *gc, unsigned int offset, unsigned long config) +{ + return gpiochip_generic_config(gc, offset, config); +} + +static const struct gpio_chip zhaoxin_gpio_chip = { + .owner = THIS_MODULE, + .request = zhaoxin_gpio_request, + .free = zhaoxin_gpio_free, + .direction_input = zhaoxin_gpio_direction_input, + .direction_output = zhaoxin_gpio_direction_output, + .get = 
zhaoxin_gpio_get, + .set = zhaoxin_gpio_set, + .set_config = zhaoxin_gpio_config, +}; + +static void zhaoxin_gpio_irq_ack(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc); + const struct reg_calibrate *status_cal; + const struct reg_cal_array *reg_off; + int gpio = irqd_to_hwirq(d); + int i, j; + int offset = 0; + int base_offset = 0; + int bit_off = 0; + u16 value; + u16 value_read; + + status_cal = pctrl->pin_topologys->status_cal; + if (gpio >= 0) { + for (i = 0; i < status_cal->size; i++) + if (gpio == status_cal->cal_array[i]) + break; + for (j = 0; j < status_cal->reg_cal_size; j++) { + if (offset > i) + break; + offset += status_cal->reg[j].size; + } + reg_off = &status_cal->reg[j-1]; + bit_off = i-(offset-reg_off->size); + base_offset = reg_off->pmio_offset; + value = readw(pctrl->pm_pmio_base+reg_off->pmio_offset); + value_read = value; + value |= (1<pm_pmio_base+reg_off->pmio_offset); + } +} + +static void zhaoxin_gpio_irq_mask_unmask(struct irq_data *d, bool mask) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc); + const struct reg_calibrate *int_cal; + const struct reg_calibrate *mod_sel_cal; + int gpio = irqd_to_hwirq(d); + int i, j; + int offset = 0; + int base_offset = 0; + const struct reg_cal_array *reg_off, *mod; + int bit_off = 0; + u16 value; + u16 value1; + + int_cal = pctrl->pin_topologys->int_cal; + mod_sel_cal = pctrl->pin_topologys->mod_sel_cal; + + if (gpio >= 0) { + for (i = 0; i < int_cal->size; i++) + if (gpio == int_cal->cal_array[i]) + break; + for (j = 0; j < int_cal->reg_cal_size; j++) { + if (offset > i) + break; + offset += int_cal->reg[j].size; + } + reg_off = &(int_cal->reg[j-1]); + mod = &(mod_sel_cal->reg[j-1]); + bit_off = i-(offset-reg_off->size); + base_offset = reg_off->pmio_offset; + value = inw(pctrl->pmio_base+reg_off->pmio_offset); + if (mask) + value &= 
(~(1<pmio_base+reg_off->pmio_offset); + if (mask) { + value1 = readw(pctrl->pm_pmio_base+mod->pmio_offset); + value1 |= (1<pm_pmio_base+mod->pmio_offset); + } else { + value1 = readw(pctrl->pm_pmio_base+mod->pmio_offset); + value1 |= (1<pm_pmio_base+mod->pmio_offset); + } + } +} + +static void zhaoxin_gpio_irq_mask(struct irq_data *d) +{ + zhaoxin_gpio_irq_mask_unmask(d, true); +} + +static void zhaoxin_gpio_irq_unmask(struct irq_data *d) +{ + zhaoxin_gpio_irq_mask_unmask(d, false); +} + +/* + * father domain irq handle + */ +static irqreturn_t zhaoxin_gpio_irq(int irq, void *data) +{ + struct zhaoxin_pinctrl *pctrl = data; + struct gpio_chip *gc = &pctrl->chip; + const struct reg_calibrate *init; + const struct reg_calibrate *stat_cal; + unsigned int i, bit_offset; + u16 status, enable; + unsigned long pending; + int index = 0; + int ret = 0; + int subirq; + unsigned int hwirq; + + init = pctrl->pin_topologys->int_cal; + stat_cal = pctrl->pin_topologys->status_cal; + for (i = 0; i < init->reg_cal_size; i++) { + pending = 0; + status = readw(pctrl->pm_pmio_base + stat_cal->reg[i].pmio_offset); + enable = inw(pctrl->pmio_base + init->reg[i].pmio_offset); + enable &= status; + pending = enable; + for_each_set_bit(bit_offset, &pending, init->reg[i].size) { + hwirq = init->cal_array[index + bit_offset]; + subirq = irq_find_mapping(gc->irq.domain, hwirq); + generic_handle_irq(subirq); + } + + ret += pending ? 
1 : 0; + index += init->reg[i].size; + } + + return IRQ_RETVAL(ret); +} + +static int zhaoxin_gpio_irq_type(struct irq_data *d, unsigned int type) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc); + unsigned int gpio = irqd_to_hwirq(d); + const struct index_cal_array *trigger_cal; + unsigned int pin; + unsigned long flags; + u8 index; + int position, point; + u16 value; + bool isup = true; + + trigger_cal = pctrl->pin_topologys->trigger_cal; + pin = zhaoxin_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL); + if (type & IRQ_TYPE_EDGE_FALLING) + isup = true; + else if (type & IRQ_TYPE_EDGE_RISING) + isup = true; + else if (type & IRQ_TYPE_LEVEL_LOW) + isup = true; + else if (type & IRQ_TYPE_LEVEL_HIGH) + isup = false; + + zhaoxin_gpio_set_gpio_mode_and_pull(pctrl, pin, isup); + + for (position = 0; position < trigger_cal->size; position++) + if (trigger_cal->cal_array[position] == gpio) + break; + + index = trigger_cal->index + ALIGN(position+1, 4)/4-1; + point = position % 4; + + raw_spin_lock_irqsave(&pctrl->lock, flags); + + value = zx_pad_read16(pctrl, index); + + if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) + value |= TRIGGER_BOTH_EDGE << (point*4); + else if (type & IRQ_TYPE_EDGE_FALLING) + value |= TRIGGER_FALL_EDGE << (point*4); + else if (type & IRQ_TYPE_EDGE_RISING) + value |= TRIGGER_RISE_EDGE << (point*4); + else if (type & IRQ_TYPE_LEVEL_LOW) + value |= TRIGGER_LOW_LEVEL << (point*4); + else if (type & IRQ_TYPE_LEVEL_HIGH) + value |= TRIGGER_HIGH_LEVEL << (point*4); + else + dev_dbg(pctrl->dev, "%s wrang type\n", __func__); + + zx_pad_write16(pctrl, index, value); + + if (type & IRQ_TYPE_EDGE_BOTH) + irq_set_handler_locked(d, handle_edge_irq); + else if (type & IRQ_TYPE_LEVEL_MASK) + irq_set_handler_locked(d, handle_level_irq); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + + return 0; +} + +static int zhaoxin_gpio_irq_wake(struct irq_data *d, unsigned int on) +{ + struct 
gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc); + unsigned int pin; + + pin = zhaoxin_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL); + if (pin) { + if (on) + enable_irq_wake(pctrl->irq); + else + disable_irq_wake(pctrl->irq); + } + + return 0; +} + +static int zhaoxin_gpio_add_pin_ranges(struct gpio_chip *gc) +{ + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc); + int ret, i; + + for (i = 0; i < pctrl->pin_map_size; i++) { + struct zhaoxin_pin_map2_gpio *map = &pctrl->pin_maps[i]; + + if (map->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP) + continue; + ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), + map->zhaoxin_range_gpio_base, map->zhaoxin_range_pin_base, + map->zhaoxin_range_pin_size); + if (ret) { + dev_err(pctrl->dev, "failed to add GPIO pin range\n"); + return ret; + } + } + + return 0; +} + +static unsigned int zhaoxin_gpio_ngpio(const struct zhaoxin_pinctrl *pctrl) +{ + const struct zhaoxin_pin_map2_gpio *pin_maps; + unsigned int ngpio = 0; + int i; + + for (i = 0; i < pctrl->pin_map_size; i++) { + pin_maps = &pctrl->pin_maps[i]; + if (pin_maps->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP) + continue; + if (pin_maps->zhaoxin_range_gpio_base + pin_maps->zhaoxin_range_pin_size > ngpio) + ngpio = pin_maps->zhaoxin_range_gpio_base + + pin_maps->zhaoxin_range_pin_size; + } + + return ngpio; +} + +static int zhaoxin_gpio_probe(struct zhaoxin_pinctrl *pctrl, int irq) +{ + int ret; + struct gpio_irq_chip *girq; + + pctrl->chip = zhaoxin_gpio_chip; + + pctrl->chip.ngpio = zhaoxin_gpio_ngpio(pctrl); + pctrl->chip.label = dev_name(pctrl->dev); + pctrl->chip.parent = pctrl->dev; + pctrl->chip.base = -1; + pctrl->chip.add_pin_ranges = zhaoxin_gpio_add_pin_ranges; + + pctrl->irq = irq; + + pctrl->irqchip.name = dev_name(pctrl->dev); + pctrl->irqchip.irq_ack = zhaoxin_gpio_irq_ack; + pctrl->irqchip.irq_mask = zhaoxin_gpio_irq_mask; + pctrl->irqchip.irq_unmask = 
zhaoxin_gpio_irq_unmask; + pctrl->irqchip.irq_set_type = zhaoxin_gpio_irq_type; + pctrl->irqchip.irq_set_wake = zhaoxin_gpio_irq_wake; + pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND; + + ret = devm_request_irq(pctrl->dev, irq, zhaoxin_gpio_irq, IRQF_SHARED | IRQF_NO_THREAD, + dev_name(pctrl->dev), pctrl); + if (ret) { + dev_err(pctrl->dev, "failed to request interrupt\n"); + return ret; + } + girq = &pctrl->chip.irq; + girq->chip = &pctrl->irqchip; + /* This will let us handle the IRQ in the driver */ + girq->parent_handler = NULL; + girq->num_parents = 0; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_bad_irq; + ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl); + if (ret) { + dev_err(pctrl->dev, "failed to register gpiochip\n"); + return ret; + } + + return 0; +} + +static int zhaoxin_pinctrl_pm_init(struct zhaoxin_pinctrl *pctrl) +{ + return 0; +} + +static int zhaoxin_pinctrl_probe(struct platform_device *pdev, + const struct zhaoxin_pinctrl_soc_data *soc_data) +{ + struct zhaoxin_pinctrl *pctrl; + int ret, i, irq; + struct resource *res; + void __iomem *regs; + + pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL); + if (!pctrl) + return -ENOMEM; + pctrl->dev = &pdev->dev; + pctrl->soc = soc_data; + raw_spin_lock_init(&pctrl->lock); + pctrl->pin_topologys = pctrl->soc->pin_topologys; + pctrl->pin_map_size = pctrl->soc->pin_map_size; + pctrl->pin_maps = devm_kcalloc(&pdev->dev, pctrl->pin_map_size, + sizeof(*pctrl->pin_maps), GFP_KERNEL); + if (!pctrl->pin_maps) + return -ENOMEM; + for (i = 0; i < pctrl->pin_map_size; i++) { + struct zhaoxin_pin_map2_gpio *community = &pctrl->pin_maps[i]; + *community = pctrl->soc->zhaoxin_pin_maps[i]; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + pctrl->pm_pmio_base = regs; + pctrl->pmio_base = 0x800; + pctrl->pmio_rx90 = 0x90; + pctrl->pmio_rx8c = 0x8c; + irq = 
platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = zhaoxin_pinctrl_pm_init(pctrl); + if (ret) + return ret; + pctrl->pctldesc = zhaoxin_pinctrl_desc; + pctrl->pctldesc.name = dev_name(&pdev->dev); + pctrl->pctldesc.pins = pctrl->soc->pins; + pctrl->pctldesc.npins = pctrl->soc->npins; + pctrl->pctldev = devm_pinctrl_register(&pdev->dev, &pctrl->pctldesc, pctrl); + if (IS_ERR(pctrl->pctldev)) { + dev_err(&pdev->dev, "failed to register pinctrl driver\n"); + return PTR_ERR(pctrl->pctldev); + } + ret = zhaoxin_gpio_probe(pctrl, irq); + + if (ret) + return ret; + platform_set_drvdata(pdev, pctrl); + return 0; +} + +int zhaoxin_pinctrl_probe_by_hid(struct platform_device *pdev) +{ + const struct zhaoxin_pinctrl_soc_data *data; + + data = device_get_match_data(&pdev->dev); + if (!data) + return -ENODATA; + + return zhaoxin_pinctrl_probe(pdev, data); +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_probe_by_hid); + +int zhaoxin_pinctrl_probe_by_uid(struct platform_device *pdev) +{ + const struct zhaoxin_pinctrl_soc_data *data; + + data = zhaoxin_pinctrl_get_soc_data(pdev); + if (IS_ERR(data)) + return PTR_ERR(data); + + return zhaoxin_pinctrl_probe(pdev, data); +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_probe_by_uid); + +const struct zhaoxin_pinctrl_soc_data *zhaoxin_pinctrl_get_soc_data(struct platform_device *pdev) +{ + const struct zhaoxin_pinctrl_soc_data *data = NULL; + const struct zhaoxin_pinctrl_soc_data **table; + struct acpi_device *adev; + unsigned int i; + + adev = ACPI_COMPANION(&pdev->dev); + if (adev) { + const void *match = device_get_match_data(&pdev->dev); + + table = (const struct zhaoxin_pinctrl_soc_data **)match; + for (i = 0; table[i]; i++) { + if (!strcmp(adev->pnp.unique_id, table[i]->uid)) { + data = table[i]; + break; + } + } + } else { + const struct platform_device_id *id; + + id = platform_get_device_id(pdev); + if (!id) + return ERR_PTR(-ENODEV); + + table = (const struct zhaoxin_pinctrl_soc_data **)id->driver_data; + data = table[pdev->id]; + 
} + + return data ?: ERR_PTR(-ENODATA); +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_get_soc_data); + +#ifdef CONFIG_PM_SLEEP + +int zhaoxin_pinctrl_suspend_noirq(struct device *dev) +{ + return 0; +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_suspend_noirq); + +int zhaoxin_pinctrl_resume_noirq(struct device *dev) +{ + return 0; +} +EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_resume_noirq); +#endif + +MODULE_AUTHOR("www.zhaoxin.com"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Zhaoxin pinctrl/GPIO core driver"); diff --git a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h new file mode 100644 index 000000000000..cebea382dbe9 --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * zhaoxin pinctrl common code + * Copyright(c) 2023 Shanghai Zhaoxin Corporation. All rights reserved. + */ + +#ifndef PINCTRL_zhaoxin_H +#define PINCTRL_zhaoxin_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct platform_device; +struct device; + +/** + * struct zhaoxin_pingroup pin define + */ +struct zhaoxin_pingroup { + const char *name; + const unsigned int *pins; + size_t npins; + unsigned short mode; + const unsigned int *modes; +}; + +/** + * struct zhaoxin_function + */ +struct zhaoxin_function { + const char *name; + const char * const *groups; + size_t ngroups; +}; + +/** + * struct zhaoxin_pin_map2_gpio + * @zhaoxin_range_pin_base + * @size: pin number + * @zhaoxin_range_gpio_base + */ +struct zhaoxin_pin_map2_gpio { + unsigned int zhaoxin_range_pin_base; + unsigned int zhaoxin_range_pin_size; + int zhaoxin_range_gpio_base; +}; + +#define MAX_GPIO 256 + +struct reg_cal_array { + int pmio_offset; + int size; +}; + +struct reg_calibrate { + const struct reg_cal_array *reg; + const int reg_cal_size; + const int *cal_array; + const int size; +}; + +struct index_cal_array { + int reg_port_base; + int reg_data_base; 
+ int index; + int *cal_array; + int size; +}; + +struct zhaoxin_pin_topology { + const struct reg_calibrate *int_cal; + const struct reg_calibrate *mod_sel_cal; + const struct reg_calibrate *status_cal; + const struct index_cal_array *gpio_in_cal; + const struct index_cal_array *gpio_out_cal; + const struct index_cal_array *gpio_dir_cal; + const struct index_cal_array *trigger_cal; +}; + +#define TRIGGER_FALL_EDGE 0 +#define TRIGGER_RISE_EDGE 1 +#define TRIGGER_BOTH_EDGE 2 +#define TRIGGER_LOW_LEVEL 3 +#define TRIGGER_HIGH_LEVEL 4 + +#define ZHAOXIN_GPIO_BASE_NOMAP -1 + +struct zhaoxin_pinctrl_soc_data { + const char *uid; + const struct pinctrl_pin_desc *pins; + size_t npins; + const struct zhaoxin_pingroup *groups; + size_t ngroups; + const struct zhaoxin_function *functions; + size_t nfunctions; + const struct zhaoxin_pin_topology *pin_topologys; + const struct zhaoxin_pin_map2_gpio *zhaoxin_pin_maps; + size_t pin_map_size; +}; + +const struct zhaoxin_pinctrl_soc_data * + zhaoxin_pinctrl_get_soc_data(struct platform_device *pdev); + +struct zhaoxin_pinctrl { + struct device *dev; + raw_spinlock_t lock; + struct pinctrl_desc pctldesc; + struct pinctrl_dev *pctldev; + struct gpio_chip chip; + struct irq_chip irqchip; + const struct zhaoxin_pinctrl_soc_data *soc; + const struct zhaoxin_pin_topology *pin_topologys; + struct zhaoxin_pin_map2_gpio *pin_maps; + size_t pin_map_size; + int irq; + int pmio_base; + void __iomem *pm_pmio_base; + int pmio_rx90; + int pmio_rx8c; +}; + +int zhaoxin_pinctrl_probe_by_hid(struct platform_device *pdev); +int zhaoxin_pinctrl_probe_by_uid(struct platform_device *pdev); + +#ifdef CONFIG_PM_SLEEP +int zhaoxin_pinctrl_suspend_noirq(struct device *dev); +int zhaoxin_pinctrl_resume_noirq(struct device *dev); +#endif + +#endif /* PINCTRL_zhaoxin_H */ -- Gitee From d395ff7e09637b1cf68682c3b275c18f55468e14 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 5 Feb 2024 16:19:37 +0800 Subject: [PATCH 0251/2138] anolis: configs: add 
CONFIG_PINCTRL_ZHAOXIN and KX7000 ANBZ: #7809 Set CONFIG_PINCTRL_ZHAOXIN and CONFIG_PINCTRL_KX7000 to m in openeulr_defconfig. Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2709 --- arch/x86/configs/anolis_defconfig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 38b641e976d0..4abe45995548 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -3901,6 +3901,9 @@ CONFIG_PINCTRL_SUNRISEPOINT=m # CONFIG_PINCTRL_TIGERLAKE is not set # end of Intel pinctrl drivers +CONFIG_PINCTRL_ZHAOXIN=m +CONFIG_PINCTRL_KX7000=m + # # Renesas pinctrl drivers # -- Gitee From eedd752d24ef71d867519c4e5633d07aea22a0d7 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 2 Jan 2024 19:00:15 +0800 Subject: [PATCH 0252/2138] anolis: i2c: smbus: Add support for Zhaoxin SMBUS controller ANBZ: #7809 The Zhaoxin platform implements the SMBUS controller on the hardware, enabling information exchange and collaboration between devices using the SMBus protocol. Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2711 --- drivers/i2c/busses/Kconfig | 10 + drivers/i2c/busses/Makefile | 1 + drivers/i2c/busses/i2c-zhaoxin-smbus.c | 385 +++++++++++++++++++++++++ 3 files changed, 396 insertions(+) create mode 100644 drivers/i2c/busses/i2c-zhaoxin-smbus.c diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index b9ee0e451e97..88a58f4e6e72 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -361,6 +361,16 @@ config I2C_SCMI To compile this driver as a module, choose M here: the module will be called i2c-scmi. +config I2C_ZHAOXIN_SMBUS + tristate "Zhaoxin SMBus Interface" + depends on PCI || COMPILE_TEST + help + If you say yes to this option, support will be included for the + ZHAOXIN SMBus interface + + This driver can also be built as a module. 
If so, the module + will be called i2c-zhaoxin-smbus. + endif # ACPI comment "Mac SMBus host controller drivers" diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index bef7c205433b..f8c8a3554427 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -139,6 +139,7 @@ obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o obj-$(CONFIG_I2C_VIPERBOARD) += i2c-viperboard.o +obj-$(CONFIG_I2C_ZHAOXIN_SMBUS) += i2c-zhaoxin-smbus.o # Other I2C/SMBus bus drivers obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o diff --git a/drivers/i2c/busses/i2c-zhaoxin-smbus.c b/drivers/i2c/busses/i2c-zhaoxin-smbus.c new file mode 100644 index 000000000000..52c689e928af --- /dev/null +++ b/drivers/i2c/busses/i2c-zhaoxin-smbus.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Zhaoxin SMBus controller driver + * + * Copyright(c) 2023 Shanghai Zhaoxin Semiconductor Corporation. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "3.1.0" + +#define ZXSMB_NAME "smbus_zhaoxin" + +/* + * registers + */ +/* SMBus MMIO address offsets */ +#define ZXSMB_STS 0x00 +#define ZXSMB_BUSY BIT(0) +#define ZXSMB_CMD_CMPLET BIT(1) +#define ZXSMB_DEV_ERR BIT(2) +#define ZXSMB_BUS_CLSI BIT(3) +#define ZXSMB_FAIL_TRANS BIT(4) +#define ZXSMB_STS_MASK GENMASK(4, 0) +#define ZXSMB_NSMBSRST BIT(5) +#define ZXSMB_CTL 0x02 +#define ZXSMB_CMPLT_EN BIT(0) +#define ZXSMB_KILL_PRG BIT(1) +#define ZXSMB_START BIT(6) +#define ZXSMB_PEC_EN BIT(7) +#define ZXSMB_CMD 0x03 +#define ZXSMB_ADD 0x04 +#define ZXSMB_DAT0 0x05 +#define ZXSMB_DAT1 0x06 +#define ZXSMB_BLKDAT 0x07 + +/* + * platform related information + */ + /* protocol cmd constants */ +#define ZXSMB_QUICK 0x00 +#define ZXSMB_BYTE 0x04 +#define ZXSMB_BYTE_DATA 0x08 +#define ZXSMB_WORD_DATA 0x0C +#define ZXSMB_PROC_CALL 0x10 +#define ZXSMB_BLOCK_DATA 0x14 +#define ZXSMB_I2C_10_BIT_ADDR 0x18 +#define ZXSMB_I2C_PROC_CALL 0x30 +#define ZXSMB_I2C_BLOCK_DATA 0x34 +#define ZXSMB_I2C_7_BIT_ADDR 0x38 +#define ZXSMB_UNIVERSAL 0x3C + +#define ZXSMB_TIMEOUT 500 + +struct zxsmb { + struct device *dev; + struct i2c_adapter adap; + struct completion complete; + u16 base; + int irq; + u8 status; + int size; + u8 pec; +}; + +static irqreturn_t zxsmb_irq_handle(int irq, void *dev_id) +{ + struct zxsmb *smb = (struct zxsmb *)dev_id; + + smb->status = inb(smb->base + ZXSMB_STS); + if ((smb->status & ZXSMB_STS_MASK) == 0) + return IRQ_NONE; + + /* clear status */ + outb(smb->status, smb->base + ZXSMB_STS); + complete(&smb->complete); + + return IRQ_HANDLED; +} + +static int zxsmb_status_check(struct zxsmb *smb) +{ + if (smb->status & ZXSMB_CMD_CMPLET) + return 0; + + if (smb->status & ZXSMB_BUS_CLSI) { + dev_err(smb->dev, "Lost arbitration\n"); + outb(ZXSMB_KILL_PRG, smb->base + ZXSMB_CTL); + return -EAGAIN; + } + + dev_dbg(smb->dev, "Trans failed, status = 0x%X\n", 
smb->status); + + return -EIO; +} + +static int zxsmb_wait_interrput_finish(struct zxsmb *smb) +{ + int time_left; + + time_left = wait_for_completion_timeout(&smb->complete, msecs_to_jiffies(ZXSMB_TIMEOUT)); + if (time_left == 0) { + u8 status = inb(smb->base + ZXSMB_STS); + + /* some host's irq config not work well */ + if (status & ZXSMB_STS_MASK) { + outb(status, smb->base + ZXSMB_STS); + outb(ZXSMB_KILL_PRG, smb->base + ZXSMB_CTL); + devm_free_irq(smb->dev, smb->irq, smb); + smb->irq = 0; + dev_warn(smb->dev, "change to polling mode\n"); + + return -EAGAIN; + } + dev_dbg(smb->dev, "interrput timeout\n"); + return -EIO; + } + + return zxsmb_status_check(smb); +} + +static int zxsmb_wait_polling_finish(struct zxsmb *smb) +{ + int status; + int time_left = ZXSMB_TIMEOUT * 10; + + do { + usleep_range(100, 200); + status = inb(smb->base + ZXSMB_STS); + } while ((status & ZXSMB_BUSY) && (--time_left)); + + if (time_left == 0) { + dev_dbg(smb->dev, "polling timeout\n"); + return -EIO; + } + + /* clear status */ + outb(status, smb->base + ZXSMB_STS); + smb->status = status; + + return zxsmb_status_check(smb); +} + +static int zxsmb_trans_start(struct zxsmb *smb) +{ + u16 base = smb->base; + int tmp; + + /* Make sure the SMBus host is ready to start transmitting */ + tmp = inb(base + ZXSMB_STS); + if (tmp & ZXSMB_BUSY) { + outb(tmp, base + ZXSMB_STS); + usleep_range(1000, 5000); + tmp = inb(base + ZXSMB_STS); + if (tmp & ZXSMB_BUSY) { + dev_err(smb->dev, "SMBus reset failed! 
(0x%02x)\n", tmp); + return -EIO; + } + } + + tmp = ZXSMB_START | smb->size; + + if (smb->pec) + tmp |= ZXSMB_PEC_EN; + else + tmp &= (~ZXSMB_PEC_EN); + + if (smb->irq) + tmp |= ZXSMB_CMPLT_EN; + + reinit_completion(&smb->complete); + smb->status = 0; + outb(tmp, base + ZXSMB_CTL); + return 0; +} + +static int zxsmb_transaction(struct zxsmb *smb) +{ + int err; + + err = zxsmb_trans_start(smb); + if (err) + return err; + + if (smb->irq) + err = zxsmb_wait_interrput_finish(smb); + else + err = zxsmb_wait_polling_finish(smb); + + outb(0, smb->base + ZXSMB_CTL); + return err; +} + +static int zxsmb_smbus_xfer(struct i2c_adapter *adap, u16 addr, u16 flags, char read, u8 command, + int size, union i2c_smbus_data *data) +{ + int i; + int err; + u8 len; + struct zxsmb *smb = (struct zxsmb *)i2c_get_adapdata(adap); + u16 base = smb->base; + + switch (size) { + case I2C_SMBUS_QUICK: + size = ZXSMB_QUICK; + break; + case I2C_SMBUS_BYTE: + size = ZXSMB_BYTE; + if (!read) + outb(command, base + ZXSMB_CMD); + break; + case I2C_SMBUS_BYTE_DATA: + outb(command, base + ZXSMB_CMD); + if (!read) + outb(data->byte, base + ZXSMB_DAT0); + size = ZXSMB_BYTE_DATA; + break; + case I2C_SMBUS_PROC_CALL: + case I2C_SMBUS_WORD_DATA: + if (read && size == I2C_SMBUS_PROC_CALL) + goto exit_unsupported; + outb(command, base + ZXSMB_CMD); + if (!read) { + outb(data->word & 0xff, base + ZXSMB_DAT0); + outb((data->word & 0xff00) >> 8, base + ZXSMB_DAT1); + } + size = (size == I2C_SMBUS_PROC_CALL) ? + ZXSMB_PROC_CALL : ZXSMB_WORD_DATA; + break; + case I2C_SMBUS_I2C_BLOCK_DATA: + case I2C_SMBUS_BLOCK_DATA: + len = data->block[0]; + if (read && size == I2C_SMBUS_I2C_BLOCK_DATA) + outb(len, base + ZXSMB_DAT1); + outb(command, base + ZXSMB_CMD); + /* Reset ZXSMB_BLKDAT */ + inb(base + ZXSMB_CTL); + if (!read) { + outb(len, base + ZXSMB_DAT0); + outb(0, base + ZXSMB_DAT1); + for (i = 1; i <= len; i++) + outb(data->block[i], base + ZXSMB_BLKDAT); + } + size = (size == I2C_SMBUS_I2C_BLOCK_DATA) ? 
+ ZXSMB_I2C_BLOCK_DATA : ZXSMB_BLOCK_DATA; + break; + default: + goto exit_unsupported; + } + + outb(((addr & 0x7f) << 1) | read, base + ZXSMB_ADD); + smb->size = size; + smb->pec = flags & I2C_CLIENT_PEC; + err = zxsmb_transaction(smb); + if (err) + return err; + + if ((read == I2C_SMBUS_WRITE) || (size == ZXSMB_QUICK)) { + if (unlikely(size == ZXSMB_PROC_CALL)) + goto prepare_read; + return 0; + } + +prepare_read: + switch (size) { + case ZXSMB_BYTE: + case ZXSMB_BYTE_DATA: + data->byte = inb(base + ZXSMB_DAT0); + break; + case ZXSMB_PROC_CALL: + case ZXSMB_WORD_DATA: + data->word = inb(base + ZXSMB_DAT0) + (inb(base + ZXSMB_DAT1) << 8); + break; + case ZXSMB_I2C_BLOCK_DATA: + case ZXSMB_BLOCK_DATA: + data->block[0] = inb(base + ZXSMB_DAT0); + if (data->block[0] > I2C_SMBUS_BLOCK_MAX) + data->block[0] = I2C_SMBUS_BLOCK_MAX; + /* Reset ZXSMB_BLKDAT */ + inb(base + ZXSMB_CTL); + for (i = 1; i <= data->block[0]; i++) + data->block[i] = inb(base + ZXSMB_BLKDAT); + break; + } + + return 0; + +exit_unsupported: + dev_err(smb->dev, "unsupported access, size:%x, dir:%s", size, read ? 
"read" : "write"); + return -EOPNOTSUPP; +} + +static u32 zxsmb_func(struct i2c_adapter *adapter) +{ + return I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm smbus_algorithm = { + .smbus_xfer = zxsmb_smbus_xfer, + .functionality = zxsmb_func, +}; + +static int zxsmb_probe(struct platform_device *pdev) +{ + struct zxsmb *smb; + struct resource *res; + struct i2c_adapter *adap; + + smb = devm_kzalloc(&pdev->dev, sizeof(*smb), GFP_KERNEL); + if (!smb) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (IS_ERR(res)) + return -ENODEV; + smb->base = res->start; + if (!devm_request_region(&pdev->dev, res->start, resource_size(res), pdev->name)) { + dev_err(&pdev->dev, "Can't get I/O resource\n"); + return -EBUSY; + } + + smb->irq = platform_get_irq(pdev, 0); + if (smb->irq < 0 || devm_request_irq(&pdev->dev, smb->irq, zxsmb_irq_handle, IRQF_SHARED, + pdev->name, smb)) { + dev_warn(&pdev->dev, "failed to request irq %d\n", smb->irq); + smb->irq = 0; + } else + init_completion(&smb->complete); + + smb->dev = &pdev->dev; + platform_set_drvdata(pdev, (void *)smb); + + adap = &smb->adap; + adap->algo = &smbus_algorithm; + adap->retries = 2; + adap->owner = THIS_MODULE; + adap->dev.parent = &pdev->dev; + ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); + snprintf(adap->name, sizeof(adap->name), "zhaoxin-%s-%s", dev_name(pdev->dev.parent), + dev_name(smb->dev)); + i2c_set_adapdata(&smb->adap, smb); + + return i2c_add_adapter(&smb->adap); +} + +static int zxsmb_remove(struct platform_device *pdev) +{ + struct zxsmb *smb = platform_get_drvdata(pdev); + + i2c_del_adapter(&(smb->adap)); + platform_set_drvdata(pdev, NULL); + devm_kfree(&pdev->dev, smb); + + return 0; +} + +static const struct acpi_device_id zxsmb_acpi_match[] = { + {"SMB3324", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, zxsmb_acpi_match); + +static struct platform_driver zxsmb_driver = { + .probe = zxsmb_probe, + .remove = zxsmb_remove, + .driver = { + .name = 
ZXSMB_NAME, + .acpi_match_table = ACPI_PTR(zxsmb_acpi_match), + }, +}; + +module_platform_driver(zxsmb_driver); + +MODULE_AUTHOR("hanshu@zhaoxin.com"); +MODULE_DESCRIPTION("Zhaoxin SMBus driver"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); -- Gitee From 8b3774f3f7f6379da4ef88a2e34fab117e4251b5 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 15 Mar 2024 11:36:57 +0800 Subject: [PATCH 0253/2138] fs/address_space: move i_mmap_rwsem to mitigate a false sharing with i_mmap. ANBZ: #8544 commit d3b1a9a778e1a014c5331d1e8d4863fd999eb0b5 upstream. In the struct address_space, there is a 32-byte gap between i_mmap and i_mmap_rwsem. Due to the alignment of struct address_space variables to 8 bytes, in certain situations, i_mmap and i_mmap_rwsem may end up in the same CACHE line. While running Unixbench/execl, we observe high false sharing issues when accessing i_mmap against i_mmap_rwsem. We move i_mmap_rwsem after i_private_list, ensuring a 64-byte gap between i_mmap and i_mmap_rwsem. For Intel Silver machines (2 sockets) using kernel v6.8 rc-2, the score of Unixbench/execl improves by ~3.94%, and the score of Unixbench/shell improves by ~3.26%. 
Baseline: ------------------------------------------------------------- 162 546 748 11374 21 0xffff92e266af90c0 ------------------------------------------------------------- 46.89% 44.65% 0.00% 0.00% 0x0 1 1 0xffffffff86d5fb96 460 258 271 1069 32 [k] __handle_mm_fault [kernel.vmlinux] memory.c:2940 0 1 4.21% 4.41% 0.00% 0.00% 0x4 1 1 0xffffffff86d0ed54 473 311 288 95 28 [k] filemap_read [kernel.vmlinux] atomic.h:23 0 1 0.00% 0.00% 0.04% 4.76% 0x8 1 1 0xffffffff86d4bcf1 0 0 0 5 4 [k] vma_interval_tree_remove [kernel.vmlinux] rbtree_augmented.h:204 0 1 6.41% 6.02% 0.00% 0.00% 0x8 1 1 0xffffffff86d4ba85 411 271 339 210 32 [k] vma_interval_tree_insert [kernel.vmlinux] interval_tree.c:23 0 1 0.00% 0.00% 0.47% 95.24% 0x10 1 1 0xffffffff86d4bd34 0 0 0 74 32 [k] vma_interval_tree_remove [kernel.vmlinux] rbtree_augmented.h:339 0 1 0.37% 0.13% 0.00% 0.00% 0x10 1 1 0xffffffff86d4bb4f 328 212 380 7 5 [k] vma_interval_tree_remove [kernel.vmlinux] rbtree_augmented.h:338 0 1 5.13% 5.08% 0.00% 0.00% 0x10 1 1 0xffffffff86d4bb4b 416 255 357 197 32 [k] vma_interval_tree_remove [kernel.vmlinux] rbtree_augmented.h:338 0 1 1.10% 0.53% 0.00% 0.00% 0x28 1 1 0xffffffff86e06eb8 395 228 351 24 14 [k] do_dentry_open [kernel.vmlinux] open.c:966 0 1 1.10% 2.14% 57.07% 0.00% 0x38 1 1 0xffffffff878c9225 1364 792 462 7003 32 [k] down_write [kernel.vmlinux] atomic64_64.h:109 0 1 0.00% 0.00% 0.01% 0.00% 0x38 1 1 0xffffffff878c8e75 0 0 252 3 2 [k] rwsem_down_write_slowpath [kernel.vmlinux] atomic64_64.h:109 0 1 0.00% 0.13% 0.00% 0.00% 0x38 1 1 0xffffffff878c8e23 0 596 63 2 2 [k] rwsem_down_write_slowpath [kernel.vmlinux] atomic64_64.h:15 0 1 2.38% 2.94% 6.53% 0.00% 0x38 1 1 0xffffffff878c8ccb 1150 818 570 1197 32 [k] rwsem_down_write_slowpath [kernel.vmlinux] atomic64_64.h:109 0 1 30.59% 32.22% 0.00% 0.00% 0x38 1 1 0xffffffff878c8cb4 423 251 380 648 32 [k] rwsem_down_write_slowpath [kernel.vmlinux] atomic64_64.h:15 0 1 1.83% 1.74% 35.88% 0.00% 0x38 1 1 0xffffffff86b4f833 1217 1112 565 4586 32 [k] 
up_write [kernel.vmlinux] atomic64_64.h:91 0 1 with this change: ------------------------------------------------------------- 360 12 300 57 35 0xffff982cdae76400 ------------------------------------------------------------- 50.00% 59.67% 0.00% 0.00% 0x0 1 1 0xffffffff8215fb86 352 200 191 558 32 [k] __handle_mm_fault [kernel.vmlinux] memory.c:2940 0 1 8.33% 5.00% 0.00% 0.00% 0x4 1 1 0xffffffff8210ed44 370 284 263 42 24 [k] filemap_read [kernel.vmlinux] atomic.h:23 0 1 0.00% 0.00% 5.26% 2.86% 0x8 1 1 0xffffffff8214bce1 0 0 0 4 4 [k] vma_interval_tree_remove [kernel.vmlinux] rbtree_augmented.h:204 0 1 33.33% 14.33% 0.00% 0.00% 0x8 1 1 0xffffffff8214ba75 344 186 219 140 32 [k] vma_interval_tree_insert [kernel.vmlinux] interval_tree.c:23 0 1 0.00% 0.00% 94.74% 97.14% 0x10 1 1 0xffffffff8214bd24 0 0 0 88 29 [k] vma_interval_tree_remove [kernel.vmlinux] rbtree_augmented.h:339 0 1 8.33% 20.00% 0.00% 0.00% 0x10 1 1 0xffffffff8214bb3b 296 209 226 167 31 [k] vma_interval_tree_remove [kernel.vmlinux] rbtree_augmented.h:338 0 1 0.00% 0.67% 0.00% 0.00% 0x28 1 1 0xffffffff82206f45 0 140 334 4 3 [k] do_dentry_open [kernel.vmlinux] open.c:966 0 1 0.00% 0.33% 0.00% 0.00% 0x38 1 1 0xffffffff8250a6c4 0 286 126 5 5 [k] errseq_sample [kernel.vmlinux] errseq.c:125 0 Signed-off-by: JonasZhou Link: https://lore.kernel.org/r/20240202083304.10995-1-JonasZhou-oc@zhaoxin.com Signed-off-by: Christian Brauner Signed-off-by: leoliu-oc Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/2888 --- include/linux/fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/fs.h b/include/linux/fs.h index 6c3d86532e3f..aeb033344a6b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -484,10 +484,10 @@ struct address_space { pgoff_t writeback_index; const struct address_space_operations *a_ops; unsigned long flags; - struct rw_semaphore i_mmap_rwsem; errseq_t wb_err; spinlock_t private_lock; struct list_head private_list; + struct rw_semaphore 
i_mmap_rwsem; void *private_data; } __attribute__((aligned(sizeof(long)))) __randomize_layout; /* -- Gitee From 9b99b5b102e99bde76217f0b1b585cf9a1d3e4c2 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Fri, 8 Mar 2024 14:50:42 +0800 Subject: [PATCH 0254/2138] anolis: kfence: enhance kfence for 6.6 ANBZ: #8499 Port anolis own kfence features from ANCK5.10. Enhance the ability of memory debugging about slub objects. Signed-off-by: Tianchen Ding Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2874 --- arch/arm64/include/asm/kfence.h | 9 +- arch/arm64/mm/mmu.c | 13 +- arch/x86/include/asm/kfence.h | 50 +- include/linux/kfence.h | 110 +- include/linux/page-flags.h | 13 + include/trace/events/mmflags.h | 9 +- mm/kfence/core.c | 1880 ++++++++++++++++++++++++++----- mm/kfence/kfence.h | 77 +- mm/kfence/kfence_test.c | 32 +- mm/kfence/report.c | 17 +- mm/slab.c | 2 +- mm/slub.c | 2 +- 12 files changed, 1845 insertions(+), 369 deletions(-) diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h index a81937fae9f6..e5f86bbf4348 100644 --- a/arch/arm64/include/asm/kfence.h +++ b/arch/arm64/include/asm/kfence.h @@ -8,9 +8,14 @@ #ifndef __ASM_KFENCE_H #define __ASM_KFENCE_H +#include + #include -static inline bool arch_kfence_init_pool(void) { return true; } +static inline bool arch_kfence_init_pool(struct kfence_pool_area *kpa) +{ + return can_set_direct_map(); +} static inline bool kfence_protect_page(unsigned long addr, bool protect) { @@ -19,6 +24,8 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) return true; } +static inline bool arch_kfence_free_pool(unsigned long addr) { return false; } + #ifdef CONFIG_KFENCE extern bool kfence_early_init; static inline bool arm64_kfence_can_set_direct_map(void) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 47781bec6171..3e26d444569e 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -511,6 +511,9 @@ void __init 
mark_linear_text_alias_ro(void) #ifdef CONFIG_KFENCE +static unsigned long __ro_after_init +kfence_pool_size = ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE); + bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL; /* early_param() will be parsed before map_mem() below. */ @@ -531,7 +534,7 @@ static phys_addr_t __init arm64_kfence_alloc_pool(void) if (!kfence_early_init) return 0; - kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); + kfence_pool = memblock_phys_alloc(kfence_pool_size, PAGE_SIZE); if (!kfence_pool) { pr_err("failed to allocate kfence pool\n"); kfence_early_init = false; @@ -539,7 +542,7 @@ static phys_addr_t __init arm64_kfence_alloc_pool(void) } /* Temporarily mark as NOMAP. */ - memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE); + memblock_mark_nomap(kfence_pool, kfence_pool_size); return kfence_pool; } @@ -550,11 +553,11 @@ static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) return; /* KFENCE pool needs page-level mapping. */ - __map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE, + __map_memblock(pgdp, kfence_pool, kfence_pool + kfence_pool_size, pgprot_tagged(PAGE_KERNEL), NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); - memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE); - __kfence_pool = phys_to_virt(kfence_pool); + memblock_clear_nomap(kfence_pool, kfence_pool_size); + __kfence_pool_early_init = phys_to_virt(kfence_pool); } #else /* CONFIG_KFENCE */ diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h index ff5c7134a37a..ba344b416ac5 100644 --- a/arch/x86/include/asm/kfence.h +++ b/arch/x86/include/asm/kfence.h @@ -19,11 +19,12 @@ #include /* Force 4K pages for __kfence_pool. 
*/ -static inline bool arch_kfence_init_pool(void) +static inline bool arch_kfence_init_pool(struct kfence_pool_area *kpa) { + char *__kfence_pool = kpa->addr; unsigned long addr; - for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr); + for (addr = (unsigned long)__kfence_pool; is_kfence_address_area((void *)addr, kpa); addr += PAGE_SIZE) { unsigned int level; @@ -68,6 +69,51 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) return true; } +/* + * This function is used to recover TLB to 1G kernel mapping. + * The caller MUST make sure there're no other active kfence + * pools in this 1G area. + */ +static inline bool arch_kfence_free_pool(unsigned long addr) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud, new_pud, old_pud; + + addr = ALIGN_DOWN(addr, PUD_SIZE); + + pgd = pgd_offset_k(addr); + if (pgd_none(*pgd)) + return false; + + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) + return false; + + if (p4d_large(*p4d) || !p4d_present(*p4d)) + return false; + + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) + return false; + + if (pud_large(*pud) || !pud_present(*pud)) + return false; + + new_pud = pfn_pud((unsigned long)__phys_to_pfn(__pa(addr)), + __pgprot(__PAGE_KERNEL_LARGE)); + + old_pud = xchg(pud, new_pud); + + flush_tlb_kernel_range(addr, addr + PUD_SIZE); + if (!pud_free_pmd_page(&old_pud, addr)) { + pr_warn("free old TLB error at 0x%p-0x%p\n", + (void *)addr, (void *)(addr + PUD_SIZE)); + } + + return true; +} + #endif /* !MODULE */ #endif /* _ASM_X86_KFENCE_H */ diff --git a/include/linux/kfence.h b/include/linux/kfence.h index 401af4757514..bb24956fefd8 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -16,19 +16,45 @@ #include #include +#include +#include -extern unsigned long kfence_sample_interval; +extern long kfence_sample_interval; -/* - * We allocate an even number of pages, as it simplifies calculations to map - * address to metadata indices; effectively, the very first page 
serves as an - * extended guard page, but otherwise has no special purpose. - */ -#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE) -extern char *__kfence_pool; +struct kfence_pool_area { + struct rb_node rb_node; /* binary tree linked to root */ + struct kfence_metadata *meta; /* metadata per area */ + char *addr; /* start kfence pool address */ + unsigned long pool_size; /* size of kfence pool of this area */ + unsigned long nr_objects; /* max object number of this area, 0 marked as zombie area */ + int node; /* the numa node (freelist) this area belongs to, likely from phy mem node */ + atomic_t _ref; /* count kpa ref, to protect kpa itself */ + struct list_head list; /* ready to be added to kfence_pool_root */ + struct percpu_ref refcnt; /* count in-use objects, to protect pool, meta, etc... */ + struct work_struct work; /* use workqueue to free unused area */ +}; DECLARE_STATIC_KEY_FALSE(kfence_allocation_key); +DECLARE_STATIC_KEY_FALSE(kfence_skip_interval); extern atomic_t kfence_allocation_gate; +extern unsigned long kfence_num_objects; +extern char *__kfence_pool_early_init; + +/** + * is_kfence_address_area() - check if an address belongs to KFENCE pool in given area + * @addr: address to check + * @kpa: area to check + * + * Return: true or false depending on whether the address is within the KFENCE + * object range in given area. + * + * This function is used when you already know the nearest leftside area. 
+ */ +static __always_inline bool is_kfence_address_area(const void *addr, + const struct kfence_pool_area *kpa) +{ + return unlikely(kpa && (unsigned long)((char *)addr - kpa->addr) < kpa->pool_size); +} /** * is_kfence_address() - check if an address belongs to KFENCE pool @@ -50,12 +76,17 @@ extern atomic_t kfence_allocation_gate; */ static __always_inline bool is_kfence_address(const void *addr) { +#if defined(CONFIG_KASAN) || defined(CONFIG_DEBUG_KMEMLEAK) /* - * The __kfence_pool != NULL check is required to deal with the case - * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in - * the slow-path after the range-check! + * KASAN functions such as kasan_record_aux_stack(), + * kasan_poison_shadow(), or kasan_unpoison_shadow() + * may give an invalid kaddr (direct mapping kernel address). + * We must add a check here. */ - return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool); + return virt_addr_valid(addr) && PageKfence(virt_to_page(addr)); +#else + return PageKfence(virt_to_page(addr)); +#endif } /** @@ -72,6 +103,17 @@ void __init kfence_alloc_pool_and_metadata(void); */ void __init kfence_init(void); +/** + * update_kfence_booting_max() - analyse the max num_objects from cmdline + * + * Read the config from boot cmdline and limit kfence pool size. + * This function is called by kfence itself (e.g., kfence_alloc_pool()), or, + * by specific arch alloc (e.g., arm64_kfence_alloc_pool()). + * + * Return: 1 if kfence_num_objects is changed, otherwise 0. + */ +int __init update_kfence_booting_max(void); + /** * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects * @s: cache being shut down @@ -97,7 +139,7 @@ void kfence_shutdown_cache(struct kmem_cache *s); * Allocate a KFENCE object. Allocators must not call this function directly, * use kfence_alloc() instead. 
*/ -void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags); +void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node); /** * kfence_alloc() - allocate a KFENCE object with a low probability @@ -124,9 +166,43 @@ static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp if (!static_branch_likely(&kfence_allocation_key)) return NULL; #endif - if (likely(atomic_read(&kfence_allocation_gate))) + if (!static_branch_likely(&kfence_skip_interval) && + likely(atomic_read(&kfence_allocation_gate))) return NULL; - return __kfence_alloc(s, size, flags); + return __kfence_alloc(s, size, flags, NUMA_NO_NODE); +} + +/** + * kfence_alloc_node() - allocate a KFENCE object with a low probability + * @s: struct kmem_cache with object requirements + * @size: exact size of the object to allocate (can be less than @s->size + * e.g. for kmalloc caches) + * @flags: GFP flags + * @node: alloc from kfence pool on which node + * + * Return: + * * NULL - must proceed with allocating as usual, + * * non-NULL - pointer to a KFENCE object. + * + * kfence_alloc_node() should be inserted into the heap allocation fast path, + * allowing it to transparently return KFENCE-allocated objects with a low + * probability using a static branch (the probability is controlled by the + * kfence.sample_interval boot parameter). 
+ */ +static __always_inline void *kfence_alloc_node(struct kmem_cache *s, size_t size, gfp_t flags, + int node) +{ +#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0 + if (!static_branch_unlikely(&kfence_allocation_key)) + return NULL; +#else + if (!static_branch_likely(&kfence_allocation_key)) + return NULL; +#endif + if (!static_branch_likely(&kfence_skip_interval) && + likely(atomic_read(&kfence_allocation_gate))) + return NULL; + return __kfence_alloc(s, size, flags, node); } /** @@ -228,6 +304,10 @@ static inline void kfence_alloc_pool_and_metadata(void) { } static inline void kfence_init(void) { } static inline void kfence_shutdown_cache(struct kmem_cache *s) { } static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; } +static inline void *kfence_alloc_node(struct kmem_cache *s, size_t size, gfp_t flags, int node) +{ + return NULL; +} static inline size_t kfence_ksize(const void *addr) { return 0; } static inline void *kfence_object_start(const void *addr) { return NULL; } static inline void __kfence_free(void *addr) { } diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index a77f3a7d21d1..aae9f6230dba 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -135,6 +135,9 @@ enum pageflags { #ifdef CONFIG_ARCH_USES_PG_ARCH_X PG_arch_2, PG_arch_3, +#endif +#ifdef CONFIG_KFENCE + PG_kfence, /* Page in kfence pool */ #endif __NR_PAGEFLAGS, @@ -623,6 +626,10 @@ PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY) PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) #endif +#ifdef CONFIG_KFENCE +__PAGEFLAG(Kfence, kfence, PF_ANY) +#endif + /* * On an anonymous page mapped into a user virtual memory area, * page->mapping points to its anon_vma, not to a struct address_space; @@ -1070,6 +1077,12 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page) #define __PG_MLOCKED 0 #endif +#ifdef CONFIG_KFENCE +#define __PG_KFENCE (1UL << 
PG_kfence) +#else +#define __PG_KFENCE 0 +#endif + /* * Flags checked when a page is freed. Pages being freed should not have * these flags set. If they are, there is a problem. diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index e010618f9326..867de6485902 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -95,6 +95,12 @@ #define IF_HAVE_PG_ARCH_X(_name) #endif +#ifdef CONFIG_KFENCE +#define IF_HAVE_PG_KFENCE(_name) ,{1UL << PG_##_name, __stringify(_name)} +#else +#define IF_HAVE_PG_KFENCE(_name) +#endif + #define DEF_PAGEFLAG_NAME(_name) { 1UL << PG_##_name, __stringify(_name) } #define __def_pageflag_names \ @@ -125,7 +131,8 @@ IF_HAVE_PG_HWPOISON(hwpoison) \ IF_HAVE_PG_IDLE(idle) \ IF_HAVE_PG_IDLE(young) \ IF_HAVE_PG_ARCH_X(arch_2) \ -IF_HAVE_PG_ARCH_X(arch_3) +IF_HAVE_PG_ARCH_X(arch_3) \ +IF_HAVE_PG_KFENCE(kfence) #define show_page_flags(flags) \ (flags) ? __print_flags(flags, "|", \ diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 3872528d0963..01d5945e1351 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -29,6 +30,7 @@ #include #include #include +#include #include #include @@ -40,8 +42,9 @@ ({ \ const bool __cond = WARN_ON(cond); \ if (unlikely(__cond)) { \ - WRITE_ONCE(kfence_enabled, false); \ disabled_by_warn = true; \ + WRITE_ONCE(kfence_enabled, false); \ + static_branch_disable(&kfence_allocation_key); \ } \ __cond; \ }) @@ -50,8 +53,27 @@ static bool kfence_enabled __read_mostly; static bool disabled_by_warn __read_mostly; +/* true = node mode, false = global mode. */ +static bool kfence_pool_node_mode __read_mostly; +static DEFINE_MUTEX(kfence_mutex); +unsigned long kfence_num_objects __read_mostly = CONFIG_KFENCE_NUM_OBJECTS; +EXPORT_SYMBOL_GPL(kfence_num_objects); +static unsigned long kfence_num_objects_snap __read_mostly; /* Used to record upstream ver. 
*/ +static int *kfence_node_map __read_mostly; /* Map real node to "virtual kfence node". */ +bool kfence_panic_on_fault __read_mostly; +struct kfence_alloc_node_cond { + long need; + long allocated; +}; +/* + * An array to record how many objects need to be allocated + * and how many has been allocated on each node. + */ +static struct kfence_alloc_node_cond *kfence_num_objects_stat; +/* Only used in BOOTING, record partition info about __kfence_pool_area[] */ +static unsigned long kfence_nr_areas_per_node; -unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL; +long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL; EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */ #ifdef MODULE_PARAM_PREFIX @@ -59,25 +81,40 @@ EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */ #endif #define MODULE_PARAM_PREFIX "kfence." -static int kfence_enable_late(void); +DEFINE_STATIC_KEY_FALSE(kfence_short_canary); +DEFINE_STATIC_KEY_FALSE(kfence_skip_interval); +static DEFINE_STATIC_KEY_FALSE(kfence_once_enabled); + +#define KFENCE_MAX_OBJECTS_PER_AREA (PUD_SIZE / PAGE_SIZE / 2 - 1) + +static void kfence_enable_late(void); static int param_set_sample_interval(const char *val, const struct kernel_param *kp) { unsigned long num; - int ret = kstrtoul(val, 0, &num); + int ret = kstrtol(val, 0, &num); if (ret < 0) return ret; - /* Using 0 to indicate KFENCE is disabled. */ - if (!num && READ_ONCE(kfence_enabled)) { - pr_info("disabled\n"); - WRITE_ONCE(kfence_enabled, false); + if (system_state == SYSTEM_BOOTING) { + *((long *)kp->arg) = num; + return 0; } - *((unsigned long *)kp->arg) = num; + /* Not allow sample interval switching between positive and negative */ + if ((kfence_sample_interval > 0 && num < 0) || + (kfence_sample_interval < 0 && num > 0)) { + return -EINVAL; + } + + if (!num) /* Using 0 to indicate KFENCE is disabled. 
*/ + kfence_disable(); + + *((long *)kp->arg) = num; + + if (num && !READ_ONCE(kfence_enabled)) + return disabled_by_warn ? -EINVAL : (kfence_enable_late(), 0); - if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING) - return disabled_by_warn ? -EINVAL : kfence_enable_late(); return 0; } @@ -86,7 +123,7 @@ static int param_get_sample_interval(char *buffer, const struct kernel_param *kp if (!READ_ONCE(kfence_enabled)) return sprintf(buffer, "0\n"); - return param_get_ulong(buffer, kp); + return param_get_long(buffer, kp); } static const struct kernel_param_ops sample_interval_param_ops = { @@ -95,6 +132,107 @@ static const struct kernel_param_ops sample_interval_param_ops = { }; module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600); +static int param_set_num_objects(const char *val, const struct kernel_param *kp) +{ + unsigned long num; + int ret = kstrtoul(val, 0, &num); + + if (ret < 0) + return ret; + +#ifdef CONFIG_ARM64 + if (system_state == SYSTEM_BOOTING) + return 0; +#endif + + if (!num) + return -EINVAL; + + mutex_lock(&kfence_mutex); + + if (READ_ONCE(kfence_enabled)) { + ret = -EBUSY; /* can not change num_objects when enabled */ + goto out_unlock; + } + + *((unsigned long *)kp->arg) = num; + ret = 0; + +out_unlock: + mutex_unlock(&kfence_mutex); + return ret; +} + +static int param_get_num_objects(char *buffer, const struct kernel_param *kp) +{ + return param_get_ulong(buffer, kp); +} + +static const struct kernel_param_ops num_objects_param_ops = { + .set = param_set_num_objects, + .get = param_get_num_objects, +}; +module_param_cb(num_objects, &num_objects_param_ops, &kfence_num_objects, 0600); + +static int param_set_pool_mode(const char *val, const struct kernel_param *kp) +{ + bool mode; + char *s = strstrip((char *)val); + + if (READ_ONCE(kfence_enabled)) + return -EINVAL; /* can not change mode when enabled */ + + if (!strcmp(s, "global")) + mode = false; + else if (!strcmp(s, "node")) + 
mode = true; + else + return -EINVAL; + + *((bool *)kp->arg) = mode; + + return 0; +} + +static int param_get_pool_mode(char *buffer, const struct kernel_param *kp) +{ + return sprintf(buffer, "%s\n", *(bool *)kp->arg ? "node" : "global"); +} + +static const struct kernel_param_ops pool_mode_param_ops = { + .set = param_set_pool_mode, + .get = param_get_pool_mode, +}; +module_param_cb(pool_mode, &pool_mode_param_ops, &kfence_pool_node_mode, 0600); + +static int param_set_fault(const char *val, const struct kernel_param *kp) +{ + bool mode; + char *s = strstrip((char *)val); + + if (!strcmp(s, "report")) + mode = false; + else if (!strcmp(s, "panic")) + mode = true; + else + return -EINVAL; + + *((bool *)kp->arg) = mode; + + return 0; +} + +static int param_get_fault(char *buffer, const struct kernel_param *kp) +{ + return sprintf(buffer, "%s\n", *(bool *)kp->arg ? "panic" : "report"); +} + +static const struct kernel_param_ops fault_param_ops = { + .set = param_set_fault, + .get = param_get_fault, +}; +module_param_cb(fault, &fault_param_ops, &kfence_panic_on_fault, 0600); + /* Pool usage% threshold when currently covered allocations are skipped. */ static unsigned long kfence_skip_covered_thresh __read_mostly = 75; module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644); @@ -107,28 +245,39 @@ module_param_named(deferrable, kfence_deferrable, bool, 0444); static bool kfence_check_on_panic __read_mostly; module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444); -/* The pool of pages used for guard pages and objects. */ -char *__kfence_pool __read_mostly; -EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */ - /* - * Per-object metadata, with one-to-one mapping of object metadata to - * backing pages (in __kfence_pool). + * The pool of pages used for guard pages and objects. + * Only used in booting init state. Will be cleared after that. 
*/ -static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0); -struct kfence_metadata *kfence_metadata __read_mostly; +char **__kfence_pool_area; /* - * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache(). - * So introduce kfence_metadata_init to initialize metadata, and then make - * kfence_metadata visible after initialization is successful. This prevents - * potential UAF or access to uninitialized metadata. + * The pool of pages should be reserved earlier than kfence initialization. It's + * only assigned in arm64 architecture. */ -static struct kfence_metadata *kfence_metadata_init __read_mostly; +char *__kfence_pool_early_init; + +/* The binary tree maintaining all kfence pool areas */ +struct rb_root kfence_pool_root = RB_ROOT; +EXPORT_SYMBOL_GPL(kfence_pool_root); /* Freelist with available objects. */ -static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist); -static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */ +struct kfence_freelist_node { + struct list_head freelist; + raw_spinlock_t lock; +}; + +struct kfence_freelist_cpu { + struct list_head freelist; + unsigned long count; +}; + +struct kfence_freelist { + struct kfence_freelist_node *node; + struct kfence_freelist_cpu __percpu *cpu; +}; +static struct kfence_freelist freelist; +static atomic_t kfence_flush_res, kfence_refkill_res; /* * The static key to set up a KFENCE allocation; or if static keys are not used @@ -150,11 +299,11 @@ atomic_t kfence_allocation_gate = ATOMIC_INIT(1); * P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)) ^ HNUM */ #define ALLOC_COVERED_HNUM 2 -#define ALLOC_COVERED_ORDER (const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2) -#define ALLOC_COVERED_SIZE (1 << ALLOC_COVERED_ORDER) -#define ALLOC_COVERED_HNEXT(h) hash_32(h, ALLOC_COVERED_ORDER) +static unsigned long alloc_covered_order __ro_after_init; +#define ALLOC_COVERED_HNEXT(h) hash_32(h, alloc_covered_order) +#define ALLOC_COVERED_SIZE (1 << 
alloc_covered_order) #define ALLOC_COVERED_MASK (ALLOC_COVERED_SIZE - 1) -static atomic_t alloc_covered[ALLOC_COVERED_SIZE]; +static atomic_t *alloc_covered __read_mostly; /* Stack depth used to determine uniqueness of an allocation. */ #define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8) @@ -177,7 +326,10 @@ enum kfence_counter_id { KFENCE_COUNTER_SKIP_COVERED, KFENCE_COUNTER_COUNT, }; -static atomic_long_t counters[KFENCE_COUNTER_COUNT]; +struct kfence_counter { + s64 counter[KFENCE_COUNTER_COUNT]; +}; +static struct kfence_counter __percpu *counters; static const char *const counter_names[] = { [KFENCE_COUNTER_ALLOCATED] = "currently allocated", [KFENCE_COUNTER_ALLOCS] = "total allocations", @@ -194,13 +346,28 @@ static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT); static inline bool should_skip_covered(void) { - unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100; + unsigned long thresh; + s64 sum; + int cpu; + + /* Only use this feature in upstream mode */ + if (!kfence_num_objects_snap) + return false; - return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh; + thresh = (kfence_num_objects_snap * kfence_skip_covered_thresh) / 100; + sum = 0; + /* This may take some time but should be acceptable in sampling mode. */ + for_each_possible_cpu(cpu) + sum += per_cpu_ptr(counters, cpu)->counter[KFENCE_COUNTER_ALLOCATED]; + + return sum > thresh; } static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries) { + if (!kfence_num_objects_snap) + return 0; + num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH); num_entries = filter_irq_stacks(stack_entries, num_entries); return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed); @@ -210,10 +377,14 @@ static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries * Adds (or subtracts) count @val for allocation stack trace hash * @alloc_stack_hash from Counting Bloom filter. 
*/ -static void alloc_covered_add(u32 alloc_stack_hash, int val) +static inline void alloc_covered_add(u32 alloc_stack_hash, int val) { int i; + /* Only use this feature in upstream mode */ + if (!kfence_num_objects_snap) + return; + for (i = 0; i < ALLOC_COVERED_HNUM; i++) { atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]); alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash); @@ -249,14 +420,14 @@ static bool kfence_unprotect(unsigned long addr) static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta) { - unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2; - unsigned long pageaddr = (unsigned long)&__kfence_pool[offset]; + struct kfence_pool_area *kpa = meta->kpa; + unsigned long offset = (meta - kpa->meta + 1) * PAGE_SIZE * 2; + unsigned long pageaddr = (unsigned long)&kpa->addr[offset]; /* The checks do not affect performance; only called from slow-paths. */ /* Only call with a pointer into kfence_metadata. */ - if (KFENCE_WARN_ON(meta < kfence_metadata || - meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS)) + if (KFENCE_WARN_ON(meta < kpa->meta || meta >= kpa->meta + kpa->nr_objects)) return 0; /* @@ -280,8 +451,6 @@ metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state nex struct kfence_track *track = next == KFENCE_OBJECT_FREED ? 
&meta->free_track : &meta->alloc_track; - lockdep_assert_held(&meta->lock); - if (stack_entries) { memcpy(track->stack_entries, stack_entries, num_stack_entries * sizeof(stack_entries[0])); @@ -314,7 +483,7 @@ static inline bool check_canary_byte(u8 *addr) if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr))) return true; - atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_BUGS]++; meta = addr_to_metadata((unsigned long)addr); raw_spin_lock_irqsave(&meta->lock, flags); @@ -327,24 +496,36 @@ static inline bool check_canary_byte(u8 *addr) static inline void set_canary(const struct kfence_metadata *meta) { const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE); - unsigned long addr = pageaddr; + unsigned long addr, start = pageaddr, end = pageaddr + PAGE_SIZE; + + /* This func takes most of the cost, so shrink the range when there is no interval limit. */ + if (static_branch_likely(&kfence_short_canary)) { + start = max(ALIGN_DOWN(meta->addr - 1, L1_CACHE_BYTES), start); + end = min(ALIGN(meta->addr + meta->size + 1, L1_CACHE_BYTES), end); + } /* * The canary may be written to part of the object memory, but it does * not affect it. The user should initialize the object before using it.
*/ - for (; addr < meta->addr; addr += sizeof(u64)) + for (addr = start; addr < meta->addr; addr += sizeof(u64)) *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64; addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64)); - for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) + for (; addr < end; addr += sizeof(u64)) *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64; } static inline void check_canary(const struct kfence_metadata *meta) { const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE); - unsigned long addr = pageaddr; + unsigned long addr, start = pageaddr, end = pageaddr + PAGE_SIZE; + + /* This func takes most of the cost, so shrink the range when there is no interval limit. */ + if (static_branch_likely(&kfence_short_canary)) { + start = max(ALIGN_DOWN(meta->addr - 1, L1_CACHE_BYTES), start); + end = min(ALIGN(meta->addr + meta->size + 1, L1_CACHE_BYTES), end); + } /* * We'll iterate over each canary byte per-side until a corrupted byte @@ -356,7 +537,7 @@ static inline void check_canary(const struct kfence_metadata *meta) */ /* Apply to left of object.
*/ - for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) { + for (addr = start; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) { if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) break; } @@ -376,7 +557,7 @@ static inline void check_canary(const struct kfence_metadata *meta) if (unlikely(!check_canary_byte((u8 *)addr))) return; } - for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) { + for (; addr < end; addr += sizeof(u64)) { if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) { for (; addr - pageaddr < PAGE_SIZE; addr++) { @@ -387,48 +568,103 @@ static inline void check_canary(const struct kfence_metadata *meta) } } -static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp, - unsigned long *stack_entries, size_t num_stack_entries, - u32 alloc_stack_hash) +static inline struct kfence_metadata * +get_free_meta_from_node(struct kfence_freelist_node *kfence_freelist) { - struct kfence_metadata *meta = NULL; + struct kfence_metadata *object = NULL; unsigned long flags; - struct slab *slab; - void *addr; - const bool random_right_allocate = get_random_u32_below(2); - const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS && - !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS); - /* Try to obtain a free object. 
*/ - raw_spin_lock_irqsave(&kfence_freelist_lock, flags); - if (!list_empty(&kfence_freelist)) { - meta = list_entry(kfence_freelist.next, struct kfence_metadata, list); - list_del_init(&meta->list); - } - raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags); - if (!meta) { - atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]); - return NULL; + raw_spin_lock_irqsave(&kfence_freelist->lock, flags); + if (!list_empty(&kfence_freelist->freelist)) { + object = list_entry(kfence_freelist->freelist.next, struct kfence_metadata, list); + list_del_init(&object->list); } + percpu_ref_get(&object->kpa->refcnt); + raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); - if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { - /* - * This is extremely unlikely -- we are reporting on a - * use-after-free, which locked meta->lock, and the reporting - * code via printk calls kmalloc() which ends up in - * kfence_alloc() and tries to grab the same object that we're - * reporting on. While it has never been observed, lockdep does - * report that there is a possibility of deadlock. Fix it by - * using trylock and bailing out gracefully. - */ - raw_spin_lock_irqsave(&kfence_freelist_lock, flags); - /* Put the object back on the freelist. 
*/ - list_add_tail(&meta->list, &kfence_freelist); - raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags); + return object; +} - return NULL; +#define KFENCE_FREELIST_PERCPU_SIZE 100 + +static struct kfence_metadata * +get_free_meta_slowpath(struct kfence_freelist_cpu *c, + struct kfence_freelist_node *kfence_freelist) +{ + struct kfence_metadata *object = NULL; + struct list_head *entry = &kfence_freelist->freelist; + + KFENCE_WARN_ON(!list_empty(&c->freelist)); + + raw_spin_lock(&kfence_freelist->lock); + + if (list_empty(&kfence_freelist->freelist)) + goto out; + + object = list_first_entry(entry, struct kfence_metadata, list); + list_del_init(&object->list); + + do { + entry = READ_ONCE(entry->next); + + if (entry == &kfence_freelist->freelist) { + entry = entry->prev; + break; + } + + c->count++; + } while (c->count < KFENCE_FREELIST_PERCPU_SIZE); + + list_cut_position(&c->freelist, &kfence_freelist->freelist, entry); + +out: + raw_spin_unlock(&kfence_freelist->lock); + + return object; +} + +static struct kfence_metadata *get_free_meta(int real_node) +{ + unsigned long flags; + struct kfence_freelist_cpu *c; + struct kfence_freelist_node *kfence_freelist; + struct kfence_metadata *object; + int node = kfence_node_map[real_node]; + + if (node >= 0) + kfence_freelist = &freelist.node[node]; + else + kfence_freelist = &freelist.node[real_node]; + + /* If target page not on current node, directly get from its nodelist */ + if (unlikely(node != kfence_node_map[numa_node_id()] || kfence_num_objects_snap)) + return get_free_meta_from_node(kfence_freelist); + + local_irq_save(flags); + c = get_cpu_ptr(freelist.cpu); + + if (unlikely(!c->count)) { + object = get_free_meta_slowpath(c, kfence_freelist); + } else { + object = list_first_entry(&c->freelist, struct kfence_metadata, list); + list_del_init(&object->list); + c->count--; } + percpu_ref_get(&object->kpa->refcnt); + + put_cpu_ptr(c); + local_irq_restore(flags); + + return object; +} + +static inline void 
__init_meta(struct kfence_metadata *meta, size_t size, struct kmem_cache *cache, + unsigned long *stack_entries, size_t num_stack_entries, + u32 alloc_stack_hash) +{ + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + meta->addr = metadata_to_pageaddr(meta); /* Unprotect if we're reusing this page. */ if (meta->state == KFENCE_OBJECT_FREED) @@ -442,27 +678,72 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g * is that the out-of-bounds accesses detected are deterministic for * such allocations. */ - if (random_right_allocate) { + if (cache && this_cpu_counter->counter[KFENCE_COUNTER_ALLOCS] % 2) { /* Allocate on the "right" side, re-calculate address. */ meta->addr += PAGE_SIZE - size; meta->addr = ALIGN_DOWN(meta->addr, cache->align); } - addr = (void *)meta->addr; - /* Update remaining metadata. */ metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries); /* Pairs with READ_ONCE() in kfence_shutdown_cache(). */ WRITE_ONCE(meta->cache, cache); meta->size = size; meta->alloc_stack_hash = alloc_stack_hash; +} + +static void put_free_meta(struct kfence_metadata *object); +static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp, + unsigned long *stack_entries, size_t num_stack_entries, + u32 alloc_stack_hash, int node) +{ + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + struct kfence_metadata *meta; + unsigned long flags; + struct page *page; + struct slab *slab; + void *addr; + const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS && + !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS); + + /* Try to obtain a free object. 
*/ + meta = get_free_meta(node); + if (!meta) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_CAPACITY]++; + return NULL; + } + + if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { + /* + * This is extremely unlikely -- we are reporting on a + * use-after-free, which locked meta->lock, and the reporting + * code via printk calls kmalloc() which ends up in + * kfence_alloc() and tries to grab the same object that we're + * reporting on. While it has never been observed, lockdep does + * report that there is a possibility of deadlock. Fix it by + * using trylock and bailing out gracefully. + */ + /* Put the object back on the freelist. */ + put_free_meta(meta); + + return NULL; + } + + __init_meta(meta, size, cache, stack_entries, num_stack_entries, alloc_stack_hash); + raw_spin_unlock_irqrestore(&meta->lock, flags); + addr = (void *)meta->addr; alloc_covered_add(alloc_stack_hash, 1); /* Set required slab fields. */ - slab = virt_to_slab((void *)meta->addr); + page = virt_to_page(addr); + slab = page_slab(page); + __SetPageSlab(page); slab->slab_cache = cache; +#ifdef CONFIG_MEMCG + slab->memcg_data = (unsigned long)meta->objcg | MEMCG_DATA_OBJCGS; +#endif #if defined(CONFIG_SLUB) slab->objects = 1; #elif defined(CONFIG_SLAB) @@ -485,15 +766,74 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g if (random_fault) kfence_protect(meta->addr); /* Random "faults" by protecting the object. 
*/ - atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]); - atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]); + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED]++; + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCS]++; return addr; } -static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie) +static inline void put_free_meta_to_node(struct kfence_metadata *object, + struct kfence_freelist_node *kfence_freelist) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&kfence_freelist->lock, flags); + list_add_tail(&object->list, &kfence_freelist->freelist); + percpu_ref_put(&object->kpa->refcnt); + raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); +} + +static void put_free_meta_slowpath(struct kfence_freelist_cpu *c, + struct kfence_freelist_node *kfence_freelist) +{ + struct list_head *entry = &c->freelist, new_list; + + do { + entry = entry->next; + c->count--; + } while (c->count > KFENCE_FREELIST_PERCPU_SIZE); + + list_cut_position(&new_list, &c->freelist, entry); + raw_spin_lock(&kfence_freelist->lock); + list_splice_tail(&new_list, &kfence_freelist->freelist); + raw_spin_unlock(&kfence_freelist->lock); +} + +static void put_free_meta(struct kfence_metadata *object) +{ + int node = object->kpa->node; + unsigned long flags; + struct kfence_freelist_cpu *c; + struct kfence_freelist_node *kfence_freelist = &freelist.node[node]; + + KFENCE_WARN_ON(!list_empty(&object->list)); + + /* If meta not on current node, just return it to its own nodelist */ + if (unlikely(!kfence_node_map || node != kfence_node_map[numa_node_id()] || + kfence_num_objects_snap)) { + put_free_meta_to_node(object, kfence_freelist); + return; + } + + local_irq_save(flags); + c = get_cpu_ptr(freelist.cpu); + + list_add_tail(&object->list, &c->freelist); + c->count++; + + if (unlikely(c->count == KFENCE_FREELIST_PERCPU_SIZE * 2)) + put_free_meta_slowpath(c, kfence_freelist); + + percpu_ref_put(&object->kpa->refcnt); + + put_cpu_ptr(c); + 
local_irq_restore(flags); +} + +static inline bool __free_meta(void *addr, struct kfence_metadata *meta, bool zombie, bool is_page) { struct kcsan_scoped_access assert_page_exclusive; + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); unsigned long flags; bool init; @@ -501,11 +841,11 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) { /* Invalid or double-free, bail out. */ - atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); + this_cpu_counter->counter[KFENCE_COUNTER_BUGS]++; kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_INVALID_FREE); raw_spin_unlock_irqrestore(&meta->lock, flags); - return; + return false; } /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */ @@ -525,38 +865,50 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z /* Mark the object as freed. */ metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0); - init = slab_want_init_on_free(meta->cache); + if (!is_page) + init = slab_want_init_on_free(meta->cache); + raw_spin_unlock_irqrestore(&meta->lock, flags); alloc_covered_add(meta->alloc_stack_hash, -1); - /* Check canary bytes for memory corruption. */ - check_canary(meta); + if (!is_page) { + /* Check canary bytes for memory corruption. */ + check_canary(meta); - /* - * Clear memory if init-on-free is set. While we protect the page, the - * data is still there, and after a use-after-free is detected, we - * unprotect the page, so the data is still accessible. - */ - if (!zombie && unlikely(init)) - memzero_explicit(addr, meta->size); + /* + * Clear memory if init-on-free is set. While we protect the page, the + * data is still there, and after a use-after-free is detected, we + * unprotect the page, so the data is still accessible. 
+ */ + if (!zombie && unlikely(slab_want_init_on_free(meta->cache))) + memzero_explicit(addr, meta->size); + } /* Protect to detect use-after-frees. */ kfence_protect((unsigned long)addr); kcsan_end_scoped_access(&assert_page_exclusive); + + return true; +} + +static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie) +{ + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + + if (!__free_meta(addr, meta, zombie, false)) + return; + if (!zombie) { /* Add it to the tail of the freelist for reuse. */ - raw_spin_lock_irqsave(&kfence_freelist_lock, flags); - KFENCE_WARN_ON(!list_empty(&meta->list)); - list_add_tail(&meta->list, &kfence_freelist); - raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags); + put_free_meta(meta); - atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]); - atomic_long_inc(&counters[KFENCE_COUNTER_FREES]); + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED]--; + this_cpu_counter->counter[KFENCE_COUNTER_FREES]++; } else { /* See kfence_shutdown_cache(). */ - atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]); + this_cpu_counter->counter[KFENCE_COUNTER_ZOMBIES]++; } } @@ -567,22 +919,39 @@ static void rcu_guarded_free(struct rcu_head *h) kfence_guarded_free((void *)meta->addr, meta, false); } -/* - * Initialization of the KFENCE pool after its allocation. - * Returns 0 on success; otherwise returns the address up to - * which partial initialization succeeded. 
- */ -static unsigned long kfence_init_pool(void) +static void kfence_clear_page_info(unsigned long addr, unsigned long size) +{ + unsigned long i; + + for (i = addr; i < addr + size; i += PAGE_SIZE) { + struct page *page = virt_to_page((void *)i); + + if (PageSlab(page)) { +#ifdef CONFIG_MEMCG + page->memcg_data = 0; +#endif + __ClearPageSlab(page); + } + __ClearPageKfence(page); + page->mapping = NULL; + atomic_set(&page->_refcount, 1); + kfence_unprotect(i); + } +} + +static bool __kfence_init_pool_area(struct kfence_pool_area *kpa) { - unsigned long addr; + char *__kfence_pool = kpa->addr; + struct kfence_metadata *kfence_metadata = kpa->meta; + struct kfence_freelist_node *kfence_freelist = &freelist.node[kpa->node]; + unsigned long addr = (unsigned long)__kfence_pool, flags; struct page *pages; int i; - if (!arch_kfence_init_pool()) - return (unsigned long)__kfence_pool; + if (!__kfence_pool_early_init && !arch_kfence_init_pool(kpa)) + goto err; - addr = (unsigned long)__kfence_pool; - pages = virt_to_page(__kfence_pool); + pages = virt_to_page((void *)addr); /* * Set up object pages: they must have PG_slab set, to avoid freeing @@ -592,17 +961,10 @@ static unsigned long kfence_init_pool(void) * fast-path in SLUB, and therefore need to ensure kfree() correctly * enters __slab_free() slow-path. 
*/ - for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) { - struct slab *slab = page_slab(nth_page(pages, i)); - - if (!i || (i % 2)) - continue; + for (i = 0; i < kpa->pool_size / PAGE_SIZE; i++) { + struct page *page = nth_page(pages, i); - __folio_set_slab(slab_folio(slab)); -#ifdef CONFIG_MEMCG - slab->memcg_data = (unsigned long)&kfence_metadata_init[i / 2 - 1].objcg | - MEMCG_DATA_OBJCGS; -#endif + __SetPageKfence(page); } /* @@ -613,96 +975,676 @@ static unsigned long kfence_init_pool(void) */ for (i = 0; i < 2; i++) { if (unlikely(!kfence_protect(addr))) - return addr; + goto err; addr += PAGE_SIZE; } - for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { - struct kfence_metadata *meta = &kfence_metadata_init[i]; + /* Protect the right redzone. */ + for (i = 0; i < kpa->nr_objects; i++) { + if (unlikely(!kfence_protect(addr + PAGE_SIZE))) + goto err; + addr += 2 * PAGE_SIZE; + } + + addr = (unsigned long)__kfence_pool + 2 * PAGE_SIZE; + raw_spin_lock_irqsave(&kfence_freelist->lock, flags); + for (i = 0; i < kpa->nr_objects; i++) { + struct kfence_metadata *meta = &kfence_metadata[i]; /* Initialize metadata. */ INIT_LIST_HEAD(&meta->list); raw_spin_lock_init(&meta->lock); meta->state = KFENCE_OBJECT_UNUSED; meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */ - list_add_tail(&meta->list, &kfence_freelist); - - /* Protect the right redzone. */ - if (unlikely(!kfence_protect(addr + PAGE_SIZE))) - goto reset_slab; + meta->kpa = kpa; + list_add_tail(&meta->list, &kfence_freelist->freelist); + /* No fail after here, since we've added this pool to freelist. */ addr += 2 * PAGE_SIZE; } + raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); /* - * Make kfence_metadata visible only when initialization is successful. - * Otherwise, if the initialization fails and kfence_metadata is freed, - * it may cause UAF in kfence_shutdown_cache(). + * The pool is live and will never be deallocated from this point on. 
+ * Remove the pool object from the kmemleak object tree, as it would + * otherwise overlap with allocations returned by kfence_alloc(), which + * are registered with kmemleak through the slab post-alloc hook. */ - smp_store_release(&kfence_metadata, kfence_metadata_init); - return 0; + if (PageReserved(pages)) + kmemleak_ignore_phys(__pa(__kfence_pool)); -reset_slab: - for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) { - struct slab *slab = page_slab(nth_page(pages, i)); + return true; - if (!i || (i % 2)) - continue; -#ifdef CONFIG_MEMCG - slab->memcg_data = 0; -#endif - __folio_clear_slab(slab_folio(slab)); - } +err: + kfence_clear_page_info((unsigned long)kpa->addr, kpa->pool_size); + return false; +} - return addr; +static bool kfence_rb_less(struct rb_node *a, const struct rb_node *b) +{ + return (unsigned long)kfence_rbentry(a)->addr < (unsigned long)kfence_rbentry(b)->addr; } -static bool __init kfence_init_pool_early(void) +static void __init kfence_alloc_pool_node(int node) { - unsigned long addr; + unsigned long nr_need = kfence_num_objects_stat[node].need; + unsigned long nr_request = min(nr_need, KFENCE_MAX_OBJECTS_PER_AREA); + unsigned long index = kfence_nr_areas_per_node * node; + + while (nr_need) { + unsigned long kfence_pool_size = (nr_request + 1) * 2 * PAGE_SIZE; + + __kfence_pool_area[index] = memblock_alloc_node(kfence_pool_size, PUD_SIZE, node); + if (!__kfence_pool_area[index]) { + pr_err("kfence alloc pool on node %d failed\n", node); + break; + } + index++; + nr_need -= nr_request; + nr_request = min(nr_request, nr_need); + } +} + +static void kpa_release(struct percpu_ref *ref); +static void kfence_free_area(struct work_struct *work); +static inline bool init_kpa(struct kfence_pool_area *kpa, char *__kfence_pool, int node, + unsigned long nr_objects, unsigned long pool_size) +{ + kpa->meta = vzalloc_node(sizeof(struct kfence_metadata) * nr_objects, node); + if (!kpa->meta) + goto fail; + if (percpu_ref_init(&kpa->refcnt, kpa_release, 
PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) + goto fail; + INIT_WORK(&kpa->work, kfence_free_area); + kpa->addr = __kfence_pool; + kpa->pool_size = pool_size; + kpa->nr_objects = nr_objects; + kpa->node = node; + atomic_set(&kpa->_ref, 1); /* held by rb tree */ + + if (!__kfence_init_pool_area(kpa)) + goto fail; + + return true; + +fail: + vfree(kpa->meta); + percpu_ref_exit(&kpa->refcnt); + + return false; +} + +static bool __init kfence_init_pool_area(int node, int area) +{ + int index = node * kfence_nr_areas_per_node + area; + char *__kfence_pool = __kfence_pool_area[index]; + struct kfence_pool_area *kpa; + unsigned long nr_objects, pool_size; if (!__kfence_pool) return false; - addr = kfence_init_pool(); + nr_objects = min(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); + pool_size = (nr_objects + 1) * 2 * PAGE_SIZE; - if (!addr) { - /* - * The pool is live and will never be deallocated from this point on. - * Ignore the pool object from the kmemleak phys object tree, as it would - * otherwise overlap with allocations returned by kfence_alloc(), which - * are registered with kmemleak through the slab post-alloc hook. 
- */ - kmemleak_ignore_phys(__pa(__kfence_pool)); - return true; + kpa = kzalloc_node(sizeof(struct kfence_pool_area), GFP_KERNEL, node); + if (!kpa) + goto fail; + + if (!init_kpa(kpa, __kfence_pool, node, nr_objects, pool_size)) + goto fail; + + rb_add(&kpa->rb_node, &kfence_pool_root, kfence_rb_less); + __kfence_pool_area[index] = NULL; + kfence_num_objects_stat[node].allocated += nr_objects; + + return true; + +fail: + memblock_free_late(__pa(__kfence_pool), pool_size); + __kfence_pool_area[index] = NULL; + kfree(kpa); + + return false; +} + +static bool __init kfence_init_pool(void) +{ + int area, node; + bool success_once = false; + + for_each_node(node) { + for (area = 0; area < kfence_nr_areas_per_node; area++) { + if (kfence_init_pool_area(node, area)) + success_once = true; + } + } + + return success_once; +} + +static void kfence_alloc_pool_late_node(int node, struct list_head *ready, bool fallback) +{ + unsigned long nr_need, nr_request; + struct kfence_alloc_node_cond *knos = &kfence_num_objects_stat[node]; + gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO; + + if (knos->allocated >= knos->need) + return; + + nr_need = roundup(knos->need - knos->allocated, KFENCE_MAX_OBJECTS_PER_AREA); + nr_request = KFENCE_MAX_OBJECTS_PER_AREA; + if (!fallback) + gfp_mask |= __GFP_THISNODE; + + while (nr_need) { + struct page *page; + struct kfence_pool_area *kpa; + unsigned long nr_pages = (nr_request + 1) * 2; +#ifdef CONFIG_CONTIG_ALLOC + page = alloc_contig_pages(nr_pages, gfp_mask, node, NULL); +#else + pr_warn("anolis kfence only supports enabled later with CONFIG_CONTIG_ALLOC\n"); + page = NULL; +#endif + if (!page) { + pr_err("kfence alloc pool on node %d failed\n", node); + return; + } + kpa = kzalloc_node(sizeof(struct kfence_pool_area), GFP_KERNEL, node); + if (!kpa) + goto fail; + + if (!init_kpa(kpa, page_to_virt(page), node, nr_request, nr_pages * PAGE_SIZE)) + goto fail; + + list_add(&kpa->list, ready); + nr_need -= nr_request; + knos->allocated += nr_request; 
+ nr_request = min(nr_request, nr_need); + + continue; + +fail: +#ifdef CONFIG_CONTIG_ALLOC + free_contig_range(page_to_pfn(page), nr_pages); +#endif + kfree(kpa); + + return; + } +} + +static void kfence_free_pool_area(struct kfence_pool_area *kpa) +{ + phys_addr_t base = __pa(kpa->addr), size = kpa->pool_size; + phys_addr_t cursor = PFN_UP(base); + phys_addr_t end = PFN_DOWN(base + size); + + kmemleak_free_part_phys(base, size); + for (; cursor < end; cursor++) { + __free_pages_core(pfn_to_page(cursor), 0); + totalram_pages_inc(); + } +} + +static void kfence_free_pool_late_area(struct kfence_pool_area *kpa) +{ +#ifdef CONFIG_CONTIG_ALLOC + free_contig_range(page_to_pfn(virt_to_page(kpa->addr)), kpa->pool_size / PAGE_SIZE); +#endif +} + +static void get_kpa(struct kfence_pool_area *kpa) +{ + atomic_inc(&kpa->_ref); +} + +static void put_kpa(struct kfence_pool_area *kpa) +{ + if (atomic_dec_and_test(&kpa->_ref)) + kfree(kpa); +} + +static int kfence_update_pool_root(void *info) +{ + struct list_head *ready_list = info; + struct kfence_pool_area *kpa; + struct rb_node *cur, *next; + + for (cur = rb_first(&kfence_pool_root); cur; cur = next) { + kpa = kfence_rbentry(cur); + next = rb_next(cur); + if (!kpa->nr_objects) { + rb_erase(&kpa->rb_node, &kfence_pool_root); + put_kpa(kpa); + } else { + percpu_ref_resurrect(&kpa->refcnt); + } + } + + while (!list_empty(ready_list)) { + kpa = list_first_entry(ready_list, struct kfence_pool_area, list); + rb_add(&kpa->rb_node, &kfence_pool_root, kfence_rb_less); + list_del(&kpa->list); } + return 0; +} + +/* + * Flush this cpu's per cpu freelist to per node freelist. + * + * We don't need more sync methods to prevent race, because we can + * only reach here in two routes (with both kfence is disabled + * so no new allocatings will occur): + * + * 1) from update_kfence_node_map() when enabling kfence + * Since kfence_node_map is set to NULL, the objects + * will be directly freed to the per node freelist. 
+ * + * 2) from kfence_free_area() when a kpa being released + * Since the refcnt of this kpa is down to 0, no objects + * from this kpa will be freed to per cpu freelist. + * If some objects from other kpas are freed after this + * check, it is ok because we will only free the space + * of our target kpa. Just let objects from other kpas + * remain in per cpu freelist. + */ +static void kfence_flush(struct kfence_freelist_cpu *c) +{ + struct kfence_freelist_node *kfence_freelist; + struct kfence_metadata *meta; + unsigned long flags; + + if (list_empty(&c->freelist)) { + if (KFENCE_WARN_ON(c->count)) + c->count = 0; + return; + } + + meta = list_first_entry(&c->freelist, struct kfence_metadata, list); + kfence_freelist = &freelist.node[meta->kpa->node]; + + raw_spin_lock_irqsave(&kfence_freelist->lock, flags); + list_splice_tail_init(&c->freelist, &kfence_freelist->freelist); + c->count = 0; + raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); +} + +static DECLARE_WAIT_QUEUE_HEAD(kfence_flush_wait); +static void kfence_flush_call(void *info) +{ + struct kfence_freelist_cpu *c = get_cpu_ptr(freelist.cpu); + + kfence_flush(c); + put_cpu_ptr(c); + + if (!atomic_dec_return(&kfence_flush_res)) + wake_up(&kfence_flush_wait); +} + +/* Flush percpu freelists on all cpus and wait for return. */ +static void kfence_flush_all_and_wait(void) +{ + int cpu; + + cpus_read_lock(); + atomic_set(&kfence_flush_res, num_online_cpus()); + on_each_cpu(kfence_flush_call, NULL, 0); + + /* Flush offline cpus. 
*/ + preempt_disable(); + for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask) { + kfence_flush(per_cpu_ptr(freelist.cpu, cpu)); + } + preempt_enable(); + cpus_read_unlock(); + + wait_event_idle(kfence_flush_wait, !atomic_read(&kfence_flush_res)); +} + +static bool kfence_can_recover_tlb(struct kfence_pool_area *kpa) +{ +#ifdef CONFIG_X86_64 + /* only recover 1GiB aligned tlb */ + return kpa->pool_size == PUD_SIZE; +#else /* - * Only release unprotected pages, and do not try to go back and change - * page attributes due to risk of failing to do so as well. If changing - * page attributes for some pages fails, it is very likely that it also - * fails for the first page, and therefore expect addr==__kfence_pool in - * most failure cases. + * On arm64, the direct mapping area is already split to page granularity + * with CONFIG_RODATA_FULL_DEFAULT_ENABLED=y, or CONFIG_KFENCE=y. So we will + * not recover tlb to pud huge. See upstream commit 840b23986344 + * ("arm64, kfence: enable KFENCE for ARM64") in detail. */ - memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool)); - __kfence_pool = NULL; + return false; +#endif +} + +static inline void __kfence_recover_tlb(unsigned long addr) +{ + if (!arch_kfence_free_pool(addr)) + pr_warn("fail to recover tlb to 1G at 0x%p-0x%p\n", + (void *)addr, (void *)(addr + PUD_SIZE)); +} + +static inline void kfence_recover_tlb(struct kfence_pool_area *kpa) +{ + unsigned long base = ALIGN_DOWN((unsigned long)kpa->addr, PUD_SIZE); + + if (kfence_can_recover_tlb(kpa)) + __kfence_recover_tlb(base); +} + +/* Free a specific area. The refcnt has been down to 0. 
*/ +static void kfence_free_area(struct work_struct *work) +{ + unsigned long flags, i; + struct page *page; + struct kfence_pool_area *kpa = container_of(work, struct kfence_pool_area, work); + struct kfence_freelist_node *kfence_freelist; + + mutex_lock(&kfence_mutex); + if (!kpa->nr_objects || !percpu_ref_is_zero(&kpa->refcnt)) + goto out_unlock; + + kfence_flush_all_and_wait(); + + kfence_freelist = &freelist.node[kpa->node]; + raw_spin_lock_irqsave(&kfence_freelist->lock, flags); + for (i = 0; i < kpa->nr_objects; i++) + list_del(&kpa->meta[i].list); + + raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); + + pr_info("freed %lu bytes for %lu objects on node %d at 0x%p-0x%p\n", + kpa->pool_size, kpa->nr_objects, kpa->node, (void *)kpa->addr, + (void *)(kpa->addr + kpa->pool_size)); - memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE); - kfence_metadata_init = NULL; + kfence_clear_page_info((unsigned long)kpa->addr, kpa->pool_size); + kfence_recover_tlb(kpa); + page = virt_to_page(kpa->addr); + + if (PageReserved(page)) + kfence_free_pool_area(kpa); + else + kfence_free_pool_late_area(kpa); + + vfree(kpa->meta); + kpa->meta = NULL; + percpu_ref_exit(&kpa->refcnt); + kpa->nr_objects = 0; + kpa->pool_size = 0; + +out_unlock: + mutex_unlock(&kfence_mutex); + put_kpa(kpa); +} + +static void kpa_release(struct percpu_ref *ref) +{ + struct kfence_pool_area *kpa = container_of(ref, struct kfence_pool_area, refcnt); + + get_kpa(kpa); + if (!queue_work(system_long_wq, &kpa->work)) + put_kpa(kpa); +} + +static void calculate_need_alloc(void) +{ + int node, nr_kpas, base, remain, nr_node_has_cpu; + enum node_states node_stat = N_CPU; + + if (!kfence_num_objects_stat) + return; + + if (kfence_pool_node_mode) { + for_each_node(node) { + kfence_num_objects_stat[node].need = kfence_num_objects; + } + return; + } + + if (kfence_num_objects < KFENCE_MAX_OBJECTS_PER_AREA) { + kfence_num_objects_stat[first_online_node].need = kfence_num_objects; + return; 
+ } + + /* In global mode, we only alloc on nodes with cpus (i.e., not on pmem nodes) */ + nr_node_has_cpu = num_node_state(node_stat); + if (!nr_node_has_cpu) { + node_stat = N_ONLINE; + nr_node_has_cpu = num_node_state(node_stat); + } + nr_kpas = kfence_num_objects / KFENCE_MAX_OBJECTS_PER_AREA; + base = nr_kpas / nr_node_has_cpu; + remain = nr_kpas - base * nr_node_has_cpu; + for_each_node_state(node, node_stat) { + kfence_num_objects_stat[node].need = (base + (!!remain)) * + KFENCE_MAX_OBJECTS_PER_AREA; + if (remain) + remain--; + } +} + +static inline bool __check_map_change(int *new_node_map) +{ + int node; + + for_each_node(node) { + if (kfence_node_map[node] != new_node_map[node]) + return true; + } return false; } +static void update_kfence_node_map(int *new_node_map) +{ + int *old_node_map; + int node; + enum node_states node_stat = N_CPU; + struct zonelist *zonelist; + struct zone *zone; + struct zoneref *z; + + memset(new_node_map, -1, sizeof(int) * nr_node_ids); + + if (!num_node_state(node_stat)) + node_stat = N_ONLINE; + + for_each_node_state(node, node_stat) { + if (kfence_num_objects_stat[node].allocated) { + new_node_map[node] = node; + continue; + } + + /* We borrow from zonelist to get the nearest node to map. */ + zonelist = node_zonelist(node, GFP_KERNEL); + for_each_zone_zonelist_nodemask(zone, z, zonelist, ZONE_NORMAL, NULL) { + if (kfence_num_objects_stat[zone_to_nid(zone)].allocated) { + new_node_map[node] = zone_to_nid(zone); + break; + } + } + } + + /* It's the first time of init */ + if (!kfence_node_map) { + kfence_node_map = new_node_map; + return; + } + + if (!__check_map_change(new_node_map)) { + kfree(new_node_map); + return; + } + + old_node_map = kfence_node_map; + kfence_node_map = NULL; + synchronize_rcu(); + + kfence_flush_all_and_wait(); + + kfence_node_map = new_node_map; + kfree(old_node_map); +} + +/* + * Get the last kfence.booting_max= from boot cmdline. + * Mainly copied from get_last_crashkernel(). 
+ */ +static __init char *get_last_kfence_booting_max(char *name) +{ + char *p = boot_command_line, *ck_cmdline = NULL; + + /* find kfence.booting_max and use the last one if there are more */ + p = strstr(p, name); + while (p) { + char *end_p = strchr(p, ' '); + + if (!end_p) + end_p = p + strlen(p); + ck_cmdline = p; + p = strstr(p+1, name); + } + + if (!ck_cmdline) + return NULL; + + ck_cmdline += strlen(name); + return ck_cmdline; +} + +/* + * This function parses command lines in the format + * + * kfence.booting_max=ramsize-range:size[,...] + * + * The function returns 0 on success and -EINVAL on failure. + * Mainly copied from parse_crashkernel_mem(). + */ +static int __init parse_kfence_booting_max(char *cmdline, + unsigned long long system_ram, + unsigned long long *reserve_max) +{ + char *cur = cmdline, *tmp; + + /* for each entry of the comma-separated list */ + do { + unsigned long long start, end = ULLONG_MAX, size; + + /* get the start of the range */ + start = memparse(cur, &tmp); + if (cur == tmp) { + pr_warn("kfence.booting_max: Memory value expected\n"); + return -EINVAL; + } + cur = tmp; + if (*cur != '-') { + pr_warn("kfence.booting_max: '-' expected\n"); + return -EINVAL; + } + cur++; + + /* if no ':' is here, than we read the end */ + if (*cur != ':') { + end = memparse(cur, &tmp); + if (cur == tmp) { + pr_warn("kfence.booting_max: Memory value expected\n"); + return -EINVAL; + } + cur = tmp; + if (end <= start) { + pr_warn("kfence.booting_max: end <= start\n"); + return -EINVAL; + } + } + + if (*cur != ':') { + pr_warn("kfence.booting_max: ':' expected\n"); + return -EINVAL; + } + cur++; + + size = memparse(cur, &tmp); + if (cur == tmp) { + pr_warn("kfence.booting_max: Memory value expected\n"); + return -EINVAL; + } + cur = tmp; + + /* match ? 
*/ + if (system_ram >= start && system_ram < end) { + *reserve_max = size; + break; + } + } while (*cur++ == ','); + + if (!*reserve_max) + pr_info("kfence.booting_max size resulted in zero bytes, disabled\n"); + + return 0; +} + /* === DebugFS Interface ==================================================== */ +static void print_pool_size(struct seq_file *seq, unsigned long byte) +{ + if (byte < SZ_1K) + seq_printf(seq, "%lu B\n", byte); + else if (byte < SZ_1M) + seq_printf(seq, "%lu KB\n", byte / SZ_1K); + else if (byte < SZ_1G) + seq_printf(seq, "%lu MB\n", byte / SZ_1M); + else + seq_printf(seq, "%lu GB\n", byte / SZ_1G); +} + static int stats_show(struct seq_file *seq, void *v) { - int i; + int i, cpu; + struct kfence_pool_area *kpa; + struct rb_node *iter; + unsigned long *size_count; seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled)); - for (i = 0; i < KFENCE_COUNTER_COUNT; i++) - seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i])); + + if (!counters) + return 0; + + for (i = 0; i < KFENCE_COUNTER_COUNT; i++) { + s64 sum = 0; + /* + * This calculation may not accurate, but don't mind since we are + * mostly interested in bugs and zombies. They are rare and likely + * not changed during calculating. 
+ */ + for_each_possible_cpu(cpu) + sum += per_cpu_ptr(counters, cpu)->counter[i]; + seq_printf(seq, "%-35s:%20lld\n", counter_names[i], sum); + } + + size_count = kmalloc_array(nr_node_ids * 2, sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO); + if (!size_count) + return 0; + + mutex_lock(&kfence_mutex); + kfence_for_each_area(kpa, iter) { + if (!kpa->nr_objects) + continue; + size_count[kpa->node] += kpa->nr_objects; + size_count[kpa->node + nr_node_ids] += kpa->pool_size; + } + mutex_unlock(&kfence_mutex); + + seq_puts(seq, "\nnode\tobject_size\tpool_size\n"); + for_each_node(i) { + seq_printf(seq, "%-8d%-16lu", i, size_count[i]); + print_pool_size(seq, size_count[i + nr_node_ids]); + } + + kfree(size_count); return 0; } @@ -715,28 +1657,59 @@ DEFINE_SHOW_ATTRIBUTE(stats); */ static void *start_object(struct seq_file *seq, loff_t *pos) { - if (*pos < CONFIG_KFENCE_NUM_OBJECTS) - return (void *)((long)*pos + 1); + loff_t index = *pos; + struct kfence_pool_area *kpa; + struct rb_node *iter; + + mutex_lock(&kfence_mutex); + kfence_for_each_area(kpa, iter) { + if (index >= kpa->nr_objects) { + index -= kpa->nr_objects; + continue; + } + return &kpa->meta[index]; + } return NULL; } static void stop_object(struct seq_file *seq, void *v) { + mutex_unlock(&kfence_mutex); } static void *next_object(struct seq_file *seq, void *v, loff_t *pos) { + struct kfence_metadata *meta = (struct kfence_metadata *)v; + struct kfence_pool_area *kpa = meta->kpa; + struct rb_node *cur = &kpa->rb_node; + ++*pos; - if (*pos < CONFIG_KFENCE_NUM_OBJECTS) - return (void *)((long)*pos + 1); - return NULL; + ++meta; + if (meta - kpa->meta < kpa->nr_objects) + return meta; + seq_puts(seq, "---------------------------------\n"); +next_meta: + cur = rb_next(cur); + if (!cur) + return NULL; + kpa = kfence_rbentry(cur); + if (!kpa->nr_objects) + goto next_meta; + + return kpa->meta; } static int show_object(struct seq_file *seq, void *v) { - struct kfence_metadata *meta = &kfence_metadata[(long)v - 
1]; + struct kfence_metadata *meta = (struct kfence_metadata *)v; unsigned long flags; + char buf[20]; + if (!meta) + return 0; + + sprintf(buf, "node %d:\n", meta->kpa->node); + seq_puts(seq, buf); raw_spin_lock_irqsave(&meta->lock, flags); kfence_print_object(seq, meta); raw_spin_unlock_irqrestore(&meta->lock, flags); @@ -753,14 +1726,10 @@ static const struct seq_operations objects_sops = { }; DEFINE_SEQ_ATTRIBUTE(objects); -static int kfence_debugfs_init(void) +static int __init kfence_debugfs_init(void) { - struct dentry *kfence_dir; - - if (!READ_ONCE(kfence_enabled)) - return 0; + struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL); - kfence_dir = debugfs_create_dir("kfence", NULL); debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops); debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops); return 0; @@ -772,13 +1741,17 @@ late_initcall(kfence_debugfs_init); static void kfence_check_all_canary(void) { + struct kfence_pool_area *kpa; + struct rb_node *iter; int i; - for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { - struct kfence_metadata *meta = &kfence_metadata[i]; + kfence_for_each_area(kpa, iter) { + for (i = 0; i < kpa->nr_objects; i++) { + struct kfence_metadata *meta = &kpa->meta[i]; - if (meta->state == KFENCE_OBJECT_ALLOCATED) - check_canary(meta); + if (meta->state == KFENCE_OBJECT_ALLOCATED) + check_canary(meta); + } } } @@ -840,36 +1813,79 @@ static void toggle_allocation_gate(struct work_struct *work) /* === Public interface ===================================================== */ -void __init kfence_alloc_pool_and_metadata(void) +int __init update_kfence_booting_max(void) { - if (!kfence_sample_interval) - return; + static bool done __initdata; + + unsigned long long parse_mem = PUD_SIZE; + unsigned long nr_pages, nr_obj_max; + char *cmdline; + int ret; /* - * If the pool has already been initialized by arch, there is no need to - * re-allocate the memory pool. 
+ * We may reach here twice because some arch like aarch64 + * will call this function first. */ - if (!__kfence_pool) - __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); + if (done) + return 0; + done = true; - if (!__kfence_pool) { - pr_err("failed to allocate pool\n"); - return; - } + /* Boot cmdline is not set. Just leave. */ + cmdline = get_last_kfence_booting_max("kfence.booting_max="); + if (!cmdline) + return 0; + + ret = parse_kfence_booting_max(cmdline, memblock_phys_mem_size(), &parse_mem); + /* disable booting kfence on parsing fail. */ + if (ret) + goto nokfence; + + nr_pages = min_t(unsigned long, parse_mem, PUD_SIZE) / PAGE_SIZE; + /* We need at least 4 pages to enable KFENCE. */ + if (nr_pages < 4) + goto nokfence; - /* The memory allocated by memblock has been zeroed out. */ - kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE); - if (!kfence_metadata_init) { - pr_err("failed to allocate metadata\n"); - memblock_free(__kfence_pool, KFENCE_POOL_SIZE); - __kfence_pool = NULL; + nr_obj_max = nr_pages / 2 - 1; + if (kfence_num_objects > nr_obj_max) { + kfence_num_objects = nr_obj_max; + return 1; } + + return 0; + +nokfence: + kfence_num_objects = 0; + return 1; } -static void kfence_init_enable(void) +/* Only run for the first time. */ +static bool kfence_setup_once(void) { - if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS)) - static_branch_enable(&kfence_allocation_key); + int i; + + /* + * freelist.node, freelist.cpu, counters are inited together, + * we only need to check one of them and know whether + * we are now in re-enabling. 
+ */ + if (counters) + return true; + + freelist.node = kmalloc_array(nr_node_ids, sizeof(struct kfence_freelist_node), + GFP_KERNEL); + freelist.cpu = alloc_percpu(struct kfence_freelist_cpu); + counters = alloc_percpu(struct kfence_counter); + + if (!freelist.node || !freelist.cpu || !counters) + goto fail; + + for_each_node(i) { + INIT_LIST_HEAD(&freelist.node[i].freelist); + raw_spin_lock_init(&freelist.node[i].lock); + } + + for_each_possible_cpu(i) + INIT_LIST_HEAD(&per_cpu_ptr(freelist.cpu, i)->freelist); if (kfence_deferrable) INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate); @@ -879,119 +1895,328 @@ static void kfence_init_enable(void) if (kfence_check_on_panic) atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier); + return true; + +fail: + kfree(freelist.node); + freelist.node = NULL; + free_percpu(freelist.cpu); + freelist.cpu = NULL; + free_percpu(counters); + counters = NULL; + return false; +} + +static void start_kfence(void) +{ + unsigned long total_nr_objects = 0; + struct kfence_pool_area *kpa; + struct rb_node *iter; + + kfence_for_each_area(kpa, iter) { + pr_info("initialized - using %lu bytes for %lu objects on node %d at 0x%p-0x%p\n", + kpa->pool_size, kpa->nr_objects, kpa->node, (void *)kpa->addr, + (void *)(kpa->addr + kpa->pool_size)); + total_nr_objects += kpa->nr_objects; + } + + /* Update kfence_num_objects to export to /sys/module/ */ + if (total_nr_objects > KFENCE_MAX_OBJECTS_PER_AREA) + kfence_num_objects = rounddown(total_nr_objects, KFENCE_MAX_OBJECTS_PER_AREA); + else + kfence_num_objects = total_nr_objects; + + /* Forget upstream mode. 
*/ + if (kfence_num_objects_snap && total_nr_objects > kfence_num_objects_snap) { + kfence_num_objects_snap = 0; + kvfree(alloc_covered); + alloc_covered = NULL; + } + WRITE_ONCE(kfence_enabled, true); - queue_delayed_work(system_unbound_wq, &kfence_timer, 0); + static_branch_enable(&kfence_once_enabled); + static_branch_enable(&kfence_allocation_key); + if (kfence_sample_interval < 0) { + static_branch_enable(&kfence_short_canary); + static_branch_enable(&kfence_skip_interval); + } else { + static_branch_disable(&kfence_skip_interval); + queue_delayed_work(system_unbound_wq, &kfence_timer, 0); + } +} + +void __init kfence_alloc_pool_and_metadata(void) +{ + int node; + + /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */ + if (!READ_ONCE(kfence_sample_interval)) + return; + + if (kfence_num_objects < KFENCE_MAX_OBJECTS_PER_AREA) { + /* + * Not allow both pool size < 1GiB and enabling node mode. + * Not allow both pool size < 1GiB and non-interval alloc. + */ + if (kfence_pool_node_mode || kfence_sample_interval < 0) + goto fail; + + /* + * Only limit upstream mode for online environment, + * as it makes no sense for limiting debug setup. + */ + update_kfence_booting_max(); + if (!kfence_num_objects) + goto fail; + } + + kfence_num_objects_stat = memblock_alloc(sizeof(struct kfence_alloc_node_cond) * + nr_node_ids, PAGE_SIZE); + if (!kfence_num_objects_stat) + goto fail; + + /* + * If pool size less than 1GiB, use the upstream mode; + * else, align pool size up to 1GiB, for tlb split and + * recover thought. 
+ */ + if (kfence_num_objects >= KFENCE_MAX_OBJECTS_PER_AREA) + kfence_num_objects = roundup(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); + else + kfence_num_objects_snap = kfence_num_objects; - pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, - CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool, - (void *)(__kfence_pool + KFENCE_POOL_SIZE)); + calculate_need_alloc(); + + for_each_node(node) { + if (kfence_nr_areas_per_node < kfence_num_objects_stat[node].need) + kfence_nr_areas_per_node = kfence_num_objects_stat[node].need; + } + kfence_nr_areas_per_node /= KFENCE_MAX_OBJECTS_PER_AREA; + if (!kfence_nr_areas_per_node) + kfence_nr_areas_per_node = 1; + + __kfence_pool_area = memblock_alloc(sizeof(char *) * nr_node_ids * + kfence_nr_areas_per_node, PAGE_SIZE); + if (!__kfence_pool_area) + goto fail; + + if (__kfence_pool_early_init) { + __kfence_pool_area[first_online_node] = __kfence_pool_early_init; + return; + } + + for_each_node(node) + kfence_alloc_pool_node(node); + + return; + +fail: + if (kfence_num_objects_stat) { + memblock_free(kfence_num_objects_stat, + sizeof(struct kfence_alloc_node_cond) * nr_node_ids); + kfence_num_objects_stat = NULL; + } + WRITE_ONCE(kfence_sample_interval, 0); } void __init kfence_init(void) { + unsigned long nr_objects = min(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); + unsigned long kfence_pool_size = (nr_objects + 1) * 2 * PAGE_SIZE; + int node, area, index; + int *new_node_map; + stack_hash_seed = get_random_u32(); /* Setting kfence_sample_interval to 0 on boot disables KFENCE. 
*/ - if (!kfence_sample_interval) + if (!READ_ONCE(kfence_sample_interval)) return; - if (!kfence_init_pool_early()) { - pr_err("%s failed\n", __func__); - return; + if (!kfence_setup_once()) + goto fail_alloc; + + if (kfence_num_objects_snap) { + alloc_covered_order = ilog2(kfence_num_objects_snap) + 2; + alloc_covered = kvmalloc_array(ALLOC_COVERED_SIZE, sizeof(atomic_t), + GFP_KERNEL | __GFP_ZERO); + if (!alloc_covered) + goto fail_alloc; + } + + /* pre-alloc here for update_kfence_node_map() to avoid complex error handling later. */ + new_node_map = kmalloc_array(nr_node_ids, sizeof(int), GFP_KERNEL | __GFP_ZERO); + if (!new_node_map) + goto fail_coverd; + + if (!kfence_init_pool()) { + pr_err("%s failed on all nodes!\n", __func__); + goto fail_node_map; + } + + update_kfence_node_map(new_node_map); + + start_kfence(); + goto out; + +fail_node_map: + kfree(new_node_map); +fail_coverd: + kvfree(alloc_covered); + alloc_covered = NULL; +fail_alloc: + for_each_node(node) { + for (area = 0; area < kfence_nr_areas_per_node; area++) { + index = kfence_nr_areas_per_node * node + area; + if (__kfence_pool_area[index]) { + memblock_free_late(__pa(__kfence_pool_area[index]), + kfence_pool_size); + __kfence_pool_area[index] = NULL; + } + } } - kfence_init_enable(); +out: + memblock_free_late(__pa(__kfence_pool_area), sizeof(char *) * nr_node_ids * + kfence_nr_areas_per_node); + __kfence_pool_area = NULL; + memblock_free_late(__pa(kfence_num_objects_stat), + sizeof(struct kfence_alloc_node_cond) * nr_node_ids); + kfence_num_objects_stat = NULL; + +} + +static DECLARE_WAIT_QUEUE_HEAD(kfence_refkill_wait); +static void kfence_kill_confirm(struct percpu_ref *ref) +{ + if (!atomic_dec_return(&kfence_refkill_res)) + wake_up(&kfence_refkill_wait); } -static int kfence_init_late(void) +static void kfence_enable_late(void) { - const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE; - const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE; - unsigned long 
addr = (unsigned long)__kfence_pool; - unsigned long free_size = KFENCE_POOL_SIZE; - int err = -ENOMEM; + struct kfence_pool_area *kpa; + LIST_HEAD(ready_list); + struct rb_node *iter; + int *new_node_map; + int node; -#ifdef CONFIG_CONTIG_ALLOC - struct page *pages; + if (!READ_ONCE(kfence_sample_interval)) + return; - pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node, - NULL); - if (!pages) - return -ENOMEM; + /* + * If kfence pool is initialized later, the early init kfence pool has + * been released, reset the pointer here to avoid re-initialization if + * split_linear_mapping disabled. + */ + __kfence_pool_early_init = NULL; - __kfence_pool = page_to_virt(pages); - pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node, - NULL); - if (pages) - kfence_metadata_init = page_to_virt(pages); -#else - if (nr_pages_pool > MAX_ORDER_NR_PAGES || - nr_pages_meta > MAX_ORDER_NR_PAGES) { - pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n"); - return -EINVAL; - } + mutex_lock(&kfence_mutex); - __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL); - if (!__kfence_pool) - return -ENOMEM; + if (READ_ONCE(kfence_enabled)) + goto out; - kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL); -#endif + /* + * Keep upstream mode remaining the same. + * Otherwise we "forget" the upstream version whose pool size < 1GiB. 
+ */ + if (kfence_num_objects > kfence_num_objects_snap || kfence_pool_node_mode) + kfence_num_objects = roundup(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); - if (!kfence_metadata_init) - goto free_pool; + if (kfence_num_objects < KFENCE_MAX_OBJECTS_PER_AREA && kfence_sample_interval < 0) + goto fail; - memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE); - addr = kfence_init_pool(); - if (!addr) { - kfence_init_enable(); - kfence_debugfs_init(); - return 0; + if (!kfence_setup_once()) + goto fail; + + /* pre-alloc here for update_kfence_node_map() to avoid complex error handling later. */ + new_node_map = kmalloc_array(nr_node_ids, sizeof(int), GFP_KERNEL | __GFP_ZERO); + if (!new_node_map) + goto fail; + + kfence_num_objects_stat = kmalloc_array(nr_node_ids, sizeof(struct kfence_alloc_node_cond), + GFP_KERNEL | __GFP_ZERO); + if (!kfence_num_objects_stat) + goto fail_node_map; + + calculate_need_alloc(); + + kfence_for_each_area(kpa, iter) { + if (kpa->nr_objects >= KFENCE_MAX_OBJECTS_PER_AREA || kfence_num_objects_snap) + kfence_num_objects_stat[kpa->node].allocated += kpa->nr_objects; } - pr_err("%s failed\n", __func__); - free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool); - err = -EBUSY; + for_each_node(node) + kfence_alloc_pool_late_node(node, &ready_list, false); -#ifdef CONFIG_CONTIG_ALLOC - free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)), - nr_pages_meta); -free_pool: - free_contig_range(page_to_pfn(virt_to_page((void *)addr)), - free_size / PAGE_SIZE); -#else - free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE); -free_pool: - free_pages_exact((void *)addr, free_size); -#endif + /* + * Try to alloc again if there exists some nodes we fail to alloc on. + * These nodes may have no enough contig memory, so fallback to find on + * other nodes. 
+ */ + for_each_node(node) + kfence_alloc_pool_late_node(node, &ready_list, true); + + update_kfence_node_map(new_node_map); + kfree(kfence_num_objects_stat); + kfence_num_objects_stat = NULL; - kfence_metadata_init = NULL; - __kfence_pool = NULL; - return err; + stop_machine(kfence_update_pool_root, &ready_list, NULL); + + if (RB_EMPTY_ROOT(&kfence_pool_root)) + goto fail; + + start_kfence(); + goto out; + +fail_node_map: + kfree(new_node_map); +fail: + WRITE_ONCE(kfence_sample_interval, 0); +out: + mutex_unlock(&kfence_mutex); } -static int kfence_enable_late(void) +void kfence_disable(void) { - if (!__kfence_pool) - return kfence_init_late(); + struct kfence_pool_area *kpa; + struct rb_node *iter; - WRITE_ONCE(kfence_enabled, true); - queue_delayed_work(system_unbound_wq, &kfence_timer, 0); - pr_info("re-enabled\n"); - return 0; + mutex_lock(&kfence_mutex); + + if (!xchg(&kfence_enabled, false)) + goto out_unlock; + + synchronize_rcu(); + + atomic_set(&kfence_allocation_gate, 1); +#ifdef CONFIG_KFENCE_STATIC_KEYS + wake_up(&allocation_wait); +#endif + static_branch_disable(&kfence_allocation_key); + + atomic_set(&kfence_refkill_res, 0); + kfence_for_each_area(kpa, iter) { + atomic_inc(&kfence_refkill_res); + percpu_ref_kill_and_confirm(&kpa->refcnt, kfence_kill_confirm); + } + + /* + * We must wait here until all percpu_ref being killed. + * After all tasks finished, then release the mutex lock. + */ + wait_event_idle(kfence_refkill_wait, !atomic_read(&kfence_refkill_res)); + +out_unlock: + mutex_unlock(&kfence_mutex); } -void kfence_shutdown_cache(struct kmem_cache *s) +static void kfence_shutdown_cache_area(struct kmem_cache *s, struct kfence_pool_area *kpa) { unsigned long flags; - struct kfence_metadata *meta; + struct kfence_metadata *meta, *kfence_metadata = kpa->meta; int i; - /* Pairs with release in kfence_init_pool(). 
*/ - if (!smp_load_acquire(&kfence_metadata)) - return; - - for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + for (i = 0; i < kpa->nr_objects; i++) { bool in_use; meta = &kfence_metadata[i]; @@ -1030,7 +2255,7 @@ void kfence_shutdown_cache(struct kmem_cache *s) } } - for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + for (i = 0; i < kpa->nr_objects; i++) { meta = &kfence_metadata[i]; /* See above. */ @@ -1044,7 +2269,19 @@ void kfence_shutdown_cache(struct kmem_cache *s) } } -void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) +void kfence_shutdown_cache(struct kmem_cache *s) +{ + struct kfence_pool_area *kpa; + struct rb_node *iter; + + if (!static_branch_unlikely(&kfence_once_enabled)) + return; + + kfence_for_each_area(kpa, iter) + kfence_shutdown_cache_area(s, kpa); +} + +void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node) { unsigned long stack_entries[KFENCE_STACK_DEPTH]; size_t num_stack_entries; @@ -1055,7 +2292,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) * we don't disable KFENCE without making an allocation. 
*/ if (size > PAGE_SIZE) { - atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]); + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_INCOMPAT]++; return NULL; } @@ -1066,7 +2303,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) */ if ((flags & GFP_ZONEMASK) || (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) { - atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]); + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_INCOMPAT]++; return NULL; } @@ -1077,6 +2314,9 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) if (s->flags & SLAB_SKIP_KFENCE) return NULL; + if (static_branch_likely(&kfence_skip_interval)) + goto alloc; + if (atomic_inc_return(&kfence_allocation_gate) > 1) return NULL; #ifdef CONFIG_KFENCE_STATIC_KEYS @@ -1093,28 +2333,34 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) } #endif +alloc: if (!READ_ONCE(kfence_enabled)) return NULL; num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0); - /* - * Do expensive check for coverage of allocation in slow-path after - * allocation_gate has already become non-zero, even though it might - * mean not making any allocation within a given sample interval. - * - * This ensures reasonable allocation coverage when the pool is almost - * full, including avoiding long-lived allocations of the same source - * filling up the pool (e.g. pagecache allocations). - */ - alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries); - if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) { - atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]); - return NULL; + if (!static_branch_likely(&kfence_skip_interval)) { + /* + * Do expensive check for coverage of allocation in slow-path after + * allocation_gate has already become non-zero, even though it might + * mean not making any allocation within a given sample interval. 
+ * + * This ensures reasonable allocation coverage when the pool is almost + * full, including avoiding long-lived allocations of the same source + * filling up the pool (e.g. pagecache allocations). + */ + alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries); + if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_COVERED]++; + return NULL; + } } + if (node == NUMA_NO_NODE) + node = numa_node_id(); + return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries, - alloc_stack_hash); + alloc_stack_hash, node); } size_t kfence_ksize(const void *addr) @@ -1130,7 +2376,12 @@ size_t kfence_ksize(const void *addr) void *kfence_object_start(const void *addr) { - const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); + struct kfence_metadata *meta; + + if (!static_branch_unlikely(&kfence_once_enabled)) + return NULL; + + meta = addr_to_metadata((unsigned long)addr); /* * Read locklessly -- if there is a race with __kfence_alloc(), this is @@ -1160,18 +2411,25 @@ void __kfence_free(void *addr) bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs) { - const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE; struct kfence_metadata *to_report = NULL; enum kfence_error_type error_type; + struct kfence_pool_area *kpa; unsigned long flags; + int page_index; - if (!is_kfence_address((void *)addr)) + if (!static_branch_unlikely(&kfence_once_enabled)) + return false; + + kpa = get_kfence_pool_area((void *)addr); + if (!kpa) return false; if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */ return kfence_unprotect(addr); /* ... unprotect and proceed. */ - atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_BUGS]++; + + page_index = (addr - (unsigned long)kpa->addr) / PAGE_SIZE; if (page_index % 2) { /* This is a redzone, report a buffer overflow. 
*/ diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h index f46fbb03062b..071aec5feb96 100644 --- a/mm/kfence/kfence.h +++ b/mm/kfence/kfence.h @@ -100,46 +100,83 @@ struct kfence_metadata { #ifdef CONFIG_MEMCG struct obj_cgroup *objcg; #endif + struct kfence_pool_area *kpa; }; -#define KFENCE_METADATA_SIZE PAGE_ALIGN(sizeof(struct kfence_metadata) * \ - CONFIG_KFENCE_NUM_OBJECTS) +extern bool kfence_panic_on_fault; +DECLARE_STATIC_KEY_FALSE(kfence_short_canary); -extern struct kfence_metadata *kfence_metadata; +/* KFENCE error types for report generation. */ +enum kfence_error_type { + KFENCE_ERROR_OOB, /* Detected a out-of-bounds access. */ + KFENCE_ERROR_UAF, /* Detected a use-after-free access. */ + KFENCE_ERROR_CORRUPTION, /* Detected a memory corruption on free. */ + KFENCE_ERROR_INVALID, /* Invalid access of unknown type. */ + KFENCE_ERROR_INVALID_FREE, /* Invalid free. */ +}; + +void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs, + const struct kfence_metadata *meta, enum kfence_error_type type); + +void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta); +void kfence_disable(void); +extern void __free_pages_core(struct page *page, unsigned int order); + +extern struct rb_root kfence_pool_root; +#define kfence_rbentry(cur) rb_entry((cur), struct kfence_pool_area, rb_node) +#define kfence_for_each_area(kpa, iter) \ + for ((iter) = rb_first(&kfence_pool_root); \ + (iter) && ((kpa) = kfence_rbentry((iter)));\ + (iter) = rb_next((iter))) + +/** + * get_kfence_pool_area() - find the kfence pool area of the address + * @addr: address to check + * + * Return: the kfence pool area, NULL if not a kfence address + */ +static inline struct kfence_pool_area *get_kfence_pool_area(const void *addr) +{ + struct rb_node *cur; + struct kfence_pool_area *res = NULL; + + for (cur = kfence_pool_root.rb_node; cur;) { + struct kfence_pool_area *kpa = kfence_rbentry(cur); + + if ((unsigned long)addr < (unsigned 
long)kpa->addr) + cur = cur->rb_left; + else { + res = kpa; + cur = cur->rb_right; + } + } + + return is_kfence_address_area(addr, res) ? res : NULL; +} static inline struct kfence_metadata *addr_to_metadata(unsigned long addr) { long index; + struct kfence_metadata *kfence_metadata; + struct kfence_pool_area *kpa = get_kfence_pool_area((void *)addr); /* The checks do not affect performance; only called from slow-paths. */ - if (!is_kfence_address((void *)addr)) + if (!kpa) return NULL; + kfence_metadata = kpa->meta; + /* * May be an invalid index if called with an address at the edge of * __kfence_pool, in which case we would report an "invalid access" * error. */ - index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1; - if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS) + index = (addr - (unsigned long)kpa->addr) / (PAGE_SIZE * 2) - 1; + if (index < 0 || index >= kpa->nr_objects) return NULL; return &kfence_metadata[index]; } -/* KFENCE error types for report generation. */ -enum kfence_error_type { - KFENCE_ERROR_OOB, /* Detected a out-of-bounds access. */ - KFENCE_ERROR_UAF, /* Detected a use-after-free access. */ - KFENCE_ERROR_CORRUPTION, /* Detected a memory corruption on free. */ - KFENCE_ERROR_INVALID, /* Invalid access of unknown type. */ - KFENCE_ERROR_INVALID_FREE, /* Invalid free. 
*/ -}; - -void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs, - const struct kfence_metadata *meta, enum kfence_error_type type); - -void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta); - #endif /* MM_KFENCE_KFENCE_H */ diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c index 95b2b84c296d..27299531307b 100644 --- a/mm/kfence/kfence_test.c +++ b/mm/kfence/kfence_test.c @@ -243,6 +243,7 @@ enum allocation_policy { */ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy) { + long _kfence_sample_interval = kfence_sample_interval; void *alloc; unsigned long timeout, resched_after; const char *policy_name; @@ -269,13 +270,15 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat * 100x the sample interval should be more than enough to ensure we get * a KFENCE allocation eventually. */ - timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval); + if (kfence_sample_interval < 0) + _kfence_sample_interval = 100; + timeout = jiffies + msecs_to_jiffies(100 * _kfence_sample_interval); /* * Especially for non-preemption kernels, ensure the allocation-gate * timer can catch up: after @resched_after, every failed allocation * attempt yields, to ensure the allocation-gate timer is scheduled. */ - resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval); + resched_after = jiffies + msecs_to_jiffies(_kfence_sample_interval); do { if (test_cache) alloc = kmem_cache_alloc(test_cache, gfp); @@ -305,6 +308,9 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat } else if (policy == ALLOCATE_NONE) return alloc; + if (kfence_sample_interval < 0 && policy == ALLOCATE_NONE) + return alloc; + test_free(alloc); if (time_after(jiffies, resched_after)) @@ -609,7 +615,7 @@ static void test_gfpzero(struct kunit *test) int i; /* Skip if we think it'd take too long. 
*/ - KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100); + KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100 && kfence_num_objects <= 255); setup_test_cache(test, size, 0, NULL); buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY); @@ -624,7 +630,7 @@ static void test_gfpzero(struct kunit *test) break; test_free(buf2); - if (kthread_should_stop() || (i == CONFIG_KFENCE_NUM_OBJECTS)) { + if (kthread_should_stop() || (i == kfence_num_objects)) { kunit_warn(test, "giving up ... cannot get same object back\n"); return; } @@ -641,12 +647,19 @@ static void test_gfpzero(struct kunit *test) static void test_invalid_access(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .type = KFENCE_ERROR_INVALID, .fn = test_invalid_access, - .addr = &__kfence_pool[10], .is_write = false, }; + struct rb_node *cur = kfence_pool_root.rb_node; + char *__kfence_pool; + + if (!cur) + return; + + __kfence_pool = kfence_rbentry(cur)->addr; + expect.addr = &__kfence_pool[10]; READ_ONCE(__kfence_pool[10]); KUNIT_EXPECT_TRUE(test, report_matches(&expect)); @@ -731,6 +744,7 @@ static void test_krealloc(struct kunit *test) /* Test that some objects from a bulk allocation belong to KFENCE pool. */ static void test_memcache_alloc_bulk(struct kunit *test) { + long _kfence_sample_interval = kfence_sample_interval; const size_t size = 32; bool pass = false; unsigned long timeout; @@ -741,7 +755,9 @@ static void test_memcache_alloc_bulk(struct kunit *test) * 100x the sample interval should be more than enough to ensure we get * a KFENCE allocation eventually. 
*/ - timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval); + if (kfence_sample_interval < 0) + _kfence_sample_interval = 100; + timeout = jiffies + msecs_to_jiffies(100 * _kfence_sample_interval); do { void *objects[100]; int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects), @@ -804,7 +820,7 @@ static int test_init(struct kunit *test) unsigned long flags; int i; - if (!__kfence_pool) + if (!kfence_pool_root.rb_node) return -EINVAL; spin_lock_irqsave(&observed.lock, flags); diff --git a/mm/kfence/report.c b/mm/kfence/report.c index c509aed326ce..3d1c82b8d230 100644 --- a/mm/kfence/report.c +++ b/mm/kfence/report.c @@ -128,6 +128,7 @@ static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadat void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta) { + struct kfence_metadata *kfence_metadata = meta->kpa->meta; const int size = abs(meta->size); const unsigned long start = meta->addr; const struct kmem_cache *const cache = meta->cache; @@ -163,7 +164,11 @@ static void print_diff_canary(unsigned long address, size_t bytes_to_show, /* Do not show contents of object nor read into following guard page. */ end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr) - : min(show_until_addr, PAGE_ALIGN(address))); + : static_branch_likely(&kfence_short_canary) ? + min(show_until_addr, + ALIGN(meta->addr + meta->size + 1, + L1_CACHE_BYTES)) : + min(show_until_addr, PAGE_ALIGN(address))); pr_cont("["); for (cur = (const u8 *)address; cur < end; cur++) { @@ -186,7 +191,7 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r const struct kfence_metadata *meta, enum kfence_error_type type) { unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 }; - const ptrdiff_t object_index = meta ? 
meta - kfence_metadata : -1; + ptrdiff_t object_index = -1; int num_stack_entries; int skipnr = 0; @@ -201,8 +206,11 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta)) return; - if (meta) + if (meta) { lockdep_assert_held(&meta->lock); + object_index = meta - meta->kpa->meta; + } + /* * Because we may generate reports in printk-unfriendly parts of the * kernel, such as scheduler code, the use of printk() could deadlock. @@ -272,7 +280,8 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r lockdep_on(); - check_panic_on_warn("KFENCE"); + if (kfence_panic_on_fault) + panic("kfence.fault=panic set ...\n"); /* We encountered a memory safety error, taint the kernel! */ add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK); diff --git a/mm/slab.c b/mm/slab.c index 9ad3d0f2d1a5..dba95fd61ffb 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3222,7 +3222,7 @@ slab_alloc_node(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags, if (unlikely(!cachep)) return NULL; - objp = kfence_alloc(cachep, orig_size, flags); + objp = kfence_alloc_node(cachep, orig_size, flags, nodeid); if (unlikely(objp)) goto out; diff --git a/mm/slub.c b/mm/slub.c index d2544c88a5c4..347669be15c4 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3468,7 +3468,7 @@ static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list if (!s) return NULL; - object = kfence_alloc(s, orig_size, gfpflags); + object = kfence_alloc_node(s, orig_size, gfpflags, node); if (unlikely(object)) goto out; -- Gitee From 89c43972e2135c4a67d5396fded3301566d584e7 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Mon, 11 Mar 2024 16:48:32 +0800 Subject: [PATCH 0255/2138] anolis: kfence: support order-0 page check ANBZ: #8499 Single kernel page is supported by kfence now. The kernel order-0 pages (including order-0 pages from vmalloc) will be allocated from kfence pool. 
To exclude some specific page type(e.g., slab pages and pgtable pages), a new gfp flag is introduced. Out of bounds read/write and use after free can be detected. Signed-off-by: Tianchen Ding Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2874 --- include/asm-generic/pgalloc.h | 2 +- include/linux/gfp_types.h | 10 +- include/linux/kfence.h | 61 +++++++++++ include/trace/events/mmflags.h | 13 ++- mm/kfence/core.c | 195 ++++++++++++++++++++++++++++++++- mm/kfence/report.c | 4 +- mm/page_alloc.c | 26 ++++- mm/slub.c | 1 + 8 files changed, 300 insertions(+), 12 deletions(-) diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index c75d4a753849..f7413f68f8d5 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -4,7 +4,7 @@ #ifdef CONFIG_MMU -#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO) +#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO | __GFP_NOKFENCE) #define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT) /** diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h index dfde1e1e321c..ff2899762749 100644 --- a/include/linux/gfp_types.h +++ b/include/linux/gfp_types.h @@ -60,6 +60,11 @@ typedef unsigned int __bitwise gfp_t; #else #define ___GFP_NOLOCKDEP 0 #endif +#ifdef CONFIG_KFENCE +#define ___GFP_NOKFENCE 0x8000000u +#else +#define ___GFP_NOKFENCE 0 +#endif /* If the above are modified, __GFP_BITS_SHIFT may need updating */ /* @@ -101,12 +106,15 @@ typedef unsigned int __bitwise gfp_t; * node with no fallbacks or placement policy enforcements. * * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. + * + * %__GFP_NOKFENCE informs DO NOT try to alloc page from kfence pool. 
*/ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) +#define __GFP_NOKFENCE ((__force gfp_t)___GFP_NOKFENCE) /** * DOC: Watermark modifiers @@ -251,7 +259,7 @@ typedef unsigned int __bitwise gfp_t; #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP)) +#define __GFP_BITS_SHIFT (28) #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /** diff --git a/include/linux/kfence.h b/include/linux/kfence.h index bb24956fefd8..6771c6eea720 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -140,6 +140,7 @@ void kfence_shutdown_cache(struct kmem_cache *s); * use kfence_alloc() instead. */ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node); +struct page *__kfence_alloc_page(int node, gfp_t flags); /** * kfence_alloc() - allocate a KFENCE object with a low probability @@ -205,6 +206,36 @@ static __always_inline void *kfence_alloc_node(struct kmem_cache *s, size_t size return __kfence_alloc(s, size, flags, node); } +/** + * kfence_alloc_page() - allocate a KFENCE page with a low probability + * @node: preferred nid + * @flags: GFP flags + * + * Return: + * * NULL - must proceed with allocating as usual, + * * non-NULL - pointer to a KFENCE page. + * + * the order-0 page version of kfence_alloc(). 
+ */ +static __always_inline struct page *kfence_alloc_page(unsigned int order, int node, gfp_t flags) +{ +#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0 + if (!static_branch_unlikely(&kfence_allocation_key)) + return NULL; +#else + if (!static_branch_likely(&kfence_allocation_key)) + return NULL; +#endif + if (order) + return NULL; + + if (!static_branch_likely(&kfence_skip_interval) && + likely(atomic_read(&kfence_allocation_gate))) + return NULL; + + return __kfence_alloc_page(node, flags); +} + /** * kfence_ksize() - get actual amount of memory allocated for a KFENCE object * @addr: pointer to a heap object @@ -242,6 +273,7 @@ void *kfence_object_start(const void *addr); * Release a KFENCE object and mark it as freed. */ void __kfence_free(void *addr); +void __kfence_free_page(struct page *page, void *addr); /** * kfence_free() - try to release an arbitrary heap object to KFENCE pool @@ -264,6 +296,30 @@ static __always_inline __must_check bool kfence_free(void *addr) return true; } +/** + * kfence_free_page() - try to release a page to KFENCE pool + * @page: page to be freed + * + * Return: + * * false - page doesn't belong to KFENCE pool and was ignored, + * * true - page was released to KFENCE pool. + * + * Release a KFENCE page and mark it as freed. May be called on any page, + * even non-KFENCE page. The allocator must check the return value to + * determine if it was a KFENCE object or not. 
+ */ +static __always_inline __must_check bool kfence_free_page(struct page *page) +{ + void *addr; + + if (!PageKfence(page)) + return false; + + addr = page_to_virt(page); + __kfence_free_page(page, addr); + return true; +} + /** * kfence_handle_page_fault() - perform page fault handling for KFENCE pages * @addr: faulting address @@ -308,10 +364,15 @@ static inline void *kfence_alloc_node(struct kmem_cache *s, size_t size, gfp_t f { return NULL; } +static inline struct page *kfence_alloc_page(unsigned int order, int node, gfp_t flags) +{ + return NULL; +} static inline size_t kfence_ksize(const void *addr) { return 0; } static inline void *kfence_object_start(const void *addr) { return NULL; } static inline void __kfence_free(void *addr) { } static inline bool __must_check kfence_free(void *addr) { return false; } +static inline bool __must_check kfence_free_page(struct page *page) { return false; } static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs) { diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 867de6485902..4ad037141eb7 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -60,9 +60,16 @@ #define __def_gfpflag_names_kasan #endif -#define show_gfp_flags(flags) \ - (flags) ? __print_flags(flags, "|", \ - __def_gfpflag_names __def_gfpflag_names_kasan \ +#ifdef CONFIG_KFENCE +#define __def_gfpflag_names_kfence , \ + gfpflag_string(__GFP_NOKFENCE) +#else +#define __def_gfpflag_names_kfence +#endif + +#define show_gfp_flags(flags) \ + (flags) ? __print_flags(flags, "|", \ + __def_gfpflag_names __def_gfpflag_names_kasan __def_gfpflag_names_kfence \ ) : "none" #ifdef CONFIG_MMU diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 01d5945e1351..17f3dda4ebf7 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -84,6 +84,7 @@ EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. 
*/ DEFINE_STATIC_KEY_FALSE(kfence_short_canary); DEFINE_STATIC_KEY_FALSE(kfence_skip_interval); static DEFINE_STATIC_KEY_FALSE(kfence_once_enabled); +DEFINE_STATIC_KEY_TRUE(kfence_order0_page); #define KFENCE_MAX_OBJECTS_PER_AREA (PUD_SIZE / PAGE_SIZE / 2 - 1) @@ -205,6 +206,33 @@ static const struct kernel_param_ops pool_mode_param_ops = { }; module_param_cb(pool_mode, &pool_mode_param_ops, &kfence_pool_node_mode, 0600); +static int param_set_order0_page(const char *val, const struct kernel_param *kp) +{ + bool res; + int ret = kstrtobool(val, &res); + + if (ret < 0) + return ret; + + if (res) + static_branch_enable(&kfence_order0_page); + else + static_branch_disable(&kfence_order0_page); + + return 0; +} + +static int param_get_order0_page(char *buffer, const struct kernel_param *kp) +{ + return sprintf(buffer, "%d\n", static_branch_likely(&kfence_order0_page) ? 1 : 0); +} + +static const struct kernel_param_ops order0_page_param_ops = { + .set = param_set_order0_page, + .get = param_get_order0_page, +}; +module_param_cb(order0_page, &order0_page_param_ops, NULL, 0600); + static int param_set_fault(const char *val, const struct kernel_param *kp) { bool mode; @@ -320,6 +348,9 @@ enum kfence_counter_id { KFENCE_COUNTER_ALLOCS, KFENCE_COUNTER_FREES, KFENCE_COUNTER_ZOMBIES, + KFENCE_COUNTER_ALLOCATED_PAGE, + KFENCE_COUNTER_ALLOCS_PAGE, + KFENCE_COUNTER_FREES_PAGE, KFENCE_COUNTER_BUGS, KFENCE_COUNTER_SKIP_INCOMPAT, KFENCE_COUNTER_SKIP_CAPACITY, @@ -331,10 +362,13 @@ struct kfence_counter { }; static struct kfence_counter __percpu *counters; static const char *const counter_names[] = { - [KFENCE_COUNTER_ALLOCATED] = "currently allocated", - [KFENCE_COUNTER_ALLOCS] = "total allocations", - [KFENCE_COUNTER_FREES] = "total frees", - [KFENCE_COUNTER_ZOMBIES] = "zombie allocations", + [KFENCE_COUNTER_ALLOCATED] = "currently slab allocated", + [KFENCE_COUNTER_ALLOCS] = "total slab allocations", + [KFENCE_COUNTER_FREES] = "total slab frees", + [KFENCE_COUNTER_ZOMBIES] = 
"zombie slab allocations", + [KFENCE_COUNTER_ALLOCATED_PAGE] = "currently page allocated", + [KFENCE_COUNTER_ALLOCS_PAGE] = "total page allocations", + [KFENCE_COUNTER_FREES_PAGE] = "total page frees", [KFENCE_COUNTER_BUGS] = "total bugs", [KFENCE_COUNTER_SKIP_INCOMPAT] = "skipped allocations (incompatible)", [KFENCE_COUNTER_SKIP_CAPACITY] = "skipped allocations (capacity)", @@ -772,6 +806,78 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g return addr; } +static struct page *kfence_guarded_alloc_page(int node, unsigned long *stack_entries, + size_t num_stack_entries, u32 alloc_stack_hash) +{ + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + struct kfence_metadata *meta; + unsigned long flags; + struct page *page; + void *addr; + const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS && + !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS); + + /* Try to obtain a free object. */ + meta = get_free_meta(node); + if (!meta) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_CAPACITY]++; + return NULL; + } + + if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { + /* + * This is extremely unlikely -- we are reporting on a + * use-after-free, which locked meta->lock, and the reporting + * code via printk calls kmalloc() which ends up in + * kfence_alloc() and tries to grab the same object that we're + * reporting on. While it has never been observed, lockdep does + * report that there is a possibility of deadlock. Fix it by + * using trylock and bailing out gracefully. + * Put the object back on the freelist. 
+ */ + put_free_meta(meta); + + return NULL; + } + + __init_meta(meta, PAGE_SIZE, NULL, stack_entries, num_stack_entries, alloc_stack_hash); + + raw_spin_unlock_irqrestore(&meta->lock, flags); + + addr = (void *)meta->addr; + alloc_covered_add(alloc_stack_hash, 1); + + page = virt_to_page(addr); + if (PageSlab(page)) { + struct slab *slab = page_slab(page); + + /* + * For performance considerations, + * we clean slab info here (when allocating pages). + * So that slabs can reuse their flags and obj_cgroups + * without being cleared or freed if the previous user + * is slab too. + */ + slab->slab_cache = NULL; +#ifdef CONFIG_MEMCG + page->memcg_data = 0; +#endif + __ClearPageSlab(page); + } + page->mapping = NULL; +#ifdef CONFIG_DEBUG_VM + atomic_set(&page->_refcount, 0); +#endif + + if (random_fault) + kfence_protect(meta->addr); /* Random "faults" by protecting the object. */ + + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED_PAGE]++; + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCS_PAGE]++; + + return page; +} + static inline void put_free_meta_to_node(struct kfence_metadata *object, struct kfence_freelist_node *kfence_freelist) { @@ -912,6 +1018,20 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z } } +static void kfence_guarded_free_page(struct page *page, void *addr, struct kfence_metadata *meta) +{ + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + + if (!__free_meta(addr, meta, false, true)) + return; + + put_free_meta(meta); + + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED_PAGE]--; + this_cpu_counter->counter[KFENCE_COUNTER_FREES_PAGE]++; + +} + static void rcu_guarded_free(struct rcu_head *h) { struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head); @@ -2363,6 +2483,66 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node) alloc_stack_hash, node); } +#define GFP_KFENCE_NOT_ALLOC ((GFP_ZONEMASK & ~__GFP_HIGHMEM) | __GFP_NOKFENCE | 
__GFP_THISNODE) +struct page *__kfence_alloc_page(int node, gfp_t flags) +{ + unsigned long stack_entries[KFENCE_STACK_DEPTH]; + size_t num_stack_entries; + u32 alloc_stack_hash; + + if (!static_branch_likely(&kfence_order0_page)) + return NULL; + + if ((flags & GFP_KFENCE_NOT_ALLOC) || (flags & GFP_USER) == GFP_USER) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_INCOMPAT]++; + return NULL; + } + + if (static_branch_likely(&kfence_skip_interval)) + goto alloc; + + if (atomic_inc_return(&kfence_allocation_gate) > 1) + return NULL; +#ifdef CONFIG_KFENCE_STATIC_KEYS + /* + * waitqueue_active() is fully ordered after the update of + * kfence_allocation_gate per atomic_inc_return(). + */ + if (waitqueue_active(&allocation_wait)) { + /* + * Calling wake_up() here may deadlock when allocations happen + * from within timer code. Use an irq_work to defer it. + */ + irq_work_queue(&wake_up_kfence_timer_work); + } +#endif + +alloc: + if (!READ_ONCE(kfence_enabled)) + return NULL; + + num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0); + + if (!static_branch_likely(&kfence_skip_interval)) { + /* + * Do expensive check for coverage of allocation in slow-path after + * allocation_gate has already become non-zero, even though it might + * mean not making any allocation within a given sample interval. + * + * This ensures reasonable allocation coverage when the pool is almost + * full, including avoiding long-lived allocations of the same source + * filling up the pool (e.g. pagecache allocations). 
+ */ + alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries); + if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_COVERED]++; + return NULL; + } + } + + return kfence_guarded_alloc_page(node, stack_entries, num_stack_entries, alloc_stack_hash); +} + size_t kfence_ksize(const void *addr) { const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); @@ -2409,6 +2589,13 @@ void __kfence_free(void *addr) kfence_guarded_free(addr, meta, false); } +void __kfence_free_page(struct page *page, void *addr) +{ + struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); + + kfence_guarded_free_page(page, addr, meta); +} + bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs) { struct kfence_metadata *to_report = NULL; diff --git a/mm/kfence/report.c b/mm/kfence/report.c index 3d1c82b8d230..e2f051e223ef 100644 --- a/mm/kfence/report.c +++ b/mm/kfence/report.c @@ -132,6 +132,7 @@ void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *met const int size = abs(meta->size); const unsigned long start = meta->addr; const struct kmem_cache *const cache = meta->cache; + struct page *page = virt_to_page((void *)start); lockdep_assert_held(&meta->lock); @@ -142,7 +143,8 @@ void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *met seq_con_printf(seq, "kfence-#%td: 0x%p-0x%p, size=%d, cache=%s\n\n", meta - kfence_metadata, (void *)start, (void *)(start + size - 1), - size, (cache && cache->name) ? cache->name : ""); + size, (cache && cache->name) ? cache->name : PageSlab(page) ? 
+ "" : "PAGE"); kfence_print_stack(seq, meta, true); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f47439e0ef10..2587b0b957e5 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include "internal.h" #include "shuffle.h" @@ -958,6 +959,12 @@ static inline bool free_page_is_bad(struct page *page) if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) return false; +#ifdef CONFIG_KFENCE + /* It's not performance sensitive when reaching here */ + if (PageKfence(page)) + return false; +#endif + /* Something has gone sideways, find it */ free_page_is_bad_report(page); return true; @@ -1151,7 +1158,7 @@ static __always_inline bool free_pages_prepare(struct page *page, } page_cpupid_reset_last(page); - page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; + page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_KFENCE; reset_page_owner(page, order); page_table_check_free(page, order); @@ -1285,6 +1292,9 @@ static void __free_pages_ok(struct page *page, unsigned int order, if (!free_pages_prepare(page, order, fpi_flags)) return; + if (unlikely(!order && kfence_free_page(page))) + return; + /* * Calling get_pfnblock_migratetype() without spin_lock_irqsave() here * is used to avoid calling get_pfnblock_migratetype() under the lock. 
@@ -2398,6 +2408,10 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, bool free_high; __count_vm_events(PGFREE, 1 << order); + + if (unlikely(!order && kfence_free_page(page))) + return; + pindex = order_to_pindex(migratetype, order); list_add(&page->pcp_list, &pcp->lists[pindex]); pcp->count += 1 << order; @@ -4361,7 +4375,9 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, continue; } - page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, + page = kfence_alloc_page(0, preferred_nid, gfp); + if (likely(!page)) + page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, pcp, pcp_list); if (unlikely(!page)) { /* Try and allocate at least one page */ @@ -4445,6 +4461,12 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, */ alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); + page = kfence_alloc_page(order, preferred_nid, alloc_gfp); + if (unlikely(page)) { + prep_new_page(page, 0, alloc_gfp, alloc_flags); + goto out; + } + /* First allocation attempt */ page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); if (likely(page)) diff --git a/mm/slub.c b/mm/slub.c index 347669be15c4..801abe7a206f 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1872,6 +1872,7 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node, struct slab *slab; unsigned int order = oo_order(oo); + flags |= __GFP_NOKFENCE; if (node == NUMA_NO_NODE) folio = (struct folio *)alloc_pages(flags, order); else -- Gitee From 131b12291aad2521bf66999e00106b4b5fa27846 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Mon, 11 Mar 2024 17:52:16 +0800 Subject: [PATCH 0256/2138] anolis: kfence: add order-0 page test cases ANBZ: #8499 Test cases of out of bounds read/write and use after free read for order-0 page, stack protection, vmalloc, and GFP_NOKFENCE are added. Since double free has been checked by put_page_testzero(), there is no need and no use to add this case. 
Signed-off-by: Tianchen Ding Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2874 --- mm/kfence/kfence_test.c | 144 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c index 27299531307b..6e26b9b7ed4a 100644 --- a/mm/kfence/kfence_test.c +++ b/mm/kfence/kfence_test.c @@ -226,6 +226,8 @@ static __always_inline void test_free(void *ptr) kfree(ptr); } +#define test_free_page(addr) free_page((unsigned long)addr) + /* * If this should be a KFENCE allocation, and on which side the allocation and * the closest guard page should be. @@ -321,6 +323,50 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat return NULL; /* Unreachable. */ } +static struct page *test_alloc_page(struct kunit *test, bool is_vmalloc) +{ + long _kfence_sample_interval = kfence_sample_interval; + struct page *alloc; + void *addr; + unsigned long timeout, resched_after; + + kunit_info(test, "%s: size=%zu vmalloc=%d\n", __func__, PAGE_SIZE, is_vmalloc); + + /* + * 100x the sample interval should be more than enough to ensure we get + * a KFENCE allocation eventually. + */ + if (kfence_sample_interval < 0) + _kfence_sample_interval = 100; + timeout = jiffies + msecs_to_jiffies(100 * _kfence_sample_interval); + /* + * Especially for non-preemption kernels, ensure the allocation-gate + * timer can catch up: after @resched_after, every failed allocation + * attempt yields, to ensure the allocation-gate timer is scheduled. 
+ */ + resched_after = jiffies + msecs_to_jiffies(_kfence_sample_interval); + do { + if (is_vmalloc) { + addr = vmalloc(PAGE_SIZE); + alloc = vmalloc_to_page(addr); + if (is_kfence_address(page_to_virt(alloc))) + return alloc; + vfree(addr); + } else { + alloc = alloc_page(GFP_KERNEL); + if (is_kfence_address(page_to_virt(alloc))) + return alloc; + __free_page(alloc); + } + + if (time_after(jiffies, resched_after)) + cond_resched(); + } while (time_before(jiffies, timeout)); + + KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate page from KFENCE"); + return NULL; /* Unreachable. */ +} + static void test_out_of_bounds_read(struct kunit *test) { size_t size = 32; @@ -355,6 +401,33 @@ static void test_out_of_bounds_read(struct kunit *test) test_free(buf); } +static void test_out_of_bounds_read_page(struct kunit *test) +{ + struct expect_report expect = { + .type = KFENCE_ERROR_OOB, + .fn = test_out_of_bounds_read_page, + .is_write = false, + }; + char *buf; + struct page *page; + + /* Test both sides. 
*/ + + page = test_alloc_page(test, false); + buf = page_address(page); + expect.addr = buf - 1; + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free_page(buf); + + page = test_alloc_page(test, false); + buf = page_address(page); + expect.addr = buf + PAGE_SIZE; + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free_page(buf); +} + static void test_out_of_bounds_write(struct kunit *test) { size_t size = 32; @@ -373,6 +446,24 @@ static void test_out_of_bounds_write(struct kunit *test) test_free(buf); } +static void test_out_of_bounds_write_page(struct kunit *test) +{ + struct expect_report expect = { + .type = KFENCE_ERROR_OOB, + .fn = test_out_of_bounds_write_page, + .is_write = true, + }; + char *buf; + struct page *page; + + page = test_alloc_page(test, false); + buf = page_address(page); + expect.addr = buf - 1; + WRITE_ONCE(*expect.addr, 42); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free_page(buf); +} + static void test_use_after_free_read(struct kunit *test) { const size_t size = 32; @@ -389,6 +480,22 @@ static void test_use_after_free_read(struct kunit *test) KUNIT_EXPECT_TRUE(test, report_matches(&expect)); } +static void test_use_after_free_read_page(struct kunit *test) +{ + struct expect_report expect = { + .type = KFENCE_ERROR_UAF, + .fn = test_use_after_free_read_page, + .is_write = false, + }; + struct page *page; + + page = test_alloc_page(test, false); + expect.addr = page_address(page); + test_free_page(expect.addr); + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); +} + static void test_double_free(struct kunit *test) { const size_t size = 32; @@ -783,6 +890,37 @@ static void test_memcache_alloc_bulk(struct kunit *test) KUNIT_EXPECT_FALSE(test, report_available()); } +static void test_kernel_stack(struct kunit *test) +{ + unsigned long vaddr = (unsigned long)current->stack; + struct page *page; + int i; + + 
KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_VMAP_STACK) && kfence_sample_interval < 0); + + for (i = 0 ; i < 1< Date: Tue, 12 Mar 2024 11:26:30 +0800 Subject: [PATCH 0257/2138] anolis: docs/kfence: update document for KFENCE ANBZ: #8499 Update Documentation/dev-tools/kfence.rst. Signed-off-by: Tianchen Ding Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2874 --- Documentation/dev-tools/kfence.rst | 131 ++++++++++++++++++++++++++++- 1 file changed, 130 insertions(+), 1 deletion(-) diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst index 936f6aaa75c8..0cfa0f667f2a 100644 --- a/Documentation/dev-tools/kfence.rst +++ b/Documentation/dev-tools/kfence.rst @@ -67,13 +67,142 @@ The total memory dedicated to the KFENCE memory pool can be computed as:: Using the default config, and assuming a page size of 4 KiB, results in dedicating 2 MiB to the KFENCE memory pool. +You can change the KFENCE memory pool size by setting ``kfence.num_objects`` +in boot command line, or writing to +``/sys/module/kfence/parameters/num_objects`` when kfence is not enabled, +and the pool size of each node will be computed and updated +in the same way as above. You can set this value as large as possible, so +please be careful DO NOT use up all memorys. +When enabling KFENCE, ``num_objects`` will be adjusted to make the pool size +aligned up to 1GiB. That means, ``num_objects`` itself will be aligned up to +131071 (Unless ``num_objects`` is smaller than it and is regarded as using +the upstream mode). + +You can enable/disable KFENCE dynamically after startup by writing a proper +number to ``/sys/module/kfence/parameters/sample_interval``. Setting this value +to 0 means disabling KFENCE, and unused KFENCE pool memory will be +automatically freed. Otherwise KFENCE will be enabled, it will try to alloc +enough memory to hold ``num_objects`` the user has set. 
If this value is a +negative number, sample_interval will be invalid, and KFENCE will alloc slabs +and pages from its pool at all time if possible. + +You can change KFENCE pool mode by setting ``kfence.pool_mode`` in boot command +line, or writing to ``/sys/module/kfence/parameters/pool_mode`` when kfence is +not enabled. If the value is ``global`` (as default), ``num_objects`` becomes a +global total sum. The total KFENCE pools will hold ``num_objects`` slabs/pages. +Otherwise if the value is ``node``, ``num_objects`` becomes a per node value, +KFENCE pools on each node will hold ``num_objects`` slabs/pages separately. + Note: On architectures that support huge pages, KFENCE will ensure that the pool is using pages of size ``PAGE_SIZE``. This will result in additional page tables being allocated. +TLB recover issue +~~~~~~~~~~~~~~~~~ + +For some arch like x86, kernel virtual address directly mapping to physical +address is mapped by PUD huge TLB, so that performance can be improved since +kernel no need to visit PMD and PTE. Each PUD covers an 1GiB area. + +However, KFENCE needs to set guarded pages and breaks this design. PUD will be +splited to PTE, meaning that an 1GiB area will be splited to a large number of +4KiB (page size) areas. This may impact the performance. + +To solve this issue, the size of each kfence pool area is forced to be 1GiB, +and one area can hold 131071 objects, calculating by:: + + 1GiB / 4KiB / 2 - 1 = 131071 + +So the user input kfence.num_objects will be aligned up to 131071 for +convenience of splitting them to several 1GiB areas. + +One KFENCE pool area will be allocated in 1GiB aligned address, ensuring +only splitting one PUD. When KFENCE is disabled and there is no active +slabs/pages in this area, it will be freed and the corresponding TLB will +be recovered to the origin PUD (only on x86_64 now). + +An exception is the user input less than 131071 in boot cmdline. See mode 1 +of the following examples. 
+ +Set a pool limit on various memory +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Like crashkernel, the user can limit the size of the kfence pool by setting +``kfence.booting_max`` in boot command line. A reasonable config can be:: + + kfence.booting_max=0-128M:0,128M-256M:1M,256M-:2M + +So that: + On machines with memory of [0, 128M), kfence will not be enabled. + + On machines with memory of [128M, 256M), kfence will allocate at most 1MB + for kfence pool. (which means num_objects = 127 on page_size = 4KB) + + On machines with memory larger than 256M, kfence will allocate at most 2MB + for kfence pool. (which means num_objects = 255 on page_size = 4KB) + +Notes: + This config only sets the upper limit, so if the user sets num_objects = 127 + and ``kfence.booting_max=0-:2M``, kfence will still allocate 1MB for pool. + + This config only works for upstream mode. (pool_size < 1GB and + sample_interval > 0) Because if the user wants to use debug mode, they must + focus on the specific machine and do not need this general setting. + +Examples +~~~~~~~~ + +There are mainly three kinds of distribution methods. + +1. Upstream mode:: + + num_objects < 131071 + pool_mode = global (cannot be node) + sample_interval cannot be negative + + In this mode, everything looks like it is in upstream. However, if the user + enlarges ``num_objects`` after startup, it will be aligned up to 131071 + and become mode 2. + +2. Global mode:: + + num_objects >= 131071 + pool_mode = global + + For example, if num_objects = 131071 * 2, and there are 4 nodes in total. + Node 0 and node 1 will separately allocate 1GiB memory for KFENCE pools, and + there is nothing to do with node 2 and node 3. Sampling slabs and pages on + these empty nodes (2 and 3) will be mapped to previous nodes (0 and 1). + + If num_objects = 131071 * 6, the memory usage will be [2, 2, 1, 1]GiB on + these 4 nodes. + +3. Per node mode:: + + num_objects >= 131071 + pool_mode = node + + This mode is easy to understand.
If num_objects = 131071 * n, the memory + usage will be [n, n, n, n]GiB on 4 nodes. + +Monitoring specific slabs +~~~~~~~~~~~~~~~~~~~~~~~~~ + +If users want to enable or disable KFENCE for specific slabs, setting via +per_slab switch at ``/sys/kernel/slab//skip_kfence``. The default +value is 0 for all slabs (meaning do not skip). + +Users can also switch monitoring order0 pages by +setting ``kfence.order0_page`` in boot command line, +or writing to ``/sys/module/kfence/parameters/order0_page``. + Error reports ~~~~~~~~~~~~~ +By default, KFENCE will only report faults in dmesg. If users want to panic +the kernel, set ``kfence.fault=panic`` in boot command line, or write "panic" +to ``/sys/module/kfence/parameters/fault``. + A typical out-of-bounds access looks like this:: ================================================================== @@ -258,7 +387,7 @@ object page are "guard pages", whose attributes are changed to a protected state, and cause page faults on any attempted access. Such page faults are then intercepted by KFENCE, which handles the fault gracefully by reporting an out-of-bounds access, and marking the page as accessible so that the faulting -code can (wrongly) continue executing (set ``panic_on_warn`` to panic instead). +code can (wrongly) continue executing. To detect out-of-bounds writes to memory within the object's page itself, KFENCE also uses pattern-based redzones. For each object page, a redzone is set -- Gitee From 6a6c407256c68827363e09500a4a109751299367 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Tue, 12 Mar 2024 11:34:38 +0800 Subject: [PATCH 0258/2138] anolis: configs: update kfence configs ANBZ: #8499 CONFIG_KFENCE_DEFERRABLE is useful to save power. Enable it. 
Signed-off-by: Tianchen Ding Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2874 --- arch/arm64/configs/anolis-debug_defconfig | 4 ++-- arch/arm64/configs/anolis_defconfig | 2 +- arch/x86/configs/anolis-debug_defconfig | 2 +- arch/x86/configs/anolis_defconfig | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 419072dae8e2..eb020b30c47d 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -6884,9 +6884,9 @@ CONFIG_KASAN_VMALLOC=y # CONFIG_KASAN_MODULE_TEST is not set CONFIG_HAVE_ARCH_KFENCE=y CONFIG_KFENCE=y -CONFIG_KFENCE_SAMPLE_INTERVAL=0 +CONFIG_KFENCE_SAMPLE_INTERVAL=100 CONFIG_KFENCE_NUM_OBJECTS=255 -# CONFIG_KFENCE_DEFERRABLE is not set +CONFIG_KFENCE_DEFERRABLE=y CONFIG_KFENCE_STRESS_TEST_FAULTS=0 # end of Memory Debugging diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 27ba33178302..aba19cdc5358 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -6868,7 +6868,7 @@ CONFIG_HAVE_ARCH_KFENCE=y CONFIG_KFENCE=y CONFIG_KFENCE_SAMPLE_INTERVAL=0 CONFIG_KFENCE_NUM_OBJECTS=255 -# CONFIG_KFENCE_DEFERRABLE is not set +CONFIG_KFENCE_DEFERRABLE=y CONFIG_KFENCE_STRESS_TEST_FAULTS=0 # end of Memory Debugging diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index ecdf002bc8a9..0a31d38d115c 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -7477,7 +7477,7 @@ CONFIG_HAVE_ARCH_KFENCE=y CONFIG_KFENCE=y CONFIG_KFENCE_SAMPLE_INTERVAL=100 CONFIG_KFENCE_NUM_OBJECTS=255 -# CONFIG_KFENCE_DEFERRABLE is not set +CONFIG_KFENCE_DEFERRABLE=y CONFIG_KFENCE_STRESS_TEST_FAULTS=0 CONFIG_HAVE_ARCH_KMSAN=y # end of Memory Debugging diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 
4abe45995548..0b0922337278 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -7436,7 +7436,7 @@ CONFIG_HAVE_ARCH_KFENCE=y CONFIG_KFENCE=y CONFIG_KFENCE_SAMPLE_INTERVAL=0 CONFIG_KFENCE_NUM_OBJECTS=255 -# CONFIG_KFENCE_DEFERRABLE is not set +CONFIG_KFENCE_DEFERRABLE=y CONFIG_KFENCE_STRESS_TEST_FAULTS=0 CONFIG_HAVE_ARCH_KMSAN=y # end of Memory Debugging -- Gitee From 6f3c72c3d84777a3bfb2dc64befe6a0484c5afbe Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 8 Mar 2024 20:17:47 +0800 Subject: [PATCH 0259/2138] anolis: crypto: ccp: Fixup the capability of Hygon PSP during initialization ANBZ: #8560 The meaning of the data read from feature register of Hygon PSP is not exactly the same as AMD ASP. The bit 1 in feature register is used to indicates TEE in AMD ASP, but not in Hygon PSP, which will cause host to crash during module initialization, as shown below. [ 27.898723] BUG: kernel NULL pointer dereference, address: 0000000000000014 [ 27.906503] #PF: supervisor read access in kernel mode [ 27.912242] #PF: error_code(0x0000) - not-present page [ 27.917981] PGD 0 P4D 0 [ 27.920810] Oops: 0000 [#1] PREEMPT SMP NOPTI [ 27.925676] CPU: 67 PID: 1668 Comm: systemd-udevd Not tainted 6.6.7-for-gerrit #3 [ 27.934033] Hardware name: HYGON Hygon65N32/65N32, BIOS A0173036 02/01/2023 [ 27.941807] RIP: 0010:psp_firmware_is_visible+0x3c/0x70 [ccp] [ 27.948240] Code: 00 00 48 85 c0 74 12 48 81 fe e0 54 53 c1 74 2f 48 81 fe c0 54 53 c1 74 03 31 c0 c3 f6 40 70 02 74 f7 48 8b 50 10 48 8b 52 08 <8b> 52 14 85 d2 74 e8 48 03 50 38 48 89 d7 e8 51 71 0a d7 eb 14 48 [ 27.969204] RSP: 0018:ffffc9000b80fa70 EFLAGS: 00010202 [ 27.975039] RAX: ffff888113c2d9a8 RBX: ffffffffc1535460 RCX: 0000000000000124 [ 27.983008] RDX: 0000000000000000 RSI: ffffffffc15354c0 RDI: ffff8888830dc0c0 [ 27.993320] RBP: ffff888883060980 R08: 0000000000000001 R09: 00000006c8df7639 [ 28.005756] R10: ffff888100258278 R11: 0000000000000100 R12: ffff8888830dc0c0 [ 28.019695] R13: 
0000000000000001 R14: 0000000000000000 R15: ffffffffc1535490 [ 28.032285] FS: 00007f7c9ba2b880(0000) GS:ffff88885fcc0000(0000) knlGS:0000000000000000 [ 28.044626] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 28.054928] CR2: 0000000000000014 CR3: 0000800106e50000 CR4: 00000000003506e0 [ 28.065028] Call Trace: [ 28.067751] [ 28.070095] ? __die_body+0x1f/0x60 [ 28.073995] ? page_fault_oops+0x15d/0x460 [ 28.078573] ? exc_page_fault+0x78/0x170 [ 28.082956] ? asm_exc_page_fault+0x26/0x30 [ 28.087632] ? psp_firmware_is_visible+0x3c/0x70 [ccp] [ 28.093384] internal_create_group+0xde/0x3a0 [ 28.093392] internal_create_groups.part.0+0x3d/0xa0 [ 28.093396] really_probe+0x197/0x3c0 [ 28.093402] ? __device_attach_driver+0x100/0x100 [[ 0 ;2382.m0 9 3O4K0 5 ] __driver_probe_device+0x78/0x160 [ 28.093409] driver_probe_device+0x1e/0xa0 [ 28.126379] __driver_attach+0xaa/0x160 [ 28.130667] ? __device_attach_driver+0x100/0x100 [ 28.135921] bus_for_each_dev+0x75/0xc0 [ 28.142419] bus_add_driver+0x112/0x210 [ 28.149240] driver_register+0x5c/0x110 [ 28.154875] ? 0xffffffffc14a4000 [ 28.160197] sp_mod_init+0x10/0x1000 [ccp] [ 28.166164] do_one_initcall+0x45/0x210 [ 28.170453] ? kmalloc_trace+0x29/0x90 [ 28.174642] do_init_module+0x64/0x240 [ 28.178831] load_module+0x1d84/0x2010 [ 28.183024] ? 
init_module_from_file+0x8b/0xd0 [ 28.187986] init_module_from_file+0x8b/0xd0 [ 28.192763] do_syscall_64+0x39/0x80 [ 28.206672] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 28.212318] RIP: 0033:0x7f7c9b91ea3d [ 28.216312] Code: 5b 41 5c c3 66 0f 1f 84 00 00 00 00 00 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d c3 a3 0f 00 f7 d8 64 89 01 48 [ 28.237272] RSP: 002b:00007ffe6cee5368 EFLAGS: 00000246 ORIG_RAX: 0000000000000139 [ 28.245725] RAX: ffffffffffffffda RBX: 000055700e302260 RCX: 00007f7c9b91ea3d [ 28.253691] RDX: 0000000000000000 RSI: 00007f7c9ba5cded RDI: 0000000000000006 [ 28.261658] RBP: 0000000000020000 R08: 0000000000000000 R09: 000055700e4d3188 [ 28.269624] R10: 0000000000000006 R11: 0000000000000246 R12: 00007f7c9ba5cded [ 28.277590] R13: 0000000000000000 R14: 000055700e4cb7b0 R15: 000055700e302260 [ 28.285552] [ 28.287995] Modules linked in: k10temp ccp(+) drm_kms_helper ipmi_si(+) ipmi_devintf ipmi_msghandler mac_hid sch_fq_codel parport_pc ppdev lp parport ramoops drm reed_solomon efi_pstore ip_tables x_tables autofs4 btrfs blake2b_generic raid10 raid456 async_raid6_recov async_memcpy async_pq async_xor async_tx xor raid6_pq libcrc32c raid1 raid0 multipath linear igb i2c_algo_bit dca ptp crc32_pclmul pps_core ahci libahci i2c_piix4 hid_generic usbhid hid [ 28.288027] CR2: 0000000000000014 [ 28.288031] ---[ end trace 0000000000000000 ]--- [ 28.533899] ipmi_si IPI0001:00: IPMI message handler: Found new BMC (man_id: 0x00d455, prod_id: 0x0202, dev_id: 0x20) [ 28.604507] RIP: 0010:psp_firmware_is_visible+0x3c/0x70 [ccp] [ 28.604527] Code: 00 00 48 85 c0 74 12 48 81 fe e0 54 53 c1 74 2f 48 81 fe c0 54 53 c1 74 03 31 c0 c3 f6 40 70 02 74 f7 48 8b 50 10 48 8b 52 08 <8b> 52 14 85 d2 74 e8 48 03 50 38 48 89 d7 e8 51 71 0a d7 eb 14 48 [ 28.604530] RSP: 0018:ffffc9000b80fa70 EFLAGS: 00010202 [ 28.604533] RAX: ffff888113c2d9a8 RBX: ffffffffc1535460 RCX: 0000000000000124 [ 28.604535] RDX: 
0000000000000000 RSI: ffffffffc15354c0 RDI: ffff8888830dc0c0 [ 28.604536] RBP: ffff888883060980 R08: 0000000000000001 R09: 00000006c8df7639 [ 28.604537] R10: ffff888100258278 R11: 0000000000000100 R12: ffff8888830dc0c0 [ 28.604539] R13: 0000000000000001 R14: 0000000000000000 R15: ffffffffc1535490 [ 28.604540] FS: 00007f7c9ba2b880(0000) GS:ffff88885fcc0000(0000) knlGS:0000000000000000 [ 28.604542] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 28.604543] CR2: 0000000000000014 CR3: 0000800106e50000 CR4: 00000000003506e0 Also, the meaning of bit 7 in the feature register of Hygon PSP is not the same as AMD ASP. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2897 --- drivers/crypto/ccp/psp-dev.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index d42d7bc62352..223e198eddec 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -56,6 +56,13 @@ static irqreturn_t psp_irq_handler(int irq, void *data) return IRQ_HANDLED; } +static void hygon_fixup_psp_caps(struct psp_device *psp) +{ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + psp->capability &= ~(PSP_CAPABILITY_TEE | + PSP_CAPABILITY_PSP_SECURITY_REPORTING); +} + static unsigned int psp_get_capability(struct psp_device *psp) { unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg); @@ -73,6 +80,12 @@ static unsigned int psp_get_capability(struct psp_device *psp) } psp->capability = val; + /* + * Fix capability of Hygon psp, the meaning of Hygon psp feature + * register is not exactly the same as AMD. 
+ */ + hygon_fixup_psp_caps(psp); + /* Detect if TSME and SME are both enabled */ if (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING && psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET) && -- Gitee From e94a1f2d3312363a124c1a67946662e10053a77f Mon Sep 17 00:00:00 2001 From: Hao Feng Date: Thu, 25 Mar 2021 13:36:31 +0800 Subject: [PATCH 0260/2138] anolis: crypto: ccp: Add support to detect Hygon PSP on Hygon 2nd/3rd CPUs ANBZ: #8560 There are 2 types of CCP devices on Hygon 2nd/3rd CPUs, add them in the device list. Signed-off-by: Hao Feng Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2897 --- drivers/crypto/ccp/sp-pci.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index b6ab56abeb68..dd7db55f9587 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -576,6 +576,8 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] }, { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, + { PCI_VDEVICE(HYGON, 0x1456), (kernel_ulong_t)&dev_vdata[1] }, + { PCI_VDEVICE(HYGON, 0x1468), (kernel_ulong_t)&dev_vdata[2] }, /* Last entry must be zero */ { 0, } }; -- Gitee From 30252a241cc06d45b246381b976eea3fbac9d744 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Thu, 22 Sep 2022 10:59:03 +0800 Subject: [PATCH 0261/2138] anolis: crypto: ccp: Implement CSV_HGSC_CERT_IMPORT ioctl command ANBZ: #8562 The CSV_HGSC_CERT_IMPORT command can be used to import hygon general secure cert to the Secure Proccessor, to enable Hygon Secure Functions, such as CSV, TPM, TPCM, TDM. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Tianjia Zhang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2901 --- drivers/crypto/ccp/sev-dev.c | 73 +++++++++++++++++++++++++++++++++++- include/linux/psp-sev.h | 21 +++++++++++ include/uapi/linux/psp-sev.h | 24 ++++++++++++ 3 files changed, 116 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 07e6f782b622..36840e0b3be7 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -127,6 +127,15 @@ static int sev_wait_cmd_ioc(struct sev_device *sev, static int sev_cmd_buffer_len(int cmd) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + switch (cmd) { + case CSV_CMD_HGSC_CERT_IMPORT: + return sizeof(struct csv_data_hgsc_cert_import); + default: + break; + } + } + switch (cmd) { case SEV_CMD_INIT: return sizeof(struct sev_data_init); case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex); @@ -1070,6 +1079,50 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) return ret; } +static int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp) +{ + struct csv_user_data_hgsc_cert_import input; + struct csv_data_hgsc_cert_import *data; + void *hgscsk_blob, *hgsc_blob; + int ret; + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* copy HGSCSK certificate blobs from userspace */ + hgscsk_blob = psp_copy_user_blob(input.hgscsk_cert_address, input.hgscsk_cert_len); + if (IS_ERR(hgscsk_blob)) { + ret = PTR_ERR(hgscsk_blob); + goto e_free; + } + + data->hgscsk_cert_address = __psp_pa(hgscsk_blob); + data->hgscsk_cert_len = input.hgscsk_cert_len; + + /* copy HGSC certificate blobs from userspace */ + hgsc_blob = psp_copy_user_blob(input.hgsc_cert_address, input.hgsc_cert_len); + if 
(IS_ERR(hgsc_blob)) { + ret = PTR_ERR(hgsc_blob); + goto e_free_hgscsk; + } + + data->hgsc_cert_address = __psp_pa(hgsc_blob); + data->hgsc_cert_len = input.hgsc_cert_len; + + ret = __sev_do_cmd_locked(CSV_CMD_HGSC_CERT_IMPORT, data, &argp->error); + + kfree(hgsc_blob); +e_free_hgscsk: + kfree(hgscsk_blob); +e_free: + kfree(data); + return ret; +} + static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; @@ -1086,11 +1139,26 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) return -EFAULT; - if (input.cmd > SEV_MAX) - return -EINVAL; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (input.cmd > CSV_MAX) + return -EINVAL; + } else { + if (input.cmd > SEV_MAX) + return -EINVAL; + } mutex_lock(&sev_cmd_mutex); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + switch (input.cmd) { + case CSV_HGSC_CERT_IMPORT: + ret = csv_ioctl_do_hgsc_import(&input); + goto result_to_user; + default: + break; + } + } + switch (input.cmd) { case SEV_FACTORY_RESET: @@ -1126,6 +1194,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) goto out; } +result_to_user: if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; out: diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 7fd17e82bab4..2b40efb57274 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -81,6 +81,11 @@ enum sev_cmd { SEV_CMD_MAX, }; +enum csv_cmd { + CSV_CMD_HGSC_CERT_IMPORT = 0x300, + CSV_CMD_MAX, +}; + /** * struct sev_data_init - INIT command parameters * @@ -523,6 +528,22 @@ struct sev_data_attestation_report { u32 len; /* In/Out */ } __packed; +/** + * struct csv_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters + * + * @hgscsk_cert_address: HGSCSK certificate chain + * @hgscsk_cert_len: len of HGSCSK certificate + * @hgsc_cert_address: HGSC 
certificate chain + * @hgsc_cert_len: len of HGSC certificate + */ +struct csv_data_hgsc_cert_import { + u64 hgscsk_cert_address; /* In */ + u32 hgscsk_cert_len; /* In */ + u32 reserved; /* In */ + u64 hgsc_cert_address; /* In */ + u32 hgsc_cert_len; /* In */ +} __packed; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP /** diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 1c9da485318f..ae76776c0b15 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -32,6 +32,15 @@ enum { SEV_MAX, }; +/** + * CSV platform commands + */ +enum { + CSV_HGSC_CERT_IMPORT = 201, + + CSV_MAX, +}; + /** * SEV Firmware status code */ @@ -154,6 +163,21 @@ struct sev_user_data_get_id2 { __u32 length; /* In/Out */ } __packed; +/** + * struct csv_user_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters + * + * @hgscsk_cert_address: HGSCSK certificate chain + * @hgscsk_cert_len: length of HGSCSK certificate + * @hgsc_cert_address: HGSC certificate chain + * @hgsc_cert_len: length of HGSC certificate + */ +struct csv_user_data_hgsc_cert_import { + __u64 hgscsk_cert_address; /* In */ + __u32 hgscsk_cert_len; /* In */ + __u64 hgsc_cert_address; /* In */ + __u32 hgsc_cert_len; /* In */ +} __packed; + /** * struct sev_issue_cmd - SEV ioctl parameters * -- Gitee From 6224dcbf0f4001fe4b3d13097bb2bb43fe43ae34 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 26 Apr 2021 10:47:46 +0800 Subject: [PATCH 0262/2138] anolis: KVM: x86: Support VM_ATTESTATION hypercall ANBZ: #8564 When sev guest wants to collect the attestation report, it cannot directly communicate with psp. Add VM_ATTESTATION hypercall to allow sev guest to tell host to help get the attestation report. Since sev guest memory is encrypted, host cannot tamper with the report data. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2902 --- arch/x86/include/asm/kvm-x86-ops.h | 1 + arch/x86/include/asm/kvm_host.h | 2 + arch/x86/kvm/svm/sev.c | 67 ++++++++++++++++++++++++++++++ arch/x86/kvm/svm/svm.c | 2 + arch/x86/kvm/svm/svm.h | 2 + arch/x86/kvm/x86.c | 8 +++- include/uapi/linux/kvm_para.h | 1 + 7 files changed, 82 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 9b419f0de713..3ab3e361ea81 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -135,6 +135,7 @@ KVM_X86_OP(msr_filter_changed) KVM_X86_OP(complete_emulated_msr) KVM_X86_OP(vcpu_deliver_sipi_vector) KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons); +KVM_X86_OP_OPTIONAL(vm_attestation) #undef KVM_X86_OP #undef KVM_X86_OP_OPTIONAL diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 257bf2e71d06..32fb7c0cdbd7 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1751,6 +1751,8 @@ struct kvm_x86_ops { * Returns vCPU specific APICv inhibit reasons */ unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu); + + int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len); }; struct kvm_x86_nested_ops { diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 99e72b8a96ac..2c63a38e1a65 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -75,6 +75,8 @@ static unsigned int nr_asids; static unsigned long *sev_asid_bitmap; static unsigned long *sev_reclaim_asid_bitmap; +static const char sev_vm_mnonce[] = "VM_ATTESTATION"; + struct enc_region { struct list_head list; unsigned long npages; @@ -3182,3 +3184,68 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1); } + +int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, 
unsigned long len) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_attestation_report *data = NULL; + struct page **pages; + unsigned long guest_uaddr, n; + int ret = 0, offset, error; + + if (!sev_guest(kvm) || (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)) + return -ENOTTY; + + /* + * The physical address of guest must valid and page aligned, and + * the length of guest memory region must be page size aligned. + */ + if (!gpa || (gpa & ~PAGE_MASK) || (len & ~PAGE_MASK)) { + pr_err("invalid guest address or length\n"); + return -EFAULT; + } + + guest_uaddr = gfn_to_hva(kvm, gpa_to_gfn(gpa)); + pages = sev_pin_memory(kvm, guest_uaddr, len, &n, 1); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + /* + * The attestation report must be copied into contiguous memory region, + * lets verify that userspace memory pages are contiguous before we + * issue commmand. + */ + if (get_num_contig_pages(0, pages, n) != n) { + ret = -EINVAL; + goto e_unpin_memory; + } + + ret = -ENOMEM; + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_unpin_memory; + + /* sev_vm_mnonce indicates attestation request from guest */ + if (sizeof(sev_vm_mnonce) >= sizeof(data->mnonce)) { + ret = -EINVAL; + goto e_free; + } + + memcpy(data->mnonce, sev_vm_mnonce, sizeof(sev_vm_mnonce)); + + offset = guest_uaddr & (PAGE_SIZE - 1); + data->address = __sme_page_pa(pages[0]) + offset; + data->len = len; + + data->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, data, &error); + + if (ret) + pr_err("vm attestation ret %#x, error %#x\n", ret, error); + +e_free: + kfree(data); +e_unpin_memory: + sev_unpin_memory(kvm, pages, n); + return ret; +} diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 413f1f2aadd1..5bf447377539 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5069,6 +5069,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector, 
.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons, + + .vm_attestation = sev_vm_attestation, }; /* diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 37ada9808d9b..7cd1063da561 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -695,6 +695,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector); void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa); void sev_es_unmap_ghcb(struct vcpu_svm *svm); +int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len); + /* vmenter.S */ void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index dcd0c12c308e..b5deec1bb655 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9860,7 +9860,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) a3 &= 0xFFFFFFFF; } - if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { + if (static_call(kvm_x86_get_cpl)(vcpu) != 0 && + nr != KVM_HC_VM_ATTESTATION) { ret = -KVM_EPERM; goto out; } @@ -9923,6 +9924,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) vcpu->arch.complete_userspace_io = complete_hypercall_exit; return 0; } + case KVM_HC_VM_ATTESTATION: + ret = -KVM_ENOSYS; + if (kvm_x86_ops.vm_attestation) + ret = static_call(kvm_x86_vm_attestation)(vcpu->kvm, a0, a1); + break; default: ret = -KVM_ENOSYS; break; diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index 960c7e93d1a9..67192835455e 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -30,6 +30,7 @@ #define KVM_HC_SEND_IPI 10 #define KVM_HC_SCHED_YIELD 11 #define KVM_HC_MAP_GPA_RANGE 12 +#define KVM_HC_VM_ATTESTATION 100 /* Specific to Hygon CPU */ /* * hypercalls use architecture specific -- Gitee From fa76ccc9827ff62ff1dfd1afb4c1809b032edbb4 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Tue, 30 May 2023 17:34:30 +0800 Subject: [PATCH 0263/2138] anolis: driver/virt/coco: Add HYGON CSV Guest 
dirver. ANBZ: #8564 CSV firmware provides the guest a mechanism to communicate with the PSP without risk from a malicious hypervisor who wishes to read, alter, drop or replay the messages sent. The driver provides userspace interface to communicate with the PSP to request the attestation report and more. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2902 --- Documentation/virt/coco/csv-guest.rst | 33 +++++++ drivers/virt/Kconfig | 2 + drivers/virt/Makefile | 1 + drivers/virt/coco/csv-guest/Kconfig | 12 +++ drivers/virt/coco/csv-guest/Makefile | 2 + drivers/virt/coco/csv-guest/csv-guest.c | 114 ++++++++++++++++++++++++ drivers/virt/coco/csv-guest/csv-guest.h | 42 +++++++++ 7 files changed, 206 insertions(+) create mode 100644 Documentation/virt/coco/csv-guest.rst create mode 100644 drivers/virt/coco/csv-guest/Kconfig create mode 100644 drivers/virt/coco/csv-guest/Makefile create mode 100644 drivers/virt/coco/csv-guest/csv-guest.c create mode 100644 drivers/virt/coco/csv-guest/csv-guest.h diff --git a/Documentation/virt/coco/csv-guest.rst b/Documentation/virt/coco/csv-guest.rst new file mode 100644 index 000000000000..23cba2a5fd7c --- /dev/null +++ b/Documentation/virt/coco/csv-guest.rst @@ -0,0 +1,33 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=================================================================== +CSV Guest API Documentation +=================================================================== + +1. General description +====================== + +The CSV guest driver exposes IOCTL interfaces via the /dev/csv-guest misc +device to allow userspace to get certain CSV guest-specific details. + +2. API description +================== + +In this section, for each supported IOCTL, the following information is +provided along with a generic description. + +:Input parameters: Parameters passed to the IOCTL and related details. 
+:Output: Details about output data and return value (with details about + the non common error values). + +2.1 CSV_CMD_GET_REPORT +----------------------- + +:Input parameters: struct csv_report_req +:Output: Upon successful execution, CSV_REPORT data is copied to + csv_report_req.report_data and return 0. Return -EINVAL for invalid + operands, -EIO on VMMCALL failure or standard error number on other + common failures. + +The CSV_CMD_GET_REPORT IOCTL can be used by the attestation software to get +the CSV_REPORT from the CSV module using VMMCALL[KVM_HC_VM_ATTESTATION]. diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig index f79ab13a5c28..b1c4efa00182 100644 --- a/drivers/virt/Kconfig +++ b/drivers/virt/Kconfig @@ -54,4 +54,6 @@ source "drivers/virt/coco/sev-guest/Kconfig" source "drivers/virt/coco/tdx-guest/Kconfig" +source "drivers/virt/coco/csv-guest/Kconfig" + endif diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile index e9aa6fc96fab..62681a260758 100644 --- a/drivers/virt/Makefile +++ b/drivers/virt/Makefile @@ -12,3 +12,4 @@ obj-$(CONFIG_ACRN_HSM) += acrn/ obj-$(CONFIG_EFI_SECRET) += coco/efi_secret/ obj-$(CONFIG_SEV_GUEST) += coco/sev-guest/ obj-$(CONFIG_INTEL_TDX_GUEST) += coco/tdx-guest/ +obj-$(CONFIG_CSV_GUEST) += coco/csv-guest/ diff --git a/drivers/virt/coco/csv-guest/Kconfig b/drivers/virt/coco/csv-guest/Kconfig new file mode 100644 index 000000000000..4cbde598e665 --- /dev/null +++ b/drivers/virt/coco/csv-guest/Kconfig @@ -0,0 +1,12 @@ +config CSV_GUEST + tristate "HYGON CSV Guest driver" + default m + depends on AMD_MEM_ENCRYPT + help + CSV firmware provides the guest a mechanism to communicate with + the PSP without risk from a malicious hypervisor who wishes to read, + alter, drop or replay the messages sent. The driver provides + userspace interface to communicate with the PSP to request the + attestation report and more. + + If you choose 'M' here, this module will be called csv-guest. 
diff --git a/drivers/virt/coco/csv-guest/Makefile b/drivers/virt/coco/csv-guest/Makefile new file mode 100644 index 000000000000..a1c3a1499fc6 --- /dev/null +++ b/drivers/virt/coco/csv-guest/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CSV_GUEST) += csv-guest.o diff --git a/drivers/virt/coco/csv-guest/csv-guest.c b/drivers/virt/coco/csv-guest/csv-guest.c new file mode 100644 index 000000000000..7bd9abe7d8b6 --- /dev/null +++ b/drivers/virt/coco/csv-guest/csv-guest.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Userspace interface for CSV guest driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: fangbaoshun + */ +#include +#include +#include +#include +#include +#include + +#include + +#include "csv-guest.h" + +static long csv_get_report(void __user *argp) +{ + u8 *csv_report; + long ret; + struct csv_report_req req; + + if (copy_from_user(&req, argp, sizeof(struct csv_report_req))) + return -EFAULT; + + if (req.len < CSV_REPORT_INPUT_DATA_LEN) + return -EINVAL; + + csv_report = kzalloc(req.len, GFP_KERNEL); + if (!csv_report) { + ret = -ENOMEM; + goto out; + } + + /* Save user input data */ + if (copy_from_user(csv_report, req.report_data, CSV_REPORT_INPUT_DATA_LEN)) { + ret = -EFAULT; + goto out; + } + + /* Generate CSV_REPORT using "KVM_HC_VM_ATTESTATION" VMMCALL */ + ret = kvm_hypercall2(KVM_HC_VM_ATTESTATION, __pa(csv_report), req.len); + if (ret) + goto out; + + if (copy_to_user(req.report_data, csv_report, req.len)) + ret = -EFAULT; + +out: + kfree(csv_report); + return ret; +} + +static long csv_guest_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case CSV_CMD_GET_REPORT: + return csv_get_report((void __user *)arg); + default: + return -ENOTTY; + } +} + +static void mem_test_init(void) +{ + char head_str[] = "test mem encrypt"; + u64 *va_addr = __va(0x0); + + if (va_addr) { + memset(va_addr, 0x66, PAGE_SIZE); + memcpy(va_addr, head_str, 
sizeof(head_str)); + clflush_cache_range(va_addr, PAGE_SIZE); + } else + pr_err("Initialize 1 page for csv memory test failed!\n"); +} + +static const struct file_operations csv_guest_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = csv_guest_ioctl, + .compat_ioctl = csv_guest_ioctl, +}; + +static struct miscdevice csv_guest_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "csv-guest", + .fops = &csv_guest_fops, + .mode = 0777, +}; + +static int __init csv_guest_init(void) +{ + // This module only working on CSV guest vm. + if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) + return -ENODEV; + + // Initialize 1 page for csv memory test + mem_test_init(); + + return misc_register(&csv_guest_dev); +} + +static void __exit csv_guest_exit(void) +{ + misc_deregister(&csv_guest_dev); +} + +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0.0"); +MODULE_DESCRIPTION("HYGON CSV Guest Driver"); +module_init(csv_guest_init); +module_exit(csv_guest_exit); diff --git a/drivers/virt/coco/csv-guest/csv-guest.h b/drivers/virt/coco/csv-guest/csv-guest.h new file mode 100644 index 000000000000..0342d5f16cb3 --- /dev/null +++ b/drivers/virt/coco/csv-guest/csv-guest.h @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * + * Userspace interface for CSV guest driver + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#ifndef __VIRT_CSVGUEST_H__ +#define __VIRT_CSVGUEST_H__ + +#include +#include + +/* Length of the user input datas used in VMMCALL */ +#define CSV_REPORT_USER_DATA_LEN 64 +#define CSV_REPORT_MNONCE_LEN 16 +#define CSV_REPORT_HASH_LEN 32 +#define CSV_REPORT_INPUT_DATA_LEN (CSV_REPORT_USER_DATA_LEN + CSV_REPORT_MNONCE_LEN \ + + CSV_REPORT_HASH_LEN) + +/** + * struct csv_report_req - Request struct for CSV_CMD_GET_REPORT IOCTL. + * + * @report_data:User buffer with REPORT_DATA to be included into CSV_REPORT, and it's also + * user buffer to store CSV_REPORT output from VMMCALL[KVM_HC_VM_ATTESTATION]. + * @len: Length of the user buffer. 
+ */ +struct csv_report_req { + u8 *report_data; + int len; +}; + +/* + * CSV_CMD_GET_REPORT - Get CSV_REPORT using VMMCALL[KVM_HC_VM_ATTESTATION] + * + * Return 0 on success, -EIO on VMMCALL execution failure, and + * standard errno on other general error cases. + */ +#define CSV_CMD_GET_REPORT _IOWR('D', 1, struct csv_report_req) + +#endif /* __VIRT_CSVGUEST_H__ */ -- Gitee From 7de0fd98623110429ac13fa145991d04734edaff Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Thu, 10 Aug 2023 13:49:52 +0800 Subject: [PATCH 0264/2138] anolis: crypto: ccp: Add support to detect Hygon PSP on Hygon 4th CPUs ANBZ: #8568 Since Hygon 4th CPUs, there are Cryptographic Co-Processor devices with 3 different PCI device IDs, add them to the device list. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Tianjia Zhang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2904 --- drivers/crypto/ccp/sp-pci.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index dd7db55f9587..5185555a74a7 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -417,6 +417,12 @@ static const struct sev_vdata sevv2 = { .cmdbuff_addr_hi_reg = 0x109e4, /* C2PMSG_57 */ }; +static const struct sev_vdata csvv1 = { + .cmdresp_reg = 0x10580, + .cmdbuff_addr_lo_reg = 0x105e0, + .cmdbuff_addr_hi_reg = 0x105e4, +}; + static const struct tee_vdata teev1 = { .cmdresp_reg = 0x10544, /* C2PMSG_17 */ .cmdbuff_addr_lo_reg = 0x10548, /* C2PMSG_18 */ @@ -498,6 +504,13 @@ static const struct psp_vdata pspv6 = { .intsts_reg = 0x10514, /* P2CMSG_INTSTS */ }; +static const struct psp_vdata psp_csvv1 = { + .sev = &csvv1, + .feature_reg = 0x105fc, + .inten_reg = 0x10670, + .intsts_reg = 0x10674, +}; + #endif static const struct sp_dev_vdata dev_vdata[] = { @@ -562,6 +575,15 @@ static const struct sp_dev_vdata dev_vdata[] = { .bar = 2, #ifdef 
CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &pspv6, +#endif + }, + { /* 9 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5a, +#endif +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &psp_csvv1, #endif }, }; @@ -578,6 +600,9 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, { PCI_VDEVICE(HYGON, 0x1456), (kernel_ulong_t)&dev_vdata[1] }, { PCI_VDEVICE(HYGON, 0x1468), (kernel_ulong_t)&dev_vdata[2] }, + { PCI_VDEVICE(HYGON, 0x1486), (kernel_ulong_t)&dev_vdata[9] }, + { PCI_VDEVICE(HYGON, 0x14b8), (kernel_ulong_t)&dev_vdata[2] }, + { PCI_VDEVICE(HYGON, 0x14a6), (kernel_ulong_t)&dev_vdata[9] }, /* Last entry must be zero */ { 0, } }; -- Gitee From fa85e71ecf92ce756849dd16bda81d5b3a564bd8 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 14 Jul 2023 17:17:58 +0800 Subject: [PATCH 0265/2138] anolis: x86/mm: Print CSV info into the kernel log ANBZ: #8568 Add CSV and CSV2 to the list of memory encryption features. Also print CPU vendor while printing CSV infos. Signed-off-by: hanliyang Reviewed-by: Tianjia Zhang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2904 --- arch/x86/mm/mem_encrypt.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 9f27e14e185f..b97261dfd13d 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -39,6 +39,27 @@ bool force_dma_unencrypted(struct device *dev) return false; } +static void print_hygon_cc_feature_info(void) +{ + /* Secure Memory Encryption */ + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { + /* + * HYGON SME is mutually exclusive with any of the + * HYGON CSV features below. 
+ */ + pr_info(" HYGON SME"); + return; + } + + /* Secure Encrypted Virtualization */ + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) + pr_info(" HYGON CSV"); + + /* Encrypted Register State */ + if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) + pr_info(" HYGON CSV2"); +} + static void print_mem_encrypt_feature_info(void) { pr_info("Memory Encryption Features active:"); @@ -48,6 +69,11 @@ static void print_mem_encrypt_feature_info(void) return; } + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + print_hygon_cc_feature_info(); + return; + } + pr_cont(" AMD"); /* Secure Memory Encryption */ -- Gitee From 9f3c28d40de6e429b6e1bff2e554bd26fb44979a Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 17 Jul 2023 18:44:56 +0800 Subject: [PATCH 0266/2138] anolis: crypto: ccp: Print Hygon CSV API version when CSV support is detected ANBZ: #8568 The Cryptographic Co-Processor module will print 'SEV API' instead of 'CSV API' on Hygon CPU if CSV is supported. Fix this confusing message here. Signed-off-by: hanliyang Reviewed-by: Tianjia Zhang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2904 --- drivers/crypto/ccp/sev-dev.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 36840e0b3be7..bd977b89e977 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -80,6 +80,13 @@ static void *sev_es_tmr; #define NV_LENGTH (32 * 1024) static void *sev_init_ex_buffer; +/* + * Hygon CSV build info: + * Hygon CSV build info is 32-bit in length rather than 8-bit as + * in AMD SEV. 
+ */ +static u32 hygon_csv_build; + static inline bool sev_version_greater_or_equal(u8 maj, u8 min) { struct sev_device *sev = psp_master->sev_data; @@ -509,8 +516,12 @@ static int __sev_platform_init_locked(int *error) dev_dbg(sev->dev, "SEV firmware initialized\n"); - dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major, - sev->api_minor, sev->build); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + dev_info(sev->dev, "CSV API:%d.%d build:%d\n", sev->api_major, + sev->api_minor, hygon_csv_build); + else + dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major, + sev->api_minor, sev->build); return 0; } @@ -732,6 +743,10 @@ static int sev_get_api_version(void) sev->build = status.build; sev->state = status.state; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + hygon_csv_build = (status.flags >> 9) | + ((u32)status.build << 23); + return 0; } -- Gitee From ef4435e4e3120d3a2b249aeec23ea1878d82de41 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 17 Jul 2023 19:02:27 +0800 Subject: [PATCH 0267/2138] anolis: KVM: SVM: Print Hygon CSV support info if support is detected ANBZ: #8568 The KVM will print 'SEV supported' instead of 'CSV supported' on Hygon CPU if CSV is supported. Fix these confusing messages here. Fix other 'SEV' messages in arch/x86/kvm/svm/svm.c. Signed-off-by: hanliyang Reviewed-by: Tianjia Zhang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2904 --- arch/x86/kvm/svm/sev.c | 6 ++++-- arch/x86/kvm/svm/svm.c | 5 ++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 2c63a38e1a65..06a781d80a2f 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2290,13 +2290,15 @@ void __init sev_hardware_setup(void) out: if (boot_cpu_has(X86_FEATURE_SEV)) - pr_info("SEV %s (ASIDs %u - %u)\n", + pr_info("%s %s (ASIDs %u - %u)\n", + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? 
"CSV" : "SEV", sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" : "unusable" : "disabled", min_sev_asid, max_sev_asid); if (boot_cpu_has(X86_FEATURE_SEV_ES)) - pr_info("SEV-ES %s (ASIDs %u - %u)\n", + pr_info("%s %s (ASIDs %u - %u)\n", + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "CSV2" : "SEV-ES", sev_es_supported ? "enabled" : "disabled", min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 5bf447377539..20a7e6937c5f 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -547,7 +547,10 @@ static bool __kvm_is_svm_supported(void) } if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { - pr_info("KVM is unsupported when running as an SEV guest\n"); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + pr_info("KVM is unsupported when running as an CSV guest\n"); + else + pr_info("KVM is unsupported when running as an SEV guest\n"); return false; } -- Gitee From 97f416fbb3077f46317dbdfa69ac4211808ca87c Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 31 Jul 2023 23:35:42 +0800 Subject: [PATCH 0268/2138] anolis: x86/cpu: Detect memory encryption features on Hygon CPUs ANBZ: #8568 Hygon SME is identified by CPUID 0x8000001f, but requires BIOS support to enable it (set bit 23 of MSR_AMD64_SYSCFG). Hygon CSV and CSV2 are identified by CPUID 0x8000001f, but requires BIOS support to enable it (set bit 23 of MSR_AMD64_SYSCFG and set bit 0 of MSR_K7_HWCR). Only show the SME, CSV, CSV2 features as available if reported by CPUID and enabled by BIOS. 
Signed-off-by: hanliyang Reviewed-by: Tianjia Zhang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2904 --- arch/x86/kernel/cpu/hygon.c | 46 +++++++++++++++++++++++++++++++++++++ arch/x86/kernel/cpu/proc.c | 10 ++++++-- 2 files changed, 54 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index b6f932d2d6aa..07a3a2863ae1 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -246,6 +246,50 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) resctrl_cpu_detect(c); } +static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) +{ + u64 msr; + u32 eax; + + eax = cpuid_eax(0x8000001f); + + /* Check whether SME or CSV is supported */ + if (!(eax & (BIT(0) | BIT(1)))) + return; + + /* If BIOS has not enabled SME then don't advertise the SME feature. */ + rdmsrl(MSR_AMD64_SYSCFG, msr); + if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) + goto clear_all; + + /* + * Always adjust physical address bits. Even though this will be a + * value above 32-bits this is still done for CONFIG_X86_32 so that + * accurate values are reported. + */ + c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f; + + /* Don't advertise SME and CSV features under CONFIG_X86_32. */ + if (IS_ENABLED(CONFIG_X86_32)) + goto clear_all; + + /* + * If BIOS has not enabled CSV then don't advertise the CSV and CSV2 + * feature. 
+ */ + rdmsrl(MSR_K7_HWCR, msr); + if (!(msr & MSR_K7_HWCR_SMMLOCK)) + goto clear_csv; + + return; + +clear_all: + setup_clear_cpu_cap(X86_FEATURE_SME); +clear_csv: + setup_clear_cpu_cap(X86_FEATURE_SEV); + setup_clear_cpu_cap(X86_FEATURE_SEV_ES); +} + static void early_init_hygon(struct cpuinfo_x86 *c) { u32 dummy; @@ -294,6 +338,8 @@ static void early_init_hygon(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_VMMCALL); hygon_get_topology_early(c); + + early_detect_mem_encrypt(c); } static void init_hygon(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 31c0e68f6227..a0f81db51eac 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -100,8 +100,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_puts(m, "flags\t\t:"); for (i = 0; i < 32*NCAPINTS; i++) - if (cpu_has(c, i) && x86_cap_flags[i] != NULL) - seq_printf(m, " %s", x86_cap_flags[i]); + if (cpu_has(c, i) && x86_cap_flags[i] != NULL) { + if (c->x86_vendor == X86_VENDOR_HYGON) + seq_printf(m, " %s", i == X86_FEATURE_SEV ? "csv" : + (i == X86_FEATURE_SEV_ES ? "csv2" : + x86_cap_flags[i])); + else + seq_printf(m, " %s", x86_cap_flags[i]); + } #ifdef CONFIG_X86_VMX_FEATURE_NAMES if (cpu_has(c, X86_FEATURE_VMX) && c->vmx_capability[0]) { -- Gitee From 93eccefe639e10eb662248b093136b477d760a09 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 4 Aug 2023 03:20:47 +0800 Subject: [PATCH 0269/2138] anolis: x86/cpufeatures: Add CPUID_8C86_0000_EDX CPUID leaf ANBZ: #8568 This is a pure feature bits leaf. Add SM3 and SM4 feature bits from this leaf on Hygon CPUs. 
Signed-off-by: hanliyang Reviewed-by: Tianjia Zhang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2904 [Fixes conflits: use cpufeatures' word 22] Signed-off-by: Qinyun Tan --- arch/x86/include/asm/cpufeature.h | 7 +++++-- arch/x86/include/asm/cpufeatures.h | 6 +++++- arch/x86/include/asm/disabled-features.h | 3 ++- arch/x86/include/asm/required-features.h | 3 ++- arch/x86/kernel/cpu/hygon.c | 13 +++++++++++++ 5 files changed, 27 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 3508f3fc928d..b59e37c4ecb4 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -34,6 +34,7 @@ enum cpuid_leafs CPUID_8000_001F_EAX, CPUID_8000_0021_EAX, CPUID_LNX_5, + CPUID_8C86_0000_EDX, NR_CPUID_WORDS, }; @@ -94,8 +95,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 22, feature_bit) || \ REQUIRED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 22)) + BUILD_BUG_ON_ZERO(NCAPINTS != 23)) #define DISABLED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ @@ -120,8 +122,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 22, feature_bit) || \ DISABLED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 22)) + BUILD_BUG_ON_ZERO(NCAPINTS != 23)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 
1 : \ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 3e210b517953..12795c777a42 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 22 /* N 32-bit words worth of info */ +#define NCAPINTS 23 /* N 32-bit words worth of info */ #define NBUGINTS 2 /* N 32-bit bug flags */ /* @@ -474,6 +474,10 @@ #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */ #define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */ +/* HYGON-defined CPU features, CPUID level 0x8c860000:0 (EDX), word 22 */ +#define X86_FEATURE_SM3 (22*32 + 1) /* SM3 instructions */ +#define X86_FEATURE_SM4 (22*32 + 2) /* SM4 instructions */ + /* * Extended auxiliary flags: Linux defined - for features scattered in various * CPUID levels like 0x80000022, etc and Linux defined features. diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index 88fcf08458d9..b108e656fa5b 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -144,6 +144,7 @@ #define DISABLED_MASK19 0 #define DISABLED_MASK20 0 #define DISABLED_MASK21 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22) +#define DISABLED_MASK22 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index e9187ddd3d1f..76953f757f3c 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -100,6 +100,7 @@ #define REQUIRED_MASK19 0 #define REQUIRED_MASK20 0 #define REQUIRED_MASK21 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22) +#define REQUIRED_MASK22 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23) #endif /* 
_ASM_X86_REQUIRED_FEATURES_H */ diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 07a3a2863ae1..340710ed89d0 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -246,6 +246,18 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) resctrl_cpu_detect(c); } +static void init_hygon_cap(struct cpuinfo_x86 *c) +{ + /* Test for Extended Feature Flags presence */ + if (cpuid_eax(0x8C860000) >= 0x8C860000) { + /* + * Store Extended Feature Flags of the CPU capability + * bit array + */ + c->x86_capability[CPUID_8C86_0000_EDX] = cpuid_edx(0x8C860000); + } +} + static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) { u64 msr; @@ -401,6 +413,7 @@ static void init_hygon(struct cpuinfo_x86 *c) /* Hygon CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */ clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE); + init_hygon_cap(c); } static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c) -- Gitee From c43901afc4b07c9be9b259967ac3c437a136469d Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 4 Aug 2023 03:54:15 +0800 Subject: [PATCH 0270/2138] anolis: x86/cpufeatures: Add CSV3 CPU feature ANBZ: #8568 Add CPU feature detection for Hygon 3rd CSV. This feature enhances CSV2 by also isolating NPT and VMCB, making them in-accessible to the hypervisor. 
Signed-off-by: hanliyang Reviewed-by: Tianjia Zhang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2904 --- arch/x86/include/asm/cpufeatures.h | 2 ++ arch/x86/kernel/cpu/hygon.c | 1 + 2 files changed, 3 insertions(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 12795c777a42..bebaa0a2af01 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -462,6 +462,8 @@ #define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */ #define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */ #define X86_FEATURE_DEBUG_SWAP (19*32+14) /* AMD SEV-ES full debug state swap support */ +/* HYGON 3rd CSV */ +#define X86_FEATURE_CSV3 (19*32 + 30) /* HYGON 3rd CSV */ /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */ #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */ diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 340710ed89d0..7b81626648f7 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -300,6 +300,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) clear_csv: setup_clear_cpu_cap(X86_FEATURE_SEV); setup_clear_cpu_cap(X86_FEATURE_SEV_ES); + setup_clear_cpu_cap(X86_FEATURE_CSV3); } static void early_init_hygon(struct cpuinfo_x86 *c) -- Gitee From 2c1d11cfcc6238ffdda8b81679ec252133f775f3 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sun, 10 Mar 2024 14:58:10 +0800 Subject: [PATCH 0271/2138] anolis: x86/cpu/hygon: Clear SME feature flag when not in use ANBZ: #8568 The commit 08f253ec3767 ("x86/cpu: Clear SME feature flag when not in use") will clear SME feature flag if the kernel is not using it on AMD CPUs, this will help userspace to determine if SME is available and in use from /proc/cpuinfo. Apply this change to Hygon CPUs as well. 
Signed-off-by: hanliyang Reviewed-by: Tianjia Zhang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2904 --- arch/x86/kernel/cpu/hygon.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 7b81626648f7..4b90eeb5110d 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -285,6 +285,10 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) if (IS_ENABLED(CONFIG_X86_32)) goto clear_all; + /* Clear the SME feature flag if the kernel is not using it. */ + if (!sme_me_mask) + setup_clear_cpu_cap(X86_FEATURE_SME); + /* * If BIOS has not enabled CSV then don't advertise the CSV and CSV2 * feature. -- Gitee From 2296b3f7230fb2c6bb89da8a96fdd957219ead68 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:04 +0800 Subject: [PATCH 0272/2138] anolis: sw64: add build infrastructure ANBZ: #4688 Add Kbuild, Makefile, Kconfig and link script for SW64 build infrastructure. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/Kbuild | 7 + arch/sw_64/Kconfig | 645 +++++++++++++++++++++++++++++ arch/sw_64/Kconfig.debug | 53 +++ arch/sw_64/Makefile | 69 +++ arch/sw_64/Makefile.postlink | 36 ++ arch/sw_64/boot/.gitignore | 2 + arch/sw_64/boot/Makefile | 29 ++ arch/sw_64/boot/dts/Makefile | 27 ++ arch/sw_64/include/asm/Kbuild | 16 + arch/sw_64/include/uapi/asm/Kbuild | 5 + arch/sw_64/kernel/.gitignore | 2 + arch/sw_64/kernel/Makefile | 51 +++ arch/sw_64/kernel/vmlinux.lds.S | 113 +++++ 13 files changed, 1055 insertions(+) create mode 100644 arch/sw_64/Kbuild create mode 100644 arch/sw_64/Kconfig create mode 100644 arch/sw_64/Kconfig.debug create mode 100644 arch/sw_64/Makefile create mode 100644 arch/sw_64/Makefile.postlink create mode 100644 arch/sw_64/boot/.gitignore create mode 100644 arch/sw_64/boot/Makefile create mode 100644 arch/sw_64/boot/dts/Makefile create mode 100644 arch/sw_64/include/asm/Kbuild create mode 100644 arch/sw_64/include/uapi/asm/Kbuild create mode 100644 arch/sw_64/kernel/.gitignore create mode 100644 arch/sw_64/kernel/Makefile create mode 100644 arch/sw_64/kernel/vmlinux.lds.S diff --git a/arch/sw_64/Kbuild b/arch/sw_64/Kbuild new file mode 100644 index 000000000000..aa0bf0507406 --- /dev/null +++ b/arch/sw_64/Kbuild @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-y += kernel/ mm/ platform/ +obj-$(CONFIG_NET) += net/ +obj-$(CONFIG_KVM) += kvm/ +obj-$(CONFIG_MATHEMU) += math-emu/ + +obj-$(CONFIG_BUILTIN_DTB) += boot/dts/ diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig new file mode 100644 index 000000000000..0fd1be7195cc --- /dev/null +++ b/arch/sw_64/Kconfig @@ -0,0 +1,645 @@ +# SPDX-License-Identifier: GPL-2.0 +config SW64 + bool + default y + select ACPI + select ACPI_MCFG if (ACPI && PCI) + select ACPI_REDUCED_HARDWARE_ONLY + select ARCH_ATOMIC + select 
ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI + select ARCH_HAS_ELF_RANDOMIZE + select ARCH_HAS_PHYS_TO_DMA + select ARCH_HAS_PMEM_API + select ARCH_HAS_PTE_DEVMAP + select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_SG_CHAIN + select ARCH_HAS_UACCESS_FLUSHCACHE + select ARCH_HAS_VM_GET_PAGE_PROT + select ARCH_HAS_ZONE_DEVICE + select ARCH_HAVE_NMI_SAFE_CMPXCHG + select ARCH_INLINE_READ_LOCK + select ARCH_INLINE_READ_LOCK_BH + select ARCH_INLINE_READ_LOCK_IRQ + select ARCH_INLINE_READ_LOCK_IRQSAVE + select ARCH_INLINE_READ_UNLOCK + select ARCH_INLINE_READ_UNLOCK_BH + select ARCH_INLINE_READ_UNLOCK_IRQ + select ARCH_INLINE_READ_UNLOCK_IRQRESTORE + select ARCH_INLINE_SPIN_LOCK + select ARCH_INLINE_SPIN_LOCK_BH + select ARCH_INLINE_SPIN_LOCK_IRQ + select ARCH_INLINE_SPIN_LOCK_IRQSAVE + select ARCH_INLINE_SPIN_TRYLOCK + select ARCH_INLINE_SPIN_TRYLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK + select ARCH_INLINE_SPIN_UNLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK_IRQ + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE + select ARCH_INLINE_WRITE_LOCK + select ARCH_INLINE_WRITE_LOCK_BH + select ARCH_INLINE_WRITE_LOCK_IRQ + select ARCH_INLINE_WRITE_LOCK_IRQSAVE + select ARCH_INLINE_WRITE_UNLOCK + select ARCH_INLINE_WRITE_UNLOCK_BH + select ARCH_INLINE_WRITE_UNLOCK_IRQ + select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE + select ARCH_NO_PREEMPT + select ARCH_SUPPORTS_ACPI + select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_NUMA_BALANCING + select ARCH_SUPPORTS_UPROBES + select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_USE_QUEUED_RWLOCKS + select ARCH_USE_QUEUED_SPINLOCKS + select ARCH_WANT_DEFAULT_BPF_JIT + select ARCH_WANT_FRAME_POINTERS + select ARCH_WANT_IPC_PARSE_VERSION + select AUDIT_ARCH + select COMMON_CLK + select DMA_OPS if PCI + select GENERIC_CLOCKEVENTS + select GENERIC_IRQ_LEGACY + select GENERIC_IRQ_MIGRATION if SMP + select GENERIC_IRQ_PROBE + select GENERIC_IRQ_SHOW + select GENERIC_PCI_IOMAP if PCI + select GENERIC_SMP_IDLE_THREAD + select GENERIC_STRNCPY_FROM_USER + select 
GENERIC_STRNLEN_USER + select GENERIC_TIME_VSYSCALL + select HANDLE_DOMAIN_IRQ + select HARDIRQS_SW_RESEND + select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_KGDB + select HAVE_ARCH_SECCOMP_FILTER + select HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ASM_MODVERSIONS + select HAVE_C_RECORDMCOUNT + select HAVE_DEBUG_BUGVERBOSE + select HAVE_DYNAMIC_FTRACE + select HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_EBPF_JIT + select HAVE_FAST_GUP + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER + select HAVE_IDE + select HAVE_KPROBES + select HAVE_KPROBES_ON_FTRACE + select HAVE_KRETPROBES + select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_MEMBLOCK + select HAVE_MEMBLOCK_NODE_MAP + select HAVE_MOD_ARCH_SPECIFIC + select HAVE_PCI + select HAVE_PCSPKR_PLATFORM + select HAVE_PERF_EVENTS + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RELIABLE_STACKTRACE if STACKTRACE + select HAVE_RSEQ + select HAVE_SYSCALL_TRACEPOINTS + select IRQ_FORCED_THREADING + select LOCK_MM_AND_FIND_VMA + select MEMORY_HOTPLUG_SPARSE if MEMORY_HOTPLUG + select MODULES_USE_ELF_RELA + select NO_BOOTMEM + select OF_EARLY_FLATTREE if OF + select OLD_SIGSUSPEND + select PCI_DOMAINS_GENERIC if PCI + select PCI_ECAM if (ACPI && PCI) + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI + select PCI_SW64 if PCI + select SET_FS + select SPARSEMEM_EXTREME if SPARSEMEM + select SW64_IRQ_CPU + select SW64_IRQ_MSI if PCI_MSI + select SW64_IRQ_MSI_VT if PCI_MSI + select SW64_TIMER + select SWIOTLB + select THREAD_INFO_IN_TASK + +config LOCKDEP_SUPPORT + def_bool y + +config 64BIT + def_bool y + +config MMU + bool + default y + +config PGTABLE_LEVELS + int + default 4 + +config ARCH_SUPPORTS_HUGETLBFS + def_bool y + +config ARCH_ENABLE_MEMORY_HOTPLUG + bool + default y + +config ARCH_ENABLE_MEMORY_HOTREMOVE + bool + default y + +config 
ARCH_HAS_ILOG2_U32 + bool + default n + +config ARCH_HAS_ILOG2_U64 + bool + default n + +config GENERIC_GPIO + bool + +config GENERIC_CALIBRATE_DELAY + bool + default y + +config ZONE_DMA32 + bool + default y + +config NEED_DMA_MAP_STATE + def_bool y + +config NEED_SG_DMA_LENGTH + def_bool y + +config ARCH_WANT_HUGE_PMD_SHARE + def_bool y + +config GENERIC_ISA_DMA + bool + default y + +config NONCACHE_PAGE + bool + depends on SW64 + default y + +config AUDIT_ARCH + bool + +config SYS_HAS_EARLY_PRINTK + bool + +config HAVE_CSRRW + bool + +menu "System setup" + +menu "Machine Configuration" + +choice + prompt "Subarchitecture Configuration" + +config SUBARCH_C3B + bool "C3B" + +config SUBARCH_C4 + bool "C4" + select HAVE_CSRRW + select GENERIC_SCHED_CLOCK +endchoice + +choice + prompt "Uncore Configuration" + +config UNCORE_XUELANG + bool "Uncore for C3B" + depends on SUBARCH_C3B + help + Sunway cpu uncore for C3B + +config UNCORE_JUNZHANG + bool "Uncore for C4" + depends on SUBARCH_C4 + help + Sunway cpu uncore for C4 +endchoice + +choice + prompt "Platform Type" + +config PLATFORM_XUELANG + bool "Xuelang" + depends on UNCORE_XUELANG + select SPARSE_IRQ + select SYS_HAS_EARLY_PRINTK + select SW64_INTC_V2 + select I2C_SUNWAY if I2C + help + Sunway board chipset for C3B + +config PLATFORM_JUNZHANG + bool "JunZhang" + depends on UNCORE_JUNZHANG + select SPARSE_IRQ + select SYS_HAS_EARLY_PRINTK + help + Sunway board chipset for C4 + +endchoice + +config MIGHT_HAVE_PC_SERIO + bool "Use PC serio device i8042" + select ARCH_MIGHT_HAVE_PC_SERIO + default n + +endmenu + +menu "CPU Power Management" +source "drivers/cpufreq/Kconfig" + +config SW64_CPUAUTOPLUG + bool "sw64 CPU Autoplug interface" + depends on SW64_CPUFREQ + default y + help + Turns on the interface for SW64_CPU CPUAUTOPLUG. 
+ +endmenu +# clear all implied options (don't want default values for those): +# Most of these machines have ISA slots; not exactly sure which don't, +# and this doesn't activate hordes of code, so do it always. +config ISA + bool + default y + help + Find out whether you have ISA slots on your motherboard. ISA is the + name of a bus system, i.e. the way the CPU talks to the other stuff + inside your box. Other bus systems are PCI, EISA, MicroChannel + (MCA) or VESA. ISA is an older system, now being displaced by PCI; + newer boards don't support it. If you have ISA, say Y, otherwise N. + +config ISA_DMA_API + bool + default y + +config PCI_DOMAINS + def_bool PCI + +config PCI_DOMAINS_GENERIC + def_bool PCI + +config PCI_SYSCALL + def_bool PCI + +config IOMMU_HELPER + def_bool PCI + +config PHYSICAL_START + hex "Physical address where the kernel starts" + default "0x900000" + help + This gives the physical address where the kernel starts, and it + is 0x10000 before _text. If you plan to use kernel for capturing + the crash dump change this value to start of the reserved region + (the "X" value as specified in the "crashkernel=YM@XM" command + line boot parameter passed to the panic-ed kernel). + +config KEXEC + bool "Kexec system call (EXPERIMENTAL)" + select KEXEC_CORE + help + kexec is a system call that implements the ability to shutdown your + current kernel, and to start another kernel. It is like a reboot + but it is independent of the system firmware. And like a reboot + you can start any kernel with it, not just Linux. + + The name comes from the similarity to the exec system call. + + It is an ongoing process to be certain the hardware in a machine + is properly shutdown, so do not be surprised if this code does not + initially work for you. As of this writing the exact hardware + interface is strongly in flux, so no good recommendation can be + made. 
+ +config CRASH_DUMP + bool "Kernel crash dumps (EXPERIMENTAL)" + help + Generate crash dump after being started by kexec. + This should be normally only set in special crash dump kernels + which are loaded in the main kernel with kexec-tools into + a specially reserved region and then later executed after + a crash by kdump/kexec. The crash dump kernel must be compiled + to a memory address not used by the main kernel or firmware using + PHYSICAL_START. + +config SECCOMP + def_bool y + prompt "Enable seccomp to safely compute untrusted bytecode" + help + This kernel feature is useful for number crunching applications + that may need to compute untrusted bytecode during their + execution. By using pipes or other transports made available to + the process as file descriptors supporting the read/write + syscalls, it's possible to isolate those applications in + their own address space using seccomp. Once seccomp is + enabled via prctl(PR_SET_SECCOMP), it cannot be disabled + and the task is only allowed to execute a few safe syscalls + defined by each seccomp mode. + + If unsure, say Y. Only embedded should say N here. + +config GENERIC_HWEIGHT + bool + default y + +config SMP + bool "Symmetric multi-processing support" + depends on SW64 + select USE_GENERIC_SMP_HELPERS + help + This enables support for systems with more than one CPU. If you have + a system with only one CPU, like most personal computers, say N. If + you have a system with more than one CPU, say Y. + + If you say N here, the kernel will run on single and multiprocessor + machines, but will use only one CPU of a multiprocessor machine. If + you say Y here, the kernel will run on many, but not all, + singleprocessor machines. On a singleprocessor machine, the kernel + will run faster if you say N here. + + See also the SMP-HOWTO available at + . + + If you don't know what to do here, say N. 
+ +config ARCH_PROC_KCORE_TEXT + def_bool y + +config HAVE_DEC_LOCK + bool "Use arch-specified dec_and_lock" + depends on SMP && !NUMA + default y + +config TRACE_IRQFLAGS_SUPPORT + def_bool y + +config ARCH_SUPPORTS_UPROBES + def_bool y + +config SCHED_SMT + bool "SMT scheduler support" + depends on SMP && SUBARCH_C4 + help + Improves the CPU scheduler's decision making when dealing with + MultiThreading at a cost of slightly increased overhead in some + places. If unsure say N here. + +config NR_CPUS + int "Maximum number of CPUs (2-256)" + range 2 256 + depends on SMP + default "64" if UNCORE_XUELANG + help + SW6 support can handle a maximum of 256 CPUs. + +config HOTPLUG_CPU + bool "Support for hot-pluggable CPUs" + depends on SMP + help + Say Y here to allow turning CPUs off and on. CPUs can be + controlled through /sys/devices/system/cpu. + ( Note: power management support will enable this option + automatically on SMP systems. ) + Say N if you want to disable CPU hotplug. + +config ARCH_SPARSEMEM_ENABLE + bool "Sparse Memory Support" + depends on SMP + select SPARSEMEM_VMEMMAP_ENABLE + +source "kernel/livepatch/Kconfig" + +config NUMA + bool "NUMA Support" + depends on SMP && !FLATMEM + select ACPI_NUMA if ACPI + help + Say Y to compile the kernel to support NUMA (Non-Uniform Memory + Access). This option is for configuring high-end multiprocessor + server machines. If in doubt, say N. + +config USE_PERCPU_NUMA_NODE_ID + def_bool y + depends on NUMA + +config NODES_SHIFT + int + default "7" + depends on NUMA + +config RELOCATABLE + bool "Relocatable kernel" + help + This builds a kernel image that retains relocation information + so it can be loaded someplace besides the default 1MB. 
+ The relocations make the kernel binary about 15% larger, + but are discarded at runtime + +config RELOCATION_TABLE_SIZE + hex "Relocation table size" + depends on RELOCATABLE + range 0x0 0x01000000 + default "0x80000" + help + A table of relocation data will be appended to the kernel binary + and parsed at boot to fix up the relocated kernel. + + This option allows the amount of space reserved for the table to be + adjusted, although the default of 1Mb should be ok in most cases. + + The build will fail and a valid size suggested if this is too small. + + If unsure, leave at the default value. + +config RANDOMIZE_BASE + bool "Randomize the address of the kernel image" + depends on RELOCATABLE + help + Randomizes the physical and virtual address at which the + kernel image is loaded, as a security feature that + deters exploit attempts relying on knowledge of the location + of kernel internals. + + Entropy is generated using any coprocessor 0 registers available. + + The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET. + + If unsure, say N. + +config RANDOMIZE_BASE_MAX_OFFSET + hex "Maximum kASLR offset" if EXPERT + depends on RANDOMIZE_BASE + range 0x0 0x20000000 + default "0x10000000" + help + When kASLR is active, this provides the maximum offset that will + be applied to the kernel image. It should be set according to the + amount of physical RAM available in the target system minus + PHYSICAL_START and must be a power of 2. + + This is limited by the size of KTEXT space, 512Mb. The default is 256MB. + +config HZ + int "HZ of the short timer" + default 500 + +source "drivers/eisa/Kconfig" + +source "drivers/pcmcia/Kconfig" + +source "fs/Kconfig.binfmt" + +source "arch/sw_64/lib/Kconfig" + +endmenu + +menu "Boot options" + +config USE_OF + bool "Flattened Device Tree support" + select OF + select IRQ_DOMAIN + help + Include support for flattened device tree machine descriptions. 
+ +config BUILTIN_DTB + bool "Embed DTB in kernel image" + depends on OF + default n + help + Embeds a device tree binary in the kernel image. + +config BUILTIN_DTB_NAME + string "Built in DTB" + depends on BUILTIN_DTB + help + Set the name of the DTB to embed, leave blank to pick one + automatically based on kernel configuration. + +config EFI + bool "UEFI runtime support" + select UCS2_STRING + select EFI_RUNTIME_WRAPPERS + default y + help + This option provides support for runtime services provided + by UEFI firmware (such as non-volatile variables, realtime + clock, and platform reset). A UEFI stub is also provided to + allow the kernel to be booted as an EFI application. This + is only useful on systems that have UEFI firmware. + +config DMI + bool "Enable support for SMBIOS (DMI) tables" + depends on EFI + default y + help + This enables SMBIOS/DMI feature for systems. + + This option is only useful on systems that have UEFI firmware. + However, even with this option, the resultant kernel should + continue to boot on existing non-UEFI platforms. + + NOTE: This does *NOT* enable or encourage the use of DMI quirks, + i.e., the practice of identifying the platform via DMI to + decide whether certain workarounds for buggy hardware and/or + firmware need to be enabled. This would require the DMI subsystem + to be enabled much earlier than we do on ARM, which is non-trivial. + +config CMDLINE_BOOL + bool "Built-in kernel command line" + help + Allow for specifying boot arguments to the kernel at + build time. On some systems (e.g. embedded ones), it is + necessary or convenient to provide some or all of the + kernel boot arguments with the kernel itself (that is, + to not rely on the boot loader to provide them.) + + To compile command line arguments into the kernel, + set this option to 'Y', then fill in the + boot arguments in CONFIG_CMDLINE. + + Systems with fully functional boot loaders (i.e. non-embedded) + should leave this option set to 'N'. 
+ +config CMDLINE + string "Built-in kernel command string" + depends on CMDLINE_BOOL + default "" + help + Enter arguments here that should be compiled into the kernel + image and used at boot time. If the boot loader provides a + command line at boot time, it is appended to this string to + form the full kernel command line, when the system boots. + + However, you can use the CONFIG_CMDLINE_OVERRIDE option to + change this behavior. + + In most cases, the command line (whether built-in or provided + by the boot loader) should specify the device for the root + file system. + +config CMDLINE_OVERRIDE + bool "Built-in command line overrides boot loader arguments" + depends on CMDLINE_BOOL + help + Set this option to 'Y' to have the kernel ignore the boot loader + command line, and use ONLY the built-in command line. + + This is used to work around broken boot loaders. This should + be set to 'N' under normal conditions. + +config FORCE_MAX_ZONEORDER + int + default "16" if (HUGETLB_PAGE) + default "11" + help + The kernel memory allocator divides physically contiguous memory + blocks into "zones", where each zone is a power of two number of + pages. This option selects the largest power of two that the kernel + keeps in the memory allocator. If you need to allocate very large + blocks of physically contiguous memory, then you may need to + increase this value. + + This config option is actually maximum order plus one. For example, + a value of 11 means that the largest free memory block is 2^10 pages. + + We make sure that we can allocate up to a HugePage size for each configuration. 
+ Hence we have : + MAX_ORDER = (PMD_SHIFT - PAGE_SHIFT) + 1 => PAGE_SHIFT - 2 + +endmenu + +source "drivers/firmware/Kconfig" + +menu "Power management options" + +source "kernel/power/Kconfig" + +source "drivers/acpi/Kconfig" + +config ARCH_SUSPEND_POSSIBLE + depends on SW64 + def_bool y + +config ARCH_HIBERNATION_POSSIBLE + depends on SW64 + def_bool y + +source "drivers/cpuidle/Kconfig" + +source "drivers/idle/Kconfig" + +endmenu + +source "arch/sw_64/kvm/Kconfig" diff --git a/arch/sw_64/Kconfig.debug b/arch/sw_64/Kconfig.debug new file mode 100644 index 000000000000..6cb3c2488b36 --- /dev/null +++ b/arch/sw_64/Kconfig.debug @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: GPL-2.0 +config EARLY_PRINTK + bool "Early printk" if EXPERT + depends on SYS_HAS_EARLY_PRINTK + default y + help + This option enables special console drivers which allow the kernel + to print messages very early in the bootup process. + + This is useful for kernel debugging when your machine crashes very + early before the console code is initialized. For normal operation, + it is not recommended because it looks ugly on some machines and + doesn't cooperate with an X server. You should normally say N here, + unless you want to debug such a crash. + +config UNA_PRINT + bool "Show debug info about user unalign memory access" + default n + +config MATHEMU + tristate "Kernel FP software completion" if DEBUG_KERNEL && !SMP + default y if !DEBUG_KERNEL || SMP + help + This option is required for IEEE compliant floating point arithmetic + on the SW. The only time you would ever not say Y is to say M in + order to debug the code. Say Y unless you know what you are doing. + +config STACKTRACE_SUPPORT + bool + default y + +config SW64_RRU + bool "Enable RRU(Remote Read User)" + depends on SW64 + default n + help + Duplicate user stdout and stderr to specific space. + Do not enable it in a production kernel. 
+ +config SW64_RRK + bool "Enable RRK(Remote Read Kernel)" + depends on SW64 + default y + help + Duplicate kernel log to specific space. + Do not enable it in a production kernel. + +config DEBUG_MATCH + bool "instruction-flow and data-flow match debugfs interface" + depends on DEBUG_FS + default n + help + Turns on the DebugFS interface for instruction-flow and data-flow match. diff --git a/arch/sw_64/Makefile b/arch/sw_64/Makefile new file mode 100644 index 000000000000..84f0dca5e9f7 --- /dev/null +++ b/arch/sw_64/Makefile @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# sw/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 1994 by Linus Torvalds +# + + +archscripts: scripts_basic + $(Q)$(MAKE) $(build)=arch/sw_64/tools relocs + +archheaders: + $(Q)$(MAKE) $(build)=arch/sw_64/kernel/syscalls all + +NM := $(NM) -B +CCVERSION := $(shell $(CC) -dumpversion) +LDFLAGS_vmlinux := -static -N #-relax +CHECKFLAGS += -D__sw__ + +ifeq ($(CONFIG_RELOCATABLE),y) +LDFLAGS_vmlinux += --emit-relocs +endif + +CHECKFLAGS += -D__sw__ +cflags-y := -pipe -ffixed-8 -mno-fp-regs #-msmall-data +ifeq ($(CONFIG_SUBARCH_C4),y) + cflags-y += -fsw-rev +endif +cflags-y += $(call cc-option, -fno-jump-tables) + +cflags-y += $(cpuflags-y) + +KBUILD_CFLAGS += $(cflags-y) +KBUILD_DEFCONFIG = xuelang_defconfig + +head-y := arch/sw_64/kernel/head.o + +core-y += arch/sw_64/ +drivers-$(CONFIG_PCI) += arch/sw_64/pci/ +libs-y += arch/sw_64/lib/ + +# export what is needed by arch/sw_64/boot/Makefile +LIBS_Y := $(patsubst %/, %/lib.a, $(libs-y)) +export LIBS_Y + +boot := arch/sw_64/boot + +#Default target when executing make with no arguments +all: $(boot)/vmlinux.bin.gz + +$(boot)/vmlinux.bin.gz: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $@ + +bootimage bootpfile bootpzfile: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +archclean: + 
$(Q)$(MAKE) $(clean)=$(boot) + $(Q)$(MAKE) $(clean)=arch/sw_64/tools + +KBUILD_IMAGE := $(boot)/vmlinux.bin + +define archhelp + echo '* boot - Compressed kernel image (arch/sw_64/boot/vmlinux.bin.gz)' +endef diff --git a/arch/sw_64/Makefile.postlink b/arch/sw_64/Makefile.postlink new file mode 100644 index 000000000000..248844d141dd --- /dev/null +++ b/arch/sw_64/Makefile.postlink @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0 +# =========================================================================== +# Post-link SW64 pass +# =========================================================================== +# +# 1. Insert relocations into vmlinux + +PHONY := __archpost +__archpost: + +-include include/config/auto.conf +include scripts/Kbuild.include + +CMD_RELOCS = arch/sw_64/tools/relocs +quiet_cmd_relocs = RELOCS $@ + cmd_relocs = $(CMD_RELOCS) $@ + +# `@true` prevents complaint when there is nothing to be done + +vmlinux: FORCE + @true +ifeq ($(CONFIG_RELOCATABLE),y) + $(call if_changed,relocs) +endif + +%.ko: FORCE + @true + +clean: + @true + +PHONY += FORCE clean + +FORCE: + +.PHONY: $(PHONY) diff --git a/arch/sw_64/boot/.gitignore b/arch/sw_64/boot/.gitignore new file mode 100644 index 000000000000..8a90e24c76ab --- /dev/null +++ b/arch/sw_64/boot/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +vmlinux diff --git a/arch/sw_64/boot/Makefile b/arch/sw_64/boot/Makefile new file mode 100644 index 000000000000..dd0976484649 --- /dev/null +++ b/arch/sw_64/boot/Makefile @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# arch/sw_64/boot/Makefile +# +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Based on arch/arm64/boot/Makefile. 
+# + +OBJCOPYFLAGS_vmlinux.bin := -O binary + +targets := vmlinux vmlinux.bin vmlinux.bin.gz + +quiet_cmd_strip = STRIP $@ + cmd_strip = $(STRIP) -o $@ $< + +# Compressed kernel image +$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE + $(call if_changed,gzip) + @echo ' Kernel $@ is ready' + +$(obj)/vmlinux: vmlinux FORCE + $(call if_changed,strip) + +$(obj)/vmlinux.bin: $(obj)/vmlinux FORCE + $(call if_changed,objcopy) diff --git a/arch/sw_64/boot/dts/Makefile b/arch/sw_64/boot/dts/Makefile new file mode 100644 index 000000000000..e32c159cab64 --- /dev/null +++ b/arch/sw_64/boot/dts/Makefile @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0 +# Built-in dtb + +ifeq ($(CONFIG_PLATFORM_XUELANG),y) +builtindtb-y := chip3 +endif + +ifeq ($(CONFIG_PLATFORM_JUNZHANG),y) +builtindtb-y := empty +endif + +ifeq ($(CONFIG_BUILTIN_DTB), y) +ifneq ($(CONFIG_BUILTIN_DTB_NAME),"") + builtindtb-y := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_NAME)) +endif + +obj-y += $(builtindtb-y).dtb.o +dtb-y := $(builtindtb-y).dtb + +# for CONFIG_OF_ALL_DTBS test +dtstree := $(srctree)/$(src) +dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) +else +dtb-y := $(builtindtb-y).dtb +endif + +clean-files := *.dtb *.dtb.S diff --git a/arch/sw_64/include/asm/Kbuild b/arch/sw_64/include/asm/Kbuild new file mode 100644 index 000000000000..0dd0a704d8f1 --- /dev/null +++ b/arch/sw_64/include/asm/Kbuild @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 + +generic-y += clkdev.h +generic-y += export.h +generic-y += kvm_types.h +generic-y += mcs_spinlock.h +generic-y += param.h +generic-y += qrwlock.h +generic-y += qspinlock.h +generic-y += rwsem.h +generic-y += seccomp.h +generic-y += segment.h +generic-y += types.h +generic-y += user.h + +generated-y += syscall_table.h diff --git a/arch/sw_64/include/uapi/asm/Kbuild b/arch/sw_64/include/uapi/asm/Kbuild new file mode 100644 index 000000000000..15700040f138 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/Kbuild @@ -0,0 +1,5 @@ +# 
SPDX-License-Identifier: GPL-2.0 +# UAPI Header export list + +generic-y += kvm_para.h +generated-y += unistd_64.h diff --git a/arch/sw_64/kernel/.gitignore b/arch/sw_64/kernel/.gitignore new file mode 100644 index 000000000000..46c9537c5551 --- /dev/null +++ b/arch/sw_64/kernel/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +vmlinux.lds diff --git a/arch/sw_64/kernel/Makefile b/arch/sw_64/kernel/Makefile new file mode 100644 index 000000000000..abf27ad19a94 --- /dev/null +++ b/arch/sw_64/kernel/Makefile @@ -0,0 +1,51 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the linux kernel. +# + +extra-y := vmlinux.lds +asflags-y := $(KBUILD_CFLAGS) +ccflags-y := -Wno-sign-compare + +ifdef CONFIG_FTRACE +CFLAGS_REMOVE_ftrace.o = -pg +CFLAGS_REMOVE_insn.o = -pg +CFLAGS_REMOVE_printk.o = -pg +endif + +obj-y := entry.o fpu.o traps.o process.o sys_sw64.o irq.o \ + irq_sw64.o signal.o setup.o ptrace.o time.o \ + systbls.o dup_print.o chip_setup.o \ + insn.o early_init.o topology.o cacheinfo.o \ + vdso.o vdso/ hmcall.o stacktrace.o idle.o reset.o \ + head.o termios.o + +obj-$(CONFIG_SUBARCH_C3B) += tc.o +obj-$(CONFIG_ACPI) += acpi.o +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_PM) += pm.o +obj-$(CONFIG_SUSPEND) += suspend_asm.o suspend.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o +obj-$(CONFIG_HIBERNATION) += hibernate_asm.o hibernate.o +obj-$(CONFIG_AUDIT) += audit.o +obj-$(CONFIG_RELOCATABLE) += relocate.o +obj-$(CONFIG_DEBUG_FS) += segvdbg.o unaligned.o +obj-$(CONFIG_JUMP_LABEL) += jump_label.o +obj-$(CONFIG_DEBUG_MATCH) += match.o + +ifndef CONFIG_PCI +obj-y += pci-noop.o +endif + +# Core logic support +obj-$(CONFIG_SW64_CPUAUTOPLUG) += cpuautoplug.o + +obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o +obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o +obj-$(CONFIG_KPROBES) += kprobes/ +obj-$(CONFIG_UPROBES) += uprobes.o +obj-$(CONFIG_EARLY_PRINTK) += 
early_printk.o +obj-$(CONFIG_KGDB) += kgdb.o +obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o diff --git a/arch/sw_64/kernel/vmlinux.lds.S b/arch/sw_64/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..9b81b2c7afb8 --- /dev/null +++ b/arch/sw_64/kernel/vmlinux.lds.S @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#define RUNTIME_DISCARD_EXIT +#define EMITS_PT_NOTE +#define RO_EXCEPTION_TABLE_ALIGN 16 + +#include +#include +#include +#include +#include + +OUTPUT_FORMAT("elf64-sw_64") +OUTPUT_ARCH(sw_64) +ENTRY(__start) +PHDRS { text PT_LOAD; note PT_NOTE; } +jiffies = jiffies_64; +SECTIONS +{ + . = _TEXT_START; + + __start = .; + _text = .; /* Text and read-only data */ + _stext = .; + .text : { + HEAD_TEXT + TEXT_TEXT + SCHED_TEXT + LOCK_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + KPROBES_TEXT + *(.fixup) + *(.gnu.warning) + } :text + _etext = .; /* End of text section */ + + RO_DATA(PAGE_SIZE) + + /* Will be freed after init */ + __init_begin = ALIGN(PAGE_SIZE); + INIT_TEXT_SECTION(PAGE_SIZE) + INIT_DATA_SECTION(16) + /* we have to discard exit text and such at runtime, not link time */ + .exit.text : + { + EXIT_TEXT + } + .exit.data : + { + EXIT_DATA + } + PERCPU_SECTION(L1_CACHE_BYTES) + + /* + * Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page + * needed for the THREAD_SIZE aligned init_task gets freed after init + */ + . = ALIGN(THREAD_SIZE); + __init_end = .; + /* Freed after init ends here */ + + _sdata = .; /* Start of rw data section */ + _data = .; + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + .got : { +#ifdef CONFIG_RELOCATABLE + _got_start = .; +#endif + *(.got) +#ifdef CONFIG_RELOCATABLE + _got_end = .; +#endif + } + .sdata : { + *(.sdata) + } + _edata = .; /* End of data section */ + +#ifdef CONFIG_RELOCATABLE + _. = ALIGN(4); + .data.reloc : { + _relocation_start = .; + /* + * Space for relocation table + * This needs to be filled so that the + * relocs tool can overwrite the content. 
+ * An invalid value is left at the start of the + * section to abort relocation if the table + * has not been filled in. + */ + LONG(0xFFFFFFFF); + FILL(0); + . += CONFIG_RELOCATION_TABLE_SIZE - 4; + _relocation_end = .; + } +#endif + BSS_SECTION(0, 0, 0) + _end = .; + + .mdebug 0 : { + *(.mdebug) + } + .note 0 : { + *(.note) + } + + STABS_DEBUG + DWARF_DEBUG + ELF_DETAILS + + DISCARDS +} -- Gitee From b76d7100fedea7aae06b7ccb33c1e25365def823 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:05 +0800 Subject: [PATCH 0273/2138] anolis: sw64: add CPU definition headers ANBZ: #4688 Add common headers (CPU definition) for basic SW64 support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/core.h | 86 ++++++++++++++++++++++++ arch/sw_64/include/asm/cpu.h | 5 ++ arch/sw_64/include/asm/csr.h | 97 ++++++++++++++++++++++++++++ arch/sw_64/include/uapi/asm/regdef.h | 45 +++++++++++++ 4 files changed, 233 insertions(+) create mode 100644 arch/sw_64/include/asm/core.h create mode 100644 arch/sw_64/include/asm/cpu.h create mode 100644 arch/sw_64/include/asm/csr.h create mode 100644 arch/sw_64/include/uapi/asm/regdef.h diff --git a/arch/sw_64/include/asm/core.h b/arch/sw_64/include/asm/core.h new file mode 100644 index 000000000000..2b6748cec93d --- /dev/null +++ b/arch/sw_64/include/asm/core.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CORE_H +#define _ASM_SW64_CORE_H + +#include + +#define II_II0 0 +#define II_II1 1 +#define II_SLEEP 2 +#define II_WAKE 3 +#define II_NMII 6 + +#define II_RESET II_NMII + +#if defined(CONFIG_SUBARCH_C3B) + +#define DOMAIN_ID_BITS 2 +#define DOMAIN_ID_SHIFT 5 + +#define THREAD_ID_BITS 1 +#define THREAD_ID_SHIFT 31 + +#define CORE_ID_BITS 5 +#define CORE_ID_SHIFT 0 + +static inline bool core_is_ht(void) +{ + return 0; +} + +#elif 
defined(CONFIG_SUBARCH_C4) + +#define DOMAIN_ID_BITS 2 +#define DOMAIN_ID_SHIFT 12 + +#define THREAD_ID_BITS 1 +#define THREAD_ID_SHIFT 8 + +#define CORE_ID_BITS 6 +#define CORE_ID_SHIFT 0 + +static inline bool core_is_ht(void) +{ + return rdhtctl() == 0x3; +} + +#endif + +#define DOMAIN_ID_MASK (GENMASK(DOMAIN_ID_BITS - 1, 0) << DOMAIN_ID_SHIFT) +#define THREAD_ID_MASK (GENMASK(THREAD_ID_BITS - 1, 0) << THREAD_ID_SHIFT) +#define CORE_ID_MASK (GENMASK(CORE_ID_BITS - 1, 0) << CORE_ID_SHIFT) +#define MAX_CORES_PER_CPU (1 << CORE_ID_BITS) + +/* + * 0x00 ~ 0xff for hardware mm fault + */ + +#define MMCSR__TNV 0x0 +#define MMCSR__IACV 0x1 +#define MMCSR__FOR 0x2 +#define MMCSR__FOE 0x3 +#define MMCSR__FOW 0x4 + +#define MMCSR__BAD_DVA 0x6 +#define MMCSR__ACV1 0x7 +#define MMCSR__ACV0 0xc +#define MMCSR__BAD_IVA 0xf + +/* 0x100 ~ 0x1ff for match debug */ +#define MMCSR__DA_MATCH 0x100 +#define MMCSR__DV_MATCH 0x101 +#define MMCSR__DAV_MATCH 0x102 +#define MMCSR__IA_MATCH 0x103 +#define MMCSR__IDA_MATCH 0x104 +#define MMCSR__IV_MATCH 0x105 + + /* entry.S */ +extern void entArith(void); +extern void entIF(void); +extern void entInt(void); +extern void entMM(void); +extern void entSys(void); +extern void entUna(void); +/* head.S */ +extern void __smp_callin(unsigned long args); +#endif /* _ASM_SW64_CORE_H */ diff --git a/arch/sw_64/include/asm/cpu.h b/arch/sw_64/include/asm/cpu.h new file mode 100644 index 000000000000..4da30bb91d89 --- /dev/null +++ b/arch/sw_64/include/asm/cpu.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CPU_H +#define _ASM_SW64_CPU_H + +#endif /* _ASM_SW64_CPU_H */ diff --git a/arch/sw_64/include/asm/csr.h b/arch/sw_64/include/asm/csr.h new file mode 100644 index 000000000000..0610384208a4 --- /dev/null +++ b/arch/sw_64/include/asm/csr.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CSR_H +#define _ASM_SW64_CSR_H + +#include + +#define CSR_EXC_SUM 0xd +#define CSR_INT_EN 0x1a +#define 
CSR_INT_STAT 0x1b +#define CSR_PCIE_MSI0_INT 0x1d +#define CSR_PCIE_MSI1_INT 0x1e +#define CSR_PCIE_MSI2_INT 0x1f +#define CSR_PCIE_MSI3_INT 0x20 +#define CSR_INT_VEC 0x2d +#define CSR_PCIE_MSI0_INTEN 0x35 +#define CSR_PCIE_MSI1_INTEN 0x36 +#define CSR_PCIE_MSI2_INTEN 0x37 +#define CSR_PCIE_MSI3_INTEN 0x38 +#define CSR_EXC_GPA 0x3b +#define CSR_EXC_PC 0xe +#define CSR_AS_INFO 0x3c +#define CSR_DS_STAT 0x48 +#define CSR_SOFTCID 0xc9 +#define CSR_DVA 0x54 +#define CSR_PTBR_SYS 0x68 +#define CSR_PTBR_USR 0x69 +#define CSR_APTP 0x6a +#define CSR_CID 0xc4 +#define CSR_WR_FREGS 0xc8 +#define CSR_SHTCLOCK 0xca +#define CSR_SHTCLOCK_OFFSET 0xcb + +#ifdef CONFIG_SUBARCH_C4 +#define CSR_IA_VPNMATCH 0xa +#define CSR_UPCR 0x15 +#define CSR_VPCR 0x16 +#define CSR_IA_MATCH 0x17 +#define CSR_IA_MASK 0x18 +#define CSR_IV_MATCH 0x19 +#define CSR_IA_UPNMATCH 0x3a +#define CSR_DC_CTLP 0x4e +#define CSR_DA_MATCH 0x51 +#define CSR_DA_MASK 0x52 +#define CSR_DA_MATCH_MODE 0x53 +#define CSR_DV_MATCH 0x56 +#define CSR_DV_MASK 0x57 +#define CSR_IDA_MATCH 0xc5 +#define CSR_IDA_MASK 0xc6 + +#define DA_MATCH_EN_S 4 +#define DV_MATCH_EN_S 6 +#define DAV_MATCH_EN_S 7 +#define DPM_MATCH 8 +#define DPM_MATCH_EN_S 10 +#define IDA_MATCH_EN_S 53 +#define IV_PM_EN_S 61 +#define IV_MATCH_EN_S 62 +#define IA_MATCH_EN_S 63 + +#endif + + +#ifdef CONFIG_HAVE_CSRRW +#define read_csr(x) \ + ({ unsigned long __val; \ + __asm__ __volatile__("csrr %0,%1" : "=r"(__val) : "i"(x)); \ + __val; }) + +#define write_csr(x, y) \ + ({ __asm__ __volatile__("csrw %0,%1" ::"r"(x), "i"(y)); }) + +#define write_csr_imb(x, y) \ + ({ __asm__ __volatile__("csrw %0,%1; imemb" ::"r"(x), "i"(y)); }) + + +#ifndef __ASSEMBLY__ +#include +static inline void update_ptbr_sys(unsigned long ptbr) +{ + imemb(); + write_csr_imb(ptbr, CSR_PTBR_SYS); +} +#endif +#else +#define read_csr(x) (0) +#define write_csr(x, y) do { } while (0) +#define write_csr_imb(x, y) do { } while (0) + +#ifndef __ASSEMBLY__ +static inline void 
update_ptbr_sys(unsigned long ptbr) +{ + wrptbr(ptbr); +} +#endif + +#endif +#endif /* _ASM_SW64_CSR_H */ diff --git a/arch/sw_64/include/uapi/asm/regdef.h b/arch/sw_64/include/uapi/asm/regdef.h new file mode 100644 index 000000000000..7460a987c726 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/regdef.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_REGDEF_H +#define _UAPI_ASM_SW64_REGDEF_H + +#define v0 $0 /* function return value */ + +#define t0 $1 /* temporary registers (caller-saved) */ +#define t1 $2 +#define t2 $3 +#define t3 $4 +#define t4 $5 +#define t5 $6 +#define t6 $7 +#define t7 $8 + +#define s0 $9 /* saved-registers (callee-saved registers) */ +#define s1 $10 +#define s2 $11 +#define s3 $12 +#define s4 $13 +#define s5 $14 +#define s6 $15 +#define fp s6 /* frame-pointer (s6 in frame-less procedures) */ + +#define a0 $16 /* argument registers (caller-saved) */ +#define a1 $17 +#define a2 $18 +#define a3 $19 +#define a4 $20 +#define a5 $21 + +#define t8 $22 /* more temps (caller-saved) */ +#define t9 $23 +#define t10 $24 +#define t11 $25 +#define ra $26 /* return address register */ +#define t12 $27 + +#define pv t12 /* procedure-variable register */ +#define AT $at /* assembler temporary */ +#define gp $29 /* global pointer */ +#define sp $30 /* stack pointer */ +#define zero $31 /* reads as zero, writes are noops */ + +#endif /* _UAPI_ASM_SW64_REGDEF_H */ -- Gitee From 0a8ad05945ae488bdae0a3d2f953a30891ca9cf6 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:01 +0800 Subject: [PATCH 0274/2138] anolis: sw64: add atomic/locking headers ANBZ: #4688 Add common headers (atomic, bitops, barrier and locking) for basic SW64 support. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/atomic.h | 547 +++++++++++++++++++++++++++++ arch/sw_64/include/asm/barrier.h | 30 ++ arch/sw_64/include/asm/bitops.h | 566 +++++++++++++++++++++++++++++++ arch/sw_64/include/asm/cmpxchg.h | 73 ++++ arch/sw_64/include/asm/percpu.h | 19 ++ arch/sw_64/include/asm/xchg.h | 485 ++++++++++++++++++++++++++ 6 files changed, 1720 insertions(+) create mode 100644 arch/sw_64/include/asm/atomic.h create mode 100644 arch/sw_64/include/asm/barrier.h create mode 100644 arch/sw_64/include/asm/bitops.h create mode 100644 arch/sw_64/include/asm/cmpxchg.h create mode 100644 arch/sw_64/include/asm/percpu.h create mode 100644 arch/sw_64/include/asm/xchg.h diff --git a/arch/sw_64/include/asm/atomic.h b/arch/sw_64/include/asm/atomic.h new file mode 100644 index 000000000000..4a68da09722c --- /dev/null +++ b/arch/sw_64/include/asm/atomic.h @@ -0,0 +1,547 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_ATOMIC_H +#define _ASM_SW64_ATOMIC_H + +#include +#include +#include + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc... + * + * But use these as seldom as possible since they are much slower + * than regular operations. + */ + +#define ATOMIC_INIT(i) { (i) } +#define ATOMIC64_INIT(i) { (i) } + +#define arch_atomic_read(v) READ_ONCE((v)->counter) +#define arch_atomic64_read(v) READ_ONCE((v)->counter) + +#define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i)) +#define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i)) + +/* + * To get proper branch prediction for the main line, we must branch + * forward to code at the end of this object's .text section, then + * branch back to restart the operation. 
+ */ +#define arch_atomic64_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new)) +#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new)) + +#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new)) +#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) + + +#ifdef CONFIG_SUBARCH_C3B +/** + * arch_atomic_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. + */ +static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " seleq %4, 1, $31, %4\n" + " wr_f %4\n" + " addw %0, %6, %1\n" + " lstw %1, 0(%3)\n" + " rd_f %1\n" + " beq %4, 2f\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} +/** + * arch_atomic64_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. 
+ */ +static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u) +{ + long old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " seleq %4, 1, $31, %4\n" + " wr_f %4\n" + " addl %0, %6, %1\n" + " lstl %1, 0(%3)\n" + " rd_f %1\n" + " beq %4, 2f\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} +/* + * arch_atomic64_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. + */ +static inline long arch_atomic64_dec_if_positive(atomic64_t *v) +{ + unsigned long old, temp1, addr, temp2; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %4, 0(%3)\n" + " cmple %4, 0, %0\n" + " seleq %0, 1, $31, %0\n" + " wr_f %0\n" + " subl %4, 1, %1\n" + " lstl %1, 0(%3)\n" + " rd_f %1\n" + " beq %0, 2f\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr), "=&r" (old) + : "m" (v->counter)); + return old - 1; +} + + + +#define ATOMIC_OP(op, asm_op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long temp1, temp2, addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstw %0, 0(%3)\n" \ + " rd_f %0\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC_OP_RETURN(op, asm_op) \ +static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ 
__volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstw %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + + + +#define ATOMIC_FETCH_OP(op, asm_op) \ +static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstw %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + + +#define ATOMIC64_OP(op, asm_op) \ +static inline void arch_atomic64_##op(long i, atomic64_t *v) \ +{ \ + unsigned long temp1, temp2, addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstl %0, 0(%3)\n" \ + " rd_f %0\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC64_OP_RETURN(op, asm_op) \ +static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)\ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstl %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ 
+ : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} + +#define ATOMIC64_FETCH_OP(op, asm_op) \ +static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstl %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + +#else /* !CONFIG_SUBARCH_C3B */ + +/** + * arch_atomic_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. + */ +static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " bne %4, 2f\n" + " addw %0, %6, %1\n" + " lstw %1, 0(%3)\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} + +/** + * arch_atomic64_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. 
+ */ +static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u) +{ + long old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " bne %4, 2f\n" + " addl %0, %6, %1\n" + " lstl %1, 0(%3)\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} + +/* + * arch_atomic64_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. + */ +static inline long arch_atomic64_dec_if_positive(atomic64_t *v) +{ + unsigned long old, temp1, addr, temp2; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %4, 0(%3)\n" + " cmple %4, 0, %0\n" + " bne %0, 2f\n" + " subl %4, 1, %1\n" + " lstl %1, 0(%3)\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr), "=&r" (old) + : "m" (v->counter)); + return old - 1; +} + +#define ATOMIC_OP(op, asm_op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long temp1, addr; \ + __asm__ __volatile__( \ + " ldi %2, %1\n" \ + "1: lldw %0, 0(%2)\n" \ + " " #asm_op " %0, %3, %0\n" \ + " lstw %0, 0(%2)\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC_OP_RETURN(op, asm_op) \ +static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstw %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 
1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + +#define ATOMIC_FETCH_OP(op, asm_op) \ +static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstw %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + + +#define ATOMIC64_OP(op, asm_op) \ +static inline void arch_atomic64_##op(long i, atomic64_t *v) \ +{ \ + unsigned long temp1, addr; \ + __asm__ __volatile__( \ + " ldi %2, %1\n" \ + "1: lldl %0, 0(%2)\n" \ + " " #asm_op " %0, %3, %0\n" \ + " lstl %0, 0(%2)\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC64_OP_RETURN(op, asm_op) \ +static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)\ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstl %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} + +#define ATOMIC64_FETCH_OP(op, asm_op) \ +static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstl %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ 
+ : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + +#endif /* CONFIG_SUBARCH_C3B */ + +#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless +#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless +#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive + +#define ATOMIC_OPS(op) \ + ATOMIC_OP(op, op##w) \ + ATOMIC_OP_RETURN(op, op##w) \ + ATOMIC_FETCH_OP(op, op##w) \ + ATOMIC64_OP(op, op##l) \ + ATOMIC64_OP_RETURN(op, op##l) \ + ATOMIC64_FETCH_OP(op, op##l) \ + +ATOMIC_OPS(add) +ATOMIC_OPS(sub) + +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed + +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed +#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed + + + + +#undef ATOMIC_OPS + +#define ATOMIC_OPS(op, asm) \ + ATOMIC_OP(op, asm) \ + ATOMIC_FETCH_OP(op, asm) \ + ATOMIC64_OP(op, asm) \ + ATOMIC64_FETCH_OP(op, asm) \ + + +ATOMIC_OPS(and, and) +ATOMIC_OPS(andnot, bic) +ATOMIC_OPS(or, bis) +ATOMIC_OPS(xor, xor) + + +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed + +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed +#define 
arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed + + +#undef ATOMIC_OPS +#undef ATOMIC64_FETCH_OP +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +#define arch_atomic_andnot arch_atomic_andnot +#define arch_atomic64_andnot arch_atomic64_andnot + +#endif /* _ASM_SW64_ATOMIC_H */ diff --git a/arch/sw_64/include/asm/barrier.h b/arch/sw_64/include/asm/barrier.h new file mode 100644 index 000000000000..bff199126c9f --- /dev/null +++ b/arch/sw_64/include/asm/barrier.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_BARRIER_H +#define _ASM_SW64_BARRIER_H + +#include <asm/compiler.h> + +#define mb() __asm__ __volatile__("memb" : : : "memory") + +#define rmb() __asm__ __volatile__("memb" : : : "memory") + +#if defined(CONFIG_SUBARCH_C3B) +#define wmb() __asm__ __volatile__("memb" : : : "memory") +#elif defined(CONFIG_SUBARCH_C4) +#define wmb() __asm__ __volatile__("wmemb" : : : "memory") +#endif + +#define imemb() __asm__ __volatile__("imemb" : : : "memory") + +#ifdef CONFIG_SMP +#define __ASM_SMP_MB "\tmemb\n" +#else +#define __ASM_SMP_MB +#endif + +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() + +#include <asm-generic/barrier.h> + +#endif /* _ASM_SW64_BARRIER_H */ diff --git a/arch/sw_64/include/asm/bitops.h b/arch/sw_64/include/asm/bitops.h new file mode 100644 index 000000000000..b3cdabd95abf --- /dev/null +++ b/arch/sw_64/include/asm/bitops.h @@ -0,0 +1,566 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_BITOPS_H +#define _ASM_SW64_BITOPS_H + +#ifndef _LINUX_BITOPS_H +#error only <linux/bitops.h> can be included directly +#endif + +#include <asm/compiler.h> +#include <asm/barrier.h> + +#ifdef CONFIG_SUBARCH_C3B +/* + * These have to be done with inline assembly: that way the bit-setting + * is guaranteed to be atomic. All bit operations return 0 if the bit + * was cleared before the operation and != 0 if it was not. 
+ * + * To get proper branch prediction for the main line, we must branch + * forward to code at the end of this object's .text section, then + * branch back to restart the operation. + * + * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1). + */ + +static inline void +set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bis %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + + +static inline void +clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bic %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline void +change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " xor %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline int +test_and_set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %4, %6\n" + "1: lldw %0, 0(%4)\n" + " and %0, %5, %3\n" + " seleq %3, 1, $31, %1\n" + " wr_f %1\n" 
+ " bis %0, %5, %0\n" + " lstw %0, 0(%4)\n" + " rd_f %0\n" + " bne %3, 2f\n" // %3 is not zero, no need to set, return + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_set_bit_lock(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %4, %6\n" + "1: lldw %0, 0(%4)\n" + " and %0, %5, %3\n" + " seleq %3, 1, $31, %1\n" + " wr_f %1\n" + " bis %0, %5, %0\n" + " lstw %0, 0(%4)\n" + " rd_f %0\n" + " bne %3, 2f\n" // %3 is not zero, no need to set, return + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %4, %6\n" + "1: lldw %0, 0(%4)\n" + " and %0, %5, %3\n" + " selne %3, 1, $31, %1\n" //Note: here is SELNE!!! + " wr_f %1\n" + " bic %0, %5, %0\n" + " lstw %0, 0(%4)\n" + " rd_f %0\n" + " beq %3, 2f\n" // %3 is zero, no need to set, return + " beq %0, 3f\n" // failed to set, try again. 
+ "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %2, 1\n" + " wr_f %2\n" + " and %0, %4, %2\n" + " xor %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 3f\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +#else /* !CONFIG_SUBARCH_C3B */ +static inline void +set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %2, %4\n" + "1: lldw %0, 0(%2)\n" + " bis %0, %3, %0\n" + " lstw %0, 0(%2)\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline void +clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %2, %4\n" + "1: lldw %0, 0(%2)\n" + " bic %0, %3, %0\n" + " lstw %0, 0(%2)\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline void +change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %2, %4\n" + "1: lldw %0, 0(%2)\n" + " xor %0, %3, %0\n" + " lstw %0, 0(%2)\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (base) + : "Ir" (1UL 
<< (nr & 31)), "m" (*m)); +} + +static inline int +test_and_set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " bne %2, 2f\n" // %2 is not zero, no need to set, return + " bis %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_set_bit_lock(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " bne %2, 2f\n" // %2 is not zero, no need to set, return + " bis %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " beq %2, 2f\n" // %2 is zero, no need to set, return + " bic %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" // failed to set, try again. 
+ "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " xor %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + + +#endif /* CONFIG_SUBARCH_C3B */ + +/* + * WARNING: non atomic version. + */ +static __always_inline void +arch___set_bit(unsigned long nr, volatile unsigned long *addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m |= 1 << (nr & 31); +} + +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() + +static inline void +clear_bit_unlock(unsigned long nr, volatile void *addr) +{ + smp_mb(); + clear_bit(nr, addr); +} + +static __always_inline void +arch___clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m &= ~(1 << (nr & 31)); +} + +static inline void +__clear_bit_unlock(unsigned long nr, volatile void *addr) +{ + smp_mb(); + arch___clear_bit(nr, addr); +} + +static __always_inline void +arch___change_bit(unsigned long nr, volatile unsigned long *addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m ^= 1 << (nr & 31); +} + +static __always_inline bool +arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old | mask; + return (old & mask) != 0; +} + +static __always_inline bool +arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + 
unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old & ~mask; + return (old & mask) != 0; +} + +static __always_inline bool +arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old ^ mask; + return (old & mask) != 0; +} + +#define arch_test_bit generic_test_bit +#define arch_test_bit_acquire generic_test_bit_acquire + +/* + * ffz = Find First Zero in word. Undefined if no zero exists, + * so code should check against ~0UL first.. + * + * Do a binary search on the bits. Due to the nature of large + * constants on the sw64, it is worthwhile to split the search. + */ +static inline unsigned long ffz_b(unsigned long x) +{ + unsigned long sum, x1, x2, x4; + + x = ~x & -~x; /* set first 0 bit, clear others */ + x1 = x & 0xAA; + x2 = x & 0xCC; + x4 = x & 0xF0; + sum = x2 ? 2 : 0; + sum += (x4 != 0) * 4; + sum += (x1 != 0); + + return sum; +} + +static inline unsigned long ffz(unsigned long word) +{ + return __kernel_cttz(~word); +} + +/* + * __ffs = Find First set bit in word. Undefined if no set bit exists. + */ +static inline unsigned long __ffs(unsigned long word) +{ + return __kernel_cttz(word); +} + +#ifdef __KERNEL__ + +/* + * ffs: find first bit set. This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above __ffs. + */ + +static inline int ffs(int word) +{ + int result = __ffs(word) + 1; + + return word ? result : 0; +} + +/* + * fls: find last bit set. + */ +static inline int fls64(unsigned long word) +{ + return 64 - __kernel_ctlz(word); +} + +static inline unsigned long __fls(unsigned long x) +{ + return fls64(x) - 1; +} + +static inline int fls(int x) +{ + return fls64((unsigned int) x); +} + +/* + * hweightN: returns the hamming weight (i.e. 
the number + * of bits set) of a N-bit word + */ + +static inline unsigned long __arch_hweight64(unsigned long w) +{ + return __kernel_ctpop(w); +} + +static inline unsigned int __arch_hweight32(unsigned int w) +{ + return __arch_hweight64(w); +} + +static inline unsigned int __arch_hweight16(unsigned int w) +{ + return __arch_hweight64(w & 0xffff); +} + +static inline unsigned int __arch_hweight8(unsigned int w) +{ + return __arch_hweight64(w & 0xff); +} + +#include <asm-generic/bitops/const_hweight.h> + +#endif /* __KERNEL__ */ + +#ifdef __KERNEL__ + +/* + * Every architecture must define this function. It's the fastest + * way of searching a 100-bit bitmap. It's guaranteed that at least + * one of the 100 bits is cleared. + */ +static inline unsigned long +sched_find_first_bit(const unsigned long b[2]) +{ + unsigned long b0, b1, ofs, tmp; + + b0 = b[0]; + b1 = b[1]; + ofs = (b0 ? 0 : 64); + tmp = (b0 ? b0 : b1); + + return __ffs(tmp) + ofs; +} + +#include <asm-generic/bitops/non-instrumented-non-atomic.h> + +#include <asm-generic/bitops/le.h> + +#include <asm-generic/bitops/ext2-atomic-setbit.h> + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_BITOPS_H */ diff --git a/arch/sw_64/include/asm/cmpxchg.h b/arch/sw_64/include/asm/cmpxchg.h new file mode 100644 index 000000000000..9f51d035313d --- /dev/null +++ b/arch/sw_64/include/asm/cmpxchg.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CMPXCHG_H +#define _ASM_SW64_CMPXCHG_H + +/* + * Atomic exchange routines. + */ + +#define __ASM__MB +#define ____xchg(type, args...) __arch_xchg ## type ## _local(args) +#define ____cmpxchg(type, args...) 
__cmpxchg ## type ## _local(args) +#include <asm/xchg.h> + +#define arch_xchg_local(ptr, x) \ +({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __arch_xchg_local((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ +}) + +#define arch_cmpxchg_local(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, \ + sizeof(*(ptr))); \ +}) + +#define arch_cmpxchg64_local(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg_local((ptr), (o), (n)); \ +}) + +#ifdef CONFIG_SMP +#undef __ASM__MB +#define __ASM__MB "\tmemb\n" +#endif +#undef ____xchg +#undef ____cmpxchg +#undef _ASM_SW64_XCHG_H +#define ____xchg(type, args...) __arch_xchg ##type(args) +#define ____cmpxchg(type, args...) __cmpxchg ##type(args) +#include <asm/xchg.h> + +#define arch_xchg(ptr, x) \ +({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __arch_xchg((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ +}) + +#define arch_cmpxchg(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr)));\ +}) + +#define arch_cmpxchg64(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg((ptr), (o), (n)); \ +}) + +#undef __ASM__MB +#undef ____cmpxchg + +#define __HAVE_ARCH_CMPXCHG 1 + +#endif /* _ASM_SW64_CMPXCHG_H */ diff --git a/arch/sw_64/include/asm/percpu.h b/arch/sw_64/include/asm/percpu.h new file mode 100644 index 000000000000..3acdf36bcf55 --- /dev/null +++ b/arch/sw_64/include/asm/percpu.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PERCPU_H +#define _ASM_SW64_PERCPU_H + +/* + * To calculate addresses of locally defined variables, GCC uses + * 32-bit displacement from the GP. Which doesn't work for per cpu + * variables in modules, as an offset to the kernel per cpu area is + * way above 4G. 
+ * + * Always use weak definitions for percpu variables in modules. + */ +#if defined(MODULE) && defined(CONFIG_SMP) +#define ARCH_NEEDS_WEAK_PER_CPU +#endif + +#include <asm-generic/percpu.h> + +#endif /* _ASM_SW64_PERCPU_H */ diff --git a/arch/sw_64/include/asm/xchg.h b/arch/sw_64/include/asm/xchg.h new file mode 100644 index 000000000000..38f067d5ed04 --- /dev/null +++ b/arch/sw_64/include/asm/xchg.h @@ -0,0 +1,485 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_XCHG_H +#define _ASM_SW64_XCHG_H + +#ifndef _ASM_SW64_CMPXCHG_H +#error Do not include xchg.h directly. Use cmpxchg.h +#endif +/* + * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code + * except that local version do not have the expensive memory barrier. + * So this file is included twice from asm/cmpxchg.h. + */ + +#if defined(CONFIG_SUBARCH_C3B) +/* + * Atomic exchange. + * Since it can be used to implement critical sections + * it must clobber "memory" (also for interrupts in UP). + */ + +static inline unsigned long +____xchg(_u8, volatile char *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + + " andnot %4, 7, %3\n" + " inslb %1, %4, %1\n" + "1: lldl %2, 0(%3)\n" + " ldi %0, 1\n" + " wr_f %0\n" + " extlb %2, %4, %0\n" + " masklb %2, %4, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%3)\n" + " rd_f %2\n" + " beq %2, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u16, volatile short *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4, 7, %3\n" + " inslh %1, %4, %1\n" + "1: lldl %2, 0(%3)\n" + " ldi %0, 1\n" + " wr_f %0\n" + " extlh %2, %4, %0\n" + " masklh %2, %4, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%3)\n" + " rd_f %2\n" + " beq %2, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) 
: "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u32, volatile int *m, unsigned long val) +{ + unsigned long dummy, addr; + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bis $31, %4, %1\n" + " lstw %1, 0(%3)\n" + " rd_f %1\n" + " beq %1, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +static inline unsigned long +____xchg(_u64, volatile long *m, unsigned long val) +{ + unsigned long dummy, addr; + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldl %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bis $31, %4, %1\n" + " lstl %1, 0(%3)\n" + " rd_f %1\n" + " beq %1, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + * + * The memory barrier should be placed in SMP only when we actually + * make the change. If we don't change anything (so if the returned + * prev is equal to old) then we aren't acquiring anything new and + * we don't need any memory barrier as far I can tell. 
+ */ + +static inline unsigned long +____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslb %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlb %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " wr_f %3\n" + " masklb %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " rd_f %2\n" + " beq %3, 2f\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslh %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlh %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " wr_f %3\n" + " masklh %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " rd_f %2\n" + " beq %3, 2f\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u32, volatile int *m, int old, int new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " wr_f %1\n" + " bis $31, %6, %4\n" + " lstw %4, 0(%3)\n" + " rd_f %4\n" + " beq %1, 2f\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldl 
%0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " wr_f %1\n" + " bis $31, %6, %4\n" + " lstl %4, 0(%3)\n" + " rd_f %4\n" + " beq %1, 2f\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +#elif defined(CONFIG_SUBARCH_C4) +/* + * Atomic exchange. + * Since it can be used to implement critical sections + * it must clobber "memory" (also for interrupts in UP). + */ + +static inline unsigned long +____xchg(_u8, volatile char *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4, 7, %3\n" + " inslb %1, %4, %1\n" + "1: lldl %2, 0(%3)\n" + " extlb %2, %4, %0\n" + " masklb %2, %4, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%3)\n" + " beq %2, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u16, volatile short *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4, 7, %3\n" + " inslh %1, %4, %1\n" + "1: lldl %2, 0(%3)\n" + " extlh %2, %4, %0\n" + " masklh %2, %4, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%3)\n" + " beq %2, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u32, volatile int *m, unsigned long val) +{ + unsigned long dummy, addr; + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " bis $31, %4, %1\n" + " lstw %1, 0(%3)\n" + " beq %1, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +static inline unsigned long +____xchg(_u64, volatile long *m, unsigned 
long val) +{ + unsigned long dummy, addr; + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldl %0, 0(%3)\n" + " bis $31, %4, %1\n" + " lstl %1, 0(%3)\n" + " beq %1, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + * + * The memory barrier should be placed in SMP only when we actually + * make the change. If we don't change anything (so if the returned + * prev is equal to old) then we aren't acquiring anything new and + * we don't need any memory barrier as far I can tell. + */ +static inline unsigned long +____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslb %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlb %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " beq %3, 2f\n" + " masklb %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslh %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlh %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " beq %3, 2f\n" + " masklh %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} 
+ +static inline unsigned long +____cmpxchg(_u32, volatile int *m, int old, int new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " beq %1, 2f\n" + " bis $31, %6, %4\n" + " lstw %4, 0(%3)\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " beq %1, 2f\n" + " bis $31, %6, %4\n" + " lstl %4, 0(%3)\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +#endif + +/* This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid xchg(). + */ +extern void __xchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +____xchg(, volatile void *ptr, unsigned long x, int size) +{ + switch (size) { + case 1: + return ____xchg(_u8, ptr, x); + case 2: + return ____xchg(_u16, ptr, x); + case 4: + return ____xchg(_u32, ptr, x); + case 8: + return ____xchg(_u64, ptr, x); + } + __xchg_called_with_bad_pointer(); + return x; +} + +/* This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid cmpxchg(). 
+ */ +extern void __cmpxchg_called_with_bad_pointer(void); + +static __always_inline unsigned long ____cmpxchg(, volatile void *ptr, + unsigned long old, + unsigned long new, int size) +{ + switch (size) { + case 1: + return ____cmpxchg(_u8, ptr, old, new); + case 2: + return ____cmpxchg(_u16, ptr, old, new); + case 4: + return ____cmpxchg(_u32, ptr, old, new); + case 8: + return ____cmpxchg(_u64, ptr, old, new); + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#endif /* _ASM_SW64_XCHG_H */ -- Gitee From f28dd2fa90c5c2fbcef8a626bcea27bfdcc4eac9 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:11 +0800 Subject: [PATCH 0275/2138] anolis: sw64: add common headers ANBZ: #4688 Add some other common headers for basic SW64 support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/asm-offsets.h | 1 + arch/sw_64/include/asm/asm-prototypes.h | 22 ++ arch/sw_64/include/asm/bug.h | 8 + arch/sw_64/include/asm/device.h | 13 ++ arch/sw_64/include/asm/hmcall.h | 236 ++++++++++++++++++++++ arch/sw_64/include/asm/hw_init.h | 167 +++++++++++++++ arch/sw_64/include/asm/idle.h | 7 + arch/sw_64/include/asm/insn.h | 97 +++++++++ arch/sw_64/include/asm/linkage.h | 9 + arch/sw_64/include/asm/word-at-a-time.h | 43 ++++ arch/sw_64/include/uapi/asm/bitsperlong.h | 9 + arch/sw_64/include/uapi/asm/byteorder.h | 7 + arch/sw_64/include/uapi/asm/compiler.h | 83 ++++++++ arch/sw_64/include/uapi/asm/errno.h | 128 ++++++++++++ arch/sw_64/include/uapi/asm/hmcall.h | 17 ++ arch/sw_64/include/uapi/asm/mman.h | 88 ++++++++ arch/sw_64/include/uapi/asm/param.h | 9 + arch/sw_64/include/uapi/asm/setup.h | 7 + 18 files changed, 951 insertions(+) create mode 100644 arch/sw_64/include/asm/asm-offsets.h create mode 100644 arch/sw_64/include/asm/asm-prototypes.h create mode 100644 arch/sw_64/include/asm/bug.h create mode 100644 
arch/sw_64/include/asm/device.h create mode 100644 arch/sw_64/include/asm/hmcall.h create mode 100644 arch/sw_64/include/asm/hw_init.h create mode 100644 arch/sw_64/include/asm/idle.h create mode 100644 arch/sw_64/include/asm/insn.h create mode 100644 arch/sw_64/include/asm/linkage.h create mode 100644 arch/sw_64/include/asm/word-at-a-time.h create mode 100644 arch/sw_64/include/uapi/asm/bitsperlong.h create mode 100644 arch/sw_64/include/uapi/asm/byteorder.h create mode 100644 arch/sw_64/include/uapi/asm/compiler.h create mode 100644 arch/sw_64/include/uapi/asm/errno.h create mode 100644 arch/sw_64/include/uapi/asm/hmcall.h create mode 100644 arch/sw_64/include/uapi/asm/mman.h create mode 100644 arch/sw_64/include/uapi/asm/param.h create mode 100644 arch/sw_64/include/uapi/asm/setup.h diff --git a/arch/sw_64/include/asm/asm-offsets.h b/arch/sw_64/include/asm/asm-offsets.h new file mode 100644 index 000000000000..d370ee36a182 --- /dev/null +++ b/arch/sw_64/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/sw_64/include/asm/asm-prototypes.h b/arch/sw_64/include/asm/asm-prototypes.h new file mode 100644 index 000000000000..67746d6bffb7 --- /dev/null +++ b/arch/sw_64/include/asm/asm-prototypes.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_ASM_PROTOTYPES_H +#define _ASM_SW64_ASM_PROTOTYPES_H + +#include +#include +#include +#include +#include + +#include + +extern void __divl(void); +extern void __reml(void); +extern void __divw(void); +extern void __remw(void); +extern void __divlu(void); +extern void __remlu(void); +extern void __divwu(void); +extern void __remwu(void); + +#endif /* _ASM_SW64_ASM_PROTOTYPES_H */ diff --git a/arch/sw_64/include/asm/bug.h b/arch/sw_64/include/asm/bug.h new file mode 100644 index 000000000000..4a179f236ccf --- /dev/null +++ b/arch/sw_64/include/asm/bug.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_BUG_H +#define _ASM_SW64_BUG_H + +#include +#include + 
+#endif /* _ASM_SW64_BUG_H */ diff --git a/arch/sw_64/include/asm/device.h b/arch/sw_64/include/asm/device.h new file mode 100644 index 000000000000..d999207e07d1 --- /dev/null +++ b/arch/sw_64/include/asm/device.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_DEVICE_H +#define _ASM_SW64_DEVICE_H + +struct dev_archdata { +#if defined(CONFIG_SUNWAY_IOMMU) || defined(CONFIG_SUNWAY_IOMMU_V2) + void *iommu; +#endif +}; + +struct pdev_archdata { +}; +#endif /* _ASM_SW64_DEVICE_H */ diff --git a/arch/sw_64/include/asm/hmcall.h b/arch/sw_64/include/asm/hmcall.h new file mode 100644 index 000000000000..e3bac3016740 --- /dev/null +++ b/arch/sw_64/include/asm/hmcall.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HMCALL_H +#define _ASM_SW64_HMCALL_H + +/* + * Common HMC-code + */ +/* 0x0 - 0x3F : Kernel Level HMC routine */ +#define HMC_halt 0x00 +#define HMC_rdio64 0x01 +#define HMC_rdio32 0x02 +#define HMC_cpuid 0x03 +#define HMC_sleepen 0x05 +#define HMC_rdksp 0x06 +#define HMC_wrasid 0x08 +#define HMC_rdktp 0x09 +#define HMC_wrktp 0x0A +#define HMC_rdptbr 0x0B +#define HMC_wrptbr 0x0C +#define HMC_rdhtctl 0x0D +#define HMC_wrksp 0x0E +#define HMC_mtinten 0x0F +#define HMC_load_mm 0x11 +#define HMC_tbisasid 0x14 +#define HMC_tbivpn 0x19 +#define HMC_ret 0x1A +#define HMC_wrvpcr 0x29 +#define HMC_wrfen 0x2B +#define HMC_sflush 0x2F +#define HMC_entervm 0x31 +#define HMC_hcall 0x32 +#define HMC_tbi 0x33 +#define HMC_wrent 0x34 +#define HMC_swpipl 0x35 +#define HMC_rdps 0x36 +#define HMC_wrkgp 0x37 +#define HMC_wrusp 0x38 +#define HMC_rvpcr 0x39 +#define HMC_rdusp 0x3A +#define HMC_wrtimer 0x3B +#define HMC_whami 0x3C +#define HMC_retsys 0x3D +#define HMC_sendii 0x3E +#define HMC_rti 0x3F + + +/* 0x80 - 0xBF : User Level HMC routine */ +#include + +/* Following will be deprecated from user level invocation */ +#define HMC_rwreg 0x87 +#define HMC_sz_uflush 0xA8 +#define HMC_longtime 0xB1 + +#ifdef __KERNEL__ 
+#ifndef __ASSEMBLY__ + +#include +extern void __init fixup_hmcall(void); + +extern void halt(void) __noreturn; + +#define __CALL_HMC_VOID(NAME) \ +static inline void NAME(void) \ +{ \ + __asm__ __volatile__( \ + "sys_call %0 " : : "i" (HMC_ ## NAME)); \ +} + +#define __CALL_HMC_R0(NAME, TYPE) \ +static inline TYPE NAME(void) \ +{ \ + register TYPE __r0 __asm__("$0"); \ + __asm__ __volatile__( \ + "sys_call %1 # " #NAME \ + : "=r" (__r0) \ + : "i" (HMC_ ## NAME) \ + : "$1", "$16", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define __CALL_HMC_W1(NAME, TYPE0) \ +static inline void NAME(TYPE0 arg0) \ +{ \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + __asm__ __volatile__( \ + "sys_call %1 # "#NAME \ + : "=r"(__r16) \ + : "i"(HMC_ ## NAME), "0"(__r16) \ + : "$1", "$22", "$23", "$24", "$25"); \ +} + +#define __CALL_HMC_W2(NAME, TYPE0, TYPE1) \ +static inline void NAME(TYPE0 arg0, TYPE1 arg1) \ +{ \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + __asm__ __volatile__( \ + "sys_call %2 # "#NAME \ + : "=r"(__r16), "=r"(__r17) \ + : "i"(HMC_ ## NAME), "0"(__r16), "1"(__r17) \ + : "$1", "$22", "$23", "$24", "$25"); \ +} + +#define __CALL_HMC_RW1(NAME, RTYPE, TYPE0) \ +static inline RTYPE NAME(TYPE0 arg0) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + __asm__ __volatile__( \ + "sys_call %2 # "#NAME \ + : "=r"(__r16), "=r"(__r0) \ + : "i"(HMC_ ## NAME), "0"(__r16) \ + : "$1", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define __CALL_HMC_RW2(NAME, RTYPE, TYPE0, TYPE1) \ +static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + __asm__ __volatile__( \ + "sys_call %3 # "#NAME \ + : "=r"(__r16), "=r"(__r17), "=r"(__r0) \ + : "i"(HMC_ ## NAME), "0"(__r16), "1"(__r17) \ + : "$1", "$22", "$23", "$24", "$25"); \ + return __r0; \ 
+} + +#define __CALL_HMC_RW3(NAME, RTYPE, TYPE0, TYPE1, TYPE2) \ +static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1, TYPE2 arg2) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + register TYPE2 __r18 __asm__("$18") = arg2; \ + __asm__ __volatile__( \ + "sys_call %4 # "#NAME \ + : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0) \ + : "i"(HMC_ ## NAME), "0"(__r16), "1"(__r17), "2"(__r18) \ + : "$1", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + + +__CALL_HMC_VOID(imb); +__CALL_HMC_VOID(sflush); +__CALL_HMC_VOID(wrfen); +#define fpu_enable() wrfen() + +__CALL_HMC_VOID(sleepen); +__CALL_HMC_VOID(mtinten); + +__CALL_HMC_VOID(rdktp); +#define restore_ktp() rdktp() +__CALL_HMC_VOID(wrktp); +#define save_ktp() wrktp() + +__CALL_HMC_R0(rdps, unsigned long); + +__CALL_HMC_R0(rdusp, unsigned long); +__CALL_HMC_W1(wrusp, unsigned long); + +__CALL_HMC_R0(rdksp, unsigned long); +__CALL_HMC_W1(wrksp, unsigned long); +__CALL_HMC_R0(rdhtctl, unsigned long); + +/* + * Load a mm context. This is needed when we change the page + * table pointer(CSR:PTBR) or when we update the ASID. 
+ * load_mm(asid, ptbr) + * + */ +__CALL_HMC_W2(load_mm, unsigned long, unsigned long); + +__CALL_HMC_W1(wrasid, unsigned long); +__CALL_HMC_R0(rdptbr, unsigned long); +__CALL_HMC_W1(wrptbr, unsigned long); + +__CALL_HMC_RW1(swpipl, unsigned long, unsigned long); +__CALL_HMC_R0(whami, unsigned long); +__CALL_HMC_RW1(rdio64, unsigned long, unsigned long); +__CALL_HMC_RW1(rdio32, unsigned int, unsigned long); +__CALL_HMC_W2(wrent, void*, unsigned long); +__CALL_HMC_W2(tbisasid, unsigned long, unsigned long); +__CALL_HMC_W1(wrkgp, unsigned long); +__CALL_HMC_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); +__CALL_HMC_RW3(sendii, unsigned long, unsigned long, unsigned long, unsigned long); +__CALL_HMC_W1(wrtimer, unsigned long); +__CALL_HMC_RW3(tbivpn, unsigned long, unsigned long, unsigned long, unsigned long); +__CALL_HMC_RW2(cpuid, unsigned long, unsigned long, unsigned long); + +__CALL_HMC_W1(wrtp, unsigned long); +/* + * TB routines.. + */ +#define __tbi(nr, arg, arg1...) \ +({ \ + register unsigned long __r16 __asm__("$16") = (nr); \ + register unsigned long __r17 __asm__("$17"); arg; \ + __asm__ __volatile__( \ + "sys_call %3 #__tbi" \ + : "=r" (__r16), "=r" (__r17) \ + : "0" (__r16), "i" (HMC_tbi), ##arg1 \ + : "$0", "$1", "$22", "$23", "$24", "$25"); \ +}) + +#define tbi(x, y) __tbi(x, __r17 = (y), "1" (__r17)) + +/* Invalidate all TLB, only used by hypervisor */ +#define tbia() __tbi(-2, /* no second argument */) + +/* Invalidate TLB for all processes with current VPN */ +#define tbivp() __tbi(-1, /* no second argument */) + +/* Invalidate all TLB with current VPN */ +#define tbiv() __tbi(0, /* no second argument */) + +/* Invalidate ITLB of addr with current UPN and VPN */ +#define tbisi(addr) __tbi(1, __r17 = (addr), "1" (__r17)) + +/* Invalidate DTLB of addr with current UPN and VPN */ +#define tbisd(addr) __tbi(2, __r17 = (addr), "1" (__r17)) + +/* Invalidate TLB of addr with current UPN and VPN */ +#define tbis(addr) __tbi(3, __r17 = (addr), 
"1" (__r17)) + +/* Invalidate all user TLB with current UPN and VPN */ +#define tbiu() __tbi(4, /* no second argument */) + +#endif /* !__ASSEMBLY__ */ +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_HMCALL_H */ diff --git a/arch/sw_64/include/asm/hw_init.h b/arch/sw_64/include/asm/hw_init.h new file mode 100644 index 000000000000..2078c66d1c4f --- /dev/null +++ b/arch/sw_64/include/asm/hw_init.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HW_INIT_H +#define _ASM_SW64_HW_INIT_H +#include +#include + +#include + +#define MMSIZE __va(0x2040) + +/* + * Descriptor for a cache + */ +struct cache_desc { + unsigned int size; /* Bytes per way */ + unsigned int sets; /* Number of lines per set */ + unsigned char ways; /* Number of ways */ + unsigned char linesz; /* Size of line in bytes */ + unsigned char flags; /* Flags describing cache properties */ +}; + +struct cpuinfo_sw64 { + unsigned long last_asid; + unsigned long last_vpn; + unsigned long ipi_count; + struct cache_desc icache; /* Primary I-cache */ + struct cache_desc dcache; /* Primary D or combined I/D cache */ + struct cache_desc scache; /* Secondary cache */ + struct cache_desc tcache; /* Tertiary/split secondary cache */ +} __aligned(SMP_CACHE_BYTES); + +struct cpu_desc_t { + __u8 model; + __u8 family; + __u8 chip_var; + __u8 arch_var; + __u8 arch_rev; + __u8 pa_bits; + __u8 va_bits; + char vendor_id[16]; + char model_id[64]; + unsigned long frequency; +} __randomize_layout; + +#define MAX_NUMSOCKETS 8 +struct socket_desc_t { + bool is_online; /* 1 for online, 0 for offline */ + int numcores; + unsigned long socket_mem; +}; + +enum memmap_types { + memmap_reserved, + memmap_pci, + memmap_initrd, + memmap_kvm, + memmap_crashkernel, + memmap_acpi, + memmap_use, + memmap_protected, +}; + +#define MAX_NUMMEMMAPS 64 +struct memmap_entry { + u64 addr; /* start of memory segment */ + u64 size; /* size of memory segment */ + enum memmap_types type; +}; + +extern struct cpuinfo_sw64 
cpu_data[NR_CPUS]; +extern void store_cpu_data(int cpu); + +extern struct cpu_desc_t cpu_desc; +extern struct socket_desc_t socket_desc[MAX_NUMSOCKETS]; +extern int memmap_nr; +extern struct memmap_entry memmap_map[MAX_NUMMEMMAPS]; +extern cpumask_t cpu_offline; +extern bool memblock_initialized; + +int __init add_memmap_region(u64 addr, u64 size, enum memmap_types type); +void __init process_memmap(void); + +static inline unsigned long get_cpu_freq(void) +{ + return cpu_desc.frequency; +} + +static inline void update_cpu_freq(unsigned long khz) +{ + cpu_desc.frequency = khz * 1000; +} + +#define EMUL_FLAG (0x1UL << 63) +#define MMSIZE_MASK (EMUL_FLAG - 1) + +DECLARE_STATIC_KEY_TRUE(run_mode_host_key); +DECLARE_STATIC_KEY_FALSE(run_mode_guest_key); +DECLARE_STATIC_KEY_FALSE(run_mode_emul_key); + +#define is_in_host() static_branch_likely(&run_mode_host_key) +#define is_in_guest() static_branch_unlikely(&run_mode_guest_key) +#define is_in_emul() static_branch_unlikely(&run_mode_emul_key) +#define is_guest_or_emul() !static_branch_likely(&run_mode_host_key) + +#define CPU_SW3231 0x31 +#define CPU_SW831 0x32 +#define CPU_SW8A 0x41 + +#define GET_TABLE_ENTRY 1 +#define GET_VENDOR_ID 2 +#define GET_MODEL 3 +#define GET_CPU_FREQ 4 +#define GET_CACHE_INFO 5 + +#define TABLE_ENTRY_MAX 32 +#define VENDOR_ID_MAX 2 +#define MODEL_MAX 8 +#define CACHE_INFO_MAX 4 + +#define L1_ICACHE 0 +#define L1_DCACHE 1 +#define L2_CACHE 2 +#define L3_CACHE 3 + +#define CPUID_ARCH_REV_MASK 0xf +#define CPUID_ARCH_REV(val) ((val) & CPUID_ARCH_REV_MASK) +#define CPUID_ARCH_VAR_SHIFT 4 +#define CPUID_ARCH_VAR_MASK (0xf << CPUID_ARCH_VAR_SHIFT) +#define CPUID_ARCH_VAR(val) \ + (((val) & CPUID_ARCH_VAR_MASK) >> CPUID_ARCH_VAR_SHIFT) +#define CPUID_CHIP_VAR_SHIFT 8 +#define CPUID_CHIP_VAR_MASK (0xf << CPUID_CHIP_VAR_SHIFT) +#define CPUID_CHIP_VAR(val) \ + (((val) & CPUID_CHIP_VAR_MASK) >> CPUID_CHIP_VAR_SHIFT) +#define CPUID_FAMILY_SHIFT 12 +#define CPUID_FAMILY_MASK (0xf << CPUID_FAMILY_SHIFT) 
+#define CPUID_FAMILY(val) \ + (((val) & CPUID_FAMILY_MASK) >> CPUID_FAMILY_SHIFT) +#define CPUID_MODEL_SHIFT 24 +#define CPUID_MODEL_MASK (0xff << CPUID_MODEL_SHIFT) +#define CPUID_MODEL(val) \ + (((val) & CPUID_MODEL_MASK) >> CPUID_MODEL_SHIFT) +#define CPUID_PA_BITS_SHIFT 32 +#define CPUID_PA_BITS_MASK (0x7fUL << CPUID_PA_BITS_SHIFT) +#define CPUID_PA_BITS(val) \ + (((val) & CPUID_PA_BITS_MASK) >> CPUID_PA_BITS_SHIFT) +#define CPUID_VA_BITS_SHIFT 39 +#define CPUID_VA_BITS_MASK (0x7fUL << CPUID_VA_BITS_SHIFT) +#define CPUID_VA_BITS(val) \ + (((val) & CPUID_VA_BITS_MASK) >> CPUID_VA_BITS_SHIFT) + + +#define CACHE_SIZE_SHIFT 0 +#define CACHE_SIZE_MASK (0xffffffffUL << CACHE_SIZE_SHIFT) +#define CACHE_SIZE(val) \ + (((val) & CACHE_SIZE_MASK) >> CACHE_SIZE_SHIFT) +#define CACHE_LINE_BITS_SHIFT 32 +#define CACHE_LINE_BITS_MASK (0xfUL << CACHE_LINE_BITS_SHIFT) +#define CACHE_LINE_BITS(val) \ + (((val) & CACHE_LINE_BITS_MASK) >> CACHE_LINE_BITS_SHIFT) +#define CACHE_INDEX_BITS_SHIFT 36 +#define CACHE_INDEX_BITS_MASK (0x3fUL << CACHE_INDEX_BITS_SHIFT) +#define CACHE_INDEX_BITS(val) \ + (((val) & CACHE_INDEX_BITS_MASK) >> CACHE_INDEX_BITS_SHIFT) +#define current_cpu_data cpu_data[smp_processor_id()] + +#endif /* _ASM_SW64_HW_INIT_H */ diff --git a/arch/sw_64/include/asm/idle.h b/arch/sw_64/include/asm/idle.h new file mode 100644 index 000000000000..95e145f25306 --- /dev/null +++ b/arch/sw_64/include/asm/idle.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IDLE_H +#define _ASM_SW64_IDLE_H + +extern void arch_cpu_idle(void); + +#endif /* _ASM_SW64_IDLE_H */ diff --git a/arch/sw_64/include/asm/insn.h b/arch/sw_64/include/asm/insn.h new file mode 100644 index 000000000000..437cb48d1e93 --- /dev/null +++ b/arch/sw_64/include/asm/insn.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019, serveros, linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU 
General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _ASM_SW64_INSN_H +#define _ASM_SW64_INSN_H +#include + +/* Register numbers */ +enum { + R26 = 26, + R27, + R28, + R31 = 31, +}; + +#define BR_MAX_DISP 0xfffff +/* SW64 instructions are always 32 bits. */ +#define SW64_INSN_SIZE 4 + +#define ___SW64_RA(a) (((a) & 0x1f) << 21) +#define ___SW64_RB(b) (((b) & 0x1f) << 16) +#define ___SW64_SIMP_RC(c) (((c) & 0x1f)) +#define ___SW64_ST_DISP(disp) (((disp) & 0xffff)) +#define ___SW64_SYSCALL_FUNC(func) ((func) & 0xff) +#define ___SW64_BR_DISP(disp) (((disp) & 0x1fffff)) + + +#define SW64_INSN_BIS 0x40000740 +#define SW64_INSN_CALL 0x04000000 +#define SW64_INSN_SYS_CALL 0x02000000 +#define SW64_INSN_BR 0x10000000 + +#define SW64_NOP (0x43ff075f) +#define SW64_BIS(a, b, c) (SW64_INSN_BIS | ___SW64_RA(a) | ___SW64_RB(b) | ___SW64_SIMP_RC(c)) +#define SW64_CALL(a, b, disp) (SW64_INSN_CALL | ___SW64_RA(a) | ___SW64_RB(b) | ___SW64_ST_DISP(disp)) +#define SW64_SYS_CALL(func) (SW64_INSN_SYS_CALL | ___SW64_SYSCALL_FUNC(func)) +#define SW64_BR(a, disp) (SW64_INSN_BR | ___SW64_RA(a) | ___SW64_BR_DISP(disp)) + +extern int sw64_insn_read(void *addr, u32 *insnp); +extern int sw64_insn_write(void *addr, u32 insn); +extern int sw64_insn_double_write(void *addr, u64 insn); +extern unsigned int sw64_insn_nop(void); +extern unsigned int sw64_insn_call(unsigned int ra, unsigned int rb); +extern unsigned int sw64_insn_sys_call(unsigned int num); +extern unsigned int sw64_insn_br(unsigned int ra, unsigned long pc, unsigned long new_pc); + +#define SW64_OPCODE_RA(opcode) ((opcode >> 
21) & 0x1f) + +#define SW64_INSN(name, opcode, mask) \ +static inline bool sw64_insn_is_##name(u32 insn) \ +{ \ + return (insn & mask) == opcode; \ +} + +SW64_INSN(sys_call_b, 0x00000000, 0xfc000000); +SW64_INSN(sys_call, 0x00000001, 0xfc000000); +SW64_INSN(call, 0x04000000, 0xfc000000); +SW64_INSN(ret, 0x08000000, 0xfc000000); +SW64_INSN(jmp, 0x0c000000, 0xfc000000); +SW64_INSN(br, 0x10000000, 0xfc000000); +SW64_INSN(bsr, 0x14000000, 0xfc000000); +SW64_INSN(memb, 0x18000000, 0xfc00ffff); +SW64_INSN(imemb, 0x18000001, 0xfc00ffff); +SW64_INSN(rtc, 0x18000020, 0xfc00ffff); +SW64_INSN(halt, 0x18000080, 0xfc00ffff); +SW64_INSN(rd_f, 0x18001000, 0xfc00ffff); +SW64_INSN(beq, 0xc0000000, 0xfc000000); +SW64_INSN(bne, 0xc4000000, 0xfc000000); +SW64_INSN(blt, 0xc8000000, 0xfc000000); +SW64_INSN(ble, 0xcc000000, 0xfc000000); +SW64_INSN(bgt, 0xd0000000, 0xfc000000); +SW64_INSN(bge, 0xd4000000, 0xfc000000); +SW64_INSN(blbc, 0xd8000000, 0xfc000000); +SW64_INSN(blbs, 0xdc000000, 0xfc000000); +SW64_INSN(fbeq, 0xe0000000, 0xfc000000); +SW64_INSN(fbne, 0xe4000000, 0xfc000000); +SW64_INSN(fblt, 0xe8000000, 0xfc000000); +SW64_INSN(fble, 0xec000000, 0xfc000000); +SW64_INSN(fbgt, 0xf0000000, 0xfc000000); +SW64_INSN(fbge, 0xf4000000, 0xfc000000); +SW64_INSN(lldw, 0x20000000, 0xfc00f000); +SW64_INSN(lldl, 0x20001000, 0xfc00f000); + +#endif /* _ASM_SW64_INSN_H */ diff --git a/arch/sw_64/include/asm/linkage.h b/arch/sw_64/include/asm/linkage.h new file mode 100644 index 000000000000..85b279f6211e --- /dev/null +++ b/arch/sw_64/include/asm/linkage.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_LINKAGE_H +#define _ASM_SW64_LINKAGE_H + +#define cond_syscall(x) asm(".weak\t" #x "\n" #x " = sys_ni_syscall") +#define SYSCALL_ALIAS(alias, name) \ + asm (#alias " = " #name "\n\t.globl " #alias) + +#endif /* _ASM_SW64_LINKAGE_H */ diff --git a/arch/sw_64/include/asm/word-at-a-time.h b/arch/sw_64/include/asm/word-at-a-time.h new file mode 100644 index 
000000000000..623efbec4429 --- /dev/null +++ b/arch/sw_64/include/asm/word-at-a-time.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_WORD_AT_A_TIME_H +#define _ASM_SW64_WORD_AT_A_TIME_H + +#include + +/* + * word-at-a-time interface for SW64. + */ + +/* + * We do not use the word_at_a_time struct on SW64, but it needs to be + * implemented to humour the generic code. + */ +struct word_at_a_time { + const unsigned long unused; +}; + +#define WORD_AT_A_TIME_CONSTANTS { 0 } + +/* Return nonzero if val has a zero */ +static inline unsigned long has_zero(unsigned long val, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long zero_locations = __kernel_cmpgeb(0, val); + *bits = zero_locations; + return zero_locations; +} + +static inline unsigned long prep_zero_mask(unsigned long val, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +#define create_zero_mask(bits) (bits) + +static inline unsigned long find_zero(unsigned long bits) +{ + return __kernel_cttz(bits); +} + +#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1) + +#endif /* _ASM_SW64_WORD_AT_A_TIME_H */ diff --git a/arch/sw_64/include/uapi/asm/bitsperlong.h b/arch/sw_64/include/uapi/asm/bitsperlong.h new file mode 100644 index 000000000000..712c823e23d8 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/bitsperlong.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_BITSPERLONG_H +#define _UAPI_ASM_SW64_BITSPERLONG_H + +#define __BITS_PER_LONG 64 + +#include + +#endif /* _UAPI_ASM_SW64_BITSPERLONG_H */ diff --git a/arch/sw_64/include/uapi/asm/byteorder.h b/arch/sw_64/include/uapi/asm/byteorder.h new file mode 100644 index 000000000000..ededdd045e96 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/byteorder.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_BYTEORDER_H +#define _UAPI_ASM_SW64_BYTEORDER_H + +#include + 
+#endif /* _UAPI_ASM_SW64_BYTEORDER_H */ diff --git a/arch/sw_64/include/uapi/asm/compiler.h b/arch/sw_64/include/uapi/asm/compiler.h new file mode 100644 index 000000000000..64786df0f266 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/compiler.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_COMPILER_H +#define _UAPI_ASM_SW64_COMPILER_H + +/* + * Herein are macros we use when describing various patterns we want to GCC. + * In all cases we can get better schedules out of the compiler if we hide + * as little as possible inside inline assembly. However, we want to be + * able to know what we'll get out before giving up inline assembly. Thus + * these tests and macros. + */ + +#define __kernel_inslb(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("inslb %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_inslh(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("inslh %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_insll(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("insll %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_inshw(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("inshw %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_extlb(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("extlb %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_extlh(val, shift) \ +({ \ + unsigned long __kir; \ + __asm__("extlh %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\ + __kir; \ +}) + +#define __kernel_cmpgeb(a, b) \ +({ \ + unsigned long __kir; \ + __asm__("cmpgeb %r2, %1, %0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \ + __kir; \ +}) + +#define __kernel_cttz(x) \ +({ \ + unsigned long __kir; \ + __asm__("cttz %1, %0" : "=r"(__kir) : "r"(x)); \ + __kir; \ +}) + +#define __kernel_ctlz(x) \ +({ \ + unsigned 
long __kir; \ + __asm__("ctlz %1, %0" : "=r"(__kir) : "r"(x)); \ + __kir; \ +}) + +#define __kernel_ctpop(x) \ +({ \ + unsigned long __kir; \ + __asm__("ctpop %1, %0" : "=r"(__kir) : "r"(x)); \ + __kir; \ +}) + +#endif /* _UAPI_ASM_SW64_COMPILER_H */ diff --git a/arch/sw_64/include/uapi/asm/errno.h b/arch/sw_64/include/uapi/asm/errno.h new file mode 100644 index 000000000000..969ee99ee86c --- /dev/null +++ b/arch/sw_64/include/uapi/asm/errno.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_ERRNO_H +#define _UAPI_ASM_SW64_ERRNO_H + +#include + +#undef EAGAIN /* 11 in errno-base.h */ + +#define EDEADLK 11 /* Resource deadlock would occur */ + +#define EAGAIN 35 /* Try again */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define EINPROGRESS 36 /* Operation now in progress */ +#define EALREADY 37 /* Operation already in progress */ +#define ENOTSOCK 38 /* Socket operation on non-socket */ +#define EDESTADDRREQ 39 /* Destination address required */ +#define EMSGSIZE 40 /* Message too long */ +#define EPROTOTYPE 41 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 42 /* Protocol not available */ +#define EPROTONOSUPPORT 43 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 44 /* Socket type not supported */ +#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 46 /* Protocol family not supported */ +#define EAFNOSUPPORT 47 /* Address family not supported by protocol */ +#define EADDRINUSE 48 /* Address already in use */ +#define EADDRNOTAVAIL 49 /* Cannot assign requested address */ +#define ENETDOWN 50 /* Network is down */ +#define ENETUNREACH 51 /* Network is unreachable */ +#define ENETRESET 52 /* Network dropped connection because of reset */ +#define ECONNABORTED 53 /* Software caused connection abort */ +#define ECONNRESET 54 /* Connection reset by peer */ +#define ENOBUFS 55 /* No buffer space available */ +#define EISCONN 56 /* Transport 
endpoint is already connected */ +#define ENOTCONN 57 /* Transport endpoint is not connected */ +#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 59 /* Too many references: cannot splice */ +#define ETIMEDOUT 60 /* Connection timed out */ +#define ECONNREFUSED 61 /* Connection refused */ +#define ELOOP 62 /* Too many symbolic links encountered */ +#define ENAMETOOLONG 63 /* File name too long */ +#define EHOSTDOWN 64 /* Host is down */ +#define EHOSTUNREACH 65 /* No route to host */ +#define ENOTEMPTY 66 /* Directory not empty */ + +#define EUSERS 68 /* Too many users */ +#define EDQUOT 69 /* Quota exceeded */ +#define ESTALE 70 /* Stale NFS file handle */ +#define EREMOTE 71 /* Object is remote */ + +#define ENOLCK 77 /* No record locks available */ +#define ENOSYS 78 /* Function not implemented */ + +#define ENOMSG 80 /* No message of desired type */ +#define EIDRM 81 /* Identifier removed */ +#define ENOSR 82 /* Out of streams resources */ +#define ETIME 83 /* Timer expired */ +#define EBADMSG 84 /* Not a data message */ +#define EPROTO 85 /* Protocol error */ +#define ENODATA 86 /* No data available */ +#define ENOSTR 87 /* Device not a stream */ + +#define ENOPKG 92 /* Package not installed */ + +#define EILSEQ 116 /* Illegal byte sequence */ + +/* The following are just random noise.. 
*/ +#define ECHRNG 88 /* Channel number out of range */ +#define EL2NSYNC 89 /* Level 2 not synchronized */ +#define EL3HLT 90 /* Level 3 halted */ +#define EL3RST 91 /* Level 3 reset */ + +#define ELNRNG 93 /* Link number out of range */ +#define EUNATCH 94 /* Protocol driver not attached */ +#define ENOCSI 95 /* No CSI structure available */ +#define EL2HLT 96 /* Level 2 halted */ +#define EBADE 97 /* Invalid exchange */ +#define EBADR 98 /* Invalid request descriptor */ +#define EXFULL 99 /* Exchange full */ +#define ENOANO 100 /* No anode */ +#define EBADRQC 101 /* Invalid request code */ +#define EBADSLT 102 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 104 /* Bad font file format */ +#define ENONET 105 /* Machine is not on the network */ +#define ENOLINK 106 /* Link has been severed */ +#define EADV 107 /* Advertise error */ +#define ESRMNT 108 /* Srmount error */ +#define ECOMM 109 /* Communication error on send */ +#define EMULTIHOP 110 /* Multihop attempted */ +#define EDOTDOT 111 /* RFS specific error */ +#define EOVERFLOW 112 /* Value too large for defined data type */ +#define ENOTUNIQ 113 /* Name not unique on network */ +#define EBADFD 114 /* File descriptor in bad state */ +#define EREMCHG 115 /* Remote address changed */ + +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ + +#define ELIBACC 122 /* Can not access a needed shared library */ +#define ELIBBAD 123 /* Accessing a corrupted shared library */ +#define ELIBSCN 124 /* .lib section in a.out corrupted */ +#define ELIBMAX 125 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 126 /* Cannot exec a shared library directly */ +#define ERESTART 127 /* Interrupted system call should be restarted */ +#define ESTRPIPE 128 /* Streams pipe error */ + +#define 
ENOMEDIUM 129 /* No medium found */ +#define EMEDIUMTYPE 130 /* Wrong medium type */ +#define ECANCELED 131 /* Operation Cancelled */ +#define ENOKEY 132 /* Required key not available */ +#define EKEYEXPIRED 133 /* Key has expired */ +#define EKEYREVOKED 134 /* Key has been revoked */ +#define EKEYREJECTED 135 /* Key was rejected by service */ + +/* for robust mutexes */ +#define EOWNERDEAD 136 /* Owner died */ +#define ENOTRECOVERABLE 137 /* State not recoverable */ + +#define ERFKILL 138 /* Operation not possible due to RF-kill */ + +#define EHWPOISON 139 /* Memory page has hardware error */ + +#endif /* _UAPI_ASM_SW64_ERRNO_H */ diff --git a/arch/sw_64/include/uapi/asm/hmcall.h b/arch/sw_64/include/uapi/asm/hmcall.h new file mode 100644 index 000000000000..6867fb7b4d24 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/hmcall.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_HMCALL_H +#define _UAPI_ASM_SW64_HMCALL_H + +/* hmcall may be used in user mode */ + +#define HMC_bpt 0x80 +#define HMC_callsys 0x83 +#define HMC_imb 0x86 +#define HMC_rdtp 0x9E +#define HMC_wrtp 0x9F +#define HMC_rdunique HMC_rdtp +#define HMC_wrunique HMC_wrtp +#define HMC_gentrap 0xAA +#define HMC_wrperfmon 0xB0 + +#endif /* _UAPI_ASM_SW64_HMCALL_H */ diff --git a/arch/sw_64/include/uapi/asm/mman.h b/arch/sw_64/include/uapi/asm/mman.h new file mode 100644 index 000000000000..15cb7bfee3b1 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/mman.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_MMAN_H +#define _UAPI_ASM_SW64_MMAN_H + +#define PROT_READ 0x1 /* page can be read */ +#define PROT_WRITE 0x2 /* page can be written */ +#define PROT_EXEC 0x4 /* page can be executed */ +#define PROT_SEM 0x8 /* page may be used for atomic ops */ +#define PROT_NONE 0x0 /* page can not be accessed */ +#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ 
+#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ + +#define MAP_TYPE 0x0f /* Mask for type of mapping */ +#define MAP_FIXED 0x100 /* Interpret addr exactly */ +#define MAP_ANONYMOUS 0x10 /* don't use a file */ + +/* not used by linux, may be deprecated */ +#define _MAP_HASSEMAPHORE 0x0200 +#define _MAP_INHERIT 0x0400 +#define _MAP_UNALIGNED 0x0800 + +/* These are linux-specific */ +#define MAP_GROWSDOWN 0x01000 /* stack-like segment */ +#define MAP_DENYWRITE 0x02000 /* ETXTBSY */ +#define MAP_EXECUTABLE 0x04000 /* mark it as an executable */ +#define MAP_LOCKED 0x08000 /* lock the mapping */ +#define MAP_NORESERVE 0x10000 /* don't check for reservations */ +#define MAP_POPULATE 0x20000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x40000 /* do not block on IO */ +#define MAP_STACK 0x80000 /* give out an address that is best suited for process/thread stacks */ +#define MAP_HUGETLB 0x100000 /* create a huge page mapping */ +#define MAP_FIXED_NOREPLACE 0x200000 /* MAP_FIXED which doesn't unmap underlying mapping */ + +#define MS_ASYNC 1 /* sync memory asynchronously */ +#define MS_SYNC 2 /* synchronous memory sync */ +#define MS_INVALIDATE 4 /* invalidate the caches */ + +#define MCL_CURRENT 8192 /* lock all currently mapped pages */ +#define MCL_FUTURE 16384 /* lock all additions to address space */ +#define MCL_ONFAULT 32768 /* lock all pages that are faulted in */ + +#define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */ + +#define MADV_NORMAL 0 /* no further special treatment */ +#define MADV_RANDOM 1 /* expect random page references */ +#define MADV_SEQUENTIAL 2 /* expect sequential page references */ +#define MADV_WILLNEED 3 /* will need these pages */ +#define MADV_SPACEAVAIL 5 /* ensure resources are available */ +#define MADV_DONTNEED 6 /* don't need these pages */ + +/* common/generic parameters */ +#define MADV_FREE 8 /* free pages only if memory pressure */ 
+#define MADV_REMOVE 9 /* remove these pages & resources */ +#define MADV_DONTFORK 10 /* don't inherit across fork */ +#define MADV_DOFORK 11 /* do inherit across fork */ + +#define MADV_MERGEABLE 12 /* KSM may merge identical pages */ +#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */ + +#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ +#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */ + +#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump, + overrides the coredump filter bits */ +#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */ + +#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ +#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ + +#define MADV_COLD 20 /* deactivate these pages */ +#define MADV_PAGEOUT 21 /* reclaim these pages */ + +#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */ +#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */ + +#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */ + +#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */ + +/* compatibility flags */ +#define MAP_FILE 0 + + +#define PKEY_DISABLE_ACCESS 0x1 +#define PKEY_DISABLE_WRITE 0x2 +#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE) + +#endif /* _UAPI_ASM_SW64_MMAN_H */ diff --git a/arch/sw_64/include/uapi/asm/param.h b/arch/sw_64/include/uapi/asm/param.h new file mode 100644 index 000000000000..d38e8202dd97 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/param.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_PARAM_H +#define _UAPI_ASM_SW64_PARAM_H + +#define EXEC_PAGESIZE 8192 + +#include + +#endif /* _UAPI_ASM_SW64_PARAM_H */ diff --git a/arch/sw_64/include/uapi/asm/setup.h b/arch/sw_64/include/uapi/asm/setup.h new file mode 100644 index 000000000000..e6cca4525049 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/setup.h @@ -0,0 +1,7 @@ +/* 
SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SETUP_H +#define _UAPI_ASM_SW64_SETUP_H + +#define COMMAND_LINE_SIZE 2048 + +#endif /* _UAPI_ASM_SW64_SETUP_H */ -- Gitee From 136e6bdc603b0238dff999aec757673dd9f8c528 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 17:14:53 +0800 Subject: [PATCH 0276/2138] anolis: sw64: add ELF support ANBZ: #4688 Add ELF-related definition for basic SW64 support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/elf.h | 152 +++++++++++++++++++++++++++ arch/sw_64/include/uapi/asm/auxvec.h | 11 ++ 2 files changed, 163 insertions(+) create mode 100644 arch/sw_64/include/asm/elf.h create mode 100644 arch/sw_64/include/uapi/asm/auxvec.h diff --git a/arch/sw_64/include/asm/elf.h b/arch/sw_64/include/asm/elf.h new file mode 100644 index 000000000000..95ba89a1aa9d --- /dev/null +++ b/arch/sw_64/include/asm/elf.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_ELF_H +#define _ASM_SW64_ELF_H +#ifdef __KERNEL__ +#include +#endif +/* Special values for the st_other field in the symbol table. 
*/ + + +#define STO_SW64_NOPV 0x80 +#define STO_SW64_STD_GPLOAD 0x88 + +/* + * SW-64 ELF relocation types + */ +#define R_SW64_NONE 0 /* No reloc */ +#define R_SW64_REFLONG 1 /* Direct 32 bit */ +#define R_SW64_REFQUAD 2 /* Direct 64 bit */ +#define R_SW64_GPREL32 3 /* GP relative 32 bit */ +#define R_SW64_LITERAL 4 /* GP relative 16 bit w/optimization */ +#define R_SW64_LITUSE 5 /* Optimization hint for LITERAL */ +#define R_SW64_GPDISP 6 /* Add displacement to GP */ +#define R_SW64_BRADDR 7 /* PC+4 relative 23 bit shifted */ +#define R_SW64_HINT 8 /* PC+4 relative 16 bit shifted */ +#define R_SW64_SREL16 9 /* PC relative 16 bit */ +#define R_SW64_SREL32 10 /* PC relative 32 bit */ +#define R_SW64_SREL64 11 /* PC relative 64 bit */ +#define R_SW64_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ +#define R_SW64_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ +#define R_SW64_GPREL16 19 /* GP relative 16 bit */ +#define R_SW64_COPY 24 /* Copy symbol at runtime */ +#define R_SW64_GLOB_DAT 25 /* Create GOT entry */ +#define R_SW64_JMP_SLOT 26 /* Create PLT entry */ +#define R_SW64_RELATIVE 27 /* Adjust by program base */ +#define R_SW64_BRSGP 28 +#define R_SW64_TLSGD 29 +#define R_SW64_TLS_LDM 30 +#define R_SW64_DTPMOD64 31 +#define R_SW64_GOTDTPREL 32 +#define R_SW64_DTPREL64 33 +#define R_SW64_DTPRELHI 34 +#define R_SW64_DTPRELLO 35 +#define R_SW64_DTPREL16 36 +#define R_SW64_GOTTPREL 37 +#define R_SW64_TPREL64 38 +#define R_SW64_TPRELHI 39 +#define R_SW64_TPRELLO 40 +#define R_SW64_TPREL16 41 +#define R_SW64_LITERAL_GOT 43 /* GP relative */ + +#define SHF_SW64_GPREL 0x10000000 + +/* Legal values for e_flags field of Elf64_Ehdr. */ + +#define EF_SW64_32BIT 1 /* All addresses are below 2GB */ + +/* + * ELF register definitions. + * + * For now, we just leave it at 33 (32 general regs + processor status word). 
+ */ +#define ELF_NGREG 33 + +typedef unsigned long elf_greg_t; +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +/* Same with user_fpsimd_state */ +#include +typedef struct user_fpsimd_state elf_fpregset_t; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ((x)->e_machine == EM_SW64) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS64 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_SW64 + +#define CORE_DUMP_USE_REGSET +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* + * This is the location that an ET_DYN program is loaded if exec'ed. Typical + * use of this is to invoke "./ld.so someprog" to test out a new version of + * the loader. We need to make sure that it is out of the way of the program + * that it will "exec", and that there is sufficient room for the brk. + */ + +#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) + +/* + * $0 is set by ld.so to a pointer to a function which might be + * registered using atexit. This provides a mean for the dynamic + * linker to call DT_FINI functions for shared libraries that have + * been loaded before the code runs. + + * So that we can use the same startup file with static executables, + * we start programs with a value of 0 to indicate that there is no + * such function. + */ + +#define ELF_PLAT_INIT(_r, load_addr) (_r->regs[0] = 0) + +/* + * The registers are laid out in pt_regs for HMCODE and syscall + * convenience. Re-order them for the linear elf_gregset_t. 
+ */ + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); + +#ifdef __KERNEL__ +struct pt_regs; +struct task_struct; +extern void sw64_elf_core_copy_regs(elf_greg_t *dest, struct pt_regs *pt); +#define ELF_CORE_COPY_REGS(DEST, REGS) sw64_elf_core_copy_regs(DEST, REGS); + +/* + * This yields a mask that user programs can use to figure out what + * instruction set this CPU supports. + */ + +#define ELF_HWCAP 0 + +/* + * This yields a string that ld.so will use to load implementation + * specific libraries for optimization. This is more specific in + * intent than poking at uname or /proc/cpuinfo. + */ + +#define ELF_PLATFORM ("sw_64") + + +/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */ +#define ARCH_DLINFO \ +do { \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ + (elf_addr_t)current->mm->context.vdso); \ +} while (0) + +struct mm_struct; +extern unsigned long arch_randomize_brk(struct mm_struct *mm); +#define arch_randomize_brk arch_randomize_brk +#endif + +#endif /* _ASM_SW64_ELF_H */ diff --git a/arch/sw_64/include/uapi/asm/auxvec.h b/arch/sw_64/include/uapi/asm/auxvec.h new file mode 100644 index 000000000000..309a8294be7a --- /dev/null +++ b/arch/sw_64/include/uapi/asm/auxvec.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_AUXVEC_H +#define _UAPI_ASM_SW64_AUXVEC_H + +/* VDSO location. */ +#define AT_SYSINFO_EHDR 33 + +/* entries in ARCH_DLINFO */ +#define AT_VECTOR_SIZE_ARCH 1 + +#endif /* _UAPI_ASM_SW64_AUXVEC_H */ -- Gitee From da2ab013c913708323866b86f42fb80553a56593 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:13 +0800 Subject: [PATCH 0277/2138] anolis: sw64: add some other headers ANBZ: #4688 Add some other uncommon headers for basic SW64 support. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/ast2400.h | 168 +++++++++++++++++++++++++ arch/sw_64/include/asm/socket.h | 11 ++ arch/sw_64/include/uapi/asm/fcntl.h | 58 +++++++++ arch/sw_64/include/uapi/asm/ioctl.h | 19 +++ arch/sw_64/include/uapi/asm/ioctls.h | 128 +++++++++++++++++++ arch/sw_64/include/uapi/asm/resource.h | 16 +++ arch/sw_64/include/uapi/asm/socket.h | 161 ++++++++++++++++++++++++ arch/sw_64/include/uapi/asm/sockios.h | 17 +++ arch/sw_64/include/uapi/asm/stat.h | 50 ++++++++ arch/sw_64/include/uapi/asm/termbits.h | 167 ++++++++++++++++++++++++ arch/sw_64/include/uapi/asm/termios.h | 70 +++++++++++ 11 files changed, 865 insertions(+) create mode 100644 arch/sw_64/include/asm/ast2400.h create mode 100644 arch/sw_64/include/asm/socket.h create mode 100644 arch/sw_64/include/uapi/asm/fcntl.h create mode 100644 arch/sw_64/include/uapi/asm/ioctl.h create mode 100644 arch/sw_64/include/uapi/asm/ioctls.h create mode 100644 arch/sw_64/include/uapi/asm/resource.h create mode 100644 arch/sw_64/include/uapi/asm/socket.h create mode 100644 arch/sw_64/include/uapi/asm/sockios.h create mode 100644 arch/sw_64/include/uapi/asm/stat.h create mode 100644 arch/sw_64/include/uapi/asm/termbits.h create mode 100644 arch/sw_64/include/uapi/asm/termios.h diff --git a/arch/sw_64/include/asm/ast2400.h b/arch/sw_64/include/asm/ast2400.h new file mode 100644 index 000000000000..5f4cc84ff3a8 --- /dev/null +++ b/arch/sw_64/include/asm/ast2400.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015 Weiqiang Su + * + * Both AST2400D and AST2400F package variants are supported. + */ + +#ifndef _ASM_SW64_AST2400_H +#define _ASM_SW64_AST2400_H + +#include + +/* Logical Device Numbers (LDN). 
*/ +#define AST2400_FDC 0x00 /* Floppy */ +#define AST2400_PP 0x01 /* Parallel port */ +#define AST2400_SP1 0x02 /* Com1 */ +#define AST2400_SP2 0x03 /* Com2 & IR */ +#define AST2400_KBC 0x05 /* PS/2 keyboard and mouse */ +#define AST2400_CIR 0x06 +#define AST2400_GPIO6789_V 0x07 +#define AST2400_WDT1_GPIO01A_V 0x08 +#define AST2400_GPIO1234567_V 0x09 +#define AST2400_ACPI 0x0A +#define AST2400_HWM_FPLED 0x0B /* Hardware monitor & front LED */ +#define AST2400_VID 0x0D +#define AST2400_CIRWKUP 0x0E /* CIR wakeup */ +#define AST2400_GPIO_PP_OD 0x0F /* GPIO Push-Pull/Open drain select */ +#define AST2400_SVID 0x14 +#define AST2400_DSLP 0x16 /* Deep sleep */ +#define AST2400_GPIOA_LDN 0x17 + +/* virtual LDN for GPIO and WDT */ +#define AST2400_WDT1 ((0 << 8) | AST2400_WDT1_GPIO01A_V) + +#define AST2400_GPIOBASE ((0 << 8) | AST2400_WDT1_GPIO01A_V) //? + +#define AST2400_GPIO0 ((1 << 8) | AST2400_WDT1_GPIO01A_V) +#define AST2400_GPIO1 ((1 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO2 ((2 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO3 ((3 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO4 ((4 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO5 ((5 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO6 ((6 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO7 ((7 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO8 ((0 << 8) | AST2400_GPIO6789_V) +#define AST2400_GPIO9 ((1 << 8) | AST2400_GPIO6789_V) +#define AST2400_GPIOA ((2 << 8) | AST2400_WDT1_GPIO01A_V) + +#define SUPERIO_PNP_PORT 0x2E +#define SUPERIO_CHIPID 0xC333 + +struct device_operations; +typedef struct pnp_device { + unsigned int port; + unsigned int device; + + struct device_operations *ops; +} *device_t; + +struct pnp_mode_ops { + void (*enter_conf_mode)(device_t dev); + void (*exit_conf_mode)(device_t dev); +}; + + +struct device_operations { + void (*read_resources)(device_t dev); + void (*set_resources)(device_t dev); + void (*enable_resources)(device_t dev); + void (*init)(device_t 
dev); + void (*final)(device_t dev); + void (*enable)(device_t dev); + void (*disable)(device_t dev); + + const struct pnp_mode_ops *ops_pnp_mode; +}; + +/* PNP helper operations */ +struct io_info { + unsigned int mask, set; +}; + +struct pnp_info { + bool enabled; /* set if we should enable the device */ + struct pnp_device pnp_device; + unsigned int function; /* Must be at least 16 bits (virtual LDNs)! */ +}; + +/* Chip operations */ +struct chip_operations { + void (*enable_dev)(struct device *dev); + void (*init)(void *chip_info); + void (*final)(void *chip_info); + unsigned int initialized : 1; + unsigned int finalized : 1; + const char *name; +}; + +typedef struct superio_ast2400_device { + struct device *dev; + const char *name; + unsigned int enabled : 1; /* set if we should enable the device */ + unsigned int superio_ast2400_efir; /* extended function index register */ + unsigned int superio_ast2400_efdr; /* extended function data register */ + struct chip_operations *chip_ops; + const void *chip_info; +} *superio_device_t; + + +static inline void pnp_enter_conf_mode_a5a5(device_t dev) +{ + outb(0xa5, dev->port); + outb(0xa5, dev->port); +} + +static inline void pnp_exit_conf_mode_aa(device_t dev) +{ + outb(0xaa, dev->port); +} + +/* PNP config mode wrappers */ + +static inline void pnp_enter_conf_mode(device_t dev) +{ + if (dev->ops->ops_pnp_mode) + dev->ops->ops_pnp_mode->enter_conf_mode(dev); +} + +static inline void pnp_exit_conf_mode(device_t dev) +{ + if (dev->ops->ops_pnp_mode) + dev->ops->ops_pnp_mode->exit_conf_mode(dev); +} + +/* PNP device operations */ +static inline u8 pnp_read_config(device_t dev, u8 reg) +{ + outb(reg, dev->port); + return inb(dev->port + 1); +} + +static inline void pnp_write_config(device_t dev, u8 reg, u8 value) +{ + outb(reg, dev->port); + outb(value, dev->port + 1); +} + +static inline void pnp_set_logical_device(device_t dev) +{ + pnp_write_config(dev, 0x07, dev->device & 0xff); +// pnp_write_config(dev, 0x07, 0x3); 
+} + +static inline void pnp_set_enable(device_t dev, int enable) +{ + u8 tmp; + + tmp = pnp_read_config(dev, 0x30); + + if (enable) + tmp |= 1; + else + tmp &= ~1; + + pnp_write_config(dev, 0x30, tmp); +} + +#endif /* _ASM_SW64_AST2400_H */ diff --git a/arch/sw_64/include/asm/socket.h b/arch/sw_64/include/asm/socket.h new file mode 100644 index 000000000000..e87043467775 --- /dev/null +++ b/arch/sw_64/include/asm/socket.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SOCKET_H +#define _ASM_SW64_SOCKET_H + +#include + +/* O_NONBLOCK clashes with the bits used for socket types. Therefore we + * have to define SOCK_NONBLOCK to a different value here. + */ +#define SOCK_NONBLOCK 0x40000000 +#endif /* _ASM_SW64_SOCKET_H */ diff --git a/arch/sw_64/include/uapi/asm/fcntl.h b/arch/sw_64/include/uapi/asm/fcntl.h new file mode 100644 index 000000000000..be2daae2cc4d --- /dev/null +++ b/arch/sw_64/include/uapi/asm/fcntl.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_FCNTL_H +#define _UAPI_ASM_SW64_FCNTL_H + +#define O_CREAT 01000 /* not fcntl */ +#define O_TRUNC 02000 /* not fcntl */ +#define O_EXCL 04000 /* not fcntl */ +#define O_NOCTTY 010000 /* not fcntl */ + +#define O_NONBLOCK 00004 +#define O_APPEND 00010 +#define O_DSYNC 040000 /* used to be O_SYNC, see below */ +#define O_DIRECTORY 0100000 /* must be a directory */ +#define O_NOFOLLOW 0200000 /* don't follow links */ +#define O_LARGEFILE 0400000 /* will be set by the kernel on every open */ +#define O_DIRECT 02000000 /* direct disk access */ +#define O_NOATIME 04000000 +#define O_CLOEXEC 010000000 /* set close_on_exec */ +/* + * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using + * the O_SYNC flag. We continue to use the existing numerical value + * for O_DSYNC semantics now, but using the correct symbolic name for it. + * This new value is used to request true Posix O_SYNC semantics. 
It is + * defined in this strange way to make sure applications compiled against + * new headers get at least O_DSYNC semantics on older kernels. + * + * This has the nice side-effect that we can simply test for O_DSYNC + * wherever we do not care if O_DSYNC or O_SYNC is used. + * + * Note: __O_SYNC must never be used directly. + */ +#define __O_SYNC 020000000 +#define O_SYNC (__O_SYNC|O_DSYNC) + +#define O_PATH 040000000 +#define __O_TMPFILE 0100000000 + +#define F_GETLK 7 +#define F_SETLK 8 +#define F_SETLKW 9 + +#define F_SETOWN 5 /* for sockets. */ +#define F_GETOWN 6 /* for sockets. */ +#define F_SETSIG 10 /* for sockets. */ +#define F_GETSIG 11 /* for sockets. */ + +/* for posix fcntl() and lockf() */ +#define F_RDLCK 1 +#define F_WRLCK 2 +#define F_UNLCK 8 + +/* for old implementation of bsd flock () */ +#define F_EXLCK 16 /* or 3 */ +#define F_SHLCK 32 /* or 4 */ + +#include + +#endif /* _UAPI_ASM_SW64_FCNTL_H */ diff --git a/arch/sw_64/include/uapi/asm/ioctl.h b/arch/sw_64/include/uapi/asm/ioctl.h new file mode 100644 index 000000000000..fb5267b034fc --- /dev/null +++ b/arch/sw_64/include/uapi/asm/ioctl.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_IOCTL_H +#define _UAPI_ASM_SW64_IOCTL_H + +#define _IOC_SIZEBITS 13 +#define _IOC_DIRBITS 3 + +/* + * Direction bits _IOC_NONE could be 0, but legacy version gives it a bit. + * And this turns out useful to catch old ioctl numbers in header files for + * us. 
+ */ +#define _IOC_NONE 1U +#define _IOC_READ 2U +#define _IOC_WRITE 4U + +#include + +#endif /* _UAPI_ASM_SW64_IOCTL_H */ diff --git a/arch/sw_64/include/uapi/asm/ioctls.h b/arch/sw_64/include/uapi/asm/ioctls.h new file mode 100644 index 000000000000..36a7fc205aa7 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/ioctls.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_IOCTLS_H +#define _UAPI_ASM_SW64_IOCTLS_H + +#include + +#define FIOCLEX _IO('f', 1) +#define FIONCLEX _IO('f', 2) +#define FIOASYNC _IOW('f', 125, int) +#define FIONBIO _IOW('f', 126, int) +#define FIONREAD _IOR('f', 127, int) +#define TIOCINQ FIONREAD +#define FIOQSIZE _IOR('f', 128, loff_t) + +#define TIOCGETP _IOR('t', 8, struct sgttyb) +#define TIOCSETP _IOW('t', 9, struct sgttyb) +#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */ + +#define TIOCSETC _IOW('t', 17, struct tchars) +#define TIOCGETC _IOR('t', 18, struct tchars) +#define TCGETS _IOR('t', 19, struct termios) +#define TCSETS _IOW('t', 20, struct termios) +#define TCSETSW _IOW('t', 21, struct termios) +#define TCSETSF _IOW('t', 22, struct termios) + +#define TCGETA _IOR('t', 23, struct termio) +#define TCSETA _IOW('t', 24, struct termio) +#define TCSETAW _IOW('t', 25, struct termio) +#define TCSETAF _IOW('t', 28, struct termio) + +#define TCSBRK _IO('t', 29) +#define TCXONC _IO('t', 30) +#define TCFLSH _IO('t', 31) + +#define TCGETS2 _IOR('T', 42, struct termios2) +#define TCSETS2 _IOW('T', 43, struct termios2) +#define TCSETSW2 _IOW('T', 44, struct termios2) +#define TCSETSF2 _IOW('T', 45, struct termios2) + +#define TIOCSWINSZ _IOW('t', 103, struct winsize) +#define TIOCGWINSZ _IOR('t', 104, struct winsize) +#define TIOCSTART _IO('t', 110) /* start output, like ^Q */ +#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ +#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ + +#define TIOCGLTC _IOR('t', 116, struct ltchars) +#define TIOCSLTC 
_IOW('t', 117, struct ltchars) +#define TIOCSPGRP _IOW('t', 118, int) +#define TIOCGPGRP _IOR('t', 119, int) + +#define TIOCEXCL 0x540C +#define TIOCNXCL 0x540D +#define TIOCSCTTY 0x540E + +#define TIOCSTI 0x5412 +#define TIOCMGET 0x5415 +#define TIOCMBIS 0x5416 +#define TIOCMBIC 0x5417 +#define TIOCMSET 0x5418 +#define TIOCM_LE 0x001 +#define TIOCM_DTR 0x002 +#define TIOCM_RTS 0x004 +#define TIOCM_ST 0x008 +#define TIOCM_SR 0x010 +#define TIOCM_CTS 0x020 +#define TIOCM_CAR 0x040 +#define TIOCM_RNG 0x080 +#define TIOCM_DSR 0x100 +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RI TIOCM_RNG +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define TIOCM_LOOP 0x8000 + +#define TIOCGSOFTCAR 0x5419 +#define TIOCSSOFTCAR 0x541A +#define TIOCLINUX 0x541C +#define TIOCCONS 0x541D +#define TIOCGSERIAL 0x541E +#define TIOCSSERIAL 0x541F +#define TIOCPKT 0x5420 +#define TIOCPKT_DATA 0 +#define TIOCPKT_FLUSHREAD 1 +#define TIOCPKT_FLUSHWRITE 2 +#define TIOCPKT_STOP 4 +#define TIOCPKT_START 8 +#define TIOCPKT_NOSTOP 16 +#define TIOCPKT_DOSTOP 32 +#define TIOCPKT_IOCTL 64 + + +#define TIOCNOTTY 0x5422 +#define TIOCSETD 0x5423 +#define TIOCGETD 0x5424 +#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TIOCSBRK 0x5427 /* BSD compatibility */ +#define TIOCCBRK 0x5428 /* BSD compatibility */ +#define TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TIOCGRS485 _IOR('T', 0x2E, struct serial_rs485) +#define TIOCSRS485 _IOWR('T', 0x2F, struct serial_rs485) +#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */ +#define TIOCSIG _IOW('T', 0x36, int) /* Generate signal on Pty slave */ +#define TIOCVHANGUP 0x5437 +#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */ +#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */ +#define TIOCGEXCL _IOR('T', 
0x40, int) /* Get exclusive mode state */ +#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */ +#define TIOCGISO7816 _IOR('T', 0x42, struct serial_iso7816) +#define TIOCSISO7816 _IOWR('T', 0x43, struct serial_iso7816) + +#define TIOCSERCONFIG 0x5453 +#define TIOCSERGWILD 0x5454 +#define TIOCSERSWILD 0x5455 +#define TIOCGLCKTRMIOS 0x5456 +#define TIOCSLCKTRMIOS 0x5457 +#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TIOCSERGETLSR 0x5459 /* Get line status register */ +/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ +#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ +#define TIOCSERGETMULTI 0x545A /* Get multiport config */ +#define TIOCSERSETMULTI 0x545B /* Set multiport config */ + +#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ +#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ + +#endif /* _UAPI_ASM_SW64_IOCTLS_H */ diff --git a/arch/sw_64/include/uapi/asm/resource.h b/arch/sw_64/include/uapi/asm/resource.h new file mode 100644 index 000000000000..2e1ce8f6ee64 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/resource.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_RESOURCE_H +#define _UAPI_ASM_SW64_RESOURCE_H + +/* + * SW-64/Linux-specific ordering of these four resource limit IDs, + * the rest comes from the generic header: + */ +#define RLIMIT_NOFILE 6 /* max number of open files */ +#define RLIMIT_AS 7 /* address space limit */ +#define RLIMIT_NPROC 8 /* max number of processes */ +#define RLIMIT_MEMLOCK 9 /* max locked-in-memory address space */ + +#include + +#endif /* _UAPI_ASM_SW64_RESOURCE_H */ diff --git a/arch/sw_64/include/uapi/asm/socket.h b/arch/sw_64/include/uapi/asm/socket.h new file mode 100644 index 000000000000..1094d11fff5b --- /dev/null +++ b/arch/sw_64/include/uapi/asm/socket.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef 
_UAPI_ASM_SW64_SOCKET_H +#define _UAPI_ASM_SW64_SOCKET_H + +#include +#include + +/* For setsockopt(2) */ +/* + * Note: we only bother about making the SOL_SOCKET options + * same as legacy, as that's all that "normal" programs are + * likely to set. We don't necessarily want to be binary + * compatible with _everything_. + */ +#define SOL_SOCKET 0xffff + +#define SO_DEBUG 0x0001 +#define SO_REUSEADDR 0x0004 +#define SO_KEEPALIVE 0x0008 +#define SO_DONTROUTE 0x0010 +#define SO_BROADCAST 0x0020 +#define SO_LINGER 0x0080 +#define SO_OOBINLINE 0x0100 +#define SO_REUSEPORT 0x0200 + +#define SO_TYPE 0x1008 +#define SO_ERROR 0x1007 +#define SO_SNDBUF 0x1001 +#define SO_RCVBUF 0x1002 +#define SO_SNDBUFFORCE 0x100a +#define SO_RCVBUFFORCE 0x100b +#define SO_RCVLOWAT 0x1010 +#define SO_SNDLOWAT 0x1011 +#define SO_RCVTIMEO_OLD 0x1012 +#define SO_SNDTIMEO_OLD 0x1013 +#define SO_ACCEPTCONN 0x1014 +#define SO_PROTOCOL 0x1028 +#define SO_DOMAIN 0x1029 + +/* linux-specific, might as well be the same as on i386 */ +#define SO_NO_CHECK 11 +#define SO_PRIORITY 12 +#define SO_BSDCOMPAT 14 + +#define SO_PASSCRED 17 +#define SO_PEERCRED 18 +#define SO_BINDTODEVICE 25 + +/* Socket filtering */ +#define SO_ATTACH_FILTER 26 +#define SO_DETACH_FILTER 27 +#define SO_GET_FILTER SO_ATTACH_FILTER + +#define SO_PEERNAME 28 + +#define SO_PEERSEC 30 +#define SO_PASSSEC 34 + +/* Security levels - as per NRL IPv6 - don't actually do anything */ +#define SO_SECURITY_AUTHENTICATION 19 +#define SO_SECURITY_ENCRYPTION_TRANSPORT 20 +#define SO_SECURITY_ENCRYPTION_NETWORK 21 + +#define SO_MARK 36 + +#define SO_RXQ_OVFL 40 + +#define SO_WIFI_STATUS 41 +#define SCM_WIFI_STATUS SO_WIFI_STATUS +#define SO_PEEK_OFF 42 + +/* Instruct lower device to use last 4-bytes of skb data as FCS */ +#define SO_NOFCS 43 + +#define SO_LOCK_FILTER 44 +#define SO_SELECT_ERR_QUEUE 45 +#define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 +#define SO_BPF_EXTENSIONS 48 +#define SO_INCOMING_CPU 49 +#define SO_ATTACH_BPF 50 
+#define SO_DETACH_BPF SO_DETACH_FILTER + +#define SO_ATTACH_REUSEPORT_CBPF 51 +#define SO_ATTACH_REUSEPORT_EBPF 52 + +#define SO_CNX_ADVICE 53 + +#define SCM_TIMESTAMPING_OPT_STATS 54 + +#define SO_MEMINFO 55 + +#define SO_INCOMING_NAPI_ID 56 + +#define SO_COOKIE 57 + +#define SCM_TIMESTAMPING_PKTINFO 58 + +#define SO_PEERGROUPS 59 + +#define SO_ZEROCOPY 60 + +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + +#define SO_BINDTOIFINDEX 62 + +#define SO_TIMESTAMP_OLD 29 +#define SO_TIMESTAMPNS_OLD 35 +#define SO_TIMESTAMPING_OLD 37 + +#define SO_TIMESTAMP_NEW 63 +#define SO_TIMESTAMPNS_NEW 64 +#define SO_TIMESTAMPING_NEW 65 + +#define SO_RCVTIMEO_NEW 66 +#define SO_SNDTIMEO_NEW 67 + +#define SO_DETACH_REUSEPORT_BPF 68 + +#define SO_PREFER_BUSY_POLL 69 +#define SO_BUSY_POLL_BUDGET 70 + +#define SO_NETNS_COOKIE 71 + +#define SO_BUF_LOCK 72 + +#define SO_RESERVE_MEM 73 + +#define SO_TXREHASH 74 + +#define SO_RCVMARK 75 + +#define SO_PASSPIDFD 76 +#define SO_PEERPIDFD 77 + +#if !defined(__KERNEL__) + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD + +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? 
SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + +#endif /* _UAPI_ASM_SW64_SOCKET_H */ diff --git a/arch/sw_64/include/uapi/asm/sockios.h b/arch/sw_64/include/uapi/asm/sockios.h new file mode 100644 index 000000000000..88e89dcf8300 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/sockios.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SOCKIOS_H +#define _UAPI_ASM_SW64_SOCKIOS_H + +/* Socket-level I/O control calls. */ + +#define FIOGETOWN _IOR('f', 123, int) +#define FIOSETOWN _IOW('f', 124, int) + +#define SIOCATMARK _IOR('s', 7, int) +#define SIOCSPGRP _IOW('s', 8, pid_t) +#define SIOCGPGRP _IOR('s', 9, pid_t) + +#define SIOCGSTAMP_OLD 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS_OLD 0x8907 /* Get stamp (timespec) */ + +#endif /* _UAPI_ASM_SW64_SOCKIOS_H */ diff --git a/arch/sw_64/include/uapi/asm/stat.h b/arch/sw_64/include/uapi/asm/stat.h new file mode 100644 index 000000000000..677a75f1cf5b --- /dev/null +++ b/arch/sw_64/include/uapi/asm/stat.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_STAT_H +#define _UAPI_ASM_SW64_STAT_H + +struct stat { + unsigned int st_dev; + unsigned int st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; + unsigned int st_rdev; + long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; + unsigned int st_blksize; + unsigned int st_blocks; + unsigned int st_flags; + unsigned int st_gen; +}; + +/* The stat64 structure increases the size of dev_t, blkcnt_t, adds + * nanosecond resolution times, and padding for expansion. 
+ */ + +struct stat64 { + unsigned long st_dev; + unsigned long st_ino; + unsigned long st_rdev; + long st_size; + unsigned long st_blocks; + + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int st_blksize; + unsigned int st_nlink; + unsigned int __pad0; + + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + long __unused[3]; +}; + +#endif /* _UAPI_ASM_SW64_STAT_H */ diff --git a/arch/sw_64/include/uapi/asm/termbits.h b/arch/sw_64/include/uapi/asm/termbits.h new file mode 100644 index 000000000000..a71aaf33c26c --- /dev/null +++ b/arch/sw_64/include/uapi/asm/termbits.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_TERMBITS_H +#define _UAPI_ASM_SW64_TERMBITS_H + +#include + +typedef unsigned int tcflag_t; + +/* + * termios type and macro definitions. Be careful about adding stuff + * to this file since it's used in GNU libc and there are strict rules + * concerning namespace pollution. 
+ */ + +#define NCCS 19 +struct termios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* SW64 has identical termios and termios2 */ + +struct termios2 { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* SW64 has matching termios and ktermios */ + +struct ktermios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* c_cc characters */ +#define VEOF 0 +#define VEOL 1 +#define VEOL2 2 +#define VERASE 3 +#define VWERASE 4 +#define VKILL 5 +#define VREPRINT 6 +#define VSWTC 7 +#define VINTR 8 +#define VQUIT 9 +#define VSUSP 10 +#define VSTART 12 +#define VSTOP 13 +#define VLNEXT 14 +#define VDISCARD 15 +#define VMIN 16 +#define VTIME 17 + +/* c_iflag bits */ +#define IXON 0x0200 +#define IXOFF 0x0400 +#define IUCLC 0x1000 +#define IMAXBEL 0x2000 +#define IUTF8 0x4000 + +/* c_oflag bits */ +#define ONLCR 0x00002 +#define OLCUC 0x00004 +#define NLDLY 0x00300 +#define NL0 0x00000 +#define NL1 0x00100 +#define NL2 0x00200 +#define NL3 0x00300 +#define TABDLY 0x00c00 +#define TAB0 0x00000 +#define TAB1 0x00400 +#define TAB2 0x00800 +#define TAB3 0x00c00 +#define CRDLY 
0x03000 +#define CR0 0x00000 +#define CR1 0x01000 +#define CR2 0x02000 +#define CR3 0x03000 +#define FFDLY 0x04000 +#define FF0 0x00000 +#define FF1 0x04000 +#define BSDLY 0x08000 +#define BS0 0x00000 +#define BS1 0x08000 +#define VTDLY 0x10000 +#define VT0 0x00000 +#define VT1 0x10000 +/* + * Should be equivalent to TAB3, see description of TAB3 in + * POSIX.1-2008, Ch. 11.2.3 "Output Modes" + */ +#define XTABS TAB3 + +/* c_cflag bit meaning */ +#define CBAUD 0x0000001f +#define CBAUDEX 0x00000000 +#define BOTHER 0x0000001f +#define B57600 0x00000010 +#define B115200 0x00000011 +#define B230400 0x00000012 +#define B460800 0x00000013 +#define B500000 0x00000014 +#define B576000 0x00000015 +#define B921600 0x00000016 +#define B1000000 0x00000017 +#define B1152000 0x00000018 +#define B1500000 0x00000019 +#define B2000000 0x0000001a +#define B2500000 0x0000001b +#define B3000000 0x0000001c +#define B3500000 0x0000001d +#define B4000000 0x0000001e +#define CSIZE 0x00000300 +#define CS5 0x00000000 +#define CS6 0x00000100 +#define CS7 0x00000200 +#define CS8 0x00000300 +#define CSTOPB 0x00000400 +#define CREAD 0x00000800 +#define PARENB 0x00001000 +#define PARODD 0x00002000 +#define HUPCL 0x00004000 +#define CLOCAL 0x00008000 +#define CIBAUD 0x001f0000 + +/* c_lflag bits */ +#define ISIG 0x00000080 +#define ICANON 0x00000100 +#define XCASE 0x00004000 +#define ECHO 0x00000008 +#define ECHOE 0x00000002 +#define ECHOK 0x00000004 +#define ECHONL 0x00000010 +#define NOFLSH 0x80000000 +#define TOSTOP 0x00400000 +#define ECHOCTL 0x00000040 +#define ECHOPRT 0x00000020 +#define ECHOKE 0x00000001 +#define FLUSHO 0x00800000 +#define PENDIN 0x20000000 +#define IEXTEN 0x00000400 +#define EXTPROC 0x10000000 + +/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. 
*/ +#define TCSANOW 0 +#define TCSADRAIN 1 +#define TCSAFLUSH 2 + +#endif /* _UAPI_ASM_SW64_TERMBITS_H */ diff --git a/arch/sw_64/include/uapi/asm/termios.h b/arch/sw_64/include/uapi/asm/termios.h new file mode 100644 index 000000000000..62f4b40551b2 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/termios.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_TERMIOS_H +#define _UAPI_ASM_SW64_TERMIOS_H + +#include +#include + +struct sgttyb { + char sg_ispeed; + char sg_ospeed; + char sg_erase; + char sg_kill; + short sg_flags; +}; + +struct tchars { + char t_intrc; + char t_quitc; + char t_startc; + char t_stopc; + char t_eofc; + char t_brkc; +}; + +struct ltchars { + char t_suspc; + char t_dsuspc; + char t_rprntc; + char t_flushc; + char t_werasc; + char t_lnextc; +}; + +struct winsize { + unsigned short ws_row; + unsigned short ws_col; + unsigned short ws_xpixel; + unsigned short ws_ypixel; +}; + +#define NCC 8 +struct termio { + unsigned short c_iflag; /* input mode flags */ + unsigned short c_oflag; /* output mode flags */ + unsigned short c_cflag; /* control mode flags */ + unsigned short c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[NCC]; /* control characters */ +}; + +/* + * c_cc characters in the termio structure. Oh, how I love being + * backwardly compatible. Notice that character 4 and 5 are + * interpreted differently depending on whether ICANON is set in + * c_lflag. If it's set, they are used as _VEOF and _VEOL, otherwise + * as _VMIN and V_TIME. This is for compatibility with sysV)... 
+ */ +#define _VINTR 0 +#define _VQUIT 1 +#define _VERASE 2 +#define _VKILL 3 +#define _VEOF 4 +#define _VMIN 4 +#define _VEOL 5 +#define _VTIME 5 +#define _VEOL2 6 +#define _VSWTC 7 + + +#endif /* _UAPI_ASM_SW64_TERMIOS_H */ -- Gitee From 70b7ccbb44e0f5992e15906779a77ade30c12c1c Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:02 +0800 Subject: [PATCH 0278/2138] anolis: sw64: add boot and setup routines ANBZ: #4688 Add basic boot, setup and reset routines for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/platform.h | 32 + arch/sw_64/include/asm/setup.h | 51 ++ arch/sw_64/include/asm/sw64_init.h | 50 ++ arch/sw_64/include/uapi/asm/bootparam.h | 22 + arch/sw_64/kernel/cacheinfo.c | 99 +++ arch/sw_64/kernel/chip_setup.c | 245 ++++++ arch/sw_64/kernel/early_init.c | 11 + arch/sw_64/kernel/head.S | 112 +++ arch/sw_64/kernel/hmcall.c | 131 +++ arch/sw_64/kernel/reset.c | 120 +++ arch/sw_64/kernel/setup.c | 1061 +++++++++++++++++++++++ 11 files changed, 1934 insertions(+) create mode 100644 arch/sw_64/include/asm/platform.h create mode 100644 arch/sw_64/include/asm/setup.h create mode 100644 arch/sw_64/include/asm/sw64_init.h create mode 100644 arch/sw_64/include/uapi/asm/bootparam.h create mode 100644 arch/sw_64/kernel/cacheinfo.c create mode 100644 arch/sw_64/kernel/chip_setup.c create mode 100644 arch/sw_64/kernel/early_init.c create mode 100644 arch/sw_64/kernel/head.S create mode 100644 arch/sw_64/kernel/hmcall.c create mode 100644 arch/sw_64/kernel/reset.c create mode 100644 arch/sw_64/kernel/setup.c diff --git a/arch/sw_64/include/asm/platform.h b/arch/sw_64/include/asm/platform.h new file mode 100644 index 000000000000..ad54cdc772e1 --- /dev/null +++ b/arch/sw_64/include/asm/platform.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PLATFORM_H +#define 
_ASM_SW64_PLATFORM_H + +#include +#if defined(CONFIG_UNCORE_XUELANG) +#include +#elif defined(CONFIG_UNCORE_JUNZHANG) +#include +#endif + +#ifdef CONFIG_EFI +#define BIOS_VERSION_GUID EFI_GUID(0xc47a23c3, 0xcebb, 0x4cc9, 0xa5, 0xe2, 0xde, 0xd0, 0x8f, 0xe4, 0x20, 0xb5) + +#define BIOS_SUPPORT_RESET_CLALLBACK(bios_version) ((bios_version) != NULL) + +extern unsigned long bios_version; + +#endif + +extern struct boot_params *sunway_boot_params; + +extern void sw64_halt(void); +extern void sw64_poweroff(void); +extern void sw64_restart(void); +extern void (*pm_restart)(void); +extern void (*pm_halt)(void); +extern int i2c_set_adapter(void); +extern void cpld_write(uint8_t slave_addr, uint8_t reg, uint8_t data); +extern void fix_jm585_reset(void); + +#endif /* _ASM_SW64_PLATFORM_H */ diff --git a/arch/sw_64/include/asm/setup.h b/arch/sw_64/include/asm/setup.h new file mode 100644 index 000000000000..2d557b349555 --- /dev/null +++ b/arch/sw_64/include/asm/setup.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SETUP_H +#define _ASM_SW64_SETUP_H + +#include + +/* + * We leave one page for the initial stack page, and one page for + * the initial process structure. Also, the console eats 3 MB for + * the initial bootloader (one of which we can reclaim later). + */ +#define BOOT_PCB 0x20000000 +#define BOOT_ADDR 0x20000000 +/* Remove when official MILO sources have ELF support: */ +#define BOOT_SIZE (16 * 1024) + +#define KERNEL_START_PHYS CONFIG_PHYSICAL_START +#define KERNEL_START (__START_KERNEL_map + CONFIG_PHYSICAL_START) + +/* INIT_STACK may be used for merging lwk to kernel*/ +#define INIT_STACK (KERNEL_START + 0x02000) + +/* + * This is setup by the secondary bootstrap loader. Because + * the zero page is zeroed out as soon as the vm system is + * initialized, we need to copy things out into a more permanent + * place. 
+ */ +#define PARAM (KERNEL_START + 0x0A000) +#define COMMAND_LINE ((char *)(KERNEL_START + 0x0B000)) +#define INITRD_START (*(unsigned long *)(PARAM + 0x100)) +#define INITRD_SIZE (*(unsigned long *)(PARAM + 0x108)) +#define DTB_START (*(unsigned long *)(PARAM + 0x118)) + +#define _TEXT_START (KERNEL_START + 0x10000) + +#define COMMAND_LINE_OFF (0x10000UL - 0xB000UL) +#define INITRD_START_OFF (0x10000UL - 0xA100UL) +#define INITRD_SIZE_OFF (0x10000UL - 0xA108UL) + +/* Motherboard Configuration Tables */ +#define MB_CONFIG_START 0x908000 +#define MB_MCLK (MB_CONFIG_START + 0x1) +#define MB_EXTCLK (MB_CONFIG_START + 0x11) + +#ifndef __ASSEMBLY__ +#include +extern struct boot_params *sunway_boot_params; +#endif + +#endif /* _ASM_SW64_SETUP_H */ diff --git a/arch/sw_64/include/asm/sw64_init.h b/arch/sw_64/include/asm/sw64_init.h new file mode 100644 index 000000000000..86ddd2cb65f8 --- /dev/null +++ b/arch/sw_64/include/asm/sw64_init.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SW64_INIT_H +#define _ASM_SW64_SW64_INIT_H + +#include +#include + +#include + +struct sw64_early_init_ops { + void (*setup_core_map)(struct cpumask *cpumask); + unsigned long (*get_node_mem)(int nodeid); + void (*get_smp_info)(void); +}; + +struct sw64_pci_init_ops { + int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); + unsigned long (*get_rc_enable)(unsigned long node); + void (*hose_init)(struct pci_controller *hose); + void (*set_rc_piu)(unsigned long node, unsigned long index); + int (*check_pci_linkup)(unsigned long node, unsigned long index); + void (*set_intx)(unsigned long node, unsigned long index, + unsigned long int_conf); +}; + + +struct sw64_chip_init_ops { + struct sw64_early_init_ops early_init; + struct sw64_pci_init_ops pci_init; + void (*fixup)(void); +}; + +struct sw64_chip_ops { + int (*get_cpu_num)(void); + void (*device_interrupt)(unsigned long irq_info); + void (*suspend)(bool wake); + void (*fixup)(void); +}; + +extern void 
sw64_init_noop(void); +extern void setup_chip_ops(void); +extern struct sw64_chip_ops *sw64_chip; +extern struct sw64_chip_init_ops *sw64_chip_init; +#ifdef CONFIG_PM +extern struct syscore_ops io_syscore_ops; +#endif + +DECLARE_PER_CPU(unsigned long, hard_node_id); + +#endif /* _ASM_SW64_SW64_INIT_H */ diff --git a/arch/sw_64/include/uapi/asm/bootparam.h b/arch/sw_64/include/uapi/asm/bootparam.h new file mode 100644 index 000000000000..6ce75d65e86e --- /dev/null +++ b/arch/sw_64/include/uapi/asm/bootparam.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_BOOTPARAM_H +#define _UAPI_ASM_SW64_BOOTPARAM_H + +#ifndef __ASSEMBLY__ + +#include + +struct boot_params { + __u64 initrd_start; /* logical address of initrd */ + __u64 initrd_size; /* size of initrd */ + __u64 dtb_start; /* logical address of dtb */ + __u64 efi_systab; /* logical address of EFI system table */ + __u64 efi_memmap; /* logical address of EFI memory map */ + __u64 efi_memmap_size; /* size of EFI memory map */ + __u64 efi_memdesc_size; /* size of an EFI memory map descriptor */ + __u64 efi_memdesc_version; /* memory descriptor version */ + __u64 cmdline; /* logical address of cmdline */ +}; +#endif + +#endif /* _UAPI_ASM_SW64_BOOTPARAM_H */ diff --git a/arch/sw_64/kernel/cacheinfo.c b/arch/sw_64/kernel/cacheinfo.c new file mode 100644 index 000000000000..e340c53690a9 --- /dev/null +++ b/arch/sw_64/kernel/cacheinfo.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW64 cacheinfo support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +#include + +/* Populates leaf and increments to next leaf */ +#define populate_cache(cache, leaf, c_level, c_type, c_id) \ +do { \ + leaf->id = c_id; \ + leaf->attributes = CACHE_ID; \ + leaf->type = c_type; \ + leaf->level = c_level; \ + leaf->coherency_line_size = c->cache.linesz; \ + leaf->number_of_sets = c->cache.sets; \ + leaf->ways_of_associativity = c->cache.ways; \ + leaf->size = c->cache.size; \ + leaf++; \ +} while (0) + +int init_cache_level(unsigned int cpu) +{ + struct cpuinfo_sw64 *c = &cpu_data[cpu]; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + int levels = 0, leaves = 0; + + /* + * If Dcache is not set, we assume the cache structures + * are not properly initialized. + */ + if (c->dcache.size) + levels += 1; + else + return -ENOENT; + + + leaves += (c->icache.size) ? 
2 : 1; + + if (c->scache.size) { + levels++; + leaves++; + } + + if (c->tcache.size) { + levels++; + leaves++; + } + + this_cpu_ci->num_levels = levels; + this_cpu_ci->num_leaves = leaves; + return 0; +} + +int populate_cache_leaves(unsigned int cpu) +{ + struct cpuinfo_sw64 *c = &cpu_data[cpu]; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cacheinfo *this_leaf = this_cpu_ci->info_list; + struct cpu_topology *topo = &cpu_topology[cpu]; + + if (c->icache.size) { + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA, cpu); + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST, cpu); + + } else { + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED, cpu); + } + + if (c->scache.size) { + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED, cpu); + } + + if (c->tcache.size) { + cpumask_copy(&this_leaf->shared_cpu_map, topology_llc_cpumask(cpu)); + populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED, topo->package_id); + } + + this_cpu_ci->cpu_map_populated = true; + + return 0; +} diff --git a/arch/sw_64/kernel/chip_setup.c b/arch/sw_64/kernel/chip_setup.c new file mode 100644 index 000000000000..b8c359db2ef6 --- /dev/null +++ b/arch/sw_64/kernel/chip_setup.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include +#include + +struct sw64_chip_ops *sw64_chip; +struct sw64_chip_init_ops *sw64_chip_init; + +static int get_cpu_nums(void) +{ + if (is_guest_or_emul()) + return 1; + + return __get_cpu_nums(); +} + +static unsigned long __init get_node_mem(int nodeid) +{ + + if (is_guest_or_emul()) + return *(unsigned long *)MMSIZE & MMSIZE_MASK; + + return __get_node_mem(nodeid); +} + +static void __init setup_core_map(struct cpumask *cpumask) +{ + int i, j, cpu_num, cpuid, max_cores_per_cpu; + 
unsigned long coreonline; + + cpu_num = get_cpu_nums(); + cpuid = 0; + for (i = 0; i < cpu_num; i++) { + coreonline = sw64_io_read(i, CORE_ONLINE); + max_cores_per_cpu = MAX_CORES_PER_CPU; + + if (is_guest_or_emul()) + max_cores_per_cpu = 64; + + for (j = 0; j < max_cores_per_cpu; j++) { + if (coreonline & (1UL << j)) { + __cpu_to_rcid[cpuid] = (i << DOMAIN_ID_SHIFT) | (j << CORE_ID_SHIFT); + cpuid++; + } + } + } + + if (is_in_host() && core_is_ht()) { + for (i = 0; i < cpuid; i++) + __cpu_to_rcid[cpuid + i] = __cpu_to_rcid[i] | (1 << THREAD_ID_SHIFT); + + cpuid = cpuid + i; + } + + while (cpuid < NR_CPUS) { + __cpu_to_rcid[cpuid] = -1; + cpuid++; + } +} + +#ifdef CONFIG_PM +static void i2c_srst(void) +{ + sw64_io_write(0, I2C0_SRST_L, 0x0); + sw64_io_write(0, I2C0_SRST_L, 0x1); + + sw64_io_write(0, I2C1_SRST_L, 0x0); + sw64_io_write(0, I2C1_SRST_L, 0x1); + + sw64_io_write(0, I2C2_SRST_L, 0x0); + sw64_io_write(0, I2C2_SRST_L, 0x1); +} + +static void pcie_save(void) +{ + struct pci_controller *hose; + struct piu_saved *piu_save; + unsigned long node, index; + unsigned long i; + + for (hose = hose_head; hose; hose = hose->next) { + piu_save = kzalloc(sizeof(*piu_save), GFP_KERNEL); + + node = hose->node; + index = hose->index; + hose->sysdata = piu_save; + + piu_save->piuconfig0 = read_piu_ior0(node, index, PIUCONFIG0); + piu_save->piuconfig1 = read_piu_ior1(node, index, PIUCONFIG1); + piu_save->epdmabar = read_piu_ior0(node, index, EPDMABAR); + piu_save->msiaddr = read_piu_ior0(node, index, MSIADDR); + + if (IS_ENABLED(CONFIG_UNCORE_XUELANG)) { + for (i = 0; i < 256; i++) { + piu_save->msiconfig[i] = read_piu_ior0(node, index, + MSICONFIG0 + (i << 7)); + } + } + + piu_save->iommuexcpt_ctrl = read_piu_ior0(node, index, IOMMUEXCPT_CTRL); + piu_save->dtbaseaddr = read_piu_ior0(node, index, DTBASEADDR); + + piu_save->intaconfig = read_piu_ior0(node, index, INTACONFIG); + piu_save->intbconfig = read_piu_ior0(node, index, INTBCONFIG); + piu_save->intcconfig = 
read_piu_ior0(node, index, INTCCONFIG); + piu_save->intdconfig = read_piu_ior0(node, index, INTDCONFIG); + piu_save->pmeintconfig = read_piu_ior0(node, index, PMEINTCONFIG); + piu_save->aererrintconfig = read_piu_ior0(node, index, AERERRINTCONFIG); + piu_save->hpintconfig = read_piu_ior0(node, index, HPINTCONFIG); + + } +} + +static void pcie_restore(void) +{ + struct pci_controller *hose; + struct piu_saved *piu_save; + unsigned long node, index; + u32 rc_misc_ctrl; + unsigned int value; + unsigned long i; + + for (hose = hose_head; hose; hose = hose->next) { + node = hose->node; + index = hose->index; + piu_save = hose->sysdata; + + write_piu_ior0(node, index, PIUCONFIG0, piu_save->piuconfig0); + write_piu_ior1(node, index, PIUCONFIG1, piu_save->piuconfig1); + write_piu_ior0(node, index, EPDMABAR, piu_save->epdmabar); + write_piu_ior0(node, index, MSIADDR, piu_save->msiaddr); + + if (IS_ENABLED(CONFIG_UNCORE_XUELANG)) { + for (i = 0; i < 256; i++) { + write_piu_ior0(node, index, MSICONFIG0 + (i << 7), + piu_save->msiconfig[i]); + } + } + + write_piu_ior0(node, index, IOMMUEXCPT_CTRL, piu_save->iommuexcpt_ctrl); + write_piu_ior0(node, index, DTBASEADDR, piu_save->dtbaseaddr); + + write_piu_ior0(node, index, INTACONFIG, piu_save->intaconfig); + write_piu_ior0(node, index, INTBCONFIG, piu_save->intbconfig); + write_piu_ior0(node, index, INTCCONFIG, piu_save->intcconfig); + write_piu_ior0(node, index, INTDCONFIG, piu_save->intdconfig); + write_piu_ior0(node, index, PMEINTCONFIG, piu_save->pmeintconfig); + write_piu_ior0(node, index, AERERRINTCONFIG, piu_save->aererrintconfig); + write_piu_ior0(node, index, HPINTCONFIG, piu_save->hpintconfig); + + /* Enable DBI_RO_WR_EN */ + rc_misc_ctrl = read_rc_conf(node, index, RC_MISC_CONTROL_1); + write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl | 0x1); + + /* Fix up DEVICE_ID_VENDOR_ID register */ + value = (PCI_DEVICE_ID_SW64_ROOT_BRIDGE << 16) | PCI_VENDOR_ID_JN; + write_rc_conf(node, index, RC_VENDOR_ID, value); + 
+ /* Set PCI-E root class code */ + value = read_rc_conf(node, index, RC_REVISION_ID); + write_rc_conf(node, index, RC_REVISION_ID, (PCI_CLASS_BRIDGE_HOST << 16) | value); + + /* Disable DBI_RO_WR_EN */ + write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl); + } + +} + +static unsigned long saved_dvc_int, saved_long_time; + +static inline void intpu_save(void) +{ + switch (cpu_desc.model) { + case CPU_SW831: + saved_long_time = __io_read_longtime(0); + default: + break; + } +} + +static inline void intpu_restore(void) +{ + switch (cpu_desc.model) { + case CPU_SW831: + __io_write_longtime(0, saved_long_time); + __io_write_longtime_start_en(0, 0x1); + break; + default: + pr_info("long time start is disable!"); + break; + } +} + +static inline void spbu_save(void) +{ + saved_dvc_int = sw64_io_read(0, MCU_DVC_INT_EN); +} + +static inline void spbu_restore(void) +{ + i2c_srst(); + sw64_io_write(0, MCU_DVC_INT_EN, saved_dvc_int); +} + +static int io_suspend(void) +{ + spbu_save(); + intpu_save(); + pcie_save(); + + return 0; +} + +static void io_resume(void) +{ + pcie_restore(); + intpu_restore(); + spbu_restore(); +} +#endif /* CONFIG_PM */ + +static struct sw64_chip_init_ops chip_init_ops = { + .early_init = { + .setup_core_map = setup_core_map, + .get_node_mem = get_node_mem, + }, +}; + +static struct sw64_chip_ops chip_ops = { + .get_cpu_num = get_cpu_nums, +}; + +void __init setup_chip_ops(void) +{ + sw64_chip_init = &chip_init_ops; + sw64_chip = &chip_ops; + setup_chip_pci_ops(); +#ifdef CONFIG_PM + io_syscore_ops.suspend = io_suspend; + io_syscore_ops.resume = io_resume; +#endif +} diff --git a/arch/sw_64/kernel/early_init.c b/arch/sw_64/kernel/early_init.c new file mode 100644 index 000000000000..2ec7a3e99443 --- /dev/null +++ b/arch/sw_64/kernel/early_init.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include + +asmlinkage __visible void __init sw64_start_kernel(void) +{ + fixup_hmcall(); + save_ktp(); + start_kernel(); +} diff 
--git a/arch/sw_64/kernel/head.S b/arch/sw_64/kernel/head.S new file mode 100644 index 000000000000..fd0fbfbcf5b6 --- /dev/null +++ b/arch/sw_64/kernel/head.S @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * initial boot stuff.. At this point, the bootloader has already + * switched into HMcode, and loaded us at the correct address + * (START_ADDR). So there isn't much left for us to do: just set up + * the kernel global pointer and jump to the kernel entry-point. + */ + +#include +#include +#include +#include + +__HEAD + .globl _stext + .set noreorder + .globl __start + .ent __start +_stext: +__start: + .prologue 0 + br $27, 1f +1: ldgp $29, 0($27) + /* We need to get current_task_info loaded up... */ + ldi $8, init_task + ldl $30, TASK_STACK($8) + /* ... and find our stack ... */ + ldi $30, ASM_THREAD_SIZE($30) + + /* ... and then we can clear bss data. */ + ldi $16, __bss_start + ldi $18, __bss_stop + subl $18, $16, $18 + mov $31, $17 + call $26, __constant_c_memset +#ifdef CONFIG_RELOCATABLE + ldi $30, -8($30) + stl $29, 0($30) + /* Copy kernel and apply the relocations */ + call $26, relocate_kernel + ldl $29, 0($30) + addl $29, $0, $29 + addl $8, $0, $8 + ldi $30, 8($30) + /* Repoint the sp into the new kernel image */ + addl $30, $0, $30 +#endif + /* ... and then we can start the kernel. */ + call $26, sw64_start_kernel + sys_call HMC_halt + .end __start + +#ifdef CONFIG_SMP + .align 3 + .globl __smp_callin + .ent __smp_callin + /* On entry here the PCB of the idle task for this processor + * has been loaded. We've arranged for the tilde_pcb[x] for + * this process to contain the PCBB of the target idle task. + */ +__smp_callin: + .prologue 1 + br $27, 2f # we copy this from above "br $27 1f" +2: ldgp $29, 0($27) # First order of business, load the GP. 
+ + bis $31, $31, $16 # invalidate all TLB with current VPN + sys_call HMC_tbi + +#if defined(CONFIG_SUBARCH_C3B) + sys_call HMC_whami # Get hard cid + ldi $1, __cpu_to_rcid + ldi $2, 0($31) + ldi $4, CONFIG_NR_CPUS +3: ldw $3, 0($1) + cmpeq $3, $0, $3 + bne $3, 4f + addl $1, 4, $1 + addl $2, 1, $2 + cmpeq $2, $4, $5 + bne $5, 5f + br $31, 3b +4: ldi $0, 0($2) +#else + rcid $0 +#endif + + ldi $2, idle_task_pointer + s8addl $0, $2, $2 + ldl $8, 0($2) # Get ksp of idle thread + sys_call HMC_wrktp + + ldl $30, TASK_STACK($8) + ldi $30, ASM_THREAD_SIZE($30) + + call $26, smp_callin +5: + sys_call HMC_halt + .end __smp_callin +#endif /* CONFIG_SMP */ + # + # It is handy, on occasion, to make halt actually just loop. + # Putting it here means we dont have to recompile the whole + # kernel. + # + + .align 3 + .globl halt + .ent halt +halt: + .prologue 0 + sys_call HMC_halt + .end halt diff --git a/arch/sw_64/kernel/hmcall.c b/arch/sw_64/kernel/hmcall.c new file mode 100644 index 000000000000..d2054a930bd7 --- /dev/null +++ b/arch/sw_64/kernel/hmcall.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sw_64/kernel/hmcall.c + * + * Copyright (C) 2022 WXIAT + * Author: He Sheng + */ + +#include +#include + +#define A0(func) (((HMC_##func & 0xFF) >> 6) & 0x1) +#define A1(func) ((((HMC_##func & 0xFF)>>6) & 0x2) >> 1) +#define A2(func) ((HMC_##func & 0x3F) << 7) + +#define T(func) ((A0(func) ^ A1(func)) & 0x1) +#define B0(func) ((T(func) | A0(func)) << 13) +#define B1(func) (((~T(func) & 1) | A1(func)) << 14) + +#define PRI_BASE 0x10000UL + +#define HMCALL_ENTRY(func) (PRI_BASE | B1(func) | B0(func) | A2(func)) + + +static inline void fixup_rdtp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(rdtp)); + + entry[0] = 0x181ffec7; /* pri_rcsr $0, CSR__TID */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_wrtp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrtp)); + + entry[0] = 0x1a1fffc7; /* pri_wcsr $16, CSR__TID */ + entry[1] = 
0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_tbiasid(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(tbisasid)); + + entry[0] = 0x18fffe47; /* pri_rcsr p7, CSR__DTB_PCR*/ + entry[1] = 0x4a05c905; /* sll r16, CSR__DTB_PCR__UPN__S, p5 */ + entry[2] = 0xf89f03ff; /* ldi p4, CSR__DTB_PCR__UPN__M */ + entry[3] = 0x4885c904; /* sll p4, CSR__DTB_PCR__UPN__S, p4 */ + entry[4] = 0x40e40724; /* bic p7, p4, p4 */ + entry[5] = 0x40850745; /* bis p4, p5, p5 */ + entry[6] = 0x18bfff47; /* pri_wcsr p5, CSR__DTB_PCR */ + entry[7] = 0x1a3fff46; /* pri_wcsr r17, CSR__DTB_IS */ + entry[8] = 0x18ffff47; /* pri_wcsr p7, CSR__DTB_PCR */ + entry[9] = 0x4a04e906; /* sll r16, CSR__UPCR_UPN__UPN__S, p6 */ + entry[10] = 0x189ffe22; /* pri_rcsr p4, CSR__UPCR_UPN */ + entry[11] = 0x18dfff22; /* pri_wcsr p6, CSR__UPCR_UPN */ + entry[12] = 0x1a3fff06; /* pri_wcsr r17, CSR__ITB_IS */ + entry[13] = 0x1bffff15; /* pri_wcsr r31, CSR__IC_FLUSH */ + entry[14] = 0x189fff22; /* pri_wcsr p4, CSR__UPCR_UPN */ + entry[15] = 0x1ef00000; /* pri_ret/b p23 */ +} + +static inline void fixup_wrasid(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrasid)); + + entry[0] = 0x18fffe47; /* pri_rcsr p7, CSR__DTB_PCR*/ + entry[1] = 0x4a05c905; /* sll r16, CSR__DTB_PCR__UPN__S, p5 */ + entry[2] = 0xf89f03ff; /* ldi p4, CSR__DTB_PCR__UPN__M */ + entry[3] = 0x4885c904; /* sll p4, CSR__DTB_PCR__UPN__S, p4 */ + entry[4] = 0x40e40724; /* bic p7, p4, p4 */ + entry[5] = 0x40850745; /* bis p4, p5, p5 */ + entry[6] = 0x18bfff47; /* pri_wcsr p5, CSR__DTB_PCR */ + entry[7] = 0x4a04e906; /* sll r16, CSR__UPCR_UPN__UPN__S, p6 */ + entry[8] = 0x18dfff22; /* pri_wcsr p4, CSR__UPCR_UPN */ + entry[9] = 0x1ef00000; /* pri_ret/b p23 */ +} + +static inline void fixup_rdktp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(rdktp)); + + entry[0] = 0x95161000; /* pri_ldl/p $8, VC__KTP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_wrktp(void) +{ + unsigned int *entry = 
__va(HMCALL_ENTRY(wrktp)); + + entry[0] = 0xb5161000; /* pri_stl/p $8, VC__KTP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_rdusp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(rdusp)); + + entry[0] = 0x94161018; /* pri_ldl/p $0, VC__USP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_wrusp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrusp)); + + entry[0] = 0xb6161018; /* pri_stl/p $16, VC__USP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +void __init fixup_hmcall(void) +{ +#if defined(CONFIG_SUBARCH_C3B) + fixup_rdtp(); + fixup_wrtp(); + fixup_tbiasid(); + fixup_wrasid(); + fixup_rdktp(); + fixup_wrktp(); + fixup_rdusp(); + fixup_wrusp(); + imemb(); +#endif +} + +#undef A0 +#undef A1 +#undef A2 +#undef T +#undef B0 +#undef B1 diff --git a/arch/sw_64/kernel/reset.c b/arch/sw_64/kernel/reset.c new file mode 100644 index 000000000000..955339557a7a --- /dev/null +++ b/arch/sw_64/kernel/reset.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Sunway Technology Corporation Limited + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +void fix_jm585_reset(void) +{ + struct pci_dev *pdev; + struct pci_controller *hose; + int val; + + pdev = pci_get_device(PCI_VENDOR_ID_JMICRON, + 0x0585, NULL); + if (pdev) { + hose = pci_bus_to_pci_controller(pdev->bus); + val = read_rc_conf(hose->node, hose->index, + RC_PORT_LINK_CTL); + write_rc_conf(hose->node, hose->index, + RC_PORT_LINK_CTL, val | 0x8); + write_rc_conf(hose->node, hose->index, + RC_PORT_LINK_CTL, val); + } + +} +static void default_halt(void) +{ + local_irq_disable(); + + pr_notice("\n\n** You can safely turn off the power now **\n\n"); + + while (true) + arch_cpu_idle(); +} + +static void default_poweroff(void) +{ + /* No point in taking interrupts anymore. 
*/ + local_irq_disable(); +#ifdef CONFIG_EFI + efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); +#endif + while (true) + arch_cpu_idle(); +} + +static void default_restart(void) +{ + /* No point in taking interrupts anymore. */ + local_irq_disable(); + + fix_jm585_reset(); +#ifdef CONFIG_EFI + if (efi_capsule_pending(NULL)) + efi_reboot(REBOOT_WARM, NULL); + else + efi_reboot(REBOOT_COLD, NULL); +#endif + + while (true) + arch_cpu_idle(); +} + +void (*pm_restart)(void); + +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +void (*pm_halt)(void); + +void machine_halt(void) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + pm_halt(); +} + +void machine_power_off(void) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + pm_power_off(); +} + +void machine_restart(char *command) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + do_kernel_restart(command); + pm_restart(); +} + +static int __init sw64_reboot_setup(void) +{ + pm_restart = default_restart; + pm_power_off = default_poweroff; + pm_halt = default_halt; + + return 0; +} +arch_initcall(sw64_reboot_setup); diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c new file mode 100644 index 000000000000..0c1ddb9b46d7 --- /dev/null +++ b/arch/sw_64/kernel/setup.c @@ -0,0 +1,1061 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Bootup setup stuff. + */ + +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_MAGIC_SYSRQ +#include +#include +#endif +#ifdef CONFIG_DEBUG_FS +#include +#endif +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "proto.h" + +#undef DEBUG_DISCONTIG +#ifdef DEBUG_DISCONTIG +#define DBGDCONT(args...) pr_debug(args) +#else +#define DBGDCONT(args...) 
+#endif + +int __cpu_to_rcid[NR_CPUS]; /* Map logical to physical */ +EXPORT_SYMBOL(__cpu_to_rcid); + +DEFINE_PER_CPU(unsigned long, hard_node_id) = { 0 }; +static DEFINE_PER_CPU(struct cpu, cpu_devices); + +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +struct cma *sw64_kvm_cma; +EXPORT_SYMBOL(sw64_kvm_cma); + +static phys_addr_t kvm_mem_size; +static phys_addr_t kvm_mem_base; + +struct gen_pool *sw64_kvm_pool; +EXPORT_SYMBOL(sw64_kvm_pool); +#endif +#endif + +static inline int phys_addr_valid(unsigned long addr) +{ + /* + * At this point memory probe has not been done such that max_pfn + * and other physical address variables cannot be used, so let's + * roughly judge physical address based on arch specific bit. + */ + return !(addr >> (cpu_desc.pa_bits - 1)); +} + +extern struct atomic_notifier_head panic_notifier_list; +static int sw64_panic_event(struct notifier_block *, unsigned long, void *); +static struct notifier_block sw64_panic_block = { + sw64_panic_event, + NULL, + INT_MAX /* try to do it first */ +}; + +/* the value is IOR: CORE_ONLIE*/ +cpumask_t core_start = CPU_MASK_NONE; + +static struct resource data_resource = { + .name = "Kernel data", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM +}; + +static struct resource code_resource = { + .name = "Kernel code", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM +}; + +static struct resource bss_resource = { + .name = "Kernel bss", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM +}; + +/* A collection of per-processor data. 
*/ +struct cpuinfo_sw64 cpu_data[NR_CPUS]; +EXPORT_SYMBOL(cpu_data); + +DEFINE_STATIC_KEY_TRUE(run_mode_host_key); +DEFINE_STATIC_KEY_FALSE(run_mode_guest_key); +DEFINE_STATIC_KEY_FALSE(run_mode_emul_key); +struct cpu_desc_t cpu_desc; +struct socket_desc_t socket_desc[MAX_NUMSOCKETS]; +int memmap_nr; +struct memmap_entry memmap_map[MAX_NUMMEMMAPS]; +bool memblock_initialized; + +cpumask_t cpu_offline = CPU_MASK_NONE; + +static char command_line[COMMAND_LINE_SIZE] __initdata; +#ifdef CONFIG_CMDLINE_BOOL +static char builtin_cmdline[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; +#endif + +/* boot_params */ +struct boot_params *sunway_boot_params = (struct boot_params *) (PARAM + 0x100); + +/* + * The format of "screen_info" is strange, and due to early + * i386-setup code. This is just enough to make the console + * code think we're on a VGA color display. + */ + +struct screen_info screen_info = { + .orig_x = 0, + .orig_y = 25, + .orig_video_cols = 80, + .orig_video_lines = 25, + .orig_video_isVGA = 1, + .orig_video_points = 16 +}; +EXPORT_SYMBOL(screen_info); + +/* + * Move global data into per-processor storage. + */ +void store_cpu_data(int cpu) +{ + cpu_data[cpu].last_asid = ASID_FIRST_VERSION; +} + +#ifdef CONFIG_KEXEC + +void *kexec_control_page; + +#define KTEXT_MAX KERNEL_IMAGE_SIZE + +static void __init kexec_control_page_init(void) +{ + phys_addr_t addr; + + addr = memblock_phys_alloc_range(KEXEC_CONTROL_PAGE_SIZE, PAGE_SIZE, + 0, KTEXT_MAX); + kexec_control_page = (void *)(__START_KERNEL_map + addr); +} + +/* + * reserve_crashkernel() - reserves memory are for crash kernel + * + * This function reserves memory area given in "crashkernel=" kernel command + * line parameter. The memory reserved is used by a dump capture kernel when + * primary kernel is crashing. 
+ */ +static void __init reserve_crashkernel(void) +{ + unsigned long long crash_size, crash_base; + int ret; + + ret = parse_crashkernel(boot_command_line, mem_desc.size, + &crash_size, &crash_base); + if (ret || !crash_size) + return; + + if (!crash_size) { + pr_warn("size of crash kernel memory unspecified, no memory reserved for crash kernel\n"); + return; + } + if (!crash_base) { + pr_warn("base of crash kernel memory unspecified, no memory reserved for crash kernel\n"); + return; + } + + if (!memblock_is_region_memory(crash_base, crash_size)) + memblock_add(crash_base, crash_size); + + ret = memblock_reserve(crash_base, crash_size); + if (ret < 0) { + pr_warn("crashkernel reservation failed - memory is in use [mem %#018llx-%#018llx]\n", + crash_base, crash_base + crash_size - 1); + return; + } + + pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crash_base >> 20), + (unsigned long)(mem_desc.size >> 20)); + + ret = add_memmap_region(crash_base, crash_size, memmap_crashkernel); + if (ret) + pr_warn("Add crash kernel area [mem %#018llx-%#018llx] to memmap region failed.\n", + crash_base, crash_base + crash_size - 1); + + if (crash_base >= KERNEL_IMAGE_SIZE) + pr_warn("Crash base should be less than %#x\n", KERNEL_IMAGE_SIZE); + + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; + insert_resource(&iomem_resource, &crashk_res); +} +#else /* !defined(CONFIG_KEXEC) */ +static void __init reserve_crashkernel(void) {} +static void __init kexec_control_page_init(void) {} +#endif /* !defined(CONFIG_KEXEC) */ + +/* + * I/O resources inherited from PeeCees. Except for perhaps the + * turbochannel SWs, everyone has these on some sort of SuperIO chip. + * + * ??? If this becomes less standard, move the struct out into the + * machine vector. 
+ */ + +static void __init +reserve_std_resources(void) +{ + static struct resource standard_io_resources[] = { + { .name = "rtc", .start = -1, .end = -1 }, + { .name = "dma1", .start = 0x00, .end = 0x1f }, + { .name = "pic1", .start = 0x20, .end = 0x3f }, + { .name = "timer", .start = 0x40, .end = 0x5f }, + { .name = "keyboard", .start = 0x60, .end = 0x6f }, + { .name = "dma page reg", .start = 0x80, .end = 0x8f }, + { .name = "pic2", .start = 0xa0, .end = 0xbf }, + { .name = "dma2", .start = 0xc0, .end = 0xdf }, + }; + + struct resource *io = &ioport_resource; + size_t i; + + if (hose_head) { + struct pci_controller *hose; + + for (hose = hose_head; hose; hose = hose->next) + if (hose->index == 0) { + io = hose->io_space; + break; + } + } + + /* Fix up for the Jensen's queer RTC placement. */ + standard_io_resources[0].start = RTC_PORT(0); + standard_io_resources[0].end = RTC_PORT(0) + 0x10; + + for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i) + request_resource(io, standard_io_resources+i); +} + +static int __init parse_memmap_one(char *p) +{ + char *oldp; + u64 start_at, mem_size; + int ret; + + if (!p) + return -EINVAL; + + if (!strncmp(p, "exactmap", 8)) { + pr_err("\"memmap=exactmap\" not valid on sw64\n"); + return 0; + } + + oldp = p; + mem_size = memparse(p, &p); + if (p == oldp) + return -EINVAL; + + if (*p == '@') { + pr_err("\"memmap=nn@ss\" invalid on sw64\n"); + } else if (*p == '#') { + pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on sw64\n"); + } else if (*p == '$') { + start_at = memparse(p + 1, &p); + ret = add_memmap_region(start_at, mem_size, memmap_reserved); + if (ret) + return ret; + } else { + return -EINVAL; + } + return *p == '\0' ? 
0 : -EINVAL; +} + +static int __init setup_memmap(char *str) +{ + while (str) { + char *k = strchr(str, ','); + + if (k) + *k++ = 0; + + parse_memmap_one(str); + str = k; + } + + return 0; +} +early_param("memmap", setup_memmap); + +static int __init setup_cpuoffline(char *p) +{ + cpulist_parse(p, &cpu_offline); + cpumask_clear_cpu(0, &cpu_offline); + return 0; +} +early_param("cpuoffline", setup_cpuoffline); + +#ifdef CONFIG_BLK_DEV_INITRD +static void * __init move_initrd(unsigned long mem_limit) +{ + void *start; + unsigned long size; + + size = initrd_end - initrd_start; + start = memblock_alloc_from(PAGE_ALIGN(size), PAGE_SIZE, 0); + if (!start || __pa(start) + size > mem_limit) { + initrd_start = initrd_end = 0; + return NULL; + } + memmove(start, (void *)initrd_start, size); + initrd_start = (unsigned long)start; + initrd_end = initrd_start + size; + pr_info("initrd moved to 0x%px\n", start); + return start; +} +#else +static void * __init move_initrd(unsigned long mem_limit) +{ + return NULL; +} +#endif + +static bool __init memmap_range_valid(phys_addr_t base, phys_addr_t *size) +{ + if (base > memblock_end_of_DRAM()) + return false; + + if ((base + *size) > memblock_end_of_DRAM()) + *size = memblock_end_of_DRAM() - base; + + return true; +} + +void __init process_memmap(void) +{ + static int i; // Make it static so we won't start over again every time. 
+ int ret; + phys_addr_t base, size; + unsigned long dma_end __maybe_unused = (MAX_DMA32_PFN << PAGE_SHIFT); + + if (!memblock_initialized) + return; + + for (; i < memmap_nr; i++) { + base = memmap_map[i].addr; + size = memmap_map[i].size; + switch (memmap_map[i].type) { + case memmap_reserved: + if (!memmap_range_valid(base, &size)) { + pr_err("reserved memmap region [mem %#018llx-%#018llx] beyond end of memory (%#018llx)\n", + base, base + size - 1, memblock_end_of_DRAM()); + } else { + pr_info("reserved memmap region [mem %#018llx-%#018llx]\n", + base, base + size - 1); + ret = memblock_mark_nomap(base, size); + if (ret) + pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", + base, base + size - 1); + else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (base < dma_end)) + pr_warn("memmap region [mem %#018llx-%#018llx] overlapped with DMA32 region\n", + base, base + size - 1); + } + break; + case memmap_pci: + if (!memmap_range_valid(base, &size)) { + pr_err("pci memmap region [mem %#018llx-%#018llx] beyond end of memory (%#018llx)\n", + base, base + size - 1, memblock_end_of_DRAM()); + } else { + pr_info("pci memmap region [mem %#018llx-%#018llx]\n", + base, base + size - 1); + ret = memblock_mark_nomap(base, size); + if (ret) + pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", + base, base + size - 1); + } + break; + case memmap_initrd: + if ((base + size) > memblock_end_of_DRAM()) { + phys_addr_t old_base = base; + + base = (unsigned long) move_initrd(memblock_end_of_DRAM()); + if (!base) { + pr_err("initrd memmap region [mem %#018llx-%#018llx] extends beyond end of memory (%#018llx)\n", + old_base, old_base + size - 1, memblock_end_of_DRAM()); + break; + } + memmap_map[i].addr = base; + } + pr_info("initrd memmap region [mem %#018llx-%#018llx]\n", base, base + size - 1); + ret = memblock_reserve(base, size); + if (ret) + pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", + base, base + size - 1); + break; + case memmap_kvm: + 
case memmap_crashkernel: + /* kvm and crashkernel are handled elsewhere, skip */ + break; + case memmap_acpi: + pr_err("ACPI memmap region is not supported.\n"); + break; + case memmap_use: + pr_err("Force usage memmap region is not supported.\n"); + break; + case memmap_protected: + pr_err("Protected memmap region is not supported.\n"); + break; + default: + pr_err("Unknown type of memmap region.\n"); + } + } +} + +int __init add_memmap_region(u64 addr, u64 size, enum memmap_types type) +{ + if (memmap_nr >= ARRAY_SIZE(memmap_map)) { + pr_err("Ooops! Too many entries in the memory map!\n"); + return -EPERM; + } + + if (addr + size <= addr) { + pr_warn("Trying to add an invalid memory region, skipped\n"); + return -EINVAL; + } + + memmap_map[memmap_nr].addr = addr; + memmap_map[memmap_nr].size = size; + memmap_map[memmap_nr].type = type; + memmap_nr++; + + process_memmap(); + + return 0; +} + +static struct resource* __init +insert_ram_resource(u64 start, u64 end, bool reserved) +{ + struct resource *res = + kzalloc(sizeof(struct resource), GFP_ATOMIC); + if (!res) + return NULL; + if (reserved) { + res->name = "reserved"; + res->flags = IORESOURCE_MEM; + } else { + res->name = "System RAM"; + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + } + res->start = start; + res->end = end; + if (insert_resource(&iomem_resource, res)) { + kfree(res); + return NULL; + } + return res; +} + +static int __init request_standard_resources(void) +{ + struct memblock_region *mblk; + + extern char _text[], _etext[]; + extern char _sdata[], _edata[]; + extern char __bss_start[], __bss_stop[]; + + for_each_mem_region(mblk) { + if (!memblock_is_nomap(mblk)) + insert_ram_resource(mblk->base, + mblk->base + mblk->size - 1, 0); + else + insert_ram_resource(mblk->base, + mblk->base + mblk->size - 1, 1); + } + + code_resource.start = __pa_symbol(_text); + code_resource.end = __pa_symbol(_etext)-1; + data_resource.start = __pa_symbol(_sdata); + data_resource.end = 
__pa_symbol(_edata)-1; + bss_resource.start = __pa_symbol(__bss_start); + bss_resource.end = __pa_symbol(__bss_stop)-1; + + insert_resource(&iomem_resource, &code_resource); + insert_resource(&iomem_resource, &data_resource); + insert_resource(&iomem_resource, &bss_resource); + + return 0; +} +subsys_initcall(request_standard_resources); + +#ifdef CONFIG_NUMA +extern void cpu_set_node(void); +#endif + +static void __init show_socket_mem_layout(void) +{ + int i; + phys_addr_t base, size, end; + + base = 0; + + pr_info("Socket memory layout:\n"); + for (i = 0; i < MAX_NUMSOCKETS; i++) { + if (socket_desc[i].is_online) { + size = socket_desc[i].socket_mem; + end = base + size - 1; + pr_info("Socket %d: [mem %#018llx-%#018llx], size %llu\n", + i, base, end, size); + base = end + 1; + } + } + pr_info("Reserved memory size for Socket 0: %#lx\n", NODE0_START); +} + +int page_is_ram(unsigned long pfn) +{ + pfn <<= PAGE_SHIFT; + + return pfn >= mem_desc.base && pfn < (mem_desc.base + mem_desc.size); +} + +static int __init topology_init(void) +{ + int i, ret; + + for_each_possible_cpu(i) { + struct cpu *cpu = &per_cpu(cpu_devices, i); + +#ifdef CONFIG_HOTPLUG_CPU + if (i != 0) + cpu->hotpluggable = 1; +#endif + ret = register_cpu(cpu, i); + if (unlikely(ret)) + pr_warn("Warning: %s: register_cpu %d failed (%d)\n", + __func__, i, ret); + } + + return 0; +} +subsys_initcall(topology_init); + +static void __init setup_machine_fdt(void) +{ +#ifdef CONFIG_USE_OF + void *dt_virt; + const char *name; + + /* Give a chance to select kernel builtin DTB firstly */ + if (IS_ENABLED(CONFIG_BUILTIN_DTB)) + dt_virt = (void *)__dtb_start; + else { + dt_virt = (void *)sunway_boot_params->dtb_start; + if (virt_to_phys(dt_virt) < virt_to_phys(__bss_stop)) { + pr_emerg("BUG: DTB has been corrupted by kernel image!\n"); + while (true) + cpu_relax(); + } + } + + if (!phys_addr_valid(__boot_pa(dt_virt)) || + !early_init_dt_scan(dt_virt)) { + pr_crit("\n" + "Error: invalid device tree blob at 
virtual address %px\n" + "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n" + "\nPlease check your bootloader.", + dt_virt); + + while (true) + cpu_relax(); + } + + name = of_flat_dt_get_machine_name(); + if (!name) + return; + + pr_info("Machine model: %s\n", name); +#else + pr_info("Kernel disable device tree support.\n"); + return; +#endif +} + +void __init device_tree_init(void) +{ + unflatten_and_copy_device_tree(); + sunway_boot_params->dtb_start = (__u64)initial_boot_params; +} + +static void __init setup_cpu_info(void) +{ + int i; + struct cache_desc *c; + unsigned long val; + + val = cpuid(GET_TABLE_ENTRY, 0); + cpu_desc.model = CPUID_MODEL(val); + cpu_desc.family = CPUID_FAMILY(val); + cpu_desc.chip_var = CPUID_CHIP_VAR(val); + cpu_desc.arch_var = CPUID_ARCH_VAR(val); + cpu_desc.arch_rev = CPUID_ARCH_REV(val); + cpu_desc.pa_bits = CPUID_PA_BITS(val); + cpu_desc.va_bits = CPUID_VA_BITS(val); + + for (i = 0; i < VENDOR_ID_MAX; i++) { + val = cpuid(GET_VENDOR_ID, i); + memcpy(cpu_desc.vendor_id + (i * 8), &val, 8); + } + + for (i = 0; i < MODEL_MAX; i++) { + val = cpuid(GET_MODEL, i); + memcpy(cpu_desc.model_id + (i * 8), &val, 8); + } + + cpu_desc.frequency = cpuid(GET_CPU_FREQ, 0) * 1000UL * 1000UL; + + for (i = 0; i < NR_CPUS; i++) { + c = &(cpu_data[i].icache); + val = cpuid(GET_CACHE_INFO, L1_ICACHE); + c->size = CACHE_SIZE(val); + c->linesz = 1 << (CACHE_LINE_BITS(val)); + c->sets = 1 << (CACHE_INDEX_BITS(val)); + c->ways = c->size / c->sets / c->linesz; + + c = &(cpu_data[i].dcache); + val = cpuid(GET_CACHE_INFO, L1_DCACHE); + c->size = CACHE_SIZE(val); + c->linesz = 1 << (CACHE_LINE_BITS(val)); + c->sets = 1 << (CACHE_INDEX_BITS(val)); + c->ways = c->size / c->sets / c->linesz; + + c = &(cpu_data[i].scache); + val = cpuid(GET_CACHE_INFO, L2_CACHE); + c->size = CACHE_SIZE(val); + c->linesz = 1 << (CACHE_LINE_BITS(val)); + c->sets = 1 << (CACHE_INDEX_BITS(val)); + c->ways = c->size / c->sets / c->linesz; + + c = &(cpu_data[i].tcache); 
+ val = cpuid(GET_CACHE_INFO, L3_CACHE); + c->size = CACHE_SIZE(val); + c->linesz = 1 << (CACHE_LINE_BITS(val)); + c->sets = 1 << (CACHE_INDEX_BITS(val)); + c->ways = c->size / c->sets / c->linesz; + } +} + +static void __init setup_run_mode(void) +{ + if (*(unsigned long *)MMSIZE) { + static_branch_disable(&run_mode_host_key); + if (*(unsigned long *)MMSIZE & EMUL_FLAG) { + pr_info("run mode: emul\n"); + static_branch_disable(&run_mode_guest_key); + static_branch_enable(&run_mode_emul_key); + + } else { + pr_info("run mode: guest\n"); + static_branch_enable(&run_mode_guest_key); + static_branch_disable(&run_mode_emul_key); + } + } else { + pr_info("run mode: host\n"); + static_branch_enable(&run_mode_host_key); + static_branch_disable(&run_mode_guest_key); + static_branch_disable(&run_mode_emul_key); + } +} + +static void __init setup_socket_info(void) +{ + int i; + int numsockets = sw64_chip->get_cpu_num(); + + memset(socket_desc, 0, MAX_NUMSOCKETS * sizeof(struct socket_desc_t)); + + for (i = 0; i < numsockets; i++) { + socket_desc[i].is_online = 1; + if (sw64_chip_init->early_init.get_node_mem) + socket_desc[i].socket_mem = sw64_chip_init->early_init.get_node_mem(i); + } +} + +#ifdef CONFIG_BLK_DEV_INITRD +static void __init reserve_mem_for_initrd(void) +{ + int ret; + + initrd_start = sunway_boot_params->initrd_start; + if (initrd_start) { + initrd_start = __pa(initrd_start) + PAGE_OFFSET; + initrd_end = initrd_start + sunway_boot_params->initrd_size; + pr_info("Initial ramdisk at: 0x%px (%llu bytes)\n", + (void *)initrd_start, sunway_boot_params->initrd_size); + + ret = add_memmap_region(__pa(initrd_start), initrd_end - initrd_start, memmap_initrd); + if (ret) + pr_err("Add initrd area [mem %#018lx-%#018lx] to memmap region failed.\n", + __pa(initrd_start), __pa(initrd_end - 1)); + } +} +#endif /* CONFIG_BLK_DEV_INITRD */ + +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +static int __init early_kvm_reserved_mem(char *p) +{ + 
if (!p) { + pr_err("Config string not provided\n"); + return -EINVAL; + } + + kvm_mem_size = memparse(p, &p); + if (*p != '@') + return -EINVAL; + kvm_mem_base = memparse(p + 1, &p); + return 0; +} +early_param("kvm_mem", early_kvm_reserved_mem); + +void __init sw64_kvm_reserve(void) +{ + kvm_cma_declare_contiguous(kvm_mem_base, kvm_mem_size, 0, + PAGE_SIZE, 0, "sw64_kvm_cma", &sw64_kvm_cma); +} +#endif +#endif + +void __init +setup_arch(char **cmdline_p) +{ + /** + * Work around the unaligned access exception to parse ACPI + * tables in the following function acpi_boot_table_init(). + */ + trap_init(); + + jump_label_init(); + setup_cpu_info(); + setup_run_mode(); + setup_chip_ops(); + setup_socket_info(); + show_socket_mem_layout(); + sw64_chip_init->early_init.setup_core_map(&core_start); + if (is_guest_or_emul()) + get_vt_smp_info(); + + setup_sched_clock(); + + setup_machine_fdt(); + + /* Register a call for panic conditions. */ + atomic_notifier_chain_register(&panic_notifier_list, + &sw64_panic_block); + + callback_init(); + + /* command line */ + if (!sunway_boot_params->cmdline) + sunway_boot_params->cmdline = (unsigned long)COMMAND_LINE; + + strscpy(boot_command_line, (char *)sunway_boot_params->cmdline, COMMAND_LINE_SIZE); + +#if IS_ENABLED(CONFIG_CMDLINE_BOOL) +#if IS_ENABLED(CONFIG_CMDLINE_OVERRIDE) + strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); + strscpy((char *)sunway_boot_params->cmdline, boot_command_line, COMMAND_LINE_SIZE); +#else + if (builtin_cmdline[0]) { + /* append builtin to boot loader cmdline */ + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); + } +#endif /* CMDLINE_EXTEND */ +#endif + + strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE); + *cmdline_p = command_line; + + /* + * Process command-line arguments. + */ + parse_early_param(); + + /* Find our memory. 
*/ + mem_detect(); + +#ifdef CONFIG_PCI + reserve_mem_for_pci(); +#endif + +#ifdef CONFIG_BLK_DEV_INITRD + reserve_mem_for_initrd(); +#endif + + sw64_memblock_init(); + + reserve_crashkernel(); + + /* Reserve large chunks of memory for use by CMA for KVM. */ +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) + sw64_kvm_reserve(); +#endif +#endif + + efi_init(); + + /* Try to upgrade ACPI tables via initrd */ + acpi_table_upgrade(); + + /* Parse the ACPI tables for possible boot-time configuration */ + acpi_boot_table_init(); + +#ifdef CONFIG_SMP + setup_smp(); +#else + store_cpu_data(0); +#endif + + sw64_numa_init(); + + memblock_dump_all(); + + sparse_init(); + + zone_sizes_init(); + + paging_init(); + + kexec_control_page_init(); + + /* + * Initialize the machine. Usually has to do with setting up + * DMA windows and the like. + */ + sw64_init_arch(); + + /* Reserve standard resources. */ + reserve_std_resources(); + + /* + * Give us a default console. TGA users will see nothing until + * chr_dev_init is called, rather late in the boot sequence. + */ + +#ifdef CONFIG_VT +#if defined(CONFIG_VGA_CONSOLE) + conswitchp = &vga_con; +#elif defined(CONFIG_DUMMY_CONSOLE) + conswitchp = &dummy_con; +#endif +#endif + + /* Default root filesystem to sda2. */ + ROOT_DEV = MKDEV(SCSI_DISK0_MAJOR, 2); + + if (acpi_disabled) { +#ifdef CONFIG_NUMA + cpu_set_node(); +#endif + device_tree_init(); + } +} + + +static int +show_cpuinfo(struct seq_file *f, void *slot) +{ + int i; + unsigned long cpu_freq; + + cpu_freq = cpuid(GET_CPU_FREQ, 0); + + for_each_online_cpu(i) { + /* + * glibc reads /proc/cpuinfo to determine the number of + * online processors, looking for lines beginning with + * "processor". Give glibc what it expects. 
+ */ + seq_printf(f, "processor\t: %u\n" + "vendor_id\t: %s\n" + "cpu family\t: %d\n" + "model\t\t: %u\n" + "model name\t: %s CPU @ %lu.%lu%luGHz\n" + "cpu variation\t: %u\n" + "cpu revision\t: %u\n", + i, cpu_desc.vendor_id, cpu_desc.family, + cpu_desc.model, cpu_desc.model_id, + cpu_freq / 1000, (cpu_freq % 1000) / 100, + (cpu_freq % 100) / 10, + cpu_desc.arch_var, cpu_desc.arch_rev); + seq_printf(f, "cpu MHz\t\t: %lu.00\n" + "cache size\t: %u KB\n" + "physical id\t: %d\n" + "bogomips\t: %lu.%02lu\n", + get_cpu_freq() / 1000 / 1000, cpu_data[i].tcache.size >> 10, + cpu_topology[i].package_id, + loops_per_jiffy / (500000/HZ), + (loops_per_jiffy / (5000/HZ)) % 100); + + seq_printf(f, "flags\t\t: fpu simd vpn upn cpuid\n"); + seq_printf(f, "page size\t: %d\n", 8192); + seq_printf(f, "cache_alignment\t: %d\n", cpu_data[i].tcache.linesz); + seq_printf(f, "address sizes\t: %u bits physical, %u bits virtual\n\n", + cpu_desc.pa_bits, cpu_desc.va_bits); + } + return 0; +} + +/* + * We show only CPU #0 info. + */ +static void * +c_start(struct seq_file *f, loff_t *pos) +{ + return *pos < 1 ? 
(void *)1 : NULL; +} + +static void * +c_next(struct seq_file *f, void *v, loff_t *pos) +{ + (*pos)++; + return NULL; +} + +static void +c_stop(struct seq_file *f, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; + + +static int +sw64_panic_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + return NOTIFY_DONE; +} + +static __init int add_pcspkr(void) +{ + struct platform_device *pd; + int ret; + + pd = platform_device_alloc("pcspkr", -1); + if (!pd) + return -ENOMEM; + + ret = platform_device_add(pd); + if (ret) + platform_device_put(pd); + + return ret; +} +device_initcall(add_pcspkr); + +#ifdef CONFIG_DEBUG_FS +struct dentry *sw64_debugfs_dir; +EXPORT_SYMBOL(sw64_debugfs_dir); + +static int __init debugfs_sw64(void) +{ + struct dentry *d; + + d = debugfs_create_dir("sw64", NULL); + if (!d) + return -ENOMEM; + sw64_debugfs_dir = d; + return 0; +} +arch_initcall(debugfs_sw64); +#endif + +#ifdef CONFIG_OF +static int __init sw64_of_init(void) +{ + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); + return 0; +} +core_initcall(sw64_of_init); +#endif + +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +static int __init sw64_kvm_pool_init(void) +{ + int status = 0; + unsigned long kvm_pool_virt; + struct page *base_page, *end_page, *p; + + if (!sw64_kvm_cma) + goto out; + + kvm_pool_virt = (unsigned long)kvm_mem_base; + + sw64_kvm_pool = gen_pool_create(PAGE_SHIFT, -1); + if (!sw64_kvm_pool) + goto out; + + status = gen_pool_add_virt(sw64_kvm_pool, kvm_pool_virt, kvm_mem_base, + kvm_mem_size, -1); + if (status < 0) { + pr_err("failed to add memory chunks to sw64 kvm pool\n"); + gen_pool_destroy(sw64_kvm_pool); + sw64_kvm_pool = NULL; + goto out; + } + gen_pool_set_algo(sw64_kvm_pool, gen_pool_best_fit, NULL); + + base_page = pfn_to_page(kvm_mem_base >> PAGE_SHIFT); + end_page = pfn_to_page((kvm_mem_base 
+ kvm_mem_size - 1) >> PAGE_SHIFT); + + p = base_page; + while (p <= end_page && page_ref_count(p) == 0) { + set_page_count(p, 1); + page_mapcount_reset(p); + SetPageReserved(p); + p++; + } + + return status; + +out: + return -ENOMEM; +} +core_initcall_sync(sw64_kvm_pool_init); +#endif +#endif -- Gitee From 1e073181b8ccf7f523257096fc166e36009c766e Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:38 +0800 Subject: [PATCH 0279/2138] anolis: sw64: add topology setup routine ANBZ: #4688 Add topology setup for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/topology.h | 71 ++++++++++ arch/sw_64/kernel/topology.c | 212 ++++++++++++++++++++++++++++++ 2 files changed, 283 insertions(+) create mode 100644 arch/sw_64/include/asm/topology.h create mode 100644 arch/sw_64/kernel/topology.c diff --git a/arch/sw_64/include/asm/topology.h b/arch/sw_64/include/asm/topology.h new file mode 100644 index 000000000000..25ec7b9e9431 --- /dev/null +++ b/arch/sw_64/include/asm/topology.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TOPOLOGY_H +#define _ASM_SW64_TOPOLOGY_H + +#include +#include +#include +#include +#include +#include + +extern struct cpu_topology cpu_topology[NR_CPUS]; + +#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id) +#define topology_core_id(cpu) (cpu_topology[cpu].core_id) +#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) +#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) +#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) + +void init_cpu_topology(void); +void store_cpu_topology(int cpuid); +void remove_cpu_topology(int cpuid); +const struct cpumask *cpu_coregroup_mask(int cpu); + +static inline int rcid_to_thread_id(int rcid) +{ + return (rcid & THREAD_ID_MASK) >> THREAD_ID_SHIFT; 
+} + +static inline int rcid_to_core_id(int rcid) +{ + return (rcid & CORE_ID_MASK) >> CORE_ID_SHIFT; +} + +static inline int rcid_to_domain_id(int rcid) +{ + return (rcid & DOMAIN_ID_MASK) >> DOMAIN_ID_SHIFT; +} + +#ifdef CONFIG_NUMA + +#ifndef CONFIG_DEBUG_PER_CPU_MAPS +extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +/* Returns a pointer to the cpumask of CPUs on Node 'node'. */ +#define cpumask_of_node(node) ((node) == NUMA_NO_NODE ? \ + cpu_all_mask : \ + node_to_cpumask_map[node]) +#else +extern const struct cpumask *cpumask_of_node(int node); +#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ + +extern void numa_add_cpu(unsigned int cpu); +extern void numa_remove_cpu(unsigned int cpu); +extern void numa_store_cpu_info(unsigned int cpu); +extern int __node_distance(int from, int to); +#define node_distance(a, b) __node_distance(a, b) +#define parent_node(node) (node) +#define cpumask_of_pcibus(bus) (cpu_online_mask) +#else /* !CONFIG_NUMA */ +static inline void numa_add_cpu(unsigned int cpu) { } +static inline void numa_remove_cpu(unsigned int cpu) { } +static inline void numa_store_cpu_info(unsigned int cpu) { } +#endif /* CONFIG_NUMA */ + +extern void get_vt_smp_info(void); + +#include + +static inline void arch_fix_phys_package_id(int num, u32 slot) { } + +#endif /* _ASM_SW64_TOPOLOGY_H */ diff --git a/arch/sw_64/kernel/topology.c b/arch/sw_64/kernel/topology.c new file mode 100644 index 000000000000..8371c013446f --- /dev/null +++ b/arch/sw_64/kernel/topology.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include + +static int __init parse_dt_topology(void) +{ + return 0; +} + +/* + * cpu topology table + */ +struct cpu_topology cpu_topology[NR_CPUS]; +EXPORT_SYMBOL_GPL(cpu_topology); + +int topo_nr_threads, topo_nr_cores, topo_nr_maxcpus; + +static int topo_nr_cpus; +static int topo_threads[NR_CPUS]; +static int topo_cores[NR_CPUS]; +static int topo_packages[NR_CPUS]; + +void __init get_vt_smp_info(void) 
+{ + unsigned long smp_info; + + smp_info = sw64_io_read(0, SMP_INFO); + if (smp_info == -1UL) + smp_info = 0; + topo_nr_threads = (smp_info >> VT_THREADS_SHIFT) & VT_THREADS_MASK; + topo_nr_cores = (smp_info >> VT_CORES_SHIFT) & VT_CORES_MASK; + topo_nr_maxcpus = (smp_info >> VT_MAX_CPUS_SHIFT) & VT_MAX_CPUS_MASK; +} + +static void __init init_topo_threads(void) +{ + int i, j; + + if (topo_nr_threads == 0) + topo_nr_threads = 1; + + for (i = 0; i < topo_nr_cpus; i += topo_nr_threads) { + for (j = 0; j < topo_nr_threads; j++) + topo_threads[i+j] = j; + } +} + +static void __init init_topo_cores(void) +{ + int i, j; + + if (topo_nr_cores == 0) + topo_nr_cores = topo_nr_cpus; + + for (i = 0; i < topo_nr_cpus; i += topo_nr_cores) { + for (j = 0; j < topo_nr_cores; j++) + topo_cores[i+j] = j; + } +} + +static void __init init_topo_packages(void) +{ + int i, j, packet_index = 0; + int topo_nr_packages = topo_nr_cpus / (topo_nr_cores * topo_nr_threads); + int div_package = topo_nr_cpus / topo_nr_packages; + + for (i = 0; i < topo_nr_cpus; i += div_package) { + for (j = 0 ; j < div_package; j++) + topo_packages[i+j] = packet_index; + packet_index++; + } + if (packet_index > topo_nr_packages) + pr_err("topo_cores init failed.\n"); +} + +static void __init init_topology_array(void) +{ + topo_nr_cpus = num_present_cpus(); + if (topo_nr_maxcpus > topo_nr_cpus) + topo_nr_cpus = topo_nr_maxcpus; + init_topo_threads(); + init_topo_cores(); + init_topo_packages(); +} + +const struct cpumask *cpu_coregroup_mask(int cpu) +{ + return topology_llc_cpumask(cpu); +} + +static void update_siblings_masks(int cpu) +{ + struct cpu_topology *cpu_topo = &cpu_topology[cpu]; + int sib; + + /* update core and thread sibling masks */ + for_each_online_cpu(sib) { + struct cpu_topology *sib_topo = &cpu_topology[sib]; + + if (cpu_topo->package_id == sib_topo->package_id) { + cpumask_set_cpu(cpu, &sib_topo->core_sibling); + cpumask_set_cpu(sib, &cpu_topo->core_sibling); + cpumask_set_cpu(cpu, 
&sib_topo->llc_sibling); + cpumask_set_cpu(sib, &cpu_topo->llc_sibling); + + if (cpu_topo->core_id == sib_topo->core_id) { + cpumask_set_cpu(cpu, &sib_topo->thread_sibling); + cpumask_set_cpu(sib, &cpu_topo->thread_sibling); + } + } + } +} + +void store_cpu_topology(int cpu) +{ + struct cpu_topology *cpu_topo = &cpu_topology[cpu]; + + if (cpu_topo->package_id != -1) + goto topology_populated; + + if (is_guest_or_emul()) { + cpu_topo->package_id = topo_packages[cpu]; + cpu_topo->core_id = topo_cores[cpu]; + cpu_topo->thread_id = topo_threads[cpu]; + goto topology_populated; + } + + cpu_topo->package_id = rcid_to_domain_id(cpu_to_rcid(cpu)); + cpu_topo->core_id = rcid_to_core_id(cpu_to_rcid(cpu)); + cpu_topo->thread_id = rcid_to_thread_id(cpu_to_rcid(cpu)); + + pr_debug("CPU%u: socket %d core %d thread %d\n", + cpu, cpu_topo->package_id, cpu_topo->core_id, + cpu_topo->thread_id); + +topology_populated: + update_siblings_masks(cpu); +} + +static void clear_cpu_topology(int cpu) +{ + struct cpu_topology *cpu_topo = &cpu_topology[cpu]; + + cpumask_clear(&cpu_topo->llc_sibling); + cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); + + cpumask_clear(&cpu_topo->core_sibling); + cpumask_set_cpu(cpu, &cpu_topo->core_sibling); + cpumask_clear(&cpu_topo->thread_sibling); + cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); +} + +static void __init reset_cpu_topology(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + struct cpu_topology *cpu_topo = &cpu_topology[cpu]; + + cpu_topo->thread_id = -1; + cpu_topo->core_id = 0; + cpu_topo->package_id = -1; + + clear_cpu_topology(cpu); + } +} + +void remove_cpu_topology(int cpu) +{ + int sibling; + + for_each_cpu(sibling, topology_core_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); + for_each_cpu(sibling, topology_sibling_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); + for_each_cpu(sibling, topology_llc_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); + + 
clear_cpu_topology(cpu); +} + +#ifdef CONFIG_ACPI +static int __init parse_acpi_topology(void) +{ + return 0; +} +#else +static inline int __init parse_acpi_topology(void) +{ + return -EINVAL; +} +#endif + +void __init init_cpu_topology(void) +{ + reset_cpu_topology(); + + if (is_guest_or_emul()) + init_topology_array(); + /* + * Discard anything that was parsed if we hit an error so we + * don't use partial information. + */ + if (!acpi_disabled && parse_acpi_topology()) + reset_cpu_topology(); + else if (of_have_populated_dt() && parse_dt_topology()) + reset_cpu_topology(); +} -- Gitee From 4c9bd0610fa20574cc1b66b2292fd3dd68a54126 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:38 +0800 Subject: [PATCH 0280/2138] anolis: sw64: add timer support ANBZ: #4688 Add timer for basic SW64 support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/tc.h | 16 ++++++++++++++++ arch/sw_64/include/asm/timer.h | 11 +++++++++++ arch/sw_64/include/asm/timex.h | 23 +++++++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 arch/sw_64/include/asm/tc.h create mode 100644 arch/sw_64/include/asm/timer.h create mode 100644 arch/sw_64/include/asm/timex.h diff --git a/arch/sw_64/include/asm/tc.h b/arch/sw_64/include/asm/tc.h new file mode 100644 index 000000000000..aa39c3528e3f --- /dev/null +++ b/arch/sw_64/include/asm/tc.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TC_H +#define _ASM_SW64_TC_H + +static inline unsigned long rdtc(void) +{ + unsigned long ret; + + __asm__ __volatile__ ("rtc %0" : "=r"(ret)); + return ret; +} + +extern void tc_sync_clear(void); +extern void tc_sync_ready(void *ignored); +extern void tc_sync_set(void); +#endif /* _ASM_SW64_TC_H */ diff --git a/arch/sw_64/include/asm/timer.h b/arch/sw_64/include/asm/timer.h new file mode 100644 index 
000000000000..9ea9e0a538d0 --- /dev/null +++ b/arch/sw_64/include/asm/timer.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TIMER_H +#define _ASM_SW64_TIMER_H + +extern void sw64_setup_clocksource(void); + +extern void sw64_setup_timer(void); + +extern void __init setup_sched_clock(void); + +#endif /* _ASM_SW64_TIMER_H */ diff --git a/arch/sw_64/include/asm/timex.h b/arch/sw_64/include/asm/timex.h new file mode 100644 index 000000000000..a5760bf8abd4 --- /dev/null +++ b/arch/sw_64/include/asm/timex.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TIMEX_H +#define _ASM_SW64_TIMEX_H + +#include + +/* With only one or two oddballs, we use the RTC as the ticker, selecting + * the 32.768kHz reference clock, which nicely divides down to our HZ. + */ +#define CLOCK_TICK_RATE 32768 + +/* + * Standard way to access the cycle counter. + */ + +typedef unsigned long cycles_t; + +static inline cycles_t get_cycles(void) +{ + return rdtc(); +} + +#endif /* _ASM_SW64_TIMEX_H */ -- Gitee From f045628321508b61059a9d3be95f00cea91c463a Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:16 +0800 Subject: [PATCH 0281/2138] anolis: sw64: add irq handling support ANBZ: #4688 Add interrupt handling mechanism for basic SW64 support. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/hardirq.h | 24 +++++++ arch/sw_64/include/asm/hw_irq.h | 16 +++++ arch/sw_64/include/asm/irq.h | 31 +++++++++ arch/sw_64/include/asm/irq_impl.h | 48 +++++++++++++ arch/sw_64/include/asm/irqflags.h | 55 +++++++++++++++ arch/sw_64/kernel/irq.c | 108 ++++++++++++++++++++++++++++++ arch/sw_64/kernel/irq_sw64.c | 84 +++++++++++++++++++++++ arch/sw_64/kernel/time.c | 63 +++++++++++++++++ 8 files changed, 429 insertions(+) create mode 100644 arch/sw_64/include/asm/hardirq.h create mode 100644 arch/sw_64/include/asm/hw_irq.h create mode 100644 arch/sw_64/include/asm/irq.h create mode 100644 arch/sw_64/include/asm/irq_impl.h create mode 100644 arch/sw_64/include/asm/irqflags.h create mode 100644 arch/sw_64/kernel/irq.c create mode 100644 arch/sw_64/kernel/irq_sw64.c create mode 100644 arch/sw_64/kernel/time.c diff --git a/arch/sw_64/include/asm/hardirq.h b/arch/sw_64/include/asm/hardirq.h new file mode 100644 index 000000000000..03368c3659dd --- /dev/null +++ b/arch/sw_64/include/asm/hardirq.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HARDIRQ_H +#define _ASM_SW64_HARDIRQ_H + +void ack_bad_irq(unsigned int irq); +#define ack_bad_irq ack_bad_irq + +#include + +#define __ARCH_IRQ_STAT +typedef struct { + u16 __softirq_pending; + unsigned int timer_irqs_event; +} ____cacheline_aligned irq_cpustat_t; + +DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); + +#define inc_irq_stat(member) this_cpu_inc(irq_stat.member) +#define arch_irq_stat_cpu arch_irq_stat_cpu +#define arch_irq_stat arch_irq_stat +extern u64 arch_irq_stat_cpu(unsigned int cpu); +extern u64 arch_irq_stat(void); + +#endif /* _ASM_SW64_HARDIRQ_H */ diff --git a/arch/sw_64/include/asm/hw_irq.h b/arch/sw_64/include/asm/hw_irq.h new file mode 100644 index 000000000000..3cfc725f7517 --- 
/dev/null +++ b/arch/sw_64/include/asm/hw_irq.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HW_IRQ_H +#define _ASM_SW64_HW_IRQ_H + +#include + +extern volatile unsigned long irq_err_count; +DECLARE_PER_CPU(unsigned long, irq_pmi_count); + +#define ACTUAL_NR_IRQS NR_IRQS + +#ifdef CONFIG_PCI_MSI +typedef unsigned int vector_irq_t[PERCPU_MSI_IRQS]; +DECLARE_PER_CPU(vector_irq_t, vector_irq); +#endif +#endif /* _ASM_SW64_HW_IRQ_H */ diff --git a/arch/sw_64/include/asm/irq.h b/arch/sw_64/include/asm/irq.h new file mode 100644 index 000000000000..b3ac4105c29e --- /dev/null +++ b/arch/sw_64/include/asm/irq.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IRQ_H +#define _ASM_SW64_IRQ_H + +/* + * arch/sw/include/asm/irq.h + * + * (C) 2012 OSKernel JN + */ + +#include + +#define NR_VECTORS_PERCPU 256 +#define NR_IRQS_LEGACY 16 +#define NR_IRQS ((NR_VECTORS_PERCPU + NR_IRQS_LEGACY) * NR_CPUS) + +static inline int irq_canonicalize(int irq) +{ + /* + * XXX is this true for all Sw? The old serial driver + * did it this way for years without any complaints, so.... + */ + return ((irq == 2) ? 9 : irq); +} + +struct pt_regs; +extern void (*perf_irq)(unsigned long vector, struct pt_regs *regs); +extern void fixup_irqs(void); +extern void sw64_timer_interrupt(void); + +#endif /* _ASM_SW64_IRQ_H */ diff --git a/arch/sw_64/include/asm/irq_impl.h b/arch/sw_64/include/asm/irq_impl.h new file mode 100644 index 000000000000..797af433a126 --- /dev/null +++ b/arch/sw_64/include/asm/irq_impl.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file contains declarations and inline functions for interfacing + * with the IRQ handling routines in irq.c. 
+ */ + +#ifndef _ASM_SW64_IRQ_IMPL_H +#define _ASM_SW64_IRQ_IMPL_H + +#include +#include +#include + +#include + +#define SW64_PCIE0_INT_BASE 17 +#define SW64_PCIE0_MSI_BASE 21 + +#define SW64_PCIE1_INT_BASE 277 +#define SW64_PCIE1_MSI_BASE 281 + +#define RTC_IRQ 8 +#define SWI2C_IRQ 14 + +enum sw64_irq_type { + INT_IPI = 1, + INT_PC0 = 2, + INT_PC1 = 3, + INT_INTx = 5, + INT_MSI = 6, + INT_MT = 7, + INT_RTC = 9, + INT_FAULT = 10, + INT_VT_SERIAL = 12, + INT_VT_HOTPLUG = 13, + INT_DEV = 17, + INT_NMI = 18, + INT_LEGACY = 31, +}; + +extern struct irqaction timer_irqaction; +extern void init_rtc_irq(irq_handler_t handler); +extern void handle_irq(int irq); +extern void handle_ipi(struct pt_regs *regs); +extern void __init sw64_init_irq(void); +extern irqreturn_t timer_interrupt(int irq, void *dev); + +#endif /* _ASM_SW64_IRQ_IMPL_H */ diff --git a/arch/sw_64/include/asm/irqflags.h b/arch/sw_64/include/asm/irqflags.h new file mode 100644 index 000000000000..b4440f25a51d --- /dev/null +++ b/arch/sw_64/include/asm/irqflags.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IRQFLAGS_H +#define _ASM_SW64_IRQFLAGS_H + +#include + +#define IPL_MIN 0 +#define IPL_MAX 7 + +#define getipl() (rdps() & 7) +#define setipl(ipl) ((void) swpipl(ipl)) + +static inline unsigned long arch_local_save_flags(void) +{ + return rdps(); +} + +static inline void arch_local_irq_disable(void) +{ + setipl(IPL_MAX); + barrier(); +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags = swpipl(IPL_MAX); + + barrier(); + return flags; +} + +static inline void arch_local_irq_enable(void) +{ + barrier(); + setipl(IPL_MIN); +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + barrier(); + setipl(flags); + barrier(); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return flags > IPL_MIN; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(getipl()); +} + +#endif /* 
_ASM_SW64_IRQFLAGS_H */ diff --git a/arch/sw_64/kernel/irq.c b/arch/sw_64/kernel/irq.c new file mode 100644 index 000000000000..126fe2f70495 --- /dev/null +++ b/arch/sw_64/kernel/irq.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw_64/kernel/irq.c + * + * Copyright (C) 1995 Linus Torvalds + * + * This file contains the code used by various IRQ handling routines: + * asking for different IRQ's should be done through these routines + * instead of just grabbing them. Thus setups with different IRQ numbers + * shouldn't result in any weird surprises, and installing new handlers + * should be easier. + */ + +#include +#include +#include +#include + +volatile unsigned long irq_err_count; +DEFINE_PER_CPU(unsigned long, irq_pmi_count); +DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); +EXPORT_PER_CPU_SYMBOL(irq_stat); + +void ack_bad_irq(unsigned int irq) +{ + irq_err_count++; + pr_crit("Unexpected IRQ trap at vector %u\n", irq); +} + +u64 arch_irq_stat_cpu(unsigned int cpu) +{ + u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event; + + return sum; +} + +u64 arch_irq_stat(void) +{ + return 0; +} + +int arch_show_interrupts(struct seq_file *p, int prec) +{ + int j; + + seq_printf(p, "%*s: ", prec, "TIMER"); + for_each_online_cpu(j) + seq_printf(p, "%10u", per_cpu(irq_stat, j).timer_irqs_event); + seq_puts(p, "\n"); + +#ifdef CONFIG_SMP + seq_printf(p, "%*s: ", prec, "IPI"); + for_each_online_cpu(j) + seq_printf(p, "%10lu ", cpu_data[j].ipi_count); + seq_puts(p, "\n"); +#endif + seq_printf(p, "%*s: ", prec, "PMI"); + for_each_online_cpu(j) + seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); + seq_puts(p, "\n"); + + seq_printf(p, "ERR: %10lu\n", irq_err_count); + return 0; +} + +/* + * handle_irq handles all normal device IRQ's (the special + * SMP cross-CPU interrupts have their own specific + * handlers). 
+ */ + +#define MAX_ILLEGAL_IRQS 16 + +void +handle_irq(int irq) +{ + /* + * We ack quickly, we don't want the irq controller + * thinking we're snobs just because some other CPU has + * disabled global interrupts (we have already done the + * INT_ACK cycles, it's too late to try to pretend to the + * controller that we aren't taking the interrupt). + * + * 0 return value means that this irq is already being + * handled by some other CPU. (or is disabled) + */ + static unsigned int illegal_count; + struct irq_desc *desc = irq_to_desc(irq); + + if (!desc || ((unsigned int) irq > ACTUAL_NR_IRQS && + illegal_count < MAX_ILLEGAL_IRQS)) { + irq_err_count++; + illegal_count++; + pr_crit("device_interrupt: invalid interrupt %d\n", irq); + return; + } + + irq_enter(); + generic_handle_irq_desc(desc); + irq_exit(); +} + +#ifdef CONFIG_HOTPLUG_CPU +void fixup_irqs(void) +{ + irq_migrate_all_off_this_cpu(); +} +#endif diff --git a/arch/sw_64/kernel/irq_sw64.c b/arch/sw_64/kernel/irq_sw64.c new file mode 100644 index 000000000000..989d55ee1b1b --- /dev/null +++ b/arch/sw_64/kernel/irq_sw64.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW64 specific irq code. + */ + +#include +#include + +#include +#include + +void __init +init_IRQ(void) +{ + /* + * Just in case the platform init_irq() causes interrupts/mchecks + * (as is the case with RAWHIDE, at least). 
+ */ + if (is_in_host()) { + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI0_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI1_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI2_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI3_INTEN); + } + + wrent(entInt, 0); + + sw64_init_irq(); + irqchip_init(); +} + +DEFINE_SPINLOCK(irq_lock); + +static void +__enable_irq(struct irq_data *d) +{ +} + +static void +__disable_irq(struct irq_data *d) +{ +} + +static unsigned int +__startup_irq(struct irq_data *d) +{ + __enable_irq(d); + return 0; +} + +static void +__mask_and_ack_irq(struct irq_data *d) +{ + spin_lock(&irq_lock); + __disable_irq(d); + spin_unlock(&irq_lock); +} + +struct irq_chip sw64_irq_chip = { + .name = "SW64_NODE", + .irq_startup = __startup_irq, + .irq_unmask = __enable_irq, + .irq_mask = __disable_irq, + .irq_mask_ack = __mask_and_ack_irq, +}; + +void __weak arch_init_msi_domain(struct irq_domain *parent) {} + +int __init arch_early_irq_init(void) +{ + int i; + + for (i = 0; i < NR_IRQS; ++i) { + irq_set_chip_and_handler(i, &sw64_irq_chip, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); + } + arch_init_msi_domain(NULL); + return 0; +} + +int __init arch_probe_nr_irqs(void) +{ + return NR_IRQS_LEGACY; +} diff --git a/arch/sw_64/kernel/time.c b/arch/sw_64/kernel/time.c new file mode 100644 index 000000000000..533a6a14c200 --- /dev/null +++ b/arch/sw_64/kernel/time.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +#include +#include + +#include "proto.h" + +DEFINE_SPINLOCK(rtc_lock); +EXPORT_SYMBOL(rtc_lock); + +#define TICK_SIZE (tick_nsec / 1000) + +/* + * Shift amount by which scaled_ticks_per_cycle is scaled. Shifting + * by 48 gives us 16 bits for HZ while keeping the accuracy good even + * for large CPU clock rates. 
+ */ +#define FIX_SHIFT 48 + +unsigned long est_cycle_freq; + +#ifdef CONFIG_IRQ_WORK + +DEFINE_PER_CPU(u8, irq_work_pending); + +#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1) +#define test_irq_work_pending() __this_cpu_read(irq_work_pending) +#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0) + +void arch_irq_work_raise(void) +{ + set_irq_work_pending_flag(); +} + +#else /* CONFIG_IRQ_WORK */ + +#define test_irq_work_pending() 0 +#define clear_irq_work_pending() + +#endif /* CONFIG_IRQ_WORK */ + +void __init +time_init(void) +{ + unsigned long cycle_freq; + + cycle_freq = get_cpu_freq(); + + pr_info("CPU Cycle frequency = %ld Hz\n", cycle_freq); + + /* Register clocksource */ + sw64_setup_clocksource(); + of_clk_init(NULL); + /* Startup the timer source. */ + sw64_setup_timer(); + /* Calibrate the delay loop directly */ + lpj_fine = cycle_freq / HZ; +} -- Gitee From cc8e7e91c4098c8e17dce8aee3f22f73511738a9 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:07 +0800 Subject: [PATCH 0282/2138] anolis: sw64: add exception handling support ANBZ: #4688 Add exception handling mechanism for basic SW64 support. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/kdebug.h | 15 + arch/sw_64/include/uapi/asm/gentrap.h | 38 + arch/sw_64/include/uapi/asm/sysinfo.h | 20 + arch/sw_64/kernel/traps.c | 1542 +++++++++++++++++++++++++ arch/sw_64/kernel/unaligned.c | 80 ++ 5 files changed, 1695 insertions(+) create mode 100644 arch/sw_64/include/asm/kdebug.h create mode 100644 arch/sw_64/include/uapi/asm/gentrap.h create mode 100644 arch/sw_64/include/uapi/asm/sysinfo.h create mode 100644 arch/sw_64/kernel/traps.c create mode 100644 arch/sw_64/kernel/unaligned.c diff --git a/arch/sw_64/include/asm/kdebug.h b/arch/sw_64/include/asm/kdebug.h new file mode 100644 index 000000000000..73793057c3e8 --- /dev/null +++ b/arch/sw_64/include/asm/kdebug.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KDEBUG_H +#define _ASM_SW64_KDEBUG_H + +#include + +enum die_val { + DIE_OOPS = 1, + DIE_BREAK, + DIE_SSTEPBP, + DIE_UPROBE, + DIE_UPROBE_XOL, +}; + +#endif /* _ASM_SW64_KDEBUG_H */ diff --git a/arch/sw_64/include/uapi/asm/gentrap.h b/arch/sw_64/include/uapi/asm/gentrap.h new file mode 100644 index 000000000000..3786b8b52add --- /dev/null +++ b/arch/sw_64/include/uapi/asm/gentrap.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_GENTRAP_H +#define _UAPI_ASM_SW64_GENTRAP_H + +/* + * Definitions for gentrap causes. They are generated by user-level + * programs and therefore should be compatible with the corresponding + * legacy definitions. 
+ */ +#define GEN_INTOVF -1 /* integer overflow */ +#define GEN_INTDIV -2 /* integer division by zero */ +#define GEN_FLTOVF -3 /* fp overflow */ +#define GEN_FLTDIV -4 /* fp division by zero */ +#define GEN_FLTUND -5 /* fp underflow */ +#define GEN_FLTINV -6 /* invalid fp operand */ +#define GEN_FLTINE -7 /* inexact fp operand */ +#define GEN_DECOVF -8 /* decimal overflow (for COBOL??) */ +#define GEN_DECDIV -9 /* decimal division by zero */ +#define GEN_DECINV -10 /* invalid decimal operand */ +#define GEN_ROPRAND -11 /* reserved operand */ +#define GEN_ASSERTERR -12 /* assertion error */ +#define GEN_NULPTRERR -13 /* null pointer error */ +#define GEN_STKOVF -14 /* stack overflow */ +#define GEN_STRLENERR -15 /* string length error */ +#define GEN_SUBSTRERR -16 /* substring error */ +#define GEN_RANGERR -17 /* range error */ +#define GEN_SUBRNG -18 +#define GEN_SUBRNG1 -19 +#define GEN_SUBRNG2 -20 +#define GEN_SUBRNG3 -21 /* these report range errors for */ +#define GEN_SUBRNG4 -22 /* subscripting (indexing) at levels 0..7 */ +#define GEN_SUBRNG5 -23 +#define GEN_SUBRNG6 -24 +#define GEN_SUBRNG7 -25 + +/* the remaining codes (-26..-1023) are reserved. 
*/ + +#endif /* _UAPI_ASM_SW64_GENTRAP_H */ diff --git a/arch/sw_64/include/uapi/asm/sysinfo.h b/arch/sw_64/include/uapi/asm/sysinfo.h new file mode 100644 index 000000000000..667405c3447c --- /dev/null +++ b/arch/sw_64/include/uapi/asm/sysinfo.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * include/asm/sysinfo.h + */ + +#ifndef _UAPI_ASM_SW64_SYSINFO_H +#define _UAPI_ASM_SW64_SYSINFO_H + +#define GSI_IEEE_FP_CONTROL 45 + +#define SSI_IEEE_FP_CONTROL 14 +#define SSI_IEEE_RAISE_EXCEPTION 1001 /* linux specific */ + +#define UAC_BITMASK 7 +#define UAC_NOPRINT 1 +#define UAC_NOFIX 2 +#define UAC_SIGBUS 4 +#define PR_NOFIX 4 /* do not fix up unaligned accesses */ + +#endif /* _UAPI_ASM_SW64_SYSINFO_H */ diff --git a/arch/sw_64/kernel/traps.c b/arch/sw_64/kernel/traps.c new file mode 100644 index 000000000000..a30e18ad1f00 --- /dev/null +++ b/arch/sw_64/kernel/traps.c @@ -0,0 +1,1542 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sw_64/kernel/traps.c + * + * (C) Copyright 1994 Linus Torvalds + */ + +/* + * This file initializes the trap entry points + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "proto.h" + +enum SW64_IF_TYPES { + IF_BREAKPOINT = 0, + IF_RESERVED, + IF_GENTRAP, + IF_FEN, + IF_OPDEC, + IF_SIMDEMU, +}; + +void show_regs(struct pt_regs *regs) +{ + show_regs_print_info(KERN_DEFAULT); + + printk(KERN_DEFAULT "pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n", + regs->pc, regs->regs[26], regs->ps, print_tainted()); + printk(KERN_DEFAULT "pc is at %pSR\n", (void *)regs->pc); + printk(KERN_DEFAULT "ra is at %pSR\n", (void *)regs->regs[26]); + printk(KERN_DEFAULT "v0 = %016lx t0 = %016lx t1 = %016lx\n", + regs->regs[0], regs->regs[1], regs->regs[2]); + printk(KERN_DEFAULT "t2 = %016lx t3 = %016lx t4 = %016lx\n", + 
regs->regs[3], regs->regs[4], regs->regs[5]); + printk(KERN_DEFAULT "t5 = %016lx t6 = %016lx t7 = %016lx\n", + regs->regs[6], regs->regs[7], regs->regs[8]); + + printk(KERN_DEFAULT "s0 = %016lx s1 = %016lx s2 = %016lx\n", + regs->regs[9], regs->regs[10], regs->regs[11]); + printk(KERN_DEFAULT "s3 = %016lx s4 = %016lx s5 = %016lx\n", + regs->regs[12], regs->regs[13], regs->regs[14]); + printk(KERN_DEFAULT "s6 = %016lx\n", + regs->regs[15]); + + printk(KERN_DEFAULT "a0 = %016lx a1 = %016lx a2 = %016lx\n", + regs->regs[16], regs->regs[17], regs->regs[18]); + printk(KERN_DEFAULT "a3 = %016lx a4 = %016lx a5 = %016lx\n", + regs->regs[19], regs->regs[20], regs->regs[21]); + printk(KERN_DEFAULT "t8 = %016lx t9 = %016lx t10 = %016lx\n", + regs->regs[22], regs->regs[23], regs->regs[24]); + printk(KERN_DEFAULT "t11= %016lx pv = %016lx at = %016lx\n", + regs->regs[25], regs->regs[27], regs->regs[28]); + printk(KERN_DEFAULT "gp = %016lx sp = %016lx\n", regs->regs[29], regs->regs[30]); +} + +static void show_code(unsigned int *pc) +{ + long i; + unsigned int insn; + + printk(KERN_DEFAULT "Code:"); + for (i = -6; i < 2; i++) { + if (__get_user(insn, (unsigned int __user *)pc + i)) + break; + printk(KERN_DEFAULT "%c%08x%c", i ? ' ' : '<', insn, i ? 
' ' : '>'); + } + printk(KERN_DEFAULT "\n"); +} + +static DEFINE_SPINLOCK(die_lock); + +void die(char *str, struct pt_regs *regs, long err) +{ + static int die_counter; + unsigned long flags; + int ret; + + oops_enter(); + + spin_lock_irqsave(&die_lock, flags); + console_verbose(); + bust_spinlocks(1); + + pr_emerg("%s [#%d]\n", str, ++die_counter); + + ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV); + + print_modules(); + show_regs(regs); + show_code((unsigned int *)regs->pc); + show_stack(current, NULL, KERN_EMERG); + + bust_spinlocks(0); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + spin_unlock_irqrestore(&die_lock, flags); + oops_exit(); + + if (kexec_should_crash(current)) + crash_kexec(regs); + if (in_interrupt()) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); + + if (ret != NOTIFY_STOP) + make_task_dead(SIGSEGV); +} + +#ifndef CONFIG_MATHEMU +static long dummy_emul(void) +{ + return 0; +} + +long (*sw64_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask) = (void *)dummy_emul; +EXPORT_SYMBOL_GPL(sw64_fp_emul_imprecise); + +long (*sw64_fp_emul)(unsigned long pc) = (void *)dummy_emul; +EXPORT_SYMBOL_GPL(sw64_fp_emul); +#else +long sw64_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask); +long sw64_fp_emul(unsigned long pc); +#endif + +asmlinkage void +do_entArith(unsigned long summary, unsigned long write_mask, + struct pt_regs *regs) +{ + long si_code = FPE_FLTINV; + + if (summary & 1) { + /* Software-completion summary bit is set, so try to + * emulate the instruction. If the processor supports + * precise exceptions, we don't have to search. 
+ */ + si_code = sw64_fp_emul(regs->pc - 4); + if (si_code == 0) + return; + } + + if (!user_mode(regs)) + die("Arithmetic fault", regs, 0); + + /*summary<39> means integer divide by zero in C4.*/ + if ((summary >> 39) & 1) + si_code = FPE_INTDIV; + + force_sig_fault(SIGFPE, si_code, (void __user *)regs->pc); +} + +void simd_emulate(unsigned int inst, unsigned long va) +{ + unsigned long *fp; + int instr_opc, reg; + + instr_opc = (inst >> 26) & 0x3f; + reg = (inst >> 21) & 0x1f; + fp = (unsigned long *) va; + + switch (instr_opc) { + case 0x0d: /* vldd */ + sw64_write_simd_fp_reg_d(reg, fp[0], fp[1], fp[2], fp[3]); + return; + + case 0x0f: /* vstd */ + sw64_read_simd_fp_m_d(reg, fp); + return; + } +} + +/* + * BPT/GENTRAP/OPDEC make regs->pc = exc_pc + 4. debugger should + * do something necessary to handle it correctly. + */ +asmlinkage void +do_entIF(unsigned long inst_type, unsigned long va, struct pt_regs *regs) +{ + int signo, code; + unsigned int inst, type; + + type = inst_type & 0xffffffff; + inst = inst_type >> 32; + + if (type == IF_SIMDEMU) { + simd_emulate(inst, va); + return; + } + + if (!user_mode(regs) && type != IF_OPDEC) { + if (type == IF_BREAKPOINT) { + /* support kgdb */ + notify_die(0, "kgdb trap", regs, 0, 0, SIGTRAP); + return; + } + die((type == IF_RESERVED ? 
"Kernel Bug" : "Instruction fault"), + regs, type); + } + + switch (type) { + case IF_BREAKPOINT: /* gdb do pc-4 for sigtrap */ + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc); + return; + + case IF_GENTRAP: + regs->pc -= 4; + switch ((long)regs->regs[16]) { + case GEN_INTOVF: + signo = SIGFPE; + code = FPE_INTOVF; + break; + case GEN_INTDIV: + signo = SIGFPE; + code = FPE_INTDIV; + break; + case GEN_FLTOVF: + signo = SIGFPE; + code = FPE_FLTOVF; + break; + case GEN_FLTDIV: + signo = SIGFPE; + code = FPE_FLTDIV; + break; + case GEN_FLTUND: + signo = SIGFPE; + code = FPE_FLTUND; + break; + case GEN_FLTINV: + signo = SIGFPE; + code = FPE_FLTINV; + break; + case GEN_FLTINE: + signo = SIGFPE; + code = FPE_FLTRES; + break; + case GEN_ROPRAND: + signo = SIGFPE; + code = FPE_FLTUNK; + break; + + case GEN_DECOVF: + case GEN_DECDIV: + case GEN_DECINV: + case GEN_ASSERTERR: + case GEN_NULPTRERR: + case GEN_STKOVF: + case GEN_STRLENERR: + case GEN_SUBSTRERR: + case GEN_RANGERR: + case GEN_SUBRNG: + case GEN_SUBRNG1: + case GEN_SUBRNG2: + case GEN_SUBRNG3: + case GEN_SUBRNG4: + case GEN_SUBRNG5: + case GEN_SUBRNG6: + case GEN_SUBRNG7: + default: + regs->pc += 4; + signo = SIGTRAP; + code = TRAP_UNK; + break; + } + + force_sig_fault(signo, code, (void __user *)regs->pc); + return; + + case IF_FEN: + fpu_enable(); + return; + + case IF_OPDEC: + switch (inst) { +#ifdef CONFIG_KPROBES + case BREAK_KPROBE: + if (notify_die(DIE_BREAK, "kprobe", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; + break; + case BREAK_KPROBE_SS: + if (notify_die(DIE_SSTEPBP, "single_step", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; + break; +#endif +#ifdef CONFIG_UPROBES + case UPROBE_BRK_UPROBE: + if (notify_die(DIE_UPROBE, "uprobe", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; + break; + case UPROBE_BRK_UPROBE_XOL: + if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; +#endif + } + + if (user_mode(regs)) + regs->pc -= 4; + else + 
die("Instruction fault", regs, type); + break; + + default: /* unexpected instruction-fault type */ + regs->pc -= 4; + break; + } + + force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc); +} + +asmlinkage void +do_entUna(void *va, unsigned long opcode, unsigned long reg, + struct pt_regs *regs) +{ + long error; + unsigned long tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; + unsigned long pc = regs->pc - 4; + + /* + * We don't want to use the generic get/put unaligned macros as + * we want to trap exceptions. Only if we actually get an + * exception will we decide whether we should have caught it. + */ + + switch (opcode) { + case 0x21: + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 1(%3)\n" + " extlh %1, %3, %1\n" + " exthh %2, %3, %2\n" + "3:\n" + ".section __ex_table,\"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto got_exception; + regs->regs[reg] = tmp1 | tmp2; + return; + + case 0x22: + __asm__ __volatile__( + "1: ldl_u %1,0(%3)\n" + "2: ldl_u %2,3(%3)\n" + " extlw %1,%3,%1\n" + " exthw %2,%3,%2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto got_exception; + regs->regs[reg] = (int)(tmp1 | tmp2); + return; + + case 0x23: /* ldl */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto got_exception; + regs->regs[reg] = tmp1 | tmp2; + return; + + case 0x29: /* sth */ + __asm__ __volatile__( + " zap %6, 2, %1\n" 
+ " srl %6, 8, %2\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %2, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %1, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(regs->regs[reg]), "0"(0)); + + if (error) + goto got_exception; + return; + + case 0x2a: /* stw */ + __asm__ __volatile__( + " zapnot %6, 0x1, %1\n" + " srl %6, 8, %2\n" + " zapnot %2, 0x1,%2\n" + " srl %6, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %6, 24, %4\n" + " zapnot %4, 0x1, %4\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3: stb %3, 0x2(%5)\n" + "4: stb %4, 0x3(%5)\n" + "5:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi $31, 5b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 5b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 5b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 5b-4b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(regs->regs[reg]), "0"(0)); + + if (error) + goto got_exception; + return; + + case 0x2b: /* stl */ + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi 
$31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(regs->regs[reg]), "0"(0)); + + if (error) + goto got_exception; + return; + } + + pr_warn("Bad unaligned kernel access at %016lx: %p %lx %lu\n", + pc, va, opcode, reg); + make_task_dead(SIGSEGV); + +got_exception: + /* Ok, we caught the exception, but we don't want it. Is there + * someone to pass it along to? + */ + if (fixup_exception(regs, pc)) { + pr_info("Forwarding unaligned exception at %lx (%lx)\n", + pc, regs->pc); + return; + } + + /* + * Yikes! No one to forward the exception to. + * Since the registers are in a weird format, dump them ourselves. + */ + + die("Unhandled unaligned exception", regs, error); +} + +/* + * Handle user-level unaligned fault. Handling user-level unaligned + * faults is *extremely* slow and produces nasty messages. A user + * program *should* fix unaligned faults ASAP. + * + * Notice that we have (almost) the regular kernel stack layout here, + * so finding the appropriate registers is a little more difficult + * than in the kernel case. + * + * Finally, we handle regular integer load/stores only. In + * particular, load-linked/store-conditionally and floating point + * load/stores are not supported. The former make no sense with + * unaligned faults (they are guaranteed to fail) and I don't think + * the latter will occur in any decent program. + * + * Sigh. We *do* have to handle some FP operations, because GCC will + * uses them as temporary storage for integer memory to memory copies. + * However, we need to deal with stt/ldt and sts/lds only. 
+ */ +#define OP_INT_MASK (1L << 0x22 | 1L << 0x2a | /* ldw stw */ \ + 1L << 0x23 | 1L << 0x2b | /* ldl stl */ \ + 1L << 0x21 | 1L << 0x29 | /* ldhu sth */ \ + 1L << 0x20 | 1L << 0x28) /* ldbu stb */ + +asmlinkage void +do_entUnaUser(void __user *va, unsigned long opcode, + unsigned long reg, struct pt_regs *regs) +{ +#ifdef CONFIG_UNA_PRINT + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); +#endif + + unsigned long tmp1, tmp2, tmp3, tmp4; + unsigned long fake_reg, *reg_addr = &fake_reg; + int si_code; + long error; + unsigned long tmp, tmp5, tmp6, tmp7, tmp8, vb; + unsigned long fp[4]; + unsigned long instr, instr_op, value; + +#ifdef CONFIG_DEBUG_FS + /* + * If command name is specified, record some information + * to debugfs. + */ + if (unaligned_task[0] && !strcmp(unaligned_task, current->comm)) { + int idx; + + idx = unaligned_count % UNA_MAX_ENTRIES; + unaligned[idx].va = (unsigned long)va; + unaligned[idx].pc = regs->pc; + unaligned_count++; + } +#endif + + /* Check the UAC bits to decide what the user wants us to do + * with the unaliged access. + */ + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, + 1, regs, regs->pc - 4); + +#ifdef CONFIG_UNA_PRINT + if (!(current_thread_info()->status & TS_UAC_NOPRINT)) { + if (__ratelimit(&ratelimit)) { + pr_info("%s(%d): unaligned trap at %016lx: %p %lx %ld\n", + current->comm, task_pid_nr(current), + regs->pc - 4, va, opcode, reg); + } + } +#endif + if ((current_thread_info()->status & TS_UAC_SIGBUS)) + goto give_sigbus; + /* Not sure why you'd want to use this, but... */ + if ((current_thread_info()->status & TS_UAC_NOFIX)) + return; + + /* Don't bother reading ds in the access check since we already + * know that this came from the user. Also rely on the fact that + * the page at TASK_SIZE is unmapped and so can't be touched anyway. 
+ */ + if ((unsigned long)va >= TASK_SIZE) + goto give_sigsegv; + + if ((1L << opcode) & OP_INT_MASK) { + /* it's an integer load/store */ + if (reg < 31) { + reg_addr = ®s->regs[reg]; + } else { + /* zero "register" */ + fake_reg = 0; + } + } + + get_user(instr, (__u32 *)(regs->pc - 4)); + instr_op = (instr >> 26) & 0x3f; + + get_user(value, (__u64 *)va); + + switch (instr_op) { + + case 0x0c: /* vlds */ + if ((unsigned long)va << 61 == 0) { + __asm__ __volatile__( + "1: ldl %1, 0(%5)\n" + "2: ldl %2, 8(%5)\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + sw64_write_simd_fp_reg_s(reg, tmp1, tmp2); + + return; + } else { + __asm__ __volatile__( + "1: ldl_u %1, 0(%6)\n" + "2: ldl_u %2, 7(%6)\n" + "3: ldl_u %3, 15(%6)\n" + " extll %1, %6, %1\n" + " extll %2, %6, %5\n" + " exthl %2, %6, %4\n" + " exthl %3, %6, %3\n" + "4:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 4b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 4b-2b(%0)\n" + " .long 3b - .\n" + " ldi %3, 4b-3b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp1 = tmp1 | tmp4; + tmp2 = tmp5 | tmp3; + + sw64_write_simd_fp_reg_s(reg, tmp1, tmp2); + + return; + } + case 0x0a: /* ldse */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp = tmp1 | tmp2; + tmp = tmp | (tmp << 32); + + sw64_write_simd_fp_reg_s(reg, tmp, tmp); + + return; + + 
case 0x0d: /* vldd */ + if ((unsigned long)va << 61 == 0) { + __asm__ __volatile__( + "1: ldl %1, 0(%5)\n" + "2: ldl %2, 8(%5)\n" + "3: ldl %3, 16(%5)\n" + "4: ldl %4, 24(%5)\n" + "5:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 5b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 5b-2b(%0)\n" + " .long 3b - .\n" + " ldi %3, 5b-3b(%0)\n" + " .long 4b - .\n" + " ldi %4, 5b-4b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + sw64_write_simd_fp_reg_d(reg, tmp1, tmp2, tmp3, tmp4); + + return; + } else { + __asm__ __volatile__( + "1: ldl_u %1, 0(%6)\n" + "2: ldl_u %2, 7(%6)\n" + "3: ldl_u %3, 15(%6)\n" + " extll %1, %6, %1\n" + " extll %2, %6, %5\n" + " exthl %2, %6, %4\n" + " exthl %3, %6, %3\n" + "4:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 4b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 4b-2b(%0)\n" + " .long 3b - .\n" + " ldi %3, 4b-3b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp7 = tmp1 | tmp4; //f0 + tmp8 = tmp5 | tmp3; //f1 + + vb = ((unsigned long)(va))+16; + + __asm__ __volatile__( + "1: ldl_u %1, 0(%6)\n" + "2: ldl_u %2, 7(%6)\n" + "3: ldl_u %3, 15(%6)\n" + " extll %1, %6, %1\n" + " extll %2, %6, %5\n" + " exthl %2, %6, %4\n" + " exthl %3, %6, %3\n" + "4:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 4b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 4b-2b(%0)\n" + " .long 3b - .\n" + " ldi %3, 4b-3b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5) + : "r"(vb), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp = tmp1 | tmp4; // f2 + tmp2 = tmp5 | tmp3; // f3 + + sw64_write_simd_fp_reg_d(reg, tmp7, tmp8, tmp, tmp2); + return; + } + + case 0x0b: /* ldde */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" 
+ " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp = tmp1 | tmp2; + + sw64_write_simd_fp_reg_d(reg, tmp, tmp, tmp, tmp); + return; + + case 0x09: /* ldwe */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + sw64_write_simd_fp_reg_ldwe(reg, (int)(tmp1 | tmp2)); + + return; + + case 0x0e: /* vsts */ + sw64_read_simd_fp_m_s(reg, fp); + if ((unsigned long)va << 61 == 0) { + __asm__ __volatile__( + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "r"(fp[0]), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } else { + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " 
ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(fp[0]), "0"(0)); + + if (error) + goto give_sigsegv; + + + vb = ((unsigned long)va) + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } + + case 0x0f: /* vstd */ + sw64_read_simd_fp_m_d(reg, fp); + if ((unsigned long)va << 61 == 0) { + __asm__ __volatile__( + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl 
%2, 8(%3)\n" + "3:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "r"(fp[0]), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = ((unsigned long)va)+16; + + + __asm__ __volatile__( + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(vb), "r"(fp[2]), "r"(fp[3]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } else { + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(fp[0]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = ((unsigned long)va) + 8; + + __asm__ __volatile__( + " 
zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = vb + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " 
.long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[2]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = vb + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[3]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } + } + switch (opcode) { + case 0x21: /* ldhu */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 1(%3)\n" + " extlh %1, %3, %1\n" + " exthh %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + 
if (error) + goto give_sigsegv; + *reg_addr = tmp1 | tmp2; + break; + + case 0x26: /* flds */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + sw64_write_fp_reg_s(reg, tmp1 | tmp2); + return; + + case 0x27: /* fldd */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + sw64_write_fp_reg(reg, tmp1 | tmp2); + return; + + case 0x22: /* ldw */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + *reg_addr = (int)(tmp1 | tmp2); + break; + + case 0x23: /* ldl */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + *reg_addr = tmp1 | tmp2; + break; + + /* Note that the store sequences do not indicate that they change + * memory because it _should_ be affecting nothing in this context. 
+ * (Otherwise we have other, much larger, problems.) + */ + case 0x29: /* sth with stb */ + __asm__ __volatile__( + " zap %6, 2, %1\n" + " srl %6, 8, %2\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %2, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %1, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(*reg_addr), "0"(0)); + + if (error) + goto give_sigsegv; + return; + + case 0x2e: /* fsts*/ + fake_reg = sw64_read_fp_reg_s(reg); + fallthrough; + + case 0x2a: /* stw with stb*/ + __asm__ __volatile__( + " zapnot %6, 0x1, %1\n" + " srl %6, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %6, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %6, 24, %4\n" + " zapnot %4, 0x1, %4\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3: stb %3, 0x2(%5)\n" + "4: stb %4, 0x3(%5)\n" + "5:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi $31, 5b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 5b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 5b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 5b-4b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(*reg_addr), "0"(0)); + + if (error) + goto give_sigsegv; + return; + + case 0x2f: /* fstd */ + fake_reg = sw64_read_fp_reg(reg); + fallthrough; + + case 0x2b: /* stl */ + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + 
".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(*reg_addr), "0"(0)); + + if (error) + goto give_sigsegv; + return; + + default: + /* What instruction were you trying to use, exactly? */ + goto give_sigbus; + } + + return; + +give_sigsegv: + regs->pc -= 4; /* make pc point to faulting insn */ + + /* We need to replicate some of the logic in mm/fault.c, + * since we don't have access to the fault code in the + * exception handling return path. + */ + if ((unsigned long)va >= TASK_SIZE) + si_code = SEGV_ACCERR; + else { + struct mm_struct *mm = current->mm; + + down_read(&mm->mmap_lock); + if (find_vma(mm, (unsigned long)va)) + si_code = SEGV_ACCERR; + else + si_code = SEGV_MAPERR; + up_read(&mm->mmap_lock); + } + force_sig_fault(SIGSEGV, si_code, va); + return; + +give_sigbus: + regs->pc -= 4; + force_sig_fault(SIGBUS, BUS_ADRALN, va); +} + +asmlinkage void do_entSys(struct pt_regs *regs) +{ + long ret = -ENOSYS; + unsigned long nr; + unsigned long ti_flags = current_thread_info()->flags; + + regs->orig_r0 = regs->regs[0]; + regs->orig_r19 = regs->regs[19]; + nr = regs->regs[0]; + + if (ti_flags & _TIF_SYSCALL_WORK) { + nr = syscall_trace_enter(); + if (nr == NO_SYSCALL) + goto syscall_out; + regs->orig_r0 = regs->regs[0]; + regs->orig_r19 = regs->regs[19]; + } + + if (nr < __NR_syscalls) { + syscall_fn_t syscall_fn = sys_call_table[nr]; + + ret = syscall_fn(regs->regs[16], regs->regs[17], regs->regs[18], + regs->regs[19], regs->regs[20], regs->regs[21]); + } + + if ((nr 
!= __NR_sigreturn) && (nr != __NR_rt_sigreturn)) { + if (likely((ret >= 0) || regs->orig_r0 == NO_SYSCALL)) + syscall_set_return_value(current, regs, 0, ret); + else + syscall_set_return_value(current, regs, ret, 0); + } + +syscall_out: + rseq_syscall(regs); + + if (ti_flags & _TIF_SYSCALL_WORK) + syscall_trace_leave(); +} + +void +trap_init(void) +{ + /* Tell HMcode what global pointer we want in the kernel. */ + register unsigned long gptr __asm__("$29"); + wrkgp(gptr); + + wrent(entArith, 1); + wrent(entMM, 2); + wrent(entIF, 3); + wrent(entUna, 4); + wrent(entSys, 5); +#ifdef CONFIG_EFI + if (smp_processor_id() == 0) + wrent((void *)entSuspend, 6); +#endif +} diff --git a/arch/sw_64/kernel/unaligned.c b/arch/sw_64/kernel/unaligned.c new file mode 100644 index 000000000000..40a17fb9cbd2 --- /dev/null +++ b/arch/sw_64/kernel/unaligned.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include +#include +#include + +unsigned long unaligned_count; +char unaligned_task[TASK_COMM_LEN]; +struct unaligned_stat unaligned[UNA_MAX_ENTRIES]; + +static ssize_t unaligned_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + size_t size; + + unaligned_count = 0; + size = min(sizeof(unaligned_task), len); + if (copy_from_user(unaligned_task, user_buf, size)) + return -EFAULT; + unaligned_task[size - 1] = '\0'; + + return len; +} + +static int unaligned_show(struct seq_file *m, void *v) +{ + int i, idx, nr; + + if (!unaligned_task[0]) { + seq_puts(m, "No task traced\n"); + return 0; + } + seq_printf(m, "Task command:\t\t%s\n", unaligned_task); + seq_printf(m, "Unaligned count:\t%ld\n", unaligned_count); + if (!unaligned_count) + return 0; + nr = 0; + idx = unaligned_count % UNA_MAX_ENTRIES; + seq_printf(m, "Latest %d unaligned stat:\nNo.\tVA\t\tPC\n", UNA_MAX_ENTRIES); + if (unaligned_count >= UNA_MAX_ENTRIES) { + for (i = idx; i < UNA_MAX_ENTRIES; i++) + seq_printf(m, "%d\t%#lx\t%#lx\n", + nr++, unaligned[i].va, 
unaligned[i].pc); + } + for (i = 0; i < idx; i++) + seq_printf(m, "%d\t%#lx\t%#lx\n", + nr++, unaligned[i].va, unaligned[i].pc); + return 0; +} + +static int unaligned_open(struct inode *inode, struct file *file) +{ + return single_open(file, unaligned_show, NULL); +} + +static const struct file_operations unaligned_fops = { + .read = seq_read, + .write = unaligned_set, + .open = unaligned_open, + .llseek = default_llseek, +}; + +static int __init unaligned_init(void) +{ + struct dentry *unaligned; + + if (!sw64_debugfs_dir) + return -ENODEV; + + unaligned = debugfs_create_file("unaligned", 0644, + sw64_debugfs_dir, NULL, + &unaligned_fops); + if (!unaligned) + return -ENOMEM; + + return 0; +} + +late_initcall(unaligned_init); -- Gitee From a7485e40ff5574ebccdfe3320c17ee0f6ef1f3a8 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:31 +0800 Subject: [PATCH 0283/2138] anolis: sw64: add process management ANBZ: #4688 Add process management support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/current.h | 19 + arch/sw_64/include/asm/processor.h | 100 ++++ arch/sw_64/include/asm/ptrace.h | 92 +++ arch/sw_64/include/asm/switch_to.h | 60 ++ arch/sw_64/include/asm/thread_info.h | 148 +++++ arch/sw_64/include/uapi/asm/ptrace.h | 56 ++ arch/sw_64/kernel/idle.c | 35 ++ arch/sw_64/kernel/process.c | 109 ++++ arch/sw_64/kernel/ptrace.c | 858 +++++++++++++++++++++++++++ 9 files changed, 1477 insertions(+) create mode 100644 arch/sw_64/include/asm/current.h create mode 100644 arch/sw_64/include/asm/processor.h create mode 100644 arch/sw_64/include/asm/ptrace.h create mode 100644 arch/sw_64/include/asm/switch_to.h create mode 100644 arch/sw_64/include/asm/thread_info.h create mode 100644 arch/sw_64/include/uapi/asm/ptrace.h create mode 100644 arch/sw_64/kernel/idle.c create mode 100644 
arch/sw_64/kernel/process.c create mode 100644 arch/sw_64/kernel/ptrace.c diff --git a/arch/sw_64/include/asm/current.h b/arch/sw_64/include/asm/current.h new file mode 100644 index 000000000000..862caabb9c70 --- /dev/null +++ b/arch/sw_64/include/asm/current.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CURRENT_H +#define _ASM_SW64_CURRENT_H + +#ifndef __ASSEMBLY__ + +struct task_struct; +static __always_inline struct task_struct *get_current(void) +{ + register struct task_struct *tp __asm__("$8"); + + return tp; +} + +#define current get_current() + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_SW64_CURRENT_H */ diff --git a/arch/sw_64/include/asm/processor.h b/arch/sw_64/include/asm/processor.h new file mode 100644 index 000000000000..ec68fe6cc6f2 --- /dev/null +++ b/arch/sw_64/include/asm/processor.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/asm-sw64/processor.h + * + * Copyright (C) 1994 Linus Torvalds + */ + +#ifndef _ASM_SW64_PROCESSOR_H +#define _ASM_SW64_PROCESSOR_H + +#include /* for ADDR_LIMIT_32BIT */ +#include + +#define task_pt_regs(task) \ + ((struct pt_regs *) (task->stack + THREAD_SIZE) - 1) + +/* + * Returns current instruction pointer ("program counter"). + */ +#define current_text_addr() \ + ({ void *__pc; __asm__ ("br %0, .+4" : "=r"(__pc)); __pc; }) + +/* + * SW64 does have an arch_pick_mmap_layout() + */ +#define HAVE_ARCH_PICK_MMAP_LAYOUT 1 + +/* + * We have a 52-bit user address space: 4PB user VM... + */ +#define TASK_SIZE (0x10000000000000UL) +#define UNMAPPED_BASE (TASK_SIZE >> 6) +#define STACK_TOP \ + (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL) + +#define STACK_TOP_MAX 0x00120000000UL + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE \ + ((current->personality & ADDR_LIMIT_32BIT) ? 
0x40000000 : UNMAPPED_BASE) + +struct thread_struct { + struct user_fpsimd_state fpstate; + /* Callee-saved registers */ + unsigned long ra; + unsigned long sp; + unsigned long s[7]; /* s0 ~ s6 */ +}; +#define INIT_THREAD { } + +struct task_struct; +struct pt_regs; + +/* Do necessary setup to start up a newly executed thread. */ +extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp); + +/* Free all resources held by a thread. */ +extern void release_thread(struct task_struct *dead_task); + +unsigned long __get_wchan(struct task_struct *p); + +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) + +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[30]) + +#define cpu_relax() barrier() + +#define ARCH_HAS_PREFETCH +#define ARCH_HAS_PREFETCHW +#define ARCH_HAS_SPINLOCK_PREFETCH + +#ifndef CONFIG_SMP +/* Nothing to prefetch. */ +#define spin_lock_prefetch(lock) do { } while (0) +#endif + +static inline void prefetch(const void *ptr) +{ + __builtin_prefetch(ptr, 0, 3); +} + +static inline void prefetchw(const void *ptr) +{ + __builtin_prefetch(ptr, 1, 3); +} + +#ifdef CONFIG_SMP +static inline void spin_lock_prefetch(const void *ptr) +{ + __builtin_prefetch(ptr, 1, 3); +} +#endif + +static inline void wait_for_interrupt(void) +{ + __asm__ __volatile__ ("halt"); +} +#endif /* _ASM_SW64_PROCESSOR_H */ diff --git a/arch/sw_64/include/asm/ptrace.h b/arch/sw_64/include/asm/ptrace.h new file mode 100644 index 000000000000..964f4fc730f2 --- /dev/null +++ b/arch/sw_64/include/asm/ptrace.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PTRACE_H +#define _ASM_SW64_PTRACE_H + +#include +#include +#include + +#define NO_SYSCALL _AC(-1, UL) + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +/* + * This struct defines the way the registers are stored on the + * kernel stack during a system call or other kernel entry + */ + +struct pt_regs { + union { + struct user_pt_regs user_regs; + struct { + unsigned long regs[31]; + unsigned long pc; + 
unsigned long ps; + }; + }; + unsigned long orig_r0; + unsigned long orig_r19; + /* These are saved by HMcode: */ + unsigned long hm_ps; + unsigned long hm_pc; + unsigned long hm_gp; + unsigned long hm_r16; + unsigned long hm_r17; + unsigned long hm_r18; +}; + +#define arch_has_single_step() (1) +#define user_mode(regs) (((regs)->ps & 8) != 0) +#define instruction_pointer(regs) ((regs)->pc) +#define profile_pc(regs) instruction_pointer(regs) +#define user_stack_pointer(pt_regs) ((pt_regs)->regs[30]) +#define kernel_stack_pointer(regs) ((unsigned long)((regs) + 1)) +#define instruction_pointer_set(regs, val) ((regs)->pc = val) + +#define force_successful_syscall_return() (current_pt_regs()->orig_r0 = NO_SYSCALL) + +#define MAX_REG_OFFSET (offsetof(struct pt_regs, orig_r0)) + +extern short regoffsets[]; + +extern unsigned long syscall_trace_enter(void); +extern void syscall_trace_leave(void); + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten + * @offset: offset of the register. + * + * regs_get_register returns the value of a register whose offset from @regs. + * The @offset is the offset of the register in struct pt_regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. 
+ */ +static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + + return *(unsigned long *)((unsigned long)regs + offset); +} +extern int regs_query_register_offset(const char *name); +extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n); + +static inline int is_syscall_success(struct pt_regs *regs) +{ + return !regs->regs[19]; +} + +static inline long regs_return_value(struct pt_regs *regs) +{ + if ((regs->orig_r0 == NO_SYSCALL) || is_syscall_success(regs)) + return regs->regs[0]; + else + return -regs->regs[0]; +} + +#endif /* !__ASSEMBLY__ */ +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_PTRACE_H */ diff --git a/arch/sw_64/include/asm/switch_to.h b/arch/sw_64/include/asm/switch_to.h new file mode 100644 index 000000000000..5e2db4b9e266 --- /dev/null +++ b/arch/sw_64/include/asm/switch_to.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SWITCH_TO_H +#define _ASM_SW64_SWITCH_TO_H + +#include + +extern void __fpstate_save(struct task_struct *save_to); +extern void __fpstate_restore(struct task_struct *restore_from); +extern struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next); +extern void restore_da_match_after_sched(void); + +static inline void aux_save(struct task_struct *task) +{ + struct pcb_struct *pcb; + + if (likely(!(task->flags & PF_KTHREAD))) { + pcb = &task_thread_info(task)->pcb; + pcb->tp = rtid(); + __fpstate_save(task); + } +} + +static inline void aux_restore(struct task_struct *task) +{ + struct pcb_struct *pcb; + + if (likely(!(task->flags & PF_KTHREAD))) { + pcb = &task_thread_info(task)->pcb; + wrtp(pcb->tp); + __fpstate_restore(task); + } +} + +static inline void __switch_to_aux(struct task_struct *prev, + struct task_struct *next) +{ + aux_save(prev); + aux_restore(next); +} + + +#define switch_to(prev, next, last) \ +do { \ + struct task_struct *__prev = (prev); \ + 
struct task_struct *__next = (next); \ + __switch_to_aux(__prev, __next); \ + (last) = __switch_to(__prev, __next); \ +} while (0) + + +/* TODO: finish_arch_switch has been removed from arch-independent code. */ + +/* + * finish_arch_switch will be called after switch_to + */ +#define finish_arch_post_lock_switch restore_da_match_after_sched + + +#endif /* _ASM_SW64_SWITCH_TO_H */ diff --git a/arch/sw_64/include/asm/thread_info.h b/arch/sw_64/include/asm/thread_info.h new file mode 100644 index 000000000000..4f3b837e2e90 --- /dev/null +++ b/arch/sw_64/include/asm/thread_info.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_THREAD_INFO_H +#define _ASM_SW64_THREAD_INFO_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include +#include + +typedef struct { + unsigned long seg; +} mm_segment_t; + + +struct pcb_struct { + unsigned long tp; + unsigned long da_match, da_mask; + unsigned long dv_match, dv_mask; + union { + unsigned long dc_ctl; + unsigned long match_ctl; + }; + unsigned long ia_match, ia_mask; + unsigned long iv_match; + unsigned long ida_match, ida_mask; +}; + +struct thread_info { + struct pcb_struct pcb; /* hmcode state */ + + unsigned int flags; /* low level flags */ + unsigned int ieee_state; /* see fpu.h */ + + mm_segment_t addr_limit; /* thread address space */ + unsigned int cpu; /* current CPU */ + int preempt_count; /* 0 => preemptible, <0 => BUG */ + unsigned int status; /* thread-synchronous flags */ + + int bpt_nsaved; + unsigned long bpt_addr[2]; /* breakpoint handling */ + unsigned int bpt_insn[2]; +#ifdef CONFIG_DYNAMIC_FTRACE + unsigned long dyn_ftrace_addr; +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + unsigned long dyn_ftrace_regs_addr; +#endif +#endif +}; + +static __always_inline u64 rtid(void) +{ + u64 val; + + asm volatile("rtid %0" : "=r" (val) : :); + return val; +} + +/* + * Macros/functions for gaining access to the thread information structure. 
+ */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .addr_limit = KERNEL_DS, \ + .preempt_count = INIT_PREEMPT_COUNT, \ +} + + +#endif /* __ASSEMBLY__ */ + +/* Thread information allocation. */ +#define THREAD_SIZE_ORDER 1 +#define THREAD_SIZE (2 * PAGE_SIZE) + +/* + * Thread information flags: + * - these are process state flags and used from assembly + * - pending work-to-be-done flags come first and must be assigned to be + * within bits 0 to 7 to fit in and immediate operand. + * + * TIF_SYSCALL_TRACE is known to be 0 via blbs. + */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ +#define TIF_SIGPENDING 2 /* signal pending */ +#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#define TIF_SYSCALL_AUDIT 4 /* syscall audit active */ +#define TIF_UPROBE 5 /* uprobe breakpoint or singlestep */ +#define TIF_PATCH_PENDING 6 /* pending live patching update */ +#define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ +#define TIF_DIE_IF_KERNEL 9 /* dik recursion lock */ +#define TIF_SYSCALL_TRACEPOINT 10 +#define TIF_SECCOMP 11 /* secure computing */ +#define TIF_MEMDIE 13 /* is terminating due to OOM killer */ +#define TIF_POLLING_NRFLAG 14 /* idle is polling for TIF_NEED_RESCHED */ + +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) +#define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) +#define _TIF_UPROBE (1 << TIF_UPROBE) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) + +/* Work to do on interrupt/exception return. 
*/ +#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL) + +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) + +/* Work to do on any return to userspace. */ +#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_SYSCALL_TRACE) + +#define TS_UAC_NOPRINT 0x0001 /* ! Preserve the following three */ +#define TS_UAC_NOFIX 0x0002 /* ! flags as they match */ +#define TS_UAC_SIGBUS 0x0004 /* ! userspace part of 'prctl' */ + +#define SET_UNALIGN_CTL(task, value) ({ \ + __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \ + if (value & PR_UNALIGN_NOPRINT) \ + status |= TS_UAC_NOPRINT; \ + if (value & PR_UNALIGN_SIGBUS) \ + status |= TS_UAC_SIGBUS; \ + if (value & PR_NOFIX) /* sw-specific */ \ + status |= TS_UAC_NOFIX; \ + task_thread_info(task)->status = status; \ + 0; }) + +#define GET_UNALIGN_CTL(task, value) ({ \ + __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \ + __u32 res = 0; \ + if (status & TS_UAC_NOPRINT) \ + res |= PR_UNALIGN_NOPRINT; \ + if (status & TS_UAC_SIGBUS) \ + res |= PR_UNALIGN_SIGBUS; \ + if (status & TS_UAC_NOFIX) \ + res |= PR_NOFIX; \ + put_user(res, (int __user *)(value)); \ + }) + +#endif /* __KERNEL__ */ +#endif /* _ASM_SW64_THREAD_INFO_H */ diff --git a/arch/sw_64/include/uapi/asm/ptrace.h b/arch/sw_64/include/uapi/asm/ptrace.h new file mode 100644 index 000000000000..3fd53450e418 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/ptrace.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_PTRACE_H +#define _UAPI_ASM_SW64_PTRACE_H + +#include + +#ifndef __ASSEMBLY__ +/* + * User structures for general purpose, floating point and debug registers. 
+ */ +struct user_pt_regs { + __u64 regs[31]; + __u64 pc; + __u64 pstate; +}; + +/* 256 bits aligned for simd */ +struct fpreg { + __u64 v[4] __attribute__((aligned(32))); +}; + +struct user_fpsimd_state { + struct fpreg fp[31]; + __u64 fpcr; + __u64 __reserved[3]; +}; +#endif + +/* PTRACE_ATTACH is 16 */ +/* PTRACE_DETACH is 17 */ + +#define PT_REG_BASE 0 +#define PT_REG_END 30 +#define PT_FPREG_BASE 32 +#define PT_FPREG_END 62 +#define PT_FPCR 63 +#define PT_PC 64 +#define PT_TP 65 +#define PT_UNIQUE PT_TP +#define PT_VECREG_BASE 67 +#define PT_VECREG_END 161 +#define PT_F31_V1 98 +#define PT_F31_V2 130 +#define PT_DA_MATCH 163 +#define PT_DA_MASK 164 +#define PT_DV_MATCH 165 +#define PT_DV_MASK 166 +#define PT_DC_CTL 167 +#define PT_MATCH_CTL 167 +#define PT_IA_MATCH 168 +#define PT_IA_MASK 169 +#define PT_IV_MATCH 170 +#define PT_IDA_MATCH 171 +#define PT_IDA_MASK 172 + +#endif /* _UAPI_ASM_SW64_PTRACE_H */ diff --git a/arch/sw_64/kernel/idle.c b/arch/sw_64/kernel/idle.c new file mode 100644 index 000000000000..d26bdc405b53 --- /dev/null +++ b/arch/sw_64/kernel/idle.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sw64 idle loop support. + * + */ +#include +#include +#include +#include +#include + +void arch_cpu_idle(void) +{ + local_irq_enable(); + cpu_relax(); + + if (is_in_guest()) { + if (!need_resched()) + hcall(HCALL_HALT, 0, 0, 0); + } else { + asm( + ".globl __idle_start\n" + "__idle_start = .\n" + "ldw $1, %0($8)\n" + "srl $1, %1, $1\n" + "blbs $1, $need_resched\n" + "halt\n" + ".globl __idle_end\n" + "__idle_end = .\n" + "$need_resched:" + :: "i"(TI_FLAGS), "i"(TIF_NEED_RESCHED) + : "$1"); + } + local_irq_disable(); +} diff --git a/arch/sw_64/kernel/process.c b/arch/sw_64/kernel/process.c new file mode 100644 index 000000000000..fa58a0de4368 --- /dev/null +++ b/arch/sw_64/kernel/process.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file handles the architecture-dependent parts of process handling. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "proto.h" + +/* + * Re-start a thread when doing execve() + */ +void +start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) +{ + regs->pc = pc; + regs->ps = 8; + regs->regs[30] = sp; +} +EXPORT_SYMBOL(start_thread); + + +void +flush_thread(void) +{ + /* Arrange for each exec'ed process to start off with a clean slate + * with respect to the FPU. This is all exceptions disabled. + */ + current_thread_info()->ieee_state = 0; + wrfpcr(FPCR_INIT | ieee_swcr_to_fpcr(0)); + + /* Clean slate for TLS. */ + current_thread_info()->pcb.tp = 0; +} + +void +release_thread(struct task_struct *dead_task) +{ +} + +int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) +{ + /* + * aux_save() has to read the current TLS pointer from CSR:TID as it + * may be out-of-sync with the saved value. + */ + aux_save(src); + *dst = *src; + return 0; +} + +/* + * Copy architecture-specific thread state + */ + +int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) +{ + unsigned long clone_flags = args->flags; + unsigned long usp = args->stack; + unsigned long tls = args->tls; + struct thread_info *childti = task_thread_info(p); + struct pt_regs *childregs = task_pt_regs(p); + struct pt_regs *regs = current_pt_regs(); + + extern void ret_from_fork(void); + extern void ret_from_kernel_thread(void); + + p->thread.sp = (unsigned long) childregs; + + if (unlikely(args->fn)) { + /* kernel thread */ + memset(childregs, 0, sizeof(struct pt_regs)); + p->thread.ra = (unsigned long) ret_from_kernel_thread; + p->thread.s[0] = (unsigned long) args->fn; /* function */ + p->thread.s[1] = (unsigned long) args->fn_arg; + return 0; + } + + /* + * Note: if CLONE_SETTLS is not set, then we must inherit the + * value from the parent, which will have been set by the block + * copy in dup_task_struct. 
This is non-intuitive, but is + * required for proper operation in the case of a threaded + * application calling fork. + */ + if (clone_flags & CLONE_SETTLS) + childti->pcb.tp = tls; + else + regs->regs[20] = 0; + *childregs = *regs; + if (usp) + childregs->regs[30] = usp; + syscall_set_return_value(NULL, childregs, 0, 0); + p->thread.ra = (unsigned long) ret_from_fork; + return 0; +} + +unsigned long arch_randomize_brk(struct mm_struct *mm) +{ + return randomize_page(mm->brk, 0x02000000); +} diff --git a/arch/sw_64/kernel/ptrace.c b/arch/sw_64/kernel/ptrace.c new file mode 100644 index 000000000000..070e27ee2567 --- /dev/null +++ b/arch/sw_64/kernel/ptrace.c @@ -0,0 +1,858 @@ +// SPDX-License-Identifier: GPL-2.0 +/* ptrace.c */ +/* By Ross Biro 1/23/92 */ +/* edited by Linus Torvalds */ +/* mangled further by Bob Manson (manson@santafe.edu) */ +/* more mutilation by David Mosberger (davidm@azstarnet.com) */ + +#include +#include +#include +#include + +#include + +#include "proto.h" +#include + +#define CREATE_TRACE_POINTS +#include + +#define BREAKINST 0x00000080 /* sys_call bpt */ + +/* + * does not yet catch signals sent when the child dies. + * in exit.c or in signal.c. + */ + +/* + * Processes always block with the following stack-layout: + * + * +================================+ <---- task + 2*PAGE_SIZE + * | HMcode saved frame (ps, pc, | ^ + * | gp, a0, a1, a2) | | + * +================================+ | struct pt_regs + * | | | + * | frame generated by SAVE_ALL | | + * | | v + * +================================+ + */ + +/* + * The following table maps a register index into the stack offset at + * which the register is saved. Register indices are 0-31 for integer + * regs, 32-63 for fp regs, and 64 for the pc. Notice that sp and + * zero have no stack-slot and need to be treated specially (see + * get_reg/put_reg below). 
+ */ +#define PCB_OFF(var) offsetof(struct pcb_struct, var) + +static int pcboff[] = { + [PT_TP] = PCB_OFF(tp), + [PT_DA_MATCH] = PCB_OFF(da_match), + [PT_DA_MASK] = PCB_OFF(da_mask), + [PT_DV_MATCH] = PCB_OFF(dv_match), + [PT_DV_MASK] = PCB_OFF(dv_mask), + [PT_DC_CTL] = PCB_OFF(dc_ctl), + [PT_MATCH_CTL] = PCB_OFF(match_ctl), + [PT_IA_MATCH] = PCB_OFF(ia_match), + [PT_IA_MASK] = PCB_OFF(ia_mask), + [PT_IV_MATCH] = PCB_OFF(iv_match), + [PT_IDA_MATCH] = PCB_OFF(ida_match), + [PT_IDA_MASK] = PCB_OFF(ida_mask) +}; + +static unsigned long zero; + +/* + * Get address of register REGNO in task TASK. + */ + +static unsigned long * +get_reg_addr(struct task_struct *task, unsigned long regno) +{ + void *addr; + int fno, vno; + + switch (regno) { + case PT_UNIQUE: + case PT_DA_MATCH: + case PT_DA_MASK: + case PT_DV_MATCH: + case PT_DV_MASK: + case PT_MATCH_CTL: + case PT_IA_MATCH: + case PT_IA_MASK: + case PT_IV_MATCH: + case PT_IDA_MATCH: + case PT_IDA_MASK: + addr = (void *)task_thread_info(task) + pcboff[regno]; + break; + case PT_REG_BASE ... PT_REG_END: + addr = &task_pt_regs(task)->regs[regno]; + break; + case PT_FPREG_BASE ... PT_FPREG_END: + fno = regno - PT_FPREG_BASE; + addr = &task->thread.fpstate.fp[fno].v[0]; + break; + case PT_VECREG_BASE ... PT_VECREG_END: + /* + * return addr for zero value if we catch vectors of f31 + * v0 and v3 of f31 are not in this range so ignore them + */ + if (regno == PT_F31_V1 || regno == PT_F31_V2) { + addr = &zero; + break; + } + fno = (regno - PT_VECREG_BASE) & 0x1f; + vno = 1 + ((regno - PT_VECREG_BASE) >> 5); + addr = &task->thread.fpstate.fp[fno].v[vno]; + break; + case PT_FPCR: + addr = &task->thread.fpstate.fpcr; + break; + case PT_PC: + addr = (void *)task_pt_regs(task) + PT_REGS_PC; + break; + default: + addr = &zero; + } + + return addr; +} + +/* + * Get contents of register REGNO in task TASK. 
+ */ +unsigned long +get_reg(struct task_struct *task, unsigned long regno) +{ + return *get_reg_addr(task, regno); +} + +/* + * Write contents of register REGNO in task TASK. + */ +static int +put_reg(struct task_struct *task, unsigned long regno, unsigned long data) +{ + *get_reg_addr(task, regno) = data; + return 0; +} + +static inline int +read_int(struct task_struct *task, unsigned long addr, int *data) +{ + int copied = access_process_vm(task, addr, data, sizeof(int), FOLL_FORCE); + + return (copied == sizeof(int)) ? 0 : -EIO; +} + +static inline int +write_int(struct task_struct *task, unsigned long addr, int data) +{ + int copied = access_process_vm(task, addr, &data, sizeof(int), + FOLL_FORCE | FOLL_WRITE); + return (copied == sizeof(int)) ? 0 : -EIO; +} + +/* + * Set breakpoint. + */ +int +ptrace_set_bpt(struct task_struct *child) +{ + int displ, i, res, reg_b, nsaved = 0; + unsigned int insn, op_code; + unsigned long pc; + + pc = get_reg(child, PT_PC); + res = read_int(child, pc, (int *)&insn); + if (res < 0) + return res; + + op_code = insn >> 26; + /* br bsr beq bne blt ble bgt bge blbc blbs fbeq fbne fblt fble fbgt fbge */ + if ((1UL << op_code) & 0x3fff000000000030UL) { + /* + * It's a branch: instead of trying to figure out + * whether the branch will be taken or not, we'll put + * a breakpoint at either location. This is simpler, + * more reliable, and probably not a whole lot slower + * than the alternative approach of emulating the + * branch (emulation can be tricky for fp branches). 
+ */ + displ = ((s32)(insn << 11)) >> 9; + task_thread_info(child)->bpt_addr[nsaved++] = pc + 4; + if (displ) /* guard against unoptimized code */ + task_thread_info(child)->bpt_addr[nsaved++] + = pc + 4 + displ; + /*call ret jmp*/ + } else if (op_code >= 0x1 && op_code <= 0x3) { + reg_b = (insn >> 16) & 0x1f; + task_thread_info(child)->bpt_addr[nsaved++] = get_reg(child, reg_b); + } else { + task_thread_info(child)->bpt_addr[nsaved++] = pc + 4; + } + + /* install breakpoints: */ + for (i = 0; i < nsaved; ++i) { + res = read_int(child, task_thread_info(child)->bpt_addr[i], + (int *)&insn); + if (res < 0) + return res; + task_thread_info(child)->bpt_insn[i] = insn; + res = write_int(child, task_thread_info(child)->bpt_addr[i], + BREAKINST); + if (res < 0) + return res; + } + task_thread_info(child)->bpt_nsaved = nsaved; + return 0; +} + +/* + * Ensure no single-step breakpoint is pending. Returns non-zero + * value if child was being single-stepped. + */ +int +ptrace_cancel_bpt(struct task_struct *child) +{ + int i, nsaved = task_thread_info(child)->bpt_nsaved; + + task_thread_info(child)->bpt_nsaved = 0; + + if (nsaved > 2) { + pr_info("%s: bogus nsaved: %d!\n", __func__, nsaved); + nsaved = 2; + } + + for (i = 0; i < nsaved; ++i) { + write_int(child, task_thread_info(child)->bpt_addr[i], + task_thread_info(child)->bpt_insn[i]); + } + return (nsaved != 0); +} + +void user_enable_single_step(struct task_struct *child) +{ + /* Mark single stepping. */ + task_thread_info(child)->bpt_nsaved = -1; +} + +void user_disable_single_step(struct task_struct *child) +{ + ptrace_cancel_bpt(child); +} + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure the single step bit is not set. 
+ */ +void ptrace_disable(struct task_struct *child) +{ + user_disable_single_step(child); +} + +static int gpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + return membuf_write(&to, task_pt_regs(target), sizeof(struct user_pt_regs)); +} + +static int gpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + task_pt_regs(target), 0, sizeof(struct user_pt_regs)); +} + +static int fpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + + return membuf_write(&to, &target->thread.fpstate, + sizeof(struct user_fpsimd_state)); +} + +static int fpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpstate, 0, + sizeof(struct user_fpsimd_state)); +} + +enum sw64_regset { + REGSET_GPR, + REGSET_FPR, +}; + +static const struct user_regset sw64_regsets[] = { + [REGSET_GPR] = { + .core_note_type = NT_PRSTATUS, + .n = ELF_NGREG, + .size = sizeof(elf_greg_t), + .align = sizeof(elf_greg_t), + .regset_get = gpr_get, + .set = gpr_set + }, + [REGSET_FPR] = { + .core_note_type = NT_PRFPREG, + .n = sizeof(struct user_fpsimd_state) / sizeof(u64), + .size = sizeof(u64), + .align = sizeof(u64), + .regset_get = fpr_get, + .set = fpr_set + }, +}; + +static const struct user_regset_view user_sw64_view = { + .name = "sw64", .e_machine = EM_SW64, + .regsets = sw64_regsets, .n = ARRAY_SIZE(sw64_regsets) +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return &user_sw64_view; +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + unsigned long tmp; + size_t copied; + long ret; + 
+ switch (request) { + /* When I and D space are separate, these will need to be fixed. */ + case PTRACE_PEEKTEXT: /* read word at location addr. */ + case PTRACE_PEEKDATA: + copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE); + ret = -EIO; + if (copied != sizeof(tmp)) + break; + + force_successful_syscall_return(); + ret = tmp; + break; + + /* Read register number ADDR. */ + case PTRACE_PEEKUSR: + force_successful_syscall_return(); + ret = get_reg(child, addr); + break; + + /* When I and D space are separate, this will have to be fixed. */ + case PTRACE_POKETEXT: /* write the word at location addr. */ + case PTRACE_POKEDATA: + ret = generic_ptrace_pokedata(child, addr, data); + break; + + case PTRACE_POKEUSR: /* write the specified register */ + ret = put_reg(child, addr, data); + break; + default: + ret = ptrace_request(child, request, addr, data); + break; + } + return ret; +} + +asmlinkage unsigned long syscall_trace_enter(void) +{ + unsigned long ret = 0; + struct pt_regs *regs = current_pt_regs(); + + if (test_thread_flag(TIF_SYSCALL_TRACE) && + ptrace_report_syscall_entry(regs)) + return NO_SYSCALL; + +#ifdef CONFIG_SECCOMP + /* Do seccomp after ptrace, to catch any tracer changes. 
*/ + if (secure_computing() == -1) + return NO_SYSCALL; +#endif + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->regs[0]); + audit_syscall_entry(regs->regs[0], regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19]); + return ret ?: regs->regs[0]; +} + +asmlinkage void +syscall_trace_leave(void) +{ + struct pt_regs *regs = current_pt_regs(); + + audit_syscall_exit(regs); + if (test_thread_flag(TIF_SYSCALL_TRACE)) + ptrace_report_syscall_exit(regs, 0); + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_exit(regs, regs_return_value(regs)); +} + +#ifdef CONFIG_SUBARCH_C3B +static long rwcsr(int rw, unsigned long csr, unsigned long value) +{ + register unsigned long __r0 __asm__("$0"); + register unsigned long __r16 __asm__("$16") = rw; + register unsigned long __r17 __asm__("$17") = csr; + register unsigned long __r18 __asm__("$18") = value; + + __asm__ __volatile__( + "sys_call %4" + : "=r"(__r0), "=r"(__r16), "=r"(__r17), "=r"(__r18) + : "i"(HMC_rwreg), "1"(__r16), "2"(__r17), "3"(__r18) + : "$1", "$22", "$23", "$24", "$25"); + + return __r0; +} + +#define RCSR 0 +#define WCSR 1 + +#define CSR_DA_MATCH 0 +#define CSR_DA_MASK 1 +#define CSR_IA_MATCH 2 +#define CSR_IA_MASK 3 +#define CSR_IDA_MATCH 6 +#define CSR_IDA_MASK 7 +#define CSR_DC_CTL 11 +#define CSR_DV_MATCH 15 +#define CSR_DV_MASK 16 + +#define DV_MATCH_EN_S 19 +#define DAV_MATCH_EN_S 20 + +int do_match(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs) +{ + unsigned long dc_ctl; + unsigned long value; + + pr_info("%s: pid %d, name = %s,cause = %#lx, mmcsr = %#lx, address = %#lx, pc %#lx\n", + __func__, current->pid, current->comm, cause, mmcsr, address, regs->pc); + + switch (mmcsr) { + case MMCSR__DA_MATCH: + case MMCSR__DV_MATCH: + case MMCSR__DAV_MATCH: + show_regs(regs); + + if (!(current->ptrace & PT_PTRACED)) { + pr_notice(" pid %d %s not be ptraced, return\n", current->pid, current->comm); + if (mmcsr == 
MMCSR__DA_MATCH) + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + if (mmcsr == MMCSR__DV_MATCH) { + value = rwcsr(RCSR, CSR_DV_MATCH, 0); + pr_notice("value is %#lx\n", value); + value = rwcsr(RCSR, CSR_DV_MASK, 0); + pr_notice("value is %#lx\n", value); + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~(0x1UL << DV_MATCH_EN_S); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + } + if (mmcsr == MMCSR__DAV_MATCH) { + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + } + task_thread_info(current)->pcb.da_match = 0; + task_thread_info(current)->pcb.dv_match = 0; + task_thread_info(current)->pcb.dc_ctl = 0; + return 1; + } + + if (mmcsr == MMCSR__DA_MATCH) { + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + task_thread_info(current)->pcb.da_match = 0; + } + if (mmcsr == MMCSR__DV_MATCH) { + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~(0x1UL << DV_MATCH_EN_S); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + } + if (mmcsr == MMCSR__DAV_MATCH) { + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + } + task_thread_info(current)->pcb.dv_match = 0; + task_thread_info(current)->pcb.dc_ctl = 0; + pr_notice("do_page_fault: want to send SIGTRAP, pid = %d\n", current->pid); + force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void *) address); + return 1; + + case MMCSR__IA_MATCH: + rwcsr(WCSR, CSR_IA_MATCH, 0); //clear ia_match + return 1; + case MMCSR__IDA_MATCH: + rwcsr(WCSR, CSR_IDA_MATCH, 0); //clear ida_match + return 1; + } + + return 0; +} + +void restore_da_match_after_sched(void) +{ + unsigned long dc_ctl_mode; + unsigned long dc_ctl; + struct pcb_struct *pcb = &task_thread_info(current)->pcb; + + rwcsr(WCSR, CSR_DA_MATCH, 0); + rwcsr(WCSR, CSR_DA_MASK, pcb->da_mask); + rwcsr(WCSR, CSR_DA_MATCH, pcb->da_match); + 
dc_ctl_mode = pcb->dc_ctl; + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + dc_ctl |= ((dc_ctl_mode << DV_MATCH_EN_S) & ((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S))); + if (dc_ctl_mode & 0x1) { + rwcsr(WCSR, CSR_DV_MATCH, pcb->dv_match); + rwcsr(WCSR, CSR_DV_MASK, pcb->dv_mask); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + } +} + +#elif defined(CONFIG_SUBARCH_C4) +int do_match(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs) +{ + kernel_siginfo_t info; + unsigned long match_ctl, ia_match; + sigval_t sw64_value; + + pr_info("%s: pid %d, name = %s, cause = %#lx, mmcsr = %#lx, address = %#lx, pc %#lx\n", + __func__, current->pid, current->comm, cause, mmcsr, address, regs->pc); + + switch (mmcsr) { + case MMCSR__DA_MATCH: + case MMCSR__DV_MATCH: + case MMCSR__DAV_MATCH: + case MMCSR__IA_MATCH: + case MMCSR__IDA_MATCH: + case MMCSR__IV_MATCH: + show_regs(regs); + + if (!(current->ptrace & PT_PTRACED)) { + pr_notice(" pid %d %s not be ptraced, return\n", current->pid, current->comm); + if (mmcsr == MMCSR__DA_MATCH) { + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x3UL << DA_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + task_thread_info(current)->pcb.match_ctl &= ~0x1; + task_thread_info(current)->pcb.da_match = 0; + } + if (mmcsr == MMCSR__DV_MATCH) { + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x1UL << DV_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 1); + task_thread_info(current)->pcb.dv_match = 0; + } + if (mmcsr == MMCSR__DAV_MATCH) { + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + write_csr(0, CSR_DV_MATCH); // clear dv_match 
+ task_thread_info(current)->pcb.match_ctl &= ~(0x1 | (0x1 << 1) | (0x1 << 2)); + task_thread_info(current)->pcb.da_match = 0; + task_thread_info(current)->pcb.dv_match = 0; + } + if (mmcsr == MMCSR__IA_MATCH) { + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IA_MATCH_EN_S) | (0x7ffffffffffffUL << 2)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 3); + task_thread_info(current)->pcb.ia_match = 0; + } + if (mmcsr == MMCSR__IV_MATCH) { + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + write_csr(0, CSR_IV_MATCH); // clear iv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 4); + task_thread_info(current)->pcb.ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + task_thread_info(current)->pcb.iv_match = 0; + } + if (mmcsr == MMCSR__IDA_MATCH) { + write_csr(0, CSR_IDA_MATCH); // clear ida_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 5); + task_thread_info(current)->pcb.ida_match = 0; + } + return 1; + } + + info.si_signo = SIGTRAP; + info.si_addr = (void *) address; + sw64_value.sival_ptr = (void *)(regs->pc); + info.si_value = sw64_value; + info.si_code = TRAP_HWBKPT; + + if (mmcsr == MMCSR__DA_MATCH) { + info.si_errno = 1; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x3UL << DA_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + task_thread_info(current)->pcb.match_ctl &= ~0x1; + task_thread_info(current)->pcb.da_match = 0; + } + if (mmcsr == MMCSR__DV_MATCH) { + info.si_errno = 2; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x1UL << DV_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 1); + task_thread_info(current)->pcb.dv_match = 0; + } + if (mmcsr == MMCSR__DAV_MATCH) 
{ + info.si_errno = 3; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 | (0x1 << 1) | (0x1 << 2)); + task_thread_info(current)->pcb.da_match = 0; + task_thread_info(current)->pcb.dv_match = 0; + } + if (mmcsr == MMCSR__IA_MATCH) { + info.si_errno = 4; + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IA_MATCH_EN_S) | (0x7ffffffffffffUL << 2)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 3); + task_thread_info(current)->pcb.ia_match = 0; + } + if (mmcsr == MMCSR__IV_MATCH) { + info.si_errno = 5; + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + write_csr(0, CSR_IV_MATCH); // clear iv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 4); + task_thread_info(current)->pcb.ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + task_thread_info(current)->pcb.iv_match = 0; + } + if (mmcsr == MMCSR__IDA_MATCH) { + info.si_errno = 6; + write_csr(0, CSR_IDA_MATCH); // clear ida_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 5); + task_thread_info(current)->pcb.ida_match = 0; + } + pr_notice("do_page_fault: want to send SIGTRAP, pid = %d\n", current->pid); + force_sig_info(&info); + return 1; + } + + return 0; +} + +/* + *pcb->match_ctl: + * [0] DA_MATCH + * [1] DV_MATCH + * [2] DAV_MATCH + * [3] IA_MATCH + * [4] IV_MATCH + * [5] IDA_MATCH + * [8:9] match_ctl_mode + * + */ +#define DA_MATCH 0x1 +#define DV_MATCH 0x2 +#define DAV_MATCH 0x4 +#define IA_MATCH 0x8 +#define IV_MATCH 0x10 +#define IDA_MATCH 0x20 + +void restore_da_match_after_sched(void) +{ + unsigned long match_ctl_mode; + 
unsigned long match_ctl; + struct pcb_struct *pcb = &task_thread_info(current)->pcb; + unsigned long vpn, upn; + + if (!pcb->match_ctl) + return; + pr_info("Restroe MATCH status, pid: %d\n", current->pid); + + if (pcb->match_ctl & DA_MATCH) { + write_csr(pcb->da_match, CSR_DA_MATCH); + write_csr(pcb->da_mask, CSR_DA_MASK); + match_ctl_mode = (pcb->match_ctl >> 8) & 0x3; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + match_ctl |= (match_ctl_mode << DA_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) | (0x3UL << DPM_MATCH); + write_csr(match_ctl, CSR_DC_CTLP); + pr_info("da_match:%#lx da_mask:%#lx match_ctl:%#lx\n", pcb->da_match, pcb->da_mask, match_ctl); + } + + if (pcb->match_ctl & DV_MATCH) { + write_csr(pcb->dv_match, CSR_DV_MATCH); + write_csr(pcb->dv_mask, CSR_DV_MASK); + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + match_ctl |= (0x1UL << DV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) | (0x3UL << DPM_MATCH); + write_csr(match_ctl, CSR_DC_CTLP); + pr_info("dv_match:%#lx dv_mask:%#lx match_ctl:%#lx\n", pcb->dv_match, pcb->dv_mask, match_ctl); + } + + if (pcb->match_ctl & DAV_MATCH) { + write_csr(pcb->da_match, CSR_DA_MATCH); + write_csr(pcb->da_mask, CSR_DA_MASK); + write_csr(pcb->dv_match, CSR_DV_MATCH); + write_csr(pcb->dv_mask, CSR_DV_MASK); + write_csr(0xfffffffff, CSR_DA_MATCH_MODE); + match_ctl_mode = (pcb->match_ctl >> 8) & 0x3; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + match_ctl |= (match_ctl_mode << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) + | (0x1UL << DAV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) + | (0x3UL << DPM_MATCH); + write_csr(match_ctl, CSR_DC_CTLP); + pr_info("da_match:%#lx da_mask:%#lx dv_match:%#lx dv_mask:%#lx match_ctl:%#lx\n", + pcb->da_match, 
pcb->da_mask, pcb->dv_match, pcb->dv_mask, match_ctl); + } + + if (pcb->match_ctl & IA_MATCH) { + pcb->ia_match |= (0x1UL << IA_MATCH_EN_S) | 0x3; + pcb->ia_mask |= 0x3; + write_csr(pcb->ia_match, CSR_IA_MATCH); + write_csr(pcb->ia_mask, CSR_IA_MASK); + vpn = read_csr(CSR_VPCR) >> 44; + vpn &= 0x3ff; + upn = read_csr(CSR_UPCR); + upn &= 0x3ff; + write_csr(((0x3ff << 18) | vpn), CSR_IA_VPNMATCH); + write_csr(((0x3ff << 18) | upn), CSR_IA_UPNMATCH); + pr_info("ia_match:%#lx ia_mask:%#lx\n", pcb->ia_match, pcb->ia_mask); + } + if (pcb->match_ctl & IV_MATCH) { + pcb->ia_match |= (0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S) | 0x3; + write_csr(pcb->ia_match, CSR_IA_MATCH); + write_csr(pcb->iv_match, CSR_IV_MATCH); + pr_info("ia_match:%#lx iv_match:%#lx\n", pcb->ia_match, pcb->iv_match); + } + if (pcb->match_ctl & IDA_MATCH) { + pcb->ida_match |= (0x1UL << IDA_MATCH_EN_S) | 0x3; + pcb->ida_mask |= 0x3; + write_csr(pcb->ida_match, CSR_IDA_MATCH); + write_csr(pcb->ida_mask, CSR_IDA_MASK); + pr_info("ida_match:%#lx ida_mask:%#lx\n", pcb->ida_match, pcb->ida_mask); + } +} +#endif + +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define GPR_OFFSET_NAME(r) { \ + .name = "r" #r, \ + .offset = offsetof(struct pt_regs, regs[r]) \ +} + +#define REG_OFFSET_NAME(r) { \ + .name = #r, \ + .offset = offsetof(struct pt_regs, r) \ +} + +#define REG_OFFSET_END { \ + .name = NULL, \ + .offset = 0 \ +} + +static const struct pt_regs_offset regoffset_table[] = { + GPR_OFFSET_NAME(0), + GPR_OFFSET_NAME(1), + GPR_OFFSET_NAME(2), + GPR_OFFSET_NAME(3), + GPR_OFFSET_NAME(4), + GPR_OFFSET_NAME(5), + GPR_OFFSET_NAME(6), + GPR_OFFSET_NAME(7), + GPR_OFFSET_NAME(8), + GPR_OFFSET_NAME(9), + GPR_OFFSET_NAME(10), + GPR_OFFSET_NAME(11), + GPR_OFFSET_NAME(12), + GPR_OFFSET_NAME(13), + GPR_OFFSET_NAME(14), + GPR_OFFSET_NAME(15), + GPR_OFFSET_NAME(16), + GPR_OFFSET_NAME(17), + GPR_OFFSET_NAME(18), + GPR_OFFSET_NAME(19), + GPR_OFFSET_NAME(20), + GPR_OFFSET_NAME(21), + 
GPR_OFFSET_NAME(22), + GPR_OFFSET_NAME(23), + GPR_OFFSET_NAME(24), + GPR_OFFSET_NAME(25), + GPR_OFFSET_NAME(26), + GPR_OFFSET_NAME(27), + GPR_OFFSET_NAME(28), + GPR_OFFSET_NAME(29), + GPR_OFFSET_NAME(30), + REG_OFFSET_NAME(pc), + REG_OFFSET_NAME(ps), + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + unsigned long ksp = kernel_stack_pointer(regs); + + return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs:pt_regs which contains kernel stack pointer. + * @n:stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specifined by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. + */ +unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long addr; + + addr = kernel_stack_pointer(regs) + n * sizeof(long); + if (!regs_within_kernel_stack(regs, addr)) + return 0; + return *(unsigned long *)addr; +} -- Gitee From d236f42f279882def535b3536739945c9781da44 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Tue, 9 Jan 2024 12:27:52 +0800 Subject: [PATCH 0284/2138] anolis: sw64: add hardware match support ANBZ: #4688 Add hardware match mechanism for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/kernel/match.c | 551 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 551 insertions(+) create mode 100644 arch/sw_64/kernel/match.c diff --git a/arch/sw_64/kernel/match.c b/arch/sw_64/kernel/match.c new file mode 100644 index 000000000000..3926391270da --- /dev/null +++ b/arch/sw_64/kernel/match.c @@ -0,0 +1,551 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +#include +#include +#include + + +char da_match_buf[1024], dv_match_buf[1024], dav_match_buf[1024]; +char ia_match_buf[1024], iv_match_buf[1024], ida_match_buf[1024]; + +unsigned long da_match_cf1, da_match_cf2, da_match_cf3; +unsigned long dv_match_cf1, dv_match_cf2, dv_match_cf3; +unsigned long dav_match_cf1, dav_match_cf2, dav_match_cf3, + dav_match_cf4, dav_match_cf5; +unsigned long ia_match_cf1, ia_match_cf2, ia_match_cf3, ia_match_cf4; +unsigned long iv_match_cf1, iv_match_cf2; +unsigned long ida_match_cf1, ida_match_cf2; + +static int da_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", da_match_buf); + return 0; +} + +static int dv_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", dv_match_buf); + return 0; +} + +static int dav_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", dav_match_buf); + return 0; +} + +static int ia_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", ia_match_buf); + return 0; +} + +static int iv_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", iv_match_buf); + return 0; +} + +static int ida_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", ida_match_buf); + return 0; +} + +static int da_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, da_match_show, NULL); +} + +static int dv_match_open(struct inode *inode, struct file *file) +{ + 
return single_open(file, dv_match_show, NULL); +} + +static int dav_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, dav_match_show, NULL); +} + +static int ia_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, ia_match_show, NULL); +} + +static int iv_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, iv_match_show, NULL); +} + +static int ida_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, ida_match_show, NULL); +} + +static void +write_da_match(void *i) +{ + unsigned long dc_ctl; + + write_csr(da_match_cf1, CSR_DA_MATCH); + write_csr(da_match_cf2, CSR_DA_MASK); + dc_ctl = read_csr(CSR_DC_CTLP); + dc_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S) + | (0x1UL << DAV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) + | (0x3UL << DPM_MATCH)); + dc_ctl |= da_match_cf3; + write_csr(dc_ctl, CSR_DC_CTLP); +} + +static void +write_dv_match(void *i) +{ + unsigned long dc_ctl; + + write_csr(dv_match_cf1, CSR_DV_MATCH); + write_csr(dv_match_cf2, CSR_DV_MASK); + dc_ctl = read_csr(CSR_DC_CTLP); + dc_ctl &= ~((0x1UL << DAV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) + | (0x3UL << DPM_MATCH)); + dc_ctl |= ((0x1UL << DV_MATCH_EN_S) | dv_match_cf3); + write_csr(dc_ctl, CSR_DC_CTLP); +} + +static void +write_dav_match(void *i) +{ + unsigned long dc_ctl; + + write_csr(dav_match_cf1, CSR_DA_MATCH); + write_csr(dav_match_cf2, CSR_DA_MASK); + write_csr(dav_match_cf3, CSR_DV_MATCH); + write_csr(dav_match_cf4, CSR_DV_MASK); + dc_ctl = read_csr(CSR_DC_CTLP); + dc_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S) + | (0x1UL << DPM_MATCH_EN_S) | (0x3UL << DPM_MATCH)); + dc_ctl |= ((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S) + | dav_match_cf5); + write_csr(dc_ctl, CSR_DC_CTLP); +} + +static void +write_ia_match(void *i) +{ + ia_match_cf1 |= (0x1UL << IA_MATCH_EN_S); + write_csr_imb(ia_match_cf1, CSR_IA_MATCH); + write_csr_imb(ia_match_cf2, CSR_IA_MASK); + 
write_csr(((0x3ffUL << 18) | ia_match_cf3), CSR_IA_VPNMATCH); + write_csr(((0x3ffUL << 18) | ia_match_cf4), CSR_IA_UPNMATCH); +} + +static void +write_iv_match(void *i) +{ + unsigned long ia_match_tmp; + + ia_match_tmp = read_csr(CSR_IA_MATCH); + ia_match_tmp &= ~(0x1UL << IV_PM_EN_S); + ia_match_tmp |= ((((iv_match_cf2 >> IV_PM_EN_S) & 0x1) << IV_PM_EN_S) + | (iv_match_cf2 & 0x3) | (0x1UL << IV_MATCH_EN_S)); + write_csr_imb(iv_match_cf1, CSR_IV_MATCH); + write_csr_imb(ia_match_tmp, CSR_IA_MATCH); +} + +static void +write_ida_match(void *i) +{ + + ida_match_cf1 |= (0x1UL << IDA_MATCH_EN_S); + write_csr(ida_match_cf1, CSR_IDA_MATCH); + write_csr(ida_match_cf2, CSR_IDA_MASK); +} + +static ssize_t da_match_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + size_t size; + char tmp[400]; + char *p; + int i, m; + const char *sep = " "; + char tmp1[400]; + int err; + char *ret = NULL; + + size = min(sizeof(da_match_buf) - 1, len); + if (copy_from_user(da_match_buf, user_buf, size)) + return -EFAULT; + + da_match_buf[size] = '\0'; + strcpy(tmp, da_match_buf); + p = tmp; + + for (i = 0 ; i < 4; i++) { + m = i*100; + ret = strsep(&p, sep); + if (ret != NULL) + strcpy(&tmp1[m], ret); + + } + tmp1[400] = '\0'; + + err = kstrtoul(&tmp1[0], 0, &da_match_cf1); + if (err) + return err; + + err = kstrtoul(&tmp1[100], 0, &da_match_cf2); + if (err) + return err; + + err = kstrtoul(&tmp1[200], 0, &da_match_cf3); + if (err) + return err; + + if (on_each_cpu(write_da_match, NULL, 1)) + pr_crit("%s: timed out\n", __func__); + + return len; +} + +static ssize_t dv_match_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + size_t size; + char tmp[400]; + char *p; + int i, m; + const char *sep = " "; + char tmp1[400]; + int err; + char *ret = NULL; + + size = min(sizeof(dv_match_buf) - 1, len); + if (copy_from_user(dv_match_buf, user_buf, size)) + return -EFAULT; + + dv_match_buf[size] = '\0'; + strcpy(tmp, dv_match_buf); + p 
= tmp; + + for (i = 0 ; i < 4; i++) { + m = i*100; + ret = strsep(&p, sep); + if (ret != NULL) + strcpy(&tmp1[m], ret); + + } + tmp1[400] = '\0'; + + err = kstrtoul(&tmp1[0], 0, &dv_match_cf1); + if (err) + return err; + + err = kstrtoul(&tmp1[100], 0, &dv_match_cf2); + if (err) + return err; + + err = kstrtoul(&tmp1[200], 0, &dv_match_cf3); + if (err) + return err; + + if (on_each_cpu(write_dv_match, NULL, 1)) + pr_crit("%s: timed out\n", __func__); + + return len; +} + +static ssize_t dav_match_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + size_t size; + char tmp[500]; + char *p; + int i, m; + const char *sep = " "; + char tmp1[500]; + int err; + char *ret = NULL; + + size = min(sizeof(dav_match_buf) - 1, len); + if (copy_from_user(dav_match_buf, user_buf, size)) + return -EFAULT; + + dav_match_buf[size] = '\0'; + strcpy(tmp, dav_match_buf); + p = tmp; + + for (i = 0 ; i < 5; i++) { + m = i*100; + ret = strsep(&p, sep); + if (ret != NULL) + strcpy(&tmp1[m], ret); + + } + tmp1[500] = '\0'; + + err = kstrtoul(&tmp1[0], 0, &dav_match_cf1); + if (err) + return err; + + err = kstrtoul(&tmp1[100], 0, &dav_match_cf2); + if (err) + return err; + + err = kstrtoul(&tmp1[200], 0, &dav_match_cf3); + if (err) + return err; + + err = kstrtoul(&tmp1[300], 0, &dav_match_cf4); + if (err) + return err; + + err = kstrtoul(&tmp1[400], 0, &dav_match_cf5); + if (err) + return err; + + + if (on_each_cpu(write_dav_match, NULL, 1)) + pr_crit("%s: timed out\n", __func__); + return len; +} + +static ssize_t ia_match_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + size_t size; + char tmp[400]; + char *p; + int i, m; + const char *sep = " "; + char tmp1[400]; + int err; + char *ret = NULL; + + size = min(sizeof(ia_match_buf) - 1, len); + if (copy_from_user(ia_match_buf, user_buf, size)) + return -EFAULT; + + ia_match_buf[size] = '\0'; + strcpy(tmp, ia_match_buf); + p = tmp; + + for (i = 0 ; i < 4; i++) { + m = i*100; 
+ ret = strsep(&p, sep); + if (ret != NULL) + strcpy(&tmp1[m], ret); + + } + tmp1[400] = '\0'; + + err = kstrtoul(&tmp1[0], 0, &ia_match_cf1); + if (err) + return err; + + err = kstrtoul(&tmp1[100], 0, &ia_match_cf2); + if (err) + return err; + + err = kstrtoul(&tmp1[200], 0, &ia_match_cf3); + if (err) + return err; + + err = kstrtoul(&tmp1[300], 0, &ia_match_cf4); + if (err) + return err; + + if (on_each_cpu(write_ia_match, NULL, 1)) + pr_crit("%s: timed out\n", __func__); + return len; +} + +static ssize_t iv_match_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + size_t size; + char tmp[400]; + char *p; + int i, m; + const char *sep = " "; + char tmp1[400]; + int err; + char *ret = NULL; + + size = min(sizeof(ia_match_buf) - 1, len); + if (copy_from_user(ia_match_buf, user_buf, size)) + return -EFAULT; + + ia_match_buf[size] = '\0'; + strcpy(tmp, ia_match_buf); + p = tmp; + + for (i = 0 ; i < 4; i++) { + m = i*100; + ret = strsep(&p, sep); + if (ret != NULL) + strcpy(&tmp1[m], ret); + + } + tmp1[400] = '\0'; + + err = kstrtoul(&tmp1[0], 0, &iv_match_cf1); + if (err) + return err; + + err = kstrtoul(&tmp1[100], 0, &iv_match_cf2); + if (err) + return err; + + if (on_each_cpu(write_iv_match, NULL, 1)) + pr_crit("%s: timed out\n", __func__); + return len; +} + + +static ssize_t ida_match_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + size_t size; + char tmp[400]; + char *p; + int i, m; + const char *sep = " "; + char tmp1[400]; + int err; + char *ret = NULL; + + size = min(sizeof(ida_match_buf) - 1, len); + if (copy_from_user(ida_match_buf, user_buf, size)) + return -EFAULT; + + ida_match_buf[size] = '\0'; + strcpy(tmp, ida_match_buf); + p = tmp; + + for (i = 0 ; i < 4; i++) { + m = i*100; + ret = strsep(&p, sep); + if (ret != NULL) + strcpy(&tmp1[m], ret); + } + tmp1[400] = '\0'; + + err = kstrtoul(&tmp1[0], 0, &ida_match_cf1); + if (err) + return err; + + err = kstrtoul(&tmp1[100], 0, 
&ida_match_cf2); + if (err) + return err; + + if (on_each_cpu(write_ida_match, NULL, 1)) + pr_crit("%s: timed out\n", __func__); + + return len; +} + +static const struct file_operations set_da_match_fops = { + .open = da_match_open, + .read = seq_read, + .write = da_match_set, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations set_dv_match_fops = { + .open = dv_match_open, + .read = seq_read, + .write = dv_match_set, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations set_dav_match_fops = { + .open = dav_match_open, + .read = seq_read, + .write = dav_match_set, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations set_ia_match_fops = { + .open = ia_match_open, + .read = seq_read, + .write = ia_match_set, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations set_iv_match_fops = { + .open = iv_match_open, + .read = seq_read, + .write = iv_match_set, + .llseek = seq_lseek, + .release = single_release, +}; + + +static const struct file_operations set_ida_match_fops = { + .open = ida_match_open, + .read = seq_read, + .write = ida_match_set, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init match_debugfs_init(void) +{ + struct dentry *match_entry; + + if (!sw64_debugfs_dir) + return -ENODEV; + + match_entry = debugfs_create_file("da_match", 0600, + sw64_debugfs_dir, NULL, + &set_da_match_fops); + if (!match_entry) + return -ENOMEM; + + match_entry = debugfs_create_file("dv_match", 0600, + sw64_debugfs_dir, NULL, + &set_dv_match_fops); + if (!match_entry) + return -ENOMEM; + + match_entry = debugfs_create_file("dav_match", 0600, + sw64_debugfs_dir, NULL, + &set_dav_match_fops); + if (!match_entry) + return -ENOMEM; + + match_entry = debugfs_create_file("ia_match", 0600, + sw64_debugfs_dir, NULL, + &set_ia_match_fops); + if (!match_entry) + return -ENOMEM; + + match_entry = 
debugfs_create_file("iv_match", 0600, + sw64_debugfs_dir, NULL, + &set_iv_match_fops); + if (!match_entry) + return -ENOMEM; + + match_entry = debugfs_create_file("ida_match", 0600, + sw64_debugfs_dir, NULL, + &set_ida_match_fops); + if (!match_entry) + return -ENOMEM; + + return 0; +} +late_initcall(match_debugfs_init); -- Gitee From fe9c4ea9ea2176709e806774e69077a851a2af74 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:27 +0800 Subject: [PATCH 0285/2138] anolis: sw64: add memory management ANBZ: #4688 Add memory management support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/cache.h | 13 + arch/sw_64/include/asm/cacheflush.h | 13 + arch/sw_64/include/asm/memory.h | 35 ++ arch/sw_64/include/asm/mmu.h | 10 + arch/sw_64/include/asm/mmu_context.h | 136 ++++ arch/sw_64/include/asm/mmzone.h | 17 + arch/sw_64/include/asm/page.h | 71 +++ arch/sw_64/include/asm/pgalloc.h | 51 ++ arch/sw_64/include/asm/pgtable-4level.h | 32 + arch/sw_64/include/asm/pgtable.h | 789 ++++++++++++++++++++++++ arch/sw_64/include/asm/sparsemem.h | 9 + arch/sw_64/include/asm/tlb.h | 13 + arch/sw_64/include/asm/tlbflush.h | 94 +++ arch/sw_64/include/asm/vmalloc.h | 5 + arch/sw_64/mm/Makefile | 16 + arch/sw_64/mm/extable.c | 25 + arch/sw_64/mm/fault.c | 305 +++++++++ arch/sw_64/mm/init.c | 339 ++++++++++ arch/sw_64/mm/mmap.c | 102 +++ arch/sw_64/mm/physaddr.c | 39 ++ 20 files changed, 2114 insertions(+) create mode 100644 arch/sw_64/include/asm/cache.h create mode 100644 arch/sw_64/include/asm/cacheflush.h create mode 100644 arch/sw_64/include/asm/memory.h create mode 100644 arch/sw_64/include/asm/mmu.h create mode 100644 arch/sw_64/include/asm/mmu_context.h create mode 100644 arch/sw_64/include/asm/mmzone.h create mode 100644 arch/sw_64/include/asm/page.h create mode 100644 arch/sw_64/include/asm/pgalloc.h create 
mode 100644 arch/sw_64/include/asm/pgtable-4level.h create mode 100644 arch/sw_64/include/asm/pgtable.h create mode 100644 arch/sw_64/include/asm/sparsemem.h create mode 100644 arch/sw_64/include/asm/tlb.h create mode 100644 arch/sw_64/include/asm/tlbflush.h create mode 100644 arch/sw_64/include/asm/vmalloc.h create mode 100644 arch/sw_64/mm/Makefile create mode 100644 arch/sw_64/mm/extable.c create mode 100644 arch/sw_64/mm/fault.c create mode 100644 arch/sw_64/mm/init.c create mode 100644 arch/sw_64/mm/mmap.c create mode 100644 arch/sw_64/mm/physaddr.c diff --git a/arch/sw_64/include/asm/cache.h b/arch/sw_64/include/asm/cache.h new file mode 100644 index 000000000000..6a6ce4e99265 --- /dev/null +++ b/arch/sw_64/include/asm/cache.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/asm/cache.h + */ +#ifndef _ASM_SW64_CACHE_H +#define _ASM_SW64_CACHE_H + +#define L1_CACHE_SHIFT 7 +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define SMP_CACHE_BYTES L1_CACHE_BYTES + +#endif /* _ASM_SW64_CACHE_H */ diff --git a/arch/sw_64/include/asm/cacheflush.h b/arch/sw_64/include/asm/cacheflush.h new file mode 100644 index 000000000000..0d49830b8493 --- /dev/null +++ b/arch/sw_64/include/asm/cacheflush.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CACHEFLUSH_H +#define _ASM_SW64_CACHEFLUSH_H + +/* + * DCache: PIPT + * ICache: + * - C3B is VIVT with ICTAG, support coherence. 
+ * - C4 is VIPT + */ +#include + +#endif /* _ASM_SW64_CACHEFLUSH_H */ diff --git a/arch/sw_64/include/asm/memory.h b/arch/sw_64/include/asm/memory.h new file mode 100644 index 000000000000..b2b7492ae477 --- /dev/null +++ b/arch/sw_64/include/asm/memory.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MEMORY_H +#define _ASM_SW64_MEMORY_H + +#ifdef CONFIG_NUMA +#include +#endif + +#define MIN_MEMORY_BLOCK_SIZE_VM_MEMHP (1UL << 30) +#define NODE0_START (_TEXT_START - __START_KERNEL_map) + +#define MAX_PHYSMEM_BITS 48 + +struct mem_desc_t { + unsigned long phys_base; /* start address of physical memory */ + unsigned long phys_size; /* size of physical memory */ + phys_addr_t base; /* start address of memory managed by kernel */ + phys_addr_t size; /* size of memory managed by kernel */ +}; +extern struct mem_desc_t mem_desc; + +struct numa_node_desc_t { + phys_addr_t base; + phys_addr_t size; +}; +extern struct numa_node_desc_t numa_nodes_desc[]; + +void __init callback_init(void); +void __init mem_detect(void); +void __init sw64_memblock_init(void); +void __init zone_sizes_init(void); +void __init sw64_numa_init(void); +void __init sw64_memory_present(void); + +#endif /* _ASM_SW64_MEMORY_H */ diff --git a/arch/sw_64/include/asm/mmu.h b/arch/sw_64/include/asm/mmu.h new file mode 100644 index 000000000000..f24219fac654 --- /dev/null +++ b/arch/sw_64/include/asm/mmu.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MMU_H +#define _ASM_SW64_MMU_H + +/* The sw64 MMU context is one "unsigned long" bitmap per CPU*/ +typedef struct { + unsigned long asid[NR_CPUS]; + void *vdso; +} mm_context_t; +#endif /* _ASM_SW64_MMU_H */ diff --git a/arch/sw_64/include/asm/mmu_context.h b/arch/sw_64/include/asm/mmu_context.h new file mode 100644 index 000000000000..420ad5f745be --- /dev/null +++ b/arch/sw_64/include/asm/mmu_context.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MMU_CONTEXT_H +#define 
_ASM_SW64_MMU_CONTEXT_H + +#include + +#include +#include + +/* + * The maximum ASID's the processor supports. + */ + +#if defined(CONFIG_SUBARCH_C3B) || defined(CONFIG_SUBARCH_C4) +#define ASID_BITS 10 +#endif + +#include +#define last_asid(cpu) (cpu_data[cpu].last_asid) + +#define ASID_FIRST_VERSION (1UL << ASID_BITS) +#define ASID_MASK ((1UL << ASID_BITS) - 1) + +#define cpu_asid(cpu, mm) ((mm)->context.asid[cpu] & ASID_MASK) + +static inline bool asid_valid(struct mm_struct *mm, unsigned int cpu) +{ + return !((mm->context.asid[cpu] ^ last_asid(cpu)) & ~ASID_MASK); +} + +/* + * NOTE! The way this is set up, the high bits of the "last_asid" (and + * the "mm->context.asid[cpu]") are the ASID _version_ code. A version + * of 0 is always considered invalid, so to invalidate another process + * you only need to do "p->mm->context.asid[cpu] = 0". + * + * If we need more ASID's than the processor has, we invalidate the old + * user TLB's (tbivp()) and start a new ASID version. That will force a + * new asid for any other processes the next time they want to run. + */ + +static inline void __get_new_mm_context(struct mm_struct *mm, long cpu) +{ + unsigned long asid = last_asid(cpu); + + if (!(++asid & ASID_MASK)) + tbivp(); + mm->context.asid[cpu] = last_asid(cpu) = asid; + +} + +static inline void +switch_mm_irqs_off(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *next) +{ + /* Check if our ASID is of an older version, and thus invalid. */ + unsigned long asid, ptbr; + long cpu = smp_processor_id(); + + if (!asid_valid(next_mm, cpu)) + __get_new_mm_context(next_mm, cpu); + + /* Update CSR:UPN and CSR:PTBR. Another thread may have allocated + * a new mm->context[asid] (via flush_tlb_mm) without the ASID serial + * number wrapping. We have no way to detect when this is needed. 
+ */ + asid = cpu_asid(cpu, next_mm); + ptbr = virt_to_pfn(next_mm->pgd); + load_mm(asid, ptbr); + cpumask_set_cpu(cpu, mm_cpumask(next_mm)); +} + +#define switch_mm_irqs_off switch_mm_irqs_off + +static inline void +switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + switch_mm_irqs_off(prev_mm, next_mm, tsk); + local_irq_restore(flags); +} + +#define activate_mm(prev, next) switch_mm(prev, next, current) +#define deactivate_mm(tsk, mm) do { } while (0) + +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ + int i; + + for_each_possible_cpu(i) + mm->context.asid[i] = 0; + return 0; +} + +static inline void destroy_context(struct mm_struct *mm) +{ + /* Nothing to do. */ +} + +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{ +} + +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) +{ + return 0; +} + +static inline void arch_exit_mmap(struct mm_struct *mm) +{ +} + +static inline void arch_unmap(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ +} + +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ +} + +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, + bool write, bool execute, + bool foreign) +{ + /* by default, allow everything */ + return true; +} +#endif /* _ASM_SW64_MMU_CONTEXT_H */ diff --git a/arch/sw_64/include/asm/mmzone.h b/arch/sw_64/include/asm/mmzone.h new file mode 100644 index 000000000000..363e2bc98a95 --- /dev/null +++ b/arch/sw_64/include/asm/mmzone.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MMZONE_H +#define _ASM_SW64_MMZONE_H + +#include + +/* + * Following are macros that are specific to this numa platform. 
+ */ + +extern pg_data_t *node_data[]; + +#ifdef CONFIG_NUMA +#define NODE_DATA(nid) (node_data[(nid)]) +#endif + +#endif /* _ASM_SW64_MMZONE_H */ diff --git a/arch/sw_64/include/asm/page.h b/arch/sw_64/include/asm/page.h new file mode 100644 index 000000000000..68b4f2fc1b48 --- /dev/null +++ b/arch/sw_64/include/asm/page.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PAGE_H +#define _ASM_SW64_PAGE_H + +#include +#include + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT 13 +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE - 1)) + +#define HPAGE_SHIFT PMD_SHIFT +#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + +#define HUGE_MAX_HSTATE 2 + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +extern void clear_page(void *page); +#define clear_user_page(page, vaddr, pg) clear_page(page) + +#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ + alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) +#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE + +extern void copy_page(void *_to, void *_from); +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +typedef struct page *pgtable_t; + +extern unsigned long __phys_addr(unsigned long addr); +#ifdef CONFIG_SUBARCH_C3B +extern unsigned long __boot_phys_addr(unsigned long addr); +#else +#define __boot_phys_addr(x) __phys_addr(x) +#endif + +#endif /* !__ASSEMBLY__ */ + +#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) + +#include + +#define __START_KERNEL_map PAGE_OFFSET + +#define __pa(x) __phys_addr((unsigned long)(x)) +#define __va(x) ((void *)((unsigned long) (x) | PAGE_OFFSET)) + +#define __boot_pa(x) __boot_phys_addr((unsigned long)(x)) +#define __boot_va(x) __va(x) + +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) +#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) + +#define 
virt_to_pfn(vaddr) (PHYS_PFN(__pa(vaddr))) +#define pfn_to_virt(pfn) (__va(PFN_PHYS(pfn))) + +#ifdef CONFIG_FLATMEM +#define pfn_valid(pfn) ((pfn) < max_mapnr) +#endif /* CONFIG_FLATMEM */ + +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC +#include +#include +#endif + +#endif /* _ASM_SW64_PAGE_H */ diff --git a/arch/sw_64/include/asm/pgalloc.h b/arch/sw_64/include/asm/pgalloc.h new file mode 100644 index 000000000000..1cc03e3be5b6 --- /dev/null +++ b/arch/sw_64/include/asm/pgalloc.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PGALLOC_H +#define _ASM_SW64_PGALLOC_H + +#include +#include +#include /* for pte_{alloc,free}_one */ + +/* + * Allocate and free page tables. The xxx_kernel() versions are + * used to allocate a kernel page table - this turns on ASN bits + * if any. + */ + +static inline void +pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) +{ + unsigned long pfn = page_to_pfn(pte); + + set_pmd(pmd, __pmd((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +static inline void +pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) +{ + unsigned long pfn = virt_to_pfn(pte); + + set_pmd(pmd, __pmd((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +static inline void +pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + unsigned long pfn = virt_to_pfn(pmd); + + set_pud(pud, __pud((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +static inline void +p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) +{ + unsigned long pfn = virt_to_pfn(pud); + + set_p4d(p4d, __p4d((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +extern pgd_t *pgd_alloc(struct mm_struct *mm); + +#define check_pgt_cache() do { } while (0) + +#endif /* _ASM_SW64_PGALLOC_H */ diff --git a/arch/sw_64/include/asm/pgtable-4level.h b/arch/sw_64/include/asm/pgtable-4level.h new file mode 100644 index 000000000000..719e2c5377e3 --- /dev/null +++ b/arch/sw_64/include/asm/pgtable-4level.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef 
_ASM_SW64_PGTABLE_4LEVEL_H +#define _ASM_SW64_PGTABLE_4LEVEL_H + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +/* + * These are used to make use of C type-checking.. + */ +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pmd; } pmd_t; +typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pud; } pud_t; +typedef struct { unsigned long pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((x).pmd) +#define pgd_val(x) ((x).pgd) +#define pud_val(x) ((x).pud) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) }) +#define __pmd(x) ((pmd_t) { (x) }) +#define __pud(x) ((pud_t) { (x) }) +#define __pgd(x) ((pgd_t) { (x) }) +#define __pgprot(x) ((pgprot_t) { (x) }) +#endif /* !__ASSEMBLY__ */ + +#define PAGE_OFFSET 0xfff0000000000000 + +#endif +#endif /* _ASM_SW64_PGTABLE_4LEVEL_H */ diff --git a/arch/sw_64/include/asm/pgtable.h b/arch/sw_64/include/asm/pgtable.h new file mode 100644 index 000000000000..0b1f825eb74c --- /dev/null +++ b/arch/sw_64/include/asm/pgtable.h @@ -0,0 +1,789 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PGTABLE_H +#define _ASM_SW64_PGTABLE_H + + +#include + +/* + * This file contains the functions and defines necessary to modify and use + * the sw64 page table tree. + * + * This hopefully works with any standard sw64 page-size, as defined + * in (currently 8192). 
+ */
+#include 
+#include 
+
+#include 
+#include 
+#include  /* For TASK_SIZE */
+#include 
+
+struct mm_struct;
+struct vm_area_struct;
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+	*pmdp = pmd;
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		pmd_t *pmdp, pmd_t pmdval)
+{
+	set_pmd(pmdp, pmdval);
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+	*pudp = pud;
+}
+
+static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
+{
+	*p4dp = p4d;
+}
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT (PAGE_SHIFT + 3 * (PAGE_SHIFT - 3))
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+/* PUD_SHIFT determines the size of the area a third-level page table can map */
+#define PUD_SHIFT (PAGE_SHIFT + 2 * (PAGE_SHIFT - 3))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+
+/* PMD_SHIFT determines the size of the area a second-level page table can map */
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE - 1))
+
+#define CONT_PMD_SHIFT 6
+#define CONT_PMDS (1 << CONT_PMD_SHIFT)
+#define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)
+#define CONT_PMD_MASK (~(CONT_PMD_SIZE - 1))
+
+/*
+ * Entries per page directory level: the sw64 is four-level, with
+ * all levels having a one-page page table.
+ */
+#define PTRS_PER_PTE (1UL << (PAGE_SHIFT - 3))
+#define PTRS_PER_PMD (1UL << (PAGE_SHIFT - 3))
+#define PTRS_PER_PGD (1UL << (PAGE_SHIFT - 3))
+#define PTRS_PER_PUD (1UL << (PAGE_SHIFT - 3))
+
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+#define FIRST_USER_ADDRESS 0UL
+
+/* Number of pointers that fit on a page: this will go away. 
*/ +#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT - 3)) + +#define VMALLOC_START (-2 * PGDIR_SIZE) +#ifndef CONFIG_SPARSEMEM_VMEMMAP +#define VMALLOC_END (-PGDIR_SIZE) +#else +#define VMEMMAP_END (-PGDIR_SIZE) +#define vmemmap ((struct page *)VMEMMAP_END - (1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT))) +#define VMALLOC_END ((unsigned long)vmemmap) +#endif + +/* + * HMcode-imposed page table bits + */ +#if defined(CONFIG_SUBARCH_C3B) + +#define _PAGE_VALID 0x0001 +#define _PAGE_PRESENT _PAGE_VALID +#define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */ +#define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */ +#define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */ +#define _PAGE_ASM 0x0010 +#define _PAGE_CONT 0x0020 /* used for 256M page size bit */ +#define _PAGE_LEAF 0x0040 /* used for 8M page size bit */ +#define _PAGE_PROTNONE 0x0080 /* used for numa page balancing */ +#define _PAGE_SPECIAL 0x0100 +#define _PAGE_KRE 0x0400 /* xxx - see below on the "accessed" bit */ +#define _PAGE_URE 0x0800 /* xxx */ +#define _PAGE_KWE 0x4000 /* used to do the dirty bit in software */ +#define _PAGE_UWE 0x8000 /* used to do the dirty bit in software */ + +/* .. and these are ours ... */ +#define _PAGE_DIRTY 0x20000 +#define _PAGE_ACCESSED 0x40000 + +#define _PAGE_SPLITTING 0x200000 /* For Transparent Huge Page */ +#define _PAGE_DEVMAP 0x400000 /* For ZONE DEVICE page */ + +#define _PAGE_BIT_FOW 2 /* bit of _PAGE_FOW */ +#define _PAGE_BIT_ACCESSED 18 /* bit of _PAGE_ACCESSED */ +#define _PAGE_BIT_SPLITTING 21 /* bit of _PAGE_SPLITTING */ +#define _PAGE_BIT_DEVMAP 22 /* bit of _PAGE_DEVMAP */ +/* + * NOTE! The "accessed" bit isn't necessarily exact: it can be kept exactly + * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it. + * Under Linux/sw64, the "accessed" bit just means "read", and I'll just use + * the KRE/URE bits to watch for it. 
That way we don't need to overload the + * KWE/UWE bits with both handling dirty and accessed. + * + * Note that the kernel uses the accessed bit just to check whether to page + * out a page or not, so it doesn't have to be exact anyway. + */ + +/* Used for swap PTEs only. */ +#define _PAGE_SWP_EXCLUSIVE _BITUL(5) + +#define __DIRTY_BITS (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE) +#define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE) + +#define _PFN_SHIFT 28 + +/* + * All the normal masks have the "page accessed" bits on, as any time they are used, + * the page is accessed. They are cleared only by the page-out routines + */ +#define PAGE_NONE __pgprot(__ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE | _PAGE_PROTNONE) +#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) +#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) +#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) +#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) +#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) + +#define page_valid_kern(x) (0) + +#elif defined(CONFIG_SUBARCH_C4) + +#define _PAGE_VALID 0x0001 +#define _PAGE_PRESENT _PAGE_VALID +#define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */ +#define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */ +#define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */ +#define _PAGE_FIXED 0x0010 +#define _PAGE_CONT 0x0020 /* used for 512M page size bit*/ +#define _PAGE_LEAF 0x0040 /* used for huge page bit */ +#define _PAGE_PCD 0x0080 /* used for page cache disabled */ + +/* and these are sw definition */ +#define _PAGE_WCD 0x0100 +#define _PAGE_ACCESSED 0x0200 +#define _PAGE_SPLITTING 0x0400 /* For Transparent Huge Page */ +#define _PAGE_SPECIAL 0x0800 +#define _PAGE_DEVMAP 0x1000 /* For ZONE DEVICE page */ +#define _PAGE_KERN 0x2000 +#define _PAGE_DIRTY _BITUL(62) +#define _PAGE_PROTNONE _BITUL(63) +#define 
_PAGE_BIT_FOW 2 /* bit of _PAGE_FOW */ +#define _PAGE_BIT_ACCESSED 9 /* bit of _PAGE_ACCESSED */ +#define _PAGE_BIT_SPLITTING 10 /* bit of _PAGE_SPLITTING */ +#define _PAGE_BIT_DEVMAP 12 /* bit of _PAGE_DEVMAP */ + +/* Used for swap PTEs only. */ +#define _PAGE_SWP_EXCLUSIVE _BITUL(5) + +#define __DIRTY_BITS _PAGE_DIRTY +#define __ACCESS_BITS _PAGE_ACCESSED + +#define _PFN_SHIFT 24 + +/* + * All the normal masks have the "page accessed" bits on, as any time they are used, + * the page is accessed. They are cleared only by the page-out routines + */ +#define PAGE_NONE __pgprot(__ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE | _PAGE_LEAF | _PAGE_PROTNONE) +#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_LEAF) +#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_LEAF) +#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_LEAF) +#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_KERN | _PAGE_LEAF) +#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_LEAF | (x)) + +#define page_valid_kern(x) ((x & (_PAGE_VALID | _PAGE_KERN)) == (_PAGE_VALID | _PAGE_KERN)) +#endif + +#define PFN_PTE_SHIFT _PFN_SHIFT + +#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT) +#define _PFN_MASK (GENMASK(_PFN_BITS - 1, 0) << _PFN_SHIFT) + +#define _PAGE_TABLE (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS) +#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS | _PAGE_SPECIAL | _PAGE_LEAF | _PAGE_CONT) + +#define _PAGE_P(x) _PAGE_NORMAL((x) | _PAGE_FOW) +#define _PAGE_S(x) _PAGE_NORMAL(x) + +/* + * pgprot_noncached() is only for infiniband pci support, and a real + * implementation for RAM would be more complicated. + */ +#define pgprot_noncached(prot) (prot) + +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. 
+ */ +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) + +static inline void set_pte(pte_t *ptep, pte_t pteval) +{ + *ptep = pteval; + + if (page_valid_kern(pte_val(pteval))) { + mb(); + if ((pte_val(pteval) & _PAGE_FOE) == 0) + imemb(); + } +} + +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) +{ + pte_t pte; + + pte_val(pte) = (pfn << _PFN_SHIFT) | pgprot_val(prot); + return pte; +} + +static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot) +{ + pmd_t pmd; + + pmd_val(pmd) = (pfn << _PFN_SHIFT) | pgprot_val(prot); + return pmd; +} +static inline pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot) +{ + pud_t pud; + + pud_val(pud) = (pfn << _PFN_SHIFT) | pgprot_val(pgprot); + return pud; +} + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); + return pte; +} + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot); + return pmd; +} + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. 
+ */ +#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT) + +#define p4d_pfn(p4d) ((p4d_val(p4d) & _PFN_MASK) >> _PFN_SHIFT) +#define pud_pfn(pud) ((pud_val(pud) & _PFN_MASK) >> _PFN_SHIFT) +#define pmd_pfn(pmd) ((pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT) +#define pte_pfn(pte) ((pte_val(pte) & _PFN_MASK) >> _PFN_SHIFT) + +#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d)) +#define pud_page(pud) pfn_to_page(pud_pfn(pud)) +#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) +#define pte_page(pte) pfn_to_page(pte_pfn(pte)) + +#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) + +static inline pmd_t *pud_pgtable(pud_t pud) +{ + return (pmd_t *)pfn_to_virt(pud_pfn(pud)); +} + +static inline pud_t *p4d_pgtable(p4d_t p4d) +{ + return (pud_t *)pfn_to_virt(p4d_pfn(p4d)); +} + +static inline unsigned long p4d_page_vaddr(p4d_t p4d) +{ + return (unsigned long)pfn_to_virt(p4d_pfn(p4d)); +} + +static inline unsigned long pud_page_vaddr(pud_t pud) +{ + return (unsigned long)pfn_to_virt(pud_pfn(pud)); +} + +static inline unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return (unsigned long)pfn_to_virt(pmd_pfn(pmd)); +} + +static inline int pte_none(pte_t pte) +{ + return !pte_val(pte); +} + +static inline int pte_valid(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_VALID); +} + +static inline int pte_present(pte_t pte) +{ + return !!(pte_val(pte) & (_PAGE_VALID | _PAGE_PROTNONE)); +} + +static inline int pte_huge(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_LEAF); +} + +static inline void pte_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_val(*ptep) = 0; +} + +#define pte_accessible(mm, pte) \ + (mm_tlb_flush_pending(mm) ? 
pte_present(pte) : pte_valid(pte)) + +static inline int pmd_none(pmd_t pmd) +{ + return !pmd_val(pmd); +} + +static inline int pmd_bad(pmd_t pmd) +{ + return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; +} + +static inline int pmd_present(pmd_t pmd) +{ + /* + * Checking for _PAGE_LEAF is needed too because + * split_huge_page will temporarily clear the valid bit (but + * the _PAGE_LEAF flag will remain set at all times while the + * _PAGE_VALID bit is clear). + */ + return !!(pmd_val(pmd) & (_PAGE_VALID | _PAGE_PROTNONE | _PAGE_LEAF)); +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + pmd_val(*pmdp) = 0; +} + +static inline int pmd_dirty(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_DIRTY); +} + +#define pmd_young pmd_young +static inline int pmd_young(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_ACCESSED); +} + +#define __HAVE_ARCH_PMD_WRITE +#define pmd_write pmd_write +static inline int pmd_write(pmd_t pmd) +{ + return !(pmd_val(pmd) & _PAGE_FOW); +} + +static inline pmd_t pmd_wrprotect(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_FOW; + return pmd; +} + +static inline pmd_t pmd_mkinvalid(pmd_t pmd) +{ + pmd_val(pmd) &= ~(_PAGE_VALID | _PAGE_PROTNONE); + return pmd; +} + +static inline pmd_t pmd_mkclean(pmd_t pmd) +{ + pmd_val(pmd) &= ~(__DIRTY_BITS); + pmd_val(pmd) |= _PAGE_FOW; + return pmd; +} + +static inline pmd_t pmd_mkold(pmd_t pmd) +{ + pmd_val(pmd) &= ~(__ACCESS_BITS); + return pmd; +} + +static inline pmd_t pmd_mkwrite_novma(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_FOW; + return pmd; +} + +static inline pmd_t pmd_mkdirty(pmd_t pmd) +{ + pmd_val(pmd) |= __DIRTY_BITS; + return pmd; +} + +static inline pmd_t pmd_mkdevmap(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_DEVMAP; + return pmd; +} + +static inline pmd_t pmd_mkyoung(pmd_t pmd) +{ + pmd_val(pmd) |= __ACCESS_BITS; + return pmd; +} + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_LEAF; + return pmd; +} + +static inline pmd_t pmd_mkcont(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_CONT; + return 
pmd; +} + +static inline int pud_none(pud_t pud) +{ + return !pud_val(pud); +} + +static inline int pud_bad(pud_t pud) +{ + return (pud_val(pud) & ~_PFN_MASK) != _PAGE_TABLE; +} + +static inline int pud_present(pud_t pud) +{ + return !!(pud_val(pud) & _PAGE_VALID); +} + +static inline void pud_clear(pud_t *pudp) +{ + pud_val(*pudp) = 0; +} + +static inline pud_t pud_mkhuge(pud_t pud) +{ + pud_val(pud) |= _PAGE_LEAF; + return pud; +} + +static inline int p4d_none(p4d_t p4d) +{ + return !p4d_val(p4d); +} + +static inline int p4d_bad(p4d_t p4d) +{ + return (p4d_val(p4d) & ~_PFN_MASK) != _PAGE_TABLE; +} + +static inline int p4d_present(p4d_t p4d) +{ + return !!(p4d_val(p4d) & _PAGE_VALID); +} + +static inline void p4d_clear(p4d_t *p4dp) +{ + p4d_val(*p4dp) = 0; +} + +static inline pte_t pmd_pte(pmd_t pmd) +{ + return __pte(pmd_val(pmd)); +} + +static inline pmd_t pte_pmd(pte_t pte) +{ + return __pmd(pte_val(pte)); +} + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. 
+ */ +static inline int pte_write(pte_t pte) +{ + return !(pte_val(pte) & _PAGE_FOW); +} + +static inline int pte_dirty(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_DIRTY); +} + +static inline int pte_young(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_ACCESSED); +} + +static inline int pte_special(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_SPECIAL); +} + +static inline int pte_cont(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_CONT); +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) |= _PAGE_FOW; + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_val(pte) &= ~(__DIRTY_BITS); + pte_val(pte) |= _PAGE_FOW; + return pte; +} + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~(__ACCESS_BITS); + return pte; +} + +static inline pte_t pte_mkwrite_novma(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_FOW; + return pte; +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= __DIRTY_BITS; + return pte; +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= __ACCESS_BITS; + return pte; +} + +static inline pte_t pte_mkhuge(pte_t pte) +{ + pte_val(pte) |= _PAGE_LEAF; + return pte; +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + pte_val(pte) |= _PAGE_SPECIAL; + return pte; +} + +static inline pte_t pte_mkdevmap(pte_t pte) +{ + pte_val(pte) |= _PAGE_SPECIAL; + return pte; +} + +#ifdef CONFIG_NUMA_BALANCING +/* + * See the comment in include/asm-generic/pgtable.h + */ +static inline int pte_protnone(pte_t pte) +{ + return (pte_val(pte) & (_PAGE_PROTNONE | _PAGE_VALID)) + == _PAGE_PROTNONE; +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return (pmd_val(pmd) & (_PAGE_PROTNONE | _PAGE_VALID)) + == _PAGE_PROTNONE; +} +#endif + +#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP +static inline int pte_devmap(pte_t a) +{ + return (pte_val(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP; +} +#endif + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + +/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/ 
+#define pmdp_establish generic_pmdp_establish + +static inline int pmd_trans_splitting(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_SPLITTING); +} + +static inline int pmd_trans_cont(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_CONT); +} + +static inline int pmd_trans_huge(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_LEAF); +} + +static inline int has_transparent_hugepage(void) +{ + return 1; +} + +#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP +static inline int pmd_devmap(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_DEVMAP); +} + +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline int pud_devmap(pud_t pud) +{ + return !!(pud_val(pud) & _PAGE_DEVMAP); +} +#else +static inline int pud_devmap(pud_t pud) +{ + return 0; +} +#endif + +static inline int pgd_devmap(pgd_t pgd) +{ + return 0; +} +#endif +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#define __HAVE_ARCH_PMDP_GET_AND_CLEAR +static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + unsigned long pmd_val = xchg(&pmdp->pmd, 0); + pmd_t pmd = (pmd_t){pmd_val}; + return pmd; +} + +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + set_bit(_PAGE_BIT_FOW, (unsigned long *)pmdp); +} + +#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), (prot)) + +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); + +#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH +extern void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp); + +extern pgd_t 
swapper_pg_dir[1024]; + +/* + * The sw64 doesn't have any external MMU info: the kernel page + * tables contain all the necessary information. + */ +#define update_mmu_cache(vma, address, ptep) do { } while (0) +#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) + +static inline void update_mmu_cache_range(struct vm_fault *vmf, + struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, unsigned int nr) +{ +} + +#if defined(CONFIG_SUBARCH_C3B) + +/* + * Encode and decode a swap entry: + * + * Format of swap PTE: + * bit 0: _PAGE_VALID (must be zero) + * bit 6: _PAGE_LEAF (must be zero) + * bit 7: _PAGE_PROTNONE (must be zero) + * bits 8-15: swap type + * bits 16-63: swap offset + */ +#define __SWP_TYPE_SHIFT 8 +#define __SWP_TYPE_BITS 8 + +#elif defined(CONFIG_SUBARCH_C4) + +/* + * Encode and decode a swap entry: + * + * Format of swap PTE: + * bit 0: _PAGE_VALID (must be zero) + * bit 6: _PAGE_LEAF (must be zero) + * bits 7-11: swap type + * bits 12-58: swap offset + * bit 63: _PAGE_PROTNONE (must be zero) + */ +#define __SWP_TYPE_SHIFT 7 +#define __SWP_TYPE_BITS 5 + +#endif + +#define __SWP_OFFSET_BITS 47 +#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1) +#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) +#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1) + +#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) +#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK) +#define __swp_entry(type, offset) \ + ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) + +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +static inline int pte_swp_exclusive(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_SWP_EXCLUSIVE); +} + +static inline pte_t pte_swp_mkexclusive(pte_t pte) +{ + pte_val(pte) |= _PAGE_SWP_EXCLUSIVE; + return pte; +} + +static inline pte_t pte_swp_clear_exclusive(pte_t 
pte) +{ + pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE; + return pte; +} + +#define kern_addr_valid(addr) (1) + +#define pte_ERROR(e) \ + pr_err("%s: %d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) +#define pmd_ERROR(e) \ + pr_err("%s: %d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) +#define pud_ERROR(e) \ + pr_err("%s: %d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e)) +#define pgd_ERROR(e) \ + pr_err("%s: %d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) +extern void paging_init(void); + +/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */ +#define HAVE_ARCH_UNMAPPED_AREA + +#endif /* _ASM_SW64_PGTABLE_H */ diff --git a/arch/sw_64/include/asm/sparsemem.h b/arch/sw_64/include/asm/sparsemem.h new file mode 100644 index 000000000000..a60e757f3838 --- /dev/null +++ b/arch/sw_64/include/asm/sparsemem.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SPARSEMEM_H +#define _ASM_SW64_SPARSEMEM_H + +#include + +#define SECTION_SIZE_BITS 28 + +#endif /* _ASM_SW64_SPARSEMEM_H */ diff --git a/arch/sw_64/include/asm/tlb.h b/arch/sw_64/include/asm/tlb.h new file mode 100644 index 000000000000..08c8f4f97de1 --- /dev/null +++ b/arch/sw_64/include/asm/tlb.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TLB_H +#define _ASM_SW64_TLB_H + +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + +#include + +#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte) +#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd) +#define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud) + +#endif /* _ASM_SW64_TLB_H */ diff --git a/arch/sw_64/include/asm/tlbflush.h b/arch/sw_64/include/asm/tlbflush.h new file mode 100644 index 000000000000..73995d9663a6 --- /dev/null +++ b/arch/sw_64/include/asm/tlbflush.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TLBFLUSH_H +#define _ASM_SW64_TLBFLUSH_H + +#include +#include +#include +#include +#include 
+#include 
+#include 
+
+static inline void local_flush_tlb_all(void)
+{
+	tbiv();
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	int cpu;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	cpu = smp_processor_id();
+	if (!asid_valid(mm, cpu)) {
+		cpumask_clear_cpu(cpu, mm_cpumask(mm));
+		goto out;
+	}
+
+	if (current->mm == mm) {
+		__get_new_mm_context(mm, cpu);
+		wrasid(cpu_asid(cpu, mm));
+	} else {
+		mm->context.asid[cpu] = 0;
+		cpumask_clear_cpu(cpu, mm_cpumask(mm));
+	}
+out:
+	local_irq_restore(flags);
+}
+
+static inline void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	int cpu;
+	struct mm_struct *mm;
+
+	cpu = smp_processor_id();
+	mm = vma->vm_mm;
+
+	if (asid_valid(mm, cpu))
+		tbisasid(cpu_asid(cpu, mm), addr);
+	else
+		cpumask_clear_cpu(cpu, mm_cpumask(mm));
+}
+
+/*
+ * It flushes the whole user tlb now.
+ */
+static inline void
+local_flush_tlb_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end)
+{
+	local_flush_tlb_mm(vma->vm_mm);
+}
+
+/*
+ * There is no way to invalidate kernel pages only, so it has to
+ * invalidate all mappings. 
+ */ +static inline void +local_flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + local_flush_tlb_all(); +} + + +#ifdef CONFIG_SMP +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +#else +#define flush_tlb_all() local_flush_tlb_all() +#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr) +#define flush_tlb_range(vma, start, end) local_flush_tlb_range(vma, start, end) +#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, end) + +#endif /* CONFIG_SMP */ + +#endif /* _ASM_SW64_TLBFLUSH_H */ diff --git a/arch/sw_64/include/asm/vmalloc.h b/arch/sw_64/include/asm/vmalloc.h new file mode 100644 index 000000000000..a76d1133d6c6 --- /dev/null +++ b/arch/sw_64/include/asm/vmalloc.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_VMALLOC_H +#define _ASM_SW64_VMALLOC_H + +#endif /* _ASM_SW64_VMALLOC_H */ diff --git a/arch/sw_64/mm/Makefile b/arch/sw_64/mm/Makefile new file mode 100644 index 000000000000..8b9d6e4d2ebf --- /dev/null +++ b/arch/sw_64/mm/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the linux sw_64-specific parts of the memory manager. 
+# + +#ccflags-y := -Werror + +obj-y := init.o fault.o physaddr.o mmap.o extable.o + +obj-$(CONFIG_NUMA) += numa.o +ifeq ($(CONFIG_SUBARCH_C4),y) +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage_c4.o +else +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o +endif +obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += thp.o diff --git a/arch/sw_64/mm/extable.c b/arch/sw_64/mm/extable.c new file mode 100644 index 000000000000..d2678e12a1b1 --- /dev/null +++ b/arch/sw_64/mm/extable.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +int fixup_exception(struct pt_regs *regs, unsigned long pc) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(pc); + if (fixup) { + unsigned int valreg = fixup->fixup.bits.valreg; + unsigned int errreg = fixup->fixup.bits.errreg; + + if (valreg != 31) + regs->regs[valreg] = 0; + if (errreg != 31) + regs->regs[errreg] = -EFAULT; + pc += fixup->fixup.bits.nextinsn; + regs->pc = pc; + + return 1; + } + return 0; +} diff --git a/arch/sw_64/mm/fault.c b/arch/sw_64/mm/fault.c new file mode 100644 index 000000000000..e76560a7edca --- /dev/null +++ b/arch/sw_64/mm/fault.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 1995 Linus Torvalds + */ + +#include +#include +#include + +#include + +__read_mostly bool segv_debug_enabled; + +#ifdef CONFIG_KPROBES +static inline int notify_page_fault(struct pt_regs *regs, unsigned long mmcsr) +{ + int ret = 0; + /* kprobe_running() needs smp_processor_id() */ + if (!user_mode(regs)) { + preempt_disable(); + if (kprobe_running() && kprobe_fault_handler(regs, mmcsr)) + ret = 1; + preempt_enable(); + } + return ret; +} +#else +static inline int notify_page_fault(struct pt_regs *regs, unsigned long mmcsr) +{ + return 0; +} +#endif + +extern void die(char *, struct pt_regs *, long); +extern void show_regs(struct pt_regs *regs); + +void show_all_vma(void) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + + MA_STATE(mas, 0, 0, 0); 
+ + if (!mm) + return; + + mas.tree = &mm->mm_mt; + + for (int i = 0; (vma = mas_find(&mas, ULONG_MAX)) != NULL; i++) { + unsigned long start = vma->vm_start; + unsigned long end = vma->vm_end; + struct file *file = vma->vm_file; + + if (file) + pr_info("vma[%d]: [%#lx, %#lx], len = %#lx, flags = %#lx, file = %s, name = %s\n", + i, start, end, (end - start), vma->vm_flags, + file->f_path.dentry->d_name.name, current->comm); + else + pr_info("vma[%d]: [%#lx, %#lx], len = %#lx, flags = %#lx, name = %s\n", + i, start, end, (end - start), vma->vm_flags, current->comm); + } +} + +/* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to handle_mm_fault(). + * + * mmcsr: + * 0 = translation not valid + * 1 = access violation + * 2 = fault-on-read + * 3 = fault-on-execute + * 4 = fault-on-write + * + * cause: + * -1 = instruction fetch + * 0 = load + * 1 = store + * + * Registers $9 through $15 are saved in a block just prior to `regs' and + * are saved and restored around the call to allow exception code to + * modify them. 
+ */ + +unsigned long show_va_to_pa(struct mm_struct *mm, unsigned long addr) +{ + pgd_t *pgd = NULL; + p4d_t *p4d = NULL; + pud_t *pud = NULL; + pmd_t *pmd = NULL; + pte_t *pte = NULL; + unsigned long ret = 0UL; + + pgd = pgd_offset(mm, addr); + if (pgd_none(*pgd)) { + ret = 0; + pr_debug("addr = %#lx, pgd = %#lx\n", addr, pgd_val(*pgd)); + goto out; + } + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) { + ret = 0; + pr_debug("addr = %#lx, pgd = %#lx, p4d = %#lx\n", + addr, pgd_val(*pgd), p4d_val(*p4d)); + goto out; + } + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) { + ret = 0; + pr_debug("addr = %#lx, pgd = %#lx, pud = %#lx\n", + addr, pgd_val(*pgd), pud_val(*pud)); + goto out; + } + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) { + ret = 0; + pr_debug("addr = %#lx, pgd = %#lx, pud = %#lx, pmd = %#lx\n", + addr, pgd_val(*pgd), pud_val(*pud), pmd_val(*pmd)); + goto out; + + } + pte = pte_offset_map(pmd, addr); + if (pte_present(*pte)) { + ret = (unsigned long)pfn_to_virt(pte_pfn(*pte)); + pr_debug("addr = %#lx, pgd = %#lx, pud = %#lx, pmd = %#lx, pte = %#lx, ret = %#lx\n", + addr, *(unsigned long *)pgd, *(unsigned long *)pud, + *(unsigned long *)pmd, *(unsigned long *)pte, ret); + } +out: + return ret; +} + +extern int do_match(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs); + +asmlinkage void notrace +do_page_fault(unsigned long address, unsigned long mmcsr, + long cause, struct pt_regs *regs) +{ + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + int si_code = SEGV_MAPERR; + vm_fault_t fault; + unsigned int flags = FAULT_FLAG_DEFAULT; + + if (notify_page_fault(regs, mmcsr)) + return; + + if (unlikely(mmcsr >= MMCSR__DA_MATCH)) { + if (do_match(address, mmcsr, cause, regs) == 1) + return; + } + + if (unlikely(mmcsr == MMCSR__ACV1)) { + if (!user_mode(regs)) + goto no_context; + else { + mmap_read_unlock(mm); + goto bad_area; + } + } + + /* + * If we're in an interrupt context, or have no user 
context, + * we must not take the fault. + */ + if (!mm || faulthandler_disabled()) + goto no_context; + + if (user_mode(regs)) + flags |= FAULT_FLAG_USER; + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); + +retry: + vma = lock_mm_and_find_vma(mm, address, regs); + if (!vma) + goto bad_area_nosemaphore; + + /* + * Ok, we have a good vm_area for this memory access, so + * we can handle it. + */ + si_code = SEGV_ACCERR; + if (cause < 0) { + if (!(vma->vm_flags & VM_EXEC)) + goto bad_area; + } else if (!cause) { + /* Allow reads even for write-only mappings */ + if (!(vma->vm_flags & (VM_READ | VM_WRITE))) + goto bad_area; + } else { + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + flags |= FAULT_FLAG_WRITE; + } + + /* + * If for any reason at all we couldn't handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. + */ + fault = handle_mm_fault(vma, address, flags, regs); + + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) + goto no_context; + return; + } + + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); + } + + if (fault & VM_FAULT_MAJOR) { + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, + regs, address); + current->maj_flt++; + } else { + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, + regs, address); + current->min_flt++; + } + + if (fault & VM_FAULT_RETRY) { + flags |= FAULT_FLAG_TRIED; + + /* No need to mmap_read_unlock(mm) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + + goto retry; + } + + mmap_read_unlock(mm); + + return; + + /* + * Something tried to access memory that isn't in our memory map. + * Fix it, but check if it's kernel or user first. 
+ */ + bad_area: + mmap_read_unlock(mm); + + bad_area_nosemaphore: + if (user_mode(regs)) + goto do_sigsegv; + + no_context: + /* Are we prepared to handle this fault as an exception? */ + if (fixup_exception(regs, regs->pc)) + return; + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + pr_alert("Unable to handle kernel paging request at virtual address %016lx\n", + address); + die("Oops", regs, cause); + make_task_dead(SIGKILL); + + /* + * We ran out of memory, or some other thing happened to us that + * made us unable to handle the page fault gracefully. + */ + out_of_memory: + mmap_read_unlock(mm); + if (!user_mode(regs)) + goto no_context; + pagefault_out_of_memory(); + return; + + do_sigbus: + mmap_read_unlock(mm); + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address); + if (!user_mode(regs)) + goto no_context; + return; + + do_sigsegv: + force_sig_fault(SIGSEGV, si_code, (void __user *) address); + + if (unlikely(segv_debug_enabled)) { + pr_info("fault: want to send_segv: pid %d, cause = %#lx, mmcsr = %#lx, address = %#lx, pc %#lx\n", + current->pid, cause, mmcsr, address, regs->pc); + show_regs(regs); + show_all_vma(); + } +} diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c new file mode 100644 index 000000000000..ca761b602ab6 --- /dev/null +++ b/arch/sw_64/mm/init.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 1995 Linus Torvalds + */ + +/* 2.3.x zone allocator, 1999 Andrea Arcangeli */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +struct mem_desc_t mem_desc; +#ifndef CONFIG_NUMA +struct numa_node_desc_t numa_nodes_desc[1]; +#endif /* CONFIG_NUMA */ + +/* + * empty_zero_page is a special page that is used for + * zero-initialized data and COW. 
+ */ +unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; +EXPORT_SYMBOL(empty_zero_page); +pg_data_t *node_data[MAX_NUMNODES] __read_mostly; +EXPORT_SYMBOL(node_data); + +pgd_t swapper_pg_dir[1024] __aligned(PAGE_SIZE); +static pud_t vmalloc_pud[1024] __aligned(PAGE_SIZE); + +static phys_addr_t mem_start; +static phys_addr_t mem_size_limit; + +#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE +unsigned long memory_block_size_bytes(void) +{ + if (is_in_guest()) + return MIN_MEMORY_BLOCK_SIZE_VM_MEMHP; + else + return MIN_MEMORY_BLOCK_SIZE; +} +#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ + +static int __init setup_mem_size(char *p) +{ + char *oldp; + unsigned long start, size; + + start = 0; + oldp = p; + size = memparse(p, &p); + if (p == oldp) + return -EINVAL; + + if (*p == '@') + start = memparse(p + 1, &p); + + mem_start = start; + mem_size_limit = size; + return 0; +} +early_param("mem", setup_mem_size); + +#if defined(CONFIG_SUBARCH_C3B) +pgd_t * +pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret, *init; + + ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + init = pgd_offset(&init_mm, 0UL); + if (ret) + pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]); + + return ret; +} +#elif defined(CONFIG_SUBARCH_C4) +pgd_t * +pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret; + + ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + + return ret; +} +#endif + +/* Set up initial PCB, VPTB, and other such nicities. */ + +static inline void +switch_to_system_map(void) +{ + memset(swapper_pg_dir, 0, PAGE_SIZE); + update_ptbr_sys(virt_to_phys(swapper_pg_dir)); + tbiv(); +} + +void __init callback_init(void) +{ + pgd_t *pgd; + p4d_t *p4d; + + switch_to_system_map(); + + /* Allocate one PGD and one PUD. 
*/ + pgd = pgd_offset_k(VMALLOC_START); + p4d = p4d_offset(pgd, VMALLOC_START); + p4d_populate(&init_mm, p4d, (pud_t *)vmalloc_pud); +} + +void __init zone_sizes_init(void) +{ + unsigned long max_zone_pfns[MAX_NR_ZONES]; + + memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); + +#ifdef CONFIG_ZONE_DMA32 + max_zone_pfns[ZONE_DMA32] = min(MAX_DMA32_PFN, max_low_pfn); +#endif + max_zone_pfns[ZONE_NORMAL] = max_low_pfn; + + free_area_init(max_zone_pfns); +} + +/* + * paging_init() sets up the memory map. + */ +void __init paging_init(void) +{ +} + +void __init mem_detect(void) +{ + int i; + + mem_desc.phys_base = 0; + for (i = 0; i < MAX_NUMSOCKETS; i++) { + if (socket_desc[i].is_online) + mem_desc.phys_size += socket_desc[i].socket_mem; + } + + if (mem_start >= NODE0_START) { + mem_desc.base = mem_start; + } else { + mem_desc.base = NODE0_START; + mem_size_limit -= NODE0_START - mem_start; + } + + if (mem_size_limit && mem_size_limit < mem_desc.phys_size - NODE0_START) + mem_desc.size = mem_size_limit; + else + mem_desc.size = mem_desc.phys_size - NODE0_START; +} + +void __init sw64_memblock_init(void) +{ + memblock_add(mem_desc.base, mem_desc.size); + + memblock_remove(1ULL << MAX_PHYSMEM_BITS, PHYS_ADDR_MAX); + + max_pfn = max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); + + memblock_allow_resize(); + memblock_initialized = true; + process_memmap(); + + /* Make sure kernel text is in memory range. */ + memblock_add(__pa_symbol(_text), _end - _text); + memblock_reserve(__pa_symbol(_text), _end - _text); + + /* Make sure initrd is in memory range. 
*/ + if (sunway_boot_params->initrd_start) { + phys_addr_t base = __boot_pa(sunway_boot_params->initrd_start); + phys_addr_t size = sunway_boot_params->initrd_size; + + memblock_add(base, size); + memblock_reserve(base, size); + } + + /* end of DRAM range may have been changed */ + max_pfn = max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); +} + +#ifndef CONFIG_NUMA +void __init sw64_numa_init(void) +{ + const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); + u64 nd_pa; + void *nd; + int tnid; + + memblock_set_node(mem_desc.base, mem_desc.size, &memblock.memory, 0); + nd_pa = memblock_phys_alloc(nd_size, SMP_CACHE_BYTES); + nd = __va(nd_pa); + + /* report and initialize */ + pr_info("NODE_DATA [mem %#018llx-%#018llx]\n", + nd_pa, nd_pa + nd_size - 1); + tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); + if (tnid != 0) + pr_info("NODE_DATA(%d) on node %d\n", 0, tnid); + + node_data[0] = nd; + memset(NODE_DATA(0), 0, sizeof(pg_data_t)); + NODE_DATA(0)->node_id = 0; + NODE_DATA(0)->node_start_pfn = mem_desc.base >> PAGE_SHIFT; + NODE_DATA(0)->node_spanned_pages = mem_desc.size >> PAGE_SHIFT; + node_set_online(0); +} +#endif /* CONFIG_NUMA */ + +void __init +mem_init(void) +{ + set_max_mapnr(max_low_pfn); + high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); +#ifdef CONFIG_SWIOTLB + swiotlb_init(true, SWIOTLB_VERBOSE); +#endif + memblock_free_all(); +} + +#ifdef CONFIG_SPARSEMEM_VMEMMAP +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, + struct vmem_altmap *altmap) +{ + return vmemmap_populate_basepages(start, end, node, altmap); +} + +void vmemmap_free(unsigned long start, unsigned long end, + struct vmem_altmap *altmap) +{ +} +#endif + +#ifdef CONFIG_HAVE_MEMBLOCK +#ifndef MIN_MEMBLOCK_ADDR +#define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET) +#endif +#ifndef MAX_MEMBLOCK_ADDR +#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0) +#endif +void __init early_init_dt_add_memory_arch(u64 base, u64 size) +{ + const u64 phys_offset = 
MIN_MEMBLOCK_ADDR; + + if (acpi_disabled) { + if (!PAGE_ALIGNED(base)) { + if (size < PAGE_SIZE - (base & ~PAGE_MASK)) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + size -= PAGE_SIZE - (base & ~PAGE_MASK); + base = PAGE_ALIGN(base); + } + size &= PAGE_MASK; + + if (base > MAX_MEMBLOCK_ADDR) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + + if (base + size - 1 > MAX_MEMBLOCK_ADDR) { + pr_warn("Ignoring memory range 0x%llx - 0x%llx\n", + ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size); + size = MAX_MEMBLOCK_ADDR - base + 1; + } + + if (base + size < phys_offset) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + + if (base < phys_offset) { + pr_warn("Ignoring memory range 0x%llx - 0x%llx\n", + base, phys_offset); + size -= phys_offset - base; + base = phys_offset; + } + memblock_add(base, size); + } else + return; +} +#endif + +#ifdef CONFIG_MEMORY_HOTPLUG +int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + int ret; + + ret = __add_pages(nid, start_pfn, nr_pages, params); + if (ret) + pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n", + __func__, ret); + + return ret; +} + +void arch_remove_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + + __remove_pages(start_pfn, nr_pages, altmap); +} +#endif + +static const pgprot_t protection_map[16] = { + [VM_NONE] = _PAGE_P(_PAGE_FOE | _PAGE_FOW | + _PAGE_FOR), + [VM_READ] = _PAGE_P(_PAGE_FOE | _PAGE_FOW), + [VM_WRITE] = _PAGE_P(_PAGE_FOE), + [VM_WRITE | VM_READ] = _PAGE_P(_PAGE_FOE), + [VM_EXEC] = _PAGE_P(_PAGE_FOW | _PAGE_FOR), + [VM_EXEC | VM_READ] = _PAGE_P(_PAGE_FOW), + [VM_EXEC | VM_WRITE] = _PAGE_P(0), + [VM_EXEC | VM_WRITE | VM_READ] = 
_PAGE_P(0), + [VM_SHARED] = _PAGE_S(_PAGE_FOE | _PAGE_FOW | + _PAGE_FOR), + [VM_SHARED | VM_READ] = _PAGE_S(_PAGE_FOE | _PAGE_FOW), + [VM_SHARED | VM_WRITE] = _PAGE_S(_PAGE_FOE), + [VM_SHARED | VM_WRITE | VM_READ] = _PAGE_S(_PAGE_FOE), + [VM_SHARED | VM_EXEC] = _PAGE_S(_PAGE_FOW | _PAGE_FOR), + [VM_SHARED | VM_EXEC | VM_READ] = _PAGE_S(_PAGE_FOW), + [VM_SHARED | VM_EXEC | VM_WRITE] = _PAGE_S(0), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = _PAGE_S(0) +}; +DECLARE_VM_GET_PAGE_PROT diff --git a/arch/sw_64/mm/mmap.c b/arch/sw_64/mm/mmap.c new file mode 100644 index 000000000000..a7a189fc36d6 --- /dev/null +++ b/arch/sw_64/mm/mmap.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +#include + +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + struct vm_unmapped_area_info info; + unsigned long limit; + + /* Support 32 bit heap. 
*/ + if (current->personality & ADDR_LIMIT_32BIT) + limit = 0x80000000; + else + limit = TASK_SIZE; + + if (len > limit) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (addr + len > TASK_SIZE) + return -EINVAL; + + return addr; + } + + if (addr) { + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + + info.flags = 0; + info.length = len; + info.low_limit = mm->mmap_base; + info.high_limit = limit; + info.align_mask = 0; + info.align_offset = pgoff << PAGE_SHIFT; + + return vm_unmapped_area(&info); +} + +unsigned long arch_mmap_rnd(void) +{ + unsigned long rnd; + + /* 8MB for 32bit, 256MB for 64bit */ + if (current->personality & ADDR_LIMIT_32BIT) + rnd = get_random_long() & 0x7ffffful; + else + rnd = get_random_long() & 0xffffffful; + + return rnd << PAGE_SHIFT; +} + +/* + * This function, called very early during the creation of a new process VM + * image, sets up which VM layout function to use: + */ +void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) +{ + unsigned long random_factor = 0UL; + + if (current->flags & PF_RANDOMIZE) + random_factor = arch_mmap_rnd(); + + /* + * Fall back to the standard layout if the personality bit is set, or + * if the expected stack growth is unlimited: + */ + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + mm->get_unmapped_area = arch_get_unmapped_area; +} + +SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, unsigned long, fd, + unsigned long, off) +{ + unsigned long ret = -EINVAL; + + if ((off + PAGE_ALIGN(len)) < off) + goto out; + if (off & ~PAGE_MASK) + goto out; + ret = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); + out: + return ret; +} diff --git a/arch/sw_64/mm/physaddr.c b/arch/sw_64/mm/physaddr.c new file mode 100644 index 000000000000..3c6ecb8ee86a --- /dev/null +++ b/arch/sw_64/mm/physaddr.c @@ -0,0 +1,39 
@@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +unsigned long __phys_addr(unsigned long addr) +{ + VIRTUAL_BUG_ON(addr < PAGE_OFFSET); + addr &= ~PAGE_OFFSET; + VIRTUAL_BUG_ON(!phys_addr_valid(addr)); + return addr; +} +EXPORT_SYMBOL(__phys_addr); + +bool __virt_addr_valid(unsigned long addr) +{ + if (addr < PAGE_OFFSET) + return false; + addr &= ~PAGE_OFFSET; + return pfn_valid(addr >> PAGE_SHIFT); +} +EXPORT_SYMBOL(__virt_addr_valid); + +#ifdef CONFIG_SUBARCH_C3B +#define LEGACY_BOOT_VA 0xffffffff80000000 +unsigned long __boot_phys_addr(unsigned long addr) +{ + if (addr >= LEGACY_BOOT_VA) { + addr &= ~LEGACY_BOOT_VA; + VIRTUAL_BUG_ON(addr >= KERNEL_IMAGE_SIZE); + } else { + VIRTUAL_BUG_ON(addr < PAGE_OFFSET); + addr &= ~PAGE_OFFSET; + VIRTUAL_BUG_ON(!phys_addr_valid(addr)); + } + return addr; +} +#endif -- Gitee From 30d265050f1084da7d2d07157ea6fc1895c3794e Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:14 +0800 Subject: [PATCH 0286/2138] anolis: sw64: add hugetlb support ANBZ: #4688 Add hugetlb support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/hugetlb.h | 43 +++ arch/sw_64/mm/hugetlbpage.c | 313 +++++++++++++++++++++ arch/sw_64/mm/hugetlbpage_c4.c | 452 +++++++++++++++++++++++++++++++ arch/sw_64/mm/thp.c | 55 ++++ 4 files changed, 863 insertions(+) create mode 100644 arch/sw_64/include/asm/hugetlb.h create mode 100644 arch/sw_64/mm/hugetlbpage.c create mode 100644 arch/sw_64/mm/hugetlbpage_c4.c create mode 100644 arch/sw_64/mm/thp.c diff --git a/arch/sw_64/include/asm/hugetlb.h b/arch/sw_64/include/asm/hugetlb.h new file mode 100644 index 000000000000..f4c8cbe0891a --- /dev/null +++ b/arch/sw_64/include/asm/hugetlb.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HUGETLB_H +#define _ASM_SW64_HUGETLB_H + +#include + +#ifdef CONFIG_SUBARCH_C4 +#define __HAVE_ARCH_HUGE_PTE_CLEAR +extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz); + +#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT +extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz); + +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR +extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH +extern pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT +extern void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS +extern int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, pte_t pte, int dirty); + +#define arch_make_huge_pte arch_make_huge_pte +extern pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, + vm_flags_t flags); + +#define 
set_huge_swap_pte_at set_huge_swap_pte_at +extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz); +#endif + +#include + +#endif /* _ASM_SW64_HUGETLB_H */ diff --git a/arch/sw_64/mm/hugetlbpage.c b/arch/sw_64/mm/hugetlbpage.c new file mode 100644 index 000000000000..fae1fa8bf7df --- /dev/null +++ b/arch/sw_64/mm/hugetlbpage.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW64 Huge TLB Page Support for Kernel. + */ + +#include +#include +#include +#include + +#include +#include + +/* + * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal + * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry. + * Otherwise, returns 0. + */ +int pmd_huge(pmd_t pmd) +{ + return !pmd_none(pmd) && + (pmd_val(pmd) & (_PAGE_VALID | _PAGE_LEAF)) != _PAGE_VALID; +} + +int pud_huge(pud_t pud) +{ + return 0; +} + +pte_t *sw64_256m_hugepte_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr) +{ + int i; + struct page *page; + pmd_t *pmd; + pte_t *pte = NULL; + + pmd = pmd_alloc(mm, pud, addr); + if (pmd == NULL) + return NULL; + + pte = pte_alloc_map(mm, pmd, addr); + if (pte == NULL) + return NULL; + + page = virt_to_page(pte); + pmd_val(*pmd) = pmd_val(*pmd) | _PAGE_LEAF | _PAGE_CONT; + for (i = 1; i < 32; i++) + pmd_val(*(pmd+i)) = pmd_val(*pmd); + return pte; +} + +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + p4d = p4d_alloc(mm, pgd, addr); + pud = pud_alloc(mm, p4d, addr); + if (pud) { + if (sz == PMD_SIZE) { + if (want_pmd_share(vma, addr) && pud_none(*pud)) + pte = huge_pmd_share(mm, vma, addr, pud); + else + pte = (pte_t *)pmd_alloc(mm, pud, addr); + } else if (sz == (PMD_SIZE << 5)) { + pte = sw64_256m_hugepte_alloc(mm, pud, addr); + } else { + pr_warn("Unsupported page size %lx\n", sz); + return 
NULL; + } + } + BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); + + return pte; +} + +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, + unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd = NULL; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + if (pgd_present(*pgd)) { + p4d = p4d_offset(pgd, addr); + if (p4d_present(*p4d)) { + pud = pud_offset(p4d, addr); + if (pud_present(*pud)) { + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) + return NULL; + if (pmd_val(*pmd) & _PAGE_CONT) + pte = pte_offset_map(pmd, addr); + else + pte = (pte_t *) pmd; + } + } + } + return pte; +} + +static inline int sw64_huge_pmd_bad(pmd_t pmd) +{ + return !(((pmd_val(pmd) & ~_PFN_MASK) == _PAGE_TABLE) || + ((pmd_val(pmd) & _PAGE_CONT) == _PAGE_CONT)); +} + +static inline int sw64_huge_pmd_none_or_clear_bad(pmd_t *pmd) +{ + if (pmd_none(*pmd)) + return 1; + if (unlikely(sw64_huge_pmd_bad(*pmd))) { + pmd_clear_bad(pmd); + return 1; + } + return 0; +} + +static void sw64_huge_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, + unsigned long addr) +{ + if ((((unsigned long)pmd & 0xffUL) == 0) && + ((pmd_val(*pmd) & _PAGE_CONT) == _PAGE_CONT)) { + pgtable_t token = pmd_pgtable(*pmd); + + pmd_clear(pmd); + pte_free_tlb(tlb, token, addr); + mm_dec_nr_ptes(tlb->mm); + } else { + pmd_clear(pmd); + } +} + +static inline void sw64_huge_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pmd_t *pmd; + unsigned long next; + unsigned long start; + + start = addr; + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (sw64_huge_pmd_none_or_clear_bad(pmd)) + continue; + sw64_huge_free_pte_range(tlb, pmd, addr); + } while (pmd++, addr = next, addr != end); + + start &= PUD_MASK; + if (start < floor) + return; + if (ceiling) { + ceiling &= PUD_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; + + pmd = 
pmd_offset(pud, start); + pud_clear(pud); + pmd_free_tlb(tlb, pmd, start); + mm_dec_nr_pmds(tlb->mm); +} + +static inline void sw64_huge_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pud_t *pud; + unsigned long next; + unsigned long start; + + start = addr; + pud = pud_offset(p4d, addr); + do { + next = pud_addr_end(addr, end); + if (pud_none_or_clear_bad(pud)) + continue; + sw64_huge_free_pmd_range(tlb, pud, addr, next, floor, ceiling); + } while (pud++, addr = next, addr != end); + + start &= PGDIR_MASK; + if (start < floor) + return; + if (ceiling) { + ceiling &= PGDIR_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; + + pud = pud_offset(p4d, start); + p4d_clear(p4d); + pud_free_tlb(tlb, pud, start); + mm_dec_nr_puds(tlb->mm); +} + +#ifdef CONFIG_HUGETLB_PAGE +static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + + info.flags = 0; + info.length = len; + info.low_limit = current->mm->mmap_legacy_base; + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + return vm_unmapped_area(&info); +} + +static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + unsigned long addr0, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + unsigned long addr; + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = PAGE_SIZE; + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + addr = vm_unmapped_area(&info); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function 
here. This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ + if (addr & ~PAGE_MASK) { + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } + + return addr; +} + +unsigned long +hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (len > TASK_SIZE) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (prepare_hugepage_range(file, addr, len)) + return -EINVAL; + return addr; + } + + if (addr) { + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) + return hugetlb_get_unmapped_area_bottomup(file, addr, len, + pgoff, flags); + else + return hugetlb_get_unmapped_area_topdown(file, addr, len, + pgoff, flags); +} + +#if (defined(CONFIG_FORCE_MAX_ZONEORDER) && (CONFIG_FORCE_MAX_ZONEORDER >= 16)) +static __init int sw64_256m_hugetlb_init(void) +{ + if (!size_to_hstate(1UL << (PMD_SHIFT + 5))) + hugetlb_add_hstate(PMD_SHIFT + 5 - PAGE_SHIFT); + return 0; +} +arch_initcall(sw64_256m_hugetlb_init); +#endif +#endif /* CONFIG_HUGETLB_PAGE */ + +bool __init arch_hugetlb_valid_size(unsigned long size) +{ + switch (size) { + case PMD_SIZE: + case (PMD_SIZE<<5): + return true; + } + + return false; +} diff --git a/arch/sw_64/mm/hugetlbpage_c4.c b/arch/sw_64/mm/hugetlbpage_c4.c new file mode 100644 index 000000000000..913389cd2577 --- /dev/null +++ b/arch/sw_64/mm/hugetlbpage_c4.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW_64 Huge TLB Page Support for Kernel. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal + * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry. + * Otherwise, returns 0. + */ +int pmd_huge(pmd_t pmd) +{ + return !pmd_none(pmd) && + (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_LEAF)) != _PAGE_PRESENT; +} + +int pud_huge(pud_t pud) +{ + return !pud_none(pud) && + (pud_val(pud) & (_PAGE_PRESENT|_PAGE_LEAF)) != _PAGE_PRESENT; +} +EXPORT_SYMBOL(pud_huge); + +/* + * Select all bits except the pfn + */ +static inline pgprot_t pte_pgprot(pte_t pte) +{ + unsigned long pfn = pte_pfn(pte); + + return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); +} + +static inline int num_contig_ptes(unsigned long size, size_t *pgsize) +{ + int contig_ptes = 0; + + *pgsize = size; + + switch (size) { + case PUD_SIZE: + case PMD_SIZE: + contig_ptes = 1; + break; + case CONT_PMD_SIZE: + *pgsize = PMD_SIZE; + contig_ptes = CONT_PMDS; + break; + default: + break; + } + + return contig_ptes; +} + +static pte_t get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned long pgsize, unsigned long ncontig) +{ + pte_t orig_pte = huge_ptep_get(ptep); + unsigned long i; + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) { + pte_t pte = ptep_get_and_clear(mm, addr, ptep); + + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + + return orig_pte; +} + +static pte_t get_clear_contig_flush(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long pgsize, + unsigned long ncontig) +{ + pte_t orig_pte = get_and_clear(mm, addr, ptep, pgsize, ncontig); + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); + unsigned long i, saddr = addr; + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) + pte_clear(mm, addr, ptep); + + flush_tlb_range(&vma, saddr, 
addr); + return orig_pte; +} + +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + p4d = p4d_alloc(mm, pgd, addr); + pud = pud_alloc(mm, p4d, addr); + if (!pud) + return NULL; + + if (sz == PUD_SIZE) { + pte = (pte_t *)pud; + } else if (sz == PMD_SIZE) { + if (want_pmd_share(vma, addr) && pud_none(*pud)) + pte = huge_pmd_share(mm, vma, addr, pud); + else + pte = (pte_t *)pmd_alloc(mm, pud, addr); + } else if (sz == (PMD_SIZE * CONT_PMDS)) { + pte = (pte_t *)pmd_alloc(mm, pud, addr); + WARN_ON(addr & (sz - 1)); + } + + WARN_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); + return pte; +} + +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, + unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd = NULL; + + pgd = pgd_offset(mm, addr); + if (!pgd_present(*pgd)) + return NULL; + + p4d = p4d_offset(pgd, addr); + if (!p4d_present(*p4d)) + return NULL; + + pud = pud_offset(p4d, addr); + + if (sz != PUD_SIZE && pud_none(*pud)) + return NULL; + /* hugepage or swap? 
*/ + if (pud_huge(*pud) || !pud_present(*pud)) + return (pte_t *)pud; + /* table; check the next level */ + + if (sz == CONT_PMD_SIZE) + addr &= CONT_PMD_MASK; + + pmd = pmd_offset(pud, addr); + if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) && + pmd_none(*pmd)) + return NULL; + if (pmd_huge(*pmd) || !pmd_present(*pmd)) + return (pte_t *)pmd; + + return NULL; +} + +pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) +{ + size_t pagesize = 1UL << shift; + + if (pagesize == CONT_PMD_SIZE) { + entry = pmd_pte(pmd_mkcont(pte_pmd(entry))); + } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) { + pr_warn("%s: unrecognized huge page size 0x%lx\n", + __func__, pagesize); + } + return entry; +} + +void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz) +{ + int i, ncontig; + size_t pgsize; + + ncontig = num_contig_ptes(sz, &pgsize); + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) + pte_clear(mm, addr, ptep); +} + +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz) +{ + size_t pgsize; + int i; + int ncontig; + unsigned long pfn; + pgprot_t hugeprot; + + /* + * Code needs to be expanded to handle huge swap and migration + * entries. Needed for HUGETLB and MEMORY_FAILURE. 
+ */ + WARN_ON(!pte_present(pte)); + + if (!pte_cont(pte)) { + set_pte_at(mm, addr, ptep, pte); + return; + } + + ncontig = num_contig_ptes(sz, &pgsize); + pfn = pte_pfn(pte); + hugeprot = pte_pgprot(pte); + + get_and_clear(mm, addr, ptep, pgsize, ncontig); + + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) + set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); +} + +void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz) +{ + int i, ncontig; + size_t pgsize; + + ncontig = num_contig_ptes(sz, &pgsize); + + for (i = 0; i < ncontig; i++, ptep++) + set_pte(ptep, pte); +} + +void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long pfn; + pgprot_t hugeprot; + int ncontig, i; + size_t pgsize; + pte_t pte; + + if (!pte_cont(READ_ONCE(*ptep))) { + ptep_set_wrprotect(mm, addr, ptep); + return; + } + + ncontig = CONT_PMDS; + + pte = get_and_clear(mm, addr, ptep, pgsize, ncontig); + pte = pte_wrprotect(pte); + + hugeprot = pte_pgprot(pte); + pfn = pte_pfn(pte); + + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) + set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); +} + +pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + int ncontig; + size_t pgsize; + pte_t orig_pte = huge_ptep_get(ptep); + + if (!pte_cont(orig_pte)) + return ptep_get_and_clear(mm, addr, ptep); + + ncontig = CONT_PMDS; + + return get_and_clear(mm, addr, ptep, pgsize, ncontig); +} + +pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + struct mm_struct *mm = vma->vm_mm; + size_t pgsize; + int ncontig; + + if (!pte_cont(READ_ONCE(*ptep))) + return ptep_clear_flush(vma, addr, ptep); + + ncontig = CONT_PMDS; + return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig); +} + +static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig) +{ + int i; + + if (pte_write(pte) != 
pte_write(huge_ptep_get(ptep))) + return 1; + + for (i = 0; i < ncontig; i++) { + pte_t orig_pte = huge_ptep_get(ptep + i); + + if (pte_dirty(pte) != pte_dirty(orig_pte)) + return 1; + + if (pte_young(pte) != pte_young(orig_pte)) + return 1; + } + + return 0; +} + +int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + int ncontig, i; + size_t pgsize = 0; + unsigned long pfn = pte_pfn(pte); + pgprot_t hugeprot; + pte_t orig_pte; + + if (!pte_cont(pte)) + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); + + ncontig = CONT_PMDS; + + if (!__cont_access_flags_changed(ptep, pte, ncontig)) + return 0; + + orig_pte = get_and_clear(vma->vm_mm, addr, ptep, pgsize, ncontig); + flush_tlb_fix_spurious_fault(vma, addr, ptep); + + /* Make sure we don't lose the dirty or young state */ + if (pte_dirty(orig_pte)) + pte = pte_mkdirty(pte); + + if (pte_young(orig_pte)) + pte = pte_mkyoung(pte); + + hugeprot = pte_pgprot(pte); + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) + set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot)); + + return 1; +} + +#ifdef CONFIG_HUGETLB_PAGE +static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + + info.flags = 0; + info.length = len; + info.low_limit = current->mm->mmap_legacy_base; + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + return vm_unmapped_area(&info); +} + +static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + unsigned long addr0, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + unsigned long addr; + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = 
PAGE_SIZE; + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + addr = vm_unmapped_area(&info); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ + if (addr & ~PAGE_MASK) { + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } + + return addr; +} + + unsigned long +hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (len > TASK_SIZE) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (prepare_hugepage_range(file, addr, len)) + return -EINVAL; + return addr; + } + + if (addr) { + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) + return hugetlb_get_unmapped_area_bottomup(file, addr, len, + pgoff, flags); + else + return hugetlb_get_unmapped_area_topdown(file, addr, len, + pgoff, flags); +} +#endif /* CONFIG_HUGETLB_PAGE */ + +static __init int setup_hugepagesz(char *opt) +{ + unsigned long ps = memparse(opt, &opt); + + switch (ps) { + case PUD_SIZE: + case PMD_SIZE * CONT_PMDS: + case PMD_SIZE: + hugetlb_add_hstate(ilog2(ps) - PAGE_SHIFT); + return 1; + } + + pr_err("hugepagesz: Unsupported page size %lu M\n", + ps >> 20); + return 0; +} +__setup("hugepagesz=", setup_hugepagesz); diff --git a/arch/sw_64/mm/thp.c b/arch/sw_64/mm/thp.c new file mode 100644 index 000000000000..833bb59f79d0 --- /dev/null +++ b/arch/sw_64/mm/thp.c @@ -0,0 +1,55 
@@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + int changed = !pmd_same(*pmdp, entry); + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + if (changed && dirty) { + *pmdp = entry; + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + } + + return changed; +} +int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) +{ + int ret = 0; + + if (pmd_young(*pmdp)) + ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, + (unsigned long *)pmdp); + return ret; +} + +int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + int young; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + young = pmdp_test_and_clear_young(vma, address, pmdp); + if (young) + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + + return young; +} +void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + int set; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + set = !test_and_set_bit(_PAGE_BIT_SPLITTING, (unsigned long *)pmdp); + if (set) { + /* need tlb flush only to serialize against gup-fast */ + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + } +} -- Gitee From 5fb3c0471040829ba5b0d30dcc5b0b8c069416e4 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:37 +0800 Subject: [PATCH 0287/2138] anolis: sw64: add system call support ANBZ: #4688 Add system call support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/syscall.h | 82 ++++ arch/sw_64/include/asm/uaccess.h | 311 +++++++++++++++ arch/sw_64/include/asm/unistd.h | 27 ++ arch/sw_64/include/uapi/asm/unistd.h | 12 + arch/sw_64/kernel/sys_sw64.c | 151 +++++++ arch/sw_64/kernel/syscalls/Makefile | 32 ++ arch/sw_64/kernel/syscalls/syscall.tbl | 528 +++++++++++++++++++++++++ arch/sw_64/kernel/systbls.S | 15 + 8 files changed, 1158 insertions(+) create mode 100644 arch/sw_64/include/asm/syscall.h create mode 100644 arch/sw_64/include/asm/uaccess.h create mode 100644 arch/sw_64/include/asm/unistd.h create mode 100644 arch/sw_64/include/uapi/asm/unistd.h create mode 100644 arch/sw_64/kernel/sys_sw64.c create mode 100644 arch/sw_64/kernel/syscalls/Makefile create mode 100644 arch/sw_64/kernel/syscalls/syscall.tbl create mode 100644 arch/sw_64/kernel/systbls.S diff --git a/arch/sw_64/include/asm/syscall.h b/arch/sw_64/include/asm/syscall.h new file mode 100644 index 000000000000..a821bf68be16 --- /dev/null +++ b/arch/sw_64/include/asm/syscall.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SYSCALL_H +#define _ASM_SW64_SYSCALL_H + +#include + +#ifndef __ASSEMBLY__ + +typedef long (*syscall_fn_t)(ulong, ulong, ulong, ulong, ulong, ulong); + +extern syscall_fn_t sys_call_table[]; + +static inline int syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[0]; +} + +static inline long +syscall_get_error(struct task_struct *task, struct pt_regs *regs) +{ + return regs->regs[19] ? 
-regs->regs[0] : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[0]; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + if (error) { + regs->regs[0] = -error; + regs->regs[19] = 1; + } else { + regs->regs[0] = val; + regs->regs[19] = 0; + } +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + regs->regs[0] = regs->orig_r0; + regs->regs[19] = regs->orig_r19; +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + *args++ = regs->regs[16]; + *args++ = regs->regs[17]; + *args++ = regs->regs[18]; + *args++ = regs->regs[19]; + *args++ = regs->regs[20]; + *args = regs->regs[21]; +} + +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + const unsigned long *args) +{ + regs->regs[16] = *args++; + regs->regs[17] = *args++; + regs->regs[18] = *args++; + regs->regs[19] = *args++; + regs->regs[20] = *args++; + regs->regs[21] = *args; +} + +static inline int syscall_get_arch(struct task_struct *task) +{ + return AUDIT_ARCH_SW64; +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_SW64_SYSCALL_H */ diff --git a/arch/sw_64/include/asm/uaccess.h b/arch/sw_64/include/asm/uaccess.h new file mode 100644 index 000000000000..f6b119f7fa78 --- /dev/null +++ b/arch/sw_64/include/asm/uaccess.h @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UACCESS_H +#define _ASM_SW64_UACCESS_H + +#include + +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. + * + * Or at least it did once upon a time. Nowadays it is a mask that + * defines which bits of the address space are off limits. 
This is a + * wee bit faster than the above. + * + * For historical reasons, these macros are grossly misnamed. + */ + +#define KERNEL_DS ((mm_segment_t) { 0UL }) +#define USER_DS ((mm_segment_t) { -0x10000000000000UL }) + +#define get_fs() (current_thread_info()->addr_limit) +#define get_ds() (KERNEL_DS) +#define set_fs(x) (current_thread_info()->addr_limit = (x)) + +#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + * + * As the sw64 uses the same address space for kernel and user + * data, we can just do these as direct assignments. (Of course, the + * exception handling means that it's no longer "just"...) + * + * Careful to not + * (a) re-use the arguments for side effects (sizeof/typeof is ok) + * (b) require any knowledge of processes at this stage + */ +#define put_user(x, ptr) \ + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr))) + +/* + * The "__xxx" versions do not do address space checking, useful when + * doing multiple accesses to the same area (the programmer has to do the + * checks by hand with "access_ok()") + */ +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) +/* + * The "ldi %1, 2b-1b(%0)" bits are magic to get the assembler to + * encode the bits we need for resolving the exception. See the + * more extensive comments with fixup_inline_exception below for + * more information. 
+ */ + +extern void __get_user_unknown(void); + +#define __get_user_nocheck(x, ptr, size) \ +({ \ + long __gu_err = 0; \ + unsigned long __gu_val; \ + __chk_user_ptr(ptr); \ + switch (size) { \ + case 1: \ + __get_user_8(ptr); \ + break; \ + case 2: \ + __get_user_16(ptr); \ + break; \ + case 4: \ + __get_user_32(ptr); \ + break; \ + case 8: \ + __get_user_64(ptr); \ + break; \ + default: \ + __get_user_unknown(); \ + break; \ + } \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +#define __get_user_check(x, ptr, size) \ +({ \ + long __gu_err = -EFAULT; \ + unsigned long __gu_val = 0; \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + if (__access_ok(__gu_addr, size)) { \ + __gu_err = 0; \ + switch (size) { \ + case 1: \ + __get_user_8(__gu_addr); \ + break; \ + case 2: \ + __get_user_16(__gu_addr); \ + break; \ + case 4: \ + __get_user_32(__gu_addr); \ + break; \ + case 8: \ + __get_user_64(__gu_addr); \ + break; \ + default: \ + __get_user_unknown(); \ + break; \ + } \ + } \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +struct __large_struct { unsigned long buf[100]; }; +#define __m(x) (*(struct __large_struct __user *)(x)) + +#define __get_user_64(addr) \ + __asm__("1: ldl %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#define __get_user_32(addr) \ + __asm__("1: ldw %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#define __get_user_16(addr) \ + __asm__("1: ldhu %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#define __get_user_8(addr) \ + __asm__("1: ldbu %0,%2\n" \ + 
"2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +extern void __put_user_unknown(void); + +#define __put_user_nocheck(x, ptr, size) \ +({ \ + long __pu_err = 0; \ + __chk_user_ptr(ptr); \ + switch (size) { \ + case 1: \ + __put_user_8(x, ptr); \ + break; \ + case 2: \ + __put_user_16(x, ptr); \ + break; \ + case 4: \ + __put_user_32(x, ptr); \ + break; \ + case 8: \ + __put_user_64(x, ptr); \ + break; \ + default: \ + __put_user_unknown(); \ + break; \ + } \ + __pu_err; \ +}) + +#define __put_user_check(x, ptr, size) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + if (__access_ok(__pu_addr, size)) { \ + __pu_err = 0; \ + switch (size) { \ + case 1: \ + __put_user_8(x, __pu_addr); \ + break; \ + case 2: \ + __put_user_16(x, __pu_addr); \ + break; \ + case 4: \ + __put_user_32(x, __pu_addr); \ + break; \ + case 8: \ + __put_user_64(x, __pu_addr); \ + break; \ + default: \ + __put_user_unknown(); \ + break; \ + } \ + } \ + __pu_err; \ +}) + +/* + * The "__put_user_xx()" macros tell gcc they read from memory + * instead of writing: this is because they do not write to + * any memory gcc knows about, so there are no aliasing issues + */ +#define __put_user_64(x, addr) \ +__asm__ __volatile__("1: stl %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m" (__m(addr)), "rJ" (x), "0"(__pu_err)) + +#define __put_user_32(x, addr) \ +__asm__ __volatile__("1: stw %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) + +#define __put_user_16(x, addr) \ +__asm__ __volatile__("1: sth %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 
2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) + +#define __put_user_8(x, addr) \ +__asm__ __volatile__("1: stb %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) + +/* + * Complex access routines + */ + +extern long __copy_user(void *to, const void *from, long len); + +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long len) +{ + return __copy_user(to, (__force const void *)from, len); +} + +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long len) +{ + return __copy_user((__force void *)to, from, len); +} +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER + +extern long __clear_user(void __user *to, long len); + +static inline long +clear_user(void __user *to, long len) +{ + if (__access_ok(to, len)) + len = __clear_user(to, len); + return len; +} + +#define user_addr_max() (uaccess_kernel() ? 
~0UL : TASK_SIZE) + +extern long strncpy_from_user(char *dest, const char __user *src, long count); +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n); + +#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE +struct page; +void memcpy_page_flushcache(char *to, struct page *page, size_t offset, + size_t len); +extern unsigned long __must_check __copy_user_flushcache(void *to, + const void __user *from, unsigned long n); + +static inline int +__copy_from_user_flushcache(void *dst, const void __user *src, unsigned long size) +{ + kasan_check_write(dst, size); + return __copy_user_flushcache(dst, src, size); +} +#endif + +#include +#endif /* _ASM_SW64_UACCESS_H */ diff --git a/arch/sw_64/include/asm/unistd.h b/arch/sw_64/include/asm/unistd.h new file mode 100644 index 000000000000..6d1b8d1e2011 --- /dev/null +++ b/arch/sw_64/include/asm/unistd.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNISTD_H +#define _ASM_SW64_UNISTD_H + +#include + +#define NR_SYSCALLS __NR_syscalls +#define NR_syscalls NR_SYSCALLS + +#define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_VFORK +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_CLONE3 + +#endif /* _ASM_SW64_UNISTD_H */ diff --git a/arch/sw_64/include/uapi/asm/unistd.h b/arch/sw_64/include/uapi/asm/unistd.h new file mode 100644 index 000000000000..be844b2be9d5 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/unistd.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef 
_UAPI_ASM_SW64_UNISTD_H +#define _UAPI_ASM_SW64_UNISTD_H + +/* + * These are traditionally the names uses for generic system calls + */ +#define __NR_umount __NR_umount2 + +#include + +#endif /* _UAPI_ASM_SW64_UNISTD_H */ diff --git a/arch/sw_64/kernel/sys_sw64.c b/arch/sw_64/kernel/sys_sw64.c new file mode 100644 index 000000000000..d0198aef554d --- /dev/null +++ b/arch/sw_64/kernel/sys_sw64.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +SYSCALL_DEFINE5(getsysinfo, unsigned long, op, void __user *, buffer, + unsigned long, nbytes, int __user *, start, void __user *, arg) +{ + unsigned long w; + + switch (op) { + case GSI_IEEE_FP_CONTROL: + /* Return current software fp control & status bits. */ + /* Note that DU doesn't verify available space here. */ + + w = current_thread_info()->ieee_state & IEEE_SW_MASK; + w = swcr_update_status(w, rdfpcr()); + if (put_user(w, (unsigned long __user *) buffer)) + return -EFAULT; + return 0; + default: + break; + } + + return -EOPNOTSUPP; +} + +SYSCALL_DEFINE5(setsysinfo, unsigned long, op, void __user *, buffer, + unsigned long, nbytes, int __user *, start, void __user *, arg) +{ + switch (op) { + case SSI_IEEE_FP_CONTROL: { + unsigned long swcr, fpcr; + unsigned int *state; + + /* + * Sw_64 Architecture Handbook 4.7.7.3: + * To be fully IEEE compiant, we must track the current IEEE + * exception state in software, because spurious bits can be + * set in the trap shadow of a software-complete insn. + */ + + if (get_user(swcr, (unsigned long __user *)buffer)) + return -EFAULT; + state = ¤t_thread_info()->ieee_state; + + /* Update softare trap enable bits. */ + *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK); + + /* Update the real fpcr. 
*/ + fpcr = rdfpcr() & FPCR_DYN_MASK; + fpcr |= ieee_swcr_to_fpcr(swcr); + wrfpcr(fpcr); + + return 0; + } + + case SSI_IEEE_RAISE_EXCEPTION: { + unsigned long exc, swcr, fpcr, fex; + unsigned int *state; + + if (get_user(exc, (unsigned long __user *)buffer)) + return -EFAULT; + state = ¤t_thread_info()->ieee_state; + exc &= IEEE_STATUS_MASK; + + /* Update softare trap enable bits. */ + swcr = (*state & IEEE_SW_MASK) | exc; + *state |= exc; + + /* Update the real fpcr. */ + fpcr = rdfpcr(); + fpcr |= ieee_swcr_to_fpcr(swcr); + wrfpcr(fpcr); + + /* If any exceptions set by this call, and are unmasked, + * send a signal. Old exceptions are not signaled. + */ + fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr; + if (fex) { + int si_code = FPE_FLTUNK; + + if (fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + + send_sig_fault(SIGFPE, si_code, (void __user *)NULL, current); + } + return 0; + } + default: + break; + } + + return -EOPNOTSUPP; +} + +SYSCALL_DEFINE2(odd_getpriority, int, which, int, who) +{ + int prio = sys_getpriority(which, who); + + if (prio >= 0) { + /* Return value is the unbiased priority, i.e. 20 - prio. + * This does result in negative return values, so signal + * no error. 
+ */ + force_successful_syscall_return(); + prio = 20 - prio; + } + return prio; +} + +SYSCALL_DEFINE0(getxuid) +{ + current_pt_regs()->regs[20] = sys_geteuid(); + return sys_getuid(); +} + +SYSCALL_DEFINE0(getxgid) +{ + current_pt_regs()->regs[20] = sys_getegid(); + return sys_getgid(); +} + +SYSCALL_DEFINE0(getxpid) +{ + current_pt_regs()->regs[20] = sys_getppid(); + return sys_getpid(); +} + +SYSCALL_DEFINE0(sw64_pipe) +{ + int fd[2]; + int res = do_pipe_flags(fd, 0); + + if (!res) { + /* The return values are in $0 and $20. */ + current_pt_regs()->regs[20] = fd[1]; + res = fd[0]; + } + return res; +} diff --git a/arch/sw_64/kernel/syscalls/Makefile b/arch/sw_64/kernel/syscalls/Makefile new file mode 100644 index 000000000000..cdfe761d7282 --- /dev/null +++ b/arch/sw_64/kernel/syscalls/Makefile @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: GPL-2.0 +kapi := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm + +$(shell mkdir -p $(uapi) $(kapi)) + +syscall := $(src)/syscall.tbl +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh + +quiet_cmd_syshdr = SYSHDR $@ + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@ + +quiet_cmd_systbl = SYSTBL $@ + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ + +$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE + $(call if_changed,syshdr) + +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE + $(call if_changed,systbl) + +uapisyshdr-y += unistd_64.h +kapisyshdr-y += syscall_table.h + +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) + +PHONY += all +all: $(uapisyshdr-y) $(kapisyshdr-y) + @: diff --git a/arch/sw_64/kernel/syscalls/syscall.tbl b/arch/sw_64/kernel/syscalls/syscall.tbl new file mode 100644 index 000000000000..fdf9e4cb03eb --- /dev/null +++ b/arch/sw_64/kernel/syscalls/syscall.tbl @@ -0,0 +1,528 @@ +# 
SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for sw64 +# +# The format is: +# +# +# The is always "common" for this file +# +#0 is unused +1 common exit sys_exit +2 common fork sys_fork +3 common read sys_read +4 common write sys_write +#5 is unused +6 common close sys_close +#7 is unused +#8 is unused +9 common link sys_link +10 common unlink sys_unlink +#11 is unused +12 common chdir sys_chdir +13 common fchdir sys_fchdir +14 common mknod sys_mknod +15 common chmod sys_chmod +16 common chown sys_chown +17 common brk sys_brk +#18 is unused +19 common lseek sys_lseek +20 common getxpid sys_getxpid +#21 is unused +22 common umount2 sys_umount +23 common setuid sys_setuid +24 common getxuid sys_getxuid +#25 is unused +26 common ptrace sys_ptrace +#27 is unused +#28 is unused +#29 is unused +#30 is unused +#31 is unused +#32 is unused +33 common access sys_access +#34 is unused +#35 is unused +36 common sync sys_sync +37 common kill sys_kill +#38 is unused +39 common setpgid sys_setpgid +#40 is unused +41 common dup sys_dup +42 common pipe sys_sw64_pipe +#43 is unused +#44 is unused +45 common open sys_open +#46 is unused +47 common getxgid sys_getxgid +48 common odd_sigprocmask sys_odd_sigprocmask +#49 is unused +#50 is unused +51 common acct sys_acct +52 common sigpending sys_sigpending +#53 is unused +54 common ioctl sys_ioctl +#55 is unused +#56 is unused +57 common symlink sys_symlink +58 common readlink sys_readlink +59 common execve sys_execve +60 common umask sys_umask +61 common chroot sys_chroot +#62 is unused +63 common getpgrp sys_getpgrp +#64 is unused +#65 is unused +66 common vfork sys_vfork +67 common stat sys_newstat +68 common lstat sys_newlstat +#69 is unused +#70 is unused +71 common mmap sys_mmap +#72 is unused +73 common munmap sys_munmap +74 common mprotect sys_mprotect +75 common madvise sys_madvise +76 common vhangup sys_vhangup +#77 is unused +#78 is unused +79 common getgroups 
sys_getgroups +80 common setgroups sys_setgroups +#81 is unused +82 common setpgrp sys_setpgid +#83 is unused +#84 is unused +#85 is unused +#86 is unused +87 common gethostname sys_gethostname +88 common sethostname sys_sethostname +#89 is unused +90 common dup2 sys_dup2 +91 common fstat sys_newfstat +92 common fcntl sys_fcntl +#93 is unused +94 common poll sys_poll +95 common fsync sys_fsync +96 common setpriority sys_setpriority +97 common socket sys_socket +98 common connect sys_connect +99 common accept sys_accept +100 common odd_getpriority sys_odd_getpriority +101 common send sys_send +102 common recv sys_recv +103 common sigreturn sys_sigreturn +104 common bind sys_bind +105 common setsockopt sys_setsockopt +106 common listen sys_listen +#107 is unused +#108 is unused +#109 is unused +#110 is unused +111 common sigsuspend sys_sigsuspend +#112 is unused +113 common recvmsg sys_recvmsg +114 common sendmsg sys_sendmsg +#115 is unused +#116 is unused +#117 is unused +118 common getsockopt sys_getsockopt +119 common socketcall sys_socketcall +120 common readv sys_readv +121 common writev sys_writev +#122 is unused +123 common fchown sys_fchown +124 common fchmod sys_fchmod +125 common recvfrom sys_recvfrom +126 common setreuid sys_setreuid +127 common setregid sys_setregid +128 common rename sys_rename +129 common truncate sys_truncate +130 common ftruncate sys_ftruncate +131 common flock sys_flock +132 common setgid sys_setgid +133 common sendto sys_sendto +134 common shutdown sys_shutdown +135 common socketpair sys_socketpair +136 common mkdir sys_mkdir +137 common rmdir sys_rmdir +#138 is unused +#139 is unused +#140 is unused +141 common getpeername sys_getpeername +#142 is unused +#143 is unused +144 common getrlimit sys_getrlimit +145 common setrlimit sys_setrlimit +#146 is unused +147 common setsid sys_setsid +148 common quotactl sys_quotactl +#149 is unused +150 common getsockname sys_getsockname +#151 is unused +#152 is unused +#153 is unused +#154 is 
unused +#155 is unused +156 common sigaction sys_odd_sigaction +#157 is unused +#158 is unused +#159 is unused +#160 is unused +#161 is unused +#162 is unused +#163 is unused +#164 is unused +#165 is unused +166 common setdomainname sys_setdomainname +#167 is unused +#168 is unused +#169 is unused +170 common bpf sys_bpf +171 common userfaultfd sys_userfaultfd +172 common membarrier sys_membarrier +173 common mlock2 sys_mlock2 +174 common getpid sys_getpid +175 common getppid sys_getppid +176 common getuid sys_getuid +177 common geteuid sys_geteuid +178 common getgid sys_getgid +179 common getegid sys_getegid +180 common epoll_pwait2 sys_epoll_pwait2 +181 common mount_setattr sys_mount_setattr +182 common quotactl_fd sys_quotactl_fd +183 common landlock_create_ruleset sys_landlock_create_ruleset +184 common landlock_add_rule sys_landlock_add_rule +185 common landlock_restrict_self sys_landlock_restrict_self +# 186 reserved for memfd_secret +187 common process_mrelease sys_process_mrelease +188 common futex_waitv sys_futex_waitv +189 common set_mempolicy_home_node sys_ni_syscall +190 common cachestat sys_cachestat +191 common fchmodat2 sys_fchmodat2 +#192 is unused +#193 is unused +#194 is unused +#195 is unused +#196 is unused +#197 is unused +#198 is unused +#199 is unused +200 common msgctl sys_old_msgctl +201 common msgget sys_msgget +202 common msgrcv sys_msgrcv +203 common msgsnd sys_msgsnd +204 common semctl sys_old_semctl +205 common semget sys_semget +206 common semop sys_semop +#207 is unused +208 common lchown sys_lchown +209 common shmat sys_shmat +210 common shmctl sys_old_shmctl +211 common shmdt sys_shmdt +212 common shmget sys_shmget +#213 is unused +#214 is unused +#215 is unused +#216 is unused +217 common msync sys_msync +#218 is unused +#219 is unused +#220 is unused +#221 is unused +#222 is unused +#223 is unused +#224 is unused +#225 is unused +#226 is unused +#227 is unused +#228 is unused +229 common statfs64 sys_statfs64 +230 common 
fstatfs64 sys_fstatfs64 +#231 is unused +#232 is unused +233 common getpgid sys_getpgid +234 common getsid sys_getsid +235 common sigaltstack sys_sigaltstack +#236 is unused +#237 is unused +#238 is unused +#239 is unused +#240 is unused +#241 is unused +#242 is unused +#243 is unused +#244 is unused +#245 is unused +#246 is unused +#247 is unused +#248 is unused +#249 is unused +#250 is unused +#251 is unused +#252 is unused +#253 is unused +254 common sysfs sys_sysfs +#255 is unused +256 common getsysinfo sys_getsysinfo +257 common setsysinfo sys_setsysinfo +#258 is unused +#259 is unused +#260 is unused +#261 is unused +#262 is unused +#263 is unused +#264 is unused +#265 is unused +#266 is unused +#267 is unused +#268 is unused +#269 is unused +#270 is unused +271 common pidfd_send_signal sys_pidfd_send_signal +272 common io_uring_setup sys_io_uring_setup +273 common io_uring_enter sys_io_uring_enter +274 common io_uring_register sys_io_uring_register +275 common open_tree sys_open_tree +276 common move_mount sys_move_mount +277 common fsopen sys_fsopen +278 common fsconfig sys_fsconfig +279 common fsmount sys_fsmount +280 common fspick sys_fspick +281 common pidfd_open sys_pidfd_open +282 common clone3 sys_clone3 +283 common close_range sys_close_range +284 common openat2 sys_openat2 +285 common pidfd_getfd sys_pidfd_getfd +286 common faccessat2 sys_faccessat2 +287 common process_madvise sys_process_madvise +288 common pkey_mprotect sys_pkey_mprotect +289 common pkey_alloc sys_pkey_alloc +290 common pkey_free sys_pkey_free +#291 is unused +#292 is unused +#293 is unused +#294 is unused +#295 is unused +#296 is unused +#297 is unused +298 common getpriority sys_getpriority +299 common sigprocmask sys_sigprocmask +300 common bdflush sys_ni_syscall +#301 is unused +302 common mount sys_mount +#303 is unused +304 common swapoff sys_swapoff +305 common getdents sys_getdents +306 common create_module sys_ni_syscall +307 common init_module sys_init_module +308 common 
delete_module sys_delete_module +309 common get_kernel_syms sys_ni_syscall +310 common syslog sys_syslog +311 common reboot sys_reboot +312 common clone sys_clone +313 common uselib sys_uselib +314 common mlock sys_mlock +315 common munlock sys_munlock +316 common mlockall sys_mlockall +317 common munlockall sys_munlockall +318 common sysinfo sys_sysinfo +#319 is unused +#320 is unused +321 common oldumount sys_oldumount +322 common swapon sys_swapon +323 common times sys_times +324 common personality sys_personality +325 common setfsuid sys_setfsuid +326 common setfsgid sys_setfsgid +327 common ustat sys_ustat +328 common statfs sys_statfs +329 common fstatfs sys_fstatfs +330 common sched_setparam sys_sched_setparam +331 common sched_getparam sys_sched_getparam +332 common sched_setscheduler sys_sched_setscheduler +333 common sched_getscheduler sys_sched_getscheduler +334 common sched_yield sys_sched_yield +335 common sched_get_priority_max sys_sched_get_priority_max +336 common sched_get_priority_min sys_sched_get_priority_min +337 common sched_rr_get_interval sys_sched_rr_get_interval +338 common afs_syscall sys_ni_syscall +339 common uname sys_newuname +340 common nanosleep sys_nanosleep +341 common mremap sys_mremap +342 common nfsservctl sys_ni_syscall +343 common setresuid sys_setresuid +344 common getresuid sys_getresuid +345 common pciconfig_read sys_pciconfig_read +346 common pciconfig_write sys_pciconfig_write +347 common query_module sys_ni_syscall +348 common prctl sys_prctl +349 common pread64 sys_pread64 +350 common pwrite64 sys_pwrite64 +351 common rt_sigreturn sys_rt_sigreturn +352 common rt_sigaction sys_rt_sigaction +353 common rt_sigprocmask sys_rt_sigprocmask +354 common rt_sigpending sys_rt_sigpending +355 common rt_sigtimedwait sys_rt_sigtimedwait +356 common rt_sigqueueinfo sys_rt_sigqueueinfo +357 common rt_sigsuspend sys_rt_sigsuspend +358 common select sys_select +359 common gettimeofday sys_gettimeofday +360 common settimeofday 
sys_settimeofday +361 common getitimer sys_getitimer +362 common setitimer sys_setitimer +363 common utimes sys_utimes +364 common getrusage sys_getrusage +365 common wait4 sys_wait4 +366 common adjtimex sys_adjtimex +367 common getcwd sys_getcwd +368 common capget sys_capget +369 common capset sys_capset +370 common sendfile sys_sendfile64 +371 common setresgid sys_setresgid +372 common getresgid sys_getresgid +373 common dipc sys_ni_syscall +374 common pivot_root sys_pivot_root +375 common mincore sys_mincore +376 common pciconfig_iobase sys_pciconfig_iobase +377 common getdents64 sys_getdents64 +378 common gettid sys_gettid +379 common readahead sys_readahead +#380 is unused +381 common tkill sys_tkill +382 common setxattr sys_setxattr +383 common lsetxattr sys_lsetxattr +384 common fsetxattr sys_fsetxattr +385 common getxattr sys_getxattr +386 common lgetxattr sys_lgetxattr +387 common fgetxattr sys_fgetxattr +388 common listxattr sys_listxattr +389 common llistxattr sys_llistxattr +390 common flistxattr sys_flistxattr +391 common removexattr sys_removexattr +392 common lremovexattr sys_lremovexattr +393 common fremovexattr sys_fremovexattr +394 common futex sys_futex +395 common sched_setaffinity sys_sched_setaffinity +396 common sched_getaffinity sys_sched_getaffinity +397 common tuxcall sys_ni_syscall +398 common io_setup sys_io_setup +399 common io_destroy sys_io_destroy +400 common io_getevents sys_io_getevents +401 common io_submit sys_io_submit +402 common io_cancel sys_io_cancel +403 common io_pgetevents sys_io_pgetevents +404 common rseq sys_rseq +405 common exit_group sys_exit_group +406 common lookup_dcookie sys_lookup_dcookie +407 common epoll_create sys_epoll_create +408 common epoll_ctl sys_epoll_ctl +409 common epoll_wait sys_epoll_wait +410 common remap_file_pages sys_remap_file_pages +411 common set_tid_address sys_set_tid_address +412 common restart_syscall sys_restart_syscall +413 common fadvise64 sys_fadvise64 +414 common timer_create 
sys_timer_create +415 common timer_settime sys_timer_settime +416 common timer_gettime sys_timer_gettime +417 common timer_getoverrun sys_timer_getoverrun +418 common timer_delete sys_timer_delete +419 common clock_settime sys_clock_settime +420 common clock_gettime sys_clock_gettime +421 common clock_getres sys_clock_getres +422 common clock_nanosleep sys_clock_nanosleep +423 common semtimedop sys_semtimedop +424 common tgkill sys_tgkill +425 common stat64 sys_stat64 +426 common lstat64 sys_lstat64 +427 common fstat64 sys_fstat64 +428 common vserver sys_ni_syscall +429 common mbind sys_mbind +430 common get_mempolicy sys_get_mempolicy +431 common set_mempolicy sys_set_mempolicy +432 common mq_open sys_mq_open +433 common mq_unlink sys_mq_unlink +434 common mq_timedsend sys_mq_timedsend +435 common mq_timedreceive sys_mq_timedreceive +436 common mq_notify sys_mq_notify +437 common mq_getsetattr sys_mq_getsetattr +438 common waitid sys_waitid +439 common add_key sys_add_key +440 common request_key sys_request_key +441 common keyctl sys_keyctl +442 common ioprio_set sys_ioprio_set +443 common ioprio_get sys_ioprio_get +444 common inotify_init sys_inotify_init +445 common inotify_add_watch sys_inotify_add_watch +446 common inotify_rm_watch sys_inotify_rm_watch +447 common fdatasync sys_fdatasync +448 common kexec_load sys_kexec_load +449 common migrate_pages sys_migrate_pages +450 common openat sys_openat +451 common mkdirat sys_mkdirat +452 common mknodat sys_mknodat +453 common fchownat sys_fchownat +454 common futimesat sys_futimesat +455 common fstatat64 sys_fstatat64 +456 common unlinkat sys_unlinkat +457 common renameat sys_renameat +458 common linkat sys_linkat +459 common symlinkat sys_symlinkat +460 common readlinkat sys_readlinkat +461 common fchmodat sys_fchmodat +462 common faccessat sys_faccessat +463 common pselect6 sys_pselect6 +464 common ppoll sys_ppoll +465 common unshare sys_unshare +466 common set_robust_list sys_set_robust_list +467 common 
get_robust_list sys_get_robust_list +468 common splice sys_splice +469 common sync_file_range sys_sync_file_range +470 common tee sys_tee +471 common vmsplice sys_vmsplice +472 common move_pages sys_move_pages +473 common getcpu sys_getcpu +474 common epoll_pwait sys_epoll_pwait +475 common utimensat sys_utimensat +476 common signalfd sys_signalfd +477 common timerfd sys_ni_syscall +478 common eventfd sys_eventfd +479 common recvmmsg sys_recvmmsg +480 common fallocate sys_fallocate +481 common timerfd_create sys_timerfd_create +482 common timerfd_settime sys_timerfd_settime +483 common timerfd_gettime sys_timerfd_gettime +484 common signalfd4 sys_signalfd4 +485 common eventfd2 sys_eventfd2 +486 common epoll_create1 sys_epoll_create1 +487 common dup3 sys_dup3 +488 common pipe2 sys_pipe2 +489 common inotify_init1 sys_inotify_init1 +490 common preadv sys_preadv +491 common pwritev sys_pwritev +492 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo +493 common perf_event_open sys_perf_event_open +494 common fanotify_init sys_fanotify_init +495 common fanotify_mark sys_fanotify_mark +496 common prlimit64 sys_prlimit64 +497 common name_to_handle_at sys_name_to_handle_at +498 common open_by_handle_at sys_open_by_handle_at +499 common clock_adjtime sys_clock_adjtime +500 common syncfs sys_syncfs +501 common setns sys_setns +502 common accept4 sys_accept4 +503 common sendmmsg sys_sendmmsg +504 common process_vm_readv sys_process_vm_readv +505 common process_vm_writev sys_process_vm_writev +506 common kcmp sys_kcmp +507 common finit_module sys_finit_module +508 common sched_setattr sys_sched_setattr +509 common sched_getattr sys_sched_getattr +510 common renameat2 sys_renameat2 +511 common getrandom sys_getrandom +512 common memfd_create sys_memfd_create +513 common execveat sys_execveat +514 common seccomp sys_seccomp +515 common copy_file_range sys_copy_file_range +516 common preadv2 sys_preadv2 +517 common pwritev2 sys_pwritev2 +518 common statx sys_statx diff --git 
a/arch/sw_64/kernel/systbls.S b/arch/sw_64/kernel/systbls.S new file mode 100644 index 000000000000..010ca3f8e016 --- /dev/null +++ b/arch/sw_64/kernel/systbls.S @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/kernel/systbls.S + * + * The system call table. + */ + +#include + +#define __SYSCALL(nr, entry) .quad entry + .data + .align 3 + .globl sys_call_table +sys_call_table: +#include -- Gitee From 3bdea373963b57ec0358a04cc0405f20191f2b37 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:34 +0800 Subject: [PATCH 0288/2138] anolis: sw64: add signal handling support ANBZ: #4688 Add ucontext/sigcontext definition and signal handling support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/signal.h | 28 ++ arch/sw_64/include/uapi/asm/sigcontext.h | 34 ++ arch/sw_64/include/uapi/asm/siginfo.h | 10 + arch/sw_64/include/uapi/asm/signal.h | 119 +++++++ arch/sw_64/include/uapi/asm/ucontext.h | 14 + arch/sw_64/kernel/signal.c | 378 +++++++++++++++++++++++ 6 files changed, 583 insertions(+) create mode 100644 arch/sw_64/include/asm/signal.h create mode 100644 arch/sw_64/include/uapi/asm/sigcontext.h create mode 100644 arch/sw_64/include/uapi/asm/siginfo.h create mode 100644 arch/sw_64/include/uapi/asm/signal.h create mode 100644 arch/sw_64/include/uapi/asm/ucontext.h create mode 100644 arch/sw_64/kernel/signal.c diff --git a/arch/sw_64/include/asm/signal.h b/arch/sw_64/include/asm/signal.h new file mode 100644 index 000000000000..4dc3b6510b86 --- /dev/null +++ b/arch/sw_64/include/asm/signal.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SIGNAL_H +#define _ASM_SW64_SIGNAL_H + +#include + +/* Digital Unix defines 64 signals. Most things should be clean enough + * to redefine this at will, if care is taken to make libc match. 
+ */ + +#define _NSIG 64 +#define _NSIG_BPW 64 +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) + +typedef unsigned long old_sigset_t; /* at least 32 bits */ + +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + +struct odd_sigaction { + __sighandler_t sa_handler; + old_sigset_t sa_mask; + int sa_flags; +}; + +#include +#endif /* _ASM_SW64_SIGNAL_H */ diff --git a/arch/sw_64/include/uapi/asm/sigcontext.h b/arch/sw_64/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000000..08a081470383 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/sigcontext.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SIGCONTEXT_H +#define _UAPI_ASM_SW64_SIGCONTEXT_H + +/* + * Signal context structure + * + * The context is saved before a signal handler is invoked, and it is + * restored by sys_sigreturn / sys_rt_sigreturn. + */ +struct sigcontext { + long sc_onstack; + long sc_mask; + long sc_pc; + long sc_ps; + long sc_regs[32]; + long sc_ownedfp; + long sc_fpregs[128]; /* SIMD-FP */ + unsigned long sc_fpcr; + /* TODO: Following are unused, to be removed and synced with libc */ + unsigned long sc_fp_control; + unsigned long sc_reserved1, sc_reserved2; + unsigned long sc_ssize; + char *sc_sbase; + unsigned long sc_traparg_a0; + unsigned long sc_traparg_a1; + unsigned long sc_traparg_a2; + unsigned long sc_fp_trap_pc; + unsigned long sc_fp_trigger_sum; + unsigned long sc_fp_trigger_inst; +}; + + +#endif /* _UAPI_ASM_SW64_SIGCONTEXT_H */ diff --git a/arch/sw_64/include/uapi/asm/siginfo.h b/arch/sw_64/include/uapi/asm/siginfo.h new file mode 100644 index 000000000000..f47fb917c9b2 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/siginfo.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SIGINFO_H +#define _UAPI_ASM_SW64_SIGINFO_H + +#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) + +#include + + +#endif /* _UAPI_ASM_SW64_SIGINFO_H */ diff --git 
a/arch/sw_64/include/uapi/asm/signal.h b/arch/sw_64/include/uapi/asm/signal.h new file mode 100644 index 000000000000..0d7a935fe37c --- /dev/null +++ b/arch/sw_64/include/uapi/asm/signal.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SIGNAL_H +#define _UAPI_ASM_SW64_SIGNAL_H + +#include + +/* Avoid too many header ordering problems. */ +struct siginfo; + +#ifndef __KERNEL__ +/* Here we must cater to libcs that poke about in kernel headers. */ + +#define NSIG 32 +typedef unsigned long sigset_t; + +#endif /* __KERNEL__ */ + + +/* + * Linux/sw64 different signal numbers that Linux/i386. + */ +#define SIGHUP 1 +#define SIGINT 2 +#define SIGQUIT 3 +#define SIGILL 4 +#define SIGTRAP 5 +#define SIGABRT 6 +#define SIGEMT 7 +#define SIGFPE 8 +#define SIGKILL 9 +#define SIGBUS 10 +#define SIGSEGV 11 +#define SIGSYS 12 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGTERM 15 +#define SIGURG 16 +#define SIGSTOP 17 +#define SIGTSTP 18 +#define SIGCONT 19 +#define SIGCHLD 20 +#define SIGTTIN 21 +#define SIGTTOU 22 +#define SIGIO 23 +#define SIGXCPU 24 +#define SIGXFSZ 25 +#define SIGVTALRM 26 +#define SIGPROF 27 +#define SIGWINCH 28 +#define SIGINFO 29 +#define SIGUSR1 30 +#define SIGUSR2 31 + +#define SIGPOLL SIGIO +#define SIGPWR SIGINFO +#define SIGIOT SIGABRT + +/* These should not be considered constants from userland. */ +#define SIGRTMIN 32 +#define SIGRTMAX _NSIG + +/* + * SA_FLAGS values: + * + * SA_ONSTACK indicates that a registered stack_t will be used. + * SA_RESTART flag to get restarting signals (which were the default long ago) + * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. + * SA_RESETHAND clears the handler when the signal is delivered. + * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. + * SA_NODEFER prevents the current signal from being masked in the handler. 
+ * + * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single + * Unix names RESETHAND and NODEFER respectively. + */ + +#define SA_ONSTACK 0x00000001 +#define SA_RESTART 0x00000002 +#define SA_NOCLDSTOP 0x00000004 +#define SA_NODEFER 0x00000008 +#define SA_RESETHAND 0x00000010 +#define SA_NOCLDWAIT 0x00000020 +#define SA_SIGINFO 0x00000040 + +#define SA_ONESHOT SA_RESETHAND +#define SA_NOMASK SA_NODEFER + +#define MINSIGSTKSZ 4096 +#define SIGSTKSZ 16384 + +#define SIG_BLOCK 1 /* for blocking signals */ +#define SIG_UNBLOCK 2 /* for unblocking signals */ +#define SIG_SETMASK 3 /* for setting the signal mask */ + +#include + +#ifndef __KERNEL__ +/* Here we must cater to libcs that poke about in kernel headers. */ + +struct sigaction { + union { + __sighandler_t _sa_handler; + void (*_sa_sigaction)(int sig, struct siginfo *info, void *ucontext); + } _u; + sigset_t sa_mask; + int sa_flags; +}; + +#define sa_handler _u._sa_handler +#define sa_sigaction _u._sa_sigaction + +#endif /* __KERNEL__ */ + +typedef struct sigaltstack { + void __user *ss_sp; + int ss_flags; + size_t ss_size; +} stack_t; + +#endif /* _UAPI_ASM_SW64_SIGNAL_H */ diff --git a/arch/sw_64/include/uapi/asm/ucontext.h b/arch/sw_64/include/uapi/asm/ucontext.h new file mode 100644 index 000000000000..c5d6e24e3e5f --- /dev/null +++ b/arch/sw_64/include/uapi/asm/ucontext.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_UCONTEXT_H +#define _UAPI_ASM_SW64_UCONTEXT_H + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + old_sigset_t uc_old_sigmask; + stack_t uc_stack; + struct sigcontext uc_mcontext; + sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +#endif /* _UAPI_ASM_SW64_UCONTEXT_H */ diff --git a/arch/sw_64/kernel/signal.c b/arch/sw_64/kernel/signal.c new file mode 100644 index 000000000000..496f33bb1c89 --- /dev/null +++ b/arch/sw_64/kernel/signal.c @@ -0,0 +1,378 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw_64/kernel/signal.c + * + * Copyright (C) 1995 Linus Torvalds + * + * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "proto.h" + + +#define DEBUG_SIG 0 + +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) + +SYSCALL_DEFINE2(odd_sigprocmask, int, how, unsigned long, newmask) +{ + sigset_t oldmask; + sigset_t mask; + unsigned long res; + + siginitset(&mask, newmask & _BLOCKABLE); + res = sigprocmask(how, &mask, &oldmask); + if (!res) { + force_successful_syscall_return(); + res = oldmask.sig[0]; + } + return res; +} + +SYSCALL_DEFINE3(odd_sigaction, int, sig, + const struct odd_sigaction __user *, act, + struct odd_sigaction __user *, oact) +{ + struct k_sigaction new_ka, old_ka; + old_sigset_t mask; + int ret; + + if (act) { + if (!access_ok(act, sizeof(*act)) || + __get_user(new_ka.sa.sa_handler, &act->sa_handler) || + __get_user(new_ka.sa.sa_flags, &act->sa_flags) || + __get_user(mask, &act->sa_mask)) + return -EFAULT; + siginitset(&new_ka.sa.sa_mask, mask); + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); + + if (!ret && oact) { + if (!access_ok(oact, sizeof(*oact)) || + __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || + __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) + return -EFAULT; + } + + return ret; +} + +/* + * Do a signal return; undo the signal stack. + */ + +#if _NSIG_WORDS > 1 +# error "Non SA_SIGINFO frame needs rearranging" +#endif + +struct rt_sigframe { + struct siginfo info; + struct ucontext uc; +}; + +/* + * If this changes, userland unwinders that Know Things about our signal + * frame will break. Do not undertake lightly. It also implies an ABI + * change wrt the size of siginfo_t, which may cause some pain. 
+ */ +extern char compile_time_assert + [offsetof(struct rt_sigframe, uc.uc_mcontext) == 176 ? 1 : -1]; + +static long +restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) +{ + long err = __get_user(regs->pc, &sc->sc_pc); + + err |= __copy_from_user(regs, sc->sc_regs, sizeof_field(struct pt_regs, regs)); + /* simd-fp */ + err |= __copy_from_user(¤t->thread.fpstate, &sc->sc_fpregs, + offsetof(struct user_fpsimd_state, fpcr)); + err |= __get_user(current->thread.fpstate.fpcr, &sc->sc_fpcr); + + if (likely(!err)) + __fpstate_restore(current); + + return err; +} + +/* + * Note that this syscall is also used by setcontext(3) to install + * a given sigcontext. This because it's impossible to set *all* + * registers and transfer control from userland. + */ + +SYSCALL_DEFINE1(sigreturn, struct sigcontext __user *, sc) +{ + struct pt_regs *regs = current_pt_regs(); + sigset_t set; + + force_successful_syscall_return(); + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + /* Verify that it's a good sigcontext before using it */ + if (!access_ok(sc, sizeof(*sc))) + goto give_sigsegv; + if (__get_user(set.sig[0], &sc->sc_mask)) + goto give_sigsegv; + + set_current_blocked(&set); + + if (restore_sigcontext(sc, regs)) + goto give_sigsegv; + + /* Send SIGTRAP if we're single-stepping: */ + if (ptrace_cancel_bpt(current)) { + force_sig_fault(SIGTRAP, TRAP_BRKPT, + (void __user *)regs->pc); + } + return regs->regs[0]; + +give_sigsegv: + force_sig(SIGSEGV); + return 0; +} + +SYSCALL_DEFINE1(rt_sigreturn, struct rt_sigframe __user *, frame) +{ + struct pt_regs *regs = current_pt_regs(); + sigset_t set; + + force_successful_syscall_return(); + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + /* Verify that it's a good ucontext_t before using it */ + if (!access_ok(&frame->uc, sizeof(frame->uc))) + goto give_sigsegv; 
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto give_sigsegv; + + set_current_blocked(&set); + + if (restore_sigcontext(&frame->uc.uc_mcontext, regs)) + goto give_sigsegv; + + if (restore_altstack(&frame->uc.uc_stack)) + goto give_sigsegv; + + /* Send SIGTRAP if we're single-stepping: */ + if (ptrace_cancel_bpt(current)) { + force_sig_fault(SIGTRAP, TRAP_BRKPT, + (void __user *)regs->pc); + } + return regs->regs[0]; + +give_sigsegv: + force_sig(SIGSEGV); + return 0; +} + + +/* + * Set up a signal frame. + */ + +static inline void __user * +get_sigframe(struct ksignal *ksig, unsigned long sp, size_t frame_size) +{ + return (void __user *)((sigsp(sp, ksig) - frame_size) & -32ul); +} + +static long +setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, + unsigned long mask) +{ + long err = 0; + + err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack); + err |= __put_user(mask, &sc->sc_mask); + err |= __put_user(regs->pc, &sc->sc_pc); + err |= __put_user(8, &sc->sc_ps); + + err |= __copy_to_user(sc->sc_regs, regs, sizeof_field(struct pt_regs, regs)); + err |= __put_user(0, sc->sc_regs+31); + /* simd-fp */ + __fpstate_save(current); + err |= __copy_to_user(&sc->sc_fpregs, ¤t->thread.fpstate, + offsetof(struct user_fpsimd_state, fpcr)); + err |= __put_user(current->thread.fpstate.fpcr, &sc->sc_fpcr); + + return err; +} + +static int +setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) +{ + unsigned long err = 0; + struct rt_sigframe __user *frame; + + frame = get_sigframe(ksig, regs->regs[30], sizeof(*frame)); + if (!access_ok(frame, sizeof(*frame))) + return -EFAULT; + + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + err |= copy_siginfo_to_user(&frame->info, &ksig->info); + + /* Create the ucontext. 
*/ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(0, &frame->uc.uc_link); + err |= __put_user(set->sig[0], &frame->uc.uc_old_sigmask); + err |= __save_altstack(&frame->uc.uc_stack, regs->regs[30]); + err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + if (err) + return -EFAULT; + + /* "Return" to the handler */ + regs->regs[26] = VDSO_SYMBOL(current->mm->context.vdso, rt_sigreturn); + regs->regs[27] = regs->pc = (unsigned long) ksig->ka.sa.sa_handler; + regs->regs[16] = ksig->sig; /* a0: signal number */ + if (ksig->ka.sa.sa_flags & SA_SIGINFO) { + /* a1: siginfo pointer, a2: ucontext pointer */ + regs->regs[17] = (unsigned long) &frame->info; + regs->regs[18] = (unsigned long) &frame->uc; + } else { + /* a1: exception code, a2: sigcontext pointer */ + regs->regs[17] = 0; + regs->regs[18] = (unsigned long) &frame->uc.uc_mcontext; + } + regs->regs[30] = (unsigned long) frame; + +#if DEBUG_SIG + pr_info("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", + current->comm, current->pid, frame, regs->pc, regs->regs[26]); +#endif + + return 0; +} + +/* + * OK, we're invoking a handler. + */ +static inline void +handle_signal(struct ksignal *ksig, struct pt_regs *regs) +{ + sigset_t *oldset = sigmask_to_save(); + int ret; + + rseq_signal_deliver(ksig, regs); + + ret = setup_rt_frame(ksig, oldset, regs); + + signal_setup_done(ret, ksig, 0); +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + * + * Note that we go through the signals twice: once to check the signals that + * the kernel can handle, and then we build all the user-level signal handling + * stack-frames in one go after that. 
+ */ +static void +do_signal(struct pt_regs *regs) +{ + unsigned long single_stepping = ptrace_cancel_bpt(current); + struct ksignal ksig; + + /* This lets the debugger run, ... */ + if (get_signal(&ksig)) { + /* ... so re-check the single stepping. */ + single_stepping |= ptrace_cancel_bpt(current); + /* Whee! Actually deliver the signal. */ + if (regs->orig_r0 != NO_SYSCALL) { + switch (syscall_get_error(current, regs)) { + case -ERESTARTSYS: + if (!(ksig.ka.sa.sa_flags & SA_RESTART)) { + regs->regs[0] = EINTR; + break; + } + fallthrough; + case -ERESTARTNOINTR: + /* reset v0 and a3 and replay syscall */ + regs->regs[0] = regs->orig_r0; + regs->regs[19] = regs->orig_r19; + regs->pc -= 4; + break; + case -ERESTARTNOHAND: + case -ERESTART_RESTARTBLOCK: + regs->regs[0] = EINTR; + break; + } + regs->orig_r0 = NO_SYSCALL; + } + handle_signal(&ksig, regs); + } else { + single_stepping |= ptrace_cancel_bpt(current); + if (regs->orig_r0 != NO_SYSCALL) { + switch (syscall_get_error(current, regs)) { + case -ERESTARTSYS: + case -ERESTARTNOINTR: + case -ERESTARTNOHAND: + /* Reset v0 and a3 and replay syscall. 
*/ + regs->regs[0] = regs->orig_r0; + regs->regs[19] = regs->orig_r19; + regs->pc -= 4; + break; + case -ERESTART_RESTARTBLOCK: + /* Set v0 to the restart_syscall and replay */ + regs->regs[0] = __NR_restart_syscall; + regs->pc -= 4; + break; + } + regs->orig_r0 = NO_SYSCALL; + } + restore_saved_sigmask(); + } + if (single_stepping) + ptrace_set_bpt(current); /* re-set breakpoint */ +} + +asmlinkage void +do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) +{ + do { + local_irq_enable(); + + if (thread_flags & _TIF_NEED_RESCHED) + schedule(); + + if (thread_flags & _TIF_UPROBE) { + unsigned long pc = regs->pc; + + uprobe_notify_resume(regs); + sw64_fix_uretprobe(regs, pc - 4); + } + + if (thread_flags & _TIF_PATCH_PENDING) + klp_update_patch_state(current); + + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) + do_signal(regs); + + if (thread_flags & _TIF_NOTIFY_RESUME) + resume_user_mode_work(regs); + + local_irq_disable(); + thread_flags = READ_ONCE(current_thread_info()->flags); + } while (thread_flags & _TIF_WORK_MASK); +} -- Gitee From 52687ed509de5e5c9680f69022d9eb99a11e1d8c Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:08 +0800 Subject: [PATCH 0289/2138] anolis: sw64: add FPU support ANBZ: #4688 Add FPU and floating-point emulation support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/fpu.h | 91 ++ arch/sw_64/include/asm/sfp-machine.h | 69 + arch/sw_64/include/uapi/asm/fpu.h | 233 +++ arch/sw_64/kernel/fpu.S | 111 ++ arch/sw_64/math-emu/Makefile | 10 + arch/sw_64/math-emu/math.c | 2255 ++++++++++++++++++++++++++ arch/sw_64/math-emu/qrnnd.S | 133 ++ arch/sw_64/math-emu/sfp-util.h | 41 + 8 files changed, 2943 insertions(+) create mode 100644 arch/sw_64/include/asm/fpu.h create mode 100644 arch/sw_64/include/asm/sfp-machine.h create mode 100644 arch/sw_64/include/uapi/asm/fpu.h create mode 100644 arch/sw_64/kernel/fpu.S create mode 100644 arch/sw_64/math-emu/Makefile create mode 100644 arch/sw_64/math-emu/math.c create mode 100644 arch/sw_64/math-emu/qrnnd.S create mode 100644 arch/sw_64/math-emu/sfp-util.h diff --git a/arch/sw_64/include/asm/fpu.h b/arch/sw_64/include/asm/fpu.h new file mode 100644 index 000000000000..a0b0ff5af168 --- /dev/null +++ b/arch/sw_64/include/asm/fpu.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_FPU_H +#define _ASM_SW64_FPU_H + +#include +#ifdef __KERNEL__ + +/* + * The following two functions don't need trapb/excb instructions + * around the mf_fpcr/mt_fpcr instructions because (a) the kernel + * never generates arithmetic faults and (b) sys_call instructions + * are implied trap barriers. 
+ */ + +static inline unsigned long +rdfpcr(void) +{ + unsigned long ret; + unsigned long fp[4] __aligned(32); + + __asm__ __volatile__ ( + " vstd $f0, %0\n\t" + " rfpcr $f0\n\t" + " fimovd $f0, %1\n\t" + " vldd $f0, %0\n\t" + : "=m"(*fp), "=r"(ret)); + + return ret; +} + +static inline void +wrfpcr(unsigned long val) +{ + unsigned long tmp; + unsigned long fp[4] __aligned(32); + + __asm__ __volatile__ ( + " vstd $f0, %0\n\t" + " ifmovd %2, $f0\n\t" + " wfpcr $f0\n\t" + " and %2, 0x3, %1\n\t" + " beq %1, 1f\n\t" + " subl %1, 1, %1\n\t" + " beq %1, 2f\n\t" + " subl %1, 1, %1\n\t" + " beq %1, 3f\n\t" + " setfpec3\n\t" + " br 6f\n\t" + "1: setfpec0\n\t" + " br 6f\n\t" + "2: setfpec1\n\t" + " br 6f\n\t" + "3: setfpec2\n\t" + "6: vldd $f0, %0\n\t" + : "=m"(*fp), "=&r"(tmp) : "r"(val)); +} + +static inline unsigned long +swcr_update_status(unsigned long swcr, unsigned long fpcr) +{ + /* + * SW64 implements most of the bits in hardware. Collect + * the acrued exception bits from the real fpcr. 
+ */ + swcr &= ~(IEEE_STATUS_MASK0 | IEEE_STATUS_MASK1 + | IEEE_STATUS_MASK2 | IEEE_STATUS_MASK3); + swcr |= (fpcr >> 35) & IEEE_STATUS_MASK0; + swcr |= (fpcr >> 13) & IEEE_STATUS_MASK1; + swcr |= (fpcr << 14) & IEEE_STATUS_MASK2; + swcr |= (fpcr << 36) & IEEE_STATUS_MASK3; + return swcr; +} + +extern unsigned long sw64_read_fp_reg(unsigned long reg); +extern void sw64_write_fp_reg(unsigned long reg, unsigned long val); +extern unsigned long sw64_read_fp_reg_s(unsigned long reg); +extern void sw64_write_fp_reg_s(unsigned long reg, unsigned long val); + + +extern void sw64_write_simd_fp_reg_s(unsigned long reg, + unsigned long f0, unsigned long f1); +extern void sw64_write_simd_fp_reg_d(unsigned long reg, + unsigned long f0, unsigned long f1, + unsigned long f2, unsigned long f3); +extern void sw64_write_simd_fp_reg_ldwe(unsigned long reg, int a); +extern void sw64_read_simd_fp_m_s(unsigned long reg, unsigned long *fp_value); +extern void sw64_read_simd_fp_m_d(unsigned long reg, unsigned long *fp_value); + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_FPU_H */ diff --git a/arch/sw_64/include/asm/sfp-machine.h b/arch/sw_64/include/asm/sfp-machine.h new file mode 100644 index 000000000000..156bebc9c515 --- /dev/null +++ b/arch/sw_64/include/asm/sfp-machine.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Machine-dependent software floating-point definitions. + * sw64 kernel version. + * Copyright (C) 1997,1998,1999 Free Software Foundation, Inc. + * This file is part of the GNU C Library. + * Contributed by Richard Henderson (rth@cygnus.com), + * Jakub Jelinek (jakub@redhat.com) and + * David S. Miller (davem@redhat.com). 
+ */ + +#ifndef _ASM_SW64_SFP_MACHINE_H +#define _ASM_SW64_SFP_MACHINE_H + +#define _FP_W_TYPE_SIZE 64 +#define _FP_W_TYPE unsigned long +#define _FP_WS_TYPE signed long +#define _FP_I_TYPE long + +#define _FP_MUL_MEAT_S(R, X, Y) \ + _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S, R, X, Y) +#define _FP_MUL_MEAT_D(R, X, Y) \ + _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D, R, X, Y, umul_ppmm) +#define _FP_MUL_MEAT_Q(R, X, Y) \ + _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q, R, X, Y, umul_ppmm) + +#define _FP_DIV_MEAT_S(R, X, Y) _FP_DIV_MEAT_1_imm(S, R, X, Y, _FP_DIV_HELP_imm) +#define _FP_DIV_MEAT_D(R, X, Y) _FP_DIV_MEAT_1_udiv(D, R, X, Y) +#define _FP_DIV_MEAT_Q(R, X, Y) _FP_DIV_MEAT_2_udiv(Q, R, X, Y) + +#define _FP_NANFRAC_S _FP_QNANBIT_S +#define _FP_NANFRAC_D _FP_QNANBIT_D +#define _FP_NANFRAC_Q _FP_QNANBIT_Q +#define _FP_NANSIGN_S 1 +#define _FP_NANSIGN_D 1 +#define _FP_NANSIGN_Q 1 + +#define _FP_KEEPNANFRACP 1 + +/* Sw_64 Architecture Handbook, 4.7.10.4 sais that + * we should prefer any type of NaN in Fb, then Fa. + */ +#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ +do { \ + R##_s = Y##_s; \ + _FP_FRAC_COPY_##wc(R, X); \ + R##_c = FP_CLS_NAN; \ +} while (0) + +/* Obtain the current rounding mode. */ +#define FP_ROUNDMODE mode +#define FP_RND_NEAREST (FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT) +#define FP_RND_ZERO (FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT) +#define FP_RND_PINF (FPCR_DYN_PLUS >> FPCR_DYN_SHIFT) +#define FP_RND_MINF (FPCR_DYN_MINUS >> FPCR_DYN_SHIFT) + +/* Exception flags. 
*/ +#define FP_EX_INVALID IEEE_TRAP_ENABLE_INV +#define FP_EX_OVERFLOW IEEE_TRAP_ENABLE_OVF +#define FP_EX_UNDERFLOW IEEE_TRAP_ENABLE_UNF +#define FP_EX_DIVZERO IEEE_TRAP_ENABLE_DZE +#define FP_EX_INEXACT IEEE_TRAP_ENABLE_INE +#define FP_EX_DENORM IEEE_TRAP_ENABLE_DNO + +#define FP_DENORM_ZERO (swcr & IEEE_MAP_DMZ) + +/* We write the results always */ +#define FP_INHIBIT_RESULTS 0 + +#endif /* _ASM_SW64_SFP_MACHINE_H */ diff --git a/arch/sw_64/include/uapi/asm/fpu.h b/arch/sw_64/include/uapi/asm/fpu.h new file mode 100644 index 000000000000..8945816c542b --- /dev/null +++ b/arch/sw_64/include/uapi/asm/fpu.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_FPU_H +#define _UAPI_ASM_SW64_FPU_H + +/* + * SW-64 floating-point control register defines: + */ +#define FPCR_DNOD (1UL << 47) /* denorm INV trap disable */ +#ifdef CONFIG_SUBARCH_C3B +#define FPCR_DNZ (1UL << 48) /* denorms to zero */ +#else +#define FPCR_DNOE (1UL << 48) /* hardware denormal support */ +#endif +#define FPCR_INVD (1UL << 49) /* invalid op disable (opt.) */ +#define FPCR_DZED (1UL << 50) /* division by zero disable (opt.) */ +#define FPCR_OVFD (1UL << 51) /* overflow disable (optional) */ +#define FPCR_INV (1UL << 52) /* invalid operation */ +#define FPCR_DZE (1UL << 53) /* division by zero */ +#define FPCR_OVF (1UL << 54) /* overflow */ +#define FPCR_UNF (1UL << 55) /* underflow */ +#define FPCR_INE (1UL << 56) /* inexact */ +#define FPCR_IOV (1UL << 57) /* integer overflow */ +#define FPCR_UNDZ (1UL << 60) /* underflow to zero (opt.) */ +#define FPCR_UNFD (1UL << 61) /* underflow disable (opt.) */ +#define FPCR_INED (1UL << 62) /* inexact disable (opt.) 
*/ +#define FPCR_SUM (1UL << 63) /* summary bit */ + +#define FPCR_DYN_SHIFT 58 /* first dynamic rounding mode bit */ +#define FPCR_DYN_CHOPPED (0x0UL << FPCR_DYN_SHIFT) /* towards 0 */ +#define FPCR_DYN_MINUS (0x1UL << FPCR_DYN_SHIFT) /* towards -INF */ +#define FPCR_DYN_NORMAL (0x2UL << FPCR_DYN_SHIFT) /* towards nearest */ +#define FPCR_DYN_PLUS (0x3UL << FPCR_DYN_SHIFT) /* towards +INF */ +#define FPCR_DYN_MASK (0x3UL << FPCR_DYN_SHIFT) + +#define FPCR_MASK 0xffff800000000000L + +#ifdef CONFIG_SUBARCH_C3B +#define FPCR_INIT FPCR_DYN_NORMAL +#else +#define FPCR_INIT (FPCR_DYN_NORMAL | FPCR_DNOE) +#endif + +/* status bit coming from hardware fpcr . definde by fire3 */ +#define FPCR_STATUS_INV0 (1UL << 52) +#define FPCR_STATUS_DZE0 (1UL << 53) +#define FPCR_STATUS_OVF0 (1UL << 54) +#define FPCR_STATUS_UNF0 (1UL << 55) +#define FPCR_STATUS_INE0 (1UL << 56) +#define FPCR_STATUS_OVI0 (1UL << 57) + +#define FPCR_STATUS_INV1 (1UL << 36) +#define FPCR_STATUS_DZE1 (1UL << 37) +#define FPCR_STATUS_OVF1 (1UL << 38) +#define FPCR_STATUS_UNF1 (1UL << 39) +#define FPCR_STATUS_INE1 (1UL << 40) +#define FPCR_STATUS_OVI1 (1UL << 41) + +#define FPCR_STATUS_INV2 (1UL << 20) +#define FPCR_STATUS_DZE2 (1UL << 21) +#define FPCR_STATUS_OVF2 (1UL << 22) +#define FPCR_STATUS_UNF2 (1UL << 23) +#define FPCR_STATUS_INE2 (1UL << 24) +#define FPCR_STATUS_OVI2 (1UL << 25) + +#define FPCR_STATUS_INV3 (1UL << 4) +#define FPCR_STATUS_DZE3 (1UL << 5) +#define FPCR_STATUS_OVF3 (1UL << 6) +#define FPCR_STATUS_UNF3 (1UL << 7) +#define FPCR_STATUS_INE3 (1UL << 8) +#define FPCR_STATUS_OVI3 (1UL << 9) + +#define FPCR_STATUS_MASK0 (FPCR_STATUS_INV0 | FPCR_STATUS_DZE0 | \ + FPCR_STATUS_OVF0 | FPCR_STATUS_UNF0 | \ + FPCR_STATUS_INE0 | FPCR_STATUS_OVI0) + +#define FPCR_STATUS_MASK1 (FPCR_STATUS_INV1 | FPCR_STATUS_DZE1 | \ + FPCR_STATUS_OVF1 | FPCR_STATUS_UNF1 | \ + FPCR_STATUS_INE1 | FPCR_STATUS_OVI1) + +#define FPCR_STATUS_MASK2 (FPCR_STATUS_INV2 | FPCR_STATUS_DZE2 | \ + FPCR_STATUS_OVF2 | 
FPCR_STATUS_UNF2 | \ + FPCR_STATUS_INE2 | FPCR_STATUS_OVI2) + +#define FPCR_STATUS_MASK3 (FPCR_STATUS_INV3 | FPCR_STATUS_DZE3 | \ + FPCR_STATUS_OVF3 | FPCR_STATUS_UNF3 | \ + FPCR_STATUS_INE3 | FPCR_STATUS_OVI3) + + +/* + * IEEE trap enables are implemented in software. These per-thread + * bits are stored in the "ieee_state" field of "struct thread_info". + * Thus, the bits are defined so as not to conflict with the + * floating-point enable bit (which is architected). + */ +#define IEEE_TRAP_ENABLE_INV (1UL << 1) /* invalid op */ +#define IEEE_TRAP_ENABLE_DZE (1UL << 2) /* division by zero */ +#define IEEE_TRAP_ENABLE_OVF (1UL << 3) /* overflow */ +#define IEEE_TRAP_ENABLE_UNF (1UL << 4) /* underflow */ +#define IEEE_TRAP_ENABLE_INE (1UL << 5) /* inexact */ +#define IEEE_TRAP_ENABLE_DNO (1UL << 6) /* denorm */ +#define IEEE_TRAP_ENABLE_MASK (IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE |\ + IEEE_TRAP_ENABLE_OVF | IEEE_TRAP_ENABLE_UNF |\ + IEEE_TRAP_ENABLE_INE | IEEE_TRAP_ENABLE_DNO) + +/* Denorm and Underflow flushing */ +#define IEEE_MAP_DMZ (1UL << 12) /* Map denorm inputs to zero */ +#define IEEE_MAP_UMZ (1UL << 13) /* Map underflowed outputs to zero */ + +#define IEEE_MAP_MASK (IEEE_MAP_DMZ | IEEE_MAP_UMZ) + +/* status bits coming from fpcr: */ +#define IEEE_STATUS_INV (1UL << 17) +#define IEEE_STATUS_DZE (1UL << 18) +#define IEEE_STATUS_OVF (1UL << 19) +#define IEEE_STATUS_UNF (1UL << 20) +#define IEEE_STATUS_INE (1UL << 21) +#define IEEE_STATUS_DNO (1UL << 22) + + +#define IEEE_STATUS_MASK (IEEE_STATUS_INV | IEEE_STATUS_DZE | \ + IEEE_STATUS_OVF | IEEE_STATUS_UNF | \ + IEEE_STATUS_INE | IEEE_STATUS_DNO) + +#define IEEE_SW_MASK (IEEE_TRAP_ENABLE_MASK | \ + IEEE_STATUS_MASK | IEEE_MAP_MASK) + +#define IEEE_CURRENT_RM_SHIFT 32 +#define IEEE_CURRENT_RM_MASK (3UL << IEEE_CURRENT_RM_SHIFT) + +#define IEEE_STATUS_TO_EXCSUM_SHIFT 16 + +#define IEEE_INHERIT (1UL << 63) /* inherit on thread create? 
*/ + +/* ieee_state expand to surport simd added by fire3 */ + +#define IEEE_STATUS_INV0 (1UL << 17) +#define IEEE_STATUS_DZE0 (1UL << 18) +#define IEEE_STATUS_OVF0 (1UL << 19) +#define IEEE_STATUS_UNF0 (1UL << 20) +#define IEEE_STATUS_INE0 (1UL << 21) +#define IEEE_STATUS_DNO0 (1UL << 22) +#define IEEE_STATUS_MASK0 (IEEE_STATUS_INV0 | IEEE_STATUS_DZE0 | \ + IEEE_STATUS_OVF0 | IEEE_STATUS_UNF0 | \ + IEEE_STATUS_INE0 | IEEE_STATUS_DNO0) + +#define IEEE_STATUS0_TO_EXCSUM_SHIFT 16 + +#define IEEE_STATUS_INV1 (1UL << 23) +#define IEEE_STATUS_DZE1 (1UL << 24) +#define IEEE_STATUS_OVF1 (1UL << 25) +#define IEEE_STATUS_UNF1 (1UL << 26) +#define IEEE_STATUS_INE1 (1UL << 27) +#define IEEE_STATUS_DNO1 (1UL << 28) +#define IEEE_STATUS_MASK1 (IEEE_STATUS_INV1 | IEEE_STATUS_DZE1 | \ + IEEE_STATUS_OVF1 | IEEE_STATUS_UNF1 | \ + IEEE_STATUS_INE1 | IEEE_STATUS_DNO1) + +#define IEEE_STATUS1_TO_EXCSUM_SHIFT 22 + +#define IEEE_STATUS_INV2 (1UL << 34) +#define IEEE_STATUS_DZE2 (1UL << 35) +#define IEEE_STATUS_OVF2 (1UL << 36) +#define IEEE_STATUS_UNF2 (1UL << 37) +#define IEEE_STATUS_INE2 (1UL << 38) +#define IEEE_STATUS_DNO2 (1UL << 39) +#define IEEE_STATUS_MASK2 (IEEE_STATUS_INV2 | IEEE_STATUS_DZE2 | \ + IEEE_STATUS_OVF2 | IEEE_STATUS_UNF2 | \ + IEEE_STATUS_INE2 | IEEE_STATUS_DNO2) + +#define IEEE_STATUS2_TO_EXCSUM_SHIFT 33 + +#define IEEE_STATUS_INV3 (1UL << 40) +#define IEEE_STATUS_DZE3 (1UL << 41) +#define IEEE_STATUS_OVF3 (1UL << 42) +#define IEEE_STATUS_UNF3 (1UL << 43) +#define IEEE_STATUS_INE3 (1UL << 44) +#define IEEE_STATUS_DNO3 (1UL << 45) +#define IEEE_STATUS_MASK3 (IEEE_STATUS_INV3 | IEEE_STATUS_DZE3 | \ + IEEE_STATUS_OVF3 | IEEE_STATUS_UNF3 | \ + IEEE_STATUS_INE3 | IEEE_STATUS_DNO3) + +#define IEEE_STATUS3_TO_EXCSUM_SHIFT 39 + + +/* + * Convert the software IEEE trap enable and status bits into the + * hardware fpcr format. + * + * Digital Unix engineers receive my thanks for not defining the + * software bits identical to the hardware bits. 
The chip designers + * receive my thanks for making all the not-implemented fpcr bits + * RAZ forcing us to use system calls to read/write this value. + */ +static inline unsigned long +ieee_swcr_to_fpcr(unsigned long sw) +{ + unsigned long fp; + + fp = (sw & IEEE_STATUS_MASK0) << 35; + fp |= (sw & IEEE_STATUS_MASK1) << 13; + fp |= (sw & IEEE_STATUS_MASK2) >> 14; + fp |= (sw & IEEE_STATUS_MASK3) >> 36; + + fp |= (sw & IEEE_MAP_DMZ) << 36; + fp |= (sw & IEEE_STATUS_MASK0 ? FPCR_SUM : 0); + fp |= (sw & IEEE_STATUS_MASK1 ? FPCR_SUM : 0); + fp |= (sw & IEEE_STATUS_MASK2 ? FPCR_SUM : 0); + fp |= (sw & IEEE_STATUS_MASK3 ? FPCR_SUM : 0); + fp |= (~sw & (IEEE_TRAP_ENABLE_INV + | IEEE_TRAP_ENABLE_DZE + | IEEE_TRAP_ENABLE_OVF)) << 48; + fp |= (~sw & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE)) << 57; + fp |= (sw & IEEE_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0); + fp |= (~sw & IEEE_TRAP_ENABLE_DNO) << 41; + return fp; +} + +static inline unsigned long +ieee_fpcr_to_swcr(unsigned long fp) +{ + unsigned long sw; + + sw = (fp >> 35) & IEEE_STATUS_MASK; + sw |= (fp >> 36) & IEEE_MAP_DMZ; + sw |= (~fp >> 48) & (IEEE_TRAP_ENABLE_INV + | IEEE_TRAP_ENABLE_DZE + | IEEE_TRAP_ENABLE_OVF); + sw |= (~fp >> 57) & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE); + sw |= (fp >> 47) & IEEE_MAP_UMZ; + sw |= (~fp >> 41) & IEEE_TRAP_ENABLE_DNO; + return sw; +} +#endif /* _UAPI_ASM_SW64_FPU_H */ diff --git a/arch/sw_64/kernel/fpu.S b/arch/sw_64/kernel/fpu.S new file mode 100644 index 000000000000..ddc988681fdd --- /dev/null +++ b/arch/sw_64/kernel/fpu.S @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include +#include + + .text + .set noat +ENTRY(__fpstate_save) + /* a0: prev task */ +#ifdef CONFIG_SUBARCH_C4 + csrr $1, CSR_WR_FREGS + beq $1, out +#endif + vstd $f0, TASK_THREAD_F0(a0) + vstd $f1, TASK_THREAD_F1(a0) + vstd $f2, TASK_THREAD_F2(a0) + vstd $f3, TASK_THREAD_F3(a0) + vstd $f4, TASK_THREAD_F4(a0) + vstd $f5, TASK_THREAD_F5(a0) + vstd $f6, 
TASK_THREAD_F6(a0) + vstd $f7, TASK_THREAD_F7(a0) + vstd $f8, TASK_THREAD_F8(a0) + vstd $f9, TASK_THREAD_F9(a0) + vstd $f10, TASK_THREAD_F10(a0) + vstd $f11, TASK_THREAD_F11(a0) + vstd $f12, TASK_THREAD_F12(a0) + vstd $f13, TASK_THREAD_F13(a0) + vstd $f14, TASK_THREAD_F14(a0) + vstd $f15, TASK_THREAD_F15(a0) + vstd $f16, TASK_THREAD_F16(a0) + vstd $f17, TASK_THREAD_F17(a0) + vstd $f18, TASK_THREAD_F18(a0) + vstd $f19, TASK_THREAD_F19(a0) + vstd $f20, TASK_THREAD_F20(a0) + vstd $f21, TASK_THREAD_F21(a0) + vstd $f22, TASK_THREAD_F22(a0) + vstd $f23, TASK_THREAD_F23(a0) + vstd $f24, TASK_THREAD_F24(a0) + vstd $f25, TASK_THREAD_F25(a0) + vstd $f26, TASK_THREAD_F26(a0) + vstd $f27, TASK_THREAD_F27(a0) + rfpcr $f0 + vstd $f28, TASK_THREAD_F28(a0) + vstd $f29, TASK_THREAD_F29(a0) + vstd $f30, TASK_THREAD_F30(a0) + fstd $f0, TASK_THREAD_FPCR(a0) + vldd $f0, TASK_THREAD_F0(a0) +out: + ret +END(__fpstate_save) + +ENTRY(__fpstate_restore) + /* a0: next task */ + fldd $f0, TASK_THREAD_FPCR(a0) + wfpcr $f0 + fimovd $f0, t1 + and t1, 0x3, t1 + beq t1, $setfpec_0 + subl t1, 0x1, t1 + beq t1, $setfpec_1 + subl t1, 0x1, t1 + beq t1, $setfpec_2 + setfpec3 + br $setfpec_over +$setfpec_0: + setfpec0 + br $setfpec_over +$setfpec_1: + setfpec1 + br $setfpec_over +$setfpec_2: + setfpec2 +$setfpec_over: + vldd $f0, TASK_THREAD_F0(a0) + vldd $f1, TASK_THREAD_F1(a0) + vldd $f2, TASK_THREAD_F2(a0) + vldd $f3, TASK_THREAD_F3(a0) + vldd $f4, TASK_THREAD_F4(a0) + vldd $f5, TASK_THREAD_F5(a0) + vldd $f6, TASK_THREAD_F6(a0) + vldd $f7, TASK_THREAD_F7(a0) + vldd $f8, TASK_THREAD_F8(a0) + vldd $f9, TASK_THREAD_F9(a0) + vldd $f10, TASK_THREAD_F10(a0) + vldd $f11, TASK_THREAD_F11(a0) + vldd $f12, TASK_THREAD_F12(a0) + vldd $f13, TASK_THREAD_F13(a0) + vldd $f14, TASK_THREAD_F14(a0) + vldd $f15, TASK_THREAD_F15(a0) + vldd $f16, TASK_THREAD_F16(a0) + vldd $f17, TASK_THREAD_F17(a0) + vldd $f18, TASK_THREAD_F18(a0) + vldd $f19, TASK_THREAD_F19(a0) + vldd $f20, TASK_THREAD_F20(a0) + vldd $f21, 
TASK_THREAD_F21(a0) + vldd $f22, TASK_THREAD_F22(a0) + vldd $f23, TASK_THREAD_F23(a0) + vldd $f24, TASK_THREAD_F24(a0) + vldd $f25, TASK_THREAD_F25(a0) + vldd $f26, TASK_THREAD_F26(a0) + vldd $f27, TASK_THREAD_F27(a0) + vldd $f28, TASK_THREAD_F28(a0) + vldd $f29, TASK_THREAD_F29(a0) + vldd $f30, TASK_THREAD_F30(a0) +#ifdef CONFIG_SUBARCH_C4 + csrw $31, CSR_WR_FREGS +#endif + ret +END(__fpstate_restore) diff --git a/arch/sw_64/math-emu/Makefile b/arch/sw_64/math-emu/Makefile new file mode 100644 index 000000000000..72e750d138e6 --- /dev/null +++ b/arch/sw_64/math-emu/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the FPU instruction emulation. +# + +ccflags-y := -w + +obj-$(CONFIG_MATHEMU) += math-emu.o + +math-emu-objs := math.o qrnnd.o diff --git a/arch/sw_64/math-emu/math.c b/arch/sw_64/math-emu/math.c new file mode 100644 index 000000000000..b578752f0730 --- /dev/null +++ b/arch/sw_64/math-emu/math.c @@ -0,0 +1,2255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Modify History + * + * who when what + * --- ---- ---- + * stone 2004-09-02 Add SIMD floating emulation code + * fire3 2008-12-27 Add SIMD floating emulation code for SW64 + */ + +#include + +#include + +#include "sfp-util.h" + +#include +#include +#include + +/* + * This is for sw64 + */ + +#define IEEE_E_STATUS_MASK IEEE_STATUS_MASK +#define IEEE_E_STATUS_TO_EXCSUM_SHIFT 0 +#define SW64_FP_DENOMAL 1 /* A denormal data */ +#define SW64_FP_NORMAL 0 /* A denormal data */ +#define SW64_FP_NAN 2 + +#define SW64_FP_NAN_S(X, val) \ +do { \ + union _FP_UNION_S *_flo = \ + (union _FP_UNION_S *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 255: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_NAN; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + + +#define SW64_FP_NAN_D(X, val) \ +do { \ + union _FP_UNION_D *_flo = \ + 
(union _FP_UNION_D *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 2047: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_NAN; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + + + +#define SW64_FP_NORMAL_S(X, val) \ +do { \ + union _FP_UNION_S *_flo = \ + (union _FP_UNION_S *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 0: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_DENOMAL; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + +#define SW64_FP_NORMAL_D(X, val) \ +do { \ + union _FP_UNION_D *_flo = \ + (union _FP_UNION_D *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 0: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_DENOMAL; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + +/* Operation Code for SW64 */ +#define OP_SIMD_1 0x1A +#define OP_SIMD_2 0x1B +#define OP_SIMD_MUL_ADD 0x1B +#define OP_SIMD_NORMAL 0x1A +#define OP_MUL_ADD 0x19 + +#define FNC_FMAS 0x0 +#define FNC_FMAD 0x1 +#define FNC_FMSS 0x2 +#define FNC_FMSD 0x3 +#define FNC_FNMAS 0x4 +#define FNC_FNMAD 0x5 +#define FNC_FNMSS 0x6 +#define FNC_FNMSD 0x7 + +#define FNC_VADDS 0x80 +#define FNC_VADDD 0x81 +#define FNC_VSUBS 0x82 +#define FNC_VSUBD 0x83 +#define FNC_VMULS 0x84 +#define FNC_VMULD 0x85 +#define FNC_VDIVS 0x86 +#define FNC_VDIVD 0x87 +#define FNC_VSQRTS 0x88 +#define FNC_VSQRTD 0x89 + +#define FNC_VFCMPEQ 0x8c +#define FNC_VFCMPLE 0x8d +#define FNC_VFCMPLT 0x8e +#define FNC_VFCMPUN 0x8f + +#define FNC_VCPYS 0x90 +#define FNC_VCPYSE 0x91 +#define FNC_VCPYSN 0x92 + +#define FNC_VMAS 0x0 +#define FNC_VMAD 0x1 +#define FNC_VMSS 
0x2 +#define FNC_VMSD 0x3 +#define FNC_VNMAS 0x4 +#define FNC_VNMAD 0x5 +#define FNC_VNMSS 0x6 +#define FNC_VNMSD 0x7 + +long simd_fp_emul_s(unsigned long pc); +long simd_fp_emul_d(unsigned long pc); +long mul_add_fp_emul(unsigned long pc); +long simd_cmp_emul_d(unsigned long pc); + +long simd_mul_add_fp_emul_d(unsigned long pc); +long simd_mul_add_fp_emul_s(unsigned long pc); + +void read_fp_reg_s(unsigned long reg, unsigned long *p0, + unsigned long *p1, unsigned long *p2, unsigned long *p3); +void read_fp_reg_d(unsigned long reg, unsigned long *val_p0, + unsigned long *p1, unsigned long *p2, unsigned long *p3); +void write_fp_reg_s(unsigned long reg, unsigned long val_p0, + unsigned long p1, unsigned long p2, unsigned long p3); +void write_fp_reg_d(unsigned long reg, unsigned long val_p0, + unsigned long p1, unsigned long p2, unsigned long p3); +#define LOW_64_WORKING 1 +#define HIGH_64_WORKING 2 + +/* + * End for sw64 + */ + +#define OPC_HMC 0x00 +#define OPC_INTA 0x10 +#define OPC_INTL 0x11 +#define OPC_INTS 0x12 +#define OPC_INTM 0x13 +#define OPC_FLTC 0x14 +#define OPC_FLTV 0x15 +#define OPC_FLTI 0x16 +#define OPC_FLTL 0x17 +#define OPC_MISC 0x18 +#define OPC_JSR 0x1a + +#define FOP_SRC_S 0 +#define FOP_SRC_T 2 +#define FOP_SRC_Q 3 + +#define FOP_FNC_ADDx 0 +#define FOP_FNC_CVTQL 0 +#define FOP_FNC_SUBx 1 +#define FOP_FNC_MULx 2 +#define FOP_FNC_DIVx 3 +#define FOP_FNC_CMPxUN 4 +#define FOP_FNC_CMPxEQ 5 +#define FOP_FNC_CMPxLT 6 +#define FOP_FNC_CMPxLE 7 +#define FOP_FNC_SQRTx 11 +#define FOP_FNC_CVTxS 12 +#define FOP_FNC_CVTxT 14 +#define FOP_FNC_CVTxQ 15 + +/* this is for sw64 added by fire3*/ +#define FOP_FNC_ADDS 0 +#define FOP_FNC_ADDD 1 +#define FOP_FNC_SUBS 2 +#define FOP_FNC_SUBD 3 +#define FOP_FNC_MULS 4 +#define FOP_FNC_MULD 5 +#define FOP_FNC_DIVS 6 +#define FOP_FNC_DIVD 7 +#define FOP_FNC_SQRTS 8 +#define FOP_FNC_SQRTD 9 + +#define FOP_FNC_CMPEQ 0x10 +#define FOP_FNC_CMPLE 0x11 +#define FOP_FNC_CMPLT 0x12 +#define FOP_FNC_CMPUN 0x13 + +#define 
FOP_FNC_CVTSD 0x20 +#define FOP_FNC_CVTDS 0x21 +#define FOP_FNC_CVTLS 0x2D +#define FOP_FNC_CVTLD 0x2F +#define FOP_FNC_CVTDL 0x27 +#define FOP_FNC_CVTDL_G 0x22 +#define FOP_FNC_CVTDL_P 0x23 +#define FOP_FNC_CVTDL_Z 0x24 +#define FOP_FNC_CVTDL_N 0x25 + +#define FOP_FNC_CVTWL 0x28 +#define FOP_FNC_CVTLW 0x29 + +/* fire3 added end */ + + +#define MISC_TRAPB 0x0000 +#define MISC_EXCB 0x0400 + +extern unsigned long sw64_read_fp_reg(unsigned long reg); +extern void sw64_write_fp_reg(unsigned long reg, unsigned long val); +extern unsigned long sw64_read_fp_reg_s(unsigned long reg); +extern void sw64_write_fp_reg_s(unsigned long reg, unsigned long val); + + +#ifdef MODULE + +MODULE_DESCRIPTION("FP Software completion module"); + +extern long (*sw64_fp_emul_imprecise)(struct pt_regs *regs, unsigned long write_mask); +extern long (*sw64_fp_emul)(unsigned long pc); + +static long (*save_emul_imprecise)(struct pt_regs *regs, unsigned long write_mask); +static long (*save_emul)(unsigned long pc); + +long do_sw_fp_emul_imprecise(struct pt_regs *regs, unsigned long write_mask); +long do_sw_fp_emul(unsigned long pc); + +int init_module(void) +{ + save_emul_imprecise = sw64_fp_emul_imprecise; + save_emul = sw64_fp_emul; + sw64_fp_emul_imprecise = do_sw_fp_emul_imprecise; + sw64_fp_emul = do_sw_fp_emul; + return 0; +} + +void cleanup_module(void) +{ + sw64_fp_emul_imprecise = save_emul_imprecise; + sw64_fp_emul = save_emul; +} + +#undef sw64_fp_emul_imprecise +#define sw64_fp_emul_imprecise do_sw_fp_emul_imprecise +#undef sw64_fp_emul +#define sw64_fp_emul do_sw_fp_emul + +#endif /* MODULE */ + + +/* + * Emulate the floating point instruction at address PC. Returns -1 if the + * instruction to be emulated is illegal (such as with the opDEC trap), else + * the SI_CODE for a SIGFPE signal, else 0 if everything's ok. + * + * Notice that the kernel does not and cannot use FP regs. 
This is good + * because it means that instead of saving/restoring all fp regs, we simply + * stick the result of the operation into the appropriate register. + */ +long sw64_fp_emul(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); + + unsigned long fa, fb, fc, func, mode, mode_bk, src; + unsigned long res, va, vb, vc, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long opcode; + + get_user(insn, (__u32 *)pc); + opcode = (insn >> 26) & 0x3f; + fc = (insn >> 0) & 0x1f; /* destination register */ + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 5) & 0xff; + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + pr_debug("======= Entering Floating mathe emulation =====\n"); + pr_debug("Floating math emulation insn = %#lx, opcode=%d, func=%d\n", insn, opcode, func); + pr_debug("SW64 hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("SW64 software swcr = %#lx\n", swcr); + pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode); + + if (opcode == OP_SIMD_NORMAL) { /* float simd math */ + if (func == FNC_VADDS || func == FNC_VSUBS || func == FNC_VSQRTS + || func == FNC_VMULS || func == FNC_VDIVS) + si_code = simd_fp_emul_s(pc); + if (func == FNC_VADDD || func == FNC_VSUBD || func == FNC_VSQRTD + || func == FNC_VMULD || func == FNC_VDIVD) + si_code = simd_fp_emul_d(pc); + if (func == FNC_VFCMPUN || func == FNC_VFCMPLT || func == FNC_VFCMPLE + || func == FNC_VFCMPEQ) + si_code = simd_cmp_emul_d(pc); + return si_code; + } + if (opcode == OP_SIMD_MUL_ADD) {/* simd mul and add */ + func = (insn >> 10) & 0x3f; + if (func == FNC_VMAS || func == FNC_VMSS || func == FNC_VNMAS + || func == FNC_VNMSS) { + si_code = simd_mul_add_fp_emul_s(pc); + return si_code; + } + + if (func == FNC_VMAD || func == FNC_VMSD || func == FNC_VNMAD + || func == FNC_VNMSD) { + si_code = 
simd_mul_add_fp_emul_d(pc); + return si_code; + } + func = (insn >> 5) & 0xff; + } + + if (opcode == OP_MUL_ADD) { + si_code = mul_add_fp_emul(pc); + return si_code; + } + switch (func) { + case FOP_FNC_SUBS: + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_SUB_S(SR, SA, SB); + goto pack_s; + + case FOP_FNC_SUBD: + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_SUB_D(DR, DA, DB); + goto pack_d; + + case FOP_FNC_ADDS: + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_ADD_S(SR, SA, SB); + goto pack_s; + + case FOP_FNC_ADDD: + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_ADD_D(DR, DA, DB); + goto pack_d; + + case FOP_FNC_MULS: + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_MUL_S(SR, SA, SB); + goto pack_s; + + case FOP_FNC_MULD: + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_MUL_D(DR, DA, DB); + goto pack_d; + + case FOP_FNC_DIVS: + pr_debug("FOP_FNC_DIVS\n"); + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_DIV_S(SR, SA, SB); + goto pack_s; + + case FOP_FNC_DIVD: + pr_debug("FOP_FNC_DIVD\n"); + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_DIV_D(DR, DA, DB); + goto pack_d; + + case FOP_FNC_SQRTS: + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_SQRT_S(SR, SB); + goto pack_s; + case FOP_FNC_SQRTD: + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_SQRT_D(DR, DB); + goto pack_d; + } + + + va = sw64_read_fp_reg(fa); + 
vb = sw64_read_fp_reg(fb); + if ((func & ~0xf) == FOP_FNC_CMPEQ) { + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + + FP_UNPACK_RAW_DP(DA, &va); + FP_UNPACK_RAW_DP(DB, &vb); + if (!DA_e && !_FP_FRAC_ZEROP_1(DA)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DA, _FP_ZEROFRAC_1); + } + if (!DB_e && !_FP_FRAC_ZEROP_1(DB)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DB, _FP_ZEROFRAC_1); + } + FP_CMP_D(res, DA, DB, 3); + vc = 0x4000000000000000; + /* CMPTEQ, CMPTUN don't trap on QNaN, while CMPTLT and CMPTLE do */ + if (res == 3 && (((func == FOP_FNC_CMPLT) || (func == FOP_FNC_CMPLE)) + || FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB))) { + pr_debug("CMPLT CMPLE:func:%d, trap on QNaN.", func); + FP_SET_EXCEPTION(FP_EX_INVALID); + } + switch (func) { + case FOP_FNC_CMPUN: + if (res != 3) + vc = 0; + break; + case FOP_FNC_CMPEQ: + if (res) + vc = 0; + break; + case FOP_FNC_CMPLT: + if (res != -1) + vc = 0; + break; + case FOP_FNC_CMPLE: + if ((long)res > 0) + vc = 0; + break; + } + goto done_d; + } + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + + if (func == FOP_FNC_CVTSD) { + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SB, &vb); + DR_c = DB_c; + DR_s = DB_s; + DR_e = DB_e + (1024 - 128); + DR_f = SB_f << (52 - 23); + goto pack_d; + } + + if (func == FOP_FNC_CVTDS) { + FP_CONV(S, D, 1, 1, SR, DB); + goto pack_s; + } + + if (func == FOP_FNC_CVTDL || func == FOP_FNC_CVTDL_G || func == FOP_FNC_CVTDL_P + || func == FOP_FNC_CVTDL_Z || func == FOP_FNC_CVTDL_N) { + mode_bk = mode; + if (func == FOP_FNC_CVTDL_Z) + mode = 0x0UL; + else if (func == FOP_FNC_CVTDL_N) + mode = 0x1UL; + else if (func == FOP_FNC_CVTDL_G) + mode = 0x2UL; + else if (func == FOP_FNC_CVTDL_P) + mode = 0x3UL; + + if (DB_c == FP_CLS_NAN && (_FP_FRAC_HIGH_RAW_D(DB) & _FP_QNANBIT_D)) { + /* AAHB Table B-2 says QNaN should not trigger INV */ + vc = 0; + } else + FP_TO_INT_ROUND_D(vc, DB, 64, 2); + mode = mode_bk; + goto done_d; + } + + vb = 
sw64_read_fp_reg(fb); + + switch (func) { + case FOP_FNC_CVTLW: + /* + * Notice: We can get here only due to an integer + * overflow. Such overflows are reported as invalid + * ops. We return the result the hw would have + * computed. + */ + vc = ((vb & 0xc0000000) << 32 | /* sign and msb */ + (vb & 0x3fffffff) << 29); /* rest of the int */ + FP_SET_EXCEPTION(FP_EX_INVALID); + goto done_d; + + case FOP_FNC_CVTLS: + FP_FROM_INT_S(SR, ((long)vb), 64, long); + goto pack_s; + + case FOP_FNC_CVTLD: + FP_FROM_INT_D(DR, ((long)vb), 64, long); + goto pack_d; + } + goto bad_insn; + + +pack_s: + FP_PACK_SP(&vc, SR); + + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vc = 0; + pr_debug("SW64 Emulation S-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 Emulation S-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); + sw64_write_fp_reg_s(fc, vc); + goto done; + +pack_d: + FP_PACK_DP(&vc, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vc = 0; + pr_debug("SW64 Emulation D-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 Emulation D-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +done_d: + sw64_write_fp_reg(fc, vc); + goto done; + + /* + * Take the appropriate action for each possible + * floating-point result: + * + * - Set the appropriate bits in the FPCR + * - If the specified exception is enabled in the FPCR, + * return. The caller (entArith) will dispatch + * the appropriate signal to the translated program. + * + * In addition, properly track the exception state in software + * as described in the SW64 Architecture Handbook section 4.7.7.3. + */ +done: + if (_fex) { + /* Record exceptions in software control word. */ + swcr |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
*/ + fpcr &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr |= ieee_swcr_to_fpcr(swcr); + pr_debug("SW64 before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + + /* Do we generate a signal? */ + _fex = _fex & swcr & IEEE_TRAP_ENABLE_MASK; + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + + return si_code; + } + + /* + * We used to write the destination register here, but DEC FORTRAN + * requires that the result *always* be written... so we do the write + * immediately after the operations above. + */ + + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +long sw64_fp_emul_imprecise(struct pt_regs *regs, unsigned long write_mask) +{ + unsigned long trigger_pc = regs->pc - 4; + unsigned long insn, opcode, rc, si_code = 0; + + + /* + * Turn off the bits corresponding to registers that are the + * target of instructions that set bits in the exception + * summary register. We have some slack doing this because a + * register that is the target of a trapping instruction can + * be written at most once in the trap shadow. + * + * Branches, jumps, TRAPBs, EXCBs and calls to HMcode all + * bound the trap shadow, so we need not look any further than + * up to the first occurrence of such an instruction. + */ + while (write_mask) { + get_user(insn, (__u32 *)(trigger_pc)); + opcode = insn >> 26; + rc = insn & 0x1f; + + switch (opcode) { + case OPC_HMC: + case OPC_JSR: + case 0x30 ... 
0x3f: /* branches */ + goto egress; + + case OPC_MISC: + switch (insn & 0xffff) { + case MISC_TRAPB: + case MISC_EXCB: + goto egress; + + default: + break; + } + break; + + case OPC_INTA: + case OPC_INTL: + case OPC_INTS: + case OPC_INTM: + write_mask &= ~(1UL << rc); + break; + + case OPC_FLTC: + case OPC_FLTV: + case OPC_FLTI: + case OPC_FLTL: + write_mask &= ~(1UL << (rc + 32)); + break; + } + if (!write_mask) { + /* Re-execute insns in the trap-shadow. */ + regs->pc = trigger_pc + 4; + si_code = sw64_fp_emul(trigger_pc); + goto egress; + } + trigger_pc -= 4; + } + +egress: + return si_code; +} + +#define WORKING_PART_0 0 +#define WORKING_PART_1 1 +#define WORKING_PART_2 2 +#define WORKING_PART_3 3 + + +/* + * This is for sw64 + */ + +long simd_cmp_emul_d(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); FP_DECL_D(DC); + unsigned long fa, fb, fc, func, mode, src; + unsigned long res, va, vb, vc, swcr, fpcr; + __u32 insn; + long si_code; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + get_user(insn, (__u32 *)pc); + fc = (insn >> 0) & 0x1f; /* destination register */ + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 5) & 0xff; + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + pr_debug("======== Entering SIMD floating-CMP math emulation =======\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode); + read_fp_reg_d(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_d(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_d(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + 
pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + break; + } + pr_debug("Before unpack va:%#lx, vb:%#lx\n", va, vb); + FP_UNPACK_RAW_DP(DA, &va); + FP_UNPACK_RAW_DP(DB, &vb); + pr_debug("DA_e:%d, _FP_FRAC_ZEROP_1(DA):%d\n", DA_e, _FP_FRAC_ZEROP_1(DA)); + pr_debug("DB_e:%d, _FP_FRAC_ZEROP_1(DB):%d\n", DA_e, _FP_FRAC_ZEROP_1(DA)); + pr_debug("DA iszero:%d, DB iszero:%d\n", ((!DA_e && _FP_FRAC_ZEROP_1(DA)) ? 
1 : 0), + ((!DB_e && _FP_FRAC_ZEROP_1(DB)))); + if (!DA_e && !_FP_FRAC_ZEROP_1(DA)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DA, _FP_ZEROFRAC_1); + } + if (!DB_e && !_FP_FRAC_ZEROP_1(DB)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DB, _FP_ZEROFRAC_1); + } + FP_CMP_D(res, DA, DB, 3); + vc = 0x4000000000000000; + /* CMPTEQ, CMPTUN don't trap on QNaN, while CMPTLT and CMPTLE do */ + if (res == 3 && (((func == FOP_FNC_CMPLT) || (func == FOP_FNC_CMPLE)) + || FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB))) { + pr_debug("CMPLT CMPLE:func:%d, trap on QNaN.", func); + FP_SET_EXCEPTION(FP_EX_INVALID); + } + pr_debug("res:%d\n", res); + switch (func) { + case FNC_VFCMPUN: + if (res != 3) + vc = 0; + break; + case FNC_VFCMPEQ: + if (res) + vc = 0; + break; + case FNC_VFCMPLT: + if (res != -1) + vc = 0; + break; + case FNC_VFCMPLE: + if ((long)res > 0) + vc = 0; + break; + } +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vc_p0 = vc; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vc_p1 = vc; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vc_p2 = vc; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vc_p3 = vc; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
*/ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + pr_debug("Before write fp: vc_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_d(fc, vc_p0, vc_p1, vc_p2, vc_p3); + + /* Do we generate a signal? 
*/ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + + +long simd_fp_emul_d(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); FP_DECL_D(DC); + unsigned long fa, fb, fc, func, mode, src; + unsigned long res, va, vb, vc, swcr, fpcr; + __u32 insn; + long si_code; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + get_user(insn, (__u32 *)pc); + fc = (insn >> 0) & 0x1f; /* destination register */ + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 5) & 0xff; + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + pr_debug("======== Entering SIMD D-floating math emulation =======\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode); + read_fp_reg_d(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_d(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_d(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + pr_debug("va_p0:%#lx, 
va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("LOW: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + if ((fpcr & FPCR_STATUS_MASK1) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + if ((fpcr & FPCR_STATUS_MASK2) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + if ((fpcr & FPCR_STATUS_MASK3) == 0) { + 
SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + break; + } + + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + + switch (func) { + case FNC_VSUBD: + pr_debug("FNC_VSUBD\n"); + FP_SUB_D(DR, DA, DB); + goto pack_d; + case FNC_VMULD: + pr_debug("FNC_VMULD\n"); + FP_MUL_D(DR, DA, DB); + goto pack_d; + case FNC_VADDD: + pr_debug("FNC_VADDD\n"); + FP_ADD_D(DR, DA, DB); + goto pack_d; + case FNC_VDIVD: + pr_debug("FNC_VDIVD\n"); + FP_DIV_D(DR, DA, DB); + goto pack_d; + case FNC_VSQRTD: + pr_debug("FNC_VSQRTD\n"); + FP_SQRT_D(DR, DB); + goto pack_d; + } +pack_d: + FP_PACK_DP(&vc, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) { + pr_debug("pack_d, vc=0 !!!!\n"); + vc = 0; + } + + pr_debug("SW64 SIMD Emulation D-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 SIMD Emulation D-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vc_p0 = vc; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vc_p1 = vc; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vc_p2 = vc; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vc_p3 = vc; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + 
current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + pr_debug("Before write fp: vp_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_d(fc, vc_p0, vc_p1, vc_p2, vc_p3); + + /* Do we generate a signal? 
*/ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +long simd_fp_emul_s(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); + + unsigned long fa, fb, fc, func, mode, src; + unsigned long res, va, vb, vc, swcr, fpcr; + __u32 insn; + long si_code; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + get_user(insn, (__u32 *)pc); + fc = (insn >> 0) & 0x1f; /* destination register */ + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 5) & 0xff; + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + pr_debug("======== Entering SIMD S-floating math emulation =======\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode); + read_fp_reg_s(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_s(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_s(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, 
va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART0: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + if ((fpcr & FPCR_STATUS_MASK1) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART1: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + if ((fpcr & FPCR_STATUS_MASK2) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART2: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + if ((fpcr & FPCR_STATUS_MASK3) == 0) { + SW64_FP_NORMAL_S(SA, &va); + 
SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART3: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + + } + + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + + switch (func) { + case FNC_VSUBS: + pr_debug("FNC_VSUBS\n"); + FP_SUB_S(SR, SA, SB); + goto pack_s; + case FNC_VMULS: + pr_debug("FNC_VMULS\n"); + FP_MUL_S(SR, SA, SB); + goto pack_s; + case FNC_VADDS: + pr_debug("FNC_VADDS\n"); + FP_ADD_S(SR, SA, SB); + goto pack_s; + case FNC_VDIVS: + pr_debug("FNC_VDIVS\n"); + FP_DIV_S(SR, SA, SB); + goto pack_s; + case FNC_VSQRTS: + pr_debug("FNC_VSQRTS\n"); + FP_SQRT_S(SR, SB); + goto pack_s; + } +pack_s: + FP_PACK_SP(&vc, SR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) { + pr_debug("pack_s, vc=0 !!!!\n"); + vc = 0; + } + + pr_debug("SW64 SIMD Emulation S-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 SIMD Emulation S-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vc_p0 = vc; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vc_p1 = vc; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vc_p2 = vc; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vc_p3 = vc; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= 
(fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + pr_debug("fex_p0: fpcr_p0:%#lx\n", fpcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + pr_debug("fex_p1: fpcr_p1:%#lx\n", fpcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + pr_debug("fex_p2: fpcr_p2:%#lx\n", fpcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + pr_debug("fex_p3: fpcr_p3:%#lx\n", fpcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + + pr_debug("Before write fp: vc_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_s(fc, vc_p0, vc_p1, vc_p2, vc_p3); + + /* Do we generate a signal? 
*/ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; + +} + +static inline unsigned long negative_value(unsigned long va) +{ + return (va ^ 0x8000000000000000UL); +} + +static inline unsigned long s_negative_value(unsigned long va) +{ + return (va ^ 0x80000000UL); +} + +/* + * sw64 mul-add floating emulation + */ +long mul_add_fp_emul(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(S_TMP); FP_DECL_S(SR); + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(D_TMP); FP_DECL_D(DR); + FP_DECL_S(S_ZERO); + FP_DECL_D(D_ZERO); + FP_DECL_S(S_TMP2); + FP_DECL_D(D_TMP2); + + unsigned long fa, fb, fc, fd, func, mode, src; + unsigned long res, va, vb, vc, vd, vtmp, vtmp2, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long vzero = 0; + + get_user(insn, (__u32 *)pc); + fd = (insn >> 0) & 0x1f; /* destination register */ + fc = (insn >> 5) & 0x1f; + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 10) & 0x3f; + + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + pr_debug("===== Entering SW64 MUL-ADD Emulation =====\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software 
swcr = %#lx\n", swcr); + + if (func == FNC_FMAS || func == FNC_FMSS || func == FNC_FNMAS || func == FNC_FNMSS) { + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + vc = sw64_read_fp_reg_s(fc); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_UNPACK_SP(SC, &vc); + FP_UNPACK_SP(S_ZERO, &vzero); + } + if (func == FNC_FMAD || func == FNC_FMSD || func == FNC_FNMAD || func == FNC_FNMSD) { + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + vc = sw64_read_fp_reg(fc); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_UNPACK_DP(DC, &vc); + FP_UNPACK_DP(D_ZERO, &vzero); + } + pr_debug("va = %#lx, vb = %#lx, vc = %#lx\n", va, vb, vc); + switch (func) { + case FNC_FMAS: + FP_MUL_S(S_TMP, SA, SB); + FP_ADD_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FMSS: + FP_MUL_S(S_TMP, SA, SB); + FP_SUB_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FNMAS: /* (-va*vb) + vc */ + va = s_negative_value(va); + FP_UNPACK_SP(SA, &va); + FP_MUL_S(S_TMP, SA, SB); + FP_ADD_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FNMSS: /* (-va*vb) - vc */ + va = s_negative_value(va); + FP_UNPACK_SP(SA, &va); + FP_MUL_S(S_TMP, SA, SB); + FP_SUB_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FMAD: + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FMSD: + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMAD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMSD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + goto pack_d; + default: + goto bad_insn; + + } +pack_s: + FP_PACK_SP(&vd, SR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vd = 0; + sw64_write_fp_reg_s(fd, vd); + goto done; + +pack_d: + FP_PACK_DP(&vd, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vd = 0; + sw64_write_fp_reg(fd, vd); + +done: + pr_debug("vd = %#lx\n", vd); + if (_fex) { + /* Record 
exceptions in software control word. */ + swcr |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr |= ieee_swcr_to_fpcr(swcr); + wrfpcr(fpcr); /** wrfpcr will destroy vector register! */ + if (func == FNC_FMAS || func == FNC_FMSS || func == FNC_FNMAS || func == FNC_FNMSS) + sw64_write_fp_reg_s(fd, vd); + if (func == FNC_FMAD || func == FNC_FMSD || func == FNC_FNMAD || func == FNC_FNMSD) + sw64_write_fp_reg(fd, vd); + + /* Do we generate a signal? */ + _fex = _fex & swcr & IEEE_TRAP_ENABLE_MASK; + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + + return si_code; + } + + /* + * We used to write the destination register here, but DEC FORTRAN + * requires that the result *always* be written... so we do the write + * immediately after the operations above. 
+ */ + + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + + +long simd_mul_add_fp_emul_s(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(S_TMP); FP_DECL_S(SR); + FP_DECL_S(S_ZERO); + FP_DECL_S(S_TMP2); + + unsigned long fa, fb, fc, fd, func, mode, src; + unsigned long res, va, vb, vc, vd, vtmp, vtmp2, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long vzero = 0; + + get_user(insn, (__u32 *)pc); + fd = (insn >> 0) & 0x1f; /* destination register */ + fc = (insn >> 5) & 0x1f; + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 10) & 0x3f; + + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long vd_p0, vd_p1, vd_p2, vd_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + pr_debug("======== Entering SIMD S-floating mul-add emulation =======\n"); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + pr_debug("hardware fpcr = %#lx\n", fpcr); + read_fp_reg_s(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_s(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_s(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + read_fp_reg_s(fd, &vd_p0, &vd_p1, &vd_p2, &vd_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + pr_debug("vd_p0:%#lx, vd_p1:%#lx, vd_p2:%#lx, vd_p3:%#lx\n", vd_p0, vd_p1, vd_p2, vd_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + 
pr_debug("FPCR_STATUS_MASK0 : %#lx, fpcr :%#lx\n", FPCR_STATUS_MASK0, fpcr); + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + SW64_FP_NORMAL_S(SC, &vc); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("LOW: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + pr_debug("FPCR_STATUS_MASK1 : %#lx, fpcr :%#lx\n", FPCR_STATUS_MASK0, fpcr); + if ((fpcr & FPCR_STATUS_MASK1) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + SW64_FP_NORMAL_S(SC, &vc); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + if ((fpcr & FPCR_STATUS_MASK2) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + SW64_FP_NORMAL_S(SC, &vc); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + if ((fpcr & FPCR_STATUS_MASK3) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + SW64_FP_NORMAL_S(SC, &vc); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == 
SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + } + + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_UNPACK_SP(SC, &vc); + FP_UNPACK_SP(S_ZERO, &vzero); + switch (func) { + case FNC_FMAS: + FP_MUL_S(S_TMP, SA, SB); + FP_ADD_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FMSS: + FP_MUL_S(S_TMP, SA, SB); + FP_SUB_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FNMAS: /* (-va*vb) + vc */ + va = s_negative_value(va); + FP_UNPACK_SP(SA, &va); + FP_MUL_S(S_TMP, SA, SB); + FP_ADD_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FNMSS: /* (-va*vb) - vc */ + va = s_negative_value(va); + FP_UNPACK_SP(SA, &va); + FP_MUL_S(S_TMP, SA, SB); + FP_SUB_S(SR, S_TMP, SC); + goto pack_s; + default: + goto bad_insn; + } + +pack_s: + FP_PACK_SP(&vd, SR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vd = 0; + pr_debug("SW64 SIMD Emulation S-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 SIMD Emulation S-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vd_p0 = vd; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vd_p1 = vd; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vd_p2 = vd; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vd_p3 = vd; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << 
IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + pr_debug("Before write fp: vp_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_s(fd, vd_p0, vd_p1, vd_p2, vd_p3); /* write to fd */ + + /* Do we generate a signal? 
*/ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +long simd_mul_add_fp_emul_d(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(D_TMP); FP_DECL_D(DR); + FP_DECL_D(D_ZERO); + FP_DECL_D(D_TMP2); + + unsigned long fa, fb, fc, fd, func, mode, src; + unsigned long res, va, vb, vc, vd, vtmp, vtmp2, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long vzero = 0; + + get_user(insn, (__u32 *)pc); + fd = (insn >> 0) & 0x1f; /* destination register */ + fc = (insn >> 5) & 0x1f; + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 10) & 0x3f; + + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long vd_p0, vd_p1, vd_p2, vd_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + pr_debug("======== Entering SIMD D-floating mul-add emulation =======\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + read_fp_reg_d(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_d(fb, &vb_p0, 
&vb_p1, &vb_p2, &vb_p3); + read_fp_reg_d(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + read_fp_reg_d(fd, &vd_p0, &vd_p1, &vd_p2, &vd_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + pr_debug("vd_p0:%#lx, vd_p1:%#lx, vd_p2:%#lx, vd_p3:%#lx\n", vd_p0, vd_p1, vd_p2, vd_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + vd = vd_p0; + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("LOW: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + vd = vd_p1; + if ((fpcr & FPCR_STATUS_MASK1) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + vd = vd_p2; + if ((fpcr & FPCR_STATUS_MASK2) == 0) { + 
SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + vd = vd_p3; + if ((fpcr & FPCR_STATUS_MASK3) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + } + + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_UNPACK_DP(DC, &vc); + FP_UNPACK_DP(D_ZERO, &vzero); + + switch (func) { + case FNC_FMAD: + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FMSD: + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMAD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMSD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + + goto pack_d; + default: + goto bad_insn; + } + +pack_d: + FP_PACK_DP(&vd, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vd = 0; + pr_debug("SW64 SIMD Emulation D-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 SIMD Emulation D-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, 
swcr); +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vd_p0 = vd; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vd_p1 = vd; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vd_p2 = vd; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vd_p3 = vd; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
*/ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + + pr_debug("Before write fp: vp_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_d(fd, vd_p0, vd_p1, vd_p2, vd_p3); /* write to fd */ + + /* Do we generate a signal? */ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + } + pr_debug("SIMD finished.. 
si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +void read_fp_reg_s(unsigned long reg, unsigned long *val_p0, + unsigned long *val_p1, unsigned long *val_p2, unsigned long *val_p3) +{ + unsigned long fp[2]; + + sw64_read_simd_fp_m_s(reg, fp); + *val_p0 = fp[0] & 0xffffffffUL; + *val_p1 = (fp[0] >> 32) & 0xffffffffUL; + *val_p2 = fp[1] & 0xffffffffUL; + *val_p3 = (fp[1] >> 32) & 0xffffffffUL; +} + +void read_fp_reg_d(unsigned long reg, unsigned long *val_p0, + unsigned long *val_p1, unsigned long *val_p2, unsigned long *val_p3) +{ + unsigned long fp[4]; + + sw64_read_simd_fp_m_d(reg, fp); + *val_p0 = fp[0]; + *val_p1 = fp[1]; + *val_p2 = fp[2]; + *val_p3 = fp[3]; +} + +void write_fp_reg_s(unsigned long reg, unsigned long val_p0, + unsigned long val_p1, unsigned long val_p2, unsigned long val_p3) +{ + unsigned long fp[2]; + + fp[0] = ((val_p1 & 0xffffffffUL) << 32) | (val_p0 & 0xffffffffUL); + fp[1] = ((val_p3 & 0xffffffffUL) << 32) | (val_p2 & 0xffffffffUL); + sw64_write_simd_fp_reg_s(reg, fp[0], fp[1]); +} + +void write_fp_reg_d(unsigned long reg, unsigned long val_p0, + unsigned long val_p1, unsigned long val_p2, unsigned long val_p3) +{ + sw64_write_simd_fp_reg_d(reg, val_p0, val_p1, val_p2, val_p3); +} diff --git a/arch/sw_64/math-emu/qrnnd.S b/arch/sw_64/math-emu/qrnnd.S new file mode 100644 index 000000000000..1e732f2e68c0 --- /dev/null +++ b/arch/sw_64/math-emu/qrnnd.S @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + # __udiv_qrnnd + # Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc. + + # This file is part of GCC. 
+ + .set noreorder + .set noat + + .text + + .globl __udiv_qrnnd + .ent __udiv_qrnnd +__udiv_qrnnd: + .frame $30, 0, $26, 0 + .prologue 0 + + # ldiq $2,16 + ldi $2, 16($31) + blt $19, $largedivisor + +$loop1: cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + subl $2, 1, $2 + bgt $2, $loop1 + stl $17, 0($16) + bis $31, $18, $0 + ret $31, ($26), 1 + +$largedivisor: + and $18, 1, $4 + + srl $18, 1, $18 + sll $17, 63, $3 + or $3, $18, $18 + srl $17, 1, $17 + + and $19, 1, $6 + srl $19, 1, $5 + addl $5, $6, $5 + +$loop2: cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + subl $2, 1, $2 + bgt $2, $loop2 + + addl $17, $17, $17 + addl $4, $17, $17 + bne $6, $Odd + stl $17, 0($16) + bis $31, $18, $0 + ret $31, ($26), 1 + +$Odd: + # q' in $18. 
r' in $17 + addl $17, $18, $17 + + cmpult $17, $18, $3 # $3 := carry from addl + subl $17, $19, $at + addl $18, $3, $18 + selne $3, $at, $17, $17 + + cmpult $17, $19, $3 + addl $18, 1, $at + seleq $3, $at, $18, $18 + subl $17, $19, $at + seleq $3, $at, $17, $17 + + stl $17, 0($16) + bis $31, $18, $0 + ret $31, ($26), 1 + + .end __udiv_qrnnd diff --git a/arch/sw_64/math-emu/sfp-util.h b/arch/sw_64/math-emu/sfp-util.h new file mode 100644 index 000000000000..0769c0223e0d --- /dev/null +++ b/arch/sw_64/math-emu/sfp-util.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SW64_MATH_EMU_SFP_UTIL_H +#define _SW64_MATH_EMU_SFP_UTIL_H + +#include +#include +#include +#include +#include + +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + ((sl) = (al) + (bl), (sh) = (ah) + (bh) + ((sl) < (al))) + +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + ((sl) = (al) - (bl), (sh) = (ah) - (bh) - ((al) < (bl))) + +#define umul_ppmm(wh, wl, u, v) \ + __asm__ ("mull %2, %3, %1; umulh %2, %3, %0" \ + : "=r" ((UDItype)(wh)), \ + "=&r" ((UDItype)(wl)) \ + : "r" ((UDItype)(u)), \ + "r" ((UDItype)(v))) + +#define udiv_qrnnd(q, r, n1, n0, d) \ +do { unsigned long __r; \ + (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \ + (r) = __r; \ +} while (0) +extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long, + unsigned long, unsigned long); + +#define UDIV_NEEDS_NORMALIZATION 1 + +#define abort() goto bad_insn + +#ifndef __LITTLE_ENDIAN +#define __LITTLE_ENDIAN -1 +#endif +#define __BYTE_ORDER __LITTLE_ENDIAN + +#endif /* _SW64_MATH_EMU_SFP_UTIL_H */ -- Gitee From 1d6f828630783d4bd607f53456e90f3d400568e0 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:15 +0800 Subject: [PATCH 0290/2138] anolis: sw64: add basic IO support ANBZ: #4688 Add basic IO support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/early_ioremap.h | 29 ++ arch/sw_64/include/asm/io.h | 288 ++++++++++++++++ arch/sw_64/include/asm/sw64io.h | 109 ++++++ arch/sw_64/include/asm/uncore_io_junzhang.h | 201 +++++++++++ .../include/asm/uncore_io_ops_junzhang.h | 39 +++ .../sw_64/include/asm/uncore_io_ops_xuelang.h | 65 ++++ arch/sw_64/include/asm/uncore_io_xuelang.h | 323 ++++++++++++++++++ 7 files changed, 1054 insertions(+) create mode 100644 arch/sw_64/include/asm/early_ioremap.h create mode 100644 arch/sw_64/include/asm/io.h create mode 100644 arch/sw_64/include/asm/sw64io.h create mode 100644 arch/sw_64/include/asm/uncore_io_junzhang.h create mode 100644 arch/sw_64/include/asm/uncore_io_ops_junzhang.h create mode 100644 arch/sw_64/include/asm/uncore_io_ops_xuelang.h create mode 100644 arch/sw_64/include/asm/uncore_io_xuelang.h diff --git a/arch/sw_64/include/asm/early_ioremap.h b/arch/sw_64/include/asm/early_ioremap.h new file mode 100644 index 000000000000..172b96a401cb --- /dev/null +++ b/arch/sw_64/include/asm/early_ioremap.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_EARLY_IOREMAP_H +#define _ASM_SW64_EARLY_IOREMAP_H + +#include +#include + +static inline void __iomem * +early_ioremap(unsigned long phys_addr, unsigned long size) +{ + unsigned long y = 0; + + if (phys_addr >= __START_KERNEL_map) { + y = (unsigned long) phys_to_virt(__pa(phys_addr)); + } else { + y = phys_addr; + y |= PAGE_OFFSET; + } + + return (void __iomem *) y; +} +#define early_memremap(phys_addr, size) early_ioremap(phys_addr, size) + +static inline void early_iounmap(volatile void __iomem *addr, unsigned long size) +{ +} +#define early_memunmap(addr, size) early_iounmap(addr, size) + +#endif /* _ASM_SW64_EARLY_IOREMAP_H */ diff --git a/arch/sw_64/include/asm/io.h b/arch/sw_64/include/asm/io.h new 
file mode 100644 index 000000000000..2b045be5257e --- /dev/null +++ b/arch/sw_64/include/asm/io.h @@ -0,0 +1,288 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IO_H +#define _ASM_SW64_IO_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +/* The generic header contains only prototypes. Including it ensures that + * the implementation we have here matches that interface. + */ +#include + +/* We don't use IO slowdowns on the sw64, but.. */ +#define __SLOW_DOWN_IO do { } while (0) +#define SLOW_DOWN_IO do { } while (0) + +#define page_to_phys(page) page_to_pa(page) + +/* Maximum PIO space address supported? */ +#define IO_SPACE_LIMIT 0xffffffffffffffff + +/* + * Generic IO read/write. These perform native-endian accesses. + */ + +#define __raw_writeb __raw_writeb +static inline void __raw_writeb(u8 val, volatile void __iomem *addr) +{ + asm volatile("stb %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_writew __raw_writew +static inline void __raw_writew(u16 val, volatile void __iomem *addr) +{ + asm volatile("sth %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_writel __raw_writel +static inline void __raw_writel(u32 val, volatile void __iomem *addr) +{ + asm volatile("stw %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_writeq __raw_writeq +static inline void __raw_writeq(u64 val, volatile void __iomem *addr) +{ + asm volatile("stl %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_readb __raw_readb +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ + u8 val; + + asm volatile("ldbu %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readw __raw_readw +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ + u16 val; + + asm volatile("ldhu %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readl __raw_readl +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ + u32 val; + + asm volatile("ldw %0, 0(%1)\n" + 
"zapnot %0, 0xf, %0\n" + : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readq __raw_readq +static inline u64 __raw_readq(const volatile void __iomem *addr) +{ + u64 val; + + asm volatile("ldl %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +/* IO barriers */ + +#define __iormb() rmb() +#define __iowmb() wmb() +#define mmiowb() do { } while (0) + +/* + * Relaxed I/O memory access primitives. These follow the Device memory + * ordering rules but do not guarantee any ordering relative to Normal memory + * accesses. + */ +#define readb_relaxed(c) __raw_readb(c) +#define readw_relaxed(c) __raw_readw(c) +#define readl_relaxed(c) __raw_readl(c) +#define readq_relaxed(c) __raw_readq(c) + +#define writeb_relaxed(v, c) __raw_writeb((v), (c)) +#define writew_relaxed(v, c) __raw_writew((v), (c)) +#define writel_relaxed(v, c) __raw_writel((v), (c)) +#define writeq_relaxed(v, c) __raw_writeq((v), (c)) + +/* + * I/O memory access primitives. Reads are ordered relative to any + * following Normal memory access. Writes are ordered relative to any prior + * Normal memory access. + */ +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) +#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; }) + +#define writeb(v, c) ({ __iowmb(); writeb_relaxed((v), (c)); }) +#define writew(v, c) ({ __iowmb(); writew_relaxed((v), (c)); }) +#define writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); }) +#define writeq(v, c) ({ __iowmb(); writeq_relaxed((v), (c)); }) +/* + * We always have external versions of these routines. 
+ */ +extern u8 inb(unsigned long port); +extern u16 inw(unsigned long port); +extern u32 inl(unsigned long port); +extern void outb(u8 b, unsigned long port); +extern void outw(u16 b, unsigned long port); +extern void outl(u32 b, unsigned long port); +#define inb inb +#define inw inw +#define inl inl +#define outb outb +#define outw outw +#define outl outl + +static inline void __iomem *__ioremap(phys_addr_t addr, size_t size, + pgprot_t prot) +{ + unsigned long tmp = addr | PAGE_OFFSET; + + return (void __iomem *)(tmp); +} + +#define ioremap(addr, size) __ioremap((addr), (size), PAGE_KERNEL) +#define ioremap_nocache(addr, size) __ioremap((addr), (size), PAGE_KERNEL) +#define ioremap_cache(addr, size) __ioremap((addr), (size), PAGE_KERNEL) +#define ioremap_uc ioremap_nocache + +#define ioport_map ioport_map +#define ioport_unmap ioport_unmap + +static inline void __iounmap(volatile void __iomem *addr) +{ +} + +#define iounmap __iounmap + +#define ioread16be(p) be16_to_cpu(ioread16(p)) +#define ioread32be(p) be32_to_cpu(ioread32(p)) +#define iowrite16be(v, p) iowrite16(cpu_to_be16(v), (p)) +#define iowrite32be(v, p) iowrite32(cpu_to_be32(v), (p)) + +#define inb_p inb +#define inw_p inw +#define inl_p inl +#define outb_p outb +#define outw_p outw +#define outl_p outl + + +/* + * String version of IO memory access ops: + */ +#define memcpy_fromio memcpy_fromio +extern void memcpy_fromio(void *buffer, const volatile void __iomem *addr, long len); + +#define memcpy_toio memcpy_toio +extern void memcpy_toio(volatile void __iomem *addr, const void *buffer, long len); + +extern void _memset_c_io(volatile void __iomem *addr, unsigned long c, long len); + +#define memset_io memset_io +static inline void memset_io(volatile void __iomem *addr, u8 c, long len) +{ + _memset_c_io(addr, 0x0101010101010101UL * c, len); +} + +static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len) +{ + _memset_c_io(addr, 0x0001000100010001UL * c, len); +} + +/* + * String 
versions of in/out ops: + */ +extern void insb(unsigned long port, void *dst, unsigned long count); +extern void insw(unsigned long port, void *dst, unsigned long count); +extern void insl(unsigned long port, void *dst, unsigned long count); +extern void outsb(unsigned long port, const void *src, unsigned long count); +extern void outsw(unsigned long port, const void *src, unsigned long count); +extern void outsl(unsigned long port, const void *src, unsigned long count); + +#define insb insb +#define insw insw +#define insl insl +#define outsb outsb +#define outsw outsw +#define outsl outsl + +/* + * These defines will override the defaults when doing RTC queries + */ + +#define RTC_PORT(x) (0x70 + (x)) +#define RTC_ALWAYS_BCD 0 + +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +/* + * These get provided from since sw64 does not + * select GENERIC_IOMAP. + */ +#define ioread8 ioread8 +#define ioread16 ioread16 +#define ioread32 ioread32 +#define ioread64 ioread64 +#define iowrite8 iowrite8 +#define iowrite16 iowrite16 +#define iowrite32 iowrite32 +#define iowrite64 iowrite64 +#define ioread64be ioread64be +#define iowrite64be iowrite64be +#define ioread8_rep ioread8_rep +#define ioread16_rep ioread16_rep +#define ioread32_rep ioread32_rep +#define iowrite8_rep iowrite8_rep +#define iowrite16_rep iowrite16_rep +#define iowrite32_rep iowrite32_rep +#define pci_iounmap pci_iounmap + +#include + +/* + * Change addresses as seen by the kernel (virtual) to addresses as + * seen by a device (bus), and vice versa. + * + * Note that this only works for a limited range of kernel addresses, + * and very well may not span all memory. Consider this interface + * deprecated in favour of the DMA-mapping API. 
+ */ +static inline unsigned long __deprecated virt_to_bus(void *address) +{ + return virt_to_phys(address); +} +#define isa_virt_to_bus virt_to_bus + +static inline void * __deprecated bus_to_virt(unsigned long address) +{ + void *virt; + + /* This check is a sanity check but also ensures that bus address 0 + * maps to virtual address 0 which is useful to detect null pointers + * (the NCR driver is much simpler if NULL pointers are preserved). + */ + virt = phys_to_virt(address); + return (long)address <= 0 ? NULL : virt; +} +#define isa_bus_to_virt bus_to_virt + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_IO_H */ diff --git a/arch/sw_64/include/asm/sw64io.h b/arch/sw_64/include/asm/sw64io.h new file mode 100644 index 000000000000..d52cd8cc86bf --- /dev/null +++ b/arch/sw_64/include/asm/sw64io.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SW64IO_H +#define _ASM_SW64_SW64IO_H + +#include +#include + +#if defined(CONFIG_UNCORE_XUELANG) +#include +#endif + +#if defined(CONFIG_UNCORE_JUNZHANG) +#include +#endif + +#define MK_RC_CFG(nid, idx) \ + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_RC_CFG) +#define MK_PIU_IOR0(nid, idx) \ + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR0_BASE) +#define MK_PIU_IOR1(nid, idx) \ + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR1_BASE) + +static inline unsigned int +read_rc_conf(unsigned long node, unsigned long rc, + unsigned int offset) +{ + void __iomem *addr; + + addr = __va(MK_RC_CFG(node, rc) | offset); + return readl(addr); +} + +static inline void +write_rc_conf(unsigned long node, unsigned long rc, + unsigned int offset, unsigned int data) +{ + void __iomem *addr; + + addr = __va(MK_RC_CFG(node, rc) | offset); + writel(data, addr); +} + +static inline unsigned long +read_piu_ior0(unsigned long node, unsigned long rc, + unsigned int reg) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR0(node, rc) + reg); + return readq(addr); +} + +static inline void +write_piu_ior0(unsigned long node, unsigned long 
rc, + unsigned int reg, unsigned long data) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR0(node, rc) + reg); + writeq(data, addr); +} + +static inline unsigned long +read_piu_ior1(unsigned long node, unsigned long rc, + unsigned int reg) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR1(node, rc) + reg); + return readq(addr); +} + +static inline void +write_piu_ior1(unsigned long node, unsigned long rc, + unsigned int reg, unsigned long data) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR1(node, rc) + reg); + writeq(data, addr); +} + +static inline unsigned long +sw64_io_read(unsigned long node, unsigned long reg) +{ + void __iomem *addr; + + addr = __va(SW64_IO_BASE(node) | reg); + return readq(addr); +} + +static inline void +sw64_io_write(unsigned long node, unsigned long reg, unsigned long data) +{ + void __iomem *addr; + + addr = __va(SW64_IO_BASE(node) | reg); + writeq(data, addr); +} + +#if defined(CONFIG_UNCORE_XUELANG) +#include +#endif + +#if defined(CONFIG_UNCORE_JUNZHANG) +#include +#endif + +#endif /* _ASM_SW64_SW64IO_H */ diff --git a/arch/sw_64/include/asm/uncore_io_junzhang.h b/arch/sw_64/include/asm/uncore_io_junzhang.h new file mode 100644 index 000000000000..37cfe1fd6807 --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_junzhang.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_JUNZHANG_H +#define _ASM_SW64_UNCORE_IO_JUNZHANG_H + +#include + +#define IO_BASE (0x1UL << 47) +#define PCI_BASE (0x1UL << 43) +#define PCI_IOR0_BASE (0x2UL << 32) +#define PCI_IOR1_BASE (0x3UL << 32) + +#define PCI_RC_CFG (0x5UL << 32) + +#define PCI_EP_CFG (0x3UL << 33) +#define PCI_LEGACY_IO (0x1UL << 32) +#define PCI_LEGACY_IO_SIZE (0x100000000UL) +#define PCI_MEM_UNPRE 0x0UL +#define PCI_32BIT_VT_MEMIO (0xc0000000UL) +#define PCI_32BIT_MEMIO (0xe0000000UL) +#define PCI_32BIT_MEMIO_SIZE (0x20000000UL) +#define PCI_64BIT_MEMIO (0x1UL << 39) +#define PCI_64BIT_MEMIO_SIZE (0x8000000000UL) + +#define IO_RC_SHIFT 40 
+#define IO_NODE_SHIFT 44 +#define IO_MARK_BIT 47 + +#define VT_MAX_CPUS_SHIFT 0 +#define VT_MAX_CPUS_MASK 0x3ff +#define VT_CORES_SHIFT 10 +#define VT_CORES_MASK 0x3ff +#define VT_THREADS_SHIFT 20 +#define VT_THREADS_MASK 0xfff + +#define QEMU_PRINTF_BUFF_BASE (IO_BASE | SPBU_BASE | 0x40000UL) + +/* MSIConfig */ +#define MSICONFIG_VALID (0x1UL << 63) +#define MSICONFIG_EN (0x1UL << 62) +#define MSICONFIG_VECTOR_SHIFT 10 + +#define MSIX_MSG_ADDR (0xfff00000UL) + +#define SW64_PCI_IO_BASE(m, n) \ + (IO_BASE | ((m) << IO_NODE_SHIFT) | PCI_BASE | ((n) << IO_RC_SHIFT)) +#define SW64_IO_BASE(x) (IO_BASE | ((x) << IO_NODE_SHIFT)) + +#define SW64_PCI0_BUS 0 +#define PCI0_BUS SW64_PCI0_BUS + +#define MAX_NR_NODES 0x2 +#define MAX_NR_RCS 0x6 + +#define SPBU_BASE (0x3UL << 36) +#define INTPU_BASE (0x3aUL << 32) +#define IIC0_BASE (0x31UL << 32) +#define SPI_BASE (0x32UL << 32) +#define UART_BASE (0x33UL << 32) +#define IIC1_BASE (0x34UL << 32) +#define IIC2_BASE (0x35UL << 32) +#define GPIO_BASE (0x36UL << 32) +#define LPC_BASE (0x37UL << 32) +#define LPC_LEGACY_IO (0x1UL << 28 | IO_BASE | LPC_BASE) +#define LPC_MEM_IO (0x2UL << 28 | IO_BASE | LPC_BASE) +#define LPC_FIRMWARE_IO (0x3UL << 28 | IO_BASE | LPC_BASE) +#define PCI_VT_LEGACY_IO (IO_BASE | PCI_BASE | PCI_LEGACY_IO) + +#define PME_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x8UL << 10) +#define AER_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x8UL << 10) + +#define PIUCONFIG0_INIT_VAL 0x38016 + +/*-----------------------addr-----------------------*/ +/* INTPU REG */ +enum { + DEVINT_MISS = INTPU_BASE | 0x100UL, + MT_INT_CONFIG = INTPU_BASE | 0x300UL, + DEV_INT_CONFIG = INTPU_BASE | 0x480UL, + FMT_ERR = INTPU_BASE | 0x700UL, + FAULT_INT_CONFIG = INTPU_BASE | 0x780UL, + SERR_CNTTH = INTPU_BASE | 0x880UL, + SPBUSERR_CNT = INTPU_BASE | 0x900UL, + IRUSERR_CNT = INTPU_BASE | 0xa80UL, + ERRRPT_EN = INTPU_BASE | 0xb00UL, + IINT_MISS_VECTOR0 = INTPU_BASE | 0x1080UL, + IINT_MISS_VECTOR1 = INTPU_BASE | 0x1100UL, + IINT_MISS = INTPU_BASE | 0x1180UL, 
+ IINT_MISS_RPTEN = INTPU_BASE | 0x1200UL, + DEVINT_MISS_RPTEN = INTPU_BASE | 0x1280UL, + ECCSERR = INTPU_BASE | 0x1300UL, + ECCSERR_RPTEN = INTPU_BASE | 0x1380UL, + ECCMERR = INTPU_BASE | 0x1400UL, + ECCMERR_RPTEN = INTPU_BASE | 0x1480UL, + DEVINT_WKEN = INTPU_BASE | 0x1500UL, + ADR_INT_CONFIG = INTPU_BASE | 0x1580UL, + DEVINTWK_INTEN = INTPU_BASE | 0x1600UL, +}; + +/* SPBU CSR */ +enum { + SMP_INFO = SPBU_BASE | 0x80UL, + INIT_CTL = SPBU_BASE | 0x680UL, + CORE_ONLINE = SPBU_BASE | 0x780UL, + DLI_RLTD_FAULT = SPBU_BASE | 0x980UL, + DLI_RLTD_FAULT_EN = SPBU_BASE | 0xa00UL, + DLI_RLTD_FAULT_INTEN = SPBU_BASE | 0xa80UL, + CFG_INFO = SPBU_BASE | 0x1100UL, + IO_START = SPBU_BASE | 0x1300UL, + I2C0_SRST_L = SPBU_BASE | 0x1900UL, + I2C1_SRST_L = SPBU_BASE | 0x1980UL, + I2C2_SRST_L = SPBU_BASE | 0x1a00UL, + MCU_DVC_INT = SPBU_BASE | 0x3000UL, + MCU_DVC_INT_EN = SPBU_BASE | 0x3080UL, + SI_FAULT_STAT = SPBU_BASE | 0x3100UL, + SI_FAULT_STAT_EN = SPBU_BASE | 0x3180UL, + SI_FAULT_INT_EN = SPBU_BASE | 0x3200UL, + ADR_CTL = SPBU_BASE | 0x3600UL, + MC_ONLINE = SPBU_BASE | 0x3780UL, + PIU_TOP0_CONFIG = SPBU_BASE | 0x4c80UL, + PIU_TOP1_CONFIG = SPBU_BASE | 0x4d00UL, + SOFT_INFO0 = SPBU_BASE | 0xa000UL, +}; + +/*--------------------------offset-----------------------------------*/ +/* PIU IOR0 */ +enum { + PIUCONFIG0 = 0x0UL, + EPDMABAR = 0x80UL, + IOMMUSEGITEM0 = 0x100UL, + IOMMUEXCPT_CTRL = 0x2100UL, + MSIADDR = 0x2180UL, + MSICONFIG0 = 0x2200UL, + INTACONFIG = 0xa200UL, + INTBCONFIG = 0xa280UL, + INTCCONFIG = 0xa300UL, + INTDCONFIG = 0xa380UL, + AERERRINTCONFIG = 0xa400UL, + AERERRMSICONFIG = 0xa480UL, + PMEINTCONFIG = 0xa500UL, + PMEMSICONFIG = 0xa580UL, + HPINTCONFIG = 0xa600UL, + HPMSICONFIG = 0xa680UL, + DTBASEADDR = 0xb000UL, + DTLB_FLUSHALL = 0xb080UL, + DTLB_FLUSHDEV = 0xb100UL, + PTLB_FLUSHALL = 0xb180UL, + PTLB_FLUSHDEV = 0xb200UL, + PTLB_FLUSHVADDR = 0xb280UL, + PCACHE_FLUSHALL = 0xb300UL, + PCACHE_FLUSHDEV = 0xb380UL, + PCACHE_FLUSHPADDR = 0xb400UL, + TIMEOUT_CONFIG = 
0xb480UL, + IOMMUEXCPT_STATUS = 0xb500UL, + IOMMUPAGE_PADDR1 = 0xb580UL, + IOMMUPAGE_PADDR2 = 0xb600UL, + IOMMUPAGE_PADDR3 = 0xb680UL, + PTLB_ACCESS = 0xb700UL, + PTLB_ITEM_TAG = 0xb780UL, + PTLB_ITEM_DATA = 0xb800UL, + PCACHE_ACCESS = 0xb880UL, + PCACHE_ITEM_TAG = 0xb900UL, + PCACHE_ITEM_DATA0 = 0xb980UL, +}; + +/* PIU IOR1 */ +enum { + PIUCONFIG1 = 0x0UL, + ERRENABLE = 0x880UL, + RCDEBUGINF1 = 0xc80UL, + DCACONTROL = 0x1a00UL, + DEVICEID0 = 0x1a80UL, +}; + +/* RC */ +enum { + RC_VENDOR_ID = 0x0UL, + RC_COMMAND = 0x80UL, + RC_REVISION_ID = 0x100UL, + RC_PRIMARY_BUS = 0x300UL, + RC_MSI_CONTROL = 0xa00UL, + RC_EXP_DEVCAP = 0xe80UL, + RC_EXP_DEVCTL = 0xf00UL, + RC_SLOT_CTRL = 0x1100UL, + RC_LINK_STAT = 0x1000UL, + RC_CONTROL = 0X1180UL, + RC_STATUS = 0X1200UL, + RC_EXP_DEVCTL2 = 0x1300UL, + RC_PORT_LINK_CTL = 0xe200UL, + RC_ORDER_RULE_CTL = 0x11680UL, + RC_MISC_CONTROL_1 = 0x11780UL, + RC_PHY_INT_REG = 0x80000UL, + RC_PHY_EXT_GEN1 = 0x82400UL, + RC_PHY_EXT_GEN2 = 0x82480UL, +}; +/* GPIO */ +enum { + GPIO_SWPORTA_DR = GPIO_BASE | 0x0UL, + GPIO_SWPORTA_DDR = GPIO_BASE | 0x200UL, +}; +/*--------------------------------------------------------------------------*/ +#endif /* _ASM_SW64_UNCORE_IO_JUNZHANG_H */ diff --git a/arch/sw_64/include/asm/uncore_io_ops_junzhang.h b/arch/sw_64/include/asm/uncore_io_ops_junzhang.h new file mode 100644 index 000000000000..95a3b5c80531 --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_ops_junzhang.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_OPS_JUNZHANG_H +#define _ASM_SW64_UNCORE_IO_OPS_JUNZHANG_H + +static inline int __get_cpu_nums(void) +{ + int cpus; + unsigned long cfg_info; + + cfg_info = sw64_io_read(0, CFG_INFO); + cfg_info = (cfg_info >> 33) & 0x3; + cpus = 1 << cfg_info; + + return cpus; +} + +static inline unsigned long __get_node_mem(int node) +{ + unsigned long node_mem; + unsigned long total_mem; + + total_mem = sw64_io_read(node, CFG_INFO) >> 3; + total_mem = (total_mem & 
0xffff) << 28; + node_mem = total_mem / __get_cpu_nums(); + + return node_mem; +} + +#define __io_read_longtime(node) (0UL) +#define __io_write_longtime(node, data) do { } while (0) +#define __io_write_longtime_start_en(node, data) do { } while (0) + +static inline void +__io_write_fault_int_en(int node, unsigned long data) +{ + sw64_io_write(node, FAULT_INT_CONFIG, data); +} + +#endif /* _ASM_SW64_UNCORE_IO_OPS_JUNZHANG_H */ diff --git a/arch/sw_64/include/asm/uncore_io_ops_xuelang.h b/arch/sw_64/include/asm/uncore_io_ops_xuelang.h new file mode 100644 index 000000000000..9336e473211d --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_ops_xuelang.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_OPS_XUELANG_H +#define _ASM_SW64_UNCORE_IO_OPS_XUELANG_H + +static inline int __get_cpu_nums(void) +{ + int cpus; + unsigned long trkmode; + + trkmode = sw64_io_read(0, TRKMODE); + trkmode = (trkmode >> 6) & 0x3; + cpus = 1 << trkmode; + + return cpus; +} + +static inline unsigned long __get_node_mem(int node) +{ + unsigned long node_mem; + unsigned long mc_config; + unsigned long mc_online; + unsigned long mc_cap; + unsigned long mc_num; + + mc_config = sw64_io_read(node, MC_CAP_CFG) & 0xf; + mc_cap = (1UL << mc_config) << 28; + mc_online = sw64_io_read(node, MC_ONLINE) & 0xff; + mc_num = __kernel_ctpop(mc_online); + node_mem = mc_cap * mc_num; + + return node_mem; +} + +static inline unsigned long +__io_read_longtime(int node) +{ + return sw64_io_read(node, LONG_TIME); +} + +static inline void +__io_write_longtime(int node, unsigned long data) +{ + sw64_io_write(node, LONG_TIME, data); +} + +static inline void +__io_write_longtime_start_en(int node, unsigned long data) +{ + sw64_io_write(node, LONG_TIME_START_EN, data); +} + +static inline void +__io_write_fault_int_en(int node, unsigned long data) +{ + sw64_io_write(node, DUAL_CG0_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG1_FAULT_INTEN, data); + sw64_io_write(node, 
DUAL_CG2_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG3_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG4_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG5_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG6_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG7_FAULT_INTEN, data); +} + +#endif /* _ASM_SW64_UNCORE_IO_OPS_XUELANG_H */ diff --git a/arch/sw_64/include/asm/uncore_io_xuelang.h b/arch/sw_64/include/asm/uncore_io_xuelang.h new file mode 100644 index 000000000000..aeaadec5be16 --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_xuelang.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_XUELANG_H +#define _ASM_SW64_UNCORE_IO_XUELANG_H + +#include + +#define IO_BASE (0x1UL << 47) +#define PCI_BASE (0x1UL << 43) +#define PCI_IOR0_BASE (0x2UL << 32) +#define PCI_IOR1_BASE (0x3UL << 32) + +#define PCI_RC_CFG (0x5UL << 32) + +#define PCI_EP_CFG (0x3UL << 33) +#define PCI_LEGACY_IO (0x1UL << 32) +#define PCI_LEGACY_IO_SIZE (0x100000000UL) +#define PCI_MEM_UNPRE 0x0UL +#define PCI_32BIT_MEMIO (0xe0000000UL) +#define PCI_32BIT_MEMIO_SIZE (0x20000000UL) +#define PCI_64BIT_MEMIO (0x1UL << 39) +#define PCI_64BIT_MEMIO_SIZE (0x8000000000UL) + +#define IO_RC_SHIFT 40 +#define IO_NODE_SHIFT 44 +#define IO_MARK_BIT 47 + +#define VT_MAX_CPUS_SHIFT 0 +#define VT_MAX_CPUS_MASK 0x3ff +#define VT_CORES_SHIFT 10 +#define VT_CORES_MASK 0x3ff +#define VT_THREADS_SHIFT 20 +#define VT_THREADS_MASK 0xfff + +#define QEMU_PRINTF_BUFF_BASE (IO_BASE | MCU_BASE | 0x40000UL) + +/* MSIConfig */ +#define MSICONFIG_VALID (0x1UL << 63) +#define MSICONFIG_EN (0x1UL << 62) +#define MSICONFIG_VECTOR_SHIFT 10 + +#define MSIX_MSG_ADDR (0x91abc0UL) + +#define SW64_PCI_IO_BASE(m, n) \ + (IO_BASE | ((m) << IO_NODE_SHIFT) | PCI_BASE | ((n) << IO_RC_SHIFT)) +#define SW64_IO_BASE(x) (IO_BASE | ((x) << IO_NODE_SHIFT)) + +#define SW64_PCI0_BUS 0 +#define PCI0_BUS SW64_PCI0_BUS + +#define MAX_NR_NODES 0x2 +#define MAX_NR_RCS 0x6 + +#define MCU_BASE (0x3UL << 36) 
+#define CAB0_BASE (0x10UL << 32) +#define INTPU_BASE (0x2aUL << 32) +#define IIC0_BASE (0x31UL << 32) +#define SPI_BASE (0x32UL << 32) +#define UART_BASE (0x33UL << 32) +#define IIC1_BASE (0x34UL << 32) +#define IIC2_BASE (0x35UL << 32) +#define GPIO_BASE (0x36UL << 32) +#define LPC_BASE (0x37UL << 32) +#define LPC_LEGACY_IO (0x1UL << 28 | IO_BASE | LPC_BASE) +#define LPC_MEM_IO (0x2UL << 28 | IO_BASE | LPC_BASE) +#define LPC_FIRMWARE_IO (0x3UL << 28 | IO_BASE | LPC_BASE) +#define DLIA_BASE (0x20UL << 32) +#define DLIB_BASE (0x21UL << 32) +#define DLIC_BASE (0x22UL << 32) +#define DLI_PHY_CTL (0x10UL << 24) +#define PCI_VT_LEGACY_IO (IO_BASE | PCI_BASE | PCI_LEGACY_IO) + +#define PME_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x1UL << 10) +#define AER_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x1UL << 10) + +#define PIUCONFIG0_INIT_VAL 0x38056 + +/*-----------------------addr-----------------------*/ +/* CAB0 REG */ +enum { + TRKMODE = CAB0_BASE | 0x80UL, +}; + +/* DLIA IO REG */ +enum { + DLIA_BWTEST_PAT = DLIA_BASE | 0x100980UL, + DLIA_PHY_VLDLANE = DLIA_BASE | DLI_PHY_CTL | 0x300UL, +}; + +/* DLIB IO REG */ +enum { + DLIB_BWTEST_PAT = DLIB_BASE | 0x100980UL, + DLIB_PHY_VLDLANE = DLIB_BASE | DLI_PHY_CTL | 0x300UL, +}; + +/* DLIC IO REG */ +enum { + DLIC_BWTEST_PAT = DLIC_BASE | 0x100980UL, + DLIC_PHY_VLDLANE = DLIC_BASE | DLI_PHY_CTL | 0x300UL, +}; +/* INTPU REG */ +enum { + LCORE_SLEEPY = INTPU_BASE | 0x0UL, + LCORE_SLEEP = INTPU_BASE | 0x80UL, + DEVICE_MISS = INTPU_BASE | 0x100UL, + LONG_TIME = INTPU_BASE | 0x180UL, + LCORE_IDLE = INTPU_BASE | 0x280UL, + MT_INT_CONFIG = INTPU_BASE | 0x300UL, + DEV_INT_CONFIG = INTPU_BASE | 0x480UL, + FMT_ERR = INTPU_BASE | 0x700UL, + FAULT_INT_CONFIG = INTPU_BASE | 0x780UL, + SERR_CNTTH = INTPU_BASE | 0x880UL, + MCUSERR_CNT = INTPU_BASE | 0x900UL, + IRUSERR_CNT = INTPU_BASE | 0xa80UL, + ERRRPT_EN = INTPU_BASE | 0xb00UL, + IINT_MISS_VECTOR = INTPU_BASE | 0x1100UL, + IINT_MIS = INTPU_BASE | 0x1180UL, + IINT_MISS_RPTEN = INTPU_BASE | 0x1200UL, + 
DEVINT_MISS_RPTEN = INTPU_BASE | 0x1280UL, + ECCSERR = INTPU_BASE | 0x1300UL, + ECCSERR_RPTEN = INTPU_BASE | 0x1380UL, + ECCMERR = INTPU_BASE | 0x1400UL, + ECCMERR_RPTEN = INTPU_BASE | 0x1480UL, + DEVINT_WKEN = INTPU_BASE | 0x1500UL, + NMI_INT_CONFIG = INTPU_BASE | 0x1580UL, + DEVINTWK_INTEN = INTPU_BASE | 0x1600UL, +}; + +/* MC IO REG */ +enum { + CFGDEC = 0x400UL, + CFGCR = 0x480UL, + INIT_CTRL = 0x580UL, + CFGERR = 0xd00UL, + FSMSTAT = 0xe00UL, + PUB_INTERFACE = 0x1000UL, + POWERCTRL = 0x1080UL, + CFGMR0 = 0x1280UL, + CFGMR1 = 0x1300UL, + CFGMR2 = 0x1380UL, + CFGMR3 = 0x1400UL, + PERF_CTRL = 0x1480UL, + MC_PERF0 = 0x1500UL, + CFGMR4 = 0x1800UL, + CFGMR5 = 0x1880UL, + CFGMR6 = 0x1900UL, + MC_CTRL = 0x1c00UL, + MEMSERR_P = 0x1c80UL, + MEMSERR = 0x1d00UL, +}; + +/* MCU CSR */ +enum { + SMP_INFO = MCU_BASE | 0x80UL, + INIT_CTL = MCU_BASE | 0x680UL, + MT_STATE = MCU_BASE | 0x700UL, + CORE_ONLINE = MCU_BASE | 0x780UL, + MT_INT = MCU_BASE | 0x800UL, + MT_INT_END = MCU_BASE | 0x880UL, + CPU_ID = MCU_BASE | 0x900UL, + DLI_RLTD_FAULT = MCU_BASE | 0x980UL, + DLI_RLTD_FAULT_EN = MCU_BASE | 0xa00UL, + DLI_RLTD_FAULT_INTEN = MCU_BASE | 0xa80UL, + FAULT_SOURCE = MCU_BASE | 0xb00UL, + INT_SOURCE = MCU_BASE | 0xb80UL, + CORE_STATE0 = MCU_BASE | 0xc00UL, + CORE_STATE1 = MCU_BASE | 0xc80UL, + CFG_INFO = MCU_BASE | 0x1100UL, + MC_CAP_CFG = MCU_BASE | 0x1180UL, + IO_START = MCU_BASE | 0x1300UL, + UART_ONLINE = MCU_BASE | 0x1780UL, + I2C0_SRST_L = MCU_BASE | 0x1900UL, + I2C1_SRST_L = MCU_BASE | 0x1980UL, + I2C2_SRST_L = MCU_BASE | 0x1a00UL, + MCU_DVC_INT = MCU_BASE | 0x3000UL, + MCU_DVC_INT_EN = MCU_BASE | 0x3080UL, + SI_FAULT_STAT = MCU_BASE | 0x3100UL, + SI_FAULT_EN = MCU_BASE | 0x3180UL, + SI_FAULT_INT_EN = MCU_BASE | 0x3200UL, + FIFO_SYNSEL = MCU_BASE | 0x3400UL, + CPU_INFO = MCU_BASE | 0x3480UL, + WAKEUP_CTL = MCU_BASE | 0x3500UL, + FLAGREG = MCU_BASE | 0x3580UL, + NMI_CTL = MCU_BASE | 0x3600UL, + PIUPLL_CNT = MCU_BASE | 0x3680UL, + MC_ONLINE = MCU_BASE | 0x3780UL, + FLASH_INFO 
= MCU_BASE | 0x3800UL, + RTPUSROMCNT = MCU_BASE | 0x3880UL, + CLU_LV1_SEL = MCU_BASE | 0x3a80UL, + CLU_LV2_SEL = MCU_BASE | 0x3b00UL, + CLK_CTL = MCU_BASE | 0x3b80UL, + SLEEP_WAIT_CNT = MCU_BASE | 0x4980UL, + CHIP_ID = MCU_BASE | 0x4b00UL, + PIU_TOP0_CONFIG = MCU_BASE | 0x4c80UL, + PIU_TOP1_CONFIG = MCU_BASE | 0x4d00UL, + LVDS_CTL = MCU_BASE | 0x4d80UL, + LPC_DMAREQ_TOTH = MCU_BASE | 0x5100UL, + DLI_ONLINE = MCU_BASE | 0x6180UL, + LPC_DMAREQ_HADR = MCU_BASE | 0x6200UL, + PIU_PHY_SRST_H = MCU_BASE | 0x6280UL, + CLK_SEL_PCIE0 = MCU_BASE | 0x6280UL, + CLK_SEL_PCIE1 = MCU_BASE | 0x6300UL, + CLK_SEL_PCIE2 = MCU_BASE | 0x6380UL, + CLK_SEL_PCIE3 = MCU_BASE | 0x6400UL, + CLK_SEL_PCIE4 = MCU_BASE | 0x6480UL, + CLK_SEL_PCIE5 = MCU_BASE | 0x6500UL, + PERST_N_PCIE0 = MCU_BASE | 0x6680UL, + PERST_N_PCIE1 = MCU_BASE | 0x6700UL, + PERST_N_PCIE2 = MCU_BASE | 0x6780UL, + PERST_N_PCIE3 = MCU_BASE | 0x6800UL, + PERST_N_PCIE4 = MCU_BASE | 0x6880UL, + PERST_N_PCIE5 = MCU_BASE | 0x6900UL, + BUTTON_RST_N_PCIE0 = MCU_BASE | 0x6a80UL, + BUTTON_RST_N_PCIE1 = MCU_BASE | 0x6b00UL, + BUTTON_RST_N_PCIE2 = MCU_BASE | 0x6b80UL, + BUTTON_RST_N_PCIE3 = MCU_BASE | 0x6c00UL, + BUTTON_RST_N_PCIE4 = MCU_BASE | 0x6c80UL, + BUTTON_RST_N_PCIE5 = MCU_BASE | 0x6d00UL, + DUAL_CG0_FAULT = MCU_BASE | 0x6d80UL, + DUAL_CG1_FAULT = MCU_BASE | 0x6e00UL, + DUAL_CG2_FAULT = MCU_BASE | 0x6e80UL, + DUAL_CG3_FAULT = MCU_BASE | 0x6f00UL, + DUAL_CG4_FAULT = MCU_BASE | 0x6f80UL, + DUAL_CG5_FAULT = MCU_BASE | 0x7000UL, + DUAL_CG6_FAULT = MCU_BASE | 0x7080UL, + DUAL_CG7_FAULT = MCU_BASE | 0x7100UL, + DUAL_CG0_FAULT_EN = MCU_BASE | 0x7180UL, + DUAL_CG1_FAULT_EN = MCU_BASE | 0x7200UL, + DUAL_CG2_FAULT_EN = MCU_BASE | 0x7280UL, + DUAL_CG3_FAULT_EN = MCU_BASE | 0x7300UL, + DUAL_CG4_FAULT_EN = MCU_BASE | 0x7380UL, + DUAL_CG5_FAULT_EN = MCU_BASE | 0x7400UL, + DUAL_CG6_FAULT_EN = MCU_BASE | 0x7480UL, + DUAL_CG7_FAULT_EN = MCU_BASE | 0x7500UL, + DUAL_CG0_FAULT_INTEN = MCU_BASE | 0x7580UL, + DUAL_CG1_FAULT_INTEN = MCU_BASE | 
0x7600UL, + DUAL_CG2_FAULT_INTEN = MCU_BASE | 0x7680UL, + DUAL_CG3_FAULT_INTEN = MCU_BASE | 0x7700UL, + DUAL_CG4_FAULT_INTEN = MCU_BASE | 0x7780UL, + DUAL_CG5_FAULT_INTEN = MCU_BASE | 0x7800UL, + DUAL_CG6_FAULT_INTEN = MCU_BASE | 0x7880UL, + DUAL_CG7_FAULT_INTEN = MCU_BASE | 0x7900UL, + SOFT_INFO0 = MCU_BASE | 0x7f00UL, + LONG_TIME_START_EN = MCU_BASE | 0x9000UL, +}; + +/*--------------------------offset-----------------------------------*/ +/* PIU IOR0 */ +enum { + PIUCONFIG0 = 0x0UL, + EPDMABAR = 0x80UL, + IOMMUSEGITEM0 = 0x100UL, + IOMMUEXCPT_CTRL = 0x2100UL, + MSIADDR = 0x2180UL, + MSICONFIG0 = 0x2200UL, + INTACONFIG = 0xa200UL, + INTBCONFIG = 0xa280UL, + INTCCONFIG = 0xa300UL, + INTDCONFIG = 0xa380UL, + AERERRINTCONFIG = 0xa400UL, + AERERRMSICONFIG = 0xa480UL, + PMEINTCONFIG = 0xa500UL, + PMEMSICONFIG = 0xa580UL, + HPINTCONFIG = 0xa600UL, + HPMSICONFIG = 0xa680UL, + DTBASEADDR = 0xb000UL, + DTLB_FLUSHALL = 0xb080UL, + DTLB_FLUSHDEV = 0xb100UL, + PTLB_FLUSHALL = 0xb180UL, + PTLB_FLUSHDEV = 0xb200UL, + PTLB_FLUSHVADDR = 0xb280UL, + PCACHE_FLUSHALL = 0xb300UL, + PCACHE_FLUSHDEV = 0xb380UL, + PCACHE_FLUSHPADDR = 0xb400UL, + TIMEOUT_CONFIG = 0xb480UL, + IOMMUEXCPT_STATUS = 0xb500UL, + IOMMUPAGE_PADDR1 = 0xb580UL, + IOMMUPAGE_PADDR2 = 0xb600UL, + IOMMUPAGE_PADDR3 = 0xb680UL, + PTLB_ACCESS = 0xb700UL, + PTLB_ITEM_TAG = 0xb780UL, + PTLB_ITEM_DATA = 0xb800UL, + PCACHE_ACCESS = 0xb880UL, + PCACHE_ITEM_TAG = 0xb900UL, + PCACHE_ITEM_DATA0 = 0xb980UL, +}; + +/* PIU IOR1 */ +enum { + PIUCONFIG1 = 0x0UL, + ERRENABLE = 0x880UL, + RCDEBUGINF1 = 0xc80UL, + DCACONTROL = 0x1a00UL, + DEVICEID0 = 0x1a80UL, +}; + +/* RC */ +enum { + RC_VENDOR_ID = 0x0UL, + RC_COMMAND = 0x80UL, + RC_REVISION_ID = 0x100UL, + RC_PRIMARY_BUS = 0x300UL, + RC_MSI_CONTROL = 0xa00UL, + RC_EXP_DEVCAP = 0xe80UL, + RC_EXP_DEVCTL = 0xf00UL, + RC_SLOT_CTRL = 0x1100UL, + RC_LINK_STAT = 0x1000UL, + RC_CONTROL = 0X1180UL, + RC_STATUS = 0X1200UL, + RC_EXP_DEVCTL2 = 0x1300UL, + RC_PORT_LINK_CTL = 0xe200UL, + 
RC_ORDER_RULE_CTL = 0x11680UL, + RC_MISC_CONTROL_1 = 0x11780UL, + RC_PHY_INT_REG = 0x80000UL, + RC_PHY_EXT_GEN1 = 0x82400UL, + RC_PHY_EXT_GEN2 = 0x82480UL, +}; +/* GPIO */ +enum { + GPIO_SWPORTA_DR = GPIO_BASE | 0x0UL, + GPIO_SWPORTA_DDR = GPIO_BASE | 0x200UL, +}; +/*--------------------------------------------------------------------------*/ +#endif /* _ASM_SW64_UNCORE_IO_XUELANG_H */ -- Gitee From cfe1e898f741c1d9f8a497bd66960077826a9c8f Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:27 +0800 Subject: [PATCH 0291/2138] anolis: sw64: add module support ANBZ: #4688 Add kernel module support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/module.h | 17 ++ arch/sw_64/kernel/module.c | 279 ++++++++++++++++++++++++++++++++ 2 files changed, 296 insertions(+) create mode 100644 arch/sw_64/include/asm/module.h create mode 100644 arch/sw_64/kernel/module.c diff --git a/arch/sw_64/include/asm/module.h b/arch/sw_64/include/asm/module.h new file mode 100644 index 000000000000..d1663aab4097 --- /dev/null +++ b/arch/sw_64/include/asm/module.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MODULE_H +#define _ASM_SW64_MODULE_H + +#include + +struct mod_arch_specific { + unsigned int gotsecindex; +}; + +#define ARCH_SHF_SMALL SHF_SW64_GPREL + +#ifdef MODULE +asm(".section .got, \"aw\", @progbits; .align 3; .previous"); +#endif + +#endif /* _ASM_SW64_MODULE_H */ diff --git a/arch/sw_64/kernel/module.c b/arch/sw_64/kernel/module.c new file mode 100644 index 000000000000..67264e3644a7 --- /dev/null +++ b/arch/sw_64/kernel/module.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#define DEBUGP(fmt...) + +/* Allocate the GOT at the end of the core sections. 
*/ + +struct got_entry { + struct got_entry *next; + Elf64_Sxword r_addend; + int got_offset; +}; + +static inline void +process_reloc_for_got(Elf64_Rela *rela, + struct got_entry *chains, Elf64_Xword *poffset) +{ + unsigned long r_sym = ELF64_R_SYM(rela->r_info); + unsigned long r_type = ELF64_R_TYPE(rela->r_info); + Elf64_Sxword r_addend = rela->r_addend; + struct got_entry *g; + + if (r_type != R_SW64_LITERAL) + return; + + for (g = chains + r_sym; g ; g = g->next) + if (g->r_addend == r_addend) { + if (g->got_offset == 0) { + g->got_offset = *poffset; + *poffset += 8; + } + goto found_entry; + } + + g = kmalloc(sizeof(*g), GFP_KERNEL); + g->next = chains[r_sym].next; + g->r_addend = r_addend; + g->got_offset = *poffset; + *poffset += 8; + chains[r_sym].next = g; + + found_entry: + /* + * Trick: most of the ELF64_R_TYPE field is unused. There are + * 42 valid relocation types, and a 32-bit field. Co-opt the + * bits above 256 to store the got offset for this reloc. + */ + rela->r_info |= g->got_offset << 8; +} + +int +module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs, + char *secstrings, struct module *me) +{ + struct got_entry *chains; + Elf64_Rela *rela; + Elf64_Shdr *esechdrs, *symtab, *s, *got; + unsigned long nsyms, nrela, i; + + esechdrs = sechdrs + hdr->e_shnum; + symtab = got = NULL; + + /* Find out how large the symbol table is. Allocate one got_entry + * head per symbol. Normally this will be enough, but not always. + * We'll chain different offsets for the symbol down each head. 
+ */ + for (s = sechdrs; s < esechdrs; ++s) + if (s->sh_type == SHT_SYMTAB) + symtab = s; + else if (!strcmp(".got", secstrings + s->sh_name)) { + got = s; + me->arch.gotsecindex = s - sechdrs; + } + + if (!symtab) { + pr_err("module %s: no symbol table\n", me->name); + return -ENOEXEC; + } + if (!got) { + pr_err("module %s: no got section\n", me->name); + return -ENOEXEC; + } + + nsyms = symtab->sh_size / sizeof(Elf64_Sym); + chains = kcalloc(nsyms, sizeof(struct got_entry), GFP_KERNEL); + if (!chains) { + pr_err("module %s: no memory for symbol chain buffer\n", + me->name); + return -ENOMEM; + } + + got->sh_size = 0; + got->sh_addralign = 8; + got->sh_type = SHT_NOBITS; + + /* Examine all LITERAL relocations to find out what GOT entries + * are required. This sizes the GOT section as well. + */ + for (s = sechdrs; s < esechdrs; ++s) + if (s->sh_type == SHT_RELA) { + nrela = s->sh_size / sizeof(Elf64_Rela); + rela = (void *)hdr + s->sh_offset; + for (i = 0; i < nrela; ++i) + process_reloc_for_got(rela+i, chains, + &got->sh_size); + } + + /* Free the memory we allocated. */ + for (i = 0; i < nsyms; ++i) { + struct got_entry *g, *n; + + for (g = chains[i].next; g ; g = n) { + n = g->next; + kfree(g); + } + } + kfree(chains); + + return 0; +} + +int +apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) +{ + Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr; + unsigned long i, n = sechdrs[relsec].sh_size / sizeof(*rela); + Elf64_Sym *symtab, *sym; + void *base, *location; + unsigned long got, gp; + + DEBUGP("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + + base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr; + symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr; + + /* The small sections were sorted to the end of the segment. + * The following should definitely cover them. 
+ */ + got = sechdrs[me->arch.gotsecindex].sh_addr; + gp = got + 0x8000; + + for (i = 0; i < n; i++) { + unsigned long r_sym = ELF64_R_SYM(rela[i].r_info); + unsigned long r_type = ELF64_R_TYPE(rela[i].r_info); + unsigned long r_got_offset = r_type >> 8; + unsigned long value, hi, lo; + + r_type &= 0xff; + + /* This is where to make the change. */ + location = base + rela[i].r_offset; + + /* This is the symbol it is referring to. Note that all + * unresolved symbols have been resolved. + */ + sym = symtab + r_sym; + value = sym->st_value + rela[i].r_addend; + + switch (r_type) { + case R_SW64_NONE: + break; + case R_SW64_REFLONG: + *(u32 *)location = value; + break; + case R_SW64_REFQUAD: + /* BUG() can produce misaligned relocations. */ + ((u32 *)location)[0] = value; + ((u32 *)location)[1] = value >> 32; + break; + case R_SW64_GPREL32: + value -= gp; + if ((int)value != value) + goto reloc_overflow; + *(u32 *)location = value; + break; + case R_SW64_LITERAL: + hi = got + r_got_offset; + lo = hi - gp; + if ((short)lo != lo) { + unsigned long over_offset = (lo + 0x8000) >> 16; + + if ((over_offset & 0x8000) == 0) { + *(u16 *)(location - 0x4) = over_offset; + *(u16 *)location = lo - ((over_offset << 16) + gp); + *(u64 *)hi = value; + } else { + goto reloc_overflow; + } + } else { + *(u16 *)location = lo; + *(u64 *)hi = value; + } + break; + case R_SW64_LITERAL_GOT: + /* empty for now need to fill */ + break; + case R_SW64_LITUSE: + break; + case R_SW64_GPDISP: + value = gp - (u64)location; + lo = (short)value; + hi = (int)(value - lo); + if (hi + lo != value) + goto reloc_overflow; + *(u16 *)location = hi >> 16; + *(u16 *)(location + rela[i].r_addend) = lo; + break; + case R_SW64_BRSGP: + /* + * BRSGP is only allowed to bind to local symbols. + * If the section is undef, this means that the + * value was resolved from somewhere else. 
+ */ + if (sym->st_shndx == SHN_UNDEF) + goto reloc_overflow; + if ((sym->st_other & STO_SW64_STD_GPLOAD) == + STO_SW64_STD_GPLOAD) + /* Omit the prologue. */ + value += 8; + fallthrough; + case R_SW64_BRADDR: + value -= (u64)location + 4; + if (value & 3) + goto reloc_overflow; + value = (long)value >> 2; + if (value + (1<<21) >= 1<<22) + goto reloc_overflow; + value &= 0x1fffff; + value |= *(u32 *)location & ~0x1fffff; + *(u32 *)location = value; + break; + case R_SW64_HINT: + break; + case R_SW64_SREL32: + value -= (u64)location; + if ((int)value != value) + goto reloc_overflow; + *(u32 *)location = value; + break; + case R_SW64_SREL64: + value -= (u64)location; + *(u64 *)location = value; + break; + case R_SW64_GPRELHIGH: + value = (long)(value - gp + 0x8000) >> 16; + if ((short) value != value) + goto reloc_overflow; + *(u16 *)location = value; + break; + case R_SW64_GPRELLOW: + value -= gp; + *(u16 *)location = value; + break; + case R_SW64_GPREL16: + value -= gp; + if ((short) value != value) + goto reloc_overflow; + *(u16 *)location = value; + break; + default: + pr_err("module %s: Unknown relocation: %lu\n", me->name, r_type); + return -ENOEXEC; +reloc_overflow: + if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION) + pr_err("module %s: Relocation (type %lu) overflow vs section %d\n", + me->name, r_type, sym->st_shndx); + else + pr_err("module %s: Relocation (type %lu) overflow vs %s\n", + me->name, r_type, strtab + sym->st_name); + return -ENOEXEC; + } + } + + return 0; +} -- Gitee From 54bf48f9d874857a9a7e2716a28df22fa4ae1375 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:33 +0800 Subject: [PATCH 0292/2138] anolis: sw64: add some common routines ANBZ: #4688 Add some other common routines for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/debug.h | 38 ++++ arch/sw_64/include/asm/extable.h | 47 +++++ arch/sw_64/include/asm/futex.h | 168 +++++++++++++++++ arch/sw_64/kernel/asm-offsets.c | 240 ++++++++++++++++++++++++ arch/sw_64/kernel/audit.c | 61 ++++++ arch/sw_64/kernel/early_printk.c | 183 ++++++++++++++++++ arch/sw_64/kernel/entry.S | 306 +++++++++++++++++++++++++++++++ 7 files changed, 1043 insertions(+) create mode 100644 arch/sw_64/include/asm/debug.h create mode 100644 arch/sw_64/include/asm/extable.h create mode 100644 arch/sw_64/include/asm/futex.h create mode 100644 arch/sw_64/kernel/asm-offsets.c create mode 100644 arch/sw_64/kernel/audit.c create mode 100644 arch/sw_64/kernel/early_printk.c create mode 100644 arch/sw_64/kernel/entry.S diff --git a/arch/sw_64/include/asm/debug.h b/arch/sw_64/include/asm/debug.h new file mode 100644 index 000000000000..8db5a8bb9ab7 --- /dev/null +++ b/arch/sw_64/include/asm/debug.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Mao Minkai + * Author: Mao Minkai + * + * This code is taken from arch/mips/include/asm/debug.h + * Copyright (C) 2015 Imagination Technologies + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _ASM_SW64_DEBUG_H +#define _ASM_SW64_DEBUG_H + +#include + +/* + * sw64_debugfs_dir corresponds to the "sw_64" directory at the top level + * of the DebugFS hierarchy. SW64-specific DebugFS entries should be + * placed beneath this directory. 
+ */ +extern struct dentry *sw64_debugfs_dir; + +#define UNA_MAX_ENTRIES 64 + +struct unaligned_stat { + unsigned long pc; + unsigned long va; +}; + +extern char unaligned_task[]; +extern unsigned long unaligned_count; +extern struct unaligned_stat unaligned[]; + +#endif /* _ASM_SW64_DEBUG_H */ diff --git a/arch/sw_64/include/asm/extable.h b/arch/sw_64/include/asm/extable.h new file mode 100644 index 000000000000..42f872ce6c3b --- /dev/null +++ b/arch/sw_64/include/asm/extable.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_EXTABLE_H +#define _ASM_SW64_EXTABLE_H + +/* + * About the exception table: + * + * - insn is a 32-bit pc-relative offset from the faulting insn. + * - nextinsn is a 16-bit offset off of the faulting instruction + * (not off of the *next* instruction as branches are). + * - errreg is the register in which to place -EFAULT. + * - valreg is the final target register for the load sequence + * and will be zeroed. + * + * Either errreg or valreg may be $31, in which case nothing happens. + * + * The exception fixup information "just so happens" to be arranged + * as in a MEM format instruction. 
This lets us emit our three + * values like so: + * + * lda valreg, nextinsn(errreg) + * + */ + +struct exception_table_entry { + signed int insn; + union exception_fixup { + unsigned int unit; + struct { + signed int nextinsn : 16; + unsigned int errreg : 5; + unsigned int valreg : 5; + } bits; + } fixup; +}; + +#define ARCH_HAS_RELATIVE_EXTABLE + +extern int fixup_exception(struct pt_regs *regs, unsigned long pc); + +#define swap_ex_entry_fixup(a, b, tmp, delta) \ + do { \ + (a)->fixup.unit = (b)->fixup.unit; \ + (b)->fixup.unit = (tmp).fixup.unit; \ + } while (0) + +#endif /* _ASM_SW64_EXTABLE_H */ diff --git a/arch/sw_64/include/asm/futex.h b/arch/sw_64/include/asm/futex.h new file mode 100644 index 000000000000..783799813980 --- /dev/null +++ b/arch/sw_64/include/asm/futex.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_FUTEX_H +#define _ASM_SW64_FUTEX_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +#ifdef CONFIG_SUBARCH_C3B + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg, tmp) \ + __asm__ __volatile__( \ + "1: lldw %0, 0(%3)\n" \ + " ldi %2, 1\n" \ + " wr_f %2\n" \ + insn \ + "2: lstw %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 4f\n" \ + " bis $31, $31, %1\n" \ + "3: .subsection 2\n" \ + "4: br 1b\n" \ + " .previous\n" \ + " .section __ex_table, \"a\"\n" \ + " .long 1b-.\n" \ + " ldi $31, 3b-1b(%1)\n" \ + " .long 2b-.\n" \ + " ldi $31, 3b-2b(%1)\n" \ + " .previous\n" \ + : "=&r" (oldval), "=&r"(ret), "=&r"(tmp) \ + : "r" (uaddr), "r"(oparg) \ + : "memory") + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0, cmp; + u32 prev, tmp; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__ ( + "1: lldw %1, 0(%4)\n" + " cmpeq %1, %5, %2\n" + " wr_f %2\n" + " bis $31, %6, %3\n" + "2: lstw %3, 0(%4)\n" + " rd_f %3\n" + " beq %2, 3f\n" + " beq %3, 4f\n" + "3: .subsection 2\n" + "4: br 1b\n" + " .previous\n" 
+ " .section __ex_table, \"a\"\n" + " .long 1b-.\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b-.\n" + " ldi $31, 3b-2b(%0)\n" + " .previous\n" + : "+r"(ret), "=&r"(prev), "=&r"(cmp), "=&r"(tmp) + : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) + : "memory"); + + *uval = prev; + return ret; +} +#else /* !CONFIG_SUBARCH_C3B */ + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg, tmp) \ + __asm__ __volatile__( \ + "1: lldw %0, 0(%3)\n" \ + insn \ + "2: lstw %1, 0(%3)\n" \ + " beq %1, 4f\n" \ + " bis $31, $31, %1\n" \ + "3: .subsection 2\n" \ + "4: lbr 1b\n" \ + " .previous\n" \ + " .section __ex_table, \"a\"\n" \ + " .long 1b-.\n" \ + " ldi $31, 3b-1b(%1)\n" \ + " .long 2b-.\n" \ + " ldi $31, 3b-2b(%1)\n" \ + " .previous\n" \ + : "=&r" (oldval), "=&r"(ret), "=&r"(tmp) \ + : "r" (uaddr), "r"(oparg) \ + : "memory") + + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0, cmp; + u32 prev, tmp; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__ ( + "1: lldw %1, 0(%4)\n" + " cmpeq %1, %5, %2\n" + " beq %2, 3f\n" + " bis $31, %6, %3\n" + "2: lstw %3, 0(%4)\n" + " beq %3, 4f\n" + "3: .subsection 2\n" + "4: lbr 1b\n" + " .previous\n" + " .section __ex_table, \"a\"\n" + " .long 1b-.\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b-.\n" + " ldi $31, 3b-2b(%0)\n" + " .previous\n" + : "+r"(ret), "=&r"(prev), "=&r"(cmp), "=&r"(tmp) + : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) + : "memory"); + + *uval = prev; + return ret; +} +#endif /* CONFIG_SUBARCH_C3B */ + +static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, + u32 __user *uaddr) +{ + int oldval = 0, ret; + unsigned long tmp; + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("mov %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("addw %0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_OR: + 
__futex_atomic_op("or %0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("andnot %0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("xor %0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) + *oval = oldval; + + return ret; +} + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_FUTEX_H */ diff --git a/arch/sw_64/kernel/asm-offsets.c b/arch/sw_64/kernel/asm-offsets.c new file mode 100644 index 000000000000..41310a8a7af1 --- /dev/null +++ b/arch/sw_64/kernel/asm-offsets.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generate definitions needed by assembly language modules. + * This code generates raw asm output which is post-processed to extract + * and format the required data. + */ + +#define GENERATING_ASM_OFFSETS /* asm/smp.h */ +#include +#include +#include +#include + +#include +#include + +#include "traps.c" +#include "signal.c" + +void foo(void) +{ + DEFINE(ASM_THREAD_SIZE, THREAD_SIZE); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + BLANK(); + + DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); + DEFINE(TASK_CRED, offsetof(struct task_struct, cred)); + DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent)); + DEFINE(TASK_GROUP_LEADER, offsetof(struct task_struct, group_leader)); + DEFINE(TASK_TGID, offsetof(struct task_struct, tgid)); + DEFINE(TASK_STACK, offsetof(struct task_struct, stack)); +#ifdef CONFIG_SMP + DEFINE(TASK_CPU, offsetof(struct task_struct, thread_info.cpu)); +#endif + BLANK(); + + OFFSET(PSTATE_REGS, processor_state, regs); + OFFSET(PSTATE_FPREGS, processor_state, fpregs); + OFFSET(PSTATE_FPCR, processor_state, fpcr); + OFFSET(PSTATE_KTP, processor_state, ktp); +#ifdef CONFIG_HIBERNATION + OFFSET(PSTATE_SP, processor_state, sp); +#endif + OFFSET(PBE_ADDR, pbe, address); + OFFSET(PBE_ORIG_ADDR, pbe, orig_address); + 
OFFSET(PBE_NEXT, pbe, next); + OFFSET(CALLEE_R9, callee_saved_regs, r9); + OFFSET(CALLEE_R10, callee_saved_regs, r10); + OFFSET(CALLEE_R11, callee_saved_regs, r11); + OFFSET(CALLEE_R12, callee_saved_regs, r12); + OFFSET(CALLEE_R13, callee_saved_regs, r13); + OFFSET(CALLEE_R14, callee_saved_regs, r14); + OFFSET(CALLEE_R15, callee_saved_regs, r15); + OFFSET(CALLEE_RA, callee_saved_regs, ra); + OFFSET(CALLEE_F2, callee_saved_fpregs, f2); + OFFSET(CALLEE_F3, callee_saved_fpregs, f3); + OFFSET(CALLEE_F4, callee_saved_fpregs, f4); + OFFSET(CALLEE_F5, callee_saved_fpregs, f5); + OFFSET(CALLEE_F6, callee_saved_fpregs, f6); + OFFSET(CALLEE_F7, callee_saved_fpregs, f7); + OFFSET(CALLEE_F8, callee_saved_fpregs, f8); + OFFSET(CALLEE_F9, callee_saved_fpregs, f9); + BLANK(); + DEFINE(CRED_UID, offsetof(struct cred, uid)); + DEFINE(CRED_EUID, offsetof(struct cred, euid)); + DEFINE(CRED_GID, offsetof(struct cred, gid)); + DEFINE(CRED_EGID, offsetof(struct cred, egid)); + BLANK(); + + DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs)); + DEFINE(PT_REGS_R0, offsetof(struct pt_regs, regs[0])); + DEFINE(PT_REGS_R1, offsetof(struct pt_regs, regs[1])); + DEFINE(PT_REGS_R2, offsetof(struct pt_regs, regs[2])); + DEFINE(PT_REGS_R3, offsetof(struct pt_regs, regs[3])); + DEFINE(PT_REGS_R4, offsetof(struct pt_regs, regs[4])); + DEFINE(PT_REGS_R5, offsetof(struct pt_regs, regs[5])); + DEFINE(PT_REGS_R6, offsetof(struct pt_regs, regs[6])); + DEFINE(PT_REGS_R7, offsetof(struct pt_regs, regs[7])); + DEFINE(PT_REGS_R8, offsetof(struct pt_regs, regs[8])); + DEFINE(PT_REGS_R9, offsetof(struct pt_regs, regs[9])); + DEFINE(PT_REGS_R10, offsetof(struct pt_regs, regs[10])); + DEFINE(PT_REGS_R11, offsetof(struct pt_regs, regs[11])); + DEFINE(PT_REGS_R12, offsetof(struct pt_regs, regs[12])); + DEFINE(PT_REGS_R13, offsetof(struct pt_regs, regs[13])); + DEFINE(PT_REGS_R14, offsetof(struct pt_regs, regs[14])); + DEFINE(PT_REGS_R15, offsetof(struct pt_regs, regs[15])); + DEFINE(PT_REGS_R16, offsetof(struct pt_regs, 
regs[16])); + DEFINE(PT_REGS_R17, offsetof(struct pt_regs, regs[17])); + DEFINE(PT_REGS_R18, offsetof(struct pt_regs, regs[18])); + DEFINE(PT_REGS_R19, offsetof(struct pt_regs, regs[19])); + DEFINE(PT_REGS_R20, offsetof(struct pt_regs, regs[20])); + DEFINE(PT_REGS_R21, offsetof(struct pt_regs, regs[21])); + DEFINE(PT_REGS_R22, offsetof(struct pt_regs, regs[22])); + DEFINE(PT_REGS_R23, offsetof(struct pt_regs, regs[23])); + DEFINE(PT_REGS_R24, offsetof(struct pt_regs, regs[24])); + DEFINE(PT_REGS_R25, offsetof(struct pt_regs, regs[25])); + DEFINE(PT_REGS_R26, offsetof(struct pt_regs, regs[26])); + DEFINE(PT_REGS_R27, offsetof(struct pt_regs, regs[27])); + DEFINE(PT_REGS_R28, offsetof(struct pt_regs, regs[28])); + DEFINE(PT_REGS_GP, offsetof(struct pt_regs, regs[29])); + DEFINE(PT_REGS_SP, offsetof(struct pt_regs, regs[30])); + DEFINE(PT_REGS_PC, offsetof(struct pt_regs, pc)); + DEFINE(PT_REGS_PS, offsetof(struct pt_regs, ps)); + DEFINE(PT_REGS_ORIG_R0, offsetof(struct pt_regs, orig_r0)); + DEFINE(PT_REGS_ORIG_R19, offsetof(struct pt_regs, orig_r19)); + DEFINE(PT_REGS_HM_PS, offsetof(struct pt_regs, hm_ps)); + DEFINE(PT_REGS_HM_PC, offsetof(struct pt_regs, hm_pc)); + DEFINE(PT_REGS_HM_GP, offsetof(struct pt_regs, hm_gp)); + DEFINE(PT_REGS_HM_R16, offsetof(struct pt_regs, hm_r16)); + DEFINE(PT_REGS_HM_R17, offsetof(struct pt_regs, hm_r17)); + DEFINE(PT_REGS_HM_R18, offsetof(struct pt_regs, hm_r18)); + BLANK(); + + DEFINE(KVM_REGS_SIZE, sizeof(struct kvm_regs)); + DEFINE(KVM_REGS_R0, offsetof(struct kvm_regs, r0)); + DEFINE(KVM_REGS_R1, offsetof(struct kvm_regs, r1)); + DEFINE(KVM_REGS_R2, offsetof(struct kvm_regs, r2)); + DEFINE(KVM_REGS_R3, offsetof(struct kvm_regs, r3)); + DEFINE(KVM_REGS_R4, offsetof(struct kvm_regs, r4)); + DEFINE(KVM_REGS_R5, offsetof(struct kvm_regs, r5)); + DEFINE(KVM_REGS_R6, offsetof(struct kvm_regs, r6)); + DEFINE(KVM_REGS_R7, offsetof(struct kvm_regs, r7)); + DEFINE(KVM_REGS_R8, offsetof(struct kvm_regs, r8)); + DEFINE(KVM_REGS_R9, 
offsetof(struct kvm_regs, r9)); + DEFINE(KVM_REGS_R10, offsetof(struct kvm_regs, r10)); + DEFINE(KVM_REGS_R11, offsetof(struct kvm_regs, r11)); + DEFINE(KVM_REGS_R12, offsetof(struct kvm_regs, r12)); + DEFINE(KVM_REGS_R13, offsetof(struct kvm_regs, r13)); + DEFINE(KVM_REGS_R14, offsetof(struct kvm_regs, r14)); + DEFINE(KVM_REGS_R15, offsetof(struct kvm_regs, r15)); + DEFINE(KVM_REGS_R19, offsetof(struct kvm_regs, r19)); + DEFINE(KVM_REGS_R20, offsetof(struct kvm_regs, r20)); + DEFINE(KVM_REGS_R21, offsetof(struct kvm_regs, r21)); + DEFINE(KVM_REGS_R22, offsetof(struct kvm_regs, r22)); + DEFINE(KVM_REGS_R23, offsetof(struct kvm_regs, r23)); + DEFINE(KVM_REGS_R24, offsetof(struct kvm_regs, r24)); + DEFINE(KVM_REGS_R25, offsetof(struct kvm_regs, r25)); + DEFINE(KVM_REGS_R26, offsetof(struct kvm_regs, r26)); + DEFINE(KVM_REGS_R27, offsetof(struct kvm_regs, r27)); + DEFINE(KVM_REGS_R28, offsetof(struct kvm_regs, r28)); + DEFINE(KVM_REGS_FPCR, offsetof(struct kvm_regs, fpcr)); + DEFINE(KVM_REGS_F0, offsetof(struct kvm_regs, fp[0 * 4])); + DEFINE(KVM_REGS_F1, offsetof(struct kvm_regs, fp[1 * 4])); + DEFINE(KVM_REGS_F2, offsetof(struct kvm_regs, fp[2 * 4])); + DEFINE(KVM_REGS_F3, offsetof(struct kvm_regs, fp[3 * 4])); + DEFINE(KVM_REGS_F4, offsetof(struct kvm_regs, fp[4 * 4])); + DEFINE(KVM_REGS_F5, offsetof(struct kvm_regs, fp[5 * 4])); + DEFINE(KVM_REGS_F6, offsetof(struct kvm_regs, fp[6 * 4])); + DEFINE(KVM_REGS_F7, offsetof(struct kvm_regs, fp[7 * 4])); + DEFINE(KVM_REGS_F8, offsetof(struct kvm_regs, fp[8 * 4])); + DEFINE(KVM_REGS_F9, offsetof(struct kvm_regs, fp[9 * 4])); + DEFINE(KVM_REGS_F10, offsetof(struct kvm_regs, fp[10 * 4])); + DEFINE(KVM_REGS_F11, offsetof(struct kvm_regs, fp[11 * 4])); + DEFINE(KVM_REGS_F12, offsetof(struct kvm_regs, fp[12 * 4])); + DEFINE(KVM_REGS_F13, offsetof(struct kvm_regs, fp[13 * 4])); + DEFINE(KVM_REGS_F14, offsetof(struct kvm_regs, fp[14 * 4])); + DEFINE(KVM_REGS_F15, offsetof(struct kvm_regs, fp[15 * 4])); + DEFINE(KVM_REGS_F16, 
offsetof(struct kvm_regs, fp[16 * 4])); + DEFINE(KVM_REGS_F17, offsetof(struct kvm_regs, fp[17 * 4])); + DEFINE(KVM_REGS_F18, offsetof(struct kvm_regs, fp[18 * 4])); + DEFINE(KVM_REGS_F19, offsetof(struct kvm_regs, fp[19 * 4])); + DEFINE(KVM_REGS_F20, offsetof(struct kvm_regs, fp[20 * 4])); + DEFINE(KVM_REGS_F21, offsetof(struct kvm_regs, fp[21 * 4])); + DEFINE(KVM_REGS_F22, offsetof(struct kvm_regs, fp[22 * 4])); + DEFINE(KVM_REGS_F23, offsetof(struct kvm_regs, fp[23 * 4])); + DEFINE(KVM_REGS_F24, offsetof(struct kvm_regs, fp[24 * 4])); + DEFINE(KVM_REGS_F25, offsetof(struct kvm_regs, fp[25 * 4])); + DEFINE(KVM_REGS_F26, offsetof(struct kvm_regs, fp[26 * 4])); + DEFINE(KVM_REGS_F27, offsetof(struct kvm_regs, fp[27 * 4])); + DEFINE(KVM_REGS_F28, offsetof(struct kvm_regs, fp[28 * 4])); + DEFINE(KVM_REGS_F29, offsetof(struct kvm_regs, fp[29 * 4])); + DEFINE(KVM_REGS_F30, offsetof(struct kvm_regs, fp[30 * 4])); + DEFINE(KVM_REGS_PS, offsetof(struct kvm_regs, ps)); + DEFINE(KVM_REGS_PC, offsetof(struct kvm_regs, pc)); + DEFINE(KVM_REGS_GP, offsetof(struct kvm_regs, gp)); + DEFINE(KVM_REGS_R16, offsetof(struct kvm_regs, r16)); + DEFINE(KVM_REGS_R17, offsetof(struct kvm_regs, r17)); + DEFINE(KVM_REGS_R18, offsetof(struct kvm_regs, r18)); + BLANK(); + + DEFINE(VCPU_RET_SIZE, sizeof(struct vcpu_run_ret_stack)); + DEFINE(VCPU_RET_RA, offsetof(struct vcpu_run_ret_stack, ra)); + DEFINE(VCPU_RET_R0, offsetof(struct vcpu_run_ret_stack, r0)); + BLANK(); + + DEFINE(HOST_INT_SIZE, sizeof(struct host_int_args)); + DEFINE(HOST_INT_R18, offsetof(struct host_int_args, r18)); + DEFINE(HOST_INT_R17, offsetof(struct host_int_args, r17)); + DEFINE(HOST_INT_R16, offsetof(struct host_int_args, r16)); + BLANK(); + + OFFSET(TASK_THREAD, task_struct, thread); + OFFSET(TASK_THREAD_F0, task_struct, thread.fpstate.fp[0]); + OFFSET(TASK_THREAD_F1, task_struct, thread.fpstate.fp[1]); + OFFSET(TASK_THREAD_F2, task_struct, thread.fpstate.fp[2]); + OFFSET(TASK_THREAD_F3, task_struct, 
thread.fpstate.fp[3]); + OFFSET(TASK_THREAD_F4, task_struct, thread.fpstate.fp[4]); + OFFSET(TASK_THREAD_F5, task_struct, thread.fpstate.fp[5]); + OFFSET(TASK_THREAD_F6, task_struct, thread.fpstate.fp[6]); + OFFSET(TASK_THREAD_F7, task_struct, thread.fpstate.fp[7]); + OFFSET(TASK_THREAD_F8, task_struct, thread.fpstate.fp[8]); + OFFSET(TASK_THREAD_F9, task_struct, thread.fpstate.fp[9]); + OFFSET(TASK_THREAD_F10, task_struct, thread.fpstate.fp[10]); + OFFSET(TASK_THREAD_F11, task_struct, thread.fpstate.fp[11]); + OFFSET(TASK_THREAD_F12, task_struct, thread.fpstate.fp[12]); + OFFSET(TASK_THREAD_F13, task_struct, thread.fpstate.fp[13]); + OFFSET(TASK_THREAD_F14, task_struct, thread.fpstate.fp[14]); + OFFSET(TASK_THREAD_F15, task_struct, thread.fpstate.fp[15]); + OFFSET(TASK_THREAD_F16, task_struct, thread.fpstate.fp[16]); + OFFSET(TASK_THREAD_F17, task_struct, thread.fpstate.fp[17]); + OFFSET(TASK_THREAD_F18, task_struct, thread.fpstate.fp[18]); + OFFSET(TASK_THREAD_F19, task_struct, thread.fpstate.fp[19]); + OFFSET(TASK_THREAD_F20, task_struct, thread.fpstate.fp[20]); + OFFSET(TASK_THREAD_F21, task_struct, thread.fpstate.fp[21]); + OFFSET(TASK_THREAD_F22, task_struct, thread.fpstate.fp[22]); + OFFSET(TASK_THREAD_F23, task_struct, thread.fpstate.fp[23]); + OFFSET(TASK_THREAD_F24, task_struct, thread.fpstate.fp[24]); + OFFSET(TASK_THREAD_F25, task_struct, thread.fpstate.fp[25]); + OFFSET(TASK_THREAD_F26, task_struct, thread.fpstate.fp[26]); + OFFSET(TASK_THREAD_F27, task_struct, thread.fpstate.fp[27]); + OFFSET(TASK_THREAD_F28, task_struct, thread.fpstate.fp[28]); + OFFSET(TASK_THREAD_F29, task_struct, thread.fpstate.fp[29]); + OFFSET(TASK_THREAD_F30, task_struct, thread.fpstate.fp[30]); + OFFSET(TASK_THREAD_FPCR, task_struct, thread.fpstate.fpcr); + BLANK(); + OFFSET(TASK_THREAD_RA, task_struct, thread.ra); + OFFSET(TASK_THREAD_SP, task_struct, thread.sp); + OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]); + OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]); + 
OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]); + OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]); + OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]); + OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]); + OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]); + BLANK(); + DEFINE(ASM_THREAD_SIZE, THREAD_SIZE); + BLANK(); + DEFINE(RT_SIGFRAME_SIZE, sizeof(struct rt_sigframe)); + OFFSET(RT_SIGFRAME_MCTX, rt_sigframe, uc.uc_mcontext); +} diff --git a/arch/sw_64/kernel/audit.c b/arch/sw_64/kernel/audit.c new file mode 100644 index 000000000000..dcf58deee3e2 --- /dev/null +++ b/arch/sw_64/kernel/audit.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include + +static unsigned int dir_class[] = { +#include +~0U +}; + +static unsigned int read_class[] = { +#include +~0U +}; + +static unsigned int write_class[] = { +#include +~0U +}; + +static unsigned int chattr_class[] = { +#include +~0U +}; + +static unsigned int signal_class[] = { +#include +~0U +}; + +int audit_classify_arch(int arch) +{ + return 0; +} + +int audit_classify_syscall(int abi, unsigned int syscall) +{ + switch (syscall) { + case __NR_open: + return 2; + case __NR_openat: + return 3; + case __NR_execve: + return 5; + default: + return 0; + } +} + +static int __init audit_classes_init(void) +{ + audit_register_class(AUDIT_CLASS_WRITE, write_class); + audit_register_class(AUDIT_CLASS_READ, read_class); + audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); + audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); + return 0; +} + +device_initcall(audit_classes_init); diff --git a/arch/sw_64/kernel/early_printk.c b/arch/sw_64/kernel/early_printk.c new file mode 100644 index 000000000000..66af1165e89b --- /dev/null +++ b/arch/sw_64/kernel/early_printk.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include + +static unsigned long early_serial_base; /* ttyS0 */ + +#define XMTRDY 0x20 + 
+#define DLAB 0x80 + +#define TXR 0 /* Transmit register (WRITE) */ +#define RXR 0 /* Receive register (READ) */ +#define IER 1 /* Interrupt Enable */ +#define IIR 2 /* Interrupt ID */ +#define FCR 2 /* FIFO control */ +#define LCR 3 /* Line control */ +#define MCR 4 /* Modem control */ +#define LSR 5 /* Line Status */ +#define MSR 6 /* Modem Status */ +#define DLL 0 /* Divisor Latch Low */ +#define DLH 1 /* Divisor latch High */ + +static void mem32_serial_out(unsigned long addr, int offset, int value) +{ + void __iomem *vaddr = (void __iomem *)addr; + + offset = offset << 9; + + writel(value, vaddr + offset); +} + +static unsigned int mem32_serial_in(unsigned long addr, int offset) +{ + void __iomem *vaddr = (void __iomem *)addr; + + offset = offset << 9; + + return readl(vaddr + offset); +} + +static unsigned int (*serial_in)(unsigned long addr, int offset) = mem32_serial_in; +static void (*serial_out)(unsigned long addr, int offset, int value) = mem32_serial_out; + +static int early_serial_putc(unsigned char ch) +{ + unsigned int timeout = 0xffff; + + while ((serial_in(early_serial_base, LSR) & XMTRDY) == 0 && --timeout) + cpu_relax(); + serial_out(early_serial_base, TXR, ch); + + return timeout ? 
0 : -1; +} + +static void early_serial_write(struct console *con, const char *s, unsigned int n) +{ + while (*s && n-- > 0) { + if (*s == '\n') + early_serial_putc('\r'); + early_serial_putc(*s); + s++; + } +} + +static unsigned int uart_get_refclk(void) +{ + return 24000000UL; +} + +static unsigned int uart_calculate_baudrate_divisor(unsigned long baudrate) +{ + unsigned int refclk = uart_get_refclk(); + + return (1 + (2 * refclk) / (baudrate * 16)) / 2; +} + +static __init void early_serial_hw_init(unsigned long baud) +{ + unsigned char c; + unsigned long divisor = uart_calculate_baudrate_divisor(baud); + + serial_out(early_serial_base, LCR, 0x3); /* 8n1 */ + serial_out(early_serial_base, IER, 0); /* no interrupt */ + serial_out(early_serial_base, FCR, 0); /* no fifo */ + serial_out(early_serial_base, MCR, 0x3); /* DTR + RTS */ + + c = serial_in(early_serial_base, LCR); + serial_out(early_serial_base, LCR, c | DLAB); + serial_out(early_serial_base, DLL, divisor & 0xff); + serial_out(early_serial_base, DLH, (divisor >> 8) & 0xff); + serial_out(early_serial_base, LCR, c & ~DLAB); +} + +#define DEFAULT_BAUD 115200 + +static __init void early_serial_init(char *s) +{ + unsigned long baud = DEFAULT_BAUD; + int err; + + if (*s == ',') + ++s; + + if (*s) { + unsigned int port; + static const long bases[] __initconst = { 0xfff0803300000000ULL, + 0xfff0903300000000ULL }; + + if (!strncmp(s, "ttyS", 4)) + s += 4; + err = kstrtouint(s, 10, &port); + if (err || port > 1) + port = 0; + early_serial_base = bases[port]; + s += strcspn(s, ","); + if (*s == ',') + s++; + } + + if (*s) { + err = kstrtoul(s, 0, &baud); + if (err || baud == 0) + baud = DEFAULT_BAUD; + } + + /* These will always be IO based ports */ + serial_in = mem32_serial_in; + serial_out = mem32_serial_out; + + /* Set up the HW */ + early_serial_hw_init(baud); +} + +static struct console early_serial_console = { + .name = "early", + .write = early_serial_write, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + 
+static void early_console_register(struct console *con, int keep_early) +{ + if (con->index != -1) { + pr_crit("ERROR: earlyprintk= %s already used\n", + con->name); + return; + } + early_console = con; + + if (keep_early) + early_console->flags &= ~CON_BOOT; + else + early_console->flags |= CON_BOOT; + + register_console(early_console); +} + +static int __init setup_early_printk(char *buf) +{ + int keep; + + if (!buf) + return 0; + + if (early_console) + return 0; + + keep = (strstr(buf, "keep") != NULL); + + if (!strncmp(buf, "serial", 6)) { + buf += 6; + early_serial_init(buf); + early_console_register(&early_serial_console, keep); + if (!strncmp(buf, ",ttyS", 5)) + buf += 5; + } + + return 0; +} + +early_param("earlyprintk", setup_early_printk); diff --git a/arch/sw_64/kernel/entry.S b/arch/sw_64/kernel/entry.S new file mode 100644 index 000000000000..59c2ff4eb915 --- /dev/null +++ b/arch/sw_64/kernel/entry.S @@ -0,0 +1,306 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Kernel entry-points. + */ + +#include +#include +#include +#include +#include +#include + + .text + .set noat +/* + * This defines the normal kernel pt-regs layout. + * + * regs 9-15 preserved by C code, saving to pt_regs will make + * them easier to be accessed in an unified way. 
+ * regs 16-18 saved by HMcode + * regs 29-30 saved and set up by HMcode + */ + + .macro SAVE_ALL + ldi $sp, -PT_REGS_HM_PS($sp) + stl $0, PT_REGS_R0($sp) + stl $1, PT_REGS_R1($sp) + stl $2, PT_REGS_R2($sp) + stl $3, PT_REGS_R3($sp) + stl $4, PT_REGS_R4($sp) + stl $28, PT_REGS_R28($sp) + stl $5, PT_REGS_R5($sp) + stl $6, PT_REGS_R6($sp) + stl $7, PT_REGS_R7($sp) + stl $8, PT_REGS_R8($sp) + stl $9, PT_REGS_R9($sp) + stl $10, PT_REGS_R10($sp) + stl $11, PT_REGS_R11($sp) + stl $12, PT_REGS_R12($sp) + stl $13, PT_REGS_R13($sp) + stl $14, PT_REGS_R14($sp) + stl $15, PT_REGS_R15($sp) + stl $19, PT_REGS_R19($sp) + stl $20, PT_REGS_R20($sp) + stl $21, PT_REGS_R21($sp) + stl $22, PT_REGS_R22($sp) + stl $23, PT_REGS_R23($sp) + stl $24, PT_REGS_R24($sp) + stl $25, PT_REGS_R25($sp) + stl $26, PT_REGS_R26($sp) + stl $27, PT_REGS_R27($sp) + ldl $1, PT_REGS_HM_R16($sp) + ldl $2, PT_REGS_HM_R17($sp) + ldl $3, PT_REGS_HM_R18($sp) + ldl $4, PT_REGS_HM_GP($sp) + ldl $5, PT_REGS_HM_PC($sp) + ldl $6, PT_REGS_HM_PS($sp) + stl $1, PT_REGS_R16($sp) + stl $2, PT_REGS_R17($sp) + stl $3, PT_REGS_R18($sp) + stl $4, PT_REGS_GP($sp) + stl $5, PT_REGS_PC($sp) + stl $6, PT_REGS_PS($sp) + and $6, 0x8, $7 + beq $7, 1f + sys_call HMC_rdusp + br 2f +1: ldi $0, PT_REGS_SIZE($sp) +2: stl $0, PT_REGS_SP($sp) + ldi $1, NO_SYSCALL + stl $1, PT_REGS_ORIG_R0($sp) + sys_call HMC_rdktp + .endm + + .macro RESTORE_ALL + ldl $16, PT_REGS_SP($sp) + /* skip wrusp if returning to kernel */ + blt $16, 1f + sys_call HMC_wrusp +1: ldl $1, PT_REGS_R16($sp) + ldl $2, PT_REGS_R17($sp) + ldl $3, PT_REGS_R18($sp) + ldl $4, PT_REGS_GP($sp) + ldl $5, PT_REGS_PC($sp) + ldl $6, PT_REGS_PS($sp) + stl $1, PT_REGS_HM_R16($sp) + stl $2, PT_REGS_HM_R17($sp) + stl $3, PT_REGS_HM_R18($sp) + stl $4, PT_REGS_HM_GP($sp) + stl $5, PT_REGS_HM_PC($sp) + stl $6, PT_REGS_HM_PS($sp) + ldl $0, PT_REGS_R0($sp) + ldl $1, PT_REGS_R1($sp) + ldl $2, PT_REGS_R2($sp) + ldl $3, PT_REGS_R3($sp) + ldl $4, PT_REGS_R4($sp) + ldl $5, PT_REGS_R5($sp) + ldl 
$6, PT_REGS_R6($sp) + ldl $7, PT_REGS_R7($sp) + ldl $8, PT_REGS_R8($sp) + ldl $9, PT_REGS_R9($sp) + ldl $10, PT_REGS_R10($sp) + ldl $11, PT_REGS_R11($sp) + ldl $12, PT_REGS_R12($sp) + ldl $13, PT_REGS_R13($sp) + ldl $14, PT_REGS_R14($sp) + ldl $15, PT_REGS_R15($sp) + ldl $19, PT_REGS_R19($sp) + ldl $20, PT_REGS_R20($sp) + ldl $21, PT_REGS_R21($sp) + ldl $22, PT_REGS_R22($sp) + ldl $23, PT_REGS_R23($sp) + ldl $24, PT_REGS_R24($sp) + ldl $25, PT_REGS_R25($sp) + ldl $26, PT_REGS_R26($sp) + ldl $27, PT_REGS_R27($sp) + ldl $28, PT_REGS_R28($sp) + ldi $sp, PT_REGS_HM_PS($sp) + .endm + +/* + * Non-syscall kernel entry points. + */ + + .align 4 + .globl entInt + .ent entInt +entInt: + SAVE_ALL + mov $sp, $19 + call $26, do_entInt + br ret_from_sys_call + .end entInt + + .align 4 + .globl entArith + .ent entArith +entArith: + SAVE_ALL + mov $sp, $18 + call $26, do_entArith + br ret_from_sys_call + .end entArith + + .align 4 + .globl entMM + .ent entMM +entMM: + SAVE_ALL + mov $sp, $19 + call $26, do_page_fault + br ret_from_sys_call + .end entMM + + .align 4 + .globl entIF + .ent entIF +entIF: + SAVE_ALL + mov $sp, $18 + call $26, do_entIF + br ret_from_sys_call + .end entIF + +/* + * Handle unalignment exception. + * We don't handle the "gp" register correctly, but if we fault on a + * gp-register unaligned load/store, something is _very_ wrong in the + * kernel anyway. + */ + .align 4 + .globl entUna + .ent entUna +entUna: + SAVE_ALL + mov $sp, $19 + ldl $0, PT_REGS_PS($sp) + and $0, 8, $0 /* user mode ? */ + beq $0, 1f + call $26, do_entUnaUser /* return to ret_from_syscall */ + br ret_from_sys_call +1: ldl $9, PT_REGS_GP($sp) + call $26, do_entUna + stl $9, PT_REGS_GP($sp) + RESTORE_ALL + sys_call HMC_rti + .end entUna + +/* + * The system call entry point is special. Most importantly, it looks + * like a function call to userspace as far as clobbered registers. We + * do preserve the argument registers (for syscall restarts) and $26 + * (for leaf syscall functions). 
+ * + * So much for theory. We don't take advantage of this yet. + * + * Note that a0-a2 are not saved by HMcode as with the other entry points. + */ + + .align 4 + .globl entSys + .ent entSys +entSys: + SAVE_ALL + stl $16, PT_REGS_R16($sp) + stl $17, PT_REGS_R17($sp) + stl $18, PT_REGS_R18($sp) + mov $sp, $16 + call $26, do_entSys + br ret_from_sys_call + .end entSys + + .align 4 + .globl ret_from_sys_call + .ent ret_from_sys_call +ret_from_sys_call: +#ifdef CONFIG_SUBARCH_C3B + fillcs 0($sp) /* prefetch */ + fillcs 128($sp) /* prefetch */ +#endif + br $27, 1f +1: ldgp $29, 0($27) + /* Make sure need_resched and sigpending don't change between + sampling and the rti. */ + ldi $16, 7 + sys_call HMC_swpipl + ldl $0, PT_REGS_PS($sp) + and $0, 8, $0 + beq $0, restore_all +ret_to_user: + ldw $17, TI_FLAGS($8) + and $17, _TIF_WORK_MASK, $2 + beq $2, restore_all + mov $sp, $16 + call $26, do_notify_resume +restore_all: + RESTORE_ALL + sys_call HMC_rti + .end ret_from_sys_call + +/* + * Integer register context switch + * The callee-saved registers must be saved and restored. + * + * a0: previous task_struct (must be preserved across the switch) + * a1: next task_struct + * + * The value of a0 must be preserved by this function, as that's how + * arguments are passed to schedule_tail. 
+ */ + .align 4 + .globl __switch_to + .ent __switch_to +__switch_to: + .prologue 0 + /* Save context into prev->thread */ + stl $26, TASK_THREAD_RA($16) + stl $30, TASK_THREAD_SP($16) + stl $9, TASK_THREAD_S0($16) + stl $10, TASK_THREAD_S1($16) + stl $11, TASK_THREAD_S2($16) + stl $12, TASK_THREAD_S3($16) + stl $13, TASK_THREAD_S4($16) + stl $14, TASK_THREAD_S5($16) + stl $15, TASK_THREAD_S6($16) + /* Restore context from next->thread */ + ldl $26, TASK_THREAD_RA($17) + ldl $30, TASK_THREAD_SP($17) + ldl $9, TASK_THREAD_S0($17) + ldl $10, TASK_THREAD_S1($17) + ldl $11, TASK_THREAD_S2($17) + ldl $12, TASK_THREAD_S3($17) + ldl $13, TASK_THREAD_S4($17) + ldl $14, TASK_THREAD_S5($17) + ldl $15, TASK_THREAD_S6($17) + mov $17, $8 + sys_call HMC_wrktp + mov $16, $0 + ret + .end __switch_to + +/* + * New processes begin life here. + */ + + .globl ret_from_fork + .align 4 + .ent ret_from_fork +ret_from_fork: + call $26, schedule_tail + br ret_from_sys_call + .end ret_from_fork + +/* + * ... and new kernel threads - here + */ + .align 4 + .globl ret_from_kernel_thread + .ent ret_from_kernel_thread +ret_from_kernel_thread: + call $26, schedule_tail + mov $9, $27 + mov $10, $16 + call $26, ($9) + br ret_to_user + .end ret_from_kernel_thread -- Gitee From 1da491bbc8a2b14bcb773cad4a7684c86a6ee753 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:34 +0800 Subject: [PATCH 0293/2138] anolis: sw64: add some other routines ANBZ: #4688 Add some uncommon routines for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/kernel/dup_print.c | 88 +++++++++++++++++++++++++++++++++++ arch/sw_64/kernel/proc_misc.c | 25 ++++++++++ arch/sw_64/kernel/proto.h | 18 +++++++ arch/sw_64/kernel/segvdbg.c | 26 +++++++++++ arch/sw_64/kernel/tc.c | 36 ++++++++++++++ arch/sw_64/kernel/termios.c | 62 ++++++++++++++++++++++++ 6 files changed, 255 insertions(+) create mode 100644 arch/sw_64/kernel/dup_print.c create mode 100644 arch/sw_64/kernel/proc_misc.c create mode 100644 arch/sw_64/kernel/proto.h create mode 100644 arch/sw_64/kernel/segvdbg.c create mode 100644 arch/sw_64/kernel/tc.c create mode 100644 arch/sw_64/kernel/termios.c diff --git a/arch/sw_64/kernel/dup_print.c b/arch/sw_64/kernel/dup_print.c new file mode 100644 index 000000000000..439ac75feb01 --- /dev/null +++ b/arch/sw_64/kernel/dup_print.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include +#include + +#ifdef CONFIG_SW64_RRK + +#define KERNEL_PRINTK_BUFF_BASE (0x700000UL + __START_KERNEL_map) + +static DEFINE_SPINLOCK(printk_lock); + +unsigned long sw64_printk_offset; +#define PRINTK_SIZE 0x100000UL + +int sw64_printk(const char *fmt, va_list args) +{ + char *sw64_printk_buf; + int printed_len = 0; + unsigned long flags; + + spin_lock_irqsave(&printk_lock, flags); + + sw64_printk_buf = (char *)(KERNEL_PRINTK_BUFF_BASE + sw64_printk_offset); + + if (sw64_printk_offset >= (PRINTK_SIZE-1024)) { //printk wrapped + sw64_printk_offset = 0; + sw64_printk_buf = (char *)(KERNEL_PRINTK_BUFF_BASE + sw64_printk_offset); + memset(sw64_printk_buf, 0, PRINTK_SIZE); + printed_len += vscnprintf(sw64_printk_buf, 1024, fmt, args); + } else { + printed_len += vscnprintf(sw64_printk_buf, 1024, fmt, args); + if (is_in_emul()) { + void __iomem *addr = __va(QEMU_PRINTF_BUFF_BASE); + u64 data = ((u64)sw64_printk_buf & 
0xffffffffUL) + | ((u64)printed_len << 32); + *(u64 *)addr = data; + } + } + sw64_printk_offset += printed_len; + spin_unlock_irqrestore(&printk_lock, flags); + return printed_len; +} +#endif + +#ifdef CONFIG_SW64_RRU +#include + +static DEFINE_SPINLOCK(printf_lock); +#define USER_PRINT_BUFF_BASE (0x600000UL + __START_KERNEL_map) +#define USER_PRINT_BUFF_LEN 0x100000UL +#define USER_MESSAGE_MAX_LEN 0x100000UL +unsigned long sw64_printf_offset; +int sw64_user_printf(const char __user *buf, int len) +{ + static char *user_printf_buf; + unsigned long flags; + + if (current->pid <= 0) + return 0; + + /* + * do not write large (fake) message which may not be from + * STDOUT/STDERR any more as file descriptor could be duplicated + * in a pipe. + */ + if (len > USER_MESSAGE_MAX_LEN) + return 0; + + spin_lock_irqsave(&printf_lock, flags); + user_printf_buf = (char *)(USER_PRINT_BUFF_BASE + sw64_printf_offset); + + if (sw64_printf_offset == 0) + memset(user_printf_buf, 0, USER_PRINT_BUFF_LEN); + + if ((sw64_printf_offset + len) > USER_PRINT_BUFF_LEN) { + sw64_printf_offset = 0; + user_printf_buf = (char *)(USER_PRINT_BUFF_BASE + sw64_printf_offset); + memset(user_printf_buf, 0, USER_PRINT_BUFF_LEN); + } + copy_from_user(user_printf_buf, buf, len); + sw64_printf_offset += len; + spin_unlock_irqrestore(&printf_lock, flags); + return 0; +} +#endif diff --git a/arch/sw_64/kernel/proc_misc.c b/arch/sw_64/kernel/proc_misc.c new file mode 100644 index 000000000000..ca107ec1e05e --- /dev/null +++ b/arch/sw_64/kernel/proc_misc.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +extern const struct seq_operations cpu_active_mask_op; +static int cpu_active_mask_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &cpu_active_mask_op); +} + +static const struct file_operations proc_cpu_active_mask_operations = { + .open = cpu_active_mask_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + 
+static int __init proc_cpu_active_mask_init(void) +{ + proc_create("cpu_active_mask", 0, NULL, &proc_cpu_active_mask_operations); + return 0; +} +fs_initcall(proc_cpu_active_mask_init); diff --git a/arch/sw_64/kernel/proto.h b/arch/sw_64/kernel/proto.h new file mode 100644 index 000000000000..d7222334d1b9 --- /dev/null +++ b/arch/sw_64/kernel/proto.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SW64_KERNEL_PROTO_H +#define _SW64_KERNEL_PROTO_H + +#include +#include +#include +#include + +/* ptrace.c */ +extern int ptrace_set_bpt(struct task_struct *child); +extern int ptrace_cancel_bpt(struct task_struct *child); + +/* traps.c */ +extern void show_regs(struct pt_regs *regs); +extern void die(char *str, struct pt_regs *regs, long err); + +#endif /* _SW64_KERNEL_PROTO_H */ diff --git a/arch/sw_64/kernel/segvdbg.c b/arch/sw_64/kernel/segvdbg.c new file mode 100644 index 000000000000..148d639a08db --- /dev/null +++ b/arch/sw_64/kernel/segvdbg.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Zhi Tongze + * Author: Zhi Tongze + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#include + +#include + +extern bool segv_debug_enabled; + +static int __init segv_debug_init(void) +{ + if (!sw64_debugfs_dir) + return -ENODEV; + + debugfs_create_bool("segv_debug", 0644, + sw64_debugfs_dir, &segv_debug_enabled); + return 0; +} +late_initcall(segv_debug_init); diff --git a/arch/sw_64/kernel/tc.c b/arch/sw_64/kernel/tc.c new file mode 100644 index 000000000000..f2de5ac3d9dc --- /dev/null +++ b/arch/sw_64/kernel/tc.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019, serveros, linyue + */ + + +#include +#include + +/* + * Entry/exit counters that make sure that both CPUs + * run the measurement code at once: + */ +unsigned long time_sync; + +DEFINE_PER_CPU(u64, tc_offset); + +void tc_sync_clear(void) +{ + time_sync = 0; +} + +void tc_sync_ready(void *ignored) +{ + /* make sure we can see time_sync been set to 0 */ + smp_mb(); + while (!time_sync) + cpu_relax(); + + __this_cpu_write(tc_offset, time_sync - rdtc()); +} + +void tc_sync_set(void) +{ + time_sync = rdtc() + __this_cpu_read(tc_offset); +} diff --git a/arch/sw_64/kernel/termios.c b/arch/sw_64/kernel/termios.c new file mode 100644 index 000000000000..5c76a513c896 --- /dev/null +++ b/arch/sw_64/kernel/termios.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ + +int user_termio_to_kernel_termios(struct ktermios *a_termios, struct termio __user *u_termio) +{ + struct ktermios *k_termios = (a_termios); + struct termio k_termio; + int canon, ret; + + ret = copy_from_user(&k_termio, u_termio, sizeof(k_termio)); + if (!ret) { + /* Overwrite only the low bits. 
*/ + *(unsigned short *)&k_termios->c_iflag = k_termio.c_iflag; + *(unsigned short *)&k_termios->c_oflag = k_termio.c_oflag; + *(unsigned short *)&k_termios->c_cflag = k_termio.c_cflag; + *(unsigned short *)&k_termios->c_lflag = k_termio.c_lflag; + canon = k_termio.c_lflag & ICANON; + + k_termios->c_cc[VINTR] = k_termio.c_cc[_VINTR]; + k_termios->c_cc[VQUIT] = k_termio.c_cc[_VQUIT]; + k_termios->c_cc[VERASE] = k_termio.c_cc[_VERASE]; + k_termios->c_cc[VKILL] = k_termio.c_cc[_VKILL]; + k_termios->c_cc[VEOL2] = k_termio.c_cc[_VEOL2]; + k_termios->c_cc[VSWTC] = k_termio.c_cc[_VSWTC]; + k_termios->c_cc[canon ? VEOF : VMIN] = k_termio.c_cc[_VEOF]; + k_termios->c_cc[canon ? VEOL : VTIME] = k_termio.c_cc[_VEOL]; + } + return ret; +} + +/* + * Translate a "termios" structure into a "termio". Ugh. + * + * Note the "fun" _VMIN overloading. + */ +int kernel_termios_to_user_termio(struct termio __user *u_termio, struct ktermios *a_termios) +{ + struct ktermios *k_termios = (a_termios); + struct termio k_termio; + int canon; + + k_termio.c_iflag = k_termios->c_iflag; + k_termio.c_oflag = k_termios->c_oflag; + k_termio.c_cflag = k_termios->c_cflag; + canon = (k_termio.c_lflag = k_termios->c_lflag) & ICANON; + + k_termio.c_line = k_termios->c_line; + k_termio.c_cc[_VINTR] = k_termios->c_cc[VINTR]; + k_termio.c_cc[_VQUIT] = k_termios->c_cc[VQUIT]; + k_termio.c_cc[_VERASE] = k_termios->c_cc[VERASE]; + k_termio.c_cc[_VKILL] = k_termios->c_cc[VKILL]; + k_termio.c_cc[_VEOF] = k_termios->c_cc[canon ? VEOF : VMIN]; + k_termio.c_cc[_VEOL] = k_termios->c_cc[canon ? 
VEOL : VTIME]; + k_termio.c_cc[_VEOL2] = k_termios->c_cc[VEOL2]; + k_termio.c_cc[_VSWTC] = k_termios->c_cc[VSWTC]; + + return copy_to_user(u_termio, &k_termio, sizeof(k_termio)); +} -- Gitee From f1337920ae16e0f1505255e7c766c0ef1e1ff341 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:25 +0800 Subject: [PATCH 0294/2138] anolis: sw64: add some library functions ANBZ: #4688 Add some library functions for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/checksum.h | 126 ++++ arch/sw_64/include/asm/delay.h | 11 + arch/sw_64/include/asm/string.h | 54 ++ arch/sw_64/include/asm/xor.h | 857 +++++++++++++++++++++ arch/sw_64/include/uapi/asm/swab.h | 43 ++ arch/sw_64/lib/Kconfig | 47 ++ arch/sw_64/lib/Makefile | 53 ++ arch/sw_64/lib/checksum.c | 147 ++++ arch/sw_64/lib/clear_page.S | 46 ++ arch/sw_64/lib/clear_user.S | 102 +++ arch/sw_64/lib/copy_page.S | 71 ++ arch/sw_64/lib/copy_user.S | 106 +++ arch/sw_64/lib/csum_ipv6_magic.S | 113 +++ arch/sw_64/lib/csum_partial_copy.c | 154 ++++ arch/sw_64/lib/deep-clear_page.S | 53 ++ arch/sw_64/lib/deep-clear_user.S | 52 ++ arch/sw_64/lib/deep-copy_page.S | 60 ++ arch/sw_64/lib/deep-copy_template.S | 301 ++++++++ arch/sw_64/lib/deep-copy_template_c4.S | 108 +++ arch/sw_64/lib/deep-copy_user.S | 53 ++ arch/sw_64/lib/deep-memcpy.S | 24 + arch/sw_64/lib/deep-memset.S | 97 +++ arch/sw_64/lib/deep-set_template.S | 133 ++++ arch/sw_64/lib/deep-set_template_c4.S | 93 +++ arch/sw_64/lib/divide.S | 190 +++++ arch/sw_64/lib/fls.c | 33 + arch/sw_64/lib/fpreg.c | 992 +++++++++++++++++++++++++ arch/sw_64/lib/iomap.c | 477 ++++++++++++ arch/sw_64/lib/iomap_copy.c | 52 ++ arch/sw_64/lib/memcpy.S | 201 +++++ arch/sw_64/lib/memmove.S | 148 ++++ arch/sw_64/lib/memset.S | 153 ++++ arch/sw_64/lib/strcpy.S | 131 ++++ arch/sw_64/lib/strncpy.S | 156 ++++ 
arch/sw_64/lib/uaccess_flushcache.c | 42 ++ arch/sw_64/lib/udelay.c | 59 ++ 36 files changed, 5538 insertions(+) create mode 100644 arch/sw_64/include/asm/checksum.h create mode 100644 arch/sw_64/include/asm/delay.h create mode 100644 arch/sw_64/include/asm/string.h create mode 100644 arch/sw_64/include/asm/xor.h create mode 100644 arch/sw_64/include/uapi/asm/swab.h create mode 100644 arch/sw_64/lib/Kconfig create mode 100644 arch/sw_64/lib/Makefile create mode 100644 arch/sw_64/lib/checksum.c create mode 100644 arch/sw_64/lib/clear_page.S create mode 100644 arch/sw_64/lib/clear_user.S create mode 100644 arch/sw_64/lib/copy_page.S create mode 100644 arch/sw_64/lib/copy_user.S create mode 100644 arch/sw_64/lib/csum_ipv6_magic.S create mode 100644 arch/sw_64/lib/csum_partial_copy.c create mode 100644 arch/sw_64/lib/deep-clear_page.S create mode 100644 arch/sw_64/lib/deep-clear_user.S create mode 100644 arch/sw_64/lib/deep-copy_page.S create mode 100644 arch/sw_64/lib/deep-copy_template.S create mode 100644 arch/sw_64/lib/deep-copy_template_c4.S create mode 100644 arch/sw_64/lib/deep-copy_user.S create mode 100644 arch/sw_64/lib/deep-memcpy.S create mode 100644 arch/sw_64/lib/deep-memset.S create mode 100644 arch/sw_64/lib/deep-set_template.S create mode 100644 arch/sw_64/lib/deep-set_template_c4.S create mode 100644 arch/sw_64/lib/divide.S create mode 100644 arch/sw_64/lib/fls.c create mode 100644 arch/sw_64/lib/fpreg.c create mode 100644 arch/sw_64/lib/iomap.c create mode 100644 arch/sw_64/lib/iomap_copy.c create mode 100644 arch/sw_64/lib/memcpy.S create mode 100644 arch/sw_64/lib/memmove.S create mode 100644 arch/sw_64/lib/memset.S create mode 100644 arch/sw_64/lib/strcpy.S create mode 100644 arch/sw_64/lib/strncpy.S create mode 100644 arch/sw_64/lib/uaccess_flushcache.c create mode 100644 arch/sw_64/lib/udelay.c diff --git a/arch/sw_64/include/asm/checksum.h b/arch/sw_64/include/asm/checksum.h new file mode 100644 index 000000000000..7f3768290402 --- /dev/null 
+++ b/arch/sw_64/include/asm/checksum.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CHECKSUM_H +#define _ASM_SW64_CHECKSUM_H + +#include + +#define extll(x, y, z) \ + ({__asm__ __volatile__("extll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define exthl(x, y, z) \ + ({__asm__ __volatile__("exthl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define maskll(x, y, z) \ + ({__asm__ __volatile__("maskll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define maskhl(x, y, z) \ + ({__asm__ __volatile__("maskhl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define insll(x, y, z) \ + ({__asm__ __volatile__("insll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define inshl(x, y, z) \ + ({__asm__ __volatile__("inshl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + */ +extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); + +__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +extern __wsum csum_partial(const void *buff, int len, __wsum sum); + +/* + * the same as csum_partial, but copies from src while it + * checksums + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ +#define 
_HAVE_ARCH_COPY_AND_CSUM_FROM_USER +#define _HAVE_ARCH_CSUM_AND_COPY +__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len); + +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len); + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ + +extern __sum16 ip_compute_csum(const void *buff, int len); + +/* + * Fold a partial checksum without adding pseudo headers + */ + +static inline __sum16 csum_fold(__wsum csum) +{ + u32 sum = (__force u32)csum; + + sum = (sum & 0xffff) + (sum >> 16); + sum = (sum & 0xffff) + (sum >> 16); + return (__force __sum16)~sum; +} + +#define _HAVE_ARCH_IPV6_CSUM +extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, __u32 len, + __u8 proto, __wsum sum); + +static inline unsigned short from64to16(unsigned long x) +{ + /* + * Using extract instructions is a bit more efficient + * than the original shift/bitmask version. + */ + + union { + unsigned long ul; + unsigned int ui[2]; + unsigned short us[4]; + } in_v, tmp_v, out_v; + + in_v.ul = x; + tmp_v.ul = (unsigned long)in_v.ui[0] + (unsigned long)in_v.ui[1]; + + /* + * Since the bits of tmp_v.sh[3] are going to always be zero, + * we don't have to bother to add that in. + */ + out_v.ul = (unsigned long)tmp_v.us[0] + (unsigned long)tmp_v.us[1] + + (unsigned long)tmp_v.us[2]; + + /* Similarly, out_v.us[2] is always zero for the final add. 
*/ + return out_v.us[0] + out_v.us[1]; +} + +#endif /* _ASM_SW64_CHECKSUM_H */ diff --git a/arch/sw_64/include/asm/delay.h b/arch/sw_64/include/asm/delay.h new file mode 100644 index 000000000000..f4080753e954 --- /dev/null +++ b/arch/sw_64/include/asm/delay.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_DELAY_H +#define _ASM_SW64_DELAY_H + +extern void __delay(unsigned long loops); +extern void udelay(unsigned long usecs); + +extern void ndelay(unsigned long nsecs); +#define ndelay ndelay + +#endif /* _ASM_SW64_DELAY_H */ diff --git a/arch/sw_64/include/asm/string.h b/arch/sw_64/include/asm/string.h new file mode 100644 index 000000000000..87d93f4cd4d5 --- /dev/null +++ b/arch/sw_64/include/asm/string.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_STRING_H +#define _ASM_SW64_STRING_H + +#ifdef __KERNEL__ + +/* + * GCC of any recent vintage doesn't do stupid things with bcopy. + * EGCS 1.1 knows all about expanding memcpy inline, others don't. + * + * Similarly for a memset with data = 0. + */ + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *dest, const void *src, size_t n); +/* For backward compatibility with modules. Unused otherwise. */ +extern void *__memcpy(void *dest, const void *src, size_t n); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *dest, const void *src, size_t n); + +#define __HAVE_ARCH_MEMSET +extern void *__constant_c_memset(void *s, unsigned long c, size_t n); +extern void *___memset(void *s, int c, size_t n); +extern void *__memset(void *s, int c, size_t n); +extern void *memset(void *s, int c, size_t n); + +#define __HAVE_ARCH_STRCPY +extern char *strcpy(char *dest, const char *src); + +#define __HAVE_ARCH_STRNCPY +extern char *strncpy(char *dest, const char *src, size_t n); + +/* The following routine is like memset except that it writes 16-bit + * aligned values. The DEST and COUNT parameters must be even for + * correct operation. 
+ */ + +#define __HAVE_ARCH_MEMSETW +extern void *__memsetw(void *dest, unsigned short c, size_t count); + +#define memsetw(s, c, n) \ +(__builtin_constant_p(c) \ + ? __constant_c_memset((s), 0x0001000100010001UL * (unsigned short)(c), (n)) \ + : __memsetw((s), (c), (n))) + +#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE +#define __HAVE_ARCH_MEMCPY_FLUSHCACHE +void memcpy_flushcache(void *dst, const void *src, size_t cnt); +#endif + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_STRING_H */ diff --git a/arch/sw_64/include/asm/xor.h b/arch/sw_64/include/asm/xor.h new file mode 100644 index 000000000000..0aff8804f503 --- /dev/null +++ b/arch/sw_64/include/asm/xor.h @@ -0,0 +1,857 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Optimized RAID-5 checksumming functions. + */ + +#ifndef _ASM_SW64_XOR_H +#define _ASM_SW64_XOR_H + +extern void xor_sw64_2(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2); +extern void xor_sw64_3(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3); +extern void xor_sw64_4(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4); +extern void xor_sw64_5(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4, + const unsigned long *__restrict p5); + +extern void xor_sw64_prefetch_2(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2); +extern void xor_sw64_prefetch_3(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3); +extern void xor_sw64_prefetch_4(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict 
p4); +extern void xor_sw64_prefetch_5(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4, + const unsigned long *__restrict p5); + +asm(" \n\ + .text \n\ + .align 3 \n\ + .ent xor_sw64_2 \n\ +xor_sw64_2: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +2: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 8($17) \n\ + ldl $3, 8($18) \n\ + \n\ + ldl $4, 16($17) \n\ + ldl $5, 16($18) \n\ + ldl $6, 24($17) \n\ + ldl $7, 24($18) \n\ + \n\ + ldl $19, 32($17) \n\ + ldl $20, 32($18) \n\ + ldl $21, 40($17) \n\ + ldl $22, 40($18) \n\ + \n\ + ldl $23, 48($17) \n\ + ldl $24, 48($18) \n\ + ldl $25, 56($17) \n\ + xor $0, $1, $0 # 7 cycles from $1 load \n\ + \n\ + ldl $27, 56($18) \n\ + xor $2, $3, $2 \n\ + stl $0, 0($17) \n\ + xor $4, $5, $4 \n\ + \n\ + stl $2, 8($17) \n\ + xor $6, $7, $6 \n\ + stl $4, 16($17) \n\ + xor $19, $20, $19 \n\ + \n\ + stl $6, 24($17) \n\ + xor $21, $22, $21 \n\ + stl $19, 32($17) \n\ + xor $23, $24, $23 \n\ + \n\ + stl $21, 40($17) \n\ + xor $25, $27, $25 \n\ + stl $23, 48($17) \n\ + subl $16, 1, $16 \n\ + \n\ + stl $25, 56($17) \n\ + addl $17, 64, $17 \n\ + addl $18, 64, $18 \n\ + bgt $16, 2b \n\ + \n\ + ret \n\ + .end xor_sw64_2 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_3 \n\ +xor_sw64_3: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +3: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 8($17) \n\ + \n\ + ldl $4, 8($18) \n\ + ldl $6, 16($17) \n\ + ldl $7, 16($18) \n\ + ldl $21, 24($17) \n\ + \n\ + ldl $22, 24($18) \n\ + ldl $24, 32($17) \n\ + ldl $25, 32($18) \n\ + ldl $5, 8($19) \n\ + \n\ + ldl $20, 16($19) \n\ + ldl $23, 24($19) \n\ + ldl $27, 32($19) \n\ + \n\ + xor $0, $1, $1 # 8 cycles from $0 load \n\ + xor $3, $4, $4 # 6 cycles from $4 load \n\ + xor $6, $7, $7 # 6 cycles from $7 load \n\ + xor $21, $22, $22 # 5 cycles from $22 load \n\ + \n\ + xor $1, $2, $2 # 9 cycles from 
$2 load \n\ + xor $24, $25, $25 # 5 cycles from $25 load \n\ + stl $2, 0($17) \n\ + xor $4, $5, $5 # 6 cycles from $5 load \n\ + \n\ + stl $5, 8($17) \n\ + xor $7, $20, $20 # 7 cycles from $20 load \n\ + stl $20, 16($17) \n\ + xor $22, $23, $23 # 7 cycles from $23 load \n\ + \n\ + stl $23, 24($17) \n\ + xor $25, $27, $27 # 7 cycles from $27 load \n\ + stl $27, 32($17) \n\ + \n\ + ldl $0, 40($17) \n\ + ldl $1, 40($18) \n\ + ldl $3, 48($17) \n\ + ldl $4, 48($18) \n\ + \n\ + ldl $6, 56($17) \n\ + ldl $7, 56($18) \n\ + ldl $2, 40($19) \n\ + ldl $5, 48($19) \n\ + \n\ + ldl $20, 56($19) \n\ + xor $0, $1, $1 # 4 cycles from $1 load \n\ + xor $3, $4, $4 # 5 cycles from $4 load \n\ + xor $6, $7, $7 # 5 cycles from $7 load \n\ + \n\ + xor $1, $2, $2 # 4 cycles from $2 load \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + stl $2, 40($17) \n\ + xor $7, $20, $20 # 4 cycles from $20 load \n\ + \n\ + stl $5, 48($17) \n\ + subl $16, 1, $16 \n\ + stl $20, 56($17) \n\ + addl $19, 64, $19 \n\ + \n\ + addl $18, 64, $18 \n\ + addl $17, 64, $17 \n\ + bgt $16, 3b \n\ + ret \n\ + .end xor_sw64_3 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_4 \n\ +xor_sw64_4: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +4: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ + ldl $4, 8($17) \n\ + ldl $5, 8($18) \n\ + ldl $6, 8($19) \n\ + ldl $7, 8($20) \n\ + \n\ + ldl $21, 16($17) \n\ + ldl $22, 16($18) \n\ + ldl $23, 16($19) \n\ + ldl $24, 16($20) \n\ + \n\ + ldl $25, 24($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $27, 24($18) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 24($19) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $3, 0($17) \n\ + xor $6, $7, $7 \n\ + xor $21, $22, $22 # 7 cycles from $22 load \n\ + xor $5, $7, $7 \n\ + \n\ + stl $7, 8($17) \n\ + xor $23, $24, $24 # 7 cycles from $24 load \n\ + ldl $2, 32($17) \n\ + xor $22, $24, $24 \n\ + \n\ + ldl 
$3, 32($18) \n\ + ldl $4, 32($19) \n\ + ldl $5, 32($20) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + \n\ + ldl $6, 40($17) \n\ + ldl $7, 40($18) \n\ + ldl $21, 40($19) \n\ + ldl $22, 40($20) \n\ + \n\ + stl $24, 16($17) \n\ + xor $0, $1, $1 # 9 cycles from $1 load \n\ + xor $2, $3, $3 # 5 cycles from $3 load \n\ + xor $27, $1, $1 \n\ + \n\ + stl $1, 24($17) \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + ldl $23, 48($17) \n\ + ldl $24, 48($18) \n\ + \n\ + ldl $25, 48($19) \n\ + xor $3, $5, $5 \n\ + ldl $27, 48($20) \n\ + ldl $0, 56($17) \n\ + \n\ + ldl $1, 56($18) \n\ + ldl $2, 56($19) \n\ + xor $6, $7, $7 # 8 cycles from $6 load \n\ + ldl $3, 56($20) \n\ + \n\ + stl $5, 32($17) \n\ + xor $21, $22, $22 # 8 cycles from $22 load \n\ + xor $7, $22, $22 \n\ + xor $23, $24, $24 # 5 cycles from $24 load \n\ + \n\ + stl $22, 40($17) \n\ + xor $25, $27, $27 # 5 cycles from $27 load \n\ + xor $24, $27, $27 \n\ + xor $0, $1, $1 # 5 cycles from $1 load \n\ + \n\ + stl $27, 48($17) \n\ + xor $2, $3, $3 # 4 cycles from $3 load \n\ + xor $1, $3, $3 \n\ + subl $16, 1, $16 \n\ + \n\ + stl $3, 56($17) \n\ + addl $20, 64, $20 \n\ + addl $19, 64, $19 \n\ + addl $18, 64, $18 \n\ + \n\ + addl $17, 64, $17 \n\ + bgt $16, 4b \n\ + ret \n\ + .end xor_sw64_4 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_5 \n\ +xor_sw64_5: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +5: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ + ldl $4, 0($21) \n\ + ldl $5, 8($17) \n\ + ldl $6, 8($18) \n\ + ldl $7, 8($19) \n\ + \n\ + ldl $22, 8($20) \n\ + ldl $23, 8($21) \n\ + ldl $24, 16($17) \n\ + ldl $25, 16($18) \n\ + \n\ + ldl $27, 16($19) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $28, 16($20) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 16($21) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($17) \n\ + xor $3, $4, $4 # 7 cycles from $4 load \n\ + \n\ + stl $4, 0($17) \n\ + xor $5, $6, $6 # 7 cycles from $6 load \n\ + xor 
$7, $22, $22 # 7 cycles from $22 load \n\ + xor $6, $23, $23 # 7 cycles from $23 load \n\ + \n\ + ldl $2, 24($18) \n\ + xor $22, $23, $23 \n\ + ldl $3, 24($19) \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $23, 8($17) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + ldl $4, 24($20) \n\ + xor $28, $0, $0 # 7 cycles from $0 load \n\ + \n\ + ldl $5, 24($21) \n\ + xor $27, $0, $0 \n\ + ldl $6, 32($17) \n\ + ldl $7, 32($18) \n\ + \n\ + stl $0, 16($17) \n\ + xor $1, $2, $2 # 6 cycles from $2 load \n\ + ldl $22, 32($19) \n\ + xor $3, $4, $4 # 4 cycles from $4 load \n\ + \n\ + ldl $23, 32($20) \n\ + xor $2, $4, $4 \n\ + ldl $24, 32($21) \n\ + ldl $25, 40($17) \n\ + \n\ + ldl $27, 40($18) \n\ + ldl $28, 40($19) \n\ + ldl $0, 40($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $5, 24($17) \n\ + xor $6, $7, $7 # 7 cycles from $7 load \n\ + ldl $1, 40($21) \n\ + ldl $2, 48($17) \n\ + \n\ + ldl $3, 48($18) \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + ldl $4, 48($19) \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + \n\ + ldl $5, 48($20) \n\ + xor $22, $24, $24 \n\ + ldl $6, 48($21) \n\ + xor $25, $27, $27 # 7 cycles from $27 load \n\ + \n\ + stl $24, 32($17) \n\ + xor $27, $28, $28 # 8 cycles from $28 load \n\ + ldl $7, 56($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + \n\ + ldl $22, 56($18) \n\ + ldl $23, 56($19) \n\ + ldl $24, 56($20) \n\ + ldl $25, 56($21) \n\ + \n\ + xor $28, $1, $1 \n\ + xor $2, $3, $3 # 9 cycles from $3 load \n\ + xor $3, $4, $4 # 9 cycles from $4 load \n\ + xor $5, $6, $6 # 8 cycles from $6 load \n\ + \n\ + stl $1, 40($17) \n\ + xor $4, $6, $6 \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + \n\ + stl $6, 48($17) \n\ + xor $22, $24, $24 \n\ + subl $16, 1, $16 \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $25, 56($17) \n\ + addl $21, 64, $21 \n\ + addl $20, 64, $20 \n\ + addl $19, 64, $19 \n\ + \n\ + addl $18, 64, $18 
\n\ + addl $17, 64, $17 \n\ + bgt $16, 5b \n\ + ret \n\ + .end xor_sw64_5 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_2 \n\ +xor_sw64_prefetch_2: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + .align 4 \n\ +2: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 8($17) \n\ + ldl $3, 8($18) \n\ + \n\ + ldl $4, 16($17) \n\ + ldl $5, 16($18) \n\ + ldl $6, 24($17) \n\ + ldl $7, 24($18) \n\ + \n\ + ldl $19, 32($17) \n\ + ldl $20, 32($18) \n\ + ldl $21, 40($17) \n\ + ldl $22, 40($18) \n\ + \n\ + ldl $23, 48($17) \n\ + ldl $24, 48($18) \n\ + ldl $25, 56($17) \n\ + ldl $27, 56($18) \n\ + \n\ + fillde 256($17) \n\ + xor $0, $1, $0 # 8 cycles from $1 load \n\ + fillde 256($18) \n\ + xor $2, $3, $2 \n\ + \n\ + stl $0, 0($17) \n\ + xor $4, $5, $4 \n\ + stl $2, 8($17) \n\ + xor $6, $7, $6 \n\ + \n\ + stl $4, 16($17) \n\ + xor $19, $20, $19 \n\ + stl $6, 24($17) \n\ + xor $21, $22, $21 \n\ + \n\ + stl $19, 32($17) \n\ + xor $23, $24, $23 \n\ + stl $21, 40($17) \n\ + xor $25, $27, $25 \n\ + \n\ + stl $23, 48($17) \n\ + subl $16, 1, $16 \n\ + stl $25, 56($17) \n\ + addl $17, 64, $17 \n\ + \n\ + addl $18, 64, $18 \n\ + bgt $16, 2b \n\ + ret \n\ + .end xor_sw64_prefetch_2 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_3 \n\ +xor_sw64_prefetch_3: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + fillde 0($19) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + fillde 64($19) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + fillde 128($19) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + fillde 192($19) \n\ + .align 4 \n\ +3: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 8($17) \n\ + \n\ + ldl $4, 8($18) \n\ + ldl $6, 16($17) \n\ + ldl $7, 16($18) \n\ + ldl $21, 24($17) \n\ + \n\ + ldl $22, 
24($18) \n\ + ldl $24, 32($17) \n\ + ldl $25, 32($18) \n\ + ldl $5, 8($19) \n\ + \n\ + ldl $20, 16($19) \n\ + ldl $23, 24($19) \n\ + ldl $27, 32($19) \n\ + \n\ + xor $0, $1, $1 # 8 cycles from $0 load \n\ + xor $3, $4, $4 # 7 cycles from $4 load \n\ + xor $6, $7, $7 # 6 cycles from $7 load \n\ + xor $21, $22, $22 # 5 cycles from $22 load \n\ + \n\ + xor $1, $2, $2 # 9 cycles from $2 load \n\ + xor $24, $25, $25 # 5 cycles from $25 load \n\ + stl $2, 0($17) \n\ + xor $4, $5, $5 # 6 cycles from $5 load \n\ + \n\ + stl $5, 8($17) \n\ + xor $7, $20, $20 # 7 cycles from $20 load \n\ + stl $20, 16($17) \n\ + xor $22, $23, $23 # 7 cycles from $23 load \n\ + \n\ + stl $23, 24($17) \n\ + xor $25, $27, $27 # 7 cycles from $27 load \n\ + stl $27, 32($17) \n\ + \n\ + ldl $0, 40($17) \n\ + ldl $1, 40($18) \n\ + ldl $3, 48($17) \n\ + ldl $4, 48($18) \n\ + \n\ + ldl $6, 56($17) \n\ + ldl $7, 56($18) \n\ + ldl $2, 40($19) \n\ + ldl $5, 48($19) \n\ + \n\ + ldl $20, 56($19) \n\ + fillde 256($17) \n\ + fillde 256($18) \n\ + fillde 256($19) \n\ + \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + xor $3, $4, $4 # 5 cycles from $4 load \n\ + xor $6, $7, $7 # 5 cycles from $7 load \n\ + xor $1, $2, $2 # 4 cycles from $2 load \n\ + \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + xor $7, $20, $20 # 4 cycles from $20 load \n\ + stl $2, 40($17) \n\ + subl $16, 1, $16 \n\ + \n\ + stl $5, 48($17) \n\ + addl $19, 64, $19 \n\ + stl $20, 56($17) \n\ + addl $18, 64, $18 \n\ + \n\ + addl $17, 64, $17 \n\ + bgt $16, 3b \n\ + ret \n\ + .end xor_sw64_prefetch_3 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_4 \n\ +xor_sw64_prefetch_4: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + fillde 0($19) \n\ + fillde 0($20) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + fillde 64($19) \n\ + fillde 64($20) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + fillde 128($19) \n\ + fillde 128($20) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + 
fillde 192($19) \n\ + fillde 192($20) \n\ + .align 4 \n\ +4: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ + ldl $4, 8($17) \n\ + ldl $5, 8($18) \n\ + ldl $6, 8($19) \n\ + ldl $7, 8($20) \n\ + \n\ + ldl $21, 16($17) \n\ + ldl $22, 16($18) \n\ + ldl $23, 16($19) \n\ + ldl $24, 16($20) \n\ + \n\ + ldl $25, 24($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $27, 24($18) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 24($19) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $3, 0($17) \n\ + xor $6, $7, $7 \n\ + xor $21, $22, $22 # 7 cycles from $22 load \n\ + xor $5, $7, $7 \n\ + \n\ + stl $7, 8($17) \n\ + xor $23, $24, $24 # 7 cycles from $24 load \n\ + ldl $2, 32($17) \n\ + xor $22, $24, $24 \n\ + \n\ + ldl $3, 32($18) \n\ + ldl $4, 32($19) \n\ + ldl $5, 32($20) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + \n\ + ldl $6, 40($17) \n\ + ldl $7, 40($18) \n\ + ldl $21, 40($19) \n\ + ldl $22, 40($20) \n\ + \n\ + stl $24, 16($17) \n\ + xor $0, $1, $1 # 9 cycles from $1 load \n\ + xor $2, $3, $3 # 5 cycles from $3 load \n\ + xor $27, $1, $1 \n\ + \n\ + stl $1, 24($17) \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + ldl $23, 48($17) \n\ + xor $3, $5, $5 \n\ + \n\ + ldl $24, 48($18) \n\ + ldl $25, 48($19) \n\ + ldl $27, 48($20) \n\ + ldl $0, 56($17) \n\ + \n\ + ldl $1, 56($18) \n\ + ldl $2, 56($19) \n\ + ldl $3, 56($20) \n\ + xor $6, $7, $7 # 8 cycles from $6 load \n\ + \n\ + fillde 256($17) \n\ + xor $21, $22, $22 # 8 cycles from $22 load \n\ + fillde 256($18) \n\ + xor $7, $22, $22 \n\ + \n\ + fillde 256($19) \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + fillde 256($20) \n\ + xor $25, $27, $27 # 6 cycles from $27 load \n\ + \n\ + stl $5, 32($17) \n\ + xor $24, $27, $27 \n\ + xor $0, $1, $1 # 7 cycles from $1 load \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + stl $22, 40($17) \n\ + xor $1, $3, $3 \n\ + stl $27, 
48($17) \n\ + subl $16, 1, $16 \n\ + \n\ + stl $3, 56($17) \n\ + addl $20, 64, $20 \n\ + addl $19, 64, $19 \n\ + addl $18, 64, $18 \n\ + \n\ + addl $17, 64, $17 \n\ + bgt $16, 4b \n\ + ret \n\ + .end xor_sw64_prefetch_4 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_5 \n\ +xor_sw64_prefetch_5: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + fillde 0($19) \n\ + fillde 0($20) \n\ + fillde 0($21) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + fillde 64($19) \n\ + fillde 64($20) \n\ + fillde 64($21) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + fillde 128($19) \n\ + fillde 128($20) \n\ + fillde 128($21) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + fillde 192($19) \n\ + fillde 192($20) \n\ + fillde 192($21) \n\ + .align 4 \n\ +5: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ + ldl $4, 0($21) \n\ + ldl $5, 8($17) \n\ + ldl $6, 8($18) \n\ + ldl $7, 8($19) \n\ + \n\ + ldl $22, 8($20) \n\ + ldl $23, 8($21) \n\ + ldl $24, 16($17) \n\ + ldl $25, 16($18) \n\ + \n\ + ldl $27, 16($19) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $28, 16($20) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 16($21) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($17) \n\ + xor $3, $4, $4 # 7 cycles from $4 load \n\ + \n\ + stl $4, 0($17) \n\ + xor $5, $6, $6 # 7 cycles from $6 load \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + xor $6, $23, $23 # 7 cycles from $23 load \n\ + \n\ + ldl $2, 24($18) \n\ + xor $22, $23, $23 \n\ + ldl $3, 24($19) \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $23, 8($17) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + ldl $4, 24($20) \n\ + xor $28, $0, $0 # 7 cycles from $0 load \n\ + \n\ + ldl $5, 24($21) \n\ + xor $27, $0, $0 \n\ + ldl $6, 32($17) \n\ + ldl $7, 32($18) \n\ + \n\ + stl $0, 16($17) \n\ + xor $1, $2, $2 # 6 cycles from $2 load \n\ + ldl $22, 32($19) \n\ + xor $3, $4, $4 # 4 cycles 
from $4 load \n\ + \n\ + ldl $23, 32($20) \n\ + xor $2, $4, $4 \n\ + ldl $24, 32($21) \n\ + ldl $25, 40($17) \n\ + \n\ + ldl $27, 40($18) \n\ + ldl $28, 40($19) \n\ + ldl $0, 40($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $5, 24($17) \n\ + xor $6, $7, $7 # 7 cycles from $7 load \n\ + ldl $1, 40($21) \n\ + ldl $2, 48($17) \n\ + \n\ + ldl $3, 48($18) \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + ldl $4, 48($19) \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + \n\ + ldl $5, 48($20) \n\ + xor $22, $24, $24 \n\ + ldl $6, 48($21) \n\ + xor $25, $27, $27 # 7 cycles from $27 load \n\ + \n\ + stl $24, 32($17) \n\ + xor $27, $28, $28 # 8 cycles from $28 load \n\ + ldl $7, 56($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + \n\ + ldl $22, 56($18) \n\ + ldl $23, 56($19) \n\ + ldl $24, 56($20) \n\ + ldl $25, 56($21) \n\ + \n\ + fillde 256($17) \n\ + xor $28, $1, $1 \n\ + fillde 256($18) \n\ + xor $2, $3, $3 # 9 cycles from $3 load \n\ + \n\ + fillde 256($19) \n\ + xor $3, $4, $4 # 9 cycles from $4 load \n\ + fillde 256($20) \n\ + xor $5, $6, $6 # 8 cycles from $6 load \n\ + \n\ + stl $1, 40($17) \n\ + xor $4, $6, $6 \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + \n\ + stl $6, 48($17) \n\ + xor $22, $24, $24 \n\ + fillde 256($21) \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $25, 56($17) \n\ + subl $16, 1, $16 \n\ + addl $21, 64, $21 \n\ + addl $20, 64, $20 \n\ + \n\ + addl $19, 64, $19 \n\ + addl $18, 64, $18 \n\ + addl $17, 64, $17 \n\ + bgt $16, 5b \n\ + \n\ + ret \n\ + .end xor_sw64_prefetch_5 \n\ +"); + +static struct xor_block_template xor_block_sw64 = { + .name = "sw64", + .do_2 = xor_sw64_2, + .do_3 = xor_sw64_3, + .do_4 = xor_sw64_4, + .do_5 = xor_sw64_5, +}; + +static struct xor_block_template xor_block_sw64_prefetch = { + .name = "sw64 prefetch", + .do_2 = xor_sw64_prefetch_2, + .do_3 = xor_sw64_prefetch_3, + .do_4 = xor_sw64_prefetch_4, + .do_5 = 
xor_sw64_prefetch_5, +}; + +/* For grins, also test the generic routines. */ +#include + +#undef XOR_TRY_TEMPLATES +#define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_sw64); \ + xor_speed(&xor_block_sw64_prefetch); \ + } while (0) + +/* Force the use of sw64_prefetch as it is significantly + * faster in the cold cache case. + */ +#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sw64_prefetch) + +#endif /* _ASM_SW64_XOR_H */ diff --git a/arch/sw_64/include/uapi/asm/swab.h b/arch/sw_64/include/uapi/asm/swab.h new file mode 100644 index 000000000000..275661b346ac --- /dev/null +++ b/arch/sw_64/include/uapi/asm/swab.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SWAB_H +#define _UAPI_ASM_SW64_SWAB_H + +#include +#include +#include + +#ifdef __GNUC__ + +static inline __attribute_const__ __u32 __arch_swab32(__u32 x) +{ + /* + * Unfortunately, we can't use the 6 instruction sequence + * on sw64 since the latency of the UNPKBW is 3, which is + * pretty hard to hide. 
Just in case a future implementation + * has a lower latency, here's the sequence (also by Mike Burrows) + * + * UNPKBW a0, v0 v0: 00AA00BB00CC00DD + * SLL v0, 24, a0 a0: BB00CC00DD000000 + * BIS v0, a0, a0 a0: BBAACCBBDDCC00DD + * EXTWL a0, 6, v0 v0: 000000000000BBAA + * ZAP a0, 0xf3, a0 a0: 00000000DDCC0000 + * ADDL a0, v0, v0 v0: ssssssssDDCCBBAA + */ + + __u64 t0, t1, t2, t3; + + t0 = __kernel_inshw(x, 7); /* t0 : 0000000000AABBCC */ + t1 = __kernel_inslh(x, 3); /* t1 : 000000CCDD000000 */ + t1 |= t0; /* t1 : 000000CCDDAABBCC */ + t2 = t1 >> 16; /* t2 : 0000000000CCDDAA */ + t0 = t1 & 0xFF00FF00; /* t0 : 00000000DD00BB00 */ + t3 = t2 & 0x00FF00FF; /* t3 : 0000000000CC00AA */ + t1 = t0 + t3; /* t1 : ssssssssDDCCBBAA */ + + return t1; +} +#define __arch_swab32 __arch_swab32 + +#endif /* __GNUC__ */ + +#endif /* _UAPI_ASM_SW64_SWAB_H */ diff --git a/arch/sw_64/lib/Kconfig b/arch/sw_64/lib/Kconfig new file mode 100644 index 000000000000..e22751a457ce --- /dev/null +++ b/arch/sw_64/lib/Kconfig @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: GPL-2.0 +menu "Library optimization options" + +config DEEP_CLEAR_PAGE + bool "Clear Page with SIMD optimization" + default y + help + This option enables the use of SIMD version of clear page routine. + Say N if you want to use the generic version. + +config DEEP_CLEAR_USER + bool "Clear User with SIMD optimization" + default y + help + This option enables the use of SIMD version of clear user routine. + Say N if you want to use the generic version. + +config DEEP_COPY_PAGE + bool "Copy Page with SIMD optimization" + default y + help + This option enables the use of SIMD version of copy page routine. + Say N if you want to use the generic version. + +config DEEP_COPY_USER + bool "Copy User with SIMD optimization" + default y + help + This option enables the use of SIMD version of copy user routine. + Say N if you want to use the generic version. 
+ + +config DEEP_MEMCPY + bool "Memory Copy with SIMD optimization" + default y + help + This option enables the use of SIMD version of memory copy routine. + Say N if you want to use the generic version. + +config DEEP_MEMSET + bool "Memory Set with SIMD optimization" + default y + help + This option enables the use of SIMD version of memory set routine. + Say N if you want to use the generic version. + +endmenu diff --git a/arch/sw_64/lib/Makefile b/arch/sw_64/lib/Makefile new file mode 100644 index 000000000000..e6455bb51139 --- /dev/null +++ b/arch/sw_64/lib/Makefile @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for sw-specific library files.. +# + +asflags-y := $(KBUILD_CFLAGS) +ccflags-y := -Werror + +lib-y = __divlu.o __remlu.o __divwu.o __remwu.o \ + udelay.o \ + memmove.o \ + checksum.o \ + csum_partial_copy.o \ + fpreg.o \ + strcpy.o \ + strncpy.o \ + fls.o \ + csum_ipv6_magic.o + +lib-clear_page-y := clear_page.o +lib-clear_page-$(CONFIG_DEEP_CLEAR_PAGE) := deep-clear_page.o + +lib-clear_user-y := clear_user.o +lib-clear_user-$(CONFIG_DEEP_CLEAR_USER) := deep-clear_user.o + +lib-copy_page-y := copy_page.o +lib-copy_page-$(CONFIG_DEEP_COPY_PAGE) := deep-copy_page.o + +lib-copy_user-y := copy_user.o +lib-copy_user-$(CONFIG_DEEP_COPY_USER) := deep-copy_user.o + +lib-memcpy-y := memcpy.o +lib-memcpy-$(CONFIG_DEEP_MEMCPY) := deep-memcpy.o + +lib-memset-y := memset.o +lib-memset-$(CONFIG_DEEP_MEMSET) := deep-memset.o + +lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o + +lib-y += $(lib-clear_page-y) $(lib-clear_user-y) $(lib-copy_page-y) $(lib-copy_user-y) $(lib-memcpy-y) $(lib-memset-y) + +obj-y = iomap.o +obj-y += iomap_copy.o + +# The division routines are built from single source, with different defines. 
+AFLAGS___divlu.o = -DDIV +AFLAGS___remlu.o = -DREM +AFLAGS___divwu.o = -DDIV -DINTSIZE +AFLAGS___remwu.o = -DREM -DINTSIZE + +$(addprefix $(obj)/,__divlu.o __remlu.o __divwu.o __remwu.o): \ + $(src)/divide.S FORCE + $(call if_changed_rule,as_o_S) diff --git a/arch/sw_64/lib/checksum.c b/arch/sw_64/lib/checksum.c new file mode 100644 index 000000000000..d1314caa15bf --- /dev/null +++ b/arch/sw_64/lib/checksum.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file contains network checksum routines that are better done + * in an architecture-specific manner due to speed.. + * Comments in other versions indicate that the algorithms are from RFC1071 + */ +#include +#include +#include +#include + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented. + */ +__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) +{ + return (__force __sum16)~from64to16( + (__force u64)saddr + (__force u64)daddr + + (__force u64)sum + ((len + proto) << 8)); +} +EXPORT_SYMBOL(csum_tcpudp_magic); + +__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) +{ + unsigned long result; + + result = (__force u64)saddr + (__force u64)daddr + + (__force u64)sum + ((len + proto) << 8); + + /* + * Fold down to 32-bits so we don't lose in the typedef-less + * network stack. + * + * 64 to 33 + */ + result = (result & 0xffffffff) + (result >> 32); + /* 33 to 32 */ + result = (result & 0xffffffff) + (result >> 32); + return (__force __wsum)result; +} +EXPORT_SYMBOL(csum_tcpudp_nofold); + +/* + * Do a 64-bit checksum on an arbitrary memory area.. 
+ */ +static inline unsigned long do_csum(const unsigned char *buff, int len) +{ + const unsigned long *dst = (unsigned long *)buff; + unsigned long doff = 7 & (unsigned long) dst; + unsigned long checksum = 0; + unsigned long word, patch; + unsigned long partial_dest, second_dest; + + len -= 8; + + if (!doff) { + while (len > 0) { + word = *dst; + checksum += word; + checksum += (checksum < word); + dst++; + len -= 8; + } + + len += 8; + word = *dst; + + if (len != 8) + maskll(word, len, word); + + checksum += word; + checksum += (checksum < word); + } else { + dst = (unsigned long *)((unsigned long)dst & (~7UL)); + word = *dst; + inshl(word, 8 - doff, partial_dest); + dst++; + + while (len >= 0) { + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + checksum += patch; + checksum += (checksum < patch); + inshl(word, 8 - doff, partial_dest); + dst++; + len -= 8; + } + + len += 8; + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + maskll(patch, len, patch); + checksum += patch; + checksum += (checksum < patch); + } + + return from64to16(checksum); +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + */ +__sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + return (__force __sum16)~do_csum(iph, ihl*4); +} +EXPORT_SYMBOL(ip_fast_csum); + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +__wsum csum_partial(const void *buff, int len, __wsum sum) +{ + unsigned long result = do_csum(buff, len); + + /* add in old sum, and carry.. 
*/ + result += (__force u32)sum; + /* 32+c bits -> 32 bits */ + result = (result & 0xffffffff) + (result >> 32); + return (__force __wsum)result; +} +EXPORT_SYMBOL(csum_partial); + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ +__sum16 ip_compute_csum(const void *buff, int len) +{ + return (__force __sum16)~from64to16(do_csum(buff, len)); +} +EXPORT_SYMBOL(ip_compute_csum); diff --git a/arch/sw_64/lib/clear_page.S b/arch/sw_64/lib/clear_page.S new file mode 100644 index 000000000000..e1cc7cddfd2f --- /dev/null +++ b/arch/sw_64/lib/clear_page.S @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Zero an entire page. + */ +#include + .text + .align 4 + .global clear_page + .ent clear_page +clear_page: + .prologue 0 + + ldi $0, 64 + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + + stl_nc $31, 0x0($16) + stl_nc $31, 0x8($16) + stl_nc $31, 0x10($16) + stl_nc $31, 0x18($16) + + stl_nc $31, 0x20($16) + stl_nc $31, 0x28($16) + stl_nc $31, 0x30($16) + stl_nc $31, 0x38($16) + + stl_nc $31, 0x40($16) + stl_nc $31, 0x48($16) + stl_nc $31, 0x50($16) + stl_nc $31, 0x58($16) + + stl_nc $31, 0x60($16) + stl_nc $31, 0x68($16) + subl $0, 1, $0 + + stl_nc $31, 0x70($16) + stl_nc $31, 0x78($16) + addl $16, 128, $16 + bne $0, 1b + + memb + ret + + .end clear_page + EXPORT_SYMBOL(clear_page) diff --git a/arch/sw_64/lib/clear_user.S b/arch/sw_64/lib/clear_user.S new file mode 100644 index 000000000000..5ac77fc8ca0d --- /dev/null +++ b/arch/sw_64/lib/clear_user.S @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Contributed by Richard Henderson + * + * Zero user space, handling exceptions as we go. + * + * We have to make sure that $0 is always up-to-date and contains the + * right "bytes left to zero" value (and that it is updated only _after_ + * a successful copy). There is also some rather minor exception setup + * stuff. + * + */ +#include +/* Allow an exception for an insn; exit if we get one. 
*/ +#define EX(x,y...) \ + 99: x,##y; \ + .section __ex_table,"a"; \ + .long 99b - .; \ + ldi $31, $exception-99b($31); \ + .previous + + .set noat + .set noreorder + .align 4 + + .globl __clear_user + .ent __clear_user + .frame $30, 0, $26 + .prologue 0 +__clear_user: + and $17, $17, $0 + and $16, 7, $4 + beq $0, $zerolength + addl $0, $4, $1 + and $1, 7, $2 + srl $1, 3, $1 + beq $4, $loop + + subl $4, 8, $4 + addl $0, $4, $0 + beq $1, $oneword + +$head: + EX(stb $31, 0($16)) + addl $16, 1, $16 + addl $4, 1, $4 + bne $4, $head + subl $1, 1, $1 + br $loop + unop + +$oneword: + EX(stb $31, 0($16)) + addl $16, 1, $16 + addl $4, 1, $4 + bne $4, $oneword + clr $0 + +$zerolength: +$exception: + ret $31, ($26), 1 + +$loop: + and $1, 3, $4 + beq $4, 1f + +0: EX(stl $31, 0($16)) + subl $0, 8, $0 + subl $4, 1, $4 + addl $16, 8, $16 + bne $4, 0b + unop + +1: bic $1, 3, $1 + beq $1, $tail + +2: EX(stl $31, 0($16)) + subl $0, 8, $0 + EX(stl $31, 8($16)) + subl $0, 8, $0 + EX(stl $31, 16($16)) + subl $0, 8, $0 + EX(stl $31, 24($16)) + subl $0, 8, $0 + subl $1, 4, $1 + addl $16, 32, $16 + bne $1, 2b + +$tail: + bne $2, 1f + ret $31, ($26), 1 + +1: + EX(stb $31, 0($16)) + addl $16, 1, $16 + subl $2, 1, $2 + bne $2, 1b + clr $0 + ret $31, ($26), 1 + + .end __clear_user + EXPORT_SYMBOL(__clear_user) diff --git a/arch/sw_64/lib/copy_page.S b/arch/sw_64/lib/copy_page.S new file mode 100644 index 000000000000..898472c36c80 --- /dev/null +++ b/arch/sw_64/lib/copy_page.S @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw/lib/copy_page.S + * + * Copy an entire page. 
+ */ +#include + + .text + .align 4 + .global copy_page + .ent copy_page +copy_page: + .prologue 0 + + ldi $18, 64 + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + ldl $0, 0($17) + ldl $1, 8($17) + ldl $2, 16($17) + ldl $3, 24($17) + + stl_nc $0, 0($16) + stl_nc $1, 8($16) + stl_nc $2, 16($16) + stl_nc $3, 24($16) + + ldl $4, 32($17) + ldl $5, 40($17) + ldl $6, 48($17) + ldl $7, 56($17) + + stl_nc $4, 32($16) + stl_nc $5, 40($16) + stl_nc $6, 48($16) + stl_nc $7, 56($16) + + ldl $0, 64($17) + ldl $1, 72($17) + ldl $2, 80($17) + ldl $3, 88($17) + + stl_nc $0, 64($16) + stl_nc $1, 72($16) + stl_nc $2, 80($16) + stl_nc $3, 88($16) + + ldl $4, 96($17) + ldl $5, 104($17) + ldl $6, 112($17) + ldl $7, 120($17) + + stl_nc $4, 96($16) + stl_nc $5, 104($16) + stl_nc $6, 112($16) + stl_nc $7, 120($16) + + ldwe $f31, 3 * 0x80($17) + subl $18, 1, $18 + addl $17, 128, $17 + + addl $16, 128, $16 + bne $18, 1b + + memb + ret + + .end copy_page + EXPORT_SYMBOL(copy_page) diff --git a/arch/sw_64/lib/copy_user.S b/arch/sw_64/lib/copy_user.S new file mode 100644 index 000000000000..2c3dd0b5656c --- /dev/null +++ b/arch/sw_64/lib/copy_user.S @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copy to/from user space, handling exceptions as we go.. This + * isn't exactly pretty. + * + * This is essentially the same as "memcpy()", but with a few twists. + * Notably, we have to make sure that $0 is always up-to-date and + * contains the right "bytes left to copy" value (and that it is updated + * only _after_ a successful copy). There is also some rather minor + * exception setup stuff.. + */ +#include +/* Allow an exception for an insn; exit if we get one. */ +#define EXI(x,y...) \ + 99: x,##y; \ + .section __ex_table, "a"; \ + .long 99b - .; \ + ldi $31, $exitin-99b($31); \ + .previous + +#define EXO(x,y...) 
\ + 99: x, ##y; \ + .section __ex_table, "a"; \ + .long 99b - .; \ + ldi $31, $exitout-99b($31); \ + .previous + + .set noat + .align 4 + .globl __copy_user + .ent __copy_user +__copy_user: + .prologue 0 + and $18, $18, $0 + and $16, 7, $3 + beq $0, $35 + beq $3, $36 + subl $3, 8, $3 + .align 4 +$37: + EXI(ldbu $1, 0($17)) + EXO(stb $1, 0($16)) + addl $3, 1, $3 + subl $0, 1, $0 + addl $16, 1, $16 + addl $17, 1, $17 + beq $0, $41 + bne $3, $37 +$36: + and $17, 7, $1 + bic $0, 7, $4 + beq $1, $43 + beq $4, $48 + EXI(ldl_u $3, 0($17)) + .align 4 +$50: + EXI(ldl_u $2, 8($17)) + subl $4, 8, $4 + extll $3, $17, $3 + exthl $2, $17, $1 + bis $3, $1, $1 + EXO(stl $1,0($16)) + addl $17, 8, $17 + subl $0, 8, $0 + addl $16, 8, $16 + bis $2, $2, $3 + bne $4, $50 +$48: + beq $0, $41 + .align 4 +$57: + EXI(ldbu $1, 0($17)) + EXO(stb $1, 0($16)) + subl $0, 1, $0 + addl $16, 1, $16 + addl $17, 1, $17 + bne $0, $57 + br $31, $41 + .align 4 +$43: + beq $4, $65 + .align 4 +$66: + EXI(ldl $1, 0($17)) + subl $4, 8, $4 + EXO(stl $1,0($16)) + addl $17, 8, $17 + subl $0, 8, $0 + addl $16, 8, $16 + bne $4, $66 +$65: + beq $0, $41 + EXI(ldbu $1, 0($17)) + EXO(stb $1, 0($16)) + addl $17, 1, $17 + addl $16, 1, $16 + subl $0, 1, $0 + br $31, $65 +$41: +$35: +$exitin: +$exitout: + ret $31, ($26), 1 + + .end __copy_user + EXPORT_SYMBOL(__copy_user) diff --git a/arch/sw_64/lib/csum_ipv6_magic.S b/arch/sw_64/lib/csum_ipv6_magic.S new file mode 100644 index 000000000000..755e1c13cb25 --- /dev/null +++ b/arch/sw_64/lib/csum_ipv6_magic.S @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Contributed by Richard Henderson + * + * unsigned short csum_ipv6_magic(struct in6_addr *saddr, + * struct in6_addr *daddr, __u32 len, + * unsigned short proto, unsigned int csum); + * + * Misalignment handling (which costs 16 instructions / 8 cycles) + * added by Ivan Kokshaysky + */ +#include + .globl csum_ipv6_magic + .align 4 + .ent csum_ipv6_magic + .frame $30, 0, $26, 0 +csum_ipv6_magic: + 
.prologue 0 + + ldl_u $0, 0($16) + zapnot $20, 15, $20 + exthl $18, 1, $4 + ldl_u $21, 7($16) + + extlb $18, 1, $5 + ldl_u $1, 8($16) + extlb $18, 2, $6 + ldl_u $22, 15($16) + + extlb $18, 3, $18 + ldl_u $2, 0($17) + sra $4, 32, $4 + ldl_u $23, 7($17) + + extll $0, $16, $0 + ldl_u $3, 8($17) + exthl $21, $16, $21 + ldl_u $24, 15($17) + + sll $5, 16, $5 + or $0, $21, $0 + extll $1, $16, $1 + addl $20, $0, $20 + + exthl $22, $16, $22 + cmpult $20, $0, $0 + sll $6, 8, $6 + or $1, $22, $1 + + extll $2, $17, $2 + or $4, $18, $18 + exthl $23, $17, $23 + or $5, $6, $5 + + extll $3, $17, $3 + or $2, $23, $2 + exthl $24, $17, $24 + or $18, $5, $18 + + exthh $19, 7, $7 + or $3, $24, $3 + extlb $19, 1, $19 + addl $20, $1, $20 + + or $19, $7, $19 + cmpult $20, $1, $1 + sll $19, 48, $19 + + sra $19, 32, $19 + addl $20, $2, $20 + cmpult $20, $2, $2 + addl $20, $3, $20 + + cmpult $20, $3, $3 + addl $20, $18, $20 + cmpult $20, $18, $18 + addl $20, $19, $20 + + cmpult $20, $19, $19 + addl $0, $1, $0 + addl $2, $3, $2 + addl $18, $19, $18 + + addl $0, $2, $0 + addl $20, $18, $20 + addl $0, $20, $0 + unop + + extlh $0, 2, $2 + zapnot $0, 3, $3 + extlh $0, 4, $1 + addl $2, $3, $3 + + extlh $0, 6, $0 + addl $3, $1, $3 + addl $0, $3, $0 + unop + + extlh $0, 2, $1 + zapnot $0, 3, $0 + addl $0, $1, $0 + unop + + extlh $0, 2, $1 + zapnot $0, 3, $0 + addl $0, $1, $0 + not $0, $0 + + zapnot $0, 3, $0 + ret + + .end csum_ipv6_magic + EXPORT_SYMBOL(csum_ipv6_magic) diff --git a/arch/sw_64/lib/csum_partial_copy.c b/arch/sw_64/lib/csum_partial_copy.c new file mode 100644 index 000000000000..1a8c18757e09 --- /dev/null +++ b/arch/sw_64/lib/csum_partial_copy.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * csum_partial_copy - do IP checksumming and copy + * + * (C) Copyright 1996 Linus Torvalds + * + * Don't look at this too closely - you'll go mad. The things + * we do for performance.. 
+ */ + +#include +#include +#include +#include + + +#define ldl_u(x, y) \ + __asm__ __volatile__("ldl_u %0, %1":"=r" (x):"m" (*(const unsigned long *)(y))) + +#define stl_u(x, y) \ + __asm__ __volatile__("stl_u %1, %0":"=m" (*(unsigned long *)(y)):"r" (x)) + +static inline void stll_u(unsigned long data, unsigned long *dst) +{ + int i = 0; + unsigned long doff = (unsigned long)dst & 7; + + for (; doff < 8; i++, doff++) + *((char *)dst + i) = *((char *)&data + i); +} + +static inline void sthl_u(unsigned long data, unsigned long *dst) +{ + int i = 0; + unsigned long doff = (unsigned long)dst & 7; + + for (; i < doff; i++) + *((char *)dst + 8 - doff + i) = *((char *)&data + 8 - doff + i); +} + +#define __get_word(insn, x, ptr) \ +({ \ + long __guu_err; \ + __asm__ __volatile__( \ + "1: "#insn" %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0,2b-1b(%1)\n" \ + ".previous" \ + : "=r"(x), "=r"(__guu_err) \ + : "m"(__m(ptr)), "1"(0)); \ + __guu_err; \ +}) + +static inline unsigned long +csum_partial_cfu_dest_aligned(const unsigned long __user *src, + unsigned long *dst, long len) +{ + unsigned long word; + unsigned long checksum = ~0U; + int err = 0; + + err = __copy_from_user(dst, src, len+8); + + while (len > 0) { + word = *dst; + checksum += word; + checksum += (checksum < word); + dst++; + len -= 8; + } + len += 8; + word = *dst; + + if (len != 8) + maskll(word, len, word); + checksum += word; + checksum += (checksum < word); + + return checksum; +} + +static inline unsigned long +csum_partial_cfu_dest_unaligned(const unsigned long __user *src, + unsigned long *dst, unsigned long doff, long len) +{ + unsigned long word, patch; + unsigned long partial_dest, second_dest; + unsigned long checksum = ~0U; + int err = 0; + + err = __copy_from_user(dst, src, len+8); + + dst = (unsigned long *)((unsigned long)dst & (~7UL)); + word = *dst; + inshl(word, 8 - doff, partial_dest); + dst++; + + while (len >= 0) { + word = *dst; + insll(word, 
8 - doff, second_dest); + patch = partial_dest | second_dest; + checksum += patch; + checksum += (checksum < patch); + inshl(word, 8 - doff, partial_dest); + dst++; + len -= 8; + } + + len += 8; + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + maskll(patch, len, patch); + checksum += patch; + checksum += (checksum < patch); + + return checksum; +} + +static __wsum __csum_and_copy(const void __user *src, void *dst, int len) +{ + unsigned long checksum; + unsigned long doff = 7 & (unsigned long) dst; + + if (!doff) { + checksum = csum_partial_cfu_dest_aligned( + (const unsigned long __user *) src, + (unsigned long *) dst, len-8); + } else { + checksum = csum_partial_cfu_dest_unaligned( + (const unsigned long __user *) src, + (unsigned long *) dst, doff, len-8); + } + return (__force __wsum)from64to16(checksum); +} + +__wsum +csum_and_copy_from_user(const void __user *src, void *dst, int len) +{ + if (!access_ok(src, len)) + return 0; + return __csum_and_copy(src, dst, len); +} +EXPORT_SYMBOL(csum_and_copy_from_user); + +__wsum +csum_partial_copy_nocheck(const void *src, void *dst, int len) +{ + return __csum_and_copy((__force const void __user *)src, + dst, len); +} +EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/sw_64/lib/deep-clear_page.S b/arch/sw_64/lib/deep-clear_page.S new file mode 100644 index 000000000000..52a3db33fc17 --- /dev/null +++ b/arch/sw_64/lib/deep-clear_page.S @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Zero an entire page. 
+ */ +#include + .text + .align 4 + .global clear_page + .ent clear_page +clear_page: + .prologue 0 + + ldi $0,64 + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + +/* + stl_nc $31,0x0($16) + stl_nc $31,0x8($16) + stl_nc $31,0x10($16) + stl_nc $31,0x18($16) + + stl_nc $31,0x20($16) + stl_nc $31,0x28($16) + stl_nc $31,0x30($16) + stl_nc $31,0x38($16) + + stl_nc $31,0x40($16) + stl_nc $31,0x48($16) + stl_nc $31,0x50($16) + stl_nc $31,0x58($16) + + stl_nc $31,0x60($16) + stl_nc $31,0x68($16) + stl_nc $31,0x70($16) + stl_nc $31,0x78($16) +*/ + + vstd_nc $f31, 0x0($16) + vstd_nc $f31, 0x20($16) + subl $0, 1, $0 + vstd_nc $f31, 0x40($16) + + vstd_nc $f31, 0x60($16) + addl $16, 128, $16 + bne $0, 1b + + memb + ret + + .end clear_page + EXPORT_SYMBOL(clear_page) diff --git a/arch/sw_64/lib/deep-clear_user.S b/arch/sw_64/lib/deep-clear_user.S new file mode 100644 index 000000000000..c81418ed99a2 --- /dev/null +++ b/arch/sw_64/lib/deep-clear_user.S @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Contributed by Mao Minkai + * + * Zero user space, handling exceptions as we go. + * + * We have to make sure that $0 is always up-to-date and contains the + * right "bytes left to zero" value (and that it is updated only _after_ + * a successful copy). There is also some rather minor exception setup + * stuff. + * + */ +#include +/* Allow an exception for an insn; exit if we get one. */ +#define FIXUP_LDST(x,y...) 
\ + 99: x,##y; \ + .section __ex_table,"a"; \ + .long 99b - .; \ + ldi $31, $out-99b($31); \ + .previous + +/* + * $7: SIMD status + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $18: bytes left to copy + * + */ + .globl __clear_user + .ent __clear_user +__clear_user: + .prologue 0 + bis $31, $31, $7 + mov $17, $18 + bis $31, $31, $17 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-set_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-set_template_c4.S" +#endif +$out: + bis $31, $18, $0 + beq $7, $return + +$restore_simd: + RESTORE_SIMD_REGS + +$return: + ret + .end __clear_user + EXPORT_SYMBOL(__clear_user) diff --git a/arch/sw_64/lib/deep-copy_page.S b/arch/sw_64/lib/deep-copy_page.S new file mode 100644 index 000000000000..a9b9d97f318a --- /dev/null +++ b/arch/sw_64/lib/deep-copy_page.S @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw/lib/copy_page.S + * + * Copy an entire page. + */ +#include +#include + + .text + .align 4 + .global copy_page + .ent copy_page +copy_page: + .prologue 0 + + ldi $18, 64 + subl $sp, 0x60, $sp + ldi $4, 0x40($sp) + stl $4, 0($sp) + bic $4, 0x1f, $4 + vstd $f16, 0($4) +#ifdef CONFIG_SUBARCH_C4 + csrr $5, CSR_WR_FREGS +#endif + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + vldd $f16, 0($17) + vstd_nc $f16, 0($16) + + vldd $f16, 32($17) + vstd_nc $f16, 32($16) + + vldd $f16, 64($17) + vstd_nc $f16, 64($16) + + vldd $f16, 96($17) + vstd_nc $f16, 96($16) + + ldwe $f31, 5*0x80($17) + subl $18, 1, $18 + addl $17, 128, $17 + + addl $16, 128, $16 + bne $18, 1b + + memb + ldl $4, 0($sp) + ldi $4, 0x40($sp) + bic $4, 0x1f, $4 + vldd $f16, 0($4) +#ifdef CONFIG_SUBARCH_C4 + csrw $5, CSR_WR_FREGS +#endif + addl $sp, 0x60, $sp + ret + + .end copy_page + EXPORT_SYMBOL(copy_page) diff --git a/arch/sw_64/lib/deep-copy_template.S b/arch/sw_64/lib/deep-copy_template.S new file mode 100644 index 000000000000..7705eb3f36d4 --- /dev/null +++ b/arch/sw_64/lib/deep-copy_template.S @@ -0,0 +1,301 
@@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * template for memcpy and copy_user with SIMD + * + * $4: 8-byte misalignment of src when dest is 8-byte aligned + * $5: 32-byte misalignment of src when dest is 32-byte aligned + * $7: SIMD status + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $16: latest dest, clobbered + * $17: latest src, clobbered + * $18: bytes left to copy + * + */ + +#define NC_STORE_THRESHOLD 2048 + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x60($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23); \ + ldi $7, 1 + +#define RESTORE_SIMD_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + ldi $sp, 0x60($sp); \ + bis $31, $31, $7 + +#define SAVE_SIMD_U_REGS \ + ldi $sp, -0xc0($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23); \ + vstd $f4, 0x40($23); \ + vstd $f5, 0x60($23); \ + vstd $f3, 0x80($23); \ + ldi $7, 2 + +#define RESTORE_SIMD_U_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + vldd $f4, 0x40($23); \ + vldd $f5, 0x60($23); \ + vldd $f3, 0x80($23); \ + ldi $sp, 0xc0($sp); \ + bis $31, $31, $7 + + ble $18, $out + and $16, 7, $1 + beq $1, $dest_aligned_8 + +$byte_loop_head: + FIXUP_LDST( ldbu $2, 0($17) ) + FIXUP_LDST( stb $2, 0($16) ) + subl $18, 1, $18 + addl $17, 1, $17 + addl $16, 1, $16 + ble $18, $out + and $16, 7, $1 + bne $1, $byte_loop_head + +$dest_aligned_8: + and $17, 7, $4 + cmplt $18, 16, $1 + bne $1, $quad_loop_end + and $16, 31, $1 + beq $1, $dest_aligned_32 + cmplt $18, 64, $1 + bne $1, $simd_end + bne $4, $quad_u_loop_head + +$quad_loop_head: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( stl $2, 0($16) ) + addl $16, 8, $16 + addl $17, 8, $17 + subl $18, 8, $18 + and $16, 31, $1 + beq $1, $dest_aligned_32 + br $31, $quad_loop_head + +$dest_aligned_32: + cmplt $18, 64, $1 + bne $1, $simd_end + and 
$17, 31, $5 + bne $5, $prep_simd_u_loop + +$prep_simd_loop: + SAVE_SIMD_REGS + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_loop + + .align 4 +$simd_loop_nc: + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vldd $f2, 32($17) ) + FIXUP_LDST( vstd_nc $f1, 0($16) ) + FIXUP_LDST( vstd_nc $f2, 32($16) ) + subl $18, 64, $18 + addl $17, 64, $17 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop_nc + memb # required for _nc store instructions + br $31, $simd_loop_end + + .align 4 +$simd_loop: + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vldd $f2, 32($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + FIXUP_LDST( vstd $f2, 32($16) ) + subl $18, 64, $18 + addl $17, 64, $17 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop + +$simd_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + subl $18, 32, $18 + addl $17, 32, $17 + addl $16, 32, $16 + +$no_more_simd: + RESTORE_SIMD_REGS + +$simd_end: + ble $18, $out + cmplt $18, 16, $1 + bne $1, $quad_loop_end + bne $4, $prep_quad_u_loop_tail + + .align 4 +$quad_loop_tail: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( ldl $3, 8($17) ) + FIXUP_LDST( stl $2, 0($16) ) + FIXUP_LDST( stl $3, 8($16) ) + subl $18, 16, $18 + addl $17, 16, $17 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_loop_tail + +$quad_loop_end: + ble $18, $out + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + bne $4, $move_one_quad_u + +$move_one_quad: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( stl $2, 0($16) ) + subl $18, 8, $18 + addl $17, 8, $17 + addl $16, 8, $16 + ble $18, $out + + .align 3 +$byte_loop_tail: + FIXUP_LDST( ldbu $2, 0($17) ) + FIXUP_LDST( stb $2, 0($16) ) + subl $18, 1, $18 + addl $17, 1, $17 + addl $16, 1, $16 + bgt $18, $byte_loop_tail + br $31, $out + +/* misaligned src and dst */ +$quad_u_loop_head: + FIXUP_LDST( ldl_u $2, 0($17) ) + FIXUP_LDST( ldl_u $3, 7($17) ) + extll $2, $4, $2 + exthl $3, $4, $3 + bis $2, $3, $2 + FIXUP_LDST( stl 
$2, 0($16) ) + addl $16, 8, $16 + addl $17, 8, $17 + subl $18, 8, $18 + and $16, 31, $1 + beq $1, $dest_aligned_32 + br $31, $quad_u_loop_head + +$prep_simd_u_loop: + SAVE_SIMD_U_REGS + andnot $17, 31, $3 + ldi $2, 256($31) + sll $5, 3, $1 + subl $2, $1, $2 + sll $1, 29, $1 + sll $2, 29, $2 + ifmovd $1, $f1 + ifmovd $2, $f2 + FIXUP_LDST( vldd $f4, 0($3) ) + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_u_loop + + .align 4 +$simd_u_loop_nc: + FIXUP_LDST( vldd $f5, 32($3) ) + srlow $f4, $f1, $f4 + sllow $f5, $f2, $f3 + vlogfc $f3, $f4, $f31, $f3 + FIXUP_LDST( vstd_nc $f3, 0($16) ) + FIXUP_LDST( vldd $f4, 64($3) ) + srlow $f5, $f1, $f5 + sllow $f4, $f2, $f3 + vlogfc $f5, $f3, $f31, $f5 + FIXUP_LDST( vstd_nc $f5, 32($16) ) + subl $18, 64, $18 + addl $3, 64, $3 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_u_loop_nc + memb # required for _nc store instructions + br $31, $simd_u_loop_end + + .align 4 +$simd_u_loop: + FIXUP_LDST( vldd $f5, 32($3) ) + srlow $f4, $f1, $f4 + sllow $f5, $f2, $f3 + vlogfc $f4, $f3, $f31, $f3 + FIXUP_LDST( vstd $f3, 0($16) ) + FIXUP_LDST( vldd $f4, 64($3) ) + srlow $f5, $f1, $f5 + sllow $f4, $f2, $f3 + vlogfc $f5, $f3, $f31, $f3 + FIXUP_LDST( vstd $f3, 32($16) ) + subl $18, 64, $18 + addl $3, 64, $3 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_u_loop + +$simd_u_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd_u + FIXUP_LDST( vldd $f5, 32($3) ) + srlow $f4, $f1, $f4 + sllow $f5, $f2, $f3 + vlogfc $f4, $f3, $f31, $f3 + FIXUP_LDST( vstd $f3, 0($16) ) + subl $18, 32, $18 + addl $3, 32, $3 + addl $16, 32, $16 + +$no_more_simd_u: + RESTORE_SIMD_U_REGS + bis $3, $5, $17 + br $31, $simd_end + +$prep_quad_u_loop_tail: + FIXUP_LDST( ldl_u $2, 0($17) ) + .align 4 +$quad_u_loop_tail: + FIXUP_LDST( ldl_u $3, 8($17) ) + extll $2, $4, $22 + exthl $3, $4, $23 + bis $22, $23, $22 + FIXUP_LDST( stl $22, 0($16) ) + FIXUP_LDST( ldl_u $2, 16($17) ) + extll $3, $4, $24 + exthl $2, $4, $25 + bis $24, $25, $24 + 
FIXUP_LDST( stl $24, 8($16) ) + subl $18, 16, $18 + addl $17, 16, $17 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_u_loop_tail + br $31, $quad_loop_end + +$move_one_quad_u: + FIXUP_LDST( ldl_u $2, 0($17) ) + FIXUP_LDST( ldl_u $3, 8($17) ) + extll $2, $4, $22 + exthl $3, $4, $23 + bis $22, $23, $22 + FIXUP_LDST( stl $22, 0($16) ) + subl $18, 8, $18 + addl $17, 8, $17 + addl $16, 8, $16 + ble $18, $out + br $31, $byte_loop_tail diff --git a/arch/sw_64/lib/deep-copy_template_c4.S b/arch/sw_64/lib/deep-copy_template_c4.S new file mode 100644 index 000000000000..e0740874dfa3 --- /dev/null +++ b/arch/sw_64/lib/deep-copy_template_c4.S @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * template for memcpy and copy_user with SIMD + * + * $7: SIMD status + * 0: not in simd loop + * 1: in simd and simd_u loop + * $16: latest dest, clobbered + * $17: latest src, clobbered + * $18: bytes left to copy + * + */ + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x60($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23); \ + ldi $7, 1 + +#define RESTORE_SIMD_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + ldi $sp, 0x60($sp); \ + bis $31, $31, $7 + + + ble $18, $out + + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + cmplt $18, 16, $1 + bne $1, $quad_loop_end + cmplt $18, 32, $1 + bne $1, $simd_end + +$prep_simd_loop: + SAVE_SIMD_REGS + cmplt $18, 64, $1 + bne $1, $simd_loop_end + + .align 4 +$simd_loop: + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vldd $f2, 32($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + FIXUP_LDST( vstd $f2, 32($16) ) + subl $18, 64, $18 + addl $17, 64, $17 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop + +$simd_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + subl $18, 32, $18 + addl $17, 32, $17 + addl $16, 32, $16 + +$no_more_simd: + RESTORE_SIMD_REGS + 
+$simd_end: + ble $18, $out + cmplt $18, 16, $1 + bne $1, $quad_loop_end + + .align 4 +$quad_loop_tail: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( ldl $3, 8($17) ) + FIXUP_LDST( stl $2, 0($16) ) + FIXUP_LDST( stl $3, 8($16) ) + subl $18, 16, $18 + addl $17, 16, $17 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_loop_tail + +$quad_loop_end: + ble $18, $out + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + +$move_one_quad: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( stl $2, 0($16) ) + subl $18, 8, $18 + addl $17, 8, $17 + addl $16, 8, $16 + ble $18, $out + + .align 3 +$byte_loop_tail: + FIXUP_LDST( ldbu $2, 0($17) ) + FIXUP_LDST( stb $2, 0($16) ) + subl $18, 1, $18 + addl $17, 1, $17 + addl $16, 1, $16 + bgt $18, $byte_loop_tail + br $31, $out diff --git a/arch/sw_64/lib/deep-copy_user.S b/arch/sw_64/lib/deep-copy_user.S new file mode 100644 index 000000000000..b79f8f3f0f4a --- /dev/null +++ b/arch/sw_64/lib/deep-copy_user.S @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +/* Allow an exception for an insn; exit if we get one. 
*/ +#define FIXUP_LDST(x, y) \ + 99: x, y; \ + .section __ex_table, "a"; \ + .long 99b - .; \ + ldi $31, $out-99b($31); \ + .previous + +/* + * $7: SIMD status for C3B + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $7: SIMD status for C4 + * 0: not in simd loop + * 1: in simd and simd_u loop + * $18: bytes left to copy + * + */ + .globl __copy_user + .ent __copy_user +__copy_user: + .prologue 0 + .set noreorder + bis $31, $31, $7 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-copy_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-copy_template_c4.S" +#endif +$out: + bis $31, $18, $0 + beq $7, $return + subl $7, 1, $7 + beq $7, $restore_simd + +#if defined(CONFIG_SUBARCH_C3B) +$restore_simd_u: + RESTORE_SIMD_U_REGS + br $31, $return +#endif + +$restore_simd: + RESTORE_SIMD_REGS + +$return: + ret + .end __copy_user + EXPORT_SYMBOL(__copy_user) diff --git a/arch/sw_64/lib/deep-memcpy.S b/arch/sw_64/lib/deep-memcpy.S new file mode 100644 index 000000000000..78a6bd85cf01 --- /dev/null +++ b/arch/sw_64/lib/deep-memcpy.S @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +#define FIXUP_LDST(x, y) \ + x, y + + .globl memcpy + .ent memcpy +memcpy: + .frame $30, 0, $26, 0 + .prologue 0 + mov $16, $0 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-copy_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-copy_template_c4.S" +#endif +$out: + ret + .end memcpy + EXPORT_SYMBOL(memcpy) +__memcpy = memcpy +.globl __memcpy diff --git a/arch/sw_64/lib/deep-memset.S b/arch/sw_64/lib/deep-memset.S new file mode 100644 index 000000000000..c6b5355beec6 --- /dev/null +++ b/arch/sw_64/lib/deep-memset.S @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Optimized memset() for SW64 with SIMD instructions + * + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * Fill SIZE bytes pointed to by SRC with CHAR. 
+ * + * Input: + * $16: SRC, clobbered + * $17: CHAR, clobbered + * $18: SIZE, clobbered + * + * Output: + * $0: SRC + * + * Temporaries: + * $1: unaligned parts of addr (0 means aligned addr), tmp data + * $2: tmp data + * $3: tmp data + * $4: tmp data + * $5: compare result + * $f10: 32 bytes data (manually saved) + * + */ + +#include +#include + +#define FIXUP_LDST(x, y) \ + x, y + + .set noat + .set noreorder + .text + .align 4 + .globl memset + .globl __memset + .globl ___memset + .globl __memsetw + .globl __constant_c_memset + .ent ___memset +___memset: + .frame $30, 0, $26, 0 + .prologue 0 + +#ifdef CONFIG_SUBARCH_C4 + csrr $6, CSR_WR_FREGS +#endif +/* expand 1 byte data to 8 bytes */ + and $17, 0xff, $17 + sll $17, 8, $4 + bis $17, $4, $17 + sll $17, 16, $4 + bis $17, $4, $17 + sll $17, 32, $4 + bis $17, $4, $17 + +__constant_c_memset: + bis $31, $31, $7 + bis $31, $16, $0 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-set_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-set_template_c4.S" +#endif +$out: +#ifdef CONFIG_SUBARCH_C4 + csrw $6, CSR_WR_FREGS +#endif + ret + + .end ___memset + EXPORT_SYMBOL(___memset) + + .align 5 + .ent __memsetw +__memsetw: + .prologue 0 + + inslh $17, 0, $1 + inslh $17, 2, $2 + inslh $17, 4, $3 + bis $1, $2, $1 + inslh $17, 6, $4 + bis $1, $3, $1 + bis $1, $4, $17 + br $31, __constant_c_memset + + .end __memsetw + EXPORT_SYMBOL(__memsetw) + +memset = ___memset +EXPORT_SYMBOL(memset) +__memset = ___memset +EXPORT_SYMBOL(__memset) diff --git a/arch/sw_64/lib/deep-set_template.S b/arch/sw_64/lib/deep-set_template.S new file mode 100644 index 000000000000..f9073d638468 --- /dev/null +++ b/arch/sw_64/lib/deep-set_template.S @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * template for memcpy and copy_user with SIMD + * + * $7: SIMD status + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $16: latest dest, clobbered + * $17: 8-byte data to set + * $18: bytes left to copy + * + */ 
+ +#define NC_STORE_THRESHOLD 2048 + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x40($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + ldi $7, 1 + +#define RESTORE_SIMD_REGS \ + vldd $f1, 0($23); \ + ldi $sp, 0x40($sp); \ + bis $31, $31, $7 + + ble $18, $out + and $16, 7, $1 + beq $1, $dest_aligned_8 + + .align 3 +$byte_loop_head: + FIXUP_LDST( stb $17, 0($16) ) + subl $18, 1, $18 + addl $16, 1, $16 + ble $18, $out + and $16, 7, $1 + bne $1, $byte_loop_head + +$dest_aligned_8: + cmplt $18, 16, $1 + bne $1, $quad_loop_end + and $16, 31, $1 + beq $1, $dest_aligned_32 + cmplt $18, 64, $1 + bne $1, $simd_end + + .align 3 +$quad_loop_head: + FIXUP_LDST( stl $17, 0($16) ) + addl $16, 8, $16 + subl $18, 8, $18 + and $16, 31, $1 + beq $1, $dest_aligned_32 + br $31, $quad_loop_head + +$dest_aligned_32: + cmplt $18, 64, $1 + bne $1, $simd_end + +$prep_simd_loop: + SAVE_SIMD_REGS + ifmovd $17, $f1 + vcpyf $f1, $f1 + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_loop + + .align 3 +$simd_loop_nc: + FIXUP_LDST( vstd_nc $f1, 0($16) ) + FIXUP_LDST( vstd_nc $f1, 32($16) ) + subl $18, 64, $18 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop_nc + memb # required for _nc store instructions + br $31, $simd_loop_end + + .align 3 +$simd_loop: + FIXUP_LDST( vstd $f1, 0($16) ) + FIXUP_LDST( vstd $f1, 32($16) ) + subl $18, 64, $18 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop + +$simd_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd + FIXUP_LDST( vstd $f1, 0($16) ) + subl $18, 32, $18 + addl $16, 32, $16 + +$no_more_simd: + RESTORE_SIMD_REGS + +$simd_end: + ble $18, $out + cmplt $18, 16, $1 + bne $1, $quad_loop_end + + .align 3 +$quad_loop_tail: + FIXUP_LDST( stl $17, 0($16) ) + FIXUP_LDST( stl $17, 8($16) ) + subl $18, 16, $18 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_loop_tail + +$quad_loop_end: + ble $18, $out + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + +$move_one_quad: + FIXUP_LDST( stl $17, 
0($16) ) + subl $18, 8, $18 + addl $16, 8, $16 + ble $18, $out + + .align 3 +$byte_loop_tail: + FIXUP_LDST( stb $17, 0($16) ) + subl $18, 1, $18 + addl $16, 1, $16 + bgt $18, $byte_loop_tail + br $31, $out diff --git a/arch/sw_64/lib/deep-set_template_c4.S b/arch/sw_64/lib/deep-set_template_c4.S new file mode 100644 index 000000000000..2b1bcab8fec9 --- /dev/null +++ b/arch/sw_64/lib/deep-set_template_c4.S @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * template for memset and clear_user with SIMD + * + * $7: SIMD status + * 0: not in simd loop + * 1: in simd loop + * $16: latest dest, clobbered + * $17: 8-byte data to set + * $18: bytes left to copy + * + */ + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x40($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + ldi $7, 1 + +#define RESTORE_SIMD_REGS \ + vldd $f1, 0($23); \ + ldi $sp, 0x40($sp); \ + bis $31, $31, $7 + + ble $18, $out + + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + cmplt $18, 16, $1 + bne $1, $quad_loop_end + cmplt $18, 32, $1 + bne $1, $simd_end + +$prep_simd_loop: + SAVE_SIMD_REGS + ifmovd $17, $f1 + vcpyf $f1, $f1 + cmplt $18, 64, $1 + bne $1, $simd_loop_end + + .align 3 +$simd_loop: + FIXUP_LDST( vstd $f1, 0($16) ) + FIXUP_LDST( vstd $f1, 32($16) ) + subl $18, 64, $18 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop + +$simd_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd + FIXUP_LDST( vstd $f1, 0($16) ) + subl $18, 32, $18 + addl $16, 32, $16 + +$no_more_simd: + RESTORE_SIMD_REGS + +$simd_end: + ble $18, $out + cmplt $18, 16, $1 + bne $1, $quad_loop_end + + .align 3 +$quad_loop_tail: + FIXUP_LDST( stl $17, 0($16) ) + FIXUP_LDST( stl $17, 8($16) ) + subl $18, 16, $18 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_loop_tail + +$quad_loop_end: + ble $18, $out + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + +$move_one_quad: + FIXUP_LDST( stl $17, 0($16) ) + subl $18, 8, $18 + addl $16, 8, $16 + ble $18, $out + + .align 3 
+$byte_loop_tail: + FIXUP_LDST( stb $17, 0($16) ) + subl $18, 1, $18 + addl $16, 1, $16 + bgt $18, $byte_loop_tail + br $31, $out diff --git a/arch/sw_64/lib/divide.S b/arch/sw_64/lib/divide.S new file mode 100644 index 000000000000..ceef343a6084 --- /dev/null +++ b/arch/sw_64/lib/divide.S @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) 1995 Linus Torvalds + * + * The sw64 chip doesn't provide hardware division, so we have to do it + * by hand. The compiler expects the functions + * + * __divlu: 64-bit unsigned long divide + * __remlu: 64-bit unsigned long remainder + * __divls/__remqs: signed 64-bit + * __divwu/__remlu: unsigned 32-bit + * __divws/__remls: signed 32-bit + * + * These are not normal C functions: instead of the normal + * calling sequence, these expect their arguments in registers + * $24 and $25, and return the result in $27. Register $28 may + * be clobbered (assembly temporary), anything else must be saved. + * + * In short: painful. + * + * This is a rather simple bit-at-a-time algorithm: it's very good + * at dividing random 64-bit numbers, but the more usual case where + * the divisor is small is handled better by the DEC algorithm + * using lookup tables. This uses much less memory, though, and is + * nicer on the cache.. Besides, I don't know the copyright status + * of the DEC code. + */ + +/* + * My temporaries: + * $0 - current bit + * $1 - shifted divisor + * $2 - modulus/quotient + * + * $23 - return address + * $24 - dividend + * $25 - divisor + * + * $27 - quotient/modulus + * $28 - compare status + */ +#include + +#define halt .long 0 + +/* + * Select function type and registers + */ +#define mask $0 +#define divisor $1 +#define compare $28 +#define tmp1 $3 +#define tmp2 $4 + +#ifdef DIV +#define DIV_ONLY(x,y...) x, ##y +#define MOD_ONLY(x,y...) +#define func(x) __div##x +#define modulus $2 +#define quotient $27 +#define GETSIGN(x) xor $24, $25, x +#define STACK 48 +#else +#define DIV_ONLY(x,y...) 
+#define MOD_ONLY(x,y...) x, ##y +#define func(x) __rem##x +#define modulus $27 +#define quotient $2 +#define GETSIGN(x) bis $24, $24, x +#define STACK 32 +#endif + +/* + * For 32-bit operations, we need to extend to 64-bit + */ +#ifdef INTSIZE +#define ufunction func(wu) +#define sfunction func(w) +#define LONGIFY(x) zapnot x, 15, x +#define SLONGIFY(x) addw x, 0, x +#else +#define ufunction func(lu) +#define sfunction func(l) +#define LONGIFY(x) +#define SLONGIFY(x) +#endif + +.set noat +.align 3 +.globl ufunction +.ent ufunction +ufunction: + subl $30, STACK, $30 + .frame $30, STACK, $23 + .prologue 0 + +7: stl $1, 0($30) + bis $25, $25, divisor + stl $2, 8($30) + bis $24, $24, modulus + stl $0, 16($30) + bis $31, $31, quotient + LONGIFY(divisor) + stl tmp1, 24($30) + LONGIFY(modulus) + bis $31, 1, mask + DIV_ONLY(stl tmp2, 32($30)) + beq divisor, 9f # div by zero + +#ifdef INTSIZE + /* + * shift divisor left, using 3-bit shifts for + * 32-bit divides as we can't overflow. Three-bit + * shifts will result in looping three times less + * here, but can result in two loops more later. + * Thus using a large shift isn't worth it (and + * s8add pairs better than a sll..) + */ +1: cmpult divisor, modulus, compare + s8addl divisor, $31, divisor + s8addl mask, $31, mask + bne compare, 1b +#else +1: cmpult divisor, modulus, compare + blt divisor, 2f + addl divisor, divisor, divisor + addl mask, mask, mask + bne compare, 1b +#endif + + /* ok, start to go right again.. */ +2: DIV_ONLY(addl quotient, mask, tmp2) + srl mask, 1, mask + cmpule divisor, modulus, compare + subl modulus, divisor, tmp1 + DIV_ONLY(selne compare, tmp2, quotient, quotient) + srl divisor, 1, divisor + selne compare, tmp1, modulus, modulus + bne mask, 2b + +9: ldl $1, 0($30) + ldl $2, 8($30) + ldl $0, 16($30) + ldl tmp1, 24($30) + DIV_ONLY(ldl tmp2, 32($30)) + addl $30, STACK, $30 + ret $31, ($23), 1 + .end ufunction + EXPORT_SYMBOL(ufunction) +/* + * Uhh.. Ugly signed division. 
I'd rather not have it at all, but + * it's needed in some circumstances. There are different ways to + * handle this, really. This does: + * -a / b = a / -b = -(a / b) + * -a % b = -(a % b) + * a % -b = a % b + * which is probably not the best solution, but at least should + * have the property that (x/y)*y + (x%y) = x. + */ +.align 3 +.globl sfunction +.ent sfunction +sfunction: + subl $30, STACK, $30 + .frame $30, STACK, $23 + .prologue 0 + bis $24, $25, $28 + SLONGIFY($28) + bge $28, 7b + stl $24, 0($30) + subl $31, $24, $28 + stl $25, 8($30) + sellt $24, $28, $24, $24 # abs($24) + stl $23, 16($30) + subl $31, $25, $28 + stl tmp1, 24($30) + sellt $25, $28, $25, $25 # abs($25) + bsr $23, ufunction + ldl $24, 0($30) + ldl $25, 8($30) + GETSIGN($28) + subl $31, $27, tmp1 + SLONGIFY($28) + ldl $23, 16($30) + sellt $28, tmp1, $27, $27 + ldl tmp1, 24($30) + addl $30, STACK, $30 + ret $31, ($23), 1 + .end sfunction + EXPORT_SYMBOL(sfunction) diff --git a/arch/sw_64/lib/fls.c b/arch/sw_64/lib/fls.c new file mode 100644 index 000000000000..aa4231f7e472 --- /dev/null +++ b/arch/sw_64/lib/fls.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +/* This is fls(x)-1, except zero is held to zero. This allows most + * efficient input into extbl, plus it allows easy handling of fls(0)=0. 
+ */ + +const unsigned char __flsm1_tab[256] = { + 0, + 0, + 1, 1, + 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, +}; +EXPORT_SYMBOL(__flsm1_tab); diff --git a/arch/sw_64/lib/fpreg.c b/arch/sw_64/lib/fpreg.c new file mode 100644 index 000000000000..178870310908 --- /dev/null +++ b/arch/sw_64/lib/fpreg.c @@ -0,0 +1,992 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) Copyright 1998 Linus Torvalds + */ + +#include +#include + +#define STT(reg, val) \ + asm volatile("fimovd $f"#reg", %0" : "=r"(val)) +#define STS(reg, val) \ + asm volatile("fimovs $f"#reg", %0" : "=r"(val)) +#define LDT(reg, val) \ + asm volatile("ifmovd %0, $f"#reg : : "r"(val)) +#define LDS(reg, val) \ + asm volatile("ifmovs %0, $f"#reg : : "r"(val)) +#define VLDD(reg, val) \ + asm volatile("vldd $f"#reg", %0" : : "m"(val) : "memory") +#define VSTD(reg, val) \ + asm volatile("vstd $f"#reg", %0" : "=m"(val) : : "memory") +#define VLDS(reg, val) \ + asm volatile("vlds $f"#reg", %0" : : "m"(val) : "memory") +#define LDWE(reg, val) \ + asm volatile("ldwe $f"#reg", %0" : : "m"(val) : "memory") +#define VSTS(reg, val) \ + asm volatile("vsts $f"#reg", %0" : "=m"(val) : : "memory") +#define STDH(reg, val) \ + asm volatile("vstd $f"#reg", %0" : "=m"(val) : : "memory") + 
+void +sw64_write_simd_fp_reg_s(unsigned long reg, unsigned long f0, unsigned long f1) +{ + + unsigned long tmpa[4] __aligned(16); + + tmpa[0] = f0; + tmpa[1] = f1; + + switch (reg) { + case 0: + VLDS(0, *tmpa); + break; + case 1: + VLDS(1, *tmpa); + break; + case 2: + VLDS(2, *tmpa); + break; + case 3: + VLDS(3, *tmpa); + break; + case 4: + VLDS(4, *tmpa); + break; + case 5: + VLDS(5, *tmpa); + break; + case 6: + VLDS(6, *tmpa); + break; + case 7: + VLDS(7, *tmpa); + break; + case 8: + VLDS(8, *tmpa); + break; + case 9: + VLDS(9, *tmpa); + break; + case 10: + VLDS(10, *tmpa); + break; + case 11: + VLDS(11, *tmpa); + break; + case 12: + VLDS(12, *tmpa); + break; + case 13: + VLDS(13, *tmpa); + break; + case 14: + VLDS(14, *tmpa); + break; + case 15: + VLDS(15, *tmpa); + break; + case 16: + VLDS(16, *tmpa); + break; + case 17: + VLDS(17, *tmpa); + break; + case 18: + VLDS(18, *tmpa); + break; + case 19: + VLDS(19, *tmpa); + break; + case 20: + VLDS(20, *tmpa); + break; + case 21: + VLDS(21, *tmpa); + break; + case 22: + VLDS(22, *tmpa); + break; + case 23: + VLDS(23, *tmpa); + break; + case 24: + VLDS(24, *tmpa); + break; + case 25: + VLDS(25, *tmpa); + break; + case 26: + VLDS(26, *tmpa); + break; + case 27: + VLDS(27, *tmpa); + break; + case 28: + VLDS(28, *tmpa); + break; + case 29: + VLDS(29, *tmpa); + break; + case 30: + VLDS(30, *tmpa); + break; + case 31: + break; + } + +} + + +void sw64_write_simd_fp_reg_d(unsigned long reg, unsigned long f0, + unsigned long f1, unsigned long f2, unsigned long f3) +{ + unsigned long tmpa[4] __aligned(32); + + tmpa[0] = f0; + tmpa[1] = f1; + tmpa[2] = f2; + tmpa[3] = f3; + + switch (reg) { + case 0: + VLDD(0, *tmpa); + break; + case 1: + VLDD(1, *tmpa); + break; + case 2: + VLDD(2, *tmpa); + break; + case 3: + VLDD(3, *tmpa); + break; + case 4: + VLDD(4, *tmpa); + break; + case 5: + VLDD(5, *tmpa); + break; + case 6: + VLDD(6, *tmpa); + break; + case 7: + VLDD(7, *tmpa); + break; + case 8: + VLDD(8, *tmpa); + break; + case 9: 
+ VLDD(9, *tmpa); + break; + case 10: + VLDD(10, *tmpa); + break; + case 11: + VLDD(11, *tmpa); + break; + case 12: + VLDD(12, *tmpa); + break; + case 13: + VLDD(13, *tmpa); + break; + case 14: + VLDD(14, *tmpa); + break; + case 15: + VLDD(15, *tmpa); + break; + case 16: + VLDD(16, *tmpa); + break; + case 17: + VLDD(17, *tmpa); + break; + case 18: + VLDD(18, *tmpa); + break; + case 19: + VLDD(19, *tmpa); + break; + case 20: + VLDD(20, *tmpa); + break; + case 21: + VLDD(21, *tmpa); + break; + case 22: + VLDD(22, *tmpa); + break; + case 23: + VLDD(23, *tmpa); + break; + case 24: + VLDD(24, *tmpa); + break; + case 25: + VLDD(25, *tmpa); + break; + case 26: + VLDD(26, *tmpa); + break; + case 27: + VLDD(27, *tmpa); + break; + case 28: + VLDD(28, *tmpa); + break; + case 29: + VLDD(29, *tmpa); + break; + case 30: + VLDD(30, *tmpa); + break; + case 31: + break; + } + + +} + + +void sw64_write_simd_fp_reg_ldwe(unsigned long reg, int a) +{ + switch (reg) { + case 0: + LDWE(0, a); + break; + case 1: + LDWE(1, a); + break; + case 2: + LDWE(2, a); + break; + case 3: + LDWE(3, a); + break; + case 4: + LDWE(4, a); + break; + case 5: + LDWE(5, a); + break; + case 6: + LDWE(6, a); + break; + case 7: + LDWE(7, a); + break; + case 8: + LDWE(8, a); + break; + case 9: + LDWE(9, a); + break; + case 10: + LDWE(10, a); + break; + case 11: + LDWE(11, a); + break; + case 12: + LDWE(12, a); + break; + case 13: + LDWE(13, a); + break; + case 14: + LDWE(14, a); + break; + case 15: + LDWE(15, a); + break; + case 16: + LDWE(16, a); + break; + case 17: + LDWE(17, a); + break; + case 18: + LDWE(18, a); + break; + case 19: + LDWE(19, a); + break; + case 20: + LDWE(20, a); + break; + case 21: + LDWE(21, a); + break; + case 22: + LDWE(22, a); + break; + case 23: + LDWE(23, a); + break; + case 24: + LDWE(24, a); + break; + case 25: + LDWE(25, a); + break; + case 26: + LDWE(26, a); + break; + case 27: + LDWE(27, a); + break; + case 28: + LDWE(28, a); + break; + case 29: + LDWE(29, a); + break; + case 
30: + LDWE(30, a); + break; + case 31: + break; + } +} + + +void sw64_read_simd_fp_m_s(unsigned long reg, unsigned long *fp_value) +{ + volatile unsigned long tmpa[2] __aligned(16); + + switch (reg) { + case 0: + VSTS(0, *tmpa); + break; + case 1: + VSTS(1, *tmpa); + break; + case 2: + VSTS(2, *tmpa); + break; + case 3: + VSTS(3, *tmpa); + break; + case 4: + VSTS(4, *tmpa); + break; + case 5: + VSTS(5, *tmpa); + break; + case 6: + VSTS(6, *tmpa); + break; + case 7: + VSTS(7, *tmpa); + break; + case 8: + VSTS(8, *tmpa); + break; + case 9: + VSTS(9, *tmpa); + break; + case 10: + VSTS(10, *tmpa); + break; + case 11: + VSTS(11, *tmpa); + break; + case 12: + VSTS(12, *tmpa); + break; + case 13: + VSTS(13, *tmpa); + break; + case 14: + VSTS(14, *tmpa); + break; + case 15: + VSTS(15, *tmpa); + break; + case 16: + VSTS(16, *tmpa); + break; + case 17: + VSTS(17, *tmpa); + break; + case 18: + VSTS(18, *tmpa); + break; + case 19: + VSTS(19, *tmpa); + break; + case 20: + VSTS(20, *tmpa); + break; + case 21: + VSTS(21, *tmpa); + break; + case 22: + VSTS(22, *tmpa); + break; + case 23: + VSTS(23, *tmpa); + break; + case 24: + VSTS(24, *tmpa); + break; + case 25: + VSTS(25, *tmpa); + break; + case 26: + VSTS(26, *tmpa); + break; + case 27: + VSTS(27, *tmpa); + break; + case 28: + VSTS(28, *tmpa); + break; + case 29: + VSTS(29, *tmpa); + break; + case 30: + VSTS(30, *tmpa); + break; + case 31: + VSTS(31, *tmpa); + break; + } + + *fp_value = tmpa[0]; + *(fp_value+1) = tmpa[1]; +} + +void sw64_read_simd_fp_m_d(unsigned long reg, unsigned long *fp_value) +{ + volatile unsigned long tmpa[4] __aligned(32); + + switch (reg) { + case 0: + VSTD(0, *tmpa); + break; + case 1: + VSTD(1, *tmpa); + break; + case 2: + VSTD(2, *tmpa); + break; + case 3: + VSTD(3, *tmpa); + break; + case 4: + VSTD(4, *tmpa); + break; + case 5: + VSTD(5, *tmpa); + break; + case 6: + VSTD(6, *tmpa); + break; + case 7: + VSTD(7, *tmpa); + break; + case 8: + VSTD(8, *tmpa); + break; + case 9: + VSTD(9, *tmpa); + 
break; + case 10: + VSTD(10, *tmpa); + break; + case 11: + VSTD(11, *tmpa); + break; + case 12: + VSTD(12, *tmpa); + break; + case 13: + VSTD(13, *tmpa); + break; + case 14: + VSTD(14, *tmpa); + break; + case 15: + VSTD(15, *tmpa); + break; + case 16: + VSTD(16, *tmpa); + break; + case 17: + VSTD(17, *tmpa); + break; + case 18: + VSTD(18, *tmpa); + break; + case 19: + VSTD(19, *tmpa); + break; + case 20: + VSTD(20, *tmpa); + break; + case 21: + VSTD(21, *tmpa); + break; + case 22: + VSTD(22, *tmpa); + break; + case 23: + VSTD(23, *tmpa); + break; + case 24: + VSTD(24, *tmpa); + break; + case 25: + VSTD(25, *tmpa); + break; + case 26: + VSTD(26, *tmpa); + break; + case 27: + VSTD(27, *tmpa); + break; + case 28: + VSTD(28, *tmpa); + break; + case 29: + VSTD(29, *tmpa); + break; + case 30: + VSTD(30, *tmpa); + break; + case 31: + VSTD(31, *tmpa); + break; + } + + *fp_value = tmpa[0]; + *(fp_value+1) = tmpa[1]; + *(fp_value+2) = tmpa[2]; + *(fp_value+3) = tmpa[3]; +} + +unsigned long sw64_read_fp_reg(unsigned long reg) +{ + unsigned long val; + + switch (reg) { + case 0: + STT(0, val); + break; + case 1: + STT(1, val); + break; + case 2: + STT(2, val); + break; + case 3: + STT(3, val); + break; + case 4: + STT(4, val); + break; + case 5: + STT(5, val); + break; + case 6: + STT(6, val); + break; + case 7: + STT(7, val); + break; + case 8: + STT(8, val); + break; + case 9: + STT(9, val); + break; + case 10: + STT(10, val); + break; + case 11: + STT(11, val); + break; + case 12: + STT(12, val); + break; + case 13: + STT(13, val); + break; + case 14: + STT(14, val); + break; + case 15: + STT(15, val); + break; + case 16: + STT(16, val); + break; + case 17: + STT(17, val); + break; + case 18: + STT(18, val); + break; + case 19: + STT(19, val); + break; + case 20: + STT(20, val); + break; + case 21: + STT(21, val); + break; + case 22: + STT(22, val); + break; + case 23: + STT(23, val); + break; + case 24: + STT(24, val); + break; + case 25: + STT(25, val); + break; + case 
26: + STT(26, val); + break; + case 27: + STT(27, val); + break; + case 28: + STT(28, val); + break; + case 29: + STT(29, val); + break; + case 30: + STT(30, val); + break; + case 31: + STT(31, val); + break; + default: + return 0; + } + + return val; +} +EXPORT_SYMBOL(sw64_read_fp_reg); + +void sw64_write_fp_reg(unsigned long reg, unsigned long val) +{ + switch (reg) { + case 0: + LDT(0, val); + break; + case 1: + LDT(1, val); + break; + case 2: + LDT(2, val); + break; + case 3: + LDT(3, val); + break; + case 4: + LDT(4, val); + break; + case 5: + LDT(5, val); + break; + case 6: + LDT(6, val); + break; + case 7: + LDT(7, val); + break; + case 8: + LDT(8, val); + break; + case 9: + LDT(9, val); + break; + case 10: + LDT(10, val); + break; + case 11: + LDT(11, val); + break; + case 12: + LDT(12, val); + break; + case 13: + LDT(13, val); + break; + case 14: + LDT(14, val); + break; + case 15: + LDT(15, val); + break; + case 16: + LDT(16, val); + break; + case 17: + LDT(17, val); + break; + case 18: + LDT(18, val); + break; + case 19: + LDT(19, val); + break; + case 20: + LDT(20, val); + break; + case 21: + LDT(21, val); + break; + case 22: + LDT(22, val); + break; + case 23: + LDT(23, val); + break; + case 24: + LDT(24, val); + break; + case 25: + LDT(25, val); + break; + case 26: + LDT(26, val); + break; + case 27: + LDT(27, val); + break; + case 28: + LDT(28, val); + break; + case 29: + LDT(29, val); + break; + case 30: + LDT(30, val); + break; + case 31: + LDT(31, val); + break; + } +} +EXPORT_SYMBOL(sw64_write_fp_reg); + +unsigned long sw64_read_fp_reg_s(unsigned long reg) +{ + unsigned long val; + + switch (reg) { + case 0: + STS(0, val); + break; + case 1: + STS(1, val); + break; + case 2: + STS(2, val); + break; + case 3: + STS(3, val); + break; + case 4: + STS(4, val); + break; + case 5: + STS(5, val); + break; + case 6: + STS(6, val); + break; + case 7: + STS(7, val); + break; + case 8: + STS(8, val); + break; + case 9: + STS(9, val); + break; + case 10: + 
STS(10, val); + break; + case 11: + STS(11, val); + break; + case 12: + STS(12, val); + break; + case 13: + STS(13, val); + break; + case 14: + STS(14, val); + break; + case 15: + STS(15, val); + break; + case 16: + STS(16, val); + break; + case 17: + STS(17, val); + break; + case 18: + STS(18, val); + break; + case 19: + STS(19, val); + break; + case 20: + STS(20, val); + break; + case 21: + STS(21, val); + break; + case 22: + STS(22, val); + break; + case 23: + STS(23, val); + break; + case 24: + STS(24, val); + break; + case 25: + STS(25, val); + break; + case 26: + STS(26, val); + break; + case 27: + STS(27, val); + break; + case 28: + STS(28, val); + break; + case 29: + STS(29, val); + break; + case 30: + STS(30, val); + break; + case 31: + STS(31, val); + break; + default: + return 0; + } + + return val; +} +EXPORT_SYMBOL(sw64_read_fp_reg_s); + +void sw64_write_fp_reg_s(unsigned long reg, unsigned long val) +{ + switch (reg) { + case 0: + LDS(0, val); + break; + case 1: + LDS(1, val); + break; + case 2: + LDS(2, val); + break; + case 3: + LDS(3, val); + break; + case 4: + LDS(4, val); + break; + case 5: + LDS(5, val); + break; + case 6: + LDS(6, val); + break; + case 7: + LDS(7, val); + break; + case 8: + LDS(8, val); + break; + case 9: + LDS(9, val); + break; + case 10: + LDS(10, val); + break; + case 11: + LDS(11, val); + break; + case 12: + LDS(12, val); + break; + case 13: + LDS(13, val); + break; + case 14: + LDS(14, val); + break; + case 15: + LDS(15, val); + break; + case 16: + LDS(16, val); + break; + case 17: + LDS(17, val); + break; + case 18: + LDS(18, val); + break; + case 19: + LDS(19, val); + break; + case 20: + LDS(20, val); + break; + case 21: + LDS(21, val); + break; + case 22: + LDS(22, val); + break; + case 23: + LDS(23, val); + break; + case 24: + LDS(24, val); + break; + case 25: + LDS(25, val); + break; + case 26: + LDS(26, val); + break; + case 27: + LDS(27, val); + break; + case 28: + LDS(28, val); + break; + case 29: + LDS(29, val); + 
break; + case 30: + LDS(30, val); + break; + case 31: + LDS(31, val); + break; + } +} +EXPORT_SYMBOL(sw64_write_fp_reg_s); diff --git a/arch/sw_64/lib/iomap.c b/arch/sw_64/lib/iomap.c new file mode 100644 index 000000000000..d9c66a89131e --- /dev/null +++ b/arch/sw_64/lib/iomap.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sw_64 IO and memory functions. + */ + +#include + +#include +#include + +/* + * Here comes the sw64 implementation of the IOMAP interfaces. + */ +unsigned int ioread8(const void __iomem *addr) +{ + return readb(addr); +} +EXPORT_SYMBOL(ioread8); + +unsigned int ioread16(const void __iomem *addr) +{ + return readw(addr); +} +EXPORT_SYMBOL(ioread16); + +unsigned int ioread32(const void __iomem *addr) +{ + return readl(addr); +} +EXPORT_SYMBOL(ioread32); + +void iowrite8(u8 b, void __iomem *addr) +{ + writeb(b, addr); +} +EXPORT_SYMBOL(iowrite8); + +void iowrite16(u16 b, void __iomem *addr) +{ + writew(b, addr); +} +EXPORT_SYMBOL(iowrite16); + +void iowrite32(u32 b, void __iomem *addr) +{ + writel(b, addr); +} +EXPORT_SYMBOL(iowrite32); + +u8 inb(unsigned long port) +{ + return ioread8(ioport_map(port, 1)); +} +EXPORT_SYMBOL(inb); + +u16 inw(unsigned long port) +{ + return ioread16(ioport_map(port, 2)); +} +EXPORT_SYMBOL(inw); + +u32 inl(unsigned long port) +{ + return ioread32(ioport_map(port, 4)); +} +EXPORT_SYMBOL(inl); + +void outb(u8 b, unsigned long port) +{ + iowrite8(b, ioport_map(port, 1)); +} +EXPORT_SYMBOL(outb); + +void outw(u16 b, unsigned long port) +{ + iowrite16(b, ioport_map(port, 2)); +} +EXPORT_SYMBOL(outw); + +void outl(u32 b, unsigned long port) +{ + iowrite32(b, ioport_map(port, 4)); +} +EXPORT_SYMBOL(outl); + + +/* + * Read COUNT 8-bit bytes from port PORT into memory starting at SRC. 
+ */
+void ioread8_rep(const void __iomem *port, void *dst, unsigned long count)
+{
+	/* Copy single bytes until DST is 32-bit aligned. */
+	while ((unsigned long)dst & 0x3) {
+		if (!count)
+			return;
+		count--;
+		*(unsigned char *)dst = ioread8(port);
+		dst += 1;
+	}
+
+	/* Main loop: combine four port reads into one 32-bit store. */
+	while (count >= 4) {
+		unsigned int w;
+
+		count -= 4;
+		w = ioread8(port);
+		w |= ioread8(port) << 8;
+		w |= ioread8(port) << 16;
+		w |= ioread8(port) << 24;
+		*(unsigned int *)dst = w;
+		dst += 4;
+	}
+
+	/* Tail: at most three remaining bytes. */
+	while (count) {
+		--count;
+		*(unsigned char *)dst = ioread8(port);
+		dst += 1;
+	}
+}
+EXPORT_SYMBOL(ioread8_rep);
+
+/* Legacy port-space string read: COUNT bytes from PORT into DST. */
+void insb(unsigned long port, void *dst, unsigned long count)
+{
+	ioread8_rep(ioport_map(port, 1), dst, count);
+}
+EXPORT_SYMBOL(insb);
+
+/*
+ * Read COUNT 16-bit words from port PORT into memory starting at
+ * DST.  DST must be at least short aligned.  This is used by the
+ * IDE driver to read disk sectors.  Performance is important, but
+ * the interface seems to be slow: just using the inlined version
+ * of the inw() breaks things.
+ */
+void ioread16_rep(const void __iomem *port, void *dst, unsigned long count)
+{
+	/* One leading 16-bit copy if DST is not 32-bit aligned. */
+	if (unlikely((unsigned long)dst & 0x3)) {
+		if (!count)
+			return;
+		BUG_ON((unsigned long)dst & 0x1);
+		count--;
+		*(unsigned short *)dst = ioread16(port);
+		dst += 2;
+	}
+
+	/* DST is now 32-bit aligned: combine two reads per store. */
+	while (count >= 2) {
+		unsigned int w;
+
+		count -= 2;
+		w = ioread16(port);
+		w |= ioread16(port) << 16;
+		*(unsigned int *)dst = w;
+		dst += 4;
+	}
+
+	if (count)
+		*(unsigned short *)dst = ioread16(port);
+}
+EXPORT_SYMBOL(ioread16_rep);
+
+/* Legacy port-space string read: COUNT 16-bit words from PORT into DST. */
+void insw(unsigned long port, void *dst, unsigned long count)
+{
+	ioread16_rep(ioport_map(port, 2), dst, count);
+}
+EXPORT_SYMBOL(insw);
+
+
+/*
+ * Read COUNT 32-bit words from port PORT into memory starting at
+ * DST.  Now works with any alignment in DST.  Performance is important,
+ * but the interface seems to be slow: just using the inlined version
+ * of the inl() breaks things.
+ */
+void ioread32_rep(const void __iomem *port, void *dst, unsigned long count)
+{
+	if (unlikely((unsigned long)dst & 0x3)) {
+		/* Misaligned DST: packed struct forces unaligned stores. */
+		while (count--) {
+			struct S { int x __packed; };
+			((struct S *)dst)->x = ioread32(port);
+			dst += 4;
+		}
+	} else {
+		/* Buffer 32-bit aligned.  */
+		while (count--) {
+			*(unsigned int *)dst = ioread32(port);
+			dst += 4;
+		}
+	}
+}
+EXPORT_SYMBOL(ioread32_rep);
+
+/* Legacy port-space string read: COUNT 32-bit words from PORT into DST. */
+void insl(unsigned long port, void *dst, unsigned long count)
+{
+	ioread32_rep(ioport_map(port, 4), dst, count);
+}
+EXPORT_SYMBOL(insl);
+
+
+/*
+ * Like insb but in the opposite direction.
+ * Don't worry as much about doing aligned memory transfers:
+ * doing byte reads the "slow" way isn't nearly as slow as
+ * doing byte writes the slow way (no r-m-w cycle).
+ */
+void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count)
+{
+	const unsigned char *src = xsrc;
+
+	while (count--)
+		iowrite8(*src++, port);
+}
+EXPORT_SYMBOL(iowrite8_rep);
+
+/* Legacy port-space string write: COUNT bytes from SRC to PORT. */
+void outsb(unsigned long port, const void *src, unsigned long count)
+{
+	iowrite8_rep(ioport_map(port, 1), src, count);
+}
+EXPORT_SYMBOL(outsb);
+
+
+/*
+ * Like insw but in the opposite direction.  This is used by the IDE
+ * driver to write disk sectors.  Performance is important, but the
+ * interface seems to be slow: just using the inlined version of the
+ * outw() breaks things.
+ */
+void iowrite16_rep(void __iomem *port, const void *src, unsigned long count)
+{
+	/* One leading 16-bit copy if SRC is not 32-bit aligned. */
+	if (unlikely((unsigned long)src & 0x3)) {
+		if (!count)
+			return;
+		BUG_ON((unsigned long)src & 0x1);
+		iowrite16(*(unsigned short *)src, port);
+		src += 2;
+		--count;
+	}
+
+	/* SRC is now 32-bit aligned: one load feeds two 16-bit writes. */
+	while (count >= 2) {
+		unsigned int w;
+
+		count -= 2;
+		w = *(unsigned int *)src;
+		src += 4;
+		iowrite16(w >> 0, port);
+		iowrite16(w >> 16, port);
+	}
+
+	if (count)
+		iowrite16(*(unsigned short *)src, port);
+}
+EXPORT_SYMBOL(iowrite16_rep);
+
+/* Legacy port-space string write: COUNT 16-bit words from SRC to PORT. */
+void outsw(unsigned long port, const void *src, unsigned long count)
+{
+	iowrite16_rep(ioport_map(port, 2), src, count);
+}
+EXPORT_SYMBOL(outsw);
+
+
+/*
+ * Like insl but in the opposite direction.  This is used by the IDE
+ * driver to write disk sectors.  Works with any alignment in SRC.
+ * Performance is important, but the interface seems to be slow:
+ * just using the inlined version of the outl() breaks things.
+ */
+void iowrite32_rep(void __iomem *port, const void *src, unsigned long count)
+{
+	if (unlikely((unsigned long)src & 0x3)) {
+		/* Misaligned SRC: packed struct forces unaligned loads. */
+		while (count--) {
+			struct S { int x __packed; };
+			iowrite32(((struct S *)src)->x, port);
+			src += 4;
+		}
+	} else {
+		/* Buffer 32-bit aligned.  */
+		while (count--) {
+			iowrite32(*(unsigned int *)src, port);
+			src += 4;
+		}
+	}
+}
+EXPORT_SYMBOL(iowrite32_rep);
+
+/* Legacy port-space string write: COUNT 32-bit words from SRC to PORT. */
+void outsl(unsigned long port, const void *src, unsigned long count)
+{
+	iowrite32_rep(ioport_map(port, 4), src, count);
+}
+EXPORT_SYMBOL(outsl);
+
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ * This needs to be optimized.
+ */
+void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
+{
+	/*
+	 * Optimize co-aligned transfers.  Everything else gets handled
+	 * a byte at a time.
+	 */
+
+	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
+		count -= 8;
+		do {
+			*(u64 *)to = __raw_readq(from);
+			count -= 8;
+			to += 8;
+			from += 8;
+		} while (count >= 0);
+		count += 8;
+	}
+
+	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
+		count -= 4;
+		do {
+			*(u32 *)to = __raw_readl(from);
+			count -= 4;
+			to += 4;
+			from += 4;
+		} while (count >= 0);
+		count += 4;
+	}
+
+	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
+		count -= 2;
+		do {
+			*(u16 *)to = __raw_readw(from);
+			count -= 2;
+			to += 2;
+			from += 2;
+		} while (count >= 0);
+		count += 2;
+	}
+
+	/* Whatever is left is copied one byte at a time. */
+	while (count > 0) {
+		*(u8 *) to = __raw_readb(from);
+		count--;
+		to++;
+		from++;
+	}
+	/* Order the MMIO reads against subsequent accesses. */
+	mb();
+}
+EXPORT_SYMBOL(memcpy_fromio);
+
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ * This needs to be optimized.
+ */
+void memcpy_toio(volatile void __iomem *to, const void *from, long count)
+{
+	/*
+	 * Optimize co-aligned transfers.  Everything else gets handled
+	 * a byte at a time.
+	 * FIXME -- align FROM.
+	 */
+
+	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
+		count -= 8;
+		do {
+			__raw_writeq(*(const u64 *)from, to);
+			count -= 8;
+			to += 8;
+			from += 8;
+		} while (count >= 0);
+		count += 8;
+	}
+
+	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
+		count -= 4;
+		do {
+			__raw_writel(*(const u32 *)from, to);
+			count -= 4;
+			to += 4;
+			from += 4;
+		} while (count >= 0);
+		count += 4;
+	}
+
+	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
+		count -= 2;
+		do {
+			__raw_writew(*(const u16 *)from, to);
+			count -= 2;
+			to += 2;
+			from += 2;
+		} while (count >= 0);
+		count += 2;
+	}
+
+	/* Whatever is left is copied one byte at a time. */
+	while (count > 0) {
+		__raw_writeb(*(const u8 *) from, to);
+		count--;
+		to++;
+		from++;
+	}
+	/* Order the MMIO writes against subsequent accesses. */
+	mb();
+}
+EXPORT_SYMBOL(memcpy_toio);
+
+
+/*
+ * "memset" on IO memory space.
+ */
+void _memset_c_io(volatile void __iomem *to, unsigned long c, long count)
+{
+	/*
+	 * NOTE(review): C is written at byte, halfword, word and quadword
+	 * widths, so callers appear to pass the fill pattern already
+	 * replicated across all 8 bytes (as __memsetw does) -- verify.
+	 */
+
+	/* Handle any initial odd byte */
+	if (count > 0 && ((u64)to & 1)) {
+		__raw_writeb(c, to);
+		to++;
+		count--;
+	}
+
+	/* Handle any initial odd halfword */
+	if (count >= 2 && ((u64)to & 2)) {
+		__raw_writew(c, to);
+		to += 2;
+		count -= 2;
+	}
+
+	/* Handle any initial odd word */
+	if (count >= 4 && ((u64)to & 4)) {
+		__raw_writel(c, to);
+		to += 4;
+		count -= 4;
+	}
+
+	/*
+	 * Handle all full-sized quadwords: we're aligned
+	 * (or have a small count)
+	 */
+	count -= 8;
+	if (count >= 0) {
+		do {
+			__raw_writeq(c, to);
+			to += 8;
+			count -= 8;
+		} while (count >= 0);
+	}
+	count += 8;
+
+	/* The tail is word-aligned if we still have count >= 4 */
+	if (count >= 4) {
+		__raw_writel(c, to);
+		to += 4;
+		count -= 4;
+	}
+
+	/* The tail is half-word aligned if we have count >= 2 */
+	if (count >= 2) {
+		__raw_writew(c, to);
+		to += 2;
+		count -= 2;
+	}
+
+	/* And finally, one last byte.. */
+	if (count)
+		__raw_writeb(c, to);
+	/* Order the MMIO writes against subsequent accesses. */
+	mb();
+}
+EXPORT_SYMBOL(_memset_c_io);
+
+/*
+ * Map a legacy I/O port number to a CPU address.  Ports below
+ * 0x100000 are rebased into the LPC legacy window on the host, or
+ * the virtual PCI legacy window otherwise; larger values are taken
+ * as already being bus addresses.
+ */
+void __iomem *ioport_map(unsigned long port, unsigned int size)
+{
+	unsigned long io_offset;
+
+	if (port < 0x100000) {
+		io_offset = is_in_host() ? LPC_LEGACY_IO : PCI_VT_LEGACY_IO;
+		port = port | io_offset;
+	}
+
+	return __va(port);
+}
+EXPORT_SYMBOL(ioport_map);
+
+/* Nothing to undo: ioport_map() creates no mapping of its own. */
+void ioport_unmap(void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(ioport_unmap);
diff --git a/arch/sw_64/lib/iomap_copy.c b/arch/sw_64/lib/iomap_copy.c
new file mode 100644
index 000000000000..1c75bd602d7e
--- /dev/null
+++ b/arch/sw_64/lib/iomap_copy.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* NOTE(review): include targets were stripped in this copy of the patch -- restore from upstream. */
+#include
+#include
+
+/**
+ * __iowrite32_copy - copy data to MMIO space, in 32-bit units
+ * @to: destination, in MMIO space (must be 32-bit aligned)
+ * @from: source (must be 32-bit aligned)
+ * @count: number of 32-bit quantities to copy
+ *
+ * Copy data from kernel space to MMIO space, in units of 32 bits at a
+ * time.
Order of access is not guaranteed, nor is a memory barrier + * performed afterwards. + */ +void __iowrite32_copy(void __iomem *to, + const void *from, + size_t count) +{ + u32 __iomem *dst = to; + const u32 *src = from; + const u32 *end = src + count; + + while (src < end) { + __raw_writel(*src++, dst++); + mb(); + } + +} + +/** + * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units + * @to: destination, in MMIO space (must be 64-bit aligned) + * @from: source (must be 64-bit aligned) + * @count: number of 64-bit quantities to copy + * + * Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a + * time. Order of access is not guaranteed, nor is a memory barrier + * performed afterwards. + */ +void __iowrite64_copy(void __iomem *to, + const void *from, + size_t count) +{ + u64 __iomem *dst = to; + const u64 *src = from; + const u64 *end = src + count; + + while (src < end) { + __raw_writeq(*src++, dst++); + mb(); + } +} diff --git a/arch/sw_64/lib/memcpy.S b/arch/sw_64/lib/memcpy.S new file mode 100644 index 000000000000..31c422b393ee --- /dev/null +++ b/arch/sw_64/lib/memcpy.S @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Reasonably optimized memcpy() routine for the sw64 + * + * - memory accessed as aligned quadwords only + * - uses bcmpge to compare 8 bytes in parallel + * + * Temp usage notes: + * $1, $2, - scratch + */ +#include + .set noreorder + .set noat + + .align 4 + .globl memcpy + .ent memcpy +memcpy: + .frame $30, 0, $26, 0 + .prologue 0 + + mov $16, $0 + ble $18, $nomoredata + xor $16, $17, $1 + and $1, 7, $1 + + bne $1, $misaligned + /* source and dest are same mod 8 address */ + and $16, 7, $1 + beq $1, $both_0mod8 + + /* + * source and dest are same misalignment. move a byte at a time + * until a 0mod8 alignment for both is reached. 
+ * At least one byte more to move + */ + +$head_align: + ldbu $1, 0($17) + subl $18, 1, $18 + addl $17, 1, $17 + stb $1, 0($16) + addl $16, 1, $16 + and $16, 7, $1 + ble $18, $nomoredata + bne $1, $head_align + +$both_0mod8: + cmple $18, 127, $1 + bne $1, $no_unroll + and $16, 63, $1 + beq $1, $do_unroll + +$single_head_quad: + ldl $1, 0($17) + subl $18, 8, $18 + addl $17, 8, $17 + + stl $1, 0($16) + addl $16, 8, $16 + and $16, 63, $1 + bne $1, $single_head_quad + +$do_unroll: + addl $16, 64, $7 + cmple $18, 127, $1 + bne $1, $tail_quads + +$unroll_body: + #wh64 ($7) + fillde 0($7) + + ldl $6, 0($17) + + ldl $4, 8($17) + ldl $5, 16($17) + addl $7, 64, $7 + + ldl $3, 24($17) + addl $16, 64, $1 + + addl $17, 32, $17 + stl $6, 0($16) + + stl $4, 8($16) + stl $5, 16($16) + subl $18, 192, $2 + + stl $3, 24($16) + addl $16, 32, $16 + + ldl $6, 0($17) + ldl $4, 8($17) + #cmovlt $2, $1, $7 + sellt $2, $1, $7, $7 + + ldl $5, 16($17) + ldl $3, 24($17) + addl $16, 32, $16 + subl $18, 64, $18 + + addl $17, 32, $17 + stl $6, -32($16) + stl $4, -24($16) + cmple $18, 63, $1 + + stl $5, -16($16) + stl $3, -8($16) + beq $1, $unroll_body + +$tail_quads: +$no_unroll: + .align 4 + subl $18, 8, $18 + blt $18, $less_than_8 + +$move_a_quad: + ldl $1, 0($17) + subl $18, 8, $18 + addl $17, 8, $17 + + stl $1, 0($16) + addl $16, 8, $16 + bge $18, $move_a_quad + +$less_than_8: + .align 4 + addl $18, 8, $18 + ble $18, $nomoredata + + /* Trailing bytes */ +$tail_bytes: + subl $18, 1, $18 + ldbu $1, 0($17) + addl $17, 1, $17 + + stb $1, 0($16) + addl $16, 1, $16 + bgt $18, $tail_bytes + + /* branching to exit takes 3 extra cycles, so replicate exit here */ + ret $31, ($26), 1 + +$misaligned: + mov $0, $4 + and $0, 7, $1 + beq $1, $dest_0mod8 + +$aligndest: + ble $18, $nomoredata + ldbu $1, 0($17) + subl $18, 1, $18 + addl $17, 1, $17 + + stb $1, 0($4) + addl $4, 1, $4 + and $4, 7, $1 + bne $1, $aligndest + + /* Source has unknown alignment, but dest is known to be 0mod8 */ +$dest_0mod8: + subl 
$18, 8, $18 + blt $18, $misalign_tail + ldl_u $3, 0($17) + +$mis_quad: + ldl_u $16, 8($17) + extll $3, $17, $3 + exthl $16, $17, $1 + bis $3, $1, $1 + + subl $18, 8, $18 + addl $17, 8, $17 + stl $1, 0($4) + mov $16, $3 + + addl $4, 8, $4 + bge $18, $mis_quad + +$misalign_tail: + addl $18, 8, $18 + ble $18, $nomoredata + +$misalign_byte: + ldbu $1, 0($17) + subl $18, 1, $18 + addl $17, 1, $17 + + stb $1, 0($4) + addl $4, 1, $4 + bgt $18, $misalign_byte + + +$nomoredata: + ret $31, ($26), 1 + + .end memcpy + EXPORT_SYMBOL(memcpy) +/* For backwards module compatibility. */ +__memcpy = memcpy +.globl __memcpy diff --git a/arch/sw_64/lib/memmove.S b/arch/sw_64/lib/memmove.S new file mode 100644 index 000000000000..3e34fcd5b217 --- /dev/null +++ b/arch/sw_64/lib/memmove.S @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Barely optimized memmove routine for sw64. + * This is hand-massaged output from the original memcpy.c. We defer to + * memcpy whenever possible; the backwards copy loops are not unrolled. + */ +#include + .set noat + .set noreorder + .text + + .align 4 + .globl memmove + .ent memmove +memmove: + ldgp $29, 0($27) + unop + .prologue 1 + + addl $16, $18, $4 + addl $17, $18, $5 + cmpule $4, $17, $1 # dest + n <= src + cmpule $5, $16, $2 # dest >= src + n + + bis $1, $2, $1 + mov $16, $0 + xor $16, $17, $2 + bne $1, memcpy # samegp + + and $2, 7, $2 # Test for src/dest co-alignment. 
+ and $16, 7, $1 + cmpule $16, $17, $3 + bne $3, $memmove_up # dest < src + + and $4, 7, $1 + bne $2, $misaligned_dn + unop + beq $1, $skip_aligned_byte_loop_head_dn + +$aligned_byte_loop_head_dn: + ldi $4, -1($4) + ldi $5, -1($5) + unop + ble $18, $egress + + ldbu $1, 0($5) + ldi $18, -1($18) + stb $1, 0($4) + + and $4, 7, $6 + bne $6, $aligned_byte_loop_head_dn + +$skip_aligned_byte_loop_head_dn: + ldi $18, -8($18) + blt $18, $skip_aligned_word_loop_dn + +$aligned_word_loop_dn: + ldl $1, -8($5) + ldi $5, -8($5) + ldi $18, -8($18) + + stl $1, -8($4) + ldi $4, -8($4) + bge $18, $aligned_word_loop_dn + +$skip_aligned_word_loop_dn: + ldi $18, 8($18) + bgt $18, $byte_loop_tail_dn + unop + ret $31, ($26), 1 + + .align 4 +$misaligned_dn: + fnop + unop + beq $18, $egress + +$byte_loop_tail_dn: + ldbu $1, -1($5) + ldi $5, -1($5) + ldi $4, -1($4) + + ldi $18, -1($18) + stb $1, 0($4) + + bgt $18, $byte_loop_tail_dn + br $egress + +$memmove_up: + mov $16, $4 + mov $17, $5 + bne $2, $misaligned_up + beq $1, $skip_aligned_byte_loop_head_up + +$aligned_byte_loop_head_up: + unop + ble $18, $egress + ldbu $1, 0($5) + + ldi $18, -1($18) + + ldi $5, 1($5) + stb $1, 0($4) + ldi $4, 1($4) + + and $4, 7, $6 + bne $6, $aligned_byte_loop_head_up + +$skip_aligned_byte_loop_head_up: + ldi $18, -8($18) + blt $18, $skip_aligned_word_loop_up + +$aligned_word_loop_up: + ldl $1, 0($5) + ldi $5, 8($5) + ldi $18, -8($18) + + stl $1, 0($4) + ldi $4, 8($4) + bge $18, $aligned_word_loop_up + +$skip_aligned_word_loop_up: + ldi $18, 8($18) + bgt $18, $byte_loop_tail_up + unop + ret $31, ($26), 1 + + .align 4 +$misaligned_up: + fnop + unop + beq $18, $egress + +$byte_loop_tail_up: + ldbu $1, 0($5) + ldi $18, -1($18) + + stb $1, 0($4) + + ldi $5, 1($5) + ldi $4, 1($4) + bgt $18, $byte_loop_tail_up + +$egress: + ret $31, ($26), 1 + + .end memmove + EXPORT_SYMBOL(memmove) diff --git a/arch/sw_64/lib/memset.S b/arch/sw_64/lib/memset.S new file mode 100644 index 000000000000..dbc4d775c7ea --- /dev/null +++ 
b/arch/sw_64/lib/memset.S @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This is an efficient (and small) implementation of the C library "memset()" + * function for the sw. + * + * (C) Copyright 1996 Linus Torvalds + * + * This routine is "moral-ware": you are free to use it any way you wish, and + * the only obligation I put on you is a moral one: if you make any improvements + * to the routine, please send me your improvements for me to use similarly. + * + * The scheduling comments are according to the documentation (and done by + * hand, so they might well be incorrect, please do tell me about it..) + */ + +#include + + .set noat + .set noreorder +.text + .globl memset + .globl __memset + .globl ___memset + .globl __memsetw + .globl __constant_c_memset + + .ent ___memset +.align 5 +___memset: + .frame $30, 0, $26, 0 + .prologue 0 + + and $17, 255, $1 + inslb $17, 1, $17 + bis $17, $1, $17 + sll $17, 16, $1 + + bis $17, $1, $17 + sll $17, 32, $1 + bis $17, $1, $17 + ldl_u $31, 0($30) + +.align 5 +__constant_c_memset: + addl $18, $16, $6 + bis $16, $16, $0 + xor $16, $6, $1 + ble $18, end + + bic $1, 7, $1 + beq $1, within_one_quad + and $16, 7, $3 + beq $3, aligned + + bis $16, $16, $5 + subl $3, 8, $3 + addl $18, $3, $18 + subl $16, $3, $16 + + eqv $3, $31, $3 + addl $3, 1, $3 +unaligned_start_loop: + stb $17, 0($5) + subl $3, 1, $3 + addl $5, 1, $5 + bgt $3, unaligned_start_loop + + +.align 4 +aligned: + sra $18, 3, $3 + and $18, 7, $18 + bis $16, $16, $5 + beq $3, no_quad + +/*added by JJ*/ + ldi $3, -8($3) + blt $3, nounrol + +.align 3 +wloop: + fillde 256($5) + stl $17, 0($5) + stl $17, 8($5) + stl $17, 16($5) + stl $17, 24($5) + subl $3, 8, $3 + stl $17, 32($5) + stl $17, 40($5) + stl $17, 48($5) + stl $17, 56($5) + addl $5, 0x40, $5 + bge $3, wloop + +nounrol: + addl $3, 8, $3 + beq $3, no_quad +/*end JJ*/ + +.align 3 +loop: + stl $17, 0($5) + subl $3, 1, $3 + addl $5, 8, $5 + bne $3, loop + +no_quad: + bis $31, $31, $31 + beq $18, end 
+ and $6, 7, $6 +no_quad_loop: + stb $17, 0($5) + subl $6, 1, $6 + addl $5, 1, $5 + bgt $6, no_quad_loop + ret $31, ($26), 1 + +.align 3 +within_one_quad: + bis $18, $18, $1 + bis $16, $16, $5 +within_one_quad_loop: + stb $17, 0($5) + subl $1, 1, $1 + addl $5, 1, $5 + bgt $1, within_one_quad_loop + +end: + ret $31, ($26), 1 + .end ___memset + EXPORT_SYMBOL(___memset) + + .align 5 + .ent __memsetw +__memsetw: + .prologue 0 + + inslh $17, 0, $1 + inslh $17, 2, $2 + inslh $17, 4, $3 + or $1, $2, $1 + inslh $17, 6, $4 + or $1, $3, $1 + or $1, $4, $17 + br __constant_c_memset + + .end __memsetw + EXPORT_SYMBOL(__memsetw) + +memset = ___memset +EXPORT_SYMBOL(memset) +__memset = ___memset +EXPORT_SYMBOL(__memset) diff --git a/arch/sw_64/lib/strcpy.S b/arch/sw_64/lib/strcpy.S new file mode 100644 index 000000000000..61b6141f88e2 --- /dev/null +++ b/arch/sw_64/lib/strcpy.S @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Optimized strcpy() for SW64 + + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * Copy a null-terminated string from SRC to DST. 
+ * + * Input: + * $16: DST, clobbered + * $17: SRC, clobbered + * + * Output: + * $0: DST + * + * Temporaries: + * $1: unaligned parts of addr (0 means aligned addr) + * $4: current data to copy (could have 1 byte or 8 bytes) + * $5: parts of current data, compare result + * $6: number of bytes left to copy + * + * Tag naming: + * co: SRC and DST are co-aligned + * mis: SRC and DST are not co-aligned + * a: SRC or DST has aligned address + * una: SRC or DST has unaligned address + * + */ + +#include + + .text + .align 4 + .globl strcpy + .ent strcpy +strcpy: + .frame $30, 0, $26 + .prologue 0 + + bis $31, $16, $0 # set return value + + xor $16, $17, $1 + and $1, 7, $1 + bne $1, $mis_aligned + +/* src and dst are co-aligned */ + and $16, 7, $1 + bne $1, $co_una_head + +/* do the copy in loop, for (co)-aligned src and dst with (a)ligned addr */ +$co_a_loop: + ldl $4, 0($17) + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + stl $4, 0($16) + addl $17, 8, $17 + addl $16, 8, $16 + br $31, $co_a_loop + +/* src and dst are co-aligned but have unaligned address */ +$co_una_head: + ldl_u $4, 0($17) + extll $4, $16, $4 + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + ldi $6, 8($31) + subl $6, $1, $6 + addl $17, $6, $17 # prepare addr of middle part + +/* copy the unaligned part in loop */ +$co_una_head_loop: + stb $4, 0($16) + addl $16, 1, $16 + subl $6, 1, $6 + beq $6, $co_a_loop + addl $4, 1, $4 + br $31, $co_una_head_loop + +/* src and dst are not co-aligned */ +$mis_aligned: + and $16, 7, $1 + beq $1, $mis_a_dst + ldi $6, 8($31) + subl $6, $1, $6 + +/* copy the first few bytes to make dst aligned */ +$mis_una_head_loop: + bis $31, $31, $6 + ldbu $4, 0($17) + stb $4, 0($16) + beq $4, $out # we have reached null, return + addl $17, 1, $17 + addl $16, 1, $16 + subl $6, 1, $6 + beq $6, $mis_a_dst + br $31, $mis_una_head_loop + +/* dst has aligned addr */ +$mis_a_dst: + and $17, 7, $1 + +$mis_a_dst_loop: + ldl_u $4, 0($17) + ldl_u $5, 7($17) + extll $4, $1, 
$4 + exthl $5, $1, $5 + bis $4, $5, $4 + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + stl $4, 0($16) + addl $17, 8, $17 + addl $16, 8, $16 + br $31, $mis_a_dst_loop + +/* we have find null in the last few bytes, copy one byte each time */ +$tail_loop: + ldbu $4, 0($17) + stb $4, 0($16) + beq $4, $out # we have reached null, return + addl $17, 1, $17 + addl $16, 1, $16 + br $31, $tail_loop + +/* copy is done, return */ +$out: + ret + + .end strcpy + EXPORT_SYMBOL(strcpy) diff --git a/arch/sw_64/lib/strncpy.S b/arch/sw_64/lib/strncpy.S new file mode 100644 index 000000000000..f50c70599bb4 --- /dev/null +++ b/arch/sw_64/lib/strncpy.S @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Optimized strncpy() for SW64 + + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * Copy a string from SRC to DST. At most SIZE bytes are coppied. + * + * Input: + * $16: DST, clobbered + * $17: SRC, clobbered + * $18: SIZE, clobbered + * + * Output: + * $0: DST + * + * Temporaries: + * $1: unaligned parts of addr (0 means aligned addr) + * $4: current data to copy (could have 1 byte or 8 bytes) + * $5: parts of current data, compare result + * $6: number of bytes left to copy in head + * + * Tag naming: + * co: SRC and DST are co-aligned + * mis: SRC and DST are not co-aligned + * a: SRC or DST has aligned address + * una: SRC or DST has unaligned address + * + */ + +#include + + .text + .align 4 + .globl strncpy + .ent strncpy +strncpy: + .frame $30, 0, $26 + .prologue 0 + + bis $31, $16, $0 # set return value + beq $18, $out # return if size is 0 + cmplt $18, 8, $5 # size less than 8, do 1-byte copy + bne $5, $tail_loop + + xor $16, $17, $1 + and $1, 7, $1 + bne $1, $mis_aligned + +/* src and dst are co-aligned */ + and $16, 7, $1 + bne $1, $co_una_head + +/* do the copy in loop, for (co)-aligned src and dst with (a)ligned addr */ +$co_a_loop: + ldl $4, 0($17) + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + subl $18, 8, $5 + blt $5, 
$tail_loop # we have fewer than 8 bytes to copy + stl $4, 0($16) + subl $18, 8, $18 + beq $18, $out + addl $17, 8, $17 + addl $16, 8, $16 + br $31, $co_a_loop + +/* src and dst are co-aligned but have unaligned address */ +$co_una_head: + ldl_u $4, 0($17) + extll $4, $16, $4 + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + ldi $6, 8($31) + subl $6, $1, $6 + addl $17, $6, $17 # prepare addr of middle part + subl $18, $6, $18 # sub bytes going to be copy + +/* copy the unaligned part in loop */ +$co_una_head_loop: + stb $4, 0($16) + addl $16, 1, $16 + subl $6, 1, $6 + beq $6, $co_a_loop + addl $4, 1, $4 + br $31, $co_una_head_loop + +/* src and dst are not co-aligned */ +$mis_aligned: + and $16, 7, $1 + beq $1, $mis_a_dst + +$mis_una_head: + ldi $6, 8($31) + subl $6, $1, $6 + +/* copy the first few bytes to make dst aligned */ +$mis_una_head_loop: + ldbu $4, 0($17) + stb $4, 0($16) + subl $18, 1, $18 + beq $18, $out + beq $4, $null_padding # we have reached null + addl $17, 1, $17 + addl $16, 1, $16 + subl $6, 1, $6 + beq $6, $mis_a_dst + br $31, $mis_una_head_loop + +/* dst has aligned addr */ +$mis_a_dst: + and $17, 7, $1 + +$mis_a_dst_loop: + ldl_u $4, 0($17) + ldl_u $5, 7($17) + extll $4, $1, $4 + exthl $5, $1, $5 + bis $4, $5, $4 + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + subl $18, 8, $5 + blt $5, $tail_loop # we have fewer than 8 bytes to copy + stl $4, 0($16) + subl $18, 8, $18 + beq $5, $out + addl $17, 8, $17 + addl $16, 8, $16 + br $31, $mis_a_dst_loop + +/* we have find null in the last few bytes, copy one byte each time */ +$tail_loop: + ldbu $4, 0($17) + stb $4, 0($16) + subl $18, 1, $18 + beq $18, $out + beq $4, $null_padding # we have reached null + addl $17, 1, $17 + addl $16, 1, $16 + br $31, $tail_loop + +$null_padding: + addl $16, 1, $16 + subl $18, 1, $18 + stb $31, 0($16) + beq $18, $out + br $31, $null_padding + +/* copy is done, return */ +$out: + ret + + .end strncpy + EXPORT_SYMBOL(strncpy) diff --git 
a/arch/sw_64/lib/uaccess_flushcache.c b/arch/sw_64/lib/uaccess_flushcache.c
new file mode 100644
index 000000000000..353d5ac15248
--- /dev/null
+++ b/arch/sw_64/lib/uaccess_flushcache.c
@@ -0,0 +1,42 @@
// SPDX-License-Identifier: GPL-2.0

/* NOTE(review): the #include targets below were lost in extraction --
 * presumably <linux/uaccess.h>, <linux/mm.h>, <asm/cacheflush.h> and
 * similar; confirm against the original tree. */
#include
#include
#include
#include

/*
 * memcpy() followed by a full cache flush: makes the copied range
 * durable by writing back the entire cache (no ranged flush is used
 * anywhere in this file).
 */
void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	flush_cache_all();
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);

/* Copy 'len' bytes starting at 'offset' within 'page' into 'to', flushing. */
void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	memcpy_flushcache(to, page_address(page) + offset, len);
}

/*
 * Copy from user space, then flush.  Returns the number of bytes that
 * could NOT be copied (the __copy_from_user() convention); 0 on success.
 */
unsigned long __copy_user_flushcache(void *to, const void __user *from,
		unsigned long n)
{
	unsigned long rc = __copy_from_user(to, from, n);

	flush_cache_all();
	return rc;
}

#ifdef CONFIG_ARCH_HAS_PMEM_API
/* pmem write-back hook: addr/size are ignored, the whole cache is flushed. */
void arch_wb_cache_pmem(void *addr, size_t size)
{
	flush_cache_all();
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

/* pmem invalidate hook: likewise implemented as a full cache flush. */
void arch_invalidate_pmem(void *addr, size_t size)
{
	flush_cache_all();
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif
diff --git a/arch/sw_64/lib/udelay.c b/arch/sw_64/lib/udelay.c
new file mode 100644
index 000000000000..59ca8a97d748
--- /dev/null
+++ b/arch/sw_64/lib/udelay.c
@@ -0,0 +1,59 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1993, 2000 Linus Torvalds
 *
 * Delay routines, using a pre-computed "loops_per_jiffy" value.
 */

#include

/*
 * Use only for very small delays (< 1 msec).
 *
 * The active part of our cycle counter is only 32-bits wide, and
 * we're treating the difference between two marks as signed.  On
 * a 1GHz box, that's about 2 seconds.
+ */ +void __delay(unsigned long loops) +{ + unsigned long tmp; + + __asm__ __volatile__( + " rtc %0\n" + " addl %1,%0,%1\n" + "1: rtc %0\n" + " subl %1,%0,%0\n" + " bgt %0,1b" + : "=&r" (tmp), "=r" (loops) : "1"(loops)); +} +EXPORT_SYMBOL(__delay); + +void udelay(unsigned long usecs) +{ + unsigned long loops = usecs * get_cpu_freq() / 1000000; + unsigned long tmp; + + __asm__ __volatile__( + " rtc %0\n" + " addl %1,%0,%1\n" + "1: rtc %0\n" + " subl %1,%0,%0\n" + " bgt %0,1b" + : "=&r" (tmp), "=r" (loops) : "1"(loops)); +} +EXPORT_SYMBOL(udelay); + +void ndelay(unsigned long nsecs) +{ + unsigned long loops = nsecs * get_cpu_freq() / 1000000000; + unsigned long tmp; + + __asm__ __volatile__( + " rtc %0\n" + " addl %1,%0,%1\n" + "1: rtc %0\n" + " subl %1,%0,%0\n" + " bgt %0,1b" + : "=&r" (tmp), "=r" (loops) : "1"(loops)); +} +EXPORT_SYMBOL(ndelay); -- Gitee From bafc6cba229bef5f3f136865b4fe1daa8f82c823 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:39 +0800 Subject: [PATCH 0295/2138] anolis: sw64: add VDSO support ANBZ: #4688 Add VDSO support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/vdso.h | 116 ++++++++++++++ arch/sw_64/kernel/vdso.c | 143 ++++++++++++++++++ arch/sw_64/kernel/vdso/.gitignore | 4 + arch/sw_64/kernel/vdso/Makefile | 74 +++++++++ arch/sw_64/kernel/vdso/so2s.sh | 4 + arch/sw_64/kernel/vdso/vdso.S | 30 ++++ arch/sw_64/kernel/vdso/vdso.lds.S | 89 +++++++++++ arch/sw_64/kernel/vdso/vgettimeofday.c | 201 +++++++++++++++++++++++++ arch/sw_64/kernel/vdso/vrt_sigreturn.S | 68 +++++++++ 9 files changed, 729 insertions(+) create mode 100644 arch/sw_64/include/asm/vdso.h create mode 100644 arch/sw_64/kernel/vdso.c create mode 100644 arch/sw_64/kernel/vdso/.gitignore create mode 100644 arch/sw_64/kernel/vdso/Makefile create mode 100755 arch/sw_64/kernel/vdso/so2s.sh create mode 100644 arch/sw_64/kernel/vdso/vdso.S create mode 100644 arch/sw_64/kernel/vdso/vdso.lds.S create mode 100644 arch/sw_64/kernel/vdso/vgettimeofday.c create mode 100644 arch/sw_64/kernel/vdso/vrt_sigreturn.S diff --git a/arch/sw_64/include/asm/vdso.h b/arch/sw_64/include/asm/vdso.h new file mode 100644 index 000000000000..7a2e23c648f3 --- /dev/null +++ b/arch/sw_64/include/asm/vdso.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 SW64 Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#ifndef _ASM_SW64_VDSO_H +#define _ASM_SW64_VDSO_H + +#ifdef __KERNEL__ + +/* + * Default link address for the vDSO. + * Since we randomise the VDSO mapping, there's little point in trying + * to prelink this. + */ +#define VDSO_LBASE 0x0 + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#define VDSO_SYMBOL(base, name) \ +({ \ + extern const unsigned long __vdso_##name; \ + ((unsigned long)(base) + __vdso_##name); \ +}) + + +struct vdso_data { + u64 xtime_sec; + u64 xtime_nsec; + u64 wall_to_mono_sec; + u64 wall_to_mono_nsec; + u32 cs_shift; + u32 cs_mult; + u64 cs_cycle_last; + u64 cs_mask; + s32 tz_minuteswest; + s32 tz_dsttime; + u32 seq_count; +}; + +static inline unsigned long get_vdso_base(void) +{ + unsigned long addr, tmp; + __asm__ __volatile__( + " br %1, 1f\n" + "1: ldi %0, 0(%1)\n" + : "=r" (addr), "=&r" (tmp) + ::); + + addr &= ~(PAGE_SIZE - 1); + return addr; +} + +static inline const struct vdso_data *get_vdso_data(void) +{ + return (const struct vdso_data *)(get_vdso_base() - PAGE_SIZE); +} + +static inline u32 vdso_data_read_begin(const struct vdso_data *data) +{ + u32 seq; + + while (true) { + seq = READ_ONCE(data->seq_count); + if (likely(!(seq & 1))) { + /* Paired with smp_wmb() in vdso_data_write_*(). */ + smp_rmb(); + return seq; + } + + cpu_relax(); + } +} + +static inline bool vdso_data_read_retry(const struct vdso_data *data, + u32 start_seq) +{ + /* Paired with smp_wmb() in vdso_data_write_*(). */ + smp_rmb(); + return unlikely(data->seq_count != start_seq); +} + +static inline void vdso_data_write_begin(struct vdso_data *data) +{ + ++data->seq_count; + + /* Ensure sequence update is written before other data page values. */ + smp_wmb(); +} + +static inline void vdso_data_write_end(struct vdso_data *data) +{ + /* Ensure data values are written before updating sequence again. 
*/ + smp_wmb(); + ++data->seq_count; +} + + +#endif /* !__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_SW64_VDSO_H */ diff --git a/arch/sw_64/kernel/vdso.c b/arch/sw_64/kernel/vdso.c new file mode 100644 index 000000000000..b4126cbaa4bd --- /dev/null +++ b/arch/sw_64/kernel/vdso.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +#include +#include +#include +#include + +#include + +extern char vdso_start, vdso_end; +static unsigned long vdso_pages; +static struct page **vdso_pagelist; + +/* + * The vDSO data page. + */ +static union { + struct vdso_data data; + u8 page[PAGE_SIZE]; +} vdso_data_store __page_aligned_data; +struct vdso_data *vdso_data = &vdso_data_store.data; + +static struct vm_special_mapping vdso_spec[2]; + +static int __init vdso_init(void) +{ + int i; + + if (memcmp(&vdso_start, "\177ELF", 4)) { + pr_err("vDSO is not a valid ELF object!\n"); + return -EINVAL; + } + + vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; + pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n", + vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data); + + /* Allocate the vDSO pagelist, plus a page for the data. */ + vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *), + GFP_KERNEL); + if (vdso_pagelist == NULL) + return -ENOMEM; + + /* Grab the vDSO data page. */ + vdso_pagelist[0] = virt_to_page(vdso_data); + + /* Grab the vDSO code pages. 
*/ + for (i = 0; i < vdso_pages; i++) + vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE); + + /* Populate the special mapping structures */ + vdso_spec[0] = (struct vm_special_mapping) { + .name = "[vvar]", + .pages = vdso_pagelist, + }; + + vdso_spec[1] = (struct vm_special_mapping) { + .name = "[vdso]", + .pages = &vdso_pagelist[1], + }; + + return 0; +} +arch_initcall(vdso_init); + +int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp) +{ + struct mm_struct *mm = current->mm; + unsigned long vdso_base, vdso_text_len, vdso_mapping_len; + void *ret; + + vdso_text_len = vdso_pages << PAGE_SHIFT; + /* Be sure to map the data page */ + vdso_mapping_len = vdso_text_len + PAGE_SIZE; + + if (down_write_killable(&mm->mmap_lock)) + return -EINTR; + vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); + if (IS_ERR_VALUE(vdso_base)) { + ret = ERR_PTR(vdso_base); + goto up_fail; + } + ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE, + VM_READ|VM_MAYREAD, + &vdso_spec[0]); + if (IS_ERR(ret)) + goto up_fail; + + vdso_base += PAGE_SIZE; + mm->context.vdso = (void *)vdso_base; + ret = _install_special_mapping(mm, vdso_base, vdso_text_len, + VM_READ|VM_EXEC| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + &vdso_spec[1]); + if (IS_ERR(ret)) + goto up_fail; + + up_write(&mm->mmap_lock); + return 0; + +up_fail: + mm->context.vdso = NULL; + up_write(&mm->mmap_lock); + return PTR_ERR(ret); +} + +void update_vsyscall(struct timekeeper *tk) +{ + vdso_data_write_begin(vdso_data); + + vdso_data->xtime_sec = tk->xtime_sec; + vdso_data->xtime_nsec = tk->tkr_mono.xtime_nsec; + vdso_data->wall_to_mono_sec = tk->wall_to_monotonic.tv_sec; + vdso_data->wall_to_mono_nsec = tk->wall_to_monotonic.tv_nsec; + vdso_data->cs_shift = tk->tkr_mono.shift; + + vdso_data->cs_mult = tk->tkr_mono.mult; + vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; + vdso_data->cs_mask = tk->tkr_mono.mask; + + vdso_data_write_end(vdso_data); +} + +void 
update_vsyscall_tz(void) +{ + vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; + vdso_data->tz_dsttime = sys_tz.tz_dsttime; +} diff --git a/arch/sw_64/kernel/vdso/.gitignore b/arch/sw_64/kernel/vdso/.gitignore new file mode 100644 index 000000000000..2b6a8b0ed7ca --- /dev/null +++ b/arch/sw_64/kernel/vdso/.gitignore @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +vdso.lds +vdso.so.dbg.tmp +vdso-syms.S diff --git a/arch/sw_64/kernel/vdso/Makefile b/arch/sw_64/kernel/vdso/Makefile new file mode 100644 index 000000000000..190cc345dbb9 --- /dev/null +++ b/arch/sw_64/kernel/vdso/Makefile @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: GPL-2.0 +# Symbols present in the vdso +vdso-syms = rt_sigreturn gettimeofday + +# Files to link into the vdso +obj-vdso = $(patsubst %, v%.o, $(vdso-syms)) + +# Build rules +targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S +obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) + +obj-y += vdso.o vdso-syms.o +extra-y += vdso.lds +CPPFLAGS_vdso.lds += -P -C -U$(ARCH) + +# vDSO code runs in userspace and -pg doesn't help with profiling anyway. +CFLAGS_REMOVE_vdso.o = -pg +CFLAGS_REMOVE_vrt_sigreturn.o = -pg +CFLAGS_REMOVE_vgettimeofday.o = -pg + +ifdef CONFIG_FEEDBACK_COLLECT +# vDSO code runs in userspace, not collecting feedback data. 
+CFLAGS_REMOVE_vdso.o = -ffeedback-generate +CFLAGS_REMOVE_vrt_sigreturn.o = -ffeedback-generate +CFLAGS_REMOVE_vgettimeofday.o = -ffeedback-generate +endif + +# Disable gcov profiling for VDSO code +GCOV_PROFILE := n + +# Force dependency +$(obj)/vdso.o: $(obj)/vdso.so + +# link rule for the .so file, .lds has to be first +SYSCFLAGS_vdso.so.dbg = $(c_flags) +$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE + $(call if_changed,vdsold) +SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ + $(call cc-ldoption, -Wl$(comma)--hash-style=both) + +$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE + $(call if_changed,so2s) + +# strip rule for the .so file +$(obj)/%.so: OBJCOPYFLAGS := -S +$(obj)/%.so: $(obj)/%.so.dbg FORCE + $(call if_changed,objcopy) + +# actual build commands +# The DSO images are built using a special linker script +# Add -lgcc so tilepro gets static muldi3 and lshrdi3 definitions. +# Make sure only to export the intended __vdso_xxx symbol offsets. +quiet_cmd_vdsold = VDSOLD $@ + cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \ + -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \ + $(CROSS_COMPILE)objcopy \ + $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ + rm $@.tmp + +# Extracts symbol offsets from the VDSO, converting them into an assembly file +# that contains the same symbols at the same offsets. 
+quiet_cmd_so2s = SO2S $@ + cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@ + +# install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ + +vdso.so: $(obj)/vdso.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + + +vdso_install: vdso.so diff --git a/arch/sw_64/kernel/vdso/so2s.sh b/arch/sw_64/kernel/vdso/so2s.sh new file mode 100755 index 000000000000..e1763af8e730 --- /dev/null +++ b/arch/sw_64/kernel/vdso/so2s.sh @@ -0,0 +1,4 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ + +grep "__vdso_" | sed 's/\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_.*\)*/.globl\t\2\n\2:\n.quad\t0x\1/' diff --git a/arch/sw_64/kernel/vdso/vdso.S b/arch/sw_64/kernel/vdso/vdso.S new file mode 100644 index 000000000000..edd9be27db9d --- /dev/null +++ b/arch/sw_64/kernel/vdso/vdso.S @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +#include +#include + + __PAGE_ALIGNED_DATA + + .globl vdso_start, vdso_end + .balign PAGE_SIZE +vdso_start: + .incbin "arch/sw_64/kernel/vdso/vdso.so" + .balign PAGE_SIZE +vdso_end: + + .previous diff --git a/arch/sw_64/kernel/vdso/vdso.lds.S b/arch/sw_64/kernel/vdso/vdso.lds.S new file mode 100644 index 000000000000..de1782ccb7b6 --- /dev/null +++ b/arch/sw_64/kernel/vdso/vdso.lds.S @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * GNU linker script for the VDSO library. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * Heavily based on the vDSO linker scripts for other archs. + */ + +#include +#include +#include + +OUTPUT_FORMAT("elf64-sw_64") +OUTPUT_ARCH(sw_64) + +SECTIONS +{ + PROVIDE(_vdso_data = . - PAGE_SIZE); + . = VDSO_LBASE + SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + + . = ALIGN(16); + .text : { *(.text*) } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + + .dynamic : { *(.dynamic) } :text :dynamic + + .rodata : { *(.rodata*) } :text + + _end = .; + PROVIDE(end = .); + + /DISCARD/ : { + *(.note.GNU-stack) + *(.data .data.* .gnu.linkonce.d.* .sdata*) + *(.bss .sbss .dynbss .dynsbss) + } +} + +/* + * We must supply the ELF program headers explicitly to get just one + * PT_LOAD segment, and set the flags explicitly to make segments read-only. + */ +PHDRS +{ + text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + +/* + * This controls what symbols we export from the DSO. 
+ */ +VERSION +{ + LINUX_2.6 { + global: + __vdso_rt_sigreturn; + __vdso_gettimeofday; + __vdso_clock_gettime; + local: *; + }; +} diff --git a/arch/sw_64/kernel/vdso/vgettimeofday.c b/arch/sw_64/kernel/vdso/vgettimeofday.c new file mode 100644 index 000000000000..0aa16e988e88 --- /dev/null +++ b/arch/sw_64/kernel/vdso/vgettimeofday.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + */ + +#include + +#include +#include +#include +#include + +static __always_inline int syscall_fallback(clockid_t clkid, struct timespec64 *ts) +{ + register int r0 asm("$0"); + register unsigned long r19 asm("$19"); + asm volatile( + " mov %0, $16\n" + " mov %1, $17\n" + " ldi $0, %2\n" + " sys_call %3\n" + :: "r"(clkid), "r"(ts), "i"(__NR_clock_gettime), "i"(HMC_callsys) + : "$0", "$16", "$17", "$19"); + if (unlikely(r19)) + return -r0; + else + return r0; +} + +static __always_inline int do_realtime_coarse(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ts->tv_nsec = data->xtime_nsec >> data->cs_shift; + } while (vdso_data_read_retry(data, start_seq)); + + return 0; +} + + +static __always_inline int do_monotonic_coarse(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + u64 to_mono_sec; + u64 to_mono_nsec; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ts->tv_nsec = data->xtime_nsec >> data->cs_shift; + + to_mono_sec = 
data->wall_to_mono_sec; + to_mono_nsec = data->wall_to_mono_nsec; + } while (vdso_data_read_retry(data, start_seq)); + + ts->tv_sec += to_mono_sec; + timespec64_add_ns(ts, to_mono_nsec); + + return 0; +} + +#if defined(CONFIG_SUBARCH_C3B) +static __always_inline u64 read_longtime(void) +{ + register unsigned long __r0 __asm__("$0"); + + __asm__ __volatile__( + "sys_call %1" : "=r"(__r0) : "i" (HMC_longtime)); + + return __r0; +} +#elif defined(CONFIG_SUBARCH_C4) +static __always_inline u64 read_longtime(void) +{ + return read_csr(CSR_SHTCLOCK); +} +#endif + +static __always_inline u64 get_ns(const struct vdso_data *data) +{ + u64 cycle_now, delta, nsec; + + cycle_now = read_longtime(); + delta = (cycle_now - data->cs_cycle_last) & data->cs_mask; + + nsec = (delta * data->cs_mult) + data->xtime_nsec; + nsec >>= data->cs_shift; + + return nsec; +} + + +static __always_inline int do_realtime(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + u64 ns; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ns = get_ns(data); + } while (vdso_data_read_retry(data, start_seq)); + + ts->tv_nsec = 0; + timespec64_add_ns(ts, ns); + + return 0; +} + +static __always_inline int do_monotonic(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + u64 ns; + u64 to_mono_sec; + u64 to_mono_nsec; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ns = get_ns(data); + + to_mono_sec = data->wall_to_mono_sec; + to_mono_nsec = data->wall_to_mono_nsec; + } while (vdso_data_read_retry(data, start_seq)); + + ts->tv_sec += to_mono_sec; + ts->tv_nsec = 0; + timespec64_add_ns(ts, ns + to_mono_nsec); + + return 0; +} + + +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) +{ + const struct vdso_data *data = get_vdso_data(); + struct timespec64 ts; + int ret; + + ret = do_realtime(&ts, data); + if (ret) + return ret; + + if (tv) { + tv->tv_sec = ts.tv_sec; 
+ tv->tv_usec = ts.tv_nsec / 1000; + } + + if (tz) { + tz->tz_minuteswest = data->tz_minuteswest; + tz->tz_dsttime = data->tz_dsttime; + } + + return 0; +} + +int __vdso_clock_gettime(clockid_t clkid, struct timespec64 *ts) +{ + const struct vdso_data *data = get_vdso_data(); + int ret; + + switch (clkid) { + case CLOCK_REALTIME_COARSE: + ret = do_realtime_coarse(ts, data); + break; + case CLOCK_MONOTONIC_COARSE: + ret = do_monotonic_coarse(ts, data); + break; + case CLOCK_REALTIME: + ret = do_realtime(ts, data); + break; + case CLOCK_MONOTONIC: + ret = do_monotonic(ts, data); + break; + default: + /* fall back to a syscall */ + ret = syscall_fallback(clkid, ts); + } + + return ret; +} diff --git a/arch/sw_64/kernel/vdso/vrt_sigreturn.S b/arch/sw_64/kernel/vdso/vrt_sigreturn.S new file mode 100644 index 000000000000..cdbf6501ad64 --- /dev/null +++ b/arch/sw_64/kernel/vdso/vrt_sigreturn.S @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Sigreturn trampoline for returning from a signal when the SA_RESTORER + * flag is not set. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +#include +#include +#include +#include + + .text + + .macro SIGCONTEXT_REGS_I base, from = 0 + .cfi_offset \from, \base + (4 + \from) * 8 + .if 30 - \from + SIGCONTEXT_REGS_I \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS_F base, from = 32 + .cfi_offset \from, \base + (4 + 32 + 1) * 8 + (\from - 32) * 32 + .if 62 - \from + SIGCONTEXT_REGS_F \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS_V base, from = 67 + .cfi_offset \from, \base + (4 + 32 + 1) * 8 + ((\from - 67) & 0x1f) * 32 + (((\from - 67) >> 5) + 1) * 8 + .if 161 - \from + SIGCONTEXT_REGS_V \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS base + SIGCONTEXT_REGS_I \base + SIGCONTEXT_REGS_F \base + SIGCONTEXT_REGS_V \base + .cfi_offset 63, \base + (4 + 32 + 1) * 8 + 32 * 32 + .cfi_offset 64, \base + 2 * 8 + .endm + + .cfi_startproc + .cfi_return_column 64 + .cfi_signal_frame + SIGCONTEXT_REGS (-RT_SIGFRAME_SIZE + RT_SIGFRAME_MCTX) + .cfi_def_cfa_offset RT_SIGFRAME_SIZE + + nop +ENTRY(__vdso_rt_sigreturn) + mov $sp, $16 + ldi $0, __NR_rt_sigreturn + sys_call HMC_callsys +ENDPROC(__vdso_rt_sigreturn) + .cfi_endproc -- Gitee From 6f53606557766c108a4a576bb184bed49b0658b8 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:35 +0800 Subject: [PATCH 0296/2138] anolis: sw64: add SMP support ANBZ: #4688 Add Symmetric Multi-Processing (SMP) support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/smp.h | 95 ++++++ arch/sw_64/kernel/smp.c | 578 +++++++++++++++++++++++++++++++++++ 2 files changed, 673 insertions(+) create mode 100644 arch/sw_64/include/asm/smp.h create mode 100644 arch/sw_64/kernel/smp.c diff --git a/arch/sw_64/include/asm/smp.h b/arch/sw_64/include/asm/smp.h new file mode 100644 index 000000000000..3a2fcf62b30c --- /dev/null +++ b/arch/sw_64/include/asm/smp.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SMP_H +#define _ASM_SW64_SMP_H + +#include +#include +#include +#include +#include + +#include +#include +#include + +/* HACK: Cabrio WHAMI return value is bogus if more than 8 bits used.. :-( */ + +extern cpumask_t core_start; + +static inline unsigned long +read_vpcr(void) +{ + register unsigned long __r0 __asm__("$0"); + __asm__ __volatile__( + "sys_call %1 #rvpcr" + : "=r"(__r0) + : "i" (0x39) + : "$1", "$22", "$23", "$24", "$25"); + return __r0; +} + +#ifdef CONFIG_SMP +/* SMP initialization hook for setup_arch */ +void __init setup_smp(void); + +#include + +/* smp reset control block */ +struct smp_rcb_struct { + void (*restart_entry)(unsigned long args); + unsigned long restart_args; + unsigned long ready; + unsigned long init_done; +}; + +#define INIT_SMP_RCB ((struct smp_rcb_struct *) __va(0x820000UL)) + + +#ifdef GENERATING_ASM_OFFSETS +#define raw_smp_processor_id() (0) +#else +#include +#define raw_smp_processor_id() (*((unsigned int *)((void *)current + TASK_CPU))) +#endif +#define hard_smp_processor_id() cpu_to_rcid(raw_smp_processor_id()) + +/* The map from sequential logical cpu number to hard cid. 
*/ +extern int __cpu_to_rcid[NR_CPUS]; +#define cpu_to_rcid(cpu) __cpu_to_rcid[cpu] +#define cpu_physical_id(cpu) __cpu_to_rcid[cpu] + +extern unsigned long tidle_pcb[NR_CPUS]; +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); + +#ifdef CONFIG_HOTPLUG_CPU +int __cpu_disable(void); +void __cpu_die(unsigned int cpu); +#endif /* CONFIG_HOTPLUG_CPU */ + +#else /* CONFIG_SMP */ +#define hard_smp_processor_id() 0 +#define smp_call_function_on_cpu(func, info, wait, cpu) ({ 0; }) +/* The map from sequential logical cpu number to hard cid. */ +extern int __cpu_to_rcid[NR_CPUS]; +#define cpu_to_rcid(cpu) __cpu_to_rcid[0] +#define cpu_physical_id(cpu) __cpu_to_rcid[0] +#endif /* CONFIG_SMP */ + +#define NO_PROC_ID (-1) + +static inline void send_ipi(int cpu, unsigned long type) +{ + int rcid; + + rcid = cpu_to_rcid(cpu); + + if (is_in_guest()) + hcall(HCALL_IVI, rcid, type, 0); + else + sendii(rcid, type, 0); +} + +#define reset_cpu(cpu) send_ipi((cpu), II_RESET) + +#endif /* _ASM_SW64_SMP_H */ diff --git a/arch/sw_64/kernel/smp.c b/arch/sw_64/kernel/smp.c new file mode 100644 index 000000000000..6d1aab4be1c0 --- /dev/null +++ b/arch/sw_64/kernel/smp.c @@ -0,0 +1,578 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw_64/kernel/smp.c + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "proto.h" + +struct smp_rcb_struct *smp_rcb; + +extern struct cpuinfo_sw64 cpu_data[NR_CPUS]; + +int smp_booted; + +void *idle_task_pointer[NR_CPUS]; + +/* State of each CPU */ +DEFINE_PER_CPU(int, cpu_state) = { 0 }; + +/* A collection of single bit ipi messages. */ +static struct { + unsigned long bits ____cacheline_aligned; +} ipi_data[NR_CPUS] __cacheline_aligned; + +enum ipi_message_type { + IPI_RESCHEDULE, + IPI_CALL_FUNC, + IPI_CPU_STOP, +}; + +int smp_num_cpus = 1; /* Number that came online. 
*/ +EXPORT_SYMBOL(smp_num_cpus); + +#define send_sleep_interrupt(cpu) send_ipi((cpu), II_SLEEP) +#define send_wakeup_interrupt(cpu) send_ipi((cpu), II_WAKE) + +/* + * Where secondaries begin a life of C. + */ +void smp_callin(void) +{ + int cpuid = smp_processor_id(); + + local_irq_disable(); + + if (cpu_online(cpuid)) { + pr_err("??, cpu 0x%x already present??\n", cpuid); + BUG(); + } + set_cpu_online(cpuid, true); + + /* clear ksp, usp */ + wrksp(0); + wrusp(0); + + /* Set trap vectors. */ + trap_init(); + + /* Set interrupt vector. */ + if (is_in_host()) { + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI0_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI1_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI2_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI3_INTEN); + } + wrent(entInt, 0); + + /* Get our local ticker going. */ + sw64_setup_timer(); + + /* All kernel threads share the same mm context. */ + mmgrab(&init_mm); + current->active_mm = &init_mm; + /* update csr:ptbr */ + update_ptbr_sys(virt_to_phys(init_mm.pgd)); + + /* inform the notifiers about the new cpu */ + notify_cpu_starting(cpuid); + + per_cpu(cpu_state, cpuid) = CPU_ONLINE; + per_cpu(hard_node_id, cpuid) = rcid_to_domain_id(cpu_to_rcid(cpuid)); + + /* Must have completely accurate bogos. */ + local_irq_enable(); + + /* Cpu0 init preempt_count at start_kernel, other smp cpus do here. */ + preempt_disable(); + + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + + +/* + * Set ready for secondary cpu. + */ +static inline void set_secondary_ready(int cpuid) +{ + smp_rcb->ready = cpuid; +} + +/* + * Convince the hmcode to have a secondary cpu begin execution. + */ +static int secondary_cpu_start(int cpuid, struct task_struct *idle) +{ + unsigned long timeout; + /* + * Precalculate the target ksp. + */ + idle_task_pointer[cpuid] = idle; + + set_cpu_online(cpuid, false); + wmb(); + + set_secondary_ready(cpuid); + + /* Wait 10 seconds for secondary cpu. 
*/ + timeout = jiffies + 10*HZ; + while (time_before(jiffies, timeout)) { + if (cpu_online(cpuid)) + goto started; + udelay(10); + barrier(); + } + pr_err("SMP: Processor %d failed to start.\n", cpuid); + return -1; + +started: + store_cpu_topology(cpuid); + numa_add_cpu(cpuid); + return 0; +} + +/* + * Bring one cpu online. + */ +static int smp_boot_one_cpu(int cpuid, struct task_struct *idle) +{ + per_cpu(cpu_state, cpuid) = CPU_UP_PREPARE; + + return secondary_cpu_start(cpuid, idle); +} + +static void __init process_nr_cpu_ids(void) +{ + int i; + + for (i = nr_cpu_ids; i < NR_CPUS; i++) { + set_cpu_possible(i, false); + set_cpu_present(i, false); + } + + nr_cpu_ids = num_possible_cpus(); +} + +void __init smp_rcb_init(void) +{ + smp_rcb = INIT_SMP_RCB; + memset(smp_rcb, 0, sizeof(struct smp_rcb_struct)); + /* Setup SMP_RCB fields that uses to activate secondary CPU */ + smp_rcb->restart_entry = __smp_callin; + smp_rcb->init_done = 0xDEADBEEFUL; + mb(); +} + +/* + * Called from setup_arch. Detect an SMP system and which processors + * are present. + */ +void __init setup_smp(void) +{ + int i = 0, num = 0; + + init_cpu_possible(cpu_none_mask); + + /* For unified kernel, NR_CPUS is the maximum possible value */ + for (; i < NR_CPUS; i++) { + if (cpu_to_rcid(i) != -1) { + set_cpu_possible(num, true); + store_cpu_data(num); + if (!cpumask_test_cpu(i, &cpu_offline)) + set_cpu_present(num, true); + num++; + } + } + + process_nr_cpu_ids(); + + pr_info("Detected %u possible CPU(s), %u CPU(s) are present\n", + nr_cpu_ids, num_present_cpus()); + + smp_rcb_init(); +} +/* + * Called by smp_init prepare the secondaries + */ +void __init smp_prepare_cpus(unsigned int max_cpus) +{ + unsigned int cpu; + /* Take care of some initial bookkeeping. 
*/ + memset(ipi_data, 0, sizeof(ipi_data)); + + init_cpu_topology(); + store_cpu_topology(smp_processor_id()); + numa_add_cpu(smp_processor_id()); + + for_each_possible_cpu(cpu) { + numa_store_cpu_info(cpu); + } + + /* Nothing to do on a UP box, or when told not to. */ + if (nr_cpu_ids == 1 || max_cpus == 0) { + init_cpu_possible(cpumask_of(0)); + init_cpu_present(cpumask_of(0)); + pr_info("SMP mode deactivated.\n"); + return; + } + + pr_info("SMP starting up secondaries.\n"); +} + +void smp_prepare_boot_cpu(void) +{ + int me = smp_processor_id(); + + per_cpu(cpu_state, me) = CPU_ONLINE; +} + +int vt_cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + pr_info("%s: cpu = %d\n", __func__, cpu); + + wmb(); + smp_rcb->ready = 0; + if (smp_booted) { + /* irq must be disabled before reset vCPU */ + reset_cpu(cpu); + } + smp_boot_one_cpu(cpu, tidle); + + return cpu_online(cpu) ? 0 : -EIO; +} + +#ifdef CONFIG_SUBARCH_C3B +DECLARE_STATIC_KEY_FALSE(use_tc_as_sched_clock); +#endif + +int __cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + if (is_in_guest()) + return vt_cpu_up(cpu, tidle); + + wmb(); + smp_rcb->ready = 0; + + /* send wake up signal */ + send_wakeup_interrupt(cpu); + /* send reset signal */ + if (smp_booted) { + if (is_in_host()) { + reset_cpu(cpu); + } else { + while (1) + cpu_relax(); + } + } + smp_boot_one_cpu(cpu, tidle); + +#ifdef CONFIG_SUBARCH_C3B + if (static_branch_likely(&use_tc_as_sched_clock)) { + if (smp_booted) { + tc_sync_clear(); + smp_call_function_single(cpu, tc_sync_ready, NULL, 0); + tc_sync_set(); + } + } +#endif + + return cpu_online(cpu) ? 
0 : -EIO; +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ + smp_booted = 1; + pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); +} + +int setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} + + +static void send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) +{ + int i; + + mb(); + for_each_cpu(i, to_whom) + set_bit(operation, &ipi_data[i].bits); + + mb(); + for_each_cpu(i, to_whom) + send_ipi(i, II_II0); +} + +static void ipi_cpu_stop(int cpu) +{ + local_irq_disable(); + set_cpu_online(cpu, false); + while (1) + wait_for_interrupt(); +} + +void handle_ipi(struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + unsigned long *pending_ipis = &ipi_data[cpu].bits; + unsigned long ops; + + mb(); /* Order interrupt and bit testing. */ + while ((ops = xchg(pending_ipis, 0)) != 0) { + mb(); /* Order bit clearing and data access. */ + do { + unsigned long which; + + which = ops & -ops; + ops &= ~which; + which = __ffs(which); + + switch (which) { + case IPI_RESCHEDULE: + scheduler_ipi(); + break; + + case IPI_CALL_FUNC: + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + break; + + case IPI_CPU_STOP: + ipi_cpu_stop(cpu); + break; + + default: + pr_crit("Unknown IPI on CPU %d: %lu\n", cpu, which); + break; + } + } while (ops); + + mb(); /* Order data access and bit testing. 
*/ + } + + cpu_data[cpu].ipi_count++; +} + +void arch_smp_send_reschedule(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); +} +EXPORT_SYMBOL(arch_smp_send_reschedule); + +void smp_send_stop(void) +{ + unsigned long timeout; + + if (num_online_cpus() > 1) { + cpumask_t mask; + + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + + if (system_state <= SYSTEM_RUNNING) + pr_crit("SMP: stopping secondary CPUs\n"); + send_ipi_message(&mask, IPI_CPU_STOP); + } + + /* Wait up to one second for other CPUs to stop */ + timeout = USEC_PER_SEC; + while (num_online_cpus() > 1 && timeout--) + udelay(1); + + if (num_online_cpus() > 1) + pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", + cpumask_pr_args(cpu_online_mask)); +} + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + send_ipi_message(mask, IPI_CALL_FUNC); +} + +void arch_send_call_function_single_ipi(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); +} + +static void ipi_flush_tlb_all(void *ignored) +{ + local_flush_tlb_all(); +} + +void flush_tlb_all(void) +{ + /* Although we don't have any data to pass, we do want to + * synchronize with the other processors. + */ + on_each_cpu(ipi_flush_tlb_all, NULL, 1); +} + +static void ipi_flush_tlb_mm(void *x) +{ + local_flush_tlb_mm((struct mm_struct *)x); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + + /* happens as a result of exit_mmap() + * Shall we clear mm->context.asid[] here? 
+ */ + if (atomic_read(&mm->mm_users) == 0) + return; + + preempt_disable(); + + if (atomic_read(&mm->mm_users) != 1 || mm != current->mm) { + on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1); + } else { + int cpu, this_cpu = smp_processor_id(); + + for_each_online_cpu(cpu) { + if (cpu != this_cpu && mm->context.asid[cpu]) + mm->context.asid[cpu] = 0; + } + local_flush_tlb_mm(mm); + } + + preempt_enable(); +} +EXPORT_SYMBOL(flush_tlb_mm); + +struct flush_tlb_info { + struct vm_area_struct *vma; + unsigned long addr; +#define start addr + unsigned long end; +}; + +static void ipi_flush_tlb_page(void *x) +{ + struct flush_tlb_info *info = x; + + local_flush_tlb_page(info->vma, info->addr); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) +{ + struct mm_struct *mm = vma->vm_mm; + + preempt_disable(); + + if (atomic_read(&mm->mm_users) != 1 || mm != current->mm) { + struct flush_tlb_info info = { + .vma = vma, + .addr = addr, + }; + on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_page, &info, 1); + } else { + int cpu, this_cpu = smp_processor_id(); + + for_each_online_cpu(cpu) { + if (cpu != this_cpu && mm->context.asid[cpu]) + mm->context.asid[cpu] = 0; + } + local_flush_tlb_page(vma, addr); + } + + preempt_enable(); +} +EXPORT_SYMBOL(flush_tlb_page); + +/* It always flush the whole user tlb by now. To be optimized. 
*/ +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) +{ + flush_tlb_mm(vma->vm_mm); +} +EXPORT_SYMBOL(flush_tlb_range); + +static void ipi_flush_tlb_kernel_range(void *x) +{ + struct flush_tlb_info *info = x; + + local_flush_tlb_kernel_range(info->start, info->end); +} + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + struct flush_tlb_info info = { + .start = start, + .end = end, + }; + + on_each_cpu(ipi_flush_tlb_kernel_range, &info, 1); +} +EXPORT_SYMBOL(flush_tlb_kernel_range); + +#ifdef CONFIG_HOTPLUG_CPU +int __cpu_disable(void) +{ + int cpu = smp_processor_id(); + + set_cpu_online(cpu, false); + remove_cpu_topology(cpu); + numa_remove_cpu(cpu); + clear_tasks_mm_cpumask(cpu); + return 0; +} + +void __cpu_die(unsigned int cpu) +{ + /* We don't do anything here: idle task is faking death itself. */ + unsigned int i; + + for (i = 0; i < 10; i++) { + /* They ack this in play_dead by setting CPU_DEAD */ + if (per_cpu(cpu_state, cpu) == CPU_DEAD) { + if (system_state == SYSTEM_RUNNING) + pr_info("CPU %u is now offline\n", cpu); + smp_rcb->ready = 0; + return; + } + msleep(100); + } + pr_err("CPU %u didn't die...\n", cpu); +} + +void arch_cpu_idle_dead(void) +{ + idle_task_exit(); + mb(); + __this_cpu_write(cpu_state, CPU_DEAD); + fixup_irqs(); + local_irq_disable(); + + if (is_in_guest()) { + hcall(HCALL_SET_CLOCKEVENT, 0, 0, 0); + hcall(HCALL_STOP, 0, 0, 0); + } else { + wrtimer(0); + } + +#ifdef CONFIG_SUSPEND + sleepen(); + send_sleep_interrupt(smp_processor_id()); + while (1) + asm("nop"); +#else + asm volatile("memb"); + asm volatile("halt"); +#endif +} +#endif -- Gitee From 3f48e3edc9d5c1b57b14f8294f4b1c2898ee2327 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:28 +0800 Subject: [PATCH 0297/2138] anolis: sw64: add NUMA support ANBZ: #4688 Add Non Uniform Memory Access (NUMA) support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/numa.h | 36 +++ arch/sw_64/mm/numa.c | 466 ++++++++++++++++++++++++++++++++++ 2 files changed, 502 insertions(+) create mode 100644 arch/sw_64/include/asm/numa.h create mode 100644 arch/sw_64/mm/numa.c diff --git a/arch/sw_64/include/asm/numa.h b/arch/sw_64/include/asm/numa.h new file mode 100644 index 000000000000..a2e3171caff1 --- /dev/null +++ b/arch/sw_64/include/asm/numa.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_NUMA_H +#define _ASM_SW64_NUMA_H + +#include +#include + +#ifdef CONFIG_NUMA +extern nodemask_t numa_nodes_parsed __initdata; +extern int numa_off; + +struct numa_memblk { + u64 start; + u64 end; + int nid; +}; + +#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) +struct numa_meminfo { + int nr_blks; + struct numa_memblk blk[NR_NODE_MEMBLKS]; +}; +extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); +extern void numa_clear_node(unsigned int cpu); +extern void __init numa_set_distance(int from, int to, int distance); +extern void __init early_map_cpu_to_node(unsigned int cpu, int nid); + +#else /* CONFIG_NUMA */ + +static inline void numa_clear_node(unsigned int cpu) { } +static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { } + +#endif /* CONFIG_NUMA */ + +#endif /* _ASM_SW64_NUMA_H */ diff --git a/arch/sw_64/mm/numa.c b/arch/sw_64/mm/numa.c new file mode 100644 index 000000000000..fcf1f97a7840 --- /dev/null +++ b/arch/sw_64/mm/numa.c @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DISCONTIGMEM NUMA sw64 support. 
+ */ + +#include +#include +#include +#include + +#include + +int cpu_to_node_map[NR_CPUS]; +cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +EXPORT_SYMBOL(node_to_cpumask_map); + +struct numa_node_desc_t numa_nodes_desc[MAX_NUMNODES]; +nodemask_t numa_nodes_parsed __initdata; + +static int numa_distance_cnt; +static u8 *numa_distance; +int numa_off; + +static __init int numa_setup(char *opt) +{ + if (!opt) + return -EINVAL; + if (!strncmp(opt, "off", 3)) + numa_off = 1; + return 0; +} +early_param("numa", numa_setup); + +/* + * Allocate node_to_cpumask_map based on number of available nodes + * Requires node_possible_map to be valid. + * + * Note: cpumask_of_node() is not valid until after this is done. + * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) + */ +static void __init setup_node_to_cpumask_map(void) +{ + int node; + + /* setup nr_node_ids if not done yet */ + if (nr_node_ids == MAX_NUMNODES) + setup_nr_node_ids(); + + /* allocate and clear the mapping */ + for (node = 0; node < nr_node_ids; node++) { + alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); + cpumask_clear(node_to_cpumask_map[node]); + } + + /* cpumask_of_node() will now work */ + pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); +} + +/** + * numa_add_memblk - Set node id to memblk + * @nid: NUMA node ID of the new memblk + * @start: Start address of the new memblk + * @end: End address of the new memblk + * + * RETURNS: + * 0 on success, -errno on failure. 
+ */ +int __init numa_add_memblk(int nid, u64 start, u64 end) +{ + int ret; + + ret = memblock_set_node(start, (end - start), &memblock.memory, nid); + if (ret < 0) { + pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n", + start, (end - 1), nid); + return ret; + } + + node_set(nid, numa_nodes_parsed); + return ret; +} + +/** + * Initialize NODE_DATA for a node on the local memory + */ +static void __init setup_node_data(int nid, unsigned long start_pfn, unsigned long end_pfn) +{ + const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); + u64 nd_pa; + void *nd; + int tnid; + + if (start_pfn >= end_pfn) + pr_info("Initmem setup node %d []\n", nid); + + nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); + nd = __va(nd_pa); + + /* report and initialize */ + pr_info("NODE_DATA [mem %#018llx-%#018llx]\n", + nd_pa, nd_pa + nd_size - 1); + tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); + if (tnid != nid) + pr_info("NODE_DATA(%d) on node %d\n", nid, tnid); + + node_data[nid] = nd; + memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); + NODE_DATA(nid)->node_id = nid; + NODE_DATA(nid)->node_start_pfn = start_pfn; + NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; +} + +/** + * numa_free_distance + * + * Free current distance table. + */ +void __init numa_free_distance(void) +{ + size_t size; + + if (!numa_distance) + return; + + size = numa_distance_cnt * numa_distance_cnt * + sizeof(numa_distance[0]); + + memblock_free(numa_distance, size); + numa_distance_cnt = 0; + numa_distance = NULL; +} + +/** + * + * Create a new NUMA distance table. 
+ * + */ +static int __init numa_alloc_distance(void) +{ + size_t size; + phys_addr_t phys; + int i, j; + + size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]); + phys = memblock_phys_alloc(size, PAGE_SIZE); + if (WARN_ON(!phys)) + return -ENOMEM; + + numa_distance = __va(phys); + numa_distance_cnt = nr_node_ids; + + /* fill with the default distances */ + for (i = 0; i < numa_distance_cnt; i++) + for (j = 0; j < numa_distance_cnt; j++) { + numa_distance[i * numa_distance_cnt + j] = i == j ? + LOCAL_DISTANCE : REMOTE_DISTANCE; + } + + pr_info("Initialized distance table, cnt=%d\n", numa_distance_cnt); + + return 0; +} + +/** + * numa_set_distance - Set inter node NUMA distance from node to node. + * @from: the 'from' node to set distance + * @to: the 'to' node to set distance + * @distance: NUMA distance + * + * Set the distance from node @from to @to to @distance. + * If distance table doesn't exist, a warning is printed. + * + * If @from or @to is higher than the highest known node or lower than zero + * or @distance doesn't make sense, the call is ignored. + * + */ +void __init numa_set_distance(int from, int to, int distance) +{ + if (!numa_distance) { + pr_warn_once("Warning: distance table not allocated yet\n"); + return; + } + + if (from >= numa_distance_cnt || to >= numa_distance_cnt || + from < 0 || to < 0) { + pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + if ((u8)distance != distance || + (from == to && distance != LOCAL_DISTANCE)) { + pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + numa_distance[from * numa_distance_cnt + to] = distance; +} + +/** + * Return NUMA distance @from to @to + */ +int __node_distance(int from, int to) +{ + if (from >= numa_distance_cnt || to >= numa_distance_cnt) + return from == to ? 
LOCAL_DISTANCE : REMOTE_DISTANCE; + return numa_distance[from * numa_distance_cnt + to]; +} +EXPORT_SYMBOL(__node_distance); + +static int __init numa_register_nodes(void) +{ + int nid; + struct memblock_region *mblk; + + /* Check that valid nid is set to memblks */ + for_each_mem_region(mblk) { + pr_info("memblk node %d [mem %#018llx-%#018llx]\n", + mblk->nid, mblk->base, + mblk->base + mblk->size - 1); + if (mblk->nid == NUMA_NO_NODE || mblk->nid >= MAX_NUMNODES) { + pr_warn("Warning: invalid memblk node %d [mem %#018llx-%#018llx]\n", + mblk->nid, mblk->base, + mblk->base + mblk->size - 1); + return -EINVAL; + } + } + + /* Finally register nodes */ + for_each_node_mask(nid, numa_nodes_parsed) { + unsigned long start_pfn, end_pfn; + + get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); + setup_node_data(nid, start_pfn, end_pfn); + node_set_online(nid); + } + + /* Setup online nodes to actual nodes */ + node_possible_map = numa_nodes_parsed; + + return 0; +} + +static int __init numa_init(int (*init_func)(void)) +{ + int ret; + + nodes_clear(numa_nodes_parsed); + nodes_clear(node_possible_map); + nodes_clear(node_online_map); + numa_free_distance(); + + ret = numa_alloc_distance(); + if (ret < 0) + return ret; + + ret = init_func(); + if (ret < 0) + return ret; + + if (nodes_empty(numa_nodes_parsed)) { + pr_info("No NUMA configuration found\n"); + return -EINVAL; + } + + ret = numa_register_nodes(); + if (ret < 0) + return ret; + + setup_node_to_cpumask_map(); + + return 0; +} + +static void __init get_numa_info_socket(void) +{ + int i; + + phys_addr_t base = 0; + + for (i = 0; i < MAX_NUMSOCKETS; i++) { + if (socket_desc[i].is_online) { + numa_nodes_desc[i].base = base; + numa_nodes_desc[i].size = socket_desc[i].socket_mem; + base += numa_nodes_desc[i].size; + } + } +} + +static int __init manual_numa_init(void) +{ + int ret, nid; + struct memblock_region *mblk; + phys_addr_t node_base, node_size, node_end; + + if (numa_off) { + pr_info("NUMA disabled\n"); /* Forced 
off on command line. */ + pr_info("Faking one node at [mem %#018llx-%#018llx]\n", + memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1); + for_each_mem_region(mblk) { + ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size); + if (!ret) + continue; + + pr_err("NUMA init failed\n"); + return ret; + } + } else { + get_numa_info_socket(); + + for (nid = 0; nid < MAX_NUMNODES; nid++) { + node_base = numa_nodes_desc[nid].base; + node_size = numa_nodes_desc[nid].size; + node_end = node_base + node_size; + ret = 0; + + if (!node_end) + continue; + + for_each_mem_region(mblk) { + if (mblk->base >= node_base && mblk->base < node_end) { + if (mblk->base + mblk->size < node_end) + ret = numa_add_memblk(nid, mblk->base, mblk->base + mblk->size); + else + ret = numa_add_memblk(nid, mblk->base, node_end); + } + } + + if (!node_size) { + memblock_add_node(node_base, node_size, nid, MEMBLOCK_NONE); + node_set(nid, numa_nodes_parsed); + pr_info("Setup empty node %d from %#llx\n", nid, node_base); + } + + if (!ret) + continue; + + pr_err("NUMA init failed for node %d, [mem %#018llx-%#018llx]", + nid, node_base, node_end - 1); + } + } + + return 0; +} + +void __init sw64_numa_init(void) +{ + if (!numa_off) { + if (!acpi_disabled && !numa_init(acpi_numa_init)) + return; + if (acpi_disabled && !numa_init(of_numa_init)) + return; + } + + numa_init(manual_numa_init); +} + +void cpu_set_node(void) +{ + int i; + + if (numa_off) { + for (i = 0; i < nr_cpu_ids; i++) + cpu_to_node_map[i] = 0; + } else { + int rr, default_node, cid; + + rr = first_node(node_online_map); + for (i = 0; i < nr_cpu_ids; i++) { + cid = cpu_to_rcid(i); + default_node = rcid_to_domain_id(cid); + if (node_online(default_node)) { + cpu_to_node_map[i] = default_node; + } else { + cpu_to_node_map[i] = rr; + rr = next_node(rr, node_online_map); + if (rr == MAX_NUMNODES) + rr = first_node(node_online_map); + } + } + } + /* + * Setup numa_node for cpu 0 before per_cpu area for booting. 
+ * Actual setup of numa_node will be done in native_smp_prepare_cpus(). + */ + set_cpu_numa_node(0, cpu_to_node_map[0]); +} + +void numa_store_cpu_info(unsigned int cpu) +{ + set_cpu_numa_node(cpu, cpu_to_node_map[cpu]); +} + +void __init early_map_cpu_to_node(unsigned int cpu, int nid) +{ + /* fallback to node 0 */ + if (nid < 0 || nid >= MAX_NUMNODES || numa_off) + nid = 0; + + cpu_to_node_map[cpu] = nid; + + /* + * We should set the numa node of cpu0 as soon as possible, because it + * has already been set up online before. cpu_to_node(0) will soon be + * called. + */ + if (!cpu) + set_cpu_numa_node(cpu, nid); +} + +#ifdef CONFIG_DEBUG_PER_CPU_MAPS +/* + * Returns a pointer to the bitmask of CPUs on Node 'node'. + */ +const struct cpumask *cpumask_of_node(int node) +{ + + if (node == NUMA_NO_NODE) { + pr_warn("%s: NUMA_NO_NODE\n", __func__); + return cpu_all_mask; + } + + if (WARN_ON(node < 0 || node >= nr_node_ids)) { + pr_warn("%s: invalid node %d\n", __func__, node); + return cpu_none_mask; + } + + if (WARN_ON(node_to_cpumask_map[node] == NULL)) { + pr_warn("%s: uninitialized node %d\n", __func__, node); + return cpu_online_mask; + } + + return node_to_cpumask_map[node]; +} +EXPORT_SYMBOL(cpumask_of_node); +#endif + +static void numa_update_cpu(unsigned int cpu, bool remove) +{ + int nid = cpu_to_node(cpu); + + if (nid == NUMA_NO_NODE) + return; + + if (remove) + cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]); + else + cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); +} + +void numa_add_cpu(unsigned int cpu) +{ + numa_update_cpu(cpu, false); +} + +void numa_remove_cpu(unsigned int cpu) +{ + numa_update_cpu(cpu, true); +} + +void numa_clear_node(unsigned int cpu) +{ + numa_remove_cpu(cpu); + set_cpu_numa_node(cpu, NUMA_NO_NODE); +} -- Gitee From d5a7addfb791d322c54cf6f1e5900e829f6c773d Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:05 +0800 Subject: [PATCH 0298/2138] anolis: sw64: add default configs ANBZ: #4688 Add default config 
files for SW64 based xuelang and junzhang platforms. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/configs/junzhang_defconfig | 667 +++++++++++++++++++++ arch/sw_64/configs/kata_guest_defconfig | 633 ++++++++++++++++++++ arch/sw_64/configs/kata_xuelang_defconfig | 616 ++++++++++++++++++++ arch/sw_64/configs/xuelang_defconfig | 668 ++++++++++++++++++++++ 4 files changed, 2584 insertions(+) create mode 100644 arch/sw_64/configs/junzhang_defconfig create mode 100644 arch/sw_64/configs/kata_guest_defconfig create mode 100644 arch/sw_64/configs/kata_xuelang_defconfig create mode 100644 arch/sw_64/configs/xuelang_defconfig diff --git a/arch/sw_64/configs/junzhang_defconfig b/arch/sw_64/configs/junzhang_defconfig new file mode 100644 index 000000000000..4f25770ca193 --- /dev/null +++ b/arch/sw_64/configs/junzhang_defconfig @@ -0,0 +1,667 @@ +CONFIG_LOCALVERSION="-junzhang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_SUBARCH_C4=y +CONFIG_SMP=y +CONFIG_SCHED_SMT=y +CONFIG_NR_CPUS=64 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_USE_OF=y +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +CONFIG_ACPI_TAD=y +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m 
+CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +# CONFIG_COMPAT_BRK is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA_AREAS=7 +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m 
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m 
+CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m 
+CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m 
+CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +# CONFIG_WIRELESS is not set +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIEAER=y +# CONFIG_PCIEASPM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_IOV=y +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y 
+CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_NET_FC=y +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_VIRTIO_NET=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_CAVIUM_PTP=y +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_I40E=y +CONFIG_I40EVF=y +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLXSW_CORE=y +CONFIG_MLXSW_PCI=y +CONFIG_MLXSW_I2C=y +CONFIG_MLXSW_MINIMAL=y +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# 
CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_WLAN is not set +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_PCI is not set +CONFIG_SERIAL_8250_SUNWAY=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SENSORS_PVT=y +CONFIG_SENSORS_LM75=y +CONFIG_SSB=y +CONFIG_DRM=y +CONFIG_DRM_RADEON=y +CONFIG_DRM_AST=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_MTHCA=m +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VIRTIO_PCI=y +# CONFIG_VIRTIO_PCI_LEGACY is not set +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y 
+CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_FRAME_POINTER is not set 
+CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set diff --git a/arch/sw_64/configs/kata_guest_defconfig b/arch/sw_64/configs/kata_guest_defconfig new file mode 100644 index 000000000000..8122155c1276 --- /dev/null +++ b/arch/sw_64/configs/kata_guest_defconfig @@ -0,0 +1,633 @@ +CONFIG_LOCALVERSION="-xuelang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# CONFIG_COMPAT_BRK is not set +CONFIG_CPUFREQ_DEBUGFS=y +# CONFIG_LOCK_MEMB is not set +CONFIG_SMP=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_USE_OF=y +CONFIG_SW64_BUILTIN_DTB=y +CONFIG_SW64_BUILTIN_DTB_NAME="chip_vt" +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +CONFIG_GOOGLE_FIRMWARE=y +CONFIG_SW64_SUSPEND_DEEPSLEEP_NONBOOT_CORE=y +CONFIG_SW64_SUSPEND_DEEPSLEEP_BOOTCORE=y +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_MEMORY_HOTPLUG=y 
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m 
+CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m 
+CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=y +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m 
+CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m 
+CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=y +CONFIG_VSOCKETS_DIAG=m +CONFIG_VIRTIO_VSOCKETS=y +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_JIT=y +# CONFIG_WIRELESS is not set +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_OF_OVERLAY=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_VIRTIO_BLK=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m 
+CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_SUNWAY=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_VIRTIO=y +# CONFIG_DEVPORT is not set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SENSORS_PVT=y +CONFIG_SENSORS_LM75=y +CONFIG_SSB=y +CONFIG_SUNWAY_SUPERIO_AST2400=y +CONFIG_DRM=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_FIRMWARE_EDID=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_SW64_LPC_INTC=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=y +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y 
+CONFIG_OVERLAY_FS_METACOPY=y +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_SQUASHFS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_9P_FS=y +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m 
+CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_ENABLE_MUST_CHECK is not set +# CONFIG_FRAME_POINTER is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set diff --git a/arch/sw_64/configs/kata_xuelang_defconfig b/arch/sw_64/configs/kata_xuelang_defconfig new file mode 100644 index 000000000000..f553f0e71dbf --- /dev/null +++ b/arch/sw_64/configs/kata_xuelang_defconfig @@ -0,0 +1,616 @@ +CONFIG_LOCALVERSION="-xuelang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# CONFIG_COMPAT_BRK is not set +CONFIG_CPUFREQ_DEBUGFS=y +# CONFIG_LOCK_MEMB is not set +CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_USE_OF=y +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +# CONFIG_SUSPEND is not set +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_PARTITION_ADVANCED=y 
+CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m 
+CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m 
+CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_NFCT=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m 
+CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m 
+CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=y +CONFIG_VSOCKETS_DIAG=m +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_JIT=y +CONFIG_NET_DROP_MONITOR=m +# CONFIG_WIRELESS is not set +CONFIG_CAIF=m +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m 
+CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_SUNWAY=y +# CONFIG_HW_RANDOM is not set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SSB=y +CONFIG_DRM=y +CONFIG_FIRMWARE_EDID=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_STAGING=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y 
+CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=m +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y 
+CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set diff --git a/arch/sw_64/configs/xuelang_defconfig b/arch/sw_64/configs/xuelang_defconfig new file mode 100644 index 000000000000..b1c0101d0089 --- /dev/null +++ b/arch/sw_64/configs/xuelang_defconfig @@ -0,0 +1,668 @@ +CONFIG_LOCALVERSION="-xuelang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_SMP=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_USE_OF=y +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +CONFIG_ACPI_TAD=y +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y 
+CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +# CONFIG_COMPAT_BRK is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA_AREAS=7 +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m 
+CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m 
+CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_LIMIT=m 
+CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +# 
CONFIG_WIRELESS is not set +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIEAER=y +# CONFIG_PCIEASPM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_IOV=y +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m 
+CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_NET_FC=y +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_VIRTIO_NET=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_CAVIUM_PTP=y +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_I40E=y +CONFIG_I40EVF=y +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLXSW_CORE=y +CONFIG_MLXSW_PCI=y +CONFIG_MLXSW_I2C=y +CONFIG_MLXSW_MINIMAL=y +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_WLAN is not set +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_PCI 
is not set +CONFIG_SERIAL_8250_SUNWAY=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_CHIP3=y +CONFIG_SPI_SPIDEV=y +CONFIG_SENSORS_PVT=y +CONFIG_SENSORS_LM75=y +CONFIG_SSB=y +CONFIG_DRM=y +CONFIG_DRM_RADEON=y +CONFIG_DRM_AST=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_MTHCA=m +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VIRTIO_PCI=y +# CONFIG_VIRTIO_PCI_LEGACY is not set +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_SUNWAY_IOMMU=y +CONFIG_SW64_LPC_INTC=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y 
+CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_FRAME_POINTER is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set -- Gitee From 5b208ce122742d466a51287d44af3f641b97599b Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:29 +0800 Subject: [PATCH 0299/2138] anolis: sw64: add PCI support ANBZ: #4688 Add basic PCI support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/pci.h | 163 ++++++++++ arch/sw_64/include/asm/pci_impl.h | 27 ++ arch/sw_64/kernel/pci-noop.c | 138 ++++++++ arch/sw_64/pci/Makefile | 8 + arch/sw_64/pci/pci-legacy.c | 508 ++++++++++++++++++++++++++++++ arch/sw_64/pci/pci-sysfs.c | 359 +++++++++++++++++++++ arch/sw_64/pci/pci.c | 436 +++++++++++++++++++++++++ 7 files changed, 1639 insertions(+) create mode 100644 arch/sw_64/include/asm/pci.h create mode 100644 arch/sw_64/include/asm/pci_impl.h create mode 100644 arch/sw_64/kernel/pci-noop.c create mode 100644 arch/sw_64/pci/Makefile create mode 100644 arch/sw_64/pci/pci-legacy.c create mode 100644 arch/sw_64/pci/pci-sysfs.c create mode 100644 arch/sw_64/pci/pci.c diff --git a/arch/sw_64/include/asm/pci.h b/arch/sw_64/include/asm/pci.h new file mode 100644 index 000000000000..21bfcef21c5f --- /dev/null +++ b/arch/sw_64/include/asm/pci.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PCI_H +#define _ASM_SW64_PCI_H + +#ifdef __KERNEL__ + +#include +#include +#include + +/* + * The following structure is used to manage multiple PCI busses. + */ + +struct pci_dev; +struct pci_bus; +struct resource; +struct sunway_iommu; +struct page; + +struct piu_saved { + unsigned long piuconfig0; + unsigned long piuconfig1; + unsigned long epdmabar; + unsigned long msiaddr; + unsigned long msiconfig[256]; + unsigned long iommuexcpt_ctrl; + unsigned long dtbaseaddr; + unsigned long hpintconfig; + unsigned long pmeintconfig; + unsigned long aererrintconfig; + unsigned long intaconfig; + unsigned long intbconfig; + unsigned long intcconfig; + unsigned long intdconfig; +}; + +/* A controller. Used to manage multiple PCI busses. 
*/ +struct pci_controller { + struct pci_controller *next; + struct pci_bus *bus; + struct resource *io_space; + struct resource *mem_space; + struct resource *pre_mem_space; + struct resource *busn_space; + unsigned long sparse_mem_base; + unsigned long dense_mem_base; + unsigned long sparse_io_base; + unsigned long dense_io_base; + + /* This one's for the kernel only. It's in KSEG somewhere. */ + void __iomem *ep_config_space_base; + void __iomem *rc_config_space_base; + + unsigned long index; + unsigned long node; + DECLARE_BITMAP(piu_msiconfig, 256); + int int_irq; + int service_irq; + /* For compatibility with current (as of July 2003) pciutils + * and XFree86. Eventually will be removed. + */ + unsigned int need_domain_info; + bool iommu_enable; + struct sunway_iommu *pci_iommu; + int first_busno; + int last_busno; + int self_busno; + void *sysdata; +}; + +/* Override the logic in pci_scan_bus for skipping already-configured + * bus numbers. + */ + +#define pcibios_assign_all_busses() (pci_has_flag(PCI_REASSIGN_ALL_BUS)) + +#define PCIBIOS_MIN_IO 0 +#define PCIBIOS_MIN_MEM 0 + +extern void __init sw64_init_pci(void); +extern void __init sw64_device_interrupt(unsigned long vector); +extern void __init sw64_init_irq(void); +extern void __init sw64_init_arch(void); +extern struct pci_ops sw64_pci_ops; +extern int sw64_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); +extern struct pci_controller *hose_head; +#ifdef CONFIG_PCI_SW64 +extern void __init setup_chip_pci_ops(void); +#else +#define setup_chip_pci_ops() do { } while (0) +#endif + +extern struct pci_controller *pci_bus_to_pci_controller(const struct pci_bus *bus); +extern struct pci_controller *bus_num_to_pci_controller(unsigned long bus_num); + +extern void sw64_pci_root_bridge_prepare(struct pci_host_bridge *bridge); +extern void sw64_pci_root_bridge_scan_finish_up(struct pci_host_bridge *bridge); +extern int sw64_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); + +#ifdef CONFIG_PCI_DOMAINS 
+static inline int pci_proc_domain(struct pci_bus *bus) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + return hose->need_domain_info; +} +#endif + +#ifdef CONFIG_NUMA +static inline int __pcibus_to_node(const struct pci_bus *bus) +{ + struct pci_controller *hose; + + hose = pci_bus_to_pci_controller(bus); + if (!node_online(hose->node)) + return next_node_in(hose->node, node_online_map); + else + return hose->node; +} +#define pcibus_to_node(bus) __pcibus_to_node(bus) +#endif + +#endif /* __KERNEL__ */ + +/* Values for the `which' argument to sys_pciconfig_iobase. */ +#define IOBASE_HOSE 0 +#define IOBASE_SPARSE_MEM 1 +#define IOBASE_DENSE_MEM 2 +#define IOBASE_SPARSE_IO 3 +#define IOBASE_DENSE_IO 4 +#define IOBASE_ROOT_BUS 5 +#define IOBASE_FROM_HOSE 0x10000 + +extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, + size_t count); +extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, + size_t count); +extern int pci_mmap_legacy_page_range(struct pci_bus *bus, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_state); +extern void pci_adjust_legacy_attr(struct pci_bus *bus, + enum pci_mmap_state mmap_type); +#define HAVE_PCI_LEGACY 1 + +extern int pci_create_resource_files(struct pci_dev *dev); +extern void pci_remove_resource_files(struct pci_dev *dev); +extern void __init reserve_mem_for_pci(void); +extern int chip_pcie_configure(struct pci_controller *hose); + +#define PCI_VENDOR_ID_JN 0x5656 +#define PCI_DEVICE_ID_SW64_ROOT_BRIDGE 0x3231 +#define PCI_DEVICE_ID_JN_PCIESW 0x1000 +#define PCI_DEVICE_ID_JN_PCIEUSIP 0x1200 +#define PCI_DEVICE_ID_JN_PCIE2PCI 0x1314 + +#define NR_IRQ_VECTORS NR_IRQS + +#define LAST_DEVICE_VECTOR 31 + +#define PCITODMA_OFFSET 0x0 /*0 offset*/ + +#endif /* _ASM_SW64_PCI_H */ diff --git a/arch/sw_64/include/asm/pci_impl.h b/arch/sw_64/include/asm/pci_impl.h new file mode 100644 index 000000000000..aa17a69b73f8 --- /dev/null +++ b/arch/sw_64/include/asm/pci_impl.h @@ -0,0 
+1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file contains declarations and inline functions for interfacing + * with the PCI initialization routines. + */ +#ifndef _SW64_KERNEL_PCI_IMPL_H +#define _SW64_KERNEL_PCI_IMPL_H + +#include + +struct pci_dev; +struct pci_controller; + +/* The hose list. */ +extern struct pci_controller *hose_head, **hose_tail; + +extern void common_init_pci(void); +extern struct pci_controller *alloc_pci_controller(void); +extern struct resource *alloc_resource(void); + +extern unsigned long size_for_memory(unsigned long max); + +extern const struct dma_map_ops sw64_dma_direct_ops; + +extern struct cma *sw64_kvm_cma; +extern struct gen_pool *sw64_kvm_pool; +#endif /* _SW64_KERNEL_PCI_IMPL_H */ diff --git a/arch/sw_64/kernel/pci-noop.c b/arch/sw_64/kernel/pci-noop.c new file mode 100644 index 000000000000..abfba92fa6a9 --- /dev/null +++ b/arch/sw_64/kernel/pci-noop.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw/kernel/pci-noop.c + * + * Stub PCI interfaces for NO PCI kernels. + */ + +#include +#include +#include +#include + +/* + * The PCI controller list. 
+ */ + +struct pci_controller *hose_head, **hose_tail = &hose_head; + +struct pci_controller * __init +alloc_pci_controller(void) +{ + struct pci_controller *hose; + + hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); + + *hose_tail = hose; + hose_tail = &hose->next; + + return hose; +} + +struct resource * __init +alloc_resource(void) +{ + struct resource *res; + + res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); + + return res; +} + +asmlinkage long +sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn) +{ + return -ENODEV; +} + +asmlinkage long +sys_pciconfig_read(unsigned long bus, unsigned long dfn, + unsigned long off, unsigned long len, void *buf) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + else + return -ENODEV; +} + +asmlinkage long +sys_pciconfig_write(unsigned long bus, unsigned long dfn, + unsigned long off, unsigned long len, void *buf) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + else + return -ENODEV; +} + +static void *sw64_noop_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + void *ret; + + if (!dev || *dev->dma_mask >= 0xffffffffUL) + gfp &= ~GFP_DMA; + ret = (void *)__get_free_pages(gfp, get_order(size)); + if (ret) { + memset(ret, 0, size); + *dma_handle = virt_to_phys(ret); + } + return ret; +} + +static void sw64_noop_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr, + unsigned long attrs) +{ + free_pages((unsigned long)cpu_addr, get_order(size)); +} + +static dma_addr_t sw64_noop_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + return page_to_pa(page) + offset; +} + +static int sw64_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + void *va; + + 
BUG_ON(!sg_page(sg)); + va = sg_virt(sg); + sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va); + sg_dma_len(sg) = sg->length; + } + + return nents; +} + +static int sw64_noop_supported(struct device *dev, u64 mask) +{ + return mask < 0x00ffffffUL ? 0 : 1; +} + +const struct dma_map_ops sw64_noop_ops = { + .alloc = sw64_noop_alloc_coherent, + .free = sw64_noop_free_coherent, + .map_page = sw64_noop_map_page, + .map_sg = sw64_noop_map_sg, + .dma_supported = sw64_noop_supported, +}; + +const struct dma_map_ops *dma_ops = &sw64_noop_ops; +EXPORT_SYMBOL(dma_ops); + +void __init common_init_pci(void) +{ +} + +void __init sw64_init_arch(void) { } +void __init sw64_init_irq(void) { } diff --git a/arch/sw_64/pci/Makefile b/arch/sw_64/pci/Makefile new file mode 100644 index 000000000000..327efb163b12 --- /dev/null +++ b/arch/sw_64/pci/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the linux kernel. +# + +obj-y += pci.o pci-legacy.o pci-sysfs.o +obj-$(CONFIG_ACPI) += acpi.o +obj-$(CONFIG_PCI_MSI) += msi.o diff --git a/arch/sw_64/pci/pci-legacy.c b/arch/sw_64/pci/pci-legacy.c new file mode 100644 index 000000000000..2a44463db0a4 --- /dev/null +++ b/arch/sw_64/pci/pci-legacy.c @@ -0,0 +1,508 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +#include +#include + +unsigned long rc_linkup; + +/* + * The PCI controller list. 
+ */ + +struct pci_controller *hose_head, **hose_tail = &hose_head; +static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus); + +static int __init +pcibios_init(void) +{ + if (acpi_disabled) + sw64_init_pci(); + return 0; +} +subsys_initcall(pcibios_init); + +void __init pcibios_claim_one_bus(struct pci_bus *b) +{ + struct pci_dev *dev; + struct pci_bus *child_bus; + + list_for_each_entry(dev, &b->devices, bus_list) { + int i; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *r = &dev->resource[i]; + + if (r->parent || !r->start || !r->flags) + continue; + if (r->flags & IORESOURCE_PCI_FIXED) { + if (pci_claim_resource(dev, i) == 0) + continue; + + pci_claim_bridge_resource(dev, i); + } + } + } + + list_for_each_entry(child_bus, &b->children, node) + pcibios_claim_one_bus(child_bus); +} + +static void __init +pcibios_claim_console_setup(void) +{ + struct pci_bus *b; + + list_for_each_entry(b, &pci_root_buses, node) + pcibios_claim_one_bus(b); +} + +int __weak chip_pcie_configure(struct pci_controller *hose) +{ + return 0; +} + +unsigned char last_bus = PCI0_BUS; +void __init common_init_pci(void) +{ + struct pci_controller *hose; + struct pci_host_bridge *bridge; + struct pci_bus *bus; + unsigned int init_busnr; + int need_domain_info = 0; + int ret; + unsigned long offset; + + /* Scan all of the recorded PCI controllers. 
*/ + hose = hose_head; + for (hose = hose_head; hose; hose = hose->next) { + bridge = pci_alloc_host_bridge(0); + if (!bridge) + continue; + hose->busn_space->start = last_bus; + init_busnr = (0xff << 16) + ((last_bus + 1) << 8) + (last_bus); + write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr); + offset = hose->mem_space->start - PCI_32BIT_MEMIO; + if (is_in_host()) + hose->first_busno = last_bus + 1; + else + hose->first_busno = last_bus; + pci_add_resource_offset(&bridge->windows, hose->mem_space, offset); + pci_add_resource_offset(&bridge->windows, hose->io_space, hose->io_space->start); + pci_add_resource_offset(&bridge->windows, hose->pre_mem_space, 0); + pci_add_resource_offset(&bridge->windows, hose->busn_space, 0); + bridge->dev.parent = NULL; + bridge->sysdata = hose; + bridge->busnr = hose->busn_space->start; + bridge->ops = &sw64_pci_ops; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = sw64_map_irq; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret) { + pci_free_host_bridge(bridge); + continue; + } + + bus = hose->bus = bridge->bus; + hose->need_domain_info = need_domain_info; + + if (is_in_host()) + last_bus = chip_pcie_configure(hose); + else + while (pci_find_bus(pci_domain_nr(bus), last_bus)) + last_bus++; + + hose->last_busno = hose->busn_space->end = last_bus; + init_busnr = read_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS); + init_busnr &= ~(0xff << 16); + init_busnr |= last_bus << 16; + write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr); + pci_bus_update_busn_res_end(bus, last_bus); + last_bus++; + } + + pcibios_claim_console_setup(); + + if (is_in_host()) { + list_for_each_entry(bus, &pci_root_buses, node) + pcibios_reserve_legacy_regions(bus); + } + + pr_info("SW arch assign unassigned resources.\n"); + + pci_assign_unassigned_resources(); + + for (hose = hose_head; hose; hose = hose->next) { + bus = hose->bus; + if (bus) + pci_bus_add_devices(bus); + } +} + +struct pci_controller * 
__init +alloc_pci_controller(void) +{ + struct pci_controller *hose; + + hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); + + *hose_tail = hose; + hose_tail = &hose->next; + + return hose; +} + +struct resource * __init +alloc_resource(void) +{ + struct resource *res; + + res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); + + return res; +} + +static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + resource_size_t offset; + struct resource *res; + + pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus)); + + /* Check for IO */ + if (!(hose->io_space->flags & IORESOURCE_IO)) + goto no_io; + offset = (unsigned long)hose->io_space->start; + res = kzalloc(sizeof(struct resource), GFP_KERNEL); + BUG_ON(res == NULL); + res->name = "Legacy IO"; + res->flags = IORESOURCE_IO; + res->start = offset; + res->end = (offset + 0xfff) & 0xfffffffffffffffful; + pr_debug("Candidate legacy IO: %pR\n", res); + if (request_resource(hose->io_space, res)) { + pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n", + pci_domain_nr(bus), bus->number, res); + kfree(res); + } + +no_io: + return; +} + +/* PCIe RC operations */ +int sw6_pcie_read_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 data; + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = hose->rc_config_space_base; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc read addr:%px bus %d, devfn %#x, where %#x size=%d\t", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, size); + + if ((uintptr_t)where & (size - 1)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (unlikely(devfn > 0)) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + data = readl(cfg_iobase + ((where & ~3) << 5)); + + switch (size) { + case 1: + *val = (data >> (8 * (where & 0x3))) & 0xff; + break; + case 2: + *val = (data >> (8 * (where 
& 0x2))) & 0xffff; + break; + default: + *val = data; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("*val %#x\n ", *val); + + return PCIBIOS_SUCCESSFUL; +} + +int sw6_pcie_write_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data; + u32 shift = 8 * (where & 3); + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = (void *)hose->rc_config_space_base; + + if ((uintptr_t)where & (size - 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + switch (size) { + case 1: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xff << shift); + data |= (val & 0xff) << shift; + break; + case 2: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xffff << shift); + data |= (val & 0xffff) << shift; + break; + default: + data = val; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc write addr:%px bus %d, devfn %#x, where %#x *val %#x size %d\n", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, val, size); + + writel(data, cfg_iobase + ((where & ~3) << 5)); + + return PCIBIOS_SUCCESSFUL; +} + +int sw6_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + int ret = PCIBIOS_DEVICE_NOT_FOUND; + + if (is_guest_or_emul()) + return pci_generic_config_read(bus, devfn, where, size, val); + + hose->self_busno = hose->busn_space->start; + + if (unlikely(bus->number == hose->self_busno)) { + ret = sw6_pcie_read_rc_cfg(bus, devfn, where, size, val); + } else { + if (test_bit(hose->node * 8 + hose->index, &rc_linkup)) + ret = pci_generic_config_read(bus, devfn, where, size, val); + else + return ret; + } + return ret; +} + +int sw6_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (is_guest_or_emul()) + return pci_generic_config_write(bus, 
devfn, where, size, val); + + hose->self_busno = hose->busn_space->start; + + if (unlikely(bus->number == hose->self_busno)) + return sw6_pcie_write_rc_cfg(bus, devfn, where, size, val); + else + return pci_generic_config_write(bus, devfn, where, size, val); +} + +/* + *sw6_pcie_valid_device - Check if a valid device is present on bus + *@bus: PCI Bus structure + *@devfn: device/function + * + *Return: 'true' on success and 'false' if invalid device is found + */ +static bool sw6_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (is_in_host()) { + /* Only one device down on each root complex */ + if (bus->number == hose->self_busno && devfn > 0) + return false; + } + + return true; +} + +/* + *sw6_pcie_map_bus - Get configuration base + *@bus: PCI Bus structure + *@devfn: Device/function + *@where: Offset from base + * + *Return: Base address of the configuration space needed to be + *accessed. + */ +static void __iomem *sw6_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase; + unsigned long relbus; + + if (!sw6_pcie_valid_device(bus, devfn)) + return NULL; + + relbus = (bus->number << 24) | (devfn << 16) | where; + + cfg_iobase = hose->ep_config_space_base + relbus; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("addr:%px bus %d, devfn %d, where %d\n", + cfg_iobase, bus->number, devfn, where); + return cfg_iobase; +} + +struct pci_ops sw64_pci_ops = { + .map_bus = sw6_pcie_map_bus, + .read = sw6_pcie_config_read, + .write = sw6_pcie_config_write, +}; + +int sw64_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return sw64_chip_init->pci_init.map_irq(dev, slot, pin); +} + +static void __init +sw64_init_host(unsigned long node, unsigned long index) +{ + struct pci_controller *hose; + int ret = 0; + + hose = alloc_pci_controller(); + if (!hose) { + pr_warn("alloc NODE 
%ld RC %ld hose failed\n", node, index); + return; + } + hose->iommu_enable = false; + hose->io_space = alloc_resource(); + hose->mem_space = alloc_resource(); + hose->pre_mem_space = alloc_resource(); + hose->busn_space = alloc_resource(); + hose->index = index; + hose->node = node; + + sw64_chip_init->pci_init.hose_init(hose); + + if (sw64_chip_init->pci_init.set_rc_piu) + sw64_chip_init->pci_init.set_rc_piu(node, index); + + ret = sw64_chip_init->pci_init.check_pci_linkup(node, index); + if (ret == 0) { + /* Root Complex downstream port is link up */ + set_bit(node * 8 + index, &rc_linkup); //8-bit per node + } +} + +void __weak set_devint_wken(int node) {} +void __weak set_adr_int(int node) {} + +void __init sw64_init_arch(void) +{ + if (IS_ENABLED(CONFIG_PCI)) { + unsigned long node, cpu_num; + unsigned long rc_enable; + char id[8], msg[64]; + int i; + + cpu_num = sw64_chip->get_cpu_num(); + + for (node = 0; node < cpu_num; node++) { + if (is_in_host()) { + set_devint_wken(node); + set_adr_int(node); + } + } + + if (!acpi_disabled) + return; + + pr_info("SW arch PCI initialize!\n"); + for (node = 0; node < cpu_num; node++) { + rc_enable = sw64_chip_init->pci_init.get_rc_enable(node); + if (rc_enable == 0) { + pr_notice("PCIe is disabled on node %ld\n", node); + continue; + } + for (i = 0; i < MAX_NR_RCS; i++) { + if ((rc_enable >> i) & 0x1) + sw64_init_host(node, i); + } + if ((rc_linkup >> node * 8) & 0xff) { + memset(msg, 0, 64); + sprintf(msg, "Node %ld: RC [ ", node); + for (i = 0; i < MAX_NR_RCS; i++) { + if ((rc_linkup >> (i + node * 8)) & 1) { + memset(id, 0, 8); + sprintf(id, "%d ", i); + strcat(msg, id); + } + } + strcat(msg, "] link up"); + pr_info("%s\n", msg); + } else { + pr_info("Node %ld: no RC link up\n", node); + } + } + } +} + +void __weak set_pcieport_service_irq(int node, int index) {} + +static void __init sw64_init_intx(struct pci_controller *hose) +{ + unsigned long int_conf, node, val_node; + unsigned long index, irq; + int rcid; + + 
node = hose->node;
+	index = hose->index;
+
+	if (!node_online(node))
+		val_node = next_node_in(node, node_online_map);
+	else
+		val_node = node;
+	irq = irq_alloc_descs_from(NR_IRQS_LEGACY, 2, val_node);
+	WARN_ON((long)irq < 0);	/* irq is unsigned long; cast so a negative errno from irq_alloc_descs_from() is actually detected */
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
+	irq_set_status_flags(irq, IRQ_LEVEL);
+	hose->int_irq = irq;
+	irq_set_chip_and_handler(irq + 1, &dummy_irq_chip, handle_level_irq);
+	hose->service_irq = irq + 1;
+	rcid = cpu_to_rcid(0);
+
+	pr_info_once("INTx are directed to node %d core %d.\n",
+		     ((rcid >> 6) & 0x3), (rcid & 0x1f));
+	int_conf = 1UL << 62 | rcid; /* rebase all intx on the first logical cpu */
+	if (sw64_chip_init->pci_init.set_intx)
+		sw64_chip_init->pci_init.set_intx(node, index, int_conf);
+
+	set_pcieport_service_irq(node, index);
+}
+
+void __init sw64_init_irq(void)
+{
+	struct pci_controller *hose;
+
+	/* Scan all of the recorded PCI controllers. */
+	hose = hose_head;
+	for (hose = hose_head; hose; hose = hose->next)
+		sw64_init_intx(hose);
+}
+
+void __init
+sw64_init_pci(void)
+{
+	pci_add_flags(PCI_REASSIGN_ALL_BUS);
+	common_init_pci();
+	pci_clear_flags(PCI_REASSIGN_ALL_BUS);
+}
diff --git a/arch/sw_64/pci/pci-sysfs.c b/arch/sw_64/pci/pci-sysfs.c
new file mode 100644
index 000000000000..5b52a534fa80
--- /dev/null
+++ b/arch/sw_64/pci/pci-sysfs.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * Sw_64 PCI resource files.
+ *
+ * Loosely based on generic HAVE_PCI_MMAP implementation in
+ * drivers/pci/pci-sysfs.c
+ */
+
+#include
+
+static int hose_mmap_page_range(struct pci_controller *hose,
+				struct vm_area_struct *vma,
+				enum pci_mmap_state mmap_type, int sparse)
+{
+	unsigned long base;
+
+	if (mmap_type == pci_mmap_mem)
+		base = sparse ? hose->sparse_mem_base : hose->dense_mem_base;
+	else
+		base = sparse ?
hose->sparse_io_base : hose->dense_io_base; + + vma->vm_pgoff |= base >> PAGE_SHIFT; + + return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + +static int __pci_mmap_fits(struct pci_dev *pdev, int num, + struct vm_area_struct *vma, int sparse) +{ + unsigned long nr, start, size; + int shift = sparse ? 5 : 0; + + nr = vma_pages(vma); + start = vma->vm_pgoff; + size = ((pci_resource_len(pdev, num) - 1) >> (PAGE_SHIFT - shift)) + 1; + + if (start < size && size - start >= nr) + return 1; + WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", + current->comm, sparse ? " sparse" : "", start, start + nr, + pci_name(pdev), num, size); + return 0; +} + +/** + * pci_mmap_resource - map a PCI resource into user memory space + * @kobj: kobject for mapping + * @attr: struct bin_attribute for the file being mapped + * @vma: struct vm_area_struct passed into the mmap + * @sparse: address space type + * + * Use the bus mapping routines to map a PCI resource into userspace. + */ +static int pci_mmap_resource(struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma, int sparse) +{ + struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); + struct resource *res = attr->private; + enum pci_mmap_state mmap_type; + struct pci_bus_region bar; + int i; + + for (i = 0; i < PCI_ROM_RESOURCE; i++) + if (res == &pdev->resource[i]) + break; + if (i >= PCI_ROM_RESOURCE) + return -ENODEV; + + if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) + return -EINVAL; + + if (!__pci_mmap_fits(pdev, i, vma, sparse)) + return -EINVAL; + + pcibios_resource_to_bus(pdev->bus, &bar, res); + vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0)); + mmap_type = res->flags & IORESOURCE_MEM ? 
pci_mmap_mem : pci_mmap_io; + + return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse); +} + +static int pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + return pci_mmap_resource(kobj, attr, vma, 1); +} + +static int pci_mmap_resource_dense(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + return pci_mmap_resource(kobj, attr, vma, 0); +} + +/** + * pci_remove_resource_files - cleanup resource files + * @dev: dev to cleanup + * + * If we created resource files for @dev, remove them from sysfs and + * free their resources. + */ +void pci_remove_resource_files(struct pci_dev *pdev) +{ + int i; + + for (i = 0; i < PCI_ROM_RESOURCE; i++) { + struct bin_attribute *res_attr; + + res_attr = pdev->res_attr[i]; + if (res_attr) { + sysfs_remove_bin_file(&pdev->dev.kobj, res_attr); + kfree(res_attr); + } + + res_attr = pdev->res_attr_wc[i]; + if (res_attr) { + sysfs_remove_bin_file(&pdev->dev.kobj, res_attr); + kfree(res_attr); + } + } +} + +static int sparse_mem_mmap_fits(struct pci_dev *pdev, int num) +{ + struct pci_bus_region bar; + struct pci_controller *hose = pci_bus_to_pci_controller(pdev->bus); + long dense_offset; + unsigned long sparse_size; + + pcibios_resource_to_bus(pdev->bus, &bar, &pdev->resource[num]); + + /* + * All core logic chips have 4G sparse address space, except + * CIA which has 16G (see xxx_SPARSE_MEM and xxx_DENSE_MEM + * definitions in asm/core_xxx.h files). This corresponds + * to 128M or 512M of the bus space. + */ + dense_offset = (long)(hose->dense_mem_base - hose->sparse_mem_base); + sparse_size = dense_offset >= 0x400000000UL ? 
0x20000000 : 0x8000000; + + return bar.end < sparse_size; +} + +static int pci_create_one_attr(struct pci_dev *pdev, int num, char *name, + char *suffix, struct bin_attribute *res_attr, + unsigned long sparse) +{ + size_t size = pci_resource_len(pdev, num); + + sprintf(name, "resource%d%s", num, suffix); + res_attr->mmap = sparse ? pci_mmap_resource_sparse : + pci_mmap_resource_dense; + res_attr->attr.name = name; + res_attr->attr.mode = 0600; + res_attr->size = sparse ? size << 5 : size; + res_attr->private = &pdev->resource[num]; + return sysfs_create_bin_file(&pdev->dev.kobj, res_attr); +} + +static int pci_create_attr(struct pci_dev *pdev, int num) +{ + /* allocate attribute structure, piggyback attribute name */ + int retval, nlen1, nlen2 = 0, res_count = 1; + unsigned long sparse_base, dense_base; + struct bin_attribute *attr; + struct pci_controller *hose = pci_bus_to_pci_controller(pdev->bus); + char *suffix, *attr_name; + + suffix = ""; + nlen1 = 10; + + if (pdev->resource[num].flags & IORESOURCE_MEM) { + sparse_base = hose->sparse_mem_base; + dense_base = hose->dense_mem_base; + if (sparse_base && !sparse_mem_mmap_fits(pdev, num)) { + sparse_base = 0; + suffix = "_dense"; + nlen1 = 16; /* resourceN_dense */ + } + } else { + sparse_base = hose->sparse_io_base; + dense_base = hose->dense_io_base; + } + + if (sparse_base) { + suffix = "_sparse"; + nlen1 = 17; + if (dense_base) { + nlen2 = 16; /* resourceN_dense */ + res_count = 2; + } + } + + attr = kzalloc(sizeof(*attr) * res_count + nlen1 + nlen2, GFP_ATOMIC); + if (!attr) + return -ENOMEM; + + attr_name = (char *)(attr + res_count); + pdev->res_attr[num] = attr; + retval = pci_create_one_attr(pdev, num, attr_name, suffix, attr, + sparse_base); + if (retval || res_count == 1) + return retval; + + /* Create dense file */ + attr_name += nlen1; + attr++; + pdev->res_attr_wc[num] = attr; + return pci_create_one_attr(pdev, num, attr_name, "_dense", attr, 0); +} + +/** + * pci_create_resource_files - create 
resource files in sysfs for @dev + * @dev: dev in question + * + * Walk the resources in @dev creating files for each resource available. + */ +int pci_create_resource_files(struct pci_dev *pdev) +{ + int i; + int retval; + + /* Expose the PCI resources from this device as files */ + for (i = 0; i < PCI_ROM_RESOURCE; i++) { + + /* skip empty resources */ + if (!pci_resource_len(pdev, i)) + continue; + + retval = pci_create_attr(pdev, i); + if (retval) { + pci_remove_resource_files(pdev); + return retval; + } + } + return 0; +} + +/* Legacy I/O bus mapping stuff. */ + +static int __legacy_mmap_fits(struct pci_controller *hose, + struct vm_area_struct *vma, + unsigned long res_size, int sparse) +{ + unsigned long nr, start, size; + + nr = vma_pages(vma); + start = vma->vm_pgoff; + size = ((res_size - 1) >> PAGE_SHIFT) + 1; + + if (start < size && size - start >= nr) + return 1; + WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on hose %ld (size 0x%08lx)\n", + current->comm, sparse ? " sparse" : "", start, start + nr, + hose->index, size); + return 0; +} + +static inline int has_sparse(struct pci_controller *hose, + enum pci_mmap_state mmap_type) +{ + unsigned long base; + + base = (mmap_type == pci_mmap_mem) ? hose->sparse_mem_base : + hose->sparse_io_base; + + return base != 0; +} + +int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, + enum pci_mmap_state mmap_type) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + int sparse = has_sparse(hose, mmap_type); + unsigned long res_size; + + res_size = (mmap_type == pci_mmap_mem) ? bus->legacy_mem->size : + bus->legacy_io->size; + if (!__legacy_mmap_fits(hose, vma, res_size, sparse)) + return -EINVAL; + + return hose_mmap_page_range(hose, vma, mmap_type, sparse); +} + +/** + * pci_adjust_legacy_attr - adjustment of legacy file attributes + * @b: bus to create files under + * @mmap_type: I/O port or memory + * + * Adjust file name and size for sparse mappings. 
+ */
+void pci_adjust_legacy_attr(struct pci_bus *bus, enum pci_mmap_state mmap_type)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(bus);
+
+	if (!has_sparse(hose, mmap_type))
+		return;
+
+	if (mmap_type == pci_mmap_mem) {
+		bus->legacy_mem->attr.name = "legacy_mem_sparse";
+		bus->legacy_mem->size <<= 5;
+	} else {
+		bus->legacy_io->attr.name = "legacy_io_sparse";
+		bus->legacy_io->size <<= 5;
+	}
+}
+
+/* Legacy I/O bus read/write functions */
+int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(bus);
+
+	port += hose->io_space->start;
+
+	switch (size) {
+	case 1:
+		*((u8 *)val) = inb(port);
+		return 1;
+	case 2:
+		if (port & 1)
+			return -EINVAL;
+		*((u16 *)val) = inw(port);
+		return 2;
+	case 4:
+		if (port & 3)
+			return -EINVAL;
+		*((u32 *)val) = inl(port);
+		return 4;
+	}
+	return -EINVAL;
+}
+
+int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(bus);
+
+	port += hose->io_space->start;
+
+	switch (size) {
+	case 1:
+		outb(val, port);	/* out{b,w,l}() take (value, port); arguments were swapped */
+		return 1;
+	case 2:
+		if (port & 1)
+			return -EINVAL;
+		outw(val, port);
+		return 2;
+	case 4:
+		if (port & 3)
+			return -EINVAL;
+		outl(val, port);
+		return 4;
+	}
+	return -EINVAL;
+}
diff --git a/arch/sw_64/pci/pci.c b/arch/sw_64/pci/pci.c
new file mode 100644
index 000000000000..3db9816e19f1
--- /dev/null
+++ b/arch/sw_64/pci/pci.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+#include
+
+#include
+#include
+
+/*
+ * raw_pci_read/write - Platform-specific PCI config space access.
+ */ +int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 *val) +{ + struct pci_bus *bus_tmp = pci_find_bus(domain, bus); + + if (bus_tmp) + return bus_tmp->ops->read(bus_tmp, devfn, reg, len, val); + + return -EINVAL; +} + +int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 val) +{ + struct pci_bus *bus_tmp = pci_find_bus(domain, bus); + + if (bus_tmp) + return bus_tmp->ops->write(bus_tmp, devfn, reg, len, val); + + return -EINVAL; +} + +resource_size_t pcibios_default_alignment(void) +{ + if (is_in_guest()) + return PAGE_SIZE; + else + return 0; +} + +/** + * Just declaring that the power-of-ten prefixes are actually the + * power-of-two ones doesn't make it true :) + */ +#define KB 1024 +#define MB (1024*KB) +#define GB (1024*MB) + +resource_size_t pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) +{ + struct pci_dev *dev = data; + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + unsigned long alignto; + resource_size_t start = res->start; + + if (res->flags & IORESOURCE_IO) { + /* Make sure we start at our min on all hoses */ + if (start - hose->io_space->start < PCIBIOS_MIN_IO) + start = PCIBIOS_MIN_IO + hose->io_space->start; + /* + * Put everything into 0x00-0xff region modulo 0x400 + */ + if (start & 0x300) + start = (start + 0x3ff) & ~0x3ff; + } else if (res->flags & IORESOURCE_MEM) { + /* Make sure we start at our min on all hoses */ + if (start - hose->mem_space->start < PCIBIOS_MIN_MEM) + start = PCIBIOS_MIN_MEM + hose->mem_space->start; + /* + * The following holds at least for the Low Cost + * SW64 implementation of the PCI interface: + * + * In sparse memory address space, the first + * octant (16MB) of every 128MB segment is + * aliased to the very first 16 MB of the + * address space (i.e., it aliases the ISA + * memory address space). 
Thus, we try to + * avoid allocating PCI devices in that range. + * Can be allocated in 2nd-7th octant only. + * Devices that need more than 112MB of + * address space must be accessed through + * dense memory space only! + */ + + /* Align to multiple of size of minimum base. */ + alignto = max_t(resource_size_t, 0x1000UL, align); + start = ALIGN(start, alignto); + if (hose->sparse_mem_base && size <= 7 * 16*MB) { + if (((start / (16*MB)) & 0x7) == 0) { + start &= ~(128*MB - 1); + start += 16*MB; + start = ALIGN(start, alignto); + } + if (start/(128*MB) != (start + size - 1)/(128*MB)) { + start &= ~(128*MB - 1); + start += (128 + 16)*MB; + start = ALIGN(start, alignto); + } + } + } + + return start; +} + +#undef KB +#undef MB +#undef GB + +char *pcibios_setup(char *str) +{ + return str; +} + +void pcibios_fixup_bus(struct pci_bus *bus) +{ + /* Propagate hose info into the subordinate devices. */ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + struct pci_dev *dev = bus->self; + + if (!dev || bus->number == hose->first_busno) { + bus->resource[0] = hose->io_space; + bus->resource[1] = hose->mem_space; + bus->resource[2] = hose->pre_mem_space; + } +} + +/** + * Provide information on locations of various I/O regions in physical + * memory. Do this on a per-card basis so that we choose the right hose. 
+ */ +asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn) +{ + struct pci_controller *hose; + + hose = bus_num_to_pci_controller(bus); + if (hose == NULL) + return -ENODEV; + + switch (which & ~IOBASE_FROM_HOSE) { + case IOBASE_HOSE: + return hose->index; + case IOBASE_SPARSE_MEM: + return hose->sparse_mem_base; + case IOBASE_DENSE_MEM: + return hose->dense_mem_base; + case IOBASE_SPARSE_IO: + return hose->sparse_io_base; + case IOBASE_DENSE_IO: + return hose->dense_io_base; + case IOBASE_ROOT_BUS: + return hose->bus->number; + } + + return -EOPNOTSUPP; +} + +void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +{ +} +EXPORT_SYMBOL(pci_iounmap); + +void __init reserve_mem_for_pci(void) +{ + int ret; + unsigned long base = PCI_32BIT_MEMIO; + + ret = add_memmap_region(base, PCI_32BIT_MEMIO_SIZE, memmap_pci); + if (ret) { + pr_err("reserved pages for pcie memory space failed\n"); + return; + } + + pr_info("reserved pages for pcie memory space %lx:%lx\n", base >> PAGE_SHIFT, + (base + PCI_32BIT_MEMIO_SIZE) >> PAGE_SHIFT); +} + +const struct dma_map_ops *dma_ops; +EXPORT_SYMBOL(dma_ops); + +/* Quirks */ +static void quirk_isa_bridge(struct pci_dev *dev) +{ + dev->class = PCI_CLASS_BRIDGE_ISA << 8; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge); + +/* + * Early fix up the Root Complex settings + */ +static void fixup_root_complex(struct pci_dev *dev) +{ + int i; + struct pci_bus *bus = dev->bus; + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + hose->self_busno = hose->busn_space->start; + + if (likely(bus->number == hose->self_busno)) { + if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) { + /* Check Root Complex port again */ + dev->is_hotplug_bridge = 0; + dev->current_state = PCI_D0; + } + + dev->class &= 0xff; + dev->class |= PCI_CLASS_BRIDGE_PCI << 8; + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + dev->resource[i].start = 0; + dev->resource[i].end = 0; + 
dev->resource[i].flags = IORESOURCE_PCI_FIXED; + } + } + atomic_inc(&dev->enable_cnt); + + dev->no_msi = 1; +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JN, PCI_DEVICE_ID_SW64_ROOT_BRIDGE, fixup_root_complex); + +static int setup_bus_dma_cb(struct pci_dev *pdev, void *data) +{ + pdev->dev.bus_dma_limit = DMA_BIT_MASK(32); + return 0; +} + +static void fix_bus_dma_limit(struct pci_dev *dev) +{ + pci_walk_bus(dev->subordinate, setup_bus_dma_cb, NULL); + pr_info("Set zx200 bus_dma_limit to 32-bit\n"); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ZHAOXIN, 0x071f, fix_bus_dma_limit); + +#ifdef CONFIG_DCA +static void enable_sw_dca(struct pci_dev *dev) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + unsigned long node, rc_index, dca_ctl, dca_conf; + int i; + + if (dev->class >> 8 != PCI_CLASS_NETWORK_ETHERNET) + return; + + node = hose->node; + rc_index = hose->index; + + for (i = 0; i < 256; i++) { + dca_conf = read_piu_ior1(node, rc_index, DEVICEID0 + (i << 7)); + if (dca_conf >> 63) + continue; + else { + dca_conf = (1UL << 63) | (dev->bus->number << 8) | dev->devfn; + pr_info("dca device index %d, dca_conf = %#lx\n", i, dca_conf); + write_piu_ior1(node, rc_index, DEVICEID0 + (i << 7), dca_conf); + break; + } + } + + dca_ctl = read_piu_ior1(node, rc_index, DCACONTROL); + if (dca_ctl & 0x1) { + dca_ctl = 0x2; + write_piu_ior1(node, rc_index, DCACONTROL, dca_ctl); + pr_info("Node %ld RC %ld enable DCA 1.0\n", node, rc_index); + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, enable_sw_dca); +#endif + +/** + * There are some special aspects to the Root Complex of Sunway: + * 1. Root Complex config space base addr is different + * from EP config space base addr. + * 2. For the case of multiple Root Complex, different + * Root Complex have config space base addr. + * + * These means that even if multiple Root Complex share + * the same segment group number, their bus numbers can + * still overlap. 
+ * + * But due to a Xorg related issue, we can not overlap + * the bus numbers of multiple Root Complex. So, after + * scanning the Root Complex, use "last_bus" to record + * the next bus number of the current maximum used bus + * number, and use it as the start bus number of the + * next Root Complex to be scanned. + * + * A question: when there is too much RCs, may 256 bus + * numbers be insufficient? + */ +static unsigned char last_bus; + +void sw64_pci_root_bridge_prepare(struct pci_host_bridge *bridge) +{ + struct pci_controller *hose = NULL; + struct resource_entry *entry = NULL; + struct pci_bus *bus = bridge->bus; + unsigned long flags = 0; + unsigned int init_busnr = 0; + + hose = pci_bus_to_pci_controller(bus); + + resource_list_for_each_entry(entry, &bridge->windows) { + flags = entry->res->flags; + if (flags & IORESOURCE_IO) { + entry->offset = entry->res->start; + hose->io_space = entry->res; + } else if (flags & IORESOURCE_BUS) { + entry->res->start = last_bus; + hose->busn_space = entry->res; + } else if (flags & IORESOURCE_MEM) { + if (!(flags & IORESOURCE_PREFETCH)) { + entry->offset = entry->res->start - PCI_32BIT_MEMIO; + hose->mem_space = entry->res; + } else + hose->pre_mem_space = entry->res; + } + } + + /** + * We scan Root Complex and update bus num in kernel, + * not in firmware. Firmware just pass 0x0-0xff via _CRS. + * + * So, need to update bus num of pci host bridge here. + */ + bridge->busnr = last_bus; + dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus), last_bus); + + /** + * At this point, pci_bus has been created and use old + * bridge->busnr, so need to update bus->number here. + */ + bus->number = last_bus; + + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = sw64_pci_map_irq; + + init_busnr = (0xff << 16) + ((last_bus + 1) << 8) + (last_bus); + write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr); + + hose->first_busno = last_bus + (is_in_host() ? 
1 : 0); + + pci_add_flags(PCI_REASSIGN_ALL_BUS); +} + +static void sw64_pci_root_bridge_reserve_legacy_io(struct pci_host_bridge *bridge) +{ + struct pci_bus *bus = bridge->bus; + struct resource_entry *entry = NULL; + struct resource *res = NULL; + + resource_list_for_each_entry(entry, &bridge->windows) { + if (!(entry->res->flags & IORESOURCE_IO)) + continue; + + res = kzalloc(sizeof(struct resource), GFP_KERNEL); + if (res == NULL) { + pr_err("alloc resource for legacy io out of mem\n"); + return; + } + + res->name = "legacy io"; + res->flags = IORESOURCE_IO; + res->start = entry->res->start; + res->end = (res->start + 0xFFF) & 0xFFFFFFFFFFFFFFFFUL; + + pr_info("reserving legacy io %pR for domain %04x\n", + res, pci_domain_nr(bus)); + if (request_resource(entry->res, res)) { + pr_err("pci %04x:%02x reserve legacy io %pR failed\n", + pci_domain_nr(bus), bus->number, res); + kfree(res); + } + } +} + +void sw64_pci_root_bridge_scan_finish_up(struct pci_host_bridge *bridge) +{ + struct pci_controller *hose = NULL; + struct pci_bus *bus = NULL; + unsigned int init_busnr = 0; + + bus = bridge->bus; + + hose = pci_bus_to_pci_controller(bus); + hose->bus = bus; + + if (is_in_host()) + last_bus = chip_pcie_configure(hose); + else { + while (pci_find_bus(pci_domain_nr(bus), last_bus)) + last_bus++; + } + + hose->last_busno = last_bus; + hose->busn_space->end = last_bus; + + init_busnr = read_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS); + init_busnr &= ~(0xff << 16); + init_busnr |= last_bus << 16; + write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr); + + pci_bus_update_busn_res_end(bus, last_bus); + last_bus++; + + pr_info("bus number update to %u\n", last_bus); + + if (is_in_host()) + sw64_pci_root_bridge_reserve_legacy_io(bridge); + + /** + * Root Complex of SW64 does not support ASPM, causing + * control field(_OSC) unable to be updated. + * + * Related logic can be found in "negotiate_os_control". 
+ */ + bridge->native_aer = 1; + bridge->native_pme = 1; + + /** + * Since some buggy firmwares may configure invalid bridge bus numbers, + * the kernel re-assigns all PCI bus numbers when scan Root Complex. + * + * However, users may trigger a pci bus rescan in the userspace by the + * command below: + * + * > echo 1 > /sys/bus/pci/rescan + * + * Unexpected errors may occur on the endpoint devices due to the re-assign + * bus numbers of upstream bridges. + * + * To work around this problem, the flag PCI_REASSIGN_ALL_BUS is set before + * scanning Root Complex and cleared after scanning Root Complex. + */ + pci_clear_flags(PCI_REASSIGN_ALL_BUS); +} -- Gitee From 4a5842d5bcb6c3bc8a14b56825dcf317b2851ea4 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:28 +0800 Subject: [PATCH 0300/2138] anolis: sw64: add MSI support ANBZ: #4688 Add basic MSI support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/msi.h | 93 ++++++++++++++++++++++++++++++++++++ arch/sw_64/pci/msi.c | 21 ++++++++ 2 files changed, 114 insertions(+) create mode 100644 arch/sw_64/include/asm/msi.h create mode 100644 arch/sw_64/pci/msi.c diff --git a/arch/sw_64/include/asm/msi.h b/arch/sw_64/include/asm/msi.h new file mode 100644 index 000000000000..dbf6f81843be --- /dev/null +++ b/arch/sw_64/include/asm/msi.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MSI_H +#define _ASM_SW64_MSI_H + +#include + +#define NR_VECTORS NR_IRQS +#define NR_IRQ_VECTORS NR_IRQS + +#define AUTO_ASSIGN 0 + +#define LAST_DEVICE_VECTOR 31 + +#define MSI_OFFSET 0x44 + +#define NUM_MSI_IRQS 256 + +#define PERCPU_MSI_IRQS 256 + +#define VT_MSIX_MSG_ADDR (0x8000fee00000UL) +#define VT_MSIX_ADDR_DEST_ID_SHIFT 12 +#define VT_MSIX_ADDR_DEST_ID_MASK (0xff << VT_MSIX_ADDR_DEST_ID_SHIFT) +#define VT_MSIX_ADDR_DEST_ID(dest) \ + 
(((dest) << VT_MSIX_ADDR_DEST_ID_SHIFT) & VT_MSIX_ADDR_DEST_ID_MASK) + + +#ifdef CONFIG_PCI_MSI +extern void vt_sw64_vector_free_irqs(unsigned int virq, unsigned int nr_irqs); +extern int sw64_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type); +extern bool find_free_cpu_vector(const struct cpumask *search_mask, + int *found_cpu, int *found_vector); +extern int msi_compose_msg(unsigned int irq, struct msi_msg *msg); +extern void sw64_irq_noop(struct irq_data *d); +extern struct irq_chip sw64_irq_chip; +extern void handle_pci_msi_interrupt(unsigned long type, + unsigned long vector, + unsigned long pci_msi1_addr); + +#define MSI_ADDR_BASE_HI 0 +#define MSI_ADDR_BASE_LO 0x91abc0 + +#define MSI_ADDR_SHIFT 20 +#define MSI_ADDR_DEST_ID_SHIFT 10 + +struct sw64_msi_chip_data { + spinlock_t cdata_lock; + union { + unsigned long msi_config; + unsigned long msiaddr; + }; + unsigned long rc_node; + unsigned long rc_index; + unsigned int msi_config_index; + unsigned int dst_cpu; + unsigned int vector; + unsigned int prev_cpu; + unsigned int prev_vector; + unsigned int multi_msi; + bool move_in_progress; +}; + +static inline int rcid_to_msicid(int rcid) +{ + int msicid = 0; + + msicid |= (rcid_to_domain_id(rcid) << 7); + msicid |= (rcid_to_thread_id(rcid) << 6); + msicid |= (rcid_to_core_id(rcid) << 0); + + return msicid; +} + +extern void arch_init_msi_domain(struct irq_domain *domain); +enum irq_alloc_type { + IRQ_ALLOC_TYPE_MSI, + IRQ_ALLOC_TYPE_MSIX, + IRQ_ALLOC_TYPE_INTX, +}; +struct irq_alloc_info { + struct msi_desc *desc; + enum irq_alloc_type type; + struct pci_dev *msi_dev; + irq_hw_number_t hwirq; +}; +typedef struct irq_alloc_info msi_alloc_info_t; +#else /* !CONFIG_PCI_MSI */ +static inline void handle_pci_msi_interrupt(unsigned long type, + unsigned long vector, unsigned long pci_msi1_addr) +{ + pr_warn("SW arch disable CONFIG_PCI_MSI option.\n"); +} +#endif /* CONFIG_PCI_MSI */ +#endif /* _ASM_SW64_MSI_H */ diff --git a/arch/sw_64/pci/msi.c 
b/arch/sw_64/pci/msi.c new file mode 100644 index 000000000000..fc2c122c37ef --- /dev/null +++ b/arch/sw_64/pci/msi.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +int msi_compose_msg(unsigned int irq, struct msi_msg *msg) +{ + msg->address_hi = (unsigned int)(MSIX_MSG_ADDR >> 32); + msg->address_lo = (unsigned int)(MSIX_MSG_ADDR & 0xffffffff); + msg->data = irq; + return irq; +} + +void sw64_irq_noop(struct irq_data *d) +{ +} + +void arch_teardown_msi_irq(unsigned int irq) +{ +} -- Gitee From 34c53b02a09b90d7dfeb69e8a89c587f9179f9cc Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:06 +0800 Subject: [PATCH 0301/2138] anolis: sw64: add device trees ANBZ: #4688 Add device trees for SW64 based chip3 platform and virtual machines (including an empty device tree for platforms that are under development). Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/boot/dts/chip3.dts | 240 ++++++++++++++++++++++++++++++++ arch/sw_64/boot/dts/chip_vt.dts | 55 ++++++++ arch/sw_64/boot/dts/empty.dts | 15 ++ 3 files changed, 310 insertions(+) create mode 100644 arch/sw_64/boot/dts/chip3.dts create mode 100644 arch/sw_64/boot/dts/chip_vt.dts create mode 100644 arch/sw_64/boot/dts/empty.dts diff --git a/arch/sw_64/boot/dts/chip3.dts b/arch/sw_64/boot/dts/chip3.dts new file mode 100644 index 000000000000..082506393ac9 --- /dev/null +++ b/arch/sw_64/boot/dts/chip3.dts @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Default device tree; + */ + +/dts-v1/; +/ { + compatible = "sunway,chip3"; + model = "chip3"; + #address-cells = <2>; + #size-cells = <2>; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + clocks { + i2cclk: i2cclk { + compatible = "fixed-clock"; + clock-frequency = <25000000>; + #clock-cells = <0>; + 
clock-output-names = "i2cclk_25mhz"; + }; + spiclk: spiclk { + compatible = "fixed-clock"; + clock-frequency = <25000000>; + #clock-cells = <0>; + clock-output-names = "spiclk_25mhz"; + }; + + }; + + intc: interrupt-controller { + compatible = "sw64,sw6_irq_controller"; + interrupt-controller; + #interrupt-cells = <1>; + }; + + lpc_intc: interrupt-controller@0x8037 { + compatible = "sw64,lpc_intc"; + reg = <0x8037 0x40000000 0x0 0x8000>; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&intc>; + interrupts = <2>; + }; + + uart: serial0@8033 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw6,sunway-apb-uart"; + reg = <0x8033 0x0 0x0 0x1000>; + interrupt-parent=<&intc>; + interrupts = <3>; + reg-shift = <9>; + reg-io-width = <4>; + clock-frequency = <24000000>; + status = "okay"; + }; + + serial1@9033 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw6,sunway-apb-uart"; + reg = <0x9033 0x0 0x0 0x1000>; + reg-shift = <9>; + reg-io-width = <4>; + clock-frequency = <24000000>; + status = "okay"; + }; + + + i2c0@0x8031 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "snps,designware-i2c"; + reg = <0x8031 0x0 0x0 0x8000>; + clock-frequency = <100000>; + clocks = <&i2cclk>; + interrupt-parent=<&intc>; + interrupts = <5>; + status = "okay"; + }; + + i2c1@0x8034 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0x8034 0x0 0x0 0x8000>; + clock-frequency = <100000>; + clocks = <&i2cclk>; + interrupt-parent=<&intc>; + interrupts = <6>; + status = "okay"; + }; + + i2c2@0x8035 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0x8035 0x0 0x0 0x8000>; + clock-frequency = <100000>; + clocks = <&i2cclk>; + interrupt-parent=<&intc>; + interrupts = <7>; + status = "okay"; + + rtc: pcf8523@68 { + compatible = "nxp,pcf8523"; + reg = <0x68>; + }; + + lm75: at30tse752a@48 { + compatible = "microchip,tcn75"; + reg = <0x48>; + }; + }; + + 
pvt: pvt@0x8030 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw64,pvt-vol"; + reg = <0x8030 0x0 0x0 0x7c00>; + status = "okay"; + }; + + spi: spi@0x8032 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sunway,chip3-spi"; + reg = <0x8032 0x0 0x0 0x8000>; + clocks = <&spiclk>; + interrupt-parent=<&intc>; + interrupts = <4>; + status = "okay"; + + flash@0 { + compatible = "winbond,w25q32dw", "jedec,spi-flash"; + spi-max-frequency = <25000000>; + m25p,fast-read; + spi-cpha; + spi-cpol; + poll_mode = <1>; /* poll_mode:1 interrupt mode: 0 */ + reg-io-width = <2>; + reg = <0 0 0 0 >; /* 0: flash chip selected bit */ + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "spares0"; + reg = <0 0x400000>; + }; + }; + }; + + flash@1 { + compatible = "winbond,w25q32dw", "jedec,spi-flash"; + spi-max-frequency = <25000000>; + m25p,fast-read; + spi-cpha; + spi-cpol; + poll_mode = <1>; /* poll_mode:1 interrupt mode: 0 */ + reg-io-width = <2>; + reg = <1 0 0 0 >; /* 1: flash chip selected bit */ + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "spares1"; + reg = <0 0x400000>; + }; + }; + }; + }; + + lpc: lpc@0x8037 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sunway,chip3_lpc"; + reg = <0x8037 0x40000000 0x0 0x8000>; + status = "okay"; + + }; + + ipmi-kcs@0x8037 { + #address-cells = <2>; + #size-cells = <2>; + device_type = "ipmi"; + compatible = "ipmi-kcs"; + reg = <0x8037 0x10000ca2 0x0 0x10>; + reg-size = <1>; + reg-spacing = <1>; + reg-shift = <0>; + status = "disabled"; + }; + + ipmi-bt@0x8037 { + #address-cells = <2>; + #size-cells = <2>; + device_type = "ipmi"; + compatible = "ipmi-bt"; + reg = <0x8037 0x100000e4 0x0 0x10>; + interrupt-parent=<&lpc_intc>; + interrupts = <10>; + reg-size = <1>; + reg-spacing = <1>; + reg-shift = <0>; + status = "disabled"; + }; + + gpio: gpio@8036 { 
+ #address-cells = <2>; + #size-cells = <2>; + compatible = "snps,sw-gpio"; + reg = <0x8036 0x0 0x0 0x8000>; + status = "okay"; + + porta: gpio-contraller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; + reg = <0 0 0 0>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent=<&intc>; + interrupts = <0>; + }; + }; + + }; +}; diff --git a/arch/sw_64/boot/dts/chip_vt.dts b/arch/sw_64/boot/dts/chip_vt.dts new file mode 100644 index 000000000000..f26285367f98 --- /dev/null +++ b/arch/sw_64/boot/dts/chip_vt.dts @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Default device tree; + */ + +/dts-v1/; +/ { + compatible = "sunway,chip3"; + model = "chip3"; + #address-cells = <2>; + #size-cells = <2>; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + intc: interrupt-controller{ + compatible = "sw64,sw6_irq_vt_controller"; + interrupt-controller; + #interrupt-cells = <1>; + }; + + uart: serial0@8801 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "ns16550a"; + reg = <0x8801 0x3f8 0x0 0x10>; + interrupt-parent=<&intc>; + interrupts = <12>; + reg-shift = <0>; + reg-io-width = <1>; + clock-frequency = <24000000>; + status = "okay"; + }; + misc: misc0@8036 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw6,sunway-ged"; + reg = <0x8036 0x0 0x0 0x20>; + interrupt-parent=<&intc>; + interrupts = <13>; + reg-shift = <0>; + reg-io-width = <8>; + clock-frequency = <24000000>; + status = "okay"; + }; + fw_cfg: fw_cfg@8049 { + dma-coherent; + reg = <0x8049 0x20000000 0x0 0x18>; + compatible = "qemu,fw-cfg-mmio"; + }; + }; +}; diff --git a/arch/sw_64/boot/dts/empty.dts b/arch/sw_64/boot/dts/empty.dts new file mode 100644 index 000000000000..f8fe34e29641 --- /dev/null +++ b/arch/sw_64/boot/dts/empty.dts @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Default device tree; + */ + +/dts-v1/; +/ { + compatible = 
"sunway,chip3"; + model = "chip3"; + #address-cells = <2>; + #size-cells = <2>; + + soc { + }; +}; -- Gitee From 7753fe761b7663a79c5d18e1fde1c256643ca321 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:00 +0800 Subject: [PATCH 0302/2138] anolis: sw64: add ACPI support ANBZ: #4688 Add basic ACPI support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/acenv.h | 40 +++++ arch/sw_64/include/asm/acpi.h | 117 +++++++++++++ arch/sw_64/kernel/acpi.c | 304 +++++++++++++++++++++++++++++++++ arch/sw_64/pci/acpi.c | 245 ++++++++++++++++++++++++++ 4 files changed, 706 insertions(+) create mode 100644 arch/sw_64/include/asm/acenv.h create mode 100644 arch/sw_64/include/asm/acpi.h create mode 100644 arch/sw_64/kernel/acpi.c create mode 100644 arch/sw_64/pci/acpi.c diff --git a/arch/sw_64/include/asm/acenv.h b/arch/sw_64/include/asm/acenv.h new file mode 100644 index 000000000000..53b2898718fe --- /dev/null +++ b/arch/sw_64/include/asm/acenv.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_ACENV_H +#define _ASM_SW64_ACENV_H + +#define COMPILER_DEPENDENT_INT64 long +#define COMPILER_DEPENDENT_UINT64 unsigned long + +/* + * Calling conventions: + * + * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) + * ACPI_EXTERNAL_XFACE - External ACPI interfaces + * ACPI_INTERNAL_XFACE - Internal ACPI interfaces + * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces + */ +#define ACPI_SYSTEM_XFACE +#define ACPI_EXTERNAL_XFACE +#define ACPI_INTERNAL_XFACE +#define ACPI_INTERNAL_VAR_XFACE + +/* Asm macros */ +#define ACPI_FLUSH_CPU_CACHE() + +int __acpi_acquire_global_lock(unsigned int *lock); +int __acpi_release_global_lock(unsigned int *lock); + +#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) + 
+#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_release_global_lock(&facs->global_lock)) + +/* + * Math helper asm macros + */ +#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) + +#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) +#endif /* _ASM_SW64_ACENV_H */ diff --git a/arch/sw_64/include/asm/acpi.h b/arch/sw_64/include/asm/acpi.h new file mode 100644 index 000000000000..ef46f481e1fd --- /dev/null +++ b/arch/sw_64/include/asm/acpi.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_ACPI_H +#define _ASM_SW64_ACPI_H + +#include +#include +#include +#include + +#ifdef CONFIG_ACPI +extern int acpi_noirq; +extern int acpi_strict; +extern int acpi_disabled; +extern int acpi_pci_disabled; + +/* _ASM_SW64_PDC_H */ +#define ACPI_PDC_P_FFH (0x0001) +#define ACPI_PDC_C_C1_HALT (0x0002) +#define ACPI_PDC_T_FFH (0x0004) +#define ACPI_PDC_SMP_C1PT (0x0008) +#define ACPI_PDC_SMP_C2C3 (0x0010) +#define ACPI_PDC_SMP_P_SWCOORD (0x0020) +#define ACPI_PDC_SMP_C_SWCOORD (0x0040) +#define ACPI_PDC_SMP_T_SWCOORD (0x0080) +#define ACPI_PDC_C_C1_FFH (0x0100) +#define ACPI_PDC_C_C2C3_FFH (0x0200) +#define ACPI_PDC_SMP_P_HWCOORD (0x0800) + +#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_P_FFH) + +#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_SMP_P_SWCOORD | \ + ACPI_PDC_SMP_P_HWCOORD | \ + ACPI_PDC_P_FFH) + +#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \ + ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_C_C1_FFH | \ + ACPI_PDC_C_C2C3_FFH) + +#define ACPI_TABLE_UPGRADE_MAX_PHYS MEMBLOCK_ALLOC_ACCESSIBLE + +/** + * Use the number 64 is just because this number is the most + * frequently used number in other architectures. Actually, + * SW64 does not have fixmap area in memory layout. 
+ */ +#define NR_FIX_BTMAPS 64 + +static inline void disable_acpi(void) +{ + acpi_disabled = 1; + acpi_pci_disabled = 1; + acpi_noirq = 1; +} + +static inline void enable_acpi(void) +{ + acpi_disabled = 0; + acpi_pci_disabled = 0; + acpi_noirq = 0; +} + +static inline void acpi_noirq_set(void) +{ + acpi_noirq = 1; +} + +static inline void acpi_disable_pci(void) +{ + acpi_pci_disabled = 1; + acpi_noirq_set(); +} + +static inline bool acpi_has_cpu_in_madt(void) +{ + return true; +} + +/* Low-level suspend routine. */ +extern int (*acpi_suspend_lowlevel)(void); +extern unsigned long long arch_acpi_wakeup_start; + +/* Physical address to resume after wakeup */ +#define acpi_wakeup_address arch_acpi_wakeup_start + +/* + * Check if the CPU can handle C2 and deeper + */ +static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) +{ + return max_cstate; +} + +static inline bool arch_has_acpi_pdc(void) +{ + return false; +} + +static inline void arch_acpi_set_pdc_bits(u32 *buf) +{ +} +#else /* !CONFIG_ACPI */ + +static inline void acpi_noirq_set(void) { } +static inline void acpi_disable_pci(void) { } +static inline void disable_acpi(void) { } + +#endif /* !CONFIG_ACPI */ + +#define acpi_unlazy_tlb(x) +#endif /* _ASM_SW64_ACPI_H */ diff --git a/arch/sw_64/kernel/acpi.c b/arch/sw_64/kernel/acpi.c new file mode 100644 index 000000000000..9779d4bdea0d --- /dev/null +++ b/arch/sw_64/kernel/acpi.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include + +#include + +#ifdef CONFIG_ACPI_HOTPLUG_CPU +#include +#endif + +int acpi_disabled = 1; +EXPORT_SYMBOL(acpi_disabled); + +int acpi_noirq = 1; /* skip ACPI IRQ initialization */ +int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */ +EXPORT_SYMBOL(acpi_pci_disabled); + +static bool param_acpi_on __initdata; +static bool param_acpi_off __initdata; + +int acpi_strict; +u64 arch_acpi_wakeup_start; +u64 acpi_saved_sp_s3; + +#define MAX_LOCAL_APIC 256 + 
+#define PREFIX "ACPI: " +/* + * The default interrupt routing model is PIC (8259). This gets + * overridden if IOAPICs are enumerated (below). + */ +enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC; +void __iomem *__init __acpi_map_table(unsigned long phys, unsigned long size) +{ + if (!phys || !size) + return NULL; + + return early_ioremap(phys, size); +} +void __init __acpi_unmap_table(void __iomem *map, unsigned long size) +{ + if (!map || !size) + return; + + early_iounmap(map, size); +} +/* + * Following __acpi_xx functions should be implemented for sepecific cpu. + */ +int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp) +{ + if (irqp != NULL) + *irqp = acpi_register_gsi(NULL, gsi, -1, -1); + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); + +int acpi_isa_irq_to_gsi(unsigned int isa_irq, u32 *gsi) +{ + if (gsi) + *gsi = isa_irq; + + return 0; +} + +int (*acpi_suspend_lowlevel)(void); + +/* + * success: return IRQ number (>=0) + * failure: return < 0 + */ +static struct irq_domain *irq_default_domain; +int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) +{ + u32 irq; + + irq = irq_find_mapping(irq_default_domain, gsi); + + return irq; +} +EXPORT_SYMBOL_GPL(acpi_register_gsi); + +void acpi_unregister_gsi(u32 gsi) +{ + +} +EXPORT_SYMBOL_GPL(acpi_unregister_gsi); + +/* + * ACPI based hotplug support for CPU + */ +#ifdef CONFIG_ACPI_HOTPLUG_CPU +/* wrapper to silence section mismatch warning */ +int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) +{ + return 0; +} +EXPORT_SYMBOL(acpi_map_lsapic); + +int acpi_unmap_lsapic(int cpu) +{ + return 0; +} +EXPORT_SYMBOL(acpi_unmap_lsapic); +#endif /* CONFIG_ACPI_HOTPLUG_CPU */ + +u8 acpi_checksum(u8 *table, u32 length) +{ + u8 ret = 0; + + while (length--) { + ret += *table; + table++; + } + return -ret; +} + +static int __init parse_acpi(char *arg) +{ + if (!arg) + return -EINVAL; + + /* disable both ACPI table parsing and interpreter */ + if (strcmp(arg, "off") 
== 0) + param_acpi_off = true; + else if (strcmp(arg, "on") == 0) /* prefer ACPI over device tree */ + param_acpi_on = true; + else + return -EINVAL; /* Core will printk when we return error. */ + + return 0; +} +early_param("acpi", parse_acpi); + +/* + * __acpi_acquire_global_lock + * will always return -1 indicating owning the lock. + * + * __acpi_release_global_lock will always return 0 indicating + * no acquring request pending. + */ +int __acpi_acquire_global_lock(unsigned int *lock) +{ + return -1; +} + +int __acpi_release_global_lock(unsigned int *lock) +{ + return 0; +} + +#ifdef CONFIG_ACPI_NUMA +static int rcid_to_cpu(int physical_id) +{ + int i; + + for (i = 0; i < NR_CPUS; ++i) { + if (__cpu_to_rcid[i] == physical_id) + return i; + } + + /* physical id not found */ + return -1; +} + +/* Callback for Proximity Domain -> CPUID mapping */ +void __init +acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) +{ + int pxm, node; + int cpu; // logical core id + + if (srat_disabled()) + return; + if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) { + bad_srat(); + return; + } + if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0) + return; + pxm = pa->proximity_domain_lo; + if (acpi_srat_revision >= 2) { + pxm |= (pa->proximity_domain_hi[0] << 8); + pxm |= (pa->proximity_domain_hi[1] << 16); + pxm |= (pa->proximity_domain_hi[2] << 24); + } + + node = acpi_map_pxm_to_node(pxm); + if (node < 0) { + pr_err("SRAT: Too many proximity domains %x\n", pxm); + bad_srat(); + return; + } + + if (pa->apic_id >= CONFIG_NR_CPUS) { + pr_err("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n", pxm, pa->apic_id, node); + return; + } + + /* Record the mapping from logical core id to node id */ + cpu = rcid_to_cpu(pa->apic_id); + if (cpu < 0) { + pr_err("SRAT: Can not find the logical id for physical Core 0x%02x\n", pa->apic_id); + return; + } + + early_map_cpu_to_node(cpu, node); + + node_set(node, numa_nodes_parsed); + pr_info("SRAT: PXM 
%u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node); +} + +#ifdef CONFIG_MEMORY_HOTPLUG +static inline int save_add_info(void) { return 1; } +#else +static inline int save_add_info(void) { return 0; } +#endif + +#endif + +void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size) +{ +} + +#ifdef CONFIG_ACPI_HOTPLUG_CPU +static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) +{ +#ifdef CONFIG_ACPI_NUMA + int nid; + + nid = acpi_get_node(handle); + if (nid != NUMA_NO_NODE) { + set_cpuid_to_node(cpu, nid); + node_set(nid, numa_nodes_parsed); + } +#endif + return 0; +} + +int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, + int *pcpu) +{ + int cpu; + struct acpi_madt_local_apic *processor; + + processor = kzalloc(sizeof(struct acpi_madt_local_apic), GFP_KERNEL); + processor->id = physid; + processor->processor_id = acpi_id; + processor->lapic_flags = ACPI_MADT_ENABLED; + + cpu = set_processor_mask(processor); + if (cpu < 0) { + pr_info(PREFIX "Unable to map lapic to logical cpu number\n"); + return cpu; + } + + acpi_map_cpu2node(handle, cpu, physid); + + *pcpu = cpu; + return 0; +} +EXPORT_SYMBOL(acpi_map_cpu); + +int acpi_unmap_cpu(int cpu) +{ +#ifdef CONFIG_ACPI_NUMA + set_cpuid_to_node(cpu, NUMA_NO_NODE); +#endif + set_cpu_present(cpu, false); + num_processors--; + + pr_info("cpu%d hot remove!\n", cpu); + + return 0; +} +EXPORT_SYMBOL(acpi_unmap_cpu); +#endif /* CONFIG_ACPI_HOTPLUG_CPU */ + +void __init acpi_boot_table_init(void) +{ + /** + * ACPI is disabled by default. + * ACPI is only enabled when firmware passes ACPI table + * and sets boot parameter "acpi=on". 
+ */ + if (param_acpi_on) + enable_acpi(); + + /* + * If acpi_disabled, bail out + */ + if (!acpi_disabled) { + pr_warn("Currently, ACPI is an experimental feature!\n"); + if (acpi_table_init()) { + pr_err("Failed to init ACPI tables\n"); + disable_acpi(); + } else + pr_info("Successfully parsed ACPI table\n"); + } +} diff --git a/arch/sw_64/pci/acpi.c b/arch/sw_64/pci/acpi.c new file mode 100644 index 000000000000..1353994320b3 --- /dev/null +++ b/arch/sw_64/pci/acpi.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +struct pci_root_info { + struct acpi_pci_root_info info; + struct pci_config_window *cfg; +}; + +static void pci_acpi_release_root_info(struct acpi_pci_root_info *ci) +{ + struct pci_root_info *pci_ri; + + pci_ri = container_of(ci, struct pci_root_info, info); + pci_ecam_free(pci_ri->cfg); + kfree(ci->ops); + kfree(pci_ri); +} + +int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) +{ + struct pci_config_window *cfg = bus->sysdata; + struct acpi_device *adev = to_acpi_device(cfg->parent); + struct acpi_pci_root *root = acpi_driver_data(adev); + + return root->segment; +} + +/** + * Lookup the MCFG table entry corresponding to the current + * PCI host controller, and set up config space mapping. 
+ */ +static struct pci_config_window * +pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root) +{ + struct device *dev = &root->device->dev; + struct pci_config_window *cfg = NULL; + const struct pci_ecam_ops *ecam_ops = NULL; + struct resource *bus_res = &root->secondary; + struct resource cfg_res; + struct acpi_device *adev = NULL; + int ret = 0, bus_shift = 0; + u16 seg = root->segment; + + ret = pci_mcfg_lookup(root, &cfg_res, &ecam_ops); + if (ret < 0) { + dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res); + return NULL; + } + + /** + * Do the quirk of bus shift here, since we can not + * know the ECAM addr in MCFG table when fill mcfg_quirks + */ + bus_shift = ecam_ops->bus_shift; + cfg_res.start = root->mcfg_addr + (bus_res->start << bus_shift); + cfg_res.end = cfg_res.start + ((resource_size(bus_res)) << bus_shift) - 1; + cfg_res.flags = IORESOURCE_MEM; + + /** + * ECAM area considered as the mem resource of the current + * PCI host controller, we'd better record this resource + * in ACPI namespace(_CRS). + */ + adev = acpi_resource_consumer(&cfg_res); + if (adev) + dev_info(dev, "ECAM area %pR reserved by %s\n", &cfg_res, + dev_name(&adev->dev)); + else + dev_info(dev, "Note: ECAM area %pR not reserved in ACPI namespace\n", + &cfg_res); + + cfg = pci_ecam_create(dev, &cfg_res, bus_res, ecam_ops); + if (IS_ERR(cfg)) { + dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res, + PTR_ERR(cfg)); + return NULL; + } + + return cfg; +} + +static int pci_acpi_prepare_root_resources(struct acpi_pci_root_info *ci) +{ + int status = 0; + acpi_status rc; + unsigned long long mem_space_base = 0; + struct resource_entry *entry = NULL, *tmp = NULL; + struct acpi_device *device = ci->bridge; + + /** + * Get host bridge resources via _CRS method, the return value + * is the num of resource parsed. 
+ */ + status = acpi_pci_probe_root_resources(ci); + if (status > 0) { + /** + * To distinguish between mem and pre_mem, firmware only pass the + * lower 32bits of mem via acpi and use vendor specific "MEMH" to + * record the upper 32 bits of mem. + * + * Get the upper 32 bits here. + */ + rc = acpi_evaluate_integer(ci->bridge->handle, + "MEMH", NULL, &mem_space_base); + if (rc != AE_OK) { + dev_err(&device->dev, "unable to retrieve MEMH\n"); + return -EEXIST; + } + + resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { + if (entry->res->flags & IORESOURCE_MEM) { + if (!(entry->res->end & 0xFFFFFFFF00000000ULL)) { + /* Patch the mem resource with upper 32 bits */ + entry->res->start |= (mem_space_base << 32); + entry->res->end |= (mem_space_base << 32); + } else { + /** + * Add PREFETCH and MEM_64 flags for pre_mem, + * so that we can distinguish between mem and + * pre_mem. + */ + entry->res->flags |= IORESOURCE_PREFETCH; + entry->res->flags |= IORESOURCE_MEM_64; + } + } + + dev_dbg(&device->dev, + "host bridge resource: 0x%llx-0x%llx flags [0x%lx]\n", + entry->res->start, entry->res->end, entry->res->flags); + } + return status; + } + + /** + * If not successfully parse resources, destroy + * resources which have been parsed. + */ + resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { + dev_info(&device->dev, + "host bridge resource(ignored): 0x%llx-0x%llx flags [0x%lx]\n", + entry->res->start, entry->res->end, entry->res->flags); + resource_list_destroy_entry(entry); + } + + return 0; +} + +/** + * This function is called from ACPI code and used to + * setup PCI host controller. 
+ */ +struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) +{ + struct pci_bus *bus = NULL, *child = NULL; + struct pci_root_info *pci_ri = NULL; + struct acpi_pci_root_ops *root_ops = NULL; + int domain = root->segment; + int busnum = root->secondary.start; + + pci_ri = kzalloc(sizeof(*pci_ri), GFP_KERNEL); + if (!pci_ri) + goto out_of_mem_0; + + root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL); + if (!root_ops) + goto out_of_mem_1; + + pci_ri->cfg = pci_acpi_setup_ecam_mapping(root); + if (!pci_ri->cfg) + goto setup_ecam_err; + + root_ops->release_info = pci_acpi_release_root_info; + root_ops->prepare_resources = pci_acpi_prepare_root_resources; + root_ops->pci_ops = (struct pci_ops *)&pci_ri->cfg->ops->pci_ops; + + bus = pci_find_bus(domain, busnum); + if (bus) { + memcpy(bus->sysdata, pci_ri->cfg, sizeof(struct pci_config_window)); + kfree(pci_ri->cfg); + kfree(pci_ri); + kfree(root_ops); + } else { + bus = acpi_pci_root_create(root, root_ops, &pci_ri->info, pci_ri->cfg); + + /** + * No need to do kfree here, because acpi_pci_root_create will free + * mem alloced when it cannot create pci_bus. 
+ */ + if (!bus) + return NULL; + + /* Some quirks for pci controller of Sunway after scanning Root Complex */ + sw64_pci_root_bridge_scan_finish_up(pci_find_host_bridge(bus)); + + pci_bus_size_bridges(bus); + pci_bus_assign_resources(bus); + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + } + + return bus; + +setup_ecam_err: + kfree(root_ops); +out_of_mem_1: + kfree(pci_ri); +out_of_mem_0: + pr_warn("RC [%04x:%02x:] failed (out of memory or setup ecam error)!\n", + domain, busnum); + + return NULL; +} + +int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) +{ + if (!acpi_disabled) { + struct pci_config_window *cfg = bridge->sysdata; + struct acpi_device *adev = to_acpi_device(cfg->parent); + struct pci_controller *hose = cfg->priv; + struct device *bus_dev = &bridge->bus->dev; + + ACPI_COMPANION_SET(&bridge->dev, adev); + set_dev_node(bus_dev, hose->node); + + /* Some quirks for pci controller of Sunway before scanning Root Complex */ + sw64_pci_root_bridge_prepare(bridge); + } + + return 0; +} + +void pcibios_add_bus(struct pci_bus *bus) +{ + acpi_pci_add_bus(bus); +} + +void pcibios_remove_bus(struct pci_bus *bus) +{ + acpi_pci_remove_bus(bus); +} -- Gitee From 3a776748b8f58bf00086361d2cc573e488024978 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:06 +0800 Subject: [PATCH 0303/2138] anolis: sw64: add DMA support ANBZ: #4688 Add DMA support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/dma-direct.h | 15 ++ arch/sw_64/include/asm/dma-mapping.h | 14 ++ arch/sw_64/include/asm/dma.h | 350 +++++++++++++++++++++++++++ 3 files changed, 379 insertions(+) create mode 100644 arch/sw_64/include/asm/dma-direct.h create mode 100644 arch/sw_64/include/asm/dma-mapping.h create mode 100644 arch/sw_64/include/asm/dma.h diff --git a/arch/sw_64/include/asm/dma-direct.h b/arch/sw_64/include/asm/dma-direct.h new file mode 100644 index 000000000000..dee1680b8f6d --- /dev/null +++ b/arch/sw_64/include/asm/dma-direct.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_DMA_DIRECT_H +#define _ASM_SW64_DMA_DIRECT_H + +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + return paddr; +} + +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) +{ + return daddr; +} + +#endif /* _ASM_SW64_DMA_DIRECT_H */ diff --git a/arch/sw_64/include/asm/dma-mapping.h b/arch/sw_64/include/asm/dma-mapping.h new file mode 100644 index 000000000000..65795f8e5792 --- /dev/null +++ b/arch/sw_64/include/asm/dma-mapping.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_DMA_MAPPING_H +#define _ASM_SW64_DMA_MAPPING_H + + +extern const struct dma_map_ops *dma_ops; + +static inline const struct dma_map_ops *get_arch_dma_ops(void) +{ + return dma_ops; +} + + +#endif /* _ASM_SW64_DMA_MAPPING_H */ diff --git a/arch/sw_64/include/asm/dma.h b/arch/sw_64/include/asm/dma.h new file mode 100644 index 000000000000..cf6a9cf75233 --- /dev/null +++ b/arch/sw_64/include/asm/dma.h @@ -0,0 +1,350 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/asm-sw_64/dma.h + * + * This is essentially the same as the i386 DMA stuff, as the SW64PCs + * use ISA-compatible dma. 
The only extension is support for high-page + * registers that allow to set the top 8 bits of a 32-bit DMA address. + * This register should be written last when setting up a DMA address + * as this will also enable DMA across 64 KB boundaries. + */ + +/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ + * linux/include/asm/dma.h: Defines for using and allocating dma channels. + * Written by Hennus Bergman, 1992. + * High DMA channel support & info by Hannu Savolainen + * and John Boyd, Nov. 1992. + */ + +#ifndef _ASM_SW64_DMA_H +#define _ASM_SW64_DMA_H + +#include +#include + +#define dma_outb outb +#define dma_inb inb + +/* + * NOTES about DMA transfers: + * + * controller 1: channels 0-3, byte operations, ports 00-1F + * controller 2: channels 4-7, word operations, ports C0-DF + * + * - ALL registers are 8 bits only, regardless of transfer size + * - channel 4 is not used - cascades 1 into 2. + * - channels 0-3 are byte - addresses/counts are for physical bytes + * - channels 5-7 are word - addresses/counts are for physical words + * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries + * - transfer count loaded to registers is 1 less than actual count + * - controller 2 offsets are all even (2x offsets for controller 1) + * - page registers for 5-7 don't use data bit 0, represent 128K pages + * - page registers for 0-3 use bit 0, represent 64K pages + * + * DMA transfers are limited to the lower 16MB of _physical_ memory. + * Note that addresses loaded into registers must be _physical_ addresses, + * not logical addresses (which may differ if paging is active). + * + * Address mapping for channels 0-3: + * + * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * P7 ... P0 A7 ... A0 A7 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Address mapping for channels 5-7: + * + * A23 ... A17 A16 A15 ... A9 A8 A7 ... 
A1 A0 (Physical addresses) + * | ... | \ \ ... \ \ \ ... \ \ + * | ... | \ \ ... \ \ \ ... \ (not used) + * | ... | \ \ ... \ \ \ ... \ + * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses + * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at + * the hardware level, so odd-byte transfers aren't possible). + * + * Transfer count (_not # bytes_) is limited to 64K, represented as actual + * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, + * and up to 128K bytes may be transferred on channels 5-7 in one operation. + * + */ + +#define MAX_DMA_CHANNELS 8 + +/* + * ISA DMA limitations on sw64 platforms, + + * These may be due to SIO (PCI<->ISA bridge) chipset limitation, or + * just a wiring limit. + */ + +/* + * Maximum address for all the others is the complete 32-bit bus + * address space. + */ +#define MAX_ISA_DMA_ADDRESS 0x100000000UL + +#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT)) + +/* + * If we have the iommu, we don't have any address limitations on DMA. + * Otherwise (Nautilus, RX164), we have to have 0-16 Mb DMA zone + * like i386. 
+ */ +#define MAX_DMA_ADDRESS ~0UL + +/* 8237 DMA controllers */ +#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ +#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ + +/* DMA controller registers */ +#define DMA1_CMD_REG 0x08 /* command register (w) */ +#define DMA1_STAT_REG 0x08 /* status register (r) */ +#define DMA1_REQ_REG 0x09 /* request register (w) */ +#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ +#define DMA1_MODE_REG 0x0B /* mode register (w) */ +#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ +#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ +#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ +#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ +#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ +#define DMA1_EXT_MODE_REG (0x400 | DMA1_MODE_REG) + +#define DMA2_CMD_REG 0xD0 /* command register (w) */ +#define DMA2_STAT_REG 0xD0 /* status register (r) */ +#define DMA2_REQ_REG 0xD2 /* request register (w) */ +#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ +#define DMA2_MODE_REG 0xD6 /* mode register (w) */ +#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ +#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ +#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ +#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ +#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ +#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG) + +#define DMA_ADDR_0 0x00 /* DMA address registers */ +#define DMA_ADDR_1 0x02 +#define DMA_ADDR_2 0x04 +#define DMA_ADDR_3 0x06 +#define DMA_ADDR_4 0xC0 +#define DMA_ADDR_5 0xC4 +#define DMA_ADDR_6 0xC8 +#define DMA_ADDR_7 0xCC + +#define DMA_CNT_0 0x01 /* DMA count registers */ +#define DMA_CNT_1 0x03 +#define DMA_CNT_2 0x05 +#define DMA_CNT_3 0x07 +#define DMA_CNT_4 0xC2 +#define DMA_CNT_5 0xC6 +#define DMA_CNT_6 0xCA +#define DMA_CNT_7 0xCE + +#define DMA_PAGE_0 0x87 /* DMA page registers */ +#define DMA_PAGE_1 0x83 +#define DMA_PAGE_2 
0x81 +#define DMA_PAGE_3 0x82 +#define DMA_PAGE_5 0x8B +#define DMA_PAGE_6 0x89 +#define DMA_PAGE_7 0x8A + +#define DMA_HIPAGE_0 (0x400 | DMA_PAGE_0) +#define DMA_HIPAGE_1 (0x400 | DMA_PAGE_1) +#define DMA_HIPAGE_2 (0x400 | DMA_PAGE_2) +#define DMA_HIPAGE_3 (0x400 | DMA_PAGE_3) +#define DMA_HIPAGE_4 (0x400 | DMA_PAGE_4) +#define DMA_HIPAGE_5 (0x400 | DMA_PAGE_5) +#define DMA_HIPAGE_6 (0x400 | DMA_PAGE_6) +#define DMA_HIPAGE_7 (0x400 | DMA_PAGE_7) + +#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ +#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ +#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ + +#define DMA_AUTOINIT 0x10 + +extern spinlock_t dma_spin_lock; + +static inline unsigned long claim_dma_lock(void) +{ + unsigned long flags; + + spin_lock_irqsave(&dma_spin_lock, flags); + return flags; +} + +static inline void release_dma_lock(unsigned long flags) +{ + spin_unlock_irqrestore(&dma_spin_lock, flags); +} + +/* enable/disable a specific DMA channel */ +static inline void enable_dma(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(dmanr, DMA1_MASK_REG); + else + dma_outb(dmanr & 3, DMA2_MASK_REG); +} + +static inline void disable_dma(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(dmanr | 4, DMA1_MASK_REG); + else + dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); +} + +/* Clear the 'DMA Pointer Flip Flop'. + * Write 0 for LSB/MSB, 1 for MSB/LSB access. + * Use this once to initialize the FF to a known state. + * After that, keep track of it. :-) + * --- In order to do that, the DMA routines below should --- + * --- only be used while interrupts are disabled! 
--- + */ +static inline void clear_dma_ff(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(0, DMA1_CLEAR_FF_REG); + else + dma_outb(0, DMA2_CLEAR_FF_REG); +} + +/* set mode (above) for a specific DMA channel */ +static inline void set_dma_mode(unsigned int dmanr, char mode) +{ + if (dmanr <= 3) + dma_outb(mode | dmanr, DMA1_MODE_REG); + else + dma_outb(mode | (dmanr & 3), DMA2_MODE_REG); +} + +/* set extended mode for a specific DMA channel */ +static inline void set_dma_ext_mode(unsigned int dmanr, char ext_mode) +{ + if (dmanr <= 3) + dma_outb(ext_mode | dmanr, DMA1_EXT_MODE_REG); + else + dma_outb(ext_mode | (dmanr & 3), DMA2_EXT_MODE_REG); +} + +/* Set only the page register bits of the transfer address. + * This is used for successive transfers when we know the contents of + * the lower 16 bits of the DMA current address register. + */ +static inline void set_dma_page(unsigned int dmanr, unsigned int pagenr) +{ + switch (dmanr) { + case 0: + dma_outb(pagenr, DMA_PAGE_0); + dma_outb((pagenr >> 8), DMA_HIPAGE_0); + break; + case 1: + dma_outb(pagenr, DMA_PAGE_1); + dma_outb((pagenr >> 8), DMA_HIPAGE_1); + break; + case 2: + dma_outb(pagenr, DMA_PAGE_2); + dma_outb((pagenr >> 8), DMA_HIPAGE_2); + break; + case 3: + dma_outb(pagenr, DMA_PAGE_3); + dma_outb((pagenr >> 8), DMA_HIPAGE_3); + break; + case 5: + dma_outb(pagenr & 0xfe, DMA_PAGE_5); + dma_outb((pagenr >> 8), DMA_HIPAGE_5); + break; + case 6: + dma_outb(pagenr & 0xfe, DMA_PAGE_6); + dma_outb((pagenr >> 8), DMA_HIPAGE_6); + break; + case 7: + dma_outb(pagenr & 0xfe, DMA_PAGE_7); + dma_outb((pagenr >> 8), DMA_HIPAGE_7); + break; + } +} + + +/* Set transfer address & page bits for specific DMA channel. + * Assumes dma flipflop is clear. 
+ */ +static inline void set_dma_addr(unsigned int dmanr, unsigned int a) +{ + if (dmanr <= 3) { + dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); + dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); + } else { + dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); + dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); + } + set_dma_page(dmanr, a >> 16); /* set hipage last to enable 32-bit mode */ +} + + +/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for + * a specific DMA channel. + * You must ensure the parameters are valid. + * NOTE: from a manual: "the number of transfers is one more + * than the initial word count"! This is taken into account. + * Assumes dma flip-flop is clear. + * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. + */ +static inline void set_dma_count(unsigned int dmanr, unsigned int count) +{ + count--; + if (dmanr <= 3) { + dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + } else { + dma_outb((count >> 1) & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + dma_outb((count >> 9) & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + } +} + + +/* Get DMA residue count. After a DMA transfer, this + * should return zero. Reading this while a DMA transfer is + * still in progress will return unpredictable results. + * If called before the channel has been used, it may return 1. + * Otherwise, it returns the number of _bytes_ left to transfer. + * + * Assumes DMA flip-flop is clear. + */ +static inline int get_dma_residue(unsigned int dmanr) +{ + unsigned int io_port = (dmanr <= 3) ? + ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE : + ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE; + + /* using short to get 16-bit wrap around */ + unsigned short count; + + count = 1 + dma_inb(io_port); + count += dma_inb(io_port) << 8; + + return (dmanr <= 3) ? 
count : (count << 1); +} + + +/* These are in kernel/dma.c: */ +extern int request_dma(unsigned int dmanr, const char *device_id); /* reserve a DMA channel */ +extern void free_dma(unsigned int dmanr); /* release it again */ +#define KERNEL_HAVE_CHECK_DMA +extern int check_dma(unsigned int dmanr); + +/* From PCI */ + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + + +#endif /* _ASM_SW64_DMA_H */ -- Gitee From d45ecd322139536a5b517d17ee00ff2b1abf53fe Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:07 +0800 Subject: [PATCH 0304/2138] anolis: sw64: add EFI support ANBZ: #4688 Add basic EFI support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/dmi.h | 30 ++++++++++++++++++++++++++++++ arch/sw_64/include/asm/efi.h | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 arch/sw_64/include/asm/dmi.h create mode 100644 arch/sw_64/include/asm/efi.h diff --git a/arch/sw_64/include/asm/dmi.h b/arch/sw_64/include/asm/dmi.h new file mode 100644 index 000000000000..05e80c9a3a76 --- /dev/null +++ b/arch/sw_64/include/asm/dmi.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/include/asm/dmi.h + * + * Copyright (C) 2019 Deepin Limited. + * Porting by: Deepin Kernel Team (kernel@deepin.com) + * + * based on arch/x864/include/asm/dmi.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#ifndef _ASM_SW64_DMI_H +#define _ASM_SW64_DMI_H + +#include +#include +#include +#include + +/* Use early IO mappings for DMI because it's initialized early */ +#define dmi_early_remap(x, l) early_ioremap(x, l) +#define dmi_early_unmap(x, l) early_iounmap(x, l) +#define dmi_remap(x, l) early_ioremap(x, l) +#define dmi_unmap(x) early_iounmap(x, 0) +#define dmi_alloc(l) kzalloc(l, GFP_KERNEL) + +#endif /* _ASM_SW64_DMI_H */ diff --git a/arch/sw_64/include/asm/efi.h b/arch/sw_64/include/asm/efi.h new file mode 100644 index 000000000000..34d5637e23c2 --- /dev/null +++ b/arch/sw_64/include/asm/efi.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_EFI_H +#define _ASM_SW64_EFI_H + +#include +#include +#ifdef CONFIG_EFI +extern void efi_init(void); +extern unsigned long entSuspend; + +#define SLEEP_ENTRY_GUID EFI_GUID(0x59cb76bb, 0x9c3a, 0x4c8f, 0xbd, 0x5c, 0xc0, 0x0f, 0x20, 0x61, 0x18, 0x4b) + +#else +#define efi_init() +#define efi_idmap_init() +#endif + +#define arch_efi_call_virt_setup() +#define arch_efi_call_virt_teardown() + +#define ARCH_EFI_IRQ_FLAGS_MASK 0x00000001 + +/* arch specific definitions used by the stub code */ + +/* + * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from + * start of kernel and may not cross a 2MiB boundary. We set alignment to + * 2MiB so we know it won't cross a 2MiB boundary. + */ +#define EFI_FDT_ALIGN SZ_2M /* used by allocate_new_fdt_and_exit_boot() */ +#define MAX_FDT_OFFSET SZ_512M + +#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__) + +#endif /* _ASM_SW64_EFI_H */ -- Gitee From 77ebe1337a0da0439530f344a272018a7a7967e1 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:21 +0800 Subject: [PATCH 0305/2138] anolis: sw64: add KVM support ANBZ: #4688 Add KVM support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/hcall.h | 41 + arch/sw_64/include/asm/kvm_asm.h | 38 + arch/sw_64/include/asm/kvm_cma.h | 11 + arch/sw_64/include/asm/kvm_emulate.h | 46 + arch/sw_64/include/asm/kvm_host.h | 225 ++++ arch/sw_64/include/asm/kvm_mmio.h | 17 + arch/sw_64/include/asm/kvm_mmu.h | 131 +++ arch/sw_64/include/asm/kvm_para.h | 26 + arch/sw_64/include/asm/kvm_timer.h | 9 + arch/sw_64/include/asm/vcpu.h | 106 ++ arch/sw_64/include/uapi/asm/kvm.h | 131 +++ arch/sw_64/kvm/Kconfig | 49 + arch/sw_64/kvm/Makefile | 20 + arch/sw_64/kvm/emulate.c | 128 +++ arch/sw_64/kvm/entry.S | 263 +++++ arch/sw_64/kvm/handle_exit.c | 85 ++ arch/sw_64/kvm/irq.h | 12 + arch/sw_64/kvm/kvm_cma.c | 269 +++++ arch/sw_64/kvm/kvm_core3.c | 419 +++++++ arch/sw_64/kvm/kvm_core4.c | 132 +++ arch/sw_64/kvm/kvm_timer.c | 83 ++ arch/sw_64/kvm/mmio.c | 89 ++ arch/sw_64/kvm/mmu.c | 1561 ++++++++++++++++++++++++++ arch/sw_64/kvm/perf.c | 27 + arch/sw_64/kvm/sw64.c | 592 ++++++++++ arch/sw_64/kvm/trace.h | 62 + arch/sw_64/kvm/vmem.c | 183 +++ 27 files changed, 4755 insertions(+) create mode 100644 arch/sw_64/include/asm/hcall.h create mode 100644 arch/sw_64/include/asm/kvm_asm.h create mode 100644 arch/sw_64/include/asm/kvm_cma.h create mode 100644 arch/sw_64/include/asm/kvm_emulate.h create mode 100644 arch/sw_64/include/asm/kvm_host.h create mode 100644 arch/sw_64/include/asm/kvm_mmio.h create mode 100644 arch/sw_64/include/asm/kvm_mmu.h create mode 100644 arch/sw_64/include/asm/kvm_para.h create mode 100644 arch/sw_64/include/asm/kvm_timer.h create mode 100644 arch/sw_64/include/asm/vcpu.h create mode 100644 arch/sw_64/include/uapi/asm/kvm.h create mode 100644 arch/sw_64/kvm/Kconfig create mode 100644 arch/sw_64/kvm/Makefile create mode 100644 arch/sw_64/kvm/emulate.c create mode 100644 arch/sw_64/kvm/entry.S create mode 100644 
arch/sw_64/kvm/handle_exit.c create mode 100644 arch/sw_64/kvm/irq.h create mode 100644 arch/sw_64/kvm/kvm_cma.c create mode 100644 arch/sw_64/kvm/kvm_core3.c create mode 100644 arch/sw_64/kvm/kvm_core4.c create mode 100644 arch/sw_64/kvm/kvm_timer.c create mode 100644 arch/sw_64/kvm/mmio.c create mode 100644 arch/sw_64/kvm/mmu.c create mode 100644 arch/sw_64/kvm/perf.c create mode 100644 arch/sw_64/kvm/sw64.c create mode 100644 arch/sw_64/kvm/trace.h create mode 100644 arch/sw_64/kvm/vmem.c diff --git a/arch/sw_64/include/asm/hcall.h b/arch/sw_64/include/asm/hcall.h new file mode 100644 index 000000000000..bded05779db7 --- /dev/null +++ b/arch/sw_64/include/asm/hcall.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HCALL_H +#define _ASM_SW64_HCALL_H + +#define HMC_hcall 0x32 +/* HCALL must > 0 */ +enum HCALL_TYPE { + HCALL_HALT = 10, + HCALL_NOTIFY = 11, + HCALL_SHUTDOWN = 12, + HCALL_SET_CLOCKEVENT = 13, + HCALL_IVI = 14, /* interrupt between virtual cpu */ + HCALL_TBI = 15, /* tlb flush for virtual cpu */ + HCALL_STOP = 16, /* indicate virtual cpu stopped */ + HCALL_RESTART = 17, /* indicate virtual cpu restarted */ + HCALL_MSI = 18, /* guest request msi intr */ + HCALL_MSIX = 19, /* guest request msix intr */ + HCALL_SWNET = 20, /* guest request swnet service */ + HCALL_SWNET_IRQ = 21, /* guest request swnet intr */ + HCALL_FATAL_ERROR = 22, /* guest fatal error, issued by hmcode */ + HCALL_MEMHOTPLUG = 23, /* guest memory hotplug event */ + NR_HCALL +}; + +static inline unsigned long hcall(unsigned long hcall, unsigned long arg0, + unsigned long arg1, unsigned long arg2) +{ + register unsigned long __r0 __asm__("$0"); + register unsigned long __r16 __asm__("$16") = hcall; + register unsigned long __r17 __asm__("$17") = arg0; + register unsigned long __r18 __asm__("$18") = arg1; + register unsigned long __r19 __asm__("$19") = arg2; + __asm__ __volatile__( + "sys_call %5 " + : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r19), 
"=r"(__r0) + : "i"(HMC_hcall), "0"(__r16), "1"(__r17), "2"(__r18), "3"(__r19) + : "$1", "$22", "$23", "$24", "$25"); + return __r0; +} + +#endif /* _ASM_SW64_HCALL_H */ diff --git a/arch/sw_64/include/asm/kvm_asm.h b/arch/sw_64/include/asm/kvm_asm.h new file mode 100644 index 000000000000..fd1b25018fc8 --- /dev/null +++ b/arch/sw_64/include/asm/kvm_asm.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_ASM_H +#define _ASM_SW64_KVM_ASM_H + +#define SW64_KVM_EXIT_HOST_INTR 0 +#define SW64_KVM_EXIT_IO 1 +#define SW64_KVM_MIGRATION_SET_DIRTY 2 +#define SW64_KVM_MIGRATION_SET_DIRTY_HM 3 +#define SW64_KVM_EXIT_HALT 10 +#define SW64_KVM_EXIT_SHUTDOWN 12 +#define SW64_KVM_EXIT_TIMER 13 +#define SW64_KVM_EXIT_IPI 14 +#define SW64_KVM_EXIT_STOP 16 +#define SW64_KVM_EXIT_RESTART 17 +#define SW64_KVM_EXIT_APT_FAULT 18 +#define SW64_KVM_EXIT_FATAL_ERROR 22 +#define SW64_KVM_EXIT_MEMHOTPLUG 23 +#define SW64_KVM_EXIT_DEBUG 24 + + +#define kvm_sw64_exception_type \ + {0, "HOST_INTR" }, \ + {1, "IO" }, \ + {10, "HALT" }, \ + {12, "SHUTDOWN" }, \ + {13, "TIMER" }, \ + {14, "IPI" }, \ + {16, "STOP" }, \ + {17, "RESTART" }, \ + {18, "APT_FAULT" }, \ + {22, "FATAL_ERROR" }, \ + {23, "MEMHOTPLUG" }, \ + {24, "DEBUG" } + + +#include + +#endif /* _ASM_SW64_KVM_ASM_H */ diff --git a/arch/sw_64/include/asm/kvm_cma.h b/arch/sw_64/include/asm/kvm_cma.h new file mode 100644 index 000000000000..d50ba599ceb7 --- /dev/null +++ b/arch/sw_64/include/asm/kvm_cma.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_CMA_H +#define _ASM_SW64_KVM_CMA_H + +#include + +extern int __init kvm_cma_declare_contiguous(phys_addr_t base, + phys_addr_t size, phys_addr_t limit, + phys_addr_t alignment, unsigned int order_per_bit, + const char *name, struct cma **res_cma); +#endif /* _ASM_SW64_KVM_CMA_H */ diff --git a/arch/sw_64/include/asm/kvm_emulate.h b/arch/sw_64/include/asm/kvm_emulate.h new file mode 100644 index 000000000000..915aa6c0bce2 --- 
/dev/null +++ b/arch/sw_64/include/asm/kvm_emulate.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_EMULATE_H +#define _ASM_SW64_KVM_EMULATE_H + +#include +#include + +#define R(x) ((size_t) &((struct kvm_regs *)0)->x) + +static int reg_offsets[32] = { + R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8), + R(r9), R(r10), R(r11), R(r12), R(r13), R(r14), R(r15), + R(r16), R(r17), R(r18), + R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26), + R(r27), R(r28), R(gp), + 0, 0, +}; + + +static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, + unsigned long val) +{ + void *regs_ptr = (void *)&vcpu->arch.regs; + + regs_ptr += reg_offsets[reg_num]; + *(unsigned long *)regs_ptr = val; +} + +static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu, u8 reg_num) +{ + void *regs_ptr = (void *)&vcpu->arch.regs; + + if (reg_num == 31) + return 0; + regs_ptr += reg_offsets[reg_num]; + return *(unsigned long *)regs_ptr; +} + +void sw64_decode(struct kvm_vcpu *vcpu, unsigned int insn, + struct kvm_run *run); + +unsigned int interrupt_pending(struct kvm_vcpu *vcpu, bool *more); +void clear_vcpu_irq(struct kvm_vcpu *vcpu); +void inject_vcpu_irq(struct kvm_vcpu *vcpu, unsigned int irq); +void try_deliver_interrupt(struct kvm_vcpu *vcpu, unsigned int irq, bool more); +#endif /* _ASM_SW64_KVM_EMULATE_H */ diff --git a/arch/sw_64/include/asm/kvm_host.h b/arch/sw_64/include/asm/kvm_host.h new file mode 100644 index 000000000000..09a995218a2c --- /dev/null +++ b/arch/sw_64/include/asm/kvm_host.h @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_HOST_H +#define _ASM_SW64_KVM_HOST_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#define last_vpn(cpu) (cpu_data[cpu].last_vpn) + +#ifdef 
CONFIG_SUBARCH_C3B +#define VPN_BITS 8 +#define GUEST_RESET_PC 0xffffffff80011100 +#endif + +#ifdef CONFIG_SUBARCH_C4 +#define VPN_BITS 10 +#define GUEST_RESET_PC 0xfff0000000011002 +#endif + +#define VPN_FIRST_VERSION (1UL << VPN_BITS) +#define VPN_MASK ((1UL << VPN_BITS) - 1) +#define VPN_SHIFT (64 - VPN_BITS) + +#define KVM_MAX_VCPUS 64 +#define KVM_INTERNAL_MEM_SLOTS (KVM_MEM_SLOTS_NUM - 512) + +#define KVM_HALT_POLL_NS_DEFAULT 0 +#define KVM_IRQCHIP_NUM_PINS 256 +/* KVM Hugepage definitions for sw64 */ +#define KVM_NR_PAGE_SIZES 3 +#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9) +#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x)) +#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x)) +#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) +#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) + +/* + * The architecture supports 48-bit GPA as input to the addtional stage translations. + */ +#define KVM_PHYS_SHIFT (48) +#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT) +#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL)) + +struct kvm_arch_memory_slot { + unsigned long host_phys_addr; + bool valid; +}; + +struct kvm_arch { + unsigned long host_phys_addr; + unsigned long size; + + /* segment table */ + unsigned long *seg_pgd; + + struct swvm_mem mem; + /* Addtional stage page table*/ + pgd_t *pgd; +}; + +#define KVM_NR_MEM_OBJS 40 + +/* + * We don't want allocation failures within the mmu code, so we preallocate + * enough memory for a single page fault in a cache. 
+ */ +struct kvm_mmu_memory_cache { + int nobjs; + void *objects[KVM_NR_MEM_OBJS]; +}; + +struct kvm_vcpu_arch { + struct kvm_regs regs __aligned(32); + struct vcpucb vcb; + struct task_struct *tsk; + unsigned int pcpu_id; /* current running pcpu id */ + + /* Virtual clock device */ + struct hrtimer hrt; + unsigned long timer_next_event; + unsigned long vtimer_freq; + + int first_run; + int halted; + int stopped; + int restart; + + /* Pending virtual interrupts */ + DECLARE_BITMAP(irqs_pending, SWVM_IRQS); + unsigned long vpnc[NR_CPUS]; + + /* Detect first run of a vcpu */ + bool has_run_once; + + /* WAIT executed */ + int wait; + + /* vcpu power-off state */ + bool power_off; + + /* Don't run the guest (internal implementation need) */ + bool pause; + + struct kvm_decode mmio_decode; + + /* Cache some mmu pages needed inside spinlock regions */ + struct kvm_mmu_memory_cache mmu_page_cache; + + /* guest live migration */ + unsigned long migration_mark; + unsigned long shtclock; +}; + +struct vmem_info { + unsigned long start; + size_t size; + atomic_t refcnt; +}; + +struct kvm_vm_stat { + struct kvm_vm_stat_generic generic; +}; + +struct kvm_vcpu_stat { + struct kvm_vcpu_stat_generic generic; + u64 pid; + u64 exits; + u64 io_exits; + u64 mmio_exits; + u64 migration_set_dirty; + u64 shutdown_exits; + u64 restart_exits; + u64 stop_exits; + u64 ipi_exits; + u64 timer_exits; + u64 debug_exits; +#ifdef CONFIG_KVM_MEMHOTPLUG + u64 memhotplug_exits; +#endif + u64 fatal_error_exits; + u64 halt_exits; + u64 halt_successful_poll; + u64 halt_attempted_poll; + u64 halt_wakeup; + u64 halt_poll_success_ns; + u64 halt_poll_fail_ns; + u64 halt_poll_invalid; + u64 signal_exits; + u64 steal; + u64 st_max; + u64 utime; + u64 stime; + u64 gtime; +}; + +#ifdef CONFIG_KVM_MEMHOTPLUG +void vcpu_mem_hotplug(struct kvm_vcpu *vcpu, unsigned long start_addr); +#endif +#ifdef CONFIG_SUBARCH_C4 +#define KVM_ARCH_WANT_MMU_NOTIFIER +#endif +int kvm_set_spte_hva(struct kvm *kvm, unsigned long 
hva, pte_t pte); +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); + +void update_vcpu_stat_time(struct kvm_vcpu_stat *vcpu_stat); +void check_vcpu_requests(struct kvm_vcpu *vcpu); +void sw64_kvm_switch_vpn(struct kvm_vcpu *vcpu); +int vmem_init(void); +void vmem_exit(void); +int __sw64_vcpu_run(unsigned long vcb_pa, struct kvm_regs *regs, + struct hcall_args *args); +int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + int exception_index, struct hcall_args *hargs); +void vcpu_send_ipi(struct kvm_vcpu *vcpu, int target_vcpuid, int type); +static inline void kvm_arch_hardware_disable(void) {} +static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +static inline void kvm_arch_free_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) {} +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} + +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); + +int kvm_sw64_perf_init(void); +int kvm_sw64_perf_teardown(void); +void kvm_flush_tlb_all(void); +void kvm_sw64_update_vpn(struct kvm_vcpu *vcpu, unsigned long vpn); +int kvm_sw64_init_vm(struct kvm *kvm); +void kvm_sw64_destroy_vm(struct kvm *kvm); +int kvm_sw64_vcpu_reset(struct kvm_vcpu *vcpu); +long kvm_sw64_set_vcb(struct file *filp, unsigned long arg); +long kvm_sw64_get_vcb(struct file *filp, unsigned long arg); + +void update_aptp(unsigned long pgd); +void vcpu_set_numa_affinity(struct kvm_vcpu *vcpu); +#endif /* _ASM_SW64_KVM_HOST_H */ diff --git 
a/arch/sw_64/include/asm/kvm_mmio.h b/arch/sw_64/include/asm/kvm_mmio.h new file mode 100644 index 000000000000..c87b259e9395 --- /dev/null +++ b/arch/sw_64/include/asm/kvm_mmio.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_MMIO_H +#define _ASM_SW64_KVM_MMIO_H + +#include +#include + +struct kvm_decode { + unsigned long rt; + bool sign_extend; +}; + +int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); +int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct hcall_args *hargs); + +#endif /* _ASM_SW64_KVM_MMIO_H */ diff --git a/arch/sw_64/include/asm/kvm_mmu.h b/arch/sw_64/include/asm/kvm_mmu.h new file mode 100644 index 000000000000..f4493de934ba --- /dev/null +++ b/arch/sw_64/include/asm/kvm_mmu.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_MMU_H +#define _ASM_SW64_KVM_MMU_H + +#define AF_ACCESS_TYPE_SHIFT 55 +#define AF_INV_LEVEL_SHIFT 53 +#define AF_FAULT_STATUS_SHIFT 48 + +#define AF_ACCESS_TYPE_MASK 0x3 +#define AF_INV_LEVEL_MASK 0x3 +#define AF_FAULT_STATUS_MASK 0x1f +#define AF_ENTRY_ADDR_MASK ((0x1UL << AF_FAULT_STATUS_SHIFT) - 1) + +/* access type defination */ +#define AF_READ_ACCESS_TYPE 0x1 +#define AF_WRITE_ACCESS_TYPE 0x2 +#define AF_EXEC_ACCESS_TYPE 0x3 + +/* invalid page level */ +#define AF_INV_LEVEL_1 0 +#define AF_INV_LEVEL_2 1 +#define AF_INV_LEVEL_3 2 +#define AF_INV_LEVEL_4 3 + +/* fault status */ +#define AF_STATUS_MISCONFIG 0x1 +#define AF_STATUS_FOR 0x2 +#define AF_STATUS_FOW 0x4 +#define AF_STATUS_FOE 0x8 +#define AF_STATUS_INV 0x10 + +#define KVM_MMU_CACHE_MIN_PAGES 2 + +static inline void kvm_set_aptpte_readonly(pte_t *pte) +{ + pte_val(*pte) |= _PAGE_FOW; +} + +static inline bool kvm_aptpte_readonly(pte_t *pte) +{ + return (pte_val(*pte) & _PAGE_FOW) == _PAGE_FOW; +} + +static inline void kvm_set_aptpmd_readonly(pmd_t *pmd) +{ + pmd_val(*pmd) |= _PAGE_FOW; +} + +static inline bool kvm_aptpmd_readonly(pmd_t *pmd) +{ + return 
(pmd_val(*pmd) & _PAGE_FOW) == _PAGE_FOW; +} + +static inline void kvm_set_aptpud_readonly(pud_t *pud) +{ + pud_val(*pud) |= _PAGE_FOW; +} + +static inline bool kvm_aptpud_readonly(pud_t *pud) +{ + return (pud_val(*pud) & _PAGE_FOW) == _PAGE_FOW; +} + +static inline pte_t kvm_pte_mkwrite(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_FOW; + return pte; +} + +static inline pte_t kvm_pte_mkexec(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_FOE; + return pte; +} + +static inline bool kvm_pte_exec(pte_t *pte) +{ + return !(pte_val(*pte) & _PAGE_FOE); +} + +static inline pmd_t kvm_pmd_mkwrite(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_FOW; + return pmd; +} + +static inline pmd_t kvm_pmd_mkexec(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_FOE; + return pmd; +} + +static inline bool kvm_pmd_exec(pmd_t *pmd) +{ + return !(pmd_val(*pmd) & _PAGE_FOE); +} + +static inline pud_t kvm_pud_mkwrite(pud_t pud) +{ + pud_val(pud) &= ~_PAGE_FOW; + return pud; +} + +static inline pud_t kvm_pud_mkexec(pud_t pud) +{ + pud_val(pud) &= ~_PAGE_FOE; + return pud; +} + +static inline bool kvm_pud_exec(pud_t *pud) +{ + return !(pud_val(*pud) & _PAGE_FOE); +} + +void kvm_core4_commit_memory_region(struct kvm *kvm, + const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change); +void kvm_core4_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot); +void kvm_core4_flush_shadow_all(struct kvm *kvm); +void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +void kvm_handle_apt_fault(struct kvm_vcpu *vcpu); +int kvm_alloc_addtional_stage_pgd(struct kvm *kvm); +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot); +int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); +void apt_unmap_vm(struct kvm *kvm); +#endif /* _ASM_SW64_KVM_MMU_H */ diff --git a/arch/sw_64/include/asm/kvm_para.h 
b/arch/sw_64/include/asm/kvm_para.h new file mode 100644 index 000000000000..442f1c7d9f83 --- /dev/null +++ b/arch/sw_64/include/asm/kvm_para.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_PARA_H +#define _ASM_SW64_KVM_PARA_H + +#include + +#define HMC_hcall 0x32 + +static inline unsigned long kvm_hypercall3(unsigned long num, + unsigned long arg0, + unsigned long arg1, + unsigned long arg2) +{ + register unsigned long __r0 __asm__("$0"); + register unsigned long __r16 __asm__("$16") = num; + register unsigned long __r17 __asm__("$17") = arg0; + register unsigned long __r18 __asm__("$18") = arg1; + register unsigned long __r19 __asm__("$19") = arg2; + __asm__ __volatile__( + "sys_call %5" + : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r19), "=r"(__r0) + : "i"(HMC_hcall), "0"(__r16), "1"(__r17), "2"(__r18), "3"(__r19) + : "$1", "$22", "$23", "$24", "$25"); + return __r0; +} +#endif /* _ASM_SW64_KVM_PARA_H */ diff --git a/arch/sw_64/include/asm/kvm_timer.h b/arch/sw_64/include/asm/kvm_timer.h new file mode 100644 index 000000000000..8080873c684f --- /dev/null +++ b/arch/sw_64/include/asm/kvm_timer.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_TIMER_H +#define _ASM_SW64_KVM_TIMER_H + +void set_timer(struct kvm_vcpu *vcpu, unsigned long delta); +void set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); +enum hrtimer_restart clockdev_fn(struct hrtimer *timer); + +#endif /* _ASM_SW64_KVM_TIMER_H */ diff --git a/arch/sw_64/include/asm/vcpu.h b/arch/sw_64/include/asm/vcpu.h new file mode 100644 index 000000000000..c4e3caacbc70 --- /dev/null +++ b/arch/sw_64/include/asm/vcpu.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_VCPU_H +#define _ASM_SW64_VCPU_H + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_SUBARCH_C3B + +struct vcpucb { + unsigned long go_flag; + unsigned long pcbb; + unsigned long ksp; + unsigned long usp; + unsigned long kgp; + unsigned long ent_arith; + 
unsigned long ent_if; + unsigned long ent_int; + unsigned long ent_mm; + unsigned long ent_sys; + unsigned long ent_una; + unsigned long stack_pc; + unsigned long new_a0; + unsigned long new_a1; + unsigned long new_a2; + unsigned long soft_cid; + unsigned long csr_save; + unsigned long wakeup_magic; + unsigned long host_vcpucb; + unsigned long upcr; + unsigned long vpcr; + unsigned long dtb_vpcr; + unsigned long guest_ksp; + unsigned long guest_usp; + unsigned long vcpu_irq_disabled; + unsigned long vcpu_irq; + unsigned long ptbr; + unsigned long soft_tid; + unsigned long int_stat1; + unsigned long int_stat2; + unsigned long int_stat3; + unsigned long reset_entry; + unsigned long pvcpu; + unsigned long exit_reason; + unsigned long ipaddr; + unsigned long vcpu_irq_vector; + unsigned long pri_base; + unsigned long stack_pc_dfault; + unsigned long guest_p20; + unsigned long guest_dfault_double; + unsigned long guest_irqs_pending; + unsigned long guest_hm_r30; + unsigned long migration_mark; + unsigned long guest_longtime; + unsigned long guest_longtime_offset; + unsigned long reserved[3]; +}; + +#else + +struct vcpucb { + unsigned long ktp; + unsigned long pcbb; + unsigned long ksp; + unsigned long usp; + unsigned long kgp; + unsigned long ent_arith; + unsigned long ent_if; + unsigned long ent_int; + unsigned long ent_mm; + unsigned long ent_sys; + unsigned long ent_una; + unsigned long stack_pc; + unsigned long new_a0; + unsigned long new_a1; + unsigned long new_a2; + unsigned long soft_cid; + unsigned long csr_save; + unsigned long wakeup_magic; + unsigned long host_vcpucb; + unsigned long upcr; + unsigned long vpcr; + unsigned long dtb_vpcr; + unsigned long dtb_upcr; + unsigned long guest_ksp; + unsigned long guest_usp; + unsigned long vcpu_irq_disabled; + unsigned long vcpu_irq; + unsigned long ptbr_usr; + unsigned long ptbr_sys; + unsigned long soft_tid; + unsigned long int_stat0; + unsigned long int_stat1; + unsigned long int_stat2; + unsigned long int_stat3; + 
unsigned long reset_entry; + unsigned long pvcpu; + unsigned long exit_reason; + unsigned long ipaddr; + unsigned long vcpu_pc_save; + unsigned long shtclock_offset; + unsigned long reserved[8]; +}; +#endif + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_SW64_VCPU_H */ diff --git a/arch/sw_64/include/uapi/asm/kvm.h b/arch/sw_64/include/uapi/asm/kvm.h new file mode 100644 index 000000000000..2253475deaa5 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/kvm.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_KVM_H +#define _UAPI_ASM_SW64_KVM_H + +/* + * KVM SW specific structures and definitions. + */ +#define SWVM_IRQS 256 +#define IRQ_PENDING_INTX_SHIFT 16 +#define IRQ_PENDING_MSI_VECTORS_SHIFT 17 + +enum SW64_KVM_IRQ { + SW64_KVM_IRQ_IPI = 27, + SW64_KVM_IRQ_TIMER = 9, + SW64_KVM_IRQ_KBD = 29, + SW64_KVM_IRQ_MOUSE = 30, +}; + +#define SWVM_VM_TYPE_DEFAULT 0 +#define SWVM_VM_TYPE_PHYVCPU 1 +#define __KVM_HAVE_IRQ_LINE + +#define SWVM_NUM_NUMA_MEMBANKS 1 +#define KVM_NR_IRQCHIPS 1 +/* + * for KVM_GET_REGS and KVM_SET_REGS + */ +struct kvm_regs { + unsigned long r0; + unsigned long r1; + unsigned long r2; + unsigned long r3; + + unsigned long r4; + unsigned long r5; + unsigned long r6; + unsigned long r7; + + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; + + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; + + unsigned long r19; + unsigned long r20; + unsigned long r21; + unsigned long r22; + + unsigned long r23; + unsigned long r24; + unsigned long r25; + unsigned long r26; + + unsigned long r27; + unsigned long r28; + unsigned long __padding0; + unsigned long fpcr; + + unsigned long fp[124]; + /* These are saved by HMcode: */ + unsigned long ps; + unsigned long pc; + unsigned long gp; + unsigned long r16; + unsigned long r17; + unsigned long r18; +}; + + +/* + * return stack for __sw64_vcpu_run + */ +struct vcpu_run_ret_stack { + unsigned long ra; 
+ unsigned long r0; +}; + +struct host_int_args { + unsigned long r18; + unsigned long r17; + unsigned long r16; +}; + +/* + * for KVM_GET_FPU and KVM_SET_FPU + */ +struct kvm_fpu { +}; + +struct hcall_args { + unsigned long arg0, arg1, arg2; +}; + +struct phyvcpu_hcall_args { + unsigned long call; + struct hcall_args args; +}; + +struct kvm_debug_exit_arch { + unsigned long epc; +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +struct swvm_mem_bank { + unsigned long guest_phys_addr; + unsigned long host_phys_addr; + unsigned long host_addr; + unsigned long size; +}; + +struct swvm_mem { + struct swvm_mem_bank membank[SWVM_NUM_NUMA_MEMBANKS]; +}; + +#endif /* _UAPI_ASM_SW64_KVM_H */ diff --git a/arch/sw_64/kvm/Kconfig b/arch/sw_64/kvm/Kconfig new file mode 100644 index 000000000000..b7e43d0bae51 --- /dev/null +++ b/arch/sw_64/kvm/Kconfig @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# KVM configuration +# +source "virt/kvm/Kconfig" + +menuconfig VIRTUALIZATION + bool "Virtualization" + help + Say Y here to get to see options for using your Linux host to run + other operating systems inside virtual machines (guests). + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and disabled. + +if VIRTUALIZATION + +config KVM + tristate "Kernel-based Virtual Machine (KVM) support" + select PREEMPT_NOTIFIERS + select CMA + depends on NET + select HAVE_KVM_EVENTFD + select HAVE_KVM_IRQCHIP + select HAVE_KVM_IRQ_ROUTING + select HAVE_KVM_IRQFD + select HAVE_KVM_MSI + select KVM_VFIO + select MMU_NOTIFIER + select KVM_GENERIC_DIRTYLOG_READ_PROTECT + select TUN + select GENERIC_ALLOCATOR + select KVM_GENERIC_DIRTYLOG_READ_PROTECT + help + Support for hosting Guest kernels. + We don't support KVM with 3-level page tables yet. + + If unsure, say N. 
+ +config KVM_MEMHOTPLUG + bool "Memory hotplug support for guest" + depends on KVM && MEMORY_HOTPLUG && SUBARCH_C3B + help + Provides memory hotplug support for SW64 guest. + + +source "drivers/vhost/Kconfig" + +endif # VIRTUALIZATION diff --git a/arch/sw_64/kvm/Makefile b/arch/sw_64/kvm/Makefile new file mode 100644 index 000000000000..8111014c5cca --- /dev/null +++ b/arch/sw_64/kvm/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Kernel-based Virtual Machine module +# + +ccflags-y += -I $(srctree)/$(src) + +include $(srctree)/virt/kvm/Makefile.kvm + +obj-$(CONFIG_KVM) += kvm.o + +kvm-y += sw64.o +kvm-y += entry.o +kvm-y += emulate.o +kvm-y += mmio.o +kvm-y += kvm_timer.o +kvm-y += handle_exit.o +kvm-y += perf.o +kvm-$(CONFIG_SUBARCH_C3B) += kvm_core3.o kvm_cma.o +kvm-$(CONFIG_SUBARCH_C4) += kvm_core4.o mmu.o diff --git a/arch/sw_64/kvm/emulate.c b/arch/sw_64/kvm/emulate.c new file mode 100644 index 000000000000..fc37461b97a0 --- /dev/null +++ b/arch/sw_64/kvm/emulate.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ +#include +#include +#include +#include + +void sw64_decode(struct kvm_vcpu *vcpu, unsigned int insn, struct kvm_run *run) +{ + int opc, ra; + +#ifdef CONFIG_SUBARCH_C3B + opc = (insn >> 26) & 0x3f; + ra = (insn >> 21) & 0x1f; +#elif defined(CONFIG_SUBARCH_C4) + unsigned long ds_stat, exc_sum; + + ds_stat = read_csr(CSR_DS_STAT); + exc_sum = read_csr(CSR_EXC_SUM); + + opc = (ds_stat >> 4) & 0x3f; + ra = (exc_sum >> 8) & 0x1f; +#endif + + switch (opc) { + case 0x20: /* LDBU */ + run->mmio.is_write = 0; + run->mmio.len = 1; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x21: /* LDHU */ + run->mmio.is_write = 0; + run->mmio.len = 2; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x22: /* LDW */ + run->mmio.is_write = 0; + run->mmio.len = 4; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x23: /* LDL */ + case 0x24: /* LDL_U */ 
+ run->mmio.is_write = 0; + run->mmio.len = 8; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x28: /* STB */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra) & 0xffUL; + run->mmio.len = 1; + break; + case 0x29: /* STH */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra) & 0xffffUL; + run->mmio.len = 2; + break; + case 0x2a: /* STW */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra) & 0xffffffffUL; + run->mmio.len = 4; + break; + case 0x2b: /* STL */ + case 0x2c: /* STL_U */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra); + run->mmio.len = 8; + break; + default: + pr_info("Miss done opc %d\n", opc); + break; + } +} + +/* + * Virtual Interrupts. + */ +unsigned int interrupt_pending(struct kvm_vcpu *vcpu, bool *more) +{ + unsigned int irq; + DECLARE_BITMAP(blk, SWVM_IRQS); + + bitmap_copy(blk, vcpu->arch.irqs_pending, SWVM_IRQS); + + irq = find_last_bit(blk, SWVM_IRQS); + + return irq; +} + +void clear_vcpu_irq(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vcb.vcpu_irq = 0xffffffffffffffffUL; +} + +void inject_vcpu_irq(struct kvm_vcpu *vcpu, unsigned int irq) +{ + vcpu->arch.vcb.vcpu_irq = irq; +} + +/* + * This actually diverts the Guest to running an interrupt handler, once an + * interrupt has been identified by interrupt_pending(). + */ +void try_deliver_interrupt(struct kvm_vcpu *vcpu, unsigned int irq, bool more) +{ + BUG_ON(irq >= SWVM_IRQS); + + /* Otherwise we check if they have interrupts disabled. */ + if (vcpu->arch.vcb.vcpu_irq_disabled) { + clear_vcpu_irq(vcpu); + return; + } + + /* If they don't have a handler (yet?), we just ignore it */ + if (vcpu->arch.vcb.ent_int != 0) { + /* OK, mark it no longer pending and deliver it. 
*/ + clear_bit(irq, (vcpu->arch.irqs_pending)); + /* + * set_guest_interrupt() takes the interrupt descriptor and a + * flag to say whether this interrupt pushes an error code onto + * the stack as well: virtual interrupts never do. + */ + inject_vcpu_irq(vcpu, irq); + } +} diff --git a/arch/sw_64/kvm/entry.S b/arch/sw_64/kvm/entry.S new file mode 100644 index 000000000000..a61ecc387d26 --- /dev/null +++ b/arch/sw_64/kvm/entry.S @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 + */ + .text +#include +#include +#include +#include + + .set noat + +/* + * r16: physical address of guest kvm_vcpu.arch.vcb + * r17: pointer to guest kvm_vcpu.arch.kvm_regs + * r18: pointer to hcall args + */ +ENTRY(__sw64_vcpu_run) + /* save host fpregs */ + rfpcr $f0 + fstd $f0, TASK_THREAD_FPCR($8) + vstd $f2, TASK_THREAD_F2($8) + vstd $f3, TASK_THREAD_F3($8) + vstd $f4, TASK_THREAD_F4($8) + vstd $f5, TASK_THREAD_F5($8) + vstd $f6, TASK_THREAD_F6($8) + vstd $f7, TASK_THREAD_F7($8) + vstd $f8, TASK_THREAD_F8($8) + vstd $f9, TASK_THREAD_F9($8) + + ldi sp, -VCPU_RET_SIZE(sp) + /* save host pt_regs to current kernel stack */ + ldi sp, -PT_REGS_SIZE(sp) + stl $9, PT_REGS_R9(sp) + stl $8, PT_REGS_R8(sp) + stl $10, PT_REGS_R10(sp) + stl $11, PT_REGS_R11(sp) + stl $12, PT_REGS_R12(sp) + stl $13, PT_REGS_R13(sp) + stl $14, PT_REGS_R14(sp) + stl $15, PT_REGS_R15(sp) + stl $26, PT_REGS_R26(sp) + + /* restore guest switch stack from guest kvm_regs struct */ + ldl $0, KVM_REGS_R0($17) + ldl $1, KVM_REGS_R1($17) + /* restore $2 later */ + ldl $3, KVM_REGS_R3($17) + ldl $4, KVM_REGS_R4($17) + ldl $5, KVM_REGS_R5($17) + ldl $6, KVM_REGS_R6($17) + ldl $7, KVM_REGS_R7($17) + ldl $8, KVM_REGS_R8($17) + ldl $9, KVM_REGS_R9($17) + ldl $10, KVM_REGS_R10($17) + ldl $11, KVM_REGS_R11($17) + ldl $12, KVM_REGS_R12($17) + ldl $13, KVM_REGS_R13($17) + ldl $14, KVM_REGS_R14($17) + ldl $15, KVM_REGS_R15($17) + ldl $19, KVM_REGS_R19($17) + ldl $20, 
KVM_REGS_R20($17) + ldl $21, KVM_REGS_R21($17) + ldl $22, KVM_REGS_R22($17) + ldl $23, KVM_REGS_R23($17) + ldl $24, KVM_REGS_R24($17) + ldl $25, KVM_REGS_R25($17) + ldl $26, KVM_REGS_R26($17) + ldl $27, KVM_REGS_R27($17) + ldl $28, KVM_REGS_R28($17) + + fldd $f0, KVM_REGS_FPCR($17) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $g_setfpec_0 + subl $2, 0x1, $2 + beq $2, $g_setfpec_1 + subl $2, 0x1, $2 + beq $2, $g_setfpec_2 + setfpec3 + br $g_setfpec_over +$g_setfpec_0: + setfpec0 + br $g_setfpec_over +$g_setfpec_1: + setfpec1 + br $g_setfpec_over +$g_setfpec_2: + setfpec2 +$g_setfpec_over: + ldl $2, KVM_REGS_R2($17) + vldd $f0, KVM_REGS_F0($17) + vldd $f1, KVM_REGS_F1($17) + vldd $f2, KVM_REGS_F2($17) + vldd $f3, KVM_REGS_F3($17) + vldd $f4, KVM_REGS_F4($17) + vldd $f5, KVM_REGS_F5($17) + vldd $f6, KVM_REGS_F6($17) + vldd $f7, KVM_REGS_F7($17) + vldd $f8, KVM_REGS_F8($17) + vldd $f9, KVM_REGS_F9($17) + vldd $f10, KVM_REGS_F10($17) + vldd $f11, KVM_REGS_F11($17) + vldd $f12, KVM_REGS_F12($17) + vldd $f13, KVM_REGS_F13($17) + vldd $f14, KVM_REGS_F14($17) + vldd $f15, KVM_REGS_F15($17) + vldd $f16, KVM_REGS_F16($17) + vldd $f17, KVM_REGS_F17($17) + vldd $f18, KVM_REGS_F18($17) + vldd $f19, KVM_REGS_F19($17) + vldd $f20, KVM_REGS_F20($17) + vldd $f21, KVM_REGS_F21($17) + vldd $f22, KVM_REGS_F22($17) + vldd $f23, KVM_REGS_F23($17) + vldd $f24, KVM_REGS_F24($17) + vldd $f25, KVM_REGS_F25($17) + vldd $f26, KVM_REGS_F26($17) + vldd $f27, KVM_REGS_F27($17) + vldd $f28, KVM_REGS_F28($17) + vldd $f29, KVM_REGS_F29($17) + vldd $f30, KVM_REGS_F30($17) + + ldi $17, KVM_REGS_PS($17) + + /* enter guest */ + /* r16 = guest vcpucb pointer */ + /* r17 = base of guest kvm_regs.ps, saved/restored by hmcode */ + + /* enter guest now */ + sys_call 0x31 + /* exit guest now */ + + ldi $17, -KVM_REGS_PS($17) /* r17: base of kvm_regs */ + + vstd $f0, KVM_REGS_F0($17) + vstd $f1, KVM_REGS_F1($17) + vstd $f2, KVM_REGS_F2($17) + vstd $f3, KVM_REGS_F3($17) + vstd $f4, KVM_REGS_F4($17) 
+ vstd $f5, KVM_REGS_F5($17) + vstd $f6, KVM_REGS_F6($17) + vstd $f7, KVM_REGS_F7($17) + vstd $f8, KVM_REGS_F8($17) + vstd $f9, KVM_REGS_F9($17) + vstd $f10, KVM_REGS_F10($17) + vstd $f11, KVM_REGS_F11($17) + vstd $f12, KVM_REGS_F12($17) + vstd $f13, KVM_REGS_F13($17) + vstd $f14, KVM_REGS_F14($17) + vstd $f15, KVM_REGS_F15($17) + vstd $f16, KVM_REGS_F16($17) + vstd $f17, KVM_REGS_F17($17) + vstd $f18, KVM_REGS_F18($17) + vstd $f19, KVM_REGS_F19($17) + vstd $f20, KVM_REGS_F20($17) + vstd $f21, KVM_REGS_F21($17) + vstd $f22, KVM_REGS_F22($17) + vstd $f23, KVM_REGS_F23($17) + vstd $f24, KVM_REGS_F24($17) + vstd $f25, KVM_REGS_F25($17) + vstd $f26, KVM_REGS_F26($17) + vstd $f27, KVM_REGS_F27($17) + vstd $f28, KVM_REGS_F28($17) + vstd $f29, KVM_REGS_F29($17) + vstd $f30, KVM_REGS_F30($17) + + rfpcr $f0 + fstd $f0, KVM_REGS_FPCR($17) + + /* don't save r0 Hmcode have saved r0 for us */ + stl $1, KVM_REGS_R1($17) + stl $2, KVM_REGS_R2($17) + stl $3, KVM_REGS_R3($17) + stl $4, KVM_REGS_R4($17) + stl $5, KVM_REGS_R5($17) + stl $6, KVM_REGS_R6($17) + stl $7, KVM_REGS_R7($17) + stl $8, KVM_REGS_R8($17) + stl $9, KVM_REGS_R9($17) + stl $10, KVM_REGS_R10($17) + stl $11, KVM_REGS_R11($17) + stl $12, KVM_REGS_R12($17) + stl $13, KVM_REGS_R13($17) + stl $14, KVM_REGS_R14($17) + stl $15, KVM_REGS_R15($17) + stl $19, KVM_REGS_R19($17) + stl $20, KVM_REGS_R20($17) + stl $21, KVM_REGS_R21($17) + stl $22, KVM_REGS_R22($17) + stl $23, KVM_REGS_R23($17) + stl $24, KVM_REGS_R24($17) + stl $25, KVM_REGS_R25($17) + stl $26, KVM_REGS_R26($17) + stl $27, KVM_REGS_R27($17) + stl $28, KVM_REGS_R28($17) + + /* restore host regs from host sp */ + ldl $8, PT_REGS_R8(sp) + ldl $9, PT_REGS_R9(sp) + ldl $10, PT_REGS_R10(sp) + ldl $11, PT_REGS_R11(sp) + ldl $12, PT_REGS_R12(sp) + ldl $13, PT_REGS_R13(sp) + ldl $14, PT_REGS_R14(sp) + ldl $15, PT_REGS_R15(sp) + ldl $26, PT_REGS_R26(sp) + ldi sp, PT_REGS_SIZE(sp) + + /* restore host fpregs */ + fldd $f0, TASK_THREAD_FPCR($8) + wfpcr $f0 + fimovd $f0, $2 
+ and $2, 0x3, $2 + beq $2, $setfpec_0 + subl $2, 0x1, $2 + beq $2, $setfpec_1 + subl $2, 0x1, $2 + beq $2, $setfpec_2 + setfpec3 + br $setfpec_over +$setfpec_0: + setfpec0 + br $setfpec_over +$setfpec_1: + setfpec1 + br $setfpec_over +$setfpec_2: + setfpec2 +$setfpec_over: + vldd $f2, TASK_THREAD_F2($8) + vldd $f3, TASK_THREAD_F3($8) + vldd $f4, TASK_THREAD_F4($8) + vldd $f5, TASK_THREAD_F5($8) + vldd $f6, TASK_THREAD_F6($8) + vldd $f7, TASK_THREAD_F7($8) + vldd $f8, TASK_THREAD_F8($8) + vldd $f9, TASK_THREAD_F9($8) + + /* if $0 > 0, handle hcall */ + bgt $0, $ret_to + + stl $26, VCPU_RET_RA(sp) + stl $0, VCPU_RET_R0(sp) + + /* Hmcode will setup in */ + /* restore $16 $17 $18, do interrupt trick */ + ldi sp, -(HOST_INT_SIZE + PT_REGS_SIZE)(sp) + ldl $16, HOST_INT_R16(sp) + ldl $17, HOST_INT_R17(sp) + ldl $18, HOST_INT_R18(sp) + ldi sp, (HOST_INT_SIZE + PT_REGS_SIZE)(sp) + + ldi $19, -PT_REGS_SIZE(sp) + call $26, do_entInt + ldl $26, VCPU_RET_RA(sp) + ldl $0, VCPU_RET_R0(sp) +$ret_to: + /* ret($0) indicate hcall number */ + ldi sp, VCPU_RET_SIZE(sp) /* pop stack */ + ret diff --git a/arch/sw_64/kvm/handle_exit.c b/arch/sw_64/kvm/handle_exit.c new file mode 100644 index 000000000000..69b97860db88 --- /dev/null +++ b/arch/sw_64/kvm/handle_exit.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ +#include +#include +#include +#include +#include +#include +#include + +int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + int exception_index, struct hcall_args *hargs) +{ + gfn_t gfn __maybe_unused; + + switch (exception_index) { + case SW64_KVM_EXIT_IO: + vcpu->stat.io_exits++; + return io_mem_abort(vcpu, run, hargs); + case SW64_KVM_MIGRATION_SET_DIRTY_HM: + case SW64_KVM_MIGRATION_SET_DIRTY: + vcpu->stat.migration_set_dirty++; + gfn = hargs->arg2 >> 24; + mutex_lock(&vcpu->kvm->slots_lock); + kvm_vcpu_mark_page_dirty(vcpu, gfn); + mutex_unlock(&vcpu->kvm->slots_lock); + return 
1; + case SW64_KVM_EXIT_HALT: + vcpu->stat.halt_exits++; + vcpu->arch.halted = 1; + kvm_vcpu_block(vcpu); + return 1; + case SW64_KVM_EXIT_SHUTDOWN: + vcpu->stat.shutdown_exits++; + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; + vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SHUTDOWN; + return 0; + case SW64_KVM_EXIT_RESTART: + vcpu->stat.restart_exits++; + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; + vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; + return 0; + case SW64_KVM_EXIT_STOP: + vcpu->stat.stop_exits++; + vcpu->arch.halted = 1; + memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending)); + kvm_vcpu_block(vcpu); + return 1; + case SW64_KVM_EXIT_TIMER: + vcpu->stat.timer_exits++; + set_timer(vcpu, hargs->arg0); + return 1; + case SW64_KVM_EXIT_IPI: + vcpu->stat.ipi_exits++; + vcpu_send_ipi(vcpu, hargs->arg0, hargs->arg1); + return 1; + case SW64_KVM_EXIT_DEBUG: + vcpu->stat.debug_exits++; + vcpu->run->exit_reason = KVM_EXIT_DEBUG; + vcpu->run->debug.arch.epc = vcpu->arch.regs.pc; + return 0; +#ifdef CONFIG_KVM_MEMHOTPLUG + case SW64_KVM_EXIT_MEMHOTPLUG: + vcpu->stat.memhotplug_exits++; + vcpu_mem_hotplug(vcpu, hargs->arg0); + return 1; +#endif +#ifdef CONFIG_SUBARCH_C4 + case SW64_KVM_EXIT_APT_FAULT: + return kvm_handle_guest_abort(vcpu, run); +#endif + case SW64_KVM_EXIT_FATAL_ERROR: + vcpu->stat.fatal_error_exits++; + pr_err("Guest fatal error: Reason=[%lx], EXC_PC=[%lx], DVA=[%lx]", hargs->arg0, hargs->arg1, hargs->arg2); + vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; + vcpu->run->hw.hardware_exit_reason = hargs->arg0; + return 0; + } + + return 1; +} diff --git a/arch/sw_64/kvm/irq.h b/arch/sw_64/kvm/irq.h new file mode 100644 index 000000000000..9268ab6af492 --- /dev/null +++ b/arch/sw_64/kvm/irq.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * irq.h: in kernel interrupt controller related definitions + */ + +#ifndef _SW64_KVM_IRQ_H +#define _SW64_KVM_IRQ_H +static inline int irqchip_in_kernel(struct kvm *kvm) 
+{ + return 1; +} +#endif /* _SW64_KVM_IRQ_H */ diff --git a/arch/sw_64/kvm/kvm_cma.c b/arch/sw_64/kvm/kvm_cma.c new file mode 100644 index 000000000000..de04eb5d20d7 --- /dev/null +++ b/arch/sw_64/kvm/kvm_cma.c @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Contiguous Memory Allocator for KVM + * + * This program is modified on the basis of CMA, to achieve cross-node + * memory reservation, as well as reserved memory information statistics. + */ + +#define pr_fmt(fmt) "kvm_cma: " fmt + +#include +#include +#include +#include +#include +#include + +#include "../../../mm/cma.h" +#include "../../../mm/internal.h" + +struct cma kvm_cma_areas[MAX_CMA_AREAS]; +unsigned int kvm_cma_area_count; + +static void __init init_kvm_cma_reserved_pageblock(struct page *page) +{ + unsigned int i = pageblock_nr_pages; + struct page *p = page; + + do { + __ClearPageReserved(p); + set_page_count(p, 0); + } while (++p, --i); + + set_pageblock_migratetype(page, MIGRATE_ISOLATE); + + if (pageblock_order >= MAX_ORDER) { + i = pageblock_nr_pages; + p = page; + do { + set_page_refcounted(p); + __free_pages(p, MAX_ORDER - 1); + p += MAX_ORDER_NR_PAGES; + } while (i -= MAX_ORDER_NR_PAGES); + } else { + set_page_refcounted(page); + __free_pages(page, pageblock_order); + } + + adjust_managed_page_count(page, pageblock_nr_pages); +} + +static int __init kvm_cma_activate_area(struct cma *cma) +{ + int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long); + unsigned long base_pfn = cma->base_pfn, pfn = base_pfn; + unsigned int i = cma->count >> pageblock_order; + + cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); + + if (!cma->bitmap) { + cma->count = 0; + return -ENOMEM; + } + + WARN_ON_ONCE(!pfn_valid(pfn)); + + do { + unsigned int j; + + base_pfn = pfn; + + for (j = pageblock_nr_pages; j; --j, pfn++) + WARN_ON_ONCE(!pfn_valid(pfn)); + + init_kvm_cma_reserved_pageblock(pfn_to_page(base_pfn)); + } while (--i); + + spin_lock_init(&cma->lock); + + return 0; +} + +static 
int __init kvm_cma_init_reserved_areas(void) +{ + int i; + + for (i = 0; i < kvm_cma_area_count; i++) { + int ret = kvm_cma_activate_area(&kvm_cma_areas[i]); + + if (ret) + return ret; + } + + return 0; +} +core_initcall(kvm_cma_init_reserved_areas); + +/** + * kvm_cma_init_reserved_mem() - create custom contiguous area + * from reserved memory + * @base: Base address of the reserved area + * @size: Size of the reserved area (in bytes), + * @order_per_bit: Order of pages represented by one bit on bitmap. + * @name: The name of the area. If this parameter is NULL, the name of + * the area will be set to "cmaN", where N is a running counter of + * used areas. + * @res_cma: Pointer to store the created cma region. + * + * This function creates custom contiguous area from already reserved memory. + */ +int __init kvm_cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, + unsigned int order_per_bit, const char *name, + struct cma **res_cma) +{ + struct cma *cma; + phys_addr_t alignment; + + /* Sanity checks */ + if (kvm_cma_area_count == ARRAY_SIZE(kvm_cma_areas)) { + pr_err("Not enough slots for CMA reserved regions!\n"); + return -ENOSPC; + } + + if (!size || !memblock_is_region_reserved(base, size)) + return -EINVAL; + + /* ensure minimal alignment required by mm core */ + alignment = PAGE_SIZE << + max_t(unsigned long, MAX_ORDER - 1, pageblock_order); + + /* alignment should be aligned with order_per_bit */ + if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit)) + return -EINVAL; + + if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size) + return -EINVAL; + + /* + * Each reserved area must be initialised later, when more kernel + * subsystems (like slab allocator) are available. 
+	 */
+	cma = &kvm_cma_areas[kvm_cma_area_count];
+
+	if (name)
+		snprintf(cma->name, CMA_MAX_NAME, "%s", name);
+	else
+		snprintf(cma->name, CMA_MAX_NAME, "cma%d", kvm_cma_area_count);
+
+	cma->base_pfn = PFN_DOWN(base);
+	cma->count = size >> PAGE_SHIFT;
+	cma->order_per_bit = order_per_bit;
+	*res_cma = cma;
+	kvm_cma_area_count++;
+	totalcma_pages += (size / PAGE_SIZE);
+
+	return 0;
+}
+
+/**
+ * kvm_cma_declare_contiguous() - reserve contiguous area for VM
+ * @base: Base address of the reserved area optional,
+ * @size: Size of the reserved area (in bytes),
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ * @alignment: Alignment for the CMA area, should be power of 2 or zero
+ * @order_per_bit: Order of pages represented by one bit on bitmap.
+ * @name: The name of the area. See function cma_init_reserved_mem()
+ * @res_cma: Pointer to store the created cma region.
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows to create custom reserved areas.
+ */
+int __init kvm_cma_declare_contiguous(phys_addr_t base,
+			phys_addr_t size, phys_addr_t limit,
+			phys_addr_t alignment, unsigned int order_per_bit,
+			const char *name, struct cma **res_cma)
+{
+	phys_addr_t memblock_end = memblock_end_of_DRAM();
+	phys_addr_t highmem_start;
+	int ret = -EINVAL;
+
+	/*
+	 * We can't use __pa(high_memory) directly, since high_memory
+	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
+	 * complain. Find the boundary by adding one to the last valid
+	 * address.
+	 */
+	highmem_start = __pa(high_memory - 1) + 1;
+
+	if (!size)
+		return -EINVAL;
+
+	if (alignment && !is_power_of_2(alignment))
+		return -EINVAL;
+
+	/*
+	 * Sanitise input arguments.
+ * Pages both ends in CMA area could be merged into adjacent unmovable + * migratetype page by page allocator's buddy algorithm. In the case, + * you couldn't get a contiguous memory, which is not what we want. + */ + alignment = max(alignment, (phys_addr_t)PAGE_SIZE << + max_t(unsigned long, MAX_ORDER - 1, pageblock_order)); + if (base & (alignment - 1)) { + ret = -EINVAL; + pr_err("Region at %pa must be aligned to %pa bytes\n", + &base, &alignment); + goto err; + } + base = ALIGN(base, alignment); + size = ALIGN(size, alignment); + limit &= ~(alignment - 1); + + if (!base) { + pr_err("Base address of region must be needed!\n"); + goto err; + } + + /* size should be aligned with order_per_bit */ + if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit)) + return -EINVAL; + + /* + * The request region must not cross the low/high memory boundary. + */ + if (base < highmem_start && base + size > highmem_start) { + ret = -EINVAL; + pr_err("Region at %pa defined on low/high memory boundary (%pa)\n", + &base, &highmem_start); + goto err; + } + + /* + * If the limit is unspecified or above the memblock end, its effective + * value will be the memblock end. Set it explicitly to simplify further + * checks. 
+ */ + if (limit == 0 || limit > memblock_end) + limit = memblock_end; + + if (base + size > limit) { + ret = -EINVAL; + pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n", + &size, &base, &limit); + goto err; + } + + /* Reserve memory */ + if (memblock_is_region_reserved(base, size) || + memblock_reserve(base, size) < 0) { + ret = -EBUSY; + goto err; + } + ret = kvm_cma_init_reserved_mem(base, size, order_per_bit, + name, res_cma); + if (ret) + goto free_mem; + + pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M, + &base); + return 0; + +free_mem: + memblock_free((void *)base, size); +err: + pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M); + return ret; +} diff --git a/arch/sw_64/kvm/kvm_core3.c b/arch/sw_64/kvm/kvm_core3.c new file mode 100644 index 000000000000..f7e9150d40e0 --- /dev/null +++ b/arch/sw_64/kvm/kvm_core3.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "trace.h" +#include "vmem.c" + +__read_mostly bool bind_vcpu_enabled; + +#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_NUMA) +static int __init bind_vcpu_init(void) +{ + if (!sw64_debugfs_dir) + return -ENODEV; + debugfs_create_bool("bind_vcpu", 0644, + sw64_debugfs_dir, &bind_vcpu_enabled); + return 0; +} + +static void bind_vcpu_exit(void) +{ + bind_vcpu_enabled = false; +} +#else +static int __init bind_vcpu_init(void) +{ + return 0; +} + +static void bind_vcpu_exit(void) { } + +#endif + +static unsigned long longtime_offset; + +#ifdef CONFIG_KVM_MEMHOTPLUG +static unsigned long get_vpcr(struct kvm_vcpu *vcpu, u64 vpn) +{ + unsigned long base; + + base = virt_to_phys(vcpu->kvm->arch.seg_pgd); + return base | ((vpn & VPN_MASK) << 44); +} +#else +static unsigned long get_vpcr(struct kvm_vcpu *vcpu, u64 vpn) +{ + 
unsigned long base, size; + + base = vcpu->kvm->arch.host_phys_addr; + size = vcpu->kvm->arch.size; + return (base >> 23) | ((size >> 23) << 16) | ((vpn & VPN_MASK) << 44); +} +#endif + +void vcpu_set_numa_affinity(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.vcb.vpcr == 0) { + vcpu->arch.vcb.vpcr = get_vpcr(vcpu, 0); +#ifndef CONFIG_KVM_MEMHOTPLUG + if (unlikely(bind_vcpu_enabled)) { + int nid; + unsigned long end; + + end = vcpu->kvm->arch.host_phys_addr + vcpu->kvm->arch.size; + nid = pfn_to_nid(PHYS_PFN(vcpu->kvm->arch.host_phys_addr)); + if (pfn_to_nid(PHYS_PFN(end)) == nid) + set_cpus_allowed_ptr(vcpu->arch.tsk, cpumask_of_node(nid)); + } +#endif + vcpu->arch.vcb.upcr = 0x7; + } +} + +void kvm_flush_tlb_all(void) +{ + tbia(); +} + +void kvm_sw64_update_vpn(struct kvm_vcpu *vcpu, unsigned long vpn) +{ + vcpu->arch.vcb.vpcr = ((vcpu->arch.vcb.vpcr) & (~(VPN_MASK << 44))) | (vpn << 44); + vcpu->arch.vcb.dtb_vpcr = ((vcpu->arch.vcb.dtb_vpcr) & (~(VPN_MASK << VPN_SHIFT))) | (vpn << VPN_SHIFT); +} + +int kvm_sw64_init_vm(struct kvm *kvm) +{ +#ifdef CONFIG_KVM_MEMHOTPLUG + unsigned long *seg_pgd; + + if (kvm->arch.seg_pgd != NULL) { + kvm_err("kvm_arch already initialized?\n"); + return -EINVAL; + } + + seg_pgd = alloc_pages_exact(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO); + if (!seg_pgd) + return -ENOMEM; + + kvm->arch.seg_pgd = seg_pgd; + #endif + return 0; +} + +void kvm_sw64_destroy_vm(struct kvm *kvm) +{ + #ifdef CONFIG_KVM_MEMHOTPLUG + void *seg_pgd = NULL; + + if (kvm->arch.seg_pgd) { + seg_pgd = READ_ONCE(kvm->arch.seg_pgd); + kvm->arch.seg_pgd = NULL; + } + + if (seg_pgd) + free_pages_exact(seg_pgd, PAGE_SIZE); + #endif + kvm_destroy_vcpus(kvm); +} + +#ifdef CONFIG_KVM_MEMHOTPLUG +static void setup_segment_table(struct kvm *kvm, + struct kvm_memory_slot *memslot, unsigned long addr, size_t size) +{ + unsigned long *seg_pgd = kvm->arch.seg_pgd; + unsigned long num_of_entry; + unsigned long base_hpa = addr; + unsigned long i; + + num_of_entry = round_up(size, 1 << 
30) >> 30; + + for (i = 0; i < num_of_entry; i++) { + *seg_pgd = base_hpa + (i << 30); + seg_pgd++; + } +} +#endif + +int kvm_arch_prepare_memory_region(struct kvm *kvm, + const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + unsigned long addr; + struct file *vm_file; + struct vm_area_struct *vma; + struct vmem_info *info; + struct kvm_userspace_memory_region new_mem; + struct kvm_userspace_memory_region *mem = &new_mem; + unsigned long ret; + size_t size; + + mem->flags = new->flags; + mem->guest_phys_addr = ((new->base_gfn) << PAGE_SHIFT); + mem->memory_size = ((new->npages) << PAGE_SHIFT); + mem->userspace_addr = new->userspace_addr; + + if (change == KVM_MR_FLAGS_ONLY || change == KVM_MR_DELETE) + return 0; + + if (test_bit(IO_MARK_BIT, (unsigned long *)(&(mem->guest_phys_addr)))) + return 0; + + if (test_bit(IO_MARK_BIT + 1, (unsigned long *)(&(mem->guest_phys_addr)))) + return 0; + +#ifndef CONFIG_KVM_MEMHOTPLUG + if (mem->guest_phys_addr) { + pr_info("%s, No KVM MEMHOTPLUG support!\n", __func__); + return 0; + } +#endif + if (!sw64_kvm_pool) + return -ENOMEM; + + pr_info("%s: %#llx %#llx, user addr: %#llx\n", __func__, + mem->guest_phys_addr, mem->memory_size, mem->userspace_addr); + + vma = find_vma(current->mm, mem->userspace_addr); + if (!vma) + return -ENOMEM; + vm_file = vma->vm_file; + + if (!vm_file) { + info = kzalloc(sizeof(struct vmem_info), GFP_KERNEL); + + size = round_up(mem->memory_size, 8<<20); + addr = gen_pool_alloc(sw64_kvm_pool, size); + if (!addr) + return -ENOMEM; + vm_munmap(mem->userspace_addr, mem->memory_size); + ret = vm_mmap(vm_file, mem->userspace_addr, mem->memory_size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_FIXED, 0); + if ((long)ret < 0) + return ret; + + vma = find_vma(current->mm, mem->userspace_addr); + if (!vma) + return -ENOMEM; + + info->start = addr; + info->size = size; + vma->vm_private_data = (void *) info; + + vma->vm_ops = &vmem_vm_ops; + vma->vm_ops->open(vma); + 
+ ret = vmem_vm_insert_page(vma); + if ((int)ret < 0) + return ret; + } else { + info = vm_file->private_data; + addr = info->start; + } + + pr_info("guest phys addr = %#lx, size = %#lx\n", + addr, vma->vm_end - vma->vm_start); + + kvm->arch.host_phys_addr = (u64)addr; + kvm->arch.size = round_up(mem->memory_size, 8<<20); + + memset(__va(addr), 0, 0x2000000); + + return 0; +} + +/* + * kvm_mark_migration write the mark on every vcpucbs of the kvm, which tells + * the system to do migration while the mark is on, and flush all vcpu's tlbs + * at the beginning of the migration. + */ +void kvm_mark_migration(struct kvm *kvm, int mark) +{ + struct kvm_vcpu *vcpu; + unsigned long cpu; + + kvm_for_each_vcpu(cpu, vcpu, kvm) + vcpu->arch.vcb.migration_mark = mark << 2; + + kvm_flush_remote_tlbs(kvm); +} + +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + /* + * At this point memslot has been committed and there is an + * allocated dirty_bitmap[], dirty pages will be tracked while the + * memory slot is write protected. + */ + + + /* If it's the first time dirty logging, flush all vcpu tlbs. 
*/ + if ((change == KVM_MR_FLAGS_ONLY) && (new->flags & KVM_MEM_LOG_DIRTY_PAGES)) + kvm_mark_migration(kvm, 1); +} + +int kvm_sw64_vcpu_reset(struct kvm_vcpu *vcpu) +{ + unsigned long addr = vcpu->kvm->arch.host_phys_addr; + + hrtimer_cancel(&vcpu->arch.hrt); + vcpu->arch.vcb.soft_cid = vcpu->vcpu_id; + vcpu->arch.vcb.vcpu_irq_disabled = 1; + vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */ + vcpu->arch.power_off = 0; + memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending)); + + if (vcpu->vcpu_id == 0) + memset(__va(addr), 0, 0x2000000); + + return 0; +} + +long kvm_sw64_get_vcb(struct file *filp, unsigned long arg) +{ + struct kvm_vcpu *vcpu = filp->private_data; + + if (vcpu->arch.vcb.migration_mark) { + unsigned long result = sw64_io_read(0, LONG_TIME) + + vcpu->arch.vcb.guest_longtime_offset; + vcpu->arch.vcb.guest_longtime = result; + vcpu->arch.vcb.guest_irqs_pending = vcpu->arch.irqs_pending[0]; + } + + if (copy_to_user((void __user *)arg, &(vcpu->arch.vcb), sizeof(struct vcpucb))) + return -EINVAL; + + return 0; +} + +long kvm_sw64_set_vcb(struct file *filp, unsigned long arg) +{ + unsigned long result; + struct kvm_vcpu *vcpu = filp->private_data; + struct vcpucb *kvm_vcb; + + kvm_vcb = memdup_user((void __user *)arg, sizeof(*kvm_vcb)); + memcpy(&(vcpu->arch.vcb), kvm_vcb, sizeof(struct vcpucb)); + + if (vcpu->arch.vcb.migration_mark) { + /* updated vpcr needed by destination vm */ + vcpu->arch.vcb.vpcr = get_vpcr(vcpu, 0); + /* synchronize the longtime of source and destination */ + if (vcpu->arch.vcb.soft_cid == 0) { + result = sw64_io_read(0, LONG_TIME); + vcpu->arch.vcb.guest_longtime_offset = vcpu->arch.vcb.guest_longtime - result; + longtime_offset = vcpu->arch.vcb.guest_longtime_offset; + } else + vcpu->arch.vcb.guest_longtime_offset = longtime_offset; + + set_timer(vcpu, 200000000); + vcpu->arch.vcb.migration_mark = 0; + } + + return 0; +} + +#ifdef CONFIG_KVM_MEMHOTPLUG +void vcpu_mem_hotplug(struct kvm_vcpu *vcpu, 
unsigned long start_addr) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_memory_slot *slot; + unsigned long start_pfn = start_addr >> PAGE_SHIFT; + + kvm_for_each_memslot(slot, kvm_memslots(kvm)) { + if (start_pfn == slot->base_gfn) { + unsigned long *seg_pgd; + unsigned long num_of_entry = slot->npages >> 17; + unsigned long base_hpa = slot->arch.host_phys_addr; + unsigned long i; + + seg_pgd = kvm->arch.seg_pgd + (start_pfn >> 17); + for (i = 0; i < num_of_entry; i++) { + *seg_pgd = base_hpa + (i << 30); + seg_pgd++; + } + } + } +} +#endif + +void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) +{ +} + +void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn_offset, + unsigned long mask) +{ +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ +} + +void update_aptp(unsigned long pgd) +{ +} + +static int __init kvm_core3_init(void) +{ + int i, ret; + + bind_vcpu_init(); + + ret = vmem_init(); + if (unlikely(ret)) + goto out; + + for (i = 0; i < NR_CPUS; i++) + last_vpn(i) = VPN_FIRST_VERSION; + + ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE); + + if (likely(!ret)) + return 0; + + vmem_exit(); +out: + bind_vcpu_exit(); + return ret; +} + +static void __exit kvm_core3_exit(void) +{ + kvm_exit(); + vmem_exit(); + bind_vcpu_exit(); +} + +module_init(kvm_core3_init); +module_exit(kvm_core3_exit); diff --git a/arch/sw_64/kvm/kvm_core4.c b/arch/sw_64/kvm/kvm_core4.c new file mode 100644 index 000000000000..08d28a365a3b --- /dev/null +++ b/arch/sw_64/kvm/kvm_core4.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include "trace.h" + +static unsigned long shtclock_offset; + +void update_aptp(unsigned long pgd) 
+{
+	imemb();
+	write_csr_imb(pgd, CSR_APTP);
+}
+
+void kvm_sw64_update_vpn(struct kvm_vcpu *vcpu, unsigned long vpn)
+{
+	vcpu->arch.vcb.vpcr = vpn << 44;
+	vcpu->arch.vcb.dtb_vpcr = vpn;
+}
+
+void kvm_flush_tlb_all(void)
+{
+	tbivpn(-1, 0, 0);
+}
+
+int kvm_sw64_init_vm(struct kvm *kvm)
+{
+	return kvm_alloc_addtional_stage_pgd(kvm);
+}
+
+void kvm_sw64_destroy_vm(struct kvm *kvm)
+{
+	kvm_destroy_vcpus(kvm);
+}
+
+int kvm_sw64_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.has_run_once)
+		apt_unmap_vm(vcpu->kvm);
+
+	hrtimer_cancel(&vcpu->arch.hrt);
+	vcpu->arch.vcb.soft_cid = vcpu->vcpu_id;
+	vcpu->arch.vcb.vcpu_irq_disabled = 1;
+	vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */
+	vcpu->arch.power_off = 0;
+	memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending));
+	return 0;
+}
+
+long kvm_sw64_get_vcb(struct file *filp, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+	if (vcpu->arch.migration_mark)
+		vcpu->arch.shtclock = read_csr(CSR_SHTCLOCK) +
+			vcpu->arch.vcb.shtclock_offset;
+	if (copy_to_user((void __user *)arg, &(vcpu->arch.vcb), sizeof(struct vcpucb)))
+		return -EINVAL;
+
+	return 0;
+}
+
+long kvm_sw64_set_vcb(struct file *filp, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+	struct vcpucb *kvm_vcb;
+	/* memdup_user() returns ERR_PTR() on failure; never dereference it */
+	kvm_vcb = memdup_user((void __user *)arg, sizeof(*kvm_vcb));
+	if (IS_ERR(kvm_vcb))
+		return PTR_ERR(kvm_vcb);
+	memcpy(&(vcpu->arch.vcb), kvm_vcb, sizeof(struct vcpucb));
+	kfree(kvm_vcb); /* fix: was leaked on every ioctl */
+	if (vcpu->arch.migration_mark) {
+		/* synchronize the longtime of source and destination */
+		if (vcpu->arch.vcb.soft_cid == 0)
+			shtclock_offset = vcpu->arch.shtclock - read_csr(CSR_SHTCLOCK);
+		vcpu->arch.vcb.shtclock_offset = shtclock_offset;
+		set_timer(vcpu, 200000000);
+		vcpu->arch.migration_mark = 0;
+	}
+	return 0;
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+				   struct kvm_memory_slot *memslot,
+				   const struct kvm_userspace_memory_region *mem,
+				   enum kvm_mr_change change)
+{
+	return 0;
+}
+
+void 
vcpu_set_numa_affinity(struct kvm_vcpu *vcpu) +{ +} + +static int __init kvm_core4_init(void) +{ + int i, ret; + + for (i = 0; i < NR_CPUS; i++) + last_vpn(i) = VPN_FIRST_VERSION; + + ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE); + + if (ret) + return ret; + + return 0; +} + +static void __exit kvm_core4_exit(void) +{ + kvm_exit(); +} + +module_init(kvm_core4_init); +module_exit(kvm_core4_exit); diff --git a/arch/sw_64/kvm/kvm_timer.c b/arch/sw_64/kvm/kvm_timer.c new file mode 100644 index 000000000000..895be63cd8d1 --- /dev/null +++ b/arch/sw_64/kvm/kvm_timer.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + */ +#include +#include +#include +#include + +/* + * The Guest Clock. + * + * There are two sources of virtual interrupts. We saw one in lguest_user.c: + * the Launcher sending interrupts for virtual devices. The other is the Guest + * timer interrupt. + * + * The Guest uses the LHCALL_SET_CLOCKEVENT hypercall to tell us how long to + * the next timer interrupt (in ticks). We use the high-resolution timer + * infrastructure to set a callback at that time. + * + * 0 means "turn off the clock". + */ + +void set_timer(struct kvm_vcpu *vcpu, unsigned long delta) +{ + ktime_t expires; + + if (unlikely(delta == 0)) { + /* Clock event device is shutting down. */ + hrtimer_cancel(&vcpu->arch.hrt); + return; + } + + /* Convert clock event device ticks to nanoseconds */ + delta = delta * NSEC_PER_SEC; + do_div(delta, vcpu->arch.vtimer_freq); + + /* + * We use wallclock time here, so the Guest might not be running for + * all the time between now and the timer interrupt it asked for. This + * is almost always the right thing to do. + */ + + expires = ktime_add_ns(ktime_get_real(), delta); + vcpu->arch.timer_next_event = expires; + hrtimer_start(&vcpu->arch.hrt, expires, HRTIMER_MODE_ABS); +} + +/* And this is the routine when we want to set an interrupt for the Guest. 
*/ +void set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq) +{ + /* + * Next time the Guest runs, the core code will see if it can deliver + * this interrupt. + */ + set_bit(irq, (vcpu->arch.irqs_pending)); + + /* + * Make sure it sees it; it might be asleep (eg. halted), or running + * the Guest right now, in which case kick_process() will knock it out. + */ + kvm_vcpu_kick(vcpu); +} + +enum hrtimer_restart clockdev_fn(struct hrtimer *timer) +{ + struct kvm_vcpu *vcpu; + ktime_t now, delta; + + vcpu = container_of(timer, struct kvm_vcpu, arch.hrt); + + now = ktime_get_real(); + + if (now < vcpu->arch.timer_next_event) { + delta = vcpu->arch.timer_next_event - now; + hrtimer_forward_now(timer, delta); + return HRTIMER_RESTART; + } + + set_interrupt(vcpu, SW64_KVM_IRQ_TIMER); + return HRTIMER_NORESTART; +} diff --git a/arch/sw_64/kvm/mmio.c b/arch/sw_64/kvm/mmio.c new file mode 100644 index 000000000000..21ad89722f9a --- /dev/null +++ b/arch/sw_64/kvm/mmio.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ +#include +#include +#include +#include + +static unsigned long mmio_read_buf(char *buf, unsigned int len) +{ + unsigned long data = 0; + union { + u16 hword; + u32 word; + u64 dword; + } tmp; + + switch (len) { + case 1: + data = buf[0]; + break; + case 2: + memcpy(&tmp.hword, buf, len); + data = tmp.hword; + break; + case 4: + memcpy(&tmp.word, buf, len); + data = tmp.word; + break; + case 8: + memcpy(&tmp.dword, buf, len); + data = tmp.dword; + break; + } + + return data; +} + +int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + unsigned long data; + unsigned int len; + + if (!run->mmio.is_write) { + len = run->mmio.len; + if (len > sizeof(unsigned long)) + return -EINVAL; + + data = mmio_read_buf(run->mmio.data, len); + vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data); + } + + vcpu->stat.mmio_exits++; + vcpu->arch.regs.pc += 4; + + return 0; +} + 
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct hcall_args *hargs) +{ + int ret; + +#ifdef CONFIG_SUBARCH_C3B + run->mmio.phys_addr = hargs->arg1 & 0xfffffffffffffUL; + sw64_decode(vcpu, hargs->arg2, run); +#elif defined(CONFIG_SUBARCH_C4) + run->mmio.phys_addr = read_csr(CSR_DVA) & 0xfffffffffffffUL; + sw64_decode(vcpu, 0, run); +#endif + if (run->mmio.is_write) + ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, + run->mmio.len, run->mmio.data); + else + ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, + run->mmio.len, run->mmio.data); + + if (!ret) { + /* We handled the access successfully in the kernel. */ + kvm_handle_mmio_return(vcpu, run); + return 1; + } + + run->exit_reason = KVM_EXIT_MMIO; + return 0; +} diff --git a/arch/sw_64/kvm/mmu.c b/arch/sw_64/kvm/mmu.c new file mode 100644 index 000000000000..b0b492a4fbff --- /dev/null +++ b/arch/sw_64/kvm/mmu.c @@ -0,0 +1,1561 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 - os kernal + * Author: lff + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#define KVM_APT_FLAG_LOGGING_ACTIVE (1UL << 1) + +static bool memslot_is_logging(struct kvm_memory_slot *memslot) +{ + return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); +} + +/* + * Return values of kvm_handle_mmio_page_fault and mmu.page_fault: + * RET_AF_RETRY: let CPU fault again on the address. + * RET_AF_EMULATE: mmio page fault, emulate the instruction directly. + * + * For kvm_handle_mmio_page_fault only: + * RET_AF_INVALID: the spte is invalid, let the real page fault path update it. + */ +enum { + RET_AF_RETRY = 0, + RET_AF_EMULATE = 1, + RET_AF_INVALID = 2, +}; + +/** + * apt_dissolve_pmd() - clear and flush huge PMD entry + * @kvm: pointer to kvm structure. + * @addr: IPA + * @pmd: pmd pointer for IPA + * + * Function clears a PMD entry, flushes TLBs. + */ +static void apt_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd) +{ + int i; + + if (!pmd_trans_huge(*pmd)) + return; + + if (pmd_trans_cont(*pmd)) { + for (i = 0; i < CONT_PMDS; i++, pmd++) + pmd_clear(pmd); + } else + pmd_clear(pmd); + + kvm_flush_remote_tlbs(kvm); + put_page(virt_to_page(pmd)); +} + +/** + * apt_dissolve_pud() - clear and flush huge PUD entry + * @kvm: pointer to kvm structure. + * @addr: IPA + * @pud: pud pointer for IPA + * + * Function clears a PUD entry, flushes TLBs. 
+ */ +static void apt_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp) +{ + if (!pud_huge(*pudp)) + return; + + pud_clear(pudp); + kvm_flush_remote_tlbs(kvm); + put_page(virt_to_page(pudp)); +} + +static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, + int min, int max) +{ + void *page; + + BUG_ON(max > KVM_NR_MEM_OBJS); + if (cache->nobjs >= min) + return 0; + while (cache->nobjs < max) { + page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + return -ENOMEM; + cache->objects[cache->nobjs++] = page; + } + return 0; +} + +static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) +{ + while (mc->nobjs) + free_page((unsigned long)mc->objects[--mc->nobjs]); +} + +void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) +{ + mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); +} + +static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) +{ + void *p; + + BUG_ON(!mc || !mc->nobjs); + p = mc->objects[--mc->nobjs]; + return p; +} + +static void unmap_apt_ptes(struct kvm *kvm, pmd_t *pmd, + phys_addr_t addr, phys_addr_t end) +{ + pte_t *pte, *start_pte; + struct page *ptr_page; + + start_pte = pte = pte_offset_kernel(pmd, addr); + do { + if (!pte_none(*pte)) { + /* Do we need WRITE_ONCE(pte, 0)? 
*/ + set_pte(pte, __pte(0)); + put_page(virt_to_page(pte)); + } + } while (pte++, addr += PAGE_SIZE, addr != end); + + ptr_page = virt_to_page(start_pte); + if (page_count(ptr_page) == 1) { + pte_t *pte_table = pte_offset_kernel(pmd, 0); + + pmd_clear(pmd); + free_page((unsigned long)pte_table); + put_page(virt_to_page(pmd)); + } +} + +static void unmap_apt_pmds(struct kvm *kvm, pud_t *pud, + phys_addr_t addr, phys_addr_t end) +{ + phys_addr_t next; + pmd_t *pmd, *start_pmd; + struct page *ptr_page; + int i; + + start_pmd = pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (!pmd_none(*pmd)) { + if (pmd_trans_huge(*pmd)) { + if (pmd_trans_cont(*pmd)) { + for (i = 0; i < CONT_PMDS; i++, pmd++) + pmd_clear(pmd); + } else + pmd_clear(pmd); + /* Do we need flush tlb???? edited by lff */ + kvm_flush_remote_tlbs(kvm); + put_page(virt_to_page(pmd)); + } else { + unmap_apt_ptes(kvm, pmd, addr, next); + } + } + } while (pmd++, addr = next, addr != end); + + ptr_page = virt_to_page(start_pmd); + if (page_count(ptr_page) == 1) { + pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0UL); + + pud_clear(pud); + free_page((unsigned long)pmd_table); + put_page(virt_to_page(pud)); + } +} + +static void unmap_apt_puds(struct kvm *kvm, p4d_t *p4d, + phys_addr_t addr, phys_addr_t end) +{ + phys_addr_t next; + pud_t *pud, *start_pud; + struct page *ptr_page; + + start_pud = pud = pud_offset(p4d, addr); + do { + next = pud_addr_end(addr, end); + if (!pud_none(*pud)) { + if (pud_huge(*pud)) { + pud_clear(pud); + /* Do we need flush tlb???? 
edited by lff */ + kvm_flush_remote_tlbs(kvm); + put_page(virt_to_page(pud)); + } else { + unmap_apt_pmds(kvm, pud, addr, next); + } + } + } while (pud++, addr = next, addr != end); + + ptr_page = virt_to_page(start_pud); + if (page_count(ptr_page) == 1) { + pud_t *pud_table __maybe_unused = pud_offset(p4d, 0UL); + + p4d_clear(p4d); + kvm_flush_remote_tlbs(kvm); + free_page((unsigned long)pud_table); + put_page(virt_to_page(p4d)); + } +} + +/** + * unmap_apt_range -- Clear addtional page table entries to unmap a range + * @kvm: The VM pointer + * @start: The intermediate physical base address of the range to unmap + * @size: The size of the area to unmap + * + * Clear a range of apt mappings, lowering the various ref-counts. Must + * be called while holding mmu_lock (unless for freeing the apt pgd before + * destroying the VM), otherwise another faulting VCPU may come in and mess + * with things behind our backs. + */ +static void unmap_apt_range(struct kvm *kvm, phys_addr_t start, u64 size) +{ + pgd_t *pgd; + p4d_t *p4d; + phys_addr_t addr = start, end = start + size; + phys_addr_t next; + + assert_spin_locked(&kvm->mmu_lock); + WARN_ON(size & ~PAGE_MASK); + + pgd = kvm->arch.pgd + pgd_index(addr); + p4d = p4d_offset(pgd, addr); + do { + /* + * Make sure the page table is still active, as another thread + * could have possibly freed the page table, while we released + * the lock. + */ + if (!READ_ONCE(kvm->arch.pgd)) + break; + next = p4d_addr_end(addr, end); + if (!p4d_none(*p4d)) + unmap_apt_puds(kvm, p4d, addr, next); + /* + * If the range is too large, release the kvm->mmu_lock + * to prevent starvation and lockup detector warnings. 
+ */ + if (next != end) + cond_resched_lock(&kvm->mmu_lock); + } while (pgd++, addr = next, addr != end); +} + +static void apt_unmap_memslot(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + hva_t hva = memslot->userspace_addr; + phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; + phys_addr_t size = PAGE_SIZE * memslot->npages; + hva_t reg_end = hva + size; + + /* + * A memory region could potentially cover multiple VMAs, and any holes + * between them, so iterate over all of them to find out if we should + * unmap any of them. + * + * +--------------------------------------------+ + * +---------------+----------------+ +----------------+ + * | : VMA 1 | VMA 2 | | VMA 3 : | + * +---------------+----------------+ +----------------+ + * | memory region | + * +--------------------------------------------+ + */ + do { + struct vm_area_struct *vma = find_vma(current->mm, hva); + hva_t vm_start, vm_end; + + if (!vma || vma->vm_start >= reg_end) + break; + + /* + * Take the intersection of this VMA with the memory region + */ + vm_start = max(hva, vma->vm_start); + vm_end = min(reg_end, vma->vm_end); + + if (!(vma->vm_flags & VM_PFNMAP)) { + gpa_t gpa = addr + (vm_start - memslot->userspace_addr); + + unmap_apt_range(kvm, gpa, vm_end - vm_start); + } + hva = vm_end; + } while (hva < reg_end); +} + +/** + * apt_unmap_vm - Unmap Additional Stage RAM mappings + * @kvm: The struct kvm pointer + * + * Go through the memregions and unmap any reguler RAM + * backing memory already mapped to the VM. 
+ */ +void apt_unmap_vm(struct kvm *kvm) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int idx; + + idx = srcu_read_lock(&kvm->srcu); + down_read(¤t->mm->mmap_lock); + spin_lock(&kvm->mmu_lock); + + slots = kvm_memslots(kvm); + kvm_for_each_memslot(memslot, slots) + apt_unmap_memslot(kvm, memslot); + spin_unlock(&kvm->mmu_lock); + up_read(¤t->mm->mmap_lock); + srcu_read_unlock(&kvm->srcu, idx); +} + +static pud_t *apt_get_pud(pgd_t *pgd, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr) +{ + p4d_t *p4d; + pud_t *pud; + + pgd += pgd_index(addr); + if (pgd_none(*pgd)) { + /* Not used on SW64 yet */ + VM_BUG_ON(pgd); + return NULL; + } + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) { + if (!cache) + return NULL; + pud = mmu_memory_cache_alloc(cache); + p4d_populate(NULL, p4d, pud); + get_page(virt_to_page(p4d)); + } + return pud_offset(p4d, addr); +} + +static pmd_t *apt_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, unsigned long sz) +{ + pud_t *pud; + pmd_t *pmd; + + pud = apt_get_pud(kvm->arch.pgd, cache, addr); + if (!pud || pud_huge(*pud)) + return NULL; + + if (pud_none(*pud)) { + if (!cache) + return NULL; + pmd = mmu_memory_cache_alloc(cache); + pud_populate(NULL, pud, pmd); + get_page(virt_to_page(pud)); + } + if (sz == CONT_PMD_SIZE) + addr &= CONT_PMD_MASK; + + return pmd_offset(pud, addr); +} + +static bool kvm_is_write_fault(unsigned long access_type) +{ + if (access_type == AF_WRITE_ACCESS_TYPE) + return true; + + return false; +} + +static bool kvm_is_exec_fault(unsigned long access_type) +{ + if (access_type == AF_EXEC_ACCESS_TYPE) + return true; + + return false; +} +/** + * apt_wp_ptes - write protect PMD range + * @pmd: pointer to pmd entry + * @addr: range start address + * @end: range end address + */ +static void apt_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end) +{ + pte_t *pte; + + pte = pte_offset_kernel(pmd, addr); + do { + if (!pte_none(*pte)) { + if 
(!kvm_aptpte_readonly(pte)) + kvm_set_aptpte_readonly(pte); + } + } while (pte++, addr += PAGE_SIZE, addr != end); +} + +/** + * apt_wp_pmds - write protect PUD range + * @pud: pointer to pud entry + * @addr: range start address + * @end: range end address + */ +static void apt_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end) +{ + pmd_t *pmd; + phys_addr_t next; + + pmd = pmd_offset(pud, addr); + + do { + next = pmd_addr_end(addr, end); + if (!pmd_none(*pmd)) { + if (pmd_trans_huge(*pmd)) { + if (!kvm_aptpmd_readonly(pmd)) + kvm_set_aptpmd_readonly(pmd); + } else { + apt_wp_ptes(pmd, addr, next); + } + } + } while (pmd++, addr = next, addr != end); +} + +/** + * apt_wp_puds - write protect PGD range + * @pgd: pointer to pgd entry + * @addr: range start address + * @end: range end address + * + * Process PUD entries, for a huge PUD we cause a panic. + */ +static void apt_wp_puds(p4d_t *p4d, phys_addr_t addr, phys_addr_t end) +{ + pud_t *pud; + phys_addr_t next; + + pud = pud_offset(p4d, addr); + do { + next = pud_addr_end(addr, end); + if (!pud_none(*pud)) { + if (pud_huge(*pud)) { + if (!kvm_aptpud_readonly(pud)) + kvm_set_aptpud_readonly(pud); + } else { + /* TODO:PUD not supported, revisit later if supported */ +// BUG_ON(pud_trans_huge(*pud)); + apt_wp_pmds(pud, addr, next); + } + } + } while (pud++, addr = next, addr != end); +} + +/** + * apt_wp_range() - write protect apt memory region range + * @kvm: The KVM pointer + * @addr: Start address of range + * @end: End address of range + */ +static void apt_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) +{ + pgd_t *pgd; + p4d_t *p4d; + phys_addr_t next; + + pgd = kvm->arch.pgd + pgd_index(addr); + p4d = p4d_offset(pgd, addr); + + do { + /* + * Release kvm_mmu_lock periodically if the memory region is + * large. Otherwise, we may see kernel panics with + * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR, + * CONFIG_LOCKDEP. 
Additionally, holding the lock too long + * will also starve other vCPUs. We have to also make sure + * that the page tables are not freed while we released + * the lock. + */ + cond_resched_lock(&kvm->mmu_lock); + if (!READ_ONCE(kvm->arch.pgd)) + break; + next = p4d_addr_end(addr, end); + if (p4d_present(*p4d)) + apt_wp_puds(p4d, addr, next); + } while (p4d++, addr = next, addr != end); +} + +/** + * kvm_mmu_wp_memory_region() - write protect apt entries for memory slot + * @kvm: The KVM pointer + * @slot: The memory slot to write protect + * + * Called to start logging dirty pages after memory region + * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns + * all present PMD and PTEs are write protected in the memory region. + * Afterwards read of dirty page log can be called. + * + * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired, + * serializing operations for VM memory regions. + */ +void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) +{ + struct kvm_memslots *slots = kvm_memslots(kvm); + struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); + phys_addr_t start = memslot->base_gfn << PAGE_SHIFT; + phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; + + spin_lock(&kvm->mmu_lock); + apt_wp_range(kvm, start, end); + spin_unlock(&kvm->mmu_lock); + kvm_flush_remote_tlbs(kvm); // 需要通知其他vcpu进行tlb刷新,利用request机制 +} + +void kvm_mark_migration(struct kvm *kvm, int mark) +{ + struct kvm_vcpu *vcpu; + unsigned long cpu; + + kvm_for_each_vcpu(cpu, vcpu, kvm) + vcpu->arch.migration_mark = mark; +} + +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + /* + * At this point memslot has been committed and there is an + * allocated dirty_bitmap[], dirty pages will be tracked while the + * memory slot is write protected. 
+ */ + if (change == KVM_MR_FLAGS_ONLY && (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) && + new->flags & KVM_MEM_LOG_DIRTY_PAGES)) { + kvm_mark_migration(kvm, 1); + kvm_mmu_wp_memory_region(kvm, new->id); + } + /* If dirty logging has been stopped, do nothing for now. */ + if ((change != KVM_MR_DELETE) + && (old->flags & KVM_MEM_LOG_DIRTY_PAGES) + && (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))) { + kvm_mark_migration(kvm, 0); + return; + } +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ + gpa_t gpa = slot->base_gfn << PAGE_SHIFT; + phys_addr_t size = slot->npages << PAGE_SHIFT; + + spin_lock(&kvm->mmu_lock); +// flush_apt_tlbs(kvm); + unmap_apt_range(kvm, gpa, size); + spin_unlock(&kvm->mmu_lock); +} + +/** + * kvm_alloc_addtional_stage_pgd - allocate level-1 table for addtional stage translation. + * @kvm: The KVM struct pointer for the VM. + * + * Allocates only the addtional stage HW PGD level table(s) (can support full + * 48-bit input addresses). Clears the allocated pages. + * + * Note we don't need locking here as this is only called when the VM is + * created, which can only be done once. + */ +int kvm_alloc_addtional_stage_pgd(struct kvm *kvm) +{ + pgd_t *pgd; + + if (kvm->arch.pgd != NULL) { + kvm_err("kvm_arch already initialized?\n"); + return -EINVAL; + } + + /* Allocate the HW PGD, making sure that each page gets its own refcount */ + pgd = alloc_pages_exact(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO); + if (!pgd) + return -ENOMEM; + + kvm->arch.pgd = pgd; + return 0; +} + +/** + * kvm_free_apt_pgd - free all apt tables + * @kvm: The KVM struct pointer for the VM. + * + * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all + * underlying level-2 and level-3 tables before freeing the actual level-1 table + * and setting the struct pointer to NULL. 
+ */ +void kvm_free_apt_pgd(struct kvm *kvm) +{ + void *pgd = NULL; + + spin_lock(&kvm->mmu_lock); + if (kvm->arch.pgd) { + unmap_apt_range(kvm, 0, KVM_PHYS_SIZE); + pgd = READ_ONCE(kvm->arch.pgd); + kvm->arch.pgd = NULL; + } + spin_unlock(&kvm->mmu_lock); + + /* Free the HW pgd, one page at a time */ + if (pgd) + free_pages_exact(pgd, PAGE_SIZE); +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ + kvm_free_apt_pgd(kvm); +} + +static void kvm_send_hwpoison_signal(unsigned long address, + struct vm_area_struct *vma) +{ + kernel_siginfo_t info; + + clear_siginfo(&info); + info.si_signo = SIGBUS; + info.si_errno = 0; + info.si_code = BUS_MCEERR_AR; + info.si_addr = (void __user *)address; + + if (is_vm_hugetlb_page(vma)) + info.si_addr_lsb = huge_page_shift(hstate_vma(vma)); + else + info.si_addr_lsb = PAGE_SHIFT; + + send_sig_info(SIGBUS, &info, current); +} + +static bool fault_supports_apt_huge_mapping(struct kvm_memory_slot *memslot, + unsigned long hva, + unsigned long map_size) +{ + gpa_t gpa_start; + hva_t uaddr_start, uaddr_end; + size_t size; + + /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */ + if (map_size == PAGE_SIZE) + return true; + + size = memslot->npages * PAGE_SIZE; + + gpa_start = memslot->base_gfn << PAGE_SHIFT; + + uaddr_start = memslot->userspace_addr; + uaddr_end = uaddr_start + size; + + /* + * Pages belonging to memslots that don't have the same alignment + * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2 + * PMD/PUD entries, because we'll end up mapping the wrong pages. 
+ * + * Consider a layout like the following: + * + * memslot->userspace_addr: + * +-----+--------------------+--------------------+---+ + * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| + * +-----+--------------------+--------------------+---+ + * + * memslot->base_gfn << PAGE_SHIFT: + * +---+--------------------+--------------------+-----+ + * |abc|def Stage-2 block | Stage-2 block |tvxyz| + * +---+--------------------+--------------------+-----+ + * + * If we create those stage-2 blocks, we'll end up with this incorrect + * mapping: + * d -> f + * e -> g + * f -> h + */ + if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))) + return false; + + /* + * Next, let's make sure we're not trying to map anything not covered + * by the memslot. This means we have to prohibit block size mappings + * for the beginning and end of a non-block aligned and non-block sized + * memory slot (illustrated by the head and tail parts of the + * userspace view above containing pages 'abcde' and 'xyz', + * respectively). + * + * Note that it doesn't matter if we do the check using the + * userspace_addr or the base_gfn, as both are equally aligned (per + * the check above) and equally sized. + */ + return (hva & ~(map_size - 1)) >= uaddr_start && + (hva & ~(map_size - 1)) + map_size <= uaddr_end; +} + +/* + * apt_get_leaf_entry - walk the stage2 VM page tables and return + * true if a valid and present leaf-entry is found. A pointer to the + * leaf-entry is returned in the appropriate level variable - pudpp, + * pmdpp, ptepp. 
+ */ +static bool apt_get_leaf_entry(struct kvm *kvm, phys_addr_t addr, + pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp) +{ + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + + *pudpp = NULL; + *pmdpp = NULL; + *ptepp = NULL; + + pudp = apt_get_pud(kvm->arch.pgd, NULL, addr); + if (!pudp || pud_none(*pudp) || !pud_present(*pudp)) + return false; + + if (pud_huge(*pudp)) { + *pudpp = pudp; + return true; + } + + pmdp = pmd_offset(pudp, addr); + if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp)) + return false; + + if (pmd_trans_huge(*pmdp)) { + *pmdpp = pmdp; + return true; + } + + ptep = pte_offset_kernel(pmdp, addr); + if (!ptep || pte_none(*ptep) || !pte_present(*ptep)) + return false; + + *ptepp = ptep; + return true; +} + +static bool apt_is_exec(struct kvm *kvm, phys_addr_t addr) +{ + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + bool found; + + found = apt_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep); + if (!found) + return false; + + if (pudp) + return kvm_pud_exec(pudp); + else if (pmdp) + return kvm_pmd_exec(pmdp); + else + return kvm_pte_exec(ptep); +} + +static int apt_set_pte_fast(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, const pte_t *new_pte, + unsigned long flags) +{ + pud_t *pud; + pmd_t *pmd; + pte_t *pte, old_pte; + bool logging_active = flags & KVM_APT_FLAG_LOGGING_ACTIVE; + int inv_level = ((read_csr(CSR_AS_INFO)) >> AF_INV_LEVEL_SHIFT) & AF_INV_LEVEL_MASK; + unsigned long inv_hpa = read_csr(CSR_AS_INFO) & AF_ENTRY_ADDR_MASK; + + VM_BUG_ON(logging_active && !cache); + + if (inv_level == 1) { + pud = (pud_t *)(inv_hpa | PAGE_OFFSET); + goto find_pud; + } else if (inv_level == 2) { + pmd = (pmd_t *)(inv_hpa | PAGE_OFFSET); + goto find_pmd; + } else if (inv_level == 3) { + pte = (pte_t *)(inv_hpa | PAGE_OFFSET); + goto find_pte; + } + + /* Create addtional page table mapping - Levels 0 and 1 */ + pud = apt_get_pud(kvm->arch.pgd, cache, addr); + if (!pud) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * 
address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PUD, then continue + * on to allocate page. + */ + if (logging_active) + apt_dissolve_pud(kvm, addr, pud); + +find_pud: + if (pud_none(*pud)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pmd = mmu_memory_cache_alloc(cache); + pud_populate(NULL, pud, pmd); + get_page(virt_to_page(pud)); + } + + pmd = pmd_offset(pud, addr); + if (!pmd) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PMD, then continue on to + * allocate page. + */ + if (logging_active) + apt_dissolve_pmd(kvm, addr, pmd); + +find_pmd: + /* Create stage-2 page mappings - Level 2 */ + if (pmd_none(*pmd)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pte = mmu_memory_cache_alloc(cache); + pmd_populate_kernel(NULL, pmd, pte); + get_page(virt_to_page(pmd)); + } + + pte = pte_offset_kernel(pmd, addr); + +find_pte: + /* Create 2nd stage page table mapping - Level 3 */ + old_pte = *pte; + + /* new pte should be readonly? */ +// *new_pte = pte_wrprotect(*new_pte); + + if (pte_present(old_pte)) { + /* Skip page table update if there is no change */ + if (pte_val(old_pte) == pte_val(*new_pte)) + return 0; + + /* Do we need WRITE_ONCE(pte, 0)? */ + set_pte(pte, __pte(0)); + kvm_flush_remote_tlbs(kvm); + } else { + get_page(virt_to_page(pte)); + } + + /* Do we need WRITE_ONCE(pte, new_pte)? 
*/ + set_pte(pte, *new_pte); + return 0; +} + +static int apt_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, const pte_t *new_pte, + unsigned long flags) +{ + pud_t *pud; + pmd_t *pmd; + pte_t *pte, old_pte; + bool logging_active = flags & KVM_APT_FLAG_LOGGING_ACTIVE; + + VM_BUG_ON(logging_active && !cache); + + /* Create addtional page table mapping - Levels 0 and 1 */ + pud = apt_get_pud(kvm->arch.pgd, cache, addr); + if (!pud) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PUD, then continue + * on to allocate page. + */ + if (logging_active) + apt_dissolve_pud(kvm, addr, pud); + + if (pud_none(*pud)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pmd = mmu_memory_cache_alloc(cache); + pud_populate(NULL, pud, pmd); + get_page(virt_to_page(pud)); + } + + pmd = pmd_offset(pud, addr); + if (!pmd) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PMD, then continue on to + * allocate page. + */ + if (logging_active) + apt_dissolve_pmd(kvm, addr, pmd); + + /* Create stage-2 page mappings - Level 2 */ + if (pmd_none(*pmd)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pte = mmu_memory_cache_alloc(cache); + pmd_populate_kernel(NULL, pmd, pte); + get_page(virt_to_page(pmd)); + } + + pte = pte_offset_kernel(pmd, addr); + + /* Create 2nd stage page table mapping - Level 3 */ + old_pte = *pte; + + /* new pte should be readonly? */ +// *new_pte = pte_wrprotect(*new_pte); + + if (pte_present(old_pte)) { + /* Skip page table update if there is no change */ + if (pte_val(old_pte) == pte_val(*new_pte)) + return 0; + + /* Do we need WRITE_ONCE(pte, 0)? 
*/ + set_pte(pte, __pte(0)); + kvm_flush_remote_tlbs(kvm); + } else { + get_page(virt_to_page(pte)); + } + + /* Do we need WRITE_ONCE(pte, new_pte)? */ + set_pte(pte, *new_pte); + return 0; +} + + + +static int apt_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache + *cache, phys_addr_t addr, const pmd_t *new_pmd, unsigned long sz) +{ + pmd_t *pmd, old_pmd, *ori_pmd; + int i; +retry: + pmd = apt_get_pmd(kvm, cache, addr, sz); + VM_BUG_ON(!pmd); + ori_pmd = pmd; + old_pmd = *pmd; + if (pmd_present(old_pmd)) { + /* + * If we already have PTE level mapping for this block, + * we must unmap it to avoid inconsistent TLB state and + * leaking the table page. We could end up in this situation + * if the memory slot was marked for dirty logging and was + * reverted, leaving PTE level mappings for the pages accessed + * during the period. So, unmap the PTE level mapping for this + * block and retry, as we could have released the upper level + * table in the process. + * + * Normal THP split/merge follows mmu_notifier callbacks and do + * get handled accordingly. + */ + if (!pmd_trans_huge(old_pmd)) { + unmap_apt_range(kvm, addr & PMD_MASK, PMD_SIZE); + goto retry; + } + /* + * Multiple vcpus faulting on the same PMD entry, can + * lead to them sequentially updating the PMD with the + * same value. Following the break-before-make + * (pmd_clear() followed by tlb_flush()) process can + * hinder forward progress due to refaults generated + * on missing translations. + * + * Skip updating the page table if the entry is + * unchanged. + */ + if (pmd_val(old_pmd) == pmd_val(*new_pmd)) + return 0; + + /* + * Mapping in huge pages should only happen through a + * fault. If a page is merged into a transparent huge + * page, the individual subpages of that huge page + * should be unmapped through MMU notifiers before we + * get here. + * + * Merging of CompoundPages is not supported; they + * should become splitting first, unmapped, merged, + * and mapped back in on-demand. 
+ */ + VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd)); + + if (sz == CONT_PMD_SIZE) { + for (i = 0; i < CONT_PMDS; i++, pmd++) + pmd_clear(pmd); + } else + pmd_clear(pmd); + kvm_flush_remote_tlbs(kvm); + } else { + get_page(virt_to_page(pmd)); + } + + /* Do we need WRITE_ONCE(pmd, new_pmd)? */ + if (sz == CONT_PMD_SIZE) { + for (i = 0; i < CONT_PMDS; i++, ori_pmd++) + set_pmd(ori_pmd, *new_pmd); + } else + set_pmd(pmd, *new_pmd); + return 0; +} + +static int apt_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, const pud_t *new_pudp) +{ + pud_t *pudp, old_pud; + +retry: + pudp = apt_get_pud(kvm->arch.pgd, cache, addr); + VM_BUG_ON(!pudp); + + old_pud = *pudp; + + /* + * A large number of vcpus faulting on the same stage 2 entry, + * can lead to a refault due to the stage2_pud_clear()/tlb_flush(). + * Skip updating the page tables if there is no change. + */ + if (pud_val(old_pud) == pud_val(*new_pudp)) + return 0; + + if (pud_present(old_pud)) { + /* + * If we already have table level mapping for this block, unmap + * the range for this block and retry. + */ + if (!pud_huge(old_pud)) { + unmap_apt_range(kvm, addr & PUD_MASK, PUD_SIZE); + goto retry; + } + +// WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp)); + pud_clear(pudp); + kvm_flush_remote_tlbs(kvm); + } else { + get_page(virt_to_page(pudp)); + } + + set_pud(pudp, *new_pudp); + return 0; +} + +static unsigned long +transparent_hugepage_adjust(struct kvm_memory_slot *memslot, + unsigned long hva, kvm_pfn_t *pfnp, + phys_addr_t *gpap) +{ + kvm_pfn_t pfn = *pfnp; + struct page *page = pfn_to_page(pfn); + + /* + * Make sure the adjustment is done only for THP pages. Also make + * sure that the HVA and IPA are sufficiently aligned and that the + * block map is contained within the memslot. 
+ */ + if (!PageHuge(page) && PageTransCompoundMap(page) && + fault_supports_apt_huge_mapping(memslot, hva, PMD_SIZE)) { + /* + * The address we faulted on is backed by a transparent huge + * page. However, because we map the compound huge page and + * not the individual tail page, we need to transfer the + * refcount to the head page. We have to be careful that the + * THP doesn't start to split while we are adjusting the + * refcounts. + * + * We are sure this doesn't happen, because mmu_notifier_retry + * was successful and we are holding the mmu_lock, so if this + * THP is trying to split, it will be blocked in the mmu + * notifier before touching any of the pages, specifically + * before being able to call __split_huge_page_refcount(). + * + * We can therefore safely transfer the refcount from PG_tail + * to PG_head and switch the pfn from a tail page to the head + * page accordingly. + */ + *gpap &= PMD_MASK; + kvm_release_pfn_clean(pfn); + pfn &= ~(PTRS_PER_PMD - 1); + kvm_get_pfn(pfn); + *pfnp = pfn; + return PMD_SIZE; + } + + return PAGE_SIZE; +} + +static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_gpa, + struct kvm_memory_slot *memslot, unsigned long hva, + unsigned long fault_status) +{ + int ret; + bool write_fault, exec_fault, writable, force_pte = false; + unsigned long mmu_seq; + gfn_t gfn = fault_gpa >> PAGE_SHIFT; + struct kvm *kvm = vcpu->kvm; + struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; + struct vm_area_struct *vma; + kvm_pfn_t pfn; + pgprot_t mem_type = PAGE_READONLY; + bool logging_active = memslot_is_logging(memslot); + unsigned long vma_pagesize, flags = 0; + unsigned long as_info, access_type; + unsigned int vma_shift; + + as_info = read_csr(CSR_AS_INFO); + access_type = (as_info >> AF_ACCESS_TYPE_SHIFT) & AF_ACCESS_TYPE_MASK; + write_fault = kvm_is_write_fault(access_type); + exec_fault = kvm_is_exec_fault(access_type); + VM_BUG_ON(write_fault && exec_fault); + + if (fault_status == AF_STATUS_FOR) { 
+ kvm_err("Unexpected APT read permission error\n"); + return -EFAULT; + } + + /* Let's check if we will get back a huge page backed by hugetlbfs */ + down_read(¤t->mm->mmap_lock); + vma = find_vma_intersection(current->mm, hva, hva + 1); + if (unlikely(!vma)) { + kvm_err("Failed to find VMA for hva 0x%lx\n", hva); + up_read(¤t->mm->mmap_lock); + return -EFAULT; + } + + if (is_vm_hugetlb_page(vma)) + vma_shift = huge_page_shift(hstate_vma(vma)); + else + vma_shift = PAGE_SHIFT; + + vma_pagesize = 1ULL << vma_shift; + if (logging_active || (vma->vm_flags & VM_PFNMAP) || + !fault_supports_apt_huge_mapping(memslot, hva, vma_pagesize)) { + force_pte = true; + vma_pagesize = PAGE_SIZE; + } + + if (vma_pagesize == PMD_SIZE || vma_pagesize == CONT_PMD_SIZE || vma_pagesize == PUD_SIZE) + gfn = (fault_gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; + up_read(¤t->mm->mmap_lock); + /* We need minimum second+third level pages */ + ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES, + KVM_NR_MEM_OBJS); + if (ret) + return ret; + + mmu_seq = vcpu->kvm->mmu_notifier_seq; + /* + * Ensure the read of mmu_notifier_seq happens before we call + * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk + * the page we just got a reference to gets unmapped before we have a + * chance to grab the mmu_lock, which ensure that if the page gets + * unmapped afterwards, the call to kvm_unmap_hva will take it away + * from us again properly. This smp_rmb() interacts with the smp_wmb() + * in kvm_mmu_notifier_invalidate_. + */ + smp_rmb(); + + pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); + if (pfn == KVM_PFN_ERR_HWPOISON) { + kvm_send_hwpoison_signal(hva, vma); + return 0; + } + if (is_error_noslot_pfn(pfn)) + return -EFAULT; + + if (logging_active) { + /* + * Faults on pages in a memslot with logging enabled + * should not be mapped with huge pages (it introduces churn + * and performance degradation), so force a pte mapping. 
+ */ + flags |= KVM_APT_FLAG_LOGGING_ACTIVE; + + /* + * Only actually map the page as writable if this was a write + * fault. + */ + if (!write_fault) + writable = false; + } + + spin_lock(&kvm->mmu_lock); + if (mmu_notifier_retry(kvm, mmu_seq)) + goto out_unlock; + + /* + * If we are not forced to use page mapping, check if we are + * backed by a THP and thus use block mapping if possible. + */ + if (vma_pagesize == PAGE_SIZE && !force_pte) { + vma_pagesize = transparent_hugepage_adjust(memslot, hva, + &pfn, &fault_gpa); + } + + if (vma_pagesize == PUD_SIZE) { + pud_t new_pud = pfn_pud(pfn, mem_type); + + new_pud = pud_mkhuge(new_pud); + + if (writable) { + new_pud = kvm_pud_mkwrite(new_pud); + kvm_set_pfn_dirty(pfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pud = kvm_pud_mkexec(new_pud); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pud = kvm_pud_mkexec(new_pud); + } + + ret = apt_set_pud_huge(kvm, memcache, fault_gpa, &new_pud); + } else if (vma_pagesize == CONT_PMD_SIZE) { + pmd_t new_pmd = pfn_pmd(pfn, mem_type); + + new_pmd = pmd_mkhuge(new_pmd); + new_pmd = pmd_mkcont(new_pmd); + + if (writable) { + new_pmd = kvm_pmd_mkwrite(new_pmd); + kvm_set_pfn_dirty(pfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pmd = kvm_pmd_mkexec(new_pmd); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pmd = kvm_pmd_mkexec(new_pmd); + } + + ret = apt_set_pmd_huge(kvm, memcache, fault_gpa, &new_pmd, vma_pagesize); + } else if (vma_pagesize == PMD_SIZE) { + pmd_t new_pmd = pfn_pmd(pfn, mem_type); + + new_pmd = pmd_mkhuge(new_pmd); + + if (writable) { + new_pmd = kvm_pmd_mkwrite(new_pmd); + kvm_set_pfn_dirty(pfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pmd = kvm_pmd_mkexec(new_pmd); + } else if (fault_status == AF_STATUS_FOE) { + /* 
Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pmd = kvm_pmd_mkexec(new_pmd); + } + + ret = apt_set_pmd_huge(kvm, memcache, fault_gpa, &new_pmd, vma_pagesize); + } else { + pte_t new_pte = pfn_pte(pfn, mem_type); + + if (writable) { + new_pte = kvm_pte_mkwrite(new_pte); + kvm_set_pfn_dirty(pfn); + mark_page_dirty(kvm, gfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pte = kvm_pte_mkexec(new_pte); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pte = kvm_pte_mkexec(new_pte); + } + + ret = apt_set_pte_fast(kvm, memcache, fault_gpa, &new_pte, flags); + if (!ret) + goto out_unlock; + } + +out_unlock: + spin_unlock(&kvm->mmu_lock); + kvm_set_pfn_accessed(pfn); + kvm_release_pfn_clean(pfn); + return ret; +} + +/** + * kvm_handle_guest_abort - handles all 2nd stage aborts + * @vcpu: the VCPU pointer + * @run: the kvm_run structure + * + * Any abort that gets to the host is almost guaranteed to be caused by a + * missing second stage translation table entry, which can mean that either the + * guest simply needs more memory and we must allocate an appropriate page or it + * can mean that the guest tried to access I/O memory, which is emulated by user + * space. The distinction is based on the IPA causing the fault and whether this + * memory region has been registered as standard RAM by user space. 
+ */ +#ifdef CONFIG_SUBARCH_C4 +int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + unsigned long as_info; /* the value of CSR: AS_INFO */ + unsigned int access_type, inv_level; + unsigned int fault_status; + unsigned long fault_entry_addr; + phys_addr_t fault_gpa; + struct kvm_memory_slot *memslot; + unsigned long hva; + bool write_fault, writable; + gfn_t gfn; + + int ret, idx; + + as_info = read_csr(CSR_AS_INFO); + access_type = (as_info >> AF_ACCESS_TYPE_SHIFT) & AF_ACCESS_TYPE_MASK; + inv_level = (as_info >> AF_INV_LEVEL_SHIFT) & AF_INV_LEVEL_MASK; + fault_status = (as_info >> AF_FAULT_STATUS_SHIFT) & AF_FAULT_STATUS_MASK; + fault_entry_addr = (as_info & AF_ENTRY_ADDR_MASK) >> 3; + + fault_gpa = read_csr(CSR_EXC_GPA); + idx = srcu_read_lock(&vcpu->kvm->srcu); + + gfn = fault_gpa >> PAGE_SHIFT; + memslot = gfn_to_memslot(vcpu->kvm, gfn); + hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); + + write_fault = kvm_is_write_fault(access_type); + + /* The memory slot for IO doesn't register in memory region + * with kvm, if hva == KVM_HVA_ERR_BAD, the gpa used for MMIO + * needs emulation. 
+ */ + + if (hva == KVM_HVA_ERR_BAD) { + ret = io_mem_abort(vcpu, run, NULL); + goto out_unlock; + } + /* Userspace should not be able to register out-of-bounds IPAs */ + VM_BUG_ON(fault_gpa >= KVM_PHYS_SIZE); + + ret = user_mem_abort(vcpu, fault_gpa, memslot, hva, fault_status); + if (ret == 0) + ret = 1; +out_unlock: + srcu_read_unlock(&vcpu->kvm->srcu, idx); + return ret; +} +#endif +static int handle_hva_to_gpa(struct kvm *kvm, unsigned long start, unsigned long end, + int (*handler)(struct kvm *kvm, gpa_t gpa, u64 size, void *data), + void *data) +{ + struct kvm_memslots *slots; + struct kvm_memory_slot *memslot; + int ret = 0; + + slots = kvm_memslots(kvm); + + /* we only care about the pages that the guest sees */ + kvm_for_each_memslot(memslot, slots) { + unsigned long hva_start, hva_end; + gfn_t gpa; + + hva_start = max(start, memslot->userspace_addr); + hva_end = min(end, memslot->userspace_addr + + (memslot->npages << PAGE_SHIFT)); + if (hva_start >= hva_end) + continue; + + gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT; + ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data); + } + + return ret; +} + +static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) +{ + unmap_apt_range(kvm, gpa, size); + return 0; +} + +int kvm_unmap_hva_range(struct kvm *kvm, + unsigned long start, unsigned long end, bool blockable) +{ + if (!kvm->arch.pgd) + return 0; + + handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); + return 1; +} + +static int apt_ptep_test_and_clear_young(pte_t *pte) +{ + if (pte_young(*pte)) { + *pte = pte_mkold(*pte); + return 1; + } + return 0; +} + +static int apt_pmdp_test_and_clear_young(pmd_t *pmd) +{ + return apt_ptep_test_and_clear_young((pte_t *)pmd); +} + +static int apt_pudp_test_and_clear_young(pud_t *pud) +{ + return apt_ptep_test_and_clear_young((pte_t *)pud); +} + +static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) +{ + pud_t *pud; + pmd_t *pmd; + 
pte_t *pte; + + WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); + if (!apt_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte)) + return 0; + + if (pud) + return apt_pudp_test_and_clear_young(pud); + else if (pmd) + return apt_pmdp_test_and_clear_young(pmd); + else + return apt_ptep_test_and_clear_young(pte); +} + +static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) +{ + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); + if (!apt_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte)) + return 0; + + if (pud) + return apt_pudp_test_and_clear_young(pud); + else if (pmd) + return apt_pmdp_test_and_clear_young(pmd); + else + return apt_ptep_test_and_clear_young(pte); +} + +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) +{ + if (!kvm->arch.pgd) + return 0; + + return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); +} + +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +{ + if (!kvm->arch.pgd) + return 0; + return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL); +} + +static int kvm_set_apte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data) +{ + pte_t *pte = (pte_t *)data; + + WARN_ON(size != PAGE_SIZE); + + apt_set_pte(kvm, NULL, gpa, pte, 0); + return 0; +} + +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) +{ + unsigned long end = hva + PAGE_SIZE; + pte_t apt_pte; + + if (!kvm->arch.pgd) + return 0; + + apt_pte = pte_wrprotect(pte); + handle_hva_to_gpa(kvm, hva, end, &kvm_set_apte_handler, &apt_pte); + return 0; +} + +/** + * kvm_mmu_write_protect_pt_masked() - write protect dirty pages + * @kvm: The KVM pointer + * @slot: The memory slot associated with mask + * @gfn_offset: The gfn offset in memory slot + * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory + * slot to be write protected + * + * Walks bits set in mask write protects the associated pte's. 
Caller must + * acquire kvm_mmu_lock. + */ +static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, + gfn_t gfn_offset, unsigned long mask) +{ + phys_addr_t base_gfn = slot->base_gfn + gfn_offset; + phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; + phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; + + apt_wp_range(kvm, start, end); +} + +/* + * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected + * dirty pages. + * + * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to + * enable dirty logging for them. + */ +void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, + gfn_t gfn_offset, unsigned long mask) +{ + kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask); +} diff --git a/arch/sw_64/kvm/perf.c b/arch/sw_64/kvm/perf.c new file mode 100644 index 000000000000..730dd1feeccf --- /dev/null +++ b/arch/sw_64/kvm/perf.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance events support for KVM. 
+ */ + +#include +#include + +#include + + +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) +{ + return (vcpu->arch.regs.ps & 8) != 0; +} + +unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.regs.pc; +} + + + +static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu) +{ + return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu; +} diff --git a/arch/sw_64/kvm/sw64.c b/arch/sw_64/kvm/sw64.c new file mode 100644 index 000000000000..f6bfb2452938 --- /dev/null +++ b/arch/sw_64/kvm/sw64.c @@ -0,0 +1,592 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include "trace.h" +#include "irq.h" + +bool set_msi_flag; + + + +static unsigned long get_new_vpn_context(struct kvm_vcpu *vcpu, long cpu) +{ + unsigned long vpn = last_vpn(cpu); + unsigned long next = vpn + 1; + + if ((vpn & VPN_MASK) >= VPN_MASK) { + kvm_flush_tlb_all(); + next = (vpn & ~VPN_MASK) + VPN_FIRST_VERSION + 1; /* bypass 0 */ + } + last_vpn(cpu) = next; + return next; +} + +int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) +{ + set_bit(number, (vcpu->arch.irqs_pending)); + kvm_vcpu_kick(vcpu); + return 0; +} + +int kvm_arch_check_processor_compat(void *opaque) +{ + return 0; +} + +int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, + int level, bool line_status) +{ + unsigned int vcid; + unsigned int vcpu_idx; + struct kvm_vcpu *vcpu = NULL; + int irq = e->msi.data & 0xff; + + vcid = (e->msi.address_lo & VT_MSIX_ADDR_DEST_ID_MASK) >> VT_MSIX_ADDR_DEST_ID_SHIFT; + vcpu_idx = vcid & 0x1f; + vcpu = kvm_get_vcpu(kvm, vcpu_idx); + + if (!vcpu) + return -EINVAL; + + return vcpu_interrupt_line(vcpu, irq, true); +} + +void sw64_kvm_switch_vpn(struct kvm_vcpu *vcpu) +{ + unsigned long vpn; + unsigned long vpnc; + long cpu = smp_processor_id(); + + 
vpn = last_vpn(cpu); + vpnc = vcpu->arch.vpnc[cpu]; + + if ((vpnc ^ vpn) & ~VPN_MASK) { + /* vpnc and cpu vpn not in the same version, get new vpnc and vpn */ + vpnc = get_new_vpn_context(vcpu, cpu); + vcpu->arch.vpnc[cpu] = vpnc; + } + + vpn = vpnc & VPN_MASK; + + /* Always update vpn */ + /* Just setup vcb, hardware CSR will be changed later in HMcode */ + kvm_sw64_update_vpn(vcpu, vpn); + + /* + * If vcpu migrate to a new physical cpu, the new physical cpu may keep + * old tlb entries for this vcpu's vpn, upn in the old tlb entries and + * current vcpu's upn may not in the same version. + * For now, we don't know the vcpu's upn version and the current version. + * If we keep track of the vcpu's upn version, the TLB-flush could be less. + * To be safe and correct, flush all tlb entries of current vpn for now. + */ + + if (vcpu->arch.pcpu_id != cpu) { + tbivpn(0, 0, vpn); + vcpu->arch.pcpu_id = cpu; + vcpu->cpu = cpu; + } +} + +void check_vcpu_requests(struct kvm_vcpu *vcpu) +{ + unsigned long vpn; + long cpu = smp_processor_id(); + + if (kvm_request_pending(vcpu)) { + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { + vpn = vcpu->arch.vpnc[cpu] & VPN_MASK; + tbivpn(0, 0, vpn); + } + } +} + + +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) +{ + return ((!bitmap_empty(vcpu->arch.irqs_pending, SWVM_IRQS) || !vcpu->arch.halted) + && !vcpu->arch.power_off); +} + +int kvm_arch_hardware_enable(void) +{ + return 0; +} + +void kvm_arch_hardware_unsetup(void) +{ +} + +bool kvm_arch_has_vcpu_debugfs(void) +{ + return false; +} + +int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) +{ + return 0; +} + +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; +} + +int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) +{ + int r = 0; + + switch (ext) { + case KVM_CAP_IRQCHIP: + case KVM_CAP_IOEVENTFD: + case KVM_CAP_SYNC_MMU: + r = 1; + break; + case KVM_CAP_NR_VCPUS: + case KVM_CAP_MAX_VCPUS: + r = 
KVM_MAX_VCPUS; + break; + default: + r = 0; + } + + return r; +} + +void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ +} + +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) +{ + return test_bit(SW64_KVM_IRQ_TIMER, vcpu->arch.irqs_pending); +} + +int kvm_arch_hardware_setup(void *opaque) +{ + return 0; +} + +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) +{ + if (type) + return -EINVAL; + + return kvm_sw64_init_vm(kvm); +} + +void kvm_arch_destroy_vm(struct kvm *kvm) +{ + return kvm_sw64_destroy_vm(kvm); +} + +long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + return -EINVAL; +} + +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages) +{ + return 0; +} + +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) +{ + kvm_mmu_free_memory_caches(vcpu); + hrtimer_cancel(&vcpu->arch.hrt); +} + +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + kvm_arch_vcpu_free(vcpu); +} + +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) +{ + /* Set up the timer for Guest */ + pr_info("vcpu: [%d], regs addr = %#lx, vcpucb = %#lx\n", vcpu->vcpu_id, + (unsigned long)&vcpu->arch.regs, (unsigned long)&vcpu->arch.vcb); + vcpu->arch.vtimer_freq = cpuid(GET_CPU_FREQ, 0) * 1000UL * 1000UL; + hrtimer_init(&vcpu->arch.hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS); + vcpu->arch.hrt.function = clockdev_fn; + vcpu->arch.tsk = current; + + vcpu->arch.vcb.soft_cid = vcpu->vcpu_id; + vcpu->arch.vcb.vcpu_irq_disabled = 1; + vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */ + + return 0; +} + +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) +{ + return 0; +} + +int kvm_set_routing_entry(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *e, + const struct kvm_irq_routing_entry *ue) +{ + int r = -EINVAL; + + switch (ue->type) { + case KVM_IRQ_ROUTING_MSI: + e->set = kvm_set_msi; + e->msi.address_lo = ue->u.msi.address_lo; + e->msi.address_hi = 
ue->u.msi.address_hi; + e->msi.data = ue->u.msi.data; + e->msi.flags = ue->flags; + e->msi.devid = ue->u.msi.devid; + set_msi_flag = true; + break; + default: + goto out; + } + r = 0; +out: + return r; +} + +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr) +{ + return -EINVAL; /* not implemented yet */ +} + +int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) +{ + return 0; +} + + +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + vcpu->cpu = cpu; +} + +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) +{ + /* + * The arch-generic KVM code expects the cpu field of a vcpu to be -1 + * if the vcpu is no longer assigned to a cpu. This is used for the + * optimized make_all_cpus_request path. + */ + vcpu->cpu = -1; +} + +int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + memcpy(&(vcpu->arch.regs), regs, sizeof(struct kvm_regs)); + return 0; +} + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + memcpy(regs, &(vcpu->arch.regs), sizeof(struct kvm_regs)); + return 0; +} + +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) +{ + return 0; +} + + +/* + * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on + * proper exit to userspace. 
+ */ +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + struct vcpucb *vcb = &(vcpu->arch.vcb); + struct hcall_args hargs; + int irq, ret; + bool more; + sigset_t sigsaved; + + /* Set guest vcb */ + /* vpn will update later when vcpu is running */ + vcpu_set_numa_affinity(vcpu); +#ifdef CONFIG_PERF_EVENTS + vcpu_load(vcpu); +#endif + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + + if (run->exit_reason == KVM_EXIT_MMIO) + kvm_handle_mmio_return(vcpu, run); + + run->exit_reason = KVM_EXIT_UNKNOWN; + ret = 1; + while (ret > 0) { + /* Check conditions before entering the guest */ + cond_resched(); + + preempt_disable(); + local_irq_disable(); + + if (signal_pending(current)) { + ret = -EINTR; + run->exit_reason = KVM_EXIT_INTR; + } + + if (ret <= 0) { + local_irq_enable(); + preempt_enable(); + continue; + } + + memset(&hargs, 0, sizeof(hargs)); + + clear_vcpu_irq(vcpu); + + if (vcpu->arch.restart == 1) { + /* handle reset vCPU */ + vcpu->arch.regs.pc = GUEST_RESET_PC; + vcpu->arch.restart = 0; + } + + irq = interrupt_pending(vcpu, &more); + if (irq < SWVM_IRQS) + try_deliver_interrupt(vcpu, irq, more); + + vcpu->arch.halted = 0; + + sw64_kvm_switch_vpn(vcpu); + check_vcpu_requests(vcpu); + guest_enter_irqoff(); + + /* update aptp before the guest runs */ + update_aptp((unsigned long)vcpu->kvm->arch.pgd); + + /* Enter the guest */ + trace_kvm_sw64_entry(vcpu->vcpu_id, vcpu->arch.regs.pc); + vcpu->mode = IN_GUEST_MODE; + + ret = __sw64_vcpu_run(__pa(vcb), &(vcpu->arch.regs), &hargs); + + /* Back from guest */ + vcpu->mode = OUTSIDE_GUEST_MODE; + + local_irq_enable(); + guest_exit_irqoff(); + + trace_kvm_sw64_exit(ret, vcpu->arch.regs.pc); + + preempt_enable(); + + /* ret = 0 indicate interrupt in guest mode, ret > 0 indicate hcall */ + ret = handle_exit(vcpu, run, ret, &hargs); + } + + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &sigsaved, NULL); + +#ifdef CONFIG_PERF_EVENTS + 
vcpu_put(vcpu); +#endif + + return ret; +} + +long kvm_arch_vcpu_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + struct kvm_vcpu *vcpu = filp->private_data; + int r; + + switch (ioctl) { + case KVM_SW64_VCPU_INIT: + r = kvm_sw64_vcpu_reset(vcpu); + break; + case KVM_SW64_GET_VCB: + r = kvm_sw64_get_vcb(filp, arg); + break; + case KVM_SW64_SET_VCB: + r = kvm_sw64_set_vcb(filp, arg); + break; + default: + r = -EINVAL; + } + + return r; +} + +int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + struct kvm *kvm __maybe_unused = filp->private_data; + long r; + + switch (ioctl) { + case KVM_CREATE_IRQCHIP: { + struct kvm_irq_routing_entry routing; + + r = -EINVAL; + memset(&routing, 0, sizeof(routing)); + r = kvm_set_irq_routing(kvm, &routing, 0, 0); + break; + } + default: + r = -ENOIOCTLCMD; + } + return r; +} + +int kvm_arch_init(void *opaque) +{ + return 0; +} + +void kvm_arch_exit(void) +{ +} + +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, + struct kvm_sregs *sregs) +{ + return -ENOIOCTLCMD; +} + +void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) +{ +} + +int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + return -ENOIOCTLCMD; +} + +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ + return VM_FAULT_SIGBUS; +} + +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, + struct kvm_memory_slot *memslot) +{ + /* Let implementation handle TLB/GVA invalidation */ + kvm_arch_flush_shadow_memslot(kvm, memslot); +} + +int kvm_dev_ioctl_check_extension(long ext) +{ + int r; + + switch (ext) { + case KVM_CAP_IOEVENTFD: + r = 1; + break; + case KVM_CAP_NR_VCPUS: + case KVM_CAP_MAX_VCPUS: + r = KVM_MAX_VCPUS; + break; + default: + 
r = 0; + } + + return r; +} + +void vcpu_send_ipi(struct kvm_vcpu *vcpu, int target_vcpuid, int type) +{ + struct kvm_vcpu *target_vcpu = kvm_get_vcpu(vcpu->kvm, target_vcpuid); + + if (type == II_RESET) + target_vcpu->arch.restart = 1; + + if (target_vcpu != NULL) + vcpu_interrupt_line(target_vcpu, 1, 1); +} + +int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, + bool line_status) +{ + u32 irq = irq_level->irq; + unsigned int irq_num; + struct kvm_vcpu *vcpu = NULL; + bool level = irq_level->level; + + irq_num = irq; + /* target core for Intx is core0 */ + vcpu = kvm_get_vcpu(kvm, 0); + if (!vcpu) + return -EINVAL; + + return vcpu_interrupt_line(vcpu, irq_num, level); +} + + +const struct _kvm_stats_desc kvm_vm_stats_desc[] = { + KVM_GENERIC_VM_STATS() +}; + +const struct kvm_stats_header kvm_vm_stats_header = { + .name_size = KVM_STATS_NAME_SIZE, + .num_desc = ARRAY_SIZE(kvm_vm_stats_desc), + .id_offset = sizeof(struct kvm_stats_header), + .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, + .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + + sizeof(kvm_vm_stats_desc), +}; + +const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { + KVM_GENERIC_VCPU_STATS(), +}; + +const struct kvm_stats_header kvm_vcpu_stats_header = { + .name_size = KVM_STATS_NAME_SIZE, + .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc), + .id_offset = sizeof(struct kvm_stats_header), + .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, + .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + + sizeof(kvm_vcpu_stats_desc), +}; + + + +bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) +{ + return irqchip_in_kernel(kvm); +} diff --git a/arch/sw_64/kvm/trace.h b/arch/sw_64/kvm/trace.h new file mode 100644 index 000000000000..2611df3d3fa5 --- /dev/null +++ b/arch/sw_64/kvm/trace.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#if !defined(_SW64_KVM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) 
+#define _SW64_KVM_TRACE_H + +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm + +/* + * Tracepoint for guest mode entry. + */ +TRACE_EVENT(kvm_sw64_entry, + TP_PROTO(unsigned int vcpu_id, unsigned int vcpu_pc), + TP_ARGS(vcpu_id, vcpu_pc), + + TP_STRUCT__entry( + __field(unsigned int, vcpu_id) + __field(unsigned int, vcpu_pc) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu_id; + __entry->vcpu_pc = vcpu_pc; + ), + + TP_printk("VCPU %u: PC: 0x%08x", __entry->vcpu_id, __entry->vcpu_pc) +); + +/* + * Tracepoint for guest mode exit. + */ + +TRACE_EVENT(kvm_sw64_exit, + TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc), + TP_ARGS(exit_reason, vcpu_pc), + + TP_STRUCT__entry( + __field(unsigned int, exit_reason) + __field(unsigned long, vcpu_pc) + ), + + TP_fast_assign( + __entry->exit_reason = exit_reason; + __entry->vcpu_pc = vcpu_pc; + ), + + TP_printk("exit_reason: 0x%04x (%11s), PC: 0x%08lx", + __entry->exit_reason, + __print_symbolic(__entry->exit_reason, kvm_sw64_exception_type), + __entry->vcpu_pc) +); + +#endif /* _SW64_KVM_TRACE_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include diff --git a/arch/sw_64/kvm/vmem.c b/arch/sw_64/kvm/vmem.c new file mode 100644 index 000000000000..688449b65fa5 --- /dev/null +++ b/arch/sw_64/kvm/vmem.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include + +static bool addr_in_pool(struct gen_pool *pool, + unsigned long start, size_t size) +{ + bool found = false; + unsigned long end = start + size - 1; + struct gen_pool_chunk *chunk; + + rcu_read_lock(); + list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { + if (start >= chunk->start_addr && start <= chunk->end_addr) { + if (end <= chunk->end_addr) { + found = true; + break; + } + } + } + rcu_read_unlock(); + return found; +} + +static int vmem_vm_insert_page(struct vm_area_struct *vma) +{ + unsigned long addr, uaddr; + struct page *vmem_page; + struct vmem_info *info; + size_t size; + int ret; + + info = vma->vm_private_data; + addr = info->start; + size = info->size; + uaddr = vma->vm_start; + + vm_flags_init(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP); + vmem_page = pfn_to_page(addr >> PAGE_SHIFT); + do { + ret = vm_insert_page(vma, uaddr, vmem_page); + if (ret < 0) { + pr_info("vm_insert_page failed: %d\n", ret); + return ret; + } + vmem_page++; + uaddr += PAGE_SIZE; + size -= PAGE_SIZE; + } while (size > 0); + + return 0; +} + +static void vmem_vm_open(struct vm_area_struct *vma) +{ + struct vmem_info *info = vma->vm_private_data; + + atomic_inc(&info->refcnt); +} + +static void vmem_vm_close(struct vm_area_struct *vma) +{ + unsigned long addr; + size_t size; + struct vmem_info *info; + + info = vma->vm_private_data; + addr = info->start; + size = round_up(info->size, 8 << 20); + + if (atomic_dec_and_test(&info->refcnt)) { + if (sw64_kvm_pool && addr_in_pool(sw64_kvm_pool, addr, size)) { + pr_info("gen pool free addr: %#lx, size: %#lx\n", + 
addr, size); + gen_pool_free(sw64_kvm_pool, addr, size); + } + kfree(info); + } +} + +const struct vm_operations_struct vmem_vm_ops = { + .open = vmem_vm_open, + .close = vmem_vm_close, +}; +EXPORT_SYMBOL_GPL(vmem_vm_ops); + +static int vmem_open(struct inode *inode, struct file *flip) +{ + flip->private_data = NULL; + return 0; +} + +static loff_t vmem_llseek(struct file *filp, loff_t offset, int whence) +{ + loff_t newpos = 256UL << 30; + return newpos; +} + +static int vmem_release(struct inode *inode, struct file *flip) +{ + return 0; +} + +static int vmem_mmap(struct file *flip, struct vm_area_struct *vma) +{ + unsigned long addr; + static struct vmem_info *info; + size_t size = vma->vm_end - vma->vm_start; + int ret; + + if (!(vma->vm_flags & VM_SHARED)) { + pr_err("%s: mapping must be shared\n", __func__); + return -EINVAL; + } + + if (!sw64_kvm_pool) + return -ENOMEM; + + if (flip->private_data == NULL) { + addr = gen_pool_alloc(sw64_kvm_pool, round_up(size, 8 << 20)); + if (!addr) + return -ENOMEM; + + info = kzalloc(sizeof(struct vmem_info), GFP_KERNEL); + pr_info("guest phys addr=%#lx, size=%#lx\n", addr, size); + info->start = addr; + info->size = size; + flip->private_data = (void *)info; + } else { + info = flip->private_data; + addr = info->start; + } + + vma->vm_private_data = (void *)info; + vma->vm_ops = &vmem_vm_ops; + vma->vm_ops->open(vma); + + /*to do if size bigger than vm_mem_size*/ + pr_info("sw64_vmem: vm_start=%#lx, size= %#lx\n", vma->vm_start, size); + + vmem_vm_insert_page(vma); + if (ret < 0) + return ret; + + return 0; +} + +static const struct file_operations vmem_fops = { + .owner = THIS_MODULE, + .open = vmem_open, + .llseek = vmem_llseek, + .release = vmem_release, + .mmap = vmem_mmap, +}; + +static struct miscdevice vmem_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "sw64_vmem", + .fops = &vmem_fops, +}; + +int __init vmem_init(void) +{ + int err; + + err = misc_register(&vmem_dev); + if (err != 0) { + pr_err("Could not 
register sw64_vmem device\n"); + return err; + } + return 0; +} + +void vmem_exit(void) +{ + misc_deregister(&vmem_dev); +} -- Gitee From 34e3c3dac1e8462fcb554605a97529289b492dad Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:35 +0800 Subject: [PATCH 0306/2138] anolis: sw64: add stacktrace support ANBZ: #4688 Add stacktrace support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/stacktrace.h | 72 ++++++++ arch/sw_64/kernel/stacktrace.c | 247 ++++++++++++++++++++++++++++ 2 files changed, 319 insertions(+) create mode 100644 arch/sw_64/include/asm/stacktrace.h create mode 100644 arch/sw_64/kernel/stacktrace.c diff --git a/arch/sw_64/include/asm/stacktrace.h b/arch/sw_64/include/asm/stacktrace.h new file mode 100644 index 000000000000..958c9892fd6d --- /dev/null +++ b/arch/sw_64/include/asm/stacktrace.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_STACKTRACE_H +#define _ASM_SW64_STACKTRACE_H + +#include +#include +#include +#include +#include + +struct stackframe { + unsigned long pc; + unsigned long fp; +}; + +enum stack_type { + STACK_TYPE_UNKNOWN, + STACK_TYPE_TASK, +}; + +struct stack_info { + unsigned long low; + unsigned long high; + enum stack_type type; +}; + +/* The form of the top of the frame on the stack */ +struct stack_frame { + unsigned long return_address; + struct stack_frame *next_frame; +}; + +extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame); +extern void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data); + +static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp, + struct stack_info *info) +{ + unsigned long low = (unsigned long)task_stack_page(tsk); + unsigned long high = low + THREAD_SIZE; + + if (sp < low || sp >= high) + return 
false; + + if (info) { + info->low = low; + info->high = high; + info->type = STACK_TYPE_TASK; + } + + return true; +} + +/* + * We can only safely access per-cpu stacks from current in a non-preemptible + * context. + */ +static inline bool on_accessible_stack(struct task_struct *tsk, + unsigned long sp, + struct stack_info *info) +{ + if (on_task_stack(tsk, sp, info)) + return true; + if (tsk != current || preemptible()) + return false; + + return false; +} + +#endif /* _ASM_SW64_STACKTRACE_H */ diff --git a/arch/sw_64/kernel/stacktrace.c b/arch/sw_64/kernel/stacktrace.c new file mode 100644 index 000000000000..ff00506d5b82 --- /dev/null +++ b/arch/sw_64/kernel/stacktrace.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Stack trace management functions + * + * Copyright (C) 2018 snyh + */ +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * sw_64 PCS assigns the frame pointer to r15. + * + * A simple function prologue looks like this: + * ldi sp,-xx(sp) + * stl ra,0(sp) + * stl fp,8(sp) + * mov sp,fp + * + * A simple function epilogue looks like this: + * mov fp,sp + * ldl ra,0(sp) + * ldl fp,8(sp) + * ldi sp,+xx(sp) + */ + +#ifdef CONFIG_FRAME_POINTER + +int unwind_frame(struct task_struct *tsk, struct stackframe *frame) +{ + unsigned long fp = frame->fp; + + if (fp & 0x7) + return -EINVAL; + + if (!tsk) + tsk = current; + + if (!on_accessible_stack(tsk, fp, NULL)) + return -EINVAL; + + frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); + frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8)); + + /* + * Frames created upon entry from user have NULL FP and PC values, so + * don't bother reporting these. Frames created by __noreturn functions + * might have a valid FP even if PC is bogus, so only terminate where + * both are NULL. 
+ */ + if (!frame->fp && !frame->pc) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(unwind_frame); + +void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data) +{ + unsigned long pc, fp; + + struct stackframe frame; + + if (regs) { + unsigned long offset; + + pc = regs->pc; + fp = regs->regs[15]; + if (kallsyms_lookup_size_offset(pc, NULL, &offset) + && offset < 16) { + /* call stack has not been setup + * store pc first then loop from ra + */ + if (fn(pc, data)) + return; + pc = regs->regs[26]; + } + } else if (tsk == current || tsk == NULL) { + fp = (unsigned long)__builtin_frame_address(0); + pc = (unsigned long)walk_stackframe; + } else { + fp = tsk->thread.s[6]; + pc = tsk->thread.ra; + } + + if (!__kernel_text_address(pc) || fn(pc, data)) + return; + + frame.pc = pc; + frame.fp = fp; + while (1) { + int ret; + + ret = unwind_frame(tsk, &frame); + if (ret < 0) + break; + + if (fn(frame.pc, data)) + break; + } +} +EXPORT_SYMBOL_GPL(walk_stackframe); + +#else /* !CONFIG_FRAME_POINTER */ +void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data) +{ + unsigned long *ksp; + unsigned long sp, pc; + + if (regs) { + sp = (unsigned long)(regs+1); + pc = regs->pc; + } else if (tsk == current || tsk == NULL) { + register unsigned long current_sp __asm__ ("$30"); + sp = current_sp; + pc = (unsigned long)walk_stackframe; + } else { + sp = tsk->thread.sp; + pc = tsk->thread.ra; + } + + ksp = (unsigned long *)sp; + + while (!kstack_end(ksp)) { + if (__kernel_text_address(pc) && fn(pc, data)) + break; + pc = *ksp++; + } +} +EXPORT_SYMBOL_GPL(walk_stackframe); + +#endif/* CONFIG_FRAME_POINTER */ + +static int print_address_trace(unsigned long pc, void *data) +{ + print_ip_sym((const char *)data, pc); + return 0; +} + +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) +{ + pr_info("Trace:\n"); + walk_stackframe(task, NULL, 
print_address_trace, (void *)loglvl); +} + +#ifdef CONFIG_STACKTRACE +/* + * Save stack-backtrace addresses into a stack_trace buffer. + */ +struct stack_trace_data { + struct stack_trace *trace; + unsigned int nosched; +}; + +int save_trace(unsigned long pc, void *d) +{ + struct stack_trace_data *data = d; + struct stack_trace *trace = data->trace; + + if (data->nosched && in_sched_functions(pc)) + return 0; + if (trace->skip > 0) { + trace->skip--; + return 0; + } + + trace->entries[trace->nr_entries++] = pc; + return (trace->nr_entries >= trace->max_entries); +} + +void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace) +{ + struct stack_trace_data data; + + data.trace = trace; + data.nosched = 0; + + walk_stackframe(current, regs, save_trace, &data); + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + +static void __save_stack_trace(struct task_struct *tsk, + struct stack_trace *trace, unsigned int nosched) +{ + struct stack_trace_data data; + + data.trace = trace; + data.nosched = nosched; + + walk_stackframe(tsk, NULL, save_trace, &data); + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + __save_stack_trace(tsk, trace, 1); +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); + +void save_stack_trace(struct stack_trace *trace) +{ + __save_stack_trace(current, trace, 0); +} +EXPORT_SYMBOL_GPL(save_stack_trace); +#endif + +static int save_pc(unsigned long pc, void *data) +{ + unsigned long *p = data; + *p = 0; + + if (!in_sched_functions(pc)) + *p = pc; + + return *p; +} + +unsigned long __get_wchan(struct task_struct *tsk) +{ + unsigned long pc; + + if (!tsk || tsk == current || task_is_running(tsk)) + return 0; + walk_stackframe(tsk, NULL, save_pc, &pc); + + return pc; +} + +#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE +int save_stack_trace_tsk_reliable(struct 
task_struct *tsk, + struct stack_trace *trace) +{ + return 0; +} +#endif -- Gitee From dcfaf71ea8379ac400afc62287cfde7bc0d76565 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:31 +0800 Subject: [PATCH 0307/2138] anolis: sw64: add qspinlock support ANBZ: #4688 Add qspinlock support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/spinlock.h | 24 ++++++++++++++++++++++++ arch/sw_64/include/asm/spinlock_types.h | 8 ++++++++ 2 files changed, 32 insertions(+) create mode 100644 arch/sw_64/include/asm/spinlock.h create mode 100644 arch/sw_64/include/asm/spinlock_types.h diff --git a/arch/sw_64/include/asm/spinlock.h b/arch/sw_64/include/asm/spinlock.h new file mode 100644 index 000000000000..64358f32cd9a --- /dev/null +++ b/arch/sw_64/include/asm/spinlock.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#ifndef _ASM_SW64_SPINLOCK_H +#define _ASM_SW64_SPINLOCK_H + +#include +#include + +/* See include/linux/spinlock.h */ +#define smp_mb__after_spinlock() smp_mb() + +#endif /* _ASM_SW64_SPINLOCK_H */ diff --git a/arch/sw_64/include/asm/spinlock_types.h b/arch/sw_64/include/asm/spinlock_types.h new file mode 100644 index 000000000000..62e554e4f48c --- /dev/null +++ b/arch/sw_64/include/asm/spinlock_types.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SPINLOCK_TYPES_H +#define _ASM_SW64_SPINLOCK_TYPES_H + +#include +#include + +#endif /* _ASM_SW64_SPINLOCK_TYPES_H */ -- Gitee From 6e02950c8edb23264fa5a5a3b4602201ba94e61b Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:30 +0800 Subject: [PATCH 0308/2138] anolis: sw64: add perf events support ANBZ: #4688 Add basic perf events support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/perf_event.h | 16 + arch/sw_64/include/asm/pmc.h | 55 ++ arch/sw_64/include/uapi/asm/perf_regs.h | 41 ++ arch/sw_64/kernel/perf_event.c | 787 ++++++++++++++++++++++++ arch/sw_64/kernel/perf_regs.c | 33 + 5 files changed, 932 insertions(+) create mode 100644 arch/sw_64/include/asm/perf_event.h create mode 100644 arch/sw_64/include/asm/pmc.h create mode 100644 arch/sw_64/include/uapi/asm/perf_regs.h create mode 100644 arch/sw_64/kernel/perf_event.c create mode 100644 arch/sw_64/kernel/perf_regs.c diff --git a/arch/sw_64/include/asm/perf_event.h b/arch/sw_64/include/asm/perf_event.h new file mode 100644 index 000000000000..dc55a361babd --- /dev/null +++ b/arch/sw_64/include/asm/perf_event.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PERF_EVENT_H +#define _ASM_SW64_PERF_EVENT_H + +#include +#include + +#ifdef CONFIG_PERF_EVENTS +struct pt_regs; +extern unsigned long perf_instruction_pointer(struct 
pt_regs *regs); +extern unsigned long perf_misc_flags(struct pt_regs *regs); +#define perf_misc_flags(regs) perf_misc_flags(regs) +#define perf_arch_bpf_user_pt_regs(regs) ®s->user_regs +#endif + +#endif /* _ASM_SW64_PERF_EVENT_H */ diff --git a/arch/sw_64/include/asm/pmc.h b/arch/sw_64/include/asm/pmc.h new file mode 100644 index 000000000000..d5672dd940a7 --- /dev/null +++ b/arch/sw_64/include/asm/pmc.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for use with the sw64 PMC interface. + */ + +#ifndef _ASM_SW64_PMC_H +#define _ASM_SW64_PMC_H + +#define PMC_PC0 0 +#define PMC_PC1 1 + +/* Following commands are implemented on all CPUs */ +#define PMC_CMD_DISABLE 0 +#define PMC_CMD_ENABLE 1 +#define PMC_CMD_EVENT_BASE 2 +#define PMC_CMD_PM 4 +#define PMC_CMD_READ 5 +#define PMC_CMD_READ_CLEAR 6 +#define PMC_CMD_WRITE_BASE 7 + +#define PMC_DISABLE_BASE 1 + +#define PMC_ENABLE_BASE 1 + +#define PC0_RAW_BASE 0x0 +#define PC1_RAW_BASE 0x100 +#define PC0_MAX 0xF +#define PC1_MAX 0x3D + +#define SW64_PERFCTRL_KM 2 +#define SW64_PERFCTRL_UM 3 +#define SW64_PERFCTRL_AM 4 + +/* pc0 events */ +#define PC0_INSTRUCTIONS 0x0 +#define PC0_BRANCH_INSTRUCTIONS 0x3 +#define PC0_CPU_CYCLES 0x8 +#define PC0_ITB_READ 0x9 +#define PC0_DTB_READ 0xA +#define PC0_ICACHE_READ 0xB +#define PC0_DCACHE_READ 0xC +#define PC0_SCACHE_REFERENCES 0xD + +/* pc1 events */ +#define PC1_BRANCH_MISSES 0xB +#define PC1_SCACHE_MISSES 0x10 +#define PC1_ICACHE_READ_MISSES 0x16 +#define PC1_ITB_MISSES 0x17 +#define PC1_DTB_SINGLE_MISSES 0x30 +#define PC1_DCACHE_MISSES 0x32 + +#define MAX_HWEVENTS 2 +#define PMC_COUNT_MASK ((1UL << 58) - 1) + +#endif /* _ASM_SW64_PMC_H */ diff --git a/arch/sw_64/include/uapi/asm/perf_regs.h b/arch/sw_64/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000000..871ad4663d1d --- /dev/null +++ b/arch/sw_64/include/uapi/asm/perf_regs.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef 
_UAPI_ASM_SW64_PERF_REGS_H +#define _UAPI_ASM_SW64_PERF_REGS_H + +enum perf_event_sw64_regs { + PERF_REG_SW64_R0, + PERF_REG_SW64_R1, + PERF_REG_SW64_R2, + PERF_REG_SW64_R3, + PERF_REG_SW64_R4, + PERF_REG_SW64_R5, + PERF_REG_SW64_R6, + PERF_REG_SW64_R7, + PERF_REG_SW64_R8, + PERF_REG_SW64_R9, + PERF_REG_SW64_R10, + PERF_REG_SW64_R11, + PERF_REG_SW64_R12, + PERF_REG_SW64_R13, + PERF_REG_SW64_R14, + PERF_REG_SW64_R15, + PERF_REG_SW64_R16, + PERF_REG_SW64_R17, + PERF_REG_SW64_R18, + PERF_REG_SW64_R19, + PERF_REG_SW64_R20, + PERF_REG_SW64_R21, + PERF_REG_SW64_R22, + PERF_REG_SW64_R23, + PERF_REG_SW64_R24, + PERF_REG_SW64_R25, + PERF_REG_SW64_R26, + PERF_REG_SW64_R27, + PERF_REG_SW64_R28, + PERF_REG_SW64_GP, + PERF_REG_SW64_SP, + PERF_REG_SW64_PC, + PERF_REG_SW64_MAX, +}; +#endif /* _UAPI_ASM_SW64_PERF_REGS_H */ diff --git a/arch/sw_64/kernel/perf_event.c b/arch/sw_64/kernel/perf_event.c new file mode 100644 index 000000000000..83bb051be9de --- /dev/null +++ b/arch/sw_64/kernel/perf_event.c @@ -0,0 +1,787 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance events support for SW64 platforms. + * + * This code is based upon riscv and sparc perf event code. + */ + +#include +#include + +/* For tracking PMCs and the hw events they monitor on each CPU. */ +struct cpu_hw_events { + /* + * Set the bit (indexed by the counter number) when the counter + * is used for an event. + */ + unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; + /* Array of events current scheduled on this cpu. */ + struct perf_event *event[MAX_HWEVENTS]; +}; + +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +struct sw64_perf_event { + /* pmu index */ + int counter; + /* events selector */ + int event; +}; + +/* + * A structure to hold the description of the PMCs available on a particular + * type of SW64 CPU. 
+ */ +struct sw64_pmu_t { + /* generic hw/cache events table */ + const struct sw64_perf_event *hw_events; + const struct sw64_perf_event (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + + /* method used to map hw/cache events */ + const struct sw64_perf_event *(*map_hw_event)(u64 config); + const struct sw64_perf_event *(*map_cache_event)(u64 config); + + /* The number of entries in the hw_event_map */ + int max_events; + + /* The number of counters on this pmu */ + int num_pmcs; + + /* + * All PMC counters reside in the IBOX register PCTR. This is the + * LSB of the counter. + */ + int pmc_count_shift[MAX_HWEVENTS]; + + /* + * The mask that isolates the PMC bits when the LSB of the counter + * is shifted to bit 0. + */ + unsigned long pmc_count_mask; + + /* The maximum period the PMC can count. */ + unsigned long pmc_max_period; + + /* + * The maximum value that may be written to the counter due to + * hardware restrictions is pmc_max_period - pmc_left. + */ + long pmc_left; + + /* Subroutine for checking validity of a raw event for this PMU. */ + bool (*raw_event_valid)(u64 config); +}; + +/* + * The SW64 PMU description currently in operation. This is set during + * the boot process to the specific CPU of the machine. + */ +static const struct sw64_pmu_t *sw64_pmu; + +/* + * SW64 PMC event types + * + * There is no one-to-one mapping of the possible hw event types to the + * actual codes that are used to program the PMCs hence we introduce our + * own hw event type identifiers. 
+ */ +#define SW64_OP_UNSUP {-1, -1} + +/* Mapping of the hw event types to the perf tool interface */ +static const struct sw64_perf_event core3_hw_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = {PMC_PC0, PC0_CPU_CYCLES}, + [PERF_COUNT_HW_INSTRUCTIONS] = {PMC_PC0, PC0_INSTRUCTIONS}, + [PERF_COUNT_HW_CACHE_REFERENCES] = {PMC_PC0, PC0_SCACHE_REFERENCES}, + [PERF_COUNT_HW_CACHE_MISSES] = {PMC_PC1, PC1_SCACHE_MISSES}, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {PMC_PC0, PC0_BRANCH_INSTRUCTIONS}, + [PERF_COUNT_HW_BRANCH_MISSES] = {PMC_PC1, PC1_BRANCH_MISSES}, +}; + +/* Mapping of the hw cache event types to the perf tool interface */ +#define C(x) PERF_COUNT_HW_CACHE_##x +static const struct sw64_perf_event core3_cache_event_map + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_DCACHE_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_DCACHE_MISSES} + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_ICACHE_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_ICACHE_READ_MISSES}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_DTB_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_DTB_SINGLE_MISSES}, + }, + 
[C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_ITB_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_ITB_MISSES}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + +}; + +static const struct sw64_perf_event *core3_map_hw_event(u64 config) +{ + return &sw64_pmu->hw_events[config]; +} + +static const struct sw64_perf_event *core3_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + const struct sw64_perf_event *perf_event; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return ERR_PTR(-EINVAL); + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return ERR_PTR(-EINVAL); + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return ERR_PTR(-EINVAL); + + perf_event = &((*sw64_pmu->cache_events)[cache_type][cache_op][cache_result]); + if (perf_event->counter == -1) /* SW64_OP_UNSUP */ + return ERR_PTR(-ENOENT); + + return 
perf_event; +} + +/* + * r0xx for counter0, r1yy for counter1. + * According to the datasheet, 00 <= xx <= 0F, 00 <= yy <= 3D + */ +static bool core3_raw_event_valid(u64 config) +{ + if ((config >= PC0_RAW_BASE && config <= (PC0_RAW_BASE + PC0_MAX)) || + (config >= PC1_RAW_BASE && config <= (PC1_RAW_BASE + PC1_MAX))) + return true; + + pr_info("sw64 pmu: invalid raw event config %#llx\n", config); + return false; +} + +static const struct sw64_pmu_t core3_pmu = { + .max_events = ARRAY_SIZE(core3_hw_event_map), + .hw_events = core3_hw_event_map, + .map_hw_event = core3_map_hw_event, + .cache_events = &core3_cache_event_map, + .map_cache_event = core3_map_cache_event, + .num_pmcs = MAX_HWEVENTS, + .pmc_count_mask = PMC_COUNT_MASK, + .pmc_max_period = PMC_COUNT_MASK, + .pmc_left = 4, + .raw_event_valid = core3_raw_event_valid, +}; + +/* + * Low-level functions: reading/writing counters + */ +static void sw64_write_pmc(int idx, unsigned long val) +{ + wrperfmon(PMC_CMD_WRITE_BASE + idx, val); +} + +static unsigned long sw64_read_pmc(int idx) +{ + return wrperfmon(PMC_CMD_READ, idx); +} + +/* Set a new period to sample over */ +static int sw64_perf_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + long left = local64_read(&hwc->period_left); + long period = hwc->sample_period; + int overflow = 0; + unsigned long value; + + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + overflow = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + overflow = 1; + } + + if (left > (long)sw64_pmu->pmc_max_period) + left = sw64_pmu->pmc_max_period; + + value = sw64_pmu->pmc_max_period - left; + local64_set(&hwc->prev_count, value); + sw64_write_pmc(idx, value); + + perf_event_update_userpage(event); + + return overflow; +} + +/* + * Calculates the count (the 'delta') since the last time the PMC was read. 
+ * + * As the PMCs' full period can easily be exceeded within the perf system + * sampling period we cannot use any high order bits as a guard bit in the + * PMCs to detect overflow as is done by other architectures. The code here + * calculates the delta on the basis that there is no overflow when ovf is + * zero. The value passed via ovf by the interrupt handler corrects for + * overflow. + * + * This can be racey on rare occasions -- a call to this routine can occur + * with an overflowed counter just before the PMI service routine is called. + * The check for delta negative hopefully always rectifies this situation. + */ +static unsigned long sw64_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx, long ovf) +{ + long prev_raw_count, new_raw_count; + long delta; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = sw64_read_pmc(idx); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count - (prev_raw_count & sw64_pmu->pmc_count_mask)) + ovf; + + /* It is possible on very rare occasions that the PMC has overflowed + * but the interrupt is yet to come. Detect and fix this situation. + */ + if (unlikely(delta < 0)) + delta += sw64_pmu->pmc_max_period + 1; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +/* + * State transition functions: + * + * add()/del() & start()/stop() + * + */ + +/* + * pmu->start: start the event. 
+ */ +static void sw64_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + if (flags & PERF_EF_RELOAD) { + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + sw64_perf_event_set_period(event, hwc, hwc->idx); + } + + hwc->state = 0; + + /* counting in selected modes, for both counters */ + wrperfmon(PMC_CMD_PM, hwc->config_base); + wrperfmon(PMC_CMD_EVENT_BASE + hwc->idx, hwc->event_base); + wrperfmon(PMC_CMD_ENABLE, PMC_ENABLE_BASE + hwc->idx); +} + +/* + * pmu->stop: stop the counter + */ +static void sw64_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!(hwc->state & PERF_HES_STOPPED)) { + wrperfmon(PMC_CMD_DISABLE, PMC_DISABLE_BASE + hwc->idx); + hwc->state |= PERF_HES_STOPPED; + barrier(); + } + + if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + sw64_perf_event_update(event, hwc, hwc->idx, 0); + hwc->state |= PERF_HES_UPTODATE; + } +} + +/* + * pmu->add: add the event to PMU. + */ +static int sw64_pmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int err = 0; + unsigned long irq_flags; + + local_irq_save(irq_flags); + + if (__test_and_set_bit(hwc->idx, cpuc->used_mask)) { + err = -ENOSPC; + goto out; + } + + cpuc->event[hwc->idx] = event; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + sw64_pmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. */ + perf_event_update_userpage(event); + +out: + local_irq_restore(irq_flags); + + return err; +} + +/* + * pmu->del: delete the event from PMU. 
+ */ +static void sw64_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + unsigned long irq_flags; + + local_irq_save(irq_flags); + + sw64_pmu_stop(event, PERF_EF_UPDATE); + cpuc->event[hwc->idx] = NULL; + __clear_bit(event->hw.idx, cpuc->used_mask); + + /* Absorb the final count and turn off the event. */ + perf_event_update_userpage(event); + + local_irq_restore(irq_flags); +} + +/* + * pmu->read: read and update the counter + */ +static void sw64_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + sw64_perf_event_update(event, hwc, hwc->idx, 0); +} + +static bool supported_cpu(void) +{ + return true; +} + +static void hw_perf_event_destroy(struct perf_event *event) +{ + /* Nothing to be done! */ +} + +static int __hw_perf_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + const struct sw64_perf_event *event_type; + + + /* + * SW64 does not have per-counter usr/os/guest/host bits, + * we can distinguish exclude_user and exclude_kernel by + * sample mode. 
+ */ + if (event->attr.exclude_hv || event->attr.exclude_idle || + event->attr.exclude_host || event->attr.exclude_guest) + return -EINVAL; + + /* + * SW64 does not support precise ip feature, and system hang when + * detecting precise_ip by perf_event_attr__set_max_precise_ip + * in userspace + */ + if (attr->precise_ip != 0) + return -EOPNOTSUPP; + + /* SW64 has fixed counter for given event type */ + if (attr->type == PERF_TYPE_HARDWARE) { + if (attr->config >= sw64_pmu->max_events) + return -EINVAL; + event_type = sw64_pmu->map_hw_event(attr->config); + hwc->idx = event_type->counter; + hwc->event_base = event_type->event; + } else if (attr->type == PERF_TYPE_HW_CACHE) { + event_type = sw64_pmu->map_cache_event(attr->config); + if (IS_ERR(event_type)) /* */ + return PTR_ERR(event_type); + hwc->idx = event_type->counter; + hwc->event_base = event_type->event; + } else { /* PERF_TYPE_RAW */ + if (!sw64_pmu->raw_event_valid(attr->config)) + return -EINVAL; + hwc->idx = attr->config >> 8; /* counter selector */ + hwc->event_base = attr->config & 0xff; /* event selector */ + } + + hwc->config_base = SW64_PERFCTRL_AM; + + if (attr->exclude_user) + hwc->config_base = SW64_PERFCTRL_KM; + if (attr->exclude_kernel) + hwc->config_base = SW64_PERFCTRL_UM; + + hwc->config = attr->config; + + if (!is_sampling_event(event)) + pr_debug("not sampling event\n"); + + event->destroy = hw_perf_event_destroy; + + if (!hwc->sample_period) { + hwc->sample_period = sw64_pmu->pmc_max_period; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + return 0; +} + +/* + * Main entry point to initialise a HW performance event. 
+ */ +static int sw64_pmu_event_init(struct perf_event *event) +{ + int err; + + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + default: + return -ENOENT; + } + + if (!sw64_pmu) + return -ENODEV; + + /* Do the real initialisation work. */ + err = __hw_perf_event_init(event); + + return err; +} + +static struct pmu pmu = { + .name = "core3-base", + .capabilities = PERF_PMU_CAP_NO_NMI, + .event_init = sw64_pmu_event_init, + .add = sw64_pmu_add, + .del = sw64_pmu_del, + .start = sw64_pmu_start, + .stop = sw64_pmu_stop, + .read = sw64_pmu_read, +}; + +void perf_event_print_debug(void) +{ + unsigned long flags; + unsigned long pcr0, pcr1; + int cpu; + + if (!supported_cpu()) + return; + + local_irq_save(flags); + + cpu = smp_processor_id(); + + pcr0 = wrperfmon(PMC_CMD_READ, PMC_PC0); + pcr1 = wrperfmon(PMC_CMD_READ, PMC_PC1); + + pr_info("CPU#%d: PCTR0[%lx] PCTR1[%lx]\n", cpu, pcr0, pcr1); + + local_irq_restore(flags); +} + +static void sw64_perf_event_irq_handler(unsigned long idx, + struct pt_regs *regs) +{ + struct cpu_hw_events *cpuc; + struct perf_sample_data data; + struct perf_event *event; + struct hw_perf_event *hwc; + + __this_cpu_inc(irq_pmi_count); + cpuc = this_cpu_ptr(&cpu_hw_events); + + event = cpuc->event[idx]; + + if (unlikely(!event)) { + irq_err_count++; + return; + } + + hwc = &event->hw; + sw64_perf_event_update(event, hwc, idx, sw64_pmu->pmc_max_period + 1); + perf_sample_data_init(&data, 0, hwc->last_period); + + if (sw64_perf_event_set_period(event, hwc, idx)) { + if (perf_event_overflow(event, &data, regs)) { + /* Interrupts coming too quickly; "throttle" the + * counter, i.e., disable it for a little while. 
+ */ + sw64_pmu_stop(event, 0); + } + } +} + +bool valid_utext_addr(unsigned long addr) +{ + return addr >= current->mm->start_code && addr <= current->mm->end_code; +} + +bool valid_dy_addr(unsigned long addr) +{ + bool ret = false; + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + + if (addr > TASK_SIZE || addr < TASK_UNMAPPED_BASE) + return ret; + vma = find_vma(mm, addr); + if (vma && vma->vm_start <= addr && (vma->vm_flags & VM_EXEC)) + ret = true; + return ret; +} + +#ifdef CONFIG_FRAME_POINTER +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + + struct stack_frame frame; + unsigned long __user *fp; + int err; + + perf_callchain_store(entry, regs->pc); + + fp = (unsigned long __user *)regs->regs[15]; + + while (entry->nr < entry->max_stack && (unsigned long)fp < current->mm->start_stack) { + if (!access_ok(fp, sizeof(frame))) + break; + + pagefault_disable(); + err = __copy_from_user_inatomic(&frame, fp, sizeof(frame)); + pagefault_enable(); + + if (err) + break; + + if (valid_utext_addr(frame.return_address) || valid_dy_addr(frame.return_address)) + perf_callchain_store(entry, frame.return_address); + fp = (void __user *)frame.next_frame; + } +} +#else /* !CONFIG_FRAME_POINTER */ +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + unsigned long usp = rdusp(); + unsigned long user_addr; + int err; + + perf_callchain_store(entry, regs->pc); + + while (entry->nr < entry->max_stack && usp < current->mm->start_stack) { + if (!access_ok((const void __user *)usp, 8)) + break; + + pagefault_disable(); + err = __get_user(user_addr, (unsigned long *)usp); + pagefault_enable(); + + if (err) + break; + + if (valid_utext_addr(user_addr) || valid_dy_addr(user_addr)) + perf_callchain_store(entry, user_addr); + usp = usp + 8; + } +} +#endif/* CONFIG_FRAME_POINTER */ + +/* + * Gets called by walk_stackframe() for every stackframe. 
This will be called + * whilst unwinding the stackframe and is like a subroutine return so we use + * the PC. + */ +static int callchain_trace(unsigned long pc, void *data) +{ + struct perf_callchain_entry_ctx *entry = data; + + perf_callchain_store(entry, pc); + return 0; +} + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + walk_stackframe(NULL, regs, callchain_trace, entry); +} + +/* + * Gets the perf_instruction_pointer and perf_misc_flags for guest os. + */ + +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ + if (perf_guest_state()) + return perf_guest_get_ip(); + + return instruction_pointer(regs); +} + +unsigned long perf_misc_flags(struct pt_regs *regs) +{ + unsigned int guest_state = perf_guest_state(); + int misc = 0; + + if (guest_state) { + if (guest_state & PERF_GUEST_USER) + misc |= PERF_RECORD_MISC_GUEST_USER; + else + misc |= PERF_RECORD_MISC_GUEST_KERNEL; + } else { + if (user_mode(regs)) + misc |= PERF_RECORD_MISC_USER; + else + misc |= PERF_RECORD_MISC_KERNEL; + } + + return misc; +} + +/* + * Init call to initialise performance events at kernel startup. 
+ */ +int __init init_hw_perf_events(void) +{ + if (!supported_cpu()) { + pr_info("Performance events: Unsupported CPU type!\n"); + return 0; + } + + pr_info("Performance events: Supported CPU type!\n"); + + /* Override performance counter IRQ vector */ + + perf_irq = sw64_perf_event_irq_handler; + + /* And set up PMU specification */ + sw64_pmu = &core3_pmu; + + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + + return 0; +} +early_initcall(init_hw_perf_events); diff --git a/arch/sw_64/kernel/perf_regs.c b/arch/sw_64/kernel/perf_regs.c new file mode 100644 index 000000000000..b036f213936b --- /dev/null +++ b/arch/sw_64/kernel/perf_regs.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + if (WARN_ON_ONCE((u32)idx >= PERF_REG_SW64_MAX)) + return 0; + + return ((unsigned long *)regs)[idx]; +} + +#define REG_RESERVED (~((1ULL << PERF_REG_SW64_MAX) - 1)) + +int perf_reg_validate(u64 mask) +{ + if (!mask || mask & REG_RESERVED) + return -EINVAL; + return 0; +} + +u64 perf_reg_abi(struct task_struct *task) +{ + return PERF_SAMPLE_REGS_ABI_64; +} + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} -- Gitee From 1c6e2a599ec32115d3563a73826481d70e6b9a41 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:17 +0800 Subject: [PATCH 0309/2138] anolis: sw64: add kexec support ANBZ: #4688 Add kexec support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/kexec.h | 82 ++++++++++++ arch/sw_64/kernel/machine_kexec.c | 209 ++++++++++++++++++++++++++++++ 2 files changed, 291 insertions(+) create mode 100644 arch/sw_64/include/asm/kexec.h create mode 100644 arch/sw_64/kernel/machine_kexec.c diff --git a/arch/sw_64/include/asm/kexec.h b/arch/sw_64/include/asm/kexec.h new file mode 100644 index 000000000000..25e0d8da84f8 --- /dev/null +++ b/arch/sw_64/include/asm/kexec.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KEXEC_H +#define _ASM_SW64_KEXEC_H + +#ifdef CONFIG_KEXEC + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +#define KEXEC_CONTROL_PAGE_SIZE 8192 + +#define KEXEC_ARCH KEXEC_ARCH_SW64 + +#define KEXEC_SW64_ATAGS_OFFSET 0x1000 +#define KEXEC_SW64_ZIMAGE_OFFSET 0x8000 + +#ifndef __ASSEMBLY__ + +/** + * crash_setup_regs() - save registers for the panic kernel + * @newregs: registers are saved here + * @oldregs: registers to be saved (may be %NULL) + * + * Function copies machine registers from @oldregs to @newregs. If @oldregs is + * %NULL then current registers are stored there. 
+ */ +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + if (oldregs) { + memcpy(newregs, oldregs, sizeof(*newregs)); + } else { + __asm__ __volatile__ ("stl $0, %0" : "=m" (newregs->regs[0])); + __asm__ __volatile__ ("stl $1, %0" : "=m" (newregs->regs[1])); + __asm__ __volatile__ ("stl $2, %0" : "=m" (newregs->regs[2])); + __asm__ __volatile__ ("stl $3, %0" : "=m" (newregs->regs[3])); + __asm__ __volatile__ ("stl $4, %0" : "=m" (newregs->regs[4])); + __asm__ __volatile__ ("stl $5, %0" : "=m" (newregs->regs[5])); + __asm__ __volatile__ ("stl $6, %0" : "=m" (newregs->regs[6])); + __asm__ __volatile__ ("stl $7, %0" : "=m" (newregs->regs[7])); + __asm__ __volatile__ ("stl $8, %0" : "=m" (newregs->regs[8])); + __asm__ __volatile__ ("stl $9, %0" : "=m" (newregs->regs[9])); + __asm__ __volatile__ ("stl $10, %0" : "=m" (newregs->regs[10])); + __asm__ __volatile__ ("stl $11, %0" : "=m" (newregs->regs[11])); + __asm__ __volatile__ ("stl $12, %0" : "=m" (newregs->regs[12])); + __asm__ __volatile__ ("stl $13, %0" : "=m" (newregs->regs[13])); + __asm__ __volatile__ ("stl $14, %0" : "=m" (newregs->regs[14])); + __asm__ __volatile__ ("stl $15, %0" : "=m" (newregs->regs[15])); + __asm__ __volatile__ ("stl $16, %0" : "=m" (newregs->regs[16])); + __asm__ __volatile__ ("stl $17, %0" : "=m" (newregs->regs[17])); + __asm__ __volatile__ ("stl $18, %0" : "=m" (newregs->regs[18])); + __asm__ __volatile__ ("stl $19, %0" : "=m" (newregs->regs[19])); + __asm__ __volatile__ ("stl $20, %0" : "=m" (newregs->regs[20])); + __asm__ __volatile__ ("stl $21, %0" : "=m" (newregs->regs[21])); + __asm__ __volatile__ ("stl $22, %0" : "=m" (newregs->regs[22])); + __asm__ __volatile__ ("stl $23, %0" : "=m" (newregs->regs[23])); + __asm__ __volatile__ ("stl $24, %0" : "=m" (newregs->regs[24])); + __asm__ __volatile__ ("stl $25, %0" : "=m" (newregs->regs[25])); + __asm__ __volatile__ ("stl $26, %0" : "=m" (newregs->regs[26])); + __asm__ __volatile__ ("stl $27, %0" : 
"=m" (newregs->regs[27])); + __asm__ __volatile__ ("stl $28, %0" : "=m" (newregs->regs[28])); + __asm__ __volatile__ ("stl $29, %0" : "=m" (newregs->regs[29])); + __asm__ __volatile__ ("stl $30, %0" : "=m" (newregs->regs[30])); + newregs->pc = (unsigned long)current_text_addr(); + } +} + +/* Function pointer to optional machine-specific reinitialization */ +extern void (*kexec_reinit)(void); + +#endif /* __ASSEMBLY__ */ + +struct kimage; +extern unsigned long kexec_args[4]; + +#endif /* CONFIG_KEXEC */ + +#endif /* _ASM_SW64_KEXEC_H */ diff --git a/arch/sw_64/kernel/machine_kexec.c b/arch/sw_64/kernel/machine_kexec.c new file mode 100644 index 000000000000..950998476cda --- /dev/null +++ b/arch/sw_64/kernel/machine_kexec.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * machine_kexec.c for kexec + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ +#include +#include +#include +#include +#include + +#include + +extern void *kexec_control_page; +extern const unsigned char relocate_new_kernel[]; +extern const size_t relocate_new_kernel_size; + +extern unsigned long kexec_start_address; +extern unsigned long kexec_indirection_page; + +static atomic_t waiting_for_crash_ipi; + +#ifdef CONFIG_SMP +extern struct smp_rcb_struct *smp_rcb; + +/* + * Wait for relocation code is prepared and send + * secondary CPUs to spin until kernel is relocated. 
+ */ +static void kexec_smp_down(void *ignored) +{ + int cpu = smp_processor_id(); + + local_irq_disable(); + while (READ_ONCE(smp_rcb->ready) != 0) + mdelay(1); + set_cpu_online(cpu, false); + reset_cpu(cpu); +} +#endif + +int machine_kexec_prepare(struct kimage *kimage) +{ + return 0; +} + +void machine_kexec_cleanup(struct kimage *kimage) +{ +} + +void machine_shutdown(void) +{ +#ifdef CONFIG_SMP + WRITE_ONCE(smp_rcb->ready, 0); + smp_call_function(kexec_smp_down, NULL, 0); + smp_wmb(); + while (num_online_cpus() > 1) { + cpu_relax(); + mdelay(1); + } +#endif +} + +#ifdef CONFIG_SMP +static void machine_crash_nonpanic_core(void *unused) +{ + int cpu; + struct pt_regs regs; + + cpu = smp_processor_id(); + + local_irq_disable(); + crash_setup_regs(®s, NULL); + pr_debug("CPU %u will stop doing anything useful since another CPU has crashed\n", cpu); + crash_save_cpu(®s, cpu); + flush_cache_all(); + + set_cpu_online(cpu, false); + atomic_dec(&waiting_for_crash_ipi); + while (READ_ONCE(smp_rcb->ready) != 0) + mdelay(1); + if (cpu != 0) + reset_cpu(cpu); + else + machine_kexec(kexec_crash_image); +} +#else +static inline void machine_crash_nonpanic_core(void *unused) { } +#endif + +static void machine_kexec_mask_interrupts(void) +{ + unsigned int i; + struct irq_desc *desc; + + for_each_irq_desc(i, desc) { + struct irq_chip *chip; + + chip = irq_desc_get_chip(desc); + if (!chip) + continue; + + if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) + chip->irq_eoi(&desc->irq_data); + + if (chip->irq_mask) + chip->irq_mask(&desc->irq_data); + + if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) + chip->irq_disable(&desc->irq_data); + } +} + +void machine_crash_shutdown(struct pt_regs *regs) +{ + int cpu; + unsigned long msecs; + + cpu = smp_processor_id(); + local_irq_disable(); + kernel_restart_prepare(NULL); + atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); + smp_call_function(machine_crash_nonpanic_core, NULL, false); + msecs = 1000; /* Wait 
at most a second for the other cpus to stop */ + while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { + mdelay(1); + msecs--; + } + if (atomic_read(&waiting_for_crash_ipi) > 0) + pr_warn("Non-crashing CPUs did not react to IPI\n"); + + crash_save_cpu(regs, cpu); + machine_kexec_mask_interrupts(); + pr_info("Loading crashdump kernel...\n"); +#ifdef CONFIG_SMP + WRITE_ONCE(smp_rcb->ready, 0); + if (cpu != 0) + reset_cpu(cpu); +#endif +} + +#define phys_to_ktext(pa) (__START_KERNEL_map + (pa)) + +typedef void (*noretfun_t)(void) __noreturn; + +void machine_kexec(struct kimage *image) +{ + void *reboot_code_buffer; + unsigned long entry; + unsigned long *ptr; + struct boot_params *params = sunway_boot_params; + + + reboot_code_buffer = kexec_control_page; + pr_info("reboot_code_buffer = %px\n", reboot_code_buffer); + kexec_start_address = phys_to_ktext(image->start); + pr_info("kexec_start_address = %#lx\n", kexec_start_address); + if (image->type == KEXEC_TYPE_DEFAULT) + kexec_indirection_page = + (unsigned long) phys_to_virt(image->head & PAGE_MASK); + else + kexec_indirection_page = (unsigned long)&image->head; + + pr_info("kexec_indirection_page = %#lx, image->head=%#lx\n", + kexec_indirection_page, image->head); + + params->cmdline = kexec_start_address - COMMAND_LINE_OFF; + params->initrd_start = *(__u64 *)(kexec_start_address - INITRD_START_OFF); + params->initrd_size = *(__u64 *)(kexec_start_address - INITRD_SIZE_OFF); + + pr_info("initrd_start = %#llx, initrd_size = %#llx\n" + "dtb_start = %#llx, efi_systab = %#llx\n" + "efi_memmap = %#llx, efi_memmap_size = %#llx\n" + "efi_memdesc_size = %#llx, efi_memdesc_version = %#llx\n" + "cmdline = %#llx\n", + params->initrd_start, params->initrd_size, + params->dtb_start, params->efi_systab, + params->efi_memmap, params->efi_memmap_size, + params->efi_memdesc_size, params->efi_memdesc_version, + params->cmdline); + + memcpy(reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); + + /* + * The 
generic kexec code builds a page list with physical + * addresses. they are directly accessible through KSEG0 (or + * CKSEG0 or XPHYS if on 64bit system), hence the + * phys_to_virt() call. + */ + for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); + ptr = (entry & IND_INDIRECTION) ? + phys_to_virt(entry & PAGE_MASK) : ptr + 1) { + if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION || + *ptr & IND_DESTINATION) + *ptr = (unsigned long) phys_to_virt(*ptr); + } + + /* + * we do not want to be bothered. + */ + local_irq_disable(); + + pr_info("Will call new kernel at %08lx\n", image->start); + pr_info("Bye ...\n"); + smp_wmb(); + ((noretfun_t) reboot_code_buffer)(); +} -- Gitee From a5688ab00b09283ea00be2431f2ea779a41ea21c Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:16 +0800 Subject: [PATCH 0310/2138] anolis: sw64: add kdump support ANBZ: #4688 Add kdump support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/kernel/crash_dump.c | 56 ++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) create mode 100644 arch/sw_64/kernel/crash_dump.c diff --git a/arch/sw_64/kernel/crash_dump.c b/arch/sw_64/kernel/crash_dump.c new file mode 100644 index 000000000000..4484673823b8 --- /dev/null +++ b/arch/sw_64/kernel/crash_dump.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sw_64/kernel/crash_dump.c + * + * Copyright (C) 2019 JN + * Author: He Sheng + * + * This code is taken from arch/x86/kernel/crash_dump_64.c + * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) + * Copyright (C) IBM Corporation, 2004. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
 + */ + +#include +#include + +/** + * copy_oldmem_page() - copy one page from old kernel memory + * @pfn: page frame number to be copied + * @buf: buffer where the copied page is placed + * @csize: number of bytes to copy + * @offset: offset in bytes into the page + * @userbuf: if set, @buf is in the user address space + * + * This function copies one page from old kernel memory into buffer pointed by + * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes + * copied or negative error in case of failure. + */ +ssize_t copy_oldmem_page(unsigned long pfn, char *buf, + size_t csize, unsigned long offset, + int userbuf) +{ + void *vaddr; + + if (!csize) + return 0; + + vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE); + if (!vaddr) + return -ENOMEM; + + if (userbuf) { + if (copy_to_user(buf, vaddr + offset, csize)) { + iounmap(vaddr); + return -EFAULT; + } + } else { + memcpy(buf, vaddr + offset, csize); + } + + iounmap(vaddr); + return csize; +} -- Gitee From ce105521af11a400a4fe03508206918728f345e1 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:03 +0800 Subject: [PATCH 0311/2138] anolis: sw64: add eBPF JIT support ANBZ: #4688 Add eBPF JIT support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/uapi/asm/bpf_perf_event.h | 9 + arch/sw_64/net/Makefile | 5 + arch/sw_64/net/bpf_jit.h | 368 +++++ arch/sw_64/net/bpf_jit_comp.c | 1455 ++++++++++++++++++ 4 files changed, 1837 insertions(+) create mode 100644 arch/sw_64/include/uapi/asm/bpf_perf_event.h create mode 100644 arch/sw_64/net/Makefile create mode 100644 arch/sw_64/net/bpf_jit.h create mode 100644 arch/sw_64/net/bpf_jit_comp.c diff --git a/arch/sw_64/include/uapi/asm/bpf_perf_event.h b/arch/sw_64/include/uapi/asm/bpf_perf_event.h new file mode 100644 index 000000000000..52f6f1e555f1 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/bpf_perf_event.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_BPF_PERF_EVENT_H +#define _UAPI_ASM_SW64_BPF_PERF_EVENT_H + +#include + +typedef struct user_pt_regs bpf_user_pt_regs_t; + +#endif /* _UAPI_ASM_SW64_BPF_PERF_EVENT_H */ diff --git a/arch/sw_64/net/Makefile b/arch/sw_64/net/Makefile new file mode 100644 index 000000000000..d4663b4bf509 --- /dev/null +++ b/arch/sw_64/net/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Arch-specific network modules +# +obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o diff --git a/arch/sw_64/net/bpf_jit.h b/arch/sw_64/net/bpf_jit.h new file mode 100644 index 000000000000..929036d8ea6b --- /dev/null +++ b/arch/sw_64/net/bpf_jit.h @@ -0,0 +1,368 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * BPF JIT compiler for SW64 + * + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef _SW64_NET_BPF_JIT_H +#define _SW64_NET_BPF_JIT_H + +/* SW64 instruction field shift */ +#define SW64_BPF_OPCODE_OFFSET 26 +#define SW64_BPF_RA_OFFSET 21 +#define SW64_BPF_RB_OFFSET 16 +#define SW64_BPF_SIMPLE_ALU_IMM_OFFSET 13 +#define SW64_BPF_SIMPLE_ALU_FUNC_OFFSET 5 +#define SW64_BPF_SIMPLE_ALU_RC_OFFSET 0 +#define SW64_BPF_LS_FUNC_OFFSET 12 + +/* SW64 instruction opcodes */ +#define SW64_BPF_OPCODE_CALL 0x01 +#define SW64_BPF_OPCODE_RET 0x02 +#define SW64_BPF_OPCODE_JMP 0x03 +#define SW64_BPF_OPCODE_BR 0x04 +#define SW64_BPF_OPCODE_BSR 0x05 +#define SW64_BPF_OPCODE_MISC 0x06 +#define SW64_BPF_OPCODE_LOCK 0x08 +#define SW64_BPF_OPCODE_ALU_REG 0x10 +#define SW64_BPF_OPCODE_ALU_IMM 0x12 +#define SW64_BPF_OPCODE_LDBU 0x20 +#define SW64_BPF_OPCODE_LDHU 0x21 +#define SW64_BPF_OPCODE_LDW 0x22 +#define SW64_BPF_OPCODE_LDL 0x23 +#define SW64_BPF_OPCODE_STB 0x28 +#define SW64_BPF_OPCODE_STH 0x29 +#define SW64_BPF_OPCODE_STW 0x2A +#define SW64_BPF_OPCODE_STL 0x2B +#define SW64_BPF_OPCODE_BEQ 0x30 +#define SW64_BPF_OPCODE_BNE 0x31 +#define SW64_BPF_OPCODE_BLT 0x32 +#define SW64_BPF_OPCODE_BLE 0x33 +#define SW64_BPF_OPCODE_BGT 0x34 +#define SW64_BPF_OPCODE_BGE 0x35 +#define SW64_BPF_OPCODE_BLBC 0x36 +#define SW64_BPF_OPCODE_BLBS 0x37 +#define SW64_BPF_OPCODE_LDI 0x3E +#define SW64_BPF_OPCODE_LDIH 0x3F + +/* SW64 MISC instructions function codes */ +#define SW64_BPF_FUNC_MISC_RD_F 0x1000 +#define SW64_BPF_FUNC_MISC_WR_F 0x1020 + +/* SW64 LOCK instructions function codes */ +#define SW64_BPF_FUNC_LOCK_LLDW 0x0 +#define SW64_BPF_FUNC_LOCK_LLDL 0x1 +#define SW64_BPF_FUNC_LOCK_LSTW 0x8 +#define 
SW64_BPF_FUNC_LOCK_LSTL 0x9 + +/* SW64 ALU instructions function codes */ +#define SW64_BPF_FUNC_ALU_ADDW 0x00 +#define SW64_BPF_FUNC_ALU_SUBW 0x01 +#define SW64_BPF_FUNC_ALU_ADDL 0x08 +#define SW64_BPF_FUNC_ALU_SUBL 0x09 +#define SW64_BPF_FUNC_ALU_MULW 0x10 +#define SW64_BPF_FUNC_ALU_MULL 0x18 +#define SW64_BPF_FUNC_ALU_CMPEQ 0x28 +#define SW64_BPF_FUNC_ALU_CMPLT 0x29 +#define SW64_BPF_FUNC_ALU_CMPLE 0x2A +#define SW64_BPF_FUNC_ALU_CMPULT 0x2B +#define SW64_BPF_FUNC_ALU_CMPULE 0x2C +#define SW64_BPF_FUNC_ALU_AND 0x38 +#define SW64_BPF_FUNC_ALU_BIC 0x39 +#define SW64_BPF_FUNC_ALU_BIS 0x3A +#define SW64_BPF_FUNC_ALU_ORNOT 0x3B +#define SW64_BPF_FUNC_ALU_XOR 0x3C +#define SW64_BPF_FUNC_ALU_EQV 0x3D +#define SW64_BPF_FUNC_ALU_SLL 0x48 +#define SW64_BPF_FUNC_ALU_SRL 0x49 +#define SW64_BPF_FUNC_ALU_SRA 0x4A +#define SW64_BPF_FUNC_ALU_ZAP 0x68 +#define SW64_BPF_FUNC_ALU_ZAPNOT 0x69 +#define SW64_BPF_FUNC_ALU_SEXTB 0x6A +#define SW64_BPF_FUNC_ALU_SEXTH 0x6B + +/* special instruction used in jit_fill_hole() */ +#define SW64_BPF_ILLEGAL_INSN (0x1ff00000) /* pri_ret/b $31 */ + +enum sw64_bpf_registers { + SW64_BPF_REG_V0 = 0, /* keep return value */ + SW64_BPF_REG_T0 = 1, + SW64_BPF_REG_T1 = 2, + SW64_BPF_REG_T2 = 3, + SW64_BPF_REG_T3 = 4, + SW64_BPF_REG_T4 = 5, + SW64_BPF_REG_T5 = 6, + SW64_BPF_REG_T6 = 7, + SW64_BPF_REG_T7 = 8, + SW64_BPF_REG_S0 = 9, /* callee saved */ + SW64_BPF_REG_S1 = 10, /* callee saved */ + SW64_BPF_REG_S2 = 11, /* callee saved */ + SW64_BPF_REG_S3 = 12, /* callee saved */ + SW64_BPF_REG_S4 = 13, /* callee saved */ + SW64_BPF_REG_S5 = 14, /* callee saved */ + SW64_BPF_REG_S6 = 15, /* callee saved */ + SW64_BPF_REG_FP = 15, /* frame pointer if necessary */ + SW64_BPF_REG_A0 = 16, /* argument 0 */ + SW64_BPF_REG_A1 = 17, /* argument 1 */ + SW64_BPF_REG_A2 = 18, /* argument 2 */ + SW64_BPF_REG_A3 = 19, /* argument 3 */ + SW64_BPF_REG_A4 = 20, /* argument 4 */ + SW64_BPF_REG_A5 = 21, /* argument 5 */ + SW64_BPF_REG_T8 = 22, + SW64_BPF_REG_T9 = 23, + 
SW64_BPF_REG_T10 = 24, + SW64_BPF_REG_T11 = 25, + SW64_BPF_REG_RA = 26, /* callee saved, keep return address */ + SW64_BPF_REG_T12 = 27, + SW64_BPF_REG_PV = 27, + SW64_BPF_REG_AT = 28, /* reserved by assembler */ + SW64_BPF_REG_GP = 29, /* global pointer */ + SW64_BPF_REG_SP = 30, /* callee saved, stack pointer */ + SW64_BPF_REG_ZR = 31 /* read 0 */ +}; + +/* SW64 load and store instructions */ +#define SW64_BPF_LDBU(dst, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDBU, dst, rb, offset16) +#define SW64_BPF_LDHU(dst, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDHU, dst, rb, offset16) +#define SW64_BPF_LDW(dst, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDW, dst, rb, offset16) +#define SW64_BPF_LDL(dst, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDL, dst, rb, offset16) +#define SW64_BPF_STB(src, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STB, src, rb, offset16) +#define SW64_BPF_STH(src, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STH, src, rb, offset16) +#define SW64_BPF_STW(src, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STW, src, rb, offset16) +#define SW64_BPF_STL(src, rb, offset16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STL, src, rb, offset16) +#define SW64_BPF_LDI(dst, rb, imm16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDI, dst, rb, imm16) +#define SW64_BPF_LDIH(dst, rb, imm16) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDIH, dst, rb, imm16) + +/* SW64 lock instructions */ +#define SW64_BPF_LLDW(ra, rb, offset16) \ + sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \ + ra, rb, offset16, SW64_BPF_FUNC_LOCK_LLDW) +#define SW64_BPF_LLDL(ra, rb, offset16) \ + sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \ + ra, rb, offset16, SW64_BPF_FUNC_LOCK_LLDL) +#define SW64_BPF_LSTW(ra, rb, offset16) \ + sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \ + ra, rb, offset16, SW64_BPF_FUNC_LOCK_LSTW) +#define SW64_BPF_LSTL(ra, rb, offset16) \ + 
sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \ + ra, rb, offset16, SW64_BPF_FUNC_LOCK_LSTL) +#define SW64_BPF_RD_F(ra) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_MISC, \ + ra, SW64_BPF_REG_ZR, SW64_BPF_FUNC_MISC_RD_F) +#define SW64_BPF_WR_F(ra) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_MISC, \ + ra, SW64_BPF_REG_ZR, SW64_BPF_FUNC_MISC_WR_F) + +/* SW64 ALU instructions REG format */ +#define SW64_BPF_ADDW_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ADDW) +#define SW64_BPF_ADDL_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ADDL) +#define SW64_BPF_SUBW_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_SUBW) +#define SW64_BPF_SUBL_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_SUBL) +#define SW64_BPF_MULW_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_MULW) +#define SW64_BPF_MULL_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_MULL) +#define SW64_BPF_ZAP_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ZAP) +#define SW64_BPF_ZAPNOT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ZAPNOT) +#define SW64_BPF_SEXTB_REG(rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + SW64_BPF_REG_ZR, rb, dst, SW64_BPF_FUNC_ALU_SEXTB) +#define SW64_BPF_SEXTH_REG(rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + SW64_BPF_REG_ZR, rb, dst, SW64_BPF_FUNC_ALU_SEXTH) + +/* SW64 ALU instructions IMM format */ +#define SW64_BPF_ADDW_IMM(ra, imm8, dst) \ + 
sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ADDW) +#define SW64_BPF_ADDL_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ADDL) +#define SW64_BPF_SUBW_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_SUBW) +#define SW64_BPF_SUBL_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_SUBL) +#define SW64_BPF_MULW_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_MULW) +#define SW64_BPF_MULL_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_MULL) +#define SW64_BPF_ZAP_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ZAP) +#define SW64_BPF_ZAPNOT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ZAPNOT) +#define SW64_BPF_SEXTB_IMM(imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + SW64_BPF_REG_ZR, imm8, dst, SW64_BPF_FUNC_ALU_SEXTB) +#define SW64_BPF_SEXTH_IMM(imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + SW64_BPF_REG_ZR, imm8, dst, SW64_BPF_FUNC_ALU_SEXTH) + +/* SW64 bit shift instructions REG format */ +#define SW64_BPF_SLL_REG(src, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + src, rb, dst, SW64_BPF_FUNC_ALU_SLL) +#define SW64_BPF_SRL_REG(src, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + src, rb, dst, SW64_BPF_FUNC_ALU_SRL) +#define SW64_BPF_SRA_REG(src, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + src, rb, dst, SW64_BPF_FUNC_ALU_SRA) + +/* SW64 bit shift instructions IMM format */ 
+#define SW64_BPF_SLL_IMM(src, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + src, imm8, dst, SW64_BPF_FUNC_ALU_SLL) +#define SW64_BPF_SRL_IMM(src, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + src, imm8, dst, SW64_BPF_FUNC_ALU_SRL) +#define SW64_BPF_SRA_IMM(src, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + src, imm8, dst, SW64_BPF_FUNC_ALU_SRA) + +/* SW64 control instructions */ +#define SW64_BPF_CALL(ra, rb) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_CALL, ra, rb, 0) +#define SW64_BPF_RET(rb) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_RET, SW64_BPF_REG_ZR, rb, 0) +#define SW64_BPF_JMP(ra, rb) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_JMP, ra, rb, 0) +#define SW64_BPF_BR(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BR, ra, offset) +#define SW64_BPF_BSR(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BSR, ra, offset) +#define SW64_BPF_BEQ(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BEQ, ra, offset) +#define SW64_BPF_BNE(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BNE, ra, offset) +#define SW64_BPF_BLT(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLT, ra, offset) +#define SW64_BPF_BLE(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLE, ra, offset) +#define SW64_BPF_BGT(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BGT, ra, offset) +#define SW64_BPF_BGE(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BGE, ra, offset) +#define SW64_BPF_BLBC(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLBC, ra, offset) +#define SW64_BPF_BLBS(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLBS, ra, offset) + +/* SW64 bit logic instructions REG format */ +#define SW64_BPF_AND_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_AND) +#define SW64_BPF_ANDNOT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, 
\ + ra, rb, dst, SW64_BPF_FUNC_ALU_BIC) +#define SW64_BPF_BIS_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_BIS) +#define SW64_BPF_ORNOT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ORNOT) +#define SW64_BPF_XOR_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_XOR) +#define SW64_BPF_EQV_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_EQV) + +/* SW64 bit logic instructions IMM format */ +#define SW64_BPF_AND_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_AND) +#define SW64_BPF_ANDNOT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_BIC) +#define SW64_BPF_BIS_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_BIS) +#define SW64_BPF_ORNOT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ORNOT) +#define SW64_BPF_XOR_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_XOR) +#define SW64_BPF_EQV_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_EQV) + +/* SW64 compare instructions REG format */ +#define SW64_BPF_CMPEQ_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_CMPEQ) +#define SW64_BPF_CMPLT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_CMPLT) +#define SW64_BPF_CMPLE_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ 
+ ra, rb, dst, SW64_BPF_FUNC_ALU_CMPLE) +#define SW64_BPF_CMPULT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_CMPULT) +#define SW64_BPF_CMPULE_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_CMPULE) + +/* SW64 compare instructions imm format */ +#define SW64_BPF_CMPEQ_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPEQ) +#define SW64_BPF_CMPLT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPLT) +#define SW64_BPF_CMPLE_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPLE) +#define SW64_BPF_CMPULT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPULT) +#define SW64_BPF_CMPULE_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPULE) + +#endif /* _SW64_NET_BPF_JIT_H */ diff --git a/arch/sw_64/net/bpf_jit_comp.c b/arch/sw_64/net/bpf_jit_comp.c new file mode 100644 index 000000000000..31202dd0f9cf --- /dev/null +++ b/arch/sw_64/net/bpf_jit_comp.c @@ -0,0 +1,1455 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * BPF JIT compiler for SW64 + * + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * This file is taken from arch/arm64/net/bpf_jit_comp.c + * Copyright (C) 2014-2016 Zi Shen Lim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include + +#include + +#include "bpf_jit.h" + +#define TCALL_CNT (MAX_BPF_JIT_REG + 0) + +static const int bpf2sw64[] = { + /* return value from in-kernel function, and exit value from eBPF */ + [BPF_REG_0] = SW64_BPF_REG_V0, + /* arguments from eBPF program to in-kernel function */ + [BPF_REG_1] = SW64_BPF_REG_A0, + [BPF_REG_2] = SW64_BPF_REG_A1, + [BPF_REG_3] = SW64_BPF_REG_A2, + [BPF_REG_4] = SW64_BPF_REG_A3, + [BPF_REG_5] = SW64_BPF_REG_A4, + /* callee saved registers that in-kernel function will preserve */ + [BPF_REG_6] = SW64_BPF_REG_S0, + [BPF_REG_7] = SW64_BPF_REG_S1, + [BPF_REG_8] = SW64_BPF_REG_S2, + [BPF_REG_9] = SW64_BPF_REG_S3, + /* read-only frame pointer to access stack */ + [BPF_REG_FP] = SW64_BPF_REG_FP, + /* tail_call_cnt */ + [TCALL_CNT] = SW64_BPF_REG_S4, + /* temporary register for blinding constants */ + [BPF_REG_AX] = SW64_BPF_REG_T11, +}; + +struct jit_ctx { + const struct bpf_prog *prog; + int idx; // JITed instruction index + int current_tmp_reg; + int epilogue_offset; + int *insn_offset; // [bpf_insn_idx] = jited_insn_idx + int exentry_idx; + u32 *image; // JITed instruction + u32 stack_size; +}; + +struct sw64_jit_data { + struct bpf_binary_header *header; + u8 *image; // bpf instruction + struct jit_ctx ctx; +}; + +static inline u32 sw64_bpf_gen_format_br(int opcode, enum sw64_bpf_registers ra, u32 disp) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + return opcode | ra | (disp & 0x1fffff); +} + +static inline u32 sw64_bpf_gen_format_ls(int opcode, enum sw64_bpf_registers ra, + enum sw64_bpf_registers rb, u16 disp) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + rb = rb << SW64_BPF_RB_OFFSET; + return opcode | ra | rb | (disp & 0xffff); +} + +static inline u32 
sw64_bpf_gen_format_ls_func(int opcode, enum sw64_bpf_registers ra, + enum sw64_bpf_registers rb, u16 disp, int function) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + rb = rb << SW64_BPF_RB_OFFSET; + function = function << SW64_BPF_LS_FUNC_OFFSET; + return opcode | ra | rb | function | (disp & 0xfff); +} + +static inline u32 sw64_bpf_gen_format_simple_alu_reg(int opcode, enum sw64_bpf_registers ra, + enum sw64_bpf_registers rb, enum sw64_bpf_registers rc, int function) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + rb = rb << SW64_BPF_RB_OFFSET; + rc = rc << SW64_BPF_SIMPLE_ALU_RC_OFFSET; + function = function << SW64_BPF_SIMPLE_ALU_FUNC_OFFSET; + return opcode | ra | rb | function | rc; +} + +static inline u32 sw64_bpf_gen_format_simple_alu_imm(int opcode, enum sw64_bpf_registers ra, + u32 imm, enum sw64_bpf_registers rc, int function) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + imm = (imm & 0xff) << SW64_BPF_SIMPLE_ALU_IMM_OFFSET; + rc = rc << SW64_BPF_SIMPLE_ALU_RC_OFFSET; + function = function << SW64_BPF_SIMPLE_ALU_FUNC_OFFSET; + return opcode | ra | imm | function | rc; +} + +static inline void emit(const u32 insn, struct jit_ctx *ctx) +{ + if (ctx->image != NULL) + ctx->image[ctx->idx] = insn; + + ctx->idx++; +} + +static inline int get_tmp_reg(struct jit_ctx *ctx) +{ + ctx->current_tmp_reg++; + /* Do not use 22-25. Should be more than enough. 
*/ + if (unlikely(ctx->current_tmp_reg == 8)) { + pr_err("eBPF JIT %s[%d]: not enough temporary registers!\n", + current->comm, current->pid); + return -1; + } + return ctx->current_tmp_reg; +} + +static inline void put_tmp_reg(struct jit_ctx *ctx) +{ + ctx->current_tmp_reg--; + if (ctx->current_tmp_reg == 21) + ctx->current_tmp_reg = 7; +} + +static void emit_sw64_ldu32(const int dst, const u32 imm, struct jit_ctx *ctx) +{ + u16 imm_tmp; + u8 reg_tmp = get_tmp_reg(ctx); + + if (!imm) { + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, dst), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm >= U32_MAX - S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + put_tmp_reg(ctx); + return; + } + + imm_tmp = (imm >> 30) & 3; + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm_tmp), ctx); + if (imm_tmp) + emit(SW64_BPF_SLL_IMM(dst, 30, dst), ctx); + + imm_tmp = (imm >> 15) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 15, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = imm & 0x7fff; + if (imm_tmp) + emit(SW64_BPF_LDI(dst, dst, imm_tmp), ctx); + + put_tmp_reg(ctx); +} + +static void emit_sw64_lds32(const int dst, const s32 imm, struct jit_ctx *ctx) +{ + s16 hi = imm >> 16; + s16 lo = imm & 0xffff; + u8 reg_tmp = get_tmp_reg(ctx); + + if (!imm) { + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, dst), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm >= S16_MIN && imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + put_tmp_reg(ctx); + return; + } + + emit(SW64_BPF_LDIH(dst, SW64_BPF_REG_ZR, hi), ctx); + if (lo & 0x8000) { // sign bit is 1 + lo = lo & 0x7fff; + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, 1), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 15, 
reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + if (lo) + emit(SW64_BPF_LDI(dst, dst, lo), ctx); + } else { // sign bit is 0 + if (lo) + emit(SW64_BPF_LDI(dst, dst, lo), ctx); + } + + put_tmp_reg(ctx); +} + +static void emit_sw64_ldu64(const int dst, const u64 imm, struct jit_ctx *ctx) +{ + u16 imm_tmp; + u8 reg_tmp = get_tmp_reg(ctx); + + if (!imm) { + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, dst), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm <= U32_MAX) { + put_tmp_reg(ctx); + return emit_sw64_ldu32(dst, (u32)imm, ctx); + } + + if (imm >= (U64_MAX - S16_MAX) || imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + put_tmp_reg(ctx); + return; + } + + imm_tmp = (imm >> 60) & 0xf; + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm_tmp), ctx); + if (imm_tmp) + emit(SW64_BPF_SLL_IMM(dst, 60, dst), ctx); + + imm_tmp = (imm >> 45) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 45, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = (imm >> 30) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 30, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = (imm >> 15) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 15, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = imm & 0x7fff; + if (imm_tmp) + emit(SW64_BPF_LDI(dst, dst, imm_tmp), ctx); + + put_tmp_reg(ctx); +} + +/* Do not change!!! 
See arch/sw_64/lib/divide.S for more detail */ +#define REG(x) "$"str(x) +#define str(x) #x +#define DIV_RET_ADDR 23 +#define DIVIDEND 24 +#define DIVISOR 25 +#define RESULT 27 + +#include +static void emit_sw64_divmod(const int dst, const int src, struct jit_ctx *ctx, u8 code) +{ + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, dst, DIVIDEND), ctx); + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, src, DIVISOR), ctx); + switch (BPF_CLASS(code)) { + case BPF_ALU: + switch (BPF_OP(code)) { + case BPF_DIV: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__divwu, ctx); + break; + case BPF_MOD: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__remwu, ctx); + break; + } + emit(SW64_BPF_CALL(DIV_RET_ADDR, SW64_BPF_REG_PV), ctx); + emit(SW64_BPF_ZAP_IMM(RESULT, 0xf0, dst), ctx); + break; + case BPF_ALU64: + switch (BPF_OP(code)) { + case BPF_DIV: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__divlu, ctx); + break; + case BPF_MOD: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__remlu, ctx); + break; + } + emit(SW64_BPF_CALL(DIV_RET_ADDR, SW64_BPF_REG_PV), ctx); + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, RESULT, dst), ctx); + break; + } +} + +#undef REG +#undef str +#undef DIVIDEND +#undef DIVISOR +#undef RESULT + +/* STX XADD: lock *(u32 *)(dst + off) += src */ +static void emit_sw64_xadd32(const int src, int dst, s16 off, struct jit_ctx *ctx) +{ + int atomic_start; + int atomic_end; + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + u8 tmp3 = get_tmp_reg(ctx); + + if (off < -0x800 || off > 0x7ff) { + emit(SW64_BPF_LDI(tmp1, dst, off), ctx); + dst = tmp1; + off = 0; + } + + atomic_start = ctx->idx; + emit(SW64_BPF_LLDW(tmp2, dst, off), ctx); + emit(SW64_BPF_LDI(tmp3, SW64_BPF_REG_ZR, 1), ctx); + emit(SW64_BPF_WR_F(tmp3), ctx); + emit(SW64_BPF_ADDW_REG(tmp2, src, tmp2), ctx); + if (ctx->idx & 1) + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, SW64_BPF_REG_ZR), ctx); + emit(SW64_BPF_LSTW(tmp2, dst, off), ctx); + emit(SW64_BPF_RD_F(tmp3), ctx); + atomic_end = ctx->idx; + emit(SW64_BPF_BEQ(tmp3, 
atomic_start - atomic_end - 1), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +/* STX XADD: lock *(u64 *)(dst + off) += src */ +static void emit_sw64_xadd64(const int src, int dst, s16 off, struct jit_ctx *ctx) +{ + int atomic_start; + int atomic_end; + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + u8 tmp3 = get_tmp_reg(ctx); + + if (off < -0x800 || off > 0x7ff) { + emit(SW64_BPF_LDI(tmp1, dst, off), ctx); + dst = tmp1; + off = 0; + } + + atomic_start = ctx->idx; + emit(SW64_BPF_LLDL(tmp2, dst, off), ctx); + emit(SW64_BPF_LDI(tmp3, SW64_BPF_REG_ZR, 1), ctx); + emit(SW64_BPF_WR_F(tmp3), ctx); + emit(SW64_BPF_ADDL_REG(tmp2, src, tmp2), ctx); + if (ctx->idx & 1) + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, SW64_BPF_REG_ZR), ctx); + emit(SW64_BPF_LSTL(tmp2, dst, off), ctx); + emit(SW64_BPF_RD_F(tmp3), ctx); + atomic_end = ctx->idx; + emit(SW64_BPF_BEQ(tmp3, atomic_start - atomic_end - 1), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +static void emit_sw64_htobe16(const int dst, struct jit_ctx *ctx) +{ + u8 tmp = get_tmp_reg(ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x2, tmp), ctx); + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x1, dst), ctx); + emit(SW64_BPF_SRL_IMM(tmp, 8, tmp), ctx); + emit(SW64_BPF_SLL_IMM(dst, 8, dst), ctx); + emit(SW64_BPF_BIS_REG(dst, tmp, dst), ctx); + + put_tmp_reg(ctx); +} + +static void emit_sw64_htobe32(const int dst, struct jit_ctx *ctx) +{ + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x8, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 24, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x4, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x2, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x1, dst), ctx); + emit(SW64_BPF_SLL_IMM(dst, 24, dst), 
ctx); + emit(SW64_BPF_BIS_REG(dst, tmp2, dst), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +static void emit_sw64_htobe64(const int dst, struct jit_ctx *ctx) +{ + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x80, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 56, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x40, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 40, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x20, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 24, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x10, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x08, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x04, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 24, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x02, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 40, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x01, dst), ctx); + emit(SW64_BPF_SLL_IMM(dst, 56, dst), ctx); + emit(SW64_BPF_BIS_REG(dst, tmp2, dst), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +static void jit_fill_hole(void *area, unsigned int size) +{ + unsigned long c = SW64_BPF_ILLEGAL_INSN; + + c |= c << 32; + __constant_c_memset(area, c, size); +} + +static int offset_to_epilogue(const struct jit_ctx *ctx); +static int bpf2sw64_offset(int bpf_idx, s32 off, const struct jit_ctx *ctx) +{ + int from = ctx->insn_offset[bpf_idx + 1]; + int to = ctx->insn_offset[bpf_idx + 1 + off]; + + if (ctx->image == NULL) + return 0; + + return to - from; +} + +static int offset_to_epilogue(const struct jit_ctx *ctx) +{ + if (ctx->image == NULL) + return 0; + + return 
ctx->epilogue_offset - ctx->idx; +} + +/* For tail call, jump to set up function call stack */ +#define PROLOGUE_OFFSET 11 + +static void build_prologue(struct jit_ctx *ctx, bool was_classic) +{ + const u8 r6 = bpf2sw64[BPF_REG_6]; + const u8 r7 = bpf2sw64[BPF_REG_7]; + const u8 r8 = bpf2sw64[BPF_REG_8]; + const u8 r9 = bpf2sw64[BPF_REG_9]; + const u8 fp = bpf2sw64[BPF_REG_FP]; + const u8 tcc = bpf2sw64[TCALL_CNT]; + + /* Save callee-saved registers */ + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, -64), ctx); + emit(SW64_BPF_STL(SW64_BPF_REG_RA, SW64_BPF_REG_SP, 0), ctx); + emit(SW64_BPF_STL(fp, SW64_BPF_REG_SP, 8), ctx); + emit(SW64_BPF_STL(r6, SW64_BPF_REG_SP, 16), ctx); + emit(SW64_BPF_STL(r7, SW64_BPF_REG_SP, 24), ctx); + emit(SW64_BPF_STL(r8, SW64_BPF_REG_SP, 32), ctx); + emit(SW64_BPF_STL(r9, SW64_BPF_REG_SP, 40), ctx); + emit(SW64_BPF_STL(tcc, SW64_BPF_REG_SP, 48), ctx); + emit(SW64_BPF_STL(SW64_BPF_REG_GP, SW64_BPF_REG_SP, 56), ctx); + + /* Set up BPF prog stack base register */ + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_SP, fp), ctx); + if (!was_classic) + /* Initialize tail_call_cnt */ + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, tcc), ctx); + + /* Set up function call stack */ + ctx->stack_size = (ctx->prog->aux->stack_depth + 15) & (~15); + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, -ctx->stack_size), ctx); +} + +static void build_epilogue(struct jit_ctx *ctx) +{ + const u8 r6 = bpf2sw64[BPF_REG_6]; + const u8 r7 = bpf2sw64[BPF_REG_7]; + const u8 r8 = bpf2sw64[BPF_REG_8]; + const u8 r9 = bpf2sw64[BPF_REG_9]; + const u8 fp = bpf2sw64[BPF_REG_FP]; + const u8 tcc = bpf2sw64[TCALL_CNT]; + + /* Destroy function call stack */ + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, ctx->stack_size), ctx); + + /* Restore callee-saved registers */ + emit(SW64_BPF_LDL(SW64_BPF_REG_RA, SW64_BPF_REG_SP, 0), ctx); + emit(SW64_BPF_LDL(fp, SW64_BPF_REG_SP, 8), ctx); + emit(SW64_BPF_LDL(r6, SW64_BPF_REG_SP, 16), ctx); + 
emit(SW64_BPF_LDL(r7, SW64_BPF_REG_SP, 24), ctx); + emit(SW64_BPF_LDL(r8, SW64_BPF_REG_SP, 32), ctx); + emit(SW64_BPF_LDL(r9, SW64_BPF_REG_SP, 40), ctx); + emit(SW64_BPF_LDL(tcc, SW64_BPF_REG_SP, 48), ctx); + emit(SW64_BPF_LDL(SW64_BPF_REG_GP, SW64_BPF_REG_SP, 56), ctx); + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, 64), ctx); + + /* Return */ + emit(SW64_BPF_RET(SW64_BPF_REG_RA), ctx); +} + +static int emit_bpf_tail_call(struct jit_ctx *ctx) +{ + /* bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) */ + const u8 r2 = bpf2sw64[BPF_REG_2]; /* struct bpf_array *array */ + const u8 r3 = bpf2sw64[BPF_REG_3]; /* u32 index */ + + const u8 tmp = get_tmp_reg(ctx); + const u8 prg = get_tmp_reg(ctx); + const u8 tcc = bpf2sw64[TCALL_CNT]; + u64 offset; + static int out_idx; +#define out_offset (ctx->image ? (out_idx - ctx->idx - 1) : 0) + + /* if (index >= array->map.max_entries) + * goto out; + */ + offset = offsetof(struct bpf_array, map.max_entries); + emit_sw64_ldu64(tmp, offset, ctx); + emit(SW64_BPF_ADDL_REG(r2, tmp, tmp), ctx); /* tmp = r2 + tmp = &map.max_entries */ + emit(SW64_BPF_LDW(tmp, tmp, 0), ctx); /* tmp = *tmp = map.max_entries */ + emit(SW64_BPF_ZAP_IMM(tmp, 0xf0, tmp), ctx); /* map.max_entries is u32 */ + emit(SW64_BPF_ZAP_IMM(r3, 0xf0, r3), ctx); /* index is u32 */ + emit(SW64_BPF_CMPULE_REG(tmp, r3, tmp), ctx); + emit(SW64_BPF_BNE(tmp, out_offset), ctx); + + /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) + * goto out; + * tail_call_cnt++; + */ + emit_sw64_ldu64(tmp, MAX_TAIL_CALL_CNT, ctx); + emit(SW64_BPF_CMPULT_REG(tmp, tcc, tmp), ctx); + emit(SW64_BPF_BNE(tmp, out_offset), ctx); + emit(SW64_BPF_ADDL_IMM(tcc, 1, tcc), ctx); + + /* prog = array->ptrs[index]; + * if (prog == NULL) + * goto out; + */ + offset = offsetof(struct bpf_array, ptrs); + emit_sw64_ldu64(tmp, offset, ctx); + emit(SW64_BPF_ADDL_REG(r2, tmp, tmp), ctx); /* tmp = r2 + tmp = &ptrs[0] */ + emit(SW64_BPF_SLL_IMM(r3, 3, prg), ctx); /* prg = r3 * 8, each entry is a 
pointer */ + emit(SW64_BPF_ADDL_REG(tmp, prg, prg), ctx); /* prg = tmp + prg = &ptrs[index] */ + emit(SW64_BPF_LDL(prg, prg, 0), ctx); /* prg = *prg = ptrs[index] = prog */ + emit(SW64_BPF_BEQ(prg, out_offset), ctx); + + /* goto *(prog->bpf_func + prologue_offset); */ + offset = offsetof(struct bpf_prog, bpf_func); + emit_sw64_ldu64(tmp, offset, ctx); + emit(SW64_BPF_ADDL_REG(prg, tmp, tmp), ctx); /* tmp = prg + tmp = &bpf_func */ + emit(SW64_BPF_LDL(tmp, tmp, 0), ctx); /* tmp = *tmp = bpf_func */ + emit(SW64_BPF_BEQ(tmp, out_offset), ctx); + emit(SW64_BPF_LDI(tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx); + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, ctx->stack_size), ctx); + emit(SW64_BPF_JMP(SW64_BPF_REG_ZR, tmp), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + + /* out */ + if (ctx->image == NULL) + out_idx = ctx->idx; + if (ctx->image != NULL && out_idx <= 0) + return -1; +#undef out_offset + return 0; +} + +/* For accesses to BTF pointers, add an entry to the exception table */ +static int add_exception_handler(const struct bpf_insn *insn, + struct jit_ctx *ctx, + int dst_reg) +{ + off_t offset; + unsigned long pc; + struct exception_table_entry *ex; + + if (!ctx->image) + /* First pass */ + return 0; + + if (!ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM) + return 0; + + if (WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries)) + return -EINVAL; + + ex = &ctx->prog->aux->extable[ctx->exentry_idx]; + pc = (unsigned long)&ctx->image[ctx->idx - 1]; + + offset = (long)&ex->insn - pc; + ex->insn = offset; + + ex->fixup.bits.nextinsn = sizeof(u32); + ex->fixup.bits.valreg = dst_reg; + ex->fixup.bits.errreg = SW64_BPF_REG_ZR; + + ctx->exentry_idx++; + return 0; +} + +/* JITs an eBPF instruction. + * Returns: + * 0 - successfully JITed an 8-byte eBPF instruction. + * >0 - successfully JITed a 16-byte eBPF instruction. + * <0 - failed to JIT. 
+ */ +static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) +{ + const u8 code = insn->code; + u8 dst = bpf2sw64[insn->dst_reg]; + u8 src = bpf2sw64[insn->src_reg]; + const u8 tmp1 __maybe_unused = get_tmp_reg(ctx); + const u8 tmp2 __maybe_unused = get_tmp_reg(ctx); + const s16 off = insn->off; + const s32 imm = insn->imm; + const int bpf_idx = insn - ctx->prog->insnsi; + s32 jmp_offset; + u64 func; + struct bpf_insn insn1; + u64 imm64; + int ret; + + switch (code) { + case BPF_ALU | BPF_MOV | BPF_X: + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MOV | BPF_X: + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, src, dst), ctx); + break; + case BPF_ALU | BPF_ADD | BPF_X: + emit(SW64_BPF_ADDW_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ADD | BPF_X: + emit(SW64_BPF_ADDL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_SUB | BPF_X: + emit(SW64_BPF_SUBW_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_SUB | BPF_X: + emit(SW64_BPF_SUBL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_MUL | BPF_X: + emit(SW64_BPF_MULW_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MUL | BPF_X: + emit(SW64_BPF_MULL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_DIV | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU64 | BPF_DIV | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU | BPF_MOD | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU64 | BPF_MOD | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU | BPF_LSH | BPF_X: + emit(SW64_BPF_SLL_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_LSH | BPF_X: + emit(SW64_BPF_SLL_REG(dst, src, dst), ctx); + break; + 
case BPF_ALU | BPF_RSH | BPF_X: + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + fallthrough; + case BPF_ALU64 | BPF_RSH | BPF_X: + emit(SW64_BPF_SRL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_X: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + emit(SW64_BPF_SRA_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit(SW64_BPF_SRA_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_AND | BPF_X: + emit(SW64_BPF_AND_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_AND | BPF_X: + emit(SW64_BPF_AND_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_OR | BPF_X: + emit(SW64_BPF_BIS_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_OR | BPF_X: + emit(SW64_BPF_BIS_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_XOR | BPF_X: + emit(SW64_BPF_XOR_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_XOR | BPF_X: + emit(SW64_BPF_XOR_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_NEG: + emit(SW64_BPF_SUBW_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_NEG: + emit(SW64_BPF_SUBL_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + break; + case BPF_ALU | BPF_END | BPF_TO_LE: + switch (imm) { + case 16: + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x3, dst), ctx); + break; + case 32: + emit(SW64_BPF_ZAPNOT_IMM(dst, 0xf, dst), ctx); + break; + case 64: + break; + default: + pr_err("eBPF JIT %s[%d]: BPF_TO_LE unknown size\n", + current->comm, current->pid); + return -EINVAL; + } + break; + case BPF_ALU | BPF_END | BPF_TO_BE: + switch (imm) { + case 16: + emit_sw64_htobe16(dst, ctx); + break; + case 32: + emit_sw64_htobe32(dst, ctx); + break; + case 64: + emit_sw64_htobe64(dst, ctx); + break; + default: + pr_err("eBPF JIT %s[%d]: BPF_TO_BE unknown 
size\n", + current->comm, current->pid); + return -EINVAL; + } + break; + + case BPF_ALU | BPF_MOV | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + else + emit_sw64_ldu32(dst, imm, ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MOV | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + else + emit_sw64_lds32(dst, imm, ctx); + break; + case BPF_ALU | BPF_ADD | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, dst, imm), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_ADDW_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ADD | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, dst, imm), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_ADDL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_SUB | BPF_K: + if (imm >= -S16_MAX && imm <= -S16_MIN) { + emit(SW64_BPF_LDI(dst, dst, -imm), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SUBL_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_SUB | BPF_K: + if (imm >= -S16_MAX && imm <= -S16_MIN) { + emit(SW64_BPF_LDI(dst, dst, -imm), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SUBL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_MUL | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_MULL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_MULL_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MUL | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_MULL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_MULL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | 
BPF_DIV | BPF_K: + emit_sw64_ldu32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU64 | BPF_DIV | BPF_K: + emit_sw64_lds32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU | BPF_MOD | BPF_K: + emit_sw64_ldu32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU64 | BPF_MOD | BPF_K: + emit_sw64_lds32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU | BPF_LSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SLL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SLL_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_LSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SLL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SLL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_RSH | BPF_K: + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SRL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU64 | BPF_RSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SRL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_ARSH | BPF_K: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRA_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SRA_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ARSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRA_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SRA_REG(dst, tmp1, dst), ctx); + } + 
break; + case BPF_ALU | BPF_AND | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_AND_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_AND_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_AND | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_AND_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_AND_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_OR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_BIS_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_BIS_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_OR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_BIS_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_BIS_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_XOR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_XOR_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_XOR_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_XOR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_XOR_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_XOR_REG(dst, tmp1, dst), ctx); + } + break; + + case BPF_JMP | BPF_JA: + jmp_offset = bpf2sw64_offset(bpf_idx, off, ctx); + if (jmp_offset >= -0x100000 && jmp_offset <= 0xfffff) { + emit(SW64_BPF_BR(SW64_BPF_REG_ZR, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_JMP out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | 
BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, src, tmp1), ctx); + src = tmp1; + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, tmp2), ctx); + dst = tmp2; + fallthrough; + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP | BPF_JSET | BPF_X: + switch (BPF_OP(code)) { + case BPF_JEQ: + emit(SW64_BPF_CMPEQ_REG(dst, src, tmp1), ctx); + break; + case BPF_JGT: + emit(SW64_BPF_CMPULT_REG(src, dst, tmp1), ctx); + break; + case BPF_JLT: + emit(SW64_BPF_CMPULT_REG(dst, src, tmp1), ctx); + break; + case BPF_JGE: + emit(SW64_BPF_CMPULE_REG(src, dst, tmp1), ctx); + break; + case BPF_JLE: + emit(SW64_BPF_CMPULE_REG(dst, src, tmp1), ctx); + break; + case BPF_JNE: + emit(SW64_BPF_CMPEQ_REG(dst, src, tmp1), ctx); + emit(SW64_BPF_XOR_IMM(tmp1, 1, tmp1), ctx); + break; + case BPF_JSGT: + emit(SW64_BPF_CMPLT_REG(src, dst, tmp1), ctx); + break; + case BPF_JSLT: + emit(SW64_BPF_CMPLT_REG(dst, src, tmp1), ctx); + break; + case BPF_JSGE: + emit(SW64_BPF_CMPLE_REG(src, dst, tmp1), ctx); + break; + case BPF_JSLE: + emit(SW64_BPF_CMPLE_REG(dst, src, tmp1), ctx); + break; + case BPF_JSET: + emit(SW64_BPF_AND_REG(dst, src, tmp1), ctx); + break; + } + jmp_offset = bpf2sw64_offset(bpf_idx, off, ctx); + if (jmp_offset >= -0x100000 && jmp_offset <= 0xfffff) { + emit(SW64_BPF_BNE(tmp1, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_JMP out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_JMP32 | 
BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, tmp2), ctx); + dst = tmp2; + fallthrough; + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP | BPF_JSET | BPF_K: + emit_sw64_lds32(tmp1, imm, ctx); + switch (BPF_OP(code)) { + case BPF_JEQ: + emit(SW64_BPF_CMPEQ_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JGT: + emit(SW64_BPF_CMPULT_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JLT: + emit(SW64_BPF_CMPULT_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JGE: + emit(SW64_BPF_CMPULE_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JLE: + emit(SW64_BPF_CMPULE_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JNE: + emit(SW64_BPF_CMPEQ_REG(dst, tmp1, tmp2), ctx); + emit(SW64_BPF_XOR_IMM(tmp2, 1, tmp2), ctx); + break; + case BPF_JSGT: + emit(SW64_BPF_CMPLT_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JSLT: + emit(SW64_BPF_CMPLT_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JSGE: + emit(SW64_BPF_CMPLE_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JSLE: + emit(SW64_BPF_CMPLE_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JSET: + emit(SW64_BPF_AND_REG(dst, tmp1, tmp2), ctx); + break; + } + jmp_offset = bpf2sw64_offset(bpf_idx, off, ctx); + if (jmp_offset >= -0x100000 && jmp_offset <= 0xfffff) { + emit(SW64_BPF_BNE(tmp2, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_JMP out of 
range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_JMP | BPF_CALL: + func = (u64)__bpf_call_base + imm; + if ((func & ~(KERNEL_IMAGE_SIZE - 1)) != __START_KERNEL_map) + /* calling bpf program, switch to vmalloc addr */ + func = (func & U32_MAX) | VMALLOC_START; + emit_sw64_ldu64(SW64_BPF_REG_PV, func, ctx); + emit(SW64_BPF_CALL(SW64_BPF_REG_RA, SW64_BPF_REG_PV), ctx); + break; + + case BPF_JMP | BPF_TAIL_CALL: + if (emit_bpf_tail_call(ctx)) + return -EFAULT; + break; + + case BPF_JMP | BPF_EXIT: + // if this is the last bpf instruction, skip to epilogue + if (bpf_idx == ctx->prog->len - 1) + break; + jmp_offset = offset_to_epilogue(ctx) - 1; + // epilogue is always at the end, must jump forward + if (jmp_offset >= -1 && jmp_offset <= 0xfffff) { + if (ctx->image && !jmp_offset) + // if this is the last jited instruction, generate nop + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, SW64_BPF_REG_ZR), ctx); + else + emit(SW64_BPF_BR(SW64_BPF_REG_ZR, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_EXIT out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_LD | BPF_IMM | BPF_DW: + insn1 = insn[1]; + imm64 = ((u64)insn1.imm << 32) | (u32)imm; + emit_sw64_ldu64(dst, imm64, ctx); + put_tmp_reg(ctx); + put_tmp_reg(ctx); + return 1; + + /* LDX: dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEM | BPF_H: + case BPF_LDX | BPF_PROBE_MEM | BPF_B: + switch (BPF_SIZE(code)) { + case BPF_W: + emit(SW64_BPF_LDW(dst, src, off), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_H: + emit(SW64_BPF_LDHU(dst, src, off), ctx); + break; + case BPF_B: + emit(SW64_BPF_LDBU(dst, src, off), ctx); 
+ break; + case BPF_DW: + emit(SW64_BPF_LDL(dst, src, off), ctx); + break; + } + + ret = add_exception_handler(insn, ctx, dst); + if (ret) + return ret; + break; + + /* ST: *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST | BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_B: + case BPF_ST | BPF_MEM | BPF_DW: + /* Load imm to a register then store it */ + emit_sw64_lds32(tmp1, imm, ctx); + switch (BPF_SIZE(code)) { + case BPF_W: + emit(SW64_BPF_STW(tmp1, dst, off), ctx); + break; + case BPF_H: + emit(SW64_BPF_STH(tmp1, dst, off), ctx); + break; + case BPF_B: + emit(SW64_BPF_STB(tmp1, dst, off), ctx); + break; + case BPF_DW: + emit(SW64_BPF_STL(tmp1, dst, off), ctx); + break; + } + break; + + /* STX: *(size *)(dst + off) = src */ + case BPF_STX | BPF_MEM | BPF_W: + emit(SW64_BPF_STW(src, dst, off), ctx); + break; + case BPF_STX | BPF_MEM | BPF_H: + emit(SW64_BPF_STH(src, dst, off), ctx); + break; + case BPF_STX | BPF_MEM | BPF_B: + emit(SW64_BPF_STB(src, dst, off), ctx); + break; + case BPF_STX | BPF_MEM | BPF_DW: + emit(SW64_BPF_STL(src, dst, off), ctx); + break; + + /* STX XADD: lock *(u32 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_W: + emit_sw64_xadd32(src, dst, off, ctx); + break; + /* STX XADD: lock *(u64 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_DW: + emit_sw64_xadd64(src, dst, off, ctx); + break; + + default: + pr_err("eBPF JIT %s[%d]: unknown opcode 0x%02x\n", + current->comm, current->pid, code); + return -EINVAL; + } + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + return 0; +} + +static int build_body(struct jit_ctx *ctx) +{ + const struct bpf_prog *prog = ctx->prog; + int i; + + for (i = 0; i < prog->len; i++) { + const struct bpf_insn *insn = &prog->insnsi[i]; + int ret; + + if (ctx->image == NULL) + ctx->insn_offset[i] = ctx->idx; + ret = build_insn(insn, ctx); + if (ret < 0) + return ret; + while (ret > 0) { + i++; + if (ctx->image == NULL) + ctx->insn_offset[i] = ctx->insn_offset[i - 1]; + ret--; + } + } 
+ + return 0; +} + +static int validate_code(struct jit_ctx *ctx) +{ + int i; + + for (i = 0; i < ctx->idx; i++) { + if (ctx->image[i] == SW64_BPF_ILLEGAL_INSN) + return -1; + } + + if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries)) + return -1; + + return 0; +} + +static inline void bpf_flush_icache(void *start, void *end) +{ + flush_icache_range((unsigned long)start, (unsigned long)end); +} + +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) +{ + struct bpf_prog *tmp, *orig_prog = prog; + struct bpf_binary_header *header; + struct sw64_jit_data *jit_data; + bool was_classic = bpf_prog_was_classic(prog); + bool tmp_blinded = false; + bool extra_pass = false; + struct jit_ctx ctx; + int image_size, prog_size, extable_size; + u8 *image_ptr; + + if (!prog->jit_requested) + return orig_prog; + + tmp = bpf_jit_blind_constants(prog); + /* If blinding was requested and we failed during blinding, + * we must fall back to the interpreter. + */ + if (IS_ERR(tmp)) + return orig_prog; + if (tmp != prog) { + tmp_blinded = true; + prog = tmp; + } + + jit_data = prog->aux->jit_data; + if (!jit_data) { + jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); + if (!jit_data) { + prog = orig_prog; + goto out; + } + prog->aux->jit_data = jit_data; + } + if (jit_data->ctx.insn_offset) { + ctx = jit_data->ctx; + image_ptr = jit_data->image; + header = jit_data->header; + extra_pass = true; + prog_size = sizeof(u32) * ctx.idx; + goto skip_init_ctx; + } + memset(&ctx, 0, sizeof(ctx)); + ctx.prog = prog; + + ctx.insn_offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL); + if (ctx.insn_offset == NULL) { + prog = orig_prog; + goto out_off; + } + + /* 1. Initial fake pass to compute ctx->idx. */ + + /* Fake pass to fill in ctx->offset. 
*/ + build_prologue(&ctx, was_classic); + + if (build_body(&ctx)) { + prog = orig_prog; + goto out_off; + } + + ctx.insn_offset[prog->len] = ctx.epilogue_offset = ctx.idx; + build_epilogue(&ctx); + + extable_size = prog->aux->num_exentries * + sizeof(struct exception_table_entry); + + /* Now we know the actual image size. */ + /* And we need extra 8 bytes for lock instructions alignment */ + prog_size = sizeof(u32) * ctx.idx + 8; + image_size = prog_size + extable_size; + header = bpf_jit_binary_alloc(image_size, &image_ptr, + sizeof(u32), jit_fill_hole); + if (header == NULL) { + prog = orig_prog; + goto out_off; + } + + /* 2. Now, the actual pass. */ + + /* lock instructions need 8-byte alignment */ + ctx.image = (u32 *)(((unsigned long)image_ptr + 7) & (~7)); + if (extable_size) + prog->aux->extable = (void *)image_ptr + prog_size; +skip_init_ctx: + ctx.idx = 0; + ctx.exentry_idx = 0; + + build_prologue(&ctx, was_classic); + + if (build_body(&ctx)) { + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_off; + } + + build_epilogue(&ctx); + + /* 3. Extra pass to validate JITed code. */ + if (validate_code(&ctx)) { + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_off; + } + + /* And we're done. */ + if (bpf_jit_enable > 1) + bpf_jit_dump(prog->len, prog_size, 2, ctx.image); + + bpf_flush_icache(header, ctx.image + ctx.idx); + + if (!prog->is_func || extra_pass) { + bpf_jit_binary_lock_ro(header); + } else { + jit_data->ctx = ctx; + jit_data->image = image_ptr; + jit_data->header = header; + } + prog->bpf_func = (void *)ctx.image; + prog->jited = 1; + prog->jited_len = prog_size; + if (ctx.current_tmp_reg) { + pr_err("eBPF JIT %s[%d]: unreleased temporary regsters %d\n", + current->comm, current->pid, ctx.current_tmp_reg); + } + + if (!prog->is_func || extra_pass) { +out_off: + kfree(ctx.insn_offset); + kfree(jit_data); + prog->aux->jit_data = NULL; + } +out: + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? 
+ tmp : orig_prog); + return prog; +} -- Gitee From 4006337ddc0bb3ab43283e4641edd27f2f21d992 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:36 +0800 Subject: [PATCH 0312/2138] anolis: sw64: add suspend support ANBZ: #4688 Add suspend support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/suspend.h | 50 ++++++++++++++++ arch/sw_64/kernel/suspend.c | 57 ++++++++++++++++++ arch/sw_64/kernel/suspend_asm.S | 99 ++++++++++++++++++++++++++++++++ 3 files changed, 206 insertions(+) create mode 100644 arch/sw_64/include/asm/suspend.h create mode 100644 arch/sw_64/kernel/suspend.c create mode 100644 arch/sw_64/kernel/suspend_asm.S diff --git a/arch/sw_64/include/asm/suspend.h b/arch/sw_64/include/asm/suspend.h new file mode 100644 index 000000000000..833e27f9d5e1 --- /dev/null +++ b/arch/sw_64/include/asm/suspend.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SUSPEND_H +#define _ASM_SW64_SUSPEND_H + +#include +#include +#include +#define SOFTINF_SLEEP_MAGIC 0x0123456789ABCDEFUL + +#ifdef CONFIG_HIBERNATION +#include +#include +#endif + +struct callee_saved_regs { + unsigned long r9; + unsigned long r10; + unsigned long r11; + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; + unsigned long ra; +}; + +struct callee_saved_fpregs { + unsigned long f2[4]; + unsigned long f3[4]; + unsigned long f4[4]; + unsigned long f5[4]; + unsigned long f6[4]; + unsigned long f7[4]; + unsigned long f8[4]; + unsigned long f9[4]; +} __aligned(32); /* 256 bits aligned for simd */ + +struct processor_state { + struct callee_saved_regs regs; + struct callee_saved_fpregs fpregs; + unsigned long fpcr; + unsigned long ktp; +#ifdef CONFIG_HIBERNATION + unsigned long sp; + struct vcpucb vcb; +#endif +}; + +extern void sw64_suspend_deep_sleep(struct 
processor_state *state); +extern const struct platform_suspend_ops native_suspend_ops; +#endif /* _ASM_SW64_SUSPEND_H */ diff --git a/arch/sw_64/kernel/suspend.c b/arch/sw_64/kernel/suspend.c new file mode 100644 index 000000000000..27a240e66149 --- /dev/null +++ b/arch/sw_64/kernel/suspend.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include +#include + +struct processor_state suspend_state; + +static int native_suspend_state_valid(suspend_state_t pm_state) +{ + switch (pm_state) { + case PM_SUSPEND_ON: + case PM_SUSPEND_STANDBY: + case PM_SUSPEND_MEM: + return 1; + default: + return 0; + } +} + +void disable_local_timer(void) +{ + wrtimer(0); +} + +extern struct pci_controller *hose_head; + +/* + * Boot Core will enter suspend stat here. + */ +void sw64_suspend_enter(void) +{ + /* boot processor will go to deep sleep mode from here + * After wake up boot processor, pc will go here + */ + disable_local_timer(); + current_thread_info()->pcb.tp = rtid(); + + sw64_suspend_deep_sleep(&suspend_state); + wrtp(current_thread_info()->pcb.tp); + + disable_local_timer(); +} + +static int native_suspend_enter(suspend_state_t state) +{ + if (is_in_guest()) + return 0; + /* processor specific suspend */ + sw64_suspend_enter(); + return 0; +} + +const struct platform_suspend_ops native_suspend_ops = { + .valid = native_suspend_state_valid, + .enter = native_suspend_enter, +}; diff --git a/arch/sw_64/kernel/suspend_asm.S b/arch/sw_64/kernel/suspend_asm.S new file mode 100644 index 000000000000..34ee349515a7 --- /dev/null +++ b/arch/sw_64/kernel/suspend_asm.S @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include + + .text + .set noat +ENTRY(sw64_suspend_deep_sleep) + /* a0 $16 will be the address of suspend_state */ + ldi $1, PSTATE_REGS($16) + stl $9, CALLEE_R9($1) + stl $10, CALLEE_R10($1) + stl $11, CALLEE_R11($1) + stl $12, CALLEE_R12($1) + stl $13, CALLEE_R13($1) + stl $14, CALLEE_R14($1) + stl $15, 
CALLEE_R15($1) + stl $26, CALLEE_RA($1) + /* SIMD-FP */ + ldi $1, PSTATE_FPREGS($16) + vstd $f2, CALLEE_F2($1) + vstd $f3, CALLEE_F3($1) + vstd $f4, CALLEE_F4($1) + vstd $f5, CALLEE_F5($1) + vstd $f6, CALLEE_F6($1) + vstd $f7, CALLEE_F7($1) + vstd $f8, CALLEE_F8($1) + vstd $f9, CALLEE_F9($1) + rfpcr $f0 + fstd $f0, PSTATE_FPCR($16) + stl $8, PSTATE_KTP($16) + + /* save the address of suspend_state to $18 */ + mov $16, $18 + + /* + * Now will Go to Deep Sleep + * HMcode should save pc, gp, ps, r16, r17, r18 + */ + + sys_call HMC_sleepen + sys_call HMC_whami + bis $0, $0, $16 + ldi $17, 0x2($31) + sys_call HMC_sendii + + /* wait for a while to receive interrupt */ + ldi $16, 0x1($31) + sll $16, 24, $16 +$subloop: + subl $16, 1, $16 + bis $16, $16, $16 + bis $16, $16, $16 + bne $16, $subloop + + + ldl $8, PSTATE_KTP($18) + ldi $1, PSTATE_REGS($18) + ldl $9, CALLEE_R9($1) + ldl $10, CALLEE_R10($1) + ldl $11, CALLEE_R11($1) + ldl $12, CALLEE_R12($1) + ldl $13, CALLEE_R13($1) + ldl $14, CALLEE_R14($1) + ldl $15, CALLEE_R15($1) + ldl $26, CALLEE_RA($1) + /* SIMD-FP */ + fldd $f0, PSTATE_FPCR($18) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $suspend_setfpec_0 + subl $2, 0x1, $2 + beq $2, $suspend_setfpec_1 + subl $2, 0x1, $2 + beq $2, $suspend_setfpec_2 + setfpec3 + br $suspend_setfpec_over +$suspend_setfpec_0: + setfpec0 + br $suspend_setfpec_over +$suspend_setfpec_1: + setfpec1 + br $suspend_setfpec_over +$suspend_setfpec_2: + setfpec2 +$suspend_setfpec_over: + ldi $1, PSTATE_FPREGS($18) + vldd $f2, CALLEE_F2($1) + vldd $f3, CALLEE_F3($1) + vldd $f4, CALLEE_F4($1) + vldd $f5, CALLEE_F5($1) + vldd $f6, CALLEE_F6($1) + vldd $f7, CALLEE_F7($1) + vldd $f8, CALLEE_F8($1) + vldd $f9, CALLEE_F9($1) + ret +END(sw64_suspend_deep_sleep) -- Gitee From 5d619e9c9b68f6b1bf1c41e9c0af23cfe2bb4df1 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:13 +0800 Subject: [PATCH 0313/2138] anolis: sw64: add hibernation support ANBZ: #4688 Add hibernation support 
for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/kernel/hibernate.c | 79 +++++++++++++++++++ arch/sw_64/kernel/hibernate_asm.S | 122 ++++++++++++++++++++++++++++++ arch/sw_64/kernel/pm.c | 18 +++++ 3 files changed, 219 insertions(+) create mode 100644 arch/sw_64/kernel/hibernate.c create mode 100644 arch/sw_64/kernel/hibernate_asm.S create mode 100644 arch/sw_64/kernel/pm.c diff --git a/arch/sw_64/kernel/hibernate.c b/arch/sw_64/kernel/hibernate.c new file mode 100644 index 000000000000..644ea8504313 --- /dev/null +++ b/arch/sw_64/kernel/hibernate.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +struct processor_state hibernate_state; +/* Defined in hibernate_asm.S */ +extern int restore_image(void); + +void save_processor_state(void) +{ + struct vcpucb *vcb = &(hibernate_state.vcb); + + vcb->ksp = rdksp(); + vcb->usp = rdusp(); + vcb->soft_tid = rtid(); + vcb->ptbr = rdptbr(); +} + +void restore_processor_state(void) +{ + struct vcpucb *vcb = &(hibernate_state.vcb); + + wrksp(vcb->ksp); + wrusp(vcb->usp); + wrtp(vcb->soft_tid); + wrptbr(vcb->ptbr); + sflush(); + tbiv(); +} + +int swsusp_arch_resume(void) +{ + restore_image(); + return 0; +} +/* References to section boundaries */ +extern const void __nosave_begin, __nosave_end; +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); + unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end)); + + return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); +} + +struct restore_data_record { + unsigned long magic; +}; + +#define RESTORE_MAGIC 0x0123456789ABCDEFUL + +/** + * arch_hibernation_header_save - populate the architecture specific part + * of a hibernation image header + * @addr: address to save the data at + */ +int arch_hibernation_header_save(void *addr, unsigned int 
max_size) +{ + struct restore_data_record *rdr = addr; + + if (max_size < sizeof(struct restore_data_record)) + return -EOVERFLOW; + rdr->magic = RESTORE_MAGIC; + return 0; +} + +/** + * arch_hibernation_header_restore - read the architecture specific data + * from the hibernation image header + * @addr: address to read the data from + */ +int arch_hibernation_header_restore(void *addr) +{ + struct restore_data_record *rdr = addr; + + return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL; +} diff --git a/arch/sw_64/kernel/hibernate_asm.S b/arch/sw_64/kernel/hibernate_asm.S new file mode 100644 index 000000000000..ff997cd76c5a --- /dev/null +++ b/arch/sw_64/kernel/hibernate_asm.S @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include + + .text + .set noat +ENTRY(swsusp_arch_suspend) + ldi $16, hibernate_state + ldi $1, PSTATE_REGS($16) + stl $9, CALLEE_R9($1) + stl $10, CALLEE_R10($1) + stl $11, CALLEE_R11($1) + stl $12, CALLEE_R12($1) + stl $13, CALLEE_R13($1) + stl $14, CALLEE_R14($1) + stl $15, CALLEE_R15($1) + stl $26, CALLEE_RA($1) + /* SIMD-FP */ + ldi $1, PSTATE_FPREGS($16) + vstd $f2, CALLEE_F2($1) + vstd $f3, CALLEE_F3($1) + vstd $f4, CALLEE_F4($1) + vstd $f5, CALLEE_F5($1) + vstd $f6, CALLEE_F6($1) + vstd $f7, CALLEE_F7($1) + vstd $f8, CALLEE_F8($1) + vstd $f9, CALLEE_F9($1) + rfpcr $f0 + fstd $f0, PSTATE_FPCR($16) + + stl $8, PSTATE_KTP($16) + stl sp, PSTATE_SP($16) + call swsusp_save + ldi $16, hibernate_state + ldi $1, PSTATE_REGS($16) + ldl $26, CALLEE_RA($1) + + /* save current_thread_info()->pcbb */ + ret +END(swsusp_arch_suspend) + +ENTRY(restore_image) + /* prepare to copy image data to their original locations */ + ldi t0, restore_pblist + ldl t0, 0(t0) +$loop: + beq t0, $done + + /* get addresses from the pbe and copy the page */ + ldl t1, PBE_ADDR(t0) /* source */ + ldl t2, PBE_ORIG_ADDR(t0) /* destination */ + ldi t3, PAGE_SIZE + addl t1, t3, t3 +$cpyloop: + ldl t8, 0(t1) + stl t8, 0(t2) + addl t1, 8, t1 
+ addl t2, 8, t2 + cmpeq t1, t3, t4 + beq t4, $cpyloop + + /* progress to the next pbe */ + ldl t0, PBE_NEXT(t0) + bne t0, $loop +$done: + + /* tell the hibernation core that we've just restored the memory */ + ldi $0, in_suspend + stl $31, 0($0) + + ldi $16, hibernate_state + ldi $1, PSTATE_REGS($16) + + ldl $9, CALLEE_R9($1) + ldl $10, CALLEE_R10($1) + ldl $11, CALLEE_R11($1) + ldl $12, CALLEE_R12($1) + ldl $13, CALLEE_R13($1) + ldl $14, CALLEE_R14($1) + ldl $15, CALLEE_R15($1) + ldl $26, CALLEE_RA($1) + /* SIMD-FP */ + fldd $f0, PSTATE_FPCR($16) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $hibernate_setfpec_0 + subl $2, 0x1, $2 + beq $2, $hibernate_setfpec_1 + subl $2, 0x1, $2 + beq $2, $hibernate_setfpec_2 + setfpec3 + br $hibernate_setfpec_over +$hibernate_setfpec_0: + setfpec0 + br $hibernate_setfpec_over +$hibernate_setfpec_1: + setfpec1 + br $hibernate_setfpec_over +$hibernate_setfpec_2: + setfpec2 +$hibernate_setfpec_over: + ldi $1, PSTATE_FPREGS($16) + vldd $f2, CALLEE_F2($1) + vldd $f3, CALLEE_F3($1) + vldd $f4, CALLEE_F4($1) + vldd $f5, CALLEE_F5($1) + vldd $f6, CALLEE_F6($1) + vldd $f7, CALLEE_F7($1) + vldd $f8, CALLEE_F8($1) + vldd $f9, CALLEE_F9($1) + + ldl sp, PSTATE_SP($16) + ldl $8, PSTATE_KTP($16) + sys_call HMC_wrktp + + ldi $0, 0($31) + + ret +END(restore_image) diff --git a/arch/sw_64/kernel/pm.c b/arch/sw_64/kernel/pm.c new file mode 100644 index 000000000000..f0a35e5d0486 --- /dev/null +++ b/arch/sw_64/kernel/pm.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include + +struct syscore_ops io_syscore_ops; + +static int __init sw64_pm_init(void) +{ +#ifdef CONFIG_SUSPEND + suspend_set_ops(&native_suspend_ops); +#endif + register_syscore_ops(&io_syscore_ops); + + return 0; +} +device_initcall(sw64_pm_init); -- Gitee From 1a5e4e90b6243b2827c5df2b5b546408c961f175 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:09 +0800 Subject: [PATCH 0314/2138] anolis: sw64: add ftrace support 
ANBZ: #4688 Add ftrace support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/ftrace.h | 44 ++++ arch/sw_64/include/asm/livepatch.h | 22 ++ arch/sw_64/kernel/entry-ftrace.S | 326 +++++++++++++++++++++++++++++ arch/sw_64/kernel/ftrace.c | 176 ++++++++++++++++ 4 files changed, 568 insertions(+) create mode 100644 arch/sw_64/include/asm/ftrace.h create mode 100644 arch/sw_64/include/asm/livepatch.h create mode 100644 arch/sw_64/kernel/entry-ftrace.S create mode 100644 arch/sw_64/kernel/ftrace.c diff --git a/arch/sw_64/include/asm/ftrace.h b/arch/sw_64/include/asm/ftrace.h new file mode 100644 index 000000000000..7ed6e3c06a33 --- /dev/null +++ b/arch/sw_64/include/asm/ftrace.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/include/asm/ftrace.h + * + * Copyright (C) 2019, serveros, linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_SW64_FTRACE_H +#define _ASM_SW64_FTRACE_H + +#define MCOUNT_ADDR ((unsigned long)_mcount) +#define MCOUNT_INSN_SIZE 20 /* 5 * SW64_INSN_SIZE */ +#define MCOUNT_LDGP_SIZE 8 /* 2 * SW64_INSN_SIZE */ + +#define ARCH_SUPPORTS_FTRACE_OPS 1 + +#ifndef __ASSEMBLY__ +#include +#include + + +extern void _mcount(unsigned long); + +struct dyn_arch_ftrace { + /* No extra data needed for sw64 */ +}; + +extern unsigned long ftrace_graph_call; + + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* + * addr is the address of the mcount call instruction. + * recordmcount does the necessary offset calculation. 
+ */ + return addr; +} + +#endif /* ifndef __ASSEMBLY__ */ +#endif /* _ASM_SW64_FTRACE_H */ diff --git a/arch/sw_64/include/asm/livepatch.h b/arch/sw_64/include/asm/livepatch.h new file mode 100644 index 000000000000..1feec0f6be76 --- /dev/null +++ b/arch/sw_64/include/asm/livepatch.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * livepatch.h - sw64-specific Kernel Live Patching Core + */ + +#ifndef _ASM_SW64_LIVEPATCH_H +#define _ASM_SW64_LIVEPATCH_H + +#include + +static inline int klp_check_compiler_support(void) +{ + return 0; +} + +static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) +{ + regs->regs[27] = ip; + regs->regs[28] = ip; +} + +#endif /* _ASM_SW64_LIVEPATCH_H */ diff --git a/arch/sw_64/kernel/entry-ftrace.S b/arch/sw_64/kernel/entry-ftrace.S new file mode 100644 index 000000000000..73e8e043fc9d --- /dev/null +++ b/arch/sw_64/kernel/entry-ftrace.S @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/kernel/entry-ftrace.S + * + * Author: linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ +#include +#include +#include + + .text + .set noat + .align 4 + +#define FTRACE_SP_OFF 0x50 + .macro mcount_enter + subl $sp, FTRACE_SP_OFF, $sp + stl $16, 0($sp) + stl $17, 0x8($sp) + stl $18, 0x10($sp) + stl $26, 0x18($sp) +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + stl $9, 0x20($sp) +#endif + stl $28, 0x28($sp) + stl $29, 0x30($sp) + stl $19, 0x38($sp) + stl $20, 0x40($sp) + stl $21, 0x48($sp) + .endm + + .macro mcount_end + ldl $16, 0($sp) + ldl $17, 0x8($sp) + ldl $18, 0x10($sp) + ldl $26, 0x18($sp) +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + ldl $9, 0x20($sp) +#endif + ldl $28, 0x28($sp) + ldl $29, 0x30($sp) + ldl $19, 0x38($sp) + ldl $20, 0x40($sp) + ldl $21, 0x48($sp) + addl $sp, FTRACE_SP_OFF, $sp + .endm + + .macro RESTORE_GRAPH_ARGS + ldi $16, 0x18($sp) /* &ra */ + bis $31, $9, $17 /* pc */ + #ifdef HAVE_FUNCTION_GRAPH_FP_TEST + bis $31, $15, $18 /* fp */ + #endif + .endm + + .macro SAVE_PT_REGS + ldi $sp, -PT_REGS_SIZE($sp) + stl $0, PT_REGS_R0($sp) + stl $1, PT_REGS_R1($sp) + stl $2, PT_REGS_R2($sp) + stl $3, PT_REGS_R3($sp) + stl $4, PT_REGS_R4($sp) + stl $5, PT_REGS_R5($sp) + stl $6, PT_REGS_R6($sp) + stl $7, PT_REGS_R7($sp) + stl $8, PT_REGS_R8($sp) + stl $9, PT_REGS_R9($sp) + stl $10, PT_REGS_R10($sp) + stl $11, PT_REGS_R11($sp) + stl $12, PT_REGS_R12($sp) + stl $13, PT_REGS_R13($sp) + stl $14, PT_REGS_R14($sp) + stl $15, PT_REGS_R15($sp) + stl $16, PT_REGS_R16($sp) + stl $17, PT_REGS_R17($sp) + stl $18, PT_REGS_R18($sp) + stl $19, PT_REGS_R19($sp) + stl $20, PT_REGS_R20($sp) + stl $21, PT_REGS_R21($sp) + stl $22, PT_REGS_R22($sp) + stl $23, PT_REGS_R23($sp) + stl $24, PT_REGS_R24($sp) + stl $25, PT_REGS_R25($sp) + stl $26, PT_REGS_R26($sp) + stl $27, PT_REGS_R27($sp) + stl $28, PT_REGS_R28($sp) + stl $29, PT_REGS_GP($sp) + ldi $0, PT_REGS_SIZE($sp) + stl $0, PT_REGS_SP($sp) + .endm + + .macro RESTORE_PT_REGS + ldl $0, PT_REGS_R0($sp) + ldl $1, PT_REGS_R1($sp) + ldl $2, PT_REGS_R2($sp) + ldl $3, PT_REGS_R3($sp) + ldl $4, PT_REGS_R4($sp) + ldl $5, 
PT_REGS_R5($sp) + ldl $6, PT_REGS_R6($sp) + ldl $7, PT_REGS_R7($sp) + ldl $8, PT_REGS_R8($sp) + ldl $9, PT_REGS_R9($sp) + ldl $10, PT_REGS_R10($sp) + ldl $11, PT_REGS_R11($sp) + ldl $12, PT_REGS_R12($sp) + ldl $13, PT_REGS_R13($sp) + ldl $14, PT_REGS_R14($sp) + ldl $15, PT_REGS_R15($sp) + ldl $16, PT_REGS_R16($sp) + ldl $17, PT_REGS_R17($sp) + ldl $18, PT_REGS_R18($sp) + ldl $19, PT_REGS_R19($sp) + ldl $20, PT_REGS_R20($sp) + ldl $21, PT_REGS_R21($sp) + ldl $22, PT_REGS_R22($sp) + ldl $23, PT_REGS_R23($sp) + ldl $24, PT_REGS_R24($sp) + ldl $25, PT_REGS_R25($sp) + ldl $26, PT_REGS_R26($sp) + ldl $27, PT_REGS_R27($sp) + ldl $28, PT_REGS_R28($sp) + ldl $29, PT_REGS_GP($sp) + ldi $sp, PT_REGS_SIZE($sp) + .endm + + .macro RESTORE_GRAPH_REG_ARGS + ldi $16, PT_REGS_R26($sp) + bis $31, $9, $17 +#ifdef HAVE_FUNCTION_GRAPH_FP_TEST + bis $31, $15, $18 +#endif + .endm + + /* save return value regs*/ + .macro save_return_regs + subl $sp, 0x8, $sp + stl $0, 0x0($sp) + .endm + + /* restore return value regs*/ + .macro restore_return_regs + ldl $0, 0x0($sp) + addl $sp, 0x8, $sp + .endm + + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* + * void ftrace_graph_caller(void) + * + * Called from ftrace_caller() or ftrace_regs_caller() when function_graph + * tracer is selected. + * This function prepare_ftrace_return() fakes ra's value on the call + * stack in order to intercept instrumented function's return path and + * run return_to_handler() later on its exit. + */ + +ENTRY(ftrace_graph_caller) + ldgp $29, 0($27) + ldi $sp, -16($sp) + stl $26, 0($sp) + stl $15, 8($sp) + bis $31, $sp, $15 + + ldi $27, prepare_ftrace_return +ftrace_graph_call: + .global ftrace_graph_call + /* + * Calling ftrace_enable/disable_ftrace_graph_caller would overwrite + * the nop below. 
+ */ + nop /* nop, or call prepare_ftrace_return() */ + + ldl $26, 0($sp) + ldl $15, 8($sp) + ldi $sp, 16($sp) + ret $31, ($26), 1 +ENDPROC(ftrace_graph_caller) + +/* + * void return_to_handler(void) + * + * Run ftrace_return_to_handler() before going back to parent. + * @fp is checked against the value passed by ftrace_graph_caller() + * only when HAVE_FUNCTION_GRAPH_FP_TEST is enabled. + * + * It is run by "ret" instruction which does not modify $27, so it + * has to recaculate $27 before ldgp. + */ +ENTRY(return_to_handler) + br $27, 1f +1: ldgp $29, 0($27) + save_return_regs + bis $31, $15, $16 /* parent's fp */ + ldi $27, ftrace_return_to_handler + call $26, ($27) + bis $31, $0, $26 + restore_return_regs + ret $31, ($26), 1 +END(return_to_handler) + +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE + .global _mcount + .ent _mcount +_mcount: + ret $31, ($28), 1 + .end _mcount + + + .global ftrace_caller + .ent ftrace_caller +ftrace_caller: + mcount_enter + br $27, 1f +1: ldgp $29, 0($27) + + subl $28, MCOUNT_INSN_SIZE, $16 + bis $26, $31, $17 + ldl $18, function_trace_op + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* + * the graph tracer (specifically, prepare_ftrace_return) needs these + * arguments but for now the function tracer occupies the regs, so we + * save them in callee-saved regs to recover later. 
+ */ + bis $31, $16, $9 +#endif + ldi $4, current_tracer + ldl $27, 0($4) + + .global ftrace_call +ftrace_call: /* tracer(pc, ra); */ + nop + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + RESTORE_GRAPH_ARGS + call ftrace_graph_caller +#endif + mcount_end + ret $31, ($28), 1 + .end ftrace_caller +#else /* !CONFIG_DYNAMIC_FTRACE */ + + .global _mcount + .ent _mcount +_mcount: + mcount_enter + br $27, 1f +1: ldgp $29, 0($27) + + ldl $27, ftrace_trace_function // if (ftrace_trace_function + ldi $5, ftrace_stub // != ftrace_stub) + cmpeq $27, $5, $6 // + bne $6, skip_ftrace + + subl $28, MCOUNT_INSN_SIZE, $16 // function's pc +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + bis $31, $16, $9 +#endif + bis $26, $31, $17 // function's ra (parent's pc) + call $26, ($27) // (*ftrace_trace_function)(pc, ra); + +skip_ftrace: +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + ldl $4, ftrace_graph_return // if ((ftrace_graph_return + cmpeq $4, $5, $6 // != ftrace_stub) + beq $6, 2f + ldl $4, ftrace_graph_entry // || (ftrace_graph_entry + ldi $5, ftrace_graph_entry_stub // != ftrace_graph_entry_stub)) + cmpeq $4, $5, $6 + bne $6, 3f +2: RESTORE_GRAPH_ARGS + call ftrace_graph_caller // ftrace_graph_caller(); +#endif +3: mcount_end + ret $31, ($28), 1 + .end _mcount + +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + .global ftrace_regs_caller + .ent ftrace_regs_caller +ftrace_regs_caller: + SAVE_PT_REGS + br $27, 1f +1: ldgp $29, 0($27) + + subl $28, MCOUNT_INSN_SIZE, $16 + bis $26, $31, $17 + ldi $4, function_trace_op + ldl $18, 0($4) + mov $sp, $19 + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + bis $31, $16, $9 +#endif + ldi $4, current_tracer + ldl $27, 0($4) + + .global ftrace_regs_call +ftrace_regs_call: + nop + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + RESTORE_GRAPH_REG_ARGS + call ftrace_graph_caller +#endif + RESTORE_PT_REGS + ret $31, ($28), 1 + .end ftrace_regs_caller +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ + + .global ftrace_stub + .ent ftrace_stub +ftrace_stub: + ret 
$31, ($26), 1 + .end ftrace_stub diff --git a/arch/sw_64/kernel/ftrace.c b/arch/sw_64/kernel/ftrace.c new file mode 100644 index 000000000000..fb25ffe3dbda --- /dev/null +++ b/arch/sw_64/kernel/ftrace.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on arch/arm64/kernel/ftrace.c + * + * Copyright (C) 2019 os kernel team + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +#include + +#ifdef CONFIG_FUNCTION_TRACER +EXPORT_SYMBOL(_mcount); +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE + +#define TI_FTRACE_ADDR (offsetof(struct thread_info, dyn_ftrace_addr)) +#define TI_FTRACE_REGS_ADDR \ + (offsetof(struct thread_info, dyn_ftrace_regs_addr)) + +unsigned long current_tracer = (unsigned long)ftrace_stub; + +/* + * Replace a single instruction, which may be a branch or NOP. + */ +static int ftrace_modify_code(unsigned long pc, u32 new) +{ + if (sw64_insn_write((void *)pc, new)) + return -EPERM; + return 0; +} + +/* + * Replace tracer function in ftrace_caller() + */ +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned long pc; + u32 new; + int ret; + + current_tracer = (unsigned long)func; + pc = (unsigned long)&ftrace_call; + new = SW64_CALL(R26, R27, 0); + ret = ftrace_modify_code(pc, new); + + if (!ret) { + pc = (unsigned long)&ftrace_regs_call; + new = SW64_CALL(R26, R27, 0); + ret = ftrace_modify_code(pc, new); + } + + return ret; +} + +/* + * Turn on the call to ftrace_caller() in instrumented function + */ +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned int insn[3]; + unsigned long pc = rec->ip + MCOUNT_LDGP_SIZE; + unsigned long offset; + + if (addr == FTRACE_ADDR) + offset = TI_FTRACE_ADDR; + else + offset = TI_FTRACE_REGS_ADDR; + + insn[0] = SW64_NOP; + /* ldl r28,(ftrace_addr_offset)(r8) */ + insn[1] = (0x23U << 26) | (28U << 21) | (8U << 16) 
| offset; + insn[2] = SW64_CALL(R28, R28, 0); + + /* replace the 3 mcount instructions at once */ + return copy_to_kernel_nofault((void *)pc, insn, 3 * SW64_INSN_SIZE); +} + +/* + * Turn off the call to ftrace_caller() in instrumented function + */ +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, + unsigned long addr) +{ + unsigned long pc = rec->ip + MCOUNT_LDGP_SIZE; + unsigned int insn[3] = {SW64_NOP, SW64_NOP, SW64_NOP}; + + return copy_to_kernel_nofault((void *)pc, insn, 3 * SW64_INSN_SIZE); +} + +void arch_ftrace_update_code(int command) +{ + ftrace_modify_all_code(command); +} + +int __init ftrace_dyn_arch_init(void) +{ + struct thread_info *ti = task_thread_info(&init_task); + + ti->dyn_ftrace_addr = FTRACE_ADDR; + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + ti->dyn_ftrace_regs_addr = FTRACE_REGS_ADDR; +#endif + return 0; +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) +{ + return 0; +} +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* + * function_graph tracer expects ftrace_return_to_handler() to be called + * on the way back to parent. For this purpose, this function is called + * in _mcount() or ftrace_caller() to replace return address (*parent) on + * the call stack to return_to_handler. + * + * Note that @frame_pointer is used only for sanity check later. + */ +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + unsigned long frame_pointer) +{ + unsigned long return_hooker = (unsigned long)&return_to_handler; + unsigned long old; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + /* + * Note: + * No protection against faulting at *parent, which may be seen + * on other archs. It's unlikely on AArch64. 
+ */ + old = *parent; + + if (!function_graph_enter(old, self_addr, frame_pointer, NULL)) + *parent = return_hooker; +} + +#ifdef CONFIG_DYNAMIC_FTRACE +/* + * Turn on/off the call to ftrace_graph_caller() in ftrace_caller() + * depending on @enable. + */ +static int ftrace_modify_graph_caller(bool enable) +{ + unsigned long pc = (unsigned long)&ftrace_graph_call; + u32 new = SW64_NOP; + + if (enable) + new = SW64_CALL(R26, R27, 0); + return ftrace_modify_code(pc, new); +} + +int ftrace_enable_ftrace_graph_caller(void) +{ + return ftrace_modify_graph_caller(true); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + return ftrace_modify_graph_caller(false); +} +#endif /* CONFIG_DYNAMIC_FTRACE */ +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ -- Gitee From 6e9554b702d516425266733b0119036e74ec123b Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:32 +0800 Subject: [PATCH 0315/2138] anolis: sw64: add kernel relocation support ANBZ: #4688 Add kernel relocation support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/kernel/relocate.c | 284 +++++++++++++ arch/sw_64/kernel/relocate_kernel.S | 176 ++++++++ arch/sw_64/tools/.gitignore | 2 + arch/sw_64/tools/Makefile | 8 + arch/sw_64/tools/relocs.c | 635 ++++++++++++++++++++++++++++ arch/sw_64/tools/relocs.h | 72 ++++ arch/sw_64/tools/relocs_main.c | 86 ++++ 7 files changed, 1263 insertions(+) create mode 100644 arch/sw_64/kernel/relocate.c create mode 100644 arch/sw_64/kernel/relocate_kernel.S create mode 100644 arch/sw_64/tools/.gitignore create mode 100644 arch/sw_64/tools/Makefile create mode 100644 arch/sw_64/tools/relocs.c create mode 100644 arch/sw_64/tools/relocs.h create mode 100644 arch/sw_64/tools/relocs_main.c diff --git a/arch/sw_64/kernel/relocate.c b/arch/sw_64/kernel/relocate.c new file mode 100644 index 000000000000..ebdf7d894805 --- /dev/null +++ b/arch/sw_64/kernel/relocate.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Support for kernel relocation at boot time. + * + * Based on arch/mips/kernel/relocate.c + * + * Copyright (C) 2019 He Sheng + * Authors: He Sheng (hesheng05@gmail.com) + */ +#include +#include +#include + +#include + +#define KTEXT_MAX 0xffffffffa0000000UL +#define RELOCATED(x) ((void *)((unsigned long)x + offset)) + +extern unsigned long _got_start[]; +extern unsigned long _got_end[]; +extern char pre_start_kernel[]; + +extern unsigned int _relocation_start[]; /* End kernel image / start relocation table */ +extern unsigned int _relocation_end[]; /* End relocation table */ + +extern unsigned long __start___ex_table; /* Start exception table */ +extern unsigned long __stop___ex_table; /* End exception table */ +extern union thread_union init_thread_union; + +/* + * This function may be defined for a platform to perform any post-relocation + * fixup necessary. 
+ * Return non-zero to abort relocation + */ +int __weak plat_post_relocation(long offset) +{ + return 0; +} + +static int __init apply_r_sw64_refquad(unsigned long *loc_orig, unsigned long *loc_new, unsigned int offset) +{ + *(unsigned long *)loc_new += offset; + + return 0; +} + +static int (*reloc_handlers_rel[]) (unsigned long *, unsigned long *, unsigned int) __initdata = { + [R_SW64_REFQUAD] = apply_r_sw64_refquad, +}; + +int __init do_relocations(void *kbase_old, void *kbase_new, unsigned int offset) +{ + unsigned int *r; + unsigned long *loc_orig; + unsigned long *loc_new; + int type; + int res; + + for (r = _relocation_start; r < _relocation_end; r++) { + /* Sentinel for last relocation */ + if (*r == 0) + break; + + type = (*r >> 24) & 0xff; + loc_orig = kbase_old + ((*r & 0x00ffffff) << 2); + loc_new = RELOCATED(loc_orig); + + if (reloc_handlers_rel[type] == NULL) { + /* Unsupported relocation */ + pr_err("Unhandled relocation type %d at 0x%pK\n", + type, loc_orig); + return -ENOEXEC; + } + + res = reloc_handlers_rel[type](loc_orig, loc_new, offset); + if (res) + return res; + } + + return 0; +} + +static int __init relocate_got(unsigned int offset) +{ + unsigned long *got_start, *got_end, *e; + + got_start = RELOCATED(&_got_start); + got_end = RELOCATED(&_got_end); + + for (e = got_start; e < got_end; e++) + *e += offset; + + return 0; +} + +#ifdef CONFIG_RANDOMIZE_BASE + +static inline __init unsigned long rotate_xor(unsigned long hash, + const void *area, size_t size) +{ + size_t i; + unsigned long start, *ptr; + /* Make sure start is 8 byte aligned */ + start = ALIGN((unsigned long)area, 8); + size -= (start - (unsigned long)area); + ptr = (unsigned long *) start; + for (i = 0; i < size / sizeof(hash); i++) { + /* Rotate by odd number of bits and XOR. 
*/ + hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7); + hash ^= ptr[i]; + } + return hash; +} + +static inline __init unsigned long get_random_boot(void) +{ + unsigned long entropy = random_get_entropy(); + unsigned long hash = 0; + + /* Attempt to create a simple but unpredictable starting entropy. */ + hash = rotate_xor(hash, linux_banner, strlen(linux_banner)); + + /* Add in any runtime entropy we can get */ + hash = rotate_xor(hash, &entropy, sizeof(entropy)); + + return hash; +} + +static inline __init bool kaslr_disabled(void) +{ + char *str; + + str = strstr(COMMAND_LINE, "nokaslr"); + if (str == COMMAND_LINE || (str > COMMAND_LINE && *(str - 1) == ' ')) + return true; + + return false; +} + +static unsigned long __init determine_relocation_offset(void) +{ + /* Choose a new address for the kernel */ + unsigned long kernel_length; + unsigned long offset; + + if (kaslr_disabled()) + return 0; + + kernel_length = (unsigned long)_end - (unsigned long)(&_text); + + /* TODO: offset is 64K align. maybe 8KB align is okay. */ + offset = get_random_boot() << 16; + offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1); + if (offset < kernel_length) + offset += ALIGN(kernel_length, 0x10000); + + /* + * TODO:new location should not overlaps initrd, dtb, acpi + * tables, etc. + */ + + if ((KTEXT_MAX - (unsigned long)_end) < offset) + offset = 0; + + return offset; +} + +#else + +static inline unsigned long __init determine_relocation_offset(void) +{ + /* + * Choose a new address for the kernel + * For now we'll hard code the destination offset. 
+ */ + return 0; +} + +#endif + +static inline int __init relocation_offset_valid(unsigned long offset) +{ + unsigned long loc_new = (unsigned long)_text + offset; + + if (loc_new & 0x0000ffff) { + /* Inappropriately aligned new location */ + return 0; + } + if (loc_new < (unsigned long)&_end) { + /* New location overlaps original kernel */ + return 0; + } + return 1; +} + +unsigned int __init relocate_kernel(void) +{ + void *loc_new; + unsigned long kernel_length; + unsigned long bss_length; + unsigned int offset = 0; + int res = 1; + + kernel_length = (unsigned long)(&_relocation_start) - (long)(&_text); + bss_length = (unsigned long)&__bss_stop - (long)&__bss_start; + + offset = determine_relocation_offset(); + /* Reset the command line now so we don't end up with a duplicate */ + + /* Sanity check relocation address */ + if (offset && relocation_offset_valid(offset)) { + + loc_new = RELOCATED(&_text); + /* Copy the kernel to it's new location */ + memcpy(loc_new, &_text, kernel_length); + + /* Perform relocations on the new kernel */ + res = do_relocations(&_text, loc_new, offset); + if (res < 0) + goto out; + + res = relocate_got(offset); + if (res < 0) + goto out; + + /* + * The original .bss has already been cleared, and + * some variables such as command line parameters + * stored to it so make a copy in the new location. + */ + memcpy(RELOCATED(&__bss_start), &__bss_start, bss_length); + + /* + * Last chance for the platform to abort relocation. + * This may also be used by the platform to perform any + * initialisation required now that the new kernel is + * resident in memory and ready to be executed. + */ + if (plat_post_relocation(offset)) + goto out; + + /* Return the new kernel's offset */ + return offset; + } +out: + return 0; +} + +/* + * Show relocation information on panic. 
+ */ +void show_kernel_relocation(const char *level) +{ + unsigned long offset; + + offset = __pa_symbol(_text) - __pa_symbol(_TEXT_START); + + if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) { + printk(level); + pr_cont("Kernel relocated by 0x%pK\n", (void *)offset); + pr_cont(" .text @ 0x%pK\n", _text); + pr_cont(" .data @ 0x%pK\n", _sdata); + pr_cont(" .bss @ 0x%pK\n", __bss_start); + } +} + +static int kernel_location_notifier_fn(struct notifier_block *self, + unsigned long v, void *p) +{ + show_kernel_relocation(KERN_EMERG); + return NOTIFY_DONE; +} + +static struct notifier_block kernel_location_notifier = { + .notifier_call = kernel_location_notifier_fn +}; + +static int __init register_kernel_offset_dumper(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, + &kernel_location_notifier); + return 0; +} +device_initcall(register_kernel_offset_dumper); diff --git a/arch/sw_64/kernel/relocate_kernel.S b/arch/sw_64/kernel/relocate_kernel.S new file mode 100644 index 000000000000..f1a160636212 --- /dev/null +++ b/arch/sw_64/kernel/relocate_kernel.S @@ -0,0 +1,176 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * relocate_kernel.S for kexec + * Created by Jul 2 2019 + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. 
+ */ + +#include +#include + + .align 3 + .globl relocate_new_kernel + .ent relocate_new_kernel + +relocate_new_kernel: + .prologue 0 + ldl a0, arg0 + ldl a1, arg1 + ldl a2, arg2 + ldl a3, arg3 + + ldl s0, kexec_indirection_page + ldl s1, kexec_start_address + +process_entry: + ldl s2, 0(s0) + addl s0, 8, s0 + + /* + * In case of a kdump/crash kernel, the indirection page is not + * populated as the kernel is directly copied to a reserved location + */ + beq s2, done + + /* destination page */ + and s2, 0x1, s3 + beq s3, 1f + bic s2, 0x1, s4/* store destination addr in s4 */ + br $31, process_entry + +1: + /* indirection page, update s0*/ + and s2, 0x2, s3 + beq s3, 1f + bic s2, 0x2, s0 + br $31, process_entry + +1: + /* done page */ + and s2, 0x4, s3 + beq s3, 1f + br $31, done +1: + /* source page */ + and s2, 0x8, s3 + beq s3, process_entry + bic s2, 0x8, s2 + ldi s6, 0x1 + sll s6, (PAGE_SHIFT - 3), s6 + +copy_word: + /* copy page word by word */ + ldl s5, 0(s2) + stl s5, 0(s4) + addl s4, 8, s4 + addl s2, 8, s2 + subl s6, 1, s6 + beq s6, process_entry + br $31, copy_word + br $31, process_entry + +done: +#ifdef CONFIG_CRASH_SMP /* unsupported now!!!! */ + /* kexec_flag reset is signal to other CPUs what kernel + * was moved to it's location. Note - we need relocated address + * of kexec_flag. + */ + + br ra, 1f +1: mov ra, t1 + ldi t2, 1b + ldi t0, kexec_flag + subl t0, t2, t0 + addl t1, t0, t0 + stl zero, 0(t0) +#endif + memb + jmp ra, (s1) + .end relocate_new_kernel + .size relocate_new_kernel, .-relocate_new_kernel + +#ifdef CONFIG_CRASH_SMP + /* + * Other CPUs should wait until code is relocated and + * then start at entry (?) point. + */ + .align 3 + .globl kexec_smp_wait + .ent kexec_smp_wait +kexec_smp_wait: + ldl a0, s_arg0 + ldl a1, s_arg1 + ldl a2, s_arg2 + ldl a3, s_arg3 + ldl s1, kexec_start_address + + /* Non-relocated address works for args and kexec_start_address (old + * kernel is not overwritten). But we need relocated address of + * kexec_flag. 
+ */ + + bsr ra, 1f +1: mov ra, t1 + ldi t2, 1b + ldi t0, kexec_flag + subl t0, t2, t0 + addl t1, t0, t0 + +1: stl s0, 0(t0) + bne s0, 1b + memb + jmp ra, (s1) + .end kexec_smp_wait + .size kexec_smp_wait, .-kexec_smp_wait +#endif + + .align 3 + + /* All parameters to new kernel are passed in registers a0-a3. + * kexec_args[0..3] are uses to prepare register values. + */ + +kexec_args: + .globl kexec_args +arg0: .quad 0x0 +arg1: .quad 0x0 +arg2: .quad 0x0 +arg3: .quad 0x0 + .size kexec_args, 8*4 + +#ifdef CONFIG_CRASH_SMP + /* + * Secondary CPUs may have different kernel parameters in + * their registers a0-a3. secondary_kexec_args[0..3] are used + * to prepare register values. + */ +secondary_kexec_args: + .globl secondary_kexec_args +s_arg0: .quad 0x0 +s_arg1: .quad 0x0 +s_arg2: .quad 0x0 +s_arg3: .quad 0x0 + .size secondary_kexec_args, 8*4 + +kexec_flag: + .quad 0x1 +#endif + +kexec_start_address: + .globl kexec_start_address + .quad 0x0 + .size kexec_start_address, 8 + +kexec_indirection_page: + .globl kexec_indirection_page + .quad 0 + .size kexec_indirection_page, 8 + +relocate_new_kernel_end: + +relocate_new_kernel_size: + .global relocate_new_kernel_size + .quad relocate_new_kernel_end - relocate_new_kernel + .size relocate_new_kernel_size, 8 diff --git a/arch/sw_64/tools/.gitignore b/arch/sw_64/tools/.gitignore new file mode 100644 index 000000000000..f73e86272b76 --- /dev/null +++ b/arch/sw_64/tools/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +relocs diff --git a/arch/sw_64/tools/Makefile b/arch/sw_64/tools/Makefile new file mode 100644 index 000000000000..66f55b035e22 --- /dev/null +++ b/arch/sw_64/tools/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 + +hostprogs += relocs +relocs-objs += relocs.o +relocs-objs += relocs_main.o +PHONY += relocs +relocs: $(obj)/relocs + @: diff --git a/arch/sw_64/tools/relocs.c b/arch/sw_64/tools/relocs.c new file mode 100644 index 000000000000..ec0ed422a836 --- /dev/null +++ 
b/arch/sw_64/tools/relocs.c @@ -0,0 +1,635 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "relocs.h" + +#define ELF_BITS 64 + +#define ELF_MACHINE EM_SW64 +#define ELF_MACHINE_NAME "SW64" +#define SHT_REL_TYPE SHT_RELA +#define Elf_Rel Elf64_Rela + +#define ELF_CLASS ELFCLASS64 +#define ELF_R_SYM(val) ELF64_R_SYM(val) +#define ELF_R_TYPE(val) ELF64_R_TYPE(val) +#define ELF_ST_TYPE(o) ELF64_ST_TYPE(o) +#define ELF_ST_BIND(o) ELF64_ST_BIND(o) +#define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o) + +#define ElfW(type) _ElfW(ELF_BITS, type) +#define _ElfW(bits, type) __ElfW(bits, type) +#define __ElfW(bits, type) Elf##bits##_##type + +#define Elf_Addr ElfW(Addr) +#define Elf_Ehdr ElfW(Ehdr) +#define Elf_Phdr ElfW(Phdr) +#define Elf_Shdr ElfW(Shdr) +#define Elf_Sym ElfW(Sym) + +static Elf_Ehdr ehdr; + +struct relocs { + uint32_t *offset; + unsigned long count; + unsigned long size; +}; + +static struct relocs relocs; + +struct section { + Elf_Shdr shdr; + struct section *link; + Elf_Sym *symtab; + Elf_Rel *reltab; + char *strtab; + long shdr_offset; +}; +static struct section *secs; + +static const char * const regex_sym_kernel = { +/* Symbols matching these regex's should never be relocated */ + "^(__crc_)", +}; + +static regex_t sym_regex_c; + +static int regex_skip_reloc(const char *sym_name) +{ + return !regexec(&sym_regex_c, sym_name, 0, NULL, 0); +} + +static void regex_init(void) +{ + char errbuf[128]; + int err; + + err = regcomp(&sym_regex_c, regex_sym_kernel, + REG_EXTENDED|REG_NOSUB); + + if (err) { + regerror(err, &sym_regex_c, errbuf, sizeof(errbuf)); + die("%s", errbuf); + } +} + +static const char *rel_type(unsigned int type) +{ + static const char * const type_name[] = { +#define REL_TYPE(X)[X] = #X + REL_TYPE(R_SW64_NONE), + REL_TYPE(R_SW64_REFQUAD), + REL_TYPE(R_SW64_LITERAL), + REL_TYPE(R_SW64_LITUSE), + REL_TYPE(R_SW64_GPDISP), + REL_TYPE(R_SW64_BRADDR), + REL_TYPE(R_SW64_HINT), + REL_TYPE(R_SW64_SREL32), + REL_TYPE(R_SW64_GPRELHIGH), + 
REL_TYPE(R_SW64_GPRELLOW), +#undef REL_TYPE + }; + const char *name = "unknown type rel type name"; + + if (type < ARRAY_SIZE(type_name) && type_name[type]) + name = type_name[type]; + return name; +} + +static const char *sec_name(unsigned int shndx) +{ + const char *sec_strtab; + const char *name; + + sec_strtab = secs[ehdr.e_shstrndx].strtab; + if (shndx < ehdr.e_shnum) + name = sec_strtab + secs[shndx].shdr.sh_name; + else if (shndx == SHN_ABS) + name = "ABSOLUTE"; + else if (shndx == SHN_COMMON) + name = "COMMON"; + else + name = ""; + return name; +} + +static struct section *sec_lookup(const char *secname) +{ + int i; + + for (i = 0; i < ehdr.e_shnum; i++) + if (strcmp(secname, sec_name(i)) == 0) + return &secs[i]; + + return NULL; +} + +static const char *sym_name(const char *sym_strtab, Elf_Sym *sym) +{ + const char *name; + + if (sym->st_name) + name = sym_strtab + sym->st_name; + else + name = sec_name(sym->st_shndx); + return name; +} + +#define le16_to_cpu(val) (val) +#define le32_to_cpu(val) (val) +#define le64_to_cpu(val) (val) + +#define cpu_to_le16(val) (val) +#define cpu_to_le32(val) (val) +#define cpu_to_le64(val) (val) + +static uint16_t elf16_to_cpu(uint16_t val) +{ + return le16_to_cpu(val); +} + +static uint32_t elf32_to_cpu(uint32_t val) +{ + return le32_to_cpu(val); +} + +static uint32_t cpu_to_elf32(uint32_t val) +{ + return cpu_to_le32(val); +} + +#define elf_half_to_cpu(x) elf16_to_cpu(x) +#define elf_word_to_cpu(x) elf32_to_cpu(x) + +#if ELF_BITS == 64 +static uint64_t elf64_to_cpu(uint64_t val) +{ + return le64_to_cpu(val); +} +#define elf_addr_to_cpu(x) elf64_to_cpu(x) +#define elf_off_to_cpu(x) elf64_to_cpu(x) +#define elf_xword_to_cpu(x) elf64_to_cpu(x) +#else +#define elf_addr_to_cpu(x) elf32_to_cpu(x) +#define elf_off_to_cpu(x) elf32_to_cpu(x) +#define elf_xword_to_cpu(x) elf32_to_cpu(x) +#endif + +static void read_ehdr(FILE *fp) +{ + if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) + die("Cannot read ELF header: %s\n", 
strerror(errno)); + + if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) + die("No ELF magic\n"); + + if (ehdr.e_ident[EI_CLASS] != ELF_CLASS) + die("Not a %d bit executable\n", ELF_BITS); + + if ((ehdr.e_ident[EI_DATA] != ELFDATA2LSB) && + (ehdr.e_ident[EI_DATA] != ELFDATA2MSB)) + die("Unknown ELF Endianness\n"); + + if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) + die("Unknown ELF version\n"); + + /* Convert the fields to native endian */ + ehdr.e_type = elf_half_to_cpu(ehdr.e_type); + ehdr.e_machine = elf_half_to_cpu(ehdr.e_machine); + ehdr.e_version = elf_word_to_cpu(ehdr.e_version); + ehdr.e_entry = elf_addr_to_cpu(ehdr.e_entry); + ehdr.e_phoff = elf_off_to_cpu(ehdr.e_phoff); + ehdr.e_shoff = elf_off_to_cpu(ehdr.e_shoff); + ehdr.e_flags = elf_word_to_cpu(ehdr.e_flags); + ehdr.e_ehsize = elf_half_to_cpu(ehdr.e_ehsize); + ehdr.e_phentsize = elf_half_to_cpu(ehdr.e_phentsize); + ehdr.e_phnum = elf_half_to_cpu(ehdr.e_phnum); + ehdr.e_shentsize = elf_half_to_cpu(ehdr.e_shentsize); + ehdr.e_shnum = elf_half_to_cpu(ehdr.e_shnum); + ehdr.e_shstrndx = elf_half_to_cpu(ehdr.e_shstrndx); + + if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) + die("Unsupported ELF header type\n"); + + if (ehdr.e_machine != ELF_MACHINE) + die("Not for %s\n", ELF_MACHINE_NAME); + + if (ehdr.e_version != EV_CURRENT) + die("Unknown ELF version\n"); + + if (ehdr.e_ehsize != sizeof(Elf_Ehdr)) + die("Bad Elf header size\n"); + + if (ehdr.e_phentsize != sizeof(Elf_Phdr)) + die("Bad program header entry\n"); + + if (ehdr.e_shentsize != sizeof(Elf_Shdr)) + die("Bad section header entry\n"); + + if (ehdr.e_shstrndx >= ehdr.e_shnum) + die("String table index out of bounds\n"); +} + +static void read_shdrs(FILE *fp) +{ + int i; + Elf_Shdr shdr; + + secs = calloc(ehdr.e_shnum, sizeof(struct section)); + if (!secs) + die("Unable to allocate %d section headers\n", ehdr.e_shnum); + + if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", ehdr.e_shoff, strerror(errno)); + + for (i = 0; 
i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + sec->shdr_offset = ftell(fp); + if (fread(&shdr, sizeof(shdr), 1, fp) != 1) + die("Cannot read ELF section headers %d/%d: %s\n", + i, ehdr.e_shnum, strerror(errno)); + sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name); + sec->shdr.sh_type = elf_word_to_cpu(shdr.sh_type); + sec->shdr.sh_flags = elf_xword_to_cpu(shdr.sh_flags); + sec->shdr.sh_addr = elf_addr_to_cpu(shdr.sh_addr); + sec->shdr.sh_offset = elf_off_to_cpu(shdr.sh_offset); + sec->shdr.sh_size = elf_xword_to_cpu(shdr.sh_size); + sec->shdr.sh_link = elf_word_to_cpu(shdr.sh_link); + sec->shdr.sh_info = elf_word_to_cpu(shdr.sh_info); + sec->shdr.sh_addralign = elf_xword_to_cpu(shdr.sh_addralign); + sec->shdr.sh_entsize = elf_xword_to_cpu(shdr.sh_entsize); + if (sec->shdr.sh_link < ehdr.e_shnum) + sec->link = &secs[sec->shdr.sh_link]; + } +} + +static void read_strtabs(FILE *fp) +{ + int i; + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_STRTAB) + continue; + + sec->strtab = malloc(sec->shdr.sh_size); + if (!sec->strtab) + die("malloc of %d bytes for strtab failed\n", + sec->shdr.sh_size); + + if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr.sh_offset, strerror(errno)); + + if (fread(sec->strtab, 1, sec->shdr.sh_size, fp) != + sec->shdr.sh_size) + die("Cannot read symbol table: %s\n", strerror(errno)); + } +} + +static void read_symtabs(FILE *fp) +{ + int i, j; + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_SYMTAB) + continue; + + sec->symtab = malloc(sec->shdr.sh_size); + if (!sec->symtab) + die("malloc of %d bytes for symtab failed\n", + sec->shdr.sh_size); + + if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr.sh_offset, strerror(errno)); + + if (fread(sec->symtab, 1, sec->shdr.sh_size, fp) != + sec->shdr.sh_size) + die("Cannot read 
symbol table: %s\n", strerror(errno)); + + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) { + Elf_Sym *sym = &sec->symtab[j]; + + sym->st_name = elf_word_to_cpu(sym->st_name); + sym->st_value = elf_addr_to_cpu(sym->st_value); + sym->st_size = elf_xword_to_cpu(sym->st_size); + sym->st_shndx = elf_half_to_cpu(sym->st_shndx); + } + } +} + +static void read_relocs(FILE *fp) +{ + static unsigned long base; + int i, j; + + if (!base) { + struct section *sec = sec_lookup(".text"); + + if (!sec) + die("Could not find .text section\n"); + + base = sec->shdr.sh_addr; + } + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL_TYPE) + continue; + + sec->reltab = malloc(sec->shdr.sh_size); + if (!sec->reltab) + die("malloc of %d bytes for relocs failed\n", + sec->shdr.sh_size); + + if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr.sh_offset, strerror(errno)); + + if (fread(sec->reltab, 1, sec->shdr.sh_size, fp) != + sec->shdr.sh_size) + die("Cannot read symbol table: %s\n", strerror(errno)); + + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { + Elf_Rel *rel = &sec->reltab[j]; + + rel->r_offset = elf_addr_to_cpu(rel->r_offset); + /* Set offset into kernel image */ + rel->r_offset -= base; + /* Convert SW64 RELA format - only the symbol + * index needs converting to native endianness + */ + rel->r_info = elf_xword_to_cpu(rel->r_info); +#if (SHT_REL_TYPE == SHT_RELA) + rel->r_addend = elf_xword_to_cpu(rel->r_addend); +#endif + } + } +} + +static void remove_relocs(FILE *fp) +{ + int i; + Elf_Shdr shdr; + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL_TYPE) + continue; + + if (fseek(fp, sec->shdr_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr_offset, strerror(errno)); + + if (fread(&shdr, sizeof(shdr), 1, fp) != 1) + die("Cannot read ELF section headers %d/%d: %s\n", + i, 
ehdr.e_shnum, strerror(errno)); + + /* Set relocation section size to 0, effectively removing it. + * This is necessary due to lack of support for relocations + * in objcopy when creating 32bit elf from 64bit elf. + */ + shdr.sh_size = 0; + + if (fseek(fp, sec->shdr_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr_offset, strerror(errno)); + + if (fwrite(&shdr, sizeof(shdr), 1, fp) != 1) + die("Cannot write ELF section headers %d/%d: %s\n", + i, ehdr.e_shnum, strerror(errno)); + } +} + +static void add_reloc(struct relocs *r, uint32_t offset, unsigned int type) +{ + /* Relocation representation in binary table: + * |76543210|76543210|76543210|76543210| + * | Type | offset from _text >> 2 | + */ + offset >>= 2; + if (offset > 0x00FFFFFF) + die("Kernel image exceeds maximum size for relocation!\n"); + + offset = (offset & 0x00FFFFFF) | ((type & 0xFF) << 24); + + if (r->count == r->size) { + unsigned long newsize = r->size + 50000; + void *mem = realloc(r->offset, newsize * sizeof(r->offset[0])); + + if (!mem) + die("realloc failed\n"); + + r->offset = mem; + r->size = newsize; + } + r->offset[r->count++] = offset; +} + +static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel, + Elf_Sym *sym, const char *symname)) +{ + int i; + + /* Walk through the relocations */ + for (i = 0; i < ehdr.e_shnum; i++) { + char *sym_strtab; + Elf_Sym *sh_symtab; + struct section *sec_applies, *sec_symtab; + int j; + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL_TYPE) + continue; + sec_symtab = sec->link; + sec_applies = &secs[sec->shdr.sh_info]; + if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) + continue; + + sh_symtab = sec_symtab->symtab; + sym_strtab = sec_symtab->link->strtab; + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { + Elf_Rel *rel = &sec->reltab[j]; + Elf_Sym *sym = &sh_symtab[ELF_R_SYM(rel->r_info)]; + const char *symname = sym_name(sym_strtab, sym); + + process(sec, rel, sym, symname); + } + } +} + 
+static int do_reloc(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, + const char *symname) +{ + unsigned int r_type = ELF_R_TYPE(rel->r_info); + unsigned int bind = ELF_ST_BIND(sym->st_info); + + if ((bind == STB_WEAK) && (sym->st_value == 0)) { + /* Don't relocate weak symbols without a target */ + return 0; + } + + if (regex_skip_reloc(symname)) + return 0; + + switch (r_type) { + case R_SW64_NONE: + case R_SW64_LITERAL: /* relocated by GOT */ + case R_SW64_LITUSE: + case R_SW64_GPDISP: + case R_SW64_BRADDR: + case R_SW64_HINT: + case R_SW64_SREL32: + case R_SW64_GPRELHIGH: + case R_SW64_GPRELLOW: + case R_SW64_LITERAL_GOT: + /* + * NONE can be ignored and PC relative relocations don't + * need to be adjusted. + */ + break; + + case R_SW64_REFQUAD: + add_reloc(&relocs, rel->r_offset, r_type); + break; + + default: + die("Unsupported relocation type: %s (%d)\n", + rel_type(r_type), r_type); + break; + } + + return 0; +} + +static int write_reloc_as_bin(uint32_t v, FILE *f) +{ + unsigned char buf[4]; + + v = cpu_to_elf32(v); + + memcpy(buf, &v, sizeof(uint32_t)); + return fwrite(buf, 1, 4, f); +} + +static int write_reloc_as_text(uint32_t v, FILE *f) +{ + int res; + + res = fprintf(f, "\t.long 0x%08"PRIx32"\n", v); + if (res < 0) + return res; + else + return sizeof(uint32_t); +} + +static void emit_relocs(int as_text, int as_bin, FILE *outf) +{ + int i; + int (*write_reloc)(uint32_t, FILE *) = write_reloc_as_bin; + int size = 0; + int size_reserved; + struct section *sec_reloc; + + sec_reloc = sec_lookup(".data.reloc"); + if (!sec_reloc) + die("Could not find relocation section\n"); + + size_reserved = sec_reloc->shdr.sh_size; + /* Collect up the relocations */ + walk_relocs(do_reloc); + + /* Print the relocations */ + if (as_text) { + /* Print the relocations in a form suitable that + * gas will like. 
+ */ + printf(".section \".data.reloc\",\"a\"\n"); + printf(".balign 8\n"); + /* Output text to stdout */ + write_reloc = write_reloc_as_text; + outf = stdout; + } else if (as_bin) { + /* Output raw binary to stdout */ + outf = stdout; + } else { + /* + * Seek to offset of the relocation section. + * Each relocation is then written into the + * vmlinux kernel image. + */ + if (fseek(outf, sec_reloc->shdr.sh_offset, SEEK_SET) < 0) { + die("Seek to %d failed: %s\n", + sec_reloc->shdr.sh_offset, strerror(errno)); + } + } + + for (i = 0; i < relocs.count; i++) + size += write_reloc(relocs.offset[i], outf); + + /* Print a stop, but only if we've actually written some relocs */ + if (size) + size += write_reloc(0, outf); + + if (size > size_reserved) + /* + * Die, but suggest a value for CONFIG_RELOCATION_TABLE_SIZE + * which will fix this problem and allow a bit of headroom + * if more kernel features are enabled + */ + die("Relocations overflow available space!\n" + "Please adjust CONFIG_RELOCATION_TABLE_SIZE " + "to at least 0x%08x\n", (size + 0x1000) & ~0xFFF); +} + +/* + * As an aid to debugging problems with different linkers + * print summary information about the relocs. + * Since different linkers tend to emit the sections in + * different orders we use the section names in the output. 
+ */ +static int do_reloc_info(struct section *sec, Elf_Rel *rel, ElfW(Sym) * sym, + const char *symname) +{ + printf("%16s 0x%x %16s %40s %16s\n", + sec_name(sec->shdr.sh_info), + (unsigned int)rel->r_offset, + rel_type(ELF_R_TYPE(rel->r_info)), + symname, + sec_name(sym->st_shndx)); + return 0; +} + +static void print_reloc_info(void) +{ + printf("%16s %10s %16s %40s %16s\n", + "reloc section", + "offset", + "reloc type", + "symbol", + "symbol section"); + walk_relocs(do_reloc_info); +} + +void process(FILE *fp, int as_text, int as_bin, + int show_reloc_info, int keep_relocs) +{ + regex_init(); + read_ehdr(fp); + read_shdrs(fp); + read_strtabs(fp); + read_symtabs(fp); + read_relocs(fp); + if (show_reloc_info) { + print_reloc_info(); + return; + } + emit_relocs(as_text, as_bin, fp); + if (!keep_relocs) + remove_relocs(fp); +} diff --git a/arch/sw_64/tools/relocs.h b/arch/sw_64/tools/relocs.h new file mode 100644 index 000000000000..17c7e31113a0 --- /dev/null +++ b/arch/sw_64/tools/relocs.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SW64_TOOLS_RELOCS_H +#define _SW64_TOOLS_RELOCS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define USE_BSD +#include +#include + +#define EM_SW64 0x9916 +/* + * SW64 ELF relocation types + */ +#define R_SW64_NONE 0 /* No reloc */ +#define R_SW64_REFLONG 1 /* Direct 32 bit */ +#define R_SW64_REFQUAD 2 /* Direct 64 bit */ +#define R_SW64_GPREL32 3 /* GP relative 32 bit */ +#define R_SW64_LITERAL 4 /* GP relative 16 bit w/optimization */ +#define R_SW64_LITUSE 5 /* Optimization hint for LITERAL */ +#define R_SW64_GPDISP 6 /* Add displacement to GP */ +#define R_SW64_BRADDR 7 /* PC+4 relative 23 bit shifted */ +#define R_SW64_HINT 8 /* PC+4 relative 16 bit shifted */ +#define R_SW64_SREL16 9 /* PC relative 16 bit */ +#define R_SW64_SREL32 10 /* PC relative 32 bit */ +#define R_SW64_SREL64 11 /* PC relative 64 bit */ +#define R_SW64_GPRELHIGH 17 /* GP 
relative 32 bit, high 16 bits */ +#define R_SW64_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ +#define R_SW64_GPREL16 19 /* GP relative 16 bit */ +#define R_SW64_COPY 24 /* Copy symbol at runtime */ +#define R_SW64_GLOB_DAT 25 /* Create GOT entry */ +#define R_SW64_JMP_SLOT 26 /* Create PLT entry */ +#define R_SW64_RELATIVE 27 /* Adjust by program base */ +#define R_SW64_BRSGP 28 +#define R_SW64_TLSGD 29 +#define R_SW64_TLS_LDM 30 +#define R_SW64_DTPMOD64 31 +#define R_SW64_GOTDTPREL 32 +#define R_SW64_DTPREL64 33 +#define R_SW64_DTPRELHI 34 +#define R_SW64_DTPRELLO 35 +#define R_SW64_DTPREL16 36 +#define R_SW64_GOTTPREL 37 +#define R_SW64_TPREL64 38 +#define R_SW64_TPRELHI 39 +#define R_SW64_TPRELLO 40 +#define R_SW64_TPREL16 41 +#define R_SW64_LITERAL_GOT 43 /* GP relative */ + +void die(char *fmt, ...); + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +enum symtype { + S_ABS, + S_REL, + S_SEG, + S_LIN, + S_NSYMTYPES +}; + +void process(FILE *fp, int as_text, int as_bin, + int show_reloc_info, int keep_relocs); +#endif /* _SW64_TOOLS_RELOCS_H */ diff --git a/arch/sw_64/tools/relocs_main.c b/arch/sw_64/tools/relocs_main.c new file mode 100644 index 000000000000..30a830a070db --- /dev/null +++ b/arch/sw_64/tools/relocs_main.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "relocs.h" + +void die(char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); + exit(1); +} + +static void usage(void) +{ + die("relocs [--reloc-info|--text|--bin|--keep] vmlinux\n"); +} + +int main(int argc, char **argv) +{ + int show_reloc_info, as_text, as_bin, keep_relocs; + const char *fname; + FILE *fp; + int i; + unsigned char e_ident[EI_NIDENT]; + + show_reloc_info = 0; + as_text = 0; + as_bin = 0; + keep_relocs = 0; + fname = NULL; + for (i = 1; i < argc; i++) { + char *arg = argv[i]; + + if (*arg == '-') { + if (strcmp(arg, "--reloc-info") == 0) { + show_reloc_info = 1; + continue; + } + if (strcmp(arg, "--text") == 0) { + as_text = 1; + continue; + } + if (strcmp(arg, "--bin") == 0) { + as_bin = 1; + continue; + } + if (strcmp(arg, "--keep") == 0) { + keep_relocs = 1; + continue; + } + } else if (!fname) { + fname = arg; + continue; + } + usage(); + } + if (!fname) + usage(); + + fp = fopen(fname, "r+"); + if (!fp) + die("Cannot open %s: %s\n", fname, strerror(errno)); + + if (fread(&e_ident, 1, EI_NIDENT, fp) != EI_NIDENT) + die("Cannot read %s: %s", fname, strerror(errno)); + + rewind(fp); + if (e_ident[EI_CLASS] == ELFCLASS64) + process(fp, as_text, as_bin, show_reloc_info, keep_relocs); + else + die("Unsupport ELF class on SW64: %s", fname); + //process_32(fp, as_text, as_bin, show_reloc_info, keep_relocs); + fclose(fp); + return 0; +} -- Gitee From 3c8e6930fa12f8099b1dbbaa51d158f2ae6365b5 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:18 +0800 Subject: [PATCH 0316/2138] anolis: sw64: add kprobe support ANBZ: #4688 Add kprobe support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/kprobes.h | 76 +++++ arch/sw_64/kernel/insn.c | 110 ++++++++ arch/sw_64/kernel/kprobes/Makefile | 3 + arch/sw_64/kernel/kprobes/common.h | 9 + arch/sw_64/kernel/kprobes/decode-insn.c | 101 +++++++ arch/sw_64/kernel/kprobes/kprobes-ftrace.c | 48 ++++ arch/sw_64/kernel/kprobes/kprobes.c | 309 +++++++++++++++++++++ 7 files changed, 656 insertions(+) create mode 100644 arch/sw_64/include/asm/kprobes.h create mode 100644 arch/sw_64/kernel/insn.c create mode 100644 arch/sw_64/kernel/kprobes/Makefile create mode 100644 arch/sw_64/kernel/kprobes/common.h create mode 100644 arch/sw_64/kernel/kprobes/decode-insn.c create mode 100644 arch/sw_64/kernel/kprobes/kprobes-ftrace.c create mode 100644 arch/sw_64/kernel/kprobes/kprobes.c diff --git a/arch/sw_64/include/asm/kprobes.h b/arch/sw_64/include/asm/kprobes.h new file mode 100644 index 000000000000..0c7be8109ed2 --- /dev/null +++ b/arch/sw_64/include/asm/kprobes.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Kernel Probes (KProbes) + * Based on arch/mips/include/asm/kprobes.h + */ + +#ifndef _ASM_SW64_KPROBES_H +#define _ASM_SW64_KPROBES_H + +#include + +#define BREAK_KPROBE 0x40ffffff +#define BREAK_KPROBE_SS 0x40fffeff + +#ifdef CONFIG_KPROBES +#include +#include + +#include +#include + +#define __ARCH_WANT_KPROBES_INSN_SLOT + +struct kprobe; +struct pt_regs; + +typedef u32 kprobe_opcode_t; + +#define MAX_INSN_SIZE 2 + +#define flush_insn_slot(p) \ +do { \ + if (p->addr) \ + flush_icache_range((unsigned long)p->addr, \ + (unsigned long)p->addr + \ + (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \ +} while (0) + + +#define kretprobe_blacklist_size 0 + +void arch_remove_kprobe(struct kprobe *p); + +/* Architecture specific copy of original instruction*/ +struct arch_specific_insn { + /* copy of the original 
instruction */ + kprobe_opcode_t *insn; + /* + * Set in kprobes code, initially to 0. If the instruction can be + * eumulated, this is set to 1, if not, to -1. + */ + int boostable; +}; + +struct prev_kprobe { + struct kprobe *kp; + unsigned long status; +}; + +#define SKIP_DELAYSLOT 0x0001 + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned long kprobe_status; + /* Per-thread fields, used while emulating branches */ + unsigned long flags; + unsigned long target_pc; + struct prev_kprobe prev_kprobe; +}; +extern int kprobe_handler(struct pt_regs *regs); +extern int post_kprobe_handler(struct pt_regs *regs); +extern int kprobe_fault_handler(struct pt_regs *regs, unsigned long mmcsr); + + +#endif /* CONFIG_KPROBES */ +#endif /* _ASM_SW64_KPROBES_H */ diff --git a/arch/sw_64/kernel/insn.c b/arch/sw_64/kernel/insn.c new file mode 100644 index 000000000000..281578e1bfc0 --- /dev/null +++ b/arch/sw_64/kernel/insn.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019, serveros, linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include +#include + +//static DEFINE_RAW_SPINLOCK(patch_lock); + +int __kprobes sw64_insn_read(void *addr, u32 *insnp) +{ + int ret; + __le32 val; + + ret = copy_from_kernel_nofault(&val, addr, SW64_INSN_SIZE); + if (!ret) + *insnp = le32_to_cpu(val); + + return ret; +} + +static int __kprobes __sw64_insn_write(void *addr, __le32 insn) +{ + void *waddr = addr; + int ret; + + //raw_spin_lock_irqsave(&patch_lock, flags); + + ret = copy_to_kernel_nofault(waddr, &insn, SW64_INSN_SIZE); + + //raw_spin_unlock_irqrestore(&patch_lock, flags); + + return ret; +} + +static int __kprobes __sw64_insn_double_write(void *addr, __le64 insn) +{ + void *waddr = addr; + //unsigned long flags = 0; + int ret; + + //raw_spin_lock_irqsave(&patch_lock, flags); + + ret = copy_to_kernel_nofault(waddr, &insn, 2 * SW64_INSN_SIZE); + + //raw_spin_unlock_irqrestore(&patch_lock, flags); + + return ret; +} + +int __kprobes sw64_insn_write(void *addr, u32 insn) +{ + u32 *tp = addr; + /* SW64 instructions must be word aligned */ + if ((uintptr_t)tp & 0x3) + return -EINVAL; + return __sw64_insn_write(addr, cpu_to_le32(insn)); +} + +int __kprobes sw64_insn_double_write(void *addr, u64 insn) +{ + u32 *tp = addr; + /* SW64 instructions must be word aligned */ + if ((uintptr_t)tp & 0x3) + return -EINVAL; + return __sw64_insn_double_write(addr, cpu_to_le64(insn)); +} +unsigned int __kprobes sw64_insn_nop(void) +{ + return SW64_BIS(R31, R31, R31); +} + +unsigned int __kprobes sw64_insn_call(unsigned int ra, unsigned int rb) +{ + return SW64_CALL(ra, rb, 0); +} + +unsigned int __kprobes sw64_insn_sys_call(unsigned int num) +{ + return SW64_SYS_CALL(num); +} + +/* 'pc' is the address of br instruction, not the +4 PC. 'new_pc' is the target address. 
*/ +unsigned int __kprobes sw64_insn_br(unsigned int ra, unsigned long pc, unsigned long new_pc) +{ + int offset = new_pc - pc; + unsigned int disp, minus = 0x1fffff; + + if (!(offset <= BR_MAX_DISP && offset >= -BR_MAX_DISP)) + return -1; + if (offset > 0) + disp = (offset - 4) / 4; + else + disp = ~(-offset / 4) & minus; + + return SW64_BR(ra, disp); + +} diff --git a/arch/sw_64/kernel/kprobes/Makefile b/arch/sw_64/kernel/kprobes/Makefile new file mode 100644 index 000000000000..110ba2bf7752 --- /dev/null +++ b/arch/sw_64/kernel/kprobes/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o +obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o diff --git a/arch/sw_64/kernel/kprobes/common.h b/arch/sw_64/kernel/kprobes/common.h new file mode 100644 index 000000000000..de10058f0376 --- /dev/null +++ b/arch/sw_64/kernel/kprobes/common.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SW64_KERNEL_KPROBES_COMMON_H +#define _SW64_KERNEL_KPROBES_COMMON_H + + +extern bool sw64_insn_can_kprobe(kprobe_opcode_t *addr); + + +#endif /* _SW64_KERNEL_KPROBES_COMMON_H */ diff --git a/arch/sw_64/kernel/kprobes/decode-insn.c b/arch/sw_64/kernel/kprobes/decode-insn.c new file mode 100644 index 000000000000..91c31111f2b7 --- /dev/null +++ b/arch/sw_64/kernel/kprobes/decode-insn.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on arch/arm64/kernel/probes/decode-insn.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include + +#include "common.h" + +static bool __kprobes sw64_insn_is_steppable(u32 insn) +{ + /* + * Branch instructions will write a new value into the PC which is + * likely to be relative to the XOL address and therefore invalid. + * Deliberate generation of an exception during stepping is also not + * currently safe. Lastly, MSR instructions can do any number of nasty + * things we can't handle during single-stepping. + */ + if (sw64_insn_is_sys_call_b(insn) || + sw64_insn_is_sys_call(insn) || + sw64_insn_is_call(insn) || + sw64_insn_is_ret(insn) || + sw64_insn_is_jmp(insn) || + sw64_insn_is_br(insn) || + sw64_insn_is_bsr(insn) || + sw64_insn_is_memb(insn) || + sw64_insn_is_imemb(insn) || + sw64_insn_is_rtc(insn) || + sw64_insn_is_lldl(insn) || + sw64_insn_is_lldw(insn) || + sw64_insn_is_beq(insn) || + sw64_insn_is_bne(insn) || + sw64_insn_is_blt(insn) || + sw64_insn_is_ble(insn) || + sw64_insn_is_bgt(insn) || + sw64_insn_is_bge(insn) || + sw64_insn_is_blbc(insn) || + sw64_insn_is_blbs(insn) || + sw64_insn_is_fbeq(insn) || + sw64_insn_is_fbne(insn) || + sw64_insn_is_fblt(insn) || + sw64_insn_is_fble(insn) || + sw64_insn_is_fbgt(insn) || + sw64_insn_is_fbge(insn)) + return false; + + return true; +} + + +#ifdef CONFIG_KPROBES +// lldl rd_f +static bool __kprobes is_probed_between_atomic(kprobe_opcode_t *addr) +{ + int count = 0; + unsigned long size = 0, offset = 0; + kprobe_opcode_t *scan_start = NULL; + + if (kallsyms_lookup_size_offset((unsigned long)addr, &size, &offset)) + scan_start = addr - (offset / sizeof(kprobe_opcode_t)); + + while (scan_start < addr) { + if (sw64_insn_is_lldl(le32_to_cpu(*scan_start)) || + sw64_insn_is_lldw(le32_to_cpu(*scan_start))) + count++; + if (sw64_insn_is_rd_f(le32_to_cpu(*scan_start))) + count--; + scan_start++; + } + if (count) + return false; + + return true; +} + +bool __kprobes sw64_insn_can_kprobe(kprobe_opcode_t *addr) +{ + u32 insn = le32_to_cpu(*addr); + + if (!sw64_insn_is_steppable(insn)) { + pr_warn("addr 
is not steppable\n"); + return false; + } +#ifdef CONFIG_SUBARCH_C3B + if (!is_probed_between_atomic(addr)) { + pr_warn("addr between atomic can't probe\n"); + return false; + } +#endif + return true; +} +#endif diff --git a/arch/sw_64/kernel/kprobes/kprobes-ftrace.c b/arch/sw_64/kernel/kprobes/kprobes-ftrace.c new file mode 100644 index 000000000000..89d7dba9dc25 --- /dev/null +++ b/arch/sw_64/kernel/kprobes/kprobes-ftrace.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Dynamic Ftrace based Kprobes Optimization + */ + +#include +#include +#include +#include +#include + +/* Ftrace callback handler for kprobes */ +void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct pt_regs *regs) +{ + struct kprobe *p; + struct kprobe_ctlblk *kcb; + + p = get_kprobe((kprobe_opcode_t *)ip); + if (unlikely(!p) || kprobe_disabled(p)) + return; + + kcb = get_kprobe_ctlblk(); + if (kprobe_running()) { + kprobes_inc_nmissed_count(p); + } else { + regs->regs[28] -= MCOUNT_INSN_SIZE; + + __this_cpu_write(current_kprobe, p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + if (!p->pre_handler || !p->pre_handler(p, regs)) { + regs->regs[28] += MCOUNT_INSN_SIZE; + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, regs, 0); + } + } + __this_cpu_write(current_kprobe, NULL); + } +} +NOKPROBE_SYMBOL(kprobe_ftrace_handler); + +int arch_prepare_kprobe_ftrace(struct kprobe *p) +{ + p->ainsn.insn = NULL; + p->ainsn.boostable = -1; + return 0; +} diff --git a/arch/sw_64/kernel/kprobes/kprobes.c b/arch/sw_64/kernel/kprobes/kprobes.c new file mode 100644 index 000000000000..024ce7d99e61 --- /dev/null +++ b/arch/sw_64/kernel/kprobes/kprobes.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Kernel Probes (KProbes) + * arch/sw_64/kernel/kprobes.c + */ + +#include +#include +#include + +#include "common.h" + +static u32 breakpoint_insn = BREAK_KPROBE; +static u32 breakpoint2_insn = 
BREAK_KPROBE_SS; + +int post_kprobe_handler(struct pt_regs *regs); + +DEFINE_PER_CPU(struct kprobe *, current_kprobe); +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + int ret = 0; + extern char __start_rodata[]; + extern char __end_rodata[]; + unsigned long probe_addr = (unsigned long)p->addr; + + if (probe_addr & 0x3) + return -EINVAL; + + if (!sw64_insn_can_kprobe(p->addr)) + return -EINVAL; + /* copy instruction */ + p->opcode = le32_to_cpu(*p->addr); + + + if (probe_addr >= (unsigned long) __start_rodata && + probe_addr <= (unsigned long) __end_rodata) + return -EINVAL; + + + /* insn: must be on special executable page on mips. */ + p->ainsn.insn = get_insn_slot(); + if (!p->ainsn.insn) { + ret = -ENOMEM; + goto out; + } + /* + * In the kprobe->ainsn.insn[] array we store the original + * instruction at index zero and a break trap instruction at + * index one. + */ + p->ainsn.insn[0] = p->opcode; + p->ainsn.insn[1] = breakpoint2_insn; +out: + return ret; +} + +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + sw64_insn_write(p->addr, breakpoint_insn); + flush_insn_slot(p); +} + +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + sw64_insn_write(p->addr, p->opcode); + flush_insn_slot(p); +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + if (p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, 0); + p->ainsn.insn = NULL; + } +} + +static void save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; +} + +static void restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); + kcb->kprobe_status = kcb->prev_kprobe.status; +} + +static void __kprobes set_current_kprobe(struct kprobe *p) +{ + __this_cpu_write(current_kprobe, p); +} + + +static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb, int 
reenter) +{ + if (reenter) { + save_previous_kprobe(kcb); + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_REENTER; + } else { + kcb->kprobe_status = KPROBE_HIT_SS; + } + + /* insn simulation */ + kcb->target_pc = regs->pc; + regs->pc = (unsigned long)&p->ainsn.insn[0]; +} + +static int __kprobes reenter_kprobe(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + switch (kcb->kprobe_status) { + case KPROBE_HIT_SSDONE: + case KPROBE_HIT_ACTIVE: + kprobes_inc_nmissed_count(p); + setup_singlestep(p, regs, kcb, 1); + break; + case KPROBE_HIT_SS: + case KPROBE_REENTER: + pr_warn("Unrecoverable kprobe detected.\n"); + dump_kprobe(p); + BUG(); + break; + default: + WARN_ON(1); + return 0; + } + return 1; +} + +int __kprobes kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *p; + struct kprobe_ctlblk *kcb; + unsigned long addr = instruction_pointer(regs); + + if (user_mode(regs)) + return 0; + /* + * We don't want to be preempted for the entire + * duration of kprobe processing + */ + preempt_disable(); + kcb = get_kprobe_ctlblk(); + p = get_kprobe((kprobe_opcode_t *)(addr - 4)); + + if (p) { + if (kprobe_running()) { + if (reenter_kprobe(p, regs, kcb)) + return 1; + } else { + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + /* + * If we have no pre-handler or it returned 0, we + * continue with normal processing. If we have a + * pre-handler and it returned non-zero, that means + * user handler setup registers to exit to another + * instruction, we must skip the single stepping. 
+ */ + if (!p->pre_handler || !p->pre_handler(p, regs)) + setup_singlestep(p, regs, kcb, 0); + else + reset_current_kprobe(); + return 1; + } + } + return 0; + +} +int __kprobes post_kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (!cur) + return 0; + + if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + cur->post_handler(cur, regs, 0); + } + + // resume_execution(cur, regs, kcb); + regs->pc = kcb->target_pc; + + + /* Restore back the original saved kprobes variables and continue. */ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + goto out; + } + reset_current_kprobe(); +out: + preempt_enable_no_resched(); + + return 1; +} + +int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long mmcsr) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (kcb->kprobe_status & KPROBE_HIT_SS) { + regs->pc = kcb->target_pc; + + reset_current_kprobe(); + preempt_enable_no_resched(); + } + return 0; +} + +/* + * Wrapper routine for handling exceptions. + */ +int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + + struct die_args *args = (struct die_args *)data; + int ret = NOTIFY_DONE; + + switch (val) { + case DIE_BREAK: + if (kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + case DIE_SSTEPBP: + if (post_kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + default: + break; + } + return ret; +} +/* + * Function return probe trampoline: + * - init_kprobes() establishes a probepoint here + * - When the probed function returns, this probe causes the + * handlers to fire + */ +static void __used kretprobe_trampoline_holder(void) +{ + asm volatile( + /* Keep the assembler from reordering and placing JR here. 
*/ + ".set noreorder\n\t" + "nop\n\t" + ".global __kretprobe_trampoline\n" + "__kretprobe_trampoline:\n\t" + "nop\n\t" + : : : "memory"); +} + +void __kretprobe_trampoline(void); + +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *) regs->regs[26]; + ri->fp = NULL; + + /* Replace the return addr with trampoline addr */ + regs->regs[26] = (unsigned long)__kretprobe_trampoline; +} + +/* + * Called when the probe at kretprobe trampoline is hit + */ +static int __kprobes trampoline_probe_handler(struct kprobe *p, + struct pt_regs *regs) +{ + unsigned long orig_ret_address; + + orig_ret_address = __kretprobe_trampoline_handler(regs, NULL); + instruction_pointer(regs) = orig_ret_address; + regs->regs[26] = orig_ret_address; + + /* + * By returning a non-zero value, we are telling + * kprobe_handler() that we don't want the post_handler + * to run (and have re-enabled preemption) + */ + return 1; +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + if (p->addr == (kprobe_opcode_t *)__kretprobe_trampoline) + return 1; + + return 0; +} + +static struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *)__kretprobe_trampoline, + .pre_handler = trampoline_probe_handler +}; + +int __init arch_init_kprobes(void) +{ + return register_kprobe(&trampoline_p); +} -- Gitee From ad02163b28b47e6c8a813a9a9b7a67758360d13b Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:38 +0800 Subject: [PATCH 0317/2138] anolis: sw64: add uprobe support ANBZ: #4688 Add uprobe support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/uprobes.h | 45 ++++++++ arch/sw_64/kernel/uprobes.c | 182 +++++++++++++++++++++++++++++++ 2 files changed, 227 insertions(+) create mode 100644 arch/sw_64/include/asm/uprobes.h create mode 100644 arch/sw_64/kernel/uprobes.c diff --git a/arch/sw_64/include/asm/uprobes.h b/arch/sw_64/include/asm/uprobes.h new file mode 100644 index 000000000000..fcd2026c3622 --- /dev/null +++ b/arch/sw_64/include/asm/uprobes.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef _ASM_SW64_UPROBES_H +#define _ASM_SW64_UPROBES_H + +#include +#include +#include + +/* + * We want this to be defined as union sw64_instruction but that makes the + * generic code blow up. 
+ */ +typedef u32 uprobe_opcode_t; + +#define MAX_UINSN_BYTES SW64_INSN_SIZE +#define UPROBE_XOL_SLOT_BYTES SW64_INSN_SIZE + +#define UPROBE_BRK_UPROBE 0x000d000d /* break 13 */ +#define UPROBE_BRK_UPROBE_XOL 0x000e000d /* break 14 */ + +#define UPROBE_SWBP_INSN UPROBE_BRK_UPROBE +#define UPROBE_SWBP_INSN_SIZE MAX_UINSN_BYTES + +struct arch_uprobe { + u32 insn; + u32 ixol[2]; +}; + +struct arch_uprobe_task { + unsigned long saved_trap_nr; +}; + +#ifdef CONFIG_UPROBES +void sw64_fix_uretprobe(struct pt_regs *regs, unsigned long exc_pc); +#else +static inline void +sw64_fix_uretprobe(struct pt_regs *regs, unsigned long exc_pc) {} +#endif + +#endif /* _ASM_SW64_UPROBES_H */ diff --git a/arch/sw_64/kernel/uprobes.c b/arch/sw_64/kernel/uprobes.c new file mode 100644 index 000000000000..928312d62cfd --- /dev/null +++ b/arch/sw_64/kernel/uprobes.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +/** + * arch_uprobe_analyze_insn - instruction analysis including validity and fixups. + * @mm: the probed address space. + * @arch_uprobe: the probepoint information. + * @addr: virtual address at which to install the probepoint + * Return 0 on success or a -ve number on error. + */ +int arch_uprobe_analyze_insn(struct arch_uprobe *aup, + struct mm_struct *mm, unsigned long addr) +{ + u32 inst; + + if (addr & 0x03) + return -EINVAL; + + inst = aup->insn; + + aup->ixol[0] = aup->insn; + aup->ixol[1] = UPROBE_BRK_UPROBE_XOL; /* NOP */ + + return 0; +} + +void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len) +{ + unsigned long kaddr, kstart; + + /* Initialize the slot */ + kaddr = (unsigned long)kmap_local_page(page); + kstart = kaddr + (vaddr & ~PAGE_MASK); + memcpy((void *)kstart, src, len); + flush_icache_range(kstart, kstart + len); + kunmap_local((void *)kaddr); +} + +/* + * arch_uprobe_pre_xol - prepare to execute out of line. + * @auprobe: the probepoint information. 
+ * @regs: reflects the saved user state of current task. + */ +int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + /* Instruction points to execute ol */ + instruction_pointer_set(regs, utask->xol_vaddr); + + return 0; +} + +int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + /* Instruction points to execute next to breakpoint address */ + instruction_pointer_set(regs, utask->vaddr + 4); + + return 0; +} + +/* + * If xol insn itself traps and generates a signal(Say, + * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped + * instruction jumps back to its own address. It is assumed that anything + * like do_page_fault/do_trap/etc sets thread.trap_nr != -1. + * + * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr, + * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to + * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol(). + */ +bool arch_uprobe_xol_was_trapped(struct task_struct *tsk) +{ + return false; +} + +int arch_uprobe_exception_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + struct die_args *args = data; + struct pt_regs *regs = args->regs; + + /* regs == NULL is a kernel bug */ + if (WARN_ON(!regs)) + return NOTIFY_DONE; + + /* We are only interested in userspace traps */ + if (!user_mode(regs)) + return NOTIFY_DONE; + + switch (val) { + case DIE_UPROBE: + if (uprobe_pre_sstep_notifier(regs)) + return NOTIFY_STOP; + break; + case DIE_UPROBE_XOL: + if (uprobe_post_sstep_notifier(regs)) + return NOTIFY_STOP; + default: + break; + } + + return 0; +} + +/* + * This function gets called when XOL instruction either gets trapped or + * the thread has a fatal signal. Reset the instruction pointer to its + * probed address for the potential restart or for post mortem analysis. 
+ */ +void arch_uprobe_abort_xol(struct arch_uprobe *aup, + struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + instruction_pointer_set(regs, utask->vaddr); +} + +unsigned long arch_uretprobe_hijack_return_addr( + unsigned long trampoline_vaddr, struct pt_regs *regs) +{ + unsigned long ra; + + ra = regs->regs[26]; + + /* Replace the return address with the trampoline address */ + regs->regs[26] = trampoline_vaddr; + + return ra; +} + +/* + * See if the instruction can be emulated. + * Returns true if instruction was emulated, false otherwise. + * + * For now we always emulate so this function just returns 0. + */ +bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + return 0; +} + +/* + * struct xol_area and get_trampoline_vaddr() are copied from + * kernel/events/uprobes.c to avoid modifying arch-independent + * code. + */ +struct xol_area { + wait_queue_head_t wq; + atomic_t slot_count; + unsigned long *bitmap; + struct vm_special_mapping xol_mapping; + struct page *pages[2]; + unsigned long vaddr; +}; + +static unsigned long get_trampoline_vaddr(void) +{ + struct xol_area *area; + unsigned long trampoline_vaddr = -1; + + area = READ_ONCE(current->mm->uprobes_state.xol_area); + if (area) + trampoline_vaddr = area->vaddr; + + return trampoline_vaddr; +} + +void sw64_fix_uretprobe(struct pt_regs *regs, unsigned long exc_pc) +{ + /* + * regs->pc has been changed to orig_ret_vaddr in handle_trampoline(). + */ + if (exc_pc == get_trampoline_vaddr()) + regs->regs[26] = regs->pc; +} -- Gitee From 2bb7a637b46ed2d1b759724d8b10c8f80e0eb183 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:16 +0800 Subject: [PATCH 0318/2138] anolis: sw64: add jump_label support ANBZ: #4688 Add jump_label support for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/jump_label.h | 50 +++++++++++++++++++++++++++++ arch/sw_64/kernel/jump_label.c | 32 ++++++++++++++++++ 2 files changed, 82 insertions(+) create mode 100644 arch/sw_64/include/asm/jump_label.h create mode 100644 arch/sw_64/kernel/jump_label.c diff --git a/arch/sw_64/include/asm/jump_label.h b/arch/sw_64/include/asm/jump_label.h new file mode 100644 index 000000000000..32fbf7573b20 --- /dev/null +++ b/arch/sw_64/include/asm/jump_label.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_JUMP_LABEL_H +#define _ASM_SW64_JUMP_LABEL_H + +#ifndef __ASSEMBLY__ + +#include +#include + +#define JUMP_LABEL_NOP_SIZE SW64_INSN_SIZE + +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) +{ + asm_volatile_goto("1: nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 3\n\t" + ".quad 1b, %l[l_yes], %0\n\t" + ".popsection\n\t" + : : "i"(&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + asm_volatile_goto("1: br %l[l_yes]\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 3\n\t" + ".quad 1b, %l[l_yes], %0\n\t" + ".popsection\n\t" + : : "i"(&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +typedef u64 jump_label_t; + +struct jump_entry { + jump_label_t code; + jump_label_t target; + jump_label_t key; +}; + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_SW64_JUMP_LABEL_H */ diff --git a/arch/sw_64/kernel/jump_label.c b/arch/sw_64/kernel/jump_label.c new file mode 100644 index 000000000000..f3bc40370e4d --- /dev/null +++ b/arch/sw_64/kernel/jump_label.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#include +#include + +void 
arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + u32 *insnp = (u32 *)entry->code; + u32 insn; + + if (type == JUMP_LABEL_JMP) { + insn = sw64_insn_br(R31, (entry->code), entry->target); + BUG_ON(insn == -1); + } else { + insn = sw64_insn_nop(); + } + + *insnp = insn; + + flush_icache_range(entry->code, entry->code + SW64_INSN_SIZE); +} + +void arch_jump_label_transform_static(struct jump_entry *entry, + enum jump_label_type type) +{ + /* + * no need to rewrite NOP + */ +} -- Gitee From 7e2b123140e8f47a07962a7025b877cc511813f5 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:17 +0800 Subject: [PATCH 0319/2138] anolis: sw64: add kgdb support ANBZ: #4688 Add kgdb support for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/kgdb.h | 68 ++++++++++ arch/sw_64/kernel/kgdb.c | 233 ++++++++++++++++++++++++++++++++++ 2 files changed, 301 insertions(+) create mode 100644 arch/sw_64/include/asm/kgdb.h create mode 100644 arch/sw_64/kernel/kgdb.c diff --git a/arch/sw_64/include/asm/kgdb.h b/arch/sw_64/include/asm/kgdb.h new file mode 100644 index 000000000000..a00a45ce767c --- /dev/null +++ b/arch/sw_64/include/asm/kgdb.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * sw64 KGDB support + * + * Based on arch/arm64/include/kgdb.h + * + * Copyright (C) Xia Bin + * Author: Xia Bin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _ASM_SW64_KGDB_H +#define _ASM_SW64_KGDB_H + +#include +#include + +#ifndef __ASSEMBLY__ + + +#define GDB_ADJUSTS_BREAK_OFFSET +#define BREAK_INSTR_SIZE 4 +#define CACHE_FLUSH_IS_SAFE 0 + +static inline void arch_kgdb_breakpoint(void) +{ + __asm__ __volatile__("sys_call %0" : : "i"(HMC_bpt)); +} + +void sw64_task_to_gdb_regs(struct task_struct *task, unsigned long *regs); + +extern void kgdb_handle_bus_error(void); +extern int kgdb_fault_expected; +extern unsigned long get_reg(struct task_struct *task, unsigned long regno); + +#endif /* !__ASSEMBLY__ */ + +/* + * general purpose registers size in bytes. + */ +#define DBG_MAX_REG_NUM (67) + +/* + * Size of I/O buffer for gdb packet. + * considering to hold all register contents, size is set + */ + +#define BUFMAX 4096 + +/* + * Number of bytes required for gdb_regs buffer. + * _GP_REGS: 8 bytes, _FP_REGS: 16 bytes and _EXTRA_REGS: 4 bytes each + * GDB fails to connect for size beyond this with error + * "'g' packet reply is too long" + */ +#define NUMREGBYTES (DBG_MAX_REG_NUM * 8) + +#endif /* _ASM_SW64_KGDB_H */ diff --git a/arch/sw_64/kernel/kgdb.c b/arch/sw_64/kernel/kgdb.c new file mode 100644 index 000000000000..833f72a1577c --- /dev/null +++ b/arch/sw_64/kernel/kgdb.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sw64 KGDB support + * + * Based on arch/arm64/kernel/kgdb.c + * + * Copyright (C) Xia Bin + * Author: Xia Bin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include + +struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { + { "r0", 8, offsetof(struct pt_regs, regs[0])}, + { "r1", 8, offsetof(struct pt_regs, regs[1])}, + { "r2", 8, offsetof(struct pt_regs, regs[2])}, + { "r3", 8, offsetof(struct pt_regs, regs[3])}, + { "r4", 8, offsetof(struct pt_regs, regs[4])}, + { "r5", 8, offsetof(struct pt_regs, regs[5])}, + { "r6", 8, offsetof(struct pt_regs, regs[6])}, + { "r7", 8, offsetof(struct pt_regs, regs[7])}, + { "r8", 8, offsetof(struct pt_regs, regs[8])}, + + { "r9", 8, offsetof(struct pt_regs, regs[9])}, + { "r10", 8, offsetof(struct pt_regs, regs[10])}, + { "r11", 8, offsetof(struct pt_regs, regs[11])}, + { "r12", 8, offsetof(struct pt_regs, regs[12])}, + { "r13", 8, offsetof(struct pt_regs, regs[13])}, + { "r14", 8, offsetof(struct pt_regs, regs[14])}, + { "r15", 8, offsetof(struct pt_regs, regs[15])}, + + { "r16", 8, offsetof(struct pt_regs, regs[16])}, + { "r17", 8, offsetof(struct pt_regs, regs[17])}, + { "r18", 8, offsetof(struct pt_regs, regs[18])}, + + { "r19", 8, offsetof(struct pt_regs, regs[19])}, + { "r20", 8, offsetof(struct pt_regs, regs[20])}, + { "r21", 8, offsetof(struct pt_regs, regs[21])}, + { "r22", 8, offsetof(struct pt_regs, regs[22])}, + { "r23", 8, offsetof(struct pt_regs, regs[23])}, + { "r24", 8, offsetof(struct pt_regs, regs[24])}, + { "r25", 8, offsetof(struct pt_regs, regs[25])}, + { "r26", 8, offsetof(struct pt_regs, regs[26])}, + { "r27", 8, offsetof(struct pt_regs, regs[27])}, + { "at", 8, offsetof(struct pt_regs, regs[28])}, + { "gp", 8, offsetof(struct pt_regs, regs[29])}, + { "sp", 8, offsetof(struct pt_regs, regs[30])}, + { "zero", 8, -1 }, + + { "f0", 8, -1 }, + { "f1", 8, -1 }, + { "f2", 8, -1 }, + { "f3", 8, -1 }, + { "f4", 8, -1 }, + { "f5", 8, -1 }, + { "f6", 8, -1 }, + { "f7", 8, -1 }, + { "f8", 8, -1 }, + { "f9", 8, -1 }, + { "f10", 8, -1 }, + 
{ "f11", 8, -1 }, + { "f12", 8, -1 }, + { "f13", 8, -1 }, + { "f14", 8, -1 }, + { "f15", 8, -1 }, + { "f16", 8, -1 }, + { "f17", 8, -1 }, + { "f18", 8, -1 }, + { "f19", 8, -1 }, + { "f20", 8, -1 }, + { "f21", 8, -1 }, + { "f22", 8, -1 }, + { "f23", 8, -1 }, + { "f24", 8, -1 }, + { "f25", 8, -1 }, + { "f26", 8, -1 }, + { "f27", 8, -1 }, + { "f28", 8, -1 }, + { "f29", 8, -1 }, + { "f30", 8, -1 }, + { "fpcr", 8, -1 }, + + { "pc", 8, offsetof(struct pt_regs, pc)}, + { "", 8, -1 }, + { "tp", 8, -1}, +}; + +char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return NULL; + + if (dbg_reg_def[regno].offset != -1) + memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, + dbg_reg_def[regno].size); + else + memset(mem, 0, dbg_reg_def[regno].size); + return dbg_reg_def[regno].name; +} + +int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return -EINVAL; + + if (dbg_reg_def[regno].offset != -1) + memcpy((void *)regs + dbg_reg_def[regno].offset, mem, + dbg_reg_def[regno].size); + return 0; +} + +void +sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) +{ + int i; + /* Initialize to zero */ + memset((char *)gdb_regs, 0, NUMREGBYTES); + for (i = 0; i < DBG_MAX_REG_NUM; i++) + gdb_regs[i] = get_reg(task, i); +} + +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) +{ + pr_info("BEFORE SET PC WITH %lx\n", pc); + instruction_pointer(regs) = pc; + pr_info("AFTER SET PC IS %lx\n", instruction_pointer(regs)); +} + +void kgdb_call_nmi_hook(void *ignored) +{ + kgdb_nmicallback(raw_smp_processor_id(), NULL); +} + +void kgdb_roundup_cpus(void) +{ + local_irq_enable(); + smp_call_function(kgdb_call_nmi_hook, NULL, 0); + local_irq_disable(); +} + +int kgdb_arch_handle_exception(int exception_vector, int signo, + int err_code, char *remcom_in_buffer, + char *remcom_out_buffer, + struct pt_regs *linux_regs) +{ + char *ptr; + unsigned long 
address = -1; + + switch (remcom_in_buffer[0]) { + case 'c': + ptr = &remcom_in_buffer[1]; + if (kgdb_hex2long(&ptr, &address)) + kgdb_arch_set_pc(linux_regs, address); + return 0; + } + return -1; +} + +static int __kgdb_notify(struct die_args *args, unsigned long cmd) +{ + struct pt_regs *regs = args->regs; + + /* Userspace events, ignore. */ + if (user_mode(regs)) + return NOTIFY_DONE; + + if (kgdb_handle_exception(1, args->signr, cmd, regs)) + return NOTIFY_DONE; + + return NOTIFY_STOP; +} + +static int +kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) +{ + unsigned long flags; + int ret; + + local_irq_save(flags); + ret = __kgdb_notify(ptr, cmd); + local_irq_restore(flags); + + return ret; +} + +static struct notifier_block kgdb_notifier = { + .notifier_call = kgdb_notify, +}; + +/* + * kgdb_arch_init - Perform any architecture specific initalization. + * This function will handle the initalization of any architecture + * specific callbacks. + */ +int kgdb_arch_init(void) +{ + int ret = register_die_notifier(&kgdb_notifier); + + if (ret != 0) + return ret; + return 0; +} + +/* + * kgdb_arch_exit - Perform any architecture specific uninitalization. + * This function will handle the uninitalization of any architecture + * specific callbacks, for dynamic registration and unregistration. + */ +void kgdb_arch_exit(void) +{ + unregister_die_notifier(&kgdb_notifier); +} + +/* + * sw64 instructions are always in LE. + * Break instruction is encoded in LE format + */ +const struct kgdb_arch arch_kgdb_ops = { + .gdb_bpt_instr = {0x80, 00, 00, 00} +}; -- Gitee From 146313b5b92342025bd8dc5ed5c1632c85b81023 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:20:30 +0800 Subject: [PATCH 0320/2138] anolis: sw64: add dynamic frequency scaling support ANBZ: #4688 Add dynamic frequency scaling support for SW64 based xuelang platform. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/cpufreq.h | 66 ++++++++++++ arch/sw_64/platform/Makefile | 2 + arch/sw_64/platform/cpufreq_xuelang.c | 140 ++++++++++++++++++++++++++ 3 files changed, 208 insertions(+) create mode 100644 arch/sw_64/include/asm/cpufreq.h create mode 100644 arch/sw_64/platform/Makefile create mode 100644 arch/sw_64/platform/cpufreq_xuelang.c diff --git a/arch/sw_64/include/asm/cpufreq.h b/arch/sw_64/include/asm/cpufreq.h new file mode 100644 index 000000000000..cf47f1fc6866 --- /dev/null +++ b/arch/sw_64/include/asm/cpufreq.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_CPUFREQ_H +#define _ASM_SW64_CPUFREQ_H + +#include +#include +#include +#include +#include + +struct clk; + +extern char curruent_policy[CPUFREQ_NAME_LEN]; + +struct clk_ops { + void (*init)(struct clk *clk); + void (*enable)(struct clk *clk); + void (*disable)(struct clk *clk); + void (*recalc)(struct clk *clk); + int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id); + long (*round_rate)(struct clk *clk, unsigned long rate); +}; + +struct clk { + struct list_head node; + const char *name; + int id; + struct module *owner; + + struct clk *parent; + const struct clk_ops *ops; + + struct kref kref; + + unsigned long rate; + unsigned long flags; +}; + +#define CLK_ALWAYS_ENABLED (1 << 0) +#define CLK_RATE_PROPAGATES (1 << 1) + +#define CLK_PRT 0x1UL +#define CORE_CLK0_V (0x1UL << 1) +#define CORE_CLK0_R (0x1UL << 2) +#define CORE_CLK2_V (0x1UL << 15) +#define CORE_CLK2_R (0x1UL << 16) + +#define CLK_LV1_SEL_PRT 0x1UL +#define CLK_LV1_SEL_MUXA (0x1UL << 2) +#define CLK_LV1_SEL_MUXB (0x1UL << 3) + +#define CORE_PLL0_CFG_SHIFT 4 +#define CORE_PLL2_CFG_SHIFT 18 + +extern struct cpufreq_frequency_table freq_table[]; + +int clk_init(void); +void sw64_set_rate(unsigned int index); + 
+struct clk *sw64_clk_get(struct device *dev, const char *id); + +void sw64_update_clockevents(unsigned long cpu, u32 freq); + +unsigned int __sw64_cpufreq_get(struct cpufreq_policy *policy); +#endif /* _ASM_SW64_CPUFREQ_H */ diff --git a/arch/sw_64/platform/Makefile b/arch/sw_64/platform/Makefile new file mode 100644 index 000000000000..4c0edceb4a2c --- /dev/null +++ b/arch/sw_64/platform/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PLATFORM_XUELANG) += cpufreq_xuelang.o diff --git a/arch/sw_64/platform/cpufreq_xuelang.c b/arch/sw_64/platform/cpufreq_xuelang.c new file mode 100644 index 000000000000..1259e58dc874 --- /dev/null +++ b/arch/sw_64/platform/cpufreq_xuelang.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#include +#include +#include + +/* Minimum CLK support */ +enum { + DC_0, DC_1, DC_2, DC_3, DC_4, DC_5, DC_6, DC_7, DC_8, + DC_9, DC_10, DC_11, DC_12, DC_13, DC_14, DC_15, DC_RESV +}; + +struct cpufreq_frequency_table freq_table[] = { + {0, 200, CPUFREQ_ENTRY_INVALID}, + {0, DC_1, CPUFREQ_ENTRY_INVALID}, + {0, DC_2, 0}, + {0, DC_3, 0}, + {0, DC_4, 0}, + {0, DC_5, 0}, + {0, DC_6, 0}, + {0, DC_7, 0}, + {0, DC_8, 0}, + {0, DC_9, 0}, + {0, DC_10, 0}, + {0, DC_11, 0}, + {0, DC_12, 0}, + {0, DC_13, 0}, + {0, DC_14, 0}, + {0, DC_15, 0}, + {-1, DC_RESV, CPUFREQ_TABLE_END}, +}; + + +static struct platform_device sw64_cpufreq_device = { + .name = "sw64_cpufreq", + .id = -1, +}; + +static int __init sw64_cpufreq_init(void) +{ + int i; + unsigned char external_clk; + unsigned long max_rate, freq_off; + + max_rate = get_cpu_freq() / 1000; + + external_clk = *((unsigned char *)__va(MB_EXTCLK)); + + if (external_clk == 240) + freq_off = 60000; + else + freq_off = 50000; + + /* clock table init */ + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + if (i == 1) + freq_table[i].driver_data = freq_off * 24; + if (i == 2) + freq_table[i].frequency = freq_off * 36; + if (i > 2) + freq_table[i].frequency 
= freq_off * 38 + ((i - 3) * freq_off); + + if (freq_table[i].frequency == max_rate) + freq_table[i + 1].frequency = CPUFREQ_TABLE_END; + } + + return platform_device_register(&sw64_cpufreq_device); +} +arch_initcall(sw64_cpufreq_init); + +char curruent_policy[CPUFREQ_NAME_LEN]; + +static struct clk cpu_clk = { + .name = "cpu_clk", + .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES, + .rate = 2400000000, +}; + +struct clk *sw64_clk_get(struct device *dev, const char *id) +{ + return &cpu_clk; +} +EXPORT_SYMBOL(sw64_clk_get); + +unsigned int __sw64_cpufreq_get(struct cpufreq_policy *policy) +{ + int i; + u64 val; + struct cpufreq_frequency_table *ft = policy->freq_table; + + val = sw64_io_read(0, CLK_CTL) >> CORE_PLL2_CFG_SHIFT; + + for (i = 0; ft[i].frequency != CPUFREQ_TABLE_END; i++) { + if (val == i) + return ft[i].frequency; + } + return 0; +} +EXPORT_SYMBOL(__sw64_cpufreq_get); + +void sw64_set_rate(unsigned int index) +{ + unsigned int i, val; + int cpu_num; + + cpu_num = sw64_chip->get_cpu_num(); + + for (i = 0; i < cpu_num; i++) { + sw64_io_write(i, CLK_CTL, CORE_CLK2_R | CORE_CLK2_V | CLK_PRT); + val = sw64_io_read(i, CLK_CTL); + + sw64_io_write(i, CLK_CTL, val | index << CORE_PLL2_CFG_SHIFT); + + udelay(1); + + sw64_io_write(i, CLK_CTL, CORE_CLK2_V | CLK_PRT + | index << CORE_PLL2_CFG_SHIFT); + val = sw64_io_read(i, CLK_CTL); + + /* LV1 select PLL1/PLL2 */ + sw64_io_write(i, CLU_LV1_SEL, CLK_LV1_SEL_MUXA | CLK_LV1_SEL_PRT); + + /* Set CLK_CTL PLL0 */ + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V); + + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); + + udelay(1); + + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); + + /* LV1 select PLL0/PLL1 */ + sw64_io_write(i, CLU_LV1_SEL, CLK_LV1_SEL_MUXB | CLK_LV1_SEL_PRT); + } +} +EXPORT_SYMBOL_GPL(sw64_set_rate); -- Gitee From edb688453a39d03f931f354c8c8014217176e95e Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 
2024 16:20:06 +0800 Subject: [PATCH 0321/2138] anolis: sw64: add dynamic turning on/off cores support ANBZ: #4688 Add dynamic turning on/off cores support for SW64 based xuelang platform. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2906 --- arch/sw_64/include/asm/cputime.h | 9 + arch/sw_64/kernel/cpuautoplug.c | 485 +++++++++++++++++++++++++++++++ 2 files changed, 494 insertions(+) create mode 100644 arch/sw_64/include/asm/cputime.h create mode 100644 arch/sw_64/kernel/cpuautoplug.c diff --git a/arch/sw_64/include/asm/cputime.h b/arch/sw_64/include/asm/cputime.h new file mode 100644 index 000000000000..cdd46b05e228 --- /dev/null +++ b/arch/sw_64/include/asm/cputime.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CPUTIME_H +#define _ASM_SW64_CPUTIME_H + +typedef u64 __nocast cputime64_t; + +#define jiffies64_to_cputime64(__jif) ((__force cputime64_t)(__jif)) + +#endif /* _ASM_SW64_CPUTIME_H */ diff --git a/arch/sw_64/kernel/cpuautoplug.c b/arch/sw_64/kernel/cpuautoplug.c new file mode 100644 index 000000000000..a7571a77a72c --- /dev/null +++ b/arch/sw_64/kernel/cpuautoplug.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +int autoplug_enabled; +int autoplug_verbose; +int autoplug_adjusting; + +DEFINE_PER_CPU(int, cpu_adjusting); + +struct cpu_autoplug_info { + cputime64_t prev_idle; + cputime64_t prev_wall; + struct delayed_work work; + unsigned int sampling_rate; + int maxcpus; /* max cpus for autoplug */ + int mincpus; /* min cpus for autoplug */ + int dec_reqs; /* continuous core-decreasing requests */ + int inc_reqs; /* continuous core-increasing requests */ +}; + +struct cpu_autoplug_info ap_info; + +static ssize_t enabled_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return 
sprintf(buf, "%d\n", autoplug_enabled); +} + + +static ssize_t enabled_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char val[5]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > 1 || n < 0) + return -EINVAL; + + autoplug_enabled = n; + + return count; +} + +static ssize_t verbose_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", autoplug_verbose); +} + +static ssize_t verbose_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char val[5]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > 1 || n < 0) + return -EINVAL; + + autoplug_verbose = n; + + return count; +} + +static ssize_t maxcpus_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", ap_info.maxcpus); +} + +static ssize_t maxcpus_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char val[5]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > num_possible_cpus() || n < ap_info.mincpus) + return -EINVAL; + + ap_info.maxcpus = n; + + return count; +} + +static ssize_t mincpus_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", ap_info.mincpus); +} + +static ssize_t mincpus_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + char val[5]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > ap_info.maxcpus || n < 1) + return -EINVAL; + + ap_info.mincpus = n; + + return count; +} + +static ssize_t sampling_rate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", ap_info.sampling_rate); +} + +#define SAMPLING_RATE_MAX 1000 +#define SAMPLING_RATE_MIN 600 + +static ssize_t sampling_rate_store(struct device *dev, + struct 
device_attribute *attr, + const char *buf, size_t count) +{ + char val[6]; + int n; + + memcpy(val, buf, count); + n = kstrtol(val, 0, 0); + + if (n > SAMPLING_RATE_MAX || n < SAMPLING_RATE_MIN) + return -EINVAL; + + ap_info.sampling_rate = n; + + return count; +} + +static ssize_t available_value_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "enabled: 0-1\nverbose: 0-1\nmaxcpus:" + "1-%d\nmincpus: 1-%d\nsampling_rate: %d-%d\n", + num_possible_cpus(), num_possible_cpus(), + SAMPLING_RATE_MIN, SAMPLING_RATE_MAX); +} + +static DEVICE_ATTR_RW(enabled); +static DEVICE_ATTR_RW(verbose); +static DEVICE_ATTR_RW(maxcpus); +static DEVICE_ATTR_RW(mincpus); +static DEVICE_ATTR_RW(sampling_rate); +static DEVICE_ATTR_RO(available_value); + +static struct attribute *cpuclass_default_attrs[] = { + &dev_attr_enabled.attr, + &dev_attr_verbose.attr, + &dev_attr_maxcpus.attr, + &dev_attr_mincpus.attr, + &dev_attr_sampling_rate.attr, + &dev_attr_available_value.attr, + NULL +}; + +static struct attribute_group cpuclass_attr_group = { + .attrs = cpuclass_default_attrs, + .name = "cpuautoplug", +}; + +static int __init setup_autoplug(char *str) +{ + if (!strcmp(str, "off")) + autoplug_enabled = 0; + else if (!strcmp(str, "on")) + autoplug_enabled = 1; + else + return 0; + return 1; +} + +__setup("autoplug=", setup_autoplug); + +static cputime64_t calc_busy_time(unsigned int cpu) +{ + cputime64_t busy_time; + + busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; + busy_time += 1; + + return busy_time; +} + +static inline cputime64_t get_idle_time_jiffy(cputime64_t *wall) +{ + unsigned int cpu; + cputime64_t idle_time = 0; + cputime64_t cur_wall_time; + 
cputime64_t busy_time; + + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + for_each_online_cpu(cpu) { + busy_time = calc_busy_time(cpu); + + idle_time += cur_wall_time - busy_time; + } + + if (wall) + *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); + + return (cputime64_t)jiffies_to_usecs(idle_time); +} + +static inline cputime64_t get_idle_time(cputime64_t *wall) +{ + unsigned int cpu; + u64 idle_time = 0; + + for_each_online_cpu(cpu) { + idle_time += get_cpu_idle_time_us(cpu, wall); + if (idle_time == -1ULL) + return get_idle_time_jiffy(wall); + } + + return idle_time; +} + +static cputime64_t get_min_busy_time(cputime64_t arr[], int size) +{ + int i, min_cpu_idx; + cputime64_t min_time = arr[0]; + + for (i = 0; i < size; i++) { + if (arr[i] > 0 && arr[i] < min_time) { + min_time = arr[i]; + min_cpu_idx = i; + } + } + + return min_cpu_idx; +} + +static int find_min_busy_cpu(void) +{ + int nr_all_cpus = num_possible_cpus(); + unsigned int cpus, target_cpu; + cputime64_t busy_time; + cputime64_t b_time[NR_CPUS]; + + memset(b_time, 0, sizeof(b_time)); + for_each_online_cpu(cpus) { + busy_time = calc_busy_time(cpus); + b_time[cpus] = busy_time; + } + target_cpu = get_min_busy_time(b_time, nr_all_cpus); + return target_cpu; +} + +static void increase_cores(int cur_cpus) +{ + struct device *dev; + + if (cur_cpus == ap_info.maxcpus) + return; + + cur_cpus = cpumask_next_zero(0, cpu_online_mask); + + dev = get_cpu_device(cur_cpus); + + per_cpu(cpu_adjusting, dev->id) = 1; + lock_device_hotplug(); + cpu_device_up(dev); + pr_info("The target_cpu is %d, After cpu_up, the cpu_num is %d\n", + dev->id, num_online_cpus()); + get_cpu_device(dev->id)->offline = false; + unlock_device_hotplug(); + per_cpu(cpu_adjusting, dev->id) = 0; +} + +static void decrease_cores(int cur_cpus) +{ + struct device *dev; + + if (cur_cpus == ap_info.mincpus) + return; + + cur_cpus = find_min_busy_cpu(); + + dev = get_cpu_device(cur_cpus); + + if (dev->id > 0) { + 
per_cpu(cpu_adjusting, dev->id) = -1; + lock_device_hotplug(); + cpu_device_down(dev); + pr_info("The target_cpu is %d. After cpu_down, the cpu_num is %d\n", + cur_cpus, num_online_cpus()); + get_cpu_device(dev->id)->offline = true; + unlock_device_hotplug(); + per_cpu(cpu_adjusting, dev->id) = 0; + } +} + +#define INC_THRESHOLD 80 +#define DEC_THRESHOLD 40 + +static void do_autoplug_timer(struct work_struct *work) +{ + cputime64_t cur_wall_time = 0, cur_idle_time; + unsigned long idle_time, wall_time; + int delay, load; + int nr_cur_cpus = num_online_cpus(); + int nr_all_cpus = num_possible_cpus(); + int inc_req = 1, dec_req = 2; + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(smp_processor_id()); + + if (!policy || IS_ERR(policy->clk)) { + pr_err("%s: No %s associated to cpu: %d\n", + __func__, policy ? "clk" : "policy", 0); + return; + } + + ap_info.maxcpus = + setup_max_cpus > nr_cpu_ids ? nr_cpu_ids : setup_max_cpus; + ap_info.mincpus = ap_info.maxcpus / 4; + + if (strcmp(policy->governor->name, "performance") == 0) { + ap_info.mincpus = ap_info.maxcpus; + } else if (strcmp(policy->governor->name, "powersave") == 0) { + ap_info.maxcpus = ap_info.mincpus; + } else if (strcmp(policy->governor->name, "ondemand") == 0) { + ap_info.sampling_rate = 500; + inc_req = 0; + dec_req = 2; + } else if (strcmp(policy->governor->name, "conservative") == 0) { + inc_req = 1; + dec_req = 3; + ap_info.sampling_rate = 1000; /* 1s */ + } + + BUG_ON(smp_processor_id() != 0); + delay = msecs_to_jiffies(ap_info.sampling_rate); + if (!autoplug_enabled || system_state != SYSTEM_RUNNING) + goto out; + + autoplug_adjusting = 1; + + if (nr_cur_cpus > ap_info.maxcpus) { + decrease_cores(nr_cur_cpus); + autoplug_adjusting = 0; + goto out; + } + if (nr_cur_cpus < ap_info.mincpus) { + increase_cores(nr_cur_cpus); + autoplug_adjusting = 0; + goto out; + } + + cur_idle_time = get_idle_time(&cur_wall_time); + if (cur_wall_time == 0) + cur_wall_time = 
jiffies64_to_cputime64(get_jiffies_64()); + + wall_time = (unsigned int)(cur_wall_time - ap_info.prev_wall); + ap_info.prev_wall = cur_wall_time; + + idle_time = (unsigned int)(cur_idle_time - ap_info.prev_idle); + idle_time += wall_time * (nr_all_cpus - nr_cur_cpus); + ap_info.prev_wall = cur_idle_time; + + if (unlikely(!wall_time || wall_time * nr_all_cpus < idle_time)) { + autoplug_adjusting = 0; + goto out; + } + + load = 100 * (wall_time * nr_all_cpus - idle_time) / wall_time; + + if (load < (nr_cur_cpus - 1) * 100 - DEC_THRESHOLD) { + ap_info.inc_reqs = 0; + if (ap_info.dec_reqs < dec_req) + ap_info.dec_reqs++; + else { + ap_info.dec_reqs = 0; + decrease_cores(nr_cur_cpus); + } + } else { + ap_info.dec_reqs = 0; + if (load > (nr_cur_cpus - 1) * 100 + INC_THRESHOLD) { + if (ap_info.inc_reqs < inc_req) + ap_info.inc_reqs++; + else { + ap_info.inc_reqs = 0; + increase_cores(nr_cur_cpus); + } + } + } + + autoplug_adjusting = 0; +out: + schedule_delayed_work_on(0, &ap_info.work, delay); +} + +static struct platform_device_id platform_device_ids[] = { + { + .name = "sw64_cpuautoplug", + }, + {} +}; + +MODULE_DEVICE_TABLE(platform, platform_device_ids); + +static struct platform_driver platform_driver = { + .driver = { + .name = "sw64_cpuautoplug", + .owner = THIS_MODULE, + }, + .id_table = platform_device_ids, +}; + +static int __init cpuautoplug_init(void) +{ + int i, ret, delay; + + ret = sysfs_create_group(&cpu_subsys.dev_root->kobj, + &cpuclass_attr_group); + if (ret) + return ret; + + ret = platform_driver_register(&platform_driver); + if (ret) + return ret; + + pr_info("cpuautoplug: SW64 CPU autoplug driver.\n"); + + ap_info.maxcpus = + setup_max_cpus > nr_cpu_ids ? 
nr_cpu_ids : setup_max_cpus; + ap_info.mincpus = ap_info.maxcpus / 4; + ap_info.dec_reqs = 0; + ap_info.inc_reqs = 0; + ap_info.sampling_rate = 720; /* 720ms */ + if (setup_max_cpus == 0) { /* boot with npsmp */ + ap_info.maxcpus = 1; + autoplug_enabled = 0; + } + if (setup_max_cpus > num_possible_cpus()) + ap_info.maxcpus = num_possible_cpus(); + + pr_info("mincpu = %d, maxcpu = %d, autoplug_enabled = %d, rate = %d\n", + ap_info.mincpus, ap_info.maxcpus, autoplug_enabled, + ap_info.sampling_rate); + + for_each_possible_cpu(i) + per_cpu(cpu_adjusting, i) = 0; + delay = msecs_to_jiffies(ap_info.sampling_rate * 24); + INIT_DEFERRABLE_WORK(&ap_info.work, do_autoplug_timer); + schedule_delayed_work_on(0, &ap_info.work, delay); + + if (!autoplug_enabled) + cancel_delayed_work_sync(&ap_info.work); + + return ret; +} + +late_initcall(cpuautoplug_init); -- Gitee From f9a3333b0bf7fad692f3ac902cf48f152b2f1fc8 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:38:32 +0800 Subject: [PATCH 0322/2138] anolis: sw64: fix build support ANBZ: #4688 Modify scripts for SW64 build support. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- scripts/package/buildtar | 3 +++ scripts/package/mkdebian | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/package/buildtar b/scripts/package/buildtar index 65b4ea502962..93158943a4f7 100755 --- a/scripts/package/buildtar +++ b/scripts/package/buildtar @@ -64,6 +64,9 @@ case "${ARCH}" in alpha) [ -f "${objtree}/arch/alpha/boot/vmlinux.gz" ] && cp -v -- "${objtree}/arch/alpha/boot/vmlinux.gz" "${tmpdir}/boot/vmlinuz-${KERNELRELEASE}" ;; + sw_64) + [ -f "${objtree}/arch/sw_64/boot/vmlinux.bin" ] && cp -v -- "${objtree}/arch/sw_64/boot/vmlinux.bin" "${tmpdir}/boot/vmlinux-bin-${KERNELRELEASE}" + ;; parisc*) [ -f "${KBUILD_IMAGE}" ] && cp -v -- "${KBUILD_IMAGE}" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}" [ -f "${objtree}/lifimage" ] && cp -v -- "${objtree}/lifimage" "${tmpdir}/boot/lifimage-${KERNELRELEASE}" diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian index 5044224cf671..2586bcd5f43a 100755 --- a/scripts/package/mkdebian +++ b/scripts/package/mkdebian @@ -26,7 +26,7 @@ set_debarch() { # Attempt to find the correct Debian architecture case "$UTS_MACHINE" in - i386|ia64|alpha|m68k|riscv*) + i386|ia64|alpha|m68k|riscv*|sw_64) debarch="$UTS_MACHINE" ;; x86_64) debarch=amd64 ;; -- Gitee From 3e068f5423b663584b105ab1c5c7370cb4d821a8 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:36:01 +0800 Subject: [PATCH 0323/2138] anolis: sw64: fix ELF support ANBZ: #4688 Modify generic headers for SW64 ELF support. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- include/uapi/linux/elf-em.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h index ef38c2bc5ab7..32458706a403 100644 --- a/include/uapi/linux/elf-em.h +++ b/include/uapi/linux/elf-em.h @@ -59,6 +59,7 @@ * up with a final number. */ #define EM_ALPHA 0x9026 +#define EM_SW64 0x9916 /* Bogus old m32r magic number, used by old tools. */ #define EM_CYGNUS_M32R 0x9041 -- Gitee From 4ab6ae6a5f26aed7983e74f86830d044de3d645d Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:37:09 +0800 Subject: [PATCH 0324/2138] anolis: sw64: fix rrk support ANBZ: #4688 Modify generic routines for SW64 rrk support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- kernel/printk/printk.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 0fca282c0a25..b4e390e0b4bd 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -2202,10 +2202,17 @@ int vprintk_store(int facility, int level, u16 text_len; int ret = 0; u64 ts_nsec; +#ifdef CONFIG_SW64_RRK + extern int sw64_printk(const char *fmt, va_list args); +#endif if (!printk_enter_irqsave(recursion_ptr, irqflags)) return 0; +#ifdef CONFIG_SW64_RRK + sw64_printk(fmt, args); +#endif + /* * Since the duration of printk() can vary depending on the message * and state of the ringbuffer, grab the timestamp now so that it is -- Gitee From d11c95d36d7b94baff6a733bfdc9b9f2f708b188 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:30:40 +0800 Subject: [PATCH 0325/2138] anolis: sw64: fix ACPI support ANBZ: #4688 Modify generic headers for SW64 ACPI support. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- include/acpi/pdc_sw64.h | 34 ++++++++++++++++++++++++++++++++++ include/linux/acpi.h | 2 +- 2 files changed, 35 insertions(+), 1 deletion(-) create mode 100644 include/acpi/pdc_sw64.h diff --git a/include/acpi/pdc_sw64.h b/include/acpi/pdc_sw64.h new file mode 100644 index 000000000000..4724f10e8c6a --- /dev/null +++ b/include/acpi/pdc_sw64.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_PDC_SW64_H +#define _ASM_PDC_SW64_H + +#define ACPI_PDC_P_FFH (0x0001) +#define ACPI_PDC_C_C1_HALT (0x0002) +#define ACPI_PDC_T_FFH (0x0004) +#define ACPI_PDC_SMP_C1PT (0x0008) +#define ACPI_PDC_SMP_C2C3 (0x0010) +#define ACPI_PDC_SMP_P_SWCOORD (0x0020) +#define ACPI_PDC_SMP_C_SWCOORD (0x0040) +#define ACPI_PDC_SMP_T_SWCOORD (0x0080) +#define ACPI_PDC_C_C1_FFH (0x0100) +#define ACPI_PDC_C_C2C3_FFH (0x0200) +#define ACPI_PDC_SMP_P_HWCOORD (0x0800) + +#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_P_FFH) + +#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_SMP_P_SWCOORD | \ + ACPI_PDC_SMP_P_HWCOORD | \ + ACPI_PDC_P_FFH) + +#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \ + ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_C_C1_FFH | \ + ACPI_PDC_C_C2C3_FFH) + +#endif /* _ASM_PDC_SW64_H */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 1b76d2f83eac..29654f5d65db 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -259,7 +259,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); /* the following numa functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); -#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH) +#if defined(CONFIG_X86) || defined(CONFIG_IA64) || 
defined(CONFIG_LOONGARCH) || defined(CONFIG_SW64) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); #else static inline void -- Gitee From cd59968774283c21635cc42d6eb9827406c16527 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:33:32 +0800 Subject: [PATCH 0326/2138] anolis: sw64: fix module support ANBZ: #4688 Modify generic headers for SW64 kernel module support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- include/linux/moduleparam.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 962cd41a2cb5..c401b4e975cc 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -276,7 +276,7 @@ struct kparam_array read-only sections (which is part of respective UNIX ABI on these platforms). So 'const' makes no sense and even causes compile failures with some compilers. */ -#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64) +#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64) || defined(CONFIG_SW64) #define __moduleparam_const #else #define __moduleparam_const const -- Gitee From 66bdf83e9b6eff41e7ff70df1383fce2f22b7d48 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:32:24 +0800 Subject: [PATCH 0327/2138] anolis: sw64: fix KVM support ANBZ: #4688 Modify generic headers and routines for SW64 KVM support. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- include/linux/kvm_host.h | 18 ++++++ include/uapi/linux/kvm.h | 5 ++ virt/kvm/kvm_main.c | 126 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 149 insertions(+) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index fb6c6109fdca..d027f8fd23bf 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1766,6 +1766,9 @@ static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) enum kvm_stat_kind { KVM_STAT_VM, KVM_STAT_VCPU, +#ifdef CONFIG_SW64 + KVM_STAT_DFX_SW64, /* Detail For vcpu stat EXtension */ +#endif }; struct kvm_stat_data { @@ -1895,6 +1898,21 @@ struct _kvm_stats_desc { HALT_POLL_HIST_COUNT), \ STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking) +#ifdef CONFIG_SW64 +enum dfx_sw64_stat_kind { + DFX_SW64_STAT_U64, + DFX_SW64_STAT_CPUTIME, +}; + +/* Detail For vcpu stat EXtension debugfs item */ +struct dfx_sw64_kvm_stats_debugfs_item { + const char *name; + int offset; + enum dfx_sw64_stat_kind dfx_kind; + struct dentry *dentry; +}; +extern struct dfx_sw64_kvm_stats_debugfs_item dfx_sw64_debugfs_entries[]; +#endif extern struct dentry *kvm_debugfs_dir; ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 863f84619a15..2eea9dd73c64 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1572,6 +1572,11 @@ struct kvm_s390_ucas_mapping { /* Available with KVM_CAP_COUNTER_OFFSET */ #define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset) +/* ioctl for SW vcpu init*/ +#define KVM_SW64_VCPU_INIT _IO(KVMIO, 0xba) +#define KVM_SW64_GET_VCB _IO(KVMIO, 0xbc) +#define KVM_SW64_SET_VCB _IO(KVMIO, 0xbd) + /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) diff --git a/virt/kvm/kvm_main.c 
b/virt/kvm/kvm_main.c index 44c228bcd699..3a14fe491050 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -154,6 +154,11 @@ static unsigned long long kvm_active_vms; static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask); +#ifdef CONFIG_SW64 +#define DFX_SW64_MAX_VCPU 1024 +#define DFX_SW64_MAX_VCPU_STAT_SIZE 1024 +#endif + __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm) { } @@ -4158,6 +4163,9 @@ static long kvm_vcpu_ioctl(struct file *filp, if (oldpid) synchronize_rcu(); put_pid(oldpid); +#ifdef CONFIG_SW64 + vcpu->stat.pid = current->pid; +#endif } r = kvm_arch_vcpu_ioctl_run(vcpu); trace_kvm_userspace_exit(vcpu->run->exit_reason, r); @@ -5750,6 +5758,10 @@ static int kvm_stat_data_get(void *data, u64 *val) r = kvm_get_stat_per_vcpu(stat_data->kvm, stat_data->desc->desc.offset, val); break; +#ifdef CONFIG_SW64 + case KVM_STAT_DFX_SW64: + break; +#endif } return r; @@ -5772,6 +5784,10 @@ static int kvm_stat_data_clear(void *data, u64 val) r = kvm_clear_stat_per_vcpu(stat_data->kvm, stat_data->desc->desc.offset); break; +#ifdef CONFIG_SW64 + case KVM_STAT_DFX_SW64: + break; +#endif } return r; @@ -5866,6 +5882,116 @@ DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear, "%llu\n"); DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n"); +#ifdef CONFIG_SW64 +void __weak kvm_arch_vcpu_stat_reset(struct kvm_vcpu_stat *vcpu_stat) +{ +} + +/* + * copy of seq_buf_alloc of kernel, kernel not export it + */ +static void *dfx_sw64_seq_buf_alloc(unsigned long size) +{ + return kvmalloc(size, GFP_KERNEL_ACCOUNT); +} + +static void dfx_sw64_seq_buf_free(const void *buf) +{ + kvfree(buf); +} + +static int dfx_sw64_seq_buf_alloc_vcpu(struct seq_file *p, int vcpu_nr) +{ + char *buf; + size_t size; + + size = (vcpu_nr + 1) * DFX_SW64_MAX_VCPU_STAT_SIZE; + buf = dfx_sw64_seq_buf_alloc(size); + if (!buf) + return -ENOMEM; + if (p->buf) + dfx_sw64_seq_buf_free(p->buf); + p->buf = buf; + p->size = size; + return 0; +} + 
+static int __dfx_sw64_vcpu_stats_get(struct seq_file *p, void *v) +{ + struct kvm *kvm; + struct kvm_vcpu *vcpu; + struct kvm_vcpu_stat *vcpu_stats; + struct dfx_sw64_kvm_stats_debugfs_item *dp; + int vcpu_nr = 0; + int index = 0; + unsigned long i; + + mutex_lock(&kvm_lock); + list_for_each_entry(kvm, &vm_list, vm_list) + kvm_for_each_vcpu(i, vcpu, kvm) { + vcpu_nr++; + } + mutex_unlock(&kvm_lock); + vcpu_nr = min(vcpu_nr, DFX_SW64_MAX_VCPU); + if (!vcpu_nr) { + seq_putc(p, '\n'); + return 0; + } + + if (dfx_sw64_seq_buf_alloc_vcpu(p, vcpu_nr)) + return -ENOMEM; + + vcpu_stats = vmalloc(vcpu_nr * sizeof(struct kvm_vcpu_stat)); + if (!vcpu_stats) + return -ENOMEM; + + mutex_lock(&kvm_lock); + list_for_each_entry(kvm, &vm_list, vm_list) { + kvm_for_each_vcpu(i, vcpu, kvm) { + if (index >= vcpu_nr) + break; + memcpy(vcpu_stats + index, &(vcpu->stat), + sizeof(struct kvm_vcpu_stat)); + kvm_arch_vcpu_stat_reset(&vcpu->stat); + ++index; + } + } + mutex_unlock(&kvm_lock); + for (i = 0; i < vcpu_nr; i++) { + for (dp = dfx_sw64_debugfs_entries; dp->name; ++dp) { + switch (dp->dfx_kind) { + case DFX_SW64_STAT_U64: + seq_put_decimal_ull(p, " ", + *(u64 *)((void *)&vcpu_stats[i] + dp->offset)); + break; + case DFX_SW64_STAT_CPUTIME: + pr_warn("DFX_SW64_STAT_CPUTIME not supported currently!"); + break; + default: + pr_warn("Bad dfx_sw64_kind in dfx_debugfs_entries!"); + break; + } + } + seq_putc(p, '\n'); + } + + vfree(vcpu_stats); + return 0; +} + +static int dfx_sw64_vcpu_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, __dfx_sw64_vcpu_stats_get, NULL); +} + +static const struct file_operations dfx_sw64_stat_fops = { + .open = dfx_sw64_vcpu_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) { struct kobj_uevent_env *env; -- Gitee From 3fbe843ee40d71587f6481bc1606da9d2dd890ad Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: 
Mon, 8 Jan 2024 16:34:00 +0800 Subject: [PATCH 0328/2138] anolis: sw64: fix PCI support ANBZ: #4688 Modify generic headers for SW64 PCI support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- include/linux/pci-ecam.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 6b1301e2498e..863e572202e2 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -88,6 +88,7 @@ extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */ +extern const struct pci_ecam_ops sw64_pci_ecam_ops; /* SW64 PCIe */ #endif #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) -- Gitee From 023a145801830e49ccedb840cb2d3169ca96097d Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:36:22 +0800 Subject: [PATCH 0329/2138] anolis: sw64: fix kexec support ANBZ: #4688 Modify generic headers for SW64 kexec support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- include/uapi/linux/kexec.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 01766dd839b0..3be3e81c67ae 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -45,6 +45,7 @@ #define KEXEC_ARCH_AARCH64 (183 << 16) #define KEXEC_ARCH_RISCV (243 << 16) #define KEXEC_ARCH_LOONGARCH (258 << 16) +#define KEXEC_ARCH_SW64 (0x9916UL << 16) /* The artificial cap on the number of segments passed to kexec_load. 
*/ #define KEXEC_SEGMENT_MAX 16 -- Gitee From 4a02172fa73712a9b27b7b2db6571bdb8ffa77c2 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:36:41 +0800 Subject: [PATCH 0330/2138] anolis: sw64: fix audit support ANBZ: #4688 Modify generic headers for SW64 audit support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- include/uapi/linux/audit.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index d676ed2b246e..f428015e85de 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -441,6 +441,7 @@ enum { #define AUDIT_ARCH_XTENSA (EM_XTENSA) #define AUDIT_ARCH_LOONGARCH32 (EM_LOONGARCH|__AUDIT_ARCH_LE) #define AUDIT_ARCH_LOONGARCH64 (EM_LOONGARCH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_SW64 (EM_SW64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) #define AUDIT_PERM_EXEC 1 #define AUDIT_PERM_WRITE 2 -- Gitee From f31f8ad2aa734991a9e9fa3e4f5e49b5dc8436a6 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:38:04 +0800 Subject: [PATCH 0331/2138] anolis: sw64: fix ftrace support ANBZ: #4688 Modify scripts for SW64 ftrace support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- scripts/recordmcount.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c index 40ae6b2c7a6d..73558f7eb690 100644 --- a/scripts/recordmcount.c +++ b/scripts/recordmcount.c @@ -52,6 +52,12 @@ #define R_AARCH64_CALL26 283 +#ifndef EM_SW64 +#define EM_SW64 0x9916 +#define R_SW64_NONE 0 +#define R_SW64_REFQUAD 2 /* Direct 64 bit */ +#endif + static int fd_map; /* File descriptor for file being modified. */ static int mmap_failed; /* Boolean flag. 
*/ static char gpfx; /* prefix for global symbol name (sometimes '_') */ @@ -326,6 +332,16 @@ static int make_nop_arm64(void *map, size_t const offset) return 0; } +static unsigned char ideal_nop4_sw64[4] = {0x5f, 0x07, 0xff, 0x43}; + +static int make_nop_sw64(void *map, size_t const offset) +{ + /* Convert to nop */ + ulseek(offset, SEEK_SET); + uwrite(ideal_nop, 4); + return 0; +} + static int write_file(const char *fname) { char tmp_file[strlen(fname) + 4]; @@ -475,6 +491,21 @@ static int LARCH64_is_fake_mcount(Elf64_Rel const *rp) return 1; } +#define SW64_FAKEMCOUNT_OFFSET 4 + +static int sw64_is_fake_mcount(Elf64_Rel const *rp) +{ + static Elf64_Addr old_r_offset = ~(Elf64_Addr)0; + Elf64_Addr current_r_offset = _w(rp->r_offset); + int is_fake; + + is_fake = (old_r_offset != ~(Elf64_Addr)0) && + (current_r_offset - old_r_offset == SW64_FAKEMCOUNT_OFFSET); + old_r_offset = current_r_offset; + + return is_fake; +} + /* 64-bit EM_MIPS has weird ELF64_Rela.r_info. * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40] @@ -598,6 +629,14 @@ static int do_file(char const *const fname) case EM_S390: /* reltype: e_class */ break; case EM_SH: reltype = R_SH_DIR32; gpfx = 0; break; case EM_SPARCV9: reltype = R_SPARC_64; break; + case EM_SW64: + reltype = R_SW64_REFQUAD; + make_nop = make_nop_sw64; + rel_type_nop = R_SW64_NONE; + ideal_nop = ideal_nop4_sw64; + mcount_adjust_64 = -12; + is_fake_mcount64 = sw64_is_fake_mcount; + break; case EM_X86_64: make_nop = make_nop_x86; ideal_nop = ideal_nop5_x86_64; -- Gitee From 85c05ee5e0de82c19811c62a9b32bdd425f7fda2 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:23:00 +0800 Subject: [PATCH 0332/2138] anolis: tools: add basic sw64 support ANBZ: #4688 Add common headers and routines for SW64 support. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- tools/arch/sw_64/include/asm/barrier.h | 9 ++ .../arch/sw_64/include/uapi/asm/bitsperlong.h | 9 ++ tools/arch/sw_64/include/uapi/asm/errno.h | 128 ++++++++++++++++++ tools/arch/sw_64/include/uapi/asm/mman.h | 46 +++++++ tools/arch/sw_64/include/uapi/asm/perf_regs.h | 41 ++++++ tools/build/feature/test-libunwind-sw_64.c | 27 ++++ 6 files changed, 260 insertions(+) create mode 100644 tools/arch/sw_64/include/asm/barrier.h create mode 100644 tools/arch/sw_64/include/uapi/asm/bitsperlong.h create mode 100644 tools/arch/sw_64/include/uapi/asm/errno.h create mode 100644 tools/arch/sw_64/include/uapi/asm/mman.h create mode 100644 tools/arch/sw_64/include/uapi/asm/perf_regs.h create mode 100644 tools/build/feature/test-libunwind-sw_64.c diff --git a/tools/arch/sw_64/include/asm/barrier.h b/tools/arch/sw_64/include/asm/barrier.h new file mode 100644 index 000000000000..bc4aeffeb681 --- /dev/null +++ b/tools/arch/sw_64/include/asm/barrier.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _TOOLS_LINUX_ASM_SW64_BARRIER_H +#define _TOOLS_LINUX_ASM_SW64_BARRIER_H + +#define mb() __asm__ __volatile__("mb" : : : "memory") +#define rmb() __asm__ __volatile__("mb" : : : "memory") +#define wmb() __asm__ __volatile__("mb" : : : "memory") + +#endif /* _TOOLS_LINUX_ASM_SW64_BARRIER_H */ diff --git a/tools/arch/sw_64/include/uapi/asm/bitsperlong.h b/tools/arch/sw_64/include/uapi/asm/bitsperlong.h new file mode 100644 index 000000000000..f6a510c28233 --- /dev/null +++ b/tools/arch/sw_64/include/uapi/asm/bitsperlong.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_SW64_BITSPERLONG_H +#define __ASM_SW64_BITSPERLONG_H + +#define __BITS_PER_LONG 64 + +#include + +#endif /* __ASM_SW64_BITSPERLONG_H */ diff --git a/tools/arch/sw_64/include/uapi/asm/errno.h 
b/tools/arch/sw_64/include/uapi/asm/errno.h new file mode 100644 index 000000000000..2a43a943581a --- /dev/null +++ b/tools/arch/sw_64/include/uapi/asm/errno.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _SW64_ERRNO_H +#define _SW64_ERRNO_H + +#include + +#undef EAGAIN /* 11 in errno-base.h */ + +#define EDEADLK 11 /* Resource deadlock would occur */ + +#define EAGAIN 35 /* Try again */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define EINPROGRESS 36 /* Operation now in progress */ +#define EALREADY 37 /* Operation already in progress */ +#define ENOTSOCK 38 /* Socket operation on non-socket */ +#define EDESTADDRREQ 39 /* Destination address required */ +#define EMSGSIZE 40 /* Message too long */ +#define EPROTOTYPE 41 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 42 /* Protocol not available */ +#define EPROTONOSUPPORT 43 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 44 /* Socket type not supported */ +#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 46 /* Protocol family not supported */ +#define EAFNOSUPPORT 47 /* Address family not supported by protocol */ +#define EADDRINUSE 48 /* Address already in use */ +#define EADDRNOTAVAIL 49 /* Cannot assign requested address */ +#define ENETDOWN 50 /* Network is down */ +#define ENETUNREACH 51 /* Network is unreachable */ +#define ENETRESET 52 /* Network dropped connection because of reset */ +#define ECONNABORTED 53 /* Software caused connection abort */ +#define ECONNRESET 54 /* Connection reset by peer */ +#define ENOBUFS 55 /* No buffer space available */ +#define EISCONN 56 /* Transport endpoint is already connected */ +#define ENOTCONN 57 /* Transport endpoint is not connected */ +#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 59 /* Too many references: cannot splice */ +#define ETIMEDOUT 60 /* Connection timed out */ +#define ECONNREFUSED 
61 /* Connection refused */ +#define ELOOP 62 /* Too many symbolic links encountered */ +#define ENAMETOOLONG 63 /* File name too long */ +#define EHOSTDOWN 64 /* Host is down */ +#define EHOSTUNREACH 65 /* No route to host */ +#define ENOTEMPTY 66 /* Directory not empty */ + +#define EUSERS 68 /* Too many users */ +#define EDQUOT 69 /* Quota exceeded */ +#define ESTALE 70 /* Stale file handle */ +#define EREMOTE 71 /* Object is remote */ + +#define ENOLCK 77 /* No record locks available */ +#define ENOSYS 78 /* Function not implemented */ + +#define ENOMSG 80 /* No message of desired type */ +#define EIDRM 81 /* Identifier removed */ +#define ENOSR 82 /* Out of streams resources */ +#define ETIME 83 /* Timer expired */ +#define EBADMSG 84 /* Not a data message */ +#define EPROTO 85 /* Protocol error */ +#define ENODATA 86 /* No data available */ +#define ENOSTR 87 /* Device not a stream */ + +#define ENOPKG 92 /* Package not installed */ + +#define EILSEQ 116 /* Illegal byte sequence */ + +/* The following are just random noise.. 
*/ +#define ECHRNG 88 /* Channel number out of range */ +#define EL2NSYNC 89 /* Level 2 not synchronized */ +#define EL3HLT 90 /* Level 3 halted */ +#define EL3RST 91 /* Level 3 reset */ + +#define ELNRNG 93 /* Link number out of range */ +#define EUNATCH 94 /* Protocol driver not attached */ +#define ENOCSI 95 /* No CSI structure available */ +#define EL2HLT 96 /* Level 2 halted */ +#define EBADE 97 /* Invalid exchange */ +#define EBADR 98 /* Invalid request descriptor */ +#define EXFULL 99 /* Exchange full */ +#define ENOANO 100 /* No anode */ +#define EBADRQC 101 /* Invalid request code */ +#define EBADSLT 102 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 104 /* Bad font file format */ +#define ENONET 105 /* Machine is not on the network */ +#define ENOLINK 106 /* Link has been severed */ +#define EADV 107 /* Advertise error */ +#define ESRMNT 108 /* Srmount error */ +#define ECOMM 109 /* Communication error on send */ +#define EMULTIHOP 110 /* Multihop attempted */ +#define EDOTDOT 111 /* RFS specific error */ +#define EOVERFLOW 112 /* Value too large for defined data type */ +#define ENOTUNIQ 113 /* Name not unique on network */ +#define EBADFD 114 /* File descriptor in bad state */ +#define EREMCHG 115 /* Remote address changed */ + +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ + +#define ELIBACC 122 /* Can not access a needed shared library */ +#define ELIBBAD 123 /* Accessing a corrupted shared library */ +#define ELIBSCN 124 /* .lib section in a.out corrupted */ +#define ELIBMAX 125 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 126 /* Cannot exec a shared library directly */ +#define ERESTART 127 /* Interrupted system call should be restarted */ +#define ESTRPIPE 128 /* Streams pipe error */ + +#define 
ENOMEDIUM 129 /* No medium found */ +#define EMEDIUMTYPE 130 /* Wrong medium type */ +#define ECANCELED 131 /* Operation Cancelled */ +#define ENOKEY 132 /* Required key not available */ +#define EKEYEXPIRED 133 /* Key has expired */ +#define EKEYREVOKED 134 /* Key has been revoked */ +#define EKEYREJECTED 135 /* Key was rejected by service */ + +/* for robust mutexes */ +#define EOWNERDEAD 136 /* Owner died */ +#define ENOTRECOVERABLE 137 /* State not recoverable */ + +#define ERFKILL 138 /* Operation not possible due to RF-kill */ + +#define EHWPOISON 139 /* Memory page has hardware error */ + +#endif /* _SW64_ERRNO_H */ diff --git a/tools/arch/sw_64/include/uapi/asm/mman.h b/tools/arch/sw_64/include/uapi/asm/mman.h new file mode 100644 index 000000000000..a9603c93a34b --- /dev/null +++ b/tools/arch/sw_64/include/uapi/asm/mman.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef TOOLS_ARCH_SW64_UAPI_ASM_MMAN_FIX_H +#define TOOLS_ARCH_SW64_UAPI_ASM_MMAN_FIX_H +#define MADV_DODUMP 17 +#define MADV_DOFORK 11 +#define MADV_DONTDUMP 16 +#define MADV_DONTFORK 10 +#define MADV_DONTNEED 6 +#define MADV_FREE 8 +#define MADV_HUGEPAGE 14 +#define MADV_MERGEABLE 12 +#define MADV_NOHUGEPAGE 15 +#define MADV_NORMAL 0 +#define MADV_RANDOM 1 +#define MADV_REMOVE 9 +#define MADV_SEQUENTIAL 2 +#define MADV_UNMERGEABLE 13 +#define MADV_WILLNEED 3 +#define MAP_ANONYMOUS 0x10 +#define MAP_DENYWRITE 0x02000 +#define MAP_EXECUTABLE 0x04000 +#define MAP_FILE 0 +#define MAP_FIXED 0x100 +#define MAP_GROWSDOWN 0x01000 +#define MAP_HUGETLB 0x100000 +#define MAP_LOCKED 0x08000 +#define MAP_NONBLOCK 0x40000 +#define MAP_NORESERVE 0x10000 +#define MAP_POPULATE 0x20000 +#define MAP_STACK 0x80000 +#define PROT_EXEC 0x4 +#define PROT_GROWSDOWN 0x01000000 +#define PROT_GROWSUP 0x02000000 +#define PROT_NONE 0x0 +#define PROT_READ 0x1 +#define PROT_SEM 0x8 +#define PROT_WRITE 0x2 +/* MADV_HWPOISON is undefined on alpha, fix it for perf */ +#define MADV_HWPOISON 
100 +/* MADV_SOFT_OFFLINE is undefined on alpha, fix it for perf */ +#define MADV_SOFT_OFFLINE 101 +/* MAP_32BIT is undefined on alpha, fix it for perf */ +#define MAP_32BIT 0 +/* MAP_UNINITIALIZED is undefined on alpha, fix it for perf */ +#define MAP_UNINITIALIZED 0 +#endif /* TOOLS_ARCH_SW64_UAPI_ASM_MMAN_FIX_H */ diff --git a/tools/arch/sw_64/include/uapi/asm/perf_regs.h b/tools/arch/sw_64/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000000..892be5261026 --- /dev/null +++ b/tools/arch/sw_64/include/uapi/asm/perf_regs.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _ASM_SW64_PERF_REGS_H +#define _ASM_SW64_PERF_REGS_H + +enum perf_event_sw64_regs { + PERF_REG_SW64_R0, + PERF_REG_SW64_R1, + PERF_REG_SW64_R2, + PERF_REG_SW64_R3, + PERF_REG_SW64_R4, + PERF_REG_SW64_R5, + PERF_REG_SW64_R6, + PERF_REG_SW64_R7, + PERF_REG_SW64_R8, + PERF_REG_SW64_R9, + PERF_REG_SW64_R10, + PERF_REG_SW64_R11, + PERF_REG_SW64_R12, + PERF_REG_SW64_R13, + PERF_REG_SW64_R14, + PERF_REG_SW64_R15, + PERF_REG_SW64_R16, + PERF_REG_SW64_R17, + PERF_REG_SW64_R18, + PERF_REG_SW64_R19, + PERF_REG_SW64_R20, + PERF_REG_SW64_R21, + PERF_REG_SW64_R22, + PERF_REG_SW64_R23, + PERF_REG_SW64_R24, + PERF_REG_SW64_R25, + PERF_REG_SW64_R26, + PERF_REG_SW64_R27, + PERF_REG_SW64_R28, + PERF_REG_SW64_GP, + PERF_REG_SW64_SP, + PERF_REG_SW64_PC, + PERF_REG_SW64_MAX, +}; +#endif /* _ASM_SW64_PERF_REGS_H */ diff --git a/tools/build/feature/test-libunwind-sw_64.c b/tools/build/feature/test-libunwind-sw_64.c new file mode 100644 index 000000000000..274948b961f4 --- /dev/null +++ b/tools/build/feature/test-libunwind-sw_64.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as, + unw_word_t ip, + unw_dyn_info_t *di, + unw_proc_info_t *pi, + int need_unwind_info, void *arg); + +#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table) + +static 
unw_accessors_t accessors; + +int main(void) +{ + unw_addr_space_t addr_space; + + addr_space = unw_create_addr_space(&accessors, 0); + if (addr_space) + return 0; + + unw_init_remote(NULL, addr_space, NULL); + dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL); + + return 0; +} -- Gitee From c70170ee7fa102709d30a28aef8c74c82dabd2f3 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:28:12 +0800 Subject: [PATCH 0333/2138] anolis: tools: fix basic sw64 support ANBZ: #4688 Modify generic headers and Makefiles for SW64 support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- tools/build/Makefile.feature | 1 + tools/build/feature/Makefile | 9 +++++++++ tools/include/uapi/asm/bitsperlong.h | 2 ++ tools/include/uapi/asm/errno.h | 2 ++ 4 files changed, 14 insertions(+) diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 934e2777a2db..fb290a90b263 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -54,6 +54,7 @@ FEATURE_TESTS_BASIC := \ libtracefs \ libcrypto \ libunwind \ + libunwind-sw_64 \ pthread-attr-setaffinity-np \ pthread-barrier \ reallocarray \ diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index dad79ede4e0a..cb57e46cec4b 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 include ../../scripts/Makefile.include +ARCH ?= $(shell uname -m) FILES= \ test-all.bin \ test-backtrace.bin \ @@ -45,6 +46,7 @@ FILES= \ test-libunwind-x86_64.bin \ test-libunwind-arm.bin \ test-libunwind-aarch64.bin \ + test-libunwind-sw_64.bin \ test-libunwind-debug-frame-arm.bin \ test-libunwind-debug-frame-aarch64.bin \ test-pthread-attr-setaffinity-np.bin \ @@ -86,7 +88,11 @@ all: $(FILES) __BUILD = $(CC) $(CFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.c,$(@F)) $(LDFLAGS) BUILD 
= $(__BUILD) > $(@:.bin=.make.output) 2>&1 +ifeq ($(ARCH),sw_64) + BUILD_BFD = $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz +else BUILD_BFD = $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl +endif BUILD_ALL = $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd -lcap __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(@F)) $(LDFLAGS) @@ -189,6 +195,9 @@ $(OUTPUT)test-libunwind-arm.bin: $(OUTPUT)test-libunwind-aarch64.bin: $(BUILD) -lelf -lunwind-aarch64 +$(OUTPUT)test-libunwind-sw_64.bin: + $(BUILD) -lelf -lunwind-sw_64 + $(OUTPUT)test-libunwind-debug-frame-arm.bin: $(BUILD) -lelf -lunwind-arm diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h index c65267afc341..036e2fc92d1a 100644 --- a/tools/include/uapi/asm/bitsperlong.h +++ b/tools/include/uapi/asm/bitsperlong.h @@ -13,6 +13,8 @@ #include "../../../arch/ia64/include/uapi/asm/bitsperlong.h" #elif defined(__alpha__) #include "../../../arch/alpha/include/uapi/asm/bitsperlong.h" +#elif defined(__sw_64__) +#include "../../../arch/sw_64/include/uapi/asm/bitsperlong.h" #else #include #endif diff --git a/tools/include/uapi/asm/errno.h b/tools/include/uapi/asm/errno.h index 869379f91fe4..bcfa3d742933 100644 --- a/tools/include/uapi/asm/errno.h +++ b/tools/include/uapi/asm/errno.h @@ -11,6 +11,8 @@ #include "../../../arch/mips/include/uapi/asm/errno.h" #elif defined(__hppa__) #include "../../../arch/parisc/include/uapi/asm/errno.h" +#elif defined(__sw_64__) +#include "../../../arch/sw_64/include/uapi/asm/errno.h" #else #include #endif -- Gitee From 0e6e0b25fdbccc92fde3b9338e09d18aa736b48a Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:27:19 +0800 Subject: [PATCH 0334/2138] anolis: perf: add sw64 support ANBZ: #4688 Add Build, Makefiles, common headers and routines for SW64 support. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- tools/perf/arch/sw_64/Build | 2 + tools/perf/arch/sw_64/Makefile | 4 + tools/perf/arch/sw_64/include/arch-tests.h | 12 +++ tools/perf/arch/sw_64/include/perf_regs.h | 92 ++++++++++++++++++ tools/perf/arch/sw_64/tests/Build | 3 + tools/perf/arch/sw_64/tests/arch-tests.c | 16 ++++ tools/perf/arch/sw_64/tests/dwarf-unwind.c | 63 +++++++++++++ tools/perf/arch/sw_64/tests/regs_load.S | 47 ++++++++++ tools/perf/arch/sw_64/util/Build | 4 + tools/perf/arch/sw_64/util/dwarf-regs.c | 94 +++++++++++++++++++ tools/perf/arch/sw_64/util/perf_regs.c | 6 ++ tools/perf/arch/sw_64/util/unwind-libdw.c | 60 ++++++++++++ tools/perf/arch/sw_64/util/unwind-libunwind.c | 84 +++++++++++++++++ tools/perf/util/libunwind/sw64.c | 33 +++++++ 14 files changed, 520 insertions(+) create mode 100644 tools/perf/arch/sw_64/Build create mode 100644 tools/perf/arch/sw_64/Makefile create mode 100644 tools/perf/arch/sw_64/include/arch-tests.h create mode 100644 tools/perf/arch/sw_64/include/perf_regs.h create mode 100644 tools/perf/arch/sw_64/tests/Build create mode 100644 tools/perf/arch/sw_64/tests/arch-tests.c create mode 100644 tools/perf/arch/sw_64/tests/dwarf-unwind.c create mode 100644 tools/perf/arch/sw_64/tests/regs_load.S create mode 100644 tools/perf/arch/sw_64/util/Build create mode 100644 tools/perf/arch/sw_64/util/dwarf-regs.c create mode 100644 tools/perf/arch/sw_64/util/perf_regs.c create mode 100644 tools/perf/arch/sw_64/util/unwind-libdw.c create mode 100644 tools/perf/arch/sw_64/util/unwind-libunwind.c create mode 100644 tools/perf/util/libunwind/sw64.c diff --git a/tools/perf/arch/sw_64/Build b/tools/perf/arch/sw_64/Build new file mode 100644 index 000000000000..36222e64bbf7 --- /dev/null +++ b/tools/perf/arch/sw_64/Build @@ -0,0 +1,2 @@ +perf-y += util/ +perf-$(CONFIG_DWARF_UNWIND) += tests/ diff --git 
a/tools/perf/arch/sw_64/Makefile b/tools/perf/arch/sw_64/Makefile new file mode 100644 index 000000000000..1aa9dd772489 --- /dev/null +++ b/tools/perf/arch/sw_64/Makefile @@ -0,0 +1,4 @@ +ifndef NO_DWARF +PERF_HAVE_DWARF_REGS := 1 +endif +PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1 diff --git a/tools/perf/arch/sw_64/include/arch-tests.h b/tools/perf/arch/sw_64/include/arch-tests.h new file mode 100644 index 000000000000..90ec4c8cb880 --- /dev/null +++ b/tools/perf/arch/sw_64/include/arch-tests.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_TESTS_H +#define ARCH_TESTS_H + +#ifdef HAVE_DWARF_UNWIND_SUPPORT +struct thread; +struct perf_sample; +#endif + +extern struct test arch_tests[]; + +#endif diff --git a/tools/perf/arch/sw_64/include/perf_regs.h b/tools/perf/arch/sw_64/include/perf_regs.h new file mode 100644 index 000000000000..e0c1b15375b5 --- /dev/null +++ b/tools/perf/arch/sw_64/include/perf_regs.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_PERF_REGS_H +#define ARCH_PERF_REGS_H + +#include +#include +#include + +void perf_regs_load(u64 *regs); + +#define PERF_REGS_MASK ((1ULL << PERF_REG_SW64_MAX) - 1) +#define PERF_REGS_MAX PERF_REG_SW64_MAX +#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64 + +#define PERF_REG_IP PERF_REG_SW64_PC +#define PERF_REG_SP PERF_REG_SW64_SP + +static inline const char *perf_reg_name(int id) +{ + switch (id) { + case PERF_REG_SW64_R0: + return "r0"; + case PERF_REG_SW64_R1: + return "r1"; + case PERF_REG_SW64_R2: + return "r2"; + case PERF_REG_SW64_R3: + return "r3"; + case PERF_REG_SW64_R4: + return "r4"; + case PERF_REG_SW64_R5: + return "r5"; + case PERF_REG_SW64_R6: + return "r6"; + case PERF_REG_SW64_R7: + return "r7"; + case PERF_REG_SW64_R8: + return "r8"; + case PERF_REG_SW64_R9: + return "r9"; + case PERF_REG_SW64_R10: + return "r10"; + case PERF_REG_SW64_R11: + return "r11"; + case PERF_REG_SW64_R12: + return "r12"; + case PERF_REG_SW64_R13: + return "r13"; + 
case PERF_REG_SW64_R14: + return "r14"; + case PERF_REG_SW64_R15: + return "r15"; + case PERF_REG_SW64_R16: + return "r16"; + case PERF_REG_SW64_R17: + return "r17"; + case PERF_REG_SW64_R18: + return "r18"; + case PERF_REG_SW64_R19: + return "r19"; + case PERF_REG_SW64_R20: + return "r20"; + case PERF_REG_SW64_R21: + return "r21"; + case PERF_REG_SW64_R22: + return "r22"; + case PERF_REG_SW64_R23: + return "r23"; + case PERF_REG_SW64_R24: + return "r24"; + case PERF_REG_SW64_R25: + return "r25"; + case PERF_REG_SW64_R26: + return "r26"; + case PERF_REG_SW64_R27: + return "r27"; + case PERF_REG_SW64_R28: + return "r28"; + case PERF_REG_SW64_GP: + return "gp"; + case PERF_REG_SW64_SP: + return "sp"; + case PERF_REG_SW64_PC: + return "pc"; + default: + return NULL; + } + + return NULL; +} + +#endif /* ARCH_PERF_REGS_H */ diff --git a/tools/perf/arch/sw_64/tests/Build b/tools/perf/arch/sw_64/tests/Build new file mode 100644 index 000000000000..b8a38eadfb35 --- /dev/null +++ b/tools/perf/arch/sw_64/tests/Build @@ -0,0 +1,3 @@ +perf-y += regs_load.o +perf-y += dwarf-unwind.o +perf-y += arch-tests.o diff --git a/tools/perf/arch/sw_64/tests/arch-tests.c b/tools/perf/arch/sw_64/tests/arch-tests.c new file mode 100644 index 000000000000..5b1543c98022 --- /dev/null +++ b/tools/perf/arch/sw_64/tests/arch-tests.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "tests/tests.h" +#include "arch-tests.h" + +struct test arch_tests[] = { +#ifdef HAVE_DWARF_UNWIND_SUPPORT + { + .desc = "DWARF unwind", + .func = test__dwarf_unwind, + }, +#endif + { + .func = NULL, + }, +}; diff --git a/tools/perf/arch/sw_64/tests/dwarf-unwind.c b/tools/perf/arch/sw_64/tests/dwarf-unwind.c new file mode 100644 index 000000000000..cd7047b7a546 --- /dev/null +++ b/tools/perf/arch/sw_64/tests/dwarf-unwind.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "perf_regs.h" +#include "thread.h" +#include "map.h" +#include "maps.h" +#include "event.h" +#include 
"debug.h" +#include "tests/tests.h" + +#define STACK_SIZE 8192 + +static int sample_ustack(struct perf_sample *sample, + struct thread *thread, u64 *regs) +{ + struct stack_dump *stack = &sample->user_stack; + struct map *map; + unsigned long sp; + u64 stack_size, *buf; + + buf = malloc(STACK_SIZE); + if (!buf) { + printf("failed to allocate sample uregs data\n"); + return -1; + } + + sp = (unsigned long) regs[PERF_REG_SW64_SP]; + + map = maps__find(thread->maps, (u64)sp); + if (!map) { + printf("failed to get stack map\n"); + free(buf); + return -1; + } + + stack_size = map->end - sp; + stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size; + + memcpy(buf, (void *) sp, stack_size); + stack->data = (char *) buf; + stack->size = stack_size; + return 0; +} + +int test__arch_unwind_sample(struct perf_sample *sample, + struct thread *thread) +{ + struct regs_dump *regs = &sample->user_regs; + u64 *buf; + + buf = calloc(1, sizeof(u64) * PERF_REGS_MAX); + if (!buf) { + printf("failed to allocate sample uregs data\n"); + return -1; + } + + perf_regs_load(buf); + regs->abi = PERF_SAMPLE_REGS_ABI; + regs->regs = buf; + regs->mask = PERF_REGS_MASK; + + return sample_ustack(sample, thread, buf); +} diff --git a/tools/perf/arch/sw_64/tests/regs_load.S b/tools/perf/arch/sw_64/tests/regs_load.S new file mode 100644 index 000000000000..8c5aabc2c6fb --- /dev/null +++ b/tools/perf/arch/sw_64/tests/regs_load.S @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include + +.text +.set noat +.type perf_regs_load,%function +#define STL_REG(r) stl $r, (8 * r)($16) +#define LDL_REG(r) ldl $r, (8 * r)($16) +#define SP (8 * 30) +#define PC (8 * 31) +SYM_FUNC_START(perf_regs_load) + STL_REG(0) + STL_REG(1) + STL_REG(2) + STL_REG(3) + STL_REG(4) + STL_REG(5) + STL_REG(6) + STL_REG(7) + STL_REG(8) + STL_REG(9) + STL_REG(10) + STL_REG(11) + STL_REG(12) + STL_REG(13) + STL_REG(14) + STL_REG(15) + STL_REG(16) + STL_REG(17) + STL_REG(18) + STL_REG(19) + STL_REG(20) + STL_REG(21) + 
STL_REG(22) + STL_REG(23) + STL_REG(24) + STL_REG(25) + STL_REG(26) + STL_REG(27) + STL_REG(28) + STL_REG(29) + mov $30, $17 + stl $17, (SP)($16) + stl $26, (PC)($16) + LDL_REG(17) + ret +SYM_FUNC_END(perf_regs_load) diff --git a/tools/perf/arch/sw_64/util/Build b/tools/perf/arch/sw_64/util/Build new file mode 100644 index 000000000000..39f459b636a0 --- /dev/null +++ b/tools/perf/arch/sw_64/util/Build @@ -0,0 +1,4 @@ +perf-y += perf_regs.o +perf-$(CONFIG_DWARF) += dwarf-regs.o +perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o +perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o diff --git a/tools/perf/arch/sw_64/util/dwarf-regs.c b/tools/perf/arch/sw_64/util/dwarf-regs.c new file mode 100644 index 000000000000..11c1ee5444da --- /dev/null +++ b/tools/perf/arch/sw_64/util/dwarf-regs.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Mapping of DWARF debug register numbers into register names. + * + * Copyright (C) 2010 Will Deacon, ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include /* for struct user_pt_regs */ +#include +#include "util.h" + +struct pt_regs_dwarfnum { + const char *name; + unsigned int dwarfnum; +}; + +#define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num} +#define GPR_DWARFNUM_NAME(num) \ + {.name = __stringify(%x##num), .dwarfnum = num} +#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0} +#define DWARFNUM2OFFSET(index) \ + (index * sizeof((struct user_pt_regs *)0)->regs[0]) + +static const struct pt_regs_dwarfnum regdwarfnum_table[] = { + GPR_DWARFNUM_NAME(0), + GPR_DWARFNUM_NAME(1), + GPR_DWARFNUM_NAME(2), + GPR_DWARFNUM_NAME(3), + GPR_DWARFNUM_NAME(4), + GPR_DWARFNUM_NAME(5), + GPR_DWARFNUM_NAME(6), + GPR_DWARFNUM_NAME(7), + GPR_DWARFNUM_NAME(8), + GPR_DWARFNUM_NAME(9), + GPR_DWARFNUM_NAME(10), + GPR_DWARFNUM_NAME(11), + GPR_DWARFNUM_NAME(12), + GPR_DWARFNUM_NAME(13), + GPR_DWARFNUM_NAME(14), + GPR_DWARFNUM_NAME(15), + REG_DWARFNUM_NAME("%fp", 15), + GPR_DWARFNUM_NAME(16), + GPR_DWARFNUM_NAME(17), + GPR_DWARFNUM_NAME(18), + GPR_DWARFNUM_NAME(19), + GPR_DWARFNUM_NAME(20), + GPR_DWARFNUM_NAME(21), + GPR_DWARFNUM_NAME(22), + GPR_DWARFNUM_NAME(23), + GPR_DWARFNUM_NAME(24), + GPR_DWARFNUM_NAME(25), + GPR_DWARFNUM_NAME(26), + GPR_DWARFNUM_NAME(27), + GPR_DWARFNUM_NAME(28), + REG_DWARFNUM_NAME("%gp", 29), + REG_DWARFNUM_NAME("%sp", 30), + REG_DWARFNUM_END, +}; + +/** + * get_arch_regstr() - lookup register name from it's DWARF register number + * @n: the DWARF register number + * + * get_arch_regstr() returns the name of the register in struct + * regdwarfnum_table from it's DWARF register number. 
If the register is not + * found in the table, this returns NULL; + */ +const char *get_arch_regstr(unsigned int n) +{ + const struct pt_regs_dwarfnum *roff; + + for (roff = regdwarfnum_table; roff->name != NULL; roff++) + if (roff->dwarfnum == n) + return roff->name; + return NULL; +} + +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_dwarfnum *roff; + + for (roff = regdwarfnum_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return DWARFNUM2OFFSET(roff->dwarfnum); + return -EINVAL; +} diff --git a/tools/perf/arch/sw_64/util/perf_regs.c b/tools/perf/arch/sw_64/util/perf_regs.c new file mode 100644 index 000000000000..2833e101a7c6 --- /dev/null +++ b/tools/perf/arch/sw_64/util/perf_regs.c @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "../../../util/perf_regs.h" + +const struct sample_reg sample_reg_masks[] = { + SMPL_REG_END +}; diff --git a/tools/perf/arch/sw_64/util/unwind-libdw.c b/tools/perf/arch/sw_64/util/unwind-libdw.c new file mode 100644 index 000000000000..3e2b6acc40ac --- /dev/null +++ b/tools/perf/arch/sw_64/util/unwind-libdw.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "../../util/unwind-libdw.h" +#include "../../util/perf_regs.h" +#include "../../util/event.h" + +bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg) +{ + struct unwind_info *ui = arg; + struct regs_dump *user_regs = &ui->sample->user_regs; + Dwarf_Word dwarf_regs[PERF_REG_SW64_MAX], dwarf_pc; + +#define REG(r) ({ \ + Dwarf_Word val = 0; \ + perf_reg_value(&val, user_regs, PERF_REG_SW64_##r); \ + val; \ +}) + + dwarf_regs[0] = REG(R0); + dwarf_regs[1] = REG(R1); + dwarf_regs[2] = REG(R2); + dwarf_regs[3] = REG(R3); + dwarf_regs[4] = REG(R4); + dwarf_regs[5] = REG(R5); + dwarf_regs[6] = REG(R6); + dwarf_regs[7] = REG(R7); + dwarf_regs[8] = REG(R8); + dwarf_regs[9] = REG(R9); + dwarf_regs[10] = REG(R10); + dwarf_regs[11] = REG(R11); + dwarf_regs[12] = REG(R12); + dwarf_regs[13] 
= REG(R13); + dwarf_regs[14] = REG(R14); + dwarf_regs[15] = REG(R15); + dwarf_regs[16] = REG(R16); + dwarf_regs[17] = REG(R17); + dwarf_regs[18] = REG(R18); + dwarf_regs[19] = REG(R19); + dwarf_regs[20] = REG(R20); + dwarf_regs[21] = REG(R21); + dwarf_regs[22] = REG(R22); + dwarf_regs[23] = REG(R23); + dwarf_regs[24] = REG(R24); + dwarf_regs[25] = REG(R25); + dwarf_regs[26] = REG(R26); + dwarf_regs[27] = REG(R27); + dwarf_regs[28] = REG(R28); + dwarf_regs[29] = REG(R29); + dwarf_regs[30] = REG(R30); + dwarf_regs[31] = REG(R31); + + if (!dwfl_thread_state_registers(thread, 0, PERF_REG_SW64_MAX, + dwarf_regs)) + return false; + + dwarf_pc = REG(PC); + dwfl_thread_state_register_pc(thread, dwarf_pc); + + return true; +} diff --git a/tools/perf/arch/sw_64/util/unwind-libunwind.c b/tools/perf/arch/sw_64/util/unwind-libunwind.c new file mode 100644 index 000000000000..134e3c2280d2 --- /dev/null +++ b/tools/perf/arch/sw_64/util/unwind-libunwind.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#ifndef REMOTE_UNWIND_LIBUNWIND +#include +#include "perf_regs.h" +#include "../../util/unwind.h" +#include "../../util/debug.h" +#endif + +int LIBUNWIND__ARCH_REG_ID(int regnum) +{ + switch (regnum) { + case UNW_SW_64_R0: + return PERF_REG_SW64_R0; + case UNW_SW_64_R1: + return PERF_REG_SW64_R1; + case UNW_SW_64_R2: + return PERF_REG_SW64_R2; + case UNW_SW_64_R3: + return PERF_REG_SW64_R3; + case UNW_SW_64_R4: + return PERF_REG_SW64_R4; + case UNW_SW_64_R5: + return PERF_REG_SW64_R5; + case UNW_SW_64_R6: + return PERF_REG_SW64_R6; + case UNW_SW_64_R7: + return PERF_REG_SW64_R7; + case UNW_SW_64_R8: + return PERF_REG_SW64_R8; + case UNW_SW_64_R9: + return PERF_REG_SW64_R9; + case UNW_SW_64_R10: + return PERF_REG_SW64_R10; + case UNW_SW_64_R11: + return PERF_REG_SW64_R11; + case UNW_SW_64_R12: + return PERF_REG_SW64_R12; + case UNW_SW_64_R13: + return PERF_REG_SW64_R13; + case UNW_SW_64_R14: + return PERF_REG_SW64_R14; + case UNW_SW_64_R15: + return 
PERF_REG_SW64_R15; + case UNW_SW_64_R16: + return PERF_REG_SW64_R16; + case UNW_SW_64_R17: + return PERF_REG_SW64_R17; + case UNW_SW_64_R18: + return PERF_REG_SW64_R18; + case UNW_SW_64_R19: + return PERF_REG_SW64_R19; + case UNW_SW_64_R20: + return PERF_REG_SW64_R20; + case UNW_SW_64_R21: + return PERF_REG_SW64_R21; + case UNW_SW_64_R22: + return PERF_REG_SW64_R22; + case UNW_SW_64_R23: + return PERF_REG_SW64_R23; + case UNW_SW_64_R24: + return PERF_REG_SW64_R24; + case UNW_SW_64_R25: + return PERF_REG_SW64_R25; + case UNW_SW_64_R26: + return PERF_REG_SW64_R26; + case UNW_SW_64_R27: + return PERF_REG_SW64_R27; + case UNW_SW_64_R28: + return PERF_REG_SW64_R28; + case UNW_SW_64_R29: + return PERF_REG_SW64_GP; + case UNW_SW_64_R30: + return PERF_REG_SW64_SP; + case UNW_SW_64_PC: + return PERF_REG_SW64_PC; + default: + pr_err("unwind: invalid reg id %d\n", regnum); + return -EINVAL; + } + + return -EINVAL; +} diff --git a/tools/perf/util/libunwind/sw64.c b/tools/perf/util/libunwind/sw64.c new file mode 100644 index 000000000000..12452bf2ab8b --- /dev/null +++ b/tools/perf/util/libunwind/sw64.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file setups defines to compile arch specific binary from the + * generic one. + * + * The function 'LIBUNWIND__ARCH_REG_ID' name is set according to arch + * name and the defination of this function is included directly from + * 'arch/arm64/util/unwind-libunwind.c', to make sure that this function + * is defined no matter what arch the host is. + * + * Finally, the arch specific unwind methods are exported which will + * be assigned to each arm64 thread. 
+ */ + +#define REMOTE_UNWIND_LIBUNWIND + +/* Define arch specific functions & regs for libunwind, should be + * defined before including "unwind.h" + */ +#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__sw_64_reg_id(regnum) +#define LIBUNWIND__ARCH_REG_IP PERF_REG_SW64_PC +#define LIBUNWIND__ARCH_REG_SP PERF_REG_SW64_SP + +#include "unwind.h" +#include "debug.h" +#include "libunwind-sw_64.h" +#include <../../../arch/sw_64/include/uapi/asm/perf_regs.h> +#include "../../arch/sw_64/util/unwind-libunwind.c" + +#include "util/unwind-libunwind-local.c" + +struct unwind_libunwind_ops * +sw64_unwind_libunwind_ops = &_unwind_libunwind_ops; -- Gitee From 4367ada414ddee5d770b083055c3f8f51842a944 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:27:37 +0800 Subject: [PATCH 0335/2138] anolis: perf: fix sw64 support ANBZ: #4688 Modify generic Build, Makefiles and routines for SW64 support. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- tools/perf/Makefile.config | 13 +++++++++++++ tools/perf/tests/Build | 2 +- tools/perf/util/Build | 1 + tools/perf/util/annotate.c | 3 +++ tools/perf/util/env.c | 2 ++ tools/perf/util/unwind-libunwind.c | 4 ++++ 6 files changed, 24 insertions(+), 1 deletion(-) diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index d66b52407e19..a48258330cc0 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -94,6 +94,12 @@ ifeq ($(SRCARCH),csky) NO_PERF_REGS := 0 endif +ifeq ($(SRCARCH),sw_64) + NO_PERF_REGS := 0 + CFLAGS += -mieee + LIBUNWIND_LIBS = -lunwind -lunwind-sw_64 +endif + ifeq ($(ARCH),s390) NO_PERF_REGS := 0 CFLAGS += -fPIC -I$(OUTPUT)arch/s390/include/generated @@ -640,6 +646,13 @@ ifndef NO_LIBUNWIND CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME_AARCH64 endif endif + ifeq ($(feature-libunwind-sw_64), 1) + $(call detected,CONFIG_LIBUNWIND_SW64) + CFLAGS += 
-DHAVE_LIBUNWIND_SW_64_SUPPORT + LDFLAGS += -lunwind-sw_64 + EXTLIBS_LIBUNWIND += -lunwind-sw_64 + have_libunwind = 1 + endif ifneq ($(feature-libunwind), 1) msg := $(warning No libunwind found. Please install libunwind-dev[el] >= 1.1 and/or set LIBUNWIND_DIR); diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build index 2b45ffa462a6..29c065768a8b 100644 --- a/tools/perf/tests/Build +++ b/tools/perf/tests/Build @@ -68,7 +68,7 @@ perf-y += event_groups.o perf-y += symbols.o perf-y += util.o -ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc)) +ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc sw_64)) perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o endif diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 6d657c9927f7..89a051732e87 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -199,6 +199,7 @@ perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o perf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o perf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o +perf-$(CONFIG_LIBUNWIND_SW64) += libunwind/sw64.o ifeq ($(CONFIG_LIBTRACEEVENT),y) perf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 6dfe11cbf30e..51625af5f85d 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -183,6 +183,9 @@ static struct arch architectures[] = { .comment_char = '#', }, }, + { + .name = "sw_64", + }, { .name = "x86", .init = x86__annotate_init, diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index d2c7b6e6eae5..8175df5df556 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -436,6 +436,8 @@ static const char *normalize_arch(char *arch) return "arm64"; if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110")) return "arm"; + if (!strncmp(arch, "sw_64", 5)) + return "sw_64"; if (!strncmp(arch, "s390", 4)) return "s390"; if (!strncmp(arch, "parisc", 6)) diff --git 
a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c index 76cd63de80a8..c2e84a827e33 100644 --- a/tools/perf/util/unwind-libunwind.c +++ b/tools/perf/util/unwind-libunwind.c @@ -11,6 +11,7 @@ struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops; struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops; struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops; +struct unwind_libunwind_ops __weak *sw64_unwind_libunwind_ops; static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops) { @@ -53,6 +54,9 @@ int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized } else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) { if (dso_type == DSO__TYPE_64BIT) ops = arm64_unwind_libunwind_ops; + } else if (!strcmp(arch, "sw_64")) { + if (dso_type == DSO__TYPE_64BIT) + ops = sw64_unwind_libunwind_ops; } if (!ops) { -- Gitee From 38a486fc78eb53b955e2f29b7f05164ab61dc093 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Mon, 8 Jan 2024 16:29:33 +0800 Subject: [PATCH 0336/2138] anolis: selftests: fix sw64 support ANBZ: #4688 Modify generic routines for SW64 support. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- .../ftrace/test.d/kprobe/kprobe_args_string.tc | 3 +++ .../ftrace/test.d/kprobe/kprobe_args_syntax.tc | 4 ++++ .../testing/selftests/mm/virtual_address_range.c | 5 +++++ .../testing/selftests/seccomp/seccomp_benchmark.c | 5 +++++ tools/testing/selftests/seccomp/seccomp_bpf.c | 15 ++++++++++++++- 5 files changed, 31 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc index 4f72c2875f6b..dbc76ca50ab5 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc @@ -31,6 +31,9 @@ mips*) loongarch*) ARG1=%r4 ;; +sw_64) + ARG1=%r16 +;; *) echo "Please implement other architecture here" exit_untested diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc index 1df61e13a812..8de38fb00bae 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc @@ -44,6 +44,10 @@ loongarch*) GOODREG=%r4 BADREG=%r12 ;; +sw_64) + GOODREG=%r16 + BADREG=%ps +;; *) echo "Please implement other architecture here" exit_untested diff --git a/tools/testing/selftests/mm/virtual_address_range.c b/tools/testing/selftests/mm/virtual_address_range.c index bae0ceaf95b1..76efbd5637cb 100644 --- a/tools/testing/selftests/mm/virtual_address_range.c +++ b/tools/testing/selftests/mm/virtual_address_range.c @@ -54,6 +54,11 @@ #define HIGH_ADDR_SHIFT 49 #define NR_CHUNKS_LOW NR_CHUNKS_256TB #define NR_CHUNKS_HIGH NR_CHUNKS_3840TB +#elif defined __sw_64__ +#define HIGH_ADDR_MARK ADDR_MARK_128TB * 32UL +#define HIGH_ADDR_SHIFT 53 +#define 
NR_CHUNKS_LOW NR_CHUNKS_128TB * 32UL +#define NR_CHUNKS_HIGH 0 #else #define HIGH_ADDR_MARK ADDR_MARK_128TB #define HIGH_ADDR_SHIFT 48 diff --git a/tools/testing/selftests/seccomp/seccomp_benchmark.c b/tools/testing/selftests/seccomp/seccomp_benchmark.c index 5b5c9d558dee..7004099ce11b 100644 --- a/tools/testing/selftests/seccomp/seccomp_benchmark.c +++ b/tools/testing/selftests/seccomp/seccomp_benchmark.c @@ -20,6 +20,11 @@ #include "../kselftest.h" +#ifdef __sw_64__ +#define __NR_getpid 174 +#define __NR_getppid 175 +#endif + unsigned long long timing(clockid_t clk_id, unsigned long long samples) { struct timespec start, finish; diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index cacf6507f690..e5b1c58721ac 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -66,6 +66,11 @@ # define PR_SET_PTRACER 0x59616d61 #endif +#ifdef __sw_64__ +#define __NR_getpid 174 +#define __NR_getppid 175 +#endif + #ifndef PR_SET_NO_NEW_PRIVS #define PR_SET_NO_NEW_PRIVS 38 #define PR_GET_NO_NEW_PRIVS 39 @@ -142,6 +147,8 @@ struct seccomp_data { # define __NR_seccomp 372 # elif defined(__mc68000__) # define __NR_seccomp 380 +# elif defined(__sw_64__) +# define __NR_seccomp 514 # else # warning "seccomp syscall number unknown for this architecture" # define __NR_seccomp 0xffff @@ -1850,6 +1857,12 @@ TEST_F(TRACE_poke, getpid_runs_normally) # define ARCH_REGS struct user_regs_struct # define SYSCALL_NUM(_regs) (_regs).orig_d0 # define SYSCALL_RET(_regs) (_regs).d0 +#elif defined(__sw_64__) +# define ARCH_REGS struct user_pt_regs +# define SYSCALL_NUM(_regs) (_regs).regs[0] +# define SYSCALL_RET(_regs) (_regs).regs[0] +# define SYSCALL_RET_SET(_regs, _val) \ + TH_LOG("Can't modify syscall return on this architecture") #else # error "Do not know how to find your architecture's registers and syscalls" #endif @@ -1914,7 +1927,7 @@ const bool ptrace_entry_set_syscall_ret = 
* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux). */ -#if defined(__x86_64__) || defined(__i386__) || defined(__mips__) || defined(__mc68000__) +#if defined(__x86_64__) || defined(__i386__) || defined(__mips__) || defined(__mc68000__) || defined(__sw_64__) # define ARCH_GETREGS(_regs) ptrace(PTRACE_GETREGS, tracee, 0, &(_regs)) # define ARCH_SETREGS(_regs) ptrace(PTRACE_SETREGS, tracee, 0, &(_regs)) #else -- Gitee From 3574f0c70a34263b183bfd059a23330237cff011 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:55:15 +0800 Subject: [PATCH 0337/2138] anolis: drivers: acpi: add sw64 support ANBZ: #4688 Add acpi drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/acpi/acpi_apd.c | 19 ++++++++++++++++++- drivers/acpi/numa/Kconfig | 2 +- drivers/acpi/numa/srat.c | 2 +- drivers/acpi/pci_mcfg.c | 26 ++++++++++++++++++++++++++ 4 files changed, 46 insertions(+), 3 deletions(-) diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 80f945cbec8a..791f4b234e02 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -40,7 +40,8 @@ struct apd_private_data { const struct apd_device_desc *dev_desc; }; -#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || defined(CONFIG_ARM64) +#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || \ +defined(CONFIG_ARM64) || defined(CONFIG_SW64) #define APD_ADDR(desc) ((unsigned long)&desc) static int acpi_apd_setup(struct apd_private_data *pdata) @@ -178,6 +179,18 @@ static const struct apd_device_desc hip08_spi_desc = { }; #endif /* CONFIG_ARM64 */ +#ifdef CONFIG_SW64 +static const struct apd_device_desc sunway_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 25000000, +}; + +static const struct apd_device_desc sunway_spi_desc = { + .setup = acpi_apd_setup, + 
.fixed_clk_rate = 25000000, +}; +#endif + #endif /* @@ -246,6 +259,10 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "HISI02A3", APD_ADDR(hip08_lite_i2c_desc) }, { "HISI0173", APD_ADDR(hip08_spi_desc) }, { "NXP0001", APD_ADDR(nxp_i2c_desc) }, +#endif +#ifdef CONFIG_SW64 + { "HISI02A1", APD_ADDR(sunway_i2c_desc) }, + { "HISI0173", APD_ADDR(sunway_spi_desc) }, #endif { } }; diff --git a/drivers/acpi/numa/Kconfig b/drivers/acpi/numa/Kconfig index 39b1f34c21df..67d1f40bfa9f 100644 --- a/drivers/acpi/numa/Kconfig +++ b/drivers/acpi/numa/Kconfig @@ -2,7 +2,7 @@ config ACPI_NUMA bool "NUMA support" depends on NUMA - depends on (X86 || IA64 || ARM64 || LOONGARCH) + depends on (X86 || IA64 || ARM64 || LOONGARCH || SW64) default y if IA64 || ARM64 config ACPI_HMAT diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index a44c0761fd1c..8ed90017a56d 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -211,7 +211,7 @@ __weak int __init numa_fill_memblks(u64 start, u64 end) return NUMA_NO_MEMBLK; } -#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) +#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) || defined(CONFIG_SW64) /* * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for * I/O localities since SRAT does not list them. I/O localities are diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index 860014b89b8e..1dccb26b2b7f 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -182,6 +182,32 @@ static struct mcfg_fixup mcfg_quirks[] = { LOONGSON_ECAM_MCFG("\0", 1), LOONGSON_ECAM_MCFG("LOONGSON", 1), #endif /* LOONGARCH */ + +#ifdef CONFIG_SW64 +#define _SW64_ECAM_QUIRK(rev, seg) \ + { "SUNWAY", "SUNWAY. 
", rev, seg, MCFG_BUS_ANY, &sw64_pci_ecam_ops } +#define SW64_ECAM_QUIRK(rev, node) _SW64_ECAM_QUIRK(rev, node * 8 + 0),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 1),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 2),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 3),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 4),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 5),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 6),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 7) + + /** + * According to the address space of sw64, up to 8 nodes supported + * with a maximum of 8 pcie controllers per node + */ + SW64_ECAM_QUIRK(1, 0x00), + SW64_ECAM_QUIRK(1, 0x01), + SW64_ECAM_QUIRK(1, 0x02), + SW64_ECAM_QUIRK(1, 0x03), + SW64_ECAM_QUIRK(1, 0x04), + SW64_ECAM_QUIRK(1, 0x05), + SW64_ECAM_QUIRK(1, 0x06), + SW64_ECAM_QUIRK(1, 0x07), +#endif /* SW64 */ }; static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; -- Gitee From 9f0af269b5f07d073ba653cf8eac4e4f9e9965e0 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:42:24 +0800 Subject: [PATCH 0338/2138] anolis: drivers: clocksource: add sw64 support ANBZ: #4688 Add clocksource driver for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/clocksource/Kconfig | 3 + drivers/clocksource/Makefile | 1 + drivers/clocksource/timer-sw64.c | 411 +++++++++++++++++++++++++++++++ 3 files changed, 415 insertions(+) create mode 100644 drivers/clocksource/timer-sw64.c diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 8208a3d89563..a944c3122b7b 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -733,4 +733,7 @@ config GOLDFISH_TIMER help Support for the timer/counter of goldfish-rtc +config SW64_TIMER + bool + endmenu diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 368c3461dab8..b9ef4c79915e 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -89,3 +89,4 @@ obj-$(CONFIG_MSC313E_TIMER) += timer-msc313e.o obj-$(CONFIG_GOLDFISH_TIMER) += timer-goldfish.o obj-$(CONFIG_GXP_TIMER) += timer-gxp.o obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o +obj-$(CONFIG_SW64_TIMER) += timer-sw64.o diff --git a/drivers/clocksource/timer-sw64.c b/drivers/clocksource/timer-sw64.c new file mode 100644 index 000000000000..a124b6d8fed9 --- /dev/null +++ b/drivers/clocksource/timer-sw64.c @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define SHTCLK_RATE_KHZ 25000 +#define SHTCLK_RATE (SHTCLK_RATE_KHZ * 1000) + +#if defined(CONFIG_SUBARCH_C4) +static u64 read_longtime(struct clocksource *cs) +{ + return read_csr(CSR_SHTCLOCK); +} + +static struct clocksource clocksource_longtime = { + .name = "longtime", + .rating = 100, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mask = CLOCKSOURCE_MASK(64), + .shift = 0, + .mult = 0, + .read = read_longtime, +}; + +static u64 notrace read_sched_clock(void) +{ + return 
read_csr(CSR_SHTCLOCK); +} + +void __init sw64_setup_clocksource(void) +{ + clocksource_register_khz(&clocksource_longtime, SHTCLK_RATE_KHZ); + sched_clock_register(read_sched_clock, BITS_PER_LONG, SHTCLK_RATE); +} + +void __init setup_sched_clock(void) { } +#elif defined(CONFIG_SUBARCH_C3B) +#ifdef CONFIG_SMP +static u64 read_longtime(struct clocksource *cs) +{ + unsigned long node; + + node = __this_cpu_read(hard_node_id); + return __io_read_longtime(node); +} + +static int longtime_enable(struct clocksource *cs) +{ + switch (cpu_desc.model) { + case CPU_SW3231: + sw64_io_write(0, GPIO_SWPORTA_DR, 0); + sw64_io_write(0, GPIO_SWPORTA_DDR, 0xff); + break; + case CPU_SW831: + __io_write_longtime_start_en(0, 0x1); + break; + default: + break; + } + + return 0; +} + +static struct clocksource clocksource_longtime = { + .name = "longtime", + .rating = 100, + .enable = longtime_enable, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mask = CLOCKSOURCE_MASK(64), + .shift = 0, + .mult = 0, + .read = read_longtime, +}; + +static u64 read_vtime(struct clocksource *cs) +{ + unsigned long vtime_addr; + + vtime_addr = IO_BASE | LONG_TIME; + return rdio64(vtime_addr); +} + +static int vtime_enable(struct clocksource *cs) +{ + return 0; +} + +static struct clocksource clocksource_vtime = { + .name = "vtime", + .rating = 100, + .enable = vtime_enable, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mask = CLOCKSOURCE_MASK(64), + .shift = 0, + .mult = 0, + .read = read_vtime, +}; +#else /* !SMP */ +static u64 read_tc(struct clocksource *cs) +{ + return rdtc(); +} + +static struct clocksource clocksource_tc = { + .name = "tc", + .rating = 300, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mask = CLOCKSOURCE_MASK(64), + .shift = 22, + .mult = 0, /* To be filled in */ + .read = read_tc, +}; +#endif /* SMP */ + +#define DEFAULT_MCLK 25 /* Mhz */ + +void __init sw64_setup_clocksource(void) +{ + unsigned int mclk = *((unsigned char *)__va(MB_MCLK)); + + if (!mclk) + mclk = DEFAULT_MCLK; + +#ifdef 
CONFIG_SMP
+	if (is_in_host())
+		clocksource_register_khz(&clocksource_longtime, mclk * 1000);
+	else
+		clocksource_register_khz(&clocksource_vtime, DEFAULT_MCLK * 1000);
+#else
+	clocksource_register_hz(&clocksource_tc, get_cpu_freq());
+	pr_info("Setup clocksource TC, mult = %d\n", clocksource_tc.mult);
+#endif
+}
+
+DECLARE_PER_CPU(u64, tc_offset);
+static u64 sc_start, sc_shift, sc_multi;
+DEFINE_STATIC_KEY_FALSE(use_tc_as_sched_clock);
+
+static int __init sched_clock_setup(char *opt)
+{
+	if (!opt)
+		return -EINVAL;
+
+	if (!strncmp(opt, "on", 2)) {
+		static_branch_enable(&use_tc_as_sched_clock);
+		pr_info("Using TC instead of jiffies as source of sched_clock()\n");
+	}
+
+	return 0;
+}
+early_param("tc_sched_clock", sched_clock_setup);
+
+static void __init calibrate_sched_clock(void)
+{
+	sc_start = rdtc();
+}
+
+void __init setup_sched_clock(void)
+{
+	unsigned long step;
+
+	sc_shift = 7;
+	step = 1UL << sc_shift;
+	sc_multi = step * NSEC_PER_SEC / get_cpu_freq();
+	calibrate_sched_clock();
+
+	pr_info("sched_clock: sc_multi=%llu, sc_shift=%llu\n", sc_multi, sc_shift);
+}
+
+#ifdef CONFIG_GENERIC_SCHED_CLOCK
+static u64 notrace read_sched_clock(void)
+{
+	return (rdtc() - sc_start) >> sc_shift;
+}
+
+void __init sw64_sched_clock_init(void)
+{
+	/* Register the TC-backed clock defined above as the generic sched_clock. */
+	sched_clock_register(read_sched_clock, BITS_PER_LONG, get_cpu_freq() >> sc_shift);
+}
+#else /* !CONFIG_GENERIC_SCHED_CLOCK */
+/*
+ * scheduler clock - returns current time in nanoseconds.
+ */ +unsigned long long notrace sched_clock(void) +{ + if (static_branch_likely(&use_tc_as_sched_clock)) + return ((rdtc() - sc_start + __this_cpu_read(tc_offset)) >> sc_shift) * sc_multi; + else + return (jiffies - INITIAL_JIFFIES) * (NSEC_PER_SEC / HZ); +} + +#ifdef CONFIG_DEBUG_FS +static ssize_t sched_clock_status_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char buf[2]; + + if (static_key_enabled(&use_tc_as_sched_clock)) + buf[0] = 'Y'; + else + buf[0] = 'N'; + buf[1] = '\n'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t sched_clock_status_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + int r; + bool bv; + bool val = static_key_enabled(&use_tc_as_sched_clock); + + r = kstrtobool_from_user(user_buf, count, &bv); + if (!r) { + if (val != bv) { + if (bv) { + static_branch_enable(&use_tc_as_sched_clock); + pr_info("source of sched_clock() switched from jiffies to TC\n"); + } else { + static_branch_disable(&use_tc_as_sched_clock); + pr_info("source of sched_clock() switched from TC to jiffies\n"); + } + } else { + if (val) + pr_info("source of sched_clock() unchanged (using TC)\n"); + else + pr_info("source of sched_clock() unchanged (using jiffies)\n"); + } + } + + return count; +} + +static const struct file_operations sched_clock_status_fops = { + .read = sched_clock_status_read, + .write = sched_clock_status_write, + .open = nonseekable_open, + .llseek = no_llseek, +}; + +static int __init sched_clock_debug_init(void) +{ + struct dentry *sched_clock_status; + + if (!sw64_debugfs_dir) + return -ENODEV; + + sched_clock_status = debugfs_create_file("tc_sched_clock", + 0644, sw64_debugfs_dir, NULL, + &sched_clock_status_fops); + + if (!sched_clock_status) + return -ENOMEM; + + return 0; +} +late_initcall(sched_clock_debug_init); +#endif /* CONFIG_DEBUG_FS */ +#endif /* CONFIG_GENERIC_SCHED_CLOCK */ + +#endif + + + +static int 
timer_next_event(unsigned long delta, + struct clock_event_device *evt); +static int timer_set_shutdown(struct clock_event_device *evt); +static int timer_set_oneshot(struct clock_event_device *evt); + +/* + * The local apic timer can be used for any function which is CPU local. + */ +static struct clock_event_device timer_clockevent = { + .name = "timer", + .features = CLOCK_EVT_FEAT_ONESHOT, + .shift = 20, + .mult = 0, + .set_state_shutdown = timer_set_shutdown, + .set_state_oneshot = timer_set_oneshot, + .set_next_event = timer_next_event, + .rating = 300, + .irq = -1, +}; + +static int vtimer_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + hcall(HCALL_SET_CLOCKEVENT, delta, 0, 0); + return 0; +} + +static int vtimer_shutdown(struct clock_event_device *evt) +{ + hcall(HCALL_SET_CLOCKEVENT, 0, 0, 0); + return 0; +} + +static int vtimer_set_oneshot(struct clock_event_device *evt) +{ + return 0; +} +static struct clock_event_device vtimer_clockevent = { + .name = "vtimer", + .features = CLOCK_EVT_FEAT_ONESHOT, + .shift = 20, + .mult = 0, + .set_state_shutdown = vtimer_shutdown, + .set_state_oneshot = vtimer_set_oneshot, + .set_next_event = vtimer_next_event, + .rating = 300, + .irq = -1, +}; + +static DEFINE_PER_CPU(struct clock_event_device, timer_events); + +/* + * Program the next event, relative to now + */ +static int timer_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + wrtimer(delta); + return 0; +} + +static int timer_set_shutdown(struct clock_event_device *evt) +{ + wrtimer(0); + return 0; +} + +static int timer_set_oneshot(struct clock_event_device *evt) +{ + /* + * SW-TIMER support CLOCK_EVT_MODE_ONESHOT only, and automatically. + * unlike PIT and HPET, which support ONESHOT or PERIODIC by setting PIT_MOD or HPET_Tn_CFG + * so, nothing to do here ... 
+	 */
+	return 0;
+}
+
+void sw64_update_clockevents(unsigned long cpu, u32 freq)
+{
+	struct clock_event_device *swevt = &per_cpu(timer_events, cpu);
+
+	if (cpu == smp_processor_id())
+		clockevents_update_freq(swevt, freq);
+	else {
+		clockevents_calc_mult_shift(swevt, freq, 4);
+		swevt->min_delta_ns = clockevent_delta2ns(swevt->min_delta_ticks, swevt);
+		swevt->max_delta_ns = clockevent_delta2ns(swevt->max_delta_ticks, swevt);
+	}
+}
+
+/*
+ * Setup the local timer for this CPU. Copy the initialized values
+ * of the boot CPU and register the clock event in the framework.
+ */
+void sw64_setup_timer(void)
+{
+	unsigned long min_delta;
+	int cpu = smp_processor_id();
+	struct clock_event_device *swevt = &per_cpu(timer_events, cpu);
+
+	/* min_delta ticks => 100ns */
+	min_delta = get_cpu_freq()/1000/1000/10;
+
+	if (is_in_guest()) {
+		memcpy(swevt, &vtimer_clockevent, sizeof(*swevt));
+		/*
+		 * CUIWEI: This value is very important.
+		 * If it's too small, the timer will timeout when the IER
+		 * haven't been opened.
+		 */
+		min_delta *= 4;
+	} else {
+		memcpy(swevt, &timer_clockevent, sizeof(*swevt));
+	}
+	swevt->cpumask = cpumask_of(cpu);
+	swevt->set_state_shutdown(swevt);
+	clockevents_config_and_register(swevt, get_cpu_freq(), min_delta, ULONG_MAX);
+}
+
+void sw64_timer_interrupt(void)
+{
+	struct clock_event_device *evt = this_cpu_ptr(&timer_events);
+
+	irq_enter();
+
+	/* Always balance irq_enter() with irq_exit(), even for spurious ticks. */
+	if (!evt->event_handler) {
+		pr_warn("Spurious local timer interrupt on cpu %d\n",
+			smp_processor_id());
+		timer_set_shutdown(evt);
+	} else {
+		inc_irq_stat(timer_irqs_event);
+		evt->event_handler(evt);
+	}
+
+	irq_exit();
+}
-- 
Gitee


From d591c7428fde4fc899443a2681f05603ffee750e Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Thu, 18 Jan 2024 16:42:49 +0800
Subject: [PATCH 0339/2138] anolis: drivers: cpufreq: add sw64 support

ANBZ: #4688

Add cpufreq drivers for SW64.
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/cpufreq/Kconfig | 23 ++++ drivers/cpufreq/Makefile | 2 + drivers/cpufreq/sw64_cpufreq.c | 175 +++++++++++++++++++++++++ drivers/cpufreq/sw64_cpufreq_debugfs.c | 101 ++++++++++++++ 4 files changed, 301 insertions(+) create mode 100644 drivers/cpufreq/sw64_cpufreq.c create mode 100644 drivers/cpufreq/sw64_cpufreq_debugfs.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index b14584bfdf3f..d1fdea27eb0d 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -314,6 +314,29 @@ config SH_CPU_FREQ If unsure, say N. endif +if SW64 +config SW64_CPUFREQ + bool "SW64 CPU Frequency interface" + depends on UNCORE_XUELANG + default y + help + This adds the CPUFreq driver for SW64 processor which supports + software configurable cpu frequency. + + For details, take a look at . + + If unsure, say N. + +config SW64_CPUFREQ_DEBUGFS + bool "SW64 CPU Frequency debugfs interface" + depends on SW64_CPUFREQ && DEBUG_FS + default y + help + Turns on the DebugFS interface for CPU Frequency. + + If you don't know what to do here, say N. 
+endif + config QORIQ_CPUFREQ tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" depends on OF && COMMON_CLK diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 076ea3ac1b56..f9c1c9012ce7 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -108,3 +108,5 @@ obj-$(CONFIG_LOONGSON3_ACPI_CPUFREQ) += loongson3-acpi-cpufreq.o obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o +obj-$(CONFIG_SW64_CPUFREQ) += sw64_cpufreq.o +obj-$(CONFIG_SW64_CPUFREQ_DEBUGFS) += sw64_cpufreq_debugfs.o diff --git a/drivers/cpufreq/sw64_cpufreq.c b/drivers/cpufreq/sw64_cpufreq.c new file mode 100644 index 000000000000..f4bf5f3cc550 --- /dev/null +++ b/drivers/cpufreq/sw64_cpufreq.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw/kernel/setup.c + * + * Copyright (C) 1995 Linus Torvalds + */ + +/* + * Cpufreq driver for the sw64 processors + * + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include /* set_cpus_allowed() */ +#include +#include +#include + +#include +#include +#include + +static uint nowait; + +static struct clk *cpuclk; + + +static int sw64_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data); + +static struct notifier_block sw64_cpufreq_notifier_block = { + .notifier_call = sw64_cpu_freq_notifier +}; + +static int sw64_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freqs = (struct cpufreq_freqs *)data; + unsigned long cpu = freqs->policy->cpu; + + if (val == CPUFREQ_POSTCHANGE) + sw64_update_clockevents(cpu, freqs->new * 1000); + + return 0; +} + +static unsigned int sw64_cpufreq_get(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); + + if (!policy || IS_ERR(policy->clk)) { + pr_err("%s: No %s associated to cpu: %d\n", + __func__, policy ? 
"clk" : "policy", cpu); + return 0; + } + + return __sw64_cpufreq_get(policy); +} + +/* + * Here we notify other drivers of the proposed change and the final change. + */ +static int sw64_cpufreq_target(struct cpufreq_policy *policy, + unsigned int index) +{ + unsigned int cpu = policy->cpu; + + if (!cpu_online(cpu)) + return -ENODEV; + + /* setting the cpu frequency */ + sw64_set_rate(index); + update_cpu_freq(freq_table[index].frequency); + + return 0; +} + +static int sw64_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + cpuclk = sw64_clk_get(NULL, "cpu_clk"); + if (IS_ERR(cpuclk)) { + pr_err("couldn't get CPU clk\n"); + return PTR_ERR(cpuclk); + } + + policy->clk = cpuclk; + + cpufreq_generic_init(policy, freq_table, 0); + + return 0; +} + +static int sw64_cpufreq_verify(struct cpufreq_policy_data *policy) +{ + return cpufreq_frequency_table_verify(policy, freq_table); +} + +static int sw64_cpufreq_exit(struct cpufreq_policy *policy) +{ + return 0; +} + +static struct freq_attr *sw64_table_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, NULL, +}; + +static struct cpufreq_driver sw64_cpufreq_driver = { + .name = "sw64", + .init = sw64_cpufreq_cpu_init, + .verify = sw64_cpufreq_verify, + .target_index = sw64_cpufreq_target, + .get = sw64_cpufreq_get, + .exit = sw64_cpufreq_exit, + .attr = sw64_table_attr, +}; + +static const struct platform_device_id platform_device_ids[] = { + { + .name = "sw64_cpufreq", + }, + {} +}; + +MODULE_DEVICE_TABLE(platform, platform_device_ids); + +static struct platform_driver platform_driver = { + .driver = { + .name = "sw64_cpufreq", + }, + .id_table = platform_device_ids, +}; + + +static int __init cpufreq_init(void) +{ + int ret; + + if (is_in_guest()) { + pr_warn("Now sw_64 CPUFreq does not support virtual machines\n"); + return -ENODEV; + } + + /* Register platform stuff */ + ret = platform_driver_register(&platform_driver); + if (ret) + return ret; + + pr_info("SW-64 CPU frequency driver\n"); + + 
cpufreq_register_notifier(&sw64_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + return cpufreq_register_driver(&sw64_cpufreq_driver); +} + +static void __exit cpufreq_exit(void) +{ + cpufreq_unregister_driver(&sw64_cpufreq_driver); + cpufreq_unregister_notifier(&sw64_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + platform_driver_unregister(&platform_driver); +} + +module_init(cpufreq_init); +module_exit(cpufreq_exit); + +module_param(nowait, uint, 0644); +MODULE_PARM_DESC(nowait, "Disable SW-64 specific wait"); + +MODULE_DESCRIPTION("cpufreq driver for sw64"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/sw64_cpufreq_debugfs.c b/drivers/cpufreq/sw64_cpufreq_debugfs.c new file mode 100644 index 000000000000..bb4ae26bc22b --- /dev/null +++ b/drivers/cpufreq/sw64_cpufreq_debugfs.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +#include +#include +#include + +static int cpufreq_show(struct seq_file *m, void *v) +{ + int i; + u64 val; + int freq; + + val = sw64_io_read(0, CLK_CTL); + val = val >> CORE_PLL2_CFG_SHIFT; + + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + if (freq_table[i].frequency != CPUFREQ_ENTRY_INVALID) + freq = freq_table[i].frequency; + else + freq = freq_table[i].driver_data; + + if (val == i) + seq_printf(m, "[%d] ", freq); + else + seq_printf(m, "%d ", freq); + } + seq_puts(m, "\n"); + + return 0; +} + +static int cpufreq_open(struct inode *inode, struct file *file) +{ + return single_open(file, cpufreq_show, NULL); +} + +static ssize_t cpufreq_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + char buf[5]; + size_t size; + int cf, i, err, index, freq; + + size = min(sizeof(buf) - 1, len); + if (copy_from_user(buf, user_buf, size)) + return -EFAULT; + buf[size] = '\0'; + + err = kstrtoint(buf, 10, &cf); + if (err) + return err; + + index = -1; + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + if 
(freq_table[i].frequency != CPUFREQ_ENTRY_INVALID) + freq = freq_table[i].frequency; + else + freq = freq_table[i].driver_data; + + if (cf == freq) { + index = i; + break; + } + } + + if (index < 0) + return -EINVAL; + + sw64_set_rate(index); + update_cpu_freq(freq); + return len; +} + +static const struct file_operations set_cpufreq_fops = { + .open = cpufreq_open, + .read = seq_read, + .write = cpufreq_set, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init cpufreq_debugfs_init(void) +{ + struct dentry *cpufreq_entry; + + if (!sw64_debugfs_dir) + return -ENODEV; + + cpufreq_entry = debugfs_create_file("cpufreq", 0600, + sw64_debugfs_dir, NULL, + &set_cpufreq_fops); + if (!cpufreq_entry) + return -ENOMEM; + + return 0; +} +late_initcall(cpufreq_debugfs_init); -- Gitee From d52f18486bb429fb2cf3cef6fc59dc364fcd127c Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:47:32 +0800 Subject: [PATCH 0340/2138] anolis: drivers: efi: add sw64 support ANBZ: #4688 Add efi drivers for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/firmware/efi/Kconfig | 2 +- drivers/firmware/efi/Makefile | 2 + drivers/firmware/efi/efi.c | 2 +- drivers/firmware/efi/sunway-init.c | 221 ++++++++++++++++++++++++++ drivers/firmware/efi/sunway-runtime.c | 85 ++++++++++ 5 files changed, 310 insertions(+), 2 deletions(-) create mode 100644 drivers/firmware/efi/sunway-init.c create mode 100644 drivers/firmware/efi/sunway-runtime.c diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 231f1c70d1db..138491a4b494 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -224,7 +224,7 @@ config EFI_DISABLE_PCI_DMA config EFI_EARLYCON def_bool y - depends on SERIAL_EARLYCON && !ARM && !IA64 + depends on SERIAL_EARLYCON && !ARM && !IA64 && !SW64 select FONT_SUPPORT select ARCH_USE_MEMREMAP_PROT diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index b4528af86517..7c1b924e8ea3 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -35,6 +35,8 @@ obj-$(CONFIG_SYSFB) += sysfb_efi.o arm-obj-$(CONFIG_EFI) := efi-init.o arm-runtime.o obj-$(CONFIG_ARM) += $(arm-obj-y) obj-$(CONFIG_ARM64) += $(arm-obj-y) +sw64-obj-$(CONFIG_EFI) := sunway-init.o sunway-runtime.o +obj-$(CONFIG_SW64) += $(sw64-obj-y) riscv-obj-$(CONFIG_EFI) := efi-init.o riscv-runtime.o obj-$(CONFIG_RISCV) += $(riscv-obj-y) #obj-$(CONFIG_LOONGARCH) += efi-init.o diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 2c1095dcc2f2..f5b7f34e8069 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -809,7 +809,7 @@ int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr) return 0; } -#ifndef CONFIG_IA64 +#if !defined(CONFIG_IA64) && !defined(CONFIG_SW64) static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor, size_t size) { 
diff --git a/drivers/firmware/efi/sunway-init.c b/drivers/firmware/efi/sunway-init.c new file mode 100644 index 000000000000..870abc2f5afe --- /dev/null +++ b/drivers/firmware/efi/sunway-init.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Extensible Firmware Interface + * + * Based on Extensible Firmware Interface Specification version 2.4 + * + * Copyright (C) 2013 - 2015 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#define pr_fmt(fmt) "efi: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +unsigned long entSuspend; +unsigned long bios_version; + +static int __init is_memory(efi_memory_desc_t *md) +{ + if (md->attribute & (EFI_MEMORY_WB|EFI_MEMORY_WT|EFI_MEMORY_WC)) + return 1; + return 0; +} +static efi_config_table_type_t arch_tables[] __initdata = { + {SMBIOS3_TABLE_GUID, NULL, ""}, + {SLEEP_ENTRY_GUID, &entSuspend, "SLEEP ENTRY"}, + {BIOS_VERSION_GUID, &bios_version, "BIOS VERSION"}, + {}, +}; + +static int __init uefi_init(u64 efi_system_table) +{ + efi_char16_t *c16; + efi_config_table_t *config_tables; + efi_system_table_t *systab; + size_t table_size; + char vendor[100] = "unknown"; + int i, retval; + + systab = early_memremap(efi_system_table, + sizeof(efi_system_table_t)); + if (systab == NULL) { + pr_warn("Unable to map EFI system table.\n"); + return -ENOMEM; + } + + set_bit(EFI_BOOT, &efi.flags); + if (IS_ENABLED(CONFIG_64BIT)) + set_bit(EFI_64BIT, &efi.flags); + + /* + * Verify the EFI Table + */ + if (systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) { + pr_err("System table signature incorrect\n"); + retval = -EINVAL; + goto out; + } + if ((systab->hdr.revision >> 16) < 2) + pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n", + systab->hdr.revision >> 16, + 
systab->hdr.revision & 0xffff);
+
+	efi.runtime = systab->runtime;
+	efi.runtime_version = systab->hdr.revision;
+
+	/* Show what we know for posterity */
+	c16 = early_memremap(systab->fw_vendor,
+			     sizeof(vendor) * sizeof(efi_char16_t));
+	if (c16) {
+		/* Stop at the vendor string's NUL terminator (per-char test). */
+		for (i = 0; i < (int) sizeof(vendor) - 1 && c16[i]; ++i)
+			vendor[i] = c16[i];
+		vendor[i] = '\0';
+		early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
+	}
+
+	pr_info("EFI v%u.%.02u by %s\n",
+		systab->hdr.revision >> 16,
+		systab->hdr.revision & 0xffff, vendor);
+
+	table_size = sizeof(efi_config_table_64_t) * systab->nr_tables;
+	config_tables = early_memremap(systab->tables, table_size);
+	if (config_tables == NULL) {
+		pr_warn("Unable to map EFI config table array.\n");
+		retval = -ENOMEM;
+		goto out;
+	}
+
+	retval = efi_config_parse_tables(config_tables, systab->nr_tables,
+					 arch_tables);
+
+	early_memunmap(config_tables, table_size);
+out:
+	early_memunmap(systab, sizeof(efi_system_table_t));
+
+	if (!bios_version)
+		retval = -EINVAL;
+
+	return retval;
+}
+
+/*
+ * Return true for regions that can be used as System RAM.
+ */
+static __init int is_usable_memory(efi_memory_desc_t *md)
+{
+	switch (md->type) {
+	case EFI_LOADER_CODE:
+	case EFI_LOADER_DATA:
+	case EFI_ACPI_RECLAIM_MEMORY:
+	case EFI_BOOT_SERVICES_CODE:
+	case EFI_BOOT_SERVICES_DATA:
+	case EFI_CONVENTIONAL_MEMORY:
+	case EFI_PERSISTENT_MEMORY:
+		/*
+		 * According to the spec, these regions are no longer reserved
+		 * after calling ExitBootServices(). However, we can only use
+		 * them as System RAM if they can be mapped writeback cacheable.
+ */ + return (md->attribute & EFI_MEMORY_WB); + default: + break; + } + return false; +} + +static __init void reserve_regions(void) +{ + efi_memory_desc_t *md; + u64 paddr, npages, size; + + if (efi_enabled(EFI_DBG)) + pr_info("Processing EFI memory map:\n"); + + for_each_efi_memory_desc(md) { + paddr = md->phys_addr; + npages = md->num_pages; + + if (efi_enabled(EFI_DBG)) { + char buf[64]; + + pr_info(" 0x%012llx-0x%012llx %s\n", + paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, + efi_md_typeattr_format(buf, sizeof(buf), md)); + } + + memrange_efi_to_native(&paddr, &npages); + size = npages << PAGE_SHIFT; + + if (is_memory(md)) { + early_init_dt_add_memory_arch(paddr, size); + + if (!is_usable_memory(md)) + memblock_mark_nomap(paddr, size); + + /* keep ACPI reclaim memory intact for kexec etc. */ + if (md->type == EFI_ACPI_RECLAIM_MEMORY) + memblock_reserve(paddr, size); + } + } +} + +void __init efi_init(void) +{ + struct efi_memory_map_data data; + u64 efi_system_table; + + if (sunway_boot_params->efi_systab == 0) { + pr_info("System Table is not exist, disabling EFI.\n"); + return; + } + + /* Grab UEFI information placed in struct boot_params by stub */ + efi_system_table = sunway_boot_params->efi_systab; + if (!efi_system_table) + return; + + data.desc_version = sunway_boot_params->efi_memdesc_version; + data.desc_size = sunway_boot_params->efi_memdesc_size; + data.size = sunway_boot_params->efi_memmap_size; + data.phys_map = sunway_boot_params->efi_memmap; + + if (efi_memmap_init_early(&data) < 0) { + /* + * If we are booting via UEFI, the UEFI memory map is the only + * description of memory we have, so there is little point in + * proceeding if we cannot access it. 
+ */ + panic("Unable to map EFI memory map.\n"); + } + + WARN(efi.memmap.desc_version != 1, + "Unexpected EFI_MEMORY_DESCRIPTOR version %ld", + efi.memmap.desc_version); + + if (uefi_init(efi_system_table) < 0) { + efi_memmap_unmap(); + return; + } + + reserve_regions(); + + memblock_reserve(sunway_boot_params->efi_memmap & PAGE_MASK, + PAGE_ALIGN(sunway_boot_params->efi_memmap_size + + (sunway_boot_params->efi_memmap & ~PAGE_MASK))); + +} diff --git a/drivers/firmware/efi/sunway-runtime.c b/drivers/firmware/efi/sunway-runtime.c new file mode 100644 index 000000000000..6bd96cff7d5d --- /dev/null +++ b/drivers/firmware/efi/sunway-runtime.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Extensible Firmware Interface + * + * Based on Extensible Firmware Interface Specification version 2.4 + * + * Copyright (C) 2013, 2014 Linaro Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * Enable the UEFI Runtime Services if all prerequisites are in place, i.e., + * non-early mapping of the UEFI system table and virtual mappings for all + * EFI_MEMORY_RUNTIME regions. 
+ */ +static int __init sunway_enable_runtime_services(void) +{ + u64 mapsize; + + if (!efi_enabled(EFI_BOOT)) { + pr_info("EFI services will not be available.\n"); + return 0; + } + + efi_memmap_unmap(); + + mapsize = efi.memmap.desc_size * efi.memmap.nr_map; + + if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) { + pr_err("Failed to remap EFI memory map\n"); + return 0; + } + + if (efi_runtime_disabled()) { + pr_info("EFI runtime services will be disabled.\n"); + return 0; + } + + if (efi_enabled(EFI_RUNTIME_SERVICES)) { + pr_info("EFI runtime services access via paravirt.\n"); + return 0; + } + + /* Set up runtime services function pointers */ + efi_native_runtime_setup(); + set_bit(EFI_RUNTIME_SERVICES, &efi.flags); + + return 0; +} +early_initcall(sunway_enable_runtime_services); + + +static int __init sunway_dmi_init(void) +{ + /* + * On SW64, DMI depends on UEFI, and dmi_scan_machine() needs to + * be called early because dmi_id_init(), which is an arch_initcall + * itself, depends on dmi_scan_machine() having been called already. + */ + dmi_setup(); + return 0; +} +core_initcall(sunway_dmi_init); -- Gitee From 633e4b805aa2387a3568f6c177f8c6baef9eedcf Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:43:45 +0800 Subject: [PATCH 0341/2138] anolis: drivers: gpio: add sw64 support ANBZ: #4688 Add gpio drivers for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/gpio/Kconfig | 9 + drivers/gpio/Makefile | 1 + drivers/gpio/gpio-sunway.c | 861 ++++++++++++++++++++++ include/linux/platform_data/gpio-sunway.h | 33 + 4 files changed, 904 insertions(+) create mode 100644 drivers/gpio/gpio-sunway.c create mode 100644 include/linux/platform_data/gpio-sunway.h diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index ebd4e113dc26..509f42e6ab6a 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -246,6 +246,15 @@ config GPIO_DWAPB Say Y or M here to build support for the Synopsys DesignWare APB GPIO block. +config GPIO_SUNWAY + tristate "Sunway gpio driver" + depends on SW64 + select GPIO_GENERIC + select GENERIC_IRQ_CHIP + help + Say Y or M here to build support for the Sunway + GPIO block. + config GPIO_EIC_SPRD tristate "Spreadtrum EIC support" depends on ARCH_SPRD || COMPILE_TEST diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index eb73b5d633eb..e44a700ec7d3 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -195,3 +195,4 @@ obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o obj-$(CONFIG_GPIO_ZYNQMP_MODEPIN) += gpio-zynqmp-modepin.o +obj-$(CONFIG_GPIO_SUNWAY) += gpio-sunway.o diff --git a/drivers/gpio/gpio-sunway.c b/drivers/gpio/gpio-sunway.c new file mode 100644 index 000000000000..b9c6848317db --- /dev/null +++ b/drivers/gpio/gpio-sunway.c @@ -0,0 +1,861 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2011 Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * All enquiries to support@picochip.com + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gpiolib.h" +#include "gpiolib-acpi.h" + + +#define GPIO_SWPORTA_DR (0x00UL<<7) +#define GPIO_SWPORTA_DDR (0X04UL<<7) +#define GPIO_SWPORTB_DR (0X0CUL<<7) +#define GPIO_SWPORTB_DDR (0X10UL<<7) +#define GPIO_SWPORTC_DR (0x18UL<<7) +#define GPIO_SWPORTC_DDR (0x1cUL<<7) +#define GPIO_SWPORTD_DR (0x24UL<<7) +#define GPIO_SWPORTD_DDR (0x28UL<<7) +#define GPIO_INTEN (0x30UL<<7) +#define GPIO_INTMASK (0x34UL<<7) +#define GPIO_INTTYPE_LEVEL (0x38UL<<7) +#define GPIO_INT_POLARITY (0x3cUL<<7) +#define GPIO_INTSTATUS (0x40UL<<7) +#define GPIO_PORTA_DEBOUNCE (0x48UL<<7) +#define GPIO_PORTA_EOI (0x4cUL<<7) +#define GPIO_EXT_PORTA (0x50UL<<7) +#define GPIO_EXT_PORTB (0x54UL<<7) +#define GPIO_EXT_PORTC (0x58UL<<7) +#define GPIO_EXT_PORTD (0x5cUL<<7) + +#define DWAPB_MAX_PORTS 4 +#define GPIO_EXT_PORT_STRIDE 0x04 /* register stride 32 bits */ +#define GPIO_SWPORT_DR_STRIDE 0x0c /* register stride 3*32 bits */ +#define GPIO_SWPORT_DDR_STRIDE 0x0c /* register stride 3*32 bits */ + +#define GPIO_REG_OFFSET_V2 1 + +#define GPIO_INTMASK_V2 0x44 +#define GPIO_INTTYPE_LEVEL_V2 0x34 +#define GPIO_INT_POLARITY_V2 0x38 +#define GPIO_INTSTATUS_V2 0x3c +#define GPIO_PORTA_EOI_V2 0x40 + +struct sunway_gpio; + +#ifdef CONFIG_PM_SLEEP +/* Store GPIO context across system-wide suspend/resume transitions */ +struct sunway_context { + u32 data; + u32 dir; + u32 ext; + u32 int_en; + u32 int_mask; + u32 int_type; + u32 int_pol; + u32 int_deb; + u32 wake_en; +}; +#endif + +struct sunway_gpio_port { + struct gpio_chip gc; + bool is_registered; + struct sunway_gpio *gpio; +#ifdef CONFIG_PM_SLEEP + struct sunway_context *ctx; +#endif + unsigned int idx; +}; + +struct sunway_gpio { + struct device *dev; + void __iomem *regs; + struct 
sunway_gpio_port *ports; + unsigned int nr_ports; + struct irq_domain *domain; + unsigned int flags; + struct reset_control *rst; + struct clk *clk; +}; + +static inline u32 gpio_reg_v2_convert(unsigned int offset) +{ + switch (offset) { + case GPIO_INTMASK: + return GPIO_INTMASK_V2; + case GPIO_INTTYPE_LEVEL: + return GPIO_INTTYPE_LEVEL_V2; + case GPIO_INT_POLARITY: + return GPIO_INT_POLARITY_V2; + case GPIO_INTSTATUS: + return GPIO_INTSTATUS_V2; + case GPIO_PORTA_EOI: + return GPIO_PORTA_EOI_V2; + } + + return offset; +} + +static inline u32 gpio_reg_convert(struct sunway_gpio *gpio, unsigned int offset) +{ + if (gpio->flags & GPIO_REG_OFFSET_V2) + return gpio_reg_v2_convert(offset); + + return offset; +} + +static inline u32 sunway_read(struct sunway_gpio *gpio, unsigned int offset) +{ + struct gpio_chip *gc = &gpio->ports[0].gc; + void __iomem *reg_base = gpio->regs; + + return gc->read_reg(reg_base + gpio_reg_convert(gpio, offset)); +} + +static inline void sunway_write(struct sunway_gpio *gpio, unsigned int offset, + u32 val) +{ + struct gpio_chip *gc = &gpio->ports[0].gc; + void __iomem *reg_base = gpio->regs; + + gc->write_reg(reg_base + gpio_reg_convert(gpio, offset), val); +} + +static int sunway_gpio_to_irq(struct gpio_chip *gc, unsigned int offset) +{ + struct sunway_gpio_port *port = gpiochip_get_data(gc); + struct sunway_gpio *gpio = port->gpio; + + return irq_find_mapping(gpio->domain, offset); +} + +static struct sunway_gpio_port *sunway_offs_to_port(struct sunway_gpio *gpio, unsigned int offs) +{ + struct sunway_gpio_port *port; + int i; + + for (i = 0; i < gpio->nr_ports; i++) { + port = &gpio->ports[i]; + if (port->idx == offs / 32) + return port; + } + + return NULL; +} + +static void sunway_toggle_trigger(struct sunway_gpio *gpio, unsigned int offs) +{ + struct sunway_gpio_port *port = sunway_offs_to_port(gpio, offs); + struct gpio_chip *gc; + u32 pol; + int val; + + if (!port) + return; + gc = &port->gc; + + pol = sunway_read(gpio, 
GPIO_INT_POLARITY); + /* Just read the current value right out of the data register */ + val = gc->get(gc, offs % 32); + if (val) + pol &= ~BIT(offs); + else + pol |= BIT(offs); + + sunway_write(gpio, GPIO_INT_POLARITY, pol); +} + +static u32 sunway_do_irq(struct sunway_gpio *gpio) +{ + u32 irq_status = sunway_read(gpio, GPIO_INTSTATUS); + u32 ret = irq_status; + + while (irq_status) { + int hwirq = fls(irq_status) - 1; + int gpio_irq = irq_find_mapping(gpio->domain, hwirq); + + generic_handle_irq(gpio_irq); + irq_status &= ~BIT(hwirq); + + if ((irq_get_trigger_type(gpio_irq) & IRQ_TYPE_SENSE_MASK) + == IRQ_TYPE_EDGE_BOTH) + sunway_toggle_trigger(gpio, hwirq); + } + + return ret; +} + +static void sunway_irq_handler(struct irq_desc *desc) +{ + struct sunway_gpio *gpio = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + + sunway_do_irq(gpio); + + if (chip->irq_eoi) + chip->irq_eoi(irq_desc_get_irq_data(desc)); +} + +static void sunway_irq_enable(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + val = sunway_read(gpio, GPIO_INTEN); + val |= BIT(d->hwirq); + sunway_write(gpio, GPIO_INTEN, val); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); +} + +static void sunway_irq_disable(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + val = sunway_read(gpio, GPIO_INTEN); + val &= ~BIT(d->hwirq); + sunway_write(gpio, GPIO_INTEN, val); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); +} + +static int sunway_irq_reqres(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = 
igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + int ret; + + ret = gpiochip_lock_as_irq(gc, irqd_to_hwirq(d)); + if (ret) { + dev_err(gpio->dev, "unable to lock HW IRQ %lu for IRQ\n", + irqd_to_hwirq(d)); + return ret; + } + return 0; +} + +static void sunway_irq_relres(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + + gpiochip_unlock_as_irq(gc, irqd_to_hwirq(d)); +} + +static int sunway_irq_set_type(struct irq_data *d, u32 type) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + int bit = d->hwirq; + unsigned long level, polarity, flags; + + if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING | + IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) + return -EINVAL; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + level = sunway_read(gpio, GPIO_INTTYPE_LEVEL); + polarity = sunway_read(gpio, GPIO_INT_POLARITY); + + switch (type) { + case IRQ_TYPE_EDGE_BOTH: + level |= BIT(bit); + sunway_toggle_trigger(gpio, bit); + break; + case IRQ_TYPE_EDGE_RISING: + level |= BIT(bit); + polarity |= BIT(bit); + break; + case IRQ_TYPE_EDGE_FALLING: + level |= BIT(bit); + polarity &= ~BIT(bit); + break; + case IRQ_TYPE_LEVEL_HIGH: + level &= ~BIT(bit); + polarity |= BIT(bit); + break; + case IRQ_TYPE_LEVEL_LOW: + level &= ~BIT(bit); + polarity &= ~BIT(bit); + break; + } + + irq_setup_alt_chip(d, type); + + sunway_write(gpio, GPIO_INTTYPE_LEVEL, level); + if (type != IRQ_TYPE_EDGE_BOTH) + sunway_write(gpio, GPIO_INT_POLARITY, polarity); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sunway_irq_set_wake(struct irq_data *d, unsigned int enable) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct sunway_context *ctx = 
gpio->ports[0].ctx; + + if (enable) + ctx->wake_en |= BIT(d->hwirq); + else + ctx->wake_en &= ~BIT(d->hwirq); + + return 0; +} +#endif + +static int sunway_gpio_set_debounce(struct gpio_chip *gc, + unsigned int offset, unsigned int debounce) +{ + struct sunway_gpio_port *port = gpiochip_get_data(gc); + struct sunway_gpio *gpio = port->gpio; + unsigned long flags, val_deb; + unsigned long mask = BIT(offset); + + spin_lock_irqsave(&gc->bgpio_lock, flags); + + val_deb = sunway_read(gpio, GPIO_PORTA_DEBOUNCE); + if (debounce) + sunway_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb | mask); + else + sunway_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb & ~mask); + + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + return 0; +} + +static int sunway_gpio_set_config(struct gpio_chip *gc, unsigned int offset, + unsigned long config) +{ + u32 debounce; + + if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE) + return -ENOTSUPP; + + debounce = pinconf_to_config_argument(config); + return sunway_gpio_set_debounce(gc, offset, debounce); +} + +static irqreturn_t sunway_irq_handler_mfd(int irq, void *dev_id) +{ + u32 worked; + struct sunway_gpio *gpio = dev_id; + + worked = sunway_do_irq(gpio); + + return worked ? 
IRQ_HANDLED : IRQ_NONE; +} + +static void sunway_configure_irqs(struct sunway_gpio *gpio, + struct sunway_gpio_port *port, + struct sunway_port_property *pp) +{ + struct gpio_chip *gc = &port->gc; + struct fwnode_handle *fwnode = pp->fwnode; + struct irq_chip_generic *irq_gc = NULL; + unsigned int hwirq, ngpio = gc->ngpio; + struct irq_chip_type *ct; + int err, i; + + gpio->domain = irq_domain_create_linear(fwnode, ngpio, + &irq_generic_chip_ops, gpio); + if (!gpio->domain) + return; + + err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2, + "gpio-dwapb", handle_level_irq, + IRQ_NOREQUEST, 0, + IRQ_GC_INIT_NESTED_LOCK); + if (err) { + dev_info(gpio->dev, "irq_alloc_domain_generic_chips failed\n"); + irq_domain_remove(gpio->domain); + gpio->domain = NULL; + return; + } + + irq_gc = irq_get_domain_generic_chip(gpio->domain, 0); + if (!irq_gc) { + irq_domain_remove(gpio->domain); + gpio->domain = NULL; + return; + } + + irq_gc->reg_base = gpio->regs; + irq_gc->private = gpio; + + for (i = 0; i < 2; i++) { + ct = &irq_gc->chip_types[i]; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask = irq_gc_mask_set_bit; + ct->chip.irq_unmask = irq_gc_mask_clr_bit; + ct->chip.irq_set_type = sunway_irq_set_type; + ct->chip.irq_enable = sunway_irq_enable; + ct->chip.irq_disable = sunway_irq_disable; + ct->chip.irq_request_resources = sunway_irq_reqres; + ct->chip.irq_release_resources = sunway_irq_relres; +#ifdef CONFIG_PM_SLEEP + ct->chip.irq_set_wake = sunway_irq_set_wake; +#endif + ct->regs.ack = gpio_reg_convert(gpio, GPIO_PORTA_EOI); + ct->regs.mask = gpio_reg_convert(gpio, GPIO_INTMASK); + ct->type = IRQ_TYPE_LEVEL_MASK; + } + + irq_gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK; + irq_gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH; + irq_gc->chip_types[1].handler = handle_edge_irq; + + if (!pp->irq_shared) { + int i; + + for (i = 0; i < pp->ngpio; i++) { + if (pp->irq[i] >= 0) + irq_set_chained_handler_and_data(pp->irq[i], + sunway_irq_handler, gpio); + } + } 
else { + /* + * Request a shared IRQ since where MFD would have devices + * using the same irq pin + */ + err = devm_request_irq(gpio->dev, pp->irq[0], + sunway_irq_handler_mfd, + IRQF_SHARED, "gpio-dwapb-mfd", gpio); + if (err) { + dev_err(gpio->dev, "error requesting IRQ\n"); + irq_domain_remove(gpio->domain); + gpio->domain = NULL; + return; + } + } + + for (hwirq = 0 ; hwirq < ngpio ; hwirq++) + irq_create_mapping(gpio->domain, hwirq); + + port->gc.to_irq = sunway_gpio_to_irq; +} + +static void sunway_irq_teardown(struct sunway_gpio *gpio) +{ + struct sunway_gpio_port *port = &gpio->ports[0]; + struct gpio_chip *gc = &port->gc; + unsigned int ngpio = gc->ngpio; + irq_hw_number_t hwirq; + + if (!gpio->domain) + return; + + for (hwirq = 0 ; hwirq < ngpio ; hwirq++) + irq_dispose_mapping(irq_find_mapping(gpio->domain, hwirq)); + + irq_domain_remove(gpio->domain); + gpio->domain = NULL; +} + +static int sunway_gpio_add_port(struct sunway_gpio *gpio, + struct sunway_port_property *pp, + unsigned int offs) +{ + struct sunway_gpio_port *port; + void __iomem *dat, *set, *dirout; + int err; + + port = &gpio->ports[offs]; + port->gpio = gpio; + port->idx = pp->idx; + +#ifdef CONFIG_PM_SLEEP + port->ctx = devm_kzalloc(gpio->dev, sizeof(*port->ctx), GFP_KERNEL); + if (!port->ctx) + return -ENOMEM; +#endif + + dat = gpio->regs + GPIO_EXT_PORTA + (pp->idx * GPIO_EXT_PORT_STRIDE); + set = gpio->regs + GPIO_SWPORTA_DR + (pp->idx * GPIO_SWPORT_DR_STRIDE); + dirout = gpio->regs + GPIO_SWPORTA_DDR + + (pp->idx * GPIO_SWPORT_DDR_STRIDE); + + /* This registers 32 GPIO lines per port */ + err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout, + NULL, 0); + if (err) { + dev_err(gpio->dev, "failed to init gpio chip for port%d\n", + port->idx); + return err; + } + +#ifdef CONFIG_OF_GPIO + port->gc.of_node = to_of_node(pp->fwnode); +#endif + port->gc.ngpio = pp->ngpio; + port->gc.base = pp->gpio_base; + + /* Only port A support debounce */ + if (pp->idx == 0) + 
port->gc.set_config = sunway_gpio_set_config; + + if (pp->has_irq) + sunway_configure_irqs(gpio, port, pp); + + err = gpiochip_add_data(&port->gc, port); + if (err) + dev_err(gpio->dev, "failed to register gpiochip for port%d\n", + port->idx); + else + port->is_registered = true; + + /* Add GPIO-signaled ACPI event support */ + if (pp->has_irq) + acpi_gpiochip_request_interrupts(&port->gc); + + return err; +} + +static void sunway_gpio_unregister(struct sunway_gpio *gpio) +{ + unsigned int m; + + for (m = 0; m < gpio->nr_ports; ++m) + if (gpio->ports[m].is_registered) + gpiochip_remove(&gpio->ports[m].gc); +} + +static struct sunway_platform_data * +sunway_gpio_get_pdata(struct device *dev) +{ + struct fwnode_handle *fwnode; + struct sunway_platform_data *pdata; + struct sunway_port_property *pp; + int nports; + int i, j; + + nports = device_get_child_node_count(dev); + if (nports == 0) + return ERR_PTR(-ENODEV); + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); + + pdata->properties = devm_kcalloc(dev, nports, sizeof(*pp), GFP_KERNEL); + if (!pdata->properties) + return ERR_PTR(-ENOMEM); + + pdata->nports = nports; + + i = 0; + device_for_each_child_node(dev, fwnode) { + struct device_node *np = NULL; + + pp = &pdata->properties[i++]; + pp->fwnode = fwnode; + + if (fwnode_property_read_u32(fwnode, "reg", &pp->idx) || + pp->idx >= DWAPB_MAX_PORTS) { + dev_err(dev, + "missing/invalid port index for port%d\n", i); + fwnode_handle_put(fwnode); + return ERR_PTR(-EINVAL); + } + + if (fwnode_property_read_u32(fwnode, "snps,nr-gpios", + &pp->ngpio)) { + dev_info(dev, + "failed to get number of gpios for port%d\n", + i); + pp->ngpio = 32; + } + + pp->irq_shared = false; + pp->gpio_base = -1; + + /* + * Only port A can provide interrupts in all configurations of + * the IP. 
+ */ + if (pp->idx != 0) + continue; + + if (dev->of_node && fwnode_property_read_bool(fwnode, + "interrupt-controller")) { + np = to_of_node(fwnode); + } + + for (j = 0; j < pp->ngpio; j++) { + pp->irq[j] = -ENXIO; + + if (np) + pp->irq[j] = of_irq_get(np, j); + else if (has_acpi_companion(dev)) + pp->irq[j] = platform_get_irq(to_platform_device(dev), j); + + if (pp->irq[j] >= 0) + pp->has_irq = true; + } + + if (!pp->has_irq) + dev_warn(dev, "no irq for port%d\n", pp->idx); + } + + return pdata; +} + +static const struct of_device_id sunway_of_match[] = { + { .compatible = "snps,sw-gpio", .data = (void *)0}, + { .compatible = "apm,xgene-gpio-v2", .data = (void *)GPIO_REG_OFFSET_V2}, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sunway_of_match); + +static const struct acpi_device_id sunway_acpi_match[] = { + {"HISI0181", 0}, + {"APMC0D07", 0}, + {"APMC0D81", GPIO_REG_OFFSET_V2}, + { } +}; +MODULE_DEVICE_TABLE(acpi, sunway_acpi_match); + +static int sunway_gpio_probe(struct platform_device *pdev) +{ + unsigned int i; + struct resource *res; + struct sunway_gpio *gpio; + int err; + struct device *dev = &pdev->dev; + struct sunway_platform_data *pdata = dev_get_platdata(dev); + + if (!pdata) { + pdata = sunway_gpio_get_pdata(dev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + } + + if (!pdata->nports) + return -ENODEV; + + gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); + if (!gpio) + return -ENOMEM; + + gpio->dev = &pdev->dev; + gpio->nr_ports = pdata->nports; + + gpio->rst = devm_reset_control_get_optional_shared(dev, NULL); + if (IS_ERR(gpio->rst)) + return PTR_ERR(gpio->rst); + + reset_control_deassert(gpio->rst); + + gpio->ports = devm_kcalloc(&pdev->dev, gpio->nr_ports, + sizeof(*gpio->ports), GFP_KERNEL); + if (!gpio->ports) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gpio->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->regs)) + return PTR_ERR(gpio->regs); + + /* Optional bus clock */ + 
gpio->clk = devm_clk_get(&pdev->dev, "bus"); + if (!IS_ERR(gpio->clk)) { + err = clk_prepare_enable(gpio->clk); + if (err) { + dev_info(&pdev->dev, "Cannot enable clock\n"); + return err; + } + } + + gpio->flags = 0; + if (dev->of_node) { + gpio->flags = (uintptr_t)of_device_get_match_data(dev); + } else if (has_acpi_companion(dev)) { + const struct acpi_device_id *acpi_id; + + acpi_id = acpi_match_device(sunway_acpi_match, dev); + if (acpi_id) { + if (acpi_id->driver_data) + gpio->flags = acpi_id->driver_data; + } + } + + for (i = 0; i < gpio->nr_ports; i++) { + err = sunway_gpio_add_port(gpio, &pdata->properties[i], i); + if (err) + goto out_unregister; + } + platform_set_drvdata(pdev, gpio); + + return 0; + +out_unregister: + sunway_gpio_unregister(gpio); + sunway_irq_teardown(gpio); + clk_disable_unprepare(gpio->clk); + + return err; +} + +static int sunway_gpio_remove(struct platform_device *pdev) +{ + struct sunway_gpio *gpio = platform_get_drvdata(pdev); + + sunway_gpio_unregister(gpio); + sunway_irq_teardown(gpio); + reset_control_assert(gpio->rst); + clk_disable_unprepare(gpio->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sunway_gpio_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sunway_gpio *gpio = platform_get_drvdata(pdev); + struct gpio_chip *gc = &gpio->ports[0].gc; + unsigned long flags; + int i; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + for (i = 0; i < gpio->nr_ports; i++) { + unsigned int offset; + unsigned int idx = gpio->ports[i].idx; + struct sunway_context *ctx = gpio->ports[i].ctx; + + BUG_ON(!ctx); + + offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE; + ctx->dir = sunway_read(gpio, offset); + + offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE; + ctx->data = sunway_read(gpio, offset); + + offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE; + ctx->ext = sunway_read(gpio, offset); + + /* Only port A can provide interrupts */ + if (idx == 0) { + 
ctx->int_mask = sunway_read(gpio, GPIO_INTMASK); + ctx->int_en = sunway_read(gpio, GPIO_INTEN); + ctx->int_pol = sunway_read(gpio, GPIO_INT_POLARITY); + ctx->int_type = sunway_read(gpio, GPIO_INTTYPE_LEVEL); + ctx->int_deb = sunway_read(gpio, GPIO_PORTA_DEBOUNCE); + + /* Mask out interrupts */ + sunway_write(gpio, GPIO_INTMASK, + 0xffffffff & ~ctx->wake_en); + } + } + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + clk_disable_unprepare(gpio->clk); + + return 0; +} + +static int sunway_gpio_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sunway_gpio *gpio = platform_get_drvdata(pdev); + struct gpio_chip *gc = &gpio->ports[0].gc; + unsigned long flags; + int i; + + if (!IS_ERR(gpio->clk)) + clk_prepare_enable(gpio->clk); + + spin_lock_irqsave(&gc->bgpio_lock, flags); + for (i = 0; i < gpio->nr_ports; i++) { + unsigned int offset; + unsigned int idx = gpio->ports[i].idx; + struct sunway_context *ctx = gpio->ports[i].ctx; + + BUG_ON(!ctx); + + offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE; + sunway_write(gpio, offset, ctx->data); + + offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE; + sunway_write(gpio, offset, ctx->dir); + + offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE; + sunway_write(gpio, offset, ctx->ext); + + /* Only port A can provide interrupts */ + if (idx == 0) { + sunway_write(gpio, GPIO_INTTYPE_LEVEL, ctx->int_type); + sunway_write(gpio, GPIO_INT_POLARITY, ctx->int_pol); + sunway_write(gpio, GPIO_PORTA_DEBOUNCE, ctx->int_deb); + sunway_write(gpio, GPIO_INTEN, ctx->int_en); + sunway_write(gpio, GPIO_INTMASK, ctx->int_mask); + + /* Clear out spurious interrupts */ + sunway_write(gpio, GPIO_PORTA_EOI, 0xffffffff); + } + } + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(sunway_gpio_pm_ops, sunway_gpio_suspend, + sunway_gpio_resume); + +static struct platform_driver sunway_gpio_driver = { + .driver = { + .name = "gpio-sunway", + 
.pm = &sunway_gpio_pm_ops, + .of_match_table = of_match_ptr(sunway_of_match), + .acpi_match_table = ACPI_PTR(sunway_acpi_match), + }, + .probe = sunway_gpio_probe, + .remove = sunway_gpio_remove, +}; + +module_platform_driver(sunway_gpio_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jamie Iles"); +MODULE_DESCRIPTION("Sunway GPIO driver"); diff --git a/include/linux/platform_data/gpio-sunway.h b/include/linux/platform_data/gpio-sunway.h new file mode 100644 index 000000000000..58b1bddeb409 --- /dev/null +++ b/include/linux/platform_data/gpio-sunway.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef GPIO_SUNWAY_H +#define GPIO_SUNWAY_H + +struct sunway_port_property { + struct fwnode_handle *fwnode; + unsigned int idx; + unsigned int ngpio; + unsigned int gpio_base; + int irq[32]; + bool has_irq; + bool irq_shared; +}; + +struct sunway_platform_data { + struct sunway_port_property *properties; + unsigned int nports; +}; + +#endif -- Gitee From 10d9e67bd0594782a9ce7870928cfb99cc5918da Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:55:35 +0800 Subject: [PATCH 0342/2138] anolis: drivers: hwmon: add sw64 support ANBZ: #4688 Add hwmon drivers for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/hwmon/Kconfig | 10 ++ drivers/hwmon/Makefile | 1 + drivers/hwmon/sw64_pvt.c | 224 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 235 insertions(+) create mode 100644 drivers/hwmon/sw64_pvt.c diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index d6c5eead770a..ea41c7e24c6e 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -38,6 +38,16 @@ config HWMON_DEBUG_CHIP comment "Native drivers" +config SENSORS_PVT + tristate "SW64 PVT monitor" + depends on SW64 + help + If you say yes here you get support for the voltage + sensor inside your CPU. + + This driver can also be built as a module. If so, the module + will be called PVT. + config SENSORS_ABITUGURU tristate "Abit uGuru (rev 1 & 2)" depends on X86 && DMI diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index cab312e74d3c..f7da084cfc46 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -221,6 +221,7 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o obj-$(CONFIG_SENSORS_XGENE) += xgene-hwmon.o +obj-$(CONFIG_SENSORS_PVT) += sw64_pvt.o obj-$(CONFIG_SENSORS_OCC) += occ/ obj-$(CONFIG_SENSORS_PECI) += peci/ diff --git a/drivers/hwmon/sw64_pvt.c b/drivers/hwmon/sw64_pvt.c new file mode 100644 index 000000000000..aedc29d44077 --- /dev/null +++ b/drivers/hwmon/sw64_pvt.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PVT device driver. + * + * Part of lm_sensors, Linux kernel modules + * for hardware monitoring in sunway. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +#define PVT_VSYS 0 +#define PVT0_CTRL 0x7c00 +#define PVT02SPBU_DATA_OUT (0x1 << 26) +#define PVT_READ 0xc000 +#define PVT_WADDR 0xc800 +#define PVT_WDATA 0xcc00 + +/* The PVT registers */ +#define PVT_SAFECTRL 0x0 +#define CLK_SEL 0x1 +#define PVT_RUN 0x2 +#define PVT_CONFIG 0x3 +#define PVT_WAIT_TIME 0x4 +#define TS_ALARM_HVALUE_L 0x5 +#define TS_ALARM_HVALUE_H 0x6 +#define TS_ALARM_LVALUE_L 0x7 +#define TS_ALARM_LVALUE_H 0x8 +#define TS_ALARM_TIMES 0x9 +#define TRIMG 0xa +#define TRIMO 0xb +#define VS_ALARM_HVALUE_L 0xc +#define VS_ALARM_HVALUE_H 0xd +#define VS_ALARM_LVALUE_L 0xe +#define VS_ALARM_LVALUE_H 0xf +#define VS_ALARM_TIMES 0x10 +#define PVT_ALARM_CLEAR 0x11 +#define PVT_ALARM_MASK 0x12 +#define PVT_DATA_OUT_L 0x13 +#define PVT_DATA_OUT_H 0x14 +#define PVT_STATE_INFO 0x15 +#define PVT_ALARM_INFO 0x16 +#define COFFICIENT 71 +#define FIXEDVAL 45598 + +#define vol_algorithm(m, n) (((((m >> 16) & 0x3) * 0x100) +\ + ((n >> 16) & 0xff)) * COFFICIENT + FIXEDVAL) + + +struct pvt_hwmon { + struct pvt *pvt; + void __iomem *base; +}; + +static const char * const input_names[] = { + [PVT_VSYS] = "voltage", +}; + +static inline void pvt_write_reg(struct pvt_hwmon *pvtvol, u64 a, + u64 b, unsigned int offset) +{ + writel(a | b, pvtvol->base + offset); +} + +static inline u64 pvt_read_reg(struct pvt_hwmon *pvtvol, unsigned int offset) +{ + u64 value; + + value = readl(pvtvol->base + offset); + return value; +} + +void pvt_configure(struct pvt_hwmon *pvtvol, u64 value, u64 reg) +{ + pvt_write_reg(pvtvol, PVT_WDATA, value, PVT0_CTRL); + pvt_write_reg(pvtvol, PVT_WADDR, reg, PVT0_CTRL); +} + +static inline u64 pvt_read_vol(struct pvt_hwmon *pvtvol, u64 data, + u64 reg, unsigned int offset) +{ + unsigned int value; + + pvt_write_reg(pvtvol, data, reg, offset); + msleep(100); + value = pvt_read_reg(pvtvol, offset); + return value; +} + +static int pvt_get_vol(struct pvt_hwmon *pvtvol) 
+{ + unsigned long long data_h, data_l; + + pvt_configure(pvtvol, 0x1, PVT_SAFECTRL); + + /* configure PVT mode */ + pvt_configure(pvtvol, 0x3, PVT_CONFIG); + + /* PVT monitor enable */ + pvt_configure(pvtvol, 0x1, PVT_RUN); + + /* get the upper 2 bits of the PVT voltage */ + data_h = pvt_read_vol(pvtvol, PVT_READ, PVT_DATA_OUT_H, PVT0_CTRL); + if ((data_h & PVT02SPBU_DATA_OUT) == 0) { + pr_err("error: the Voltage_h is error\n"); + return false; + } + + /* get the lower 8 bits of the PVT voltage */ + data_l = pvt_read_vol(pvtvol, PVT_READ, PVT_DATA_OUT_L, PVT0_CTRL); + if ((data_l & PVT02SPBU_DATA_OUT) == 0) { + pr_err("error: the Voltage_l is error\n"); + return false; + } + + return vol_algorithm(data_h, data_l); +} + +static ssize_t pvt_read(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct pvt_hwmon *pvtvol = dev_get_drvdata(dev); + unsigned long long pvt_vol; + + pvt_vol = pvt_get_vol(pvtvol); + return sprintf(buf, "%lld\n", (pvt_vol / 100)); +} + +static ssize_t show_label(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + return sprintf(buf, "%s\n", + input_names[to_sensor_dev_attr(devattr)->index]); +} + +static SENSOR_DEVICE_ATTR(in0_input, 0444, pvt_read, NULL, + PVT_VSYS); +static SENSOR_DEVICE_ATTR(in0_label, 0444, show_label, NULL, + PVT_VSYS); + +static struct attribute *pvt_attrs[] = { + &sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in0_label.dev_attr.attr, + NULL +}; + +ATTRIBUTE_GROUPS(pvt); + +static int pvt_vol_plat_probe(struct platform_device *pdev) +{ + struct resource *res; + struct pvt_hwmon *pvtvol; + struct device *hwmon_dev; + unsigned long long value; + struct device *dev = &pdev->dev; + + pvtvol = devm_kzalloc(&pdev->dev, sizeof(*pvtvol), GFP_KERNEL); + if (!pvtvol) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + goto err; + + pvtvol->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pvtvol->base)) + return 
PTR_ERR(pvtvol->base); + + platform_set_drvdata(pdev, pvtvol); + hwmon_dev = devm_hwmon_device_register_with_groups(dev, "pvt", + pvtvol, pvt_groups); + + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + value = pvt_get_vol(pvtvol); + if (!value) { + dev_info(&pdev->dev, "pvt_vol get failed\n"); + return false; + } + + return 0; + +err: + dev_err(&pdev->dev, "no PVT resource\n"); + return -ENXIO; +} + +#ifdef CONFIG_OF +static const struct of_device_id pvt_vol_of_match[] = { + { .compatible = "sw64,pvt-vol", }, + {}, +}; +MODULE_DEVICE_TABLE(of, pvt_vol_of_match); +#endif + +static struct platform_driver pvt_vol_driver = { + .probe = pvt_vol_plat_probe, + .driver = { + .name = "pvt-sw64", + .of_match_table = of_match_ptr(pvt_vol_of_match), + }, +}; + +static int __init pvt_vol_init_driver(void) +{ + return platform_driver_register(&pvt_vol_driver); +} +subsys_initcall(pvt_vol_init_driver); + +static void __exit pvt_vol_exit_driver(void) +{ + platform_driver_unregister(&pvt_vol_driver); +} +module_exit(pvt_vol_exit_driver); + +MODULE_AUTHOR("Wang Yingying "); +MODULE_DESCRIPTION("pvt controller"); +MODULE_LICENSE("GPL"); -- Gitee From 01661d8dee832a65410baa3b5e72c029873daa3d Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:54:01 +0800 Subject: [PATCH 0343/2138] anolis: drivers: i2c: add sw64 support ANBZ: #4688 Add i2c drivers for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/i2c/busses/Kconfig | 11 + drivers/i2c/busses/Makefile | 1 + drivers/i2c/busses/i2c-designware-common.c | 8 + drivers/i2c/busses/i2c-designware-core.h | 3 +- drivers/i2c/busses/i2c-designware-platdrv.c | 5 + drivers/i2c/busses/i2c-sunway.c | 405 ++++++++++++++++++++ 6 files changed, 432 insertions(+), 1 deletion(-) create mode 100644 drivers/i2c/busses/i2c-sunway.c diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 88a58f4e6e72..3161c33981e1 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -347,6 +347,17 @@ config I2C_ZHAOXIN This driver can also be built as a module. If so, the module will be called i2c-zhaoxin. +config I2C_SUNWAY + tristate "Sunway i2c lib" + depends on SW64 + help + If you say yes to this option, support will be included for the + Sunway Soc I2C interface on SW64 platform. + + This driver can also be built as a module. If so, the module + will be called i2c-sunway. 
+ + if ACPI comment "ACPI drivers" diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index f8c8a3554427..738519b0a9cb 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o obj-$(CONFIG_I2C_VIA) += i2c-via.o obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o obj-$(CONFIG_I2C_ZHAOXIN) += i2c-zhaoxin.o +obj-$(CONFIG_I2C_SUNWAY) += i2c-sunway.o # Mac SMBus host controller drivers obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index ced2fb4aeda8..c283743916fe 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -63,6 +63,9 @@ static int dw_reg_read(void *context, unsigned int reg, unsigned int *val) { struct dw_i2c_dev *dev = context; + if ((dev->flags & MODEL_MASK) == MODEL_SUNWAY) + reg = reg << 7; + *val = readl(dev->base + reg); return 0; @@ -72,6 +75,9 @@ static int dw_reg_write(void *context, unsigned int reg, unsigned int val) { struct dw_i2c_dev *dev = context; + if ((dev->flags & MODEL_MASK) == MODEL_SUNWAY) + reg = reg << 7; + writel(val, dev->base + reg); return 0; @@ -149,6 +155,8 @@ int i2c_dw_init_regmap(struct dw_i2c_dev *dev) return ret; reg = readl(dev->base + DW_IC_COMP_TYPE); + if ((dev->flags & MODEL_MASK) == MODEL_SUNWAY) + reg = readl(dev->base + (DW_IC_COMP_TYPE << 7)); i2c_dw_release_lock(dev); if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 5eb130c1d671..d4909e9b1c84 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -310,7 +310,8 @@ struct dw_i2c_dev { #define MODEL_BAIKAL_BT1 BIT(9) #define MODEL_AMD_NAVI_GPU BIT(10) #define MODEL_WANGXUN_SP BIT(11) -#define MODEL_MASK GENMASK(11, 8) +#define MODEL_SUNWAY BIT(12) +#define MODEL_MASK GENMASK(12, 8) /* 
* Enable UCSI interrupt by writing 0xd at register diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 855b698e99c0..c818e9d14b9a 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -150,9 +150,14 @@ static int dw_i2c_of_configure(struct platform_device *pdev) } static const struct of_device_id dw_i2c_of_match[] = { +#ifdef CONFIG_SW64 + { .compatible = "snps,designware-i2c", .data = (void *)MODEL_SUNWAY }, +#else { .compatible = "snps,designware-i2c", }, +#endif { .compatible = "mscc,ocelot-i2c", .data = (void *)MODEL_MSCC_OCELOT }, { .compatible = "baikal,bt1-sys-i2c", .data = (void *)MODEL_BAIKAL_BT1 }, + { .compatible = "sunway,suntai-i2c", .data = (void *)MODEL_SUNWAY }, {}, }; MODULE_DEVICE_TABLE(of, dw_i2c_of_match); diff --git a/drivers/i2c/busses/i2c-sunway.c b/drivers/i2c/busses/i2c-sunway.c new file mode 100644 index 000000000000..cc7268c6a2da --- /dev/null +++ b/drivers/i2c/busses/i2c-sunway.c @@ -0,0 +1,405 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 WXIAT Platform Software + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * The drivers in this file are synchronous/blocking. In addition, + * use poll mode to read/write slave devices on the I2C bus instead + * of the interrupt mode. 
+ */ + +#include +#include +#include +#include + +#include + +#define CPLD_BUSNR 2 + +#define IC_CLK_KHZ 25000 + +/* I2C register definitions */ +#define DW_IC_CON 0x0 +#define DW_IC_STATUS 0x3800 +#define DW_IC_DATA_CMD 0x0800 +#define DW_IC_TAR 0x00200 +#define DW_IC_ENABLE 0x3600 +#define DW_IC_CMD 0x0100 +#define DW_IC_STOP 0x0200 +#define DW_IC_SDA_HOLD 0x3e00 +#define DW_IC_SDA_SETUP 0x4a00 +#define DW_IC_SS_SCL_HCNT 0x0a00 +#define DW_IC_SS_SCL_LCNT 0x0c00 +#define DW_IC_FS_SCL_HCNT 0x0e00 +#define DW_IC_FS_SCL_LCNT 0x1000 +#define DW_IC_TX_TL 0x1e00 +#define DW_IC_RX_TL 0x1c00 +#define DW_IC_INTR_MASK 0x1800 + +#define MAX_RETRY 10000000 + +#define DW_IC_STATUS_ACTIVITY 0x1 +#define DW_IC_STATUS_TFNF 0x2 +#define DW_IC_STATUS_TFE 0x4 +#define DW_IC_STATUS_RFNE 0x8 +#define DW_IC_STATUS_RFF 0x10 + +#define DW_IC_CON_MASTER 0x1 +#define DW_IC_CON_SPEED_STD 0x2 +#define DW_IC_CON_SPEED_FAST 0x4 +#define DW_IC_CON_10BITADDR_MASTER 0x10 +#define DW_IC_CON_RESTART_EN 0x20 +#define DW_IC_CON_SLAVE_DISABLE 0x40 + +#define INTEL_MID_STD_CFG (DW_IC_CON_MASTER | \ + DW_IC_CON_SLAVE_DISABLE | \ + DW_IC_CON_RESTART_EN) + +#define DW_IC_INTR_RX_UNDER 0x001 +#define DW_IC_INTR_RX_OVER 0x002 +#define DW_IC_INTR_RX_FULL 0x004 +#define DW_IC_INTR_TX_OVER 0x008 +#define DW_IC_INTR_TX_EMPTY 0x010 +#define DW_IC_INTR_RD_REQ 0x020 +#define DW_IC_INTR_TX_ABRT 0x040 +#define DW_IC_INTR_RX_DONE 0x080 +#define DW_IC_INTR_ACTIVITY 0x100 +#define DW_IC_INTR_STOP_DET 0x200 +#define DW_IC_INTR_START_DET 0x400 +#define DW_IC_INTR_GEN_CALL 0x800 + +#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \ + DW_IC_INTR_TX_EMPTY | \ + DW_IC_INTR_TX_ABRT | \ + DW_IC_INTR_STOP_DET) + +enum i2c_bus_operation { + I2C_BUS_READ, + I2C_BUS_WRITE, +}; + +static void __iomem *m_i2c_base_address; + +/* + * This function get I2Cx controller base address + * + * @param i2c_controller_index Bus Number of I2C controller. + * @return I2C BAR. 
+ */ +void __iomem *get_i2c_bar_addr(uint8_t i2c_controller_index) +{ + switch (i2c_controller_index) { + case 0: + return __va(IO_BASE | IIC0_BASE); + case 1: + return __va(IO_BASE | IIC1_BASE); + case 2: + return __va(IO_BASE | IIC2_BASE); + default: + return NULL; + } +} + +static inline void write_cpu_i2c_controller(uint64_t offset, uint32_t data) +{ + writel(data, m_i2c_base_address + offset); +} + +static inline uint32_t read_cpu_i2c_controller(uint64_t offset) +{ + return readl(m_i2c_base_address + offset); +} + +static int poll_for_status_set0(uint16_t status_bit) +{ + uint64_t retry = 0; + uint32_t temp = read_cpu_i2c_controller(DW_IC_STATUS); + + temp = read_cpu_i2c_controller(DW_IC_STATUS); + + while (retry < MAX_RETRY) { + if (read_cpu_i2c_controller(DW_IC_STATUS) & status_bit) + break; + retry++; + } + + if (retry == MAX_RETRY) + return -ETIME; + + return 0; +} + +static uint32_t i2c_dw_scl_lcnt(uint32_t ic_clk, uint32_t t_low, + uint32_t tf, uint32_t offset) +{ + /* + * Conditional expression: + * + * IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (t_low + tf) + * + * DW I2C core starts counting the SCL CNTs for the LOW period + * of the SCL clock (t_low) as soon as it pulls the SCL line. + * In order to meet the t_low timing spec, we need to take into + * account the fall time of SCL signal (tf). Default tf value + * should be 0.3 us, for safety. + */ + return ((ic_clk * (t_low + tf) + 500000) / 1000000) - 1 + offset; +} + +static uint32_t i2c_dw_scl_hcnt(uint32_t ic_clk, uint32_t t_symbol, + uint32_t tf, uint32_t cond, uint32_t offset) +{ + /* + * DesignWare I2C core doesn't seem to have solid strategy to meet + * the tHD;STA timing spec. Configuring _HCNT based on tHIGH spec + * will result in violation of the tHD;STA spec. + */ + if (cond) + /* + * Conditional expression: + * + * IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH + * + * This is based on the DW manuals, and represents an ideal + * configuration. 
The resulting I2C bus speed will be faster + * than any of the others. + * + * If your hardware is free from tHD;STA issue, try this one. + */ + return (ic_clk * t_symbol + 500000) / 1000000 - 8 + offset; + /* + * Conditional expression: + * + * IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf) + * + * This is just experimental rule; the tHD;STA period turned + * out to be proportinal to (_HCNT + 3). With this setting, + * we could meet both tHIGH and tHD;STA timing specs. + * + * If unsure, you'd better to take this alternative. + * + * The reason why we need to take into account "tf" here, + * is the same as described in i2c_dw_scl_lcnt(). + */ + return (ic_clk * (t_symbol + tf) + 500000) / 1000000 - 3 + offset; +} + +static int wait_for_cpu_i2c_bus_busy(void) +{ + uint64_t retry = 0; + uint32_t status = 0; + + do { + retry++; + status = !!(read_cpu_i2c_controller(DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY); + } while ((retry < MAX_RETRY) && status); + + if (retry == MAX_RETRY) + return -ETIME; + + return 0; +} + +static int i2c_read(uint8_t reg_offset, uint8_t *buffer, uint32_t length) +{ + int status; + uint32_t i; + + status = poll_for_status_set0(DW_IC_STATUS_TFE); + if (status) + return status; + + write_cpu_i2c_controller(DW_IC_DATA_CMD, reg_offset); + + for (i = 0; i < length; i++) { + if (i == length - 1) + write_cpu_i2c_controller(DW_IC_DATA_CMD, DW_IC_CMD | DW_IC_STOP); + else + write_cpu_i2c_controller(DW_IC_DATA_CMD, DW_IC_CMD); + + if (poll_for_status_set0(DW_IC_STATUS_RFNE) == 0) + buffer[i] = readb(m_i2c_base_address + DW_IC_DATA_CMD); + else + pr_err("Read timeout line %d.\n", __LINE__); + } + + return 0; +} + +static int i2c_write(uint8_t reg_offset, uint8_t *buffer, uint32_t length) +{ + int status; + uint32_t i; + + /* Data transfer, poll till transmit ready bit is set */ + status = poll_for_status_set0(DW_IC_STATUS_TFE); + if (status) { + pr_err("In i2c-lib.c, line %d.\n", __LINE__); + return status; + } + + write_cpu_i2c_controller(DW_IC_DATA_CMD, 
reg_offset); + + for (i = 0; i < length; i++) { + if (poll_for_status_set0(DW_IC_STATUS_TFNF) == 0) { + if (i == length - 1) + write_cpu_i2c_controller(DW_IC_DATA_CMD, buffer[i] | DW_IC_STOP); + else + write_cpu_i2c_controller(DW_IC_DATA_CMD, buffer[i]); + } else { + pr_err("Write timeout %d.\n", __LINE__); + } + } + + mdelay(200); + status = poll_for_status_set0(DW_IC_STATUS_TFE); + if (status) { + pr_err("In i2c-lib.c, line %d.\n", __LINE__); + return status; + } + + return 0; +} + +/* Initialize I2c controller */ +void init_cpu_i2c_controller(void) +{ + uint32_t h_cnt; + uint32_t l_cnt; + uint32_t input_ic_clk_rate = IC_CLK_KHZ; /* by unit KHz ie. 25MHz */ + uint32_t sda_falling_time = 300; + uint32_t scl_falling_time = 300; + + /* + * The I2C protocol specification requires 300ns of hold time on the + * SDA signal (tHD;DAT) in standard and fast speed modes, and a hold + * time long enough to bridge the undefined part between logic 1 and + * logic 0 of the falling edge of SCL in high speed mode. + */ + uint32_t sda_hold_time = 432; + uint32_t sda_hold = 0; + + /* Firstly disable the controller. */ + pr_debug("Initialize CPU I2C controller\n"); + + write_cpu_i2c_controller(DW_IC_ENABLE, 0); + + sda_hold = (input_ic_clk_rate * sda_hold_time + 500000) / 1000000; + write_cpu_i2c_controller(DW_IC_SDA_HOLD, sda_hold); + + /* Set standard and fast speed deviders for high/low periods. 
*/ + /* Standard-mode */ + h_cnt = i2c_dw_scl_hcnt(input_ic_clk_rate, 4000, sda_falling_time, 0, 0); + l_cnt = i2c_dw_scl_lcnt(input_ic_clk_rate, 4700, scl_falling_time, 0); + + write_cpu_i2c_controller(DW_IC_SS_SCL_HCNT, h_cnt); + write_cpu_i2c_controller(DW_IC_SS_SCL_LCNT, l_cnt); + + pr_debug("Standard-mode HCNT=%x, LCNT=%x\n", h_cnt, l_cnt); + + /* Fast-mode */ + h_cnt = i2c_dw_scl_hcnt(input_ic_clk_rate, 600, sda_falling_time, 0, 0); + l_cnt = i2c_dw_scl_lcnt(input_ic_clk_rate, 1300, scl_falling_time, 0); + + write_cpu_i2c_controller(DW_IC_FS_SCL_HCNT, h_cnt); + write_cpu_i2c_controller(DW_IC_FS_SCL_LCNT, l_cnt); + + pr_debug("Fast-mode HCNT=%x, LCNT=%d\n\n", h_cnt, l_cnt); + + /* Configure Tx/Rx FIFO threshold levels, since we will be working + * in polling mode set both thresholds to their minimum + */ + write_cpu_i2c_controller(DW_IC_TX_TL, 0); + write_cpu_i2c_controller(DW_IC_RX_TL, 0); + write_cpu_i2c_controller(DW_IC_INTR_MASK, DW_IC_INTR_DEFAULT_MASK); + + /* Configure the i2c master */ + write_cpu_i2c_controller(DW_IC_CON, + INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD); + +} + +/* + * This function enables I2C controllers. + * + * @param i2c_controller_index Bus Number of I2C controllers. + */ +void enable_i2c_controller(uint8_t i2c_controller_index) +{ + m_i2c_base_address = get_i2c_bar_addr(i2c_controller_index); + init_cpu_i2c_controller(); +} + +/* + * Write/Read data from I2C device. 
+ * + * @i2c_controller_index: i2c bus number + * @slave_address: slave address + * @operation: to read or write + * @length: number of bytes + * @reg_offset: register offset + * @buffer: in/out buffer + */ +int i2c_bus_rw(uint8_t i2c_controller_index, uint8_t slave_address, + enum i2c_bus_operation operation, uint32_t length, + uint8_t reg_offset, void *buffer) +{ + uint8_t *byte_buffer = buffer; + int status = 0; + uint32_t databuffer, temp; + + m_i2c_base_address = get_i2c_bar_addr(i2c_controller_index); + status = wait_for_cpu_i2c_bus_busy(); + if (status) { + pr_err("%d\n", __LINE__); + return status; + } + + mdelay(1000); + + /* Set the slave address. */ + write_cpu_i2c_controller(DW_IC_ENABLE, 0x0); /* Disable controller */ + databuffer = read_cpu_i2c_controller(DW_IC_CON); + databuffer &= ~DW_IC_CON_10BITADDR_MASTER; + write_cpu_i2c_controller(DW_IC_CON, databuffer); + + /* Fill the target addr. */ + write_cpu_i2c_controller(DW_IC_TAR, slave_address); + + temp = read_cpu_i2c_controller(DW_IC_TAR); + + /* Configure Tx/Rx FIFO threshold levels. 
*/ + write_cpu_i2c_controller(DW_IC_ENABLE, 0x1); /* Enable the adapter */ + write_cpu_i2c_controller(DW_IC_INTR_MASK, DW_IC_INTR_DEFAULT_MASK); + + if (operation == I2C_BUS_READ) + status = i2c_read(reg_offset, byte_buffer, length); + else if (operation == I2C_BUS_WRITE) + status = i2c_write(reg_offset, byte_buffer, length); + + /* Disable controller */ + write_cpu_i2c_controller(DW_IC_ENABLE, 0x0); + + return status; +} + +void disable_i2c_controller(uint8_t i2c_controller_index) +{ + m_i2c_base_address = get_i2c_bar_addr(i2c_controller_index); + + /* Disable controller */ + write_cpu_i2c_controller(DW_IC_ENABLE, 0x0); + m_i2c_base_address = 0; +} + +void cpld_write(uint8_t slave_addr, uint8_t reg, uint8_t data) +{ + enable_i2c_controller(CPLD_BUSNR); + i2c_bus_rw(CPLD_BUSNR, slave_addr, I2C_BUS_WRITE, sizeof(uint8_t), reg, &data); + disable_i2c_controller(CPLD_BUSNR); +} -- Gitee From 78873997726755b65983bcde5c9b21572c4811e1 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:49:04 +0800 Subject: [PATCH 0344/2138] anolis: drivers: iommu: add sw64 support ANBZ: #4688 Add iommu drivers for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/iommu/Kconfig | 1 + drivers/iommu/Makefile | 2 +- drivers/iommu/sw64/Kconfig | 21 + drivers/iommu/sw64/Makefile | 3 + drivers/iommu/sw64/iommu.c | 1277 +++++++++++++++++++++ drivers/iommu/sw64/iommu_v2.c | 1780 +++++++++++++++++++++++++++++ drivers/iommu/sw64/sunway_iommu.h | 79 ++ 7 files changed, 3162 insertions(+), 1 deletion(-) create mode 100644 drivers/iommu/sw64/Kconfig create mode 100644 drivers/iommu/sw64/Makefile create mode 100644 drivers/iommu/sw64/iommu.c create mode 100644 drivers/iommu/sw64/iommu_v2.c create mode 100644 drivers/iommu/sw64/sunway_iommu.h diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index d57c5adf932e..b1df0a09601b 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -187,6 +187,7 @@ config MSM_IOMMU source "drivers/iommu/amd/Kconfig" source "drivers/iommu/intel/Kconfig" source "drivers/iommu/iommufd/Kconfig" +source "drivers/iommu/sw64/Kconfig" config IRQ_REMAP bool "Support for Interrupt Remapping" diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 769e43d780ce..f74b08c2fb00 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += amd/ intel/ arm/ iommufd/ +obj-y += amd/ intel/ arm/ iommufd/ sw64/ obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_IOMMU_API) += iommu-traces.o obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o diff --git a/drivers/iommu/sw64/Kconfig b/drivers/iommu/sw64/Kconfig new file mode 100644 index 000000000000..3a6a1e994f31 --- /dev/null +++ b/drivers/iommu/sw64/Kconfig @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0-only +# SW64 IOMMU SUPPORT +config SUNWAY_IOMMU + bool "Sunway IOMMU Support" + select IOMMU_API + select IOMMU_IOVA + select IOMMU_DMA + depends on SW64 && PCI && SUBARCH_C3B + help + Support for IOMMU on SW64 
platform. It can enable or bypass specific device by + adding boot param "iommu_enable" and "iommu.passthrough". + +# SW64 IOMMU V2 SUPPORT +config SUNWAY_IOMMU_V2 + bool "Sunway IOMMU V2 Support" + select IOMMU_API + select IOMMU_IOVA + depends on SW64 && PCI && SUBARCH_C4 + help + Support for IOMMU V2 on SW64 platform. It can enable or bypass specific device by + adding boot param "iommu_enable" and "iommu.passthrough". diff --git a/drivers/iommu/sw64/Makefile b/drivers/iommu/sw64/Makefile new file mode 100644 index 000000000000..e61b343490aa --- /dev/null +++ b/drivers/iommu/sw64/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_SUNWAY_IOMMU) += iommu.o +obj-$(CONFIG_SUNWAY_IOMMU_V2) += iommu_v2.o diff --git a/drivers/iommu/sw64/iommu.c b/drivers/iommu/sw64/iommu.c new file mode 100644 index 000000000000..32b18f726fd9 --- /dev/null +++ b/drivers/iommu/sw64/iommu.c @@ -0,0 +1,1277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * iommu.c: Generic sw64 IOMMU support + * + * This is designed and tested for 3231. If there are no changes in hardware + * in later chips, then it should work just as well. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "sunway_iommu.h" + +#define MAX_DOMAIN_NUM 65536 +#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) +#define SW64_DMA_LIMIT (0xe0000000 - 1) +#define SW64_BAR_ADDRESS (IO_BASE | PCI_BASE) + +#define SW64_IOMMU_LEVEL1_OFFSET 0x1ff +#define SW64_IOMMU_LEVEL2_OFFSET 0x3ff + +#define SW64_IOMMU_GRN_8K ((0UL) << 4) /* page size as 8KB */ +#define SW64_IOMMU_GRN_8M ((0x2UL) << 4) /* page size as 8MB */ +#define SW64_IOMMU_PGSIZES (((1ULL) << PAGE_SHIFT) | ((1ULL) << PAGE_8M_SHIFT)) + +#define IDENTMAP_ALL ((1U) << 0) +#define DMA_MASK64 ((1U) << 1) + +/* IOMMU Exceptional Status */ +enum exceptype { + DTE_LEVEL1 = 0x0, + DTE_LEVEL2, + PTE_LEVEL1, + PTE_LEVEL2, + UNAUTHORIZED_ACCESS, + ILLEGAL_RESPONSE, + DTE_LEVEL1_VAL, + DTE_LEVEL2_VAL, + PTE_LEVEL1_VAL, + PTE_LEVEL2_VAL, +}; + +u64 iommu_enable_cmd; /* default IOMMU boot param: 0 */ + +unsigned long *sunway_iommu_domain_bitmap; + +static DEFINE_SPINLOCK(domain_bitmap_lock); +static DEFINE_SPINLOCK(sunway_iommu_device_table_lock); +spinlock_t sunway_domain_lock; + +static LLIST_HEAD(dev_data_list); +LIST_HEAD(sunway_domain_list); + +struct dma_domain { + struct sunway_iommu_domain sdomain; + struct iova_domain iovad; +}; +const struct iommu_ops sunway_iommu_ops; + +static int iommu_identity_mapping; + +/* flush helpers */ +static void piu_flush_all(struct pci_controller *hose) +{ + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHALL, 0); + write_piu_ior0(hose->node, hose->index, PTLB_FLUSHALL, 0); + write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHALL, 0); +} + +void dev_flush_dtlb(struct sunway_iommu_domain *sdomain, + struct sunway_iommu_dev *sdev_data) +{ + struct pci_controller *hose; + int devid; + + list_for_each_entry(sdev_data, &sdomain->dev_list, list) { + hose = 
pci_bus_to_pci_controller(sdev_data->pdev->bus); + devid = sdev_data->devid; + + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHDEV, devid); + } +} + +void flush_pcache_by_addr(struct sunway_iommu_domain *sdomain, + unsigned long flush_addr) +{ + struct pci_controller *hose; + struct sunway_iommu_dev *sdev_data; + + list_for_each_entry(sdev_data, &sdomain->dev_list, list) { + hose = pci_bus_to_pci_controller(sdev_data->pdev->bus); + + flush_addr = __pa(flush_addr); + write_piu_ior0(hose->node, hose->index, + PCACHE_FLUSHPADDR, flush_addr); + } +} + +void flush_ptlb_by_addr(struct sunway_iommu_domain *sdomain, + unsigned long flush_addr) +{ + struct pci_controller *hose; + struct pci_dev *pdev; + struct sunway_iommu_dev *sdev_data; + + list_for_each_entry(sdev_data, &sdomain->dev_list, list) { + pdev = sdev_data->pdev; + hose = pci_bus_to_pci_controller(pdev->bus); + + flush_addr = (pdev->bus->number << 8) + | pdev->devfn | (flush_addr << 16); + write_piu_ior0(hose->node, hose->index, + PTLB_FLUSHVADDR, flush_addr); + } +} + +/* domain helpers */ +static struct sunway_iommu_domain *to_sunway_domain(struct iommu_domain *dom) +{ + return container_of(dom, struct sunway_iommu_domain, domain); +} + +static struct dma_domain *to_dma_domain(struct sunway_iommu_domain *sdomain) +{ + return container_of(sdomain, struct dma_domain, sdomain); +} + +static void add_domain_to_list(struct sunway_iommu_domain *sdomain) +{ + unsigned long flags; + + spin_lock_irqsave(&sunway_domain_lock, flags); + list_add(&sdomain->list, &sunway_domain_list); + spin_unlock_irqrestore(&sunway_domain_lock, flags); +} + +static void del_domain_from_list(struct sunway_iommu_domain *sdomain) +{ + unsigned long flags; + + spin_lock_irqsave(&sunway_domain_lock, flags); + list_del(&sdomain->list); + spin_unlock_irqrestore(&sunway_domain_lock, flags); +} + +static void free_pagetable(struct sunway_iommu_domain *sdomain) +{ + unsigned long pde; + unsigned long *pde_ptr; + int i, pdes_one_page; + + 
pde_ptr = sdomain->pt_root; + if (!pde_ptr) + return; + + pdes_one_page = PAGE_SIZE/sizeof(pde); + for (i = 0; i < pdes_one_page; i++, pde_ptr++) { + pde = *pde_ptr; + if ((pde & SW64_IOMMU_ENTRY_VALID) == 0) + continue; + + pde &= ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK; + pde |= PAGE_OFFSET; + free_page(pde); + } + + free_page((unsigned long)sdomain->pt_root); +} + +static void domain_id_free(int id) +{ + spin_lock(&domain_bitmap_lock); + if (id > 0) + __clear_bit(id, sunway_iommu_domain_bitmap); + spin_unlock(&domain_bitmap_lock); +} + +static void dma_domain_free(struct dma_domain *dma_dom) +{ + if (!dma_dom) + return; + + del_domain_from_list(&dma_dom->sdomain); + put_iova_domain(&dma_dom->iovad); + free_pagetable(&dma_dom->sdomain); + if (dma_dom->sdomain.id) + domain_id_free(dma_dom->sdomain.id); + + kfree(dma_dom); +} + +static void sunway_domain_free(struct sunway_iommu_domain *sdomain) +{ + if (!sdomain) + return; + + del_domain_from_list(sdomain); + if (sdomain->id) + domain_id_free(sdomain->id); + + kfree(sdomain); +} + +static u16 sunway_domain_id_alloc(void) +{ + int id; + + spin_lock(&domain_bitmap_lock); + id = find_first_zero_bit(sunway_iommu_domain_bitmap, MAX_DOMAIN_NUM); + if (id > 0 && id < MAX_DOMAIN_NUM) + __set_bit(id, sunway_iommu_domain_bitmap); + else + id = 0; + spin_unlock(&domain_bitmap_lock); + + return id; +} + +static int sunway_domain_init(struct sunway_iommu_domain *sdomain) +{ + spin_lock_init(&sdomain->lock); + mutex_init(&sdomain->api_lock); + sdomain->id = sunway_domain_id_alloc(); + if (!sdomain->id) + return -ENOMEM; + INIT_LIST_HEAD(&sdomain->dev_list); + + return 1; +} + +static struct sunway_iommu_domain *sunway_domain_alloc(void) +{ + struct sunway_iommu_domain *sdomain; + + sdomain = kzalloc(sizeof(struct sunway_iommu_domain), GFP_KERNEL); + if (!sdomain) + return NULL; + + if (!sunway_domain_init(sdomain)) { + kfree(sdomain); + return NULL; + } + + add_domain_to_list(sdomain); + return sdomain; +} + +static struct 
dma_domain *dma_domain_alloc(void) +{ + struct dma_domain *dma_dom; + struct page; + + dma_dom = kzalloc(sizeof(struct dma_domain), GFP_KERNEL); + if (!dma_dom) + return NULL; + + sunway_domain_init(&dma_dom->sdomain); + dma_dom->sdomain.type = IOMMU_DOMAIN_DMA; + + dma_dom->sdomain.pt_root = (unsigned long *)get_zeroed_page(GFP_KERNEL); + if (dma_dom->sdomain.pt_root == NULL) { + pr_err("Allocating a new sdomain pt_root failed!\n"); + dma_domain_free(dma_dom); + return NULL; + } + + add_domain_to_list(&dma_dom->sdomain); + + return dma_dom; +} + +static void device_flush_all(struct sunway_iommu_dev *sdata) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(sdata->pdev->bus); + + if (hose == NULL) + return; + + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHDEV, sdata->devid); + write_piu_ior0(hose->node, hose->index, PTLB_FLUSHDEV, sdata->devid); + write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHDEV, sdata->devid); +} + +/* iommu_ops device attach/unattach helpers */ +static void +set_dte_entry(struct sunway_iommu_dev *sdev, struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct pci_dev *pdev; + struct page *page; + unsigned long *dte_l1, *dte_l2; + unsigned long dte_l1_val, dte_l2_base, dte_l2_val; + + pdev = sdev->pdev; + if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) + return; + + sdev->devid = PCI_DEVID(pdev->bus->number, pdev->devfn); + iommu = sdev->iommu; + dte_l1 = iommu->iommu_dtbr + (pdev->bus->number); + dte_l1_val = *dte_l1; + + if (!dte_l1_val) { + /* Alloc a new level-2 device table page */ + page = alloc_pages_node(iommu->node, __GFP_ZERO, + get_order(PAGE_SIZE)); + if (!page) { + pr_err("Allocating a new level-2 device table page failed.\n"); + return; + } + + dte_l2_base = (unsigned long)page_address(page); + dte_l1_val = (__pa(dte_l2_base) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + *dte_l1 = dte_l1_val; + } + + dte_l2 = __va(dte_l1_val & ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + (pdev->devfn << 3); + 
dte_l2_val = (__pa(sdomain->pt_root) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + if (iommu_identity_mapping) { + dte_l2_val |= 0x1; + sdev->passthrough = IDENTMAP_ALL; + } + *dte_l2 = dte_l2_val; + + device_flush_all(sdev); +} + +static void +do_attach(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + sdev_data->domain = sdomain; + list_add(&sdev_data->list, &sdomain->dev_list); + + sdomain->dev_cnt++; + set_dte_entry(sdev_data, sdomain); + + pr_debug("iommu: device %d add to domain: %d\n", + sdev_data->devid, sdomain->id); +} + +static void do_detach(struct sunway_iommu_dev *sdev_data) +{ + struct sunway_iommu_domain *sdomain = sdev_data->domain; + + sdev_data->domain = NULL; + list_del(&sdev_data->list); + device_flush_all(sdev_data); + + sdomain->dev_cnt--; + pr_debug("iommu: device %d detached from domain %d\n", + sdev_data->devid, sdomain->id); +} + +static int +__attach_device(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + int ret; + + spin_lock(&sdomain->lock); + ret = -EBUSY; + if (sdev_data->domain != NULL) + goto out_unlock; + + do_attach(sdev_data, sdomain); + ret = 0; + +out_unlock: + spin_unlock(&sdomain->lock); + return ret; +} + +static void __detach_device(struct sunway_iommu_dev *sunway_dev_data) +{ + struct sunway_iommu_domain *domain; + + domain = sunway_dev_data->domain; + + spin_lock(&domain->lock); + do_detach(sunway_dev_data); + spin_unlock(&domain->lock); +} + +static int attach_device(struct device *dev, struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *sdev; + unsigned long flags; + int ret; + + sdev = dev_iommu_priv_get(dev); + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + ret = __attach_device(sdev, sdomain); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + return ret; +} + +static void detach_device(struct device *dev) +{ + struct sunway_iommu_domain *sunway_domain; + struct sunway_iommu_dev *sdev_data; + unsigned long flags; + 
+ sdev_data = dev_iommu_priv_get(dev); + sunway_domain = sdev_data->domain; + + if (WARN_ON(!sdev_data->domain)) + return; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + __detach_device(sdev_data); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + if (!dev_is_pci(dev)) + return; +} + +static struct sunway_iommu_dev *search_dev_data(u16 devid) +{ + struct sunway_iommu_dev *sdev_data; + struct llist_node *node; + + if (llist_empty(&dev_data_list)) + return NULL; + + node = dev_data_list.first; + llist_for_each_entry(sdev_data, node, dev_data_list) { + if (sdev_data->devid == devid) + return sdev_data; + } + + return NULL; +} + +/********************************************************************** + * + * Following functions describe IOMMU init ops + * + **********************************************************************/ + +static struct sunway_iommu *sunway_iommu_early_init(struct pci_controller *hose) +{ + struct sunway_iommu *iommu; + struct page *page; + unsigned long base; + + hose->pci_iommu = kzalloc(sizeof(struct sunway_iommu), GFP_KERNEL); + if (!hose->pci_iommu) + return 0; + + iommu = hose->pci_iommu; + spin_lock_init(&iommu->dt_lock); + + iommu->node = hose->node; + if (!node_online(hose->node)) + iommu->node = -1; + + page = alloc_pages_node(iommu->node, __GFP_ZERO, get_order(PAGE_SIZE)); + if (!page) { + pr_err("Allocating a new iommu_dtbr page failed.\n"); + kfree(hose->pci_iommu); + return NULL; + } + + iommu->iommu_dtbr = page_address(page); + + iommu->hose_pt = hose; + iommu->index = hose->index; + + iommu->enabled = true; + + base = __pa(iommu->iommu_dtbr) & PAGE_MASK; + write_piu_ior0(hose->node, hose->index, DTBASEADDR, base); + + return iommu; +} + +unsigned long fetch_dte(struct sunway_iommu *iommu, unsigned long devid, + enum exceptype type) +{ + unsigned long *dte_l1, *dte_l2; + unsigned long dte_l1_val, dte_l2_val; + + if (!iommu) + return 0; + dte_l1 = iommu->iommu_dtbr + (devid >> 8); + if (type 
== DTE_LEVEL1) + return (unsigned long)dte_l1; + + dte_l1_val = *dte_l1; + if (type == DTE_LEVEL1_VAL) + return dte_l1_val; + + dte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + dte_l1_val |= PAGE_OFFSET; + dte_l2 = (unsigned long *)(dte_l1_val + ((devid & 0xff) << 3)); + if (type == DTE_LEVEL2) + return (unsigned long)dte_l2; + + dte_l2_val = *dte_l2; + if (type == DTE_LEVEL2_VAL) + return dte_l2_val; + + return dte_l2_val; +} + +unsigned long fetch_pte(struct sunway_iommu_domain *sdomain, dma_addr_t iova, + enum exceptype type) +{ + unsigned long iova_pfn, pte_l1_val, pte_l2_val; + unsigned long *pte_l1, *pte_l2; + unsigned long pte_root; + unsigned long offset; + + if (!sdomain) + return -EINVAL; + + pte_root = __pa(sdomain->pt_root) & PAGE_MASK; + iova_pfn = iova >> PAGE_SHIFT; + pte_root = ((pte_root) & (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK)); + pte_root |= PAGE_OFFSET; + offset = ((iova_pfn >> 10) & SW64_IOMMU_LEVEL1_OFFSET) << 3; + pte_l1 = (unsigned long *)(pte_root + offset); + if (type == PTE_LEVEL1) + return (unsigned long)pte_l1; + + pte_l1_val = *pte_l1; + if (type == PTE_LEVEL1_VAL) + return pte_l1_val; + + pte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + pte_l1_val |= PAGE_OFFSET; + offset = (iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3; + pte_l2 = (unsigned long *)(pte_l1_val + offset); + + if (type == PTE_LEVEL2) + return (unsigned long)pte_l2; + + pte_l2_val = *pte_l2; + if (type == PTE_LEVEL2_VAL) + return pte_l2_val; + + return pte_l2_val; +} + +/* IOMMU Interrupt handle */ +irqreturn_t iommu_interrupt(int irq, void *dev) +{ + struct pci_controller *hose = (struct pci_controller *)dev; + struct sunway_iommu_domain *sdomain; + struct sunway_iommu_dev *sdev; + unsigned long iommu_status; + unsigned long type; + unsigned long devid, dva; + + iommu_status = read_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS); + if (!(iommu_status >> 63)) + return IRQ_NONE; + + type = (iommu_status >> 59) & 0x7; + devid = (iommu_status >> 37) 
& 0xffff; + dva = iommu_status & 0xffffffff; + pr_info("%s, iommu_status = %#lx, devid %#lx, dva %#lx, ", + __func__, iommu_status, devid, dva); + + sdev = search_dev_data(devid); + if (sdev == NULL) { + pr_info("no such dev!!!\n"); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + + return IRQ_HANDLED; + } + + sdomain = sdev->domain; + switch (type) { + case DTE_LEVEL1: + pr_info("invalid level1 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1_VAL)); + break; + case DTE_LEVEL2: + pr_info("invalid level2 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL2), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL2_VAL)); + break; + case PTE_LEVEL1: + pr_info("invalid level1 pte, addr: %#lx, val:%#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL1), + fetch_pte(sdomain, dva, PTE_LEVEL1_VAL)); + break; + case PTE_LEVEL2: + pr_info("invalid level2 pte, addr: %#lx, val: %#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL2), + fetch_pte(sdomain, dva, PTE_LEVEL2_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + + case UNAUTHORIZED_ACCESS: + pr_info("unauthorized access\n"); + break; + case ILLEGAL_RESPONSE: + pr_info("illegal response\n"); + break; + default: + pr_info("unknown error\n"); + break; + } + + return IRQ_HANDLED; +} + +struct irqaction iommu_irqaction = { + .handler = iommu_interrupt, + .flags = IRQF_SHARED | IRQF_NO_THREAD, + .name = "sunway_iommu", +}; + +void sunway_enable_iommu_func(struct pci_controller *hose) +{ + unsigned int iommu_irq, err; + unsigned long iommu_conf, iommu_ctrl; + + iommu_irq = hose->int_irq; + pr_debug("%s node %ld rc %ld iommu_irq %d\n", + __func__, hose->node, hose->index, iommu_irq); + err = request_irq(iommu_irq, iommu_interrupt, + IRQF_SHARED, "sunway_iommu", hose); + if (err < 0) + pr_info("sw iommu request irq 
failed!\n"); + + iommu_ctrl = (1UL << 63) | (0x100UL << 10); + write_piu_ior0(hose->node, hose->index, IOMMUEXCPT_CTRL, iommu_ctrl); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + iommu_conf = iommu_conf | (0x3 << 7); + write_piu_ior0(hose->node, hose->index, PIUCONFIG0, iommu_conf); + write_piu_ior0(hose->node, hose->index, TIMEOUT_CONFIG, 0xf); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + pr_debug("SW arch configure node %ld hose-%ld iommu_conf = %#lx\n", + hose->node, hose->index, iommu_conf); +} + +static bool is_iommu_enable(struct pci_controller *hose) +{ + u64 rc_mask = 0x1; + + rc_mask <<= (8 * hose->node + hose->index); + if (iommu_enable_cmd & rc_mask) + return true; + + return false; +} + +/* iommu cpu syscore ops */ +static int iommu_cpu_suspend(void) +{ + return 0; +} + +static void iommu_cpu_resume(void) +{ + +} + +struct syscore_ops iommu_cpu_syscore_ops = { + .suspend = iommu_cpu_suspend, + .resume = iommu_cpu_resume, +}; + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type); + +static int sunway_iommu_init(void) +{ + struct pci_controller *hose; + struct sunway_iommu *iommu; + int ret; + int iommu_index = 0; + + sunway_iommu_domain_bitmap = + (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(MAX_DOMAIN_NUM / 8)); + if (sunway_iommu_domain_bitmap == NULL) + return 0; + __set_bit(0, sunway_iommu_domain_bitmap); + + /* Do the loop */ + for (hose = hose_head; hose; hose = hose->next) { + if (!is_iommu_enable(hose)) { + hose->iommu_enable = false; + continue; + } + + iommu = sunway_iommu_early_init(hose); + if (!iommu) { + pr_err("Allocating sunway_iommu failed\n"); + hose->iommu_enable = false; + continue; + } + + iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "%d", + iommu_index); + iommu_index++; + sunway_enable_iommu_func(hose); + hose->iommu_enable = true; + + iommu_device_register(&iommu->iommu, &sunway_iommu_ops, NULL); + } + + ret = iova_cache_get(); + if (ret) + 
return ret; + + for (hose = hose_head; hose; hose = hose->next) + if (hose->iommu_enable) + piu_flush_all(hose); + + register_syscore_ops(&iommu_cpu_syscore_ops); + + return 1; +} +device_initcall(sunway_iommu_init); + +/******************************************************************************* + * + * DMA OPS Functions + * + ******************************************************************************/ + +struct sunway_iommu *get_first_iommu_from_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct sunway_iommu_dev *entry; + + entry = list_first_entry(&sdomain->dev_list, struct sunway_iommu_dev, list); + iommu = entry->iommu; + + return iommu; +} + +static unsigned long +sunway_iommu_unmap_page(struct sunway_iommu_domain *sunway_domain, + unsigned long iova, unsigned long page_size) +{ + unsigned long *pte_l2, unmapped; + + pr_debug("%s iova %#lx, page_size %#lx\n", __func__, iova, page_size); + BUG_ON(!is_power_of_2(page_size)); + + unmapped = 0; + while (unmapped < page_size) { + pte_l2 = (unsigned long *)fetch_pte(sunway_domain, iova, PTE_LEVEL2); + *pte_l2 = 0; + + flush_pcache_by_addr(sunway_domain, (unsigned long)pte_l2); + flush_ptlb_by_addr(sunway_domain, (iova >> PAGE_SHIFT)); + + iova += PAGE_SIZE; + unmapped += PAGE_SIZE; + } + + return unmapped; +} + +int sunway_iommu_map_page(struct sunway_iommu_domain *sunway_domain, + unsigned long bus_addr, unsigned long paddr, + size_t page_size) +{ + /* + * pde: page table entry + * pte: level 2 page table entry + * pte_root: page table root + */ + struct page *page; + struct sunway_iommu *iommu; + unsigned long pde, pte, iova_pfn; + unsigned long pdebaseaddr; + u64 *ptebasecond, ptebaseaddr; + u64 pte_root = (__pa(sunway_domain->pt_root) & PAGE_MASK); + + iova_pfn = (unsigned long)(bus_addr >> PAGE_SHIFT); + + pdebaseaddr = ((iova_pfn >> 10) & SW64_IOMMU_LEVEL1_OFFSET) << 3; + pdebaseaddr += ((pte_root) & (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK)) + + PAGE_OFFSET; + + pde = 
*(unsigned long *)pdebaseaddr; + if (pde) { + ptebaseaddr = (pde & (~SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + PAGE_OFFSET; + ptebaseaddr += (iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3; + + goto direct_map; + } + + iommu = get_first_iommu_from_domain(sunway_domain); + if (!iommu) + return -1; + page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); + if (!page) { + pr_err("Allocating pages failed.\n"); + return -1; + } + + ptebasecond = page_address(page); + pde = (__pa(ptebasecond) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + + /* + * If pde exists, no need to allocate a new page. + * Atomic compare and exchange, compare the value the pointer points to + * with 0UL. If identical, store pde where the pointer points to, return + * 0UL. Otherwise, return the value the pointer points to. + */ + if (cmpxchg64((volatile u64 *)pdebaseaddr, 0ULL, pde)) { + ptebaseaddr = ((*(volatile u64 *)pdebaseaddr) + & (~SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + PAGE_OFFSET; + ptebaseaddr += (iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3; + free_page((unsigned long)ptebasecond); + } else { + flush_pcache_by_addr(sunway_domain, pdebaseaddr); + ptebaseaddr = (unsigned long)ptebasecond + + ((iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3); + } + +direct_map: + /* case 8K */ + if (page_size == (1UL << PAGE_SHIFT)) { + if (*(volatile u64 *)ptebaseaddr) { + pr_err("IOVA 4G overlap. 
IOVA is %#lx.\n", bus_addr); + return -EFAULT; + } + + pte = (paddr & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID + | SW64_IOMMU_GRN_8K | SW64_IOMMU_ENABLE; + *(volatile u64 *)ptebaseaddr = pte; + flush_pcache_by_addr(sunway_domain, ptebaseaddr); + /* case 8M */ + } else if (page_size == (1UL << PAGE_8M_SHIFT)) { + unsigned long *ptr; + int i, ptes_one_page, ptes_one_cache; + + ptr = (unsigned long *)ptebaseaddr; + ptes_one_page = PAGE_SIZE/sizeof(pte); + ptes_one_cache = L1_CACHE_BYTES/sizeof(pte); + + pte = (paddr & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID + | SW64_IOMMU_GRN_8M | SW64_IOMMU_ENABLE; + + for (i = 0; i < ptes_one_page; i++) { + if (*ptr) { + pr_err("IOVA 4G overlap. IOVA is %#lx.\n", bus_addr); + return -EFAULT; + } + + *ptr = pte; + + /* just do once flush per cache line */ + if (i % ptes_one_cache == (ptes_one_cache - 1)) + flush_pcache_by_addr(sunway_domain, (unsigned long)ptr); + ptr++; + } + } +#ifdef CONFIG_SW64_GUEST + flush_ptlb_by_addr(sunway_domain, pfn | SW64_IOMMU_MAP_FLAG); +#endif + return 0; +} + +/********************************************************************** + * + * IOMMU OPS Functions + * + **********************************************************************/ + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + switch (type) { + case IOMMU_DOMAIN_UNMANAGED: + sdomain = sunway_domain_alloc(); + if (!sdomain) { + pr_err("Allocating sunway_domain failed!\n"); + return NULL; + } + + sdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); + if (!sdomain->pt_root) { + pr_err("Allocating pt_root failed!\n"); + sunway_domain_free(sdomain); + return NULL; + } + + sdomain->domain.geometry.aperture_start = 0ULL; + sdomain->domain.geometry.aperture_end = (~0ULL); + sdomain->domain.geometry.force_aperture = true; + sdomain->type = IOMMU_DOMAIN_UNMANAGED; + break; + + case IOMMU_DOMAIN_DMA: + dma_dom = dma_domain_alloc(); + if (!dma_dom) { + 
pr_err("Failed to alloc dma domain!\n"); + return NULL; + } + + sdomain = &dma_dom->sdomain; + break; + + case IOMMU_DOMAIN_IDENTITY: + sdomain = sunway_domain_alloc(); + if (!sdomain) + return NULL; + + sdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); + if (!sdomain->pt_root) { + pr_err("Allocating pt_root failed!\n"); + sunway_domain_free(sdomain); + return NULL; + } + + sdomain->type = IOMMU_DOMAIN_IDENTITY; + iommu_identity_mapping = 1; + break; + + default: + return NULL; + } + + return &sdomain->domain; +} + +static void clean_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *entry; + unsigned long flags; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + + while (!list_empty(&sdomain->dev_list)) { + entry = list_first_entry(&sdomain->dev_list, + struct sunway_iommu_dev, list); + + __detach_device(entry); + } + + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); +} + +static void sunway_iommu_domain_free(struct iommu_domain *dom) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + sdomain = to_sunway_domain(dom); + + if (sdomain->dev_cnt > 0) + clean_domain(sdomain); + + if (!dom) + return; + + switch (dom->type) { + case IOMMU_DOMAIN_DMA: + dma_dom = to_dma_domain(sdomain); + dma_domain_free(dma_dom); + break; + + default: + free_pagetable(sdomain); + sunway_domain_free(sdomain); + break; + } + +} + +static int sunway_iommu_attach_device(struct iommu_domain *dom, struct device *dev) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + struct sunway_iommu_dev *sdev_data; + struct pci_dev *pdev; + struct pci_controller *hose; + int ret; + + if (!dev_is_pci(dev)) + return -ENODEV; + + pdev = to_pci_dev(dev); + if (!pdev) + return -EINVAL; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return -EINVAL; + + if (!hose->iommu_enable) + return -EINVAL; + + sdev_data = dev_iommu_priv_get(dev); + if (!sdev_data) + return -EINVAL; + + if (sdev_data->domain) 
+ detach_device(dev); + + ret = attach_device(dev, sdomain); + + return ret; +} + +static phys_addr_t +sunway_iommu_iova_to_phys(struct iommu_domain *dom, dma_addr_t iova) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + unsigned long paddr, grn; + + if (iova >= SW64_BAR_ADDRESS) + return iova; + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL2_VAL); + + if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) + return 0; + + paddr &= ~SW64_IOMMU_ENTRY_VALID; + grn = paddr & SW64_PTE_GRN_MASK; /* get page granularity */ + paddr &= PAGE_MASK; + + switch (grn) { + case SW64_IOMMU_GRN_8M: + paddr += (iova & ~HPAGE_MASK); + break; + case SW64_IOMMU_GRN_8K: + default: + paddr += (iova & ~PAGE_MASK); + break; + } + + return paddr; +} + +static int +sunway_iommu_map_pages(struct iommu_domain *dom, unsigned long iova, + phys_addr_t paddr, size_t page_size, size_t pgcount, + int iommu_prot, gfp_t gfp, size_t *mapped) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + size_t size = pgcount << PAGE_SHIFT; + int ret; + + /* + * As VFIO cannot distinguish between normal DMA request + * and pci device BAR, check should be introduced manually + * to avoid VFIO trying to map pci config space. 
+ */ + if (iova >= SW64_BAR_ADDRESS) + return 0; + + mutex_lock(&sdomain->api_lock); + while (pgcount--) { + ret = sunway_iommu_map_page(sdomain, iova, paddr, page_size); + if (ret) { + pr_info("Failed to map page from IOVA %lx.\n", iova); + return ret; + } + iova += page_size; + paddr += page_size; + } + mutex_unlock(&sdomain->api_lock); + + if (!ret && mapped) + *mapped = size; + + return ret; +} + +static size_t +sunway_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova, + size_t page_size, size_t pgcount, + struct iommu_iotlb_gather *gather) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + size_t unmap_size; + size_t total_unmap = 0; + + if (iova >= SW64_BAR_ADDRESS) + return page_size; + + mutex_lock(&sdomain->api_lock); + while (pgcount--) { + unmap_size = sunway_iommu_unmap_page(sdomain, iova, page_size); + iova += page_size; + total_unmap += page_size; + } + mutex_unlock(&sdomain->api_lock); + + return total_unmap; +} + +static struct iommu_group *sunway_iommu_device_group(struct device *dev) +{ + return generic_device_group(dev); +} + +static int iommu_init_device(struct device *dev) +{ + struct sunway_iommu_dev *sdev; + struct sunway_iommu *iommu; + struct pci_dev *pdev; + struct pci_controller *hose; + + if (dev_iommu_priv_get(dev)) + return 0; + + sdev = kzalloc(sizeof(struct sunway_iommu_dev), GFP_KERNEL); + if (!sdev) + return -ENOMEM; + + pdev = to_pci_dev(dev); + hose = pci_bus_to_pci_controller(pdev->bus); + iommu = hose->pci_iommu; + llist_add(&sdev->dev_data_list, &dev_data_list); + sdev->pdev = pdev; + sdev->iommu = iommu; + + dev_iommu_priv_set(dev, sdev); + + return 0; +} + +static void iommu_uninit_device(struct device *dev) +{ + struct sunway_iommu_dev *sdev; + + sdev = dev_iommu_priv_get(dev); + if (!sdev) + return; + + if (sdev->domain) + detach_device(dev); + + dev_iommu_priv_set(dev, NULL); +} + +static void sunway_iommu_release_device(struct device *dev) +{ + struct pci_dev *pdev; + struct pci_controller 
*hose; + + pdev = to_pci_dev(dev); + if (!pdev) + return; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose->iommu_enable) + return; + + iommu_uninit_device(dev); +} + +static struct iommu_device *sunway_iommu_probe_device(struct device *dev) +{ + struct pci_dev *pdev; + struct pci_controller *hose; + struct sunway_iommu *iommu; + int ret; + + if (!dev_is_pci(dev)) + return ERR_PTR(-ENODEV); + + pdev = to_pci_dev(dev); + if (!pdev) + return ERR_PTR(-ENODEV); + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return ERR_PTR(-ENODEV); + + if (!hose->iommu_enable) + return ERR_PTR(-ENODEV); + + if (dev_iommu_priv_get(dev)) + return &iommu->iommu; + + ret = iommu_init_device(dev); + if (ret) + return ERR_PTR(ret); + + iommu = hose->pci_iommu; + + return &iommu->iommu; +} + +static int sunway_iommu_def_domain_type(struct device *dev) +{ + if (dev_is_pci(dev)) { + if (iommu_identity_mapping) + return IOMMU_DOMAIN_IDENTITY; + } + + return 0; +} + +static bool sunway_iommu_capable(struct device *dev, enum iommu_cap cap) +{ + return false; +} + +static void sunway_iommu_probe_finalize(struct device *dev) +{ + set_dma_ops(dev, NULL); + iommu_setup_dma_ops(dev, 0, SW64_DMA_LIMIT); +} + +const struct iommu_ops sunway_iommu_ops = { + .capable = sunway_iommu_capable, + .domain_alloc = sunway_iommu_domain_alloc, + .probe_device = sunway_iommu_probe_device, + .probe_finalize = sunway_iommu_probe_finalize, + .release_device = sunway_iommu_release_device, + .device_group = sunway_iommu_device_group, + .pgsize_bitmap = SW64_IOMMU_PGSIZES, + .def_domain_type = sunway_iommu_def_domain_type, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = sunway_iommu_attach_device, + .map_pages = sunway_iommu_map_pages, + .unmap_pages = sunway_iommu_unmap_pages, + .iova_to_phys = sunway_iommu_iova_to_phys, + .free = sunway_iommu_domain_free, + } +}; + +/***************************************************************************** + * + * Boot param 
handle + * Each bit of iommu_enable bitmap represents an rc enable, and every 8 bits + * represents one cpu node. For example, iommu_enable=0x0100 means enabling + * rc0 for cpu node 1. + * + *****************************************************************************/ +static int __init iommu_enable_setup(char *str) +{ + int ret; + unsigned long rc_bitmap = 0xffffffffUL; + + ret = kstrtoul(str, 16, &rc_bitmap); + iommu_enable_cmd = rc_bitmap; + + return ret; +} +early_param("iommu_enable", iommu_enable_setup); diff --git a/drivers/iommu/sw64/iommu_v2.c b/drivers/iommu/sw64/iommu_v2.c new file mode 100644 index 000000000000..f3e19e524210 --- /dev/null +++ b/drivers/iommu/sw64/iommu_v2.c @@ -0,0 +1,1780 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * iommu.c: Generic sw64 IOMMU support + * + * This is designed and tested for 3231. If there are no changes in hardware + * in later chips, then it should work just as well. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sunway_iommu.h" + +#define MAX_DOMAIN_NUM 65536 +#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) +#define SW64_32BIT_DMA_LIMIT (0xe0000000 - 1) +#define SW64_64BIT_DMA_LIMIT ((1UL << 41) - 1) +#define SW64_BAR_ADDRESS (IO_BASE | PCI_BASE) + +#define SW64_IOMMU_PGSIZES (((1ULL) << PAGE_SHIFT) \ + | ((1ULL) << PAGE_8M_SHIFT) \ + | ((1ULL) << PAGE_512M_SHIFT) \ + | ((1ULL) << PAGE_8G_SHIFT)) + +#define IDENTMAP_ALL ((1U) << 0) +#define DMA_MASK64 ((1U) << 1) + +#define PTE_VALID 0x8000000000000000UL +#define LAST_STAGE 0x100UL +#define PTE_GRN_8M 0x10UL +#define PTE_GRN_512M 0x20UL +#define PTE_GRN_8G 0x30UL +#define PTE_WRITEE 0x2UL +#define PTE_READE 0x1UL +#define PTE_RWE 0x3UL +#define PTE_FLAGS_MASK 0x8000000000000133UL +#define PAGE_8G_OFFSET_MASK ((1UL << PAGE_8G_SHIFT) - 1) +#define 
PAGE_512M_OFFSET_MASK ((1UL << PAGE_512M_SHIFT) - 1) +#define PAGE_8M_OFFSET_MASK ((1UL << PAGE_8M_SHIFT) - 1) + +/* IOMMU Exceptional Status */ +enum exceptype { + DTE_LEVEL1 = 0x0, + DTE_LEVEL2, + PTE_LEVEL1, + PTE_LEVEL2, + PTE_LEVEL3, + UNAUTHORIZED_ACCESS, + ILLEGAL_RESPONSE, + DTE_LEVEL1_VAL, + DTE_LEVEL2_VAL, + PTE_LEVEL1_VAL, + PTE_LEVEL2_VAL, + PTE_LEVEL3_VAL, +}; + +u64 iommu_enable_cmd; /* default IOMMU boot param: 0 */ + +unsigned long *sunway_iommu_domain_bitmap; + +static DEFINE_SPINLOCK(domain_bitmap_lock); +static DEFINE_SPINLOCK(sunway_iommu_device_table_lock); +spinlock_t sunway_domain_lock; + +static LLIST_HEAD(dev_data_list); +LIST_HEAD(sunway_domain_list); + +struct dma_domain { + struct sunway_iommu_domain sdomain; + struct iova_domain iovad; +}; +const struct iommu_ops sunway_iommu_ops; +static const struct dma_map_ops sunway_dma_ops; + + +/* flush helpers */ +static void piu_flush_all(struct pci_controller *hose) +{ + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHALL, 0); + write_piu_ior0(hose->node, hose->index, PTLB_FLUSHALL, 0); + write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHALL, 0); +} + +void flush_pcache_by_addr(struct sunway_iommu_domain *sdomain, unsigned long flush_addr) +{ + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + + list_for_each_entry(sdev, &sdomain->dev_list, list) { + hose = pci_bus_to_pci_controller(sdev->pdev->bus); + + flush_addr = __pa(flush_addr); + /* Set memory bar here */ + mb(); + write_piu_ior0(hose->node, hose->index, + PCACHE_FLUSHPADDR, flush_addr); + } +} + +void flush_ptlb_by_addr(struct sunway_iommu_domain *sdomain, unsigned long flush_addr) +{ + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + struct pci_dev *pdev; + + list_for_each_entry(sdev, &sdomain->dev_list, list) { + pdev = sdev->pdev; + hose = pci_bus_to_pci_controller(pdev->bus); + + flush_addr = (pdev->bus->number << 8) + | pdev->devfn | (flush_addr << 16); + write_piu_ior0(hose->node, hose->index, + 
PTLB_FLUSHVADDR, flush_addr); + } +} + +/* domain helpers */ +static struct sunway_iommu_domain *to_sunway_domain(struct iommu_domain *dom) +{ + return container_of(dom, struct sunway_iommu_domain, domain); +} + +static struct dma_domain *to_dma_domain(struct sunway_iommu_domain *sdomain) +{ + return container_of(sdomain, struct dma_domain, sdomain); +} + +static void add_domain_to_list(struct sunway_iommu_domain *sdomain) +{ + unsigned long flags; + + spin_lock_irqsave(&sunway_domain_lock, flags); + list_add(&sdomain->list, &sunway_domain_list); + spin_unlock_irqrestore(&sunway_domain_lock, flags); +} + +static void del_domain_from_list(struct sunway_iommu_domain *sdomain) +{ + unsigned long flags; + + spin_lock_irqsave(&sunway_domain_lock, flags); + list_del(&sdomain->list); + spin_unlock_irqrestore(&sunway_domain_lock, flags); +} + +static void free_pagetable(struct sunway_iommu_domain *sdomain) +{ + unsigned long *l2_pte, *l3_pte; + unsigned long l2_pte_val, l3_pte_val; + int l2_index, l3_index, ptes_one_page; + + l2_pte = sdomain->pt_root; + if (!l2_pte) + return; + + ptes_one_page = PAGE_SIZE/sizeof(unsigned long); + for (l2_index = 0; l2_index < ptes_one_page; l2_index++, l2_pte++) { + l2_pte_val = *l2_pte; + if ((l2_pte_val & SW64_IOMMU_ENTRY_VALID) == 0) + continue; + + l2_pte_val &= ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK; + l2_pte_val |= PAGE_OFFSET; + l3_pte = (unsigned long *)l2_pte_val; + for (l3_index = 0; l3_index < ptes_one_page; l3_index++, l3_pte++) { + l3_pte_val = *l3_pte; + if ((l3_pte_val & SW64_IOMMU_ENTRY_VALID) == 0) + continue; + + l3_pte_val &= ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK; + l3_pte_val |= PAGE_OFFSET; + free_page(l3_pte_val); + } + free_page(l2_pte_val); + } + + free_page((unsigned long)sdomain->pt_root); +} + +static void domain_id_free(int id) +{ + spin_lock(&domain_bitmap_lock); + if (id > 0) + __clear_bit(id, sunway_iommu_domain_bitmap); + spin_unlock(&domain_bitmap_lock); +} + +static void dma_domain_free(struct dma_domain 
*dma_dom) +{ + if (!dma_dom) + return; + + del_domain_from_list(&dma_dom->sdomain); + put_iova_domain(&dma_dom->iovad); + free_pagetable(&dma_dom->sdomain); + if (dma_dom->sdomain.id) + domain_id_free(dma_dom->sdomain.id); + + kfree(dma_dom); +} + +static void sunway_domain_free(struct sunway_iommu_domain *sdomain) +{ + if (!sdomain) + return; + + del_domain_from_list(sdomain); + if (sdomain->id) + domain_id_free(sdomain->id); + + kfree(sdomain); +} + +static u16 sunway_domain_id_alloc(void) +{ + int id; + + spin_lock(&domain_bitmap_lock); + id = find_first_zero_bit(sunway_iommu_domain_bitmap, MAX_DOMAIN_NUM); + if (id > 0 && id < MAX_DOMAIN_NUM) + __set_bit(id, sunway_iommu_domain_bitmap); + else + id = 0; + spin_unlock(&domain_bitmap_lock); + + return id; +} + +static int sunway_domain_init(struct sunway_iommu_domain *sdomain) +{ + spin_lock_init(&sdomain->lock); + mutex_init(&sdomain->api_lock); + sdomain->id = sunway_domain_id_alloc(); + if (!sdomain->id) + return -ENOMEM; + INIT_LIST_HEAD(&sdomain->dev_list); + + return 1; +} + +static struct sunway_iommu_domain *sunway_domain_alloc(void) +{ + struct sunway_iommu_domain *sdomain; + + sdomain = kzalloc(sizeof(struct sunway_iommu_domain), GFP_KERNEL); + if (!sdomain) + return NULL; + + if (!sunway_domain_init(sdomain)) { + kfree(sdomain); + return NULL; + } + + add_domain_to_list(sdomain); + return sdomain; +} + +static struct dma_domain *dma_domain_alloc(void) +{ + struct dma_domain *dma_dom; + struct page; + + dma_dom = kzalloc(sizeof(struct dma_domain), GFP_KERNEL); + if (!dma_dom) + return NULL; + + sunway_domain_init(&dma_dom->sdomain); + dma_dom->sdomain.type = IOMMU_DOMAIN_DMA; + init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_PFN(SW64_DMA_START)); + reserve_iova(&dma_dom->iovad, (0xe0000000UL >> PAGE_SHIFT), (0x100000000UL >> PAGE_SHIFT)); + + add_domain_to_list(&dma_dom->sdomain); + + return dma_dom; +} + +static void device_flush_all(struct sunway_iommu_dev *sdata) +{ + struct pci_controller *hose = 
pci_bus_to_pci_controller(sdata->pdev->bus); + + if (hose == NULL) + return; + + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHDEV, sdata->devid); + write_piu_ior0(hose->node, hose->index, PTLB_FLUSHDEV, sdata->devid); + write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHDEV, sdata->devid); +} + +/* iommu_ops device attach/unattach helpers */ +static void +set_dte_entry(struct sunway_iommu_dev *sdev, struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct pci_dev *pdev; + struct page *dt_page, *pt_page; + unsigned long *dte_l1, *dte_l2; + unsigned long dte_l1_val, dte_l2_base, dte_l2_val; + + pdev = sdev->pdev; + if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) + return; + + sdev->devid = PCI_DEVID(pdev->bus->number, pdev->devfn); + iommu = sdev->iommu; + dte_l1 = iommu->iommu_dtbr + (pdev->bus->number); + dte_l1_val = *dte_l1; + + if (!dte_l1_val) { + /* Alloc a new level-2 device table page */ + dt_page = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, + get_order(PAGE_SIZE)); + if (!dt_page) { + pr_err("Allocating a new level-2 device table page failed.\n"); + return; + } + + dte_l2_base = (unsigned long)page_address(dt_page); + dte_l1_val = (__pa(dte_l2_base) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + *dte_l1 = dte_l1_val; + } + + if (!sdomain->pt_root) { + pt_page = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, 0); + if (!pt_page) { + pr_err("Allocating pt_root failed!\n"); + return; + } + + sdomain->pt_root = page_address(pt_page); + } + + dte_l2 = __va(dte_l1_val & ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + (pdev->devfn << 3); + dte_l2_val = (__pa(sdomain->pt_root) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + if (sdomain->type == IOMMU_DOMAIN_IDENTITY) { + dte_l2_val |= 0x1; + sdev->passthrough = IDENTMAP_ALL; + } + *dte_l2 = dte_l2_val; + device_flush_all(sdev); +} + +static void +do_attach(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + sdev_data->domain = sdomain; + list_add(&sdev_data->list, 
&sdomain->dev_list); + + sdomain->dev_cnt++; + set_dte_entry(sdev_data, sdomain); + + pr_debug("iommu: device %d add to domain: %d\n", + sdev_data->devid, sdomain->id); +} + +static void do_detach(struct sunway_iommu_dev *sdev_data) +{ + struct sunway_iommu_domain *sdomain = sdev_data->domain; + + sdev_data->domain = NULL; + list_del(&sdev_data->list); + device_flush_all(sdev_data); + + sdomain->dev_cnt--; + pr_debug("iommu: device %d detached from domain %d\n", + sdev_data->devid, sdomain->id); +} + +static int +__attach_device(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + int ret; + + spin_lock(&sdomain->lock); + ret = -EBUSY; + if (sdev_data->domain != NULL) + goto out_unlock; + + do_attach(sdev_data, sdomain); + ret = 0; + +out_unlock: + spin_unlock(&sdomain->lock); + return ret; +} + +static void __detach_device(struct sunway_iommu_dev *sunway_dev_data) +{ + struct sunway_iommu_domain *domain; + + domain = sunway_dev_data->domain; + + spin_lock(&domain->lock); + do_detach(sunway_dev_data); + spin_unlock(&domain->lock); +} + +static int attach_device(struct device *dev, struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *sdev; + unsigned long flags; + int ret; + + sdev = dev_iommu_priv_get(dev); + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + ret = __attach_device(sdev, sdomain); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + return ret; +} + +static void detach_device(struct device *dev) +{ + struct sunway_iommu_domain *sunway_domain; + struct sunway_iommu_dev *sdev; + unsigned long flags; + + sdev = dev_iommu_priv_get(dev); + sunway_domain = sdev->domain; + + if (WARN_ON(!sdev->domain)) + return; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + __detach_device(sdev); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + if (!dev_is_pci(dev)) + return; +} + +static struct sunway_iommu_dev *search_dev_data(u16 devid) +{ + struct 
sunway_iommu_dev *sdev_data; + struct llist_node *node; + + if (llist_empty(&dev_data_list)) + return NULL; + + node = dev_data_list.first; + llist_for_each_entry(sdev_data, node, dev_data_list) { + if (sdev_data->devid == devid) + return sdev_data; + } + + return NULL; +} + +/* dma_ops helpers*/ +static struct sunway_iommu_domain *get_sunway_domain(struct device *dev) +{ + struct sunway_iommu_domain *sdomain; + struct iommu_domain *domain; + struct pci_dev *pdev; + struct sunway_iommu_dev *sdev; + + pdev = to_pci_dev(dev); + if (!pdev) + return ERR_PTR(-ENODEV); + + sdev = dev_iommu_priv_get(dev); + sdomain = sdev->domain; + if (sdomain == NULL) { + domain = iommu_get_domain_for_dev(dev); + sdomain = to_sunway_domain(domain); + attach_device(dev, sdomain); + } + + if (sdomain == NULL) + return ERR_PTR(-EBUSY); + + return sdomain; +} + +/********************************************************************** + * + * Following functions describe IOMMU init ops + * + **********************************************************************/ + +static struct sunway_iommu *sunway_iommu_early_init(struct pci_controller *hose) +{ + struct sunway_iommu *iommu; + struct page *page; + unsigned long base; + + hose->pci_iommu = kzalloc(sizeof(struct sunway_iommu), GFP_KERNEL); + if (!hose->pci_iommu) + return 0; + + iommu = hose->pci_iommu; + spin_lock_init(&iommu->dt_lock); + + iommu->node = hose->node; + if (!node_online(hose->node)) + iommu->node = -1; + + page = alloc_pages_node(iommu->node, __GFP_ZERO, get_order(PAGE_SIZE)); + if (!page) { + pr_err("Allocating a new iommu_dtbr page failed.\n"); + kfree(hose->pci_iommu); + return NULL; + } + iommu->iommu_dtbr = page_address(page); + + iommu->hose_pt = hose; + iommu->index = hose->index; + + iommu->enabled = true; + + base = __pa(iommu->iommu_dtbr) & PAGE_MASK; + write_piu_ior0(hose->node, hose->index, DTBASEADDR, base); + + return iommu; +} + +unsigned long fetch_dte(struct sunway_iommu *iommu, unsigned long devid, + enum 
exceptype type) +{ + unsigned long *dte_l1, *dte_l2; + unsigned long dte_l1_val, dte_l2_val; + + if (!iommu) + return 0; + dte_l1 = iommu->iommu_dtbr + (devid >> 8); + if (type == DTE_LEVEL1) + return (unsigned long)dte_l1; + + dte_l1_val = *dte_l1; + if (type == DTE_LEVEL1_VAL) + return dte_l1_val; + + dte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + dte_l1_val |= PAGE_OFFSET; + dte_l2 = (unsigned long *)(dte_l1_val + ((devid & 0xff) << 3)); + if (type == DTE_LEVEL2) + return (unsigned long)dte_l2; + + dte_l2_val = *dte_l2; + if (type == DTE_LEVEL2_VAL) + return dte_l2_val; + + return dte_l2_val; +} + +unsigned long fetch_pte(struct sunway_iommu_domain *sdomain, dma_addr_t iova, + enum exceptype type) +{ + unsigned long iova_pfn; + unsigned long pte_l1_val, pte_l2_val, pte_l3_val; + unsigned long *pte_l1, *pte_l2, *pte_l3; + unsigned long pte_root; + unsigned long offset; + + if (!sdomain) + return -EINVAL; + + pte_root = __pa(sdomain->pt_root) & PAGE_MASK; + iova_pfn = iova >> PAGE_SHIFT; + pte_root = ((pte_root) & (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK)); + pte_root |= PAGE_OFFSET; + offset = ((iova_pfn >> 20) & SW64_IOMMU_LEVEL1_OFFSET) << 3; + pte_l1 = (unsigned long *)(pte_root + offset); + if (type == PTE_LEVEL1) + return (unsigned long)pte_l1; + + pte_l1_val = *pte_l1; + if (type == PTE_LEVEL1_VAL) + return pte_l1_val; + + pte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + pte_l1_val |= PAGE_OFFSET; + offset = ((iova_pfn >> 10) & SW64_IOMMU_LEVEL2_OFFSET) << 3; + pte_l2 = (unsigned long *)(pte_l1_val + offset); + + if (type == PTE_LEVEL2) + return (unsigned long)pte_l2; + + pte_l2_val = *pte_l2; + if (type == PTE_LEVEL2_VAL) + return pte_l2_val; + + pte_l2_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + pte_l2_val |= PAGE_OFFSET; + offset = (iova_pfn & SW64_IOMMU_LEVEL3_OFFSET) << 3; + pte_l3 = (unsigned long *)(pte_l2_val + offset); + if (type == PTE_LEVEL3) + return (unsigned long)pte_l3; + + pte_l3_val = *pte_l3; + if (type == 
PTE_LEVEL3_VAL) + return pte_l3_val; + + return pte_l3_val; +} + +/* IOMMU Interrupt handle */ +irqreturn_t iommu_interrupt(int irq, void *dev) +{ + struct pci_controller *hose = (struct pci_controller *)dev; + struct sunway_iommu_domain *sdomain; + struct sunway_iommu_dev *sdev; + unsigned long iommu_status; + unsigned long type; + unsigned long devid, dva; + + iommu_status = read_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS); + if (!(iommu_status >> 63)) + return IRQ_NONE; + + type = (iommu_status >> 58) & 0xf; + devid = (iommu_status >> 36) & 0xffff; + dva = ((iommu_status & 0xffffffff) >> 3) << 13; + pr_info("%s, iommu_status = %#lx, devid %#lx, dva %#lx, ", + __func__, iommu_status, devid, dva); + + sdev = search_dev_data(devid); + if (sdev == NULL) { + pr_info("no such dev!!!\n"); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + + return IRQ_HANDLED; + } + + sdomain = sdev->domain; + switch (type) { + case DTE_LEVEL1: + pr_info("invalid level1 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1_VAL)); + break; + case DTE_LEVEL2: + pr_info("invalid level2 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL2), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL2_VAL)); + break; + case PTE_LEVEL1: + pr_info("invalid level1 pte, addr: %#lx, val:%#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL1), + fetch_pte(sdomain, dva, PTE_LEVEL1_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + case PTE_LEVEL2: + pr_info("invalid level2 pte, addr: %#lx, val: %#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL2), + fetch_pte(sdomain, dva, PTE_LEVEL2_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + + case PTE_LEVEL3: + pr_info("invalid level3 pte, addr: %#lx, val: %#lx\n", 
+ fetch_pte(sdomain, dva, PTE_LEVEL3), + fetch_pte(sdomain, dva, PTE_LEVEL3_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + default: + pr_info("iommu exception type %ld\n", type); + break; + } + + return IRQ_HANDLED; +} + +struct irqaction iommu_irqaction = { + .handler = iommu_interrupt, + .flags = IRQF_SHARED | IRQF_NO_THREAD, + .name = "sunway_iommu", +}; + +void sunway_enable_iommu_func(struct pci_controller *hose) +{ + unsigned int iommu_irq, err; + unsigned long iommu_conf, iommu_ctrl; + + iommu_irq = hose->int_irq; + pr_debug("%s node %ld rc %ld iommu_irq %d\n", + __func__, hose->node, hose->index, iommu_irq); + err = request_irq(iommu_irq, iommu_interrupt, + IRQF_SHARED, "sunway_iommu", hose); + if (err < 0) + pr_info("sw iommu request irq failed!\n"); + + iommu_ctrl = (1UL << 63) | (0x100UL << 10); + write_piu_ior0(hose->node, hose->index, IOMMUEXCPT_CTRL, iommu_ctrl); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + iommu_conf = iommu_conf | (0x3 << 7); + write_piu_ior0(hose->node, hose->index, PIUCONFIG0, iommu_conf); + write_piu_ior0(hose->node, hose->index, TIMEOUT_CONFIG, 0xf); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + pr_debug("SW arch configure node %ld hose-%ld iommu_conf = %#lx\n", + hose->node, hose->index, iommu_conf); +} + +static bool is_iommu_enable(struct pci_controller *hose) +{ + u64 rc_mask = 0x1; + + rc_mask <<= (8 * hose->node + hose->index); + if (iommu_enable_cmd & rc_mask) + return true; + + return false; +} + +/* iommu cpu syscore ops */ +static int iommu_cpu_suspend(void) +{ + return 0; +} + +static void iommu_cpu_resume(void) +{ + +} + +struct syscore_ops iommu_cpu_syscore_ops = { + .suspend = iommu_cpu_suspend, + .resume = iommu_cpu_resume, +}; + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type); + +static int sunway_iommu_init(void) +{ + struct pci_controller *hose; + struct 
sunway_iommu *iommu; + int ret; + int iommu_index = 0; + + sunway_iommu_domain_bitmap = + (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(MAX_DOMAIN_NUM / 8)); + if (sunway_iommu_domain_bitmap == NULL) + return 0; + __set_bit(0, sunway_iommu_domain_bitmap); + + /* Do the loop */ + for (hose = hose_head; hose; hose = hose->next) { + if (!is_iommu_enable(hose)) { + hose->iommu_enable = false; + continue; + } + + iommu = sunway_iommu_early_init(hose); + if (!iommu) { + pr_err("Allocating sunway_iommu failed\n"); + hose->iommu_enable = false; + continue; + } + + iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "%d", + iommu_index); + iommu_device_set_ops(&iommu->iommu, &sunway_iommu_ops); + iommu_device_register(&iommu->iommu); + iommu_index++; + sunway_enable_iommu_func(hose); + hose->iommu_enable = true; + } + + ret = iova_cache_get(); + if (ret) + return ret; + + ret = bus_set_iommu(&pci_bus_type, &sunway_iommu_ops); + if (ret) + return ret; + + for (hose = hose_head; hose; hose = hose->next) + if (hose->iommu_enable) + piu_flush_all(hose); + + register_syscore_ops(&iommu_cpu_syscore_ops); + + return 1; +} +subsys_initcall_sync(sunway_iommu_init); + +/******************************************************************************* + * + * DMA OPS Functions + * + ******************************************************************************/ + +struct sunway_iommu *get_first_iommu_from_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct sunway_iommu_dev *entry; + + entry = list_first_entry(&sdomain->dev_list, struct sunway_iommu_dev, list); + iommu = entry->iommu; + + return iommu; +} + +static unsigned long +sunway_iommu_unmap_page(struct sunway_iommu_domain *sunway_domain, + unsigned long iova, unsigned long page_size) +{ + unsigned long offset, iova_pfn; + unsigned long *pte_base, *pte; + unsigned long grn; + int level, current_level; + int tmp = 1; + + pr_debug("%s iova %#lx, page_size %#lx\n", __func__, iova, 
page_size); + BUG_ON(!is_power_of_2(page_size)); + + switch (page_size) { + case (1UL << 33): + level = 1; + grn = PTE_GRN_8G; + break; + case (1UL << 29): + level = 2; + grn = PTE_GRN_512M; + break; + case (1UL << 23): + level = 2; + grn = PTE_GRN_8M; + break; + default: + level = 3; + break; + } + + pte_base = sunway_domain->pt_root; + iova_pfn = iova >> PAGE_SHIFT; + offset = (iova_pfn >> 20) & 0x1ff; + current_level = 1; + while (current_level <= level) { + pte = &pte_base[offset]; + if (current_level == level) { + if (grn == PTE_GRN_512M) { + int i; + + for (i = 0; i < 64; i++) { + *(pte + i) = 0; + flush_pcache_by_addr(sunway_domain, (unsigned long)pte); + } + + } else { + *pte = 0; + flush_pcache_by_addr(sunway_domain, (unsigned long)pte); + } + flush_ptlb_by_addr(sunway_domain, (iova >> PAGE_SHIFT)); + break; + } + + pte_base = (unsigned long *)((*pte & (~PTE_FLAGS_MASK)) | PAGE_OFFSET); + offset = (iova_pfn >> (tmp--) * 10) & 0x3ff; + current_level++; + } + + return page_size; +} + +int sunway_iommu_map_page(struct sunway_iommu_domain *sunway_domain, + unsigned long bus_addr, unsigned long paddr, + size_t page_size) +{ + struct page *page; + struct sunway_iommu *iommu; + unsigned long iova_pfn, pte_val; + unsigned long *pte_base, *pte; + unsigned long offset, grn = 0; + int level = 0, current_level; + int tmp = 1; + + iommu = get_first_iommu_from_domain(sunway_domain); + if (!iommu) + return -1; + iova_pfn = bus_addr >> PAGE_SHIFT; + pte_base = sunway_domain->pt_root; + + switch (page_size) { + case (1UL << 33): + level = 1; + grn = PTE_GRN_8G; + break; + case (1UL << 29): + level = 2; + grn = PTE_GRN_512M; + break; + case (1UL << 23): + grn = PTE_GRN_8M; + level = 2; + break; + default: + level = 3; + break; + } + + offset = (iova_pfn >> 20) & 0x1ff; + current_level = 1; + while (current_level <= level) { + pte = &pte_base[offset]; + + if (!(*pte) || (current_level == level)) { + pte_val = PTE_VALID | PTE_RWE | grn; + if (current_level == level) { + 
*(volatile u64 *)(pte) = 0; + pte_val |= ((paddr & PAGE_MASK) | LAST_STAGE); + } else { + page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); + if (!page) { + pr_err("Allocating level%d page table pages failed.\n", (level + 1)); + return -ENOMEM; + } + + pte_val |= (page_to_phys(page) & PAGE_MASK); + } + + if ((grn == PTE_GRN_512M) && (current_level == 2)) { + int i; + + for (i = 0; i < 64; i++) { + cmpxchg64((volatile u64 *)(pte + i), 0UL, pte_val); + flush_pcache_by_addr(sunway_domain, (unsigned long)(pte + i)); + } + } else { + if (cmpxchg64((volatile u64 *)pte, 0UL, pte_val)) + free_page((unsigned long)page_address(page)); + else + flush_pcache_by_addr(sunway_domain, (unsigned long)pte); + } + } + + pte_base = (unsigned long *)__va((*pte) & (~PTE_FLAGS_MASK)); + offset = (iova_pfn >> (tmp--) * 10) & 0x3ff; + current_level++; + } + + return 0; +} + +static unsigned long +sunway_alloc_iova(struct dma_domain *dma_dom, unsigned long pages, struct pci_dev *pdev) +{ + struct device *dev; + unsigned long pfn = 0; + + pages = __roundup_pow_of_two(pages); + dev = &(pdev->dev); + if (min(dev->coherent_dma_mask, *dev->dma_mask) == DMA_BIT_MASK(32)) { + pfn = alloc_iova_fast(&dma_dom->iovad, pages, + IOVA_PFN(SW64_32BIT_DMA_LIMIT), true); + } else { + /* IOVA boundary should be 16M ~ 3.5G */ + pfn = alloc_iova_fast(&dma_dom->iovad, pages, + IOVA_PFN(SW64_64BIT_DMA_LIMIT), true); + } + + return (pfn << PAGE_SHIFT); +} + +static void sunway_free_iova(struct dma_domain *dma_dom, + unsigned long address, unsigned long pages) +{ + pages = __roundup_pow_of_two(pages); + address >>= PAGE_SHIFT; + + free_iova_fast(&dma_dom->iovad, address, pages); +} + +static dma_addr_t +__sunway_map_single(struct dma_domain *dma_dom, + struct pci_dev *pdev, phys_addr_t paddr, size_t size) +{ + dma_addr_t ret, address, start; + unsigned long npages, i; + + npages = iommu_num_pages(paddr, size, PAGE_SIZE); + + address = sunway_alloc_iova(dma_dom, npages, pdev); + if (!address) + 
return 0; + + start = address; + for (i = 0; i < npages; ++i) { + ret = sunway_iommu_map_page(&dma_dom->sdomain, start, + paddr, PAGE_SIZE); + if (ret) { + pr_info("error when map page.\n"); + goto out_unmap; + } + + start += PAGE_SIZE; + paddr += PAGE_SIZE; + } + + address += paddr & ~PAGE_MASK; + return address; + +out_unmap: + for (--i; i >= 0; --i) { + start -= PAGE_SIZE; + sunway_iommu_unmap_page(&dma_dom->sdomain, start, PAGE_SIZE); + } + + sunway_free_iova(dma_dom, address, npages); + return 0; +} + +static dma_addr_t +pci_iommu_map_single(struct pci_dev *pdev, + struct dma_domain *dma_dom, void *cpu_addr, size_t size) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(pdev->bus); + unsigned long paddr; + + if (hose == NULL) { + pr_err("%s: hose does not exist!\n", __func__); + return 0; + } + + paddr = __sunway_map_single(dma_dom, pdev, __pa(cpu_addr), size); + + pr_debug("pci_alloc_consistent: %zx -> [%px,%lx] from %ps\n", + size, cpu_addr, paddr, __builtin_return_address(0)); + + return paddr; +} + +static void *sunway_alloc_coherent(struct device *dev, + size_t size, + dma_addr_t *dma_addr, gfp_t gfp, + unsigned long attrs) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_controller *hose; + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct sunway_iommu_dev *sdev; + struct page *page; + void *cpu_addr; + + if (!pdev) + return NULL; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return NULL; + + gfp &= ~GFP_DMA; + +try_again: + page = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, get_order(size)); + if (!page) { + pr_err("Allocating pages failed.\n"); + return NULL; + } + + cpu_addr = page_address(page); + if (!cpu_addr) { + pr_info + ("pci_alloc_consistent: get_free_pages failed from %ps\n", + __builtin_return_address(0)); + + return NULL; + } + + *dma_addr = __pa(cpu_addr); + if (!(hose->iommu_enable)) + return cpu_addr; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough & 
DMA_MASK64) + return cpu_addr; + else if (sdev->passthrough) { + if (min(dev->coherent_dma_mask, *dev->dma_mask) > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; + return cpu_addr; + } + + __free_pages(page, get_order(size)); + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->alloc(dev, size, dma_addr, gfp, attrs); + } + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + *dma_addr = pci_iommu_map_single(pdev, dma_dom, cpu_addr, size); + if (*dma_addr == 0) { + free_pages((unsigned long)cpu_addr, get_order(size)); + if (gfp & GFP_DMA) + return NULL; + + gfp |= GFP_DMA; + goto try_again; + } + + return cpu_addr; +} + +static void +__sunway_unmap_single(struct dma_domain *dma_dom, dma_addr_t dma_addr, size_t size) +{ + dma_addr_t start; + unsigned long npages; + int i; + + npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); + dma_addr &= PAGE_MASK; + start = dma_addr; + + for (i = 0; i < npages; i++) { + sunway_iommu_unmap_page(&dma_dom->sdomain, start, PAGE_SIZE); + start += PAGE_SIZE; + } + + sunway_free_iova(dma_dom, dma_addr, npages); + pr_debug("pci_free_consistent: %zx -> [%llx] from %ps\n", + size, dma_addr, __builtin_return_address(0)); + +} + +static void +sunway_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_addr, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + + if (!pdev) + goto out_unmap; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose || !(hose->iommu_enable)) + goto out_unmap; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough) + goto out_unmap; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + __sunway_unmap_single(dma_dom, dma_addr, size); + goto out_free; + +out_unmap: + pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); + +out_free: + 
pr_debug("sunway_free_consistent: [%llx,%zx] from %ps\n", + dma_addr, size, __builtin_return_address(0)); + + free_pages((unsigned long)vaddr, get_order(size)); +} + +static dma_addr_t +sunway_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + phys_addr_t paddr = page_to_phys(page) + offset; + + if (!pdev) + return 0; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose || !(hose->iommu_enable)) + return paddr; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough & DMA_MASK64) + return paddr; + else if (sdev->passthrough) { + if (min(dev->coherent_dma_mask, *dev->dma_mask) > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; + return paddr; + } + + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->map_page(dev, page, offset, size, dir, attrs); + } + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + return pci_iommu_map_single(pdev, dma_dom, + (char *)page_address(page) + offset, size); +} + +static void +sunway_unmap_page(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct pci_dev *pdev; + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + + pdev = to_pci_dev(dev); + if (!pdev) + return; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (hose == NULL) + return; + + if (!hose->iommu_enable) + return; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough) + return; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + __sunway_unmap_single(dma_dom, dma_addr, size); +} + +#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG))) +static int +sunway_map_sg(struct device *dev, 
struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom = NULL; + struct scatterlist *sg; + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + int i, out_nents = 0; + + if (dir == PCI_DMA_NONE) + BUG(); + + if (!pdev) + return 0; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return 0; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + for_each_sg(sgl, sg, nents, i) { + BUG_ON(!sg_page(sg)); + + sg_dma_address(sg) = __pa(SG_ENT_VIRT_ADDRESS(sg)); + if (!(hose->iommu_enable)) + goto check; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough & DMA_MASK64) + goto check; + else if (sdev->passthrough) { + if (min(dev->coherent_dma_mask, *dev->dma_mask) > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; + goto check; + } + + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->map_sg(dev, sgl, nents, dir, attrs); + } + + sg_dma_address(sg) = + pci_iommu_map_single(pdev, dma_dom, + SG_ENT_VIRT_ADDRESS(sg), sg->length); +check: + if (sg_dma_address(sg) == 0) + goto error; + + sg_dma_len(sg) = sg->length; + out_nents++; + } + + return nents; + +error: + pr_warn("pci_map_sg failed:"); + pr_warn("could not allocate dma page tables\n"); + + if (out_nents) + pci_unmap_sg(pdev, sgl, out_nents, dir); + return 0; +} + +static void +sunway_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct scatterlist *sg; + struct pci_dev *pdev; + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + dma_addr_t dma_addr; + long size; + int j; + + pdev = to_pci_dev(dev); + if (!pdev) + return; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose->iommu_enable) + return; + + sdev = dev_iommu_priv_get(dev); + 
if (sdev->passthrough) + return; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + for_each_sg(sgl, sg, nents, j) { + dma_addr = sg->dma_address; + size = sg->dma_length; + if (!size) + break; + + __sunway_unmap_single(dma_dom, dma_addr, size); + } +} + +static const struct dma_map_ops sunway_dma_ops = { + .alloc = sunway_alloc_coherent, + .free = sunway_free_coherent, + .map_sg = sunway_map_sg, + .unmap_sg = sunway_unmap_sg, + .map_page = sunway_map_page, + .unmap_page = sunway_unmap_page, + .dma_supported = dma_direct_supported, +}; + +/********************************************************************** + * + * IOMMU OPS Functions + * + **********************************************************************/ + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + switch (type) { + case IOMMU_DOMAIN_UNMANAGED: + sdomain = sunway_domain_alloc(); + if (!sdomain) { + pr_err("Allocating sunway_domain failed!\n"); + return NULL; + } + + sdomain->domain.geometry.aperture_start = 0UL; + sdomain->domain.geometry.aperture_end = ~0ULL; + sdomain->domain.geometry.force_aperture = true; + sdomain->type = IOMMU_DOMAIN_UNMANAGED; + break; + + case IOMMU_DOMAIN_DMA: + dma_dom = dma_domain_alloc(); + if (!dma_dom) { + pr_err("Failed to alloc dma domain!\n"); + return NULL; + } + + sdomain = &dma_dom->sdomain; + break; + + case IOMMU_DOMAIN_IDENTITY: + sdomain = sunway_domain_alloc(); + if (!sdomain) + return NULL; + + sdomain->type = IOMMU_DOMAIN_IDENTITY; + break; + + default: + return NULL; + } + + return &sdomain->domain; +} + +static void clean_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *entry; + unsigned long flags; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + + while (!list_empty(&sdomain->dev_list)) { + entry = list_first_entry(&sdomain->dev_list, + struct sunway_iommu_dev, list); + + 
BUG_ON(!entry->domain); + __detach_device(entry); + } + + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); +} + +static void sunway_iommu_domain_free(struct iommu_domain *dom) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + sdomain = to_sunway_domain(dom); + + if (sdomain->dev_cnt > 0) + clean_domain(sdomain); + + BUG_ON(sdomain->dev_cnt != 0); + + if (!dom) + return; + + switch (dom->type) { + case IOMMU_DOMAIN_DMA: + dma_dom = to_dma_domain(sdomain); + dma_domain_free(dma_dom); + break; + + default: + free_pagetable(sdomain); + sunway_domain_free(sdomain); + break; + } + +} + +static int sunway_iommu_attach_device(struct iommu_domain *dom, struct device *dev) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + struct sunway_iommu_dev *sdev; + struct pci_dev *pdev; + struct pci_controller *hose; + int ret; + + pdev = to_pci_dev(dev); + if (!pdev) + return -EINVAL; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return -EINVAL; + + if (!hose->iommu_enable) + return -EINVAL; + + sdev = dev_iommu_priv_get(dev); + if (!sdev) + return -EINVAL; + + if (sdev->domain) + detach_device(dev); + + ret = attach_device(dev, sdomain); + + return ret; +} + +static void sunway_iommu_detach_device(struct iommu_domain *dom, struct device *dev) +{ + struct sunway_iommu_dev *sdev; + struct pci_dev *pdev = to_pci_dev(dev); + + if (!pdev) + return; + + sdev = dev_iommu_priv_get(dev); + if (sdev->domain != NULL) + detach_device(dev); +} + +static phys_addr_t +sunway_iommu_iova_to_phys(struct iommu_domain *dom, dma_addr_t iova) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + unsigned long paddr, grn; + unsigned long is_last; + + if (iova > SW64_BAR_ADDRESS) + return iova; + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL1_VAL); + if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) + return 0; + + is_last = paddr & SW64_PTE_LAST_MASK; + grn = paddr & SW64_PTE_GRN_MASK; + if (is_last) { + if (grn == 
PTE_GRN_8G) { + paddr &= ~PTE_FLAGS_MASK; + paddr += iova & PAGE_8G_OFFSET_MASK; + return paddr; + } + + return 0; + } + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL2_VAL); + if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) + return 0; + + is_last = paddr & SW64_PTE_LAST_MASK; + grn = paddr & SW64_PTE_GRN_MASK; + if (is_last) { + if (grn == PTE_GRN_512M) { + paddr &= ~PTE_FLAGS_MASK; + paddr += iova & PAGE_512M_OFFSET_MASK; + return paddr; + } + + if (grn == PTE_GRN_8M) { + paddr &= ~PTE_FLAGS_MASK; + paddr += iova & PAGE_8M_OFFSET_MASK; + return paddr; + } + + return 0; + } + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL3_VAL); + if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) + return 0; + + grn = paddr & SW64_PTE_GRN_MASK; + if (grn != 0) + return 0; + + paddr &= ~PTE_FLAGS_MASK; + paddr += iova & PAGE_MASK; + return paddr; +} + +static int +sunway_iommu_map(struct iommu_domain *dom, unsigned long iova, + phys_addr_t paddr, size_t page_size, int iommu_prot, gfp_t gfp) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + int ret; + + /* + * As VFIO cannot distinguish between normal DMA request + * and pci device BAR, check should be introduced manually + * to avoid VFIO trying to map pci config space. 
+ */ + if (iova > SW64_BAR_ADDRESS) + return 0; + + mutex_lock(&sdomain->api_lock); + ret = sunway_iommu_map_page(sdomain, iova, paddr, page_size); + mutex_unlock(&sdomain->api_lock); + + return ret; +} + +static size_t +sunway_iommu_unmap(struct iommu_domain *dom, unsigned long iova, + size_t page_size, + struct iommu_iotlb_gather *gather) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + size_t unmap_size; + + if (iova > SW64_BAR_ADDRESS) + return page_size; + + mutex_lock(&sdomain->api_lock); + unmap_size = sunway_iommu_unmap_page(sdomain, iova, page_size); + mutex_unlock(&sdomain->api_lock); + + return unmap_size; +} + +static struct iommu_group *sunway_iommu_device_group(struct device *dev) +{ + return pci_device_group(dev); +} + +static void iommu_uninit_device(struct device *dev) +{ + struct sunway_iommu_dev *sdev; + + sdev = dev_iommu_priv_get(dev); + if (!sdev) + return; + + if (sdev->domain) + detach_device(dev); + + dev_iommu_priv_set(dev, NULL); +} + +static void sunway_iommu_release_device(struct device *dev) +{ + struct pci_dev *pdev; + struct pci_controller *hose; + + pdev = to_pci_dev(dev); + if (!pdev) + return; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose->iommu_enable) + return; + + iommu_uninit_device(dev); +} + +static int iommu_init_device(struct device *dev) +{ + struct sunway_iommu_dev *sdev; + struct sunway_iommu *iommu; + struct pci_dev *pdev; + struct pci_controller *hose; + + if (dev_iommu_priv_get(dev)) + return 0; + + sdev = kzalloc(sizeof(struct sunway_iommu_dev), GFP_KERNEL); + if (!sdev) + return -ENOMEM; + + pdev = to_pci_dev(dev); + hose = pci_bus_to_pci_controller(pdev->bus); + iommu = hose->pci_iommu; + llist_add(&sdev->dev_data_list, &dev_data_list); + sdev->pdev = pdev; + sdev->iommu = iommu; + + dev_iommu_priv_set(dev, sdev); + + return 0; +} + +static struct iommu_device *sunway_iommu_probe_device(struct device *dev) +{ + struct pci_dev *pdev; + struct pci_controller *hose; + struct 
sunway_iommu *iommu; + int ret; + + pdev = to_pci_dev(dev); + if (!pdev) + return ERR_PTR(-ENODEV); + + if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) + return ERR_PTR(-ENODEV); + + if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) + return ERR_PTR(-ENODEV); + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return ERR_PTR(-ENODEV); + + if (!hose->iommu_enable) + return ERR_PTR(-ENODEV); + + if (dev_iommu_priv_get(dev)) { + iommu = hose->pci_iommu; + return &iommu->iommu; + } + + ret = iommu_init_device(dev); + if (ret) + return ERR_PTR(ret); + + iommu = hose->pci_iommu; + + return &iommu->iommu; +} + +static int sunway_iommu_def_domain_type(struct device *dev) +{ + struct sunway_iommu_dev *sdev; + + sdev = dev_iommu_priv_get(dev); + if (sdev->domain) + return 0; + + return sdev->domain->type; +} + +static bool sunway_iommu_capable(enum iommu_cap cap) +{ + switch (cap) { + case IOMMU_CAP_INTR_REMAP: + return true; + default: + return false; + } +} + +static void sunway_iommu_probe_finalize(struct device *dev) +{ + struct iommu_domain *domain; + + domain = iommu_get_domain_for_dev(dev); + if (domain) + set_dma_ops(dev, &sunway_dma_ops); +} + +const struct iommu_ops sunway_iommu_ops = { + .capable = sunway_iommu_capable, + .domain_alloc = sunway_iommu_domain_alloc, + .domain_free = sunway_iommu_domain_free, + .attach_dev = sunway_iommu_attach_device, + .detach_dev = sunway_iommu_detach_device, + .probe_device = sunway_iommu_probe_device, + .probe_finalize = sunway_iommu_probe_finalize, + .release_device = sunway_iommu_release_device, + .map = sunway_iommu_map, + .unmap = sunway_iommu_unmap, + .iova_to_phys = sunway_iommu_iova_to_phys, + .device_group = sunway_iommu_device_group, + .pgsize_bitmap = SW64_IOMMU_PGSIZES, + .def_domain_type = sunway_iommu_def_domain_type, +}; + +/***************************************************************************** + * + * Boot param handle + * Each bit of iommu_enable bitmap represents an rc enable, and every 8 bits 
+ * represents one cpu node. For example, iommu_enable=0x0100 means enabling + * rc0 for cpu node 1. + * + *****************************************************************************/ +static int __init iommu_enable_setup(char *str) +{ + int ret; + unsigned long rc_bitmap = 0xffffffffUL; + + ret = kstrtoul(str, 16, &rc_bitmap); + iommu_enable_cmd = rc_bitmap; + + return ret; +} +__setup("iommu_enable=", iommu_enable_setup); diff --git a/drivers/iommu/sw64/sunway_iommu.h b/drivers/iommu/sw64/sunway_iommu.h new file mode 100644 index 000000000000..94a155001d1b --- /dev/null +++ b/drivers/iommu/sw64/sunway_iommu.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file contains declarations and inline functions for interfacing + * with the PCI initialization routines. + */ +#include +#include +#include +#include + +struct sunway_iommu_bypass_id { + unsigned int vendor; + unsigned int device; +}; + +struct sunway_iommu { + int index; + bool enabled; + unsigned long *iommu_dtbr; + spinlock_t dt_lock; /* Device Table Lock */ + int node; /* NUMA node */ + + struct pci_controller *hose_pt; + struct iommu_device iommu; /* IOMMU core code handle */ +}; + +struct sunway_iommu_dev { + struct list_head list; /* For domain->dev_list */ + struct llist_node dev_data_list; /* Global device list */ + u16 devid; + int alias; + unsigned int passthrough; + struct sunway_iommu *iommu; + struct pci_dev *pdev; + + spinlock_t lock; /* Lock the page table mainly */ + struct sunway_iommu_domain *domain; /* Domain device is bound to */ +}; + +struct sunway_iommu_domain { + unsigned int type; + spinlock_t lock; + struct mutex api_lock; + u16 id; /* Domain ID */ + struct list_head list; /* For list of all SW domains */ + struct list_head dev_list; /* List of devices in this domain */ + struct iommu_domain domain; /* IOMMU domain handle */ + unsigned long *pt_root; /* Page Table root */ + unsigned int dev_cnt; /* Number of devices in this domain */ +}; + +struct 
sw64dev_table_entry { + u64 data; +}; + +struct sunway_iommu_group { + struct pci_dev *dev; + struct iommu_group *group; +}; + +#define SW64_IOMMU_ENTRY_VALID ((1UL) << 63) +#define SW64_PTE_LAST_MASK ((1UL) << 8) /*last stage valid*/ +#define SW64_DMA_START 0x1000000 +#define SW64_PTE_GRN_MASK ((0x3UL) << 4) +#define PAGE_8M_SHIFT 23 +#define PAGE_512M_SHIFT 29 +#define PAGE_8G_SHIFT 33 +#define SW64_IOMMU_ENABLE 3 +#define SW64_IOMMU_DISABLE 0 +#define SW64_IOMMU_LEVEL1_OFFSET 0x1ff +#define SW64_IOMMU_LEVEL2_OFFSET 0x3ff +#define SW64_IOMMU_LEVEL3_OFFSET 0x3ff +#define SW64_IOMMU_BYPASS 0x1 +#define SW64_IOMMU_MAP_FLAG ((0x1UL) << 20) + +#define PAGE_SHIFT_IOMMU 18 +#define PAGE_SIZE_IOMMU (_AC(1, UL) << PAGE_SHIFT_IOMMU) + +#define PCACHE_FLUSHPADDR_MASK 0xffffffffff80UL -- Gitee From 77b7a42bc7d5010f1cdbafbd6d9a307625d448c8 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:48:45 +0800 Subject: [PATCH 0345/2138] anolis: drivers: irqchip: add sw64 support ANBZ: #4688 Add irqchip drivers for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/irqchip/Kconfig | 33 ++ drivers/irqchip/Makefile | 11 + drivers/irqchip/irq-sunway-cpu.c | 213 ++++++++++++ drivers/irqchip/irq-sunway-msi-v2.c | 512 ++++++++++++++++++++++++++++ drivers/irqchip/irq-sunway-msi-vt.c | 280 +++++++++++++++ drivers/irqchip/irq-sunway-msi.c | 472 +++++++++++++++++++++++++ drivers/irqchip/irq-sw64-intc-v2.c | 89 +++++ drivers/irqchip/irq-sw64-lpc-intc.c | 137 ++++++++ 8 files changed, 1747 insertions(+) create mode 100644 drivers/irqchip/irq-sunway-cpu.c create mode 100644 drivers/irqchip/irq-sunway-msi-v2.c create mode 100644 drivers/irqchip/irq-sunway-msi-vt.c create mode 100644 drivers/irqchip/irq-sunway-msi.c create mode 100644 drivers/irqchip/irq-sw64-intc-v2.c create mode 100644 drivers/irqchip/irq-sw64-lpc-intc.c diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index e7b736800dd0..3d506b42f31b 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -11,6 +11,39 @@ config ARM_GIC select IRQ_DOMAIN_HIERARCHY select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP +config SW64_INTC_V2 + bool "SW64 Interrupt Controller V2" + depends on UNCORE_XUELANG + default y + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + help + This enables support for the INTC chip found in SW CHIP3 systems. + The INTC controls devices interrupts and connects them to each + core's local interrupt controller. + +config SW64_LPC_INTC + bool "SW64 cpu builtin LPC Interrupt Controller" + depends on SW64_INTC_V2 + help + Say yes here to add support for the SW64 cpu builtin LPC + IRQ controller. 
+ +config SW64_IRQ_CPU + bool + depends on SW64 + default y + +config SW64_IRQ_MSI + bool + depends on SW64 && PCI_MSI + default y + +config SW64_IRQ_MSI_VT + bool + depends on SW64_IRQ_MSI + default y + config ARM_GIC_PM bool depends on PM diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index ffd945fe71aa..466eb0bd2b52 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -27,6 +27,17 @@ obj-$(CONFIG_SUN6I_R_INTC) += irq-sun6i-r.o obj-$(CONFIG_SUNXI_NMI_INTC) += irq-sunxi-nmi.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o +obj-$(CONFIG_SW64_INTC_V2) += irq-sw64-intc-v2.o +obj-$(CONFIG_SW64_LPC_INTC) += irq-sw64-lpc-intc.o +obj-$(CONFIG_SW64_IRQ_CPU) += irq-sunway-cpu.o + +ifeq ($(CONFIG_UNCORE_XUELANG),y) +obj-$(CONFIG_SW64_IRQ_MSI) += irq-sunway-msi.o +else +obj-$(CONFIG_SW64_IRQ_MSI) += irq-sunway-msi-v2.o +endif + +obj-$(CONFIG_SW64_IRQ_MSI_VT) += irq-sunway-msi-vt.o obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o diff --git a/drivers/irqchip/irq-sunway-cpu.c b/drivers/irqchip/irq-sunway-cpu.c new file mode 100644 index 000000000000..ff7455c0f3ec --- /dev/null +++ b/drivers/irqchip/irq-sunway-cpu.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include +#include +#include +#include + +static void handle_intx(unsigned int offset) +{ + struct pci_controller *hose; + unsigned long value; + + hose = hose_head; + for (hose = hose_head; hose; hose = hose->next) { + value = read_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7)); + if (value >> 63) { + value = value & (~(1UL << 62)); + write_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7), value); + handle_irq(hose->int_irq); + value = value | (1UL << 62); + write_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7), value); + } + + if (IS_ENABLED(CONFIG_PCIE_PME)) { + value = 
read_piu_ior0(hose->node, hose->index, PMEINTCONFIG); + if (value >> 63) { + handle_irq(hose->service_irq); + write_piu_ior0(hose->node, hose->index, PMEINTCONFIG, value); + } + } + + if (IS_ENABLED(CONFIG_PCIEAER)) { + value = read_piu_ior0(hose->node, hose->index, AERERRINTCONFIG); + if (value >> 63) { + handle_irq(hose->service_irq); + write_piu_ior0(hose->node, hose->index, AERERRINTCONFIG, value); + } + } + + if (hose->iommu_enable) { + value = read_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS); + if (value >> 63) + handle_irq(hose->int_irq); + } + } +} + +static void handle_device_interrupt(unsigned long irq_info) +{ + unsigned int i; + + if (is_guest_or_emul()) { + handle_irq(irq_info); + return; + } + + for (i = 0; i < 4; i++) { + if ((irq_info >> i) & 0x1) + handle_intx(i); + } +} + +/* Performance counter hook. A module can override this to do something useful. */ +static void dummy_perf(unsigned long vector, struct pt_regs *regs) +{ + irq_err_count++; + pr_crit("Performance counter interrupt!\n"); +} + +void (*perf_irq)(unsigned long vector, struct pt_regs *regs) = dummy_perf; +EXPORT_SYMBOL(perf_irq); + +static void handle_fault_int(void) +{ + int node; + unsigned long value; + + node = __this_cpu_read(hard_node_id); + pr_info("enter fault int, si_fault_stat = %#lx\n", + sw64_io_read(node, SI_FAULT_STAT)); + sw64_io_write(node, SI_FAULT_INT_EN, 0); + sw64_io_write(node, DLI_RLTD_FAULT_INTEN, 0); +#if defined(CONFIG_UNCORE_XUELANG) + value = 0; +#elif defined(CONFIG_UNCORE_JUNZHANG) + value = sw64_io_read(node, FAULT_INT_CONFIG); + value |= (1 << 8); +#endif + __io_write_fault_int_en(node, value); +} + +static void handle_mt_int(void) +{ + pr_info("enter mt int\n"); +} + +static void handle_nmi_int(void) +{ + pr_info("enter nmi int\n"); +} + +static void handle_dev_int(struct pt_regs *regs) +{ + unsigned long config_val, val, stat; + int node = 0; + unsigned int hwirq; + + config_val = sw64_io_read(node, DEV_INT_CONFIG); + val = config_val & 
(~(1UL << 8)); + sw64_io_write(node, DEV_INT_CONFIG, val); + stat = sw64_io_read(node, MCU_DVC_INT); + + while (stat) { + hwirq = ffs(stat) - 1; + generic_handle_domain_irq(NULL, hwirq); + stat &= ~(1UL << hwirq); + } + /*do handle irq */ + + sw64_io_write(node, DEV_INT_CONFIG, config_val); +} + +asmlinkage void do_entInt(unsigned long type, unsigned long vector, + unsigned long irq_arg, struct pt_regs *regs) +{ + struct pt_regs *old_regs; + extern char __idle_start[], __idle_end[]; + + if (is_guest_or_emul()) { + if ((type & 0xffff) > 15) { + vector = type; + if (vector == 16) + type = INT_INTx; + else + type = INT_MSI; + } + } + + /* restart idle routine if it is interrupted */ + if (regs->pc > (u64)__idle_start && regs->pc < (u64)__idle_end) + regs->pc = (u64)__idle_start; + + switch (type & 0xffff) { + case INT_MSI: + old_regs = set_irq_regs(regs); + handle_pci_msi_interrupt(type, vector, irq_arg); + set_irq_regs(old_regs); + return; + case INT_INTx: + old_regs = set_irq_regs(regs); + handle_device_interrupt(vector); + set_irq_regs(old_regs); + return; + + case INT_IPI: +#ifdef CONFIG_SMP + handle_ipi(regs); + return; +#else + irq_err_count++; + pr_crit("Interprocessor interrupt? 
You must be kidding!\n"); +#endif + break; + case INT_RTC: + old_regs = set_irq_regs(regs); + sw64_timer_interrupt(); + set_irq_regs(old_regs); + return; + case INT_VT_SERIAL: + old_regs = set_irq_regs(regs); + handle_irq(type); + set_irq_regs(old_regs); + return; + case INT_VT_HOTPLUG: + old_regs = set_irq_regs(regs); + handle_irq(type); + set_irq_regs(old_regs); + return; + case INT_PC0: + perf_irq(PMC_PC0, regs); + return; + case INT_PC1: + perf_irq(PMC_PC1, regs); + return; + case INT_DEV: + old_regs = set_irq_regs(regs); + handle_dev_int(regs); + set_irq_regs(old_regs); + return; + case INT_FAULT: + old_regs = set_irq_regs(regs); + handle_fault_int(); + set_irq_regs(old_regs); + return; + case INT_MT: + old_regs = set_irq_regs(regs); + handle_mt_int(); + set_irq_regs(old_regs); + return; + case INT_NMI: + old_regs = set_irq_regs(regs); + handle_nmi_int(); + set_irq_regs(old_regs); + return; + default: + pr_crit("Hardware intr %ld %lx? uh?\n", type, vector); + } + pr_crit("PC = %016lx PS = %04lx\n", regs->pc, regs->ps); +} +EXPORT_SYMBOL(do_entInt); diff --git a/drivers/irqchip/irq-sunway-msi-v2.c b/drivers/irqchip/irq-sunway-msi-v2.c new file mode 100644 index 000000000000..36790dfedb33 --- /dev/null +++ b/drivers/irqchip/irq-sunway-msi-v2.c @@ -0,0 +1,512 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +#include +#include + +static struct irq_domain *msi_default_domain; +static DEFINE_RAW_SPINLOCK(vector_lock); +DEFINE_PER_CPU(vector_irq_t, vector_irq) = { + [0 ... 
PERCPU_MSI_IRQS - 1] = 0, +}; + +static struct sw64_msi_chip_data *alloc_sw_msi_chip_data(struct irq_data *irq_data) +{ + struct sw64_msi_chip_data *data; + int node; + + node = irq_data_get_node(irq_data); + data = kzalloc_node(sizeof(*data), GFP_KERNEL, node); + if (!data) + return NULL; + spin_lock_init(&data->cdata_lock); + return data; +} + +static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct sw64_msi_chip_data *chip_data; + int rcid; + + chip_data = irq_data_get_irq_chip_data(data->parent_data); + rcid = cpu_to_rcid(chip_data->dst_cpu); + msg->address_hi = MSI_ADDR_BASE_HI; + msg->address_lo = + (unsigned int)chip_data->msiaddr | + (rcid_to_msicid(rcid) << MSI_ADDR_DEST_ID_SHIFT); + msg->data = chip_data->vector; +} + +bool find_free_cpu_vector(const struct cpumask *search_mask, + int *found_cpu, int *found_vector) +{ + int vector, max_vector, cpu; + bool find_once_global = false; + + cpu = cpumask_first(search_mask); +try_again: + if (is_guest_or_emul()) { + vector = IRQ_PENDING_MSI_VECTORS_SHIFT; + max_vector = SWVM_IRQS; + } else { + vector = 0; + max_vector = 256; + } + for (; vector < max_vector; vector++) { + while (per_cpu(vector_irq, cpu)[vector]) { + cpu = cpumask_next(cpu, search_mask); + if (cpu >= nr_cpu_ids) { + if (vector == 255) { + if (find_once_global) { + pr_warn("No global free vector\n"); + return false; + } + pr_warn("No local free vector\n"); + search_mask = cpu_online_mask; + cpu = cpumask_first(search_mask); + find_once_global = true; + goto try_again; + } + cpu = cpumask_first(search_mask); + break; + } + } + if (!per_cpu(vector_irq, cpu)[vector]) + break; + } + + *found_cpu = cpu; + *found_vector = vector; + return true; +} + +static bool find_free_cpu_vectors(const struct cpumask *search_mask, int *found_cpu, int *found_vector, unsigned int nr_irqs) +{ + int i, vector, cpu; + bool found = false, find_once_global = false; + + cpu = cpumask_first(search_mask); +try_again: + for (vector = 0; vector < 
256; vector++) { + for (i = 0; i < nr_irqs; i++) + if (per_cpu(vector_irq, cpu)[vector + i]) + break; + + if (i == nr_irqs) { + found = true; + *found_cpu = cpu; + *found_vector = vector; + return found; + } + + vector += i; + } + + cpu = cpumask_next(cpu, search_mask); + if (cpu < nr_cpu_ids) + goto try_again; + else { + if (find_once_global) { + pr_warn("No global free vectors\n"); + return found; + } + pr_warn("No local free vectors\n"); + search_mask = cpu_online_mask; + cpu = cpumask_first(search_mask); + find_once_global = true; + goto try_again; + } +} + +static int sw64_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) +{ + struct sw64_msi_chip_data *cdata; + struct irq_data *irqd; + struct msi_desc *entry; + struct cpumask searchmask; + unsigned long flags; + int vector, cpu; + int i; + struct msi_msg msg; + + /* Is this valid ? */ + if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) + return -EINVAL; + + irqd = irq_domain_get_irq_data(msi_default_domain->parent, d->irq); + /* Don't do anything if the interrupt isn't started */ + if (!irqd_is_started(irqd)) + return IRQ_SET_MASK_OK; + + cdata = irqd->chip_data; + if (!cdata) + return -ENOMEM; + + /* + * If existing target cpu is already in the new mask and is online + * then do nothing. 
+ */ + if (cpu_online(cdata->dst_cpu) && cpumask_test_cpu(cdata->dst_cpu, cpumask)) + return IRQ_SET_MASK_OK; + + raw_spin_lock_irqsave(&vector_lock, flags); + + cpumask_and(&searchmask, cpumask, cpu_online_mask); + if (cdata->multi_msi > 1) { + if (!find_free_cpu_vectors(&searchmask, &cpu, + &vector, cdata->multi_msi)) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOSPC; + } + } else { + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOSPC; + } + } + + /* update new setting */ + entry = irq_get_msi_desc(irqd->irq); + spin_lock(&cdata->cdata_lock); + for (i = 0; i < cdata->multi_msi; i++) + per_cpu(vector_irq, cpu)[vector + i] = entry->irq + i; + BUG_ON(irq_chip_compose_msi_msg(irqd, &msg)); + __pci_write_msi_msg(entry, &msg); + cdata->prev_vector = cdata->vector; + cdata->prev_cpu = cdata->dst_cpu; + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->move_in_progress = true; + spin_unlock(&cdata->cdata_lock); + cpumask_copy(irq_data_get_affinity_mask(irqd), &searchmask); + + raw_spin_unlock_irqrestore(&vector_lock, flags); + + return 0; +} + +static void chip_irq_ack(struct irq_data *data) +{ +} + +static struct irq_chip pci_msi_controller = { + .name = "PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = chip_irq_ack, + .irq_compose_msi_msg = irq_msi_compose_msg, + .flags = IRQCHIP_SKIP_SET_WAKE, + .irq_set_affinity = sw64_set_affinity, +}; + +static int __assign_irq_vector(int virq, unsigned int nr_irqs, + struct irq_domain *domain, enum irq_alloc_type type) +{ + struct irq_data *irq_data; + const struct cpumask *mask; + struct cpumask searchmask; + struct sw64_msi_chip_data *cdata; + int node; + int i, vector, cpu; + unsigned long msiaddr; + + if (unlikely((nr_irqs > 1) && (!is_power_of_2(nr_irqs)))) + nr_irqs = __roundup_pow_of_two(nr_irqs); + + irq_data = irq_domain_get_irq_data(domain, virq); + BUG_ON(!irq_data); + 
irq_data->chip = &pci_msi_controller; + + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + if (type == IRQ_ALLOC_TYPE_MSI && nr_irqs > 1) { + if (!find_free_cpu_vectors(&searchmask, &cpu, + &vector, nr_irqs)) + return -ENOSPC; + + cdata = alloc_sw_msi_chip_data(irq_data); + if (!cdata) { + pr_warn("error alloc irq chip data\n"); + return -ENOMEM; + } + + for (i = 0; i < nr_irqs; i++) { + per_cpu(vector_irq, cpu)[vector + i] = virq + i; + + if (i) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_data->chip = &pci_msi_controller; + } + + irq_data->chip_data = cdata; + } + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->msiaddr = MSIX_MSG_ADDR; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + cdata->multi_msi = nr_irqs; + cdata->move_in_progress = false; + } else { + for (i = 0; i < nr_irqs; i++) { + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + per_cpu(vector_irq, cpu)[vector] = virq + i; + + if (i) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_data->chip = &pci_msi_controller; + } + + cdata = alloc_sw_msi_chip_data(irq_data); + if (!cdata) { + pr_warn("error alloc irq chip data\n"); + return -ENOMEM; + } + + irq_data->chip_data = cdata; + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->msiaddr = MSIX_MSG_ADDR; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + cdata->multi_msi = 1; + cdata->move_in_progress = false; + } + } + return 0; +} + +static int assign_irq_vector(int irq, unsigned int nr_irqs, + struct irq_domain *domain, enum irq_alloc_type type) +{ + int err; + unsigned long flags; + + raw_spin_lock_irqsave(&vector_lock, flags); + err = __assign_irq_vector(irq, 
nr_irqs, domain, type); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return err; +} + +static void sw64_vector_free_irqs(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + int i, j; + struct irq_data *irq_data; + unsigned long flags; + unsigned int multi_msi; + + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + if (irq_data && irq_data->chip_data) { + struct sw64_msi_chip_data *cdata; + + raw_spin_lock_irqsave(&vector_lock, flags); + cdata = irq_data->chip_data; + irq_domain_reset_irq_data(irq_data); + multi_msi = cdata->multi_msi; + for (j = 0; j < multi_msi; j++) + per_cpu(vector_irq, cdata->dst_cpu)[cdata->vector + j] = 0; + kfree(cdata); + raw_spin_unlock_irqrestore(&vector_lock, flags); + if (multi_msi > 1) + break; + } + } +} + +static void sw64_irq_free_descs(unsigned int virq, unsigned int nr_irqs) +{ + if (is_guest_or_emul()) { + vt_sw64_vector_free_irqs(virq, nr_irqs); + return irq_free_descs(virq, nr_irqs); + } + + return irq_domain_free_irqs(virq, nr_irqs); +} + +void arch_teardown_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *desc; + int i; + + for_each_pci_msi_entry(desc, dev) { + if (desc->irq) { + for (i = 0; i < desc->nvec_used; i++) + sw64_irq_free_descs(desc->irq + i, 1); + desc->irq = 0; + } + } +} + +static int sw64_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int err; + struct irq_alloc_info *info = arg; + enum irq_alloc_type msi_type; + + if (arg == NULL) + return -ENODEV; + msi_type = info->type; + err = assign_irq_vector(virq, nr_irqs, domain, msi_type); + if (err) + goto error; + return 0; +error: + sw64_vector_free_irqs(domain, virq, nr_irqs); + return err; +} + +static int pci_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct msi_desc *desc = first_pci_msi_entry(pdev); + + memset(arg, 0, sizeof(*arg)); + 
arg->msi_dev = pdev; + if (desc->msi_attrib.is_msix) + arg->type = IRQ_ALLOC_TYPE_MSIX; + else + arg->type = IRQ_ALLOC_TYPE_MSI; + return 0; +} + +static struct msi_domain_ops pci_msi_domain_ops = { + .msi_prepare = pci_msi_prepare, +}; + +static struct msi_domain_info pci_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + .ops = &pci_msi_domain_ops, + .chip = &pci_msi_controller, + .handler = handle_edge_irq, + .handler_name = "edge", +}; + +static int sw64_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &sw64_irq_chip, handle_level_irq); + irq_set_status_flags(virq, IRQ_LEVEL); + return 0; +} + +const struct irq_domain_ops sw64_msi_domain_ops = { + .map = sw64_irq_map, + .alloc = sw64_vector_alloc_irqs, + .free = sw64_vector_free_irqs, +}; + +int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + int err; + + if (is_guest_or_emul()) + return sw64_setup_vt_msi_irqs(pdev, nvec, type); + + if (!msi_default_domain) + return -EIO; + err = msi_domain_alloc_irqs(msi_default_domain, &pdev->dev, nvec); + return err; +} + +void arch_init_msi_domain(struct irq_domain *parent) +{ + struct irq_domain *sw64_irq_domain; + + if (is_guest_or_emul()) + return; + + sw64_irq_domain = irq_domain_add_tree(NULL, &sw64_msi_domain_ops, NULL); + BUG_ON(sw64_irq_domain == NULL); + irq_set_default_host(sw64_irq_domain); + msi_default_domain = pci_msi_create_irq_domain(NULL, + &pci_msi_domain_info, sw64_irq_domain); + if (!msi_default_domain) + pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); +} + +static void irq_move_complete(struct sw64_msi_chip_data *cdata, int cpu, int vector) +{ + if (likely(!cdata->move_in_progress)) + return; + if (cdata->dst_cpu == cpu) { + if (vector >= cdata->vector && + vector < cdata->vector + cdata->multi_msi) { + int i; + + raw_spin_lock(&vector_lock); + cdata->move_in_progress = false; + for (i 
= 0; i < cdata->multi_msi; i++) + per_cpu(vector_irq, cdata->prev_cpu)[cdata->prev_vector + i] = 0; + raw_spin_unlock(&vector_lock); + } + } +} + +void handle_pci_msi_interrupt(unsigned long type, unsigned long vector, unsigned long pci_msi1_addr) +{ + int i, irq, msi_index = 0; + int cpu, vector_index = 0; + unsigned long int_pci_msi[3]; + unsigned long *ptr; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + + if (is_guest_or_emul()) { + cpu = smp_processor_id(); + irq = per_cpu(vector_irq, cpu)[vector]; + handle_irq(irq); + return; + } + + ptr = (unsigned long *)pci_msi1_addr; + int_pci_msi[0] = *ptr; + int_pci_msi[1] = *(ptr + 1); + int_pci_msi[2] = *(ptr + 2); + + cpu = smp_processor_id(); + + for (i = 0; i < 4; i++) { + vector_index = i * 64; + while (vector != 0) { + int irq = 0; + + msi_index = find_next_bit(&vector, 64, msi_index); + if (msi_index == 64) { + msi_index = 0; + continue; + } + + irq = per_cpu(vector_irq, cpu)[vector_index + msi_index]; + irq_data = irq_domain_get_irq_data(msi_default_domain->parent, irq); + cdata = irq_data_get_irq_chip_data(irq_data); + spin_lock(&cdata->cdata_lock); + irq_move_complete(cdata, cpu, vector_index + msi_index); + spin_unlock(&cdata->cdata_lock); + handle_irq(irq); + + vector = vector & (~(1UL << msi_index)); + } + + vector = int_pci_msi[i % 3]; + } +} diff --git a/drivers/irqchip/irq-sunway-msi-vt.c b/drivers/irqchip/irq-sunway-msi-vt.c new file mode 100644 index 000000000000..df8c7d72671b --- /dev/null +++ b/drivers/irqchip/irq-sunway-msi-vt.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +static DEFINE_RAW_SPINLOCK(vector_lock); + +static void __vt_irq_msi_compose_msg(struct sw64_msi_chip_data *cdata, + struct msi_msg *msg) +{ + msg->address_hi = (u32)(VT_MSIX_MSG_ADDR >> 32); + msg->address_lo = (u32)(VT_MSIX_MSG_ADDR & 0xffffffff) + | VT_MSIX_ADDR_DEST_ID(cdata->dst_cpu); + msg->data = cdata->vector; +} + +static void vt_irq_msi_compose_msg(struct 
irq_data *irqd, struct msi_msg *msg) +{ + struct sw64_msi_chip_data *cdata; + + cdata = irqd->chip_data; + __vt_irq_msi_compose_msg(cdata, msg); +} + +static void vt_irq_msi_update_msg(struct irq_data *irqd, + struct sw64_msi_chip_data *cdata) +{ + struct msi_msg msg[2] = { [1] = { }, }; + + __vt_irq_msi_compose_msg(cdata, msg); + pci_write_msi_msg(irqd->irq, msg); +} + +static int +vt_set_affinity(struct irq_data *irqd, const struct cpumask *cpumask, + bool force) +{ + struct sw64_msi_chip_data *cdata; + struct cpumask searchmask; + int cpu, vector; + + /* Is this valid ? */ + if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) + return -EINVAL; + + if (!irqd_is_started(irqd)) + return IRQ_SET_MASK_OK; + + cdata = irqd->chip_data; + if (!cdata) + return -ENOMEM; + + /* + * If existing target coreid is already in the new mask, + * and is online then do nothing. + */ + if (cpu_online(cdata->dst_cpu) && cpumask_test_cpu(cdata->dst_cpu, cpumask)) + return IRQ_SET_MASK_OK; + + cpumask_and(&searchmask, cpumask, cpu_online_mask); + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + per_cpu(vector_irq, cpu)[vector] = irqd->irq; + spin_lock(&cdata->cdata_lock); + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->prev_cpu = cdata->dst_cpu; + cdata->prev_vector = cdata->vector; + cdata->move_in_progress = true; + spin_unlock(&cdata->cdata_lock); + cpumask_copy((struct cpumask *)irq_data_get_affinity_mask(irqd), &searchmask); + vt_irq_msi_update_msg(irqd, irqd->chip_data); + + return 0; +} + +static struct irq_chip vt_pci_msi_controller = { + .name = "PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = sw64_irq_noop, + .irq_compose_msi_msg = vt_irq_msi_compose_msg, + .irq_set_affinity = vt_set_affinity, +}; + +int chip_setup_vt_msix_irq(struct pci_dev *dev, struct msi_desc *desc) +{ + int virq, val_node = 0; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + struct pci_controller 
*hose = pci_bus_to_pci_controller(dev->bus); + unsigned long flags, node, rc_index; + const struct cpumask *mask; + + struct cpumask searchmask; + int cpu, vector; + + node = hose->node; + rc_index = hose->index; + mask = cpumask_of_node(node); + + raw_spin_lock_irqsave(&vector_lock, flags); + /* Find unused msi config reg in PIU-IOR0 */ + if (!node_online(node)) + val_node = next_node_in(node, node_online_map); + else + val_node = node; + + virq = irq_alloc_descs_from(NR_IRQS_LEGACY, desc->nvec_used, val_node); + if (virq < 0) { + pr_err("Failed to allocate IRQ(base 16, count %d)\n", desc->nvec_used); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return virq; + } + + irq_data = irq_get_irq_data(virq); + + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + cdata = kzalloc(sizeof(*cdata), GFP_KERNEL); + if (!cdata) + return -ENOMEM; + + per_cpu(vector_irq, cpu)[vector] = virq; + + irq_set_msi_desc(virq, desc); + irq_set_chip_and_handler_name(virq, &vt_pci_msi_controller, + handle_edge_irq, "edge"); + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->rc_index = hose->index; + cdata->rc_node = hose->node; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + + irq_data->chip_data = cdata; + + vt_irq_msi_update_msg(irq_data, irq_data->chip_data); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return 0; +} +EXPORT_SYMBOL(chip_setup_vt_msix_irq); + +int chip_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + struct msi_desc *desc; + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + 
unsigned long node, rc_index; + int virq = -1, val_node = 0; + unsigned long flags; + + const struct cpumask *mask; + struct cpumask searchmask; + int i, vector, cpu; + + if (type == PCI_CAP_ID_MSI && nvec > 32) + return 1; + + node = hose->node; + rc_index = hose->index; + raw_spin_lock_irqsave(&vector_lock, flags); + msi_for_each_desc(desc, &(dev->dev), MSI_DESC_ALL) { + /* Find unused msi config reg in PIU-IOR0 */ + if (!node_online(node)) + val_node = next_node_in(node, node_online_map); + else + val_node = node; + virq = irq_alloc_descs_from(NR_IRQS_LEGACY, desc->nvec_used, val_node); + if (virq < 0) { + pr_err("Failed to allocate IRQ(base 16, count %d)\n", desc->nvec_used); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return virq; + } + + irq_data = irq_get_irq_data(virq); + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + for (i = 0; i < desc->nvec_used; i++) { + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + cdata = kzalloc(sizeof(*cdata), GFP_KERNEL); + if (!cdata) + return -ENOMEM; + + per_cpu(vector_irq, cpu)[vector] = virq + i; + irq_set_msi_desc_off(virq, i, desc); + irq_set_chip_and_handler_name(virq + i, &vt_pci_msi_controller, handle_edge_irq, "edge"); + irq_data = irq_get_irq_data(virq + i); + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->rc_index = hose->index; + cdata->rc_node = hose->node; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + + irq_data->chip_data = cdata; + + vt_irq_msi_update_msg(irq_data, irq_data->chip_data); + } + } + + raw_spin_unlock_irqrestore(&vector_lock, flags); + return 0; +} +EXPORT_SYMBOL(chip_setup_vt_msi_irqs); + +void vt_sw64_vector_free_irqs(unsigned int virq, unsigned 
int nr_irqs) +{ + int i; + unsigned long flags; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_get_irq_data(virq + i); + if (irq_data && irq_data->chip_data) { + raw_spin_lock_irqsave(&vector_lock, flags); + cdata = irq_data->chip_data; + irq_data->hwirq = 0; + irq_data->chip = &no_irq_chip; + irq_data->chip_data = NULL; + per_cpu(vector_irq, cdata->dst_cpu)[cdata->vector] = 0; + kfree(cdata); + raw_spin_unlock_irqrestore(&vector_lock, flags); + } + } +} + +int __arch_setup_vt_msix_irqs(struct pci_dev *dev, int nvec, int type) +{ + struct msi_desc *entry; + int ret; + + msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { + ret = chip_setup_vt_msix_irq(dev, entry); + if (ret) + return ret; + } + + return 0; +} + +int sw64_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + int ret = 0; + + if (type == PCI_CAP_ID_MSI) + ret = chip_setup_vt_msi_irqs(dev, nvec, type); + else if (type == PCI_CAP_ID_MSIX) + ret = __arch_setup_vt_msix_irqs(dev, nvec, type); + else + pr_info("SW arch do not identify ID:%d\n", type); + + return ret; +} diff --git a/drivers/irqchip/irq-sunway-msi.c b/drivers/irqchip/irq-sunway-msi.c new file mode 100644 index 000000000000..060aa96711b7 --- /dev/null +++ b/drivers/irqchip/irq-sunway-msi.c @@ -0,0 +1,472 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include +#include + +static struct irq_domain *msi_default_domain; +static DEFINE_RAW_SPINLOCK(vector_lock); +DEFINE_PER_CPU(vector_irq_t, vector_irq) = { + [0 ... 
PERCPU_MSI_IRQS - 1] = 0, +}; + +static struct sw64_msi_chip_data *alloc_sw_msi_chip_data(struct irq_data *irq_data) +{ + struct sw64_msi_chip_data *data; + int node; + + node = irq_data_get_node(irq_data); + data = kzalloc_node(sizeof(*data), GFP_KERNEL, node); + if (!data) + return NULL; + spin_lock_init(&data->cdata_lock); + return data; +} + +static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct sw64_msi_chip_data *chip_data; + + chip_data = irq_data_get_irq_chip_data(data->parent_data); + + msg->address_hi = MSI_ADDR_BASE_HI; + msg->address_lo = MSI_ADDR_BASE_LO; + msg->data = chip_data->msi_config_index; +} + +bool find_free_cpu_vector(const struct cpumask *search_mask, + int *found_cpu, int *found_vector) +{ + int vector, max_vector, cpu; + bool find_once_global = false; + + cpu = cpumask_first(search_mask); +try_again: + if (is_guest_or_emul()) { + vector = IRQ_PENDING_MSI_VECTORS_SHIFT; + max_vector = SWVM_IRQS; + } else { + vector = 0; + max_vector = 256; + } + for (; vector < max_vector; vector++) { + while (per_cpu(vector_irq, cpu)[vector]) { + cpu = cpumask_next(cpu, search_mask); + if (cpu >= nr_cpu_ids) { + if (vector == 255) { + if (find_once_global) { + pr_warn("No global free vector\n"); + return false; + } + pr_warn("No local free vector\n"); + search_mask = cpu_online_mask; + cpu = cpumask_first(search_mask); + find_once_global = true; + goto try_again; + } + cpu = cpumask_first(search_mask); + break; + } + } + if (!per_cpu(vector_irq, cpu)[vector]) + break; + } + + *found_cpu = cpu; + *found_vector = vector; + return true; +} + +static unsigned long set_piu_msi_config(struct pci_controller *hose, int cpu, + int msiconf_index, int vector) +{ + unsigned int reg; + unsigned long msi_config; + int phy_cpu; + + msi_config = (1UL << 62) | ((unsigned long)vector << 10); + phy_cpu = cpu_to_rcid(cpu); + msi_config |= ((phy_cpu >> 5) << 6) | (phy_cpu & 0x1f); + reg = MSICONFIG0 + (unsigned long)(msiconf_index << 7); + 
write_piu_ior0(hose->node, hose->index, reg, msi_config); + msi_config = read_piu_ior0(hose->node, hose->index, reg); + set_bit(msiconf_index, hose->piu_msiconfig); + + return msi_config; +} + +static int sw64_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) +{ + struct sw64_msi_chip_data *cdata; + struct pci_controller *hose; + struct pci_dev *pdev; + struct irq_data *irqd; + struct msi_desc *entry; + struct cpumask searchmask; + unsigned long flags, msi_config; + int vector, cpu; + + /* Is this valid ? */ + if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) + return -EINVAL; + + irqd = irq_domain_get_irq_data(msi_default_domain->parent, d->irq); + /* Don't do anything if the interrupt isn't started */ + if (!irqd_is_started(irqd)) + return IRQ_SET_MASK_OK; + + cdata = irqd->chip_data; + if (!cdata) + return -ENOMEM; + + /* + * If existing target cpu is already in the new mask and is online + * then do nothing. + */ + if (cpu_online(cdata->dst_cpu) && cpumask_test_cpu(cdata->dst_cpu, cpumask)) + return IRQ_SET_MASK_OK; + + raw_spin_lock_irqsave(&vector_lock, flags); + + cpumask_and(&searchmask, cpumask, cpu_online_mask); + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOSPC; + } + + /* update new setting */ + entry = irq_get_msi_desc(irqd->irq); + pdev = (struct pci_dev *)msi_desc_to_pci_dev(entry); + hose = pci_bus_to_pci_controller(pdev->bus); + spin_lock(&cdata->cdata_lock); + per_cpu(vector_irq, cpu)[vector] = irqd->irq; + msi_config = set_piu_msi_config(hose, cpu, cdata->msi_config_index, vector); + cdata->prev_vector = cdata->vector; + cdata->prev_cpu = cdata->dst_cpu; + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->msi_config = msi_config; + cdata->move_in_progress = true; + spin_unlock(&cdata->cdata_lock); + cpumask_copy((struct cpumask *)irq_data_get_affinity_mask(irqd), &searchmask); + + raw_spin_unlock_irqrestore(&vector_lock, flags); 
+ + return 0; +} + +static void chip_irq_ack(struct irq_data *data) +{ +} + +static struct irq_chip pci_msi_controller = { + .name = "PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = chip_irq_ack, + .irq_compose_msi_msg = irq_msi_compose_msg, + .flags = IRQCHIP_SKIP_SET_WAKE, + .irq_set_affinity = sw64_set_affinity, +}; + +static int __assign_irq_vector(int virq, unsigned int nr_irqs, + struct irq_domain *domain, struct pci_controller *hose) +{ + struct irq_data *irq_data; + const struct cpumask *mask; + struct cpumask searchmask; + struct sw64_msi_chip_data *cdata; + int msiconf_index, node; + int i, vector, cpu; + unsigned long msi_config; + int start_index; + + if (unlikely((nr_irqs > 1) && (!is_power_of_2(nr_irqs)))) + nr_irqs = __roundup_pow_of_two(nr_irqs); + + msiconf_index = bitmap_find_next_zero_area(hose->piu_msiconfig, 256, 0, + nr_irqs, nr_irqs - 1); + + if (msiconf_index >= 256) { + pr_warn("No free msi on PIU!\n"); + return -ENOSPC; + } + + start_index = msiconf_index; + irq_data = irq_domain_get_irq_data(domain, virq); + BUG_ON(!irq_data); + irq_data->chip = &pci_msi_controller; + + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + for (i = 0; i < nr_irqs; i++) { + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + per_cpu(vector_irq, cpu)[vector] = virq + i; + + if (i) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_data->chip = &pci_msi_controller; + } + + cdata = alloc_sw_msi_chip_data(irq_data); + if (!cdata) { + pr_warn("error alloc irq chip data\n"); + return -ENOMEM; + } + + irq_data->chip_data = cdata; + msiconf_index = start_index + i; + msi_config = 
set_piu_msi_config(hose, cpu, msiconf_index, vector); + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->rc_index = hose->index; + cdata->rc_node = hose->node; + cdata->msi_config = msi_config; + cdata->msi_config_index = msiconf_index; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + cdata->move_in_progress = false; + } + return 0; +} + +static int assign_irq_vector(int irq, unsigned int nr_irqs, + struct irq_domain *domain, struct pci_controller *hose) +{ + int err; + unsigned long flags; + + raw_spin_lock_irqsave(&vector_lock, flags); + err = __assign_irq_vector(irq, nr_irqs, domain, hose); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return err; +} + +static void sw64_vector_free_irqs(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + int i; + struct irq_data *irq_data; + struct pci_dev *pdev; + unsigned long flags; + + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + if (irq_data && irq_data->chip_data) { + struct sw64_msi_chip_data *cdata; + struct msi_desc *entry; + struct pci_controller *hose; + + raw_spin_lock_irqsave(&vector_lock, flags); + cdata = irq_data->chip_data; + entry = irq_get_msi_desc(virq + i); + if (entry) { + pdev = (struct pci_dev *)msi_desc_to_pci_dev(entry); + hose = pci_bus_to_pci_controller(pdev->bus); + clear_bit(cdata->msi_config_index, hose->piu_msiconfig); + } + irq_domain_reset_irq_data(irq_data); + per_cpu(vector_irq, cdata->dst_cpu)[cdata->vector] = 0; + kfree(cdata); + raw_spin_unlock_irqrestore(&vector_lock, flags); + } + } +} + +static void sw64_irq_free_descs(unsigned int virq, unsigned int nr_irqs) +{ + if (is_guest_or_emul()) { + vt_sw64_vector_free_irqs(virq, nr_irqs); + return irq_free_descs(virq, nr_irqs); + } + + return irq_domain_free_irqs(virq, nr_irqs); +} + +void arch_teardown_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *desc; + int i; + + msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) { + if (desc->irq) { + for (i = 
0; i < desc->nvec_used; i++) + sw64_irq_free_descs(desc->irq + i, 1); + desc->irq = 0; + } + } +} + +static int sw64_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int err; + struct irq_alloc_info *info = arg; + struct pci_controller *hose; + + if (arg == NULL) + return -ENODEV; + + hose = pci_bus_to_pci_controller(info->msi_dev->bus); + err = assign_irq_vector(virq, nr_irqs, domain, hose); + if (err) + goto error; + return 0; +error: + sw64_vector_free_irqs(domain, virq, nr_irqs); + return err; +} + +static int pci_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + memset(arg, 0, sizeof(*arg)); + arg->msi_dev = pdev; + if (pdev->msix_enabled) + arg->type = IRQ_ALLOC_TYPE_MSIX; + else + arg->type = IRQ_ALLOC_TYPE_MSI; + return 0; +} + +static struct msi_domain_ops pci_msi_domain_ops = { + .msi_prepare = pci_msi_prepare, +}; + +static struct msi_domain_info pci_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + .ops = &pci_msi_domain_ops, + .chip = &pci_msi_controller, + .handler = handle_edge_irq, + .handler_name = "edge", +}; + +static int sw64_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &sw64_irq_chip, handle_level_irq); + irq_set_status_flags(virq, IRQ_LEVEL); + return 0; +} + +const struct irq_domain_ops sw64_msi_domain_ops = { + .map = sw64_irq_map, + .alloc = sw64_vector_alloc_irqs, + .free = sw64_vector_free_irqs, +}; + +int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + int err; + + if (is_guest_or_emul()) + return sw64_setup_vt_msi_irqs(pdev, nvec, type); + + if (!msi_default_domain) + return -EIO; + + err = msi_domain_alloc_irqs_all_locked(&pdev->dev, MSI_DEFAULT_DOMAIN, nvec); + return err; +} + +void arch_init_msi_domain(struct irq_domain 
*parent) +{ + struct irq_domain *sw64_irq_domain; + + if (is_guest_or_emul()) + return; + + sw64_irq_domain = irq_domain_add_tree(NULL, &sw64_msi_domain_ops, NULL); + BUG_ON(sw64_irq_domain == NULL); + irq_set_default_host(sw64_irq_domain); + msi_default_domain = pci_msi_create_irq_domain(NULL, + &pci_msi_domain_info, sw64_irq_domain); + if (!msi_default_domain) + pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); +} + +int pcibios_device_add(struct pci_dev *dev) +{ + if (msi_default_domain) + dev_set_msi_domain(&dev->dev, msi_default_domain); + return 0; +} + +static void irq_move_complete(struct sw64_msi_chip_data *cdata, int cpu, int vector) +{ + if (likely(!cdata->move_in_progress)) + return; + if (vector == cdata->vector && cdata->dst_cpu == cpu) { + raw_spin_lock(&vector_lock); + cdata->move_in_progress = 0; + per_cpu(vector_irq, cdata->prev_cpu)[cdata->prev_vector] = 0; + raw_spin_unlock(&vector_lock); + } +} + +void handle_pci_msi_interrupt(unsigned long type, unsigned long vector, unsigned long pci_msi1_addr) +{ + int i, irq, piu_index, msi_index = 0; + int cpu, vector_index = 0; + unsigned long value = 0; + unsigned long int_pci_msi[3]; + unsigned long *ptr; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + + if (is_guest_or_emul()) { + cpu = smp_processor_id(); + irq = per_cpu(vector_irq, cpu)[vector]; + handle_irq(irq); + return; + } + + ptr = (unsigned long *)pci_msi1_addr; + int_pci_msi[0] = *ptr; + int_pci_msi[1] = *(ptr + 1); + int_pci_msi[2] = *(ptr + 2); + + cpu = smp_processor_id(); + + for (i = 0; i < 4; i++) { + vector_index = i * 64; + while (vector != 0) { + msi_index = find_next_bit(&vector, 64, msi_index); + if (msi_index == 64) { + msi_index = 0; + continue; + } + + irq = per_cpu(vector_irq, cpu)[vector_index + msi_index]; + irq_data = irq_domain_get_irq_data(msi_default_domain->parent, irq); + cdata = irq_data_get_irq_chip_data(irq_data); + spin_lock(&cdata->cdata_lock); + irq_move_complete(cdata, cpu, 
vector_index + msi_index); + piu_index = cdata->msi_config_index; + value = cdata->msi_config | (1UL << 63); + write_piu_ior0(cdata->rc_node, cdata->rc_index, MSICONFIG0 + (piu_index << 7), value); + spin_unlock(&cdata->cdata_lock); + handle_irq(irq); + + vector = vector & (~(1UL << msi_index)); + } + + vector = int_pci_msi[i % 3]; + } +} diff --git a/drivers/irqchip/irq-sw64-intc-v2.c b/drivers/irqchip/irq-sw64-intc-v2.c new file mode 100644 index 000000000000..bc2c8ef3ed2f --- /dev/null +++ b/drivers/irqchip/irq-sw64-intc-v2.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include + +static void fake_irq_mask(struct irq_data *data) +{ +} + +static void fake_irq_unmask(struct irq_data *data) +{ +} + +static struct irq_chip onchip_intc = { + .name = "SW fake Intc", + .irq_mask = fake_irq_mask, + .irq_unmask = fake_irq_unmask, +}; + +static int sw64_intc_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + + irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq); + irq_set_status_flags(irq, IRQ_LEVEL); + return 0; +} + +static const struct irq_domain_ops sw64_intc_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .map = sw64_intc_domain_map, +}; + +#ifdef CONFIG_OF +static struct irq_domain *root_domain; + +static int __init +init_onchip_IRQ(struct device_node *intc, struct device_node *parent) +{ + + int node = 0; + int hwirq = 0, nirq = 8; + + if (parent) + panic("DeviceTree incore intc not a root irq controller\n"); + + root_domain = irq_domain_add_linear(intc, 8, + &sw64_intc_domain_ops, NULL); + + if (!root_domain) + panic("root irq domain not avail\n"); + + /* with this we don't need to export root_domain */ + irq_set_default_host(root_domain); + + for (hwirq = 0 ; hwirq < nirq ; hwirq++) + irq_create_mapping(root_domain, hwirq); + + /*enable MCU_DVC_INT_EN*/ + sw64_io_write(node, MCU_DVC_INT_EN, 0xff); + + return 0; +} + +IRQCHIP_DECLARE(sw64_intc, 
"sw64,sw6_irq_controller", init_onchip_IRQ); + +static int __init +init_onchip_vt_IRQ(struct device_node *intc, struct device_node *parent) +{ + if (parent) + panic("DeviceTree incore intc not a root irq controller\n"); + + root_domain = irq_domain_add_legacy(intc, 16, 0, 0, + &sw64_intc_domain_ops, NULL); + + if (!root_domain) + panic("root irq domain not avail\n"); + + /* with this we don't need to export root_domain */ + irq_set_default_host(root_domain); + + return 0; +} + +IRQCHIP_DECLARE(sw64_vt_intc, "sw64,sw6_irq_vt_controller", init_onchip_vt_IRQ); +#endif diff --git a/drivers/irqchip/irq-sw64-lpc-intc.c b/drivers/irqchip/irq-sw64-lpc-intc.c new file mode 100644 index 000000000000..1cbf87478242 --- /dev/null +++ b/drivers/irqchip/irq-sw64-lpc-intc.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include + +#define LPC_NR_IRQS 16 +#define LPC_IRQ 0x4 +#define LPC_IRQ_MASK 0x8 + +struct lpc_intc_data { + struct irq_domain *domain; + struct irq_chip_generic *gc; +}; + +static void lpc_irq_mask_ack(struct irq_data *data) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + struct irq_chip_type *ct = irq_data_get_chip_type(data); + unsigned int mask = data->mask; + + irq_gc_lock(gc); + *ct->mask_cache |= mask; + irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); + irq_reg_writel(gc, mask, ct->regs.ack); + irq_gc_unlock(gc); +} + +static void lpc_irq_handler(struct irq_desc *desc) +{ + struct lpc_intc_data *b = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int irq; + u32 status; + + chained_irq_enter(chip, desc); + + status = irq_reg_readl(b->gc, LPC_IRQ); + + if (status == 0) { + raw_spin_lock(&desc->lock); + handle_bad_irq(desc); + raw_spin_unlock(&desc->lock); + goto out; + } + + while (status) { + irq = __ffs(status); + status &= ~BIT(irq); + generic_handle_irq(irq_find_mapping(b->domain, irq)); + } + +out: + 
chained_irq_exit(chip, desc); +} + +static int __init lpc_intc_of_init(struct device_node *np, + struct device_node *parent) +{ + unsigned int set = IRQ_NOPROBE | IRQ_LEVEL; + struct lpc_intc_data *data; + struct irq_chip_type *ct; + int parent_irq, ret; + void __iomem *base; + int hwirq = 0; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + base = of_iomap(np, 0); + if (!base) { + pr_err("failed to remap lpc intc registers\n"); + ret = -ENOMEM; + goto out_free; + } + + parent_irq = irq_of_parse_and_map(np, 0); + if (!parent_irq) { + pr_err("failed to find parent interrupt\n"); + ret = -EINVAL; + goto out_unmap; + } + + data->domain = irq_domain_add_linear(np, LPC_NR_IRQS, + &irq_generic_chip_ops, NULL); + if (!data->domain) { + ret = -ENOMEM; + goto out_unmap; + } + + /* Allocate a single Generic IRQ chip for this node */ + ret = irq_alloc_domain_generic_chips(data->domain, 16, 1, np->name, + handle_level_irq, 0, set, + IRQ_GC_INIT_MASK_CACHE); + if (ret) { + pr_err("failed to allocate generic irq chip\n"); + goto out_free_domain; + } + + /* Set the IRQ chaining logic */ + irq_set_chained_handler_and_data(parent_irq, + lpc_irq_handler, data); + + data->gc = irq_get_domain_generic_chip(data->domain, 0); + data->gc->reg_base = base; + data->gc->private = data; + + ct = data->gc->chip_types; + + ct->regs.ack = LPC_IRQ; + ct->regs.mask = LPC_IRQ_MASK; + ct->chip.irq_mask = irq_gc_mask_set_bit; + ct->chip.irq_unmask = irq_gc_mask_clr_bit; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask_ack = lpc_irq_mask_ack; + + for (hwirq = 0 ; hwirq < 16 ; hwirq++) + irq_create_mapping(data->domain, hwirq); + + /* Enable LPC interrupts */ + writel(0xffffebdd, base + LPC_IRQ_MASK); + + return 0; + +out_free_domain: + irq_domain_remove(data->domain); +out_unmap: + iounmap(base); +out_free: + kfree(data); + return ret; +} +IRQCHIP_DECLARE(sw_lpc_intc, "sw64,lpc_intc", lpc_intc_of_init); -- Gitee From 96842356d320e04bbb55a1b30b76b82083394142 
Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:53:23 +0800 Subject: [PATCH 0346/2138] anolis: drivers: mfd: add sw64 support ANBZ: #4688 Add mfd drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/mfd/Kconfig | 16 ++ drivers/mfd/Makefile | 3 + drivers/mfd/lpc_sunway_chip3.c | 325 +++++++++++++++++++++++++++++++++ drivers/mfd/sunway_ast2400.c | 223 ++++++++++++++++++++++ 4 files changed, 567 insertions(+) create mode 100644 drivers/mfd/lpc_sunway_chip3.c create mode 100644 drivers/mfd/sunway_ast2400.c diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 68d71b4b55bd..6b653487d954 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -604,6 +604,22 @@ config LPC_SCH LPC bridge function of the Intel SCH provides support for System Management Bus and General Purpose I/O. +config LPC_CHIP3 + tristate "CHIP3 LPC" + depends on UNCORE_XUELANG + select MFD_CORE + help + LPC bridge function of the chip3 provides support for + System Management Bus and General Purpose I/O. + +config SUNWAY_SUPERIO_AST2400 + tristate "SUNWAY SUPERIO AST2400" + depends on SW64 + select MFD_CORE + help + Nuvoton AST2400 Super I/O chip platform driver written + for SUNWAY LPC controller. 
+ config INTEL_SOC_PMIC bool "Support for Crystal Cove PMIC" depends on HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index db1ba39de3b5..50b42df268ea 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -269,6 +269,9 @@ obj-$(CONFIG_MFD_KHADAS_MCU) += khadas-mcu.o obj-$(CONFIG_MFD_ACER_A500_EC) += acer-ec-a500.o obj-$(CONFIG_MFD_QCOM_PM8008) += qcom-pm8008.o +obj-$(CONFIG_LPC_CHIP3) += lpc_sunway_chip3.o +obj-$(CONFIG_SUNWAY_SUPERIO_AST2400) += sunway_ast2400.o + obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o obj-$(CONFIG_MFD_SIMPLE_MFD_I2C) += simple-mfd-i2c.o obj-$(CONFIG_MFD_SMPRO) += smpro-core.o diff --git a/drivers/mfd/lpc_sunway_chip3.c b/drivers/mfd/lpc_sunway_chip3.c new file mode 100644 index 000000000000..1bcf40d6a6f7 --- /dev/null +++ b/drivers/mfd/lpc_sunway_chip3.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * lpc_sunway_chip3.c - LPC interface for SUNWAY CHIP3 + * + * LPC bridge function contains many other functional units, + * such as Interrupt controllers, Timers, Power Management, + * System Management, GPIO, RTC, and LPC Configuration + * Registers. 
+ * + * Copyright (c) 2014 JN + * Author: Weiqiang Su + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum features { + LPC_USE_MSI = (1 << 0), + LPC_USE_INTX = (1 << 1), +}; + +enum { + LPC_HST_BAR = 0, + LPC_MEM_BAR = 2, + LPC_FWH_BAR = 4, +}; + +enum { + LPC_CTL = 0x0, + LPC_IRQ = 0x4, + LPC_IRQ_MASK = 0x8, + LPC_STAT = 0xc, + LPC_ERR_INF = 0x10, + LPC_MEM_HADDR = 0x14, + LPC_FWH_IDSEL_R1 = 0x18, + LPC_FWH_IDSEL_R2 = 0x1c, + LPC_FWH_IDSEL_R3 = 0x20, + LPC_FWH_IDSEL_R4 = 0x24, + LPC_FWH_IDSEL_R5 = 0x28, + LPC_FWH_DEC_EN1 = 0x2c, + LPC_FWH_DEC_EN2 = 0x30, + LPC_DMA_CTL = 0x34, + LPC_CH_STAT = 0x38, + LPC_CH0_ADDR = 0x3c, + LPC_CH1_ADDR = 0x40, + LPC_CH2_ADDR = 0x44, + LPC_CH3_ADDR = 0x48, + LPC_CH0_LENG = 0x4c, + LPC_CH1_LENG = 0x50, + LPC_CH2_LENG = 0x54, + LPC_CH3_LENG = 0x58, + LPC_CH0_MODE = 0x5c, + LPC_CH1_MODE = 0x60, + LPC_CH2_MODE = 0x64, + LPC_CH3_MODE = 0x68, + LPC_CH_MASK = 0x6c, + LPC_DMA_SWRST = 0x70, +}; + +struct lpc_chip3_adapter { + void __iomem *hst_regs; + struct device *dev; + int irq; + unsigned int features; +}; + +static struct resource superio_chip3_resources[] = { + { + .flags = IORESOURCE_IO, + } +}; + +static struct resource mem_flash_resource = { + .flags = IORESOURCE_MEM, +}; + +static struct resource fw_flash_resource = { + .flags = IORESOURCE_MEM, +}; + +static struct physmap_flash_data mem_flash_data = { + .width = 1, +}; + +static struct physmap_flash_data fw_flash_data = { + .width = 1, +}; + +static struct mfd_cell lpc_chip3_cells[] = { + { + .name = "sunway_superio_ast2400", + .id = 0, + .num_resources = ARRAY_SIZE(superio_chip3_resources), + .resources = superio_chip3_resources, + }, + { + .name = "chip3-flash", + .id = 0, + .num_resources = 1, + .resources = &mem_flash_resource, + .platform_data = &mem_flash_data, + .pdata_size = sizeof(mem_flash_data), + }, + { + .name = "chip3_fwh-flash", + .id = 0, + .num_resources = 1, + .resources = 
&fw_flash_resource, + .platform_data = &fw_flash_data, + .pdata_size = sizeof(fw_flash_data), + } +}; + +static inline void lpc_writel(void *address, int reg_base, int value) +{ + unsigned long addr = (unsigned long)address + reg_base; + + writel(value, (void *)addr); +} + +static inline int lpc_readl(void *address, int reg_base) +{ + unsigned long addr = (unsigned long)address + reg_base; + int value = readl((void *)addr); + + return value; +} + +static void lpc_enable(struct lpc_chip3_adapter *lpc_adapter) +{ + unsigned int value; + + value = lpc_readl(lpc_adapter->hst_regs, LPC_CTL); + value |= 0x1600; + + /* LPC host enable */ + lpc_writel(lpc_adapter->hst_regs, LPC_CTL, value); +} + +static void lpc_mem_flash_init(struct platform_device *pdev, + struct lpc_chip3_adapter *lpc_adapter) +{ + mem_flash_resource.start = + (((unsigned long)(lpc_adapter->hst_regs) & (~(0xfUL << 28))) | (0x2UL << 28)); + mem_flash_resource.end = mem_flash_resource.start + SZ_256M - 1; + + writel(0x1f, lpc_adapter->hst_regs + LPC_MEM_HADDR); +} + +static void lpc_fw_flash_init(struct platform_device *pdev, + struct lpc_chip3_adapter *lpc_adapter) +{ + fw_flash_resource.start = + (((unsigned long)(lpc_adapter->hst_regs) & (~(0xfUL << 28))) | (0x3UL << 28)); + fw_flash_resource.end = fw_flash_resource.start + SZ_256M - 1; + + writel(0xff0f, lpc_adapter->hst_regs + LPC_FWH_DEC_EN1); + writel(0xffff11ff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R5); + writel(0xffffffff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R4); + writel(0xffffffff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R3); + writel(0xffffffff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R2); + writel(0xffffffff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R1); + +} + +static int lpc_chip3_probe(struct platform_device *pdev) +{ + int ret; + struct lpc_chip3_adapter *lpc_adapter; + struct resource *mem; + + lpc_adapter = kzalloc(sizeof(*lpc_adapter), GFP_KERNEL); + if (lpc_adapter == NULL) { + dev_err(&pdev->dev, "%s kzalloc failed !\n", __func__); + 
return -ENOMEM; + } + + platform_set_drvdata(pdev, lpc_adapter); + /* Get basic io resource and map it */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "no mem resource?\n"); + return -EINVAL; + } + + lpc_adapter->hst_regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(lpc_adapter->hst_regs)) { + dev_err(&pdev->dev, "lpc region map failed\n"); + return PTR_ERR(lpc_adapter->hst_regs); + } + + lpc_adapter->dev = &pdev->dev; + lpc_adapter->features = 0; + + lpc_enable(lpc_adapter); + + lpc_mem_flash_init(pdev, lpc_adapter); + lpc_fw_flash_init(pdev, lpc_adapter); + + ret = mfd_add_devices(&pdev->dev, 0, + lpc_chip3_cells, ARRAY_SIZE(lpc_chip3_cells), + NULL, 0, NULL); + if (ret) + goto out_dev; + + dev_info(lpc_adapter->dev, "probe succeed !\n"); + + return ret; + +out_dev: + dev_info(lpc_adapter->dev, "probe failed !\n"); + + mfd_remove_devices(&pdev->dev); + kfree(lpc_adapter); + + return ret; +} + +static int lpc_chip3_remove(struct platform_device *pdev) +{ + struct lpc_chip3_adapter *lpc_adapter = platform_get_drvdata(pdev); + + mfd_remove_devices(&pdev->dev); + iounmap(lpc_adapter->hst_regs); + kfree(lpc_adapter); + + return 0; +} + +static const struct of_device_id chip3_lpc_of_match[] = { + {.compatible = "sunway,chip3_lpc",}, + { /* end of table */ } +}; + +MODULE_DEVICE_TABLE(of, chip3_lpc_of_match); + +#ifdef CONFIG_PM_SLEEP +unsigned int lpc_irq_ctrl_value; +unsigned int lpc_irq_irq_value; +unsigned int lpc_irq_mask_value; + +/** + * chip3_lpc_platform_suspend - Suspend an chip3_lpc-platform device + * @dev: the platform device to suspend + * + * This function stores the lpc controller register values and + * restores them when the machine wakes up. 
+ */ +int chip3_lpc_platform_suspend(struct device *dev) +{ + struct lpc_chip3_adapter *lpc_adapter = dev_get_drvdata(dev); + + lpc_irq_ctrl_value = lpc_readl(lpc_adapter->hst_regs, LPC_CTL); + lpc_irq_irq_value = lpc_readl(lpc_adapter->hst_regs, LPC_IRQ); + lpc_irq_mask_value = lpc_readl(lpc_adapter->hst_regs, LPC_IRQ_MASK); + + return 0; +} + +/** + * chip3_lpc_platform_resume - Resume an chip3_lpc-platform device + * @dev: the platform device to resume + * + * This function restores the register value before the suspend. + */ +int chip3_lpc_platform_resume(struct device *dev) +{ + struct lpc_chip3_adapter *lpc_adapter = dev_get_drvdata(dev); + + lpc_writel(lpc_adapter->hst_regs, LPC_CTL, lpc_irq_ctrl_value); + lpc_writel(lpc_adapter->hst_regs, LPC_IRQ, lpc_irq_irq_value); + lpc_writel(lpc_adapter->hst_regs, LPC_IRQ_MASK, lpc_irq_mask_value); + + return 0; +} +static SIMPLE_DEV_PM_OPS(chip3_lpc_pm_ops, chip3_lpc_platform_suspend, + chip3_lpc_platform_resume); +#endif + + +static struct platform_driver chip3_lpc_platform_driver = { + .driver = { + .name = "chip3_lpc", + .of_match_table = chip3_lpc_of_match, +#ifdef CONFIG_PM_SLEEP + .pm = &chip3_lpc_pm_ops, +#endif + }, + .remove = lpc_chip3_remove, +}; + +static int __init chip3_lpc_drvinit(void) +{ + return platform_driver_probe(&chip3_lpc_platform_driver, + lpc_chip3_probe); +} + +/* + * lpc controller init configure before serial drivers; + * The lpc & ast2400 should be initialized much before + * the serial initialized functions are called. 
+ */ +subsys_initcall_sync(chip3_lpc_drvinit); + +static void __exit chip3_lpc_drvexit(void) +{ + platform_driver_unregister(&chip3_lpc_platform_driver); +} + +module_exit(chip3_lpc_drvexit); + +MODULE_AUTHOR("Weiqiang Su "); +MODULE_DESCRIPTION("LPC Interface for CHIP3"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/sunway_ast2400.c b/drivers/mfd/sunway_ast2400.c new file mode 100644 index 000000000000..fbea07813643 --- /dev/null +++ b/drivers/mfd/sunway_ast2400.c @@ -0,0 +1,223 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/drivers/mfd/sunway_ast2400.c + * + * Copyright (C) 20014 - 2015 JN + * Author: Weiqiang Su + * + * Nuvoton AST2400 Super I/O chip platform driver written for + * SUNWAY LPC controller. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +static int superio_uart0_irq; +static int superio_uart1_irq; +static void pnp_enable(device_t dev) +{ + pnp_enter_conf_mode(dev); + pnp_set_logical_device(dev); + pnp_set_enable(dev, 1); + pnp_exit_conf_mode(dev); +} + +const struct pnp_mode_ops pnp_conf_mode_8787_aa = { + .enter_conf_mode = pnp_enter_conf_mode_a5a5, + .exit_conf_mode = pnp_exit_conf_mode_aa, +}; + +static struct device_operations ops = { + .enable = pnp_enable, + .ops_pnp_mode = &pnp_conf_mode_8787_aa, +}; + +static struct pnp_info pnp_dev_info[] = { + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_FDC}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_PP }, + { true, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_SP1}, + { true, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_SP2}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_KBC}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_CIR}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_ACPI}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_HWM_FPLED}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_VID}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_CIRWKUP}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO_PP_OD}, + { 
false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_SVID}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_DSLP}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIOA_LDN}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_WDT1}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIOBASE}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO0}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO1}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO2}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO3}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO4}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO5}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO6}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO7}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO8}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO9}, + { false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIOA}, +}; + +static void superio_com1_init(struct pnp_device *device) +{ + pnp_enter_conf_mode(device); + pnp_set_logical_device(device); + pnp_set_enable(device, 1); + + pnp_write_config(device, 0x60, 0x3); + pnp_write_config(device, 0x61, 0xf8); + + pnp_write_config(device, 0x70, superio_uart0_irq); + pnp_write_config(device, 0x71, 0x1); + + pnp_write_config(device, 0xf0, 0x0); + + pnp_exit_conf_mode(device); +} + +static void superio_com2_init(struct pnp_device *device) +{ + pnp_enter_conf_mode(device); + pnp_set_logical_device(device); + pnp_set_enable(device, 1); + + pnp_write_config(device, 0x60, 0x2); + pnp_write_config(device, 0x61, 0xf8); + + pnp_write_config(device, 0x70, superio_uart1_irq); + pnp_write_config(device, 0x71, 0x1); + + pnp_write_config(device, 0xf0, 0x0); + + pnp_exit_conf_mode(device); +} + +static void pnp_enable_devices(superio_device_t superio_device, + struct device_operations *ops, + unsigned int functions, struct pnp_info *info) +{ + int i = 0; + struct pnp_info *each_info; + struct pnp_device *each_device; + + /* Setup the ops and resources on the newly 
allocated devices. */ + for (i = 0; i < functions; i++) { + each_info = info + i; + each_device = &each_info->pnp_device; + + /* Skip logical devices this Super I/O doesn't enable. */ + if (each_info->enabled == false) + continue; + + each_device->device = each_info->function; + each_device->ops = ops; + each_device->port = superio_device->superio_ast2400_efir; + + switch (each_device->device) { + case AST2400_SP1: + each_device->ops->init = superio_com1_init; + break; + case AST2400_SP2: + each_device->ops->init = superio_com2_init; + break; + } + + if (each_device->ops->init) + each_device->ops->init(each_device); + } +} + +static void superio_enable_devices(superio_device_t superio_device) +{ + pnp_enable_devices(superio_device, &ops, + ARRAY_SIZE(pnp_dev_info), pnp_dev_info); +} + +static int superio_ast2400_probe(struct platform_device *pdev) +{ + int err = 0; + superio_device_t superio_device; + struct resource *res; + resource_size_t physaddr = 0; + + /* allocate space for device info */ + superio_device = kzalloc(sizeof(struct superio_ast2400_device), GFP_KERNEL); + if (superio_device == NULL) { + err = -ENOMEM; + return err; + } + + res = platform_get_resource(pdev, IORESOURCE_IO, 1); + if (res) { + physaddr = res->start; + dev_info(&pdev->dev, "request memory region %pR\n", res); + } + + superio_device->dev = &pdev->dev; + superio_device->enabled = 1; + superio_device->superio_ast2400_efir = physaddr + SUPERIO_PNP_PORT; + superio_device->superio_ast2400_efdr = physaddr + SUPERIO_PNP_PORT + 1; + superio_uart0_irq = platform_get_irq_byname(pdev, "uart0_irq"); + superio_uart1_irq = platform_get_irq_byname(pdev, "uart1_irq"); + + superio_enable_devices(superio_device); + + platform_set_drvdata(pdev, superio_device); + + dev_info(superio_device->dev, "probe succeed !\n"); + + return 0; +} + +static int superio_ast2400_remove(struct platform_device *pdev) +{ + superio_device_t superio_device = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + 
+ kfree(superio_device); + + return 0; +} + +static struct platform_driver superio_nuvoton_ast2400_driver = { + .probe = superio_ast2400_probe, + .remove = superio_ast2400_remove, + .driver = { + .name = "sunway_superio_ast2400" + }, +}; + +static int __init superio_nuvoton_ast2400_init(void) +{ + return platform_driver_register(&superio_nuvoton_ast2400_driver); +} + +subsys_initcall_sync(superio_nuvoton_ast2400_init); + +static void __exit superio_nuvoton_ast2400_exit(void) +{ + platform_driver_unregister(&superio_nuvoton_ast2400_driver); +} + +module_exit(superio_nuvoton_ast2400_exit); + +MODULE_DESCRIPTION("NUVOTON AST2400 Super I/O DRIVER"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); +MODULE_AUTHOR("Weiqiang Su"); -- Gitee From 1fe85f97f56cfe84d29158563e52e19f86faa9b9 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:55:48 +0800 Subject: [PATCH 0347/2138] anolis: drivers: misc: add sw64 support ANBZ: #4688 Add misc drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/misc/Kconfig | 8 ++ drivers/misc/Makefile | 1 + drivers/misc/kgdbts.c | 3 +- drivers/misc/sunway-ged.c | 253 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 264 insertions(+), 1 deletion(-) create mode 100644 drivers/misc/sunway-ged.c diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index cadd4a820c03..1e9def44eb09 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -376,6 +376,14 @@ config HMC6352 This driver provides support for the Honeywell HMC6352 compass, providing configuration and heading data via sysfs. +config SUNWAY_GED + tristate "sunway generic device driver for memhotplug" + depends on SW64 + depends on MEMORY_HOTPLUG + help + This driver provides support for sunway generic device driver for + memhotplug, providing configuration and heading data via sysfs. 
+ config DS1682 tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm" depends on I2C diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index f2a4d1ff65d4..ccf5456e1d88 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -35,6 +35,7 @@ obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o obj-$(CONFIG_DS1682) += ds1682.o obj-$(CONFIG_C2PORT) += c2port/ obj-$(CONFIG_HMC6352) += hmc6352.o +obj-$(CONFIG_SUNWAY_GED) += sunway-ged.o obj-y += eeprom/ obj-y += cb710/ obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 88b91ad8e541..6cd73f2a487f 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -130,7 +130,8 @@ static int hw_break_val2; static int cont_instead_of_sstep; static unsigned long cont_thread_id; static unsigned long sstep_thread_id; -#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC) +#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC) \ + || defined(CONFIG_SW64) static int arch_needs_sstep_emulation = 1; #else static int arch_needs_sstep_emulation; diff --git a/drivers/misc/sunway-ged.c b/drivers/misc/sunway-ged.c new file mode 100644 index 000000000000..b4e4ca315852 --- /dev/null +++ b/drivers/misc/sunway-ged.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Generic Event Device for ACPI. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define OFFSET_START_ADDR 0 +#define OFFSET_LENGTH 8 +#define OFFSET_STATUS 16 +#define OFFSET_SLOT 24 + +/* Memory hotplug event */ +#define SUNWAY_MEMHOTPLUG_ADD 0x1 +#define SUNWAY_MEMHOTPLUG_REMOVE 0x2 + +struct sunway_memory_device { + struct sunway_ged_device *device; + unsigned int state; /* State of the memory device */ + struct list_head list; + + u64 start_addr; /* Memory Range start physical addr */ + u64 length; /* Memory Range length */ + u64 slot; /* Memory Range slot */ + unsigned int enabled:1; +}; + +struct sunway_ged_device { + struct device *dev; + void __iomem *membase; + void *driver_data; + spinlock_t lock; + struct list_head dev_list; +}; + +static int sunway_memory_enable_device(struct sunway_memory_device *mem_device) +{ + int num_enabled = 0; + int result = 0; + + if (mem_device->enabled) { /* just sanity check...*/ + num_enabled++; + goto out; + } + + /* + * If the memory block size is zero, please ignore it. + * Don't try to do the following memory hotplug flowchart. + */ + if (!mem_device->length) + goto out; + + lock_device_hotplug(); + /* suppose node = 0, fix me! */ + result = __add_memory(0, mem_device->start_addr, mem_device->length); + unlock_device_hotplug(); + /* + * If the memory block has been used by the kernel, add_memory() + * returns -EEXIST. If add_memory() returns the other error, it + * means that this memory block is not used by the kernel. + */ + if (result && result != -EEXIST) + goto out; + + mem_device->enabled = 1; + + /* + * Add num_enable even if add_memory() returns -EEXIST, so the + * device is bound to this driver. 
+ */ + num_enabled++; +out: + if (!num_enabled) { + dev_err(mem_device->device->dev, "add_memory failed\n"); + return -EINVAL; + } + + return 0; +} + +static int sunway_memory_get_meminfo(struct sunway_memory_device *mem_device) +{ + struct sunway_ged_device *geddev; + + if (!mem_device) + return -EINVAL; + + if (mem_device->enabled) + return 0; + + geddev = mem_device->device; + + mem_device->start_addr = readq(geddev->membase + OFFSET_START_ADDR); + mem_device->length = readq(geddev->membase + OFFSET_LENGTH); + + return 0; +} + +static void sunway_memory_device_remove(struct sunway_ged_device *device) +{ + struct sunway_memory_device *mem_dev, *n; + unsigned long start_addr, length, slot; + + if (!device) + return; + + start_addr = readq(device->membase + OFFSET_START_ADDR); + length = readq(device->membase + OFFSET_LENGTH); + slot = readq(device->membase + OFFSET_SLOT); + + list_for_each_entry_safe(mem_dev, n, &device->dev_list, list) { + if (!mem_dev->enabled) + continue; + + if ((start_addr == mem_dev->start_addr) && + (length == mem_dev->length)) { + /* suppose node = 0, fix me! 
*/ + remove_memory(0, start_addr, length); + list_del(&mem_dev->list); + kfree(mem_dev); + } + } + + writeq(slot, device->membase + OFFSET_SLOT); +} + +static int sunway_memory_device_add(struct sunway_ged_device *device) +{ + struct sunway_memory_device *mem_device; + int result; + + if (!device) + return -EINVAL; + + mem_device = kzalloc(sizeof(struct sunway_memory_device), GFP_KERNEL); + if (!mem_device) + return -ENOMEM; + + INIT_LIST_HEAD(&mem_device->list); + mem_device->device = device; + + /* Get the range from the IO */ + mem_device->start_addr = readq(device->membase + OFFSET_START_ADDR); + mem_device->length = readq(device->membase + OFFSET_LENGTH); + mem_device->slot = readq(device->membase + OFFSET_SLOT); + + result = sunway_memory_enable_device(mem_device); + if (result) { + dev_err(device->dev, "sunway_memory_enable_device() error\n"); + sunway_memory_device_remove(device); + + return result; + } + + list_add_tail(&mem_device->list, &device->dev_list); + dev_dbg(device->dev, "Memory device configured\n"); + + hcall(HCALL_MEMHOTPLUG, mem_device->start_addr, 0, 0); + + return 1; +} + +static irqreturn_t sunwayged_ist(int irq, void *data) +{ + struct sunway_ged_device *sunwayged_dev = data; + unsigned int status; + + status = readl(sunwayged_dev->membase + OFFSET_STATUS); + + /* through IO status to add or remove memory device */ + if (status & SUNWAY_MEMHOTPLUG_ADD) + sunway_memory_device_add(sunwayged_dev); + + if (status & SUNWAY_MEMHOTPLUG_REMOVE) + sunway_memory_device_remove(sunwayged_dev); + + return IRQ_HANDLED; +} + +static irqreturn_t sunwayged_irq_handler(int irq, void *data) +{ + return IRQ_WAKE_THREAD; +} + +static int sunwayged_probe(struct platform_device *pdev) +{ + struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int irq = platform_get_irq(pdev, 0); + struct sunway_ged_device *geddev; + struct device *dev; + int irqflags; + + if (!regs) { + dev_err(dev, "no registers defined\n"); + return -EINVAL; + } + + geddev 
= devm_kzalloc(&pdev->dev, sizeof(*geddev), GFP_KERNEL); + if (!geddev) + return -ENOMEM; + + spin_lock_init(&geddev->lock); + geddev->membase = devm_ioremap(&pdev->dev, + regs->start, resource_size(regs)); + if (!geddev->membase) + return -ENOMEM; + + INIT_LIST_HEAD(&geddev->dev_list); + geddev->dev = &pdev->dev; + irqflags = IRQF_SHARED; + + if (request_threaded_irq(irq, sunwayged_irq_handler, sunwayged_ist, + irqflags, "SUNWAY:Ged", geddev)) { + dev_err(dev, "failed to setup event handler for irq %u\n", irq); + + return -EINVAL; + } + + platform_set_drvdata(pdev, geddev); + + return 0; +} + +static int sunwayged_remove(struct platform_device *pdev) +{ + return 0; +} + +static const struct of_device_id sunwayged_of_match[] = { + {.compatible = "sw6,sunway-ged", }, + { } +}; +MODULE_DEVICE_TABLE(of, sunwayged_of_match); + +static struct platform_driver sunwayged_platform_driver = { + .driver = { + .name = "sunway-ged", + .of_match_table = sunwayged_of_match, + }, + .probe = sunwayged_probe, + .remove = sunwayged_remove, +}; +module_platform_driver(sunwayged_platform_driver); + +MODULE_AUTHOR("Lu Feifei"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Sunway ged driver"); -- Gitee From da83531c6d409b46603f3d69baf9061eeead9afe Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:44:12 +0800 Subject: [PATCH 0348/2138] anolis: drivers: pci: add sw64 support ANBZ: #4688 Add common pci and pci controller drivers for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/pci/controller/Kconfig | 4 + drivers/pci/controller/Makefile | 1 + drivers/pci/controller/pci-sunway.c | 805 ++++++++++++++++++++++++++++ drivers/pci/pci.c | 4 + drivers/pci/probe.c | 1 + drivers/pci/quirks.c | 2 + 6 files changed, 817 insertions(+) create mode 100644 drivers/pci/controller/pci-sunway.c diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index c0c3f2824990..2a2a3ccd66ad 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -342,6 +342,10 @@ config PCIE_XILINX_CPM Say 'Y' here if you want kernel support for the Xilinx Versal CPM host bridge. +config PCI_SW64 + bool + depends on SW64 && PCI + source "drivers/pci/controller/cadence/Kconfig" source "drivers/pci/controller/dwc/Kconfig" source "drivers/pci/controller/mobiveil/Kconfig" diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile index 37c8663de7fe..9d161c053bc4 100644 --- a/drivers/pci/controller/Makefile +++ b/drivers/pci/controller/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o obj-$(CONFIG_PCIE_HISI_ERR) += pcie-hisi-error.o obj-$(CONFIG_PCIE_APPLE) += pcie-apple.o obj-$(CONFIG_PCIE_MT7621) += pcie-mt7621.o +obj-$(CONFIG_PCI_SW64) += pci-sunway.o # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW obj-y += dwc/ diff --git a/drivers/pci/controller/pci-sunway.c b/drivers/pci/controller/pci-sunway.c new file mode 100644 index 000000000000..036994ffde38 --- /dev/null +++ b/drivers/pci/controller/pci-sunway.c @@ -0,0 +1,805 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include + +void set_devint_wken(int node) +{ + unsigned long val; + + /* enable INTD wakeup */ + val = 0x80; + sw64_io_write(node, DEVINT_WKEN, val); + sw64_io_write(node, DEVINTWK_INTEN, val); +} + +#ifdef 
CONFIG_UNCORE_JUNZHANG +void set_adr_int(int node) +{ + sw64_io_write(node, ADR_INT_CONFIG, (0x0 << 16 | 0x3f)); + sw64_io_write(node, ADR_CTL, 0xc); +} +#endif + +void set_pcieport_service_irq(int node, int index) +{ + if (IS_ENABLED(CONFIG_PCIE_PME)) + write_piu_ior0(node, index, PMEINTCONFIG, PME_ENABLE_INTD_CORE0); + + if (IS_ENABLED(CONFIG_PCIEAER)) + write_piu_ior0(node, index, AERERRINTCONFIG, AER_ENABLE_INTD_CORE0); +} + +int chip_pcie_configure(struct pci_controller *hose) +{ + struct pci_dev *dev; + struct pci_bus *bus, *top; + struct list_head *next; + unsigned int max_read_size, smallest_max_payload; + int max_payloadsize; + unsigned long rc_index, node; + unsigned long piuconfig0, value; + unsigned int pcie_caps_offset; + unsigned int rc_conf_value; + u16 devctl, new_values; + bool rc_ari_disabled = false, found = false; + unsigned char bus_max_num; + + node = hose->node; + rc_index = hose->index; + smallest_max_payload = read_rc_conf(node, rc_index, RC_EXP_DEVCAP); + smallest_max_payload &= PCI_EXP_DEVCAP_PAYLOAD; + bus_max_num = hose->busn_space->start; + + top = hose->bus; + bus = top; + next = top->devices.next; + + for (;;) { + if (next == &bus->devices) { + /* end of this bus, go up or finish */ + if (bus == top) + break; + + next = bus->self->bus_list.next; + bus = bus->self->bus; + continue; + } + dev = list_entry(next, struct pci_dev, bus_list); + if (dev->subordinate) { + /* this is a pci-pci bridge, do its devices next */ + next = dev->subordinate->devices.next; + bus = dev->subordinate; + } else + next = dev->bus_list.next; + + if (!found) { + if (pci_is_root_bus(dev->bus)) { + if (list_empty(&dev->subordinate->devices)) + rc_ari_disabled = true; + } else { + if (!pci_ari_enabled(dev->bus)) { + rc_ari_disabled = true; + found = true; + } + } + } + + if (bus->busn_res.end > bus_max_num) + bus_max_num = bus->busn_res.end; + + /* Query device PCIe capability register */ + pcie_caps_offset = dev->pcie_cap; + if (pcie_caps_offset == 0) + 
continue; + max_payloadsize = dev->pcie_mpss; + if (max_payloadsize < smallest_max_payload) + smallest_max_payload = max_payloadsize; + } + + if (rc_ari_disabled) { + rc_conf_value = read_rc_conf(node, rc_index, RC_EXP_DEVCTL2); + rc_conf_value &= ~PCI_EXP_DEVCTL2_ARI; + write_rc_conf(node, rc_index, RC_EXP_DEVCTL2, rc_conf_value); + } else { + rc_conf_value = read_rc_conf(node, rc_index, RC_EXP_DEVCTL2); + rc_conf_value |= PCI_EXP_DEVCTL2_ARI; + write_rc_conf(node, rc_index, RC_EXP_DEVCTL2, rc_conf_value); + } + + rc_conf_value = read_rc_conf(node, rc_index, RC_EXP_DEVCAP); + rc_conf_value &= PCI_EXP_DEVCAP_PAYLOAD; + max_payloadsize = rc_conf_value; + if (max_payloadsize < smallest_max_payload) + smallest_max_payload = max_payloadsize; + + max_read_size = 0x2; /* Limit to 512B */ + value = read_rc_conf(node, rc_index, RC_EXP_DEVCTL); + value &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); + value |= (max_read_size << 12) | (smallest_max_payload << 5); + write_rc_conf(node, rc_index, RC_EXP_DEVCTL, value); + new_values = (max_read_size << 12) | (smallest_max_payload << 5); + + piuconfig0 = read_piu_ior0(node, rc_index, PIUCONFIG0); + piuconfig0 &= ~(0x7fUL << 9); + if (smallest_max_payload == 0x2) { + piuconfig0 |= (0x20UL << 9); + write_piu_ior0(node, rc_index, PIUCONFIG0, piuconfig0); + } else { + piuconfig0 |= (0x40UL << 9); + write_piu_ior0(node, rc_index, PIUCONFIG0, piuconfig0); + } + + pr_info("Node%ld RC%ld MPSS %luB, MRRS %luB, Piuconfig0 %#lx, ARI %s\n", + node, rc_index, (1UL << smallest_max_payload) << 7, + (1UL << max_read_size) << 7, piuconfig0, + rc_ari_disabled ? "disabled" : "enabled"); + + /* Now, set the max_payload_size for all devices to that value. 
*/ + bus = top; + next = top->devices.next; + for (;;) { + if (next == &bus->devices) { + /* end of this bus, go up or finish */ + if (bus == top) + break; + next = bus->self->bus_list.next; + bus = bus->self->bus; + continue; + } + dev = list_entry(next, struct pci_dev, bus_list); + if (dev->subordinate) { + /* this is a pci-pci bridge, do its devices next */ + next = dev->subordinate->devices.next; + bus = dev->subordinate; + } else + next = dev->bus_list.next; + + pcie_caps_offset = dev->pcie_cap; + if (pcie_caps_offset == 0) + continue; + + pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, &devctl); + devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); + devctl |= new_values; + pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, devctl); + } + + return bus_max_num; +} + +static int check_pci_linkup(unsigned long node, unsigned long index) +{ + unsigned long rc_debug; + + if (is_guest_or_emul()) { + if (node == 0 && index == 0) + return 0; + else + return 1; + } else { + rc_debug = read_piu_ior1(node, index, RCDEBUGINF1); + } + + return !(rc_debug == 0x111); +} + +static void set_rc_piu(unsigned long node, unsigned long index) +{ + unsigned int i __maybe_unused; + unsigned int value; + u32 rc_misc_ctrl; + + if (is_guest_or_emul()) + return; + + /* configure RC, set PCI-E root controller */ + write_rc_conf(node, index, RC_COMMAND, 0x00100007); + write_rc_conf(node, index, RC_PORT_LINK_CTL, 0x1f0020); + write_rc_conf(node, index, RC_EXP_DEVCTL, 0x2850); + write_rc_conf(node, index, RC_EXP_DEVCTL2, 0x6); + write_rc_conf(node, index, RC_ORDER_RULE_CTL, 0x0100); + + /* enable DBI_RO_WR_EN */ + rc_misc_ctrl = read_rc_conf(node, index, RC_MISC_CONTROL_1); + write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl | 0x1); + + /* fix up DEVICE_ID_VENDOR_ID register */ + value = (PCI_DEVICE_ID_SW64_ROOT_BRIDGE << 16) | PCI_VENDOR_ID_JN; + write_rc_conf(node, index, RC_VENDOR_ID, value); + + /* set PCI-E root class code */ + value = 
read_rc_conf(node, index, RC_REVISION_ID); + write_rc_conf(node, index, RC_REVISION_ID, (PCI_CLASS_BRIDGE_HOST << 16) | value); + + /* disable DBI_RO_WR_EN */ + write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl); + + write_rc_conf(node, index, RC_PRIMARY_BUS, 0xffffff); + write_piu_ior0(node, index, PIUCONFIG0, PIUCONFIG0_INIT_VAL); + + write_piu_ior1(node, index, PIUCONFIG1, 0x2); + write_piu_ior1(node, index, ERRENABLE, -1); + + /* set DMA offset value PCITODMA_OFFSET */ + write_piu_ior0(node, index, EPDMABAR, PCITODMA_OFFSET); + if (IS_ENABLED(CONFIG_PCI_MSI)) { + write_piu_ior0(node, index, MSIADDR, MSIX_MSG_ADDR); +#ifdef CONFIG_UNCORE_XUELANG + for (i = 0; i < 256; i++) + write_piu_ior0(node, index, MSICONFIG0 + (i << 7), 0); +#endif + } +} + +static void set_intx(unsigned long node, unsigned long index, + unsigned long int_conf) +{ + if (is_guest_or_emul()) + return; + +#if defined(CONFIG_UNCORE_XUELANG) + write_piu_ior0(node, index, INTACONFIG, int_conf | (0x8UL << 10)); + write_piu_ior0(node, index, INTBCONFIG, int_conf | (0x4UL << 10)); + write_piu_ior0(node, index, INTCCONFIG, int_conf | (0x2UL << 10)); + write_piu_ior0(node, index, INTDCONFIG, int_conf | (0x1UL << 10)); +#elif defined(CONFIG_UNCORE_JUNZHANG) + write_piu_ior0(node, index, INTACONFIG, int_conf | (0x1UL << 10)); + write_piu_ior0(node, index, INTBCONFIG, int_conf | (0x2UL << 10)); + write_piu_ior0(node, index, INTCCONFIG, int_conf | (0x4UL << 10)); + write_piu_ior0(node, index, INTDCONFIG, int_conf | (0x8UL << 10)); +#endif +} + +static unsigned long get_rc_enable(unsigned long node) +{ + unsigned long rc_enable; + + if (is_guest_or_emul()) + return 1; + + rc_enable = sw64_io_read(node, IO_START); + + return rc_enable; +} + +static int map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + return hose->service_irq; + else + return hose->int_irq; +} + +static 
void hose_init(struct pci_controller *hose) +{ + unsigned long pci_io_base; + + hose->sparse_mem_base = 0; + hose->sparse_io_base = 0; + pci_io_base = IO_BASE | (hose->node << IO_NODE_SHIFT) + | PCI_BASE | (hose->index << IO_RC_SHIFT); + + hose->dense_mem_base = pci_io_base; + hose->dense_io_base = pci_io_base | PCI_LEGACY_IO; + hose->ep_config_space_base = __va(pci_io_base | PCI_EP_CFG); + hose->rc_config_space_base = __va(pci_io_base | PCI_RC_CFG); + + hose->mem_space->start = pci_io_base + PCI_32BIT_MEMIO; + hose->mem_space->end = hose->mem_space->start + PCI_32BIT_MEMIO_SIZE - 1; + hose->mem_space->name = "pci memory space"; + hose->mem_space->flags = IORESOURCE_MEM; + + if (request_resource(&iomem_resource, hose->mem_space) < 0) + pr_err("Failed to request MEM on hose %ld\n", hose->index); + hose->pre_mem_space->start = pci_io_base | PCI_64BIT_MEMIO; + hose->pre_mem_space->end = hose->pre_mem_space->start + PCI_64BIT_MEMIO_SIZE - 1; + hose->pre_mem_space->name = "pci pre mem space"; + hose->pre_mem_space->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_MEM_64; + + if (request_resource(&iomem_resource, hose->pre_mem_space) < 0) + pr_err("Failed to request 64bit MEM on hose %ld\n", hose->index); + hose->io_space->start = pci_io_base | PCI_LEGACY_IO; + hose->io_space->end = hose->io_space->start + PCI_LEGACY_IO_SIZE - 1; + hose->io_space->name = "pci io space"; + hose->io_space->flags = IORESOURCE_IO; + + if (request_resource(&ioport_resource, hose->io_space) < 0) + pr_err("Failed to request IO on hose %ld\n", hose->index); + hose->busn_space->name = "PCI busn"; + hose->busn_space->start = 0xff; + hose->busn_space->end = 0xff; + hose->busn_space->flags = IORESOURCE_BUS; + hose->first_busno = hose->self_busno = hose->busn_space->start; + hose->last_busno = hose->busn_space->end; + + if (is_in_host()) { + if (IS_ENABLED(CONFIG_PCI_MSI)) + memset(hose->piu_msiconfig, 0, 256/8); + } +}; + +static struct sw64_pci_init_ops chip_pci_init_ops = { + .map_irq = 
map_irq, + .get_rc_enable = get_rc_enable, + .hose_init = hose_init, + .set_rc_piu = set_rc_piu, + .check_pci_linkup = check_pci_linkup, + .set_intx = set_intx, +}; + +void __init setup_chip_pci_ops(void) +{ + sw64_chip_init->pci_init = chip_pci_init_ops; +} + +static unsigned long rc_linkup; +static struct pci_controller *head, **tail = &head; + +static void pci_mark_rc_linkup(unsigned long node, unsigned long index) +{ + set_bit(node * 8 + index, &rc_linkup); +} + +static int pci_get_rc_linkup(unsigned long node, unsigned long index) +{ + return test_bit(node * 8 + index, &rc_linkup); +} + +/** + * Link the specified pci controller to list + */ +extern struct pci_controller *hose_head; +static void pci_link_controller(struct pci_controller *hose) +{ + if (unlikely(!hose)) + return; + + *tail = hose; + tail = &hose->next; + + if (!hose_head) + hose_head = head; +} + +struct pci_controller *bus_num_to_pci_controller(unsigned long bus_num) +{ + struct pci_controller *hose; + + for (hose = head; hose; hose = hose->next) { + if (bus_num >= hose->first_busno && bus_num <= hose->last_busno) + return hose; + } + + return NULL; +} + +struct pci_controller *pci_bus_to_pci_controller(const struct pci_bus *bus) +{ + struct pci_config_window *cfg = NULL; + + if (unlikely(!bus)) + return NULL; + + if (acpi_disabled) + return (struct pci_controller *)(bus->sysdata); + + cfg = (struct pci_config_window *)bus->sysdata; + return (struct pci_controller *)(cfg->priv); +} + +/** + * PCIe Root Complex read config space operations + */ +static int sw64_pcie_read_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 data; + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = hose->rc_config_space_base; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc read addr:%px bus %d, devfn %#x, where %#x size=%d\t", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, size); + + if ((uintptr_t)where & (size - 
1)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + /** + * Workaround for sw6a chipset due to only support scan with devfn = 0, + * while sw6b does not have this limit. + */ + if (unlikely(devfn > 0)) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + data = readl(cfg_iobase + ((where & ~3) << 5)); + + switch (size) { + case 1: + *val = (data >> (8 * (where & 0x3))) & 0xff; + break; + case 2: + *val = (data >> (8 * (where & 0x2))) & 0xffff; + break; + default: + *val = data; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("*val %#x\n ", *val); + + return PCIBIOS_SUCCESSFUL; +} + +/** + * PCIe Root Complex write config space operations + */ +int sw64_pcie_write_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data; + u32 shift = 8 * (where & 3); + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = (void *)hose->rc_config_space_base; + + if ((uintptr_t)where & (size - 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + switch (size) { + case 1: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xff << shift); + data |= (val & 0xff) << shift; + break; + case 2: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xffff << shift); + data |= (val & 0xffff) << shift; + break; + default: + data = val; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc write addr:%px bus %d, devfn %#x, where %#x *val %#x size %d\n", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, val, size); + + writel(data, cfg_iobase + ((where & ~3) << 5)); + + return PCIBIOS_SUCCESSFUL; +} + +/** + * sw64_pcie_valid_device - check if a valid device is present on bus + * @bus : PCI bus structure + * @devfn: device/function + * + * @return: 'true' on success and 'false' if invalid device is found + */ +static bool sw64_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + 
if (is_in_host()) { + /* Only one device down on each root complex */ + if (bus->number == hose->self_busno && devfn > 0) + return false; + } + + return true; +} + +/** + * sw64_pcie_config_read - read val from config space of PCI host controller or device + * @bus : PCI bus structure + * @devfn: device/function + * @where: offset from base + * @size : size of val + * @val[out]: the value read from PCI host controller or device + * + * @return: Whether read operation success + */ +static int sw64_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + int ret = PCIBIOS_DEVICE_NOT_FOUND; + + if (is_guest_or_emul()) + return pci_generic_config_read(bus, devfn, where, size, val); + + hose->self_busno = hose->busn_space->start; + + if (unlikely(bus->number == hose->self_busno)) { + ret = sw64_pcie_read_rc_cfg(bus, devfn, where, size, val); + } else { + if (pci_get_rc_linkup(hose->node, hose->index)) + ret = pci_generic_config_read(bus, devfn, where, size, val); + else + return ret; + } + return ret; +} + +/** + * sw64_pcie_config_write - write val to config space of PCI host controller or device + * @bus : PCI bus structure + * @devfn: device/function + * @where: offset from base + * @size : size of val + * @val : the value write to PCI host controller or device + * + * @return: Whether write operation success + */ +static int sw64_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (is_guest_or_emul()) + return pci_generic_config_write(bus, devfn, where, size, val); + + hose->self_busno = hose->busn_space->start; + + if (unlikely(bus->number == hose->self_busno)) + return sw64_pcie_write_rc_cfg(bus, devfn, where, size, val); + else + return pci_generic_config_write(bus, devfn, where, size, val); +} + +/** + * sw64_pcie_map_bus - get configuration base 
address + * @bus : PCI bus structure + * @devfn: device/function + * @where: offset from base + * + * @return: base address of the configuration space needed to be + * accessed. + */ +static void __iomem *sw64_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase; + unsigned long relbus; + + if (!sw64_pcie_valid_device(bus, devfn)) + return NULL; + + /** + * ECAM of Sunway PCI host controller is slightly + * different from the standrad: + * [31:24]: bus number + * [23:19]: device number + * [18:16]: function number + * [15:12]: reserved + * [11:8] : extended config space registers + * [7:2] : legacy config space registers + */ + relbus = (bus->number << 24) | (devfn << 16) | where; + + cfg_iobase = hose->ep_config_space_base + relbus; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("addr:%px bus %d, devfn %d, where %d\n", + cfg_iobase, bus->number, devfn, where); + return cfg_iobase; +} + +#ifdef CONFIG_ACPI +int sw64_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return map_irq(dev, slot, pin); +} + +static void setup_intx_irqs(struct pci_controller *hose) +{ + unsigned long int_conf, node, val_node; + unsigned long index, irq; + int rcid; + + node = hose->node; + index = hose->index; + + if (!node_online(node)) + val_node = next_node_in(node, node_online_map); + else + val_node = node; + irq = irq_alloc_descs_from(NR_IRQS_LEGACY, 2, val_node); + WARN_ON(irq < 0); + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq); + irq_set_status_flags(irq, IRQ_LEVEL); + hose->int_irq = irq; + irq_set_chip_and_handler(irq + 1, &dummy_irq_chip, handle_level_irq); + hose->service_irq = irq + 1; + rcid = cpu_to_rcid(0); + + pr_info_once("INTx are directed to node %d core %d.\n", + ((rcid >> 6) & 0x3), (rcid & 0x1f)); + int_conf = 1UL << 62 | rcid; /* rebase all intx on the first logical cpu */ + + set_intx(node, index, int_conf); + + 
set_pcieport_service_irq(node, index); +} + +static int sw64_pci_prepare_controller(struct pci_controller *hose, + struct acpi_device *adev) +{ + unsigned long long index, node; + unsigned long long rc_config_base_addr; + unsigned long long pci_io_base_addr; + unsigned long long ep_io_base_addr; + acpi_status rc; + + /* Get node from ACPI namespace */ + node = acpi_get_node(adev->handle); + if (node == NUMA_NO_NODE) { + dev_err(&adev->dev, "unable to get node ID\n"); + return -EEXIST; + } + + /* Get index from ACPI namespace */ + rc = acpi_evaluate_integer(adev->handle, "INDX", NULL, &index); + if (rc != AE_OK) { + dev_err(&adev->dev, "unable to retrieve INDX\n"); + return -EEXIST; + } + + /** + * Get Root Complex config space base address. + * + * For sw64, Root Complex config space base addr is different + * from Endpoint config space base address. Use MCFG table to + * pass Endpoint config space base address, and define Root Complex + * config space base address("RCCB") separately in the ACPI namespace. 
+ */ + rc = acpi_evaluate_integer(adev->handle, "RCCB", NULL, &rc_config_base_addr); + if (rc != AE_OK) { + dev_err(&adev->dev, "unable to retrieve RCCB\n"); + return -EEXIST; + } + + /* Get Root Complex I/O space base addr from ACPI namespace */ + rc = acpi_evaluate_integer(adev->handle, "RCIO", NULL, &pci_io_base_addr); + if (rc != AE_OK) { + dev_err(&adev->dev, "unable to retrieve RCIO\n"); + return -EEXIST; + } + + /* Get Endpoint I/O space base addr from ACPI namespace */ + rc = acpi_evaluate_integer(adev->handle, "EPIO", NULL, &ep_io_base_addr); + if (rc != AE_OK) { + dev_err(&adev->dev, "unable to retrieve EPIO\n"); + return -EEXIST; + } + + hose->iommu_enable = false; + hose->index = index; + hose->node = node; + + hose->sparse_mem_base = 0; + hose->sparse_io_base = 0; + hose->dense_mem_base = pci_io_base_addr; + hose->dense_io_base = ep_io_base_addr; + + hose->rc_config_space_base = __va(rc_config_base_addr); + + hose->first_busno = 0xff; + hose->last_busno = 0xff; + hose->self_busno = 0xff; + + hose->need_domain_info = 0; + +#if IS_ENABLED(CONFIG_PCI_MSI) + if (is_in_host()) + memset(hose->piu_msiconfig, 0, 256 / 8); /* 256 bits bitmap */ +#endif + + /** + * There are two prerequisites for Root Complex + * of Sunway to work: + * 1. Root Complex enable + * 2. Root Complex link up + */ + set_rc_piu(hose->node, hose->index); + if (check_pci_linkup(hose->node, hose->index)) { + /** + * Root Complex link up failed. + * This usually means that no device on the slot. 
+ */ + dev_info(&adev->dev, ": failed to link up\n", + hose->node, hose->index); + } else { + pci_mark_rc_linkup(hose->node, hose->index); + dev_info(&adev->dev, ": successfully link up\n", + hose->node, hose->index); + } + + setup_intx_irqs(hose); + + pci_link_controller(hose); + + return 0; +} + +/** + * Use the info from ACPI to init pci_controller + */ +static int sw64_pci_ecam_init(struct pci_config_window *cfg) +{ + struct pci_controller *hose = NULL; + struct device *dev = cfg->parent; + struct acpi_device *adev = to_acpi_device(dev); + phys_addr_t mcfg_addr; + int ret; + + /** + * First, check whether Root Complex is enabled. + * If Root Complex disabled, there's no need to continue. + * + * In ACPI namespace, we use _STA method to indicate + * whether Root Complex is enabled. + * + * The _STA has been checked when creating acpi_device. + * Double check here to get the latest hardware status. + */ + ret = acpi_bus_get_status(adev); + if (ret) { + dev_err(dev, "unable to retrieve _STA\n"); + return ret; + } + + if (!adev->status.present) { + dev_err(dev, "RC is not enabled\n"); + return -ENODEV; + } + + hose = kzalloc(sizeof(*hose), GFP_KERNEL); + if (!hose) { + dev_err(dev, "out of memory when alloc mem for pci_controller\n"); + return -ENOMEM; + } + + /* Get Endpoint config space base address from MCFG table */ + mcfg_addr = cfg->res.start - (cfg->busr.start << cfg->ops->bus_shift); + + /** + * "__va(mcfg_addr)" is equal to "cfg->win", so we can also use + * "hose->ep_config_space_base = cfg->win" here + */ + hose->ep_config_space_base = __va(mcfg_addr); + + /* Init pci_controller */ + ret = sw64_pci_prepare_controller(hose, adev); + if (ret) { + kfree(hose); + dev_err(&adev->dev, "failed to init pci controller\n"); + return ret; + } + + cfg->priv = (void *)hose; + + return 0; +} + +const struct pci_ecam_ops sw64_pci_ecam_ops = { + .bus_shift = 24, + .init = sw64_pci_ecam_init, + .pci_ops = { + .map_bus = sw64_pcie_map_bus, + .read = sw64_pcie_config_read, 
+ .write = sw64_pcie_config_write, + } +}; +#endif diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index ac59f602cf51..3d57d1ec15b4 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4846,7 +4846,11 @@ int pcie_flr(struct pci_dev *dev) * 100ms, but may silently discard requests while the FLR is in * progress. Wait 100ms before trying to access the device. */ +#ifdef CONFIG_SW64 + msleep(1000); +#else msleep(100); +#endif return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS); } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 7e84e472b338..5314ab5e9dc6 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -142,6 +142,7 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; flags |= IORESOURCE_MEM; + if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) flags |= IORESOURCE_PREFETCH; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index fd35ad0648a0..12ddf42b141b 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4556,6 +4556,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); +#ifndef CONFIG_SW64 /* * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same * values for the Attribute as were supplied in the header of the @@ -4612,6 +4613,7 @@ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, quirk_chelsio_T5_disable_root_port_attributes); +#endif /* * pci_acs_ctrl_enabled - compare desired ACS controls with those provided -- Gitee From 75eb449fab051cb199c2d7e231deb991ea52a5cb Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:48:27 +0800 Subject: [PATCH 0349/2138] anolis: drivers: platform: add sw64 support ANBZ: #4688 Add platform drivers for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/platform/Makefile | 1 + drivers/platform/sw64/Makefile | 2 + drivers/platform/sw64/legacy_xuelang.c | 63 ++++++++++++++++++++++++++ 3 files changed, 66 insertions(+) create mode 100644 drivers/platform/sw64/Makefile create mode 100644 drivers/platform/sw64/legacy_xuelang.c diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index 41640172975a..8296d4c41eb7 100644 --- a/drivers/platform/Makefile +++ b/drivers/platform/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_X86) += x86/ obj-$(CONFIG_LOONGARCH) += loongarch/ +obj-$(CONFIG_SW64) += sw64/ obj-$(CONFIG_MELLANOX_PLATFORM) += mellanox/ obj-$(CONFIG_MIPS) += mips/ obj-$(CONFIG_OLPC_EC) += olpc/ diff --git a/drivers/platform/sw64/Makefile b/drivers/platform/sw64/Makefile new file mode 100644 index 000000000000..28922224fb17 --- /dev/null +++ b/drivers/platform/sw64/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PLATFORM_XUELANG) += legacy_xuelang.o diff --git a/drivers/platform/sw64/legacy_xuelang.c b/drivers/platform/sw64/legacy_xuelang.c new file mode 100644 index 000000000000..8a63d9edf9f2 --- /dev/null +++ b/drivers/platform/sw64/legacy_xuelang.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include +#include + +static void vt_mode_kill_arch(int mode) +{ + hcall(HCALL_SET_CLOCKEVENT, 0, 0, 0); + + switch (mode) { + case LINUX_REBOOT_CMD_RESTART: + hcall(HCALL_RESTART, 0, 0, 0); + mb(); + break; + case LINUX_REBOOT_CMD_HALT: + case LINUX_REBOOT_CMD_POWER_OFF: + hcall(HCALL_SHUTDOWN, 0, 0, 0); + mb(); + break; + default: + break; + } +} + +void sw64_halt(void) +{ + if (is_in_host()) + cpld_write(0x64, 0x00, 0xf0); + else + vt_mode_kill_arch(LINUX_REBOOT_CMD_HALT); +} + +void sw64_poweroff(void) +{ + if (is_in_host()) + cpld_write(0x64, 0x00, 0xf0); + else + 
vt_mode_kill_arch(LINUX_REBOOT_CMD_POWER_OFF); +} + +void sw64_restart(void) +{ + if (is_in_host()) { + fix_jm585_reset(); + cpld_write(0x64, 0x00, 0xc3); + } else + vt_mode_kill_arch(LINUX_REBOOT_CMD_RESTART); +} + +static int sw64_reset_init(void) +{ +#ifdef CONFIG_EFI + if (BIOS_SUPPORT_RESET_CLALLBACK((void *)bios_version)) + return 0; +#endif + pm_restart = sw64_restart; + pm_power_off = sw64_poweroff; + pm_halt = sw64_halt; + return 0; +} +subsys_initcall(sw64_reset_init); -- Gitee From 5068ce6ce7edd8d3dc327656cb3779cea4c4a1a7 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:46:49 +0800 Subject: [PATCH 0350/2138] anolis: drivers: qemu_fw_cfg: add sw64 support ANBZ: #4688 Add qemu_fw_cfg drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/firmware/Kconfig | 2 +- drivers/firmware/qemu_fw_cfg.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index f0e9f250669e..cc4716c037a6 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -155,7 +155,7 @@ config RASPBERRYPI_FIRMWARE config FW_CFG_SYSFS tristate "QEMU fw_cfg device support in sysfs" - depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86) + depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86 || SW64) depends on HAS_IOPORT_MAP default n help diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index a69399a6b7c0..f4fea1ec3201 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -211,7 +211,7 @@ static void fw_cfg_io_cleanup(void) /* arch-specific ctrl & data register offsets are not available in ACPI, DT */ #if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF)) -# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64)) +# if (defined(CONFIG_ARM) || 
defined(CONFIG_ARM64) || defined(CONFIG_SW64)) # define FW_CFG_CTRL_OFF 0x08 # define FW_CFG_DATA_OFF 0x00 # define FW_CFG_DMA_OFF 0x10 -- Gitee From 857915648172ad8f84e0a99712d1d1a8125bd859 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:54:17 +0800 Subject: [PATCH 0351/2138] anolis: drivers: rtc: add sw64 rtc support ANBZ: #4688 Add virtual rtc drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/rtc/Kconfig | 7 +++ drivers/rtc/Makefile | 5 ++ drivers/rtc/rtc-sw64-guest.c | 54 +++++++++++++++++++ drivers/rtc/rtc-sw64-virt-platform.c | 25 +++++++++ drivers/rtc/rtc-sw64-virt.c | 77 ++++++++++++++++++++++++++++ 5 files changed, 168 insertions(+) create mode 100644 drivers/rtc/rtc-sw64-guest.c create mode 100644 drivers/rtc/rtc-sw64-virt-platform.c create mode 100644 drivers/rtc/rtc-sw64-virt.c diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 92f46a6312c2..6f270577df86 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -984,6 +984,13 @@ config RTC_DRV_ALPHA Direct support for the real-time clock found on every Alpha system, specifically MC146818 compatibles. If in doubt, say Y. +config RTC_DRV_SW64_VIRT + bool "SW64 Hypervisor based RTC" + depends on SW64 + default y + help + Get support for the Hypervisor based RTC on SW64 systems. 
+ config RTC_DRV_DS1216 tristate "Dallas DS1216" depends on SNI_RM diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index fd209883ee2e..7711f79787ac 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -10,6 +10,10 @@ obj-$(CONFIG_RTC_CLASS) += rtc-core.o obj-$(CONFIG_RTC_MC146818_LIB) += rtc-mc146818-lib.o rtc-core-y := class.o interface.o +ifdef CONFIG_RTC_DRV_SW64_VIRT +rtc-core-y += rtc-sw64-virt-platform.o +endif + rtc-core-$(CONFIG_RTC_NVMEM) += nvmem.o rtc-core-$(CONFIG_RTC_INTF_DEV) += dev.o rtc-core-$(CONFIG_RTC_INTF_PROC) += proc.o @@ -168,6 +172,7 @@ obj-$(CONFIG_RTC_DRV_ST_LPC) += rtc-st-lpc.o obj-$(CONFIG_RTC_DRV_STM32) += rtc-stm32.o obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o +obj-$(CONFIG_RTC_DRV_SW64_VIRT) += rtc-sw64-virt.o obj-$(CONFIG_RTC_DRV_SUN6I) += rtc-sun6i.o obj-$(CONFIG_RTC_DRV_SUNPLUS) += rtc-sunplus.o obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o diff --git a/drivers/rtc/rtc-sw64-guest.c b/drivers/rtc/rtc-sw64-guest.c new file mode 100644 index 000000000000..5d86ce20a1fb --- /dev/null +++ b/drivers/rtc/rtc-sw64-guest.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Lu Feifei + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#define RTC_IO_ADDR (0x804910000000ULL) + +static int sw_guest_read_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long *ioaddr; + + ioaddr = ioremap(RTC_IO_ADDR, sizeof(long)); + rtc_time64_to_tm(*ioaddr, tm); + return 0; +} + +static const struct rtc_class_ops rtc_sw_guest_ops = { + .read_time = sw_guest_read_time, +}; + +static int __init rtc_sw_guest_probe(struct platform_device *pdev) +{ + struct rtc_device *rtc; + + rtc = devm_rtc_device_register(&pdev->dev, "sw_guest", + &rtc_sw_guest_ops, THIS_MODULE); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + platform_set_drvdata(pdev, rtc); + return 0; +} + +static struct platform_driver 
rtc_sw_guest_driver = { + .driver = { + .name = "rtc_sw_guest", + }, +}; + +module_platform_driver_probe(rtc_sw_guest_driver, rtc_sw_guest_probe); + +MODULE_AUTHOR("Lu Feifei "); +MODULE_DESCRIPTION("SW GUEST RTC driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:rtc_sw_guest"); diff --git a/drivers/rtc/rtc-sw64-virt-platform.c b/drivers/rtc/rtc-sw64-virt-platform.c new file mode 100644 index 000000000000..3db9ff2f0e64 --- /dev/null +++ b/drivers/rtc/rtc-sw64-virt-platform.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +static struct platform_device rtc_sw64_virt_device = { + .name = "rtc_sw64_virt", + .id = -1, +}; + +static int __init rtc_sw64_virt_init(void) +{ + if (is_in_host()) + return 0; + + if (platform_device_register(&rtc_sw64_virt_device) < 0) + pr_err("unable to register rtc device...\n"); + /* not necessarily an error */ + return 0; +} +module_init(rtc_sw64_virt_init); diff --git a/drivers/rtc/rtc-sw64-virt.c b/drivers/rtc/rtc-sw64-virt.c new file mode 100644 index 000000000000..23c93d7ddbae --- /dev/null +++ b/drivers/rtc/rtc-sw64-virt.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +/* rtc-sw64-virt.c: Hypervisor based RTC for SW64 systems. 
+ *
+ * Copyright (C) 2021 Lu Feifei
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/io.h>
+
+#define RTC_IO_ADDR	(0x804910000000ULL)
+unsigned long vtime_old, vtime_new;
+
+static int sw64_virt_read_time(struct device *dev, struct rtc_time *tm)
+{
+	unsigned long *ioaddr;
+	unsigned long vtime_now;
+
+	ioaddr = ioremap(RTC_IO_ADDR, sizeof(long));
+	vtime_now = *ioaddr;
+	iounmap(ioaddr);	/* unmap again: remapped on every read */
+
+	/* vtime_new != 0 means set_time has recorded a time offset */
+	if (vtime_new)
+		vtime_now += vtime_new - vtime_old;
+	rtc_time64_to_tm(vtime_now, tm);
+	return 0;
+}
+
+static int sw64_virt_set_time(struct device *dev, struct rtc_time *tm)
+{
+	unsigned long *ioaddr;
+
+	ioaddr = ioremap(RTC_IO_ADDR, sizeof(long));
+	vtime_old = *ioaddr;
+	iounmap(ioaddr);
+	/* Remember requested time; hypervisor clock itself is read-only */
+	vtime_new = rtc_tm_to_time64(tm);
+	return 0;
+}
+
+static const struct rtc_class_ops rtc_sw64_virt_ops = {
+	.read_time	= sw64_virt_read_time,
+	.set_time	= sw64_virt_set_time,
+};
+
+static int __init rtc_sw64_virt_probe(struct platform_device *pdev)
+{
+	struct rtc_device *rtc;
+
+	rtc = devm_rtc_device_register(&pdev->dev, "sw64_virt",
+				&rtc_sw64_virt_ops, THIS_MODULE);
+	if (IS_ERR(rtc))
+		return PTR_ERR(rtc);
+
+	platform_set_drvdata(pdev, rtc);
+	return 0;
+}
+
+static struct platform_driver rtc_sw64_virt_driver = {
+	.driver = {
+		.name = "rtc_sw64_virt",
+	},
+};
+
+module_platform_driver_probe(rtc_sw64_virt_driver, rtc_sw64_virt_probe);
+
+MODULE_AUTHOR("Lu Feifei");
+MODULE_DESCRIPTION("Sunway virtual RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rtc_sw64_virt");
--
Gitee

From 327aecdb73eff851f1a4e288a75a4cbdb8e361dc Mon Sep 17 00:00:00 2001
From: Mao Minkai
Date: Thu, 18 Jan 2024 16:56:09 +0800
Subject: [PATCH 0352/2138] anolis: drivers: scsi: add sw64 support

ANBZ: #4688

Add scsi drivers for SW64.
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/scsi/megaraid/megaraid_sas_fusion.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 8a83f3fc2b86..d4b97f0a5013 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -3589,6 +3589,14 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex, d_val.u.high != cpu_to_le32(UINT_MAX)) { smid = le16_to_cpu(reply_desc->SMID); +#ifdef CONFIG_SUBARCH_C3B + if (smid == 0xffff) { + smid = d_val.u.low >> 16; + if (smid == 0xffff) + break; + } +#endif + cmd_fusion = fusion->cmd_list[smid - 1]; scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) cmd_fusion->io_request; -- Gitee From fa2d3722154a648536af05f2a7cbf31377cf89bf Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:53:34 +0800 Subject: [PATCH 0353/2138] anolis: drivers: spi: add sw64 support ANBZ: #4688 Add spi drivers for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/spi/Kconfig | 6 + drivers/spi/Makefile | 1 + drivers/spi/spi-chip3-mmio.c | 147 +++++++++++++ drivers/spi/spi-chip3.c | 404 +++++++++++++++++++++++++++++++++++ drivers/spi/spi-chip3.h | 245 +++++++++++++++++++++ 5 files changed, 803 insertions(+) create mode 100644 drivers/spi/spi-chip3-mmio.c create mode 100644 drivers/spi/spi-chip3.c create mode 100644 drivers/spi/spi-chip3.h diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 3ce0fd5df8e9..60826b7ed21e 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -1179,6 +1179,12 @@ config SPI_AMD # # Add new SPI master controllers in alphabetical order above this line # +config SPI_CHIP3 + tristate "Memory-mapped io interface driver for SUNWAY CHIP3 SPI core" + depends on UNCORE_XUELANG + help + general driver for SPI controller core from DesignWare + comment "SPI Multiplexer support" diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 6af54842b9fa..26bf16fcf890 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -48,6 +48,7 @@ spi-dw-y := spi-dw-core.o spi-dw-$(CONFIG_SPI_DW_DMA) += spi-dw-dma.o obj-$(CONFIG_SPI_DW_BT1) += spi-dw-bt1.o obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o +obj-$(CONFIG_SPI_CHIP3) += spi-chip3.o spi-chip3-mmio.o obj-$(CONFIG_SPI_DW_PCI) += spi-dw-pci.o obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o obj-$(CONFIG_SPI_FALCON) += spi-falcon.o diff --git a/drivers/spi/spi-chip3-mmio.c b/drivers/spi/spi-chip3-mmio.c new file mode 100644 index 000000000000..a907f13d4ae5 --- /dev/null +++ b/drivers/spi/spi-chip3-mmio.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Memory-mapped interface driver for SUNWAY CHIP3 SPI Core + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include 
"spi-chip3.h" + +#define DRIVER_NAME "sunway_chip3_spi" + +struct chip3_spi_mmio { + struct chip3_spi dws; + struct clk *clk; + void *priv; +}; + +static int chip3_spi_mmio_probe(struct platform_device *pdev) +{ + int (*init_func)(struct platform_device *pdev, + struct chip3_spi_mmio *dwsmmio); + struct chip3_spi_mmio *dwsmmio; + struct chip3_spi *dws; + struct resource *mem; + int ret; + int num_cs; + + dwsmmio = devm_kzalloc(&pdev->dev, sizeof(struct chip3_spi_mmio), + GFP_KERNEL); + if (!dwsmmio) + return -ENOMEM; + + dws = &dwsmmio->dws; + + /* Get basic io resource and map it */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dws->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(dws->regs)) { + dev_err(&pdev->dev, "SPI region map failed\n"); + return PTR_ERR(dws->regs); + } + + dws->irq = platform_get_irq(pdev, 0); + if (dws->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + return dws->irq; /* -ENXIO */ + } + + dwsmmio->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(dwsmmio->clk)) + return PTR_ERR(dwsmmio->clk); + ret = clk_prepare_enable(dwsmmio->clk); + if (ret) + return ret; + + dws->bus_num = pdev->id; + dws->max_freq = clk_get_rate(dwsmmio->clk); + + device_property_read_u32(&pdev->dev, "reg-io-width", + &dws->reg_io_width); + + num_cs = 4; + device_property_read_u32(&pdev->dev, "num-cs", &num_cs); + dws->num_cs = num_cs; + + if (pdev->dev.of_node) { + int i; + + for (i = 0; i < dws->num_cs; i++) { + int cs_gpio = of_get_named_gpio(pdev->dev.of_node, + "cs-gpios", i); + + if (cs_gpio == -EPROBE_DEFER) { + ret = cs_gpio; + goto out; + } + + if (gpio_is_valid(cs_gpio)) { + ret = devm_gpio_request(&pdev->dev, cs_gpio, + dev_name(&pdev->dev)); + if (ret) + goto out; + } + } + } + + init_func = device_get_match_data(&pdev->dev); + if (init_func) { + ret = init_func(pdev, dwsmmio); + if (ret) + goto out; + } + + ret = chip3_spi_add_host(&pdev->dev, dws); + if (ret) + goto out; + + platform_set_drvdata(pdev, dwsmmio); + + return 
0; +out: + clk_disable_unprepare(dwsmmio->clk); + return ret; +} + +static int chip3_spi_mmio_remove(struct platform_device *pdev) +{ + struct chip3_spi_mmio *dwsmmio = platform_get_drvdata(pdev); + + chip3_spi_remove_host(&dwsmmio->dws); + clk_disable_unprepare(dwsmmio->clk); + + return 0; +} + +static const struct of_device_id chip3_spi_mmio_of_match[] = { + { .compatible = "sunway,chip3-spi", }, + { /* end of table */} +}; +MODULE_DEVICE_TABLE(of, chip3_spi_mmio_of_match); + +static struct platform_driver chip3_spi_mmio_driver = { + .probe = chip3_spi_mmio_probe, + .remove = chip3_spi_mmio_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = chip3_spi_mmio_of_match, + }, +}; +module_platform_driver(chip3_spi_mmio_driver); + +MODULE_AUTHOR("Platform@wxiat.com"); +MODULE_DESCRIPTION("Memory-mapped I/O interface driver for Sunway CHIP3"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-chip3.c b/drivers/spi/spi-chip3.c new file mode 100644 index 000000000000..8186c84eca8c --- /dev/null +++ b/drivers/spi/spi-chip3.c @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SUNWAY CHIP3 SPI core controller driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-chip3.h" + +/* Slave spi_dev related */ +struct chip_data { + u8 tmode; /* TR/TO/RO/EEPROM */ + u8 type; /* SPI/SSP/MicroWire */ + + u8 poll_mode; /* 1 means use poll mode */ + + u16 clk_div; /* baud rate divider */ + u32 speed_hz; /* baud rate */ + void (*cs_control)(u32 command); +}; + +static void chip3_spi_handle_err(struct spi_controller *master, + struct spi_message *msg) +{ + struct chip3_spi *dws = spi_controller_get_devdata(master); + + spi_reset_chip(dws); +} + +static size_t chip3_spi_max_length(struct spi_device *spi) +{ + struct chip3_spi *dws = spi_controller_get_devdata(spi->master); + + return dws->fifo_len; +} + +static int chip3_spi_transfer_one_message(struct spi_controller 
*master,
+					  struct spi_message *m)
+{
+	struct chip3_spi *dws = spi_controller_get_devdata(master);
+	struct spi_transfer *t = NULL;
+	u16 clk_div;
+	u32 freq;
+	u32 speed_hz;
+	u32 status;
+	u32 len = 0;
+	int ret = 0;
+	int i = 0;
+
+	spi_enable_chip(dws, 0);
+
+	/* Handle per transfer options for bpw and speed. */
+	freq = clamp(m->spi->max_speed_hz, 0U, dws->max_freq);
+	clk_div = (DIV_ROUND_UP(dws->max_freq, freq) + 1) & 0xfffe;
+	speed_hz = dws->max_freq / clk_div;
+
+	if (dws->current_freq != speed_hz) {
+		spi_set_clk(dws, clk_div);
+		dws->current_freq = speed_hz;
+	}
+
+	dws->n_bytes = 1;
+
+	/* For poll mode just disable all interrupts */
+	spi_mask_intr(dws, 0xff);
+
+	chip3_writel(dws, CHIP3_SPI_CTRL0, SPI_TRANSMIT_RECEIVE);
+
+	spi_enable_chip(dws, 1);
+
+	list_for_each_entry(t, &m->transfers, transfer_list) {
+		len += t->len;
+		/* Whole message must fit in the FIFO (no refill in poll mode) */
+		if (len > dws->fifo_len) {
+			pr_err("SPI transfer overflow.\n");
+			m->actual_length = 0;
+			m->status = -EIO;
+			ret = -EIO;
+			goto way_out;
+		}
+		/* Stage at this transfer's start offset; len already advanced */
+		if (t->tx_buf)
+			memcpy(&dws->buf[len - t->len], t->tx_buf, t->len);
+		else
+			memset(&dws->buf[len - t->len], 0, t->len);
+	}
+
+	chip3_writel(dws, CHIP3_SPI_SER, 0x0);
+	for (i = 0; i < len; i++)
+		chip3_writel(dws, CHIP3_SPI_DR, dws->buf[i]);
+	chip3_writel(dws, CHIP3_SPI_SER, BIT(m->spi->chip_select));
+
+	do {
+		status = chip3_readl(dws, CHIP3_SPI_SR);
+	} while (status & SR_BUSY);
+
+	list_for_each_entry(t, &m->transfers, transfer_list) {
+		if (t->rx_buf) {
+			for (i = 0; i < t->len; i++, t->rx_buf += 1)
+				*(u8 *)t->rx_buf = chip3_readl(dws, CHIP3_SPI_DR);
+		} else {
+			for (i = 0; i < t->len; i++)
+				chip3_readl(dws, CHIP3_SPI_DR);
+		}
+	}
+
+	m->actual_length = len;
+	m->status = 0;
+	spi_finalize_current_message(master);
+
+way_out:
+	return ret;
+}
+
+static int chip3_spi_adjust_mem_op_size(struct spi_mem *mem,
+					struct spi_mem_op *op)
+{
+	struct chip3_spi *dws = spi_controller_get_devdata(mem->spi->controller);
+	size_t len;
+
+	len = sizeof(op->cmd.opcode) +
op->addr.nbytes + op->dummy.nbytes; + + op->data.nbytes = min((size_t)op->data.nbytes, (dws->fifo_len - len)); + if (!op->data.nbytes) + return -EINVAL; + + return 0; +} + +static int chip3_spi_init_mem_buf(struct chip3_spi *dws, + const struct spi_mem_op *op) +{ + int ret = 0; + int i, j, len; + + /* Calculate the total length of the transfer. */ + len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes; + + /* Judge if data is overflow */ + if (len + op->data.nbytes > dws->fifo_len) { + ret = -EIO; + goto way_out; + } + + /* + * Collect the operation code, address and dummy bytes into the single + * buffer. If it's a transfer with data to be sent, also copy it into + * the single buffer. + */ + for (i = 0; i < sizeof(op->cmd.opcode); i++) + dws->buf[i] = op->cmd.opcode; + for (j = 0; j < op->addr.nbytes; i++, j++) + dws->buf[i] = op->addr.val >> (8 * (op->addr.nbytes - i)); + for (j = 0; j < op->dummy.nbytes; i++, j++) + dws->buf[i] = 0xff; + + if (op->data.dir == SPI_MEM_DATA_OUT) { + memcpy(&dws->buf[i], op->data.buf.out, op->data.nbytes); + len += op->data.nbytes; + } + + dws->tx_len = len; + + if (op->data.dir == SPI_MEM_DATA_IN) { + dws->rx = op->data.buf.in; + dws->rx_len = op->data.nbytes; + } else { + dws->rx = NULL; + dws->rx_len = 0; + } + +way_out: + return ret; +} + +static int chip3_spi_exec_mem_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct chip3_spi *dws = spi_controller_get_devdata(mem->spi->controller); + u16 clk_div; + int ret = 0; + int i; + unsigned short value; + u32 freq; + u32 speed_hz; + + ret = chip3_spi_init_mem_buf(dws, op); + if (ret) + return ret; + + spi_enable_chip(dws, 0); + + /* Handle per transfer options for bpw and speed. 
*/ + freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_freq); + clk_div = (DIV_ROUND_UP(dws->max_freq, freq) + 1) & 0xfffe; + speed_hz = dws->max_freq / clk_div; + + if (dws->current_freq != speed_hz) { + spi_set_clk(dws, clk_div); + dws->current_freq = speed_hz; + } + + dws->n_bytes = 1; + + /* For poll mode just disable all interrupts */ + spi_mask_intr(dws, 0xff); + + if ((dws->tx_len != 0) && (dws->rx_len != 0)) { + chip3_writel(dws, CHIP3_SPI_CTRL0, SPI_EEPROM_READ); + chip3_writel(dws, CHIP3_SPI_CTRL1, (dws->rx_len - 1)); + } else { + chip3_writel(dws, CHIP3_SPI_CTRL0, SPI_TRANSMIT_ONLY); + } + + spi_enable_chip(dws, 1); + + chip3_writel(dws, CHIP3_SPI_SER, 0x0); + for (i = 0; i < dws->tx_len; i++) + chip3_writel(dws, CHIP3_SPI_DR, dws->buf[i]); + chip3_writel(dws, CHIP3_SPI_SER, BIT(mem->spi->chip_select)); + + value = chip3_readl(dws, CHIP3_SPI_SR); + while (value & SR_BUSY) + value = chip3_readl(dws, CHIP3_SPI_SR); + + for (i = 0; i < dws->rx_len; dws->rx += dws->n_bytes, i++) + *(u8 *)dws->rx = chip3_readl(dws, CHIP3_SPI_DR); + + return ret; +} + +/* This may be called twice for each spi dev */ +static int chip3_spi_setup(struct spi_device *spi) +{ + struct chip3_spi_chip *chip_info = NULL; + struct chip_data *chip; + u32 poll_mode = 0; + struct device_node *np = spi->dev.of_node; + + /* Only alloc on first setup */ + chip = spi_get_ctldata(spi); + if (!chip) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) + return -ENOMEM; + spi_set_ctldata(spi, chip); + } + + /* + * Protocol drivers may change the chip settings, so... 
+ * if chip_info exists, use it + */ + chip_info = spi->controller_data; + + /* chip_info doesn't always exist */ + if (chip_info) { + if (chip_info->cs_control) + chip->cs_control = chip_info->cs_control; + + chip->poll_mode = chip_info->poll_mode; + chip->type = chip_info->type; + } else { + if (np) { + of_property_read_u32(np, "poll_mode", &poll_mode); + chip->poll_mode = poll_mode; + } + + } + + chip->tmode = SPI_TMOD_TR; + return 0; +} + +static void chip3_spi_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata(spi); + + kfree(chip); + spi_set_ctldata(spi, NULL); +} + +/* Restart the controller, disable all interrupts, clean rx fifo */ +static void spi_hw_init(struct device *dev, struct chip3_spi *dws) +{ + spi_reset_chip(dws); + + /* + * Try to detect the FIFO depth if not set by interface driver, + * the depth could be from 2 to 256 from HW spec + */ + if (!dws->fifo_len) { + u32 fifo; + + for (fifo = 1; fifo < 256; fifo++) { + chip3_writel(dws, CHIP3_SPI_TXFLTR, fifo); + if (fifo != chip3_readl(dws, CHIP3_SPI_TXFLTR)) + break; + } + chip3_writel(dws, CHIP3_SPI_TXFLTR, 0); + + dws->fifo_len = (fifo == 1) ? 
0 : fifo;
+		dev_info(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
+	}
+}
+
+static const struct spi_controller_mem_ops chip3_mem_ops = {
+	.adjust_op_size = chip3_spi_adjust_mem_op_size,
+	.exec_op = chip3_spi_exec_mem_op,
+};
+
+
+int chip3_spi_add_host(struct device *dev, struct chip3_spi *dws)
+{
+	struct spi_controller *master;
+	int ret;
+
+	BUG_ON(dws == NULL);
+
+	master = spi_alloc_master(dev, 0);
+	if (!master)
+		return -ENOMEM;
+
+	dws->master = master;
+	dws->type = SSI_MOTO_SPI;
+
+	spi_controller_set_devdata(master, dws);
+
+	master->mode_bits = SPI_CPOL | SPI_CPHA;
+	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+	master->bus_num = dws->bus_num;
+	master->num_chipselect = dws->num_cs;
+	master->setup = chip3_spi_setup;
+	master->cleanup = chip3_spi_cleanup;
+	master->transfer_one_message = chip3_spi_transfer_one_message;
+	master->handle_err = chip3_spi_handle_err;
+	master->max_speed_hz = dws->max_freq;
+	master->dev.of_node = dev->of_node;
+	master->flags = SPI_CONTROLLER_GPIO_SS;
+	master->max_transfer_size = chip3_spi_max_length;
+	master->max_message_size = chip3_spi_max_length;
+
+	master->mem_ops = &chip3_mem_ops;
+
+	/* Basic HW init */
+	spi_hw_init(dev, dws);
+
+	ret = devm_spi_register_controller(dev, master);
+	if (ret) {
+		dev_err(&master->dev, "problem registering spi master\n");
+		spi_enable_chip(dws, 0);
+		free_irq(dws->irq, master);
+	}
+
+	return ret;	/* 0 on success, registration error otherwise */
+}
+EXPORT_SYMBOL_GPL(chip3_spi_add_host);
+
+void chip3_spi_remove_host(struct chip3_spi *dws)
+{
+	spi_shutdown_chip(dws);
+
+	free_irq(dws->irq, dws->master);
+}
+EXPORT_SYMBOL_GPL(chip3_spi_remove_host);
+
+int chip3_spi_suspend_host(struct chip3_spi *dws)
+{
+	int ret;
+
+	ret = spi_controller_suspend(dws->master);
+	if (ret)
+		return ret;
+
+	spi_shutdown_chip(dws);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(chip3_spi_suspend_host);
+
+int chip3_spi_resume_host(struct chip3_spi *dws)
+{
+	int ret;
+
+	spi_hw_init(&dws->master->dev, dws);
+	ret =
spi_controller_resume(dws->master); + if (ret) + dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); + return ret; +} +EXPORT_SYMBOL_GPL(chip3_spi_resume_host); + +MODULE_AUTHOR("Platform@wxiat.com"); +MODULE_DESCRIPTION("Driver for Sunway CHIP3 SPI controller core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-chip3.h b/drivers/spi/spi-chip3.h new file mode 100644 index 000000000000..88e49a9091a5 --- /dev/null +++ b/drivers/spi/spi-chip3.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef CHIP3_SPI_HEADER_H +#define CHIP3_SPI_HEADER_H + +#include +#include +#include +#include + +/* Register offsets */ +#define CHIP3_SPI_CTRL0 (0x00<<7) +#define CHIP3_SPI_CTRL1 (0x04<<7) +#define CHIP3_SPI_SSIENR (0x08<<7) +#define CHIP3_SPI_MWCR (0x0c<<7) +#define CHIP3_SPI_SER (0x10<<7) +#define CHIP3_SPI_BAUDR (0x14<<7) +#define CHIP3_SPI_TXFLTR (0x18<<7) +#define CHIP3_SPI_RXFLTR (0x1c<<7) +#define CHIP3_SPI_TXFLR (0x20<<7) +#define CHIP3_SPI_RXFLR (0x24<<7) +#define CHIP3_SPI_SR (0x28<<7) +#define CHIP3_SPI_IMR (0x2c<<7) +#define CHIP3_SPI_ISR (0x30<<7) +#define CHIP3_SPI_RISR (0x34<<7) +#define CHIP3_SPI_TXOICR (0x38<<7) +#define CHIP3_SPI_RXOICR (0x3c<<7) +#define CHIP3_SPI_RXUICR (0x40<<7) +#define CHIP3_SPI_MSTICR (0x44<<7) +#define CHIP3_SPI_ICR (0x48<<7) +#define CHIP3_SPI_DMACR (0x4c<<7) +#define CHIP3_SPI_DMATDLR (0x50<<7) +#define CHIP3_SPI_DMARDLR (0x54<<7) +#define CHIP3_SPI_IDR (0x58<<7) +#define CHIP3_SPI_VERSION (0x5c<<7) +#define CHIP3_SPI_DR (0x60<<7) + +/* Bit fields in CTRLR0 */ +#define SPI_DFS_OFFSET 0 + +#define SPI_FRF_OFFSET 4 +#define SPI_FRF_SPI 0x0 +#define SPI_FRF_SSP 0x1 +#define SPI_FRF_MICROWIRE 0x2 +#define SPI_FRF_RESV 0x3 + +#define SPI_MODE_OFFSET 6 +#define SPI_SCPH_OFFSET 6 +#define SPI_SCOL_OFFSET 7 + +#define SPI_TMOD_OFFSET 8 +#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET) +#define SPI_TMOD_TR 0x0 /* xmit & recv */ +#define SPI_TMOD_TO 0x1 /* xmit only */ +#define SPI_TMOD_RO 0x2 /* recv only */ 
+#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */ + +#define SPI_SLVOE_OFFSET 10 +#define SPI_SRL_OFFSET 11 +#define SPI_CFS_OFFSET 12 + +/* Bit fields in SR, 7 bits */ +#define SR_MASK 0x7f /* cover 7 bits */ +#define SR_BUSY (1 << 0) +#define SR_TF_NOT_FULL (1 << 1) +#define SR_TF_EMPT (1 << 2) +#define SR_RF_NOT_EMPT (1 << 3) +#define SR_RF_FULL (1 << 4) +#define SR_TX_ERR (1 << 5) +#define SR_DCOL (1 << 6) + +/* Bit fields in ISR, IMR, RISR, 7 bits */ +#define SPI_INT_TXEI (1 << 0) +#define SPI_INT_TXOI (1 << 1) +#define SPI_INT_RXUI (1 << 2) +#define SPI_INT_RXOI (1 << 3) +#define SPI_INT_RXFI (1 << 4) +#define SPI_INT_MSTI (1 << 5) + +/* Bit fields in DMACR */ +#define SPI_DMA_RDMAE (1 << 0) +#define SPI_DMA_TDMAE (1 << 1) + +/* TX RX interrupt level threshold, max can be 256 */ +#define SPI_INT_THRESHOLD 32 + +/* The depth of the FIFO buffer is 256, so the max transfer length is 256. */ +#define MAX_LEN 256 + +/* The mode of spi controller. */ +#define SPI_TRANSMIT_RECEIVE 0x0c7 +#define SPI_EEPROM_READ 0x3c7 +#define SPI_TRANSMIT_ONLY 0x1c7 + +enum chip3_ssi_type { + SSI_MOTO_SPI = 0, + SSI_TI_SSP, + SSI_NS_MICROWIRE, +}; + +struct chip3_spi; + +struct chip3_spi { + struct spi_controller *master; + enum chip3_ssi_type type; + + void __iomem *regs; + unsigned long paddr; + int irq; + u32 fifo_len; /* depth of the FIFO buffer */ + u32 max_freq; /* max bus freq supported */ + + u32 reg_io_width; /* DR I/O width in bytes */ + u16 bus_num; + u16 num_cs; /* supported slave numbers */ + void (*set_cs)(struct spi_device *spi, bool enable); + + /* Current message transfer state info */ + size_t len; + void *tx; + unsigned int tx_len; + void *rx; + unsigned int rx_len; + u8 n_bytes; /* current is a 1/2 bytes op */ + u32 current_freq; /* frequency in hz */ + + u8 buf[MAX_LEN]; + + /* Bus interface info */ + void *priv; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs; +#endif +}; + +static inline u32 chip3_readl(struct chip3_spi *dws, u32 offset) +{ + return 
__raw_readl(dws->regs + offset); +} + +static inline u16 chip3_readw(struct chip3_spi *dws, u32 offset) +{ + return __raw_readw(dws->regs + offset); +} + +static inline void chip3_writel(struct chip3_spi *dws, u32 offset, u32 val) +{ + __raw_writel(val, dws->regs + offset); +} + +static inline void chip3_writew(struct chip3_spi *dws, u32 offset, u16 val) +{ + __raw_writew(val, dws->regs + offset); +} + +static inline u32 chip3_read_io_reg(struct chip3_spi *dws, u32 offset) +{ + switch (dws->reg_io_width) { + case 2: + return chip3_readw(dws, offset); + case 4: + default: + return chip3_readl(dws, offset); + } +} + +static inline void chip3_write_io_reg(struct chip3_spi *dws, u32 offset, u32 val) +{ + switch (dws->reg_io_width) { + case 2: + chip3_writew(dws, offset, val); + break; + case 4: + default: + chip3_writel(dws, offset, val); + break; + } +} + +static inline void spi_enable_chip(struct chip3_spi *dws, int enable) +{ + chip3_writel(dws, CHIP3_SPI_SSIENR, (enable ? 1 : 0)); +} + +static inline void spi_set_clk(struct chip3_spi *dws, u16 div) +{ + chip3_writel(dws, CHIP3_SPI_BAUDR, div); +} + +/* Disable IRQ bits */ +static inline void spi_mask_intr(struct chip3_spi *dws, u32 mask) +{ + u32 new_mask; + + new_mask = chip3_readl(dws, CHIP3_SPI_IMR) & ~mask; + chip3_writel(dws, CHIP3_SPI_IMR, new_mask); +} + +/* Enable IRQ bits */ +static inline void spi_umask_intr(struct chip3_spi *dws, u32 mask) +{ + u32 new_mask; + + new_mask = chip3_readl(dws, CHIP3_SPI_IMR) | mask; + chip3_writel(dws, CHIP3_SPI_IMR, new_mask); +} + +/* + * This does disable the SPI controller, interrupts, and re-enable the + * controller back. Transmit and receive FIFO buffers are cleared when the + * device is disabled. 
+ */ +static inline void spi_reset_chip(struct chip3_spi *dws) +{ + spi_enable_chip(dws, 0); + spi_mask_intr(dws, 0xff); + spi_enable_chip(dws, 1); +} + +static inline void spi_shutdown_chip(struct chip3_spi *dws) +{ + spi_enable_chip(dws, 0); + spi_set_clk(dws, 0); +} + +/* + * Each SPI slave device to work with chip3_api controller should + * has such a structure claiming its working mode (poll or PIO/DMA), + * which can be save in the "controller_data" member of the + * struct spi_device. + */ +struct chip3_spi_chip { + u8 poll_mode; /* 1 for controller polling mode */ + u8 type; /* SPI/SSP/MicroWire */ + u8 chip_select; + void (*cs_control)(u32 command); +}; + +extern int chip3_spi_add_host(struct device *dev, struct chip3_spi *dws); +extern void chip3_spi_remove_host(struct chip3_spi *dws); +extern int chip3_spi_suspend_host(struct chip3_spi *dws); +extern int chip3_spi_resume_host(struct chip3_spi *dws); + +/* platform related setup */ +extern int chip3_spi_mid_init(struct chip3_spi *dws); /* Intel MID platforms */ +#endif /* CHIP3_SPI_HEADER_H */ -- Gitee From f71d4006ff044d1ed859258de8107372eff0d856 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:53:47 +0800 Subject: [PATCH 0354/2138] anolis: drivers: tty: add sw64 support ANBZ: #4688 Add tty drivers for SW64. 
Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/tty/serial/8250/8250_sunway.c | 786 ++++++++++++++++++++++++++ drivers/tty/serial/8250/Kconfig | 7 + drivers/tty/serial/8250/Makefile | 1 + 3 files changed, 794 insertions(+) create mode 100644 drivers/tty/serial/8250/8250_sunway.c diff --git a/drivers/tty/serial/8250/8250_sunway.c b/drivers/tty/serial/8250/8250_sunway.c new file mode 100644 index 000000000000..9e3db232c832 --- /dev/null +++ b/drivers/tty/serial/8250/8250_sunway.c @@ -0,0 +1,786 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Synopsys SUNWAY 8250 driver. + * + * Copyright 2011 Picochip, Jamie Iles. + * Copyright 2013 Intel Corporation + * + * The Synopsys SUNWAY 8250 has an extra feature whereby it detects if the + * LCR is written whilst busy. If it is, then a busy detect interrupt is + * raised, the LCR needs to be rewritten and the uart status register read. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "8250.h" + +/* Offsets for the DesignWare specific registers */ +#define SUNWAY_UART_USR 0x1f /* UART Status Register */ +#define SUNWAY_UART_DLF 0xc0 /* Divisor Latch Fraction Register */ +#define SUNWAY_UART_CPR 0xf4 /* Component Parameter Register */ +#define SUNWAY_UART_UCV 0xf8 /* UART Component Version */ + +/* Component Parameter Register bits */ +#define SUNWAY_UART_CPR_ABP_DATA_WIDTH (3 << 0) +#define SUNWAY_UART_CPR_AFCE_MODE (1 << 4) +#define SUNWAY_UART_CPR_THRE_MODE (1 << 5) +#define SUNWAY_UART_CPR_SIR_MODE (1 << 6) +#define SUNWAY_UART_CPR_SIR_LP_MODE (1 << 7) +#define SUNWAY_UART_CPR_ADDITIONAL_FEATURES (1 << 8) +#define SUNWAY_UART_CPR_FIFO_ACCESS (1 << 9) +#define SUNWAY_UART_CPR_FIFO_STAT (1 << 10) +#define SUNWAY_UART_CPR_SHADOW (1 << 11) +#define SUNWAY_UART_CPR_ENCODED_PARMS (1 << 12) +#define SUNWAY_UART_CPR_DMA_EXTRA (1 << 13) +#define SUNWAY_UART_CPR_FIFO_MODE (0xff << 16) +/* Helper for fifo size calculation */ +#define SUNWAY_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16) + +/* DesignWare specific register fields */ +#define SUNWAY_UART_MCR_SIRE BIT(6) + +struct sunway8250_data { + u8 usr_reg; + u8 dlf_size; + int line; + int msr_mask_on; + int msr_mask_off; + struct clk *clk; + struct clk *pclk; + struct reset_control *rst; + struct uart_8250_dma dma; + + unsigned int skip_autocfg:1; + unsigned int uart_16550_compatible:1; +}; + +static inline u32 sunway8250_readl_ext(struct uart_port *p, int offset) +{ + if (p->iotype == UPIO_MEM32BE) + return ioread32be(p->membase + offset); + return readl(p->membase + offset); +} + +static inline void sunway8250_writel_ext(struct uart_port *p, int offset, u32 reg) +{ + if (p->iotype == UPIO_MEM32BE) + iowrite32be(reg, p->membase + offset); + else + writel(reg, p->membase + offset); +} + +static inline int 
sunway8250_modify_msr(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + /* Override any modem control signals if needed */ + if (offset == UART_MSR) { + value |= d->msr_mask_on; + value &= ~d->msr_mask_off; + } + + return value; +} + +static void sunway8250_force_idle(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + + serial8250_clear_and_reinit_fifos(up); + (void)p->serial_in(p, UART_RX); +} + +static void sunway8250_check_lcr(struct uart_port *p, int value) +{ + void __iomem *offset = p->membase + (UART_LCR << p->regshift); + int tries = 1000; + + /* Make sure LCR write wasn't ignored */ + while (tries--) { + unsigned int lcr = p->serial_in(p, UART_LCR); + + if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) + return; + + sunway8250_force_idle(p); + +#ifdef CONFIG_64BIT + if (p->type == PORT_OCTEON) { + __raw_writeq(value & 0xff, offset); + continue; + } +#endif + if (p->iotype == UPIO_MEM32) + writel(value, offset); + else if (p->iotype == UPIO_MEM32BE) + iowrite32be(value, offset); + else + writeb(value, offset); + } + /* + * FIXME: this deadlocks if port->lock is already held + * dev_err(p->dev, "Couldn't set LCR to %d\n", value); + */ +} + +/* Returns once the transmitter is empty or we run out of retries */ +static void sunway8250_tx_wait_empty(struct uart_port *p) +{ + unsigned int tries = 20000; + unsigned int delay_threshold = tries - 1000; + unsigned int lsr; + + while (tries--) { + lsr = readb(p->membase + (UART_LSR << p->regshift)); + if (lsr & UART_LSR_TEMT) + break; + + /* + * The device is first given a chance to empty without delay, + * to avoid slowdowns at high bitrates. If after 1000 tries + * the buffer has still not emptied, allow more time for low- + * speed links. 
+ */ + if (tries < delay_threshold) + udelay(1); + } +} + +static void sunway8250_serial_out38x(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + /* Allow the TX to drain before we reconfigure */ + if (offset == UART_LCR) + sunway8250_tx_wait_empty(p); + + writeb(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + + +static void sunway8250_serial_out(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + writeb(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + +static unsigned int sunway8250_serial_in(struct uart_port *p, int offset) +{ + unsigned int value = readb(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + +#ifdef CONFIG_64BIT +static unsigned int sunway8250_serial_inq(struct uart_port *p, int offset) +{ + unsigned int value; + + value = (u8)__raw_readq(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + +static void sunway8250_serial_outq(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + value &= 0xff; + __raw_writeq(value, p->membase + (offset << p->regshift)); + /* Read back to ensure register write ordering. 
*/ + __raw_readq(p->membase + (UART_LCR << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} +#endif /* CONFIG_64BIT */ + +static void sunway8250_serial_out32(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + writel(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + +static unsigned int sunway8250_serial_in32(struct uart_port *p, int offset) +{ + unsigned int value = readl(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + +static void sunway8250_serial_out32be(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + iowrite32be(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + +static unsigned int sunway8250_serial_in32be(struct uart_port *p, int offset) +{ + unsigned int value = ioread32be(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + + +static int sunway8250_handle_irq(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + struct sunway8250_data *d = p->private_data; + unsigned int iir = p->serial_in(p, UART_IIR); + unsigned int status; + unsigned long flags; + + /* + * There are ways to get Designware-based UARTs into a state where + * they are asserting UART_IIR_RX_TIMEOUT but there is no actual + * data available. If we see such a case then we'll do a bogus + * read. If we don't do this then the "RX TIMEOUT" interrupt will + * fire forever. + * + * This problem has only been observed so far when not in DMA mode + * so we limit the workaround only to non-DMA mode. 
+ */ + if (!up->dma && ((iir & 0x3f) == UART_IIR_RX_TIMEOUT)) { + spin_lock_irqsave(&p->lock, flags); + status = p->serial_in(p, UART_LSR); + + if (!(status & (UART_LSR_DR | UART_LSR_BI))) + (void) p->serial_in(p, UART_RX); + + spin_unlock_irqrestore(&p->lock, flags); + } + + if (serial8250_handle_irq(p, iir)) + return 1; + + if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) { + /* Clear the USR */ + (void)p->serial_in(p, d->usr_reg); + + return 1; + } + + return 0; +} + +static void +sunway8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old) +{ + if (!state) + pm_runtime_get_sync(port->dev); + + serial8250_do_pm(port, state, old); + + if (state) + pm_runtime_put_sync_suspend(port->dev); +} + +static void sunway8250_set_termios(struct uart_port *p, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud = tty_termios_baud_rate(termios); + struct sunway8250_data *d = p->private_data; + long rate; + int ret; + + if (IS_ERR(d->clk)) + goto out; + + clk_disable_unprepare(d->clk); + rate = clk_round_rate(d->clk, baud * 16); + if (rate < 0) + ret = rate; + else if (rate == 0) + ret = -ENOENT; + else + ret = clk_set_rate(d->clk, rate); + clk_prepare_enable(d->clk); + + if (!ret) + p->uartclk = rate; + +out: + p->status &= ~UPSTAT_AUTOCTS; + if (termios->c_cflag & CRTSCTS) + p->status |= UPSTAT_AUTOCTS; + + serial8250_do_set_termios(p, termios, old); +} + +static void sunway8250_set_ldisc(struct uart_port *p, struct ktermios *termios) +{ + struct uart_8250_port *up = up_to_u8250p(p); + unsigned int mcr = p->serial_in(p, UART_MCR); + + if (up->capabilities & UART_CAP_IRDA) { + if (termios->c_line == N_IRDA) + mcr |= SUNWAY_UART_MCR_SIRE; + else + mcr &= ~SUNWAY_UART_MCR_SIRE; + + p->serial_out(p, UART_MCR, mcr); + } + serial8250_do_set_ldisc(p, termios); +} + +/* + * sunway8250_fallback_dma_filter will prevent the UART from getting just any free + * channel on platforms that have DMA engines, but don't have any channels + * assigned to 
the UART. + * + * REVISIT: This is a work around for limitation in the DMA Engine API. Once the + * core problem is fixed, this function is no longer needed. + */ +static bool sunway8250_fallback_dma_filter(struct dma_chan *chan, void *param) +{ + return false; +} + +static bool sunway8250_idma_filter(struct dma_chan *chan, void *param) +{ + return param == chan->device->dev; +} + +/* + * divisor = div(I) + div(F) + * "I" means integer, "F" means fractional + * quot = div(I) = clk / (16 * baud) + * frac = div(F) * 2^dlf_size + * + * let rem = clk % (16 * baud) + * we have: div(F) * (16 * baud) = rem + * so frac = 2^dlf_size * rem / (16 * baud) = (rem << dlf_size) / (16 * baud) + */ +static unsigned int sunway8250_get_divisor(struct uart_port *p, + unsigned int baud, + unsigned int *frac) +{ + unsigned int quot, rem, base_baud = baud * 16; + struct sunway8250_data *d = p->private_data; + + quot = p->uartclk / base_baud; + rem = p->uartclk % base_baud; + *frac = DIV_ROUND_CLOSEST(rem << d->dlf_size, base_baud); + + return quot; +} + +static void sunway8250_set_divisor(struct uart_port *p, unsigned int baud, + unsigned int quot, unsigned int quot_frac) +{ + sunway8250_writel_ext(p, SUNWAY_UART_DLF, quot_frac); + serial8250_do_set_divisor(p, baud, quot, quot_frac); +} + +static void sunway8250_quirks(struct uart_port *p, struct sunway8250_data *data) +{ + if (p->dev->of_node) { + struct device_node *np = p->dev->of_node; + int id; + + /* get index of serial line, if found in DT aliases */ + id = of_alias_get_id(np, "serial"); + if (id >= 0) + p->line = id; +#ifdef CONFIG_64BIT + if (of_device_is_compatible(np, "cavium,octeon-3860-uart")) { + p->serial_in = sunway8250_serial_inq; + p->serial_out = sunway8250_serial_outq; + p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE; + p->type = PORT_OCTEON; + data->usr_reg = 0x27; + data->skip_autocfg = true; + } +#endif + if (of_device_is_big_endian(p->dev->of_node)) { + p->iotype = UPIO_MEM32BE; + p->serial_in = 
sunway8250_serial_in32be; + p->serial_out = sunway8250_serial_out32be; + } + if (of_device_is_compatible(np, "marvell,armada-38x-uart")) + p->serial_out = sunway8250_serial_out38x; + + } else if (acpi_dev_present("APMC0D08", NULL, -1)) { + p->iotype = UPIO_MEM32; + p->regshift = 2; + p->serial_in = sunway8250_serial_in32; + data->uart_16550_compatible = true; + } + + /* Platforms with iDMA 64-bit */ + if (platform_get_resource_byname(to_platform_device(p->dev), + IORESOURCE_MEM, "lpss_priv")) { + data->dma.rx_param = p->dev->parent; + data->dma.tx_param = p->dev->parent; + data->dma.fn = sunway8250_idma_filter; + } +} + +static void sunway8250_setup_port(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + u32 reg; + + /* + * If the Component Version Register returns zero, we know that + * ADDITIONAL_FEATURES are not enabled. No need to go any further. + */ + reg = sunway8250_readl_ext(p, SUNWAY_UART_UCV); + if (!reg) + return; + + dev_dbg(p->dev, "Designware UART version %c.%c%c\n", + (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff); + + sunway8250_writel_ext(p, SUNWAY_UART_DLF, ~0U); + reg = sunway8250_readl_ext(p, SUNWAY_UART_DLF); + sunway8250_writel_ext(p, SUNWAY_UART_DLF, 0); + + if (reg) { + struct sunway8250_data *d = p->private_data; + + d->dlf_size = fls(reg); + p->get_divisor = sunway8250_get_divisor; + p->set_divisor = sunway8250_set_divisor; + } + + reg = sunway8250_readl_ext(p, SUNWAY_UART_CPR); + if (!reg) + return; + + /* Select the type based on fifo */ + if (reg & SUNWAY_UART_CPR_FIFO_MODE) { + p->type = PORT_16550A; + p->flags |= UPF_FIXED_TYPE; + p->fifosize = SUNWAY_UART_CPR_FIFO_SIZE(reg); + up->capabilities = UART_CAP_FIFO; + } + + if (reg & SUNWAY_UART_CPR_AFCE_MODE) + up->capabilities |= UART_CAP_AFE; + + if (reg & SUNWAY_UART_CPR_SIR_MODE) + up->capabilities |= UART_CAP_IRDA; +} + +static int sunway8250_probe(struct platform_device *pdev) +{ + struct uart_8250_port uart = {}; + struct resource *regs = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); + int irq = platform_get_irq(pdev, 0); + struct uart_port *p = &uart.port; + struct device *dev = &pdev->dev; + struct sunway8250_data *data; + int err; + u32 val; + + if (!regs) { + dev_err(dev, "no registers defined\n"); + return -EINVAL; + } + + if (irq < 0) { + if (irq != -EPROBE_DEFER) + dev_err(dev, "cannot get irq\n"); + irq = 0; // Set serial poll mode + } + + spin_lock_init(&p->lock); + p->mapbase = regs->start; + p->irq = irq; + p->handle_irq = sunway8250_handle_irq; + p->pm = sunway8250_do_pm; + p->type = PORT_8250; + p->flags = UPF_SHARE_IRQ | UPF_FIXED_PORT; + p->dev = dev; + p->iotype = UPIO_MEM; + p->serial_in = sunway8250_serial_in; + p->serial_out = sunway8250_serial_out; + p->set_ldisc = sunway8250_set_ldisc; + p->set_termios = sunway8250_set_termios; + + p->membase = devm_ioremap(dev, regs->start, resource_size(regs)); + if (!p->membase) + return -ENOMEM; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dma.fn = sunway8250_fallback_dma_filter; + data->usr_reg = SUNWAY_UART_USR; + p->private_data = data; + + data->uart_16550_compatible = device_property_read_bool(dev, + "snps,uart-16550-compatible"); + + err = device_property_read_u32(dev, "reg-shift", &val); + if (!err) + p->regshift = val; + + err = device_property_read_u32(dev, "reg-io-width", &val); + if (!err && val == 4) { + p->iotype = UPIO_MEM32; + p->serial_in = sunway8250_serial_in32; + p->serial_out = sunway8250_serial_out32; + } + + if (device_property_read_bool(dev, "dcd-override")) { + /* Always report DCD as active */ + data->msr_mask_on |= UART_MSR_DCD; + data->msr_mask_off |= UART_MSR_DDCD; + } + + if (device_property_read_bool(dev, "dsr-override")) { + /* Always report DSR as active */ + data->msr_mask_on |= UART_MSR_DSR; + data->msr_mask_off |= UART_MSR_DDSR; + } + + if (device_property_read_bool(dev, "cts-override")) { + /* Always report CTS as active */ + data->msr_mask_on |= 
UART_MSR_CTS; + data->msr_mask_off |= UART_MSR_DCTS; + } + + if (device_property_read_bool(dev, "ri-override")) { + /* Always report Ring indicator as inactive */ + data->msr_mask_off |= UART_MSR_RI; + data->msr_mask_off |= UART_MSR_TERI; + } + + /* Always ask for fixed clock rate from a property. */ + device_property_read_u32(dev, "clock-frequency", &p->uartclk); + + /* If there is separate baudclk, get the rate from it. */ + data->clk = devm_clk_get(dev, "baudclk"); + if (IS_ERR(data->clk) && PTR_ERR(data->clk) != -EPROBE_DEFER) + data->clk = devm_clk_get(dev, NULL); + if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (!IS_ERR_OR_NULL(data->clk)) { + err = clk_prepare_enable(data->clk); + if (err) + dev_warn(dev, "could not enable optional baudclk: %d\n", + err); + else + p->uartclk = clk_get_rate(data->clk); + } + + /* If no clock rate is defined, fail. */ + if (!p->uartclk) { + dev_err(dev, "clock rate not defined\n"); + err = -EINVAL; + goto err_clk; + } + + data->pclk = devm_clk_get(dev, "apb_pclk"); + if (IS_ERR(data->pclk) && PTR_ERR(data->pclk) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_clk; + } + if (!IS_ERR(data->pclk)) { + err = clk_prepare_enable(data->pclk); + if (err) { + dev_err(dev, "could not enable apb_pclk\n"); + goto err_clk; + } + } + + data->rst = devm_reset_control_get_optional_exclusive(dev, NULL); + if (IS_ERR(data->rst)) { + err = PTR_ERR(data->rst); + goto err_pclk; + } + reset_control_deassert(data->rst); + + sunway8250_quirks(p, data); + + /* If the Busy Functionality is not implemented, don't handle it */ + if (data->uart_16550_compatible) + p->handle_irq = NULL; + + if (!data->skip_autocfg) + sunway8250_setup_port(p); + + /* If we have a valid fifosize, try hooking up DMA */ + if (p->fifosize) { + data->dma.rxconf.src_maxburst = p->fifosize / 4; + data->dma.txconf.dst_maxburst = p->fifosize / 4; + uart.dma = &data->dma; + } + + data->line = serial8250_register_8250_port(&uart); + if 
(data->line < 0) { + err = data->line; + goto err_reset; + } + + platform_set_drvdata(pdev, data); + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; + +err_reset: + reset_control_assert(data->rst); + +err_pclk: + if (!IS_ERR(data->pclk)) + clk_disable_unprepare(data->pclk); + +err_clk: + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); + + return err; +} + +static int sunway8250_remove(struct platform_device *pdev) +{ + struct sunway8250_data *data = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&pdev->dev); + + serial8250_unregister_port(data->line); + + reset_control_assert(data->rst); + + if (!IS_ERR(data->pclk)) + clk_disable_unprepare(data->pclk); + + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); + + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sunway8250_suspend(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + serial8250_suspend_port(data->line); + + return 0; +} + +static int sunway8250_resume(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + serial8250_resume_port(data->line); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static int sunway8250_runtime_suspend(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); + + if (!IS_ERR(data->pclk)) + clk_disable_unprepare(data->pclk); + + return 0; +} + +static int sunway8250_runtime_resume(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + if (!IS_ERR(data->pclk)) + clk_prepare_enable(data->pclk); + + if (!IS_ERR(data->clk)) + clk_prepare_enable(data->clk); + + return 0; +} +#endif + +static const struct dev_pm_ops sunway8250_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sunway8250_suspend, sunway8250_resume) + SET_RUNTIME_PM_OPS(sunway8250_runtime_suspend, sunway8250_runtime_resume, NULL) 
+}; + +static const struct of_device_id sunway8250_of_match[] = { + { .compatible = "sw6,sunway-apb-uart" }, + { .compatible = "cavium,octeon-3860-uart" }, + { .compatible = "marvell,armada-38x-uart" }, + { .compatible = "renesas,rzn1-uart" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sunway8250_of_match); + +static const struct acpi_device_id sunway8250_acpi_match[] = { + { "INT33C4", 0 }, + { "INT33C5", 0 }, + { "INT3434", 0 }, + { "INT3435", 0 }, + { "80860F0A", 0 }, + { "8086228A", 0 }, + { "APMC0D08", 0}, + { "AMD0020", 0 }, + { "AMDI0020", 0 }, + { "BRCM2032", 0 }, + { "HISI0031", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, sunway8250_acpi_match); + +static struct platform_driver sunway8250_platform_driver = { + .driver = { + .name = "sunway-apb-uart", + .pm = &sunway8250_pm_ops, + .of_match_table = sunway8250_of_match, + .acpi_match_table = ACPI_PTR(sunway8250_acpi_match), + }, + .probe = sunway8250_probe, + .remove = sunway8250_remove, +}; + +module_platform_driver(sunway8250_platform_driver); + +MODULE_AUTHOR("Jamie Iles"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Synopsys DesignWare 8250 serial port driver"); +MODULE_ALIAS("platform:sunway-apb-uart"); diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index ee17cf5c44c6..e8edd9388d76 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -407,6 +407,13 @@ config SERIAL_8250_DW Selecting this option will enable handling of the extra features present in the Synopsys DesignWare APB UART. +config SERIAL_8250_SUNWAY + tristate "Support for SW6B Builtin Synopsys DesignWare 8250 quirks" + depends on SERIAL_8250 && SW64 + help + Selecting this option will enable handling of the extra features + present in the Synopsys DesignWare APB UART of SW6. 
+ config SERIAL_8250_EM tristate "Support for Emma Mobile integrated serial port" depends on SERIAL_8250 && HAVE_CLK diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile index 628b75be312e..8186ea891405 100644 --- a/drivers/tty/serial/8250/Makefile +++ b/drivers/tty/serial/8250/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o obj-$(CONFIG_SERIAL_8250_MEN_MCB) += 8250_men_mcb.o obj-$(CONFIG_SERIAL_8250_DFL) += 8250_dfl.o obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o +obj-$(CONFIG_SERIAL_8250_SUNWAY) += 8250_sunway.o obj-$(CONFIG_SERIAL_8250_EM) += 8250_em.o obj-$(CONFIG_SERIAL_8250_IOC3) += 8250_ioc3.o obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o -- Gitee From ece4b209e8444eaf77c02841cde280e9f88bf10b Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:55:58 +0800 Subject: [PATCH 0355/2138] anolis: drivers: usb: add sw64 support ANBZ: #4688 Add usb drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/usb/core/Makefile | 2 +- drivers/usb/host/pci-quirks.c | 127 ++++++++++++++++++++++++++++++++++ 2 files changed, 128 insertions(+), 1 deletion(-) diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile index 7d338e9c0657..8ee58be1fb37 100644 --- a/drivers/usb/core/Makefile +++ b/drivers/usb/core/Makefile @@ -9,7 +9,7 @@ usbcore-y += devio.o notify.o generic.o quirks.o devices.o usbcore-y += phy.o port.o usbcore-$(CONFIG_OF) += of.o -usbcore-$(CONFIG_USB_PCI) += hcd-pci.o +usbcore-$(CONFIG_USB_PCI) += hcd-pci.o usbcore-$(CONFIG_ACPI) += usb-acpi.o ifdef CONFIG_USB_ONBOARD_HUB diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 2665832f9add..498497cace20 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -1283,3 +1283,130 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev) } 
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); + +#ifdef CONFIG_SW64 +#include +#define XHCI_STS_FATAL (1 << 2) +#define XHCI_STS_EINT (1 << 3) +#define XHCI_STS_PORT (1 << 4) +#define XHCI_STS_SRE (1 << 10) +#define STS_RW1C_BITS (XHCI_STS_FATAL | XHCI_STS_EINT | XHCI_STS_PORT | XHCI_STS_SRE) + +static void +fixup_usb_xhci_reset(struct pci_dev *dev) +{ + void __iomem *op_reg_base; + int timeout; + u32 xhci_command; + u32 tmp, val; + void __iomem *base; + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + unsigned long offset; + int ext_cap_offset; + int retries = 3; + + pci_read_config_dword(dev, PCI_COMMAND, &tmp); + tmp |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_write_config_dword(dev, PCI_COMMAND, tmp); + + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &tmp); + if (tmp & PCI_BASE_ADDRESS_MEM_TYPE_MASK) { + pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val); + offset = (unsigned long)(val) << 32 | (tmp & (~0xf)); + } else + offset = (unsigned long)(tmp & (~0xf)); + + if (offset == 0) + return; + + base = (void *)__va(SW64_PCI_IO_BASE(hose->node, hose->index) | offset); + + ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY); + if (!ext_cap_offset) + goto hc_init; + + val = readl(base + ext_cap_offset); + + if ((dev->vendor == PCI_VENDOR_ID_TI && dev->device == 0x8241) || + (dev->vendor == PCI_VENDOR_ID_RENESAS + && dev->device == 0x0014)) { + val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED; + writel(val, base + ext_cap_offset); + } + + if (val & XHCI_HC_BIOS_OWNED) { + writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); + + timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, + 0, 1000000, 10); + if (timeout) { + pr_err("xHCI BIOS handoff failed (BIOS bug ?) 
%08x\n", val); + writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset); + } + } + + val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); + val &= XHCI_LEGACY_DISABLE_SMI; + val |= XHCI_LEGACY_SMI_EVENTS; + writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); + +hc_init: + if (dev->vendor == PCI_VENDOR_ID_INTEL) + usb_enable_intel_xhci_ports(dev); + + op_reg_base = base + XHCI_HC_LENGTH(readl(base)); + + timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0, + 5000000, 10); + if (timeout) { + val = readl(op_reg_base + XHCI_STS_OFFSET); + pr_err("xHCI HW not ready after 5 sec (HC bug?) status = 0x%x\n", val); + } + + xhci_command = readl(op_reg_base + XHCI_CMD_OFFSET); + xhci_command |= 0x2; + writel(xhci_command, op_reg_base + XHCI_CMD_OFFSET); + + timeout = handshake(op_reg_base + XHCI_CMD_OFFSET, + 0x2, 0, 10 * 1000 * 1000, 125); + if (timeout) + pr_err("xHCI BIOS handoff time out\n"); + +retry: + val = readl(op_reg_base + XHCI_STS_OFFSET); + val |= STS_RW1C_BITS; + writel(val, op_reg_base + XHCI_STS_OFFSET); + val = readl(op_reg_base + XHCI_STS_OFFSET); + + if ((val & STS_RW1C_BITS) && retries--) { + pr_err("clear USB Status Register (status = %#x) failed, retry\n", val); + goto retry; + } + + val = readl(op_reg_base + XHCI_CMD_OFFSET); + val &= ~(XHCI_CMD_RUN | XHCI_IRQS); + writel(val, op_reg_base + XHCI_CMD_OFFSET); + timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1, + XHCI_MAX_HALT_USEC, 125); + if (timeout) { + val = readl(op_reg_base + XHCI_STS_OFFSET); + pr_err("xHCI HW did not halt within %d usec status = 0x%x\n", + XHCI_MAX_HALT_USEC, val); + } + + xhci_command = readl(op_reg_base + XHCI_CMD_OFFSET); + xhci_command |= 0x2; + writel(xhci_command, op_reg_base + XHCI_CMD_OFFSET); + + timeout = handshake(op_reg_base + XHCI_CMD_OFFSET, + 0x2, 0, 10 * 1000 * 1000, 125); + if (timeout) + pr_err("xHCI BIOS handoff time out\n"); + + pci_read_config_dword(dev, PCI_COMMAND, &tmp); + tmp &= 
~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_write_config_dword(dev, PCI_COMMAND, tmp); +} +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB_XHCI, 0, fixup_usb_xhci_reset); +#endif -- Gitee From 5b8916fe13b9c6dc38c5ea108aea0aee663d5f14 Mon Sep 17 00:00:00 2001 From: Mao Minkai Date: Thu, 18 Jan 2024 16:56:19 +0800 Subject: [PATCH 0356/2138] anolis: drivers: vfio: add sw64 support ANBZ: #4688 Add vfio drivers for SW64. Signed-off-by: Mao Minkai Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/vfio/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index 6bda6dbb4878..d80b6ffefd9d 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -39,7 +39,7 @@ config VFIO_GROUP config VFIO_CONTAINER bool "Support for the VFIO container /dev/vfio/vfio" - select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64) + select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64 || SW64) depends on VFIO_GROUP default y help -- Gitee From ba42be7de629eb5cc4077963720df21ca583c892 Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Mon, 18 Mar 2024 17:11:13 +0800 Subject: [PATCH 0357/2138] anolis: sw64: fix a compile error for vrt_sigreturn ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/include/asm/linkage.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/sw_64/include/asm/linkage.h b/arch/sw_64/include/asm/linkage.h index 85b279f6211e..1721753b4d98 100644 --- a/arch/sw_64/include/asm/linkage.h +++ b/arch/sw_64/include/asm/linkage.h @@ -6,4 +6,8 @@ #define SYSCALL_ALIAS(alias, name) \ asm (#alias " = " #name "\n\t.globl " #alias) +#define SYM_END(name, sym_type) \ + .type name sym_type ASM_NL \ + .size name, .-name + 
#endif /* _ASM_SW64_LINKAGE_H */ -- Gitee From 242c910b3ec3fb1439530e001b225296f3f5084c Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Mon, 18 Mar 2024 17:11:47 +0800 Subject: [PATCH 0358/2138] anolis: sw64: fix compile errors for CONFIG_FTRACE=y ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/kernel/kprobes/kprobes-ftrace.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/sw_64/kernel/kprobes/kprobes-ftrace.c b/arch/sw_64/kernel/kprobes/kprobes-ftrace.c index 89d7dba9dc25..a0b33a52a9e4 100644 --- a/arch/sw_64/kernel/kprobes/kprobes-ftrace.c +++ b/arch/sw_64/kernel/kprobes/kprobes-ftrace.c @@ -11,14 +11,22 @@ /* Ftrace callback handler for kprobes */ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, - struct ftrace_ops *ops, struct pt_regs *regs) + struct ftrace_ops *ops, struct ftrace_regs *fregs) { struct kprobe *p; struct kprobe_ctlblk *kcb; + struct pt_regs *regs; + int bit; + bit = ftrace_test_recursion_trylock(ip, parent_ip); + if (bit < 0) + return; + + regs = ftrace_get_regs(fregs); + preempt_disable_notrace(); p = get_kprobe((kprobe_opcode_t *)ip); if (unlikely(!p) || kprobe_disabled(p)) - return; + goto out; kcb = get_kprobe_ctlblk(); if (kprobe_running()) { @@ -37,6 +45,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, } __this_cpu_write(current_kprobe, NULL); } +out: + preempt_enable_notrace(); + ftrace_test_recursion_unlock(bit); } NOKPROBE_SYMBOL(kprobe_ftrace_handler); -- Gitee From 2bb8a4d4e7706ef997e24c6142a4a13d7a80ffe3 Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Mon, 18 Mar 2024 17:12:57 +0800 Subject: [PATCH 0359/2138] anolis: sw64: fix compile errors for CONFIG_SW64_CPUAUTOPLUG=y ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: 
https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/kernel/cpuautoplug.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/arch/sw_64/kernel/cpuautoplug.c b/arch/sw_64/kernel/cpuautoplug.c index a7571a77a72c..b4ea0ef080d8 100644 --- a/arch/sw_64/kernel/cpuautoplug.c +++ b/arch/sw_64/kernel/cpuautoplug.c @@ -234,7 +234,7 @@ static inline cputime64_t get_idle_time_jiffy(cputime64_t *wall) return (cputime64_t)jiffies_to_usecs(idle_time); } -static inline cputime64_t get_idle_time(cputime64_t *wall) +static inline cputime64_t sw64_get_idle_time(cputime64_t *wall) { unsigned int cpu; u64 idle_time = 0; @@ -378,7 +378,7 @@ static void do_autoplug_timer(struct work_struct *work) goto out; } - cur_idle_time = get_idle_time(&cur_wall_time); + cur_idle_time = sw64_get_idle_time(&cur_wall_time); if (cur_wall_time == 0) cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); @@ -441,11 +441,16 @@ static struct platform_driver platform_driver = { static int __init cpuautoplug_init(void) { int i, ret, delay; - - ret = sysfs_create_group(&cpu_subsys.dev_root->kobj, - &cpuclass_attr_group); - if (ret) - return ret; + struct device *dev_root; + + dev_root = bus_get_dev_root(&cpu_subsys); + if (dev_root) { + ret = sysfs_create_group(&dev_root->kobj, + &cpuclass_attr_group); + put_device(dev_root); + if (ret) + return ret; + } ret = platform_driver_register(&platform_driver); if (ret) -- Gitee From 80fdf7f424563a97f258ae1e72123b25a587f642 Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Tue, 19 Mar 2024 08:43:50 +0800 Subject: [PATCH 0360/2138] anolis: sw64: fix compile errors for CONFIG_KEXEC_CORE=y ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/kernel/process.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/sw_64/kernel/process.c b/arch/sw_64/kernel/process.c 
index fa58a0de4368..9a887140edef 100644 --- a/arch/sw_64/kernel/process.c +++ b/arch/sw_64/kernel/process.c @@ -103,6 +103,24 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) return 0; } +/* + * Fill in the user structure for a ELF core dump. + * @regs: should be signal_pt_regs() or task_pt_reg(task) + */ +void sw64_elf_core_copy_regs(elf_greg_t *dest, struct pt_regs *regs) +{ + int i; + struct thread_info *ti; + + ti = (void *)((__u64)regs & ~(THREAD_SIZE - 1)); + + for (i = 0; i < 31; i++) + dest[i] = regs->regs[i]; + dest[31] = regs->pc; + dest[32] = ti->pcb.tp; +} +EXPORT_SYMBOL(sw64_elf_core_copy_regs); + unsigned long arch_randomize_brk(struct mm_struct *mm) { return randomize_page(mm->brk, 0x02000000); -- Gitee From 3e8795c33952579af0631eef19d29c6689f0760b Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Tue, 19 Mar 2024 08:46:14 +0800 Subject: [PATCH 0361/2138] anolis: drivers: virtio: add sw64 support ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- drivers/virtio/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 0a53a61231c2..da89d498d14e 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -117,7 +117,7 @@ config VIRTIO_BALLOON config VIRTIO_MEM tristate "Virtio mem driver" - depends on X86_64 || ARM64 + depends on X86_64 || ARM64 || SW64 depends on VIRTIO depends on MEMORY_HOTPLUG depends on MEMORY_HOTREMOVE -- Gitee From 2016a43712cc2ab2b3ad8363e941f68756aac5e2 Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Tue, 19 Mar 2024 08:49:09 +0800 Subject: [PATCH 0362/2138] anolis: sw64: fix compile errors for CONFIG_ADVISE_SYSCALLS=y ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 
--- arch/sw_64/include/uapi/asm/mman.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/sw_64/include/uapi/asm/mman.h b/arch/sw_64/include/uapi/asm/mman.h index 15cb7bfee3b1..c83c4b50662a 100644 --- a/arch/sw_64/include/uapi/asm/mman.h +++ b/arch/sw_64/include/uapi/asm/mman.h @@ -2,6 +2,8 @@ #ifndef _UAPI_ASM_SW64_MMAN_H #define _UAPI_ASM_SW64_MMAN_H +#include + #define PROT_READ 0x1 /* page can be read */ #define PROT_WRITE 0x2 /* page can be written */ #define PROT_EXEC 0x4 /* page can be executed */ -- Gitee From e8bc5a8b02684e252c99604e86d1359603264b9b Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Tue, 19 Mar 2024 08:54:02 +0800 Subject: [PATCH 0363/2138] anolis: sw64: remove nid parameter from arch_remove_memory() ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/mm/init.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c index ca761b602ab6..1f402809128f 100644 --- a/arch/sw_64/mm/init.c +++ b/arch/sw_64/mm/init.c @@ -306,8 +306,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) return ret; } -void arch_remove_memory(int nid, u64 start, u64 size, - struct vmem_altmap *altmap) +void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; -- Gitee From efb5558ee09fde1e26558f2e211db6c8a3041afd Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Tue, 19 Mar 2024 08:57:32 +0800 Subject: [PATCH 0364/2138] anolis: sw64: add initial anolis_xuelang_defconfig ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/configs/anolis_xuelang_defconfig | 1104 +++++++++++++++++++ 1 file changed, 1104 
insertions(+) create mode 100644 arch/sw_64/configs/anolis_xuelang_defconfig diff --git a/arch/sw_64/configs/anolis_xuelang_defconfig b/arch/sw_64/configs/anolis_xuelang_defconfig new file mode 100644 index 000000000000..8c9c4dda69ed --- /dev/null +++ b/arch/sw_64/configs/anolis_xuelang_defconfig @@ -0,0 +1,1104 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_LSM=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_NUMA_BALANCING=y +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_PROFILING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_SMP=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=256 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_LIVEPATCH=y +CONFIG_NUMA=y +CONFIG_HZ=100 +CONFIG_BINFMT_MISC=m +CONFIG_USE_OF=y +CONFIG_DMI_SYSFS=y +CONFIG_FW_CFG_SYSFS=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_HIBERNATION=y +CONFIG_PM_DEBUG=y +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_TAD=m +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HED=y +CONFIG_ACPI_NFIT=m +CONFIG_VIRTUALIZATION=y 
+CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_JUMP_LABEL=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_ALL is not set +CONFIG_MODULE_SIG_SHA256=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_CGROUP_IOCOST=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_IOSCHED_BFQ=y +CONFIG_ZSWAP=y +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_COMPAT_BRK is not set +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y +CONFIG_MEMORY_FAILURE=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_USERFAULTFD=y +CONFIG_LRU_GEN=y +CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +CONFIG_DAMON_DBGFS=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y 
+CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m 
+CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m 
+CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m 
+CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_ATM=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_X25=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m 
+CONFIG_NET_SCH_DEFAULT=y +CONFIG_DEFAULT_FQ_CODEL=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_DCB=y +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_NET_SWITCHDEV=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# CONFIG_WIRELESS is not set +CONFIG_RFKILL=m +CONFIG_NET_IFE=m +CONFIG_LWTUNNEL=y +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIE_DPC=y +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_STUB=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_CPCI=y +CONFIG_HOTPLUG_PCI_SHPC=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_CONNECTOR=y +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y 
+CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_ZRAM=m +CONFIG_ZRAM_WRITEBACK=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_UBLK=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_PVPANIC=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_ISCSI_TCP=m +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_BE2ISCSI=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_AHA152X=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_SMARTPQI=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_SCSI_FDOMAIN_ISA=m +CONFIG_SCSI_STEX=m +CONFIG_SCSI_SYM53C8XX_2=m +CONFIG_SCSI_QLOGIC_FAS=m +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_SCSI_LPFC=m +CONFIG_SCSI_DEBUG=m +CONFIG_SCSI_PMCRAID=m +CONFIG_SCSI_PM8001=m +CONFIG_SCSI_BFA_FC=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=m +CONFIG_SATA_SIL24=m +CONFIG_ATA_GENERIC=m +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y 
+CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ATM_DRIVERS is not set +# CONFIG_NET_VENDOR_3COM is not set +CONFIG_ET131X=m +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BNXT_DCB=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_CAVIUM_PTP=y +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +# CONFIG_NET_VENDOR_CIRRUS is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +CONFIG_IGC=m +CONFIG_JME=m +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=m +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y 
+CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLXSW_CORE=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_QCA7000_SPI=m +CONFIG_QCOM_EMAC=m +CONFIG_RMNET=m +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_SFC=m +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_PHYLIB=y +CONFIG_MDIO_BUS_MUX_MULTIPLEXER=m +CONFIG_MDIO_BUS_MUX_MMIOREG=m +CONFIG_PPP=m +CONFIG_PPPOE=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_SLIP_MODE_SLIP6=y +# CONFIG_WLAN is not set +CONFIG_WAN=y +CONFIG_NETDEVSIM=m +CONFIG_ISDN=y +CONFIG_INPUT_LEDS=m +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_ARC_PS2=m +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_PCI=m +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_SUNWAY=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_TCG_TIS=y +CONFIG_TCG_ATMEL=m +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_SMBUS=m +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=y +CONFIG_GPIO_GENERIC_PLATFORM=m +CONFIG_POWER_RESET=y +CONFIG_SENSORS_PVT=y +CONFIG_SENSORS_LM75=m +CONFIG_PMBUS=m +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_I6300ESB_WDT=m +CONFIG_SSB=y +CONFIG_RC_CORE=m +CONFIG_DRM=m +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m 
+CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y +CONFIG_LCD_CLASS_DEVICE=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +# CONFIG_HID_ITE is not set +CONFIG_HID_LOGITECH=m +# CONFIG_HID_REDRAGON is not set +# CONFIG_I2C_HID is not set +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_ERDMA=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_DMADEVICES=y +CONFIG_DW_DMAC=m +CONFIG_ASYNC_TX_DMA=y +CONFIG_DMATEST=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_DAX=y +CONFIG_STM=m +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_REISERFS_FS=m +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_GFS2_FS=y +CONFIG_BTRFS_FS=y +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_F2FS_FS=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y 
+CONFIG_QFMT_V2=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_VIRT_FUSE=m +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +CONFIG_CACHEFILES=m +CONFIG_CACHEFILES_ONDEMAND=y +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +CONFIG_EROFS_FS=m +CONFIG_EROFS_FS_ZIP_LZMA=y +CONFIG_EROFS_FS_ZIP_DEFLATE=y +CONFIG_EROFS_FS_ONDEMAND=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_CEPH_FS=m +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m 
+CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_DLM=m +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +CONFIG_TRUSTED_KEYS=m +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_YAMA=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +CONFIG_IMA=y +CONFIG_IMA_SIG_TEMPLATE=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_APPRAISE_BUILD_POLICY=y +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y +CONFIG_IMA_BLACKLIST_KEYRING=y +CONFIG_IMA_LOAD_X509=y +CONFIG_EVM=y +CONFIG_EVM_LOAD_X509=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf" +CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_DH=m +CONFIG_CRYPTO_SM2=y +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m 
+CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=y +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SM3_GENERIC=y +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC7=m +CONFIG_CRC8=m +CONFIG_DMA_CMA=y +CONFIG_PRINTK_TIME=y +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_SECTION_MISMATCH=y +# CONFIG_FRAME_POINTER is not set +CONFIG_KGDB=y +CONFIG_KGDB_TESTS=y +CONFIG_KGDB_KDB=y +CONFIG_KDB_KEYBOARD=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_TIMEOUT=1 +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_SCHEDSTATS=y +CONFIG_DEBUG_LIST=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +CONFIG_STACK_TRACER=y +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_TIMERLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_HIST_TRIGGERS=y +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_BPF=m -- Gitee From 82e3f6b41fd4176a51909df21e3b726f25190e50 Mon Sep 17 00:00:00 2001 From: Gu Zitao Date: Tue, 19 
Mar 2024 09:01:27 +0800 Subject: [PATCH 0365/2138] anolis: sw64: add several options support ANBZ: #4688 Signed-off-by: Gu Zitao Reviewed-by: He Sheng Signed-off-by: Gu Zitao Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2916 --- arch/sw_64/Kconfig | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig index 0fd1be7195cc..62f655ceae3f 100644 --- a/arch/sw_64/Kconfig +++ b/arch/sw_64/Kconfig @@ -72,6 +72,7 @@ config SW64 select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_KGDB + select HAVE_ARCH_PREL32_RELOCATIONS select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE @@ -121,6 +122,12 @@ config SW64 select SW64_TIMER select SWIOTLB select THREAD_INFO_IN_TASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select IOMMU_DMA if IOMMU_SUPPORT + select ARCH_SUPPORTS_MEMORY_FAILURE + select HAVE_CONTEXT_TRACKING + select HAVE_NMI + select HAVE_DMA_CONTIGUOUS config LOCKDEP_SUPPORT def_bool y @@ -162,6 +169,10 @@ config GENERIC_CALIBRATE_DELAY bool default y +config ZONE_DMA + bool "Support DMA zone" if EXPERT + default y + config ZONE_DMA32 bool default y @@ -193,6 +204,10 @@ config SYS_HAS_EARLY_PRINTK config HAVE_CSRRW bool +config ILLEGAL_POINTER_VALUE + hex + default 0xdead000000000000 + menu "System setup" menu "Machine Configuration" @@ -395,6 +410,13 @@ config SCHED_SMT MultiThreading at a cost of slightly increased overhead in some places. If unsure say N here. +config SCHED_MC + bool "Multi-core scheduler support" + help + Multi-core scheduler support improves the CPU scheduler's decision + making when dealing with multi-core CPU chips at a cost of slightly + increased overhead in some places. If unsure say N here. 
+ config NR_CPUS int "Maximum number of CPUs (2-256)" range 2 256 @@ -636,6 +658,9 @@ config ARCH_HIBERNATION_POSSIBLE depends on SW64 def_bool y +config ARCH_SELECT_MEMORY_MODEL + def_bool ARCH_SPARSEMEM_ENABLE + source "drivers/cpuidle/Kconfig" source "drivers/idle/Kconfig" -- Gitee From e5a6514e4673ba169eb335253ef5f0a845b79995 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Wed, 9 Mar 2022 14:58:23 +0800 Subject: [PATCH 0366/2138] anolis: sched: introduce ACPU accounting ANBZ: #8547 When SMT is on, tasks will be disturbed by the tasks on its SMT sibling, which will make the tasks run sometimes fast and sometimes slow. So far, there isn't any way to assess how much disturbance the task has received. To assess the SMT disturbance, we introduce ACPU (assess CPU), which will account how long the task is running with its SMT sibling idle. The statistical data is shown in /proc/<pid>/sched, row se.core_sibidletime. The data is counted and shown only when kernel.sched_schedstats is on. 
Co-developed-by: Cruz Zhao Signed-off-by: Tianchen Ding Signed-off-by: Cruz Zhao Link: https://gitee.com/anolis/cloud-kernel/pulls/2893 --- include/linux/kernel_stat.h | 6 +++ include/linux/sched.h | 10 +++++ kernel/sched/core.c | 89 +++++++++++++++++++++++++++++++++++++ kernel/sched/cputime.c | 8 ++++ kernel/sched/debug.c | 3 ++ kernel/sched/sched.h | 6 +++ kernel/smpboot.c | 1 + lib/Kconfig.debug | 7 +++ 8 files changed, 130 insertions(+) diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 9935f7ecbfb9..955b08e976bb 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -30,6 +30,9 @@ enum cpu_usage_stat { CPUTIME_GUEST_NICE, #ifdef CONFIG_SCHED_CORE CPUTIME_FORCEIDLE, +#endif +#ifdef CONFIG_SCHED_ACPU + CPUTIME_SIBIDLE, #endif NR_STATS, }; @@ -133,5 +136,8 @@ extern void account_idle_ticks(unsigned long ticks); #ifdef CONFIG_SCHED_CORE extern void __account_forceidle_time(struct task_struct *tsk, u64 delta); #endif +#ifdef CONFIG_SCHED_ACPU +extern void __account_sibidle_time(struct task_struct *tsk, u64 delta); +#endif #endif /* _LINUX_KERNEL_STAT_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index c598b36dabd5..eb72e0bc1a54 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -545,6 +545,10 @@ struct sched_statistics { #ifdef CONFIG_SCHED_CORE u64 core_forceidle_sum; #endif +#ifdef CONFIG_SCHED_ACPU + u64 core_sibidle_sum; +#endif + #endif /* CONFIG_SCHEDSTATS */ } ____cacheline_aligned; @@ -2501,3 +2505,9 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } extern void sched_set_stop_task(int cpu, struct task_struct *stop); #endif + +#ifdef CONFIG_SCHED_ACPU +extern void acpu_enable(void); +#else +static inline void acpu_enable(void) { } +#endif diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f02dbe357801..a61d31cba3f6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -155,6 +155,10 @@ __read_mostly int scheduler_running; 
DEFINE_STATIC_KEY_FALSE(__sched_core_enabled); +#ifdef CONFIG_SCHED_ACPU +DEFINE_STATIC_KEY_FALSE(acpu_enabled); +#endif + /* kernel prio, less is more */ static inline int __task_prio(const struct task_struct *p) { @@ -4983,6 +4987,83 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr, #endif /* CONFIG_PREEMPT_NOTIFIERS */ +#ifdef CONFIG_SCHED_ACPU +void acpu_enable(void) +{ + int i; + + for_each_possible_cpu(i) { + struct rq *rq = cpu_rq(i); + + /* It may be not that accurate, but useful enough. */ + rq->last_acpu_update_time = rq->clock; + } + static_branch_enable(&acpu_enabled); +} + +static void update_acpu(struct rq *rq, struct task_struct *prev, struct task_struct *next) +{ + const int cpu = cpu_of(rq); + const struct cpumask *smt_mask = cpu_smt_mask(cpu); + u64 now = rq_clock(rq); + u64 sibidle_sum, last_update_time; + s64 delta, last; + int i; + + if (!static_branch_likely(&acpu_enabled) || !schedstat_enabled()) + return; + + /* Update idle sum and busy sum for current rq. */ + delta = now - rq->last_acpu_update_time; + if (prev == rq->idle) + rq->acpu_idle_sum += delta; + + /* + * Be carefule, smt_mask maybe NULL. + * We only consider the case where there are two SMT at this stage. + */ + if (unlikely(!smt_mask) || unlikely(cpumask_weight(smt_mask) != 2)) + goto out; + + for_each_cpu(i, smt_mask) { + if (i != cpu) { + struct rq *rq_i = cpu_rq(i); + struct task_struct *curr_i = rq_i->curr; + + last = (s64)(rq->last_acpu_update_time - + rq_i->last_acpu_update_time); + last_update_time = last >= 0 ? rq->last_acpu_update_time : + rq_i->last_acpu_update_time; + /* + * Sibling may update acpu at the same time, and it's + * timestamp may be newer than this rq. + */ + delta = now - last_update_time; + delta = delta > 0 ? delta : 0; + + /* Add the delta to improve accuracy. */ + sibidle_sum = last >= 0 ? 
rq->sibidle_sum : rq_i->acpu_idle_sum; + if (curr_i == rq_i->idle) + sibidle_sum += delta; + } + } + + if (prev != rq->idle) { + delta = sibidle_sum - rq->sibidle_sum; + delta = delta > 0 ? delta : 0; + __account_sibidle_time(prev, delta); + } + + rq->sibidle_sum = sibidle_sum; +out: + rq->last_acpu_update_time = now; +} +#else +static inline void update_acpu(struct rq *rq, struct task_struct *prev, struct task_struct *next) +{ +} +#endif /* CONFIG_SCHED_ACPU */ + static inline void prepare_task(struct task_struct *next) { #ifdef CONFIG_SMP @@ -5191,6 +5272,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, { kcov_prepare_switch(prev); sched_info_switch(rq, prev, next); + update_acpu(rq, prev, next); perf_event_task_sched_out(prev, next); rseq_preempt(prev); fire_sched_out_preempt_notifiers(prev, next); @@ -5666,6 +5748,7 @@ void scheduler_tick(void) thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); curr->sched_class->task_tick(rq, curr, 0); + update_acpu(rq, curr, curr); if (sched_feat(LATENCY_WARN)) resched_latency = cpu_resched_latency(rq); calc_global_load_tick(rq); @@ -10075,6 +10158,12 @@ void __init sched_init(void) rcuwait_init(&rq->hotplug_wait); #endif #endif /* CONFIG_SMP */ + +#ifdef CONFIG_SCHED_ACPU + rq->acpu_idle_sum = 0; + rq->sibidle_sum = 0; + rq->last_acpu_update_time = rq->clock; +#endif hrtick_rq_init(rq); atomic_set(&rq->nr_iowait, 0); diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index b453f8a6a7c7..e64c52d319d9 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -244,6 +244,14 @@ void __account_forceidle_time(struct task_struct *p, u64 delta) task_group_account_field(p, CPUTIME_FORCEIDLE, delta); } #endif +#ifdef CONFIG_SCHED_ACPU +void __account_sibidle_time(struct task_struct *p, u64 delta) +{ + __schedstat_add(p->stats.core_sibidle_sum, delta); + + task_group_account_field(p, CPUTIME_SIBIDLE, delta); +} +#endif /* 
* When a guest is interrupted for a longer amount of time, missed clock diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 4c3d0d9f3db6..464fa6b7c2a9 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -1059,6 +1059,9 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, #ifdef CONFIG_SCHED_CORE PN_SCHEDSTAT(core_forceidle_sum); +#endif +#ifdef CONFIG_SCHED_ACPU + PN_SCHEDSTAT(core_sibidle_sum); #endif } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index ec7dd031f1ab..aedb52fe9b8a 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1167,6 +1167,12 @@ struct rq { call_single_data_t cfsb_csd; struct list_head cfsb_csd_list; #endif + +#ifdef CONFIG_SCHED_ACPU + u64 acpu_idle_sum; + u64 sibidle_sum; + u64 last_acpu_update_time; +#endif }; #ifdef CONFIG_FAIR_GROUP_SCHED diff --git a/kernel/smpboot.c b/kernel/smpboot.c index 1992b62e980b..af26aca11742 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -73,6 +73,7 @@ void __init idle_threads_init(void) if (cpu != boot_cpu) idle_init(cpu); } + acpu_enable(); } #endif diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index f94c3e957b82..8267aa4255c7 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1256,6 +1256,13 @@ config SCHEDSTATS application, you can say N to avoid the very slight overhead this adds. +config SCHED_ACPU + bool "ACPU info: account idle time of smt to task" + depends on DEBUG_KERNEL && PROC_FS && SMP && SCHED_SMT + default y + help + Add ACPU info in /proc//sched. + endmenu config DEBUG_TIMEKEEPING -- Gitee From 3568fc35a079b0dda9ddbf1c7f581f47fab1a2e5 Mon Sep 17 00:00:00 2001 From: Cruz Zhao Date: Tue, 19 Sep 2023 17:22:19 +0800 Subject: [PATCH 0367/2138] anolis: sched: introduce sysctl_sched_acpu_enabled ANBZ: #8547 In order to be able to dynamically turn on and off acpu accounting, we introduce sysctl_sched_acpu_enabled, instead of default on. 
Signed-off-by: Cruz Zhao Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2893 --- include/linux/sched.h | 6 ------ include/linux/sched/sysctl.h | 6 ++++++ kernel/sched/core.c | 32 +++++++++++++++++++++++++++++++- kernel/smpboot.c | 1 - kernel/sysctl.c | 11 +++++++++++ 5 files changed, 48 insertions(+), 8 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index eb72e0bc1a54..962a4841f1ff 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2505,9 +2505,3 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } extern void sched_set_stop_task(int cpu, struct task_struct *stop); #endif - -#ifdef CONFIG_SCHED_ACPU -extern void acpu_enable(void); -#else -static inline void acpu_enable(void) { } -#endif diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 5a64582b086b..1c45773304fc 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -29,4 +29,10 @@ extern int sysctl_numa_balancing_mode; #define sysctl_numa_balancing_mode 0 #endif +#ifdef CONFIG_SCHED_ACPU +extern unsigned int sysctl_sched_acpu_enabled; +extern int sched_acpu_enable_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +#endif #endif /* _LINUX_SCHED_SYSCTL_H */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a61d31cba3f6..03e241ee72d1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -157,6 +157,7 @@ DEFINE_STATIC_KEY_FALSE(__sched_core_enabled); #ifdef CONFIG_SCHED_ACPU DEFINE_STATIC_KEY_FALSE(acpu_enabled); +unsigned int sysctl_sched_acpu_enabled; #endif /* kernel prio, less is more */ @@ -4988,7 +4989,7 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr, #endif /* CONFIG_PREEMPT_NOTIFIERS */ #ifdef CONFIG_SCHED_ACPU -void acpu_enable(void) +static void acpu_enable(void) { int i; @@ -5001,6 +5002,35 @@ void acpu_enable(void) static_branch_enable(&acpu_enabled); } +static void 
acpu_disable(void) +{ + static_branch_disable(&acpu_enabled); +} + +int sched_acpu_enable_handler(struct ctl_table *table, int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + int ret; + unsigned int old, new; + + if (!write) { + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + return ret; + } + + old = sysctl_sched_acpu_enabled; + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + new = sysctl_sched_acpu_enabled; + if (!ret && write && (old != new)) { + if (new) + acpu_enable(); + else + acpu_disable(); + } + + return ret; +} + static void update_acpu(struct rq *rq, struct task_struct *prev, struct task_struct *next) { const int cpu = cpu_of(rq); diff --git a/kernel/smpboot.c b/kernel/smpboot.c index af26aca11742..1992b62e980b 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c @@ -73,7 +73,6 @@ void __init idle_threads_init(void) if (cpu != boot_cpu) idle_init(cpu); } - acpu_enable(); } #endif diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 204528a81b43..47bdd8216fc5 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2068,6 +2068,17 @@ static struct ctl_table kern_table[] = { .extra2 = &userns_max_level_max, }, #endif +#ifdef CONFIG_SCHED_ACPU + { + .procname = "sched_acpu", + .data = &sysctl_sched_acpu_enabled, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_acpu_enable_handler, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +#endif /* CONFIG_SCHED_ACPU*/ { } }; -- Gitee From 3c4c0fa8d29cc4b0d2ed2d8cb31ca498192b0705 Mon Sep 17 00:00:00 2001 From: Cruz Zhao Date: Sat, 30 Sep 2023 14:52:52 +0800 Subject: [PATCH 0368/2138] anolis: sched: account sibidle for core scheduling ANBZ: #8547 Accounting for sibling idle time for core scheduling, which is time where a cookie'd task running with SMT sibling idle, including sibling forced idle time and sibling real idle time, collectively called sibidle. 
A few details: - For SMT > 2, we scale the amount of idle charged based on the number of idle siblings in function account_sibidle_time(). Additionally, we split the time up and evenly charge it to all running tasks, as each is equally responsible forthe idle. - When core sched is enabled and sibidle count is not zero, we account sibidle in function account_sibidle_time(), otherwise in function update_acpu(). Signed-off-by: Cruz Zhao Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2893 --- include/linux/kernel_stat.h | 9 +++---- include/linux/sched.h | 2 +- kernel/sched/core.c | 50 ++++++++++++++++++++++++------------- kernel/sched/core_sched.c | 45 ++++++++++++++++++++------------- kernel/sched/cputime.c | 21 +++++++--------- kernel/sched/sched.h | 13 +++++----- 6 files changed, 81 insertions(+), 59 deletions(-) diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 955b08e976bb..01f0c6391a98 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -31,7 +31,7 @@ enum cpu_usage_stat { #ifdef CONFIG_SCHED_CORE CPUTIME_FORCEIDLE, #endif -#ifdef CONFIG_SCHED_ACPU +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) CPUTIME_SIBIDLE, #endif NR_STATS, @@ -133,11 +133,8 @@ extern void account_process_tick(struct task_struct *, int user); extern void account_idle_ticks(unsigned long ticks); -#ifdef CONFIG_SCHED_CORE -extern void __account_forceidle_time(struct task_struct *tsk, u64 delta); -#endif -#ifdef CONFIG_SCHED_ACPU -extern void __account_sibidle_time(struct task_struct *tsk, u64 delta); +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) +extern void __account_sibidle_time(struct task_struct *tsk, u64 delta, bool fi); #endif #endif /* _LINUX_KERNEL_STAT_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 962a4841f1ff..0c2b973f4987 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -545,7 +545,7 @@ struct sched_statistics { #ifdef 
CONFIG_SCHED_CORE u64 core_forceidle_sum; #endif -#ifdef CONFIG_SCHED_ACPU +#if defined(CONFIG_SCHED_CORE) || defined(CONFIG_SCHED_ACPU) u64 core_sibidle_sum; #endif diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 03e241ee72d1..534cbafe2232 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -373,7 +373,7 @@ static void __sched_core_flip(bool enabled) for_each_cpu(t, smt_mask) cpu_rq(t)->core_enabled = enabled; - cpu_rq(cpu)->core->core_forceidle_start = 0; + cpu_rq(cpu)->core->core_sibidle_start = 0; sched_core_unlock(cpu, &flags); @@ -5043,6 +5043,15 @@ static void update_acpu(struct rq *rq, struct task_struct *prev, struct task_str if (!static_branch_likely(&acpu_enabled) || !schedstat_enabled()) return; + /* + * If core sched is enabled and core_sibidle_count is not zero, we update sibidle + * time in function __sched_core_account_sibidle(). + */ +#ifdef CONFIG_SCHED_CORE + if (rq->core->core_sibidle_count) + goto out; +#endif + /* Update idle sum and busy sum for current rq. */ delta = now - rq->last_acpu_update_time; if (prev == rq->idle) @@ -5081,7 +5090,7 @@ static void update_acpu(struct rq *rq, struct task_struct *prev, struct task_str if (prev != rq->idle) { delta = sibidle_sum - rq->sibidle_sum; delta = delta > 0 ? 
delta : 0; - __account_sibidle_time(prev, delta); + __account_sibidle_time(prev, delta, false); } rq->sibidle_sum = sibidle_sum; @@ -6246,18 +6255,21 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) /* reset state */ rq->core->core_cookie = 0UL; - if (rq->core->core_forceidle_count) { + if (rq->core->core_sibidle_count) { if (!core_clock_updated) { update_rq_clock(rq->core); core_clock_updated = true; } - sched_core_account_forceidle(rq); + sched_core_account_sibidle(rq); /* reset after accounting force idle */ - rq->core->core_forceidle_start = 0; - rq->core->core_forceidle_count = 0; - rq->core->core_forceidle_occupation = 0; - need_sync = true; - fi_before = true; + rq->core->core_sibidle_start = 0; + rq->core->core_sibidle_count = 0; + rq->core->core_sibidle_occupation = 0; + if (rq->core->core_forceidle_count) { + rq->core->core_forceidle_count = 0; + need_sync = true; + fi_before = true; + } } /* @@ -6333,6 +6345,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) rq_i->core_pick = p; if (p == rq_i->idle) { + rq->core->core_sibidle_count++; if (rq_i->nr_running) { rq->core->core_forceidle_count++; if (!fi_before) @@ -6343,9 +6356,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } } - if (schedstat_enabled() && rq->core->core_forceidle_count) { - rq->core->core_forceidle_start = rq_clock(rq->core); - rq->core->core_forceidle_occupation = occ; + if (schedstat_enabled() && rq->core->core_sibidle_count) { + rq->core->core_sibidle_start = rq_clock(rq->core); + rq->core->core_sibidle_occupation = occ; } rq->core->core_pick_seq = rq->core->core_task_seq; @@ -6387,7 +6400,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (!(fi_before && rq->core->core_forceidle_count)) task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); - rq_i->core_pick->core_occupation = occ; + if (rq->core->core_forceidle_count) + 
rq_i->core_pick->core_occupation = occ; if (i == cpu) { rq_i->core_pick = NULL; @@ -6602,14 +6616,15 @@ static void sched_core_cpu_deactivate(unsigned int cpu) core_rq->core_cookie = rq->core_cookie; core_rq->core_forceidle_count = rq->core_forceidle_count; core_rq->core_forceidle_seq = rq->core_forceidle_seq; - core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; + core_rq->core_sibidle_occupation = rq->core_sibidle_occupation; + core_rq->core_sibidle_count = rq->core_sibidle_count; /* * Accounting edge for forced idle is handled in pick_next_task(). * Don't need another one here, since the hotplug thread shouldn't * have a cookie. */ - core_rq->core_forceidle_start = 0; + core_rq->core_sibidle_start = 0; /* install new leader */ for_each_cpu(t, smt_mask) { @@ -10203,8 +10218,9 @@ void __init sched_init(void) rq->core_enabled = 0; rq->core_tree = RB_ROOT; rq->core_forceidle_count = 0; - rq->core_forceidle_occupation = 0; - rq->core_forceidle_start = 0; + rq->core_sibidle_count = 0; + rq->core_sibidle_occupation = 0; + rq->core_sibidle_start = 0; rq->core_cookie = 0UL; #endif diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c index a57fd8f27498..8db2999e51c8 100644 --- a/kernel/sched/core_sched.c +++ b/kernel/sched/core_sched.c @@ -237,7 +237,7 @@ int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type, #ifdef CONFIG_SCHEDSTATS /* REQUIRES: rq->core's clock recently updated. 
*/ -void __sched_core_account_forceidle(struct rq *rq) +void __sched_core_account_sibidle(struct rq *rq) { const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq)); u64 delta, now = rq_clock(rq->core); @@ -247,28 +247,31 @@ void __sched_core_account_forceidle(struct rq *rq) lockdep_assert_rq_held(rq); - WARN_ON_ONCE(!rq->core->core_forceidle_count); + WARN_ON_ONCE(!rq->core->core_sibidle_count); - if (rq->core->core_forceidle_start == 0) - return; + /* can't be forced idle without a running task */ + WARN_ON_ONCE(!rq->core->core_sibidle_occupation && + rq->core->core_forceidle_count); + + if (rq->core->core_sibidle_start == 0 || + rq->core->core_sibidle_occupation == 0) + goto out; - delta = now - rq->core->core_forceidle_start; + delta = now - rq->core->core_sibidle_start; if (unlikely((s64)delta <= 0)) - return; + goto out; - rq->core->core_forceidle_start = now; + rq->core->core_sibidle_start = now; - if (WARN_ON_ONCE(!rq->core->core_forceidle_occupation)) { - /* can't be forced idle without a running task */ - } else if (rq->core->core_forceidle_count > 1 || - rq->core->core_forceidle_occupation > 1) { + if (rq->core->core_sibidle_count > 1 || + rq->core->core_sibidle_occupation > 1) { /* * For larger SMT configurations, we need to scale the charged * forced idle amount since there can be more than one forced * idle sibling and more than one running cookied task. */ - delta *= rq->core->core_forceidle_count; - delta = div_u64(delta, rq->core->core_forceidle_occupation); + delta *= rq->core->core_sibidle_count; + delta = div_u64(delta, rq->core->core_sibidle_occupation); } for_each_cpu(i, smt_mask) { @@ -279,22 +282,30 @@ void __sched_core_account_forceidle(struct rq *rq) continue; /* - * Note: this will account forceidle to the current cpu, even + * Note: this will account sibidle to the current cpu, even * if it comes from our SMT sibling. 
*/ - __account_forceidle_time(p, delta); + __account_sibidle_time(p, delta, !!rq->core->core_forceidle_count); + } + +out: +#ifdef CONFIG_SCHED_ACPU + for_each_cpu(i, smt_mask) { + rq_i = cpu_rq(i); + rq->last_acpu_update_time = now; } +#endif } void __sched_core_tick(struct rq *rq) { - if (!rq->core->core_forceidle_count) + if (!rq->core->core_sibidle_count) return; if (rq != rq->core) update_rq_clock(rq->core); - __sched_core_account_forceidle(rq); + __sched_core_account_sibidle(rq); } #endif /* CONFIG_SCHEDSTATS */ diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index e64c52d319d9..c4c48183e2f7 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -231,25 +231,22 @@ void account_idle_time(u64 cputime) } -#ifdef CONFIG_SCHED_CORE +#if defined(CONFIG_SCHED_CORE) || defined(CONFIG_SCHED_ACPU) /* - * Account for forceidle time due to core scheduling. + * Account for sibidle, and for forceidle time due to core scheduling. * * REQUIRES: schedstat is enabled. */ -void __account_forceidle_time(struct task_struct *p, u64 delta) -{ - __schedstat_add(p->stats.core_forceidle_sum, delta); - - task_group_account_field(p, CPUTIME_FORCEIDLE, delta); -} -#endif -#ifdef CONFIG_SCHED_ACPU -void __account_sibidle_time(struct task_struct *p, u64 delta) +void __account_sibidle_time(struct task_struct *p, u64 delta, bool fi) { __schedstat_add(p->stats.core_sibidle_sum, delta); - task_group_account_field(p, CPUTIME_SIBIDLE, delta); +#ifdef CONFIG_SCHED_CORE + if (fi) { + __schedstat_add(p->stats.core_forceidle_sum, delta); + task_group_account_field(p, CPUTIME_FORCEIDLE, delta); + } +#endif } #endif diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index aedb52fe9b8a..29967f2a8c6a 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1156,8 +1156,9 @@ struct rq { unsigned long core_cookie; unsigned int core_forceidle_count; unsigned int core_forceidle_seq; - unsigned int core_forceidle_occupation; - u64 core_forceidle_start; + unsigned int 
core_sibidle_occupation; + u64 core_sibidle_start; + unsigned int core_sibidle_count; #endif /* Scratch cpumask to be temporarily used under rq_lock */ @@ -1968,12 +1969,12 @@ static inline const struct cpumask *task_user_cpus(struct task_struct *p) #if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS) -extern void __sched_core_account_forceidle(struct rq *rq); +extern void __sched_core_account_sibidle(struct rq *rq); -static inline void sched_core_account_forceidle(struct rq *rq) +static inline void sched_core_account_sibidle(struct rq *rq) { if (schedstat_enabled()) - __sched_core_account_forceidle(rq); + __sched_core_account_sibidle(rq); } extern void __sched_core_tick(struct rq *rq); @@ -1986,7 +1987,7 @@ static inline void sched_core_tick(struct rq *rq) #else -static inline void sched_core_account_forceidle(struct rq *rq) {} +static inline void sched_core_account_sibidle(struct rq *rq) {} static inline void sched_core_tick(struct rq *rq) {} -- Gitee From a0659ebeb3e43fc97f0c08ff80022c28e336987e Mon Sep 17 00:00:00 2001 From: Cruz Zhao Date: Mon, 18 Sep 2023 10:24:20 +0800 Subject: [PATCH 0369/2138] anolis: sched: introduce per cgroup sibidle accounting ANBZ: #8547 This patch extends per task sibidle accounting into cgroups. rstat is used for cgroup accounting, except for the root, which uses kcpustat in order to bypass the need for doing an rstat flush when reading root stats. Data is displayed via /sys/fs/cgroup/cpu//cpu.stat, row sibidle_sum. Similar to the task accounting, the cgroup accounting requires that schedstats is enabled. 
Signed-off-by: Cruz Zhao Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2893 --- include/linux/cgroup-defs.h | 3 +++ kernel/cgroup/rstat.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 6eefe5153a6f..6b7077b70fa4 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -305,6 +305,9 @@ struct cgroup_base_stat { #ifdef CONFIG_SCHED_CORE u64 forceidle_sum; #endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + u64 sibidle_sum; +#endif }; /* diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index d80d7a608141..a29c5275c68e 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -327,6 +327,9 @@ static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat, #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum += src_bstat->forceidle_sum; #endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + dst_bstat->sibidle_sum += src_bstat->sibidle_sum; +#endif } static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat, @@ -338,6 +341,9 @@ static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat, #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum -= src_bstat->forceidle_sum; #endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + dst_bstat->sibidle_sum -= src_bstat->sibidle_sum; +#endif } static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu) @@ -430,6 +436,11 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp, case CPUTIME_FORCEIDLE: rstatc->bstat.forceidle_sum += delta_exec; break; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + case CPUTIME_SIBIDLE: + rstatc->bstat.sibidle_sum += delta_exec; + break; #endif default: break; @@ -473,6 +484,9 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat) #ifdef CONFIG_SCHED_CORE bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE]; +#endif +#if 
defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + bstat->sibidle_sum += cpustat[CPUTIME_SIBIDLE]; #endif } } @@ -485,6 +499,9 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) #ifdef CONFIG_SCHED_CORE u64 forceidle_time; #endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + u64 sibidle_time; +#endif if (cgroup_parent(cgrp)) { cgroup_rstat_flush_hold(cgrp); @@ -493,6 +510,9 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) &utime, &stime); #ifdef CONFIG_SCHED_CORE forceidle_time = cgrp->bstat.forceidle_sum; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + sibidle_time = cgrp->bstat.sibidle_sum; #endif cgroup_rstat_flush_release(); } else { @@ -502,6 +522,9 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) stime = bstat.cputime.stime; #ifdef CONFIG_SCHED_CORE forceidle_time = bstat.forceidle_sum; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + sibidle_time = bstat.sibidle_sum; #endif } @@ -511,6 +534,9 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) #ifdef CONFIG_SCHED_CORE do_div(forceidle_time, NSEC_PER_USEC); #endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + do_div(sibidle_time, NSEC_PER_USEC); +#endif seq_printf(seq, "usage_usec %llu\n" "user_usec %llu\n" @@ -520,6 +546,9 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) #ifdef CONFIG_SCHED_CORE seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time); #endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + seq_printf(seq, "sibidle_usec %llu\n", sibidle_time); +#endif } /* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */ -- Gitee From e361781fe8d287ea668c228b7ff53209135c1802 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Fri, 10 Nov 2023 11:21:19 +0800 Subject: [PATCH 0370/2138] anolis: sched: fix percpu account for CPUTIME_*IDLE ANBZ: #8547 Since commit 0a1658bedfa7 ("sched/core: add forced idle accounting for 
cgroups") add percpu status CPUTIME_FORCEIDLE, but task_group_account_field() will add time delta on the current cpu instead of the forceidle cpu. As a result, the total sum is correct, but percpu value is wrong. Fix it by adding delta to the right cpu. Signed-off-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2893 --- kernel/sched/cputime.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index c4c48183e2f7..ace56789d46e 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -239,12 +239,16 @@ void account_idle_time(u64 cputime) */ void __account_sibidle_time(struct task_struct *p, u64 delta, bool fi) { + unsigned int cpu = task_cpu(p); + __schedstat_add(p->stats.core_sibidle_sum, delta); - task_group_account_field(p, CPUTIME_SIBIDLE, delta); + kcpustat_cpu(cpu).cpustat[CPUTIME_SIBIDLE] += delta; + cgroup_account_cputime_field(p, CPUTIME_SIBIDLE, delta); #ifdef CONFIG_SCHED_CORE if (fi) { __schedstat_add(p->stats.core_forceidle_sum, delta); - task_group_account_field(p, CPUTIME_FORCEIDLE, delta); + kcpustat_cpu(cpu).cpustat[CPUTIME_FORCEIDLE] += delta; + cgroup_account_cputime_field(p, CPUTIME_FORCEIDLE, delta); } #endif } -- Gitee From 30beebc412cc6ec892e5f4e88ece57a821dcf32e Mon Sep 17 00:00:00 2001 From: Cruz Zhao Date: Wed, 8 Nov 2023 11:22:08 +0800 Subject: [PATCH 0371/2138] anolis: sched: enable CONFIG_SCHED_ACPU by default ANBZ: #8547 Enable CONFIG_SCHED_ACPU by default Signed-off-by: Cruz Zhao Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2893 --- arch/arm64/configs/anolis-debug_defconfig | 1 + arch/arm64/configs/anolis_defconfig | 1 + arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + 4 files changed, 4 insertions(+) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index eb020b30c47d..e4df4d2eb48c 100644 --- 
a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -114,6 +114,7 @@ CONFIG_PREEMPT_VOLUNTARY=y CONFIG_PREEMPT_COUNT=y # CONFIG_PREEMPT_DYNAMIC is not set CONFIG_SCHED_CORE=y +CONFIG_SCHED_ACPU=y # # CPU/Task time and stats accounting diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index aba19cdc5358..c2849134d486 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -112,6 +112,7 @@ CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set # CONFIG_PREEMPT_DYNAMIC is not set CONFIG_SCHED_CORE=y +CONFIG_SCHED_ACPU=y # # CPU/Task time and stats accounting diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 0a31d38d115c..3c4c4795fb2d 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -134,6 +134,7 @@ CONFIG_PREEMPT_COUNT=y CONFIG_PREEMPTION=y CONFIG_PREEMPT_DYNAMIC=y CONFIG_SCHED_CORE=y +CONFIG_SCHED_ACPU=y # # CPU/Task time and stats accounting diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 0b0922337278..35b9d5b8a1cc 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -133,6 +133,7 @@ CONFIG_PREEMPT_COUNT=y CONFIG_PREEMPTION=y CONFIG_PREEMPT_DYNAMIC=y CONFIG_SCHED_CORE=y +CONFIG_SCHED_ACPU=y # # CPU/Task time and stats accounting -- Gitee From 5bb98748a7faea46e4a47fdce047b6fe093638b2 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Wed, 20 Mar 2024 17:11:07 +0800 Subject: [PATCH 0372/2138] anolis: kfence: fix a NULL pointer dereference ANBZ: #8499 When kfence pool exhausted, object will be NULL. Do a check before get its refcnt. 
Fixes: e61ac77f426a ("anolis: kfence: enhance kfence for 6.6") Signed-off-by: Tianchen Ding Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2922 --- mm/kfence/core.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 17f3dda4ebf7..571ef4dcccf8 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -613,7 +613,8 @@ get_free_meta_from_node(struct kfence_freelist_node *kfence_freelist) object = list_entry(kfence_freelist->freelist.next, struct kfence_metadata, list); list_del_init(&object->list); } - percpu_ref_get(&object->kpa->refcnt); + if (object) + percpu_ref_get(&object->kpa->refcnt); raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); return object; @@ -684,8 +685,8 @@ static struct kfence_metadata *get_free_meta(int real_node) list_del_init(&object->list); c->count--; } - - percpu_ref_get(&object->kpa->refcnt); + if (object) + percpu_ref_get(&object->kpa->refcnt); put_cpu_ptr(c); local_irq_restore(flags); -- Gitee From 1d29ded28c08ba5bd2e4f4ae1a56c460198fbdae Mon Sep 17 00:00:00 2001 From: Zelin Deng Date: Tue, 22 Nov 2022 16:38:20 +0800 Subject: [PATCH 0373/2138] anolis: mm/early_ioremap.c: Always build early_memremap_prot() in x86 commit e1347ea0e4825294d441f6d8b4405412774ef313 OpenAnolis. ANBZ: #3267 In some scenarios, we'd want to specify protection attributes when we are doing early memory map. As early_memremap_prot() is also defined in loongarch it's better to select ARCH_USE_MEMREMAP_PROT on X86 config to avoid redefined error. Signed-off-by: Zelin Deng Reviewed-by: Guanjun Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/932 [ hly: Fix conflict. 
] Signed-off-by: hanliyang Link: https://gitee.com/anolis/cloud-kernel/pulls/2917 --- arch/x86/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 45562660bd52..f841dba33e14 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -100,6 +100,7 @@ config X86 select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE select ARCH_HAS_SYSCALL_WRAPPER select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_USE_MEMREMAP_PROT select ARCH_HAS_DEBUG_WX select ARCH_HAS_ZONE_DMA_SET if EXPERT select ARCH_HAVE_NMI_SAFE_CMPXCHG -- Gitee From de3f1cf183144f70ec5d73769c8eb60439433bdd Mon Sep 17 00:00:00 2001 From: Zelin Deng Date: Tue, 22 Nov 2022 16:02:20 +0800 Subject: [PATCH 0374/2138] anolis: x86/setup: Preserve _ENC flag when initrd is being relocated commit ea30196aea830c17565060644034ac7183d27a1a OpenAnolis. ANBZ: #3267 Commit 107cd2532181 ("Encrypt the initrd earlier for BSP microcode update") when SME is enabled, initrd will be encrypted at earlier stage. If initrd is located at e820 reserved area the initrd will be copied to direct mapping area in relocate_initrd(). In this case source address of initrd should be mapped as encrypted while copy_from_early_mem() will clear encrypted attribute as the source address is not in kernel usable area, therefore relocated initrd is encrypted data and is not able to be unpacked later. Add new function copy_early_initrd() to preserve _ENC flag in setup.c and remove copy_from_early_mem() as it's only used once here by x86. 
Signed-off-by: Zelin Deng Reviewed-by: Guanjun Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/932 Signed-off-by: hanliyang Link: https://gitee.com/anolis/cloud-kernel/pulls/2917 --- arch/x86/kernel/setup.c | 30 ++++++++++++++++++++++++++++- include/asm-generic/early_ioremap.h | 6 ------ mm/early_ioremap.c | 21 -------------------- 3 files changed, 29 insertions(+), 28 deletions(-) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 3993353af472..1716ef357439 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -251,6 +251,34 @@ static u64 __init get_ramdisk_size(void) return ramdisk_size; } +#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) + +static void __init copy_early_initrd(void *dest, phys_addr_t src, + unsigned long size) +{ + unsigned long slop, clen; + char *p; + + while (size) { + slop = offset_in_page(src); + clen = size; + if (clen > MAX_MAP_CHUNK - slop) + clen = MAX_MAP_CHUNK - slop; + /* + * _ENC flag should be preserved so that when SME is enabled initrd + * can be mapped as encrypted, as it had been encrypted earlier. + * This flag won't impact on other platforms like TDX/SEV enabled. 
+ */ + p = early_memremap_prot(src & PAGE_MASK, clen + slop, + pgprot_val(FIXMAP_PAGE_NORMAL)); + memcpy(dest, p + slop, clen); + early_memunmap(p, clen + slop); + dest += clen; + src += clen; + size -= clen; + } +} + static void __init relocate_initrd(void) { /* Assume only end is not page aligned */ @@ -270,7 +298,7 @@ static void __init relocate_initrd(void) printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n", relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1); - copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size); + copy_early_initrd((void *)initrd_start, ramdisk_image, ramdisk_size); printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to" " [mem %#010llx-%#010llx]\n", diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h index 9d0479f50f97..be1ce406f481 100644 --- a/include/asm-generic/early_ioremap.h +++ b/include/asm-generic/early_ioremap.h @@ -32,12 +32,6 @@ extern void early_ioremap_setup(void); */ extern void early_ioremap_reset(void); -/* - * Early copy from unmapped memory to kernel mapped memory. 
- */ -extern void copy_from_early_mem(void *dest, phys_addr_t src, - unsigned long size); - #else static inline void early_ioremap_init(void) { } static inline void early_ioremap_setup(void) { } diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c index ce06b2884789..9d4d27399f80 100644 --- a/mm/early_ioremap.c +++ b/mm/early_ioremap.c @@ -243,27 +243,6 @@ early_memremap_prot(resource_size_t phys_addr, unsigned long size, } #endif -#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) - -void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size) -{ - unsigned long slop, clen; - char *p; - - while (size) { - slop = offset_in_page(src); - clen = size; - if (clen > MAX_MAP_CHUNK - slop) - clen = MAX_MAP_CHUNK - slop; - p = early_memremap(src & PAGE_MASK, clen + slop); - memcpy(dest, p + slop, clen); - early_memunmap(p, clen + slop); - dest += clen; - src += clen; - size -= clen; - } -} - #else /* CONFIG_MMU */ void __init __iomem * -- Gitee From f1dbd3afa301567994e792a7e5df62869fe1eeb5 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Thu, 21 Mar 2024 11:18:13 +0800 Subject: [PATCH 0375/2138] anolis: configs: refresh kconfigs ANBZ: #8598 Refresh kconfigs by follow command, No Functional Change. 
`ARCH=${arch} CROSS_COMPILE=script/dummy-tools make olddefconfig` Signed-off-by: Qiao Ma Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/2938 --- arch/arm64/configs/anolis-debug_defconfig | 6 +++++- arch/arm64/configs/anolis_defconfig | 7 ++++++- arch/x86/configs/anolis-debug_defconfig | 14 ++++++++++++-- arch/x86/configs/anolis_defconfig | 10 ++++++++-- 4 files changed, 31 insertions(+), 6 deletions(-) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index e4df4d2eb48c..4c2362da512f 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -114,7 +114,6 @@ CONFIG_PREEMPT_VOLUNTARY=y CONFIG_PREEMPT_COUNT=y # CONFIG_PREEMPT_DYNAMIC is not set CONFIG_SCHED_CORE=y -CONFIG_SCHED_ACPU=y # # CPU/Task time and stats accounting @@ -2503,6 +2502,7 @@ CONFIG_ATA_PIIX=y # CONFIG_SATA_ULI is not set # CONFIG_SATA_VIA is not set # CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN is not set # # PATA SFF controllers with BMDMA @@ -3414,11 +3414,13 @@ CONFIG_I2C_NFORCE2=m # CONFIG_I2C_SIS96X is not set # CONFIG_I2C_VIA is not set # CONFIG_I2C_VIAPRO is not set +# CONFIG_I2C_ZHAOXIN is not set # # ACPI drivers # # CONFIG_I2C_SCMI is not set +# CONFIG_I2C_ZHAOXIN_SMBUS is not set # # I2C system bus drivers (mostly embedded / system-on-chip) @@ -4485,6 +4487,7 @@ CONFIG_FB_EFI=y CONFIG_FB_SIMPLE=y CONFIG_FB_SSD1307=m # CONFIG_FB_SM712 is not set +# CONFIG_FB_LS2K500 is not set CONFIG_FB_CORE=y CONFIG_FB_NOTIFY=y # CONFIG_FIRMWARE_EDID is not set @@ -6924,6 +6927,7 @@ CONFIG_WQ_WATCHDOG=y CONFIG_SCHED_DEBUG=y CONFIG_SCHED_INFO=y CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y # end of Scheduler Debugging # CONFIG_DEBUG_TIMEKEEPING is not set diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index c2849134d486..b659891af7af 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -112,7 
+112,6 @@ CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set # CONFIG_PREEMPT_DYNAMIC is not set CONFIG_SCHED_CORE=y -CONFIG_SCHED_ACPU=y # # CPU/Task time and stats accounting @@ -2523,6 +2522,7 @@ CONFIG_ATA_PIIX=y # CONFIG_SATA_ULI is not set # CONFIG_SATA_VIA is not set # CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN is not set # # PATA SFF controllers with BMDMA @@ -3434,11 +3434,13 @@ CONFIG_I2C_NFORCE2=m # CONFIG_I2C_SIS96X is not set # CONFIG_I2C_VIA is not set # CONFIG_I2C_VIAPRO is not set +# CONFIG_I2C_ZHAOXIN is not set # # ACPI drivers # # CONFIG_I2C_SCMI is not set +# CONFIG_I2C_ZHAOXIN_SMBUS is not set # # I2C system bus drivers (mostly embedded / system-on-chip) @@ -4505,6 +4507,7 @@ CONFIG_FB_EFI=y CONFIG_FB_SIMPLE=y CONFIG_FB_SSD1307=m # CONFIG_FB_SM712 is not set +# CONFIG_FB_LS2K500 is not set CONFIG_FB_CORE=y CONFIG_FB_NOTIFY=y # CONFIG_FIRMWARE_EDID is not set @@ -6707,6 +6710,7 @@ CONFIG_CMA_ALIGNMENT=8 # CONFIG_DMA_MAP_BENCHMARK is not set CONFIG_SGL_ALLOC=y CONFIG_CHECK_SIGNATURE=y +# CONFIG_CPUMASK_OFFSTACK is not set CONFIG_CPU_RMAP=y CONFIG_DQL=y CONFIG_GLOB=y @@ -6906,6 +6910,7 @@ CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 CONFIG_SCHED_DEBUG=y CONFIG_SCHED_INFO=y CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y # end of Scheduler Debugging # CONFIG_DEBUG_TIMEKEEPING is not set diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 3c4c4795fb2d..ba42e39c5c62 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -134,7 +134,6 @@ CONFIG_PREEMPT_COUNT=y CONFIG_PREEMPTION=y CONFIG_PREEMPT_DYNAMIC=y CONFIG_SCHED_CORE=y -CONFIG_SCHED_ACPU=y # # CPU/Task time and stats accounting @@ -2575,6 +2574,7 @@ CONFIG_ATA_PIIX=m # CONFIG_SATA_ULI is not set # CONFIG_SATA_VIA is not set # CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN is not set # # PATA SFF controllers with BMDMA @@ -3707,6 +3707,7 @@ CONFIG_HW_RANDOM_INTEL=m CONFIG_HW_RANDOM_AMD=m # 
CONFIG_HW_RANDOM_BA431 is not set CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_ZHAOXIN=y CONFIG_HW_RANDOM_VIRTIO=y # CONFIG_HW_RANDOM_XIPHERA is not set # CONFIG_APPLICOM is not set @@ -3792,11 +3793,13 @@ CONFIG_I2C_NFORCE2_S4985=m CONFIG_I2C_SIS96X=m CONFIG_I2C_VIA=m CONFIG_I2C_VIAPRO=m +# CONFIG_I2C_ZHAOXIN is not set # # ACPI drivers # CONFIG_I2C_SCMI=m +# CONFIG_I2C_ZHAOXIN_SMBUS is not set # # I2C system bus drivers (mostly embedded / system-on-chip) @@ -3908,6 +3911,9 @@ CONFIG_PINCTRL_SUNRISEPOINT=m # CONFIG_PINCTRL_TIGERLAKE is not set # end of Intel pinctrl drivers +CONFIG_PINCTRL_ZHAOXIN=m +CONFIG_PINCTRL_KX7000=m + # # Renesas pinctrl drivers # @@ -4239,6 +4245,7 @@ CONFIG_SENSORS_TMP421=m # CONFIG_SENSORS_TMP464 is not set # CONFIG_SENSORS_TMP513 is not set CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m CONFIG_SENSORS_VIA686A=m CONFIG_SENSORS_VT1211=m CONFIG_SENSORS_VT8231=m @@ -4734,6 +4741,7 @@ CONFIG_FB_HYPERV=m # CONFIG_FB_SIMPLE is not set # CONFIG_FB_SSD1307 is not set # CONFIG_FB_SM712 is not set +# CONFIG_FB_LS2K500 is not set CONFIG_FB_CORE=y CONFIG_FB_NOTIFY=y CONFIG_FIRMWARE_EDID=y @@ -5583,6 +5591,7 @@ CONFIG_VMGENID=y CONFIG_EFI_SECRET=m CONFIG_SEV_GUEST=m CONFIG_TDX_GUEST_DRIVER=m +CONFIG_CSV_GUEST=m CONFIG_VIRTIO_ANCHOR=y CONFIG_VIRTIO=y CONFIG_VIRTIO_PCI_LIB=y @@ -7114,11 +7123,11 @@ CONFIG_CRYPTO_DEV_QAT_C62X=m CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m CONFIG_CRYPTO_DEV_QAT_C62XVF=m -CONFIG_CRYPTO_DEV_TSSE=m CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_VIRTIO is not set # CONFIG_CRYPTO_DEV_SAFEXCEL is not set # CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_CRYPTO_DEV_TSSE=m CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y CONFIG_X509_CERTIFICATE_PARSER=y @@ -7517,6 +7526,7 @@ CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 CONFIG_SCHED_DEBUG=y CONFIG_SCHED_INFO=y CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y # end of Scheduler Debugging # CONFIG_DEBUG_TIMEKEEPING is not set diff --git 
a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 35b9d5b8a1cc..128303b14bf0 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -133,7 +133,6 @@ CONFIG_PREEMPT_COUNT=y CONFIG_PREEMPTION=y CONFIG_PREEMPT_DYNAMIC=y CONFIG_SCHED_CORE=y -CONFIG_SCHED_ACPU=y # # CPU/Task time and stats accounting @@ -2570,6 +2569,7 @@ CONFIG_ATA_PIIX=m # CONFIG_SATA_ULI is not set # CONFIG_SATA_VIA is not set # CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN is not set # # PATA SFF controllers with BMDMA @@ -3701,6 +3701,7 @@ CONFIG_HW_RANDOM_INTEL=m CONFIG_HW_RANDOM_AMD=m # CONFIG_HW_RANDOM_BA431 is not set CONFIG_HW_RANDOM_VIA=m +CONFIG_HW_RANDOM_ZHAOXIN=y CONFIG_HW_RANDOM_VIRTIO=y # CONFIG_HW_RANDOM_XIPHERA is not set # CONFIG_APPLICOM is not set @@ -3786,11 +3787,13 @@ CONFIG_I2C_NFORCE2_S4985=m CONFIG_I2C_SIS96X=m CONFIG_I2C_VIA=m CONFIG_I2C_VIAPRO=m +# CONFIG_I2C_ZHAOXIN is not set # # ACPI drivers # CONFIG_I2C_SCMI=m +# CONFIG_I2C_ZHAOXIN_SMBUS is not set # # I2C system bus drivers (mostly embedded / system-on-chip) @@ -4732,6 +4735,7 @@ CONFIG_FB_HYPERV=m # CONFIG_FB_SIMPLE is not set # CONFIG_FB_SSD1307 is not set # CONFIG_FB_SM712 is not set +# CONFIG_FB_LS2K500 is not set CONFIG_FB_CORE=y CONFIG_FB_NOTIFY=y CONFIG_FIRMWARE_EDID=y @@ -5580,6 +5584,7 @@ CONFIG_VMGENID=y CONFIG_EFI_SECRET=m CONFIG_SEV_GUEST=m CONFIG_TDX_GUEST_DRIVER=m +CONFIG_CSV_GUEST=m CONFIG_VIRTIO_ANCHOR=y CONFIG_VIRTIO=y CONFIG_VIRTIO_PCI_LIB=y @@ -7109,11 +7114,11 @@ CONFIG_CRYPTO_DEV_QAT_C62X=m CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m CONFIG_CRYPTO_DEV_QAT_C62XVF=m -CONFIG_CRYPTO_DEV_TSSE=m CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_VIRTIO is not set # CONFIG_CRYPTO_DEV_SAFEXCEL is not set # CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_CRYPTO_DEV_TSSE=m CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y CONFIG_X509_CERTIFICATE_PARSER=y @@ -7476,6 +7481,7 @@ 
CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 CONFIG_SCHED_DEBUG=y CONFIG_SCHED_INFO=y CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y # end of Scheduler Debugging # CONFIG_DEBUG_TIMEKEEPING is not set -- Gitee From d501aff36c45c05339e4e957263d9eb07e99e88e Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Thu, 21 Mar 2024 11:48:57 +0800 Subject: [PATCH 0376/2138] anolis: configs: adjust some L0 level kconfigs for arch x86 ANBZ: #8598 Adjust some L0 level kconfigs to make ANCK compatible with more server hardwares. Signed-off-by: Qiao Ma Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/2938 --- arch/arm64/configs/anolis-debug_defconfig | 74 ++---- arch/arm64/configs/anolis_defconfig | 101 ++----- arch/x86/configs/anolis-debug_defconfig | 306 +++++++++++++++++++++- arch/x86/configs/anolis_defconfig | 294 ++++++++++++++++++++- 4 files changed, 635 insertions(+), 140 deletions(-) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 4c2362da512f..af35e1ac7f6a 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -107,12 +107,13 @@ CONFIG_BPF_UNPRIV_DEFAULT_OFF=y CONFIG_BPF_LSM=y # end of BPF subsystem -CONFIG_PREEMPT_VOLUNTARY_BUILD=y +CONFIG_PREEMPT_BUILD=y # CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set CONFIG_PREEMPT_COUNT=y -# CONFIG_PREEMPT_DYNAMIC is not set +CONFIG_PREEMPTION=y +CONFIG_PREEMPT_DYNAMIC=y CONFIG_SCHED_CORE=y # @@ -139,9 +140,11 @@ CONFIG_CPU_ISOLATION=y # RCU Subsystem # CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y # CONFIG_RCU_EXPERT is not set CONFIG_TREE_SRCU=y CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RCU=y CONFIG_TASKS_RUDE_RCU=y CONFIG_TASKS_TRACE_RCU=y CONFIG_RCU_STALL_COMMON=y @@ -862,7 +865,7 @@ CONFIG_BLK_DEV_BSG_COMMON=y CONFIG_BLK_ICQ=y CONFIG_BLK_DEV_BSGLIB=y CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_INTEGRITY_T10=y +CONFIG_BLK_DEV_INTEGRITY_T10=m CONFIG_BLK_DEV_ZONED=y 
CONFIG_BLK_DEV_THROTTLING=y # CONFIG_BLK_DEV_THROTTLING_LOW is not set @@ -920,32 +923,6 @@ CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_PREEMPT_NOTIFIERS=y CONFIG_PADATA=y CONFIG_ASN1=y -CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y -CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_LOCK=y -CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y -CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y -CONFIG_ARCH_INLINE_READ_LOCK=y -CONFIG_ARCH_INLINE_READ_LOCK_BH=y -CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y -CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_READ_UNLOCK=y -CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y -CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y -CONFIG_ARCH_INLINE_WRITE_LOCK=y -CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y -CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y -CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y CONFIG_UNINLINE_SPIN_UNLOCK=y CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y CONFIG_MUTEX_SPIN_ON_OWNER=y @@ -2355,7 +2332,7 @@ CONFIG_SCSI_PROC_FS=y # # SCSI support type (disk, tape, CD-ROM) # -CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=m @@ -2456,7 +2433,7 @@ CONFIG_SCSI_DH_EMC=y CONFIG_SCSI_DH_ALUA=y # end of SCSI device support -CONFIG_ATA=y +CONFIG_ATA=m CONFIG_SATA_HOST=y CONFIG_PATA_TIMINGS=y CONFIG_ATA_VERBOSE_ERROR=y @@ -2468,7 +2445,7 @@ CONFIG_SATA_PMP=y # # Controllers with non-SFF native interface # -CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI=m CONFIG_SATA_MOBILE_LPM_POLICY=0 CONFIG_SATA_AHCI_PLATFORM=m # CONFIG_AHCI_DWC is not set @@ -2491,7 +2468,7 @@ CONFIG_ATA_BMDMA=y # # SATA SFF controllers with BMDMA # -CONFIG_ATA_PIIX=y +CONFIG_ATA_PIIX=m # CONFIG_SATA_DWC is not set # 
CONFIG_SATA_MV is not set # CONFIG_SATA_NV is not set @@ -5163,7 +5140,8 @@ CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y CONFIG_RTC_HCTOSYS=y CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" # CONFIG_RTC_DEBUG is not set CONFIG_RTC_NVMEM=y @@ -5757,7 +5735,7 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_ARM_DMC620_PMU is not set # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m -CONFIG_HISI_PMU=y +CONFIG_HISI_PMU=m # CONFIG_HISI_PCIE_PMU is not set # CONFIG_HNS3_PMU is not set # CONFIG_MARVELL_CN10K_DDR_PMU is not set @@ -5842,7 +5820,9 @@ CONFIG_FS_IOMAP=y CONFIG_BUFFER_HEAD=y CONFIG_LEGACY_DIRECT_IO=y # CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set +CONFIG_EXT3_FS=m +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set CONFIG_EXT4_FS=m CONFIG_EXT4_USE_FOR_EXT2=y CONFIG_EXT4_FS_POSIX_ACL=y @@ -5937,7 +5917,10 @@ CONFIG_FAT_DEFAULT_IOCHARSET="ascii" # CONFIG_FAT_DEFAULT_UTF8 is not set # CONFIG_EXFAT_FS is not set # CONFIG_NTFS_FS is not set -# CONFIG_NTFS3_FS is not set +CONFIG_NTFS3_FS=m +# CONFIG_NTFS3_64BIT_CLUSTER is not set +# CONFIG_NTFS3_LZX_XPRESS is not set +# CONFIG_NTFS3_FS_POSIX_ACL is not set # end of DOS/FAT/EXFAT/NT Filesystems # @@ -6155,9 +6138,8 @@ CONFIG_IO_WQ=y CONFIG_KEYS=y # CONFIG_KEYS_REQUEST_CACHE is not set CONFIG_PERSISTENT_KEYRINGS=y -CONFIG_TRUSTED_KEYS=m +CONFIG_TRUSTED_KEYS=y CONFIG_TRUSTED_KEYS_TPM=y -CONFIG_TRUSTED_KEYS_TEE=y CONFIG_ENCRYPTED_KEYS=y # CONFIG_USER_DECRYPTED_DATA is not set # CONFIG_KEY_DH_OPERATIONS is not set @@ -6345,8 +6327,8 @@ CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SM4=y -CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SM4_GENERIC=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_TWOFISH_COMMON=m @@ -6490,11 +6472,7 @@ CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set # 
CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set -CONFIG_CRYPTO_DEV_CCP=y -CONFIG_CRYPTO_DEV_CCP_DD=m -CONFIG_CRYPTO_DEV_SP_CCP=y -CONFIG_CRYPTO_DEV_CCP_CRYPTO=m -# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set +# CONFIG_CRYPTO_DEV_CCP is not set CONFIG_CRYPTO_DEV_CPT=m CONFIG_CAVIUM_CPT=m CONFIG_CRYPTO_DEV_NITROX=m @@ -6728,7 +6706,7 @@ CONFIG_OBJAGG=m CONFIG_GENERIC_IOREMAP=y CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y CONFIG_PLDMFW=y -CONFIG_ASN1_ENCODER=m +CONFIG_ASN1_ENCODER=y # # Kernel hacking @@ -6931,6 +6909,7 @@ CONFIG_SCHED_ACPU=y # end of Scheduler Debugging # CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set # # Lock Debugging (spinlocks, mutexes, etc...) @@ -7031,6 +7010,7 @@ CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y # CONFIG_FUNCTION_PROFILER is not set CONFIG_STACK_TRACER=y # CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set CONFIG_SCHED_TRACER=y CONFIG_HWLAT_TRACER=y CONFIG_OSNOISE_TRACER=y diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index b659891af7af..8e118362d2e0 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -106,11 +106,13 @@ CONFIG_BPF_UNPRIV_DEFAULT_OFF=y CONFIG_BPF_LSM=y # end of BPF subsystem -CONFIG_PREEMPT_VOLUNTARY_BUILD=y +CONFIG_PREEMPT_BUILD=y # CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y # CONFIG_PREEMPT is not set -# CONFIG_PREEMPT_DYNAMIC is not set +CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTION=y +CONFIG_PREEMPT_DYNAMIC=y CONFIG_SCHED_CORE=y # @@ -137,9 +139,11 @@ CONFIG_CPU_ISOLATION=y # RCU Subsystem # CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y # CONFIG_RCU_EXPERT is not set CONFIG_TREE_SRCU=y CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RCU=y CONFIG_TASKS_RUDE_RCU=y CONFIG_TASKS_TRACE_RCU=y CONFIG_RCU_STALL_COMMON=y @@ -858,7 +862,7 @@ CONFIG_BLK_DEV_BSG_COMMON=y CONFIG_BLK_ICQ=y CONFIG_BLK_DEV_BSGLIB=y CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_INTEGRITY_T10=y +CONFIG_BLK_DEV_INTEGRITY_T10=m CONFIG_BLK_DEV_ZONED=y 
CONFIG_BLK_DEV_THROTTLING=y # CONFIG_BLK_DEV_THROTTLING_LOW is not set @@ -916,57 +920,7 @@ CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_PREEMPT_NOTIFIERS=y CONFIG_PADATA=y CONFIG_ASN1=y -CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y -CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_LOCK=y -CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y -CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y -CONFIG_ARCH_INLINE_READ_LOCK=y -CONFIG_ARCH_INLINE_READ_LOCK_BH=y -CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y -CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_READ_UNLOCK=y -CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y -CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y -CONFIG_ARCH_INLINE_WRITE_LOCK=y -CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y -CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y -CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y -CONFIG_INLINE_SPIN_TRYLOCK=y -CONFIG_INLINE_SPIN_TRYLOCK_BH=y -CONFIG_INLINE_SPIN_LOCK=y -CONFIG_INLINE_SPIN_LOCK_BH=y -CONFIG_INLINE_SPIN_LOCK_IRQ=y -CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y -CONFIG_INLINE_SPIN_UNLOCK_BH=y -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y -CONFIG_INLINE_READ_LOCK=y -CONFIG_INLINE_READ_LOCK_BH=y -CONFIG_INLINE_READ_LOCK_IRQ=y -CONFIG_INLINE_READ_LOCK_IRQSAVE=y -CONFIG_INLINE_READ_UNLOCK=y -CONFIG_INLINE_READ_UNLOCK_BH=y -CONFIG_INLINE_READ_UNLOCK_IRQ=y -CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y -CONFIG_INLINE_WRITE_LOCK=y -CONFIG_INLINE_WRITE_LOCK_BH=y -CONFIG_INLINE_WRITE_LOCK_IRQ=y -CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y -CONFIG_INLINE_WRITE_UNLOCK=y -CONFIG_INLINE_WRITE_UNLOCK_BH=y -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_UNINLINE_SPIN_UNLOCK=y 
CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y CONFIG_MUTEX_SPIN_ON_OWNER=y CONFIG_RWSEM_SPIN_ON_OWNER=y @@ -2375,7 +2329,7 @@ CONFIG_SCSI_PROC_FS=y # # SCSI support type (disk, tape, CD-ROM) # -CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=m @@ -2476,7 +2430,7 @@ CONFIG_SCSI_DH_EMC=y CONFIG_SCSI_DH_ALUA=y # end of SCSI device support -CONFIG_ATA=y +CONFIG_ATA=m CONFIG_SATA_HOST=y CONFIG_PATA_TIMINGS=y CONFIG_ATA_VERBOSE_ERROR=y @@ -2488,7 +2442,7 @@ CONFIG_SATA_PMP=y # # Controllers with non-SFF native interface # -CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI=m CONFIG_SATA_MOBILE_LPM_POLICY=0 CONFIG_SATA_AHCI_PLATFORM=m # CONFIG_AHCI_DWC is not set @@ -2511,7 +2465,7 @@ CONFIG_ATA_BMDMA=y # # SATA SFF controllers with BMDMA # -CONFIG_ATA_PIIX=y +CONFIG_ATA_PIIX=m # CONFIG_SATA_DWC is not set # CONFIG_SATA_MV is not set # CONFIG_SATA_NV is not set @@ -5183,7 +5137,8 @@ CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y CONFIG_RTC_HCTOSYS=y CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" # CONFIG_RTC_DEBUG is not set CONFIG_RTC_NVMEM=y @@ -5776,7 +5731,7 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_ARM_DMC620_PMU is not set # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m -CONFIG_HISI_PMU=y +CONFIG_HISI_PMU=m # CONFIG_HISI_PCIE_PMU is not set # CONFIG_HNS3_PMU is not set # CONFIG_MARVELL_CN10K_DDR_PMU is not set @@ -5861,7 +5816,9 @@ CONFIG_FS_IOMAP=y CONFIG_BUFFER_HEAD=y CONFIG_LEGACY_DIRECT_IO=y # CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set +CONFIG_EXT3_FS=m +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set CONFIG_EXT4_FS=m CONFIG_EXT4_USE_FOR_EXT2=y CONFIG_EXT4_FS_POSIX_ACL=y @@ -5956,7 +5913,10 @@ CONFIG_FAT_DEFAULT_IOCHARSET="ascii" # CONFIG_FAT_DEFAULT_UTF8 is not set # CONFIG_EXFAT_FS is not set # CONFIG_NTFS_FS is not set -# CONFIG_NTFS3_FS is not set +CONFIG_NTFS3_FS=m +# CONFIG_NTFS3_64BIT_CLUSTER is not set +# 
CONFIG_NTFS3_LZX_XPRESS is not set +# CONFIG_NTFS3_FS_POSIX_ACL is not set # end of DOS/FAT/EXFAT/NT Filesystems # @@ -6174,9 +6134,8 @@ CONFIG_IO_WQ=y CONFIG_KEYS=y # CONFIG_KEYS_REQUEST_CACHE is not set CONFIG_PERSISTENT_KEYRINGS=y -CONFIG_TRUSTED_KEYS=m +CONFIG_TRUSTED_KEYS=y CONFIG_TRUSTED_KEYS_TPM=y -CONFIG_TRUSTED_KEYS_TEE=y CONFIG_ENCRYPTED_KEYS=y # CONFIG_USER_DECRYPTED_DATA is not set # CONFIG_KEY_DH_OPERATIONS is not set @@ -6364,8 +6323,8 @@ CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SM4=y -CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SM4_GENERIC=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_TWOFISH_COMMON=m @@ -6509,11 +6468,7 @@ CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set # CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set -CONFIG_CRYPTO_DEV_CCP=y -CONFIG_CRYPTO_DEV_CCP_DD=m -CONFIG_CRYPTO_DEV_SP_CCP=y -CONFIG_CRYPTO_DEV_CCP_CRYPTO=m -# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set +# CONFIG_CRYPTO_DEV_CCP is not set CONFIG_CRYPTO_DEV_CPT=m CONFIG_CAVIUM_CPT=m CONFIG_CRYPTO_DEV_NITROX=m @@ -6745,7 +6700,7 @@ CONFIG_OBJAGG=m CONFIG_GENERIC_IOREMAP=y CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y CONFIG_PLDMFW=y -CONFIG_ASN1_ENCODER=m +CONFIG_ASN1_ENCODER=y # # Kernel hacking @@ -6914,6 +6869,7 @@ CONFIG_SCHED_ACPU=y # end of Scheduler Debugging # CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set # # Lock Debugging (spinlocks, mutexes, etc...) 
@@ -7000,6 +6956,7 @@ CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y # CONFIG_FUNCTION_PROFILER is not set CONFIG_STACK_TRACER=y # CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set CONFIG_SCHED_TRACER=y CONFIG_HWLAT_TRACER=y CONFIG_OSNOISE_TRACER=y diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index ba42e39c5c62..2e13de357040 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -1807,7 +1807,7 @@ CONFIG_NET_ACT_CT=m CONFIG_NET_TC_SKB_EXT=y CONFIG_NET_SCH_FIFO=y CONFIG_DCB=y -CONFIG_DNS_RESOLVER=y +CONFIG_DNS_RESOLVER=m # CONFIG_BATMAN_ADV is not set CONFIG_OPENVSWITCH=m CONFIG_OPENVSWITCH_GRE=m @@ -2090,6 +2090,7 @@ CONFIG_GENERIC_CPU_AUTOPROBE=y CONFIG_GENERIC_CPU_VULNERABILITIES=y CONFIG_REGMAP=y CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m CONFIG_DMA_SHARED_BUFFER=y # CONFIG_DMA_FENCE_TRACE is not set # CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set @@ -2224,6 +2225,10 @@ CONFIG_MTD_CFI_I2=y # Self-contained MTD device drivers # # CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -2240,6 +2245,7 @@ CONFIG_MTD_CFI_I2=y # # CONFIG_MTD_ONENAND is not set # CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set # # ECC engine support @@ -2256,6 +2262,7 @@ CONFIG_MTD_CFI_I2=y # CONFIG_MTD_LPDDR is not set # end of LPDDR & LPDDR2 PCM memory drivers +# CONFIG_MTD_SPI_NOR is not set CONFIG_MTD_UBI=m CONFIG_MTD_UBI_WL_THRESHOLD=4096 CONFIG_MTD_UBI_BEB_LIMIT=20 @@ -2359,6 +2366,7 @@ CONFIG_SENSORS_APDS990X=m # CONFIG_HMC6352 is not set # CONFIG_DS1682 is not set CONFIG_VMWARE_BALLOON=m +# CONFIG_LATTICE_ECP3_CONFIG is not set # CONFIG_SRAM is not set # CONFIG_DW_XDATA_PCIE is not set # CONFIG_PCI_ENDPOINT_TEST is not set @@ -2370,9 +2378,11 @@ 
CONFIG_MISC_RTSX=m # EEPROM support # # CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_EEPROM_LEGACY=m CONFIG_EEPROM_MAX6875=m CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set # CONFIG_EEPROM_IDT_89HPESX is not set # CONFIG_EEPROM_EE1004 is not set # end of EEPROM support @@ -2768,6 +2778,7 @@ CONFIG_NET_VENDOR_AQUANTIA=y CONFIG_AQTION=m # CONFIG_NET_VENDOR_ARC is not set CONFIG_NET_VENDOR_ASIX=y +# CONFIG_SPI_AX88796C is not set CONFIG_NET_VENDOR_ATHEROS=y CONFIG_ATL2=m CONFIG_ATL1=m @@ -2814,6 +2825,7 @@ CONFIG_NET_VENDOR_CISCO=y CONFIG_ENIC=m # CONFIG_NET_VENDOR_CORTINA is not set CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set CONFIG_DNET=m CONFIG_NET_VENDOR_DEC=y # CONFIG_NET_TULIP is not set @@ -2861,6 +2873,8 @@ CONFIG_ICE_HWTS=y CONFIG_FM10K=m CONFIG_IGC=m # CONFIG_JME is not set +CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set CONFIG_NET_VENDOR_LITEX=y # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_NET_VENDOR_MELLANOX=y @@ -2968,6 +2982,7 @@ CONFIG_SFC_MCDI_LOGGING=y # CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TI is not set CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set # CONFIG_NET_VENDOR_VIA is not set CONFIG_NET_VENDOR_WANGXUN=y CONFIG_LIBWX=m @@ -3042,6 +3057,7 @@ CONFIG_DP83867_PHY=m # CONFIG_DP83TD510_PHY is not set CONFIG_VITESSE_PHY=m CONFIG_XILINX_GMII2RGMII=m +# CONFIG_MICREL_KS8995MA is not set # CONFIG_PSE_CONTROLLER is not set CONFIG_MDIO_DEVICE=y CONFIG_MDIO_BUS=y @@ -3244,6 +3260,7 @@ CONFIG_MT76x2U=m # CONFIG_MT7996E is not set CONFIG_WLAN_VENDOR_MICROCHIP=y # CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set CONFIG_WLAN_VENDOR_PURELIFI=y # CONFIG_PLFXLC is not set CONFIG_WLAN_VENDOR_RALINK=y @@ -3346,7 +3363,13 @@ CONFIG_HDLC_PPP=m # CONFIG_FARSYNC is not set CONFIG_IEEE802154_DRIVERS=m CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set # 
CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set # CONFIG_IEEE802154_HWSIM is not set # @@ -3412,6 +3435,7 @@ CONFIG_INPUT_KEYBOARD=y # CONFIG_KEYBOARD_ADC is not set # CONFIG_KEYBOARD_ADP5588 is not set # CONFIG_KEYBOARD_ADP5589 is not set +# CONFIG_KEYBOARD_APPLESPI is not set CONFIG_KEYBOARD_ATKBD=y # CONFIG_KEYBOARD_QT1050 is not set # CONFIG_KEYBOARD_QT1070 is not set @@ -3473,6 +3497,8 @@ CONFIG_TABLET_USB_KBTAB=m # CONFIG_TABLET_USB_PEGASUS is not set CONFIG_TABLET_SERIAL_WACOM4=m CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set # CONFIG_TOUCHSCREEN_AD7879 is not set # CONFIG_TOUCHSCREEN_ADC is not set # CONFIG_TOUCHSCREEN_ATMEL_MXT is not set @@ -3523,12 +3549,14 @@ CONFIG_TOUCHSCREEN_WACOM_I2C=m # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set # CONFIG_TOUCHSCREEN_TSC_SERIO is not set # CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set # CONFIG_TOUCHSCREEN_TSC2007 is not set # CONFIG_TOUCHSCREEN_RM_TS is not set # CONFIG_TOUCHSCREEN_SILEAD is not set # CONFIG_TOUCHSCREEN_SIS_I2C is not set # CONFIG_TOUCHSCREEN_ST1232 is not set # CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set # CONFIG_TOUCHSCREEN_SX8654 is not set # CONFIG_TOUCHSCREEN_TPS6507X is not set # CONFIG_TOUCHSCREEN_ZET6223 is not set @@ -3575,6 +3603,7 @@ CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m # CONFIG_INPUT_DRV2667_HAPTICS is not set CONFIG_RMI4_CORE=m CONFIG_RMI4_I2C=m +# CONFIG_RMI4_SPI is not set CONFIG_RMI4_SMB=m CONFIG_RMI4_F03=y CONFIG_RMI4_F03_SERIO=m @@ -3656,6 +3685,8 @@ CONFIG_SERIAL_8250_PERICOM=y # Non-8250 serial port support # # CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set # CONFIG_SERIAL_UARTLITE is not set CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y @@ -3724,6 +3755,7 @@ CONFIG_TCG_TPM=y 
CONFIG_HW_RANDOM_TPM=y CONFIG_TCG_TIS_CORE=y CONFIG_TCG_TIS=y +# CONFIG_TCG_TIS_SPI is not set # CONFIG_TCG_TIS_I2C is not set # CONFIG_TCG_TIS_I2C_CR50 is not set CONFIG_TCG_TIS_I2C_ATMEL=m @@ -3737,6 +3769,7 @@ CONFIG_TCG_CRB=y # CONFIG_TCG_VTPM_PROXY is not set CONFIG_TCG_TIS_ST33ZP24=m CONFIG_TCG_TIS_ST33ZP24_I2C=m +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set CONFIG_TELCLOCK=m # CONFIG_XILLYBUS is not set # CONFIG_XILLYUSB is not set @@ -3845,7 +3878,49 @@ CONFIG_I2C_STUB=m # end of I2C support # CONFIG_I3C is not set -# CONFIG_SPI is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_LANTIQ_SSC is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PCI1XXXX is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y # CONFIG_SPMI is not set # CONFIG_HSI is not set CONFIG_PPS=y @@ -3982,6 +4057,16 @@ CONFIG_GPIO_ICH=m # CONFIG_GPIO_RDC321X is not set # end of PCI GPIO expanders +# +# SPI GPIO expanders +# +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# 
CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + # # USB GPIO expanders # @@ -4047,6 +4132,7 @@ CONFIG_HWMON_VID=m # CONFIG_SENSORS_ABITUGURU=m CONFIG_SENSORS_ABITUGURU3=m +# CONFIG_SENSORS_AD7314 is not set CONFIG_SENSORS_AD7414=m CONFIG_SENSORS_AD7418=m CONFIG_SENSORS_ADM1025=m @@ -4056,6 +4142,7 @@ CONFIG_SENSORS_ADM1031=m # CONFIG_SENSORS_ADM1177 is not set CONFIG_SENSORS_ADM9240=m CONFIG_SENSORS_ADT7X10=m +# CONFIG_SENSORS_ADT7310 is not set CONFIG_SENSORS_ADT7410=m CONFIG_SENSORS_ADT7411=m CONFIG_SENSORS_ADT7462=m @@ -4102,6 +4189,7 @@ CONFIG_SENSORS_JC42=m CONFIG_SENSORS_LINEAGE=m # CONFIG_SENSORS_LTC2945 is not set # CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set # CONFIG_SENSORS_LTC2990 is not set # CONFIG_SENSORS_LTC2992 is not set CONFIG_SENSORS_LTC4151=m @@ -4110,11 +4198,13 @@ CONFIG_SENSORS_LTC4215=m CONFIG_SENSORS_LTC4245=m # CONFIG_SENSORS_LTC4260 is not set CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX1111 is not set # CONFIG_SENSORS_MAX127 is not set CONFIG_SENSORS_MAX16065=m CONFIG_SENSORS_MAX1619=m CONFIG_SENSORS_MAX1668=m CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set # CONFIG_SENSORS_MAX31730 is not set # CONFIG_SENSORS_MAX31760 is not set # CONFIG_MAX31827 is not set @@ -4130,7 +4220,9 @@ CONFIG_SENSORS_MCP3021=m # CONFIG_SENSORS_TC654 is not set # CONFIG_SENSORS_TPS23861 is not set # CONFIG_SENSORS_MR75203 is not set +# CONFIG_SENSORS_ADCXX is not set CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set CONFIG_SENSORS_LM73=m CONFIG_SENSORS_LM75=m CONFIG_SENSORS_LM77=m @@ -4230,6 +4322,7 @@ CONFIG_SENSORS_SCH5636=m # CONFIG_SENSORS_STTS751 is not set # CONFIG_SENSORS_ADC128D818 is not set CONFIG_SENSORS_ADS7828=m +# CONFIG_SENSORS_ADS7871 is not set CONFIG_SENSORS_AMC6821=m CONFIG_SENSORS_INA209=m CONFIG_SENSORS_INA2XX=m @@ -4410,10 +4503,12 @@ CONFIG_MFD_CORE=y # CONFIG_MFD_AXP20X_I2C is not set # CONFIG_MFD_CS42L43_I2C is not set # CONFIG_MFD_MADERA is not set +# 
CONFIG_MFD_DA9052_SPI is not set # CONFIG_MFD_DA9062 is not set # CONFIG_MFD_DA9063 is not set # CONFIG_MFD_DA9150 is not set # CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set # CONFIG_MFD_MC13XXX_I2C is not set # CONFIG_MFD_MP2629 is not set # CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set @@ -4435,6 +4530,8 @@ CONFIG_MFD_INTEL_LPSS_PCI=m # CONFIG_MFD_MT6370 is not set # CONFIG_MFD_MT6397 is not set # CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set CONFIG_MFD_VIPERBOARD=m # CONFIG_MFD_RETU is not set # CONFIG_MFD_PCF50633 is not set @@ -4457,14 +4554,19 @@ CONFIG_MFD_SM501_GPIO=y # CONFIG_MFD_TPS65086 is not set # CONFIG_MFD_TI_LP873X is not set # CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set # CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set # CONFIG_MFD_WL1273_CORE is not set # CONFIG_MFD_LM3533 is not set # CONFIG_MFD_TQMX86 is not set CONFIG_MFD_VX855=m # CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM831X_SPI is not set # CONFIG_MFD_WM8994 is not set # CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set # end of Multifunction device drivers # CONFIG_REGULATOR is not set @@ -4663,7 +4765,10 @@ CONFIG_DRM_PANEL=y # # Display Panels # +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set # CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set # end of Display Panels CONFIG_DRM_BRIDGE=y @@ -4680,7 +4785,17 @@ CONFIG_DRM_PANEL_BRIDGE=y CONFIG_DRM_BOCHS=m CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set # CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# 
CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set # CONFIG_DRM_XEN_FRONTEND is not set # CONFIG_DRM_VBOXVIDEO is not set # CONFIG_DRM_GUD is not set @@ -4766,7 +4881,18 @@ CONFIG_FB_TILEBLITTING=y # Backlight & LCD device support # CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set CONFIG_BACKLIGHT_CLASS_DEVICE=y # CONFIG_BACKLIGHT_KTD253 is not set # CONFIG_BACKLIGHT_KTZ8866 is not set @@ -5017,6 +5143,7 @@ CONFIG_USB_EHCI_PCI=y # CONFIG_USB_EHCI_HCD_PLATFORM is not set # CONFIG_USB_OXU210HP_HCD is not set # CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PCI=y # CONFIG_USB_OHCI_HCD_PLATFORM is not set @@ -5234,6 +5361,7 @@ CONFIG_MMC_SDHCI_PLTFM=m # CONFIG_MMC_SDHCI_F_SDH30 is not set # CONFIG_MMC_WBSD is not set CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set CONFIG_MMC_CB710=m CONFIG_MMC_VIA_SDMMC=m CONFIG_MMC_VUB300=m @@ -5287,6 +5415,7 @@ CONFIG_LEDS_LP3944=m # CONFIG_LEDS_PCA955X is not set # CONFIG_LEDS_PCA963X is not set # CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_DAC124S085 is not set # CONFIG_LEDS_PWM is not set # CONFIG_LEDS_BD2606MVV is not set # CONFIG_LEDS_BD2802 is not set @@ -5409,7 +5538,8 @@ CONFIG_RTC_MC146818_LIB=y CONFIG_RTC_CLASS=y CONFIG_RTC_HCTOSYS=y CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" # CONFIG_RTC_DEBUG is not set CONFIG_RTC_NVMEM=y @@ -5460,6 +5590,20 @@ CONFIG_RTC_DRV_EM3027=m # # SPI RTC drivers # +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 
is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set CONFIG_RTC_I2C_AND_SPI=m # @@ -5658,6 +5802,7 @@ CONFIG_MLXREG_HOTPLUG=m # CONFIG_MLXREG_LC is not set # CONFIG_NVSW_SN2201 is not set CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE3_WMI is not set # CONFIG_SURFACE_3_POWER_OPREGION is not set # CONFIG_SURFACE_GPE is not set # CONFIG_SURFACE_HOTPLUG is not set @@ -5756,6 +5901,7 @@ CONFIG_SONY_LAPTOP=m CONFIG_SONYPI_COMPAT=y # CONFIG_SYSTEM76_ACPI is not set CONFIG_TOPSTAR_LAPTOP=m +# CONFIG_SERIAL_MULTI_INSTANTIATE is not set CONFIG_MLX_PLATFORM=m CONFIG_INTEL_IPS=m # CONFIG_INTEL_SCU_PCI is not set @@ -5767,6 +5913,7 @@ CONFIG_P2SB=y CONFIG_HAVE_CLK=y CONFIG_HAVE_CLK_PREPARE=y CONFIG_COMMON_CLK=y +# CONFIG_LMK04832 is not set # CONFIG_COMMON_CLK_MAX9485 is not set # CONFIG_COMMON_CLK_SI5341 is not set # CONFIG_COMMON_CLK_SI5351 is not set @@ -5907,27 +6054,39 @@ CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 # # Accelerometers # +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set # CONFIG_ADXL313_I2C is not set +# CONFIG_ADXL313_SPI is not set # CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set # CONFIG_ADXL355_I2C is not set +# CONFIG_ADXL355_SPI is not set +# CONFIG_ADXL367_SPI is not set # CONFIG_ADXL367_I2C is not set +# CONFIG_ADXL372_SPI is not set # CONFIG_ADXL372_I2C is not set # CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set # CONFIG_BMA400 is not set # CONFIG_BMC150_ACCEL is not set +# CONFIG_BMI088_ACCEL is not set # CONFIG_DA280 is not set # CONFIG_DA311 is not set # CONFIG_DMARD06 is not set # CONFIG_DMARD09 is not set # CONFIG_DMARD10 is not set # 
CONFIG_FXLS8962AF_I2C is not set +# CONFIG_FXLS8962AF_SPI is not set CONFIG_HID_SENSOR_ACCEL_3D=m # CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_IIO_KX022A_SPI is not set # CONFIG_IIO_KX022A_I2C is not set # CONFIG_KXSD9 is not set # CONFIG_KXCJK1013 is not set # CONFIG_MC3230 is not set # CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set # CONFIG_MMA7660 is not set # CONFIG_MMA8452 is not set # CONFIG_MMA9551 is not set @@ -5935,6 +6094,8 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # CONFIG_MSA311 is not set # CONFIG_MXC4005 is not set # CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_SCA3300 is not set # CONFIG_STK8312 is not set # CONFIG_STK8BA50 is not set # end of Accelerometers @@ -5942,26 +6103,67 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # # Analog to digital converters # +# CONFIG_AD4130 is not set # CONFIG_AD7091R5 is not set +# CONFIG_AD7124 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7266 is not set +# CONFIG_AD7280 is not set # CONFIG_AD7291 is not set +# CONFIG_AD7292 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set # CONFIG_AD7606_IFACE_PARALLEL is not set +# CONFIG_AD7606_IFACE_SPI is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7768_1 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD7949 is not set # CONFIG_AD799X is not set # CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HI8435 is not set # CONFIG_HX711 is not set # CONFIG_INA2XX_ADC is not set # CONFIG_LTC2471 is not set # CONFIG_LTC2485 is not set +# CONFIG_LTC2496 is not set # CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX11205 is not set +# CONFIG_MAX11410 is not set +# CONFIG_MAX1241 is not set # CONFIG_MAX1363 is not set # CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set # CONFIG_MCP3422 is not set +# CONFIG_MCP3911 is not set # CONFIG_NAU7802 is not set # 
CONFIG_RICHTEK_RTQ6056 is not set # CONFIG_SD_ADC_MODULATOR is not set # CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set # CONFIG_TI_ADS1015 is not set # CONFIG_TI_ADS7924 is not set # CONFIG_TI_ADS1100 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8344 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_ADS124S08 is not set +# CONFIG_TI_ADS131E08 is not set +# CONFIG_TI_LMP92064 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_TI_TSC2046 is not set # CONFIG_VF610_ADC is not set # CONFIG_VIPERBOARD_ADC is not set # CONFIG_XILINX_XADC is not set @@ -5970,6 +6172,8 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # # Analog to digital and digital to analog converters # +# CONFIG_AD74115 is not set +# CONFIG_AD74413R is not set # end of Analog to digital and digital to analog converters # @@ -5981,6 +6185,8 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # # Amplifiers # +# CONFIG_AD8366 is not set +# CONFIG_ADA4250 is not set # CONFIG_HMC425 is not set # end of Amplifiers @@ -6023,24 +6229,51 @@ CONFIG_HID_SENSOR_IIO_TRIGGER=m # # SSP Sensor Common # +# CONFIG_IIO_SSP_SENSORHUB is not set # end of SSP Sensor Common # # Digital to analog converters # +# CONFIG_AD3552R is not set # CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set # CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set # CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set # CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2688 is not set +# CONFIG_AD5686_SPI is not set # CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5766 is not set +# CONFIG_AD5770R is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7293 is not set +# CONFIG_AD7303 is not set +# 
CONFIG_AD8801 is not set # CONFIG_DPOT_DAC is not set # CONFIG_DS4424 is not set +# CONFIG_LTC1660 is not set +# CONFIG_LTC2632 is not set # CONFIG_M62332 is not set # CONFIG_MAX517 is not set +# CONFIG_MAX5522 is not set # CONFIG_MAX5821 is not set # CONFIG_MCP4725 is not set # CONFIG_MCP4728 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set # CONFIG_TI_DAC5571 is not set +# CONFIG_TI_DAC7311 is not set +# CONFIG_TI_DAC7612 is not set # CONFIG_VF610_DAC is not set # end of Digital to analog converters @@ -6052,6 +6285,7 @@ CONFIG_HID_SENSOR_IIO_TRIGGER=m # # Filters # +# CONFIG_ADMV8818 is not set # end of Filters # @@ -6061,17 +6295,31 @@ CONFIG_HID_SENSOR_IIO_TRIGGER=m # # Clock Generator/Distribution # +# CONFIG_AD9523 is not set # end of Clock Generator/Distribution # # Phase-Locked Loop (PLL) frequency synthesizers # +# CONFIG_ADF4350 is not set +# CONFIG_ADF4371 is not set +# CONFIG_ADF4377 is not set +# CONFIG_ADMV1013 is not set +# CONFIG_ADMV1014 is not set +# CONFIG_ADMV4420 is not set +# CONFIG_ADRF6780 is not set # end of Phase-Locked Loop (PLL) frequency synthesizers # end of Frequency Synthesizers DDS/PLL # # Digital gyroscope sensors # +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS290 is not set +# CONFIG_ADXRS450 is not set # CONFIG_BMG160 is not set # CONFIG_FXAS21002C is not set CONFIG_HID_SENSOR_GYRO_3D=m @@ -6087,6 +6335,7 @@ CONFIG_HID_SENSOR_GYRO_3D=m # # Heart Rate Monitors # +# CONFIG_AFE4403 is not set # CONFIG_AFE4404 is not set # CONFIG_MAX30100 is not set # CONFIG_MAX30102 is not set @@ -6110,12 +6359,20 @@ CONFIG_HID_SENSOR_HUMIDITY=m # # Inertial measurement units # +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16460 is not set +# CONFIG_ADIS16475 is not set +# CONFIG_ADIS16480 is not set # CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set # CONFIG_BOSCH_BNO055_I2C is not set # CONFIG_FXOS8700_I2C is not set 
+# CONFIG_FXOS8700_SPI is not set # CONFIG_KMX61 is not set # CONFIG_INV_ICM42600_I2C is not set +# CONFIG_INV_ICM42600_SPI is not set # CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set # CONFIG_IIO_ST_LSM6DSX is not set # CONFIG_IIO_ST_LSM9DS0 is not set # end of Inertial measurement units @@ -6185,12 +6442,15 @@ CONFIG_HID_SENSOR_PROX=m # CONFIG_AK8975 is not set # CONFIG_AK09911 is not set # CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set # CONFIG_MAG3110 is not set CONFIG_HID_SENSOR_MAGNETOMETER_3D=m # CONFIG_MMC35240 is not set # CONFIG_IIO_ST_MAGN_3AXIS is not set # CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set # CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_SENSORS_RM3100_SPI is not set # CONFIG_TI_TMAG5273 is not set # CONFIG_YAMAHA_YAS530 is not set # end of Magnetometer sensors @@ -6228,9 +6488,14 @@ CONFIG_HID_SENSOR_DEVICE_ROTATION=m # CONFIG_AD5272 is not set # CONFIG_DS1803 is not set # CONFIG_MAX5432 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set # CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set # CONFIG_MCP4531 is not set +# CONFIG_MCP41010 is not set # CONFIG_TPL0102 is not set +# CONFIG_X9250 is not set # end of Digital potentiometers # @@ -6250,6 +6515,7 @@ CONFIG_HID_SENSOR_PRESS=m # CONFIG_HP03 is not set # CONFIG_ICP10100 is not set # CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set # CONFIG_MPL3115 is not set # CONFIG_MPRLS0025PA is not set # CONFIG_MS5611 is not set @@ -6263,6 +6529,7 @@ CONFIG_HID_SENSOR_PRESS=m # # Lightning sensors # +# CONFIG_AS3935 is not set # end of Lightning sensors # @@ -6287,11 +6554,15 @@ CONFIG_HID_SENSOR_PRESS=m # # Resolver to digital converters # +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1200 is not set # end of Resolver to digital converters # # Temperature sensors # +# CONFIG_LTC2983 is not set +# CONFIG_MAXIM_THERMOCOUPLE is not set CONFIG_HID_SENSOR_TEMP=m # CONFIG_MLX90614 is not set # 
CONFIG_MLX90632 is not set @@ -6301,6 +6572,8 @@ CONFIG_HID_SENSOR_TEMP=m # CONFIG_TSYS01 is not set # CONFIG_TSYS02D is not set # CONFIG_MAX30208 is not set +# CONFIG_MAX31856 is not set +# CONFIG_MAX31865 is not set # end of Temperature sensors CONFIG_NTB=m @@ -6442,7 +6715,9 @@ CONFIG_FS_IOMAP=y CONFIG_BUFFER_HEAD=y CONFIG_LEGACY_DIRECT_IO=y # CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set +CONFIG_EXT3_FS=m +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set CONFIG_EXT4_FS=y CONFIG_EXT4_USE_FOR_EXT2=y CONFIG_EXT4_FS_POSIX_ACL=y @@ -6507,7 +6782,7 @@ CONFIG_OVERLAY_FS_INDEX=y # # Caches # -CONFIG_NETFS_SUPPORT=y +CONFIG_NETFS_SUPPORT=m CONFIG_NETFS_STATS=y CONFIG_FSCACHE=m CONFIG_FSCACHE_STATS=y @@ -6538,7 +6813,10 @@ CONFIG_FAT_DEFAULT_IOCHARSET="ascii" # CONFIG_FAT_DEFAULT_UTF8 is not set # CONFIG_EXFAT_FS is not set # CONFIG_NTFS_FS is not set -# CONFIG_NTFS3_FS is not set +CONFIG_NTFS3_FS=m +# CONFIG_NTFS3_64BIT_CLUSTER is not set +# CONFIG_NTFS3_LZX_XPRESS is not set +# CONFIG_NTFS3_FS_POSIX_ACL is not set # end of DOS/FAT/EXFAT/NT Filesystems # @@ -6678,7 +6956,7 @@ CONFIG_CEPH_FS=m # CONFIG_CEPH_FSCACHE is not set CONFIG_CEPH_FS_POSIX_ACL=y # CONFIG_CEPH_FS_SECURITY_LABEL is not set -CONFIG_CIFS=y +CONFIG_CIFS=m # CONFIG_CIFS_STATS2 is not set CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y CONFIG_CIFS_UPCALL=y @@ -6689,8 +6967,10 @@ CONFIG_CIFS_DEBUG=y # CONFIG_CIFS_DEBUG_DUMP_KEYS is not set CONFIG_CIFS_DFS_UPCALL=y # CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set # CONFIG_SMB_SERVER is not set -CONFIG_SMBFS=y +CONFIG_SMBFS=m # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set CONFIG_NLS=y @@ -6744,7 +7024,7 @@ CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_NLS_UTF8=m -CONFIG_NLS_UCS2_UTILS=y +CONFIG_NLS_UCS2_UTILS=m CONFIG_DLM=m CONFIG_DLM_DEBUG=y # CONFIG_UNICODE is not set @@ -6949,8 +7229,8 @@ CONFIG_CRYPTO_FCRYPT=m 
CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SM4=y -CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SM4_GENERIC=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_TWOFISH_COMMON=m @@ -7076,8 +7356,8 @@ CONFIG_CRYPTO_DES3_EDE_X86_64=m CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m CONFIG_CRYPTO_SERPENT_AVX_X86_64=m CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m -CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=y -CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=y +CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m CONFIG_CRYPTO_TWOFISH_X86_64=m CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 128303b14bf0..981a32ab9cf6 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -2085,6 +2085,7 @@ CONFIG_GENERIC_CPU_AUTOPROBE=y CONFIG_GENERIC_CPU_VULNERABILITIES=y CONFIG_REGMAP=y CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m CONFIG_DMA_SHARED_BUFFER=y # CONFIG_DMA_FENCE_TRACE is not set # CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set @@ -2219,6 +2220,10 @@ CONFIG_MTD_CFI_I2=y # Self-contained MTD device drivers # # CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set # CONFIG_MTD_MTDRAM is not set @@ -2235,6 +2240,7 @@ CONFIG_MTD_CFI_I2=y # # CONFIG_MTD_ONENAND is not set # CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set # # ECC engine support @@ -2251,6 +2257,7 @@ CONFIG_MTD_CFI_I2=y # CONFIG_MTD_LPDDR is not set # end of LPDDR & LPDDR2 PCM memory drivers +# CONFIG_MTD_SPI_NOR is not set CONFIG_MTD_UBI=m CONFIG_MTD_UBI_WL_THRESHOLD=4096 CONFIG_MTD_UBI_BEB_LIMIT=20 @@ -2354,6 +2361,7 @@ CONFIG_SENSORS_APDS990X=m # CONFIG_HMC6352 is not set # CONFIG_DS1682 is not set CONFIG_VMWARE_BALLOON=m +# 
CONFIG_LATTICE_ECP3_CONFIG is not set # CONFIG_SRAM is not set # CONFIG_DW_XDATA_PCIE is not set # CONFIG_PCI_ENDPOINT_TEST is not set @@ -2365,9 +2373,11 @@ CONFIG_MISC_RTSX=m # EEPROM support # # CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set CONFIG_EEPROM_LEGACY=m CONFIG_EEPROM_MAX6875=m CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set # CONFIG_EEPROM_IDT_89HPESX is not set # CONFIG_EEPROM_EE1004 is not set # end of EEPROM support @@ -2763,6 +2773,7 @@ CONFIG_NET_VENDOR_AQUANTIA=y CONFIG_AQTION=m # CONFIG_NET_VENDOR_ARC is not set CONFIG_NET_VENDOR_ASIX=y +# CONFIG_SPI_AX88796C is not set CONFIG_NET_VENDOR_ATHEROS=y CONFIG_ATL2=m CONFIG_ATL1=m @@ -2809,6 +2820,7 @@ CONFIG_NET_VENDOR_CISCO=y CONFIG_ENIC=m # CONFIG_NET_VENDOR_CORTINA is not set CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set CONFIG_DNET=m CONFIG_NET_VENDOR_DEC=y # CONFIG_NET_TULIP is not set @@ -2856,6 +2868,8 @@ CONFIG_ICE_HWTS=y CONFIG_FM10K=m CONFIG_IGC=m # CONFIG_JME is not set +CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set CONFIG_NET_VENDOR_LITEX=y # CONFIG_NET_VENDOR_MARVELL is not set CONFIG_NET_VENDOR_MELLANOX=y @@ -2963,6 +2977,7 @@ CONFIG_SFC_MCDI_LOGGING=y # CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TI is not set CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set # CONFIG_NET_VENDOR_VIA is not set CONFIG_NET_VENDOR_WANGXUN=y CONFIG_LIBWX=m @@ -3037,6 +3052,7 @@ CONFIG_DP83867_PHY=m # CONFIG_DP83TD510_PHY is not set CONFIG_VITESSE_PHY=m CONFIG_XILINX_GMII2RGMII=m +# CONFIG_MICREL_KS8995MA is not set # CONFIG_PSE_CONTROLLER is not set CONFIG_MDIO_DEVICE=y CONFIG_MDIO_BUS=y @@ -3238,6 +3254,7 @@ CONFIG_MT76x2U=m # CONFIG_MT7996E is not set CONFIG_WLAN_VENDOR_MICROCHIP=y # CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set CONFIG_WLAN_VENDOR_PURELIFI=y # CONFIG_PLFXLC is not set CONFIG_WLAN_VENDOR_RALINK=y @@ -3340,7 +3357,13 @@ CONFIG_HDLC_PPP=m # CONFIG_FARSYNC is not set CONFIG_IEEE802154_DRIVERS=m 
CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set # CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set # CONFIG_IEEE802154_HWSIM is not set # @@ -3406,6 +3429,7 @@ CONFIG_INPUT_KEYBOARD=y # CONFIG_KEYBOARD_ADC is not set # CONFIG_KEYBOARD_ADP5588 is not set # CONFIG_KEYBOARD_ADP5589 is not set +# CONFIG_KEYBOARD_APPLESPI is not set CONFIG_KEYBOARD_ATKBD=y # CONFIG_KEYBOARD_QT1050 is not set # CONFIG_KEYBOARD_QT1070 is not set @@ -3467,6 +3491,8 @@ CONFIG_TABLET_USB_KBTAB=m # CONFIG_TABLET_USB_PEGASUS is not set CONFIG_TABLET_SERIAL_WACOM4=m CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set # CONFIG_TOUCHSCREEN_AD7879 is not set # CONFIG_TOUCHSCREEN_ADC is not set # CONFIG_TOUCHSCREEN_ATMEL_MXT is not set @@ -3517,12 +3543,14 @@ CONFIG_TOUCHSCREEN_WACOM_I2C=m # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set # CONFIG_TOUCHSCREEN_TSC_SERIO is not set # CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set # CONFIG_TOUCHSCREEN_TSC2007 is not set # CONFIG_TOUCHSCREEN_RM_TS is not set # CONFIG_TOUCHSCREEN_SILEAD is not set # CONFIG_TOUCHSCREEN_SIS_I2C is not set # CONFIG_TOUCHSCREEN_ST1232 is not set # CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set # CONFIG_TOUCHSCREEN_SX8654 is not set # CONFIG_TOUCHSCREEN_TPS6507X is not set # CONFIG_TOUCHSCREEN_ZET6223 is not set @@ -3569,6 +3597,7 @@ CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m # CONFIG_INPUT_DRV2667_HAPTICS is not set CONFIG_RMI4_CORE=m CONFIG_RMI4_I2C=m +# CONFIG_RMI4_SPI is not set CONFIG_RMI4_SMB=m CONFIG_RMI4_F03=y CONFIG_RMI4_F03_SERIO=m @@ -3650,6 +3679,8 @@ CONFIG_SERIAL_8250_PERICOM=y # Non-8250 serial port support # # CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# 
CONFIG_SERIAL_MAX310X is not set # CONFIG_SERIAL_UARTLITE is not set CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y @@ -3718,6 +3749,7 @@ CONFIG_TCG_TPM=y CONFIG_HW_RANDOM_TPM=y CONFIG_TCG_TIS_CORE=y CONFIG_TCG_TIS=y +# CONFIG_TCG_TIS_SPI is not set # CONFIG_TCG_TIS_I2C is not set # CONFIG_TCG_TIS_I2C_CR50 is not set CONFIG_TCG_TIS_I2C_ATMEL=m @@ -3731,6 +3763,7 @@ CONFIG_TCG_CRB=y # CONFIG_TCG_VTPM_PROXY is not set CONFIG_TCG_TIS_ST33ZP24=m CONFIG_TCG_TIS_ST33ZP24_I2C=m +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set CONFIG_TELCLOCK=m # CONFIG_XILLYBUS is not set # CONFIG_XILLYUSB is not set @@ -3839,7 +3872,49 @@ CONFIG_I2C_STUB=m # end of I2C support # CONFIG_I3C is not set -# CONFIG_SPI is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_LANTIQ_SSC is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PCI1XXXX is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y # CONFIG_SPMI is not set # CONFIG_HSI is not set CONFIG_PPS=y @@ -3976,6 +4051,16 @@ CONFIG_GPIO_ICH=m # CONFIG_GPIO_RDC321X is not set # end of PCI GPIO expanders +# +# SPI GPIO 
expanders +# +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + # # USB GPIO expanders # @@ -4041,6 +4126,7 @@ CONFIG_HWMON_VID=m # CONFIG_SENSORS_ABITUGURU=m CONFIG_SENSORS_ABITUGURU3=m +# CONFIG_SENSORS_AD7314 is not set CONFIG_SENSORS_AD7414=m CONFIG_SENSORS_AD7418=m CONFIG_SENSORS_ADM1025=m @@ -4050,6 +4136,7 @@ CONFIG_SENSORS_ADM1031=m # CONFIG_SENSORS_ADM1177 is not set CONFIG_SENSORS_ADM9240=m CONFIG_SENSORS_ADT7X10=m +# CONFIG_SENSORS_ADT7310 is not set CONFIG_SENSORS_ADT7410=m CONFIG_SENSORS_ADT7411=m CONFIG_SENSORS_ADT7462=m @@ -4096,6 +4183,7 @@ CONFIG_SENSORS_JC42=m CONFIG_SENSORS_LINEAGE=m # CONFIG_SENSORS_LTC2945 is not set # CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set # CONFIG_SENSORS_LTC2990 is not set # CONFIG_SENSORS_LTC2992 is not set CONFIG_SENSORS_LTC4151=m @@ -4104,11 +4192,13 @@ CONFIG_SENSORS_LTC4215=m CONFIG_SENSORS_LTC4245=m # CONFIG_SENSORS_LTC4260 is not set CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX1111 is not set # CONFIG_SENSORS_MAX127 is not set CONFIG_SENSORS_MAX16065=m CONFIG_SENSORS_MAX1619=m CONFIG_SENSORS_MAX1668=m CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set # CONFIG_SENSORS_MAX31730 is not set # CONFIG_SENSORS_MAX31760 is not set # CONFIG_MAX31827 is not set @@ -4124,7 +4214,9 @@ CONFIG_SENSORS_MCP3021=m # CONFIG_SENSORS_TC654 is not set # CONFIG_SENSORS_TPS23861 is not set # CONFIG_SENSORS_MR75203 is not set +# CONFIG_SENSORS_ADCXX is not set CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set CONFIG_SENSORS_LM73=m CONFIG_SENSORS_LM75=m CONFIG_SENSORS_LM77=m @@ -4224,6 +4316,7 @@ CONFIG_SENSORS_SCH5636=m # CONFIG_SENSORS_STTS751 is not set # CONFIG_SENSORS_ADC128D818 is not set CONFIG_SENSORS_ADS7828=m +# CONFIG_SENSORS_ADS7871 is not set CONFIG_SENSORS_AMC6821=m CONFIG_SENSORS_INA209=m CONFIG_SENSORS_INA2XX=m @@ 
-4404,10 +4497,12 @@ CONFIG_MFD_CORE=y # CONFIG_MFD_AXP20X_I2C is not set # CONFIG_MFD_CS42L43_I2C is not set # CONFIG_MFD_MADERA is not set +# CONFIG_MFD_DA9052_SPI is not set # CONFIG_MFD_DA9062 is not set # CONFIG_MFD_DA9063 is not set # CONFIG_MFD_DA9150 is not set # CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set # CONFIG_MFD_MC13XXX_I2C is not set # CONFIG_MFD_MP2629 is not set # CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set @@ -4429,6 +4524,8 @@ CONFIG_MFD_INTEL_LPSS_PCI=m # CONFIG_MFD_MT6370 is not set # CONFIG_MFD_MT6397 is not set # CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set CONFIG_MFD_VIPERBOARD=m # CONFIG_MFD_RETU is not set # CONFIG_MFD_PCF50633 is not set @@ -4451,14 +4548,19 @@ CONFIG_MFD_SM501_GPIO=y # CONFIG_MFD_TPS65086 is not set # CONFIG_MFD_TI_LP873X is not set # CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set # CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set # CONFIG_MFD_WL1273_CORE is not set # CONFIG_MFD_LM3533 is not set # CONFIG_MFD_TQMX86 is not set CONFIG_MFD_VX855=m # CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM831X_SPI is not set # CONFIG_MFD_WM8994 is not set # CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set # end of Multifunction device drivers # CONFIG_REGULATOR is not set @@ -4657,7 +4759,10 @@ CONFIG_DRM_PANEL=y # # Display Panels # +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set # CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set # end of Display Panels CONFIG_DRM_BRIDGE=y @@ -4674,7 +4779,17 @@ CONFIG_DRM_PANEL_BRIDGE=y CONFIG_DRM_BOCHS=m CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set # CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# 
CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set # CONFIG_DRM_XEN_FRONTEND is not set # CONFIG_DRM_VBOXVIDEO is not set # CONFIG_DRM_GUD is not set @@ -4760,7 +4875,18 @@ CONFIG_FB_TILEBLITTING=y # Backlight & LCD device support # CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set CONFIG_BACKLIGHT_CLASS_DEVICE=y # CONFIG_BACKLIGHT_KTD253 is not set # CONFIG_BACKLIGHT_KTZ8866 is not set @@ -5011,6 +5137,7 @@ CONFIG_USB_EHCI_PCI=y # CONFIG_USB_EHCI_HCD_PLATFORM is not set # CONFIG_USB_OXU210HP_HCD is not set # CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PCI=y # CONFIG_USB_OHCI_HCD_PLATFORM is not set @@ -5228,6 +5355,7 @@ CONFIG_MMC_SDHCI_PLTFM=m # CONFIG_MMC_SDHCI_F_SDH30 is not set # CONFIG_MMC_WBSD is not set CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set CONFIG_MMC_CB710=m CONFIG_MMC_VIA_SDMMC=m CONFIG_MMC_VUB300=m @@ -5281,6 +5409,7 @@ CONFIG_LEDS_LP3944=m # CONFIG_LEDS_PCA955X is not set # CONFIG_LEDS_PCA963X is not set # CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_DAC124S085 is not set # CONFIG_LEDS_PWM is not set # CONFIG_LEDS_BD2606MVV is not set # CONFIG_LEDS_BD2802 is not set @@ -5403,7 +5532,8 @@ CONFIG_RTC_MC146818_LIB=y CONFIG_RTC_CLASS=y CONFIG_RTC_HCTOSYS=y CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" # CONFIG_RTC_DEBUG is not set 
CONFIG_RTC_NVMEM=y @@ -5454,6 +5584,20 @@ CONFIG_RTC_DRV_EM3027=m # # SPI RTC drivers # +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set CONFIG_RTC_I2C_AND_SPI=m # @@ -5651,6 +5795,7 @@ CONFIG_MLXREG_HOTPLUG=m # CONFIG_MLXREG_LC is not set # CONFIG_NVSW_SN2201 is not set CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE3_WMI is not set # CONFIG_SURFACE_3_POWER_OPREGION is not set # CONFIG_SURFACE_GPE is not set # CONFIG_SURFACE_HOTPLUG is not set @@ -5749,6 +5894,7 @@ CONFIG_SONY_LAPTOP=m CONFIG_SONYPI_COMPAT=y # CONFIG_SYSTEM76_ACPI is not set CONFIG_TOPSTAR_LAPTOP=m +# CONFIG_SERIAL_MULTI_INSTANTIATE is not set CONFIG_MLX_PLATFORM=m CONFIG_INTEL_IPS=m # CONFIG_INTEL_SCU_PCI is not set @@ -5760,6 +5906,7 @@ CONFIG_P2SB=y CONFIG_HAVE_CLK=y CONFIG_HAVE_CLK_PREPARE=y CONFIG_COMMON_CLK=y +# CONFIG_LMK04832 is not set # CONFIG_COMMON_CLK_MAX9485 is not set # CONFIG_COMMON_CLK_SI5341 is not set # CONFIG_COMMON_CLK_SI5351 is not set @@ -5896,27 +6043,39 @@ CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 # # Accelerometers # +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set # CONFIG_ADXL313_I2C is not set +# CONFIG_ADXL313_SPI is not set # CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set # CONFIG_ADXL355_I2C is not set +# CONFIG_ADXL355_SPI is not set +# CONFIG_ADXL367_SPI is not set # CONFIG_ADXL367_I2C is not set +# CONFIG_ADXL372_SPI is not set # CONFIG_ADXL372_I2C is not set # CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set # CONFIG_BMA400 is not set # CONFIG_BMC150_ACCEL is not set +# CONFIG_BMI088_ACCEL 
is not set # CONFIG_DA280 is not set # CONFIG_DA311 is not set # CONFIG_DMARD06 is not set # CONFIG_DMARD09 is not set # CONFIG_DMARD10 is not set # CONFIG_FXLS8962AF_I2C is not set +# CONFIG_FXLS8962AF_SPI is not set CONFIG_HID_SENSOR_ACCEL_3D=m # CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_IIO_KX022A_SPI is not set # CONFIG_IIO_KX022A_I2C is not set # CONFIG_KXSD9 is not set # CONFIG_KXCJK1013 is not set # CONFIG_MC3230 is not set # CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set # CONFIG_MMA7660 is not set # CONFIG_MMA8452 is not set # CONFIG_MMA9551 is not set @@ -5924,6 +6083,8 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # CONFIG_MSA311 is not set # CONFIG_MXC4005 is not set # CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_SCA3300 is not set # CONFIG_STK8312 is not set # CONFIG_STK8BA50 is not set # end of Accelerometers @@ -5931,26 +6092,67 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # # Analog to digital converters # +# CONFIG_AD4130 is not set # CONFIG_AD7091R5 is not set +# CONFIG_AD7124 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7266 is not set +# CONFIG_AD7280 is not set # CONFIG_AD7291 is not set +# CONFIG_AD7292 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set # CONFIG_AD7606_IFACE_PARALLEL is not set +# CONFIG_AD7606_IFACE_SPI is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7768_1 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD7949 is not set # CONFIG_AD799X is not set # CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HI8435 is not set # CONFIG_HX711 is not set # CONFIG_INA2XX_ADC is not set # CONFIG_LTC2471 is not set # CONFIG_LTC2485 is not set +# CONFIG_LTC2496 is not set # CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX11205 is not set +# CONFIG_MAX11410 is not set +# CONFIG_MAX1241 is not set # CONFIG_MAX1363 is not 
set # CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set # CONFIG_MCP3422 is not set +# CONFIG_MCP3911 is not set # CONFIG_NAU7802 is not set # CONFIG_RICHTEK_RTQ6056 is not set # CONFIG_SD_ADC_MODULATOR is not set # CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set # CONFIG_TI_ADS1015 is not set # CONFIG_TI_ADS7924 is not set # CONFIG_TI_ADS1100 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8344 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_ADS124S08 is not set +# CONFIG_TI_ADS131E08 is not set +# CONFIG_TI_LMP92064 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_TI_TSC2046 is not set # CONFIG_VF610_ADC is not set # CONFIG_VIPERBOARD_ADC is not set # CONFIG_XILINX_XADC is not set @@ -5959,6 +6161,8 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # # Analog to digital and digital to analog converters # +# CONFIG_AD74115 is not set +# CONFIG_AD74413R is not set # end of Analog to digital and digital to analog converters # @@ -5970,6 +6174,8 @@ CONFIG_HID_SENSOR_ACCEL_3D=m # # Amplifiers # +# CONFIG_AD8366 is not set +# CONFIG_ADA4250 is not set # CONFIG_HMC425 is not set # end of Amplifiers @@ -6012,24 +6218,51 @@ CONFIG_HID_SENSOR_IIO_TRIGGER=m # # SSP Sensor Common # +# CONFIG_IIO_SSP_SENSORHUB is not set # end of SSP Sensor Common # # Digital to analog converters # +# CONFIG_AD3552R is not set # CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set # CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set # CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set # CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2688 is not set +# CONFIG_AD5686_SPI is not set # CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not 
set +# CONFIG_AD5766 is not set +# CONFIG_AD5770R is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7293 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set # CONFIG_DPOT_DAC is not set # CONFIG_DS4424 is not set +# CONFIG_LTC1660 is not set +# CONFIG_LTC2632 is not set # CONFIG_M62332 is not set # CONFIG_MAX517 is not set +# CONFIG_MAX5522 is not set # CONFIG_MAX5821 is not set # CONFIG_MCP4725 is not set # CONFIG_MCP4728 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set # CONFIG_TI_DAC5571 is not set +# CONFIG_TI_DAC7311 is not set +# CONFIG_TI_DAC7612 is not set # CONFIG_VF610_DAC is not set # end of Digital to analog converters @@ -6041,6 +6274,7 @@ CONFIG_HID_SENSOR_IIO_TRIGGER=m # # Filters # +# CONFIG_ADMV8818 is not set # end of Filters # @@ -6050,17 +6284,31 @@ CONFIG_HID_SENSOR_IIO_TRIGGER=m # # Clock Generator/Distribution # +# CONFIG_AD9523 is not set # end of Clock Generator/Distribution # # Phase-Locked Loop (PLL) frequency synthesizers # +# CONFIG_ADF4350 is not set +# CONFIG_ADF4371 is not set +# CONFIG_ADF4377 is not set +# CONFIG_ADMV1013 is not set +# CONFIG_ADMV1014 is not set +# CONFIG_ADMV4420 is not set +# CONFIG_ADRF6780 is not set # end of Phase-Locked Loop (PLL) frequency synthesizers # end of Frequency Synthesizers DDS/PLL # # Digital gyroscope sensors # +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS290 is not set +# CONFIG_ADXRS450 is not set # CONFIG_BMG160 is not set # CONFIG_FXAS21002C is not set CONFIG_HID_SENSOR_GYRO_3D=m @@ -6076,6 +6324,7 @@ CONFIG_HID_SENSOR_GYRO_3D=m # # Heart Rate Monitors # +# CONFIG_AFE4403 is not set # CONFIG_AFE4404 is not set # CONFIG_MAX30100 is not set # CONFIG_MAX30102 is not set @@ -6099,12 +6348,20 @@ CONFIG_HID_SENSOR_HUMIDITY=m # # Inertial measurement units # +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16460 is not set +# CONFIG_ADIS16475 is not set +# 
CONFIG_ADIS16480 is not set # CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set # CONFIG_BOSCH_BNO055_I2C is not set # CONFIG_FXOS8700_I2C is not set +# CONFIG_FXOS8700_SPI is not set # CONFIG_KMX61 is not set # CONFIG_INV_ICM42600_I2C is not set +# CONFIG_INV_ICM42600_SPI is not set # CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set # CONFIG_IIO_ST_LSM6DSX is not set # CONFIG_IIO_ST_LSM9DS0 is not set # end of Inertial measurement units @@ -6174,12 +6431,15 @@ CONFIG_HID_SENSOR_PROX=m # CONFIG_AK8975 is not set # CONFIG_AK09911 is not set # CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set # CONFIG_MAG3110 is not set CONFIG_HID_SENSOR_MAGNETOMETER_3D=m # CONFIG_MMC35240 is not set # CONFIG_IIO_ST_MAGN_3AXIS is not set # CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set # CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_SENSORS_RM3100_SPI is not set # CONFIG_TI_TMAG5273 is not set # CONFIG_YAMAHA_YAS530 is not set # end of Magnetometer sensors @@ -6217,9 +6477,14 @@ CONFIG_HID_SENSOR_DEVICE_ROTATION=m # CONFIG_AD5272 is not set # CONFIG_DS1803 is not set # CONFIG_MAX5432 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set # CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set # CONFIG_MCP4531 is not set +# CONFIG_MCP41010 is not set # CONFIG_TPL0102 is not set +# CONFIG_X9250 is not set # end of Digital potentiometers # @@ -6239,6 +6504,7 @@ CONFIG_HID_SENSOR_PRESS=m # CONFIG_HP03 is not set # CONFIG_ICP10100 is not set # CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set # CONFIG_MPL3115 is not set # CONFIG_MPRLS0025PA is not set # CONFIG_MS5611 is not set @@ -6252,6 +6518,7 @@ CONFIG_HID_SENSOR_PRESS=m # # Lightning sensors # +# CONFIG_AS3935 is not set # end of Lightning sensors # @@ -6276,11 +6543,15 @@ CONFIG_HID_SENSOR_PRESS=m # # Resolver to digital converters # +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1200 is not set # end of Resolver to digital 
converters # # Temperature sensors # +# CONFIG_LTC2983 is not set +# CONFIG_MAXIM_THERMOCOUPLE is not set CONFIG_HID_SENSOR_TEMP=m # CONFIG_MLX90614 is not set # CONFIG_MLX90632 is not set @@ -6290,6 +6561,8 @@ CONFIG_HID_SENSOR_TEMP=m # CONFIG_TSYS01 is not set # CONFIG_TSYS02D is not set # CONFIG_MAX30208 is not set +# CONFIG_MAX31856 is not set +# CONFIG_MAX31865 is not set # end of Temperature sensors CONFIG_NTB=m @@ -6431,7 +6704,9 @@ CONFIG_FS_IOMAP=y CONFIG_BUFFER_HEAD=y CONFIG_LEGACY_DIRECT_IO=y # CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set +CONFIG_EXT3_FS=m +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set CONFIG_EXT4_FS=y CONFIG_EXT4_USE_FOR_EXT2=y CONFIG_EXT4_FS_POSIX_ACL=y @@ -6527,7 +6802,10 @@ CONFIG_FAT_DEFAULT_IOCHARSET="ascii" # CONFIG_FAT_DEFAULT_UTF8 is not set # CONFIG_EXFAT_FS is not set # CONFIG_NTFS_FS is not set -# CONFIG_NTFS3_FS is not set +CONFIG_NTFS3_FS=m +# CONFIG_NTFS3_64BIT_CLUSTER is not set +# CONFIG_NTFS3_LZX_XPRESS is not set +# CONFIG_NTFS3_FS_POSIX_ACL is not set # end of DOS/FAT/EXFAT/NT Filesystems # @@ -6940,8 +7218,8 @@ CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SM4=y -CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SM4_GENERIC=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_TWOFISH_COMMON=m @@ -7067,8 +7345,8 @@ CONFIG_CRYPTO_DES3_EDE_X86_64=m CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m CONFIG_CRYPTO_SERPENT_AVX_X86_64=m CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m -CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=y -CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=y +CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m CONFIG_CRYPTO_TWOFISH_X86_64=m CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m -- Gitee From 41a9151eded27cf08aa9e68e979d1eb48542b490 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Thu, 21 Mar 2024 19:26:14 +0800 Subject: [PATCH 0377/2138] anolis: configs: adjust L1 level kconfigs 
ANBZ: #8598 Adjust L1 kconfigs to further improve the compatibility of ANCK. Signed-off-by: Qiao Ma Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/2938 --- arch/arm64/configs/anolis-debug_defconfig | 77 ++++++++++++++++------- arch/arm64/configs/anolis_defconfig | 77 ++++++++++++++++------- arch/x86/configs/anolis-debug_defconfig | 57 ++++++++++++----- arch/x86/configs/anolis_defconfig | 57 ++++++++++++----- 4 files changed, 198 insertions(+), 70 deletions(-) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index af35e1ac7f6a..b2a67eec8bf0 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -394,6 +394,7 @@ CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y CONFIG_ARM64_ERRATUM_2054223=y CONFIG_ARM64_ERRATUM_2067961=y CONFIG_ARM64_ERRATUM_2441009=y +CONFIG_ARM64_ERRATUM_2457168=y CONFIG_ARM64_ERRATUM_2645198=y CONFIG_ARM64_ERRATUM_2966298=y CONFIG_CAVIUM_ERRATUM_22375=y @@ -424,7 +425,7 @@ CONFIG_ARM64_PA_BITS=48 # CONFIG_CPU_BIG_ENDIAN is not set CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_SCHED_MC=y -# CONFIG_SCHED_CLUSTER is not set +CONFIG_SCHED_CLUSTER=y CONFIG_SCHED_SMT=y CONFIG_NR_CPUS=1024 CONFIG_HOTPLUG_CPU=y @@ -495,7 +496,7 @@ CONFIG_AS_HAS_LDAPR=y # # ARMv8.4 architectural features # -# CONFIG_ARM64_AMU_EXTN is not set +CONFIG_ARM64_AMU_EXTN=y CONFIG_AS_HAS_ARMV8_4=y CONFIG_ARM64_TLB_RANGE=y # end of ARMv8.4 architectural features @@ -615,7 +616,7 @@ CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y # # CONFIG_CPUFREQ_DT is not set # CONFIG_CPUFREQ_DT_PLATDEV is not set -CONFIG_ACPI_CPPC_CPUFREQ=y +CONFIG_ACPI_CPPC_CPUFREQ=m CONFIG_ACPI_CPPC_CPUFREQ_FIE=y CONFIG_ARM_SCPI_CPUFREQ=m # CONFIG_ARM_QCOM_CPUFREQ_HW is not set @@ -626,6 +627,7 @@ CONFIG_ARCH_SUPPORTS_ACPI=y CONFIG_ACPI=y CONFIG_ACPI_GENERIC_GSI=y CONFIG_ACPI_CCA_REQUIRED=y +CONFIG_ACPI_TABLE_LIB=y CONFIG_ACPI_DEBUGGER=y CONFIG_ACPI_DEBUGGER_USER=m CONFIG_ACPI_SPCR_TABLE=y @@ -861,6 +863,7 @@ 
CONFIG_BLOCK=y CONFIG_BLOCK_LEGACY_AUTOLOAD=y CONFIG_BLK_RQ_ALLOC_TIME=y CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y CONFIG_BLK_DEV_BSG_COMMON=y CONFIG_BLK_ICQ=y CONFIG_BLK_DEV_BSGLIB=y @@ -1055,6 +1058,7 @@ CONFIG_ZONE_DMA=y CONFIG_ZONE_DMA32=y CONFIG_ZONE_DEVICE=y CONFIG_HMM_MIRROR=y +CONFIG_GET_FREE_REGION=y # CONFIG_DEVICE_PRIVATE is not set CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y CONFIG_ARCH_USES_PG_ARCH_X=y @@ -1846,6 +1850,7 @@ CONFIG_PCI_QUIRKS=y CONFIG_PCI_STUB=y # CONFIG_PCI_PF_STUB is not set CONFIG_PCI_ATS=y +CONFIG_PCI_DOE=y CONFIG_PCI_ECAM=y CONFIG_PCI_IOV=y CONFIG_PCI_PRI=y @@ -1915,7 +1920,17 @@ CONFIG_PCI_HISI=y # CONFIG_PCI_SW_SWITCHTEC is not set # end of PCI switch controller drivers -# CONFIG_CXL_BUS is not set +CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +# CONFIG_CXL_MEM_RAW_COMMANDS is not set +CONFIG_CXL_ACPI=m +CONFIG_CXL_PMEM=m +CONFIG_CXL_MEM=m +CONFIG_CXL_PORT=m +CONFIG_CXL_SUSPEND=y +CONFIG_CXL_REGION=y +# CONFIG_CXL_REGION_INVALIDATION_TEST is not set +CONFIG_CXL_PMU=m CONFIG_PCCARD=y # CONFIG_PCMCIA is not set CONFIG_CARDBUS=y @@ -1947,11 +1962,13 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_FW_LOADER=y CONFIG_FW_LOADER_DEBUG=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_FW_LOADER_SYSFS=y CONFIG_EXTRA_FIRMWARE="" # CONFIG_FW_LOADER_USER_HELPER is not set # CONFIG_FW_LOADER_COMPRESS is not set CONFIG_FW_CACHE=y -# CONFIG_FW_UPLOAD is not set +CONFIG_FW_UPLOAD=y # end of Firmware loader CONFIG_ALLOW_DEV_COREDUMP=y @@ -2387,7 +2404,7 @@ CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT3SAS=m CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 -# CONFIG_SCSI_MPT2SAS is not set +CONFIG_SCSI_MPT2SAS=m # CONFIG_SCSI_MPI3MR is not set CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set @@ -2396,7 +2413,7 @@ CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_MYRS is not set CONFIG_LIBFC=m CONFIG_LIBFCOE=m -# CONFIG_FCOE is not set +CONFIG_FCOE=m # CONFIG_SCSI_SNIC is not set # CONFIG_SCSI_DMX3191D is not set # CONFIG_SCSI_FDOMAIN_PCI is not set @@ -2749,7 
+2766,7 @@ CONFIG_HINIC=m # CONFIG_NET_VENDOR_I825XX is not set CONFIG_NET_VENDOR_INTEL=y # CONFIG_E100 is not set -# CONFIG_E1000 is not set +CONFIG_E1000=m CONFIG_E1000E=m CONFIG_IGB=m CONFIG_IGB_HWMON=y @@ -3411,7 +3428,7 @@ CONFIG_I2C_DESIGNWARE_PLATFORM=m # CONFIG_I2C_EMEV2 is not set CONFIG_I2C_GPIO=m CONFIG_I2C_GPIO_FAULT_INJECTOR=y -# CONFIG_I2C_HISI is not set +CONFIG_I2C_HISI=m # CONFIG_I2C_NOMADIK is not set # CONFIG_I2C_OCORES is not set CONFIG_I2C_PCA_PLATFORM=m @@ -3612,7 +3629,7 @@ CONFIG_GPIO_DWAPB=m # CONFIG_GPIO_FTGPIO010 is not set CONFIG_GPIO_GENERIC_PLATFORM=m # CONFIG_GPIO_GRGPIO is not set -# CONFIG_GPIO_HISI is not set +CONFIG_GPIO_HISI=m # CONFIG_GPIO_HLWD is not set # CONFIG_GPIO_LOGICVC is not set # CONFIG_GPIO_MB86S7X is not set @@ -5345,8 +5362,16 @@ CONFIG_VFIO_PCI=m # # VFIO support for platform devices # -# CONFIG_VFIO_PLATFORM is not set +CONFIG_VFIO_PLATFORM_BASE=m +CONFIG_VFIO_PLATFORM=m # CONFIG_VFIO_AMBA is not set + +# +# VFIO platform reset drivers +# +# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set +# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set +# end of VFIO platform reset drivers # end of VFIO support for platform devices # CONFIG_VIRT_DRIVERS is not set @@ -5563,7 +5588,7 @@ CONFIG_ARM_SMMU_V3=y # # Hisilicon SoC drivers # -# CONFIG_KUNPENG_HCCS is not set +CONFIG_KUNPENG_HCCS=m # end of Hisilicon SoC drivers # @@ -5736,7 +5761,7 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m CONFIG_HISI_PMU=m -# CONFIG_HISI_PCIE_PMU is not set +CONFIG_HISI_PCIE_PMU=m # CONFIG_HNS3_PMU is not set # CONFIG_MARVELL_CN10K_DDR_PMU is not set # CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set @@ -5766,6 +5791,7 @@ CONFIG_DAX=y CONFIG_DEV_DAX=m CONFIG_DEV_DAX_PMEM=m CONFIG_DEV_DAX_HMEM=m +CONFIG_DEV_DAX_CXL=m CONFIG_DEV_DAX_HMEM_DEVICES=y # CONFIG_DEV_DAX_KMEM is not set CONFIG_NVMEM=y @@ -5844,7 +5870,13 @@ CONFIG_XFS_WARN=y # CONFIG_XFS_DEBUG is not set # CONFIG_GFS2_FS is not set # 
CONFIG_OCFS2_FS is not set -# CONFIG_BTRFS_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set # CONFIG_NILFS2_FS is not set # CONFIG_F2FS_FS is not set # CONFIG_ZONEFS_FS is not set @@ -6302,11 +6334,12 @@ CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_RSA=y CONFIG_CRYPTO_DH=m # CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set -# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m # CONFIG_CRYPTO_ECDSA is not set # CONFIG_CRYPTO_ECRDSA is not set CONFIG_CRYPTO_SM2=y -# CONFIG_CRYPTO_CURVE25519 is not set +CONFIG_CRYPTO_CURVE25519=m # end of Public-key cryptography # @@ -6492,11 +6525,12 @@ CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_VIRTIO is not set # CONFIG_CRYPTO_DEV_SAFEXCEL is not set # CONFIG_CRYPTO_DEV_CCREE is not set -# CONFIG_CRYPTO_DEV_HISI_SEC is not set +CONFIG_CRYPTO_DEV_HISI_SEC=m # CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +CONFIG_CRYPTO_DEV_HISI_QM=m # CONFIG_CRYPTO_DEV_HISI_ZIP is not set -# CONFIG_CRYPTO_DEV_HISI_HPRE is not set -# CONFIG_CRYPTO_DEV_HISI_TRNG is not set +CONFIG_CRYPTO_DEV_HISI_HPRE=m +CONFIG_CRYPTO_DEV_HISI_TRNG=m # CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y @@ -6691,6 +6725,7 @@ CONFIG_FONT_SUPPORT=y # CONFIG_FONTS is not set CONFIG_FONT_8x8=y CONFIG_FONT_8x16=y +CONFIG_SG_SPLIT=y CONFIG_SG_POOL=y CONFIG_ARCH_HAS_PMEM_API=y CONFIG_MEMREGION=y @@ -6737,8 +6772,8 @@ CONFIG_DEBUG_MISC=y CONFIG_DEBUG_INFO=y CONFIG_AS_HAS_NON_CONST_LEB128=y # CONFIG_DEBUG_INFO_NONE is not set -# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set -CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set # CONFIG_DEBUG_INFO_DWARF5 is not set # CONFIG_DEBUG_INFO_REDUCED is not set 
CONFIG_DEBUG_INFO_COMPRESSED_NONE=y diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 8e118362d2e0..092a98239318 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -392,6 +392,7 @@ CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y CONFIG_ARM64_ERRATUM_2054223=y CONFIG_ARM64_ERRATUM_2067961=y CONFIG_ARM64_ERRATUM_2441009=y +CONFIG_ARM64_ERRATUM_2457168=y CONFIG_ARM64_ERRATUM_2645198=y CONFIG_ARM64_ERRATUM_2966298=y CONFIG_CAVIUM_ERRATUM_22375=y @@ -422,7 +423,7 @@ CONFIG_ARM64_PA_BITS=48 # CONFIG_CPU_BIG_ENDIAN is not set CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_SCHED_MC=y -# CONFIG_SCHED_CLUSTER is not set +CONFIG_SCHED_CLUSTER=y CONFIG_SCHED_SMT=y CONFIG_NR_CPUS=1024 CONFIG_HOTPLUG_CPU=y @@ -493,7 +494,7 @@ CONFIG_AS_HAS_LDAPR=y # # ARMv8.4 architectural features # -# CONFIG_ARM64_AMU_EXTN is not set +CONFIG_ARM64_AMU_EXTN=y CONFIG_AS_HAS_ARMV8_4=y CONFIG_ARM64_TLB_RANGE=y # end of ARMv8.4 architectural features @@ -613,7 +614,7 @@ CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y # # CONFIG_CPUFREQ_DT is not set # CONFIG_CPUFREQ_DT_PLATDEV is not set -CONFIG_ACPI_CPPC_CPUFREQ=y +CONFIG_ACPI_CPPC_CPUFREQ=m CONFIG_ACPI_CPPC_CPUFREQ_FIE=y CONFIG_ARM_SCPI_CPUFREQ=m # CONFIG_ARM_QCOM_CPUFREQ_HW is not set @@ -624,6 +625,7 @@ CONFIG_ARCH_SUPPORTS_ACPI=y CONFIG_ACPI=y CONFIG_ACPI_GENERIC_GSI=y CONFIG_ACPI_CCA_REQUIRED=y +CONFIG_ACPI_TABLE_LIB=y # CONFIG_ACPI_DEBUGGER is not set CONFIG_ACPI_SPCR_TABLE=y # CONFIG_ACPI_FPDT is not set @@ -858,6 +860,7 @@ CONFIG_BLOCK=y CONFIG_BLOCK_LEGACY_AUTOLOAD=y CONFIG_BLK_RQ_ALLOC_TIME=y CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y CONFIG_BLK_DEV_BSG_COMMON=y CONFIG_BLK_ICQ=y CONFIG_BLK_DEV_BSGLIB=y @@ -1052,6 +1055,7 @@ CONFIG_ZONE_DMA=y CONFIG_ZONE_DMA32=y CONFIG_ZONE_DEVICE=y CONFIG_HMM_MIRROR=y +CONFIG_GET_FREE_REGION=y # CONFIG_DEVICE_PRIVATE is not set CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y CONFIG_ARCH_USES_PG_ARCH_X=y @@ -1843,6 +1847,7 @@ CONFIG_PCI_QUIRKS=y 
CONFIG_PCI_STUB=y # CONFIG_PCI_PF_STUB is not set CONFIG_PCI_ATS=y +CONFIG_PCI_DOE=y CONFIG_PCI_ECAM=y CONFIG_PCI_IOV=y CONFIG_PCI_PRI=y @@ -1912,7 +1917,17 @@ CONFIG_PCI_HISI=y # CONFIG_PCI_SW_SWITCHTEC is not set # end of PCI switch controller drivers -# CONFIG_CXL_BUS is not set +CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +# CONFIG_CXL_MEM_RAW_COMMANDS is not set +CONFIG_CXL_ACPI=m +CONFIG_CXL_PMEM=m +CONFIG_CXL_MEM=m +CONFIG_CXL_PORT=m +CONFIG_CXL_SUSPEND=y +CONFIG_CXL_REGION=y +# CONFIG_CXL_REGION_INVALIDATION_TEST is not set +CONFIG_CXL_PMU=m CONFIG_PCCARD=y # CONFIG_PCMCIA is not set CONFIG_CARDBUS=y @@ -1944,11 +1959,13 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_FW_LOADER=y CONFIG_FW_LOADER_DEBUG=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_FW_LOADER_SYSFS=y CONFIG_EXTRA_FIRMWARE="" # CONFIG_FW_LOADER_USER_HELPER is not set # CONFIG_FW_LOADER_COMPRESS is not set CONFIG_FW_CACHE=y -# CONFIG_FW_UPLOAD is not set +CONFIG_FW_UPLOAD=y # end of Firmware loader CONFIG_ALLOW_DEV_COREDUMP=y @@ -2384,7 +2401,7 @@ CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT3SAS=m CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 -# CONFIG_SCSI_MPT2SAS is not set +CONFIG_SCSI_MPT2SAS=m # CONFIG_SCSI_MPI3MR is not set CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set @@ -2393,7 +2410,7 @@ CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_MYRS is not set CONFIG_LIBFC=m CONFIG_LIBFCOE=m -# CONFIG_FCOE is not set +CONFIG_FCOE=m # CONFIG_SCSI_SNIC is not set # CONFIG_SCSI_DMX3191D is not set # CONFIG_SCSI_FDOMAIN_PCI is not set @@ -2746,7 +2763,7 @@ CONFIG_HINIC=m # CONFIG_NET_VENDOR_I825XX is not set CONFIG_NET_VENDOR_INTEL=y # CONFIG_E100 is not set -# CONFIG_E1000 is not set +CONFIG_E1000=m CONFIG_E1000E=m CONFIG_IGB=m CONFIG_IGB_HWMON=y @@ -3408,7 +3425,7 @@ CONFIG_I2C_DESIGNWARE_PLATFORM=m # CONFIG_I2C_EMEV2 is not set CONFIG_I2C_GPIO=m # CONFIG_I2C_GPIO_FAULT_INJECTOR is not set -# CONFIG_I2C_HISI is not set +CONFIG_I2C_HISI=m # CONFIG_I2C_NOMADIK is not set # CONFIG_I2C_OCORES is not set 
CONFIG_I2C_PCA_PLATFORM=m @@ -3609,7 +3626,7 @@ CONFIG_GPIO_DWAPB=m # CONFIG_GPIO_FTGPIO010 is not set CONFIG_GPIO_GENERIC_PLATFORM=m # CONFIG_GPIO_GRGPIO is not set -# CONFIG_GPIO_HISI is not set +CONFIG_GPIO_HISI=m # CONFIG_GPIO_HLWD is not set # CONFIG_GPIO_LOGICVC is not set # CONFIG_GPIO_MB86S7X is not set @@ -5341,8 +5358,16 @@ CONFIG_VFIO_PCI=m # # VFIO support for platform devices # -# CONFIG_VFIO_PLATFORM is not set +CONFIG_VFIO_PLATFORM_BASE=m +CONFIG_VFIO_PLATFORM=m # CONFIG_VFIO_AMBA is not set + +# +# VFIO platform reset drivers +# +# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set +# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set +# end of VFIO platform reset drivers # end of VFIO support for platform devices # CONFIG_VIRT_DRIVERS is not set @@ -5559,7 +5584,7 @@ CONFIG_ARM_SMMU_V3=y # # Hisilicon SoC drivers # -# CONFIG_KUNPENG_HCCS is not set +CONFIG_KUNPENG_HCCS=m # end of Hisilicon SoC drivers # @@ -5732,7 +5757,7 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m CONFIG_HISI_PMU=m -# CONFIG_HISI_PCIE_PMU is not set +CONFIG_HISI_PCIE_PMU=m # CONFIG_HNS3_PMU is not set # CONFIG_MARVELL_CN10K_DDR_PMU is not set # CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set @@ -5762,6 +5787,7 @@ CONFIG_DAX=y CONFIG_DEV_DAX=m CONFIG_DEV_DAX_PMEM=m CONFIG_DEV_DAX_HMEM=m +CONFIG_DEV_DAX_CXL=m CONFIG_DEV_DAX_HMEM_DEVICES=y # CONFIG_DEV_DAX_KMEM is not set CONFIG_NVMEM=y @@ -5840,7 +5866,13 @@ CONFIG_XFS_POSIX_ACL=y # CONFIG_XFS_DEBUG is not set # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set -# CONFIG_BTRFS_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set # CONFIG_NILFS2_FS is not set # CONFIG_F2FS_FS is not set # CONFIG_ZONEFS_FS is not set @@ -6298,11 +6330,12 @@ CONFIG_CRYPTO_TEST=m 
CONFIG_CRYPTO_RSA=y CONFIG_CRYPTO_DH=m # CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set -# CONFIG_CRYPTO_ECDH is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m # CONFIG_CRYPTO_ECDSA is not set # CONFIG_CRYPTO_ECRDSA is not set CONFIG_CRYPTO_SM2=y -# CONFIG_CRYPTO_CURVE25519 is not set +CONFIG_CRYPTO_CURVE25519=m # end of Public-key cryptography # @@ -6488,11 +6521,12 @@ CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_VIRTIO is not set # CONFIG_CRYPTO_DEV_SAFEXCEL is not set # CONFIG_CRYPTO_DEV_CCREE is not set -# CONFIG_CRYPTO_DEV_HISI_SEC is not set +CONFIG_CRYPTO_DEV_HISI_SEC=m # CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +CONFIG_CRYPTO_DEV_HISI_QM=m # CONFIG_CRYPTO_DEV_HISI_ZIP is not set -# CONFIG_CRYPTO_DEV_HISI_HPRE is not set -# CONFIG_CRYPTO_DEV_HISI_TRNG is not set +CONFIG_CRYPTO_DEV_HISI_HPRE=m +CONFIG_CRYPTO_DEV_HISI_TRNG=m # CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y @@ -6686,6 +6720,7 @@ CONFIG_FONT_SUPPORT=y # CONFIG_FONTS is not set CONFIG_FONT_8x8=y CONFIG_FONT_8x16=y +CONFIG_SG_SPLIT=y CONFIG_SG_POOL=y CONFIG_ARCH_HAS_PMEM_API=y CONFIG_MEMREGION=y @@ -6731,8 +6766,8 @@ CONFIG_DEBUG_MISC=y CONFIG_DEBUG_INFO=y CONFIG_AS_HAS_NON_CONST_LEB128=y # CONFIG_DEBUG_INFO_NONE is not set -# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set -CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set # CONFIG_DEBUG_INFO_DWARF5 is not set # CONFIG_DEBUG_INFO_REDUCED is not set CONFIG_DEBUG_INFO_COMPRESSED_NONE=y diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 2e13de357040..36a089094635 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -40,13 +40,13 @@ CONFIG_HAVE_KERNEL_XZ=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_HAVE_KERNEL_LZ4=y CONFIG_HAVE_KERNEL_ZSTD=y -CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_GZIP is not set # CONFIG_KERNEL_BZIP2 is not set # 
CONFIG_KERNEL_LZMA is not set # CONFIG_KERNEL_XZ is not set # CONFIG_KERNEL_LZO is not set # CONFIG_KERNEL_LZ4 is not set -# CONFIG_KERNEL_ZSTD is not set +CONFIG_KERNEL_ZSTD=y CONFIG_DEFAULT_INIT="" CONFIG_DEFAULT_HOSTNAME="(none)" CONFIG_SYSVIPC=y @@ -367,7 +367,7 @@ CONFIG_SCHED_OMIT_FRAME_POINTER=y CONFIG_HYPERVISOR_GUEST=y CONFIG_PARAVIRT=y # CONFIG_PARAVIRT_DEBUG is not set -# CONFIG_PARAVIRT_SPINLOCKS is not set +CONFIG_PARAVIRT_SPINLOCKS=y CONFIG_X86_HV_CALLBACK_VECTOR=y CONFIG_XEN=y # CONFIG_XEN_PV is not set @@ -414,7 +414,7 @@ CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=8192 CONFIG_NR_CPUS_DEFAULT=64 CONFIG_NR_CPUS=1024 -CONFIG_SCHED_CLUSTER=y +# CONFIG_SCHED_CLUSTER is not set CONFIG_SCHED_SMT=y CONFIG_SCHED_MC=y CONFIG_SCHED_MC_PRIO=y @@ -578,6 +578,7 @@ CONFIG_ACPI=y CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +CONFIG_ACPI_TABLE_LIB=y CONFIG_ACPI_DEBUGGER=y CONFIG_ACPI_DEBUGGER_USER=m CONFIG_ACPI_SPCR_TABLE=y @@ -824,7 +825,7 @@ CONFIG_SECCOMP_FILTER=y CONFIG_HAVE_ARCH_STACKLEAK=y CONFIG_HAVE_STACKPROTECTOR=y CONFIG_STACKPROTECTOR=y -# CONFIG_STACKPROTECTOR_STRONG is not set +CONFIG_STACKPROTECTOR_STRONG=y CONFIG_ARCH_SUPPORTS_LTO_CLANG=y CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y CONFIG_LTO_NONE=y @@ -942,6 +943,7 @@ CONFIG_BLOCK=y CONFIG_BLOCK_LEGACY_AUTOLOAD=y CONFIG_BLK_RQ_ALLOC_TIME=y CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y CONFIG_BLK_DEV_BSG_COMMON=y CONFIG_BLK_ICQ=y CONFIG_BLK_DEV_BSGLIB=y @@ -1984,6 +1986,7 @@ CONFIG_PCI_QUIRKS=y CONFIG_PCI_STUB=y CONFIG_PCI_PF_STUB=y CONFIG_PCI_ATS=y +CONFIG_PCI_DOE=y CONFIG_PCI_LOCKLESS_CONFIG=y CONFIG_PCI_IOV=y CONFIG_PCI_PRI=y @@ -2035,7 +2038,17 @@ CONFIG_PCI_HYPERV_INTERFACE=m # CONFIG_PCI_SW_SWITCHTEC is not set # end of PCI switch controller drivers -# CONFIG_CXL_BUS is not set +CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +# CONFIG_CXL_MEM_RAW_COMMANDS is not set +CONFIG_CXL_ACPI=m +CONFIG_CXL_PMEM=m +CONFIG_CXL_MEM=m 
+CONFIG_CXL_PORT=m +CONFIG_CXL_SUSPEND=y +CONFIG_CXL_REGION=y +# CONFIG_CXL_REGION_INVALIDATION_TEST is not set +CONFIG_CXL_PMU=m CONFIG_PCCARD=y # CONFIG_PCMCIA is not set CONFIG_CARDBUS=y @@ -2074,7 +2087,7 @@ CONFIG_FW_LOADER_USER_HELPER=y # CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set # CONFIG_FW_LOADER_COMPRESS is not set CONFIG_FW_CACHE=y -# CONFIG_FW_UPLOAD is not set +CONFIG_FW_UPLOAD=y # end of Firmware loader CONFIG_WANT_DEV_COREDUMP=y @@ -2502,7 +2515,7 @@ CONFIG_VMWARE_PVSCSI=m CONFIG_HYPERV_STORAGE=m CONFIG_LIBFC=m CONFIG_LIBFCOE=m -# CONFIG_FCOE is not set +CONFIG_FCOE=m CONFIG_FCOE_FNIC=m # CONFIG_SCSI_SNIC is not set # CONFIG_SCSI_DMX3191D is not set @@ -5855,10 +5868,14 @@ CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y CONFIG_INTEL_IFS=m # CONFIG_INTEL_SAR_INT1092 is not set CONFIG_INTEL_PMC_CORE=m +CONFIG_INTEL_PMT_CLASS=m +CONFIG_INTEL_PMT_TELEMETRY=m +CONFIG_INTEL_PMT_CRASHLOG=m # # Intel Speed Select Technology interface support # +CONFIG_INTEL_SPEED_SELECT_TPMI=m CONFIG_INTEL_SPEED_SELECT_INTERFACE=m # end of Intel Speed Select Technology interface support @@ -5879,9 +5896,11 @@ CONFIG_INTEL_OAKTRAIL=m # CONFIG_INTEL_ISHTP_ECLITE is not set # CONFIG_INTEL_PUNIT_IPC is not set CONFIG_INTEL_RST=m +# CONFIG_INTEL_SDSI is not set # CONFIG_INTEL_SMARTCONNECT is not set +CONFIG_INTEL_TPMI=m CONFIG_INTEL_TURBO_MAX_3=y -# CONFIG_INTEL_VSEC is not set +CONFIG_INTEL_VSEC=y # CONFIG_MSI_EC is not set CONFIG_MSI_LAPTOP=m CONFIG_MSI_WMI=m @@ -6603,7 +6622,9 @@ CONFIG_PWM_LPSS_PLATFORM=m # end of IRQ chip support # CONFIG_IPACK_BUS is not set -# CONFIG_RESET_CONTROLLER is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set # # PHY Subsystem @@ -6627,6 +6648,7 @@ CONFIG_PWM_LPSS_PLATFORM=m CONFIG_POWERCAP=y CONFIG_INTEL_RAPL_CORE=m CONFIG_INTEL_RAPL=m +# CONFIG_INTEL_RAPL_TPMI is not set CONFIG_IDLE_INJECT=y # CONFIG_MCB is not set @@ -6660,6 +6682,7 @@ CONFIG_DAX=y CONFIG_DEV_DAX=y CONFIG_DEV_DAX_PMEM=y 
CONFIG_DEV_DAX_HMEM=y +CONFIG_DEV_DAX_CXL=m CONFIG_DEV_DAX_HMEM_DEVICES=y CONFIG_DEV_DAX_KMEM=y CONFIG_NVMEM=y @@ -6740,7 +6763,13 @@ CONFIG_XFS_WARN=y CONFIG_GFS2_FS=m CONFIG_GFS2_FS_LOCKING_DLM=y # CONFIG_OCFS2_FS is not set -# CONFIG_BTRFS_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set # CONFIG_NILFS2_FS is not set # CONFIG_F2FS_FS is not set # CONFIG_ZONEFS_FS is not set @@ -7374,7 +7403,7 @@ CONFIG_CRYPTO_POLY1305_X86_64=m CONFIG_CRYPTO_SHA1_SSSE3=y CONFIG_CRYPTO_SHA256_SSSE3=y CONFIG_CRYPTO_SHA512_SSSE3=y -CONFIG_CRYPTO_SM3_AVX_X86_64=y +CONFIG_CRYPTO_SM3_AVX_X86_64=m CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m CONFIG_CRYPTO_CRC32C_INTEL=m CONFIG_CRYPTO_CRC32_PCLMUL=m @@ -7633,8 +7662,8 @@ CONFIG_DEBUG_MISC=y CONFIG_DEBUG_INFO=y CONFIG_AS_HAS_NON_CONST_LEB128=y # CONFIG_DEBUG_INFO_NONE is not set -# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set -CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set # CONFIG_DEBUG_INFO_DWARF5 is not set # CONFIG_DEBUG_INFO_REDUCED is not set CONFIG_DEBUG_INFO_COMPRESSED_NONE=y diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 981a32ab9cf6..145ac54de178 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -39,13 +39,13 @@ CONFIG_HAVE_KERNEL_XZ=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_HAVE_KERNEL_LZ4=y CONFIG_HAVE_KERNEL_ZSTD=y -CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_GZIP is not set # CONFIG_KERNEL_BZIP2 is not set # CONFIG_KERNEL_LZMA is not set # CONFIG_KERNEL_XZ is not set # CONFIG_KERNEL_LZO is not set # CONFIG_KERNEL_LZ4 is not set -# CONFIG_KERNEL_ZSTD is not set +CONFIG_KERNEL_ZSTD=y CONFIG_DEFAULT_INIT="" CONFIG_DEFAULT_HOSTNAME="(none)" CONFIG_SYSVIPC=y @@ -364,7 +364,7 @@ 
CONFIG_SCHED_OMIT_FRAME_POINTER=y CONFIG_HYPERVISOR_GUEST=y CONFIG_PARAVIRT=y # CONFIG_PARAVIRT_DEBUG is not set -# CONFIG_PARAVIRT_SPINLOCKS is not set +CONFIG_PARAVIRT_SPINLOCKS=y CONFIG_X86_HV_CALLBACK_VECTOR=y CONFIG_XEN=y # CONFIG_XEN_PV is not set @@ -411,7 +411,7 @@ CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=8192 CONFIG_NR_CPUS_DEFAULT=64 CONFIG_NR_CPUS=1024 -CONFIG_SCHED_CLUSTER=y +# CONFIG_SCHED_CLUSTER is not set CONFIG_SCHED_SMT=y CONFIG_SCHED_MC=y CONFIG_SCHED_MC_PRIO=y @@ -574,6 +574,7 @@ CONFIG_ACPI=y CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +CONFIG_ACPI_TABLE_LIB=y # CONFIG_ACPI_DEBUGGER is not set CONFIG_ACPI_SPCR_TABLE=y # CONFIG_ACPI_FPDT is not set @@ -819,7 +820,7 @@ CONFIG_SECCOMP_FILTER=y CONFIG_HAVE_ARCH_STACKLEAK=y CONFIG_HAVE_STACKPROTECTOR=y CONFIG_STACKPROTECTOR=y -# CONFIG_STACKPROTECTOR_STRONG is not set +CONFIG_STACKPROTECTOR_STRONG=y CONFIG_ARCH_SUPPORTS_LTO_CLANG=y CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y CONFIG_LTO_NONE=y @@ -937,6 +938,7 @@ CONFIG_BLOCK=y CONFIG_BLOCK_LEGACY_AUTOLOAD=y CONFIG_BLK_RQ_ALLOC_TIME=y CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y CONFIG_BLK_DEV_BSG_COMMON=y CONFIG_BLK_ICQ=y CONFIG_BLK_DEV_BSGLIB=y @@ -1979,6 +1981,7 @@ CONFIG_PCI_QUIRKS=y CONFIG_PCI_STUB=y CONFIG_PCI_PF_STUB=y CONFIG_PCI_ATS=y +CONFIG_PCI_DOE=y CONFIG_PCI_LOCKLESS_CONFIG=y CONFIG_PCI_IOV=y CONFIG_PCI_PRI=y @@ -2030,7 +2033,17 @@ CONFIG_PCI_HYPERV_INTERFACE=m # CONFIG_PCI_SW_SWITCHTEC is not set # end of PCI switch controller drivers -# CONFIG_CXL_BUS is not set +CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +# CONFIG_CXL_MEM_RAW_COMMANDS is not set +CONFIG_CXL_ACPI=m +CONFIG_CXL_PMEM=m +CONFIG_CXL_MEM=m +CONFIG_CXL_PORT=m +CONFIG_CXL_SUSPEND=y +CONFIG_CXL_REGION=y +# CONFIG_CXL_REGION_INVALIDATION_TEST is not set +CONFIG_CXL_PMU=m CONFIG_PCCARD=y # CONFIG_PCMCIA is not set CONFIG_CARDBUS=y @@ -2069,7 +2082,7 @@ CONFIG_FW_LOADER_USER_HELPER=y # 
CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set # CONFIG_FW_LOADER_COMPRESS is not set CONFIG_FW_CACHE=y -# CONFIG_FW_UPLOAD is not set +CONFIG_FW_UPLOAD=y # end of Firmware loader CONFIG_WANT_DEV_COREDUMP=y @@ -2497,7 +2510,7 @@ CONFIG_VMWARE_PVSCSI=m CONFIG_HYPERV_STORAGE=m CONFIG_LIBFC=m CONFIG_LIBFCOE=m -# CONFIG_FCOE is not set +CONFIG_FCOE=m CONFIG_FCOE_FNIC=m # CONFIG_SCSI_SNIC is not set # CONFIG_SCSI_DMX3191D is not set @@ -5848,10 +5861,14 @@ CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y CONFIG_INTEL_IFS=m # CONFIG_INTEL_SAR_INT1092 is not set CONFIG_INTEL_PMC_CORE=m +CONFIG_INTEL_PMT_CLASS=m +CONFIG_INTEL_PMT_TELEMETRY=m +CONFIG_INTEL_PMT_CRASHLOG=m # # Intel Speed Select Technology interface support # +CONFIG_INTEL_SPEED_SELECT_TPMI=m CONFIG_INTEL_SPEED_SELECT_INTERFACE=m # end of Intel Speed Select Technology interface support @@ -5872,9 +5889,11 @@ CONFIG_INTEL_OAKTRAIL=m # CONFIG_INTEL_ISHTP_ECLITE is not set # CONFIG_INTEL_PUNIT_IPC is not set CONFIG_INTEL_RST=m +# CONFIG_INTEL_SDSI is not set # CONFIG_INTEL_SMARTCONNECT is not set +CONFIG_INTEL_TPMI=m CONFIG_INTEL_TURBO_MAX_3=y -# CONFIG_INTEL_VSEC is not set +CONFIG_INTEL_VSEC=y # CONFIG_MSI_EC is not set CONFIG_MSI_LAPTOP=m CONFIG_MSI_WMI=m @@ -6592,7 +6611,9 @@ CONFIG_PWM_LPSS_PLATFORM=m # end of IRQ chip support # CONFIG_IPACK_BUS is not set -# CONFIG_RESET_CONTROLLER is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set # # PHY Subsystem @@ -6616,6 +6637,7 @@ CONFIG_PWM_LPSS_PLATFORM=m CONFIG_POWERCAP=y CONFIG_INTEL_RAPL_CORE=m CONFIG_INTEL_RAPL=m +# CONFIG_INTEL_RAPL_TPMI is not set CONFIG_IDLE_INJECT=y # CONFIG_MCB is not set @@ -6649,6 +6671,7 @@ CONFIG_DAX=y CONFIG_DEV_DAX=y CONFIG_DEV_DAX_PMEM=y CONFIG_DEV_DAX_HMEM=y +CONFIG_DEV_DAX_CXL=m CONFIG_DEV_DAX_HMEM_DEVICES=y CONFIG_DEV_DAX_KMEM=y CONFIG_NVMEM=y @@ -6729,7 +6752,13 @@ CONFIG_XFS_POSIX_ACL=y CONFIG_GFS2_FS=m CONFIG_GFS2_FS_LOCKING_DLM=y # CONFIG_OCFS2_FS is not set -# 
CONFIG_BTRFS_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set # CONFIG_NILFS2_FS is not set # CONFIG_F2FS_FS is not set # CONFIG_ZONEFS_FS is not set @@ -7363,7 +7392,7 @@ CONFIG_CRYPTO_POLY1305_X86_64=m CONFIG_CRYPTO_SHA1_SSSE3=y CONFIG_CRYPTO_SHA256_SSSE3=y CONFIG_CRYPTO_SHA512_SSSE3=y -CONFIG_CRYPTO_SM3_AVX_X86_64=y +CONFIG_CRYPTO_SM3_AVX_X86_64=m CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m CONFIG_CRYPTO_CRC32C_INTEL=m CONFIG_CRYPTO_CRC32_PCLMUL=m @@ -7620,8 +7649,8 @@ CONFIG_DEBUG_MISC=y CONFIG_DEBUG_INFO=y CONFIG_AS_HAS_NON_CONST_LEB128=y # CONFIG_DEBUG_INFO_NONE is not set -# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set -CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set # CONFIG_DEBUG_INFO_DWARF5 is not set # CONFIG_DEBUG_INFO_REDUCED is not set CONFIG_DEBUG_INFO_COMPRESSED_NONE=y -- Gitee From 1fe6009b2091126bcc3c946eeafd9742f375530c Mon Sep 17 00:00:00 2001 From: MinLi Date: Tue, 12 Mar 2024 20:07:33 +0800 Subject: [PATCH 0378/2138] anolis: loongarch: anck 6.6 support loongarch ANBZ: #7099 ANCK-6.6 supports loongarch arch firstly, commit spec and corresponding scripts and config Signed-off-by: Gu Mi Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/2935 --- anolis/genrpmtree.sh | 5 +- anolis/rpm/kernel.spec.template | 37 +- arch/loongarch/configs/anolis-debug_defconfig | 2203 +++++++++++++++++ arch/loongarch/configs/anolis_defconfig | 2203 +++++++++++++++++ 4 files changed, 4438 insertions(+), 10 deletions(-) create mode 100644 arch/loongarch/configs/anolis-debug_defconfig create mode 100644 arch/loongarch/configs/anolis_defconfig diff --git a/anolis/genrpmtree.sh b/anolis/genrpmtree.sh index dd19f111f649..7f1f4036f797 100644 --- a/anolis/genrpmtree.sh 
+++ b/anolis/genrpmtree.sh @@ -28,7 +28,10 @@ function do_prep() { cp ${DIST_SRCROOT}/arch/x86/configs/anolis-debug_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-x86_64-debug.config cp ${DIST_SRCROOT}/arch/arm64/configs/anolis_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64.config cp ${DIST_SRCROOT}/arch/arm64/configs/anolis-debug_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64-debug.config - + cp ${DIST_SRCROOT}/arch/loongarch/configs/anolis_defconfig \ + ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-loongarch64.config + cp ${DIST_SRCROOT}/arch/loongarch/configs/anolis-debug_defconfig \ + ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-loongarch64-debug.config } do_prep \ No newline at end of file diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template index ff6c6285960b..109e09ae53fe 100644 --- a/anolis/rpm/kernel.spec.template +++ b/anolis/rpm/kernel.spec.template @@ -58,6 +58,12 @@ # should we do C=1 builds with sparse %define with_sparse %{?_with_sparse: 1} %{?!_with_sparse: 0} +#For loongarch, disable kernel-debug and with_bpftool for unsupport. +%ifarch loongarch64 +%define with_debug 0 +%define with_bpftool 0 +%endif + %define with_gcov %{?_with_gcov: 1} %{?!_with_gcov: 0} # turn off debug kernel for gcov builds @@ -133,6 +139,14 @@ %define kernel_image arch/arm64/boot/Image.gz %endif +%ifarch loongarch64 +%define all_arch_configs %{name}-%{version}-loongarch64*.config +%define asmarch loongarch +%define make_target vmlinux +%define hdrarch loongarch +%define kernel_image vmlinux +%endif + # To temporarily exclude an architecture from being built, add it to # %%nobuildarches. 
Do _NOT_ use the ExclusiveArch: line, because if we # don't build kernel-headers then the new build system will no longer let @@ -173,7 +187,7 @@ Release: %{pkg_release} Summary: The Linux kernel, based on version %{version}, heavily modified with backports # DO NOT CHANGE THE 'ExclusiveArch' LINE TO TEMPORARILY EXCLUDE AN ARCHITECTURE BUILD. # SET %%nobuildarches (ABOVE) INSTEAD -ExclusiveArch: noarch i686 x86_64 aarch64 +ExclusiveArch: noarch i686 x86_64 aarch64 loongarch64 ExclusiveOS: Linux @@ -252,7 +266,8 @@ Source21: kernel-%{version}-aarch64-debug.config Source39: kernel-%{version}-x86_64.config Source40: kernel-%{version}-x86_64-debug.config Source43: generate_bls_conf.sh - +Source45: kernel-%{version}-loongarch64.config +Source46: kernel-%{version}-loongarch64-debug.config # Sources for kernel-tools @@ -973,13 +988,17 @@ BuildKernel() { # Run depmod on the resulting module tree and make sure it isn't broken depmod -b . -aeF ./System.map $KernelVer &> depmod.out - if [ -s depmod.out ]; then - echo "Depmod failure" - cat depmod.out - exit 1 - else - rm depmod.out - fi + %ifnarch loongarch64 + if [ -s depmod.out ]; then + echo "Depmod failure" + cat depmod.out + exit 1 + else + rm depmod.out + fi + %else + rm -rf depmod.out + %endif else # Ensure important files/directories exist to let the packaging succeed mkdir -p lib/modules/$KernelVer/kernel diff --git a/arch/loongarch/configs/anolis-debug_defconfig b/arch/loongarch/configs/anolis-debug_defconfig new file mode 100644 index 000000000000..db41cbf5efd4 --- /dev/null +++ b/arch/loongarch/configs/anolis-debug_defconfig @@ -0,0 +1,2203 @@ +# +## Automatically generated file; DO NOT EDIT. 
+## Linux/loongarch 6.6.7 Kernel Configuration +## +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_NUMA_BALANCING=y +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PROFILING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_NR_CPUS=256 +CONFIG_NUMA=y +CONFIG_CPU_HAS_LSX=y +CONFIG_CPU_HAS_LASX=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_LOONGSON3_ACPI_CPUFREQ=y +CONFIG_HIBERNATION=y +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_TAD=y +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG_SHA256=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_WBT=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_BSD_DISKLABEL=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BINFMT_MISC=m +CONFIG_ZSWAP=y +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y +CONFIG_Z3FOLD=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is 
not set +# CONFIG_COMPAT_BRK is not set +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_USERFAULTFD=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_TLS_TOE=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_NET_IPVTI=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_ESPINTCP=y +CONFIG_INET_IPCOMP=m +CONFIG_INET_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_ESPINTCP=y +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y 
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_RPL_LWTUNNEL=y +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m 
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m 
+CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m 
+CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_CONNTRACK_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BPFILTER=y +CONFIG_IP_DCCP=m +CONFIG_IP_DCCP_CCID2_DEBUG=y +CONFIG_IP_DCCP_CCID3_DEBUG=y +CONFIG_IP_DCCP_DEBUG=y +CONFIG_SCTP_DBG_OBJCNT=y +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +CONFIG_RDS_DEBUG=y +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_CLIP_NO_ICMP=y +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_ATM_BR2684_IPFILTER=y +CONFIG_L2TP=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BRIDGE_MRP=y +CONFIG_NET_DSA=m +CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM=m +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +CONFIG_NET_DSA_TAG_MTK=m +CONFIG_NET_DSA_TAG_KSZ=m +CONFIG_NET_DSA_TAG_OCELOT=m +CONFIG_NET_DSA_TAG_QCA=m +CONFIG_NET_DSA_TAG_RTL4_A=m +CONFIG_NET_DSA_TAG_LAN9303=m +CONFIG_NET_DSA_TAG_SJA1105=m +CONFIG_NET_DSA_TAG_TRAILER=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m 
+CONFIG_PHONET=m +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_ETS=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_DEFAULT_FQ_CODEL=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_MPLS=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_CTINFO=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_ACT_GATE=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m 
+CONFIG_NET_TC_SKB_EXT=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_DEBUG=y +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_HSR=m +CONFIG_QRTR=m +CONFIG_QRTR_TUN=m +CONFIG_NET_NCSI=y +CONFIG_NCSI_OEM_CMD_GET_MAC=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_PKTGEN=m +CONFIG_CAN=m +CONFIG_BT=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +# CONFIG_BT_HCIBTUSB_BCM is not set +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +CONFIG_CFG80211=m +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_RFKILL=m +CONFIG_RFKILL_INPUT=y +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIE_DPC=y +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_IOV=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_SHPC=y +CONFIG_PCCARD=m +# CONFIG_PCMCIA is not set +CONFIG_YENTA=m +CONFIG_RAPIDIO=y +CONFIG_RAPIDIO_TSI721=y +CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y +CONFIG_RAPIDIO_ENUM_BASIC=m +CONFIG_RAPIDIO_CHMAN=m +CONFIG_RAPIDIO_MPORT_CDEV=m +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_CONNECTOR=y +CONFIG_DMI_SYSFS=y +CONFIG_ISCSI_IBFT=m +CONFIG_EFI_ZBOOT=y +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m +CONFIG_MTD=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_CFI=m +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_CFI_INTELEXT=m 
+CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_RAM=m +CONFIG_MTD_ROM=m +CONFIG_MTD_BLOCK2MTD=m +CONFIG_MTD_SPI_NOR=m +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_GLUEBI=m +CONFIG_MTD_UBI_BLOCK=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +CONFIG_PARPORT_PC_FIFO=y +CONFIG_PARPORT_1284=y +# CONFIG_PNP_DEBUG_MESSAGES is not set +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_ZSTD=y +CONFIG_ZRAM_WRITEBACK=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +CONFIG_BLK_DEV_DRBD=m +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_CDROM_PKTCDVD=m +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_PASSTHRU=y +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +CONFIG_EEPROM_AT24=m +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +CONFIG_UACCE=m +CONFIG_PVPANIC=y +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_ISCSI_TCP=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_MVSAS=y +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=y +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y +CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=y +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m 
+CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_ATA_PIIX=m +CONFIG_PATA_ATIIXP=y +CONFIG_ATA_GENERIC=m +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +CONFIG_FUSION_SAS=m +CONFIG_FUSION_CTL=m +CONFIG_FUSION_LOGGING=y +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NTB_NETDEV=m +CONFIG_RIONET=m +CONFIG_TUN=m +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ATM_DRIVERS is not set 
+# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +CONFIG_BNX2=y +CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BNXT_DCB=y +# CONFIG_NET_VENDOR_CAVIUM is not set +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=m +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLXSW_CORE=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +CONFIG_8139TOO_8129=y +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +# 
CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +CONFIG_STMMAC_ETH=y +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +CONFIG_LED_TRIGGER_PHY=y +CONFIG_SFP=y +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_BCM7XXX_PHY=m +CONFIG_BCM87XX_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=y +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +CONFIG_CAN_VCAN=m +CONFIG_CAN_SLCAN=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +CONFIG_CAN_CC770_PLATFORM=m +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_KVASER_USB=m +CONFIG_CAN_PEAK_USB=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_THUNDER=m +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m 
+CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +# CONFIG_USB_NET_NET1080 is not set +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +# CONFIG_USB_BELKIN is not set +# CONFIG_USB_ARMLINUX is not set +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +# CONFIG_USB_NET_ZAURUS is not set +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH9K=m +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_HTC=m +CONFIG_ATH10K=m +CONFIG_ATH10K_PCI=m +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_IWLWIFI=m +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +CONFIG_MT7601U=m +CONFIG_MT76x0U=m +CONFIG_MT76x2U=m +CONFIG_RT2X00=m +CONFIG_RT2800PCI=m +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8XXXU=m +# 
CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +CONFIG_ZD1211RW=m +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_MAC80211_HWSIM=m +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m +CONFIG_IEEE802154_FAKELB=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_USB4_NET=m +CONFIG_NETDEVSIM=m +CONFIG_ISDN=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_XTKBD=m +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_SENTELIC=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +CONFIG_MOUSE_VSXXXAA=m +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_KBTAB=m +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +CONFIG_INPUT_MISC=y +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F34=y +CONFIG_RMI4_F55=y +CONFIG_SERIO_SERPORT=m +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_ARC_PS2=m +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=16 +CONFIG_SERIAL_8250_RUNTIME_UARTS=16 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y 
+CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_JSM=m +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_NOZOMI=m +CONFIG_PRINTER=m +CONFIG_PPDEV=m +CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_TCG_TIS_SPI=m +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TCG_TIS_ST33ZP24_SPI=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_PIIX4=y +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m +CONFIG_I2C_SCMI=m +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_GPIO=y +CONFIG_I2C_LS2X=m +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT=m +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m +CONFIG_I2C_STUB=m +CONFIG_SPI=y +CONFIG_SPI_LOONGSON_PCI=y +CONFIG_SPI_LOONGSON_PLATFORM=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m +CONFIG_DP83640_PHY=m +CONFIG_PINCTRL=y +CONFIG_PINCTRL_LOONGSON2=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_AMDPT=m +CONFIG_GPIO_LOONGSON_64BIT=y +CONFIG_GPIO_VIPERBOARD=m +CONFIG_POWER_RESET=y +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m 
+CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MCP3021=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_ADM1275=m +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m 
+CONFIG_SENSORS_W83795=m +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_EMULATION=y +CONFIG_LOONGSON2_THERMAL=m +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +CONFIG_WDAT_WDT=m +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m +CONFIG_USBPCWATCHDOG=m +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +CONFIG_MFD_VIPERBOARD=m +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +CONFIG_MFD_VX855=m +CONFIG_RC_CORE=m +CONFIG_LIRC=y +CONFIG_RC_DECODERS=y +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +CONFIG_IR_SANYO_DECODER=m +CONFIG_IR_SHARP_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_XMP_DECODER=m +CONFIG_RC_DEVICES=y +CONFIG_IR_ENE=m +CONFIG_IR_FINTEK=m +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_SERIAL=m +CONFIG_IR_SERIAL_TRANSMITTER=y +CONFIG_IR_STREAMZAP=m +CONFIG_IR_TTUSBIR=m +CONFIG_RC_ATI_REMOTE=m +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +CONFIG_MEDIA_SUPPORT=m +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_GSPCA=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m 
+CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_GL860=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_PWC=m +CONFIG_USB_S2255=m +CONFIG_USB_VIDEO_CLASS=m +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_AU0828=m +CONFIG_DVB_B2C2_FLEXCOP_USB=m +CONFIG_DVB_USB_V2=m +CONFIG_DVB_USB_AF9035=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_AZ6007=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_MXL111SF=m +CONFIG_DVB_USB=m +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_CXUSB=m +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIBUSB_MB=m +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_DTT200U=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_PCTV452E=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +CONFIG_DVB_USB_TTUSB2=m +CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_VP7045=m +CONFIG_SMS_USB_DRV=m +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m +CONFIG_VIDEO_EM28XX=m +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_MEDIA_PCI_SUPPORT=y +CONFIG_VIDEO_IVTV=m +CONFIG_VIDEO_FB_IVTV=m +CONFIG_VIDEO_BT848=m +CONFIG_DVB_BT8XX=m +CONFIG_VIDEO_CX18=m 
+CONFIG_VIDEO_CX23885=m +CONFIG_MEDIA_ALTERA_CI=m +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +# CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_SAA7164=m +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +CONFIG_DVB_DDBRIDGE=m +CONFIG_DVB_DM1105=m +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m +CONFIG_DVB_NGENE=m +CONFIG_DVB_PLUTO2=m +CONFIG_DVB_PT1=m +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_AV=m +CONFIG_SMS_SDIO_DRV=m +CONFIG_DVB_FIREDTV=m +CONFIG_DRM=y +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DP_AUX_CHARDEV=y +CONFIG_DRM_DP_CEC=y +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_AMDGPU_SI=y +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_VKMS=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=y +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_LOONGSON=y +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FB_RADEON=y +CONFIG_FB_LS2K500=m +CONFIG_FB_TILEBLITTING=y +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_LCD_PLATFORM=m +CONFIG_BACKLIGHT_LP855X=m +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_OSSEMUL=y +CONFIG_SND_HRTIMER=m +# CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +CONFIG_SND_MPU401=m +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 +CONFIG_SND_AD1889=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m 
+CONFIG_SND_AU8830=m +CONFIG_SND_BT87X=m +CONFIG_SND_BT87X_OVERCLOCK=y +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN=m +CONFIG_SND_CS46XX=m +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MIXART=m +CONFIG_SND_PCXHR=m +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=0 +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_PREALLOC_SIZE=512 +# CONFIG_SND_SPI is not set +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_DICE=m +CONFIG_SND_OXFW=m +CONFIG_SND_ISIGHT=m +CONFIG_SND_FIREWORKS=m +CONFIG_SND_BEBOB=m +CONFIG_SND_FIREWIRE_DIGI00X=m +CONFIG_SND_FIREWIRE_TASCAM=m +CONFIG_SND_FIREWIRE_MOTU=m +CONFIG_SND_FIREFACE=m +CONFIG_SND_SOC=m +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y 
+CONFIG_UHID=m +CONFIG_HID_A4TECH=m +CONFIG_HID_ACRUX=m +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LENOVO=m +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +CONFIG_HID_SMARTJOYPLUS=m +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_I2C_HID=m +CONFIG_USB_LED_TRIG=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PLATFORM=m +CONFIG_USB_EHCI_HCD=y 
+CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_PRINTER=m +CONFIG_USB_TMC=m +CONFIG_USB_STORAGE=m +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +CONFIG_USB_DWC2=y +CONFIG_USB_DWC2_HOST=y +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m 
+CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +CONFIG_USB_SERIAL_DEBUG=m +CONFIG_USB_USS720=m +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m +CONFIG_USB_GADGET=y +CONFIG_TYPEC=m +CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_RT1711H=m +CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_UCSI=m +CONFIG_UCSI_ACPI=m +CONFIG_TYPEC_TPS6598X=m +CONFIG_TYPEC_MUX_PI3USB30532=m +CONFIG_TYPEC_DP_ALTMODE=m +CONFIG_MMC=m +CONFIG_SDIO_UART=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_SDHCI_XENON=m +CONFIG_MEMSTICK=m +CONFIG_MSPRO_BLOCK=m +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_LM3530=m +CONFIG_LEDS_LP3944=m +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +CONFIG_LEDS_TRIGGER_AUDIO=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_IPOIB=m 
+CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_RV8803=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=m +CONFIG_RTC_DRV_STK17TA8=m +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_LOONGSON=y +CONFIG_DMADEVICES=y +CONFIG_DW_DMAC=m +CONFIG_ASYNC_TX_DMA=y +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_PCI=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_COMEDI=m +CONFIG_COMEDI_PCI_DRIVERS=m +CONFIG_COMEDI_8255_PCI=m +CONFIG_COMEDI_ADL_PCI6208=m +CONFIG_COMEDI_ADL_PCI7X3X=m +CONFIG_COMEDI_ADL_PCI8164=m +CONFIG_COMEDI_ADL_PCI9111=m +CONFIG_COMEDI_ADL_PCI9118=m +CONFIG_COMEDI_ADV_PCI1710=m +CONFIG_COMEDI_ADV_PCI1720=m +CONFIG_COMEDI_ADV_PCI1723=m +CONFIG_COMEDI_ADV_PCI1724=m +CONFIG_COMEDI_ADV_PCI1760=m +CONFIG_COMEDI_ADV_PCI_DIO=m +CONFIG_COMEDI_NI_LABPC_PCI=m +CONFIG_COMEDI_NI_PCIDIO=m 
+CONFIG_COMEDI_NI_PCIMIO=m +CONFIG_STAGING=y +CONFIG_COMMON_CLK_LOONGSON2=y +CONFIG_LOONGSON2_GUTS=y +CONFIG_LOONGSON2_PM=y +CONFIG_PM_DEVFREQ=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_IIO=m +CONFIG_HID_SENSOR_ACCEL_3D=m +CONFIG_HID_SENSOR_GYRO_3D=m +CONFIG_HID_SENSOR_HUMIDITY=m +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +CONFIG_HID_SENSOR_PRESS=m +CONFIG_HID_SENSOR_TEMP=m +CONFIG_NTB=m +CONFIG_NTB_PINGPONG=m +CONFIG_NTB_TOOL=m +CONFIG_NTB_PERF=m +CONFIG_NTB_TRANSPORT=m +CONFIG_PWM=y +CONFIG_POWERCAP=y +CONFIG_USB4=m +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +CONFIG_OCFS2_FS=m +CONFIG_BTRFS_FS=y +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +CONFIG_CACHEFILES=m +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=936 +CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" +CONFIG_EXFAT_FS=m +CONFIG_NTFS_FS=m +CONFIG_NTFS3_FS=m +CONFIG_NTFS3_64BIT_CLUSTER=y +CONFIG_NTFS3_LZX_XPRESS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_TMPFS=y 
+CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_ORANGEFS_FS=m +CONFIG_ECRYPT_FS=m +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_UBIFS_FS=m +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_MINIX_FS=m +CONFIG_ROMFS_FS=m +CONFIG_PSTORE=m +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +CONFIG_EROFS_FS=m +CONFIG_EROFS_FS_ZIP_LZMA=y +CONFIG_EROFS_FS_PCPU_KTHREAD=y +CONFIG_NFS_FS=y +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set +CONFIG_NFSD=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_NFSD_V4_2_INTER_SSC=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CEPH_FS_SECURITY_LABEL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_9P_FS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m 
+CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_KEY_DH_OPERATIONS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_YAMA=y +CONFIG_SECURITY_LOCKDOWN_LSM=y +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +CONFIG_IMA=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_LOAD_X509=y +CONFIG_EVM=y +CONFIG_EVM_LOAD_X509=y +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SM2=y +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_CHACHA20POLY1305=m 
+CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_CRC32_LOONGARCH=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +CONFIG_CRYPTO_DEV_CHELSIO=m +CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_REVOCATION_LIST=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC7=m +CONFIG_DMA_CMA=y +CONFIG_PRINTK_TIME=y +CONFIG_PRINTK_CALLER=y +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_FRAME_WARN=4096 +CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_PANIC_ON_OOPS=y +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_DEBUG_LIST=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_RUNTIME_TESTING_MENU is not set diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig new file mode 100644 index 000000000000..db41cbf5efd4 --- /dev/null +++ b/arch/loongarch/configs/anolis_defconfig @@ -0,0 +1,2203 @@ +# +## Automatically generated file; DO NOT EDIT. 
+## Linux/loongarch 6.6.7 Kernel Configuration +## +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_NUMA_BALANCING=y +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PROFILING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_NR_CPUS=256 +CONFIG_NUMA=y +CONFIG_CPU_HAS_LSX=y +CONFIG_CPU_HAS_LASX=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_LOONGSON3_ACPI_CPUFREQ=y +CONFIG_HIBERNATION=y +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_TAD=y +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG_SHA256=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_WBT=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_BSD_DISKLABEL=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BINFMT_MISC=m +CONFIG_ZSWAP=y +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y +CONFIG_Z3FOLD=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is 
not set +# CONFIG_COMPAT_BRK is not set +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_USERFAULTFD=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_TLS_TOE=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_NET_IPVTI=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_ESPINTCP=y +CONFIG_INET_IPCOMP=m +CONFIG_INET_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_ESPINTCP=y +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y 
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_RPL_LWTUNNEL=y +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m 
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m 
+CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m 
+CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_CONNTRACK_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BPFILTER=y +CONFIG_IP_DCCP=m +CONFIG_IP_DCCP_CCID2_DEBUG=y +CONFIG_IP_DCCP_CCID3_DEBUG=y +CONFIG_IP_DCCP_DEBUG=y +CONFIG_SCTP_DBG_OBJCNT=y +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +CONFIG_RDS_DEBUG=y +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_CLIP_NO_ICMP=y +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_ATM_BR2684_IPFILTER=y +CONFIG_L2TP=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BRIDGE_MRP=y +CONFIG_NET_DSA=m +CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM=m +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +CONFIG_NET_DSA_TAG_MTK=m +CONFIG_NET_DSA_TAG_KSZ=m +CONFIG_NET_DSA_TAG_OCELOT=m +CONFIG_NET_DSA_TAG_QCA=m +CONFIG_NET_DSA_TAG_RTL4_A=m +CONFIG_NET_DSA_TAG_LAN9303=m +CONFIG_NET_DSA_TAG_SJA1105=m +CONFIG_NET_DSA_TAG_TRAILER=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m 
+CONFIG_PHONET=m +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_ETS=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_DEFAULT_FQ_CODEL=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_MPLS=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_CTINFO=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_ACT_GATE=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m 
+CONFIG_NET_TC_SKB_EXT=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_DEBUG=y +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_HSR=m +CONFIG_QRTR=m +CONFIG_QRTR_TUN=m +CONFIG_NET_NCSI=y +CONFIG_NCSI_OEM_CMD_GET_MAC=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_PKTGEN=m +CONFIG_CAN=m +CONFIG_BT=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +# CONFIG_BT_HCIBTUSB_BCM is not set +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +CONFIG_CFG80211=m +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_RFKILL=m +CONFIG_RFKILL_INPUT=y +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIE_DPC=y +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_IOV=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_SHPC=y +CONFIG_PCCARD=m +# CONFIG_PCMCIA is not set +CONFIG_YENTA=m +CONFIG_RAPIDIO=y +CONFIG_RAPIDIO_TSI721=y +CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y +CONFIG_RAPIDIO_ENUM_BASIC=m +CONFIG_RAPIDIO_CHMAN=m +CONFIG_RAPIDIO_MPORT_CDEV=m +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_CONNECTOR=y +CONFIG_DMI_SYSFS=y +CONFIG_ISCSI_IBFT=m +CONFIG_EFI_ZBOOT=y +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m +CONFIG_MTD=m +CONFIG_MTD_BLOCK=m +CONFIG_MTD_CFI=m +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_CFI_INTELEXT=m 
+CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_RAM=m +CONFIG_MTD_ROM=m +CONFIG_MTD_BLOCK2MTD=m +CONFIG_MTD_SPI_NOR=m +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_GLUEBI=m +CONFIG_MTD_UBI_BLOCK=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +CONFIG_PARPORT_PC_FIFO=y +CONFIG_PARPORT_1284=y +# CONFIG_PNP_DEBUG_MESSAGES is not set +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_ZSTD=y +CONFIG_ZRAM_WRITEBACK=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +CONFIG_BLK_DEV_DRBD=m +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_CDROM_PKTCDVD=m +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_PASSTHRU=y +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +CONFIG_EEPROM_AT24=m +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +CONFIG_UACCE=m +CONFIG_PVPANIC=y +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_ISCSI_TCP=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_MVSAS=y +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=y +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y +CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=y +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m 
+CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_ATA_PIIX=m +CONFIG_PATA_ATIIXP=y +CONFIG_ATA_GENERIC=m +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +CONFIG_FUSION_SAS=m +CONFIG_FUSION_CTL=m +CONFIG_FUSION_LOGGING=y +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NTB_NETDEV=m +CONFIG_RIONET=m +CONFIG_TUN=m +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ATM_DRIVERS is not set 
+# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +CONFIG_BNX2=y +CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BNXT_DCB=y +# CONFIG_NET_VENDOR_CAVIUM is not set +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=m +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLXSW_CORE=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +CONFIG_8139TOO_8129=y +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +# 
CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +CONFIG_STMMAC_ETH=y +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +CONFIG_LED_TRIGGER_PHY=y +CONFIG_SFP=y +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_BCM7XXX_PHY=m +CONFIG_BCM87XX_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=y +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +CONFIG_CAN_VCAN=m +CONFIG_CAN_SLCAN=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +CONFIG_CAN_CC770_PLATFORM=m +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_KVASER_USB=m +CONFIG_CAN_PEAK_USB=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_THUNDER=m +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m 
+CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +# CONFIG_USB_NET_NET1080 is not set +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +# CONFIG_USB_BELKIN is not set +# CONFIG_USB_ARMLINUX is not set +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +# CONFIG_USB_NET_ZAURUS is not set +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH9K=m +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_HTC=m +CONFIG_ATH10K=m +CONFIG_ATH10K_PCI=m +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_IWLWIFI=m +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +CONFIG_MT7601U=m +CONFIG_MT76x0U=m +CONFIG_MT76x2U=m +CONFIG_RT2X00=m +CONFIG_RT2800PCI=m +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8XXXU=m +# 
CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +CONFIG_ZD1211RW=m +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_MAC80211_HWSIM=m +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m +CONFIG_IEEE802154_FAKELB=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_USB4_NET=m +CONFIG_NETDEVSIM=m +CONFIG_ISDN=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_XTKBD=m +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_SENTELIC=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +CONFIG_MOUSE_VSXXXAA=m +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_KBTAB=m +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +CONFIG_INPUT_MISC=y +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F34=y +CONFIG_RMI4_F55=y +CONFIG_SERIO_SERPORT=m +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_ARC_PS2=m +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=16 +CONFIG_SERIAL_8250_RUNTIME_UARTS=16 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y 
+CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_JSM=m +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_NONSTANDARD=y +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_NOZOMI=m +CONFIG_PRINTER=m +CONFIG_PPDEV=m +CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_TCG_TIS_SPI=m +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TCG_TIS_ST33ZP24_SPI=m +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_ISCH=m +CONFIG_I2C_PIIX4=y +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m +CONFIG_I2C_SCMI=m +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_GPIO=y +CONFIG_I2C_LS2X=m +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT=m +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m +CONFIG_I2C_STUB=m +CONFIG_SPI=y +CONFIG_SPI_LOONGSON_PCI=y +CONFIG_SPI_LOONGSON_PLATFORM=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m +CONFIG_DP83640_PHY=m +CONFIG_PINCTRL=y +CONFIG_PINCTRL_LOONGSON2=y +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_AMDPT=m +CONFIG_GPIO_LOONGSON_64BIT=y +CONFIG_GPIO_VIPERBOARD=m +CONFIG_POWER_RESET=y +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m 
+CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +CONFIG_SENSORS_MCP3021=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_ADM1275=m +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m 
+CONFIG_SENSORS_W83795=m +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_EMULATION=y +CONFIG_LOONGSON2_THERMAL=m +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +CONFIG_WDAT_WDT=m +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m +CONFIG_USBPCWATCHDOG=m +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +CONFIG_MFD_VIPERBOARD=m +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +CONFIG_MFD_VX855=m +CONFIG_RC_CORE=m +CONFIG_LIRC=y +CONFIG_RC_DECODERS=y +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +CONFIG_IR_SANYO_DECODER=m +CONFIG_IR_SHARP_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_XMP_DECODER=m +CONFIG_RC_DEVICES=y +CONFIG_IR_ENE=m +CONFIG_IR_FINTEK=m +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_SERIAL=m +CONFIG_IR_SERIAL_TRANSMITTER=y +CONFIG_IR_STREAMZAP=m +CONFIG_IR_TTUSBIR=m +CONFIG_RC_ATI_REMOTE=m +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +CONFIG_MEDIA_SUPPORT=m +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_GSPCA=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m 
+CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_GL860=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_PWC=m +CONFIG_USB_S2255=m +CONFIG_USB_VIDEO_CLASS=m +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_AU0828=m +CONFIG_DVB_B2C2_FLEXCOP_USB=m +CONFIG_DVB_USB_V2=m +CONFIG_DVB_USB_AF9035=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_AZ6007=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_MXL111SF=m +CONFIG_DVB_USB=m +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_CXUSB=m +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIBUSB_MB=m +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_DTT200U=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_PCTV452E=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +CONFIG_DVB_USB_TTUSB2=m +CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_VP7045=m +CONFIG_SMS_USB_DRV=m +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m +CONFIG_VIDEO_EM28XX=m +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_MEDIA_PCI_SUPPORT=y +CONFIG_VIDEO_IVTV=m +CONFIG_VIDEO_FB_IVTV=m +CONFIG_VIDEO_BT848=m +CONFIG_DVB_BT8XX=m +CONFIG_VIDEO_CX18=m 
+CONFIG_VIDEO_CX23885=m +CONFIG_MEDIA_ALTERA_CI=m +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +# CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_SAA7164=m +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +CONFIG_DVB_DDBRIDGE=m +CONFIG_DVB_DM1105=m +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m +CONFIG_DVB_NGENE=m +CONFIG_DVB_PLUTO2=m +CONFIG_DVB_PT1=m +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_AV=m +CONFIG_SMS_SDIO_DRV=m +CONFIG_DVB_FIREDTV=m +CONFIG_DRM=y +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DP_AUX_CHARDEV=y +CONFIG_DRM_DP_CEC=y +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_AMDGPU_SI=y +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_VKMS=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=y +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_LOONGSON=y +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FB_RADEON=y +CONFIG_FB_LS2K500=m +CONFIG_FB_TILEBLITTING=y +CONFIG_LCD_CLASS_DEVICE=m +CONFIG_LCD_PLATFORM=m +CONFIG_BACKLIGHT_LP855X=m +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_OSSEMUL=y +CONFIG_SND_HRTIMER=m +# CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +CONFIG_SND_MPU401=m +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 +CONFIG_SND_AD1889=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m 
+CONFIG_SND_AU8830=m +CONFIG_SND_BT87X=m +CONFIG_SND_BT87X_OVERCLOCK=y +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN=m +CONFIG_SND_CS46XX=m +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MIXART=m +CONFIG_SND_PCXHR=m +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=0 +CONFIG_SND_HDA_PATCH_LOADER=y +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_PREALLOC_SIZE=512 +# CONFIG_SND_SPI is not set +CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_DICE=m +CONFIG_SND_OXFW=m +CONFIG_SND_ISIGHT=m +CONFIG_SND_FIREWORKS=m +CONFIG_SND_BEBOB=m +CONFIG_SND_FIREWIRE_DIGI00X=m +CONFIG_SND_FIREWIRE_TASCAM=m +CONFIG_SND_FIREWIRE_MOTU=m +CONFIG_SND_FIREFACE=m +CONFIG_SND_SOC=m +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y 
+CONFIG_UHID=m +CONFIG_HID_A4TECH=m +CONFIG_HID_ACRUX=m +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LENOVO=m +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +CONFIG_HID_SMARTJOYPLUS=m +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_I2C_HID=m +CONFIG_USB_LED_TRIG=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PLATFORM=m +CONFIG_USB_EHCI_HCD=y 
+CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_PRINTER=m +CONFIG_USB_TMC=m +CONFIG_USB_STORAGE=m +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +CONFIG_USB_DWC2=y +CONFIG_USB_DWC2_HOST=y +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m 
+CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +CONFIG_USB_SERIAL_DEBUG=m +CONFIG_USB_USS720=m +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m +CONFIG_USB_GADGET=y +CONFIG_TYPEC=m +CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_RT1711H=m +CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_UCSI=m +CONFIG_UCSI_ACPI=m +CONFIG_TYPEC_TPS6598X=m +CONFIG_TYPEC_MUX_PI3USB30532=m +CONFIG_TYPEC_DP_ALTMODE=m +CONFIG_MMC=m +CONFIG_SDIO_UART=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_SDHCI_XENON=m +CONFIG_MEMSTICK=m +CONFIG_MSPRO_BLOCK=m +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_LM3530=m +CONFIG_LEDS_LP3944=m +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +CONFIG_LEDS_TRIGGER_AUDIO=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_IPOIB=m 
+CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_RV8803=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=m +CONFIG_RTC_DRV_STK17TA8=m +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m +CONFIG_RTC_DRV_LOONGSON=y +CONFIG_DMADEVICES=y +CONFIG_DW_DMAC=m +CONFIG_ASYNC_TX_DMA=y +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_PCI=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_COMEDI=m +CONFIG_COMEDI_PCI_DRIVERS=m +CONFIG_COMEDI_8255_PCI=m +CONFIG_COMEDI_ADL_PCI6208=m +CONFIG_COMEDI_ADL_PCI7X3X=m +CONFIG_COMEDI_ADL_PCI8164=m +CONFIG_COMEDI_ADL_PCI9111=m +CONFIG_COMEDI_ADL_PCI9118=m +CONFIG_COMEDI_ADV_PCI1710=m +CONFIG_COMEDI_ADV_PCI1720=m +CONFIG_COMEDI_ADV_PCI1723=m +CONFIG_COMEDI_ADV_PCI1724=m +CONFIG_COMEDI_ADV_PCI1760=m +CONFIG_COMEDI_ADV_PCI_DIO=m +CONFIG_COMEDI_NI_LABPC_PCI=m +CONFIG_COMEDI_NI_PCIDIO=m 
+CONFIG_COMEDI_NI_PCIMIO=m +CONFIG_STAGING=y +CONFIG_COMMON_CLK_LOONGSON2=y +CONFIG_LOONGSON2_GUTS=y +CONFIG_LOONGSON2_PM=y +CONFIG_PM_DEVFREQ=y +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_IIO=m +CONFIG_HID_SENSOR_ACCEL_3D=m +CONFIG_HID_SENSOR_GYRO_3D=m +CONFIG_HID_SENSOR_HUMIDITY=m +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +CONFIG_HID_SENSOR_PRESS=m +CONFIG_HID_SENSOR_TEMP=m +CONFIG_NTB=m +CONFIG_NTB_PINGPONG=m +CONFIG_NTB_TOOL=m +CONFIG_NTB_PERF=m +CONFIG_NTB_TRANSPORT=m +CONFIG_PWM=y +CONFIG_POWERCAP=y +CONFIG_USB4=m +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +CONFIG_OCFS2_FS=m +CONFIG_BTRFS_FS=y +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +CONFIG_CACHEFILES=m +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=936 +CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" +CONFIG_EXFAT_FS=m +CONFIG_NTFS_FS=m +CONFIG_NTFS3_FS=m +CONFIG_NTFS3_64BIT_CLUSTER=y +CONFIG_NTFS3_LZX_XPRESS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_TMPFS=y 
+CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_ORANGEFS_FS=m +CONFIG_ECRYPT_FS=m +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_UBIFS_FS=m +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_MINIX_FS=m +CONFIG_ROMFS_FS=m +CONFIG_PSTORE=m +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +CONFIG_EROFS_FS=m +CONFIG_EROFS_FS_ZIP_LZMA=y +CONFIG_EROFS_FS_PCPU_KTHREAD=y +CONFIG_NFS_FS=y +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set +CONFIG_NFSD=y +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_NFSD_V4_2_INTER_SSC=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CEPH_FS_SECURITY_LABEL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_9P_FS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m 
+CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_KEY_DH_OPERATIONS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_APPARMOR=y +CONFIG_SECURITY_YAMA=y +CONFIG_SECURITY_LOCKDOWN_LSM=y +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +CONFIG_IMA=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_LOAD_X509=y +CONFIG_EVM=y +CONFIG_EVM_LOAD_X509=y +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SM2=y +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_CHACHA20POLY1305=m 
+CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_CRC32_LOONGARCH=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +CONFIG_CRYPTO_DEV_CHELSIO=m +CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_REVOCATION_LIST=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC7=m +CONFIG_DMA_CMA=y +CONFIG_PRINTK_TIME=y +CONFIG_PRINTK_CALLER=y +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_FRAME_WARN=4096 +CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_PANIC_ON_OOPS=y +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_DEBUG_LIST=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_RUNTIME_TESTING_MENU is not set -- Gitee From 8fc1fb1acb181e2899eb4d2e5940276a74751f81 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 3 Dec 2021 06:08:07 -0500 Subject: [PATCH 0379/2138] anolis: crypto: ccp: Support DOWNLOAD_FIRMWARE when detect CSV ANBZ: #8571 When ccp driver detect CSV support on Hygon CPU, it should try to update the latest CSV firmware on the system paths. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Shirong Hao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2912 --- drivers/crypto/ccp/sev-dev.c | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index bd977b89e977..a91952b56cfe 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -35,6 +35,7 @@ #define DEVICE_NAME "sev" #define SEV_FW_FILE "amd/sev.fw" +#define CSV_FW_FILE "hygon/csv.fw" #define SEV_FW_NAME_SIZE 64 static DEFINE_MUTEX(sev_cmd_mutex); @@ -100,6 +101,11 @@ static inline bool sev_version_greater_or_equal(u8 maj, u8 min) return false; } +static inline bool csv_version_greater_or_equal(u32 build) +{ + return hygon_csv_build >= build; +} + static void sev_irq_handler(int irq, void *data, unsigned int status) { struct sev_device *sev = data; @@ -756,6 +762,14 @@ static int sev_get_firmware(struct device *dev, char fw_name_specific[SEV_FW_NAME_SIZE]; char fw_name_subset[SEV_FW_NAME_SIZE]; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + /* Check for CSV FW to using generic name: csv.fw */ + if (firmware_request_nowarn(firmware, CSV_FW_FILE, dev) >= 0) + return 0; + else + return -ENOENT; + } + snprintf(fw_name_specific, sizeof(fw_name_specific), "amd/amd_sev_fam%.2xh_model%.2xh.sbin", boot_cpu_data.x86, boot_cpu_data.x86_model); @@ -794,7 +808,9 @@ static int sev_update_firmware(struct device *dev) struct page *p; u64 data_size; - if (!sev_version_greater_or_equal(0, 15)) { + if (!sev_version_greater_or_equal(0, 15) && + (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON || + !csv_version_greater_or_equal(1667))) { dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n"); return -1; } @@ -840,9 +856,14 @@ static int sev_update_firmware(struct device *dev) ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); if (ret) - dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); + 
dev_dbg(dev, "Failed to update %s firmware: %#x\n", + (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + ? "CSV" : "SEV", + error); else - dev_info(dev, "SEV firmware update successful\n"); + dev_info(dev, "%s firmware update successful\n", + (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + ? "CSV" : "SEV"); __free_pages(p, order); -- Gitee From e56c3aa54563fcbf0ac7584da72d0905e183f3a6 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 3 Dec 2021 05:31:27 -0500 Subject: [PATCH 0380/2138] anolis: crypto: ccp: Implement CSV_PLATFORM_INIT ioctl command ANBZ: #8571 The CSV_PLATFORM_INIT command can be used by the platform owner to switch platform from PSTATE.UNINIT to PSTATE.INIT. In the upcoming patches, we'll support DOWNLOAD_FIRMWARE at userspace. Due to DOWNLOAD_FIRMWARE can only performed when platform is in the PSTATE.UNINIT, we need invoke PLATFORM_INIT following DOWNLOAD_FIRMWARE to switch platform back to PSTATE.INIT. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Shirong Hao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2912 --- drivers/crypto/ccp/sev-dev.c | 3 +++ include/uapi/linux/psp-sev.h | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index a91952b56cfe..c8d98004c0da 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1187,6 +1187,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { switch (input.cmd) { + case CSV_PLATFORM_INIT: + ret = __sev_platform_init_locked(&input.error); + goto result_to_user; case CSV_HGSC_CERT_IMPORT: ret = csv_ioctl_do_hgsc_import(&input); goto result_to_user; diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index ae76776c0b15..a82643c0d795 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -36,6 +36,7 @@ enum { * CSV platform commands */ enum { + 
CSV_PLATFORM_INIT = 101, CSV_HGSC_CERT_IMPORT = 201, CSV_MAX, -- Gitee From 45d926c4903e2834522803adc83987b27985a7e4 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 3 Dec 2021 05:33:25 -0500 Subject: [PATCH 0381/2138] anolis: crypto: ccp: Implement CSV_PLATFORM_SHUTDOWN ioctl command ANBZ: #8571 The CSV_PLATFORM_SHUTDOWN command can be used by the platform owner to switch platform to PSTATE.UNINIT. The DOWNLOAD_FIRMWARE API can only performed when platform is in the PSTATE.UNINIT. In order to support DOWNLOAD_FIRMWARE at userspace, we need invoke PLATFORM_SHUTDOWN before that. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Shirong Hao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2912 --- drivers/crypto/ccp/sev-dev.c | 3 +++ include/uapi/linux/psp-sev.h | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index c8d98004c0da..5e1f994b00bf 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1190,6 +1190,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) case CSV_PLATFORM_INIT: ret = __sev_platform_init_locked(&input.error); goto result_to_user; + case CSV_PLATFORM_SHUTDOWN: + ret = __sev_platform_shutdown_locked(&input.error); + goto result_to_user; case CSV_HGSC_CERT_IMPORT: ret = csv_ioctl_do_hgsc_import(&input); goto result_to_user; diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index a82643c0d795..8ea91a7f9521 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -37,6 +37,7 @@ enum { */ enum { CSV_PLATFORM_INIT = 101, + CSV_PLATFORM_SHUTDOWN = 102, CSV_HGSC_CERT_IMPORT = 201, CSV_MAX, -- Gitee From 4b9bb4558a8669cb6c2b9305d16d6ae20d72c9fb Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 3 Dec 2021 05:58:23 -0500 Subject: [PATCH 0382/2138] anolis: crypto: ccp: Implement CSV_DOWNLOAD_FIRMWARE ioctl command ANBZ: #8571 The CSV_DOWNLOAD_FIRMWARE 
command can be used by the platform owner to updating CSV firmware. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Shirong Hao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2912 --- drivers/crypto/ccp/sev-dev.c | 70 ++++++++++++++++++++++++++++++++++++ include/linux/psp-sev.h | 2 ++ include/uapi/linux/psp-sev.h | 12 +++++++ 3 files changed, 84 insertions(+) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 5e1f994b00bf..58a8ae09f8c1 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1115,6 +1115,73 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) return ret; } +static int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) +{ + struct sev_data_download_firmware *data = NULL; + struct csv_user_data_download_firmware input; + int ret, order; + struct page *p; + u64 data_size; + + /* Only support DOWNLOAD_FIRMWARE if build greater or equal 1667 */ + if (!csv_version_greater_or_equal(1667)) { + pr_err("DOWNLOAD_FIRMWARE not supported\n"); + return -EIO; + } + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + if (!input.address) { + argp->error = SEV_RET_INVALID_ADDRESS; + return -EINVAL; + } + + if (!input.length || input.length > CSV_FW_MAX_SIZE) { + argp->error = SEV_RET_INVALID_LEN; + return -EINVAL; + } + + /* + * CSV FW expects the physical address given to it to be 32 + * byte aligned. Memory allocated has structure placed at the + * beginning followed by the firmware being passed to the CSV + * FW. Allocate enough memory for data structure + alignment + * padding + CSV FW. + */ + data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32); + + order = get_order(input.length + data_size); + p = alloc_pages(GFP_KERNEL, order); + if (!p) + return -ENOMEM; + + /* + * Copy firmware data to a kernel allocated contiguous + * memory region. 
+ */ + data = page_address(p); + if (copy_from_user((void *)(page_address(p) + data_size), + (void *)input.address, input.length)) { + ret = -EFAULT; + goto err_free_page; + } + + data->address = __psp_pa(page_address(p) + data_size); + data->len = input.length; + + ret = __sev_do_cmd_locked(SEV_CMD_DOWNLOAD_FIRMWARE, data, &argp->error); + if (ret) + pr_err("Failed to update CSV firmware: %#x\n", argp->error); + else + pr_info("CSV firmware update successful\n"); + +err_free_page: + __free_pages(p, order); + + return ret; +} + static int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp) { struct csv_user_data_hgsc_cert_import input; @@ -1193,6 +1260,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) case CSV_PLATFORM_SHUTDOWN: ret = __sev_platform_shutdown_locked(&input.error); goto result_to_user; + case CSV_DOWNLOAD_FIRMWARE: + ret = csv_ioctl_do_download_firmware(&input); + goto result_to_user; case CSV_HGSC_CERT_IMPORT: ret = csv_ioctl_do_hgsc_import(&input); goto result_to_user; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 2b40efb57274..4d34d0f3d019 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -16,6 +16,8 @@ #define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */ +#define CSV_FW_MAX_SIZE 0x80000 /* 512KB */ + /** * SEV platform state */ diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 8ea91a7f9521..07db804852a2 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -38,6 +38,7 @@ enum { enum { CSV_PLATFORM_INIT = 101, CSV_PLATFORM_SHUTDOWN = 102, + CSV_DOWNLOAD_FIRMWARE = 128, CSV_HGSC_CERT_IMPORT = 201, CSV_MAX, @@ -180,6 +181,17 @@ struct csv_user_data_hgsc_cert_import { __u32 hgsc_cert_len; /* In */ } __packed; +/** + * struct csv_user_data_download_firmware - DOWNLOAD_FIRMWARE command parameters + * + * @address: physical address of CSV firmware image + * @length: length of the CSV firmware image + */ +struct 
csv_user_data_download_firmware { + __u64 address; /* In */ + __u32 length; /* In */ +} __packed; + /** * struct sev_issue_cmd - SEV ioctl parameters * -- Gitee From 87303d5a4865e5c1a5d46329731138c799f176cb Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 14:42:49 +0800 Subject: [PATCH 0383/2138] anolis: crypto: ccp: Introduce init and free helpers to manage CSV RING_BUFFER queues ANBZ: #8572 There are up to two queues created in RING_BUFFER mode, each with two sub-queues. The sub-queues store the command pointer entries (written only by the x86) and status entries (written only by the CSV Firmware) respectively. The two queues are low priority queue (required) and high priority queue (optional) respectively. In this change, we introduce csv_ring_buffer_queue_init() to initialize CSV RING_BUFFER queues, and csv_ring_buffer_queue_free() to cleanup CSV RING_BUFFER queues. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/psp-ringbuf.c | 29 +++++++++++ drivers/crypto/ccp/psp-ringbuf.h | 31 ++++++++++++ drivers/crypto/ccp/sev-dev.c | 87 ++++++++++++++++++++++++++++++++ drivers/crypto/ccp/sev-dev.h | 4 ++ include/linux/psp-sev.h | 38 ++++++++++++++ 6 files changed, 191 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/ccp/psp-ringbuf.c create mode 100644 drivers/crypto/ccp/psp-ringbuf.h diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index aa0ba2d17e1e..82be0ac4a0b6 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -12,7 +12,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ sev-dev.o \ tee-dev.o \ platform-access.o \ - dbc.o + dbc.o \ + psp-ringbuf.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ diff --git a/drivers/crypto/ccp/psp-ringbuf.c 
b/drivers/crypto/ccp/psp-ringbuf.c new file mode 100644 index 000000000000..485c6da91ca9 --- /dev/null +++ b/drivers/crypto/ccp/psp-ringbuf.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Platform Security Processor (PSP) interface + * + * Copyright (C) 2016-2023 Hygon Info Technologies Ltd. + * + * Author: Baoshun Fang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "psp-ringbuf.h" + +int csv_queue_init(struct csv_queue *queue, + void *buffer, unsigned int size, size_t esize) +{ + size /= esize; + + queue->head = 0; + queue->tail = 0; + queue->esize = esize; + queue->data = (u64)buffer; + queue->mask = size - 1; + queue->data_align = ALIGN(queue->data, CSV_RING_BUFFER_ALIGN); + + return 0; +} diff --git a/drivers/crypto/ccp/psp-ringbuf.h b/drivers/crypto/ccp/psp-ringbuf.h new file mode 100644 index 000000000000..cb6f1f7b5736 --- /dev/null +++ b/drivers/crypto/ccp/psp-ringbuf.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) interface driver + * + * Copyright (C) 2016-2023 Hygon Info Technologies Ltd. 
+ * + * Author: Baoshun Fang + */ + +#ifndef __PSP_RINGBUF_H__ +#define __PSP_RINGBUF_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int csv_queue_init(struct csv_queue *queue, + void *buffer, unsigned int size, size_t esize); + +#endif /* __PSP_RINGBUF_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 58a8ae09f8c1..94ad5adb4acf 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1350,6 +1350,93 @@ int sev_guest_df_flush(int *error) } EXPORT_SYMBOL_GPL(sev_guest_df_flush); +int csv_ring_buffer_queue_free(void); + +static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer) +{ + int ret = 0; + void *cmd_ptr_buffer = NULL; + void *stat_val_buffer = NULL; + + memset((void *)ring_buffer, 0, sizeof(struct csv_ringbuffer_queue)); + + cmd_ptr_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!cmd_ptr_buffer) + return -ENOMEM; + + csv_queue_init(&ring_buffer->cmd_ptr, cmd_ptr_buffer, + CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + + stat_val_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!stat_val_buffer) { + ret = -ENOMEM; + goto free_cmdptr; + } + + csv_queue_init(&ring_buffer->stat_val, stat_val_buffer, + CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + return 0; + +free_cmdptr: + kfree(cmd_ptr_buffer); + + return ret; +} + +int csv_ring_buffer_queue_init(void) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + int i, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ret = __csv_ring_buffer_queue_init(&sev->ring_buffer[i]); + if (ret) + goto e_free; + } + + return 0; + +e_free: + csv_ring_buffer_queue_free(); + return ret; +} +EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_init); + +int csv_ring_buffer_queue_free(void) +{ + struct psp_device *psp 
= psp_master; + struct sev_device *sev; + struct csv_ringbuffer_queue *ring_buffer; + int i; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (i = 0; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ring_buffer = &sev->ring_buffer[i]; + + if (ring_buffer->cmd_ptr.data) { + kfree((void *)ring_buffer->cmd_ptr.data); + ring_buffer->cmd_ptr.data = 0; + } + + if (ring_buffer->stat_val.data) { + kfree((void *)ring_buffer->stat_val.data); + ring_buffer->stat_val.data = 0; + } + } + return 0; +} +EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_free); + static void sev_exit(struct kref *ref) { misc_deregister(&misc_dev->misc); diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h index 778c95155e74..372183b8c58f 100644 --- a/drivers/crypto/ccp/sev-dev.h +++ b/drivers/crypto/ccp/sev-dev.h @@ -25,6 +25,8 @@ #include #include +#include "psp-ringbuf.h" + #define SEV_CMDRESP_CMD GENMASK(26, 16) #define SEV_CMD_COMPLETE BIT(1) #define SEV_CMDRESP_IOC BIT(0) @@ -52,6 +54,8 @@ struct sev_device { u8 build; void *cmd_buf; + + struct csv_ringbuffer_queue ring_buffer[CSV_COMMAND_PRIORITY_NUM]; }; int sev_dev_init(struct psp_device *psp); diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 4d34d0f3d019..9656c4179581 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -88,6 +88,18 @@ enum csv_cmd { CSV_CMD_MAX, }; +/** + * Ring Buffer Mode regions: + * There are 4 regions and every region is a 4K area that must be 4K aligned. + * To accomplish this allocate an amount that is the size of area and the + * required alignment. + * The aligned address will be calculated from the returned address. 
+ */ +#define CSV_RING_BUFFER_SIZE (32 * 1024) +#define CSV_RING_BUFFER_ALIGN (4 * 1024) +#define CSV_RING_BUFFER_LEN (CSV_RING_BUFFER_SIZE + CSV_RING_BUFFER_ALIGN) +#define CSV_RING_BUFFER_ESIZE 16 + /** * struct sev_data_init - INIT command parameters * @@ -546,6 +558,24 @@ struct csv_data_hgsc_cert_import { u32 hgsc_cert_len; /* In */ } __packed; +#define CSV_COMMAND_PRIORITY_HIGH 0 +#define CSV_COMMAND_PRIORITY_LOW 1 +#define CSV_COMMAND_PRIORITY_NUM 2 + +struct csv_queue { + u32 head; + u32 tail; + u32 mask; /* mask = (size - 1), inicates the elements max count */ + u32 esize; /* size of an element */ + u64 data; + u64 data_align; +} __packed; + +struct csv_ringbuffer_queue { + struct csv_queue cmd_ptr; + struct csv_queue stat_val; +} __packed; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP /** @@ -662,6 +692,10 @@ int sev_guest_decommission(struct sev_data_decommission *data, int *error); void *psp_copy_user_blob(u64 uaddr, u32 len); +int csv_ring_buffer_queue_init(void); + +int csv_ring_buffer_queue_free(void); + #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int @@ -685,6 +719,10 @@ sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int static inline void *psp_copy_user_blob(u64 __user uaddr, u32 len) { return ERR_PTR(-EINVAL); } +static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } + +static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ -- Gitee From 58d9c32a080007da10ccc12fde090274ec295490 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 15:21:33 +0800 Subject: [PATCH 0384/2138] anolis: crypto: ccp: Add support for enqueue command pointers in CSV RING_BUFFER mode ANBZ: #8572 In CSV RING_BUFFER mode, X86 will enqueue command pointers to the sub-queue which stores the command pointers. The priority will be given through parameter. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- drivers/crypto/ccp/psp-ringbuf.c | 51 ++++++++++++++++++++++++++++++++ drivers/crypto/ccp/psp-ringbuf.h | 2 ++ drivers/crypto/ccp/sev-dev.c | 22 ++++++++++++++ include/linux/psp-sev.h | 12 ++++++++ 4 files changed, 87 insertions(+) diff --git a/drivers/crypto/ccp/psp-ringbuf.c b/drivers/crypto/ccp/psp-ringbuf.c index 485c6da91ca9..e2c236b71fec 100644 --- a/drivers/crypto/ccp/psp-ringbuf.c +++ b/drivers/crypto/ccp/psp-ringbuf.c @@ -13,6 +13,43 @@ #include "psp-ringbuf.h" +static void enqueue_data(struct csv_queue *queue, + const void *src, + unsigned int len, unsigned int off) +{ + unsigned int size = queue->mask + 1; + unsigned int esize = queue->esize; + unsigned int l; + void *data; + + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + data = (void *)queue->data_align; + memcpy(data + off, src, l); + memcpy(data, src + l, len - l); + + /* + * Make sure that the data in the ring buffer is up to date before + * incrementing the queue->tail index counter. + */ + smp_wmb(); +} + +static unsigned int queue_avail_size(struct csv_queue *queue) +{ + /* + * According to the nature of unsigned Numbers, it always work + * well even though tail < head. Reserved 1 element to distinguish + * full and empty. 
+ */ + return queue->mask - (queue->tail - queue->head); +} + int csv_queue_init(struct csv_queue *queue, void *buffer, unsigned int size, size_t esize) { @@ -27,3 +64,17 @@ int csv_queue_init(struct csv_queue *queue, return 0; } + +unsigned int csv_enqueue_cmd(struct csv_queue *queue, + const void *buf, unsigned int len) +{ + unsigned int size; + + size = queue_avail_size(queue); + if (len > size) + len = size; + + enqueue_data(queue, buf, len, queue->tail); + queue->tail += len; + return len; +} diff --git a/drivers/crypto/ccp/psp-ringbuf.h b/drivers/crypto/ccp/psp-ringbuf.h index cb6f1f7b5736..416caefb06a2 100644 --- a/drivers/crypto/ccp/psp-ringbuf.h +++ b/drivers/crypto/ccp/psp-ringbuf.h @@ -27,5 +27,7 @@ int csv_queue_init(struct csv_queue *queue, void *buffer, unsigned int size, size_t esize); +unsigned int csv_enqueue_cmd(struct csv_queue *queue, + const void *buf, unsigned int len); #endif /* __PSP_RINGBUF_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 94ad5adb4acf..f62ac6d823e2 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1383,6 +1383,28 @@ static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer return ret; } +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_cmdptr_entry cmdptr = { }; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + cmdptr.cmd_buf_ptr = __psp_pa(data); + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + if (csv_enqueue_cmd(&sev->ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL_GPL(csv_fill_cmd_queue); + int csv_ring_buffer_queue_init(void) { struct psp_device *psp = psp_master; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 9656c4179581..6083a68dcdac 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -562,6 +562,13 @@ 
struct csv_data_hgsc_cert_import { #define CSV_COMMAND_PRIORITY_LOW 1 #define CSV_COMMAND_PRIORITY_NUM 2 +struct csv_cmdptr_entry { + u16 cmd_id; + u16 cmd_flags; + u32 sw_data; + u64 cmd_buf_ptr; +} __packed; + struct csv_queue { u32 head; u32 tail; @@ -696,6 +703,8 @@ int csv_ring_buffer_queue_init(void); int csv_ring_buffer_queue_free(void); +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); + #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int @@ -723,6 +732,9 @@ static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } +static inline +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ -- Gitee From f02a74c213d7d9fced36edd9cb9881556ee2b980 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 15:35:47 +0800 Subject: [PATCH 0385/2138] anolis: crypto: ccp: Add support for dequeue status in CSV RING_BUFFER mode ANBZ: #8572 In CSV RING_BUFFER mode, X86 will dequeue status entries written by PSP after the corresponding command has been handled. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- drivers/crypto/ccp/psp-ringbuf.c | 39 ++++++++++++++++++++++++++++++++ drivers/crypto/ccp/psp-ringbuf.h | 2 ++ drivers/crypto/ccp/sev-dev.c | 32 ++++++++++++++++++++++++++ include/linux/psp-sev.h | 11 +++++++++ 4 files changed, 84 insertions(+) diff --git a/drivers/crypto/ccp/psp-ringbuf.c b/drivers/crypto/ccp/psp-ringbuf.c index e2c236b71fec..3b2f461b672c 100644 --- a/drivers/crypto/ccp/psp-ringbuf.c +++ b/drivers/crypto/ccp/psp-ringbuf.c @@ -78,3 +78,42 @@ unsigned int csv_enqueue_cmd(struct csv_queue *queue, queue->tail += len; return len; } + +static void dequeue_data(struct csv_queue *queue, + void *dst, unsigned int len, unsigned int off) +{ + unsigned int size = queue->mask + 1; + unsigned int esize = queue->esize; + unsigned int l; + + off &= queue->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + memcpy(dst, (void *)(queue->data + off), l); + memcpy((void *)((uintptr_t)dst + l), (void *)queue->data, len - l); + + /* + * Make sure that the data is copied before incrementing the + * queue->tail index counter. 
+ */ + smp_wmb(); +} + +unsigned int csv_dequeue_stat(struct csv_queue *queue, + void *buf, unsigned int len) +{ + unsigned int size; + + size = queue->tail - queue->head; + if (len > size) + len = size; + + dequeue_data(queue, buf, len, queue->head); + queue->head += len; + return len; +} diff --git a/drivers/crypto/ccp/psp-ringbuf.h b/drivers/crypto/ccp/psp-ringbuf.h index 416caefb06a2..50e014deb5ce 100644 --- a/drivers/crypto/ccp/psp-ringbuf.h +++ b/drivers/crypto/ccp/psp-ringbuf.h @@ -29,5 +29,7 @@ int csv_queue_init(struct csv_queue *queue, void *buffer, unsigned int size, size_t esize); unsigned int csv_enqueue_cmd(struct csv_queue *queue, const void *buf, unsigned int len); +unsigned int csv_dequeue_stat(struct csv_queue *queue, + void *buf, unsigned int len); #endif /* __PSP_RINGBUF_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index f62ac6d823e2..81770ee829da 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1405,6 +1405,38 @@ int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) } EXPORT_SYMBOL_GPL(csv_fill_cmd_queue); +int csv_check_stat_queue_status(int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int len; + int prio; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (prio = CSV_COMMAND_PRIORITY_HIGH; + prio < CSV_COMMAND_PRIORITY_NUM; prio++) { + do { + struct csv_statval_entry statval; + + len = csv_dequeue_stat(&sev->ring_buffer[prio].stat_val, + &statval, 1); + if (len) { + if (statval.status != 0) { + *psp_ret = statval.status; + return -EFAULT; + } + } + } while (len); + } + + return 0; +} +EXPORT_SYMBOL_GPL(csv_check_stat_queue_status); + int csv_ring_buffer_queue_init(void) { struct psp_device *psp = psp_master; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 6083a68dcdac..84ae53a6f354 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -569,6 +569,13 
@@ struct csv_cmdptr_entry { u64 cmd_buf_ptr; } __packed; +struct csv_statval_entry { + u16 status; + u16 reserved0; + u32 reserved1; + u64 reserved2; +} __packed; + struct csv_queue { u32 head; u32 tail; @@ -705,6 +712,8 @@ int csv_ring_buffer_queue_free(void); int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); +int csv_check_stat_queue_status(int *psp_ret); + #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int @@ -735,6 +744,8 @@ static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } static inline int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; } +static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ -- Gitee From c2b708ff7cbe11e26ea0e06a4b777433a1e81102 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 17:03:54 +0800 Subject: [PATCH 0386/2138] anolis: crypto: ccp: Add support to switch to CSV RING_BUFFER mode ANBZ: #8572 Invoking the RING_BUFFER command will switch the CSV firmware to RING_BUFFER mode. When the CSV firmware stays in RING_BUFFER mode, it will fetch commands from the CSV RING_BUFFER queues which are filled by X86. The CSV firmware will exit RING_BUFFER mode after the SHUTDOWN command is completed. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- drivers/crypto/ccp/sev-dev.c | 51 ++++++++++++++++++++++++++++++++++++ include/linux/psp-sev.h | 40 ++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 81770ee829da..e8ce38d04163 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -65,6 +65,8 @@ MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */ static bool psp_dead; static int psp_timeout; +static int csv_comm_mode = CSV_COMM_MAILBOX_ON; + /* Trusted Memory Region (TMR): * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator * to allocate the memory, which will return aligned memory for the specified @@ -144,6 +146,8 @@ static int sev_cmd_buffer_len(int cmd) switch (cmd) { case CSV_CMD_HGSC_CERT_IMPORT: return sizeof(struct csv_data_hgsc_cert_import); + case CSV_CMD_RING_BUFFER: + return sizeof(struct csv_data_ring_buffer); default: break; } @@ -411,6 +415,48 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) return ret; } +static int __csv_ring_buffer_enter_locked(int *error) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + int ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + 
data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = __sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + static int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; @@ -562,6 +608,11 @@ static int __sev_platform_shutdown_locked(int *error) if (ret) return ret; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + csv_comm_mode = CSV_COMM_MAILBOX_ON; + csv_ring_buffer_queue_free(); + } + sev->state = SEV_STATE_UNINIT; dev_dbg(sev->dev, "SEV firmware shutdown\n"); diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 84ae53a6f354..f9827593d060 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -83,7 +83,18 @@ enum sev_cmd { SEV_CMD_MAX, }; +/** + * CSV communication state + */ +enum csv_comm_state { + CSV_COMM_MAILBOX_ON = 0x0, + CSV_COMM_RINGBUFFER_ON = 0x1, + + CSV_COMM_MAX +}; + enum csv_cmd { + CSV_CMD_RING_BUFFER = 0x00F, CSV_CMD_HGSC_CERT_IMPORT = 0x300, CSV_CMD_MAX, }; @@ -590,6 +601,35 @@ struct csv_ringbuffer_queue { struct csv_queue stat_val; } __packed; +/** + * struct csv_data_ring_buffer - RING_BUFFER command parameters + * + * @queue_lo_cmdptr_address: physical address of the region to be used for + * low priority queue's CmdPtr ring buffer + * @queue_lo_statval_address: physical address of the region to be used for + * low priority queue's StatVal ring buffer + * @queue_hi_cmdptr_address: physical address of the region to be used for + * high priority queue's CmdPtr ring buffer + * @queue_hi_statval_address: physical address of the region to be used for + * high priority queue's StatVal ring buffer + * @queue_lo_size: size of the low priority queue in 4K pages. 
Must be 1 + * @queue_hi_size: size of the high priority queue in 4K pages. Must be 1 + * @queue_lo_threshold: queue(low) size, below which an interrupt may be generated + * @queue_hi_threshold: queue(high) size, below which an interrupt may be generated + * @int_on_empty: unconditionally interrupt when both queues are found empty + */ +struct csv_data_ring_buffer { + u64 queue_lo_cmdptr_address; /* In */ + u64 queue_lo_statval_address; /* In */ + u64 queue_hi_cmdptr_address; /* In */ + u64 queue_hi_statval_address; /* In */ + u8 queue_lo_size; /* In */ + u8 queue_hi_size; /* In */ + u16 queue_lo_threshold; /* In */ + u16 queue_hi_threshold; /* In */ + u16 int_on_empty; /* In */ +} __packed; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP /** -- Gitee From ed8721c31727acdb181b71bfa8b6801a6197cd9a Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 17:36:19 +0800 Subject: [PATCH 0387/2138] anolis: crypto: ccp: Add support for issue commands in CSV RING_BUFFER mode ANBZ: #8572 The CSV firmware stays in Mailbox mode by default. Upon successfully switched to CSV RING_BUFFER mode, the semantics of the 3 registers used for communicate between X86 and CSV firmware will be changed: - The CmdResp register becomes the RBCtl register. It is only ever written by X86. - The CmdBufAddr_Hi register becomes the RBTail register. It is only ever written by X86. - The CmdBufAddr_Lo register becomes the RBHead register. It should never be written by X86; the PSP will update it. The CSV firmware will exit CSV RING_BUFFER mode when it read invalid value from the RBCtl register. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- drivers/crypto/ccp/psp-dev.h | 13 ++++ drivers/crypto/ccp/sev-dev.c | 125 ++++++++++++++++++++++++++++++++++- include/linux/psp-sev.h | 9 +++ 3 files changed, 146 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index 8a4de69399c5..45b6e17d5770 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -17,6 +17,19 @@ #include "sp-dev.h" +#define PSP_RBCTL_X86_WRITES BIT(31) +#define PSP_RBCTL_RBMODE_ACT BIT(30) +#define PSP_RBCTL_CLR_INTSTAT BIT(29) +#define PSP_RBTAIL_QHI_TAIL_SHIFT 16 +#define PSP_RBTAIL_QHI_TAIL_MASK 0x7FF0000 +#define PSP_RBTAIL_QLO_TAIL_MASK 0x7FF + +#define PSP_RBHEAD_QHI_HEAD_SHIFT 16 +#define PSP_RBHEAD_QHI_HEAD_MASK 0x7FF0000 +#define PSP_RBHEAD_QLO_HEAD_MASK 0x7FF + +#define PSP_RBHEAD_QPAUSE_INT_STAT BIT(30) + #define MAX_PSP_NAME_LEN 16 extern struct psp_device *psp_master; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index e8ce38d04163..b38e2bf88b00 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -119,7 +119,9 @@ static void sev_irq_handler(int irq, void *data, unsigned int status) /* Check if it is SEV command completion: */ reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); - if (FIELD_GET(PSP_CMDRESP_RESP, reg)) { + if (FIELD_GET(PSP_CMDRESP_RESP, reg) || + ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + (csv_comm_mode == CSV_COMM_RINGBUFFER_ON))) { sev->int_rcvd = 1; wake_up(&sev->int_queue); } @@ -140,6 +142,22 @@ static int sev_wait_cmd_ioc(struct sev_device *sev, return 0; } +static int csv_wait_cmd_ioc_ring_buffer(struct sev_device *sev, + unsigned int *reg, + unsigned int timeout) +{ + int ret; + + ret = wait_event_timeout(sev->int_queue, + sev->int_rcvd, timeout * HZ); + if (!ret) + return -ETIMEDOUT; + 
+ *reg = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + + return 0; +} + static int sev_cmd_buffer_len(int cmd) { if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { @@ -457,6 +475,102 @@ static int __csv_ring_buffer_enter_locked(int *error) return ret; } +static int csv_get_cmd_status(struct sev_device *sev, int prio, int index) +{ + struct csv_queue *queue = &sev->ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)queue->data; + + return statval[index].status; +} + +static int __csv_do_ringbuf_cmds_locked(int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int rb_tail; + unsigned int rb_ctl; + int last_cmd_index; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH].cmd_ptr.tail + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW].cmd_ptr.tail; + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + + /* PSP response to x86 only when all queue is empty or error happends */ + rb_ctl = PSP_RBCTL_X86_WRITES | + PSP_RBCTL_RBMODE_ACT | + PSP_RBCTL_CLR_INTSTAT; + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in ring buffer completed */ + ret = csv_wait_cmd_ioc_ring_buffer(sev, ®, psp_timeout * 10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + dev_err(sev->dev, "csv ringbuffer mode command timed out, disabling PSP\n"); + psp_dead = true; + + return ret; + } + + /* cmd error happends */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + + if (psp_ret) { + last_cmd_index = (reg & 
PSP_RBHEAD_QHI_HEAD_MASK) + >> PSP_RBHEAD_QHI_HEAD_SHIFT; + *psp_ret = csv_get_cmd_status(sev, CSV_COMMAND_PRIORITY_HIGH, + last_cmd_index); + if (*psp_ret == 0) { + last_cmd_index = reg & PSP_RBHEAD_QLO_HEAD_MASK; + *psp_ret = csv_get_cmd_status(sev, + CSV_COMMAND_PRIORITY_LOW, last_cmd_index); + } + } + + return ret; +} + +static int csv_do_ringbuf_cmds(int *psp_ret) +{ + struct sev_user_data_status data; + int rc; + + mutex_lock(&sev_cmd_mutex); + + rc = __csv_ring_buffer_enter_locked(psp_ret); + if (rc) + goto cmd_unlock; + + rc = __csv_do_ringbuf_cmds_locked(psp_ret); + + /* exit ringbuf mode by send CMD in mailbox mode */ + __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + +cmd_unlock: + mutex_unlock(&sev_cmd_mutex); + + return rc; +} + static int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; @@ -1691,6 +1805,15 @@ int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd, } EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); +int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) +{ + if (!filep || filep->f_op != &sev_fops) + return -EBADF; + + return csv_do_ringbuf_cmds(psp_ret); +} +EXPORT_SYMBOL_GPL(csv_issue_ringbuf_cmds_external_user); + void sev_pci_init(void) { struct sev_device *sev = psp_master->sev_data; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index f9827593d060..05e22a17fcb5 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -754,6 +754,12 @@ int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); int csv_check_stat_queue_status(int *psp_ret); +/** + * csv_issue_ringbuf_cmds_external_user - issue CSV commands into a ring + * buffer. 
+ */ +int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); + #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int @@ -786,6 +792,9 @@ int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return - static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } +static inline int +csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ -- Gitee From f409737a3a3a1339a2f2214a67e863fdccbdea8b Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 17:51:55 +0800 Subject: [PATCH 0388/2138] anolis: KVM: SVM: Add KVM_CSV_COMMAND_BATCH command for applying CSV RING_BUFFER mode ANBZ: #8572 The API KVM_CSV_COMMAND_BATCH receives data of structure kvm_csv_command_batch which embeds a linked list of CSV command requests from userspace. It will do some preparation work to ensure data is available for CSV RING_BUFFER mode, and then issues RING_BUFFER command. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/include/asm/svm.h | 20 +++++ arch/x86/kvm/svm/sev.c | 176 +++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 14 +++ 3 files changed, 210 insertions(+) diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 3ac0ffc4f3e2..24b6a7e60f33 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -680,4 +680,24 @@ DEFINE_GHCB_ACCESSORS(sw_exit_info_2) DEFINE_GHCB_ACCESSORS(sw_scratch) DEFINE_GHCB_ACCESSORS(xcr0) +/* same to the ring buffer max num */ +#define SVM_RING_BUFFER_MAX 4094 + +struct csv_ringbuf_info_item { + struct page **pages; + uintptr_t hdr_vaddr; + uintptr_t trans_vaddr; + uintptr_t data_vaddr; + uintptr_t trans_uaddr; + uintptr_t hdr_uaddr; + unsigned long trans_len; + unsigned long hdr_len; + unsigned long n; +}; + +struct csv_ringbuf_infos { + struct csv_ringbuf_info_item *item[SVM_RING_BUFFER_MAX]; + int num; +}; + #endif diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 06a781d80a2f..ab08d3b14fc6 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -75,6 +75,8 @@ static unsigned int nr_asids; static unsigned long *sev_asid_bitmap; static unsigned long *sev_reclaim_asid_bitmap; +static DEFINE_MUTEX(csv_cmd_batch_mutex); + static const char sev_vm_mnonce[] = "VM_ATTESTATION"; struct enc_region { @@ -327,6 +329,28 @@ static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error) return __sev_issue_cmd(sev->fd, id, data, error); } +static int __csv_issue_ringbuf_cmds(int fd, int *psp_ret) +{ + struct fd f; + int ret; + + f = fdget(fd); + if (!f.file) + return -EBADF; + + ret = csv_issue_ringbuf_cmds_external_user(f.file, psp_ret); + + fdput(f); + return ret; +} + +static int csv_issue_ringbuf_cmds(struct kvm *kvm, int *psp_ret) +{ + struct kvm_sev_info *sev = 
&to_kvm_svm(kvm)->sev_info; + + return __csv_issue_ringbuf_cmds(sev->fd, psp_ret); +} + static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -1869,6 +1893,8 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd) return ret; } +static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp); + int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1953,6 +1979,14 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_SEV_RECEIVE_FINISH: r = sev_receive_finish(kvm, &sev_cmd); break; + case KVM_CSV_COMMAND_BATCH: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + mutex_lock(&csv_cmd_batch_mutex); + r = csv_command_batch(kvm, &sev_cmd); + mutex_unlock(&csv_cmd_batch_mutex); + break; + } + fallthrough; default: r = -EINVAL; goto out; @@ -3251,3 +3285,145 @@ int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) sev_unpin_memory(kvm, pages, n); return ret; } + +static int csv_ringbuf_infos_free(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + if (item) { + if (item->data_vaddr) + kfree((void *)item->data_vaddr); + + if (item->hdr_vaddr) + kfree((void *)item->hdr_vaddr); + + if (item->pages) + sev_unpin_memory(kvm, item->pages, item->n); + + kfree(item); + + ringbuf_infos->item[i] = NULL; + } + } + + return 0; +} + +typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos); +typedef int (*csv_ringbuf_output_fn)(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos); + +static int get_cmd_helpers(__u32 cmd, + csv_ringbuf_input_fn *to_ringbuf_fn, + csv_ringbuf_output_fn *to_user_fn) +{ + int ret = 0; + + /* copy commands to ring buffer*/ + switch (cmd) { + default: + ret = -EINVAL; 
+ break; + } + + return ret; +} + +static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + int ret; + struct kvm_csv_command_batch params; + uintptr_t node_addr; + struct csv_ringbuf_infos *ringbuf_infos; + csv_ringbuf_input_fn csv_cmd_to_ringbuf_fn = NULL; + csv_ringbuf_output_fn csv_copy_to_user_fn = NULL; + int prio = CSV_COMMAND_PRIORITY_HIGH; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_csv_command_batch))) + return -EFAULT; + + /* return directly if node list is NULL */ + if (!params.csv_batch_list_uaddr) + return 0; + + /* ring buffer init */ + if (csv_ring_buffer_queue_init()) + return -EINVAL; + + if (get_cmd_helpers(params.command_id, + &csv_cmd_to_ringbuf_fn, &csv_copy_to_user_fn)) { + ret = -EINVAL; + goto err_free_ring_buffer; + } + + ringbuf_infos = kzalloc(sizeof(*ringbuf_infos), GFP_KERNEL); + if (!ringbuf_infos) { + ret = -ENOMEM; + goto err_free_ring_buffer; + } + + node_addr = (uintptr_t)params.csv_batch_list_uaddr; + while (node_addr) { + struct kvm_csv_batch_list_node node; + + if (copy_from_user(&node, (void __user *)node_addr, + sizeof(struct kvm_csv_batch_list_node))) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + if (ringbuf_infos->num > SVM_RING_BUFFER_MAX) { + pr_err("%s: ring num is too large:%d, cmd:0x%x\n", + __func__, ringbuf_infos->num, params.command_id); + + ret = -EINVAL; + goto err_free_ring_buffer_infos_items; + } + + if (csv_cmd_to_ringbuf_fn(kvm, prio, + (uintptr_t)node.cmd_data_addr, + ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + /* 1st half set to HIGH queue, 2nd half set to LOW queue */ + if (ringbuf_infos->num == SVM_RING_BUFFER_MAX / 2) + prio = CSV_COMMAND_PRIORITY_LOW; + + node_addr = node.next_cmd_addr; + } + + /* ring buffer process */ + ret = csv_issue_ringbuf_cmds(kvm, &argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + ret = 
csv_check_stat_queue_status(&argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + if (csv_copy_to_user_fn && csv_copy_to_user_fn(kvm, ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + +err_free_ring_buffer_infos_items: + csv_ringbuf_infos_free(kvm, ringbuf_infos); + kfree(ringbuf_infos); + +err_free_ring_buffer: + csv_ring_buffer_queue_free(); + + return ret; +} diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 2eea9dd73c64..79a0007c6e0f 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1932,6 +1932,9 @@ enum sev_cmd_id { /* Guest Migration Extension */ KVM_SEV_SEND_CANCEL, + /* Hygon CSV batch command */ + KVM_CSV_COMMAND_BATCH = 0x18, + KVM_SEV_NR_MAX, }; @@ -2028,6 +2031,17 @@ struct kvm_sev_receive_update_data { __u32 trans_len; }; +struct kvm_csv_batch_list_node { + __u64 cmd_data_addr; + __u64 addr; + __u64 next_cmd_addr; +}; + +struct kvm_csv_command_batch { + __u32 command_id; + __u64 csv_batch_list_uaddr; +}; + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) -- Gitee From 420e130a6f939be57fa4709e4e7344651f64dfff Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 18:22:04 +0800 Subject: [PATCH 0389/2138] anolis: KVM: SVM: Prepare memory pool to allocate buffers for KVM_CSV_COMMAND_BATCH ANBZ: #8572 In the upcoming patches, many buffers need to be allocated in KVM_CSV_COMMAND_BATCH code paths. To avoid memory allocation failures, directly allocate a memory pool in sev_hardware_setup() and free the memory pool in sev_hardware_teardown(). When KVM_CSV_COMMAND_BATCH handling a batch of SEND_UPDATE_DATA/RECEIVE_UPDATE_DATA commands, it will allocate trans buffers from the memory pool. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 102 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index ab08d3b14fc6..2135ec3a9394 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -79,6 +79,9 @@ static DEFINE_MUTEX(csv_cmd_batch_mutex); static const char sev_vm_mnonce[] = "VM_ATTESTATION"; +static int alloc_trans_mempool(void); +static void free_trans_mempool(void); + struct enc_region { struct list_head list; unsigned long npages; @@ -2285,6 +2288,16 @@ void __init sev_hardware_setup(void) goto out; } + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (alloc_trans_mempool()) { + bitmap_free(sev_asid_bitmap); + sev_asid_bitmap = NULL; + bitmap_free(sev_reclaim_asid_bitmap); + sev_reclaim_asid_bitmap = NULL; + goto out; + } + } + if (min_sev_asid <= max_sev_asid) { sev_asid_count = max_sev_asid - min_sev_asid + 1; WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count)); @@ -2352,6 +2365,9 @@ void sev_hardware_unsetup(void) /* No need to take sev_bitmap_lock, all VMs have been destroyed. 
*/ sev_flush_asids(1, max_sev_asid); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + free_trans_mempool(); + bitmap_free(sev_asid_bitmap); bitmap_free(sev_reclaim_asid_bitmap); @@ -3286,6 +3302,91 @@ int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) return ret; } +/*--1024--1023--1024--1023--*/ +#define TRANS_MEMPOOL_1ST_BLOCK_OFFSET 0 +#define TRANS_MEMPOOL_2ND_BLOCK_OFFSET (1024 << PAGE_SHIFT) +#define TRANS_MEMPOOL_3RD_BLOCK_OFFSET (2047 << PAGE_SHIFT) +#define TRANS_MEMPOOL_4TH_BLOCK_OFFSET (3071 << PAGE_SHIFT) +#define TRANS_MEMPOOL_BLOCKS_MAX_OFFSET (4094 << PAGE_SHIFT) +#define TRANS_MEMPOOL_BLOCK_NUM 4 +#define TRANS_MEMPOOL_BLOCK_SIZE (1024 * PAGE_SIZE) + +static size_t g_mempool_offset; +void *g_trans_mempool[TRANS_MEMPOOL_BLOCK_NUM] = { 0, }; + +static void reset_mempool_offset(void) +{ + g_mempool_offset = 0; +} + +static int alloc_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + WARN_ONCE(g_trans_mempool[i], + "CSV: g_trans_mempool[%d] was tainted\n", i); + + g_trans_mempool[i] = kzalloc(TRANS_MEMPOOL_BLOCK_SIZE, GFP_KERNEL); + if (!g_trans_mempool[i]) + goto free_trans_mempool; + } + + g_mempool_offset = 0; + return 0; + +free_trans_mempool: + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + kfree(g_trans_mempool[i]); + g_trans_mempool[i] = NULL; + } + + return -ENOMEM; +} + +static void free_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + kfree(g_trans_mempool[i]); + g_trans_mempool[i] = NULL; + } + + g_mempool_offset = 0; +} + +static void __maybe_unused *get_trans_data_from_mempool(size_t size) +{ + void *trans = NULL; + char *trans_data = NULL; + int i; + size_t offset; + + if (g_mempool_offset < TRANS_MEMPOOL_2ND_BLOCK_OFFSET) { + i = 0; + offset = g_mempool_offset - TRANS_MEMPOOL_1ST_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_3RD_BLOCK_OFFSET) { + i = 1; + offset = g_mempool_offset - TRANS_MEMPOOL_2ND_BLOCK_OFFSET; + } 
else if (g_mempool_offset < TRANS_MEMPOOL_4TH_BLOCK_OFFSET) { + i = 2; + offset = g_mempool_offset - TRANS_MEMPOOL_3RD_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_BLOCKS_MAX_OFFSET) { + i = 3; + offset = g_mempool_offset - TRANS_MEMPOOL_4TH_BLOCK_OFFSET; + } else { + pr_err("CSV: mempool is full (offset: %lu)\n", g_mempool_offset); + return NULL; + } + + trans_data = (char *)g_trans_mempool[i]; + trans = &trans_data[offset]; + g_mempool_offset += size; + + return trans; +} + static int csv_ringbuf_infos_free(struct kvm *kvm, struct csv_ringbuf_infos *ringbuf_infos) { @@ -3421,6 +3522,7 @@ static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) err_free_ring_buffer_infos_items: csv_ringbuf_infos_free(kvm, ringbuf_infos); kfree(ringbuf_infos); + reset_mempool_offset(); err_free_ring_buffer: csv_ring_buffer_queue_free(); -- Gitee From 6ea6dfc11f97f757fb16792f78baefc17e1abe2d Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Sun, 1 Aug 2021 13:38:41 +0800 Subject: [PATCH 0390/2138] anolis: KVM: SVM: Add SEND_UPDATE_DATA command helper to support KVM_CSV_COMMAND_BATCH ANBZ: #8572 When KVM_CSV_COMMAND_BATCH handling a batch of SEND_UPDATE_DATA commands, it need execute 3 steps: 1. Enqueue each SEND_UPDATE_DATA command data to CSV RING_BUFFER queues (as input of RING_BUFFER command) 2. Issue RING_BUFFER command 3. Copy the output of RING_BUFFER command to userspace In this change, we add sev_send_update_data_to_ringbuf() to prepare input required by RING_BUFFER command as dictated in step 1, and add sev_send_update_data_copy_to_user() to copy output userspace as dictated in step 3. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 143 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 2135ec3a9394..aa2310aad56a 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3414,6 +3414,145 @@ static int csv_ringbuf_infos_free(struct kvm *kvm, return 0; } +static int +sev_send_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_data *data; + struct kvm_sev_send_update_data params; + struct csv_ringbuf_info_item *item; + void *hdr, *trans_data; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)data_ptr, + sizeof(struct kvm_sev_send_update_data))) + return -EFAULT; + + /* + * userspace shouldn't query either header or trans length in ringbuf + * mode. 
+ */ + if (!params.trans_len || !params.hdr_len) + return -EINVAL; + + if (!params.trans_uaddr || !params.guest_uaddr || + !params.guest_len || !params.hdr_uaddr) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + /* Pin guest memory */ + guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 0); + if (IS_ERR(guest_page)) + return PTR_ERR(guest_page); + + /* Allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kmalloc(params.hdr_len, GFP_KERNEL); + if (!hdr) + goto e_unpin; + + trans_data = get_trans_data_from_mempool(params.trans_len); + if (!trans_data) + goto e_free_hdr; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans_data); + data->trans_len = params.trans_len; + + /* The SEND_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = csv_fill_cmd_queue(prio, SEV_CMD_SEND_UPDATE_DATA, data, 0); + if (ret) + goto e_free; + + /* + * Create item to save page info and pointer, which will be freed + * in function csv_command_batch because it will be used after PSP + * return for copy_to_user. 
+ */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto e_free; + } + + item->pages = guest_page; + item->n = n; + item->hdr_vaddr = (uintptr_t)hdr; + item->trans_vaddr = (uintptr_t)trans_data; + item->data_vaddr = (uintptr_t)data; + item->hdr_uaddr = params.hdr_uaddr; + item->trans_uaddr = params.trans_uaddr; + item->hdr_len = params.hdr_len; + item->trans_len = params.trans_len; + + ringbuf_infos->item[ringbuf_infos->num] = item; + ringbuf_infos->num++; + + /* copy to ring buffer success, data freed after commands completed */ + goto finish; + +e_free: + kfree(data); +e_free_hdr: + kfree(hdr); +e_unpin: + sev_unpin_memory(kvm, guest_page, n); + +finish: + return ret; +} + +static int +sev_send_update_data_copy_to_user(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i, ret = 0; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)item->trans_uaddr, + (void *)item->trans_vaddr, item->trans_len)) { + ret = -EFAULT; + break; + } + + /* Copy packet header to userspace. 
*/ + if (copy_to_user((void __user *)item->hdr_uaddr, + (void *)item->hdr_vaddr, item->hdr_len)) { + ret = -EFAULT; + break; + } + } + + return ret; +} + typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio, uintptr_t data_ptr, struct csv_ringbuf_infos *ringbuf_infos); @@ -3428,6 +3567,10 @@ static int get_cmd_helpers(__u32 cmd, /* copy commands to ring buffer*/ switch (cmd) { + case KVM_SEV_SEND_UPDATE_DATA: + *to_ringbuf_fn = sev_send_update_data_to_ringbuf; + *to_user_fn = sev_send_update_data_copy_to_user; + break; default: ret = -EINVAL; break; -- Gitee From 8860d26142a025442df4402b74a614412938d3ac Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Sun, 1 Aug 2021 13:50:54 +0800 Subject: [PATCH 0391/2138] anolis: KVM: SVM: Add RECEIVE_UPDATE_DATA command helper to support KVM_CSV_COMMAND_BATCH ANBZ: #8572 When KVM_CSV_COMMAND_BATCH handles a batch of RECEIVE_UPDATE_DATA commands, it needs to execute 2 steps: 1. Enqueue each RECEIVE_UPDATE_DATA command data to CSV RING_BUFFER queues (as input of RING_BUFFER command) 2. Issue RING_BUFFER command In this change, we add sev_receive_update_data_to_ringbuf() to prepare input required by RING_BUFFER command as dictated in step 1. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 121 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index aa2310aad56a..778dd8de52bf 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3553,6 +3553,123 @@ sev_send_update_data_copy_to_user(struct kvm *kvm, return ret; } +static int +sev_receive_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_receive_update_data params; + struct sev_data_receive_update_data *data; + struct csv_ringbuf_info_item *item; + void *hdr = NULL, *trans = NULL; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -EINVAL; + + if (copy_from_user(¶ms, (void __user *)data_ptr, + sizeof(struct kvm_sev_receive_update_data))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_uaddr || !params.guest_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + ret = -ENOMEM; + trans = get_trans_data_from_mempool(params.trans_len); + if (!trans) + goto e_free_hdr; + + if (copy_from_user(trans, (void __user *)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + 
data->trans_address = __psp_pa(trans); + data->trans_len = params.trans_len; + + /* Pin guest memory */ + guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 1); + if (IS_ERR(guest_page)) { + ret = PTR_ERR(guest_page); + goto e_free; + } + + /* + * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP + * encrypts the written data with the guest's key, and the cache may + * contain dirty, unencrypted data. + */ + sev_clflush_pages(guest_page, n); + + /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = csv_fill_cmd_queue(prio, SEV_CMD_RECEIVE_UPDATE_DATA, data, 0); + + if (ret) + goto e_unpin; + + /* + * Create item to save page info and pointer, which will be freed + * in function csv_command_batch because it will be used after PSP + * return for copy_to_user. 
+ */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto e_unpin; + } + + item->pages = guest_page; + item->n = n; + item->hdr_vaddr = (uintptr_t)hdr; + item->trans_vaddr = (uintptr_t)trans; + item->data_vaddr = (uintptr_t)data; + + ringbuf_infos->item[ringbuf_infos->num] = item; + ringbuf_infos->num++; + + /* copy to ring buffer success, data freed after commands completed */ + goto finish; + +e_unpin: + sev_unpin_memory(kvm, guest_page, n); +e_free: + kfree(data); +e_free_hdr: + kfree(hdr); + +finish: + return ret; +} + typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio, uintptr_t data_ptr, struct csv_ringbuf_infos *ringbuf_infos); @@ -3571,6 +3688,10 @@ static int get_cmd_helpers(__u32 cmd, *to_ringbuf_fn = sev_send_update_data_to_ringbuf; *to_user_fn = sev_send_update_data_copy_to_user; break; + case KVM_SEV_RECEIVE_UPDATE_DATA: + *to_ringbuf_fn = sev_receive_update_data_to_ringbuf; + *to_user_fn = NULL; + break; default: ret = -EINVAL; break; -- Gitee From 44056e4a31adda5da120550eaec63db40655cc19 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 24 May 2022 22:03:04 +0800 Subject: [PATCH 0392/2138] anolis: crypto: ccp: Fix definition of struct sev_data_send_update_vmsa ANBZ: #8572 The current definition of struct sev_data_send_update_vmsa in include/linux/psp-sev.h does not comply with SEV API spec. Fix it here. 
Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- include/linux/psp-sev.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 05e22a17fcb5..55dd35ce920f 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -417,6 +417,7 @@ struct sev_data_send_update_data { */ struct sev_data_send_update_vmsa { u32 handle; /* In */ + u32 reserved1; u64 hdr_address; /* In */ u32 hdr_len; /* In/Out */ u32 reserved2; -- Gitee From f718b7cc6d4e17ee5ae48be936db1c484a2c1411 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 8 Apr 2021 08:07:08 -0400 Subject: [PATCH 0393/2138] anolis: KVM: SVM: Add KVM_SEV_SEND_UPDATE_VMSA command ANBZ: #8572 The command is used for encrypting the VCPU register states of CSV2 guest using the encryption context created with KVM_SEV_SEND_START. Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 115 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 +++ 2 files changed, 123 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 778dd8de52bf..678de27cc400 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1414,6 +1414,115 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +/* Userspace wants to query either header or trans length. 
*/ +static int +__sev_send_update_vmsa_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_sev_send_update_vmsa *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_vmsa *vmsa; + int ret; + + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL_ACCOUNT); + if (!vmsa) + return -ENOMEM; + + vmsa->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, vmsa, &argp->error); + + params->hdr_len = vmsa->hdr_len; + params->trans_len = vmsa->trans_len; + + if (copy_to_user((void __user *)argp->data, params, + sizeof(struct kvm_sev_send_update_vmsa))) + ret = -EFAULT; + + kfree(vmsa); + return ret; +} + +static int sev_send_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_vmsa *vmsa; + struct kvm_sev_send_update_vmsa params; + struct kvm_vcpu *vcpu; + void *hdr, *trans_data; + int ret; + + if (!sev_es_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_send_update_vmsa))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return __sev_send_update_vmsa_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.hdr_uaddr) + return -EINVAL; + + /* Get the target vcpu */ + vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); + if (!vcpu) { + pr_err("%s: invalid vcpu\n", __func__); + return -EINVAL; + } + + pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); + + /* allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) + return ret; + + trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT); + if (!trans_data) + goto e_free_hdr; + + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL); + if (!vmsa) + goto e_free_trans_data; + + vmsa->hdr_address = __psp_pa(hdr); + vmsa->hdr_len = 
params.hdr_len; + vmsa->trans_address = __psp_pa(trans_data); + vmsa->trans_len = params.trans_len; + + /* The SEND_UPDATE_VMSA command requires C-bit to be always set. */ + vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | sev_me_mask; + vmsa->guest_len = PAGE_SIZE; + vmsa->handle = sev->handle; + + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, vmsa, &argp->error); + + if (ret) + goto e_free; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + goto e_free; + } + + /* Copy packet header to userspace. */ + ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len); + +e_free: + kfree(vmsa); +e_free_trans_data: + kfree(trans_data); +e_free_hdr: + kfree(hdr); + + return ret; +} + static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -1967,6 +2076,12 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_SEV_SEND_UPDATE_DATA: r = sev_send_update_data(kvm, &sev_cmd); break; + case KVM_SEV_SEND_UPDATE_VMSA: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + r = sev_send_update_vmsa(kvm, &sev_cmd); + else + r = -EINVAL; + break; case KVM_SEV_SEND_FINISH: r = sev_send_finish(kvm, &sev_cmd); break; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 79a0007c6e0f..1c4a04525823 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2013,6 +2013,14 @@ struct kvm_sev_send_update_data { __u32 trans_len; }; +struct kvm_sev_send_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + struct kvm_sev_receive_start { __u32 handle; __u32 policy; -- Gitee From 3c5a750307ab946c861718b38e0c434576730478 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 8 Apr 2021 08:39:49 -0400 Subject: [PATCH 0394/2138] anolis: KVM: SVM: Add KVM_SEV_RECEIVE_UPDATE_VMSA 
command ANBZ: #8572 The command is used for copying the incoming buffer into the VMSA memory regions of CSV2 guest. Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 81 ++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 ++++ 2 files changed, 89 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 678de27cc400..265db91afa0e 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1698,6 +1698,81 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int sev_receive_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_receive_update_vmsa params; + struct sev_data_receive_update_vmsa *vmsa; + struct kvm_vcpu *vcpu; + void *hdr = NULL, *trans = NULL; + int ret; + + if (!sev_es_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_receive_update_vmsa))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Get the target vcpu */ + vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); + if (!vcpu) { + pr_err("%s: invalid vcpu\n", __func__); + return -EINVAL; + } + + pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto e_free_hdr; + } + + ret = -ENOMEM; + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL); + if (!vmsa) + goto e_free_trans; + + vmsa->hdr_address = __psp_pa(hdr); + vmsa->hdr_len = params.hdr_len; + vmsa->trans_address = __psp_pa(trans); + vmsa->trans_len = params.trans_len; + + /* + * 
Flush before RECEIVE_UPDATE_VMSA, the PSP encrypts the + * written VMSA memory content with the guest's key, and + * the cache may contain dirty, unencrypted data. + */ + clflush_cache_range(to_svm(vcpu)->sev_es.vmsa, PAGE_SIZE); + + /* The RECEIVE_UPDATE_VMSA command requires C-bit to be always set. */ + vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | sev_me_mask; + vmsa->guest_len = PAGE_SIZE; + vmsa->handle = sev->handle; + + ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_VMSA, vmsa, &argp->error); + + if (!ret) + vcpu->arch.guest_state_protected = true; + + kfree(vmsa); +e_free_trans: + kfree(trans); +e_free_hdr: + kfree(hdr); + + return ret; +} + static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -2094,6 +2169,12 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_SEV_RECEIVE_UPDATE_DATA: r = sev_receive_update_data(kvm, &sev_cmd); break; + case KVM_SEV_RECEIVE_UPDATE_VMSA: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + r = sev_receive_update_vmsa(kvm, &sev_cmd); + else + r = -EINVAL; + break; case KVM_SEV_RECEIVE_FINISH: r = sev_receive_finish(kvm, &sev_cmd); break; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 1c4a04525823..a12f1a4a2f32 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2039,6 +2039,14 @@ struct kvm_sev_receive_update_data { __u32 trans_len; }; +struct kvm_sev_receive_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + struct kvm_csv_batch_list_node { __u64 cmd_data_addr; __u64 addr; -- Gitee From 00bd64a1a0d5baba4f6f1704a99ccdac955b2586 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Wed, 7 Apr 2021 02:46:11 -0400 Subject: [PATCH 0395/2138] anolis: KVM: x86: Restore control registers in __set_sregs() to support CSV2 guest live migration ANBZ: #8572 When migrating a CSV2 guest to the recipient, the KVM on the recipient's 
side needs to update the guest context so that the guest can continues to run. The control register state is necessary for updating the guest context. Allows the control registers to be updated in __set_sregs() so that the CSV2 guest could continue running correctly after migrated to the recipient. Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/x86.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b5deec1bb655..6b7fb1d62c2e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11532,21 +11532,24 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, if (kvm_set_apic_base(vcpu, &apic_base_msr)) return -EINVAL; - if (vcpu->arch.guest_state_protected) + if (vcpu->arch.guest_state_protected && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) return 0; - dt.size = sregs->idt.limit; - dt.address = sregs->idt.base; - static_call(kvm_x86_set_idt)(vcpu, &dt); - dt.size = sregs->gdt.limit; - dt.address = sregs->gdt.base; - static_call(kvm_x86_set_gdt)(vcpu, &dt); + if (!vcpu->arch.guest_state_protected) { + dt.size = sregs->idt.limit; + dt.address = sregs->idt.base; + static_call(kvm_x86_set_idt)(vcpu, &dt); + dt.size = sregs->gdt.limit; + dt.address = sregs->gdt.base; + static_call(kvm_x86_set_gdt)(vcpu, &dt); - vcpu->arch.cr2 = sregs->cr2; - *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; - vcpu->arch.cr3 = sregs->cr3; - kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); - static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); + vcpu->arch.cr2 = sregs->cr2; + *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; + vcpu->arch.cr3 = sregs->cr3; + kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); + static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); + } kvm_set_cr8(vcpu, sregs->cr8); @@ -11560,6 +11563,9 @@ static int 
__set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); + if (vcpu->arch.guest_state_protected) + return 0; + if (update_pdptrs) { idx = srcu_read_lock(&vcpu->kvm->srcu); if (is_pae_paging(vcpu)) { -- Gitee From 52a85238bfeca5ed83d708596c0b940c03f6e13e Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 15 Jun 2021 11:29:13 +0800 Subject: [PATCH 0396/2138] anolis: KVM: SVM: Export MSR_AMD64_SEV_ES_GHCB to userspace for CSV2 guest ANBZ: #8572 VMCB.control.ghcb_gpa contains necessary info to support runtime CSV2 guest. At present, it includes the following points: 1. For GHCB MSR protocol, ghcb_gpa stores the negotiation result 2. For GHCB page protocol, ghcb_gpa stores the GPA of GHCB page In addition, AP VCPU's SIPI state and GHCB page mapping state are temporarily stored in KVM. When CSV2 guest was migrated to the recipient, KVM needs to restore VMCB.control.ghcb_gpa, VCPU's SIPI state and GHCB page mapping state on the source side. This patch is to support export MSR_AMD64_SEV_ES_GHCB to userspace. KVM can collect all the infos dictated above and return to userspace if userspace request to get MSR_AMD64_SEV_ES_GHCB, and KVM can restore all the infos dictated above if userspace request to set MSR_AMD64_SEV_ES_GHCB. 
Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 19 +++++++++ arch/x86/kvm/svm/svm.c | 88 ++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/svm.h | 42 +++++++++++++++++++ arch/x86/kvm/vmx/vmx.c | 1 + arch/x86/kvm/x86.c | 13 ++++++ include/uapi/linux/kvm.h | 2 + 6 files changed, 165 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 265db91afa0e..bb2192cfee7f 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3989,3 +3989,22 @@ static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } + +int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa) +{ + if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { + /* Unable to map GHCB from guest */ + vcpu_unimpl(&svm->vcpu, "Missing GHCB [%#llx] from guest\n", + ghcb_gpa); + + svm->sev_es.receiver_ghcb_map_fail = true; + return -EINVAL; + } + + svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; + svm->sev_es.receiver_ghcb_map_fail = false; + + pr_info("Mapping GHCB [%#llx] from guest at recipient\n", ghcb_gpa); + + return 0; +} diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 20a7e6937c5f..f686014d1173 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2959,6 +2959,31 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_DE_CFG: msr_info->data = svm->msr_decfg; break; + case MSR_AMD64_SEV_ES_GHCB: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + /* + * Only support userspace get/set from/to + * vmcb.control.ghcb_gpa + */ + if (!msr_info->host_initiated || + !sev_es_guest(svm->vcpu.kvm)) + return 1; + + msr_info->data = svm->vmcb->control.ghcb_gpa; + + /* Only set status bits when using GHCB page protocol */ + if (msr_info->data && + !is_ghcb_msr_protocol(msr_info->data)) { + if (svm->sev_es.ghcb) + msr_info->data |= 
GHCB_MSR_MAPPED_MASK; + + if (svm->sev_es.received_first_sipi) + msr_info->data |= + GHCB_MSR_RECEIVED_FIRST_SIPI_MASK; + } + break; + } + return 1; default: return kvm_get_msr_common(vcpu, msr_info); } @@ -3200,6 +3225,47 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) svm->msr_decfg = data; break; } + case MSR_AMD64_SEV_ES_GHCB: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + /* + * Only support userspace get/set from/to + * vmcb.control.ghcb_gpa + */ + if (!msr->host_initiated || + !sev_es_guest(svm->vcpu.kvm)) + return 1; + + /* + * Value 0 means uninitialized userspace MSR data, + * userspace needs to get the initial MSR data afterwards. + */ + if (!data) + return 0; + + /* Extract status info when using GHCB page protocol */ + if (!is_ghcb_msr_protocol(data)) { + if (!svm->sev_es.ghcb && + (data & GHCB_MSR_MAPPED_MASK)) { + /* + * This happens on the recipient of migration, + * so return an error if we cannot map the + * ghcb page. + */ + if (sev_es_ghcb_map(to_svm(vcpu), + data & ~GHCB_MSR_KVM_STATUS_MASK)) + return 1; + } + + if (data & GHCB_MSR_RECEIVED_FIRST_SIPI_MASK) + svm->sev_es.received_first_sipi = true; + + data &= ~GHCB_MSR_KVM_STATUS_MASK; + } + + svm->vmcb->control.ghcb_gpa = data; + break; + } + return 1; default: return kvm_set_msr_common(vcpu, msr); } @@ -4166,6 +4232,19 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) trace_kvm_entry(vcpu); + + /* + * For the recipient side of a CSV2 guest, fake the exit code as + * SVM_EXIT_ERR and return directly if we failed to map + * the necessary GHCB page. When handling the exit code + * afterwards, it can exit to userspace and stop the guest. 
+ */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + sev_es_guest(vcpu->kvm) && + svm->sev_es.receiver_ghcb_map_fail) { + svm->vmcb->control.exit_code = SVM_EXIT_ERR; + return EXIT_FASTPATH_NONE; + } + svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; @@ -4340,6 +4419,15 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) if (kvm && sev_es_guest(kvm)) return false; break; + case MSR_AMD64_SEV_ES_GHCB: + /* + * Only CSV2 guests support to export this MSR, this should + * be determined after KVM_CREATE_VM. + */ + if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON || + (kvm && !sev_es_guest(kvm))) + return false; + break; default: break; } diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 7cd1063da561..2e4fcf1cf9e0 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -203,6 +203,9 @@ struct vcpu_sev_es_state { u32 ghcb_sa_len; bool ghcb_sa_sync; bool ghcb_sa_free; + + /* CSV2 migrated ghcb mapping state support */ + bool receiver_ghcb_map_fail; }; struct vcpu_svm { @@ -667,6 +670,44 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu); #define GHCB_VERSION_MAX 1ULL #define GHCB_VERSION_MIN 1ULL +/* + * CSV2 live migration support: + * If MSR_AMD64_SEV_ES_GHCB in migration didn't apply GHCB MSR protocol, + * reuse bits [52-63] to indicate vcpu status. The following status are + * currently included: + * * ghcb_map: indicate whether GHCB page was mapped. The mapped GHCB + * page may be filled with GPRs before VMRUN, so we must + * remap GHCB page on the recipient's side. + * * received_first_sipi: indicate AP's INIT-SIPI-SIPI stage. Reuse + * these bits for received_first_sipi is acceptable cause + * runtime stage of guest's linux only applies GHCB page + * protocol. + * It's unlikely that the migration encounter other stages + * of guest's linux. 
Once encountered, AP bringup may fail + which will not impact user payload. + * Other bits keep their original meaning. (See GHCB Spec 2.3.1 for detail) + */ +#define GHCB_MSR_KVM_STATUS_POS 52 +#define GHCB_MSR_KVM_STATUS_BITS 12 +#define GHCB_MSR_KVM_STATUS_MASK \ + ((BIT_ULL(GHCB_MSR_KVM_STATUS_BITS) - 1) \ + << GHCB_MSR_KVM_STATUS_POS) +#define GHCB_MSR_MAPPED_POS 63 +#define GHCB_MSR_MAPPED_BITS 1 +#define GHCB_MSR_MAPPED_MASK \ + ((BIT_ULL(GHCB_MSR_MAPPED_BITS) - 1) \ + << GHCB_MSR_MAPPED_POS) +#define GHCB_MSR_RECEIVED_FIRST_SIPI_POS 62 +#define GHCB_MSR_RECEIVED_FIRST_SIPI_BITS 1 +#define GHCB_MSR_RECEIVED_FIRST_SIPI_MASK \ + ((BIT_ULL(GHCB_MSR_RECEIVED_FIRST_SIPI_BITS) - 1) \ + << GHCB_MSR_RECEIVED_FIRST_SIPI_POS) + + +static inline bool is_ghcb_msr_protocol(u64 ghcb_val) +{ + return ghcb_val & GHCB_MSR_INFO_MASK; +} extern unsigned int max_sev_asid; @@ -696,6 +737,7 @@ void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa); void sev_es_unmap_ghcb(struct vcpu_svm *svm); int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len); +int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa); /* vmenter.S */ diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 479ef26626f2..c23f811694c4 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7025,6 +7025,7 @@ static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index) return nested; case MSR_AMD64_VIRT_SPEC_CTRL: case MSR_AMD64_TSC_RATIO: + case MSR_AMD64_SEV_ES_GHCB: /* This is AMD only. 
*/ return false; default: diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6b7fb1d62c2e..9d7a088bf992 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1564,6 +1564,8 @@ static const u32 emulated_msrs_all[] = { MSR_K7_HWCR, MSR_KVM_POLL_CONTROL, + + MSR_AMD64_SEV_ES_GHCB, }; static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)]; @@ -4636,6 +4638,17 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_X86_NOTIFY_VMEXIT: r = kvm_caps.has_notify_vmexit; break; + case KVM_CAP_SEV_ES_GHCB: + r = 0; + + /* Both CSV2 and SEV-ES guests support MSR_AMD64_SEV_ES_GHCB, + * but only CSV2 guest support export to emulate + * MSR_AMD64_SEV_ES_GHCB. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + r = static_call(kvm_x86_has_emulated_msr)(kvm, + MSR_AMD64_SEV_ES_GHCB); + break; default: break; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index a12f1a4a2f32..11e57082efd7 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1201,6 +1201,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 +#define KVM_CAP_SEV_ES_GHCB 500 + #ifdef KVM_CAP_IRQ_ROUTING struct kvm_irq_routing_irqchip { -- Gitee From 570f73c488606bd40a511dfdf6e62074be69e24c Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 8 Aug 2023 23:47:22 +0800 Subject: [PATCH 0397/2138] anolis: KVM: x86: Introduce control_{pre,post}_system_reset ioctl interfaces ANBZ: #8572 In the upcoming patches, we will support for rebooting CSV2 guests. In order to support rebooting CSV2 guest, we will set vcpu->arch.guest_state_protected to false, before VMRUN, so that VMM can initialize vCPU states and VMSA, and then set vcpu->arch.guest_state_protected back to true to bypass unexpected behaviour in KVM. Besides, cache flush is necessary during rebooting a memory encrypted guest. 
Introduce control_{pre,post}_system_reset ioctl interfaces to support rebooting memory encrypted guests correctly. Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/include/asm/kvm-x86-ops.h | 2 ++ arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/svm/sev.c | 10 ++++++++++ arch/x86/kvm/svm/svm.c | 2 ++ arch/x86/kvm/svm/svm.h | 3 +++ arch/x86/kvm/x86.c | 14 ++++++++++++++ include/uapi/linux/kvm.h | 4 ++++ 7 files changed, 37 insertions(+) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 3ab3e361ea81..b54e72a0100b 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -136,6 +136,8 @@ KVM_X86_OP(complete_emulated_msr) KVM_X86_OP(vcpu_deliver_sipi_vector) KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons); KVM_X86_OP_OPTIONAL(vm_attestation) +KVM_X86_OP_OPTIONAL(control_pre_system_reset) +KVM_X86_OP_OPTIONAL(control_post_system_reset) #undef KVM_X86_OP #undef KVM_X86_OP_OPTIONAL diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 32fb7c0cdbd7..6ff68a0e43de 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1753,6 +1753,8 @@ struct kvm_x86_ops { unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu); int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len); + int (*control_pre_system_reset)(struct kvm *kvm); + int (*control_post_system_reset)(struct kvm *kvm); }; struct kvm_x86_nested_ops { diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index bb2192cfee7f..88f360e1d74c 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -4008,3 +4008,13 @@ int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa) return 0; } + +int csv_control_pre_system_reset(struct kvm *kvm) +{ + return 0; +} + +int csv_control_post_system_reset(struct kvm *kvm) +{ + 
return 0; +} diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index f686014d1173..c89e116eea00 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5162,6 +5162,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons, .vm_attestation = sev_vm_attestation, + .control_pre_system_reset = csv_control_pre_system_reset, + .control_post_system_reset = csv_control_post_system_reset, }; /* diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 2e4fcf1cf9e0..9d95c1624a59 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -739,6 +739,9 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm); int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len); int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa); +int csv_control_pre_system_reset(struct kvm *kvm); +int csv_control_post_system_reset(struct kvm *kvm); + /* vmenter.S */ void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9d7a088bf992..bace46cf441e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7120,6 +7120,20 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); break; } + case KVM_CONTROL_PRE_SYSTEM_RESET: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + kvm_x86_ops.control_pre_system_reset) + r = static_call(kvm_x86_control_pre_system_reset)(kvm); + else + r = -ENOTTY; + break; + case KVM_CONTROL_POST_SYSTEM_RESET: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + kvm_x86_ops.control_post_system_reset) + r = static_call(kvm_x86_control_post_system_reset)(kvm); + else + r = -ENOTTY; + break; default: r = -ENOTTY; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 11e57082efd7..5c0859e7597f 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1587,6 
+1587,10 @@ struct kvm_s390_ucas_mapping { #define KVM_GET_DEVICE_ATTR _IOW(KVMIO, 0xe2, struct kvm_device_attr) #define KVM_HAS_DEVICE_ATTR _IOW(KVMIO, 0xe3, struct kvm_device_attr) +/* ioctls for control vm during system reset */ +#define KVM_CONTROL_PRE_SYSTEM_RESET _IO(KVMIO, 0xe8) +#define KVM_CONTROL_POST_SYSTEM_RESET _IO(KVMIO, 0xe9) + /* * ioctls for vcpu fds */ -- Gitee From 7afd05b443e83b3aa7cbad7bafe0b827bd7bed88 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 15 Apr 2021 07:56:55 -0400 Subject: [PATCH 0398/2138] anolis: KVM: SVM: Add support for rebooting CSV2 guest ANBZ: #8572 Currently, reboot a CSV2 guest is unsupported because vCPU state is encrypted and can't be initialized when guest reboots to execute OVMF code. In order to support reboot a CSV2 guest, make a backup of the encrypted VMSA before booting the guest, and restore VMSA from the backup before rebooting the guest. Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 59 ++++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/svm.c | 10 +++++++ arch/x86/kvm/svm/svm.h | 2 ++ 3 files changed, 71 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 88f360e1d74c..165947f083f8 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -701,6 +701,18 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu, * MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set. */ svm_enable_lbrv(vcpu); + + /* + * Backup encrypted vmsa to support rebooting CSV2 guest. The + * clflush_cache_range() is necessary to invalidate prefetched + * memory area pointed by svm->sev_es.vmsa so that we can read + * fresh memory updated by PSP. 
+ */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + memcpy(svm->sev_es.reset_vmsa, svm->sev_es.vmsa, PAGE_SIZE); + } + return 0; } @@ -2647,6 +2659,8 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu) if (svm->sev_es.ghcb_sa_free) kvfree(svm->sev_es.ghcb_sa); + + __free_page(virt_to_page(svm->sev_es.reset_vmsa)); } static void dump_ghcb(struct vcpu_svm *svm) @@ -4011,10 +4025,55 @@ int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa) int csv_control_pre_system_reset(struct kvm *kvm) { + struct kvm_vcpu *vcpu; + unsigned long i; + int ret; + + if (!sev_es_guest(kvm)) + return 0; + + kvm_for_each_vcpu(i, vcpu, kvm) { + ret = mutex_lock_killable(&vcpu->mutex); + if (ret) + return ret; + + vcpu->arch.guest_state_protected = false; + + mutex_unlock(&vcpu->mutex); + } + return 0; } int csv_control_post_system_reset(struct kvm *kvm) { + struct kvm_vcpu *vcpu; + unsigned long i; + int ret; + + if (!sev_es_guest(kvm)) + return 0; + + /* Flush both host and guest caches of VMSA */ + wbinvd_on_all_cpus(); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + ret = mutex_lock_killable(&vcpu->mutex); + if (ret) + return ret; + + memcpy(svm->sev_es.vmsa, svm->sev_es.reset_vmsa, PAGE_SIZE); + + /* Flush encrypted vmsa to memory */ + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + + svm->vcpu.arch.guest_state_protected = true; + svm->sev_es.received_first_sipi = false; + + mutex_unlock(&vcpu->mutex); + } + return 0; } diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index c89e116eea00..e10264b375be 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1442,6 +1442,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) struct vcpu_svm *svm; struct page *vmcb01_page; struct page *vmsa_page = NULL; + struct page *reset_vmsa_page = NULL; int err; BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0); @@ -1461,6 +1462,10 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) 
if (!vmsa_page) goto error_free_vmcb_page; + reset_vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (!reset_vmsa_page) + goto error_free_vmsa_page; + /* * SEV-ES guests maintain an encrypted version of their FPU * state which is restored and saved on VMRUN and VMEXIT. @@ -1489,6 +1494,9 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) if (vmsa_page) svm->sev_es.vmsa = page_address(vmsa_page); + if (reset_vmsa_page) + svm->sev_es.reset_vmsa = page_address(reset_vmsa_page); + svm->guest_state_loaded = false; return 0; @@ -1496,6 +1504,8 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) error_free_vmsa_page: if (vmsa_page) __free_page(vmsa_page); + if (reset_vmsa_page) + __free_page(reset_vmsa_page); error_free_vmcb_page: __free_page(vmcb01_page); out: diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 9d95c1624a59..0c50689a018a 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -206,6 +206,8 @@ struct vcpu_sev_es_state { /* CSV2 migrated ghcb mapping state support */ bool receiver_ghcb_map_fail; + /* CSV2 reboot vmsa */ + struct vmcb_save_area *reset_vmsa; }; struct vcpu_svm { -- Gitee From 9cd1b4afdf63586c186a7537b0b29f721daa7b9b Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sat, 6 May 2023 16:01:25 +0800 Subject: [PATCH 0399/2138] anolis: KVM: SVM: Force flush caches before reboot CSV guest ANBZ: #8572 For memory encrypted guest, its pages' encrypt status will changed at runtime. When user reboot the guest, the pages' encrypt status during last boot were ignored. 
So during the boot flow of reboot, there may be 2 versions of memory data lies in cache as follows: +--------+ | | | | +--------------+ --+ | | | | \ |________| | | \ cacheline for -> |________| <-+ | | \ pa1(c=0) | | \ |______________| \ | | \_ 64 bytes aligned <- pa1 \ | | _ |______________| 4K | | / | | page cacheline for |________| / | | / pa1(c=1) -> |________| <-+ | | / | | | | / | | | | / | | | | / | | +--------------+ --+ | | | | If the older version cache was flushed after that of newer version, and guest read the memory again, then it will get corrupted data and may lead to crash. In this change, for any memory encrypted guest, the cache is forcibly flushed to memory before the next boot flow, which ensures that memory access is up-to-date. Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2913 --- arch/x86/kvm/svm/sev.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 165947f083f8..bd252d3518ff 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -4051,12 +4051,12 @@ int csv_control_post_system_reset(struct kvm *kvm) unsigned long i; int ret; + /* Flush both host and guest caches before next boot flow */ + wbinvd_on_all_cpus(); + if (!sev_es_guest(kvm)) return 0; - /* Flush both host and guest caches of VMSA */ - wbinvd_on_all_cpus(); - kvm_for_each_vcpu(i, vcpu, kvm) { struct vcpu_svm *svm = to_svm(vcpu); -- Gitee From edc82a3f362a960de11bec129679e6c813b507cb Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sun, 10 Jan 2021 14:57:21 -0500 Subject: [PATCH 0400/2138] anolis: KVM: SVM: Fix the available ASID range for CSV2 guest ANBZ: #8573 All the ASIDs in range [1, max_sev_asid] are available for CSV2 guest, regardless of the value of min_sev_asid. 
Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2914 --- arch/x86/kvm/svm/sev.c | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index bd252d3518ff..6f567fcc9393 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -177,6 +177,12 @@ static int sev_asid_new(struct kvm_sev_info *sev) mutex_lock(&sev_bitmap_lock); + /* + * No matter what the min_sev_asid is, all asids in range + * [1, max_sev_asid] can be used for CSV2 guest on Hygon CPUs. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + max_asid = max_sev_asid; again: asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid); if (asid > max_asid) { @@ -2535,11 +2541,19 @@ void __init sev_hardware_setup(void) goto out; } - /* Has the system been allocated ASIDs for SEV-ES? */ - if (min_sev_asid == 1) - goto out; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + /* + * Ths ASIDs from 1 to max_sev_asid are available for hygon + * CSV2 guest. + */ + sev_es_asid_count = max_sev_asid; + } else { + /* Has the system been allocated ASIDs for SEV-ES? */ + if (min_sev_asid == 1) + goto out; - sev_es_asid_count = min_sev_asid - 1; + sev_es_asid_count = min_sev_asid - 1; + } WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count)); sev_es_supported = true; @@ -2555,7 +2569,10 @@ void __init sev_hardware_setup(void) pr_info("%s %s (ASIDs %u - %u)\n", boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "CSV2" : "SEV-ES", sev_es_supported ? "enabled" : "disabled", - min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1); + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? + 1 : (min_sev_asid > 1 ? 1 : 0), + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? 
+ max_sev_asid : min_sev_asid - 1); sev_enabled = sev_supported; sev_es_enabled = sev_es_supported; -- Gitee From bf4ecb8a20ab751fc5d7e00c56e8ce4d78300a4f Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 6 Apr 2023 09:03:58 +0800 Subject: [PATCH 0401/2138] anolis: x86/csv2: Keep in atomic context when holding ghcb page if the #VC comes from userspace ANBZ: #8573 In function vc_raw_handle_exception(), it will holds ghcb page and calls __sev_get_ghcb() <- holding ghcb page to communicate with host vc_init_em_etxt() vc_handle_exitcode() __sev_put_ghcb() <- no longer holding ghcb page after the communication to emulate instruction which cause #VC. When the #VC comes from userspace, the code path user_exc_vmm_communication() vc_raw_handle_exception() cannot keep memory access in atomic context, this may lead to direct page fault handling if the emulation process access userspace address which doesn't exist in memory. For userspace address page fault handling, if it's not in the atomic context or the caller doesn't call pagefault_disable(), the irq may be enabled and there is a risk of generating more #VC. So it's necessary to switch to atomic context before emulate instructions which cause #VC. Add __preempt_count_{add,sub}() pair to keep the code between __sev_get_ghcb() and __sev_put_ghcb() in atomic context if #VC comes from userspace. If memory access fails during emulating, the caller will construct page fault info and forward a page fault later. 
Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2914 --- arch/x86/kernel/sev.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c index 9905dc0e0b09..62eef7824ab1 100644 --- a/arch/x86/kernel/sev.c +++ b/arch/x86/kernel/sev.c @@ -1852,6 +1852,15 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_co struct ghcb *ghcb; bool ret = true; + /* + * Make sure the codes between __sev_get_ghcb() and __sev_put_ghcb() + * keep in atomic context. If #VC comes from kernel mode, then the + * codes here are in atomic context. If #VC comes from user mode, then + * it's necessary to switch to atomic context manually. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && !in_nmi()) + __preempt_count_add(HARDIRQ_OFFSET); + ghcb = __sev_get_ghcb(&state); vc_ghcb_invalidate(ghcb); @@ -1862,6 +1871,9 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_co __sev_put_ghcb(&state); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && !in_nmi()) + __preempt_count_sub(HARDIRQ_OFFSET); + /* Done - now check the result */ switch (result) { case ES_OK: -- Gitee From 75f8ed48a4a067bd96d20f8412446e05ca1d5016 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sun, 7 Jan 2024 04:47:42 +0800 Subject: [PATCH 0402/2138] anolis: x86/head/64: Flush caches for .bss..decrypted section after CR3 switches to early_top_pgt ANBZ: #8573 The memory region of .bss..decrypted section maybe mapped with encryption before early boot stage of Linux. If the correspond stale caches lives in earlier stage were not flushed before we access that memory region in later stages, then Linux will crash because the stale caches will pollute the memory. Fix this issue by flush the caches with encrypted mapping before we access .bss..decrypted section. 
Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2914 --- arch/x86/kernel/head64.c | 48 +++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/head_64.S | 10 ++++++++ 2 files changed, 58 insertions(+) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index c58213bce294..ecedc296b303 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -318,6 +318,54 @@ unsigned long __head __startup_64(unsigned long physaddr, return sme_postprocess_startup(bp, pmd); } +#ifdef CONFIG_AMD_MEM_ENCRYPT +extern bool bsp_flush_bss_decrypted_section_done; + +void __ref early_clflush_bss_decrypted_section(void) +{ + /* Only allow bsp flush these caches and the bsp must at early boot stage */ + if (bsp_flush_bss_decrypted_section_done) + return; + + if (read_cr3_pa() != __pa_nodebug(early_top_pgt)) + return; + + if (sme_get_me_mask()) { + unsigned long vaddr, vaddr_end; + char *cl, *start, *end; + + /* + * The memory region of .bss..decrypted section maybe mapped + * with encryption in earlier stage. If the correspond stale + * caches lives in earlier stage were not flushed before we + * access that memory region, then Linux will crash later + * because the stale caches will pollute the memory. So we + * need flush the caches with encrypted mapping before we + * access .bss..decrypted section. + * + * The function __startup_64() have already filled the + * encrypted mapping for .bss..decrypted section, use that + * mapping here. + */ + vaddr = (unsigned long)__start_bss_decrypted - + __START_KERNEL_map + phys_base; + vaddr_end = (unsigned long)__end_bss_decrypted - + __START_KERNEL_map + phys_base; + + /* Hardcode cl-size to 64 at this stage. 
*/ + start = (char *)(vaddr & ~63); + end = (char *)((vaddr_end + 63) & ~63); + + asm volatile("mfence" : : : "memory"); + for (cl = start; cl != end; cl += 64) + clflush(cl); + asm volatile("mfence" : : : "memory"); + } + + bsp_flush_bss_decrypted_section_done = true; +} +#endif + /* Wipe all early page tables except for the kernel symbol map */ static void __init reset_early_page_tables(void) { diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index e6eaee8509ce..9c2d7e2b5edb 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -375,6 +375,14 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL) shrq $32, %rdx wrmsr +#ifdef CONFIG_AMD_MEM_ENCRYPT + /* + * Ensure .bss.decrypted memory's stale caches which lived in earlier + * stage to be flushed. + */ + call early_clflush_bss_decrypted_section +#endif + /* Setup and Load IDT */ call early_setup_idt @@ -511,6 +519,8 @@ SYM_CODE_END(vc_boot_ghcb) SYM_DATA(initial_code, .quad x86_64_start_kernel) #ifdef CONFIG_AMD_MEM_ENCRYPT SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb) +SYM_DATA(bsp_flush_bss_decrypted_section_done, .byte 0x0) + .balign 8 #endif SYM_DATA(trampoline_lock, .quad 0); -- Gitee From 93947590eed7724bea05751fcf029bab53ff1443 Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Sat, 3 Feb 2024 13:45:20 +0100 Subject: [PATCH 0403/2138] KVM: x86: Fix KVM_GET_MSRS stack info leak ANBZ: #8573 commit 3376ca3f1a2075eaa23c5576c47d04d7e8a4adda upstream. Commit 6abe9c1386e5 ("KVM: X86: Move ignore_msrs handling upper the stack") changed the 'ignore_msrs' handling, including sanitizing return values to the caller. This was fine until commit 12bc2132b15e ("KVM: X86: Do the same ignore_msrs check for feature msrs") which allowed non-existing feature MSRs to be ignored, i.e. to not generate an error on the ioctl() level. It even tried to preserve the sanitization of the return value. 
However, the logic is flawed, as '*data' will be overwritten again with the uninitialized stack value of msr.data. Fix this by simplifying the logic and always initializing msr.data, vanishing the need for an additional error exit path. Fixes: 12bc2132b15e ("KVM: X86: Do the same ignore_msrs check for feature msrs") Signed-off-by: Mathias Krause Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20240203124522.592778-2-minipli@grsecurity.net Signed-off-by: Sean Christopherson Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2914 --- arch/x86/kvm/x86.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bace46cf441e..dfa0d0cd671e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1706,22 +1706,17 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) struct kvm_msr_entry msr; int r; + /* Unconditionally clear the output for simplicity */ + msr.data = 0; msr.index = index; r = kvm_get_msr_feature(&msr); - if (r == KVM_MSR_RET_INVALID) { - /* Unconditionally clear the output for simplicity */ - *data = 0; - if (kvm_msr_ignored_check(index, 0, false)) - r = 0; - } - - if (r) - return r; + if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false)) + r = 0; *data = msr.data; - return 0; + return r; } static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) -- Gitee From 270509e7ab5cd92963b9e0310bd83266e181521f Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Fri, 8 Dec 2023 10:56:48 +0800 Subject: [PATCH 0404/2138] docs: perf: Add description for Synopsys DesignWare PCIe PMU driver ANBZ: #8565 commit cae40614cdd61a6601dc87c6e07c06bf642a125b upstream. Alibaba's T-Head Yitan 710 SoC includes Synopsys' DesignWare Core PCIe controller which implements PMU for performance and functional debugging to facilitate system maintenance. 
Document it to provide guidance on how to use it. Signed-off-by: Shuai Xue Reviewed-by: Baolin Wang Reviewed-by: Jonathan Cameron Reviewed-by: Yicong Yang Tested-by: Ilkka Koskinen Link: https://lore.kernel.org/r/20231208025652.87192-2-xueshuai@linux.alibaba.com Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Link: https://gitee.com/anolis/cloud-kernel/pulls/2900 --- .../admin-guide/perf/dwc_pcie_pmu.rst | 94 +++++++++++++++++++ Documentation/admin-guide/perf/index.rst | 1 + 2 files changed, 95 insertions(+) create mode 100644 Documentation/admin-guide/perf/dwc_pcie_pmu.rst diff --git a/Documentation/admin-guide/perf/dwc_pcie_pmu.rst b/Documentation/admin-guide/perf/dwc_pcie_pmu.rst new file mode 100644 index 000000000000..d47cd229d710 --- /dev/null +++ b/Documentation/admin-guide/perf/dwc_pcie_pmu.rst @@ -0,0 +1,94 @@ +====================================================================== +Synopsys DesignWare Cores (DWC) PCIe Performance Monitoring Unit (PMU) +====================================================================== + +DesignWare Cores (DWC) PCIe PMU +=============================== + +The PMU is a PCIe configuration space register block provided by each PCIe Root +Port in a Vendor-Specific Extended Capability named RAS D.E.S (Debug, Error +injection, and Statistics). + +As the name indicates, the RAS DES capability supports system level +debugging, AER error injection, and collection of statistics. To facilitate +collection of statistics, Synopsys DesignWare Cores PCIe controller +provides the following two features: + +- one 64-bit counter for Time Based Analysis (RX/TX data throughput and + time spent in each low-power LTSSM state) and +- one 32-bit counter for Event Counting (error and non-error events for + a specified lane) + +Note: There is no interrupt for counter overflow. 
+ +Time Based Analysis +------------------- + +Using this feature you can obtain information regarding RX/TX data +throughput and time spent in each low-power LTSSM state by the controller. +The PMU measures data in two categories: + +- Group#0: Percentage of time the controller stays in LTSSM states. +- Group#1: Amount of data processed (Units of 16 bytes). + +Lane Event counters +------------------- + +Using this feature you can obtain Error and Non-Error information in +specific lane by the controller. The PMU event is selected by all of: + +- Group i +- Event j within the Group i +- Lane k + +Some of the events only exist for specific configurations. + +DesignWare Cores (DWC) PCIe PMU Driver +======================================= + +This driver adds PMU devices for each PCIe Root Port named based on the BDF of +the Root Port. For example, + + 30:03.0 PCI bridge: Device 1ded:8000 (rev 01) + +the PMU device name for this Root Port is dwc_rootport_3018. + +The DWC PCIe PMU driver registers a perf PMU driver, which provides +description of available events and configuration options in sysfs, see +/sys/bus/event_source/devices/dwc_rootport_{bdf}. + +The "format" directory describes format of the config fields of the +perf_event_attr structure. The "events" directory provides configuration +templates for all documented events. For example, +"Rx_PCIe_TLP_Data_Payload" is an equivalent of "eventid=0x22,type=0x1". 
+ +The "perf list" command shall list the available events from sysfs, e.g.:: + + $# perf list | grep dwc_rootport + <...> + dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ [Kernel PMU event] + <...> + dwc_rootport_3018/rx_memory_read,lane=?/ [Kernel PMU event] + +Time Based Analysis Event Usage +------------------------------- + +Example usage of counting PCIe RX TLP data payload (Units of bytes):: + + $# perf stat -a -e dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ + +The average RX/TX bandwidth can be calculated using the following formula: + + PCIe RX Bandwidth = Rx_PCIe_TLP_Data_Payload / Measure_Time_Window + PCIe TX Bandwidth = Tx_PCIe_TLP_Data_Payload / Measure_Time_Window + +Lane Event Usage +------------------------------- + +Each lane has the same event set and to avoid generating a list of hundreds +of events, the user need to specify the lane ID explicitly, e.g.:: + + $# perf stat -a -e dwc_rootport_3018/rx_memory_read,lane=4/ + +The driver does not support sampling, therefore "perf record" will not +work. Per-task (without "-a") perf sessions are not supported. diff --git a/Documentation/admin-guide/perf/index.rst b/Documentation/admin-guide/perf/index.rst index f60be04e4e33..6bc7739fddb5 100644 --- a/Documentation/admin-guide/perf/index.rst +++ b/Documentation/admin-guide/perf/index.rst @@ -19,6 +19,7 @@ Performance monitor support arm_dsu_pmu thunderx2-pmu alibaba_pmu + dwc_pcie_pmu nvidia-pmu meson-ddr-pmu cxl -- Gitee From 864756bad1da57c56c48c4969bb288543d620394 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Fri, 8 Dec 2023 10:56:49 +0800 Subject: [PATCH 0405/2138] PCI: Add Alibaba Vendor ID to linux/pci_ids.h ANBZ: #8565 commit ad6534c626fedd818718d76c36d69c7d8e7b61cc upstream. The Alibaba Vendor ID (0x1ded) is now used by Alibaba elasticRDMA ("erdma") and will be shared with the upcoming PCIe PMU ("dwc_pcie_pmu"). Move the Vendor ID to linux/pci_ids.h so that it can shared by several drivers later. 
Signed-off-by: Shuai Xue Acked-by: Bjorn Helgaas # pci_ids.h Tested-by: Ilkka Koskinen Link: https://lore.kernel.org/r/20231208025652.87192-3-xueshuai@linux.alibaba.com Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2900 --- drivers/infiniband/hw/erdma/erdma_hw.h | 2 -- include/linux/pci_ids.h | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h index 9d316fdc6f9a..a155519a862f 100644 --- a/drivers/infiniband/hw/erdma/erdma_hw.h +++ b/drivers/infiniband/hw/erdma/erdma_hw.h @@ -11,8 +11,6 @@ #include /* PCIe device related definition. */ -#define PCI_VENDOR_ID_ALIBABA 0x1ded - #define ERDMA_PCI_WIDTH 64 #define ERDMA_FUNC_BAR 0 #define ERDMA_MISX_BAR 2 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 9e9feaedbd2b..d0f900d4c35b 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2610,6 +2610,8 @@ #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 +#define PCI_VENDOR_ID_ALIBABA 0x1ded + #define PCI_VENDOR_ID_TEHUTI 0x1fc9 #define PCI_DEVICE_ID_TEHUTI_3009 0x3009 #define PCI_DEVICE_ID_TEHUTI_3010 0x3010 -- Gitee From 2a907b9334a16677085f3b8f95c62fe0f9ef1049 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Fri, 8 Dec 2023 10:56:50 +0800 Subject: [PATCH 0406/2138] PCI: Move pci_clear_and_set_dword() helper to PCI header ANBZ: #8565 commit ac16087134b837d42b75bb1c741070b6c142f258 upstream. The clear and set pattern is commonly used for accessing PCI config, move the helper pci_clear_and_set_dword() from aspm.c into PCI header. In addition, rename to pci_clear_and_set_config_dword() to retain the "config" information and match the other accessors. No functional change intended. 
Signed-off-by: Shuai Xue Acked-by: Bjorn Helgaas Tested-by: Ilkka Koskinen Link: https://lore.kernel.org/r/20231208025652.87192-4-xueshuai@linux.alibaba.com Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2900 --- drivers/pci/access.c | 12 ++++++++ drivers/pci/pcie/aspm.c | 65 +++++++++++++++++++---------------------- include/linux/pci.h | 2 ++ 3 files changed, 44 insertions(+), 35 deletions(-) diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 6554a2e89d36..6449056b57dd 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -598,3 +598,15 @@ int pci_write_config_dword(const struct pci_dev *dev, int where, return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_write_config_dword); + +void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos, + u32 clear, u32 set) +{ + u32 val; + + pci_read_config_dword(dev, pos, &val); + val &= ~clear; + val |= set; + pci_write_config_dword(dev, pos, val); +} +EXPORT_SYMBOL(pci_clear_and_set_config_dword); diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 0aef6dc055b9..44e952e1a0ab 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -423,17 +423,6 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint) } } -static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos, - u32 clear, u32 set) -{ - u32 val; - - pci_read_config_dword(pdev, pos, &val); - val &= ~clear; - val |= set; - pci_write_config_dword(pdev, pos, val); -} - /* Calculate L1.2 PM substate timing parameters */ static void aspm_calc_l12_info(struct pcie_link_state *link, u32 parent_l1ss_cap, u32 child_l1ss_cap) @@ -494,10 +483,12 @@ static void aspm_calc_l12_info(struct pcie_link_state *link, cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK; if (pl1_2_enables || cl1_2_enables) { - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1_2_MASK, 0); - 
pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(child, + child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(parent, + parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); } /* Program T_POWER_ON times in both ports */ @@ -505,22 +496,26 @@ static void aspm_calc_l12_info(struct pcie_link_state *link, pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2); /* Program Common_Mode_Restore_Time in upstream device */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); /* Program LTR_L1.2_THRESHOLD time in both ports */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, + ctl1); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, + ctl1); if (pl1_2_enables || cl1_2_enables) { - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0, - pl1_2_enables); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0, - cl1_2_enables); + pci_clear_and_set_config_dword(parent, + parent->l1ss + PCI_L1SS_CTL1, 0, + pl1_2_enables); + pci_clear_and_set_config_dword(child, + child->l1ss + PCI_L1SS_CTL1, 0, + cl1_2_enables); } } @@ -680,10 +675,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) */ /* Disable all L1 substates */ - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - 
PCI_L1SS_CTL1_L1SS_MASK, 0); - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, 0); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, 0); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, 0); /* * If needed, disable L1, and it gets enabled later * in pcie_config_aspm_link(). @@ -706,10 +701,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* Enable what we need to enable */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, val); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, val); } static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) diff --git a/include/linux/pci.h b/include/linux/pci.h index 2d1fb935a8c8..f75eb4d3e30c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1216,6 +1216,8 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val); int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val); int pci_write_config_word(const struct pci_dev *dev, int where, u16 val); int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val); +void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos, + u32 clear, u32 set); int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); -- Gitee From bf9b0efe9636f3ce39645d051e21789f0aa20e1f Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Fri, 8 Dec 2023 10:56:51 +0800 Subject: [PATCH 0407/2138] drivers/perf: add DesignWare PCIe PMU driver ANBZ: #8565 commit 
af9597adc2f1e3609c67c9792a2469bb64e43ae9 upstream. This commit adds the PCIe Performance Monitoring Unit (PMU) driver support for T-Head Yitian SoC chip. Yitian is based on the Synopsys PCI Express Core controller IP which provides statistics feature. The PMU is a PCIe configuration space register block provided by each PCIe Root Port in a Vendor-Specific Extended Capability named RAS D.E.S (Debug, Error injection, and Statistics). To facilitate collection of statistics the controller provides the following two features for each Root Port: - one 64-bit counter for Time Based Analysis (RX/TX data throughput and time spent in each low-power LTSSM state) and - one 32-bit counter for Event Counting (error and non-error events for a specified lane) Note: There is no interrupt for counter overflow. This driver adds PMU devices for each PCIe Root Port. And the PMU device is named based the BDF of Root Port. For example, 30:03.0 PCI bridge: Device 1ded:8000 (rev 01) the PMU device name for this Root Port is dwc_rootport_3018. 
Example usage of counting PCIe RX TLP data payload (Units of bytes):: $# perf stat -a -e dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ average RX bandwidth can be calculated like this: PCIe TX Bandwidth = Rx_PCIe_TLP_Data_Payload / Measure_Time_Window Signed-off-by: Shuai Xue Reviewed-by: Baolin Wang Reviewed-by: Jonathan Cameron Reviewed-by: Yicong Yang Reviewed-and-tested-by: Ilkka Koskinen Link: https://lore.kernel.org/r/20231208025652.87192-5-xueshuai@linux.alibaba.com [will: Fix sparse error due to use of uninitialised 'vsec' symbol in dwc_pcie_match_des_cap()] Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Link: https://gitee.com/anolis/cloud-kernel/pulls/2900 --- drivers/perf/Kconfig | 7 + drivers/perf/Makefile | 1 + drivers/perf/dwc_pcie_pmu.c | 792 ++++++++++++++++++++++++++++++++++++ 3 files changed, 800 insertions(+) create mode 100644 drivers/perf/dwc_pcie_pmu.c diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 273d67ecf6d2..ec6e0d9194a1 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -217,6 +217,13 @@ config MARVELL_CN10K_DDR_PMU Enable perf support for Marvell DDR Performance monitoring event on CN10K platform. +config DWC_PCIE_PMU + tristate "Synopsys DesignWare PCIe PMU" + depends on PCI + help + Enable perf support for Synopsys DesignWare PCIe PMU Performance + monitoring event on platform including the Alibaba Yitian 710. 
+ source "drivers/perf/arm_cspmu/Kconfig" source "drivers/perf/amlogic/Kconfig" diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index 16b3ec4db916..a06338e3401c 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o obj-$(CONFIG_ALIBABA_UNCORE_DRW_PMU) += alibaba_uncore_drw_pmu.o +obj-$(CONFIG_DWC_PCIE_PMU) += dwc_pcie_pmu.o obj-$(CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += arm_cspmu/ obj-$(CONFIG_MESON_DDR_PMU) += amlogic/ obj-$(CONFIG_CXL_PMU) += cxl_pmu.o diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c new file mode 100644 index 000000000000..957058ad0099 --- /dev/null +++ b/drivers/perf/dwc_pcie_pmu.c @@ -0,0 +1,792 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare PCIe PMU driver + * + * Copyright (C) 2021-2023 Alibaba Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DWC_PCIE_VSEC_RAS_DES_ID 0x02 +#define DWC_PCIE_EVENT_CNT_CTL 0x8 + +/* + * Event Counter Data Select includes two parts: + * - 27-24: Group number(4-bit: 0..0x7) + * - 23-16: Event number(8-bit: 0..0x13) within the Group + * + * Put them together as in TRM. 
+ */ +#define DWC_PCIE_CNT_EVENT_SEL GENMASK(27, 16) +#define DWC_PCIE_CNT_LANE_SEL GENMASK(11, 8) +#define DWC_PCIE_CNT_STATUS BIT(7) +#define DWC_PCIE_CNT_ENABLE GENMASK(4, 2) +#define DWC_PCIE_PER_EVENT_OFF 0x1 +#define DWC_PCIE_PER_EVENT_ON 0x3 +#define DWC_PCIE_EVENT_CLEAR GENMASK(1, 0) +#define DWC_PCIE_EVENT_PER_CLEAR 0x1 + +#define DWC_PCIE_EVENT_CNT_DATA 0xC + +#define DWC_PCIE_TIME_BASED_ANAL_CTL 0x10 +#define DWC_PCIE_TIME_BASED_REPORT_SEL GENMASK(31, 24) +#define DWC_PCIE_TIME_BASED_DURATION_SEL GENMASK(15, 8) +#define DWC_PCIE_DURATION_MANUAL_CTL 0x0 +#define DWC_PCIE_DURATION_1MS 0x1 +#define DWC_PCIE_DURATION_10MS 0x2 +#define DWC_PCIE_DURATION_100MS 0x3 +#define DWC_PCIE_DURATION_1S 0x4 +#define DWC_PCIE_DURATION_2S 0x5 +#define DWC_PCIE_DURATION_4S 0x6 +#define DWC_PCIE_DURATION_4US 0xFF +#define DWC_PCIE_TIME_BASED_TIMER_START BIT(0) +#define DWC_PCIE_TIME_BASED_CNT_ENABLE 0x1 + +#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW 0x14 +#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH 0x18 + +/* Event attributes */ +#define DWC_PCIE_CONFIG_EVENTID GENMASK(15, 0) +#define DWC_PCIE_CONFIG_TYPE GENMASK(19, 16) +#define DWC_PCIE_CONFIG_LANE GENMASK(27, 20) + +#define DWC_PCIE_EVENT_ID(event) FIELD_GET(DWC_PCIE_CONFIG_EVENTID, (event)->attr.config) +#define DWC_PCIE_EVENT_TYPE(event) FIELD_GET(DWC_PCIE_CONFIG_TYPE, (event)->attr.config) +#define DWC_PCIE_EVENT_LANE(event) FIELD_GET(DWC_PCIE_CONFIG_LANE, (event)->attr.config) + +enum dwc_pcie_event_type { + DWC_PCIE_TIME_BASE_EVENT, + DWC_PCIE_LANE_EVENT, + DWC_PCIE_EVENT_TYPE_MAX, +}; + +#define DWC_PCIE_LANE_EVENT_MAX_PERIOD GENMASK_ULL(31, 0) +#define DWC_PCIE_MAX_PERIOD GENMASK_ULL(63, 0) + +struct dwc_pcie_pmu { + struct pmu pmu; + struct pci_dev *pdev; /* Root Port device */ + u16 ras_des_offset; + u32 nr_lanes; + + struct list_head pmu_node; + struct hlist_node cpuhp_node; + struct perf_event *event[DWC_PCIE_EVENT_TYPE_MAX]; + int on_cpu; +}; + +#define to_dwc_pcie_pmu(p) (container_of(p, struct 
dwc_pcie_pmu, pmu)) + +static int dwc_pcie_pmu_hp_state; +static struct list_head dwc_pcie_dev_info_head = + LIST_HEAD_INIT(dwc_pcie_dev_info_head); +static bool notify; + +struct dwc_pcie_dev_info { + struct platform_device *plat_dev; + struct pci_dev *pdev; + struct list_head dev_node; +}; + +struct dwc_pcie_vendor_id { + int vendor_id; +}; + +static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = { + {.vendor_id = PCI_VENDOR_ID_ALIBABA }, + {} /* terminator */ +}; + +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu)); +} +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *dwc_pcie_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static struct attribute_group dwc_pcie_cpumask_attr_group = { + .attrs = dwc_pcie_pmu_cpumask_attrs, +}; + +struct dwc_pcie_format_attr { + struct device_attribute attr; + u64 field; + int config; +}; + +PMU_FORMAT_ATTR(eventid, "config:0-15"); +PMU_FORMAT_ATTR(type, "config:16-19"); +PMU_FORMAT_ATTR(lane, "config:20-27"); + +static struct attribute *dwc_pcie_format_attrs[] = { + &format_attr_type.attr, + &format_attr_eventid.attr, + &format_attr_lane.attr, + NULL, +}; + +static struct attribute_group dwc_pcie_format_attrs_group = { + .name = "format", + .attrs = dwc_pcie_format_attrs, +}; + +struct dwc_pcie_event_attr { + struct device_attribute attr; + enum dwc_pcie_event_type type; + u16 eventid; + u8 lane; +}; + +static ssize_t dwc_pcie_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dwc_pcie_event_attr *eattr; + + eattr = container_of(attr, typeof(*eattr), attr); + + if (eattr->type == DWC_PCIE_LANE_EVENT) + return sysfs_emit(buf, "eventid=0x%x,type=0x%x,lane=?\n", + eattr->eventid, eattr->type); + else if (eattr->type == DWC_PCIE_TIME_BASE_EVENT) + return sysfs_emit(buf, 
"eventid=0x%x,type=0x%x\n", + eattr->eventid, eattr->type); + + return 0; +} + +#define DWC_PCIE_EVENT_ATTR(_name, _type, _eventid, _lane) \ + (&((struct dwc_pcie_event_attr[]) {{ \ + .attr = __ATTR(_name, 0444, dwc_pcie_event_show, NULL), \ + .type = _type, \ + .eventid = _eventid, \ + .lane = _lane, \ + }})[0].attr.attr) + +#define DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(_name, _eventid) \ + DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_TIME_BASE_EVENT, _eventid, 0) +#define DWC_PCIE_PMU_LANE_EVENT_ATTR(_name, _eventid) \ + DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_LANE_EVENT, _eventid, 0) + +static struct attribute *dwc_pcie_pmu_time_event_attrs[] = { + /* Group #0 */ + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(one_cycle, 0x00), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_L0S, 0x01), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(RX_L0S, 0x02), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L0, 0x03), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1, 0x04), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x08), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09), + + /* Group #1 */ + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_PCIe_TLP_Data_Payload, 0x20), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_PCIe_TLP_Data_Payload, 0x21), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_CCIX_TLP_Data_Payload, 0x22), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_CCIX_TLP_Data_Payload, 0x23), + + /* + * Leave it to the user to specify the lane ID to avoid generating + * a list of hundreds of events. 
+ */ + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ack_dllp, 0x600), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_update_fc_dllp, 0x601), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ack_dllp, 0x602), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_update_fc_dllp, 0x603), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nulified_tlp, 0x604), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nulified_tlp, 0x605), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tl, 0x606), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_write, 0x700), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_read, 0x701), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_write, 0x702), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_read, 0x703), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_write, 0x704), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_read, 0x705), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_without_data, 0x706), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_with_data, 0x707), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_message_tlp, 0x708), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_atomic, 0x709), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_tlp_with_prefix, 0x70A), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_write, 0x70B), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_read, 0x70C), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_write, 0x70F), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_read, 0x710), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_without_data, 0x711), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_with_data, 0x712), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_message_tlp, 0x713), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_atomic, 0x714), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_tlp_with_prefix, 0x715), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ccix_tlp, 0x716), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ccix_tlp, 0x717), + NULL +}; + +static const struct attribute_group dwc_pcie_event_attrs_group = { + .name = "events", + .attrs = dwc_pcie_pmu_time_event_attrs, +}; + +static const struct attribute_group *dwc_pcie_attr_groups[] = { + &dwc_pcie_event_attrs_group, + &dwc_pcie_format_attrs_group, + &dwc_pcie_cpumask_attr_group, + NULL +}; + +static void 
dwc_pcie_pmu_lane_event_enable(struct dwc_pcie_pmu *pcie_pmu, + bool enable) +{ + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + + if (enable) + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_EVENT_CNT_CTL, + DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON); + else + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_EVENT_CNT_CTL, + DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF); +} + +static void dwc_pcie_pmu_time_based_event_enable(struct dwc_pcie_pmu *pcie_pmu, + bool enable) +{ + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, + DWC_PCIE_TIME_BASED_TIMER_START, enable); +} + +static u64 dwc_pcie_pmu_read_lane_event_counter(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + u32 val; + + pci_read_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_DATA, &val); + + return val; +} + +static u64 dwc_pcie_pmu_read_time_based_counter(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + struct pci_dev *pdev = pcie_pmu->pdev; + int event_id = DWC_PCIE_EVENT_ID(event); + u16 ras_des_offset = pcie_pmu->ras_des_offset; + u32 lo, hi, ss; + u64 val; + + /* + * The 64-bit value of the data counter is spread across two + * registers that are not synchronized. In order to read them + * atomically, ensure that the high 32 bits match before and after + * reading the low 32 bits. 
+ */ + pci_read_config_dword(pdev, + ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, &hi); + do { + /* snapshot the high 32 bits */ + ss = hi; + + pci_read_config_dword( + pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW, + &lo); + pci_read_config_dword( + pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, + &hi); + } while (hi != ss); + + val = ((u64)hi << 32) | lo; + /* + * The Group#1 event measures the amount of data processed in 16-byte + * units. Simplify the end-user interface by multiplying the counter + * at the point of read. + */ + if (event_id >= 0x20 && event_id <= 0x23) + val *= 16; + + return val; +} + +static void dwc_pcie_pmu_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + u64 delta, prev, now = 0; + + do { + prev = local64_read(&hwc->prev_count); + + if (type == DWC_PCIE_LANE_EVENT) + now = dwc_pcie_pmu_read_lane_event_counter(event); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + now = dwc_pcie_pmu_read_time_based_counter(event); + + } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev); + + delta = (now - prev) & DWC_PCIE_MAX_PERIOD; + /* 32-bit counter for Lane Event Counting */ + if (type == DWC_PCIE_LANE_EVENT) + delta &= DWC_PCIE_LANE_EVENT_MAX_PERIOD; + + local64_add(delta, &event->count); +} + +static int dwc_pcie_pmu_event_init(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + struct perf_event *sibling; + u32 lane; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* We don't support sampling */ + if (is_sampling_event(event)) + return -EINVAL; + + /* We cannot support task bound events */ + if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) + return -EINVAL; + + if (event->group_leader != event && + !is_software_event(event->group_leader)) + return -EINVAL; + + 
for_each_sibling_event(sibling, event->group_leader) { + if (sibling->pmu != event->pmu && !is_software_event(sibling)) + return -EINVAL; + } + + if (type < 0 || type >= DWC_PCIE_EVENT_TYPE_MAX) + return -EINVAL; + + if (type == DWC_PCIE_LANE_EVENT) { + lane = DWC_PCIE_EVENT_LANE(event); + if (lane < 0 || lane >= pcie_pmu->nr_lanes) + return -EINVAL; + } + + event->cpu = pcie_pmu->on_cpu; + + return 0; +} + +static void dwc_pcie_pmu_event_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + + hwc->state = 0; + local64_set(&hwc->prev_count, 0); + + if (type == DWC_PCIE_LANE_EVENT) + dwc_pcie_pmu_lane_event_enable(pcie_pmu, true); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + dwc_pcie_pmu_time_based_event_enable(pcie_pmu, true); +} + +static void dwc_pcie_pmu_event_stop(struct perf_event *event, int flags) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + struct hw_perf_event *hwc = &event->hw; + + if (event->hw.state & PERF_HES_STOPPED) + return; + + if (type == DWC_PCIE_LANE_EVENT) + dwc_pcie_pmu_lane_event_enable(pcie_pmu, false); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + dwc_pcie_pmu_time_based_event_enable(pcie_pmu, false); + + dwc_pcie_pmu_event_update(event); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; +} + +static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + struct pci_dev *pdev = pcie_pmu->pdev; + struct hw_perf_event *hwc = &event->hw; + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + int event_id = DWC_PCIE_EVENT_ID(event); + int lane = DWC_PCIE_EVENT_LANE(event); + u16 ras_des_offset = pcie_pmu->ras_des_offset; + u32 ctrl; + + /* one counter for each type and it is in use */ + if (pcie_pmu->event[type]) 
+ return -ENOSPC; + + pcie_pmu->event[type] = event; + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + if (type == DWC_PCIE_LANE_EVENT) { + /* EVENT_COUNTER_DATA_REG needs clear manually */ + ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) | + FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) | + FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF) | + FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR); + pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL, + ctrl); + } else if (type == DWC_PCIE_TIME_BASE_EVENT) { + /* + * TIME_BASED_ANAL_DATA_REG is a 64 bit register, we can safely + * use it with any manually controlled duration. And it is + * cleared when next measurement starts. + */ + ctrl = FIELD_PREP(DWC_PCIE_TIME_BASED_REPORT_SEL, event_id) | + FIELD_PREP(DWC_PCIE_TIME_BASED_DURATION_SEL, + DWC_PCIE_DURATION_MANUAL_CTL) | + DWC_PCIE_TIME_BASED_CNT_ENABLE; + pci_write_config_dword( + pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, ctrl); + } + + if (flags & PERF_EF_START) + dwc_pcie_pmu_event_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + + return 0; +} + +static void dwc_pcie_pmu_event_del(struct perf_event *event, int flags) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + + dwc_pcie_pmu_event_stop(event, flags | PERF_EF_UPDATE); + perf_event_update_userpage(event); + pcie_pmu->event[type] = NULL; +} + +static void dwc_pcie_pmu_remove_cpuhp_instance(void *hotplug_node) +{ + cpuhp_state_remove_instance_nocalls(dwc_pcie_pmu_hp_state, hotplug_node); +} + +/* + * Find the binded DES capability device info of a PCI device. + * @pdev: The PCI device. 
+ */ +static struct dwc_pcie_dev_info *dwc_pcie_find_dev_info(struct pci_dev *pdev) +{ + struct dwc_pcie_dev_info *dev_info; + + list_for_each_entry(dev_info, &dwc_pcie_dev_info_head, dev_node) + if (dev_info->pdev == pdev) + return dev_info; + + return NULL; +} + +static void dwc_pcie_unregister_pmu(void *data) +{ + struct dwc_pcie_pmu *pcie_pmu = data; + + perf_pmu_unregister(&pcie_pmu->pmu); +} + +static bool dwc_pcie_match_des_cap(struct pci_dev *pdev) +{ + const struct dwc_pcie_vendor_id *vid; + u16 vsec = 0; + u32 val; + + if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)) + return false; + + for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) { + vsec = pci_find_vsec_capability(pdev, vid->vendor_id, + DWC_PCIE_VSEC_RAS_DES_ID); + if (vsec) + break; + } + if (!vsec) + return false; + + pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); + if (PCI_VNDR_HEADER_REV(val) != 0x04) + return false; + + pci_dbg(pdev, + "Detected PCIe Vendor-Specific Extended Capability RAS DES\n"); + return true; +} + +static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info) +{ + platform_device_unregister(dev_info->plat_dev); + list_del(&dev_info->dev_node); + kfree(dev_info); +} + +static int dwc_pcie_register_dev(struct pci_dev *pdev) +{ + struct platform_device *plat_dev; + struct dwc_pcie_dev_info *dev_info; + u32 bdf; + + bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); + plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", bdf, + pdev, sizeof(*pdev)); + + if (IS_ERR(plat_dev)) + return PTR_ERR(plat_dev); + + dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); + if (!dev_info) + return -ENOMEM; + + /* Cache platform device to handle pci device hotplug */ + dev_info->plat_dev = plat_dev; + dev_info->pdev = pdev; + list_add(&dev_info->dev_node, &dwc_pcie_dev_info_head); + + return 0; +} + +static int dwc_pcie_pmu_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + 
struct pci_dev *pdev = to_pci_dev(dev); + struct dwc_pcie_dev_info *dev_info; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + if (!dwc_pcie_match_des_cap(pdev)) + return NOTIFY_DONE; + if (dwc_pcie_register_dev(pdev)) + return NOTIFY_BAD; + break; + case BUS_NOTIFY_DEL_DEVICE: + dev_info = dwc_pcie_find_dev_info(pdev); + if (!dev_info) + return NOTIFY_DONE; + dwc_pcie_unregister_dev(dev_info); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block dwc_pcie_pmu_nb = { + .notifier_call = dwc_pcie_pmu_notifier, +}; + +static int dwc_pcie_pmu_probe(struct platform_device *plat_dev) +{ + struct pci_dev *pdev = plat_dev->dev.platform_data; + struct dwc_pcie_pmu *pcie_pmu; + char *name; + u32 bdf, val; + u16 vsec; + int ret; + + vsec = pci_find_vsec_capability(pdev, pdev->vendor, + DWC_PCIE_VSEC_RAS_DES_ID); + pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); + bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); + name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", bdf); + if (!name) + return -ENOMEM; + + pcie_pmu = devm_kzalloc(&plat_dev->dev, sizeof(*pcie_pmu), GFP_KERNEL); + if (!pcie_pmu) + return -ENOMEM; + + pcie_pmu->pdev = pdev; + pcie_pmu->ras_des_offset = vsec; + pcie_pmu->nr_lanes = pcie_get_width_cap(pdev); + pcie_pmu->on_cpu = -1; + pcie_pmu->pmu = (struct pmu){ + .name = name, + .parent = &pdev->dev, + .module = THIS_MODULE, + .attr_groups = dwc_pcie_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + .task_ctx_nr = perf_invalid_context, + .event_init = dwc_pcie_pmu_event_init, + .add = dwc_pcie_pmu_event_add, + .del = dwc_pcie_pmu_event_del, + .start = dwc_pcie_pmu_event_start, + .stop = dwc_pcie_pmu_event_stop, + .read = dwc_pcie_pmu_event_update, + }; + + /* Add this instance to the list used by the offline callback */ + ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state, + &pcie_pmu->cpuhp_node); + if (ret) { + pci_err(pdev, "Error %d registering hotplug @%x\n", ret, bdf); + return ret; + } + + /* 
Unwind when platform driver removes */ + ret = devm_add_action_or_reset(&plat_dev->dev, + dwc_pcie_pmu_remove_cpuhp_instance, + &pcie_pmu->cpuhp_node); + if (ret) + return ret; + + ret = perf_pmu_register(&pcie_pmu->pmu, name, -1); + if (ret) { + pci_err(pdev, "Error %d registering PMU @%x\n", ret, bdf); + return ret; + } + ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu, + pcie_pmu); + if (ret) + return ret; + + return 0; +} + +static int dwc_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) +{ + struct dwc_pcie_pmu *pcie_pmu; + + pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node); + if (pcie_pmu->on_cpu == -1) + pcie_pmu->on_cpu = cpumask_local_spread( + 0, dev_to_node(&pcie_pmu->pdev->dev)); + + return 0; +} + +static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) +{ + struct dwc_pcie_pmu *pcie_pmu; + struct pci_dev *pdev; + int node; + cpumask_t mask; + unsigned int target; + + pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node); + /* Nothing to do if this CPU doesn't own the PMU */ + if (cpu != pcie_pmu->on_cpu) + return 0; + + pcie_pmu->on_cpu = -1; + pdev = pcie_pmu->pdev; + node = dev_to_node(&pdev->dev); + if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) && + cpumask_andnot(&mask, &mask, cpumask_of(cpu))) + target = cpumask_any(&mask); + else + target = cpumask_any_but(cpu_online_mask, cpu); + + if (target >= nr_cpu_ids) { + pci_err(pdev, "There is no CPU to set\n"); + return 0; + } + + /* This PMU does NOT support interrupt, just migrate context. 
*/ + perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target); + pcie_pmu->on_cpu = target; + + return 0; +} + +static struct platform_driver dwc_pcie_pmu_driver = { + .probe = dwc_pcie_pmu_probe, + .driver = {.name = "dwc_pcie_pmu",}, +}; + +static int __init dwc_pcie_pmu_init(void) +{ + struct pci_dev *pdev = NULL; + bool found = false; + int ret; + + for_each_pci_dev(pdev) { + if (!dwc_pcie_match_des_cap(pdev)) + continue; + + ret = dwc_pcie_register_dev(pdev); + if (ret) { + pci_dev_put(pdev); + return ret; + } + + found = true; + } + if (!found) + return -ENODEV; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/dwc_pcie_pmu:online", + dwc_pcie_pmu_online_cpu, + dwc_pcie_pmu_offline_cpu); + if (ret < 0) + return ret; + + dwc_pcie_pmu_hp_state = ret; + + ret = platform_driver_register(&dwc_pcie_pmu_driver); + if (ret) + goto platform_driver_register_err; + + ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb); + if (ret) + goto platform_driver_register_err; + notify = true; + + return 0; + +platform_driver_register_err: + cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state); + + return ret; +} + +static void __exit dwc_pcie_pmu_exit(void) +{ + struct dwc_pcie_dev_info *dev_info, *tmp; + + if (notify) + bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb); + list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node) + dwc_pcie_unregister_dev(dev_info); + platform_driver_unregister(&dwc_pcie_pmu_driver); + cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state); +} + +module_init(dwc_pcie_pmu_init); +module_exit(dwc_pcie_pmu_exit); + +MODULE_DESCRIPTION("PMU driver for DesignWare Cores PCI Express Controller"); +MODULE_AUTHOR("Shuai Xue "); +MODULE_LICENSE("GPL v2"); -- Gitee From 0908c61c57e4deaf655336dd26be5fa21e34426c Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 27 Sep 2023 13:59:46 +0800 Subject: [PATCH 0408/2138] perf metric: "Compat" supports regular expression matching identifiers ANBZ: #8601 commit 
54409997d4b99ab63616bd431cf6244d58f8a597 upstream. The jevent "Compat" is used for uncore PMU alias or metric definitions. The same PMU driver has different PMU identifiers due to different hardware versions and types, but they may have some common PMU metric. Since a Compat value can only match one identifier, when adding the same metric to PMUs with different identifiers, each identifier needs to be defined once, which is not streamlined enough. So let "Compat" support using regular expression to match multiple identifiers for uncore PMU metric. Signed-off-by: Jing Zhang Reviewed-by: John Garry Reviewed-by: Ian Rogers Tested-by: Ian Rogers Cc: James Clark Cc: Will Deacon Cc: Leo Yan Cc: Mike Leach Cc: Shuai Xue Cc: Zhuo Song Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/r/1695794391-34817-3-git-send-email-renyu.zj@linux.alibaba.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2937 --- tools/perf/util/metricgroup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index bb5faaa25d51..ca3e0404f187 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -498,7 +498,7 @@ static int metricgroup__sys_event_iter(const struct pmu_metric *pm, while ((pmu = perf_pmus__scan(pmu))) { - if (!pmu->id || strcmp(pmu->id, pm->compat)) + if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id)) continue; return d->fn(pm, table, d->data); -- Gitee From 71fe3e57a31a8693855090bf6ead34416377cc4b Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 27 Sep 2023 13:59:47 +0800 Subject: [PATCH 0409/2138] perf jevents: Support EventidCode and NodeType ANBZ: #8601 commit e3e42e23c0c6e791a00eb8331dc948f316e6de1f upstream. The previous code assumes an event has either an "event=" or "config" field at the beginning. 
For CMN neither of these may be present, as an event is typically "type=xx,eventid=xxx". So add EventidCode and NodeType to support CMN event description. I compared pmu_event.c before and after compiling with JEVENT_ARCH=all, they are consistent. Signed-off-by: Jing Zhang Reviewed-by: Ian Rogers Tested-by: Ian Rogers Cc: James Clark Cc: Will Deacon Cc: Leo Yan Cc: Mike Leach Cc: Shuai Xue Cc: Zhuo Song Cc: John Garry Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/r/1695794391-34817-4-git-send-email-renyu.zj@linux.alibaba.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2937 --- tools/perf/pmu-events/jevents.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py index 72ba4a9239c6..f11c5c39d2c9 100755 --- a/tools/perf/pmu-events/jevents.py +++ b/tools/perf/pmu-events/jevents.py @@ -298,6 +298,7 @@ class JsonEvent: if 'ExtSel' in jd: eventcode |= int(jd['ExtSel']) << 8 configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None + eventidcode = int(jd['EventidCode'], 0) if 'EventidCode' in jd else None self.name = jd['EventName'].lower() if 'EventName' in jd else None self.topic = '' self.compat = jd.get('Compat') @@ -335,7 +336,13 @@ class JsonEvent: if precise and self.desc and '(Precise Event)' not in self.desc: extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise ' 'event)') - event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}' + event = None + if configcode is not None: + event = f'config={llx(configcode)}' + elif eventidcode is not None: + event = f'eventid={llx(eventidcode)}' + else: + event = f'event={llx(eventcode)}' event_fields = [ ('AnyThread', 'any='), ('PortMask', 'ch_mask='), @@ -345,6 +352,7 @@ class JsonEvent: ('Invert', 'inv='), ('SampleAfterValue', 
'period='), ('UMask', 'umask='), + ('NodeType', 'type='), ] for key, value in event_fields: if key in jd and jd[key] != '0': -- Gitee From 3405f1191d441a0d93862ece44b1c9e8cd1e1403 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 27 Sep 2023 13:59:48 +0800 Subject: [PATCH 0410/2138] perf test: Make matching_pmu effective ANBZ: #8601 commit 3bb59e759cbb357f8fb46cc5a48d2b0da09b37c4 upstream. The perf_pmu_test_event.matching_pmu didn't work. No matter what its value is, it does not affect the test results. So let matching_pmu be used for matching perf_pmu_test_pmu.pmu.name. Signed-off-by: Jing Zhang Reviewed-by: John Garry Reviewed-by: Ian Rogers Tested-by: Ian Rogers Cc: James Clark Cc: Will Deacon Cc: Leo Yan Cc: Mike Leach Cc: Shuai Xue Cc: Zhuo Song Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/r/1695794391-34817-5-git-send-email-renyu.zj@linux.alibaba.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2937 --- tools/perf/tests/pmu-events.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c index f5321fbdee79..0cf572f7c1e7 100644 --- a/tools/perf/tests/pmu-events.c +++ b/tools/perf/tests/pmu-events.c @@ -245,7 +245,7 @@ static const struct perf_pmu_test_event sys_ddr_pmu_write_cycles = { }, .alias_str = "event=0x2b", .alias_long_desc = "ddr write-cycles event", - .matching_pmu = "uncore_sys_ddr_pmu", + .matching_pmu = "uncore_sys_ddr_pmu0", }; static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = { @@ -259,7 +259,7 @@ static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = { }, .alias_str = "config=0x2c", .alias_long_desc = "ccn read-cycles event", - .matching_pmu = "uncore_sys_ccn_pmu", + .matching_pmu = "uncore_sys_ccn_pmu4", }; static const struct perf_pmu_test_event *sys_events[] = { @@ -615,6 +615,12 @@ static int 
__test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu) .count = &matched_count, }; + if (strcmp(pmu_name, test_event.matching_pmu)) { + pr_debug("testing aliases uncore PMU %s: mismatched matching_pmu, %s vs %s\n", + pmu_name, test_event.matching_pmu, pmu_name); + return -1; + } + err = perf_pmu__find_event(pmu, event->name, &args, test_core_pmu_event_aliases_cb); if (err) { -- Gitee From 83d5b5f6b072a60eb64d0a53f7a4d3128bd51743 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 27 Sep 2023 13:59:49 +0800 Subject: [PATCH 0411/2138] perf test: Add pmu-event test for "Compat" and new event_field. ANBZ: #8601 commit 7fded33c6971b6c8e87cbbf48e74536aacca2991 upstream. Add new event test for uncore system event which is used to verify the functionality of "Compat" matching multiple identifiers and the new event fields "EventidCode" and "NodeType". Signed-off-by: Jing Zhang Reviewed-by: Ian Rogers Tested-by: Ian Rogers Cc: James Clark Cc: Will Deacon Cc: Leo Yan Cc: Mike Leach Cc: Shuai Xue Cc: Zhuo Song Cc: John Garry Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/r/1695794391-34817-6-git-send-email-renyu.zj@linux.alibaba.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2937 --- .../arch/test/test_soc/sys/uncore.json | 8 +++ tools/perf/pmu-events/empty-pmu-events.c | 8 +++ tools/perf/tests/pmu-events.c | 55 +++++++++++++++++++ 3 files changed, 71 insertions(+) diff --git a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json index c7e7528db315..4d423b149ad1 100644 --- a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json +++ b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json @@ -12,5 +12,13 @@ "EventName": "sys_ccn_pmu.read_cycles", "Unit": "sys_ccn_pmu", "Compat": "0x01" + }, + { + "BriefDescription": "Counts total cache misses in first lookup 
result (high priority)", + "EventidCode": "0x1", + "NodeType": "0x5", + "EventName": "sys_cmn_pmu.hnf_cache_miss", + "Unit": "sys_cmn_pmu", + "Compat": "(434|436|43c|43a).*" } ] diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c index 12bd043a05e3..13727421d424 100644 --- a/tools/perf/pmu-events/empty-pmu-events.c +++ b/tools/perf/pmu-events/empty-pmu-events.c @@ -244,6 +244,14 @@ static const struct pmu_event pmu_events__test_soc_sys[] = { .topic = "uncore", .pmu = "uncore_sys_ccn_pmu", }, + { + .name = "sys_cmn_pmu.hnf_cache_miss", + .event = "eventid=0x1,type=0x5", + .desc = "Counts total cache misses in first lookup result (high priority). Unit: uncore_sys_cmn_pmu ", + .compat = "(434|436|43c|43a).*", + .topic = "uncore", + .pmu = "uncore_sys_cmn_pmu", + }, { .name = 0, .event = 0, diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c index 0cf572f7c1e7..a56d32905743 100644 --- a/tools/perf/tests/pmu-events.c +++ b/tools/perf/tests/pmu-events.c @@ -262,9 +262,24 @@ static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = { .matching_pmu = "uncore_sys_ccn_pmu4", }; +static const struct perf_pmu_test_event sys_cmn_pmu_hnf_cache_miss = { + .event = { + .name = "sys_cmn_pmu.hnf_cache_miss", + .event = "eventid=0x1,type=0x5", + .desc = "Counts total cache misses in first lookup result (high priority)", + .topic = "uncore", + .pmu = "uncore_sys_cmn_pmu", + .compat = "(434|436|43c|43a).*", + }, + .alias_str = "eventid=0x1,type=0x5", + .alias_long_desc = "Counts total cache misses in first lookup result (high priority)", + .matching_pmu = "uncore_sys_cmn_pmu0", +}; + static const struct perf_pmu_test_event *sys_events[] = { &sys_ddr_pmu_write_cycles, &sys_ccn_pmu_read_cycles, + &sys_cmn_pmu_hnf_cache_miss, NULL }; @@ -707,6 +722,46 @@ static struct perf_pmu_test_pmu test_pmus[] = { &sys_ccn_pmu_read_cycles, }, }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = 
(char *)"43401", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43602", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43c03", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43a01", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + } }; /* Test that aliases generated are as expected */ -- Gitee From 41f8c2d884133cc6d4bf5df89c09f8a016bf889b Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 27 Sep 2023 13:59:50 +0800 Subject: [PATCH 0412/2138] perf jevents: Add support for Arm CMN PMU aliasing ANBZ: #8601 commit 0b4de7bdf46c521518e38579d0ab5600a6949bec upstream. Currently just add aliases for part of Arm CMN PMU events which are general and compatible for any SoC and CMN-ANY. "Compat" value "(434|436|43c|43a).*" means it is compatible with all CMN600/CMN650/CMN700/Ci700, which can be obtained from commit 7819e05a0dce ("perf/arm-cmn: Revamp model detection"). 
The arm-cmn PMU events got from: [0] https://developer.arm.com/documentation/100180/0302/?lang=en [1] https://developer.arm.com/documentation/101408/0100/?lang=en [2] https://developer.arm.com/documentation/102308/0302/?lang=en [3] https://developer.arm.com/documentation/101569/0300/?lang=en Signed-off-by: Jing Zhang Reviewed-by: John Garry Reviewed-by: Ian Rogers Tested-by: Ian Rogers Cc: James Clark Cc: Will Deacon Cc: Leo Yan Cc: Mike Leach Cc: Shuai Xue Cc: Zhuo Song Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/r/1695794391-34817-7-git-send-email-renyu.zj@linux.alibaba.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2937 --- .../arch/arm64/arm/cmn/sys/cmn.json | 266 ++++++++++++++++++ tools/perf/pmu-events/jevents.py | 1 + 2 files changed, 267 insertions(+) create mode 100644 tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json diff --git a/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json new file mode 100644 index 000000000000..428605c37d10 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json @@ -0,0 +1,266 @@ +[ + { + "EventName": "hnf_cache_miss", + "EventidCode": "0x1", + "NodeType": "0x5", + "BriefDescription": "Counts total cache misses in first lookup result (high priority).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_slc_sf_cache_access", + "EventidCode": "0x2", + "NodeType": "0x5", + "BriefDescription": "Counts number of cache accesses in first access (high priority).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_cache_fill", + "EventidCode": "0x3", + "NodeType": "0x5", + "BriefDescription": "Counts total allocations in HN SLC (all cache line allocations to SLC).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": 
"hnf_pocq_retry", + "EventidCode": "0x4", + "NodeType": "0x5", + "BriefDescription": "Counts number of retried requests.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_pocq_reqs_recvd", + "EventidCode": "0x5", + "NodeType": "0x5", + "BriefDescription": "Counts number of requests that HN receives.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_sf_hit", + "EventidCode": "0x6", + "NodeType": "0x5", + "BriefDescription": "Counts number of SF hits.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_sf_evictions", + "EventidCode": "0x7", + "NodeType": "0x5", + "BriefDescription": "Counts number of SF eviction cache invalidations initiated.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_dir_snoops_sent", + "EventidCode": "0x8", + "NodeType": "0x5", + "BriefDescription": "Counts number of directed snoops sent (not including SF back invalidation).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_brd_snoops_sent", + "EventidCode": "0x9", + "NodeType": "0x5", + "BriefDescription": "Counts number of multicast snoops sent (not including SF back invalidation).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_slc_eviction", + "EventidCode": "0xa", + "NodeType": "0x5", + "BriefDescription": "Counts number of SLC evictions (dirty only).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_slc_fill_invalid_way", + "EventidCode": "0xb", + "NodeType": "0x5", + "BriefDescription": "Counts number of SLC fills to an invalid way.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_mc_retries", + "EventidCode": "0xc", + "NodeType": "0x5", + "BriefDescription": "Counts number of retried transactions by the MC.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": 
"hnf_mc_reqs", + "EventidCode": "0xd", + "NodeType": "0x5", + "BriefDescription": "Counts number of requests that are sent to MC.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_qos_hh_retry", + "EventidCode": "0xe", + "NodeType": "0x5", + "BriefDescription": "Counts number of times a HighHigh priority request is protocolretried at the HN‑F.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_s0_rdata_beats", + "EventidCode": "0x1", + "NodeType": "0xa", + "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 0. This event measures the read bandwidth, including CMO responses.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_s1_rdata_beats", + "EventidCode": "0x2", + "NodeType": "0xa", + "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 1. This event measures the read bandwidth, including CMO responses.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_s2_rdata_beats", + "EventidCode": "0x3", + "NodeType": "0xa", + "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 2. This event measures the read bandwidth, including CMO responses.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_rxdat_flits", + "EventidCode": "0x4", + "NodeType": "0xa", + "BriefDescription": "Number of RXDAT flits received. This event measures the true read data bandwidth, excluding CMOs.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_txdat_flits", + "EventidCode": "0x5", + "NodeType": "0xa", + "BriefDescription": "Number of TXDAT flits dispatched. 
This event measures the write bandwidth.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_txreq_flits_total", + "EventidCode": "0x6", + "NodeType": "0xa", + "BriefDescription": "Number of TXREQ flits dispatched. This event measures the total request bandwidth.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_txreq_flits_retried", + "EventidCode": "0x7", + "NodeType": "0xa", + "BriefDescription": "Number of retried TXREQ flits dispatched. This event measures the retry rate.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_txrsp_retryack", + "EventidCode": "0x4", + "NodeType": "0x7", + "BriefDescription": "Number of RXREQ flits dispatched. This event is a measure of the retry rate.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_txdat_flitv", + "EventidCode": "0x5", + "NodeType": "0x7", + "BriefDescription": "Number of TXDAT flits dispatched from XP to SBSX. 
This event is a measure of the write bandwidth.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_arvalid_no_arready", + "EventidCode": "0x21", + "NodeType": "0x7", + "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on AR channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_awvalid_no_awready", + "EventidCode": "0x22", + "NodeType": "0x7", + "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on AW channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_wvalid_no_wready", + "EventidCode": "0x23", + "NodeType": "0x7", + "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on W channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_txrsp_retryack", + "EventidCode": "0x2a", + "NodeType": "0x4", + "BriefDescription": "Number of RXREQ flits dispatched. 
This event is a measure of the retry rate.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_arvalid_no_arready", + "EventidCode": "0x2b", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on AR channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_arready_no_arvalid", + "EventidCode": "0x2c", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the AR channel is waiting for new requests from HN-I bridge.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_awvalid_no_awready", + "EventidCode": "0x2d", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on AW channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_awready_no_awvalid", + "EventidCode": "0x2e", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the AW channel is waiting for new requests from HN-I bridge.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_wvalid_no_wready", + "EventidCode": "0x2f", + "NodeType": "0x4", + "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on W channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_txdat_stall", + "EventidCode": "0x30", + "NodeType": "0x4", + "BriefDescription": "TXDAT valid but no link credit available.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + } +] diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py index f11c5c39d2c9..ae2bd49e8805 100755 --- a/tools/perf/pmu-events/jevents.py +++ b/tools/perf/pmu-events/jevents.py @@ -289,6 +289,7 @@ class JsonEvent: 'cpu_core': 'cpu_core', 'cpu_atom': 'cpu_atom', 'ali_drw': 'ali_drw', + 'arm_cmn': 'arm_cmn', } return table[unit] if unit in table else f'uncore_{unit.lower()}' 
-- Gitee From 4f778ae970f7c4ff01f954fbe38c4ed0efde5310 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 27 Sep 2023 13:59:51 +0800 Subject: [PATCH 0413/2138] perf vendor events: Add JSON metrics for Arm CMN ANBZ: #8601 commit 4f3ee7d1d5ced888e603c7fbe48e3468320745c1 upstream. Add JSON metrics for Arm CMN. Currently just add part of CMN PMU metrics which are general and compatible for any SoC with CMN-ANY. Signed-off-by: Jing Zhang Reviewed-by: John Garry Reviewed-by: Ian Rogers Tested-by: Ian Rogers Cc: James Clark Cc: Will Deacon Cc: Leo Yan Cc: Mike Leach Cc: Shuai Xue Cc: Zhuo Song Cc: linux-arm-kernel@lists.infradead.org Cc: linux-doc@vger.kernel.org Link: https://lore.kernel.org/r/1695794391-34817-8-git-send-email-renyu.zj@linux.alibaba.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2937 --- .../arch/arm64/arm/cmn/sys/metric.json | 74 +++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json diff --git a/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json new file mode 100644 index 000000000000..f7823bd265db --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/metric.json @@ -0,0 +1,74 @@ +[ + { + "MetricName": "slc_miss_rate", + "BriefDescription": "The system level cache miss rate.", + "MetricGroup": "cmn", + "MetricExpr": "hnf_cache_miss / hnf_slc_sf_cache_access", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "hnf_message_retry_rate", + "BriefDescription": "HN-F message retry rate indicates whether a lack of credits is causing the bottlenecks.", + "MetricGroup": "cmn", + "MetricExpr": "hnf_pocq_retry / hnf_pocq_reqs_recvd", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "sf_hit_rate", + "BriefDescription": "Snoop 
filter hit rate can be used to measure the snoop filter efficiency.", + "MetricGroup": "cmn", + "MetricExpr": "hnf_sf_hit / hnf_slc_sf_cache_access", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "mc_message_retry_rate", + "BriefDescription": "The memory controller request retries rate indicates whether the memory controller is the bottleneck.", + "MetricGroup": "cmn", + "MetricExpr": "hnf_mc_retries / hnf_mc_reqs", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "rni_actual_read_bandwidth.all", + "BriefDescription": "This event measure the actual bandwidth that RN-I bridge sends to the interconnect.", + "MetricGroup": "cmn", + "MetricExpr": "rnid_rxdat_flits * 32 / 1e6 / duration_time", + "ScaleUnit": "1MB/s", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "rni_actual_write_bandwidth.all", + "BriefDescription": "This event measures the actual write bandwidth at RN-I bridges.", + "MetricGroup": "cmn", + "MetricExpr": "rnid_txdat_flits * 32 / 1e6 / duration_time", + "ScaleUnit": "1MB/s", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "rni_retry_rate", + "BriefDescription": "RN-I bridge retry rate indicates whether the memory controller is the bottleneck.", + "MetricGroup": "cmn", + "MetricExpr": "rnid_txreq_flits_retried / rnid_txreq_flits_total", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "sbsx_actual_write_bandwidth.all", + "BriefDescription": "sbsx actual write bandwidth.", + "MetricGroup": "cmn", + "MetricExpr": "sbsx_txdat_flitv * 32 / 1e6 / duration_time", + "ScaleUnit": "1MB/s", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + } +] -- Gitee From ac9c2845b774b6a49c80e86fc335b356243d0d0d Mon Sep 17 00:00:00 2001 From: Huaixin Chang Date: Fri, 6 Mar 2020 11:20:49 +0800 Subject: [PATCH 0414/2138] anolis: sched/fair: 
Introduce primitives for CFS bandwidth burst ANBZ: #8586 In this patch, we introduce the notion of CFS bandwidth burst. Unused "quota" from pervious "periods" might be accumulated and used in the following "periods". The maximum amount of accumulated bandwidth is bounded by "burst". And the maximun amount of CPU a group can consume in a given period is "buffer" which is equivalent to "quota" + "burst in case that this group has done enough accumulation. Signed-off-by: Huaixin Chang Acked-by: Shanpei Chen Signed-off-by: Tianchen Ding Reviewed-by: Cruz Zhao Link: https://gitee.com/anolis/cloud-kernel/pulls/2923 --- kernel/sched/core.c | 36 +++++++++++++++++++++++++++--------- kernel/sched/fair.c | 1 + kernel/sched/sched.h | 1 + 3 files changed, 29 insertions(+), 9 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 534cbafe2232..193b5eaaf78d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -11003,6 +11003,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, { int i, ret = 0, runtime_enabled, runtime_was_enabled; struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; + u64 buffer; if (tg == &root_task_group) return -EINVAL; @@ -11033,6 +11034,16 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, burst + quota > max_cfs_runtime)) return -EINVAL; + /* + * Bound burst to defend burst against overflow during bandwidth shift. + */ + if (burst > max_cfs_runtime) + return -EINVAL; + + if (quota == RUNTIME_INF) + buffer = RUNTIME_INF; + else + buffer = min(max_cfs_runtime, quota + burst); /* * Prevent race between setting of cfs_rq->runtime_enabled and * unthrottle_offline_cfs_rqs(). 
@@ -11057,6 +11068,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, cfs_b->period = ns_to_ktime(period); cfs_b->quota = quota; cfs_b->burst = burst; + cfs_b->buffer = buffer; __refill_cfs_bandwidth_runtime(cfs_b); @@ -11143,7 +11155,11 @@ static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) { u64 quota, period, burst; - if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC) + if (cfs_burst_us < 0) + burst = RUNTIME_INF; + else if ((u64)cfs_burst_us <= U64_MAX / NSEC_PER_USEC) + burst = (u64)cfs_burst_us * NSEC_PER_USEC; + else return -EINVAL; burst = (u64)cfs_burst_us * NSEC_PER_USEC; @@ -11157,6 +11173,9 @@ static long tg_get_cfs_burst(struct task_group *tg) { u64 burst_us; + if (tg->cfs_bandwidth.burst == RUNTIME_INF) + return -1; + burst_us = tg->cfs_bandwidth.burst; do_div(burst_us, NSEC_PER_USEC); @@ -11187,18 +11206,17 @@ static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, return tg_set_cfs_period(css_tg(css), cfs_period_us); } -static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css, +static s64 cpu_cfs_burst_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) { return tg_get_cfs_burst(css_tg(css)); } -static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css, - struct cftype *cftype, u64 cfs_burst_us) +static int cpu_cfs_burst_write_s64(struct cgroup_subsys_state *css, + struct cftype *cftype, s64 cfs_burst_us) { return tg_set_cfs_burst(css_tg(css), cfs_burst_us); } - struct cfs_schedulable_data { struct task_group *tg; u64 period, quota; @@ -11404,8 +11422,8 @@ static struct cftype cpu_legacy_files[] = { }, { .name = "cfs_burst_us", - .read_u64 = cpu_cfs_burst_read_u64, - .write_u64 = cpu_cfs_burst_write_u64, + .read_s64 = cpu_cfs_burst_read_s64, + .write_s64 = cpu_cfs_burst_write_s64, }, { .name = "stat", @@ -11640,8 +11658,8 @@ static struct cftype cpu_files[] = { { .name = "max.burst", .flags = CFTYPE_NOT_ON_ROOT, - .read_u64 = cpu_cfs_burst_read_u64, - .write_u64 
= cpu_cfs_burst_write_u64, + .read_s64 = cpu_cfs_burst_read_s64, + .write_s64 = cpu_cfs_burst_write_s64, }, #endif #ifdef CONFIG_UCLAMP_TASK_GROUP diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 42d825758e30..277d92122485 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6390,6 +6390,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *paren cfs_b->quota = RUNTIME_INF; cfs_b->period = ns_to_ktime(default_cfs_period()); cfs_b->burst = 0; + cfs_b->buffer = RUNTIME_INF; cfs_b->hierarchical_quota = parent ? parent->hierarchical_quota : RUNTIME_INF; INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 29967f2a8c6a..829577e9c315 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -340,6 +340,7 @@ struct cfs_bandwidth { u64 quota; u64 runtime; u64 burst; + u64 buffer; u64 runtime_snap; s64 hierarchical_quota; -- Gitee From 41ee4cdd01a8d490a0da613a7f1e46b5c08a9294 Mon Sep 17 00:00:00 2001 From: Huaixin Chang Date: Fri, 6 Mar 2020 18:34:40 +0800 Subject: [PATCH 0415/2138] anolis: sched/fair: Make CFS bandwidth controller burstable ANBZ: #8586 Accumulate unused quota from previous periods, thus accumulated bandwidth runtime can be used in the following periods. During accumulation, take care of runtime overflow. Previous non-burstable CFS bandwidth controller only assign quota to runtime, that saves a lot. A sysctl parameter sysctl_sched_cfs_bw_burst_onset_percent is introduced to denote how many percent of burst is given on setting cfs bandwidth. By default it is 0, which means on burst is allowed unless accumulated. Signed-off-by: Huaixin Chang Acked-by: Shanpei Chen [dtcccc: remove sysctl_sched_cfs_bw_burst_enabled to make cpu burst default on, which is the same with upstream.] 
Signed-off-by: Tianchen Ding Reviewed-by: Cruz Zhao Link: https://gitee.com/anolis/cloud-kernel/pulls/2923 --- kernel/sched/core.c | 32 ++++++++++++++++++------ kernel/sched/fair.c | 58 +++++++++++++++++++++++++++++++------------- kernel/sched/sched.h | 9 +++++-- 3 files changed, 72 insertions(+), 27 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 193b5eaaf78d..7b277aaf254d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -175,6 +175,14 @@ static inline int __task_prio(const struct task_struct *p) return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */ } +#ifdef CONFIG_CFS_BANDWIDTH +/* + * Percent of burst assigned to cfs_b->runtime on tg_set_cfs_bandwidth, + * 0 by default. + */ +unsigned int sysctl_sched_cfs_bw_burst_onset_percent; +#endif + /* * l(a,b) * le(a,b) := !l(b,a) @@ -10994,7 +11002,7 @@ static DEFINE_MUTEX(cfs_constraints_mutex); const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ /* More than 203 days if BW_SHIFT equals 20. 
*/ -static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; +const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); @@ -11003,7 +11011,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, { int i, ret = 0, runtime_enabled, runtime_was_enabled; struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; - u64 buffer; + u64 buffer, burst_onset; if (tg == &root_task_group) return -EINVAL; @@ -11070,14 +11078,22 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, cfs_b->burst = burst; cfs_b->buffer = buffer; - __refill_cfs_bandwidth_runtime(cfs_b); + cfs_b->max_overrun = DIV_ROUND_UP_ULL(max_cfs_runtime, quota); + cfs_b->runtime = cfs_b->quota; - /* - * Restart the period timer (if active) to handle new - * period expiry: - */ + /* burst_onset needed */ + if (cfs_b->quota != RUNTIME_INF && sysctl_sched_cfs_bw_burst_onset_percent > 0) { + + burst_onset = div_u64(burst, 100) * + sysctl_sched_cfs_bw_burst_onset_percent; + + cfs_b->runtime += burst_onset; + cfs_b->runtime = min(max_cfs_runtime, cfs_b->runtime); + } + + /* Restart the period timer (if active) to handle new period expiry: */ if (runtime_enabled) - start_cfs_bandwidth(cfs_b); + start_cfs_bandwidth(cfs_b, 1); } for_each_online_cpu(i) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 277d92122485..a3b07ef98bb8 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -161,6 +161,15 @@ static struct ctl_table sched_fair_sysctls[] = { .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ONE, }, + { + .procname = "sched_cfs_bw_burst_onset_percent", + .data = &sysctl_sched_cfs_bw_burst_onset_percent, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE_HUNDRED, + }, #endif #ifdef CONFIG_NUMA_BALANCING { @@ -5594,22 +5603,18 @@ static inline u64 sched_cfs_bandwidth_slice(void) * * requires 
cfs_b->lock */ -void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) +static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b, + u64 overrun) { - s64 runtime; + u64 refill; if (unlikely(cfs_b->quota == RUNTIME_INF)) return; - cfs_b->runtime += cfs_b->quota; - runtime = cfs_b->runtime_snap - cfs_b->runtime; - if (runtime > 0) { - cfs_b->burst_time += runtime; - cfs_b->nr_burst++; - } - - cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst); - cfs_b->runtime_snap = cfs_b->runtime; + overrun = min(overrun, cfs_b->max_overrun); + refill = cfs_b->quota * overrun; + cfs_b->runtime += refill; + cfs_b->runtime = min(cfs_b->runtime, cfs_b->buffer); } static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) @@ -5631,7 +5636,7 @@ static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, if (cfs_b->quota == RUNTIME_INF) amount = min_amount; else { - start_cfs_bandwidth(cfs_b); + start_cfs_bandwidth(cfs_b, 0); if (cfs_b->runtime > 0) { amount = min(cfs_b->runtime, min_amount); @@ -6101,7 +6106,7 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u cfs_b->nr_periods += overrun; /* Refill extra burst quota even if cfs_b->idle */ - __refill_cfs_bandwidth_runtime(cfs_b); + __refill_cfs_bandwidth_runtime(cfs_b, overrun); /* * idle depends on !throttled (for the case of a large deficit), and if @@ -6356,8 +6361,17 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) new = old * 2; if (new < max_cfs_quota_period) { cfs_b->period = ns_to_ktime(new); - cfs_b->quota *= 2; - cfs_b->burst *= 2; + cfs_b->quota = min(cfs_b->quota * 2, + max_cfs_runtime); + + cfs_b->burst = min(cfs_b->burst * 2, + max_cfs_runtime); + + cfs_b->buffer = min(max_cfs_runtime, + cfs_b->quota + cfs_b->burst); + /* Add 1 in case max_overrun becomes 0. 
*/ + cfs_b->max_overrun >>= 1; + cfs_b->max_overrun++; pr_warn_ratelimited( "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n", @@ -6414,16 +6428,26 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) #endif } -void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, int init) { + u64 overrun; + lockdep_assert_held(&cfs_b->lock); if (cfs_b->period_active) return; cfs_b->period_active = 1; - hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); + overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); + + /* + * When period timer stops, quota for the following period is not + * refilled, however period timer is already forwarded. We should + * accumulate quota once more than overrun here. + */ + if (!init) + __refill_cfs_bandwidth_runtime(cfs_b, overrun + 1); } static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 829577e9c315..722b3a2881e8 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -106,6 +106,11 @@ struct cpuidle_state; #define TASK_ON_RQ_QUEUED 1 #define TASK_ON_RQ_MIGRATING 2 +#ifdef CONFIG_CFS_BANDWIDTH +extern const u64 max_cfs_runtime; +extern unsigned int sysctl_sched_cfs_bw_burst_onset_percent; +#endif + extern __read_mostly int scheduler_running; extern unsigned long calc_load_update; @@ -341,6 +346,7 @@ struct cfs_bandwidth { u64 runtime; u64 burst; u64 buffer; + u64 max_overrun; u64 runtime_snap; s64 hierarchical_quota; @@ -457,8 +463,7 @@ extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *parent); extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent); -extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); -extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); 
+extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, int init); extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); extern bool cfs_task_bw_constrained(struct task_struct *p); -- Gitee From 80b60357a0fb5065f5838422f0521dc2d7a79eec Mon Sep 17 00:00:00 2001 From: Huaixin Chang Date: Wed, 11 Mar 2020 11:07:54 +0800 Subject: [PATCH 0416/2138] anolis: sched/fair: Add cfs bandwidth burst statistics ANBZ: #8586 Introduce statistics exports for the burstable cfs bandwidth controller. The following exports are included: current_bw: current runtime in global pool nr_burst: number of periods bandwidth burst occurs burst_time: cumulative wall-time that any cpus has used above quota in respective periods Signed-off-by: Huaixin Chang Acked-by: Shanpei Chen [dtcccc: only add current_bw and turn it to usec in cgroup v2, keeping the same unit with upstream.] Signed-off-by: Tianchen Ding Reviewed-by: Cruz Zhao Link: https://gitee.com/anolis/cloud-kernel/pulls/2923 --- kernel/sched/core.c | 11 +++++++++-- kernel/sched/fair.c | 11 ++++++++++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7b277aaf254d..925ef8b7bd63 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -11091,6 +11091,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, cfs_b->runtime = min(max_cfs_runtime, cfs_b->runtime); } + cfs_b->runtime_snap = cfs_b->runtime; + /* Restart the period timer (if active) to handle new period expiry: */ if (runtime_enabled) start_cfs_bandwidth(cfs_b, 1); @@ -11343,6 +11345,7 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v) seq_printf(sf, "wait_sum %llu\n", ws); } + seq_printf(sf, "current_bw %llu\n", cfs_b->runtime); seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); @@ -11486,20 +11489,24 @@ static int cpu_extra_stat_show(struct seq_file *sf, { struct task_group *tg = css_tg(css); struct cfs_bandwidth 
*cfs_b = &tg->cfs_bandwidth; - u64 throttled_usec, burst_usec; + u64 throttled_usec, current_bw_usec, burst_usec; throttled_usec = cfs_b->throttled_time; do_div(throttled_usec, NSEC_PER_USEC); + current_bw_usec = cfs_b->runtime; + do_div(current_bw_usec, NSEC_PER_USEC); burst_usec = cfs_b->burst_time; do_div(burst_usec, NSEC_PER_USEC); seq_printf(sf, "nr_periods %d\n" "nr_throttled %d\n" "throttled_usec %llu\n" + "current_bw_usec %llu\n" "nr_bursts %d\n" "burst_usec %llu\n", cfs_b->nr_periods, cfs_b->nr_throttled, - throttled_usec, cfs_b->nr_burst, burst_usec); + throttled_usec, current_bw_usec, cfs_b->nr_burst, + burst_usec); } #endif return 0; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index a3b07ef98bb8..05c23f9ce1b0 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5606,15 +5606,24 @@ static inline u64 sched_cfs_bandwidth_slice(void) static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b, u64 overrun) { - u64 refill; + u64 refill, runtime; if (unlikely(cfs_b->quota == RUNTIME_INF)) return; + if (cfs_b->runtime_snap > cfs_b->runtime) { + runtime = cfs_b->runtime_snap - cfs_b->runtime; + if (runtime > cfs_b->quota) { + cfs_b->burst_time += runtime - cfs_b->quota; + cfs_b->nr_burst++; + } + } + overrun = min(overrun, cfs_b->max_overrun); refill = cfs_b->quota * overrun; cfs_b->runtime += refill; cfs_b->runtime = min(cfs_b->runtime, cfs_b->buffer); + cfs_b->runtime_snap = cfs_b->runtime; } static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) -- Gitee From fd6b0e7e28ea77e16f949949466f04d32d9e7400 Mon Sep 17 00:00:00 2001 From: Huaixin Chang Date: Wed, 16 Dec 2020 18:43:19 +0800 Subject: [PATCH 0417/2138] anolis: sched/fair: Add document for burstable CFS bandwidth control ANBZ: #8586 Basic description of usage and effect for CFS Bandwidth Control Burst. Signed-off-by: Huaixin Chang Acked-by: Shanpei Chen [dtcccc: add documents about anolis own fields.] 
Signed-off-by: Tianchen Ding Reviewed-by: Cruz Zhao Link: https://gitee.com/anolis/cloud-kernel/pulls/2923 --- Documentation/admin-guide/cgroup-v2.rst | 3 ++- Documentation/scheduler/sched-bwc.rst | 9 ++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 8238711ee842..aa0edf0d07f0 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1045,11 +1045,12 @@ All time durations are in microseconds. - user_usec - system_usec - and the following five when the controller is enabled: + and the following six when the controller is enabled: - nr_periods - nr_throttled - throttled_usec + - current_bw - nr_bursts - burst_usec diff --git a/Documentation/scheduler/sched-bwc.rst b/Documentation/scheduler/sched-bwc.rst index 41ed2ceafc92..329b00ba40f3 100644 --- a/Documentation/scheduler/sched-bwc.rst +++ b/Documentation/scheduler/sched-bwc.rst @@ -122,9 +122,15 @@ This is tunable via procfs:: Larger slice values will reduce transfer overheads, while smaller values allow for more fine-grained consumption. +Sometimes users might want a group to burst without accumulation. This is +tunable via:: + /proc/sys/kernel/sched_cfs_bw_burst_onset_percent (default=0) + +Up to 100% runtime of cpu.cfs_burst_us might be given on setting bandwidth. + Statistics ---------- -A group's bandwidth statistics are exported via 5 fields in cpu.stat. +A group's bandwidth statistics are exported via 6 fields in cpu.stat. cpu.stat: @@ -132,6 +138,7 @@ cpu.stat: - nr_throttled: Number of times the group has been throttled/limited. - throttled_time: The total time duration (in nanoseconds) for which entities of the group have been throttled. +- current_bw: Current runtime in global pool. - nr_bursts: Number of periods burst occurs. - burst_time: Cumulative wall-time (in nanoseconds) that any CPUs has used above quota in respective periods. 
-- Gitee From 98e31e8e53e4ffa3e4b4db59d17ab242ca3a338a Mon Sep 17 00:00:00 2001 From: Huaixin Chang Date: Tue, 5 May 2020 18:29:41 +0800 Subject: [PATCH 0418/2138] anolis: sched/fair: Introduce init buffer into CFS burst ANBZ: #8586 For CFS burst, cpu.cfs_burst_us is used to denote how much unused cputime a group can accumulate. However, users may want a much bigger buffer at first, and a smaller buffer at runtime. Thus, cpu.cfs_init_buffer_us is introduced to denote how much init cputime a group is granted at the very beginning. A group can consume cputime from init buffer without being throttled. When cputime from init buffer drops below cpu.cfs_burst_us, normal behaviour of CPU burst is restored. Also init buffer has no effect on its group tasks any more. Signed-off-by: Huaixin Chang Acked-by: Shanpei Chen [dtcccc: merge ("anolis: sched: Support cpu burst in cgroup v2").] Signed-off-by: Yi Tao Signed-off-by: Tianchen Ding Reviewed-by: Cruz Zhao Link: https://gitee.com/anolis/cloud-kernel/pulls/2923 --- kernel/sched/core.c | 79 +++++++++++++++++++++++++++++++++++++++----- kernel/sched/fair.c | 4 ++- kernel/sched/sched.h | 2 ++ 3 files changed, 75 insertions(+), 10 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 925ef8b7bd63..1c9c4ee3ed2b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -11007,7 +11007,7 @@ const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, - u64 burst) + u64 burst, u64 init_buffer) { int i, ret = 0, runtime_enabled, runtime_was_enabled; struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; @@ -11045,7 +11045,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, /* * Bound burst to defend burst against overflow during bandwidth shift. 
*/ - if (burst > max_cfs_runtime) + if (burst > max_cfs_runtime || init_buffer > max_cfs_runtime) return -EINVAL; if (quota == RUNTIME_INF) @@ -11077,6 +11077,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, cfs_b->quota = quota; cfs_b->burst = burst; cfs_b->buffer = buffer; + cfs_b->init_buffer = init_buffer; cfs_b->max_overrun = DIV_ROUND_UP_ULL(max_cfs_runtime, quota); cfs_b->runtime = cfs_b->quota; @@ -11091,6 +11092,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, cfs_b->runtime = min(max_cfs_runtime, cfs_b->runtime); } + cfs_b->runtime = max(cfs_b->runtime, init_buffer); + cfs_b->current_buffer = max(cfs_b->buffer, init_buffer); cfs_b->runtime_snap = cfs_b->runtime; /* Restart the period timer (if active) to handle new period expiry: */ @@ -11118,10 +11121,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) { - u64 quota, period, burst; + u64 quota, period, burst, init_buffer; period = ktime_to_ns(tg->cfs_bandwidth.period); burst = tg->cfs_bandwidth.burst; + init_buffer = tg->cfs_bandwidth.init_buffer; if (cfs_quota_us < 0) quota = RUNTIME_INF; else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) @@ -11129,7 +11133,7 @@ static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) else return -EINVAL; - return tg_set_cfs_bandwidth(tg, period, quota, burst); + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } static long tg_get_cfs_quota(struct task_group *tg) @@ -11147,7 +11151,7 @@ static long tg_get_cfs_quota(struct task_group *tg) static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) { - u64 quota, period, burst; + u64 quota, period, burst, init_buffer; if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) return -EINVAL; @@ -11155,8 +11159,9 @@ static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) period = (u64)cfs_period_us * 
NSEC_PER_USEC; quota = tg->cfs_bandwidth.quota; burst = tg->cfs_bandwidth.burst; + init_buffer = tg->cfs_bandwidth.init_buffer; - return tg_set_cfs_bandwidth(tg, period, quota, burst); + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } static long tg_get_cfs_period(struct task_group *tg) @@ -11171,7 +11176,7 @@ static long tg_get_cfs_period(struct task_group *tg) static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) { - u64 quota, period, burst; + u64 quota, period, burst, init_buffer; if (cfs_burst_us < 0) burst = RUNTIME_INF; @@ -11183,8 +11188,9 @@ static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) burst = (u64)cfs_burst_us * NSEC_PER_USEC; period = ktime_to_ns(tg->cfs_bandwidth.period); quota = tg->cfs_bandwidth.quota; + init_buffer = tg->cfs_bandwidth.init_buffer; - return tg_set_cfs_bandwidth(tg, period, quota, burst); + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } static long tg_get_cfs_burst(struct task_group *tg) @@ -11200,6 +11206,36 @@ static long tg_get_cfs_burst(struct task_group *tg) return burst_us; } +static int tg_set_cfs_init_buffer(struct task_group *tg, long cfs_init_buffer_us) +{ + u64 quota, period, burst, init_buffer; + + period = ktime_to_ns(tg->cfs_bandwidth.period); + quota = tg->cfs_bandwidth.quota; + burst = tg->cfs_bandwidth.burst; + if (cfs_init_buffer_us < 0) + init_buffer = RUNTIME_INF; + else if ((u64)cfs_init_buffer_us <= U64_MAX / NSEC_PER_USEC) + init_buffer = (u64)cfs_init_buffer_us * NSEC_PER_USEC; + else + return -EINVAL; + + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); +} + +static long tg_get_cfs_init_buffer(struct task_group *tg) +{ + u64 init_buffer_us; + + if (tg->cfs_bandwidth.init_buffer == RUNTIME_INF) + return -1; + + init_buffer_us = tg->cfs_bandwidth.init_buffer; + do_div(init_buffer_us, NSEC_PER_USEC); + + return init_buffer_us; +} + static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, struct cftype 
*cft) { @@ -11235,6 +11271,19 @@ static int cpu_cfs_burst_write_s64(struct cgroup_subsys_state *css, { return tg_set_cfs_burst(css_tg(css), cfs_burst_us); } + +static s64 cpu_cfs_init_buffer_read_s64(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return tg_get_cfs_init_buffer(css_tg(css)); +} + +static int cpu_cfs_init_buffer_write_s64(struct cgroup_subsys_state *css, + struct cftype *cftype, s64 cfs_init_buffer_us) +{ + return tg_set_cfs_init_buffer(css_tg(css), cfs_init_buffer_us); +} + struct cfs_schedulable_data { struct task_group *tg; u64 period, quota; @@ -11444,6 +11493,11 @@ static struct cftype cpu_legacy_files[] = { .read_s64 = cpu_cfs_burst_read_s64, .write_s64 = cpu_cfs_burst_write_s64, }, + { + .name = "cfs_init_buffer_us", + .read_s64 = cpu_cfs_init_buffer_read_s64, + .write_s64 = cpu_cfs_init_buffer_write_s64, + }, { .name = "stat", .seq_show = cpu_cfs_stat_show, @@ -11638,6 +11692,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct task_group *tg = css_tg(of_css(of)); + u64 init_buffer = tg_get_cfs_init_buffer(tg); u64 period = tg_get_cfs_period(tg); u64 burst = tg->cfs_bandwidth.burst; u64 quota; @@ -11645,7 +11700,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, ret = cpu_period_quota_parse(buf, &period, "a); if (!ret) - ret = tg_set_cfs_bandwidth(tg, period, quota, burst); + ret = tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); return ret ?: nbytes; } #endif @@ -11684,6 +11739,12 @@ static struct cftype cpu_files[] = { .read_s64 = cpu_cfs_burst_read_s64, .write_s64 = cpu_cfs_burst_write_s64, }, + { + .name = "max.init_buffer", + .flags = CFTYPE_NOT_ON_ROOT, + .read_s64 = cpu_cfs_init_buffer_read_s64, + .write_s64 = cpu_cfs_init_buffer_write_s64, + }, #endif #ifdef CONFIG_UCLAMP_TASK_GROUP { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 05c23f9ce1b0..a76e8b4570d7 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5619,10 
+5619,11 @@ static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b, } } + cfs_b->current_buffer = max(cfs_b->runtime, cfs_b->buffer); overrun = min(overrun, cfs_b->max_overrun); refill = cfs_b->quota * overrun; cfs_b->runtime += refill; - cfs_b->runtime = min(cfs_b->runtime, cfs_b->buffer); + cfs_b->runtime = min(cfs_b->runtime, cfs_b->current_buffer); cfs_b->runtime_snap = cfs_b->runtime; } @@ -6413,6 +6414,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *paren cfs_b->quota = RUNTIME_INF; cfs_b->period = ns_to_ktime(default_cfs_period()); cfs_b->burst = 0; + cfs_b->init_buffer = 0; cfs_b->buffer = RUNTIME_INF; cfs_b->hierarchical_quota = parent ? parent->hierarchical_quota : RUNTIME_INF; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 722b3a2881e8..473cd58e2c6e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -345,6 +345,8 @@ struct cfs_bandwidth { u64 quota; u64 runtime; u64 burst; + u64 init_buffer; + u64 current_buffer; u64 buffer; u64 max_overrun; u64 runtime_snap; -- Gitee From fc238134a3a87bdb08e67fa89edd0e7f8e28ddb3 Mon Sep 17 00:00:00 2001 From: Cruz Zhao Date: Mon, 19 Feb 2024 15:57:42 +0800 Subject: [PATCH 0419/2138] anolis: sched/core: introduce CPUTIME_SIBIDLE_TASK ANBZ: #8547 As acpu uses rq_clock() as clock source to account sibidle time, irq time will be accounted into sibidle time. However, in some scenarios, sibidle sum will be much larger than exec runtime, e.g., we observed that sibidle time of task calling futex_wake() is 50% larger than exec runtime, which is confusing. We introduce cpustat[CPUTIME_SIBIDLE_TASK] to account the time that a task is actually running while the SMT siblings are idle, using rq_clock_task() as clock source. Similarly, we introduce cpustat[CPUTIME_FORCEIDLE_TASK] to account the time that a task is actually running while the SMT siblings are forced idle, using rq_clock_task() as clock source. 
|<----------------------sibidle time---------------------->| |<---sibidle task time--->| |<- -sibidle task time--->| |<------exec runtime----->| |<-----exec runtime------>| ht0 | A running | irq | A running | ht1 | idle | And for ht aware quota, the sibidle_delta_task * ratio will be accounted to the task's cfs_rq_runtime, rather than sibidle_delta. Interfaces: - task level: /proc/$pid/sched, row core_forceidle_task_sum, row core_sibidle_task_sum. - cgroup level: /sys/fs/cgroup/$cg/cpu.stat, row core_sched.force_idle_task_usec row sibidle_task_usec. Signed-off-by: Cruz Zhao --- include/linux/cgroup-defs.h | 2 ++ include/linux/kernel_stat.h | 4 +++- include/linux/sched.h | 2 ++ kernel/cgroup/rstat.c | 22 ++++++++++++++++++++++ kernel/sched/core.c | 29 +++++++++++++++++++++++++---- kernel/sched/core_sched.c | 8 +++++++- kernel/sched/cputime.c | 8 +++++++- kernel/sched/debug.c | 2 ++ kernel/sched/sched.h | 3 +++ 9 files changed, 73 insertions(+), 7 deletions(-) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 6b7077b70fa4..e96206d91c81 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -304,9 +304,11 @@ struct cgroup_base_stat { #ifdef CONFIG_SCHED_CORE u64 forceidle_sum; + u64 forceidle_task_sum; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) u64 sibidle_sum; + u64 sibidle_task_sum; #endif }; diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 01f0c6391a98..9e86ee77d335 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -30,9 +30,11 @@ enum cpu_usage_stat { CPUTIME_GUEST_NICE, #ifdef CONFIG_SCHED_CORE CPUTIME_FORCEIDLE, + CPUTIME_FORCEIDLE_TASK, #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) CPUTIME_SIBIDLE, + CPUTIME_SIBIDLE_TASK, #endif NR_STATS, }; @@ -134,7 +136,7 @@ extern void account_process_tick(struct task_struct *, int user); extern void account_idle_ticks(unsigned long ticks); #if defined(CONFIG_SCHED_ACPU) || 
defined(CONFIG_SCHED_CORE) -extern void __account_sibidle_time(struct task_struct *tsk, u64 delta, bool fi); +extern void __account_sibidle_time(struct task_struct *tsk, u64 delta, u64 delta_task, bool fi); #endif #endif /* _LINUX_KERNEL_STAT_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 0c2b973f4987..99df651e6b11 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -544,9 +544,11 @@ struct sched_statistics { #ifdef CONFIG_SCHED_CORE u64 core_forceidle_sum; + u64 core_forceidle_task_sum; #endif #if defined(CONFIG_SCHED_CORE) || defined(CONFIG_SCHED_ACPU) u64 core_sibidle_sum; + u64 core_sibidle_task_sum; #endif #endif /* CONFIG_SCHEDSTATS */ diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index a29c5275c68e..2ac57d3760cf 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -326,9 +326,11 @@ static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat, dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime; #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum += src_bstat->forceidle_sum; + dst_bstat->forceidle_task_sum += src_bstat->forceidle_task_sum; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) dst_bstat->sibidle_sum += src_bstat->sibidle_sum; + dst_bstat->sibidle_task_sum += src_bstat->sibidle_task_sum; #endif } @@ -340,9 +342,11 @@ static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat, dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime; #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum -= src_bstat->forceidle_sum; + dst_bstat->forceidle_task_sum -= src_bstat->forceidle_task_sum; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) dst_bstat->sibidle_sum -= src_bstat->sibidle_sum; + dst_bstat->sibidle_task_sum -= src_bstat->sibidle_task_sum; #endif } @@ -436,11 +440,17 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp, case CPUTIME_FORCEIDLE: rstatc->bstat.forceidle_sum += delta_exec; break; + 
case CPUTIME_FORCEIDLE_TASK: + rstatc->bstat.forceidle_task_sum += delta_exec; + break; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) case CPUTIME_SIBIDLE: rstatc->bstat.sibidle_sum += delta_exec; break; + case CPUTIME_SIBIDLE_TASK: + rstatc->bstat.sibidle_task_sum += delta_exec; + break; #endif default: break; @@ -484,9 +494,11 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat) #ifdef CONFIG_SCHED_CORE bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE]; + bstat->forceidle_task_sum += cpustat[CPUTIME_FORCEIDLE_TASK]; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) bstat->sibidle_sum += cpustat[CPUTIME_SIBIDLE]; + bstat->sibidle_task_sum += cpustat[CPUTIME_SIBIDLE_TASK]; #endif } } @@ -498,9 +510,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) struct cgroup_base_stat bstat; #ifdef CONFIG_SCHED_CORE u64 forceidle_time; + u64 forceidle_task_time; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) u64 sibidle_time; + u64 sibidle_task_time; #endif if (cgroup_parent(cgrp)) { @@ -510,9 +524,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) &utime, &stime); #ifdef CONFIG_SCHED_CORE forceidle_time = cgrp->bstat.forceidle_sum; + forceidle_task_time = cgrp->bstat.forceidle_task_sum; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) sibidle_time = cgrp->bstat.sibidle_sum; + sibidle_task_time = cgrp->bstat.sibidle_task_sum; #endif cgroup_rstat_flush_release(); } else { @@ -522,9 +538,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) stime = bstat.cputime.stime; #ifdef CONFIG_SCHED_CORE forceidle_time = bstat.forceidle_sum; + forceidle_task_time = bstat.forceidle_task_sum; #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) sibidle_time = bstat.sibidle_sum; + sibidle_task_time = bstat.sibidle_task_sum; #endif } @@ -533,9 +551,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) do_div(stime, NSEC_PER_USEC); #ifdef 
CONFIG_SCHED_CORE do_div(forceidle_time, NSEC_PER_USEC); + do_div(forceidle_task_time, NSEC_PER_USEC); #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) do_div(sibidle_time, NSEC_PER_USEC); + do_div(sibidle_task_time, NSEC_PER_USEC); #endif seq_printf(seq, "usage_usec %llu\n" @@ -545,9 +565,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) #ifdef CONFIG_SCHED_CORE seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time); + seq_printf(seq, "core_sched.force_idle_task_usec %llu\n", forceidle_task_time); #endif #if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) seq_printf(seq, "sibidle_usec %llu\n", sibidle_time); + seq_printf(seq, "sibidle_task_usec %llu\n", sibidle_task_time); #endif } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1c9c4ee3ed2b..0df85c1161b7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -382,6 +382,7 @@ static void __sched_core_flip(bool enabled) cpu_rq(t)->core_enabled = enabled; cpu_rq(cpu)->core->core_sibidle_start = 0; + cpu_rq(cpu)->core->core_sibidle_start_task = 0; sched_core_unlock(cpu, &flags); @@ -5044,8 +5045,9 @@ static void update_acpu(struct rq *rq, struct task_struct *prev, struct task_str const int cpu = cpu_of(rq); const struct cpumask *smt_mask = cpu_smt_mask(cpu); u64 now = rq_clock(rq); - u64 sibidle_sum, last_update_time; - s64 delta, last; + u64 now_task = rq_clock_task(rq); + u64 sibidle_sum, sibidle_task_sum, last_update_time, last_update_time_task; + s64 delta, delta_task, last, last_task; int i; if (!static_branch_likely(&acpu_enabled) || !schedstat_enabled()) @@ -5081,29 +5083,44 @@ static void update_acpu(struct rq *rq, struct task_struct *prev, struct task_str rq_i->last_acpu_update_time); last_update_time = last >= 0 ? rq->last_acpu_update_time : rq_i->last_acpu_update_time; + last_task = (s64)(rq->last_acpu_update_time_task - + rq_i->last_acpu_update_time_task); + last_update_time_task = last_task >= 0 ? 
+ rq->last_acpu_update_time_task : + rq_i->last_acpu_update_time_task; /* * Sibling may update acpu at the same time, and it's * timestamp may be newer than this rq. */ delta = now - last_update_time; delta = delta > 0 ? delta : 0; + delta_task = now_task - last_update_time_task; + delta_task = delta_task > 0 ? delta_task : 0; /* Add the delta to improve accuracy. */ sibidle_sum = last >= 0 ? rq->sibidle_sum : rq_i->acpu_idle_sum; - if (curr_i == rq_i->idle) + sibidle_task_sum = last_task >= 0 ? rq->sibidle_task_sum : + rq_i->acpu_idle_sum; + if (curr_i == rq_i->idle) { sibidle_sum += delta; + sibidle_task_sum += delta_task; + } } } if (prev != rq->idle) { delta = sibidle_sum - rq->sibidle_sum; delta = delta > 0 ? delta : 0; - __account_sibidle_time(prev, delta, false); + delta_task = sibidle_task_sum - rq->sibidle_task_sum; + delta_task = delta_task > 0 ? delta_task : 0; + __account_sibidle_time(prev, delta, delta_task, false); } rq->sibidle_sum = sibidle_sum; + rq->sibidle_task_sum = sibidle_task_sum; out: rq->last_acpu_update_time = now; + rq->last_acpu_update_time_task = now_task; } #else static inline void update_acpu(struct rq *rq, struct task_struct *prev, struct task_struct *next) @@ -6271,6 +6288,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) sched_core_account_sibidle(rq); /* reset after accounting force idle */ rq->core->core_sibidle_start = 0; + rq->core->core_sibidle_start_task = 0; rq->core->core_sibidle_count = 0; rq->core->core_sibidle_occupation = 0; if (rq->core->core_forceidle_count) { @@ -6366,6 +6384,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (schedstat_enabled() && rq->core->core_sibidle_count) { rq->core->core_sibidle_start = rq_clock(rq->core); + rq->core->core_sibidle_start_task = rq_clock_task(rq->core); rq->core->core_sibidle_occupation = occ; } @@ -6633,6 +6652,7 @@ static void sched_core_cpu_deactivate(unsigned int cpu) * have a cookie. 
*/ core_rq->core_sibidle_start = 0; + core_rq->core_sibidle_start_task = 0; /* install new leader */ for_each_cpu(t, smt_mask) { @@ -10229,6 +10249,7 @@ void __init sched_init(void) rq->core_sibidle_count = 0; rq->core_sibidle_occupation = 0; rq->core_sibidle_start = 0; + rq->core_sibidle_start_task = 0; rq->core_cookie = 0UL; #endif diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c index 8db2999e51c8..f931992fc08e 100644 --- a/kernel/sched/core_sched.c +++ b/kernel/sched/core_sched.c @@ -241,6 +241,7 @@ void __sched_core_account_sibidle(struct rq *rq) { const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq)); u64 delta, now = rq_clock(rq->core); + u64 delta_task, now_task = rq_clock_task(rq->core); struct rq *rq_i; struct task_struct *p; int i; @@ -258,10 +259,12 @@ void __sched_core_account_sibidle(struct rq *rq) goto out; delta = now - rq->core->core_sibidle_start; + delta_task = now_task - rq->core->core_sibidle_start_task; if (unlikely((s64)delta <= 0)) goto out; rq->core->core_sibidle_start = now; + rq->core->core_sibidle_start_task = now_task; if (rq->core->core_sibidle_count > 1 || rq->core->core_sibidle_occupation > 1) { @@ -272,6 +275,8 @@ void __sched_core_account_sibidle(struct rq *rq) */ delta *= rq->core->core_sibidle_count; delta = div_u64(delta, rq->core->core_sibidle_occupation); + delta_task *= rq->core->core_sibidle_count; + delta_task = div_u64(delta_task, rq->core->core_sibidle_occupation); } for_each_cpu(i, smt_mask) { @@ -285,7 +290,8 @@ void __sched_core_account_sibidle(struct rq *rq) * Note: this will account sibidle to the current cpu, even * if it comes from our SMT sibling. 
*/ - __account_sibidle_time(p, delta, !!rq->core->core_forceidle_count); + __account_sibidle_time(p, delta, delta_task, + !!rq->core->core_forceidle_count); } out: diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index ace56789d46e..6b8da47b5ade 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -237,18 +237,24 @@ void account_idle_time(u64 cputime) * * REQUIRES: schedstat is enabled. */ -void __account_sibidle_time(struct task_struct *p, u64 delta, bool fi) +void __account_sibidle_time(struct task_struct *p, u64 delta, u64 delta_task, bool fi) { unsigned int cpu = task_cpu(p); __schedstat_add(p->stats.core_sibidle_sum, delta); + __schedstat_add(p->stats.core_sibidle_task_sum, delta_task); kcpustat_cpu(cpu).cpustat[CPUTIME_SIBIDLE] += delta; + kcpustat_cpu(cpu).cpustat[CPUTIME_SIBIDLE_TASK] += delta_task; cgroup_account_cputime_field(p, CPUTIME_SIBIDLE, delta); + cgroup_account_cputime_field(p, CPUTIME_SIBIDLE_TASK, delta_task); #ifdef CONFIG_SCHED_CORE if (fi) { __schedstat_add(p->stats.core_forceidle_sum, delta); + __schedstat_add(p->stats.core_forceidle_task_sum, delta_task); kcpustat_cpu(cpu).cpustat[CPUTIME_FORCEIDLE] += delta; + kcpustat_cpu(cpu).cpustat[CPUTIME_FORCEIDLE_TASK] += delta_task; cgroup_account_cputime_field(p, CPUTIME_FORCEIDLE, delta); + cgroup_account_cputime_field(p, CPUTIME_FORCEIDLE_TASK, delta_task); } #endif } diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 464fa6b7c2a9..0baa877597df 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -1059,9 +1059,11 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, #ifdef CONFIG_SCHED_CORE PN_SCHEDSTAT(core_forceidle_sum); + PN_SCHEDSTAT(core_forceidle_task_sum); #endif #ifdef CONFIG_SCHED_ACPU PN_SCHEDSTAT(core_sibidle_sum); + PN_SCHEDSTAT(core_sibidle_task_sum); #endif } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 473cd58e2c6e..91383033b76a 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h 
@@ -1166,6 +1166,7 @@ struct rq { unsigned int core_forceidle_seq; unsigned int core_sibidle_occupation; u64 core_sibidle_start; + u64 core_sibidle_start_task; unsigned int core_sibidle_count; #endif @@ -1180,7 +1181,9 @@ struct rq { #ifdef CONFIG_SCHED_ACPU u64 acpu_idle_sum; u64 sibidle_sum; + u64 sibidle_task_sum; u64 last_acpu_update_time; + u64 last_acpu_update_time_task; #endif }; -- Gitee From fff7bcdf1af982a5c117b4a607a95d85613c31a0 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sat, 7 May 2022 18:13:46 +0800 Subject: [PATCH 0420/2138] anolis: newfeature: crypto: ccp: Support SM2 algorithm for hygon ccp. ANBZ: #8582 In order to add SM2 driver for hygon ccp, relating to SM2_sign, SM2_verify, SM2_encrypt and SM2_decrypt. Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/Kconfig | 7 + drivers/crypto/ccp/Makefile | 2 + drivers/crypto/ccp/ccp-crypto-main.c | 15 + drivers/crypto/ccp/ccp-crypto-sm2-hygon.c | 1053 +++++++++++++++++++++ drivers/crypto/ccp/ccp-crypto.h | 39 + drivers/crypto/ccp/ccp-dev-v5.c | 45 + drivers/crypto/ccp/ccp-dev.h | 8 + drivers/crypto/ccp/ccp-ops.c | 94 ++ include/linux/ccp.h | 49 + 9 files changed, 1312 insertions(+) create mode 100644 drivers/crypto/ccp/ccp-crypto-sm2-hygon.c diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 32268e239bf1..9d5d3312f8e3 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -46,6 +46,13 @@ config CRYPTO_DEV_SP_PSP along with software-based Trusted Execution Environment (TEE) to enable third-party trusted applications. 
+config HYGON_GM + bool "Hygon GM (sm2/sm3/sm4) Interface" + default y + depends on CRYPTO_DEV_CCP_CRYPTO && X86_64 + help + Hygon GM ccp driver + config CRYPTO_DEV_CCP_DEBUGFS bool "Enable CCP Internals in DebugFS" default n diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 82be0ac4a0b6..2f002be97210 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -24,3 +24,5 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-des3.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o + +ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index ecd58b38c46e..c2ef834eb1fa 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -39,6 +39,10 @@ static unsigned int rsa_disable; module_param(rsa_disable, uint, 0444); MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value"); +static unsigned int sm_disable; +module_param(sm_disable, uint, 0444); +MODULE_PARM_DESC(sm_disable, "Disable use of SM2/SM3/SM4 - any non-zero value"); + /* List heads for the supported algorithms */ static LIST_HEAD(hash_algs); static LIST_HEAD(skcipher_algs); @@ -322,6 +326,17 @@ static int ccp_register_algs(void) { int ret; +#ifdef CONFIG_HYGON_GM + if (!sm_disable && boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + ret = ccp_register_sm2_hygon_algs(&akcipher_algs); + if (ret) + return ret; + + /* Return on hygon platform */ + return 0; + } +#endif + if (!aes_disable) { ret = ccp_register_aes_algs(&skcipher_algs); if (ret) diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c new file mode 100644 index 000000000000..fbf1c5e85fce --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -0,0 +1,1053 @@ +/* + * Hygon Cryptographic Coprocessor (CCP) SM2 crypto API support + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" + +static const u8 sm2_ecc_p[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, +}; + +static const u8 sm2_ecc_a[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, +}; + +static const u8 sm2_ecc_b[CCP_SM2_OPERAND_LEN] = { + 0x28, 0xE9, 0xFA, 0x9E, 0x9D, 0x9F, 0x5E, 0x34, + 0x4D, 0x5A, 0x9E, 0x4B, 0xCF, 0x65, 0x09, 0xA7, + 0xF3, 0x97, 0x89, 0xF5, 0x15, 0xAB, 0x8F, 0x92, + 0xDD, 0xBC, 0xBD, 0x41, 0x4D, 0x94, 0x0E, 0x93, +}; + +static const u8 sm2_ecc_n_sub_1[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x72, 0x03, 0xDF, 0x6B, 0x21, 0xC6, 0x05, 0x2B, + 0x53, 0xBB, 0xF4, 0x09, 0x39, 0xD5, 0x41, 0x22, +}; + +static const u8 sm2_ecc_gx[CCP_SM2_OPERAND_LEN] = { + 0x32, 0xC4, 0xAE, 0x2C, 0x1F, 0x19, 0x81, 0x19, + 0x5F, 0x99, 0x04, 0x46, 0x6A, 0x39, 0xC9, 0x94, + 0x8F, 0xE3, 0x0B, 0xBF, 0xF2, 0x66, 0x0B, 0xE1, + 0x71, 0x5A, 0x45, 0x89, 0x33, 0x4C, 0x74, 0xC7, +}; + +static const u8 sm2_ecc_gy[CCP_SM2_OPERAND_LEN] = { + 0xBC, 0x37, 0x36, 0xA2, 0xF4, 0xF6, 0x77, 0x9C, + 0x59, 0xBD, 0xCE, 0xE3, 0x6B, 0x69, 0x21, 0x53, + 0xD0, 0xA9, 0x87, 0x7C, 0xC6, 0x2A, 0x47, 0x40, + 0x02, 0xDF, 0x32, 0xE5, 0x21, 0x39, 0xF0, 0xA0, +}; + +struct ccp_sm2_verify_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* compressed message */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* 
input data r */ + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* input data s */ + u8 operand_px[CCP_SM2_OPERAND_LEN]; /* x of public key */ + u8 operand_py[CCP_SM2_OPERAND_LEN]; /* y of public key */ +}; + +struct ccp_sm2_lp_src { + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ + u8 operand_px[CCP_SM2_OPERAND_LEN]; /* x of public key */ + u8 operand_py[CCP_SM2_OPERAND_LEN]; /* y of public key */ +}; + +struct ccp_sm2_kg_src { + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ +}; + +struct ccp_sm2_sign_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* compressed message */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* private key */ + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ +}; + +struct ccp_sm2_mmul_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* mulplicand */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* mulplicator */ +}; + +struct ccp_sm2_dst { + union { + u8 result[CCP_SM2_OPERAND_LEN]; + u32 status; + } u; + u8 result_r[CCP_SM2_OPERAND_LEN]; + u8 result_s[CCP_SM2_OPERAND_LEN]; + u8 result_t[CCP_SM2_OPERAND_LEN]; +}; + +static bool ccp_sm2_is_zero(const u64 *data, u32 count) +{ + u32 i; + + for (i = 0; i < count; i++) { + if (data[i]) + return false; + } + + return true; +} + +/* Return: + * 1: a > b + * -1: a < b + * 0: a = b + */ +static int ccp_sm2_fp_cmp(const u64 *a, const u64 *b, u32 count) +{ + u64 a_cpu, b_cpu; + u32 i; + + for (i = 0; i < count; i++) { + a_cpu = be64_to_cpu(a[i]); + b_cpu = be64_to_cpu(b[i]); + if (a_cpu > b_cpu) + return 1; + else if (a_cpu < b_cpu) + return -1; + } + + return 0; +} + +/* a = a + b */ +static void ccp_sm2_fp_add(u64 *a, const u64 *b, u32 count) +{ + u64 a_cpu, b_cpu, c_cpu, d_cpu; + u32 carry = 0; + s32 i; + + for (i = count - 1; i >= 0; i--) { + a_cpu = be64_to_cpu(a[i]); + b_cpu = be64_to_cpu(b[i]); + c_cpu = a_cpu + b_cpu; + d_cpu = c_cpu + carry; + a[i] = cpu_to_be64(d_cpu); + + if (c_cpu < a_cpu) + carry = 1; + else if (carry && !d_cpu) + carry = 1; + else + carry = 0; + } +} + +/* a = -a */ +static void 
ccp_sm2_fp_neg(u64 *a, u32 count) +{ + u64 a_cpu, c_cpu; + s32 i; + + for (i = 0; i <= count - 1; i++) + a[i] = ~a[i]; + + for (i = count - 1; i >= 0; i--) { + a_cpu = be64_to_cpu(a[i]); + c_cpu = a_cpu + 1; + a[i] = cpu_to_be64(c_cpu); + + if (a_cpu < c_cpu) + break; + } +} + +/* a = a - b */ +static void ccp_sm2_fp_sub(u64 *a, u64 *b, u32 count) +{ + ccp_sm2_fp_neg(b, count); + ccp_sm2_fp_add(a, b, count); +} + +/* a and tmp must be 64B, b and c must be 32B + * a = b * c + */ +static void ccp_sm2_fp_mmul32(u8 *a, const u32 *b, const u32 *c, u8 *tmp) +{ + u64 b_cpu, c_cpu, m_cpu; + u32 rem_cpu; + u32 *base, *m_cur; + int i, j, iter; + + memset(a, 0, CCP_SM2_MMUL_LEN); + + iter = 7; + base = (u32 *)(tmp + CCP_SM2_MMUL_LEN - sizeof(u32)); + for (i = iter; i >= 0; i--) { + b_cpu = be32_to_cpu(b[i]); + memset(tmp, 0, CCP_SM2_MMUL_LEN); + + rem_cpu = 0; + m_cur = base; + for (j = iter; j >= 0; j--) { + c_cpu = be32_to_cpu(c[j]); + + m_cpu = b_cpu * c_cpu + rem_cpu; + rem_cpu = (u32)(m_cpu >> 32); + *m_cur = cpu_to_be32((u32)(m_cpu)); + m_cur--; + } + *m_cur = cpu_to_be32(rem_cpu); + ccp_sm2_fp_add((u64 *)a, (u64 *)tmp, + CCP_SM2_MMUL_LEN / sizeof(u64)); + + base--; + } +} + +/* mmul, dst, tmp must be 64B, remainder in mmul[32-63] + * high:low mod p + * = high*2^256+low mod p + * = high*(p+h)+low mod p + * = high*h+low mod p + * = high*(2^224+2^96-2^64+1)+low mod p + * iterating 8 times + */ +static void ccp_sm2_fast_mod_p(u8 *mmul, u8 *dst, u8 *tmp) +{ + u8 *mmul_high, *mmul_low; + u32 count; + int i, iter, ret; + + mmul_high = mmul; + mmul_low = mmul + CCP_SM2_OPERAND_LEN; + count = CCP_SM2_MMUL_LEN / sizeof(u64); + + iter = 8; + for (i = 0; i < iter; i++) { + /* dst = high * 2^224 */ + memset(dst, 0, CCP_SM2_MMUL_LEN); + memcpy(dst + 4, mmul_high, CCP_SM2_OPERAND_LEN); + + /* dst += high * 2^96 */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 20, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count); + + /* dst += high * 2^64 */ + 
memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 24, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_sub((u64 *)dst, (u64 *)tmp, count); + + /* dst += high * 1 */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 32, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count); + + /* dst += low */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 32, mmul_low, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count); + + /* copy dst to mmul */ + memcpy(mmul, dst, CCP_SM2_MMUL_LEN); + } + + do { + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 32, sm2_ecc_p, CCP_SM2_OPERAND_LEN); + ret = ccp_sm2_fp_cmp( + (u64 *)mmul, (u64 *)tmp, + CCP_SM2_MMUL_LEN / sizeof(u64)); + if (ret < 0) + break; + + ccp_sm2_fp_sub((u64 *)mmul, (u64 *)tmp, count); + } while (1); +} + +static int ccp_sm2_is_privkey_valid(const u8 *priv_key) +{ + u64 last, last_cpu; + bool zero; + int ret; + + /* private key is satisfied with(1, n-1) */ + zero = ccp_sm2_is_zero((const u64 *)priv_key, + CCP_SM2_PRIVATE_KEY_LEN / sizeof(u64) - 1); + if (zero) { + last = *(const u64 *) + (priv_key + CCP_SM2_PRIVATE_KEY_LEN - sizeof(u64)); + last_cpu = be64_to_cpu(last); + if (last_cpu <= 1) + return -EINVAL; + } + + ret = ccp_sm2_fp_cmp((const u64 *)priv_key, + (const u64 *)sm2_ecc_n_sub_1, + CCP_SM2_PRIVATE_KEY_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + return 0; +} + +static int ccp_sm2_setprivkey(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_ctx *sm2 = &ctx->u.sm2; + int ret; + + if (!key || keylen != CCP_SM2_PRIVATE_KEY_LEN) + return -EINVAL; + + ret = ccp_sm2_is_privkey_valid(key); + if (ret < 0) + return ret; + + memcpy(sm2->pri_key, key, CCP_SM2_PRIVATE_KEY_LEN); + sm2->pri_key_len = CCP_SM2_PRIVATE_KEY_LEN; + + return 0; +} + +static int ccp_sm2_post_cmd(struct ccp_sm2_req_ctx *rctx, + u32 src_size, enum ccp_sm2_mode mode, u32 rand) +{ + struct akcipher_request *req 
= rctx->req; + struct ccp_sm2_engine *sm2 = NULL; + int ret; + + sg_init_one(&rctx->src_sg, rctx->src, src_size); + memset(rctx->dst, 0, CCP_SM2_DST_SIZE); + sg_init_one(&rctx->dst_sg, rctx->dst, CCP_SM2_DST_SIZE); + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_SM2; + + sm2 = &rctx->cmd.u.sm2; + sm2->mode = mode; + sm2->rand = rand; /* whether read operand_k from trng */ + sm2->src = &rctx->src_sg; + sm2->src_len = src_size; + sm2->dst = &rctx->dst_sg; + sm2->dst_len = CCP_SM2_DST_SIZE; + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_sm2_pubkey_strict_valid(const u8 *px, const u8 *py) +{ + u64 buf[CCP_SM2_OPERAND_LEN / sizeof(u64)]; + int ret1, ret2; + + /* private key is 1, corresponding public key is invalid */ + ret1 = memcmp(px, sm2_ecc_gx, CCP_SM2_OPERAND_LEN); + ret2 = memcmp(py, sm2_ecc_gy, CCP_SM2_OPERAND_LEN); + if (!ret1 && !ret2) + return -EINVAL; + + /* private key is n - 1, corresponding public key is invalid */ + memcpy(buf, py, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add(buf, (const u64 *)sm2_ecc_gy, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + ret2 = memcmp(buf, sm2_ecc_p, CCP_SM2_OPERAND_LEN); + if (!ret1 && !ret2) + return -EINVAL; + + return 0; +} + +static int ccp_sm2_is_pubkey_valid(struct ccp_sm2_req_ctx *rctx, bool strict) +{ + const u8 *px, *py; + u8 *tmp; + bool zero; + int ret; + + px = rctx->src + CCP_SM2_LP_SRC_SIZE; + py = px + CCP_SM2_OPERAND_LEN; + + zero = ccp_sm2_is_zero((u64 *)px, CCP_SM2_PUBLIC_KEY_LEN / sizeof(u64)); + if (zero) + return -EINVAL; + + /* x < p */ + ret = ccp_sm2_fp_cmp((u64 *)px, (const u64 *)sm2_ecc_p, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + /* y < p */ + ret = ccp_sm2_fp_cmp((u64 *)py, (const u64 *)sm2_ecc_p, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + if (strict) { + ret = ccp_sm2_pubkey_strict_valid(px, py); + if (ret < 0) + return ret; 
+ } + + /* check whether y^2 = x^3 + ax + b */ + tmp = rctx->dst + CCP_SM2_MMUL_LEN; + /* y * y */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)py, (u32 *)py, tmp); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + memcpy(rctx->src + CCP_SM2_MMUL_LEN, + rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + /* x * x + a */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)px, (u32 *)px, tmp); + memset(rctx->src, 0, CCP_SM2_MMUL_LEN); + memcpy(rctx->src + CCP_SM2_OPERAND_LEN, sm2_ecc_a, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)rctx->dst, (u64 *)rctx->src, + CCP_SM2_MMUL_LEN / sizeof(u64)); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + memcpy(rctx->src, rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + /* (x * x + a) * x + b */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)px, (u32 *)rctx->src, tmp); + memset(rctx->src, 0, CCP_SM2_MMUL_LEN); + memcpy(rctx->src + CCP_SM2_OPERAND_LEN, sm2_ecc_b, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)rctx->dst, (u64 *)rctx->src, + CCP_SM2_MMUL_LEN / sizeof(u64)); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + + ret = memcmp(rctx->src + CCP_SM2_MMUL_LEN, + rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + if (ret) + return -EINVAL; + + /* Because the cofactor of the ECC group is 1, + * the checking that [n]P=O is not required. 
+ */ + + return 0; +} + +static int ccp_sm2_setpubkey(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_ctx *sm2 = &ctx->u.sm2; + struct ccp_sm2_req_ctx *rctx = NULL; + int ret; + + if (!key || keylen != CCP_SM2_PUBLIC_KEY_LEN) + return -EINVAL; + + /* check whether public key is valid */ + rctx = kmalloc(sizeof(*rctx), GFP_KERNEL); + if (!rctx) + return -ENOMEM; + + memcpy(rctx->src + CCP_SM2_LP_SRC_SIZE, key, CCP_SM2_PUBLIC_KEY_LEN); + ret = ccp_sm2_is_pubkey_valid(rctx, true); + kfree(rctx); + if (ret < 0) + return ret; + + /* public key is valid */ + memcpy(sm2->pub_key, key, CCP_SM2_PUBLIC_KEY_LEN); + sm2->pub_key_len = CCP_SM2_PUBLIC_KEY_LEN; + + return 0; +} + +static unsigned int ccp_sm2_maxsize(struct crypto_akcipher *tfm) +{ + return CCP_SM2_DST_SIZE; +} + +static int ccp_sm2_compute_c3(struct crypto_shash *shash, + struct scatterlist *sg, u32 mlen, + u8 *c3, const u8 *x2, const u8 *y2) +{ + unsigned int len, remain; + int ret; + + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + ret = crypto_shash_init(sdesc); + if (ret < 0) + return ret; + + /* update X2 */ + ret = crypto_shash_update(sdesc, x2, CCP_SM2_OPERAND_LEN); + if (ret < 0) + return ret; + + /* update M */ + remain = mlen; + while (sg) { + len = sg->length; + if (len > remain) + len = remain; + ret = crypto_shash_update(sdesc, (u8 *)sg_virt(sg), len); + if (ret < 0) + return ret; + + remain -= len; + if (!remain) + break; + + sg = sg_next(sg); + } + + /* ccp_sm2_encrypt should have checked length */ + if (unlikely(!sg)) + return -EINVAL; + + /* update Y2 */ + ret = crypto_shash_finup(sdesc, y2, CCP_SM2_OPERAND_LEN, c3); + + return ret; +} + +static bool ccp_sm2_msg_xor_t(u8 *msg, const u8 *t, u32 len) +{ + u64 *msg_cur, *msg_last, *t_cur; + u32 zero_cnt = 0; + u32 rem; + int i; + + msg_cur = (u64 *)msg; + t_cur = (u64 *)t; + msg_last = msg_cur + (len / sizeof(u64)); + while (msg_cur != msg_last) { + 
if (likely(*t_cur)) + *msg_cur = *msg_cur ^ *t_cur; + else + zero_cnt += sizeof(u64); + + msg_cur++; + t_cur++; + } + + msg = (u8 *)msg_cur; + t = (const u8 *)t_cur; + rem = len % sizeof(u64); + for (i = 0; i < rem; i++) { + if (likely(t[i])) + msg[i] = msg[i] ^ t[i]; + else + zero_cnt++; + } + + return zero_cnt == len; +} + +static int ccp_sm2_kdf_xor(struct crypto_shash *shash, + struct scatterlist *src, u32 src_offset, u32 src_len, + struct scatterlist *dst, u32 dst_offset, + u8 *x2_y2_ct, bool *all_zero, struct ccp_sm2_req_ctx *rctx) +{ + u32 *be_ct = NULL; + u32 ct, len, remain; + bool zero; + int ret = 0; + + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + + *all_zero = true; + ct = 1; + be_ct = (u32 *)(x2_y2_ct + CCP_SM2_PUBLIC_KEY_LEN); + remain = src_len; + while (remain) { + len = SM3_DIGEST_SIZE; + if (len > remain) + len = remain; + *be_ct = cpu_to_be32(ct); + ret = crypto_shash_digest(sdesc, x2_y2_ct, + CCP_SM2_PUBLIC_KEY_LEN + sizeof(*be_ct), rctx->src); + if (ret < 0) + break; + + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, src, + src_offset, len, 0); + zero = ccp_sm2_msg_xor_t(rctx->src + SM3_DIGEST_SIZE, + rctx->src, len); + if (zero == false) + *all_zero = false; + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, dst, + dst_offset, len, 1); + + remain -= len; + src_offset += len; + dst_offset += len; + ct++; + } + + return ret; +} + +static void ccp_sm2_enc_compute(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct crypto_shash *shash = NULL; + bool all_zero = true; + int ret; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + goto e_complete; + } + + scatterwalk_map_and_copy(rctx->src, req->src, 0, req->src_len, 0); + + /* C2 = M ^ t */ + ret = ccp_sm2_kdf_xor(shash, req->src, 0, req->src_len, + 
req->dst, CCP_SM2_ENCRYPT_EXT_LEN, + dst->result_r, &all_zero, rctx); + if (ret < 0) + goto e_hash; + if (unlikely(all_zero)) { + ret = -EAGAIN; + goto e_hash; + } + + /* C3 */ + ret = ccp_sm2_compute_c3(shash, req->src, req->src_len, rctx->src, + dst->result_r, dst->result_s); + if (ret < 0) + goto e_hash; + + /* save C3 */ + scatterwalk_map_and_copy(rctx->src, req->dst, + CCP_SM2_PUBLIC_KEY_LEN, SM3_DIGEST_SIZE, 1); + +e_hash: + crypto_free_shash(shash); + +e_complete: + req->base.complete(&req->base, ret); +} + +static void ccp_sm2_enc_lp(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct ccp_sm2_lp_src *src = (struct ccp_sm2_lp_src *)rctx->src; + int ret; + + /* save C1 */ + scatterwalk_map_and_copy(dst->result_r, req->dst, 0, + CCP_SM2_PUBLIC_KEY_LEN, 1); + /* operand_k used by kg is placed in dst->result_t */ + memcpy(src->operand_k, dst->result_t, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + rctx->phase = CCP_SM2_ENC_PH_LP; + + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); + if (ret != -EBUSY && ret != -EINPROGRESS) + req->base.complete(&req->base, ret); +} + +static int ccp_sm2_encrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + int nents; + int ret; + + if (!ctx->u.sm2.pub_key_len) + return -ENOKEY; + + if (!req->src_len || + req->dst_len < CCP_SM2_ENCRYPT_EXT_LEN + req->src_len) + return -EINVAL; + + nents = sg_nents_for_len(req->src, req->src_len); + if 
(nents < 0) + return -EINVAL; + + rctx->req = req; + rctx->phase = CCP_SM2_ENC_PH_KG; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_KG_SRC_SIZE, CCP_SM2_MODE_KG, 1); + + return ret; +} + +static void ccp_sm2_dec_compute(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct crypto_shash *shash = NULL; + bool all_zero = true; + int ret; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + goto e_complete; + } + + /* M' = C2 ^ t */ + ret = ccp_sm2_kdf_xor(shash, req->src, CCP_SM2_ENCRYPT_EXT_LEN, + req->src_len - CCP_SM2_ENCRYPT_EXT_LEN, req->dst, 0, + dst->result_r, &all_zero, rctx); + if (ret < 0) + goto e_hash; + if (all_zero) { + ret = -EBADMSG; + goto e_hash; + } + + /* u */ + ret = ccp_sm2_compute_c3(shash, req->dst, + req->src_len - CCP_SM2_ENCRYPT_EXT_LEN, + rctx->src, dst->result_r, dst->result_s); + if (ret < 0) + goto e_hash; + + /* load and compare C3 */ + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, req->src, + CCP_SM2_PUBLIC_KEY_LEN, SM3_DIGEST_SIZE, 0); + ret = memcmp(rctx->src, rctx->src + SM3_DIGEST_SIZE, SM3_DIGEST_SIZE); + if (ret) + ret = -EBADMSG; + +e_hash: + crypto_free_shash(shash); + +e_complete: + /* clear private key, plain, and dC1 */ + memset(rctx->src, 0, CCP_SM2_OPERAND_LEN * 2); + memset(dst, 0, CCP_SM2_DST_SIZE); + req->base.complete(&req->base, ret); +} + +static int ccp_sm2_decrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_lp_src *src = (struct ccp_sm2_lp_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pri_key_len) + return -ENOKEY; + + if (req->src_len <= (CCP_SM2_PUBLIC_KEY_LEN + SM3_DIGEST_SIZE)) + return -EINVAL; + + if 
(req->dst_len < req->src_len - CCP_SM2_ENCRYPT_EXT_LEN) + return -EINVAL; + + nents = sg_nents_for_len(req->src, req->src_len); + if (nents < 0) + return -EINVAL; + + /* load C1 */ + scatterwalk_map_and_copy(rctx->src + CCP_SM2_LP_SRC_SIZE, + req->src, 0, CCP_SM2_PUBLIC_KEY_LEN, 0); + ret = ccp_sm2_is_pubkey_valid(rctx, false); + if (ret < 0) + return -EBADMSG; + + /* do kP */ + memcpy(src->operand_k, ctx->u.sm2.pri_key, CCP_SM2_PRIVATE_KEY_LEN); + memcpy(src->operand_px, rctx->src + CCP_SM2_LP_SRC_SIZE, + CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, rctx->src + CCP_SM2_LP_SRC_SIZE + + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + rctx->req = req; + rctx->phase = CCP_SM2_DEC_PH_LP; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); + + return ret; +} + +static int ccp_sm2_sign(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_sign_src *src = (struct ccp_sm2_sign_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pri_key_len) + return -ENOKEY; + + if (req->src_len != CCP_SM2_OPERAND_LEN) + return -EINVAL; + + nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN); + if (nents < 0) + return -EINVAL; + + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + CCP_SM2_OPERAND_LEN, 0); + memcpy(src->operand_d, ctx->u.sm2.pri_key, CCP_SM2_PRIVATE_KEY_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_SIGN_PH_SIGN; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_SIGN_SRC_SIZE, + CCP_SM2_MODE_SIGN, 1); + + return ret; +} + +static int ccp_sm2_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_verify_src *src = (struct ccp_sm2_verify_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pub_key_len) + 
return -ENOKEY; + + if (req->src_len != CCP_SM2_OPERAND_LEN * 3) + return -EINVAL; + + nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN * 3); + if (nents < 0) + return -EINVAL; + + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + CCP_SM2_OPERAND_LEN * 3, 0); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_VERIFY_PH_VERIFY; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, + CCP_SM2_MODE_VERIFY, 0); + + return ret; +} + +static int ccp_sm2_verify_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (dst->u.status) + return -EBADMSG; + + return 0; +} + +static int ccp_sm2_sign_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct ccp_sm2_sign_src *src = (struct ccp_sm2_sign_src *)rctx->src; + struct akcipher_request *req = rctx->req; + + if (unlikely(dst->u.status)) + return -EAGAIN; + + /* save signature */ + scatterwalk_map_and_copy(dst->result_r, req->dst, 0, + CCP_SM2_OPERAND_LEN * 2, 1); + /* clear private key */ + memset(src->operand_d, 0, CCP_SM2_PRIVATE_KEY_LEN); + + return 0; +} + +static int ccp_sm2_enc_kg_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + /* random operand_k is not satisfied with[1, n-1], try again */ + if (unlikely(dst->u.status)) + return -EAGAIN; + + INIT_WORK(&rctx->work, ccp_sm2_enc_lp); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_enc_lp_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (unlikely(dst->u.status)) + return -EIO; + + INIT_WORK(&rctx->work, ccp_sm2_enc_compute); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_dec_lp_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct 
ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (unlikely(dst->u.status)) + return -EIO; + + INIT_WORK(&rctx->work, ccp_sm2_dec_compute); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_complete(struct crypto_async_request *async_req, int ret) +{ + struct akcipher_request *req = + container_of(async_req, struct akcipher_request, base); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + + if (ret) + return ret; + + switch (rctx->phase) { + case CCP_SM2_SIGN_PH_SIGN: + ret = ccp_sm2_sign_handle(rctx); + break; + case CCP_SM2_VERIFY_PH_VERIFY: + ret = ccp_sm2_verify_handle(rctx); + break; + case CCP_SM2_ENC_PH_KG: + ret = ccp_sm2_enc_kg_handle(rctx); + break; + case CCP_SM2_ENC_PH_LP: + ret = ccp_sm2_enc_lp_handle(rctx); + break; + case CCP_SM2_DEC_PH_LP: + ret = ccp_sm2_dec_lp_handle(rctx); + break; + } + + return ret; +} + +static int ccp_sm2_init_tfm(struct crypto_akcipher *tfm) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + + akcipher_set_reqsize(tfm, sizeof(struct ccp_sm2_req_ctx)); + ctx->complete = ccp_sm2_complete; + + return 0; +} + +static void ccp_sm2_exit_tfm(struct crypto_akcipher *tfm) +{ +} + +static struct akcipher_alg ccp_sm2_defaults = { + .sign = ccp_sm2_sign, + .verify = ccp_sm2_verify, + .encrypt = ccp_sm2_encrypt, + .decrypt = ccp_sm2_decrypt, + .set_pub_key = ccp_sm2_setpubkey, + .set_priv_key = ccp_sm2_setprivkey, + .max_size = ccp_sm2_maxsize, + .init = ccp_sm2_init_tfm, + .exit = ccp_sm2_exit_tfm, + .base = { + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_ctxsize = sizeof(struct ccp_ctx), + .cra_priority = CCP_CRA_PRIORITY, + .cra_module = THIS_MODULE, + }, +}; + +struct ccp_sm2_def { + unsigned int version; + const char *name; + const char *driver_name; + struct akcipher_alg *alg_defaults; +}; + +static struct ccp_sm2_def sm2_algs[] = { + { + .version = CCP_VERSION(5, 0), + .name = "sm2", + .driver_name = "sm2-ccp", + .alg_defaults = &ccp_sm2_defaults, + } 
+}; + +static int ccp_register_sm2_hygon_alg(struct list_head *head, + const struct ccp_sm2_def *def) +{ + struct ccp_crypto_akcipher_alg *ccp_alg; + struct akcipher_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + alg = &ccp_alg->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + + ret = crypto_register_akcipher(alg); + if (ret) { + pr_err("%s akcipher algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_sm2_hygon_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm2_algs); i++) { + if (sm2_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm2_hygon_alg(head, &sm2_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index e42450d07168..5133b921a5f5 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -258,6 +258,43 @@ struct ccp_rsa_req_ctx { #define CCP_RSA_MAXMOD (4 * 1024 / 8) #define CCP5_RSA_MAXMOD (16 * 1024 / 8) +/***** SM2 related defines *****/ +#define CCP_SM2_OPERAND_LEN 32 +#define CCP_SM2_PRIVATE_KEY_LEN CCP_SM2_OPERAND_LEN +#define CCP_SM2_PUBLIC_KEY_LEN (CCP_SM2_OPERAND_LEN * 2) +#define CCP_SM2_ENCRYPT_EXT_LEN (CCP_SM2_PUBLIC_KEY_LEN + SM3_DIGEST_SIZE) +#define CCP_SM2_MMUL_LEN (CCP_SM2_OPERAND_LEN * 2) + +struct ccp_sm2_ctx { + u32 pri_key_len; + u32 pub_key_len; + u8 pri_key[CCP_SM2_PRIVATE_KEY_LEN]; + u8 pub_key[CCP_SM2_PUBLIC_KEY_LEN]; +}; + +enum ccp_sm2_op_phase { + CCP_SM2_SIGN_PH_SIGN, + CCP_SM2_VERIFY_PH_VERIFY, + CCP_SM2_ENC_PH_KG, + CCP_SM2_ENC_PH_LP, + CCP_SM2_DEC_PH_LP +}; + +struct 
ccp_sm2_req_ctx { + enum ccp_sm2_op_phase phase; + struct akcipher_request *req; + + u8 src[CCP_SM2_VERIFY_SRC_SIZE]; + u8 dst[CCP_SM2_DST_SIZE]; + + struct scatterlist src_sg; + struct scatterlist dst_sg; + + struct work_struct work; + + struct ccp_cmd cmd; +}; + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); @@ -267,6 +304,7 @@ struct ccp_ctx { struct ccp_rsa_ctx rsa; struct ccp_sha_ctx sha; struct ccp_des3_ctx des3; + struct ccp_sm2_ctx sm2; } u; }; @@ -282,5 +320,6 @@ int ccp_register_aes_aeads(struct list_head *head); int ccp_register_sha_algs(struct list_head *head); int ccp_register_des3_algs(struct list_head *head); int ccp_register_rsa_algs(struct list_head *head); +int ccp_register_sm2_hygon_algs(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 7b73332d6aa1..2c144fa64e88 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -131,6 +131,11 @@ union ccp_function { u16 type:2; u16 mode:3; } ecc; + struct { + u16 rand:1; + u16 rsvd:11; + u16 mode:3; + } sm2; u16 raw; }; @@ -151,6 +156,8 @@ union ccp_function { #define CCP_PT_BITWISE(p) ((p)->pt.bitwise) #define CCP_ECC_MODE(p) ((p)->ecc.mode) #define CCP_ECC_AFFINE(p) ((p)->ecc.one) +#define CCP_SM2_RAND(p) ((p)->sm2.rand) +#define CCP_SM2_MODE(p) ((p)->sm2.mode) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -584,6 +591,43 @@ static int ccp5_perform_ecc(struct ccp_op *op) return ccp5_do_cmd(&desc, op->cmd_q); } +static int ccp5_perform_sm2(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + struct ccp_dma_info *saddr = &op->src.u.dma; + struct ccp_dma_info *daddr = &op->dst.u.dma; + + op->cmd_q->total_sm2_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM2; + + CCP5_CMD_SOC(&desc) = 0; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 1; + CCP5_CMD_EOM(&desc) = 1; + 
CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM2_RAND(&function) = op->u.sm2.rand; + CCP_SM2_MODE(&function) = op->u.sm2.mode; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + /* Length of source data must match with mode */ + CCP5_CMD_LEN(&desc) = saddr->length; + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(saddr); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(saddr); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(daddr); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(daddr); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -1103,6 +1147,7 @@ static const struct ccp_actions ccp5_actions = { .rsa = ccp5_perform_rsa, .passthru = ccp5_perform_passthru, .ecc = ccp5_perform_ecc, + .sm2 = ccp5_perform_sm2, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 83350e2d9821..2b45309b78fa 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -334,6 +334,7 @@ struct ccp_cmd_queue { unsigned long total_rsa_ops; unsigned long total_pt_ops; unsigned long total_ecc_ops; + unsigned long total_sm2_ops; } ____cacheline_aligned; struct ccp_device { @@ -528,6 +529,11 @@ struct ccp_ecc_op { enum ccp_ecc_function function; }; +struct ccp_sm2_op { + u32 rand; + enum ccp_sm2_mode mode; +}; + struct ccp_op { struct ccp_cmd_queue *cmd_q; @@ -551,6 +557,7 @@ struct ccp_op { struct ccp_rsa_op rsa; struct ccp_passthru_op passthru; struct ccp_ecc_op ecc; + struct ccp_sm2_op sm2; } u; }; @@ -657,6 +664,7 @@ struct ccp_actions { int (*rsa)(struct ccp_op *); int (*passthru)(struct ccp_op *); int (*ecc)(struct ccp_op *); + int (*sm2)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); unsigned int 
(*get_free_slots)(struct ccp_cmd_queue *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index cb8e99936abb..99b59dd296f0 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2463,6 +2463,97 @@ ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) } } +static int ccp_run_sm2_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm2_engine *sm2 = &cmd->u.sm2; + struct ccp_data src, dst; + struct ccp_op op; + int ret; + + if (!sm2->src || !sm2->dst) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.init = 1; + op.eom = 1; + op.u.sm2.rand = sm2->rand & 0x1; + op.u.sm2.mode = sm2->mode; + + memset(&src, 0, sizeof(src)); + ret = ccp_init_sg_workarea(&src.sg_wa, cmd_q->ccp->dev, + sm2->src, sm2->src_len, DMA_TO_DEVICE); + if (ret) + return ret; + + /* if src isn't contiguous, should copy to a contiguous buffer */ + if (src.sg_wa.dma_count == 1) { + op.src.u.dma.address = sg_dma_address(src.sg_wa.sg); + } else { + ccp_sg_free(&src.sg_wa); + ret = ccp_init_dm_workarea(&src.dm_wa, cmd_q, sm2->src_len, + DMA_TO_DEVICE); + if (ret) + goto e_src; + + ccp_set_dm_area(&src.dm_wa, 0, sm2->src, 0, sm2->src_len); + op.src.u.dma.address = src.dm_wa.dma.address; + } + + op.src.type = CCP_MEMTYPE_SYSTEM; + op.src.u.dma.offset = 0; + op.src.u.dma.length = sm2->src_len; + op.src.u.dma.dir = DMA_TO_DEVICE; + + memset(&dst, 0, sizeof(dst)); + ret = ccp_init_sg_workarea(&dst.sg_wa, cmd_q->ccp->dev, + sm2->dst, sm2->dst_len, DMA_FROM_DEVICE); + if (ret) + goto e_src; + + /* if dst isn't contiguous, should copy to a contiguous buffer */ + if (dst.sg_wa.dma_count == 1) { + op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); + } else { + ccp_sg_free(&dst.sg_wa); + ret = ccp_init_dm_workarea(&dst.dm_wa, cmd_q, sm2->dst_len, + DMA_FROM_DEVICE); + if (ret) + goto e_dst; + + op.dst.u.dma.address = dst.dm_wa.dma.address; + } + + 
op.dst.type = CCP_MEMTYPE_SYSTEM; + op.dst.u.dma.offset = 0; + op.dst.u.dma.length = sm2->dst_len; + op.dst.u.dma.dir = DMA_FROM_DEVICE; + + ret = cmd_q->ccp->vdata->perform->sm2(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + if (dst.dm_wa.address) { + ccp_get_dm_area(&dst.dm_wa, 0, sm2->dst, 0, sm2->dst_len); + memset(dst.dm_wa.address, 0, sm2->dst_len); + } + +e_dst: + ccp_free_data(&dst, cmd_q); + +e_src: + if (src.dm_wa.address) + memset(src.dm_wa.address, 0, sm2->src_len); + + ccp_free_data(&src, cmd_q); + + return ret; +} + int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; @@ -2507,6 +2598,9 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) case CCP_ENGINE_ECC: ret = ccp_run_ecc_cmd(cmd_q, cmd); break; + case CCP_ENGINE_SM2: + ret = ccp_run_sm2_cmd(cmd_q, cmd); + break; default: ret = -EINVAL; } diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 868924dec5a1..bd947cb8d41f 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -17,6 +17,7 @@ #include #include #include +#include struct ccp_device; struct ccp_cmd; @@ -587,6 +588,51 @@ struct ccp_ecc_engine { u16 ecc_result; }; +/***** SM2 engine *****/ +#define CCP_SM2_VERIFY_SRC_SIZE 160 +#define CCP_SM2_LP_SRC_SIZE 96 +#define CCP_SM2_KG_SRC_SIZE 32 +#define CCP_SM2_SIGN_SRC_SIZE 96 +#define CCP_SM2_MMUL_SRC_SIZE 64 +#define CCP_SM2_DST_SIZE 128 + +/** + * ccp_sm2_mode - SM2 operation mode + * + * @CCP_SM2_MODE_VERIFY: Verify mode + * @CCP_SM2_MODE_LP: LP mode + * @CCP_SM2_MODE_KG: KG mode + * @CCP_SM2_MODE_SIGN: SIGN mode + * @CCP_SM2_MODE_MMUL: MMUL mode + */ +enum ccp_sm2_mode { + CCP_SM2_MODE_VERIFY, + CCP_SM2_MODE_LP, + CCP_SM2_MODE_KG, + CCP_SM2_MODE_SIGN, + CCP_SM2_MODE_MMUL, + CCP_SM2_MODE__LAST, +}; + +/** + * struct ccp_sm2_engine - CCP SM2 operation + * @mode: SM2 operation mode + * @rand: indicateing that operand_k is from TRNG or not + * @src: data to be used for this operation + * @dst: data produced 
by this operation + * @src_len: length in bytes of data used for this operation + * @dst_len: length in bytes of data produced by this operation + */ +struct ccp_sm2_engine { + enum ccp_sm2_mode mode; + u32 rand; + + struct scatterlist *src; + u32 src_len; + + struct scatterlist *dst; + u32 dst_len; +}; /** * ccp_engine - CCP operation identifiers @@ -599,6 +645,7 @@ struct ccp_ecc_engine { * @CCP_ENGINE_PASSTHRU: pass-through operation * @CCP_ENGINE_ZLIB_DECOMPRESS: unused * @CCP_ENGINE_ECC: ECC operation + * @CCP_ENGINE_SM2: SM2 operation */ enum ccp_engine { CCP_ENGINE_AES = 0, @@ -609,6 +656,7 @@ enum ccp_engine { CCP_ENGINE_PASSTHRU, CCP_ENGINE_ZLIB_DECOMPRESS, CCP_ENGINE_ECC, + CCP_ENGINE_SM2 = 8, /* fixed value */ CCP_ENGINE__LAST, }; @@ -657,6 +705,7 @@ struct ccp_cmd { struct ccp_passthru_engine passthru; struct ccp_passthru_nomap_engine passthru_nomap; struct ccp_ecc_engine ecc; + struct ccp_sm2_engine sm2; } u; /* Completion callback support */ -- Gitee From 3a020c007a5628d01516995c7c3f5facd4d798eb Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sat, 7 May 2022 18:20:16 +0800 Subject: [PATCH 0421/2138] anolis: newfeature: crypto: ccp: Support SM3 algorithm for hygon ccp. ANBZ: #8582 In order to add SM3 driver for hygon ccp, include sm3-hmac. 
Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/ccp-crypto-main.c | 4 + drivers/crypto/ccp/ccp-crypto-sm3-hygon.c | 488 ++++++++++++++++++++++ drivers/crypto/ccp/ccp-crypto.h | 49 +++ drivers/crypto/ccp/ccp-dev-v5.c | 45 ++ drivers/crypto/ccp/ccp-dev.h | 10 + drivers/crypto/ccp/ccp-ops.c | 157 +++++++ include/linux/ccp.h | 44 ++ 8 files changed, 799 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/ccp/ccp-crypto-sm3-hygon.c diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 2f002be97210..32d93a9b1a6f 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -25,4 +25,5 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o -ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o +ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \ + ccp-crypto-sm3-hygon.o diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index c2ef834eb1fa..a2444759687d 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -332,6 +332,10 @@ static int ccp_register_algs(void) if (ret) return ret; + ret = ccp_register_sm3_hygon_algs(&hash_algs); + if (ret) + return ret; + /* Return on hygon platform */ return 0; } diff --git a/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c new file mode 100644 index 000000000000..46ddbc1f14a8 --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c @@ -0,0 +1,488 @@ +/* + * Hygon Cryptographic Coprocessor (CCP) SM3 crypto API support + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" + +static int ccp_sm3_complete(struct crypto_async_request *async_req, int ret) +{ + struct ahash_request *req = ahash_request_cast(async_req); + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + + if (ret) + goto e_free; + + rctx->msg_bits += (rctx->hash_cnt << 3); + if (rctx->hash_rem) { + /* save remaining data to buffer */ + unsigned int offset = rctx->nbytes - rctx->hash_rem; + + scatterwalk_map_and_copy(rctx->buf, rctx->src, + offset, rctx->hash_rem, 0); + rctx->buf_count = rctx->hash_rem; + } else { + rctx->buf_count = 0; + } + + if (rctx->final) { + if (req->result) + memcpy(req->result, rctx->ctx, SM3_DIGEST_SIZE); + + memset(rctx->ctx, 0, SM3_DIGEST_SIZE); + } + +e_free: + sg_free_table(&rctx->data_sg); + + return ret; +} + +static int ccp_do_sm3_update(struct ahash_request *req, unsigned int nbytes, + unsigned int final) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct scatterlist *sg = req->src; + struct ccp_sm3_engine *sm3 = NULL; + unsigned int sg_count; + gfp_t gfp; + u64 len, msg_bits = 0; + int nents; + int ret; + + /* must check length of src, + * otherwise will result in NullPointer exception in ccp_sm3_complete + */ + if (nbytes) { + nents = sg_nents_for_len(req->src, nbytes); + if (nents < 0) + return -EINVAL; + } + + len = (u64)rctx->buf_count + (u64)nbytes; + if (len <= SM3_BLOCK_SIZE) { + scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src, + 0, nbytes, 0); + rctx->buf_count += nbytes; + if (!final) + return 0; + + sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); + sg = 
&rctx->buf_sg; + } else { + gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + GFP_KERNEL : GFP_ATOMIC; + + if (rctx->buf_count) { + /* build the scatterlist table: (buffer and input data) */ + sg_count = sg_nents(req->src) + 1; + ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp); + if (ret) + return ret; + + sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); + sg = ccp_crypto_sg_table_add( + &rctx->data_sg, &rctx->buf_sg); + if (!sg) { + ret = -EINVAL; + goto e_free; + } + sg = ccp_crypto_sg_table_add(&rctx->data_sg, + req->src); + if (!sg) { + ret = -EINVAL; + goto e_free; + } + sg_mark_end(sg); + + sg = rctx->data_sg.sgl; + } else { + sg = req->src; + } + } + + rctx->final = final; + if (final) { + rctx->hash_rem = 0; + rctx->hash_cnt = len; + msg_bits = rctx->msg_bits + (len << 3); + } else { + rctx->hash_rem = len & (SM3_BLOCK_SIZE - 1); + rctx->hash_cnt = len - rctx->hash_rem; + rctx->src = req->src; + rctx->nbytes = nbytes; + } + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_SM3; + + sm3 = &rctx->cmd.u.sm3; + sm3->type = CCP_SM3_TYPE_256; + sm3->ctx = &rctx->ctx_sg; + sm3->ctx_len = SM3_DIGEST_SIZE; + sm3->src = sg; + sm3->src_len = rctx->hash_cnt; + sm3->first = rctx->msg_bits ? 
0 : 1; + sm3->final = final; + sm3->msg_bits = msg_bits; + if (final && ctx->u.sm3.key_len) { + sm3->opad = &ctx->u.sm3.opad_sg; + sm3->opad_len = SM3_BLOCK_SIZE; + } + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; + +e_free: + sg_free_table(&rctx->data_sg); + + return ret; +} + +static int ccp_sm3_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + + if ((crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) && + (!ctx->u.sm3.key_len)) + return -ENOKEY; + + memset(rctx, 0, sizeof(*rctx)); + if (ctx->u.sm3.key_len) { + /* buffer the HMAC key for first update */ + memcpy(rctx->buf, ctx->u.sm3.ipad, SM3_BLOCK_SIZE); + rctx->buf_count = SM3_BLOCK_SIZE; + } + + sg_init_one(&rctx->ctx_sg, rctx->ctx, SM3_DIGEST_SIZE); + + return 0; +} + +static int ccp_sm3_update(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, req->nbytes, 0); +} + +static int ccp_sm3_final(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, 0, 1); +} + +static int ccp_sm3_finup(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, req->nbytes, 1); +} + +static int ccp_sm3_digest(struct ahash_request *req) +{ + int ret; + + ret = ccp_sm3_init(req); + if (unlikely(ret)) + return ret; + + return ccp_sm3_finup(req); +} + +static int ccp_sm3_export(struct ahash_request *req, void *out) +{ + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sm3_exp_ctx state; + + if (!out) + return -EINVAL; + + /* don't let anything leak to 'out' */ + memset(&state, 0, sizeof(state)); + + state.msg_bits = rctx->msg_bits; + memcpy(state.ctx, rctx->ctx, SM3_DIGEST_SIZE); + state.buf_count = rctx->buf_count; + memcpy(state.buf, rctx->buf, SM3_BLOCK_SIZE); + + /* 'out' may not be aligned so memcpy from local variable */ + memcpy(out, &state, sizeof(state)); + memset(&state, 0, sizeof(state)); + + return 0; +} + 
+static int ccp_sm3_import(struct ahash_request *req, const void *in) +{ + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sm3_exp_ctx state; + + if (!in) + return -EINVAL; + + /* 'in' may not be aligned so memcpy to local variable */ + memcpy(&state, in, sizeof(state)); + + memset(rctx, 0, sizeof(*rctx)); + rctx->msg_bits = state.msg_bits; + memcpy(rctx->ctx, state.ctx, SM3_DIGEST_SIZE); + sg_init_one(&rctx->ctx_sg, rctx->ctx, SM3_DIGEST_SIZE); + rctx->buf_count = state.buf_count; + memcpy(rctx->buf, state.buf, SM3_BLOCK_SIZE); + + memset(&state, 0, sizeof(state)); + + return 0; +} + +static int ccp_sm3_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct crypto_shash *shash = ctx->u.sm3.hmac_tfm; + + SHASH_DESC_ON_STACK(sdesc, shash); + + int i, ret; + + /* set to zero until complete */ + ctx->u.sm3.key_len = 0; + if (!key) + return -EINVAL; + + if (!key_len) { + crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); + return -EINVAL; + } + + /* clear key area to provide zero padding for keys smaller + * than the block size + */ + memset(ctx->u.sm3.key, 0, SM3_BLOCK_SIZE); + + if (key_len > SM3_BLOCK_SIZE) { + /* must hash the input key */ + sdesc->tfm = shash; + ret = crypto_shash_digest(sdesc, key, key_len, + ctx->u.sm3.key); + if (ret) { + crypto_ahash_set_flags( + tfm, CRYPTO_TFM_NEED_KEY); + return -EINVAL; + } + + key_len = SM3_DIGEST_SIZE; + } else { + memcpy(ctx->u.sm3.key, key, key_len); + } + + for (i = 0; i < SM3_BLOCK_SIZE; i++) { + ctx->u.sm3.ipad[i] = ctx->u.sm3.key[i] ^ HMAC_IPAD_VALUE; + ctx->u.sm3.opad[i] = ctx->u.sm3.key[i] ^ HMAC_OPAD_VALUE; + } + + sg_init_one(&ctx->u.sm3.opad_sg, ctx->u.sm3.opad, SM3_BLOCK_SIZE); + + ctx->u.sm3.key_len = key_len; + + return 0; +} + +static int ccp_sm3_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + + 
ctx->complete = ccp_sm3_complete; + crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sm3_req_ctx)); + + return 0; +} + +static void ccp_sm3_cra_exit(struct crypto_tfm *tfm) +{ +} + +static int ccp_hmac_sm3_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm); + struct crypto_shash *hmac_tfm; + + hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0); + if (IS_ERR(hmac_tfm)) { + pr_warn("could not load driver %s need for HMAC support\n", + alg->child_alg); + return PTR_ERR(hmac_tfm); + } + + ctx->u.sm3.hmac_tfm = hmac_tfm; + + return ccp_sm3_cra_init(tfm); +} + +static void ccp_hmac_sm3_cra_exit(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->u.sm3.hmac_tfm) + crypto_free_shash(ctx->u.sm3.hmac_tfm); + + ccp_sm3_cra_exit(tfm); +} + +struct ccp_sm3_def { + unsigned int version; + const char *name; + const char *drv_name; + enum ccp_sm3_type type; + u32 digest_size; + u32 block_size; +}; + +static struct ccp_sm3_def sm3_algs[] = { + { + .version = CCP_VERSION(5, 0), + .name = "sm3", + .drv_name = "sm3-ccp", + .type = CCP_SM3_TYPE_256, + .digest_size = SM3_DIGEST_SIZE, + .block_size = SM3_BLOCK_SIZE, + }, +}; + +static int ccp_register_hmac_sm3_hygon_alg(struct list_head *head, + const struct ccp_sm3_def *def, + const struct ccp_crypto_ahash_alg *base_alg) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + /* copy the base algorithm and only change what's necessary */ + *ccp_alg = *base_alg; + INIT_LIST_HEAD(&ccp_alg->entry); + + strscpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME); + + alg = &ccp_alg->alg; + alg->setkey = ccp_sm3_setkey; + + base = &alg->halg.base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, 
"hmac-%s", + def->drv_name); + base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK; + base->cra_init = ccp_hmac_sm3_cra_init; + base->cra_exit = ccp_hmac_sm3_cra_exit; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return ret; +} + +static int ccp_register_sm3_hygon_alg(struct list_head *head, + const struct ccp_sm3_def *def) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct hash_alg_common *halg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + ccp_alg->type = def->type; + + alg = &ccp_alg->alg; + alg->init = ccp_sm3_init; + alg->update = ccp_sm3_update; + alg->final = ccp_sm3_final; + alg->finup = ccp_sm3_finup; + alg->digest = ccp_sm3_digest; + alg->export = ccp_sm3_export; + alg->import = ccp_sm3_import; + + halg = &alg->halg; + halg->digestsize = def->digest_size; + halg->statesize = sizeof(struct ccp_sm3_exp_ctx); + + base = &halg->base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->drv_name); + base->cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK; + base->cra_blocksize = def->block_size; + base->cra_ctxsize = sizeof(struct ccp_ctx); + base->cra_priority = CCP_CRA_PRIORITY; + base->cra_init = ccp_sm3_cra_init; + base->cra_exit = ccp_sm3_cra_exit; + base->cra_module = THIS_MODULE; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + ret = ccp_register_hmac_sm3_hygon_alg(head, def, ccp_alg); + + return ret; +} + +int ccp_register_sm3_hygon_algs(struct list_head *head) +{ + int i, ret; + 
unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm3_algs); i++) { + if (sm3_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm3_hygon_alg(head, &sm3_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 5133b921a5f5..33e54fcbca53 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -295,6 +295,53 @@ struct ccp_sm2_req_ctx { struct ccp_cmd cmd; }; +/***** SM3 related defines *****/ +struct ccp_sm3_ctx { + u32 key_len; + u8 key[SM3_BLOCK_SIZE]; + + u8 ipad[SM3_BLOCK_SIZE]; + + u8 opad[SM3_BLOCK_SIZE]; + struct scatterlist opad_sg; + + struct crypto_shash *hmac_tfm; +}; + +struct ccp_sm3_req_ctx { + u64 msg_bits; + + unsigned int first; + unsigned int final; + + struct scatterlist *src; + u32 nbytes; + + u64 hash_cnt; + u32 hash_rem; + + struct sg_table data_sg; + struct scatterlist *src_sg; + + struct scatterlist ctx_sg; + u8 ctx[SM3_DIGEST_SIZE]; + + struct scatterlist buf_sg; + u32 buf_count; + u8 buf[SM3_BLOCK_SIZE]; + + struct ccp_cmd cmd; +}; + +struct ccp_sm3_exp_ctx { + u64 msg_bits; + + u8 ctx[SM3_DIGEST_SIZE]; + + u32 buf_count; + u8 buf[SM3_BLOCK_SIZE]; +}; + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); @@ -305,6 +352,7 @@ struct ccp_ctx { struct ccp_sha_ctx sha; struct ccp_des3_ctx des3; struct ccp_sm2_ctx sm2; + struct ccp_sm3_ctx sm3; } u; }; @@ -321,5 +369,6 @@ int ccp_register_sha_algs(struct list_head *head); int ccp_register_des3_algs(struct list_head *head); int ccp_register_rsa_algs(struct list_head *head); int ccp_register_sm2_hygon_algs(struct list_head *head); +int ccp_register_sm3_hygon_algs(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 2c144fa64e88..7038be74bbb6 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ 
-136,6 +136,11 @@ union ccp_function { u16 rsvd:11; u16 mode:3; } sm2; + struct { + u16 rsvd:10; + u16 type:4; + u16 rsvd2:1; + } sm3; u16 raw; }; @@ -158,6 +163,7 @@ union ccp_function { #define CCP_ECC_AFFINE(p) ((p)->ecc.one) #define CCP_SM2_RAND(p) ((p)->sm2.rand) #define CCP_SM2_MODE(p) ((p)->sm2.mode) +#define CCP_SM3_TYPE(p) ((p)->sm3.type) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -193,6 +199,8 @@ union ccp_function { #define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) #define CCP5_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo) #define CCP5_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi) +#define CCP5_CMD_SM3_LO(p) ((p)->dw4.sm3_len_lo) +#define CCP5_CMD_SM3_HI(p) ((p)->dw5.sm3_len_hi) /* Word 6/7 */ #define CCP5_CMD_DW6(p) ((p)->key_lo) @@ -628,6 +636,42 @@ static int ccp5_perform_sm2(struct ccp_op *op) return ccp5_do_cmd(&desc, op->cmd_q); } +static int ccp5_perform_sm3(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + + op->cmd_q->total_sm3_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM3; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM3_TYPE(&function) = op->u.sm3.type; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + if (op->eom) { + CCP5_CMD_SM3_LO(&desc) = lower_32_bits(op->u.sm3.msg_bits); + CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); + } + + return ccp5_do_cmd(&desc, op->cmd_q); +} + static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -1148,6 +1192,7 @@ static const struct ccp_actions ccp5_actions = { .passthru = ccp5_perform_passthru, .ecc 
= ccp5_perform_ecc, .sm2 = ccp5_perform_sm2, + .sm3 = ccp5_perform_sm3, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 2b45309b78fa..2d6c4c404539 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -335,6 +335,7 @@ struct ccp_cmd_queue { unsigned long total_pt_ops; unsigned long total_ecc_ops; unsigned long total_sm2_ops; + unsigned long total_sm3_ops; } ____cacheline_aligned; struct ccp_device { @@ -534,6 +535,11 @@ struct ccp_sm2_op { enum ccp_sm2_mode mode; }; +struct ccp_sm3_op { + enum ccp_sm3_type type; + u64 msg_bits; +}; + struct ccp_op { struct ccp_cmd_queue *cmd_q; @@ -558,6 +564,7 @@ struct ccp_op { struct ccp_passthru_op passthru; struct ccp_ecc_op ecc; struct ccp_sm2_op sm2; + struct ccp_sm3_op sm3; } u; }; @@ -606,6 +613,7 @@ struct dword3 { union dword4 { u32 dst_lo; /* NON-SHA */ u32 sha_len_lo; /* SHA */ + __le32 sm3_len_lo; /* SM3 */ }; union dword5 { @@ -616,6 +624,7 @@ union dword5 { unsigned int fixed:1; } fields; u32 sha_len_hi; + __le32 sm3_len_hi; }; struct dword7 { @@ -665,6 +674,7 @@ struct ccp_actions { int (*passthru)(struct ccp_op *); int (*ecc)(struct ccp_op *); int (*sm2)(struct ccp_op *op); + int (*sm3)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); unsigned int (*get_free_slots)(struct ccp_cmd_queue *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 99b59dd296f0..79aa8ff654dd 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2554,6 +2554,160 @@ static int ccp_run_sm2_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) return ret; } +static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm3_engine *sm3 = &cmd->u.sm3; + struct ccp_dm_workarea ctx; + struct ccp_data src; + struct ccp_op op; + int ret; + + u8 
sm3_zero_message_hash[SM3_DIGEST_SIZE] = { + 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, + 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F, + 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74, + 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B, + }; + + if ((sm3->ctx == NULL) || (sm3->ctx_len != SM3_DIGEST_SIZE)) + return -EINVAL; + + if (sg_nents_for_len(sm3->ctx, SM3_DIGEST_SIZE) < 0) + return -EINVAL; + + if (sm3->final && sm3->first) { + if (!sm3->src_len) { + scatterwalk_map_and_copy( + (void *)sm3_zero_message_hash, + sm3->ctx, 0, SM3_DIGEST_SIZE, 1); + return 0; + } + } + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.init = sm3->first & 0x1; + op.u.sm3.type = sm3->type; + op.u.sm3.msg_bits = sm3->msg_bits; + + memset(&ctx, 0, sizeof(ctx)); + ret = ccp_init_dm_workarea(&ctx, cmd_q, SM3_DIGEST_SIZE, + DMA_BIDIRECTIONAL); + if (ret) + return ret; + + if (!sm3->first) { + /* load iv */ + ccp_set_dm_area(&ctx, 0, sm3->ctx, 0, SM3_DIGEST_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &ctx, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_ctx; + } + } + + ret = ccp_init_data(&src, cmd_q, sm3->src, sm3->src_len, + SM3_BLOCK_SIZE, DMA_TO_DEVICE); + if (ret) + goto e_ctx; + + /* send data to the CCP SM3 engine */ + if (sm3->src_len) { + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, NULL, &op, SM3_BLOCK_SIZE, + false); + if (!src.sg_wa.bytes_left && sm3->final) + op.eom = 1; + + ret = cmd_q->ccp->vdata->perform->sm3(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + ccp_process_data(&src, NULL, &op); + } + } else { + /* do sm3 padding */ + src.dm_wa.address[0] = 0x80; + *(__be64 *)&src.dm_wa.address[56] = cpu_to_be64(sm3->msg_bits); + + op.soc = 0; + op.ioc = 1; + op.eom = 0; + op.src.u.dma.address = src.dm_wa.dma.address; + op.src.u.dma.offset = 0; + op.src.u.dma.length = SM3_BLOCK_SIZE; + 
+ ret = cmd_q->ccp->vdata->perform->sm3(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + } + + ret = ccp_copy_from_sb(cmd_q, &ctx, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + if (sm3->final && sm3->opad) { + /* HMAC operation, recursively perform final SM3 */ + struct ccp_cmd hmac_cmd; + struct scatterlist sg; + u8 *hmac_buf = NULL; + + hmac_buf = kmalloc( + SM3_BLOCK_SIZE + SM3_DIGEST_SIZE, GFP_KERNEL); + if (!hmac_buf) { + ret = -ENOMEM; + goto e_data; + } + scatterwalk_map_and_copy(hmac_buf, sm3->opad, + 0, SM3_BLOCK_SIZE, 0); + memcpy(hmac_buf + SM3_BLOCK_SIZE, ctx.address, + SM3_DIGEST_SIZE); + sg_init_one(&sg, hmac_buf, SM3_BLOCK_SIZE + SM3_DIGEST_SIZE); + + memset(&hmac_cmd, 0, sizeof(hmac_cmd)); + hmac_cmd.engine = CCP_ENGINE_SM3; + hmac_cmd.u.sm3.type = sm3->type; + hmac_cmd.u.sm3.ctx = sm3->ctx; + hmac_cmd.u.sm3.ctx_len = sm3->ctx_len; + hmac_cmd.u.sm3.src = &sg; + hmac_cmd.u.sm3.src_len = SM3_BLOCK_SIZE + SM3_DIGEST_SIZE; + hmac_cmd.u.sm3.opad = NULL; + hmac_cmd.u.sm3.opad_len = 0; + hmac_cmd.u.sm3.first = 1; + hmac_cmd.u.sm3.final = 1; + hmac_cmd.u.sm3.msg_bits = + (SM3_BLOCK_SIZE + SM3_DIGEST_SIZE) << 3; + + ret = ccp_run_sm3_cmd(cmd_q, &hmac_cmd); + if (ret) + cmd->engine_error = hmac_cmd.engine_error; + + kfree(hmac_buf); + } else { + ccp_get_dm_area(&ctx, 0, sm3->ctx, 0, SM3_DIGEST_SIZE); + } + +e_data: + ccp_free_data(&src, cmd_q); + +e_ctx: + ccp_dm_free(&ctx); + + return ret; +} + int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; @@ -2601,6 +2755,9 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) case CCP_ENGINE_SM2: ret = ccp_run_sm2_cmd(cmd_q, cmd); break; + case CCP_ENGINE_SM3: + ret = ccp_run_sm3_cmd(cmd_q, cmd); + break; default: ret = -EINVAL; } diff --git a/include/linux/ccp.h b/include/linux/ccp.h index bd947cb8d41f..cda875cf3c71 100644 --- a/include/linux/ccp.h +++ 
b/include/linux/ccp.h @@ -634,6 +634,47 @@ struct ccp_sm2_engine { u32 dst_len; }; +/***** SM3 engine *****/ +/** + * ccp_sm3_type - type of SM3 operation + * + * @CCP_SM3_TYPE_256: SM3 operation + */ +enum ccp_sm3_type { + CCP_SM3_TYPE_256 = 2, + CCP_SM3_TYPE__LAST, +}; + +/** + * struct ccp_sm3_engine - CCP SM3 operation + * @type: Type of SM3 operation + * @ctx: current hash value + * @ctx_len: length in bytes of hash value + * @src: data to be used for this operation + * @src_len: length in bytes of data used for this operation + * @opad: data to be used for final HMAC operation + * @opad_len: length in bytes of data used for final HMAC operation + * @first: indicates first SM3 operation + * @final: indicates final SM3 operation + * @msg_bits: total length of the message in bits used in final SM3 operation + */ +struct ccp_sm3_engine { + enum ccp_sm3_type type; + + struct scatterlist *ctx; + u32 ctx_len; + + struct scatterlist *src; + u64 src_len; + + struct scatterlist *opad; + u32 opad_len; + + u32 first; + u32 final; + u64 msg_bits; +}; + /** * ccp_engine - CCP operation identifiers * @@ -646,6 +687,7 @@ struct ccp_sm2_engine { * @CCP_ENGINE_ZLIB_DECOMPRESS: unused * @CCP_ENGINE_ECC: ECC operation * @CCP_ENGINE_SM2: SM2 operation + * @CCP_ENGINE_SM3: SM3 operation */ enum ccp_engine { CCP_ENGINE_AES = 0, @@ -657,6 +699,7 @@ enum ccp_engine { CCP_ENGINE_ZLIB_DECOMPRESS, CCP_ENGINE_ECC, CCP_ENGINE_SM2 = 8, /* fixed value */ + CCP_ENGINE_SM3, CCP_ENGINE__LAST, }; @@ -706,6 +749,7 @@ struct ccp_cmd { struct ccp_passthru_nomap_engine passthru_nomap; struct ccp_ecc_engine ecc; struct ccp_sm2_engine sm2; + struct ccp_sm3_engine sm3; } u; /* Completion callback support */ -- Gitee From 8f761da7b07c16a183db2c5ae7b75fb8d2489ac6 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sat, 7 May 2022 18:25:26 +0800 Subject: [PATCH 0422/2138] anolis: newfeature: crypto: ccp: Support SM4 algorithm for hygon ccp. 
ANBZ: #8582 In order to add SM4 driver for hygon ccp, relating to sm4 mode of ecb/ecb_hs, cbc/cbc_hs, cfb, ofb and ctr Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/ccp-crypto-main.c | 4 + drivers/crypto/ccp/ccp-crypto-sm4-hygon.c | 324 ++++++++++++++++++++++ drivers/crypto/ccp/ccp-crypto.h | 17 ++ drivers/crypto/ccp/ccp-dev-v5.c | 127 ++++++++- drivers/crypto/ccp/ccp-dev.h | 18 ++ drivers/crypto/ccp/ccp-ops.c | 230 +++++++++++++++ include/linux/ccp.h | 114 ++++++++ 8 files changed, 835 insertions(+), 2 deletions(-) create mode 100644 drivers/crypto/ccp/ccp-crypto-sm4-hygon.c diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 32d93a9b1a6f..79a764bb11e7 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -26,4 +26,5 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-sha.o ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \ - ccp-crypto-sm3-hygon.o + ccp-crypto-sm3-hygon.o \ + ccp-crypto-sm4-hygon.o diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index a2444759687d..3d22fbabc815 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -336,6 +336,10 @@ static int ccp_register_algs(void) if (ret) return ret; + ret = ccp_register_sm4_hygon_algs(&skcipher_algs); + if (ret) + return ret; + /* Return on hygon platform */ return 0; } diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c new file mode 100644 index 000000000000..0d1c750ff118 --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c @@ -0,0 +1,324 @@ +/* + * Hygon Cryptographic Coprocessor (CCP) SM4 crypto API support + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" + +enum ccp_sm4_alg_mode { + CCP_SM4_ALG_MODE_ECB = CCP_SM4_MODE_ECB, + CCP_SM4_ALG_MODE_CBC = CCP_SM4_MODE_CBC, + CCP_SM4_ALG_MODE_OFB = CCP_SM4_MODE_OFB, + CCP_SM4_ALG_MODE_CFB = CCP_SM4_MODE_CFB, + CCP_SM4_ALG_MODE_CTR = CCP_SM4_MODE_CTR, + CCP_SM4_ALG_MODE_ECB_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_ECB, + CCP_SM4_ALG_MODE_CBC_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_CBC, + CCP_SM4_ALG_MODE__LAST, +}; + +static int ccp_sm4_complete(struct crypto_async_request *async_req, int ret) +{ + struct skcipher_request *req = skcipher_request_cast(async_req); + struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct ccp_sm4_req_ctx *rctx = skcipher_request_ctx(req); + + if (ret) + return ret; + + if ((ctx->u.sm4.mode & CCP_SM4_MODE_MASK) != CCP_SM4_ALG_MODE_ECB) { + memcpy(req->iv, rctx->iv, SM4_BLOCK_SIZE); + memset(rctx->iv, 0, SM4_BLOCK_SIZE); + } + + return 0; +} + +static int ccp_sm4_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + + /* key_len is checked by crypto_ablkcipher_type, + * but key isn't checked + */ + if (!key) + return -EINVAL; + + memcpy(ctx->u.sm4.key, key, SM4_KEY_SIZE); + sg_init_one(&ctx->u.sm4.key_sg, ctx->u.sm4.key, SM4_KEY_SIZE); + + ctx->u.sm4.key_len = SM4_KEY_SIZE; + + return 0; +} + +static int ccp_sm4_crypt(struct skcipher_request *req, bool encrypt) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + struct ccp_sm4_req_ctx *rctx = skcipher_request_ctx(req); + struct scatterlist *iv_sg = NULL; + struct ccp_cmd *cmd = NULL; + enum ccp_sm4_alg_mode mode; + enum ccp_sm4_action action; + int ret; + + if 
(!ctx->u.sm4.key_len) + return -ENOKEY; + + mode = ctx->u.sm4.mode; + if ((mode != CCP_SM4_ALG_MODE_CTR) && + (mode != CCP_SM4_ALG_MODE_OFB) && + (mode != CCP_SM4_ALG_MODE_CFB) && + (req->cryptlen & (SM4_BLOCK_SIZE - 1))) + return -EINVAL; + + if ((mode & CCP_SM4_MODE_MASK) != CCP_SM4_ALG_MODE_ECB) { + if (!req->iv) + return -EINVAL; + + memcpy(rctx->iv, req->iv, SM4_BLOCK_SIZE); + iv_sg = &rctx->iv_sg; + sg_init_one(iv_sg, rctx->iv, SM4_BLOCK_SIZE); + } + + cmd = &rctx->cmd; + memset(cmd, 0, sizeof(*cmd)); + INIT_LIST_HEAD(&cmd->entry); + action = encrypt ? CCP_SM4_ACTION_ENCRYPT : CCP_SM4_ACTION_DECRYPT; + if (mode == CCP_SM4_ALG_MODE_CTR) { + cmd->engine = CCP_ENGINE_SM4_CTR; + cmd->u.sm4_ctr.action = action; + cmd->u.sm4_ctr.size = 63; + cmd->u.sm4_ctr.step = 1; + + cmd->u.sm4_ctr.key = &ctx->u.sm4.key_sg; + cmd->u.sm4_ctr.key_len = SM4_KEY_SIZE; + cmd->u.sm4_ctr.iv = iv_sg; + cmd->u.sm4_ctr.iv_len = SM4_BLOCK_SIZE; + + cmd->u.sm4_ctr.src = req->src; + cmd->u.sm4_ctr.dst = req->dst; + cmd->u.sm4_ctr.src_len = req->cryptlen; + + } else { + cmd->engine = CCP_ENGINE_SM4; + cmd->u.sm4.mode = mode & CCP_SM4_MODE_MASK; + cmd->u.sm4.action = action; + if (mode & CCP_SM4_MODE_HS_SEL) + cmd->u.sm4.select = 1; + + cmd->u.sm4.key = &ctx->u.sm4.key_sg; + cmd->u.sm4.key_len = SM4_KEY_SIZE; + cmd->u.sm4.iv = iv_sg; + cmd->u.sm4.iv_len = iv_sg ? 
SM4_BLOCK_SIZE : 0; + + cmd->u.sm4.src = req->src; + cmd->u.sm4.dst = req->dst; + cmd->u.sm4.src_len = req->cryptlen; + } + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_sm4_encrypt(struct skcipher_request *req) +{ + return ccp_sm4_crypt(req, true); +} + +static int ccp_sm4_decrypt(struct skcipher_request *req) +{ + return ccp_sm4_crypt(req, false); +} + +static int ccp_sm4_init_tfm(struct crypto_skcipher *tfm) +{ + struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm); + struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + + ctx->complete = ccp_sm4_complete; + ctx->u.sm4.mode = alg->mode; + + crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_sm4_req_ctx)); + + return 0; +} + +static const struct skcipher_alg ccp_sm4_defaults = { + .setkey = ccp_sm4_setkey, + .encrypt = ccp_sm4_encrypt, + .decrypt = ccp_sm4_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .init = ccp_sm4_init_tfm, + + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = SM4_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct ccp_ctx), + .base.cra_priority = CCP_CRA_PRIORITY, + .base.cra_module = THIS_MODULE, +}; + +struct ccp_sm4_def { + enum ccp_sm4_alg_mode mode; + unsigned int version; + const char *name; + const char *driver_name; + unsigned int blocksize; + unsigned int ivsize; + const struct skcipher_alg *alg_defaults; +}; + +static struct ccp_sm4_def sm4_algs[] = { + { + .mode = CCP_SM4_ALG_MODE_ECB, + .version = CCP_VERSION(5, 0), + .name = "ecb(sm4)", + .driver_name = "ecb-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = 0, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_ECB_HS, + .version = CCP_VERSION(5, 0), + .name = "ecb(sm4)", + .driver_name = "ecb-sm4-hs-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = 0, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_ECB_HS, + .version = 
CCP_VERSION(5, 0), + .name = "ecb(sm4)", + .driver_name = "ecb-sm4-hs-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = 0, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CBC, + .version = CCP_VERSION(5, 0), + .name = "cbc(sm4)", + .driver_name = "cbc-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CBC_HS, + .version = CCP_VERSION(5, 0), + .name = "cbc(sm4)", + .driver_name = "cbc-sm4-hs-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CBC_HS, + .version = CCP_VERSION(5, 0), + .name = "cbc(sm4)", + .driver_name = "cbc-sm4-hs-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_OFB, + .version = CCP_VERSION(5, 0), + .name = "ofb(sm4)", + .driver_name = "ofb-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CFB, + .version = CCP_VERSION(5, 0), + .name = "cfb(sm4)", + .driver_name = "cfb-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CTR, + .version = CCP_VERSION(5, 0), + .name = "ctr(sm4)", + .driver_name = "ctr-sm4-ccp", + .blocksize = 1, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, +}; + +static int ccp_register_sm4_hygon_alg(struct list_head *head, + const struct ccp_sm4_def *def) +{ + struct ccp_crypto_skcipher_alg *ccp_alg; + struct skcipher_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + ccp_alg->mode = def->mode; + + /* copy the defaults and override as necessary */ + alg = &ccp_alg->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, 
CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + alg->base.cra_blocksize = def->blocksize; + alg->ivsize = def->ivsize; + + ret = crypto_register_skcipher(alg); + if (ret) { + pr_err("%s skcipher algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_sm4_hygon_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { + if (sm4_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm4_hygon_alg(head, &sm4_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 33e54fcbca53..58b2950f9100 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -342,6 +342,21 @@ struct ccp_sm3_exp_ctx { u8 buf[SM3_BLOCK_SIZE]; }; +/***** SM4 related defines *****/ +struct ccp_sm4_ctx { + struct scatterlist key_sg; + u8 key[SM4_KEY_SIZE]; + u32 key_len; + u32 mode; +}; + +struct ccp_sm4_req_ctx { + struct scatterlist iv_sg; + u8 iv[SM4_BLOCK_SIZE]; + + struct ccp_cmd cmd; +}; + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); @@ -353,6 +368,7 @@ struct ccp_ctx { struct ccp_des3_ctx des3; struct ccp_sm2_ctx sm2; struct ccp_sm3_ctx sm3; + struct ccp_sm4_ctx sm4; } u; }; @@ -370,5 +386,6 @@ int ccp_register_des3_algs(struct list_head *head); int ccp_register_rsa_algs(struct list_head *head); int ccp_register_sm2_hygon_algs(struct list_head *head); int ccp_register_sm3_hygon_algs(struct list_head *head); +int ccp_register_sm4_hygon_algs(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 7038be74bbb6..08c8d72aaf79 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ 
b/drivers/crypto/ccp/ccp-dev-v5.c @@ -141,6 +141,18 @@ union ccp_function { u16 type:4; u16 rsvd2:1; } sm3; + struct { + u16 rsvd:7; + u16 encrypt:1; + u16 mode:4; + u16 select:1; + u16 rsvd2:2; + } sm4; + struct { + u16 size:7; + u16 encrypt:1; + u16 step:7; + } sm4_ctr; u16 raw; }; @@ -164,6 +176,12 @@ union ccp_function { #define CCP_SM2_RAND(p) ((p)->sm2.rand) #define CCP_SM2_MODE(p) ((p)->sm2.mode) #define CCP_SM3_TYPE(p) ((p)->sm3.type) +#define CCP_SM4_ENCRYPT(p) ((p)->sm4.encrypt) +#define CCP_SM4_MODE(p) ((p)->sm4.mode) +#define CCP_SM4_SELECT(p) ((p)->sm4.select) +#define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt) +#define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step) +#define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -672,6 +690,90 @@ static int ccp5_perform_sm3(struct ccp_op *op) return ccp5_do_cmd(&desc, op->cmd_q); } +static int ccp5_perform_sm4(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_ENCRYPT(&function) = op->u.sm4.action; + CCP_SM4_MODE(&function) = op->u.sm4.mode; + CCP_SM4_SELECT(&function) = op->u.sm4.select; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = 
lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4_ctr(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ctr_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_CTR; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_CTR_SIZE(&function) = op->u.sm4_ctr.size; + CCP_SM4_CTR_ENCRYPT(&function) = op->u.sm4_ctr.action; + CCP_SM4_CTR_STEP(&function) = op->u.sm4_ctr.step; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -1150,6 +1252,26 @@ static void ccp5_destroy(struct ccp_device *ccp) } } +static int ccp5_get_trng_mask_param(void) +{ + /* According to spec description for SM4 high secure module, + * which need 64 bytes data, so the initialize times of writing + * mask register must be 16 or a multiple of 16. + * + * The AES algorithem need 48 bytes, so the initialize times will + * be 12 or a multiple of 12. 
+ */ + +#ifdef CONFIG_HYGON_GM + /* for sm4 HS */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + return 16; +#endif + + /* for AES HS */ + return 12; +} + static void ccp5_config(struct ccp_device *ccp) { /* Public side */ @@ -1160,12 +1282,13 @@ static void ccp5other_config(struct ccp_device *ccp) { int i; u32 rnd; + int len = ccp5_get_trng_mask_param(); /* We own all of the queues on the NTB CCP */ iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); - for (i = 0; i < 12; i++) { + for (i = 0; i < len; i++) { rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); } @@ -1193,6 +1316,8 @@ static const struct ccp_actions ccp5_actions = { .ecc = ccp5_perform_ecc, .sm2 = ccp5_perform_sm2, .sm3 = ccp5_perform_sm3, + .sm4 = ccp5_perform_sm4, + .sm4_ctr = ccp5_perform_sm4_ctr, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 2d6c4c404539..92b859dae7c6 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -336,6 +336,8 @@ struct ccp_cmd_queue { unsigned long total_ecc_ops; unsigned long total_sm2_ops; unsigned long total_sm3_ops; + unsigned long total_sm4_ops; + unsigned long total_sm4_ctr_ops; } ____cacheline_aligned; struct ccp_device { @@ -540,6 +542,18 @@ struct ccp_sm3_op { u64 msg_bits; }; +struct ccp_sm4_op { + enum ccp_sm4_action action; + enum ccp_sm4_mode mode; + u32 select; +}; + +struct ccp_sm4_ctr_op { + u32 size; + enum ccp_sm4_action action; + u32 step; +}; + struct ccp_op { struct ccp_cmd_queue *cmd_q; @@ -565,6 +579,8 @@ struct ccp_op { struct ccp_ecc_op ecc; struct ccp_sm2_op sm2; struct ccp_sm3_op sm3; + struct ccp_sm4_op sm4; + struct ccp_sm4_ctr_op sm4_ctr; } u; }; @@ -675,6 +691,8 @@ struct ccp_actions { int (*ecc)(struct ccp_op *); int (*sm2)(struct ccp_op *op); int (*sm3)(struct ccp_op *op); + int (*sm4)(struct 
ccp_op *op); + int (*sm4_ctr)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); unsigned int (*get_free_slots)(struct ccp_cmd_queue *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 79aa8ff654dd..7495e233446f 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2708,6 +2708,230 @@ static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) return ret; } +static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm4_engine *sm4 = &cmd->u.sm4; + struct ccp_dm_workarea iv_key; + struct ccp_data src, dst; + struct ccp_op op; + bool in_place = false; + int ret; + + if (sm4->src == NULL || sm4->dst == NULL) + return -EINVAL; + + if (sm4->key == NULL || sm4->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4->key, SM4_KEY_SIZE) < 0) + return -EINVAL; + + if (sm4->mode != CCP_SM4_MODE_ECB) { + if (sm4->iv == NULL || sm4->iv_len != SM4_BLOCK_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4->iv, SM4_BLOCK_SIZE) < 0) + return -EINVAL; + } + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.u.sm4.action = sm4->action; + op.u.sm4.mode = sm4->mode; + op.u.sm4.select = sm4->select; + + /* Prepare the input and output data workareas. For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(sm4->src) == sg_virt(sm4->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, sm4->src, sm4->src_len, + SM4_BLOCK_SIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + return ret; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, sm4->dst, sm4->src_len, + SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* load iv and key */ + ret = ccp_init_dm_workarea(&iv_key, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + + if (sm4->mode != CCP_SM4_MODE_ECB) + ccp_set_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4->key, 0, SM4_KEY_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + /* send data to the CCP SM4 engine */ + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); + if (!src.sg_wa.bytes_left) + op.eom = 1; + + ret = cmd_q->ccp->vdata->perform->sm4(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_process_data(&src, &dst, &op); + } + + if (sm4->mode != CCP_SM4_MODE_ECB) { + /* retrieve the SM4 iv */ + ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_get_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + } + +e_iv_key: + memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); + ccp_dm_free(&iv_key); + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + + return ret; +} + +static int ccp_run_sm4_ctr_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm4_ctr_engine *sm4_ctr = &cmd->u.sm4_ctr; + struct ccp_dm_workarea iv_key; + struct ccp_data src, dst; + struct ccp_op op; + bool in_place = false; + int ret; + + if (sm4_ctr->src == NULL || sm4_ctr->dst == NULL) + return -EINVAL; + + if (sm4_ctr->key == NULL || sm4_ctr->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if 
(sg_nents_for_len(sm4_ctr->key, SM4_KEY_SIZE) < 0) + return -EINVAL; + + if (sm4_ctr->iv == NULL || sm4_ctr->iv_len != SM4_BLOCK_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4_ctr->iv, SM4_BLOCK_SIZE) < 0) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.u.sm4_ctr.size = sm4_ctr->size; + op.u.sm4_ctr.action = sm4_ctr->action; + op.u.sm4_ctr.step = sm4_ctr->step; + + /* Prepare the input and output data workareas. For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(sm4_ctr->src) == sg_virt(sm4_ctr->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, sm4_ctr->src, sm4_ctr->src_len, + SM4_BLOCK_SIZE, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + return ret; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, sm4_ctr->dst, + sm4_ctr->src_len, SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* load iv and key */ + ret = ccp_init_dm_workarea(&iv_key, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + + ccp_set_dm_area(&iv_key, 0, sm4_ctr->iv, 0, SM4_BLOCK_SIZE); + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4_ctr->key, 0, SM4_KEY_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + /* send data to the CCP SM4_CTR engine */ + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, false); + if (!src.sg_wa.bytes_left) + op.eom = 1; + + ret = cmd_q->ccp->vdata->perform->sm4_ctr(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_process_data(&src, &dst, &op); + } + + /* retrieve the SM4_CTR iv */ + ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if 
(ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_get_dm_area(&iv_key, 0, sm4_ctr->iv, 0, SM4_BLOCK_SIZE); + +e_iv_key: + memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); + ccp_dm_free(&iv_key); + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + + return ret; +} + int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; @@ -2758,6 +2982,12 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) case CCP_ENGINE_SM3: ret = ccp_run_sm3_cmd(cmd_q, cmd); break; + case CCP_ENGINE_SM4: + ret = ccp_run_sm4_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_SM4_CTR: + ret = ccp_run_sm4_ctr_cmd(cmd_q, cmd); + break; default: ret = -EINVAL; } diff --git a/include/linux/ccp.h b/include/linux/ccp.h index cda875cf3c71..8e34f05bc6b1 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -675,6 +675,116 @@ struct ccp_sm3_engine { u64 msg_bits; }; +/***** SM4 engine *****/ +#define SM4_BLOCK_SIZE 16 +#define SM4_KEY_SIZE 16 +#define CCP_SM4_MODE_MASK 0x0F +#define CCP_SM4_MODE_HS_SEL 0x10 + +/** + * ccp_sm4_mode - SM4 operation mode + * + * @CCP_SM4_MODE_ECB: ECB mode + * @CCP_SM4_MODE_CBC: CBC mode + * @CCP_SM4_MODE_OFB: OFB mode + * @CCP_SM4_MODE_CFB: CFB mode + * @CCP_SM4_MODE_CTR: CTR mode + */ +enum ccp_sm4_mode { + CCP_SM4_MODE_ECB = 0, + CCP_SM4_MODE_CBC, + CCP_SM4_MODE_OFB, + CCP_SM4_MODE_CFB, + CCP_SM4_MODE_CTR, + CCP_SM4_MODE__LAST, +}; + +/** + * ccp_sm4_action - SM4 operation + * + * @CCP_SM4_ACTION_DECRYPT: SM4 decrypt operation + * @CCP_SM4_ACTION_ENCRYPT: SM4 encrypt operation + */ +enum ccp_sm4_action { + CCP_SM4_ACTION_DECRYPT = 0, + CCP_SM4_ACTION_ENCRYPT, + CCP_SM4_ACTION__LAST, +}; + +/** + * struct ccp_sm4_engine - CCP SM4 operation + * @mode: SM4 operation mode + * @action: SM4 operation (decrypt/encrypt) + * @select: Indicating that high-secure engine is selected + * @key: key to be used for this SM4 operation + * @key_len: length in bytes of 
key + * @iv: IV to be used for this SM4 operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - mode, action, select, key, key_len, src, dst, src_len + * - iv, iv_len for any mode other than ECB + * - key_len and iv_len must be 16B + * - src_len must be multiple of 16B + * - high-secure engine only for ECB and CBC mode + * + * The iv variable is used as both input and output. On completion of the + * SM4 operation the new IV overwrites the old IV. + */ +struct ccp_sm4_engine { + enum ccp_sm4_mode mode; + enum ccp_sm4_action action; + u32 select; /* Indicating that high-secure engine is selected */ + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ +}; + +/***** SM4_CTR engine *****/ +/** + * struct ccp_sm4_ctr_engine - CCP SM4_CTR operation + * @action: SM4_CTR operation (decrypt/encrypt) + * @size: counter bit size + * @step: counter increase step + * @key: key to be used for this SM4 operation + * @key_len: length in bytes of key + * @iv: IV to be used for this SM4 operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - action, size, step, key, key_len, iv, iv_len, src, dst, src_len + * - key_len and iv_len must be 16B + * + * The iv variable is used as both input and output. On completion of the + * SM4_CTR operation the new IV overwrites the old IV. 
+ */ +struct ccp_sm4_ctr_engine { + enum ccp_sm4_action action; + u32 size; + u32 step; + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ +}; + /** * ccp_engine - CCP operation identifiers * @@ -700,6 +810,8 @@ enum ccp_engine { CCP_ENGINE_ECC, CCP_ENGINE_SM2 = 8, /* fixed value */ CCP_ENGINE_SM3, + CCP_ENGINE_SM4, + CCP_ENGINE_SM4_CTR, CCP_ENGINE__LAST, }; @@ -750,6 +862,8 @@ struct ccp_cmd { struct ccp_ecc_engine ecc; struct ccp_sm2_engine sm2; struct ccp_sm3_engine sm3; + struct ccp_sm4_engine sm4; + struct ccp_sm4_ctr_engine sm4_ctr; } u; /* Completion callback support */ -- Gitee From 29b003f0cd4c639478eb834dfe20c1a8165ffcc1 Mon Sep 17 00:00:00 2001 From: yangdepei Date: Tue, 19 Mar 2024 20:13:22 +0800 Subject: [PATCH 0423/2138] anolis: bugfix: crypto: ccp: fix sm2 not return due to wrong complete callback parameter ANBZ: #8582 the complete callback 'crypto_req_done' has changed its input parameter, we need update input in ccp-crypto implement. 
Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-crypto-sm2-hygon.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c index fbf1c5e85fce..5d39842de3fd 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -645,7 +645,7 @@ static void ccp_sm2_enc_compute(struct work_struct *work) crypto_free_shash(shash); e_complete: - req->base.complete(&req->base, ret); + req->base.complete(req->base.data, ret); } static void ccp_sm2_enc_lp(struct work_struct *work) @@ -671,7 +671,7 @@ static void ccp_sm2_enc_lp(struct work_struct *work) ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); if (ret != -EBUSY && ret != -EINPROGRESS) - req->base.complete(&req->base, ret); + req->base.complete(req->base.data, ret); } static int ccp_sm2_encrypt(struct akcipher_request *req) @@ -748,7 +748,7 @@ static void ccp_sm2_dec_compute(struct work_struct *work) /* clear private key, plain, and dC1 */ memset(rctx->src, 0, CCP_SM2_OPERAND_LEN * 2); memset(dst, 0, CCP_SM2_DST_SIZE); - req->base.complete(&req->base, ret); + req->base.complete(req->base.data, ret); } static int ccp_sm2_decrypt(struct akcipher_request *req) -- Gitee From ae5e39901547478c449bde1c4a82c2b9f88955ef Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sat, 7 May 2022 20:02:26 +0800 Subject: [PATCH 0424/2138] anolis: bugfix: crypto: ccp: It prompt ILLEGAL_MEM_ADDR when using PSPCCP. ANBZ: #8582 ccp_find_lsb_regions check from vq_1 but status value start from vq_0. 
Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-dev-v5.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 08c8d72aaf79..2fc4f08698df 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -783,6 +783,7 @@ static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) /* Build a bit mask to know which LSBs this queue has access to. * Don't bother with segment 0 as it has special privileges. */ + status >>= LSB_REGION_WIDTH; for (j = 1; j < MAX_LSB_CNT; j++) { if (status & q_mask) bitmap_set(cmd_q->lsbmask, j, 1); -- Gitee From c5352a2a87ecf3e944f25961e3770e33d79f5c3c Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sat, 7 May 2022 20:40:53 +0800 Subject: [PATCH 0425/2138] anolis: bugfix: crypto: ccp: Only handle interrupts by completion, eliminating by empty queue. ANBZ: #8582 fix the repetitive interrupt (INT_COMPLETION & INT_EMPTY_QUEUE) in one cmd process. 
Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-dev-v5.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 2fc4f08698df..2179da3c9483 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -935,7 +935,7 @@ static void ccp5_irq_bh(unsigned long data) status = ioread32(cmd_q->reg_interrupt_status); - if (status) { + if (status & SUPPORTED_INTERRUPTS) { cmd_q->int_status = status; cmd_q->q_status = ioread32(cmd_q->reg_status); cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); -- Gitee From 26bc218f84112dadd9a4a099bcc330ed36adea03 Mon Sep 17 00:00:00 2001 From: Xiangyu Xu Date: Mon, 22 Aug 2022 10:47:25 +0800 Subject: [PATCH 0426/2138] anolis: bugfix: crypto: ccp: Fix a problem that vq thread may stuck when do multi process test. 
ANBZ: #8582 Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-dev-v5.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 2179da3c9483..0a304b0ce99d 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -944,10 +944,9 @@ static void ccp5_irq_bh(unsigned long data) if ((status & INT_ERROR) && !cmd_q->cmd_error) cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); - cmd_q->int_rcvd = 1; - /* Acknowledge the interrupt and wake the kthread */ iowrite32(status, cmd_q->reg_interrupt_status); + cmd_q->int_rcvd = 1; wake_up_interruptible(&cmd_q->int_queue); } } -- Gitee From 730fbda20f0b9b2b572a3ecd475cd711affad8ea Mon Sep 17 00:00:00 2001 From: yangdepei Date: Fri, 17 Nov 2023 16:21:57 +0800 Subject: [PATCH 0427/2138] anolis: bugfix: fix sm2 test failed in testmgr because of missing DER coding ANBZ: #8582 Add DER coding support for ccp sm2 sign interface. 
Signed-off-by: liulanyi Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/Makefile | 6 +- drivers/crypto/ccp/ccp-crypto-sm2-hygon.c | 120 ++++++++++++++++++++-- drivers/crypto/ccp/ccp_sm2_sign.asn1 | 4 + 3 files changed, 121 insertions(+), 9 deletions(-) create mode 100644 drivers/crypto/ccp/ccp_sm2_sign.asn1 diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 79a764bb11e7..94c673805325 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -25,6 +25,10 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o +$(obj)/ccp_sm2_sign.asn1.o: $(obj)/ccp_sm2_sign.asn1.c $(obj)/ccp_sm2_sign.asn1.h +$(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h + ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \ ccp-crypto-sm3-hygon.o \ - ccp-crypto-sm4-hygon.o + ccp-crypto-sm4-hygon.o \ + ccp_sm2_sign.asn1.o diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c index 5d39842de3fd..a83737f56d4e 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -18,6 +18,7 @@ #include #include "ccp-crypto.h" +#include "ccp_sm2_sign.asn1.h" static const u8 sm2_ecc_p[CCP_SM2_OPERAND_LEN] = { 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, @@ -100,6 +101,47 @@ struct ccp_sm2_dst { u8 result_t[CCP_SM2_OPERAND_LEN]; }; +struct sm2_signature_ctx { + const u8 *sig_r; + const u8 *sig_s; + size_t r_len; + size_t s_len; +}; + +int ccp_sm2_get_signature_r(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct sm2_signature_ctx *sig = context; + + if (!value || !vlen) + return -EINVAL; + + sig->sig_r = value; + sig->r_len = vlen; + + if (!sig->sig_r) + return -ENOMEM; + + return 0; +} + +int ccp_sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag, + 
const void *value, size_t vlen) +{ + struct sm2_signature_ctx *sig = context; + + if (!value || !vlen) + return -EINVAL; + + sig->sig_s = value; + sig->s_len = vlen; + + if (!sig->sig_s) + return -ENOMEM; + + return 0; +} + static bool ccp_sm2_is_zero(const u64 *data, u32 count) { u32 i; @@ -449,11 +491,21 @@ static int ccp_sm2_setpubkey(struct crypto_akcipher *tfm, struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); struct ccp_sm2_ctx *sm2 = &ctx->u.sm2; struct ccp_sm2_req_ctx *rctx = NULL; + const unsigned char *cflag = (const unsigned char *)key; int ret; - if (!key || keylen != CCP_SM2_PUBLIC_KEY_LEN) + if (!key || keylen < CCP_SM2_PUBLIC_KEY_LEN) return -EINVAL; + /* When the length of sm2 public key is 65, + * content of key should be 04 || X || Y, from GM/T0009-2012. + */ + if (keylen > CCP_SM2_PUBLIC_KEY_LEN) { + if (*cflag != 0x04) + return -EINVAL; + key = key + 1; + } + /* check whether public key is valid */ rctx = kmalloc(sizeof(*rctx), GFP_KERNEL); if (!rctx) @@ -830,21 +882,71 @@ static int ccp_sm2_verify(struct akcipher_request *req) struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); struct ccp_sm2_verify_src *src = (struct ccp_sm2_verify_src *)rctx->src; + int siglen; int nents; int ret; + struct sm2_signature_ctx sig; + unsigned char *buffer; if (!ctx->u.sm2.pub_key_len) return -ENOKEY; - if (req->src_len != CCP_SM2_OPERAND_LEN * 3) - return -EINVAL; + if (req->src_len == CCP_SM2_OPERAND_LEN * 3) { + /* Compatible with non-encoded signature from user space */ + nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN * 3); + if (nents < 0) + return -EINVAL; - nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN * 3); - if (nents < 0) - return -EINVAL; + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + CCP_SM2_OPERAND_LEN * 3, 0); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + + 
rctx->req = req; + rctx->phase = CCP_SM2_VERIFY_PH_VERIFY; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, + CCP_SM2_MODE_VERIFY, 0); + + return ret; + } else if (req->src_len < CCP_SM2_OPERAND_LEN * 3) { + /* Compatible with usage like sm2 test of testmgr */ + siglen = req->src_len; + if (req->dst_len != CCP_SM2_OPERAND_LEN) + return -EINVAL; + } else { + /* deal with der encoding signature from user space */ + siglen = req->src_len - CCP_SM2_OPERAND_LEN; + } + + buffer = kmalloc(siglen + CCP_SM2_OPERAND_LEN, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + sg_pcopy_to_buffer(req->src, + sg_nents_for_len(req->src, siglen + CCP_SM2_OPERAND_LEN), + buffer, siglen + CCP_SM2_OPERAND_LEN, 0); + + sig.sig_r = NULL; + sig.sig_s = NULL; + ret = asn1_ber_decoder(&ccp_sm2_sign_decoder, &sig, + buffer, siglen); + + if (ret) + goto error; + + memcpy(src->operand_e, buffer + siglen, CCP_SM2_OPERAND_LEN); + + if (sig.r_len > CCP_SM2_OPERAND_LEN) + memcpy(src->operand_d, sig.sig_r + 1, CCP_SM2_OPERAND_LEN); + else + memcpy(src->operand_d, sig.sig_r, CCP_SM2_OPERAND_LEN); + + if (sig.s_len > CCP_SM2_OPERAND_LEN) + memcpy(src->operand_k, sig.sig_s + 1, CCP_SM2_OPERAND_LEN); + else + memcpy(src->operand_k, sig.sig_s, CCP_SM2_OPERAND_LEN); - scatterwalk_map_and_copy(src->operand_e, req->src, 0, - CCP_SM2_OPERAND_LEN * 3, 0); memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); @@ -854,6 +956,8 @@ static int ccp_sm2_verify(struct akcipher_request *req) ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, CCP_SM2_MODE_VERIFY, 0); +error: + kfree(buffer); return ret; } diff --git a/drivers/crypto/ccp/ccp_sm2_sign.asn1 b/drivers/crypto/ccp/ccp_sm2_sign.asn1 new file mode 100644 index 000000000000..7e83e6799cb4 --- /dev/null +++ b/drivers/crypto/ccp/ccp_sm2_sign.asn1 @@ -0,0 +1,4 @@ +Sm2Signature ::= SEQUENCE { + sig_r INTEGER ({ ccp_sm2_get_signature_r }), + sig_s INTEGER ({ 
ccp_sm2_get_signature_s }) +} -- Gitee From c63e7b0e8aec141fbd348d1f82c313179c6a0734 Mon Sep 17 00:00:00 2001 From: yangdepei Date: Mon, 18 Mar 2024 14:53:46 +0800 Subject: [PATCH 0428/2138] anolis: bugfix: crypto: ccp: fix bug that SM2 encryption of long data causes kernel crash ANBZ: #8582 Signed-off-by: liulanyi Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-crypto-sm2-hygon.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c index a83737f56d4e..25c9a49f7d22 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -670,8 +670,6 @@ static void ccp_sm2_enc_compute(struct work_struct *work) goto e_complete; } - scatterwalk_map_and_copy(rctx->src, req->src, 0, req->src_len, 0); - /* C2 = M ^ t */ ret = ccp_sm2_kdf_xor(shash, req->src, 0, req->src_len, req->dst, CCP_SM2_ENCRYPT_EXT_LEN, -- Gitee From 71e564cd6a0d007fe0ff140eefb4072365267e98 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sun, 8 May 2022 14:19:29 +0800 Subject: [PATCH 0429/2138] anolis: newfeature: crypto: ccp: Modify value of COMMANDS_PER_QUEUE from 16 to 8192. 
ANBZ: #8582 change command queue size to 8192 to support multipule cmd in hygon ccp Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-dev-v5.c | 32 ++++++++++++++++++++++++-------- drivers/crypto/ccp/ccp-dev.h | 11 +++++++---- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 0a304b0ce99d..62e07c9eb793 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -227,6 +227,17 @@ union ccp_function { #define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) #define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) +static inline unsigned int command_per_queue(void) +{ +#ifdef CONFIG_HYGON_GM + return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? + HYGON_COMMANDS_PER_QUEUE : + COMMANDS_PER_QUEUE; +#else + return COMMANDS_PER_QUEUE; +#endif +} + static inline u32 low_address(unsigned long addr) { return (u64)addr & 0x0ffffffff; @@ -240,15 +251,16 @@ static inline u32 high_address(unsigned long addr) static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) { unsigned int head_idx, n; - u32 head_lo, queue_start; + u32 head_lo, queue_start, command_per_q; + command_per_q = command_per_queue(); queue_start = low_address(cmd_q->qdma_tail); head_lo = ioread32(cmd_q->reg_head_lo); head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); - n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1; + n = head_idx + command_per_q - cmd_q->qidx - 1; - return n % COMMANDS_PER_QUEUE; /* Always one unused spot */ + return n % command_per_q; /* Always one unused spot */ } static int ccp5_do_cmd(struct ccp5_desc *desc, @@ -256,10 +268,11 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, { __le32 *mP; u32 *dP; - u32 tail; + u32 tail, command_per_q; int i; int ret = 0; + command_per_q = command_per_queue(); cmd_q->total_ops++; if 
(CCP5_CMD_SOC(desc)) { @@ -273,7 +286,7 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, for (i = 0; i < 8; i++) mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ - cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q; /* The data used by this command must be flushed to memory */ wmb(); @@ -974,7 +987,7 @@ static int ccp5_init(struct ccp_device *ccp) char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; unsigned int qmr, i; u64 status; - u32 status_lo, status_hi; + u32 status_lo, status_hi, command_per_q, queue_size_val; int ret; /* Find available queues */ @@ -991,6 +1004,9 @@ static int ccp5_init(struct ccp_device *ccp) return 1; } + command_per_q = command_per_queue(); + queue_size_val = QUEUE_SIZE_VAL(command_per_q); + for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { if (!(qmr & (1 << i))) continue; @@ -1017,7 +1033,7 @@ static int ccp5_init(struct ccp_device *ccp) /* Page alignment satisfies our needs for N <= 128 */ BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); - cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); + cmd_q->qsize = Q_SIZE(command_per_q, Q_DESC_SIZE); cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, &cmd_q->qbase_dma, GFP_KERNEL); @@ -1104,7 +1120,7 @@ static int ccp5_init(struct ccp_device *ccp) cmd_q = &ccp->cmd_q[i]; cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); - cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT; + cmd_q->qcontrol |= queue_size_val << CMD5_Q_SHIFT; cmd_q->qdma_tail = cmd_q->qbase_dma; dma_addr_lo = low_address(cmd_q->qdma_tail); diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 92b859dae7c6..5dec502f3c5d 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -99,12 +99,15 @@ #define CMD5_Q_MEM_LOCATION 0x4 #define CMD5_Q_SIZE 0x1F #define CMD5_Q_SHIFT 3 + #define COMMANDS_PER_QUEUE 16 -#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \ - CMD5_Q_SIZE) -#define Q_PTR_MASK (2 << (QUEUE_SIZE_VAL + 5) - 1) 
+#define HYGON_COMMANDS_PER_QUEUE 8192 + #define Q_DESC_SIZE sizeof(struct ccp5_desc) -#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n)) + +#define QUEUE_SIZE_VAL(c) ((ffs((c)) - 2) & CMD5_Q_SIZE) +#define Q_PTR_MASK(c) (2 << (QUEUE_SIZE_VAL((c)) + 5) - 1) +#define Q_SIZE(c, n) ((c)*(n)) #define INT_COMPLETION 0x1 #define INT_ERROR 0x2 -- Gitee From 8b5469ab0fd5ef7a7e1378f1e5b95485c62b8057 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sun, 8 May 2022 18:57:08 +0800 Subject: [PATCH 0430/2138] anolis: newfeature: crypto: ccp: Process multiple VQ commands once for SM3 ccp. ANBZ: #8582 optimize sm3 processing performance, the physical page of each sg list corresponds to a CCP cmd, all cmd prepared, then start ccp. Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-dev-v5.c | 73 ++++++++++++++++++++++++++++++++- drivers/crypto/ccp/ccp-dev.h | 1 + drivers/crypto/ccp/ccp-ops.c | 19 +++++++++ 3 files changed, 92 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 62e07c9eb793..b14d18162ebc 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -263,6 +263,76 @@ static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) return n % command_per_q; /* Always one unused spot */ } +static int ccp5_do_multi_cmds(struct ccp5_desc *desc, + struct ccp_cmd_queue *cmd_q) +{ + u32 *mP; + __le32 *dP; + int i; + u32 command_per_q; + + command_per_q = command_per_queue(); + + cmd_q->total_ops++; + + if (CCP5_CMD_SOC(desc)) { + CCP5_CMD_IOC(desc) = 1; + CCP5_CMD_SOC(desc) = 0; + } + + mutex_lock(&cmd_q->q_mutex); + + mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; + dP = (__le32 *) desc; + for (i = 0; i < 8; i++) + mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ + + cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q; + + 
mutex_unlock(&cmd_q->q_mutex); + + return 0; +} + +static int ccp5_do_run_cmd(struct ccp_op *op) +{ + struct ccp_cmd_queue *cmd_q = op->cmd_q; + u32 tail; + int ret = 0; + + mutex_lock(&cmd_q->q_mutex); + + /* The data used by this command must be flushed to memory */ + wmb(); + + /* Write the new tail address back to the queue register */ + tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); + iowrite32(tail, cmd_q->reg_tail_lo); + + /* Turn the queue back on using our cached control register */ + iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); + mutex_unlock(&cmd_q->q_mutex); + + if (op->ioc) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + /* Log the error and flush the queue by + * moving the head pointer + */ + if (cmd_q->cmd_error) + ccp_log_error(cmd_q->ccp, cmd_q->cmd_error); + iowrite32(tail, cmd_q->reg_head_lo); + if (!ret) + ret = -EIO; + } + cmd_q->int_rcvd = 0; + } + + return ret; +} + static int ccp5_do_cmd(struct ccp5_desc *desc, struct ccp_cmd_queue *cmd_q) { @@ -700,7 +770,7 @@ static int ccp5_perform_sm3(struct ccp_op *op) CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); } - return ccp5_do_cmd(&desc, op->cmd_q); + return ccp5_do_multi_cmds(&desc, op->cmd_q); } static int ccp5_perform_sm4(struct ccp_op *op) @@ -1334,6 +1404,7 @@ static const struct ccp_actions ccp5_actions = { .sm3 = ccp5_perform_sm3, .sm4 = ccp5_perform_sm4, .sm4_ctr = ccp5_perform_sm4_ctr, + .run_cmd = ccp5_do_run_cmd, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 5dec502f3c5d..e1aa68f4044c 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -696,6 +696,7 @@ struct ccp_actions { int (*sm3)(struct ccp_op *op); int (*sm4)(struct ccp_op *op); int (*sm4_ctr)(struct ccp_op *op); + int (*run_cmd)(struct ccp_op *op); u32 
(*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); unsigned int (*get_free_slots)(struct ccp_cmd_queue *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 7495e233446f..774c15b1d81e 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2624,12 +2624,25 @@ static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (!src.sg_wa.bytes_left && sm3->final) op.eom = 1; + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + ret = cmd_q->ccp->vdata->perform->sm3(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_data; } + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + } + ccp_process_data(&src, NULL, &op); } } else { @@ -2649,6 +2662,12 @@ static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) cmd->engine_error = cmd_q->cmd_error; goto e_data; } + + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } } ret = ccp_copy_from_sb(cmd_q, &ctx, 0, op.sb_ctx, -- Gitee From 94d6cec387b00ef491e66f9c607438ec8b0d41e5 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Mon, 9 May 2022 07:02:32 +0800 Subject: [PATCH 0431/2138] anolis: newfeature: crypto: ccp: Process multiple VQ commands once for SM4/SM4-CTR ccp. 
ANBZ: #8582 optimize sm4 processing performance by starting ccp after all cmd has been prepared Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2924 --- drivers/crypto/ccp/ccp-dev-v5.c | 4 ++-- drivers/crypto/ccp/ccp-ops.c | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index b14d18162ebc..e5c129c3e049 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -812,7 +812,7 @@ static int ccp5_perform_sm4(struct ccp_op *op) CCP5_CMD_KEY_HI(&desc) = 0; CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; - return ccp5_do_cmd(&desc, op->cmd_q); + return ccp5_do_multi_cmds(&desc, op->cmd_q); } static int ccp5_perform_sm4_ctr(struct ccp_op *op) @@ -854,7 +854,7 @@ static int ccp5_perform_sm4_ctr(struct ccp_op *op) CCP5_CMD_KEY_HI(&desc) = 0; CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; - return ccp5_do_cmd(&desc, op->cmd_q); + return ccp5_do_multi_cmds(&desc, op->cmd_q); } static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 774c15b1d81e..794ad6d6eb5b 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2807,12 +2807,25 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (!src.sg_wa.bytes_left) op.eom = 1; + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + ret = cmd_q->ccp->vdata->perform->sm4(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_iv_key; } + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + } + ccp_process_data(&src, &dst, &op); } @@ -2918,12 +2931,25 @@ static int ccp_run_sm4_ctr_cmd(struct ccp_cmd_queue 
*cmd_q, struct ccp_cmd *cmd) if (!src.sg_wa.bytes_left) op.eom = 1; + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + ret = cmd_q->ccp->vdata->perform->sm4_ctr(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_iv_key; } + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + } + ccp_process_data(&src, &dst, &op); } -- Gitee From a72e8f1f1804c98b8c7ca0dc107707ea030883a5 Mon Sep 17 00:00:00 2001 From: Qi Liu Date: Tue, 19 Mar 2024 16:22:11 +0800 Subject: [PATCH 0432/2138] anolis: perf mem/c2c: Add load store event mapping for Hygon ANBZ: #8643 Hygon support perf mem/c2c as AMD Zen CPU does, and use "ibs_op//" event as mem-ldst event. Signed-off-by: Qi Liu Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2950 --- tools/perf/Documentation/perf-c2c.txt | 8 ++++---- tools/perf/arch/x86/util/env.c | 15 +++++++++++++++ tools/perf/arch/x86/util/env.h | 1 + tools/perf/arch/x86/util/mem-events.c | 2 +- tools/perf/arch/x86/util/pmu.c | 2 +- 5 files changed, 22 insertions(+), 6 deletions(-) diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt index 856f0dfb8e5a..192ab0415ee9 100644 --- a/tools/perf/Documentation/perf-c2c.txt +++ b/tools/perf/Documentation/perf-c2c.txt @@ -21,9 +21,9 @@ you to track down the cacheline contentions. On Intel, the tool is based on load latency and precise store facility events provided by Intel CPUs. On PowerPC, the tool uses random instruction sampling -with thresholding feature. On AMD, the tool uses IBS op pmu (due to hardware -limitations, perf c2c is not supported on Zen3 cpus). On Arm64 it uses SPE to -sample load and store operations, therefore hardware and kernel support is +with thresholding feature. 
On AMD and Hygon, the tool uses IBS op pmu (due to +hardware limitations, perf c2c is not supported on Zen3 cpus). On Arm64 it uses +SPE to sample load and store operations, therefore hardware and kernel support is required. See linkperf:perf-arm-spe[1] for a setup guide. Due to the statistical nature of Arm SPE sampling, not every memory operation will be sampled. @@ -152,7 +152,7 @@ default on Intel: cpu/mem-loads,ldlat=30/P cpu/mem-stores/P -following on AMD: +following on AMD and Hygon: ibs_op// diff --git a/tools/perf/arch/x86/util/env.c b/tools/perf/arch/x86/util/env.c index 3e537ffb1353..f1de12d20b2a 100644 --- a/tools/perf/arch/x86/util/env.c +++ b/tools/perf/arch/x86/util/env.c @@ -17,3 +17,18 @@ bool x86__is_amd_cpu(void) ret: return is_amd >= 1 ? true : false; } + +bool x86__is_hygon_cpu(void) +{ + struct perf_env env = { .total_mem = 0, }; + static int is_hygon; /* 0: Uninitialized, 1: Yes, -1: No */ + + if (is_hygon) + goto ret; + + perf_env__cpuid(&env); + is_hygon = env.cpuid && strstarts(env.cpuid, "HygonGenuine") ? 1 : -1; + perf_env__exit(&env); +ret: + return is_hygon >= 1 ? 
true : false; +} diff --git a/tools/perf/arch/x86/util/env.h b/tools/perf/arch/x86/util/env.h index d78f080b6b3f..904d5e228360 100644 --- a/tools/perf/arch/x86/util/env.h +++ b/tools/perf/arch/x86/util/env.h @@ -3,5 +3,6 @@ #define _X86_ENV_H bool x86__is_amd_cpu(void); +bool x86__is_hygon_cpu(void); #endif /* _X86_ENV_H */ diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c index 191b372f9a2d..f8d9aecbf2f2 100644 --- a/tools/perf/arch/x86/util/mem-events.c +++ b/tools/perf/arch/x86/util/mem-events.c @@ -33,7 +33,7 @@ struct perf_mem_event *perf_mem_events__ptr(int i) if (i >= PERF_MEM_EVENTS__MAX) return NULL; - if (x86__is_amd_cpu()) + if (x86__is_amd_cpu() || x86__is_hygon_cpu()) return &perf_mem_events_amd[i]; return &perf_mem_events_intel[i]; diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c index f428cffb0378..0af256236466 100644 --- a/tools/perf/arch/x86/util/pmu.c +++ b/tools/perf/arch/x86/util/pmu.c @@ -174,7 +174,7 @@ const char *pmu_find_alias_name(const char *name) int perf_pmus__num_mem_pmus(void) { /* AMD uses IBS OP pmu and not a core PMU for perf mem/c2c */ - if (x86__is_amd_cpu()) + if (x86__is_amd_cpu() || x86__is_hygon_cpu()) return 1; /* Intel uses core pmus for perf mem/c2c */ -- Gitee From 8d483700677f292b3a590b0ffc752a79a33a504b Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:21 +0800 Subject: [PATCH 0433/2138] firmware: arm_sdei: add interrupt binding api ANBZ: #8621 commit 394a8507f8556ca7f430007e3f20d15e19f7bdbc openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- This patch add a interrupt binding api function which returns the binded event number. 
Signed-off-by: Xiongfeng Wang Reviewed-by: Kefeng Wang Signed-off-by: Yang Yingliang Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- drivers/firmware/arm_sdei.c | 10 ++++++++++ include/linux/arm_sdei.h | 1 + 2 files changed, 11 insertions(+) diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 3e8051fe8296..4d05924a34b8 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -188,6 +188,16 @@ int sdei_api_event_context(u32 query, u64 *result) } NOKPROBE_SYMBOL(sdei_api_event_context); +int sdei_api_event_interrupt_bind(int hwirq) +{ + u64 event_number; + + invoke_sdei_fn(SDEI_1_0_FN_SDEI_INTERRUPT_BIND, hwirq, 0, 0, 0, 0, + &event_number); + + return (int)event_number; +} + static int sdei_api_event_get_info(u32 event, u32 info, u64 *result) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0, diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index 255701e1251b..bf92dc48fbea 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h @@ -36,6 +36,7 @@ int sdei_event_unregister(u32 event_num); int sdei_event_enable(u32 event_num); int sdei_event_disable(u32 event_num); +int sdei_api_event_interrupt_bind(int hwirq); /* GHES register/unregister helpers */ int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, -- Gitee From 8dbdc9679eb4acfb7c937d937652f4112b7d7502 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:22 +0800 Subject: [PATCH 0434/2138] firmware: arm_sdei: make 'sdei_api_event_disable/enable' public ANBZ: #8621 commit 2b61c8803b5ae428fa7408808c999d2a6e3f5c24 openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- NMI Watchdog need to enable the event for each core individually. 
But the existing public api 'sdei_event_enable' enable events for all cores when the event type is private. Signed-off-by: Xiongfeng Wang Reviewed-by: Kefeng Wang Reviewed-by: Hanjun Guo Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- drivers/firmware/arm_sdei.c | 4 ++-- include/linux/arm_sdei.h | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 4d05924a34b8..a46e8da3bea1 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -389,7 +389,7 @@ static int sdei_platform_reset(void) return err; } -static int sdei_api_event_enable(u32 event_num) +int sdei_api_event_enable(u32 event_num) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0, 0, NULL); @@ -436,7 +436,7 @@ int sdei_event_enable(u32 event_num) return err; } -static int sdei_api_event_disable(u32 event_num) +int sdei_api_event_disable(u32 event_num) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0, 0, 0, NULL); diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index bf92dc48fbea..f5f6ba7a1d50 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h @@ -37,6 +37,8 @@ int sdei_event_unregister(u32 event_num); int sdei_event_enable(u32 event_num); int sdei_event_disable(u32 event_num); int sdei_api_event_interrupt_bind(int hwirq); +int sdei_api_event_disable(u32 event_num); +int sdei_api_event_enable(u32 event_num); /* GHES register/unregister helpers */ int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, -- Gitee From e5125c145a6de1c146ab069d2505f1c0359c3f48 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:23 +0800 Subject: [PATCH 0435/2138] lockup_detector: init lockup detector after all the init_calls ANBZ: #8621 commit 078428464b38d44898d9aa09dd6b66ebc681ae36 openeuler hulk inclusion category: feature bugzilla: 
https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- We call 'sdei_init' as 'subsys_initcall_sync'. lockup detector need to be initialised after sdei_init. The influence of this patch is that we can not detect the hard lockup in init_calls. Signed-off-by: Xiongfeng Wang Reviewed-by: Kefeng Wang Reviewed-by: Hanjun Guo Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- init/main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/init/main.c b/init/main.c index c787e94cc898..9b77a454fed9 100644 --- a/init/main.c +++ b/init/main.c @@ -1541,7 +1541,6 @@ static noinline void __init kernel_init_freeable(void) rcu_init_tasks_generic(); do_pre_smp_initcalls(); - lockup_detector_init(); smp_init(); sched_init_smp(); @@ -1552,6 +1551,8 @@ static noinline void __init kernel_init_freeable(void) do_basic_setup(); + lockup_detector_init(); + kunit_run_all_tests(); wait_for_initramfs(); -- Gitee From 246cc0bb67aed61852dff02783bd4e4bb9ebe2c0 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:24 +0800 Subject: [PATCH 0436/2138] watchdog: add nmi_watchdog support for arm64 based on SDEI ANBZ: #8621 commit 06f3c8d593243b14ddfd020fb835d8d1f196cccb openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- Add nmi_watchdog support for arm64 based on SDEI. 
Signed-off-by: Xiongfeng Wang Reviewed-by: Kefeng Wang Reviewed-by: Hanjun Guo Conflicts: lib/Kconfig.debug arch/arm64/kernel/watchdog_sdei.c Signed-off-by: Xiongfeng Wang Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/kernel/Makefile | 1 + arch/arm64/kernel/watchdog_sdei.c | 112 ++++++++++++++++++++++++++++++ lib/Kconfig.debug | 9 +++ 3 files changed, 122 insertions(+) create mode 100644 arch/arm64/kernel/watchdog_sdei.c diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index d95b3d6b471a..d48aa807dcce 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -68,6 +68,7 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_CRASH_CORE) += crash_core.o obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o +obj-$(CONFIG_SDEI_WATCHDOG) += watchdog_sdei.o obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o obj-$(CONFIG_ARM64_MTE) += mte.o obj-y += vdso-wrap.o diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c new file mode 100644 index 000000000000..8f9eb838b969 --- /dev/null +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Detect hard lockups on a system + * + * Note: Most of this code is borrowed heavily from the perf hardlockup + * detector, so thanks to Don for the initial implementation. 
+ */ + +#define pr_fmt(fmt) "SDEI NMI watchdog: " fmt + +#include +#include +#include +#include +#include +#include +#include + +/* We use the secure physical timer as SDEI NMI watchdog timer */ +#define SDEI_NMI_WATCHDOG_HWIRQ 29 + +static int sdei_watchdog_event_num; +static bool disable_sdei_nmi_watchdog; +static bool sdei_watchdog_registered; + +void watchdog_hardlockup_enable(unsigned int cpu) +{ + int ret; + + if (!sdei_watchdog_registered) + return; + + /* Skip the first hardlockup check incase BIOS didn't init the + * secure timer correctly */ + watchdog_hardlockup_touch_cpu(cpu); + ret = sdei_api_event_enable(sdei_watchdog_event_num); + if (ret) { + pr_err("Enable NMI Watchdog failed on cpu%d\n", + smp_processor_id()); + } +} + +void watchdog_hardlockup_disable(unsigned int cpu) +{ + int ret; + + if (!sdei_watchdog_registered) + return; + + ret = sdei_api_event_disable(sdei_watchdog_event_num); + if (ret) + pr_err("Disable NMI Watchdog failed on cpu%d\n", + smp_processor_id()); +} + +static int sdei_watchdog_callback(u32 event, + struct pt_regs *regs, void *arg) +{ + watchdog_hardlockup_check(smp_processor_id(), regs); + + return 0; +} + +static void sdei_nmi_watchdog_bind(void *data) +{ + int ret; + + ret = sdei_api_event_interrupt_bind(SDEI_NMI_WATCHDOG_HWIRQ); + if (ret < 0) + pr_err("SDEI bind failed on cpu%d, return %d\n", + smp_processor_id(), ret); +} + +static int __init disable_sdei_nmi_watchdog_setup(char *str) +{ + disable_sdei_nmi_watchdog = true; + return 1; +} +__setup("disable_sdei_nmi_watchdog", disable_sdei_nmi_watchdog_setup); + +int __init watchdog_hardlockup_probe(void) +{ + int ret; + + if (disable_sdei_nmi_watchdog) + return -EINVAL; + + if (!is_hyp_mode_available()) { + pr_err("Disable SDEI NMI Watchdog in VM\n"); + return -EINVAL; + } + + sdei_watchdog_event_num = sdei_api_event_interrupt_bind(SDEI_NMI_WATCHDOG_HWIRQ); + if (sdei_watchdog_event_num < 0) { + pr_err("Bind interrupt failed. 
Firmware may not support SDEI !\n"); + return sdei_watchdog_event_num; + } + + on_each_cpu(sdei_nmi_watchdog_bind, NULL, true); + + ret = sdei_event_register(sdei_watchdog_event_num, + sdei_watchdog_callback, NULL); + if (ret) { + pr_err("SDEI Watchdog register callback failed\n"); + return ret; + } + + sdei_watchdog_registered = true; + pr_info("SDEI Watchdog registered successfully\n"); + + return 0; +} diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8267aa4255c7..5eac56262974 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1046,6 +1046,12 @@ config HAVE_HARDLOCKUP_DETECTOR_BUDDY depends on SMP default y +config SDEI_WATCHDOG + bool "SDEI NMI Watchdog support" + depends on ARM_SDE_INTERFACE + depends on HARDLOCKUP_DETECTOR + select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER + # # Global switch whether to build a hardlockup detector at all. It is available # only when the architecture supports at least one implementation. There are @@ -1062,6 +1068,7 @@ config HARDLOCKUP_DETECTOR depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_BUDDY || HAVE_HARDLOCKUP_DETECTOR_ARCH imply HARDLOCKUP_DETECTOR_PERF imply HARDLOCKUP_DETECTOR_BUDDY + imply SDEI_WATCHDOG imply HARDLOCKUP_DETECTOR_ARCH select LOCKUP_DETECTOR @@ -1098,6 +1105,7 @@ config HARDLOCKUP_DETECTOR_PERF depends on HARDLOCKUP_DETECTOR depends on HAVE_HARDLOCKUP_DETECTOR_PERF && !HARDLOCKUP_DETECTOR_PREFER_BUDDY depends on !HAVE_HARDLOCKUP_DETECTOR_ARCH + depends on !SDEI_WATCHDOG select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER config HARDLOCKUP_DETECTOR_BUDDY @@ -1106,6 +1114,7 @@ config HARDLOCKUP_DETECTOR_BUDDY depends on HAVE_HARDLOCKUP_DETECTOR_BUDDY depends on !HAVE_HARDLOCKUP_DETECTOR_PERF || HARDLOCKUP_DETECTOR_PREFER_BUDDY depends on !HAVE_HARDLOCKUP_DETECTOR_ARCH + depends on !SDEI_WATCHDOG select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER config HARDLOCKUP_DETECTOR_ARCH -- Gitee From b3ea290c6e12077912c7b9ed3cd9d276ffd4f2c9 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 
2023 10:17:25 +0800 Subject: [PATCH 0437/2138] sdei_watchdog: clear EOI of the secure timer before kdump ANBZ: #8621 commit 490288e8343f9914f850f13a00ff98f8b878fdcd openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- When we panic in hardlockup, the secure timer interrupt remains activate because firmware clear eoi after dispatch is completed. This will cause arm_arch_timer interrupt failed to trigger in the second kernel. This patch add a new SMC helper to clear eoi of a certain interrupt and clear eoi of the secure timer before booting the second kernel. Signed-off-by: Xiongfeng Wang Reviewed-by: Hanjun Guo Signed-off-by: Zheng Zengkai Conflicts: include/linux/nmi.h Signed-off-by: Xiongfeng Wang Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/kernel/machine_kexec.c | 10 ++++++++++ arch/arm64/kernel/watchdog_sdei.c | 6 ++++++ drivers/firmware/arm_sdei.c | 6 ++++++ include/linux/arm_sdei.h | 1 + include/linux/nmi.h | 6 ++++++ include/uapi/linux/arm_sdei.h | 1 + 6 files changed, 30 insertions(+) diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 078910db77a4..cfa6b0dafc88 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -262,6 +263,15 @@ void machine_crash_shutdown(struct pt_regs *regs) /* shutdown non-crashing cpus */ crash_smp_send_stop(); + /* + * when we panic in hardlockup detected by sdei_watchdog, the secure + * timer interrupt remains activate here because firmware clear eoi + * after dispatch is completed. This will cause arm_arch_timer + * interrupt failed to trigger in the second kernel. So we clear eoi + * of the secure timer before booting the second kernel. 
+ */ + sdei_watchdog_clear_eoi(); + /* for crashing cpu */ crash_save_cpu(regs, smp_processor_id()); machine_kexec_mask_interrupts(); diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c index 8f9eb838b969..7ebf6b5ab237 100644 --- a/arch/arm64/kernel/watchdog_sdei.c +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -78,6 +78,12 @@ static int __init disable_sdei_nmi_watchdog_setup(char *str) } __setup("disable_sdei_nmi_watchdog", disable_sdei_nmi_watchdog_setup); +void sdei_watchdog_clear_eoi(void) +{ + if (sdei_watchdog_registered) + sdei_api_clear_eoi(SDEI_NMI_WATCHDOG_HWIRQ); +} + int __init watchdog_hardlockup_probe(void) { int ret; diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index a46e8da3bea1..0adf9803ffd4 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -198,6 +198,12 @@ int sdei_api_event_interrupt_bind(int hwirq) return (int)event_number; } +int sdei_api_clear_eoi(int hwirq) +{ + return invoke_sdei_fn(SDEI_1_0_FN_SDEI_CLEAR_EOI, hwirq, 0, 0, 0, 0, + NULL); +} + static int sdei_api_event_get_info(u32 event, u32 info, u64 *result) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0, diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index f5f6ba7a1d50..6381537e7015 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h @@ -39,6 +39,7 @@ int sdei_event_disable(u32 event_num); int sdei_api_event_interrupt_bind(int hwirq); int sdei_api_event_disable(u32 event_num); int sdei_api_event_enable(u32 event_num); +int sdei_api_clear_eoi(int hwirq); /* GHES register/unregister helpers */ int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, diff --git a/include/linux/nmi.h b/include/linux/nmi.h index e92e378df000..404c78e04a05 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -235,4 +235,10 @@ static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {} static inline void 
nmi_backtrace_stall_check(const struct cpumask *btp) {} #endif +#ifdef CONFIG_SDEI_WATCHDOG +void sdei_watchdog_clear_eoi(void); +#else +static inline void sdei_watchdog_clear_eoi(void) { } +#endif + #endif diff --git a/include/uapi/linux/arm_sdei.h b/include/uapi/linux/arm_sdei.h index af0630ba5437..1187b1b49c87 100644 --- a/include/uapi/linux/arm_sdei.h +++ b/include/uapi/linux/arm_sdei.h @@ -24,6 +24,7 @@ #define SDEI_1_0_FN_SDEI_INTERRUPT_RELEASE SDEI_1_0_FN(0x0E) #define SDEI_1_0_FN_SDEI_PRIVATE_RESET SDEI_1_0_FN(0x11) #define SDEI_1_0_FN_SDEI_SHARED_RESET SDEI_1_0_FN(0x12) +#define SDEI_1_0_FN_SDEI_CLEAR_EOI SDEI_1_0_FN(0x18) #define SDEI_VERSION_MAJOR_SHIFT 48 #define SDEI_VERSION_MAJOR_MASK 0x7fff -- Gitee From 0e2fc795f890b841dba15f95471e3a9f4ed0999d Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:26 +0800 Subject: [PATCH 0438/2138] sdei_watchdog: set secure timer period base on 'watchdog_thresh' ANBZ: #8621 commit 7fc503364616a91334aa623a97949e2573b87d5d openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- The period of the secure timer is set to 3s by BIOS. That means the secure timer interrupt will trigger every 3 seconds. To further decrease the NMI watchdog's effect on performance, this patch set the period of the secure timer base on 'watchdog_thresh'. This variable is initiallized to 10s. 
We can also set the period at runtime by modifying '/proc/sys/kernel/watchdog_thresh' Signed-off-by: Xiongfeng Wang Reviewed-by: Hanjun Guo Conflicts: arch/arm64/kernel/watchdog_sdei.c (context conflict) Signed-off-by: Xiongfeng Wang Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/kernel/watchdog_sdei.c | 13 +++++++++++++ drivers/firmware/arm_sdei.c | 6 ++++++ include/linux/arm_sdei.h | 1 + include/uapi/linux/arm_sdei.h | 1 + 4 files changed, 21 insertions(+) diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c index 7ebf6b5ab237..758e20eadc31 100644 --- a/arch/arm64/kernel/watchdog_sdei.c +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -33,6 +33,8 @@ void watchdog_hardlockup_enable(unsigned int cpu) /* Skip the first hardlockup check incase BIOS didn't init the * secure timer correctly */ watchdog_hardlockup_touch_cpu(cpu); + sdei_api_set_secure_timer_period(watchdog_thresh); + ret = sdei_api_event_enable(sdei_watchdog_event_num); if (ret) { pr_err("Enable NMI Watchdog failed on cpu%d\n", @@ -102,6 +104,17 @@ int __init watchdog_hardlockup_probe(void) return sdei_watchdog_event_num; } + /* + * After we introduced 'sdei_api_set_secure_timer_period', we disselect + * 'CONFIG_HARDLOCKUP_CHECK_TIMESTAMP'. So we need to make sure that + * firmware can set the period of the secure timer and the timer + * interrupt doesn't trigger too soon. 
+ */ + if (sdei_api_set_secure_timer_period(watchdog_thresh)) { + pr_err("Firmware doesn't support setting the secure timer period, please update your BIOS !\n"); + return -EINVAL; + } + on_each_cpu(sdei_nmi_watchdog_bind, NULL, true); ret = sdei_event_register(sdei_watchdog_event_num, diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 0adf9803ffd4..7f178a4194a5 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -204,6 +204,12 @@ int sdei_api_clear_eoi(int hwirq) NULL); } +int sdei_api_set_secure_timer_period(int sec) +{ + return invoke_sdei_fn(SDEI_1_0_FN_SET_SECURE_TIMER_PERIOD, sec, 0, 0, 0, + 0, NULL); +} + static int sdei_api_event_get_info(u32 event, u32 info, u64 *result) { return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0, diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index 6381537e7015..28e247dd5773 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h @@ -40,6 +40,7 @@ int sdei_api_event_interrupt_bind(int hwirq); int sdei_api_event_disable(u32 event_num); int sdei_api_event_enable(u32 event_num); int sdei_api_clear_eoi(int hwirq); +int sdei_api_set_secure_timer_period(int sec); /* GHES register/unregister helpers */ int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, diff --git a/include/uapi/linux/arm_sdei.h b/include/uapi/linux/arm_sdei.h index 1187b1b49c87..a5375679dd50 100644 --- a/include/uapi/linux/arm_sdei.h +++ b/include/uapi/linux/arm_sdei.h @@ -25,6 +25,7 @@ #define SDEI_1_0_FN_SDEI_PRIVATE_RESET SDEI_1_0_FN(0x11) #define SDEI_1_0_FN_SDEI_SHARED_RESET SDEI_1_0_FN(0x12) #define SDEI_1_0_FN_SDEI_CLEAR_EOI SDEI_1_0_FN(0x18) +#define SDEI_1_0_FN_SET_SECURE_TIMER_PERIOD SDEI_1_0_FN(0x19) #define SDEI_VERSION_MAJOR_SHIFT 48 #define SDEI_VERSION_MAJOR_MASK 0x7fff -- Gitee From 052c492e152762d862a64611b3ef48d6b6297899 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:27 +0800 Subject: [PATCH 0439/2138] 
sdei_watchdog: avoid possible false hardlockup ANBZ: #8621 commit cac525db2b93ba824cd861b66ac7a374c3ebb069 openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- Firmware may not trigger SDEI event as required frequency. SDEI event may be triggered too soon, which cause false hardlockup in kernel. Check the time stamp in sdei_watchdog_callbak and skip the hardlockup check if it is invoked too soon. Signed-off-by: Xiongfeng Wang Reviewed-by: Hanjun Guo Conflicts: arch/arm64/kernel/watchdog_sdei.c (context conflict) Signed-off-by: Xiongfeng Wang Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/kernel/watchdog_sdei.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c index 758e20eadc31..4a143a598eef 100644 --- a/arch/arm64/kernel/watchdog_sdei.c +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -22,6 +22,7 @@ static int sdei_watchdog_event_num; static bool disable_sdei_nmi_watchdog; static bool sdei_watchdog_registered; +static DEFINE_PER_CPU(ktime_t, last_check_time); void watchdog_hardlockup_enable(unsigned int cpu) { @@ -34,6 +35,7 @@ void watchdog_hardlockup_enable(unsigned int cpu) * secure timer correctly */ watchdog_hardlockup_touch_cpu(cpu); sdei_api_set_secure_timer_period(watchdog_thresh); + __this_cpu_write(last_check_time, ktime_get_mono_fast_ns()); ret = sdei_api_event_enable(sdei_watchdog_event_num); if (ret) { @@ -58,6 +60,22 @@ void watchdog_hardlockup_disable(unsigned int cpu) static int sdei_watchdog_callback(u32 event, struct pt_regs *regs, void *arg) { + ktime_t delta, now = ktime_get_mono_fast_ns(); + + delta = now - __this_cpu_read(last_check_time); + __this_cpu_write(last_check_time, now); + + /* + * Set delta to 4/5 of the actual watchdog threshold period so the + * hrtimer is guaranteed to 
fire at least once within the real + * watchdog threshold. + */ + if (delta < watchdog_thresh * (u64)NSEC_PER_SEC * 4 / 5) { + pr_err(FW_BUG "SDEI Watchdog event triggered too soon, " + "time to last check:%lld ns\n", delta); + return 0; + } + watchdog_hardlockup_check(smp_processor_id(), regs); return 0; -- Gitee From 7de1133c8a0b499f3dda1fb891f128babfa9c559 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:28 +0800 Subject: [PATCH 0440/2138] init: only move down lockup_detector_init() when sdei_watchdog is enabled ANBZ: #8621 commit 1509d06c9c41985ee6b7dd6acbb08d9ee5dcf2b3 openeuler hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- When I enable CONFIG_DEBUG_PREEMPT and CONFIG_PREEMPT on X86, I got the following Call Trace: [ 3.341853] BUG: using smp_processor_id() in preemptible [00000000] code: swapper/0/1 [ 3.344392] caller is debug_smp_processor_id+0x17/0x20 [ 3.344395] CPU: 1 PID: 1 Comm: swapper/0 Not tainted 5.10.0+ #398 [ 3.344397] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.10.2-0-g5f4c7b1-prebuilt.qemu-project.org 04/01/2014 [ 3.344399] Call Trace: [ 3.344410] dump_stack+0x60/0x76 [ 3.344412] check_preemption_disabled+0xba/0xc0 [ 3.344415] debug_smp_processor_id+0x17/0x20 [ 3.344422] hardlockup_detector_event_create+0xf/0x60 [ 3.344427] hardlockup_detector_perf_init+0xf/0x41 [ 3.344430] watchdog_nmi_probe+0xe/0x10 [ 3.344432] lockup_detector_init+0x22/0x5b [ 3.344437] kernel_init_freeable+0x20c/0x245 [ 3.344439] ? rest_init+0xd0/0xd0 [ 3.344441] kernel_init+0xe/0x110 [ 3.344446] ret_from_fork+0x22/0x30 It is because sched_init_smp() set 'current->nr_cpus_allowed' to possible cpu number, and check_preemption_disabled() failed. This issue is introduced by commit a79050434b45, which move down lockup_detector_init() after do_basic_setup(). 
Fix it by moving lockup_detector_init() to its origin place when sdei_watchdog is disabled. There is no problem when sdei_watchdog is enabled because watchdog_nmi_probe() is overridden in 'arch/arm64/kernel/watchdog_sdei.c' in this case. Fixes: a79050434b45 ("lockup_detector: init lockup detector after all the init_calls") Signed-off-by: Xiongfeng Wang Reviewed-by: Wei Li Signed-off-by: Chen Jun Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/kernel/watchdog_sdei.c | 2 +- include/linux/nmi.h | 2 ++ init/main.c | 6 +++++- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c index 4a143a598eef..155f36e24699 100644 --- a/arch/arm64/kernel/watchdog_sdei.c +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -20,7 +20,7 @@ #define SDEI_NMI_WATCHDOG_HWIRQ 29 static int sdei_watchdog_event_num; -static bool disable_sdei_nmi_watchdog; +bool disable_sdei_nmi_watchdog; static bool sdei_watchdog_registered; static DEFINE_PER_CPU(ktime_t, last_check_time); diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 404c78e04a05..7bd446acad24 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -237,8 +237,10 @@ static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {} #ifdef CONFIG_SDEI_WATCHDOG void sdei_watchdog_clear_eoi(void); +extern bool disable_sdei_nmi_watchdog; #else static inline void sdei_watchdog_clear_eoi(void) { } +#define disable_sdei_nmi_watchdog 1 #endif #endif diff --git a/init/main.c b/init/main.c index 9b77a454fed9..a61ac250fdcb 100644 --- a/init/main.c +++ b/init/main.c @@ -1541,6 +1541,8 @@ static noinline void __init kernel_init_freeable(void) rcu_init_tasks_generic(); do_pre_smp_initcalls(); + if (disable_sdei_nmi_watchdog) + lockup_detector_init(); smp_init(); sched_init_smp(); @@ -1551,7 +1553,9 @@ static noinline void __init kernel_init_freeable(void) do_basic_setup(); - 
lockup_detector_init(); + /* sdei_watchdog needs to be initialized after sdei_init */ + if (!disable_sdei_nmi_watchdog) + lockup_detector_init(); kunit_run_all_tests(); -- Gitee From 0db972579cce25719ab71c05daf9da8487dd2e53 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Tue, 5 Dec 2023 10:17:29 +0800 Subject: [PATCH 0441/2138] kprobes/arm64: Blacklist sdei watchdog callback functions ANBZ: #8621 commit 9c2c933d3e9ac36f9f77391fe63ffbb145a566d6 openeuler hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8LQCC CVE: NA ------------------------------------------------- Functions called in sdei_handler are not allowed to be kprobed, so marked them as NOKPROBE_SYMBOL. There are so many functions in 'watchdog_check_timestamp()'. Luckily, we don't need 'CONFIG_HARDLOCKUP_CHECK_TIMESTAMP' now. So just make CONFIG_SDEI_WATCHDOG depends on !CONFIG_HARDLOCKUP_CHECK_TIMESTAMP in case someone add 'CONFIG_HARDLOCKUP_CHECK_TIMESTAMP' in the future. Signed-off-by: Xiongfeng Wang Reviewed-by: Yang Yingliang Reviewed-by: Hanjun Guo Conflicts: kernel/watchdog.c kernel/watchdog_hld.c Signed-off-by: Xiongfeng Wang Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/kernel/watchdog_sdei.c | 2 ++ kernel/watchdog.c | 3 +++ 2 files changed, 5 insertions(+) diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c index 155f36e24699..6f43496de56e 100644 --- a/arch/arm64/kernel/watchdog_sdei.c +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -14,6 +14,7 @@ #include #include #include +#include #include /* We use the secure physical timer as SDEI NMI watchdog timer */ @@ -80,6 +81,7 @@ static int sdei_watchdog_callback(u32 event, return 0; } +NOKPROBE_SYMBOL(sdei_watchdog_callback); static void sdei_nmi_watchdog_bind(void *data) { diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 5cd6d4e26915..9e349a943cdd 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c 
@@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -127,6 +128,7 @@ static bool is_hardlockup(unsigned int cpu) return false; } +NOKPROBE_SYMBOL(is_hardlockup); static void watchdog_hardlockup_kick(void) { @@ -184,6 +186,7 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs) per_cpu(watchdog_hardlockup_warned, cpu) = false; } } +NOKPROBE_SYMBOL(watchdog_hardlockup_check); #else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */ -- Gitee From 2a5ad85775a3684f46f49a797c625a3f68cef672 Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Thu, 1 Feb 2024 15:51:37 +0800 Subject: [PATCH 0442/2138] watchdog: Support watchdog_sdei coexist with existing watchdogs ANBZ: #8621 commit f61b11535a0bcb5c0a90f626a757cb710d71409c openeuler kunpeng inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I90N2C CVE: NA ---------------------------------------------------------------------- Currently we cannot use watchdog_{perf, buddy} if CONFIG_SDEI_WATCHDOG=y. Not all the platforms has watchdog_sdei so this patch tries to make watchdog_sdei coexist with other watchdogs. Only one watchdog will finally works. By default watchdog_sdei will be used. If boot with "disable_sdei_nmi_watchdog", other watchdogs will be used if probed. 
Signed-off-by: Yicong Yang Signed-off-by: Jie Liu Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/kernel/watchdog_sdei.c | 6 +++--- include/linux/nmi.h | 6 ++++++ kernel/watchdog.c | 16 ++++++++++++---- lib/Kconfig.debug | 2 -- 4 files changed, 21 insertions(+), 9 deletions(-) diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c index 6f43496de56e..c7b12806364e 100644 --- a/arch/arm64/kernel/watchdog_sdei.c +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -25,7 +25,7 @@ bool disable_sdei_nmi_watchdog; static bool sdei_watchdog_registered; static DEFINE_PER_CPU(ktime_t, last_check_time); -void watchdog_hardlockup_enable(unsigned int cpu) +void sdei_watchdog_hardlockup_enable(unsigned int cpu) { int ret; @@ -45,7 +45,7 @@ void watchdog_hardlockup_enable(unsigned int cpu) } } -void watchdog_hardlockup_disable(unsigned int cpu) +void sdei_watchdog_hardlockup_disable(unsigned int cpu) { int ret; @@ -106,7 +106,7 @@ void sdei_watchdog_clear_eoi(void) sdei_api_clear_eoi(SDEI_NMI_WATCHDOG_HWIRQ); } -int __init watchdog_hardlockup_probe(void) +int __init sdei_watchdog_hardlockup_probe(void) { int ret; diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 7bd446acad24..43dd3a79fdf2 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -236,10 +236,16 @@ static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {} #endif #ifdef CONFIG_SDEI_WATCHDOG +void sdei_watchdog_hardlockup_enable(unsigned int cpu); +void sdei_watchdog_hardlockup_disable(unsigned int cpu); void sdei_watchdog_clear_eoi(void); +int sdei_watchdog_hardlockup_probe(void); extern bool disable_sdei_nmi_watchdog; #else +static inline void sdei_watchdog_hardlockup_enable(unsigned int cpu) { } +static inline void sdei_watchdog_hardlockup_disable(unsigned int cpu) { } static inline void sdei_watchdog_clear_eoi(void) { } +static inline int sdei_watchdog_hardlockup_probe(void) { return 
-ENODEV; } #define disable_sdei_nmi_watchdog 1 #endif diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 9e349a943cdd..11102420a2c7 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -562,8 +562,12 @@ static void watchdog_enable(unsigned int cpu) /* Initialize timestamp */ update_touch_ts(); /* Enable the hardlockup detector */ - if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED) - watchdog_hardlockup_enable(cpu); + if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED) { + if (disable_sdei_nmi_watchdog) + watchdog_hardlockup_enable(cpu); + else + sdei_watchdog_hardlockup_enable(cpu); + } } static void watchdog_disable(unsigned int cpu) @@ -577,7 +581,10 @@ static void watchdog_disable(unsigned int cpu) * delay between disabling the timer and disabling the hardlockup * detector causes a false positive. */ - watchdog_hardlockup_disable(cpu); + if (disable_sdei_nmi_watchdog) + watchdog_hardlockup_disable(cpu); + else + sdei_watchdog_hardlockup_disable(cpu); hrtimer_cancel(hrtimer); wait_for_completion(this_cpu_ptr(&softlockup_completion)); } @@ -1022,7 +1029,8 @@ void __init lockup_detector_init(void) cpumask_copy(&watchdog_cpumask, housekeeping_cpumask(HK_TYPE_TIMER)); - if (!watchdog_hardlockup_probe()) + if ((!disable_sdei_nmi_watchdog && !sdei_watchdog_hardlockup_probe()) || + !watchdog_hardlockup_probe()) watchdog_hardlockup_available = true; else allow_lockup_detector_init_retry = true; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5eac56262974..32843f12dae8 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1105,7 +1105,6 @@ config HARDLOCKUP_DETECTOR_PERF depends on HARDLOCKUP_DETECTOR depends on HAVE_HARDLOCKUP_DETECTOR_PERF && !HARDLOCKUP_DETECTOR_PREFER_BUDDY depends on !HAVE_HARDLOCKUP_DETECTOR_ARCH - depends on !SDEI_WATCHDOG select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER config HARDLOCKUP_DETECTOR_BUDDY @@ -1114,7 +1113,6 @@ config HARDLOCKUP_DETECTOR_BUDDY depends on HAVE_HARDLOCKUP_DETECTOR_BUDDY depends on 
!HAVE_HARDLOCKUP_DETECTOR_PERF || HARDLOCKUP_DETECTOR_PREFER_BUDDY depends on !HAVE_HARDLOCKUP_DETECTOR_ARCH - depends on !SDEI_WATCHDOG select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER config HARDLOCKUP_DETECTOR_ARCH -- Gitee From a4a49615fafb1d3b6cf7faea03c19cbfdfb78668 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Tue, 26 Mar 2024 15:58:31 +0800 Subject: [PATCH 0443/2138] anolis: config: arm64: Enable SDEI nmi_watchdog support ANBZ: #8621 Enable SDEI nmi_watchdog support on ARM64 platform. Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2945 --- arch/arm64/configs/anolis-debug_defconfig | 1 + arch/arm64/configs/anolis_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index b2a67eec8bf0..e2d73d630bf0 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -6919,6 +6919,7 @@ CONFIG_LOCKUP_DETECTOR=y CONFIG_SOFTLOCKUP_DETECTOR=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +CONFIG_SDEI_WATCHDOG=y CONFIG_HARDLOCKUP_DETECTOR=y # CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set CONFIG_HARDLOCKUP_DETECTOR_PERF=y diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 092a98239318..e69182e8882f 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -6879,6 +6879,7 @@ CONFIG_LOCKUP_DETECTOR=y CONFIG_SOFTLOCKUP_DETECTOR=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +CONFIG_SDEI_WATCHDOG=y CONFIG_HARDLOCKUP_DETECTOR=y # CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set CONFIG_HARDLOCKUP_DETECTOR_PERF=y -- Gitee From b6710f8f64550be9c8fd52cc1dd776f442ed7588 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 26 Mar 2024 15:09:33 +0800 Subject: [PATCH 0444/2138] anolis: configs: add CONFIG_DWC_PCIE_PMU config ANBZ: #8565 
add CONFIG_DWC_PCIE_PMU config to anolis_defconfig and anolis-debug_defconfig Signed-off-by: Jing Zhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2953 --- arch/arm64/configs/anolis-debug_defconfig | 1 + arch/arm64/configs/anolis_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index e2d73d630bf0..43c9990b1068 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -5760,6 +5760,7 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_ARM_DMC620_PMU is not set # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m +CONFIG_DWC_PCIE_PMU=m CONFIG_HISI_PMU=m CONFIG_HISI_PCIE_PMU=m # CONFIG_HNS3_PMU is not set diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index e69182e8882f..0e3103e1b10c 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -5756,6 +5756,7 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_ARM_DMC620_PMU is not set # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m +CONFIG_DWC_PCIE_PMU=m CONFIG_HISI_PMU=m CONFIG_HISI_PCIE_PMU=m # CONFIG_HNS3_PMU is not set -- Gitee From eb92cbd356b88a9957e18c6f09177882c171d2ae Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 19 Mar 2024 16:41:00 +0800 Subject: [PATCH 0445/2138] anolis: Add MWAIT Cx support for Zhaoxin CPUs ANBZ: #7809 When the processor is idle, low-power idle states (C-states) can be used to save power. For Zhaoxin processors, there are two methods to enter idle states. One is HLT instruction and legacy method of I/O reads from the ACPI-defined register (known as P_LVLx), the other one is MWAIT instruction with idle states hints. 
Default for legacy operating system, HLT and P_LVLx I/O reads are used for Zhaoxin Processors to enter idle states, but we have checked on some Zhaoxin platform that MWAIT instruction is more efficient than P_LVLx I/O reads and HLT, so we add MWAIT Cx support for Zhaoxin Processors. Signed-off-by: leoliu-oc Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2699 --- arch/x86/include/asm/acpi.h | 3 ++- arch/x86/kernel/acpi/cstate.c | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 529c36a98d9e..9114ff001770 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -102,7 +102,8 @@ static inline bool arch_has_acpi_pdc(void) { struct cpuinfo_x86 *c = &cpu_data(0); return (c->x86_vendor == X86_VENDOR_INTEL || - c->x86_vendor == X86_VENDOR_CENTAUR); + c->x86_vendor == X86_VENDOR_CENTAUR || + c->x86_vendor == X86_VENDOR_ZHAOXIN); } static inline void arch_acpi_set_proc_cap_bits(u32 *cap) diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index 401808b47af3..90f22148acc7 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -221,7 +221,9 @@ static int __init ffh_cstate_init(void) if (c->x86_vendor != X86_VENDOR_INTEL && c->x86_vendor != X86_VENDOR_AMD && - c->x86_vendor != X86_VENDOR_HYGON) + c->x86_vendor != X86_VENDOR_HYGON && + c->x86_vendor != X86_VENDOR_CENTAUR && + c->x86_vendor != X86_VENDOR_ZHAOXIN) return -1; cpu_cstate_entry = alloc_percpu(struct cstate_entry); -- Gitee From b94fb52cdc51579d0ea7fe7511854694956965ba Mon Sep 17 00:00:00 2001 From: liyuting Date: Tue, 12 Mar 2024 09:29:05 +0800 Subject: [PATCH 0446/2138] anolis: gic: Add gic support for Phytium S2500 ANBZ: #8558 phytium inclusion category: feature CVE: NA --------------------------------------------------------- Add gic support for Phytium S2500 Signed-off-by: cuifulong Signed-off-by: liyuting 
Reviewed-by: Guanghui Feng Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/2872 --- arch/arm64/Kconfig.platforms | 6 + drivers/irqchip/Kconfig | 8 + drivers/irqchip/Makefile | 1 + drivers/irqchip/irq-gic-phytium-2500-its.c | 5734 ++++++++++++++++++ drivers/irqchip/irq-gic-phytium-2500.c | 2898 +++++++++ include/acpi/actbl2.h | 3 +- include/linux/irqchip/arm-gic-phytium-2500.h | 661 ++ 7 files changed, 9310 insertions(+), 1 deletion(-) create mode 100644 drivers/irqchip/irq-gic-phytium-2500-its.c create mode 100644 drivers/irqchip/irq-gic-phytium-2500.c create mode 100644 include/linux/irqchip/arm-gic-phytium-2500.h diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 6069120199bb..62b813d80700 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -244,6 +244,12 @@ config ARCH_NPCM General support for NPCM8xx BMC (Arbel). Nuvoton NPCM8xx BMC based on the Cortex A35. +config ARCH_PHYTIUM + bool "Phytium SoC Family" + select ARM_GIC_PHYTIUM_2500 + help + This enables support for Phytium ARMv8 SoC family. 
+ config ARCH_QCOM bool "Qualcomm Platforms" select GPIOLIB diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 3d506b42f31b..027df575d57f 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -89,6 +89,14 @@ config ARM_GIC_V3_ITS_FSL_MC depends on FSL_MC_BUS default ARM_GIC_V3_ITS +config ARM_GIC_PHYTIUM_2500 + bool + select IRQ_DOMAIN + select IRQ_DOMAIN_HIERARCHY + select PARTITION_PERCPU + select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_MSI_IRQ_DOMAIN + config ARM_NVIC bool select IRQ_DOMAIN_HIERARCHY diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 466eb0bd2b52..246aa0603d6e 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o +obj-$(CONFIG_ARM_GIC_PHYTIUM_2500) += irq-gic-phytium-2500.o irq-gic-phytium-2500-its.o obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o obj-$(CONFIG_ARM_NVIC) += irq-nvic.o diff --git a/drivers/irqchip/irq-gic-phytium-2500-its.c b/drivers/irqchip/irq-gic-phytium-2500-its.c new file mode 100644 index 000000000000..d1ecf059a39f --- /dev/null +++ b/drivers/irqchip/irq-gic-phytium-2500-its.c @@ -0,0 +1,5734 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Phytium Corporation. + * Author: + * Wang Yinfeng + * Chen Baozi + * Chen Siyu + * Cui Fulong + * Li Yuting + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "irq-gic-common.h" + +#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) +#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) +#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) +#define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3) + +#define RD_LOCAL_LPI_ENABLED BIT(0) +#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1) +#define RD_LOCAL_MEMRESERVE_DONE BIT(2) + +static u32 lpi_id_bits; + +/* + * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to + * deal with (one configuration byte per interrupt). PENDBASE has to + * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). + */ +#define LPI_NRBITS lpi_id_bits +#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) +#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) + +#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI + +/* + * Collection structure - just an ID, and a redistributor address to + * ping. We use one per CPU as a bag of interrupts assigned to this + * CPU. + */ +struct its_collection { + u64 target_address; + u16 col_id; +}; + +/* + * The ITS_BASER structure - contains memory information, cached + * value of BASER register configuration and ITS page size. 
+ */ +struct its_baser { + void *base; + u64 val; + u32 order; + u32 psz; +}; + +struct its_device; + +/* + * The ITS structure - contains most of the infrastructure, with the + * top-level MSI domain, the command queue, the collections, and the + * list of devices writing to it. + * + * dev_alloc_lock has to be taken for device allocations, while the + * spinlock must be taken to parse data structures such as the device + * list. + */ +struct its_node { + raw_spinlock_t lock; + struct mutex dev_alloc_lock; + struct list_head entry; + void __iomem *base; + void __iomem *sgir_base; + phys_addr_t phys_base; + struct its_cmd_block *cmd_base; + struct its_cmd_block *cmd_write; + struct its_baser tables[GITS_BASER_NR_REGS]; + struct its_collection *collections; + struct fwnode_handle *fwnode_handle; + u64 (*get_msi_base)(struct its_device *its_dev); + u64 typer; + u64 cbaser_save; + u32 ctlr_save; + u32 mpidr; + struct list_head its_device_list; + u64 flags; + unsigned long list_nr; + int numa_node; + unsigned int msi_domain_flags; + u32 pre_its_base; /* for Socionext Synquacer */ + int vlpi_redist_offset; +}; + +#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) +#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) +#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) + +#define ITS_ITT_ALIGN SZ_256 + +/* The maximum number of VPEID bits supported by VLPI commands */ +#define ITS_MAX_VPEID_BITS \ + ({ \ + int nvpeid = 16; \ + if (gic_rdists->has_rvpeid && \ + gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \ + nvpeid = 1 + (gic_rdists->gicd_typer2 & \ + GICD_TYPER2_VID); \ + \ + nvpeid; \ + }) +#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) + +/* Convert page order to size in bytes */ +#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) + +struct event_lpi_map { + unsigned long *lpi_map; + u16 *col_map; + irq_hw_number_t lpi_base; + int nr_lpis; + raw_spinlock_t vlpi_lock; + struct its_vm *vm; + struct its_vlpi_map *vlpi_maps; + int 
nr_vlpis; +}; + +/* + * The ITS view of a device - belongs to an ITS, owns an interrupt + * translation table, and a list of interrupts. If it some of its + * LPIs are injected into a guest (GICv4), the event_map.vm field + * indicates which one. + */ +struct its_device { + struct list_head entry; + struct its_node *its; + struct event_lpi_map event_map; + void *itt; + u32 nr_ites; + u32 device_id; + bool shared; +}; + +static struct { + raw_spinlock_t lock; + struct its_device *dev; + struct its_vpe **vpes; + int next_victim; +} vpe_proxy; + +struct cpu_lpi_count { + atomic_t managed; + atomic_t unmanaged; +}; + +static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count_ft2500); + +static LIST_HEAD(its_nodes); +static DEFINE_RAW_SPINLOCK(its_lock); +static struct rdists *gic_rdists; +static struct irq_domain *its_parent; + +static unsigned long its_list_map; +static u16 vmovp_seq_num; +static DEFINE_RAW_SPINLOCK(vmovp_lock); + +static DEFINE_IDA(its_vpeid_ida); + +#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) +#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) +#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) +#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) + +/* + * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we + * always have vSGIs mapped. 
+ */ +static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its) +{ + return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]); +} + +static u16 get_its_list(struct its_vm *vm) +{ + struct its_node *its; + unsigned long its_list = 0; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (require_its_list_vmovp(vm, its)) + __set_bit(its->list_nr, &its_list); + } + + return (u16)its_list; +} + +static inline u32 its_get_event_id(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + return d->hwirq - its_dev->event_map.lpi_base; +} + +static struct its_collection *dev_event_to_col(struct its_device *its_dev, + u32 event) +{ + struct its_node *its = its_dev->its; + + return its->collections + its_dev->event_map.col_map[event]; +} + +static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev, + u32 event) +{ + if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis)) + return NULL; + + return &its_dev->event_map.vlpi_maps[event]; +} + +static struct its_vlpi_map *get_vlpi_map(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + return dev_event_to_vlpi_map(its_dev, event); + } + + return NULL; +} + +static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags) +{ + raw_spin_lock_irqsave(&vpe->vpe_lock, *flags); + return vpe->col_idx; +} + +static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) +{ + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); +} + +static struct irq_chip its_vpe_irq_chip; + +static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) +{ + struct its_vpe *vpe = NULL; + int cpu; + + if (d->chip == &its_vpe_irq_chip) { + vpe = irq_data_get_irq_chip_data(d); + } else { + struct its_vlpi_map *map = get_vlpi_map(d); + + if (map) + vpe = map->vpe; + } + + if (vpe) { + cpu = vpe_to_cpuid_lock(vpe, 
flags); + } else { + /* Physical LPIs are already locked via the irq_desc lock */ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + cpu = its_dev->event_map.col_map[its_get_event_id(d)]; + /* Keep GCC quiet... */ + *flags = 0; + } + + return cpu; +} + +static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags) +{ + struct its_vpe *vpe = NULL; + + if (d->chip == &its_vpe_irq_chip) { + vpe = irq_data_get_irq_chip_data(d); + } else { + struct its_vlpi_map *map = get_vlpi_map(d); + + if (map) + vpe = map->vpe; + } + + if (vpe) + vpe_to_cpuid_unlock(vpe, flags); +} + +static struct its_collection *valid_col(struct its_collection *col) +{ + if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0))) + return NULL; + + return col; +} + +static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) +{ + if (valid_col(its->collections + vpe->col_idx)) + return vpe; + + return NULL; +} + +/* + * ITS command descriptors - parameters to be encoded in a command + * block. 
+ */ +struct its_cmd_desc { + union { + struct { + struct its_device *dev; + u32 event_id; + } its_inv_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_clear_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_int_cmd; + + struct { + struct its_device *dev; + int valid; + } its_mapd_cmd; + + struct { + struct its_collection *col; + int valid; + } its_mapc_cmd; + + struct { + struct its_device *dev; + u32 phys_id; + u32 event_id; + } its_mapti_cmd; + + struct { + struct its_device *dev; + struct its_collection *col; + u32 event_id; + } its_movi_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_discard_cmd; + + struct { + struct its_collection *col; + } its_invall_cmd; + + struct { + struct its_vpe *vpe; + } its_vinvall_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + bool valid; + } its_vmapp_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 virt_id; + u32 event_id; + bool db_enabled; + } its_vmapti_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 event_id; + bool db_enabled; + } its_vmovi_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + u16 seq_num; + u16 its_list; + } its_vmovp_cmd; + + struct { + struct its_vpe *vpe; + } its_invdb_cmd; + + struct { + struct its_vpe *vpe; + u8 sgi; + u8 priority; + bool enable; + bool group; + bool clear; + } its_vsgi_cmd; + }; +}; + +/* + * The ITS command block, which is what the ITS actually parses. 
+ */ +struct its_cmd_block { + union { + u64 raw_cmd[4]; + __le64 raw_cmd_le[4]; + }; +}; + +#define ITS_CMD_QUEUE_SZ SZ_64K +#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) + +typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) +{ + u64 mask = GENMASK_ULL(h, l); + *raw_cmd &= ~mask; + *raw_cmd |= (val << l) & mask; +} + +static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) +{ + its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0); +} + +static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) +{ + its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32); +} + +static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) +{ + its_mask_encode(&cmd->raw_cmd[1], id, 31, 0); +} + +static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) +{ + its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32); +} + +static void its_encode_size(struct its_cmd_block *cmd, u8 size) +{ + its_mask_encode(&cmd->raw_cmd[1], size, 4, 0); +} + +static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8); +} + +static void its_encode_valid(struct its_cmd_block *cmd, int valid) +{ + its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63); +} + +static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16); +} + +static void its_encode_collection(struct its_cmd_block *cmd, u16 col) +{ + its_mask_encode(&cmd->raw_cmd[2], col, 15, 0); +} + +static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid) +{ + its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32); +} + +static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id) +{ + 
its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0); +} + +static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id) +{ + its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32); +} + +static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid) +{ + its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0); +} + +static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num) +{ + its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32); +} + +static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list) +{ + its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0); +} + +static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16); +} + +static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); +} + +static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa) +{ + its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16); +} + +static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc) +{ + its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8); +} + +static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz) +{ + its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9); +} + +static void its_encode_vmapp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0); +} + +static void its_encode_vmovp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0); +} + +static void its_encode_db(struct its_cmd_block *cmd, bool db) +{ + its_mask_encode(&cmd->raw_cmd[2], db, 63, 63); +} + +static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi) +{ + its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32); +} + +static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio) +{ + its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20); +} + +static void its_encode_sgi_group(struct 
its_cmd_block *cmd, bool grp) +{ + its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10); +} + +static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr) +{ + its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9); +} + +static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en) +{ + its_mask_encode(&cmd->raw_cmd[0], en, 8, 8); +} + +static inline void its_fixup_cmd(struct its_cmd_block *cmd) +{ + /* Let's fixup BE commands */ + cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]); + cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]); + cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]); + cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]); +} + +static struct its_collection *its_build_mapd_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long itt_addr; + u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); + + itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); + itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); + + its_encode_cmd(cmd, GITS_CMD_MAPD); + its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); + its_encode_size(cmd, size - 1); + its_encode_itt(cmd, itt_addr); + its_encode_valid(cmd, desc->its_mapd_cmd.valid); + + its_fixup_cmd(cmd); + + return NULL; +} + +static struct its_collection *its_build_mapc_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_MAPC); + its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); + its_encode_valid(cmd, desc->its_mapc_cmd.valid); + + its_fixup_cmd(cmd); + + return desc->its_mapc_cmd.col; +} + +static struct its_collection *its_build_mapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_mapti_cmd.dev, + desc->its_mapti_cmd.event_id); + col->col_id = col->col_id % 64; + + its_encode_cmd(cmd, GITS_CMD_MAPTI); + its_encode_devid(cmd, 
desc->its_mapti_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_mapti_cmd.event_id); + its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id); + its_encode_collection(cmd, col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_movi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_movi_cmd.dev, + desc->its_movi_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_MOVI); + its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_movi_cmd.event_id); + its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_discard_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_discard_cmd.dev, + desc->its_discard_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_DISCARD); + its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_discard_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_inv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_int_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + 
its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_clear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_invall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_INVALL); + its_encode_collection(cmd, desc->its_invall_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return desc->its_invall_cmd.col; +} + +static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_VINVALL); + its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vinvall_cmd.vpe); +} + +static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long vpt_addr, vconf_addr; + u64 target; + bool alloc; + + its_encode_cmd(cmd, GITS_CMD_VMAPP); + its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); + its_encode_valid(cmd, desc->its_vmapp_cmd.valid); + + if (!desc->its_vmapp_cmd.valid) { + if (is_v4_1(its)) { + alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); + its_encode_alloc(cmd, alloc); + } + + goto out; + } + + vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); + target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + + its_encode_target(cmd, target); + its_encode_vpt_addr(cmd, 
vpt_addr); + its_encode_vpt_size(cmd, LPI_NRBITS - 1); + + if (!is_v4_1(its)) + goto out; + + vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page)); + + alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); + + its_encode_alloc(cmd, alloc); + + /* + * GICv4.1 provides a way to get the VLPI state, which needs the vPE + * to be unmapped first, and in this case, we may remap the vPE + * back while the VPT is not empty. So we can't assume that the + * VPT is empty on map. This is why we never advertise PTZ. + */ + its_encode_ptz(cmd, false); + its_encode_vconf_addr(cmd, vconf_addr); + its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi); + +out: + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapp_cmd.vpe); +} + +static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled) + db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMAPTI); + its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapti_cmd.vpe); +} + +static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled) + db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMOVI); + its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_db_valid(cmd, true); + + 
its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmovi_cmd.vpe); +} + +static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u64 target; + + target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; + its_encode_cmd(cmd, GITS_CMD_VMOVP); + its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); + its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); + its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); + its_encode_target(cmd, target); + + if (is_v4_1(its)) { + its_encode_db(cmd, true); + its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi); + } + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmovp_cmd.vpe); +} + +static struct its_vpe *its_build_vinv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_vint_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_vclear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, 
desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_invdb_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_INVDB); + its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_invdb_cmd.vpe); +} + +static struct its_vpe *its_build_vsgi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_VSGI); + its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id); + its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi); + its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority); + its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group); + its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear); + its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vsgi_cmd.vpe); +} + +static u64 its_cmd_ptr_to_offset(struct its_node *its, + struct its_cmd_block *ptr) +{ + return (ptr - its->cmd_base) * sizeof(*ptr); +} + +static int its_queue_full(struct its_node *its) +{ + int widx; + int ridx; + + widx = its->cmd_write - its->cmd_base; + ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); + + /* This is incredibly unlikely to happen, unless the ITS locks up. */ + if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) + return 1; + + return 0; +} + +static struct its_cmd_block *its_allocate_entry(struct its_node *its) +{ + struct its_cmd_block *cmd; + u32 count = 1000000; /* 1s! 
*/ + + while (its_queue_full(its)) { + count--; + if (!count) { + pr_err_ratelimited("ITS queue not draining\n"); + return NULL; + } + cpu_relax(); + udelay(1); + } + + cmd = its->cmd_write++; + + /* Handle queue wrapping */ + if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) + its->cmd_write = its->cmd_base; + + /* Clear command */ + cmd->raw_cmd[0] = 0; + cmd->raw_cmd[1] = 0; + cmd->raw_cmd[2] = 0; + cmd->raw_cmd[3] = 0; + + return cmd; +} + +static struct its_cmd_block *its_post_commands(struct its_node *its) +{ + u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); + + writel_relaxed(wr, its->base + GITS_CWRITER); + + return its->cmd_write; +} + +static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) +{ + /* + * Make sure the commands written to memory are observable by + * the ITS. + */ + if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) + gic_flush_dcache_to_poc(cmd, sizeof(*cmd)); + else + dsb(ishst); +} + +static int its_wait_for_range_completion(struct its_node *its, + u64 prev_idx, + struct its_cmd_block *to) +{ + u64 rd_idx, to_idx, linear_idx; + u32 count = 1000000; /* 1s! */ + + /* Linearize to_idx if the command set has wrapped around */ + to_idx = its_cmd_ptr_to_offset(its, to); + if (to_idx < prev_idx) + to_idx += ITS_CMD_QUEUE_SZ; + + linear_idx = prev_idx; + + while (1) { + s64 delta; + + rd_idx = readl_relaxed(its->base + GITS_CREADR); + + /* + * Compute the read pointer progress, taking the + * potential wrap-around into account. 
+ */ + delta = rd_idx - prev_idx; + if (rd_idx < prev_idx) + delta += ITS_CMD_QUEUE_SZ; + + linear_idx += delta; + if (linear_idx >= to_idx) + break; + + count--; + if (!count) { + pr_err_ratelimited("ITS queue timeout (%llu %llu)\n", + to_idx, linear_idx); + return -1; + } + prev_idx = rd_idx; + cpu_relax(); + udelay(1); + } + + return 0; +} + +/* Warning, macro hell follows */ +#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \ +void name(struct its_node *its, \ + buildtype builder, \ + struct its_cmd_desc *desc) \ +{ \ + struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ + synctype *sync_obj; \ + unsigned long flags; \ + u64 rd_idx; \ + \ + raw_spin_lock_irqsave(&its->lock, flags); \ + \ + cmd = its_allocate_entry(its); \ + if (!cmd) { /* We're soooooo screewed... */ \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + return; \ + } \ + sync_obj = builder(its, cmd, desc); \ + its_flush_cmd(its, cmd); \ + \ + if (sync_obj) { \ + sync_cmd = its_allocate_entry(its); \ + if (!sync_cmd) \ + goto post; \ + \ + buildfn(its, sync_cmd, sync_obj); \ + its_flush_cmd(its, sync_cmd); \ + } \ + \ +post: \ + rd_idx = readl_relaxed(its->base + GITS_CREADR); \ + next_cmd = its_post_commands(its); \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + \ + if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \ + pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ +} + +static void its_build_sync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_collection *sync_col) +{ + its_encode_cmd(sync_cmd, GITS_CMD_SYNC); + its_encode_target(sync_cmd, sync_col->target_address); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, + struct its_collection, its_build_sync_cmd) + +static void its_build_vsync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_vpe *sync_vpe) +{ + its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); + its_encode_vpeid(sync_cmd, sync_vpe->vpe_id); + + 
its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t, + struct its_vpe, its_build_vsync_cmd) + +static void its_send_int(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_int_cmd, &desc); +} + +static void its_send_clear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_clear_cmd, &desc); +} + +static void its_send_inv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_inv_cmd, &desc); +} + +static void its_send_mapd(struct its_device *dev, int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapd_cmd.dev = dev; + desc.its_mapd_cmd.valid = !!valid; + + its_send_single_command(dev->its, its_build_mapd_cmd, &desc); +} + +static void its_send_mapc(struct its_node *its, struct its_collection *col, + int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapc_cmd.col = col; + desc.its_mapc_cmd.valid = !!valid; + + its_send_single_command(its, its_build_mapc_cmd, &desc); +} + +static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_mapti_cmd.dev = dev; + desc.its_mapti_cmd.phys_id = irq_id; + desc.its_mapti_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_mapti_cmd, &desc); +} + +static void its_send_movi(struct its_device *dev, + struct its_collection *col, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_movi_cmd.dev = dev; + desc.its_movi_cmd.col = col; + desc.its_movi_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_movi_cmd, &desc); +} + +static void its_send_discard(struct its_device *dev, u32 id) +{ + 
struct its_cmd_desc desc; + + desc.its_discard_cmd.dev = dev; + desc.its_discard_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_discard_cmd, &desc); +} + +static void its_send_invall(struct its_node *its, struct its_collection *col) +{ + struct its_cmd_desc desc; + + desc.its_invall_cmd.col = col; + + its_send_single_command(its, its_build_invall_cmd, &desc); +} + +static void its_send_vmapti(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); + struct its_cmd_desc desc; + + desc.its_vmapti_cmd.vpe = map->vpe; + desc.its_vmapti_cmd.dev = dev; + desc.its_vmapti_cmd.virt_id = map->vintid; + desc.its_vmapti_cmd.event_id = id; + desc.its_vmapti_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); +} + +static void its_send_vmovi(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); + struct its_cmd_desc desc; + + desc.its_vmovi_cmd.vpe = map->vpe; + desc.its_vmovi_cmd.dev = dev; + desc.its_vmovi_cmd.event_id = id; + desc.its_vmovi_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); +} + +static void its_send_vmapp(struct its_node *its, + struct its_vpe *vpe, bool valid) +{ + struct its_cmd_desc desc; + + desc.its_vmapp_cmd.vpe = vpe; + desc.its_vmapp_cmd.valid = valid; + desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; + + its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); +} + +static void its_send_vmovp(struct its_vpe *vpe) +{ + struct its_cmd_desc desc = {}; + struct its_node *its; + unsigned long flags; + int col_id = vpe->col_idx; + + desc.its_vmovp_cmd.vpe = vpe; + + if (!its_list_map) { + its = list_first_entry(&its_nodes, struct its_node, entry); + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + return; + } + + /* + * Yet another marvel of the architecture. 
If using the + * its_list "feature", we need to make sure that all ITSs + * receive all VMOVP commands in the same order. The only way + * to guarantee this is to make vmovp a serialization point. + * + * Wall <-- Head. + */ + raw_spin_lock_irqsave(&vmovp_lock, flags); + + desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; + desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); + + /* Emit VMOVPs */ + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (!require_its_list_vmovp(vpe->its_vm, its)) + continue; + + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_vinvall_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); +} + +static void its_send_vinv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VINV command. This is just a normal INV, + * with a VSYNC instead of a SYNC. + */ + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc); +} + +static void its_send_vint(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VINT command. This is just a normal INT, + * with a VSYNC instead of a SYNC. + */ + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc); +} + +static void its_send_vclear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VCLEAR command. This is just a normal CLEAR, + * with a VSYNC instead of a SYNC. 
+ */ + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc); +} + +static void its_send_invdb(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_invdb_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_invdb_cmd, &desc); +} + +/* + * irqchip functions - assumes MSI, mostly. + */ +static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + irq_hw_number_t hwirq; + void *va; + u8 *cfg; + + if (map) { + va = page_address(map->vm->vprop_page); + hwirq = map->vintid; + + /* Remember the updated property */ + map->properties &= ~clr; + map->properties |= set | LPI_PROP_GROUP1; + } else { + va = gic_rdists->prop_table_va; + hwirq = d->hwirq; + } + + cfg = va + hwirq - 8192; + *cfg &= ~clr; + *cfg |= set | LPI_PROP_GROUP1; + + /* + * Make the above write visible to the redistributors. + * And yes, we're flushing exactly: One. Single. Byte. + * Humpf... 
+ */ + if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) + gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); + else + dsb(ishst); +} + +static void wait_for_syncr(void __iomem *rdbase) +{ + while (readl_relaxed(rdbase + GICR_SYNCR) & 1) + cpu_relax(); +} + +static void __direct_lpi_inv(struct irq_data *d, u64 val) +{ + void __iomem *rdbase; + unsigned long flags; + int cpu; + + /* Target the redistributor this LPI is currently routed to */ + cpu = irq_to_cpuid_lock(d, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVLPIR); + wait_for_syncr(rdbase); + + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + irq_to_cpuid_unlock(d, flags); +} + +static void direct_lpi_inv(struct irq_data *d) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + u64 val; + + if (map) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + WARN_ON(!is_v4_1(its_dev->its)); + + val = GICR_INVLPIR_V; + val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id); + val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid); + } else { + val = d->hwirq; + } + + __direct_lpi_inv(d, val); +} + +static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + lpi_write_config(d, clr, set); + if (gic_rdists->has_direct_lpi && + (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d))) + direct_lpi_inv(d); + else if (!irqd_is_forwarded_to_vcpu(d)) + its_send_inv(its_dev, its_get_event_id(d)); + else + its_send_vinv(its_dev, its_get_event_id(d)); +} + +static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + struct its_vlpi_map *map; + + /* + * GICv4.1 does away with the per-LPI nonsense, nothing to do + * here. 
+ */ + if (is_v4_1(its_dev->its)) + return; + + map = dev_event_to_vlpi_map(its_dev, event); + + if (map->db_enabled == enable) + return; + + map->db_enabled = enable; + + /* + * More fun with the architecture: + * + * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI + * value or to 1023, depending on the enable bit. But that + * would be issuing a mapping for an /existing/ DevID+EventID + * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI + * to the /same/ vPE, using this opportunity to adjust the + * doorbell. Mouahahahaha. We loves it, Precious. + */ + its_send_vmovi(its_dev, event); +} + +static void its_mask_irq(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) + its_vlpi_set_doorbell(d, false); + + lpi_update_config(d, LPI_PROP_ENABLED, 0); +} + +static void its_unmask_irq(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) + its_vlpi_set_doorbell(d, true); + + lpi_update_config(d, 0, LPI_PROP_ENABLED); +} + +static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + return atomic_read(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->managed); + + return atomic_read(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->unmanaged); +} + +static void its_inc_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_inc(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->managed); + else + atomic_inc(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->unmanaged); +} + +static void its_dec_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_dec(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->managed); + else + atomic_dec(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->unmanaged); +} + +static unsigned int cpumask_pick_least_loaded(struct irq_data *d, + const struct cpumask *cpu_mask) +{ + unsigned int cpu = nr_cpu_ids, tmp; + int count = S32_MAX; + + for_each_cpu(tmp, cpu_mask) { + int this_count = its_read_lpi_count(d, tmp); + + if (this_count < count) { + cpu 
= tmp; + count = this_count; + } + } + + return cpu; +} + +/* + * As suggested by Thomas Gleixner in: + * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de + */ +static int its_select_cpu(struct irq_data *d, + const struct cpumask *aff_mask) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + static DEFINE_RAW_SPINLOCK(tmpmask_lock); + static struct cpumask __tmpmask; + struct cpumask *tmpmask; + unsigned long flags; + int cpu, node; + + node = its_dev->its->numa_node; + tmpmask = &__tmpmask; + + raw_spin_lock_irqsave(&tmpmask_lock, flags); + + if (!irqd_affinity_is_managed(d)) { + /* First try the NUMA node */ + if (node != NUMA_NO_NODE) { + /* + * Try the intersection of the affinity mask and the + * node mask (and the online mask, just to be safe). + */ + cpumask_and(tmpmask, cpumask_of_node(node), aff_mask); + cpumask_and(tmpmask, tmpmask, cpu_online_mask); + + /* + * Ideally, we would check if the mask is empty, and + * try again on the full node here. + * + * But it turns out that the way ACPI describes the + * affinity for ITSs only deals about memory, and + * not target CPUs, so it cannot describe a single + * ITS placed next to two NUMA nodes. + * + * Instead, just fallback on the online mask. This + * diverges from Thomas' suggestion above. 
+ */ + cpu = cpumask_pick_least_loaded(d, tmpmask); + if (cpu < nr_cpu_ids) + goto out; + + /* If we can't cross sockets, give up */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)) + goto out; + + /* If the above failed, expand the search */ + } + + /* Try the intersection of the affinity and online masks */ + cpumask_and(tmpmask, aff_mask, cpu_online_mask); + + /* If that doesn't fly, the online mask is the last resort */ + if (cpumask_empty(tmpmask)) + cpumask_copy(tmpmask, cpu_online_mask); + + cpu = cpumask_pick_least_loaded(d, tmpmask); + } else { + cpumask_copy(tmpmask, aff_mask); + + /* If we cannot cross sockets, limit the search to that node */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && + node != NUMA_NO_NODE) + cpumask_and(tmpmask, tmpmask, cpumask_of_node(node)); + + cpu = cpumask_pick_least_loaded(d, tmpmask); + } +out: + raw_spin_unlock_irqrestore(&tmpmask_lock, flags); + + pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu); + return cpu; +} + +#define MAX_MARS3_SKT_COUNT 8 + +static int its_cpumask_select(struct its_device *its_dev, + const struct cpumask *mask_val, + const struct cpumask *cpu_mask) +{ + unsigned int skt, skt_id, i; + phys_addr_t its_phys_base; + unsigned int cpu, cpus = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + its_phys_base = its_dev->its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + if (skt_id != 0) { + for (i = 0; i < skt_id; i++) + cpus += skt_cpu_cnt[i]; + } + + cpu = cpumask_any_and(mask_val, cpu_mask); + cpus = cpus + cpu % skt_cpu_cnt[skt_id]; + + return cpus; +} + +static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + unsigned int cpu; + const struct 
cpumask *cpu_mask = cpu_online_mask; + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_collection *target_col; + u32 id = its_get_event_id(d); + int prev_cpu; + unsigned int skt_t1, skt_t2, cpu_idx; + + /* A forwarded interrupt should use irq_set_vcpu_affinity */ + if (irqd_is_forwarded_to_vcpu(d)) + return -EINVAL; + + prev_cpu = its_dev->event_map.col_map[id]; + its_dec_lpi_count(d, prev_cpu); + + cpu_idx = its_cpumask_select(its_dev, mask_val, cpu_mask); + skt_t1 = (cpu_logical_map(cpu_idx) >> 16) & 0xff; + if (!force) + cpu = its_select_cpu(d, mask_val); + else + cpu = cpumask_pick_least_loaded(d, mask_val); + skt_t2 = (cpu_logical_map(cpu) >> 16) & 0xff; + if (skt_t1 != skt_t2) + cpu = cpu_idx; + + if (cpu < 0 || cpu >= nr_cpu_ids) + goto err; + + /* don't set the affinity when the target cpu is same as current one */ + if (cpu != prev_cpu) { + target_col = &its_dev->its->collections[cpu]; + its_send_movi(its_dev, target_col, id); + its_dev->event_map.col_map[id] = cpu; + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + } + + its_inc_lpi_count(d, cpu); + + return IRQ_SET_MASK_OK_DONE; + +err: + its_inc_lpi_count(d, prev_cpu); + return -EINVAL; +} + +static u64 its_irq_get_msi_base(struct its_device *its_dev) +{ + struct its_node *its = its_dev->its; + + return its->phys_base + GITS_TRANSLATER; +} + +static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_node *its; + u64 addr; + + its = its_dev->its; + addr = its->get_msi_base(its_dev); + + msg->address_lo = lower_32_bits(addr); + msg->address_hi = upper_32_bits(addr); + msg->data = its_get_event_id(d); +} + +static int its_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if 
(irqd_is_forwarded_to_vcpu(d)) { + if (state) + its_send_vint(its_dev, event); + else + its_send_vclear(its_dev, event); + } else { + if (state) + its_send_int(its_dev, event); + else + its_send_clear(its_dev, event); + } + + return 0; +} + +static int its_irq_retrigger(struct irq_data *d) +{ + return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + +/* + * Two favourable cases: + * + * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times + * for vSGI delivery + * + * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough + * and we're better off mapping all VPEs always + * + * If neither (a) nor (b) is true, then we map vPEs on demand. + * + */ +static bool gic_requires_eager_mapping(void) +{ + if (!its_list_map || gic_rdists->has_rvpeid) + return true; + + return false; +} + +static void its_map_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + if (gic_requires_eager_mapping()) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + /* + * If the VM wasn't mapped yet, iterate over the vpes and get + * them mapped now. + */ + vm->vlpi_count[its->list_nr]++; + + if (vm->vlpi_count[its->list_nr] == 1) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) { + struct its_vpe *vpe = vm->vpes[i]; + struct irq_data *d = irq_get_irq_data(vpe->irq); + + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + } + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_unmap_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + /* Not using the ITS list? Everything is always mapped. 
*/ + if (gic_requires_eager_mapping()) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + if (!--vm->vlpi_count[its->list_nr]) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) + its_send_vmapp(its, vm->vpes[i], false); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + if (!info->map) + return -EINVAL; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm) { + struct its_vlpi_map *maps; + + maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), + GFP_ATOMIC); + if (!maps) { + ret = -ENOMEM; + goto out; + } + + its_dev->event_map.vm = info->map->vm; + its_dev->event_map.vlpi_maps = maps; + } else if (its_dev->event_map.vm != info->map->vm) { + ret = -EINVAL; + goto out; + } + + /* Get our private copy of the mapping information */ + its_dev->event_map.vlpi_maps[event] = *info->map; + + if (irqd_is_forwarded_to_vcpu(d)) { + /* Already mapped, move it around */ + its_send_vmovi(its_dev, event); + } else { + /* Ensure all the VPEs are mapped on this ITS */ + its_map_vm(its_dev->its, info->map->vm); + + /* + * Flag the interrupt as forwarded so that we can + * start poking the virtual property table. 
+ */ + irqd_set_forwarded_to_vcpu(d); + + /* Write out the property to the prop table */ + lpi_write_config(d, 0xff, info->map->properties); + + /* Drop the physical mapping */ + its_send_discard(its_dev, event); + + /* and install the virtual one */ + its_send_vmapti(its_dev, event); + + /* Increment the number of VLPIs */ + its_dev->event_map.nr_vlpis++; + } + +out: + raw_spin_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_vlpi_map *map; + int ret = 0; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + map = get_vlpi_map(d); + + if (!its_dev->event_map.vm || !map) { + ret = -EINVAL; + goto out; + } + + /* Copy our mapping information to the incoming request */ + *info->map = *map; + +out: + raw_spin_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_unmap(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { + ret = -EINVAL; + goto out; + } + + /* Drop the virtual mapping */ + its_send_discard(its_dev, event); + + /* and restore the physical one */ + irqd_clr_forwarded_to_vcpu(d); + its_send_mapti(its_dev, d->hwirq, event); + lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | + LPI_PROP_ENABLED | + LPI_PROP_GROUP1)); + + /* Potentially unmap the VM from this ITS */ + its_unmap_vm(its_dev->its, its_dev->event_map.vm); + + /* + * Drop the refcount and make the device available again if + * this was the last VLPI. 
+ */ + if (!--its_dev->event_map.nr_vlpis) { + its_dev->event_map.vm = NULL; + kfree(its_dev->event_map.vlpi_maps); + } + +out: + raw_spin_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) + return -EINVAL; + + if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI) + lpi_update_config(d, 0xff, info->config); + else + lpi_write_config(d, 0xff, info->config); + its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED)); + + return 0; +} + +static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + /* Need a v4 ITS */ + if (!is_v4(its_dev->its)) + return -EINVAL; + + /* Unmap request? */ + if (!info) + return its_vlpi_unmap(d); + + switch (info->cmd_type) { + case MAP_VLPI: + return its_vlpi_map(d, info); + + case GET_VLPI: + return its_vlpi_get(d, info); + + case PROP_UPDATE_VLPI: + case PROP_UPDATE_AND_INV_VLPI: + return its_vlpi_prop_update(d, info); + + default: + return -EINVAL; + } +} + +static struct irq_chip its_irq_chip = { + .name = "ITS", + .irq_mask = its_mask_irq, + .irq_unmask = its_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_set_affinity, + .irq_compose_msi_msg = its_irq_compose_msi_msg, + .irq_set_irqchip_state = its_irq_set_irqchip_state, + .irq_retrigger = its_irq_retrigger, + .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, +}; + + +/* + * How we allocate LPIs: + * + * lpi_range_list contains ranges of LPIs that are to available to + * allocate from. To allocate LPIs, just pick the first range that + * fits the required allocation, and reduce it by the required + * amount. Once empty, remove the range from the list. 
+ * + * To free a range of LPIs, add a free range to the list, sort it and + * merge the result if the new range happens to be adjacent to an + * already free block. + * + * The consequence of the above is that allocation is cost is low, but + * freeing is expensive. We assumes that freeing rarely occurs. + */ +#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ + +static DEFINE_MUTEX(lpi_range_lock); +static LIST_HEAD(lpi_range_list); + +struct lpi_range { + struct list_head entry; + u32 base_id; + u32 span; +}; + +static struct lpi_range *mk_lpi_range(u32 base, u32 span) +{ + struct lpi_range *range; + + range = kmalloc(sizeof(*range), GFP_KERNEL); + if (range) { + range->base_id = base; + range->span = span; + } + + return range; +} + +static int alloc_lpi_range(u32 nr_lpis, u32 *base) +{ + struct lpi_range *range, *tmp; + int err = -ENOSPC; + + mutex_lock(&lpi_range_lock); + + list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { + if (range->span >= nr_lpis) { + *base = range->base_id; + range->base_id += nr_lpis; + range->span -= nr_lpis; + + if (range->span == 0) { + list_del(&range->entry); + kfree(range); + } + + err = 0; + break; + } + } + + mutex_unlock(&lpi_range_lock); + + pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis); + return err; +} + +static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b) +{ + if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list) + return; + if (a->base_id + a->span != b->base_id) + return; + b->base_id = a->base_id; + b->span += a->span; + list_del(&a->entry); + kfree(a); +} + +static int free_lpi_range(u32 base, u32 nr_lpis) +{ + struct lpi_range *new, *old; + + new = mk_lpi_range(base, nr_lpis); + if (!new) + return -ENOMEM; + + mutex_lock(&lpi_range_lock); + + list_for_each_entry_reverse(old, &lpi_range_list, entry) { + if (old->base_id < base) + break; + } + /* + * old is the last element with ->base_id smaller than base, + * so new goes right after it. 
If there are no elements with + * ->base_id smaller than base, &old->entry ends up pointing + * at the head of the list, and inserting new it the start of + * the list is the right thing to do in that case as well. + */ + list_add(&new->entry, &old->entry); + /* + * Now check if we can merge with the preceding and/or + * following ranges. + */ + merge_lpi_ranges(old, new); + merge_lpi_ranges(new, list_next_entry(new, entry)); + + mutex_unlock(&lpi_range_lock); + return 0; +} + +static int __init its_lpi_init(u32 id_bits) +{ + u32 lpis = (1UL << id_bits) - 8192; + u32 numlpis; + int err; + + numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer); + + if (numlpis > 2 && !WARN_ON(numlpis > lpis)) { + lpis = numlpis; + pr_info("ITS: Using hypervisor restricted LPI range [%u]\n", + lpis); + } + + /* + * Initializing the allocator is just the same as freeing the + * full range of LPIs. + */ + err = free_lpi_range(8192, lpis); + pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); + return err; +} + +static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) +{ + unsigned long *bitmap = NULL; + int err = 0; + + do { + err = alloc_lpi_range(nr_irqs, base); + if (!err) + break; + + nr_irqs /= 2; + } while (nr_irqs > 0); + + if (!nr_irqs) + err = -ENOSPC; + + if (err) + goto out; + + bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC); + if (!bitmap) + goto out; + + *nr_ids = nr_irqs; + +out: + if (!bitmap) + *base = *nr_ids = 0; + + return bitmap; +} + +static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) +{ + WARN_ON(free_lpi_range(base, nr_ids)); + bitmap_free(bitmap); +} + +static void gic_reset_prop_table(void *va) +{ + /* Priority 0xa0, Group-1, disabled */ + memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); + + /* Make sure the GIC will observe the written configuration */ + gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); +} + +static struct page *its_allocate_prop_table(gfp_t gfp_flags) +{ + struct page 
*prop_page; + + prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); + if (!prop_page) + return NULL; + + gic_reset_prop_table(page_address(prop_page)); + + return prop_page; +} + +static void its_free_prop_table(struct page *prop_page) +{ + free_pages((unsigned long)page_address(prop_page), + get_order(LPI_PROPBASE_SZ)); +} + +static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) +{ + phys_addr_t start, end, addr_end; + u64 i; + + /* + * We don't bother checking for a kdump kernel as by + * construction, the LPI tables are out of this kernel's + * memory map. + */ + if (is_kdump_kernel()) + return true; + + addr_end = addr + size - 1; + + for_each_reserved_mem_range(i, &start, &end) { + if (addr >= start && addr_end <= end) + return true; + } + + /* Not found, not a good sign... */ + pr_warn("GIC-2500: Expected reserved range [%pa:%pa], not found\n", + &addr, &addr_end); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + return false; +} + +static int gic_reserve_range(phys_addr_t addr, unsigned long size) +{ + if (efi_enabled(EFI_CONFIG_TABLES)) + return efi_mem_reserve_persistent(addr, size); + + return 0; +} + +static int __init its_setup_lpi_prop_table(void) +{ + if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { + u64 val; + + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; + + gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); + gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ, + MEMREMAP_WB); + gic_reset_prop_table(gic_rdists->prop_table_va); + } else { + struct page *page; + + lpi_id_bits = min_t(u32, + GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), + ITS_MAX_LPI_NRBITS); + page = its_allocate_prop_table(GFP_NOWAIT); + if (!page) { + pr_err("Failed to allocate PROPBASE\n"); + return -ENOMEM; + } + + gic_rdists->prop_table_pa = page_to_phys(page); + gic_rdists->prop_table_va = page_address(page); + 
WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ)); + } + + pr_info("GIC-2500: using LPI property table @%pa\n", + &gic_rdists->prop_table_pa); + + return its_lpi_init(lpi_id_bits); +} + +static const char * const its_base_type_string[] = { + [GITS_BASER_TYPE_DEVICE] = "Devices", + [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", + [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)", + [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", + [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", + [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", + [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", +}; + +static u64 its_read_baser(struct its_node *its, struct its_baser *baser) +{ + u32 idx = baser - its->tables; + + return gits_read_baser(its->base + GITS_BASER + (idx << 3)); +} + +static void its_write_baser(struct its_node *its, struct its_baser *baser, + u64 val) +{ + u32 idx = baser - its->tables; + + gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); + baser->val = its_read_baser(its, baser); +} + +static int its_setup_baser(struct its_node *its, struct its_baser *baser, + u64 cache, u64 shr, u32 order, bool indirect) +{ + u64 val = its_read_baser(its, baser); + u64 esz = GITS_BASER_ENTRY_SIZE(val); + u64 type = GITS_BASER_TYPE(val); + u64 baser_phys, tmp; + u32 alloc_pages, psz; + struct page *page; + void *base; + + psz = baser->psz; + alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); + if (alloc_pages > GITS_BASER_PAGES_MAX) { + pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", + &its->phys_base, its_base_type_string[type], + alloc_pages, GITS_BASER_PAGES_MAX); + alloc_pages = GITS_BASER_PAGES_MAX; + order = get_order(GITS_BASER_PAGES_MAX * psz); + } + + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); + if (!page) + return -ENOMEM; + + base = (void *)page_address(page); + baser_phys = virt_to_phys(base); + + /* Check if the physical address of the memory is above 48bits */ + if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && 
(baser_phys >> 48)) { + + /* 52bit PA is supported only when PageSize=64K */ + if (psz != SZ_64K) { + pr_err("ITS: no 52bit PA support when psz=%d\n", psz); + free_pages((unsigned long)base, order); + return -ENXIO; + } + + /* Convert 52bit PA to 48bit field */ + baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); + } + +retry_baser: + val = (baser_phys | + (type << GITS_BASER_TYPE_SHIFT) | + ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | + ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | + cache | + shr | + GITS_BASER_VALID); + + val |= indirect ? GITS_BASER_INDIRECT : 0x0; + + switch (psz) { + case SZ_4K: + val |= GITS_BASER_PAGE_SIZE_4K; + break; + case SZ_16K: + val |= GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_64K: + val |= GITS_BASER_PAGE_SIZE_64K; + break; + } + + its_write_baser(its, baser, val); + tmp = baser->val; + + if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) + tmp &= ~GITS_BASER_SHAREABILITY_MASK; + + if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { + /* + * Shareability didn't stick. Just use + * whatever the read reported, which is likely + * to be the only thing this redistributor + * supports. If that's zero, make it + * non-cacheable as well. + */ + shr = tmp & GITS_BASER_SHAREABILITY_MASK; + if (!shr) { + cache = GITS_BASER_nC; + gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); + } + goto retry_baser; + } + + if (val != tmp) { + pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n", + &its->phys_base, its_base_type_string[type], + val, tmp); + free_pages((unsigned long)base, order); + return -ENXIO; + } + + baser->order = order; + baser->base = base; + baser->psz = psz; + tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz; + + pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", + &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp), + its_base_type_string[type], + (unsigned long)virt_to_phys(base), + indirect ? 
"indirect" : "flat", (int)esz, + psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); + + return 0; +} + +static bool its_parse_indirect_baser(struct its_node *its, + struct its_baser *baser, + u32 *order, u32 ids) +{ + u64 tmp = its_read_baser(its, baser); + u64 type = GITS_BASER_TYPE(tmp); + u64 esz = GITS_BASER_ENTRY_SIZE(tmp); + u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; + u32 new_order = *order; + u32 psz = baser->psz; + bool indirect = false; + + /* No need to enable Indirection if memory requirement < (psz*2)bytes */ + if ((esz << ids) > (psz * 2)) { + /* + * Find out whether hw supports a single or two-level table by + * table by reading bit at offset '62' after writing '1' to it. + */ + its_write_baser(its, baser, val | GITS_BASER_INDIRECT); + indirect = !!(baser->val & GITS_BASER_INDIRECT); + + if (indirect) { + /* + * The size of the lvl2 table is equal to ITS page size + * which is 'psz'. For computing lvl1 table size, + * subtract ID bits that sparse lvl2 table from 'ids' + * which is reported by ITS hardware times lvl1 table + * entry size. + */ + ids -= ilog2(psz / (int)esz); + esz = GITS_LVL1_ENTRY_SIZE; + } + } + + /* + * Allocate as many entries as required to fit the + * range of device IDs that the ITS can grok... The ID + * space being incredibly sparse, this results in a + * massive waste of memory if two-level device table + * feature is not supported by hardware. 
+ */ + new_order = max_t(u32, get_order(esz << ids), new_order); + if (new_order > MAX_ORDER) { + new_order = MAX_ORDER; + ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); + pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n", + &its->phys_base, its_base_type_string[type], + device_ids(its), ids); + } + + *order = new_order; + + return indirect; +} + +static u32 compute_common_aff(u64 val) +{ + u32 aff, clpiaff; + + aff = FIELD_GET(GICR_TYPER_AFFINITY, val); + clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val); + + return aff & ~(GENMASK(31, 0) >> (clpiaff * 8)); +} + +static u32 compute_its_aff(struct its_node *its) +{ + u64 val; + u32 svpet; + + /* + * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute + * the resulting affinity. We then use that to see if this match + * our own affinity. + */ + svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); + val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet); + val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr); + return compute_common_aff(val); +} + +static struct its_node *find_sibling_its(struct its_node *cur_its) +{ + struct its_node *its; + u32 aff; + + if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer)) + return NULL; + + aff = compute_its_aff(cur_its); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser; + + if (!is_v4_1(its) || its == cur_its) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + return its; + } + + return NULL; +} + +static void its_free_tables(struct its_node *its) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (its->tables[i].base) { + free_pages((unsigned long)its->tables[i].base, + its->tables[i].order); + its->tables[i].base = NULL; + } + } +} + +static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser) +{ + u64 psz 
= SZ_64K; + + while (psz) { + u64 val, gpsz; + + val = its_read_baser(its, baser); + val &= ~GITS_BASER_PAGE_SIZE_MASK; + + switch (psz) { + case SZ_64K: + gpsz = GITS_BASER_PAGE_SIZE_64K; + break; + case SZ_16K: + gpsz = GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_4K: + default: + gpsz = GITS_BASER_PAGE_SIZE_4K; + break; + } + + gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT; + + val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz); + its_write_baser(its, baser, val); + + if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz) + break; + + switch (psz) { + case SZ_64K: + psz = SZ_16K; + break; + case SZ_16K: + psz = SZ_4K; + break; + case SZ_4K: + default: + return -1; + } + } + + baser->psz = psz; + return 0; +} + +static int its_alloc_tables(struct its_node *its) +{ + u64 shr = GITS_BASER_InnerShareable; + u64 cache = GITS_BASER_RaWaWb; + int err, i; + + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) + /* erratum 24313: ignore memory access type */ + cache = GITS_BASER_nCnB; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = its->tables + i; + u64 val = its_read_baser(its, baser); + u64 type = GITS_BASER_TYPE(val); + bool indirect = false; + u32 order; + + if (type == GITS_BASER_TYPE_NONE) + continue; + + if (its_probe_baser_psz(its, baser)) { + its_free_tables(its); + return -ENXIO; + } + + order = get_order(baser->psz); + + switch (type) { + case GITS_BASER_TYPE_DEVICE: + indirect = its_parse_indirect_baser(its, baser, &order, + device_ids(its)); + break; + + case GITS_BASER_TYPE_VCPU: + if (is_v4_1(its)) { + struct its_node *sibling; + + WARN_ON(i != 2); + sibling = find_sibling_its(its); + if (sibling != NULL) { + *baser = sibling->tables[2]; + its_write_baser(its, baser, baser->val); + continue; + } + } + + indirect = its_parse_indirect_baser(its, baser, &order, + ITS_MAX_VPEID_BITS); + break; + } + + err = its_setup_baser(its, baser, cache, shr, order, indirect); + if (err < 0) { + its_free_tables(its); + return err; + } + + /* 
Update settings which will be used for next BASERn */ + cache = baser->val & GITS_BASER_CACHEABILITY_MASK; + shr = baser->val & GITS_BASER_SHAREABILITY_MASK; + } + + return 0; +} + +static u64 inherit_vpe_l1_table_from_its(void) +{ + struct its_node *its; + u64 val; + u32 aff; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser, addr; + + if (!is_v4_1(its)) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + /* We have a winner! */ + gic_data_rdist()->vpe_l1_base = its->tables[2].base; + + val = GICR_VPROPBASER_4_1_VALID; + if (baser & GITS_BASER_INDIRECT) + val |= GICR_VPROPBASER_4_1_INDIRECT; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, + FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)); + switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) { + case GIC_PAGE_SIZE_64K: + addr = GITS_BASER_ADDR_48_to_52(baser); + break; + default: + addr = baser & GENMASK_ULL(47, 12); + break; + } + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); + val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, + FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, + FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); + + return val; + } + + return 0; +} + +static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) +{ + u32 aff; + u64 val; + int cpu; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + for_each_possible_cpu(cpu) { + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + + if (!base || cpu == smp_processor_id()) + continue; + + val = gic_read_typer(base + 
GICR_TYPER); + if (aff != compute_common_aff(val)) + continue; + + /* + * At this point, we have a victim. This particular CPU + * has already booted, and has an affinity that matches + * ours wrt CommonLPIAff. Let's use its own VPROPBASER. + * Make sure we don't write the Z bit in that case. + */ + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_Z; + + gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base; + *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; + + return val; + } + + return 0; +} + +static bool allocate_vpe_l2_table(int cpu, u32 id) +{ + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + unsigned int psz, esz, idx, npg, gpsz; + u64 val; + struct page *page; + __le64 *table; + + if (!gic_rdists->has_rvpeid) + return true; + + /* Skip non-present CPUs */ + if (!base) + return true; + + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1; + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1; + + switch (gpsz) { + default: + WARN_ON(1); + fallthrough; + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* Don't allow vpe_id that exceeds single, flat table limit */ + if (!(val & GICR_VPROPBASER_4_1_INDIRECT)) + return (id < (npg * psz / (esz * SZ_8))); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(psz / (esz * SZ_8)); + if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = gic_data_rdist_cpu(cpu)->vpe_l1_base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) + 
gic_flush_dcache_to_poc(page_address(page), psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to RD hardware */ + dsb(sy); + } + + return true; +} + +static int allocate_vpe_l1_table(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val, gpsz, npg, pa; + unsigned int psz = SZ_64K; + unsigned int np, epp, esz; + struct page *page; + + if (!gic_rdists->has_rvpeid) + return 0; + + /* + * if VPENDBASER.Valid is set, disable any previously programmed + * VPE by setting PendingLast while clearing Valid. This has the + * effect of making sure no doorbell will be generated and we can + * then safely clear VPROPBASER.Valid. + */ + if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + vlpi_base + GICR_VPENDBASER); + + /* + * If we can inherit the configuration from another RD, let's do + * so. Otherwise, we have to go through the allocation process. We + * assume that all RDs have the exact same requirements, as + * nothing will work otherwise. 
+ */ + val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC); + if (!gic_data_rdist()->vpe_table_mask) + return -ENOMEM; + + val = inherit_vpe_l1_table_from_its(); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + /* First probe the page size */ + val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K); + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER); + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val); + + switch (gpsz) { + default: + gpsz = GIC_PAGE_SIZE_4K; + fallthrough; + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* + * Start populating the register from scratch, including RO fields + * (which we want to print in debug cases...) + */ + val = 0; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz); + + /* How many entries per GIC page? */ + esz++; + epp = psz / (esz * SZ_8); + + /* + * If we need more than just a single L1 page, flag the table + * as indirect and compute the number of required L1 pages. 
+ */ + if (epp < ITS_MAX_VPEID) { + int nl2; + + val |= GICR_VPROPBASER_4_1_INDIRECT; + + /* Number of L2 pages required to cover the VPEID space */ + nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp); + + /* Number of L1 pages to point to the L2 pages */ + npg = DIV_ROUND_UP(nl2 * SZ_8, psz); + } else { + npg = 1; + } + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1); + + /* Right, that's the number of CPU pages we need for L1 */ + np = DIV_ROUND_UP(npg * psz, PAGE_SIZE); + + pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n", + np, npg, psz, epp, esz); + page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE)); + if (!page) + return -ENOMEM; + + gic_data_rdist()->vpe_l1_base = page_address(page); + pa = virt_to_phys(page_address(page)); + WARN_ON(!IS_ALIGNED(pa, psz)); + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12); + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + val |= GICR_VPROPBASER_4_1_Z; + val |= GICR_VPROPBASER_4_1_VALID; + +out: + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask); + + pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n", + smp_processor_id(), val, + cpumask_pr_args(gic_data_rdist()->vpe_table_mask)); + + return 0; +} + +static int its_alloc_collections(struct its_node *its) +{ + int i; + + its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), + GFP_KERNEL); + if (!its->collections) + return -ENOMEM; + + for (i = 0; i < nr_cpu_ids; i++) + its->collections[i].target_address = ~0ULL; + + return 0; +} + +static struct page *its_allocate_pending_table(gfp_t gfp_flags) +{ + struct page *pend_page; + + pend_page = alloc_pages(gfp_flags | __GFP_ZERO, + get_order(LPI_PENDBASE_SZ)); + if (!pend_page) + return NULL; + + /* Make sure the GIC will observe the zero-ed page */ + gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); + + return pend_page; +} + +static void its_free_pending_table(struct 
page *pt) +{ + free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); +} + +/* + * Booting with kdump and LPIs enabled is generally fine. Any other + * case is wrong in the absence of firmware/EFI support. + */ +static bool enabled_lpis_allowed(void) +{ + phys_addr_t addr; + u64 val; + + /* Check whether the property table is in a reserved region */ + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + addr = val & GENMASK_ULL(51, 12); + + return gic_check_reserved_range(addr, LPI_PROPBASE_SZ); +} + +static int __init allocate_lpi_tables(void) +{ + u64 val; + int err, cpu; + + /* + * If LPIs are enabled while we run this from the boot CPU, + * flag the RD tables as pre-allocated if the stars do align. + */ + val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); + if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { + gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | + RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); + pr_info("GIC-2500: Using preallocated redistributor tables\n"); + } + + err = its_setup_lpi_prop_table(); + if (err) + return err; + + /* + * We allocate all the pending tables anyway, as we may have a + * mix of RDs that have had LPIs enabled, and some that + * don't. We'll free the unused ones as each CPU comes online. + */ + for_each_possible_cpu(cpu) { + struct page *pend_page; + + pend_page = its_allocate_pending_table(GFP_NOWAIT); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); + return -ENOMEM; + } + + gic_data_rdist_cpu(cpu)->pend_page = pend_page; + } + + return 0; +} + +static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) +{ + u32 count = 1000000; /* 1s! 
*/ + bool clean; + u64 val; + + val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + val &= ~GICR_VPENDBASER_Valid; + val &= ~clr; + val |= set; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + do { + val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + clean = !(val & GICR_VPENDBASER_Dirty); + if (!clean) { + count--; + cpu_relax(); + udelay(1); + } + } while (!clean && count); + + if (unlikely(val & GICR_VPENDBASER_Dirty)) { + pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + val |= GICR_VPENDBASER_PendingLast; + } + + return val; +} + +static void its_cpu_init_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + struct page *pend_page; + phys_addr_t paddr; + u64 val, tmp; + + if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) + return; + + val = readl_relaxed(rbase + GICR_CTLR); + if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && + (val & GICR_CTLR_ENABLE_LPIS)) { + /* + * Check that we get the same property table on all + * RDs. If we don't, this is hopeless. 
+ */ + paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); + paddr &= GENMASK_ULL(51, 12); + if (WARN_ON(gic_rdists->prop_table_pa != paddr)) + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); + paddr &= GENMASK_ULL(51, 16); + + WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); + gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED; + + goto out; + } + + pend_page = gic_data_rdist()->pend_page; + paddr = page_to_phys(pend_page); + + /* set PROPBASE */ + val = (gic_rdists->prop_table_pa | + GICR_PROPBASER_InnerShareable | + GICR_PROPBASER_RaWaWb | + ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); + + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); + + if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE) + tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK; + + if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { + if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. + */ + val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | + GICR_PROPBASER_CACHEABILITY_MASK); + val |= GICR_PROPBASER_nC; + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + } + pr_info_once("GIC: using cache flushing for LPI property table\n"); + gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; + } + + /* set PENDBASE */ + val = (page_to_phys(pend_page) | + GICR_PENDBASER_InnerShareable | + GICR_PENDBASER_RaWaWb); + + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); + + if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE) + tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK; + + if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must remove the + * cacheability attributes as well. 
+ */ + val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | + GICR_PENDBASER_CACHEABILITY_MASK); + val |= GICR_PENDBASER_nC; + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + } + + /* Enable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + val |= GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) { + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + + /* + * It's possible for CPU to receive VLPIs before it is + * scheduled as a vPE, especially for the first CPU, and the + * VLPI with INTID larger than 2^(IDbits+1) will be considered + * as out of range and dropped by GIC. + * So we initialize IDbits to known value to avoid VLPI drop. + */ + val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n", + smp_processor_id(), val); + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + /* + * Also clear Valid bit of GICR_VPENDBASER, in case some + * ancient programming gets left in and has possibility of + * corrupting memory. + */ + val = its_clear_vpend_valid(vlpi_base, 0, 0); + } + + if (allocate_vpe_l1_table()) { + /* + * If the allocation has failed, we're in massive trouble. + * Disable direct injection, and pray that no VM was + * already running... + */ + gic_rdists->has_rvpeid = false; + gic_rdists->has_vlpis = false; + } + + /* Make sure the GIC has seen the above */ + dsb(sy); +out: + gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED; + pr_info("GIC-2500: CPU%d: using %s LPI pending table @%pa\n", + smp_processor_id(), + gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ? 
+ "reserved" : "allocated", + &paddr); +} + +static void its_cpu_init_collection(struct its_node *its) +{ + int cpu = smp_processor_id(); + u64 target; + unsigned long mpid; + phys_addr_t its_phys_base; + unsigned long skt_id; + + /* avoid cross node collections and its mapping */ + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + struct device_node *cpu_node; + + cpu_node = of_get_cpu_node(cpu, NULL); + if (its->numa_node != NUMA_NO_NODE && + its->numa_node != of_node_to_nid(cpu_node)) + return; + } + + mpid = cpu_logical_map(cpu); + its_phys_base = its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + /* + * We now have to bind each collection to its target + * redistributor. + */ + if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { + /* + * This ITS wants the physical address of the + * redistributor. + */ + target = gic_data_rdist()->phys_base; + } else { + /* This ITS wants a linear CPU number. */ + target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + target = GICR_TYPER_CPU_NUMBER(target) << 16; + } + + /* Perform collection mapping */ + its->collections[cpu].target_address = target; + its->collections[cpu].col_id = cpu % 64; + + its_send_mapc(its, &its->collections[cpu], 1); + its_send_invall(its, &its->collections[cpu]); +} + +static void its_cpu_init_collections(void) +{ + struct its_node *its; + + raw_spin_lock(&its_lock); + + list_for_each_entry(its, &its_nodes, entry) + its_cpu_init_collection(its); + + raw_spin_unlock(&its_lock); +} + +static struct its_device *its_find_device(struct its_node *its, u32 dev_id) +{ + struct its_device *its_dev = NULL, *tmp; + unsigned long flags; + + raw_spin_lock_irqsave(&its->lock, flags); + + list_for_each_entry(tmp, &its->its_device_list, entry) { + if (tmp->device_id == dev_id) { + its_dev = tmp; + break; + } + } + + raw_spin_unlock_irqrestore(&its->lock, flags); + + return its_dev; +} + +static struct its_baser *its_get_baser(struct its_node *its, u32 type) +{ + int i; + + for 
(i = 0; i < GITS_BASER_NR_REGS; i++) { + if (GITS_BASER_TYPE(its->tables[i].val) == type) + return &its->tables[i]; + } + + return NULL; +} + +static bool its_alloc_table_entry(struct its_node *its, + struct its_baser *baser, u32 id) +{ + struct page *page; + u32 esz, idx; + __le64 *table; + + /* Don't allow device id that exceeds single, flat table limit */ + esz = GITS_BASER_ENTRY_SIZE(baser->val); + if (!(baser->val & GITS_BASER_INDIRECT)) + return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(baser->psz / esz); + if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = baser->base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(baser->psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(page_address(page), baser->psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to ITS hardware */ + dsb(sy); + } + + return true; +} + +static bool its_alloc_device_table(struct its_node *its, u32 dev_id) +{ + struct its_baser *baser; + + baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); + + /* Don't allow device id that exceeds ITS hardware limit */ + if (!baser) + return (ilog2(dev_id) < device_ids(its)); + + return its_alloc_table_entry(its, baser, dev_id); +} + +static bool its_alloc_vpe_table(u32 vpe_id) +{ + struct its_node *its; + int cpu; + + /* + * Make sure the L2 tables are allocated on *all* v4 ITSs. 
We + * could try and only do it on ITSs corresponding to devices + * that have interrupts targeted at this VPE, but the + * complexity becomes crazy (and you have tons of memory + * anyway, right?). + */ + list_for_each_entry(its, &its_nodes, entry) { + struct its_baser *baser; + + if (!is_v4(its)) + continue; + + baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); + if (!baser) + return false; + + if (!its_alloc_table_entry(its, baser, vpe_id)) + return false; + } + + /* Non v4.1? No need to iterate RDs and go back early. */ + if (!gic_rdists->has_rvpeid) + return true; + + /* + * Make sure the L2 tables are allocated for all copies of + * the L1 table on *all* v4.1 RDs. + */ + for_each_possible_cpu(cpu) { + if (!allocate_vpe_l2_table(cpu, vpe_id)) + return false; + } + + return true; +} + +static struct its_device *its_create_device(struct its_node *its, u32 dev_id, + int nvecs, bool alloc_lpis) +{ + struct its_device *dev; + unsigned long *lpi_map = NULL; + unsigned long flags; + u16 *col_map = NULL; + void *itt; + int lpi_base; + int nr_lpis; + int nr_ites; + int sz; + + if (!its_alloc_device_table(its, dev_id)) + return NULL; + + if (WARN_ON(!is_power_of_2(nvecs))) + nvecs = roundup_pow_of_two(nvecs); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + /* + * Even if the device wants a single LPI, the ITT must be + * sized as a power of two (and you need at least one bit...). 
+ */ + nr_ites = max(2, nvecs); + sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); + sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; + itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); + if (alloc_lpis) { + lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); + if (lpi_map) + col_map = kcalloc(nr_lpis, sizeof(*col_map), + GFP_KERNEL); + } else { + col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL); + nr_lpis = 0; + lpi_base = 0; + } + + if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { + kfree(dev); + kfree(itt); + bitmap_free(lpi_map); + kfree(col_map); + return NULL; + } + + gic_flush_dcache_to_poc(itt, sz); + + dev->its = its; + dev->itt = itt; + dev->nr_ites = nr_ites; + dev->event_map.lpi_map = lpi_map; + dev->event_map.col_map = col_map; + dev->event_map.lpi_base = lpi_base; + dev->event_map.nr_lpis = nr_lpis; + raw_spin_lock_init(&dev->event_map.vlpi_lock); + dev->device_id = dev_id; + INIT_LIST_HEAD(&dev->entry); + + raw_spin_lock_irqsave(&its->lock, flags); + list_add(&dev->entry, &its->its_device_list); + raw_spin_unlock_irqrestore(&its->lock, flags); + + /* Map device to its ITT */ + its_send_mapd(dev, 1); + + return dev; +} + +static void its_free_device(struct its_device *its_dev) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&its_dev->its->lock, flags); + list_del(&its_dev->entry); + raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); + kfree(its_dev->event_map.col_map); + kfree(its_dev->itt); + kfree(its_dev); +} + +static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) +{ + int idx; + + /* Find a free LPI region in lpi_map and allocate them. 
*/ + idx = bitmap_find_free_region(dev->event_map.lpi_map, + dev->event_map.nr_lpis, + get_count_order(nvecs)); + if (idx < 0) + return -ENOSPC; + + *hwirq = dev->event_map.lpi_base + idx; + + return 0; +} + +static int its_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct its_node *its; + struct its_device *its_dev; + struct msi_domain_info *msi_info; + u32 dev_id; + int err = 0; + + /* + * We ignore "dev" entirely, and rely on the dev_id that has + * been passed via the scratchpad. This limits this domain's + * usefulness to upper layers that definitely know that they + * are built on top of the ITS. + */ + dev_id = info->scratchpad[0].ul; + + msi_info = msi_get_domain_info(domain); + its = msi_info->data; + + if (!gic_rdists->has_direct_lpi && + vpe_proxy.dev && + vpe_proxy.dev->its == its && + dev_id == vpe_proxy.dev->device_id) { + /* Bad luck. Get yourself a better implementation */ + WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", + dev_id); + return -EINVAL; + } + + mutex_lock(&its->dev_alloc_lock); + its_dev = its_find_device(its, dev_id); + if (its_dev) { + /* + * We already have seen this ID, probably through + * another alias (PCI bridge of some sort). No need to + * create the device. 
+ */ + its_dev->shared = true; + pr_debug("Reusing ITT for devID %x\n", dev_id); + goto out; + } + + its_dev = its_create_device(its, dev_id, nvec, true); + if (!its_dev) { + err = -ENOMEM; + goto out; + } + + if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE) + its_dev->shared = true; + + pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); +out: + mutex_unlock(&its->dev_alloc_lock); + info->scratchpad[0].ptr = its_dev; + return err; +} + +static struct msi_domain_ops its_msi_domain_ops = { + .msi_prepare = its_msi_prepare, +}; + +static int its_irq_gic_domain_alloc(struct irq_domain *domain, + unsigned int virq, + irq_hw_number_t hwirq) +{ + struct irq_fwspec fwspec; + + if (irq_domain_get_of_node(domain->parent)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 3; + fwspec.param[0] = GIC_IRQ_TYPE_LPI; + fwspec.param[1] = hwirq; + fwspec.param[2] = IRQ_TYPE_EDGE_RISING; + } else if (is_fwnode_irqchip(domain->parent->fwnode)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 2; + fwspec.param[0] = hwirq; + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; + } else { + return -EINVAL; + } + + return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); +} + +static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + msi_alloc_info_t *info = args; + struct its_device *its_dev = info->scratchpad[0].ptr; + struct its_node *its = its_dev->its; + struct irq_data *irqd; + irq_hw_number_t hwirq; + int err; + int i; + + err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); + if (err) + return err; + + err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev)); + if (err) + return err; + + for (i = 0; i < nr_irqs; i++) { + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); + if (err) + return err; + + irq_domain_set_hwirq_and_chip(domain, virq + i, + hwirq + i, &its_irq_chip, its_dev); + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + 
irqd_set_affinity_on_activate(irqd); + irqd_set_resend_when_in_progress(irqd); + pr_debug("ID:%d pID:%d vID:%d\n", + (int)(hwirq + i - its_dev->event_map.lpi_base), + (int)(hwirq + i), virq + i); + } + + return 0; +} + +static int its_cpumask_first(struct its_device *its_dev, + const struct cpumask *cpu_mask) +{ + unsigned int skt, skt_id, i; + phys_addr_t its_phys_base; + unsigned int cpu, cpus = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + its_phys_base = its_dev->its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + if (skt_id != 0) + for (i = 0; i < skt_id; i++) + cpus += skt_cpu_cnt[i]; + + cpu = cpumask_first(cpu_mask); + if ((cpu > cpus) && (cpu < (cpus + skt_cpu_cnt[skt_id]))) + cpus = cpu; + + return cpus; +} + +static int its_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + const struct cpumask *cpu_mask = cpu_online_mask; + int cpu; + + cpu = its_cpumask_first(its_dev, cpu_mask); + + if (cpu < 0 || cpu >= nr_cpu_ids) + return -EINVAL; + + its_inc_lpi_count(d, cpu); + its_dev->event_map.col_map[event] = cpu; + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + /* Map the GIC IRQ and event to the device */ + its_send_mapti(its_dev, d->hwirq, event); + return 0; +} + +static void its_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + its_dec_lpi_count(d, its_dev->event_map.col_map[event]); + /* Stop the delivery of interrupts */ + its_send_discard(its_dev, event); +} + +static void its_irq_domain_free(struct irq_domain *domain, unsigned 
int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_node *its = its_dev->its; + int i; + + bitmap_release_region(its_dev->event_map.lpi_map, + its_get_event_id(irq_domain_get_irq_data(domain, virq)), + get_count_order(nr_irqs)); + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *data = irq_domain_get_irq_data(domain, + virq + i); + /* Nuke the entry in the domain */ + irq_domain_reset_irq_data(data); + } + + mutex_lock(&its->dev_alloc_lock); + + /* + * If all interrupts have been freed, start mopping the + * floor. This is conditioned on the device not being shared. + */ + if (!its_dev->shared && + bitmap_empty(its_dev->event_map.lpi_map, + its_dev->event_map.nr_lpis)) { + its_lpi_free(its_dev->event_map.lpi_map, + its_dev->event_map.lpi_base, + its_dev->event_map.nr_lpis); + + /* Unmap device/itt */ + its_send_mapd(its_dev, 0); + its_free_device(its_dev); + } + + mutex_unlock(&its->dev_alloc_lock); + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops its_domain_ops = { + .alloc = its_irq_domain_alloc, + .free = its_irq_domain_free, + .activate = its_irq_domain_activate, + .deactivate = its_irq_domain_deactivate, +}; + +/* + * This is insane. + * + * If a GICv4.0 doesn't implement Direct LPIs (which is extremely + * likely), the only way to perform an invalidate is to use a fake + * device to issue an INV command, implying that the LPI has first + * been mapped to some event on that device. Since this is not exactly + * cheap, we try to keep that mapping around as long as possible, and + * only issue an UNMAP if we're short on available slots. + * + * Broken by design(tm). + * + * GICv4.1, on the other hand, mandates that we're able to invalidate + * by writing to a MMIO register. It doesn't implement the whole of + * DirectLPI, but that's good enough. 
And most of the time, we don't + * even have to invalidate anything, as the redistributor can be told + * whether to generate a doorbell or not (we thus leave it enabled, + * always). + */ +static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + /* Already unmapped? */ + if (vpe->vpe_proxy_event == -1) + return; + + its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); + vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; + + /* + * We don't track empty slots at all, so let's move the + * next_victim pointer if we can quickly reuse that slot + * instead of nuking an existing entry. Not clear that this is + * always a win though, and this might just generate a ripple + * effect... Let's just hope VPEs don't migrate too often. + */ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + vpe_proxy.next_victim = vpe->vpe_proxy_event; + + vpe->vpe_proxy_event = -1; +} + +static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + if (!gic_rdists->has_direct_lpi) { + unsigned long flags; + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + its_vpe_db_proxy_unmap_locked(vpe); + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); + } +} + +static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + /* Already mapped? */ + if (vpe->vpe_proxy_event != -1) + return; + + /* This slot was already allocated. Kick the other VPE out. 
*/ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); + + /* Map the new VPE instead */ + vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; + vpe->vpe_proxy_event = vpe_proxy.next_victim; + vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; + + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; + its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); +} + +static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) +{ + unsigned long flags; + struct its_collection *target_col; + + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + wait_for_syncr(rdbase); + + return; + } + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + + its_vpe_db_proxy_map_locked(vpe); + + target_col = &vpe_proxy.dev->its->collections[to]; + its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; + + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); +} + +static int its_vpe_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + int from, cpu = cpumask_first(mask_val); + unsigned long flags; + + /* + * Changing affinity is mega expensive, so let's be as lazy as + * we can and only do it if we really have to. Also, if mapped + * into the proxy device, we need to move the doorbell + * interrupt to its new location. + * + * Another thing is that changing the affinity of a vPE affects + * *other interrupts* such as all the vLPIs that are routed to + * this vPE. 
This means that the irq_desc lock is not enough to + * protect us, and that we must ensure nobody samples vpe->col_idx + * during the update, hence the lock below which must also be + * taken on any vLPI handling path that evaluates vpe->col_idx. + */ + from = vpe_to_cpuid_lock(vpe, &flags); + if (from == cpu) + goto out; + + vpe->col_idx = cpu; + + /* + * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD + * is sharing its VPE table with the current one. + */ + if (gic_data_rdist_cpu(cpu)->vpe_table_mask && + cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) + goto out; + + its_send_vmovp(vpe); + its_vpe_db_proxy_move(vpe, from, cpu); + +out: + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + vpe_to_cpuid_unlock(vpe, flags); + + return IRQ_SET_MASK_OK_DONE; +} + +static void its_wait_vpt_parse_complete(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (!gic_rdists->has_vpend_valid_dirty) + return; + + WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER, + val, + !(val & GICR_VPENDBASER_Dirty), + 1, 500)); +} + +static void its_vpe_schedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + /* Schedule the VPE */ + val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & + GENMASK_ULL(51, 12); + val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + val = virt_to_phys(page_address(vpe->vpt_page)) & + GENMASK_ULL(51, 16); + val |= GICR_VPENDBASER_RaWaWb; + val |= GICR_VPENDBASER_InnerShareable; + /* + * There is no good way of finding out if the pending table is + * empty as we can race against the doorbell interrupt very + * easily. So in the end, vpe->pending_last is only an + * indication that the vcpu has something pending, not one + * that the pending table is empty. 
A good implementation + * would be able to read its coarse map pretty quickly anyway, + * making this a tolerable issue. + */ + val |= GICR_VPENDBASER_PendingLast; + val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; + val |= GICR_VPENDBASER_Valid; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_deschedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + val = its_clear_vpend_valid(vlpi_base, 0, 0); + + vpe->idai = !!(val & GICR_VPENDBASER_IDAI); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); +} + +static void its_vpe_invall(struct its_vpe *vpe) +{ + struct its_node *its; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) + continue; + + /* + * Sending a VINVALL to a single ITS is enough, as all + * we need is to reach the redistributors. + */ + its_send_vinvall(its, vpe); + return; + } +} + +static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_schedule(vpe); + return 0; + + case DESCHEDULE_VPE: + its_vpe_deschedule(vpe); + return 0; + + case COMMIT_VPE: + its_wait_vpt_parse_complete(); + return 0; + + case INVALL_VPE: + its_vpe_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static void its_vpe_send_cmd(struct its_vpe *vpe, + void (*cmd)(struct its_device *, u32)) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + + its_vpe_db_proxy_map_locked(vpe); + cmd(vpe_proxy.dev, vpe->vpe_proxy_event); + + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); +} + +static void its_vpe_send_inv(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + if (gic_rdists->has_direct_lpi) + __direct_lpi_inv(d, d->parent_data->hwirq); + else + 
its_vpe_send_cmd(vpe, its_send_inv); +} + +static void its_vpe_mask_irq(struct irq_data *d) +{ + /* + * We need to unmask the LPI, which is described by the parent + * irq_data. Instead of calling into the parent (which won't + * exactly do the right thing, let's simply use the + * parent_data pointer. Yes, I'm naughty. + */ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_send_inv(d); +} + +static void its_vpe_unmask_irq(struct irq_data *d) +{ + /* Same hack as above... */ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_send_inv(d); +} + +static int its_vpe_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; + if (state) { + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); + } else { + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + wait_for_syncr(rdbase); + } + } else { + if (state) + its_vpe_send_cmd(vpe, its_send_int); + else + its_vpe_send_cmd(vpe, its_send_clear); + } + + return 0; +} + +static int its_vpe_retrigger(struct irq_data *d) +{ + return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + +static struct irq_chip its_vpe_irq_chip = { + .name = "GICv4-vpe", + .irq_mask = its_vpe_mask_irq, + .irq_unmask = its_vpe_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_retrigger = its_vpe_retrigger, + .irq_set_irqchip_state = its_vpe_set_irqchip_state, + .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, +}; + +static struct its_node *find_4_1_its(void) +{ + static struct its_node *its; + + if (!its) { + list_for_each_entry(its, &its_nodes, entry) { + if (is_v4_1(its)) + return its; + } + + /* Oops? 
*/ + its = NULL; + } + + return its; +} + +static void its_vpe_4_1_send_inv(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * GICv4.1 wants doorbells to be invalidated using the + * INVDB command in order to be broadcast to all RDs. Send + * it to the first valid ITS, and let the HW do its magic. + */ + its = find_4_1_its(); + if (its) + its_send_invdb(its, vpe); +} + +static void its_vpe_4_1_mask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_unmask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_schedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val = 0; + + /* Schedule the VPE */ + val |= GICR_VPENDBASER_Valid; + val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0; + val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0; + val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); + + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_4_1_deschedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (info->req_db) { + unsigned long flags; + + /* + * vPE is going to block: make the vPE non-resident with + * PendingLast clear and DB set. The GIC guarantees that if + * we read-back PendingLast clear, then a doorbell will be + * delivered when an interrupt comes. + * + * Note the locking to deal with the concurrent update of + * pending_last from the doorbell interrupt handler that can + * run concurrently. 
+ */ + raw_spin_lock_irqsave(&vpe->vpe_lock, flags); + val = its_clear_vpend_valid(vlpi_base, + GICR_VPENDBASER_PendingLast, + GICR_VPENDBASER_4_1_DB); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); + } else { + /* + * We're not blocking, so just make the vPE non-resident + * with PendingLast set, indicating that we'll be back. + */ + val = its_clear_vpend_valid(vlpi_base, + 0, + GICR_VPENDBASER_PendingLast); + vpe->pending_last = true; + } +} + +static void its_vpe_4_1_invall(struct its_vpe *vpe) +{ + void __iomem *rdbase; + unsigned long flags; + u64 val; + int cpu; + + val = GICR_INVALLR_V; + val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); + + /* Target the redistributor this vPE is currently known on */ + cpu = vpe_to_cpuid_lock(vpe, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVALLR); + + wait_for_syncr(rdbase); + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + vpe_to_cpuid_unlock(vpe, flags); +} + +static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_4_1_schedule(vpe, info); + return 0; + + case DESCHEDULE_VPE: + its_vpe_4_1_deschedule(vpe, info); + return 0; + + case COMMIT_VPE: + its_wait_vpt_parse_complete(); + return 0; + + case INVALL_VPE: + its_vpe_4_1_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_vpe_4_1_irq_chip = { + .name = "GICv4.1-vpe", + .irq_mask = its_vpe_4_1_mask_irq, + .irq_unmask = its_vpe_4_1_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity, +}; + +static void its_configure_sgi(struct irq_data *d, bool clear) +{ + struct its_vpe 
*vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_desc desc; + + desc.its_vsgi_cmd.vpe = vpe; + desc.its_vsgi_cmd.sgi = d->hwirq; + desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority; + desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled; + desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group; + desc.its_vsgi_cmd.clear = clear; + + /* + * GICv4.1 allows us to send VSGI commands to any ITS as long as the + * destination VPE is mapped there. Since we map them eagerly at + * activation time, we're pretty sure the first GICv4.1 ITS will do. + */ + its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc); +} + +static void its_sgi_mask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); +} + +static void its_sgi_unmask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = true; + its_configure_sgi(d, false); +} + +static int its_sgi_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + /* + * There is no notion of affinity for virtual SGIs, at least + * not on the host (since they can only be targeting a vPE). + * Tell the kernel we've done whatever it asked for. 
+ */ + irq_data_update_effective_affinity(d, mask_val); + return IRQ_SET_MASK_OK; +} + +static int its_sgi_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (state) { + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its = find_4_1_its(); + u64 val; + + val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id); + val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq); + writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K); + } else { + its_configure_sgi(d, true); + } + + return 0; +} + +static int its_sgi_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + void __iomem *base; + unsigned long flags; + u32 count = 1000000; /* 1s! */ + u32 status; + int cpu; + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + /* + * Locking galore! We can race against two different events: + * + * - Concurrent vPE affinity change: we must make sure it cannot + * happen, or we'll talk to the wrong redistributor. This is + * identical to what happens with vLPIs. + * + * - Concurrent VSGIPENDR access: As it involves accessing two + * MMIO registers, this must be made atomic one way or another. 
+ */ + cpu = vpe_to_cpuid_lock(vpe, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K; + writel_relaxed(vpe->vpe_id, base + GICR_VSGIR); + do { + status = readl_relaxed(base + GICR_VSGIPENDR); + if (!(status & GICR_VSGIPENDR_BUSY)) + goto out; + + count--; + if (!count) { + pr_err_ratelimited("Unable to get SGI status\n"); + goto out; + } + cpu_relax(); + udelay(1); + } while (count); + +out: + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + vpe_to_cpuid_unlock(vpe, flags); + + if (!count) + return -ENXIO; + + *val = !!(status & (1 << d->hwirq)); + + return 0; +} + +static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case PROP_UPDATE_VSGI: + vpe->sgi_config[d->hwirq].priority = info->priority; + vpe->sgi_config[d->hwirq].group = info->group; + its_configure_sgi(d, false); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_sgi_irq_chip = { + .name = "GICv4.1-sgi", + .irq_mask = its_sgi_mask_irq, + .irq_unmask = its_sgi_unmask_irq, + .irq_set_affinity = its_sgi_set_affinity, + .irq_set_irqchip_state = its_sgi_set_irqchip_state, + .irq_get_irqchip_state = its_sgi_get_irqchip_state, + .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity, +}; + +static int its_sgi_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + struct its_vpe *vpe = args; + int i; + + /* Yes, we do want 16 SGIs */ + WARN_ON(nr_irqs != 16); + + for (i = 0; i < 16; i++) { + vpe->sgi_config[i].priority = 0; + vpe->sgi_config[i].enabled = false; + vpe->sgi_config[i].group = false; + + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + &its_sgi_irq_chip, vpe); + irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY); + } + + return 0; +} + +static void its_sgi_irq_domain_free(struct irq_domain *domain, + 
unsigned int virq, + unsigned int nr_irqs) +{ + /* Nothing to do */ +} + +static int its_sgi_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + /* Write out the initial SGI configuration */ + its_configure_sgi(d, false); + return 0; +} + +static void its_sgi_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + /* + * The VSGI command is awkward: + * + * - To change the configuration, CLEAR must be set to false, + * leaving the pending bit unchanged. + * - To clear the pending bit, CLEAR must be set to true, leaving + * the configuration unchanged. + * + * You just can't do both at once, hence the two commands below. + */ + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); + its_configure_sgi(d, true); +} + +static const struct irq_domain_ops its_sgi_domain_ops = { + .alloc = its_sgi_irq_domain_alloc, + .free = its_sgi_irq_domain_free, + .activate = its_sgi_irq_domain_activate, + .deactivate = its_sgi_irq_domain_deactivate, +}; + +static int its_vpe_id_alloc(void) +{ + return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); +} + +static void its_vpe_id_free(u16 id) +{ + ida_simple_remove(&its_vpeid_ida, id); +} + +static int its_vpe_init(struct its_vpe *vpe) +{ + struct page *vpt_page; + int vpe_id; + + /* Allocate vpe_id */ + vpe_id = its_vpe_id_alloc(); + if (vpe_id < 0) + return vpe_id; + + /* Allocate VPT */ + vpt_page = its_allocate_pending_table(GFP_KERNEL); + if (!vpt_page) { + its_vpe_id_free(vpe_id); + return -ENOMEM; + } + + if (!its_alloc_vpe_table(vpe_id)) { + its_vpe_id_free(vpe_id); + its_free_pending_table(vpt_page); + return -ENOMEM; + } + + raw_spin_lock_init(&vpe->vpe_lock); + vpe->vpe_id = vpe_id; + vpe->vpt_page = vpt_page; + if (gic_rdists->has_rvpeid) + atomic_set(&vpe->vmapp_count, 0); + else + vpe->vpe_proxy_event = -1; + + return 0; +} + +static void its_vpe_teardown(struct its_vpe *vpe) +{ 
+ its_vpe_db_proxy_unmap(vpe); + its_vpe_id_free(vpe->vpe_id); + its_free_pending_table(vpe->vpt_page); +} + +static void its_vpe_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + struct its_vm *vm = domain->host_data; + int i; + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *data = irq_domain_get_irq_data(domain, + virq + i); + struct its_vpe *vpe = irq_data_get_irq_chip_data(data); + + BUG_ON(vm != vpe->its_vm); + + clear_bit(data->hwirq, vm->db_bitmap); + its_vpe_teardown(vpe); + irq_domain_reset_irq_data(data); + } + + if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { + its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); + its_free_prop_table(vm->vprop_page); + } +} + +static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct irq_chip *irqchip = &its_vpe_irq_chip; + struct its_vm *vm = args; + unsigned long *bitmap; + struct page *vprop_page; + int base, nr_ids, i, err = 0; + + BUG_ON(!vm); + + bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); + if (!bitmap) + return -ENOMEM; + + if (nr_ids < nr_irqs) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vprop_page = its_allocate_prop_table(GFP_KERNEL); + if (!vprop_page) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vm->db_bitmap = bitmap; + vm->db_lpi_base = base; + vm->nr_db_lpis = nr_ids; + vm->vprop_page = vprop_page; + + if (gic_rdists->has_rvpeid) + irqchip = &its_vpe_4_1_irq_chip; + + for (i = 0; i < nr_irqs; i++) { + vm->vpes[i]->vpe_db_lpi = base + i; + err = its_vpe_init(vm->vpes[i]); + if (err) + break; + err = its_irq_gic_domain_alloc(domain, virq + i, + vm->vpes[i]->vpe_db_lpi); + if (err) + break; + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + irqchip, vm->vpes[i]); + set_bit(i, bitmap); + irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i)); + } 
+ + if (err) { + if (i > 0) + its_vpe_irq_domain_free(domain, virq, i); + + its_lpi_free(bitmap, base, nr_ids); + its_free_prop_table(vprop_page); + } + + return err; +} + +static int its_vpe_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * If we use the list map, we issue VMAPP on demand... Unless + * we're on a GICv4.1 and we eagerly map the VPE on all ITSs + * so that VSGIs can work. + */ + if (!gic_requires_eager_mapping()) + return 0; + + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + } + + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + + return 0; +} + +static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * If we use the list map on GICv4.0, we unmap the VPE once no + * VLPIs are associated with the VM. + */ + if (!gic_requires_eager_mapping()) + return; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + its_send_vmapp(its, vpe, false); + } + + /* + * There may be a direct read to the VPT after unmapping the + * vPE, to guarantee the validity of this, we make the VPT + * memory coherent with the CPU caches here. 
+ */ + if (find_4_1_its() && !atomic_read(&vpe->vmapp_count)) + gic_flush_dcache_to_poc(page_address(vpe->vpt_page), + LPI_PENDBASE_SZ); +} + +static const struct irq_domain_ops its_vpe_domain_ops = { + .alloc = its_vpe_irq_domain_alloc, + .free = its_vpe_irq_domain_free, + .activate = its_vpe_irq_domain_activate, + .deactivate = its_vpe_irq_domain_deactivate, +}; + +static int its_force_quiescent(void __iomem *base) +{ + u32 count = 1000000; /* 1s */ + u32 val; + + val = readl_relaxed(base + GITS_CTLR); + /* + * GIC architecture specification requires the ITS to be both + * disabled and quiescent for writes to GITS_BASER or + * GITS_CBASER to not have UNPREDICTABLE results. + */ + if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) + return 0; + + /* Disable the generation of all interrupts to this ITS */ + val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); + writel_relaxed(val, base + GITS_CTLR); + + /* Poll GITS_CTLR and wait until ITS becomes quiescent */ + while (1) { + val = readl_relaxed(base + GITS_CTLR); + if (val & GITS_CTLR_QUIESCENT) + return 0; + + count--; + if (!count) + return -EBUSY; + + cpu_relax(); + udelay(1); + } +} + +static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) +{ + struct its_node *its = data; + + /* erratum 22375: only alloc 8MB table size (20 bits) */ + its->typer &= ~GITS_TYPER_DEVBITS; + its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1); + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; + + return true; +} + +static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; + + return true; +} + +static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) +{ + struct its_node *its = data; + + /* On QDF2400, the size of the ITE is 16Bytes */ + its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE; + its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1); + + return true; +} + +static u64 
its_irq_get_msi_base_pre_its(struct its_device *its_dev) +{ + struct its_node *its = its_dev->its; + + /* + * The Socionext Synquacer SoC has a so-called 'pre-ITS', + * which maps 32-bit writes targeted at a separate window of + * size '4 << device_id_bits' onto writes to GITS_TRANSLATER + * with device ID taken from bits [device_id_bits + 1:2] of + * the window offset. + */ + return its->pre_its_base + (its_dev->device_id << 2); +} + +static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) +{ + struct its_node *its = data; + u32 pre_its_window[2]; + u32 ids; + + if (!fwnode_property_read_u32_array(its->fwnode_handle, + "socionext,synquacer-pre-its", + pre_its_window, + ARRAY_SIZE(pre_its_window))) { + + its->pre_its_base = pre_its_window[0]; + its->get_msi_base = its_irq_get_msi_base_pre_its; + + ids = ilog2(pre_its_window[1]) - 2; + if (device_ids(its) > ids) { + its->typer &= ~GITS_TYPER_DEVBITS; + its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1); + } + + /* the pre-ITS breaks isolation, so disable MSI remapping */ + its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI; + return true; + } + return false; +} + +static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) +{ + struct its_node *its = data; + + /* + * Hip07 insists on using the wrong address for the VLPI + * page. Trick it into doing the right thing... 
+ */ + its->vlpi_redist_offset = SZ_128K; + return true; +} + +static bool __maybe_unused its_enable_rk3588001(void *data) +{ + struct its_node *its = data; + + if (!of_machine_is_compatible("rockchip,rk3588") && + !of_machine_is_compatible("rockchip,rk3588s")) + return false; + + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; + + return true; +} + +static bool its_set_non_coherent(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + +static const struct gic_quirk its_quirks[] = { +#ifdef CONFIG_CAVIUM_ERRATUM_22375 + { + .desc = "ITS: Cavium errata 22375, 24313", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_22375, + }, +#endif +#ifdef CONFIG_CAVIUM_ERRATUM_23144 + { + .desc = "ITS: Cavium erratum 23144", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_23144, + }, +#endif +#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 + { + .desc = "ITS: QDF2400 erratum 0065", + .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ + .mask = 0xffffffff, + .init = its_enable_quirk_qdf2400_e0065, + }, +#endif +#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS + { + /* + * The Socionext Synquacer SoC incorporates ARM's own GIC-500 + * implementation, but with a 'pre-ITS' added that requires + * special handling in software. 
+ */ + .desc = "ITS: Socionext Synquacer pre-ITS", + .iidr = 0x0001143b, + .mask = 0xffffffff, + .init = its_enable_quirk_socionext_synquacer, + }, +#endif +#ifdef CONFIG_HISILICON_ERRATUM_161600802 + { + .desc = "ITS: Hip07 erratum 161600802", + .iidr = 0x00000004, + .mask = 0xffffffff, + .init = its_enable_quirk_hip07_161600802, + }, +#endif +#ifdef CONFIG_ROCKCHIP_ERRATUM_3588001 + { + .desc = "ITS: Rockchip erratum RK3588001", + .iidr = 0x0201743b, + .mask = 0xffffffff, + .init = its_enable_rk3588001, + }, +#endif + { + .desc = "ITS: non-coherent attribute", + .property = "dma-noncoherent", + .init = its_set_non_coherent, + }, + { + } +}; + +static void its_enable_quirks(struct its_node *its) +{ + u32 iidr = readl_relaxed(its->base + GITS_IIDR); + + gic_enable_quirks(iidr, its_quirks, its); + + if (is_of_node(its->fwnode_handle)) + gic_enable_of_quirks(to_of_node(its->fwnode_handle), + its_quirks, its); +} + +static int its_save_disable(void) +{ + struct its_node *its; + int err = 0; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + + base = its->base; + its->ctlr_save = readl_relaxed(base + GITS_CTLR); + err = its_force_quiescent(base); + if (err) { + pr_err("ITS@%pa: failed to quiesce: %d\n", + &its->phys_base, err); + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + goto err; + } + + its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); + } + +err: + if (err) { + list_for_each_entry_continue_reverse(its, &its_nodes, entry) { + void __iomem *base; + + base = its->base; + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + } + } + raw_spin_unlock(&its_lock); + + return err; +} + +static void its_restore_enable(void) +{ + struct its_node *its; + int ret; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + int i; + + base = its->base; + + /* + * Make sure that the ITS is disabled. 
If it fails to quiesce, + * don't restore it since writing to CBASER or BASER + * registers is undefined according to the GIC v3 ITS + * Specification. + * + * Firmware resuming with the ITS enabled is terminally broken. + */ + WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE); + ret = its_force_quiescent(base); + if (ret) { + pr_err("ITS@%pa: failed to quiesce on resume: %d\n", + &its->phys_base, ret); + continue; + } + + gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); + + /* + * Writing CBASER resets CREADR to 0, so make CWRITER and + * cmd_write line up with it. + */ + its->cmd_write = its->cmd_base; + gits_write_cwriter(0, base + GITS_CWRITER); + + /* Restore GITS_BASER from the value cache. */ + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = &its->tables[i]; + + if (!(baser->val & GITS_BASER_VALID)) + continue; + + its_write_baser(its, baser, baser->val); + } + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + + /* + * Reinit the collection if it's stored in the ITS. This is + * indicated by the col_id being less than the HCC field. + * CID < HCC as specified in the GIC v3 Documentation. 
+ */ + if (its->collections[smp_processor_id()].col_id < + GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) + its_cpu_init_collection(its); + } + raw_spin_unlock(&its_lock); +} + +static struct syscore_ops its_syscore_ops = { + .suspend = its_save_disable, + .resume = its_restore_enable, +}; + +static void __init __iomem *its_map_one(struct resource *res, int *err) +{ + void __iomem *its_base; + u32 val; + + its_base = ioremap(res->start, SZ_64K); + if (!its_base) { + pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); + *err = -ENOMEM; + return NULL; + } + + val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (val != 0x30 && val != 0x40) { + pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); + *err = -ENODEV; + goto out_unmap; + } + + *err = its_force_quiescent(its_base); + if (*err) { + pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); + goto out_unmap; + } + + return its_base; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static int its_init_domain(struct its_node *its) +{ + struct irq_domain *inner_domain; + struct msi_domain_info *info; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->ops = &its_msi_domain_ops; + info->data = its; + + inner_domain = irq_domain_create_hierarchy(its_parent, + its->msi_domain_flags, 0, + its->fwnode_handle, &its_domain_ops, + info); + if (!inner_domain) { + kfree(info); + return -ENOMEM; + } + + irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); + + return 0; +} + +static int its_init_vpe_domain(void) +{ + struct its_node *its; + u32 devid; + int entries; + + if (gic_rdists->has_direct_lpi) { + pr_info("ITS: Using DirectLPI for VPE invalidation\n"); + return 0; + } + + /* Any ITS will do, even if not v4 */ + its = list_first_entry(&its_nodes, struct its_node, entry); + + entries = roundup_pow_of_two(nr_cpu_ids); + vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), + GFP_KERNEL); + if (!vpe_proxy.vpes) + 
return -ENOMEM; + + /* Use the last possible DevID */ + devid = GENMASK(device_ids(its) - 1, 0); + vpe_proxy.dev = its_create_device(its, devid, entries, false); + if (!vpe_proxy.dev) { + kfree(vpe_proxy.vpes); + pr_err("ITS: Can't allocate GICv4 proxy device\n"); + return -ENOMEM; + } + + BUG_ON(entries > vpe_proxy.dev->nr_ites); + + raw_spin_lock_init(&vpe_proxy.lock); + vpe_proxy.next_victim = 0; + pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", + devid, vpe_proxy.dev->nr_ites); + + return 0; +} + +static int __init its_compute_its_list_map(struct its_node *its) +{ + int its_number; + u32 ctlr; + + /* + * This is assumed to be done early enough that we're + * guaranteed to be single-threaded, hence no + * locking. Should this change, we should address + * this. + */ + its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); + if (its_number >= GICv4_ITS_LIST_MAX) { + pr_err("ITS@%pa: No ITSList entry available!\n", + &its->phys_base); + return -EINVAL; + } + + ctlr = readl_relaxed(its->base + GITS_CTLR); + ctlr &= ~GITS_CTLR_ITS_NUMBER; + ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; + writel_relaxed(ctlr, its->base + GITS_CTLR); + ctlr = readl_relaxed(its->base + GITS_CTLR); + if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { + its_number = ctlr & GITS_CTLR_ITS_NUMBER; + its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; + } + + if (test_and_set_bit(its_number, &its_list_map)) { + pr_err("ITS@%pa: Duplicate ITSList entry %d\n", + &its->phys_base, its_number); + return -EINVAL; + } + + return its_number; +} + +static int __init its_probe_one(struct its_node *its) +{ + u64 baser, tmp; + struct page *page; + u32 ctlr; + int err; + + if (is_v4(its)) { + if (!(its->typer & GITS_TYPER_VMOVP)) { + err = its_compute_its_list_map(its); + if (err < 0) + goto out; + + its->list_nr = err; + + pr_info("ITS@%pa: Using ITS number %d\n", + &its->phys_base, err); + } else { + pr_info("ITS@%pa: Single VMOVP 
capable\n", &its->phys_base); + } + + if (is_v4_1(its)) { + u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); + + its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K); + if (!its->sgir_base) { + err = -ENOMEM; + goto out; + } + + its->mpidr = readl_relaxed(its->base + GITS_MPIDR); + + pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", + &its->phys_base, its->mpidr, svpet); + } + } + + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(ITS_CMD_QUEUE_SZ)); + if (!page) { + err = -ENOMEM; + goto out_unmap_sgir; + } + its->cmd_base = (void *)page_address(page); + its->cmd_write = its->cmd_base; + + err = its_alloc_tables(its); + if (err) + goto out_free_cmd; + + err = its_alloc_collections(its); + if (err) + goto out_free_tables; + + baser = (virt_to_phys(its->cmd_base) | + GITS_CBASER_RaWaWb | + GITS_CBASER_InnerShareable | + (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | + GITS_CBASER_VALID); + + gits_write_cbaser(baser, its->base + GITS_CBASER); + tmp = gits_read_cbaser(its->base + GITS_CBASER); + + if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) + tmp &= ~GITS_CBASER_SHAREABILITY_MASK; + + if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { + if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. 
+ */ + baser &= ~(GITS_CBASER_SHAREABILITY_MASK | + GITS_CBASER_CACHEABILITY_MASK); + baser |= GITS_CBASER_nC; + gits_write_cbaser(baser, its->base + GITS_CBASER); + } + pr_info("ITS: using cache flushing for cmd queue\n"); + its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; + } + + gits_write_cwriter(0, its->base + GITS_CWRITER); + ctlr = readl_relaxed(its->base + GITS_CTLR); + ctlr |= GITS_CTLR_ENABLE; + if (is_v4(its)) + ctlr |= GITS_CTLR_ImDe; + writel_relaxed(ctlr, its->base + GITS_CTLR); + + err = its_init_domain(its); + if (err) + goto out_free_tables; + + raw_spin_lock(&its_lock); + list_add(&its->entry, &its_nodes); + raw_spin_unlock(&its_lock); + + return 0; + +out_free_tables: + its_free_tables(its); +out_free_cmd: + free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); +out_unmap_sgir: + if (its->sgir_base) + iounmap(its->sgir_base); +out: + pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err); + return err; +} + +static bool gic_rdists_supports_plpis(void) +{ + return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); +} + +static int redist_disable_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + u64 timeout = USEC_PER_SEC; + u64 val; + + if (!gic_rdists_supports_plpis()) { + pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); + return -ENXIO; + } + + val = readl_relaxed(rbase + GICR_CTLR); + if (!(val & GICR_CTLR_ENABLE_LPIS)) + return 0; + + /* + * If coming via a CPU hotplug event, we don't need to disable + * LPIs before trying to re-enable them. They are already + * configured and all is well in the world. + * + * If running with preallocated tables, there is nothing to do. + */ + if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) || + (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) + return 0; + + /* + * From that point on, we only try to do some damage control. 
+ */ + pr_warn("GIC-2500: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", + smp_processor_id()); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + /* Disable LPIs */ + val &= ~GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* Make sure any change to GICR_CTLR is observable by the GIC */ + dsb(sy); + + /* + * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs + * from 1 to 0 before programming GICR_PEND{PROP}BASER registers. + * Error out if we time out waiting for RWP to clear. + */ + while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) { + if (!timeout) { + pr_err("CPU%d: Timeout while disabling LPIs\n", + smp_processor_id()); + return -ETIMEDOUT; + } + udelay(1); + timeout--; + } + + /* + * After it has been written to 1, it is IMPLEMENTATION + * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be + * cleared to 0. Error out if clearing the bit failed. + */ + if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) { + pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id()); + return -EBUSY; + } + + return 0; +} + +int phytium_its_cpu_init(void) +{ + if (!list_empty(&its_nodes)) { + int ret; + + ret = redist_disable_lpis(); + if (ret) + return ret; + + its_cpu_init_lpis(); + its_cpu_init_collections(); + } + + return 0; +} + +static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work) +{ + cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state); + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; +} + +static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work, + rdist_memreserve_cpuhp_cleanup_workfn); + +/* Mark all the BASER registers as invalid before they get reprogrammed */ +static int __init its_reset_one(struct resource *res) +{ + void __iomem *its_base; + int err, i; + + its_base = its_map_one(res, &err); + if (!its_base) + return err; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) + gits_write_baser(0, its_base + GITS_BASER + (i << 3)); + + iounmap(its_base); + 
return 0; +} + +static const struct of_device_id its_device_id[] = { + { .compatible = "arm,gic-phytium-2500-its", }, + {}, +}; + +static struct its_node __init *its_node_init(struct resource *res, + struct fwnode_handle *handle, int numa_node) +{ + void __iomem *its_base; + struct its_node *its; + int err; + + its_base = its_map_one(res, &err); + if (!its_base) + return NULL; + + pr_info("ITS %pR\n", res); + + its = kzalloc(sizeof(*its), GFP_KERNEL); + if (!its) + goto out_unmap; + + raw_spin_lock_init(&its->lock); + mutex_init(&its->dev_alloc_lock); + INIT_LIST_HEAD(&its->entry); + INIT_LIST_HEAD(&its->its_device_list); + + its->typer = gic_read_typer(its_base + GITS_TYPER); + its->base = its_base; + its->phys_base = res->start; + its->get_msi_base = its_irq_get_msi_base; + its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI; + + its->numa_node = numa_node; + its->fwnode_handle = handle; + + return its; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static void its_node_destroy(struct its_node *its) +{ + iounmap(its->base); + kfree(its); +} + +static int __init its_of_probe(struct device_node *node) +{ + struct device_node *np; + struct resource res; + int err; + + /* + * Make sure *all* the ITS are reset before we probe any, as + * they may be sharing memory. If any of the ITS fails to + * reset, don't even try to go any further, as this could + * result in something even worse. 
+ */ + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np) || + !of_property_read_bool(np, "msi-controller") || + of_address_to_resource(np, 0, &res)) + continue; + + err = its_reset_one(&res); + if (err) + return err; + } + + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + struct its_node *its; + + if (!of_device_is_available(np)) + continue; + if (!of_property_read_bool(np, "msi-controller")) { + pr_warn("%pOF: no msi-controller property, ITS ignored\n", + np); + continue; + } + + if (of_address_to_resource(np, 0, &res)) { + pr_warn("%pOF: no regs?\n", np); + continue; + } + + + its = its_node_init(&res, &np->fwnode, of_node_to_nid(np)); + if (!its) + return -ENOMEM; + + its_enable_quirks(its); + err = its_probe_one(its); + if (err) { + its_node_destroy(its); + return err; + } + } + return 0; +} + +#ifdef CONFIG_ACPI + +#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) + +#ifdef CONFIG_ACPI_NUMA +struct its_srat_map { + /* numa node id */ + u32 numa_node; + /* GIC ITS ID */ + u32 its_id; +}; + +static struct its_srat_map *its_srat_maps __initdata; +static int its_in_srat __initdata; + +static int __init acpi_get_its_numa_node(u32 its_id) +{ + int i; + + for (i = 0; i < its_in_srat; i++) { + if (its_id == its_srat_maps[i].its_id) + return its_srat_maps[i].numa_node; + } + return NUMA_NO_NODE; +} + +static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + return 0; +} + +static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + int node; + struct acpi_srat_gic_its_affinity *its_affinity; + + its_affinity = (struct acpi_srat_gic_its_affinity *)header; + if (!its_affinity) + return -EINVAL; + + if (its_affinity->header.length < sizeof(*its_affinity)) { + pr_err("SRAT: Invalid header length %d in ITS affinity\n", + 
its_affinity->header.length); + return -EINVAL; + } + + /* + * Note that in theory a new proximity node could be created by this + * entry as it is an SRAT resource allocation structure. + * We do not currently support doing so. + */ + node = pxm_to_node(its_affinity->proximity_domain); + + if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { + pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); + return 0; + } + + its_srat_maps[its_in_srat].numa_node = node; + its_srat_maps[its_in_srat].its_id = its_affinity->its_id; + its_in_srat++; + pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", + its_affinity->proximity_domain, its_affinity->its_id, node); + + return 0; +} + +static void __init acpi_table_parse_srat_its(void) +{ + int count; + + count = acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_match_srat_its, 0); + if (count <= 0) + return; + + its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), + GFP_KERNEL); + if (!its_srat_maps) + return; + + acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_parse_srat_its, 0); +} + +/* free the its_srat_maps after ITS probing */ +static void __init acpi_its_srat_maps_free(void) +{ + kfree(its_srat_maps); +} +#else +static void __init acpi_table_parse_srat_its(void) { } +static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } +static void __init acpi_its_srat_maps_free(void) { } +#endif + +static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct fwnode_handle *dom_handle; + struct its_node *its; + struct resource res; + int err; + + its_entry = (struct acpi_madt_generic_translator *)header; + memset(&res, 0, sizeof(res)); + res.start = its_entry->base_address; + res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; + res.flags = 
IORESOURCE_MEM; + + dom_handle = irq_domain_alloc_fwnode(&res.start); + if (!dom_handle) { + pr_err("ITS@%pa: Unable to allocate GIC-phytium-2500 ITS domain token\n", + &res.start); + return -ENOMEM; + } + + err = iort_register_domain_token(its_entry->translation_id, res.start, + dom_handle); + if (err) { + pr_err("ITS@%pa: Unable to register GIC-phytium-2500 ITS domain token (ITS ID %d) to IORT\n", + &res.start, its_entry->translation_id); + goto dom_err; + } + + its = its_node_init(&res, dom_handle, + acpi_get_its_numa_node(its_entry->translation_id)); + if (!its) { + err = -ENOMEM; + goto node_err; + } + + err = its_probe_one(its); + if (!err) + return 0; + +node_err: + iort_deregister_domain_token(its_entry->translation_id); +dom_err: + irq_domain_free_fwnode(dom_handle); + return err; +} + +static int __init its_acpi_reset(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct resource res; + + its_entry = (struct acpi_madt_generic_translator *)header; + res = (struct resource) { + .start = its_entry->base_address, + .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1, + .flags = IORESOURCE_MEM, + }; + + return its_reset_one(&res); +} + +static void __init its_acpi_probe(void) +{ + acpi_table_parse_srat_its(); + /* + * Make sure *all* the ITS are reset before we probe any, as + * they may be sharing memory. If any of the ITS fails to + * reset, don't even try to go any further, as this could + * result in something even worse. 
+ */ + if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + its_acpi_reset, 0) > 0) + acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR, + gic_acpi_parse_madt_its, 0); + acpi_its_srat_maps_free(); +} +#else +static void __init its_acpi_probe(void) { } +#endif + +int __init phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists, + struct irq_domain *parent_domain) +{ + struct device_node *of_node; + struct its_node *its; + bool has_v4 = false; + bool has_v4_1 = false; + int err; + + gic_rdists = rdists; + + its_parent = parent_domain; + of_node = to_of_node(handle); + if (of_node) + its_of_probe(of_node); + else + its_acpi_probe(); + + if (list_empty(&its_nodes)) { + pr_warn("ITS: No ITS available, not enabling LPIs\n"); + return -ENXIO; + } + + err = allocate_lpi_tables(); + if (err) + return err; + + list_for_each_entry(its, &its_nodes, entry) { + has_v4 |= is_v4(its); + has_v4_1 |= is_v4_1(its); + } + + /* Don't bother with inconsistent systems */ + if (WARN_ON(!has_v4_1 && rdists->has_rvpeid)) + rdists->has_rvpeid = false; + + if (has_v4 & rdists->has_vlpis) { + const struct irq_domain_ops *sgi_ops; + + if (has_v4_1) + sgi_ops = &its_sgi_domain_ops; + else + sgi_ops = NULL; + + if (its_init_vpe_domain() || + its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) { + rdists->has_vlpis = false; + pr_err("ITS: Disabling GICv4 support\n"); + } + } + + register_syscore_ops(&its_syscore_ops); + + return 0; +} diff --git a/drivers/irqchip/irq-gic-phytium-2500.c b/drivers/irqchip/irq-gic-phytium-2500.c new file mode 100644 index 000000000000..f9f3b591be00 --- /dev/null +++ b/drivers/irqchip/irq-gic-phytium-2500.c @@ -0,0 +1,2898 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Phytium Corporation. 
+ * Author: + * Wang Yinfeng + * Chen Baozi + * Chen Siyu + * Cui Fulong + * Li Yuting + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + + +#define pr_fmt(fmt) "GIC-2500: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "irq-gic-common.h" + +#define MAX_MARS3_SOC_COUNT 8 +#define MARS3_ADDR_SKTID_SHIFT 41 + +struct gic_dist_desc { + void __iomem *dist_base; + phys_addr_t phys_base; + unsigned long size; +}; + +static struct gic_dist_desc mars3_gic_dists[MAX_MARS3_SOC_COUNT] __read_mostly; + +static unsigned int mars3_sockets_bitmap = 0x1; + +#define mars3_irq_to_skt(hwirq) (((hwirq) - 32) % 8) + +#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80) + +#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) +#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1) +#define FLAGS_WORKAROUND_MTK_GICR_SAVE (1ULL << 2) +#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 3) + +#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) + +struct redist_region { + void __iomem *redist_base; + phys_addr_t phys_base; + bool single_redist; +}; + +struct gic_chip_data { + struct fwnode_handle *fwnode; + phys_addr_t dist_phys_base; + void __iomem *dist_base; + struct redist_region *redist_regions; + struct rdists rdists; + struct irq_domain *domain; + u64 
redist_stride; + u32 nr_redist_regions; + u64 flags; + bool has_rss; + unsigned int ppi_nr; + struct partition_desc **ppi_descs; +}; + +#define T241_CHIPS_MAX 4 +static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly; +static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum); + +static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum); + +static struct gic_chip_data gic_data __read_mostly; +static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); + +#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) +#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U) +#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer) + +/* + * The behaviours of RPR and PMR registers differ depending on the value of + * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the + * distributor and redistributors depends on whether security is enabled in the + * GIC. + * + * When security is enabled, non-secure priority values from the (re)distributor + * are presented to the GIC CPUIF as follow: + * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80; + * + * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure + * EL1 are subject to a similar operation thus matching the priorities presented + * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0, + * these values are unchanged by the GIC. + * + * see GICv3/GICv4 Architecture Specification (IHI0069D): + * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt + * priorities. + * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1 + * interrupt. + */ +static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis_ft2500); + +extern struct static_key_false gic_nonsecure_priorities; + +/* + * When the Non-secure world has access to group 0 interrupts (as a + * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will + * return the Distributor's view of the interrupt priority. 
+ * + * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority + * written by software is moved to the Non-secure range by the Distributor. + * + * If both are true (which is when gic_nonsecure_priorities gets enabled), + * we need to shift down the priority programmed by software to match it + * against the value returned by ICC_RPR_EL1. + */ +#define GICD_INT_RPR_PRI(priority) \ + ({ \ + u32 __priority = (priority); \ + if (static_branch_unlikely(&gic_nonsecure_priorities)) \ + __priority = 0x80 | (__priority >> 1); \ + \ + __priority; \ + }) + +/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */ +static refcount_t *ppi_nmi_refs; + +static struct gic_kvm_info gic_v3_kvm_info __initdata; +static DEFINE_PER_CPU(bool, has_rss_ft2500); + +#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4) +#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) +#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) +#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) + +/* Our default, arbitrary priority value. Linux only uses one anyway. */ +#define DEFAULT_PMR_VALUE 0xf0 + +enum gic_intid_range { + SGI_RANGE, + PPI_RANGE, + SPI_RANGE, + EPPI_RANGE, + ESPI_RANGE, + LPI_RANGE, + __INVALID_RANGE__ +}; + +static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq) +{ + switch (hwirq) { + case 0 ... 15: + return SGI_RANGE; + case 16 ... 31: + return PPI_RANGE; + case 32 ... 1019: + return SPI_RANGE; + case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63): + return EPPI_RANGE; + case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023): + return ESPI_RANGE; + case 8192 ... 
GENMASK(23, 0): + return LPI_RANGE; + default: + return __INVALID_RANGE__; + } +} + +static enum gic_intid_range get_intid_range(struct irq_data *d) +{ + return __get_intid_range(d->hwirq); +} + +static inline unsigned int gic_irq(struct irq_data *d) +{ + return d->hwirq; +} + +static inline bool gic_irq_in_rdist(struct irq_data *d) +{ + switch (get_intid_range(d)) { + case SGI_RANGE: + case PPI_RANGE: + case EPPI_RANGE: + return true; + default: + return false; + } +} + +static inline void __iomem *gic_dist_base_alias(struct irq_data *d) +{ + if (static_branch_unlikely(&gic_nvidia_t241_erratum)) { + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u32 chip; + + /* + * For the erratum T241-FABRIC-4, read accesses to GICD_In{E} + * registers are directed to the chip that owns the SPI. The + * the alias region can also be used for writes to the + * GICD_In{E} except GICD_ICENABLERn. Each chip has support + * for 320 {E}SPIs. Mappings for all 4 chips: + * Chip0 = 32-351 + * Chip1 = 352-671 + * Chip2 = 672-991 + * Chip3 = 4096-4415 + */ + switch (__get_intid_range(hwirq)) { + case SPI_RANGE: + chip = (hwirq - 32) / 320; + break; + case ESPI_RANGE: + chip = 3; + break; + default: + unreachable(); + } + return t241_dist_base_alias[chip]; + } + + return gic_data.dist_base; +} + +static inline void __iomem *gic_dist_base(struct irq_data *d) +{ + switch (get_intid_range(d)) { + case SGI_RANGE: + case PPI_RANGE: + case EPPI_RANGE: + /* SGI+PPI -> SGI_base for this CPU */ + return gic_data_rdist_sgi_base(); + + case SPI_RANGE: + case ESPI_RANGE: + /* SPI -> dist_base */ + return gic_data.dist_base; + + default: + return NULL; + } +} + +static void gic_do_wait_for_rwp(void __iomem *base) +{ + u32 count = 1000000; /* 1s! 
*/ + + while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { + count--; + if (!count) { + pr_err_ratelimited("RWP timeout, gone fishing\n"); + return; + } + cpu_relax(); + udelay(1); + } +} + +/* Wait for completion of a distributor change */ +static void gic_dist_wait_for_rwp(void) +{ + gic_do_wait_for_rwp(gic_data.dist_base); +} + +/* Wait for completion of a redistributor change */ +static void gic_redist_wait_for_rwp(void) +{ + gic_do_wait_for_rwp(gic_data_rdist_rd_base()); +} + +#ifdef CONFIG_ARM64 + +static u64 __maybe_unused gic_read_iar(void) +{ + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154)) + return gic_read_iar_cavium_thunderx(); + else + return gic_read_iar_common(); +} +#endif + +static void gic_enable_redist(bool enable) +{ + void __iomem *rbase; + u32 count = 1000000; /* 1s! */ + u32 val; + unsigned long mpidr; + int i; + + if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996) + return; + + rbase = gic_data_rdist_rd_base(); + + val = readl_relaxed(rbase + GICR_WAKER); + if (enable) + /* Wake up this CPU redistributor */ + val &= ~GICR_WAKER_ProcessorSleep; + else + val |= GICR_WAKER_ProcessorSleep; + writel_relaxed(val, rbase + GICR_WAKER); + + if (!enable) { /* Check that GICR_WAKER is writeable */ + val = readl_relaxed(rbase + GICR_WAKER); + if (!(val & GICR_WAKER_ProcessorSleep)) + return; /* No PM support in this redistributor */ + } + + while (--count) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) + break; + cpu_relax(); + udelay(1); + } + if (!count) + pr_err_ratelimited("redistributor failed to %s...\n", + enable ? 
"wakeup" : "sleep"); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if (mpidr & 0xFFFF) // either Aff1 or Aff0 is not zero + return; + + rbase = rbase + 64 * SZ_128K; // skip 64 Redistributors + + for (i = 0; i < 4; i++) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable) + /* Wake up this CPU redistributor */ + val &= ~GICR_WAKER_ProcessorSleep; + else + val |= GICR_WAKER_ProcessorSleep; + writel_relaxed(val, rbase + GICR_WAKER); + + if (!enable) { /* Check that GICR_WAKER is writeable */ + val = readl_relaxed(rbase + GICR_WAKER); + if (!(val & GICR_WAKER_ProcessorSleep)) + return; /* No PM support in this redistributor */ + } + + count = 1000000; /* 1s! */ + while (--count) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) + break; + cpu_relax(); + udelay(1); + }; + if (!count) + pr_err_ratelimited("CPU MPIDR 0x%lx: redistributor %d failed to %s...\n", + mpidr, 64 + i, enable ? "wakeup" : "sleep"); + + rbase = rbase + SZ_128K; // next redistributor + } +} + +/* + * Routines to disable, enable, EOI and route interrupts + */ +static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index) +{ + switch (get_intid_range(d)) { + case SGI_RANGE: + case PPI_RANGE: + case SPI_RANGE: + *index = d->hwirq; + return offset; + case EPPI_RANGE: + /* + * Contrary to the ESPI range, the EPPI range is contiguous + * to the PPI range in the registers, so let's adjust the + * displacement accordingly. Consistency is overrated. 
+ */ + *index = d->hwirq - EPPI_BASE_INTID + 32; + return offset; + case ESPI_RANGE: + *index = d->hwirq - ESPI_BASE_INTID; + switch (offset) { + case GICD_ISENABLER: + return GICD_ISENABLERnE; + case GICD_ICENABLER: + return GICD_ICENABLERnE; + case GICD_ISPENDR: + return GICD_ISPENDRnE; + case GICD_ICPENDR: + return GICD_ICPENDRnE; + case GICD_ISACTIVER: + return GICD_ISACTIVERnE; + case GICD_ICACTIVER: + return GICD_ICACTIVERnE; + case GICD_IPRIORITYR: + return GICD_IPRIORITYRnE; + case GICD_ICFGR: + return GICD_ICFGRnE; + case GICD_IROUTER: + return GICD_IROUTERnE; + default: + break; + } + break; + default: + break; + } + + WARN_ON(1); + *index = d->hwirq; + return offset; +} + +static int gic_peek_irq(struct irq_data *d, u32 offset) +{ + void __iomem *base; + u32 index, mask; + + offset = convert_offset_index(d, offset, &index); + mask = 1 << (index % 32); + + if (gic_irq_in_rdist(d)) + base = gic_data_rdist_sgi_base(); + else { + unsigned int skt; + + skt = mars3_irq_to_skt(gic_irq(d)); + base = mars3_gic_dists[skt].dist_base; + } + + return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask); +} + +static void gic_poke_irq(struct irq_data *d, u32 offset) +{ + void __iomem *base; + + unsigned long mpidr; + void __iomem *rbase; + int i; + unsigned int skt; + u32 index, mask; + + offset = convert_offset_index(d, offset, &index); + mask = 1 << (index % 32); + + if (gic_irq_in_rdist(d)) { + base = gic_data_rdist_sgi_base(); + + writel_relaxed(mask, base + offset + (index / 32) * 4); + gic_redist_wait_for_rwp(); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if ((mpidr & 0xFFFF) == 0) { // both Aff1 and Aff0 are zero + rbase = base + 64*SZ_128K; // skip 64 Redistributors + + for (i = 0; i < 4; i++) { + writel_relaxed(mask, rbase + offset + (index / 32) * 4); + gic_do_wait_for_rwp(rbase - SZ_64K); // RD from SGI base + rbase = rbase + SZ_128K; + } + } // core 0 of each socket + } else { + skt = mars3_irq_to_skt(gic_irq(d)); + base = 
mars3_gic_dists[skt].dist_base; + writel_relaxed(mask, base + offset + (index / 32) * 4); + gic_do_wait_for_rwp(base); + } +} + +static void gic_mask_irq(struct irq_data *d) +{ + gic_poke_irq(d, GICD_ICENABLER); + if (gic_irq_in_rdist(d)) + gic_redist_wait_for_rwp(); + else + gic_dist_wait_for_rwp(); +} + +static void gic_eoimode1_mask_irq(struct irq_data *d) +{ + gic_mask_irq(d); + /* + * When masking a forwarded interrupt, make sure it is + * deactivated as well. + * + * This ensures that an interrupt that is getting + * disabled/masked will not get "stuck", because there is + * noone to deactivate it (guest is being terminated). + */ + if (irqd_is_forwarded_to_vcpu(d)) + gic_poke_irq(d, GICD_ICACTIVER); +} + +static void gic_unmask_irq(struct irq_data *d) +{ + gic_poke_irq(d, GICD_ISENABLER); +} + +static inline bool gic_supports_nmi_ft2500(void) +{ + return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && + static_branch_likely(&supports_pseudo_nmis_ft2500); +} + +static int gic_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool val) +{ + u32 reg; + + if (d->hwirq >= 8192) /* SGI/PPI/SPI only */ + return -EINVAL; + + switch (which) { + case IRQCHIP_STATE_PENDING: + reg = val ? GICD_ISPENDR : GICD_ICPENDR; + break; + + case IRQCHIP_STATE_ACTIVE: + reg = val ? 
GICD_ISACTIVER : GICD_ICACTIVER; + break; + + case IRQCHIP_STATE_MASKED: + if (val) { + gic_mask_irq(d); + return 0; + } + reg = GICD_ISENABLER; + break; + + default: + return -EINVAL; + } + + gic_poke_irq(d, reg); + return 0; +} + +static int gic_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + if (d->hwirq >= 8192) /* PPI/SPI only */ + return -EINVAL; + + switch (which) { + case IRQCHIP_STATE_PENDING: + *val = gic_peek_irq(d, GICD_ISPENDR); + break; + + case IRQCHIP_STATE_ACTIVE: + *val = gic_peek_irq(d, GICD_ISACTIVER); + break; + + case IRQCHIP_STATE_MASKED: + *val = !gic_peek_irq(d, GICD_ISENABLER); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static void gic_irq_set_prio(struct irq_data *d, u8 prio) +{ + void __iomem *base = gic_dist_base(d); + u32 offset, index; + + offset = convert_offset_index(d, GICD_IPRIORITYR, &index); + + writeb_relaxed(prio, base + offset + index); +} + +static u32 __gic_get_ppi_index(irq_hw_number_t hwirq) +{ + switch (__get_intid_range(hwirq)) { + case PPI_RANGE: + return hwirq - 16; + case EPPI_RANGE: + return hwirq - EPPI_BASE_INTID + 16; + default: + unreachable(); + } +} + +static u32 gic_get_ppi_index(struct irq_data *d) +{ + return __gic_get_ppi_index(d->hwirq); +} + +static int gic_irq_nmi_setup(struct irq_data *d) +{ + struct irq_desc *desc = irq_to_desc(d->irq); + + if (!gic_supports_nmi_ft2500()) + return -EINVAL; + + if (gic_peek_irq(d, GICD_ISENABLER)) { + pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); + return -EINVAL; + } + + /* + * A secondary irq_chip should be in charge of LPI request, + * it should not be possible to get there + */ + if (WARN_ON(gic_irq(d) >= 8192)) + return -EINVAL; + + /* desc lock should already be held */ + if (gic_irq_in_rdist(d)) { + u32 idx = gic_get_ppi_index(d); + + /* Setting up PPI as NMI, only switch handler for first NMI */ + if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) { + refcount_set(&ppi_nmi_refs[idx], 
1); + desc->handle_irq = handle_percpu_devid_fasteoi_nmi; + } + } else { + desc->handle_irq = handle_fasteoi_nmi; + } + + gic_irq_set_prio(d, GICD_INT_NMI_PRI); + + return 0; +} + +static void gic_irq_nmi_teardown(struct irq_data *d) +{ + struct irq_desc *desc = irq_to_desc(d->irq); + + if (WARN_ON(!gic_supports_nmi_ft2500())) + return; + + if (gic_peek_irq(d, GICD_ISENABLER)) { + pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); + return; + } + + /* + * A secondary irq_chip should be in charge of LPI request, + * it should not be possible to get there + */ + if (WARN_ON(gic_irq(d) >= 8192)) + return; + + /* desc lock should already be held */ + if (gic_irq_in_rdist(d)) { + u32 idx = gic_get_ppi_index(d); + + /* Tearing down NMI, only switch handler for last NMI */ + if (refcount_dec_and_test(&ppi_nmi_refs[idx])) + desc->handle_irq = handle_percpu_devid_irq; + } else { + desc->handle_irq = handle_fasteoi_irq; + } + + gic_irq_set_prio(d, GICD_INT_DEF_PRI); +} + +static bool gic_arm64_erratum_2941627_needed(struct irq_data *d) +{ + enum gic_intid_range range; + + if (!static_branch_unlikely(&gic_arm64_2941627_erratum)) + return false; + + range = get_intid_range(d); + + /* + * The workaround is needed if the IRQ is an SPI and + * the target cpu is different from the one we are + * executing on. + */ + return (range == SPI_RANGE || range == ESPI_RANGE) && + !cpumask_test_cpu(raw_smp_processor_id(), + irq_data_get_effective_affinity_mask(d)); +} + +static void gic_eoi_irq(struct irq_data *d) +{ + write_gicreg(gic_irq(d), ICC_EOIR1_EL1); + isb(); + + if (gic_arm64_erratum_2941627_needed(d)) { + /* + * Make sure the GIC stream deactivate packet + * issued by ICC_EOIR1_EL1 has completed before + * deactivating through GICD_IACTIVER. + */ + dsb(sy); + gic_poke_irq(d, GICD_ICACTIVER); + } +} + +static void gic_eoimode1_eoi_irq(struct irq_data *d) +{ + /* + * No need to deactivate an LPI, or an interrupt that + * is is getting forwarded to a vcpu. 
+ */ + if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) + return; + + if (!gic_arm64_erratum_2941627_needed(d)) + gic_write_dir(gic_irq(d)); + else + gic_poke_irq(d, GICD_ICACTIVER); +} + +static int gic_set_type(struct irq_data *d, unsigned int type) +{ + enum gic_intid_range range; + unsigned int irq = gic_irq(d); + void __iomem *base, *rbase; + u32 offset, index, skt; + int ret, i; + unsigned long mpidr; + + range = get_intid_range(d); + + /* Interrupt configuration for SGIs can't be changed */ + if (range == SGI_RANGE) + return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0; + + /* SPIs have restrictions on the supported types */ + if ((range == SPI_RANGE || range == ESPI_RANGE) && + type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) + return -EINVAL; + + offset = convert_offset_index(d, GICD_ICFGR, &index); + + if (gic_irq_in_rdist(d)) { + base = gic_data_rdist_sgi_base(); + ret = gic_configure_irq(index, type, base + offset, gic_redist_wait_for_rwp); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if ((mpidr & 0xffff) == 0) { + rbase = base + 64*SZ_128K; + + for (i = 0; i < 4; i++) { + ret = gic_configure_irq(index, type, rbase + offset, NULL); + gic_do_wait_for_rwp(rbase - SZ_64K); + rbase = rbase + SZ_128K; + } + } + } else { + skt = mars3_irq_to_skt(gic_irq(d)); + base = mars3_gic_dists[skt].dist_base; + ret = gic_configure_irq(index, type, base + offset, NULL); + gic_do_wait_for_rwp(base); + } + + if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) { + /* Misconfigured PPIs are usually not fatal */ + pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq); + ret = 0; + } + + return ret; +} + +static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) +{ + if (get_intid_range(d) == SGI_RANGE) + return -EINVAL; + + if (vcpu) + irqd_set_forwarded_to_vcpu(d); + else + irqd_clr_forwarded_to_vcpu(d); + return 0; +} + +static u64 gic_cpu_to_affinity(int cpu) +{ + u64 mpidr = cpu_logical_map(cpu); + u64 aff; + 
+ /* ASR8601 needs to have its affinities shifted down... */ + if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001)) + mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) | + (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8)); + + aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + + return aff; +} + +static void gic_deactivate_unhandled(u32 irqnr) +{ + if (static_branch_likely(&supports_deactivate_key)) { + if (irqnr < 8192) + gic_write_dir(irqnr); + } else { + write_gicreg(irqnr, ICC_EOIR1_EL1); + isb(); + } +} + +/* + * Follow a read of the IAR with any HW maintenance that needs to happen prior + * to invoking the relevant IRQ handler. We must do two things: + * + * (1) Ensure instruction ordering between a read of IAR and subsequent + * instructions in the IRQ handler using an ISB. + * + * It is possible for the IAR to report an IRQ which was signalled *after* + * the CPU took an IRQ exception as multiple interrupts can race to be + * recognized by the GIC, earlier interrupts could be withdrawn, and/or + * later interrupts could be prioritized by the GIC. + * + * For devices which are tightly coupled to the CPU, such as PMUs, a + * context synchronization event is necessary to ensure that system + * register state is not stale, as these may have been indirectly written + * *after* exception entry. + * + * (2) Deactivate the interrupt when EOI mode 1 is in use. 
+ */ +static inline void gic_complete_ack(u32 irqnr) +{ + if (static_branch_likely(&supports_deactivate_key)) + write_gicreg(irqnr, ICC_EOIR1_EL1); + + isb(); +} + +static bool gic_rpr_is_nmi_prio(void) +{ + if (!gic_supports_nmi_ft2500()) + return false; + + return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI)); +} + +static bool gic_irqnr_is_special(u32 irqnr) +{ + return irqnr >= 1020 && irqnr <= 1023; +} + +static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs) +{ + if (gic_irqnr_is_special(irqnr)) + return; + + gic_complete_ack(irqnr); + + if (generic_handle_domain_irq(gic_data.domain, irqnr)) { + WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr); + gic_deactivate_unhandled(irqnr); + } +} + +static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs) +{ + if (gic_irqnr_is_special(irqnr)) + return; + + gic_complete_ack(irqnr); + + if (generic_handle_domain_nmi(gic_data.domain, irqnr)) { + WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr); + gic_deactivate_unhandled(irqnr); + } +} + +/* + * An exception has been taken from a context with IRQs enabled, and this could + * be an IRQ or an NMI. + * + * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear + * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning, + * after handling any NMI but before handling any IRQ. + * + * The entry code has performed IRQ entry, and if an NMI is detected we must + * perform NMI entry/exit around invoking the handler. 
+ */ +static void __gic_handle_irq_from_irqson(struct pt_regs *regs) +{ + bool is_nmi; + u32 irqnr; + + irqnr = gic_read_iar(); + + is_nmi = gic_rpr_is_nmi_prio(); + + if (is_nmi) { + nmi_enter(); + __gic_handle_nmi(irqnr, regs); + nmi_exit(); + } + + if (gic_prio_masking_enabled()) { + gic_pmr_mask_irqs(); + gic_arch_enable_irqs(); + } + + if (!is_nmi) + __gic_handle_irq(irqnr, regs); +} + +/* + * An exception has been taken from a context with IRQs disabled, which can only + * be an NMI. + * + * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave + * DAIF.IF (and ICC_PMR_EL1) unchanged. + * + * The entry code has performed NMI entry. + */ +static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs) +{ + u64 pmr; + u32 irqnr; + + /* + * We were in a context with IRQs disabled. However, the + * entry code has set PMR to a value that allows any + * interrupt to be acknowledged, and not just NMIs. This can + * lead to surprising effects if the NMI has been retired in + * the meantime, and that there is an IRQ pending. The IRQ + * would then be taken in NMI context, something that nobody + * wants to debug twice. + * + * Until we sort this, drop PMR again to a level that will + * actually only allow NMIs before reading IAR, and then + * restore it to what it was. 
+ */ + pmr = gic_read_pmr(); + gic_pmr_mask_irqs(); + isb(); + irqnr = gic_read_iar(); + gic_write_pmr(pmr); + + __gic_handle_nmi(irqnr, regs); +} + +static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) +{ + if (unlikely(gic_supports_nmi_ft2500() && !interrupts_enabled(regs))) + __gic_handle_irq_from_irqsoff(regs); + else + __gic_handle_irq_from_irqson(regs); +} + +static u32 gic_get_pribits(void) +{ + u32 pribits; + + pribits = gic_read_ctlr(); + pribits &= ICC_CTLR_EL1_PRI_BITS_MASK; + pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT; + pribits++; + + return pribits; +} + +static bool gic_has_group0(void) +{ + u32 val; + u32 old_pmr; + + old_pmr = gic_read_pmr(); + + /* + * Let's find out if Group0 is under control of EL3 or not by + * setting the highest possible, non-zero priority in PMR. + * + * If SCR_EL3.FIQ is set, the priority gets shifted down in + * order for the CPU interface to set bit 7, and keep the + * actual priority in the non-secure range. In the process, it + * looses the least significant bit and the actual priority + * becomes 0x80. Reading it back returns 0, indicating that + * we're don't have access to Group0. + */ + gic_write_pmr(BIT(8 - gic_get_pribits())); + val = gic_read_pmr(); + + gic_write_pmr(old_pmr); + + return val != 0; +} + +static void __init gic_dist_init(void) +{ + unsigned int i; + u64 affinity; + void __iomem *base = gic_data.dist_base; + u32 val, skt; + + for (skt = 0; skt < MAX_MARS3_SOC_COUNT; skt++) { + if (((1U << skt) & mars3_sockets_bitmap) == 0) + continue; + + base = mars3_gic_dists[skt].dist_base; + + /* Disable the distributor */ + writel_relaxed(0, base + GICD_CTLR); + gic_do_wait_for_rwp(base); + + /* + * Configure SPIs as non-secure Group-1. This will only matter + * if the GIC only has a single security state. This will not + * do the right thing if the kernel is running in secure mode, + * but that's not the intended use case anyway. 
+ */ + for (i = 32; i < GIC_LINE_NR; i += 32) + writel_relaxed(~0, base + GICD_IGROUPR + i / 8); + + /* Extended SPI range, not handled by the GICv2/GICv3 common code */ + for (i = 0; i < GIC_ESPI_NR; i += 32) { + writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8); + writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8); + } + + for (i = 0; i < GIC_ESPI_NR; i += 32) + writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8); + + for (i = 0; i < GIC_ESPI_NR; i += 16) + writel_relaxed(0, base + GICD_ICFGRnE + i / 4); + + for (i = 0; i < GIC_ESPI_NR; i += 4) + writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i); + + /* Now do the common stuff */ + gic_dist_config(base, GIC_LINE_NR, NULL); + gic_do_wait_for_rwp(base); + + val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1; + if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) { + pr_info("Enabling SGIs without active state\n"); + val |= GICD_CTLR_nASSGIreq; + } + + /* Enable distributor with ARE, Group1, and wait for it to drain */ + writel_relaxed(val, base + GICD_CTLR); + gic_dist_wait_for_rwp(); + + /* + * Set all global interrupts to the boot CPU only. ARE must be + * enabled. + */ + affinity = gic_cpu_to_affinity(smp_processor_id()); + for (i = 32; i < GIC_LINE_NR; i++) + gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); + + for (i = 0; i < GIC_ESPI_NR; i++) + gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8); + } +} + +static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) +{ + int ret = -ENODEV; + int i; + + for (i = 0; i < gic_data.nr_redist_regions; i++) { + void __iomem *ptr = gic_data.redist_regions[i].redist_base; + u64 typer; + u32 reg; + + reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (reg != GIC_PIDR2_ARCH_GICv3 && + reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... 
*/ + pr_warn("No redistributor present @%p\n", ptr); + break; + } + + do { + typer = gic_read_typer(ptr + GICR_TYPER); + ret = fn(gic_data.redist_regions + i, ptr); + if (!ret) + return 0; + + if (gic_data.redist_regions[i].single_redist) + break; + + if (gic_data.redist_stride) { + ptr += gic_data.redist_stride; + } else { + ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ + if (typer & GICR_TYPER_VLPIS) + ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ + } + } while (!(typer & GICR_TYPER_LAST)); + } + + return ret ? -ENODEV : 0; +} + +static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) +{ + unsigned long mpidr; + u64 typer; + u32 aff; + u32 aff2_skt; + u32 redist_skt; + + /* + * Convert affinity to a 32bit value that can be matched to + * GICR_TYPER bits [63:32]. + */ + mpidr = gic_cpu_to_affinity(smp_processor_id()); + + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + + aff2_skt = MPIDR_AFFINITY_LEVEL(mpidr, 2) & 0x7; + redist_skt = (((u64)region->phys_base >> MARS3_ADDR_SKTID_SHIFT) & 0x7); + + if (aff2_skt != redist_skt) + return 1; + + typer = gic_read_typer(ptr + GICR_TYPER); + if ((typer >> 32) == aff) { + u64 offset = ptr - region->redist_base; + + raw_spin_lock_init(&gic_data_rdist()->rd_lock); + gic_data_rdist_rd_base() = ptr; + gic_data_rdist()->phys_base = region->phys_base + offset; + + pr_info("CPU%d: found redistributor %lx region %d:%pa\n", + smp_processor_id(), mpidr, + (int)(region - gic_data.redist_regions), + &gic_data_rdist()->phys_base); + return 0; + } + + /* Try next one */ + return 1; +} + +static int gic_populate_rdist(void) +{ + if (gic_iterate_rdists(__gic_populate_rdist) == 0) + return 0; + + /* We couldn't even deal with ourselves... 
*/ + WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", + smp_processor_id(), + (unsigned long)cpu_logical_map(smp_processor_id())); + return -ENODEV; +} + +static int __gic_update_rdist_properties(struct redist_region *region, + void __iomem *ptr) +{ + u64 typer = gic_read_typer(ptr + GICR_TYPER); + u32 ctlr = readl_relaxed(ptr + GICR_CTLR); + + /* Boot-time cleanup */ + if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) { + u64 val; + + /* Deactivate any present vPE */ + val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER); + if (val & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + ptr + SZ_128K + GICR_VPENDBASER); + + /* Mark the VPE table as invalid */ + val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_VALID; + gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER); + } + + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); + + /* + * TYPER.RVPEID implies some form of DirectLPI, no matter what the + * doc says... :-/ And CTLR.IR implies another subset of DirectLPI + * that the ITS driver can make use of for LPIs (and not VLPIs). + * + * These are 3 different ways to express the same thing, depending + * on the revision of the architecture and its relaxations over + * time. Just group them under the 'direct_lpi' banner. 
+ */ + gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID); + gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) | + !!(ctlr & GICR_CTLR_IR) | + gic_data.rdists.has_rvpeid); + gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY); + + /* Detect non-sensical configurations */ + if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) { + gic_data.rdists.has_direct_lpi = false; + gic_data.rdists.has_vlpis = false; + gic_data.rdists.has_rvpeid = false; + } + + gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr); + + return 1; +} + +static void gic_update_rdist_properties(void) +{ + gic_data.ppi_nr = UINT_MAX; + gic_iterate_rdists(__gic_update_rdist_properties); + if (WARN_ON(gic_data.ppi_nr == UINT_MAX)) + gic_data.ppi_nr = 0; + pr_info("GICv3 features: %d PPIs%s%s\n", + gic_data.ppi_nr, + gic_data.has_rss ? ", RSS" : "", + gic_data.rdists.has_direct_lpi ? ", DirectLPI" : ""); + + if (gic_data.rdists.has_vlpis) + pr_info("GICv4 features: %s%s%s\n", + gic_data.rdists.has_direct_lpi ? "DirectLPI " : "", + gic_data.rdists.has_rvpeid ? "RVPEID " : "", + gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : ""); +} + +/* Check whether it's single security state view */ +static inline bool gic_dist_security_disabled(void) +{ + return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; +} + +static void gic_cpu_sys_reg_init(void) +{ + int i, cpu = smp_processor_id(); + u64 mpidr = gic_cpu_to_affinity(cpu); + u64 need_rss = MPIDR_RS(mpidr); + bool group0; + u32 pribits; + + /* + * Need to check that the SRE bit has actually been set. If + * not, it means that SRE is disabled at EL2. We're going to + * die painfully, and there is nothing we can do about it. + * + * Kindly inform the luser. 
+ */ + if (!gic_enable_sre()) + pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n"); + + pribits = gic_get_pribits(); + + group0 = gic_has_group0(); + + /* Set priority mask register */ + if (!gic_prio_masking_enabled()) { + write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1); + } else if (gic_supports_nmi_ft2500()) { + /* + * Mismatch configuration with boot CPU, the system is likely + * to die as interrupt masking will not work properly on all + * CPUs + * + * The boot CPU calls this function before enabling NMI support, + * and as a result we'll never see this warning in the boot path + * for that CPU. + */ + if (static_branch_unlikely(&gic_nonsecure_priorities)) + WARN_ON(!group0 || gic_dist_security_disabled()); + else + WARN_ON(group0 && !gic_dist_security_disabled()); + } + + /* + * Some firmwares hand over to the kernel with the BPR changed from + * its reset value (and with a value large enough to prevent + * any pre-emptive interrupts from working at all). Writing a zero + * to BPR restores is reset value. + */ + gic_write_bpr1(0); + + if (static_branch_likely(&supports_deactivate_key)) { + /* EOI drops priority only (mode 1) */ + gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop); + } else { + /* EOI deactivates interrupt too (mode 0) */ + gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir); + } + + /* Always whack Group0 before Group1 */ + if (group0) { + switch (pribits) { + case 8: + case 7: + write_gicreg(0, ICC_AP0R3_EL1); + write_gicreg(0, ICC_AP0R2_EL1); + fallthrough; + case 6: + write_gicreg(0, ICC_AP0R1_EL1); + fallthrough; + case 5: + case 4: + write_gicreg(0, ICC_AP0R0_EL1); + } + + isb(); + } + + switch (pribits) { + case 8: + case 7: + write_gicreg(0, ICC_AP1R3_EL1); + write_gicreg(0, ICC_AP1R2_EL1); + fallthrough; + case 6: + write_gicreg(0, ICC_AP1R1_EL1); + fallthrough; + case 5: + case 4: + write_gicreg(0, ICC_AP1R0_EL1); + } + + isb(); + + /* ... and let's hit the road... 
*/ + gic_write_grpen1(1); + + /* Keep the RSS capability status in per_cpu variable */ + per_cpu(has_rss_ft2500, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS); + + /* Check all the CPUs have capable of sending SGIs to other CPUs */ + for_each_online_cpu(i) { + bool have_rss = per_cpu(has_rss_ft2500, i) && per_cpu(has_rss_ft2500, cpu); + + need_rss |= MPIDR_RS(gic_cpu_to_affinity(i)); + if (need_rss && (!have_rss)) + pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n", + cpu, (unsigned long)mpidr, + i, (unsigned long)gic_cpu_to_affinity(i)); + } + + /** + * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0, + * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED + * UNPREDICTABLE choice of : + * - The write is ignored. + * - The RS field is treated as 0. + */ + if (need_rss && (!gic_data.has_rss)) + pr_crit_once("RSS is required but GICD doesn't support it\n"); +} + +static bool gicv3_nolpi; + +static int __init gicv3_nolpi_cfg(char *buf) +{ + return kstrtobool(buf, &gicv3_nolpi); +} +early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); + +static int gic_dist_supports_lpis(void) +{ + return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && + !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && + !gicv3_nolpi); +} + +static void gic_cpu_init(void) +{ + void __iomem *rbase; + int i; + unsigned long mpidr; + + /* Register ourselves with the rest of the world */ + if (gic_populate_rdist()) + return; + + gic_enable_redist(true); + + WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) && + !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange), + "Distributor has extended ranges, but CPU%d doesn't\n", + smp_processor_id()); + + rbase = gic_data_rdist_sgi_base(); + + /* Configure SGIs/PPIs as non-secure Group-1 */ + for (i = 0; i < gic_data.ppi_nr + 16; i += 32) + writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8); + + gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if 
((mpidr & 0xFFFF) == 0) { // both Aff1 and Aff0 is zero + rbase = rbase + 64*SZ_128K; // skip 64 Redistributors + + for (i = 0; i < 4; i++) { + /* Configure SGIs/PPIs as non-secure Group-1 */ + writel_relaxed(~0, rbase + GICR_IGROUPR0); + + gic_cpu_config(rbase, gic_data.ppi_nr + 16, NULL); + gic_do_wait_for_rwp(rbase - SZ_64K); + + rbase = rbase + SZ_128K; + + } + } + + /* initialise system registers */ + gic_cpu_sys_reg_init(); +} + +#ifdef CONFIG_SMP + +#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) +#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) + +static int gic_starting_cpu(unsigned int cpu) +{ + gic_cpu_init(); + + if (gic_dist_supports_lpis()) + phytium_its_cpu_init(); + + return 0; +} + +static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, + unsigned long cluster_id) +{ + int next_cpu, cpu = *base_cpu; + unsigned long mpidr; + u16 tlist = 0; + + mpidr = gic_cpu_to_affinity(cpu); + + while (cpu < nr_cpu_ids) { + tlist |= 1 << (mpidr & 0xf); + + next_cpu = cpumask_next(cpu, mask); + if (next_cpu >= nr_cpu_ids) + goto out; + cpu = next_cpu; + + mpidr = gic_cpu_to_affinity(cpu); + + if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { + cpu--; + goto out; + } + } +out: + *base_cpu = cpu; + return tlist; +} + +#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ + (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ + << ICC_SGI1R_AFFINITY_## level ##_SHIFT) + +static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) +{ + u64 val; + + val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | + MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | + irq << ICC_SGI1R_SGI_ID_SHIFT | + MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | + MPIDR_TO_SGI_RS(cluster_id) | + tlist << ICC_SGI1R_TARGET_LIST_SHIFT); + + pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); + gic_write_sgi1r(val); +} + +static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) +{ + int cpu; + + if (WARN_ON(d->hwirq >= 16)) + return; + + /* 
+ * Ensure that stores to Normal memory are visible to the + * other CPUs before issuing the IPI. + */ + dsb(ishst); + + for_each_cpu(cpu, mask) { + u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu)); + u16 tlist; + + tlist = gic_compute_target_list(&cpu, mask, cluster_id); + gic_send_sgi(cluster_id, tlist, d->hwirq); + } + + /* Force the above writes to ICC_SGI1R_EL1 to be executed */ + isb(); +} + +static void __init gic_smp_init(void) +{ + struct irq_fwspec sgi_fwspec = { + .fwnode = gic_data.fwnode, + .param_count = 1, + }; + int base_sgi; + + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, + "irqchip/arm/gicv3:starting", + gic_starting_cpu, NULL); + + /* Register all 8 non-secure SGIs */ + base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec); + if (WARN_ON(base_sgi <= 0)) + return; + + set_smp_ipi_range(base_sgi, 8); +} + +static int gic_cpumask_select(struct irq_data *d, const struct cpumask *mask_val) +{ + unsigned int skt, irq_skt, i; + unsigned int cpu, cpus = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + irq_skt = mars3_irq_to_skt(gic_irq(d)); + + if (irq_skt != 0) + for (i = 0; i < irq_skt; i++) + cpus += skt_cpu_cnt[i]; + + cpu = cpumask_any_and(mask_val, cpu_online_mask); + cpus = cpus + cpu % skt_cpu_cnt[irq_skt]; + + return cpus; +} + +static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + unsigned int cpu; + u32 offset, index; + void __iomem *reg; + int enabled; + u64 val; + unsigned int skt; + + if (force) + cpu = cpumask_first(mask_val); + else + cpu = gic_cpumask_select(d, mask_val); + + if (cpu >= nr_cpu_ids) + return -EINVAL; + + if (gic_irq_in_rdist(d)) + return -EINVAL; + + /* If interrupt was enabled, 
disable it first */ + enabled = gic_peek_irq(d, GICD_ISENABLER); + if (enabled) + gic_mask_irq(d); + + offset = convert_offset_index(d, GICD_IROUTER, &index); + + skt = mars3_irq_to_skt(gic_irq(d)); + reg = mars3_gic_dists[skt].dist_base + offset + GICD_IROUTER + (index * 8); + reg = gic_dist_base(d) + offset + (index * 8); + val = gic_cpu_to_affinity(cpu); + + gic_write_irouter(val, reg); + + /* + * If the interrupt was enabled, enabled it again. Otherwise, + * just wait for the distributor to have digested our changes. + */ + if (enabled) + gic_unmask_irq(d); + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK_DONE; +} +#else +#define gic_set_affinity NULL +#define gic_ipi_send_mask NULL +#define gic_smp_init() do { } while (0) +#endif + +static int gic_retrigger(struct irq_data *data) +{ + return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); +} + +#ifdef CONFIG_CPU_PM +static int gic_cpu_pm_notifier(struct notifier_block *self, + unsigned long cmd, void *v) +{ + if (cmd == CPU_PM_EXIT) { + if (gic_dist_security_disabled()) + gic_enable_redist(true); + gic_cpu_sys_reg_init(); + } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) { + gic_write_grpen1(0); + gic_enable_redist(false); + } + return NOTIFY_OK; +} + +static struct notifier_block gic_cpu_pm_notifier_block = { + .notifier_call = gic_cpu_pm_notifier, +}; + +static void gic_cpu_pm_init(void) +{ + cpu_pm_register_notifier(&gic_cpu_pm_notifier_block); +} + +#else +static inline void gic_cpu_pm_init(void) { } +#endif /* CONFIG_CPU_PM */ + +static struct irq_chip gic_chip = { + .name = "GIC-phytium-2500", + .irq_mask = gic_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoi_irq, + .irq_set_type = gic_set_type, + .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_nmi_setup = gic_irq_nmi_setup, + 
.irq_nmi_teardown = gic_irq_nmi_teardown, + .ipi_send_mask = gic_ipi_send_mask, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static struct irq_chip gic_eoimode1_chip = { + .name = "GICv3-phytium-2500", + .irq_mask = gic_eoimode1_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_eoimode1_eoi_irq, + .irq_set_type = gic_set_type, + .irq_set_affinity = gic_set_affinity, + .irq_retrigger = gic_retrigger, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, + .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, + .irq_nmi_setup = gic_irq_nmi_setup, + .irq_nmi_teardown = gic_irq_nmi_teardown, + .ipi_send_mask = gic_ipi_send_mask, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + struct irq_chip *chip = &gic_chip; + struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); + + if (static_branch_likely(&supports_deactivate_key)) + chip = &gic_eoimode1_chip; + + switch (__get_intid_range(hw)) { + case SGI_RANGE: + case PPI_RANGE: + case EPPI_RANGE: + irq_set_percpu_devid(irq); + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_percpu_devid_irq, NULL, NULL); + break; + + case SPI_RANGE: + case ESPI_RANGE: + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + irq_set_probe(irq); + irqd_set_single_target(irqd); + break; + + case LPI_RANGE: + if (!gic_dist_supports_lpis()) + return -EPERM; + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + break; + + default: + return -EPERM; + } + + /* Prevents SW retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); + return 0; +} + +static int gic_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int 
*type) +{ + if (fwspec->param_count == 1 && fwspec->param[0] < 16) { + *hwirq = fwspec->param[0]; + *type = IRQ_TYPE_EDGE_RISING; + return 0; + } + + if (is_of_node(fwspec->fwnode)) { + if (fwspec->param_count < 3) + return -EINVAL; + + switch (fwspec->param[0]) { + case 0: /* SPI */ + *hwirq = fwspec->param[1] + 32; + break; + case 1: /* PPI */ + *hwirq = fwspec->param[1] + 16; + break; + case 2: /* ESPI */ + *hwirq = fwspec->param[1] + ESPI_BASE_INTID; + break; + case 3: /* EPPI */ + *hwirq = fwspec->param[1] + EPPI_BASE_INTID; + break; + case GIC_IRQ_TYPE_LPI: /* LPI */ + *hwirq = fwspec->param[1]; + break; + case GIC_IRQ_TYPE_PARTITION: + *hwirq = fwspec->param[1]; + if (fwspec->param[1] >= 16) + *hwirq += EPPI_BASE_INTID - 16; + else + *hwirq += 16; + break; + default: + return -EINVAL; + } + + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + /* + * Make it clear that broken DTs are... broken. + * Partitioned PPIs are an unfortunate exception. + */ + WARN_ON(*type == IRQ_TYPE_NONE && + fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); + return 0; + } + + if (is_fwnode_irqchip(fwspec->fwnode)) { + if (fwspec->param_count != 2) + return -EINVAL; + + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + + WARN_ON(*type == IRQ_TYPE_NONE); + return 0; + } + + return -EINVAL; +} + +static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + + ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + ret = gic_irq_domain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } + + return 0; +} + +static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ 
+ int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + } +} + +static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec, + irq_hw_number_t hwirq) +{ + enum gic_intid_range range; + + if (!gic_data.ppi_descs) + return false; + + if (!is_of_node(fwspec->fwnode)) + return false; + + if (fwspec->param_count < 4 || !fwspec->param[3]) + return false; + + range = __get_intid_range(hwirq); + if (range != PPI_RANGE && range != EPPI_RANGE) + return false; + + return true; +} + +static int gic_irq_domain_select(struct irq_domain *d, + struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + unsigned int type, ret, ppi_idx; + irq_hw_number_t hwirq; + + /* Not for us */ + if (fwspec->fwnode != d->fwnode) + return 0; + + /* If this is not DT, then we have a single domain */ + if (!is_of_node(fwspec->fwnode)) + return 1; + + ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type); + if (WARN_ON_ONCE(ret)) + return 0; + + if (!fwspec_is_partitioned_ppi(fwspec, hwirq)) + return d == gic_data.domain; + + /* + * If this is a PPI and we have a 4th (non-null) parameter, + * then we need to match the partition domain. 
+ */ + ppi_idx = __gic_get_ppi_index(hwirq); + return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]); +} + +static const struct irq_domain_ops gic_irq_domain_ops = { + .translate = gic_irq_domain_translate, + .alloc = gic_irq_domain_alloc, + .free = gic_irq_domain_free, + .select = gic_irq_domain_select, +}; + +static int partition_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + unsigned long ppi_intid; + struct device_node *np; + unsigned int ppi_idx; + int ret; + + if (!gic_data.ppi_descs) + return -ENOMEM; + + np = of_find_node_by_phandle(fwspec->param[3]); + if (WARN_ON(!np)) + return -EINVAL; + + ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type); + if (WARN_ON_ONCE(ret)) + return 0; + + ppi_idx = __gic_get_ppi_index(ppi_intid); + ret = partition_translate_id(gic_data.ppi_descs[ppi_idx], + of_node_to_fwnode(np)); + if (ret < 0) + return ret; + + *hwirq = ret; + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static const struct irq_domain_ops partition_domain_ops = { + .translate = partition_domain_translate, + .select = gic_irq_domain_select, +}; + +static bool gic_enable_quirk_msm8996(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996; + + return true; +} + +static bool gic_enable_quirk_mtk_gicr(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE; + + return true; +} + +static bool gic_enable_quirk_cavium_38539(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539; + + return true; +} + +static bool gic_enable_quirk_hip06_07(void *data) +{ + struct gic_chip_data *d = data; + + /* + * HIP06 GICD_IIDR clashes with GIC-600 product number (despite + * not being an actual ARM implementation). The saving grace is + * that GIC-600 doesn't have ESPI, so nothing to do in that case. 
+ * HIP07 doesn't even have a proper IIDR, and still pretends to + * have ESPI. In both cases, put them right. + */ + if (d->rdists.gicd_typer & GICD_TYPER_ESPI) { + /* Zero both ESPI and the RES0 field next to it... */ + d->rdists.gicd_typer &= ~GENMASK(9, 8); + return true; + } + + return false; +} + +#define T241_CHIPN_MASK GENMASK_ULL(45, 44) +#define T241_CHIP_GICDA_OFFSET 0x1580000 +#define SMCCC_SOC_ID_T241 0x036b0241 + +static bool gic_enable_quirk_nvidia_t241(void *data) +{ + s32 soc_id = arm_smccc_get_soc_id_version(); + unsigned long chip_bmask = 0; + phys_addr_t phys; + u32 i; + + /* Check JEP106 code for NVIDIA T241 chip (036b:0241) */ + if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241)) + return false; + + /* Find the chips based on GICR regions PHYS addr */ + for (i = 0; i < gic_data.nr_redist_regions; i++) { + chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK, + (u64)gic_data.redist_regions[i].phys_base)); + } + + if (hweight32(chip_bmask) < 3) + return false; + + /* Setup GICD alias regions */ + for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) { + if (chip_bmask & BIT(i)) { + phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET; + phys |= FIELD_PREP(T241_CHIPN_MASK, i); + t241_dist_base_alias[i] = ioremap(phys, SZ_64K); + WARN_ON_ONCE(!t241_dist_base_alias[i]); + } + } + static_branch_enable(&gic_nvidia_t241_erratum); + return true; +} + +static bool gic_enable_quirk_asr8601(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001; + + return true; +} + +static bool gic_enable_quirk_arm64_2941627(void *data) +{ + static_branch_enable(&gic_arm64_2941627_erratum); + return true; +} + +static bool rd_set_non_coherent(void *data) +{ + struct gic_chip_data *d = data; + + d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + +static const struct gic_quirk gic_quirks[] = { + { + .desc = "GICv3: Qualcomm MSM8996 broken firmware", + .compatible = "qcom,msm8996-gic-v3", + .init = 
gic_enable_quirk_msm8996, + }, + { + .desc = "GICv3: ASR erratum 8601001", + .compatible = "asr,asr8601-gic-v3", + .init = gic_enable_quirk_asr8601, + }, + { + .desc = "GICv3: Mediatek Chromebook GICR save problem", + .property = "mediatek,broken-save-restore-fw", + .init = gic_enable_quirk_mtk_gicr, + }, + { + .desc = "GICv3: HIP06 erratum 161010803", + .iidr = 0x0204043b, + .mask = 0xffffffff, + .init = gic_enable_quirk_hip06_07, + }, + { + .desc = "GICv3: HIP07 erratum 161010803", + .iidr = 0x00000000, + .mask = 0xffffffff, + .init = gic_enable_quirk_hip06_07, + }, + { + /* + * Reserved register accesses generate a Synchronous + * External Abort. This erratum applies to: + * - ThunderX: CN88xx + * - OCTEON TX: CN83xx, CN81xx + * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx* + */ + .desc = "GICv3: Cavium erratum 38539", + .iidr = 0xa000034c, + .mask = 0xe8f00fff, + .init = gic_enable_quirk_cavium_38539, + }, + { + .desc = "GICv3: NVIDIA erratum T241-FABRIC-4", + .iidr = 0x0402043b, + .mask = 0xffffffff, + .init = gic_enable_quirk_nvidia_t241, + }, + { + /* + * GIC-700: 2941627 workaround - IP variant [0,1] + * + */ + .desc = "GICv3: ARM64 erratum 2941627", + .iidr = 0x0400043b, + .mask = 0xff0e0fff, + .init = gic_enable_quirk_arm64_2941627, + }, + { + /* + * GIC-700: 2941627 workaround - IP variant [2] + */ + .desc = "GICv3: ARM64 erratum 2941627", + .iidr = 0x0402043b, + .mask = 0xff0f0fff, + .init = gic_enable_quirk_arm64_2941627, + }, + { + .desc = "GICv3: non-coherent attribute", + .property = "dma-noncoherent", + .init = rd_set_non_coherent, + }, + { + } +}; + +static void gic_enable_nmi_support(void) +{ + int i; + + if (!gic_prio_masking_enabled()) + return; + + if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) { + pr_warn("Skipping NMI enable due to firmware issues\n"); + return; + } + + ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL); + if (!ppi_nmi_refs) + return; + + for (i = 0; i < gic_data.ppi_nr; i++) + 
refcount_set(&ppi_nmi_refs[i], 0); + + pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n", + gic_has_relaxed_pmr_sync() ? "relaxed" : "forced"); + + /* + * How priority values are used by the GIC depends on two things: + * the security state of the GIC (controlled by the GICD_CTRL.DS bit) + * and if Group 0 interrupts can be delivered to Linux in the non-secure + * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the + * ICC_PMR_EL1 register and the priority that software assigns to + * interrupts: + * + * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority + * ----------------------------------------------------------- + * 1 | - | unchanged | unchanged + * ----------------------------------------------------------- + * 0 | 1 | non-secure | non-secure + * ----------------------------------------------------------- + * 0 | 0 | unchanged | non-secure + * + * where non-secure means that the value is right-shifted by one and the + * MSB bit set, to make it fit in the non-secure priority range. + * + * In the first two cases, where ICC_PMR_EL1 and the interrupt priority + * are both either modified or unchanged, we can use the same set of + * priorities. + * + * In the last case, where only the interrupt priorities are modified to + * be in the non-secure range, we use a different PMR value to mask IRQs + * and the rest of the values that we use remain unchanged. 
+ */ + if (gic_has_group0() && !gic_dist_security_disabled()) + static_branch_enable(&gic_nonsecure_priorities); + + static_branch_enable(&supports_pseudo_nmis_ft2500); + + if (static_branch_likely(&supports_deactivate_key)) + gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; + else + gic_chip.flags |= IRQCHIP_SUPPORTS_NMI; +} + +static int __init gic_init_bases(phys_addr_t dist_phys_base, + void __iomem *dist_base, + struct redist_region *rdist_regs, + u32 nr_redist_regions, + u64 redist_stride, + struct fwnode_handle *handle) +{ + u32 typer; + int err; + + if (!is_hyp_mode_available()) + static_branch_disable(&supports_deactivate_key); + + if (static_branch_likely(&supports_deactivate_key)) + pr_info("GIC: Using split EOI/Deactivate mode\n"); + + gic_data.fwnode = handle; + gic_data.dist_phys_base = dist_phys_base; + gic_data.dist_base = dist_base; + gic_data.redist_regions = rdist_regs; + gic_data.nr_redist_regions = nr_redist_regions; + gic_data.redist_stride = redist_stride; + + /* + * Find out how many interrupts are supported. + */ + typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); + gic_data.rdists.gicd_typer = typer; + + gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR), + gic_quirks, &gic_data); + + pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); + pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); + + /* + * ThunderX1 explodes on reading GICD_TYPER2, in violation of the + * architecture spec (which says that reserved registers are RES0). 
+ */ + if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539)) + gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); + + gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, + &gic_data); + gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); + if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) { + /* Disable GICv4.x features for the erratum T241-FABRIC-4 */ + gic_data.rdists.has_rvpeid = true; + gic_data.rdists.has_vlpis = true; + gic_data.rdists.has_direct_lpi = true; + gic_data.rdists.has_vpend_valid_dirty = true; + } + + if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { + err = -ENOMEM; + goto out_free; + } + + irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); + + gic_data.has_rss = !!(typer & GICD_TYPER_RSS); + + if (typer & GICD_TYPER_MBIS) { + err = mbi_init(handle, gic_data.domain); + if (err) + pr_err("Failed to initialize MBIs\n"); + } + + set_handle_irq(gic_handle_irq); + + gic_update_rdist_properties(); + + gic_dist_init(); + gic_cpu_init(); + gic_smp_init(); + gic_cpu_pm_init(); + + if (gic_dist_supports_lpis()) { + phytium_its_init(handle, &gic_data.rdists, gic_data.domain); + phytium_its_cpu_init(); + its_lpi_memreserve_init(); + } else { + if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) + gicv2m_init(handle, gic_data.domain); + } + + gic_enable_nmi_support(); + + return 0; + +out_free: + if (gic_data.domain) + irq_domain_remove(gic_data.domain); + free_percpu(gic_data.rdists.rdist); + return err; +} + +static int __init gic_validate_dist_version(void __iomem *dist_base) +{ + u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + + if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) + return -ENODEV; + + return 0; +} + +/* Create all possible partitions at boot time */ +static void __init gic_populate_ppi_partitions(struct device_node *gic_node) +{ + struct device_node *parts_node, *child_part; + int part_idx = 0, i; + int 
nr_parts; + struct partition_affinity *parts; + + parts_node = of_get_child_by_name(gic_node, "ppi-partitions"); + if (!parts_node) + return; + + gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); + if (!gic_data.ppi_descs) + goto out_put_node; + + nr_parts = of_get_child_count(parts_node); + + if (!nr_parts) + goto out_put_node; + + parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); + if (WARN_ON(!parts)) + goto out_put_node; + + for_each_child_of_node(parts_node, child_part) { + struct partition_affinity *part; + int n; + + part = &parts[part_idx]; + + part->partition_id = of_node_to_fwnode(child_part); + + pr_info("GIC: PPI partition %pOFn[%d] { ", + child_part, part_idx); + + n = of_property_count_elems_of_size(child_part, "affinity", + sizeof(u32)); + WARN_ON(n <= 0); + + for (i = 0; i < n; i++) { + int err, cpu; + u32 cpu_phandle; + struct device_node *cpu_node; + + err = of_property_read_u32_index(child_part, "affinity", + i, &cpu_phandle); + if (WARN_ON(err)) + continue; + + cpu_node = of_find_node_by_phandle(cpu_phandle); + if (WARN_ON(!cpu_node)) + continue; + + cpu = of_cpu_node_to_id(cpu_node); + if (WARN_ON(cpu < 0)) { + of_node_put(cpu_node); + continue; + } + + pr_info("%pOF[%d] ", cpu_node, cpu); + + cpumask_set_cpu(cpu, &part->mask); + of_node_put(cpu_node); + } + + pr_info("}\n"); + part_idx++; + } + + for (i = 0; i < gic_data.ppi_nr; i++) { + unsigned int irq; + struct partition_desc *desc; + struct irq_fwspec ppi_fwspec = { + .fwnode = gic_data.fwnode, + .param_count = 3, + .param = { + [0] = GIC_IRQ_TYPE_PARTITION, + [1] = i, + [2] = IRQ_TYPE_NONE, + }, + }; + + irq = irq_create_fwspec_mapping(&ppi_fwspec); + if (WARN_ON(!irq)) + continue; + desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, + irq, &partition_domain_ops); + if (WARN_ON(!desc)) + continue; + + gic_data.ppi_descs[i] = desc; + } + +out_put_node: + of_node_put(parts_node); +} + +static void __init gic_of_setup_kvm_info(struct 
device_node *node) +{ + int ret; + struct resource r; + u32 gicv_idx; + + gic_v3_kvm_info.type = GIC_V3; + + gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0); + if (!gic_v3_kvm_info.maint_irq) + return; + + if (of_property_read_u32(node, "#redistributor-regions", + &gicv_idx)) + gicv_idx = 1; + + gicv_idx += 3; /* Also skip GICD, GICC, GICH */ + ret = of_address_to_resource(node, gicv_idx, &r); + if (!ret) + gic_v3_kvm_info.vcpu = r; + + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + vgic_set_kvm_info(&gic_v3_kvm_info); +} + +static void gic_request_region(resource_size_t base, resource_size_t size, + const char *name) +{ + if (!request_mem_region(base, size, name)) + pr_warn_once(FW_BUG "%s region %pa has overlapping address\n", + name, &base); +} + +static void __iomem *gic_of_iomap(struct device_node *node, int idx, + const char *name, struct resource *res) +{ + void __iomem *base; + int ret; + + ret = of_address_to_resource(node, idx, res); + if (ret) + return IOMEM_ERR_PTR(ret); + + gic_request_region(res->start, resource_size(res), name); + base = of_iomap(node, idx); + + return base ?: IOMEM_ERR_PTR(-ENOMEM); +} + +static int __init gic_of_init(struct device_node *node, struct device_node *parent) +{ + phys_addr_t dist_phys_base; + void __iomem *dist_base; + struct redist_region *rdist_regs; + struct resource res; + u64 redist_stride; + u32 nr_redist_regions; + int err, i; + unsigned long skt; + + dist_base = gic_of_iomap(node, 0, "GICD", &res); + if (IS_ERR(dist_base)) { + pr_err("%pOF: unable to map gic dist registers\n", node); + return PTR_ERR(dist_base); + } + + dist_phys_base = res.start; + + err = gic_validate_dist_version(dist_base); + if (err) { + pr_err("%pOF: no distributor detected, giving up\n", node); + goto out_unmap_dist; + } + + if (of_address_to_resource(node, 0, &res)) { + pr_err("Error: No GIC Distributor in FDT\n"); + goto out_unmap_dist; + } + + 
mars3_gic_dists[0].phys_base = res.start; + mars3_gic_dists[0].size = resource_size(&res); + mars3_gic_dists[0].dist_base = dist_base; + + if (of_property_read_u32(node, "#mars3_soc_bitmap", &mars3_sockets_bitmap)) + mars3_sockets_bitmap = 0x1; + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if (((1U << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | + mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, + mars3_gic_dists[skt].size); + } + + if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) + nr_redist_regions = 1; + + rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs), + GFP_KERNEL); + if (!rdist_regs) { + err = -ENOMEM; + goto out_unmap_dist; + } + + for (i = 0; i < nr_redist_regions; i++) { + rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res); + if (IS_ERR(rdist_regs[i].redist_base)) { + pr_err("%pOF: couldn't map region %d\n", node, i); + err = -ENODEV; + goto out_unmap_rdist; + } + rdist_regs[i].phys_base = res.start; + } + + if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) + redist_stride = 0; + + err = gic_init_bases(dist_phys_base, dist_base, rdist_regs, + nr_redist_regions, redist_stride, &node->fwnode); + if (err) + goto out_unmap_rdist; + + gic_populate_ppi_partitions(node); + + if (static_branch_likely(&supports_deactivate_key)) + gic_of_setup_kvm_info(node); + return 0; + +out_unmap_rdist: + for (i = 0; i < nr_redist_regions; i++) + if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base)) + iounmap(rdist_regs[i].redist_base); + kfree(rdist_regs); +out_unmap_dist: + iounmap(dist_base); + return err; +} + +IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init); + +#ifdef CONFIG_ACPI +static struct +{ + void __iomem *dist_base; + struct redist_region *redist_regs; + u32 
nr_redist_regions; + bool single_redist; + int enabled_rdists; + u32 maint_irq; + int maint_irq_mode; + phys_addr_t vcpu_base; +} acpi_data __initdata; + +static int gic_mars3_sockets_bitmap(void) +{ + unsigned int skt, i; + int skt_bitmap = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + for (i = 0; i < MAX_MARS3_SOC_COUNT; i++) + if (skt_cpu_cnt[i] > 0) + skt_bitmap |= (1 << i); + + return skt_bitmap; +} + +static void __init +gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base) +{ + static int count; + + acpi_data.redist_regs[count].phys_base = phys_base; + acpi_data.redist_regs[count].redist_base = redist_base; + acpi_data.redist_regs[count].single_redist = acpi_data.single_redist; + count++; +} + +static int __init +gic_acpi_parse_madt_redist(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_redistributor *redist = + (struct acpi_madt_generic_redistributor *)header; + void __iomem *redist_base; + + redist_base = ioremap(redist->base_address, redist->length); + if (!redist_base) { + pr_err("Couldn't map GICR region @%llx\n", redist->base_address); + return -ENOMEM; + } + gic_request_region(redist->base_address, redist->length, "GICR"); + + gic_acpi_register_redist(redist->base_address, redist_base); + return 0; +} + +static int __init +gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? 
SZ_64K * 4 : SZ_64K * 2; + void __iomem *redist_base; + + /* GICC entry which has !ACPI_MADT_ENABLED is not unusable so skip */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + redist_base = ioremap(gicc->gicr_base_address, size); + if (!redist_base) + return -ENOMEM; + gic_request_region(gicc->gicr_base_address, size, "GICR"); + + gic_acpi_register_redist(gicc->gicr_base_address, redist_base); + return 0; +} + +static int __init gic_acpi_collect_gicr_base(void) +{ + acpi_tbl_entry_handler redist_parser; + enum acpi_madt_type type; + + if (acpi_data.single_redist) { + type = ACPI_MADT_TYPE_GENERIC_INTERRUPT; + redist_parser = gic_acpi_parse_madt_gicc; + } else { + type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR; + redist_parser = gic_acpi_parse_madt_redist; + } + + /* Collect redistributor base addresses in GICR entries */ + if (acpi_table_parse_madt(type, redist_parser, 0) > 0) + return 0; + + pr_info("No valid GICR entries exist\n"); + return -ENODEV; +} + +static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header, + const unsigned long end) +{ + /* Subtable presence means that redist exists, that's it */ + return 0; +} + +static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + + /* + * If GICC is enabled and has valid gicr base address, then it means + * GICR base is presented via GICC + */ + if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) { + acpi_data.enabled_rdists++; + return 0; + } + + /* + * It's perfectly valid firmware can pass disabled GICC entry, driver + * should not treat as errors, skip the entry instead of probe fail. + */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + return -ENODEV; +} + +static int __init gic_acpi_count_gicr_regions(void) +{ + int count; + + /* + * Count how many redistributor regions we have. 
It is not allowed + * to mix redistributor description, GICR and GICC subtables have to be + * mutually exclusive. + */ + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, + gic_acpi_match_gicr, 0); + if (count > 0) { + acpi_data.single_redist = false; + return count; + } + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_match_gicc, 0); + if (count > 0) { + acpi_data.single_redist = true; + count = acpi_data.enabled_rdists; + } + + return count; +} + +static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, + struct acpi_probe_entry *ape) +{ + struct acpi_madt_generic_distributor *dist; + int count; + + dist = (struct acpi_madt_generic_distributor *)header; + if (dist->version != ape->driver_data) + return false; + + /* We need to do that exercise anyway, the sooner the better */ + count = gic_acpi_count_gicr_regions(); + if (count <= 0) + return false; + + acpi_data.nr_redist_regions = count; + return true; +} + +static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + int maint_irq_mode; + static int first_madt = true; + + /* Skip unusable CPUs */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ? 
+ ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE; + + if (first_madt) { + first_madt = false; + + acpi_data.maint_irq = gicc->vgic_interrupt; + acpi_data.maint_irq_mode = maint_irq_mode; + acpi_data.vcpu_base = gicc->gicv_base_address; + + return 0; + } + + /* + * The maintenance interrupt and GICV should be the same for every CPU + */ + if ((acpi_data.maint_irq != gicc->vgic_interrupt) || + (acpi_data.maint_irq_mode != maint_irq_mode) || + (acpi_data.vcpu_base != gicc->gicv_base_address)) + return -EINVAL; + + return 0; +} + +static bool __init gic_acpi_collect_virt_info(void) +{ + int count; + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_parse_virt_madt_gicc, 0); + + return (count > 0); +} + +#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K) +#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K) +#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K) + +static void __init gic_acpi_setup_kvm_info(void) +{ + int irq; + + if (!gic_acpi_collect_virt_info()) { + pr_warn("Unable to get hardware information used for virtualization\n"); + return; + } + + gic_v3_kvm_info.type = GIC_V3; + + irq = acpi_register_gsi(NULL, acpi_data.maint_irq, + acpi_data.maint_irq_mode, + ACPI_ACTIVE_HIGH); + if (irq <= 0) + return; + + gic_v3_kvm_info.maint_irq = irq; + + if (acpi_data.vcpu_base) { + struct resource *vcpu = &gic_v3_kvm_info.vcpu; + + vcpu->flags = IORESOURCE_MEM; + vcpu->start = acpi_data.vcpu_base; + vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; + } + + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + vgic_set_kvm_info(&gic_v3_kvm_info); +} + +static struct fwnode_handle *gsi_domain_handle; + +static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi) +{ + return gsi_domain_handle; +} + +static int __init +gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_generic_distributor *dist; + size_t size; + int i, err; + int skt; + + /* Get distributor base address 
*/ + dist = (struct acpi_madt_generic_distributor *)header; + acpi_data.dist_base = ioremap(dist->base_address, + ACPI_GICV3_DIST_MEM_SIZE); + if (!acpi_data.dist_base) { + pr_err("Unable to map GICD registers\n"); + return -ENOMEM; + } + gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD"); + + err = gic_validate_dist_version(acpi_data.dist_base); + if (err) { + pr_err("No distributor detected at @%p, giving up\n", + acpi_data.dist_base); + goto out_dist_unmap; + } + + mars3_gic_dists[0].phys_base = dist->base_address; + mars3_gic_dists[0].size = ACPI_GICV3_DIST_MEM_SIZE; + mars3_gic_dists[0].dist_base = acpi_data.dist_base; + +#ifdef CONFIG_ACPI + mars3_sockets_bitmap = gic_mars3_sockets_bitmap(); + if (mars3_sockets_bitmap == 0) { + mars3_sockets_bitmap = 0x1; + pr_err("No socket, please check cpus MPIDR_AFFINITY_LEVEL!!!"); + } else + pr_info("mars3_sockets_bitmap = 0x%x\n", mars3_sockets_bitmap); +#endif + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if (((1U << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | + mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, + mars3_gic_dists[skt].size); + } + + size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions; + acpi_data.redist_regs = kzalloc(size, GFP_KERNEL); + if (!acpi_data.redist_regs) { + err = -ENOMEM; + goto out_dist_unmap; + } + + err = gic_acpi_collect_gicr_base(); + if (err) + goto out_redist_unmap; + + gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address); + if (!gsi_domain_handle) { + err = -ENOMEM; + goto out_redist_unmap; + } + + err = gic_init_bases(dist->base_address, acpi_data.dist_base, + acpi_data.redist_regs, acpi_data.nr_redist_regions, + 0, gsi_domain_handle); + if (err) + goto out_fwhandle_free; + + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, 
gic_v3_get_gsi_domain_id); + + if (static_branch_likely(&supports_deactivate_key)) + gic_acpi_setup_kvm_info(); + + return 0; + +out_fwhandle_free: + irq_domain_free_fwnode(gsi_domain_handle); +out_redist_unmap: + for (i = 0; i < acpi_data.nr_redist_regions; i++) + if (acpi_data.redist_regs[i].redist_base) + iounmap(acpi_data.redist_regs[i].redist_base); + kfree(acpi_data.redist_regs); +out_dist_unmap: + iounmap(acpi_data.dist_base); + return err; +} +IRQCHIP_ACPI_DECLARE(gic_phyt_2500, ACPI_MADT_TYPE_PHYTIUM_2500, + acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3, + gic_acpi_init); +#endif diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 3751ae69432f..8104c262bbae 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -897,7 +897,8 @@ enum acpi_madt_type { ACPI_MADT_TYPE_APLIC = 26, ACPI_MADT_TYPE_PLIC = 27, ACPI_MADT_TYPE_RESERVED = 28, /* 28 to 0x7F are reserved */ - ACPI_MADT_TYPE_OEM_RESERVED = 0x80 /* 0x80 to 0xFF are reserved for OEM use */ + ACPI_MADT_TYPE_OEM_RESERVED = 0x80, /* 0x80 to 0xFF are reserved for OEM use */ + ACPI_MADT_TYPE_PHYTIUM_2500 = 128 }; /* diff --git a/include/linux/irqchip/arm-gic-phytium-2500.h b/include/linux/irqchip/arm-gic-phytium-2500.h new file mode 100644 index 000000000000..f212a29390bf --- /dev/null +++ b/include/linux/irqchip/arm-gic-phytium-2500.h @@ -0,0 +1,661 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier + */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H +#define __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H + +/* + * Distributor registers. We assume we're running non-secure, with ARE + * being set. Secure-only and non-ARE registers are not described. 
+ */ +#define GICD_CTLR 0x0000 +#define GICD_TYPER 0x0004 +#define GICD_IIDR 0x0008 +#define GICD_TYPER2 0x000C +#define GICD_STATUSR 0x0010 +#define GICD_SETSPI_NSR 0x0040 +#define GICD_CLRSPI_NSR 0x0048 +#define GICD_SETSPI_SR 0x0050 +#define GICD_CLRSPI_SR 0x0058 +#define GICD_IGROUPR 0x0080 +#define GICD_ISENABLER 0x0100 +#define GICD_ICENABLER 0x0180 +#define GICD_ISPENDR 0x0200 +#define GICD_ICPENDR 0x0280 +#define GICD_ISACTIVER 0x0300 +#define GICD_ICACTIVER 0x0380 +#define GICD_IPRIORITYR 0x0400 +#define GICD_ICFGR 0x0C00 +#define GICD_IGRPMODR 0x0D00 +#define GICD_NSACR 0x0E00 +#define GICD_IGROUPRnE 0x1000 +#define GICD_ISENABLERnE 0x1200 +#define GICD_ICENABLERnE 0x1400 +#define GICD_ISPENDRnE 0x1600 +#define GICD_ICPENDRnE 0x1800 +#define GICD_ISACTIVERnE 0x1A00 +#define GICD_ICACTIVERnE 0x1C00 +#define GICD_IPRIORITYRnE 0x2000 +#define GICD_ICFGRnE 0x3000 +#define GICD_IROUTER 0x6000 +#define GICD_IROUTERnE 0x8000 +#define GICD_IDREGS 0xFFD0 +#define GICD_PIDR2 0xFFE8 + +#define ESPI_BASE_INTID 4096 + +/* + * Those registers are actually from GICv2, but the spec demands that they + * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3). 
+ */ +#define GICD_ITARGETSR 0x0800 +#define GICD_SGIR 0x0F00 +#define GICD_CPENDSGIR 0x0F10 +#define GICD_SPENDSGIR 0x0F20 + +#define GICD_CTLR_RWP (1U << 31) +#define GICD_CTLR_nASSGIreq (1U << 8) +#define GICD_CTLR_DS (1U << 6) +#define GICD_CTLR_ARE_NS (1U << 4) +#define GICD_CTLR_ENABLE_G1A (1U << 1) +#define GICD_CTLR_ENABLE_G1 (1U << 0) + +#define GICD_IIDR_IMPLEMENTER_SHIFT 0 +#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) +#define GICD_IIDR_REVISION_SHIFT 12 +#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT) +#define GICD_IIDR_VARIANT_SHIFT 16 +#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT) +#define GICD_IIDR_PRODUCT_ID_SHIFT 24 +#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT) + + +/* + * In systems with a single security state (what we emulate in KVM) + * the meaning of the interrupt group enable bits is slightly different + */ +#define GICD_CTLR_ENABLE_SS_G1 (1U << 1) +#define GICD_CTLR_ENABLE_SS_G0 (1U << 0) + +#define GICD_TYPER_RSS (1U << 26) +#define GICD_TYPER_LPIS (1U << 17) +#define GICD_TYPER_MBIS (1U << 16) +#define GICD_TYPER_ESPI (1U << 8) + +#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) +#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) +#define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32) +#define GICD_TYPER_ESPIS(typer) \ + (((typer) & GICD_TYPER_ESPI) ? 
GICD_TYPER_SPIS((typer) >> 27) : 0) + +#define GICD_TYPER2_nASSGIcap (1U << 8) +#define GICD_TYPER2_VIL (1U << 7) +#define GICD_TYPER2_VID GENMASK(4, 0) + +#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) +#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) + +#define GIC_PIDR2_ARCH_MASK 0xf0 +#define GIC_PIDR2_ARCH_GICv3 0x30 +#define GIC_PIDR2_ARCH_GICv4 0x40 + +#define GIC_V3_DIST_SIZE 0x10000 + +#define GIC_PAGE_SIZE_4K 0ULL +#define GIC_PAGE_SIZE_16K 1ULL +#define GIC_PAGE_SIZE_64K 2ULL +#define GIC_PAGE_SIZE_MASK 3ULL + +/* + * Re-Distributor registers, offsets from RD_base + */ +#define GICR_CTLR GICD_CTLR +#define GICR_IIDR 0x0004 +#define GICR_TYPER 0x0008 +#define GICR_STATUSR GICD_STATUSR +#define GICR_WAKER 0x0014 +#define GICR_SETLPIR 0x0040 +#define GICR_CLRLPIR 0x0048 +#define GICR_PROPBASER 0x0070 +#define GICR_PENDBASER 0x0078 +#define GICR_INVLPIR 0x00A0 +#define GICR_INVALLR 0x00B0 +#define GICR_SYNCR 0x00C0 +#define GICR_IDREGS GICD_IDREGS +#define GICR_PIDR2 GICD_PIDR2 + +#define GICR_CTLR_ENABLE_LPIS (1UL << 0) +#define GICR_CTLR_CES (1UL << 1) +#define GICR_CTLR_IR (1UL << 2) +#define GICR_CTLR_RWP (1UL << 3) + +#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) + +#define EPPI_BASE_INTID 1056 + +#define GICR_TYPER_NR_PPIS(r) \ + ({ \ + unsigned int __ppinum = ((r) >> 27) & 0x1f; \ + unsigned int __nr_ppis = 16; \ + if (__ppinum == 1 || __ppinum == 2) \ + __nr_ppis += __ppinum * 32; \ + \ + __nr_ppis; \ + }) + +#define GICR_WAKER_ProcessorSleep (1U << 1) +#define GICR_WAKER_ChildrenAsleep (1U << 2) + +#define GIC_BASER_CACHE_nCnB 0ULL +#define GIC_BASER_CACHE_SameAsInner 0ULL +#define GIC_BASER_CACHE_nC 1ULL +#define GIC_BASER_CACHE_RaWt 2ULL +#define GIC_BASER_CACHE_RaWb 3ULL +#define GIC_BASER_CACHE_WaWt 4ULL +#define GIC_BASER_CACHE_WaWb 5ULL +#define GIC_BASER_CACHE_RaWaWt 6ULL +#define GIC_BASER_CACHE_RaWaWb 7ULL +#define GIC_BASER_CACHE_MASK 7ULL +#define GIC_BASER_NonShareable 0ULL +#define GIC_BASER_InnerShareable 1ULL +#define 
GIC_BASER_OuterShareable 2ULL +#define GIC_BASER_SHAREABILITY_MASK 3ULL + +#define GIC_BASER_CACHEABILITY(reg, inner_outer, type) \ + (GIC_BASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT) + +#define GIC_BASER_SHAREABILITY(reg, type) \ + (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT) + +/* encode a size field of width @w containing @n - 1 units */ +#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0)) + +#define GICR_PROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, SHAREABILITY_MASK) +#define GICR_PROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, MASK) +#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, MASK) +#define GICR_PROPBASER_CACHEABILITY_MASK GICR_PROPBASER_INNER_CACHEABILITY_MASK + +#define GICR_PROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable) + +#define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB) +#define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC) +#define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) +#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) +#define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt) +#define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb) +#define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt) +#define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb) + +#define GICR_PROPBASER_IDBITS_MASK (0x1f) +#define GICR_PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 12)) +#define GICR_PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 16)) + +#define GICR_PENDBASER_SHAREABILITY_SHIFT (10) +#define 
GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, SHAREABILITY_MASK) +#define GICR_PENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, MASK) +#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, MASK) +#define GICR_PENDBASER_CACHEABILITY_MASK GICR_PENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_PENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable) + +#define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB) +#define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC) +#define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) +#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) +#define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt) +#define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb) +#define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt) +#define GICR_PENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWb) + +#define GICR_PENDBASER_PTZ BIT_ULL(62) + +/* + * Re-Distributor registers, offsets from SGI_base + */ +#define GICR_IGROUPR0 GICD_IGROUPR +#define GICR_ISENABLER0 GICD_ISENABLER +#define GICR_ICENABLER0 GICD_ICENABLER +#define GICR_ISPENDR0 GICD_ISPENDR +#define GICR_ICPENDR0 GICD_ICPENDR +#define GICR_ISACTIVER0 GICD_ISACTIVER +#define GICR_ICACTIVER0 GICD_ICACTIVER +#define GICR_IPRIORITYR0 GICD_IPRIORITYR +#define GICR_ICFGR0 GICD_ICFGR +#define GICR_IGRPMODR0 GICD_IGRPMODR +#define GICR_NSACR GICD_NSACR + +#define GICR_TYPER_PLPIS (1U << 0) +#define GICR_TYPER_VLPIS (1U << 1) +#define GICR_TYPER_DIRTY (1U << 2) +#define GICR_TYPER_DirectLPIS (1U << 3) +#define GICR_TYPER_LAST (1U << 4) +#define GICR_TYPER_RVPEID (1U << 7) +#define 
GICR_TYPER_COMMON_LPI_AFF GENMASK_ULL(25, 24) +#define GICR_TYPER_AFFINITY GENMASK_ULL(63, 32) + +#define GICR_INVLPIR_INTID GENMASK_ULL(31, 0) +#define GICR_INVLPIR_VPEID GENMASK_ULL(47, 32) +#define GICR_INVLPIR_V GENMASK_ULL(63, 63) + +#define GICR_INVALLR_VPEID GICR_INVLPIR_VPEID +#define GICR_INVALLR_V GICR_INVLPIR_V + +#define GIC_V3_REDIST_SIZE 0x20000 + +#define LPI_PROP_GROUP1 (1 << 1) +#define LPI_PROP_ENABLED (1 << 0) + +/* + * Re-Distributor registers, offsets from VLPI_base + */ +#define GICR_VPROPBASER 0x0070 + +#define GICR_VPROPBASER_IDBITS_MASK 0x1f + +#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56) + +#define GICR_VPROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK) +#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK) +#define GICR_VPROPBASER_CACHEABILITY_MASK \ + GICR_VPROPBASER_INNER_CACHEABILITY_MASK + +#define GICR_VPROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable) + +#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB) +#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC) +#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) +#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb) +#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt) +#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb) +#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt) +#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb) + +/* + * GICv4.1 VPROPBASER reinvention. 
A subtle mix between the old + * VPROPBASER and ITS_BASER. Just not quite any of the two. + */ +#define GICR_VPROPBASER_4_1_VALID (1ULL << 63) +#define GICR_VPROPBASER_4_1_ENTRY_SIZE GENMASK_ULL(61, 59) +#define GICR_VPROPBASER_4_1_INDIRECT (1ULL << 55) +#define GICR_VPROPBASER_4_1_PAGE_SIZE GENMASK_ULL(54, 53) +#define GICR_VPROPBASER_4_1_Z (1ULL << 52) +#define GICR_VPROPBASER_4_1_ADDR GENMASK_ULL(51, 12) +#define GICR_VPROPBASER_4_1_SIZE GENMASK_ULL(6, 0) + +#define GICR_VPENDBASER 0x0078 + +#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_VPENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK) +#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK) +#define GICR_VPENDBASER_CACHEABILITY_MASK \ + GICR_VPENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_VPENDBASER_NonShareable \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) + +#define GICR_VPENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable) + +#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) +#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) +#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) +#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb) +#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt) +#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb) +#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt) +#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb) + +#define GICR_VPENDBASER_Dirty (1ULL << 60) +#define 
GICR_VPENDBASER_PendingLast (1ULL << 61) +#define GICR_VPENDBASER_IDAI (1ULL << 62) +#define GICR_VPENDBASER_Valid (1ULL << 63) + +/* + * GICv4.1 VPENDBASER, used for VPE residency. On top of these fields, + * also use the above Valid, PendingLast and Dirty. + */ +#define GICR_VPENDBASER_4_1_DB (1ULL << 62) +#define GICR_VPENDBASER_4_1_VGRP0EN (1ULL << 59) +#define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58) +#define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0) + +#define GICR_VSGIR 0x0080 + +#define GICR_VSGIR_VPEID GENMASK(15, 0) + +#define GICR_VSGIPENDR 0x0088 + +#define GICR_VSGIPENDR_BUSY (1U << 31) +#define GICR_VSGIPENDR_PENDING GENMASK(15, 0) + +/* + * ITS registers, offsets from ITS_base + */ +#define GITS_CTLR 0x0000 +#define GITS_IIDR 0x0004 +#define GITS_TYPER 0x0008 +#define GITS_MPIDR 0x0018 +#define GITS_CBASER 0x0080 +#define GITS_CWRITER 0x0088 +#define GITS_CREADR 0x0090 +#define GITS_BASER 0x0100 +#define GITS_IDREGS_BASE 0xffd0 +#define GITS_PIDR0 0xffe0 +#define GITS_PIDR1 0xffe4 +#define GITS_PIDR2 GICR_PIDR2 +#define GITS_PIDR4 0xffd0 +#define GITS_CIDR0 0xfff0 +#define GITS_CIDR1 0xfff4 +#define GITS_CIDR2 0xfff8 +#define GITS_CIDR3 0xfffc + +#define GITS_TRANSLATER 0x10040 + +#define GITS_SGIR 0x20020 + +#define GITS_SGIR_VPEID GENMASK_ULL(47, 32) +#define GITS_SGIR_VINTID GENMASK_ULL(3, 0) + +#define GITS_CTLR_ENABLE (1U << 0) +#define GITS_CTLR_ImDe (1U << 1) +#define GITS_CTLR_ITS_NUMBER_SHIFT 4 +#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT) +#define GITS_CTLR_QUIESCENT (1U << 31) + +#define GITS_TYPER_PLPIS (1UL << 0) +#define GITS_TYPER_VLPIS (1UL << 1) +#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 +#define GITS_TYPER_ITT_ENTRY_SIZE GENMASK_ULL(7, 4) +#define GITS_TYPER_IDBITS_SHIFT 8 +#define GITS_TYPER_DEVBITS_SHIFT 13 +#define GITS_TYPER_DEVBITS GENMASK_ULL(17, 13) +#define GITS_TYPER_PTA (1UL << 19) +#define GITS_TYPER_HCC_SHIFT 24 +#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff) +#define 
GITS_TYPER_VMOVP (1ULL << 37) +#define GITS_TYPER_VMAPP (1ULL << 40) +#define GITS_TYPER_SVPET GENMASK_ULL(42, 41) + +#define GITS_IIDR_REV_SHIFT 12 +#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) +#define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf) +#define GITS_IIDR_PRODUCTID_SHIFT 24 + +#define GITS_CBASER_VALID (1ULL << 63) +#define GITS_CBASER_SHAREABILITY_SHIFT (10) +#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) +#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_CBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_CBASER, SHAREABILITY_MASK) +#define GITS_CBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, MASK) +#define GITS_CBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, OUTER, MASK) +#define GITS_CBASER_CACHEABILITY_MASK GITS_CBASER_INNER_CACHEABILITY_MASK + +#define GITS_CBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_CBASER, InnerShareable) + +#define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB) +#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC) +#define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) +#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb) +#define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt) +#define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb) +#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) +#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) + +#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12)) + +#define GITS_BASER_NR_REGS 8 + +#define GITS_BASER_VALID (1ULL << 63) +#define GITS_BASER_INDIRECT (1ULL << 62) + +#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) +#define GITS_BASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_BASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, INNER, MASK) +#define GITS_BASER_CACHEABILITY_MASK 
GITS_BASER_INNER_CACHEABILITY_MASK +#define GITS_BASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, MASK) +#define GITS_BASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_BASER, SHAREABILITY_MASK) + +#define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB) +#define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC) +#define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) +#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) +#define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt) +#define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb) +#define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt) +#define GITS_BASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWb) + +#define GITS_BASER_TYPE_SHIFT (56) +#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) +#define GITS_BASER_ENTRY_SIZE_SHIFT (48) +#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) +#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) +#define GITS_BASER_PHYS_52_to_48(phys) \ + (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12) +#define GITS_BASER_ADDR_48_to_52(baser) \ + (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48) + +#define GITS_BASER_SHAREABILITY_SHIFT (10) +#define GITS_BASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) +#define GITS_BASER_PAGE_SIZE_SHIFT (8) +#define __GITS_BASER_PSZ(sz) (GIC_PAGE_SIZE_ ## sz << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_4K __GITS_BASER_PSZ(4K) +#define GITS_BASER_PAGE_SIZE_16K __GITS_BASER_PSZ(16K) +#define GITS_BASER_PAGE_SIZE_64K __GITS_BASER_PSZ(64K) +#define GITS_BASER_PAGE_SIZE_MASK __GITS_BASER_PSZ(MASK) +#define GITS_BASER_PAGES_MAX 256 +#define GITS_BASER_PAGES_SHIFT (0) +#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1) + +#define GITS_BASER_TYPE_NONE 0 +#define GITS_BASER_TYPE_DEVICE 1 
+#define GITS_BASER_TYPE_VCPU 2 +#define GITS_BASER_TYPE_RESERVED3 3 +#define GITS_BASER_TYPE_COLLECTION 4 +#define GITS_BASER_TYPE_RESERVED5 5 +#define GITS_BASER_TYPE_RESERVED6 6 +#define GITS_BASER_TYPE_RESERVED7 7 + +#define GITS_LVL1_ENTRY_SIZE (8UL) + +/* + * ITS commands + */ +#define GITS_CMD_MAPD 0x08 +#define GITS_CMD_MAPC 0x09 +#define GITS_CMD_MAPTI 0x0a +#define GITS_CMD_MAPI 0x0b +#define GITS_CMD_MOVI 0x01 +#define GITS_CMD_DISCARD 0x0f +#define GITS_CMD_INV 0x0c +#define GITS_CMD_MOVALL 0x0e +#define GITS_CMD_INVALL 0x0d +#define GITS_CMD_INT 0x03 +#define GITS_CMD_CLEAR 0x04 +#define GITS_CMD_SYNC 0x05 + +/* + * GICv4 ITS specific commands + */ +#define GITS_CMD_GICv4(x) ((x) | 0x20) +#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL) +#define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC) +#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI) +#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI) +#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC) +/* VMOVP, VSGI and INVDB are the odd ones, as they dont have a physical counterpart */ +#define GITS_CMD_VMOVP GITS_CMD_GICv4(2) +#define GITS_CMD_VSGI GITS_CMD_GICv4(3) +#define GITS_CMD_INVDB GITS_CMD_GICv4(0xe) + +/* + * ITS error numbers + */ +#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107 +#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109 +#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307 +#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507 +#define E_ITS_MAPD_DEVICE_OOR 0x010801 +#define E_ITS_MAPD_ITTSIZE_OOR 0x010802 +#define E_ITS_MAPC_PROCNUM_OOR 0x010902 +#define E_ITS_MAPC_COLLECTION_OOR 0x010903 +#define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04 +#define E_ITS_MAPTI_ID_OOR 0x010a05 +#define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06 +#define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07 +#define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09 +#define E_ITS_MOVALL_PROCNUM_OOR 0x010e01 +#define E_ITS_DISCARD_UNMAPPED_INTERRUPT 0x010f07 + +/* + * CPU interface registers + */ +#define ICC_CTLR_EL1_EOImode_SHIFT (1) 
+#define ICC_CTLR_EL1_EOImode_drop_dir (0U << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_EOImode_drop (1U << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_CBPR_SHIFT 0 +#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT) +#define ICC_CTLR_EL1_PMHE_SHIFT 6 +#define ICC_CTLR_EL1_PMHE_MASK (1 << ICC_CTLR_EL1_PMHE_SHIFT) +#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8 +#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT) +#define ICC_CTLR_EL1_ID_BITS_SHIFT 11 +#define ICC_CTLR_EL1_ID_BITS_MASK (0x7 << ICC_CTLR_EL1_ID_BITS_SHIFT) +#define ICC_CTLR_EL1_SEIS_SHIFT 14 +#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT) +#define ICC_CTLR_EL1_A3V_SHIFT 15 +#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) +#define ICC_CTLR_EL1_RSS (0x1 << 18) +#define ICC_CTLR_EL1_ExtRange (0x1 << 19) +#define ICC_PMR_EL1_SHIFT 0 +#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) +#define ICC_BPR0_EL1_SHIFT 0 +#define ICC_BPR0_EL1_MASK (0x7 << ICC_BPR0_EL1_SHIFT) +#define ICC_BPR1_EL1_SHIFT 0 +#define ICC_BPR1_EL1_MASK (0x7 << ICC_BPR1_EL1_SHIFT) +#define ICC_IGRPEN0_EL1_SHIFT 0 +#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT) +#define ICC_IGRPEN1_EL1_SHIFT 0 +#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT) +#define ICC_SRE_EL1_DIB (1U << 2) +#define ICC_SRE_EL1_DFB (1U << 1) +#define ICC_SRE_EL1_SRE (1U << 0) + +/* These are for GICv2 emulation only */ +#define GICH_LR_VIRTUALID (0x3ffUL << 0) +#define GICH_LR_PHYSID_CPUID_SHIFT (10) +#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) + +#define ICC_IAR1_EL1_SPURIOUS 0x3ff + +#define ICC_SRE_EL2_SRE (1 << 0) +#define ICC_SRE_EL2_ENABLE (1 << 3) + +#define ICC_SGI1R_TARGET_LIST_SHIFT 0 +#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT) +#define ICC_SGI1R_AFFINITY_1_SHIFT 16 +#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) +#define 
ICC_SGI1R_SGI_ID_SHIFT 24 +#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT) +#define ICC_SGI1R_AFFINITY_2_SHIFT 32 +#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) +#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 +#define ICC_SGI1R_RS_SHIFT 44 +#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT) +#define ICC_SGI1R_AFFINITY_3_SHIFT 48 +#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) + +#include + +#ifndef __ASSEMBLY__ + +/* + * We need a value to serve as a irq-type for LPIs. Choose one that will + * hopefully pique the interest of the reviewer. + */ +#define GIC_IRQ_TYPE_LPI 0xa110c8ed + +struct rdists { + struct { + raw_spinlock_t rd_lock; + void __iomem *rd_base; + struct page *pend_page; + phys_addr_t phys_base; + u64 flags; + cpumask_t *vpe_table_mask; + void *vpe_l1_base; + } __percpu *rdist; + phys_addr_t prop_table_pa; + void *prop_table_va; + u64 flags; + u32 gicd_typer; + u32 gicd_typer2; + int cpuhp_memreserve_state; + bool has_vlpis; + bool has_rvpeid; + bool has_direct_lpi; + bool has_vpend_valid_dirty; +}; + +struct irq_domain; +struct fwnode_handle; +int __init its_lpi_memreserve_init(void); +int phytium_its_cpu_init(void); +int phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists, + struct irq_domain *domain); +int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent); + +static inline bool gic_enable_sre(void) +{ + u32 val; + + val = gic_read_sre(); + if (val & ICC_SRE_EL1_SRE) + return true; + + val |= ICC_SRE_EL1_SRE; + gic_write_sre(val); + val = gic_read_sre(); + + return !!(val & ICC_SRE_EL1_SRE); +} + +#endif + +#endif -- Gitee From aa395058b3e0918446552b739f92c84b8c88b057 Mon Sep 17 00:00:00 2001 From: liyuting Date: Thu, 21 Mar 2024 11:49:51 +0800 Subject: [PATCH 0447/2138] anolis: kdump: Add kdump support for Phytium S2500 ANBZ: #8558 phytium inclusion category: feature CVE: NA --------------------------------------------------------- Add kdump 
support for Phytium S2500 Signed-off-by: cuifulong Signed-off-by: liyuting Reviewed-by: Guanghui Feng Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/2872 --- arch/arm64/include/asm/cputype.h | 4 +++ arch/arm64/kernel/smp.c | 34 ++++++++++++++++++++++ drivers/irqchip/irq-gic-phytium-2500-its.c | 32 ++++++++++++++++++++ drivers/irqchip/irq-gic-phytium-2500.c | 18 ++++++++++++ 4 files changed, 88 insertions(+) diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 488f8e751349..c1613a9e329b 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -54,6 +54,7 @@ #define ARM_CPU_IMP_ARM 0x41 #define ARM_CPU_IMP_APM 0x50 #define ARM_CPU_IMP_CAVIUM 0x43 +#define ARM_CPU_IMP_PHYTIUM 0x70 #define ARM_CPU_IMP_BRCM 0x42 #define ARM_CPU_IMP_QCOM 0x51 #define ARM_CPU_IMP_NVIDIA 0x4E @@ -98,6 +99,7 @@ #define APM_CPU_PART_XGENE 0x000 #define APM_CPU_VAR_POTENZA 0x00 +#define PHYTIUM_CPU_PART_2500 0X663 #define CAVIUM_CPU_PART_THUNDERX 0x0A1 #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2 @@ -153,6 +155,8 @@ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) +#define MIDR_FT_2500 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2500) + #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35) #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 14365ef84244..0199ee17ef56 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -503,6 +504,34 @@ static bool bootcpu_valid __initdata; static unsigned int cpu_count = 1; 
#ifdef CONFIG_ACPI + +#ifdef CONFIG_ARCH_PHYTIUM +/* + * On phytium S2500 multi-socket server, for example 2-socket(2P), there are + * socket0 and socket1 on the server: + * If storage device(like SAS controller and disks to save vmcore into) is + * installed on socket1 and second kernel brings up 2 CPUs both on socket0 with + * nr_cpus=2, then vmcore will fail to be saved into the disk as interrupts like + * SPI and LPI(except SGI) can't communicate across cpu sockets in this server + * platform. + * To avoid this issue, Bypass other non-cpu0 to ensure that each cpu0 on each + * socket can boot up and handle interrupt when booting the second kernel. + */ +static bool __init is_phytium_kdump_cpu_need_bypass(u64 hwid) +{ + if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) != MIDR_FT_2500) + return false; + + /* + * Bypass other non-cpu0 to ensure second kernel can bring up each cpu0 + * on each socket + */ + if (is_kdump_kernel() && (hwid & 0xffff) != (cpu_logical_map(0) & 0xffff)) + return true; + return false; +} +#endif + static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS]; struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu) @@ -552,6 +581,11 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) if (cpu_count >= NR_CPUS) return; +#ifdef CONFIG_ARCH_PHYTIUM + if (is_phytium_kdump_cpu_need_bypass(hwid)) + return; +#endif + /* map the logical cpu id to cpu MPIDR */ set_cpu_logical_map(cpu_count, hwid); diff --git a/drivers/irqchip/irq-gic-phytium-2500-its.c b/drivers/irqchip/irq-gic-phytium-2500-its.c index d1ecf059a39f..5685f5f901a1 100644 --- a/drivers/irqchip/irq-gic-phytium-2500-its.c +++ b/drivers/irqchip/irq-gic-phytium-2500-its.c @@ -1719,6 +1719,21 @@ static int its_cpumask_select(struct its_device *its_dev, cpu = cpumask_any_and(mask_val, cpu_mask); cpus = cpus + cpu % skt_cpu_cnt[skt_id]; + if (is_kdump_kernel()) { + skt = (cpu_logical_map(cpu) >> 16) & 0xff; + if (skt_id == skt) + return cpu; + + for (i = 0; i 
< nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) { + if (skt_id == skt) + return i; + } else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + } + return cpus; } @@ -3056,6 +3071,9 @@ static bool enabled_lpis_allowed(void) phys_addr_t addr; u64 val; + if (is_kdump_kernel()) + return true; + /* Check whether the property table is in a reserved region */ val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); addr = val & GENMASK_ULL(51, 12); @@ -3704,6 +3722,20 @@ static int its_cpumask_first(struct its_device *its_dev, if ((cpu > cpus) && (cpu < (cpus + skt_cpu_cnt[skt_id]))) cpus = cpu; + if (is_kdump_kernel()) { + skt = (cpu_logical_map(cpu) >> 16) & 0xff; + if (skt_id == skt) + return cpu; + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) { + if (skt_id == skt) + return i; + } else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + } + return cpus; } diff --git a/drivers/irqchip/irq-gic-phytium-2500.c b/drivers/irqchip/irq-gic-phytium-2500.c index f9f3b591be00..dbeeb795b581 100644 --- a/drivers/irqchip/irq-gic-phytium-2500.c +++ b/drivers/irqchip/irq-gic-phytium-2500.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -1566,6 +1567,20 @@ static int gic_cpumask_select(struct irq_data *d, const struct cpumask *mask_val cpu = cpumask_any_and(mask_val, cpu_online_mask); cpus = cpus + cpu % skt_cpu_cnt[irq_skt]; + if (is_kdump_kernel()) { + skt = (cpu_logical_map(cpu) >> 16) & 0xff; + if (irq_skt == skt) + return cpu; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) { + if (irq_skt == skt) + return i; + } else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + } return cpus; } @@ -2833,6 +2848,9 @@ gic_acpi_init(union 
acpi_subtable_headers *header, const unsigned long end) #ifdef CONFIG_ACPI mars3_sockets_bitmap = gic_mars3_sockets_bitmap(); + if (is_kdump_kernel()) + mars3_sockets_bitmap = 0x3; + if (mars3_sockets_bitmap == 0) { mars3_sockets_bitmap = 0x1; pr_err("No socket, please check cpus MPIDR_AFFINITY_LEVEL!!!"); -- Gitee From 3be2aa7f4128b7c14da2ef176f302bc14412fbd7 Mon Sep 17 00:00:00 2001 From: liyuting Date: Thu, 21 Mar 2024 13:56:37 +0800 Subject: [PATCH 0448/2138] anolis: iommu: Add iommu support for Phytium S2500 ANBZ: #8558 phytium inclusion category: feature CVE: NA --------------------------------------------------------- Add iommu support for Phytium S2500 Signed-off-by: cuifulong Signed-off-by: liyuting Reviewed-by: Guanghui Feng Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/2872 --- arch/arm64/include/asm/cputype.h | 9 +++++ .../arm64/include/asm/phytium_machine_types.h | 37 +++++++++++++++++++ drivers/iommu/arm/arm-smmu/arm-smmu.c | 25 ++++++++++++- drivers/irqchip/irq-gic-v3-its.c | 9 +++++ drivers/pci/quirks.c | 7 ++++ 5 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/include/asm/phytium_machine_types.h diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index c1613a9e329b..dbafb40d051a 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -55,6 +55,7 @@ #define ARM_CPU_IMP_APM 0x50 #define ARM_CPU_IMP_CAVIUM 0x43 #define ARM_CPU_IMP_PHYTIUM 0x70 + #define ARM_CPU_IMP_BRCM 0x42 #define ARM_CPU_IMP_QCOM 0x51 #define ARM_CPU_IMP_NVIDIA 0x4E @@ -99,6 +100,10 @@ #define APM_CPU_PART_XGENE 0x000 #define APM_CPU_VAR_POTENZA 0x00 +#define PHYTIUM_CPU_PART_1500A 0X660 +#define PHYTIUM_CPU_PART_2000AHK 0X661 +#define PHYTIUM_CPU_PART_2000PLUS 0X662 +#define PHYTIUM_CPU_PART_2004 0X663 #define PHYTIUM_CPU_PART_2500 0X663 #define CAVIUM_CPU_PART_THUNDERX 0x0A1 @@ -155,6 +160,10 @@ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, 
ARM_CPU_PART_CORTEX_A72) #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) +#define MIDR_FT_1500A MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_1500A) +#define MIDR_FT_2000AHK MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2000AHK) +#define MIDR_FT_2000PLUS MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2000PLUS) +#define MIDR_FT_2004 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2004) #define MIDR_FT_2500 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2500) #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35) diff --git a/arch/arm64/include/asm/phytium_machine_types.h b/arch/arm64/include/asm/phytium_machine_types.h new file mode 100644 index 000000000000..8aed50daca4b --- /dev/null +++ b/arch/arm64/include/asm/phytium_machine_types.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Authors: Wang Yinfeng . + */ + +#ifndef _MACHINE_TYPE_H_ +#define _MACHINE_TYPE_H_ + +#include +#include + +static inline bool phytium_part(u32 cpuid) +{ + return ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == cpuid); +} + +#define typeof_ft1500a() phytium_part(MIDR_FT_1500A) +#define typeof_ft2000ahk() phytium_part(MIDR_FT_2000AHK) +#define typeof_ft2000plus() phytium_part(MIDR_FT_2000PLUS) +#define typeof_ft2004() phytium_part(MIDR_FT_2004) +#define typeof_s2500() phytium_part(MIDR_FT_2500) + +#endif diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 42c5012ba8aa..818d4d2344db 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -35,6 +35,10 @@ #include #include +#ifdef CONFIG_ARCH_PHYTIUM +#include +#endif + #include #include "arm-smmu.h" @@ -51,6 +55,7 @@ #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 +#define SMR_MASK_SHIFT 16 static int force_stage; module_param(force_stage, int, S_IRUGO); @@ 
-1374,6 +1379,19 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) return ERR_PTR(-ENODEV); } +#ifdef CONFIG_ARCH_PHYTIUM +#define FWID_READ(id) (((u16)(id) >> 3) | (((id) >> SMR_MASK_SHIFT | 0x7000) << SMR_MASK_SHIFT)) + if (typeof_ft2000plus()) { + int num = fwspec->num_ids; + + for (i = 0; i < num; i++) { + u32 fwid = FWID_READ(fwspec->ids[i]); + + iommu_fwspec_add_ids(dev, &fwid, 1); + } + } +#endif + ret = -EINVAL; for (i = 0; i < fwspec->num_ids; i++) { u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); @@ -1469,7 +1487,12 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev) mutex_unlock(&smmu->stream_map_mutex); return ERR_PTR(-EINVAL); } - +#ifdef CONFIG_ARCH_PHYTIUM + if (typeof_s2500()) + break; + if (typeof_ft2000plus() && !smmu->s2crs[idx].group) + continue; +#endif group = smmu->s2crs[idx].group; } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index b1e60c13c1e1..5cacfafb76dc 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -37,6 +37,10 @@ #include #include +#ifdef CONFIG_ARCH_PHYTIUM +#include +#endif + #include "irq-gic-common.h" #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) @@ -1736,6 +1740,11 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) msg->address_hi = upper_32_bits(addr); msg->data = its_get_event_id(d); +#ifdef CONFIG_ARCH_PHYTIUM + if (typeof_ft2000plus()) + return; +#endif + iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg); } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 12ddf42b141b..00bdb5fd8218 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -5113,6 +5113,13 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs }, +#ifdef CONFIG_ARCH_PHYTIUM + /* because PLX switch Vendor id is 0x10b5 on 
phytium cpu */ + { 0x10b5, PCI_ANY_ID, pci_quirk_xgene_acs }, + /* because rootcomplex Vendor id is 0x17cd on phytium cpu */ + { 0x17cd, PCI_ANY_ID, pci_quirk_xgene_acs }, +#endif + /* Broadcom multi-function device */ { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs }, -- Gitee From 110922c614a6662809a8189676e0cec5d166ba8a Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 12 Mar 2024 11:41:06 +0800 Subject: [PATCH 0449/2138] anolis: Add support for Zhaoxin GMI SM3 Secure Hash algorithm ANBZ: #7809 This SM3 algorithm driver is developed to support the SM3 instruction, making user develop their applications with both high performance and high security. Block-size 16 64 256 1024 2048 4096 8192 SM3-Generic 254.52 607.60 1055.30 1268.67 1314.55 1323.60 1379.98 SM3-GMI 505.99 1412.57 3191.53 4635.25 4993.05 5156.04 5250.53 Signed-off-by: leoliu-oc Reviewed-by: Tianjia Zhang Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2703 --- arch/x86/crypto/Kconfig | 14 ++ arch/x86/crypto/Makefile | 2 + arch/x86/crypto/sm3-zhaoxin-gmi.c | 198 +++++++++++++++++++++++++++++ arch/x86/include/asm/cpufeatures.h | 2 + 4 files changed, 216 insertions(+) create mode 100644 arch/x86/crypto/sm3-zhaoxin-gmi.c diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig index 9bbfd01cfa2f..7b105b70c664 100644 --- a/arch/x86/crypto/Kconfig +++ b/arch/x86/crypto/Kconfig @@ -477,6 +477,20 @@ config CRYPTO_SM3_AVX_X86_64 If unsure, say N. +config CRYPTO_SM3_ZHAOXIN_GMI + tristate "Hash functions: SM3 (Zhaoxin GMI)" + depends on X86 && CRYPTO + default m + select CRYPTO_HASH + select CRYPTO_SM3 + help + SM3 cipher algorithms (Zhaoxin GMI Instruction). + + SM3 secure hash function as defined by OSCCA GM/T 0004-2012 SM3). + It is part of the Chinese Commercial Cryptography suite. + + If unsure, say N. 
+ config CRYPTO_GHASH_CLMUL_NI_INTEL tristate "Hash functions: GHASH (CLMUL-NI)" depends on X86 && 64BIT diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 9aa46093c91b..63b373b5ebe5 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -109,6 +109,8 @@ aria-aesni-avx2-x86_64-y := aria-aesni-avx2-asm_64.o aria_aesni_avx2_glue.o obj-$(CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64) += aria-gfni-avx512-x86_64.o aria-gfni-avx512-x86_64-y := aria-gfni-avx512-asm_64.o aria_gfni_avx512_glue.o +obj-$(CONFIG_CRYPTO_SM3_ZHAOXIN_GMI) += sm3-zhaoxin-gmi.o + quiet_cmd_perlasm = PERLASM $@ cmd_perlasm = $(PERL) $< > $@ $(obj)/%.S: $(src)/%.pl FORCE diff --git a/arch/x86/crypto/sm3-zhaoxin-gmi.c b/arch/x86/crypto/sm3-zhaoxin-gmi.c new file mode 100644 index 000000000000..e393133d572d --- /dev/null +++ b/arch/x86/crypto/sm3-zhaoxin-gmi.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sm3_zhaoxin_gmi.c - wrapper code for Zhaoxin GMI. + * + * Copyright (C) 2023 Shanghai Zhaoxin Semiconductor LTD. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +const u8 zx_sm3_zero_message_hash[SM3_DIGEST_SIZE] = { + 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, + 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F, + 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74, + 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B +}; +EXPORT_SYMBOL_GPL(zx_sm3_zero_message_hash); + +/* + * Load supported features of the CPU to see if the SM3/SM4 is available. 
+ */ +static int gmi_available(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + u32 eax, edx; + + if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || + ((c->x86 == 6) && (c->x86_model == 0x09)) || + (c->x86 > 6)) { + if (!boot_cpu_has(X86_FEATURE_CCS) || !boot_cpu_has(X86_FEATURE_CCS_EN)) { + + eax = 0xC0000001; + __asm__ __volatile__ ("cpuid":"=d"(edx):"a"(eax) : ); + + if ((edx & 0x0030) != 0x0030) + return -ENODEV; + + pr_notice("GMI SM3 detected by CPUID\n"); + return 0; + } + pr_notice("GMI SM3 is available\n"); + return 0; + } + return -ENODEV; +} + +void sm3_generic_block_fn(struct sm3_state *sst, const u8 *inp, int blockcnt) +{ + unsigned long in, out, cnt; + + if (!blockcnt) + return; + + in = (unsigned long)inp; + out = (unsigned long)(sst->state); + cnt = (unsigned long)blockcnt; + + __asm__ __volatile__( + #ifdef __x86_64__ + "pushq %%rbp\n" + "pushq %%rbx\n" + "pushq %%rsi\n" + "pushq %%rdi\n" + "movq $-1, %%rax\n" + "movq $0x20, %%rbx\n" + #else + "pushl %%ebp\n" + "pushl %%ebx\n" + "pushl %%esi\n" + "pushl %%edi\n" + "movl $-1, %%eax\n" + "movl $0x20, %%ebx\n" + #endif + ".byte 0xf3,0x0f,0xa6,0xe8\n" + #ifdef __x86_64__ + "popq %%rdi\n" + "popq %%rsi\n" + "popq %%rbx\n" + "popq %%rbp\n" + #else + "popl %%edi\n" + "popl %%esi\n" + "popl %%ebx\n" + "popl %%ebp\n" + #endif + : + : "S"(in), "D"(out), "c"(cnt) + : + ); +} + +static inline int zx_sm3_init(struct shash_desc *desc) +{ + struct sm3_state *sctx; + + if (!desc) + return -EINVAL; + + sctx = shash_desc_ctx(desc); + + sctx->state[0] = 0x6f168073UL; + sctx->state[1] = 0xb9b21449UL; + sctx->state[2] = 0xd7422417UL; + sctx->state[3] = 0x00068adaUL; + sctx->state[4] = 0xbc306fa9UL; + sctx->state[5] = 0xaa383116UL; + sctx->state[6] = 0x4dee8de3UL; + sctx->state[7] = 0x4e0efbb0UL; + + sctx->count = 0; + + return 0; +} + +static inline int zx_sm3_base_finish(struct shash_desc *desc, u8 *out) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + + memcpy(digest, 
sctx->state, SM3_DIGEST_SIZE); + + *sctx = (struct sm3_state){}; + return 0; +} + +int zx_sm3_update(struct shash_desc *desc, const u8 *data, unsigned int len) +{ + return sm3_base_do_update(desc, data, len, sm3_generic_block_fn); +} +EXPORT_SYMBOL(zx_sm3_update); + +static int zx_sm3_final(struct shash_desc *desc, u8 *out) +{ + sm3_base_do_finalize(desc, sm3_generic_block_fn); + + return zx_sm3_base_finish(desc, out); +} + +int zx_sm3_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash) +{ + sm3_base_do_update(desc, data, len, sm3_generic_block_fn); + + return zx_sm3_final(desc, hash); +} +EXPORT_SYMBOL(zx_sm3_finup); + +static struct shash_alg zx_sm3_alg = { + .digestsize = SM3_DIGEST_SIZE, + .init = zx_sm3_init, + .update = zx_sm3_update, + .final = zx_sm3_final, + .finup = zx_sm3_finup, + .descsize = sizeof(struct sm3_state), + .base = { + .cra_name = "sm3", + .cra_driver_name = "sm3-zhaoxin-gmi", + .cra_priority = 300, + .cra_blocksize = SM3_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static int __init zx_sm3_generic_mod_init(void) +{ + if (gmi_available() == 0) + return crypto_register_shash(&zx_sm3_alg); + + pr_warn("GMI is unavailable on this platform."); + return -ENODEV; +} + +static void __exit zx_sm3_generic_mod_fini(void) +{ + crypto_unregister_shash(&zx_sm3_alg); +} + +module_init(zx_sm3_generic_mod_init); +module_exit(zx_sm3_generic_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SM3 Secure Hash Algorithm"); + +MODULE_ALIAS_CRYPTO("sm3-zhaoxin"); +MODULE_ALIAS_CRYPTO("sm3-zhaoxin-gmi"); diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index bebaa0a2af01..b8b6d0fb0480 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -148,6 +148,8 @@ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ #define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ #define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG 
enabled */ +#define X86_FEATURE_CCS (5*32 + 4) /* "sm3/4" SM3/4 present */ +#define X86_FEATURE_CCS_EN (5*32 + 5) /* "sm3/4" SM3/4 enabled */ #define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ #define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ #define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ -- Gitee From e4afec7ea1ab31075b503a9a103acdab8bcafcd7 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 12 Mar 2024 11:49:56 +0800 Subject: [PATCH 0450/2138] anolis: Add support for Zhaoxin GMI SM4 Block Cipher algorithm ANBZ: #7809 This SM4 algorithm driver is developed to support the SM4 instruction, making user develop their applications with both high performance and high security. BlockSize 16 64 128 256 1024 1424 4096 SM4-Generic ECB Encryption 392.36 464.93 476.03 481.03 485.26 485.28 485.67 Decryption 411.20 446.40 475.43 481.07 484.80 485.27 486.17 CBC Encryption 349.39 424.60 439.53 446.25 451.78 452.87 453.20 Decryption 339.45 423.40 439.27 450.06 457.53 458.02 459.02 CFB Encryption 337.84 428.52 441.12 451.53 456.78 456.14 458.40 Decryption 367.30 403.70 414.36 418.61 421.63 421.37 422.56 CTR Encryption 366.54 448.51 439.18 465.56 472.31 471.36 474.39 Decryption 384.44 448.35 458.30 466.45 472.40 471.36 474.31 SM4-GMI ECB Encryption 811.68 2694.82 4086.80 5870.03 7975.80 8168.89 8510.23 Decryption 849.63 2823.88 4284.88 5833.04 7973.33 8168.45 8506.51 CBC Encryption 701.68 1943.75 2706.77 3365.73 4117.24 4201.31 4299.83 Decryption 709.10 1860.98 2621.00 3296.41 4102.92 4177.61 4298.36 CFB Encryption 711.95 1915.53 2712.57 3371.73 4065.69 4007.26 4310.40 Decryption 713.16 1952.34 2585.72 3363.57 4128.37 4204.84 4305.43 CTR Encryption 633.33 2165.92 3221.97 4250.08 5714.66 5556.47 5956.50 Decryption 662.45 2151.06 3221.11 4248.79 5677.76 5497.75 5968.06 Signed-off-by: leoliu-oc Reviewed-by: Tianjia Zhang Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2703 --- 
arch/x86/crypto/Kconfig | 20 + arch/x86/crypto/Makefile | 1 + arch/x86/crypto/sm4-zhaoxin-gmi.c | 858 ++++++++++++++++++++++++++++++ 3 files changed, 879 insertions(+) create mode 100644 arch/x86/crypto/sm4-zhaoxin-gmi.c diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig index 7b105b70c664..cbe8eef473ec 100644 --- a/arch/x86/crypto/Kconfig +++ b/arch/x86/crypto/Kconfig @@ -231,6 +231,26 @@ config CRYPTO_SM4_AESNI_AVX2_X86_64 If unsure, say N. +config CRYPTO_SM4_ZHAOXIN_GMI + tristate "Ciphers: SM4 with modes: ECB, CBC, CTR, CFB, OFB (Zhaoxin GMI)" + depends on X86 && CRYPTO + default m + select CRYPTO_SKCIPHER + select CRYPTO_SIMD + select CRYPTO_ALGAPI + select CRYPTO_SM4 + help + SM4 cipher algorithms (Zhaoxin GMI Instruction). + + SM4 (GBT.32907-2016) is a cryptographic standard issued by the + Organization of State Commercial Administration of China (OSCCA) + as an authorized cryptographic algorithms for the use within China. + + This is SM4 optimized implementation using Zhaoxin GMI + instruction set for block cipher. + + If unsure, say N. + config CRYPTO_TWOFISH_586 tristate "Ciphers: Twofish (32-bit)" depends on (X86 || UML_X86) && !64BIT diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 63b373b5ebe5..4230829a6648 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -110,6 +110,7 @@ obj-$(CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64) += aria-gfni-avx512-x86_64.o aria-gfni-avx512-x86_64-y := aria-gfni-avx512-asm_64.o aria_gfni_avx512_glue.o obj-$(CONFIG_CRYPTO_SM3_ZHAOXIN_GMI) += sm3-zhaoxin-gmi.o +obj-$(CONFIG_CRYPTO_SM4_ZHAOXIN_GMI) += sm4-zhaoxin-gmi.o quiet_cmd_perlasm = PERLASM $@ cmd_perlasm = $(PERL) $< > $@ diff --git a/arch/x86/crypto/sm4-zhaoxin-gmi.c b/arch/x86/crypto/sm4-zhaoxin-gmi.c new file mode 100644 index 000000000000..ec57b4ca4644 --- /dev/null +++ b/arch/x86/crypto/sm4-zhaoxin-gmi.c @@ -0,0 +1,858 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * zhaoxin-gmi-sm4.c - wrapper code for Zhaoxin GMI. 
+ * + * Copyright (C) 2023 Shanghai Zhaoxin Semiconductor LTD. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define SM4_ECB (1<<6) +#define SM4_CBC (1<<7) +#define SM4_CFB (1<<8) +#define SM4_OFB (1<<9) +#define SM4_CTR (1<<10) + +#define ZX_GMI_ALIGNMENT 16 + +#define GETU16(p) ((u16)(p)[0]<<8 | (u16)(p)[1]) + +/* Control word. */ +struct sm4_cipher_data { + u8 iv[SM4_BLOCK_SIZE]; /* Initialization vector */ + union { + u32 pad; + struct { + u32 encdec:1; + u32 func:5; + u32 mode:5; + u32 digest:1; + } b; + } cword; /* Control word */ + struct sm4_ctx keys; /* Encryption key */ +}; + +static u8 *rep_xcrypt(const u8 *input, u8 *output, void *key, u8 *iv, + struct sm4_cipher_data *sm4_data, u64 count) +{ + unsigned long rax = sm4_data->cword.pad; + + // Set the flag for encryption or decryption + if (sm4_data->cword.b.encdec == 1) + rax &= ~0x01; + else + rax |= 0x01; + + __asm__ __volatile__( + #ifdef __x86_64__ + "pushq %%rbp\n\n" + "pushq %%rbx\n\n" + "pushq %%rcx\n\n" + "pushq %%rsi\n\n" + "pushq %%rdi\n\n" + #else + "pushl %%ebp\n\n" + "pushl %%ebx\n\n" + "pushl %%ecx\n\n" + "pushl %%esi\n\n" + "pushl %%edi\n\n" + #endif + ".byte 0xf3,0x0f,0xa7,0xf0\n" + #ifdef __x86_64__ + "popq %%rdi\n\n" + "popq %%rsi\n\n" + "popq %%rcx\n\n" + "popq %%rbx\n\n" + "popq %%rbp\n\n" + #else + "popl %%edi\n\n" + "popl %%esi\n\n" + "popl %%ecx\n\n" + "popl %%ebx\n\n" + "popl %%ebp\n\n" + #endif + : + : "S"(input), "D"(output), "a"(rax), "b"(key), "c"((unsigned long)count), "d"(iv)); + return iv; +} + +static u8 *rep_xcrypt_ctr(const u8 *input, u8 *output, void *key, u8 *iv, + struct sm4_cipher_data *sm4_data, u64 count) +{ + u8 oiv[SM4_BLOCK_SIZE] = {0}; + u16 cnt_tmp; + u32 i; + u8 *in_tmp = 
(u8 *)input, *out_tmp = output; + + //Backup the original IV if it is not NULL. + if (iv) + memcpy(oiv, iv, SM4_BLOCK_SIZE); + + // Get the current counter. + cnt_tmp = GETU16(&iv[14]); + + // Get the available counter space before overflow. + cnt_tmp = 0x10000 - cnt_tmp; + + // + // Check there is enough counter space for the required blocks. + // + if (cnt_tmp < count) { + + // Process the first part of data blocks. + rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, cnt_tmp); + // Only increase the counter by SW when overflow occurs. + memcpy(iv, oiv, SM4_BLOCK_SIZE); + + for (i = 0; i < cnt_tmp; i++) + crypto_inc(iv, SM4_BLOCK_SIZE); + + out_tmp = output + cnt_tmp * SM4_BLOCK_SIZE; + in_tmp = (u8 *)(input + cnt_tmp * SM4_BLOCK_SIZE); + + // Get the number of data blocks that have not been encrypted. + cnt_tmp = count - cnt_tmp; + // Process the remaining part of data blocks. + rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, cnt_tmp); + } else { + // Counter space is big enough, the counter will not overflow. + rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, count); + } + + // Restore the iv if not null + if (iv) + memcpy(iv, oiv, SM4_BLOCK_SIZE); + + return iv; +} + +static u8 *rep_xcrypt_ecb_ONE(const u8 *input, u8 *output, void *key, + u8 *iv, struct sm4_cipher_data *sm4_data, u64 count) +{ + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_ECB; + + return rep_xcrypt(input, output, key, iv, &cw, 1); +} + +/** + * gmi_sm4_set_key - Set the sm4 key. + * @tfm: The %crypto_skcipher that is used in the context. + * @in_key: The input key. + * @key_len:The size of the key. + */ +int gmi_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (key_len != SM4_KEY_SIZE) { + pr_warn("The key_len must be 16 bytes. 
please check\n"); + return -EINVAL; + } + + memcpy(ctx->rkey_enc, in_key, key_len); + memcpy(ctx->rkey_dec, in_key, key_len); + + return 0; +} +EXPORT_SYMBOL_GPL(gmi_sm4_set_key); + + +static int sm4_cipher_common(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + u8 *iv; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + iv = rep_xcrypt(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, + walk.iv, cw, blocks); + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + + +static int ecb_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_ECB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int ecb_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_ECB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int cbc_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CBC; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int cbc_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_CBC; + + err = sm4_cipher_common(req, &cw); + + return err; +} + + +/* + * sm4_cipher_ctr is used for ZX-E and newer + */ +static int sm4_cipher_ctr(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks, nbytes; + int err; + u8 *iv, *dst, *src; + 
u8 keystream[SM4_BLOCK_SIZE]; + u32 i; + + err = skcipher_walk_virt(&walk, req, true); + + while ((nbytes = walk.nbytes) > 0) { + + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + + while (nbytes >= SM4_BLOCK_SIZE) { + blocks = nbytes/SM4_BLOCK_SIZE; + iv = rep_xcrypt_ctr(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, + walk.iv, cw, blocks); + + for (i = 0; i < blocks; i++) + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + + dst += blocks * SM4_BLOCK_SIZE; + src += blocks * SM4_BLOCK_SIZE; + nbytes -= blocks * SM4_BLOCK_SIZE; + } + + if (walk.nbytes == walk.total && nbytes > 0) { + rep_xcrypt_ecb_ONE(walk.iv, keystream, ctx->rkey_enc, walk.iv, cw, 1); + crypto_xor_cpy(dst, keystream, src, nbytes); + dst += nbytes; + src += nbytes; + nbytes = 0; + } + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +/* + * ctr_encrypt is used for ZX-E and newer + */ +static int ctr_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_cipher_ctr(req, &cw); + + return err; +} + +/* + * ctr_decrypt is used for ZX-E and newer + */ +static int ctr_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_cipher_ctr(req, &cw); + + return err; +} + +/* + * sm4_ctr_zxc is used for ZXC+ + */ +static int sm4_ctr_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + u8 *iv = NULL, *dst, *src; + u8 en_iv[SM4_BLOCK_SIZE] = {0}; + + err = skcipher_walk_virt(&walk, req, true); + + while ((nbytes = walk.nbytes) > 0) { + + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + + while (nbytes >= SM4_BLOCK_SIZE) { + + iv = rep_xcrypt_ecb_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, 
cw, 1); + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + + crypto_xor_cpy(dst, en_iv, src, SM4_BLOCK_SIZE); + + dst += SM4_BLOCK_SIZE; + src += SM4_BLOCK_SIZE; + nbytes -= SM4_BLOCK_SIZE; + } + + // tail + if (walk.nbytes == walk.total && nbytes > 0) { + + rep_xcrypt_ecb_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1); + crypto_xor_cpy(dst, en_iv, src, nbytes); + + dst += nbytes; + src += nbytes; + nbytes = 0; + } + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +/* + * ctr_encrypt_zxc is used for ZX-C+ + */ +static int ctr_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_ctr_zxc(req, &cw); + + return err; +} + +/* + * ctr_decrypt_zxc is used for ZX-C+ + */ +static int ctr_decrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_ctr_zxc(req, &cw); + + return err; +} + +/* + * ofb_encrypt is used for ZX-E and newer + */ +static int ofb_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * ofb_decrypt is used for ZX-E and newer + */ +static int ofb_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * sm4_ofb_zxc is used for ZX-C+ + */ +static int sm4_ofb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + + u32 n; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / 
SM4_BLOCK_SIZE))) { + while (blocks--) { + + rep_xcrypt_ecb_ONE(walk.iv, walk.iv, ctx->rkey_enc, NULL, cw, 1); + + for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) + *(size_t *)(walk.dst.virt.addr + n) = + *(size_t *)(walk.iv + n) ^ + *(size_t *)(walk.src.virt.addr + n); + + walk.src.virt.addr += SM4_BLOCK_SIZE; + walk.dst.virt.addr += SM4_BLOCK_SIZE; + + } + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + +/* + * ofb_encrypt_zxc is used for ZX-C+ + */ +static int ofb_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_ofb_zxc(req, &cw); + + return err; +} + +/* + * ofb_decrypt_zxc is used for ZX-C+ + */ +static int ofb_decrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_ofb_zxc(req, &cw); + + return err; +} + + +/* + * cfb_encrypt is used for ZX-E and newer. + */ +static int cfb_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * cfb_decrypt is used for ZX-E and newer. 
+ */ + +static int cfb_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cipher_common(req, &cw); + + return err; + +} + +/* + * sm4_cfb_zxc is used for ZX-C+ + */ +static int sm4_cfb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + u32 n; + size_t t; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + while (blocks--) { + rep_xcrypt_ecb_ONE(walk.iv, walk.iv, ctx->rkey_enc, NULL, cw, 1); + + if (cw->cword.b.encdec) + for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) + *(size_t *)(walk.dst.virt.addr + n) = + *(size_t *)(walk.iv + n) ^= + *(size_t *)(walk.src.virt.addr + n); + + else + for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) { + t = *(size_t *)(walk.src.virt.addr + n); + *(size_t *)(walk.dst.virt.addr + n) = + *(size_t *)(walk.iv + n) ^ t; + *(size_t *)(walk.iv + n) = t; + } + + walk.src.virt.addr += SM4_BLOCK_SIZE; + walk.dst.virt.addr += SM4_BLOCK_SIZE; + } + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + +/* + * cfb_encrypt_zxc is used for ZX-C+ + */ +static int cfb_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cfb_zxc(req, &cw); + + return err; +} + +/* + * cfb_decrypt_zxc is used for ZX-C+ + */ +static int cfb_decrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cfb_zxc(req, &cw); + + return err; +} + + +static struct skcipher_alg sm4_algs[] = { + { + .base = { + .cra_name = "__ecb(sm4)", + .cra_driver_name = 
"__ecb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ecb_encrypt, + .decrypt = ecb_decrypt, + }, + + { + .base = { + .cra_name = "__cbc(sm4)", + .cra_driver_name = "__cbc-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = cbc_encrypt, + .decrypt = cbc_decrypt, + }, + + { + .base = { + .cra_name = "__ctr(sm4)", + .cra_driver_name = "__ctr-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = 1, //SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ctr_encrypt, + .decrypt = ctr_decrypt, + }, + + { + .base = { + .cra_name = "__ofb(sm4)", + .cra_driver_name = "__ofb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ofb_encrypt, + .decrypt = ofb_decrypt, + }, + + { + .base = { + .cra_name = "__cfb(sm4)", + .cra_driver_name = "__cfb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + 
.cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = cfb_encrypt, + .decrypt = cfb_decrypt, + } +}; + +static struct simd_skcipher_alg *sm4_simd_algs[ARRAY_SIZE(sm4_algs)]; + +static int gmi_zxc_check(void) +{ + int f_zxc = 0; + + struct cpuinfo_x86 *c = &cpu_data(0); + + if ((c->x86 > 6)) { + f_zxc = 0; + } else if (((c->x86 == 6) && (c->x86_model >= 0x0f)) + || ((c->x86 == 6) && (c->x86_model == 0x09)) + ) { + f_zxc = 1; + } + + return f_zxc; +} + +/* + * Load supported features of the CPU to see if the SM4 is available. + */ +static int gmi_ccs_available(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + u32 eax, edx; + + if (((c->x86 == 6) && (c->x86_model >= 0x0f)) + || ((c->x86 == 6) && (c->x86_model == 0x09)) + || (c->x86 > 6)) { + if (!boot_cpu_has(X86_FEATURE_CCS) || !boot_cpu_has(X86_FEATURE_CCS_EN)) { + + eax = 0xC0000001; + __asm__ __volatile__ ("cpuid":"=d"(edx):"a"(eax) : ); + + if ((edx & 0x0030) != 0x0030) + return -ENODEV; + + pr_notice("GMI SM4 is detected by CPUID\n"); + return 0; + } + pr_notice("GMI SM4 is available\n"); + return 0; + + } + return -ENODEV; +} + + +static void gmi_sm4_exit(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sm4_simd_algs) && sm4_simd_algs[i]; i++) + simd_skcipher_free(sm4_simd_algs[i]); + + crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); +} +static int __init gmi_sm4_init(void) +{ + struct simd_skcipher_alg *simd; + const char *basename; + const char *algname; + const char *drvname; + int err; + int i; + + if (gmi_ccs_available() != 0) + return -ENODEV; + + if (gmi_zxc_check()) { + + for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { + if (!strcmp(sm4_algs[i].base.cra_name, "__ctr(sm4)")) { + + sm4_algs[i].encrypt = ctr_encrypt_zxc; + sm4_algs[i].decrypt = 
ctr_decrypt_zxc; + } else if (!strcmp(sm4_algs[i].base.cra_name, "__cfb(sm4)")) { + + sm4_algs[i].encrypt = cfb_encrypt_zxc; + sm4_algs[i].decrypt = cfb_decrypt_zxc; + + } else if (!strcmp(sm4_algs[i].base.cra_name, "__ofb(sm4)")) { + + sm4_algs[i].encrypt = ofb_encrypt_zxc; + sm4_algs[i].decrypt = ofb_decrypt_zxc; + } + } + } + + err = crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { + algname = sm4_algs[i].base.cra_name + 2; + drvname = sm4_algs[i].base.cra_driver_name + 2; + basename = sm4_algs[i].base.cra_driver_name; + simd = simd_skcipher_create_compat(algname, drvname, basename); + err = PTR_ERR(simd); + if (IS_ERR(simd)) + goto unregister_simds; + + sm4_simd_algs[i] = simd; + } + + return 0; + +unregister_simds: + gmi_sm4_exit(); + return err; +} + +late_initcall(gmi_sm4_init); +module_exit(gmi_sm4_exit); + +MODULE_DESCRIPTION("SM4-ECB/CBC/CTR/CFB/OFB using Zhaoxin GMI"); +MODULE_AUTHOR("GRX"); +MODULE_LICENSE("GPL"); -- Gitee From 18b5debb5ea9b9e597a2f565416cd889df9005b4 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 26 Mar 2024 15:17:11 +0800 Subject: [PATCH 0451/2138] anolis: configs: Add Zhaoxin SM3 and SM4 algorithm configs ANBZ: #7809 Add Zhaoxin SM3 and SM4 algorithm dirvers: CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m Signed-off-by: leoliu-oc Reviewed-by: Tianjia Zhang Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2703 --- arch/x86/configs/anolis_defconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 145ac54de178..31d935f9b299 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -7376,6 +7376,7 @@ CONFIG_CRYPTO_SERPENT_AVX_X86_64=m CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m 
CONFIG_CRYPTO_TWOFISH_X86_64=m CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m @@ -7393,6 +7394,7 @@ CONFIG_CRYPTO_SHA1_SSSE3=y CONFIG_CRYPTO_SHA256_SSSE3=y CONFIG_CRYPTO_SHA512_SSSE3=y CONFIG_CRYPTO_SM3_AVX_X86_64=m +CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m CONFIG_CRYPTO_CRC32C_INTEL=m CONFIG_CRYPTO_CRC32_PCLMUL=m -- Gitee From a18ad209d56021831eea3808890aa57b551bd7ed Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 12 Mar 2024 15:49:52 +0800 Subject: [PATCH 0452/2138] anolis: Add support for Zhaoxin AES algorithm ANBZ: #7809 Some Zhaoxin processors come with an integrated crypto engine (so called Zhaoxin ACE, Advanced Cryptography Engine) that provides instructions for very fast cryptographic operations with supported AES algorithms. Signed-off-by: leoliu-oc Reviewed-by: Tianjia Zhang Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2691 --- drivers/crypto/Kconfig | 25 ++ drivers/crypto/Makefile | 1 + drivers/crypto/padlock-aes.c | 2 +- drivers/crypto/zhaoxin-aes.c | 523 +++++++++++++++++++++++++++++++++++ 4 files changed, 550 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/zhaoxin-aes.c diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index b03f7ed92793..960a4f0ebf77 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -52,6 +52,31 @@ config CRYPTO_DEV_PADLOCK_SHA If unsure say M. The compiled module will be called padlock-sha. +config CRYPTO_DEV_ZHAOXIN + tristate "Support for Zhaoxin ACE" + depends on X86 && !UML + help + Some Zhaoxin processors come with an integrated crypto engine + (so called Zhaoxin ACE, Advanced Cryptography Engine) + that provides instructions for very fast cryptographic + operations with supported algorithms. + + The instructions are used only when the CPU supports them. + Otherwise software encryption is used. 
+ +config CRYPTO_DEV_ZHAOXIN_AES + tristate "Zhaoxin ACE driver for AES algorithm" + depends on CRYPTO_DEV_ZHAOXIN + select CRYPTO_BLKCIPHER + select CRYPTO_AES + help + Use Zhaoxin ACE for AES algorithm. + + Available in Zhaoxin CPUs. + + If unsure say M. The compiled module will be + called zhaoxin-aes. + config CRYPTO_DEV_GEODE tristate "Support for the Geode LX AES engine" depends on X86_32 && PCI diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 94c8b187f739..dda3c310f065 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o +obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_AES) += zhaoxin-aes.o obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 1be549a07a21..f0c3127941ae 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -475,7 +475,7 @@ static struct skcipher_alg cbc_aes_alg = { }; static const struct x86_cpu_id padlock_cpu_id[] = { - X86_MATCH_FEATURE(X86_FEATURE_XCRYPT, NULL), + { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_XCRYPT }, {} }; MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id); diff --git a/drivers/crypto/zhaoxin-aes.c b/drivers/crypto/zhaoxin-aes.c new file mode 100644 index 000000000000..e1d029fa9d1a --- /dev/null +++ b/drivers/crypto/zhaoxin-aes.c @@ -0,0 +1,523 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Support for ACE hardware crypto engine. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "1.0.0" + +/* + * Number of data blocks actually fetched for each xcrypt insn. + * Processors with prefetch errata will fetch extra blocks. + */ +static unsigned int ecb_fetch_blocks = 2; +#define MAX_ECB_FETCH_BLOCKS (8) +#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE) + +static unsigned int cbc_fetch_blocks = 1; +#define MAX_CBC_FETCH_BLOCKS (4) +#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE) + +/* Control word. */ +struct cword { + unsigned int __packed + rounds:4, + algo:3, + keygen:1, + interm:1, + encdec:1, + ksize:2; +} __aligned(PADLOCK_ALIGNMENT); + +/* + * Whenever making any changes to the following structure *make sure* you keep E, d_data and cword + * aligned on 16 Bytes boundaries and the Hardware can access 16 * 16 bytes of E and d_data (only + * the first 15 * 16 bytes matter but the HW reads more). + */ +struct aes_ctx { + u32 E[AES_MAX_KEYLENGTH_U32] __aligned(PADLOCK_ALIGNMENT); + u32 d_data[AES_MAX_KEYLENGTH_U32] __aligned(PADLOCK_ALIGNMENT); + struct { + struct cword encrypt; + struct cword decrypt; + } cword; + u32 *D; +}; + +static DEFINE_PER_CPU(struct cword *, zx_paes_last_cword); + +/* Tells whether the ACE is capable to generate the extended key for a given key_len. */ +static inline int aes_hw_extkey_available(uint8_t key_len) +{ + /* + * TODO: We should check the actual CPU model/stepping as it's possible that the + * capability will be added in the next CPU revisions. 
+ */ + if (key_len == 16) + return 1; + return 0; +} + +static inline struct aes_ctx *aes_ctx_common(void *ctx) +{ + unsigned long addr = (unsigned long)ctx; + unsigned long align = PADLOCK_ALIGNMENT; + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + return (struct aes_ctx *)ALIGN(addr, align); +} + +static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) +{ + return aes_ctx_common(crypto_tfm_ctx(tfm)); +} + +static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm) +{ + return aes_ctx_common(crypto_skcipher_ctx(tfm)); +} + +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + const __le32 *key = (const __le32 *)in_key; + struct crypto_aes_ctx gen_aes; + int cpu; + + if (key_len % 8) + return -EINVAL; + + /* + * If the hardware is capable of generating the extended key itself we must supply the + * plain key for both encryption and decryption. + */ + ctx->D = ctx->E; + + ctx->E[0] = le32_to_cpu(key[0]); + ctx->E[1] = le32_to_cpu(key[1]); + ctx->E[2] = le32_to_cpu(key[2]); + ctx->E[3] = le32_to_cpu(key[3]); + + /* Prepare control words. */ + memset(&ctx->cword, 0, sizeof(ctx->cword)); + + ctx->cword.decrypt.encdec = 1; + ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4; + ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds; + ctx->cword.encrypt.ksize = (key_len - 16) / 8; + ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize; + + /* Don't generate extended keys if the hardware can do it. 
*/ + if (aes_hw_extkey_available(key_len)) + goto ok; + + ctx->D = ctx->d_data; + ctx->cword.encrypt.keygen = 1; + ctx->cword.decrypt.keygen = 1; + + if (aes_expandkey(&gen_aes, in_key, key_len)) + return -EINVAL; + + memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); + memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); + +ok: + for_each_online_cpu(cpu) + if (&ctx->cword.encrypt == per_cpu(zx_paes_last_cword, cpu) || + &ctx->cword.decrypt == per_cpu(zx_paes_last_cword, cpu)) + per_cpu(zx_paes_last_cword, cpu) = NULL; + + return 0; +} + +static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len); +} + +/* ====== Encryption/decryption routines ====== */ + +/* These are the real call to PadLock. */ +static inline void padlock_reset_key(struct cword *cword) +{ + int cpu = raw_smp_processor_id(); + + if (cword != per_cpu(zx_paes_last_cword, cpu)) +#ifndef CONFIG_X86_64 + asm volatile ("pushfl; popfl"); +#else + asm volatile ("pushfq; popfq"); +#endif +} + +static inline void padlock_store_cword(struct cword *cword) +{ + per_cpu(zx_paes_last_cword, raw_smp_processor_id()) = cword; +} + +/* + * While the padlock instructions don't use FP/SSE registers, they generate a spurious DNA fault + * when CR0.TS is '1'. Fortunately, the kernel doesn't use CR0.TS. 
+ */ +static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key, + struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(count)); +} + +static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key, u8 *iv, + struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + return iv; +} + +static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + rep_xcrypt_ecb(tmp, out, key, cword, count); +} + +static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key, u8 *iv, struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + return rep_xcrypt_cbc(tmp, out, key, iv, cword, count); +} + +static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key, struct cword *cword, int count) +{ + /* + * Padlock in ECB mode fetches at least ecb_fetch_bytes of data. + * We could avoid some copying here but it's probably not worth it. 
+ */ + if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) { + ecb_crypt_copy(in, out, key, cword, count); + return; + } + + rep_xcrypt_ecb(in, out, key, cword, count); +} + +static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key, u8 *iv, struct cword *cword, + int count) +{ + /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */ + if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE)) + return cbc_crypt_copy(in, out, key, iv, cword, count); + + return rep_xcrypt_cbc(in, out, key, iv, cword, count); +} + +static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, void *control_word, + u32 count) +{ + u32 initial = count & (ecb_fetch_blocks - 1); + + if (count < ecb_fetch_blocks) { + ecb_crypt(input, output, key, control_word, count); + return; + } + + count -= initial; + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(count)); +} + +static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, u8 *iv, + void *control_word, u32 count) +{ + u32 initial = count & (cbc_fetch_blocks - 1); + + if (count < cbc_fetch_blocks) + return cbc_crypt(input, output, key, iv, control_word, count); + + count -= initial; + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + return iv; +} + +static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + + padlock_reset_key(&ctx->cword.encrypt); + ecb_crypt(in, out, ctx->E, 
&ctx->cword.encrypt, 1); + padlock_store_cword(&ctx->cword.encrypt); +} + +static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + + padlock_reset_key(&ctx->cword.encrypt); + ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1); + padlock_store_cword(&ctx->cword.encrypt); +} + +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_set_key, + .cia_encrypt = padlock_aes_encrypt, + .cia_decrypt = padlock_aes_decrypt, + } + } +}; + +static int ecb_aes_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->E, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static int ecb_aes_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.decrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, &ctx->cword.decrypt, + nbytes / 
AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static struct skcipher_alg ecb_aes_alg = { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-padlock", + .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct aes_ctx), + .base.cra_alignmask = PADLOCK_ALIGNMENT - 1, + .base.cra_module = THIS_MODULE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_set_key_skcipher, + .encrypt = ecb_aes_encrypt, + .decrypt = ecb_aes_decrypt, +}; + +static int cbc_aes_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, + walk.dst.virt.addr, ctx->E, + walk.iv, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + memcpy(walk.iv, iv, AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.decrypt); + + return err; +} + +static int cbc_aes_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, walk.iv, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; 
+} + +static struct skcipher_alg cbc_aes_alg = { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-padlock", + .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct aes_ctx), + .base.cra_alignmask = PADLOCK_ALIGNMENT - 1, + .base.cra_module = THIS_MODULE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = aes_set_key_skcipher, + .encrypt = cbc_aes_encrypt, + .decrypt = cbc_aes_decrypt, +}; + +static const struct x86_cpu_id zhaoxin_cpu_id[] = { + { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT }, + { X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_cpu_id); + +static int __init padlock_init(void) +{ + int ret; + + if (!x86_match_cpu(zhaoxin_cpu_id)) + return -ENODEV; + + if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) { + pr_notice("ACE detected, but not enabled. 
that provides instructions for very fast cryptographic operations with supported SHA1/SHA256 algorithms.
The compiled module will be called zhaoxin-aes. +config CRYPTO_DEV_ZHAOXIN_SHA + tristate "Zhaoxin ACE driver for SHA1 and SHA256 algorithms" + depends on CRYPTO_DEV_ZHAOXIN + select CRYPTO_HASH + select CRYPTO_SHA1 + select CRYPTO_SHA256 + help + Use Zhaoxin ACE for SHA1/SHA256 algorithms. + + Available in Zhaoxin processors. + + If unsure say M. The compiled module will be + called zhaoxin-sha. + config CRYPTO_DEV_GEODE tristate "Support for the Geode LX AES engine" depends on X86_32 && PCI diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index dda3c310f065..5247d2bf09ce 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_AES) += zhaoxin-aes.o +obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_SHA) += zhaoxin-sha.o obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 6865c7f1fc1a..04858dc8b597 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -491,7 +491,7 @@ static struct shash_alg sha256_alg_nano = { }; static const struct x86_cpu_id padlock_sha_ids[] = { - X86_MATCH_FEATURE(X86_FEATURE_PHE, NULL), + { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_PHE }, {} }; MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids); diff --git a/drivers/crypto/zhaoxin-sha.c b/drivers/crypto/zhaoxin-sha.c new file mode 100644 index 000000000000..840805f36838 --- /dev/null +++ b/drivers/crypto/zhaoxin-sha.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Support for ACE hardware crypto engine. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "1.0.0" + +static inline void padlock_output_block(uint32_t *src, uint32_t *dst, size_t count) +{ + while (count--) + *dst++ = swab32(*src++); +} + +/* + * Add two shash_alg instance for hardware-implemented multiple-parts hash + * supported by Zhaoxin Processor. + */ +static int padlock_sha1_init_zhaoxin(struct shash_desc *desc) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + + *sctx = (struct sha1_state){ + .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 }, + }; + + return 0; +} + +static int padlock_sha1_update_zhaoxin(struct shash_desc *desc, const u8 *data, unsigned int len) +{ + struct sha1_state *sctx = shash_desc_ctx(desc); + unsigned int partial, done; + const u8 *src; + + /* The PHE require the out buffer must 128 bytes and 16-bytes aligned */ + u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __aligned(STACK_ALIGN); + u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + partial = sctx->count & 0x3f; + sctx->count += len; + done = 0; + src = data; + memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE); + + if ((partial + len) >= SHA1_BLOCK_SIZE) { + /* Append the bytes in state's buffer to a block to handle */ + if (partial) { + done = -partial; + memcpy(sctx->buffer + partial, data, done + SHA1_BLOCK_SIZE); + src = sctx->buffer; + asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" + : "+S"(src), "+D"(dst) + : "a"((long)-1), "c"(1UL)); + done += SHA1_BLOCK_SIZE; + src = data + done; + } + + /* Process the left bytes from the input data */ + if (len - done >= SHA1_BLOCK_SIZE) { + asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" + : "+S"(src), "+D"(dst) + : "a"((long)-1), "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE))); + done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE); + src = data + done; + } + partial = 0; + } + memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE); + memcpy(sctx->buffer + 
partial, src, len - done); + + return 0; +} + +static int padlock_sha1_final_zhaoxin(struct shash_desc *desc, u8 *out) +{ + struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc); + unsigned int partial, padlen; + __be64 bits; + static const u8 padding[64] = { 0x80, }; + + bits = cpu_to_be64(state->count << 3); + + /* Pad out to 56 mod 64 */ + partial = state->count & 0x3f; + padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial); + padlock_sha1_update_zhaoxin(desc, padding, padlen); + + /* Append length field bytes */ + padlock_sha1_update_zhaoxin(desc, (const u8 *)&bits, sizeof(bits)); + + /* Swap to output */ + padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5); + + return 0; +} + +static int padlock_sha256_init_zhaoxin(struct shash_desc *desc) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + + *sctx = (struct sha256_state) { + .state = { + SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, + SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 + }, + }; + + return 0; +} + +static int padlock_sha256_update_zhaoxin(struct shash_desc *desc, const u8 *data, unsigned int len) +{ + struct sha256_state *sctx = shash_desc_ctx(desc); + unsigned int partial, done; + const u8 *src; + + /* The PHE require the out buffer must 128 bytes and 16-bytes aligned */ + u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __aligned(STACK_ALIGN); + u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + partial = sctx->count & 0x3f; + sctx->count += len; + done = 0; + src = data; + memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE); + + if ((partial + len) >= SHA256_BLOCK_SIZE) { + + /* Append the bytes in state's buffer to a block to handle */ + if (partial) { + done = -partial; + memcpy(sctx->buf + partial, data, done + SHA256_BLOCK_SIZE); + src = sctx->buf; + asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" + : "+S"(src), "+D"(dst) + : "a"((long)-1), "c"(1UL)); + done += SHA256_BLOCK_SIZE; + src = data + done; + } + + /* Process the left bytes from input data */ + 
if (len - done >= SHA256_BLOCK_SIZE) { + asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" + : "+S"(src), "+D"(dst) + : "a"((long)-1), "c"((unsigned long)((len - done) / 64))); + done += ((len - done) - (len - done) % 64); + src = data + done; + } + partial = 0; + } + memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE); + memcpy(sctx->buf + partial, src, len - done); + + return 0; +} + +static int padlock_sha256_final_zhaoxin(struct shash_desc *desc, u8 *out) +{ + struct sha256_state *state = (struct sha256_state *)shash_desc_ctx(desc); + unsigned int partial, padlen; + __be64 bits; + static const u8 padding[64] = { 0x80, }; + + bits = cpu_to_be64(state->count << 3); + + /* Pad out to 56 mod 64 */ + partial = state->count & 0x3f; + padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial); + padlock_sha256_update_zhaoxin(desc, padding, padlen); + + /* Append length field bytes */ + padlock_sha256_update_zhaoxin(desc, (const u8 *)&bits, sizeof(bits)); + + /* Swap to output */ + padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8); + + return 0; +} + +static int padlock_sha_export_zhaoxin(struct shash_desc *desc, void *out) +{ + int statesize = crypto_shash_statesize(desc->tfm); + void *sctx = shash_desc_ctx(desc); + + memcpy(out, sctx, statesize); + return 0; +} + +static int padlock_sha_import_zhaoxin(struct shash_desc *desc, const void *in) +{ + int statesize = crypto_shash_statesize(desc->tfm); + void *sctx = shash_desc_ctx(desc); + + memcpy(sctx, in, statesize); + return 0; +} + +static struct shash_alg sha1_alg_zhaoxin = { + .digestsize = SHA1_DIGEST_SIZE, + .init = padlock_sha1_init_zhaoxin, + .update = padlock_sha1_update_zhaoxin, + .final = padlock_sha1_final_zhaoxin, + .export = padlock_sha_export_zhaoxin, + .import = padlock_sha_import_zhaoxin, + .descsize = sizeof(struct sha1_state), + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-padlock-zhaoxin", + .cra_priority = PADLOCK_CRA_PRIORITY, 
+ .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static struct shash_alg sha256_alg_zhaoxin = { + .digestsize = SHA256_DIGEST_SIZE, + .init = padlock_sha256_init_zhaoxin, + .update = padlock_sha256_update_zhaoxin, + .final = padlock_sha256_final_zhaoxin, + .export = padlock_sha_export_zhaoxin, + .import = padlock_sha_import_zhaoxin, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-padlock-zhaoxin", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static const struct x86_cpu_id zhaoxin_sha_ids[] = { + { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE }, + { X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_sha_ids); + +static int __init padlock_init(void) +{ + int rc = -ENODEV; + struct shash_alg *sha1; + struct shash_alg *sha256; + + if (!x86_match_cpu(zhaoxin_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN)) + return -ENODEV; + + sha1 = &sha1_alg_zhaoxin; + sha256 = &sha256_alg_zhaoxin; + + rc = crypto_register_shash(sha1); + if (rc) + goto out; + + rc = crypto_register_shash(sha256); + if (rc) + goto out_unreg1; + + pr_notice("Using ACE for SHA1/SHA256 algorithms.\n"); + + return 0; + +out_unreg1: + crypto_unregister_shash(sha1); + +out: + pr_err("ACE SHA1/SHA256 initialization failed.\n"); + return rc; +} + +static void __exit padlock_fini(void) +{ + crypto_unregister_shash(&sha1_alg_zhaoxin); + crypto_unregister_shash(&sha256_alg_zhaoxin); +} + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("ACE SHA1/SHA256 algorithms support."); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); +MODULE_VERSION(DRIVER_VERSION); + +MODULE_ALIAS_CRYPTO("sha1-all"); +MODULE_ALIAS_CRYPTO("sha256-all"); +MODULE_ALIAS_CRYPTO("sha1-padlock"); 
Subject: [PATCH 0454/2138] anolis: configs: add CONFIG_CRYPTO_DEV_ZHAOXIN series

ANBZ: #7809

To add support for the zhaoxin-aes/sha drivers, we need to add the
CONFIG_CRYPTO_DEV_ZHAOXIN series of configuration items:
Fixes: e61ac77f426a ("anolis: kfence: enhance kfence for 6.6") Signed-off-by: Tianchen Ding Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2964 --- mm/kfence/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 571ef4dcccf8..d5329b1c560b 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -777,7 +777,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g __SetPageSlab(page); slab->slab_cache = cache; #ifdef CONFIG_MEMCG - slab->memcg_data = (unsigned long)meta->objcg | MEMCG_DATA_OBJCGS; + slab->memcg_data = (unsigned long)&meta->objcg | MEMCG_DATA_OBJCGS; #endif #if defined(CONFIG_SLUB) slab->objects = 1; -- Gitee From 1c6784a2f86db9c3349fbf4ab22cc9c06facc306 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Thu, 28 Mar 2024 16:30:23 +0800 Subject: [PATCH 0456/2138] anolis: crypto: x86/sm2 -add Zhaoxin SM2 algorithm implementation ANBZ: #7809 Add support for SM2 (ShangMi 2) public key algorithm by Zhaoxin GMI Instruction. The purpose of this driver is to ensure that the application has both high performance and high security. 
Signed-off-by: leoliu-oc Reviewed-by: Tianjia Zhang Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2708 --- arch/x86/crypto/Kconfig | 11 ++ arch/x86/crypto/Makefile | 1 + arch/x86/crypto/sm2-zhaoxin-gmi.c | 158 +++++++++++++++++++++++++++++ arch/x86/include/asm/cpufeatures.h | 2 + 4 files changed, 172 insertions(+) create mode 100644 arch/x86/crypto/sm2-zhaoxin-gmi.c diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig index cbe8eef473ec..2d2d807e3b00 100644 --- a/arch/x86/crypto/Kconfig +++ b/arch/x86/crypto/Kconfig @@ -553,4 +553,15 @@ config CRYPTO_CRCT10DIF_PCLMUL Architecture: x86_64 using: - PCLMULQDQ (carry-less multiplication) +config CRYPTO_SM2_ZHAOXIN_GMI + tristate "SM2 Cipher algorithm (Zhaoxin GMI Instruction)" + depends on X86 && (CPU_SUP_CENTAUR || CPU_SUP_ZHAOXIN) + select CRYPTO_AKCIPHER + select CRYPTO_MANAGER + help + SM2 (ShangMi 2) public key algorithm by Zhaoxin GMI Instruction + + Published by State Encryption Management Bureau, China, + as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012. 
+ endmenu diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 4230829a6648..e5480c50a8d9 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -109,6 +109,7 @@ aria-aesni-avx2-x86_64-y := aria-aesni-avx2-asm_64.o aria_aesni_avx2_glue.o obj-$(CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64) += aria-gfni-avx512-x86_64.o aria-gfni-avx512-x86_64-y := aria-gfni-avx512-asm_64.o aria_gfni_avx512_glue.o +obj-$(CONFIG_CRYPTO_SM2_ZHAOXIN_GMI) += sm2-zhaoxin-gmi.o obj-$(CONFIG_CRYPTO_SM3_ZHAOXIN_GMI) += sm3-zhaoxin-gmi.o obj-$(CONFIG_CRYPTO_SM4_ZHAOXIN_GMI) += sm4-zhaoxin-gmi.o diff --git a/arch/x86/crypto/sm2-zhaoxin-gmi.c b/arch/x86/crypto/sm2-zhaoxin-gmi.c new file mode 100644 index 000000000000..a0430c6611fc --- /dev/null +++ b/arch/x86/crypto/sm2-zhaoxin-gmi.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SM2 asymmetric public-key algorithm + * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2 and + * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02 + * + * Copyright (c) 2023 Shanghai Zhaoxin Semiconductor LTD. + * Authors: YunShen + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define SCRATCH_SIZE (4 * 2048) + +#define SM2_CWORD_VERIFY 0x8 +#define SM2_VERIFY_PASS 1 + +struct sm2_cipher_data { + u8 pub_key[65]; /* public key */ +}; + +/* Load supported features of the CPU to see if the SM2 is available. 
*/ +static int zhaoxin_gmi_available(void) +{ + if (!boot_cpu_has(X86_FEATURE_SM2_EN)) { + pr_err("can't enable hardware SM2 if Zhaoxin GMI SM2 is not enabled\n"); + return -ENODEV; + } + return 0; +} + +/* Zhaoxin sm2 verify function */ +static inline size_t zhaoxin_gmi_sm2_verify(unsigned char *key, unsigned char *hash, + unsigned char *sig, unsigned char *scratch) +{ + size_t result; + + asm volatile( + ".byte 0xf2, 0x0f, 0xa6, 0xc0" + : "=c"(result) + : "a"(hash), "b"(key), "d"(SM2_CWORD_VERIFY), "S"(scratch), "D"(sig) + : "memory"); + + return result; +} + +/* Zhaoxin sm2 verify function */ +static int _zhaoxin_sm2_verify(struct sm2_cipher_data *ec, unsigned char *hash, unsigned char *sig) +{ + unsigned char *scratch = kzalloc(SCRATCH_SIZE, GFP_KERNEL); + int ret = -EKEYREJECTED; + size_t result; + + result = zhaoxin_gmi_sm2_verify(ec->pub_key, hash, sig, scratch); + if (result == SM2_VERIFY_PASS) + ret = 0; + + kfree(scratch); + + return ret; +} + +static int zhaoxin_sm2_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm); + unsigned char *buffer; + int ret, buf_len; + + buf_len = req->src_len + req->dst_len; + buffer = kmalloc(buf_len, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + sg_pcopy_to_buffer(req->src, sg_nents_for_len(req->src, buf_len), buffer, buf_len, 0); + ret = _zhaoxin_sm2_verify(ec, buffer + req->src_len, buffer); + + kfree(buffer); + + return ret; +} + +static int zhaoxin_sm2_set_pub_key(struct crypto_akcipher *tfm, const void *key, + unsigned int keylen) +{ + struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm); + + memcpy(ec->pub_key, key, keylen); + + return 0; +} + +static unsigned int zhaoxin_sm2_max_size(struct crypto_akcipher *tfm) +{ + /* Unlimited max size */ + return PAGE_SIZE; +} + +static int zhaoxin_sm2_init_tfm(struct crypto_akcipher *tfm) +{ + return zhaoxin_gmi_available(); +} + +static void zhaoxin_sm2_exit_tfm(struct 
crypto_akcipher *tfm) +{ + struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm); + + memset(ec, 0, sizeof(*ec)); +} + +static struct akcipher_alg zhaoxin_sm2 = { + .verify = zhaoxin_sm2_verify, + .set_pub_key = zhaoxin_sm2_set_pub_key, + .max_size = zhaoxin_sm2_max_size, + .init = zhaoxin_sm2_init_tfm, + .exit = zhaoxin_sm2_exit_tfm, + .base = { + .cra_name = "sm2", + .cra_driver_name = "zhaoxin-gmi-sm2", + .cra_priority = 150, + .cra_module = THIS_MODULE, + .cra_ctxsize = sizeof(struct sm2_cipher_data), + }, +}; + +static const struct x86_cpu_id zhaoxin_sm2_cpu_ids[] = { + X86_MATCH_FEATURE(X86_FEATURE_SM2, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_sm2_cpu_ids); + +static int __init zhaoxin_sm2_init(void) +{ + if (!x86_match_cpu(zhaoxin_sm2_cpu_ids)) + return -ENODEV; + + return crypto_register_akcipher(&zhaoxin_sm2); +} + +static void __exit zhaoxin_sm2_exit(void) +{ + crypto_unregister_akcipher(&zhaoxin_sm2); +} + +module_init(zhaoxin_sm2_init); +module_exit(zhaoxin_sm2_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("YunShen "); +MODULE_DESCRIPTION("SM2 Zhaoxin GMI Algorithm"); +MODULE_ALIAS_CRYPTO("zhaoxin-gmi-sm2"); diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index b8b6d0fb0480..fab4aa1abaaf 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -146,6 +146,8 @@ #define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ +#define X86_FEATURE_SM2 (5*32 + 0) /* SM2 ZhaoXin GMI present */ +#define X86_FEATURE_SM2_EN (5*32 + 1) /* SM2 ZhaoXin GMI enabled */ #define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ #define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ #define X86_FEATURE_CCS (5*32 + 4) /* "sm3/4" SM3/4 present */ -- Gitee From 2a67baf5aa812f7e3f83d13e21647430ee12c181 Mon Sep 17 00:00:00 2001 From: Tianyu Yuan Date: Tue, 21 Feb 2023 17:23:05 +0800 
Subject: [PATCH 0457/2138] anolis: nsp: generate nsp command with variable nsp major version ANBZ: #8563 The most significant 4 bits of nsp command code should carry the ABI major version so that nsp command can be responded correctly. It is working well since current major version is 0. However management firmware is going to bump the major version to support multi-PF feature. So change the code to explicitly contain the major version into nsp command code. Signed-off-by: Tianyu Yuan Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 7136bc48530b..ee934663c6d9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -37,7 +37,8 @@ #define NSP_COMMAND 0x08 #define NSP_COMMAND_OPTION GENMASK_ULL(63, 32) -#define NSP_COMMAND_CODE GENMASK_ULL(31, 16) +#define NSP_COMMAND_CODE_MJ_VER GENMASK_ULL(31, 28) +#define NSP_COMMAND_CODE GENMASK_ULL(27, 16) #define NSP_COMMAND_DMA_BUF BIT_ULL(1) #define NSP_COMMAND_START BIT_ULL(0) @@ -380,6 +381,7 @@ __nfp_nsp_command(struct nfp_nsp *state, const struct nfp_nsp_command_arg *arg) err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command, FIELD_PREP(NSP_COMMAND_OPTION, arg->option) | + FIELD_PREP(NSP_COMMAND_CODE_MJ_VER, state->ver.major) | FIELD_PREP(NSP_COMMAND_CODE, arg->code) | FIELD_PREP(NSP_COMMAND_DMA_BUF, arg->dma) | FIELD_PREP(NSP_COMMAND_START, 1)); -- Gitee From 7d23a7f3ffd1fa2f95b7b991d37fa6aa0e689419 Mon Sep 17 00:00:00 2001 From: Tianyu Yuan Date: Fri, 24 Feb 2023 13:49:01 +0800 Subject: [PATCH 0458/2138] anolis: nfp: bump the nsp major version to support multi-PF ANBZ: #8563 Currently NFP NICs implement single PF with multiple ports instantiated. 
While NFP3800 can support multiple PFs and one port per PF is more up-to-date, the management firmware will start to support multi-PF. Since it's incompatible with the current implementation, the ABI major version is bumped. A new flag is also introduced to indicate whether it's multi-PF setup or single-PF setup. Signed-off-by: Tianyu Yuan Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 3 +++ drivers/net/ethernet/netronome/nfp/nfp_main.h | 1 + drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 8 ++++---- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 71301dbd8fb5..274bdb8e62f2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -625,6 +625,9 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) return err; } + pf->multi_pf_support = pdev->multifunction; + dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf_support ?
"Multi" : "Single"); + err = nfp_nsp_wait(nsp); if (err < 0) goto exit_close_nsp; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 14a751bfe1fe..d0bfde2a0b2c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -141,6 +141,7 @@ struct nfp_pf { struct nfp_shared_buf *shared_bufs; unsigned int num_shared_bufs; + bool multi_pf_support; }; extern struct pci_driver nfp_netvf_pci_driver; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index ee934663c6d9..56682c530b26 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -59,7 +59,7 @@ #define NFP_CAP_CMD_DMA_SG 0x28 #define NSP_MAGIC 0xab10 -#define NSP_MAJOR 0 +#define NSP_MAJOR 1 #define NSP_MINOR 8 #define NSP_CODE_MAJOR GENMASK(15, 12) @@ -248,14 +248,14 @@ static int nfp_nsp_check(struct nfp_nsp *state) state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg); state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg); - if (state->ver.major != NSP_MAJOR) { + if (state->ver.major > NSP_MAJOR) { nfp_err(cpp, "Unsupported ABI %hu.%hu\n", state->ver.major, state->ver.minor); return -EINVAL; } if (state->ver.minor < NSP_MINOR) { - nfp_err(cpp, "ABI too old to support NIC operation (%u.%hu < %u.%u), please update the management FW on the flash\n", - NSP_MAJOR, state->ver.minor, NSP_MAJOR, NSP_MINOR); + nfp_err(cpp, "ABI too old to support NIC operation (x.%u < x.%u), please update the management FW on the flash\n", + state->ver.minor, NSP_MINOR); return -EINVAL; } -- Gitee From 4afb023517b7adda76997f81ac1d75b52a8f7af1 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Mon, 27 Feb 2023 11:05:13 +0800 Subject: [PATCH 0459/2138] anolis: nfp: change application firmware loading flow in multi-PF setup ANBZ: #8563 In multi-PF setup, all PFs share the single application 
firmware. Each PF is treated equally, and first-come-first-served. So the first step is to check firmware is loaded or not. And also loading firmware from disk and flash are treated consistently, both propagating the failure and setting `fw_loaded` flag. At last, firmware shouldn't be unloaded in this setup. The following commit will introduce a keepalive mechanism to let management firmware manage unloading. The flow is not changed in non-multi-PF setup. Signed-off-by: Yinjun Zhang Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 30 +++++++++++++++---- 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 274bdb8e62f2..3ac39d3dbbeb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -528,6 +528,12 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) if (err) return err; + /* Skip firmware loading in multi-PF setup if firmware is loaded. */ + if (pf->multi_pf_support && nfp_nsp_fw_loaded(nsp)) { + fw_loaded = true; + goto end; + } + fw = nfp_net_fw_find(pdev, pf); do_reset = reset == NFP_NSP_DRV_RESET_ALWAYS || (fw && reset == NFP_NSP_DRV_RESET_DISK); @@ -556,16 +562,27 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) fw_loaded = true; } else if (policy != NFP_NSP_APP_FW_LOAD_DISK && nfp_nsp_has_stored_fw_load(nsp)) { + err = nfp_nsp_load_stored_fw(nsp); - /* Don't propagate this error to stick with legacy driver + /* Same logic with loading from disk when multi-PF. Othewise: + * + * Don't propagate this error to stick with legacy driver * behavior, failure will be detected later during init. + * + * Don't flag the fw_loaded in this case since other devices + * may reuse the firmware when configured this way. 
*/ - if (!nfp_nsp_load_stored_fw(nsp)) + if (!err) { dev_info(&pdev->dev, "Finished loading stored FW image\n"); - /* Don't flag the fw_loaded in this case since other devices - * may reuse the firmware when configured this way - */ + if (pf->multi_pf_support) + fw_loaded = true; + } else { + if (pf->multi_pf_support) + dev_err(&pdev->dev, "Stored FW loading failed: %d\n", err); + else + err = 0; + } } else { dev_warn(&pdev->dev, "Didn't load firmware, please update flash or reconfigure card\n"); } @@ -577,9 +594,10 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) * dependent on it, which could be the case if there are multiple * devices that could load firmware. */ - if (fw_loaded && ifcs == 1) + if (fw_loaded && ifcs == 1 && !pf->multi_pf_support) pf->unload_fw_on_remove = true; +end: return err < 0 ? err : fw_loaded; } -- Gitee From da30f033e5cd711a2c5b15e765742040431ce2d0 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Fri, 10 Mar 2023 18:16:08 +0800 Subject: [PATCH 0460/2138] anolis: nfp: don't skip firmware loading when it's pxe firmware in running ANBZ: #8563 In pxe boot case, the pxe firmware is not unloaded in some systems when booting completes. Driver needs to detect it so that it has chance to load the correct firmware. 
Signed-off-by: Yinjun Zhang Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 30 +++++++++++++++++-- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 3ac39d3dbbeb..d0f6bf8383e4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -469,6 +469,30 @@ nfp_get_fw_policy_value(struct pci_dev *pdev, struct nfp_nsp *nsp, return err; } +static bool +nfp_skip_fw_load(struct nfp_pf *pf, struct nfp_nsp *nsp) +{ + const struct nfp_mip *mip; + + if (!pf->multi_pf_support || nfp_nsp_fw_loaded(nsp) <= 0) + return false; + + mip = nfp_mip_open(pf->cpp); + if (!mip) + return false; + + /* For the case that system boots from pxe, we need + * reload FW if pxe FW is running. + */ + if (!strncmp(nfp_mip_name(mip), "pxe", 3)) { + nfp_mip_close(mip); + return false; + } + + pf->mip = mip; + return true; +} + /** * nfp_fw_load() - Load the firmware image * @pdev: PCI Device structure @@ -528,8 +552,7 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) if (err) return err; - /* Skip firmware loading in multi-PF setup if firmware is loaded. 
*/ - if (pf->multi_pf_support && nfp_nsp_fw_loaded(nsp)) { + if (nfp_skip_fw_load(pf, nsp)) { fw_loaded = true; goto end; } @@ -851,7 +874,8 @@ static int nfp_pci_probe(struct pci_dev *pdev, if (err) goto err_hwinfo_free; - pf->mip = nfp_mip_open(pf->cpp); + if (!pf->mip) + pf->mip = nfp_mip_open(pf->cpp); pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip); err = nfp_pf_find_rtsyms(pf); -- Gitee From 1cba7eb108dee479c73fb3d8f7d46ee4744ef13a Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Mon, 27 Feb 2023 15:21:34 +0800 Subject: [PATCH 0461/2138] anolis: nfp: introduce keepalive mechanism for multi-PF setup ANBZ: #8563 In multi-PF setup, management firmware is in charge of application firmware unloading instead of driver by keepalive mechanism. A new NSP resource area is allocated for keepalive use with name "nfp.beat". Driver sets the magic number when keepalive is needed and periodically updates the PF's corresponding qword in "nfp.beat". Management firmware checks these PFs' qwords to learn whether and which PFs are alive, and will unload the application firmware if no PF is running. This only works when magic number is correct. 
Signed-off-by: Yinjun Zhang Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 109 ++++++++++++++++-- drivers/net/ethernet/netronome/nfp/nfp_main.h | 15 ++- .../net/ethernet/netronome/nfp/nfpcore/nfp.h | 4 + 3 files changed, 115 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index d0f6bf8383e4..93e234575a76 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -469,12 +469,82 @@ nfp_get_fw_policy_value(struct pci_dev *pdev, struct nfp_nsp *nsp, return err; } +static void +nfp_nsp_beat_timer(struct timer_list *t) +{ + struct nfp_pf *pf = from_timer(pf, t, multi_pf.beat_timer); + u8 __iomem *addr; + + /* Each PF has corresponding qword to beat: + * offset | usage + * 0 | magic number + * 8 | beat qword of pf0 + * 16 | beat qword of pf1 + */ + addr = pf->multi_pf.beat_addr + ((pf->multi_pf.id + 1) << 3); + writeq(jiffies, addr); + /* Beat once per second. */ + mod_timer(&pf->multi_pf.beat_timer, jiffies + HZ); +} + +/** + * nfp_nsp_keepalive_start() - Start keepalive mechanism if needed + * @pf: NFP PF Device structure + * + * Return 0 if no error, errno otherwise + */ +static int +nfp_nsp_keepalive_start(struct nfp_pf *pf) +{ + struct nfp_resource *res; + u8 __iomem *base; + int err = 0; + u64 addr; + u32 cpp; + + if (!pf->multi_pf.en) + return 0; + + res = nfp_resource_acquire(pf->cpp, NFP_KEEPALIVE); + if (IS_ERR(res)) + return PTR_ERR(res); + + cpp = nfp_resource_cpp_id(res); + addr = nfp_resource_address(res); + + /* Allocate a fixed area for keepalive. 
*/ + base = nfp_cpp_map_area(pf->cpp, "keepalive", cpp, addr, + nfp_resource_size(res), &pf->multi_pf.beat_area); + if (IS_ERR(base)) { + nfp_err(pf->cpp, "Failed to map area for keepalive\n"); + err = PTR_ERR(base); + goto res_release; + } + + pf->multi_pf.beat_addr = base; + timer_setup(&pf->multi_pf.beat_timer, nfp_nsp_beat_timer, 0); + mod_timer(&pf->multi_pf.beat_timer, jiffies); + +res_release: + nfp_resource_release(res); + return err; +} + +static void +nfp_nsp_keepalive_stop(struct nfp_pf *pf) +{ + if (pf->multi_pf.beat_area) { + del_timer_sync(&pf->multi_pf.beat_timer); + nfp_cpp_area_release_free(pf->multi_pf.beat_area); + } +} + static bool nfp_skip_fw_load(struct nfp_pf *pf, struct nfp_nsp *nsp) { const struct nfp_mip *mip; - if (!pf->multi_pf_support || nfp_nsp_fw_loaded(nsp) <= 0) + if (!pf->multi_pf.en || nfp_nsp_fw_loaded(nsp) <= 0) return false; mip = nfp_mip_open(pf->cpp); @@ -504,7 +574,7 @@ nfp_skip_fw_load(struct nfp_pf *pf, struct nfp_nsp *nsp) static int nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) { - bool do_reset, fw_loaded = false; + bool do_reset, fw_loaded = false, fw_new = false; const struct firmware *fw = NULL; int err, reset, policy, ifcs = 0; char *token, *ptr; @@ -552,10 +622,12 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) if (err) return err; - if (nfp_skip_fw_load(pf, nsp)) { - fw_loaded = true; - goto end; - } + err = nfp_nsp_keepalive_start(pf); + if (err) + return err; + + if (nfp_skip_fw_load(pf, nsp)) + return true; fw = nfp_net_fw_find(pdev, pf); do_reset = reset == NFP_NSP_DRV_RESET_ALWAYS || @@ -583,6 +655,7 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) } dev_info(&pdev->dev, "Finished loading FW image\n"); fw_loaded = true; + fw_new = true; } else if (policy != NFP_NSP_APP_FW_LOAD_DISK && nfp_nsp_has_stored_fw_load(nsp)) { err = nfp_nsp_load_stored_fw(nsp); @@ -598,10 +671,10 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf 
*pf, struct nfp_nsp *nsp) if (!err) { dev_info(&pdev->dev, "Finished loading stored FW image\n"); - if (pf->multi_pf_support) + if (pf->multi_pf.en) fw_loaded = true; } else { - if (pf->multi_pf_support) + if (pf->multi_pf.en) dev_err(&pdev->dev, "Stored FW loading failed: %d\n", err); else err = 0; @@ -617,10 +690,19 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) * dependent on it, which could be the case if there are multiple * devices that could load firmware. */ - if (fw_loaded && ifcs == 1 && !pf->multi_pf_support) + if (err < 0) + nfp_nsp_keepalive_stop(pf); + else if (fw_loaded && ifcs == 1 && !pf->multi_pf.en) pf->unload_fw_on_remove = true; -end: + /* Only setting magic number when fw is freshly loaded here. NSP + * won't unload fw when heartbeat stops if the magic number is not + * correct. It's used when firmware is preloaded and shouldn't be + * unloaded when driver exits. + */ + if (fw_new && pf->multi_pf.en) + writeq(NFP_KEEPALIVE_MAGIC, pf->multi_pf.beat_addr); + return err < 0 ? err : fw_loaded; } @@ -666,8 +748,9 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) return err; } - pf->multi_pf_support = pdev->multifunction; - dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf_support ? "Multi" : "Single"); + pf->multi_pf.en = pdev->multifunction; + pf->multi_pf.id = PCI_FUNC(pdev->devfn); + dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf.en ? 
"Multi" : "Single"); err = nfp_nsp_wait(nsp); if (err < 0) @@ -915,6 +998,7 @@ static int nfp_pci_probe(struct pci_dev *pdev, err_net_remove: nfp_net_pci_remove(pf); err_fw_unload: + nfp_nsp_keepalive_stop(pf); kfree(pf->rtbl); nfp_mip_close(pf->mip); if (pf->unload_fw_on_remove) @@ -954,6 +1038,7 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) nfp_net_pci_remove(pf); vfree(pf->dumpspec); + nfp_nsp_keepalive_stop(pf); kfree(pf->rtbl); nfp_mip_close(pf->mip); if (unload_fw && pf->unload_fw_on_remove) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index d0bfde2a0b2c..c071087c83cd 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -84,6 +84,12 @@ struct nfp_dumpspec { * @port_refresh_work: Work entry for taking netdevs out * @shared_bufs: Array of shared buffer structures if FW has any SBs * @num_shared_bufs: Number of elements in @shared_bufs + * @multi_pf: Used in multi-PF setup + * @multi_pf.en: Is multi-PF setup? + * @multi_pf.id: PF index + * @multi_pf.beat_timer:Timer for beat to keepalive + * @multi_pf.beat_area: Pointer to CPP area for beat to keepalive + * @multi_pf.beat_addr: Pointer to mapped beat address used for keepalive * * Fields which may change after proble are protected by devlink instance lock. 
*/ @@ -141,7 +147,14 @@ struct nfp_pf { struct nfp_shared_buf *shared_bufs; unsigned int num_shared_bufs; - bool multi_pf_support; + + struct { + bool en; + u8 id; + struct timer_list beat_timer; + struct nfp_cpp_area *beat_area; + u8 __iomem *beat_addr; + } multi_pf; }; extern struct pci_driver nfp_netvf_pci_driver; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index db94b0bddc92..89a131cffc48 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -64,6 +64,10 @@ int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, /* MAC Statistics Accumulator */ #define NFP_RESOURCE_MAC_STATISTICS "mac.stat" +/* Keepalive */ +#define NFP_KEEPALIVE "nfp.beat" +#define NFP_KEEPALIVE_MAGIC 0x6e66702e62656174ULL /* ASCII of "nfp.beat" */ + int nfp_resource_table_init(struct nfp_cpp *cpp); struct nfp_resource * -- Gitee From 3f0fe219158157d674447cb52149aaef64d8cbf6 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Fri, 5 May 2023 19:15:17 +0800 Subject: [PATCH 0462/2138] anolis: nfp: avoid reclaiming resource mutex by mistake ANBZ: #8563 Multiple PFs of the same controller use the same interface id. So we shouldn't unconditionally reclaim resource mutex when probing, because the mutex may be held by another PF from the same controller. Now give it some time to release the mutex, and reclaim it if timeout. 
Signed-off-by: Yinjun Zhang Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- .../ethernet/netronome/nfp/nfpcore/nfp_mutex.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c index 7bc17b94ac60..1fac6867922b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c @@ -341,6 +341,7 @@ int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target, unsigned long long address) { + unsigned long timeout = jiffies + 2 * HZ; const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ u16 interface = nfp_cpp_interface(cpp); @@ -352,12 +353,16 @@ int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target, return err; /* Check lock */ - err = nfp_cpp_readl(cpp, mur, address, &tmp); - if (err < 0) - return err; + while (time_is_after_jiffies(timeout)) { + err = nfp_cpp_readl(cpp, mur, address, &tmp); + if (err < 0) + return err; - if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface) - return 0; + if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface) + return 0; + + msleep_interruptible(10); + } /* Bust the lock */ err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_unlocked(interface)); -- Gitee From 6edb93df0bfbbb04aa644f588fac6decc0447828 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Thu, 2 Mar 2023 18:28:06 +0800 Subject: [PATCH 0463/2138] anolis: nfp: redefine PF id used to format symbols ANBZ: #8563 Taking into account that NFP3800 supports 4 physical functions per controller, now recalculate the PF id that is used to format symbols to communicate with application firmware.
Signed-off-by: Yinjun Zhang Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/abm/ctrl.c | 2 +- .../net/ethernet/netronome/nfp/flower/main.c | 2 +- drivers/net/ethernet/netronome/nfp/nfp_main.c | 18 +++++++++++------- drivers/net/ethernet/netronome/nfp/nfp_main.h | 1 + .../ethernet/netronome/nfp/nfpcore/nfp_dev.c | 2 ++ .../ethernet/netronome/nfp/nfpcore/nfp_dev.h | 1 + 6 files changed, 17 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c index 69e84ff7f2e5..41d18df97c85 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -362,7 +362,7 @@ int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm) const struct nfp_rtsym *sym; int res; - abm->pf_id = nfp_cppcore_pcie_unit(pf->cpp); + abm->pf_id = nfp_get_pf_id(pf); /* Check if Qdisc offloads are supported */ res = nfp_pf_rtsym_read_optional(pf, NFP_RED_SUPPORT_SYM_NAME, 1); diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 83eaa5ae3cd4..565987f0a595 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -378,10 +378,10 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, enum nfp_flower_cmsg_port_vnic_type vnic_type, enum nfp_repr_type repr_type, unsigned int cnt) { - u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp); struct nfp_flower_priv *priv = app->priv; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; + u8 nfp_pcie = nfp_get_pf_id(app->pf); enum nfp_port_type port_type; struct nfp_repr *nfp_repr; struct nfp_reprs *reprs; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 93e234575a76..8174a07b3cf7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ 
b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -69,6 +69,13 @@ static const struct pci_device_id nfp_pci_device_ids[] = { }; MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids); +u8 nfp_get_pf_id(struct nfp_pf *pf) +{ + return nfp_cppcore_pcie_unit(pf->cpp) * + pf->dev_info->pf_num_per_unit + + pf->multi_pf.id; +} + int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, unsigned int default_val) { @@ -76,7 +83,7 @@ int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, int err = 0; u64 val; - snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp)); + snprintf(name, sizeof(name), format, nfp_get_pf_id(pf)); val = nfp_rtsym_read_le(pf->rtbl, name, &err); if (err) { @@ -95,8 +102,7 @@ nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, { char pf_symbol[256]; - snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, - nfp_cppcore_pcie_unit(pf->cpp)); + snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, nfp_get_pf_id(pf)); return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area); } @@ -801,10 +807,8 @@ static void nfp_fw_unload(struct nfp_pf *pf) static int nfp_pf_find_rtsyms(struct nfp_pf *pf) { + unsigned int pf_id = nfp_get_pf_id(pf); char pf_symbol[256]; - unsigned int pf_id; - - pf_id = nfp_cppcore_pcie_unit(pf->cpp); /* Optional per-PCI PF mailbox */ snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id); @@ -830,7 +834,7 @@ static u64 nfp_net_pf_get_app_cap(struct nfp_pf *pf) int err = 0; u64 val; - snprintf(name, sizeof(name), "_pf%u_net_app_cap", nfp_cppcore_pcie_unit(pf->cpp)); + snprintf(name, sizeof(name), "_pf%u_net_app_cap", nfp_get_pf_id(pf)); val = nfp_rtsym_read_le(pf->rtbl, name, &err); if (err) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index c071087c83cd..66bc1f48fee0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -208,4 +208,5 @@ void 
nfp_devlink_params_unregister(struct nfp_pf *pf); unsigned int nfp_net_lr2speed(unsigned int linkrate); unsigned int nfp_net_speed2lr(unsigned int speed); +u8 nfp_get_pf_id(struct nfp_pf *pf); #endif /* NFP_MAIN_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c index 0725b51c2a95..8a7c5de0de77 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c @@ -19,6 +19,7 @@ const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = { .pcie_cfg_expbar_offset = 0x0a00, .pcie_expl_offset = 0xd000, .qc_area_sz = 0x100000, + .pf_num_per_unit = 4, }, [NFP_DEV_NFP3800_VF] = { .dma_mask = DMA_BIT_MASK(48), @@ -38,6 +39,7 @@ const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = { .pcie_cfg_expbar_offset = 0x0400, .pcie_expl_offset = 0x1000, .qc_area_sz = 0x80000, + .pf_num_per_unit = 1, }, [NFP_DEV_NFP6000_VF] = { .dma_mask = DMA_BIT_MASK(40), diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h index e4d38178de0f..d948c9c4a09a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h @@ -35,6 +35,7 @@ struct nfp_dev_info { u32 pcie_cfg_expbar_offset; u32 pcie_expl_offset; u32 qc_area_sz; + u8 pf_num_per_unit; }; extern const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT]; -- Gitee From 4b7e349ac6579c657f227efb45b82fbf11dad001 Mon Sep 17 00:00:00 2001 From: Tianyu Yuan Date: Tue, 7 Mar 2023 17:02:29 +0800 Subject: [PATCH 0464/2138] anolis: nfp: apply one port per PF for multi-PF setup ANBZ: #8563 Only one port per PF is allowed in multi-PF setup. While eth_table still carries the total port info, each PF need bind itself with correct port according to PF id. 
Signed-off-by: Tianyu Yuan Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/abm/main.c | 2 +- drivers/net/ethernet/netronome/nfp/bpf/main.c | 2 +- .../net/ethernet/netronome/nfp/flower/main.c | 17 ++++++++++------- .../net/ethernet/netronome/nfp/nfp_net_main.c | 8 ++++++-- drivers/net/ethernet/netronome/nfp/nfp_port.c | 3 ++- drivers/net/ethernet/netronome/nfp/nic/main.c | 3 ++- 6 files changed, 22 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 5d3df28c648f..d4acaa15629d 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -451,7 +451,7 @@ static int nfp_abm_init(struct nfp_app *app) nfp_err(pf->cpp, "ABM NIC requires ETH table\n"); return -EINVAL; } - if (pf->max_data_vnics != pf->eth_tbl->count) { + if (pf->max_data_vnics != pf->eth_tbl->count && !pf->multi_pf.en) { nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", pf->max_data_vnics, pf->eth_tbl->count); return -EINVAL; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index f469950c7265..3d928dfba114 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -70,7 +70,7 @@ nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) nfp_err(pf->cpp, "No ETH table\n"); return -EINVAL; } - if (pf->max_data_vnics != pf->eth_tbl->count) { + if (pf->max_data_vnics != pf->eth_tbl->count && !pf->multi_pf.en) { nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", pf->max_data_vnics, pf->eth_tbl->count); return -EINVAL; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 565987f0a595..88e8ae25f0cc 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c 
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -428,10 +428,10 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, goto err_reprs_clean; } if (repr_type == NFP_REPR_TYPE_PF) { - port->pf_id = i; + port->pf_id = app->pf->multi_pf.id; port->vnic = priv->nn->dp.ctrl_bar; } else { - port->pf_id = 0; + port->pf_id = app->pf->multi_pf.id; port->vf_id = i; port->vnic = app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ; @@ -496,24 +496,27 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; + int err, reify_cnt, phy_reprs_num; struct nfp_repr *nfp_repr; struct sk_buff *ctrl_skb; struct nfp_reprs *reprs; - int err, reify_cnt; unsigned int i; ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count); if (!ctrl_skb) return -ENOMEM; + phy_reprs_num = app->pf->multi_pf.en ? app->pf->max_data_vnics : + eth_tbl->count; reprs = nfp_reprs_alloc(eth_tbl->max_index + 1); if (!reprs) { err = -ENOMEM; goto err_free_ctrl_skb; } - for (i = 0; i < eth_tbl->count; i++) { - unsigned int phys_port = eth_tbl->ports[i].index; + for (i = 0; i < phy_reprs_num; i++) { + int idx = app->pf->multi_pf.en ? 
app->pf->multi_pf.id : i; + unsigned int phys_port = eth_tbl->ports[idx].index; struct net_device *repr; struct nfp_port *port; u32 cmsg_port_id; @@ -542,7 +545,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) nfp_repr_free(repr); goto err_reprs_clean; } - err = nfp_port_init_phy_port(app->pf, app, port, i); + err = nfp_port_init_phy_port(app->pf, app, port, idx); if (err) { kfree(repr_priv); nfp_port_free(port); @@ -609,7 +612,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) { - if (id > 0) { + if (id > 0 && !app->pf->multi_pf.en) { nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n"); goto err_invalid_port; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index cbe4972ba104..ad51fbfc152d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -141,7 +141,7 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id) { int err; - nn->id = id; + nn->id = pf->multi_pf.en ? pf->multi_pf.id : id; if (nn->port) { err = nfp_devlink_port_register(pf->app, nn->port); @@ -184,7 +184,7 @@ nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar, for (i = 0; i < pf->max_data_vnics; i++) { nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar, - stride, i); + stride, pf->multi_pf.en ? 
pf->multi_pf.id : i); if (IS_ERR(nn)) { err = PTR_ERR(nn); goto err_free_prev; @@ -706,6 +706,10 @@ int nfp_net_pci_probe(struct nfp_pf *pf) pf->max_data_vnics = nfp_net_pf_get_num_ports(pf); if ((int)pf->max_data_vnics < 0) return pf->max_data_vnics; + if (pf->multi_pf.en && pf->max_data_vnics != 1) { + nfp_err(pf->cpp, "Only one data_vnic per PF is supported in multiple PF setup, please update FW.\n"); + return -EPERM; + } err = nfp_net_pci_map_mem(pf); if (err) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 54640bcb70fb..dadd6844c385 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -189,7 +189,8 @@ int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, port->eth_port = &pf->eth_tbl->ports[id]; port->eth_id = pf->eth_tbl->ports[id].index; - port->netdev->dev_port = id; + if (!pf->multi_pf.en) + port->netdev->dev_port = id; if (pf->mac_stats_mem) port->eth_stats = pf->mac_stats_mem + port->eth_id * NFP_MAC_STATS_SIZE; diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c index 9dd5afe37f6e..e7a2d01bcbff 100644 --- a/drivers/net/ethernet/netronome/nfp/nic/main.c +++ b/drivers/net/ethernet/netronome/nfp/nic/main.c @@ -12,7 +12,8 @@ static int nfp_nic_init(struct nfp_app *app) { struct nfp_pf *pf = app->pf; - if (pf->eth_tbl && pf->max_data_vnics != pf->eth_tbl->count) { + if (pf->eth_tbl && pf->max_data_vnics != pf->eth_tbl->count && + !pf->multi_pf.en) { nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", pf->max_data_vnics, pf->eth_tbl->count); return -EINVAL; -- Gitee From 19e76158f81294b30f9c93ac1e485a5138d6c355 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Wed, 1 Mar 2023 19:23:49 +0800 Subject: [PATCH 0465/2138] anolis: nfp: enable multi-PF in application firmware if supported ANBZ: #8563 For backward compatibility concern, the new application firmware 
is designed to support both single-PF setup and multi-PF setup. Thus driver should inform application firmware which setup current is. This should be done as early as possible since the setup may affect some configurations exposed by firmware. Signed-off-by: Yinjun Zhang Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- .../net/ethernet/netronome/nfp/nfp_net_ctrl.h | 1 + .../net/ethernet/netronome/nfp/nfp_net_main.c | 121 +++++++++++++----- 2 files changed, 92 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 3e63f6d6a563..d6b127f13ed3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -268,6 +268,7 @@ #define NFP_NET_CFG_CTRL_PKT_TYPE (0x1 << 0) /* Pkttype offload */ #define NFP_NET_CFG_CTRL_IPSEC (0x1 << 1) /* IPsec offload */ #define NFP_NET_CFG_CTRL_MCAST_FILTER (0x1 << 2) /* Multicast Filter */ +#define NFP_NET_CFG_CTRL_MULTI_PF (0x1 << 5) /* Multi PF */ #define NFP_NET_CFG_CTRL_FREELIST_EN (0x1 << 6) /* Freelist enable flag bit */ #define NFP_NET_CFG_CAP_WORD1 0x00a4 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index ad51fbfc152d..c06e1e9c9412 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -684,15 +684,100 @@ int nfp_net_refresh_eth_port(struct nfp_port *port) return ret; } +static int nfp_net_pre_init(struct nfp_pf *pf, int *stride) +{ + struct nfp_net_fw_version fw_ver; + struct nfp_cpp_area *area; + u8 __iomem *ctrl_bar; + int err = 0; + + ctrl_bar = nfp_pf_map_rtsym(pf, NULL, "_pf%d_net_bar0", NFP_PF_CSR_SLICE_SIZE, &area); + if (IS_ERR(ctrl_bar)) { + nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n"); + return pf->fw_loaded ? 
PTR_ERR(ctrl_bar) : 1; + } + + nfp_net_get_fw_version(&fw_ver, ctrl_bar); + if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK || + fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { + nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n", + fw_ver.extend, fw_ver.class, + fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto end; + } + + /* Determine stride */ + if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) { + *stride = 2; + nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n"); + } else { + switch (fw_ver.major) { + case 1 ... 5: + *stride = 4; + break; + default: + nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n", + fw_ver.extend, fw_ver.class, + fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto end; + } + } + + if (!pf->multi_pf.en) + goto end; + + /* Enable multi-PF. */ + if (readl(ctrl_bar + NFP_NET_CFG_CAP_WORD1) & NFP_NET_CFG_CTRL_MULTI_PF) { + unsigned long long addr; + u32 cfg_q, cpp_id, ret; + unsigned long timeout; + + writel(NFP_NET_CFG_CTRL_MULTI_PF, ctrl_bar + NFP_NET_CFG_CTRL_WORD1); + writel(NFP_NET_CFG_UPDATE_GEN, ctrl_bar + NFP_NET_CFG_UPDATE); + + /* Config queue is next to txq. 
*/ + cfg_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ) + 1; + addr = nfp_qcp_queue_offset(pf->dev_info, cfg_q) + NFP_QCP_QUEUE_ADD_WPTR; + cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0); + err = nfp_cpp_writel(pf->cpp, cpp_id, addr, 1); + if (err) + goto end; + + timeout = jiffies + HZ * NFP_NET_POLL_TIMEOUT; + while ((ret = readl(ctrl_bar + NFP_NET_CFG_UPDATE))) { + if (ret & NFP_NET_CFG_UPDATE_ERR) { + nfp_err(pf->cpp, "Enalbe multi-PF failed\n"); + err = -EIO; + break; + } + + usleep_range(250, 500); + if (time_is_before_eq_jiffies(timeout)) { + nfp_err(pf->cpp, "Enalbe multi-PF timeout\n"); + err = -ETIMEDOUT; + break; + } + }; + } else { + nfp_err(pf->cpp, "Loaded firmware doesn't support multi-PF\n"); + err = -EINVAL; + } + +end: + nfp_cpp_area_release_free(area); + return err; +} + /* * PCI device functions */ int nfp_net_pci_probe(struct nfp_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); - struct nfp_net_fw_version fw_ver; u8 __iomem *ctrl_bar, *qc_bar; - int stride; + int stride = 0; int err; INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics); @@ -703,6 +788,10 @@ int nfp_net_pci_probe(struct nfp_pf *pf) return -EINVAL; } + err = nfp_net_pre_init(pf, &stride); + if (err) + return err; + pf->max_data_vnics = nfp_net_pf_get_num_ports(pf); if ((int)pf->max_data_vnics < 0) return pf->max_data_vnics; @@ -722,34 +811,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf) goto err_unmap; } - nfp_net_get_fw_version(&fw_ver, ctrl_bar); - if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK || - fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { - nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n", - fw_ver.extend, fw_ver.class, - fw_ver.major, fw_ver.minor); - err = -EINVAL; - goto err_unmap; - } - - /* Determine stride */ - if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) { - stride = 2; - nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n"); - } else { - switch (fw_ver.major) { - case 1 ... 
5: - stride = 4; - break; - default: - nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n", - fw_ver.extend, fw_ver.class, - fw_ver.major, fw_ver.minor); - err = -EINVAL; - goto err_unmap; - } - } - err = nfp_net_pf_app_init(pf, qc_bar, stride); if (err) goto err_unmap; -- Gitee From 15dc4d0143d46c7e684532196e2eace6d151864a Mon Sep 17 00:00:00 2001 From: Tianyu Yuan Date: Fri, 3 Mar 2023 10:31:05 +0800 Subject: [PATCH 0466/2138] anolis: nfp: configure VF total count for each PF MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8563 By default, PFs share the total 64 VFs equally, i.e., 32 VFs for each PF in two port NIC, which is initialized in each PF’s SR-IOV capability register by management firmware. And a new hwinfo `abi_total_vf` is introduced to make each PF’s VF total count configurable. Management firmware reads the hwinfo and configures it in SR-IOV capability register during boot process. So reboot is required to make the configuration take effect. This is not touched in driver code. Driver then modifies each PF’s `sriov_totalvf` according to maximum VF count supported by the loaded application firmware. Here we apply the rule that the PF with smaller id is satisfied first if total configured count exceeds the limitation. 
Signed-off-by: Tianyu Yuan Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 49 +++++++++++++++++-- 1 file changed, 45 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 8174a07b3cf7..57432d5d1d00 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -224,11 +224,48 @@ static int nfp_pf_board_state_wait(struct nfp_pf *pf) return 0; } +static unsigned int nfp_pf_get_limit_vfs(struct nfp_pf *pf, + unsigned int limit_vfs_rtsym) +{ + u16 pos, offset, total; + + if (!pf->multi_pf.en || !limit_vfs_rtsym) + return limit_vfs_rtsym; + + pos = pci_find_ext_capability(pf->pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* Management firmware ensures that SR-IOV capability registers + * are initialized correctly. + */ + pci_read_config_word(pf->pdev, pos + PCI_SRIOV_VF_OFFSET, &offset); + pci_read_config_word(pf->pdev, pos + PCI_SRIOV_TOTAL_VF, &total); + if (!total) + return 0; + + /* Offset of first VF is relative to its PF. */ + offset += pf->multi_pf.id; + if (offset < pf->dev_info->pf_num_per_unit) + return 0; + + /* For 3800, VF is numbered from max PF count. 
*/ + offset -= pf->dev_info->pf_num_per_unit; + if (offset >= limit_vfs_rtsym) + return 0; + + if (offset + total > limit_vfs_rtsym) + return limit_vfs_rtsym - offset; + + return total; +} + static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) { + unsigned int limit_vfs_rtsym; int err; - pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err); + limit_vfs_rtsym = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err); if (err) { /* For backwards compatibility if symbol not found allow all */ pf->limit_vfs = ~0; @@ -239,9 +276,13 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) return err; } - err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); - if (err) - nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err); + pf->limit_vfs = nfp_pf_get_limit_vfs(pf, limit_vfs_rtsym); + if (pci_sriov_get_totalvfs(pf->pdev) != pf->limit_vfs) { + err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); + if (err) + nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err); + } + return 0; } -- Gitee From b606f2d6699da9ab291dde989f7a71b1d0c61408 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Thu, 30 Mar 2023 10:24:21 +0800 Subject: [PATCH 0467/2138] anolis: nfp: configure VF split info into application firmware ANBZ: #8563 In multi-PF case, all PFs share total 64 VFs. To support the VF count of each PF configurable, driver needs to write the VF count and the first VF id into application firmware, so that firmware can initialize and allocate relevant resource accordingly. 
Signed-off-by: Yinjun Zhang Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 1 + drivers/net/ethernet/netronome/nfp/nfp_main.h | 2 ++ .../net/ethernet/netronome/nfp/nfp_net_main.c | 16 ++++++++++++ .../ethernet/netronome/nfp/nfp_net_sriov.c | 25 +++++++++++++++++++ .../ethernet/netronome/nfp/nfp_net_sriov.h | 5 ++++ 5 files changed, 49 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 57432d5d1d00..afb210943b78 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -254,6 +254,7 @@ static unsigned int nfp_pf_get_limit_vfs(struct nfp_pf *pf, if (offset >= limit_vfs_rtsym) return 0; + pf->multi_pf.vf_fid = offset; if (offset + total > limit_vfs_rtsym) return limit_vfs_rtsym - offset; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 66bc1f48fee0..750dfaf4ca82 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -87,6 +87,7 @@ struct nfp_dumpspec { * @multi_pf: Used in multi-PF setup * @multi_pf.en: Is multi-PF setup? 
* @multi_pf.id: PF index + * @multi_pf.vf_fid: Id of first VF that belongs to this PF * @multi_pf.beat_timer:Timer for beat to keepalive * @multi_pf.beat_area: Pointer to CPP area for beat to keepalive * @multi_pf.beat_addr: Pointer to mapped beat address used for keepalive @@ -151,6 +152,7 @@ struct nfp_pf { struct { bool en; u8 id; + u8 vf_fid; struct timer_list beat_timer; struct nfp_cpp_area *beat_area; u8 __iomem *beat_addr; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index c06e1e9c9412..4d4af60400af 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -293,6 +293,16 @@ static int nfp_net_pf_init_vnics(struct nfp_pf *pf) return err; } +static void nfp_net_pf_clean_vnics(struct nfp_pf *pf) +{ + struct nfp_net *nn; + + list_for_each_entry(nn, &pf->vnics, vnic_list) { + if (nfp_net_is_data_vnic(nn)) + nfp_net_pf_clean_vnic(pf, nn); + } +} + static int nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride) { @@ -843,11 +853,17 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_stop_app; + err = nfp_net_pf_init_sriov(pf); + if (err) + goto err_clean_vnics; + devl_unlock(devlink); devlink_register(devlink); return 0; +err_clean_vnics: + nfp_net_pf_clean_vnics(pf); err_stop_app: nfp_net_pf_app_stop(pf); err_free_irqs: diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index 6eeeb0fda91f..f516ba7a429e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -14,6 +14,9 @@ #include "nfp_net.h" #include "nfp_net_sriov.h" +/* The configurations that precede VF creating. 
*/ +#define NFP_NET_VF_PRE_CONFIG NFP_NET_VF_CFG_MB_CAP_SPLIT + static int nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool warn) { @@ -29,6 +32,10 @@ nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool return -EOPNOTSUPP; } + /* No need to check vf for the pre-configurations. */ + if (cap & NFP_NET_VF_PRE_CONFIG) + return 0; + if (vf < 0 || vf >= app->pf->num_vfs) { if (warn) nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); @@ -309,3 +316,21 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, return 0; } + +int nfp_net_pf_init_sriov(struct nfp_pf *pf) +{ + int err; + + if (!pf->multi_pf.en || !pf->limit_vfs) + return 0; + + err = nfp_net_sriov_check(pf->app, 0, NFP_NET_VF_CFG_MB_CAP_SPLIT, "split", true); + if (err) + return err; + + writeb(pf->limit_vfs, pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_VF_CNT); + + /* Reuse NFP_NET_VF_CFG_MB_VF_NUM to pass vf_fid to FW. */ + return nfp_net_sriov_update(pf->app, pf->multi_pf.vf_fid, + NFP_NET_VF_CFG_MB_UPD_SPLIT, "split"); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h index 2d445fa199dc..8de959018819 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h @@ -21,6 +21,7 @@ #define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4) #define NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO (0x1 << 5) #define NFP_NET_VF_CFG_MB_CAP_RATE (0x1 << 6) +#define NFP_NET_VF_CFG_MB_CAP_SPLIT (0x1 << 8) #define NFP_NET_VF_CFG_MB_RET 0x2 #define NFP_NET_VF_CFG_MB_UPD 0x4 #define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0) @@ -30,6 +31,8 @@ #define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4) #define NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO (0x1 << 5) #define NFP_NET_VF_CFG_MB_UPD_RATE (0x1 << 6) +#define NFP_NET_VF_CFG_MB_UPD_SPLIT (0x1 << 8) +#define NFP_NET_VF_CFG_MB_VF_CNT 0x6 #define NFP_NET_VF_CFG_MB_VF_NUM 0x7 /* VF config entry @@ -68,4 +71,6 @@ int 
nfp_app_set_vf_link_state(struct net_device *netdev, int vf, int nfp_app_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); +int nfp_net_pf_init_sriov(struct nfp_pf *pf); + #endif /* _NFP_NET_SRIOV_H_ */ -- Gitee From 6016016d00e528508613a64df0957e4bb9403d41 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Tue, 4 Apr 2023 10:49:03 +0800 Subject: [PATCH 0468/2138] anolis: nfp: use absolute vf id for multi-PF case ANBZ: #8563 In multi-PF setup, absolute VF id is required to configure attributes for corresponding VF. Add helper function to map rtsym with specified offset. With PF's first VF as base offset, we can access `vf_cfg_mem` as before. Signed-off-by: Yinjun Zhang Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 14 +++++++++++--- drivers/net/ethernet/netronome/nfp/nfp_main.h | 4 ++++ .../net/ethernet/netronome/nfp/nfp_net_main.c | 10 ++++++---- .../net/ethernet/netronome/nfp/nfp_net_sriov.c | 14 ++++++++++---- .../ethernet/netronome/nfp/nfpcore/nfp_nffw.h | 4 ++++ .../ethernet/netronome/nfp/nfpcore/nfp_rtsym.c | 16 ++++++++++++---- 6 files changed, 47 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index afb210943b78..21c1d4877a81 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -97,14 +97,22 @@ int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, } u8 __iomem * -nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, - unsigned int min_size, struct nfp_cpp_area **area) +nfp_pf_map_rtsym_offset(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area) { char pf_symbol[256]; snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, nfp_get_pf_id(pf)); - return 
nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area); + return nfp_rtsym_map_offset(pf->rtbl, pf_symbol, name, offset, min_size, area); +} + +u8 __iomem * +nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int min_size, struct nfp_cpp_area **area) +{ + return nfp_pf_map_rtsym_offset(pf, name, sym_fmt, 0, min_size, area); } /* Callers should hold the devlink instance lock */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h index 750dfaf4ca82..5a01c66ddce9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h @@ -179,6 +179,10 @@ int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, unsigned int default_val); int nfp_net_pf_get_app_id(struct nfp_pf *pf); u8 __iomem * +nfp_pf_map_rtsym_offset(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area); +u8 __iomem * nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, unsigned int min_size, struct nfp_cpp_area **area); int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 4d4af60400af..5df99c60c3b2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -473,9 +473,10 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) } } - pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar", - NFP_NET_CFG_BAR_SZ * pf->limit_vfs, - &pf->vf_cfg_bar); + pf->vf_cfg_mem = nfp_pf_map_rtsym_offset(pf, "net.vfcfg", "_pf%d_net_vf_bar", + NFP_NET_CFG_BAR_SZ * pf->multi_pf.vf_fid, + NFP_NET_CFG_BAR_SZ * pf->limit_vfs, + &pf->vf_cfg_bar); if (IS_ERR(pf->vf_cfg_mem)) { if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) { err = PTR_ERR(pf->vf_cfg_mem); @@ -484,7 +485,8 @@ 
static int nfp_net_pci_map_mem(struct nfp_pf *pf) pf->vf_cfg_mem = NULL; } - min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ; + min_size = NFP_NET_VF_CFG_SZ * (pf->limit_vfs + pf->multi_pf.vf_fid) + + NFP_NET_VF_CFG_MB_SZ; pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2", "_pf%d_net_vf_cfg2", min_size, &pf->vfcfg_tbl2_area); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index f516ba7a429e..67aea9445aa2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -72,7 +72,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct nfp_app *app = nfp_app_from_netdev(netdev); unsigned int vf_offset; - int err; + int err, abs_vf; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac", true); if (err) @@ -85,13 +85,14 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) return -EINVAL; } + abs_vf = vf + app->pf->multi_pf.vf_fid; /* Write MAC to VF entry in VF config symbol */ - vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; + vf_offset = NFP_NET_VF_CFG_MB_SZ + abs_vf * NFP_NET_VF_CFG_SZ; writel(get_unaligned_be32(mac), app->pf->vfcfg_tbl2 + vf_offset); writew(get_unaligned_be16(mac + 4), app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO); - err = nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC"); + err = nfp_net_sriov_update(app, abs_vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC"); if (!err) nfp_info(app->pf->cpp, "MAC %pM set on VF %d, reload the VF driver to make this change effective.\n", @@ -145,6 +146,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, if (vlan_tag && is_proto_sup) vlan_tag |= FIELD_PREP(NFP_NET_VF_CFG_VLAN_PROT, ntohs(vlan_proto)); + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; writel(vlan_tag, app->pf->vfcfg_tbl2 + vf_offset + 
NFP_NET_VF_CFG_VLAN); @@ -169,6 +171,7 @@ int nfp_app_set_vf_rate(struct net_device *netdev, int vf, return -EINVAL; } + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; ratevalue = FIELD_PREP(NFP_NET_VF_CFG_MAX_RATE, max_tx_rate ? max_tx_rate : @@ -195,6 +198,7 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) return err; /* Write spoof check control bit to VF entry in VF config symbol */ + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + NFP_NET_VF_CFG_CTRL; vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); @@ -219,6 +223,7 @@ int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable) return err; /* Write trust control bit to VF entry in VF config symbol */ + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + NFP_NET_VF_CFG_CTRL; vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); @@ -253,6 +258,7 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, } /* Write link state to VF entry in VF config symbol */ + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + NFP_NET_VF_CFG_CTRL; vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); @@ -278,7 +284,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, if (err) return err; - vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; + vf_offset = NFP_NET_VF_CFG_MB_SZ + (vf + app->pf->multi_pf.vf_fid) * NFP_NET_VF_CFG_SZ; mac_hi = readl(app->pf->vfcfg_tbl2 + vf_offset); mac_lo = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h index 49a4d3f56b56..4042352f83b0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h @@ -101,6 +101,10 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, 
const char *name, int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, u64 value); u8 __iomem * +nfp_rtsym_map_offset(struct nfp_rtsym_table *rtbl, const char *name, const char *id, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area); +u8 __iomem * nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, unsigned int min_size, struct nfp_cpp_area **area); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index 2260c2403a83..97a4417a1c1b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -520,8 +520,9 @@ int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, } u8 __iomem * -nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, - unsigned int min_size, struct nfp_cpp_area **area) +nfp_rtsym_map_offset(struct nfp_rtsym_table *rtbl, const char *name, const char *id, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area) { const struct nfp_rtsym *sym; u8 __iomem *mem; @@ -540,12 +541,12 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, return (u8 __iomem *)ERR_PTR(err); } - if (sym->size < min_size) { + if (sym->size < min_size + offset) { nfp_err(rtbl->cpp, "rtsym '%s': too small\n", name); return (u8 __iomem *)ERR_PTR(-EINVAL); } - mem = nfp_cpp_map_area(rtbl->cpp, id, cpp_id, addr, sym->size, area); + mem = nfp_cpp_map_area(rtbl->cpp, id, cpp_id, addr + offset, sym->size - offset, area); if (IS_ERR(mem)) { nfp_err(rtbl->cpp, "rtysm '%s': failed to map: %ld\n", name, PTR_ERR(mem)); @@ -554,3 +555,10 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, return mem; } + +u8 __iomem * +nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, + unsigned int min_size, struct nfp_cpp_area **area) +{ + return 
nfp_rtsym_map_offset(rtbl, name, id, 0, min_size, area); +} -- Gitee From 2a5a555b58af5dc3501a0880553ce8124257e3a0 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Tue, 19 Sep 2023 19:27:06 +0800 Subject: [PATCH 0469/2138] anolis: nfp: refine firmware loading and keepalive mechanism ANBZ: #8563 Currently we skip application firmware loading either because other PFs are running or the firmware is preloaded. But sometimes the firmware is not preloaded intentionally but is remained unexpectedly, in which case we need a chance to reload firmware. Now we only skip firmware loading when there're other PFs in running. And the firmware loading flow of multi-PF setup is more consistent with that of single-PF setup now. Keepalive magic number is set when PFs are removed if firmware needs to be unloaded. Signed-off-by: Yinjun Zhang Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 104 +++++++++--------- 1 file changed, 53 insertions(+), 51 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 21c1d4877a81..e1ddc6e667fa 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -525,20 +525,24 @@ nfp_get_fw_policy_value(struct pci_dev *pdev, struct nfp_nsp *nsp, return err; } -static void -nfp_nsp_beat_timer(struct timer_list *t) +static u8 __iomem * +nfp_get_beat_addr(struct nfp_pf *pf, int pf_id) { - struct nfp_pf *pf = from_timer(pf, t, multi_pf.beat_timer); - u8 __iomem *addr; - /* Each PF has corresponding qword to beat: * offset | usage * 0 | magic number * 8 | beat qword of pf0 * 16 | beat qword of pf1 */ - addr = pf->multi_pf.beat_addr + ((pf->multi_pf.id + 1) << 3); - writeq(jiffies, addr); + return pf->multi_pf.beat_addr + ((pf_id + 1) << 3); +} + +static void +nfp_nsp_beat_timer(struct timer_list *t) +{ + struct nfp_pf *pf = 
from_timer(pf, t, multi_pf.beat_timer); + + writeq(jiffies, nfp_get_beat_addr(pf, pf->multi_pf.id)); /* Beat once per second. */ mod_timer(&pf->multi_pf.beat_timer, jiffies + HZ); } @@ -595,28 +599,42 @@ nfp_nsp_keepalive_stop(struct nfp_pf *pf) } } +static u64 +nfp_get_sibling_beat(struct nfp_pf *pf) +{ + unsigned int i = 0; + u64 beat = 0; + + if (!pf->multi_pf.beat_addr) + return 0; + + for (; i < pf->dev_info->pf_num_per_unit; i++) { + if (i == pf->multi_pf.id) + continue; + + beat += readq(nfp_get_beat_addr(pf, i)); + } + + return beat; +} + static bool nfp_skip_fw_load(struct nfp_pf *pf, struct nfp_nsp *nsp) { - const struct nfp_mip *mip; + unsigned long timeout = jiffies + HZ * 3; + u64 beat = nfp_get_sibling_beat(pf); if (!pf->multi_pf.en || nfp_nsp_fw_loaded(nsp) <= 0) return false; - mip = nfp_mip_open(pf->cpp); - if (!mip) - return false; + while (time_is_after_jiffies(timeout)) { + if (beat != nfp_get_sibling_beat(pf)) + return true; - /* For the case that system boots from pxe, we need - * reload FW if pxe FW is running. - */ - if (!strncmp(nfp_mip_name(mip), "pxe", 3)) { - nfp_mip_close(mip); - return false; + msleep(500); } - pf->mip = mip; - return true; + return false; } /** @@ -630,7 +648,7 @@ nfp_skip_fw_load(struct nfp_pf *pf, struct nfp_nsp *nsp) static int nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) { - bool do_reset, fw_loaded = false, fw_new = false; + bool do_reset, fw_loaded = false; const struct firmware *fw = NULL; int err, reset, policy, ifcs = 0; char *token, *ptr; @@ -711,30 +729,17 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) } dev_info(&pdev->dev, "Finished loading FW image\n"); fw_loaded = true; - fw_new = true; } else if (policy != NFP_NSP_APP_FW_LOAD_DISK && nfp_nsp_has_stored_fw_load(nsp)) { - err = nfp_nsp_load_stored_fw(nsp); - - /* Same logic with loading from disk when multi-PF. 
Othewise: - * - * Don't propagate this error to stick with legacy driver + /* Don't propagate this error to stick with legacy driver * behavior, failure will be detected later during init. - * - * Don't flag the fw_loaded in this case since other devices - * may reuse the firmware when configured this way. */ - if (!err) { + if (!nfp_nsp_load_stored_fw(nsp)) dev_info(&pdev->dev, "Finished loading stored FW image\n"); - if (pf->multi_pf.en) - fw_loaded = true; - } else { - if (pf->multi_pf.en) - dev_err(&pdev->dev, "Stored FW loading failed: %d\n", err); - else - err = 0; - } + /* Don't flag the fw_loaded in this case since other devices + * may reuse the firmware when configured this way + */ } else { dev_warn(&pdev->dev, "Didn't load firmware, please update flash or reconfigure card\n"); } @@ -748,17 +753,9 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) */ if (err < 0) nfp_nsp_keepalive_stop(pf); - else if (fw_loaded && ifcs == 1 && !pf->multi_pf.en) + else if (fw_loaded && ifcs == 1) pf->unload_fw_on_remove = true; - /* Only setting magic number when fw is freshly loaded here. NSP - * won't unload fw when heartbeat stops if the magic number is not - * correct. It's used when firmware is preloaded and shouldn't be - * unloaded when driver exits. - */ - if (fw_new && pf->multi_pf.en) - writeq(NFP_KEEPALIVE_MAGIC, pf->multi_pf.beat_addr); - return err < 0 ? err : fw_loaded; } @@ -840,6 +837,12 @@ static void nfp_fw_unload(struct nfp_pf *pf) struct nfp_nsp *nsp; int err; + if (pf->multi_pf.en && pf->multi_pf.beat_addr) { + /* NSP will unload firmware when no active PF exists. 
*/ + writeq(NFP_KEEPALIVE_MAGIC, pf->multi_pf.beat_addr); + return; + } + nsp = nfp_nsp_open(pf->cpp); if (IS_ERR(nsp)) { nfp_err(pf->cpp, "Reset failed, can't open NSP\n"); @@ -1011,8 +1014,7 @@ static int nfp_pci_probe(struct pci_dev *pdev, if (err) goto err_hwinfo_free; - if (!pf->mip) - pf->mip = nfp_mip_open(pf->cpp); + pf->mip = nfp_mip_open(pf->cpp); pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip); err = nfp_pf_find_rtsyms(pf); @@ -1052,11 +1054,11 @@ static int nfp_pci_probe(struct pci_dev *pdev, err_net_remove: nfp_net_pci_remove(pf); err_fw_unload: - nfp_nsp_keepalive_stop(pf); kfree(pf->rtbl); nfp_mip_close(pf->mip); if (pf->unload_fw_on_remove) nfp_fw_unload(pf); + nfp_nsp_keepalive_stop(pf); kfree(pf->eth_tbl); kfree(pf->nspi); vfree(pf->dumpspec); @@ -1092,12 +1094,12 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) nfp_net_pci_remove(pf); vfree(pf->dumpspec); - nfp_nsp_keepalive_stop(pf); kfree(pf->rtbl); nfp_mip_close(pf->mip); if (unload_fw && pf->unload_fw_on_remove) nfp_fw_unload(pf); + nfp_nsp_keepalive_stop(pf); destroy_workqueue(pf->wq); pci_set_drvdata(pdev, NULL); kfree(pf->hwinfo); -- Gitee From 3427cbf71eb1b68e263514d2c6ebd4f9ea5885ab Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Tue, 21 Nov 2023 15:43:34 +0800 Subject: [PATCH 0470/2138] anolis: nfp: grant the right of reclaiming resources to PF0 only ANBZ: #8563 In multi-PF setup, one PF still may bust the resource lock that is held by another. Since it's an error handling to bust lock and we only need a way here, no need to let every PF have the error handler. Now leave the right to the first PF only. 
Signed-off-by: Yinjun Zhang Reviewed-by: Louis Peens Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 17 ++++++++++------- .../ethernet/netronome/nfp/nfpcore/nfp_mutex.c | 15 +++++---------- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index e1ddc6e667fa..bd3f681b6d18 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -801,10 +801,6 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) return err; } - pf->multi_pf.en = pdev->multifunction; - pf->multi_pf.id = PCI_FUNC(pdev->devfn); - dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf.en ? "Multi" : "Single"); - err = nfp_nsp_wait(nsp); if (err < 0) goto exit_close_nsp; @@ -993,9 +989,16 @@ static int nfp_pci_probe(struct pci_dev *pdev, goto err_disable_msix; } - err = nfp_resource_table_init(pf->cpp); - if (err) - goto err_cpp_free; + pf->multi_pf.en = pdev->multifunction; + pf->multi_pf.id = PCI_FUNC(pdev->devfn); + dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf.en ? "Multi" : "Single"); + + /* Only PF0 has the right to reclaim locked resources. 
*/ + if (!pf->multi_pf.id) { + err = nfp_resource_table_init(pf->cpp); + if (err) + goto err_cpp_free; + } pf->hwinfo = nfp_hwinfo_read(pf->cpp); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c index 1fac6867922b..7bc17b94ac60 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c @@ -341,7 +341,6 @@ int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target, unsigned long long address) { - unsigned long timeout = jiffies + 2 * HZ; const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ u16 interface = nfp_cpp_interface(cpp); @@ -353,16 +352,12 @@ int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target, return err; /* Check lock */ - while (time_is_after_jiffies(timeout)) { - err = nfp_cpp_readl(cpp, mur, address, &tmp); - if (err < 0) - return err; - - if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface) - return 0; + err = nfp_cpp_readl(cpp, mur, address, &tmp); + if (err < 0) + return err; - msleep_interruptible(10); - } + if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface) + return 0; /* Bust the lock */ err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_unlocked(interface)); -- Gitee From 1640cb440cd08c7837071235a9cf05615ffbf6c2 Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Wed, 30 Aug 2023 16:35:08 +0200 Subject: [PATCH 0471/2138] anolis: nfp: add pci_error_handler callback ANBZ: #8563 Add callbacks to catch FLR prepare and done. Stop the heartbeat timer before the FLR to make sure it can't trigger during. We do need to manually write keepalive value just before this to make sure the firmware is kept alive, otherwise firmware maybe unloaded during frequent FLR for both PFs. Resume the timer when the FLR is done. 
Signed-off-by: Louis Peens Signed-off-by: Baowen Zheng Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index bd3f681b6d18..9788f7958d92 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -1125,12 +1125,43 @@ static void nfp_pci_shutdown(struct pci_dev *pdev) __nfp_pci_shutdown(pdev, false); } +void nfp_pci_error_reset_prepare(struct pci_dev *dev) +{ + struct nfp_pf *pf = pci_get_drvdata(dev); + + if (pf) { + if (pf->multi_pf.en && pf->multi_pf.beat_addr) { + /* Pause heartbeat timer so it can't happen during FLR */ + del_timer_sync(&pf->multi_pf.beat_timer); + /* We need to write keepalive to keep firmware alive + * during frequent FLR. + */ + writeq(jiffies, nfp_get_beat_addr(pf, pf->multi_pf.id)); + } + } +} + +void nfp_pci_error_reset_done(struct pci_dev *dev) +{ + struct nfp_pf *pf = pci_get_drvdata(dev); + + if (pf) + if (pf->multi_pf.en && pf->multi_pf.beat_addr) + add_timer(&pf->multi_pf.beat_timer); +} + +static const struct pci_error_handlers nfp_pci_err_handler = { + .reset_prepare = nfp_pci_error_reset_prepare, + .reset_done = nfp_pci_error_reset_done, +}; + static struct pci_driver nfp_pci_driver = { .name = nfp_driver_name, .id_table = nfp_pci_device_ids, .probe = nfp_pci_probe, .remove = nfp_pci_remove, .shutdown = nfp_pci_shutdown, + .err_handler = &nfp_pci_err_handler, .sriov_configure = nfp_pcie_sriov_configure, }; -- Gitee From 66b2dd1507a7e470a354f28b62dd6e4c931708ec Mon Sep 17 00:00:00 2001 From: Baowen Zheng Date: Fri, 1 Sep 2023 08:23:54 +0200 Subject: [PATCH 0472/2138] anolis: nfp: reset netdev state on FLR event. 
ANBZ: #8563 We need to bring down netdev when the pf is in FLR progress to prevent driver and firmware access chip memory, in opposite, bring up netdev when FLR is finished. Signed-off-by: Baowen Zheng Signed-off-by: Louis Peens Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 9788f7958d92..bedf4af5833d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -1130,6 +1130,8 @@ void nfp_pci_error_reset_prepare(struct pci_dev *dev) struct nfp_pf *pf = pci_get_drvdata(dev); if (pf) { + struct nfp_net *nn; + if (pf->multi_pf.en && pf->multi_pf.beat_addr) { /* Pause heartbeat timer so it can't happen during FLR */ del_timer_sync(&pf->multi_pf.beat_timer); @@ -1138,6 +1140,14 @@ void nfp_pci_error_reset_prepare(struct pci_dev *dev) */ writeq(jiffies, nfp_get_beat_addr(pf, pf->multi_pf.id)); } + + list_for_each_entry(nn, &pf->vnics, vnic_list) { + if (nn->dp.netdev && nn->dp.netdev->flags & IFF_UP) { + struct net_device *netdev = nn->dp.netdev; + + netdev->netdev_ops->ndo_stop(netdev); + } + } } } @@ -1145,9 +1155,21 @@ void nfp_pci_error_reset_done(struct pci_dev *dev) { struct nfp_pf *pf = pci_get_drvdata(dev); - if (pf) + if (pf) { + struct nfp_net *nn; + + list_for_each_entry(nn, &pf->vnics, vnic_list) { + if (nn->dp.netdev && nn->dp.netdev->flags & IFF_UP) { + struct net_device *netdev = nn->dp.netdev; + + rtnl_lock(); + netdev->netdev_ops->ndo_open(netdev); + rtnl_unlock(); + } + } if (pf->multi_pf.en && pf->multi_pf.beat_addr) add_timer(&pf->multi_pf.beat_timer); + } } static const struct pci_error_handlers nfp_pci_err_handler = { -- Gitee From 8057e68ce5df353b8a3ec1526f7da8f1ff146d4d Mon Sep 17 00:00:00 2001 From: Ryno 
Swart Date: Thu, 23 Nov 2023 16:40:02 +0200 Subject: [PATCH 0473/2138] anolis: nfp: preserve multi-pf control bit during initialisation ANBZ: #8563 Preserve the multi-PF control bit if the functionality is available. Multi-PF mode is configured before this, during pre-init. The old behaviour would reset the control bit to zero regardless of which mode is active. This had no effect on the card, as the firmware rejects all requests to return to single-PF mode. This change only preserves the control bit in the config BAR for verification. Signed-off-by: Ryno Swart Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index fceb4abea236..998761bf56af 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2704,6 +2704,11 @@ int nfp_net_init(struct nfp_net *nn) if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER) nn->dp.ctrl_w1 |= NFP_NET_CFG_CTRL_MCAST_FILTER; + /* Multi-PF is already enabled during pre-init, preserve control bit */ + if (nn->cap_w1 & NFP_NET_CFG_CTRL_MULTI_PF) + nn->dp.ctrl_w1 |= (nn_readl(nn, NFP_NET_CFG_CTRL_WORD1) & + NFP_NET_CFG_CTRL_MULTI_PF); + /* Stash the re-configuration queue away. First odd queue in TX Bar */ nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ; -- Gitee From 88917aa212f6753f24989911d8ecb9fb1d9a727b Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Wed, 6 Mar 2024 17:28:31 +0800 Subject: [PATCH 0474/2138] anolis: nfp: fix initialization of incorrect PF id ANBZ: #8563 Using function id from PCI BDF as PF id is not reliable when PF is passed through to VM. Now we get PF id from vendor specific capability register which is filled by management firmware. 
Signed-off-by: Yinjun Zhang Signed-off-by: Louis Peens Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 22 ++++++++++++++----- .../netronome/nfp/nfpcore/nfp6000_pcie.c | 15 ++++++++----- .../netronome/nfp/nfpcore/nfp6000_pcie.h | 9 +++++++- 3 files changed, 34 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index bedf4af5833d..22000315a79f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -933,6 +933,18 @@ static void nfp_pf_cfg_hwinfo(struct nfp_pf *pf) nfp_nsp_close(nsp); } +static u8 nfp_init_pf_id(struct pci_dev *pdev) +{ + int vndr = pci_find_capability(pdev, PCI_CAP_ID_VNDR); + u8 id = 0; + + if (!vndr) + return PCI_FUNC(pdev->devfn); + + pci_read_config_byte(pdev, vndr + NFP_VNDR_PF_ID_OFFSET, &id); + return id; +} + static int nfp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { @@ -983,16 +995,16 @@ static int nfp_pci_probe(struct pci_dev *pdev, goto err_pci_priv_unset; } - pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev, dev_info); + pf->multi_pf.en = pdev->multifunction; + pf->multi_pf.id = nfp_init_pf_id(pdev); + dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf.en ? "Multi" : "Single"); + + pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev, dev_info, pf); if (IS_ERR(pf->cpp)) { err = PTR_ERR(pf->cpp); goto err_disable_msix; } - pf->multi_pf.en = pdev->multifunction; - pf->multi_pf.id = PCI_FUNC(pdev->devfn); - dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf.en ? "Multi" : "Single"); - /* Only PF0 has the right to reclaim locked resources. 
*/ if (!pf->multi_pf.id) { err = nfp_resource_table_init(pf->cpp); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 3f10c5365c80..8e60e20c4fee 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -29,6 +29,7 @@ #include "nfp_cpp.h" #include "nfp_dev.h" +#include "../nfp_main.h" #include "nfp6000/nfp6000.h" @@ -532,7 +533,8 @@ static int bar_cmp(const void *aptr, const void *bptr) * BAR1.0-BAR1.7: -- * BAR2.0-BAR2.7: -- */ -static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) +static int enable_bars(struct nfp6000_pcie *nfp, u16 interface, + struct nfp_pf *pf) { const u32 barcfg_msix_general = NFP_PCIE_BAR_PCIE2CPP_MapType( @@ -611,7 +613,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) bar->iomem = ioremap(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { - int pf; + int pf_id; msg += scnprintf(msg, end - msg, "0.0: General/MSI-X SRAM, "); atomic_inc(&bar->refcnt); @@ -624,8 +626,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) switch (nfp->pdev->device) { case PCI_DEVICE_ID_NFP3800: - pf = nfp->pdev->devfn & 7; - nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf); + pf_id = pf->multi_pf.id; + nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf_id); break; case PCI_DEVICE_ID_NFP4000: case PCI_DEVICE_ID_NFP5000: @@ -1309,7 +1311,8 @@ static const struct nfp_cpp_operations nfp6000_pcie_ops = { * Return: NFP CPP handle */ struct nfp_cpp * -nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info) +nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info, + struct nfp_pf *pf) { struct nfp6000_pcie *nfp; u16 interface; @@ -1353,7 +1356,7 @@ nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_i goto err_free_nfp; } - err = enable_bars(nfp, interface); + err 
= enable_bars(nfp, interface, pf); if (err) goto err_free_nfp; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h index 097660b673db..e992f5c91013 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h @@ -11,7 +11,14 @@ #include "nfp_cpp.h" +/* Vendor specific register layout */ +#define NFP_VNDR_HEADER_OFFSET 0x0 +#define NFP_VNDR_PF_ID_OFFSET 0x4 + +struct nfp_pf; + struct nfp_cpp * -nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info); +nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info, + struct nfp_pf *pf); #endif /* NFP6000_PCIE_H */ -- Gitee From 7cca2443f4ac56eb1983890cf8e5f76b747c9f8a Mon Sep 17 00:00:00 2001 From: Baowen Zheng Date: Sun, 25 Feb 2024 21:31:00 -0500 Subject: [PATCH 0475/2138] anolis: nfp: add device activate command for nsp service ANBZ: #8563 Add device activate command for nsp service in multiple pfs case. We need to activate device if the probing pf is not pf0 to make vfs belong to other pfs send traffic normally. When removing pcie device, we need to keep the device active if the pf is pf 0. 
Signed-off-by: Baowen Zheng Reviewed-by: Yinjun Zhang Signed-off-by: Louis Peens Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 19 ++++++++++++++++++- .../ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 12 ++++++++++++ .../ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 1 + 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 22000315a79f..c74b314d035f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -805,6 +805,15 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) if (err < 0) goto exit_close_nsp; + if (pf->multi_pf.en && pf->multi_pf.id) { + err = nfp_nsp_device_activate(nsp); + if (err < 0 && err != -EOPNOTSUPP) { + dev_err(&pdev->dev, + "Failed to activate the NFP device: %d\n", err); + goto exit_close_nsp; + } + } + nfp_nsp_init_ports(pdev, pf, nsp); pf->nspi = __nfp_nsp_identify(nsp); @@ -1096,12 +1105,14 @@ static int nfp_pci_probe(struct pci_dev *pdev, static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) { + bool keep_device_active; struct nfp_pf *pf; pf = pci_get_drvdata(pdev); if (!pf) return; + keep_device_active = pf->multi_pf.en && !pf->multi_pf.id; nfp_hwmon_unregister(pf); nfp_pcie_sriov_disable(pdev); @@ -1124,7 +1135,13 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) kfree(pf->nspi); devlink_free(priv_to_devlink(pf)); pci_release_regions(pdev); - pci_disable_device(pdev); + + /* In multiple pfs case, we need to keep master flag of pf 0 + * to ensure vfs of other pfs work normally because of + * hardware limitation. 
+ */ + if (!keep_device_active) + pci_disable_device(pdev); } static void nfp_pci_remove(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 56682c530b26..55d799d420aa 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -102,6 +102,7 @@ enum nfp_nsp_cmd { SPCODE_VERSIONS = 21, /* Report FW versions */ SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */ SPCODE_READ_MEDIA = 23, /* Get either the supported or advertised media for a port */ + SPCODE_DEV_ACTIVATE = 29, /* Activate hardware for multiple pfs case */ }; struct nfp_nsp_dma_buf { @@ -732,6 +733,17 @@ int nfp_nsp_device_soft_reset(struct nfp_nsp *state) return nfp_nsp_command(state, SPCODE_SOFT_RESET); } +int nfp_nsp_device_activate(struct nfp_nsp *state) +{ + /* Older ABI versions did support this feature, however this has only + * been reliable since ABI 38. 
+ */ + if (nfp_nsp_get_abi_ver_minor(state) < 38) + return -EOPNOTSUPP; + + return nfp_nsp_command(state, SPCODE_DEV_ACTIVATE); +} + int nfp_nsp_mac_reinit(struct nfp_nsp *state) { return nfp_nsp_command(state, SPCODE_MAC_INIT); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index 6e044ac04917..f34b996b0749 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -17,6 +17,7 @@ u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state); u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); int nfp_nsp_wait(struct nfp_nsp *state); int nfp_nsp_device_soft_reset(struct nfp_nsp *state); +int nfp_nsp_device_activate(struct nfp_nsp *state); int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_mac_reinit(struct nfp_nsp *state); -- Gitee From ee6a62a9a0ba2d7a624f814f0998dc6bd81a40f1 Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Tue, 20 Feb 2024 16:44:10 +0800 Subject: [PATCH 0476/2138] anolis: nfp: try firmware name of card type without media info ANBZ: #8563 Now all application firmware is indifferent of port speed, so do not bother to compose the firmware name with media info. This can reduce a number of symlinks for firmware files. For backward compatibility concern, the trial of firmware name with media info is still kept. 
Signed-off-by: Yinjun Zhang Signed-off-by: Louis Peens Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index c74b314d035f..ddd2335013c2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -460,7 +460,7 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) if (fw) return fw; - /* Finally try the card type and media */ + /* Then try the card type */ if (!pf->eth_tbl) { dev_err(&pdev->dev, "Error: can't identify media config\n"); return NULL; @@ -474,6 +474,12 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) return NULL; } + sprintf(fw_name, "netronome/%s.nffw", fw_model); + fw = nfp_net_fw_request(pdev, pf, fw_name); + if (fw) + return fw; + + /* Finally try the card type and media */ spc = ARRAY_SIZE(fw_name); spc -= snprintf(fw_name, spc, "netronome/nic_%s", fw_model); -- Gitee From 4caaf16985c16b4053ad41f5bd42ea74e5e69b6e Mon Sep 17 00:00:00 2001 From: Yinjun Zhang Date: Tue, 20 Feb 2024 16:52:48 +0800 Subject: [PATCH 0477/2138] anolis: nfp: update module firmware list ANBZ: #8563 Update the module firmware list to accommodate some new NFP products. 
Signed-off-by: Yinjun Zhang Signed-off-by: Louis Peens Signed-off-by: Fei Qin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2963 --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index ddd2335013c2..0fae86d8abe0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -1271,6 +1271,13 @@ MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_8x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw"); MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_1x10_1x25.nffw"); +MODULE_FIRMWARE("netronome/AMDA0161-1001.nffw"); +MODULE_FIRMWARE("netronome/AMDA2000-1103.nffw"); +MODULE_FIRMWARE("netronome/AMDA2000-1104.nffw"); +MODULE_FIRMWARE("netronome/AMDA2001-1103.nffw"); +MODULE_FIRMWARE("netronome/AMDA2001-1104.nffw"); +MODULE_FIRMWARE("netronome/AMDA2002-1113.nffw"); +MODULE_FIRMWARE("netronome/AMDA2002-1114.nffw"); MODULE_AUTHOR("Corigine, Inc. "); MODULE_LICENSE("GPL"); -- Gitee From 6d0dd20df1e757e16b1654e5a2a11c97df0ef35d Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Wed, 27 Dec 2023 21:05:04 +0800 Subject: [PATCH 0478/2138] anolis: x86/tsc: Make cur->adjusted values in package#1 to be the same ANBZ: #7809 When resume from S4 on Zhaoxin 2 packages platform that support X86_FEATURE_TSC_ADJUST, the following warning messages appear: [ 327.445302] [Firmware Bug]: TSC ADJUST differs: CPU15 45960750 --> 78394770. Restoring [ 329.209120] [Firmware Bug]: TSC ADJUST differs: CPU14 45960750 --> 78394770. Restoring [ 329.209128] [Firmware Bug]: TSC ADJUST differs: CPU13 45960750 --> 78394770. Restoring [ 329.209138] [Firmware Bug]: TSC ADJUST differs: CPU12 45960750 --> 78394770. Restoring [ 329.209151] [Firmware Bug]: TSC ADJUST differs: CPU11 45960750 --> 78394770. 
Restoring [ 329.209160] [Firmware Bug]: TSC ADJUST differs: CPU10 45960750 --> 78394770. Restoring [ 329.209169] [Firmware Bug]: TSC ADJUST differs: CPU9 45960750 --> 78394770. Restoring The reason is: Step 1: Bring up. TSC is sync after bring up with following settings: MSR 0x3b cur->adjusted Package#0 CPU 0-7 0 0 Package#1 first CPU value1 value1 Package#1 non-first CPU value1 value1 Step 2: Suspend to S4. Settings in Step 1 are not changed in this Step. Step 3: Bring up caused by S4 wake up event. TSC is sync when bring up with following settings: MSR 0x3b cur->adjusted Package#0 CPU 0-7 0 0 Package#1 first CPU value2 value2 Package#1 non-first CPU value2 value2 Step 4: Resume from S4. When resuming from S4, Current TSC synchronous mechanism cause following settings: MSR 0x3b cur->adjusted Package#0 CPU 0-7 0 0 Package#1 first CPU value2 value2 Package#1 non-first CPU value2 value1 In these Steps, value1 != 0 and value2 != value1. In Step4, as function tsc_store_and_check_tsc_adjust() do, when the value of MSR 0x3b on the non-first online CPU in package#1 is equal to the value of cur->adjusted on the first online CPU in the same package, the cur->adjusted value on this non-first online CPU will hold the old value1. This cause function tsc_verify_tsc_adjust() set the value of MSR 0x3b on the non-first online CPUs in the package#1 to the old value1 and print the beginning warning messages. Fix it by setting cur->adjusted value on the non-first online CPU in a package to the value of MSR 0x3b on the same CPU when they are not equal. 
Signed-off-by: leoliu-oc Reviewed-by: Xingrui Yi Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2697 --- arch/x86/kernel/tsc_sync.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 4334033658ed..3998d1f681bc 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -230,6 +230,11 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu) if (bootval != ref->adjusted) { cur->adjusted = ref->adjusted; wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted); + } else if (cur->adjusted != bootval) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + cur->adjusted = bootval; + } } /* * We have the TSCs forced to be in sync on this package. Skip sync -- Gitee From 514b86eb6e68942c1ea0066b9e3bb14e06ad8e29 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Wed, 27 Dec 2023 21:04:56 +0800 Subject: [PATCH 0479/2138] anolis: x86/cpufeatures: Add low performance CRC32C instruction CPU feature ANBZ: #7809 SSE4.2 on Zhaoxin CPUs are compatible with Intel. The presence of CRC32C instruction is enumerated by CPUID.01H:ECX.SSE4_2[bit 20] = 1. Some Zhaoxin CPUs declare support SSE4.2 instruction sets but their CRC32C instruction are working with low performance. Add a synthetic CPU flag to indicates that the CRC32C instruction is not working as intended. This low performance CRC32C instruction flag is depend on X86_FEATURE_XMM4_2. 
Signed-off-by: leoliu-oc Reviewed-by: Tianjia Zhang Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2694 [Fixes conflicts: set X86_FEATURE_CRC32C_LOW_PERF = (12*32+27)] Signed-off-by: Qinyun Tan --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/kernel/cpu/cpuid-deps.c | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index fab4aa1abaaf..42cef8da01b4 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -350,6 +350,7 @@ #define X86_FEATURE_AMX_FP16 (12*32+21) /* "" AMX fp16 Support */ #define X86_FEATURE_AVX_IFMA (12*32+23) /* "" Support for VPMADD52[H,L]UQ */ #define X86_FEATURE_LAM (12*32+26) /* Linear Address Masking */ +#define X86_FEATURE_CRC32C_LOW_PERF (12*32+27) /* "" Low performance */ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index 6fb6d8a57cec..7d8733874218 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -82,6 +82,7 @@ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_XFD, X86_FEATURE_XGETBV1 }, { X86_FEATURE_AMX_TILE, X86_FEATURE_XFD }, { X86_FEATURE_SHSTK, X86_FEATURE_XSAVES }, + { X86_FEATURE_CRC32C_LOW_PERF, X86_FEATURE_XMM4_2 }, {} }; -- Gitee From a8e3d190940ead6bec7b8030d5fd469273b4a351 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Wed, 6 Mar 2024 16:15:30 +0800 Subject: [PATCH 0480/2138] anolis: x86/cpu: Set low performance CRC32C flag on some Zhaoxin CPUs ANBZ: #7809 Some Zhaoxin CPUs declare support SSE4.2 instruction sets but having a CRC32C instruction implementation that not working as intended. Set low performance CRC32C flag on these CPUs for later use. 
Signed-off-by: leoliu-oc Reviewed-by: Tianjia Zhang Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2694 --- arch/x86/kernel/cpu/centaur.c | 7 +++++++ arch/x86/kernel/cpu/zhaoxin.c | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index a5c01c8f8824..ad6982391bc9 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -110,6 +110,13 @@ static void early_init_centaur(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); } + /* + * These CPUs declare support SSE4.2 instruction sets but + * having low performance CRC32C instruction implementation. + */ + if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) + set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); + if (detect_extended_topology_early(c) < 0) detect_ht_early(c); } diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c index 2126b10de796..f9a65b57a6bd 100644 --- a/arch/x86/kernel/cpu/zhaoxin.c +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -79,6 +79,13 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c) c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); } + /* + * These CPUs declare support SSE4.2 instruction sets but + * having low performance CRC32C instruction implementation. + */ + if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) + set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); + if (detect_extended_topology_early(c) < 0) detect_ht_early(c); } -- Gitee From 012fdeed603d7ec57438c81f76ad9b1ce1d36756 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Wed, 27 Dec 2023 21:04:57 +0800 Subject: [PATCH 0481/2138] anolis: crypto: x86/crc32c-intel Exclude low performance CRC32C instruction CPUs ANBZ: #7809 Low performance CRC32C instruction CPUs expect to use the driver crc32c-generic. So remove these CPUs support from crc32c-intel. 
Signed-off-by: leoliu-oc Reviewed-by: Tianjia Zhang Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2694 --- arch/x86/crypto/crc32c-intel_glue.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c index feccb5254c7e..91d318b08fb7 100644 --- a/arch/x86/crypto/crc32c-intel_glue.c +++ b/arch/x86/crypto/crc32c-intel_glue.c @@ -224,6 +224,11 @@ static int __init crc32c_intel_mod_init(void) { if (!x86_match_cpu(crc32c_cpu_id)) return -ENODEV; + + /* Don't merit use low performance CRC32C instruction */ + if (boot_cpu_has(X86_FEATURE_CRC32C_LOW_PERF)) + return -ENODEV; + #ifdef CONFIG_X86_64 if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) { alg.update = crc32c_pcl_intel_update; -- Gitee From 1f5048df870f16dcd74bfb72c81ffffa7dc33bee Mon Sep 17 00:00:00 2001 From: Maciej Wieczor-Retman Date: Wed, 11 Oct 2023 08:48:42 +0200 Subject: [PATCH 0482/2138] x86/resctrl: Fix remaining kernel-doc warnings ANBZ: #8626 commit f05fd4ce99635975caa3e6a0eeb02118637f72a3 upstream. 
The kernel test robot reported kernel-doc warnings here: arch/x86/kernel/cpu/resctrl/rdtgroup.c:915: warning: Function parameter or member 'of' not described in 'rdt_bit_usage_show' arch/x86/kernel/cpu/resctrl/rdtgroup.c:915: warning: Function parameter or member 'seq' not described in 'rdt_bit_usage_show' arch/x86/kernel/cpu/resctrl/rdtgroup.c:915: warning: Function parameter or member 'v' not described in 'rdt_bit_usage_show' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1144: warning: Function parameter or member 'type' not described in '__rdtgroup_cbm_overlaps' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1224: warning: Function parameter or member 'rdtgrp' not described in 'rdtgroup_mode_test_exclusive' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1261: warning: Function parameter or member 'of' not described in 'rdtgroup_mode_write' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1261: warning: Function parameter or member 'buf' not described in 'rdtgroup_mode_write' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1261: warning: Function parameter or member 'nbytes' not described in 'rdtgroup_mode_write' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1261: warning: Function parameter or member 'off' not described in 'rdtgroup_mode_write' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1370: warning: Function parameter or member 'of' not described in 'rdtgroup_size_show' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1370: warning: Function parameter or member 's' not described in 'rdtgroup_size_show' arch/x86/kernel/cpu/resctrl/rdtgroup.c:1370: warning: Function parameter or member 'v' not described in 'rdtgroup_size_show' The first two functions are missing an argument description while the other three are file callbacks and don't require a kernel-doc comment. 
Closes: https://lore.kernel.org/oe-kbuild-all/202310070434.mD8eRNAz-lkp@intel.com/ Reported-by: kernel test robot Signed-off-by: Maciej Wieczor-Retman Signed-off-by: Ingo Molnar Cc: Peter Newman Cc: Borislav Petkov Cc: Reinette Chatre Link: https://lore.kernel.org/r/20231011064843.246592-1-maciej.wieczor-retman@intel.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 1c0f00cd212d..f4e25fbdb1ab 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -895,7 +895,7 @@ static int rdt_shareable_bits_show(struct kernfs_open_file *of, return 0; } -/** +/* * rdt_bit_usage_show - Display current usage of resources * * A domain is a shared resource that can now be allocated differently. Here @@ -1134,6 +1134,7 @@ static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of, * @d: The domain instance for which @closid is being tested. * @cbm: Capacity bitmask being tested. * @closid: Intended closid for @cbm. + * @type: CDP type of @r. * @exclusive: Only check if overlaps with exclusive resource groups * * Checks if provided @cbm intended to be used for @closid on domain @@ -1220,6 +1221,7 @@ bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, /** * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive + * @rdtgrp: Resource group identified through its closid. * * An exclusive resource group implies that there should be no sharing of * its allocated resources. 
At the time this group is considered to be @@ -1262,9 +1264,8 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) return true; } -/** +/* * rdtgroup_mode_write - Modify the resource group's mode - * */ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) @@ -1368,12 +1369,11 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, return size; } -/** +/* * rdtgroup_size_show - Display size in bytes of allocated regions * * The "size" file mirrors the layout of the "schemata" file, printing the * size in bytes of each region instead of the capacity bitmask. - * */ static int rdtgroup_size_show(struct kernfs_open_file *of, struct seq_file *s, void *v) -- Gitee From 5548159951c34c9be0a2f964d28fd28f696f95c1 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:00 -0500 Subject: [PATCH 0483/2138] x86/resctrl: Add multiple tasks to the resctrl group at once MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit fe2a20ea0b0953189e57740debc7dcc789d1ea55 upstream. The resctrl task assignment for monitor or control group needs to be done one at a time. For example: $mount -t resctrl resctrl /sys/fs/resctrl/ $mkdir /sys/fs/resctrl/ctrl_grp1 $echo 123 > /sys/fs/resctrl/ctrl_grp1/tasks $echo 456 > /sys/fs/resctrl/ctrl_grp1/tasks $echo 789 > /sys/fs/resctrl/ctrl_grp1/tasks This is not user-friendly when dealing with hundreds of tasks. Support multiple task assignment in one command with tasks ids separated by commas. 
For example: $echo 123,456,789 > /sys/fs/resctrl/ctrl_grp1/tasks Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-2-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- Documentation/arch/x86/resctrl.rst | 9 ++++++++- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 25 ++++++++++++++++++++++--- 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst index 4c6421e2aa31..178ab1d8f747 100644 --- a/Documentation/arch/x86/resctrl.rst +++ b/Documentation/arch/x86/resctrl.rst @@ -306,7 +306,14 @@ All groups contain the following files: "tasks": Reading this file shows the list of all tasks that belong to this group. Writing a task id to the file will add a task to the - group. If the group is a CTRL_MON group the task is removed from + group. Multiple tasks can be added by separating the task ids + with commas. Tasks will be assigned sequentially. Multiple + failures are not supported. A single failure encountered while + attempting to assign a task will cause the operation to abort and + already added tasks before the failure will remain in the group. + Failures will be logged to /sys/fs/resctrl/info/last_cmd_status. + + If the group is a CTRL_MON group the task is removed from whichever previous CTRL_MON group owned the task and also from any MON group that owned the task. 
If the group is a MON group, then the task must already belong to the CTRL_MON parent of this diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index f4e25fbdb1ab..161b63c86328 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -696,11 +696,10 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct rdtgroup *rdtgrp; + char *pid_str; int ret = 0; pid_t pid; - if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) - return -EINVAL; rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { rdtgroup_kn_unlock(of->kn); @@ -715,7 +714,27 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, goto unlock; } - ret = rdtgroup_move_task(pid, rdtgrp, of); + while (buf && buf[0] != '\0' && buf[0] != '\n') { + pid_str = strim(strsep(&buf, ",")); + + if (kstrtoint(pid_str, 0, &pid)) { + rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str); + ret = -EINVAL; + break; + } + + if (pid < 0) { + rdt_last_cmd_printf("Invalid pid %d\n", pid); + ret = -EINVAL; + break; + } + + ret = rdtgroup_move_task(pid, rdtgrp, of); + if (ret) { + rdt_last_cmd_printf("Error while processing task %d\n", pid); + break; + } + } unlock: rdtgroup_kn_unlock(of->kn); -- Gitee From 4ea0b3917418c3c637f322ddb7fc541b60222b8b Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Tue, 3 Oct 2023 18:54:22 -0500 Subject: [PATCH 0484/2138] x86/resctrl: Simplify rftype flag definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit 6846dc1a31d1894a7acf52d8442fe73b34091022 upstream. The rftype flags are bitmaps used for adding files under the resctrl filesystem. Some of these bitmap defines have one extra level of indirection which is not necessary. Drop the RF_* defines and simplify the macros. [ bp: Massage commit message. 
] Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-3-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/internal.h | 9 +++------ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 6 +++++- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index ca86a96e80c2..5959026075c9 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -242,12 +242,9 @@ struct rdtgroup { */ #define RFTYPE_INFO BIT(0) #define RFTYPE_BASE BIT(1) -#define RF_CTRLSHIFT 4 -#define RF_MONSHIFT 5 -#define RF_TOPSHIFT 6 -#define RFTYPE_CTRL BIT(RF_CTRLSHIFT) -#define RFTYPE_MON BIT(RF_MONSHIFT) -#define RFTYPE_TOP BIT(RF_TOPSHIFT) +#define RFTYPE_CTRL BIT(4) +#define RFTYPE_MON BIT(5) +#define RFTYPE_TOP BIT(6) #define RFTYPE_RES_CACHE BIT(8) #define RFTYPE_RES_MB BIT(9) #define RF_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 161b63c86328..784d37c90c23 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -3262,7 +3262,11 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, goto out_destroy; } - files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype); + if (rtype == RDTCTRL_GROUP) + files = RFTYPE_BASE | RFTYPE_CTRL; + else + files = RFTYPE_BASE | RFTYPE_MON; + ret = rdtgroup_add_files(kn, files); if (ret) { rdt_last_cmd_puts("kernfs fill error\n"); -- Gitee From 711da50debd17268463441d71989b1221d992757 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:02 -0500 Subject: 
[PATCH 0485/2138] x86/resctrl: Rename rftype flags for consistency MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit d41592435cde9a658a1bd3b3fdfeb8db7b330d78 upstream. resctrl associates rftype flags with its files so that files can be chosen based on the resource, whether it is info or base, and if it is control or monitor type file. These flags use the RF_ as well as RFTYPE_ prefixes. Change the prefix to RFTYPE_ for all these flags to be consistent. Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-4-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/internal.h | 10 +++--- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 44 +++++++++++++------------- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 5959026075c9..2c310fe7f1d6 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -247,10 +247,10 @@ struct rdtgroup { #define RFTYPE_TOP BIT(6) #define RFTYPE_RES_CACHE BIT(8) #define RFTYPE_RES_MB BIT(9) -#define RF_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) -#define RF_MON_INFO (RFTYPE_INFO | RFTYPE_MON) -#define RF_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) -#define RF_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) +#define RFTYPE_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) +#define RFTYPE_MON_INFO (RFTYPE_INFO | RFTYPE_MON) +#define RFTYPE_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) +#define RFTYPE_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) /* List of all resource groups */ extern struct list_head rdt_all_groups; @@ -266,7 +266,7 @@ void __exit 
rdtgroup_exit(void); * @mode: Access mode * @kf_ops: File operations * @flags: File specific RFTYPE_FLAGS_* flags - * @fflags: File specific RF_* or RFTYPE_* flags + * @fflags: File specific RFTYPE_* flags * @seq_show: Show content of the file * @write: Write to the file */ diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 784d37c90c23..933b1b13eb17 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1718,77 +1718,77 @@ static struct rftype res_common_files[] = { .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_last_cmd_status_show, - .fflags = RF_TOP_INFO, + .fflags = RFTYPE_TOP_INFO, }, { .name = "num_closids", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_num_closids_show, - .fflags = RF_CTRL_INFO, + .fflags = RFTYPE_CTRL_INFO, }, { .name = "mon_features", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_mon_features_show, - .fflags = RF_MON_INFO, + .fflags = RFTYPE_MON_INFO, }, { .name = "num_rmids", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_num_rmids_show, - .fflags = RF_MON_INFO, + .fflags = RFTYPE_MON_INFO, }, { .name = "cbm_mask", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_default_ctrl_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, }, { .name = "min_cbm_bits", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_min_cbm_bits_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, }, { .name = "shareable_bits", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_shareable_bits_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, }, { .name = "bit_usage", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_bit_usage_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + .fflags = 
RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, }, { .name = "min_bandwidth", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_min_bw_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, }, { .name = "bandwidth_gran", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_bw_gran_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, }, { .name = "delay_linear", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_delay_linear_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, }, /* * Platform specific which (if any) capabilities are provided by @@ -1807,7 +1807,7 @@ static struct rftype res_common_files[] = { .kf_ops = &rdtgroup_kf_single_ops, .write = max_threshold_occ_write, .seq_show = max_threshold_occ_show, - .fflags = RF_MON_INFO | RFTYPE_RES_CACHE, + .fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE, }, { .name = "mbm_total_bytes_config", @@ -1854,7 +1854,7 @@ static struct rftype res_common_files[] = { .kf_ops = &rdtgroup_kf_single_ops, .write = rdtgroup_schemata_write, .seq_show = rdtgroup_schemata_show, - .fflags = RF_CTRL_BASE, + .fflags = RFTYPE_CTRL_BASE, }, { .name = "mode", @@ -1862,21 +1862,21 @@ static struct rftype res_common_files[] = { .kf_ops = &rdtgroup_kf_single_ops, .write = rdtgroup_mode_write, .seq_show = rdtgroup_mode_show, - .fflags = RF_CTRL_BASE, + .fflags = RFTYPE_CTRL_BASE, }, { .name = "size", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdtgroup_size_show, - .fflags = RF_CTRL_BASE, + .fflags = RFTYPE_CTRL_BASE, }, { .name = "sparse_masks", .mode = 0444, .kf_ops = &rdtgroup_kf_single_ops, .seq_show = rdt_has_sparse_bitmasks_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, }, }; @@ -1933,7 +1933,7 @@ void __init thread_throttle_mode_init(void) if (!rft) return; - rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB; + rft->fflags = 
RFTYPE_CTRL_INFO | RFTYPE_RES_MB; } void __init mbm_config_rftype_init(const char *config) @@ -1942,7 +1942,7 @@ void __init mbm_config_rftype_init(const char *config) rft = rdtgroup_get_rftype_by_name(config); if (rft) - rft->fflags = RF_MON_INFO | RFTYPE_RES_CACHE; + rft->fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE; } /** @@ -2077,21 +2077,21 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) if (IS_ERR(kn_info)) return PTR_ERR(kn_info); - ret = rdtgroup_add_files(kn_info, RF_TOP_INFO); + ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO); if (ret) goto out_destroy; /* loop over enabled controls, these are all alloc_capable */ list_for_each_entry(s, &resctrl_schema_all, list) { r = s->res; - fflags = r->fflags | RF_CTRL_INFO; + fflags = r->fflags | RFTYPE_CTRL_INFO; ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags); if (ret) goto out_destroy; } for_each_mon_capable_rdt_resource(r) { - fflags = r->fflags | RF_MON_INFO; + fflags = r->fflags | RFTYPE_MON_INFO; sprintf(name, "%s_MON", r->name); ret = rdtgroup_mkdir_info_resdir(r, name, fflags); if (ret) @@ -3729,7 +3729,7 @@ static int __init rdtgroup_setup_root(void) list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); - ret = rdtgroup_add_files(kernfs_root_to_node(rdt_root), RF_CTRL_BASE); + ret = rdtgroup_add_files(kernfs_root_to_node(rdt_root), RFTYPE_CTRL_BASE); if (ret) { kernfs_destroy_root(rdt_root); goto out; -- Gitee From 2015afe091249415d307ed059f516775ecd38a5e Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:03 -0500 Subject: [PATCH 0486/2138] x86/resctrl: Unwind properly from rdt_enable_ctx() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit df5f3a1dd8a6d3ddb1f07a10817f735194717422 upstream. rdt_enable_ctx() enables the features provided during resctrl mount. 
Additions to rdt_enable_ctx() are required to also modify error paths of rdt_enable_ctx() callers to ensure correct unwinding if errors are encountered after calling rdt_enable_ctx(). This is error prone. Introduce rdt_disable_ctx() to refactor the error unwinding of rdt_enable_ctx() to simplify future additions. This also simplifies cleanup in rdt_kill_sb(). Suggested-by: Reinette Chatre Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-5-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 53 ++++++++++++++++---------- 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 933b1b13eb17..6fc9739658ed 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2310,14 +2310,6 @@ int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable) return 0; } -static void cdp_disable_all(void) -{ - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); -} - /* * We don't allow rdtgroup directories to be created anywhere * except the root directory. 
Thus when looking for the rdtgroup @@ -2397,19 +2389,42 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, struct kernfs_node **mon_data_kn); +static void rdt_disable_ctx(void) +{ + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); + set_mba_sc(false); +} + static int rdt_enable_ctx(struct rdt_fs_context *ctx) { int ret = 0; - if (ctx->enable_cdpl2) + if (ctx->enable_cdpl2) { ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true); + if (ret) + goto out_done; + } - if (!ret && ctx->enable_cdpl3) + if (ctx->enable_cdpl3) { ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true); + if (ret) + goto out_cdpl2; + } - if (!ret && ctx->enable_mba_mbps) + if (ctx->enable_mba_mbps) { ret = set_mba_sc(true); + if (ret) + goto out_cdpl3; + } + + return 0; +out_cdpl3: + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); +out_cdpl2: + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); +out_done: return ret; } @@ -2517,13 +2532,13 @@ static int rdt_get_tree(struct fs_context *fc) } ret = rdt_enable_ctx(ctx); - if (ret < 0) - goto out_cdp; + if (ret) + goto out; ret = schemata_list_create(); if (ret) { schemata_list_destroy(); - goto out_mba; + goto out_ctx; } closid_init(); @@ -2582,11 +2597,8 @@ static int rdt_get_tree(struct fs_context *fc) kernfs_remove(kn_info); out_schemata_free: schemata_list_destroy(); -out_mba: - if (ctx->enable_mba_mbps) - set_mba_sc(false); -out_cdp: - cdp_disable_all(); +out_ctx: + rdt_disable_ctx(); out: rdt_last_cmd_clear(); mutex_unlock(&rdtgroup_mutex); @@ -2818,12 +2830,11 @@ static void rdt_kill_sb(struct super_block *sb) cpus_read_lock(); mutex_lock(&rdtgroup_mutex); - set_mba_sc(false); + rdt_disable_ctx(); /*Put everything back to default values. 
*/ for_each_alloc_capable_rdt_resource(r) reset_all_ctrls(r); - cdp_disable_all(); rmdir_all_sub(); rdt_pseudo_lock_release(); rdtgroup_default.mode = RDT_MODE_SHAREABLE; -- Gitee From 0e78c1619ce28f8aee6363c5771bff15c7f773b6 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:04 -0500 Subject: [PATCH 0487/2138] x86/resctrl: Move default group file creation to mount MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit d27567a0eb54be457b25e240593fdbd1c35c8618 upstream. The default resource group and its files are created during kernel init time. Upcoming changes will make some resctrl files optional based on a mount parameter. If optional files are to be added to the default group based on the mount option, then each new file needs to be created separately and call kernfs_activate() again. Create all files of the default resource group during resctrl mount, destroyed during unmount, to avoid scattering resctrl file addition across two separate code flows. 
Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-6-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 58 +++++++++++++++----------- 1 file changed, 34 insertions(+), 24 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 6fc9739658ed..d04dd495d7cf 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -54,6 +54,9 @@ static struct kernfs_node *kn_mondata; static struct seq_buf last_cmd_status; static char last_cmd_status_buf[512]; +static int rdtgroup_setup_root(struct rdt_fs_context *ctx); +static void rdtgroup_destroy_root(void); + struct dentry *debugfs_resctrl; void rdt_last_cmd_clear(void) @@ -2531,10 +2534,14 @@ static int rdt_get_tree(struct fs_context *fc) goto out; } - ret = rdt_enable_ctx(ctx); + ret = rdtgroup_setup_root(ctx); if (ret) goto out; + ret = rdt_enable_ctx(ctx); + if (ret) + goto out_root; + ret = schemata_list_create(); if (ret) { schemata_list_destroy(); @@ -2543,6 +2550,12 @@ static int rdt_get_tree(struct fs_context *fc) closid_init(); + ret = rdtgroup_add_files(rdtgroup_default.kn, RFTYPE_CTRL_BASE); + if (ret) + goto out_schemata_free; + + kernfs_activate(rdtgroup_default.kn); + ret = rdtgroup_create_info_dir(rdtgroup_default.kn); if (ret < 0) goto out_schemata_free; @@ -2599,6 +2612,8 @@ static int rdt_get_tree(struct fs_context *fc) schemata_list_destroy(); out_ctx: rdt_disable_ctx(); +out_root: + rdtgroup_destroy_root(); out: rdt_last_cmd_clear(); mutex_unlock(&rdtgroup_mutex); @@ -2669,7 +2684,6 @@ static int rdt_init_fs_context(struct fs_context *fc) if (!ctx) return 
-ENOMEM; - ctx->kfc.root = rdt_root; ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; fc->fs_private = &ctx->kfc; fc->ops = &rdt_fs_context_ops; @@ -2839,6 +2853,7 @@ static void rdt_kill_sb(struct super_block *sb) rdt_pseudo_lock_release(); rdtgroup_default.mode = RDT_MODE_SHAREABLE; schemata_list_destroy(); + rdtgroup_destroy_root(); static_branch_disable_cpuslocked(&rdt_alloc_enable_key); static_branch_disable_cpuslocked(&rdt_mon_enable_key); static_branch_disable_cpuslocked(&rdt_enable_key); @@ -3720,10 +3735,8 @@ static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { .show_options = rdtgroup_show_options, }; -static int __init rdtgroup_setup_root(void) +static int rdtgroup_setup_root(struct rdt_fs_context *ctx) { - int ret; - rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, KERNFS_ROOT_CREATE_DEACTIVATED | KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, @@ -3731,6 +3744,20 @@ static int __init rdtgroup_setup_root(void) if (IS_ERR(rdt_root)) return PTR_ERR(rdt_root); + ctx->kfc.root = rdt_root; + rdtgroup_default.kn = kernfs_root_to_node(rdt_root); + + return 0; +} + +static void rdtgroup_destroy_root(void) +{ + kernfs_destroy_root(rdt_root); + rdtgroup_default.kn = NULL; +} + +static void __init rdtgroup_setup_default(void) +{ mutex_lock(&rdtgroup_mutex); rdtgroup_default.closid = 0; @@ -3740,19 +3767,7 @@ static int __init rdtgroup_setup_root(void) list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); - ret = rdtgroup_add_files(kernfs_root_to_node(rdt_root), RFTYPE_CTRL_BASE); - if (ret) { - kernfs_destroy_root(rdt_root); - goto out; - } - - rdtgroup_default.kn = kernfs_root_to_node(rdt_root); - kernfs_activate(rdtgroup_default.kn); - -out: mutex_unlock(&rdtgroup_mutex); - - return ret; } static void domain_destroy_mon_state(struct rdt_domain *d) @@ -3874,13 +3889,11 @@ int __init rdtgroup_init(void) seq_buf_init(&last_cmd_status, last_cmd_status_buf, sizeof(last_cmd_status_buf)); - ret = rdtgroup_setup_root(); - if (ret) - return ret; + rdtgroup_setup_default(); 
ret = sysfs_create_mount_point(fs_kobj, "resctrl"); if (ret) - goto cleanup_root; + return ret; ret = register_filesystem(&rdt_fs_type); if (ret) @@ -3913,8 +3926,6 @@ int __init rdtgroup_init(void) cleanup_mountpoint: sysfs_remove_mount_point(fs_kobj, "resctrl"); -cleanup_root: - kernfs_destroy_root(rdt_root); return ret; } @@ -3924,5 +3935,4 @@ void __exit rdtgroup_exit(void) debugfs_remove_recursive(debugfs_resctrl); unregister_filesystem(&rdt_fs_type); sysfs_remove_mount_point(fs_kobj, "resctrl"); - kernfs_destroy_root(rdt_root); } -- Gitee From 4690c97c7a864ccb2462de0bd66624fbf7d5e7cd Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:05 -0500 Subject: [PATCH 0488/2138] x86/resctrl: Introduce "-o debug" mount option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit cb07d71f01017b7c2885ed629da9b973cb56b1d2 upstream. Add "-o debug" option to mount resctrl filesystem in debug mode. When in debug mode resctrl displays files that have the new RFTYPE_DEBUG flag to help resctrl debugging. Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-7-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- Documentation/arch/x86/resctrl.rst | 5 ++++- arch/x86/kernel/cpu/resctrl/internal.h | 2 ++ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 18 ++++++++++++++++++ 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst index 178ab1d8f747..68f11611f341 100644 --- a/Documentation/arch/x86/resctrl.rst +++ b/Documentation/arch/x86/resctrl.rst @@ -35,7 +35,7 @@ about the feature from resctrl's info directory. 
To use the feature mount the file system:: - # mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps]] /sys/fs/resctrl + # mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps][,debug]] /sys/fs/resctrl mount options are: @@ -46,6 +46,9 @@ mount options are: "mba_MBps": Enable the MBA Software Controller(mba_sc) to specify MBA bandwidth in MBps +"debug": + Make debug files accessible. Available debug files are annotated with + "Available only with debug option". L2 and L3 CDP are controlled separately. diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 2c310fe7f1d6..77cfbbe251d5 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -58,6 +58,7 @@ struct rdt_fs_context { bool enable_cdpl2; bool enable_cdpl3; bool enable_mba_mbps; + bool enable_debug; }; static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc) @@ -247,6 +248,7 @@ struct rdtgroup { #define RFTYPE_TOP BIT(6) #define RFTYPE_RES_CACHE BIT(8) #define RFTYPE_RES_MB BIT(9) +#define RFTYPE_DEBUG BIT(10) #define RFTYPE_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) #define RFTYPE_MON_INFO (RFTYPE_INFO | RFTYPE_MON) #define RFTYPE_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index d04dd495d7cf..c419603dba16 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -59,6 +59,8 @@ static void rdtgroup_destroy_root(void); struct dentry *debugfs_resctrl; +static bool resctrl_debug; + void rdt_last_cmd_clear(void) { lockdep_assert_held(&rdtgroup_mutex); @@ -1894,6 +1896,9 @@ static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) lockdep_assert_held(&rdtgroup_mutex); + if (resctrl_debug) + fflags |= RFTYPE_DEBUG; + for (rft = rfts; rft < rfts + len; rft++) { if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { ret = rdtgroup_add_file(kn, rft); @@ -2397,6 +2402,8 @@ 
static void rdt_disable_ctx(void) resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); set_mba_sc(false); + + resctrl_debug = false; } static int rdt_enable_ctx(struct rdt_fs_context *ctx) @@ -2421,6 +2428,9 @@ static int rdt_enable_ctx(struct rdt_fs_context *ctx) goto out_cdpl3; } + if (ctx->enable_debug) + resctrl_debug = true; + return 0; out_cdpl3: @@ -2625,6 +2635,7 @@ enum rdt_param { Opt_cdp, Opt_cdpl2, Opt_mba_mbps, + Opt_debug, nr__rdt_params }; @@ -2632,6 +2643,7 @@ static const struct fs_parameter_spec rdt_fs_parameters[] = { fsparam_flag("cdp", Opt_cdp), fsparam_flag("cdpl2", Opt_cdpl2), fsparam_flag("mba_MBps", Opt_mba_mbps), + fsparam_flag("debug", Opt_debug), {} }; @@ -2657,6 +2669,9 @@ static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) return -EINVAL; ctx->enable_mba_mbps = true; return 0; + case Opt_debug: + ctx->enable_debug = true; + return 0; } return -EINVAL; @@ -3725,6 +3740,9 @@ static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl)) seq_puts(seq, ",mba_MBps"); + if (resctrl_debug) + seq_puts(seq, ",debug"); + return 0; } -- Gitee From 1423d960506283f964130386e653477240ec8379 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:06 -0500 Subject: [PATCH 0489/2138] x86/resctrl: Display CLOSID for resource group MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit ca8dad225e237493f19b1c5d4a8531f13a9b078f upstream. In x86, hardware uses CLOSID to identify a control group. When a user creates a control group this information is not visible to the user. It can help resctrl debugging. Add CLOSID(ctrl_hw_id) to the control groups display in the resctrl interface. Users can see this detail when resctrl is mounted with the "-o debug" option. Other architectures do not use "CLOSID". 
Use the names ctrl_hw_id to refer to "CLOSID" in an effort to keep the naming generic. For example: $cat /sys/fs/resctrl/ctrl_grp1/ctrl_hw_id 1 Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-8-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- Documentation/arch/x86/resctrl.rst | 4 ++++ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 23 +++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst index 68f11611f341..7412252f95a7 100644 --- a/Documentation/arch/x86/resctrl.rst +++ b/Documentation/arch/x86/resctrl.rst @@ -359,6 +359,10 @@ When control is enabled all CTRL_MON groups will also contain: file. On successful pseudo-locked region creation the mode will automatically change to "pseudo-locked". +"ctrl_hw_id": + Available only with debug option. The identifier used by hardware + for the control group. On x86 this is the CLOSID. 
+ When monitoring is enabled all MON groups will also contain: "mon_data": diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index c419603dba16..069a9c395f6c 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -779,6 +779,22 @@ static int rdtgroup_tasks_show(struct kernfs_open_file *of, return ret; } +static int rdtgroup_closid_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + seq_printf(s, "%u\n", rdtgrp->closid); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + #ifdef CONFIG_PROC_CPU_RESCTRL /* @@ -1883,6 +1899,13 @@ static struct rftype res_common_files[] = { .seq_show = rdt_has_sparse_bitmasks_show, .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, }, + { + .name = "ctrl_hw_id", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_closid_show, + .fflags = RFTYPE_CTRL_BASE | RFTYPE_DEBUG, + }, }; -- Gitee From 94735eda1b50e37d29eb418e01c44f96d44b1aa8 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:07 -0500 Subject: [PATCH 0490/2138] x86/resctrl: Add support for the files of MON groups only MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit 918f211b5e4e709e91acf856967a850569c96b71 upstream. Files unique to monitoring groups have the RFTYPE_MON flag. When a new monitoring group is created the resctrl files with flags RFTYPE_BASE (files common to all resource groups) and RFTYPE_MON (files unique to monitoring groups) are created to support interacting with the new monitoring group. A resource group can support both monitoring and control, also termed a CTRL_MON resource group. CTRL_MON groups should get both monitoring and control resctrl files but that is not the case. 
Only the RFTYPE_BASE and RFTYPE_CTRL files are created for CTRL_MON groups. Ensure that files with the RFTYPE_MON flag are created for CTRL_MON groups. Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Ilpo Järvinen Reviewed-by: Reinette Chatre Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-9-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 069a9c395f6c..910db69f9fa5 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2553,6 +2553,7 @@ static void schemata_list_destroy(void) static int rdt_get_tree(struct fs_context *fc) { struct rdt_fs_context *ctx = rdt_fc2context(fc); + unsigned long flags = RFTYPE_CTRL_BASE; struct rdt_domain *dom; struct rdt_resource *r; int ret; @@ -2583,7 +2584,10 @@ static int rdt_get_tree(struct fs_context *fc) closid_init(); - ret = rdtgroup_add_files(rdtgroup_default.kn, RFTYPE_CTRL_BASE); + if (rdt_mon_capable) + flags |= RFTYPE_MON; + + ret = rdtgroup_add_files(rdtgroup_default.kn, flags); if (ret) goto out_schemata_free; @@ -3273,8 +3277,8 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, enum rdt_group_type rtype, struct rdtgroup **r) { struct rdtgroup *prdtgrp, *rdtgrp; + unsigned long files = 0; struct kernfs_node *kn; - uint files = 0; int ret; prdtgrp = rdtgroup_kn_lock_live(parent_kn); @@ -3326,10 +3330,13 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, goto out_destroy; } - if (rtype == RDTCTRL_GROUP) + if (rtype == RDTCTRL_GROUP) { files = RFTYPE_BASE | RFTYPE_CTRL; - else + if (rdt_mon_capable) + files |= 
RFTYPE_MON; + } else { files = RFTYPE_BASE | RFTYPE_MON; + } ret = rdtgroup_add_files(kn, files); if (ret) { -- Gitee From 6655315b86456040c94b7ff9be0a25b4597c5d8d Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 16 Oct 2023 19:23:08 -0500 Subject: [PATCH 0491/2138] x86/resctrl: Display RMID of resource group MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit 4cee14bcb14881aae81d60f106a335c68553ac1f upstream. In x86, hardware uses RMID to identify a monitoring group. When a user creates a monitor group these details are not visible. These details can help resctrl debugging. Add RMID(mon_hw_id) to the monitor groups display in the resctrl interface. Users can see these details when resctrl is mounted with "-o debug" option. Add RFTYPE_MON_BASE that complements existing RFTYPE_CTRL_BASE and represents files belonging to monitoring groups. Other architectures do not use "RMID". Use the name mon_hw_id to refer to "RMID" in an effort to keep the naming generic. 
For example: $cat /sys/fs/resctrl/mon_groups/mon_grp1/mon_hw_id 3 Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Peter Newman Reviewed-by: Tan Shaopeng Reviewed-by: Fenghua Yu Reviewed-by: Reinette Chatre Reviewed-by: Ilpo Järvinen Tested-by: Peter Newman Tested-by: Tan Shaopeng Link: https://lore.kernel.org/r/20231017002308.134480-10-babu.moger@amd.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- Documentation/arch/x86/resctrl.rst | 4 ++++ arch/x86/kernel/cpu/resctrl/internal.h | 1 + arch/x86/kernel/cpu/resctrl/rdtgroup.c | 23 +++++++++++++++++++++++ 3 files changed, 28 insertions(+) diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst index 7412252f95a7..a6279df64a9d 100644 --- a/Documentation/arch/x86/resctrl.rst +++ b/Documentation/arch/x86/resctrl.rst @@ -376,6 +376,10 @@ When monitoring is enabled all MON groups will also contain: the sum for all tasks in the CTRL_MON group and all tasks in MON groups. Please see example section for more details on usage. +"mon_hw_id": + Available only with debug option. The identifier used by hardware + for the monitor group. On x86 this is the RMID. 
+ Resource allocation rules ------------------------- diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 77cfbbe251d5..52e7e7deee10 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -253,6 +253,7 @@ struct rdtgroup { #define RFTYPE_MON_INFO (RFTYPE_INFO | RFTYPE_MON) #define RFTYPE_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) #define RFTYPE_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) +#define RFTYPE_MON_BASE (RFTYPE_BASE | RFTYPE_MON) /* List of all resource groups */ extern struct list_head rdt_all_groups; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 910db69f9fa5..2b69e560b05f 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -795,6 +795,22 @@ static int rdtgroup_closid_show(struct kernfs_open_file *of, return ret; } +static int rdtgroup_rmid_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + seq_printf(s, "%u\n", rdtgrp->mon.rmid); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + #ifdef CONFIG_PROC_CPU_RESCTRL /* @@ -1869,6 +1885,13 @@ static struct rftype res_common_files[] = { .seq_show = rdtgroup_tasks_show, .fflags = RFTYPE_BASE, }, + { + .name = "mon_hw_id", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_rmid_show, + .fflags = RFTYPE_MON_BASE | RFTYPE_DEBUG, + }, { .name = "schemata", .mode = 0644, -- Gitee From 4d1052cb2e8bad23e3a22cc61cbeed4444143248 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Wed, 1 Nov 2023 14:26:15 -0700 Subject: [PATCH 0492/2138] x86/resctrl: Fix unused variable warning in cache_alloc_hsw_probe() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit 1b908debf53ff3cf0e43e0fa51e7319a23518e6c upstream. 
In a "W=1" build gcc throws a warning: arch/x86/kernel/cpu/resctrl/core.c: In function ‘cache_alloc_hsw_probe’: arch/x86/kernel/cpu/resctrl/core.c:139:16: warning: variable ‘h’ set but not used Switch from wrmsr_safe() to wrmsrl_safe(), and from rdmsr() to rdmsrl() using a single u64 argument for the MSR value instead of the pair of u32 for the high and low halves. Signed-off-by: Tony Luck Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Babu Moger Acked-by: Reinette Chatre Link: https://lore.kernel.org/r/ZULCd/TGJL9Dmncf@agluck-desk3 Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index fbdaa9307138..4ec87f9e6d06 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -136,15 +136,15 @@ static inline void cache_alloc_hsw_probe(void) { struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3]; struct rdt_resource *r = &hw_res->r_resctrl; - u32 l, h, max_cbm = BIT_MASK(20) - 1; + u64 max_cbm = BIT_ULL_MASK(20) - 1, l3_cbm_0; - if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0)) + if (wrmsrl_safe(MSR_IA32_L3_CBM_BASE, max_cbm)) return; - rdmsr(MSR_IA32_L3_CBM_BASE, l, h); + rdmsrl(MSR_IA32_L3_CBM_BASE, l3_cbm_0); /* If all the bits were set in MSR, return success */ - if (l != max_cbm) + if (l3_cbm_0 != max_cbm) return; hw_res->num_closid = 4; -- Gitee From 68495774911326f44f14b7e5ae46f0a779a2000d Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Wed, 24 Jan 2024 11:52:56 -0600 Subject: [PATCH 0493/2138] x86/resctrl: Remove redundant variable in mbm_config_write_domain() ANBZ: #8626 commit fc747eebef734563cf68a512f57937c8f231834a upstream. The kernel test robot reported the following warning after commit 54e35eb8611c ("x86/resctrl: Read supported bandwidth sources from CPUID"). 
even though the issue is present even in the original commit 92bd5a139033 ("x86/resctrl: Add interface to write mbm_total_bytes_config") which added this function. The reported warning is: $ make C=1 CHECK=scripts/coccicheck arch/x86/kernel/cpu/resctrl/rdtgroup.o ... arch/x86/kernel/cpu/resctrl/rdtgroup.c:1621:5-8: Unneeded variable: "ret". Return "0" on line 1655 Remove the local variable 'ret'. [ bp: Massage commit message, make mbm_config_write_domain() void. ] Fixes: 92bd5a139033 ("x86/resctrl: Add interface to write mbm_total_bytes_config") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202401241810.jbd8Ipa1-lkp@intel.com/ Signed-off-by: Babu Moger Signed-off-by: Borislav Petkov (AMD) Acked-by: Reinette Chatre Link: https://lore.kernel.org/r/202401241810.jbd8Ipa1-lkp@intel.com Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 2b69e560b05f..aa24343f1d23 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1614,11 +1614,10 @@ static void mon_event_config_write(void *info) wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0); } -static int mbm_config_write_domain(struct rdt_resource *r, - struct rdt_domain *d, u32 evtid, u32 val) +static void mbm_config_write_domain(struct rdt_resource *r, + struct rdt_domain *d, u32 evtid, u32 val) { struct mon_config_info mon_info = {0}; - int ret = 0; /* * Read the current config value first. 
If both are the same then @@ -1627,7 +1626,7 @@ static int mbm_config_write_domain(struct rdt_resource *r, mon_info.evtid = evtid; mondata_config_read(d, &mon_info); if (mon_info.mon_config == val) - goto out; + return; mon_info.mon_config = val; @@ -1650,9 +1649,6 @@ static int mbm_config_write_domain(struct rdt_resource *r, * mbm_local and mbm_total counts for all the RMIDs. */ resctrl_arch_reset_rmid_all(r, d); - -out: - return ret; } static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) @@ -1661,7 +1657,6 @@ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) char *dom_str = NULL, *id_str; unsigned long dom_id, val; struct rdt_domain *d; - int ret = 0; next: if (!tok || tok[0] == '\0') @@ -1690,9 +1685,7 @@ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) list_for_each_entry(d, &r->domains, list) { if (d->id == dom_id) { - ret = mbm_config_write_domain(r, d, evtid, val); - if (ret) - return -EINVAL; + mbm_config_write_domain(r, d, evtid, val); goto next; } } -- Gitee From 27ee54457134e20b736b67edb5c0701ba19b0dc2 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:15 +0000 Subject: [PATCH 0494/2138] tick/nohz: Move tick_nohz_full_mask declaration outside the #ifdef ANBZ: #8626 commit 31a5c0b7c674977889ce721d69101bc35f25e041 upstream. tick_nohz_full_mask lists the CPUs that are nohz_full. This is only needed when CONFIG_NO_HZ_FULL is defined. tick_nohz_full_cpu() allows a specific CPU to be tested against the mask, and evaluates to false when CONFIG_NO_HZ_FULL is not defined. The resctrl code needs to pick a CPU to run some work on, a new helper prefers housekeeping CPUs by examining the tick_nohz_full_mask. Hiding the declaration behind #ifdef CONFIG_NO_HZ_FULL forces all the users to be behind an #ifdef too. 
Move the tick_nohz_full_mask declaration, this lets callers drop the #ifdef, and guard access to tick_nohz_full_mask with IS_ENABLED() or something like tick_nohz_full_cpu(). The definition does not need to be moved as any callers should be removed at compile time unless CONFIG_NO_HZ_FULL is defined. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Thomas Gleixner Acked-by: Reinette Chatre # for resctrl dependency Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-2-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- include/linux/tick.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/include/linux/tick.h b/include/linux/tick.h index 9701c571a5cf..39f01f0bef76 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -174,9 +174,16 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } static inline void tick_nohz_idle_stop_tick_protected(void) { } #endif /* !CONFIG_NO_HZ_COMMON */ +/* + * Mask of CPUs that are nohz_full. + * + * Users should be guarded by CONFIG_NO_HZ_FULL or a tick_nohz_full_cpu() + * check. + */ +extern cpumask_var_t tick_nohz_full_mask; + #ifdef CONFIG_NO_HZ_FULL extern bool tick_nohz_full_running; -extern cpumask_var_t tick_nohz_full_mask; static inline bool tick_nohz_full_enabled(void) { -- Gitee From e28a6155532ec25ba2feb4c37e6491afb998ffbc Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:16 +0000 Subject: [PATCH 0495/2138] x86/resctrl: Free rmid_ptrs from resctrl_exit() ANBZ: #8626 commit 3f7b07380d58cfbb6a2d3aa672dcc76c0f4b0745 upstream. rmid_ptrs[] is allocated from dom_data_init() but never free()d. 
While the exit text ends up in the linker script's DISCARD section, the direction of travel is for resctrl to be/have loadable modules. Add resctrl_put_mon_l3_config() to cleanup any memory allocated by rdt_get_mon_l3_config(). There is no reason to backport this to a stable kernel. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Babu Moger Reviewed-by: Reinette Chatre Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-3-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 6 ++++++ arch/x86/kernel/cpu/resctrl/internal.h | 1 + arch/x86/kernel/cpu/resctrl/monitor.c | 15 +++++++++++++++ 3 files changed, 22 insertions(+) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 4ec87f9e6d06..aed9b182dbd8 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -994,8 +994,14 @@ late_initcall(resctrl_late_init); static void __exit resctrl_exit(void) { + struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; + cpuhp_remove_state(rdt_online); + rdtgroup_exit(); + + if (r->mon_capable) + rdt_put_mon_l3_config(); } __exitcall(resctrl_exit); diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 52e7e7deee10..61c763604fc9 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -544,6 +544,7 @@ void closid_free(int closid); int alloc_rmid(void); void free_rmid(u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); +void __exit rdt_put_mon_l3_config(void); bool __init rdt_cpu_has(int flag); void mon_event_count(void *info); int rdtgroup_mondata_show(struct seq_file *m, void *arg); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c 
index 3a6c069614eb..3a73db0579d8 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -719,6 +719,16 @@ static int dom_data_init(struct rdt_resource *r) return 0; } +static void __exit dom_data_exit(void) +{ + mutex_lock(&rdtgroup_mutex); + + kfree(rmid_ptrs); + rmid_ptrs = NULL; + + mutex_unlock(&rdtgroup_mutex); +} + static struct mon_evt llc_occupancy_event = { .name = "llc_occupancy", .evtid = QOS_L3_OCCUP_EVENT_ID, @@ -814,6 +824,11 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) return 0; } +void __exit rdt_put_mon_l3_config(void) +{ + dom_data_exit(); +} + void __init intel_rdt_mbm_apply_quirk(void) { int cf_index; -- Gitee From 0b0584f7849f358ded984ff6704546460112ffb0 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:17 +0000 Subject: [PATCH 0496/2138] x86/resctrl: Create helper for RMID allocation and mondata dir creation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8626 commit b1de313979af99dc0f999656fc99bbcb52559a38 upstream. When monitoring is supported, each monitor and control group is allocated an RMID. For control groups, rdtgroup_mkdir_ctrl_mon() later goes on to allocate the CLOSID. MPAM's equivalent of RMID are not an independent number, so can't be allocated until the CLOSID is known. An RMID allocation for one CLOSID may fail, whereas another may succeed depending on how many monitor groups a control group has. The RMID allocation needs to move to be after the CLOSID has been allocated. Move the RMID allocation and mondata dir creation to a helper. 
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Ilpo Järvinen Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Peter Newman Tested-by: Shaopeng Tan Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-4-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 42 +++++++++++++++++--------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index aa24343f1d23..4ea5a871be49 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -3288,6 +3288,30 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) return ret; } +static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) +{ + int ret; + + if (!rdt_mon_capable) + return 0; + + ret = alloc_rmid(); + if (ret < 0) { + rdt_last_cmd_puts("Out of RMIDs\n"); + return ret; + } + rdtgrp->mon.rmid = ret; + + ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn); + if (ret) { + rdt_last_cmd_puts("kernfs subdir error\n"); + free_rmid(rdtgrp->mon.rmid); + return ret; + } + + return 0; +} + static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, const char *name, umode_t mode, enum rdt_group_type rtype, struct rdtgroup **r) @@ -3360,20 +3384,10 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, goto out_destroy; } - if (rdt_mon_capable) { - ret = alloc_rmid(); - if (ret < 0) { - rdt_last_cmd_puts("Out of RMIDs\n"); - goto out_destroy; - } - rdtgrp->mon.rmid = ret; + ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); + if (ret) + goto out_destroy; - ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn); - if (ret) { - rdt_last_cmd_puts("kernfs subdir error\n"); - goto out_idfree; - } - } 
kernfs_activate(kn); /* @@ -3381,8 +3395,6 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, */ return 0; -out_idfree: - free_rmid(rdtgrp->mon.rmid); out_destroy: kernfs_put(rdtgrp->kn); kernfs_remove(rdtgrp->kn); -- Gitee From 4cfc6f3d3a44017b7a188318ee5345a2fa59c419 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:18 +0000 Subject: [PATCH 0497/2138] x86/resctrl: Move RMID allocation out of mkdir_rdt_prepare() ANBZ: #8626 commit 311639e9512bb3af2abae32be9322b8a9b30eaa1 upstream. RMIDs are allocated for each monitor or control group directory, because each of these needs its own RMID. For control groups, rdtgroup_mkdir_ctrl_mon() later goes on to allocate the CLOSID. MPAM's equivalent of RMID is not an independent number, so can't be allocated until the CLOSID is known. An RMID allocation for one CLOSID may fail, whereas another may succeed depending on how many monitor groups a control group has. The RMID allocation needs to move to be after the CLOSID has been allocated. Move the RMID allocation out of mkdir_rdt_prepare() to occur in its caller, after the mkdir_rdt_prepare() call. This allows the RMID allocator to know the CLOSID. 
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-5-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 35 +++++++++++++++++++------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 4ea5a871be49..f455a10b74ab 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -3312,6 +3312,12 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) return 0; } +static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp) +{ + if (rdt_mon_capable) + free_rmid(rgrp->mon.rmid); +} + static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, const char *name, umode_t mode, enum rdt_group_type rtype, struct rdtgroup **r) @@ -3384,12 +3390,6 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, goto out_destroy; } - ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); - if (ret) - goto out_destroy; - - kernfs_activate(kn); - /* * The caller unlocks the parent_kn upon success. 
*/ @@ -3408,7 +3408,6 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) { kernfs_remove(rgrp->kn); - free_rmid(rgrp->mon.rmid); rdtgroup_remove(rgrp); } @@ -3430,12 +3429,21 @@ static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, prgrp = rdtgrp->mon.parent; rdtgrp->closid = prgrp->closid; + ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); + if (ret) { + mkdir_rdt_prepare_clean(rdtgrp); + goto out_unlock; + } + + kernfs_activate(rdtgrp->kn); + /* * Add the rdtgrp to the list of rdtgrps the parent * ctrl_mon group has to track. */ list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); +out_unlock: rdtgroup_kn_unlock(parent_kn); return ret; } @@ -3466,9 +3474,16 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, ret = 0; rdtgrp->closid = closid; + + ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); + if (ret) + goto out_closid_free; + + kernfs_activate(rdtgrp->kn); + ret = rdtgroup_init_alloc(rdtgrp); if (ret < 0) - goto out_id_free; + goto out_rmid_free; list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); @@ -3488,7 +3503,9 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, out_del_list: list_del(&rdtgrp->rdtgroup_list); -out_id_free: +out_rmid_free: + mkdir_rdt_prepare_rmid_free(rdtgrp); +out_closid_free: closid_free(closid); out_common_fail: mkdir_rdt_prepare_clean(rdtgrp); -- Gitee From 26899dde19afdfaff8459612b5c9e635b3d4256f Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:19 +0000 Subject: [PATCH 0498/2138] x86/resctrl: Track the closid with the rmid ANBZ: #8626 commit 40fc735b78f0c81cea7d1c511cfd83892cb4d679 upstream. x86's RMID are independent of the CLOSID. An RMID can be allocated, used and freed without considering the CLOSID. MPAM's equivalent feature is PMG, which is not an independent number, it extends the CLOSID/PARTID space. For MPAM, only PMG-bits worth of 'RMID' can be allocated for a single CLOSID. i.e. 
if there is 1 bit of PMG space, then each CLOSID can have two monitor groups. To allow resctrl to disambiguate RMID values for different CLOSID, everything in resctrl that keeps an RMID value needs to know the CLOSID too. This will always be ignored on x86. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Xin Hao Reviewed-by: Reinette Chatre Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-6-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/include/asm/resctrl.h | 7 +++ arch/x86/kernel/cpu/resctrl/internal.h | 2 +- arch/x86/kernel/cpu/resctrl/monitor.c | 73 +++++++++++++++-------- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 4 +- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 12 ++-- include/linux/resctrl.h | 16 ++++- 6 files changed, 77 insertions(+), 37 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 255a78d9d906..cc6e1bce7b1a 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -7,6 +7,13 @@ #include #include +/* + * This value can never be a valid CLOSID, and is used when mapping a + * (closid, rmid) pair to an index and back. On x86 only the RMID is + * needed. The index is a software defined value. 
+ */ +#define X86_RESCTRL_EMPTY_CLOSID ((u32)~0) + /** * struct resctrl_pqr_state - State cache for the PQR MSR * @cur_rmid: The cached Resource Monitoring ID diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 61c763604fc9..ae0e3338abc4 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -542,7 +542,7 @@ struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r); int closids_supported(void); void closid_free(int closid); int alloc_rmid(void); -void free_rmid(u32 rmid); +void free_rmid(u32 closid, u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); void __exit rdt_put_mon_l3_config(void); bool __init rdt_cpu_has(int flag); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 3a73db0579d8..3dad4134d2c9 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -24,7 +24,20 @@ #include "internal.h" +/** + * struct rmid_entry - dirty tracking for all RMID. + * @closid: The CLOSID for this entry. + * @rmid: The RMID for this entry. + * @busy: The number of domains with cached data using this RMID. + * @list: Member of the rmid_free_lru list when busy == 0. + * + * Depending on the architecture the correct monitor is accessed using + * both @closid and @rmid, or @rmid only. + * + * Take the rdtgroup_mutex when accessing. 
+ */ struct rmid_entry { + u32 closid; u32 rmid; int busy; struct list_head list; @@ -136,7 +149,7 @@ static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val) return val; } -static inline struct rmid_entry *__rmid_entry(u32 rmid) +static inline struct rmid_entry *__rmid_entry(u32 closid, u32 rmid) { struct rmid_entry *entry; @@ -190,7 +203,8 @@ static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom, } void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid) + u32 unused, u32 rmid, + enum resctrl_event_id eventid) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); struct arch_mbm_state *am; @@ -230,7 +244,8 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width) } int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid, u64 *val) + u32 unused, u32 rmid, enum resctrl_event_id eventid, + u64 *val) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); @@ -285,9 +300,9 @@ void __check_limbo(struct rdt_domain *d, bool force_free) if (nrmid >= r->num_rmid) break; - entry = __rmid_entry(nrmid); + entry = __rmid_entry(X86_RESCTRL_EMPTY_CLOSID, nrmid);// temporary - if (resctrl_arch_rmid_read(r, d, entry->rmid, + if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid, QOS_L3_OCCUP_EVENT_ID, &val)) { rmid_dirty = true; } else { @@ -342,7 +357,8 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) cpu = get_cpu(); list_for_each_entry(d, &r->domains, list) { if (cpumask_test_cpu(cpu, &d->cpu_mask)) { - err = resctrl_arch_rmid_read(r, d, entry->rmid, + err = resctrl_arch_rmid_read(r, d, entry->closid, + entry->rmid, QOS_L3_OCCUP_EVENT_ID, &val); if (err || val <= resctrl_rmid_realloc_threshold) @@ -366,7 +382,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) list_add_tail(&entry->list, &rmid_free_lru); } -void 
free_rmid(u32 rmid) +void free_rmid(u32 closid, u32 rmid) { struct rmid_entry *entry; @@ -375,7 +391,7 @@ void free_rmid(u32 rmid) lockdep_assert_held(&rdtgroup_mutex); - entry = __rmid_entry(rmid); + entry = __rmid_entry(closid, rmid); if (is_llc_occupancy_enabled()) add_rmid_to_limbo(entry); @@ -383,8 +399,8 @@ void free_rmid(u32 rmid) list_add_tail(&entry->list, &rmid_free_lru); } -static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 rmid, - enum resctrl_event_id evtid) +static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 closid, + u32 rmid, enum resctrl_event_id evtid) { switch (evtid) { case QOS_L3_MBM_TOTAL_EVENT_ID: @@ -396,20 +412,21 @@ static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 rmid, } } -static int __mon_event_count(u32 rmid, struct rmid_read *rr) +static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr) { struct mbm_state *m; u64 tval = 0; if (rr->first) { - resctrl_arch_reset_rmid(rr->r, rr->d, rmid, rr->evtid); - m = get_mbm_state(rr->d, rmid, rr->evtid); + resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid); + m = get_mbm_state(rr->d, closid, rmid, rr->evtid); if (m) memset(m, 0, sizeof(struct mbm_state)); return 0; } - rr->err = resctrl_arch_rmid_read(rr->r, rr->d, rmid, rr->evtid, &tval); + rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid, + &tval); if (rr->err) return rr->err; @@ -421,6 +438,7 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr) /* * mbm_bw_count() - Update bw count from values previously read by * __mon_event_count(). + * @closid: The closid used to identify the cached mbm_state. * @rmid: The rmid used to identify the cached mbm_state. * @rr: The struct rmid_read populated by __mon_event_count(). * @@ -429,7 +447,7 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr) * __mon_event_count() is compared with the chunks value from the previous * invocation. 
This must be called once per second to maintain values in MBps. */ -static void mbm_bw_count(u32 rmid, struct rmid_read *rr) +static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr) { struct mbm_state *m = &rr->d->mbm_local[rmid]; u64 cur_bw, bytes, cur_bytes; @@ -456,7 +474,7 @@ void mon_event_count(void *info) rdtgrp = rr->rgrp; - ret = __mon_event_count(rdtgrp->mon.rmid, rr); + ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr); /* * For Ctrl groups read data from child monitor groups and @@ -467,7 +485,8 @@ void mon_event_count(void *info) if (rdtgrp->type == RDTCTRL_GROUP) { list_for_each_entry(entry, head, mon.crdtgrp_list) { - if (__mon_event_count(entry->mon.rmid, rr) == 0) + if (__mon_event_count(entry->closid, entry->mon.rmid, + rr) == 0) ret = 0; } } @@ -578,7 +597,8 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val); } -static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid) +static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, u32 rmid) { struct rmid_read rr; @@ -593,12 +613,12 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid) if (is_mbm_total_enabled()) { rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; rr.val = 0; - __mon_event_count(rmid, &rr); + __mon_event_count(closid, rmid, &rr); } if (is_mbm_local_enabled()) { rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; rr.val = 0; - __mon_event_count(rmid, &rr); + __mon_event_count(closid, rmid, &rr); /* * Call the MBA software controller only for the @@ -606,7 +626,7 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid) * the software controller explicitly. 
*/ if (is_mba_sc(NULL)) - mbm_bw_count(rmid, &rr); + mbm_bw_count(closid, rmid, &rr); } } @@ -663,11 +683,11 @@ void mbm_handle_overflow(struct work_struct *work) d = container_of(work, struct rdt_domain, mbm_over.work); list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { - mbm_update(r, d, prgrp->mon.rmid); + mbm_update(r, d, prgrp->closid, prgrp->mon.rmid); head = &prgrp->mon.crdtgrp_list; list_for_each_entry(crgrp, head, mon.crdtgrp_list) - mbm_update(r, d, crgrp->mon.rmid); + mbm_update(r, d, crgrp->closid, crgrp->mon.rmid); if (is_mba_sc(NULL)) update_mba_bw(prgrp, d); @@ -710,10 +730,11 @@ static int dom_data_init(struct rdt_resource *r) } /* - * RMID 0 is special and is always allocated. It's used for all - * tasks that are not monitored. + * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and + * are always allocated. These are used for the rdtgroup_default + * control group, which will be setup later in rdtgroup_init(). */ - entry = __rmid_entry(0); + entry = __rmid_entry(RESCTRL_RESERVED_CLOSID, RESCTRL_RESERVED_RMID); list_del(&entry->list); return 0; diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 8f559eeae08e..65bee6f11015 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -752,7 +752,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) * anymore when this group would be used for pseudo-locking. This * is safe to call on platforms not capable of monitoring. 
*/ - free_rmid(rdtgrp->mon.rmid); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); ret = 0; goto out; @@ -787,7 +787,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) ret = rdtgroup_locksetup_user_restore(rdtgrp); if (ret) { - free_rmid(rdtgrp->mon.rmid); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); return ret; } diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index f455a10b74ab..ad7da7254f4d 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2837,7 +2837,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) head = &rdtgrp->mon.crdtgrp_list; list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { - free_rmid(sentry->mon.rmid); + free_rmid(sentry->closid, sentry->mon.rmid); list_del(&sentry->mon.crdtgrp_list); if (atomic_read(&sentry->waitcount) != 0) @@ -2877,7 +2877,7 @@ static void rmdir_all_sub(void) cpumask_or(&rdtgroup_default.cpu_mask, &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); - free_rmid(rdtgrp->mon.rmid); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); kernfs_remove(rdtgrp->kn); list_del(&rdtgrp->rdtgroup_list); @@ -3305,7 +3305,7 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn); if (ret) { rdt_last_cmd_puts("kernfs subdir error\n"); - free_rmid(rdtgrp->mon.rmid); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); return ret; } @@ -3315,7 +3315,7 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp) { if (rdt_mon_capable) - free_rmid(rgrp->mon.rmid); + free_rmid(rgrp->closid, rgrp->mon.rmid); } static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, @@ -3574,7 +3574,7 @@ static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) update_closid_rmid(tmpmask, NULL); rdtgrp->flags = RDT_DELETED; - free_rmid(rdtgrp->mon.rmid); + free_rmid(rdtgrp->closid, 
rdtgrp->mon.rmid); /* * Remove the rdtgrp from the parent ctrl_mon group's list @@ -3620,8 +3620,8 @@ static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); update_closid_rmid(tmpmask, NULL); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); closid_free(rdtgrp->closid); - free_rmid(rdtgrp->mon.rmid); rdtgroup_ctrl_remove(rdtgrp); diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 66942d7fba7f..bd4ec22b5a96 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -6,6 +6,10 @@ #include #include +/* CLOSID, RMID value used by the default control group */ +#define RESCTRL_RESERVED_CLOSID 0 +#define RESCTRL_RESERVED_RMID 0 + #ifdef CONFIG_PROC_CPU_RESCTRL int proc_resctrl_show(struct seq_file *m, @@ -225,6 +229,9 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); * for this resource and domain. * @r: resource that the counter should be read from. * @d: domain that the counter should be read from. + * @closid: closid that matches the rmid. Depending on the architecture, the + * counter may match traffic of both @closid and @rmid, or @rmid + * only. * @rmid: rmid of the counter to read. * @eventid: eventid to read, e.g. L3 occupancy. * @val: result of the counter read in bytes. @@ -235,20 +242,25 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); * 0 on success, or -EIO, -EINVAL etc on error. */ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid, u64 *val); + u32 closid, u32 rmid, enum resctrl_event_id eventid, + u64 *val); + /** * resctrl_arch_reset_rmid() - Reset any private state associated with rmid * and eventid. * @r: The domain's resource. * @d: The rmid's domain. + * @closid: closid that matches the rmid. Depending on the architecture, the + * counter may match traffic of both @closid and @rmid, or @rmid only. 
* @rmid: The rmid whose counter values should be reset. * @eventid: The eventid whose counter values should be reset. * * This can be called from any CPU. */ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid); + u32 closid, u32 rmid, + enum resctrl_event_id eventid); /** * resctrl_arch_reset_rmid_all() - Reset all private state associated with -- Gitee From bfd025f9ba77452512a2f413089675a05fd00847 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:20 +0000 Subject: [PATCH 0499/2138] x86/resctrl: Access per-rmid structures by index ANBZ: #8626 commit 6791e0ea30711b937d5cb6e2b17f59a2a2af5386 upstream. x86 systems identify traffic using the CLOSID and RMID. The CLOSID is used to lookup the control policy, the RMID is used for monitoring. For x86 these are independent numbers. Arm's MPAM has equivalent features PARTID and PMG, where the PARTID is used to lookup the control policy. The PMG in contrast is a small number of bits that are used to subdivide PARTID when monitoring. The cache-occupancy monitors require the PARTID to be specified when monitoring. This means MPAM's PMG field is not unique. There are multiple PMG-0, one per allocated CLOSID/PARTID. If PMG is treated as equivalent to RMID, it cannot be allocated as an independent number. Bitmaps like rmid_busy_llc need to be sized by the number of unique entries for this resource. Treat the combined CLOSID and RMID as an index, and provide architecture helpers to pack and unpack an index. This makes the MPAM values unique. The domain's rmid_busy_llc and rmid_ptrs[] are then sized by index, as are domain mbm_local[] and mbm_total[]. x86 can ignore the CLOSID field when packing and unpacking an index, and report as many indexes as RMID. 
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Babu Moger Reviewed-by: Reinette Chatre Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-7-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/include/asm/resctrl.h | 17 +++++ arch/x86/kernel/cpu/resctrl/core.c | 13 ++-- arch/x86/kernel/cpu/resctrl/internal.h | 4 +- arch/x86/kernel/cpu/resctrl/monitor.c | 98 +++++++++++++++++--------- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 13 ++-- 5 files changed, 100 insertions(+), 45 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index cc6e1bce7b1a..db4c84dde2d5 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -101,6 +101,23 @@ static inline void resctrl_sched_in(struct task_struct *tsk) __resctrl_sched_in(tsk); } +static inline u32 resctrl_arch_system_num_rmid_idx(void) +{ + /* RMID are independent numbers for x86. 
num_rmid_idx == num_rmid */ + return boot_cpu_data.x86_cache_max_rmid + 1; +} + +static inline void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid) +{ + *rmid = idx; + *closid = X86_RESCTRL_EMPTY_CLOSID; +} + +static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid) +{ + return rmid; +} + void resctrl_cpu_detect(struct cpuinfo_x86 *c); #else diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index aed9b182dbd8..42ea49735564 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -587,7 +587,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) mbm_setup_overflow_handler(d, 0); } if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu && - has_busy_rmid(r, d)) { + has_busy_rmid(d)) { cancel_delayed_work(&d->cqm_limbo); cqm_setup_limbo_handler(d, 0); } @@ -598,11 +598,12 @@ static void clear_closid_rmid(int cpu) { struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); - state->default_closid = 0; - state->default_rmid = 0; - state->cur_closid = 0; - state->cur_rmid = 0; - wrmsr(MSR_IA32_PQR_ASSOC, 0, 0); + state->default_closid = RESCTRL_RESERVED_CLOSID; + state->default_rmid = RESCTRL_RESERVED_RMID; + state->cur_closid = RESCTRL_RESERVED_CLOSID; + state->cur_rmid = RESCTRL_RESERVED_RMID; + wrmsr(MSR_IA32_PQR_ASSOC, RESCTRL_RESERVED_RMID, + RESCTRL_RESERVED_CLOSID); } static int resctrl_online_cpu(unsigned int cpu) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index ae0e3338abc4..cbba782acd0c 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -8,6 +8,8 @@ #include #include +#include + #define L3_QOS_CDP_ENABLE 0x01ULL #define L2_QOS_CDP_ENABLE 0x01ULL @@ -558,7 +560,7 @@ void __init intel_rdt_mbm_apply_quirk(void); bool is_mba_sc(struct rdt_resource *r); void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms); void 
cqm_handle_limbo(struct work_struct *work); -bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d); +bool has_busy_rmid(struct rdt_domain *d); void __check_limbo(struct rdt_domain *d, bool force_free); void rdt_domain_reconfigure_cdp(struct rdt_resource *r); void __init thread_throttle_mode_init(void); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 3dad4134d2c9..bc5ceef143ab 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -149,12 +149,29 @@ static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val) return val; } -static inline struct rmid_entry *__rmid_entry(u32 closid, u32 rmid) +/* + * x86 and arm64 differ in their handling of monitoring. + * x86's RMID are independent numbers, there is only one source of traffic + * with an RMID value of '1'. + * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of + * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID + * value is no longer unique. + * To account for this, resctrl uses an index. On x86 this is just the RMID, + * on arm64 it encodes the CLOSID and RMID. This gives a unique number. + * + * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code + * must accept an attempt to read every index. 
+ */ +static inline struct rmid_entry *__rmid_entry(u32 idx) { struct rmid_entry *entry; + u32 closid, rmid; + + entry = &rmid_ptrs[idx]; + resctrl_arch_rmid_idx_decode(idx, &closid, &rmid); - entry = &rmid_ptrs[rmid]; - WARN_ON(entry->rmid != rmid); + WARN_ON_ONCE(entry->closid != closid); + WARN_ON_ONCE(entry->rmid != rmid); return entry; } @@ -284,8 +301,9 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, void __check_limbo(struct rdt_domain *d, bool force_free) { struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); struct rmid_entry *entry; - u32 crmid = 1, nrmid; + u32 idx, cur_idx = 1; bool rmid_dirty; u64 val = 0; @@ -296,12 +314,11 @@ void __check_limbo(struct rdt_domain *d, bool force_free) * RMID and move it to the free list when the counter reaches 0. */ for (;;) { - nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid); - if (nrmid >= r->num_rmid) + idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx); + if (idx >= idx_limit) break; - entry = __rmid_entry(X86_RESCTRL_EMPTY_CLOSID, nrmid);// temporary - + entry = __rmid_entry(idx); if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid, QOS_L3_OCCUP_EVENT_ID, &val)) { rmid_dirty = true; @@ -310,19 +327,21 @@ void __check_limbo(struct rdt_domain *d, bool force_free) } if (force_free || !rmid_dirty) { - clear_bit(entry->rmid, d->rmid_busy_llc); + clear_bit(idx, d->rmid_busy_llc); if (!--entry->busy) { rmid_limbo_count--; list_add_tail(&entry->list, &rmid_free_lru); } } - crmid = nrmid + 1; + cur_idx = idx + 1; } } -bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d) +bool has_busy_rmid(struct rdt_domain *d) { - return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid; + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + + return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit; } /* @@ -352,6 +371,9 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) 
struct rdt_domain *d; int cpu, err; u64 val = 0; + u32 idx; + + idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid); entry->busy = 0; cpu = get_cpu(); @@ -369,9 +391,9 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) * For the first limbo RMID in the domain, * setup up the limbo worker. */ - if (!has_busy_rmid(r, d)) + if (!has_busy_rmid(d)) cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL); - set_bit(entry->rmid, d->rmid_busy_llc); + set_bit(idx, d->rmid_busy_llc); entry->busy++; } put_cpu(); @@ -384,14 +406,21 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) void free_rmid(u32 closid, u32 rmid) { + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); struct rmid_entry *entry; - if (!rmid) - return; - lockdep_assert_held(&rdtgroup_mutex); - entry = __rmid_entry(closid, rmid); + /* + * Do not allow the default rmid to be free'd. Comparing by index + * allows architectures that ignore the closid parameter to avoid an + * unnecessary check. + */ + if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, + RESCTRL_RESERVED_RMID)) + return; + + entry = __rmid_entry(idx); if (is_llc_occupancy_enabled()) add_rmid_to_limbo(entry); @@ -402,11 +431,13 @@ void free_rmid(u32 closid, u32 rmid) static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 closid, u32 rmid, enum resctrl_event_id evtid) { + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); + switch (evtid) { case QOS_L3_MBM_TOTAL_EVENT_ID: - return &d->mbm_total[rmid]; + return &d->mbm_total[idx]; case QOS_L3_MBM_LOCAL_EVENT_ID: - return &d->mbm_local[rmid]; + return &d->mbm_local[idx]; default: return NULL; } @@ -449,7 +480,8 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr) */ static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr) { - struct mbm_state *m = &rr->d->mbm_local[rmid]; + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); + struct mbm_state *m = &rr->d->mbm_local[idx]; u64 cur_bw, bytes, cur_bytes; cur_bytes = 
rr->val; @@ -538,9 +570,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) struct mbm_state *pmbm_data, *cmbm_data; struct rdt_resource *r_mba; struct rdt_domain *dom_mba; + u32 cur_bw, user_bw, idx; struct list_head *head; struct rdtgroup *entry; - u32 cur_bw, user_bw; if (!is_mbm_local_enabled()) return; @@ -549,7 +581,8 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) closid = rgrp->closid; rmid = rgrp->mon.rmid; - pmbm_data = &dom_mbm->mbm_local[rmid]; + idx = resctrl_arch_rmid_idx_encode(closid, rmid); + pmbm_data = &dom_mbm->mbm_local[idx]; dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba); if (!dom_mba) { @@ -638,17 +671,15 @@ void cqm_handle_limbo(struct work_struct *work) { unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); int cpu = smp_processor_id(); - struct rdt_resource *r; struct rdt_domain *d; mutex_lock(&rdtgroup_mutex); - r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; d = container_of(work, struct rdt_domain, cqm_limbo.work); __check_limbo(d, false); - if (has_busy_rmid(r, d)) + if (has_busy_rmid(d)) schedule_delayed_work_on(cpu, &d->cqm_limbo, delay); mutex_unlock(&rdtgroup_mutex); @@ -713,19 +744,20 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) static int dom_data_init(struct rdt_resource *r) { + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); struct rmid_entry *entry = NULL; - int i, nr_rmids; + u32 idx; + int i; - nr_rmids = r->num_rmid; - rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL); + rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL); if (!rmid_ptrs) return -ENOMEM; - for (i = 0; i < nr_rmids; i++) { + for (i = 0; i < idx_limit; i++) { entry = &rmid_ptrs[i]; INIT_LIST_HEAD(&entry->list); - entry->rmid = i; + resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid); list_add_tail(&entry->list, &rmid_free_lru); } @@ -734,7 +766,9 @@ static int dom_data_init(struct 
rdt_resource *r) * are always allocated. These are used for the rdtgroup_default * control group, which will be setup later in rdtgroup_init(). */ - entry = __rmid_entry(RESCTRL_RESERVED_CLOSID, RESCTRL_RESERVED_RMID); + idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, + RESCTRL_RESERVED_RMID); + entry = __rmid_entry(idx); list_del(&entry->list); return 0; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index ad7da7254f4d..a7dbc0e7e559 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -3853,8 +3853,8 @@ static void __init rdtgroup_setup_default(void) { mutex_lock(&rdtgroup_mutex); - rdtgroup_default.closid = 0; - rdtgroup_default.mon.rmid = 0; + rdtgroup_default.closid = RESCTRL_RESERVED_CLOSID; + rdtgroup_default.mon.rmid = RESCTRL_RESERVED_RMID; rdtgroup_default.type = RDTCTRL_GROUP; INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); @@ -3889,7 +3889,7 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) if (is_mbm_enabled()) cancel_delayed_work(&d->mbm_over); - if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) { + if (is_llc_occupancy_enabled() && has_busy_rmid(d)) { /* * When a package is going down, forcefully * decrement rmid->ebusy. 
There is no way to know @@ -3907,16 +3907,17 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) { + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); size_t tsize; if (is_llc_occupancy_enabled()) { - d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL); + d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL); if (!d->rmid_busy_llc) return -ENOMEM; } if (is_mbm_total_enabled()) { tsize = sizeof(*d->mbm_total); - d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL); + d->mbm_total = kcalloc(idx_limit, tsize, GFP_KERNEL); if (!d->mbm_total) { bitmap_free(d->rmid_busy_llc); return -ENOMEM; @@ -3924,7 +3925,7 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) } if (is_mbm_local_enabled()) { tsize = sizeof(*d->mbm_local); - d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL); + d->mbm_local = kcalloc(idx_limit, tsize, GFP_KERNEL); if (!d->mbm_local) { bitmap_free(d->rmid_busy_llc); kfree(d->mbm_total); -- Gitee From c43cabca39bce27c71a58e5eaa042e86c0037290 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:21 +0000 Subject: [PATCH 0500/2138] x86/resctrl: Allow RMID allocation to be scoped by CLOSID ANBZ: #8626 commit c4c0376eefe185b790d89ca8016b7f837ebf25da upstream. MPAMs RMID values are not unique unless the CLOSID is considered as well. alloc_rmid() expects the RMID to be an independent number. Pass the CLOSID in to alloc_rmid(). Use this to compare indexes when allocating. If the CLOSID is not relevant to the index, this ends up comparing the free RMID with itself, and the first free entry will be used. With MPAM the CLOSID is included in the index, so this becomes a walk of the free RMID entries, until one that matches the supplied CLOSID is found. 
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-8-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/internal.h | 2 +- arch/x86/kernel/cpu/resctrl/monitor.c | 43 ++++++++++++++++++----- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 2 +- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 2 +- 4 files changed, 37 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index cbba782acd0c..872ba1a34103 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -543,7 +543,7 @@ void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r); int closids_supported(void); void closid_free(int closid); -int alloc_rmid(void); +int alloc_rmid(u32 closid); void free_rmid(u32 closid, u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); void __exit rdt_put_mon_l3_config(void); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index bc5ceef143ab..c49f2e89ef29 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -344,24 +344,49 @@ bool has_busy_rmid(struct rdt_domain *d) return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit; } +static struct rmid_entry *resctrl_find_free_rmid(u32 closid) +{ + struct rmid_entry *itr; + u32 itr_idx, cmp_idx; + + if (list_empty(&rmid_free_lru)) + return rmid_limbo_count ? 
ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC); + + list_for_each_entry(itr, &rmid_free_lru, list) { + /* + * Get the index of this free RMID, and the index it would need + * to be if it were used with this CLOSID. + * If the CLOSID is irrelevant on this architecture, the two + * index values are always the same on every entry and thus the + * very first entry will be returned. + */ + itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid); + cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid); + + if (itr_idx == cmp_idx) + return itr; + } + + return ERR_PTR(-ENOSPC); +} + /* - * As of now the RMIDs allocation is global. - * However we keep track of which packages the RMIDs - * are used to optimize the limbo list management. + * For MPAM the RMID value is not unique, and has to be considered with + * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which + * allows all domains to be managed by a single free list. + * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler. */ -int alloc_rmid(void) +int alloc_rmid(u32 closid) { struct rmid_entry *entry; lockdep_assert_held(&rdtgroup_mutex); - if (list_empty(&rmid_free_lru)) - return rmid_limbo_count ? 
-EBUSY : -ENOSPC; + entry = resctrl_find_free_rmid(closid); + if (IS_ERR(entry)) + return PTR_ERR(entry); - entry = list_first_entry(&rmid_free_lru, - struct rmid_entry, list); list_del(&entry->list); - return entry->rmid; } diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 65bee6f11015..d8f44113ed1f 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -777,7 +777,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) int ret; if (rdt_mon_capable) { - ret = alloc_rmid(); + ret = alloc_rmid(rdtgrp->closid); if (ret < 0) { rdt_last_cmd_puts("Out of RMIDs\n"); return ret; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index a7dbc0e7e559..dcffd1c4a476 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -3295,7 +3295,7 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) if (!rdt_mon_capable) return 0; - ret = alloc_rmid(); + ret = alloc_rmid(rdtgrp->closid); if (ret < 0) { rdt_last_cmd_puts("Out of RMIDs\n"); return ret; -- Gitee From 8a4cf0fa2251ff846a430a61d4e89a57fae5d9ab Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:22 +0000 Subject: [PATCH 0501/2138] x86/resctrl: Track the number of dirty RMID a CLOSID has ANBZ: #8626 commit b30a55df60c35df09b9ef08dfb0a0cbb543abe81 upstream. MPAM's PMG bits extend its PARTID space, meaning the same PMG value can be used for different control groups. This means once a CLOSID is allocated, all its monitoring ids may still be dirty, and held in limbo. Keep track of the number of RMID held in limbo each CLOSID has. This will allow a future helper to find the 'cleanest' CLOSID when allocating. The array is only needed when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined. This will never be the case on x86. 
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-9-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/monitor.c | 75 +++++++++++++++++++++++---- 1 file changed, 65 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index c49f2e89ef29..13b0c8d14f3d 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -50,6 +50,13 @@ struct rmid_entry { */ static LIST_HEAD(rmid_free_lru); +/* + * @closid_num_dirty_rmid The number of dirty RMID each CLOSID has. + * Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined. + * Indexed by CLOSID. Protected by rdtgroup_mutex. + */ +static u32 *closid_num_dirty_rmid; + /* * @rmid_limbo_count - count of currently unused but (potentially) * dirty RMIDs. @@ -292,6 +299,17 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, return 0; } +static void limbo_release_entry(struct rmid_entry *entry) +{ + lockdep_assert_held(&rdtgroup_mutex); + + rmid_limbo_count--; + list_add_tail(&entry->list, &rmid_free_lru); + + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + closid_num_dirty_rmid[entry->closid]--; +} + /* * Check the RMIDs that are marked as busy for this domain. 
If the * reported LLC occupancy is below the threshold clear the busy bit and @@ -328,10 +346,8 @@ void __check_limbo(struct rdt_domain *d, bool force_free) if (force_free || !rmid_dirty) { clear_bit(idx, d->rmid_busy_llc); - if (!--entry->busy) { - rmid_limbo_count--; - list_add_tail(&entry->list, &rmid_free_lru); - } + if (!--entry->busy) + limbo_release_entry(entry); } cur_idx = idx + 1; } @@ -398,6 +414,8 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) u64 val = 0; u32 idx; + lockdep_assert_held(&rdtgroup_mutex); + idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid); entry->busy = 0; @@ -423,10 +441,13 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) } put_cpu(); - if (entry->busy) + if (entry->busy) { rmid_limbo_count++; - else + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + closid_num_dirty_rmid[entry->closid]++; + } else { list_add_tail(&entry->list, &rmid_free_lru); + } } void free_rmid(u32 closid, u32 rmid) @@ -770,13 +791,39 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) static int dom_data_init(struct rdt_resource *r) { u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + u32 num_closid = resctrl_arch_get_num_closid(r); struct rmid_entry *entry = NULL; + int err = 0, i; u32 idx; - int i; + + mutex_lock(&rdtgroup_mutex); + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + u32 *tmp; + + /* + * If the architecture hasn't provided a sanitised value here, + * this may result in larger arrays than necessary. Resctrl will + * use a smaller system wide value based on the resources in + * use. 
+ */ + tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL); + if (!tmp) { + err = -ENOMEM; + goto out_unlock; + } + + closid_num_dirty_rmid = tmp; + } rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL); - if (!rmid_ptrs) - return -ENOMEM; + if (!rmid_ptrs) { + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + kfree(closid_num_dirty_rmid); + closid_num_dirty_rmid = NULL; + } + err = -ENOMEM; + goto out_unlock; + } for (i = 0; i < idx_limit; i++) { entry = &rmid_ptrs[i]; @@ -796,13 +843,21 @@ static int dom_data_init(struct rdt_resource *r) entry = __rmid_entry(idx); list_del(&entry->list); - return 0; +out_unlock: + mutex_unlock(&rdtgroup_mutex); + + return err; } static void __exit dom_data_exit(void) { mutex_lock(&rdtgroup_mutex); + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + kfree(closid_num_dirty_rmid); + closid_num_dirty_rmid = NULL; + } + kfree(rmid_ptrs); rmid_ptrs = NULL; -- Gitee From f2da34680c5ce3572cfa55dc0dffe180992504ef Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:23 +0000 Subject: [PATCH 0502/2138] x86/resctrl: Use __set_bit()/__clear_bit() instead of open coding ANBZ: #8626 commit 5d920b6881f2249be3a028ce0a7f31c5cc61b1ee upstream. The resctrl CLOSID allocator uses a single 32bit word to track which CLOSID are free. The setting and clearing of bits is open coded. Convert the existing open coded bit manipulations of closid_free_map to use __set_bit() and friends. These don't need to be atomic as this list is protected by the mutex. 
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-10-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index dcffd1c4a476..bc6e0f83c847 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -111,7 +111,7 @@ void rdt_staged_configs_clear(void) * - Our choices on how to configure each resource become progressively more * limited as the number of resources grows. */ -static int closid_free_map; +static unsigned long closid_free_map; static int closid_free_map_len; int closids_supported(void) @@ -130,8 +130,8 @@ static void closid_init(void) closid_free_map = BIT_MASK(rdt_min_closid) - 1; - /* CLOSID 0 is always reserved for the default group */ - closid_free_map &= ~1; + /* RESCTRL_RESERVED_CLOSID is always reserved for the default group */ + __clear_bit(RESCTRL_RESERVED_CLOSID, &closid_free_map); closid_free_map_len = rdt_min_closid; } @@ -139,17 +139,21 @@ static int closid_alloc(void) { u32 closid = ffs(closid_free_map); + lockdep_assert_held(&rdtgroup_mutex); + if (closid == 0) return -ENOSPC; closid--; - closid_free_map &= ~(1 << closid); + __clear_bit(closid, &closid_free_map); return closid; } void closid_free(int closid) { - closid_free_map |= 1 << closid; + lockdep_assert_held(&rdtgroup_mutex); + + __set_bit(closid, &closid_free_map); } /** @@ -161,7 +165,9 @@ void closid_free(int closid) */ static bool closid_allocated(unsigned int closid) { - return 
(closid_free_map & (1 << closid)) == 0; + lockdep_assert_held(&rdtgroup_mutex); + + return !test_bit(closid, &closid_free_map); } /** -- Gitee From 36d2b22765bd4a32a0e1fa9f925cbbc8796dc81f Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:24 +0000 Subject: [PATCH 0503/2138] x86/resctrl: Allocate the cleanest CLOSID by searching closid_num_dirty_rmid ANBZ: #8626 commit 6eac36bb9eb0349c983313c71692c19d50b56878 upstream. MPAM's PMG bits extend its PARTID space, meaning the same PMG value can be used for different control groups. This means once a CLOSID is allocated, all its monitoring ids may still be dirty, and held in limbo. Instead of allocating the first free CLOSID, on architectures where CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is enabled, search closid_num_dirty_rmid[] to find the cleanest CLOSID. The CLOSID found is returned to closid_alloc() for the free list to be updated. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-11-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/internal.h | 2 ++ arch/x86/kernel/cpu/resctrl/monitor.c | 45 ++++++++++++++++++++++++++ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 19 ++++++++--- 3 files changed, 61 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 872ba1a34103..b7b9d9230bef 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -566,5 +566,7 @@ void rdt_domain_reconfigure_cdp(struct rdt_resource *r); void __init thread_throttle_mode_init(void); void __init mbm_config_rftype_init(const char 
*config); void rdt_staged_configs_clear(void); +bool closid_allocated(unsigned int closid); +int resctrl_find_cleanest_closid(void); #endif /* _ASM_X86_RESCTRL_INTERNAL_H */ diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 13b0c8d14f3d..101f1b112d17 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -386,6 +386,51 @@ static struct rmid_entry *resctrl_find_free_rmid(u32 closid) return ERR_PTR(-ENOSPC); } +/** + * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated + * RMID are clean, or the CLOSID that has + * the most clean RMID. + * + * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID + * may not be able to allocate clean RMID. To avoid this the allocator will + * choose the CLOSID with the most clean RMID. + * + * When the CLOSID and RMID are independent numbers, the first free CLOSID will + * be returned. + */ +int resctrl_find_cleanest_closid(void) +{ + u32 cleanest_closid = ~0; + int i = 0; + + lockdep_assert_held(&rdtgroup_mutex); + + if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + return -EIO; + + for (i = 0; i < closids_supported(); i++) { + int num_dirty; + + if (closid_allocated(i)) + continue; + + num_dirty = closid_num_dirty_rmid[i]; + if (num_dirty == 0) + return i; + + if (cleanest_closid == ~0) + cleanest_closid = i; + + if (num_dirty < closid_num_dirty_rmid[cleanest_closid]) + cleanest_closid = i; + } + + if (cleanest_closid == ~0) + return -ENOSPC; + + return cleanest_closid; +} + /* * For MPAM the RMID value is not unique, and has to be considered with * the CLOSID. 
The (CLOSID, RMID) pair is allocated on all domains, which diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index bc6e0f83c847..8fc46204a6cc 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -137,13 +137,22 @@ static void closid_init(void) static int closid_alloc(void) { - u32 closid = ffs(closid_free_map); + int cleanest_closid; + u32 closid; lockdep_assert_held(&rdtgroup_mutex); - if (closid == 0) - return -ENOSPC; - closid--; + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + cleanest_closid = resctrl_find_cleanest_closid(); + if (cleanest_closid < 0) + return cleanest_closid; + closid = cleanest_closid; + } else { + closid = ffs(closid_free_map); + if (closid == 0) + return -ENOSPC; + closid--; + } __clear_bit(closid, &closid_free_map); return closid; @@ -163,7 +172,7 @@ void closid_free(int closid) * Return: true if @closid is currently associated with a resource group, * false if @closid is free */ -static bool closid_allocated(unsigned int closid) +bool closid_allocated(unsigned int closid) { lockdep_assert_held(&rdtgroup_mutex); -- Gitee From fcd2f08619727316b06ce72a2b89aee698bb04d7 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:25 +0000 Subject: [PATCH 0504/2138] x86/resctrl: Move CLOSID/RMID matching and setting to use helpers ANBZ: #8626 commit 6eca639d8340b569ff78ffd753796e83ef7075ae upstream. When switching tasks, the CLOSID and RMID that the new task should use are stored in struct task_struct. For x86 the CLOSID known by resctrl, the value in task_struct, and the value written to the CPU register are all the same thing. MPAM's CPU interface has two different PARTIDs - one for data accesses the other for instruction fetch. Storing resctrl's CLOSID value in struct task_struct implies the arch code knows whether resctrl is using CDP. Move the matching and setting of the struct task_struct properties to use helpers. 
This allows arm64 to store the hardware format of the register, instead of having to convert it each time. __rdtgroup_move_task()s use of READ_ONCE()/WRITE_ONCE() ensures torn values aren't seen as another CPU may schedule the task being moved while the value is being changed. MPAM has an additional corner-case here as the PMG bits extend the PARTID space. If the scheduler sees a new-CLOSID but old-RMID, the task will dirty an RMID that the limbo code is not watching causing an inaccurate count. x86's RMID are independent values, so the limbo code will still be watching the old-RMID in this circumstance. To avoid this, arm64 needs both the CLOSID/RMID WRITE_ONCE()d together. Both values must be provided together. Because MPAM's RMID values are not unique, the CLOSID must be provided when matching the RMID. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-12-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/include/asm/resctrl.h | 18 ++++++++ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 62 ++++++++++++++++---------- 2 files changed, 56 insertions(+), 24 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index db4c84dde2d5..1d274dbabc44 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -95,6 +95,24 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val) return val * scale; } +static inline void resctrl_arch_set_closid_rmid(struct task_struct *tsk, + u32 closid, u32 rmid) +{ + WRITE_ONCE(tsk->closid, closid); + WRITE_ONCE(tsk->rmid, rmid); +} + +static inline bool resctrl_arch_match_closid(struct task_struct *tsk, 
u32 closid) +{ + return READ_ONCE(tsk->closid) == closid; +} + +static inline bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 ignored, + u32 rmid) +{ + return READ_ONCE(tsk->rmid) == rmid; +} + static inline void resctrl_sched_in(struct task_struct *tsk) { if (static_branch_likely(&rdt_enable_key)) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 8fc46204a6cc..e42cbdf8f6a3 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -102,7 +102,7 @@ void rdt_staged_configs_clear(void) * * Using a global CLOSID across all resources has some advantages and * some drawbacks: - * + We can simply set "current->closid" to assign a task to a resource + * + We can simply set current's closid to assign a task to a resource * group. * + Context switch code can avoid extra memory references deciding which * CLOSID to load into the PQR_ASSOC MSR @@ -574,14 +574,26 @@ static void update_task_closid_rmid(struct task_struct *t) _update_task_closid_rmid(t); } +static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp) +{ + u32 closid, rmid = rdtgrp->mon.rmid; + + if (rdtgrp->type == RDTCTRL_GROUP) + closid = rdtgrp->closid; + else if (rdtgrp->type == RDTMON_GROUP) + closid = rdtgrp->mon.parent->closid; + else + return false; + + return resctrl_arch_match_closid(tsk, closid) && + resctrl_arch_match_rmid(tsk, closid, rmid); +} + static int __rdtgroup_move_task(struct task_struct *tsk, struct rdtgroup *rdtgrp) { /* If the task is already in rdtgrp, no need to move the task. 
*/ - if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid && - tsk->rmid == rdtgrp->mon.rmid) || - (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid && - tsk->closid == rdtgrp->mon.parent->closid)) + if (task_in_rdtgroup(tsk, rdtgrp)) return 0; /* @@ -592,19 +604,19 @@ static int __rdtgroup_move_task(struct task_struct *tsk, * For monitor groups, can move the tasks only from * their parent CTRL group. */ - - if (rdtgrp->type == RDTCTRL_GROUP) { - WRITE_ONCE(tsk->closid, rdtgrp->closid); - WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid); - } else if (rdtgrp->type == RDTMON_GROUP) { - if (rdtgrp->mon.parent->closid == tsk->closid) { - WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid); - } else { - rdt_last_cmd_puts("Can't move task to different control group\n"); - return -EINVAL; - } + if (rdtgrp->type == RDTMON_GROUP && + !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) { + rdt_last_cmd_puts("Can't move task to different control group\n"); + return -EINVAL; } + if (rdtgrp->type == RDTMON_GROUP) + resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid, + rdtgrp->mon.rmid); + else + resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid, + rdtgrp->mon.rmid); + /* * Ensure the task's closid and rmid are written before determining if * the task is current that will decide if it will be interrupted. 
@@ -626,14 +638,15 @@ static int __rdtgroup_move_task(struct task_struct *tsk, static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) { - return (rdt_alloc_capable && - (r->type == RDTCTRL_GROUP) && (t->closid == r->closid)); + return (rdt_alloc_capable && (r->type == RDTCTRL_GROUP) && + resctrl_arch_match_closid(t, r->closid)); } static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) { - return (rdt_mon_capable && - (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid)); + return (rdt_mon_capable && (r->type == RDTMON_GROUP) && + resctrl_arch_match_rmid(t, r->mon.parent->closid, + r->mon.rmid)); } /** @@ -884,7 +897,7 @@ int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, rdtg->mode != RDT_MODE_EXCLUSIVE) continue; - if (rdtg->closid != tsk->closid) + if (!resctrl_arch_match_closid(tsk, rdtg->closid)) continue; seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "", @@ -892,7 +905,8 @@ int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, seq_puts(s, "mon:"); list_for_each_entry(crg, &rdtg->mon.crdtgrp_list, mon.crdtgrp_list) { - if (tsk->rmid != crg->mon.rmid) + if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid, + crg->mon.rmid)) continue; seq_printf(s, "%s", crg->kn->name); break; @@ -2820,8 +2834,8 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, for_each_process_thread(p, t) { if (!from || is_closid_match(t, from) || is_rmid_match(t, from)) { - WRITE_ONCE(t->closid, to->closid); - WRITE_ONCE(t->rmid, to->mon.rmid); + resctrl_arch_set_closid_rmid(t, to->closid, + to->mon.rmid); /* * Order the closid/rmid stores above before the loads -- Gitee From c6a4f57cfc6d385c80a26dbc1465309637b7ac88 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:26 +0000 Subject: [PATCH 0505/2138] x86/resctrl: Add cpumask_any_housekeeping() for limbo/overflow ANBZ: #8626 commit a4846aaf39455fe69fce3522b385319383666eef upstream. 
The limbo and overflow code picks a CPU to use from the domain's list of online CPUs. Work is then scheduled on these CPUs to maintain the limbo list and any counters that may overflow. cpumask_any() may pick a CPU that is marked nohz_full, which will either penalise the work that CPU was dedicated to, or delay the processing of limbo list or counters that may overflow. Perhaps indefinitely. Delaying the overflow handling will skew the bandwidth values calculated by mba_sc, which expects to be called once a second. Add cpumask_any_housekeeping() as a replacement for cpumask_any() that prefers housekeeping CPUs. This helper will still return a nohz_full CPU if that is the only option. The CPU to use is re-evaluated each time the limbo/overflow work runs. This ensures the work will move off a nohz_full CPU once a housekeeping CPU is available. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-13-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/internal.h | 24 ++++++++++++++++++++++++ arch/x86/kernel/cpu/resctrl/monitor.c | 20 +++++++++++++------- 2 files changed, 37 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index b7b9d9230bef..81f5de916db8 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -7,6 +7,7 @@ #include #include #include +#include #include @@ -55,6 +56,29 @@ /* Max event bits supported */ #define MAX_EVT_CONFIG_BITS GENMASK(6, 0) +/** + * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that + * aren't marked nohz_full + * 
@mask: The mask to pick a CPU from. + * + * Returns a CPU in @mask. If there are housekeeping CPUs that don't use + * nohz_full, these are preferred. + */ +static inline unsigned int cpumask_any_housekeeping(const struct cpumask *mask) +{ + unsigned int cpu, hk_cpu; + + cpu = cpumask_any(mask); + if (!tick_nohz_full_cpu(cpu)) + return cpu; + + hk_cpu = cpumask_nth_andnot(0, mask, tick_nohz_full_mask); + if (hk_cpu < nr_cpu_ids) + cpu = hk_cpu; + + return cpu; +} + struct rdt_fs_context { struct kernfs_fs_context kfc; bool enable_cdpl2; diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 101f1b112d17..38f85e53ca93 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -761,7 +761,6 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, void cqm_handle_limbo(struct work_struct *work) { unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); - int cpu = smp_processor_id(); struct rdt_domain *d; mutex_lock(&rdtgroup_mutex); @@ -770,8 +769,11 @@ void cqm_handle_limbo(struct work_struct *work) __check_limbo(d, false); - if (has_busy_rmid(d)) - schedule_delayed_work_on(cpu, &d->cqm_limbo, delay); + if (has_busy_rmid(d)) { + d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask); + schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo, + delay); + } mutex_unlock(&rdtgroup_mutex); } @@ -781,7 +783,7 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms) unsigned long delay = msecs_to_jiffies(delay_ms); int cpu; - cpu = cpumask_any(&dom->cpu_mask); + cpu = cpumask_any_housekeeping(&dom->cpu_mask); dom->cqm_work_cpu = cpu; schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); @@ -791,7 +793,6 @@ void mbm_handle_overflow(struct work_struct *work) { unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL); struct rdtgroup *prgrp, *crgrp; - int cpu = smp_processor_id(); struct list_head *head; struct rdt_resource *r; struct 
rdt_domain *d; @@ -815,7 +816,12 @@ void mbm_handle_overflow(struct work_struct *work) update_mba_bw(prgrp, d); } - schedule_delayed_work_on(cpu, &d->mbm_over, delay); + /* + * Re-check for housekeeping CPUs. This allows the overflow handler to + * move off a nohz_full CPU quickly. + */ + d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask); + schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay); out_unlock: mutex_unlock(&rdtgroup_mutex); @@ -828,7 +834,7 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) if (!static_branch_likely(&rdt_mon_enable_key)) return; - cpu = cpumask_any(&dom->cpu_mask); + cpu = cpumask_any_housekeeping(&dom->cpu_mask); dom->mbm_work_cpu = cpu; schedule_delayed_work_on(cpu, &dom->mbm_over, delay); } -- Gitee From 0bde6bc4652d1b185c185a3dcdd948a8a896da3c Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:27 +0000 Subject: [PATCH 0506/2138] x86/resctrl: Queue mon_event_read() instead of sending an IPI ANBZ: #8626 commit 09909e098113bed99c9f63e1df89073e92c69891 upstream. Intel is blessed with an abundance of monitors, one per RMID, that can be read from any CPU in the domain. MPAMs monitors reside in the MMIO MSC, the number implemented is up to the manufacturer. This means when there are fewer monitors than needed, they need to be allocated and freed. MPAM's CSU monitors are used to back the 'llc_occupancy' monitor file. The CSU counter is allowed to return 'not ready' for a small number of micro-seconds after programming. To allow one CSU hardware monitor to be used for multiple control or monitor groups, the CPU accessing the monitor needs to be able to block when configuring and reading the counter. Worse, the domain may be broken up into slices, and the MMIO accesses for each slice may need performing from different CPUs. These two details mean MPAMs monitor code needs to be able to sleep, and IPI another CPU in the domain to read from a resource that has been sliced. 
mon_event_read() already invokes mon_event_count() via IPI, which means this isn't possible. On systems using nohz-full, some CPUs need to be interrupted to run kernel work as they otherwise stay in user-space running realtime workloads. Interrupting these CPUs should be avoided, and scheduling work on them may never complete. Change mon_event_read() to pick a housekeeping CPU, (one that is not using nohz_full) and schedule mon_event_count() and wait. If all the CPUs in a domain are using nohz-full, then an IPI is used as the fallback. This function is only used in response to a user-space filesystem request (not the timing sensitive overflow code). This allows MPAM to hide the slice behaviour from resctrl, and to keep the monitor-allocation in monitor.c. When the IPI fallback is used on machines where MPAM needs to make an access on multiple CPUs, the counter read will always fail. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Peter Newman Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-14-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 26 +++++++++++++++++++++-- arch/x86/kernel/cpu/resctrl/monitor.c | 2 +- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index e004ecbe3553..d3e5594ca0db 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -19,6 +19,8 @@ #include #include #include +#include + #include "internal.h" /* @@ -527,12 +529,21 @@ int rdtgroup_schemata_show(struct kernfs_open_file *of, return ret; } +static int smp_mon_event_count(void 
*arg) +{ + mon_event_count(arg); + + return 0; +} + void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, struct rdt_domain *d, struct rdtgroup *rdtgrp, int evtid, int first) { + int cpu; + /* - * setup the parameters to send to the IPI to read the data. + * Setup the parameters to pass to mon_event_count() to read the data. */ rr->rgrp = rdtgrp; rr->evtid = evtid; @@ -541,7 +552,18 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, rr->val = 0; rr->first = first; - smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); + cpu = cpumask_any_housekeeping(&d->cpu_mask); + + /* + * cpumask_any_housekeeping() prefers housekeeping CPUs, but + * are all the CPUs nohz_full? If yes, pick a CPU to IPI. + * MPAM's resctrl_arch_rmid_read() is unable to read the + * counters on some platforms if its called in IRQ context. + */ + if (tick_nohz_full_cpu(cpu)) + smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); + else + smp_call_on_cpu(cpu, smp_mon_event_count, rr, false); } int rdtgroup_mondata_show(struct seq_file *m, void *arg) diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 38f85e53ca93..fd060ef86f38 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -585,7 +585,7 @@ static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr) } /* - * This is called via IPI to read the CQM/MBM counters + * This is scheduled by mon_event_read() to read the CQM/MBM counters * on a domain. */ void mon_event_count(void *info) -- Gitee From ead974bb84fb43b6f1fd1657412320ca3c1549ef Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:28 +0000 Subject: [PATCH 0507/2138] x86/resctrl: Allow resctrl_arch_rmid_read() to sleep ANBZ: #8626 commit 6fde1424f29b151b9dc8c660eecf4d1645facea5 upstream. MPAM's cache occupancy counters can take a little while to settle once the monitor has been configured. 
The maximum settling time is described to the driver via a firmware table. The value could be large enough that it makes sense to sleep. To avoid exposing this to resctrl, it should be hidden behind MPAM's resctrl_arch_rmid_read(). resctrl_arch_rmid_read() may be called via IPI meaning it is unable to sleep. In this case, it should return an error if it needs to sleep. This will only affect MPAM platforms where the cache occupancy counter isn't available immediately, nohz_full is in use, and there are no housekeeping CPUs in the necessary domain. There are three callers of resctrl_arch_rmid_read(): __mon_event_count() and __check_limbo() are both called from a non-migrateable context. mon_event_read() invokes __mon_event_count() using smp_call_on_cpu(), which adds work to the target CPUs workqueue. rdtgroup_mutex() is held, meaning this cannot race with the resctrl cpuhp callback. __check_limbo() is invoked via schedule_delayed_work_on() also adds work to a per-cpu workqueue. The remaining call is add_rmid_to_limbo() which is called in response to a user-space syscall that frees an RMID. This opportunistically reads the LLC occupancy counter on the current domain to see if the RMID is over the dirty threshold. This has to disable preemption to avoid reading the wrong domain's value. Disabling preemption here prevents resctrl_arch_rmid_read() from sleeping. add_rmid_to_limbo() walks each domain, but only reads the counter on one domain. If the system has more than one domain, the RMID will always be added to the limbo list. If the RMIDs usage was not over the threshold, it will be removed from the list when __check_limbo() runs. Make this the default behaviour. Free RMIDs are always added to the limbo list for each domain. The user visible effect of this is that a clean RMID is not available for re-allocation immediately after 'rmdir()' completes. This behaviour was never portable as it never happened on a machine with multiple domains. 
Removing this path allows resctrl_arch_rmid_read() to sleep if its called with interrupts unmasked. Document this is the expected behaviour, and add a might_sleep() annotation to catch changes that won't work on arm64. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-15-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/monitor.c | 25 +++++-------------------- include/linux/resctrl.h | 23 ++++++++++++++++++++++- 2 files changed, 27 insertions(+), 21 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index fd060ef86f38..e8aeff6673ea 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -277,6 +277,8 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, u64 msr_val, chunks; int ret; + resctrl_arch_rmid_read_context_check(); + if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask)) return -EINVAL; @@ -455,8 +457,6 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) { struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; struct rdt_domain *d; - int cpu, err; - u64 val = 0; u32 idx; lockdep_assert_held(&rdtgroup_mutex); @@ -464,17 +464,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid); entry->busy = 0; - cpu = get_cpu(); list_for_each_entry(d, &r->domains, list) { - if (cpumask_test_cpu(cpu, &d->cpu_mask)) { - err = resctrl_arch_rmid_read(r, d, entry->closid, - entry->rmid, - QOS_L3_OCCUP_EVENT_ID, - &val); - if (err || val <= resctrl_rmid_realloc_threshold) - continue; - 
} - /* * For the first limbo RMID in the domain, * setup up the limbo worker. @@ -484,15 +474,10 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) set_bit(idx, d->rmid_busy_llc); entry->busy++; } - put_cpu(); - if (entry->busy) { - rmid_limbo_count++; - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) - closid_num_dirty_rmid[entry->closid]++; - } else { - list_add_tail(&entry->list, &rmid_free_lru); - } + rmid_limbo_count++; + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + closid_num_dirty_rmid[entry->closid]++; } void free_rmid(u32 closid, u32 rmid) diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index bd4ec22b5a96..8649fc84aac2 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -236,7 +236,12 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); * @eventid: eventid to read, e.g. L3 occupancy. * @val: result of the counter read in bytes. * - * Call from process context on a CPU that belongs to domain @d. + * Some architectures need to sleep when first programming some of the counters. + * (specifically: arm64's MPAM cache occupancy counters can return 'not ready' + * for a short period of time). Call from a non-migrateable process context on + * a CPU that belongs to domain @d. e.g. use smp_call_on_cpu() or + * schedule_work_on(). This function can be called with interrupts masked, + * e.g. using smp_call_function_any(), but may consistently return an error. * * Return: * 0 on success, or -EIO, -EINVAL etc on error. @@ -245,6 +250,22 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, u32 closid, u32 rmid, enum resctrl_event_id eventid, u64 *val); +/** + * resctrl_arch_rmid_read_context_check() - warn about invalid contexts + * + * When built with CONFIG_DEBUG_ATOMIC_SLEEP generate a warning when + * resctrl_arch_rmid_read() is called with preemption disabled. 
+ * + * The contract with resctrl_arch_rmid_read() is that if interrupts + * are unmasked, it can sleep. This allows NOHZ_FULL systems to use an + * IPI, (and fail if the call needed to sleep), while most of the time + * the work is scheduled, allowing the call to sleep. + */ +static inline void resctrl_arch_rmid_read_context_check(void) +{ + if (!irqs_disabled()) + might_sleep(); +} /** * resctrl_arch_reset_rmid() - Reset any private state associated with rmid -- Gitee From a962b2cb4821e321ddf95865b0a85bb2d27150b9 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:29 +0000 Subject: [PATCH 0508/2138] x86/resctrl: Allow arch to allocate memory needed in resctrl_arch_rmid_read() ANBZ: #8626 commit e557999f80a5ee4ec812f594ab42bb76c3ec4eb2 upstream. Depending on the number of monitors available, Arm's MPAM may need to allocate a monitor prior to reading the counter value. Allocating a contended resource may involve sleeping. __check_limbo() and mon_event_count() each make multiple calls to resctrl_arch_rmid_read(), to avoid extra work on contended systems, the allocation should be valid for multiple invocations of resctrl_arch_rmid_read(). The memory or hardware allocated is not specific to a domain. Add arch hooks for this allocation, which need calling before resctrl_arch_rmid_read(). The allocated monitor is passed to resctrl_arch_rmid_read(), then freed again afterwards. The helper can be called on any CPU, and can sleep. 
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-16-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/include/asm/resctrl.h | 11 +++++++ arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 7 +++++ arch/x86/kernel/cpu/resctrl/internal.h | 1 + arch/x86/kernel/cpu/resctrl/monitor.c | 35 +++++++++++++++++++++-- include/linux/resctrl.h | 5 +++- 5 files changed, 55 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 1d274dbabc44..29c4cc343787 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -136,6 +136,17 @@ static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid) return rmid; } +/* x86 can always read an rmid, nothing needs allocating */ +struct rdt_resource; +static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid) +{ + might_sleep(); + return NULL; +}; + +static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, + void *ctx) { }; + void resctrl_cpu_detect(struct cpuinfo_x86 *c); #else diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index d3e5594ca0db..a117e81296e3 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -551,6 +551,11 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, rr->d = d; rr->val = 0; rr->first = first; + rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid); + if (IS_ERR(rr->arch_mon_ctx)) { + rr->err = -EINVAL; + return; + } cpu = cpumask_any_housekeeping(&d->cpu_mask); @@ -564,6 +569,8 @@ void 
mon_event_read(struct rmid_read *rr, struct rdt_resource *r, smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); else smp_call_on_cpu(cpu, smp_mon_event_count, rr, false); + + resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx); } int rdtgroup_mondata_show(struct seq_file *m, void *arg) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 81f5de916db8..e089d1a1a055 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -137,6 +137,7 @@ struct rmid_read { bool first; int err; u64 val; + void *arch_mon_ctx; }; extern bool rdt_alloc_capable; diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index e8aeff6673ea..9b503e6ac490 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -269,7 +269,7 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width) int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, u32 unused, u32 rmid, enum resctrl_event_id eventid, - u64 *val) + u64 *val, void *ignored) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); @@ -324,9 +324,17 @@ void __check_limbo(struct rdt_domain *d, bool force_free) u32 idx_limit = resctrl_arch_system_num_rmid_idx(); struct rmid_entry *entry; u32 idx, cur_idx = 1; + void *arch_mon_ctx; bool rmid_dirty; u64 val = 0; + arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID); + if (IS_ERR(arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(arch_mon_ctx)); + return; + } + /* * Skip RMID 0 and start from RMID 1 and check all the RMIDs that * are marked as busy for occupancy < threshold. 
If the occupancy @@ -340,7 +348,8 @@ void __check_limbo(struct rdt_domain *d, bool force_free) entry = __rmid_entry(idx); if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid, - QOS_L3_OCCUP_EVENT_ID, &val)) { + QOS_L3_OCCUP_EVENT_ID, &val, + arch_mon_ctx)) { rmid_dirty = true; } else { rmid_dirty = (val >= resctrl_rmid_realloc_threshold); @@ -353,6 +362,8 @@ void __check_limbo(struct rdt_domain *d, bool force_free) } cur_idx = idx + 1; } + + resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx); } bool has_busy_rmid(struct rdt_domain *d) @@ -533,7 +544,7 @@ static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr) } rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid, - &tval); + &tval, rr->arch_mon_ctx); if (rr->err) return rr->err; @@ -722,11 +733,27 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, if (is_mbm_total_enabled()) { rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; rr.val = 0; + rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); + if (IS_ERR(rr.arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(rr.arch_mon_ctx)); + return; + } + __mon_event_count(closid, rmid, &rr); + + resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); } if (is_mbm_local_enabled()) { rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; rr.val = 0; + rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); + if (IS_ERR(rr.arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(rr.arch_mon_ctx)); + return; + } + __mon_event_count(closid, rmid, &rr); /* @@ -736,6 +763,8 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, */ if (is_mba_sc(NULL)) mbm_bw_count(closid, rmid, &rr); + + resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); } } diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 8649fc84aac2..bf460c912bf5 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -235,6 
+235,9 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); * @rmid: rmid of the counter to read. * @eventid: eventid to read, e.g. L3 occupancy. * @val: result of the counter read in bytes. + * @arch_mon_ctx: An architecture specific value from + * resctrl_arch_mon_ctx_alloc(), for MPAM this identifies + * the hardware monitor allocated for this read request. * * Some architectures need to sleep when first programming some of the counters. * (specifically: arm64's MPAM cache occupancy counters can return 'not ready' @@ -248,7 +251,7 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); */ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, u32 closid, u32 rmid, enum resctrl_event_id eventid, - u64 *val); + u64 *val, void *arch_mon_ctx); /** * resctrl_arch_rmid_read_context_check() - warn about invalid contexts -- Gitee From 9b7cbbe2440e65ac9942a31e2dd8be2e4238568b Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:30 +0000 Subject: [PATCH 0509/2138] x86/resctrl: Make resctrl_mounted checks explicit ANBZ: #8626 commit 13e5769debf09588543db83836c524148873929f upstream. The rdt_enable_key is switched when resctrl is mounted, and used to prevent a second mount of the filesystem. It also enables the architecture's context switch code. This requires another architecture to have the same set of static keys, as resctrl depends on them too. The existing users of these static keys are implicitly also checking if the filesystem is mounted. Make the resctrl_mounted checks explicit: resctrl can keep track of whether it has been mounted once. This doesn't need to be combined with whether the arch code is context switching the CLOSID. rdt_mon_enable_key is never used just to test that resctrl is mounted, but does also have this implication. Add a resctrl_mounted to all uses of rdt_mon_enable_key. This will allow the static key changing to be moved behind resctrl_arch_ calls. 
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-17-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/internal.h | 1 + arch/x86/kernel/cpu/resctrl/monitor.c | 12 ++++++++++-- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 23 +++++++++++++++++------ 3 files changed, 28 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index e089d1a1a055..9bfda6963794 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -144,6 +144,7 @@ extern bool rdt_alloc_capable; extern bool rdt_mon_capable; extern unsigned int rdt_mon_features; extern struct list_head resctrl_schema_all; +extern bool resctrl_mounted; enum rdt_group_type { RDTCTRL_GROUP = 0, diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 9b503e6ac490..d5d8a58d96f2 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -813,7 +813,11 @@ void mbm_handle_overflow(struct work_struct *work) mutex_lock(&rdtgroup_mutex); - if (!static_branch_likely(&rdt_mon_enable_key)) + /* + * If the filesystem has been unmounted this work no longer needs to + * run. 
+ */ + if (!resctrl_mounted || !static_branch_likely(&rdt_mon_enable_key)) goto out_unlock; r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; @@ -846,7 +850,11 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) unsigned long delay = msecs_to_jiffies(delay_ms); int cpu; - if (!static_branch_likely(&rdt_mon_enable_key)) + /* + * When a domain comes online there is no guarantee the filesystem is + * mounted. If not, there is no need to catch counter overflow. + */ + if (!resctrl_mounted || !static_branch_likely(&rdt_mon_enable_key)) return; cpu = cpumask_any_housekeeping(&dom->cpu_mask); dom->mbm_work_cpu = cpu; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index e42cbdf8f6a3..857fbbc3c839 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -42,6 +42,9 @@ LIST_HEAD(rdt_all_groups); /* list of entries for the schemata file */ LIST_HEAD(resctrl_schema_all); +/* The filesystem can only be mounted once. */ +bool resctrl_mounted; + /* Kernel fs node for "info" directory under root */ static struct kernfs_node *kn_info; @@ -881,7 +884,7 @@ int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, mutex_lock(&rdtgroup_mutex); /* Return empty if resctrl has not been mounted. */ - if (!static_branch_unlikely(&rdt_enable_key)) { + if (!resctrl_mounted) { seq_puts(s, "res:\nmon:\n"); goto unlock; } @@ -2608,7 +2611,7 @@ static int rdt_get_tree(struct fs_context *fc) /* * resctrl file system can only be mounted once. 
*/ - if (static_branch_unlikely(&rdt_enable_key)) { + if (resctrl_mounted) { ret = -EBUSY; goto out; } @@ -2669,8 +2672,10 @@ static int rdt_get_tree(struct fs_context *fc) if (rdt_mon_capable) static_branch_enable_cpuslocked(&rdt_mon_enable_key); - if (rdt_alloc_capable || rdt_mon_capable) + if (rdt_alloc_capable || rdt_mon_capable) { static_branch_enable_cpuslocked(&rdt_enable_key); + resctrl_mounted = true; + } if (is_mbm_enabled()) { r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; @@ -2944,6 +2949,7 @@ static void rdt_kill_sb(struct super_block *sb) static_branch_disable_cpuslocked(&rdt_alloc_enable_key); static_branch_disable_cpuslocked(&rdt_mon_enable_key); static_branch_disable_cpuslocked(&rdt_enable_key); + resctrl_mounted = false; kernfs_kill_sb(sb); mutex_unlock(&rdtgroup_mutex); cpus_read_unlock(); @@ -3913,7 +3919,7 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) * If resctrl is mounted, remove all the * per domain monitor data directories. */ - if (static_branch_unlikely(&rdt_mon_enable_key)) + if (resctrl_mounted && static_branch_unlikely(&rdt_mon_enable_key)) rmdir_mondata_subdir_allrdtgrp(r, d->id); if (is_mbm_enabled()) @@ -3990,8 +3996,13 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) if (is_llc_occupancy_enabled()) INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo); - /* If resctrl is mounted, add per domain monitor data directories. */ - if (static_branch_unlikely(&rdt_mon_enable_key)) + /* + * If the filesystem is not mounted then only the default resource group + * exists. Creation of its directories is deferred until mount time + * by rdt_get_tree() calling mkdir_mondata_all(). + * If resctrl is mounted, add per domain monitor data directories. 
+ */ + if (resctrl_mounted && static_branch_unlikely(&rdt_mon_enable_key)) mkdir_mondata_subdir_allrdtgrp(r, d); return 0; -- Gitee From e10943c6e69bdf6bd90f51cb543d8dae002fae3b Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:31 +0000 Subject: [PATCH 0510/2138] x86/resctrl: Move alloc/mon static keys into helpers ANBZ: #8626 commit 5db6a4a75c95f6967d57906ba7b82756d1985d63 upstream. resctrl enables three static keys depending on the features it has enabled. Another architecture's context switch code may look different, any static keys that control it should be buried behind helpers. Move the alloc/mon logic into arch-specific helpers as a preparatory step for making the rdt_enable_key's status something the arch code decides. This means other architectures don't have to mirror the static keys. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-18-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/include/asm/resctrl.h | 20 ++++++++++++++++++++ arch/x86/kernel/cpu/resctrl/internal.h | 5 ----- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 8 ++++---- 3 files changed, 24 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 29c4cc343787..3c9137b6ad4f 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -42,6 +42,26 @@ DECLARE_STATIC_KEY_FALSE(rdt_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); +static inline void resctrl_arch_enable_alloc(void) +{ + static_branch_enable_cpuslocked(&rdt_alloc_enable_key); +} + +static inline void 
resctrl_arch_disable_alloc(void) +{ + static_branch_disable_cpuslocked(&rdt_alloc_enable_key); +} + +static inline void resctrl_arch_enable_mon(void) +{ + static_branch_enable_cpuslocked(&rdt_mon_enable_key); +} + +static inline void resctrl_arch_disable_mon(void) +{ + static_branch_disable_cpuslocked(&rdt_mon_enable_key); +} + /* * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR * diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 9bfda6963794..78580855139d 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -94,9 +94,6 @@ static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc) return container_of(kfc, struct rdt_fs_context, kfc); } -DECLARE_STATIC_KEY_FALSE(rdt_enable_key); -DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); - /** * struct mon_evt - Entry in the event list of a resource * @evtid: event id @@ -452,8 +449,6 @@ extern struct mutex rdtgroup_mutex; extern struct rdt_hw_resource rdt_resources_all[]; extern struct rdtgroup rdtgroup_default; -DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); - extern struct dentry *debugfs_resctrl; enum resctrl_res_level { diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 857fbbc3c839..231207f09e04 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2668,9 +2668,9 @@ static int rdt_get_tree(struct fs_context *fc) goto out_psl; if (rdt_alloc_capable) - static_branch_enable_cpuslocked(&rdt_alloc_enable_key); + resctrl_arch_enable_alloc(); if (rdt_mon_capable) - static_branch_enable_cpuslocked(&rdt_mon_enable_key); + resctrl_arch_enable_mon(); if (rdt_alloc_capable || rdt_mon_capable) { static_branch_enable_cpuslocked(&rdt_enable_key); @@ -2946,8 +2946,8 @@ static void rdt_kill_sb(struct super_block *sb) rdtgroup_default.mode = RDT_MODE_SHAREABLE; schemata_list_destroy(); rdtgroup_destroy_root(); - 
static_branch_disable_cpuslocked(&rdt_alloc_enable_key); - static_branch_disable_cpuslocked(&rdt_mon_enable_key); + resctrl_arch_disable_alloc(); + resctrl_arch_disable_mon(); static_branch_disable_cpuslocked(&rdt_enable_key); resctrl_mounted = false; kernfs_kill_sb(sb); -- Gitee From d5acafce82f702f7acaa40d8b59c9ebdf7269192 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:32 +0000 Subject: [PATCH 0511/2138] x86/resctrl: Make rdt_enable_key the arch's decision to switch ANBZ: #8626 commit 0a2f4d9b548c5b1e2e3fcfa966f5d47b1cacff01 upstream. rdt_enable_key is switched when resctrl is mounted. It was also previously used to prevent a second mount of the filesystem. Any other architecture that wants to support resctrl has to provide identical static keys. Now that there are helpers for enabling and disabling the alloc/mon keys, resctrl doesn't need to switch this extra key, it can be done by the arch code. Use the static-key increment and decrement helpers, and change resctrl to ensure the calls are balanced. 
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-19-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/include/asm/resctrl.h | 4 ++++ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 11 +++++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 3c9137b6ad4f..b74aa34dc9e8 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -45,21 +45,25 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); static inline void resctrl_arch_enable_alloc(void) { static_branch_enable_cpuslocked(&rdt_alloc_enable_key); + static_branch_inc_cpuslocked(&rdt_enable_key); } static inline void resctrl_arch_disable_alloc(void) { static_branch_disable_cpuslocked(&rdt_alloc_enable_key); + static_branch_dec_cpuslocked(&rdt_enable_key); } static inline void resctrl_arch_enable_mon(void) { static_branch_enable_cpuslocked(&rdt_mon_enable_key); + static_branch_inc_cpuslocked(&rdt_enable_key); } static inline void resctrl_arch_disable_mon(void) { static_branch_disable_cpuslocked(&rdt_mon_enable_key); + static_branch_dec_cpuslocked(&rdt_enable_key); } /* diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 231207f09e04..7e57ac9d81f7 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2672,10 +2672,8 @@ static int rdt_get_tree(struct fs_context *fc) if (rdt_mon_capable) resctrl_arch_enable_mon(); - if (rdt_alloc_capable || rdt_mon_capable) { - static_branch_enable_cpuslocked(&rdt_enable_key); + if (rdt_alloc_capable || rdt_mon_capable) 
resctrl_mounted = true; - } if (is_mbm_enabled()) { r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; @@ -2946,9 +2944,10 @@ static void rdt_kill_sb(struct super_block *sb) rdtgroup_default.mode = RDT_MODE_SHAREABLE; schemata_list_destroy(); rdtgroup_destroy_root(); - resctrl_arch_disable_alloc(); - resctrl_arch_disable_mon(); - static_branch_disable_cpuslocked(&rdt_enable_key); + if (rdt_alloc_capable) + resctrl_arch_disable_alloc(); + if (rdt_mon_capable) + resctrl_arch_disable_mon(); resctrl_mounted = false; kernfs_kill_sb(sb); mutex_unlock(&rdtgroup_mutex); -- Gitee From d5edfe8673e01f73fedeb4a954d1d70328a40614 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:33 +0000 Subject: [PATCH 0512/2138] x86/resctrl: Add helpers for system wide mon/alloc capable ANBZ: #8626 commit 30017b60706c2ba72a0a4da7d5ef8f5fa95a2f01 upstream. resctrl reads rdt_alloc_capable or rdt_mon_capable to determine whether any of the resources support the corresponding features. resctrl also uses the static keys that affect the architecture's context-switch code to determine the same thing. This forces another architecture to have the same static keys. As the static key is enabled based on the capable flag, and none of the filesystem uses of these are in the scheduler path, move the capable flags behind helpers, and use these in the filesystem code instead of the static key. After this change, only the architecture code manages and uses the static keys to ensure __resctrl_sched_in() does not need runtime checks. This avoids multiple architectures having to define the same static keys. Cases where the static key implicitly tested if the resctrl filesystem was mounted all have an explicit check now. 
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-20-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/include/asm/resctrl.h | 13 ++++++++ arch/x86/kernel/cpu/resctrl/internal.h | 2 -- arch/x86/kernel/cpu/resctrl/monitor.c | 4 +-- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 6 ++-- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 38 +++++++++++------------ 5 files changed, 37 insertions(+), 26 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index b74aa34dc9e8..12dbd2588ca7 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -38,10 +38,18 @@ struct resctrl_pqr_state { DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state); +extern bool rdt_alloc_capable; +extern bool rdt_mon_capable; + DECLARE_STATIC_KEY_FALSE(rdt_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); +static inline bool resctrl_arch_alloc_capable(void) +{ + return rdt_alloc_capable; +} + static inline void resctrl_arch_enable_alloc(void) { static_branch_enable_cpuslocked(&rdt_alloc_enable_key); @@ -54,6 +62,11 @@ static inline void resctrl_arch_disable_alloc(void) static_branch_dec_cpuslocked(&rdt_enable_key); } +static inline bool resctrl_arch_mon_capable(void) +{ + return rdt_mon_capable; +} + static inline void resctrl_arch_enable_mon(void) { static_branch_enable_cpuslocked(&rdt_mon_enable_key); diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 78580855139d..3ee855c37447 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ 
-137,8 +137,6 @@ struct rmid_read { void *arch_mon_ctx; }; -extern bool rdt_alloc_capable; -extern bool rdt_mon_capable; extern unsigned int rdt_mon_features; extern struct list_head resctrl_schema_all; extern bool resctrl_mounted; diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index d5d8a58d96f2..92d7ba674003 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -817,7 +817,7 @@ void mbm_handle_overflow(struct work_struct *work) * If the filesystem has been unmounted this work no longer needs to * run. */ - if (!resctrl_mounted || !static_branch_likely(&rdt_mon_enable_key)) + if (!resctrl_mounted || !resctrl_arch_mon_capable()) goto out_unlock; r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; @@ -854,7 +854,7 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) * When a domain comes online there is no guarantee the filesystem is * mounted. If not, there is no need to catch counter overflow. 
*/ - if (!resctrl_mounted || !static_branch_likely(&rdt_mon_enable_key)) + if (!resctrl_mounted || !resctrl_arch_mon_capable()) return; cpu = cpumask_any_housekeeping(&dom->cpu_mask); dom->mbm_work_cpu = cpu; diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index d8f44113ed1f..8056bed033cc 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -581,7 +581,7 @@ static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp) if (ret) goto err_cpus; - if (rdt_mon_capable) { + if (resctrl_arch_mon_capable()) { ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups"); if (ret) goto err_cpus_list; @@ -628,7 +628,7 @@ static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp) if (ret) goto err_cpus; - if (rdt_mon_capable) { + if (resctrl_arch_mon_capable()) { ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777); if (ret) goto err_cpus_list; @@ -776,7 +776,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) { int ret; - if (rdt_mon_capable) { + if (resctrl_arch_mon_capable()) { ret = alloc_rmid(rdtgrp->closid); if (ret < 0) { rdt_last_cmd_puts("Out of RMIDs\n"); diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 7e57ac9d81f7..ed5fc677a99d 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -641,13 +641,13 @@ static int __rdtgroup_move_task(struct task_struct *tsk, static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) { - return (rdt_alloc_capable && (r->type == RDTCTRL_GROUP) && + return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) && resctrl_arch_match_closid(t, r->closid)); } static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) { - return (rdt_mon_capable && (r->type == RDTMON_GROUP) && + return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) && resctrl_arch_match_rmid(t, r->mon.parent->closid, 
r->mon.rmid)); } @@ -2632,7 +2632,7 @@ static int rdt_get_tree(struct fs_context *fc) closid_init(); - if (rdt_mon_capable) + if (resctrl_arch_mon_capable()) flags |= RFTYPE_MON; ret = rdtgroup_add_files(rdtgroup_default.kn, flags); @@ -2645,7 +2645,7 @@ static int rdt_get_tree(struct fs_context *fc) if (ret < 0) goto out_schemata_free; - if (rdt_mon_capable) { + if (resctrl_arch_mon_capable()) { ret = mongroup_create_dir(rdtgroup_default.kn, &rdtgroup_default, "mon_groups", &kn_mongrp); @@ -2667,12 +2667,12 @@ static int rdt_get_tree(struct fs_context *fc) if (ret < 0) goto out_psl; - if (rdt_alloc_capable) + if (resctrl_arch_alloc_capable()) resctrl_arch_enable_alloc(); - if (rdt_mon_capable) + if (resctrl_arch_mon_capable()) resctrl_arch_enable_mon(); - if (rdt_alloc_capable || rdt_mon_capable) + if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable()) resctrl_mounted = true; if (is_mbm_enabled()) { @@ -2686,10 +2686,10 @@ static int rdt_get_tree(struct fs_context *fc) out_psl: rdt_pseudo_lock_release(); out_mondata: - if (rdt_mon_capable) + if (resctrl_arch_mon_capable()) kernfs_remove(kn_mondata); out_mongrp: - if (rdt_mon_capable) + if (resctrl_arch_mon_capable()) kernfs_remove(kn_mongrp); out_info: kernfs_remove(kn_info); @@ -2944,9 +2944,9 @@ static void rdt_kill_sb(struct super_block *sb) rdtgroup_default.mode = RDT_MODE_SHAREABLE; schemata_list_destroy(); rdtgroup_destroy_root(); - if (rdt_alloc_capable) + if (resctrl_arch_alloc_capable()) resctrl_arch_disable_alloc(); - if (rdt_mon_capable) + if (resctrl_arch_mon_capable()) resctrl_arch_disable_mon(); resctrl_mounted = false; kernfs_kill_sb(sb); @@ -3326,7 +3326,7 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) { int ret; - if (!rdt_mon_capable) + if (!resctrl_arch_mon_capable()) return 0; ret = alloc_rmid(rdtgrp->closid); @@ -3348,7 +3348,7 @@ static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp) { - if 
(rdt_mon_capable) + if (resctrl_arch_mon_capable()) free_rmid(rgrp->closid, rgrp->mon.rmid); } @@ -3412,7 +3412,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, if (rtype == RDTCTRL_GROUP) { files = RFTYPE_BASE | RFTYPE_CTRL; - if (rdt_mon_capable) + if (resctrl_arch_mon_capable()) files |= RFTYPE_MON; } else { files = RFTYPE_BASE | RFTYPE_MON; @@ -3521,7 +3521,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); - if (rdt_mon_capable) { + if (resctrl_arch_mon_capable()) { /* * Create an empty mon_groups directory to hold the subset * of tasks and cpus to monitor. @@ -3576,14 +3576,14 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, * allocation is supported, add a control and monitoring * subdirectory */ - if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn) + if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn) return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); /* * If RDT monitoring is supported and the parent directory is a valid * "mon_groups" directory, add a monitoring subdirectory. */ - if (rdt_mon_capable && is_mon_groups(parent_kn, name)) + if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name)) return rdtgroup_mkdir_mon(parent_kn, name, mode); return -EPERM; @@ -3918,7 +3918,7 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) * If resctrl is mounted, remove all the * per domain monitor data directories. */ - if (resctrl_mounted && static_branch_unlikely(&rdt_mon_enable_key)) + if (resctrl_mounted && resctrl_arch_mon_capable()) rmdir_mondata_subdir_allrdtgrp(r, d->id); if (is_mbm_enabled()) @@ -4001,7 +4001,7 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) * by rdt_get_tree() calling mkdir_mondata_all(). * If resctrl is mounted, add per domain monitor data directories. 
*/ - if (resctrl_mounted && static_branch_unlikely(&rdt_mon_enable_key)) + if (resctrl_mounted && resctrl_arch_mon_capable()) mkdir_mondata_subdir_allrdtgrp(r, d); return 0; -- Gitee From 2e6aef3df3854bdd8a3f622c70d8743d7dcc4e09 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:34 +0000 Subject: [PATCH 0513/2138] x86/resctrl: Add CPU online callback for resctrl work ANBZ: #8626 commit 1b3e50ce7f5001f1e0edaf7d6abea43b264db7ee upstream. The resctrl architecture specific code may need to create a domain when a CPU comes online, it also needs to reset the CPUs PQR_ASSOC register. The resctrl filesystem code needs to update the rdtgroup_default CPU mask when CPUs are brought online. Currently, this is all done in one function, resctrl_online_cpu(). It will need to be split into architecture and filesystem parts before resctrl can be moved to /fs/. Pull the rdtgroup_default update work out as a filesystem specific cpu_online helper. resctrl_online_cpu() is the obvious name for this, which means the version in core.c needs renaming. resctrl_online_cpu() is called by the arch code once it has done the work to add the new CPU to any domains. In future patches, resctrl_online_cpu() will take the rdtgroup_mutex itself. 
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-21-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 8 ++++---- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 8 ++++++++ include/linux/resctrl.h | 1 + 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 42ea49735564..707aba3e2621 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -606,16 +606,16 @@ static void clear_closid_rmid(int cpu) RESCTRL_RESERVED_CLOSID); } -static int resctrl_online_cpu(unsigned int cpu) +static int resctrl_arch_online_cpu(unsigned int cpu) { struct rdt_resource *r; mutex_lock(&rdtgroup_mutex); for_each_capable_rdt_resource(r) domain_add_cpu(cpu, r); - /* The cpu is set in default rdtgroup after online. 
*/ - cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); clear_closid_rmid(cpu); + + resctrl_online_cpu(cpu); mutex_unlock(&rdtgroup_mutex); return 0; @@ -971,7 +971,7 @@ static int __init resctrl_late_init(void) state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/resctrl/cat:online:", - resctrl_online_cpu, resctrl_offline_cpu); + resctrl_arch_online_cpu, resctrl_offline_cpu); if (state < 0) return state; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index ed5fc677a99d..38d3b19a3aca 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -4007,6 +4007,14 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) return 0; } +void resctrl_online_cpu(unsigned int cpu) +{ + lockdep_assert_held(&rdtgroup_mutex); + + /* The CPU is set in default rdtgroup after online. */ + cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); +} + /* * rdtgroup_init - rdtgroup initialization * diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index bf460c912bf5..4c4bad3c34e4 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -223,6 +223,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type type); int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d); void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); +void resctrl_online_cpu(unsigned int cpu); /** * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid -- Gitee From 0ec6f7a4e982c80918f2830c76dfc85f5d638075 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:35 +0000 Subject: [PATCH 0514/2138] x86/resctrl: Allow overflow/limbo handlers to be scheduled on any-but CPU ANBZ: #8626 commit 978fcca954cb52249babbc14e53de53c88dd6433 upstream. When a CPU is taken offline resctrl may need to move the overflow or limbo handlers to run on a different CPU. 
Once the offline callbacks have been split, cqm_setup_limbo_handler() will be called while the CPU that is going offline is still present in the CPU mask. Pass the CPU to exclude to cqm_setup_limbo_handler() and mbm_setup_overflow_handler(). These functions can use a variant of cpumask_any_but() when selecting the CPU. -1 is used to indicate no CPUs need excluding. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Babu Moger Reviewed-by: Reinette Chatre Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-22-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 8 +++-- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 2 +- arch/x86/kernel/cpu/resctrl/internal.h | 33 ++++++++++++++---- arch/x86/kernel/cpu/resctrl/monitor.c | 42 ++++++++++++++++++----- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 6 ++-- include/linux/resctrl.h | 2 ++ 6 files changed, 72 insertions(+), 21 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 707aba3e2621..3d1ac38fcf70 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -584,12 +584,16 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) { if (is_mbm_enabled() && cpu == d->mbm_work_cpu) { cancel_delayed_work(&d->mbm_over); - mbm_setup_overflow_handler(d, 0); + /* + * temporary: exclude_cpu=-1 as this CPU has already + * been removed by cpumask_clear_cpu()d + */ + mbm_setup_overflow_handler(d, 0, RESCTRL_PICK_ANY_CPU); } if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu && has_busy_rmid(d)) { cancel_delayed_work(&d->cqm_limbo); - cqm_setup_limbo_handler(d, 0); + 
cqm_setup_limbo_handler(d, 0, RESCTRL_PICK_ANY_CPU); } } } diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index a117e81296e3..af4bf63683ed 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -557,7 +557,7 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, return; } - cpu = cpumask_any_housekeeping(&d->cpu_mask); + cpu = cpumask_any_housekeeping(&d->cpu_mask, RESCTRL_PICK_ANY_CPU); /* * cpumask_any_housekeeping() prefers housekeeping CPUs, but diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 3ee855c37447..c99f26ebe7a6 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -60,19 +60,36 @@ * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that * aren't marked nohz_full * @mask: The mask to pick a CPU from. + * @exclude_cpu:The CPU to avoid picking. * - * Returns a CPU in @mask. If there are housekeeping CPUs that don't use - * nohz_full, these are preferred. + * Returns a CPU from @mask, but not @exclude_cpu. If there are housekeeping + * CPUs that don't use nohz_full, these are preferred. Pass + * RESCTRL_PICK_ANY_CPU to avoid excluding any CPUs. + * + * When a CPU is excluded, returns >= nr_cpu_ids if no CPUs are available. */ -static inline unsigned int cpumask_any_housekeeping(const struct cpumask *mask) +static inline unsigned int +cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu) { unsigned int cpu, hk_cpu; - cpu = cpumask_any(mask); - if (!tick_nohz_full_cpu(cpu)) + if (exclude_cpu == RESCTRL_PICK_ANY_CPU) + cpu = cpumask_any(mask); + else + cpu = cpumask_any_but(mask, exclude_cpu); + + if (!IS_ENABLED(CONFIG_NO_HZ_FULL)) return cpu; + /* If the CPU picked isn't marked nohz_full nothing more needs doing. 
*/ + if (cpu < nr_cpu_ids && !tick_nohz_full_cpu(cpu)) + return cpu; + + /* Try to find a CPU that isn't nohz_full to use in preference */ hk_cpu = cpumask_nth_andnot(0, mask, tick_nohz_full_mask); + if (hk_cpu == exclude_cpu) + hk_cpu = cpumask_nth_andnot(1, mask, tick_nohz_full_mask); + if (hk_cpu < nr_cpu_ids) cpu = hk_cpu; @@ -573,11 +590,13 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, struct rdt_domain *d, struct rdtgroup *rdtgrp, int evtid, int first); void mbm_setup_overflow_handler(struct rdt_domain *dom, - unsigned long delay_ms); + unsigned long delay_ms, + int exclude_cpu); void mbm_handle_overflow(struct work_struct *work); void __init intel_rdt_mbm_apply_quirk(void); bool is_mba_sc(struct rdt_resource *r); -void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms); +void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu); void cqm_handle_limbo(struct work_struct *work); bool has_busy_rmid(struct rdt_domain *d); void __check_limbo(struct rdt_domain *d, bool force_free); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 92d7ba674003..67edd4c440f0 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -481,7 +481,8 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) * setup up the limbo worker. 
*/ if (!has_busy_rmid(d)) - cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL); + cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL, + RESCTRL_PICK_ANY_CPU); set_bit(idx, d->rmid_busy_llc); entry->busy++; } @@ -784,7 +785,8 @@ void cqm_handle_limbo(struct work_struct *work) __check_limbo(d, false); if (has_busy_rmid(d)) { - d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask); + d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, + RESCTRL_PICK_ANY_CPU); schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo, delay); } @@ -792,15 +794,25 @@ void cqm_handle_limbo(struct work_struct *work) mutex_unlock(&rdtgroup_mutex); } -void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms) +/** + * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this + * domain. + * @dom: The domain the limbo handler should run for. + * @delay_ms: How far in the future the handler should run. + * @exclude_cpu: Which CPU the handler should not run on, + * RESCTRL_PICK_ANY_CPU to pick any CPU. + */ +void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu) { unsigned long delay = msecs_to_jiffies(delay_ms); int cpu; - cpu = cpumask_any_housekeeping(&dom->cpu_mask); + cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); dom->cqm_work_cpu = cpu; - schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); + if (cpu < nr_cpu_ids) + schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); } void mbm_handle_overflow(struct work_struct *work) @@ -838,14 +850,24 @@ void mbm_handle_overflow(struct work_struct *work) * Re-check for housekeeping CPUs. This allows the overflow handler to * move off a nohz_full CPU quickly. 
*/ - d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask); + d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, + RESCTRL_PICK_ANY_CPU); schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay); out_unlock: mutex_unlock(&rdtgroup_mutex); } -void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) +/** + * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this + * domain. + * @dom: The domain the overflow handler should run for. + * @delay_ms: How far in the future the handler should run. + * @exclude_cpu: Which CPU the handler should not run on, + * RESCTRL_PICK_ANY_CPU to pick any CPU. + */ +void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu) { unsigned long delay = msecs_to_jiffies(delay_ms); int cpu; @@ -856,9 +878,11 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) */ if (!resctrl_mounted || !resctrl_arch_mon_capable()) return; - cpu = cpumask_any_housekeeping(&dom->cpu_mask); + cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); dom->mbm_work_cpu = cpu; - schedule_delayed_work_on(cpu, &dom->mbm_over, delay); + + if (cpu < nr_cpu_ids) + schedule_delayed_work_on(cpu, &dom->mbm_over, delay); } static int dom_data_init(struct rdt_resource *r) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 38d3b19a3aca..f5688c79d94f 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2678,7 +2678,8 @@ static int rdt_get_tree(struct fs_context *fc) if (is_mbm_enabled()) { r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; list_for_each_entry(dom, &r->domains, list) - mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL); + mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL, + RESCTRL_PICK_ANY_CPU); } goto out; @@ -3989,7 +3990,8 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) if (is_mbm_enabled()) { 
INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); - mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL); + mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL, + RESCTRL_PICK_ANY_CPU); } if (is_llc_occupancy_enabled()) diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 4c4bad3c34e4..ccbbbe5d18d3 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -10,6 +10,8 @@ #define RESCTRL_RESERVED_CLOSID 0 #define RESCTRL_RESERVED_RMID 0 +#define RESCTRL_PICK_ANY_CPU -1 + #ifdef CONFIG_PROC_CPU_RESCTRL int proc_resctrl_show(struct seq_file *m, -- Gitee From c24e53efe27bb1ebab10715a468559c87827c803 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:36 +0000 Subject: [PATCH 0515/2138] x86/resctrl: Add CPU offline callback for resctrl work ANBZ: #8626 commit 258c91e84fedc789353a35ad91d827a9111d3cbd upstream. The resctrl architecture specific code may need to free a domain when a CPU goes offline, it also needs to reset the CPUs PQR_ASSOC register. Amongst other things, the resctrl filesystem code needs to clear this CPU from the cpu_mask of any control and monitor groups. Currently, this is all done in core.c and called from resctrl_offline_cpu(), making the split between architecture and filesystem code unclear. Move the filesystem work to remove the CPU from the control and monitor groups into a filesystem helper called resctrl_offline_cpu(), and rename the one in core.c resctrl_arch_offline_cpu(). 
Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-23-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 25 +++++-------------------- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 24 ++++++++++++++++++++++++ include/linux/resctrl.h | 1 + 3 files changed, 30 insertions(+), 20 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 3d1ac38fcf70..063bec47719e 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -625,31 +625,15 @@ static int resctrl_arch_online_cpu(unsigned int cpu) return 0; } -static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) +static int resctrl_arch_offline_cpu(unsigned int cpu) { - struct rdtgroup *cr; - - list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { - if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) { - break; - } - } -} - -static int resctrl_offline_cpu(unsigned int cpu) -{ - struct rdtgroup *rdtgrp; struct rdt_resource *r; mutex_lock(&rdtgroup_mutex); + resctrl_offline_cpu(cpu); + for_each_capable_rdt_resource(r) domain_remove_cpu(cpu, r); - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { - if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { - clear_childcpus(rdtgrp, cpu); - break; - } - } clear_closid_rmid(cpu); mutex_unlock(&rdtgroup_mutex); @@ -975,7 +959,8 @@ static int __init resctrl_late_init(void) state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/resctrl/cat:online:", - resctrl_arch_online_cpu, resctrl_offline_cpu); + resctrl_arch_online_cpu, + resctrl_arch_offline_cpu); if (state < 0) return state; diff 
--git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index f5688c79d94f..5bd3d8fb3f67 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -4017,6 +4017,30 @@ void resctrl_online_cpu(unsigned int cpu) cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); } +static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) +{ + struct rdtgroup *cr; + + list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { + if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) + break; + } +} + +void resctrl_offline_cpu(unsigned int cpu) +{ + struct rdtgroup *rdtgrp; + + lockdep_assert_held(&rdtgroup_mutex); + + list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { + if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { + clear_childcpus(rdtgrp, cpu); + break; + } + } +} + /* * rdtgroup_init - rdtgroup initialization * diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index ccbbbe5d18d3..270ff1d5c051 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -226,6 +226,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d); void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); void resctrl_online_cpu(unsigned int cpu); +void resctrl_offline_cpu(unsigned int cpu); /** * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid -- Gitee From 1ae00da727b2b4c77be7009619979c8f4bffe088 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:37 +0000 Subject: [PATCH 0516/2138] x86/resctrl: Move domain helper migration into resctrl_offline_cpu() ANBZ: #8626 commit eeff1d4f118bdf0870227fee5a770f03056e3adc upstream. When a CPU is taken offline the resctrl filesystem code needs to check if it was the CPU nominated to perform the periodic overflow and limbo work. If so, another CPU needs to be chosen to do this work. 
This is currently done in core.c, mixed in with the code that removes the CPU from the domain's mask, and potentially free()s the domain. Move the migration of the overflow and limbo helpers into the filesystem code, into resctrl_offline_cpu(). As resctrl_offline_cpu() runs before the architecture code has removed the CPU from the domain mask, the callers need to be told which CPU is being removed, to avoid picking it as the new CPU. This uses the exclude_cpu feature previously added. Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-24-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 16 ---------------- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 18 ++++++++++++++++++ 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 063bec47719e..aefeb6104ee3 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -580,22 +580,6 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) return; } - - if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) { - if (is_mbm_enabled() && cpu == d->mbm_work_cpu) { - cancel_delayed_work(&d->mbm_over); - /* - * temporary: exclude_cpu=-1 as this CPU has already - * been removed by cpumask_clear_cpu()d - */ - mbm_setup_overflow_handler(d, 0, RESCTRL_PICK_ANY_CPU); - } - if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu && - has_busy_rmid(d)) { - cancel_delayed_work(&d->cqm_limbo); - cqm_setup_limbo_handler(d, 0, RESCTRL_PICK_ANY_CPU); - } - } } static void clear_closid_rmid(int cpu) diff --git 
a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 5bd3d8fb3f67..777e9f680332 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -4029,7 +4029,9 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) void resctrl_offline_cpu(unsigned int cpu) { + struct rdt_resource *l3 = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; struct rdtgroup *rdtgrp; + struct rdt_domain *d; lockdep_assert_held(&rdtgroup_mutex); @@ -4039,6 +4041,22 @@ void resctrl_offline_cpu(unsigned int cpu) break; } } + + if (!l3->mon_capable) + return; + + d = get_domain_from_cpu(cpu, l3); + if (d) { + if (is_mbm_enabled() && cpu == d->mbm_work_cpu) { + cancel_delayed_work(&d->mbm_over); + mbm_setup_overflow_handler(d, 0, cpu); + } + if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu && + has_busy_rmid(d)) { + cancel_delayed_work(&d->cqm_limbo); + cqm_setup_limbo_handler(d, 0, cpu); + } + } } /* -- Gitee From 3a1d9670051ad6b7dbb5881e897953aed2a867dc Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Feb 2024 18:44:38 +0000 Subject: [PATCH 0517/2138] x86/resctrl: Separate arch and fs resctrl locks ANBZ: #8626 commit fb700810d30b9eb333a7bf447012e1158e35c62f upstream. resctrl has one mutex that is taken by the architecture-specific code, and the filesystem parts. The two interact via cpuhp, where the architecture code updates the domain list. Filesystem handlers that walk the domains list should not run concurrently with the cpuhp callback modifying the list. Exposing a lock from the filesystem code means the interface is not cleanly defined, and creates the possibility of cross-architecture lock ordering headaches. The interaction only exists so that certain filesystem paths are serialised against CPU hotplug. The CPU hotplug code already has a mechanism to do this using cpus_read_lock(). 
MPAM's monitors have an overflow interrupt, so it needs to be possible to walk the domains list in irq context. RCU is ideal for this, but some paths need to be able to sleep to allocate memory. Because resctrl_{on,off}line_cpu() take the rdtgroup_mutex as part of a cpuhp callback, cpus_read_lock() must always be taken first. rdtgroup_schemata_write() already does this. Most of the filesystem code's domain list walkers are currently protected by the rdtgroup_mutex taken in rdtgroup_kn_lock_live(). The exceptions are rdt_bit_usage_show() and the mon_config helpers which take the lock directly. Make the domain list protected by RCU. An architecture-specific lock prevents concurrent writers. rdt_bit_usage_show() could walk the domain list using RCU, but to keep all the filesystem operations the same, this is changed to call cpus_read_lock(). The mon_config helpers send multiple IPIs, take the cpus_read_lock() in these cases. The other filesystem list walkers need to be able to sleep. Add cpus_read_lock() to rdtgroup_kn_lock_live() so that the cpuhp callbacks can't be invoked when file system operations are occurring. Add lockdep_assert_cpus_held() in the cases where the rdtgroup_kn_lock_live() call isn't obvious. Resctrl's domain online/offline calls now need to take the rdtgroup_mutex themselves. 
[ bp: Fold in a build fix: https://lore.kernel.org/r/87zfvwieli.ffs@tglx ] Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Shaopeng Tan Reviewed-by: Reinette Chatre Reviewed-by: Babu Moger Tested-by: Shaopeng Tan Tested-by: Peter Newman Tested-by: Babu Moger Tested-by: Carl Worth # arm64 Link: https://lore.kernel.org/r/20240213184438.16675-25-james.morse@arm.com Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 44 +++++++++++---- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 15 ++++- arch/x86/kernel/cpu/resctrl/monitor.c | 8 +++ arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 3 + arch/x86/kernel/cpu/resctrl/rdtgroup.c | 68 ++++++++++++++++++----- include/linux/resctrl.h | 2 +- 6 files changed, 112 insertions(+), 28 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index aefeb6104ee3..e08fb4e898b6 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -16,6 +16,7 @@ #define pr_fmt(fmt) "resctrl: " fmt +#include #include #include #include @@ -25,8 +26,15 @@ #include #include "internal.h" -/* Mutex to protect rdtgroup access. */ -DEFINE_MUTEX(rdtgroup_mutex); +/* + * rdt_domain structures are kfree()d when their last CPU goes offline, + * and allocated when the first CPU in a new domain comes online. + * The rdt_resource's domain list is updated when this happens. Readers of + * the domain list must either take cpus_read_lock(), or rely on an RCU + * read-side critical section, to avoid observing concurrent modification. 
+ * All writers take this mutex: + */ +static DEFINE_MUTEX(domain_list_lock); /* * The cached resctrl_pqr_state is strictly per CPU and can never be @@ -354,6 +362,15 @@ struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r) { struct rdt_domain *d; + /* + * Walking r->domains, ensure it can't race with cpuhp. + * Because this is called via IPI by rdt_ctrl_update(), assertions + * about locks this thread holds will lead to false positives. Check + * someone is holding the CPUs lock. + */ + if (IS_ENABLED(CONFIG_HOTPLUG_CPU) && IS_ENABLED(CONFIG_LOCKDEP)) + WARN_ON_ONCE(!lockdep_is_cpus_held()); + list_for_each_entry(d, &r->domains, list) { /* Find the domain that contains this CPU */ if (cpumask_test_cpu(cpu, &d->cpu_mask)) @@ -510,6 +527,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r) struct rdt_domain *d; int err; + lockdep_assert_held(&domain_list_lock); + d = rdt_find_domain(r, id, &add_pos); if (IS_ERR(d)) { pr_warn("Couldn't find cache id for CPU %d\n", cpu); @@ -543,11 +562,12 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r) return; } - list_add_tail(&d->list, add_pos); + list_add_tail_rcu(&d->list, add_pos); err = resctrl_online_domain(r, d); if (err) { - list_del(&d->list); + list_del_rcu(&d->list); + synchronize_rcu(); domain_free(hw_dom); } } @@ -558,6 +578,8 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) struct rdt_hw_domain *hw_dom; struct rdt_domain *d; + lockdep_assert_held(&domain_list_lock); + d = rdt_find_domain(r, id, NULL); if (IS_ERR_OR_NULL(d)) { pr_warn("Couldn't find cache id for CPU %d\n", cpu); @@ -568,7 +590,8 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) cpumask_clear_cpu(cpu, &d->cpu_mask); if (cpumask_empty(&d->cpu_mask)) { resctrl_offline_domain(r, d); - list_del(&d->list); + list_del_rcu(&d->list); + synchronize_rcu(); /* * rdt_domain "d" is going to be freed below, so clear @@ -598,13 +621,13 @@ static int resctrl_arch_online_cpu(unsigned int cpu) 
{ struct rdt_resource *r; - mutex_lock(&rdtgroup_mutex); + mutex_lock(&domain_list_lock); for_each_capable_rdt_resource(r) domain_add_cpu(cpu, r); - clear_closid_rmid(cpu); + mutex_unlock(&domain_list_lock); + clear_closid_rmid(cpu); resctrl_online_cpu(cpu); - mutex_unlock(&rdtgroup_mutex); return 0; } @@ -613,13 +636,14 @@ static int resctrl_arch_offline_cpu(unsigned int cpu) { struct rdt_resource *r; - mutex_lock(&rdtgroup_mutex); resctrl_offline_cpu(cpu); + mutex_lock(&domain_list_lock); for_each_capable_rdt_resource(r) domain_remove_cpu(cpu, r); + mutex_unlock(&domain_list_lock); + clear_closid_rmid(cpu); - mutex_unlock(&rdtgroup_mutex); return 0; } diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index af4bf63683ed..8bd717222f0c 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -217,6 +217,9 @@ static int parse_line(char *line, struct resctrl_schema *s, struct rdt_domain *d; unsigned long dom_id; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) { rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n"); @@ -321,6 +324,9 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) struct rdt_domain *d; u32 idx; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) return -ENOMEM; @@ -386,11 +392,9 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, return -EINVAL; buf[nbytes - 1] = '\0'; - cpus_read_lock(); rdtgrp = rdtgroup_kn_lock_live(of->kn); if (!rdtgrp) { rdtgroup_kn_unlock(of->kn); - cpus_read_unlock(); return -ENOENT; } rdt_last_cmd_clear(); @@ -452,7 +456,6 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, out: rdt_staged_configs_clear(); rdtgroup_kn_unlock(of->kn); - 
cpus_read_unlock(); return ret ?: nbytes; } @@ -472,6 +475,9 @@ static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int clo bool sep = false; u32 ctrl_val; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + seq_printf(s, "%*s:", max_name_width, schema->name); list_for_each_entry(dom, &r->domains, list) { if (sep) @@ -542,6 +548,9 @@ void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, { int cpu; + /* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + /* * Setup the parameters to pass to mon_event_count() to read the data. */ diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 67edd4c440f0..c34a35ec0f03 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -15,6 +15,7 @@ * Software Developer Manual June 2016, volume 3, section 17.17. */ +#include #include #include #include @@ -472,6 +473,9 @@ static void add_rmid_to_limbo(struct rmid_entry *entry) lockdep_assert_held(&rdtgroup_mutex); + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid); entry->busy = 0; @@ -778,6 +782,7 @@ void cqm_handle_limbo(struct work_struct *work) unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); struct rdt_domain *d; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); d = container_of(work, struct rdt_domain, cqm_limbo.work); @@ -792,6 +797,7 @@ void cqm_handle_limbo(struct work_struct *work) } mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); } /** @@ -823,6 +829,7 @@ void mbm_handle_overflow(struct work_struct *work) struct rdt_resource *r; struct rdt_domain *d; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); /* @@ -856,6 +863,7 @@ void mbm_handle_overflow(struct work_struct *work) out_unlock: mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); } /** diff --git 
a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 8056bed033cc..884b88e25141 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -844,6 +844,9 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) struct rdt_domain *d_i; bool ret = false; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL)) return true; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 777e9f680332..011e17efb1a6 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -35,6 +35,10 @@ DEFINE_STATIC_KEY_FALSE(rdt_enable_key); DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key); DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key); + +/* Mutex to protect rdtgroup access. */ +DEFINE_MUTEX(rdtgroup_mutex); + static struct kernfs_root *rdt_root; struct rdtgroup rdtgroup_default; LIST_HEAD(rdt_all_groups); @@ -1014,6 +1018,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of, bool sep = false; u32 ctrl_val; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); hw_shareable = r->cache.shareable_bits; list_for_each_entry(dom, &r->domains, list) { @@ -1074,6 +1079,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of, } seq_putc(seq, '\n'); mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); return 0; } @@ -1329,6 +1335,9 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) struct rdt_domain *d; u32 ctrl; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + list_for_each_entry(s, &resctrl_schema_all, list) { r = s->res; if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA) @@ -1593,6 +1602,7 @@ static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid struct rdt_domain *dom; bool sep = false; + cpus_read_lock(); 
mutex_lock(&rdtgroup_mutex); list_for_each_entry(dom, &r->domains, list) { @@ -1609,6 +1619,7 @@ static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid seq_puts(s, "\n"); mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); return 0; } @@ -1690,6 +1701,9 @@ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) unsigned long dom_id, val; struct rdt_domain *d; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + next: if (!tok || tok[0] == '\0') return 0; @@ -1736,6 +1750,7 @@ static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of, if (nbytes == 0 || buf[nbytes - 1] != '\n') return -EINVAL; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); rdt_last_cmd_clear(); @@ -1745,6 +1760,7 @@ static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of, ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID); mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); return ret ?: nbytes; } @@ -1760,6 +1776,7 @@ static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of, if (nbytes == 0 || buf[nbytes - 1] != '\n') return -EINVAL; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); rdt_last_cmd_clear(); @@ -1769,6 +1786,7 @@ static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of, ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID); mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); return ret ?: nbytes; } @@ -2245,6 +2263,9 @@ static int set_cache_qos_cfg(int level, bool enable) struct rdt_domain *d; int cpu; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + if (level == RDT_RESOURCE_L3) update = l3_qos_cfg_update; else if (level == RDT_RESOURCE_L2) @@ -2444,6 +2465,7 @@ struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) rdtgroup_kn_get(rdtgrp, kn); + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); /* Was this group deleted while we waited? 
*/ @@ -2461,6 +2483,8 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn) return; mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + rdtgroup_kn_put(rdtgrp, kn); } @@ -2793,6 +2817,9 @@ static int reset_all_ctrls(struct rdt_resource *r) struct rdt_domain *d; int i; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) return -ENOMEM; @@ -3077,6 +3104,9 @@ static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, struct rdt_domain *dom; int ret; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + list_for_each_entry(dom, &r->domains, list) { ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp); if (ret) @@ -3907,13 +3937,13 @@ static void domain_destroy_mon_state(struct rdt_domain *d) void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) { - lockdep_assert_held(&rdtgroup_mutex); + mutex_lock(&rdtgroup_mutex); if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) mba_sc_domain_destroy(r, d); if (!r->mon_capable) - return; + goto out_unlock; /* * If resctrl is mounted, remove all the @@ -3938,6 +3968,9 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) } domain_destroy_mon_state(d); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); } static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) @@ -3973,20 +4006,22 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) { - int err; + int err = 0; - lockdep_assert_held(&rdtgroup_mutex); + mutex_lock(&rdtgroup_mutex); - if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) + if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) { /* RDT_RESOURCE_MBA is never mon_capable */ - return mba_sc_domain_allocate(r, d); + err = mba_sc_domain_allocate(r, d); + goto out_unlock; + } if (!r->mon_capable) - return 0; + goto 
out_unlock; err = domain_setup_mon_state(r, d); if (err) - return err; + goto out_unlock; if (is_mbm_enabled()) { INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); @@ -4006,15 +4041,18 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) if (resctrl_mounted && resctrl_arch_mon_capable()) mkdir_mondata_subdir_allrdtgrp(r, d); - return 0; +out_unlock: + mutex_unlock(&rdtgroup_mutex); + + return err; } void resctrl_online_cpu(unsigned int cpu) { - lockdep_assert_held(&rdtgroup_mutex); - + mutex_lock(&rdtgroup_mutex); /* The CPU is set in default rdtgroup after online. */ cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); + mutex_unlock(&rdtgroup_mutex); } static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) @@ -4033,8 +4071,7 @@ void resctrl_offline_cpu(unsigned int cpu) struct rdtgroup *rdtgrp; struct rdt_domain *d; - lockdep_assert_held(&rdtgroup_mutex); - + mutex_lock(&rdtgroup_mutex); list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { clear_childcpus(rdtgrp, cpu); @@ -4043,7 +4080,7 @@ void resctrl_offline_cpu(unsigned int cpu) } if (!l3->mon_capable) - return; + goto out_unlock; d = get_domain_from_cpu(cpu, l3); if (d) { @@ -4057,6 +4094,9 @@ void resctrl_offline_cpu(unsigned int cpu) cqm_setup_limbo_handler(d, 0, cpu); } } + +out_unlock: + mutex_unlock(&rdtgroup_mutex); } /* diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 270ff1d5c051..a365f67131ec 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -159,7 +159,7 @@ struct resctrl_schema; * @cache_level: Which cache level defines scope of this resource * @cache: Cache allocation related data * @membw: If the component has bandwidth controls, their properties. - * @domains: All domains for this resource + * @domains: RCU list of all domains for this resource * @name: Name to use in "schemata" file. 
* @data_width: Character width of data when displaying * @default_ctrl: Specifies default cache cbm or memory B/W percent. -- Gitee From b5f906b31c368363a998516adb560053b30eac5f Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 21 Feb 2024 12:23:06 +0000 Subject: [PATCH 0518/2138] x86/resctrl: Remove lockdep annotation that triggers false positive ANBZ: #8626 commit c0d848fcb09d80a5f48b99f85e448185125ef59f upstream. get_domain_from_cpu() walks a list of domains to find the one that contains the specified CPU. This needs to be protected against races with CPU hotplug when the list is modified. It has recently gained a lockdep annotation to check this. The lockdep annotation causes false positives when called via IPI as the lock is held, but by another process. Remove it. [ bp: Refresh it ontop of x86/cache. ] Fixes: fb700810d30b ("x86/resctrl: Separate arch and fs resctrl locks") Reported-by: Tony Luck Signed-off-by: James Morse Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/all/ZdUSwOM9UUNpw84Y@agluck-desk3 Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2959 --- arch/x86/kernel/cpu/resctrl/core.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index e08fb4e898b6..cab67782f19d 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -362,15 +362,6 @@ struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r) { struct rdt_domain *d; - /* - * Walking r->domains, ensure it can't race with cpuhp. - * Because this is called via IPI by rdt_ctrl_update(), assertions - * about locks this thread holds will lead to false positives. Check - * someone is holding the CPUs lock. 
- */ - if (IS_ENABLED(CONFIG_HOTPLUG_CPU) && IS_ENABLED(CONFIG_LOCKDEP)) - WARN_ON_ONCE(!lockdep_is_cpus_held()); - list_for_each_entry(d, &r->domains, list) { /* Find the domain that contains this CPU */ if (cpumask_test_cpu(cpu, &d->cpu_mask)) -- Gitee From c8bcce47621db9ba39898840c5bdac38d12ded34 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Fri, 29 Mar 2024 17:19:43 +0800 Subject: [PATCH 0519/2138] anolis: configs: adjust kconfigs to support some important arch ANBZ: #8598 Adjust kconfigs to support ZHAOXIN, PHYTIUM and KUNPENG. By the way, some important kconfigs, such as CONFIG_UACCE, was modified, too. Here are the kconfig list to be adjusted: x86: CONFIG_UACCE=m CONFIG_SCHED_CLUSTER=y CONFIG_SATA_ZHAOXIN=m CONFIG_HW_RANDOM_ZHAOXIN=m CONFIG_I2C_ZHAOXIN=m CONFIG_CRYPTO_SM2_ZHAOXIN_GMI=m CONFIG_I2C_ZHAOXIN_SMBUS=m arm64: CONFIG_ARM_SMMU_V3_PMU=m CONFIG_SPI_HISI_KUNPENG=m CONFIG_NTB=m CONFIG_UACCE=m CONFIG_ARCH_PHYTIUM=y CONFIG_ARM_GIC_PHYTIUM_2500=y CONFIG_CRYPTO_DEV_HISI_SEC2=m CONFIG_CRYPTO_DEV_HISI_ZIP=m CONFIG_INFINIBAND_HNS_HIP08=y CONFIG_SPI_HISI_SFC_V3XX=m CONFIG_ARM_SMMU_V3_SVA=y Signed-off-by: Qiao Ma Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/2973 --- arch/arm64/configs/anolis-debug_defconfig | 32 ++++++++++++++++------- arch/arm64/configs/anolis_defconfig | 32 ++++++++++++++++------- arch/x86/configs/anolis-debug_defconfig | 19 +++++++++----- arch/x86/configs/anolis_defconfig | 16 +++++++----- 4 files changed, 67 insertions(+), 32 deletions(-) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 43c9990b1068..8b932435df46 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -337,6 +337,7 @@ CONFIG_ARCH_HISI=y # CONFIG_ARCH_NXP is not set # CONFIG_ARCH_MA35 is not set # CONFIG_ARCH_NPCM is not set +CONFIG_ARCH_PHYTIUM=y CONFIG_ARCH_QCOM=y # CONFIG_ARCH_REALTEK is not set # CONFIG_ARCH_RENESAS is not set @@ -2328,7 
+2329,7 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y # CONFIG_MISC_ALCOR_PCI is not set # CONFIG_MISC_RTSX_PCI is not set # CONFIG_MISC_RTSX_USB is not set -# CONFIG_UACCE is not set +CONFIG_UACCE=m CONFIG_PVPANIC=y # CONFIG_PVPANIC_MMIO is not set # CONFIG_PVPANIC_PCI is not set @@ -3486,8 +3487,8 @@ CONFIG_SPI_DESIGNWARE=m # CONFIG_SPI_DW_DMA is not set # CONFIG_SPI_DW_PCI is not set CONFIG_SPI_DW_MMIO=m -# CONFIG_SPI_HISI_KUNPENG is not set -# CONFIG_SPI_HISI_SFC_V3XX is not set +CONFIG_SPI_HISI_KUNPENG=m +CONFIG_SPI_HISI_SFC_V3XX=m # CONFIG_SPI_GPIO is not set # CONFIG_SPI_FSL_SPI is not set # CONFIG_SPI_MICROCHIP_CORE is not set @@ -5126,7 +5127,7 @@ CONFIG_INFINIBAND_CXGB4=m # CONFIG_INFINIBAND_EFA is not set CONFIG_INFINIBAND_ERDMA=m CONFIG_INFINIBAND_HNS=m -# CONFIG_INFINIBAND_HNS_HIP08 is not set +CONFIG_INFINIBAND_HNS_HIP08=y # CONFIG_INFINIBAND_IRDMA is not set CONFIG_MLX4_INFINIBAND=m CONFIG_MLX5_INFINIBAND=m @@ -5357,6 +5358,7 @@ CONFIG_VFIO_PCI_MMAP=y CONFIG_VFIO_PCI_INTX=y CONFIG_VFIO_PCI=m # CONFIG_MLX5_VFIO_PCI is not set +# CONFIG_HISI_ACC_VFIO_PCI is not set # end of VFIO support for PCI devices # @@ -5531,6 +5533,7 @@ CONFIG_IOMMU_DEFAULT_DMA_STRICT=y # CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set CONFIG_OF_IOMMU=y CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_SVA=y # CONFIG_IOMMUFD is not set CONFIG_ARM_SMMU=y # CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set @@ -5538,7 +5541,7 @@ CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y CONFIG_ARM_SMMU_QCOM=y # CONFIG_ARM_SMMU_QCOM_DEBUG is not set CONFIG_ARM_SMMU_V3=y -# CONFIG_ARM_SMMU_V3_SVA is not set +CONFIG_ARM_SMMU_V3_SVA=y # CONFIG_QCOM_IOMMU is not set # CONFIG_VIRTIO_IOMMU is not set @@ -5649,7 +5652,15 @@ CONFIG_EXTCON_GPIO=m # CONFIG_EXTCON_USBC_TUSB320 is not set # CONFIG_MEMORY is not set # CONFIG_IIO is not set -# CONFIG_NTB is not set +CONFIG_NTB=m +# CONFIG_NTB_MSI is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_EPF is not set +# CONFIG_NTB_SWITCHTEC is not set +# CONFIG_NTB_PINGPONG is not set +# 
CONFIG_NTB_TOOL is not set +# CONFIG_NTB_PERF is not set +# CONFIG_NTB_TRANSPORT is not set CONFIG_PWM=y CONFIG_PWM_SYSFS=y # CONFIG_PWM_DEBUG is not set @@ -5671,6 +5682,7 @@ CONFIG_ARM_GIC_V2M=y CONFIG_ARM_GIC_V3=y CONFIG_ARM_GIC_V3_ITS=y CONFIG_ARM_GIC_V3_ITS_PCI=y +CONFIG_ARM_GIC_PHYTIUM_2500=y # CONFIG_AL_FIC is not set CONFIG_HISILICON_IRQ_MBIGEN=y # CONFIG_XILINX_INTC is not set @@ -5749,7 +5761,7 @@ CONFIG_ARM_CCN=y CONFIG_ARM_CMN=y CONFIG_ARM_PMU=y CONFIG_ARM_PMU_ACPI=y -CONFIG_ARM_SMMU_V3_PMU=y +CONFIG_ARM_SMMU_V3_PMU=m CONFIG_ARM_PMUV3=y CONFIG_ARM_DSU_PMU=y CONFIG_QCOM_L2_PMU=y @@ -5760,11 +5772,11 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_ARM_DMC620_PMU is not set # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m -CONFIG_DWC_PCIE_PMU=m CONFIG_HISI_PMU=m CONFIG_HISI_PCIE_PMU=m # CONFIG_HNS3_PMU is not set # CONFIG_MARVELL_CN10K_DDR_PMU is not set +CONFIG_DWC_PCIE_PMU=m # CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set # end of Performance monitor support @@ -6527,9 +6539,9 @@ CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_SAFEXCEL is not set # CONFIG_CRYPTO_DEV_CCREE is not set CONFIG_CRYPTO_DEV_HISI_SEC=m -# CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +CONFIG_CRYPTO_DEV_HISI_SEC2=m CONFIG_CRYPTO_DEV_HISI_QM=m -# CONFIG_CRYPTO_DEV_HISI_ZIP is not set +CONFIG_CRYPTO_DEV_HISI_ZIP=m CONFIG_CRYPTO_DEV_HISI_HPRE=m CONFIG_CRYPTO_DEV_HISI_TRNG=m # CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 0e3103e1b10c..ba7a0a1e15f4 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -335,6 +335,7 @@ CONFIG_ARCH_HISI=y # CONFIG_ARCH_NXP is not set # CONFIG_ARCH_MA35 is not set # CONFIG_ARCH_NPCM is not set +CONFIG_ARCH_PHYTIUM=y CONFIG_ARCH_QCOM=y # CONFIG_ARCH_REALTEK is not set # CONFIG_ARCH_RENESAS is not set @@ -2325,7 +2326,7 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y # CONFIG_MISC_ALCOR_PCI is not set # CONFIG_MISC_RTSX_PCI is not 
set # CONFIG_MISC_RTSX_USB is not set -# CONFIG_UACCE is not set +CONFIG_UACCE=m CONFIG_PVPANIC=y # CONFIG_PVPANIC_MMIO is not set # CONFIG_PVPANIC_PCI is not set @@ -3483,8 +3484,8 @@ CONFIG_SPI_DESIGNWARE=m # CONFIG_SPI_DW_DMA is not set # CONFIG_SPI_DW_PCI is not set CONFIG_SPI_DW_MMIO=m -# CONFIG_SPI_HISI_KUNPENG is not set -# CONFIG_SPI_HISI_SFC_V3XX is not set +CONFIG_SPI_HISI_KUNPENG=m +CONFIG_SPI_HISI_SFC_V3XX=m # CONFIG_SPI_GPIO is not set # CONFIG_SPI_FSL_SPI is not set # CONFIG_SPI_MICROCHIP_CORE is not set @@ -5123,7 +5124,7 @@ CONFIG_INFINIBAND_CXGB4=m # CONFIG_INFINIBAND_EFA is not set CONFIG_INFINIBAND_ERDMA=m CONFIG_INFINIBAND_HNS=m -# CONFIG_INFINIBAND_HNS_HIP08 is not set +CONFIG_INFINIBAND_HNS_HIP08=y # CONFIG_INFINIBAND_IRDMA is not set CONFIG_MLX4_INFINIBAND=m CONFIG_MLX5_INFINIBAND=m @@ -5353,6 +5354,7 @@ CONFIG_VFIO_PCI_MMAP=y CONFIG_VFIO_PCI_INTX=y CONFIG_VFIO_PCI=m # CONFIG_MLX5_VFIO_PCI is not set +# CONFIG_HISI_ACC_VFIO_PCI is not set # end of VFIO support for PCI devices # @@ -5527,6 +5529,7 @@ CONFIG_IOMMU_DEFAULT_DMA_STRICT=y # CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set CONFIG_OF_IOMMU=y CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_SVA=y # CONFIG_IOMMUFD is not set CONFIG_ARM_SMMU=y # CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set @@ -5534,7 +5537,7 @@ CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y CONFIG_ARM_SMMU_QCOM=y # CONFIG_ARM_SMMU_QCOM_DEBUG is not set CONFIG_ARM_SMMU_V3=y -# CONFIG_ARM_SMMU_V3_SVA is not set +CONFIG_ARM_SMMU_V3_SVA=y # CONFIG_QCOM_IOMMU is not set # CONFIG_VIRTIO_IOMMU is not set @@ -5645,7 +5648,15 @@ CONFIG_EXTCON_GPIO=m # CONFIG_EXTCON_USBC_TUSB320 is not set # CONFIG_MEMORY is not set # CONFIG_IIO is not set -# CONFIG_NTB is not set +CONFIG_NTB=m +# CONFIG_NTB_MSI is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_EPF is not set +# CONFIG_NTB_SWITCHTEC is not set +# CONFIG_NTB_PINGPONG is not set +# CONFIG_NTB_TOOL is not set +# CONFIG_NTB_PERF is not set +# CONFIG_NTB_TRANSPORT is not set CONFIG_PWM=y 
CONFIG_PWM_SYSFS=y # CONFIG_PWM_DEBUG is not set @@ -5667,6 +5678,7 @@ CONFIG_ARM_GIC_V2M=y CONFIG_ARM_GIC_V3=y CONFIG_ARM_GIC_V3_ITS=y CONFIG_ARM_GIC_V3_ITS_PCI=y +CONFIG_ARM_GIC_PHYTIUM_2500=y # CONFIG_AL_FIC is not set CONFIG_HISILICON_IRQ_MBIGEN=y # CONFIG_XILINX_INTC is not set @@ -5745,7 +5757,7 @@ CONFIG_ARM_CCN=y CONFIG_ARM_CMN=y CONFIG_ARM_PMU=y CONFIG_ARM_PMU_ACPI=y -CONFIG_ARM_SMMU_V3_PMU=y +CONFIG_ARM_SMMU_V3_PMU=m CONFIG_ARM_PMUV3=y CONFIG_ARM_DSU_PMU=y CONFIG_QCOM_L2_PMU=y @@ -5756,11 +5768,11 @@ CONFIG_ARM_SPE_PMU=m # CONFIG_ARM_DMC620_PMU is not set # CONFIG_MARVELL_CN10K_TAD_PMU is not set CONFIG_ALIBABA_UNCORE_DRW_PMU=m -CONFIG_DWC_PCIE_PMU=m CONFIG_HISI_PMU=m CONFIG_HISI_PCIE_PMU=m # CONFIG_HNS3_PMU is not set # CONFIG_MARVELL_CN10K_DDR_PMU is not set +CONFIG_DWC_PCIE_PMU=m # CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set # end of Performance monitor support @@ -6523,9 +6535,9 @@ CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_SAFEXCEL is not set # CONFIG_CRYPTO_DEV_CCREE is not set CONFIG_CRYPTO_DEV_HISI_SEC=m -# CONFIG_CRYPTO_DEV_HISI_SEC2 is not set +CONFIG_CRYPTO_DEV_HISI_SEC2=m CONFIG_CRYPTO_DEV_HISI_QM=m -# CONFIG_CRYPTO_DEV_HISI_ZIP is not set +CONFIG_CRYPTO_DEV_HISI_ZIP=m CONFIG_CRYPTO_DEV_HISI_HPRE=m CONFIG_CRYPTO_DEV_HISI_TRNG=m # CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 36a089094635..05b04486b420 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -414,7 +414,7 @@ CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=8192 CONFIG_NR_CPUS_DEFAULT=64 CONFIG_NR_CPUS=1024 -# CONFIG_SCHED_CLUSTER is not set +CONFIG_SCHED_CLUSTER=y CONFIG_SCHED_SMT=y CONFIG_SCHED_MC=y CONFIG_SCHED_MC_PRIO=y @@ -2430,7 +2430,7 @@ CONFIG_VMWARE_VMCI=m # CONFIG_MISC_ALCOR_PCI is not set CONFIG_MISC_RTSX_PCI=m CONFIG_MISC_RTSX_USB=m -# CONFIG_UACCE is not set +CONFIG_UACCE=m CONFIG_PVPANIC=y # 
CONFIG_PVPANIC_MMIO is not set # CONFIG_PVPANIC_PCI is not set @@ -2597,7 +2597,7 @@ CONFIG_ATA_PIIX=m # CONFIG_SATA_ULI is not set # CONFIG_SATA_VIA is not set # CONFIG_SATA_VITESSE is not set -# CONFIG_SATA_ZHAOXIN is not set +CONFIG_SATA_ZHAOXIN=m # # PATA SFF controllers with BMDMA @@ -3751,7 +3751,7 @@ CONFIG_HW_RANDOM_INTEL=m CONFIG_HW_RANDOM_AMD=m # CONFIG_HW_RANDOM_BA431 is not set CONFIG_HW_RANDOM_VIA=m -CONFIG_HW_RANDOM_ZHAOXIN=y +CONFIG_HW_RANDOM_ZHAOXIN=m CONFIG_HW_RANDOM_VIRTIO=y # CONFIG_HW_RANDOM_XIPHERA is not set # CONFIG_APPLICOM is not set @@ -3839,13 +3839,13 @@ CONFIG_I2C_NFORCE2_S4985=m CONFIG_I2C_SIS96X=m CONFIG_I2C_VIA=m CONFIG_I2C_VIAPRO=m -# CONFIG_I2C_ZHAOXIN is not set +CONFIG_I2C_ZHAOXIN=m # # ACPI drivers # CONFIG_I2C_SCMI=m -# CONFIG_I2C_ZHAOXIN_SMBUS is not set +CONFIG_I2C_ZHAOXIN_SMBUS=m # # I2C system bus drivers (mostly embedded / system-on-chip) @@ -6655,6 +6655,7 @@ CONFIG_IDLE_INJECT=y # # Performance monitor support # +# CONFIG_DWC_PCIE_PMU is not set # end of Performance monitor support CONFIG_RAS=y @@ -7387,6 +7388,7 @@ CONFIG_CRYPTO_SERPENT_AVX_X86_64=m CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m CONFIG_CRYPTO_TWOFISH_X86_64=m CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m @@ -7404,16 +7406,19 @@ CONFIG_CRYPTO_SHA1_SSSE3=y CONFIG_CRYPTO_SHA256_SSSE3=y CONFIG_CRYPTO_SHA512_SSSE3=y CONFIG_CRYPTO_SM3_AVX_X86_64=m +CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m CONFIG_CRYPTO_CRC32C_INTEL=m CONFIG_CRYPTO_CRC32_PCLMUL=m CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m +CONFIG_CRYPTO_SM2_ZHAOXIN_GMI=m # end of Accelerated Cryptographic Algorithms for CPU (x86) CONFIG_CRYPTO_HW=y CONFIG_CRYPTO_DEV_PADLOCK=m CONFIG_CRYPTO_DEV_PADLOCK_AES=m CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +# CONFIG_CRYPTO_DEV_ZHAOXIN is not set # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set # CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set 
CONFIG_CRYPTO_DEV_CCP=y @@ -7421,6 +7426,7 @@ CONFIG_CRYPTO_DEV_CCP_DD=m CONFIG_CRYPTO_DEV_SP_CCP=y CONFIG_CRYPTO_DEV_CCP_CRYPTO=m CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_HYGON_GM=y # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m @@ -7813,6 +7819,7 @@ CONFIG_LOCKUP_DETECTOR=y CONFIG_SOFTLOCKUP_DETECTOR=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_SDEI_WATCHDOG is not set CONFIG_HARDLOCKUP_DETECTOR=y # CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set CONFIG_HARDLOCKUP_DETECTOR_PERF=y diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 23258f261dcb..ca8ff01300ac 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -411,7 +411,7 @@ CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=8192 CONFIG_NR_CPUS_DEFAULT=64 CONFIG_NR_CPUS=1024 -# CONFIG_SCHED_CLUSTER is not set +CONFIG_SCHED_CLUSTER=y CONFIG_SCHED_SMT=y CONFIG_SCHED_MC=y CONFIG_SCHED_MC_PRIO=y @@ -2425,7 +2425,7 @@ CONFIG_VMWARE_VMCI=m # CONFIG_MISC_ALCOR_PCI is not set CONFIG_MISC_RTSX_PCI=m CONFIG_MISC_RTSX_USB=m -# CONFIG_UACCE is not set +CONFIG_UACCE=m CONFIG_PVPANIC=y # CONFIG_PVPANIC_MMIO is not set # CONFIG_PVPANIC_PCI is not set @@ -2592,7 +2592,7 @@ CONFIG_ATA_PIIX=m # CONFIG_SATA_ULI is not set # CONFIG_SATA_VIA is not set # CONFIG_SATA_VITESSE is not set -# CONFIG_SATA_ZHAOXIN is not set +CONFIG_SATA_ZHAOXIN=m # # PATA SFF controllers with BMDMA @@ -3745,7 +3745,7 @@ CONFIG_HW_RANDOM_INTEL=m CONFIG_HW_RANDOM_AMD=m # CONFIG_HW_RANDOM_BA431 is not set CONFIG_HW_RANDOM_VIA=m -CONFIG_HW_RANDOM_ZHAOXIN=y +CONFIG_HW_RANDOM_ZHAOXIN=m CONFIG_HW_RANDOM_VIRTIO=y # CONFIG_HW_RANDOM_XIPHERA is not set # CONFIG_APPLICOM is not set @@ -3833,13 +3833,13 @@ CONFIG_I2C_NFORCE2_S4985=m CONFIG_I2C_SIS96X=m CONFIG_I2C_VIA=m CONFIG_I2C_VIAPRO=m -# CONFIG_I2C_ZHAOXIN is not set +CONFIG_I2C_ZHAOXIN=m # # ACPI drivers # CONFIG_I2C_SCMI=m -# 
CONFIG_I2C_ZHAOXIN_SMBUS is not set +CONFIG_I2C_ZHAOXIN_SMBUS=m # # I2C system bus drivers (mostly embedded / system-on-chip) @@ -6644,6 +6644,7 @@ CONFIG_IDLE_INJECT=y # # Performance monitor support # +# CONFIG_DWC_PCIE_PMU is not set # end of Performance monitor support CONFIG_RAS=y @@ -7399,6 +7400,7 @@ CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m CONFIG_CRYPTO_CRC32C_INTEL=m CONFIG_CRYPTO_CRC32_PCLMUL=m CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m +CONFIG_CRYPTO_SM2_ZHAOXIN_GMI=m # end of Accelerated Cryptographic Algorithms for CPU (x86) CONFIG_CRYPTO_HW=y @@ -7415,6 +7417,7 @@ CONFIG_CRYPTO_DEV_CCP_DD=m CONFIG_CRYPTO_DEV_SP_CCP=y CONFIG_CRYPTO_DEV_CCP_CRYPTO=m CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_HYGON_GM=y # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m @@ -7771,6 +7774,7 @@ CONFIG_LOCKUP_DETECTOR=y CONFIG_SOFTLOCKUP_DETECTOR=y # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_SDEI_WATCHDOG is not set CONFIG_HARDLOCKUP_DETECTOR=y # CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set CONFIG_HARDLOCKUP_DETECTOR_PERF=y -- Gitee From 92a414be3d6312012eae61d64a115d53c78fbd15 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Thu, 14 Mar 2024 20:46:36 +0800 Subject: [PATCH 0520/2138] anolis: crypto: ccp: concurrent psp access support between user and kernel space ANBZ: #8628 Add a self-defined mutex to support concurrent psp access between kernel space and user space. 
Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2955 --- drivers/crypto/ccp/psp-dev.c | 173 +++++++++++++++++++++++++++++++++++ drivers/crypto/ccp/psp-dev.h | 16 ++++ drivers/crypto/ccp/sev-dev.c | 69 ++++++++++++-- 3 files changed, 248 insertions(+), 10 deletions(-) diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 223e198eddec..f26026733356 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -9,6 +9,7 @@ #include #include +#include #include "sp-dev.h" #include "psp-dev.h" @@ -19,6 +20,55 @@ struct psp_device *psp_master; +struct psp_misc_dev *psp_misc; +int is_hygon_psp; + +uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) +{ + return xchg(dst, val); +} + +int psp_mutex_init(struct psp_mutex *mutex) +{ + if (!mutex) + return -1; + mutex->locked = 0; + return 0; +} + +int psp_mutex_trylock(struct psp_mutex *mutex) +{ + if (atomic64_exchange(&mutex->locked, 1)) + return 0; + else + return 1; +} + +int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms) +{ + int ret = 0; + unsigned long je; + + je = jiffies + msecs_to_jiffies(ms); + do { + if (psp_mutex_trylock(mutex)) { + ret = 1; + break; + } + } while (time_before(jiffies, je)); + + return ret; +} + +int psp_mutex_unlock(struct psp_mutex *mutex) +{ + if (!mutex) + return -1; + + atomic64_exchange(&mutex->locked, 0); + return 0; +} + static struct psp_device *psp_alloc_struct(struct sp_device *sp) { struct device *dev = sp->dev; @@ -156,9 +206,120 @@ static int psp_init(struct psp_device *psp) return 0; } +static int mmap_psp(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long page; + + page = virt_to_phys((void *)psp_misc->data_pg_aligned) >> PAGE_SHIFT; + + if (remap_pfn_range(vma, vma->vm_start, page, (vma->vm_end - vma->vm_start), + vma->vm_page_prot)) { + printk(KERN_ERR "remap failed..."); + return -1; + } + vm_flags_mod(vma, VM_DONTDUMP | 
VM_DONTEXPAND, 0); + printk(KERN_INFO "remap_pfn_rang page:[%lu] ok.\n", page); + return 0; +} + +static ssize_t read_psp(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t remaining; + + if ((*ppos + count) > PAGE_SIZE) { + printk(KERN_ERR "%s: invalid address range, pos %llx, count %lx\n", + __func__, *ppos, count); + return -EFAULT; + } + + remaining = copy_to_user(buf, (char *)psp_misc->data_pg_aligned + *ppos, count); + if (remaining) + return -EFAULT; + + *ppos += count; + + return count; +} + +static ssize_t write_psp(struct file *file, const char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t remaining, written; + + if ((*ppos + count) > PAGE_SIZE) { + printk(KERN_ERR "%s: invalid address range, pos %llx, count %lx\n", + __func__, *ppos, count); + return -EFAULT; + } + + remaining = copy_from_user((char *)psp_misc->data_pg_aligned + *ppos, buf, count); + written = count - remaining; + if (!written) + return -EFAULT; + + *ppos += written; + + return written; +} + +static const struct file_operations psp_fops = { + .owner = THIS_MODULE, + .mmap = mmap_psp, + .read = read_psp, + .write = write_psp, +}; + +static int hygon_psp_additional_setup(struct sp_device *sp) +{ + struct device *dev = sp->dev; + int ret = 0; + + if (!psp_misc) { + struct miscdevice *misc; + + psp_misc = devm_kzalloc(dev, sizeof(*psp_misc), GFP_KERNEL); + if (!psp_misc) + return -ENOMEM; + psp_misc->data_pg_aligned = (struct psp_dev_data *)get_zeroed_page(GFP_KERNEL); + if (!psp_misc->data_pg_aligned) { + dev_err(dev, "alloc psp data page failed\n"); + devm_kfree(dev, psp_misc); + psp_misc = NULL; + return -ENOMEM; + } + SetPageReserved(virt_to_page(psp_misc->data_pg_aligned)); + psp_mutex_init(&psp_misc->data_pg_aligned->mb_mutex); + + *(uint32_t *)((void *)psp_misc->data_pg_aligned + 8) = 0xdeadbeef; + misc = &psp_misc->misc; + misc->minor = MISC_DYNAMIC_MINOR; + misc->name = "hygon_psp_config"; + misc->fops = &psp_fops; + + ret = misc_register(misc); + 
if (ret) + return ret; + kref_init(&psp_misc->refcount); + } else { + kref_get(&psp_misc->refcount); + } + + return ret; +} + +static void hygon_psp_exit(struct kref *ref) +{ + struct psp_misc_dev *misc_dev = container_of(ref, struct psp_misc_dev, refcount); + + misc_deregister(&misc_dev->misc); + ClearPageReserved(virt_to_page(misc_dev->data_pg_aligned)); + free_page((unsigned long)misc_dev->data_pg_aligned); + psp_misc = NULL; +} + int psp_dev_init(struct sp_device *sp) { struct device *dev = sp->dev; + struct pci_dev *pdev = to_pci_dev(dev); struct psp_device *psp; int ret; @@ -186,6 +347,15 @@ int psp_dev_init(struct sp_device *sp) iowrite32(0, psp->io_regs + psp->vdata->inten_reg); iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg); + if (pdev->vendor == PCI_VENDOR_ID_HYGON) { + is_hygon_psp = 1; + ret = hygon_psp_additional_setup(sp); + if (ret) { + dev_err(dev, "psp: unable to do additional setup\n"); + goto e_err; + } + } + /* Request an irq */ ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); if (ret) { @@ -237,6 +407,9 @@ void psp_dev_destroy(struct sp_device *sp) tee_dev_destroy(psp); + if (is_hygon_psp && psp_misc) + kref_put(&psp_misc->refcount, hygon_psp_exit); + dbc_dev_destroy(psp); platform_access_dev_destroy(psp); diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index 45b6e17d5770..c3cd027197fa 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -14,6 +14,7 @@ #include #include #include +#include #include "sp-dev.h" @@ -58,6 +59,21 @@ struct psp_device { unsigned int capability; }; +#define PSP_MUTEX_TIMEOUT 10000 +struct psp_mutex { + uint64_t locked; +}; + +struct psp_dev_data { + struct psp_mutex mb_mutex; +}; + +struct psp_misc_dev { + struct kref refcount; + struct psp_dev_data *data_pg_aligned; + struct miscdevice misc; +}; + void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler, void *data); void psp_clear_sev_irq_handler(struct psp_device 
*psp); diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index b38e2bf88b00..145b632575a3 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -66,6 +66,10 @@ static bool psp_dead; static int psp_timeout; static int csv_comm_mode = CSV_COMM_MAILBOX_ON; +extern int is_hygon_psp; +extern struct psp_misc_dev *psp_misc; +extern int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms); +extern int psp_mutex_unlock(struct psp_mutex *mutex); /* Trusted Memory Region (TMR): * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator @@ -553,7 +557,13 @@ static int csv_do_ringbuf_cmds(int *psp_ret) struct sev_user_data_status data; int rc; - mutex_lock(&sev_cmd_mutex); + if (is_hygon_psp) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } rc = __csv_ring_buffer_enter_locked(psp_ret); if (rc) @@ -566,7 +576,10 @@ static int csv_do_ringbuf_cmds(int *psp_ret) csv_comm_mode = CSV_COMM_MAILBOX_ON; cmd_unlock: - mutex_unlock(&sev_cmd_mutex); + if (is_hygon_psp) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -575,9 +588,18 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; - mutex_lock(&sev_cmd_mutex); + if (is_hygon_psp) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } rc = __sev_do_cmd_locked(cmd, data, psp_ret); - mutex_unlock(&sev_cmd_mutex); + if (is_hygon_psp) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -696,9 +718,18 @@ int sev_platform_init(int *error) { int rc; - mutex_lock(&sev_cmd_mutex); + if (is_hygon_psp) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + 
mutex_lock(&sev_cmd_mutex); + } rc = __sev_platform_init_locked(error); - mutex_unlock(&sev_cmd_mutex); + if (is_hygon_psp) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -737,9 +768,18 @@ static int sev_platform_shutdown(int *error) { int rc; - mutex_lock(&sev_cmd_mutex); + if (is_hygon_psp) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } rc = __sev_platform_shutdown_locked(NULL); - mutex_unlock(&sev_cmd_mutex); + if (is_hygon_psp) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -1415,7 +1455,13 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) return -EINVAL; } - mutex_lock(&sev_cmd_mutex); + if (is_hygon_psp) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { switch (input.cmd) { @@ -1475,7 +1521,10 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; out: - mutex_unlock(&sev_cmd_mutex); + if (is_hygon_psp) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return ret; } -- Gitee From 5f5c28a86b087f45313d4907af8d36325215cfdc Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Thu, 14 Mar 2024 20:50:25 +0800 Subject: [PATCH 0521/2138] anolis: crypto: ccp: Add psp mutex enable ioctl support ANBZ: #8628 Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2955 --- drivers/crypto/ccp/psp-dev.c | 52 +++++++++++++++++++++++++++++++++++- drivers/crypto/ccp/psp-dev.h | 2 +- drivers/crypto/ccp/sev-dev.c | 28 
+++++++++++-------- 3 files changed, 69 insertions(+), 13 deletions(-) diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index f26026733356..b1c0c205dac7 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -9,6 +9,7 @@ #include #include +#include #include #include "sp-dev.h" @@ -22,6 +23,14 @@ struct psp_device *psp_master; struct psp_misc_dev *psp_misc; int is_hygon_psp; +#define HYGON_PSP_IOC_TYPE 'H' +enum HYGON_PSP_OPCODE { + HYGON_PSP_MUTEX_ENABLE = 1, + HYGON_PSP_MUTEX_DISABLE, + HYGON_PSP_OPCODE_MAX_NR, +}; +int psp_mutex_enabled; +extern struct mutex sev_cmd_mutex; uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) { @@ -55,7 +64,7 @@ int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms) ret = 1; break; } - } while (time_before(jiffies, je)); + } while ((ms == 0) || time_before(jiffies, je)); return ret; } @@ -261,11 +270,51 @@ static ssize_t write_psp(struct file *file, const char __user *buf, size_t count return written; } +static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) +{ + unsigned int opcode = 0; + + if (_IOC_TYPE(ioctl) != HYGON_PSP_IOC_TYPE) { + printk(KERN_ERR "%s: invalid ioctl type: 0x%x\n", __func__, _IOC_TYPE(ioctl)); + return -EINVAL; + } + opcode = _IOC_NR(ioctl); + switch (opcode) { + case HYGON_PSP_MUTEX_ENABLE: + psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); + // And get the sev lock to make sure no one is using it now. + mutex_lock(&sev_cmd_mutex); + psp_mutex_enabled = 1; + mutex_unlock(&sev_cmd_mutex); + // Wait 10ms just in case someone is right before getting the psp lock. + mdelay(10); + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + break; + + case HYGON_PSP_MUTEX_DISABLE: + mutex_lock(&sev_cmd_mutex); + // And get the psp lock to make sure no one is using it now. 
+ psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); + psp_mutex_enabled = 0; + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + // Wait 10ms just in case someone is right before getting the sev lock. + mdelay(10); + mutex_unlock(&sev_cmd_mutex); + break; + + default: + printk(KERN_ERR "%s: invalid ioctl number: %d\n", __func__, opcode); + return -EINVAL; + } + return 0; +} + static const struct file_operations psp_fops = { .owner = THIS_MODULE, .mmap = mmap_psp, .read = read_psp, .write = write_psp, + .unlocked_ioctl = ioctl_psp, }; static int hygon_psp_additional_setup(struct sp_device *sp) @@ -349,6 +398,7 @@ int psp_dev_init(struct sp_device *sp) if (pdev->vendor == PCI_VENDOR_ID_HYGON) { is_hygon_psp = 1; + psp_mutex_enabled = 0; ret = hygon_psp_additional_setup(sp); if (ret) { dev_err(dev, "psp: unable to do additional setup\n"); diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index c3cd027197fa..b0a7bf42e552 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -59,7 +59,7 @@ struct psp_device { unsigned int capability; }; -#define PSP_MUTEX_TIMEOUT 10000 +#define PSP_MUTEX_TIMEOUT 600000 struct psp_mutex { uint64_t locked; }; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 145b632575a3..7484d05ba7d9 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -38,7 +38,7 @@ #define CSV_FW_FILE "hygon/csv.fw" #define SEV_FW_NAME_SIZE 64 -static DEFINE_MUTEX(sev_cmd_mutex); +DEFINE_MUTEX(sev_cmd_mutex); static struct sev_misc_dev *misc_dev; static int psp_cmd_timeout = 100; @@ -70,6 +70,7 @@ extern int is_hygon_psp; extern struct psp_misc_dev *psp_misc; extern int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms); extern int psp_mutex_unlock(struct psp_mutex *mutex); +extern int psp_mutex_enabled; /* Trusted Memory Region (TMR): * The TMR is a 1MB area that must be 1MB aligned. 
Use the page allocator @@ -556,8 +557,9 @@ static int csv_do_ringbuf_cmds(int *psp_ret) { struct sev_user_data_status data; int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); - if (is_hygon_psp) { + if (is_hygon_psp && mutex_enabled) { if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -576,7 +578,7 @@ static int csv_do_ringbuf_cmds(int *psp_ret) csv_comm_mode = CSV_COMM_MAILBOX_ON; cmd_unlock: - if (is_hygon_psp) + if (is_hygon_psp && mutex_enabled) psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); @@ -587,8 +589,9 @@ static int csv_do_ringbuf_cmds(int *psp_ret) static int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); - if (is_hygon_psp) { + if (is_hygon_psp && mutex_enabled) { if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -596,7 +599,7 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret) mutex_lock(&sev_cmd_mutex); } rc = __sev_do_cmd_locked(cmd, data, psp_ret); - if (is_hygon_psp) + if (is_hygon_psp && mutex_enabled) psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); @@ -717,8 +720,9 @@ static int __sev_platform_init_locked(int *error) int sev_platform_init(int *error) { int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); - if (is_hygon_psp) { + if (is_hygon_psp && mutex_enabled) { if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -726,7 +730,7 @@ int sev_platform_init(int *error) mutex_lock(&sev_cmd_mutex); } rc = __sev_platform_init_locked(error); - if (is_hygon_psp) + if (is_hygon_psp && mutex_enabled) psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); @@ -767,8 +771,9 @@ static int __sev_platform_shutdown_locked(int *error) static int sev_platform_shutdown(int *error) { int rc; + 
int mutex_enabled = READ_ONCE(psp_mutex_enabled); - if (is_hygon_psp) { + if (is_hygon_psp && mutex_enabled) { if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -776,7 +781,7 @@ static int sev_platform_shutdown(int *error) mutex_lock(&sev_cmd_mutex); } rc = __sev_platform_shutdown_locked(NULL); - if (is_hygon_psp) + if (is_hygon_psp && mutex_enabled) psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); @@ -1437,6 +1442,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) struct sev_issue_cmd input; int ret = -EFAULT; bool writable = file->f_mode & FMODE_WRITE; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); if (!psp_master || !psp_master->sev_data) return -ENODEV; @@ -1455,7 +1461,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) return -EINVAL; } - if (is_hygon_psp) { + if (is_hygon_psp && mutex_enabled) { if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -1521,7 +1527,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; out: - if (is_hygon_psp) + if (is_hygon_psp && mutex_enabled) psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); -- Gitee From 3129dbe7d94dfecf71328278a4211923783a2a5d Mon Sep 17 00:00:00 2001 From: niuyongwen Date: Thu, 10 Aug 2023 10:53:18 +0800 Subject: [PATCH 0522/2138] anolis: kvm: Support psp virtualization ANBZ: #8628 Add KVM_HC_PSP_OP option to kvm_emulate_hypercall, Used to receive PSP commands for virtual machines. 
Signed-off-by: niuyongwen Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2955 --- arch/x86/include/asm/kvm_host.h | 2 + arch/x86/kvm/Makefile | 2 +- arch/x86/kvm/hygon/psp.c | 83 ++++++++++++++++++++++++++++++++ arch/x86/kvm/x86.c | 5 +- drivers/crypto/ccp/sev-dev.c | 84 +++++++++++++++++++++++++++++++++ include/linux/psp-sev.h | 5 ++ include/uapi/linux/kvm_para.h | 1 + 7 files changed, 180 insertions(+), 2 deletions(-) create mode 100644 arch/x86/kvm/hygon/psp.c diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 6ff68a0e43de..6a3ae64dfd06 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2147,6 +2147,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, unsigned long ipi_bitmap_high, u32 min, unsigned long icr, int op_64_bit); +int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, + gpa_t psp_ret_gpa, gpa_t table_gpa); int kvm_add_user_return_msr(u32 msr); int kvm_find_user_return_msr(u32 msr); diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index a99ffc3f3a3f..c88c8847dfd1 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -12,7 +12,7 @@ include $(srctree)/virt/kvm/Makefile.kvm kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \ - mmu/spte.o + mmu/spte.o hygon/psp.o ifdef CONFIG_HYPERV kvm-y += kvm_onhyperv.o diff --git a/arch/x86/kvm/hygon/psp.c b/arch/x86/kvm/hygon/psp.c new file mode 100644 index 000000000000..40f5ee0e6e42 --- /dev/null +++ b/arch/x86/kvm/hygon/psp.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PSP virtualization + * + * Copyright (c) 2023, HYGON CORPORATION. All rights reserved. 
+ * Author: Ge Yang + * + */ + +#include +#include +#include +#include + +struct psp_cmdresp_head { + uint32_t buf_size; + uint32_t cmdresp_size; + uint32_t cmdresp_code; +} __packed; + +int guest_addr_map_table_op(void *data_hva, gpa_t data_gpa, gpa_t table_gpa, + int op) +{ + return 0; +} + +int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, + gpa_t table_gpa) +{ + void *data; + struct psp_cmdresp_head psp_head; + uint32_t data_size; + int psp_ret = 0; + int ret = 0; + + if (unlikely(kvm_read_guest(kvm, data_gpa, &psp_head, + sizeof(struct psp_cmdresp_head)))) + return -EFAULT; + + data_size = psp_head.buf_size; + data = kzalloc(data_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + if (unlikely(kvm_read_guest(kvm, data_gpa, data, data_size))) { + ret = -EFAULT; + goto e_free; + } + + if (guest_addr_map_table_op(data, data_gpa, table_gpa, 0)) { + ret = -EFAULT; + goto e_free; + } + + ret = psp_do_cmd(cmd, data, &psp_ret); + if (ret) { + pr_err("%s: psp do cmd error, %d\n", __func__, psp_ret); + ret = -EIO; + goto e_free; + } + + if (guest_addr_map_table_op(data, data_gpa, table_gpa, 1)) { + ret = -EFAULT; + goto e_free; + } + + if (unlikely(kvm_write_guest(kvm, data_gpa, data, data_size))) { + ret = -EFAULT; + goto e_free; + } + + if (unlikely(kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, + sizeof(psp_ret)))) { + ret = -EFAULT; + goto e_free; + } + + return ret; + +e_free: + kfree(data); + return ret; +} diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index dfa0d0cd671e..f5abd4848400 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9883,7 +9883,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) } if (static_call(kvm_x86_get_cpl)(vcpu) != 0 && - nr != KVM_HC_VM_ATTESTATION) { + !(nr == KVM_HC_VM_ATTESTATION || nr == KVM_HC_PSP_OP)) { ret = -KVM_EPERM; goto out; } @@ -9951,6 +9951,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) if (kvm_x86_ops.vm_attestation) ret = 
static_call(kvm_x86_vm_attestation)(vcpu->kvm, a0, a1); break; + case KVM_HC_PSP_OP: + ret = kvm_pv_psp_op(vcpu->kvm, a0, a1, a2, a3); + break; default: ret = -KVM_ENOSYS; break; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 7484d05ba7d9..f2901059d29c 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -438,6 +438,68 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) return ret; } +static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* Get the physical address of the command buffer */ + phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; + phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, psp_timeout); + + print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, + sev_cmd_buffer_len(cmd), false); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = sev_wait_cmd_ioc(sev, ®, psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + psp_dead = true; + + return ret; + } + + psp_timeout = psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + print_hex_dump_debug("(out): ", 
DUMP_PREFIX_OFFSET, 16, 2, data, + sev_cmd_buffer_len(cmd), false); + + return ret; +} + static int __csv_ring_buffer_enter_locked(int *error) { struct psp_device *psp = psp_master; @@ -607,6 +669,28 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret) return rc; } +int psp_do_cmd(int cmd, void *data, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); + + if (is_hygon_psp && mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } + rc = __psp_do_cmd_locked(cmd, data, psp_ret); + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); + + return rc; +} +EXPORT_SYMBOL_GPL(psp_do_cmd); + static int __sev_init_locked(int *error) { struct sev_data_init data; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 55dd35ce920f..c18706b3e47b 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -633,6 +633,8 @@ struct csv_data_ring_buffer { #ifdef CONFIG_CRYPTO_DEV_SP_PSP +int psp_do_cmd(int cmd, void *data, int *psp_ret); + /** * sev_platform_init - perform SEV INIT command * @@ -763,6 +765,9 @@ int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ +static inline int +psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; } + static inline int sev_platform_status(struct sev_user_data_status *status, int *error) { return -ENODEV; } diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index 67192835455e..86369b7a5733 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -31,6 +31,7 @@ #define KVM_HC_SCHED_YIELD 11 #define KVM_HC_MAP_GPA_RANGE 12 #define KVM_HC_VM_ATTESTATION 100 /* Specific to Hygon CPU */ +#define KVM_HC_PSP_OP 101 /* Specific to Hygon platform */ /* * hypercalls use architecture 
specific -- Gitee From 54c15a09851f2d650bdc41e8baf02af9ae94ea9e Mon Sep 17 00:00:00 2001 From: niuyongwen Date: Thu, 10 Aug 2023 11:05:02 +0800 Subject: [PATCH 0523/2138] anolis: crypto: ccp: Support sending tkm commands based on ringbuffer ANBZ: #8628 Signed-off-by: niuyongwen Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2955 --- drivers/crypto/ccp/psp-ringbuf.c | 23 ++ drivers/crypto/ccp/psp-ringbuf.h | 4 + drivers/crypto/ccp/sev-dev.c | 468 ++++++++++++++++++++++++++++++- include/linux/psp-sev.h | 50 ++++ 4 files changed, 543 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/psp-ringbuf.c b/drivers/crypto/ccp/psp-ringbuf.c index 3b2f461b672c..9b5f886c0b40 100644 --- a/drivers/crypto/ccp/psp-ringbuf.c +++ b/drivers/crypto/ccp/psp-ringbuf.c @@ -22,6 +22,7 @@ static void enqueue_data(struct csv_queue *queue, unsigned int l; void *data; + off &= queue->mask; if (esize != 1) { off *= esize; size *= esize; @@ -117,3 +118,25 @@ unsigned int csv_dequeue_stat(struct csv_queue *queue, queue->head += len; return len; } + +unsigned int csv_dequeue_cmd(struct csv_queue *queue, + void *buf, unsigned int len) +{ + unsigned int size; + + size = queue->tail - queue->head; + if (len > size) + len = size; + + dequeue_data(queue, buf, len, queue->head); + queue->head += len; + return len; +} + +unsigned int csv_cmd_queue_size(struct csv_queue *queue) +{ + unsigned int free_size; + + free_size = queue_avail_size(queue); + return queue->mask - free_size; +} diff --git a/drivers/crypto/ccp/psp-ringbuf.h b/drivers/crypto/ccp/psp-ringbuf.h index 50e014deb5ce..336352cc7a66 100644 --- a/drivers/crypto/ccp/psp-ringbuf.h +++ b/drivers/crypto/ccp/psp-ringbuf.h @@ -32,4 +32,8 @@ unsigned int csv_enqueue_cmd(struct csv_queue *queue, unsigned int csv_dequeue_stat(struct csv_queue *queue, void *buf, unsigned int len); +unsigned int csv_dequeue_cmd(struct csv_queue *ring_buf, + void *buf, unsigned int 
len); + +unsigned int csv_cmd_queue_size(struct csv_queue *ring_buf); #endif /* __PSP_RINGBUF_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index f2901059d29c..75ccfce02e76 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -69,9 +69,24 @@ static int csv_comm_mode = CSV_COMM_MAILBOX_ON; extern int is_hygon_psp; extern struct psp_misc_dev *psp_misc; extern int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms); +extern int psp_mutex_trylock(struct psp_mutex *mutex); extern int psp_mutex_unlock(struct psp_mutex *mutex); extern int psp_mutex_enabled; +/* defination of variabled used by virtual psp */ +enum VPSP_RB_CHECK_STATUS { + RB_NOT_CHECK = 0, + RB_CHECKING, + RB_CHECKED, + RB_CHECK_MAX +}; +#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) +#define VPSP_CMD_STATUS_RUNNING 0xffff +static DEFINE_MUTEX(vpsp_rb_mutex); +struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; +static uint8_t vpsp_rb_supported; +static atomic_t vpsp_rb_check_status = ATOMIC_INIT(RB_NOT_CHECK); + /* Trusted Memory Region (TMR): * The TMR is a 1MB area that must be 1MB aligned. 
Use the page allocator * to allocate the memory, which will return aligned memory for the specified @@ -1669,7 +1684,7 @@ static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer return -ENOMEM; csv_queue_init(&ring_buffer->cmd_ptr, cmd_ptr_buffer, - CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); stat_val_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); if (!stat_val_buffer) { @@ -1678,7 +1693,7 @@ static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer } csv_queue_init(&ring_buffer->stat_val, stat_val_buffer, - CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); return 0; free_cmdptr: @@ -1795,6 +1810,455 @@ int csv_ring_buffer_queue_free(void) } EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_free); +static int get_queue_tail(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.tail & ringbuffer->cmd_ptr.mask; +} + +static int get_queue_head(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.head & ringbuffer->cmd_ptr.mask; +} + +static void vpsp_set_cmd_status(int prio, int index, int status) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + statval[index].status = status; +} + +static int vpsp_get_cmd_status(int prio, int index) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + return statval[index].status; +} + +static unsigned int vpsp_queue_cmd_size(int prio) +{ + return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); +} + +static int vpsp_dequeue_cmd(int prio, int index, + struct csv_cmdptr_entry *cmd_ptr) +{ + mutex_lock(&vpsp_rb_mutex); + + /* The status update must be before the head update */ + vpsp_set_cmd_status(prio, index, 0); + 
csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); + + mutex_unlock(&vpsp_rb_mutex); + + return 0; +} + +/* + * Populate the command from the virtual machine to the queue to + * support execution in ringbuffer mode + */ +static int vpsp_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) +{ + struct csv_cmdptr_entry cmdptr = { }; + int index = -1; + + cmdptr.cmd_buf_ptr = __psp_pa(data); + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + mutex_lock(&vpsp_rb_mutex); + index = get_queue_tail(&vpsp_ring_buffer[prio]); + + /* If status is equal to VPSP_CMD_STATUS_RUNNING, then the queue is full */ + if (vpsp_get_cmd_status(prio, index) == VPSP_CMD_STATUS_RUNNING) { + index = -1; + goto out; + } + + /* The status must be written first, and then the cmd can be enqueued */ + vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); + if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { + vpsp_set_cmd_status(prio, index, 0); + index = -1; + goto out; + } + +out: + mutex_unlock(&vpsp_rb_mutex); + return index; +} + +static void vpsp_ring_update_head(struct csv_ringbuffer_queue *ring_buffer, + uint32_t new_head) +{ + uint32_t orig_head = get_queue_head(ring_buffer); + uint32_t comple_num = 0; + + if (new_head >= orig_head) + comple_num = new_head - orig_head; + else + comple_num = ring_buffer->cmd_ptr.mask - (orig_head - new_head) + + 1; + + ring_buffer->cmd_ptr.head += comple_num; +} + +static int vpsp_ring_buffer_queue_init(void) +{ + int i; + int ret; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ret = __csv_ring_buffer_queue_init(&vpsp_ring_buffer[i]); + if (ret) + return ret; + } + + return 0; +} + +static int __vpsp_ring_buffer_enter_locked(int *error) +{ + int ret; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + struct sev_device *sev = psp_master->sev_data; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + 
return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = __sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + +static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + struct psp_device *psp = psp_master; + unsigned int reg, ret = 0; + unsigned int rb_tail, rb_head; + unsigned int rb_ctl; + struct sev_device *sev; + + if (!psp) + return -ENODEV; + + if (psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb head */ + rb_head = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + rb_head &= (~PSP_RBHEAD_QHI_HEAD_MASK); + rb_head |= (get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBHEAD_QHI_HEAD_SHIFT); + rb_head &= (~PSP_RBHEAD_QLO_HEAD_MASK); + rb_head |= get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_head, sev->io_regs + 
sev->vdata->cmdbuff_addr_lo_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + /* PSP response to x86 only when all queue is empty or error happends */ + rb_ctl = (PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT); + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in ring buffer completed */ + ret = csv_wait_cmd_ioc_ring_buffer(sev, ®, psp_timeout*10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(psp->dev, "sev command in ringbuffer mode timed out, disabling PSP\n"); + psp_dead = true; + return ret; + } + /* cmd error happends */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + + /* update head */ + vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH], + (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); + vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW], + reg & PSP_RBHEAD_QLO_HEAD_MASK); + + if (psp_ret) + *psp_ret = vpsp_get_cmd_status(prio, index); + + return ret; +} + +static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + struct sev_user_data_status data; + int rc; + + rc = __vpsp_ring_buffer_enter_locked(psp_ret); + if (rc) + goto end; + + rc = __vpsp_do_ringbuf_cmds_locked(psp_ret, prio, index); + + /* exit ringbuf mode by send CMD in mailbox mode */ + __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, + &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + +end: + return rc; +} + +/** + * struct user_data_status - PLATFORM_STATUS command parameters + * + * @major: major API version + * @minor: minor API version + * @state: platform state + * @owner: self-owned or externally owned + * @chip_secure: ES or MP chip + * @fw_enc: is this FW is encrypted + * @fw_sign: is this FW is signed + * @config_es: platform config flags for csv-es + * @build: Firmware Build ID for this API version + * @bl_version_debug: Bootloader VERSION_DEBUG field + * @bl_version_minor: Bootloader VERSION_MINOR field + * 
@bl_version_major: Bootloader VERSION_MAJOR field + * @guest_count: number of active guests + * @reserved: should set to zero + */ +struct user_data_status { + uint8_t api_major; /* Out */ + uint8_t api_minor; /* Out */ + uint8_t state; /* Out */ + uint8_t owner : 1, /* Out */ + chip_secure : 1, /* Out */ + fw_enc : 1, /* Out */ + fw_sign : 1, /* Out */ + reserved1 : 4; /*reserved*/ + uint32_t config_es : 1, /* Out */ + build : 31; /* Out */ + uint32_t guest_count; /* Out */ +} __packed; + +/* + * Check whether the firmware supports ringbuffer mode and parse + * commands from the virtual machine + */ +static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, + struct vpsp_cmd *vcmd) +{ + int ret, error; + int rb_supported; + int rb_check_old = RB_NOT_CHECK; + struct user_data_status *status = NULL; + + if (atomic_try_cmpxchg(&vpsp_rb_check_status, &rb_check_old, + RB_CHECKING)) { + /* get buildid to check if the firmware supports ringbuffer mode */ + status = kzalloc(sizeof(*status), GFP_KERNEL); + if (!status) { + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + goto end; + } + ret = sev_platform_status((struct sev_user_data_status *)status, + &error); + if (ret) { + pr_warn("failed to get status[%#x], use default command mode.\n", error); + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + goto end; + } + + /* check if the firmware supports the ringbuffer mode */ + if (VPSP_RB_IS_SUPPORTED(status->build)) { + if (vpsp_ring_buffer_queue_init()) { + pr_warn("vpsp_ring_buffer_queue_init fail, use default command mode\n"); + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + goto end; + } + WRITE_ONCE(vpsp_rb_supported, 1); + } + + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + } + +end: + rb_supported = READ_ONCE(vpsp_rb_supported); + /* parse prio by vcmd */ + if (rb_supported && vcmd->is_high_rb) + *prio = CSV_COMMAND_PRIORITY_HIGH; + else + *prio = CSV_COMMAND_PRIORITY_LOW; + /* clear rb level bit in vcmd */ + vcmd->is_high_rb = 0; + + kfree(status); + return 
rb_supported; +} + +/* + * Try to obtain the result again by the command index, this + * interface is used in ringbuffer mode + */ +int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, + struct vpsp_ret *psp_ret) +{ + int ret = 0; + struct csv_cmdptr_entry cmd = {0}; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); + + /* Get the retult directly if the command has been executed */ + if (index >= 0 && vpsp_get_cmd_status(prio, index) != + VPSP_CMD_STATUS_RUNNING) { + psp_ret->pret = vpsp_get_cmd_status(prio, index); + psp_ret->status = VPSP_FINISH; + return 0; + } + + if (is_hygon_psp && mutex_enabled) + ret = psp_mutex_trylock(&psp_misc->data_pg_aligned->mb_mutex); + else + ret = mutex_trylock(&sev_cmd_mutex); + + if (ret) { + /* Use mailbox mode to execute a command if there is only one command */ + if (vpsp_queue_cmd_size(prio) == 1) { + /* dequeue command from queue*/ + vpsp_dequeue_cmd(prio, index, &cmd); + ret = __sev_do_cmd_locked(cmd.cmd_id, data, + (int *)psp_ret); + psp_ret->status = VPSP_FINISH; + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + } else { + ret = vpsp_do_ringbuf_cmds_locked((int *)psp_ret, prio, + index); + psp_ret->status = VPSP_FINISH; + if (unlikely(ret)) { + pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed\n", __func__); + goto end; + } + } + } else { + /* Change the command to the running state if getting the mutex fails */ + psp_ret->index = index; + psp_ret->status = VPSP_RUNNING; + return 0; + } +end: + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(vpsp_try_get_result); + +/* + * Send the virtual psp command to the PSP device and try to get the + * execution result, the interface and the vpsp_try_get_result + * interface are executed asynchronously. 
If the execution succeeds, + * the result is returned to the VM. If the execution fails, the + * vpsp_try_get_result interface will be used to obtain the result + * later again + */ +int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) +{ + int ret = 0; + int rb_supported; + int index = -1; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + + /* ringbuffer mode check and parse command prio*/ + rb_supported = vpsp_rb_check_and_cmd_prio_parse(&prio, + (struct vpsp_cmd *)&cmd); + if (rb_supported) { + /* fill command in ringbuffer's queue and get index */ + index = vpsp_fill_cmd_queue(prio, cmd, data, 0); + if (unlikely(index < 0)) { + /* do mailbox command if queuing failed*/ + ret = psp_do_cmd(cmd, data, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + goto end; + } + + /* try to get result from the ringbuffer command */ + ret = vpsp_try_get_result(prio, index, data, psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed\n", __func__); + goto end; + } + } else { + /* mailbox mode */ + ret = psp_do_cmd(cmd, data, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + } + +end: + return ret; +} +EXPORT_SYMBOL_GPL(vpsp_try_do_cmd); + static void sev_exit(struct kref *ref) { misc_deregister(&misc_dev->misc); diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index c18706b3e47b..df88daf170d3 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -631,6 +631,46 @@ struct csv_data_ring_buffer { u16 int_on_empty; /* In */ } __packed; +/** + * enum VPSP_CMD_STATUS - virtual psp command status + * + * @VPSP_INIT: the initial command from guest + * @VPSP_RUNNING: the middle command to check 
and run ringbuffer command + * @VPSP_FINISH: inform the guest that the command ran successfully + */ +enum VPSP_CMD_STATUS { + VPSP_INIT = 0, + VPSP_RUNNING, + VPSP_FINISH, + VPSP_MAX +}; + +/** + * struct vpsp_cmd - virtual psp command + * + * @cmd_id: the command id is used to distinguish different commands + * @is_high_rb: indicates the ringbuffer level in which the command is placed + */ +struct vpsp_cmd { + u32 cmd_id : 31; + u32 is_high_rb : 1; +}; + +/** + * struct vpsp_ret - virtual psp return result + * + * @pret: the return code from device + * @resv: reserved bits + * @index: used to distinguish the position of command in the ringbuffer + * @status: indicates the current status of the related command + */ +struct vpsp_ret { + u32 pret : 16; + u32 resv : 2; + u32 index : 12; + u32 status : 2; +}; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP int psp_do_cmd(int cmd, void *data, int *psp_ret); @@ -763,6 +803,9 @@ int csv_check_stat_queue_status(int *psp_ret); */ int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); +int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, struct vpsp_ret *psp_ret); + +int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int @@ -801,6 +844,13 @@ static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } static inline int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; } +static inline int +vpsp_try_get_result(uint8_t prio, uint32_t index, + void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } + +static inline int +vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ -- Gitee From 8a21e7e16475647040d8c9c48aa520fbbe04de7e Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Tue, 26 Mar 2024 17:07:17 +0800 Subject: [PATCH 0524/2138] anolis: kvm: Support tkm virtualization ANBZ: #8628 
The virtual machine enters the kernel through the vmmcall instruction, and puts the tkm command into the csv_ringbuffer and passes it to the PSP for processing. Signed-off-by: niuyongwen Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2955 --- arch/x86/kvm/hygon/psp.c | 584 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 556 insertions(+), 28 deletions(-) diff --git a/arch/x86/kvm/hygon/psp.c b/arch/x86/kvm/hygon/psp.c index 40f5ee0e6e42..c8edc99a92df 100644 --- a/arch/x86/kvm/hygon/psp.c +++ b/arch/x86/kvm/hygon/psp.c @@ -11,6 +11,43 @@ #include #include #include +#include + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) "vpsp: " fmt + +/* + * The file mainly implements the base execution + * logic of virtual PSP in kernel mode, which mainly includes: + * (1) Obtain the VM command and preprocess the pointer + * mapping table information in the command buffer + * (2) The command that has been converted will interact + * with the channel of the psp through the driver and + * try to obtain the execution result + * (3) The executed command data is recovered according to + * the multilevel pointer of the mapping table, and then returned to the VM + * + * The primary implementation logic of virtual PSP in kernel mode + * call trace: + * guest command(vmmcall) + * | + * | |-> kvm_pv_psp_cmd_pre_op + * | | + * | | -> guest_addr_map_table_op + * | | + * | | -> guest_multiple_level_gpa_replace + * | + * kvm_pv_psp_op->|-> vpsp_try_do_cmd/vpsp_try_get_result <====> psp device driver + * | + * | + * |-> kvm_pv_psp_cmd_post_op + * | + * | -> guest_addr_map_table_op + * | + * | -> guest_multiple_level_gpa_restore + */ struct psp_cmdresp_head { uint32_t buf_size; @@ -18,66 +55,557 @@ struct psp_cmdresp_head { uint32_t cmdresp_code; } __packed; -int guest_addr_map_table_op(void *data_hva, gpa_t data_gpa, gpa_t table_gpa, - int op) +/** + * struct map_tbl - multilevel pointer address 
mapping table + * + * @parent_pa: parent address block's physics address + * @offset: offset in parent address block + * @size: submemory size + * @align: submemory align size, hva need to keep size alignment in kernel + * @hva: submemory copy block in kernel virtual address + */ +struct map_tbl { + uint64_t parent_pa; + uint32_t offset; + uint32_t size; + uint32_t align; + uint64_t hva; +} __packed; + +struct addr_map_tbls { + uint32_t tbl_nums; + struct map_tbl tbl[]; +} __packed; + +/* gpa and hva conversion maintenance table for internal use */ +struct gpa2hva_t { + void *hva; + gpa_t gpa; +}; + +struct gpa2hva_tbls { + uint32_t max_nums; + uint32_t tbl_nums; + struct gpa2hva_t tbl[]; +}; + +/* save command data for restoring later */ +struct vpsp_hbuf_wrapper { + void *data; + uint32_t data_size; + struct addr_map_tbls *map_tbls; + struct gpa2hva_tbls *g2h_tbls; +}; + +/* + * Virtual PSP host memory information maintenance, used in ringbuffer mode + */ +struct vpsp_hbuf_wrapper +g_hbuf_wrap[CSV_COMMAND_PRIORITY_NUM][CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE] = {0}; + +void __maybe_unused map_tbl_dump(const char *title, struct addr_map_tbls *tbls) { - return 0; + int i; + + pr_info("[%s]-> map_tbl_nums: %d", title, tbls->tbl_nums); + for (i = 0; i < tbls->tbl_nums; i++) { + pr_info("\t[%d]: parent_pa: 0x%llx, offset: 0x%x, size: 0x%x, align: 0x%x hva: 0x%llx", + i, tbls->tbl[i].parent_pa, tbls->tbl[i].offset, + tbls->tbl[i].size, tbls->tbl[i].align, tbls->tbl[i].hva); + } + pr_info("\n"); } -int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, - gpa_t table_gpa) +void __maybe_unused g2h_tbl_dump(const char *title, struct gpa2hva_tbls *tbls) { - void *data; - struct psp_cmdresp_head psp_head; - uint32_t data_size; - int psp_ret = 0; - int ret = 0; + int i; - if (unlikely(kvm_read_guest(kvm, data_gpa, &psp_head, - sizeof(struct psp_cmdresp_head)))) + pr_info("[%s]-> g2h_tbl_nums: %d, max_nums: %d", title, tbls->tbl_nums, + 
tbls->max_nums); + for (i = 0; i < tbls->tbl_nums; i++) + pr_info("\t[%d]: hva: 0x%llx, gpa: 0x%llx", i, + (uint64_t)tbls->tbl[i].hva, tbls->tbl[i].gpa); + pr_info("\n"); +} + +static int gpa2hva_tbl_fill(struct gpa2hva_tbls *tbls, void *hva, gpa_t gpa) +{ + uint32_t fill_idx = tbls->tbl_nums; + + if (fill_idx >= tbls->max_nums) return -EFAULT; - data_size = psp_head.buf_size; - data = kzalloc(data_size, GFP_KERNEL); - if (!data) + tbls->tbl[fill_idx].hva = hva; + tbls->tbl[fill_idx].gpa = gpa; + tbls->tbl_nums = fill_idx + 1; + + return 0; +} + +static void clear_hva_in_g2h_tbls(struct gpa2hva_tbls *g2h, void *hva) +{ + int i; + + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].hva == hva) + g2h->tbl[i].hva = NULL; + } +} + +static void *get_hva_from_gpa(struct gpa2hva_tbls *g2h, gpa_t gpa) +{ + int i; + + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].gpa == gpa) + return (void *)g2h->tbl[i].hva; + } + + return NULL; +} + +static gpa_t get_gpa_from_hva(struct gpa2hva_tbls *g2h, void *hva) +{ + int i; + + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].hva == hva) + return g2h->tbl[i].gpa; + } + + return 0; +} + +/* + * The virtual machine multilevel pointer command buffer handles the + * execution entity, synchronizes the data in the original gpa to the + * newly allocated hva(host virtual address) and updates the mapping + * relationship in the parent memory + */ +static int guest_multiple_level_gpa_replace(struct kvm *kvm, + struct map_tbl *tbl, struct gpa2hva_tbls *g2h) +{ + int ret = 0; + uint32_t sub_block_size; + uint64_t sub_paddr; + void *parent_kva = NULL; + + /* kmalloc memory for child block */ + sub_block_size = max(tbl->size, tbl->align); + tbl->hva = (uint64_t)kzalloc(sub_block_size, GFP_KERNEL); + if (!tbl->hva) return -ENOMEM; - if (unlikely(kvm_read_guest(kvm, data_gpa, data, data_size))) { + /* get child gpa from parent gpa */ + if (unlikely(kvm_read_guest(kvm, tbl->parent_pa + tbl->offset, + &sub_paddr, 
sizeof(sub_paddr)))) { + pr_err("[%s]: kvm_read_guest for parent gpa failed\n", + __func__); ret = -EFAULT; goto e_free; } - if (guest_addr_map_table_op(data, data_gpa, table_gpa, 0)) { + /* copy child block data from gpa to hva */ + if (unlikely(kvm_read_guest(kvm, sub_paddr, (void *)tbl->hva, + tbl->size))) { + pr_err("[%s]: kvm_read_guest for sub_data failed\n", + __func__); ret = -EFAULT; goto e_free; } - ret = psp_do_cmd(cmd, data, &psp_ret); - if (ret) { - pr_err("%s: psp do cmd error, %d\n", __func__, psp_ret); - ret = -EIO; + /* get hva from gpa */ + parent_kva = get_hva_from_gpa(g2h, tbl->parent_pa); + if (unlikely(!parent_kva)) { + pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n", + __func__); + ret = -EFAULT; goto e_free; } - if (guest_addr_map_table_op(data, data_gpa, table_gpa, 1)) { + /* replace pa of hva from gpa */ + *(uint64_t *)((uint8_t *)parent_kva + tbl->offset) = __psp_pa(tbl->hva); + + /* fill in gpa and hva to map table for restoring later */ + if (unlikely(gpa2hva_tbl_fill(g2h, (void *)tbl->hva, sub_paddr))) { + pr_err("[%s]: gpa2hva_tbl_fill for sub_addr failed\n", + __func__); ret = -EFAULT; goto e_free; } - if (unlikely(kvm_write_guest(kvm, data_gpa, data, data_size))) { + return ret; + +e_free: + kfree((const void *)tbl->hva); + return ret; +} + +/* The virtual machine multi-level pointer command memory handles the + * execution entity, synchronizes the data in the hva(host virtual + * address) back to the memory corresponding to the gpa, and restores + * the mapping relationship in the original parent memory + */ +static int guest_multiple_level_gpa_restore(struct kvm *kvm, + struct map_tbl *tbl, struct gpa2hva_tbls *g2h) +{ + int ret = 0; + gpa_t sub_gpa; + void *parent_hva = NULL; + + /* get gpa from hva */ + sub_gpa = get_gpa_from_hva(g2h, (void *)tbl->hva); + if (unlikely(!sub_gpa)) { + pr_err("[%s]: get_gpa_from_hva for sub_gpa failed\n", + __func__); ret = -EFAULT; - goto e_free; + goto end; } - if 
(unlikely(kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, - sizeof(psp_ret)))) { + /* copy child block data from hva to gpa */ + if (unlikely(kvm_write_guest(kvm, sub_gpa, (void *)tbl->hva, + tbl->size))) { + pr_err("[%s]: kvm_write_guest for sub_gpa failed\n", + __func__); ret = -EFAULT; - goto e_free; + goto end; + } + + /* get parent hva from parent gpa */ + parent_hva = get_hva_from_gpa(g2h, tbl->parent_pa); + if (unlikely(!parent_hva)) { + pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + /* restore gpa from pa of hva in parent block */ + *(uint64_t *)((uint8_t *)parent_hva + tbl->offset) = sub_gpa; + + /* free child block memory */ + clear_hva_in_g2h_tbls(g2h, (void *)tbl->hva); + kfree((const void *)tbl->hva); + tbl->hva = 0; + +end: + return ret; +} + +/* + * The virtual machine multilevel pointer command memory processing + * executes upper-layer abstract interfaces, including replacing and + * restoring two sub-processing functions + */ +static int guest_addr_map_table_op(struct kvm *kvm, struct gpa2hva_tbls *g2h, + struct addr_map_tbls *map_tbls, int op) +{ + int ret = 0; + int i; + uint64_t *sub_paddr_ptr; + + if (op) { + for (i = map_tbls->tbl_nums - 1; i >= 0; i--) { + /* check if the gpa of root points to itself */ + if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) { + sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva + + map_tbls->tbl[i].offset); + /* if the child paddr is equal to the parent paddr */ + if ((uint64_t)g2h->tbl[0].hva == map_tbls->tbl[i].hva) { + *sub_paddr_ptr = g2h->tbl[0].gpa; + continue; + } + } + + /* restore new pa of kva with the gpa from guest */ + if (unlikely(guest_multiple_level_gpa_restore(kvm, + &map_tbls->tbl[i], g2h))) { + pr_err("[%s]: guest_multiple_level_gpa_restore failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + } else { + for (i = 0; i < map_tbls->tbl_nums; i++) { + /* check if the gpa of root points to itself */ + if 
(map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) { + sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva + + map_tbls->tbl[i].offset); + /* if the child paddr is equal to the parent paddr */ + if (*sub_paddr_ptr == map_tbls->tbl[i].parent_pa) { + *sub_paddr_ptr = __psp_pa(g2h->tbl[0].hva); + map_tbls->tbl[i].hva = (uint64_t)g2h->tbl[0].hva; + continue; + } + } + + /* check if parent_pa is valid */ + if (unlikely(!get_hva_from_gpa(g2h, map_tbls->tbl[i].parent_pa))) { + pr_err("[%s]: g2h->tbl[%d].parent_pa: 0x%llx is invalid\n", + __func__, i, map_tbls->tbl[i].parent_pa); + ret = -EFAULT; + goto end; + } + + /* replace the gpa from guest with the new pa of kva */ + if (unlikely(guest_multiple_level_gpa_replace(kvm, + &map_tbls->tbl[i], g2h))) { + pr_err("[%s]: guest_multiple_level_gpa_replace failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } } +end: return ret; +} -e_free: +static void kvm_pv_psp_mem_free(struct gpa2hva_tbls *g2h, struct addr_map_tbls + *map_tbl, void *data) +{ + int i; + + if (g2h) { + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].hva && (g2h->tbl[i].hva != data)) { + kfree(g2h->tbl[i].hva); + g2h->tbl[i].hva = NULL; + } + } + kfree(g2h); + } + + kfree(map_tbl); kfree(data); +} + +/* + * Obtain the VM command and preprocess the pointer mapping table + * information in the command buffer, the processed data will be + * used to interact with the psp device + */ +static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, + gpa_t table_gpa, struct vpsp_hbuf_wrapper *hbuf) +{ + int ret = 0; + void *data = NULL; + struct psp_cmdresp_head psp_head; + uint32_t data_size; + struct addr_map_tbls map_head, *map_tbls = NULL; + uint32_t map_tbl_size; + struct gpa2hva_tbls *g2h = NULL; + uint32_t g2h_tbl_size; + + if (unlikely(kvm_read_guest(kvm, data_gpa, &psp_head, + sizeof(struct psp_cmdresp_head)))) + return -EFAULT; + + data_size = psp_head.buf_size; + data = kzalloc(data_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + if 
(unlikely(kvm_read_guest(kvm, data_gpa, data, data_size))) { + ret = -EFAULT; + goto end; + } + + if (table_gpa) { + /* parse address map table from guest */ + if (unlikely(kvm_read_guest(kvm, table_gpa, &map_head, + sizeof(struct addr_map_tbls)))) { + pr_err("[%s]: kvm_read_guest for map_head failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + map_tbl_size = sizeof(struct addr_map_tbls) + map_head.tbl_nums + * sizeof(struct map_tbl); + map_tbls = kzalloc(map_tbl_size, GFP_KERNEL); + if (!map_tbls) { + ret = -ENOMEM; + goto end; + } + + if (unlikely(kvm_read_guest(kvm, table_gpa, map_tbls, + map_tbl_size))) { + pr_err("[%s]: kvm_read_guest for map_tbls failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + /* init for gpa2hva table*/ + g2h_tbl_size = sizeof(struct gpa2hva_tbls) + (map_head.tbl_nums + + 1) * sizeof(struct gpa2hva_t); + g2h = kzalloc(g2h_tbl_size, GFP_KERNEL); + if (!g2h) { + ret = -ENOMEM; + goto end; + } + g2h->max_nums = map_head.tbl_nums + 1; + + /* fill the root parent address */ + if (gpa2hva_tbl_fill(g2h, data, data_gpa)) { + pr_err("[%s]: gpa2hva_tbl_fill for root data address failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + if (guest_addr_map_table_op(kvm, g2h, map_tbls, 0)) { + pr_err("[%s]: guest_addr_map_table_op for replacing failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + + hbuf->data = data; + hbuf->data_size = data_size; + hbuf->map_tbls = map_tbls; + hbuf->g2h_tbls = g2h; + +end: + if (ret && data) + kfree(data); + return ret; +} + +/* + * The executed command data is recovered according to the multilevel + * pointer of the mapping table when the command has finished + * interacting with the psp device + */ +static int kvm_pv_psp_cmd_post_op(struct kvm *kvm, gpa_t data_gpa, + struct vpsp_hbuf_wrapper *hbuf) +{ + int ret = 0; + + if (hbuf->map_tbls) { + if (guest_addr_map_table_op(kvm, hbuf->g2h_tbls, + hbuf->map_tbls, 1)) { + pr_err("[%s]: guest_addr_map_table_op for restoring failed\n", + 
__func__); + ret = -EFAULT; + goto end; + } + } + + /* restore cmdresp's buffer from context */ + if (unlikely(kvm_write_guest(kvm, data_gpa, hbuf->data, + hbuf->data_size))) { + pr_err("[%s]: kvm_write_guest for cmdresp data failed\n", + __func__); + ret = -EFAULT; + goto end; + } + +end: + /* release memory and clear hbuf */ + kvm_pv_psp_mem_free(hbuf->g2h_tbls, hbuf->map_tbls, hbuf->data); + memset(hbuf, 0, sizeof(*hbuf)); + + return ret; +} + +/* + * The primary implementation interface of virtual PSP in kernel mode + */ +int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, + gpa_t table_gpa) +{ + int ret = 0; + struct vpsp_ret psp_ret = {0}; + struct vpsp_hbuf_wrapper hbuf = {0}; + struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + uint32_t index = 0; + + if (unlikely(kvm_read_guest(kvm, psp_ret_gpa, &psp_ret, + sizeof(psp_ret)))) + return -EFAULT; + + switch (psp_ret.status) { + case VPSP_INIT: + /* multilevel pointer replace*/ + ret = kvm_pv_psp_cmd_pre_op(kvm, data_gpa, table_gpa, &hbuf); + if (unlikely(ret)) { + psp_ret.status = VPSP_FINISH; + pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + /* try to send command to the device for execution*/ + ret = vpsp_try_do_cmd(cmd, (void *)hbuf.data, + (struct vpsp_ret *)&psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_do_cmd failed\n", __func__); + ret = -EFAULT; + goto end; + } + + ret = -EFAULT; + if (psp_ret.status == VPSP_RUNNING) { + /* backup host memory message for restoring later*/ + prio = vcmd->is_high_rb ? 
CSV_COMMAND_PRIORITY_HIGH : + CSV_COMMAND_PRIORITY_LOW; + g_hbuf_wrap[prio][psp_ret.index] = hbuf; + ret = 0; + } else if (psp_ret.status == VPSP_FINISH) { + /* restore multilevel pointer data */ + ret = kvm_pv_psp_cmd_post_op(kvm, data_gpa, &hbuf); + if (unlikely(ret)) { + pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + break; + + case VPSP_RUNNING: + prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : + CSV_COMMAND_PRIORITY_LOW; + index = psp_ret.index; + /* try to get the execution result from ringbuffer*/ + ret = vpsp_try_get_result(prio, index, g_hbuf_wrap[prio][index].data, + (struct vpsp_ret *)&psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed\n", __func__); + ret = -EFAULT; + goto end; + } + + ret = -EFAULT; + if (psp_ret.status == VPSP_RUNNING) { + ret = 0; + } else if (psp_ret.status == VPSP_FINISH) { + /* restore multilevel pointer data */ + ret = kvm_pv_psp_cmd_post_op(kvm, data_gpa, + &g_hbuf_wrap[prio][index]); + if (unlikely(ret)) { + pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + break; + + default: + pr_err("[%s]: invalid command status\n", __func__); + ret = -EFAULT; + break; + } +end: + /* return psp_ret to guest */ + kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)); return ret; } -- Gitee From 47917d3cadd9d6c0dd82afd8e5e1341068f08dbc Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Tue, 26 Dec 2023 16:59:41 +0800 Subject: [PATCH 0525/2138] anolis: crypto: ccp: Support tkm key isolation ANBZ: #8628 save qemu process PID to vid mapping, and when processing TKM commands, obtain the corresponding vid based on kvm->userspace_pid. After obtaining the vid, append the vid to the high 8 bits of the physical address and send it to the PSP. 
Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2955 --- arch/x86/kvm/hygon/psp.c | 25 +++++- drivers/crypto/ccp/psp-dev.c | 161 ++++++++++++++++++++++++++++++++++- drivers/crypto/ccp/sev-dev.c | 114 ++++++++++++++++++++++--- include/linux/psp-sev.h | 15 +++- 4 files changed, 298 insertions(+), 17 deletions(-) diff --git a/arch/x86/kvm/hygon/psp.c b/arch/x86/kvm/hygon/psp.c index c8edc99a92df..fd5b4839176b 100644 --- a/arch/x86/kvm/hygon/psp.c +++ b/arch/x86/kvm/hygon/psp.c @@ -49,6 +49,9 @@ * | -> guest_multiple_level_gpa_restore */ +#define TKM_CMD_ID_MIN 0x120 +#define TKM_CMD_ID_MAX 0x12f + struct psp_cmdresp_head { uint32_t buf_size; uint32_t cmdresp_size; @@ -513,6 +516,13 @@ static int kvm_pv_psp_cmd_post_op(struct kvm *kvm, gpa_t data_gpa, return ret; } +static int cmd_type_is_tkm(int cmd) +{ + if (cmd >= TKM_CMD_ID_MIN && cmd <= TKM_CMD_ID_MAX) + return 1; + return 0; +} + /* * The primary implementation interface of virtual PSP in kernel mode */ @@ -525,6 +535,17 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; uint8_t prio = CSV_COMMAND_PRIORITY_LOW; uint32_t index = 0; + uint32_t vid = 0; + + // only tkm cmd need vid + if (cmd_type_is_tkm(vcmd->cmd_id)) { + // if vm without set vid, then tkm command is not allowed + ret = vpsp_get_vid(&vid, kvm->userspace_pid); + if (ret) { + pr_err("[%s]: not allowed tkm command without vid\n", __func__); + return -EFAULT; + } + } if (unlikely(kvm_read_guest(kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)))) @@ -543,7 +564,7 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, } /* try to send command to the device for execution*/ - ret = vpsp_try_do_cmd(cmd, (void *)hbuf.data, + ret = vpsp_try_do_cmd(vid, cmd, (void *)hbuf.data, (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_do_cmd failed\n", __func__); @@ -575,7 
+596,7 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, CSV_COMMAND_PRIORITY_LOW; index = psp_ret.index; /* try to get the execution result from ringbuffer*/ - ret = vpsp_try_get_result(prio, index, g_hbuf_wrap[prio][index].data, + ret = vpsp_try_get_result(vid, prio, index, g_hbuf_wrap[prio][index].data, (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_try_get_result failed\n", __func__); diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index b1c0c205dac7..994721c7848e 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -11,6 +11,9 @@ #include #include #include +#include +#include +#include #include "sp-dev.h" #include "psp-dev.h" @@ -27,8 +30,23 @@ int is_hygon_psp; enum HYGON_PSP_OPCODE { HYGON_PSP_MUTEX_ENABLE = 1, HYGON_PSP_MUTEX_DISABLE, + HYGON_VPSP_CTRL_OPT, HYGON_PSP_OPCODE_MAX_NR, }; + +enum VPSP_DEV_CTRL_OPCODE { + VPSP_OP_VID_ADD, + VPSP_OP_VID_DEL, +}; + +struct vpsp_dev_ctrl { + unsigned char op; + union { + unsigned int vid; + unsigned char reserved[128]; + } data; +}; + int psp_mutex_enabled; extern struct mutex sev_cmd_mutex; @@ -270,9 +288,141 @@ static ssize_t write_psp(struct file *file, const char __user *buf, size_t count return written; } +DEFINE_RWLOCK(vpsp_rwlock); + +/* VPSP_VID_MAX_ENTRIES determines the maximum number of vms that can set vid. + * but, the performance of finding vid is determined by g_vpsp_vid_num, + * so VPSP_VID_MAX_ENTRIES can be set larger. 
+ */ +#define VPSP_VID_MAX_ENTRIES 2048 +#define VPSP_VID_NUM_MAX 64 + +struct vpsp_vid_entry { + uint32_t vid; + pid_t pid; +}; +static struct vpsp_vid_entry g_vpsp_vid_array[VPSP_VID_MAX_ENTRIES]; +static uint32_t g_vpsp_vid_num; +static int compare_vid_entries(const void *a, const void *b) +{ + return ((struct vpsp_vid_entry *)a)->pid - ((struct vpsp_vid_entry *)b)->pid; +} +static void swap_vid_entries(void *a, void *b, int size) +{ + struct vpsp_vid_entry entry; + + memcpy(&entry, a, size); + memcpy(a, b, size); + memcpy(b, &entry, size); +} + +/** + * When the virtual machine executes the 'tkm' command, + * it needs to retrieve the corresponding 'vid' + * by performing a binary search using 'kvm->userspace_pid'. + */ +int vpsp_get_vid(uint32_t *vid, pid_t pid) +{ + struct vpsp_vid_entry new_entry = {.pid = pid}; + struct vpsp_vid_entry *existing_entry = NULL; + + read_lock(&vpsp_rwlock); + existing_entry = bsearch(&new_entry, g_vpsp_vid_array, g_vpsp_vid_num, + sizeof(struct vpsp_vid_entry), compare_vid_entries); + read_unlock(&vpsp_rwlock); + + if (!existing_entry) + return -ENOENT; + if (vid) { + *vid = existing_entry->vid; + pr_debug("PSP: %s %d, by pid %d\n", __func__, *vid, pid); + } + return 0; +} +EXPORT_SYMBOL_GPL(vpsp_get_vid); + +/** + * Upon qemu startup, this section checks whether + * the '-device psp,vid' parameter is specified. + * If set, it utilizes the 'vpsp_add_vid' function + * to insert the 'vid' and 'pid' values into the 'g_vpsp_vid_array'. + * The insertion is done in ascending order of 'pid'. 
+ */ +static int vpsp_add_vid(uint32_t vid) +{ + pid_t cur_pid = task_pid_nr(current); + struct vpsp_vid_entry new_entry = {.vid = vid, .pid = cur_pid}; + + if (vpsp_get_vid(NULL, cur_pid) == 0) + return -EEXIST; + if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) + return -ENOMEM; + if (vid >= VPSP_VID_NUM_MAX) + return -EINVAL; + + write_lock(&vpsp_rwlock); + memcpy(&g_vpsp_vid_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_vid_entry)); + sort(g_vpsp_vid_array, g_vpsp_vid_num, sizeof(struct vpsp_vid_entry), + compare_vid_entries, swap_vid_entries); + pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num); + write_unlock(&vpsp_rwlock); + return 0; +} + +/** + * Upon the virtual machine is shut down, + * the 'vpsp_del_vid' function is employed to remove + * the 'vid' associated with the current 'pid'. + */ +static int vpsp_del_vid(void) +{ + pid_t cur_pid = task_pid_nr(current); + int i, ret = -ENOENT; + + write_lock(&vpsp_rwlock); + for (i = 0; i < g_vpsp_vid_num; ++i) { + if (g_vpsp_vid_array[i].pid == cur_pid) { + --g_vpsp_vid_num; + pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", + g_vpsp_vid_array[i].vid, cur_pid, g_vpsp_vid_num); + memcpy(&g_vpsp_vid_array[i], &g_vpsp_vid_array[i + 1], + sizeof(struct vpsp_vid_entry) * (g_vpsp_vid_num - i)); + ret = 0; + goto end; + } + } + +end: + write_unlock(&vpsp_rwlock); + return ret; +} + +static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) +{ + int ret = 0; + unsigned char op = ctrl->op; + + switch (op) { + case VPSP_OP_VID_ADD: + ret = vpsp_add_vid(ctrl->data.vid); + break; + + case VPSP_OP_VID_DEL: + ret = vpsp_del_vid(); + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} + static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) { unsigned int opcode = 0; + struct vpsp_dev_ctrl vpsp_ctrl_op; + int ret = -EFAULT; if (_IOC_TYPE(ioctl) != HYGON_PSP_IOC_TYPE) { printk(KERN_ERR "%s: invalid ioctl type: 0x%x\n", __func__, 
_IOC_TYPE(ioctl)); @@ -289,6 +439,7 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) // Wait 10ms just in case someone is right before getting the psp lock. mdelay(10); psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + ret = 0; break; case HYGON_PSP_MUTEX_DISABLE: @@ -300,13 +451,21 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) // Wait 10ms just in case someone is right before getting the sev lock. mdelay(10); mutex_unlock(&sev_cmd_mutex); + ret = 0; + break; + + case HYGON_VPSP_CTRL_OPT: + if (copy_from_user(&vpsp_ctrl_op, (void __user *)arg, + sizeof(struct vpsp_dev_ctrl))) + return -EFAULT; + ret = do_vpsp_op_ioctl(&vpsp_ctrl_op); break; default: printk(KERN_ERR "%s: invalid ioctl number: %d\n", __func__, opcode); return -EINVAL; } - return 0; + return ret; } static const struct file_operations psp_fops = { diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 75ccfce02e76..ee42430b4ae3 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -684,6 +684,73 @@ static int sev_do_cmd(int cmd, void *data, int *psp_ret) return rc; } +static int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + phys_addr_t phys_addr; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + if (data && WARN_ON_ONCE(!virt_addr_valid(data))) + return -EINVAL; + + /* Get the physical address of the command buffer */ + phys_addr = PUT_PSP_VID(__psp_pa(data), vid); + phys_lsb = data ? lower_32_bits(phys_addr) : 0; + phys_msb = data ? 
upper_32_bits(phys_addr) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, psp_timeout); + + print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, + sev_cmd_buffer_len(cmd), false); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = sev_wait_cmd_ioc(sev, ®, psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + psp_dead = true; + + return ret; + } + + psp_timeout = psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, + sev_cmd_buffer_len(cmd), false); + + return ret; +} + int psp_do_cmd(int cmd, void *data, int *psp_ret) { int rc; @@ -1859,12 +1926,12 @@ static int vpsp_dequeue_cmd(int prio, int index, * Populate the command from the virtual machine to the queue to * support execution in ringbuffer mode */ -static int vpsp_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) +static int vpsp_fill_cmd_queue(uint32_t vid, int prio, int cmd, void *data, uint16_t flags) { struct csv_cmdptr_entry cmdptr = { }; int index = -1; - cmdptr.cmd_buf_ptr = __psp_pa(data); + cmdptr.cmd_buf_ptr = PUT_PSP_VID(__psp_pa(data), vid); cmdptr.cmd_id = cmd; cmdptr.cmd_flags = flags; @@ -2132,7 +2199,7 @@ static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, * Try to obtain the result again by the command index, this * interface is used in ringbuffer mode */ -int vpsp_try_get_result(uint8_t prio, 
uint32_t index, void *data, +int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data, struct vpsp_ret *psp_ret) { int ret = 0; @@ -2157,7 +2224,7 @@ int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, if (vpsp_queue_cmd_size(prio) == 1) { /* dequeue command from queue*/ vpsp_dequeue_cmd(prio, index, &cmd); - ret = __sev_do_cmd_locked(cmd.cmd_id, data, + ret = __vpsp_do_cmd_locked(vid, cmd.cmd_id, data, (int *)psp_ret); psp_ret->status = VPSP_FINISH; if (unlikely(ret)) { @@ -2175,7 +2242,8 @@ int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, index); psp_ret->status = VPSP_FINISH; if (unlikely(ret)) { - pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed\n", __func__); + pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed %d\n", + __func__, ret); goto end; } } @@ -2194,6 +2262,30 @@ int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, } EXPORT_SYMBOL_GPL(vpsp_try_get_result); +int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); + + if (is_hygon_psp && mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) { + return -EBUSY; + } + } else { + mutex_lock(&sev_cmd_mutex); + } + + rc = __vpsp_do_cmd_locked(vid, cmd, data, psp_ret); + + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); + + return rc; +} + /* * Send the virtual psp command to the PSP device and try to get the * execution result, the interface and the vpsp_try_get_result @@ -2202,7 +2294,7 @@ EXPORT_SYMBOL_GPL(vpsp_try_get_result); * vpsp_try_get_result interface will be used to obtain the result * later again */ -int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) +int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) { int ret = 0; int rb_supported; @@ -2214,10 +2306,10 @@ int 
vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) (struct vpsp_cmd *)&cmd); if (rb_supported) { /* fill command in ringbuffer's queue and get index */ - index = vpsp_fill_cmd_queue(prio, cmd, data, 0); + index = vpsp_fill_cmd_queue(vid, prio, cmd, data, 0); if (unlikely(index < 0)) { /* do mailbox command if queuing failed*/ - ret = psp_do_cmd(cmd, data, (int *)psp_ret); + ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); if (unlikely(ret)) { if (ret == -EIO) { ret = 0; @@ -2233,14 +2325,14 @@ int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) } /* try to get result from the ringbuffer command */ - ret = vpsp_try_get_result(prio, index, data, psp_ret); + ret = vpsp_try_get_result(vid, prio, index, data, psp_ret); if (unlikely(ret)) { - pr_err("[%s]: vpsp_try_get_result failed\n", __func__); + pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret); goto end; } } else { /* mailbox mode */ - ret = psp_do_cmd(cmd, data, (int *)psp_ret); + ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); if (unlikely(ret)) { if (ret == -EIO) { ret = 0; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index df88daf170d3..75f54d8e49ae 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -671,6 +671,12 @@ struct vpsp_ret { u32 status : 2; }; +#define PSP_VID_MASK 0xff +#define PSP_VID_SHIFT 56 +#define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) +#define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) +#define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) + #ifdef CONFIG_CRYPTO_DEV_SP_PSP int psp_do_cmd(int cmd, void *data, int *psp_ret); @@ -803,9 +809,12 @@ int csv_check_stat_queue_status(int *psp_ret); */ int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); -int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, struct vpsp_ret *psp_ret); +int vpsp_try_get_result(uint32_t vid, uint8_t 
prio, uint32_t index, + void *data, struct vpsp_ret *psp_ret); + +int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret); -int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret); +int vpsp_get_vid(uint32_t *vid, pid_t pid); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int @@ -849,7 +858,7 @@ vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } static inline int -vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } +vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ -- Gitee From 26afabc59f2ef80e9d685037247a823d53e6510d Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Sun, 18 Feb 2024 22:56:37 +0800 Subject: [PATCH 0526/2138] anolis: crypto: ccp: Allow VM without a configured vid to use TKM ANBZ: #8628 When no vid is assigned to the qemu virtual machine, the virtual machine can use the default vid0 when executing the tkm command, but this will share the key space with the host. You can use ioctl to operate /dev/hygon_psp_config to control the behavior of the default vid. 
Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2955 --- arch/x86/kvm/hygon/psp.c | 4 ++-- drivers/crypto/ccp/psp-dev.c | 33 +++++++++++++++++++++++++++++++++ include/linux/psp-sev.h | 4 ++++ 3 files changed, 39 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/hygon/psp.c b/arch/x86/kvm/hygon/psp.c index fd5b4839176b..9181ec2406ec 100644 --- a/arch/x86/kvm/hygon/psp.c +++ b/arch/x86/kvm/hygon/psp.c @@ -539,9 +539,9 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, // only tkm cmd need vid if (cmd_type_is_tkm(vcmd->cmd_id)) { - // if vm without set vid, then tkm command is not allowed + // check the permission to use the default vid when no vid is set ret = vpsp_get_vid(&vid, kvm->userspace_pid); - if (ret) { + if (ret && !vpsp_get_default_vid_permission()) { pr_err("[%s]: not allowed tkm command without vid\n", __func__); return -EFAULT; } diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 994721c7848e..c110ae79d93f 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -37,12 +37,16 @@ enum HYGON_PSP_OPCODE { enum VPSP_DEV_CTRL_OPCODE { VPSP_OP_VID_ADD, VPSP_OP_VID_DEL, + VPSP_OP_SET_DEFAULT_VID_PERMISSION, + VPSP_OP_GET_DEFAULT_VID_PERMISSION, }; struct vpsp_dev_ctrl { unsigned char op; union { unsigned int vid; + // Set or check the permissions for the default VID + unsigned int def_vid_perm; unsigned char reserved[128]; } data; }; @@ -316,6 +320,23 @@ static void swap_vid_entries(void *a, void *b, int size) memcpy(b, &entry, size); } +/** + * When 'allow_default_vid' is set to 1, + * QEMU is allowed to use 'vid 0' by default + * in the absence of a valid 'vid' setting. 
+ */ +uint32_t allow_default_vid = 1; +void vpsp_set_default_vid_permission(uint32_t is_allow) +{ + allow_default_vid = is_allow; +} + +int vpsp_get_default_vid_permission(void) +{ + return allow_default_vid; +} +EXPORT_SYMBOL_GPL(vpsp_get_default_vid_permission); + /** * When the virtual machine executes the 'tkm' command, * it needs to retrieve the corresponding 'vid' @@ -333,6 +354,7 @@ int vpsp_get_vid(uint32_t *vid, pid_t pid) if (!existing_entry) return -ENOENT; + if (vid) { *vid = existing_entry->vid; pr_debug("PSP: %s %d, by pid %d\n", __func__, *vid, pid); @@ -411,6 +433,14 @@ static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) ret = vpsp_del_vid(); break; + case VPSP_OP_SET_DEFAULT_VID_PERMISSION: + vpsp_set_default_vid_permission(ctrl->data.def_vid_perm); + break; + + case VPSP_OP_GET_DEFAULT_VID_PERMISSION: + ctrl->data.def_vid_perm = vpsp_get_default_vid_permission(); + break; + default: ret = -EINVAL; break; @@ -459,6 +489,9 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) sizeof(struct vpsp_dev_ctrl))) return -EFAULT; ret = do_vpsp_op_ioctl(&vpsp_ctrl_op); + if (!ret && copy_to_user((void __user *)arg, &vpsp_ctrl_op, + sizeof(struct vpsp_dev_ctrl))) + return -EFAULT; break; default: diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 75f54d8e49ae..1536d0057738 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -815,6 +815,8 @@ int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret); int vpsp_get_vid(uint32_t *vid, pid_t pid); + +int vpsp_get_default_vid_permission(void); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int @@ -860,6 +862,8 @@ vpsp_try_get_result(uint8_t prio, uint32_t index, static inline int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } +static inline int +vpsp_get_default_vid_permission(void) { return -ENODEV; } 
#endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ -- Gitee From 644e70a221a0c5fea7db923b0f35d93fb5059e76 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Mon, 18 Sep 2023 17:26:45 +0800 Subject: [PATCH 0527/2138] anolis: drm/hygon: Add support to passthrough Hygon DCU to virtual machine ANBZ: #8574 PCI RESET will cause failure to passthrough Hygon DCU to the guest. Fix this issue by add hydcu-fixup-header driver to disable PCI RESET. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Reviewed-by: Gu Mi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2915 --- Documentation/gpu/hydcu-fixup-header.rst | 13 +++ drivers/gpu/drm/Kconfig | 6 ++ drivers/gpu/drm/Makefile | 1 + .../gpu/drm/hygon/hydcu-fixup-header/Makefile | 3 + .../hydcu_pci_fixup_header.c | 93 +++++++++++++++++++ 5 files changed, 116 insertions(+) create mode 100644 Documentation/gpu/hydcu-fixup-header.rst create mode 100644 drivers/gpu/drm/hygon/hydcu-fixup-header/Makefile create mode 100644 drivers/gpu/drm/hygon/hydcu-fixup-header/hydcu_pci_fixup_header.c diff --git a/Documentation/gpu/hydcu-fixup-header.rst b/Documentation/gpu/hydcu-fixup-header.rst new file mode 100644 index 000000000000..5dca3ff3a137 --- /dev/null +++ b/Documentation/gpu/hydcu-fixup-header.rst @@ -0,0 +1,13 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +========================= + drm/hygon/hydcu-fixup-header hydcu-fixup-header driver +========================= + +The drm/hygon/hydcu-fixup-header driver supports all HYGON DCUs. + +General description +====================== + +The drm/hygon/hydcu-fixup-header driver adds flags NO_BUS_RESET to hydcu +device to disable vfio pci reset, as dcu is not support now. 
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index ec4abf9ff47b..353ffa210f0e 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -436,3 +436,9 @@ config DRM_LIB_RANDOM config DRM_PRIVACY_SCREEN bool default n + +config HYDCU_FIXUP_HEADER + bool "Enable fixup header support for HYDCU" + help + Choose this option if you want to use pci passthrough with HYDCU + HYDCU cannot support pci reset, so enable this module to disable pci reset diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 215e78e79125..12ad840d9e3a 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -198,3 +198,4 @@ obj-$(CONFIG_DRM_HYPERV) += hyperv/ obj-y += solomon/ obj-$(CONFIG_DRM_SPRD) += sprd/ obj-$(CONFIG_DRM_LOONGSON) += loongson/ +obj-$(CONFIG_HYDCU_FIXUP_HEADER) += hygon/hydcu-fixup-header/ diff --git a/drivers/gpu/drm/hygon/hydcu-fixup-header/Makefile b/drivers/gpu/drm/hygon/hydcu-fixup-header/Makefile new file mode 100644 index 000000000000..2dc816df4239 --- /dev/null +++ b/drivers/gpu/drm/hygon/hydcu-fixup-header/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_HYDCU_FIXUP_HEADER) += hydcu_pci_fixup_header.o \ No newline at end of file diff --git a/drivers/gpu/drm/hygon/hydcu-fixup-header/hydcu_pci_fixup_header.c b/drivers/gpu/drm/hygon/hydcu-fixup-header/hydcu_pci_fixup_header.c new file mode 100644 index 000000000000..962f0d74f703 --- /dev/null +++ b/drivers/gpu/drm/hygon/hydcu-fixup-header/hydcu_pci_fixup_header.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON DCU fixup driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Baoshun Fang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#define PCI_VENDOR_ID_HYGON 0x1d94 + +#define DEVICE_Z100SM 0x51b7 +#define DEVICE_C878182 0x52b7 +#define DEVICE_C878186 0x53b7 +#define DEVICE_Z100 0x54b7 +#define DEVICE_Z100L 0x55b7 +#define DEVICE_C878181 0x56b7 +#define DEVICE_C878185 0x57b7 +#define DEVICE_C878188 0x58b7 +#define DEVICE_C878174 0x59b7 +#define DEVICE_KONGMING 0x61b7 +#define DEVICE_KONGMING_E 0x6210 + +#define DRIVER_VERSION "0.2" +#define DRIVER_AUTHOR "huangjun " +#define DRIVER_DESC "fix dcu header" + +static int hydcu_pci_fixup_header_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + dev_info(&pdev->dev, "add flags NO_BUS_RESET\n"); + pdev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; + pdev->pm_cap = 0; + dev_info(&pdev->dev, "will abort probe\n"); + + return -EINVAL; +} + +static void hydcu_pci_fixup_header_remove(struct pci_dev *pdev) +{ +} + +static const struct pci_device_id hydcu_pci_fixup_header_ids[] = { + { PCI_VDEVICE(HYGON, DEVICE_Z100SM), }, + { PCI_VDEVICE(HYGON, DEVICE_C878182), }, + { PCI_VDEVICE(HYGON, DEVICE_C878186), }, + { PCI_VDEVICE(HYGON, DEVICE_Z100), }, + { PCI_VDEVICE(HYGON, DEVICE_Z100L), }, + { PCI_VDEVICE(HYGON, DEVICE_C878181), }, + { PCI_VDEVICE(HYGON, DEVICE_C878185), }, + { PCI_VDEVICE(HYGON, DEVICE_C878188), }, + { PCI_VDEVICE(HYGON, DEVICE_C878174), }, + { PCI_VDEVICE(HYGON, DEVICE_KONGMING), }, + { PCI_VDEVICE(HYGON, DEVICE_KONGMING_E), }, + {}, +}; + +static struct pci_driver hydcu_pci_fixup_header_driver = { + .name = "hydcu-fixup-header", + .id_table = hydcu_pci_fixup_header_ids, + .probe = hydcu_pci_fixup_header_probe, + .remove = hydcu_pci_fixup_header_remove, +}; + +static int __init hydcu_pci_fixup_header_init(void) +{ + /* Register and scan for devices */ + return pci_register_driver(&hydcu_pci_fixup_header_driver); +} + +static void __exit hydcu_pci_fixup_header_cleanup(void) +{ + 
pci_unregister_driver(&hydcu_pci_fixup_header_driver); +} + +module_init(hydcu_pci_fixup_header_init); +module_exit(hydcu_pci_fixup_header_cleanup); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); -- Gitee From 7de607bab66c59fbfb0b0803573dddbd0e53c0c9 Mon Sep 17 00:00:00 2001 From: yangge Date: Fri, 22 Mar 2024 08:32:28 -0400 Subject: [PATCH 0528/2138] anolis: mm/page_alloc: don't use PCP list for THP-sized allocations when using PF_MEMALLOC_PIN ANBZ: #8680 In the past, movable allocations could be disallowed from CMA through PF_MEMALLOC_PIN. However, since 5d0a661d808f ("mm/page_alloc: use only one PCP list for THP-sized allocations"), THP-sized pages of different types are put into one PCP list. When allocate a THP with PF_MEMALLOC_PIN, it would accidentally get a CMA page from PCP list, which will cause the program to not run correctly. So, PCP list can't be used for THP-sized allocations when using PF_MEMALLOC_PIN. 
Fixes: 5d0a661d808f ("mm/page_alloc: use only one PCP list for THP-sized allocations") Signed-off-by: yangge Signed-off-by: hanliyang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2983 --- mm/page_alloc.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2587b0b957e5..245d0348e4a9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2813,10 +2813,20 @@ struct page *rmqueue(struct zone *preferred_zone, WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); if (likely(pcp_allowed_order(order))) { +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA || + order != pageblock_order) { + page = rmqueue_pcplist(preferred_zone, zone, order, + migratetype, alloc_flags); + if (likely(page)) + goto out; + } +#else page = rmqueue_pcplist(preferred_zone, zone, order, migratetype, alloc_flags); if (likely(page)) goto out; +#endif } page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, -- Gitee From 37fe8f8bc7d5eebd1979772169adb5abbada222b Mon Sep 17 00:00:00 2001 From: yangge Date: Thu, 21 Mar 2024 08:46:33 -0400 Subject: [PATCH 0529/2138] anolis: mm/gup: don't check if a page is in lru before draining it ANBZ: #8680 Before migrating a page, we need to drain the page out of cpu's pagevecs if the page is in cpu's pagevecs. Otherwise, the migration will fail because of incorrect page reference. Whatever the return value of the function folio_test_lru() is, it does not tell whether the page is in cpu's pagevecs. Therefore, the check folio_test_lru() needs to be removed to ensure that the migration logic is correct. 
Signed-off-by: yangge Signed-off-by: hanliyang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2983 --- mm/gup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/gup.c b/mm/gup.c index fdd75384160d..2576962d4538 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1975,7 +1975,7 @@ static unsigned long collect_longterm_unpinnable_pages( continue; } - if (!folio_test_lru(folio) && drain_allow) { + if (drain_allow) { lru_add_drain_all(); drain_allow = false; } -- Gitee From 6227544778b862df50297e5eafddc2e281ea8dec Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 13 Nov 2023 01:54:26 +0800 Subject: [PATCH 0530/2138] anolis: KVM: SVM: Unmap ghcb pages if they're still mapped when destroy a CSV2 guest ANBZ: #8675 The ghcb pages might be mapped when KVM handling the VMGEXIT events, and these ghcb pages will be unmapped when prepare to switch to guest mode. If we try to kill the userspace VMM (e.g. qemu) for a CSV2 guest, it's possible that the mapped ghcb pages will never be unmapped which will cause memory leak. We exposed a serious memory leak by creating and killing multiple qemu processes for CSV2 guests frequently. In order to solve this issue, unmap ghcb pages if they're sill mapped when destroy CSV2 guest. 
Fixes: ce7ea0cfdc2e ("KVM: SVM: Move GHCB unmapping to fix RCU warning") Fixes: 291bd20d5d88 ("KVM: SVM: Add initial support for a VMGEXIT VMEXIT") Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Shirong Hao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2975 --- arch/x86/kvm/svm/sev.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 6f567fcc9393..59163f97b553 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2674,6 +2674,9 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu) __free_page(virt_to_page(svm->sev_es.vmsa)); + if (svm->sev_es.ghcb) + kvm_vcpu_unmap(vcpu, &svm->sev_es.ghcb_map, false); + if (svm->sev_es.ghcb_sa_free) kvfree(svm->sev_es.ghcb_sa); -- Gitee From 4ab9f9cf781dc16722bb000d179c2b39a2080566 Mon Sep 17 00:00:00 2001 From: Cruz Zhao Date: Wed, 3 Apr 2024 11:11:35 +0800 Subject: [PATCH 0531/2138] anolis: sched: fix compile error when CONFIG_SCHED_CORE is disabled ANBZ: #8698 When CONFIG_SCHED_CORE is disabled, acpu_enabled and sysctl_sched_cfs_bw_burst_onset_percent cannot be found because they are defined under CONFIG_SCHED_CORE. This patch is to fix this problem. 
Fixes: aa1ad269896c("anolis: sched: introduce ACPU accounting") Fixes: 55a1c37487b1("anolis: sched/fair: Make CFS bandwidth controller burstable") Signed-off-by: Cruz Zhao Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2999 --- kernel/sched/core.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0df85c1161b7..e51210cfbd7f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -151,15 +151,23 @@ const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK; __read_mostly int scheduler_running; -#ifdef CONFIG_SCHED_CORE - -DEFINE_STATIC_KEY_FALSE(__sched_core_enabled); - #ifdef CONFIG_SCHED_ACPU DEFINE_STATIC_KEY_FALSE(acpu_enabled); unsigned int sysctl_sched_acpu_enabled; #endif +#ifdef CONFIG_CFS_BANDWIDTH +/* + * Percent of burst assigned to cfs_b->runtime on tg_set_cfs_bandwidth, + * 0 by default. + */ +unsigned int sysctl_sched_cfs_bw_burst_onset_percent; +#endif + +#ifdef CONFIG_SCHED_CORE + +DEFINE_STATIC_KEY_FALSE(__sched_core_enabled); + /* kernel prio, less is more */ static inline int __task_prio(const struct task_struct *p) { @@ -175,14 +183,6 @@ static inline int __task_prio(const struct task_struct *p) return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */ } -#ifdef CONFIG_CFS_BANDWIDTH -/* - * Percent of burst assigned to cfs_b->runtime on tg_set_cfs_bandwidth, - * 0 by default. - */ -unsigned int sysctl_sched_cfs_bw_burst_onset_percent; -#endif - /* * l(a,b) * le(a,b) := !l(b,a) -- Gitee From 1f512b2d318f45bde745ab1ff1cae6677404fa8c Mon Sep 17 00:00:00 2001 From: duanqiangwen Date: Wed, 6 Dec 2023 17:50:44 +0800 Subject: [PATCH 0532/2138] net: wangxun: fix changing mac failed when running ANBZ: #8484 commit 87e839c82cc36346a2cd183ca941316902110716 upstream. in some bonding mode, service need to change mac when netif is running. Wangxun netdev add IFF_LIVE_ADDR_CHANGE priv_flag to support it. 
Signed-off-by: duanqiangwen Link: https://lore.kernel.org/r/20231206095044.17844-1-duanqiangwen@net-swift.com Signed-off-by: Jakub Kicinski Reviewed-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 1 + drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index a4d63d2f3c5b..2085b9c38a15 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -580,6 +580,7 @@ static int ngbe_probe(struct pci_dev *pdev, netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE - diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index d60c26ba0ba4..2482b661bc99 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -637,6 +637,7 @@ static int txgbe_probe(struct pci_dev *pdev, netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE - -- Gitee From 561f3f2c22ce4893ce47c334c9bb28cdf9de0d1f Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 11 Oct 2023 17:19:04 +0800 Subject: [PATCH 0533/2138] net: libwx: support hardware statistics ANBZ: #8484 commit 46b92e10d631b6a2c06d151929e87f1d39d72b8a upstream Implement update and clear Rx/Tx statistics. 
Signed-off-by: Jiawen Wu Signed-off-by: Jakub Kicinski Link: https://lore.kernel.org/r/20231011091906.70486-2-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- .../net/ethernet/wangxun/libwx/wx_ethtool.c | 169 ++++++++++++++++++ .../net/ethernet/wangxun/libwx/wx_ethtool.h | 8 + drivers/net/ethernet/wangxun/libwx/wx_hw.c | 99 ++++++++++ drivers/net/ethernet/wangxun/libwx/wx_hw.h | 2 + drivers/net/ethernet/wangxun/libwx/wx_lib.c | 20 ++- drivers/net/ethernet/wangxun/libwx/wx_type.h | 81 +++++++++ 6 files changed, 377 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index 93cb6f2294e7..ddc5f6d20b9c 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -3,9 +3,171 @@ #include #include +#include #include "wx_type.h" #include "wx_ethtool.h" +#include "wx_hw.h" + +struct wx_stats { + char stat_string[ETH_GSTRING_LEN]; + size_t sizeof_stat; + off_t stat_offset; +}; + +#define WX_STAT(str, m) { \ + .stat_string = str, \ + .sizeof_stat = sizeof(((struct wx *)0)->m), \ + .stat_offset = offsetof(struct wx, m) } + +static const struct wx_stats wx_gstrings_stats[] = { + WX_STAT("rx_dma_pkts", stats.gprc), + WX_STAT("tx_dma_pkts", stats.gptc), + WX_STAT("rx_dma_bytes", stats.gorc), + WX_STAT("tx_dma_bytes", stats.gotc), + WX_STAT("rx_total_pkts", stats.tpr), + WX_STAT("tx_total_pkts", stats.tpt), + WX_STAT("rx_long_length_count", stats.roc), + WX_STAT("rx_short_length_count", stats.ruc), + WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + WX_STAT("os2bmc_tx_by_host", stats.o2bspc), + WX_STAT("os2bmc_rx_by_host", stats.b2ogprc), + WX_STAT("rx_no_dma_resources", stats.rdmdrop), + WX_STAT("tx_busy", tx_busy), + WX_STAT("non_eop_descs", non_eop_descs), + WX_STAT("tx_restart_queue", restart_queue), + 
WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good), + WX_STAT("rx_csum_offload_errors", hw_csum_rx_error), + WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), +}; + +/* drivers allocates num_tx_queues and num_rx_queues symmetrically so + * we set the num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. + */ +#define WX_NUM_RX_QUEUES netdev->num_tx_queues +#define WX_NUM_TX_QUEUES netdev->num_tx_queues + +#define WX_QUEUE_STATS_LEN ( \ + (WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \ + (sizeof(struct wx_queue_stats) / sizeof(u64))) +#define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats) +#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN) + +int wx_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return WX_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} +EXPORT_SYMBOL(wx_get_sset_count); + +void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, wx_gstrings_stats[i].stat_string); + for (i = 0; i < netdev->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + } + for (i = 0; i < WX_NUM_RX_QUEUES; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + } + break; + } +} +EXPORT_SYMBOL(wx_get_strings); + +void wx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_ring *ring; + unsigned int start; + int i, j; + char *p; + + wx_update_stats(wx); + + for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) { + p = (char *)wx + wx_gstrings_stats[i].stat_offset; + data[i] = (wx_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < netdev->num_tx_queues; j++) { + ring = wx->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + i += 2; + } + for (j = 0; j < WX_NUM_RX_QUEUES; j++) { + ring = wx->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + i += 2; + } +} +EXPORT_SYMBOL(wx_get_ethtool_stats); + +void wx_get_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; + + wx_update_stats(wx); + + hwstats = &wx->stats; + mac_stats->MulticastFramesXmittedOK = hwstats->mptc; + mac_stats->BroadcastFramesXmittedOK = hwstats->bptc; + mac_stats->MulticastFramesReceivedOK = hwstats->mprc; + mac_stats->BroadcastFramesReceivedOK = hwstats->bprc; +} +EXPORT_SYMBOL(wx_get_mac_stats); + +void wx_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *stats) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; + + wx_update_stats(wx); + + hwstats = &wx->stats; + stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc; + stats->rx_pause_frames = hwstats->lxonoffrxc; +} +EXPORT_SYMBOL(wx_get_pause_stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { @@ -14,5 +176,12 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) strscpy(info->driver, wx->driver_name, sizeof(info->driver)); strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version)); strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info)); + if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) { + 
info->n_stats = WX_STATS_LEN - + (WX_NUM_TX_QUEUES - wx->num_tx_queues) * + (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2; + } else { + info->n_stats = WX_STATS_LEN; + } } EXPORT_SYMBOL(wx_get_drvinfo); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index e85538c69454..16d1a09369a6 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -4,5 +4,13 @@ #ifndef _WX_ETHTOOL_H_ #define _WX_ETHTOOL_H_ +int wx_get_sset_count(struct net_device *netdev, int sset); +void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data); +void wx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); +void wx_get_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats); +void wx_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index 52130df26aee..ec92e0023ff5 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1911,6 +1911,105 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) } EXPORT_SYMBOL(wx_vlan_rx_kill_vid); +/** + * wx_update_stats - Update the board statistics counters. 
+ * @wx: board private structure + **/ +void wx_update_stats(struct wx *wx) +{ + struct wx_hw_stats *hwstats = &wx->stats; + + u64 non_eop_descs = 0, alloc_rx_buff_failed = 0; + u64 hw_csum_rx_good = 0, hw_csum_rx_error = 0; + u64 restart_queue = 0, tx_busy = 0; + u32 i; + + /* gather some stats to the wx struct that are per queue */ + for (i = 0; i < wx->num_rx_queues; i++) { + struct wx_ring *rx_ring = wx->rx_ring[i]; + + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + } + wx->non_eop_descs = non_eop_descs; + wx->alloc_rx_buff_failed = alloc_rx_buff_failed; + wx->hw_csum_rx_error = hw_csum_rx_error; + wx->hw_csum_rx_good = hw_csum_rx_good; + + for (i = 0; i < wx->num_tx_queues; i++) { + struct wx_ring *tx_ring = wx->tx_ring[i]; + + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + } + wx->restart_queue = restart_queue; + wx->tx_busy = tx_busy; + + hwstats->gprc += rd32(wx, WX_RDM_PKT_CNT); + hwstats->gptc += rd32(wx, WX_TDM_PKT_CNT); + hwstats->gorc += rd64(wx, WX_RDM_BYTE_CNT_LSB); + hwstats->gotc += rd64(wx, WX_TDM_BYTE_CNT_LSB); + hwstats->tpr += rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L); + hwstats->tpt += rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L); + hwstats->crcerrs += rd64(wx, WX_RX_CRC_ERROR_FRAMES_L); + hwstats->rlec += rd64(wx, WX_RX_LEN_ERROR_FRAMES_L); + hwstats->bprc += rd64(wx, WX_RX_BC_FRAMES_GOOD_L); + hwstats->bptc += rd64(wx, WX_TX_BC_FRAMES_GOOD_L); + hwstats->mprc += rd64(wx, WX_RX_MC_FRAMES_GOOD_L); + hwstats->mptc += rd64(wx, WX_TX_MC_FRAMES_GOOD_L); + hwstats->roc += rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD); + hwstats->ruc += rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD); + hwstats->lxonoffrxc += rd32(wx, WX_MAC_LXONOFFRXC); + hwstats->lxontxc += rd32(wx, WX_RDB_LXONTXC); + hwstats->lxofftxc += rd32(wx, WX_RDB_LXOFFTXC); + hwstats->o2bgptc += rd32(wx, 
WX_TDM_OS2BMC_CNT); + hwstats->b2ospc += rd32(wx, WX_MNG_BMC2OS_CNT); + hwstats->o2bspc += rd32(wx, WX_MNG_OS2BMC_CNT); + hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT); + hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT); + + for (i = 0; i < wx->mac.max_rx_queues; i++) + hwstats->qmprc += rd32(wx, WX_PX_MPRC(i)); +} +EXPORT_SYMBOL(wx_update_stats); + +/** + * wx_clear_hw_cntrs - Generic clear hardware counters + * @wx: board private structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. + **/ +void wx_clear_hw_cntrs(struct wx *wx) +{ + u16 i = 0; + + for (i = 0; i < wx->mac.max_rx_queues; i++) + wr32(wx, WX_PX_MPRC(i), 0); + + rd32(wx, WX_RDM_PKT_CNT); + rd32(wx, WX_TDM_PKT_CNT); + rd64(wx, WX_RDM_BYTE_CNT_LSB); + rd32(wx, WX_TDM_BYTE_CNT_LSB); + rd32(wx, WX_RDM_DRP_PKT); + rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD); + rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD); + rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L); + rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L); + rd64(wx, WX_RX_MC_FRAMES_GOOD_L); + rd64(wx, WX_TX_MC_FRAMES_GOOD_L); + rd64(wx, WX_RX_BC_FRAMES_GOOD_L); + rd64(wx, WX_TX_BC_FRAMES_GOOD_L); + rd64(wx, WX_RX_CRC_ERROR_FRAMES_L); + rd64(wx, WX_RX_LEN_ERROR_FRAMES_L); + rd32(wx, WX_RDB_LXONTXC); + rd32(wx, WX_RDB_LXOFFTXC); + rd32(wx, WX_MAC_LXONOFFRXC); +} +EXPORT_SYMBOL(wx_clear_hw_cntrs); + /** * wx_start_hw - Prepare hardware for Tx/Rx * @wx: pointer to hardware structure diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index 0b3447bc6f2f..d3a0c65ef3ef 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -34,5 +34,7 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); int wx_sw_init(struct wx *wx); int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); +void wx_update_stats(struct 
wx *wx); +void wx_clear_hw_cntrs(struct wx *wx); #endif /* _WX_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index c37500aa0637..eea9fc0df873 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -421,6 +421,7 @@ static bool wx_is_non_eop(struct wx_ring *rx_ring, return false; rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; return true; } @@ -654,6 +655,7 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector, /* exit if we failed to retrieve a buffer */ if (!skb) { + rx_ring->rx_stats.alloc_rx_buff_failed++; break; } @@ -809,9 +811,11 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - netif_running(tx_ring->netdev)) + netif_running(tx_ring->netdev)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } } return !!budget; @@ -888,6 +892,7 @@ static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size) /* A reprieve! 
- use start_queue because it doesn't call schedule */ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; return 0; } @@ -1465,8 +1470,10 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> frags[f])); - if (wx_maybe_stop_tx(tx_ring, count + 3)) + if (wx_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; + } /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; @@ -2596,8 +2603,11 @@ void wx_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; int i; + wx_update_stats(wx); + rcu_read_lock(); for (i = 0; i < wx->num_rx_queues; i++) { struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]); @@ -2633,6 +2643,12 @@ void wx_get_stats64(struct net_device *netdev, } rcu_read_unlock(); + + hwstats = &wx->stats; + stats->rx_errors = hwstats->crcerrs + hwstats->rlec; + stats->multicast = hwstats->qmprc; + stats->rx_length_errors = hwstats->rlec; + stats->rx_crc_errors = hwstats->crcerrs; } EXPORT_SYMBOL(wx_get_stats64); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index c555af9ed51b..5386a418d3bc 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -59,6 +59,25 @@ #define WX_TS_ALARM_ST_DALARM BIT(1) #define WX_TS_ALARM_ST_ALARM BIT(0) +/* statistic */ +#define WX_TX_FRAME_CNT_GOOD_BAD_L 0x1181C +#define WX_TX_BC_FRAMES_GOOD_L 0x11824 +#define WX_TX_MC_FRAMES_GOOD_L 0x1182C +#define WX_RX_FRAME_CNT_GOOD_BAD_L 0x11900 +#define WX_RX_BC_FRAMES_GOOD_L 0x11918 +#define WX_RX_MC_FRAMES_GOOD_L 0x11920 +#define WX_RX_CRC_ERROR_FRAMES_L 0x11928 +#define WX_RX_LEN_ERROR_FRAMES_L 0x11978 +#define WX_RX_UNDERSIZE_FRAMES_GOOD 0x11938 +#define WX_RX_OVERSIZE_FRAMES_GOOD 
0x1193C +#define WX_MAC_LXONOFFRXC 0x11E0C + +/*********************** Receive DMA registers **************************/ +#define WX_RDM_DRP_PKT 0x12500 +#define WX_RDM_PKT_CNT 0x12504 +#define WX_RDM_BYTE_CNT_LSB 0x12508 +#define WX_RDM_BMC2OS_CNT 0x12510 + /************************* Port Registers ************************************/ /* port cfg Registers */ #define WX_CFG_PORT_CTL 0x14400 @@ -94,6 +113,9 @@ #define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */ #define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) #define WX_TDM_RP_IDX 0x1820C +#define WX_TDM_PKT_CNT 0x18308 +#define WX_TDM_BYTE_CNT_LSB 0x1830C +#define WX_TDM_OS2BMC_CNT 0x18314 #define WX_TDM_RP_RATE 0x18404 /***************************** RDB registers *********************************/ @@ -106,6 +128,8 @@ /* statistic */ #define WX_RDB_PFCMACDAL 0x19210 #define WX_RDB_PFCMACDAH 0x19214 +#define WX_RDB_LXOFFTXC 0x19218 +#define WX_RDB_LXONTXC 0x1921C /* ring assignment */ #define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) #define WX_RDB_PL_CFG_L4HDR BIT(1) @@ -218,6 +242,8 @@ #define WX_MNG_MBOX_CTL 0x1E044 #define WX_MNG_MBOX_CTL_SWRDY BIT(0) #define WX_MNG_MBOX_CTL_FWRDY BIT(2) +#define WX_MNG_BMC2OS_CNT 0x1E090 +#define WX_MNG_OS2BMC_CNT 0x1E094 /************************************* ETH MAC *****************************/ #define WX_MAC_TX_CFG 0x11000 @@ -300,6 +326,7 @@ enum WX_MSCA_CMD_value { #define WX_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) #define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) #define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) +#define WX_PX_MPRC(_i) (0x01020 + ((_i) * 0x40)) /* PX_RR_CFG bit definitions */ #define WX_PX_RR_CFG_VLAN BIT(31) #define WX_PX_RR_CFG_SPLIT_MODE BIT(26) @@ -766,9 +793,16 @@ struct wx_queue_stats { u64 bytes; }; +struct wx_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; +}; + struct wx_rx_queue_stats { + u64 non_eop_descs; u64 csum_good_cnt; u64 csum_err; + u64 alloc_rx_buff_failed; }; /* iterator for handling rings in ring container */ @@ -812,6 +846,7 
@@ struct wx_ring { struct wx_queue_stats stats; struct u64_stats_sync syncp; union { + struct wx_tx_queue_stats tx_stats; struct wx_rx_queue_stats rx_stats; }; } ____cacheline_internodealigned_in_smp; @@ -843,6 +878,33 @@ enum wx_isb_idx { WX_ISB_MAX }; +/* Statistics counters collected by the MAC */ +struct wx_hw_stats { + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 tpr; + u64 tpt; + u64 bprc; + u64 bptc; + u64 mprc; + u64 mptc; + u64 roc; + u64 ruc; + u64 lxonoffrxc; + u64 lxontxc; + u64 lxofftxc; + u64 o2bgptc; + u64 b2ospc; + u64 o2bspc; + u64 b2ogprc; + u64 rdmdrop; + u64 crcerrs; + u64 rlec; + u64 qmprc; +}; + struct wx { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -918,6 +980,14 @@ struct wx { u32 wol; u16 bd_number; + + struct wx_hw_stats stats; + u64 tx_busy; + u64 non_eop_descs; + u64 restart_queue; + u64 hw_csum_rx_good; + u64 hw_csum_rx_error; + u64 alloc_rx_buff_failed; }; #define WX_INTR_ALL (~0ULL) @@ -951,6 +1021,17 @@ wr32m(struct wx *wx, u32 reg, u32 mask, u32 field) wr32(wx, reg, val); } +static inline u64 +rd64(struct wx *wx, u32 reg) +{ + u64 lsb, msb; + + lsb = rd32(wx, reg); + msb = rd32(wx, reg + 4); + + return (lsb | msb << 32); +} + /* On some domestic CPU platforms, sometimes IO is not synchronized with * flushing memory, here use readl() to flush PCI read and write. */ -- Gitee From 259932c766d8a0f1623071f3615ce90f8f82e0cb Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Tue, 12 Sep 2023 11:14:24 +0800 Subject: [PATCH 0534/2138] net: wangxun: move MDIO bus implementation to the library ANBZ: #8484 commit f557524029458ab7dd1c6077f35fa23fbb744356 upstream. Move similar code of accessing MDIO bus from txgbe/ngbe to libwx. 
Signed-off-by: Jiawen Wu Reviewed-by: Simon Horman Signed-off-by: Paolo Abeni Link: https://lore.kernel.org/r/20230912031424.721386-1-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- drivers/net/ethernet/wangxun/libwx/wx_hw.c | 92 ++++++++++++++ drivers/net/ethernet/wangxun/libwx/wx_hw.h | 7 ++ drivers/net/ethernet/wangxun/libwx/wx_type.h | 1 + drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c | 119 +----------------- drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 3 - .../net/ethernet/wangxun/txgbe/txgbe_phy.c | 56 +-------- 6 files changed, 106 insertions(+), 172 deletions(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index ec92e0023ff5..533e912af089 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -12,6 +12,98 @@ #include "wx_lib.h" #include "wx_hw.h" +static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + u32 command, val; + int ret; + + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY; + if (wx->mac.type == wx_mac_em) + command |= WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) { + wx_err(wx, "Mdio read c22 command did not complete.\n"); + return ret; + } + + return (u16)rd32(wx, WX_MSCC); +} + +static int wx_phy_write_reg_mdi(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + u32 command, val; + int ret; + + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + 
command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY; + if (wx->mac.type == wx_mac_em) + command |= WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) + wx_err(wx, "Mdio write c22 command did not complete.\n"); + + return ret; +} + +int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF); + return wx_phy_read_reg_mdi(bus, phy_addr, 0, regnum); +} +EXPORT_SYMBOL(wx_phy_read_reg_mdi_c22); + +int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF); + return wx_phy_write_reg_mdi(bus, phy_addr, 0, regnum, value); +} +EXPORT_SYMBOL(wx_phy_write_reg_mdi_c22); + +int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0); + return wx_phy_read_reg_mdi(bus, phy_addr, devnum, regnum); +} +EXPORT_SYMBOL(wx_phy_read_reg_mdi_c45); + +int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0); + return wx_phy_write_reg_mdi(bus, phy_addr, devnum, regnum, value); +} +EXPORT_SYMBOL(wx_phy_write_reg_mdi_c45); + static void wx_intr_disable(struct wx *wx, u64 qmask) { u32 mask; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index d3a0c65ef3ef..12c20a7c364d 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -4,6 +4,13 @@ #ifndef _WX_HW_H_ #define _WX_HW_H_ +#include + +int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum); +int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value); +int 
wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum); +int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value); void wx_intr_enable(struct wx *wx, u64 qmask); void wx_irq_disable(struct wx *wx); int wx_check_flash_load(struct wx *wx, u32 check_bit); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 5386a418d3bc..83f9bb7b3c22 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -277,6 +277,7 @@ enum WX_MSCA_CMD_value { #define WX_MSCC_SADDR BIT(18) #define WX_MSCC_BUSY BIT(22) #define WX_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v) +#define WX_MDIO_CLAUSE_SELECT 0x11220 #define WX_MMC_CONTROL 0x11800 #define WX_MMC_CONTROL_RSTONRD BIT(2) /* reset on read */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c index 5007addd119a..2afae24c0c69 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -29,117 +29,6 @@ static int ngbe_phy_write_reg_internal(struct mii_bus *bus, int phy_addr, int re return 0; } -static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) -{ - u32 command, val, device_type = 0; - struct wx *wx = bus->priv; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(device_type); - wr32(wx, WX_MSCA, command); - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c22 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int 
ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) -{ - u32 command, val, device_type = 0; - struct wx *wx = bus->priv; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(device_type); - wr32(wx, WX_MSCA, command); - command = value | - WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c22 command did not complete.\n"); - - return ret; -} - -static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) -{ - struct wx *wx = bus->priv; - u32 val, command; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c45 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, - int devnum, int regnum, u16 value) -{ - struct wx *wx = bus->priv; - int ret, command; - u16 val; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - command = value | - WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, 
!(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c45 command did not complete.\n"); - - return ret; -} - static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) { struct wx *wx = bus->priv; @@ -148,7 +37,7 @@ static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) if (wx->mac_type == em_mac_type_mdi) phy_data = ngbe_phy_read_reg_internal(bus, phy_addr, regnum); else - phy_data = ngbe_phy_read_reg_mdi_c22(bus, phy_addr, regnum); + phy_data = wx_phy_read_reg_mdi_c22(bus, phy_addr, regnum); return phy_data; } @@ -162,7 +51,7 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr, if (wx->mac_type == em_mac_type_mdi) ret = ngbe_phy_write_reg_internal(bus, phy_addr, regnum, value); else - ret = ngbe_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); + ret = wx_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); return ret; } @@ -266,8 +155,8 @@ int ngbe_mdio_init(struct wx *wx) mii_bus->priv = wx; if (wx->mac_type == em_mac_type_rgmii) { - mii_bus->read_c45 = ngbe_phy_read_reg_mdi_c45; - mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45; + mii_bus->read_c45 = wx_phy_read_reg_mdi_c45; + mii_bus->write_c45 = wx_phy_write_reg_mdi_c45; } snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", pci_dev_id(pdev)); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index 72c8cd2d5575..ff754d69bdf6 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -59,9 +59,6 @@ #define NGBE_EEPROM_VERSION_L 0x1D #define NGBE_EEPROM_VERSION_H 0x1E -/* Media-dependent registers. 
*/ -#define NGBE_MDIO_CLAUSE_SELECT 0x11220 - /* GPIO Registers */ #define NGBE_GPIO_DR 0x14800 #define NGBE_GPIO_DDR 0x14804 diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 4159c84035fd..b6c06adb8656 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -647,58 +647,6 @@ static int txgbe_sfp_register(struct txgbe *txgbe) return 0; } -static int txgbe_phy_read(struct mii_bus *bus, int phy_addr, - int devnum, int regnum) -{ - struct wx *wx = bus->priv; - u32 val, command; - int ret; - - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY; - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c45 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int txgbe_phy_write(struct mii_bus *bus, int phy_addr, - int devnum, int regnum, u16 value) -{ - struct wx *wx = bus->priv; - int ret, command; - u16 val; - - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - - command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY; - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c45 command did not complete.\n"); - - return ret; -} - static int txgbe_ext_phy_init(struct txgbe *txgbe) { struct phy_device *phydev; @@ -715,8 +663,8 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe) return -ENOMEM; mii_bus->name = "txgbe_mii_bus"; - mii_bus->read_c45 = 
&txgbe_phy_read; - mii_bus->write_c45 = &txgbe_phy_write; + mii_bus->read_c45 = &wx_phy_read_reg_mdi_c45; + mii_bus->write_c45 = &wx_phy_write_reg_mdi_c45; mii_bus->parent = &pdev->dev; mii_bus->phy_mask = GENMASK(31, 1); mii_bus->priv = wx; -- Gitee From e1d9fd18eda2c4a5ebfd80751a8efbd15ba18444 Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 11 Oct 2023 17:19:05 +0800 Subject: [PATCH 0535/2138] net: txgbe: add ethtool stats support ANBZ: #8484 commit 9224ade6539096585d35378fe2817b10b2bd7dc5 upstream. Support to show ethtool statistics. Signed-off-by: Jiawen Wu Signed-off-by: Jakub Kicinski Link: https://lore.kernel.org/r/20231011091906.70486-3-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 5 +++++ drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 2 ++ drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 2 ++ 3 files changed, 9 insertions(+) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 859da112586a..3f336a088e43 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -39,6 +39,11 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .get_link = ethtool_op_get_link, .get_link_ksettings = txgbe_get_link_ksettings, .set_link_ksettings = txgbe_set_link_ksettings, + .get_sset_count = wx_get_sset_count, + .get_strings = wx_get_strings, + .get_ethtool_stats = wx_get_ethtool_stats, + .get_eth_mac_stats = wx_get_mac_stats, + .get_pause_stats = wx_get_pause_stats, }; void txgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 372745250270..474d55524e82 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -306,6 +306,8 @@ int txgbe_reset_hw(struct wx 
*wx) txgbe_reset_misc(wx); + wx_clear_hw_cntrs(wx); + /* Store the permanent mac address */ wx_get_mac_addr(wx, wx->mac.perm_addr); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 2482b661bc99..15fe3725670c 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -286,6 +286,8 @@ static void txgbe_disable_device(struct wx *wx) /* Disable the Tx DMA engine */ wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0); + + wx_update_stats(wx); } static void txgbe_down(struct wx *wx) -- Gitee From e6ab3e6e509184b8a6569200802c69cc8debdbfe Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 11 Oct 2023 17:19:06 +0800 Subject: [PATCH 0536/2138] net: ngbe: add ethtool stats support ANBZ: #8484 commit 0a2714d5e2d3bf57f42d2ee58a04416a42f84f89 upstream. Support to show ethtool statistics. Signed-off-by: Jiawen Wu Signed-off-by: Jakub Kicinski Link: https://lore.kernel.org/r/20231011091906.70486-4-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 5 +++++ drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c | 2 ++ drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 2 ++ 3 files changed, 9 insertions(+) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index ec0e869e9aac..afbdf6919071 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -49,6 +49,11 @@ static const struct ethtool_ops ngbe_ethtool_ops = { .nway_reset = phy_ethtool_nway_reset, .get_wol = ngbe_get_wol, .set_wol = ngbe_set_wol, + .get_sset_count = wx_get_sset_count, + .get_strings = wx_get_strings, + .get_ethtool_stats = wx_get_ethtool_stats, + .get_eth_mac_stats = wx_get_mac_stats, + .get_pause_stats = wx_get_pause_stats, }; void ngbe_set_ethtool_ops(struct net_device *netdev) 
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c index 6562a2de9527..6459bc1d7c22 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c @@ -85,6 +85,8 @@ int ngbe_reset_hw(struct wx *wx) } ngbe_reset_misc(wx); + wx_clear_hw_cntrs(wx); + /* Store the permanent mac address */ wx_get_mac_addr(wx, wx->mac.perm_addr); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index 2085b9c38a15..c2a2b6ef42dc 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -330,6 +330,8 @@ static void ngbe_disable_device(struct wx *wx) wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); } + + wx_update_stats(wx); } static void ngbe_down(struct wx *wx) -- Gitee From 4947f6662df86e466820ce2ab5e830b348b95e4a Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Tue, 17 Oct 2023 18:06:35 +0800 Subject: [PATCH 0537/2138] net: wangxun: remove redundant kernel log ANBZ: #8484 commit 48e44287c6537e736baa2e1d7be520d6ec91840a upstream. Since PBA info can be read from lspci, delete txgbe_read_pba_string() and the prints. In addition, delete the redundant MAC address printing. 
Signed-off-by: Jiawen Wu Reviewed-by: Simon Horman Reviewed-by: Andrew Lunn Signed-off-by: Jakub Kicinski Link: https://lore.kernel.org/r/20231017100635.154967-1-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 5 - drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c | 108 ------------------ drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h | 1 - .../net/ethernet/wangxun/txgbe/txgbe_main.c | 8 -- .../net/ethernet/wangxun/txgbe/txgbe_type.h | 6 - 5 files changed, 128 deletions(-) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index c2a2b6ef42dc..a5c623fd023e 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -678,11 +678,6 @@ static int ngbe_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, wx); - netif_info(wx, probe, netdev, - "PHY: %s, PBA No: Wang Xun GbE Family Controller\n", - wx->mac_type == em_mac_type_mdi ? "Internal" : "External"); - netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); - return 0; err_register: diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 474d55524e82..d6b2b3c781b6 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -70,114 +70,6 @@ static void txgbe_init_thermal_sensor_thresh(struct wx *wx) wr32(wx, WX_TS_DALARM_THRE, 614); } -/** - * txgbe_read_pba_string - Reads part number string from EEPROM - * @wx: pointer to hardware structure - * @pba_num: stores the part number string from the EEPROM - * @pba_num_size: part number string buffer length - * - * Reads the part number string from the EEPROM. 
- **/ -int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size) -{ - u16 pba_ptr, offset, length, data; - int ret_val; - - if (!pba_num) { - wx_err(wx, "PBA string buffer was null\n"); - return -EINVAL; - } - - ret_val = wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, - &data); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - ret_val = wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, - &pba_ptr); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - /* if data is not ptr guard the PBA must be in legacy format which - * means pba_ptr is actually our second data word for the PBA number - * and we can decode it into an ascii string - */ - if (data != TXGBE_PBANUM_PTR_GUARD) { - wx_err(wx, "NVM PBA number is not stored as string\n"); - - /* we will need 11 characters to store the PBA */ - if (pba_num_size < 11) { - wx_err(wx, "PBA string buffer too small\n"); - return -ENOMEM; - } - - /* extract hex string from data and pba_ptr */ - pba_num[0] = (data >> 12) & 0xF; - pba_num[1] = (data >> 8) & 0xF; - pba_num[2] = (data >> 4) & 0xF; - pba_num[3] = data & 0xF; - pba_num[4] = (pba_ptr >> 12) & 0xF; - pba_num[5] = (pba_ptr >> 8) & 0xF; - pba_num[6] = '-'; - pba_num[7] = 0; - pba_num[8] = (pba_ptr >> 4) & 0xF; - pba_num[9] = pba_ptr & 0xF; - - /* put a null character on the end of our string */ - pba_num[10] = '\0'; - - /* switch all the data but the '-' to hex char */ - for (offset = 0; offset < 10; offset++) { - if (pba_num[offset] < 0xA) - pba_num[offset] += '0'; - else if (pba_num[offset] < 0x10) - pba_num[offset] += 'A' - 0xA; - } - - return 0; - } - - ret_val = wx_read_ee_hostif(wx, pba_ptr, &length); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - if (length == 0xFFFF || length == 0) { - wx_err(wx, "NVM PBA number section invalid length\n"); - return -EINVAL; - } - - /* check if pba_num buffer is big enough */ 
- if (pba_num_size < (((u32)length * 2) - 1)) { - wx_err(wx, "PBA string buffer too small\n"); - return -ENOMEM; - } - - /* trim pba length from start of string */ - pba_ptr++; - length--; - - for (offset = 0; offset < length; offset++) { - ret_val = wx_read_ee_hostif(wx, pba_ptr + offset, &data); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - pba_num[offset * 2] = (u8)(data >> 8); - pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); - } - pba_num[offset * 2] = '\0'; - - return 0; -} - /** * txgbe_calc_eeprom_checksum - Calculates and returns the checksum * @wx: pointer to hardware structure diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h index abc729eb187a..1f3ecf60e3c4 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -6,7 +6,6 @@ int txgbe_disable_sec_tx_path(struct wx *wx); void txgbe_enable_sec_tx_path(struct wx *wx); -int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size); int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val); int txgbe_reset_hw(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 15fe3725670c..a78da2309db5 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -538,7 +538,6 @@ static int txgbe_probe(struct pci_dev *pdev, u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0; u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; u16 build = 0, major = 0, patch = 0; - u8 part_str[TXGBE_PBANUM_LENGTH]; u32 etrack_id = 0; err = pci_enable_device_mem(pdev); @@ -737,13 +736,6 @@ static int txgbe_probe(struct pci_dev *pdev, else dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n"); - /* First try to read PBA as a string */ - err = txgbe_read_pba_string(wx, part_str, TXGBE_PBANUM_LENGTH); - if (err) - strncpy(part_str, "Unknown", 
TXGBE_PBANUM_LENGTH); - - netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); - return 0; err_remove_phy: diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 51199c355f95..3ba9ce43f394 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -88,9 +88,6 @@ #define TXGBE_XPCS_IDA_ADDR 0x13000 #define TXGBE_XPCS_IDA_DATA 0x13004 -/* Part Number String Length */ -#define TXGBE_PBANUM_LENGTH 32 - /* Checksum and EEPROM pointers */ #define TXGBE_EEPROM_LAST_WORD 0x800 #define TXGBE_EEPROM_CHECKSUM 0x2F @@ -98,9 +95,6 @@ #define TXGBE_EEPROM_VERSION_L 0x1D #define TXGBE_EEPROM_VERSION_H 0x1E #define TXGBE_ISCSI_BOOT_CONFIG 0x07 -#define TXGBE_PBANUM0_PTR 0x05 -#define TXGBE_PBANUM1_PTR 0x06 -#define TXGBE_PBANUM_PTR_GUARD 0xFAFA #define TXGBE_MAX_MSIX_VECTORS 64 #define TXGBE_MAX_FDIR_INDICES 63 -- Gitee From 29686e45635e514ace226ffe182ef8572fd4d96b Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 3 Jan 2024 10:08:47 +0800 Subject: [PATCH 0538/2138] net: libwx: add phylink to libwx ANBZ: #8484 commit e8e138cf7383cf820419fcbec63992e75a01467b upstream. For the following implementation, add struct phylink and phylink_config to wx structure. Add the helper function for converting phylink to wx, implement ethtool ksetting and nway reset in libwx. Signed-off-by: Jiawen Wu Reviewed-by: Russell King (Oracle) Signed-off-by: David S. 
Miller Link: https://lore.kernel.org/all/20240103020854.1656604-2-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- .../net/ethernet/wangxun/libwx/wx_ethtool.c | 26 +++++++++++++++++++ .../net/ethernet/wangxun/libwx/wx_ethtool.h | 5 ++++ drivers/net/ethernet/wangxun/libwx/wx_type.h | 8 ++++++ 3 files changed, 39 insertions(+) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index ddc5f6d20b9c..12feb8a5ee75 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -185,3 +185,29 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) } } EXPORT_SYMBOL(wx_get_drvinfo); + +int wx_nway_reset(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_nway_reset(wx->phylink); +} +EXPORT_SYMBOL(wx_nway_reset); + +int wx_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_ksettings_get(wx->phylink, cmd); +} +EXPORT_SYMBOL(wx_get_link_ksettings); + +int wx_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_ksettings_set(wx->phylink, cmd); +} +EXPORT_SYMBOL(wx_set_link_ksettings); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index 16d1a09369a6..f15cc445ae0f 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -13,4 +13,9 @@ void wx_get_mac_stats(struct net_device *netdev, void wx_get_pause_stats(struct net_device *netdev, struct ethtool_pause_stats *stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info); +int wx_nway_reset(struct net_device *netdev); +int 
wx_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd); +int wx_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 83f9bb7b3c22..5b064c434053 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #define WX_NCSI_SUP 0x8000 @@ -939,6 +940,8 @@ struct wx { int speed; int duplex; struct phy_device *phydev; + struct phylink *phylink; + struct phylink_config phylink_config; bool wol_hw_supported; bool ncsi_enabled; @@ -1044,4 +1047,9 @@ rd64(struct wx *wx, u32 reg) #define wx_dbg(wx, fmt, arg...) \ dev_dbg(&(wx)->pdev->dev, fmt, ##arg) +static inline struct wx *phylink_to_wx(struct phylink_config *config) +{ + return container_of(config, struct wx, phylink_config); +} + #endif /* _WX_TYPE_H_ */ -- Gitee From f995c73cdd6073d7084fc46402e8b1e0f9faa008 Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 3 Jan 2024 10:08:48 +0800 Subject: [PATCH 0539/2138] net: txgbe: use phylink bits added in libwx ANBZ: #8484 commit 4491c602fe5f3a248cc8a2ed4180aacdc2162365 upstream. Convert txgbe to use phylink and phylink_config added in libwx. Signed-off-by: Jiawen Wu Reviewed-by: Russell King (Oracle) Signed-off-by: David S. 
Miller Link: https://lore.kernel.org/all/20231222101639.1499997-3-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 29 ++----------- .../net/ethernet/wangxun/txgbe/txgbe_main.c | 8 +--- .../net/ethernet/wangxun/txgbe/txgbe_phy.c | 43 +++++++++---------- .../net/ethernet/wangxun/txgbe/txgbe_type.h | 8 ---- 4 files changed, 26 insertions(+), 62 deletions(-) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 3f336a088e43..60f351a3b89d 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -10,35 +10,12 @@ #include "txgbe_type.h" #include "txgbe_ethtool.h" -static int txgbe_nway_reset(struct net_device *netdev) -{ - struct txgbe *txgbe = netdev_to_txgbe(netdev); - - return phylink_ethtool_nway_reset(txgbe->phylink); -} - -static int txgbe_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) -{ - struct txgbe *txgbe = netdev_to_txgbe(netdev); - - return phylink_ethtool_ksettings_get(txgbe->phylink, cmd); -} - -static int txgbe_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *cmd) -{ - struct txgbe *txgbe = netdev_to_txgbe(netdev); - - return phylink_ethtool_ksettings_set(txgbe->phylink, cmd); -} - static const struct ethtool_ops txgbe_ethtool_ops = { .get_drvinfo = wx_get_drvinfo, - .nway_reset = txgbe_nway_reset, + .nway_reset = wx_nway_reset, .get_link = ethtool_op_get_link, - .get_link_ksettings = txgbe_get_link_ksettings, - .set_link_ksettings = txgbe_set_link_ksettings, + .get_link_ksettings = wx_get_link_ksettings, + .set_link_ksettings = wx_set_link_ksettings, .get_sset_count = wx_get_sset_count, .get_strings = wx_get_strings, .get_ethtool_stats = wx_get_ethtool_stats, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c 
b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index a78da2309db5..1007ae2541ce 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -206,7 +206,6 @@ static int txgbe_request_irq(struct wx *wx) static void txgbe_up_complete(struct wx *wx) { struct net_device *netdev = wx->netdev; - struct txgbe *txgbe; wx_control_hw(wx, true); wx_configure_vectors(wx); @@ -215,8 +214,7 @@ static void txgbe_up_complete(struct wx *wx) smp_mb__before_atomic(); wx_napi_enable_all(wx); - txgbe = netdev_to_txgbe(netdev); - phylink_start(txgbe->phylink); + phylink_start(wx->phylink); /* clear any pending interrupts, may auto mask */ rd32(wx, WX_PX_IC(0)); @@ -292,11 +290,9 @@ static void txgbe_disable_device(struct wx *wx) static void txgbe_down(struct wx *wx) { - struct txgbe *txgbe = netdev_to_txgbe(wx->netdev); - txgbe_disable_device(wx); txgbe_reset(wx); - phylink_stop(txgbe->phylink); + phylink_stop(wx->phylink); wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index b6c06adb8656..3c0524d19866 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -159,7 +159,8 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe) static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *config, phy_interface_t interface) { - struct txgbe *txgbe = netdev_to_txgbe(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); + struct txgbe *txgbe = wx->priv; if (interface == PHY_INTERFACE_MODE_10GBASER) return &txgbe->xpcs->pcs; @@ -175,7 +176,7 @@ static void txgbe_mac_config(struct phylink_config *config, unsigned int mode, static void txgbe_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); wr32m(wx, 
WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); } @@ -186,7 +187,7 @@ static void txgbe_mac_link_up(struct phylink_config *config, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); u32 txcfg, wdg; txcfg = rd32(wx, WX_MAC_TX_CFG); @@ -217,7 +218,7 @@ static void txgbe_mac_link_up(struct phylink_config *config, static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); @@ -228,7 +229,7 @@ static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, static int txgbe_mac_finish(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); txgbe_enable_sec_tx_path(wx); wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); @@ -253,10 +254,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe) phy_interface_t phy_mode; struct phylink *phylink; - config = devm_kzalloc(&wx->pdev->dev, sizeof(*config), GFP_KERNEL); - if (!config) - return -ENOMEM; - + config = &wx->phylink_config; config->dev = &wx->netdev->dev; config->type = PHYLINK_NETDEV; config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD | @@ -287,7 +285,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe) } } - txgbe->phylink = phylink; + wx->phylink = phylink; return 0; } @@ -483,7 +481,7 @@ static void txgbe_irq_handler(struct irq_desc *desc) TXGBE_PX_MISC_ETH_AN)) { u32 reg = rd32(wx, TXGBE_CFG_PORT_ST); - phylink_mac_change(txgbe->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP)); + phylink_mac_change(wx->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP)); } /* unmask interrupt */ @@ -701,6 +699,7 @@ static int txgbe_ext_phy_init(struct txgbe 
*txgbe) int txgbe_init_phy(struct txgbe *txgbe) { + struct wx *wx = txgbe->wx; int ret; if (txgbe->wx->media_type == sp_media_copper) @@ -708,43 +707,43 @@ int txgbe_init_phy(struct txgbe *txgbe) ret = txgbe_swnodes_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to register software nodes\n"); + wx_err(wx, "failed to register software nodes\n"); return ret; } ret = txgbe_mdio_pcs_init(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init mdio pcs: %d\n", ret); + wx_err(wx, "failed to init mdio pcs: %d\n", ret); goto err_unregister_swnode; } ret = txgbe_phylink_init(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init phylink\n"); + wx_err(wx, "failed to init phylink\n"); goto err_destroy_xpcs; } ret = txgbe_gpio_init(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init gpio\n"); + wx_err(wx, "failed to init gpio\n"); goto err_destroy_phylink; } ret = txgbe_clock_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to register clock: %d\n", ret); + wx_err(wx, "failed to register clock: %d\n", ret); goto err_destroy_phylink; } ret = txgbe_i2c_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init i2c interface: %d\n", ret); + wx_err(wx, "failed to init i2c interface: %d\n", ret); goto err_unregister_clk; } ret = txgbe_sfp_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to register sfp\n"); + wx_err(wx, "failed to register sfp\n"); goto err_unregister_i2c; } @@ -756,7 +755,7 @@ int txgbe_init_phy(struct txgbe *txgbe) clkdev_drop(txgbe->clock); clk_unregister(txgbe->clk); err_destroy_phylink: - phylink_destroy(txgbe->phylink); + phylink_destroy(wx->phylink); err_destroy_xpcs: xpcs_destroy(txgbe->xpcs); err_unregister_swnode: @@ -768,8 +767,8 @@ int txgbe_init_phy(struct txgbe *txgbe) void txgbe_remove_phy(struct txgbe *txgbe) { if (txgbe->wx->media_type == sp_media_copper) { - phylink_disconnect_phy(txgbe->phylink); - phylink_destroy(txgbe->phylink); + phylink_disconnect_phy(txgbe->wx->phylink); + phylink_destroy(txgbe->wx->phylink); 
return; } @@ -777,7 +776,7 @@ void txgbe_remove_phy(struct txgbe *txgbe) platform_device_unregister(txgbe->i2c_dev); clkdev_drop(txgbe->clock); clk_unregister(txgbe->clk); - phylink_destroy(txgbe->phylink); + phylink_destroy(txgbe->wx->phylink); xpcs_destroy(txgbe->xpcs); software_node_unregister_node_group(txgbe->nodes.group); } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 3ba9ce43f394..5494ea88df0a 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -129,13 +129,6 @@ extern char txgbe_driver_name[]; -static inline struct txgbe *netdev_to_txgbe(struct net_device *netdev) -{ - struct wx *wx = netdev_priv(netdev); - - return wx->priv; -} - #define NODE_PROP(_NAME, _PROP) \ (const struct software_node) { \ .name = _NAME, \ @@ -175,7 +168,6 @@ struct txgbe { struct wx *wx; struct txgbe_nodes nodes; struct dw_xpcs *xpcs; - struct phylink *phylink; struct platform_device *sfp_dev; struct platform_device *i2c_dev; struct clk_lookup *clock; -- Gitee From cc27de72bfd54201905d6e17d6ef52beb5d42218 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 11 Jan 2024 17:27:53 +0100 Subject: [PATCH 0540/2138] wangxun: select CONFIG_PHYLINK where needed ANBZ: #8484 commit b3739fb3a9e6633b233d829ee799323d75162775 upstream. 
The ngbe driver needs phylink: arm-linux-gnueabi-ld: drivers/net/ethernet/wangxun/libwx/wx_ethtool.o: in function `wx_nway_reset': wx_ethtool.c:(.text+0x458): undefined reference to `phylink_ethtool_nway_reset' arm-linux-gnueabi-ld: drivers/net/ethernet/wangxun/ngbe/ngbe_main.o: in function `ngbe_remove': ngbe_main.c:(.text+0x7c): undefined reference to `phylink_destroy' arm-linux-gnueabi-ld: drivers/net/ethernet/wangxun/ngbe/ngbe_main.o: in function `ngbe_open': ngbe_main.c:(.text+0xf90): undefined reference to `phylink_connect_phy' arm-linux-gnueabi-ld: drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.o: in function `ngbe_mdio_init': ngbe_mdio.c:(.text+0x314): undefined reference to `phylink_create' Add the missing Kconfig description for this. Fixes: bc2426d74aa3 ("net: ngbe: convert phylib to phylink") Signed-off-by: Arnd Bergmann Reviewed-by: Andrew Lunn Signed-off-by: Jakub Kicinski Link: https://lore.kernel.org/r/20240111162828.68564-1-arnd@kernel.org Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- drivers/net/ethernet/wangxun/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig index 23cd610bd376..85cdbdd44fec 100644 --- a/drivers/net/ethernet/wangxun/Kconfig +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -26,7 +26,7 @@ config NGBE tristate "Wangxun(R) GbE PCI Express adapters support" depends on PCI select LIBWX - select PHYLIB + select PHYLINK help This driver supports Wangxun(R) GbE PCI Express family of adapters. -- Gitee From be34b42c1f798f77a1f06849122ac6af9b58b2f2 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 8 Mar 2024 15:42:09 +0000 Subject: [PATCH 0541/2138] x86/resctrl: Fix allocation of cleanest CLOSID on platforms with no monitors ANBZ: #8626 commit 21f4744007624235765b7b27798bf4de86da99d8 morse-linux. 
commit 6eac36bb9eb0 ("x86/resctrl: Allocate the cleanest CLOSID by searching closid_num_dirty_rmid") added a Kconfig option that causes resctrl to search for the CLOSID with the fewest dirty cache lines when creating a new control group. This depends on the values read from the llc_occupancy counters. This support missed that some platforms may not have these counters. This causes a NULL pointer dereference when creating a new control group as the array was not allocated by dom_data_init(). As this feature isn't necessary on platforms that don't have cache occupancy monitors, add this to the check that occurs when a new control group is allocated. The existing code is not selected by any upstream platform, it makes no sense to backport this patch to stable. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 011e17efb1a6..1767c1affa60 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -149,7 +149,8 @@ static int closid_alloc(void) lockdep_assert_held(&rdtgroup_mutex); - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) && + is_llc_occupancy_enabled()) { cleanest_closid = resctrl_find_cleanest_closid(); if (cleanest_closid < 0) return cleanest_closid; -- Gitee From a91c1a6efa959265c7a868f20c1f4f0463789e96 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2019 10:45:43 +0000 Subject: [PATCH 0542/2138] x86/resctrl: Add a helper to avoid reaching into the arch code resource list ANBZ: #8626 commit 5b1da243d13f6237f332163d1bae325c36648a6b morse-linux. 
Resctrl occasionally wants to know something about a specific resource, in these cases it reaches into the arch code's rdt_resources_all[] array. Once the filesystem parts of resctrl are moved to /fs/, this means it will need visibility of the architecture specific struct resctrl_hw_resource definition, and the array of all resources. All architectures would also need a r_resctrl member in this struct. Instead, abstract this via a helper to allow architectures to do different things here. Move the level enum to the resctrl header and add a helper to retrieve the struct rdt_resource by 'rid'. resctrl_arch_get_resource() should not return NULL for any value in the enum, it may instead return a dummy resource that is !alloc_enabled && !mon_enabled. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 10 +++++++++- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 2 +- arch/x86/kernel/cpu/resctrl/internal.h | 10 ---------- arch/x86/kernel/cpu/resctrl/monitor.c | 8 ++++---- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 15 +++++++-------- include/linux/resctrl.h | 17 +++++++++++++++++ 6 files changed, 38 insertions(+), 24 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index cab67782f19d..0b38d92d1249 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -122,6 +122,14 @@ struct rdt_hw_resource rdt_resources_all[] = { }, }; +struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l) +{ + if (l >= RDT_NUM_RESOURCES) + return NULL; + + return &rdt_resources_all[l].r_resctrl; +} + /* * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs * as they do not have CPUID enumeration support for Cache allocation. 
@@ -169,7 +177,7 @@ static inline void cache_alloc_hsw_probe(void) bool is_mba_sc(struct rdt_resource *r) { if (!r) - return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc; + r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); /* * The software controller support is only applicable to MBA resource. diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index 8bd717222f0c..e2634a3263a6 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -604,7 +604,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg) domid = md.u.domid; evtid = md.u.evtid; - r = &rdt_resources_all[resid].r_resctrl; + r = resctrl_arch_get_resource(resid); d = rdt_find_domain(r, domid, NULL); if (IS_ERR_OR_NULL(d)) { ret = -ENOENT; diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index c99f26ebe7a6..65990def6c79 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -466,16 +466,6 @@ extern struct rdt_hw_resource rdt_resources_all[]; extern struct rdtgroup rdtgroup_default; extern struct dentry *debugfs_resctrl; -enum resctrl_res_level { - RDT_RESOURCE_L3, - RDT_RESOURCE_L2, - RDT_RESOURCE_MBA, - RDT_RESOURCE_SMBA, - - /* Must be the last */ - RDT_NUM_RESOURCES, -}; - static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(res); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index c34a35ec0f03..06565153ceb2 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -321,7 +321,7 @@ static void limbo_release_entry(struct rmid_entry *entry) */ void __check_limbo(struct rdt_domain *d, bool force_free) { - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); u32 
idx_limit = resctrl_arch_system_num_rmid_idx(); struct rmid_entry *entry; u32 idx, cur_idx = 1; @@ -467,7 +467,7 @@ int alloc_rmid(u32 closid) static void add_rmid_to_limbo(struct rmid_entry *entry) { - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); struct rdt_domain *d; u32 idx; @@ -669,7 +669,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) if (!is_mbm_local_enabled()) return; - r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; + r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA); closid = rgrp->closid; rmid = rgrp->mon.rmid; @@ -839,7 +839,7 @@ void mbm_handle_overflow(struct work_struct *work) if (!resctrl_mounted || !resctrl_arch_mon_capable()) goto out_unlock; - r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; + r = resctrl_arch_get_resource(RDT_RESOURCE_L3); d = container_of(work, struct rdt_domain, mbm_over.work); list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 1767c1affa60..45372b6a6215 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2253,7 +2253,7 @@ static void l2_qos_cfg_update(void *arg) static inline bool is_mba_linear(void) { - return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear; + return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear; } static int set_cache_qos_cfg(int level, bool enable) @@ -2341,7 +2341,7 @@ static void mba_sc_domain_destroy(struct rdt_resource *r, */ static bool supports_mba_mbps(void) { - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); return (is_mbm_local_enabled() && r->alloc_capable && is_mba_linear()); @@ -2353,7 +2353,7 @@ static bool supports_mba_mbps(void) */ static int set_mba_sc(bool mba_sc) 
{ - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); u32 num_closid = resctrl_arch_get_num_closid(r); struct rdt_domain *d; int i; @@ -2625,10 +2625,10 @@ static void schemata_list_destroy(void) static int rdt_get_tree(struct fs_context *fc) { + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); struct rdt_fs_context *ctx = rdt_fc2context(fc); unsigned long flags = RFTYPE_CTRL_BASE; struct rdt_domain *dom; - struct rdt_resource *r; int ret; cpus_read_lock(); @@ -2701,8 +2701,7 @@ static int rdt_get_tree(struct fs_context *fc) resctrl_mounted = true; if (is_mbm_enabled()) { - r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; - list_for_each_entry(dom, &r->domains, list) + list_for_each_entry(dom, &l3->domains, list) mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL, RESCTRL_PICK_ANY_CPU); } @@ -3878,7 +3877,7 @@ static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) seq_puts(seq, ",cdpl2"); - if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl)) + if (is_mba_sc(resctrl_arch_get_resource(RDT_RESOURCE_MBA))) seq_puts(seq, ",mba_MBps"); if (resctrl_debug) @@ -4068,7 +4067,7 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) void resctrl_offline_cpu(unsigned int cpu) { - struct rdt_resource *l3 = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); struct rdtgroup *rdtgrp; struct rdt_domain *d; diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index a365f67131ec..168cc9510069 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -36,6 +36,16 @@ enum resctrl_conf_type { CDP_DATA, }; +enum resctrl_res_level { + RDT_RESOURCE_L3, + RDT_RESOURCE_L2, + RDT_RESOURCE_MBA, + RDT_RESOURCE_SMBA, + + /* Must be the last */ + RDT_NUM_RESOURCES, +}; + #define CDP_NUM_TYPES 
(CDP_DATA + 1) /* @@ -190,6 +200,13 @@ struct rdt_resource { bool cdp_capable; }; +/* + * Get the resource that exists at this level. If the level is not supported + * a dummy/not-capable resource can be returned. Levels >= RDT_NUM_RESOURCES + * will return NULL. + */ +struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l); + /** * struct resctrl_schema - configuration abilities of a resource presented to * user-space -- Gitee From 2482c68bb2b0615745b176d9a64eca3bdda70eae Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2019 16:29:13 +0000 Subject: [PATCH 0543/2138] x86/resctrl: Move ctrlval string parsing policy away from the arch code ANBZ: #8626 commit f8546bea270343d2c81e5f3542d7f01dc24125dc morse-linux. The policy for parsing the configuration values as a string from user-space is specified by a function pointer the arch code specifies. These strings are part of resctrl's ABI, and the functions and their caller both live in the same file. Exporting the parsing functions and allowing the architecture to choose how a schema is parsed allows an architecture to get this wrong. Keep this all in the flesystem parts of resctrl. This should prevent any architecture's string-parsing behaviour from varying without core code changes. Use the fflags to spot caches and bandwidth resources, and use the appropriate helper. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 4 ---- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 28 +++++++++++++++++++---- arch/x86/kernel/cpu/resctrl/internal.h | 10 -------- include/linux/resctrl.h | 7 ------ 4 files changed, 23 insertions(+), 26 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 0b38d92d1249..ad5ab8cfa455 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -75,7 +75,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "L3", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_L3), - .parse_ctrlval = parse_cbm, .format_str = "%d=%0*x", .fflags = RFTYPE_RES_CACHE, }, @@ -89,7 +88,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "L2", .cache_level = 2, .domains = domain_init(RDT_RESOURCE_L2), - .parse_ctrlval = parse_cbm, .format_str = "%d=%0*x", .fflags = RFTYPE_RES_CACHE, }, @@ -103,7 +101,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "MB", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_MBA), - .parse_ctrlval = parse_bw, .format_str = "%d=%*u", .fflags = RFTYPE_RES_MB, }, @@ -115,7 +112,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "SMBA", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_SMBA), - .parse_ctrlval = parse_bw, .format_str = "%d=%*u", .fflags = RFTYPE_RES_MB, }, diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index e2634a3263a6..077b4cbaa074 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -23,6 +23,15 @@ #include "internal.h" +struct rdt_parse_data { + struct rdtgroup *rdtgrp; + char *buf; +}; + +typedef int (ctrlval_parser_t)(struct 
rdt_parse_data *data, + struct resctrl_schema *s, + struct rdt_domain *d); + /* * Check whether MBA bandwidth percentage value is correct. The value is * checked against the minimum and max bandwidth values specified by the @@ -64,8 +73,8 @@ static bool bw_validate(char *buf, u32 *data, struct rdt_resource *r) return true; } -int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d) +static int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, + struct rdt_domain *d) { struct resctrl_staged_config *cfg; u32 closid = data->rdtgrp->closid; @@ -143,8 +152,8 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) * Read one cache bit mask (hex). Check that it is valid for the current * resource type. */ -int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d) +static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, + struct rdt_domain *d) { struct rdtgroup *rdtgrp = data->rdtgrp; struct resctrl_staged_config *cfg; @@ -200,6 +209,14 @@ int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, return 0; } +static ctrlval_parser_t *get_parser(struct rdt_resource *res) +{ + if (res->fflags & RFTYPE_RES_CACHE) + return &parse_cbm; + else + return &parse_bw; +} + /* * For each domain in this resource we expect to find a series of: * id=mask @@ -209,6 +226,7 @@ int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, static int parse_line(char *line, struct resctrl_schema *s, struct rdtgroup *rdtgrp) { + ctrlval_parser_t *parse_ctrlval = get_parser(s->res); enum resctrl_conf_type t = s->conf_type; struct resctrl_staged_config *cfg; struct rdt_resource *r = s->res; @@ -240,7 +258,7 @@ static int parse_line(char *line, struct resctrl_schema *s, if (d->id == dom_id) { data.buf = dom; data.rdtgrp = rdtgrp; - if (r->parse_ctrlval(&data, s, d)) + if (parse_ctrlval(&data, s, d)) return -EINVAL; if (rdtgrp->mode == 
RDT_MODE_PSEUDO_LOCKSETUP) { cfg = &d->staged_config[t]; diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 65990def6c79..9048bd32e86f 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -413,11 +413,6 @@ static inline bool is_mbm_event(int e) e <= QOS_L3_MBM_LOCAL_EVENT_ID); } -struct rdt_parse_data { - struct rdtgroup *rdtgrp; - char *buf; -}; - /** * struct rdt_hw_resource - arch private attributes of a resctrl resource * @r_resctrl: Attributes of the resource used directly by resctrl. @@ -455,11 +450,6 @@ static inline struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r return container_of(r, struct rdt_hw_resource, r_resctrl); } -int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d); -int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d); - extern struct mutex rdtgroup_mutex; extern struct rdt_hw_resource rdt_resources_all[]; diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 168cc9510069..6e87bc95f5ea 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -157,9 +157,6 @@ struct resctrl_membw { u32 *mb_map; }; -struct rdt_parse_data; -struct resctrl_schema; - /** * struct rdt_resource - attributes of a resctrl resource * @rid: The index of the resource @@ -174,7 +171,6 @@ struct resctrl_schema; * @data_width: Character width of data when displaying * @default_ctrl: Specifies default cache cbm or memory B/W percent. 
* @format_str: Per resource format string to show domain value - * @parse_ctrlval: Per resource function pointer to parse control values * @evt_list: List of monitoring events * @fflags: flags to choose base and info files * @cdp_capable: Is the CDP feature available on this resource @@ -192,9 +188,6 @@ struct rdt_resource { int data_width; u32 default_ctrl; const char *format_str; - int (*parse_ctrlval)(struct rdt_parse_data *data, - struct resctrl_schema *s, - struct rdt_domain *d); struct list_head evt_list; unsigned long fflags; bool cdp_capable; -- Gitee From 398a6246c486288e1f69a44025bffd11e2fc1e58 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 14 Jun 2018 13:52:30 +0100 Subject: [PATCH 0544/2138] x86/resctrl: Add helper for setting CPU default properties ANBZ: #8626 commit 8bd9128d2be50d27de7a2e1c694f7960862f43c2 morse-linux. rdtgroup_rmdir_ctrl() and rdtgroup_rmdir_mon() set the per-CPU pqr_state for CPUs that were part of the rmdir()'d group. Another architecture might not have a 'pqr_state', its hardware may need the values in a different format. MPAM's equivalent of RMID values are not unique, and always need the CLOSID to be provided too. There is only one caller that modifies a single value, (rdtgroup_rmdir_mon()). MPAM always needs both CLOSID and RMID for the hardware value as these are written to the same system register. As rdtgroup_rmdir_mon() has the CLOSID on hand, only provide a helper to set both values. These values are read by __resctrl_sched_in(), but may be written by a different CPU without any locking, add READ/WRTE_ONCE() to avoid torn values. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/include/asm/resctrl.h | 14 +++++++++++--- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 15 ++++++++++----- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 12dbd2588ca7..f61382258743 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -4,8 +4,9 @@ #ifdef CONFIG_X86_CPU_RESCTRL -#include #include +#include +#include /* * This value can never be a valid CLOSID, and is used when mapping a @@ -96,8 +97,8 @@ static inline void resctrl_arch_disable_mon(void) static inline void __resctrl_sched_in(struct task_struct *tsk) { struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); - u32 closid = state->default_closid; - u32 rmid = state->default_rmid; + u32 closid = READ_ONCE(state->default_closid); + u32 rmid = READ_ONCE(state->default_rmid); u32 tmp; /* @@ -132,6 +133,13 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val) return val * scale; } +static inline void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, + u32 rmid) +{ + WRITE_ONCE(per_cpu(pqr_state.default_closid, cpu), closid); + WRITE_ONCE(per_cpu(pqr_state.default_rmid, cpu), rmid); +} + static inline void resctrl_arch_set_closid_rmid(struct task_struct *tsk, u32 closid, u32 rmid) { diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 45372b6a6215..5d2c1ce5b6b1 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -3623,14 +3623,18 @@ static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) { struct rdtgroup *prdtgrp = 
rdtgrp->mon.parent; + u32 closid, rmid; int cpu; /* Give any tasks back to the parent group */ rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); /* Update per cpu rmid of the moved CPUs first */ + closid = rdtgrp->closid; + rmid = prdtgrp->mon.rmid; for_each_cpu(cpu, &rdtgrp->cpu_mask) - per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid; + resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); + /* * Update the MSR on moved CPUs and CPUs which have moved * task running on them. @@ -3663,6 +3667,7 @@ static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp) static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) { + u32 closid, rmid; int cpu; /* Give any tasks back to the default group */ @@ -3673,10 +3678,10 @@ static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); /* Update per cpu closid and rmid of the moved CPUs first */ - for_each_cpu(cpu, &rdtgrp->cpu_mask) { - per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid; - per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid; - } + closid = rdtgroup_default.closid; + rmid = rdtgroup_default.mon.rmid; + for_each_cpu(cpu, &rdtgrp->cpu_mask) + resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); /* * Update the MSR on moved CPUs and CPUs which have moved -- Gitee From 692e41b9ceb8fdccd83cc8b4214ec2cf2c9fa964 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2019 16:40:49 +0000 Subject: [PATCH 0545/2138] x86/resctrl: Remove rdtgroup from update_cpu_closid_rmid() ANBZ: #8626 commit 5c5385f5f23f19b7a05451798eab277ff77abccc morse-linux. update_cpu_closid_rmid() takes a struct rdtgroup as an argument, which it uses to update the local CPUs default pqr values. This is a problem once the resctrl parts move out to /fs/, as the arch code cannot poke around inside struct rdtgroup. 
Rename update_cpu_closid_rmid() as resctrl_arch_sync_cpus_defaults() to be used as the target of an IPI, and pass the effective CLOSID and RMID in a new struct. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 19 +++++++++++++++---- include/linux/resctrl.h | 11 +++++++++++ 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 5d2c1ce5b6b1..18f097fce51e 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -341,13 +341,13 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of, * from update_closid_rmid() is protected against __switch_to() because * preemption is disabled. */ -static void update_cpu_closid_rmid(void *info) +void resctrl_arch_sync_cpu_defaults(void *info) { - struct rdtgroup *r = info; + struct resctrl_cpu_sync *r = info; if (r) { this_cpu_write(pqr_state.default_closid, r->closid); - this_cpu_write(pqr_state.default_rmid, r->mon.rmid); + this_cpu_write(pqr_state.default_rmid, r->rmid); } /* @@ -362,11 +362,22 @@ static void update_cpu_closid_rmid(void *info) * Update the PGR_ASSOC MSR on all cpus in @cpu_mask, * * Per task closids/rmids must have been set up before calling this function. + * @r may be NULL. 
*/ static void update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r) { - on_each_cpu_mask(cpu_mask, update_cpu_closid_rmid, r, 1); + struct resctrl_cpu_sync defaults; + struct resctrl_cpu_sync *defaults_p = NULL; + + if (r) { + defaults.closid = r->closid; + defaults.rmid = r->mon.rmid; + defaults_p = &defaults; + } + + on_each_cpu_mask(cpu_mask, resctrl_arch_sync_cpu_defaults, defaults_p, + 1); } static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 6e87bc95f5ea..2b79e4159507 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -220,6 +220,17 @@ struct resctrl_schema { u32 num_closid; }; +struct resctrl_cpu_sync { + u32 closid; + u32 rmid; +}; + +/* + * Update and re-load this CPUs defaults. Called via IPI, takes a pointer to + * struct resctrl_cpu_sync, or NULL. + */ +void resctrl_arch_sync_cpu_defaults(void *info); + /* The number of closid supported by this resource regardless of CDP */ u32 resctrl_arch_get_num_closid(struct rdt_resource *r); int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid); -- Gitee From 5b0f4d6916a8870cd42a57d64851a5ff6851c773 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2019 16:45:54 +0000 Subject: [PATCH 0546/2138] x86/resctrl: Export resctrl fs's init function ANBZ: #8626 commit 208dada2dd57b7d887e96a51a4bf75be6f6d2065 morse-linux. rdtgroup_init() needs exporting so that arch code can call it once it lives in core code. As this is one of the few functions we export, rename it to have the resctrl in the name. The same goes for the exit call. x86's arch code init functions for RDT are renamed to have an arch prefix to make it clear these are part of the architecture code. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 12 ++++++------ arch/x86/kernel/cpu/resctrl/internal.h | 3 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 8 ++++---- include/linux/resctrl.h | 3 +++ 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index ad5ab8cfa455..fd7305ae2121 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -942,7 +942,7 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c) } } -static int __init resctrl_late_init(void) +static int __init resctrl_arch_late_init(void) { struct rdt_resource *r; int state, ret; @@ -967,7 +967,7 @@ static int __init resctrl_late_init(void) if (state < 0) return state; - ret = rdtgroup_init(); + ret = resctrl_init(); if (ret) { cpuhp_remove_state(state); return ret; @@ -983,18 +983,18 @@ static int __init resctrl_late_init(void) return 0; } -late_initcall(resctrl_late_init); +late_initcall(resctrl_arch_late_init); -static void __exit resctrl_exit(void) +static void __exit resctrl_arch_exit(void) { struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; cpuhp_remove_state(rdt_online); - rdtgroup_exit(); + resctrl_exit(); if (r->mon_capable) rdt_put_mon_l3_config(); } -__exitcall(resctrl_exit); +__exitcall(resctrl_arch_exit); diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 9048bd32e86f..7c073298aabf 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -300,9 +300,6 @@ extern struct list_head rdt_all_groups; extern int max_name_width, max_data_width; -int __init rdtgroup_init(void); -void __exit rdtgroup_exit(void); - /** * struct rftype - describe each 
file in the resctrl file system * @name: File name diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 18f097fce51e..1a49c9918f8d 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -4116,14 +4116,14 @@ void resctrl_offline_cpu(unsigned int cpu) } /* - * rdtgroup_init - rdtgroup initialization + * resctrl_init - resctrl filesystem initialization * * Setup resctrl file system including set up root, create mount point, - * register rdtgroup filesystem, and initialize files under root directory. + * register resctrl filesystem, and initialize files under root directory. * * Return: 0 on success or -errno */ -int __init rdtgroup_init(void) +int __init resctrl_init(void) { int ret = 0; @@ -4171,7 +4171,7 @@ int __init rdtgroup_init(void) return ret; } -void __exit rdtgroup_exit(void) +void __exit resctrl_exit(void) { debugfs_remove_recursive(debugfs_resctrl); unregister_filesystem(&rdt_fs_type); diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 2b79e4159507..f6a4b75f8122 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -325,4 +325,7 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d); extern unsigned int resctrl_rmid_realloc_threshold; extern unsigned int resctrl_rmid_realloc_limit; +int __init resctrl_init(void); +void __exit resctrl_exit(void); + #endif /* _RESCTRL_H */ -- Gitee From 2e96e4d1cd47bf84f29259a9b3e3ec17afc71099 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2019 17:09:15 +0000 Subject: [PATCH 0547/2138] x86/resctrl: Wrap resctrl_arch_find_domain() around rdt_find_domain() ANBZ: #8626 commit 221cd020d3775fbd226d77e5fdb09247ba93127e morse-linux. rdt_find_domain() finds a domain given a resource and a cache-id. It's not quite right for the resctrl arch API as it also returns the position to insert a new domain, which is needed when bringing a domain online in the arch code. 
Wrap rdt_find_domain() in another function resctrl_arch_find_domain() so we avoid the unnecessary argument outside the arch code. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 9 +++++++-- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 2 +- arch/x86/kernel/cpu/resctrl/internal.h | 2 -- include/linux/resctrl.h | 2 ++ 4 files changed, 10 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index fd7305ae2121..e5143f8af919 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -405,8 +405,8 @@ void rdt_ctrl_update(void *arg) * caller, return the first domain whose id is bigger than the input id. * The domain list is sorted by id in ascending order. */ -struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, - struct list_head **pos) +static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, + struct list_head **pos) { struct rdt_domain *d; struct list_head *l; @@ -430,6 +430,11 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, return NULL; } +struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id) +{ + return rdt_find_domain(r, id, NULL); +} + static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index 077b4cbaa074..ad9e5516e607 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -623,7 +623,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg) evtid = md.u.evtid; r = resctrl_arch_get_resource(resid); - d = rdt_find_domain(r, domid, NULL); + 
d = resctrl_arch_find_domain(r, domid); if (IS_ERR_OR_NULL(d)) { ret = -ENOENT; goto out; diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 7c073298aabf..32ade929ea1b 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -533,8 +533,6 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn); int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name); int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, umode_t mask); -struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, - struct list_head **pos); ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off); int rdtgroup_schemata_show(struct kernfs_open_file *of, diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index f6a4b75f8122..c5fcbb524136 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -233,6 +233,8 @@ void resctrl_arch_sync_cpu_defaults(void *info); /* The number of closid supported by this resource regardless of CDP */ u32 resctrl_arch_get_num_closid(struct rdt_resource *r); + +struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id); int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid); /* -- Gitee From 2de2cda05b5eed4fb214c9848b92c64162c68c8c Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Mar 2019 13:24:07 +0000 Subject: [PATCH 0548/2138] x86/resctrl: Move resctrl types to a separate header ANBZ: #8626 commit 80afaf082df1ccf983bbebba6b5037d20dd8a832 morse-linux. To avoid sticky problems in the mpam glue code, move the resctrl enums into a separate header. This lets the arch code declare prototypes that use these enums without creating a loop via asm<->linux resctrl.h The same logic applies to the monitor-configuration defines, move these too. The maintainers entry for these headers was missed when resctrl.h was created. 
Add a wildcard entry to match both resctrl.h and resctrl_types.h. Signed-off-by: James Morse --- internal.h lacks a copyright notice so there is nothing to preserve when creating a new file... [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- MAINTAINERS | 1 + arch/x86/kernel/cpu/resctrl/internal.h | 24 --------- include/linux/resctrl.h | 35 +------------ include/linux/resctrl_types.h | 68 ++++++++++++++++++++++++++ 4 files changed, 70 insertions(+), 58 deletions(-) create mode 100644 include/linux/resctrl_types.h diff --git a/MAINTAINERS b/MAINTAINERS index d9a486e6587b..d0b4a22f8ecf 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -18083,6 +18083,7 @@ S: Supported F: Documentation/arch/x86/resctrl* F: arch/x86/include/asm/resctrl.h F: arch/x86/kernel/cpu/resctrl/ +F: include/linux/resctrl*.h F: tools/testing/selftests/resctrl/ READ-COPY UPDATE (RCU) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 32ade929ea1b..031948322eab 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -32,30 +32,6 @@ */ #define MBM_CNTR_WIDTH_OFFSET_MAX (62 - MBM_CNTR_WIDTH_BASE) -/* Reads to Local DRAM Memory */ -#define READS_TO_LOCAL_MEM BIT(0) - -/* Reads to Remote DRAM Memory */ -#define READS_TO_REMOTE_MEM BIT(1) - -/* Non-Temporal Writes to Local Memory */ -#define NON_TEMP_WRITE_TO_LOCAL_MEM BIT(2) - -/* Non-Temporal Writes to Remote Memory */ -#define NON_TEMP_WRITE_TO_REMOTE_MEM BIT(3) - -/* Reads to Local Memory the system identifies as "Slow Memory" */ -#define READS_TO_LOCAL_S_MEM BIT(4) - -/* Reads to Remote Memory the system identifies as "Slow Memory" */ -#define READS_TO_REMOTE_S_MEM BIT(5) - -/* Dirty Victims to All Types of Memory */ -#define DIRTY_VICTIMS_TO_ALL_MEM BIT(6) - -/* Max event bits supported */ 
-#define MAX_EVT_CONFIG_BITS GENMASK(6, 0) - /** * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that * aren't marked nohz_full diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index c5fcbb524136..b0ee7256e095 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -5,6 +5,7 @@ #include #include #include +#include /* CLOSID, RMID value used by the default control group */ #define RESCTRL_RESERVED_CLOSID 0 @@ -24,40 +25,6 @@ int proc_resctrl_show(struct seq_file *m, /* max value for struct rdt_domain's mbps_val */ #define MBA_MAX_MBPS U32_MAX -/** - * enum resctrl_conf_type - The type of configuration. - * @CDP_NONE: No prioritisation, both code and data are controlled or monitored. - * @CDP_CODE: Configuration applies to instruction fetches. - * @CDP_DATA: Configuration applies to reads and writes. - */ -enum resctrl_conf_type { - CDP_NONE, - CDP_CODE, - CDP_DATA, -}; - -enum resctrl_res_level { - RDT_RESOURCE_L3, - RDT_RESOURCE_L2, - RDT_RESOURCE_MBA, - RDT_RESOURCE_SMBA, - - /* Must be the last */ - RDT_NUM_RESOURCES, -}; - -#define CDP_NUM_TYPES (CDP_DATA + 1) - -/* - * Event IDs, the values match those used to program IA32_QM_EVTSEL before - * reading IA32_QM_CTR on RDT systems. - */ -enum resctrl_event_id { - QOS_L3_OCCUP_EVENT_ID = 0x01, - QOS_L3_MBM_TOTAL_EVENT_ID = 0x02, - QOS_L3_MBM_LOCAL_EVENT_ID = 0x03, -}; - /** * struct resctrl_staged_config - parsed configuration to be applied * @new_ctrl: new ctrl value to be loaded diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h new file mode 100644 index 000000000000..4788bd95dac6 --- /dev/null +++ b/include/linux/resctrl_types.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Arm Ltd. 
+ * Based on arch/x86/kernel/cpu/resctrl/internal.h + */ + +#ifndef __LINUX_RESCTRL_TYPES_H +#define __LINUX_RESCTRL_TYPES_H + +/* Reads to Local DRAM Memory */ +#define READS_TO_LOCAL_MEM BIT(0) + +/* Reads to Remote DRAM Memory */ +#define READS_TO_REMOTE_MEM BIT(1) + +/* Non-Temporal Writes to Local Memory */ +#define NON_TEMP_WRITE_TO_LOCAL_MEM BIT(2) + +/* Non-Temporal Writes to Remote Memory */ +#define NON_TEMP_WRITE_TO_REMOTE_MEM BIT(3) + +/* Reads to Local Memory the system identifies as "Slow Memory" */ +#define READS_TO_LOCAL_S_MEM BIT(4) + +/* Reads to Remote Memory the system identifies as "Slow Memory" */ +#define READS_TO_REMOTE_S_MEM BIT(5) + +/* Dirty Victims to All Types of Memory */ +#define DIRTY_VICTIMS_TO_ALL_MEM BIT(6) + +/* Max event bits supported */ +#define MAX_EVT_CONFIG_BITS GENMASK(6, 0) + +/** + * enum resctrl_conf_type - The type of configuration. + * @CDP_NONE: No prioritisation, both code and data are controlled or monitored. + * @CDP_CODE: Configuration applies to instruction fetches. + * @CDP_DATA: Configuration applies to reads and writes. + */ +enum resctrl_conf_type { + CDP_NONE, + CDP_CODE, + CDP_DATA, +}; + +enum resctrl_res_level { + RDT_RESOURCE_L3, + RDT_RESOURCE_L2, + RDT_RESOURCE_MBA, + RDT_RESOURCE_SMBA, + + /* Must be the last */ + RDT_NUM_RESOURCES, +}; + +#define CDP_NUM_TYPES (CDP_DATA + 1) + +/* + * Event IDs, the values match those used to program IA32_QM_EVTSEL before + * reading IA32_QM_CTR on RDT systems. + */ +enum resctrl_event_id { + QOS_L3_OCCUP_EVENT_ID = 0x01, + QOS_L3_MBM_TOTAL_EVENT_ID = 0x02, + QOS_L3_MBM_LOCAL_EVENT_ID = 0x03, +}; + +#endif /* __LINUX_RESCTRL_TYPES_H */ -- Gitee From e92612a7eecc1e3be818122f0ccb963b014a657b Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2019 17:54:18 +0000 Subject: [PATCH 0549/2138] x86/resctrl: Add a resctrl helper to reset all the resources ANBZ: #8626 commit 128aadaa200f229945c91d94e40548338bbae773 morse-linux. 
On umount(), resctrl resets each resource back to its default configuration. It only ever does this for all resources in one go. reset_all_ctrls() is architecture specific as it works with struct rdt_hw_resource. Add an architecture helper to reset all resources. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/include/asm/resctrl.h | 2 ++ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 16 +++++++++++----- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index f61382258743..5f6a5375bb4a 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -15,6 +15,8 @@ */ #define X86_RESCTRL_EMPTY_CLOSID ((u32)~0) +void resctrl_arch_reset_resources(void); + /** * struct resctrl_pqr_state - State cache for the PQR MSR * @cur_rmid: The cached Resource Monitoring ID diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 1a49c9918f8d..13c24cb18d76 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2859,6 +2859,14 @@ static int reset_all_ctrls(struct rdt_resource *r) return 0; } +void resctrl_arch_reset_resources(void) +{ + struct rdt_resource *r; + + for_each_capable_rdt_resource(r) + reset_all_ctrls(r); +} + /* * Move tasks from one to the other group. If @from is NULL, then all tasks * in the systems are moved unconditionally (used for teardown). @@ -2968,16 +2976,14 @@ static void rmdir_all_sub(void) static void rdt_kill_sb(struct super_block *sb) { - struct rdt_resource *r; - cpus_read_lock(); mutex_lock(&rdtgroup_mutex); rdt_disable_ctx(); - /*Put everything back to default values. 
*/ - for_each_alloc_capable_rdt_resource(r) - reset_all_ctrls(r); + /* Put everything back to default values. */ + resctrl_arch_reset_resources(); + rmdir_all_sub(); rdt_pseudo_lock_release(); rdtgroup_default.mode = RDT_MODE_SHAREABLE; -- Gitee From 381b2ff67aae5d3df582345da1fb90f0a68a0a1b Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 22 Jan 2024 14:18:51 +0000 Subject: [PATCH 0550/2138] x86/resctrl: Move monitor init work to a resctrl init call ANBZ: #8626 commit b8401f504352b8e3dde8864ebaecb5b9cbbf12f8 morse-linux. rdt_get_mon_l3_config() is called from the architecture's resctrl_arch_late_init(), and initialises both architecture specific fields, such as hw_res->mon_scale and resctrl filesystem fields by calling dom_data_init(). To separate the filesystem and architecture parts of resctrl, this function needs splitting up. Add resctrl_mon_resource_init() to do the filesystem specific work, and call it from resctrl_init(). This runs later, but is still before the filesystem is mounted and the rmid_ptrs[] array can be used. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/internal.h | 1 + arch/x86/kernel/cpu/resctrl/monitor.c | 24 +++++++++++++++++------- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 4 ++++ 3 files changed, 22 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 031948322eab..7a0c74779c53 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -540,6 +540,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg); void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, struct rdt_domain *d, struct rdtgroup *rdtgrp, int evtid, int first); +int resctrl_mon_resource_init(void); void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms, int exclude_cpu); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 06565153ceb2..929ec1430b45 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -1003,12 +1003,28 @@ static void l3_mon_evt_init(struct rdt_resource *r) list_add_tail(&mbm_local_event.list, &r->evt_list); } +int resctrl_mon_resource_init(void) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + int ret; + + if (!r->mon_capable) + return 0; + + ret = dom_data_init(r); + if (ret) + return ret; + + l3_mon_evt_init(r); + + return 0; +} + int __init rdt_get_mon_l3_config(struct rdt_resource *r) { unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset; struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); unsigned int threshold; - int ret; resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024; hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale; @@ -1036,10 +1052,6 
@@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) */ resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(threshold); - ret = dom_data_init(r); - if (ret) - return ret; - if (rdt_cpu_has(X86_FEATURE_BMEC)) { u32 eax, ebx, ecx, edx; @@ -1057,8 +1069,6 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) } } - l3_mon_evt_init(r); - r->mon_capable = true; return 0; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 13c24cb18d76..7a9696f53f2b 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -4138,6 +4138,10 @@ int __init resctrl_init(void) rdtgroup_setup_default(); + ret = resctrl_mon_resource_init(); + if (ret) + return ret; + ret = sysfs_create_mount_point(fs_kobj, "resctrl"); if (ret) return ret; -- Gitee From 1d6544e024a2f898f8c1d82de68fc6e188db3cba Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 24 Oct 2023 17:36:46 +0100 Subject: [PATCH 0551/2138] x86/resctrl: Move monitor exit work to a resctrl exit call ANBZ: #8626 commit 7e7bad526c7be49af6d4f4e3382e6cfd212378dd morse-linux. rdt_put_mon_l3_config() is called via the architecture's resctrl_arch_exit() call, and appears to free the rmid_ptrs[] and closid_num_dirty_rmid[] arrays. In reality this code is marked __exit, and is removed by the linker as resctrl can't be built as a module. MPAM can make use of this code from its error interrupt handler, a later patch drops all the __init/__exit annotations. To separate the filesystem and architecture parts of resctrl, this free()ing work needs to be triggered by the filesystem, as these structures belong to the filesystem code. Rename rdt_put_mon_l3_config() to resctrl_mon_resource_exit() and call it from resctrl_exit(). The kfree() is currently dependent on r->mon_capable. resctrl_mon_resource_init() takes no arguments, so resctrl_mon_resource_exit() shouldn't take any either. 
Add the check to dom_data_exit(), making it take the resource as an argument. This makes it more symmetrical with dom_data_init(). Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 5 ----- arch/x86/kernel/cpu/resctrl/internal.h | 2 +- arch/x86/kernel/cpu/resctrl/monitor.c | 12 ++++++++---- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 2 ++ 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index e5143f8af919..bafd289dd4b0 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -992,14 +992,9 @@ late_initcall(resctrl_arch_late_init); static void __exit resctrl_arch_exit(void) { - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; - cpuhp_remove_state(rdt_online); resctrl_exit(); - - if (r->mon_capable) - rdt_put_mon_l3_config(); } __exitcall(resctrl_arch_exit); diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 7a0c74779c53..01fcd4ef26ca 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -533,7 +533,7 @@ void closid_free(int closid); int alloc_rmid(u32 closid); void free_rmid(u32 closid, u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); -void __exit rdt_put_mon_l3_config(void); +void __exit resctrl_mon_resource_exit(void); bool __init rdt_cpu_has(int flag); void mon_event_count(void *info); int rdtgroup_mondata_show(struct seq_file *m, void *arg); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 929ec1430b45..2a1cbd4de6ee 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -954,10 +954,12 @@ static 
int dom_data_init(struct rdt_resource *r) return err; } -static void __exit dom_data_exit(void) +static void __exit dom_data_exit(struct rdt_resource *r) { - mutex_lock(&rdtgroup_mutex); + if (!r->mon_capable) + return; + mutex_lock(&rdtgroup_mutex); if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { kfree(closid_num_dirty_rmid); closid_num_dirty_rmid = NULL; @@ -1074,9 +1076,11 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) return 0; } -void __exit rdt_put_mon_l3_config(void) +void __exit resctrl_mon_resource_exit(void) { - dom_data_exit(); + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + + dom_data_exit(r); } void __init intel_rdt_mbm_apply_quirk(void) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 7a9696f53f2b..6cf4ebe9c058 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -4186,4 +4186,6 @@ void __exit resctrl_exit(void) debugfs_remove_recursive(debugfs_resctrl); unregister_filesystem(&rdt_fs_type); sysfs_remove_mount_point(fs_kobj, "resctrl"); + + resctrl_mon_resource_exit(); } -- Gitee From b3202c4ac4d3fa60831cd91bfd6d6be298f45126 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2019 18:04:51 +0000 Subject: [PATCH 0552/2138] x86/resctrl: Move max_{name,data}_width into resctrl code ANBZ: #8626 commit 92a05bac93d1cba6950d0a9b9a4315c0b2d7d22f morse-linux. max_name_width and max_data_width are used to pad the strings in the resctrl schemata file. This should be part of the fs code as it influences the user-space interface, but currently max_data_width is generated by the arch init code. max_name_width is already managed by schemata_list_add(). Move the variables and max_data_width's initialisation code to rdtgroup.c. There is no need for an extra rdt_init_padding() helper as the length of the name can be considered when schemata_list_add() creates each schema entry. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 22 ---------------------- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 12 ++++++++++++ 2 files changed, 12 insertions(+), 22 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index bafd289dd4b0..7242d15d4806 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -44,12 +44,6 @@ static DEFINE_MUTEX(domain_list_lock); */ DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state); -/* - * Used to store the max resource name width and max resource data width - * to display the schemata in a tabular format - */ -int max_name_width, max_data_width; - /* * Global boolean for rdt_alloc which is true if any * resource allocation is enabled. @@ -648,20 +642,6 @@ static int resctrl_arch_offline_cpu(unsigned int cpu) return 0; } -/* - * Choose a width for the resource name and resource data based on the - * resource that has widest name and cbm. 
- */ -static __init void rdt_init_padding(void) -{ - struct rdt_resource *r; - - for_each_alloc_capable_rdt_resource(r) { - if (r->data_width > max_data_width) - max_data_width = r->data_width; - } -} - enum { RDT_FLAG_CMT, RDT_FLAG_MBM_TOTAL, @@ -963,8 +943,6 @@ static int __init resctrl_arch_late_init(void) if (!get_rdt_resources()) return -ENODEV; - rdt_init_padding(); - state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/resctrl/cat:online:", resctrl_arch_online_cpu, diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 6cf4ebe9c058..e736e4d20f63 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -58,6 +58,12 @@ static struct kernfs_node *kn_mongrp; /* Kernel fs node for "mon_data" directory under root */ static struct kernfs_node *kn_mondata; +/* + * Used to store the max resource name width and max resource data width + * to display the schemata in a tabular format + */ +int max_name_width, max_data_width; + static struct seq_buf last_cmd_status; static char last_cmd_status_buf[512]; @@ -2595,6 +2601,12 @@ static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type if (cl > max_name_width) max_name_width = cl; + /* + * Choose a width for the resource data based on the resource that has + * widest name and cbm. + */ + max_data_width = max(max_data_width, r->data_width); + INIT_LIST_HEAD(&s->list); list_add(&s->list, &resctrl_schema_all); -- Gitee From 87334d4229225575873415d4b767a4a6690e7263 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2019 18:12:17 +0000 Subject: [PATCH 0553/2138] x86/resctrl: Stop using the for_each_*_rdt_resource() walkers ANBZ: #8626 commit a5b9d55f0b14d88ad1608c38f2033b057d122723 morse-linux. The for_each_*_rdt_resource() helpers walk the architectures array of structures, using the resctrl visible part as an iterator. 
These became over-complex when the structures were split into a filesystem and architecture-specific struct. This approach avoided the need to touch every call site. Once the filesystem parts of resctrl are moved to /fs/, both the architecture's resource array, and the definition of those structures is no longer accessible. To support resctrl, each architecture would have to provide equally complex macros. Change the resctrl code that uses these to walk through the resource_level enum and check the mon/alloc capable flags instead. Instances in core.c, and resctrl_arch_reset_resources() remain part of x86's architecture specific code. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 7 +++++- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 30 +++++++++++++++++++---- 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 884b88e25141..f2315a50ea4f 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -840,6 +840,7 @@ bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) { cpumask_var_t cpu_with_psl; + enum resctrl_res_level i; struct rdt_resource *r; struct rdt_domain *d_i; bool ret = false; @@ -854,7 +855,11 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) * First determine which cpus have pseudo-locked regions * associated with them. 
*/ - for_each_alloc_capable_rdt_resource(r) { + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + list_for_each_entry(d_i, &r->domains, list) { if (d_i->plr) cpumask_or(cpu_with_psl, cpu_with_psl, diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index e736e4d20f63..3f16e7854411 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -98,12 +98,17 @@ void rdt_last_cmd_printf(const char *fmt, ...) void rdt_staged_configs_clear(void) { + enum resctrl_res_level i; struct rdt_resource *r; struct rdt_domain *dom; lockdep_assert_held(&rdtgroup_mutex); - for_each_alloc_capable_rdt_resource(r) { + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + list_for_each_entry(dom, &r->domains, list) memset(dom->staged_config, 0, sizeof(dom->staged_config)); } @@ -2181,6 +2186,7 @@ static int rdtgroup_mkdir_info_resdir(void *priv, char *name, static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) { + enum resctrl_res_level i; struct resctrl_schema *s; struct rdt_resource *r; unsigned long fflags; @@ -2205,8 +2211,12 @@ static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) goto out_destroy; } - for_each_mon_capable_rdt_resource(r) { - fflags = r->fflags | RFTYPE_MON_INFO; + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->mon_capable) + continue; + + fflags = r->fflags | RFTYPE_MON_INFO; sprintf(name, "%s_MON", r->name); ret = rdtgroup_mkdir_info_resdir(r, name, fflags); if (ret) @@ -2615,10 +2625,15 @@ static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type static int schemata_list_create(void) { + enum resctrl_res_level i; struct rdt_resource *r; int ret = 0; - for_each_alloc_capable_rdt_resource(r) { + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = 
resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + if (resctrl_arch_get_cdp_enabled(r->rid)) { ret = schemata_list_add(r, CDP_CODE); if (ret) @@ -3166,6 +3181,7 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, struct kernfs_node **dest_kn) { + enum resctrl_res_level i; struct rdt_resource *r; struct kernfs_node *kn; int ret; @@ -3184,7 +3200,11 @@ static int mkdir_mondata_all(struct kernfs_node *parent_kn, * Create the subdirectories for each domain. Note that all events * in a domain like L3 are grouped into a resource whose domain is L3 */ - for_each_mon_capable_rdt_resource(r) { + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->mon_capable) + continue; + ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); if (ret) goto out_destroy; -- Gitee From b0d6c0a9b9edb21d7e05dd2e95fb5dd7783fa749 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Mar 2019 10:26:39 +0000 Subject: [PATCH 0554/2138] x86/resctrl: Export the is_mbm_*_enabled() helpers to asm/resctrl.h ANBZ: #8626 commit 1b09c3e0ec8bc1a30b01d466b27565e06999414d morse-linux. The architecture specific parts of resctrl have helpers to hide accesses to the rdt_mon_features bitmap. Once the filesystem parts of resctrl are moved, these can no longer live in internal.h. Once these are exposed to the wider kernel, they should have a 'resctrl_arch_' prefix, to fit the rest of the arch<->fs interface. Move and rename the helpers that touch rdt_mon_features directly. is_mbm_event() and is_mbm_enabled() are only called from rdtgroup.c, so can be moved into that file. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/include/asm/resctrl.h | 17 +++++++++++ arch/x86/kernel/cpu/resctrl/core.c | 4 +-- arch/x86/kernel/cpu/resctrl/internal.h | 27 ----------------- arch/x86/kernel/cpu/resctrl/monitor.c | 18 ++++++------ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 40 +++++++++++++++++--------- 5 files changed, 54 insertions(+), 52 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 5f6a5375bb4a..50407e83d0ca 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -7,6 +7,7 @@ #include #include #include +#include /* * This value can never be a valid CLOSID, and is used when mapping a @@ -43,6 +44,7 @@ DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state); extern bool rdt_alloc_capable; extern bool rdt_mon_capable; +extern unsigned int rdt_mon_features; DECLARE_STATIC_KEY_FALSE(rdt_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); @@ -82,6 +84,21 @@ static inline void resctrl_arch_disable_mon(void) static_branch_dec_cpuslocked(&rdt_enable_key); } +static inline bool resctrl_arch_is_llc_occupancy_enabled(void) +{ + return (rdt_mon_features & (1 << QOS_L3_OCCUP_EVENT_ID)); +} + +static inline bool resctrl_arch_is_mbm_total_enabled(void) +{ + return (rdt_mon_features & (1 << QOS_L3_MBM_TOTAL_EVENT_ID)); +} + +static inline bool resctrl_arch_is_mbm_local_enabled(void) +{ + return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID)); +} + /* * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR * diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 7242d15d4806..c23d5da137cd 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -481,13 +481,13 @@ static int 
arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom) { size_t tsize; - if (is_mbm_total_enabled()) { + if (resctrl_arch_is_mbm_total_enabled()) { tsize = sizeof(*hw_dom->arch_mbm_total); hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL); if (!hw_dom->arch_mbm_total) return -ENOMEM; } - if (is_mbm_local_enabled()) { + if (resctrl_arch_is_mbm_local_enabled()) { tsize = sizeof(*hw_dom->arch_mbm_local); hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL); if (!hw_dom->arch_mbm_local) { diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 01fcd4ef26ca..edbccc79246f 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -130,7 +130,6 @@ struct rmid_read { void *arch_mon_ctx; }; -extern unsigned int rdt_mon_features; extern struct list_head resctrl_schema_all; extern bool resctrl_mounted; @@ -360,32 +359,6 @@ struct msr_param { u32 high; }; -static inline bool is_llc_occupancy_enabled(void) -{ - return (rdt_mon_features & (1 << QOS_L3_OCCUP_EVENT_ID)); -} - -static inline bool is_mbm_total_enabled(void) -{ - return (rdt_mon_features & (1 << QOS_L3_MBM_TOTAL_EVENT_ID)); -} - -static inline bool is_mbm_local_enabled(void) -{ - return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID)); -} - -static inline bool is_mbm_enabled(void) -{ - return (is_mbm_total_enabled() || is_mbm_local_enabled()); -} - -static inline bool is_mbm_event(int e) -{ - return (e >= QOS_L3_MBM_TOTAL_EVENT_ID && - e <= QOS_L3_MBM_LOCAL_EVENT_ID); -} - /** * struct rdt_hw_resource - arch private attributes of a resctrl resource * @r_resctrl: Attributes of the resource used directly by resctrl. 
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 2a1cbd4de6ee..c9be2d0819c0 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -251,11 +251,11 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); - if (is_mbm_total_enabled()) + if (resctrl_arch_is_mbm_total_enabled()) memset(hw_dom->arch_mbm_total, 0, sizeof(*hw_dom->arch_mbm_total) * r->num_rmid); - if (is_mbm_local_enabled()) + if (resctrl_arch_is_mbm_local_enabled()) memset(hw_dom->arch_mbm_local, 0, sizeof(*hw_dom->arch_mbm_local) * r->num_rmid); } @@ -514,7 +514,7 @@ void free_rmid(u32 closid, u32 rmid) entry = __rmid_entry(idx); - if (is_llc_occupancy_enabled()) + if (resctrl_arch_is_llc_occupancy_enabled()) add_rmid_to_limbo(entry); else list_add_tail(&entry->list, &rmid_free_lru); @@ -666,7 +666,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) struct list_head *head; struct rdtgroup *entry; - if (!is_mbm_local_enabled()) + if (!resctrl_arch_is_mbm_local_enabled()) return; r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA); @@ -735,7 +735,7 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, * This is protected from concurrent reads from user * as both the user and we hold the global mutex. 
*/ - if (is_mbm_total_enabled()) { + if (resctrl_arch_is_mbm_total_enabled()) { rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; rr.val = 0; rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); @@ -749,7 +749,7 @@ static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); } - if (is_mbm_local_enabled()) { + if (resctrl_arch_is_mbm_local_enabled()) { rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; rr.val = 0; rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); @@ -997,11 +997,11 @@ static void l3_mon_evt_init(struct rdt_resource *r) { INIT_LIST_HEAD(&r->evt_list); - if (is_llc_occupancy_enabled()) + if (resctrl_arch_is_llc_occupancy_enabled()) list_add_tail(&llc_occupancy_event.list, &r->evt_list); - if (is_mbm_total_enabled()) + if (resctrl_arch_is_mbm_total_enabled()) list_add_tail(&mbm_total_event.list, &r->evt_list); - if (is_mbm_local_enabled()) + if (resctrl_arch_is_mbm_local_enabled()) list_add_tail(&mbm_local_event.list, &r->evt_list); } diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 3f16e7854411..8285b916289c 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -114,6 +114,18 @@ void rdt_staged_configs_clear(void) } } +static bool resctrl_is_mbm_enabled(void) +{ + return (resctrl_arch_is_mbm_total_enabled() || + resctrl_arch_is_mbm_local_enabled()); +} + +static bool resctrl_is_mbm_event(int e) +{ + return (e >= QOS_L3_MBM_TOTAL_EVENT_ID && + e <= QOS_L3_MBM_LOCAL_EVENT_ID); +} + /* * Trivial allocator for CLOSIDs. Since h/w only supports a small number, * we can keep a bitmap of free CLOSIDs in a single integer. 
@@ -161,7 +173,7 @@ static int closid_alloc(void) lockdep_assert_held(&rdtgroup_mutex); if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) && - is_llc_occupancy_enabled()) { + resctrl_arch_is_llc_occupancy_enabled()) { cleanest_closid = resctrl_find_cleanest_closid(); if (cleanest_closid < 0) return cleanest_closid; @@ -2370,7 +2382,7 @@ static bool supports_mba_mbps(void) { struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); - return (is_mbm_local_enabled() && + return (resctrl_arch_is_mbm_local_enabled() && r->alloc_capable && is_mba_linear()); } @@ -2738,7 +2750,7 @@ static int rdt_get_tree(struct fs_context *fc) if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable()) resctrl_mounted = true; - if (is_mbm_enabled()) { + if (resctrl_is_mbm_enabled()) { list_for_each_entry(dom, &l3->domains, list) mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL, RESCTRL_PICK_ANY_CPU); @@ -3107,7 +3119,7 @@ static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, if (ret) goto out_destroy; - if (is_mbm_event(mevt->evtid)) + if (resctrl_is_mbm_event(mevt->evtid)) mon_event_read(&rr, r, d, prgrp, mevt->evtid, true); } kernfs_activate(kn); @@ -4006,9 +4018,9 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) if (resctrl_mounted && resctrl_arch_mon_capable()) rmdir_mondata_subdir_allrdtgrp(r, d->id); - if (is_mbm_enabled()) + if (resctrl_is_mbm_enabled()) cancel_delayed_work(&d->mbm_over); - if (is_llc_occupancy_enabled() && has_busy_rmid(d)) { + if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) { /* * When a package is going down, forcefully * decrement rmid->ebusy. 
There is no way to know @@ -4032,12 +4044,12 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) u32 idx_limit = resctrl_arch_system_num_rmid_idx(); size_t tsize; - if (is_llc_occupancy_enabled()) { + if (resctrl_arch_is_llc_occupancy_enabled()) { d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL); if (!d->rmid_busy_llc) return -ENOMEM; } - if (is_mbm_total_enabled()) { + if (resctrl_arch_is_mbm_total_enabled()) { tsize = sizeof(*d->mbm_total); d->mbm_total = kcalloc(idx_limit, tsize, GFP_KERNEL); if (!d->mbm_total) { @@ -4045,7 +4057,7 @@ static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) return -ENOMEM; } } - if (is_mbm_local_enabled()) { + if (resctrl_arch_is_mbm_local_enabled()) { tsize = sizeof(*d->mbm_local); d->mbm_local = kcalloc(idx_limit, tsize, GFP_KERNEL); if (!d->mbm_local) { @@ -4077,13 +4089,13 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) if (err) goto out_unlock; - if (is_mbm_enabled()) { + if (resctrl_is_mbm_enabled()) { INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL, RESCTRL_PICK_ANY_CPU); } - if (is_llc_occupancy_enabled()) + if (resctrl_arch_is_llc_occupancy_enabled()) INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo); /* @@ -4138,12 +4150,12 @@ void resctrl_offline_cpu(unsigned int cpu) d = get_domain_from_cpu(cpu, l3); if (d) { - if (is_mbm_enabled() && cpu == d->mbm_work_cpu) { + if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) { cancel_delayed_work(&d->mbm_over); mbm_setup_overflow_handler(d, 0, cpu); } - if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu && - has_busy_rmid(d)) { + if (resctrl_arch_is_llc_occupancy_enabled() && + cpu == d->cqm_work_cpu && has_busy_rmid(d)) { cancel_delayed_work(&d->cqm_limbo); cqm_setup_limbo_handler(d, 0, cpu); } -- Gitee From 5ef6e6319f0aa54635a1f421f00085207391cd85 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 7 Mar 2023 16:11:32 
+0000 Subject: [PATCH 0555/2138] x86/resctrl: Add resctrl_arch_is_evt_configurable() to abstract BMEC ANBZ: #8626 commit 9adbcdc241c348ff91aad916cdc12aa3dd45dbd7 morse-linux. When BMEC is supported the resctrl event can be configured in a number of ways. This depends on architecture support. rdt_get_mon_l3_config() modifies the struct mon_evt and calls mbm_config_rftype_init() to create the files that allow the configuration. Splitting this into separate architecture and filesystem parts would require the struct mon_evt and mbm_config_rftype_init() to be exposed. Instead, add resctrl_arch_is_evt_configurable(), and use this from resctrl_mon_resource_init() to initialise struct mon_evt and call mbm_config_rftype_init(). resctrl_arch_is_evt_configurable() calls rdt_cpu_has() so it doesn't obviously benefit from being inlined. Putting it in core.c will allow rdt_cpu_has() to eventually become static. resctrl_arch_is_evt_configurable() uses rdt_cpu_has() from resctrl_mon_resource_init(), which isn't marked __init. In addition, MPAM needs to initialise resctrl late. Drop the __init on the relevant functions. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 19 +++++++++++++++++-- arch/x86/kernel/cpu/resctrl/internal.h | 4 ++-- arch/x86/kernel/cpu/resctrl/monitor.c | 18 +++++++++--------- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 2 +- include/linux/resctrl.h | 2 ++ 5 files changed, 31 insertions(+), 14 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index c23d5da137cd..a320e3d1c2c9 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -667,7 +667,7 @@ struct rdt_options { bool force_off, force_on; }; -static struct rdt_options rdt_options[] __initdata = { +static struct rdt_options rdt_options[] __ro_after_init = { RDT_OPT(RDT_FLAG_CMT, "cmt", X86_FEATURE_CQM_OCCUP_LLC), RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL), RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL), @@ -707,7 +707,7 @@ static int __init set_rdt_options(char *str) } __setup("rdt", set_rdt_options); -bool __init rdt_cpu_has(int flag) +bool rdt_cpu_has(int flag) { bool ret = boot_cpu_has(flag); struct rdt_options *o; @@ -727,6 +727,21 @@ bool __init rdt_cpu_has(int flag) return ret; } +bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt) +{ + if (!rdt_cpu_has(X86_FEATURE_BMEC)) + return false; + + switch (evt) { + case QOS_L3_MBM_TOTAL_EVENT_ID: + return rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL); + case QOS_L3_MBM_LOCAL_EVENT_ID: + return rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL); + default: + return false; + } +} + static __init bool get_mem_config(void) { struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA]; diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 
edbccc79246f..46370eafb00f 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -507,7 +507,7 @@ int alloc_rmid(u32 closid); void free_rmid(u32 closid, u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); void __exit resctrl_mon_resource_exit(void); -bool __init rdt_cpu_has(int flag); +bool rdt_cpu_has(int flag); void mon_event_count(void *info); int rdtgroup_mondata_show(struct seq_file *m, void *arg); void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, @@ -527,7 +527,7 @@ bool has_busy_rmid(struct rdt_domain *d); void __check_limbo(struct rdt_domain *d, bool force_free); void rdt_domain_reconfigure_cdp(struct rdt_resource *r); void __init thread_throttle_mode_init(void); -void __init mbm_config_rftype_init(const char *config); +void mbm_config_rftype_init(const char *config); void rdt_staged_configs_clear(void); bool closid_allocated(unsigned int closid); int resctrl_find_cleanest_closid(void); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index c9be2d0819c0..ccb85c61b43b 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -1019,6 +1019,15 @@ int resctrl_mon_resource_init(void) l3_mon_evt_init(r); + if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) { + mbm_total_event.configurable = true; + mbm_config_rftype_init("mbm_total_bytes_config"); + } + if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) { + mbm_local_event.configurable = true; + mbm_config_rftype_init("mbm_local_bytes_config"); + } + return 0; } @@ -1060,15 +1069,6 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) /* Detect list of bandwidth sources that can be tracked */ cpuid_count(0x80000020, 3, &eax, &ebx, &ecx, &edx); hw_res->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS; - - if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL)) { - mbm_total_event.configurable = true; - 
mbm_config_rftype_init("mbm_total_bytes_config"); - } - if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL)) { - mbm_local_event.configurable = true; - mbm_config_rftype_init("mbm_local_bytes_config"); - } } r->mon_capable = true; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 8285b916289c..2d6f4e0d3656 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2068,7 +2068,7 @@ void __init thread_throttle_mode_init(void) rft->fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB; } -void __init mbm_config_rftype_init(const char *config) +void mbm_config_rftype_init(const char *config) { struct rftype *rft; diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index b0ee7256e095..bfc63e8219e5 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -204,6 +204,8 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *r); struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id); int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid); +bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt); + /* * Update the ctrl_val and apply this config right now. * Must be called on one of the domain's CPUs. -- Gitee From f4d74fc073f1c2ffd5550ace8a49c6410c5969c4 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 7 Mar 2023 16:47:42 +0000 Subject: [PATCH 0556/2138] x86/resctrl: Change mon_event_config_{read,write}() to be arch helpers ANBZ: #8626 commit 297671c1f7c0c747a2ca4d007b3284f100dd0890 morse-linux. mon_event_config_{read,write}() are called via IPI and access model specific registers to do their work. To support another architecture, this needs abstracting. Rename mon_event_config_{read,write}() to have a resctrl_arch_ prefix, and move their struct mon_config_info parameter into the restrl_types header. This allows another architecture to supply an implementation of these. 
As struct mon_config_info is now exposed globally, give it a 'resctrl_' prefix. MPAM systems need access to the domain to do this work, add the resource and domain to struct resctrl_mon_config_info. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 34 +++++++++++++------------- include/linux/resctrl.h | 9 +++++++ 2 files changed, 26 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 2d6f4e0d3656..e76018687117 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1580,11 +1580,6 @@ static int rdtgroup_size_show(struct kernfs_open_file *of, return ret; } -struct mon_config_info { - u32 evtid; - u32 mon_config; -}; - #define INVALID_CONFIG_INDEX UINT_MAX /** @@ -1609,9 +1604,9 @@ static inline unsigned int mon_event_config_index_get(u32 evtid) } } -static void mon_event_config_read(void *info) +void resctrl_arch_mon_event_config_read(void *info) { - struct mon_config_info *mon_info = info; + struct resctrl_mon_config_info *mon_info = info; unsigned int index; u64 msrval; @@ -1626,14 +1621,15 @@ static void mon_event_config_read(void *info) mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS; } -static void mondata_config_read(struct rdt_domain *d, struct mon_config_info *mon_info) +static void mondata_config_read(struct resctrl_mon_config_info *mon_info) { - smp_call_function_any(&d->cpu_mask, mon_event_config_read, mon_info, 1); + smp_call_function_any(&mon_info->d->cpu_mask, + resctrl_arch_mon_event_config_read, mon_info, 1); } static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid) { - struct mon_config_info mon_info = {0}; + struct resctrl_mon_config_info mon_info = {0}; struct 
rdt_domain *dom; bool sep = false; @@ -1644,9 +1640,11 @@ static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid if (sep) seq_puts(s, ";"); - memset(&mon_info, 0, sizeof(struct mon_config_info)); + memset(&mon_info, 0, sizeof(struct resctrl_mon_config_info)); + mon_info.r = r; + mon_info.d = dom; mon_info.evtid = evtid; - mondata_config_read(dom, &mon_info); + mondata_config_read(&mon_info); seq_printf(s, "%d=0x%02x", dom->id, mon_info.mon_config); sep = true; @@ -1679,9 +1677,9 @@ static int mbm_local_bytes_config_show(struct kernfs_open_file *of, return 0; } -static void mon_event_config_write(void *info) +void resctrl_arch_mon_event_config_write(void *info) { - struct mon_config_info *mon_info = info; + struct resctrl_mon_config_info *mon_info = info; unsigned int index; index = mon_event_config_index_get(mon_info->evtid); @@ -1695,14 +1693,16 @@ static void mon_event_config_write(void *info) static void mbm_config_write_domain(struct rdt_resource *r, struct rdt_domain *d, u32 evtid, u32 val) { - struct mon_config_info mon_info = {0}; + struct resctrl_mon_config_info mon_info = {0}; /* * Read the current config value first. If both are the same then * no need to write it again. */ + mon_info.r = r; + mon_info.d = d; mon_info.evtid = evtid; - mondata_config_read(d, &mon_info); + mondata_config_read(&mon_info); if (mon_info.mon_config == val) return; @@ -1714,7 +1714,7 @@ static void mbm_config_write_domain(struct rdt_resource *r, * are scoped at the domain level. Writing any of these MSRs * on one CPU is observed by all the CPUs in the domain. 
*/ - smp_call_function_any(&d->cpu_mask, mon_event_config_write, + smp_call_function_any(&d->cpu_mask, resctrl_arch_mon_event_config_write, &mon_info, 1); /* diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index bfc63e8219e5..975b80102fbe 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -192,6 +192,13 @@ struct resctrl_cpu_sync { u32 rmid; }; +struct resctrl_mon_config_info { + struct rdt_resource *r; + struct rdt_domain *d; + u32 evtid; + u32 mon_config; +}; + /* * Update and re-load this CPUs defaults. Called via IPI, takes a pointer to * struct resctrl_cpu_sync, or NULL. @@ -205,6 +212,8 @@ struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id); int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid); bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt); +void resctrl_arch_mon_event_config_write(void *info); +void resctrl_arch_mon_event_config_read(void *info); /* * Update the ctrl_val and apply this config right now. -- Gitee From 032ab95e24c47713c70a06de59e3470605799476 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 12 Feb 2024 18:36:57 +0000 Subject: [PATCH 0557/2138] x86/resctrl: Move mbm_cfg_mask to struct rdt_resource ANBZ: #8626 commit d4c4b30510913f35145f2bf03b3fc888dae9dc2e morse-linux. The mbm_cfg_mask field lists the bits that user-space can set when configuring an event. This value is output via the last_cmd_status file. Once the filesystem parts of resctrl are moved to live in /fs/, the struct rdt_hw_resource is inaccessible to the filesystem code. Because this value is output to user-space, it has to be accessible to the filesystem code. Move it to struct rdt_resource. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/internal.h | 3 --- arch/x86/kernel/cpu/resctrl/monitor.c | 2 +- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 5 ++--- include/linux/resctrl.h | 3 +++ 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 46370eafb00f..238b81d3f64a 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -371,8 +371,6 @@ struct msr_param { * @msr_update: Function pointer to update QOS MSRs * @mon_scale: cqm counter * mon_scale = occupancy in bytes * @mbm_width: Monitor width, to detect and correct for overflow. - * @mbm_cfg_mask: Bandwidth sources that can be tracked when Bandwidth - * Monitoring Event Configuration (BMEC) is supported. 
* @cdp_enabled: CDP state of this resource * * Members of this structure are either private to the architecture @@ -387,7 +385,6 @@ struct rdt_hw_resource { struct rdt_resource *r); unsigned int mon_scale; unsigned int mbm_width; - unsigned int mbm_cfg_mask; bool cdp_enabled; }; diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index ccb85c61b43b..287fb0a5f060 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -1068,7 +1068,7 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) /* Detect list of bandwidth sources that can be tracked */ cpuid_count(0x80000020, 3, &eax, &ebx, &ecx, &edx); - hw_res->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS; + r->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS; } r->mon_capable = true; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index e76018687117..3d3a839eba6b 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1731,7 +1731,6 @@ static void mbm_config_write_domain(struct rdt_resource *r, static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) { - struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); char *dom_str = NULL, *id_str; unsigned long dom_id, val; struct rdt_domain *d; @@ -1758,9 +1757,9 @@ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) } /* Value from user cannot be more than the supported set of events */ - if ((val & hw_res->mbm_cfg_mask) != val) { + if ((val & r->mbm_cfg_mask) != val) { rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n", - hw_res->mbm_cfg_mask); + r->mbm_cfg_mask); return -EINVAL; } diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 975b80102fbe..8a7367d1ce45 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -140,6 +140,8 @@ struct resctrl_membw { * @format_str: Per resource format string to show domain value * 
@evt_list: List of monitoring events * @fflags: flags to choose base and info files + * @mbm_cfg_mask: Bandwidth sources that can be tracked when Bandwidth + * Monitoring Event Configuration (BMEC) is supported. * @cdp_capable: Is the CDP feature available on this resource */ struct rdt_resource { @@ -157,6 +159,7 @@ struct rdt_resource { const char *format_str; struct list_head evt_list; unsigned long fflags; + unsigned int mbm_cfg_mask; bool cdp_capable; }; -- Gitee From 08c36ef68c9e52b0099728a0f04eda94cbeebe40 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 9 Mar 2023 16:47:38 +0000 Subject: [PATCH 0558/2138] x86/resctrl: Allow resctrl_arch_mon_event_config_write() to return an error ANBZ: #8626 commit e51dc33bababe3f5ebfeb622076ac89b6d89332e morse-linux. resctrl_arch_mon_event_config_write() writes a bitmap of events provided by user-space into the configuration register for the monitors. This assumes that all architectures support all the features each bit corresponds to. MPAM can filter monitors based on read, write, or both, but there are many more options in the existing bitmap. To allow this interface to work for machines with MPAM, allow the architecture helper to return an error if an incompatible bitmap is set. When valid values are provided, there is no change in behaviour. If an invalid value is provided, currently it is silently ignored, but last_cmd_status is updated. After this change, the parser will stop at the first invalid value and return an error to user-space. This matches the way changes to the schemata file are made. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 20 ++++++++++++++++---- include/linux/resctrl.h | 1 + 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 3d3a839eba6b..56a0bfdc11f7 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1685,13 +1685,16 @@ void resctrl_arch_mon_event_config_write(void *info) index = mon_event_config_index_get(mon_info->evtid); if (index == INVALID_CONFIG_INDEX) { pr_warn_once("Invalid event id %d\n", mon_info->evtid); + mon_info->err = -EINVAL; return; } wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0); + + mon_info->err = 0; } -static void mbm_config_write_domain(struct rdt_resource *r, - struct rdt_domain *d, u32 evtid, u32 val) +static int mbm_config_write_domain(struct rdt_resource *r, + struct rdt_domain *d, u32 evtid, u32 val) { struct resctrl_mon_config_info mon_info = {0}; @@ -1704,7 +1707,7 @@ static void mbm_config_write_domain(struct rdt_resource *r, mon_info.evtid = evtid; mondata_config_read(&mon_info); if (mon_info.mon_config == val) - return; + return 0; mon_info.mon_config = val; @@ -1716,6 +1719,10 @@ static void mbm_config_write_domain(struct rdt_resource *r, */ smp_call_function_any(&d->cpu_mask, resctrl_arch_mon_event_config_write, &mon_info, 1); + if (mon_info.err) { + rdt_last_cmd_puts("Invalid event configuration\n"); + return mon_info.err; + } /* * When an Event Configuration is changed, the bandwidth counters @@ -1727,6 +1734,8 @@ static void mbm_config_write_domain(struct rdt_resource *r, * mbm_local and mbm_total counts for all the RMIDs. 
*/ resctrl_arch_reset_rmid_all(r, d); + + return 0; } static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) @@ -1734,6 +1743,7 @@ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) char *dom_str = NULL, *id_str; unsigned long dom_id, val; struct rdt_domain *d; + int err; /* Walking r->domains, ensure it can't race with cpuhp */ lockdep_assert_cpus_held(); @@ -1765,7 +1775,9 @@ static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) list_for_each_entry(d, &r->domains, list) { if (d->id == dom_id) { - mbm_config_write_domain(r, d, evtid, val); + err = mbm_config_write_domain(r, d, evtid, val); + if (err) + return err; goto next; } } diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 8a7367d1ce45..6705d7960dfd 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -200,6 +200,7 @@ struct resctrl_mon_config_info { struct rdt_domain *d; u32 evtid; u32 mon_config; + int err; }; /* -- Gitee From 0c3a3f0b2abbcc92ddc7d53c264149bcf9358865 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2024 15:42:33 +0000 Subject: [PATCH 0559/2138] x86/resctrl: Add resctrl_arch_ prefix to pseudo lock functions ANBZ: #8626 commit 9166173fea78964e579f1d2beafaed1498fba820 morse-linux. resctrl's pseudo lock has some copy-to-cache and measurement functions that are micro-architecture specific. pseudo_lock_fn() is not at all portable. Label these 'resctrl_arch_' so they stay under /arch/x86. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/include/asm/resctrl.h | 5 ++++ arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 36 ++++++++++++----------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 50407e83d0ca..a88af68f9fe2 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -211,6 +211,11 @@ static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, void *ctx) { }; +u64 resctrl_arch_get_prefetch_disable_bits(void); +int resctrl_arch_pseudo_lock_fn(void *_rdtgrp); +int resctrl_arch_measure_cycles_lat_fn(void *_plr); +int resctrl_arch_measure_l2_residency(void *_plr); +int resctrl_arch_measure_l3_residency(void *_plr); void resctrl_cpu_detect(struct cpuinfo_x86 *c); #else diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index f2315a50ea4f..856beb6f668b 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -62,7 +62,8 @@ static const struct class pseudo_lock_class = { }; /** - * get_prefetch_disable_bits - prefetch disable bits of supported platforms + * resctrl_arch_get_prefetch_disable_bits - prefetch disable bits of supported + * platforms * @void: It takes no parameters. * * Capture the list of platforms that have been validated to support @@ -76,13 +77,13 @@ static const struct class pseudo_lock_class = { * in the SDM. 
* * When adding a platform here also add support for its cache events to - * measure_cycles_perf_fn() + * resctrl_arch_measure_l*_residency() * * Return: * If platform is supported, the bits to disable hardware prefetchers, 0 * if platform is not supported. */ -static u64 get_prefetch_disable_bits(void) +u64 resctrl_arch_get_prefetch_disable_bits(void) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || boot_cpu_data.x86 != 6) @@ -410,7 +411,7 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp) } /** - * pseudo_lock_fn - Load kernel memory into cache + * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache * @_rdtgrp: resource group to which pseudo-lock region belongs * * This is the core pseudo-locking flow. @@ -428,7 +429,7 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp) * * Return: 0. Waiter on waitqueue will be woken on completion. */ -static int pseudo_lock_fn(void *_rdtgrp) +int resctrl_arch_pseudo_lock_fn(void *_rdtgrp) { struct rdtgroup *rdtgrp = _rdtgrp; struct pseudo_lock_region *plr = rdtgrp->plr; @@ -714,7 +715,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) * Not knowing the bits to disable prefetching implies that this * platform does not support Cache Pseudo-Locking. */ - prefetch_disable_bits = get_prefetch_disable_bits(); + prefetch_disable_bits = resctrl_arch_get_prefetch_disable_bits(); if (prefetch_disable_bits == 0) { rdt_last_cmd_puts("Pseudo-locking not supported\n"); return -EINVAL; @@ -879,7 +880,8 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) } /** - * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory + * resctrl_arch_measure_cycles_lat_fn - Measure cycle latency to read + * pseudo-locked memory * @_plr: pseudo-lock region to measure * * There is no deterministic way to test if a memory region is cached. One @@ -892,7 +894,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) * * Return: 0. Waiter on waitqueue will be woken on completion. 
*/ -static int measure_cycles_lat_fn(void *_plr) +int resctrl_arch_measure_cycles_lat_fn(void *_plr) { struct pseudo_lock_region *plr = _plr; u32 saved_low, saved_high; @@ -1076,7 +1078,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, return 0; } -static int measure_l2_residency(void *_plr) +int resctrl_arch_measure_l2_residency(void *_plr) { struct pseudo_lock_region *plr = _plr; struct residency_counts counts = {0}; @@ -1114,7 +1116,7 @@ static int measure_l2_residency(void *_plr) return 0; } -static int measure_l3_residency(void *_plr) +int resctrl_arch_measure_l3_residency(void *_plr) { struct pseudo_lock_region *plr = _plr; struct residency_counts counts = {0}; @@ -1212,18 +1214,18 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel) plr->cpu = cpu; if (sel == 1) - thread = kthread_create_on_node(measure_cycles_lat_fn, plr, - cpu_to_node(cpu), + thread = kthread_create_on_node(resctrl_arch_measure_cycles_lat_fn, + plr, cpu_to_node(cpu), "pseudo_lock_measure/%u", cpu); else if (sel == 2) - thread = kthread_create_on_node(measure_l2_residency, plr, - cpu_to_node(cpu), + thread = kthread_create_on_node(resctrl_arch_measure_l2_residency, + plr, cpu_to_node(cpu), "pseudo_lock_measure/%u", cpu); else if (sel == 3) - thread = kthread_create_on_node(measure_l3_residency, plr, - cpu_to_node(cpu), + thread = kthread_create_on_node(resctrl_arch_measure_l3_residency, + plr, cpu_to_node(cpu), "pseudo_lock_measure/%u", cpu); else @@ -1322,7 +1324,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) plr->thread_done = 0; - thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp, + thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, rdtgrp, cpu_to_node(plr->cpu), "pseudo_lock/%u", plr->cpu); if (IS_ERR(thread)) { -- Gitee From ed48d7e7ac83766e56078b6dc35d53e1e84d699b Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 19 Mar 2024 15:48:31 +0000 Subject: [PATCH 0560/2138] x86/resctrl: Allow an architecture to 
disable pseudo lock ANBZ: #8626 commit 508f630578e16a0318d333863b4d37fd5b65e3c5 morse-linux. Pseudo-lock relies on knowledge of the micro-architecture to disable prefetchers etc. On arm64 these controls are typically secure only, meaning linux can't access them. Arm's cache-lockdown feature works in a very different way. Resctrl's pseudo-lock isn't going to be used on arm64 platforms. Add a Kconfig symbol that can be selected by the architecture. This enables or disables building of the psuedo_lock.c file, and replaces the functions with stubs. An additional IS_ENABLED() check is needed in rdtgroup_mode_write() so that attempting to enable pseudo-lock reports an "Unknown or unsupported mode" to user-space. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/Kconfig | 7 ++++ arch/x86/kernel/cpu/resctrl/Makefile | 5 +-- arch/x86/kernel/cpu/resctrl/internal.h | 48 +++++++++++++++++++++----- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 3 +- 4 files changed, 52 insertions(+), 11 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f841dba33e14..52b4847616f4 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -482,6 +482,7 @@ config X86_CPU_RESCTRL depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) select KERNFS select PROC_CPU_RESCTRL if PROC_FS + select RESCTRL_FS_PSEUDO_LOCK help Enable x86 CPU resource control support. @@ -498,6 +499,12 @@ config X86_CPU_RESCTRL Say N if unsure. +config RESCTRL_FS_PSEUDO_LOCK + bool + help + Software mechanism to pin data in a cache portion using + micro-architecture specific knowledge. 
+ if X86_32 config X86_BIGSMP bool "Support for big SMP systems with more than 8 CPUs" diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile index 4a06c37b9cf1..0c13b0befd8a 100644 --- a/arch/x86/kernel/cpu/resctrl/Makefile +++ b/arch/x86/kernel/cpu/resctrl/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o -obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o +obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o +obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o +obj-$(CONFIG_RESCTRL_FS_PSEUDO_LOCK) += pseudo_lock.o CFLAGS_pseudo_lock.o = -I$(src) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 238b81d3f64a..d6db15839dc4 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -489,14 +489,6 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, unsigned long cbm); enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); int rdtgroup_tasks_assigned(struct rdtgroup *r); -int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); -int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); -bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm); -bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); -int rdt_pseudo_lock_init(void); -void rdt_pseudo_lock_release(void); -int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); -void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r); int closids_supported(void); void closid_free(int closid); @@ -529,4 +521,44 @@ void rdt_staged_configs_clear(void); bool closid_allocated(unsigned int closid); int resctrl_find_cleanest_closid(void); +#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK +int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); +int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); +bool 
rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm); +bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); +int rdt_pseudo_lock_init(void); +void rdt_pseudo_lock_release(void); +int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); +void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); +#else +static inline int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) +{ + return false; +} + +static inline bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) +{ + return false; +} + +static inline int rdt_pseudo_lock_init(void) { return 0; } +static inline void rdt_pseudo_lock_release(void) { } +static inline int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) { } +#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */ + #endif /* _ASM_X86_RESCTRL_INTERNAL_H */ diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 56a0bfdc11f7..9275d6f8a74e 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1452,7 +1452,8 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, goto out; } rdtgrp->mode = RDT_MODE_EXCLUSIVE; - } else if (!strcmp(buf, "pseudo-locksetup")) { + } else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) && + !strcmp(buf, "pseudo-locksetup")) { ret = rdtgroup_locksetup_enter(rdtgrp); if (ret) goto out; -- Gitee From 06784911671e6ccd6a992f238aae4079a1b0a559 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Mar 2019 11:35:14 +0000 Subject: [PATCH 0561/2138] x86/resctrl: Make prefetch_disable_bits belong to the arch code ANBZ: #8626 commit 53e04c1d92d1c2bd575a8989c82c909af4a1056e morse-linux. 
prefetch_disable_bits is set by rdtgroup_locksetup_enter() from a value provided by the architecture, but is largely read by other architecture helpers. Instead of exporting this value, make resctrl_arch_get_prefetch_disable_bits() set it so that the other arch-code helpers can use the cached-value. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 856beb6f668b..5a66e3b2c2ea 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -85,6 +85,8 @@ static const struct class pseudo_lock_class = { */ u64 resctrl_arch_get_prefetch_disable_bits(void) { + prefetch_disable_bits = 0; + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || boot_cpu_data.x86 != 6) return 0; @@ -100,7 +102,8 @@ u64 resctrl_arch_get_prefetch_disable_bits(void) * 3 DCU IP Prefetcher Disable (R/W) * 63:4 Reserved */ - return 0xF; + prefetch_disable_bits = 0xF; + break; case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_FAM6_ATOM_GOLDMONT_PLUS: /* @@ -111,10 +114,11 @@ u64 resctrl_arch_get_prefetch_disable_bits(void) * 2 DCU Hardware Prefetcher Disable (R/W) * 63:3 Reserved */ - return 0x5; + prefetch_disable_bits = 0x5; + break; } - return 0; + return prefetch_disable_bits; } /** @@ -715,8 +719,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) * Not knowing the bits to disable prefetching implies that this * platform does not support Cache Pseudo-Locking. 
*/ - prefetch_disable_bits = resctrl_arch_get_prefetch_disable_bits(); - if (prefetch_disable_bits == 0) { + if (resctrl_arch_get_prefetch_disable_bits() == 0) { rdt_last_cmd_puts("Pseudo-locking not supported\n"); return -EINVAL; } -- Gitee From 3a3dfe249a30664faa961279e80d9a090bc827be Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Mar 2019 17:17:15 +0000 Subject: [PATCH 0562/2138] x86/resctrl: Make resctrl_arch_pseudo_lock_fn() take a plr ANBZ: #8626 commit 02cf773e40a6f631bf3f37992d7ffb6f86ca2f4b morse-linux. resctrl_arch_pseudo_lock_fn() has architecture specific behaviour, and takes a struct rdtgroup as an argument. After the filesystem code moves to /fs/, the definition of struct rdtgroup will not be available to the architecture code. The only reason resctrl_arch_pseudo_lock_fn() wants the rdtgroup is for the CLOSID. Embed that in the pseudo_lock_region as a hw_closid, and move the definition of struct pseudo_lock_region to resctrl.h. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/include/asm/resctrl.h | 2 +- arch/x86/kernel/cpu/resctrl/internal.h | 37 --------------------- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 13 ++++---- include/linux/resctrl.h | 39 +++++++++++++++++++++++ 4 files changed, 47 insertions(+), 44 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index a88af68f9fe2..9940398e367e 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -212,7 +212,7 @@ static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, void *ctx) { }; u64 resctrl_arch_get_prefetch_disable_bits(void); -int resctrl_arch_pseudo_lock_fn(void *_rdtgrp); +int resctrl_arch_pseudo_lock_fn(void *_plr); int resctrl_arch_measure_cycles_lat_fn(void *_plr); int 
resctrl_arch_measure_l2_residency(void *_plr); int resctrl_arch_measure_l3_residency(void *_plr); diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index d6db15839dc4..be4e8f31b127 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -182,43 +182,6 @@ struct mongroup { u32 rmid; }; -/** - * struct pseudo_lock_region - pseudo-lock region information - * @s: Resctrl schema for the resource to which this - * pseudo-locked region belongs - * @d: RDT domain to which this pseudo-locked region - * belongs - * @cbm: bitmask of the pseudo-locked region - * @lock_thread_wq: waitqueue used to wait on the pseudo-locking thread - * completion - * @thread_done: variable used by waitqueue to test if pseudo-locking - * thread completed - * @cpu: core associated with the cache on which the setup code - * will be run - * @line_size: size of the cache lines - * @size: size of pseudo-locked region in bytes - * @kmem: the kernel memory associated with pseudo-locked region - * @minor: minor number of character device associated with this - * region - * @debugfs_dir: pointer to this region's directory in the debugfs - * filesystem - * @pm_reqs: Power management QoS requests related to this region - */ -struct pseudo_lock_region { - struct resctrl_schema *s; - struct rdt_domain *d; - u32 cbm; - wait_queue_head_t lock_thread_wq; - int thread_done; - int cpu; - unsigned int line_size; - unsigned int size; - void *kmem; - unsigned int minor; - struct dentry *debugfs_dir; - struct list_head pm_reqs; -}; - /** * struct rdtgroup - store rdtgroup's data in resctrl file system. 
* @kn: kernfs node diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 5a66e3b2c2ea..ba51ab1f70e6 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -416,7 +416,7 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp) /** * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache - * @_rdtgrp: resource group to which pseudo-lock region belongs + * @_plr: the pseudo-lock region descriptor * * This is the core pseudo-locking flow. * @@ -433,10 +433,9 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp) * * Return: 0. Waiter on waitqueue will be woken on completion. */ -int resctrl_arch_pseudo_lock_fn(void *_rdtgrp) +int resctrl_arch_pseudo_lock_fn(void *_plr) { - struct rdtgroup *rdtgrp = _rdtgrp; - struct pseudo_lock_region *plr = rdtgrp->plr; + struct pseudo_lock_region *plr = _plr; u32 rmid_p, closid_p; unsigned long i; u64 saved_msr; @@ -496,7 +495,8 @@ int resctrl_arch_pseudo_lock_fn(void *_rdtgrp) * pseudo-locked followed by reading of kernel memory to load it * into the cache. */ - __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, rdtgrp->closid); + __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid); + /* * Cache was flushed earlier. Now access kernel memory to read it * into cache region associated with just activated plr->closid. 
@@ -1327,7 +1327,8 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) plr->thread_done = 0; - thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, rdtgrp, + plr->closid = rdtgrp->closid; + thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, plr, cpu_to_node(plr->cpu), "pseudo_lock/%u", plr->cpu); if (IS_ERR(thread)) { diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 6705d7960dfd..3de5bc63ace0 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -25,6 +25,45 @@ int proc_resctrl_show(struct seq_file *m, /* max value for struct rdt_domain's mbps_val */ #define MBA_MAX_MBPS U32_MAX +/** + * struct pseudo_lock_region - pseudo-lock region information + * @s: Resctrl schema for the resource to which this + * pseudo-locked region belongs + * @closid: The closid that this pseudo-locked region uses + * @d: RDT domain to which this pseudo-locked region + * belongs + * @cbm: bitmask of the pseudo-locked region + * @lock_thread_wq: waitqueue used to wait on the pseudo-locking thread + * completion + * @thread_done: variable used by waitqueue to test if pseudo-locking + * thread completed + * @cpu: core associated with the cache on which the setup code + * will be run + * @line_size: size of the cache lines + * @size: size of pseudo-locked region in bytes + * @kmem: the kernel memory associated with pseudo-locked region + * @minor: minor number of character device associated with this + * region + * @debugfs_dir: pointer to this region's directory in the debugfs + * filesystem + * @pm_reqs: Power management QoS requests related to this region + */ +struct pseudo_lock_region { + struct resctrl_schema *s; + u32 closid; + struct rdt_domain *d; + u32 cbm; + wait_queue_head_t lock_thread_wq; + int thread_done; + int cpu; + unsigned int line_size; + unsigned int size; + void *kmem; + unsigned int minor; + struct dentry *debugfs_dir; + struct list_head pm_reqs; +}; + /** * struct resctrl_staged_config - parsed configuration 
to be applied * @new_ctrl: new ctrl value to be loaded -- Gitee From e3423e69e2462b248094048098b10f25d8b98654 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 2 Dec 2021 17:22:12 +0000 Subject: [PATCH 0563/2138] x86/resctrl: Move thread_throttle_mode_init() to be managed by resctrl ANBZ: #8626 commit 9dcfdbe39fb9ec2d4506330aed65686e9a62dc9d morse-linux. thread_throttle_mode_init() is called from the architecture specific code to make the 'thread_throttle_mode' file visible. The architecture specific code has already set the membw.throttle_mode in the rdt_resource. This doesn't need to be specific to the architecture, the throttle_mode can be used by resctrl to determine if the 'thread_throttle_mode' file should be visible. Call thread_throttle_mode_init() from resctrl_setup(), check the membw.throttle_mode on the MBA resource. This avoids publishing an extra function between the architecture and filesystem code. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 1 - arch/x86/kernel/cpu/resctrl/internal.h | 1 - arch/x86/kernel/cpu/resctrl/rdtgroup.c | 9 ++++++++- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index a320e3d1c2c9..1aa6f8244cd1 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -227,7 +227,6 @@ static __init bool __get_mem_config_intel(struct rdt_resource *r) r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD; else r->membw.throttle_mode = THREAD_THROTTLE_MAX; - thread_throttle_mode_init(); r->alloc_capable = true; diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index be4e8f31b127..e849d4407769 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h 
+++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -478,7 +478,6 @@ void cqm_handle_limbo(struct work_struct *work); bool has_busy_rmid(struct rdt_domain *d); void __check_limbo(struct rdt_domain *d, bool force_free); void rdt_domain_reconfigure_cdp(struct rdt_resource *r); -void __init thread_throttle_mode_init(void); void mbm_config_rftype_init(const char *config); void rdt_staged_configs_clear(void); bool closid_allocated(unsigned int closid); diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 9275d6f8a74e..702a94fad6db 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2069,10 +2069,15 @@ static struct rftype *rdtgroup_get_rftype_by_name(const char *name) return NULL; } -void __init thread_throttle_mode_init(void) +static void __init thread_throttle_mode_init(void) { + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); struct rftype *rft; + if (!r->alloc_capable || + r->membw.throttle_mode == THREAD_THROTTLE_UNDEFINED) + return; + rft = rdtgroup_get_rftype_by_name("thread_throttle_mode"); if (!rft) return; @@ -4194,6 +4199,8 @@ int __init resctrl_init(void) rdtgroup_setup_default(); + thread_throttle_mode_init(); + ret = resctrl_mon_resource_init(); if (ret) return ret; -- Gitee From c224aed756264d5d01d4c575ce6f89fe60ae56c9 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 14 Jul 2021 14:15:49 +0100 Subject: [PATCH 0564/2138] x86/resctrl: Move get_config_index() to a header ANBZ: #8626 commit 24e3994691f6f0c2fcc7d4b167fea30c3d8c4d39 morse-linux. get_config_index() is used by the architecture specific code to map a CLOSID+type pair to an index in the configuration arrays. MPAM needs to do this too to preserve the ABI to user-space, there is no reason to do it differently. Move the helper to a header file. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 19 +++---------------- include/linux/resctrl.h | 15 +++++++++++++++ 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index ad9e5516e607..4d91a8abb24d 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -282,19 +282,6 @@ static int parse_line(char *line, struct resctrl_schema *s, return -EINVAL; } -static u32 get_config_index(u32 closid, enum resctrl_conf_type type) -{ - switch (type) { - default: - case CDP_NONE: - return closid; - case CDP_CODE: - return closid * 2 + 1; - case CDP_DATA: - return closid * 2; - } -} - static bool apply_config(struct rdt_hw_domain *hw_dom, struct resctrl_staged_config *cfg, u32 idx, cpumask_var_t cpu_mask) @@ -316,7 +303,7 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d, { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); - u32 idx = get_config_index(closid, t); + u32 idx = resctrl_get_config_index(closid, t); struct msr_param msr_param; if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask)) @@ -356,7 +343,7 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) if (!cfg->have_new_ctrl) continue; - idx = get_config_index(closid, t); + idx = resctrl_get_config_index(closid, t); if (!apply_config(hw_dom, cfg, idx, cpu_mask)) continue; @@ -481,7 +468,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type type) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); - u32 idx = get_config_index(closid, type); + u32 idx = 
resctrl_get_config_index(closid, type); return hw_dom->ctrl_val[idx]; } diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 3de5bc63ace0..73c111963433 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -258,6 +258,21 @@ bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt); void resctrl_arch_mon_event_config_write(void *info); void resctrl_arch_mon_event_config_read(void *info); +/* For use by arch code to remap resctrl's smaller CDP CLOSID range */ +static inline u32 resctrl_get_config_index(u32 closid, + enum resctrl_conf_type type) +{ + switch (type) { + default: + case CDP_NONE: + return closid; + case CDP_CODE: + return (closid * 2) + 1; + case CDP_DATA: + return (closid * 2); + } +} + /* * Update the ctrl_val and apply this config right now. * Must be called on one of the domain's CPUs. -- Gitee From e841ac0550038d38ecb1dc5ad663f0df982d8c1d Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Mar 2019 11:42:19 +0000 Subject: [PATCH 0565/2138] x86/resctrl: Claim get_domain_from_cpu() for resctrl ANBZ: #8626 commit 8f3c04a0853a2baae0369d079012906bc3ff68ff morse-linux. get_domain_from_cpu() is a handy helper that both the arch code and resctrl need to use. Rename it resctrl_get_domain_from_cpu() so it gets moved out to /fs, and exported back to the arch code. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 15 +-------------- arch/x86/kernel/cpu/resctrl/internal.h | 1 - arch/x86/kernel/cpu/resctrl/monitor.c | 2 +- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 2 +- include/linux/resctrl.h | 19 +++++++++++++++++++ 5 files changed, 22 insertions(+), 17 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 1aa6f8244cd1..948b4d409cd9 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -355,19 +355,6 @@ cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]); } -struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r) -{ - struct rdt_domain *d; - - list_for_each_entry(d, &r->domains, list) { - /* Find the domain that contains this CPU */ - if (cpumask_test_cpu(cpu, &d->cpu_mask)) - return d; - } - - return NULL; -} - u32 resctrl_arch_get_num_closid(struct rdt_resource *r) { return resctrl_to_arch_res(r)->num_closid; @@ -381,7 +368,7 @@ void rdt_ctrl_update(void *arg) int cpu = smp_processor_id(); struct rdt_domain *d; - d = get_domain_from_cpu(cpu, r); + d = resctrl_get_domain_from_cpu(cpu, r); if (d) { hw_res->msr_update(d, m, r); return; diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index e849d4407769..3a3962736061 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -452,7 +452,6 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, unsigned long cbm); enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); int rdtgroup_tasks_assigned(struct rdtgroup *r); -struct rdt_domain *get_domain_from_cpu(int cpu, 
struct rdt_resource *r); int closids_supported(void); void closid_free(int closid); int alloc_rmid(u32 closid); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 287fb0a5f060..8b316d9acc3b 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -676,7 +676,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) idx = resctrl_arch_rmid_idx_encode(closid, rmid); pmbm_data = &dom_mbm->mbm_local[idx]; - dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba); + dom_mba = resctrl_get_domain_from_cpu(smp_processor_id(), r_mba); if (!dom_mba) { pr_warn_once("Failure to get domain for MBA update\n"); return; diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 702a94fad6db..085fb9c2333a 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -4165,7 +4165,7 @@ void resctrl_offline_cpu(unsigned int cpu) if (!l3->mon_capable) goto out_unlock; - d = get_domain_from_cpu(cpu, l3); + d = resctrl_get_domain_from_cpu(cpu, l3); if (d) { if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) { cancel_delayed_work(&d->mbm_over); diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 73c111963433..84420253dc05 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -2,6 +2,7 @@ #ifndef _RESCTRL_H #define _RESCTRL_H +#include #include #include #include @@ -273,6 +274,24 @@ static inline u32 resctrl_get_config_index(u32 closid, } } +/* + * Caller must hold the cpuhp read lock to prevent the struct rdt_domain being + * freed. 
+ */ +static inline struct rdt_domain * +resctrl_get_domain_from_cpu(int cpu, struct rdt_resource *r) +{ + struct rdt_domain *d; + + list_for_each_entry_rcu(d, &r->domains, list) { + /* Find the domain that contains this CPU */ + if (cpumask_test_cpu(cpu, &d->cpu_mask)) + return d; + } + + return NULL; +} + /* * Update the ctrl_val and apply this config right now. * Must be called on one of the domain's CPUs. -- Gitee From b8d6653f92d0d0ebad2c1c2d51984bbf6c0bc81a Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Jul 2021 16:05:56 +0100 Subject: [PATCH 0566/2138] x86/resctrl: Describe resctrl's bitmap size assumptions ANBZ: #8626 commit 9a9ad2443351feb996d41f24cb382dac399a249d morse-linux. resctrl operates on configuration bitmaps and a bitmap of allocated CLOSID, both are stored in a u32. MPAM supports configuration/portion bitmaps and PARTIDs larger than will fit in a u32. Add some preprocessor values that make it clear why MPAM clamps some of these values. This will make it easier to find code related to these values if this resctrl behaviour ever changes. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- include/linux/resctrl.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 84420253dc05..f463fb949677 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -26,6 +26,17 @@ int proc_resctrl_show(struct seq_file *m, /* max value for struct rdt_domain's mbps_val */ #define MBA_MAX_MBPS U32_MAX +/* + * Resctrl uses a u32 as a closid bitmap. The maximum closid is 32. + */ +#define RESCTRL_MAX_CLOSID 32 + +/* + * Resctrl uses u32 to hold the user-space config. The maximum bitmap size is + * 32. 
+ */ +#define RESCTRL_MAX_CBM 32 + /** * struct pseudo_lock_region - pseudo-lock region information * @s: Resctrl schema for the resource to which this -- Gitee From 1638046913b6424ff884a4b16e50d0d64b88cd8e Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 8 Mar 2024 16:03:04 +0000 Subject: [PATCH 0567/2138] x86/resctrl: Rename resctrl_sched_in() to begin resctrl_arch_ ANBZ: #8626 commit bebb9ca3523eae808b16ac03718081436baaf99c morse-linux. resctrl_sched_in() loads the architecture specific CPU MSRs with the CLOSID and RMID values. This function was named before resctrl was split to have architecture specific code, and generic filesystem code. This function is obviously architecture specific, but does not begin with 'resctrl_arch_', making it the odd one out in the functions an architecture needs to support to enable resctrl. Rename it for concistency. This is purely cosmetic. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/include/asm/resctrl.h | 4 ++-- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 12 ++++++------ arch/x86/kernel/process_32.c | 2 +- arch/x86/kernel/process_64.c | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 9940398e367e..491342f56811 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -177,7 +177,7 @@ static inline bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 ignored, return READ_ONCE(tsk->rmid) == rmid; } -static inline void resctrl_sched_in(struct task_struct *tsk) +static inline void resctrl_arch_sched_in(struct task_struct *tsk) { if (static_branch_likely(&rdt_enable_key)) __resctrl_sched_in(tsk); @@ -220,7 +220,7 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c); #else -static inline void 
resctrl_sched_in(struct task_struct *tsk) {} +static inline void resctrl_arch_sched_in(struct task_struct *tsk) {} static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {} #endif /* CONFIG_X86_CPU_RESCTRL */ diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 085fb9c2333a..218aebd6387f 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -359,7 +359,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of, } /* - * This is safe against resctrl_sched_in() called from __switch_to() + * This is safe against resctrl_arch_sched_in() called from __switch_to() * because __switch_to() is executed with interrupts disabled. A local call * from update_closid_rmid() is protected against __switch_to() because * preemption is disabled. @@ -378,7 +378,7 @@ void resctrl_arch_sync_cpu_defaults(void *info) * executing task might have its own closid selected. Just reuse * the context switch code. */ - resctrl_sched_in(current); + resctrl_arch_sched_in(current); } /* @@ -605,7 +605,7 @@ static void _update_task_closid_rmid(void *task) * Otherwise, the MSR is updated when the task is scheduled in. */ if (task == current) - resctrl_sched_in(task); + resctrl_arch_sched_in(task); } static void update_task_closid_rmid(struct task_struct *t) @@ -663,7 +663,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk, * Ensure the task's closid and rmid are written before determining if * the task is current that will decide if it will be interrupted. * This pairs with the full barrier between the rq->curr update and - * resctrl_sched_in() during context switch. + * resctrl_arch_sched_in() during context switch. */ smp_mb(); @@ -2946,8 +2946,8 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, /* * Order the closid/rmid stores above before the loads * in task_curr(). 
This pairs with the full barrier - * between the rq->curr update and resctrl_sched_in() - * during context switch. + * between the rq->curr update and + * resctrl_arch_sched_in() during context switch. */ smp_mb(); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 708c87b88cc1..619560eb9f94 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -212,7 +212,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) switch_fpu_finish(); /* Load the Intel cache allocation PQR MSR. */ - resctrl_sched_in(next_p); + resctrl_arch_sched_in(next_p); return prev_p; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index dd19a4db741a..fa04091e2f4e 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -659,7 +659,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) } /* Load the Intel cache allocation PQR MSR. */ - resctrl_sched_in(next_p); + resctrl_arch_sched_in(next_p); return prev_p; } -- Gitee From 805613d2c780df356a06644a95419c1219556bbf Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Mar 2019 16:49:39 +0000 Subject: [PATCH 0568/2138] x86/resctrl: Drop __init/__exit on assorted symbols ANBZ: #8626 commit 63cf0f208aec2e54acfbae4314be2b4674f931bf morse-linux. Because ARM's MPAM controls are probed using MMIO, resctrl can't be initialised until enough CPUs are online to have determined the system-wide supported num_closid. Arm64 also supports 'late onlined secondaries', where only a subset of CPUs are online during boot. These two combine to mean the MPAM driver may not be able to initialise resctrl until user-space has brought 'enough' CPUs online. To allow MPAM to initialise resctrl after __init text has been free'd, remove all the __init markings from resctrl. The existing __exit markings cause these functions to be removed by the linker as it has never been possible to build resctrl as a module. 
MPAM has an error interrupt which causes the driver to reset and disable itself. Remove the __exit markings to allow the MPAM driver to tear down resctrl when an error occurs. Signed-off-by: James Morse --- If 'late onlined secondaries' is an alien concept, I can add a worked example to the commit message: If a system has two L3 caches, but during boot only CPU-0 is online, then no CPU is able to probe the features of the second L3 cache. It's not until user-space brings other CPUs online that the MPAM driver can finally get a glimpse of all the hardware to determine what properties the system has. [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/internal.h | 2 +- arch/x86/kernel/cpu/resctrl/monitor.c | 4 ++-- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 8 ++++---- include/linux/resctrl.h | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 3a3962736061..56218193a8ba 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -457,7 +457,7 @@ void closid_free(int closid); int alloc_rmid(u32 closid); void free_rmid(u32 closid, u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); -void __exit resctrl_mon_resource_exit(void); +void resctrl_mon_resource_exit(void); bool rdt_cpu_has(int flag); void mon_event_count(void *info); int rdtgroup_mondata_show(struct seq_file *m, void *arg); diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 8b316d9acc3b..7e6fca138cb7 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -954,7 +954,7 @@ static int dom_data_init(struct rdt_resource *r) return err; } -static void __exit dom_data_exit(struct 
rdt_resource *r) +static void dom_data_exit(struct rdt_resource *r) { if (!r->mon_capable) return; @@ -1076,7 +1076,7 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) return 0; } -void __exit resctrl_mon_resource_exit(void) +void resctrl_mon_resource_exit(void) { struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 218aebd6387f..1425a33d201d 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -2069,7 +2069,7 @@ static struct rftype *rdtgroup_get_rftype_by_name(const char *name) return NULL; } -static void __init thread_throttle_mode_init(void) +static void thread_throttle_mode_init(void) { struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); struct rftype *rft; @@ -3997,7 +3997,7 @@ static void rdtgroup_destroy_root(void) rdtgroup_default.kn = NULL; } -static void __init rdtgroup_setup_default(void) +static void rdtgroup_setup_default(void) { mutex_lock(&rdtgroup_mutex); @@ -4190,7 +4190,7 @@ void resctrl_offline_cpu(unsigned int cpu) * * Return: 0 on success or -errno */ -int __init resctrl_init(void) +int resctrl_init(void) { int ret = 0; @@ -4244,7 +4244,7 @@ int __init resctrl_init(void) return ret; } -void __exit resctrl_exit(void) +void resctrl_exit(void) { debugfs_remove_recursive(debugfs_resctrl); unregister_filesystem(&rdt_fs_type); diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index f463fb949677..5da55e58f229 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -393,7 +393,7 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d); extern unsigned int resctrl_rmid_realloc_threshold; extern unsigned int resctrl_rmid_realloc_limit; -int __init resctrl_init(void); -void __exit resctrl_exit(void); +int resctrl_init(void); +void resctrl_exit(void); #endif /* _RESCTRL_H */ -- Gitee From 
e5e48c802a6b060613faffddde5d402425222188 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Mar 2019 17:02:33 +0000 Subject: [PATCH 0569/2138] fs/resctrl: Add boiler plate for external resctrl code ANBZ: #8626 commit 54e9a22058364365403ce63bed514925200f9336 morse-linux. Add Makefile and Kconfig for fs/resctrl. Add ARCH_HAS_CPU_RESCTRL for the common parts of the resctrl interface and make X86_CPU_RESCTRL depend on this. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- MAINTAINERS | 1 + arch/Kconfig | 8 ++++++++ arch/x86/Kconfig | 10 +++------- fs/Kconfig | 1 + fs/Makefile | 1 + fs/resctrl/Kconfig | 23 +++++++++++++++++++++++ fs/resctrl/Makefile | 3 +++ fs/resctrl/ctrlmondata.c | 0 fs/resctrl/internal.h | 0 fs/resctrl/monitor.c | 0 fs/resctrl/psuedo_lock.c | 0 fs/resctrl/rdtgroup.c | 0 include/linux/resctrl.h | 4 ++++ 13 files changed, 44 insertions(+), 7 deletions(-) create mode 100644 fs/resctrl/Kconfig create mode 100644 fs/resctrl/Makefile create mode 100644 fs/resctrl/ctrlmondata.c create mode 100644 fs/resctrl/internal.h create mode 100644 fs/resctrl/monitor.c create mode 100644 fs/resctrl/psuedo_lock.c create mode 100644 fs/resctrl/rdtgroup.c diff --git a/MAINTAINERS b/MAINTAINERS index d0b4a22f8ecf..58647078de3f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -18083,6 +18083,7 @@ S: Supported F: Documentation/arch/x86/resctrl* F: arch/x86/include/asm/resctrl.h F: arch/x86/kernel/cpu/resctrl/ +F: fs/resctrl/ F: include/linux/resctrl*.h F: tools/testing/selftests/resctrl/ diff --git a/arch/Kconfig b/arch/Kconfig index 09603e0bc2cc..80533a75f511 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -1333,6 +1333,14 @@ config STRICT_MODULE_RWX config ARCH_HAS_PHYS_TO_DMA bool +config ARCH_HAS_CPU_RESCTRL + bool + help + The 'resctrl' filesystem allows CPU controls of 
shared resources + such as caches and memory bandwidth to be configured. An architecture + selects this if it provides the arch-specific hooks for the filesystem + and needs the per-task CLOSID/RMID properties. + config HAVE_ARCH_COMPILER_H bool help diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 52b4847616f4..a4846bc89600 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -480,8 +480,10 @@ config GOLDFISH config X86_CPU_RESCTRL bool "x86 CPU resource control support" depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) + depends on MISC_FILESYSTEMS select KERNFS - select PROC_CPU_RESCTRL if PROC_FS + select ARCH_HAS_CPU_RESCTRL + select RESCTRL_FS select RESCTRL_FS_PSEUDO_LOCK help Enable x86 CPU resource control support. @@ -499,12 +501,6 @@ config X86_CPU_RESCTRL Say N if unsure. -config RESCTRL_FS_PSEUDO_LOCK - bool - help - Software mechanism to pin data in a cache portion using - micro-architecture specific knowledge. - if X86_32 config X86_BIGSMP bool "Support for big SMP systems with more than 8 CPUs" diff --git a/fs/Kconfig b/fs/Kconfig index aa7e03cc1941..1e3ed753b9fe 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -325,6 +325,7 @@ source "fs/omfs/Kconfig" source "fs/hpfs/Kconfig" source "fs/qnx4/Kconfig" source "fs/qnx6/Kconfig" +source "fs/resctrl/Kconfig" source "fs/romfs/Kconfig" source "fs/pstore/Kconfig" source "fs/sysv/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index f9541f40be4e..b62375770dee 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -129,3 +129,4 @@ obj-$(CONFIG_EFIVAR_FS) += efivarfs/ obj-$(CONFIG_EROFS_FS) += erofs/ obj-$(CONFIG_VBOXSF_FS) += vboxsf/ obj-$(CONFIG_ZONEFS_FS) += zonefs/ +obj-$(CONFIG_RESCTRL_FS) += resctrl/ diff --git a/fs/resctrl/Kconfig b/fs/resctrl/Kconfig new file mode 100644 index 000000000000..36a1ddbe6c21 --- /dev/null +++ b/fs/resctrl/Kconfig @@ -0,0 +1,23 @@ +config RESCTRL_FS + bool "CPU Resource Control Filesystem (resctrl)" + depends on ARCH_HAS_CPU_RESCTRL + select KERNFS + select PROC_CPU_RESCTRL if 
PROC_FS + help + Resctrl is a filesystem interface + to control allocation and + monitoring of system resources + used by the CPUs. + +config RESCTRL_FS_PSEUDO_LOCK + bool + help + Software mechanism to pin data in a cache portion using + micro-architecture specific knowledge. + +config RESCTRL_RMID_DEPENDS_ON_CLOSID + bool + help + Enable by the architecture when the RMID values depend on the CLOSID. + This causes the closid allocator to search for CLOSID with clean + RMID. diff --git a/fs/resctrl/Makefile b/fs/resctrl/Makefile new file mode 100644 index 000000000000..10fcfb0fdb10 --- /dev/null +++ b/fs/resctrl/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_RESCTRL_FS) += rdtgroup.o ctrlmondata.o monitor.o +obj-$(CONFIG_RESCTRL_FS_PSEUDO_LOCK) += psuedo_lock.o diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/fs/resctrl/psuedo_lock.c b/fs/resctrl/psuedo_lock.c new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 5da55e58f229..f786ffceeda3 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -8,6 +8,10 @@ #include #include +#ifdef CONFIG_ARCH_HAS_CPU_RESCTRL +#include +#endif + /* CLOSID, RMID value used by the default control group */ #define RESCTRL_RESERVED_CLOSID 0 #define RESCTRL_RESERVED_RMID 0 -- Gitee From 05cb1370fa25ecdfe821c7447b79f6f77d90969b Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 22 Jun 2021 17:34:16 +0100 Subject: [PATCH 0570/2138] x86/resctrl: Move the filesystem bits to headers visible to fs/resctrl ANBZ: #8626 
commit d59bc7452c5197cb300f0dd6f3d5a895039e0e59 morse-linux. Once the filesystem parts of resctrl move to fs/resctrl, it cannot rely on definitions in x86's internal.h. Move definitions in internal.h that need to be shared between the filesystem and architecture code to header files that fs/resctrl can include. Doing this separately means the filesystem code only moves between files of the same name, instead of having these changes mixed in too. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/include/asm/resctrl.h | 3 +++ arch/x86/kernel/cpu/resctrl/core.c | 5 ++++ arch/x86/kernel/cpu/resctrl/internal.h | 36 -------------------------- include/linux/resctrl.h | 3 +++ include/linux/resctrl_types.h | 30 +++++++++++++++++++++ 5 files changed, 41 insertions(+), 36 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 491342f56811..746431c66fc4 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -218,6 +218,9 @@ int resctrl_arch_measure_l2_residency(void *_plr); int resctrl_arch_measure_l3_residency(void *_plr); void resctrl_cpu_detect(struct cpuinfo_x86 *c); +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l); +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable); + #else static inline void resctrl_arch_sched_in(struct task_struct *tsk) {} diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 948b4d409cd9..87cd508588e5 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -306,6 +306,11 @@ static void rdt_get_cdp_l2_config(void) rdt_get_cdp_config(RDT_RESOURCE_L2); } +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l) +{ + return rdt_resources_all[l].cdp_enabled; +} + static void 
mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) { diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 56218193a8ba..0f7e3f10941b 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -15,12 +15,6 @@ #define L2_QOS_CDP_ENABLE 0x01ULL -#define CQM_LIMBOCHECK_INTERVAL 1000 - -#define MBM_CNTR_WIDTH_BASE 24 -#define MBM_OVERFLOW_INTERVAL 1000 -#define MAX_MBA_BW 100u -#define MBA_IS_LINEAR 0x4 #define MBM_CNTR_WIDTH_OFFSET_AMD 20 #define RMID_VAL_ERROR BIT_ULL(63) @@ -210,29 +204,6 @@ struct rdtgroup { struct pseudo_lock_region *plr; }; -/* rdtgroup.flags */ -#define RDT_DELETED 1 - -/* rftype.flags */ -#define RFTYPE_FLAGS_CPUS_LIST 1 - -/* - * Define the file type flags for base and info directories. - */ -#define RFTYPE_INFO BIT(0) -#define RFTYPE_BASE BIT(1) -#define RFTYPE_CTRL BIT(4) -#define RFTYPE_MON BIT(5) -#define RFTYPE_TOP BIT(6) -#define RFTYPE_RES_CACHE BIT(8) -#define RFTYPE_RES_MB BIT(9) -#define RFTYPE_DEBUG BIT(10) -#define RFTYPE_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) -#define RFTYPE_MON_INFO (RFTYPE_INFO | RFTYPE_MON) -#define RFTYPE_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) -#define RFTYPE_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) -#define RFTYPE_MON_BASE (RFTYPE_BASE | RFTYPE_MON) - /* List of all resource groups */ extern struct list_head rdt_all_groups; @@ -370,13 +341,6 @@ static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res) return &hw_res->r_resctrl; } -static inline bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l) -{ - return rdt_resources_all[l].cdp_enabled; -} - -int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable); - /* * To return the common struct rdt_resource, which is contained in struct * rdt_hw_resource, walk the resctrl member of struct rdt_hw_resource. 
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index f786ffceeda3..00cc0457af50 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -41,6 +41,9 @@ int proc_resctrl_show(struct seq_file *m, */ #define RESCTRL_MAX_CBM 32 +extern unsigned int resctrl_rmid_realloc_limit; +extern unsigned int resctrl_rmid_realloc_threshold; + /** * struct pseudo_lock_region - pseudo-lock region information * @s: Resctrl schema for the resource to which this diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h index 4788bd95dac6..fe0b10b589c0 100644 --- a/include/linux/resctrl_types.h +++ b/include/linux/resctrl_types.h @@ -7,6 +7,36 @@ #ifndef __LINUX_RESCTRL_TYPES_H #define __LINUX_RESCTRL_TYPES_H +#define CQM_LIMBOCHECK_INTERVAL 1000 + +#define MBM_CNTR_WIDTH_BASE 24 +#define MBM_OVERFLOW_INTERVAL 1000 +#define MAX_MBA_BW 100u +#define MBA_IS_LINEAR 0x4 + +/* rdtgroup.flags */ +#define RDT_DELETED 1 + +/* rftype.flags */ +#define RFTYPE_FLAGS_CPUS_LIST 1 + +/* + * Define the file type flags for base and info directories. + */ +#define RFTYPE_INFO BIT(0) +#define RFTYPE_BASE BIT(1) +#define RFTYPE_CTRL BIT(4) +#define RFTYPE_MON BIT(5) +#define RFTYPE_TOP BIT(6) +#define RFTYPE_RES_CACHE BIT(8) +#define RFTYPE_RES_MB BIT(9) +#define RFTYPE_DEBUG BIT(10) +#define RFTYPE_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) +#define RFTYPE_MON_INFO (RFTYPE_INFO | RFTYPE_MON) +#define RFTYPE_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) +#define RFTYPE_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) +#define RFTYPE_MON_BASE (RFTYPE_BASE | RFTYPE_MON) + /* Reads to Local DRAM Memory */ #define READS_TO_LOCAL_MEM BIT(0) -- Gitee From 52a306c6c72b60c556009f8933cba0c97a2a0d9d Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 22 Jan 2024 13:54:21 +0000 Subject: [PATCH 0571/2138] x86/resctrl: Move the resctrl filesystem code to /fs/resctrl ANBZ: #8626 commit f6d5f1a23554faed3bf9edb6103ec7e6d798674c morse-linux. 
resctrl is linux's defacto interface for managing cache and bandwidth policies for groups of tasks. To allow other architectures to make use of this pseudo filesystem, move it live in /fs/resctrl instead of /arch/x86. This move leaves behind the parts of resctrl that form the architecture interface for x86. Signed-off-by: James Morse --- Discussion needed on how/when to merge this, as it would conflict with all outstanding series. It's probably worth deferring to some opportune time, but is included here for illustration. [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/move_to_fs/v1 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2974 --- arch/x86/kernel/cpu/resctrl/core.c | 15 - arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 510 --- arch/x86/kernel/cpu/resctrl/internal.h | 310 -- arch/x86/kernel/cpu/resctrl/monitor.c | 821 ---- arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 1093 ------ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 4288 +-------------------- fs/resctrl/ctrlmondata.c | 532 +++ fs/resctrl/internal.h | 340 ++ fs/resctrl/monitor.c | 843 ++++ fs/resctrl/psuedo_lock.c | 1122 ++++++ fs/resctrl/rdtgroup.c | 4013 +++++++++++++++++++ 11 files changed, 6997 insertions(+), 6890 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 87cd508588e5..51389ebc0f19 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -164,21 +164,6 @@ static inline void cache_alloc_hsw_probe(void) rdt_alloc_capable = true; } -bool is_mba_sc(struct rdt_resource *r) -{ - if (!r) - r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); - - /* - * The software controller support is only applicable to MBA resource. - * Make sure to check for resource type. 
- */ - if (r->rid != RDT_RESOURCE_MBA) - return false; - - return r->membw.mba_sc; -} - /* * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values * exposed to user interface and the h/w understandable delay values. diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index 4d91a8abb24d..c5c3eaea27b6 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -23,265 +23,6 @@ #include "internal.h" -struct rdt_parse_data { - struct rdtgroup *rdtgrp; - char *buf; -}; - -typedef int (ctrlval_parser_t)(struct rdt_parse_data *data, - struct resctrl_schema *s, - struct rdt_domain *d); - -/* - * Check whether MBA bandwidth percentage value is correct. The value is - * checked against the minimum and max bandwidth values specified by the - * hardware. The allocated bandwidth percentage is rounded to the next - * control step available on the hardware. - */ -static bool bw_validate(char *buf, u32 *data, struct rdt_resource *r) -{ - int ret; - u32 bw; - - /* - * Only linear delay values is supported for current Intel SKUs. - */ - if (!r->membw.delay_linear && r->membw.arch_needs_linear) { - rdt_last_cmd_puts("No support for non-linear MB domains\n"); - return false; - } - - ret = kstrtou32(buf, 10, &bw); - if (ret) { - rdt_last_cmd_printf("Invalid MB value %s\n", buf); - return false; - } - - /* Nothing else to do if software controller is enabled. 
*/ - if (is_mba_sc(r)) { - *data = bw; - return true; - } - - if (bw < r->membw.min_bw || bw > r->default_ctrl) { - rdt_last_cmd_printf("MB value %u out of range [%d,%d]\n", - bw, r->membw.min_bw, r->default_ctrl); - return false; - } - - *data = roundup(bw, (unsigned long)r->membw.bw_gran); - return true; -} - -static int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d) -{ - struct resctrl_staged_config *cfg; - u32 closid = data->rdtgrp->closid; - struct rdt_resource *r = s->res; - u32 bw_val; - - cfg = &d->staged_config[s->conf_type]; - if (cfg->have_new_ctrl) { - rdt_last_cmd_printf("Duplicate domain %d\n", d->id); - return -EINVAL; - } - - if (!bw_validate(data->buf, &bw_val, r)) - return -EINVAL; - - if (is_mba_sc(r)) { - d->mbps_val[closid] = bw_val; - return 0; - } - - cfg->new_ctrl = bw_val; - cfg->have_new_ctrl = true; - - return 0; -} - -/* - * Check whether a cache bit mask is valid. - * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID: - * - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1 - * - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1 - * - * Haswell does not support a non-contiguous 1s value and additionally - * requires at least two bits set. - * AMD allows non-contiguous bitmasks. - */ -static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) -{ - unsigned long first_bit, zero_bit, val; - unsigned int cbm_len = r->cache.cbm_len; - int ret; - - ret = kstrtoul(buf, 16, &val); - if (ret) { - rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf); - return false; - } - - if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) { - rdt_last_cmd_puts("Mask out of range\n"); - return false; - } - - first_bit = find_first_bit(&val, cbm_len); - zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); - - /* Are non-contiguous bitmasks allowed? 
*/ - if (!r->cache.arch_has_sparse_bitmasks && - (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) { - rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val); - return false; - } - - if ((zero_bit - first_bit) < r->cache.min_cbm_bits) { - rdt_last_cmd_printf("Need at least %d bits in the mask\n", - r->cache.min_cbm_bits); - return false; - } - - *data = val; - return true; -} - -/* - * Read one cache bit mask (hex). Check that it is valid for the current - * resource type. - */ -static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d) -{ - struct rdtgroup *rdtgrp = data->rdtgrp; - struct resctrl_staged_config *cfg; - struct rdt_resource *r = s->res; - u32 cbm_val; - - cfg = &d->staged_config[s->conf_type]; - if (cfg->have_new_ctrl) { - rdt_last_cmd_printf("Duplicate domain %d\n", d->id); - return -EINVAL; - } - - /* - * Cannot set up more than one pseudo-locked region in a cache - * hierarchy. - */ - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && - rdtgroup_pseudo_locked_in_hierarchy(d)) { - rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n"); - return -EINVAL; - } - - if (!cbm_validate(data->buf, &cbm_val, r)) - return -EINVAL; - - if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE || - rdtgrp->mode == RDT_MODE_SHAREABLE) && - rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) { - rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n"); - return -EINVAL; - } - - /* - * The CBM may not overlap with the CBM of another closid if - * either is exclusive. 
- */ - if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) { - rdt_last_cmd_puts("Overlaps with exclusive group\n"); - return -EINVAL; - } - - if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) { - if (rdtgrp->mode == RDT_MODE_EXCLUSIVE || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - rdt_last_cmd_puts("Overlaps with other group\n"); - return -EINVAL; - } - } - - cfg->new_ctrl = cbm_val; - cfg->have_new_ctrl = true; - - return 0; -} - -static ctrlval_parser_t *get_parser(struct rdt_resource *res) -{ - if (res->fflags & RFTYPE_RES_CACHE) - return &parse_cbm; - else - return &parse_bw; -} - -/* - * For each domain in this resource we expect to find a series of: - * id=mask - * separated by ";". The "id" is in decimal, and must match one of - * the "id"s for this resource. - */ -static int parse_line(char *line, struct resctrl_schema *s, - struct rdtgroup *rdtgrp) -{ - ctrlval_parser_t *parse_ctrlval = get_parser(s->res); - enum resctrl_conf_type t = s->conf_type; - struct resctrl_staged_config *cfg; - struct rdt_resource *r = s->res; - struct rdt_parse_data data; - char *dom = NULL, *id; - struct rdt_domain *d; - unsigned long dom_id; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && - (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) { - rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n"); - return -EINVAL; - } - -next: - if (!line || line[0] == '\0') - return 0; - dom = strsep(&line, ";"); - id = strsep(&dom, "="); - if (!dom || kstrtoul(id, 10, &dom_id)) { - rdt_last_cmd_puts("Missing '=' or non-numeric domain\n"); - return -EINVAL; - } - dom = strim(dom); - list_for_each_entry(d, &r->domains, list) { - if (d->id == dom_id) { - data.buf = dom; - data.rdtgrp = rdtgrp; - if (parse_ctrlval(&data, s, d)) - return -EINVAL; - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - cfg = &d->staged_config[t]; - /* - * In pseudo-locking 
setup mode and just - * parsed a valid CBM that should be - * pseudo-locked. Only one locked region per - * resource group and domain so just do - * the required initialization for single - * region and return. - */ - rdtgrp->plr->s = s; - rdtgrp->plr->d = d; - rdtgrp->plr->cbm = cfg->new_ctrl; - d->plr = rdtgrp->plr; - return 0; - } - goto next; - } - } - return -EINVAL; -} - static bool apply_config(struct rdt_hw_domain *hw_dom, struct resctrl_staged_config *cfg, u32 idx, cpumask_var_t cpu_mask) @@ -370,100 +111,6 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) return 0; } -static int rdtgroup_parse_resource(char *resname, char *tok, - struct rdtgroup *rdtgrp) -{ - struct resctrl_schema *s; - - list_for_each_entry(s, &resctrl_schema_all, list) { - if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid) - return parse_line(tok, s, rdtgrp); - } - rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname); - return -EINVAL; -} - -ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct resctrl_schema *s; - struct rdtgroup *rdtgrp; - struct rdt_resource *r; - char *tok, *resname; - int ret = 0; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - buf[nbytes - 1] = '\0'; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - rdt_last_cmd_clear(); - - /* - * No changes to pseudo-locked region allowed. It has to be removed - * and re-created instead. 
- */ - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - ret = -EINVAL; - rdt_last_cmd_puts("Resource group is pseudo-locked\n"); - goto out; - } - - rdt_staged_configs_clear(); - - while ((tok = strsep(&buf, "\n")) != NULL) { - resname = strim(strsep(&tok, ":")); - if (!tok) { - rdt_last_cmd_puts("Missing ':'\n"); - ret = -EINVAL; - goto out; - } - if (tok[0] == '\0') { - rdt_last_cmd_printf("Missing '%s' value\n", resname); - ret = -EINVAL; - goto out; - } - ret = rdtgroup_parse_resource(resname, tok, rdtgrp); - if (ret) - goto out; - } - - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - - /* - * Writes to mba_sc resources update the software controller, - * not the control MSR. - */ - if (is_mba_sc(r)) - continue; - - ret = resctrl_arch_update_domains(r, rdtgrp->closid); - if (ret) - goto out; - } - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - /* - * If pseudo-locking fails we keep the resource group in - * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service - * active and updated for just the domain the pseudo-locked - * region was requested for. 
- */ - ret = rdtgroup_pseudo_lock_create(rdtgrp); - } - -out: - rdt_staged_configs_clear(); - rdtgroup_kn_unlock(of->kn); - return ret ?: nbytes; -} - u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type type) { @@ -472,160 +119,3 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, return hw_dom->ctrl_val[idx]; } - -static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid) -{ - struct rdt_resource *r = schema->res; - struct rdt_domain *dom; - bool sep = false; - u32 ctrl_val; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - seq_printf(s, "%*s:", max_name_width, schema->name); - list_for_each_entry(dom, &r->domains, list) { - if (sep) - seq_puts(s, ";"); - - if (is_mba_sc(r)) - ctrl_val = dom->mbps_val[closid]; - else - ctrl_val = resctrl_arch_get_config(r, dom, closid, - schema->conf_type); - - seq_printf(s, r->format_str, dom->id, max_data_width, - ctrl_val); - sep = true; - } - seq_puts(s, "\n"); -} - -int rdtgroup_schemata_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct resctrl_schema *schema; - struct rdtgroup *rdtgrp; - int ret = 0; - u32 closid; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (rdtgrp) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - list_for_each_entry(schema, &resctrl_schema_all, list) { - seq_printf(s, "%s:uninitialized\n", schema->name); - } - } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - if (!rdtgrp->plr->d) { - rdt_last_cmd_clear(); - rdt_last_cmd_puts("Cache domain offline\n"); - ret = -ENODEV; - } else { - seq_printf(s, "%s:%d=%x\n", - rdtgrp->plr->s->res->name, - rdtgrp->plr->d->id, - rdtgrp->plr->cbm); - } - } else { - closid = rdtgrp->closid; - list_for_each_entry(schema, &resctrl_schema_all, list) { - if (closid < schema->num_closid) - show_doms(s, schema, closid); - } - } - } else { - ret = -ENOENT; - } - rdtgroup_kn_unlock(of->kn); - 
return ret; -} - -static int smp_mon_event_count(void *arg) -{ - mon_event_count(arg); - - return 0; -} - -void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, - struct rdt_domain *d, struct rdtgroup *rdtgrp, - int evtid, int first) -{ - int cpu; - - /* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - /* - * Setup the parameters to pass to mon_event_count() to read the data. - */ - rr->rgrp = rdtgrp; - rr->evtid = evtid; - rr->r = r; - rr->d = d; - rr->val = 0; - rr->first = first; - rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid); - if (IS_ERR(rr->arch_mon_ctx)) { - rr->err = -EINVAL; - return; - } - - cpu = cpumask_any_housekeeping(&d->cpu_mask, RESCTRL_PICK_ANY_CPU); - - /* - * cpumask_any_housekeeping() prefers housekeeping CPUs, but - * are all the CPUs nohz_full? If yes, pick a CPU to IPI. - * MPAM's resctrl_arch_rmid_read() is unable to read the - * counters on some platforms if its called in IRQ context. - */ - if (tick_nohz_full_cpu(cpu)) - smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); - else - smp_call_on_cpu(cpu, smp_mon_event_count, rr, false); - - resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx); -} - -int rdtgroup_mondata_show(struct seq_file *m, void *arg) -{ - struct kernfs_open_file *of = m->private; - u32 resid, evtid, domid; - struct rdtgroup *rdtgrp; - struct rdt_resource *r; - union mon_data_bits md; - struct rdt_domain *d; - struct rmid_read rr; - int ret = 0; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - ret = -ENOENT; - goto out; - } - - md.priv = of->kn->priv; - resid = md.u.rid; - domid = md.u.domid; - evtid = md.u.evtid; - - r = resctrl_arch_get_resource(resid); - d = resctrl_arch_find_domain(r, domid); - if (IS_ERR_OR_NULL(d)) { - ret = -ENOENT; - goto out; - } - - mon_event_read(&rr, r, d, rdtgrp, evtid, false); - - if (rr.err == -EIO) - seq_puts(m, "Error\n"); - else if (rr.err == -EINVAL) - seq_puts(m, "Unavailable\n"); - 
else - seq_printf(m, "%llu\n", rr.val); - -out: - rdtgroup_kn_unlock(of->kn); - return ret; -} diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 0f7e3f10941b..bf3538992667 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -26,227 +26,6 @@ */ #define MBM_CNTR_WIDTH_OFFSET_MAX (62 - MBM_CNTR_WIDTH_BASE) -/** - * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that - * aren't marked nohz_full - * @mask: The mask to pick a CPU from. - * @exclude_cpu:The CPU to avoid picking. - * - * Returns a CPU from @mask, but not @exclude_cpu. If there are housekeeping - * CPUs that don't use nohz_full, these are preferred. Pass - * RESCTRL_PICK_ANY_CPU to avoid excluding any CPUs. - * - * When a CPU is excluded, returns >= nr_cpu_ids if no CPUs are available. - */ -static inline unsigned int -cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu) -{ - unsigned int cpu, hk_cpu; - - if (exclude_cpu == RESCTRL_PICK_ANY_CPU) - cpu = cpumask_any(mask); - else - cpu = cpumask_any_but(mask, exclude_cpu); - - if (!IS_ENABLED(CONFIG_NO_HZ_FULL)) - return cpu; - - /* If the CPU picked isn't marked nohz_full nothing more needs doing. 
*/ - if (cpu < nr_cpu_ids && !tick_nohz_full_cpu(cpu)) - return cpu; - - /* Try to find a CPU that isn't nohz_full to use in preference */ - hk_cpu = cpumask_nth_andnot(0, mask, tick_nohz_full_mask); - if (hk_cpu == exclude_cpu) - hk_cpu = cpumask_nth_andnot(1, mask, tick_nohz_full_mask); - - if (hk_cpu < nr_cpu_ids) - cpu = hk_cpu; - - return cpu; -} - -struct rdt_fs_context { - struct kernfs_fs_context kfc; - bool enable_cdpl2; - bool enable_cdpl3; - bool enable_mba_mbps; - bool enable_debug; -}; - -static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc) -{ - struct kernfs_fs_context *kfc = fc->fs_private; - - return container_of(kfc, struct rdt_fs_context, kfc); -} - -/** - * struct mon_evt - Entry in the event list of a resource - * @evtid: event id - * @name: name of the event - * @configurable: true if the event is configurable - * @list: entry in &rdt_resource->evt_list - */ -struct mon_evt { - enum resctrl_event_id evtid; - char *name; - bool configurable; - struct list_head list; -}; - -/** - * union mon_data_bits - Monitoring details for each event file - * @priv: Used to store monitoring event data in @u - * as kernfs private data - * @rid: Resource id associated with the event file - * @evtid: Event id associated with the event file - * @domid: The domain to which the event file belongs - * @u: Name of the bit fields struct - */ -union mon_data_bits { - void *priv; - struct { - unsigned int rid : 10; - enum resctrl_event_id evtid : 8; - unsigned int domid : 14; - } u; -}; - -struct rmid_read { - struct rdtgroup *rgrp; - struct rdt_resource *r; - struct rdt_domain *d; - enum resctrl_event_id evtid; - bool first; - int err; - u64 val; - void *arch_mon_ctx; -}; - -extern struct list_head resctrl_schema_all; -extern bool resctrl_mounted; - -enum rdt_group_type { - RDTCTRL_GROUP = 0, - RDTMON_GROUP, - RDT_NUM_GROUP, -}; - -/** - * enum rdtgrp_mode - Mode of a RDT resource group - * @RDT_MODE_SHAREABLE: This resource group allows sharing of 
its allocations - * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed - * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking - * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations - * allowed AND the allocations are Cache Pseudo-Locked - * @RDT_NUM_MODES: Total number of modes - * - * The mode of a resource group enables control over the allowed overlap - * between allocations associated with different resource groups (classes - * of service). User is able to modify the mode of a resource group by - * writing to the "mode" resctrl file associated with the resource group. - * - * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by - * writing the appropriate text to the "mode" file. A resource group enters - * "pseudo-locked" mode after the schemata is written while the resource - * group is in "pseudo-locksetup" mode. - */ -enum rdtgrp_mode { - RDT_MODE_SHAREABLE = 0, - RDT_MODE_EXCLUSIVE, - RDT_MODE_PSEUDO_LOCKSETUP, - RDT_MODE_PSEUDO_LOCKED, - - /* Must be last */ - RDT_NUM_MODES, -}; - -/** - * struct mongroup - store mon group's data in resctrl fs. - * @mon_data_kn: kernfs node for the mon_data directory - * @parent: parent rdtgrp - * @crdtgrp_list: child rdtgroup node list - * @rmid: rmid for this rdtgroup - */ -struct mongroup { - struct kernfs_node *mon_data_kn; - struct rdtgroup *parent; - struct list_head crdtgrp_list; - u32 rmid; -}; - -/** - * struct rdtgroup - store rdtgroup's data in resctrl file system. 
- * @kn: kernfs node - * @rdtgroup_list: linked list for all rdtgroups - * @closid: closid for this rdtgroup - * @cpu_mask: CPUs assigned to this rdtgroup - * @flags: status bits - * @waitcount: how many cpus expect to find this - * group when they acquire rdtgroup_mutex - * @type: indicates type of this rdtgroup - either - * monitor only or ctrl_mon group - * @mon: mongroup related data - * @mode: mode of resource group - * @plr: pseudo-locked region - */ -struct rdtgroup { - struct kernfs_node *kn; - struct list_head rdtgroup_list; - u32 closid; - struct cpumask cpu_mask; - int flags; - atomic_t waitcount; - enum rdt_group_type type; - struct mongroup mon; - enum rdtgrp_mode mode; - struct pseudo_lock_region *plr; -}; - -/* List of all resource groups */ -extern struct list_head rdt_all_groups; - -extern int max_name_width, max_data_width; - -/** - * struct rftype - describe each file in the resctrl file system - * @name: File name - * @mode: Access mode - * @kf_ops: File operations - * @flags: File specific RFTYPE_FLAGS_* flags - * @fflags: File specific RFTYPE_* flags - * @seq_show: Show content of the file - * @write: Write to the file - */ -struct rftype { - char *name; - umode_t mode; - const struct kernfs_ops *kf_ops; - unsigned long flags; - unsigned long fflags; - - int (*seq_show)(struct kernfs_open_file *of, - struct seq_file *sf, void *v); - /* - * write() is the generic write callback which maps directly to - * kernfs write operation and overrides all other operations. - * Maximum write size is determined by ->max_write_len. 
- */ - ssize_t (*write)(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off); -}; - -/** - * struct mbm_state - status for each MBM counter in each domain - * @prev_bw_bytes: Previous bytes value read for bandwidth calculation - * @prev_bw: The most recent bandwidth in MBps - */ -struct mbm_state { - u64 prev_bw_bytes; - u32 prev_bw; -}; - /** * struct arch_mbm_state - values used to compute resctrl_arch_rmid_read()s * return value. @@ -327,11 +106,7 @@ static inline struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r return container_of(r, struct rdt_hw_resource, r_resctrl); } -extern struct mutex rdtgroup_mutex; - extern struct rdt_hw_resource rdt_resources_all[]; -extern struct rdtgroup rdtgroup_default; -extern struct dentry *debugfs_resctrl; static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res) { @@ -395,95 +170,10 @@ union cpuid_0x10_x_edx { unsigned int full; }; -void rdt_last_cmd_clear(void); -void rdt_last_cmd_puts(const char *s); -__printf(1, 2) -void rdt_last_cmd_printf(const char *fmt, ...); - void rdt_ctrl_update(void *arg); -struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn); -void rdtgroup_kn_unlock(struct kernfs_node *kn); -int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name); -int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, - umode_t mask); -ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off); -int rdtgroup_schemata_show(struct kernfs_open_file *of, - struct seq_file *s, void *v); -bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, - unsigned long cbm, int closid, bool exclusive); -unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, - unsigned long cbm); -enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); -int rdtgroup_tasks_assigned(struct rdtgroup *r); -int closids_supported(void); -void closid_free(int closid); -int alloc_rmid(u32 closid); -void 
free_rmid(u32 closid, u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); -void resctrl_mon_resource_exit(void); bool rdt_cpu_has(int flag); -void mon_event_count(void *info); -int rdtgroup_mondata_show(struct seq_file *m, void *arg); -void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, - struct rdt_domain *d, struct rdtgroup *rdtgrp, - int evtid, int first); -int resctrl_mon_resource_init(void); -void mbm_setup_overflow_handler(struct rdt_domain *dom, - unsigned long delay_ms, - int exclude_cpu); -void mbm_handle_overflow(struct work_struct *work); void __init intel_rdt_mbm_apply_quirk(void); -bool is_mba_sc(struct rdt_resource *r); -void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, - int exclude_cpu); -void cqm_handle_limbo(struct work_struct *work); -bool has_busy_rmid(struct rdt_domain *d); -void __check_limbo(struct rdt_domain *d, bool force_free); void rdt_domain_reconfigure_cdp(struct rdt_resource *r); -void mbm_config_rftype_init(const char *config); -void rdt_staged_configs_clear(void); -bool closid_allocated(unsigned int closid); -int resctrl_find_cleanest_closid(void); - -#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK -int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); -int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); -bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm); -bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); -int rdt_pseudo_lock_init(void); -void rdt_pseudo_lock_release(void); -int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); -void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); -#else -static inline int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) -{ - return -EOPNOTSUPP; -} - -static inline int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) -{ - return -EOPNOTSUPP; -} - -static inline bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) -{ - return false; -} - -static inline bool 
rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) -{ - return false; -} - -static inline int rdt_pseudo_lock_init(void) { return 0; } -static inline void rdt_pseudo_lock_release(void) { } -static inline int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) -{ - return -EOPNOTSUPP; -} - -static inline void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) { } -#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */ #endif /* _ASM_X86_RESCTRL_INTERNAL_H */ diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 7e6fca138cb7..02fb9d87479a 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -25,53 +25,6 @@ #include "internal.h" -/** - * struct rmid_entry - dirty tracking for all RMID. - * @closid: The CLOSID for this entry. - * @rmid: The RMID for this entry. - * @busy: The number of domains with cached data using this RMID. - * @list: Member of the rmid_free_lru list when busy == 0. - * - * Depending on the architecture the correct monitor is accessed using - * both @closid and @rmid, or @rmid only. - * - * Take the rdtgroup_mutex when accessing. - */ -struct rmid_entry { - u32 closid; - u32 rmid; - int busy; - struct list_head list; -}; - -/* - * @rmid_free_lru - A least recently used list of free RMIDs - * These RMIDs are guaranteed to have an occupancy less than the - * threshold occupancy - */ -static LIST_HEAD(rmid_free_lru); - -/* - * @closid_num_dirty_rmid The number of dirty RMID each CLOSID has. - * Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined. - * Indexed by CLOSID. Protected by rdtgroup_mutex. - */ -static u32 *closid_num_dirty_rmid; - -/* - * @rmid_limbo_count - count of currently unused but (potentially) - * dirty RMIDs. - * This counts RMIDs that no one is currently using but that - * may have a occupancy value > resctrl_rmid_realloc_threshold. User can - * change the threshold occupancy value. 
- */ -static unsigned int rmid_limbo_count; - -/* - * @rmid_entry - The entry in the limbo and free lists. - */ -static struct rmid_entry *rmid_ptrs; - /* * Global boolean for rdt_monitor which is true if any * resource monitoring is enabled. @@ -83,17 +36,6 @@ bool rdt_mon_capable; */ unsigned int rdt_mon_features; -/* - * This is the threshold cache occupancy in bytes at which we will consider an - * RMID available for re-allocation. - */ -unsigned int resctrl_rmid_realloc_threshold; - -/* - * This is the maximum value for the reallocation threshold, in bytes. - */ -unsigned int resctrl_rmid_realloc_limit; - #define CF(cf) ((unsigned long)(1048576 * (cf) + 0.5)) /* @@ -157,33 +99,6 @@ static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val) return val; } -/* - * x86 and arm64 differ in their handling of monitoring. - * x86's RMID are independent numbers, there is only one source of traffic - * with an RMID value of '1'. - * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of - * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID - * value is no longer unique. - * To account for this, resctrl uses an index. On x86 this is just the RMID, - * on arm64 it encodes the CLOSID and RMID. This gives a unique number. - * - * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code - * must accept an attempt to read every index. 
- */ -static inline struct rmid_entry *__rmid_entry(u32 idx) -{ - struct rmid_entry *entry; - u32 closid, rmid; - - entry = &rmid_ptrs[idx]; - resctrl_arch_rmid_idx_decode(idx, &closid, &rmid); - - WARN_ON_ONCE(entry->closid != closid); - WARN_ON_ONCE(entry->rmid != rmid); - - return entry; -} - static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val) { u64 msr_val; @@ -302,735 +217,6 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, return 0; } -static void limbo_release_entry(struct rmid_entry *entry) -{ - lockdep_assert_held(&rdtgroup_mutex); - - rmid_limbo_count--; - list_add_tail(&entry->list, &rmid_free_lru); - - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) - closid_num_dirty_rmid[entry->closid]--; -} - -/* - * Check the RMIDs that are marked as busy for this domain. If the - * reported LLC occupancy is below the threshold clear the busy bit and - * decrement the count. If the busy count gets to zero on an RMID, we - * free the RMID - */ -void __check_limbo(struct rdt_domain *d, bool force_free) -{ - struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); - u32 idx_limit = resctrl_arch_system_num_rmid_idx(); - struct rmid_entry *entry; - u32 idx, cur_idx = 1; - void *arch_mon_ctx; - bool rmid_dirty; - u64 val = 0; - - arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID); - if (IS_ERR(arch_mon_ctx)) { - pr_warn_ratelimited("Failed to allocate monitor context: %ld", - PTR_ERR(arch_mon_ctx)); - return; - } - - /* - * Skip RMID 0 and start from RMID 1 and check all the RMIDs that - * are marked as busy for occupancy < threshold. If the occupancy - * is less than the threshold decrement the busy counter of the - * RMID and move it to the free list when the counter reaches 0. 
- */ - for (;;) { - idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx); - if (idx >= idx_limit) - break; - - entry = __rmid_entry(idx); - if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid, - QOS_L3_OCCUP_EVENT_ID, &val, - arch_mon_ctx)) { - rmid_dirty = true; - } else { - rmid_dirty = (val >= resctrl_rmid_realloc_threshold); - } - - if (force_free || !rmid_dirty) { - clear_bit(idx, d->rmid_busy_llc); - if (!--entry->busy) - limbo_release_entry(entry); - } - cur_idx = idx + 1; - } - - resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx); -} - -bool has_busy_rmid(struct rdt_domain *d) -{ - u32 idx_limit = resctrl_arch_system_num_rmid_idx(); - - return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit; -} - -static struct rmid_entry *resctrl_find_free_rmid(u32 closid) -{ - struct rmid_entry *itr; - u32 itr_idx, cmp_idx; - - if (list_empty(&rmid_free_lru)) - return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC); - - list_for_each_entry(itr, &rmid_free_lru, list) { - /* - * Get the index of this free RMID, and the index it would need - * to be if it were used with this CLOSID. - * If the CLOSID is irrelevant on this architecture, the two - * index values are always the same on every entry and thus the - * very first entry will be returned. - */ - itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid); - cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid); - - if (itr_idx == cmp_idx) - return itr; - } - - return ERR_PTR(-ENOSPC); -} - -/** - * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated - * RMID are clean, or the CLOSID that has - * the most clean RMID. - * - * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID - * may not be able to allocate clean RMID. To avoid this the allocator will - * choose the CLOSID with the most clean RMID. - * - * When the CLOSID and RMID are independent numbers, the first free CLOSID will - * be returned. 
- */ -int resctrl_find_cleanest_closid(void) -{ - u32 cleanest_closid = ~0; - int i = 0; - - lockdep_assert_held(&rdtgroup_mutex); - - if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) - return -EIO; - - for (i = 0; i < closids_supported(); i++) { - int num_dirty; - - if (closid_allocated(i)) - continue; - - num_dirty = closid_num_dirty_rmid[i]; - if (num_dirty == 0) - return i; - - if (cleanest_closid == ~0) - cleanest_closid = i; - - if (num_dirty < closid_num_dirty_rmid[cleanest_closid]) - cleanest_closid = i; - } - - if (cleanest_closid == ~0) - return -ENOSPC; - - return cleanest_closid; -} - -/* - * For MPAM the RMID value is not unique, and has to be considered with - * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which - * allows all domains to be managed by a single free list. - * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler. - */ -int alloc_rmid(u32 closid) -{ - struct rmid_entry *entry; - - lockdep_assert_held(&rdtgroup_mutex); - - entry = resctrl_find_free_rmid(closid); - if (IS_ERR(entry)) - return PTR_ERR(entry); - - list_del(&entry->list); - return entry->rmid; -} - -static void add_rmid_to_limbo(struct rmid_entry *entry) -{ - struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); - struct rdt_domain *d; - u32 idx; - - lockdep_assert_held(&rdtgroup_mutex); - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid); - - entry->busy = 0; - list_for_each_entry(d, &r->domains, list) { - /* - * For the first limbo RMID in the domain, - * setup up the limbo worker. 
- */ - if (!has_busy_rmid(d)) - cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL, - RESCTRL_PICK_ANY_CPU); - set_bit(idx, d->rmid_busy_llc); - entry->busy++; - } - - rmid_limbo_count++; - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) - closid_num_dirty_rmid[entry->closid]++; -} - -void free_rmid(u32 closid, u32 rmid) -{ - u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); - struct rmid_entry *entry; - - lockdep_assert_held(&rdtgroup_mutex); - - /* - * Do not allow the default rmid to be free'd. Comparing by index - * allows architectures that ignore the closid parameter to avoid an - * unnecessary check. - */ - if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, - RESCTRL_RESERVED_RMID)) - return; - - entry = __rmid_entry(idx); - - if (resctrl_arch_is_llc_occupancy_enabled()) - add_rmid_to_limbo(entry); - else - list_add_tail(&entry->list, &rmid_free_lru); -} - -static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 closid, - u32 rmid, enum resctrl_event_id evtid) -{ - u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); - - switch (evtid) { - case QOS_L3_MBM_TOTAL_EVENT_ID: - return &d->mbm_total[idx]; - case QOS_L3_MBM_LOCAL_EVENT_ID: - return &d->mbm_local[idx]; - default: - return NULL; - } -} - -static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr) -{ - struct mbm_state *m; - u64 tval = 0; - - if (rr->first) { - resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid); - m = get_mbm_state(rr->d, closid, rmid, rr->evtid); - if (m) - memset(m, 0, sizeof(struct mbm_state)); - return 0; - } - - rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid, - &tval, rr->arch_mon_ctx); - if (rr->err) - return rr->err; - - rr->val += tval; - - return 0; -} - -/* - * mbm_bw_count() - Update bw count from values previously read by - * __mon_event_count(). - * @closid: The closid used to identify the cached mbm_state. - * @rmid: The rmid used to identify the cached mbm_state. 
- * @rr: The struct rmid_read populated by __mon_event_count(). - * - * Supporting function to calculate the memory bandwidth - * and delta bandwidth in MBps. The chunks value previously read by - * __mon_event_count() is compared with the chunks value from the previous - * invocation. This must be called once per second to maintain values in MBps. - */ -static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr) -{ - u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); - struct mbm_state *m = &rr->d->mbm_local[idx]; - u64 cur_bw, bytes, cur_bytes; - - cur_bytes = rr->val; - bytes = cur_bytes - m->prev_bw_bytes; - m->prev_bw_bytes = cur_bytes; - - cur_bw = bytes / SZ_1M; - - m->prev_bw = cur_bw; -} - -/* - * This is scheduled by mon_event_read() to read the CQM/MBM counters - * on a domain. - */ -void mon_event_count(void *info) -{ - struct rdtgroup *rdtgrp, *entry; - struct rmid_read *rr = info; - struct list_head *head; - int ret; - - rdtgrp = rr->rgrp; - - ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr); - - /* - * For Ctrl groups read data from child monitor groups and - * add them together. Count events which are read successfully. - * Discard the rmid_read's reporting errors. - */ - head = &rdtgrp->mon.crdtgrp_list; - - if (rdtgrp->type == RDTCTRL_GROUP) { - list_for_each_entry(entry, head, mon.crdtgrp_list) { - if (__mon_event_count(entry->closid, entry->mon.rmid, - rr) == 0) - ret = 0; - } - } - - /* - * __mon_event_count() calls for newly created monitor groups may - * report -EINVAL/Unavailable if the monitor hasn't seen any traffic. - * Discard error if any of the monitor event reads succeeded. 
- */ - if (ret == 0) - rr->err = 0; -} - -/* - * Feedback loop for MBA software controller (mba_sc) - * - * mba_sc is a feedback loop where we periodically read MBM counters and - * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so - * that: - * - * current bandwidth(cur_bw) < user specified bandwidth(user_bw) - * - * This uses the MBM counters to measure the bandwidth and MBA throttle - * MSRs to control the bandwidth for a particular rdtgrp. It builds on the - * fact that resctrl rdtgroups have both monitoring and control. - * - * The frequency of the checks is 1s and we just tag along the MBM overflow - * timer. Having 1s interval makes the calculation of bandwidth simpler. - * - * Although MBA's goal is to restrict the bandwidth to a maximum, there may - * be a need to increase the bandwidth to avoid unnecessarily restricting - * the L2 <-> L3 traffic. - * - * Since MBA controls the L2 external bandwidth where as MBM measures the - * L3 external bandwidth the following sequence could lead to such a - * situation. - * - * Consider an rdtgroup which had high L3 <-> memory traffic in initial - * phases -> mba_sc kicks in and reduced bandwidth percentage values -> but - * after some time rdtgroup has mostly L2 <-> L3 traffic. - * - * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its - * throttle MSRs already have low percentage values. To avoid - * unnecessarily restricting such rdtgroups, we also increase the bandwidth. 
- */ -static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) -{ - u32 closid, rmid, cur_msr_val, new_msr_val; - struct mbm_state *pmbm_data, *cmbm_data; - struct rdt_resource *r_mba; - struct rdt_domain *dom_mba; - u32 cur_bw, user_bw, idx; - struct list_head *head; - struct rdtgroup *entry; - - if (!resctrl_arch_is_mbm_local_enabled()) - return; - - r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA); - - closid = rgrp->closid; - rmid = rgrp->mon.rmid; - idx = resctrl_arch_rmid_idx_encode(closid, rmid); - pmbm_data = &dom_mbm->mbm_local[idx]; - - dom_mba = resctrl_get_domain_from_cpu(smp_processor_id(), r_mba); - if (!dom_mba) { - pr_warn_once("Failure to get domain for MBA update\n"); - return; - } - - cur_bw = pmbm_data->prev_bw; - user_bw = dom_mba->mbps_val[closid]; - - /* MBA resource doesn't support CDP */ - cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE); - - /* - * For Ctrl groups read data from child monitor groups. - */ - head = &rgrp->mon.crdtgrp_list; - list_for_each_entry(entry, head, mon.crdtgrp_list) { - cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid]; - cur_bw += cmbm_data->prev_bw; - } - - /* - * Scale up/down the bandwidth linearly for the ctrl group. The - * bandwidth step is the bandwidth granularity specified by the - * hardware. - * Always increase throttling if current bandwidth is above the - * target set by user. - * But avoid thrashing up and down on every poll by checking - * whether a decrease in throttling is likely to push the group - * back over target. E.g. if currently throttling to 30% of bandwidth - * on a system with 10% granularity steps, check whether moving to - * 40% would go past the limit by multiplying current bandwidth by - * "(30 + 10) / 30". 
- */ - if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) { - new_msr_val = cur_msr_val - r_mba->membw.bw_gran; - } else if (cur_msr_val < MAX_MBA_BW && - (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) { - new_msr_val = cur_msr_val + r_mba->membw.bw_gran; - } else { - return; - } - - resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val); -} - -static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, - u32 closid, u32 rmid) -{ - struct rmid_read rr; - - rr.first = false; - rr.r = r; - rr.d = d; - - /* - * This is protected from concurrent reads from user - * as both the user and we hold the global mutex. - */ - if (resctrl_arch_is_mbm_total_enabled()) { - rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; - rr.val = 0; - rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); - if (IS_ERR(rr.arch_mon_ctx)) { - pr_warn_ratelimited("Failed to allocate monitor context: %ld", - PTR_ERR(rr.arch_mon_ctx)); - return; - } - - __mon_event_count(closid, rmid, &rr); - - resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); - } - if (resctrl_arch_is_mbm_local_enabled()) { - rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; - rr.val = 0; - rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); - if (IS_ERR(rr.arch_mon_ctx)) { - pr_warn_ratelimited("Failed to allocate monitor context: %ld", - PTR_ERR(rr.arch_mon_ctx)); - return; - } - - __mon_event_count(closid, rmid, &rr); - - /* - * Call the MBA software controller only for the - * control groups and when user has enabled - * the software controller explicitly. - */ - if (is_mba_sc(NULL)) - mbm_bw_count(closid, rmid, &rr); - - resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); - } -} - -/* - * Handler to scan the limbo list and move the RMIDs - * to free list whose occupancy < threshold_occupancy. 
- */ -void cqm_handle_limbo(struct work_struct *work) -{ - unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); - struct rdt_domain *d; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - d = container_of(work, struct rdt_domain, cqm_limbo.work); - - __check_limbo(d, false); - - if (has_busy_rmid(d)) { - d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, - RESCTRL_PICK_ANY_CPU); - schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo, - delay); - } - - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); -} - -/** - * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this - * domain. - * @dom: The domain the limbo handler should run for. - * @delay_ms: How far in the future the handler should run. - * @exclude_cpu: Which CPU the handler should not run on, - * RESCTRL_PICK_ANY_CPU to pick any CPU. - */ -void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, - int exclude_cpu) -{ - unsigned long delay = msecs_to_jiffies(delay_ms); - int cpu; - - cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); - dom->cqm_work_cpu = cpu; - - if (cpu < nr_cpu_ids) - schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); -} - -void mbm_handle_overflow(struct work_struct *work) -{ - unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL); - struct rdtgroup *prgrp, *crgrp; - struct list_head *head; - struct rdt_resource *r; - struct rdt_domain *d; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - /* - * If the filesystem has been unmounted this work no longer needs to - * run. 
- */ - if (!resctrl_mounted || !resctrl_arch_mon_capable()) - goto out_unlock; - - r = resctrl_arch_get_resource(RDT_RESOURCE_L3); - d = container_of(work, struct rdt_domain, mbm_over.work); - - list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { - mbm_update(r, d, prgrp->closid, prgrp->mon.rmid); - - head = &prgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) - mbm_update(r, d, crgrp->closid, crgrp->mon.rmid); - - if (is_mba_sc(NULL)) - update_mba_bw(prgrp, d); - } - - /* - * Re-check for housekeeping CPUs. This allows the overflow handler to - * move off a nohz_full CPU quickly. - */ - d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, - RESCTRL_PICK_ANY_CPU); - schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay); - -out_unlock: - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); -} - -/** - * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this - * domain. - * @dom: The domain the overflow handler should run for. - * @delay_ms: How far in the future the handler should run. - * @exclude_cpu: Which CPU the handler should not run on, - * RESCTRL_PICK_ANY_CPU to pick any CPU. - */ -void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms, - int exclude_cpu) -{ - unsigned long delay = msecs_to_jiffies(delay_ms); - int cpu; - - /* - * When a domain comes online there is no guarantee the filesystem is - * mounted. If not, there is no need to catch counter overflow. 
- */ - if (!resctrl_mounted || !resctrl_arch_mon_capable()) - return; - cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); - dom->mbm_work_cpu = cpu; - - if (cpu < nr_cpu_ids) - schedule_delayed_work_on(cpu, &dom->mbm_over, delay); -} - -static int dom_data_init(struct rdt_resource *r) -{ - u32 idx_limit = resctrl_arch_system_num_rmid_idx(); - u32 num_closid = resctrl_arch_get_num_closid(r); - struct rmid_entry *entry = NULL; - int err = 0, i; - u32 idx; - - mutex_lock(&rdtgroup_mutex); - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { - u32 *tmp; - - /* - * If the architecture hasn't provided a sanitised value here, - * this may result in larger arrays than necessary. Resctrl will - * use a smaller system wide value based on the resources in - * use. - */ - tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL); - if (!tmp) { - err = -ENOMEM; - goto out_unlock; - } - - closid_num_dirty_rmid = tmp; - } - - rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL); - if (!rmid_ptrs) { - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { - kfree(closid_num_dirty_rmid); - closid_num_dirty_rmid = NULL; - } - err = -ENOMEM; - goto out_unlock; - } - - for (i = 0; i < idx_limit; i++) { - entry = &rmid_ptrs[i]; - INIT_LIST_HEAD(&entry->list); - - resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid); - list_add_tail(&entry->list, &rmid_free_lru); - } - - /* - * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and - * are always allocated. These are used for the rdtgroup_default - * control group, which will be setup later in rdtgroup_init(). 
- */ - idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, - RESCTRL_RESERVED_RMID); - entry = __rmid_entry(idx); - list_del(&entry->list); - -out_unlock: - mutex_unlock(&rdtgroup_mutex); - - return err; -} - -static void dom_data_exit(struct rdt_resource *r) -{ - if (!r->mon_capable) - return; - - mutex_lock(&rdtgroup_mutex); - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { - kfree(closid_num_dirty_rmid); - closid_num_dirty_rmid = NULL; - } - - kfree(rmid_ptrs); - rmid_ptrs = NULL; - - mutex_unlock(&rdtgroup_mutex); -} - -static struct mon_evt llc_occupancy_event = { - .name = "llc_occupancy", - .evtid = QOS_L3_OCCUP_EVENT_ID, -}; - -static struct mon_evt mbm_total_event = { - .name = "mbm_total_bytes", - .evtid = QOS_L3_MBM_TOTAL_EVENT_ID, -}; - -static struct mon_evt mbm_local_event = { - .name = "mbm_local_bytes", - .evtid = QOS_L3_MBM_LOCAL_EVENT_ID, -}; - -/* - * Initialize the event list for the resource. - * - * Note that MBM events are also part of RDT_RESOURCE_L3 resource - * because as per the SDM the total and local memory bandwidth - * are enumerated as part of L3 monitoring. 
- */ -static void l3_mon_evt_init(struct rdt_resource *r) -{ - INIT_LIST_HEAD(&r->evt_list); - - if (resctrl_arch_is_llc_occupancy_enabled()) - list_add_tail(&llc_occupancy_event.list, &r->evt_list); - if (resctrl_arch_is_mbm_total_enabled()) - list_add_tail(&mbm_total_event.list, &r->evt_list); - if (resctrl_arch_is_mbm_local_enabled()) - list_add_tail(&mbm_local_event.list, &r->evt_list); -} - -int resctrl_mon_resource_init(void) -{ - struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); - int ret; - - if (!r->mon_capable) - return 0; - - ret = dom_data_init(r); - if (ret) - return ret; - - l3_mon_evt_init(r); - - if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) { - mbm_total_event.configurable = true; - mbm_config_rftype_init("mbm_total_bytes_config"); - } - if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) { - mbm_local_event.configurable = true; - mbm_config_rftype_init("mbm_local_bytes_config"); - } - - return 0; -} - int __init rdt_get_mon_l3_config(struct rdt_resource *r) { unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset; @@ -1076,13 +262,6 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) return 0; } -void resctrl_mon_resource_exit(void) -{ - struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); - - dom_data_exit(r); -} - void __init intel_rdt_mbm_apply_quirk(void) { int cf_index; diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index ba51ab1f70e6..ba1596afee10 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -39,28 +39,6 @@ */ static u64 prefetch_disable_bits; -/* - * Major number assigned to and shared by all devices exposing - * pseudo-locked regions. 
- */ -static unsigned int pseudo_lock_major; -static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0); - -static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode) -{ - const struct rdtgroup *rdtgrp; - - rdtgrp = dev_get_drvdata(dev); - if (mode) - *mode = 0600; - return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name); -} - -static const struct class pseudo_lock_class = { - .name = "pseudo_lock", - .devnode = pseudo_lock_devnode, -}; - /** * resctrl_arch_get_prefetch_disable_bits - prefetch disable bits of supported * platforms @@ -121,299 +99,6 @@ u64 resctrl_arch_get_prefetch_disable_bits(void) return prefetch_disable_bits; } -/** - * pseudo_lock_minor_get - Obtain available minor number - * @minor: Pointer to where new minor number will be stored - * - * A bitmask is used to track available minor numbers. Here the next free - * minor number is marked as unavailable and returned. - * - * Return: 0 on success, <0 on failure. - */ -static int pseudo_lock_minor_get(unsigned int *minor) -{ - unsigned long first_bit; - - first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS); - - if (first_bit == MINORBITS) - return -ENOSPC; - - __clear_bit(first_bit, &pseudo_lock_minor_avail); - *minor = first_bit; - - return 0; -} - -/** - * pseudo_lock_minor_release - Return minor number to available - * @minor: The minor number made available - */ -static void pseudo_lock_minor_release(unsigned int minor) -{ - __set_bit(minor, &pseudo_lock_minor_avail); -} - -/** - * region_find_by_minor - Locate a pseudo-lock region by inode minor number - * @minor: The minor number of the device representing pseudo-locked region - * - * When the character device is accessed we need to determine which - * pseudo-locked region it belongs to. This is done by matching the minor - * number of the device to the pseudo-locked region it belongs. - * - * Minor numbers are assigned at the time a pseudo-locked region is associated - * with a cache instance. 
- * - * Return: On success return pointer to resource group owning the pseudo-locked - * region, NULL on failure. - */ -static struct rdtgroup *region_find_by_minor(unsigned int minor) -{ - struct rdtgroup *rdtgrp, *rdtgrp_match = NULL; - - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { - if (rdtgrp->plr && rdtgrp->plr->minor == minor) { - rdtgrp_match = rdtgrp; - break; - } - } - return rdtgrp_match; -} - -/** - * struct pseudo_lock_pm_req - A power management QoS request list entry - * @list: Entry within the @pm_reqs list for a pseudo-locked region - * @req: PM QoS request - */ -struct pseudo_lock_pm_req { - struct list_head list; - struct dev_pm_qos_request req; -}; - -static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr) -{ - struct pseudo_lock_pm_req *pm_req, *next; - - list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) { - dev_pm_qos_remove_request(&pm_req->req); - list_del(&pm_req->list); - kfree(pm_req); - } -} - -/** - * pseudo_lock_cstates_constrain - Restrict cores from entering C6 - * @plr: Pseudo-locked region - * - * To prevent the cache from being affected by power management entering - * C6 has to be avoided. This is accomplished by requesting a latency - * requirement lower than lowest C6 exit latency of all supported - * platforms as found in the cpuidle state tables in the intel_idle driver. - * At this time it is possible to do so with a single latency requirement - * for all supported platforms. - * - * Since Goldmont is supported, which is affected by X86_BUG_MONITOR, - * the ACPI latencies need to be considered while keeping in mind that C2 - * may be set to map to deeper sleep states. In this case the latency - * requirement needs to prevent entering C2 also. 
- * - * Return: 0 on success, <0 on failure - */ -static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) -{ - struct pseudo_lock_pm_req *pm_req; - int cpu; - int ret; - - for_each_cpu(cpu, &plr->d->cpu_mask) { - pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL); - if (!pm_req) { - rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n"); - ret = -ENOMEM; - goto out_err; - } - ret = dev_pm_qos_add_request(get_cpu_device(cpu), - &pm_req->req, - DEV_PM_QOS_RESUME_LATENCY, - 30); - if (ret < 0) { - rdt_last_cmd_printf("Failed to add latency req CPU%d\n", - cpu); - kfree(pm_req); - ret = -1; - goto out_err; - } - list_add(&pm_req->list, &plr->pm_reqs); - } - - return 0; - -out_err: - pseudo_lock_cstates_relax(plr); - return ret; -} - -/** - * pseudo_lock_region_clear - Reset pseudo-lock region data - * @plr: pseudo-lock region - * - * All content of the pseudo-locked region is reset - any memory allocated - * freed. - * - * Return: void - */ -static void pseudo_lock_region_clear(struct pseudo_lock_region *plr) -{ - plr->size = 0; - plr->line_size = 0; - kfree(plr->kmem); - plr->kmem = NULL; - plr->s = NULL; - if (plr->d) - plr->d->plr = NULL; - plr->d = NULL; - plr->cbm = 0; - plr->debugfs_dir = NULL; -} - -/** - * pseudo_lock_region_init - Initialize pseudo-lock region information - * @plr: pseudo-lock region - * - * Called after user provided a schemata to be pseudo-locked. From the - * schemata the &struct pseudo_lock_region is on entry already initialized - * with the resource, domain, and capacity bitmask. Here the information - * required for pseudo-locking is deduced from this data and &struct - * pseudo_lock_region initialized further. 
This information includes: - * - size in bytes of the region to be pseudo-locked - * - cache line size to know the stride with which data needs to be accessed - * to be pseudo-locked - * - a cpu associated with the cache instance on which the pseudo-locking - * flow can be executed - * - * Return: 0 on success, <0 on failure. Descriptive error will be written - * to last_cmd_status buffer. - */ -static int pseudo_lock_region_init(struct pseudo_lock_region *plr) -{ - struct cpu_cacheinfo *ci; - int ret; - int i; - - /* Pick the first cpu we find that is associated with the cache. */ - plr->cpu = cpumask_first(&plr->d->cpu_mask); - - if (!cpu_online(plr->cpu)) { - rdt_last_cmd_printf("CPU %u associated with cache not online\n", - plr->cpu); - ret = -ENODEV; - goto out_region; - } - - ci = get_cpu_cacheinfo(plr->cpu); - - plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm); - - for (i = 0; i < ci->num_leaves; i++) { - if (ci->info_list[i].level == plr->s->res->cache_level) { - plr->line_size = ci->info_list[i].coherency_line_size; - return 0; - } - } - - ret = -1; - rdt_last_cmd_puts("Unable to determine cache line size\n"); -out_region: - pseudo_lock_region_clear(plr); - return ret; -} - -/** - * pseudo_lock_init - Initialize a pseudo-lock region - * @rdtgrp: resource group to which new pseudo-locked region will belong - * - * A pseudo-locked region is associated with a resource group. When this - * association is created the pseudo-locked region is initialized. The - * details of the pseudo-locked region are not known at this time so only - * allocation is done and association established. 
- * - * Return: 0 on success, <0 on failure - */ -static int pseudo_lock_init(struct rdtgroup *rdtgrp) -{ - struct pseudo_lock_region *plr; - - plr = kzalloc(sizeof(*plr), GFP_KERNEL); - if (!plr) - return -ENOMEM; - - init_waitqueue_head(&plr->lock_thread_wq); - INIT_LIST_HEAD(&plr->pm_reqs); - rdtgrp->plr = plr; - return 0; -} - -/** - * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked - * @plr: pseudo-lock region - * - * Initialize the details required to set up the pseudo-locked region and - * allocate the contiguous memory that will be pseudo-locked to the cache. - * - * Return: 0 on success, <0 on failure. Descriptive error will be written - * to last_cmd_status buffer. - */ -static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) -{ - int ret; - - ret = pseudo_lock_region_init(plr); - if (ret < 0) - return ret; - - /* - * We do not yet support contiguous regions larger than - * KMALLOC_MAX_SIZE. - */ - if (plr->size > KMALLOC_MAX_SIZE) { - rdt_last_cmd_puts("Requested region exceeds maximum size\n"); - ret = -E2BIG; - goto out_region; - } - - plr->kmem = kzalloc(plr->size, GFP_KERNEL); - if (!plr->kmem) { - rdt_last_cmd_puts("Unable to allocate memory\n"); - ret = -ENOMEM; - goto out_region; - } - - ret = 0; - goto out; -out_region: - pseudo_lock_region_clear(plr); -out: - return ret; -} - -/** - * pseudo_lock_free - Free a pseudo-locked region - * @rdtgrp: resource group to which pseudo-locked region belonged - * - * The pseudo-locked region's resources have already been released, or not - * yet created at this point. Now it can be freed and disassociated from the - * resource group. 
- * - * Return: void - */ -static void pseudo_lock_free(struct rdtgroup *rdtgrp) -{ - pseudo_lock_region_clear(rdtgrp->plr); - kfree(rdtgrp->plr); - rdtgrp->plr = NULL; -} - /** * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache * @_plr: the pseudo-lock region descriptor @@ -543,345 +228,6 @@ int resctrl_arch_pseudo_lock_fn(void *_plr) return 0; } -/** - * rdtgroup_monitor_in_progress - Test if monitoring in progress - * @rdtgrp: resource group being queried - * - * Return: 1 if monitor groups have been created for this resource - * group, 0 otherwise. - */ -static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp) -{ - return !list_empty(&rdtgrp->mon.crdtgrp_list); -} - -/** - * rdtgroup_locksetup_user_restrict - Restrict user access to group - * @rdtgrp: resource group needing access restricted - * - * A resource group used for cache pseudo-locking cannot have cpus or tasks - * assigned to it. This is communicated to the user by restricting access - * to all the files that can be used to make such changes. - * - * Permissions restored with rdtgroup_locksetup_user_restore() - * - * Return: 0 on success, <0 on failure. If a failure occurs during the - * restriction of access an attempt will be made to restore permissions but - * the state of the mode of these files will be uncertain when a failure - * occurs. 
- */ -static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp) -{ - int ret; - - ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); - if (ret) - return ret; - - ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); - if (ret) - goto err_tasks; - - ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); - if (ret) - goto err_cpus; - - if (resctrl_arch_mon_capable()) { - ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups"); - if (ret) - goto err_cpus_list; - } - - ret = 0; - goto out; - -err_cpus_list: - rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); -err_cpus: - rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); -err_tasks: - rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); -out: - return ret; -} - -/** - * rdtgroup_locksetup_user_restore - Restore user access to group - * @rdtgrp: resource group needing access restored - * - * Restore all file access previously removed using - * rdtgroup_locksetup_user_restrict() - * - * Return: 0 on success, <0 on failure. If a failure occurs during the - * restoration of access an attempt will be made to restrict permissions - * again but the state of the mode of these files will be uncertain when - * a failure occurs. 
- */ -static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp) -{ - int ret; - - ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); - if (ret) - return ret; - - ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); - if (ret) - goto err_tasks; - - ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); - if (ret) - goto err_cpus; - - if (resctrl_arch_mon_capable()) { - ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777); - if (ret) - goto err_cpus_list; - } - - ret = 0; - goto out; - -err_cpus_list: - rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); -err_cpus: - rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); -err_tasks: - rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); -out: - return ret; -} - -/** - * rdtgroup_locksetup_enter - Resource group enters locksetup mode - * @rdtgrp: resource group requested to enter locksetup mode - * - * A resource group enters locksetup mode to reflect that it would be used - * to represent a pseudo-locked region and is in the process of being set - * up to do so. A resource group used for a pseudo-locked region would - * lose the closid associated with it so we cannot allow it to have any - * tasks or cpus assigned nor permit tasks or cpus to be assigned in the - * future. Monitoring of a pseudo-locked region is not allowed either. - * - * The above and more restrictions on a pseudo-locked region are checked - * for and enforced before the resource group enters the locksetup mode. - * - * Returns: 0 if the resource group successfully entered locksetup mode, <0 - * on failure. On failure the last_cmd_status buffer is updated with text to - * communicate details of failure to the user. - */ -int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) -{ - int ret; - - /* - * The default resource group can neither be removed nor lose the - * default closid associated with it. 
- */ - if (rdtgrp == &rdtgroup_default) { - rdt_last_cmd_puts("Cannot pseudo-lock default group\n"); - return -EINVAL; - } - - /* - * Cache Pseudo-locking not supported when CDP is enabled. - * - * Some things to consider if you would like to enable this - * support (using L3 CDP as example): - * - When CDP is enabled two separate resources are exposed, - * L3DATA and L3CODE, but they are actually on the same cache. - * The implication for pseudo-locking is that if a - * pseudo-locked region is created on a domain of one - * resource (eg. L3CODE), then a pseudo-locked region cannot - * be created on that same domain of the other resource - * (eg. L3DATA). This is because the creation of a - * pseudo-locked region involves a call to wbinvd that will - * affect all cache allocations on particular domain. - * - Considering the previous, it may be possible to only - * expose one of the CDP resources to pseudo-locking and - * hide the other. For example, we could consider to only - * expose L3DATA and since the L3 cache is unified it is - * still possible to place instructions there are execute it. - * - If only one region is exposed to pseudo-locking we should - * still keep in mind that availability of a portion of cache - * for pseudo-locking should take into account both resources. - * Similarly, if a pseudo-locked region is created in one - * resource, the portion of cache used by it should be made - * unavailable to all future allocations from both resources. - */ - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) || - resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) { - rdt_last_cmd_puts("CDP enabled\n"); - return -EINVAL; - } - - /* - * Not knowing the bits to disable prefetching implies that this - * platform does not support Cache Pseudo-Locking. 
- */ - if (resctrl_arch_get_prefetch_disable_bits() == 0) { - rdt_last_cmd_puts("Pseudo-locking not supported\n"); - return -EINVAL; - } - - if (rdtgroup_monitor_in_progress(rdtgrp)) { - rdt_last_cmd_puts("Monitoring in progress\n"); - return -EINVAL; - } - - if (rdtgroup_tasks_assigned(rdtgrp)) { - rdt_last_cmd_puts("Tasks assigned to resource group\n"); - return -EINVAL; - } - - if (!cpumask_empty(&rdtgrp->cpu_mask)) { - rdt_last_cmd_puts("CPUs assigned to resource group\n"); - return -EINVAL; - } - - if (rdtgroup_locksetup_user_restrict(rdtgrp)) { - rdt_last_cmd_puts("Unable to modify resctrl permissions\n"); - return -EIO; - } - - ret = pseudo_lock_init(rdtgrp); - if (ret) { - rdt_last_cmd_puts("Unable to init pseudo-lock region\n"); - goto out_release; - } - - /* - * If this system is capable of monitoring a rmid would have been - * allocated when the control group was created. This is not needed - * anymore when this group would be used for pseudo-locking. This - * is safe to call on platforms not capable of monitoring. - */ - free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); - - ret = 0; - goto out; - -out_release: - rdtgroup_locksetup_user_restore(rdtgrp); -out: - return ret; -} - -/** - * rdtgroup_locksetup_exit - resource group exist locksetup mode - * @rdtgrp: resource group - * - * When a resource group exits locksetup mode the earlier restrictions are - * lifted. 
- * - * Return: 0 on success, <0 on failure - */ -int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) -{ - int ret; - - if (resctrl_arch_mon_capable()) { - ret = alloc_rmid(rdtgrp->closid); - if (ret < 0) { - rdt_last_cmd_puts("Out of RMIDs\n"); - return ret; - } - rdtgrp->mon.rmid = ret; - } - - ret = rdtgroup_locksetup_user_restore(rdtgrp); - if (ret) { - free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); - return ret; - } - - pseudo_lock_free(rdtgrp); - return 0; -} - -/** - * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked - * @d: RDT domain - * @cbm: CBM to test - * - * @d represents a cache instance and @cbm a capacity bitmask that is - * considered for it. Determine if @cbm overlaps with any existing - * pseudo-locked region on @d. - * - * @cbm is unsigned long, even if only 32 bits are used, to make the - * bitmap functions work correctly. - * - * Return: true if @cbm overlaps with pseudo-locked region on @d, false - * otherwise. - */ -bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) -{ - unsigned int cbm_len; - unsigned long cbm_b; - - if (d->plr) { - cbm_len = d->plr->s->res->cache.cbm_len; - cbm_b = d->plr->cbm; - if (bitmap_intersects(&cbm, &cbm_b, cbm_len)) - return true; - } - return false; -} - -/** - * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy - * @d: RDT domain under test - * - * The setup of a pseudo-locked region affects all cache instances within - * the hierarchy of the region. It is thus essential to know if any - * pseudo-locked regions exist within a cache hierarchy to prevent any - * attempts to create new pseudo-locked regions in the same hierarchy. - * - * Return: true if a pseudo-locked region exists in the hierarchy of @d or - * if it is not possible to test due to memory allocation issue, - * false otherwise. 
- */ -bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) -{ - cpumask_var_t cpu_with_psl; - enum resctrl_res_level i; - struct rdt_resource *r; - struct rdt_domain *d_i; - bool ret = false; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL)) - return true; - - /* - * First determine which cpus have pseudo-locked regions - * associated with them. - */ - for (i = 0; i < RDT_NUM_RESOURCES; i++) { - r = resctrl_arch_get_resource(i); - if (!r->alloc_capable) - continue; - - list_for_each_entry(d_i, &r->domains, list) { - if (d_i->plr) - cpumask_or(cpu_with_psl, cpu_with_psl, - &d_i->cpu_mask); - } - } - - /* - * Next test if new pseudo-locked region would intersect with - * existing region. - */ - if (cpumask_intersects(&d->cpu_mask, cpu_with_psl)) - ret = true; - - free_cpumask_var(cpu_with_psl); - return ret; -} - /** * resctrl_arch_measure_cycles_lat_fn - Measure cycle latency to read * pseudo-locked memory @@ -1174,442 +520,3 @@ int resctrl_arch_measure_l3_residency(void *_plr) wake_up_interruptible(&plr->lock_thread_wq); return 0; } - -/** - * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region - * @rdtgrp: Resource group to which the pseudo-locked region belongs. - * @sel: Selector of which measurement to perform on a pseudo-locked region. - * - * The measurement of latency to access a pseudo-locked region should be - * done from a cpu that is associated with that pseudo-locked region. - * Determine which cpu is associated with this region and start a thread on - * that cpu to perform the measurement, wait for that thread to complete. 
- * - * Return: 0 on success, <0 on failure - */ -static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel) -{ - struct pseudo_lock_region *plr = rdtgrp->plr; - struct task_struct *thread; - unsigned int cpu; - int ret = -1; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - if (rdtgrp->flags & RDT_DELETED) { - ret = -ENODEV; - goto out; - } - - if (!plr->d) { - ret = -ENODEV; - goto out; - } - - plr->thread_done = 0; - cpu = cpumask_first(&plr->d->cpu_mask); - if (!cpu_online(cpu)) { - ret = -ENODEV; - goto out; - } - - plr->cpu = cpu; - - if (sel == 1) - thread = kthread_create_on_node(resctrl_arch_measure_cycles_lat_fn, - plr, cpu_to_node(cpu), - "pseudo_lock_measure/%u", - cpu); - else if (sel == 2) - thread = kthread_create_on_node(resctrl_arch_measure_l2_residency, - plr, cpu_to_node(cpu), - "pseudo_lock_measure/%u", - cpu); - else if (sel == 3) - thread = kthread_create_on_node(resctrl_arch_measure_l3_residency, - plr, cpu_to_node(cpu), - "pseudo_lock_measure/%u", - cpu); - else - goto out; - - if (IS_ERR(thread)) { - ret = PTR_ERR(thread); - goto out; - } - kthread_bind(thread, cpu); - wake_up_process(thread); - - ret = wait_event_interruptible(plr->lock_thread_wq, - plr->thread_done == 1); - if (ret < 0) - goto out; - - ret = 0; - -out: - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - return ret; -} - -static ssize_t pseudo_lock_measure_trigger(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct rdtgroup *rdtgrp = file->private_data; - size_t buf_size; - char buf[32]; - int ret; - int sel; - - buf_size = min(count, (sizeof(buf) - 1)); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; - ret = kstrtoint(buf, 10, &sel); - if (ret == 0) { - if (sel != 1 && sel != 2 && sel != 3) - return -EINVAL; - ret = debugfs_file_get(file->f_path.dentry); - if (ret) - return ret; - ret = pseudo_lock_measure_cycles(rdtgrp, sel); - if (ret == 0) - ret = count; - 
debugfs_file_put(file->f_path.dentry); - } - - return ret; -} - -static const struct file_operations pseudo_measure_fops = { - .write = pseudo_lock_measure_trigger, - .open = simple_open, - .llseek = default_llseek, -}; - -/** - * rdtgroup_pseudo_lock_create - Create a pseudo-locked region - * @rdtgrp: resource group to which pseudo-lock region belongs - * - * Called when a resource group in the pseudo-locksetup mode receives a - * valid schemata that should be pseudo-locked. Since the resource group is - * in pseudo-locksetup mode the &struct pseudo_lock_region has already been - * allocated and initialized with the essential information. If a failure - * occurs the resource group remains in the pseudo-locksetup mode with the - * &struct pseudo_lock_region associated with it, but cleared from all - * information and ready for the user to re-attempt pseudo-locking by - * writing the schemata again. - * - * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0 - * on failure. Descriptive error will be written to last_cmd_status buffer. 
- */ -int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) -{ - struct pseudo_lock_region *plr = rdtgrp->plr; - struct task_struct *thread; - unsigned int new_minor; - struct device *dev; - int ret; - - ret = pseudo_lock_region_alloc(plr); - if (ret < 0) - return ret; - - ret = pseudo_lock_cstates_constrain(plr); - if (ret < 0) { - ret = -EINVAL; - goto out_region; - } - - plr->thread_done = 0; - - plr->closid = rdtgrp->closid; - thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, plr, - cpu_to_node(plr->cpu), - "pseudo_lock/%u", plr->cpu); - if (IS_ERR(thread)) { - ret = PTR_ERR(thread); - rdt_last_cmd_printf("Locking thread returned error %d\n", ret); - goto out_cstates; - } - - kthread_bind(thread, plr->cpu); - wake_up_process(thread); - - ret = wait_event_interruptible(plr->lock_thread_wq, - plr->thread_done == 1); - if (ret < 0) { - /* - * If the thread does not get on the CPU for whatever - * reason and the process which sets up the region is - * interrupted then this will leave the thread in runnable - * state and once it gets on the CPU it will dereference - * the cleared, but not freed, plr struct resulting in an - * empty pseudo-locking loop. - */ - rdt_last_cmd_puts("Locking thread interrupted\n"); - goto out_cstates; - } - - ret = pseudo_lock_minor_get(&new_minor); - if (ret < 0) { - rdt_last_cmd_puts("Unable to obtain a new minor number\n"); - goto out_cstates; - } - - /* - * Unlock access but do not release the reference. The - * pseudo-locked region will still be here on return. - * - * The mutex has to be released temporarily to avoid a potential - * deadlock with the mm->mmap_lock which is obtained in the - * device_create() and debugfs_create_dir() callpath below as well as - * before the mmap() callback is called. 
- */ - mutex_unlock(&rdtgroup_mutex); - - if (!IS_ERR_OR_NULL(debugfs_resctrl)) { - plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name, - debugfs_resctrl); - if (!IS_ERR_OR_NULL(plr->debugfs_dir)) - debugfs_create_file("pseudo_lock_measure", 0200, - plr->debugfs_dir, rdtgrp, - &pseudo_measure_fops); - } - - dev = device_create(&pseudo_lock_class, NULL, - MKDEV(pseudo_lock_major, new_minor), - rdtgrp, "%s", rdtgrp->kn->name); - - mutex_lock(&rdtgroup_mutex); - - if (IS_ERR(dev)) { - ret = PTR_ERR(dev); - rdt_last_cmd_printf("Failed to create character device: %d\n", - ret); - goto out_debugfs; - } - - /* We released the mutex - check if group was removed while we did so */ - if (rdtgrp->flags & RDT_DELETED) { - ret = -ENODEV; - goto out_device; - } - - plr->minor = new_minor; - - rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED; - closid_free(rdtgrp->closid); - rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444); - rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444); - - ret = 0; - goto out; - -out_device: - device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor)); -out_debugfs: - debugfs_remove_recursive(plr->debugfs_dir); - pseudo_lock_minor_release(new_minor); -out_cstates: - pseudo_lock_cstates_relax(plr); -out_region: - pseudo_lock_region_clear(plr); -out: - return ret; -} - -/** - * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region - * @rdtgrp: resource group to which the pseudo-locked region belongs - * - * The removal of a pseudo-locked region can be initiated when the resource - * group is removed from user space via a "rmdir" from userspace or the - * unmount of the resctrl filesystem. On removal the resource group does - * not go back to pseudo-locksetup mode before it is removed, instead it is - * removed directly. There is thus asymmetry with the creation where the - * &struct pseudo_lock_region is removed here while it was not created in - * rdtgroup_pseudo_lock_create(). 
- * - * Return: void - */ -void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) -{ - struct pseudo_lock_region *plr = rdtgrp->plr; - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - /* - * Default group cannot be a pseudo-locked region so we can - * free closid here. - */ - closid_free(rdtgrp->closid); - goto free; - } - - pseudo_lock_cstates_relax(plr); - debugfs_remove_recursive(rdtgrp->plr->debugfs_dir); - device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor)); - pseudo_lock_minor_release(plr->minor); - -free: - pseudo_lock_free(rdtgrp); -} - -static int pseudo_lock_dev_open(struct inode *inode, struct file *filp) -{ - struct rdtgroup *rdtgrp; - - mutex_lock(&rdtgroup_mutex); - - rdtgrp = region_find_by_minor(iminor(inode)); - if (!rdtgrp) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - - filp->private_data = rdtgrp; - atomic_inc(&rdtgrp->waitcount); - /* Perform a non-seekable open - llseek is not supported */ - filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); - - mutex_unlock(&rdtgroup_mutex); - - return 0; -} - -static int pseudo_lock_dev_release(struct inode *inode, struct file *filp) -{ - struct rdtgroup *rdtgrp; - - mutex_lock(&rdtgroup_mutex); - rdtgrp = filp->private_data; - WARN_ON(!rdtgrp); - if (!rdtgrp) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - filp->private_data = NULL; - atomic_dec(&rdtgrp->waitcount); - mutex_unlock(&rdtgroup_mutex); - return 0; -} - -static int pseudo_lock_dev_mremap(struct vm_area_struct *area) -{ - /* Not supported */ - return -EINVAL; -} - -static const struct vm_operations_struct pseudo_mmap_ops = { - .mremap = pseudo_lock_dev_mremap, -}; - -static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) -{ - unsigned long vsize = vma->vm_end - vma->vm_start; - unsigned long off = vma->vm_pgoff << PAGE_SHIFT; - struct pseudo_lock_region *plr; - struct rdtgroup *rdtgrp; - unsigned long physical; - unsigned long psize; - - 
mutex_lock(&rdtgroup_mutex); - - rdtgrp = filp->private_data; - WARN_ON(!rdtgrp); - if (!rdtgrp) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - - plr = rdtgrp->plr; - - if (!plr->d) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - - /* - * Task is required to run with affinity to the cpus associated - * with the pseudo-locked region. If this is not the case the task - * may be scheduled elsewhere and invalidate entries in the - * pseudo-locked region. - */ - if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { - mutex_unlock(&rdtgroup_mutex); - return -EINVAL; - } - - physical = __pa(plr->kmem) >> PAGE_SHIFT; - psize = plr->size - off; - - if (off > plr->size) { - mutex_unlock(&rdtgroup_mutex); - return -ENOSPC; - } - - /* - * Ensure changes are carried directly to the memory being mapped, - * do not allow copy-on-write mapping. - */ - if (!(vma->vm_flags & VM_SHARED)) { - mutex_unlock(&rdtgroup_mutex); - return -EINVAL; - } - - if (vsize > psize) { - mutex_unlock(&rdtgroup_mutex); - return -ENOSPC; - } - - memset(plr->kmem + off, 0, vsize); - - if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff, - vsize, vma->vm_page_prot)) { - mutex_unlock(&rdtgroup_mutex); - return -EAGAIN; - } - vma->vm_ops = &pseudo_mmap_ops; - mutex_unlock(&rdtgroup_mutex); - return 0; -} - -static const struct file_operations pseudo_lock_dev_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .read = NULL, - .write = NULL, - .open = pseudo_lock_dev_open, - .release = pseudo_lock_dev_release, - .mmap = pseudo_lock_dev_mmap, -}; - -int rdt_pseudo_lock_init(void) -{ - int ret; - - ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops); - if (ret < 0) - return ret; - - pseudo_lock_major = ret; - - ret = class_register(&pseudo_lock_class); - if (ret) { - unregister_chrdev(pseudo_lock_major, "pseudo_lock"); - return ret; - } - - return 0; -} - -void rdt_pseudo_lock_release(void) -{ - class_unregister(&pseudo_lock_class); - 
unregister_chrdev(pseudo_lock_major, "pseudo_lock"); - pseudo_lock_major = 0; -} diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 1425a33d201d..fe3952514add 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -12,22 +12,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include - -#include #include #include "internal.h" @@ -36,4219 +22,239 @@ DEFINE_STATIC_KEY_FALSE(rdt_enable_key); DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key); DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key); -/* Mutex to protect rdtgroup access. */ -DEFINE_MUTEX(rdtgroup_mutex); - -static struct kernfs_root *rdt_root; -struct rdtgroup rdtgroup_default; -LIST_HEAD(rdt_all_groups); - -/* list of entries for the schemata file */ -LIST_HEAD(resctrl_schema_all); - -/* The filesystem can only be mounted once. */ -bool resctrl_mounted; - -/* Kernel fs node for "info" directory under root */ -static struct kernfs_node *kn_info; - -/* Kernel fs node for "mon_groups" directory under root */ -static struct kernfs_node *kn_mongrp; - -/* Kernel fs node for "mon_data" directory under root */ -static struct kernfs_node *kn_mondata; - /* - * Used to store the max resource name width and max resource data width - * to display the schemata in a tabular format + * This is safe against resctrl_arch_sched_in() called from __switch_to() + * because __switch_to() is executed with interrupts disabled. A local call + * from update_closid_rmid() is protected against __switch_to() because + * preemption is disabled. 
*/ -int max_name_width, max_data_width; - -static struct seq_buf last_cmd_status; -static char last_cmd_status_buf[512]; - -static int rdtgroup_setup_root(struct rdt_fs_context *ctx); -static void rdtgroup_destroy_root(void); - -struct dentry *debugfs_resctrl; - -static bool resctrl_debug; - -void rdt_last_cmd_clear(void) -{ - lockdep_assert_held(&rdtgroup_mutex); - seq_buf_clear(&last_cmd_status); -} - -void rdt_last_cmd_puts(const char *s) -{ - lockdep_assert_held(&rdtgroup_mutex); - seq_buf_puts(&last_cmd_status, s); -} - -void rdt_last_cmd_printf(const char *fmt, ...) -{ - va_list ap; - - va_start(ap, fmt); - lockdep_assert_held(&rdtgroup_mutex); - seq_buf_vprintf(&last_cmd_status, fmt, ap); - va_end(ap); -} - -void rdt_staged_configs_clear(void) +void resctrl_arch_sync_cpu_defaults(void *info) { - enum resctrl_res_level i; - struct rdt_resource *r; - struct rdt_domain *dom; - - lockdep_assert_held(&rdtgroup_mutex); - - for (i = 0; i < RDT_NUM_RESOURCES; i++) { - r = resctrl_arch_get_resource(i); - if (!r->alloc_capable) - continue; + struct resctrl_cpu_sync *r = info; - list_for_each_entry(dom, &r->domains, list) - memset(dom->staged_config, 0, sizeof(dom->staged_config)); + if (r) { + this_cpu_write(pqr_state.default_closid, r->closid); + this_cpu_write(pqr_state.default_rmid, r->rmid); } -} -static bool resctrl_is_mbm_enabled(void) -{ - return (resctrl_arch_is_mbm_total_enabled() || - resctrl_arch_is_mbm_local_enabled()); + /* + * We cannot unconditionally write the MSR because the current + * executing task might have its own closid selected. Just reuse + * the context switch code. + */ + resctrl_arch_sched_in(current); } -static bool resctrl_is_mbm_event(int e) -{ - return (e >= QOS_L3_MBM_TOTAL_EVENT_ID && - e <= QOS_L3_MBM_LOCAL_EVENT_ID); -} +#define INVALID_CONFIG_INDEX UINT_MAX -/* - * Trivial allocator for CLOSIDs. Since h/w only supports a small number, - * we can keep a bitmap of free CLOSIDs in a single integer. 
+/** + * mon_event_config_index_get - get the hardware index for the + * configurable event + * @evtid: event id. * - * Using a global CLOSID across all resources has some advantages and - * some drawbacks: - * + We can simply set current's closid to assign a task to a resource - * group. - * + Context switch code can avoid extra memory references deciding which - * CLOSID to load into the PQR_ASSOC MSR - * - We give up some options in configuring resource groups across multi-socket - * systems. - * - Our choices on how to configure each resource become progressively more - * limited as the number of resources grows. + * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID + * 1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID + * INVALID_CONFIG_INDEX for invalid evtid */ -static unsigned long closid_free_map; -static int closid_free_map_len; - -int closids_supported(void) -{ - return closid_free_map_len; -} - -static void closid_init(void) +static inline unsigned int mon_event_config_index_get(u32 evtid) { - struct resctrl_schema *s; - u32 rdt_min_closid = 32; - - /* Compute rdt_min_closid across all resources */ - list_for_each_entry(s, &resctrl_schema_all, list) - rdt_min_closid = min(rdt_min_closid, s->num_closid); - - closid_free_map = BIT_MASK(rdt_min_closid) - 1; - - /* RESCTRL_RESERVED_CLOSID is always reserved for the default group */ - __clear_bit(RESCTRL_RESERVED_CLOSID, &closid_free_map); - closid_free_map_len = rdt_min_closid; + switch (evtid) { + case QOS_L3_MBM_TOTAL_EVENT_ID: + return 0; + case QOS_L3_MBM_LOCAL_EVENT_ID: + return 1; + default: + /* Should never reach here */ + return INVALID_CONFIG_INDEX; + } } -static int closid_alloc(void) +void resctrl_arch_mon_event_config_read(void *info) { - int cleanest_closid; - u32 closid; - - lockdep_assert_held(&rdtgroup_mutex); + struct resctrl_mon_config_info *mon_info = info; + unsigned int index; + u64 msrval; - if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) && - resctrl_arch_is_llc_occupancy_enabled()) { - 
cleanest_closid = resctrl_find_cleanest_closid(); - if (cleanest_closid < 0) - return cleanest_closid; - closid = cleanest_closid; - } else { - closid = ffs(closid_free_map); - if (closid == 0) - return -ENOSPC; - closid--; + index = mon_event_config_index_get(mon_info->evtid); + if (index == INVALID_CONFIG_INDEX) { + pr_warn_once("Invalid event id %d\n", mon_info->evtid); + return; } - __clear_bit(closid, &closid_free_map); + rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval); - return closid; + /* Report only the valid event configuration bits */ + mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS; } -void closid_free(int closid) +void resctrl_arch_mon_event_config_write(void *info) { - lockdep_assert_held(&rdtgroup_mutex); - - __set_bit(closid, &closid_free_map); -} + struct resctrl_mon_config_info *mon_info = info; + unsigned int index; -/** - * closid_allocated - test if provided closid is in use - * @closid: closid to be tested - * - * Return: true if @closid is currently associated with a resource group, - * false if @closid is free - */ -bool closid_allocated(unsigned int closid) -{ - lockdep_assert_held(&rdtgroup_mutex); + index = mon_event_config_index_get(mon_info->evtid); + if (index == INVALID_CONFIG_INDEX) { + pr_warn_once("Invalid event id %d\n", mon_info->evtid); + mon_info->err = -EINVAL; + return; + } + wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0); - return !test_bit(closid, &closid_free_map); + mon_info->err = 0; } -/** - * rdtgroup_mode_by_closid - Return mode of resource group with closid - * @closid: closid if the resource group - * - * Each resource group is associated with a @closid. Here the mode - * of a resource group can be queried by searching for it using its closid. 
- * - * Return: mode as &enum rdtgrp_mode of resource group with closid @closid - */ -enum rdtgrp_mode rdtgroup_mode_by_closid(int closid) +static void l3_qos_cfg_update(void *arg) { - struct rdtgroup *rdtgrp; - - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { - if (rdtgrp->closid == closid) - return rdtgrp->mode; - } + bool *enable = arg; - return RDT_NUM_MODES; + wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); } -static const char * const rdt_mode_str[] = { - [RDT_MODE_SHAREABLE] = "shareable", - [RDT_MODE_EXCLUSIVE] = "exclusive", - [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup", - [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked", -}; - -/** - * rdtgroup_mode_str - Return the string representation of mode - * @mode: the resource group mode as &enum rdtgroup_mode - * - * Return: string representation of valid mode, "unknown" otherwise - */ -static const char *rdtgroup_mode_str(enum rdtgrp_mode mode) +static void l2_qos_cfg_update(void *arg) { - if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES) - return "unknown"; + bool *enable = arg; - return rdt_mode_str[mode]; + wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? 
L2_QOS_CDP_ENABLE : 0ULL); } -/* set uid and gid of rdtgroup dirs and files to that of the creator */ -static int rdtgroup_kn_set_ugid(struct kernfs_node *kn) +static int set_cache_qos_cfg(int level, bool enable) { - struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, - .ia_uid = current_fsuid(), - .ia_gid = current_fsgid(), }; - - if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && - gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) - return 0; + void (*update)(void *arg); + struct rdt_resource *r_l; + cpumask_var_t cpu_mask; + struct rdt_domain *d; + int cpu; - return kernfs_setattr(kn, &iattr); -} + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); -static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft) -{ - struct kernfs_node *kn; - int ret; + if (level == RDT_RESOURCE_L3) + update = l3_qos_cfg_update; + else if (level == RDT_RESOURCE_L2) + update = l2_qos_cfg_update; + else + return -EINVAL; - kn = __kernfs_create_file(parent_kn, rft->name, rft->mode, - GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, - 0, rft->kf_ops, rft, NULL, NULL); - if (IS_ERR(kn)) - return PTR_ERR(kn); + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; - ret = rdtgroup_kn_set_ugid(kn); - if (ret) { - kernfs_remove(kn); - return ret; + r_l = &rdt_resources_all[level].r_resctrl; + list_for_each_entry(d, &r_l->domains, list) { + if (r_l->cache.arch_has_per_cpu_cfg) + /* Pick all the CPUs in the domain instance */ + for_each_cpu(cpu, &d->cpu_mask) + cpumask_set_cpu(cpu, cpu_mask); + else + /* Pick one CPU from each domain instance to update MSR */ + cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); } - return 0; -} + /* Update QOS_CFG MSR on all the CPUs in cpu_mask */ + on_each_cpu_mask(cpu_mask, update, &enable, 1); -static int rdtgroup_seqfile_show(struct seq_file *m, void *arg) -{ - struct kernfs_open_file *of = m->private; - struct rftype *rft = of->kn->priv; + free_cpumask_var(cpu_mask); - if (rft->seq_show) - return 
rft->seq_show(of, m, arg); return 0; } -static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf, - size_t nbytes, loff_t off) +/* Restore the qos cfg state when a domain comes online */ +void rdt_domain_reconfigure_cdp(struct rdt_resource *r) { - struct rftype *rft = of->kn->priv; - - if (rft->write) - return rft->write(of, buf, nbytes, off); - - return -EINVAL; -} - -static const struct kernfs_ops rdtgroup_kf_single_ops = { - .atomic_write_len = PAGE_SIZE, - .write = rdtgroup_file_write, - .seq_show = rdtgroup_seqfile_show, -}; + struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); -static const struct kernfs_ops kf_mondata_ops = { - .atomic_write_len = PAGE_SIZE, - .seq_show = rdtgroup_mondata_show, -}; + if (!r->cdp_capable) + return; -static bool is_cpu_list(struct kernfs_open_file *of) -{ - struct rftype *rft = of->kn->priv; + if (r->rid == RDT_RESOURCE_L2) + l2_qos_cfg_update(&hw_res->cdp_enabled); - return rft->flags & RFTYPE_FLAGS_CPUS_LIST; + if (r->rid == RDT_RESOURCE_L3) + l3_qos_cfg_update(&hw_res->cdp_enabled); } -static int rdtgroup_cpus_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) +static int cdp_enable(int level) { - struct rdtgroup *rdtgrp; - struct cpumask *mask; - int ret = 0; + struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl; + int ret; - rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!r_l->alloc_capable) + return -EINVAL; - if (rdtgrp) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - if (!rdtgrp->plr->d) { - rdt_last_cmd_clear(); - rdt_last_cmd_puts("Cache domain offline\n"); - ret = -ENODEV; - } else { - mask = &rdtgrp->plr->d->cpu_mask; - seq_printf(s, is_cpu_list(of) ? - "%*pbl\n" : "%*pb\n", - cpumask_pr_args(mask)); - } - } else { - seq_printf(s, is_cpu_list(of) ? 
"%*pbl\n" : "%*pb\n", - cpumask_pr_args(&rdtgrp->cpu_mask)); - } - } else { - ret = -ENOENT; - } - rdtgroup_kn_unlock(of->kn); + ret = set_cache_qos_cfg(level, true); + if (!ret) + rdt_resources_all[level].cdp_enabled = true; return ret; } -/* - * This is safe against resctrl_arch_sched_in() called from __switch_to() - * because __switch_to() is executed with interrupts disabled. A local call - * from update_closid_rmid() is protected against __switch_to() because - * preemption is disabled. - */ -void resctrl_arch_sync_cpu_defaults(void *info) -{ - struct resctrl_cpu_sync *r = info; - - if (r) { - this_cpu_write(pqr_state.default_closid, r->closid); - this_cpu_write(pqr_state.default_rmid, r->rmid); - } - - /* - * We cannot unconditionally write the MSR because the current - * executing task might have its own closid selected. Just reuse - * the context switch code. - */ - resctrl_arch_sched_in(current); -} - -/* - * Update the PGR_ASSOC MSR on all cpus in @cpu_mask, - * - * Per task closids/rmids must have been set up before calling this function. - * @r may be NULL. 
- */ -static void -update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r) +static void cdp_disable(int level) { - struct resctrl_cpu_sync defaults; - struct resctrl_cpu_sync *defaults_p = NULL; + struct rdt_hw_resource *r_hw = &rdt_resources_all[level]; - if (r) { - defaults.closid = r->closid; - defaults.rmid = r->mon.rmid; - defaults_p = &defaults; + if (r_hw->cdp_enabled) { + set_cache_qos_cfg(level, false); + r_hw->cdp_enabled = false; } - - on_each_cpu_mask(cpu_mask, resctrl_arch_sync_cpu_defaults, defaults_p, - 1); } -static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, - cpumask_var_t tmpmask) +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable) { - struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp; - struct list_head *head; + struct rdt_hw_resource *hw_res = &rdt_resources_all[l]; - /* Check whether cpus belong to parent ctrl group */ - cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); - if (!cpumask_empty(tmpmask)) { - rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); + if (!hw_res->r_resctrl.cdp_capable) return -EINVAL; - } - /* Check whether cpus are dropped from this group */ - cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); - if (!cpumask_empty(tmpmask)) { - /* Give any dropped cpus to parent rdtgroup */ - cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); - update_closid_rmid(tmpmask, prgrp); - } - - /* - * If we added cpus, remove them from previous group that owned them - * and update per-cpu rmid - */ - cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); - if (!cpumask_empty(tmpmask)) { - head = &prgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) { - if (crgrp == rdtgrp) - continue; - cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask, - tmpmask); - } - update_closid_rmid(tmpmask, rdtgrp); - } + if (enable) + return cdp_enable(l); - /* Done pushing/pulling - update this group with new mask */ - cpumask_copy(&rdtgrp->cpu_mask, 
newmask); + cdp_disable(l); return 0; } -static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m) +static int reset_all_ctrls(struct rdt_resource *r) { - struct rdtgroup *crgrp; - - cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m); - /* update the child mon group masks as well*/ - list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list) - cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask); -} + struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); + struct rdt_hw_domain *hw_dom; + struct msr_param msr_param; + cpumask_var_t cpu_mask; + struct rdt_domain *d; + int i; -static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, - cpumask_var_t tmpmask, cpumask_var_t tmpmask1) -{ - struct rdtgroup *r, *crgrp; - struct list_head *head; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); - /* Check whether cpus are dropped from this group */ - cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); - if (!cpumask_empty(tmpmask)) { - /* Can't drop from default group */ - if (rdtgrp == &rdtgroup_default) { - rdt_last_cmd_puts("Can't drop CPUs from default group\n"); - return -EINVAL; - } + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; - /* Give any dropped cpus to rdtgroup_default */ - cpumask_or(&rdtgroup_default.cpu_mask, - &rdtgroup_default.cpu_mask, tmpmask); - update_closid_rmid(tmpmask, &rdtgroup_default); - } + msr_param.res = r; + msr_param.low = 0; + msr_param.high = hw_res->num_closid; /* - * If we added cpus, remove them from previous group and - * the prev group's child groups that owned them - * and update per-cpu closid/rmid. + * Disable resource control for this resource by setting all + * CBMs in all domains to the maximum mask value. Pick one CPU + * from each domain to update the MSRs below. 
*/ - cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); - if (!cpumask_empty(tmpmask)) { - list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) { - if (r == rdtgrp) - continue; - cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); - if (!cpumask_empty(tmpmask1)) - cpumask_rdtgrp_clear(r, tmpmask1); - } - update_closid_rmid(tmpmask, rdtgrp); + list_for_each_entry(d, &r->domains, list) { + hw_dom = resctrl_to_arch_dom(d); + cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); + + for (i = 0; i < hw_res->num_closid; i++) + hw_dom->ctrl_val[i] = r->default_ctrl; } - /* Done pushing/pulling - update this group with new mask */ - cpumask_copy(&rdtgrp->cpu_mask, newmask); + /* Update CBM on all the CPUs in cpu_mask */ + on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1); - /* - * Clear child mon group masks since there is a new parent mask - * now and update the rmid for the cpus the child lost. - */ - head = &rdtgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) { - cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask); - update_closid_rmid(tmpmask, rdtgrp); - cpumask_clear(&crgrp->cpu_mask); - } + free_cpumask_var(cpu_mask); return 0; } -static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) +void resctrl_arch_reset_resources(void) { - cpumask_var_t tmpmask, newmask, tmpmask1; - struct rdtgroup *rdtgrp; - int ret; + struct rdt_resource *r; - if (!buf) - return -EINVAL; - - if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) - return -ENOMEM; - if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) { - free_cpumask_var(tmpmask); - return -ENOMEM; - } - if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) { - free_cpumask_var(tmpmask); - free_cpumask_var(newmask); - return -ENOMEM; - } - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - ret = -ENOENT; - goto unlock; - } - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - ret = -EINVAL; - 
rdt_last_cmd_puts("Pseudo-locking in progress\n"); - goto unlock; - } - - if (is_cpu_list(of)) - ret = cpulist_parse(buf, newmask); - else - ret = cpumask_parse(buf, newmask); - - if (ret) { - rdt_last_cmd_puts("Bad CPU list/mask\n"); - goto unlock; - } - - /* check that user didn't specify any offline cpus */ - cpumask_andnot(tmpmask, newmask, cpu_online_mask); - if (!cpumask_empty(tmpmask)) { - ret = -EINVAL; - rdt_last_cmd_puts("Can only assign online CPUs\n"); - goto unlock; - } - - if (rdtgrp->type == RDTCTRL_GROUP) - ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1); - else if (rdtgrp->type == RDTMON_GROUP) - ret = cpus_mon_write(rdtgrp, newmask, tmpmask); - else - ret = -EINVAL; - -unlock: - rdtgroup_kn_unlock(of->kn); - free_cpumask_var(tmpmask); - free_cpumask_var(newmask); - free_cpumask_var(tmpmask1); - - return ret ?: nbytes; -} - -/** - * rdtgroup_remove - the helper to remove resource group safely - * @rdtgrp: resource group to remove - * - * On resource group creation via a mkdir, an extra kernfs_node reference is - * taken to ensure that the rdtgroup structure remains accessible for the - * rdtgroup_kn_unlock() calls where it is removed. - * - * Drop the extra reference here, then free the rdtgroup structure. - * - * Return: void - */ -static void rdtgroup_remove(struct rdtgroup *rdtgrp) -{ - kernfs_put(rdtgrp->kn); - kfree(rdtgrp); -} - -static void _update_task_closid_rmid(void *task) -{ - /* - * If the task is still current on this CPU, update PQR_ASSOC MSR. - * Otherwise, the MSR is updated when the task is scheduled in. 
- */ - if (task == current) - resctrl_arch_sched_in(task); -} - -static void update_task_closid_rmid(struct task_struct *t) -{ - if (IS_ENABLED(CONFIG_SMP) && task_curr(t)) - smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1); - else - _update_task_closid_rmid(t); -} - -static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp) -{ - u32 closid, rmid = rdtgrp->mon.rmid; - - if (rdtgrp->type == RDTCTRL_GROUP) - closid = rdtgrp->closid; - else if (rdtgrp->type == RDTMON_GROUP) - closid = rdtgrp->mon.parent->closid; - else - return false; - - return resctrl_arch_match_closid(tsk, closid) && - resctrl_arch_match_rmid(tsk, closid, rmid); -} - -static int __rdtgroup_move_task(struct task_struct *tsk, - struct rdtgroup *rdtgrp) -{ - /* If the task is already in rdtgrp, no need to move the task. */ - if (task_in_rdtgroup(tsk, rdtgrp)) - return 0; - - /* - * Set the task's closid/rmid before the PQR_ASSOC MSR can be - * updated by them. - * - * For ctrl_mon groups, move both closid and rmid. - * For monitor groups, can move the tasks only from - * their parent CTRL group. - */ - if (rdtgrp->type == RDTMON_GROUP && - !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) { - rdt_last_cmd_puts("Can't move task to different control group\n"); - return -EINVAL; - } - - if (rdtgrp->type == RDTMON_GROUP) - resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid, - rdtgrp->mon.rmid); - else - resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid, - rdtgrp->mon.rmid); - - /* - * Ensure the task's closid and rmid are written before determining if - * the task is current that will decide if it will be interrupted. - * This pairs with the full barrier between the rq->curr update and - * resctrl_arch_sched_in() during context switch. - */ - smp_mb(); - - /* - * By now, the task's closid and rmid are set. If the task is current - * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource - * group go into effect. 
If the task is not current, the MSR will be - * updated when the task is scheduled in. - */ - update_task_closid_rmid(tsk); - - return 0; -} - -static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) -{ - return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) && - resctrl_arch_match_closid(t, r->closid)); -} - -static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) -{ - return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) && - resctrl_arch_match_rmid(t, r->mon.parent->closid, - r->mon.rmid)); -} - -/** - * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group - * @r: Resource group - * - * Return: 1 if tasks have been assigned to @r, 0 otherwise - */ -int rdtgroup_tasks_assigned(struct rdtgroup *r) -{ - struct task_struct *p, *t; - int ret = 0; - - lockdep_assert_held(&rdtgroup_mutex); - - rcu_read_lock(); - for_each_process_thread(p, t) { - if (is_closid_match(t, r) || is_rmid_match(t, r)) { - ret = 1; - break; - } - } - rcu_read_unlock(); - - return ret; -} - -static int rdtgroup_task_write_permission(struct task_struct *task, - struct kernfs_open_file *of) -{ - const struct cred *tcred = get_task_cred(task); - const struct cred *cred = current_cred(); - int ret = 0; - - /* - * Even if we're attaching all tasks in the thread group, we only - * need to check permissions on one of them. 
- */ - if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && - !uid_eq(cred->euid, tcred->uid) && - !uid_eq(cred->euid, tcred->suid)) { - rdt_last_cmd_printf("No permission to move task %d\n", task->pid); - ret = -EPERM; - } - - put_cred(tcred); - return ret; -} - -static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp, - struct kernfs_open_file *of) -{ - struct task_struct *tsk; - int ret; - - rcu_read_lock(); - if (pid) { - tsk = find_task_by_vpid(pid); - if (!tsk) { - rcu_read_unlock(); - rdt_last_cmd_printf("No task %d\n", pid); - return -ESRCH; - } - } else { - tsk = current; - } - - get_task_struct(tsk); - rcu_read_unlock(); - - ret = rdtgroup_task_write_permission(tsk, of); - if (!ret) - ret = __rdtgroup_move_task(tsk, rdtgrp); - - put_task_struct(tsk); - return ret; -} - -static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct rdtgroup *rdtgrp; - char *pid_str; - int ret = 0; - pid_t pid; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - rdt_last_cmd_clear(); - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - ret = -EINVAL; - rdt_last_cmd_puts("Pseudo-locking in progress\n"); - goto unlock; - } - - while (buf && buf[0] != '\0' && buf[0] != '\n') { - pid_str = strim(strsep(&buf, ",")); - - if (kstrtoint(pid_str, 0, &pid)) { - rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str); - ret = -EINVAL; - break; - } - - if (pid < 0) { - rdt_last_cmd_printf("Invalid pid %d\n", pid); - ret = -EINVAL; - break; - } - - ret = rdtgroup_move_task(pid, rdtgrp, of); - if (ret) { - rdt_last_cmd_printf("Error while processing task %d\n", pid); - break; - } - } - -unlock: - rdtgroup_kn_unlock(of->kn); - - return ret ?: nbytes; -} - -static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) -{ - struct task_struct *p, *t; - pid_t pid; - - rcu_read_lock(); - for_each_process_thread(p, t) 
{ - if (is_closid_match(t, r) || is_rmid_match(t, r)) { - pid = task_pid_vnr(t); - if (pid) - seq_printf(s, "%d\n", pid); - } - } - rcu_read_unlock(); -} - -static int rdtgroup_tasks_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct rdtgroup *rdtgrp; - int ret = 0; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (rdtgrp) - show_rdt_tasks(rdtgrp, s); - else - ret = -ENOENT; - rdtgroup_kn_unlock(of->kn); - - return ret; -} - -static int rdtgroup_closid_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct rdtgroup *rdtgrp; - int ret = 0; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (rdtgrp) - seq_printf(s, "%u\n", rdtgrp->closid); - else - ret = -ENOENT; - rdtgroup_kn_unlock(of->kn); - - return ret; -} - -static int rdtgroup_rmid_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct rdtgroup *rdtgrp; - int ret = 0; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (rdtgrp) - seq_printf(s, "%u\n", rdtgrp->mon.rmid); - else - ret = -ENOENT; - rdtgroup_kn_unlock(of->kn); - - return ret; -} - -#ifdef CONFIG_PROC_CPU_RESCTRL - -/* - * A task can only be part of one resctrl control group and of one monitor - * group which is associated to that control group. - * - * 1) res: - * mon: - * - * resctrl is not available. - * - * 2) res:/ - * mon: - * - * Task is part of the root resctrl control group, and it is not associated - * to any monitor group. - * - * 3) res:/ - * mon:mon0 - * - * Task is part of the root resctrl control group and monitor group mon0. - * - * 4) res:group0 - * mon: - * - * Task is part of resctrl control group group0, and it is not associated - * to any monitor group. - * - * 5) res:group0 - * mon:mon1 - * - * Task is part of resctrl control group group0 and monitor group mon1. 
- */ -int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, - struct pid *pid, struct task_struct *tsk) -{ - struct rdtgroup *rdtg; - int ret = 0; - - mutex_lock(&rdtgroup_mutex); - - /* Return empty if resctrl has not been mounted. */ - if (!resctrl_mounted) { - seq_puts(s, "res:\nmon:\n"); - goto unlock; - } - - list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) { - struct rdtgroup *crg; - - /* - * Task information is only relevant for shareable - * and exclusive groups. - */ - if (rdtg->mode != RDT_MODE_SHAREABLE && - rdtg->mode != RDT_MODE_EXCLUSIVE) - continue; - - if (!resctrl_arch_match_closid(tsk, rdtg->closid)) - continue; - - seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "", - rdtg->kn->name); - seq_puts(s, "mon:"); - list_for_each_entry(crg, &rdtg->mon.crdtgrp_list, - mon.crdtgrp_list) { - if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid, - crg->mon.rmid)) - continue; - seq_printf(s, "%s", crg->kn->name); - break; - } - seq_putc(s, '\n'); - goto unlock; - } - /* - * The above search should succeed. Otherwise return - * with an error. 
- */ - ret = -ENOENT; -unlock: - mutex_unlock(&rdtgroup_mutex); - - return ret; -} -#endif - -static int rdt_last_cmd_status_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - int len; - - mutex_lock(&rdtgroup_mutex); - len = seq_buf_used(&last_cmd_status); - if (len) - seq_printf(seq, "%.*s", len, last_cmd_status_buf); - else - seq_puts(seq, "ok\n"); - mutex_unlock(&rdtgroup_mutex); - return 0; -} - -static int rdt_num_closids_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - - seq_printf(seq, "%u\n", s->num_closid); - return 0; -} - -static int rdt_default_ctrl_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%x\n", r->default_ctrl); - return 0; -} - -static int rdt_min_cbm_bits_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->cache.min_cbm_bits); - return 0; -} - -static int rdt_shareable_bits_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%x\n", r->cache.shareable_bits); - return 0; -} - -/* - * rdt_bit_usage_show - Display current usage of resources - * - * A domain is a shared resource that can now be allocated differently. Here - * we display the current regions of the domain as an annotated bitmask. 
- * For each domain of this resource its allocation bitmask - * is annotated as below to indicate the current usage of the corresponding bit: - * 0 - currently unused - * X - currently available for sharing and used by software and hardware - * H - currently used by hardware only but available for software use - * S - currently used and shareable by software only - * E - currently used exclusively by one resource group - * P - currently pseudo-locked by one resource group - */ -static int rdt_bit_usage_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - /* - * Use unsigned long even though only 32 bits are used to ensure - * test_bit() is used safely. - */ - unsigned long sw_shareable = 0, hw_shareable = 0; - unsigned long exclusive = 0, pseudo_locked = 0; - struct rdt_resource *r = s->res; - struct rdt_domain *dom; - int i, hwb, swb, excl, psl; - enum rdtgrp_mode mode; - bool sep = false; - u32 ctrl_val; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - hw_shareable = r->cache.shareable_bits; - list_for_each_entry(dom, &r->domains, list) { - if (sep) - seq_putc(seq, ';'); - sw_shareable = 0; - exclusive = 0; - seq_printf(seq, "%d=", dom->id); - for (i = 0; i < closids_supported(); i++) { - if (!closid_allocated(i)) - continue; - ctrl_val = resctrl_arch_get_config(r, dom, i, - s->conf_type); - mode = rdtgroup_mode_by_closid(i); - switch (mode) { - case RDT_MODE_SHAREABLE: - sw_shareable |= ctrl_val; - break; - case RDT_MODE_EXCLUSIVE: - exclusive |= ctrl_val; - break; - case RDT_MODE_PSEUDO_LOCKSETUP: - /* - * RDT_MODE_PSEUDO_LOCKSETUP is possible - * here but not included since the CBM - * associated with this CLOSID in this mode - * is not initialized and no task or cpu can be - * assigned this CLOSID. 
- */ - break; - case RDT_MODE_PSEUDO_LOCKED: - case RDT_NUM_MODES: - WARN(1, - "invalid mode for closid %d\n", i); - break; - } - } - for (i = r->cache.cbm_len - 1; i >= 0; i--) { - pseudo_locked = dom->plr ? dom->plr->cbm : 0; - hwb = test_bit(i, &hw_shareable); - swb = test_bit(i, &sw_shareable); - excl = test_bit(i, &exclusive); - psl = test_bit(i, &pseudo_locked); - if (hwb && swb) - seq_putc(seq, 'X'); - else if (hwb && !swb) - seq_putc(seq, 'H'); - else if (!hwb && swb) - seq_putc(seq, 'S'); - else if (excl) - seq_putc(seq, 'E'); - else if (psl) - seq_putc(seq, 'P'); - else /* Unused bits remain */ - seq_putc(seq, '0'); - } - sep = true; - } - seq_putc(seq, '\n'); - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - return 0; -} - -static int rdt_min_bw_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->membw.min_bw); - return 0; -} - -static int rdt_num_rmids_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct rdt_resource *r = of->kn->parent->priv; - - seq_printf(seq, "%d\n", r->num_rmid); - - return 0; -} - -static int rdt_mon_features_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct rdt_resource *r = of->kn->parent->priv; - struct mon_evt *mevt; - - list_for_each_entry(mevt, &r->evt_list, list) { - seq_printf(seq, "%s\n", mevt->name); - if (mevt->configurable) - seq_printf(seq, "%s_config\n", mevt->name); - } - - return 0; -} - -static int rdt_bw_gran_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->membw.bw_gran); - return 0; -} - -static int rdt_delay_linear_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - 
seq_printf(seq, "%u\n", r->membw.delay_linear); - return 0; -} - -static int max_threshold_occ_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold); - - return 0; -} - -static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD) - seq_puts(seq, "per-thread\n"); - else - seq_puts(seq, "max\n"); - - return 0; -} - -static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - unsigned int bytes; - int ret; - - ret = kstrtouint(buf, 0, &bytes); - if (ret) - return ret; - - if (bytes > resctrl_rmid_realloc_limit) - return -EINVAL; - - resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes); - - return nbytes; -} - -/* - * rdtgroup_mode_show - Display mode of this resource group - */ -static int rdtgroup_mode_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct rdtgroup *rdtgrp; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - - seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode)); - - rdtgroup_kn_unlock(of->kn); - return 0; -} - -static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type) -{ - switch (my_type) { - case CDP_CODE: - return CDP_DATA; - case CDP_DATA: - return CDP_CODE; - default: - case CDP_NONE: - return CDP_NONE; - } -} - -static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks); - - return 0; -} - -/** - * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other - * @r: Resource to which domain instance @d 
belongs. - * @d: The domain instance for which @closid is being tested. - * @cbm: Capacity bitmask being tested. - * @closid: Intended closid for @cbm. - * @type: CDP type of @r. - * @exclusive: Only check if overlaps with exclusive resource groups - * - * Checks if provided @cbm intended to be used for @closid on domain - * @d overlaps with any other closids or other hardware usage associated - * with this domain. If @exclusive is true then only overlaps with - * resource groups in exclusive mode will be considered. If @exclusive - * is false then overlaps with any resource group or hardware entities - * will be considered. - * - * @cbm is unsigned long, even if only 32 bits are used, to make the - * bitmap functions work correctly. - * - * Return: false if CBM does not overlap, true if it does. - */ -static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, - unsigned long cbm, int closid, - enum resctrl_conf_type type, bool exclusive) -{ - enum rdtgrp_mode mode; - unsigned long ctrl_b; - int i; - - /* Check for any overlap with regions used by hardware directly */ - if (!exclusive) { - ctrl_b = r->cache.shareable_bits; - if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) - return true; - } - - /* Check for overlap with other resource groups */ - for (i = 0; i < closids_supported(); i++) { - ctrl_b = resctrl_arch_get_config(r, d, i, type); - mode = rdtgroup_mode_by_closid(i); - if (closid_allocated(i) && i != closid && - mode != RDT_MODE_PSEUDO_LOCKSETUP) { - if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) { - if (exclusive) { - if (mode == RDT_MODE_EXCLUSIVE) - return true; - continue; - } - return true; - } - } - } - - return false; -} - -/** - * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware - * @s: Schema for the resource to which domain instance @d belongs. - * @d: The domain instance for which @closid is being tested. - * @cbm: Capacity bitmask being tested. - * @closid: Intended closid for @cbm. 
- * @exclusive: Only check if overlaps with exclusive resource groups - * - * Resources that can be allocated using a CBM can use the CBM to control - * the overlap of these allocations. rdtgroup_cmb_overlaps() is the test - * for overlap. Overlap test is not limited to the specific resource for - * which the CBM is intended though - when dealing with CDP resources that - * share the underlying hardware the overlap check should be performed on - * the CDP resource sharing the hardware also. - * - * Refer to description of __rdtgroup_cbm_overlaps() for the details of the - * overlap test. - * - * Return: true if CBM overlap detected, false if there is no overlap - */ -bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, - unsigned long cbm, int closid, bool exclusive) -{ - enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); - struct rdt_resource *r = s->res; - - if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type, - exclusive)) - return true; - - if (!resctrl_arch_get_cdp_enabled(r->rid)) - return false; - return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive); -} - -/** - * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive - * @rdtgrp: Resource group identified through its closid. - * - * An exclusive resource group implies that there should be no sharing of - * its allocated resources. At the time this group is considered to be - * exclusive this test can determine if its current schemata supports this - * setting by testing for overlap with all other resource groups. - * - * Return: true if resource group can be exclusive, false if there is overlap - * with allocations of other resource groups and thus this resource group - * cannot be exclusive. 
- */ -static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) -{ - int closid = rdtgrp->closid; - struct resctrl_schema *s; - struct rdt_resource *r; - bool has_cache = false; - struct rdt_domain *d; - u32 ctrl; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA) - continue; - has_cache = true; - list_for_each_entry(d, &r->domains, list) { - ctrl = resctrl_arch_get_config(r, d, closid, - s->conf_type); - if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) { - rdt_last_cmd_puts("Schemata overlaps\n"); - return false; - } - } - } - - if (!has_cache) { - rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); - return false; - } - - return true; -} - -/* - * rdtgroup_mode_write - Modify the resource group's mode - */ -static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct rdtgroup *rdtgrp; - enum rdtgrp_mode mode; - int ret = 0; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - buf[nbytes - 1] = '\0'; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - - rdt_last_cmd_clear(); - - mode = rdtgrp->mode; - - if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) || - (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) || - (!strcmp(buf, "pseudo-locksetup") && - mode == RDT_MODE_PSEUDO_LOCKSETUP) || - (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED)) - goto out; - - if (mode == RDT_MODE_PSEUDO_LOCKED) { - rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); - ret = -EINVAL; - goto out; - } - - if (!strcmp(buf, "shareable")) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - ret = rdtgroup_locksetup_exit(rdtgrp); - if (ret) - goto out; - } - rdtgrp->mode = 
RDT_MODE_SHAREABLE; - } else if (!strcmp(buf, "exclusive")) { - if (!rdtgroup_mode_test_exclusive(rdtgrp)) { - ret = -EINVAL; - goto out; - } - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - ret = rdtgroup_locksetup_exit(rdtgrp); - if (ret) - goto out; - } - rdtgrp->mode = RDT_MODE_EXCLUSIVE; - } else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) && - !strcmp(buf, "pseudo-locksetup")) { - ret = rdtgroup_locksetup_enter(rdtgrp); - if (ret) - goto out; - rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; - } else { - rdt_last_cmd_puts("Unknown or unsupported mode\n"); - ret = -EINVAL; - } - -out: - rdtgroup_kn_unlock(of->kn); - return ret ?: nbytes; -} - -/** - * rdtgroup_cbm_to_size - Translate CBM to size in bytes - * @r: RDT resource to which @d belongs. - * @d: RDT domain instance. - * @cbm: bitmask for which the size should be computed. - * - * The bitmask provided associated with the RDT domain instance @d will be - * translated into how many bytes it represents. The size in bytes is - * computed by first dividing the total cache size by the CBM length to - * determine how many bytes each bit in the bitmask represents. The result - * is multiplied with the number of bits set in the bitmask. - * - * @cbm is unsigned long, even if only 32 bits are used to make the - * bitmap functions work correctly. 
- */ -unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, - struct rdt_domain *d, unsigned long cbm) -{ - struct cpu_cacheinfo *ci; - unsigned int size = 0; - int num_b, i; - - num_b = bitmap_weight(&cbm, r->cache.cbm_len); - ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); - for (i = 0; i < ci->num_leaves; i++) { - if (ci->info_list[i].level == r->cache_level) { - size = ci->info_list[i].size / r->cache.cbm_len * num_b; - break; - } - } - - return size; -} - -/* - * rdtgroup_size_show - Display size in bytes of allocated regions - * - * The "size" file mirrors the layout of the "schemata" file, printing the - * size in bytes of each region instead of the capacity bitmask. - */ -static int rdtgroup_size_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct resctrl_schema *schema; - enum resctrl_conf_type type; - struct rdtgroup *rdtgrp; - struct rdt_resource *r; - struct rdt_domain *d; - unsigned int size; - int ret = 0; - u32 closid; - bool sep; - u32 ctrl; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - if (!rdtgrp->plr->d) { - rdt_last_cmd_clear(); - rdt_last_cmd_puts("Cache domain offline\n"); - ret = -ENODEV; - } else { - seq_printf(s, "%*s:", max_name_width, - rdtgrp->plr->s->name); - size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res, - rdtgrp->plr->d, - rdtgrp->plr->cbm); - seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); - } - goto out; - } - - closid = rdtgrp->closid; - - list_for_each_entry(schema, &resctrl_schema_all, list) { - r = schema->res; - type = schema->conf_type; - sep = false; - seq_printf(s, "%*s:", max_name_width, schema->name); - list_for_each_entry(d, &r->domains, list) { - if (sep) - seq_putc(s, ';'); - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - size = 0; - } else { - if (is_mba_sc(r)) - ctrl = d->mbps_val[closid]; - else - ctrl = resctrl_arch_get_config(r, d, - closid, - type); - if 
(r->rid == RDT_RESOURCE_MBA || - r->rid == RDT_RESOURCE_SMBA) - size = ctrl; - else - size = rdtgroup_cbm_to_size(r, d, ctrl); - } - seq_printf(s, "%d=%u", d->id, size); - sep = true; - } - seq_putc(s, '\n'); - } - -out: - rdtgroup_kn_unlock(of->kn); - - return ret; -} - -#define INVALID_CONFIG_INDEX UINT_MAX - -/** - * mon_event_config_index_get - get the hardware index for the - * configurable event - * @evtid: event id. - * - * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID - * 1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID - * INVALID_CONFIG_INDEX for invalid evtid - */ -static inline unsigned int mon_event_config_index_get(u32 evtid) -{ - switch (evtid) { - case QOS_L3_MBM_TOTAL_EVENT_ID: - return 0; - case QOS_L3_MBM_LOCAL_EVENT_ID: - return 1; - default: - /* Should never reach here */ - return INVALID_CONFIG_INDEX; - } -} - -void resctrl_arch_mon_event_config_read(void *info) -{ - struct resctrl_mon_config_info *mon_info = info; - unsigned int index; - u64 msrval; - - index = mon_event_config_index_get(mon_info->evtid); - if (index == INVALID_CONFIG_INDEX) { - pr_warn_once("Invalid event id %d\n", mon_info->evtid); - return; - } - rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval); - - /* Report only the valid event configuration bits */ - mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS; -} - -static void mondata_config_read(struct resctrl_mon_config_info *mon_info) -{ - smp_call_function_any(&mon_info->d->cpu_mask, - resctrl_arch_mon_event_config_read, mon_info, 1); -} - -static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid) -{ - struct resctrl_mon_config_info mon_info = {0}; - struct rdt_domain *dom; - bool sep = false; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - list_for_each_entry(dom, &r->domains, list) { - if (sep) - seq_puts(s, ";"); - - memset(&mon_info, 0, sizeof(struct resctrl_mon_config_info)); - mon_info.r = r; - mon_info.d = dom; - mon_info.evtid = evtid; - mondata_config_read(&mon_info); - - seq_printf(s, 
"%d=0x%02x", dom->id, mon_info.mon_config); - sep = true; - } - seq_puts(s, "\n"); - - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - - return 0; -} - -static int mbm_total_bytes_config_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct rdt_resource *r = of->kn->parent->priv; - - mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID); - - return 0; -} - -static int mbm_local_bytes_config_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct rdt_resource *r = of->kn->parent->priv; - - mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID); - - return 0; -} - -void resctrl_arch_mon_event_config_write(void *info) -{ - struct resctrl_mon_config_info *mon_info = info; - unsigned int index; - - index = mon_event_config_index_get(mon_info->evtid); - if (index == INVALID_CONFIG_INDEX) { - pr_warn_once("Invalid event id %d\n", mon_info->evtid); - mon_info->err = -EINVAL; - return; - } - wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0); - - mon_info->err = 0; -} - -static int mbm_config_write_domain(struct rdt_resource *r, - struct rdt_domain *d, u32 evtid, u32 val) -{ - struct resctrl_mon_config_info mon_info = {0}; - - /* - * Read the current config value first. If both are the same then - * no need to write it again. - */ - mon_info.r = r; - mon_info.d = d; - mon_info.evtid = evtid; - mondata_config_read(&mon_info); - if (mon_info.mon_config == val) - return 0; - - mon_info.mon_config = val; - - /* - * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the - * domain. The MSRs offset from MSR MSR_IA32_EVT_CFG_BASE - * are scoped at the domain level. Writing any of these MSRs - * on one CPU is observed by all the CPUs in the domain. 
- */ - smp_call_function_any(&d->cpu_mask, resctrl_arch_mon_event_config_write, - &mon_info, 1); - if (mon_info.err) { - rdt_last_cmd_puts("Invalid event configuration\n"); - return mon_info.err; - } - - /* - * When an Event Configuration is changed, the bandwidth counters - * for all RMIDs and Events will be cleared by the hardware. The - * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for - * every RMID on the next read to any event for every RMID. - * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62) - * cleared while it is tracked by the hardware. Clear the - * mbm_local and mbm_total counts for all the RMIDs. - */ - resctrl_arch_reset_rmid_all(r, d); - - return 0; -} - -static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) -{ - char *dom_str = NULL, *id_str; - unsigned long dom_id, val; - struct rdt_domain *d; - int err; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - -next: - if (!tok || tok[0] == '\0') - return 0; - - /* Start processing the strings for each domain */ - dom_str = strim(strsep(&tok, ";")); - id_str = strsep(&dom_str, "="); - - if (!id_str || kstrtoul(id_str, 10, &dom_id)) { - rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n"); - return -EINVAL; - } - - if (!dom_str || kstrtoul(dom_str, 16, &val)) { - rdt_last_cmd_puts("Non-numeric event configuration value\n"); - return -EINVAL; - } - - /* Value from user cannot be more than the supported set of events */ - if ((val & r->mbm_cfg_mask) != val) { - rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n", - r->mbm_cfg_mask); - return -EINVAL; - } - - list_for_each_entry(d, &r->domains, list) { - if (d->id == dom_id) { - err = mbm_config_write_domain(r, d, evtid, val); - if (err) - return err; - goto next; - } - } - - return -EINVAL; -} - -static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, - loff_t off) -{ - struct rdt_resource 
*r = of->kn->parent->priv; - int ret; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - rdt_last_cmd_clear(); - - buf[nbytes - 1] = '\0'; - - ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID); - - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - - return ret ?: nbytes; -} - -static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, - loff_t off) -{ - struct rdt_resource *r = of->kn->parent->priv; - int ret; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - rdt_last_cmd_clear(); - - buf[nbytes - 1] = '\0'; - - ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID); - - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - - return ret ?: nbytes; -} - -/* rdtgroup information files for one cache resource. 
*/ -static struct rftype res_common_files[] = { - { - .name = "last_cmd_status", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_last_cmd_status_show, - .fflags = RFTYPE_TOP_INFO, - }, - { - .name = "num_closids", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_num_closids_show, - .fflags = RFTYPE_CTRL_INFO, - }, - { - .name = "mon_features", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_mon_features_show, - .fflags = RFTYPE_MON_INFO, - }, - { - .name = "num_rmids", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_num_rmids_show, - .fflags = RFTYPE_MON_INFO, - }, - { - .name = "cbm_mask", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_default_ctrl_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "min_cbm_bits", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_min_cbm_bits_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "shareable_bits", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_shareable_bits_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "bit_usage", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_bit_usage_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "min_bandwidth", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_min_bw_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, - }, - { - .name = "bandwidth_gran", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_bw_gran_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, - }, - { - .name = "delay_linear", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_delay_linear_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, - }, - /* - * Platform specific which (if any) capabilities are provided by - * thread_throttle_mode. 
Defer "fflags" initialization to platform - * discovery. - */ - { - .name = "thread_throttle_mode", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_thread_throttle_mode_show, - }, - { - .name = "max_threshold_occupancy", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = max_threshold_occ_write, - .seq_show = max_threshold_occ_show, - .fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "mbm_total_bytes_config", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = mbm_total_bytes_config_show, - .write = mbm_total_bytes_config_write, - }, - { - .name = "mbm_local_bytes_config", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = mbm_local_bytes_config_show, - .write = mbm_local_bytes_config_write, - }, - { - .name = "cpus", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_cpus_write, - .seq_show = rdtgroup_cpus_show, - .fflags = RFTYPE_BASE, - }, - { - .name = "cpus_list", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_cpus_write, - .seq_show = rdtgroup_cpus_show, - .flags = RFTYPE_FLAGS_CPUS_LIST, - .fflags = RFTYPE_BASE, - }, - { - .name = "tasks", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_tasks_write, - .seq_show = rdtgroup_tasks_show, - .fflags = RFTYPE_BASE, - }, - { - .name = "mon_hw_id", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdtgroup_rmid_show, - .fflags = RFTYPE_MON_BASE | RFTYPE_DEBUG, - }, - { - .name = "schemata", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_schemata_write, - .seq_show = rdtgroup_schemata_show, - .fflags = RFTYPE_CTRL_BASE, - }, - { - .name = "mode", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_mode_write, - .seq_show = rdtgroup_mode_show, - .fflags = RFTYPE_CTRL_BASE, - }, - { - .name = "size", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdtgroup_size_show, - .fflags = 
RFTYPE_CTRL_BASE, - }, - { - .name = "sparse_masks", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_has_sparse_bitmasks_show, - .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "ctrl_hw_id", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdtgroup_closid_show, - .fflags = RFTYPE_CTRL_BASE | RFTYPE_DEBUG, - }, - -}; - -static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) -{ - struct rftype *rfts, *rft; - int ret, len; - - rfts = res_common_files; - len = ARRAY_SIZE(res_common_files); - - lockdep_assert_held(&rdtgroup_mutex); - - if (resctrl_debug) - fflags |= RFTYPE_DEBUG; - - for (rft = rfts; rft < rfts + len; rft++) { - if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { - ret = rdtgroup_add_file(kn, rft); - if (ret) - goto error; - } - } - - return 0; -error: - pr_warn("Failed to add %s, err=%d\n", rft->name, ret); - while (--rft >= rfts) { - if ((fflags & rft->fflags) == rft->fflags) - kernfs_remove_by_name(kn, rft->name); - } - return ret; -} - -static struct rftype *rdtgroup_get_rftype_by_name(const char *name) -{ - struct rftype *rfts, *rft; - int len; - - rfts = res_common_files; - len = ARRAY_SIZE(res_common_files); - - for (rft = rfts; rft < rfts + len; rft++) { - if (!strcmp(rft->name, name)) - return rft; - } - - return NULL; -} - -static void thread_throttle_mode_init(void) -{ - struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); - struct rftype *rft; - - if (!r->alloc_capable || - r->membw.throttle_mode == THREAD_THROTTLE_UNDEFINED) - return; - - rft = rdtgroup_get_rftype_by_name("thread_throttle_mode"); - if (!rft) - return; - - rft->fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB; -} - -void mbm_config_rftype_init(const char *config) -{ - struct rftype *rft; - - rft = rdtgroup_get_rftype_by_name(config); - if (rft) - rft->fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE; -} - -/** - * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl 
file - * @r: The resource group with which the file is associated. - * @name: Name of the file - * - * The permissions of named resctrl file, directory, or link are modified - * to not allow read, write, or execute by any user. - * - * WARNING: This function is intended to communicate to the user that the - * resctrl file has been locked down - that it is not relevant to the - * particular state the system finds itself in. It should not be relied - * on to protect from user access because after the file's permissions - * are restricted the user can still change the permissions using chmod - * from the command line. - * - * Return: 0 on success, <0 on failure. - */ -int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name) -{ - struct iattr iattr = {.ia_valid = ATTR_MODE,}; - struct kernfs_node *kn; - int ret = 0; - - kn = kernfs_find_and_get_ns(r->kn, name, NULL); - if (!kn) - return -ENOENT; - - switch (kernfs_type(kn)) { - case KERNFS_DIR: - iattr.ia_mode = S_IFDIR; - break; - case KERNFS_FILE: - iattr.ia_mode = S_IFREG; - break; - case KERNFS_LINK: - iattr.ia_mode = S_IFLNK; - break; - } - - ret = kernfs_setattr(kn, &iattr); - kernfs_put(kn); - return ret; -} - -/** - * rdtgroup_kn_mode_restore - Restore user access to named resctrl file - * @r: The resource group with which the file is associated. - * @name: Name of the file - * @mask: Mask of permissions that should be restored - * - * Restore the permissions of the named file. If @name is a directory the - * permissions of its parent will be used. - * - * Return: 0 on success, <0 on failure. 
- */ -int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, - umode_t mask) -{ - struct iattr iattr = {.ia_valid = ATTR_MODE,}; - struct kernfs_node *kn, *parent; - struct rftype *rfts, *rft; - int ret, len; - - rfts = res_common_files; - len = ARRAY_SIZE(res_common_files); - - for (rft = rfts; rft < rfts + len; rft++) { - if (!strcmp(rft->name, name)) - iattr.ia_mode = rft->mode & mask; - } - - kn = kernfs_find_and_get_ns(r->kn, name, NULL); - if (!kn) - return -ENOENT; - - switch (kernfs_type(kn)) { - case KERNFS_DIR: - parent = kernfs_get_parent(kn); - if (parent) { - iattr.ia_mode |= parent->mode; - kernfs_put(parent); - } - iattr.ia_mode |= S_IFDIR; - break; - case KERNFS_FILE: - iattr.ia_mode |= S_IFREG; - break; - case KERNFS_LINK: - iattr.ia_mode |= S_IFLNK; - break; - } - - ret = kernfs_setattr(kn, &iattr); - kernfs_put(kn); - return ret; -} - -static int rdtgroup_mkdir_info_resdir(void *priv, char *name, - unsigned long fflags) -{ - struct kernfs_node *kn_subdir; - int ret; - - kn_subdir = kernfs_create_dir(kn_info, name, - kn_info->mode, priv); - if (IS_ERR(kn_subdir)) - return PTR_ERR(kn_subdir); - - ret = rdtgroup_kn_set_ugid(kn_subdir); - if (ret) - return ret; - - ret = rdtgroup_add_files(kn_subdir, fflags); - if (!ret) - kernfs_activate(kn_subdir); - - return ret; -} - -static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) -{ - enum resctrl_res_level i; - struct resctrl_schema *s; - struct rdt_resource *r; - unsigned long fflags; - char name[32]; - int ret; - - /* create the directory */ - kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); - if (IS_ERR(kn_info)) - return PTR_ERR(kn_info); - - ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO); - if (ret) - goto out_destroy; - - /* loop over enabled controls, these are all alloc_capable */ - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - fflags = r->fflags | RFTYPE_CTRL_INFO; - ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags); - 
if (ret) - goto out_destroy; - } - - for (i = 0; i < RDT_NUM_RESOURCES; i++) { - r = resctrl_arch_get_resource(i); - if (!r->mon_capable) - continue; - - fflags = r->fflags | RFTYPE_MON_INFO; - sprintf(name, "%s_MON", r->name); - ret = rdtgroup_mkdir_info_resdir(r, name, fflags); - if (ret) - goto out_destroy; - } - - ret = rdtgroup_kn_set_ugid(kn_info); - if (ret) - goto out_destroy; - - kernfs_activate(kn_info); - - return 0; - -out_destroy: - kernfs_remove(kn_info); - return ret; -} - -static int -mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, - char *name, struct kernfs_node **dest_kn) -{ - struct kernfs_node *kn; - int ret; - - /* create the directory */ - kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); - if (IS_ERR(kn)) - return PTR_ERR(kn); - - if (dest_kn) - *dest_kn = kn; - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) - goto out_destroy; - - kernfs_activate(kn); - - return 0; - -out_destroy: - kernfs_remove(kn); - return ret; -} - -static void l3_qos_cfg_update(void *arg) -{ - bool *enable = arg; - - wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); -} - -static void l2_qos_cfg_update(void *arg) -{ - bool *enable = arg; - - wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? 
L2_QOS_CDP_ENABLE : 0ULL); -} - -static inline bool is_mba_linear(void) -{ - return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear; -} - -static int set_cache_qos_cfg(int level, bool enable) -{ - void (*update)(void *arg); - struct rdt_resource *r_l; - cpumask_var_t cpu_mask; - struct rdt_domain *d; - int cpu; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - if (level == RDT_RESOURCE_L3) - update = l3_qos_cfg_update; - else if (level == RDT_RESOURCE_L2) - update = l2_qos_cfg_update; - else - return -EINVAL; - - if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) - return -ENOMEM; - - r_l = &rdt_resources_all[level].r_resctrl; - list_for_each_entry(d, &r_l->domains, list) { - if (r_l->cache.arch_has_per_cpu_cfg) - /* Pick all the CPUs in the domain instance */ - for_each_cpu(cpu, &d->cpu_mask) - cpumask_set_cpu(cpu, cpu_mask); - else - /* Pick one CPU from each domain instance to update MSR */ - cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); - } - - /* Update QOS_CFG MSR on all the CPUs in cpu_mask */ - on_each_cpu_mask(cpu_mask, update, &enable, 1); - - free_cpumask_var(cpu_mask); - - return 0; -} - -/* Restore the qos cfg state when a domain comes online */ -void rdt_domain_reconfigure_cdp(struct rdt_resource *r) -{ - struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); - - if (!r->cdp_capable) - return; - - if (r->rid == RDT_RESOURCE_L2) - l2_qos_cfg_update(&hw_res->cdp_enabled); - - if (r->rid == RDT_RESOURCE_L3) - l3_qos_cfg_update(&hw_res->cdp_enabled); -} - -static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d) -{ - u32 num_closid = resctrl_arch_get_num_closid(r); - int cpu = cpumask_any(&d->cpu_mask); - int i; - - d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val), - GFP_KERNEL, cpu_to_node(cpu)); - if (!d->mbps_val) - return -ENOMEM; - - for (i = 0; i < num_closid; i++) - d->mbps_val[i] = MBA_MAX_MBPS; - - return 0; -} - -static void 
mba_sc_domain_destroy(struct rdt_resource *r, - struct rdt_domain *d) -{ - kfree(d->mbps_val); - d->mbps_val = NULL; -} - -/* - * MBA software controller is supported only if - * MBM is supported and MBA is in linear scale. - */ -static bool supports_mba_mbps(void) -{ - struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); - - return (resctrl_arch_is_mbm_local_enabled() && - r->alloc_capable && is_mba_linear()); -} - -/* - * Enable or disable the MBA software controller - * which helps user specify bandwidth in MBps. - */ -static int set_mba_sc(bool mba_sc) -{ - struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); - u32 num_closid = resctrl_arch_get_num_closid(r); - struct rdt_domain *d; - int i; - - if (!supports_mba_mbps() || mba_sc == is_mba_sc(r)) - return -EINVAL; - - r->membw.mba_sc = mba_sc; - - list_for_each_entry(d, &r->domains, list) { - for (i = 0; i < num_closid; i++) - d->mbps_val[i] = MBA_MAX_MBPS; - } - - return 0; -} - -static int cdp_enable(int level) -{ - struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl; - int ret; - - if (!r_l->alloc_capable) - return -EINVAL; - - ret = set_cache_qos_cfg(level, true); - if (!ret) - rdt_resources_all[level].cdp_enabled = true; - - return ret; -} - -static void cdp_disable(int level) -{ - struct rdt_hw_resource *r_hw = &rdt_resources_all[level]; - - if (r_hw->cdp_enabled) { - set_cache_qos_cfg(level, false); - r_hw->cdp_enabled = false; - } -} - -int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable) -{ - struct rdt_hw_resource *hw_res = &rdt_resources_all[l]; - - if (!hw_res->r_resctrl.cdp_capable) - return -EINVAL; - - if (enable) - return cdp_enable(l); - - cdp_disable(l); - - return 0; -} - -/* - * We don't allow rdtgroup directories to be created anywhere - * except the root directory. 
Thus when looking for the rdtgroup - * structure for a kernfs node we are either looking at a directory, - * in which case the rdtgroup structure is pointed at by the "priv" - * field, otherwise we have a file, and need only look to the parent - * to find the rdtgroup. - */ -static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn) -{ - if (kernfs_type(kn) == KERNFS_DIR) { - /* - * All the resource directories use "kn->priv" - * to point to the "struct rdtgroup" for the - * resource. "info" and its subdirectories don't - * have rdtgroup structures, so return NULL here. - */ - if (kn == kn_info || kn->parent == kn_info) - return NULL; - else - return kn->priv; - } else { - return kn->parent->priv; - } -} - -static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn) -{ - atomic_inc(&rdtgrp->waitcount); - kernfs_break_active_protection(kn); -} - -static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn) -{ - if (atomic_dec_and_test(&rdtgrp->waitcount) && - (rdtgrp->flags & RDT_DELETED)) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) - rdtgroup_pseudo_lock_remove(rdtgrp); - kernfs_unbreak_active_protection(kn); - rdtgroup_remove(rdtgrp); - } else { - kernfs_unbreak_active_protection(kn); - } -} - -struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) -{ - struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); - - if (!rdtgrp) - return NULL; - - rdtgroup_kn_get(rdtgrp, kn); - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - /* Was this group deleted while we waited? 
*/ - if (rdtgrp->flags & RDT_DELETED) - return NULL; - - return rdtgrp; -} - -void rdtgroup_kn_unlock(struct kernfs_node *kn) -{ - struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); - - if (!rdtgrp) - return; - - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - - rdtgroup_kn_put(rdtgrp, kn); -} - -static int mkdir_mondata_all(struct kernfs_node *parent_kn, - struct rdtgroup *prgrp, - struct kernfs_node **mon_data_kn); - -static void rdt_disable_ctx(void) -{ - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); - set_mba_sc(false); - - resctrl_debug = false; -} - -static int rdt_enable_ctx(struct rdt_fs_context *ctx) -{ - int ret = 0; - - if (ctx->enable_cdpl2) { - ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true); - if (ret) - goto out_done; - } - - if (ctx->enable_cdpl3) { - ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true); - if (ret) - goto out_cdpl2; - } - - if (ctx->enable_mba_mbps) { - ret = set_mba_sc(true); - if (ret) - goto out_cdpl3; - } - - if (ctx->enable_debug) - resctrl_debug = true; - - return 0; - -out_cdpl3: - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); -out_cdpl2: - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); -out_done: - return ret; -} - -static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type) -{ - struct resctrl_schema *s; - const char *suffix = ""; - int ret, cl; - - s = kzalloc(sizeof(*s), GFP_KERNEL); - if (!s) - return -ENOMEM; - - s->res = r; - s->num_closid = resctrl_arch_get_num_closid(r); - if (resctrl_arch_get_cdp_enabled(r->rid)) - s->num_closid /= 2; - - s->conf_type = type; - switch (type) { - case CDP_CODE: - suffix = "CODE"; - break; - case CDP_DATA: - suffix = "DATA"; - break; - case CDP_NONE: - suffix = ""; - break; - } - - ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix); - if (ret >= sizeof(s->name)) { - kfree(s); - return -EINVAL; - } - - cl = strlen(s->name); - - /* - * If CDP is 
supported by this resource, but not enabled, - * include the suffix. This ensures the tabular format of the - * schemata file does not change between mounts of the filesystem. - */ - if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid)) - cl += 4; - - if (cl > max_name_width) - max_name_width = cl; - - /* - * Choose a width for the resource data based on the resource that has - * widest name and cbm. - */ - max_data_width = max(max_data_width, r->data_width); - - INIT_LIST_HEAD(&s->list); - list_add(&s->list, &resctrl_schema_all); - - return 0; -} - -static int schemata_list_create(void) -{ - enum resctrl_res_level i; - struct rdt_resource *r; - int ret = 0; - - for (i = 0; i < RDT_NUM_RESOURCES; i++) { - r = resctrl_arch_get_resource(i); - if (!r->alloc_capable) - continue; - - if (resctrl_arch_get_cdp_enabled(r->rid)) { - ret = schemata_list_add(r, CDP_CODE); - if (ret) - break; - - ret = schemata_list_add(r, CDP_DATA); - } else { - ret = schemata_list_add(r, CDP_NONE); - } - - if (ret) - break; - } - - return ret; -} - -static void schemata_list_destroy(void) -{ - struct resctrl_schema *s, *tmp; - - list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) { - list_del(&s->list); - kfree(s); - } -} - -static int rdt_get_tree(struct fs_context *fc) -{ - struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); - struct rdt_fs_context *ctx = rdt_fc2context(fc); - unsigned long flags = RFTYPE_CTRL_BASE; - struct rdt_domain *dom; - int ret; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - /* - * resctrl file system can only be mounted once. 
- */ - if (resctrl_mounted) { - ret = -EBUSY; - goto out; - } - - ret = rdtgroup_setup_root(ctx); - if (ret) - goto out; - - ret = rdt_enable_ctx(ctx); - if (ret) - goto out_root; - - ret = schemata_list_create(); - if (ret) { - schemata_list_destroy(); - goto out_ctx; - } - - closid_init(); - - if (resctrl_arch_mon_capable()) - flags |= RFTYPE_MON; - - ret = rdtgroup_add_files(rdtgroup_default.kn, flags); - if (ret) - goto out_schemata_free; - - kernfs_activate(rdtgroup_default.kn); - - ret = rdtgroup_create_info_dir(rdtgroup_default.kn); - if (ret < 0) - goto out_schemata_free; - - if (resctrl_arch_mon_capable()) { - ret = mongroup_create_dir(rdtgroup_default.kn, - &rdtgroup_default, "mon_groups", - &kn_mongrp); - if (ret < 0) - goto out_info; - - ret = mkdir_mondata_all(rdtgroup_default.kn, - &rdtgroup_default, &kn_mondata); - if (ret < 0) - goto out_mongrp; - rdtgroup_default.mon.mon_data_kn = kn_mondata; - } - - ret = rdt_pseudo_lock_init(); - if (ret) - goto out_mondata; - - ret = kernfs_get_tree(fc); - if (ret < 0) - goto out_psl; - - if (resctrl_arch_alloc_capable()) - resctrl_arch_enable_alloc(); - if (resctrl_arch_mon_capable()) - resctrl_arch_enable_mon(); - - if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable()) - resctrl_mounted = true; - - if (resctrl_is_mbm_enabled()) { - list_for_each_entry(dom, &l3->domains, list) - mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL, - RESCTRL_PICK_ANY_CPU); - } - - goto out; - -out_psl: - rdt_pseudo_lock_release(); -out_mondata: - if (resctrl_arch_mon_capable()) - kernfs_remove(kn_mondata); -out_mongrp: - if (resctrl_arch_mon_capable()) - kernfs_remove(kn_mongrp); -out_info: - kernfs_remove(kn_info); -out_schemata_free: - schemata_list_destroy(); -out_ctx: - rdt_disable_ctx(); -out_root: - rdtgroup_destroy_root(); -out: - rdt_last_cmd_clear(); - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - return ret; -} - -enum rdt_param { - Opt_cdp, - Opt_cdpl2, - Opt_mba_mbps, - Opt_debug, - 
nr__rdt_params -}; - -static const struct fs_parameter_spec rdt_fs_parameters[] = { - fsparam_flag("cdp", Opt_cdp), - fsparam_flag("cdpl2", Opt_cdpl2), - fsparam_flag("mba_MBps", Opt_mba_mbps), - fsparam_flag("debug", Opt_debug), - {} -}; - -static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) -{ - struct rdt_fs_context *ctx = rdt_fc2context(fc); - struct fs_parse_result result; - int opt; - - opt = fs_parse(fc, rdt_fs_parameters, param, &result); - if (opt < 0) - return opt; - - switch (opt) { - case Opt_cdp: - ctx->enable_cdpl3 = true; - return 0; - case Opt_cdpl2: - ctx->enable_cdpl2 = true; - return 0; - case Opt_mba_mbps: - if (!supports_mba_mbps()) - return -EINVAL; - ctx->enable_mba_mbps = true; - return 0; - case Opt_debug: - ctx->enable_debug = true; - return 0; - } - - return -EINVAL; -} - -static void rdt_fs_context_free(struct fs_context *fc) -{ - struct rdt_fs_context *ctx = rdt_fc2context(fc); - - kernfs_free_fs_context(fc); - kfree(ctx); -} - -static const struct fs_context_operations rdt_fs_context_ops = { - .free = rdt_fs_context_free, - .parse_param = rdt_parse_param, - .get_tree = rdt_get_tree, -}; - -static int rdt_init_fs_context(struct fs_context *fc) -{ - struct rdt_fs_context *ctx; - - ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL); - if (!ctx) - return -ENOMEM; - - ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; - fc->fs_private = &ctx->kfc; - fc->ops = &rdt_fs_context_ops; - put_user_ns(fc->user_ns); - fc->user_ns = get_user_ns(&init_user_ns); - fc->global = true; - return 0; -} - -static int reset_all_ctrls(struct rdt_resource *r) -{ - struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); - struct rdt_hw_domain *hw_dom; - struct msr_param msr_param; - cpumask_var_t cpu_mask; - struct rdt_domain *d; - int i; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) - return -ENOMEM; - - msr_param.res = r; - msr_param.low = 0; 
- msr_param.high = hw_res->num_closid; - - /* - * Disable resource control for this resource by setting all - * CBMs in all domains to the maximum mask value. Pick one CPU - * from each domain to update the MSRs below. - */ - list_for_each_entry(d, &r->domains, list) { - hw_dom = resctrl_to_arch_dom(d); - cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); - - for (i = 0; i < hw_res->num_closid; i++) - hw_dom->ctrl_val[i] = r->default_ctrl; - } - - /* Update CBM on all the CPUs in cpu_mask */ - on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1); - - free_cpumask_var(cpu_mask); - - return 0; -} - -void resctrl_arch_reset_resources(void) -{ - struct rdt_resource *r; - - for_each_capable_rdt_resource(r) - reset_all_ctrls(r); -} - -/* - * Move tasks from one to the other group. If @from is NULL, then all tasks - * in the systems are moved unconditionally (used for teardown). - * - * If @mask is not NULL the cpus on which moved tasks are running are set - * in that mask so the update smp function call is restricted to affected - * cpus. - */ -static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, - struct cpumask *mask) -{ - struct task_struct *p, *t; - - read_lock(&tasklist_lock); - for_each_process_thread(p, t) { - if (!from || is_closid_match(t, from) || - is_rmid_match(t, from)) { - resctrl_arch_set_closid_rmid(t, to->closid, - to->mon.rmid); - - /* - * Order the closid/rmid stores above before the loads - * in task_curr(). This pairs with the full barrier - * between the rq->curr update and - * resctrl_arch_sched_in() during context switch. - */ - smp_mb(); - - /* - * If the task is on a CPU, set the CPU in the mask. - * The detection is inaccurate as tasks might move or - * schedule before the smp function call takes place. - * In such a case the function call is pointless, but - * there is no other side effect. 
- */ - if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t)) - cpumask_set_cpu(task_cpu(t), mask); - } - } - read_unlock(&tasklist_lock); -} - -static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) -{ - struct rdtgroup *sentry, *stmp; - struct list_head *head; - - head = &rdtgrp->mon.crdtgrp_list; - list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { - free_rmid(sentry->closid, sentry->mon.rmid); - list_del(&sentry->mon.crdtgrp_list); - - if (atomic_read(&sentry->waitcount) != 0) - sentry->flags = RDT_DELETED; - else - rdtgroup_remove(sentry); - } -} - -/* - * Forcibly remove all of subdirectories under root. - */ -static void rmdir_all_sub(void) -{ - struct rdtgroup *rdtgrp, *tmp; - - /* Move all tasks to the default resource group */ - rdt_move_group_tasks(NULL, &rdtgroup_default, NULL); - - list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { - /* Free any child rmids */ - free_all_child_rdtgrp(rdtgrp); - - /* Remove each rdtgroup other than root */ - if (rdtgrp == &rdtgroup_default) - continue; - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) - rdtgroup_pseudo_lock_remove(rdtgrp); - - /* - * Give any CPUs back to the default group. We cannot copy - * cpu_online_mask because a CPU might have executed the - * offline callback already, but is still marked online. 
- */ - cpumask_or(&rdtgroup_default.cpu_mask, - &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); - - free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); - - kernfs_remove(rdtgrp->kn); - list_del(&rdtgrp->rdtgroup_list); - - if (atomic_read(&rdtgrp->waitcount) != 0) - rdtgrp->flags = RDT_DELETED; - else - rdtgroup_remove(rdtgrp); - } - /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ - update_closid_rmid(cpu_online_mask, &rdtgroup_default); - - kernfs_remove(kn_info); - kernfs_remove(kn_mongrp); - kernfs_remove(kn_mondata); -} - -static void rdt_kill_sb(struct super_block *sb) -{ - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - rdt_disable_ctx(); - - /* Put everything back to default values. */ - resctrl_arch_reset_resources(); - - rmdir_all_sub(); - rdt_pseudo_lock_release(); - rdtgroup_default.mode = RDT_MODE_SHAREABLE; - schemata_list_destroy(); - rdtgroup_destroy_root(); - if (resctrl_arch_alloc_capable()) - resctrl_arch_disable_alloc(); - if (resctrl_arch_mon_capable()) - resctrl_arch_disable_mon(); - resctrl_mounted = false; - kernfs_kill_sb(sb); - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); -} - -static struct file_system_type rdt_fs_type = { - .name = "resctrl", - .init_fs_context = rdt_init_fs_context, - .parameters = rdt_fs_parameters, - .kill_sb = rdt_kill_sb, -}; - -static int mon_addfile(struct kernfs_node *parent_kn, const char *name, - void *priv) -{ - struct kernfs_node *kn; - int ret = 0; - - kn = __kernfs_create_file(parent_kn, name, 0444, - GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, - &kf_mondata_ops, priv, NULL, NULL); - if (IS_ERR(kn)) - return PTR_ERR(kn); - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) { - kernfs_remove(kn); - return ret; - } - - return ret; -} - -/* - * Remove all subdirectories of mon_data of ctrl_mon groups - * and monitor groups with given domain id. 
- */ -static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, - unsigned int dom_id) -{ - struct rdtgroup *prgrp, *crgrp; - char name[32]; - - list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { - sprintf(name, "mon_%s_%02d", r->name, dom_id); - kernfs_remove_by_name(prgrp->mon.mon_data_kn, name); - - list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) - kernfs_remove_by_name(crgrp->mon.mon_data_kn, name); - } -} - -static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, - struct rdt_domain *d, - struct rdt_resource *r, struct rdtgroup *prgrp) -{ - union mon_data_bits priv; - struct kernfs_node *kn; - struct mon_evt *mevt; - struct rmid_read rr; - char name[32]; - int ret; - - sprintf(name, "mon_%s_%02d", r->name, d->id); - /* create the directory */ - kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); - if (IS_ERR(kn)) - return PTR_ERR(kn); - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) - goto out_destroy; - - if (WARN_ON(list_empty(&r->evt_list))) { - ret = -EPERM; - goto out_destroy; - } - - priv.u.rid = r->rid; - priv.u.domid = d->id; - list_for_each_entry(mevt, &r->evt_list, list) { - priv.u.evtid = mevt->evtid; - ret = mon_addfile(kn, mevt->name, priv.priv); - if (ret) - goto out_destroy; - - if (resctrl_is_mbm_event(mevt->evtid)) - mon_event_read(&rr, r, d, prgrp, mevt->evtid, true); - } - kernfs_activate(kn); - return 0; - -out_destroy: - kernfs_remove(kn); - return ret; -} - -/* - * Add all subdirectories of mon_data for "ctrl_mon" groups - * and "monitor" groups with given domain id. 
- */ -static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, - struct rdt_domain *d) -{ - struct kernfs_node *parent_kn; - struct rdtgroup *prgrp, *crgrp; - struct list_head *head; - - list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { - parent_kn = prgrp->mon.mon_data_kn; - mkdir_mondata_subdir(parent_kn, d, r, prgrp); - - head = &prgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) { - parent_kn = crgrp->mon.mon_data_kn; - mkdir_mondata_subdir(parent_kn, d, r, crgrp); - } - } -} - -static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, - struct rdt_resource *r, - struct rdtgroup *prgrp) -{ - struct rdt_domain *dom; - int ret; - - /* Walking r->domains, ensure it can't race with cpuhp */ - lockdep_assert_cpus_held(); - - list_for_each_entry(dom, &r->domains, list) { - ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp); - if (ret) - return ret; - } - - return 0; -} - -/* - * This creates a directory mon_data which contains the monitored data. - * - * mon_data has one directory for each domain which are named - * in the format mon__. For ex: A mon_data - * with L3 domain looks as below: - * ./mon_data: - * mon_L3_00 - * mon_L3_01 - * mon_L3_02 - * ... - * - * Each domain directory has one file per event: - * ./mon_L3_00/: - * llc_occupancy - * - */ -static int mkdir_mondata_all(struct kernfs_node *parent_kn, - struct rdtgroup *prgrp, - struct kernfs_node **dest_kn) -{ - enum resctrl_res_level i; - struct rdt_resource *r; - struct kernfs_node *kn; - int ret; - - /* - * Create the mon_data directory first. - */ - ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn); - if (ret) - return ret; - - if (dest_kn) - *dest_kn = kn; - - /* - * Create the subdirectories for each domain. 
Note that all events - * in a domain like L3 are grouped into a resource whose domain is L3 - */ - for (i = 0; i < RDT_NUM_RESOURCES; i++) { - r = resctrl_arch_get_resource(i); - if (!r->mon_capable) - continue; - - ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); - if (ret) - goto out_destroy; - } - - return 0; - -out_destroy: - kernfs_remove(kn); - return ret; -} - -/** - * cbm_ensure_valid - Enforce validity on provided CBM - * @_val: Candidate CBM - * @r: RDT resource to which the CBM belongs - * - * The provided CBM represents all cache portions available for use. This - * may be represented by a bitmap that does not consist of contiguous ones - * and thus be an invalid CBM. - * Here the provided CBM is forced to be a valid CBM by only considering - * the first set of contiguous bits as valid and clearing all bits. - * The intention here is to provide a valid default CBM with which a new - * resource group is initialized. The user can follow this with a - * modification to the CBM if the default does not satisfy the - * requirements. - */ -static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r) -{ - unsigned int cbm_len = r->cache.cbm_len; - unsigned long first_bit, zero_bit; - unsigned long val = _val; - - if (!val) - return 0; - - first_bit = find_first_bit(&val, cbm_len); - zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); - - /* Clear any remaining bits to ensure contiguous region */ - bitmap_clear(&val, zero_bit, cbm_len - zero_bit); - return (u32)val; -} - -/* - * Initialize cache resources per RDT domain - * - * Set the RDT domain up to start off with all usable allocations. That is, - * all shareable and unused bits. All-zero CBM is invalid. 
- */ -static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s, - u32 closid) -{ - enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); - enum resctrl_conf_type t = s->conf_type; - struct resctrl_staged_config *cfg; - struct rdt_resource *r = s->res; - u32 used_b = 0, unused_b = 0; - unsigned long tmp_cbm; - enum rdtgrp_mode mode; - u32 peer_ctl, ctrl_val; - int i; - - cfg = &d->staged_config[t]; - cfg->have_new_ctrl = false; - cfg->new_ctrl = r->cache.shareable_bits; - used_b = r->cache.shareable_bits; - for (i = 0; i < closids_supported(); i++) { - if (closid_allocated(i) && i != closid) { - mode = rdtgroup_mode_by_closid(i); - if (mode == RDT_MODE_PSEUDO_LOCKSETUP) - /* - * ctrl values for locksetup aren't relevant - * until the schemata is written, and the mode - * becomes RDT_MODE_PSEUDO_LOCKED. - */ - continue; - /* - * If CDP is active include peer domain's - * usage to ensure there is no overlap - * with an exclusive group. - */ - if (resctrl_arch_get_cdp_enabled(r->rid)) - peer_ctl = resctrl_arch_get_config(r, d, i, - peer_type); - else - peer_ctl = 0; - ctrl_val = resctrl_arch_get_config(r, d, i, - s->conf_type); - used_b |= ctrl_val | peer_ctl; - if (mode == RDT_MODE_SHAREABLE) - cfg->new_ctrl |= ctrl_val | peer_ctl; - } - } - if (d->plr && d->plr->cbm > 0) - used_b |= d->plr->cbm; - unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); - unused_b &= BIT_MASK(r->cache.cbm_len) - 1; - cfg->new_ctrl |= unused_b; - /* - * Force the initial CBM to be valid, user can - * modify the CBM based on system availability. - */ - cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r); - /* - * Assign the u32 CBM to an unsigned long to ensure that - * bitmap_weight() does not access out-of-bound memory. 
- */ - tmp_cbm = cfg->new_ctrl; - if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { - rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id); - return -ENOSPC; - } - cfg->have_new_ctrl = true; - - return 0; -} - -/* - * Initialize cache resources with default values. - * - * A new RDT group is being created on an allocation capable (CAT) - * supporting system. Set this group up to start off with all usable - * allocations. - * - * If there are no more shareable bits available on any domain then - * the entire allocation will fail. - */ -static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid) -{ - struct rdt_domain *d; - int ret; - - list_for_each_entry(d, &s->res->domains, list) { - ret = __init_one_rdt_domain(d, s, closid); - if (ret < 0) - return ret; - } - - return 0; -} - -/* Initialize MBA resource with default values. */ -static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid) -{ - struct resctrl_staged_config *cfg; - struct rdt_domain *d; - - list_for_each_entry(d, &r->domains, list) { - if (is_mba_sc(r)) { - d->mbps_val[closid] = MBA_MAX_MBPS; - continue; - } - - cfg = &d->staged_config[CDP_NONE]; - cfg->new_ctrl = r->default_ctrl; - cfg->have_new_ctrl = true; - } -} - -/* Initialize the RDT group's allocations. 
*/ -static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) -{ - struct resctrl_schema *s; - struct rdt_resource *r; - int ret = 0; - - rdt_staged_configs_clear(); - - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - if (r->rid == RDT_RESOURCE_MBA || - r->rid == RDT_RESOURCE_SMBA) { - rdtgroup_init_mba(r, rdtgrp->closid); - if (is_mba_sc(r)) - continue; - } else { - ret = rdtgroup_init_cat(s, rdtgrp->closid); - if (ret < 0) - goto out; - } - - ret = resctrl_arch_update_domains(r, rdtgrp->closid); - if (ret < 0) { - rdt_last_cmd_puts("Failed to initialize allocations\n"); - goto out; - } - - } - - rdtgrp->mode = RDT_MODE_SHAREABLE; - -out: - rdt_staged_configs_clear(); - return ret; -} - -static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) -{ - int ret; - - if (!resctrl_arch_mon_capable()) - return 0; - - ret = alloc_rmid(rdtgrp->closid); - if (ret < 0) { - rdt_last_cmd_puts("Out of RMIDs\n"); - return ret; - } - rdtgrp->mon.rmid = ret; - - ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn); - if (ret) { - rdt_last_cmd_puts("kernfs subdir error\n"); - free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); - return ret; - } - - return 0; -} - -static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp) -{ - if (resctrl_arch_mon_capable()) - free_rmid(rgrp->closid, rgrp->mon.rmid); -} - -static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, - const char *name, umode_t mode, - enum rdt_group_type rtype, struct rdtgroup **r) -{ - struct rdtgroup *prdtgrp, *rdtgrp; - unsigned long files = 0; - struct kernfs_node *kn; - int ret; - - prdtgrp = rdtgroup_kn_lock_live(parent_kn); - if (!prdtgrp) { - ret = -ENODEV; - goto out_unlock; - } - - if (rtype == RDTMON_GROUP && - (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { - ret = -EINVAL; - rdt_last_cmd_puts("Pseudo-locking in progress\n"); - goto out_unlock; - } - - /* allocate the rdtgroup. 
*/ - rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); - if (!rdtgrp) { - ret = -ENOSPC; - rdt_last_cmd_puts("Kernel out of memory\n"); - goto out_unlock; - } - *r = rdtgrp; - rdtgrp->mon.parent = prdtgrp; - rdtgrp->type = rtype; - INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); - - /* kernfs creates the directory for rdtgrp */ - kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); - if (IS_ERR(kn)) { - ret = PTR_ERR(kn); - rdt_last_cmd_puts("kernfs create error\n"); - goto out_free_rgrp; - } - rdtgrp->kn = kn; - - /* - * kernfs_remove() will drop the reference count on "kn" which - * will free it. But we still need it to stick around for the - * rdtgroup_kn_unlock(kn) call. Take one extra reference here, - * which will be dropped by kernfs_put() in rdtgroup_remove(). - */ - kernfs_get(kn); - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) { - rdt_last_cmd_puts("kernfs perm error\n"); - goto out_destroy; - } - - if (rtype == RDTCTRL_GROUP) { - files = RFTYPE_BASE | RFTYPE_CTRL; - if (resctrl_arch_mon_capable()) - files |= RFTYPE_MON; - } else { - files = RFTYPE_BASE | RFTYPE_MON; - } - - ret = rdtgroup_add_files(kn, files); - if (ret) { - rdt_last_cmd_puts("kernfs fill error\n"); - goto out_destroy; - } - - /* - * The caller unlocks the parent_kn upon success. - */ - return 0; - -out_destroy: - kernfs_put(rdtgrp->kn); - kernfs_remove(rdtgrp->kn); -out_free_rgrp: - kfree(rdtgrp); -out_unlock: - rdtgroup_kn_unlock(parent_kn); - return ret; -} - -static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) -{ - kernfs_remove(rgrp->kn); - rdtgroup_remove(rgrp); -} - -/* - * Create a monitor group under "mon_groups" directory of a control - * and monitor group(ctrl_mon). This is a resource group - * to monitor a subset of tasks and cpus in its parent ctrl_mon group. 
- */ -static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, - const char *name, umode_t mode) -{ - struct rdtgroup *rdtgrp, *prgrp; - int ret; - - ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp); - if (ret) - return ret; - - prgrp = rdtgrp->mon.parent; - rdtgrp->closid = prgrp->closid; - - ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); - if (ret) { - mkdir_rdt_prepare_clean(rdtgrp); - goto out_unlock; - } - - kernfs_activate(rdtgrp->kn); - - /* - * Add the rdtgrp to the list of rdtgrps the parent - * ctrl_mon group has to track. - */ - list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); - -out_unlock: - rdtgroup_kn_unlock(parent_kn); - return ret; -} - -/* - * These are rdtgroups created under the root directory. Can be used - * to allocate and monitor resources. - */ -static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, - const char *name, umode_t mode) -{ - struct rdtgroup *rdtgrp; - struct kernfs_node *kn; - u32 closid; - int ret; - - ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp); - if (ret) - return ret; - - kn = rdtgrp->kn; - ret = closid_alloc(); - if (ret < 0) { - rdt_last_cmd_puts("Out of CLOSIDs\n"); - goto out_common_fail; - } - closid = ret; - ret = 0; - - rdtgrp->closid = closid; - - ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); - if (ret) - goto out_closid_free; - - kernfs_activate(rdtgrp->kn); - - ret = rdtgroup_init_alloc(rdtgrp); - if (ret < 0) - goto out_rmid_free; - - list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); - - if (resctrl_arch_mon_capable()) { - /* - * Create an empty mon_groups directory to hold the subset - * of tasks and cpus to monitor. 
- */ - ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL); - if (ret) { - rdt_last_cmd_puts("kernfs subdir error\n"); - goto out_del_list; - } - } - - goto out_unlock; - -out_del_list: - list_del(&rdtgrp->rdtgroup_list); -out_rmid_free: - mkdir_rdt_prepare_rmid_free(rdtgrp); -out_closid_free: - closid_free(closid); -out_common_fail: - mkdir_rdt_prepare_clean(rdtgrp); -out_unlock: - rdtgroup_kn_unlock(parent_kn); - return ret; -} - -/* - * We allow creating mon groups only with in a directory called "mon_groups" - * which is present in every ctrl_mon group. Check if this is a valid - * "mon_groups" directory. - * - * 1. The directory should be named "mon_groups". - * 2. The mon group itself should "not" be named "mon_groups". - * This makes sure "mon_groups" directory always has a ctrl_mon group - * as parent. - */ -static bool is_mon_groups(struct kernfs_node *kn, const char *name) -{ - return (!strcmp(kn->name, "mon_groups") && - strcmp(name, "mon_groups")); -} - -static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, - umode_t mode) -{ - /* Do not accept '\n' to avoid unparsable situation. */ - if (strchr(name, '\n')) - return -EINVAL; - - /* - * If the parent directory is the root directory and RDT - * allocation is supported, add a control and monitoring - * subdirectory - */ - if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn) - return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); - - /* - * If RDT monitoring is supported and the parent directory is a valid - * "mon_groups" directory, add a monitoring subdirectory. 
- */ - if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name)) - return rdtgroup_mkdir_mon(parent_kn, name, mode); - - return -EPERM; -} - -static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) -{ - struct rdtgroup *prdtgrp = rdtgrp->mon.parent; - u32 closid, rmid; - int cpu; - - /* Give any tasks back to the parent group */ - rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); - - /* Update per cpu rmid of the moved CPUs first */ - closid = rdtgrp->closid; - rmid = prdtgrp->mon.rmid; - for_each_cpu(cpu, &rdtgrp->cpu_mask) - resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); - - /* - * Update the MSR on moved CPUs and CPUs which have moved - * task running on them. - */ - cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); - update_closid_rmid(tmpmask, NULL); - - rdtgrp->flags = RDT_DELETED; - free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); - - /* - * Remove the rdtgrp from the parent ctrl_mon group's list - */ - WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); - list_del(&rdtgrp->mon.crdtgrp_list); - - kernfs_remove(rdtgrp->kn); - - return 0; -} - -static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp) -{ - rdtgrp->flags = RDT_DELETED; - list_del(&rdtgrp->rdtgroup_list); - - kernfs_remove(rdtgrp->kn); - return 0; -} - -static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) -{ - u32 closid, rmid; - int cpu; - - /* Give any tasks back to the default group */ - rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); - - /* Give any CPUs back to the default group */ - cpumask_or(&rdtgroup_default.cpu_mask, - &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); - - /* Update per cpu closid and rmid of the moved CPUs first */ - closid = rdtgroup_default.closid; - rmid = rdtgroup_default.mon.rmid; - for_each_cpu(cpu, &rdtgrp->cpu_mask) - resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); - - /* - * Update the MSR on moved CPUs and CPUs which have moved - * task running on them. 
- */ - cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); - update_closid_rmid(tmpmask, NULL); - - free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); - closid_free(rdtgrp->closid); - - rdtgroup_ctrl_remove(rdtgrp); - - /* - * Free all the child monitor group rmids. - */ - free_all_child_rdtgrp(rdtgrp); - - return 0; -} - -static int rdtgroup_rmdir(struct kernfs_node *kn) -{ - struct kernfs_node *parent_kn = kn->parent; - struct rdtgroup *rdtgrp; - cpumask_var_t tmpmask; - int ret = 0; - - if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) - return -ENOMEM; - - rdtgrp = rdtgroup_kn_lock_live(kn); - if (!rdtgrp) { - ret = -EPERM; - goto out; - } - - /* - * If the rdtgroup is a ctrl_mon group and parent directory - * is the root directory, remove the ctrl_mon group. - * - * If the rdtgroup is a mon group and parent directory - * is a valid "mon_groups" directory, remove the mon group. - */ - if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && - rdtgrp != &rdtgroup_default) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - ret = rdtgroup_ctrl_remove(rdtgrp); - } else { - ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask); - } - } else if (rdtgrp->type == RDTMON_GROUP && - is_mon_groups(parent_kn, kn->name)) { - ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask); - } else { - ret = -EPERM; - } - -out: - rdtgroup_kn_unlock(kn); - free_cpumask_var(tmpmask); - return ret; -} - -/** - * mongrp_reparent() - replace parent CTRL_MON group of a MON group - * @rdtgrp: the MON group whose parent should be replaced - * @new_prdtgrp: replacement parent CTRL_MON group for @rdtgrp - * @cpus: cpumask provided by the caller for use during this call - * - * Replaces the parent CTRL_MON group for a MON group, resulting in all member - * tasks' CLOSID immediately changing to that of the new parent group. - * Monitoring data for the group is unaffected by this operation. 
- */ -static void mongrp_reparent(struct rdtgroup *rdtgrp, - struct rdtgroup *new_prdtgrp, - cpumask_var_t cpus) -{ - struct rdtgroup *prdtgrp = rdtgrp->mon.parent; - - WARN_ON(rdtgrp->type != RDTMON_GROUP); - WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP); - - /* Nothing to do when simply renaming a MON group. */ - if (prdtgrp == new_prdtgrp) - return; - - WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); - list_move_tail(&rdtgrp->mon.crdtgrp_list, - &new_prdtgrp->mon.crdtgrp_list); - - rdtgrp->mon.parent = new_prdtgrp; - rdtgrp->closid = new_prdtgrp->closid; - - /* Propagate updated closid to all tasks in this group. */ - rdt_move_group_tasks(rdtgrp, rdtgrp, cpus); - - update_closid_rmid(cpus, NULL); -} - -static int rdtgroup_rename(struct kernfs_node *kn, - struct kernfs_node *new_parent, const char *new_name) -{ - struct rdtgroup *new_prdtgrp; - struct rdtgroup *rdtgrp; - cpumask_var_t tmpmask; - int ret; - - rdtgrp = kernfs_to_rdtgroup(kn); - new_prdtgrp = kernfs_to_rdtgroup(new_parent); - if (!rdtgrp || !new_prdtgrp) - return -ENOENT; - - /* Release both kernfs active_refs before obtaining rdtgroup mutex. */ - rdtgroup_kn_get(rdtgrp, kn); - rdtgroup_kn_get(new_prdtgrp, new_parent); - - mutex_lock(&rdtgroup_mutex); - - rdt_last_cmd_clear(); - - /* - * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if - * either kernfs_node is a file. 
- */ - if (kernfs_type(kn) != KERNFS_DIR || - kernfs_type(new_parent) != KERNFS_DIR) { - rdt_last_cmd_puts("Source and destination must be directories"); - ret = -EPERM; - goto out; - } - - if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) { - ret = -ENOENT; - goto out; - } - - if (rdtgrp->type != RDTMON_GROUP || !kn->parent || - !is_mon_groups(kn->parent, kn->name)) { - rdt_last_cmd_puts("Source must be a MON group\n"); - ret = -EPERM; - goto out; - } - - if (!is_mon_groups(new_parent, new_name)) { - rdt_last_cmd_puts("Destination must be a mon_groups subdirectory\n"); - ret = -EPERM; - goto out; - } - - /* - * If the MON group is monitoring CPUs, the CPUs must be assigned to the - * current parent CTRL_MON group and therefore cannot be assigned to - * the new parent, making the move illegal. - */ - if (!cpumask_empty(&rdtgrp->cpu_mask) && - rdtgrp->mon.parent != new_prdtgrp) { - rdt_last_cmd_puts("Cannot move a MON group that monitors CPUs\n"); - ret = -EPERM; - goto out; - } - - /* - * Allocate the cpumask for use in mongrp_reparent() to avoid the - * possibility of failing to allocate it after kernfs_rename() has - * succeeded. - */ - if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) { - ret = -ENOMEM; - goto out; - } - - /* - * Perform all input validation and allocations needed to ensure - * mongrp_reparent() will succeed before calling kernfs_rename(), - * otherwise it would be necessary to revert this call if - * mongrp_reparent() failed. 
- */ - ret = kernfs_rename(kn, new_parent, new_name); - if (!ret) - mongrp_reparent(rdtgrp, new_prdtgrp, tmpmask); - - free_cpumask_var(tmpmask); - -out: - mutex_unlock(&rdtgroup_mutex); - rdtgroup_kn_put(rdtgrp, kn); - rdtgroup_kn_put(new_prdtgrp, new_parent); - return ret; -} - -static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) -{ - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) - seq_puts(seq, ",cdp"); - - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) - seq_puts(seq, ",cdpl2"); - - if (is_mba_sc(resctrl_arch_get_resource(RDT_RESOURCE_MBA))) - seq_puts(seq, ",mba_MBps"); - - if (resctrl_debug) - seq_puts(seq, ",debug"); - - return 0; -} - -static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { - .mkdir = rdtgroup_mkdir, - .rmdir = rdtgroup_rmdir, - .rename = rdtgroup_rename, - .show_options = rdtgroup_show_options, -}; - -static int rdtgroup_setup_root(struct rdt_fs_context *ctx) -{ - rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, - KERNFS_ROOT_CREATE_DEACTIVATED | - KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, - &rdtgroup_default); - if (IS_ERR(rdt_root)) - return PTR_ERR(rdt_root); - - ctx->kfc.root = rdt_root; - rdtgroup_default.kn = kernfs_root_to_node(rdt_root); - - return 0; -} - -static void rdtgroup_destroy_root(void) -{ - kernfs_destroy_root(rdt_root); - rdtgroup_default.kn = NULL; -} - -static void rdtgroup_setup_default(void) -{ - mutex_lock(&rdtgroup_mutex); - - rdtgroup_default.closid = RESCTRL_RESERVED_CLOSID; - rdtgroup_default.mon.rmid = RESCTRL_RESERVED_RMID; - rdtgroup_default.type = RDTCTRL_GROUP; - INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); - - list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); - - mutex_unlock(&rdtgroup_mutex); -} - -static void domain_destroy_mon_state(struct rdt_domain *d) -{ - bitmap_free(d->rmid_busy_llc); - kfree(d->mbm_total); - kfree(d->mbm_local); -} - -void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) -{ - 
mutex_lock(&rdtgroup_mutex); - - if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) - mba_sc_domain_destroy(r, d); - - if (!r->mon_capable) - goto out_unlock; - - /* - * If resctrl is mounted, remove all the - * per domain monitor data directories. - */ - if (resctrl_mounted && resctrl_arch_mon_capable()) - rmdir_mondata_subdir_allrdtgrp(r, d->id); - - if (resctrl_is_mbm_enabled()) - cancel_delayed_work(&d->mbm_over); - if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) { - /* - * When a package is going down, forcefully - * decrement rmid->ebusy. There is no way to know - * that the L3 was flushed and hence may lead to - * incorrect counts in rare scenarios, but leaving - * the RMID as busy creates RMID leaks if the - * package never comes back. - */ - __check_limbo(d, true); - cancel_delayed_work(&d->cqm_limbo); - } - - domain_destroy_mon_state(d); - -out_unlock: - mutex_unlock(&rdtgroup_mutex); -} - -static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) -{ - u32 idx_limit = resctrl_arch_system_num_rmid_idx(); - size_t tsize; - - if (resctrl_arch_is_llc_occupancy_enabled()) { - d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL); - if (!d->rmid_busy_llc) - return -ENOMEM; - } - if (resctrl_arch_is_mbm_total_enabled()) { - tsize = sizeof(*d->mbm_total); - d->mbm_total = kcalloc(idx_limit, tsize, GFP_KERNEL); - if (!d->mbm_total) { - bitmap_free(d->rmid_busy_llc); - return -ENOMEM; - } - } - if (resctrl_arch_is_mbm_local_enabled()) { - tsize = sizeof(*d->mbm_local); - d->mbm_local = kcalloc(idx_limit, tsize, GFP_KERNEL); - if (!d->mbm_local) { - bitmap_free(d->rmid_busy_llc); - kfree(d->mbm_total); - return -ENOMEM; - } - } - - return 0; -} - -int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) -{ - int err = 0; - - mutex_lock(&rdtgroup_mutex); - - if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) { - /* RDT_RESOURCE_MBA is never mon_capable */ - err = mba_sc_domain_allocate(r, d); - goto 
out_unlock; - } - - if (!r->mon_capable) - goto out_unlock; - - err = domain_setup_mon_state(r, d); - if (err) - goto out_unlock; - - if (resctrl_is_mbm_enabled()) { - INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); - mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL, - RESCTRL_PICK_ANY_CPU); - } - - if (resctrl_arch_is_llc_occupancy_enabled()) - INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo); - - /* - * If the filesystem is not mounted then only the default resource group - * exists. Creation of its directories is deferred until mount time - * by rdt_get_tree() calling mkdir_mondata_all(). - * If resctrl is mounted, add per domain monitor data directories. - */ - if (resctrl_mounted && resctrl_arch_mon_capable()) - mkdir_mondata_subdir_allrdtgrp(r, d); - -out_unlock: - mutex_unlock(&rdtgroup_mutex); - - return err; -} - -void resctrl_online_cpu(unsigned int cpu) -{ - mutex_lock(&rdtgroup_mutex); - /* The CPU is set in default rdtgroup after online. */ - cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); - mutex_unlock(&rdtgroup_mutex); -} - -static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) -{ - struct rdtgroup *cr; - - list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { - if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) - break; - } -} - -void resctrl_offline_cpu(unsigned int cpu) -{ - struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); - struct rdtgroup *rdtgrp; - struct rdt_domain *d; - - mutex_lock(&rdtgroup_mutex); - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { - if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { - clear_childcpus(rdtgrp, cpu); - break; - } - } - - if (!l3->mon_capable) - goto out_unlock; - - d = resctrl_get_domain_from_cpu(cpu, l3); - if (d) { - if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) { - cancel_delayed_work(&d->mbm_over); - mbm_setup_overflow_handler(d, 0, cpu); - } - if (resctrl_arch_is_llc_occupancy_enabled() && - cpu == d->cqm_work_cpu 
&& has_busy_rmid(d)) { - cancel_delayed_work(&d->cqm_limbo); - cqm_setup_limbo_handler(d, 0, cpu); - } - } - -out_unlock: - mutex_unlock(&rdtgroup_mutex); -} - -/* - * resctrl_init - resctrl filesystem initialization - * - * Setup resctrl file system including set up root, create mount point, - * register resctrl filesystem, and initialize files under root directory. - * - * Return: 0 on success or -errno - */ -int resctrl_init(void) -{ - int ret = 0; - - seq_buf_init(&last_cmd_status, last_cmd_status_buf, - sizeof(last_cmd_status_buf)); - - rdtgroup_setup_default(); - - thread_throttle_mode_init(); - - ret = resctrl_mon_resource_init(); - if (ret) - return ret; - - ret = sysfs_create_mount_point(fs_kobj, "resctrl"); - if (ret) - return ret; - - ret = register_filesystem(&rdt_fs_type); - if (ret) - goto cleanup_mountpoint; - - /* - * Adding the resctrl debugfs directory here may not be ideal since - * it would let the resctrl debugfs directory appear on the debugfs - * filesystem before the resctrl filesystem is mounted. - * It may also be ok since that would enable debugging of RDT before - * resctrl is mounted. - * The reason why the debugfs directory is created here and not in - * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and - * during the debugfs directory creation also &sb->s_type->i_mutex_key - * (the lockdep class of inode->i_rwsem). Other filesystem - * interactions (eg. SyS_getdents) have the lock ordering: - * &sb->s_type->i_mutex_key --> &mm->mmap_lock - * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex - * is taken, thus creating dependency: - * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause - * issues considering the other two lock dependencies. - * By creating the debugfs directory here we avoid a dependency - * that may cause deadlock (even though file operations cannot - * occur until the filesystem is mounted, but I do not know how to - * tell lockdep that). 
- */ - debugfs_resctrl = debugfs_create_dir("resctrl", NULL); - - return 0; - -cleanup_mountpoint: - sysfs_remove_mount_point(fs_kobj, "resctrl"); - - return ret; -} - -void resctrl_exit(void) -{ - debugfs_remove_recursive(debugfs_resctrl); - unregister_filesystem(&rdt_fs_type); - sysfs_remove_mount_point(fs_kobj, "resctrl"); - - resctrl_mon_resource_exit(); + for_each_capable_rdt_resource(r) + reset_all_ctrls(r); } diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c index e69de29bb2d1..f5cdabe2ee9e 100644 --- a/fs/resctrl/ctrlmondata.c +++ b/fs/resctrl/ctrlmondata.c @@ -0,0 +1,532 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Resource Director Technology(RDT) + * - Cache Allocation code. + * + * Copyright (C) 2016 Intel Corporation + * + * Authors: + * Fenghua Yu + * Tony Luck + * + * More information about RDT be found in the Intel (R) x86 Architecture + * Software Developer Manual June 2016, volume 3, section 17.17. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include "internal.h" + +struct rdt_parse_data { + struct rdtgroup *rdtgrp; + char *buf; +}; + +typedef int(ctrlval_parser_t)(struct rdt_parse_data *data, + struct resctrl_schema *s, struct rdt_domain *d); + +/* + * Check whether MBA bandwidth percentage value is correct. The value is + * checked against the minimum and max bandwidth values specified by the + * hardware. The allocated bandwidth percentage is rounded to the next + * control step available on the hardware. + */ +static bool bw_validate(char *buf, u32 *data, struct rdt_resource *r) +{ + int ret; + u32 bw; + + /* + * Only linear delay values is supported for current Intel SKUs. 
+ */ + if (!r->membw.delay_linear && r->membw.arch_needs_linear) { + rdt_last_cmd_puts("No support for non-linear MB domains\n"); + return false; + } + + ret = kstrtou32(buf, 10, &bw); + if (ret) { + rdt_last_cmd_printf("Invalid MB value %s\n", buf); + return false; + } + + /* Nothing else to do if software controller is enabled. */ + if (is_mba_sc(r)) { + *data = bw; + return true; + } + + if (bw < r->membw.min_bw || bw > r->default_ctrl) { + rdt_last_cmd_printf("MB value %u out of range [%d,%d]\n", bw, + r->membw.min_bw, r->default_ctrl); + return false; + } + + *data = roundup(bw, (unsigned long)r->membw.bw_gran); + return true; +} + +static int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, + struct rdt_domain *d) +{ + struct resctrl_staged_config *cfg; + u32 closid = data->rdtgrp->closid; + struct rdt_resource *r = s->res; + u32 bw_val; + + cfg = &d->staged_config[s->conf_type]; + if (cfg->have_new_ctrl) { + rdt_last_cmd_printf("Duplicate domain %d\n", d->id); + return -EINVAL; + } + + if (!bw_validate(data->buf, &bw_val, r)) + return -EINVAL; + + if (is_mba_sc(r)) { + d->mbps_val[closid] = bw_val; + return 0; + } + + cfg->new_ctrl = bw_val; + cfg->have_new_ctrl = true; + + return 0; +} + +/* + * Check whether a cache bit mask is valid. + * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID: + * - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1 + * - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1 + * + * Haswell does not support a non-contiguous 1s value and additionally + * requires at least two bits set. + * AMD allows non-contiguous bitmasks. 
+ */ +static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) +{ + unsigned long first_bit, zero_bit, val; + unsigned int cbm_len = r->cache.cbm_len; + int ret; + + ret = kstrtoul(buf, 16, &val); + if (ret) { + rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf); + return false; + } + + if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) { + rdt_last_cmd_puts("Mask out of range\n"); + return false; + } + + first_bit = find_first_bit(&val, cbm_len); + zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); + + /* Are non-contiguous bitmasks allowed? */ + if (!r->cache.arch_has_sparse_bitmasks && + (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) { + rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", + val); + return false; + } + + if ((zero_bit - first_bit) < r->cache.min_cbm_bits) { + rdt_last_cmd_printf("Need at least %d bits in the mask\n", + r->cache.min_cbm_bits); + return false; + } + + *data = val; + return true; +} + +/* + * Read one cache bit mask (hex). Check that it is valid for the current + * resource type. + */ +static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, + struct rdt_domain *d) +{ + struct rdtgroup *rdtgrp = data->rdtgrp; + struct resctrl_staged_config *cfg; + struct rdt_resource *r = s->res; + u32 cbm_val; + + cfg = &d->staged_config[s->conf_type]; + if (cfg->have_new_ctrl) { + rdt_last_cmd_printf("Duplicate domain %d\n", d->id); + return -EINVAL; + } + + /* + * Cannot set up more than one pseudo-locked region in a cache + * hierarchy. 
+ */ + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && + rdtgroup_pseudo_locked_in_hierarchy(d)) { + rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n"); + return -EINVAL; + } + + if (!cbm_validate(data->buf, &cbm_val, r)) + return -EINVAL; + + if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE || + rdtgrp->mode == RDT_MODE_SHAREABLE) && + rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) { + rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n"); + return -EINVAL; + } + + /* + * The CBM may not overlap with the CBM of another closid if + * either is exclusive. + */ + if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) { + rdt_last_cmd_puts("Overlaps with exclusive group\n"); + return -EINVAL; + } + + if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) { + if (rdtgrp->mode == RDT_MODE_EXCLUSIVE || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + rdt_last_cmd_puts("Overlaps with other group\n"); + return -EINVAL; + } + } + + cfg->new_ctrl = cbm_val; + cfg->have_new_ctrl = true; + + return 0; +} + +static ctrlval_parser_t *get_parser(struct rdt_resource *res) +{ + if (res->fflags & RFTYPE_RES_CACHE) + return &parse_cbm; + else + return &parse_bw; +} + +/* + * For each domain in this resource we expect to find a series of: + * id=mask + * separated by ";". The "id" is in decimal, and must match one of + * the "id"s for this resource. 
+ */ +static int parse_line(char *line, struct resctrl_schema *s, + struct rdtgroup *rdtgrp) +{ + ctrlval_parser_t *parse_ctrlval = get_parser(s->res); + enum resctrl_conf_type t = s->conf_type; + struct resctrl_staged_config *cfg; + struct rdt_resource *r = s->res; + struct rdt_parse_data data; + char *dom = NULL, *id; + struct rdt_domain *d; + unsigned long dom_id; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && + (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) { + rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n"); + return -EINVAL; + } + +next: + if (!line || line[0] == '\0') + return 0; + dom = strsep(&line, ";"); + id = strsep(&dom, "="); + if (!dom || kstrtoul(id, 10, &dom_id)) { + rdt_last_cmd_puts("Missing '=' or non-numeric domain\n"); + return -EINVAL; + } + dom = strim(dom); + list_for_each_entry(d, &r->domains, list) { + if (d->id == dom_id) { + data.buf = dom; + data.rdtgrp = rdtgrp; + if (parse_ctrlval(&data, s, d)) + return -EINVAL; + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + cfg = &d->staged_config[t]; + /* + * In pseudo-locking setup mode and just + * parsed a valid CBM that should be + * pseudo-locked. Only one locked region per + * resource group and domain so just do + * the required initialization for single + * region and return. 
+ */ + rdtgrp->plr->s = s; + rdtgrp->plr->d = d; + rdtgrp->plr->cbm = cfg->new_ctrl; + d->plr = rdtgrp->plr; + return 0; + } + goto next; + } + } + return -EINVAL; +} + +static int rdtgroup_parse_resource(char *resname, char *tok, + struct rdtgroup *rdtgrp) +{ + struct resctrl_schema *s; + + list_for_each_entry(s, &resctrl_schema_all, list) { + if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid) + return parse_line(tok, s, rdtgrp); + } + rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname); + return -EINVAL; +} + +ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct resctrl_schema *s; + struct rdtgroup *rdtgrp; + struct rdt_resource *r; + char *tok, *resname; + int ret = 0; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + buf[nbytes - 1] = '\0'; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + rdt_last_cmd_clear(); + + /* + * No changes to pseudo-locked region allowed. It has to be removed + * and re-created instead. + */ + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + ret = -EINVAL; + rdt_last_cmd_puts("Resource group is pseudo-locked\n"); + goto out; + } + + rdt_staged_configs_clear(); + + while ((tok = strsep(&buf, "\n")) != NULL) { + resname = strim(strsep(&tok, ":")); + if (!tok) { + rdt_last_cmd_puts("Missing ':'\n"); + ret = -EINVAL; + goto out; + } + if (tok[0] == '\0') { + rdt_last_cmd_printf("Missing '%s' value\n", resname); + ret = -EINVAL; + goto out; + } + ret = rdtgroup_parse_resource(resname, tok, rdtgrp); + if (ret) + goto out; + } + + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + + /* + * Writes to mba_sc resources update the software controller, + * not the control MSR. 
+ */ + if (is_mba_sc(r)) + continue; + + ret = resctrl_arch_update_domains(r, rdtgrp->closid); + if (ret) + goto out; + } + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + /* + * If pseudo-locking fails we keep the resource group in + * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service + * active and updated for just the domain the pseudo-locked + * region was requested for. + */ + ret = rdtgroup_pseudo_lock_create(rdtgrp); + } + +out: + rdt_staged_configs_clear(); + rdtgroup_kn_unlock(of->kn); + return ret ?: nbytes; +} + +static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid) +{ + struct rdt_resource *r = schema->res; + struct rdt_domain *dom; + bool sep = false; + u32 ctrl_val; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + seq_printf(s, "%*s:", max_name_width, schema->name); + list_for_each_entry(dom, &r->domains, list) { + if (sep) + seq_puts(s, ";"); + + if (is_mba_sc(r)) + ctrl_val = dom->mbps_val[closid]; + else + ctrl_val = resctrl_arch_get_config(r, dom, closid, + schema->conf_type); + + seq_printf(s, r->format_str, dom->id, max_data_width, + ctrl_val); + sep = true; + } + seq_puts(s, "\n"); +} + +int rdtgroup_schemata_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct resctrl_schema *schema; + struct rdtgroup *rdtgrp; + int ret = 0; + u32 closid; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + list_for_each_entry(schema, &resctrl_schema_all, list) { + seq_printf(s, "%s:uninitialized\n", schema->name); + } + } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + if (!rdtgrp->plr->d) { + rdt_last_cmd_clear(); + rdt_last_cmd_puts("Cache domain offline\n"); + ret = -ENODEV; + } else { + seq_printf(s, "%s:%d=%x\n", + rdtgrp->plr->s->res->name, + rdtgrp->plr->d->id, + rdtgrp->plr->cbm); + } + } else { + closid = rdtgrp->closid; + list_for_each_entry(schema, &resctrl_schema_all, 
list) { + if (closid < schema->num_closid) + show_doms(s, schema, closid); + } + } + } else { + ret = -ENOENT; + } + rdtgroup_kn_unlock(of->kn); + return ret; +} + +static int smp_mon_event_count(void *arg) +{ + mon_event_count(arg); + + return 0; +} + +void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, + struct rdt_domain *d, struct rdtgroup *rdtgrp, + int evtid, int first) +{ + int cpu; + + /* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + /* + * Setup the parameters to pass to mon_event_count() to read the data. + */ + rr->rgrp = rdtgrp; + rr->evtid = evtid; + rr->r = r; + rr->d = d; + rr->val = 0; + rr->first = first; + rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid); + if (IS_ERR(rr->arch_mon_ctx)) { + rr->err = -EINVAL; + return; + } + + cpu = cpumask_any_housekeeping(&d->cpu_mask, RESCTRL_PICK_ANY_CPU); + + /* + * cpumask_any_housekeeping() prefers housekeeping CPUs, but + * are all the CPUs nohz_full? If yes, pick a CPU to IPI. + * MPAM's resctrl_arch_rmid_read() is unable to read the + * counters on some platforms if its called in IRQ context. 
+ */ + if (tick_nohz_full_cpu(cpu)) + smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); + else + smp_call_on_cpu(cpu, smp_mon_event_count, rr, false); + + resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx); +} + +int rdtgroup_mondata_show(struct seq_file *m, void *arg) +{ + struct kernfs_open_file *of = m->private; + u32 resid, evtid, domid; + struct rdtgroup *rdtgrp; + struct rdt_resource *r; + union mon_data_bits md; + struct rdt_domain *d; + struct rmid_read rr; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + ret = -ENOENT; + goto out; + } + + md.priv = of->kn->priv; + resid = md.u.rid; + domid = md.u.domid; + evtid = md.u.evtid; + + r = resctrl_arch_get_resource(resid); + d = resctrl_arch_find_domain(r, domid); + if (IS_ERR_OR_NULL(d)) { + ret = -ENOENT; + goto out; + } + + mon_event_read(&rr, r, d, rdtgrp, evtid, false); + + if (rr.err == -EIO) + seq_puts(m, "Error\n"); + else if (rr.err == -EINVAL) + seq_puts(m, "Unavailable\n"); + else + seq_printf(m, "%llu\n", rr.val); + +out: + rdtgroup_kn_unlock(of->kn); + return ret; +} diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h index e69de29bb2d1..f73267762a87 100644 --- a/fs/resctrl/internal.h +++ b/fs/resctrl/internal.h @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_RESCTRL_INTERNAL_H +#define _FS_RESCTRL_INTERNAL_H + +#include +#include +#include +#include +#include +#include + +#include + +/** + * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that + * aren't marked nohz_full + * @mask: The mask to pick a CPU from. + * @exclude_cpu:The CPU to avoid picking. + * + * Returns a CPU from @mask, but not @exclude_cpu. If there are housekeeping + * CPUs that don't use nohz_full, these are preferred. Pass + * RESCTRL_PICK_ANY_CPU to avoid excluding any CPUs. + * + * When a CPU is excluded, returns >= nr_cpu_ids if no CPUs are available. 
+ */ +static inline unsigned int +cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu) +{ + unsigned int cpu, hk_cpu; + + if (exclude_cpu == RESCTRL_PICK_ANY_CPU) + cpu = cpumask_any(mask); + else + cpu = cpumask_any_but(mask, exclude_cpu); + + if (!IS_ENABLED(CONFIG_NO_HZ_FULL)) + return cpu; + + /* If the CPU picked isn't marked nohz_full nothing more needs doing. */ + if (cpu < nr_cpu_ids && !tick_nohz_full_cpu(cpu)) + return cpu; + + /* Try to find a CPU that isn't nohz_full to use in preference */ + hk_cpu = cpumask_nth_andnot(0, mask, tick_nohz_full_mask); + if (hk_cpu == exclude_cpu) + hk_cpu = cpumask_nth_andnot(1, mask, tick_nohz_full_mask); + + if (hk_cpu < nr_cpu_ids) + cpu = hk_cpu; + + return cpu; +} + +struct rdt_fs_context { + struct kernfs_fs_context kfc; + bool enable_cdpl2; + bool enable_cdpl3; + bool enable_mba_mbps; + bool enable_debug; +}; + +static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc) +{ + struct kernfs_fs_context *kfc = fc->fs_private; + + return container_of(kfc, struct rdt_fs_context, kfc); +} + +/** + * struct mon_evt - Entry in the event list of a resource + * @evtid: event id + * @name: name of the event + * @configurable: true if the event is configurable + * @list: entry in &rdt_resource->evt_list + */ +struct mon_evt { + enum resctrl_event_id evtid; + char *name; + bool configurable; + struct list_head list; +}; + +/** + * union mon_data_bits - Monitoring details for each event file + * @priv: Used to store monitoring event data in @u + * as kernfs private data + * @rid: Resource id associated with the event file + * @evtid: Event id associated with the event file + * @domid: The domain to which the event file belongs + * @u: Name of the bit fields struct + */ +union mon_data_bits { + void *priv; + struct { + unsigned int rid : 10; + enum resctrl_event_id evtid : 8; + unsigned int domid : 14; + } u; +}; + +struct rmid_read { + struct rdtgroup *rgrp; + struct rdt_resource *r; + struct 
rdt_domain *d; + enum resctrl_event_id evtid; + bool first; + int err; + u64 val; + void *arch_mon_ctx; +}; + +extern struct list_head resctrl_schema_all; +extern bool resctrl_mounted; + +enum rdt_group_type { + RDTCTRL_GROUP = 0, + RDTMON_GROUP, + RDT_NUM_GROUP, +}; + +/** + * enum rdtgrp_mode - Mode of a RDT resource group + * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations + * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed + * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking + * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations + * allowed AND the allocations are Cache Pseudo-Locked + * @RDT_NUM_MODES: Total number of modes + * + * The mode of a resource group enables control over the allowed overlap + * between allocations associated with different resource groups (classes + * of service). User is able to modify the mode of a resource group by + * writing to the "mode" resctrl file associated with the resource group. + * + * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by + * writing the appropriate text to the "mode" file. A resource group enters + * "pseudo-locked" mode after the schemata is written while the resource + * group is in "pseudo-locksetup" mode. + */ +enum rdtgrp_mode { + RDT_MODE_SHAREABLE = 0, + RDT_MODE_EXCLUSIVE, + RDT_MODE_PSEUDO_LOCKSETUP, + RDT_MODE_PSEUDO_LOCKED, + + /* Must be last */ + RDT_NUM_MODES, +}; + +/** + * struct mongroup - store mon group's data in resctrl fs. + * @mon_data_kn: kernfs node for the mon_data directory + * @parent: parent rdtgrp + * @crdtgrp_list: child rdtgroup node list + * @rmid: rmid for this rdtgroup + */ +struct mongroup { + struct kernfs_node *mon_data_kn; + struct rdtgroup *parent; + struct list_head crdtgrp_list; + u32 rmid; +}; + +/** + * struct rdtgroup - store rdtgroup's data in resctrl file system. 
+ * @kn: kernfs node + * @rdtgroup_list: linked list for all rdtgroups + * @closid: closid for this rdtgroup + * @cpu_mask: CPUs assigned to this rdtgroup + * @flags: status bits + * @waitcount: how many cpus expect to find this + * group when they acquire rdtgroup_mutex + * @type: indicates type of this rdtgroup - either + * monitor only or ctrl_mon group + * @mon: mongroup related data + * @mode: mode of resource group + * @plr: pseudo-locked region + */ +struct rdtgroup { + struct kernfs_node *kn; + struct list_head rdtgroup_list; + u32 closid; + struct cpumask cpu_mask; + int flags; + atomic_t waitcount; + enum rdt_group_type type; + struct mongroup mon; + enum rdtgrp_mode mode; + struct pseudo_lock_region *plr; +}; + +/* List of all resource groups */ +extern struct list_head rdt_all_groups; + +extern int max_name_width, max_data_width; + +/** + * struct rftype - describe each file in the resctrl file system + * @name: File name + * @mode: Access mode + * @kf_ops: File operations + * @flags: File specific RFTYPE_FLAGS_* flags + * @fflags: File specific RFTYPE_* flags + * @seq_show: Show content of the file + * @write: Write to the file + */ +struct rftype { + char *name; + umode_t mode; + const struct kernfs_ops *kf_ops; + unsigned long flags; + unsigned long fflags; + + int (*seq_show)(struct kernfs_open_file *of, + struct seq_file *sf, void *v); + /* + * write() is the generic write callback which maps directly to + * kernfs write operation and overrides all other operations. + * Maximum write size is determined by ->max_write_len. 
+ */ + ssize_t (*write)(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off); +}; + +/** + * struct mbm_state - status for each MBM counter in each domain + * @prev_bw_bytes: Previous bytes value read for bandwidth calculation + * @prev_bw: The most recent bandwidth in MBps + */ +struct mbm_state { + u64 prev_bw_bytes; + u32 prev_bw; +}; + +static inline bool is_mba_sc(struct rdt_resource *r) +{ + if (!r) + r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + + /* + * The software controller support is only applicable to MBA resource. + * Make sure to check for resource type. + */ + if (r->rid != RDT_RESOURCE_MBA) + return false; + + return r->membw.mba_sc; +} + +extern struct mutex rdtgroup_mutex; +extern struct rdtgroup rdtgroup_default; +extern struct dentry *debugfs_resctrl; + +void rdt_last_cmd_clear(void); +void rdt_last_cmd_puts(const char *s); +__printf(1, 2) +void rdt_last_cmd_printf(const char *fmt, ...); + +struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn); +void rdtgroup_kn_unlock(struct kernfs_node *kn); +int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name); +int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, + umode_t mask); +ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off); +int rdtgroup_schemata_show(struct kernfs_open_file *of, + struct seq_file *s, void *v); +bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, + unsigned long cbm, int closid, bool exclusive); +unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, + unsigned long cbm); +enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); +int rdtgroup_tasks_assigned(struct rdtgroup *r); +int closids_supported(void); +void closid_free(int closid); +int alloc_rmid(u32 closid); +void free_rmid(u32 closid, u32 rmid); +void resctrl_mon_resource_exit(void); +void mon_event_count(void *info); +int rdtgroup_mondata_show(struct seq_file *m, void 
*arg); +void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, + struct rdt_domain *d, struct rdtgroup *rdtgrp, + int evtid, int first); +int resctrl_mon_resource_init(void); +void mbm_setup_overflow_handler(struct rdt_domain *dom, + unsigned long delay_ms, + int exclude_cpu); +void mbm_handle_overflow(struct work_struct *work); +bool is_mba_sc(struct rdt_resource *r); +void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu); +void cqm_handle_limbo(struct work_struct *work); +bool has_busy_rmid(struct rdt_domain *d); +void __check_limbo(struct rdt_domain *d, bool force_free); +void mbm_config_rftype_init(const char *config); +void rdt_staged_configs_clear(void); +bool closid_allocated(unsigned int closid); +int resctrl_find_cleanest_closid(void); + +#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK +int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); +int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); +bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm); +bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); +int rdt_pseudo_lock_init(void); +void rdt_pseudo_lock_release(void); +int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); +void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); +#else +static inline int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) +{ + return false; +} + +static inline bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) +{ + return false; +} + +static inline int rdt_pseudo_lock_init(void) { return 0; } +static inline void rdt_pseudo_lock_release(void) { } +static inline int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline void rdtgroup_pseudo_lock_remove(struct 
rdtgroup *rdtgrp) { } +#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */ + +#endif /* _FS_RESCTRL_INTERNAL_H */ diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c index e69de29bb2d1..06f660dfd929 100644 --- a/fs/resctrl/monitor.c +++ b/fs/resctrl/monitor.c @@ -0,0 +1,843 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Resource Director Technology(RDT) + * - Monitoring code + * + * Copyright (C) 2017 Intel Corporation + * + * Author: + * Vikas Shivappa + * + * This replaces the cqm.c based on perf but we reuse a lot of + * code and datastructures originally from Peter Zijlstra and Matt Fleming. + * + * More information about RDT can be found in the Intel (R) x86 Architecture + * Software Developer Manual June 2016, volume 3, section 17.17. + */ + +#include <linux/cpu.h> +#include <linux/module.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include "internal.h" + +/* + * struct rmid_entry - dirty tracking for all RMID. + * @closid: The CLOSID for this entry. + * @rmid: The RMID for this entry. + * @busy: The number of domains with cached data using this RMID. + * @list: Member of the rmid_free_lru list when busy == 0. + * + * Depending on the architecture the correct monitor is accessed using + * both @closid and @rmid, or @rmid only. + * + * Take the rdtgroup_mutex when accessing. + */ +struct rmid_entry { + u32 closid; + u32 rmid; + int busy; + struct list_head list; +}; + +/* + * @rmid_free_lru - A least recently used list of free RMIDs + * These RMIDs are guaranteed to have an occupancy less than the + * threshold occupancy + */ +static LIST_HEAD(rmid_free_lru); + +/* + * @closid_num_dirty_rmid The number of dirty RMID each CLOSID has. + * Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined. + * Indexed by CLOSID. Protected by rdtgroup_mutex. + */ +static u32 *closid_num_dirty_rmid; + +/* + * @rmid_limbo_count - count of currently unused but (potentially) + * dirty RMIDs. + * This counts RMIDs that no one is currently using but that + * may have an occupancy value > resctrl_rmid_realloc_threshold. 
User can + * change the threshold occupancy value. + */ +static unsigned int rmid_limbo_count; + +/* + * @rmid_entry - The entry in the limbo and free lists. + */ +static struct rmid_entry *rmid_ptrs; + +/* + * This is the threshold cache occupancy in bytes at which we will consider an + * RMID available for re-allocation. + */ +unsigned int resctrl_rmid_realloc_threshold; + +/* + * This is the maximum value for the reallocation threshold, in bytes. + */ +unsigned int resctrl_rmid_realloc_limit; + +/* + * x86 and arm64 differ in their handling of monitoring. + * x86's RMID are independent numbers, there is only one source of traffic + * with an RMID value of '1'. + * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of + * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID + * value is no longer unique. + * To account for this, resctrl uses an index. On x86 this is just the RMID, + * on arm64 it encodes the CLOSID and RMID. This gives a unique number. + * + * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code + * must accept an attempt to read every index. + */ +static inline struct rmid_entry *__rmid_entry(u32 idx) +{ + struct rmid_entry *entry; + u32 closid, rmid; + + entry = &rmid_ptrs[idx]; + resctrl_arch_rmid_idx_decode(idx, &closid, &rmid); + + WARN_ON_ONCE(entry->closid != closid); + WARN_ON_ONCE(entry->rmid != rmid); + + return entry; +} + +static void limbo_release_entry(struct rmid_entry *entry) +{ + lockdep_assert_held(&rdtgroup_mutex); + + rmid_limbo_count--; + list_add_tail(&entry->list, &rmid_free_lru); + + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + closid_num_dirty_rmid[entry->closid]--; +} + +/* + * Check the RMIDs that are marked as busy for this domain. If the + * reported LLC occupancy is below the threshold clear the busy bit and + * decrement the count. 
If the busy count gets to zero on an RMID, we + * free the RMID + */ +void __check_limbo(struct rdt_domain *d, bool force_free) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + struct rmid_entry *entry; + u32 idx, cur_idx = 1; + void *arch_mon_ctx; + bool rmid_dirty; + u64 val = 0; + + arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID); + if (IS_ERR(arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(arch_mon_ctx)); + return; + } + + /* + * Skip RMID 0 and start from RMID 1 and check all the RMIDs that + * are marked as busy for occupancy < threshold. If the occupancy + * is less than the threshold decrement the busy counter of the + * RMID and move it to the free list when the counter reaches 0. + */ + for (;;) { + idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx); + if (idx >= idx_limit) + break; + + entry = __rmid_entry(idx); + if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid, + QOS_L3_OCCUP_EVENT_ID, &val, + arch_mon_ctx)) { + rmid_dirty = true; + } else { + rmid_dirty = (val >= resctrl_rmid_realloc_threshold); + } + + if (force_free || !rmid_dirty) { + clear_bit(idx, d->rmid_busy_llc); + if (!--entry->busy) + limbo_release_entry(entry); + } + cur_idx = idx + 1; + } + + resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx); +} + +bool has_busy_rmid(struct rdt_domain *d) +{ + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + + return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit; +} + +static struct rmid_entry *resctrl_find_free_rmid(u32 closid) +{ + struct rmid_entry *itr; + u32 itr_idx, cmp_idx; + + if (list_empty(&rmid_free_lru)) + return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC); + + list_for_each_entry(itr, &rmid_free_lru, list) { + /* + * Get the index of this free RMID, and the index it would need + * to be if it were used with this CLOSID. 
+ * If the CLOSID is irrelevant on this architecture, the two + * index values are always the same on every entry and thus the + * very first entry will be returned. + */ + itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid); + cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid); + + if (itr_idx == cmp_idx) + return itr; + } + + return ERR_PTR(-ENOSPC); +} + +/** + * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated + * RMID are clean, or the CLOSID that has + * the most clean RMID. + * + * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID + * may not be able to allocate clean RMID. To avoid this the allocator will + * choose the CLOSID with the most clean RMID. + * + * When the CLOSID and RMID are independent numbers, the first free CLOSID will + * be returned. + */ +int resctrl_find_cleanest_closid(void) +{ + u32 cleanest_closid = ~0; + int i = 0; + + lockdep_assert_held(&rdtgroup_mutex); + + if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + return -EIO; + + for (i = 0; i < closids_supported(); i++) { + int num_dirty; + + if (closid_allocated(i)) + continue; + + num_dirty = closid_num_dirty_rmid[i]; + if (num_dirty == 0) + return i; + + if (cleanest_closid == ~0) + cleanest_closid = i; + + if (num_dirty < closid_num_dirty_rmid[cleanest_closid]) + cleanest_closid = i; + } + + if (cleanest_closid == ~0) + return -ENOSPC; + + return cleanest_closid; +} + +/* + * For MPAM the RMID value is not unique, and has to be considered with + * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which + * allows all domains to be managed by a single free list. + * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler. 
+ */ +int alloc_rmid(u32 closid) +{ + struct rmid_entry *entry; + + lockdep_assert_held(&rdtgroup_mutex); + + entry = resctrl_find_free_rmid(closid); + if (IS_ERR(entry)) + return PTR_ERR(entry); + + list_del(&entry->list); + return entry->rmid; +} + +static void add_rmid_to_limbo(struct rmid_entry *entry) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + struct rdt_domain *d; + u32 idx; + + lockdep_assert_held(&rdtgroup_mutex); + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid); + + entry->busy = 0; + list_for_each_entry(d, &r->domains, list) { + /* + * For the first limbo RMID in the domain, + * setup up the limbo worker. + */ + if (!has_busy_rmid(d)) + cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL, + RESCTRL_PICK_ANY_CPU); + set_bit(idx, d->rmid_busy_llc); + entry->busy++; + } + + rmid_limbo_count++; + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + closid_num_dirty_rmid[entry->closid]++; +} + +void free_rmid(u32 closid, u32 rmid) +{ + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); + struct rmid_entry *entry; + + lockdep_assert_held(&rdtgroup_mutex); + + /* + * Do not allow the default rmid to be free'd. Comparing by index + * allows architectures that ignore the closid parameter to avoid an + * unnecessary check. 
+ */ + if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, + RESCTRL_RESERVED_RMID)) + return; + + entry = __rmid_entry(idx); + + if (resctrl_arch_is_llc_occupancy_enabled()) + add_rmid_to_limbo(entry); + else + list_add_tail(&entry->list, &rmid_free_lru); +} + +static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 closid, + u32 rmid, enum resctrl_event_id evtid) +{ + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); + + switch (evtid) { + case QOS_L3_MBM_TOTAL_EVENT_ID: + return &d->mbm_total[idx]; + case QOS_L3_MBM_LOCAL_EVENT_ID: + return &d->mbm_local[idx]; + default: + return NULL; + } +} + +static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr) +{ + struct mbm_state *m; + u64 tval = 0; + + if (rr->first) { + resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid); + m = get_mbm_state(rr->d, closid, rmid, rr->evtid); + if (m) + memset(m, 0, sizeof(struct mbm_state)); + return 0; + } + + rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid, + &tval, rr->arch_mon_ctx); + if (rr->err) + return rr->err; + + rr->val += tval; + + return 0; +} + +/* + * mbm_bw_count() - Update bw count from values previously read by + * __mon_event_count(). + * @closid: The closid used to identify the cached mbm_state. + * @rmid: The rmid used to identify the cached mbm_state. + * @rr: The struct rmid_read populated by __mon_event_count(). + * + * Supporting function to calculate the memory bandwidth + * and delta bandwidth in MBps. The chunks value previously read by + * __mon_event_count() is compared with the chunks value from the previous + * invocation. This must be called once per second to maintain values in MBps. 
+ */ +static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr) +{ + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); + struct mbm_state *m = &rr->d->mbm_local[idx]; + u64 cur_bw, bytes, cur_bytes; + + cur_bytes = rr->val; + bytes = cur_bytes - m->prev_bw_bytes; + m->prev_bw_bytes = cur_bytes; + + cur_bw = bytes / SZ_1M; + + m->prev_bw = cur_bw; +} + +/* + * This is scheduled by mon_event_read() to read the CQM/MBM counters + * on a domain. + */ +void mon_event_count(void *info) +{ + struct rdtgroup *rdtgrp, *entry; + struct rmid_read *rr = info; + struct list_head *head; + int ret; + + rdtgrp = rr->rgrp; + + ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr); + + /* + * For Ctrl groups read data from child monitor groups and + * add them together. Count events which are read successfully. + * Discard the rmid_read's reporting errors. + */ + head = &rdtgrp->mon.crdtgrp_list; + + if (rdtgrp->type == RDTCTRL_GROUP) { + list_for_each_entry(entry, head, mon.crdtgrp_list) { + if (__mon_event_count(entry->closid, entry->mon.rmid, + rr) == 0) + ret = 0; + } + } + + /* + * __mon_event_count() calls for newly created monitor groups may + * report -EINVAL/Unavailable if the monitor hasn't seen any traffic. + * Discard error if any of the monitor event reads succeeded. + */ + if (ret == 0) + rr->err = 0; +} + +/* + * Feedback loop for MBA software controller (mba_sc) + * + * mba_sc is a feedback loop where we periodically read MBM counters and + * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so + * that: + * + * current bandwidth(cur_bw) < user specified bandwidth(user_bw) + * + * This uses the MBM counters to measure the bandwidth and MBA throttle + * MSRs to control the bandwidth for a particular rdtgrp. It builds on the + * fact that resctrl rdtgroups have both monitoring and control. + * + * The frequency of the checks is 1s and we just tag along the MBM overflow + * timer. 
Having 1s interval makes the calculation of bandwidth simpler. + * + * Although MBA's goal is to restrict the bandwidth to a maximum, there may + * be a need to increase the bandwidth to avoid unnecessarily restricting + * the L2 <-> L3 traffic. + * + * Since MBA controls the L2 external bandwidth where as MBM measures the + * L3 external bandwidth the following sequence could lead to such a + * situation. + * + * Consider an rdtgroup which had high L3 <-> memory traffic in initial + * phases -> mba_sc kicks in and reduced bandwidth percentage values -> but + * after some time rdtgroup has mostly L2 <-> L3 traffic. + * + * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its + * throttle MSRs already have low percentage values. To avoid + * unnecessarily restricting such rdtgroups, we also increase the bandwidth. + */ +static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) +{ + u32 closid, rmid, cur_msr_val, new_msr_val; + struct mbm_state *pmbm_data, *cmbm_data; + struct rdt_resource *r_mba; + struct rdt_domain *dom_mba; + u32 cur_bw, user_bw, idx; + struct list_head *head; + struct rdtgroup *entry; + + if (!resctrl_arch_is_mbm_local_enabled()) + return; + + r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + + closid = rgrp->closid; + rmid = rgrp->mon.rmid; + idx = resctrl_arch_rmid_idx_encode(closid, rmid); + pmbm_data = &dom_mbm->mbm_local[idx]; + + dom_mba = resctrl_get_domain_from_cpu(smp_processor_id(), r_mba); + if (!dom_mba) { + pr_warn_once("Failure to get domain for MBA update\n"); + return; + } + + cur_bw = pmbm_data->prev_bw; + user_bw = dom_mba->mbps_val[closid]; + + /* MBA resource doesn't support CDP */ + cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE); + + /* + * For Ctrl groups read data from child monitor groups. 
+ */ + head = &rgrp->mon.crdtgrp_list; + list_for_each_entry(entry, head, mon.crdtgrp_list) { + cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid]; + cur_bw += cmbm_data->prev_bw; + } + + /* + * Scale up/down the bandwidth linearly for the ctrl group. The + * bandwidth step is the bandwidth granularity specified by the + * hardware. + * Always increase throttling if current bandwidth is above the + * target set by user. + * But avoid thrashing up and down on every poll by checking + * whether a decrease in throttling is likely to push the group + * back over target. E.g. if currently throttling to 30% of bandwidth + * on a system with 10% granularity steps, check whether moving to + * 40% would go past the limit by multiplying current bandwidth by + * "(30 + 10) / 30". + */ + if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) { + new_msr_val = cur_msr_val - r_mba->membw.bw_gran; + } else if (cur_msr_val < MAX_MBA_BW && + (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) { + new_msr_val = cur_msr_val + r_mba->membw.bw_gran; + } else { + return; + } + + resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val); +} + +static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, u32 rmid) +{ + struct rmid_read rr; + + rr.first = false; + rr.r = r; + rr.d = d; + + /* + * This is protected from concurrent reads from user + * as both the user and we hold the global mutex. 
+ */ + if (resctrl_arch_is_mbm_total_enabled()) { + rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; + rr.val = 0; + rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); + if (IS_ERR(rr.arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(rr.arch_mon_ctx)); + return; + } + + __mon_event_count(closid, rmid, &rr); + + resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); + } + if (resctrl_arch_is_mbm_local_enabled()) { + rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; + rr.val = 0; + rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); + if (IS_ERR(rr.arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(rr.arch_mon_ctx)); + return; + } + + __mon_event_count(closid, rmid, &rr); + + /* + * Call the MBA software controller only for the + * control groups and when user has enabled + * the software controller explicitly. + */ + if (is_mba_sc(NULL)) + mbm_bw_count(closid, rmid, &rr); + + resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); + } +} + +/* + * Handler to scan the limbo list and move the RMIDs + * to free list whose occupancy < threshold_occupancy. + */ +void cqm_handle_limbo(struct work_struct *work) +{ + unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); + struct rdt_domain *d; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + d = container_of(work, struct rdt_domain, cqm_limbo.work); + + __check_limbo(d, false); + + if (has_busy_rmid(d)) { + d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, + RESCTRL_PICK_ANY_CPU); + schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo, + delay); + } + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); +} + +/** + * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this + * domain. + * @dom: The domain the limbo handler should run for. + * @delay_ms: How far in the future the handler should run. 
+ * @exclude_cpu: Which CPU the handler should not run on, + * RESCTRL_PICK_ANY_CPU to pick any CPU. + */ +void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu) +{ + unsigned long delay = msecs_to_jiffies(delay_ms); + int cpu; + + cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); + dom->cqm_work_cpu = cpu; + + if (cpu < nr_cpu_ids) + schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); +} + +void mbm_handle_overflow(struct work_struct *work) +{ + unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL); + struct rdtgroup *prgrp, *crgrp; + struct list_head *head; + struct rdt_resource *r; + struct rdt_domain *d; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + /* + * If the filesystem has been unmounted this work no longer needs to + * run. + */ + if (!resctrl_mounted || !resctrl_arch_mon_capable()) + goto out_unlock; + + r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + d = container_of(work, struct rdt_domain, mbm_over.work); + + list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { + mbm_update(r, d, prgrp->closid, prgrp->mon.rmid); + + head = &prgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) + mbm_update(r, d, crgrp->closid, crgrp->mon.rmid); + + if (is_mba_sc(NULL)) + update_mba_bw(prgrp, d); + } + + /* + * Re-check for housekeeping CPUs. This allows the overflow handler to + * move off a nohz_full CPU quickly. + */ + d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, + RESCTRL_PICK_ANY_CPU); + schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); +} + +/** + * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this + * domain. + * @dom: The domain the overflow handler should run for. + * @delay_ms: How far in the future the handler should run. + * @exclude_cpu: Which CPU the handler should not run on, + * RESCTRL_PICK_ANY_CPU to pick any CPU. 
+ */ +void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu) +{ + unsigned long delay = msecs_to_jiffies(delay_ms); + int cpu; + + /* + * When a domain comes online there is no guarantee the filesystem is + * mounted. If not, there is no need to catch counter overflow. + */ + if (!resctrl_mounted || !resctrl_arch_mon_capable()) + return; + cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); + dom->mbm_work_cpu = cpu; + + if (cpu < nr_cpu_ids) + schedule_delayed_work_on(cpu, &dom->mbm_over, delay); +} + +static int dom_data_init(struct rdt_resource *r) +{ + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + u32 num_closid = resctrl_arch_get_num_closid(r); + struct rmid_entry *entry = NULL; + int err = 0, i; + u32 idx; + + mutex_lock(&rdtgroup_mutex); + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + u32 *tmp; + + /* + * If the architecture hasn't provided a sanitised value here, + * this may result in larger arrays than necessary. Resctrl will + * use a smaller system wide value based on the resources in + * use. + */ + tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL); + if (!tmp) { + err = -ENOMEM; + goto out_unlock; + } + + closid_num_dirty_rmid = tmp; + } + + rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL); + if (!rmid_ptrs) { + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + kfree(closid_num_dirty_rmid); + closid_num_dirty_rmid = NULL; + } + err = -ENOMEM; + goto out_unlock; + } + + for (i = 0; i < idx_limit; i++) { + entry = &rmid_ptrs[i]; + INIT_LIST_HEAD(&entry->list); + + resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid); + list_add_tail(&entry->list, &rmid_free_lru); + } + + /* + * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and + * are always allocated. These are used for the rdtgroup_default + * control group, which will be setup later in rdtgroup_init(). 
+ */ + idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, + RESCTRL_RESERVED_RMID); + entry = __rmid_entry(idx); + list_del(&entry->list); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); + + return err; +} + +static void dom_data_exit(struct rdt_resource *r) +{ + if (!r->mon_capable) + return; + + mutex_lock(&rdtgroup_mutex); + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + kfree(closid_num_dirty_rmid); + closid_num_dirty_rmid = NULL; + } + + kfree(rmid_ptrs); + rmid_ptrs = NULL; + + mutex_unlock(&rdtgroup_mutex); +} + +static struct mon_evt llc_occupancy_event = { + .name = "llc_occupancy", + .evtid = QOS_L3_OCCUP_EVENT_ID, +}; + +static struct mon_evt mbm_total_event = { + .name = "mbm_total_bytes", + .evtid = QOS_L3_MBM_TOTAL_EVENT_ID, +}; + +static struct mon_evt mbm_local_event = { + .name = "mbm_local_bytes", + .evtid = QOS_L3_MBM_LOCAL_EVENT_ID, +}; + +/* + * Initialize the event list for the resource. + * + * Note that MBM events are also part of RDT_RESOURCE_L3 resource + * because as per the SDM the total and local memory bandwidth + * are enumerated as part of L3 monitoring. 
+ */ +static void l3_mon_evt_init(struct rdt_resource *r) +{ + INIT_LIST_HEAD(&r->evt_list); + + if (resctrl_arch_is_llc_occupancy_enabled()) + list_add_tail(&llc_occupancy_event.list, &r->evt_list); + if (resctrl_arch_is_mbm_total_enabled()) + list_add_tail(&mbm_total_event.list, &r->evt_list); + if (resctrl_arch_is_mbm_local_enabled()) + list_add_tail(&mbm_local_event.list, &r->evt_list); +} + +int resctrl_mon_resource_init(void) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + int ret; + + if (!r->mon_capable) + return 0; + + ret = dom_data_init(r); + if (ret) + return ret; + + l3_mon_evt_init(r); + + if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) { + mbm_total_event.configurable = true; + mbm_config_rftype_init("mbm_total_bytes_config"); + } + if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) { + mbm_local_event.configurable = true; + mbm_config_rftype_init("mbm_local_bytes_config"); + } + + return 0; +} + +void resctrl_mon_resource_exit(void) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + + dom_data_exit(r); +} diff --git a/fs/resctrl/psuedo_lock.c b/fs/resctrl/psuedo_lock.c index e69de29bb2d1..077c2abb6edd 100644 --- a/fs/resctrl/psuedo_lock.c +++ b/fs/resctrl/psuedo_lock.c @@ -0,0 +1,1122 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Resource Director Technology (RDT) + * + * Pseudo-locking support built on top of Cache Allocation Technology (CAT) + * + * Copyright (C) 2018 Intel Corporation + * + * Author: Reinette Chatre + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "internal.h" + +/* + * Major number assigned to and shared by all devices exposing + * pseudo-locked regions. 
+ */ +static unsigned int pseudo_lock_major; +static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0); + +static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode) +{ + const struct rdtgroup *rdtgrp; + + rdtgrp = dev_get_drvdata(dev); + if (mode) + *mode = 0600; + return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name); +} + +static const struct class pseudo_lock_class = { + .name = "pseudo_lock", + .devnode = pseudo_lock_devnode, +}; + +/** + * pseudo_lock_minor_get - Obtain available minor number + * @minor: Pointer to where new minor number will be stored + * + * A bitmask is used to track available minor numbers. Here the next free + * minor number is marked as unavailable and returned. + * + * Return: 0 on success, <0 on failure. + */ +static int pseudo_lock_minor_get(unsigned int *minor) +{ + unsigned long first_bit; + + first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS); + + if (first_bit == MINORBITS) + return -ENOSPC; + + __clear_bit(first_bit, &pseudo_lock_minor_avail); + *minor = first_bit; + + return 0; +} + +/** + * pseudo_lock_minor_release - Return minor number to available + * @minor: The minor number made available + */ +static void pseudo_lock_minor_release(unsigned int minor) +{ + __set_bit(minor, &pseudo_lock_minor_avail); +} + +/** + * region_find_by_minor - Locate a pseudo-lock region by inode minor number + * @minor: The minor number of the device representing pseudo-locked region + * + * When the character device is accessed we need to determine which + * pseudo-locked region it belongs to. This is done by matching the minor + * number of the device to the pseudo-locked region it belongs. + * + * Minor numbers are assigned at the time a pseudo-locked region is associated + * with a cache instance. + * + * Return: On success return pointer to resource group owning the pseudo-locked + * region, NULL on failure. 
+ */ +static struct rdtgroup *region_find_by_minor(unsigned int minor) +{ + struct rdtgroup *rdtgrp, *rdtgrp_match = NULL; + + list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { + if (rdtgrp->plr && rdtgrp->plr->minor == minor) { + rdtgrp_match = rdtgrp; + break; + } + } + return rdtgrp_match; +} + +/** + * struct pseudo_lock_pm_req - A power management QoS request list entry + * @list: Entry within the @pm_reqs list for a pseudo-locked region + * @req: PM QoS request + */ +struct pseudo_lock_pm_req { + struct list_head list; + struct dev_pm_qos_request req; +}; + +static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr) +{ + struct pseudo_lock_pm_req *pm_req, *next; + + list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) { + dev_pm_qos_remove_request(&pm_req->req); + list_del(&pm_req->list); + kfree(pm_req); + } +} + +/** + * pseudo_lock_cstates_constrain - Restrict cores from entering C6 + * @plr: Pseudo-locked region + * + * To prevent the cache from being affected by power management entering + * C6 has to be avoided. This is accomplished by requesting a latency + * requirement lower than lowest C6 exit latency of all supported + * platforms as found in the cpuidle state tables in the intel_idle driver. + * At this time it is possible to do so with a single latency requirement + * for all supported platforms. + * + * Since Goldmont is supported, which is affected by X86_BUG_MONITOR, + * the ACPI latencies need to be considered while keeping in mind that C2 + * may be set to map to deeper sleep states. In this case the latency + * requirement needs to prevent entering C2 also. 
+ * + * Return: 0 on success, <0 on failure + */ +static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) +{ + struct pseudo_lock_pm_req *pm_req; + int cpu; + int ret; + + for_each_cpu(cpu, &plr->d->cpu_mask) { + pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL); + if (!pm_req) { + rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n"); + ret = -ENOMEM; + goto out_err; + } + ret = dev_pm_qos_add_request(get_cpu_device(cpu), + &pm_req->req, + DEV_PM_QOS_RESUME_LATENCY, + 30); + if (ret < 0) { + rdt_last_cmd_printf("Failed to add latency req CPU%d\n", + cpu); + kfree(pm_req); + ret = -1; + goto out_err; + } + list_add(&pm_req->list, &plr->pm_reqs); + } + + return 0; + +out_err: + pseudo_lock_cstates_relax(plr); + return ret; +} + +/** + * pseudo_lock_region_clear - Reset pseudo-lock region data + * @plr: pseudo-lock region + * + * All content of the pseudo-locked region is reset - any memory allocated + * freed. + * + * Return: void + */ +static void pseudo_lock_region_clear(struct pseudo_lock_region *plr) +{ + plr->size = 0; + plr->line_size = 0; + kfree(plr->kmem); + plr->kmem = NULL; + plr->s = NULL; + if (plr->d) + plr->d->plr = NULL; + plr->d = NULL; + plr->cbm = 0; + plr->debugfs_dir = NULL; +} + +/** + * pseudo_lock_region_init - Initialize pseudo-lock region information + * @plr: pseudo-lock region + * + * Called after user provided a schemata to be pseudo-locked. From the + * schemata the &struct pseudo_lock_region is on entry already initialized + * with the resource, domain, and capacity bitmask. Here the information + * required for pseudo-locking is deduced from this data and &struct + * pseudo_lock_region initialized further. 
This information includes: + * - size in bytes of the region to be pseudo-locked + * - cache line size to know the stride with which data needs to be accessed + * to be pseudo-locked + * - a cpu associated with the cache instance on which the pseudo-locking + * flow can be executed + * + * Return: 0 on success, <0 on failure. Descriptive error will be written + * to last_cmd_status buffer. + */ +static int pseudo_lock_region_init(struct pseudo_lock_region *plr) +{ + struct cpu_cacheinfo *ci; + int ret; + int i; + + /* Pick the first cpu we find that is associated with the cache. */ + plr->cpu = cpumask_first(&plr->d->cpu_mask); + + if (!cpu_online(plr->cpu)) { + rdt_last_cmd_printf("CPU %u associated with cache not online\n", + plr->cpu); + ret = -ENODEV; + goto out_region; + } + + ci = get_cpu_cacheinfo(plr->cpu); + + plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm); + + for (i = 0; i < ci->num_leaves; i++) { + if (ci->info_list[i].level == plr->s->res->cache_level) { + plr->line_size = ci->info_list[i].coherency_line_size; + return 0; + } + } + + ret = -1; + rdt_last_cmd_puts("Unable to determine cache line size\n"); +out_region: + pseudo_lock_region_clear(plr); + return ret; +} + +/** + * pseudo_lock_init - Initialize a pseudo-lock region + * @rdtgrp: resource group to which new pseudo-locked region will belong + * + * A pseudo-locked region is associated with a resource group. When this + * association is created the pseudo-locked region is initialized. The + * details of the pseudo-locked region are not known at this time so only + * allocation is done and association established. 
+ * + * Return: 0 on success, <0 on failure + */ +static int pseudo_lock_init(struct rdtgroup *rdtgrp) +{ + struct pseudo_lock_region *plr; + + plr = kzalloc(sizeof(*plr), GFP_KERNEL); + if (!plr) + return -ENOMEM; + + init_waitqueue_head(&plr->lock_thread_wq); + INIT_LIST_HEAD(&plr->pm_reqs); + rdtgrp->plr = plr; + return 0; +} + +/** + * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked + * @plr: pseudo-lock region + * + * Initialize the details required to set up the pseudo-locked region and + * allocate the contiguous memory that will be pseudo-locked to the cache. + * + * Return: 0 on success, <0 on failure. Descriptive error will be written + * to last_cmd_status buffer. + */ +static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) +{ + int ret; + + ret = pseudo_lock_region_init(plr); + if (ret < 0) + return ret; + + /* + * We do not yet support contiguous regions larger than + * KMALLOC_MAX_SIZE. + */ + if (plr->size > KMALLOC_MAX_SIZE) { + rdt_last_cmd_puts("Requested region exceeds maximum size\n"); + ret = -E2BIG; + goto out_region; + } + + plr->kmem = kzalloc(plr->size, GFP_KERNEL); + if (!plr->kmem) { + rdt_last_cmd_puts("Unable to allocate memory\n"); + ret = -ENOMEM; + goto out_region; + } + + ret = 0; + goto out; +out_region: + pseudo_lock_region_clear(plr); +out: + return ret; +} + +/** + * pseudo_lock_free - Free a pseudo-locked region + * @rdtgrp: resource group to which pseudo-locked region belonged + * + * The pseudo-locked region's resources have already been released, or not + * yet created at this point. Now it can be freed and disassociated from the + * resource group. 
+ * + * Return: void + */ +static void pseudo_lock_free(struct rdtgroup *rdtgrp) +{ + pseudo_lock_region_clear(rdtgrp->plr); + kfree(rdtgrp->plr); + rdtgrp->plr = NULL; +} + +/** + * rdtgroup_monitor_in_progress - Test if monitoring in progress + * @rdtgrp: resource group being queried + * + * Return: 1 if monitor groups have been created for this resource + * group, 0 otherwise. + */ +static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp) +{ + return !list_empty(&rdtgrp->mon.crdtgrp_list); +} + +/** + * rdtgroup_locksetup_user_restrict - Restrict user access to group + * @rdtgrp: resource group needing access restricted + * + * A resource group used for cache pseudo-locking cannot have cpus or tasks + * assigned to it. This is communicated to the user by restricting access + * to all the files that can be used to make such changes. + * + * Permissions restored with rdtgroup_locksetup_user_restore() + * + * Return: 0 on success, <0 on failure. If a failure occurs during the + * restriction of access an attempt will be made to restore permissions but + * the state of the mode of these files will be uncertain when a failure + * occurs. 
+ */ +static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp) +{ + int ret; + + ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); + if (ret) + return ret; + + ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); + if (ret) + goto err_tasks; + + ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); + if (ret) + goto err_cpus; + + if (resctrl_arch_mon_capable()) { + ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups"); + if (ret) + goto err_cpus_list; + } + + ret = 0; + goto out; + +err_cpus_list: + rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); +err_cpus: + rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); +err_tasks: + rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); +out: + return ret; +} + +/** + * rdtgroup_locksetup_user_restore - Restore user access to group + * @rdtgrp: resource group needing access restored + * + * Restore all file access previously removed using + * rdtgroup_locksetup_user_restrict() + * + * Return: 0 on success, <0 on failure. If a failure occurs during the + * restoration of access an attempt will be made to restrict permissions + * again but the state of the mode of these files will be uncertain when + * a failure occurs. 
+ */ +static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp) +{ + int ret; + + ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); + if (ret) + return ret; + + ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); + if (ret) + goto err_tasks; + + ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); + if (ret) + goto err_cpus; + + if (resctrl_arch_mon_capable()) { + ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777); + if (ret) + goto err_cpus_list; + } + + ret = 0; + goto out; + +err_cpus_list: + rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); +err_cpus: + rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); +err_tasks: + rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); +out: + return ret; +} + +/** + * rdtgroup_locksetup_enter - Resource group enters locksetup mode + * @rdtgrp: resource group requested to enter locksetup mode + * + * A resource group enters locksetup mode to reflect that it would be used + * to represent a pseudo-locked region and is in the process of being set + * up to do so. A resource group used for a pseudo-locked region would + * lose the closid associated with it so we cannot allow it to have any + * tasks or cpus assigned nor permit tasks or cpus to be assigned in the + * future. Monitoring of a pseudo-locked region is not allowed either. + * + * The above and more restrictions on a pseudo-locked region are checked + * for and enforced before the resource group enters the locksetup mode. + * + * Returns: 0 if the resource group successfully entered locksetup mode, <0 + * on failure. On failure the last_cmd_status buffer is updated with text to + * communicate details of failure to the user. + */ +int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) +{ + int ret; + + /* + * The default resource group can neither be removed nor lose the + * default closid associated with it. 
+ */
+	if (rdtgrp == &rdtgroup_default) {
+		rdt_last_cmd_puts("Cannot pseudo-lock default group\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Cache Pseudo-locking not supported when CDP is enabled.
+	 *
+	 * Some things to consider if you would like to enable this
+	 * support (using L3 CDP as example):
+	 * - When CDP is enabled two separate resources are exposed,
+	 *   L3DATA and L3CODE, but they are actually on the same cache.
+	 *   The implication for pseudo-locking is that if a
+	 *   pseudo-locked region is created on a domain of one
+	 *   resource (eg. L3CODE), then a pseudo-locked region cannot
+	 *   be created on that same domain of the other resource
+	 *   (eg. L3DATA). This is because the creation of a
+	 *   pseudo-locked region involves a call to wbinvd that will
+	 *   affect all cache allocations on particular domain.
+	 * - Considering the previous, it may be possible to only
+	 *   expose one of the CDP resources to pseudo-locking and
+	 *   hide the other. For example, we could consider to only
+	 *   expose L3DATA and since the L3 cache is unified it is
+	 *   still possible to place instructions there and execute it.
+	 * - If only one region is exposed to pseudo-locking we should
+	 *   still keep in mind that availability of a portion of cache
+	 *   for pseudo-locking should take into account both resources.
+	 *   Similarly, if a pseudo-locked region is created in one
+	 *   resource, the portion of cache used by it should be made
+	 *   unavailable to all future allocations from both resources.
+	 */
+	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) ||
+	    resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) {
+		rdt_last_cmd_puts("CDP enabled\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Not knowing the bits to disable prefetching implies that this
+	 * platform does not support Cache Pseudo-Locking.
+ */
+	if (resctrl_arch_get_prefetch_disable_bits() == 0) {
+		rdt_last_cmd_puts("Pseudo-locking not supported\n");
+		return -EINVAL;
+	}
+
+	if (rdtgroup_monitor_in_progress(rdtgrp)) {
+		rdt_last_cmd_puts("Monitoring in progress\n");
+		return -EINVAL;
+	}
+
+	if (rdtgroup_tasks_assigned(rdtgrp)) {
+		rdt_last_cmd_puts("Tasks assigned to resource group\n");
+		return -EINVAL;
+	}
+
+	if (!cpumask_empty(&rdtgrp->cpu_mask)) {
+		rdt_last_cmd_puts("CPUs assigned to resource group\n");
+		return -EINVAL;
+	}
+
+	if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
+		rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
+		return -EIO;
+	}
+
+	ret = pseudo_lock_init(rdtgrp);
+	if (ret) {
+		rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
+		goto out_release;
+	}
+
+	/*
+	 * If this system is capable of monitoring a rmid would have been
+	 * allocated when the control group was created. This is not needed
+	 * anymore when this group would be used for pseudo-locking. This
+	 * is safe to call on platforms not capable of monitoring.
+	 */
+	free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
+
+	ret = 0;
+	goto out;
+
+out_release:
+	rdtgroup_locksetup_user_restore(rdtgrp);
+out:
+	return ret;
+}
+
+/**
+ * rdtgroup_locksetup_exit - resource group exits locksetup mode
+ * @rdtgrp: resource group
+ *
+ * When a resource group exits locksetup mode the earlier restrictions are
+ * lifted.
+ * + * Return: 0 on success, <0 on failure + */ +int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) +{ + int ret; + + if (resctrl_arch_mon_capable()) { + ret = alloc_rmid(rdtgrp->closid); + if (ret < 0) { + rdt_last_cmd_puts("Out of RMIDs\n"); + return ret; + } + rdtgrp->mon.rmid = ret; + } + + ret = rdtgroup_locksetup_user_restore(rdtgrp); + if (ret) { + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + return ret; + } + + pseudo_lock_free(rdtgrp); + return 0; +} + +/** + * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked + * @d: RDT domain + * @cbm: CBM to test + * + * @d represents a cache instance and @cbm a capacity bitmask that is + * considered for it. Determine if @cbm overlaps with any existing + * pseudo-locked region on @d. + * + * @cbm is unsigned long, even if only 32 bits are used, to make the + * bitmap functions work correctly. + * + * Return: true if @cbm overlaps with pseudo-locked region on @d, false + * otherwise. + */ +bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) +{ + unsigned int cbm_len; + unsigned long cbm_b; + + if (d->plr) { + cbm_len = d->plr->s->res->cache.cbm_len; + cbm_b = d->plr->cbm; + if (bitmap_intersects(&cbm, &cbm_b, cbm_len)) + return true; + } + return false; +} + +/** + * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy + * @d: RDT domain under test + * + * The setup of a pseudo-locked region affects all cache instances within + * the hierarchy of the region. It is thus essential to know if any + * pseudo-locked regions exist within a cache hierarchy to prevent any + * attempts to create new pseudo-locked regions in the same hierarchy. + * + * Return: true if a pseudo-locked region exists in the hierarchy of @d or + * if it is not possible to test due to memory allocation issue, + * false otherwise. 
+ */ +bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) +{ + cpumask_var_t cpu_with_psl; + enum resctrl_res_level i; + struct rdt_resource *r; + struct rdt_domain *d_i; + bool ret = false; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL)) + return true; + + /* + * First determine which cpus have pseudo-locked regions + * associated with them. + */ + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + + list_for_each_entry(d_i, &r->domains, list) { + if (d_i->plr) + cpumask_or(cpu_with_psl, cpu_with_psl, + &d_i->cpu_mask); + } + } + + /* + * Next test if new pseudo-locked region would intersect with + * existing region. + */ + if (cpumask_intersects(&d->cpu_mask, cpu_with_psl)) + ret = true; + + free_cpumask_var(cpu_with_psl); + return ret; +} + +/** + * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region + * @rdtgrp: Resource group to which the pseudo-locked region belongs. + * @sel: Selector of which measurement to perform on a pseudo-locked region. + * + * The measurement of latency to access a pseudo-locked region should be + * done from a cpu that is associated with that pseudo-locked region. + * Determine which cpu is associated with this region and start a thread on + * that cpu to perform the measurement, wait for that thread to complete. 
+ * + * Return: 0 on success, <0 on failure + */ +static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel) +{ + struct pseudo_lock_region *plr = rdtgrp->plr; + struct task_struct *thread; + unsigned int cpu; + int ret = -1; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + if (rdtgrp->flags & RDT_DELETED) { + ret = -ENODEV; + goto out; + } + + if (!plr->d) { + ret = -ENODEV; + goto out; + } + + plr->thread_done = 0; + cpu = cpumask_first(&plr->d->cpu_mask); + if (!cpu_online(cpu)) { + ret = -ENODEV; + goto out; + } + + plr->cpu = cpu; + + if (sel == 1) + thread = kthread_create_on_node(resctrl_arch_measure_cycles_lat_fn, + plr, cpu_to_node(cpu), + "pseudo_lock_measure/%u", + cpu); + else if (sel == 2) + thread = kthread_create_on_node(resctrl_arch_measure_l2_residency, + plr, cpu_to_node(cpu), + "pseudo_lock_measure/%u", + cpu); + else if (sel == 3) + thread = kthread_create_on_node(resctrl_arch_measure_l3_residency, + plr, cpu_to_node(cpu), + "pseudo_lock_measure/%u", + cpu); + else + goto out; + + if (IS_ERR(thread)) { + ret = PTR_ERR(thread); + goto out; + } + kthread_bind(thread, cpu); + wake_up_process(thread); + + ret = wait_event_interruptible(plr->lock_thread_wq, + plr->thread_done == 1); + if (ret < 0) + goto out; + + ret = 0; + +out: + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + return ret; +} + +static ssize_t pseudo_lock_measure_trigger(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rdtgroup *rdtgrp = file->private_data; + size_t buf_size; + char buf[32]; + int ret; + int sel; + + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + ret = kstrtoint(buf, 10, &sel); + if (ret == 0) { + if (sel != 1 && sel != 2 && sel != 3) + return -EINVAL; + ret = debugfs_file_get(file->f_path.dentry); + if (ret) + return ret; + ret = pseudo_lock_measure_cycles(rdtgrp, sel); + if (ret == 0) + ret = count; + 
debugfs_file_put(file->f_path.dentry); + } + + return ret; +} + +static const struct file_operations pseudo_measure_fops = { + .write = pseudo_lock_measure_trigger, + .open = simple_open, + .llseek = default_llseek, +}; + +/** + * rdtgroup_pseudo_lock_create - Create a pseudo-locked region + * @rdtgrp: resource group to which pseudo-lock region belongs + * + * Called when a resource group in the pseudo-locksetup mode receives a + * valid schemata that should be pseudo-locked. Since the resource group is + * in pseudo-locksetup mode the &struct pseudo_lock_region has already been + * allocated and initialized with the essential information. If a failure + * occurs the resource group remains in the pseudo-locksetup mode with the + * &struct pseudo_lock_region associated with it, but cleared from all + * information and ready for the user to re-attempt pseudo-locking by + * writing the schemata again. + * + * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0 + * on failure. Descriptive error will be written to last_cmd_status buffer. 
+ */ +int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) +{ + struct pseudo_lock_region *plr = rdtgrp->plr; + struct task_struct *thread; + unsigned int new_minor; + struct device *dev; + int ret; + + ret = pseudo_lock_region_alloc(plr); + if (ret < 0) + return ret; + + ret = pseudo_lock_cstates_constrain(plr); + if (ret < 0) { + ret = -EINVAL; + goto out_region; + } + + plr->thread_done = 0; + + plr->closid = rdtgrp->closid; + thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, plr, + cpu_to_node(plr->cpu), + "pseudo_lock/%u", plr->cpu); + if (IS_ERR(thread)) { + ret = PTR_ERR(thread); + rdt_last_cmd_printf("Locking thread returned error %d\n", ret); + goto out_cstates; + } + + kthread_bind(thread, plr->cpu); + wake_up_process(thread); + + ret = wait_event_interruptible(plr->lock_thread_wq, + plr->thread_done == 1); + if (ret < 0) { + /* + * If the thread does not get on the CPU for whatever + * reason and the process which sets up the region is + * interrupted then this will leave the thread in runnable + * state and once it gets on the CPU it will dereference + * the cleared, but not freed, plr struct resulting in an + * empty pseudo-locking loop. + */ + rdt_last_cmd_puts("Locking thread interrupted\n"); + goto out_cstates; + } + + ret = pseudo_lock_minor_get(&new_minor); + if (ret < 0) { + rdt_last_cmd_puts("Unable to obtain a new minor number\n"); + goto out_cstates; + } + + /* + * Unlock access but do not release the reference. The + * pseudo-locked region will still be here on return. + * + * The mutex has to be released temporarily to avoid a potential + * deadlock with the mm->mmap_lock which is obtained in the + * device_create() and debugfs_create_dir() callpath below as well as + * before the mmap() callback is called. 
+ */ + mutex_unlock(&rdtgroup_mutex); + + if (!IS_ERR_OR_NULL(debugfs_resctrl)) { + plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name, + debugfs_resctrl); + if (!IS_ERR_OR_NULL(plr->debugfs_dir)) + debugfs_create_file("pseudo_lock_measure", 0200, + plr->debugfs_dir, rdtgrp, + &pseudo_measure_fops); + } + + dev = device_create(&pseudo_lock_class, NULL, + MKDEV(pseudo_lock_major, new_minor), + rdtgrp, "%s", rdtgrp->kn->name); + + mutex_lock(&rdtgroup_mutex); + + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + rdt_last_cmd_printf("Failed to create character device: %d\n", + ret); + goto out_debugfs; + } + + /* We released the mutex - check if group was removed while we did so */ + if (rdtgrp->flags & RDT_DELETED) { + ret = -ENODEV; + goto out_device; + } + + plr->minor = new_minor; + + rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED; + closid_free(rdtgrp->closid); + rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444); + rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444); + + ret = 0; + goto out; + +out_device: + device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor)); +out_debugfs: + debugfs_remove_recursive(plr->debugfs_dir); + pseudo_lock_minor_release(new_minor); +out_cstates: + pseudo_lock_cstates_relax(plr); +out_region: + pseudo_lock_region_clear(plr); +out: + return ret; +} + +/** + * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region + * @rdtgrp: resource group to which the pseudo-locked region belongs + * + * The removal of a pseudo-locked region can be initiated when the resource + * group is removed from user space via a "rmdir" from userspace or the + * unmount of the resctrl filesystem. On removal the resource group does + * not go back to pseudo-locksetup mode before it is removed, instead it is + * removed directly. There is thus asymmetry with the creation where the + * &struct pseudo_lock_region is removed here while it was not created in + * rdtgroup_pseudo_lock_create(). 
+ * + * Return: void + */ +void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) +{ + struct pseudo_lock_region *plr = rdtgrp->plr; + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + /* + * Default group cannot be a pseudo-locked region so we can + * free closid here. + */ + closid_free(rdtgrp->closid); + goto free; + } + + pseudo_lock_cstates_relax(plr); + debugfs_remove_recursive(rdtgrp->plr->debugfs_dir); + device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor)); + pseudo_lock_minor_release(plr->minor); + +free: + pseudo_lock_free(rdtgrp); +} + +static int pseudo_lock_dev_open(struct inode *inode, struct file *filp) +{ + struct rdtgroup *rdtgrp; + + mutex_lock(&rdtgroup_mutex); + + rdtgrp = region_find_by_minor(iminor(inode)); + if (!rdtgrp) { + mutex_unlock(&rdtgroup_mutex); + return -ENODEV; + } + + filp->private_data = rdtgrp; + atomic_inc(&rdtgrp->waitcount); + /* Perform a non-seekable open - llseek is not supported */ + filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); + + mutex_unlock(&rdtgroup_mutex); + + return 0; +} + +static int pseudo_lock_dev_release(struct inode *inode, struct file *filp) +{ + struct rdtgroup *rdtgrp; + + mutex_lock(&rdtgroup_mutex); + rdtgrp = filp->private_data; + WARN_ON(!rdtgrp); + if (!rdtgrp) { + mutex_unlock(&rdtgroup_mutex); + return -ENODEV; + } + filp->private_data = NULL; + atomic_dec(&rdtgrp->waitcount); + mutex_unlock(&rdtgroup_mutex); + return 0; +} + +static int pseudo_lock_dev_mremap(struct vm_area_struct *area) +{ + /* Not supported */ + return -EINVAL; +} + +static const struct vm_operations_struct pseudo_mmap_ops = { + .mremap = pseudo_lock_dev_mremap, +}; + +static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long vsize = vma->vm_end - vma->vm_start; + unsigned long off = vma->vm_pgoff << PAGE_SHIFT; + struct pseudo_lock_region *plr; + struct rdtgroup *rdtgrp; + unsigned long physical; + unsigned long psize; + + 
mutex_lock(&rdtgroup_mutex); + + rdtgrp = filp->private_data; + WARN_ON(!rdtgrp); + if (!rdtgrp) { + mutex_unlock(&rdtgroup_mutex); + return -ENODEV; + } + + plr = rdtgrp->plr; + + if (!plr->d) { + mutex_unlock(&rdtgroup_mutex); + return -ENODEV; + } + + /* + * Task is required to run with affinity to the cpus associated + * with the pseudo-locked region. If this is not the case the task + * may be scheduled elsewhere and invalidate entries in the + * pseudo-locked region. + */ + if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { + mutex_unlock(&rdtgroup_mutex); + return -EINVAL; + } + + physical = __pa(plr->kmem) >> PAGE_SHIFT; + psize = plr->size - off; + + if (off > plr->size) { + mutex_unlock(&rdtgroup_mutex); + return -ENOSPC; + } + + /* + * Ensure changes are carried directly to the memory being mapped, + * do not allow copy-on-write mapping. + */ + if (!(vma->vm_flags & VM_SHARED)) { + mutex_unlock(&rdtgroup_mutex); + return -EINVAL; + } + + if (vsize > psize) { + mutex_unlock(&rdtgroup_mutex); + return -ENOSPC; + } + + memset(plr->kmem + off, 0, vsize); + + if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff, + vsize, vma->vm_page_prot)) { + mutex_unlock(&rdtgroup_mutex); + return -EAGAIN; + } + vma->vm_ops = &pseudo_mmap_ops; + mutex_unlock(&rdtgroup_mutex); + return 0; +} + +static const struct file_operations pseudo_lock_dev_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = NULL, + .write = NULL, + .open = pseudo_lock_dev_open, + .release = pseudo_lock_dev_release, + .mmap = pseudo_lock_dev_mmap, +}; + +int rdt_pseudo_lock_init(void) +{ + int ret; + + ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops); + if (ret < 0) + return ret; + + pseudo_lock_major = ret; + + ret = class_register(&pseudo_lock_class); + if (ret) { + unregister_chrdev(pseudo_lock_major, "pseudo_lock"); + return ret; + } + + return 0; +} + +void rdt_pseudo_lock_release(void) +{ + class_unregister(&pseudo_lock_class); + 
unregister_chrdev(pseudo_lock_major, "pseudo_lock"); + pseudo_lock_major = 0; +} diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c index e69de29bb2d1..936fc6e47386 100644 --- a/fs/resctrl/rdtgroup.c +++ b/fs/resctrl/rdtgroup.c @@ -0,0 +1,4013 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * User interface for Resource Allocation in Resource Director Technology(RDT) + * + * Copyright (C) 2016 Intel Corporation + * + * Author: Fenghua Yu + * + * More information about RDT be found in the Intel (R) x86 Architecture + * Software Developer Manual. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include "internal.h" + +/* Mutex to protect rdtgroup access. */ +DEFINE_MUTEX(rdtgroup_mutex); + +static struct kernfs_root *rdt_root; +struct rdtgroup rdtgroup_default; +LIST_HEAD(rdt_all_groups); + +/* list of entries for the schemata file */ +LIST_HEAD(resctrl_schema_all); + +/* The filesystem can only be mounted once. 
*/ +bool resctrl_mounted; + +/* Kernel fs node for "info" directory under root */ +static struct kernfs_node *kn_info; + +/* Kernel fs node for "mon_groups" directory under root */ +static struct kernfs_node *kn_mongrp; + +/* Kernel fs node for "mon_data" directory under root */ +static struct kernfs_node *kn_mondata; + +/* + * Used to store the max resource name width and max resource data width + * to display the schemata in a tabular format + */ +int max_name_width, max_data_width; + +static struct seq_buf last_cmd_status; +static char last_cmd_status_buf[512]; + +static int rdtgroup_setup_root(struct rdt_fs_context *ctx); +static void rdtgroup_destroy_root(void); + +struct dentry *debugfs_resctrl; + +static bool resctrl_debug; + +void rdt_last_cmd_clear(void) +{ + lockdep_assert_held(&rdtgroup_mutex); + seq_buf_clear(&last_cmd_status); +} + +void rdt_last_cmd_puts(const char *s) +{ + lockdep_assert_held(&rdtgroup_mutex); + seq_buf_puts(&last_cmd_status, s); +} + +void rdt_last_cmd_printf(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + lockdep_assert_held(&rdtgroup_mutex); + seq_buf_vprintf(&last_cmd_status, fmt, ap); + va_end(ap); +} + +void rdt_staged_configs_clear(void) +{ + enum resctrl_res_level i; + struct rdt_resource *r; + struct rdt_domain *dom; + + lockdep_assert_held(&rdtgroup_mutex); + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + + list_for_each_entry(dom, &r->domains, list) + memset(dom->staged_config, 0, sizeof(dom->staged_config)); + } +} + +static bool resctrl_is_mbm_enabled(void) +{ + return (resctrl_arch_is_mbm_total_enabled() || + resctrl_arch_is_mbm_local_enabled()); +} + +static bool resctrl_is_mbm_event(int e) +{ + return (e >= QOS_L3_MBM_TOTAL_EVENT_ID && + e <= QOS_L3_MBM_LOCAL_EVENT_ID); +} + +/* + * Trivial allocator for CLOSIDs. Since h/w only supports a small number, + * we can keep a bitmap of free CLOSIDs in a single integer. 
+ * + * Using a global CLOSID across all resources has some advantages and + * some drawbacks: + * + We can simply set current's closid to assign a task to a resource + * group. + * + Context switch code can avoid extra memory references deciding which + * CLOSID to load into the PQR_ASSOC MSR + * - We give up some options in configuring resource groups across multi-socket + * systems. + * - Our choices on how to configure each resource become progressively more + * limited as the number of resources grows. + */ +static unsigned long closid_free_map; +static int closid_free_map_len; + +int closids_supported(void) +{ + return closid_free_map_len; +} + +static void closid_init(void) +{ + struct resctrl_schema *s; + u32 rdt_min_closid = 32; + + /* Compute rdt_min_closid across all resources */ + list_for_each_entry(s, &resctrl_schema_all, list) + rdt_min_closid = min(rdt_min_closid, s->num_closid); + + closid_free_map = BIT_MASK(rdt_min_closid) - 1; + + /* RESCTRL_RESERVED_CLOSID is always reserved for the default group */ + __clear_bit(RESCTRL_RESERVED_CLOSID, &closid_free_map); + closid_free_map_len = rdt_min_closid; +} + +static int closid_alloc(void) +{ + int cleanest_closid; + u32 closid; + + lockdep_assert_held(&rdtgroup_mutex); + + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) && + resctrl_arch_is_llc_occupancy_enabled()) { + cleanest_closid = resctrl_find_cleanest_closid(); + if (cleanest_closid < 0) + return cleanest_closid; + closid = cleanest_closid; + } else { + closid = ffs(closid_free_map); + if (closid == 0) + return -ENOSPC; + closid--; + } + __clear_bit(closid, &closid_free_map); + + return closid; +} + +void closid_free(int closid) +{ + lockdep_assert_held(&rdtgroup_mutex); + + __set_bit(closid, &closid_free_map); +} + +/** + * closid_allocated - test if provided closid is in use + * @closid: closid to be tested + * + * Return: true if @closid is currently associated with a resource group, + * false if @closid is free + */ +bool 
closid_allocated(unsigned int closid) +{ + lockdep_assert_held(&rdtgroup_mutex); + + return !test_bit(closid, &closid_free_map); +} + +/** + * rdtgroup_mode_by_closid - Return mode of resource group with closid + * @closid: closid if the resource group + * + * Each resource group is associated with a @closid. Here the mode + * of a resource group can be queried by searching for it using its closid. + * + * Return: mode as &enum rdtgrp_mode of resource group with closid @closid + */ +enum rdtgrp_mode rdtgroup_mode_by_closid(int closid) +{ + struct rdtgroup *rdtgrp; + + list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { + if (rdtgrp->closid == closid) + return rdtgrp->mode; + } + + return RDT_NUM_MODES; +} + +static const char * const rdt_mode_str[] = { + [RDT_MODE_SHAREABLE] = "shareable", + [RDT_MODE_EXCLUSIVE] = "exclusive", + [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup", + [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked", +}; + +/** + * rdtgroup_mode_str - Return the string representation of mode + * @mode: the resource group mode as &enum rdtgroup_mode + * + * Return: string representation of valid mode, "unknown" otherwise + */ +static const char *rdtgroup_mode_str(enum rdtgrp_mode mode) +{ + if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES) + return "unknown"; + + return rdt_mode_str[mode]; +} + +/* set uid and gid of rdtgroup dirs and files to that of the creator */ +static int rdtgroup_kn_set_ugid(struct kernfs_node *kn) +{ + struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, + .ia_uid = current_fsuid(), + .ia_gid = current_fsgid(), }; + + if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && + gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) + return 0; + + return kernfs_setattr(kn, &iattr); +} + +static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft) +{ + struct kernfs_node *kn; + int ret; + + kn = __kernfs_create_file(parent_kn, rft->name, rft->mode, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, + 0, rft->kf_ops, rft, NULL, NULL); + if 
(IS_ERR(kn)) + return PTR_ERR(kn); + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) { + kernfs_remove(kn); + return ret; + } + + return 0; +} + +static int rdtgroup_seqfile_show(struct seq_file *m, void *arg) +{ + struct kernfs_open_file *of = m->private; + struct rftype *rft = of->kn->priv; + + if (rft->seq_show) + return rft->seq_show(of, m, arg); + return 0; +} + +static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf, + size_t nbytes, loff_t off) +{ + struct rftype *rft = of->kn->priv; + + if (rft->write) + return rft->write(of, buf, nbytes, off); + + return -EINVAL; +} + +static const struct kernfs_ops rdtgroup_kf_single_ops = { + .atomic_write_len = PAGE_SIZE, + .write = rdtgroup_file_write, + .seq_show = rdtgroup_seqfile_show, +}; + +static const struct kernfs_ops kf_mondata_ops = { + .atomic_write_len = PAGE_SIZE, + .seq_show = rdtgroup_mondata_show, +}; + +static bool is_cpu_list(struct kernfs_open_file *of) +{ + struct rftype *rft = of->kn->priv; + + return rft->flags & RFTYPE_FLAGS_CPUS_LIST; +} + +static int rdtgroup_cpus_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + struct cpumask *mask; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + + if (rdtgrp) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + if (!rdtgrp->plr->d) { + rdt_last_cmd_clear(); + rdt_last_cmd_puts("Cache domain offline\n"); + ret = -ENODEV; + } else { + mask = &rdtgrp->plr->d->cpu_mask; + seq_printf(s, is_cpu_list(of) ? + "%*pbl\n" : "%*pb\n", + cpumask_pr_args(mask)); + } + } else { + seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", + cpumask_pr_args(&rdtgrp->cpu_mask)); + } + } else { + ret = -ENOENT; + } + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +/* + * Update the PGR_ASSOC MSR on all cpus in @cpu_mask, + * + * Per task closids/rmids must have been set up before calling this function. + * @r may be NULL. 
+ */ +static void +update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r) +{ + struct resctrl_cpu_sync defaults; + struct resctrl_cpu_sync *defaults_p = NULL; + + if (r) { + defaults.closid = r->closid; + defaults.rmid = r->mon.rmid; + defaults_p = &defaults; + } + + on_each_cpu_mask(cpu_mask, resctrl_arch_sync_cpu_defaults, defaults_p, + 1); +} + +static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, + cpumask_var_t tmpmask) +{ + struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp; + struct list_head *head; + + /* Check whether cpus belong to parent ctrl group */ + cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); + if (!cpumask_empty(tmpmask)) { + rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); + return -EINVAL; + } + + /* Check whether cpus are dropped from this group */ + cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); + if (!cpumask_empty(tmpmask)) { + /* Give any dropped cpus to parent rdtgroup */ + cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); + update_closid_rmid(tmpmask, prgrp); + } + + /* + * If we added cpus, remove them from previous group that owned them + * and update per-cpu rmid + */ + cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); + if (!cpumask_empty(tmpmask)) { + head = &prgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) { + if (crgrp == rdtgrp) + continue; + cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask, + tmpmask); + } + update_closid_rmid(tmpmask, rdtgrp); + } + + /* Done pushing/pulling - update this group with new mask */ + cpumask_copy(&rdtgrp->cpu_mask, newmask); + + return 0; +} + +static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m) +{ + struct rdtgroup *crgrp; + + cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m); + /* update the child mon group masks as well*/ + list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list) + cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask); +} + +static 
int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, + cpumask_var_t tmpmask, cpumask_var_t tmpmask1) +{ + struct rdtgroup *r, *crgrp; + struct list_head *head; + + /* Check whether cpus are dropped from this group */ + cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); + if (!cpumask_empty(tmpmask)) { + /* Can't drop from default group */ + if (rdtgrp == &rdtgroup_default) { + rdt_last_cmd_puts("Can't drop CPUs from default group\n"); + return -EINVAL; + } + + /* Give any dropped cpus to rdtgroup_default */ + cpumask_or(&rdtgroup_default.cpu_mask, + &rdtgroup_default.cpu_mask, tmpmask); + update_closid_rmid(tmpmask, &rdtgroup_default); + } + + /* + * If we added cpus, remove them from previous group and + * the prev group's child groups that owned them + * and update per-cpu closid/rmid. + */ + cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); + if (!cpumask_empty(tmpmask)) { + list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) { + if (r == rdtgrp) + continue; + cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); + if (!cpumask_empty(tmpmask1)) + cpumask_rdtgrp_clear(r, tmpmask1); + } + update_closid_rmid(tmpmask, rdtgrp); + } + + /* Done pushing/pulling - update this group with new mask */ + cpumask_copy(&rdtgrp->cpu_mask, newmask); + + /* + * Clear child mon group masks since there is a new parent mask + * now and update the rmid for the cpus the child lost. 
+ */ + head = &rdtgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) { + cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask); + update_closid_rmid(tmpmask, rdtgrp); + cpumask_clear(&crgrp->cpu_mask); + } + + return 0; +} + +static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + cpumask_var_t tmpmask, newmask, tmpmask1; + struct rdtgroup *rdtgrp; + int ret; + + if (!buf) + return -EINVAL; + + if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) + return -ENOMEM; + if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) { + free_cpumask_var(tmpmask); + return -ENOMEM; + } + if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) { + free_cpumask_var(tmpmask); + free_cpumask_var(newmask); + return -ENOMEM; + } + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + ret = -ENOENT; + goto unlock; + } + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = -EINVAL; + rdt_last_cmd_puts("Pseudo-locking in progress\n"); + goto unlock; + } + + if (is_cpu_list(of)) + ret = cpulist_parse(buf, newmask); + else + ret = cpumask_parse(buf, newmask); + + if (ret) { + rdt_last_cmd_puts("Bad CPU list/mask\n"); + goto unlock; + } + + /* check that user didn't specify any offline cpus */ + cpumask_andnot(tmpmask, newmask, cpu_online_mask); + if (!cpumask_empty(tmpmask)) { + ret = -EINVAL; + rdt_last_cmd_puts("Can only assign online CPUs\n"); + goto unlock; + } + + if (rdtgrp->type == RDTCTRL_GROUP) + ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1); + else if (rdtgrp->type == RDTMON_GROUP) + ret = cpus_mon_write(rdtgrp, newmask, tmpmask); + else + ret = -EINVAL; + +unlock: + rdtgroup_kn_unlock(of->kn); + free_cpumask_var(tmpmask); + free_cpumask_var(newmask); + free_cpumask_var(tmpmask1); + + return ret ?: nbytes; +} + +/** + * rdtgroup_remove - the helper to remove resource group safely + * @rdtgrp: resource group to remove + * + * On resource group creation 
via a mkdir, an extra kernfs_node reference is + * taken to ensure that the rdtgroup structure remains accessible for the + * rdtgroup_kn_unlock() calls where it is removed. + * + * Drop the extra reference here, then free the rdtgroup structure. + * + * Return: void + */ +static void rdtgroup_remove(struct rdtgroup *rdtgrp) +{ + kernfs_put(rdtgrp->kn); + kfree(rdtgrp); +} + +static void _update_task_closid_rmid(void *task) +{ + /* + * If the task is still current on this CPU, update PQR_ASSOC MSR. + * Otherwise, the MSR is updated when the task is scheduled in. + */ + if (task == current) + resctrl_arch_sched_in(task); +} + +static void update_task_closid_rmid(struct task_struct *t) +{ + if (IS_ENABLED(CONFIG_SMP) && task_curr(t)) + smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1); + else + _update_task_closid_rmid(t); +} + +static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp) +{ + u32 closid, rmid = rdtgrp->mon.rmid; + + if (rdtgrp->type == RDTCTRL_GROUP) + closid = rdtgrp->closid; + else if (rdtgrp->type == RDTMON_GROUP) + closid = rdtgrp->mon.parent->closid; + else + return false; + + return resctrl_arch_match_closid(tsk, closid) && + resctrl_arch_match_rmid(tsk, closid, rmid); +} + +static int __rdtgroup_move_task(struct task_struct *tsk, + struct rdtgroup *rdtgrp) +{ + /* If the task is already in rdtgrp, no need to move the task. */ + if (task_in_rdtgroup(tsk, rdtgrp)) + return 0; + + /* + * Set the task's closid/rmid before the PQR_ASSOC MSR can be + * updated by them. + * + * For ctrl_mon groups, move both closid and rmid. + * For monitor groups, can move the tasks only from + * their parent CTRL group. 
+ */ + if (rdtgrp->type == RDTMON_GROUP && + !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) { + rdt_last_cmd_puts("Can't move task to different control group\n"); + return -EINVAL; + } + + if (rdtgrp->type == RDTMON_GROUP) + resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid, + rdtgrp->mon.rmid); + else + resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid, + rdtgrp->mon.rmid); + + /* + * Ensure the task's closid and rmid are written before determining if + * the task is current that will decide if it will be interrupted. + * This pairs with the full barrier between the rq->curr update and + * resctrl_arch_sched_in() during context switch. + */ + smp_mb(); + + /* + * By now, the task's closid and rmid are set. If the task is current + * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource + * group go into effect. If the task is not current, the MSR will be + * updated when the task is scheduled in. + */ + update_task_closid_rmid(tsk); + + return 0; +} + +static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) +{ + return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) && + resctrl_arch_match_closid(t, r->closid)); +} + +static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) +{ + return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) && + resctrl_arch_match_rmid(t, r->mon.parent->closid, + r->mon.rmid)); +} + +/** + * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group + * @r: Resource group + * + * Return: 1 if tasks have been assigned to @r, 0 otherwise + */ +int rdtgroup_tasks_assigned(struct rdtgroup *r) +{ + struct task_struct *p, *t; + int ret = 0; + + lockdep_assert_held(&rdtgroup_mutex); + + rcu_read_lock(); + for_each_process_thread(p, t) { + if (is_closid_match(t, r) || is_rmid_match(t, r)) { + ret = 1; + break; + } + } + rcu_read_unlock(); + + return ret; +} + +static int rdtgroup_task_write_permission(struct task_struct *task, + struct 
kernfs_open_file *of) +{ + const struct cred *tcred = get_task_cred(task); + const struct cred *cred = current_cred(); + int ret = 0; + + /* + * Even if we're attaching all tasks in the thread group, we only + * need to check permissions on one of them. + */ + if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && + !uid_eq(cred->euid, tcred->uid) && + !uid_eq(cred->euid, tcred->suid)) { + rdt_last_cmd_printf("No permission to move task %d\n", task->pid); + ret = -EPERM; + } + + put_cred(tcred); + return ret; +} + +static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp, + struct kernfs_open_file *of) +{ + struct task_struct *tsk; + int ret; + + rcu_read_lock(); + if (pid) { + tsk = find_task_by_vpid(pid); + if (!tsk) { + rcu_read_unlock(); + rdt_last_cmd_printf("No task %d\n", pid); + return -ESRCH; + } + } else { + tsk = current; + } + + get_task_struct(tsk); + rcu_read_unlock(); + + ret = rdtgroup_task_write_permission(tsk, of); + if (!ret) + ret = __rdtgroup_move_task(tsk, rdtgrp); + + put_task_struct(tsk); + return ret; +} + +static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct rdtgroup *rdtgrp; + char *pid_str; + int ret = 0; + pid_t pid; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + rdt_last_cmd_clear(); + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = -EINVAL; + rdt_last_cmd_puts("Pseudo-locking in progress\n"); + goto unlock; + } + + while (buf && buf[0] != '\0' && buf[0] != '\n') { + pid_str = strim(strsep(&buf, ",")); + + if (kstrtoint(pid_str, 0, &pid)) { + rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str); + ret = -EINVAL; + break; + } + + if (pid < 0) { + rdt_last_cmd_printf("Invalid pid %d\n", pid); + ret = -EINVAL; + break; + } + + ret = rdtgroup_move_task(pid, rdtgrp, of); + if (ret) { + rdt_last_cmd_printf("Error while processing task %d\n", pid); + 
break; + } + } + +unlock: + rdtgroup_kn_unlock(of->kn); + + return ret ?: nbytes; +} + +static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) +{ + struct task_struct *p, *t; + pid_t pid; + + rcu_read_lock(); + for_each_process_thread(p, t) { + if (is_closid_match(t, r) || is_rmid_match(t, r)) { + pid = task_pid_vnr(t); + if (pid) + seq_printf(s, "%d\n", pid); + } + } + rcu_read_unlock(); +} + +static int rdtgroup_tasks_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + show_rdt_tasks(rdtgrp, s); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +static int rdtgroup_closid_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + seq_printf(s, "%u\n", rdtgrp->closid); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +static int rdtgroup_rmid_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + seq_printf(s, "%u\n", rdtgrp->mon.rmid); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +#ifdef CONFIG_PROC_CPU_RESCTRL + +/* + * A task can only be part of one resctrl control group and of one monitor + * group which is associated to that control group. + * + * 1) res: + * mon: + * + * resctrl is not available. + * + * 2) res:/ + * mon: + * + * Task is part of the root resctrl control group, and it is not associated + * to any monitor group. + * + * 3) res:/ + * mon:mon0 + * + * Task is part of the root resctrl control group and monitor group mon0. + * + * 4) res:group0 + * mon: + * + * Task is part of resctrl control group group0, and it is not associated + * to any monitor group. 
+ * + * 5) res:group0 + * mon:mon1 + * + * Task is part of resctrl control group group0 and monitor group mon1. + */ +int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, + struct pid *pid, struct task_struct *tsk) +{ + struct rdtgroup *rdtg; + int ret = 0; + + mutex_lock(&rdtgroup_mutex); + + /* Return empty if resctrl has not been mounted. */ + if (!resctrl_mounted) { + seq_puts(s, "res:\nmon:\n"); + goto unlock; + } + + list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) { + struct rdtgroup *crg; + + /* + * Task information is only relevant for shareable + * and exclusive groups. + */ + if (rdtg->mode != RDT_MODE_SHAREABLE && + rdtg->mode != RDT_MODE_EXCLUSIVE) + continue; + + if (!resctrl_arch_match_closid(tsk, rdtg->closid)) + continue; + + seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "", + rdtg->kn->name); + seq_puts(s, "mon:"); + list_for_each_entry(crg, &rdtg->mon.crdtgrp_list, + mon.crdtgrp_list) { + if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid, + crg->mon.rmid)) + continue; + seq_printf(s, "%s", crg->kn->name); + break; + } + seq_putc(s, '\n'); + goto unlock; + } + /* + * The above search should succeed. Otherwise return + * with an error. 
+ */ + ret = -ENOENT; +unlock: + mutex_unlock(&rdtgroup_mutex); + + return ret; +} +#endif + +static int rdt_last_cmd_status_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + int len; + + mutex_lock(&rdtgroup_mutex); + len = seq_buf_used(&last_cmd_status); + if (len) + seq_printf(seq, "%.*s", len, last_cmd_status_buf); + else + seq_puts(seq, "ok\n"); + mutex_unlock(&rdtgroup_mutex); + return 0; +} + +static int rdt_num_closids_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + + seq_printf(seq, "%u\n", s->num_closid); + return 0; +} + +static int rdt_default_ctrl_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%x\n", r->default_ctrl); + return 0; +} + +static int rdt_min_cbm_bits_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->cache.min_cbm_bits); + return 0; +} + +static int rdt_shareable_bits_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%x\n", r->cache.shareable_bits); + return 0; +} + +/* + * rdt_bit_usage_show - Display current usage of resources + * + * A domain is a shared resource that can now be allocated differently. Here + * we display the current regions of the domain as an annotated bitmask. 
+ * For each domain of this resource its allocation bitmask + * is annotated as below to indicate the current usage of the corresponding bit: + * 0 - currently unused + * X - currently available for sharing and used by software and hardware + * H - currently used by hardware only but available for software use + * S - currently used and shareable by software only + * E - currently used exclusively by one resource group + * P - currently pseudo-locked by one resource group + */ +static int rdt_bit_usage_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + /* + * Use unsigned long even though only 32 bits are used to ensure + * test_bit() is used safely. + */ + unsigned long sw_shareable = 0, hw_shareable = 0; + unsigned long exclusive = 0, pseudo_locked = 0; + struct rdt_resource *r = s->res; + struct rdt_domain *dom; + int i, hwb, swb, excl, psl; + enum rdtgrp_mode mode; + bool sep = false; + u32 ctrl_val; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + hw_shareable = r->cache.shareable_bits; + list_for_each_entry(dom, &r->domains, list) { + if (sep) + seq_putc(seq, ';'); + sw_shareable = 0; + exclusive = 0; + seq_printf(seq, "%d=", dom->id); + for (i = 0; i < closids_supported(); i++) { + if (!closid_allocated(i)) + continue; + ctrl_val = resctrl_arch_get_config(r, dom, i, + s->conf_type); + mode = rdtgroup_mode_by_closid(i); + switch (mode) { + case RDT_MODE_SHAREABLE: + sw_shareable |= ctrl_val; + break; + case RDT_MODE_EXCLUSIVE: + exclusive |= ctrl_val; + break; + case RDT_MODE_PSEUDO_LOCKSETUP: + /* + * RDT_MODE_PSEUDO_LOCKSETUP is possible + * here but not included since the CBM + * associated with this CLOSID in this mode + * is not initialized and no task or cpu can be + * assigned this CLOSID. 
+ */ + break; + case RDT_MODE_PSEUDO_LOCKED: + case RDT_NUM_MODES: + WARN(1, + "invalid mode for closid %d\n", i); + break; + } + } + for (i = r->cache.cbm_len - 1; i >= 0; i--) { + pseudo_locked = dom->plr ? dom->plr->cbm : 0; + hwb = test_bit(i, &hw_shareable); + swb = test_bit(i, &sw_shareable); + excl = test_bit(i, &exclusive); + psl = test_bit(i, &pseudo_locked); + if (hwb && swb) + seq_putc(seq, 'X'); + else if (hwb && !swb) + seq_putc(seq, 'H'); + else if (!hwb && swb) + seq_putc(seq, 'S'); + else if (excl) + seq_putc(seq, 'E'); + else if (psl) + seq_putc(seq, 'P'); + else /* Unused bits remain */ + seq_putc(seq, '0'); + } + sep = true; + } + seq_putc(seq, '\n'); + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + return 0; +} + +static int rdt_min_bw_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->membw.min_bw); + return 0; +} + +static int rdt_num_rmids_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct rdt_resource *r = of->kn->parent->priv; + + seq_printf(seq, "%d\n", r->num_rmid); + + return 0; +} + +static int rdt_mon_features_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct rdt_resource *r = of->kn->parent->priv; + struct mon_evt *mevt; + + list_for_each_entry(mevt, &r->evt_list, list) { + seq_printf(seq, "%s\n", mevt->name); + if (mevt->configurable) + seq_printf(seq, "%s_config\n", mevt->name); + } + + return 0; +} + +static int rdt_bw_gran_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->membw.bw_gran); + return 0; +} + +static int rdt_delay_linear_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + 
seq_printf(seq, "%u\n", r->membw.delay_linear); + return 0; +} + +static int max_threshold_occ_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold); + + return 0; +} + +static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD) + seq_puts(seq, "per-thread\n"); + else + seq_puts(seq, "max\n"); + + return 0; +} + +static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + unsigned int bytes; + int ret; + + ret = kstrtouint(buf, 0, &bytes); + if (ret) + return ret; + + if (bytes > resctrl_rmid_realloc_limit) + return -EINVAL; + + resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes); + + return nbytes; +} + +/* + * rdtgroup_mode_show - Display mode of this resource group + */ +static int rdtgroup_mode_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + + seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode)); + + rdtgroup_kn_unlock(of->kn); + return 0; +} + +static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type) +{ + switch (my_type) { + case CDP_CODE: + return CDP_DATA; + case CDP_DATA: + return CDP_CODE; + default: + case CDP_NONE: + return CDP_NONE; + } +} + +static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks); + + return 0; +} + +/** + * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other + * @r: Resource to which domain instance @d 
belongs. + * @d: The domain instance for which @closid is being tested. + * @cbm: Capacity bitmask being tested. + * @closid: Intended closid for @cbm. + * @type: CDP type of @r. + * @exclusive: Only check if overlaps with exclusive resource groups + * + * Checks if provided @cbm intended to be used for @closid on domain + * @d overlaps with any other closids or other hardware usage associated + * with this domain. If @exclusive is true then only overlaps with + * resource groups in exclusive mode will be considered. If @exclusive + * is false then overlaps with any resource group or hardware entities + * will be considered. + * + * @cbm is unsigned long, even if only 32 bits are used, to make the + * bitmap functions work correctly. + * + * Return: false if CBM does not overlap, true if it does. + */ +static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, + unsigned long cbm, int closid, + enum resctrl_conf_type type, bool exclusive) +{ + enum rdtgrp_mode mode; + unsigned long ctrl_b; + int i; + + /* Check for any overlap with regions used by hardware directly */ + if (!exclusive) { + ctrl_b = r->cache.shareable_bits; + if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) + return true; + } + + /* Check for overlap with other resource groups */ + for (i = 0; i < closids_supported(); i++) { + ctrl_b = resctrl_arch_get_config(r, d, i, type); + mode = rdtgroup_mode_by_closid(i); + if (closid_allocated(i) && i != closid && + mode != RDT_MODE_PSEUDO_LOCKSETUP) { + if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) { + if (exclusive) { + if (mode == RDT_MODE_EXCLUSIVE) + return true; + continue; + } + return true; + } + } + } + + return false; +} + +/** + * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware + * @s: Schema for the resource to which domain instance @d belongs. + * @d: The domain instance for which @closid is being tested. + * @cbm: Capacity bitmask being tested. + * @closid: Intended closid for @cbm. 
+ * @exclusive: Only check if overlaps with exclusive resource groups
+ *
+ * Resources that can be allocated using a CBM can use the CBM to control
+ * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
+ * for overlap. Overlap test is not limited to the specific resource for
+ * which the CBM is intended though - when dealing with CDP resources that
+ * share the underlying hardware the overlap check should be performed on
+ * the CDP resource sharing the hardware also.
+ *
+ * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
+ * overlap test.
+ *
+ * Return: true if CBM overlap detected, false if there is no overlap
+ */
+bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
+			   unsigned long cbm, int closid, bool exclusive)
+{
+	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
+	struct rdt_resource *r = s->res;
+
+	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
+				    exclusive))
+		return true;
+
+	if (!resctrl_arch_get_cdp_enabled(r->rid))
+		return false;
+	return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
+}
+
+/**
+ * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
+ * @rdtgrp: Resource group identified through its closid.
+ *
+ * An exclusive resource group implies that there should be no sharing of
+ * its allocated resources. At the time this group is considered to be
+ * exclusive this test can determine if its current schemata supports this
+ * setting by testing for overlap with all other resource groups.
+ *
+ * Return: true if resource group can be exclusive, false if there is overlap
+ * with allocations of other resource groups and thus this resource group
+ * cannot be exclusive.
+ */ +static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) +{ + int closid = rdtgrp->closid; + struct resctrl_schema *s; + struct rdt_resource *r; + bool has_cache = false; + struct rdt_domain *d; + u32 ctrl; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA) + continue; + has_cache = true; + list_for_each_entry(d, &r->domains, list) { + ctrl = resctrl_arch_get_config(r, d, closid, + s->conf_type); + if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) { + rdt_last_cmd_puts("Schemata overlaps\n"); + return false; + } + } + } + + if (!has_cache) { + rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); + return false; + } + + return true; +} + +/* + * rdtgroup_mode_write - Modify the resource group's mode + */ +static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct rdtgroup *rdtgrp; + enum rdtgrp_mode mode; + int ret = 0; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + buf[nbytes - 1] = '\0'; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + + rdt_last_cmd_clear(); + + mode = rdtgrp->mode; + + if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) || + (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) || + (!strcmp(buf, "pseudo-locksetup") && + mode == RDT_MODE_PSEUDO_LOCKSETUP) || + (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED)) + goto out; + + if (mode == RDT_MODE_PSEUDO_LOCKED) { + rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); + ret = -EINVAL; + goto out; + } + + if (!strcmp(buf, "shareable")) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = rdtgroup_locksetup_exit(rdtgrp); + if (ret) + goto out; + } + rdtgrp->mode = 
RDT_MODE_SHAREABLE; + } else if (!strcmp(buf, "exclusive")) { + if (!rdtgroup_mode_test_exclusive(rdtgrp)) { + ret = -EINVAL; + goto out; + } + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = rdtgroup_locksetup_exit(rdtgrp); + if (ret) + goto out; + } + rdtgrp->mode = RDT_MODE_EXCLUSIVE; + } else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) && + !strcmp(buf, "pseudo-locksetup")) { + ret = rdtgroup_locksetup_enter(rdtgrp); + if (ret) + goto out; + rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; + } else { + rdt_last_cmd_puts("Unknown or unsupported mode\n"); + ret = -EINVAL; + } + +out: + rdtgroup_kn_unlock(of->kn); + return ret ?: nbytes; +} + +/** + * rdtgroup_cbm_to_size - Translate CBM to size in bytes + * @r: RDT resource to which @d belongs. + * @d: RDT domain instance. + * @cbm: bitmask for which the size should be computed. + * + * The bitmask provided associated with the RDT domain instance @d will be + * translated into how many bytes it represents. The size in bytes is + * computed by first dividing the total cache size by the CBM length to + * determine how many bytes each bit in the bitmask represents. The result + * is multiplied with the number of bits set in the bitmask. + * + * @cbm is unsigned long, even if only 32 bits are used to make the + * bitmap functions work correctly. 
+ */ +unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, + struct rdt_domain *d, unsigned long cbm) +{ + struct cpu_cacheinfo *ci; + unsigned int size = 0; + int num_b, i; + + num_b = bitmap_weight(&cbm, r->cache.cbm_len); + ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); + for (i = 0; i < ci->num_leaves; i++) { + if (ci->info_list[i].level == r->cache_level) { + size = ci->info_list[i].size / r->cache.cbm_len * num_b; + break; + } + } + + return size; +} + +/* + * rdtgroup_size_show - Display size in bytes of allocated regions + * + * The "size" file mirrors the layout of the "schemata" file, printing the + * size in bytes of each region instead of the capacity bitmask. + */ +static int rdtgroup_size_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct resctrl_schema *schema; + enum resctrl_conf_type type; + struct rdtgroup *rdtgrp; + struct rdt_resource *r; + struct rdt_domain *d; + unsigned int size; + int ret = 0; + u32 closid; + bool sep; + u32 ctrl; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + if (!rdtgrp->plr->d) { + rdt_last_cmd_clear(); + rdt_last_cmd_puts("Cache domain offline\n"); + ret = -ENODEV; + } else { + seq_printf(s, "%*s:", max_name_width, + rdtgrp->plr->s->name); + size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res, + rdtgrp->plr->d, + rdtgrp->plr->cbm); + seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); + } + goto out; + } + + closid = rdtgrp->closid; + + list_for_each_entry(schema, &resctrl_schema_all, list) { + r = schema->res; + type = schema->conf_type; + sep = false; + seq_printf(s, "%*s:", max_name_width, schema->name); + list_for_each_entry(d, &r->domains, list) { + if (sep) + seq_putc(s, ';'); + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + size = 0; + } else { + if (is_mba_sc(r)) + ctrl = d->mbps_val[closid]; + else + ctrl = resctrl_arch_get_config(r, d, + closid, + type); + if 
(r->rid == RDT_RESOURCE_MBA || + r->rid == RDT_RESOURCE_SMBA) + size = ctrl; + else + size = rdtgroup_cbm_to_size(r, d, ctrl); + } + seq_printf(s, "%d=%u", d->id, size); + sep = true; + } + seq_putc(s, '\n'); + } + +out: + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +static void mondata_config_read(struct resctrl_mon_config_info *mon_info) +{ + smp_call_function_any(&mon_info->d->cpu_mask, + resctrl_arch_mon_event_config_read, mon_info, 1); +} + +static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid) +{ + struct resctrl_mon_config_info mon_info = {0}; + struct rdt_domain *dom; + bool sep = false; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + list_for_each_entry(dom, &r->domains, list) { + if (sep) + seq_puts(s, ";"); + + memset(&mon_info, 0, sizeof(struct resctrl_mon_config_info)); + mon_info.r = r; + mon_info.d = dom; + mon_info.evtid = evtid; + mondata_config_read(&mon_info); + + seq_printf(s, "%d=0x%02x", dom->id, mon_info.mon_config); + sep = true; + } + seq_puts(s, "\n"); + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + return 0; +} + +static int mbm_total_bytes_config_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct rdt_resource *r = of->kn->parent->priv; + + mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID); + + return 0; +} + +static int mbm_local_bytes_config_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct rdt_resource *r = of->kn->parent->priv; + + mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID); + + return 0; +} + +static int mbm_config_write_domain(struct rdt_resource *r, + struct rdt_domain *d, u32 evtid, u32 val) +{ + struct resctrl_mon_config_info mon_info = {0}; + + /* + * Read the current config value first. If both are the same then + * no need to write it again. 
+ */
+	mon_info.r = r;
+	mon_info.d = d;
+	mon_info.evtid = evtid;
+	mondata_config_read(&mon_info);
+	if (mon_info.mon_config == val)
+		return 0;
+
+	mon_info.mon_config = val;
+
+	/*
+	 * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the
+	 * domain. The MSRs offset from MSR_IA32_EVT_CFG_BASE
+	 * are scoped at the domain level. Writing any of these MSRs
+	 * on one CPU is observed by all the CPUs in the domain.
+	 */
+	smp_call_function_any(&d->cpu_mask, resctrl_arch_mon_event_config_write,
+			      &mon_info, 1);
+	if (mon_info.err) {
+		rdt_last_cmd_puts("Invalid event configuration\n");
+		return mon_info.err;
+	}
+
+	/*
+	 * When an Event Configuration is changed, the bandwidth counters
+	 * for all RMIDs and Events will be cleared by the hardware. The
+	 * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for
+	 * every RMID on the next read to any event for every RMID.
+	 * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62)
+	 * cleared while it is tracked by the hardware. Clear the
+	 * mbm_local and mbm_total counts for all the RMIDs.
+ */ + resctrl_arch_reset_rmid_all(r, d); + + return 0; +} + +static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) +{ + char *dom_str = NULL, *id_str; + unsigned long dom_id, val; + struct rdt_domain *d; + int err; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + +next: + if (!tok || tok[0] == '\0') + return 0; + + /* Start processing the strings for each domain */ + dom_str = strim(strsep(&tok, ";")); + id_str = strsep(&dom_str, "="); + + if (!id_str || kstrtoul(id_str, 10, &dom_id)) { + rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n"); + return -EINVAL; + } + + if (!dom_str || kstrtoul(dom_str, 16, &val)) { + rdt_last_cmd_puts("Non-numeric event configuration value\n"); + return -EINVAL; + } + + /* Value from user cannot be more than the supported set of events */ + if ((val & r->mbm_cfg_mask) != val) { + rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n", + r->mbm_cfg_mask); + return -EINVAL; + } + + list_for_each_entry(d, &r->domains, list) { + if (d->id == dom_id) { + err = mbm_config_write_domain(r, d, evtid, val); + if (err) + return err; + goto next; + } + } + + return -EINVAL; +} + +static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, + loff_t off) +{ + struct rdt_resource *r = of->kn->parent->priv; + int ret; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + rdt_last_cmd_clear(); + + buf[nbytes - 1] = '\0'; + + ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID); + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + return ret ?: nbytes; +} + +static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, + loff_t off) +{ + struct rdt_resource *r = of->kn->parent->priv; + int ret; + + /* Valid input requires a trailing newline */ + if 
(nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + rdt_last_cmd_clear(); + + buf[nbytes - 1] = '\0'; + + ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID); + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + return ret ?: nbytes; +} + +/* rdtgroup information files for one cache resource. */ +static struct rftype res_common_files[] = { + { + .name = "last_cmd_status", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_last_cmd_status_show, + .fflags = RFTYPE_TOP_INFO, + }, + { + .name = "num_closids", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_num_closids_show, + .fflags = RFTYPE_CTRL_INFO, + }, + { + .name = "mon_features", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_mon_features_show, + .fflags = RFTYPE_MON_INFO, + }, + { + .name = "num_rmids", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_num_rmids_show, + .fflags = RFTYPE_MON_INFO, + }, + { + .name = "cbm_mask", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_default_ctrl_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "min_cbm_bits", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_min_cbm_bits_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "shareable_bits", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_shareable_bits_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "bit_usage", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_bit_usage_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "min_bandwidth", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_min_bw_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, + }, + { + .name = "bandwidth_gran", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = 
rdt_bw_gran_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, + }, + { + .name = "delay_linear", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_delay_linear_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, + }, + /* + * Platform specific which (if any) capabilities are provided by + * thread_throttle_mode. Defer "fflags" initialization to platform + * discovery. + */ + { + .name = "thread_throttle_mode", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_thread_throttle_mode_show, + }, + { + .name = "max_threshold_occupancy", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = max_threshold_occ_write, + .seq_show = max_threshold_occ_show, + .fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "mbm_total_bytes_config", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = mbm_total_bytes_config_show, + .write = mbm_total_bytes_config_write, + }, + { + .name = "mbm_local_bytes_config", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = mbm_local_bytes_config_show, + .write = mbm_local_bytes_config_write, + }, + { + .name = "cpus", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_cpus_write, + .seq_show = rdtgroup_cpus_show, + .fflags = RFTYPE_BASE, + }, + { + .name = "cpus_list", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_cpus_write, + .seq_show = rdtgroup_cpus_show, + .flags = RFTYPE_FLAGS_CPUS_LIST, + .fflags = RFTYPE_BASE, + }, + { + .name = "tasks", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_tasks_write, + .seq_show = rdtgroup_tasks_show, + .fflags = RFTYPE_BASE, + }, + { + .name = "mon_hw_id", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_rmid_show, + .fflags = RFTYPE_MON_BASE | RFTYPE_DEBUG, + }, + { + .name = "schemata", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_schemata_write, + .seq_show = rdtgroup_schemata_show, + 
.fflags = RFTYPE_CTRL_BASE, + }, + { + .name = "mode", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_mode_write, + .seq_show = rdtgroup_mode_show, + .fflags = RFTYPE_CTRL_BASE, + }, + { + .name = "size", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_size_show, + .fflags = RFTYPE_CTRL_BASE, + }, + { + .name = "sparse_masks", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_has_sparse_bitmasks_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "ctrl_hw_id", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_closid_show, + .fflags = RFTYPE_CTRL_BASE | RFTYPE_DEBUG, + }, + +}; + +static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) +{ + struct rftype *rfts, *rft; + int ret, len; + + rfts = res_common_files; + len = ARRAY_SIZE(res_common_files); + + lockdep_assert_held(&rdtgroup_mutex); + + if (resctrl_debug) + fflags |= RFTYPE_DEBUG; + + for (rft = rfts; rft < rfts + len; rft++) { + if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { + ret = rdtgroup_add_file(kn, rft); + if (ret) + goto error; + } + } + + return 0; +error: + pr_warn("Failed to add %s, err=%d\n", rft->name, ret); + while (--rft >= rfts) { + if ((fflags & rft->fflags) == rft->fflags) + kernfs_remove_by_name(kn, rft->name); + } + return ret; +} + +static struct rftype *rdtgroup_get_rftype_by_name(const char *name) +{ + struct rftype *rfts, *rft; + int len; + + rfts = res_common_files; + len = ARRAY_SIZE(res_common_files); + + for (rft = rfts; rft < rfts + len; rft++) { + if (!strcmp(rft->name, name)) + return rft; + } + + return NULL; +} + +static void thread_throttle_mode_init(void) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + struct rftype *rft; + + if (!r->alloc_capable || + r->membw.throttle_mode == THREAD_THROTTLE_UNDEFINED) + return; + + rft = rdtgroup_get_rftype_by_name("thread_throttle_mode"); + if (!rft) + 
return; + + rft->fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB; +} + +void mbm_config_rftype_init(const char *config) +{ + struct rftype *rft; + + rft = rdtgroup_get_rftype_by_name(config); + if (rft) + rft->fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE; +} + +/** + * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file + * @r: The resource group with which the file is associated. + * @name: Name of the file + * + * The permissions of named resctrl file, directory, or link are modified + * to not allow read, write, or execute by any user. + * + * WARNING: This function is intended to communicate to the user that the + * resctrl file has been locked down - that it is not relevant to the + * particular state the system finds itself in. It should not be relied + * on to protect from user access because after the file's permissions + * are restricted the user can still change the permissions using chmod + * from the command line. + * + * Return: 0 on success, <0 on failure. + */ +int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name) +{ + struct iattr iattr = {.ia_valid = ATTR_MODE,}; + struct kernfs_node *kn; + int ret = 0; + + kn = kernfs_find_and_get_ns(r->kn, name, NULL); + if (!kn) + return -ENOENT; + + switch (kernfs_type(kn)) { + case KERNFS_DIR: + iattr.ia_mode = S_IFDIR; + break; + case KERNFS_FILE: + iattr.ia_mode = S_IFREG; + break; + case KERNFS_LINK: + iattr.ia_mode = S_IFLNK; + break; + } + + ret = kernfs_setattr(kn, &iattr); + kernfs_put(kn); + return ret; +} + +/** + * rdtgroup_kn_mode_restore - Restore user access to named resctrl file + * @r: The resource group with which the file is associated. + * @name: Name of the file + * @mask: Mask of permissions that should be restored + * + * Restore the permissions of the named file. If @name is a directory the + * permissions of its parent will be used. + * + * Return: 0 on success, <0 on failure. 
+ */ +int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, + umode_t mask) +{ + struct iattr iattr = {.ia_valid = ATTR_MODE,}; + struct kernfs_node *kn, *parent; + struct rftype *rfts, *rft; + int ret, len; + + rfts = res_common_files; + len = ARRAY_SIZE(res_common_files); + + for (rft = rfts; rft < rfts + len; rft++) { + if (!strcmp(rft->name, name)) + iattr.ia_mode = rft->mode & mask; + } + + kn = kernfs_find_and_get_ns(r->kn, name, NULL); + if (!kn) + return -ENOENT; + + switch (kernfs_type(kn)) { + case KERNFS_DIR: + parent = kernfs_get_parent(kn); + if (parent) { + iattr.ia_mode |= parent->mode; + kernfs_put(parent); + } + iattr.ia_mode |= S_IFDIR; + break; + case KERNFS_FILE: + iattr.ia_mode |= S_IFREG; + break; + case KERNFS_LINK: + iattr.ia_mode |= S_IFLNK; + break; + } + + ret = kernfs_setattr(kn, &iattr); + kernfs_put(kn); + return ret; +} + +static int rdtgroup_mkdir_info_resdir(void *priv, char *name, + unsigned long fflags) +{ + struct kernfs_node *kn_subdir; + int ret; + + kn_subdir = kernfs_create_dir(kn_info, name, + kn_info->mode, priv); + if (IS_ERR(kn_subdir)) + return PTR_ERR(kn_subdir); + + ret = rdtgroup_kn_set_ugid(kn_subdir); + if (ret) + return ret; + + ret = rdtgroup_add_files(kn_subdir, fflags); + if (!ret) + kernfs_activate(kn_subdir); + + return ret; +} + +static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) +{ + enum resctrl_res_level i; + struct resctrl_schema *s; + struct rdt_resource *r; + unsigned long fflags; + char name[32]; + int ret; + + /* create the directory */ + kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); + if (IS_ERR(kn_info)) + return PTR_ERR(kn_info); + + ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO); + if (ret) + goto out_destroy; + + /* loop over enabled controls, these are all alloc_capable */ + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + fflags = r->fflags | RFTYPE_CTRL_INFO; + ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags); + 
if (ret) + goto out_destroy; + } + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->mon_capable) + continue; + + fflags = r->fflags | RFTYPE_MON_INFO; + sprintf(name, "%s_MON", r->name); + ret = rdtgroup_mkdir_info_resdir(r, name, fflags); + if (ret) + goto out_destroy; + } + + ret = rdtgroup_kn_set_ugid(kn_info); + if (ret) + goto out_destroy; + + kernfs_activate(kn_info); + + return 0; + +out_destroy: + kernfs_remove(kn_info); + return ret; +} + +static int +mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, + char *name, struct kernfs_node **dest_kn) +{ + struct kernfs_node *kn; + int ret; + + /* create the directory */ + kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); + if (IS_ERR(kn)) + return PTR_ERR(kn); + + if (dest_kn) + *dest_kn = kn; + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) + goto out_destroy; + + kernfs_activate(kn); + + return 0; + +out_destroy: + kernfs_remove(kn); + return ret; +} + +static inline bool is_mba_linear(void) +{ + return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear; +} + +static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d) +{ + u32 num_closid = resctrl_arch_get_num_closid(r); + int cpu = cpumask_any(&d->cpu_mask); + int i; + + d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val), + GFP_KERNEL, cpu_to_node(cpu)); + if (!d->mbps_val) + return -ENOMEM; + + for (i = 0; i < num_closid; i++) + d->mbps_val[i] = MBA_MAX_MBPS; + + return 0; +} + +static void mba_sc_domain_destroy(struct rdt_resource *r, + struct rdt_domain *d) +{ + kfree(d->mbps_val); + d->mbps_val = NULL; +} + +/* + * MBA software controller is supported only if + * MBM is supported and MBA is in linear scale. 
+ */ +static bool supports_mba_mbps(void) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + + return (resctrl_arch_is_mbm_local_enabled() && + r->alloc_capable && is_mba_linear()); +} + +/* + * Enable or disable the MBA software controller + * which helps user specify bandwidth in MBps. + */ +static int set_mba_sc(bool mba_sc) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + u32 num_closid = resctrl_arch_get_num_closid(r); + struct rdt_domain *d; + int i; + + if (!supports_mba_mbps() || mba_sc == is_mba_sc(r)) + return -EINVAL; + + r->membw.mba_sc = mba_sc; + + list_for_each_entry(d, &r->domains, list) { + for (i = 0; i < num_closid; i++) + d->mbps_val[i] = MBA_MAX_MBPS; + } + + return 0; +} + +/* + * We don't allow rdtgroup directories to be created anywhere + * except the root directory. Thus when looking for the rdtgroup + * structure for a kernfs node we are either looking at a directory, + * in which case the rdtgroup structure is pointed at by the "priv" + * field, otherwise we have a file, and need only look to the parent + * to find the rdtgroup. + */ +static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn) +{ + if (kernfs_type(kn) == KERNFS_DIR) { + /* + * All the resource directories use "kn->priv" + * to point to the "struct rdtgroup" for the + * resource. "info" and its subdirectories don't + * have rdtgroup structures, so return NULL here. 
+ */ + if (kn == kn_info || kn->parent == kn_info) + return NULL; + else + return kn->priv; + } else { + return kn->parent->priv; + } +} + +static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn) +{ + atomic_inc(&rdtgrp->waitcount); + kernfs_break_active_protection(kn); +} + +static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn) +{ + if (atomic_dec_and_test(&rdtgrp->waitcount) && + (rdtgrp->flags & RDT_DELETED)) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) + rdtgroup_pseudo_lock_remove(rdtgrp); + kernfs_unbreak_active_protection(kn); + rdtgroup_remove(rdtgrp); + } else { + kernfs_unbreak_active_protection(kn); + } +} + +struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) +{ + struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); + + if (!rdtgrp) + return NULL; + + rdtgroup_kn_get(rdtgrp, kn); + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + /* Was this group deleted while we waited? 
*/ + if (rdtgrp->flags & RDT_DELETED) + return NULL; + + return rdtgrp; +} + +void rdtgroup_kn_unlock(struct kernfs_node *kn) +{ + struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); + + if (!rdtgrp) + return; + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + rdtgroup_kn_put(rdtgrp, kn); +} + +static int mkdir_mondata_all(struct kernfs_node *parent_kn, + struct rdtgroup *prgrp, + struct kernfs_node **mon_data_kn); + +static void rdt_disable_ctx(void) +{ + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); + set_mba_sc(false); + + resctrl_debug = false; +} + +static int rdt_enable_ctx(struct rdt_fs_context *ctx) +{ + int ret = 0; + + if (ctx->enable_cdpl2) { + ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true); + if (ret) + goto out_done; + } + + if (ctx->enable_cdpl3) { + ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true); + if (ret) + goto out_cdpl2; + } + + if (ctx->enable_mba_mbps) { + ret = set_mba_sc(true); + if (ret) + goto out_cdpl3; + } + + if (ctx->enable_debug) + resctrl_debug = true; + + return 0; + +out_cdpl3: + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); +out_cdpl2: + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); +out_done: + return ret; +} + +static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type) +{ + struct resctrl_schema *s; + const char *suffix = ""; + int ret, cl; + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + s->res = r; + s->num_closid = resctrl_arch_get_num_closid(r); + if (resctrl_arch_get_cdp_enabled(r->rid)) + s->num_closid /= 2; + + s->conf_type = type; + switch (type) { + case CDP_CODE: + suffix = "CODE"; + break; + case CDP_DATA: + suffix = "DATA"; + break; + case CDP_NONE: + suffix = ""; + break; + } + + ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix); + if (ret >= sizeof(s->name)) { + kfree(s); + return -EINVAL; + } + + cl = strlen(s->name); + + /* + * If CDP is 
supported by this resource, but not enabled, + * include the suffix. This ensures the tabular format of the + * schemata file does not change between mounts of the filesystem. + */ + if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid)) + cl += 4; + + if (cl > max_name_width) + max_name_width = cl; + + /* + * Choose a width for the resource data based on the resource that has + * widest name and cbm. + */ + max_data_width = max(max_data_width, r->data_width); + + INIT_LIST_HEAD(&s->list); + list_add(&s->list, &resctrl_schema_all); + + return 0; +} + +static int schemata_list_create(void) +{ + enum resctrl_res_level i; + struct rdt_resource *r; + int ret = 0; + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + + if (resctrl_arch_get_cdp_enabled(r->rid)) { + ret = schemata_list_add(r, CDP_CODE); + if (ret) + break; + + ret = schemata_list_add(r, CDP_DATA); + } else { + ret = schemata_list_add(r, CDP_NONE); + } + + if (ret) + break; + } + + return ret; +} + +static void schemata_list_destroy(void) +{ + struct resctrl_schema *s, *tmp; + + list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) { + list_del(&s->list); + kfree(s); + } +} + +static int rdt_get_tree(struct fs_context *fc) +{ + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); + struct rdt_fs_context *ctx = rdt_fc2context(fc); + unsigned long flags = RFTYPE_CTRL_BASE; + struct rdt_domain *dom; + int ret; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + /* + * resctrl file system can only be mounted once. 
+ */ + if (resctrl_mounted) { + ret = -EBUSY; + goto out; + } + + ret = rdtgroup_setup_root(ctx); + if (ret) + goto out; + + ret = rdt_enable_ctx(ctx); + if (ret) + goto out_root; + + ret = schemata_list_create(); + if (ret) { + schemata_list_destroy(); + goto out_ctx; + } + + closid_init(); + + if (resctrl_arch_mon_capable()) + flags |= RFTYPE_MON; + + ret = rdtgroup_add_files(rdtgroup_default.kn, flags); + if (ret) + goto out_schemata_free; + + kernfs_activate(rdtgroup_default.kn); + + ret = rdtgroup_create_info_dir(rdtgroup_default.kn); + if (ret < 0) + goto out_schemata_free; + + if (resctrl_arch_mon_capable()) { + ret = mongroup_create_dir(rdtgroup_default.kn, + &rdtgroup_default, "mon_groups", + &kn_mongrp); + if (ret < 0) + goto out_info; + + ret = mkdir_mondata_all(rdtgroup_default.kn, + &rdtgroup_default, &kn_mondata); + if (ret < 0) + goto out_mongrp; + rdtgroup_default.mon.mon_data_kn = kn_mondata; + } + + ret = rdt_pseudo_lock_init(); + if (ret) + goto out_mondata; + + ret = kernfs_get_tree(fc); + if (ret < 0) + goto out_psl; + + if (resctrl_arch_alloc_capable()) + resctrl_arch_enable_alloc(); + if (resctrl_arch_mon_capable()) + resctrl_arch_enable_mon(); + + if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable()) + resctrl_mounted = true; + + if (resctrl_is_mbm_enabled()) { + list_for_each_entry(dom, &l3->domains, list) + mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL, + RESCTRL_PICK_ANY_CPU); + } + + goto out; + +out_psl: + rdt_pseudo_lock_release(); +out_mondata: + if (resctrl_arch_mon_capable()) + kernfs_remove(kn_mondata); +out_mongrp: + if (resctrl_arch_mon_capable()) + kernfs_remove(kn_mongrp); +out_info: + kernfs_remove(kn_info); +out_schemata_free: + schemata_list_destroy(); +out_ctx: + rdt_disable_ctx(); +out_root: + rdtgroup_destroy_root(); +out: + rdt_last_cmd_clear(); + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + return ret; +} + +enum rdt_param { + Opt_cdp, + Opt_cdpl2, + Opt_mba_mbps, + Opt_debug, + 
nr__rdt_params +}; + +static const struct fs_parameter_spec rdt_fs_parameters[] = { + fsparam_flag("cdp", Opt_cdp), + fsparam_flag("cdpl2", Opt_cdpl2), + fsparam_flag("mba_MBps", Opt_mba_mbps), + fsparam_flag("debug", Opt_debug), + {} +}; + +static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) +{ + struct rdt_fs_context *ctx = rdt_fc2context(fc); + struct fs_parse_result result; + int opt; + + opt = fs_parse(fc, rdt_fs_parameters, param, &result); + if (opt < 0) + return opt; + + switch (opt) { + case Opt_cdp: + ctx->enable_cdpl3 = true; + return 0; + case Opt_cdpl2: + ctx->enable_cdpl2 = true; + return 0; + case Opt_mba_mbps: + if (!supports_mba_mbps()) + return -EINVAL; + ctx->enable_mba_mbps = true; + return 0; + case Opt_debug: + ctx->enable_debug = true; + return 0; + } + + return -EINVAL; +} + +static void rdt_fs_context_free(struct fs_context *fc) +{ + struct rdt_fs_context *ctx = rdt_fc2context(fc); + + kernfs_free_fs_context(fc); + kfree(ctx); +} + +static const struct fs_context_operations rdt_fs_context_ops = { + .free = rdt_fs_context_free, + .parse_param = rdt_parse_param, + .get_tree = rdt_get_tree, +}; + +static int rdt_init_fs_context(struct fs_context *fc) +{ + struct rdt_fs_context *ctx; + + ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; + fc->fs_private = &ctx->kfc; + fc->ops = &rdt_fs_context_ops; + put_user_ns(fc->user_ns); + fc->user_ns = get_user_ns(&init_user_ns); + fc->global = true; + return 0; +} + +/* + * Move tasks from one to the other group. If @from is NULL, then all tasks + * in the systems are moved unconditionally (used for teardown). + * + * If @mask is not NULL the cpus on which moved tasks are running are set + * in that mask so the update smp function call is restricted to affected + * cpus. 
+ */ +static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, + struct cpumask *mask) +{ + struct task_struct *p, *t; + + read_lock(&tasklist_lock); + for_each_process_thread(p, t) { + if (!from || is_closid_match(t, from) || + is_rmid_match(t, from)) { + resctrl_arch_set_closid_rmid(t, to->closid, + to->mon.rmid); + + /* + * Order the closid/rmid stores above before the loads + * in task_curr(). This pairs with the full barrier + * between the rq->curr update and + * resctrl_arch_sched_in() during context switch. + */ + smp_mb(); + + /* + * If the task is on a CPU, set the CPU in the mask. + * The detection is inaccurate as tasks might move or + * schedule before the smp function call takes place. + * In such a case the function call is pointless, but + * there is no other side effect. + */ + if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t)) + cpumask_set_cpu(task_cpu(t), mask); + } + } + read_unlock(&tasklist_lock); +} + +static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) +{ + struct rdtgroup *sentry, *stmp; + struct list_head *head; + + head = &rdtgrp->mon.crdtgrp_list; + list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { + free_rmid(sentry->closid, sentry->mon.rmid); + list_del(&sentry->mon.crdtgrp_list); + + if (atomic_read(&sentry->waitcount) != 0) + sentry->flags = RDT_DELETED; + else + rdtgroup_remove(sentry); + } +} + +/* + * Forcibly remove all of subdirectories under root. 
+ */ +static void rmdir_all_sub(void) +{ + struct rdtgroup *rdtgrp, *tmp; + + /* Move all tasks to the default resource group */ + rdt_move_group_tasks(NULL, &rdtgroup_default, NULL); + + list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { + /* Free any child rmids */ + free_all_child_rdtgrp(rdtgrp); + + /* Remove each rdtgroup other than root */ + if (rdtgrp == &rdtgroup_default) + continue; + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) + rdtgroup_pseudo_lock_remove(rdtgrp); + + /* + * Give any CPUs back to the default group. We cannot copy + * cpu_online_mask because a CPU might have executed the + * offline callback already, but is still marked online. + */ + cpumask_or(&rdtgroup_default.cpu_mask, + &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); + + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + + kernfs_remove(rdtgrp->kn); + list_del(&rdtgrp->rdtgroup_list); + + if (atomic_read(&rdtgrp->waitcount) != 0) + rdtgrp->flags = RDT_DELETED; + else + rdtgroup_remove(rdtgrp); + } + /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ + update_closid_rmid(cpu_online_mask, &rdtgroup_default); + + kernfs_remove(kn_info); + kernfs_remove(kn_mongrp); + kernfs_remove(kn_mondata); +} + +static void rdt_kill_sb(struct super_block *sb) +{ + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + rdt_disable_ctx(); + + /* Put everything back to default values. 
*/ + resctrl_arch_reset_resources(); + + rmdir_all_sub(); + rdt_pseudo_lock_release(); + rdtgroup_default.mode = RDT_MODE_SHAREABLE; + schemata_list_destroy(); + rdtgroup_destroy_root(); + if (resctrl_arch_alloc_capable()) + resctrl_arch_disable_alloc(); + if (resctrl_arch_mon_capable()) + resctrl_arch_disable_mon(); + resctrl_mounted = false; + kernfs_kill_sb(sb); + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); +} + +static struct file_system_type rdt_fs_type = { + .name = "resctrl", + .init_fs_context = rdt_init_fs_context, + .parameters = rdt_fs_parameters, + .kill_sb = rdt_kill_sb, +}; + +static int mon_addfile(struct kernfs_node *parent_kn, const char *name, + void *priv) +{ + struct kernfs_node *kn; + int ret = 0; + + kn = __kernfs_create_file(parent_kn, name, 0444, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, + &kf_mondata_ops, priv, NULL, NULL); + if (IS_ERR(kn)) + return PTR_ERR(kn); + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) { + kernfs_remove(kn); + return ret; + } + + return ret; +} + +/* + * Remove all subdirectories of mon_data of ctrl_mon groups + * and monitor groups with given domain id. 
+ */ +static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, + unsigned int dom_id) +{ + struct rdtgroup *prgrp, *crgrp; + char name[32]; + + list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { + sprintf(name, "mon_%s_%02d", r->name, dom_id); + kernfs_remove_by_name(prgrp->mon.mon_data_kn, name); + + list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) + kernfs_remove_by_name(crgrp->mon.mon_data_kn, name); + } +} + +static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, + struct rdt_domain *d, + struct rdt_resource *r, struct rdtgroup *prgrp) +{ + union mon_data_bits priv; + struct kernfs_node *kn; + struct mon_evt *mevt; + struct rmid_read rr; + char name[32]; + int ret; + + sprintf(name, "mon_%s_%02d", r->name, d->id); + /* create the directory */ + kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); + if (IS_ERR(kn)) + return PTR_ERR(kn); + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) + goto out_destroy; + + if (WARN_ON(list_empty(&r->evt_list))) { + ret = -EPERM; + goto out_destroy; + } + + priv.u.rid = r->rid; + priv.u.domid = d->id; + list_for_each_entry(mevt, &r->evt_list, list) { + priv.u.evtid = mevt->evtid; + ret = mon_addfile(kn, mevt->name, priv.priv); + if (ret) + goto out_destroy; + + if (resctrl_is_mbm_event(mevt->evtid)) + mon_event_read(&rr, r, d, prgrp, mevt->evtid, true); + } + kernfs_activate(kn); + return 0; + +out_destroy: + kernfs_remove(kn); + return ret; +} + +/* + * Add all subdirectories of mon_data for "ctrl_mon" groups + * and "monitor" groups with given domain id. 
+ */ +static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, + struct rdt_domain *d) +{ + struct kernfs_node *parent_kn; + struct rdtgroup *prgrp, *crgrp; + struct list_head *head; + + list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { + parent_kn = prgrp->mon.mon_data_kn; + mkdir_mondata_subdir(parent_kn, d, r, prgrp); + + head = &prgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) { + parent_kn = crgrp->mon.mon_data_kn; + mkdir_mondata_subdir(parent_kn, d, r, crgrp); + } + } +} + +static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, + struct rdt_resource *r, + struct rdtgroup *prgrp) +{ + struct rdt_domain *dom; + int ret; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + list_for_each_entry(dom, &r->domains, list) { + ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp); + if (ret) + return ret; + } + + return 0; +} + +/* + * This creates a directory mon_data which contains the monitored data. + * + * mon_data has one directory for each domain which are named + * in the format mon__. For ex: A mon_data + * with L3 domain looks as below: + * ./mon_data: + * mon_L3_00 + * mon_L3_01 + * mon_L3_02 + * ... + * + * Each domain directory has one file per event: + * ./mon_L3_00/: + * llc_occupancy + * + */ +static int mkdir_mondata_all(struct kernfs_node *parent_kn, + struct rdtgroup *prgrp, + struct kernfs_node **dest_kn) +{ + enum resctrl_res_level i; + struct rdt_resource *r; + struct kernfs_node *kn; + int ret; + + /* + * Create the mon_data directory first. + */ + ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn); + if (ret) + return ret; + + if (dest_kn) + *dest_kn = kn; + + /* + * Create the subdirectories for each domain. 
Note that all events + * in a domain like L3 are grouped into a resource whose domain is L3 + */ + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->mon_capable) + continue; + + ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); + if (ret) + goto out_destroy; + } + + return 0; + +out_destroy: + kernfs_remove(kn); + return ret; +} + +/** + * cbm_ensure_valid - Enforce validity on provided CBM + * @_val: Candidate CBM + * @r: RDT resource to which the CBM belongs + * + * The provided CBM represents all cache portions available for use. This + * may be represented by a bitmap that does not consist of contiguous ones + * and thus be an invalid CBM. + * Here the provided CBM is forced to be a valid CBM by only considering + * the first set of contiguous bits as valid and clearing all bits. + * The intention here is to provide a valid default CBM with which a new + * resource group is initialized. The user can follow this with a + * modification to the CBM if the default does not satisfy the + * requirements. + */ +static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r) +{ + unsigned int cbm_len = r->cache.cbm_len; + unsigned long first_bit, zero_bit; + unsigned long val = _val; + + if (!val) + return 0; + + first_bit = find_first_bit(&val, cbm_len); + zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); + + /* Clear any remaining bits to ensure contiguous region */ + bitmap_clear(&val, zero_bit, cbm_len - zero_bit); + return (u32)val; +} + +/* + * Initialize cache resources per RDT domain + * + * Set the RDT domain up to start off with all usable allocations. That is, + * all shareable and unused bits. All-zero CBM is invalid. 
+ */ +static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s, + u32 closid) +{ + enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); + enum resctrl_conf_type t = s->conf_type; + struct resctrl_staged_config *cfg; + struct rdt_resource *r = s->res; + u32 used_b = 0, unused_b = 0; + unsigned long tmp_cbm; + enum rdtgrp_mode mode; + u32 peer_ctl, ctrl_val; + int i; + + cfg = &d->staged_config[t]; + cfg->have_new_ctrl = false; + cfg->new_ctrl = r->cache.shareable_bits; + used_b = r->cache.shareable_bits; + for (i = 0; i < closids_supported(); i++) { + if (closid_allocated(i) && i != closid) { + mode = rdtgroup_mode_by_closid(i); + if (mode == RDT_MODE_PSEUDO_LOCKSETUP) + /* + * ctrl values for locksetup aren't relevant + * until the schemata is written, and the mode + * becomes RDT_MODE_PSEUDO_LOCKED. + */ + continue; + /* + * If CDP is active include peer domain's + * usage to ensure there is no overlap + * with an exclusive group. + */ + if (resctrl_arch_get_cdp_enabled(r->rid)) + peer_ctl = resctrl_arch_get_config(r, d, i, + peer_type); + else + peer_ctl = 0; + ctrl_val = resctrl_arch_get_config(r, d, i, + s->conf_type); + used_b |= ctrl_val | peer_ctl; + if (mode == RDT_MODE_SHAREABLE) + cfg->new_ctrl |= ctrl_val | peer_ctl; + } + } + if (d->plr && d->plr->cbm > 0) + used_b |= d->plr->cbm; + unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); + unused_b &= BIT_MASK(r->cache.cbm_len) - 1; + cfg->new_ctrl |= unused_b; + /* + * Force the initial CBM to be valid, user can + * modify the CBM based on system availability. + */ + cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r); + /* + * Assign the u32 CBM to an unsigned long to ensure that + * bitmap_weight() does not access out-of-bound memory. 
+ */ + tmp_cbm = cfg->new_ctrl; + if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { + rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id); + return -ENOSPC; + } + cfg->have_new_ctrl = true; + + return 0; +} + +/* + * Initialize cache resources with default values. + * + * A new RDT group is being created on an allocation capable (CAT) + * supporting system. Set this group up to start off with all usable + * allocations. + * + * If there are no more shareable bits available on any domain then + * the entire allocation will fail. + */ +static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid) +{ + struct rdt_domain *d; + int ret; + + list_for_each_entry(d, &s->res->domains, list) { + ret = __init_one_rdt_domain(d, s, closid); + if (ret < 0) + return ret; + } + + return 0; +} + +/* Initialize MBA resource with default values. */ +static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid) +{ + struct resctrl_staged_config *cfg; + struct rdt_domain *d; + + list_for_each_entry(d, &r->domains, list) { + if (is_mba_sc(r)) { + d->mbps_val[closid] = MBA_MAX_MBPS; + continue; + } + + cfg = &d->staged_config[CDP_NONE]; + cfg->new_ctrl = r->default_ctrl; + cfg->have_new_ctrl = true; + } +} + +/* Initialize the RDT group's allocations. 
*/ +static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) +{ + struct resctrl_schema *s; + struct rdt_resource *r; + int ret = 0; + + rdt_staged_configs_clear(); + + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + if (r->rid == RDT_RESOURCE_MBA || + r->rid == RDT_RESOURCE_SMBA) { + rdtgroup_init_mba(r, rdtgrp->closid); + if (is_mba_sc(r)) + continue; + } else { + ret = rdtgroup_init_cat(s, rdtgrp->closid); + if (ret < 0) + goto out; + } + + ret = resctrl_arch_update_domains(r, rdtgrp->closid); + if (ret < 0) { + rdt_last_cmd_puts("Failed to initialize allocations\n"); + goto out; + } + + } + + rdtgrp->mode = RDT_MODE_SHAREABLE; + +out: + rdt_staged_configs_clear(); + return ret; +} + +static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) +{ + int ret; + + if (!resctrl_arch_mon_capable()) + return 0; + + ret = alloc_rmid(rdtgrp->closid); + if (ret < 0) { + rdt_last_cmd_puts("Out of RMIDs\n"); + return ret; + } + rdtgrp->mon.rmid = ret; + + ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn); + if (ret) { + rdt_last_cmd_puts("kernfs subdir error\n"); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + return ret; + } + + return 0; +} + +static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp) +{ + if (resctrl_arch_mon_capable()) + free_rmid(rgrp->closid, rgrp->mon.rmid); +} + +static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, + const char *name, umode_t mode, + enum rdt_group_type rtype, struct rdtgroup **r) +{ + struct rdtgroup *prdtgrp, *rdtgrp; + unsigned long files = 0; + struct kernfs_node *kn; + int ret; + + prdtgrp = rdtgroup_kn_lock_live(parent_kn); + if (!prdtgrp) { + ret = -ENODEV; + goto out_unlock; + } + + if (rtype == RDTMON_GROUP && + (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { + ret = -EINVAL; + rdt_last_cmd_puts("Pseudo-locking in progress\n"); + goto out_unlock; + } + + /* allocate the rdtgroup. 
*/ + rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); + if (!rdtgrp) { + ret = -ENOSPC; + rdt_last_cmd_puts("Kernel out of memory\n"); + goto out_unlock; + } + *r = rdtgrp; + rdtgrp->mon.parent = prdtgrp; + rdtgrp->type = rtype; + INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); + + /* kernfs creates the directory for rdtgrp */ + kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); + if (IS_ERR(kn)) { + ret = PTR_ERR(kn); + rdt_last_cmd_puts("kernfs create error\n"); + goto out_free_rgrp; + } + rdtgrp->kn = kn; + + /* + * kernfs_remove() will drop the reference count on "kn" which + * will free it. But we still need it to stick around for the + * rdtgroup_kn_unlock(kn) call. Take one extra reference here, + * which will be dropped by kernfs_put() in rdtgroup_remove(). + */ + kernfs_get(kn); + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) { + rdt_last_cmd_puts("kernfs perm error\n"); + goto out_destroy; + } + + if (rtype == RDTCTRL_GROUP) { + files = RFTYPE_BASE | RFTYPE_CTRL; + if (resctrl_arch_mon_capable()) + files |= RFTYPE_MON; + } else { + files = RFTYPE_BASE | RFTYPE_MON; + } + + ret = rdtgroup_add_files(kn, files); + if (ret) { + rdt_last_cmd_puts("kernfs fill error\n"); + goto out_destroy; + } + + /* + * The caller unlocks the parent_kn upon success. + */ + return 0; + +out_destroy: + kernfs_put(rdtgrp->kn); + kernfs_remove(rdtgrp->kn); +out_free_rgrp: + kfree(rdtgrp); +out_unlock: + rdtgroup_kn_unlock(parent_kn); + return ret; +} + +static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) +{ + kernfs_remove(rgrp->kn); + rdtgroup_remove(rgrp); +} + +/* + * Create a monitor group under "mon_groups" directory of a control + * and monitor group(ctrl_mon). This is a resource group + * to monitor a subset of tasks and cpus in its parent ctrl_mon group. 
+ */ +static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, + const char *name, umode_t mode) +{ + struct rdtgroup *rdtgrp, *prgrp; + int ret; + + ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp); + if (ret) + return ret; + + prgrp = rdtgrp->mon.parent; + rdtgrp->closid = prgrp->closid; + + ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); + if (ret) { + mkdir_rdt_prepare_clean(rdtgrp); + goto out_unlock; + } + + kernfs_activate(rdtgrp->kn); + + /* + * Add the rdtgrp to the list of rdtgrps the parent + * ctrl_mon group has to track. + */ + list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); + +out_unlock: + rdtgroup_kn_unlock(parent_kn); + return ret; +} + +/* + * These are rdtgroups created under the root directory. Can be used + * to allocate and monitor resources. + */ +static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, + const char *name, umode_t mode) +{ + struct rdtgroup *rdtgrp; + struct kernfs_node *kn; + u32 closid; + int ret; + + ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp); + if (ret) + return ret; + + kn = rdtgrp->kn; + ret = closid_alloc(); + if (ret < 0) { + rdt_last_cmd_puts("Out of CLOSIDs\n"); + goto out_common_fail; + } + closid = ret; + ret = 0; + + rdtgrp->closid = closid; + + ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); + if (ret) + goto out_closid_free; + + kernfs_activate(rdtgrp->kn); + + ret = rdtgroup_init_alloc(rdtgrp); + if (ret < 0) + goto out_rmid_free; + + list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); + + if (resctrl_arch_mon_capable()) { + /* + * Create an empty mon_groups directory to hold the subset + * of tasks and cpus to monitor. 
+ */ + ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL); + if (ret) { + rdt_last_cmd_puts("kernfs subdir error\n"); + goto out_del_list; + } + } + + goto out_unlock; + +out_del_list: + list_del(&rdtgrp->rdtgroup_list); +out_rmid_free: + mkdir_rdt_prepare_rmid_free(rdtgrp); +out_closid_free: + closid_free(closid); +out_common_fail: + mkdir_rdt_prepare_clean(rdtgrp); +out_unlock: + rdtgroup_kn_unlock(parent_kn); + return ret; +} + +/* + * We allow creating mon groups only with in a directory called "mon_groups" + * which is present in every ctrl_mon group. Check if this is a valid + * "mon_groups" directory. + * + * 1. The directory should be named "mon_groups". + * 2. The mon group itself should "not" be named "mon_groups". + * This makes sure "mon_groups" directory always has a ctrl_mon group + * as parent. + */ +static bool is_mon_groups(struct kernfs_node *kn, const char *name) +{ + return (!strcmp(kn->name, "mon_groups") && + strcmp(name, "mon_groups")); +} + +static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, + umode_t mode) +{ + /* Do not accept '\n' to avoid unparsable situation. */ + if (strchr(name, '\n')) + return -EINVAL; + + /* + * If the parent directory is the root directory and RDT + * allocation is supported, add a control and monitoring + * subdirectory + */ + if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn) + return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); + + /* + * If RDT monitoring is supported and the parent directory is a valid + * "mon_groups" directory, add a monitoring subdirectory. 
+ */ + if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name)) + return rdtgroup_mkdir_mon(parent_kn, name, mode); + + return -EPERM; +} + +static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) +{ + struct rdtgroup *prdtgrp = rdtgrp->mon.parent; + u32 closid, rmid; + int cpu; + + /* Give any tasks back to the parent group */ + rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); + + /* Update per cpu rmid of the moved CPUs first */ + closid = rdtgrp->closid; + rmid = prdtgrp->mon.rmid; + for_each_cpu(cpu, &rdtgrp->cpu_mask) + resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); + + /* + * Update the MSR on moved CPUs and CPUs which have moved + * task running on them. + */ + cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); + update_closid_rmid(tmpmask, NULL); + + rdtgrp->flags = RDT_DELETED; + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + + /* + * Remove the rdtgrp from the parent ctrl_mon group's list + */ + WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); + list_del(&rdtgrp->mon.crdtgrp_list); + + kernfs_remove(rdtgrp->kn); + + return 0; +} + +static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp) +{ + rdtgrp->flags = RDT_DELETED; + list_del(&rdtgrp->rdtgroup_list); + + kernfs_remove(rdtgrp->kn); + return 0; +} + +static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) +{ + u32 closid, rmid; + int cpu; + + /* Give any tasks back to the default group */ + rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); + + /* Give any CPUs back to the default group */ + cpumask_or(&rdtgroup_default.cpu_mask, + &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); + + /* Update per cpu closid and rmid of the moved CPUs first */ + closid = rdtgroup_default.closid; + rmid = rdtgroup_default.mon.rmid; + for_each_cpu(cpu, &rdtgrp->cpu_mask) + resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); + + /* + * Update the MSR on moved CPUs and CPUs which have moved + * task running on them. 
+ */ + cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); + update_closid_rmid(tmpmask, NULL); + + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + closid_free(rdtgrp->closid); + + rdtgroup_ctrl_remove(rdtgrp); + + /* + * Free all the child monitor group rmids. + */ + free_all_child_rdtgrp(rdtgrp); + + return 0; +} + +static int rdtgroup_rmdir(struct kernfs_node *kn) +{ + struct kernfs_node *parent_kn = kn->parent; + struct rdtgroup *rdtgrp; + cpumask_var_t tmpmask; + int ret = 0; + + if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) + return -ENOMEM; + + rdtgrp = rdtgroup_kn_lock_live(kn); + if (!rdtgrp) { + ret = -EPERM; + goto out; + } + + /* + * If the rdtgroup is a ctrl_mon group and parent directory + * is the root directory, remove the ctrl_mon group. + * + * If the rdtgroup is a mon group and parent directory + * is a valid "mon_groups" directory, remove the mon group. + */ + if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && + rdtgrp != &rdtgroup_default) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + ret = rdtgroup_ctrl_remove(rdtgrp); + } else { + ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask); + } + } else if (rdtgrp->type == RDTMON_GROUP && + is_mon_groups(parent_kn, kn->name)) { + ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask); + } else { + ret = -EPERM; + } + +out: + rdtgroup_kn_unlock(kn); + free_cpumask_var(tmpmask); + return ret; +} + +/** + * mongrp_reparent() - replace parent CTRL_MON group of a MON group + * @rdtgrp: the MON group whose parent should be replaced + * @new_prdtgrp: replacement parent CTRL_MON group for @rdtgrp + * @cpus: cpumask provided by the caller for use during this call + * + * Replaces the parent CTRL_MON group for a MON group, resulting in all member + * tasks' CLOSID immediately changing to that of the new parent group. + * Monitoring data for the group is unaffected by this operation. 
+ */ +static void mongrp_reparent(struct rdtgroup *rdtgrp, + struct rdtgroup *new_prdtgrp, + cpumask_var_t cpus) +{ + struct rdtgroup *prdtgrp = rdtgrp->mon.parent; + + WARN_ON(rdtgrp->type != RDTMON_GROUP); + WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP); + + /* Nothing to do when simply renaming a MON group. */ + if (prdtgrp == new_prdtgrp) + return; + + WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); + list_move_tail(&rdtgrp->mon.crdtgrp_list, + &new_prdtgrp->mon.crdtgrp_list); + + rdtgrp->mon.parent = new_prdtgrp; + rdtgrp->closid = new_prdtgrp->closid; + + /* Propagate updated closid to all tasks in this group. */ + rdt_move_group_tasks(rdtgrp, rdtgrp, cpus); + + update_closid_rmid(cpus, NULL); +} + +static int rdtgroup_rename(struct kernfs_node *kn, + struct kernfs_node *new_parent, const char *new_name) +{ + struct rdtgroup *new_prdtgrp; + struct rdtgroup *rdtgrp; + cpumask_var_t tmpmask; + int ret; + + rdtgrp = kernfs_to_rdtgroup(kn); + new_prdtgrp = kernfs_to_rdtgroup(new_parent); + if (!rdtgrp || !new_prdtgrp) + return -ENOENT; + + /* Release both kernfs active_refs before obtaining rdtgroup mutex. */ + rdtgroup_kn_get(rdtgrp, kn); + rdtgroup_kn_get(new_prdtgrp, new_parent); + + mutex_lock(&rdtgroup_mutex); + + rdt_last_cmd_clear(); + + /* + * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if + * either kernfs_node is a file. 
+ */ + if (kernfs_type(kn) != KERNFS_DIR || + kernfs_type(new_parent) != KERNFS_DIR) { + rdt_last_cmd_puts("Source and destination must be directories"); + ret = -EPERM; + goto out; + } + + if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) { + ret = -ENOENT; + goto out; + } + + if (rdtgrp->type != RDTMON_GROUP || !kn->parent || + !is_mon_groups(kn->parent, kn->name)) { + rdt_last_cmd_puts("Source must be a MON group\n"); + ret = -EPERM; + goto out; + } + + if (!is_mon_groups(new_parent, new_name)) { + rdt_last_cmd_puts("Destination must be a mon_groups subdirectory\n"); + ret = -EPERM; + goto out; + } + + /* + * If the MON group is monitoring CPUs, the CPUs must be assigned to the + * current parent CTRL_MON group and therefore cannot be assigned to + * the new parent, making the move illegal. + */ + if (!cpumask_empty(&rdtgrp->cpu_mask) && + rdtgrp->mon.parent != new_prdtgrp) { + rdt_last_cmd_puts("Cannot move a MON group that monitors CPUs\n"); + ret = -EPERM; + goto out; + } + + /* + * Allocate the cpumask for use in mongrp_reparent() to avoid the + * possibility of failing to allocate it after kernfs_rename() has + * succeeded. + */ + if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) { + ret = -ENOMEM; + goto out; + } + + /* + * Perform all input validation and allocations needed to ensure + * mongrp_reparent() will succeed before calling kernfs_rename(), + * otherwise it would be necessary to revert this call if + * mongrp_reparent() failed. 
+ */ + ret = kernfs_rename(kn, new_parent, new_name); + if (!ret) + mongrp_reparent(rdtgrp, new_prdtgrp, tmpmask); + + free_cpumask_var(tmpmask); + +out: + mutex_unlock(&rdtgroup_mutex); + rdtgroup_kn_put(rdtgrp, kn); + rdtgroup_kn_put(new_prdtgrp, new_parent); + return ret; +} + +static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) +{ + if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) + seq_puts(seq, ",cdp"); + + if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) + seq_puts(seq, ",cdpl2"); + + if (is_mba_sc(resctrl_arch_get_resource(RDT_RESOURCE_MBA))) + seq_puts(seq, ",mba_MBps"); + + if (resctrl_debug) + seq_puts(seq, ",debug"); + + return 0; +} + +static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { + .mkdir = rdtgroup_mkdir, + .rmdir = rdtgroup_rmdir, + .rename = rdtgroup_rename, + .show_options = rdtgroup_show_options, +}; + +static int rdtgroup_setup_root(struct rdt_fs_context *ctx) +{ + rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, + KERNFS_ROOT_CREATE_DEACTIVATED | + KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, + &rdtgroup_default); + if (IS_ERR(rdt_root)) + return PTR_ERR(rdt_root); + + ctx->kfc.root = rdt_root; + rdtgroup_default.kn = kernfs_root_to_node(rdt_root); + + return 0; +} + +static void rdtgroup_destroy_root(void) +{ + kernfs_destroy_root(rdt_root); + rdtgroup_default.kn = NULL; +} + +static void rdtgroup_setup_default(void) +{ + mutex_lock(&rdtgroup_mutex); + + rdtgroup_default.closid = RESCTRL_RESERVED_CLOSID; + rdtgroup_default.mon.rmid = RESCTRL_RESERVED_RMID; + rdtgroup_default.type = RDTCTRL_GROUP; + INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); + + list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); + + mutex_unlock(&rdtgroup_mutex); +} + +static void domain_destroy_mon_state(struct rdt_domain *d) +{ + bitmap_free(d->rmid_busy_llc); + kfree(d->mbm_total); + kfree(d->mbm_local); +} + +void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) +{ + 
mutex_lock(&rdtgroup_mutex); + + if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) + mba_sc_domain_destroy(r, d); + + if (!r->mon_capable) + goto out_unlock; + + /* + * If resctrl is mounted, remove all the + * per domain monitor data directories. + */ + if (resctrl_mounted && resctrl_arch_mon_capable()) + rmdir_mondata_subdir_allrdtgrp(r, d->id); + + if (resctrl_is_mbm_enabled()) + cancel_delayed_work(&d->mbm_over); + if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) { + /* + * When a package is going down, forcefully + * decrement rmid->ebusy. There is no way to know + * that the L3 was flushed and hence may lead to + * incorrect counts in rare scenarios, but leaving + * the RMID as busy creates RMID leaks if the + * package never comes back. + */ + __check_limbo(d, true); + cancel_delayed_work(&d->cqm_limbo); + } + + domain_destroy_mon_state(d); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); +} + +static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) +{ + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + size_t tsize; + + if (resctrl_arch_is_llc_occupancy_enabled()) { + d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL); + if (!d->rmid_busy_llc) + return -ENOMEM; + } + if (resctrl_arch_is_mbm_total_enabled()) { + tsize = sizeof(*d->mbm_total); + d->mbm_total = kcalloc(idx_limit, tsize, GFP_KERNEL); + if (!d->mbm_total) { + bitmap_free(d->rmid_busy_llc); + return -ENOMEM; + } + } + if (resctrl_arch_is_mbm_local_enabled()) { + tsize = sizeof(*d->mbm_local); + d->mbm_local = kcalloc(idx_limit, tsize, GFP_KERNEL); + if (!d->mbm_local) { + bitmap_free(d->rmid_busy_llc); + kfree(d->mbm_total); + return -ENOMEM; + } + } + + return 0; +} + +int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) +{ + int err = 0; + + mutex_lock(&rdtgroup_mutex); + + if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) { + /* RDT_RESOURCE_MBA is never mon_capable */ + err = mba_sc_domain_allocate(r, d); + goto 
out_unlock; + } + + if (!r->mon_capable) + goto out_unlock; + + err = domain_setup_mon_state(r, d); + if (err) + goto out_unlock; + + if (resctrl_is_mbm_enabled()) { + INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); + mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL, + RESCTRL_PICK_ANY_CPU); + } + + if (resctrl_arch_is_llc_occupancy_enabled()) + INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo); + + /* + * If the filesystem is not mounted then only the default resource group + * exists. Creation of its directories is deferred until mount time + * by rdt_get_tree() calling mkdir_mondata_all(). + * If resctrl is mounted, add per domain monitor data directories. + */ + if (resctrl_mounted && resctrl_arch_mon_capable()) + mkdir_mondata_subdir_allrdtgrp(r, d); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); + + return err; +} + +void resctrl_online_cpu(unsigned int cpu) +{ + mutex_lock(&rdtgroup_mutex); + /* The CPU is set in default rdtgroup after online. */ + cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); + mutex_unlock(&rdtgroup_mutex); +} + +static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) +{ + struct rdtgroup *cr; + + list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { + if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) + break; + } +} + +void resctrl_offline_cpu(unsigned int cpu) +{ + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); + struct rdtgroup *rdtgrp; + struct rdt_domain *d; + + mutex_lock(&rdtgroup_mutex); + list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { + if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { + clear_childcpus(rdtgrp, cpu); + break; + } + } + + if (!l3->mon_capable) + goto out_unlock; + + d = resctrl_get_domain_from_cpu(cpu, l3); + if (d) { + if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) { + cancel_delayed_work(&d->mbm_over); + mbm_setup_overflow_handler(d, 0, cpu); + } + if (resctrl_arch_is_llc_occupancy_enabled() && + cpu == d->cqm_work_cpu 
&& has_busy_rmid(d)) { + cancel_delayed_work(&d->cqm_limbo); + cqm_setup_limbo_handler(d, 0, cpu); + } + } + +out_unlock: + mutex_unlock(&rdtgroup_mutex); +} + +/* + * resctrl_init - resctrl filesystem initialization + * + * Setup resctrl file system including set up root, create mount point, + * register resctrl filesystem, and initialize files under root directory. + * + * Return: 0 on success or -errno + */ +int resctrl_init(void) +{ + int ret = 0; + + seq_buf_init(&last_cmd_status, last_cmd_status_buf, + sizeof(last_cmd_status_buf)); + + rdtgroup_setup_default(); + + thread_throttle_mode_init(); + + ret = resctrl_mon_resource_init(); + if (ret) + return ret; + + ret = sysfs_create_mount_point(fs_kobj, "resctrl"); + if (ret) + return ret; + + ret = register_filesystem(&rdt_fs_type); + if (ret) + goto cleanup_mountpoint; + + /* + * Adding the resctrl debugfs directory here may not be ideal since + * it would let the resctrl debugfs directory appear on the debugfs + * filesystem before the resctrl filesystem is mounted. + * It may also be ok since that would enable debugging of RDT before + * resctrl is mounted. + * The reason why the debugfs directory is created here and not in + * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and + * during the debugfs directory creation also &sb->s_type->i_mutex_key + * (the lockdep class of inode->i_rwsem). Other filesystem + * interactions (eg. SyS_getdents) have the lock ordering: + * &sb->s_type->i_mutex_key --> &mm->mmap_lock + * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex + * is taken, thus creating dependency: + * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause + * issues considering the other two lock dependencies. + * By creating the debugfs directory here we avoid a dependency + * that may cause deadlock (even though file operations cannot + * occur until the filesystem is mounted, but I do not know how to + * tell lockdep that). 
+ */ + debugfs_resctrl = debugfs_create_dir("resctrl", NULL); + + return 0; + +cleanup_mountpoint: + sysfs_remove_mount_point(fs_kobj, "resctrl"); + + return ret; +} + +void resctrl_exit(void) +{ + debugfs_remove_recursive(debugfs_resctrl); + unregister_filesystem(&rdt_fs_type); + sysfs_remove_mount_point(fs_kobj, "resctrl"); + + resctrl_mon_resource_exit(); +} -- Gitee From b60fa907762cda97429d7d5b63fa77c0447494a4 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 2 Jul 2018 11:15:31 +0100 Subject: [PATCH 0572/2138] arm64: head.S: Initialise MPAM EL2 registers and disable traps ANBZ: #8686 commit 2dd87f04e14aaade03e43874d4134c76e00a4d92 morse-linux. Add code to head.S's el2_setup to detect MPAM and disable any EL2 traps. This register resets to an unknown value, setting it to the default parititons/pmg before we enable the MMU is the best thing to do. Kexec/kdump will depend on this if the previous kernel left the CPU configured with a restrictive configuration. If linux is booted at the highest implemented exception level el2_setup will clear the enable bit, disabling MPAM. This code can't be enabled until a subsequent patch adds the Kconfig and cpufeature boiler plate. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- arch/arm64/include/asm/el2_setup.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index b7afaa026842..1e2181820a0a 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -208,6 +208,21 @@ msr spsr_el2, x0 .endm +.macro __init_el2_mpam +#ifdef CONFIG_ARM64_MPAM + /* Memory Partioning And Monitoring: disable EL2 traps */ + mrs x1, id_aa64pfr0_el1 + ubfx x0, x1, #ID_AA64PFR0_EL1_MPAM_SHIFT, #4 + cbz x0, .Lskip_mpam_\@ // skip if no MPAM + msr_s SYS_MPAM2_EL2, xzr // use the default partition + // and disable lower traps + mrs_s x0, SYS_MPAMIDR_EL1 + tbz x0, #17, .Lskip_mpam_\@ // skip if no MPAMHCR reg + msr_s SYS_MPAMHCR_EL2, xzr // clear TRAP_MPAMIDR_EL1 -> EL2 +.Lskip_mpam_\@: +#endif /* CONFIG_ARM64_MPAM */ +.endm + /** * Initialize EL2 registers to sane values. This should be called early on all * cores that were booted in EL2. Note that everything gets initialised as @@ -225,6 +240,7 @@ __init_el2_stage2 __init_el2_gicv3 __init_el2_hstr + __init_el2_mpam __init_el2_nvhe_idregs __init_el2_cptr __init_el2_fgt -- Gitee From 4e54d5e672ee0e2af134462baff838ba3df63af7 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 2 Jul 2018 11:15:31 +0100 Subject: [PATCH 0573/2138] arm64: cpufeature: discover CPU support for MPAM ANBZ: #8686 commit a59cda5f355ee9fea8cfcb08f0266f0f1353143b morse-linux. ARMv8.4 adds support for 'Memory Partitioning And Monitoring' (MPAM) which describes an interface to cache and bandwidth controls wherever they appear in the system. Add support to detect MPAM. Like SVE, MPAM has an extra id register that describes the virtualisation support, which is optional. 
Detect this separately so we can detect mismatched/insane systems, but still use MPAM on the host even if the virtualisation support is missing. MPAM needs enabling at the highest implemented exception level, otherwise the register accesses trap. The 'enabled' flag is accessible to lower exception levels, but its in a register that traps when MPAM isn't enabled. The cpufeature 'matches' hook is extended to test this on one of the CPUs, so that firwmare can emulate MPAM as disabled if it is reserved for use by secure world. (If you have a boot failure that bisects here its likely your CPUs advertise MPAM in the id registers, but firmware failed to either enable or MPAM, or emulate the trap as if it were disabled) Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- .../arch/arm64/cpu-feature-registers.rst | 2 + arch/arm64/Kconfig | 19 ++++- arch/arm64/include/asm/cpu.h | 1 + arch/arm64/include/asm/cpufeature.h | 13 ++++ arch/arm64/include/asm/mpam.h | 76 +++++++++++++++++++ arch/arm64/include/asm/sysreg.h | 8 ++ arch/arm64/kernel/Makefile | 2 + arch/arm64/kernel/cpufeature.c | 69 +++++++++++++++++ arch/arm64/kernel/cpuinfo.c | 4 + arch/arm64/kernel/mpam.c | 8 ++ arch/arm64/tools/cpucaps | 1 + arch/arm64/tools/sysreg | 33 ++++++++ 12 files changed, 235 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/include/asm/mpam.h create mode 100644 arch/arm64/kernel/mpam.c diff --git a/Documentation/arch/arm64/cpu-feature-registers.rst b/Documentation/arch/arm64/cpu-feature-registers.rst index de6d8a4790e2..14ea68bcf196 100644 --- a/Documentation/arch/arm64/cpu-feature-registers.rst +++ b/Documentation/arch/arm64/cpu-feature-registers.rst @@ -152,6 +152,8 @@ infrastructure: +------------------------------+---------+---------+ | DIT | [51-48] | y | 
+------------------------------+---------+---------+ + | MPAM | [43-40] | n | + +------------------------------+---------+---------+ | SVE | [35-32] | y | +------------------------------+---------+---------+ | GIC | [27-24] | n | diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 658c6a61ab6f..471e8129d0fb 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2013,7 +2013,24 @@ config ARM64_TLB_RANGE The feature introduces new assembly instructions, and they were support when binutils >= 2.30. -endmenu # "ARMv8.4 architectural features" +config ARM64_MPAM + bool "Enable support for MPAM" + help + Memory Partitioning and Monitoring is an optional extension + that allows the CPUs to mark load and store transactions with + labels for partition-id and performance-monitoring-group. + System components, such as the caches, can use the partition-id + to apply a performance policy. MPAM monitors can use the + partition-id and performance-monitoring-group to measure the + cache occupancy or data throughput. + + Use of this extension requires CPU support, support in the + memory system components (MSC), and a description from firmware + of where the MSC are in the address space. + + MPAM is exposed to user-space via the resctrl pseudo filesystem. 
+ +endmenu menu "ARMv8.5 architectural features" diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index e749838b9c5d..1cb5bafd9238 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -47,6 +47,7 @@ struct cpuinfo_arm64 { u64 reg_revidr; u64 reg_gmid; u64 reg_smidr; + u64 reg_mpamidr; u64 reg_id_aa64dfr0; u64 reg_id_aa64dfr1; diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 5bba39376055..e873848ad9d9 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -619,6 +619,13 @@ static inline bool id_aa64pfr1_sme(u64 pfr1) return val > 0; } +static inline bool id_aa64pfr0_mpam(u64 pfr0) +{ + u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT); + + return val > 0; +} + static inline bool id_aa64pfr1_mte(u64 pfr1) { u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT); @@ -831,6 +838,12 @@ static inline bool system_supports_tlb_range(void) cpus_have_const_cap(ARM64_HAS_TLB_RANGE); } +static inline bool cpus_support_mpam(void) +{ + return IS_ENABLED(CONFIG_ARM64_MPAM) && + cpus_have_final_cap(ARM64_MPAM); +} + int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt); bool try_emulate_mrs(struct pt_regs *regs, u32 isn); diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h new file mode 100644 index 000000000000..a4a969be233a --- /dev/null +++ b/arch/arm64/include/asm/mpam.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 Arm Ltd. 
*/ + +#ifndef __ASM__MPAM_H +#define __ASM__MPAM_H + +#include +#include +#include + +#include +#include +#include + +/* CPU Registers */ +#define MPAM_SYSREG_EN BIT_ULL(63) +#define MPAM_SYSREG_TRAP_IDR BIT_ULL(58) +#define MPAM_SYSREG_TRAP_MPAM0_EL1 BIT_ULL(49) +#define MPAM_SYSREG_TRAP_MPAM1_EL1 BIT_ULL(48) +#define MPAM_SYSREG_PMG_D GENMASK(47, 40) +#define MPAM_SYSREG_PMG_I GENMASK(39, 32) +#define MPAM_SYSREG_PARTID_D GENMASK(31, 16) +#define MPAM_SYSREG_PARTID_I GENMASK(15, 0) + +#define MPAMIDR_PMG_MAX GENMASK(40, 32) +#define MPAMIDR_PMG_MAX_SHIFT 32 +#define MPAMIDR_PMG_MAX_LEN 8 +#define MPAMIDR_VPMR_MAX GENMASK(20, 18) +#define MPAMIDR_VPMR_MAX_SHIFT 18 +#define MPAMIDR_VPMR_MAX_LEN 3 +#define MPAMIDR_HAS_HCR BIT(17) +#define MPAMIDR_HAS_HCR_SHIFT 17 +#define MPAMIDR_PARTID_MAX GENMASK(15, 0) +#define MPAMIDR_PARTID_MAX_SHIFT 0 +#define MPAMIDR_PARTID_MAX_LEN 15 + +#define MPAMHCR_EL0_VPMEN BIT_ULL(0) +#define MPAMHCR_EL1_VPMEN BIT_ULL(1) +#define MPAMHCR_GSTAPP_PLK BIT_ULL(8) +#define MPAMHCR_TRAP_MPAMIDR BIT_ULL(31) + +/* Properties of the VPM registers */ +#define MPAM_VPM_NUM_REGS 8 +#define MPAM_VPM_PARTID_LEN 16 +#define MPAM_VPM_PARTID_MASK 0xffff +#define MPAM_VPM_REG_LEN 64 +#define MPAM_VPM_PARTIDS_PER_REG (MPAM_VPM_REG_LEN / MPAM_VPM_PARTID_LEN) +#define MPAM_VPM_MAX_PARTID (MPAM_VPM_NUM_REGS * MPAM_VPM_PARTIDS_PER_REG) + + +DECLARE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); + +/* check whether all CPUs have MPAM support */ +static inline bool mpam_cpus_have_feature(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM)) + return cpus_have_final_cap(ARM64_MPAM); + return false; +} + +/* check whether all CPUs have MPAM virtualisation support */ +static inline bool mpam_cpus_have_mpam_hcr(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM)) + return static_branch_unlikely(&arm64_mpam_has_hcr); + return false; +} + +/* enable MPAM virtualisation support */ +static inline void __init __enable_mpam_hcr(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM)) + 
static_branch_enable(&arm64_mpam_has_hcr); +} + +#endif /* __ASM__MPAM_H */ diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 38296579a4fd..94633246d311 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -515,6 +515,13 @@ #define SYS_MAIR_EL2 sys_reg(3, 4, 10, 2, 0) #define SYS_AMAIR_EL2 sys_reg(3, 4, 10, 3, 0) +#define SYS_MPAMHCR_EL2 sys_reg(3, 4, 10, 4, 0) +#define SYS_MPAMVPMV_EL2 sys_reg(3, 4, 10, 4, 1) +#define SYS_MPAM2_EL2 sys_reg(3, 4, 10, 5, 0) + +#define __VPMn_op2(n) ((n) & 0x7) +#define SYS_MPAM_VPMn_EL2(n) sys_reg(3, 4, 10, 6, __VPMn_op2(n)) + #define SYS_VBAR_EL2 sys_reg(3, 4, 12, 0, 0) #define SYS_RVBAR_EL2 sys_reg(3, 4, 12, 0, 1) #define SYS_RMR_EL2 sys_reg(3, 4, 12, 0, 2) @@ -579,6 +586,7 @@ #define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0) #define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) #define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) +#define SYS_MPAM1_EL12 sys_reg(3, 5, 10, 5, 0) #define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) #define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0) #define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0) diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index d48aa807dcce..7c67e2f29206 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -65,11 +65,13 @@ obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \ obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o + obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_CRASH_CORE) += crash_core.o obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o obj-$(CONFIG_SDEI_WATCHDOG) += watchdog_sdei.o obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o +obj-$(CONFIG_ARM64_MPAM) += mpam.o obj-$(CONFIG_ARM64_MTE) += mte.o obj-y += vdso-wrap.o obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 
7e9660455900..8ec1c9dd3644 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -84,6 +84,7 @@ #include #include #include +#include #include #include #include @@ -623,6 +624,18 @@ static const struct arm64_ftr_bits ftr_smcr[] = { ARM64_FTR_END, }; +static const struct arm64_ftr_bits ftr_mpamidr[] = { + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, + MPAMIDR_PMG_MAX_SHIFT, MPAMIDR_PMG_MAX_LEN, 0), /* PMG_MAX */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, + MPAMIDR_VPMR_MAX_SHIFT, MPAMIDR_VPMR_MAX_LEN, 0), /* VPMR_MAX */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, + MPAMIDR_HAS_HCR_SHIFT, 1, 0), /* HAS_HCR */ + ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, + MPAMIDR_PARTID_MAX_SHIFT, MPAMIDR_PARTID_MAX_LEN, 0), /* PARTID_MAX */ + ARM64_FTR_END, +}; + /* * Common ftr bits for a 32bit register with all hidden, strict * attributes, with 4bit feature fields and a default safe value of @@ -739,6 +752,9 @@ static const struct __ftr_reg_entry { ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr), ARM64_FTR_REG(SYS_SMCR_EL1, ftr_smcr), + /* Op1 = 0, CRn = 10, CRm = 4 */ + ARM64_FTR_REG(SYS_MPAMIDR_EL1, ftr_mpamidr), + /* Op1 = 1, CRn = 0, CRm = 0 */ ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid), @@ -1058,6 +1074,9 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) vec_init_vq_map(ARM64_VEC_SME); } + if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) + init_cpu_ftr_reg(SYS_MPAMIDR_EL1, info->reg_mpamidr); + if (id_aa64pfr1_mte(info->reg_id_aa64pfr1)) init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid); @@ -1317,6 +1336,11 @@ void update_cpu_features(int cpu, vec_update_vq_map(ARM64_VEC_SME); } + if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) { + taint |= check_update_ftr_reg(SYS_MPAMIDR_EL1, cpu, + info->reg_mpamidr, boot->reg_mpamidr); + } + /* * The kernel uses the LDGM/STGM instructions and the number of tags * they read/write depends on the GMID_EL1.BS field. 
Check that the @@ -2250,6 +2274,39 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap) return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT); } +static bool __maybe_unused +test_has_mpam(const struct arm64_cpu_capabilities *entry, int scope) +{ + if (!has_cpuid_feature(entry, scope)) + return false; + + /* Check firmware actually enabled MPAM on this cpu. */ + return (read_sysreg_s(SYS_MPAM1_EL1) & MPAM_SYSREG_EN); +} + +static void __maybe_unused +cpu_enable_mpam(const struct arm64_cpu_capabilities *entry) +{ + /* + * Access by the kernel (at EL1) should use the reserved PARTID + * which is configured unrestricted. This avoids priority-inversion + * where latency sensitive tasks have to wait for a task that has + * been throttled to release the lock. + */ + write_sysreg_s(0, SYS_MPAM1_EL1); +} + +static void mpam_extra_caps(void) +{ + u64 idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1); + + if (!IS_ENABLED(CONFIG_ARM64_MPAM)) + return; + + if (idr & MPAMIDR_HAS_HCR) + __enable_mpam_hcr(); +} + static const struct arm64_cpu_capabilities arm64_features[] = { { .capability = ARM64_ALWAYS_BOOT, @@ -2730,6 +2787,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP) }, +#ifdef CONFIG_ARM64_MPAM + { + .desc = "Memory Partitioning And Monitoring", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_MPAM, + .matches = test_has_mpam, + .cpu_enable = cpu_enable_mpam, + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, MPAM, 1) + }, +#endif {}, }; @@ -3378,6 +3445,8 @@ void __init setup_cpu_features(void) if (!cwg) pr_warn("No Cache Writeback Granule information, assuming %d\n", ARCH_DMA_MINALIGN); + + mpam_extra_caps(); } static int enable_mismatched_32bit_el0(unsigned int cpu) diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 98fda8500535..1b1fe0f58a86 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -460,6 +460,10 
@@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) __cpuinfo_store_cpu_32bit(&info->aarch32); + if (IS_ENABLED(CONFIG_ARM64_MPAM) && + id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) + info->reg_mpamidr = read_cpuid(MPAMIDR_EL1); + cpuinfo_detect_icache_policy(info); } diff --git a/arch/arm64/kernel/mpam.c b/arch/arm64/kernel/mpam.c new file mode 100644 index 000000000000..ff29b666e025 --- /dev/null +++ b/arch/arm64/kernel/mpam.c @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 Arm Ltd. */ + +#include + +#include + +DEFINE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index c251ef3caae5..bbb901027817 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -56,6 +56,7 @@ HW_DBM KVM_HVHE KVM_PROTECTED_MODE MISMATCHED_CACHE_TYPE +MPAM MTE MTE_ASYMM SME diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 76ce150e7347..0e7d7f327410 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -2530,6 +2530,22 @@ Res0 1 Field 0 EN EndSysreg +Sysreg MPAMIDR_EL1 3 0 10 4 4 +Res0 63:62 +Field 61 HAS_SDEFLT +Field 60 HAS_FORCE_NS +Field 59 SP4 +Field 58 HAS_TIDR +Field 57 HAS_ALTSP +Res0 56:40 +Field 39:32 PMG_MAX +Res0 31:21 +Field 20:18 VPMR_MAX +Field 17 HAS_HCR +Res0 16 +Field 15:0 PARTID_MAX +EndSysreg + Sysreg LORID_EL1 3 0 10 4 7 Res0 63:24 Field 23:16 LD @@ -2537,6 +2553,22 @@ Res0 15:8 Field 7:0 LR EndSysreg +Sysreg MPAM1_EL1 3 0 10 5 0 +Res0 63:48 +Field 47:40 PMG_D +Field 39:32 PMG_I +Field 31:16 PARTID_D +Field 15:0 PARTID_I +EndSysreg + +Sysreg MPAM0_EL1 3 0 10 5 1 +Res0 63:48 +Field 47:40 PMG_D +Field 39:32 PMG_I +Field 31:16 PARTID_D +Field 15:0 PARTID_I +EndSysreg + Sysreg ISR_EL1 3 0 12 1 0 Res0 63:11 Field 10 IS @@ -2550,6 +2582,7 @@ EndSysreg Sysreg ICC_NMIAR1_EL1 3 0 12 9 5 Res0 63:24 Field 23:0 INTID + EndSysreg Sysreg TRBLIMITR_EL1 3 0 9 11 0 -- Gitee From 
44031908aaae1b6c75c976e84de4ffb0adcf6a44 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 21 Mar 2019 15:59:41 +0000 Subject: [PATCH 0574/2138] KVM: arm64: Fix missing traps of guest accesses to the MPAM registers ANBZ: #8686 commit 055409b8aafcca81a2dfa43052142a3b0f416816 morse-linux. commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to guests, but didn't add trap handling. If you are unlucky, this results in an MPAM aware guest being delivered an undef during boot. The host prints: | kvm [97]: Unsupported guest sys_reg access at: ffff800080024c64 [00000005] | { Op0( 3), Op1( 0), CRn(10), CRm( 5), Op2( 0), func_read }, Which results in: | Internal error: Oops - Undefined instruction: 0000000002000000 [#1] PREEMPT SMP | Modules linked in: | CPU: 0 PID: 1 Comm: swapper/0 Not tainted 6.6.0-rc7-00559-gd89c186d50b2 #14616 | Hardware name: linux,dummy-virt (DT) | pstate: 00000005 (nzcv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) | pc : test_has_mpam+0x18/0x30 | lr : test_has_mpam+0x10/0x30 | sp : ffff80008000bd90 ... | Call trace: | test_has_mpam+0x18/0x30 | update_cpu_capabilities+0x7c/0x11c | setup_cpu_features+0x14/0xd8 | smp_cpus_done+0x24/0xb8 | smp_init+0x7c/0x8c | kernel_init_freeable+0xf8/0x280 | kernel_init+0x24/0x1e0 | ret_from_fork+0x10/0x20 | Code: 910003fd 97ffffde 72001c00 54000080 (d538a500) | ---[ end trace 0000000000000000 ]--- | Kernel panic - not syncing: Attempted to kill init! exitcode=0x0000000b | ---[ end Kernel panic - not syncing: Attempted to kill init! exitcode=0x0000000b ]--- Add the support to enable the traps, and handle the three guest accessible registers as RAZ/WI. This allows guests to keep the invariant id-register value, while advertising that MPAM isn't really supported. With MPAM v1.0 we can trap the MPAMIDR_EL1 register only if ARM64_HAS_MPAM_HCR, with v1.1 an additional MPAM2_EL2.TIDR bit traps MPAMIDR_EL1 on platforms that don't have MPAMHCR_EL2. 
Enable one of these if either is supported. If neither is supported, the guest can discover that the CPU has MPAM support, and how many PARTID etc the host has ... but it can't influence anything, so its harmless. Full support for the feature would only expose MPAM to the guest if a psuedo-device has been created to describe the virt->phys partid mapping the VMM expects. This will depend on ARM64_HAS_MPAM_HCR. Fixes: 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits in ID_AA64PFR0 register") CC: Anshuman Khandual Link: https://lore.kernel.org/linux-arm-kernel/20200925160102.118858-1-james.morse@arm.com/ Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- arch/arm64/include/asm/kvm_arm.h | 1 + arch/arm64/include/asm/mpam.h | 4 ++-- arch/arm64/kernel/image-vars.h | 5 ++++ arch/arm64/kvm/hyp/include/hyp/switch.h | 32 +++++++++++++++++++++++++ arch/arm64/kvm/sys_regs.c | 20 ++++++++++++++++ 5 files changed, 60 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 1095c6647e96..2d8b243a86cd 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -104,6 +104,7 @@ #define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En) #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En) +#define MPAMHCR_HOST_FLAGS 0 /* TCR_EL2 Registers bits */ #define TCR_EL2_RES1 ((1U << 31) | (1 << 23)) diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h index a4a969be233a..576102d510ad 100644 --- a/arch/arm64/include/asm/mpam.h +++ b/arch/arm64/include/asm/mpam.h @@ -51,7 +51,7 @@ DECLARE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); /* check whether all CPUs have MPAM support */ -static inline bool mpam_cpus_have_feature(void) +static __always_inline bool 
mpam_cpus_have_feature(void) { if (IS_ENABLED(CONFIG_ARM64_MPAM)) return cpus_have_final_cap(ARM64_MPAM); @@ -59,7 +59,7 @@ static inline bool mpam_cpus_have_feature(void) } /* check whether all CPUs have MPAM virtualisation support */ -static inline bool mpam_cpus_have_mpam_hcr(void) +static __always_inline bool mpam_cpus_have_mpam_hcr(void) { if (IS_ENABLED(CONFIG_ARM64_MPAM)) return static_branch_unlikely(&arm64_mpam_has_hcr); diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 35f3c7959513..d10d3fed31d9 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -64,6 +64,11 @@ KVM_NVHE_ALIAS(nvhe_hyp_panic_handler); /* Vectors installed by hyp-init on reset HVC. */ KVM_NVHE_ALIAS(__hyp_stub_vectors); +/* Additional static keys for cpufeatures */ +#ifdef CONFIG_ARM64_MPAM +KVM_NVHE_ALIAS(arm64_mpam_has_hcr); +#endif + /* Static keys which are set if a vGIC trap should be handled in hyp. */ KVM_NVHE_ALIAS(vgic_v2_cpuif_trap); KVM_NVHE_ALIAS(vgic_v3_cpuif_trap); diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 9cfe6bd1dbe4..657320f453e6 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -172,6 +173,35 @@ static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu) write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2); } +static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu) +{ + u64 r = MPAM_SYSREG_TRAP_MPAM0_EL1 | MPAM_SYSREG_TRAP_MPAM1_EL1; + + if (!mpam_cpus_have_feature()) + return; + + /* trap guest access to MPAMIDR_EL1 */ + if (mpam_cpus_have_mpam_hcr()) { + write_sysreg_s(MPAMHCR_TRAP_MPAMIDR, SYS_MPAMHCR_EL2); + } else { + /* From v1.1 TIDR can trap MPAMIDR, set it unconditionally */ + r |= MPAM_SYSREG_TRAP_IDR; + } + + write_sysreg_s(r, SYS_MPAM2_EL2); +} + +static inline void 
__deactivate_traps_mpam(void) +{ + if (!mpam_cpus_have_feature()) + return; + + write_sysreg_s(0, SYS_MPAM2_EL2); + + if (mpam_cpus_have_mpam_hcr()) + write_sysreg_s(MPAMHCR_HOST_FLAGS, SYS_MPAMHCR_EL2); +} + static inline void __activate_traps_common(struct kvm_vcpu *vcpu) { /* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */ @@ -212,6 +242,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu) } __activate_traps_hfgxtr(vcpu); + __activate_traps_mpam(vcpu); } static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) @@ -231,6 +262,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2); __deactivate_traps_hfgxtr(vcpu); + __deactivate_traps_mpam(); } static inline void ___activate_traps(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 2031703424ea..e4922b443f9f 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -417,6 +417,23 @@ static bool trap_oslar_el1(struct kvm_vcpu *vcpu, return true; } +static bool workaround_bad_mpam_abi(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + /* + * The ID register can't be removed without breaking migration, + * but MPAMIDR_EL1 can advertise all-zeroes, indicating there are zero + * PARTID/PMG supported by the CPU, allowing the other two trapped + * registers (MPAM1_EL1 and MPAM0_EL1) to be treated as RAZ/WI. + * Emulating MPAM1_EL1 as RAZ/WI means the guest sees the MPAMEN bit + * as clear, and realises MPAM isn't usable on this CPU. 
+ */ + p->regval = 0; + + return true; +} + static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) @@ -2184,8 +2201,11 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_LOREA_EL1), trap_loregion }, { SYS_DESC(SYS_LORN_EL1), trap_loregion }, { SYS_DESC(SYS_LORC_EL1), trap_loregion }, + { SYS_DESC(SYS_MPAMIDR_EL1), workaround_bad_mpam_abi }, { SYS_DESC(SYS_LORID_EL1), trap_loregion }, + { SYS_DESC(SYS_MPAM1_EL1), workaround_bad_mpam_abi }, + { SYS_DESC(SYS_MPAM0_EL1), workaround_bad_mpam_abi }, { SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 }, { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, -- Gitee From eb0629d064f44eb10715bd1d7db9ad0000806a0b Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 23 Nov 2023 16:22:12 +0000 Subject: [PATCH 0575/2138] KVM: arm64: Disable MPAM visibility by default, and handle traps ANBZ: #8686 commit 9c4f586b73e6dbb1d6ad1e3eeb9ee75befeab4c1 morse-linux. Currently KVM only allows writeable ID registers to be downgraded in the 'safe' direction, as determined by the cpufeature 'lower safe' flags. commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to guests, but didn't add trap handling. A previous patch supplied the missing trap handling. Existing VMs that have the MPAM field of AA64PFR0_EL1 need to be migratable, but there is little point enabling the MPAM CPU interface on new VMs until there is something a guest can do with it. Clear the MPAM field from the guest's AA64PFR0_EL1 by default, but allow user-space to set it again if the host supports MPAM. Add a helper to return the maximum permitted value for an ID register. For most this is the reset value. To allow the MPAM field to be written as supported, check if the host sanitised value is '1' and upgrade the reset value. 
Finally, change the trap handling to inject an undef if MPAM was not advertised to the guest. Full support will depend on an psuedo-device being created that describes the virt->phys PARTID mapping the VMM expects. Migration would be expected to fail if this psuedo-device can't be created on the remote end. This ID bit isn't needed to block migration. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- arch/arm64/kvm/sys_regs.c | 69 ++++++++++++++++++++++++++++++--------- 1 file changed, 54 insertions(+), 15 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e4922b443f9f..16b8cb1a1590 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -417,21 +417,29 @@ static bool trap_oslar_el1(struct kvm_vcpu *vcpu, return true; } -static bool workaround_bad_mpam_abi(struct kvm_vcpu *vcpu, - struct sys_reg_params *p, - const struct sys_reg_desc *r) +static bool trap_mpam(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) { + u64 aa64pfr0_el1 = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1); + /* - * The ID register can't be removed without breaking migration, - * but MPAMIDR_EL1 can advertise all-zeroes, indicating there are zero - * PARTID/PMG supported by the CPU, allowing the other two trapped - * registers (MPAM1_EL1 and MPAM0_EL1) to be treated as RAZ/WI. + * What did we expose to the guest? + * Earlier guests may have seen the ID bits, which can't be removed + * without breaking migration, but MPAMIDR_EL1 can advertise all-zeroes, + * indicating there are zero PARTID/PMG supported by the CPU, allowing + * the other two trapped registers (MPAM1_EL1 and MPAM0_EL1) to be + * treated as RAZ/WI. 
* Emulating MPAM1_EL1 as RAZ/WI means the guest sees the MPAMEN bit * as clear, and realises MPAM isn't usable on this CPU. */ - p->regval = 0; + if (FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, aa64pfr0_el1)) { + p->regval = 0; + return true; + } - return true; + kvm_inject_undefined(vcpu); + return false; } static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, @@ -1251,6 +1259,36 @@ static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp, return arm64_ftr_safe_value(&kvm_ftr, new, cur); } +static u64 kvm_arm64_ftr_max(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + u64 pfr0, val = rd->reset(vcpu, rd); + u32 field, id = reg_to_encoding(rd); + + /* + * Some values may reset to a lower value than can be supported, + * get the maximum feature value. + */ + switch (id) { + case SYS_ID_AA64PFR0_EL1: + pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + /* + * MPAM resets to 0, but migration of MPAM=1 guests is needed. + * See trap_mpam() for more. + */ + field = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT); + if (field == ID_AA64PFR0_EL1_MPAM_1) { + val &= ~ID_AA64PFR0_EL1_MPAM_MASK; + val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, ID_AA64PFR0_EL1_MPAM_1); + } + + break; + } + + return val; +} + /** * arm64_check_features() - Check if a feature register value constitutes * a subset of features indicated by the idreg's KVM sanitised limit. 
@@ -1271,8 +1309,7 @@ static int arm64_check_features(struct kvm_vcpu *vcpu, const struct arm64_ftr_bits *ftrp = NULL; u32 id = reg_to_encoding(rd); u64 writable_mask = rd->val; - u64 limit = rd->reset(vcpu, rd); - u64 mask = 0; + u64 limit, mask = 0; /* * Hidden and unallocated ID registers may not have a corresponding @@ -1286,6 +1323,7 @@ static int arm64_check_features(struct kvm_vcpu *vcpu, if (!ftr_reg) return -EINVAL; + limit = kvm_arm64_ftr_max(vcpu, rd); ftrp = ftr_reg->ftr_bits; for (; ftrp && ftrp->width; ftrp++) { @@ -1493,7 +1531,8 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, /* * MPAM is disabled by default as KVM also needs a set of PARTID to * program the MPAMVPMx_EL2 PARTID remapping registers with. But some - * older kernels let the guest see the ID bit. + * older kernels let the guest see the ID bit. Turning it on causes + * the registers to be emulated as RAZ/WI. See trap_mpam() for more. */ val &= ~ID_AA64PFR0_EL1_MPAM_MASK; @@ -2201,11 +2240,11 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_LOREA_EL1), trap_loregion }, { SYS_DESC(SYS_LORN_EL1), trap_loregion }, { SYS_DESC(SYS_LORC_EL1), trap_loregion }, - { SYS_DESC(SYS_MPAMIDR_EL1), workaround_bad_mpam_abi }, + { SYS_DESC(SYS_MPAMIDR_EL1), trap_mpam }, { SYS_DESC(SYS_LORID_EL1), trap_loregion }, - { SYS_DESC(SYS_MPAM1_EL1), workaround_bad_mpam_abi }, - { SYS_DESC(SYS_MPAM0_EL1), workaround_bad_mpam_abi }, + { SYS_DESC(SYS_MPAM1_EL1), trap_mpam }, + { SYS_DESC(SYS_MPAM0_EL1), trap_mpam }, { SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 }, { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, -- Gitee From f2c6148fe5339eb8251bbe84e0b5730a1bd2e3db Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 4 Dec 2023 12:49:18 +0000 Subject: [PATCH 0576/2138] arm64: mpam: Context switch the MPAM registers ANBZ: #8686 commit d3220b56803a826f5f9702e27fe7d24eb2be0c39 morse-linux. 
MPAM has a system register that is used to hold the partid and pmg values that traffic generated by EL0 will use. This can be set per-task by the resctrl file system. Add a helper to switch this. resctrl expects a 'default' value to be used in preference if the default partid and pmg are selected. struct task_struct's separate closid and rmid fields are insufficient to implement resctrl using MPAM, as resctrl can change the partid (closid) and pmg (sort of like the rmid) separately. On x86, the rmid is an independent number, so a race that writes a mismatched closid and rmid into hardware is benign. On arm64, the pmg bits extend the partid. (i.e. partid-5 has a pmg-0 that is not the same as partid-6's pmg-0). In this case, mismatching the values will 'dirty' a pmg value that resctrl believes is clean, and is not tracking with its 'limbo' code. To avoid this, the partid and pmg are always read and written as a pair. Instead of making struct task_struct's closid and rmid fields an endian-unsafe union, add the value to struct thread_info and always use READ_ONCE()/WRITE_ONCE() when accessing this field. 
CC: Amit Singh Tomar Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- arch/arm64/include/asm/mpam.h | 46 ++++++++++++++++++++++++++++ arch/arm64/include/asm/thread_info.h | 3 ++ arch/arm64/kernel/mpam.c | 4 +++ arch/arm64/kernel/process.c | 7 +++++ 4 files changed, 60 insertions(+) diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h index 576102d510ad..1d81a6f26acd 100644 --- a/arch/arm64/include/asm/mpam.h +++ b/arch/arm64/include/asm/mpam.h @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include #include @@ -49,6 +51,9 @@ DECLARE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); +DECLARE_STATIC_KEY_FALSE(mpam_enabled); +DECLARE_PER_CPU(u64, arm64_mpam_default); +DECLARE_PER_CPU(u64, arm64_mpam_current); /* check whether all CPUs have MPAM support */ static __always_inline bool mpam_cpus_have_feature(void) @@ -73,4 +78,45 @@ static inline void __init __enable_mpam_hcr(void) static_branch_enable(&arm64_mpam_has_hcr); } +/* + * The resctrl filesystem writes to the partid/pmg values for threads and CPUs, + * which may race with reads in __mpam_sched_in(). Ensure only one of the old + * or new values are used. Particular care should be taken with the pmg field + * as __mpam_sched_in() may read a partid and pmg that don't match, causing + * this value to be stored with cache allocations, despite being considered + * 'free' by resctrl. + * + * A value in struct thread_info is used instead of struct task_struct as the + * cpu's u64 register format is used, but struct task_struct has two u32'. 
+ */ +static inline u64 mpam_get_regval(struct task_struct *tsk) +{ +#ifdef CONFIG_ARM64_MPAM + return READ_ONCE(task_thread_info(tsk)->mpam_partid_pmg); +#else + return 0; +#endif +} + +static inline void mpam_thread_switch(struct task_struct *tsk) +{ + u64 oldregval; + int cpu = smp_processor_id(); + u64 regval = mpam_get_regval(tsk); + + if (!IS_ENABLED(CONFIG_ARM64_MPAM) || + !static_branch_likely(&mpam_enabled)) + return; + + if (!regval) + regval = READ_ONCE(per_cpu(arm64_mpam_default, cpu)); + + oldregval = READ_ONCE(per_cpu(arm64_mpam_current, cpu)); + if (oldregval == regval) + return; + + /* Synchronising this write is left until the ERET to EL0 */ + write_sysreg_s(regval, SYS_MPAM0_EL1); + WRITE_ONCE(per_cpu(arm64_mpam_current, cpu), regval); +} #endif /* __ASM__MPAM_H */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 553d1bc559c6..c57b33de0ed1 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -41,6 +41,9 @@ struct thread_info { #ifdef CONFIG_SHADOW_CALL_STACK void *scs_base; void *scs_sp; +#endif +#ifdef CONFIG_ARM64_MPAM + u64 mpam_partid_pmg; #endif u32 cpu; }; diff --git a/arch/arm64/kernel/mpam.c b/arch/arm64/kernel/mpam.c index ff29b666e025..346f0273b2c5 100644 --- a/arch/arm64/kernel/mpam.c +++ b/arch/arm64/kernel/mpam.c @@ -4,5 +4,9 @@ #include #include +#include DEFINE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); +DEFINE_STATIC_KEY_FALSE(mpam_enabled); +DEFINE_PER_CPU(u64, arm64_mpam_default); +DEFINE_PER_CPU(u64, arm64_mpam_current); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 385fb78845d6..dcd519994395 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -552,6 +553,12 @@ struct task_struct *__switch_to(struct task_struct *prev, if (prev->thread.sctlr_user != next->thread.sctlr_user) 
update_sctlr_el1(next->thread.sctlr_user); + /* + * MPAM thread switch happens after the DSB to ensure prev's accesses + * use prev's MPAM settings. + */ + mpam_thread_switch(next); + /* the actual thread switch */ last = cpu_switch_to(prev, next); -- Gitee From ae9245587695207adb1c807687f4fb59b7e8087e Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 11 Dec 2018 17:04:48 +0000 Subject: [PATCH 0577/2138] untested: KVM: arm64: Force guest EL1 to use user-space's partid configuration ANBZ: #8686 commit 0b21ba1c38b70420a9d7819012938d99273b8a13 morse-linux. While we trap the guest's attempts to read/write the MPAM control registers, these remain in effect. guest-EL0 uses KVM's user-space's configuration, as the value is left in the register, and guest-EL1 uses either the host kernel's configuration, or in the case of VHE, the unknown reset value of MPAM1_EL1. On nVHE systems, EL2 continues to use partid-0 for world-switch, even when the host may have configured its kernel threads to use a different partid. 0 may have been assigned to another task. We want to force the guest-EL1 to use KVM's user-space's MPAM configuration. On a nVHE system, copy the EL1 MPAM register to EL2. This ensures world-switch uses the same partid as the kernel thread does on the host. When loading the guests EL1 registers, copy the VMM's EL0 partid to the EL1 register. When restoring the hosts registers, the partid previously copied to EL2 can be used to restore EL1. For VHE systems, we can skip restoring the EL1 register for the host, as it is out-of-context once HCR_EL2.TGE is set. This is done outside the usual sysreg save/restore as the values can change behind KVMs back, so should not be stored in the guest context. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 27 ++++++++++++++++++++++ arch/arm64/kvm/hyp/nvhe/switch.c | 11 +++++++++ arch/arm64/kvm/hyp/vhe/sysreg-sr.c | 1 + 3 files changed, 39 insertions(+) diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h index bb6b571ec627..c8767abd693e 100644 --- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h @@ -15,6 +15,7 @@ #include #include #include +#include static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt) { @@ -243,4 +244,30 @@ static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu) write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2); } +/* + * The _EL0 value was written by the host's context switch, copy this into the + * guest's EL1. + */ +static inline void __mpam_guest_load(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature()) + write_sysreg_el1(read_sysreg_s(SYS_MPAM0_EL1), SYS_MPAM1); +} + +/* + * Copy the _EL2 register back to _EL1, clearing any trap bits EL2 may have set. + * nVHE world-switch copies the _EL1 register to _EL2. A VHE host writes to the + * _EL2 register as it is aliased by the hardware when TGE is set. 
+ */ +static inline void __mpam_guest_put(void) +{ + u64 val, mask = MPAM_SYSREG_PMG_D | MPAM_SYSREG_PMG_I | + MPAM_SYSREG_PARTID_D | MPAM_SYSREG_PARTID_I; + + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature()) { + val = FIELD_GET(mask, read_sysreg_s(SYS_MPAM2_EL2)); + write_sysreg_el1(val, SYS_MPAM1); + } +} + #endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */ diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index c353a06ee7e6..c2118f658e22 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -242,6 +242,13 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) } } +/* Use the host thread's partid and pmg for world switch */ +static void __mpam_copy_el1_to_el2(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature()) + write_sysreg_s(read_sysreg_s(SYS_MPAM1_EL1), SYS_MPAM2_EL2); +} + /* Switch to the guest for legacy non-VHE systems */ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) { @@ -251,6 +258,8 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) bool pmu_switch_needed; u64 exit_code; + __mpam_copy_el1_to_el2(); + /* * Having IRQs masked via PMR when entering the guest means the GIC * will not signal the CPU of interrupts of lower priority, and the @@ -310,6 +319,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) __timer_enable_traps(vcpu); __debug_switch_to_guest(vcpu); + __mpam_guest_load(); do { /* Jump in the fire! 
 */ @@ -320,6 +330,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) __sysreg_save_state_nvhe(guest_ctxt); __sysreg32_save_state(vcpu); + __mpam_guest_put(); __timer_disable_traps(vcpu); __hyp_vgic_save_state(vcpu); diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c index b35a178e7e0d..6b407cd3230d 100644 --- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c @@ -90,6 +90,7 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu) __sysreg32_restore_state(vcpu); __sysreg_restore_user_state(guest_ctxt); __sysreg_restore_el1_state(guest_ctxt); + __mpam_guest_load(); vcpu_set_flag(vcpu, SYSREGS_ON_CPU); -- Gitee From 6c413c11c670c6ac62186acc9e589be1da87cbd0 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 4 Dec 2023 14:33:19 +0000 Subject: [PATCH 0578/2138] ACPI / PPTT: Provide a helper to walk processor containers ANBZ: #8686 commit 140a693dd7a214179f67cb3dcf36f8fd9467e5e6 morse-linux. The PPTT describes CPUs and caches, as well as processor containers. To enable PPI partitions, the irqchip driver needs to know how many partitions the platform has, and which CPUs belong to which partition. When a percpu interrupt is registered, the partition is provided to allow a different driver to request the same percpu interrupt intid, one per partition. The acpi_id of the Processor Container is the natural way to do this with ACPI, but the DSDT AML interpreter is not available early enough for the irqchip driver. Fortunately, the same information can be described in the PPTT. Add a helper to count the number of Processor Containers in the PPTT. This is structured as a walker/callback as the irqchip driver will also use this to configure each partition. Only Processor entries in the PPTT that have a valid acpi id are considered as containers. To identify a particular Processor Container, it must have an id. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/acpi/pptt.c | 58 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index a35dd0e41c27..f3279bcb78ff 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -21,6 +21,8 @@ #include #include +typedef int (*acpi_pptt_cpu_callback_t)(struct acpi_pptt_processor *, void *); + static struct acpi_subtable_header *fetch_pptt_subtable(struct acpi_table_header *table_hdr, u32 pptt_ref) { @@ -293,6 +295,62 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he return NULL; } +/** + * acpi_pptt_for_each_container() - Iterate over all processor containers + * + * Not all 'Processor' entries in the PPTT are either a CPU or a Processor + * Container, they may exist purely to describe a Private resource. CPUs + * have to be leaves, so a Processor Container is a non-leaf that has the + * 'ACPI Processor ID valid' flag set. + * + * Return: 0 for a complete walk, or the first non-zero value from the callback + * that stopped the walk. 
+ */ +int acpi_pptt_for_each_container(acpi_pptt_cpu_callback_t callback, void *arg) +{ + struct acpi_pptt_processor *cpu_node; + struct acpi_table_header *table_hdr; + struct acpi_subtable_header *entry; + bool leaf_flag, has_leaf_flag = false; + unsigned long table_end; + acpi_status status; + u32 proc_sz; + int ret = 0; + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table_hdr); + if (ACPI_FAILURE(status)) + return 0; + + if (table_hdr->revision > 1) + has_leaf_flag = true; + + table_end = (unsigned long)table_hdr + table_hdr->length; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + proc_sz = sizeof(struct acpi_pptt_processor); + while ((unsigned long)entry + proc_sz < table_end) { + cpu_node = (struct acpi_pptt_processor *)entry; + if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && + cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID) + { + leaf_flag = cpu_node->flags & ACPI_PPTT_ACPI_LEAF_NODE; + if ((has_leaf_flag && !leaf_flag) || + (!has_leaf_flag && !acpi_pptt_leaf_node(table_hdr, cpu_node))) + { + ret = callback(cpu_node, arg); + if (ret) + break; + } + } + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, + entry->length); + } + + acpi_put_table(table_hdr); + + return ret; +} + static u8 acpi_cache_type(enum cache_type type) { switch (type) { -- Gitee From 41b1daf365fca7b0893e58b43e0920ed1d71010d Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 30 Nov 2020 13:29:56 +0000 Subject: [PATCH 0579/2138] ACPI / PPTT: Find PPTT cache level by ID ANBZ: #8686 commit 7bf596309dc800428c5ca5bfabe5053e45e47cfb morse-linux. The MPAM table identifies caches by id, but the driver also wants to know the cache level, without having to wait for whichever core has that cache to come online. Add a helper that walks every possible cache, until it finds the one identified by id, then return the level. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/acpi/pptt.c | 74 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/acpi.h | 5 +++ 2 files changed, 79 insertions(+) diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index f3279bcb78ff..6c1f668d63ba 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -870,3 +870,77 @@ int find_acpi_cpu_topology_hetero_id(unsigned int cpu) return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE, ACPI_PPTT_ACPI_IDENTICAL); } + + +/** + * find_acpi_cache_level_from_id() - Get the level of the specified cache + * @cache_id: The id field of the unified cache + * + * Determine the level relative to any CPU for the unified cache identified by + * cache_id. This allows the property to be found even if the CPUs are offline. + * + * The returned level can be used to group unified caches that are peers. + * + * The PPTT table must be rev 3 or later, + * + * If one CPU's L2 is shared with another as L3, this function will return + * an unpredictable value. + * + * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found. + * Otherwise returns a value which represents the level of the specified cache. + */ +int find_acpi_cache_level_from_id(u32 cache_id) +{ + u32 acpi_cpu_id; + acpi_status status; + int level, cpu, num_levels; + struct acpi_pptt_cache *cache; + struct acpi_table_header *table; + struct acpi_pptt_cache_v1* cache_v1; + struct acpi_pptt_processor *cpu_node; + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + acpi_pptt_warn_missing(); + return -ENOENT; + } + + if (table->revision < 3) { + acpi_put_table(table); + return -ENOENT; + } + + /* + * If we found the cache first, we'd still need to walk from each CPU + * to find the level... 
+ */ + for_each_possible_cpu(cpu) { + acpi_cpu_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node) + break; + acpi_count_levels(table, cpu_node, &num_levels, NULL); + + /* Start at 1 for L1 */ + for (level = 1; level <= num_levels; level++) { + cache = acpi_find_cache_node(table, acpi_cpu_id, + ACPI_PPTT_CACHE_TYPE_UNIFIED, + level, &cpu_node); + if (!cache) + continue; + + cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1, + cache, + sizeof(struct acpi_pptt_cache)); + + if (cache->flags & ACPI_PPTT_CACHE_ID_VALID && + cache_v1->cache_id == cache_id) { + acpi_put_table(table); + return level; + } + } + } + + acpi_put_table(table); + return -ENOENT; +} diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 29654f5d65db..7ce11a7a6cf9 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1495,6 +1495,7 @@ int find_acpi_cpu_topology(unsigned int cpu, int level); int find_acpi_cpu_topology_cluster(unsigned int cpu); int find_acpi_cpu_topology_package(unsigned int cpu); int find_acpi_cpu_topology_hetero_id(unsigned int cpu); +int find_acpi_cache_level_from_id(u32 cache_id); #else static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) { @@ -1516,6 +1517,10 @@ static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) { return -EINVAL; } +static inline int find_acpi_cache_level_from_id(u32 cache_id) +{ + return -EINVAL; +} #endif #ifdef CONFIG_ARM64 -- Gitee From 039682afcc4f59e4397b193b724df9353ced2405 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 4 Dec 2023 14:40:25 +0000 Subject: [PATCH 0580/2138] ACPI / PPTT: Add a helper to fill a cpumask from a processor container ANBZ: #8686 commit 3c920715e3e3bb5cd28b697db9103e482359861a morse-linux. The ACPI table for MPAM describes a set of CPUs with the UID of a processor container. These exist both in the namespace and the PPTT. 
Using the existing for-each helpers, provide a helper to find the specified processor container in the PPTT, and fill a cpumask with the CPUs that belong to it. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/acpi/pptt.c | 67 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/acpi.h | 6 ++++ 2 files changed, 73 insertions(+) diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index 6c1f668d63ba..9fbeed61cd87 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -295,6 +295,38 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he return NULL; } +/* parent_node points into the table, but the table isn't provided. */ +static void acpi_pptt_get_child_cpus(struct acpi_pptt_processor *parent_node, + cpumask_t *cpus) +{ + struct acpi_pptt_processor *cpu_node; + struct acpi_table_header *table_hdr; + acpi_status status; + u32 acpi_id; + int cpu; + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table_hdr); + if (ACPI_FAILURE(status)) + return; + + for_each_possible_cpu(cpu) { + acpi_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table_hdr, acpi_id); + + while (cpu_node) { + if (cpu_node == parent_node) { + cpumask_set_cpu(cpu, cpus); + break; + } + cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent); + } + } + + acpi_put_table(table_hdr); + + return; +} + /** * acpi_pptt_for_each_container() - Iterate over all processor containers * @@ -351,6 +383,41 @@ int acpi_pptt_for_each_container(acpi_pptt_cpu_callback_t callback, void *arg) return ret; } +struct __cpus_from_container_arg { + u32 acpi_cpu_id; + cpumask_t *cpus; +}; + +static int __cpus_from_container(struct acpi_pptt_processor *container, void *arg) +{ + struct __cpus_from_container_arg *params = arg; + + if 
(container->acpi_processor_id == params->acpi_cpu_id) + acpi_pptt_get_child_cpus(container, params->cpus); + + return 0; +} + +/** + * acpi_pptt_get_cpus_from_container() - Populate a cpumask with all CPUs in a + * processor containers + * + * Find the specified Processor Container, and fill cpus with all the cpus + * below it. + * + * Return: 0 for a complete walk, or an error if the mask is incomplete. + */ +int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus) +{ + struct __cpus_from_container_arg params; + + params.acpi_cpu_id = acpi_cpu_id; + params.cpus = cpus; + + cpumask_clear(cpus); + return acpi_pptt_for_each_container(&__cpus_from_container, ¶ms); +} + static u8 acpi_cache_type(enum cache_type type) { switch (type) { diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7ce11a7a6cf9..925f2e167b6d 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1496,6 +1496,7 @@ int find_acpi_cpu_topology_cluster(unsigned int cpu); int find_acpi_cpu_topology_package(unsigned int cpu); int find_acpi_cpu_topology_hetero_id(unsigned int cpu); int find_acpi_cache_level_from_id(u32 cache_id); +int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus); #else static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) { @@ -1521,6 +1522,11 @@ static inline int find_acpi_cache_level_from_id(u32 cache_id) { return -EINVAL; } +static inline int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, + cpumask_t *cpus) +{ + return -EINVAL; +} #endif #ifdef CONFIG_ARM64 -- Gitee From d806988f35420abd90c98b4f6f06d1b56d8e831f Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 19 May 2021 15:16:28 +0100 Subject: [PATCH 0581/2138] ACPI / PPTT: Add a helper to fill a cpumask from a cache_id ANBZ: #8686 commit 90e73e8b3b6219b34e875b0c866f94aae84c8952 morse-linux. MPAM identifies CPUs by the cache_id in the PPTT cache structure. 
The driver needs to know which CPUs are associated with the cache, the CPUs may not all be online, so cacheinfo does not have the information. Add a helper to pull this information out of the PPTT. CC: Rohit Mathew Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/acpi/pptt.c | 79 ++++++++++++++++++++++++++++++++++++++++++-- include/linux/acpi.h | 6 ++++ 2 files changed, 83 insertions(+), 2 deletions(-) diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index 9fbeed61cd87..201df60e4a79 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -183,9 +183,10 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr, * levels and split cache levels (data/instruction). * @table_hdr: Pointer to the head of the PPTT table * @cpu_node: processor node we wish to count caches for - * @levels: Number of levels if success. + * @levels: Number of levels if success. (*levels) should be initialized by + * the caller with the value to be used as the starting level. * @split_levels: Number of split cache levels (data/instruction) if - * success. Can by NULL. + * success. Can be NULL. * * Given a processor node containing a processing unit, walk into it and count * how many levels exist solely for it, and then walk up each level until we hit @@ -982,6 +983,8 @@ int find_acpi_cache_level_from_id(u32 cache_id) * to find the level... 
 */ for_each_possible_cpu(cpu) { + + num_levels = 0; acpi_cpu_id = get_acpi_id_for_cpu(cpu); cpu_node = acpi_find_processor_node(table, acpi_cpu_id); if (!cpu_node) @@ -1011,3 +1014,75 @@ int find_acpi_cache_level_from_id(u32 cache_id) acpi_put_table(table); return -ENOENT; } + +/** + * acpi_pptt_get_cpumask_from_cache_id() - Get the cpus associated with the + * specified cache + * @cache_id: The id field of the unified cache + * @cpus: Where to build the cpumask + * + * Determine which CPUs are below this cache in the PPTT. This allows the property + * to be found even if the CPUs are offline. + * + * The PPTT table must be rev 3 or later, + * + * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found. + * Otherwise returns 0 and sets the cpus in the provided cpumask. + */ +int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus) +{ + u32 acpi_cpu_id; + acpi_status status; + int level, cpu, num_levels; + struct acpi_pptt_cache *cache; + struct acpi_table_header *table; + struct acpi_pptt_cache_v1* cache_v1; + struct acpi_pptt_processor *cpu_node; + + cpumask_clear(cpus); + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + acpi_pptt_warn_missing(); + return -ENOENT; + } + + if (table->revision < 3) { + acpi_put_table(table); + return -ENOENT; + } + + /* + * If we found the cache first, we'd still need to walk from each cpu. 
+ */ + for_each_possible_cpu(cpu) { + + num_levels = 0; + acpi_cpu_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node) + break; + acpi_count_levels(table, cpu_node, &num_levels, NULL); + + /* Start at 1 for L1 */ + for (level = 1; level <= num_levels; level++) { + cache = acpi_find_cache_node(table, acpi_cpu_id, + ACPI_PPTT_CACHE_TYPE_UNIFIED, + level, &cpu_node); + if (!cache) + continue; + + cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1, + cache, + sizeof(struct acpi_pptt_cache)); + + if (cache->flags & ACPI_PPTT_CACHE_ID_VALID && + cache_v1->cache_id == cache_id) { + cpumask_set_cpu(cpu, cpus); + } + } + } + + acpi_put_table(table); + return 0; +} diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 925f2e167b6d..fc4cc82128f0 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1497,6 +1497,7 @@ int find_acpi_cpu_topology_package(unsigned int cpu); int find_acpi_cpu_topology_hetero_id(unsigned int cpu); int find_acpi_cache_level_from_id(u32 cache_id); int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus); +int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus); #else static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) { @@ -1527,6 +1528,11 @@ static inline int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, { return -EINVAL; } +static inline int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, + cpumask_t *cpus) +{ + return -EINVAL; +} #endif #ifdef CONFIG_ARM64 -- Gitee From 78b6bdad6ea9962bcb8c456c03e6db4d064e7722 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 4 Dec 2023 19:09:38 +0000 Subject: [PATCH 0582/2138] cacheinfo: Allow for >32-bit cache 'id' ANBZ: #8686 commit 48ce26a34bccf6aff649ce9e3318b6d6aca94a03 morse-linux. In preparation to set the cache 'id' based on the CPU h/w ids, allow for 64-bit bit 'id' value. The only case that needs this is arm64, so unsigned long is sufficient. Cc: Greg Kroah-Hartman Cc: "Rafael J. 
Wysocki" Signed-off-by: Rob Herring [ Update get_cpu_cacheinfo_id() too. Use UL instead of ULL. ] Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/base/cacheinfo.c | 8 +++++++- include/linux/cacheinfo.h | 8 ++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index b5715b8ded89..60290d7b4831 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -622,13 +622,19 @@ static ssize_t file_name##_show(struct device *dev, \ return sysfs_emit(buf, "%u\n", this_leaf->object); \ } -show_one(id, id); show_one(level, level); show_one(coherency_line_size, coherency_line_size); show_one(number_of_sets, number_of_sets); show_one(physical_line_partition, physical_line_partition); show_one(ways_of_associativity, ways_of_associativity); +static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct cacheinfo *this_leaf = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", this_leaf->id); +} + static ssize_t size_show(struct device *dev, struct device_attribute *attr, char *buf) { diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index a5cfd44fab45..7aa8da587f92 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -47,7 +47,7 @@ extern unsigned int coherency_max_size; * keeping, the remaining members form the core properties of the cache */ struct cacheinfo { - unsigned int id; + unsigned long id; enum cache_type type; unsigned int level; unsigned int coherency_line_size; @@ -115,7 +115,7 @@ const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); * Get the id of the cache associated with @cpu at level @level. * cpuhp lock must be held. 
*/ -static inline int get_cpu_cacheinfo_id(int cpu, int level) +static inline unsigned long get_cpu_cacheinfo_id(int cpu, int level) { struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu); int i; @@ -124,11 +124,11 @@ static inline int get_cpu_cacheinfo_id(int cpu, int level) if (ci->info_list[i].level == level) { if (ci->info_list[i].attributes & CACHE_ID) return ci->info_list[i].id; - return -1; + return ~0UL; } } - return -1; + return ~0UL; } #ifdef CONFIG_ARM64 -- Gitee From 4c54522917f94ac979be138cb76e48cda55ce764 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 6 Oct 2021 11:43:32 -0500 Subject: [PATCH 0583/2138] cacheinfo: Set cache 'id' based on DT data ANBZ: #8686 commit 594097510201724732eb99aaf68d838fcfcc3809 morse-linux. Use the minimum CPU h/w id of the CPUs associated with the cache for the cache 'id'. This will provide a stable id value for a given system. As we need to check all possible CPUs, we can't use the shared_cpu_map which is just online CPUs. As there's not a cache to CPUs mapping in DT, we have to walk all CPU nodes and then walk cache levels. Cc: Greg Kroah-Hartman Cc: "Rafael J. 
Wysocki" Signed-off-by: Rob Herring Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/base/cacheinfo.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 60290d7b4831..f088e9a48b5d 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -183,6 +183,31 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf, return of_property_read_bool(np, "cache-unified"); } +static void cache_of_set_id(struct cacheinfo *this_leaf, struct device_node *np) +{ + struct device_node *cpu; + unsigned long min_id = ~0UL; + + for_each_of_cpu_node(cpu) { + struct device_node *cache_node = cpu; + u64 id = of_get_cpu_hwid(cache_node, 0); + + while ((cache_node = of_find_next_cache_node(cache_node))) { + if ((cache_node == np) && (id < min_id)) { + min_id = id; + of_node_put(cache_node); + break; + } + of_node_put(cache_node); + } + } + + if (min_id != ~0UL) { + this_leaf->id = min_id; + this_leaf->attributes |= CACHE_ID; + } +} + static void cache_of_set_props(struct cacheinfo *this_leaf, struct device_node *np) { @@ -198,6 +223,7 @@ static void cache_of_set_props(struct cacheinfo *this_leaf, cache_get_line_size(this_leaf, np); cache_nr_sets(this_leaf, np); cache_associativity(this_leaf); + cache_of_set_id(this_leaf, np); } static int cache_setup_of_node(unsigned int cpu) -- Gitee From f4429a52884bf387d27b21fab29ef740b8af5299 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 20 Oct 2021 16:04:55 +0100 Subject: [PATCH 0584/2138] cacheinfo: Expose the code to generate a cache-id from a device_node ANBZ: #8686 commit 6ade1c87b850595b164455a76a569a67bbf57f97 morse-linux. The MPAM driver identifies caches by id for use with resctrl. 
It needs to know the cache-id when probing, but the value isn't set in cacheinfo until device_initcall(). Expose the code that generates the cache-id. The parts of the MPAM driver that run early can use this to set up the resctrl structures. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/base/cacheinfo.c | 13 ++++++++++--- include/linux/cacheinfo.h | 1 + 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index f088e9a48b5d..b864f8fbdaa1 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -183,7 +183,7 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf, return of_property_read_bool(np, "cache-unified"); } -static void cache_of_set_id(struct cacheinfo *this_leaf, struct device_node *np) +unsigned long cache_of_get_id(struct device_node *np) { struct device_node *cpu; unsigned long min_id = ~0UL; @@ -202,8 +202,15 @@ static void cache_of_set_id(struct cacheinfo *this_leaf, struct device_node *np) } } - if (min_id != ~0UL) { - this_leaf->id = min_id; + return min_id; +} + +static void cache_of_set_id(struct cacheinfo *this_leaf, struct device_node *np) +{ + unsigned long id = cache_of_get_id(np); + + if (id != ~0UL) { + this_leaf->id = id; this_leaf->attributes |= CACHE_ID; } } diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 7aa8da587f92..d343754aacfb 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -110,6 +110,7 @@ int acpi_get_cache_info(unsigned int cpu, #endif const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); +unsigned long cache_of_get_id(struct device_node *np); /* * Get the id of the cache associated with @cpu at level @level. 
-- Gitee From 96e02f86bae931fe1404a2a87bb83c8887ac8586 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Jul 2021 18:57:05 +0100 Subject: [PATCH 0585/2138] drivers: base: cacheinfo: Add helper to find the cache size from cpu+level ANBZ: #8686 commit 3a64a1d7d94c23c46bdea89071c9f1681d6167d1 morse-linux. MPAM needs to know the size of a cache associated with a particular CPU. The DT/ACPI agnostic way of doing this is to ask cacheinfo. Add a helper to do this. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- include/linux/cacheinfo.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index d343754aacfb..467ec6a76567 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -132,6 +132,27 @@ static inline unsigned long get_cpu_cacheinfo_id(int cpu, int level) return ~0UL; } +/* + * Get the size of the cache associated with @cpu at level @level. + * cpuhp lock must be held. + */ +static inline unsigned int get_cpu_cacheinfo_size(int cpu, int level) +{ + struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu); + int i; + + if (!ci->info_list) + return 0; + + for (i = 0; i < ci->num_leaves; i++) { + if (ci->info_list[i].level == level) { + return ci->info_list[i].size; + } + } + + return 0; +} + #ifdef CONFIG_ARM64 #define use_arch_cache_info() (true) #else -- Gitee From 117ac120893dfe196b70bd24c2624f136ebf1a99 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 4 Dec 2023 19:11:19 +0000 Subject: [PATCH 0586/2138] ACPI / MPAM: Parse the MPAM table ANBZ: #8686 commit 27b4ec98977590b385ce4128dcaf95df802ebead morse-linux. Add code to parse the arm64 specific MPAM table, looking up the cache level from the PPTT and feeding the end result into the MPAM driver. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- arch/arm64/Kconfig | 1 + drivers/acpi/arm64/Kconfig | 3 + drivers/acpi/arm64/Makefile | 1 + drivers/acpi/arm64/mpam.c | 369 ++++++++++++++++++++++++++++++++++++ drivers/acpi/tables.c | 2 +- include/linux/arm_mpam.h | 43 +++++ 6 files changed, 418 insertions(+), 1 deletion(-) create mode 100644 drivers/acpi/arm64/mpam.c create mode 100644 include/linux/arm_mpam.h diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 471e8129d0fb..145995b14daa 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2015,6 +2015,7 @@ config ARM64_TLB_RANGE config ARM64_MPAM bool "Enable support for MPAM" + select ACPI_MPAM if ACPI help Memory Partitioning and Monitoring is an optional extension that allows the CPUs to mark load and store transactions with diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig index b3ed6212244c..f2fd79f22e7d 100644 --- a/drivers/acpi/arm64/Kconfig +++ b/drivers/acpi/arm64/Kconfig @@ -21,3 +21,6 @@ config ACPI_AGDI config ACPI_APMT bool + +config ACPI_MPAM + bool diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index 143debc1ba4a..a55d16c01c50 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -4,4 +4,5 @@ obj-$(CONFIG_ACPI_IORT) += iort.o obj-$(CONFIG_ACPI_GTDT) += gtdt.o obj-$(CONFIG_ACPI_APMT) += apmt.o obj-$(CONFIG_ARM_AMBA) += amba.o +obj-$(CONFIG_ACPI_MPAM) += mpam.o obj-y += dma.o init.o diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c new file mode 100644 index 000000000000..8a63449f27b5 --- /dev/null +++ b/drivers/acpi/arm64/mpam.c @@ -0,0 +1,369 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2022 Arm Ltd. 
+ +/* Parse the MPAM ACPI table feeding the discovered nodes into the driver */ + +#define pr_fmt(fmt) "ACPI MPAM: " fmt + +#include +#include +#include +#include +#include + +#include + +#include + +/* Flags for acpi_table_mpam_msc.*_interrupt_flags */ +#define ACPI_MPAM_MSC_IRQ_MODE_EDGE 1 +#define ACPI_MPAM_MSC_IRQ_TYPE_MASK (3<<1) +#define ACPI_MPAM_MSC_IRQ_TYPE_WIRED 0 +#define ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER (1<<3) +#define ACPI_MPAM_MSC_IRQ_AFFINITY_VALID (1<<4) + +static bool frob_irq(struct platform_device *pdev, int intid, u32 flags, + int *irq, u32 processor_container_uid) +{ + int sense; + + if (!intid) + return false; + + /* 0 in this field indicates a wired interrupt */ + if (flags & ACPI_MPAM_MSC_IRQ_TYPE_MASK) + return false; + + if (flags & ACPI_MPAM_MSC_IRQ_MODE_EDGE) + sense = ACPI_EDGE_SENSITIVE; + else + sense = ACPI_LEVEL_SENSITIVE; + + /* + * If the GSI is in the GIC's PPI range, try and create a partitioned + * percpu interrupt. + */ + if (16 <= intid && intid < 32 && processor_container_uid != ~0) { + pr_err_once("Partitioned interrupts not supported\n"); + return false; + } else { + *irq = acpi_register_gsi(&pdev->dev, intid, sense, + ACPI_ACTIVE_HIGH); + } + if (*irq <= 0) { + pr_err_once("Failed to register interrupt 0x%x with ACPI\n", + intid); + return false; + } + + return true; +} + +static void acpi_mpam_parse_irqs(struct platform_device *pdev, + struct acpi_mpam_msc_node *tbl_msc, + struct resource *res, int *res_idx) +{ + u32 flags, aff = ~0; + int irq; + + flags = tbl_msc->overflow_interrupt_flags; + if (flags & ACPI_MPAM_MSC_IRQ_AFFINITY_VALID && + flags & ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER) + aff = tbl_msc->overflow_interrupt_affinity; + if (frob_irq(pdev, tbl_msc->overflow_interrupt, flags, &irq, aff)) { + res[*res_idx].start = irq; + res[*res_idx].end = irq; + res[*res_idx].flags = IORESOURCE_IRQ; + res[*res_idx].name = "overflow"; + + (*res_idx)++; + } + + flags = tbl_msc->error_interrupt_flags; + 
if (flags & ACPI_MPAM_MSC_IRQ_AFFINITY_VALID && + flags & ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER) + aff = tbl_msc->error_interrupt_affinity; + else + aff = ~0; + if (frob_irq(pdev, tbl_msc->error_interrupt, flags, &irq, aff)) { + res[*res_idx].start = irq; + res[*res_idx].end = irq; + res[*res_idx].flags = IORESOURCE_IRQ; + res[*res_idx].name = "error"; + + (*res_idx)++; + } +} + +static int acpi_mpam_parse_resource(struct mpam_msc *msc, + struct acpi_mpam_resource_node *res) +{ + u32 cache_id; + int level; + + switch (res->locator_type) { + case ACPI_MPAM_LOCATION_TYPE_PROCESSOR_CACHE: + cache_id = res->locator.cache_locator.cache_reference; + level = find_acpi_cache_level_from_id(cache_id); + if (level < 0) { + pr_err_once("Bad level for cache with id %u\n", cache_id); + return level; + } + return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE, + level, cache_id); + case ACPI_MPAM_LOCATION_TYPE_MEMORY: + return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_MEMORY, + 255, res->locator.memory_locator.proximity_domain); + default: + /* These get discovered later and treated as unknown */ + return 0; + } +} + +int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc) +{ + int i, err; + struct acpi_mpam_resource_node *resources; + + resources = (struct acpi_mpam_resource_node *)(tbl_msc + 1); + for (i = 0; i < tbl_msc->num_resouce_nodes; i++) { + err = acpi_mpam_parse_resource(msc, &resources[i]); + if (err) + return err; + } + + return 0; +} + +static bool __init parse_msc_pm_link(struct acpi_mpam_msc_node *tbl_msc, + struct platform_device *pdev, + u32 *acpi_id) +{ + bool acpi_id_valid = false; + struct acpi_device *buddy; + char hid[16], uid[16]; + int err; + + memset(&hid, 0, sizeof(hid)); + memcpy(hid, &tbl_msc->hardware_id_linked_device, + sizeof(tbl_msc->hardware_id_linked_device)); + + if (!strcmp(hid, ACPI_PROCESSOR_CONTAINER_HID)) { + *acpi_id = tbl_msc->instance_id_linked_device; + acpi_id_valid = true; + } + 
+ err = snprintf(uid, sizeof(uid), "%u", + tbl_msc->instance_id_linked_device); + if (err < 0 || err >= sizeof(uid)) + return acpi_id_valid; + + buddy = acpi_dev_get_first_match_dev(hid, uid, -1); + if (buddy) { + device_link_add(&pdev->dev, &buddy->dev, DL_FLAG_STATELESS); + } + + return acpi_id_valid; +} + +static int decode_interface_type(struct acpi_mpam_msc_node *tbl_msc, + enum mpam_msc_iface *iface) +{ + switch (tbl_msc->interface_type){ + case 0: + *iface = MPAM_IFACE_MMIO; + return 0; + case 1: + *iface = MPAM_IFACE_PCC; + return 0; + default: + return -EINVAL; + } +} + +static int __init _parse_table(struct acpi_table_header *table) +{ + char *table_end, *table_offset = (char *)(table + 1); + struct property_entry props[4]; /* needs a sentinel */ + struct acpi_mpam_msc_node *tbl_msc; + int next_res, next_prop, err = 0; + struct acpi_device *companion; + struct platform_device *pdev; + enum mpam_msc_iface iface; + struct resource res[3]; + char uid[16]; + u32 acpi_id; + + table_end = (char *)table + table->length; + + while (table_offset < table_end) { + tbl_msc = (struct acpi_mpam_msc_node *)table_offset; + table_offset += tbl_msc->length; + + /* + * If any of the reserved fields are set, make no attempt to + * parse the msc structure. This will prevent the driver from + * probing all the MSC, meaning it can't discover the system + * wide supported partid and pmg ranges. This avoids whatever + * this MSC is truncating the partids and creating a screaming + * error interrupt. 
+ */ + if (tbl_msc->reserved || tbl_msc->reserved1 || tbl_msc->reserved2) + continue; + + if (decode_interface_type(tbl_msc, &iface)) + continue; + + next_res = 0; + next_prop = 0; + memset(res, 0, sizeof(res)); + memset(props, 0, sizeof(props)); + + pdev = platform_device_alloc("mpam_msc", tbl_msc->identifier); + if (IS_ERR(pdev)) { + err = PTR_ERR(pdev); + break; + } + + if (tbl_msc->length < sizeof(*tbl_msc)) { + err = -EINVAL; + break; + } + + /* Some power management is described in the namespace: */ + err = snprintf(uid, sizeof(uid), "%u", tbl_msc->identifier); + if (err > 0 && err < sizeof(uid)) { + companion = acpi_dev_get_first_match_dev("ARMHAA5C", uid, -1); + if (companion) + ACPI_COMPANION_SET(&pdev->dev, companion); + } + + if (iface == MPAM_IFACE_MMIO) { + res[next_res].name = "MPAM:MSC"; + res[next_res].start = tbl_msc->base_address; + res[next_res].end = tbl_msc->base_address + tbl_msc->mmio_size - 1; + res[next_res].flags = IORESOURCE_MEM; + next_res++; + } else if (iface == MPAM_IFACE_PCC) { + props[next_prop++] = PROPERTY_ENTRY_U32("pcc-channel", + tbl_msc->base_address); + next_prop++; + } + + acpi_mpam_parse_irqs(pdev, tbl_msc, res, &next_res); + err = platform_device_add_resources(pdev, res, next_res); + if (err) + break; + + props[next_prop++] = PROPERTY_ENTRY_U32("arm,not-ready-us", + tbl_msc->max_nrdy_usec); + + /* + * The MSC's CPU affinity is described via its linked power + * management device, but only if it points at a Processor or + * Processor Container. 
+ */ + if (parse_msc_pm_link(tbl_msc, pdev, &acpi_id)) { + props[next_prop++] = PROPERTY_ENTRY_U32("cpu_affinity", + acpi_id); + } + + err = device_create_managed_software_node(&pdev->dev, props, + NULL); + if (err) + break; + + /* Come back later if you want the RIS too */ + err = platform_device_add_data(pdev, tbl_msc, tbl_msc->length); + if (err) + break; + + platform_device_add(pdev); + } + + if (err) + platform_device_put(pdev); + + return err; +} + +static struct acpi_table_header *get_table(void) +{ + struct acpi_table_header *table; + acpi_status status; + + if (acpi_disabled || !mpam_cpus_have_feature()) + return NULL; + + status = acpi_get_table(ACPI_SIG_MPAM, 0, &table); + if (ACPI_FAILURE(status)) + return NULL; + + if (table->revision != 1) + return NULL; + + return table; +} + + + +static int __init acpi_mpam_parse(void) +{ + struct acpi_table_header *mpam; + int err; + + mpam = get_table(); + if (!mpam) + return 0; + + err = _parse_table(mpam); + acpi_put_table(mpam); + + return err; +} + +static int _count_msc(struct acpi_table_header *table) +{ + char *table_end, *table_offset = (char *)(table + 1); + struct acpi_mpam_msc_node *tbl_msc; + int ret = 0; + + tbl_msc = (struct acpi_mpam_msc_node *)table_offset; + table_end = (char *)table + table->length; + + while (table_offset < table_end) { + if (tbl_msc->length < sizeof(*tbl_msc)) + return -EINVAL; + + ret++; + + table_offset += tbl_msc->length; + tbl_msc = (struct acpi_mpam_msc_node *)table_offset; + } + + return ret; +} + + +int acpi_mpam_count_msc(void) +{ + struct acpi_table_header *mpam; + int ret; + + mpam = get_table(); + if (!mpam) + return 0; + + ret = _count_msc(mpam); + acpi_put_table(mpam); + + return ret; +} + +/* + * Call after ACPI devices have been created, which happens behind acpi_scan_init() + * called from subsys_initcall(). PCC requires the mailbox driver, which is + * initialised from postcore_initcall(). 
+ */ +subsys_initcall_sync(acpi_mpam_parse); diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 8ab0a82b4da4..94cb47d740c9 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -566,7 +566,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = { ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT, ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI, - ACPI_SIG_NBFT }; + ACPI_SIG_NBFT, ACPI_SIG_MPAM }; #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header) diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h new file mode 100644 index 000000000000..0f1d3f07e789 --- /dev/null +++ b/include/linux/arm_mpam.h @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2021 Arm Ltd. + +#ifndef __LINUX_ARM_MPAM_H +#define __LINUX_ARM_MPAM_H + +#include +#include + +struct mpam_msc; + +enum mpam_msc_iface { + MPAM_IFACE_MMIO, /* a real MPAM MSC */ + MPAM_IFACE_PCC, /* a fake MPAM MSC */ +}; + +enum mpam_class_types { + MPAM_CLASS_CACHE, /* Well known caches, e.g. L2 */ + MPAM_CLASS_MEMORY, /* Main memory */ + MPAM_CLASS_UNKNOWN, /* Everything else, e.g. SMMU */ +}; + +#ifdef CONFIG_ACPI_MPAM +/* Parse the ACPI description of resources entries for this MSC. 
*/ +int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc); +int acpi_mpam_count_msc(void); +#else +static inline int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc) +{ + return -EINVAL; +} +static inline int acpi_mpam_count_msc(void) { return -EINVAL; } +#endif + +static inline int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, + enum mpam_class_types type, u8 class_id, int component_id) +{ + return -EINVAL; +} + +#endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 03a764cbfcfa764dc34a04c613a19d7f74ed6934 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 12 Nov 2021 13:24:35 -0600 Subject: [PATCH 0587/2138] dt-bindings: arm: Add MPAM MSC binding ANBZ: #8686 commit ce1052e983a1edec65defad8d94af1e263bd80de morse-linux. The binding is designed around the assumption that an MSC will be a sub-block of something else such as a memory controller, cache controller, or IOMMU. However, it's certainly possible a design does not have that association or has a mixture of both, so the binding illustrates how we can support that with RIS child nodes. A key part of MPAM is we need to know about all of the MSCs in the system before it can be enabled. This drives the need for the genericish 'arm,mpam-msc' compatible. Though we can't assume an MSC is accessible until a h/w specific driver potentially enables the h/w. 
Cc: James Morse Signed-off-by: Rob Herring Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- .../devicetree/bindings/arm/arm,mpam-msc.yaml | 227 ++++++++++++++++++ 1 file changed, 227 insertions(+) create mode 100644 Documentation/devicetree/bindings/arm/arm,mpam-msc.yaml diff --git a/Documentation/devicetree/bindings/arm/arm,mpam-msc.yaml b/Documentation/devicetree/bindings/arm/arm,mpam-msc.yaml new file mode 100644 index 000000000000..9d542ecb1a7d --- /dev/null +++ b/Documentation/devicetree/bindings/arm/arm,mpam-msc.yaml @@ -0,0 +1,227 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/arm/arm,mpam-msc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Arm Memory System Resource Partitioning and Monitoring (MPAM) + +description: | + The Arm MPAM specification can be found here: + + https://developer.arm.com/documentation/ddi0598/latest + +maintainers: + - Rob Herring + +properties: + compatible: + items: + - const: arm,mpam-msc # Further details are discoverable + - const: arm,mpam-memory-controller-msc + + reg: + maxItems: 1 + description: A memory region containing registers as defined in the MPAM + specification. + + interrupts: + minItems: 1 + items: + - description: error (optional) + - description: overflow (optional, only for monitoring) + + interrupt-names: + oneOf: + - items: + - enum: [ error, overflow ] + - items: + - const: error + - const: overflow + + arm,not-ready-us: + description: The maximum time in microseconds for monitoring data to be + accurate after a settings change. For more information, see the + Not-Ready (NRDY) bit description in the MPAM specification. 
+ + numa-node-id: true # see NUMA binding + + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + +patternProperties: + '^ris@[0-9a-f]$': + type: object + additionalProperties: false + description: | + RIS nodes for each RIS in an MSC. These nodes are required for each RIS + implementing known MPAM controls + + properties: + compatible: + enum: + # Bulk storage for cache + - arm,mpam-cache + # Memory bandwidth + - arm,mpam-memory + + reg: + minimum: 0 + maximum: 0xf + + cpus: + $ref: '/schemas/types.yaml#/definitions/phandle-array' + description: + Phandle(s) to the CPU node(s) this RIS belongs to. By default, the parent + device's affinity is used. + + arm,mpam-device: + $ref: '/schemas/types.yaml#/definitions/phandle' + description: + By default, the MPAM enabled device associated with a RIS is the MSC's + parent node. It is possible for each RIS to be associated with different + devices in which case 'arm,mpam-device' should be used. + + required: + - compatible + - reg + +required: + - compatible + - reg + +dependencies: + interrupts: [ interrupt-names ] + +additionalProperties: false + +examples: + - | + /* + cpus { + cpu@0 { + next-level-cache = <&L2_0>; + }; + cpu@100 { + next-level-cache = <&L2_1>; + }; + }; + */ + L2_0: cache-controller-0 { + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3>; + + }; + + L2_1: cache-controller-1 { + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3>; + + }; + + L3: cache-controller@30000000 { + compatible = "arm,dsu-l3-cache", "cache"; + cache-level = <3>; + cache-unified; + + ranges = <0x0 0x30000000 0x800000>; + #address-cells = <1>; + #size-cells = <1>; + + msc@10000 { + compatible = "arm,mpam-msc"; + + /* CPU affinity implied by parent cache node's */ + reg = <0x10000 0x2000>; + interrupts = <1>, <2>; + interrupt-names = "error", "overflow"; + arm,not-ready-us = <1>; + }; + }; + + mem: memory-controller@20000 { + compatible = 
"foo,a-memory-controller"; + reg = <0x20000 0x1000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + msc@21000 { + compatible = "arm,mpam-memory-controller-msc", "arm,mpam-msc"; + reg = <0x21000 0x1000>; + interrupts = <3>; + interrupt-names = "error"; + arm,not-ready-us = <1>; + numa-node-id = <1>; + }; + }; + + iommu@40000 { + reg = <0x40000 0x1000>; + + ranges; + #address-cells = <1>; + #size-cells = <1>; + + msc@41000 { + compatible = "arm,mpam-msc"; + reg = <0 0x1000>; + interrupts = <5>, <6>; + interrupt-names = "error", "overflow"; + arm,not-ready-us = <1>; + + #address-cells = <1>; + #size-cells = <0>; + + ris@2 { + compatible = "arm,mpam-cache"; + reg = <0>; + // TODO: How to map to device(s)? + }; + }; + }; + + msc@80000 { + compatible = "foo,a-standalone-msc"; + reg = <0x80000 0x1000>; + + clocks = <&clks 123>; + + ranges; + #address-cells = <1>; + #size-cells = <1>; + + msc@10000 { + compatible = "arm,mpam-msc"; + + reg = <0x10000 0x2000>; + interrupts = <7>; + interrupt-names = "overflow"; + arm,not-ready-us = <1>; + + #address-cells = <1>; + #size-cells = <0>; + + ris@0 { + compatible = "arm,mpam-cache"; + reg = <0>; + arm,mpam-device = <&L2_0>; + }; + + ris@1 { + compatible = "arm,mpam-memory"; + reg = <1>; + arm,mpam-device = <&mem>; + }; + }; + }; + +... -- Gitee From d551b72c99fc77d540378be36c1507bdfd6bb3b6 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 14 Aug 2018 15:03:34 +0100 Subject: [PATCH 0588/2138] arm_mpam: Add probe/remove for mpam msc driver and kbuild boiler plate ANBZ: #8686 commit 05b457d3d8f70861448547ea67ecaed9d2f44e46 morse-linux. Probing MPAM is convoluted. MSCs that are integrated with a CPU may only be accessible from those CPUs, and they may not be online. Touching the hardware early is pointless as MPAM can't be used until the system-wide common values for num_partid and num_pmg have been discovered. Start with driver probe/remove and mapping the MSC. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- arch/arm64/Kconfig | 1 + drivers/platform/Kconfig | 2 + drivers/platform/Makefile | 1 + drivers/platform/mpam/Kconfig | 6 + drivers/platform/mpam/Makefile | 1 + drivers/platform/mpam/mpam_devices.c | 355 ++++++++++++++++++++++++++ drivers/platform/mpam/mpam_internal.h | 48 ++++ 7 files changed, 414 insertions(+) create mode 100644 drivers/platform/mpam/Kconfig create mode 100644 drivers/platform/mpam/Makefile create mode 100644 drivers/platform/mpam/mpam_devices.c create mode 100644 drivers/platform/mpam/mpam_internal.h diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 145995b14daa..95c50f3ac290 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2016,6 +2016,7 @@ config ARM64_TLB_RANGE config ARM64_MPAM bool "Enable support for MPAM" select ACPI_MPAM if ACPI + select ARM_CPU_RESCTRL help Memory Partitioning and Monitoring is an optional extension that allows the CPUs to mark load and store transactions with diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig index 868b20361769..f26534a4a83b 100644 --- a/drivers/platform/Kconfig +++ b/drivers/platform/Kconfig @@ -9,6 +9,8 @@ source "drivers/platform/chrome/Kconfig" source "drivers/platform/mellanox/Kconfig" +source "drivers/platform/mpam/Kconfig" + source "drivers/platform/olpc/Kconfig" source "drivers/platform/surface/Kconfig" diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index 8296d4c41eb7..54ee16e4e4d8 100644 --- a/drivers/platform/Makefile +++ b/drivers/platform/Makefile @@ -12,3 +12,4 @@ obj-$(CONFIG_OLPC_EC) += olpc/ obj-$(CONFIG_GOLDFISH) += goldfish/ obj-$(CONFIG_CHROME_PLATFORMS) += chrome/ obj-$(CONFIG_SURFACE_PLATFORMS) += surface/ +obj-$(CONFIG_ARM_CPU_RESCTRL) += mpam/ diff --git 
a/drivers/platform/mpam/Kconfig b/drivers/platform/mpam/Kconfig new file mode 100644 index 000000000000..13bd86fc5e58 --- /dev/null +++ b/drivers/platform/mpam/Kconfig @@ -0,0 +1,6 @@ +# Confusingly, this is everything but the CPU bits of MPAM. CPU here means +# CPU resources, not containers or cgroups etc. +config ARM_CPU_RESCTRL + bool + depends on ARM64 + select RESCTRL_RMID_DEPENDS_ON_CLOSID diff --git a/drivers/platform/mpam/Makefile b/drivers/platform/mpam/Makefile new file mode 100644 index 000000000000..8ad69bfa2aa2 --- /dev/null +++ b/drivers/platform/mpam/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ARM_CPU_RESCTRL) += mpam_devices.o diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c new file mode 100644 index 000000000000..885f8b61cb65 --- /dev/null +++ b/drivers/platform/mpam/mpam_devices.c @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2022 Arm Ltd. + +#define pr_fmt(fmt) "mpam: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "mpam_internal.h" + +/* + * mpam_list_lock protects the SRCU lists when writing. Once the + * mpam_enabled key is enabled these lists are read-only, + * unless the error interrupt disables the driver. + */ +static DEFINE_MUTEX(mpam_list_lock); +static LIST_HEAD(mpam_all_msc); + +static struct srcu_struct mpam_srcu; + +/* MPAM isn't available until all the MSC have been probed. 
*/ +static u32 mpam_num_msc; + +static void mpam_discovery_complete(void) +{ + pr_err("Discovered all MSC\n"); +} + +static int mpam_dt_count_msc(void) +{ + int count = 0; + struct device_node *np; + + for_each_compatible_node(np, NULL, "arm,mpam-msc") + count++; + + return count; +} + +static int mpam_dt_parse_resource(struct mpam_msc *msc, struct device_node *np, + u32 ris_idx) +{ + int err = 0; + u32 level = 0; + unsigned long cache_id; + struct device_node *cache; + + do { + if (of_device_is_compatible(np, "arm,mpam-cache")) { + cache = of_parse_phandle(np, "arm,mpam-device", 0); + if (!cache) { + pr_err("Failed to read phandle\n"); + break; + } + } else if (of_device_is_compatible(np->parent, "cache")) { + cache = np->parent; + } else { + /* For now, only caches are supported */ + cache = NULL; + break; + } + + err = of_property_read_u32(cache, "cache-level", &level); + if (err) { + pr_err("Failed to read cache-level\n"); + break; + } + + cache_id = cache_of_get_id(cache); + if (cache_id == ~0UL) { + err = -ENOENT; + break; + } + + err = mpam_ris_create(msc, ris_idx, MPAM_CLASS_CACHE, level, + cache_id); + } while (0); + of_node_put(cache); + + return err; +} + + +static int mpam_dt_parse_resources(struct mpam_msc *msc, void *ignored) +{ + int err, num_ris = 0; + const u32 *ris_idx_p; + struct device_node *iter, *np; + + np = msc->pdev->dev.of_node; + for_each_child_of_node(np, iter) { + ris_idx_p = of_get_property(iter, "reg", NULL); + if (ris_idx_p) { + num_ris++; + err = mpam_dt_parse_resource(msc, iter, *ris_idx_p); + if (err) { + of_node_put(iter); + return err; + } + } + } + + if (!num_ris) + mpam_dt_parse_resource(msc, np, 0); + + return err; +} + +static int get_msc_affinity(struct mpam_msc *msc) +{ + struct device_node *parent; + u32 affinity_id; + int err; + + if (!acpi_disabled) { + err = device_property_read_u32(&msc->pdev->dev, "cpu_affinity", + &affinity_id); + if (err) { + cpumask_copy(&msc->accessibility, cpu_possible_mask); + err = 0; + } else 
{ + err = acpi_pptt_get_cpus_from_container(affinity_id, + &msc->accessibility); + } + + return err; + } + + /* This depends on the path to of_node */ + parent = of_get_parent(msc->pdev->dev.of_node); + if (parent == of_root) { + cpumask_copy(&msc->accessibility, cpu_possible_mask); + err = 0; + } else { + err = -EINVAL; + pr_err("Cannot determine CPU accessibility of MSC\n"); + } + of_node_put(parent); + + return err; +} + +static int fw_num_msc; + +static void mpam_pcc_rx_callback(struct mbox_client *cl, void *msg) +{ + /* TODO: wake up tasks blocked on this MSC's PCC channel */ +} + +static int mpam_msc_drv_probe(struct platform_device *pdev) +{ + int err; + pgprot_t prot; + void * __iomem io; + struct mpam_msc *msc; + struct resource *msc_res; + void *plat_data = pdev->dev.platform_data; + + mutex_lock(&mpam_list_lock); + do { + msc = devm_kzalloc(&pdev->dev, sizeof(*msc), GFP_KERNEL); + if (!msc) { + err = -ENOMEM; + break; + } + + INIT_LIST_HEAD_RCU(&msc->glbl_list); + msc->pdev = pdev; + + err = device_property_read_u32(&pdev->dev, "arm,not-ready-us", + &msc->nrdy_usec); + if (err) { + /* This will prevent CSU monitors being usable */ + msc->nrdy_usec = 0; + } + + err = get_msc_affinity(msc); + if (err) + break; + if (cpumask_empty(&msc->accessibility)) { + pr_err_once("msc:%u is not accessible from any CPU!", + msc->id); + err = -EINVAL; + break; + } + + mutex_init(&msc->lock); + msc->id = mpam_num_msc++; + INIT_LIST_HEAD_RCU(&msc->ris); + spin_lock_init(&msc->part_sel_lock); + spin_lock_init(&msc->mon_sel_lock); + + if (device_property_read_u32(&pdev->dev, "pcc-channel", + &msc->pcc_subspace_id)) + msc->iface = MPAM_IFACE_MMIO; + else + msc->iface = MPAM_IFACE_PCC; + + if (msc->iface == MPAM_IFACE_MMIO) { + io = devm_platform_get_and_ioremap_resource(pdev, 0, + &msc_res); + if (IS_ERR(io)) { + pr_err("Failed to map MSC base address\n"); + devm_kfree(&pdev->dev, msc); + err = PTR_ERR(io); + break; + } + msc->mapped_hwpage_sz = msc_res->end - msc_res->start; 
+ msc->mapped_hwpage = io; + } else if (msc->iface == MPAM_IFACE_PCC) { + msc->pcc_cl.dev = &pdev->dev; + msc->pcc_cl.rx_callback = mpam_pcc_rx_callback; + msc->pcc_cl.tx_block = false; + msc->pcc_cl.tx_tout = 1000; /* 1s */ + msc->pcc_cl.knows_txdone = false; + + msc->pcc_chan = pcc_mbox_request_channel(&msc->pcc_cl, + msc->pcc_subspace_id); + if (IS_ERR(msc->pcc_chan)) { + pr_err("Failed to request MSC PCC channel\n"); + devm_kfree(&pdev->dev, msc); + err = PTR_ERR(msc->pcc_chan); + break; + } + + prot = __acpi_get_mem_attribute(msc->pcc_chan->shmem_base_addr); + io = ioremap_prot(msc->pcc_chan->shmem_base_addr, + msc->pcc_chan->shmem_size, pgprot_val(prot)); + if (IS_ERR(io)) { + pr_err("Failed to map MSC base address\n"); + pcc_mbox_free_channel(msc->pcc_chan); + devm_kfree(&pdev->dev, msc); + err = PTR_ERR(io); + break; + } + + /* TODO: issue a read to update the registers */ + + msc->mapped_hwpage_sz = msc->pcc_chan->shmem_size; + msc->mapped_hwpage = io + sizeof(struct acpi_pcct_shared_memory); + } + + list_add_rcu(&msc->glbl_list, &mpam_all_msc); + platform_set_drvdata(pdev, msc); + } while (0); + mutex_unlock(&mpam_list_lock); + + if (!err) { + /* Create RIS entries described by firmware */ + if (!acpi_disabled) + err = acpi_mpam_parse_resources(msc, plat_data); + else + err = mpam_dt_parse_resources(msc, plat_data); + } + + if (!err && fw_num_msc == mpam_num_msc) + mpam_discovery_complete(); + + return err; +} + +static int mpam_msc_drv_remove(struct platform_device *pdev) +{ + struct mpam_msc *msc = platform_get_drvdata(pdev); + + if (!msc) + return 0; + + mutex_lock(&mpam_list_lock); + mpam_num_msc--; + platform_set_drvdata(pdev, NULL); + list_del_rcu(&msc->glbl_list); + synchronize_srcu(&mpam_srcu); + mutex_unlock(&mpam_list_lock); + + return 0; +} + +static const struct of_device_id mpam_of_match[] = { + { .compatible = "arm,mpam-msc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mpam_of_match); + +static struct platform_driver mpam_msc_driver = { + .driver 
= { + .name = "mpam_msc", + .of_match_table = of_match_ptr(mpam_of_match), + }, + .probe = mpam_msc_drv_probe, + .remove = mpam_msc_drv_remove, +}; + +/* + * MSC that are hidden under caches are not created as platform devices + * as there is no cache driver. Caches are also special-cased in + * get_msc_affinity(). + */ +static void mpam_dt_create_foundling_msc(void) +{ + int err; + struct device_node *cache; + + for_each_compatible_node(cache, NULL, "cache") { + err = of_platform_populate(cache, mpam_of_match, NULL, NULL); + if (err) { + pr_err("Failed to create MSC devices under caches\n"); + } + } +} + +static int __init mpam_msc_driver_init(void) +{ + if (!mpam_cpus_have_feature()) + return -EOPNOTSUPP; + + init_srcu_struct(&mpam_srcu); + + if (!acpi_disabled) + fw_num_msc = acpi_mpam_count_msc(); + else + fw_num_msc = mpam_dt_count_msc(); + + if (fw_num_msc <= 0) { + pr_err("No MSC devices found in firmware\n"); + return -EINVAL; + } + + if (acpi_disabled) + mpam_dt_create_foundling_msc(); + + return platform_driver_register(&mpam_msc_driver); +} +subsys_initcall(mpam_msc_driver_init); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h new file mode 100644 index 000000000000..affd7999fcad --- /dev/null +++ b/drivers/platform/mpam/mpam_internal.h @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2021 Arm Ltd. 
+ +#ifndef MPAM_INTERNAL_H +#define MPAM_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include + +struct mpam_msc +{ + /* member of mpam_all_msc */ + struct list_head glbl_list; + + int id; + struct platform_device *pdev; + + /* Not modified after mpam_is_enabled() becomes true */ + enum mpam_msc_iface iface; + u32 pcc_subspace_id; + struct mbox_client pcc_cl; + struct pcc_mbox_chan *pcc_chan; + u32 nrdy_usec; + cpumask_t accessibility; + + struct mutex lock; + unsigned long ris_idxs[128 / BITS_PER_LONG]; + u32 ris_max; + + /* mpam_msc_ris of this component */ + struct list_head ris; + + /* + * part_sel_lock protects access to the MSC hardware registers that are + * affected by MPAMCFG_PART_SEL. (including the ID registers) + * If needed, take msc->lock first. + */ + spinlock_t part_sel_lock; + void __iomem * mapped_hwpage; + size_t mapped_hwpage_sz; +}; + +#endif /* MPAM_INTERNAL_H */ -- Gitee From f0b44f08196ea7602ad808929c50df1650da7638 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 5 May 2021 17:18:41 +0100 Subject: [PATCH 0589/2138] arm_mpam: Add the class and component structures for ris firmware described ANBZ: #8686 commit 5a666c3b177c179161ce86a695d62d3019f83a58 morse-linux. An MSC is a container of resources, each identified by their RIS index. Some RIS are described by firmware to provide their position in the system. Others are discovered when the driver probes the hardware. To configure a resource it needs to be found by its class, e.g. 'L2'. There are two kinds of grouping, a class is a set of components, which are visible as there are likely to be multiple instances of the L2 cache. struct mpam_components are a set of struct mpam_msc_ris, which are not visible as each L2 cache may be composed of individual slices which need to be configured the same as the hardware is not able to distribute the configuration. Add support for creating and destroying these structures. 
A gfp is passed as the structure for 'unknown' may need creating if a new RIS entry is discovered when probing the MSC. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 362 +++++++++++++++++++++++++- drivers/platform/mpam/mpam_internal.h | 52 ++++ include/linux/arm_mpam.h | 7 +- 3 files changed, 412 insertions(+), 9 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 885f8b61cb65..599bd4ee5b00 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include @@ -37,11 +36,359 @@ static DEFINE_MUTEX(mpam_list_lock); static LIST_HEAD(mpam_all_msc); -static struct srcu_struct mpam_srcu; +struct srcu_struct mpam_srcu; /* MPAM isn't available until all the MSC have been probed. */ static u32 mpam_num_msc; +/* + * An MSC is a container for resources, each identified by their RIS index. + * Components are a group of RIS that control the same thing. + * Classes are the set components of the same type. + * + * e.g. The set of RIS that make up the L2 are a component. These are sometimes + * termed slices. They should be configured as if they were one MSC. + * + * e.g. The SoC probably has more than one L2, each attached to a distinct set + * of CPUs. All the L2 components are grouped as a class. + * + * When creating an MSC, struct mpam_msc is added to the all mpam_all_msc list, + * then linked via struct mpam_ris to a component and a class. + * The same MSC may exist under different class->component paths, but the RIS + * index will be unique. 
+ */ +LIST_HEAD(mpam_classes); + +static struct mpam_component * +mpam_component_alloc(struct mpam_class *class, int id, gfp_t gfp) +{ + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + comp = kzalloc(sizeof(*comp), gfp); + if (!comp) + return ERR_PTR(-ENOMEM); + + comp->comp_id = id; + INIT_LIST_HEAD_RCU(&comp->ris); + /* affinity is updated when ris are added */ + INIT_LIST_HEAD_RCU(&comp->class_list); + comp->class = class; + + list_add_rcu(&comp->class_list, &class->components); + + return comp; +} + +static struct mpam_component * +mpam_component_get(struct mpam_class *class, int id, bool alloc, gfp_t gfp) +{ + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(comp, &class->components, class_list) { + if (comp->comp_id == id) + return comp; + } + + if (!alloc) + return ERR_PTR(-ENOENT); + + return mpam_component_alloc(class, id, gfp); +} + +static struct mpam_class * +mpam_class_alloc(u8 level_idx, enum mpam_class_types type, gfp_t gfp) +{ + struct mpam_class *class; + + lockdep_assert_held(&mpam_list_lock); + + class = kzalloc(sizeof(*class), gfp); + if (!class) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD_RCU(&class->components); + /* affinity is updated when ris are added */ + class->level = level_idx; + class->type = type; + INIT_LIST_HEAD_RCU(&class->classes_list); + + list_add_rcu(&class->classes_list, &mpam_classes); + + return class; +} + +static struct mpam_class * +mpam_class_get(u8 level_idx, enum mpam_class_types type, bool alloc, gfp_t gfp) +{ + bool found = false; + struct mpam_class *class; + + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(class, &mpam_classes, classes_list) { + if (class->type == type && class->level == level_idx) { + found = true; + break; + } + } + + if (found) + return class; + + if (!alloc) + return ERR_PTR(-ENOENT); + + return mpam_class_alloc(level_idx, type, gfp); +} + +static void mpam_class_destroy(struct mpam_class *class) +{ + 
lockdep_assert_held(&mpam_list_lock); + + list_del_rcu(&class->classes_list); + synchronize_srcu(&mpam_srcu); + kfree(class); +} + +static void mpam_comp_destroy(struct mpam_component *comp) +{ + struct mpam_class *class = comp->class; + + lockdep_assert_held(&mpam_list_lock); + + list_del_rcu(&comp->class_list); + synchronize_srcu(&mpam_srcu); + kfree(comp); + + if (list_empty(&class->components)) + mpam_class_destroy(class); +} + +/* synchronise_srcu() before freeing ris */ +static void mpam_ris_destroy(struct mpam_msc_ris *ris) +{ + struct mpam_component *comp = ris->comp; + struct mpam_class *class = comp->class; + struct mpam_msc *msc = ris->msc; + + lockdep_assert_held(&mpam_list_lock); + lockdep_assert_preemption_enabled(); + + clear_bit(ris->ris_idx, msc->ris_idxs); + list_del_rcu(&ris->comp_list); + list_del_rcu(&ris->msc_list); + + cpumask_andnot(&comp->affinity, &comp->affinity, &ris->affinity); + cpumask_andnot(&class->affinity, &class->affinity, &ris->affinity); + + if (list_empty(&comp->ris)) + mpam_comp_destroy(comp); +} + +/* + * There are two ways of reaching a struct mpam_msc_ris. Via the + * class->component->ris, or via the msc. + * When destroying the msc, the other side needs unlinking and cleaning up too. + * synchronise_srcu() before freeing msc. + */ +static void mpam_msc_destroy(struct mpam_msc *msc) +{ + struct mpam_msc_ris *ris, *tmp; + + lockdep_assert_held(&mpam_list_lock); + lockdep_assert_preemption_enabled(); + + list_for_each_entry_safe(ris, tmp, &msc->ris, msc_list) + mpam_ris_destroy(ris); +} + +/* + * The cacheinfo structures are only populated when CPUs are online. + * This helper walks the device tree to include offline CPUs too. 
+ */ +static int get_cpumask_from_cache_id(u32 cache_id, u32 cache_level, + cpumask_t *affinity) +{ + int cpu, err; + u32 iter_level; + int iter_cache_id; + struct device_node *iter; + + if (!acpi_disabled) + return acpi_pptt_get_cpumask_from_cache_id(cache_id, affinity); + + for_each_possible_cpu(cpu) { + iter = of_get_cpu_node(cpu, NULL); + if (!iter) { + pr_err("Failed to find cpu%d device node\n", cpu); + return -ENOENT; + } + + while ((iter = of_find_next_cache_node(iter))) { + err = of_property_read_u32(iter, "cache-level", + &iter_level); + if (err || (iter_level != cache_level)) { + of_node_put(iter); + continue; + } + + /* + * get_cpu_cacheinfo_id() isn't ready until sometime + * during device_initcall(). Use cache_of_get_id(). + */ + iter_cache_id = cache_of_get_id(iter); + if (cache_id == ~0UL) { + of_node_put(iter); + continue; + } + + if (iter_cache_id == cache_id) + cpumask_set_cpu(cpu, affinity); + + of_node_put(iter); + } + } + + return 0; +} + + +/* + * cpumask_of_node() only knows about online CPUs. This can't tell us whether + * a class is represented on all possible CPUs. 
+ */ +static void get_cpumask_from_node_id(u32 node_id, cpumask_t *affinity) +{ + int cpu; + + for_each_possible_cpu(cpu) { + if (node_id == cpu_to_node(cpu)) + cpumask_set_cpu(cpu, affinity); + } +} + +static int get_cpumask_from_cache(struct device_node *cache, + cpumask_t *affinity) +{ + int err; + u32 cache_level; + int cache_id; + + err = of_property_read_u32(cache, "cache-level", &cache_level); + if (err) { + pr_err("Failed to read cache-level from cache node\n"); + return -ENOENT; + } + + cache_id = cache_of_get_id(cache); + if (cache_id == ~0UL) { + pr_err("Failed to calculate cache-id from cache node\n"); + return -ENOENT; + } + + return get_cpumask_from_cache_id(cache_id, cache_level, affinity); +} + +static int mpam_ris_get_affinity(struct mpam_msc *msc, cpumask_t *affinity, + enum mpam_class_types type, + struct mpam_class *class, + struct mpam_component *comp) +{ + int err; + + switch (type) { + case MPAM_CLASS_CACHE: + err = get_cpumask_from_cache_id(comp->comp_id, class->level, + affinity); + if (err) + return err; + + if (cpumask_empty(affinity)) + pr_warn_once("%s no CPUs associated with cache node", + dev_name(&msc->pdev->dev)); + + break; + case MPAM_CLASS_MEMORY: + get_cpumask_from_node_id(comp->comp_id, affinity); + if (cpumask_empty(affinity)) + pr_warn_once("%s no CPUs associated with memory node", + dev_name(&msc->pdev->dev)); + break; + case MPAM_CLASS_UNKNOWN: + return 0; + } + + cpumask_and(affinity, affinity, &msc->accessibility); + + return 0; +} + +static int mpam_ris_create_locked(struct mpam_msc *msc, u8 ris_idx, + enum mpam_class_types type, u8 class_id, + int component_id, gfp_t gfp) +{ + int err; + struct mpam_msc_ris *ris; + struct mpam_class *class; + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + if (test_and_set_bit(ris_idx, msc->ris_idxs)) + return -EBUSY; + + ris = devm_kzalloc(&msc->pdev->dev, sizeof(*ris), gfp); + if (!ris) + return -ENOMEM; + + class = mpam_class_get(class_id, type, true, gfp); 
+ if (IS_ERR(class)) + return PTR_ERR(class); + + comp = mpam_component_get(class, component_id, true, gfp); + if (IS_ERR(comp)) { + if (list_empty(&class->components)) + mpam_class_destroy(class); + return PTR_ERR(comp); + } + + err = mpam_ris_get_affinity(msc, &ris->affinity, type, class, comp); + if (err) { + if (list_empty(&class->components)) + mpam_class_destroy(class); + return err; + } + + ris->ris_idx = ris_idx; + INIT_LIST_HEAD_RCU(&ris->comp_list); + INIT_LIST_HEAD_RCU(&ris->msc_list); + ris->msc = msc; + ris->comp = comp; + + cpumask_or(&comp->affinity, &comp->affinity, &ris->affinity); + cpumask_or(&class->affinity, &class->affinity, &ris->affinity); + list_add_rcu(&ris->comp_list, &comp->ris); + + return 0; +} + +int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, + enum mpam_class_types type, u8 class_id, int component_id) +{ + int err; + + mutex_lock(&mpam_list_lock); + err = mpam_ris_create_locked(msc, ris_idx, type, class_id, + component_id, GFP_KERNEL); + mutex_unlock(&mpam_list_lock); + + return err; +} + static void mpam_discovery_complete(void) { pr_err("Discovered all MSC\n"); @@ -153,8 +500,14 @@ static int get_msc_affinity(struct mpam_msc *msc) cpumask_copy(&msc->accessibility, cpu_possible_mask); err = 0; } else { - err = -EINVAL; - pr_err("Cannot determine CPU accessibility of MSC\n"); + if (of_device_is_compatible(parent, "cache")) { + err = get_cpumask_from_cache(parent, + &msc->accessibility); + } else { + err = -EINVAL; + pr_err("Cannot determine accessibility of MSC: %s\n", + dev_name(&msc->pdev->dev)); + } } of_node_put(parent); @@ -291,6 +644,7 @@ static int mpam_msc_drv_remove(struct platform_device *pdev) mpam_num_msc--; platform_set_drvdata(pdev, NULL); list_del_rcu(&msc->glbl_list); + mpam_msc_destroy(msc); synchronize_srcu(&mpam_srcu); mutex_unlock(&mpam_list_lock); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index affd7999fcad..07d9c70bf1e6 100644 --- 
a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -11,6 +11,7 @@ #include #include #include +#include struct mpam_msc { @@ -45,4 +46,55 @@ struct mpam_msc size_t mapped_hwpage_sz; }; +struct mpam_class +{ + /* mpam_components in this class */ + struct list_head components; + + cpumask_t affinity; + + u8 level; + enum mpam_class_types type; + + /* member of mpam_classes */ + struct list_head classes_list; +}; + +struct mpam_component +{ + u32 comp_id; + + /* mpam_msc_ris in this component */ + struct list_head ris; + + cpumask_t affinity; + + /* member of mpam_class:components */ + struct list_head class_list; + + /* parent: */ + struct mpam_class *class; +}; + +struct mpam_msc_ris +{ + u8 ris_idx; + + cpumask_t affinity; + + /* member of mpam_component:ris */ + struct list_head comp_list; + + /* member of mpam_msc:ris */ + struct list_head msc_list; + + /* parents: */ + struct mpam_msc *msc; + struct mpam_component *comp; +}; + +/* List of all classes */ +extern struct list_head mpam_classes; +extern struct srcu_struct mpam_srcu; + #endif /* MPAM_INTERNAL_H */ diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 0f1d3f07e789..950ea7049d53 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -34,10 +34,7 @@ static inline int acpi_mpam_parse_resources(struct mpam_msc *msc, static inline int acpi_mpam_count_msc(void) { return -EINVAL; } #endif -static inline int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, - enum mpam_class_types type, u8 class_id, int component_id) -{ - return -EINVAL; -} +int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, + enum mpam_class_types type, u8 class_id, int component_id); #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 5a19719030a93d927a8cf5addc24f8f1d4d176e6 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 13 Dec 2018 11:41:37 +0000 Subject: [PATCH 0590/2138] arm_mpam: Add MPAM MSC register layout definitions ANBZ: #8686 commit 
eb95c932eec969d1597429391e526dce2052a3cb morse-linux. Memory Partitioning and Monitoring (MPAM) has memory mapped devices (MSCs) with an identity/configuration page. Add the definitions for these registers as offset within the page(s). Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_internal.h | 254 ++++++++++++++++++++++++++ 1 file changed, 254 insertions(+) diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 07d9c70bf1e6..5d339e4375e2 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -97,4 +97,258 @@ struct mpam_msc_ris extern struct list_head mpam_classes; extern struct srcu_struct mpam_srcu; + +/* + * MPAM MSCs have the following register layout. See: + * Arm Architecture Reference Manual Supplement - Memory System Resource + * Partitioning and Monitoring (MPAM), for Armv8-A. 
DDI 0598A.a + */ +#define MPAM_ARCHITECTURE_V1 0x10 + +/* Memory mapped control pages: */ +/* ID Register offsets in the memory mapped page */ +#define MPAMF_IDR 0x0000 /* features id register */ +#define MPAMF_MSMON_IDR 0x0080 /* performance monitoring features */ +#define MPAMF_IMPL_IDR 0x0028 /* imp-def partitioning */ +#define MPAMF_CPOR_IDR 0x0030 /* cache-portion partitioning */ +#define MPAMF_CCAP_IDR 0x0038 /* cache-capacity partitioning */ +#define MPAMF_MBW_IDR 0x0040 /* mem-bw partitioning */ +#define MPAMF_PRI_IDR 0x0048 /* priority partitioning */ +#define MPAMF_CSUMON_IDR 0x0088 /* cache-usage monitor */ +#define MPAMF_MBWUMON_IDR 0x0090 /* mem-bw usage monitor */ +#define MPAMF_PARTID_NRW_IDR 0x0050 /* partid-narrowing */ +#define MPAMF_IIDR 0x0018 /* implementer id register */ +#define MPAMF_AIDR 0x0020 /* architectural id register */ + +/* Configuration and Status Register offsets in the memory mapped page */ +#define MPAMCFG_PART_SEL 0x0100 /* partid to configure: */ +#define MPAMCFG_CPBM 0x1000 /* cache-portion config */ +#define MPAMCFG_CMAX 0x0108 /* cache-capacity config */ +#define MPAMCFG_MBW_MIN 0x0200 /* min mem-bw config */ +#define MPAMCFG_MBW_MAX 0x0208 /* max mem-bw config */ +#define MPAMCFG_MBW_WINWD 0x0220 /* mem-bw accounting window config */ +#define MPAMCFG_MBW_PBM 0x2000 /* mem-bw portion bitmap config */ +#define MPAMCFG_PRI 0x0400 /* priority partitioning config */ +#define MPAMCFG_MBW_PROP 0x0500 /* mem-bw stride config */ +#define MPAMCFG_INTPARTID 0x0600 /* partid-narrowing config */ + +#define MSMON_CFG_MON_SEL 0x0800 /* monitor selector */ +#define MSMON_CFG_CSU_FLT 0x0810 /* cache-usage monitor filter */ +#define MSMON_CFG_CSU_CTL 0x0818 /* cache-usage monitor config */ +#define MSMON_CFG_MBWU_FLT 0x0820 /* mem-bw monitor filter */ +#define MSMON_CFG_MBWU_CTL 0x0828 /* mem-bw monitor config */ +#define MSMON_CSU 0x0840 /* current cache-usage */ +#define MSMON_CSU_CAPTURE 0x0848 /* last cache-usage value captured */ 
+#define MSMON_MBWU 0x0860 /* current mem-bw usage value */ +#define MSMON_MBWU_CAPTURE 0x0868 /* last mem-bw value captured */ +#define MSMON_CAPT_EVNT 0x0808 /* signal a capture event */ +#define MPAMF_ESR 0x00F8 /* error status register */ +#define MPAMF_ECR 0x00F0 /* error control register */ + +/* MPAMF_IDR - MPAM features ID register */ +#define MPAMF_IDR_PARTID_MAX GENMASK(15, 0) +#define MPAMF_IDR_PMG_MAX GENMASK(23, 16) +#define MPAMF_IDR_HAS_CCAP_PART BIT(24) +#define MPAMF_IDR_HAS_CPOR_PART BIT(25) +#define MPAMF_IDR_HAS_MBW_PART BIT(26) +#define MPAMF_IDR_HAS_PRI_PART BIT(27) +#define MPAMF_IDR_HAS_EXT BIT(28) +#define MPAMF_IDR_HAS_IMPL_IDR BIT(29) +#define MPAMF_IDR_HAS_MSMON BIT(30) +#define MPAMF_IDR_HAS_PARTID_NRW BIT(31) +#define MPAMF_IDR_HAS_RIS BIT(32) +#define MPAMF_IDR_HAS_EXT_ESR BIT(38) +#define MPAMF_IDR_HAS_ESR BIT(39) +#define MPAMF_IDR_RIS_MAX GENMASK(59, 56) + + +/* MPAMF_MSMON_IDR - MPAM performance monitoring ID register */ +#define MPAMF_MSMON_IDR_MSMON_CSU BIT(16) +#define MPAMF_MSMON_IDR_MSMON_MBWU BIT(17) +#define MPAMF_MSMON_IDR_HAS_LOCAL_CAPT_EVNT BIT(31) + +/* MPAMF_CPOR_IDR - MPAM features cache portion partitioning ID register */ +#define MPAMF_CPOR_IDR_CPBM_WD GENMASK(15, 0) + +/* MPAMF_CCAP_IDR - MPAM features cache capacity partitioning ID register */ +#define MPAMF_CCAP_IDR_CMAX_WD GENMASK(5, 0) + +/* MPAMF_MBW_IDR - MPAM features memory bandwidth partitioning ID register */ +#define MPAMF_MBW_IDR_BWA_WD GENMASK(5, 0) +#define MPAMF_MBW_IDR_HAS_MIN BIT(10) +#define MPAMF_MBW_IDR_HAS_MAX BIT(11) +#define MPAMF_MBW_IDR_HAS_PBM BIT(12) +#define MPAMF_MBW_IDR_HAS_PROP BIT(13) +#define MPAMF_MBW_IDR_WINDWR BIT(14) +#define MPAMF_MBW_IDR_BWPBM_WD GENMASK(28, 16) + +/* MPAMF_PRI_IDR - MPAM features priority partitioning ID register */ +#define MPAMF_PRI_IDR_HAS_INTPRI BIT(0) +#define MPAMF_PRI_IDR_INTPRI_0_IS_LOW BIT(1) +#define MPAMF_PRI_IDR_INTPRI_WD GENMASK(9, 4) +#define MPAMF_PRI_IDR_HAS_DSPRI BIT(16) +#define 
MPAMF_PRI_IDR_DSPRI_0_IS_LOW BIT(17) +#define MPAMF_PRI_IDR_DSPRI_WD GENMASK(25, 20) + +/* MPAMF_CSUMON_IDR - MPAM cache storage usage monitor ID register */ +#define MPAMF_CSUMON_IDR_NUM_MON GENMASK(15, 0) +#define MPAMF_CSUMON_IDR_HAS_CAPTURE BIT(31) + +/* MPAMF_MBWUMON_IDR - MPAM memory bandwidth usage monitor ID register */ +#define MPAMF_MBWUMON_IDR_NUM_MON GENMASK(15, 0) +#define MPAMF_MBWUMON_IDR_RWBW BIT(28) +#define MPAMF_MBWUMON_IDR_LWD BIT(29) +#define MPAMF_MBWUMON_IDR_HAS_LONG BIT(30) +#define MPAMF_MBWUMON_IDR_HAS_CAPTURE BIT(31) + +/* MPAMF_PARTID_NRW_IDR - MPAM PARTID narrowing ID register */ +#define MPAMF_PARTID_NRW_IDR_INTPARTID_MAX GENMASK(15, 0) + +/* MPAMF_IIDR - MPAM implementation ID register */ +#define MPAMF_IIDR_PRODUCTID GENMASK(31, 20) +#define MPAMF_IIDR_PRODUCTID_SHIFT 20 +#define MPAMF_IIDR_VARIANT GENMASK(19, 16) +#define MPAMF_IIDR_VARIANT_SHIFT 16 +#define MPAMF_IIDR_REVISON GENMASK(15, 12) +#define MPAMF_IIDR_REVISON_SHIFT 12 +#define MPAMF_IIDR_IMPLEMENTER GENMASK(11, 0) +#define MPAMF_IIDR_IMPLEMENTER_SHIFT 0 + +/* MPAMF_AIDR - MPAM architecture ID register */ +#define MPAMF_AIDR_ARCH_MAJOR_REV GENMASK(7, 4) +#define MPAMF_AIDR_ARCH_MINOR_REV GENMASK(3, 0) + +/* MPAMCFG_PART_SEL - MPAM partition configuration selection register */ +#define MPAMCFG_PART_SEL_PARTID_SEL GENMASK(15, 0) +#define MPAMCFG_PART_SEL_INTERNAL BIT(16) +#define MPAMCFG_PART_SEL_RIS GENMASK(27, 24) + +/* MPAMCFG_CMAX - MPAM cache portion bitmap partition configuration register */ +#define MPAMCFG_CMAX_CMAX GENMASK(15, 0) + +/* + * MPAMCFG_MBW_MIN - MPAM memory minimum bandwidth partitioning configuration + * register + */ +#define MPAMCFG_MBW_MIN_MIN GENMASK(15, 0) + +/* + * MPAMCFG_MBW_MAX - MPAM memory maximum bandwidth partitioning configuration + * register + */ +#define MPAMCFG_MBW_MAX_MAX GENMASK(15, 0) +#define MPAMCFG_MBW_MAX_HARDLIM BIT(31) + +/* + * MPAMCFG_MBW_WINWD - MPAM memory bandwidth partitioning window width + * register + */ +#define 
MPAMCFG_MBW_WINWD_US_FRAC GENMASK(7, 0) +#define MPAMCFG_MBW_WINWD_US_INT GENMASK(23, 8) + + +/* MPAMCFG_PRI - MPAM priority partitioning configuration register */ +#define MPAMCFG_PRI_INTPRI GENMASK(15, 0) +#define MPAMCFG_PRI_DSPRI GENMASK(31, 16) + +/* + * MPAMCFG_MBW_PROP - Memory bandwidth proportional stride partitioning + * configuration register + */ +#define MPAMCFG_MBW_PROP_STRIDEM1 GENMASK(15, 0) +#define MPAMCFG_MBW_PROP_EN BIT(31) + +/* + * MPAMCFG_INTPARTID - MPAM internal partition narrowing configuration register + */ +#define MPAMCFG_INTPARTID_INTPARTID GENMASK(15, 0) +#define MPAMCFG_INTPARTID_INTERNAL BIT(16) + +/* MSMON_CFG_MON_SEL - Memory system performance monitor selection register */ +#define MSMON_CFG_MON_SEL_MON_SEL GENMASK(7, 0) +#define MSMON_CFG_MON_SEL_RIS GENMASK(27, 24) + +/* MPAMF_ESR - MPAM Error Status Register */ +#define MPAMF_ESR_PARTID_OR_MON GENMASK(15, 0) +#define MPAMF_ESR_PMG GENMASK(23, 16) +#define MPAMF_ESR_ERRCODE GENMASK(27, 24) +#define MPAMF_ESR_OVRWR BIT(31) +#define MPAMF_ESR_RIS GENMASK(35, 32) + +/* MPAMF_ECR - MPAM Error Control Register */ +#define MPAMF_ECR_INTEN BIT(0) + +/* Error conditions in accessing memory mapped registers */ +#define MPAM_ERRCODE_NONE 0 +#define MPAM_ERRCODE_PARTID_SEL_RANGE 1 +#define MPAM_ERRCODE_REQ_PARTID_RANGE 2 +#define MPAM_ERRCODE_MSMONCFG_ID_RANGE 3 +#define MPAM_ERRCODE_REQ_PMG_RANGE 4 +#define MPAM_ERRCODE_MONITOR_RANGE 5 +#define MPAM_ERRCODE_INTPARTID_RANGE 6 +#define MPAM_ERRCODE_UNEXPECTED_INTERNAL 7 + +/* + * MSMON_CFG_CSU_FLT - Memory system performance monitor configure cache storage + * usage monitor filter register + */ +#define MSMON_CFG_CSU_FLT_PARTID GENMASK(15, 0) +#define MSMON_CFG_CSU_FLT_PMG GENMASK(23, 16) + +/* + * MSMON_CFG_CSU_CTL - Memory system performance monitor configure cache storage + * usage monitor control register + * MSMON_CFG_MBWU_CTL - Memory system performance monitor configure memory + * bandwidth usage monitor control register + */ 
+#define MSMON_CFG_x_CTL_TYPE GENMASK(7, 0) +#define MSMON_CFG_x_CTL_MATCH_PARTID BIT(16) +#define MSMON_CFG_x_CTL_MATCH_PMG BIT(17) +#define MSMON_CFG_x_CTL_SCLEN BIT(19) +#define MSMON_CFG_x_CTL_SUBTYPE GENMASK(23, 20) +#define MSMON_CFG_x_CTL_OFLOW_FRZ BIT(24) +#define MSMON_CFG_x_CTL_OFLOW_INTR BIT(25) +#define MSMON_CFG_x_CTL_OFLOW_STATUS BIT(26) +#define MSMON_CFG_x_CTL_CAPT_RESET BIT(27) +#define MSMON_CFG_x_CTL_CAPT_EVNT GENMASK(30, 28) +#define MSMON_CFG_x_CTL_EN BIT(31) + +#define MSMON_CFG_MBWU_CTL_TYPE_MBWU 0x42 +#define MSMON_CFG_MBWU_CTL_TYPE_CSU 0x43 + +#define MSMON_CFG_MBWU_CTL_SUBTYPE_NONE 0 +#define MSMON_CFG_MBWU_CTL_SUBTYPE_READ 1 +#define MSMON_CFG_MBWU_CTL_SUBTYPE_WRITE 2 +#define MSMON_CFG_MBWU_CTL_SUBTYPE_BOTH 3 + +#define MSMON_CFG_MBWU_CTL_SUBTYPE_MAX 3 +#define MSMON_CFG_MBWU_CTL_SUBTYPE_MASK 0x3 + +/* + * MSMON_CFG_MBWU_FLT - Memory system performance monitor configure memory + * bandwidth usage monitor filter register + */ +#define MSMON_CFG_MBWU_FLT_PARTID GENMASK(15, 0) +#define MSMON_CFG_MBWU_FLT_PMG GENMASK(23, 16) +#define MSMON_CFG_MBWU_FLT_RWBW GENMASK(31, 30) + +/* + * MSMON_CSU - Memory system performance monitor cache storage usage monitor + * register + * MSMON_CSU_CAPTURE - Memory system performance monitor cache storage usage + * capture register + * MSMON_MBWU - Memory system performance monitor memory bandwidth usage + * monitor register + * MSMON_MBWU_CAPTURE - Memory system performance monitor memory bandwidth usage + * capture register + */ +#define MSMON___VALUE GENMASK(30, 0) +#define MSMON___NRDY BIT(31) +#define MSMON_MBWU_L_VALUE GENMASK(62, 0) +/* + * MSMON_CAPT_EVNT - Memory system performance monitoring capture event + * generation register + */ +#define MSMON_CAPT_EVNT_NOW BIT(0) + #endif /* MPAM_INTERNAL_H */ -- Gitee From e7847a43520e59a017523b19a2d01b210053f7e4 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 4 May 2021 18:12:42 +0100 Subject: [PATCH 0591/2138] arm_mpam: Add cpuhp callbacks to probe 
MSC hardware ANBZ: #8686 commit f4d4d6b4ee651ec7573b4bc521ed6ff101ab934f morse-linux. Because an MSC can only by accessed from the CPUs in its cpu-affinity set we need to be running on one of those CPUs to probe the MSC hardware. Do this work in the cpuhp callback. Probing the hardware will only happen before MPAM is enabled, walk all the MSCs and probe those we can reach that haven't already been probed. Later, enabling MPAM will enable a static key which will allow mpam_discovery_cpu_online() and its mutex to be skipped. Enabling a static key will also take the cpuhp lock, so can't be done from the cpuhp callback. Whenever a new MSC has been probed schedule work to test if all the MSCs have now been probed. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 148 +++++++++++++++++++++++++- drivers/platform/mpam/mpam_internal.h | 5 +- 2 files changed, 149 insertions(+), 4 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 599bd4ee5b00..48811c90b78a 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -4,6 +4,7 @@ #define pr_fmt(fmt) "mpam: " fmt #include +#include #include #include #include @@ -21,6 +22,7 @@ #include #include #include +#include #include @@ -41,6 +43,16 @@ struct srcu_struct mpam_srcu; /* MPAM isn't available until all the MSC have been probed. */ static u32 mpam_num_msc; +static int mpam_cpuhp_state; +static DEFINE_MUTEX(mpam_cpuhp_state_lock); + +/* + * mpam is enabled once all devices have been probed from CPU online callbacks, + * scheduled via this work_struct. If access to an MSC depends on a CPU that + * was not brought online at boot, this can happen surprisingly late. 
+ */ +static DECLARE_WORK(mpam_enable_work, &mpam_enable); + /* * An MSC is a container for resources, each identified by their RIS index. * Components are a group of RIS that control the same thing. @@ -59,6 +71,24 @@ static u32 mpam_num_msc; */ LIST_HEAD(mpam_classes); +static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg) +{ + WARN_ON_ONCE(reg > msc->mapped_hwpage_sz); + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); + + return readl_relaxed(msc->mapped_hwpage + reg); +} + +#define mpam_read_partsel_reg(msc, reg) \ +({ \ + u32 ____ret; \ + \ + lockdep_assert_held_once(&msc->part_sel_lock); \ + ____ret = __mpam_read_reg(msc, MPAMF_##reg); \ + \ + ____ret; \ +}) + static struct mpam_component * mpam_component_alloc(struct mpam_class *class, int id, gfp_t gfp) { @@ -389,9 +419,81 @@ int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, return err; } -static void mpam_discovery_complete(void) +static int mpam_msc_hw_probe(struct mpam_msc *msc) +{ + u64 idr; + int err; + + lockdep_assert_held(&msc->lock); + + spin_lock(&msc->part_sel_lock); + idr = mpam_read_partsel_reg(msc, AIDR); + if ((idr & MPAMF_AIDR_ARCH_MAJOR_REV) != MPAM_ARCHITECTURE_V1) { + pr_err_once("%s does not match MPAM architecture v1.0\n", + dev_name(&msc->pdev->dev)); + err = -EIO; + } else { + msc->probed = true; + err = 0; + } + spin_unlock(&msc->part_sel_lock); + + return err; +} + +static int mpam_cpu_online(unsigned int cpu) { - pr_err("Discovered all MSC\n"); + return 0; +} + +/* Before mpam is enabled, try to probe new MSC */ +static int mpam_discovery_cpu_online(unsigned int cpu) +{ + int err = 0; + struct mpam_msc *msc; + bool new_device_probed = false; + + mutex_lock(&mpam_list_lock); + list_for_each_entry(msc, &mpam_all_msc, glbl_list) { + if (!cpumask_test_cpu(cpu, &msc->accessibility)) + continue; + + mutex_lock(&msc->lock); + if (!msc->probed) + err = mpam_msc_hw_probe(msc); + mutex_unlock(&msc->lock); + + if (!err) + new_device_probed = true; + else + 
break; // mpam_broken + } + mutex_unlock(&mpam_list_lock); + + if (new_device_probed && !err) + schedule_work(&mpam_enable_work); + + if (err < 0) + return err; + + return mpam_cpu_online(cpu); +} + +static int mpam_cpu_offline(unsigned int cpu) +{ + return 0; +} + +static void mpam_register_cpuhp_callbacks(int (*online)(unsigned int online)) +{ + mutex_lock(&mpam_cpuhp_state_lock); + mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mpam:online", + online, mpam_cpu_offline); + if (mpam_cpuhp_state <= 0) { + pr_err("Failed to register cpuhp callbacks"); + mpam_cpuhp_state = 0; + } + mutex_unlock(&mpam_cpuhp_state_lock); } static int mpam_dt_count_msc(void) @@ -628,11 +730,51 @@ static int mpam_msc_drv_probe(struct platform_device *pdev) } if (!err && fw_num_msc == mpam_num_msc) - mpam_discovery_complete(); + mpam_register_cpuhp_callbacks(&mpam_discovery_cpu_online); return err; } +static void mpam_enable_once(void) +{ + mutex_lock(&mpam_cpuhp_state_lock); + cpuhp_remove_state(mpam_cpuhp_state); + mpam_cpuhp_state = 0; + mutex_unlock(&mpam_cpuhp_state_lock); + + mpam_register_cpuhp_callbacks(mpam_cpu_online); + + pr_info("MPAM enabled\n"); +} + +/* + * Enable mpam once all devices have been probed. + * Scheduled by mpam_discovery_cpu_online() once all devices have been created. + * Also scheduled when new devices are probed when new CPUs come online. + */ +void mpam_enable(struct work_struct *work) +{ + static atomic_t once; + struct mpam_msc *msc; + bool all_devices_probed = true; + + /* Have we probed all the hw devices? 
*/ + mutex_lock(&mpam_list_lock); + list_for_each_entry(msc, &mpam_all_msc, glbl_list) { + mutex_lock(&msc->lock); + if (!msc->probed) + all_devices_probed = false; + mutex_unlock(&msc->lock); + + if (!all_devices_probed) + break; + } + mutex_unlock(&mpam_list_lock); + + if (all_devices_probed && !atomic_fetch_inc(&once)) + mpam_enable_once(); +} + static int mpam_msc_drv_remove(struct platform_device *pdev) { struct mpam_msc *msc = platform_get_drvdata(pdev); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 5d339e4375e2..d5d567fe57ed 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -30,6 +30,7 @@ struct mpam_msc cpumask_t accessibility; struct mutex lock; + bool probed; unsigned long ris_idxs[128 / BITS_PER_LONG]; u32 ris_max; @@ -97,6 +98,8 @@ struct mpam_msc_ris extern struct list_head mpam_classes; extern struct srcu_struct mpam_srcu; +/* Scheduled work callback to enable mpam once all MSC have been probed */ +void mpam_enable(struct work_struct *work); /* * MPAM MSCs have the following register layout. See: @@ -196,7 +199,7 @@ extern struct srcu_struct mpam_srcu; /* MPAMF_MBWUMON_IDR - MPAM memory bandwidth usage monitor ID register */ #define MPAMF_MBWUMON_IDR_NUM_MON GENMASK(15, 0) -#define MPAMF_MBWUMON_IDR_RWBW BIT(28) +#define MPAMF_MBWUMON_IDR_HAS_RWBW BIT(28) #define MPAMF_MBWUMON_IDR_LWD BIT(29) #define MPAMF_MBWUMON_IDR_HAS_LONG BIT(30) #define MPAMF_MBWUMON_IDR_HAS_CAPTURE BIT(31) -- Gitee From fbb684e3d14228b424aee289eb2e88f4348e3e14 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 5 Dec 2023 14:04:33 +0000 Subject: [PATCH 0592/2138] arm_mpam: Probe MSCs to find the supported partid/pmg values ANBZ: #8686 commit 77115dd523dedacef5a4d3504cc5f41a97dd053c morse-linux. CPUs can generate traffic with a range of PARTID and PMG values, but each MSC may have its own maximum size for these fields. 
Before MPAM can be used, the driver needs to probe each RIS on each MSC, to find the system-wide smallest value that can be used. While doing this, RIS entries that firmware didn't describe are create under MPAM_CLASS_UNKNOWN. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- arch/arm64/kernel/mpam.c | 11 ++ drivers/platform/mpam/mpam_devices.c | 166 ++++++++++++++++++++++++-- drivers/platform/mpam/mpam_internal.h | 6 + include/linux/arm_mpam.h | 2 + 4 files changed, 178 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kernel/mpam.c b/arch/arm64/kernel/mpam.c index 346f0273b2c5..02f43334f078 100644 --- a/arch/arm64/kernel/mpam.c +++ b/arch/arm64/kernel/mpam.c @@ -3,6 +3,7 @@ #include +#include #include #include @@ -10,3 +11,13 @@ DEFINE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); DEFINE_STATIC_KEY_FALSE(mpam_enabled); DEFINE_PER_CPU(u64, arm64_mpam_default); DEFINE_PER_CPU(u64, arm64_mpam_current); + +static int __init arm64_mpam_register_cpus(void) +{ + u64 mpamidr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1); + u16 partid_max = FIELD_GET(MPAMIDR_PARTID_MAX, mpamidr); + u8 pmg_max = FIELD_GET(MPAMIDR_PMG_MAX, mpamidr); + + return mpam_register_requestor(partid_max, pmg_max); +} +arch_initcall(arm64_mpam_register_cpus) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 48811c90b78a..ec22c279cfe9 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -46,6 +47,15 @@ static u32 mpam_num_msc; static int mpam_cpuhp_state; static DEFINE_MUTEX(mpam_cpuhp_state_lock); +/* + * The smallest common values for any CPU or MSC in the system. + * Generating traffic outside this range will result in screaming interrupts. 
+ */ +u16 mpam_partid_max; +u8 mpam_pmg_max; +static bool partid_max_init, partid_max_published; +static DEFINE_SPINLOCK(partid_max_lock); + /* * mpam is enabled once all devices have been probed from CPU online callbacks, * scheduled via this work_struct. If access to an MSC depends on a CPU that @@ -79,6 +89,14 @@ static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg) return readl_relaxed(msc->mapped_hwpage + reg); } +static void __mpam_write_reg(struct mpam_msc *msc, u16 reg, u32 val) +{ + WARN_ON_ONCE(reg > msc->mapped_hwpage_sz); + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); + + writel_relaxed(val, msc->mapped_hwpage + reg); +} + #define mpam_read_partsel_reg(msc, reg) \ ({ \ u32 ____ret; \ @@ -89,6 +107,59 @@ static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg) ____ret; \ }) +#define mpam_write_partsel_reg(msc, reg, val) \ +({ \ + lockdep_assert_held_once(&msc->part_sel_lock); \ + __mpam_write_reg(msc, MPAMCFG_##reg, val); \ +}) + +static u64 mpam_msc_read_idr(struct mpam_msc *msc) +{ + u64 idr_high = 0, idr_low; + + lockdep_assert_held(&msc->part_sel_lock); + + idr_low = mpam_read_partsel_reg(msc, IDR); + if (FIELD_GET(MPAMF_IDR_HAS_EXT, idr_low)) + idr_high = mpam_read_partsel_reg(msc, IDR + 4); + + return (idr_high << 32) | idr_low; +} + +static void __mpam_part_sel(u8 ris_idx, u16 partid, struct mpam_msc *msc) +{ + u32 partsel; + + lockdep_assert_held(&msc->part_sel_lock); + + partsel = FIELD_PREP(MPAMCFG_PART_SEL_RIS, ris_idx) | + FIELD_PREP(MPAMCFG_PART_SEL_PARTID_SEL, partid); + mpam_write_partsel_reg(msc, PART_SEL, partsel); +} + +int mpam_register_requestor(u16 partid_max, u8 pmg_max) +{ + int err = 0; + + spin_lock(&partid_max_lock); + if (!partid_max_init) { + mpam_partid_max = partid_max; + mpam_pmg_max = pmg_max; + partid_max_init = true; + } else if (!partid_max_published) { + mpam_partid_max = min(mpam_partid_max, partid_max); + mpam_pmg_max = min(mpam_pmg_max, pmg_max); + } else { + /* New requestors can't 
lower the values */ + if ((partid_max < mpam_partid_max) || (pmg_max < mpam_pmg_max)) + err = -EBUSY; + } + spin_unlock(&partid_max_lock); + + return err; +} +EXPORT_SYMBOL(mpam_register_requestor); + static struct mpam_component * mpam_component_alloc(struct mpam_class *class, int id, gfp_t gfp) { @@ -402,6 +473,7 @@ static int mpam_ris_create_locked(struct mpam_msc *msc, u8 ris_idx, cpumask_or(&comp->affinity, &comp->affinity, &ris->affinity); cpumask_or(&class->affinity, &class->affinity, &ris->affinity); list_add_rcu(&ris->comp_list, &comp->ris); + list_add_rcu(&ris->msc_list, &msc->ris); return 0; } @@ -419,10 +491,37 @@ int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, return err; } +static struct mpam_msc_ris *mpam_get_or_create_ris(struct mpam_msc *msc, + u8 ris_idx) +{ + int err; + struct mpam_msc_ris *ris, *found = ERR_PTR(-ENOENT); + + lockdep_assert_held(&mpam_list_lock); + + if (!test_bit(ris_idx, msc->ris_idxs)) { + err = mpam_ris_create_locked(msc, ris_idx, MPAM_CLASS_UNKNOWN, + 0, 0, GFP_ATOMIC); + if (err) + return ERR_PTR(err); + } + + list_for_each_entry(ris, &msc->ris, msc_list) { + if (ris->ris_idx == ris_idx) { + found = ris; + break; + } + } + + return found; +} + static int mpam_msc_hw_probe(struct mpam_msc *msc) { u64 idr; - int err; + u16 partid_max; + u8 ris_idx, pmg_max; + struct mpam_msc_ris *ris; lockdep_assert_held(&msc->lock); @@ -431,14 +530,43 @@ static int mpam_msc_hw_probe(struct mpam_msc *msc) if ((idr & MPAMF_AIDR_ARCH_MAJOR_REV) != MPAM_ARCHITECTURE_V1) { pr_err_once("%s does not match MPAM architecture v1.0\n", dev_name(&msc->pdev->dev)); - err = -EIO; - } else { - msc->probed = true; - err = 0; + spin_unlock(&msc->part_sel_lock); + return -EIO; } + + idr = mpam_msc_read_idr(msc); spin_unlock(&msc->part_sel_lock); + msc->ris_max = FIELD_GET(MPAMF_IDR_RIS_MAX, idr); + + /* Use these values so partid/pmg always starts with a valid value */ + msc->partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr); + msc->pmg_max = 
FIELD_GET(MPAMF_IDR_PMG_MAX, idr); + + for (ris_idx = 0; ris_idx <= msc->ris_max; ris_idx++) { + spin_lock(&msc->part_sel_lock); + __mpam_part_sel(ris_idx, 0, msc); + idr = mpam_msc_read_idr(msc); + spin_unlock(&msc->part_sel_lock); + + partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr); + pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr); + msc->partid_max = min(msc->partid_max, partid_max); + msc->pmg_max = min(msc->pmg_max, pmg_max); + + ris = mpam_get_or_create_ris(msc, ris_idx); + if (IS_ERR(ris)) { + return PTR_ERR(ris); + } + } - return err; + spin_lock(&partid_max_lock); + mpam_partid_max = min(mpam_partid_max, msc->partid_max); + mpam_pmg_max = min(mpam_pmg_max, msc->pmg_max); + spin_unlock(&partid_max_lock); + + msc->probed = true; + + return 0; } static int mpam_cpu_online(unsigned int cpu) @@ -742,9 +870,18 @@ static void mpam_enable_once(void) mpam_cpuhp_state = 0; mutex_unlock(&mpam_cpuhp_state_lock); + /* + * Once the cpuhp callbacks have been changed, mpam_partid_max can no + * longer change. + */ + spin_lock(&partid_max_lock); + partid_max_published = true; + spin_unlock(&partid_max_lock); + mpam_register_cpuhp_callbacks(mpam_cpu_online); - pr_info("MPAM enabled\n"); + pr_info("MPAM enabled with %u partid and %u pmg\n", + mpam_partid_max + 1, mpam_pmg_max + 1); } /* @@ -828,11 +965,25 @@ static void mpam_dt_create_foundling_msc(void) static int __init mpam_msc_driver_init(void) { + bool mpam_not_available = false; + if (!mpam_cpus_have_feature()) return -EOPNOTSUPP; init_srcu_struct(&mpam_srcu); + /* + * If the MPAM CPU interface is not implemented, or reserved by + * firmware, there is no point touching the rest of the hardware. 
+ */ + spin_lock(&partid_max_lock); + if (!partid_max_init || (!mpam_partid_max && !mpam_pmg_max)) + mpam_not_available = true; + spin_unlock(&partid_max_lock); + + if (mpam_not_available) + return 0; + if (!acpi_disabled) fw_num_msc = acpi_mpam_count_msc(); else @@ -848,4 +999,5 @@ static int __init mpam_msc_driver_init(void) return platform_driver_register(&mpam_msc_driver); } +/* Must occur after arm64_mpam_register_cpus() from arch_initcall() */ subsys_initcall(mpam_msc_driver_init); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index d5d567fe57ed..a7de4a69b9f8 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -31,6 +31,8 @@ struct mpam_msc struct mutex lock; bool probed; + u16 partid_max; + u8 pmg_max; unsigned long ris_idxs[128 / BITS_PER_LONG]; u32 ris_max; @@ -98,6 +100,10 @@ struct mpam_msc_ris extern struct list_head mpam_classes; extern struct srcu_struct mpam_srcu; +/* System wide partid/pmg values */ +extern u16 mpam_partid_max; +extern u8 mpam_pmg_max; + /* Scheduled work callback to enable mpam once all MSC have been probed */ void mpam_enable(struct work_struct *work); diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 950ea7049d53..40e09b4d236b 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -34,6 +34,8 @@ static inline int acpi_mpam_parse_resources(struct mpam_msc *msc, static inline int acpi_mpam_count_msc(void) { return -EINVAL; } #endif +int mpam_register_requestor(u16 partid_max, u8 pmg_max); + int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, enum mpam_class_types type, u8 class_id, int component_id); -- Gitee From da87f9cf9f2ede88e33b366406eddafe3df06fd3 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 26 Jan 2021 17:10:44 +0000 Subject: [PATCH 0593/2138] arm_mpam: Probe the hardware features resctrl supports ANBZ: #8686 commit f7c52f94545ccc4c0f7882e68985efefa1b8e2ed morse-linux. 
Expand the probing support with the control and monitor types we can use with resctrl. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 79 ++++++++++++++++++++++++++- drivers/platform/mpam/mpam_internal.h | 50 +++++++++++++++++ 2 files changed, 127 insertions(+), 2 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index ec22c279cfe9..7eeeb44506b1 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -83,7 +83,7 @@ LIST_HEAD(mpam_classes); static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg) { - WARN_ON_ONCE(reg > msc->mapped_hwpage_sz); + WARN_ON_ONCE(reg + sizeof(u32) > msc->mapped_hwpage_sz); WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); return readl_relaxed(msc->mapped_hwpage + reg); @@ -91,7 +91,7 @@ static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg) static void __mpam_write_reg(struct mpam_msc *msc, u16 reg, u32 val) { - WARN_ON_ONCE(reg > msc->mapped_hwpage_sz); + WARN_ON_ONCE(reg + sizeof(u32) > msc->mapped_hwpage_sz); WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); writel_relaxed(val, msc->mapped_hwpage + reg); @@ -516,6 +516,74 @@ static struct mpam_msc_ris *mpam_get_or_create_ris(struct mpam_msc *msc, return found; } +static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) +{ + int err; + struct mpam_msc *msc = ris->msc; + struct mpam_props *props = &ris->props; + + lockdep_assert_held(&msc->lock); + lockdep_assert_held(&msc->part_sel_lock); + + /* Cache Portion partitioning */ + if (FIELD_GET(MPAMF_IDR_HAS_CPOR_PART, ris->idr)) { + u32 cpor_features = mpam_read_partsel_reg(msc, CPOR_IDR); + + props->cpbm_wd = FIELD_GET(MPAMF_CPOR_IDR_CPBM_WD, cpor_features); 
+ if (props->cpbm_wd) + mpam_set_feature(mpam_feat_cpor_part, props); + } + + /* Memory bandwidth partitioning */ + if (FIELD_GET(MPAMF_IDR_HAS_MBW_PART, ris->idr)) { + u32 mbw_features = mpam_read_partsel_reg(msc, MBW_IDR); + + /* portion bitmap resolution */ + props->mbw_pbm_bits = FIELD_GET(MPAMF_MBW_IDR_BWPBM_WD, mbw_features); + if (props->mbw_pbm_bits && + FIELD_GET(MPAMF_MBW_IDR_HAS_PBM, mbw_features)) + mpam_set_feature(mpam_feat_mbw_part, props); + + props->bwa_wd = FIELD_GET(MPAMF_MBW_IDR_BWA_WD, mbw_features); + if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MAX, mbw_features)) + mpam_set_feature(mpam_feat_mbw_max, props); + } + + /* Performance Monitoring */ + if (FIELD_GET(MPAMF_IDR_HAS_MSMON, ris->idr)) { + u32 msmon_features = mpam_read_partsel_reg(msc, MSMON_IDR); + + if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_CSU, msmon_features)) { + u32 csumonidr, discard; + + /* + * If the firmware max-nrdy-us property is missing, the + * CSU counters can't be used. Should we wait forever? 
+ */ + err = device_property_read_u32(&msc->pdev->dev, + "arm,not-ready-us", + &discard); + + csumonidr = mpam_read_partsel_reg(msc, CSUMON_IDR); + props->num_csu_mon = FIELD_GET(MPAMF_CSUMON_IDR_NUM_MON, csumonidr); + if (props->num_csu_mon && !err) + mpam_set_feature(mpam_feat_msmon_csu, props); + else if (props->num_csu_mon) + pr_err_once("Counters are not usable because not-ready timeout was not provided by firmware."); + } + if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_MBWU, msmon_features)) { + u32 mbwumonidr = mpam_read_partsel_reg(msc, MBWUMON_IDR); + + props->num_mbwu_mon = FIELD_GET(MPAMF_MBWUMON_IDR_NUM_MON, mbwumonidr); + if (props->num_mbwu_mon) + mpam_set_feature(mpam_feat_msmon_mbwu, props); + + if (FIELD_GET(MPAMF_MBWUMON_IDR_HAS_RWBW, mbwumonidr)) + mpam_set_feature(mpam_feat_msmon_mbwu_rwbw, props); + } + } +} + static int mpam_msc_hw_probe(struct mpam_msc *msc) { u64 idr; @@ -536,6 +604,7 @@ static int mpam_msc_hw_probe(struct mpam_msc *msc) idr = mpam_msc_read_idr(msc); spin_unlock(&msc->part_sel_lock); + msc->ris_max = FIELD_GET(MPAMF_IDR_RIS_MAX, idr); /* Use these values so partid/pmg always starts with a valid value */ @@ -557,6 +626,12 @@ static int mpam_msc_hw_probe(struct mpam_msc *msc) if (IS_ERR(ris)) { return PTR_ERR(ris); } + ris->idr = idr; + + spin_lock(&msc->part_sel_lock); + __mpam_part_sel(ris_idx, 0, msc); + mpam_ris_hw_probe(ris); + spin_unlock(&msc->part_sel_lock); } spin_lock(&partid_max_lock); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index a7de4a69b9f8..71e62594876d 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -49,6 +49,54 @@ struct mpam_msc size_t mapped_hwpage_sz; }; +/* + * When we compact the supported features, we don't care what they are. + * Storing them as a bitmap makes life easy. 
+ */ +typedef u16 mpam_features_t; + +/* Bits for mpam_features_t */ +enum mpam_device_features { + mpam_feat_ccap_part = 0, + mpam_feat_cpor_part, + mpam_feat_mbw_part, + mpam_feat_mbw_min, + mpam_feat_mbw_max, + mpam_feat_mbw_prop, + mpam_feat_msmon, + mpam_feat_msmon_csu, + mpam_feat_msmon_csu_capture, + mpam_feat_msmon_mbwu, + mpam_feat_msmon_mbwu_capture, + mpam_feat_msmon_mbwu_rwbw, + mpam_feat_msmon_capt, + MPAM_FEATURE_LAST, +}; +#define MPAM_ALL_FEATURES ((1<features; +} + +static inline void mpam_set_feature(enum mpam_device_features feat, + struct mpam_props *props) +{ + props->features |= (1< Date: Fri, 7 May 2021 12:45:15 +0100 Subject: [PATCH 0594/2138] arm_mpam: Merge supported features during mpam_enable() into mpam_class ANBZ: #8686 commit b5d7e31197b22fe33c513a3e267e4e1dde94ae18 morse-linux. To make a decision about whether to expose an mpam class as a resctrl resource we need to know its overall supported features and properties. Once we've probed all the resources, we can walk the tree and produced overall values by merging the bitmaps. This eliminates features that are only supported by some MSC that make up a component or class. If bitmap properties are mismatched within a component we cannot support the mismatched feature. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 87 +++++++++++++++++++++++++++ drivers/platform/mpam/mpam_internal.h | 8 +++ 2 files changed, 95 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 7eeeb44506b1..f1574f393e1d 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -938,8 +938,95 @@ static int mpam_msc_drv_probe(struct platform_device *pdev) return err; } +/* + * If a resource doesn't match class feature/configuration, do the right thing. + * For 'num' properties we can just take the minimum. + * For properties where the mismatched unused bits would make a difference, we + * nobble the class feature, as we can't configure all the resources. + * e.g. The L3 cache is composed of two resources with 13 and 17 portion + * bitmaps respectively. 
+ */ +static void +__resource_props_mismatch(struct mpam_msc_ris *ris, struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + struct mpam_props *rprops = &ris->props; + + lockdep_assert_held(&mpam_list_lock); /* we modify class */ + + /* Clear missing features */ + cprops->features &= rprops->features; + + /* Clear incompatible features */ + if (cprops->cpbm_wd != rprops->cpbm_wd) + mpam_clear_feature(mpam_feat_cpor_part, &cprops->features); + if (cprops->mbw_pbm_bits != rprops->mbw_pbm_bits) + mpam_clear_feature(mpam_feat_mbw_part, &cprops->features); + + /* bwa_wd is a count of bits, fewer bits means less precision */ + if (cprops->bwa_wd != rprops->bwa_wd) + cprops->bwa_wd = min(cprops->bwa_wd, rprops->bwa_wd); + + /* For num properties, take the minimum */ + if (cprops->num_csu_mon != rprops->num_csu_mon) + cprops->num_csu_mon = min(cprops->num_csu_mon, rprops->num_csu_mon); + if (cprops->num_mbwu_mon != rprops->num_mbwu_mon) + cprops->num_mbwu_mon = min(cprops->num_mbwu_mon, rprops->num_mbwu_mon); +} + +/* + * Copy the first component's first resources's properties and features to the + * class. __resource_props_mismatch() will remove conflicts. + * It is not possible to have a class with no components, or a component with + * no resources. + */ +static void mpam_enable_init_class_features(struct mpam_class *class) +{ + struct mpam_msc_ris *ris; + struct mpam_component *comp; + + comp = list_first_entry_or_null(&class->components, + struct mpam_component, class_list); + if (WARN_ON(!comp)) + return; + + ris = list_first_entry_or_null(&comp->ris, + struct mpam_msc_ris, comp_list); + if (WARN_ON(!ris)) + return; + + class->props = ris->props; +} + +/* Merge all the common resource features into class. 
*/ +static void mpam_enable_merge_features(void) +{ + struct mpam_msc_ris *ris; + struct mpam_class *class; + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(class, &mpam_classes, classes_list) { + mpam_enable_init_class_features(class); + + list_for_each_entry(comp, &class->components, class_list) { + list_for_each_entry(ris, &comp->ris, comp_list) { + __resource_props_mismatch(ris, class); + + class->nrdy_usec = max(class->nrdy_usec, + ris->msc->nrdy_usec); + } + } + } +} + static void mpam_enable_once(void) { + mutex_lock(&mpam_list_lock); + mpam_enable_merge_features(); + mutex_unlock(&mpam_list_lock); + mutex_lock(&mpam_cpuhp_state_lock); cpuhp_remove_state(mpam_cpuhp_state); mpam_cpuhp_state = 0; diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 71e62594876d..db50f5e40d98 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -97,6 +97,12 @@ static inline void mpam_set_feature(enum mpam_device_features feat, props->features |= (1< Date: Thu, 28 Feb 2019 18:06:57 +0000 Subject: [PATCH 0595/2138] arm_mpam: Reset MSC controls from cpu hp callbacks ANBZ: #8686 commit 9216311dfa1ea9b3c8c4a0dc04657392e3b151b0 morse-linux. When a CPU comes online, it may bring a newly accessible MSC with it. Only the default partid has its value reset by hardware, and even then the MSC might not have been reset since its config was previously dirtyied. e.g. Kexec. Any in-use partid must have its configuration restored, or reset. In-use partids may be held in caches and evicted later. MSC are also reset when CPUs are taken offline to cover cases where firmware doesn't reset the MSC over reboot using UEFI, or kexec where there is no firmware involvement. If the configuration for a RIS has not been touched since it was brought online, it does not need resetting again. To reset, write the maximum values for all discovered controls. 
CC: Rohit Mathew Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 126 +++++++++++++++++++++++++- drivers/platform/mpam/mpam_internal.h | 3 + 2 files changed, 128 insertions(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index f1574f393e1d..e281e6e910e0 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -644,8 +645,115 @@ static int mpam_msc_hw_probe(struct mpam_msc *msc) return 0; } +static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd) +{ + u32 num_words, msb; + u32 bm = ~0; + int i; + + lockdep_assert_held(&msc->part_sel_lock); + + /* + * Write all ~0 to all but the last 32bit-word, which may + * have fewer bits... + */ + num_words = DIV_ROUND_UP(wd, 32); + for (i = 0; i < num_words - 1; i++, reg += sizeof(bm)) + __mpam_write_reg(msc, reg, bm); + + /* + * ....and then the last (maybe) partial 32bit word. When wd is a + * multiple of 32, msb should be 31 to write a full 32bit word. 
+ */ + msb = (wd - 1) % 32; + bm = GENMASK(msb , 0); + if (bm) + __mpam_write_reg(msc, reg, bm); +} + +static void mpam_reset_ris_partid(struct mpam_msc_ris *ris, u16 partid) +{ + struct mpam_msc *msc = ris->msc; + u16 bwa_fract = MPAMCFG_MBW_MAX_MAX; + struct mpam_props *rprops = &ris->props; + + lockdep_assert_held(&msc->lock); + + spin_lock(&msc->part_sel_lock); + __mpam_part_sel(ris->ris_idx, partid, msc); + + if (mpam_has_feature(mpam_feat_cpor_part, rprops)) + mpam_reset_msc_bitmap(msc, MPAMCFG_CPBM, rprops->cpbm_wd); + + if (mpam_has_feature(mpam_feat_mbw_part, rprops)) + mpam_reset_msc_bitmap(msc, MPAMCFG_MBW_PBM, rprops->mbw_pbm_bits); + + if (mpam_has_feature(mpam_feat_mbw_min, rprops)) + mpam_write_partsel_reg(msc, MBW_MIN, 0); + + if (mpam_has_feature(mpam_feat_mbw_max, rprops)) + mpam_write_partsel_reg(msc, MBW_MAX, bwa_fract); + + if (mpam_has_feature(mpam_feat_mbw_prop, rprops)) + mpam_write_partsel_reg(msc, MBW_PROP, bwa_fract); + spin_unlock(&msc->part_sel_lock); +} + +static void mpam_reset_ris(struct mpam_msc_ris *ris) +{ + u16 partid, partid_max; + struct mpam_msc *msc = ris->msc; + + lockdep_assert_held(&msc->lock); + + if (ris->in_reset_state) + return; + + spin_lock(&partid_max_lock); + partid_max = mpam_partid_max; + spin_unlock(&partid_max_lock); + for (partid = 0; partid < partid_max; partid++) + mpam_reset_ris_partid(ris, partid); +} + +static void mpam_reset_msc(struct mpam_msc *msc, bool online) +{ + int idx; + struct mpam_msc_ris *ris; + + lockdep_assert_held(&msc->lock); + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &msc->ris, msc_list) { + mpam_reset_ris(ris); + + /* + * Set in_reset_state when coming online. The reset state + * for non-zero partid may be lost while the CPUs are offline. 
+ */ + ris->in_reset_state = online; + } + srcu_read_unlock(&mpam_srcu, idx); +} + static int mpam_cpu_online(unsigned int cpu) { + int idx; + struct mpam_msc *msc; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(msc, &mpam_all_msc, glbl_list) { + if (!cpumask_test_cpu(cpu, &msc->accessibility)) + continue; + + if (atomic_fetch_inc(&msc->online_refs) == 0) { + mutex_lock(&msc->lock); + mpam_reset_msc(msc, true); + mutex_unlock(&msc->lock); + } + } + srcu_read_unlock(&mpam_srcu, idx); + return 0; } @@ -684,6 +792,22 @@ static int mpam_discovery_cpu_online(unsigned int cpu) static int mpam_cpu_offline(unsigned int cpu) { + int idx; + struct mpam_msc *msc; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(msc, &mpam_all_msc, glbl_list) { + if (!cpumask_test_cpu(cpu, &msc->accessibility)) + continue; + + if (atomic_dec_and_test(&msc->online_refs)) { + mutex_lock(&msc->lock); + mpam_reset_msc(msc, false); + mutex_unlock(&msc->lock); + } + } + srcu_read_unlock(&mpam_srcu, idx); + return 0; } @@ -1043,7 +1167,7 @@ static void mpam_enable_once(void) mpam_register_cpuhp_callbacks(mpam_cpu_online); pr_info("MPAM enabled with %u partid and %u pmg\n", - mpam_partid_max + 1, mpam_pmg_max + 1); + READ_ONCE(mpam_partid_max) + 1, mpam_pmg_max + 1); } /* diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index db50f5e40d98..228d3c286f98 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -5,6 +5,7 @@ #define MPAM_INTERNAL_H #include +#include #include #include #include @@ -28,6 +29,7 @@ struct mpam_msc struct pcc_mbox_chan *pcc_chan; u32 nrdy_usec; cpumask_t accessibility; + atomic_t online_refs; struct mutex lock; bool probed; @@ -140,6 +142,7 @@ struct mpam_msc_ris u8 ris_idx; u64 idr; struct mpam_props props; + bool in_reset_state; cpumask_t affinity; -- Gitee From 0032bb24ab17f1cac71e4893d339b4ada5244994 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 11 May 
2021 12:45:16 +0100 Subject: [PATCH 0596/2138] arm_mpam: Add a helper to touch an MSC from any CPU ANBZ: #8686 commit 92706dc754cdb62ddc9b6366140ec8ba28834f1d morse-linux. Resetting RIS entries from the cpuhp callback is easy as the callback occurs on the correct CPU. This won't be true for any other caller that wants to reset or configure an MSC. Add a helper that schedules the provided function if necessary. Prevent the cpuhp callbacks from changing the MSC state by taking the cpuhp lock. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 40 +++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index e281e6e910e0..9156591fc4b9 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -699,21 +699,49 @@ static void mpam_reset_ris_partid(struct mpam_msc_ris *ris, u16 partid) spin_unlock(&msc->part_sel_lock); } -static void mpam_reset_ris(struct mpam_msc_ris *ris) +/* + * Called via smp_call_on_cpu() to prevent migration, while still being + * pre-emptible. + */ +static int mpam_reset_ris(void *arg) { u16 partid, partid_max; - struct mpam_msc *msc = ris->msc; - - lockdep_assert_held(&msc->lock); + struct mpam_msc_ris *ris = arg; if (ris->in_reset_state) - return; + return 0; spin_lock(&partid_max_lock); partid_max = mpam_partid_max; spin_unlock(&partid_max_lock); for (partid = 0; partid < partid_max; partid++) mpam_reset_ris_partid(ris, partid); + + return 0; +} + +/* + * Get the preferred CPU for this MSC. If it is accessible from this CPU, + * this CPU is preferred. This can be preempted/migrated, it will only result + * in more work. 
+ */ +static int mpam_get_msc_preferred_cpu(struct mpam_msc *msc) +{ + int cpu = raw_smp_processor_id(); + + if (cpumask_test_cpu(cpu, &msc->accessibility)) + return cpu; + + return cpumask_first_and(&msc->accessibility, cpu_online_mask); +} + +static int mpam_touch_msc(struct mpam_msc *msc, int (*fn)(void *a), void *arg) +{ + lockdep_assert_irqs_enabled(); + lockdep_assert_cpus_held(); + lockdep_assert_held(&msc->lock); + + return smp_call_on_cpu(mpam_get_msc_preferred_cpu(msc), fn, arg, true); } static void mpam_reset_msc(struct mpam_msc *msc, bool online) @@ -725,7 +753,7 @@ static void mpam_reset_msc(struct mpam_msc *msc, bool online) idx = srcu_read_lock(&mpam_srcu); list_for_each_entry_rcu(ris, &msc->ris, msc_list) { - mpam_reset_ris(ris); + mpam_touch_msc(msc, &mpam_reset_ris, ris); /* * Set in_reset_state when coming online. The reset state -- Gitee From e1d28ba7508054a44c2085c81a998e930df8d9a2 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 9 Feb 2021 13:46:35 +0000 Subject: [PATCH 0597/2138] arm_mpam: Extend reset logic to allow devices to be reset any time ANBZ: #8686 commit b213a7cb016cee3b229f33fc16a486a06d43b499 morse-linux. cpuhp callbacks aren't the only time the MSC configuration may need to be reset. Resctrl has an API call to reset a class. If an MPAM error interrupt arrives it indicates the driver has misprogrammed an MSC. The safest thing to do is reset all the MSCs and disable MPAM. Add a helper to reset RIS via their class. Call this from mpam_disable(), which can be scheduled from the error interrupt handler. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 34 ++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 9156591fc4b9..6091e87308ba 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -1198,6 +1198,40 @@ static void mpam_enable_once(void) READ_ONCE(mpam_partid_max) + 1, mpam_pmg_max + 1); } +static void mpam_reset_class(struct mpam_class *class) +{ + int idx; + struct mpam_msc_ris *ris; + struct mpam_component *comp; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(comp, &class->components, class_list) { + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + mutex_lock(&ris->msc->lock); + mpam_touch_msc(ris->msc, mpam_reset_ris, ris); + mutex_unlock(&ris->msc->lock); + ris->in_reset_state = true; + } + } + srcu_read_unlock(&mpam_srcu, idx); +} + +/* + * Called in response to an error IRQ. + * All of MPAMs errors indicate a software bug, restore any modified + * controls to their reset values. + */ +void mpam_disable(void) +{ + int idx; + struct mpam_class *class; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(class, &mpam_classes, classes_list) + mpam_reset_class(class); + srcu_read_unlock(&mpam_srcu, idx); +} + /* * Enable mpam once all devices have been probed. * Scheduled by mpam_discovery_cpu_online() once all devices have been created. -- Gitee From 312e7214cc8bd90b21f174a2f2516964d516659b Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 8 Feb 2021 13:09:09 +0000 Subject: [PATCH 0598/2138] arm_mpam: Register and enable IRQs ANBZ: #8686 commit 7da1c7f9d9ef723f829bf44ed96e1fc4a46ef29f morse-linux. Register and enable error IRQs. 
All the MPAM error interrupts indicate a software bug, e.g. out of range partid. If the error interrupt is ever signalled, attempt to disable MPAM. Only the irq handler accesses the ESR register, so no locking is needed. The work to disable MPAM after an error needs to happen at process context, use a threaded interrupt. There is no support for percpu threaded interrupts, for now schedule the work to be done from the irq handler. Enabling the IRQs in the MSC may involve cross calling to a CPU that can access the MSC. CC: Rohit Mathew Tested-by: Rohit Mathew Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 311 +++++++++++++++++++++++++- drivers/platform/mpam/mpam_internal.h | 8 + 2 files changed, 309 insertions(+), 10 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 6091e87308ba..ac82c041999d 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -14,6 +14,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -64,6 +67,12 @@ static DEFINE_SPINLOCK(partid_max_lock); */ static DECLARE_WORK(mpam_enable_work, &mpam_enable); +/* + * All mpam error interrupts indicate a software bug. On receipt, disable the + * driver. + */ +static DECLARE_WORK(mpam_broken_work, &mpam_disable); + /* * An MSC is a container for resources, each identified by their RIS index. * Components are a group of RIS that control the same thing. 
@@ -127,6 +136,24 @@ static u64 mpam_msc_read_idr(struct mpam_msc *msc) return (idr_high << 32) | idr_low; } +static void mpam_msc_zero_esr(struct mpam_msc *msc) +{ + writel_relaxed(0, msc->mapped_hwpage + MPAMF_ESR); + if (msc->has_extd_esr) + writel_relaxed(0, msc->mapped_hwpage + MPAMF_ESR + 4); +} + +static u64 mpam_msc_read_esr(struct mpam_msc *msc) +{ + u64 esr_high = 0, esr_low; + + esr_low = readl_relaxed(msc->mapped_hwpage + MPAMF_ESR); + if (msc->has_extd_esr) + esr_high = readl_relaxed(msc->mapped_hwpage + MPAMF_ESR + 4); + + return (esr_high << 32) | esr_low; +} + static void __mpam_part_sel(u8 ris_idx, u16 partid, struct mpam_msc *msc) { u32 partsel; @@ -622,6 +649,7 @@ static int mpam_msc_hw_probe(struct mpam_msc *msc) pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr); msc->partid_max = min(msc->partid_max, partid_max); msc->pmg_max = min(msc->pmg_max, pmg_max); + msc->has_extd_esr = FIELD_GET(MPAMF_IDR_HAS_EXT_ESR, idr); ris = mpam_get_or_create_ris(msc, ris_idx); if (IS_ERR(ris)) { @@ -764,6 +792,12 @@ static void mpam_reset_msc(struct mpam_msc *msc, bool online) srcu_read_unlock(&mpam_srcu, idx); } +static void _enable_percpu_irq(void *_irq) +{ + int *irq = _irq; + enable_percpu_irq(*irq, IRQ_TYPE_NONE); +} + static int mpam_cpu_online(unsigned int cpu) { int idx; @@ -774,11 +808,13 @@ static int mpam_cpu_online(unsigned int cpu) if (!cpumask_test_cpu(cpu, &msc->accessibility)) continue; - if (atomic_fetch_inc(&msc->online_refs) == 0) { - mutex_lock(&msc->lock); + mutex_lock(&msc->lock); + if (msc->reenable_error_ppi) + _enable_percpu_irq(&msc->reenable_error_ppi); + + if (atomic_fetch_inc(&msc->online_refs) == 0) mpam_reset_msc(msc, true); - mutex_unlock(&msc->lock); - } + mutex_unlock(&msc->lock); } srcu_read_unlock(&mpam_srcu, idx); @@ -828,11 +864,13 @@ static int mpam_cpu_offline(unsigned int cpu) if (!cpumask_test_cpu(cpu, &msc->accessibility)) continue; - if (atomic_dec_and_test(&msc->online_refs)) { - mutex_lock(&msc->lock); + 
mutex_lock(&msc->lock); + if (msc->reenable_error_ppi) + disable_percpu_irq(msc->reenable_error_ppi); + + if (atomic_dec_and_test(&msc->online_refs)) mpam_reset_msc(msc, false); - mutex_unlock(&msc->lock); - } + mutex_unlock(&msc->lock); } srcu_read_unlock(&mpam_srcu, idx); @@ -851,6 +889,50 @@ static void mpam_register_cpuhp_callbacks(int (*online)(unsigned int online)) mutex_unlock(&mpam_cpuhp_state_lock); } +static int __setup_ppi(struct mpam_msc *msc) +{ + int cpu; + + msc->error_dev_id = alloc_percpu_gfp(struct mpam_msc *, GFP_KERNEL); + if (!msc->error_dev_id) + return -ENOMEM; + + for_each_cpu(cpu, &msc->accessibility) { + struct mpam_msc *empty = *per_cpu_ptr(msc->error_dev_id, cpu); + if (empty != NULL) { + pr_err_once("%s shares PPI with %s!\n", dev_name(&msc->pdev->dev), + dev_name(&empty->pdev->dev)); + return -EBUSY; + } + *per_cpu_ptr(msc->error_dev_id, cpu) = msc; + } + + return 0; +} + +static int mpam_msc_setup_error_irq(struct mpam_msc *msc) +{ + int irq; + + irq = platform_get_irq_byname_optional(msc->pdev, "error"); + if (irq <= 0) + return 0; + + /* Allocate and initialise the percpu device pointer for PPI */ + if (irq_is_percpu(irq)) + + return __setup_ppi(msc); + + /* sanity check: shared interrupts can be routed anywhere? 
*/ + if (!cpumask_equal(&msc->accessibility, cpu_possible_mask)) { + pr_err_once("msc:%u is a private resource with a shared error interrupt", + msc->id); + return -EINVAL; + } + + return 0; +} + static int mpam_dt_count_msc(void) { int count = 0; @@ -1021,6 +1103,13 @@ static int mpam_msc_drv_probe(struct platform_device *pdev) spin_lock_init(&msc->part_sel_lock); spin_lock_init(&msc->mon_sel_lock); + err = mpam_msc_setup_error_irq(msc); + if (err) { + devm_kfree(&pdev->dev, msc); + msc = ERR_PTR(err); + break; + } + if (device_property_read_u32(&pdev->dev, "pcc-channel", &msc->pcc_subspace_id)) msc->iface = MPAM_IFACE_MMIO; @@ -1173,11 +1262,198 @@ static void mpam_enable_merge_features(void) } } +static char *mpam_errcode_names[16] = { + [0] = "No error", + [1] = "PARTID_SEL_Range", + [2] = "Req_PARTID_Range", + [3] = "MSMONCFG_ID_RANGE", + [4] = "Req_PMG_Range", + [5] = "Monitor_Range", + [6] = "intPARTID_Range", + [7] = "Unexpected_INTERNAL", + [8] = "Undefined_RIS_PART_SEL", + [9] = "RIS_No_Control", + [10] = "Undefined_RIS_MON_SEL", + [11] = "RIS_No_Monitor", + [12 ... 
15] = "Reserved" +}; + +static int mpam_enable_msc_ecr(void *_msc) +{ + struct mpam_msc *msc = _msc; + + writel_relaxed(1, msc->mapped_hwpage + MPAMF_ECR); + + return 0; +} + +static int mpam_disable_msc_ecr(void *_msc) +{ + struct mpam_msc *msc = _msc; + + writel_relaxed(0, msc->mapped_hwpage + MPAMF_ECR); + + return 0; +} + +static irqreturn_t __mpam_irq_handler(int irq, struct mpam_msc *msc) +{ + u64 reg; + u16 partid; + u8 errcode, pmg, ris; + + if (WARN_ON_ONCE(!msc) || + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), + &msc->accessibility))) + return IRQ_NONE; + + reg = mpam_msc_read_esr(msc); + + errcode = FIELD_GET(MPAMF_ESR_ERRCODE, reg); + if (!errcode) + return IRQ_NONE; + + /* Clear level triggered irq */ + mpam_msc_zero_esr(msc); + + partid = FIELD_GET(MPAMF_ESR_PARTID_OR_MON, reg); + pmg = FIELD_GET(MPAMF_ESR_PMG, reg); + ris = FIELD_GET(MPAMF_ESR_PMG, reg); + + pr_err("error irq from msc:%u '%s', partid:%u, pmg: %u, ris: %u\n", + msc->id, mpam_errcode_names[errcode], partid, pmg, ris); + + if (irq_is_percpu(irq)) { + mpam_disable_msc_ecr(msc); + schedule_work(&mpam_broken_work); + return IRQ_HANDLED; + } + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t mpam_ppi_handler(int irq, void *dev_id) +{ + struct mpam_msc *msc = *(struct mpam_msc **)dev_id; + + return __mpam_irq_handler(irq, msc); +} + +static irqreturn_t mpam_spi_handler(int irq, void *dev_id) +{ + struct mpam_msc *msc = dev_id; + + return __mpam_irq_handler(irq, msc); +} + +static irqreturn_t mpam_disable_thread(int irq, void *dev_id); + +static int mpam_register_irqs(void) +{ + int err, irq; + struct mpam_msc *msc; + + lockdep_assert_cpus_held(); + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(msc, &mpam_all_msc, glbl_list) { + irq = platform_get_irq_byname_optional(msc->pdev, "error"); + if (irq <= 0) + continue; + + /* The MPAM spec says the interrupt can be SPI, PPI or LPI */ + /* We anticipate sharing the interrupt with other MSCs */ + if (irq_is_percpu(irq)) { + 
err = request_percpu_irq(irq, &mpam_ppi_handler, + "mpam:msc:error", + msc->error_dev_id); + if (err) + return err; + + mutex_lock(&msc->lock); + msc->reenable_error_ppi = irq; + smp_call_function_many(&msc->accessibility, + &_enable_percpu_irq, &irq, + true); + mutex_unlock(&msc->lock); + } else { + err = devm_request_threaded_irq(&msc->pdev->dev, irq, + &mpam_spi_handler, + &mpam_disable_thread, + IRQF_SHARED, + "mpam:msc:error", msc); + if (err) + return err; + } + + mutex_lock(&msc->lock); + msc->error_irq_requested = true; + mpam_touch_msc(msc, mpam_enable_msc_ecr, msc); + msc->error_irq_hw_enabled = true; + mutex_unlock(&msc->lock); + } + + return 0; +} + +static void mpam_unregister_irqs(void) +{ + int irq; + struct mpam_msc *msc; + + cpus_read_lock(); + /* take the lock as free_irq() can sleep */ + mutex_lock(&mpam_list_lock); + list_for_each_entry(msc, &mpam_all_msc, glbl_list) { + irq = platform_get_irq_byname_optional(msc->pdev, "error"); + if (irq <= 0) + continue; + + mutex_lock(&msc->lock); + if (msc->error_irq_hw_enabled) { + mpam_touch_msc(msc, mpam_disable_msc_ecr, msc); + msc->error_irq_hw_enabled = false; + } + + if (msc->error_irq_requested) { + if (irq_is_percpu(irq)) { + msc->reenable_error_ppi = 0; + free_percpu_irq(irq, msc->error_dev_id); + } else { + devm_free_irq(&msc->pdev->dev, irq, msc); + } + msc->error_irq_requested = false; + } + mutex_unlock(&msc->lock); + } + mutex_unlock(&mpam_list_lock); + cpus_read_unlock(); +} + static void mpam_enable_once(void) { + int err; + + /* + * If all the MSC have been probed, enabling the IRQs happens next. 
+ * That involves cross-calling to a CPU that can reach the MSC, and + * the locks must be taken in this order: + */ + cpus_read_lock(); mutex_lock(&mpam_list_lock); mpam_enable_merge_features(); + + err = mpam_register_irqs(); + if (err) + pr_warn("Failed to register irqs: %d\n", err); + mutex_unlock(&mpam_list_lock); + cpus_read_unlock(); + + if (err) { + schedule_work(&mpam_broken_work); + return; + } mutex_lock(&mpam_cpuhp_state_lock); cpuhp_remove_state(mpam_cpuhp_state); @@ -1221,15 +1497,31 @@ static void mpam_reset_class(struct mpam_class *class) * All of MPAMs errors indicate a software bug, restore any modified * controls to their reset values. */ -void mpam_disable(void) +static irqreturn_t mpam_disable_thread(int irq, void *dev_id) { int idx; struct mpam_class *class; + mutex_lock(&mpam_cpuhp_state_lock); + if (mpam_cpuhp_state) { + cpuhp_remove_state(mpam_cpuhp_state); + mpam_cpuhp_state = 0; + } + mutex_unlock(&mpam_cpuhp_state_lock); + + mpam_unregister_irqs(); + idx = srcu_read_lock(&mpam_srcu); list_for_each_entry_rcu(class, &mpam_classes, classes_list) mpam_reset_class(class); srcu_read_unlock(&mpam_srcu, idx); + + return IRQ_HANDLED; +} + +void mpam_disable(struct work_struct *ignored) +{ + mpam_disable_thread(0, NULL); } /* @@ -1243,7 +1535,6 @@ void mpam_enable(struct work_struct *work) struct mpam_msc *msc; bool all_devices_probed = true; - /* Have we probed all the hw devices? 
*/ mutex_lock(&mpam_list_lock); list_for_each_entry(msc, &mpam_all_msc, glbl_list) { mutex_lock(&msc->lock); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 228d3c286f98..b58d031d34e9 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -29,10 +29,17 @@ struct mpam_msc struct pcc_mbox_chan *pcc_chan; u32 nrdy_usec; cpumask_t accessibility; + bool has_extd_esr; + + int reenable_error_ppi; + struct mpam_msc * __percpu *error_dev_id; + atomic_t online_refs; struct mutex lock; bool probed; + bool error_irq_requested; + bool error_irq_hw_enabled; u16 partid_max; u8 pmg_max; unsigned long ris_idxs[128 / BITS_PER_LONG]; @@ -167,6 +174,7 @@ extern u8 mpam_pmg_max; /* Scheduled work callback to enable mpam once all MSC have been probed */ void mpam_enable(struct work_struct *work); +void mpam_disable(struct work_struct *work); /* * MPAM MSCs have the following register layout. See: -- Gitee From 4a471113ee909836847b83b2674cd71499721c14 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 13 May 2021 15:21:13 +0100 Subject: [PATCH 0599/2138] arm_mpam: Use the arch static key to indicate when mpam is enabled ANBZ: #8686 commit 08d5c2c036ad3116c897722813c80f522e652334 morse-linux. Once all the MSC have been probed, the system wide usable number of PARTID is known and the configuration arrays can be allocated. After this point, checking all the MSC have been probed is pointless, and the cpuhp callbacks should restore the configuration, instead of just resetting the MSC. Enable the architecture's static key that indicates whether mpam is enabled and use this to skip the discovery work on cpu hotplug. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 6 ++++++ drivers/platform/mpam/mpam_internal.h | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index ac82c041999d..04e1dfd1c5e3 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -828,6 +828,9 @@ static int mpam_discovery_cpu_online(unsigned int cpu) struct mpam_msc *msc; bool new_device_probed = false; + if (mpam_is_enabled()) + return 0; + mutex_lock(&mpam_list_lock); list_for_each_entry(msc, &mpam_all_msc, glbl_list) { if (!cpumask_test_cpu(cpu, &msc->accessibility)) @@ -1468,6 +1471,7 @@ static void mpam_enable_once(void) partid_max_published = true; spin_unlock(&partid_max_lock); + static_branch_enable(&mpam_enabled); mpam_register_cpuhp_callbacks(mpam_cpu_online); pr_info("MPAM enabled with %u partid and %u pmg\n", @@ -1509,6 +1513,8 @@ static irqreturn_t mpam_disable_thread(int irq, void *dev_id) } mutex_unlock(&mpam_cpuhp_state_lock); + static_branch_disable(&mpam_enabled); + mpam_unregister_irqs(); idx = srcu_read_lock(&mpam_srcu); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index b58d031d34e9..bc97d569a2bd 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -8,12 +8,20 @@ #include #include #include +#include #include #include #include #include #include +DECLARE_STATIC_KEY_FALSE(mpam_enabled); + +static inline bool mpam_is_enabled(void) +{ + return static_branch_likely(&mpam_enabled); +} + struct mpam_msc { /* member of mpam_all_msc */ -- Gitee From 480eb54c00b78f5621c580252793c012c41ecc55 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 10 
Feb 2021 18:11:20 +0000 Subject: [PATCH 0600/2138] arm_mpam: Allow configuration to be applied and restored during cpu online ANBZ: #8686 commit 7b3c212b6f49f3f9011a8870604f95716d6b1f22 morse-linux. When CPUs come online the original configuration should be restored. Once the maximum partid is known, allocate an configuration array for each component, and reprogram each RIS configuration from this. The MPAM spec describes how multiple controls can interact. To prevent this happening by accident, always reset controls that don't have a valid configuration. This allows the same helper to be used for configuration and reset. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 193 +++++++++++++++++++++++--- drivers/platform/mpam/mpam_internal.h | 24 +++- 2 files changed, 192 insertions(+), 25 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 04e1dfd1c5e3..7da57310d52c 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -699,51 +699,89 @@ static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd) __mpam_write_reg(msc, reg, bm); } -static void mpam_reset_ris_partid(struct mpam_msc_ris *ris, u16 partid) +static void mpam_reprogram_ris_partid(struct mpam_msc_ris *ris, u16 partid, + struct mpam_config *cfg) { struct mpam_msc *msc = ris->msc; u16 bwa_fract = MPAMCFG_MBW_MAX_MAX; struct mpam_props *rprops = &ris->props; - lockdep_assert_held(&msc->lock); - spin_lock(&msc->part_sel_lock); __mpam_part_sel(ris->ris_idx, partid, msc); - if (mpam_has_feature(mpam_feat_cpor_part, rprops)) - mpam_reset_msc_bitmap(msc, MPAMCFG_CPBM, rprops->cpbm_wd); + if (mpam_has_feature(mpam_feat_cpor_part, rprops)) { + if 
(mpam_has_feature(mpam_feat_cpor_part, cfg)) + mpam_write_partsel_reg(msc, CPBM, cfg->cpbm); + else + mpam_reset_msc_bitmap(msc, MPAMCFG_CPBM, + rprops->cpbm_wd); + } - if (mpam_has_feature(mpam_feat_mbw_part, rprops)) - mpam_reset_msc_bitmap(msc, MPAMCFG_MBW_PBM, rprops->mbw_pbm_bits); + if (mpam_has_feature(mpam_feat_mbw_part, rprops)) { + if (mpam_has_feature(mpam_feat_mbw_part, cfg)) + mpam_write_partsel_reg(msc, MBW_PBM, cfg->mbw_pbm); + else + mpam_reset_msc_bitmap(msc, MPAMCFG_MBW_PBM, + rprops->mbw_pbm_bits); + } if (mpam_has_feature(mpam_feat_mbw_min, rprops)) mpam_write_partsel_reg(msc, MBW_MIN, 0); - if (mpam_has_feature(mpam_feat_mbw_max, rprops)) - mpam_write_partsel_reg(msc, MBW_MAX, bwa_fract); + if (mpam_has_feature(mpam_feat_mbw_max, rprops)) { + if (mpam_has_feature(mpam_feat_mbw_max, cfg)) + mpam_write_partsel_reg(msc, MBW_MAX, cfg->mbw_max); + else + mpam_write_partsel_reg(msc, MBW_MAX, bwa_fract); + } if (mpam_has_feature(mpam_feat_mbw_prop, rprops)) mpam_write_partsel_reg(msc, MBW_PROP, bwa_fract); spin_unlock(&msc->part_sel_lock); } +struct reprogram_ris { + struct mpam_msc_ris *ris; + struct mpam_config *cfg; +}; + +/* Call with MSC lock held */ +static int mpam_reprogram_ris(void *_arg) +{ + u16 partid, partid_max; + struct reprogram_ris *arg = _arg; + struct mpam_msc_ris *ris = arg->ris; + struct mpam_config *cfg = arg->cfg; + + if (ris->in_reset_state) + return 0; + + spin_lock(&partid_max_lock); + partid_max = mpam_partid_max; + spin_unlock(&partid_max_lock); + for (partid = 0; partid < partid_max; partid++) + mpam_reprogram_ris_partid(ris, partid, cfg); + + return 0; +} + /* * Called via smp_call_on_cpu() to prevent migration, while still being * pre-emptible. 
*/ static int mpam_reset_ris(void *arg) { - u16 partid, partid_max; struct mpam_msc_ris *ris = arg; + struct reprogram_ris reprogram_arg; + struct mpam_config empty_cfg = { 0 }; if (ris->in_reset_state) return 0; - spin_lock(&partid_max_lock); - partid_max = mpam_partid_max; - spin_unlock(&partid_max_lock); - for (partid = 0; partid < partid_max; partid++) - mpam_reset_ris_partid(ris, partid); + reprogram_arg.ris = ris; + reprogram_arg.cfg = &empty_cfg; + + mpam_reprogram_ris(&reprogram_arg); return 0; } @@ -792,6 +830,37 @@ static void mpam_reset_msc(struct mpam_msc *msc, bool online) srcu_read_unlock(&mpam_srcu, idx); } +static void mpam_reprogram_msc(struct mpam_msc *msc) +{ + int idx; + u16 partid; + bool reset; + struct mpam_config *cfg; + struct mpam_msc_ris *ris; + + lockdep_assert_held(&msc->lock); + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &msc->ris, msc_list) { + if (!mpam_is_enabled() && !ris->in_reset_state) { + mpam_touch_msc(msc, &mpam_reset_ris, ris); + ris->in_reset_state = true; + continue; + } + + reset = true; + for (partid = 0; partid < mpam_partid_max; partid++) { + cfg = &ris->comp->cfg[partid]; + if (cfg->features) + reset = false; + + mpam_reprogram_ris_partid(ris, partid, cfg); + } + ris->in_reset_state = reset; + } + srcu_read_unlock(&mpam_srcu, idx); +} + static void _enable_percpu_irq(void *_irq) { int *irq = _irq; @@ -813,7 +882,7 @@ static int mpam_cpu_online(unsigned int cpu) _enable_percpu_irq(&msc->reenable_error_ppi); if (atomic_fetch_inc(&msc->online_refs) == 0) - mpam_reset_msc(msc, true); + mpam_reprogram_msc(msc); mutex_unlock(&msc->lock); } srcu_read_unlock(&mpam_srcu, idx); @@ -1433,6 +1502,37 @@ static void mpam_unregister_irqs(void) cpus_read_unlock(); } +static int __allocate_component_cfg(struct mpam_component *comp) +{ + if (comp->cfg) + return 0; + + comp->cfg = kcalloc(mpam_partid_max, sizeof(*comp->cfg), GFP_KERNEL); + if (!comp->cfg) + return -ENOMEM; + + return 0; +} + +static int 
mpam_allocate_config(void) +{ + int err = 0; + struct mpam_class *class; + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(class, &mpam_classes, classes_list) { + list_for_each_entry(comp, &class->components, class_list) { + err = __allocate_component_cfg(comp); + if (err) + return err; + } + } + + return 0; +} + static void mpam_enable_once(void) { int err; @@ -1444,12 +1544,21 @@ static void mpam_enable_once(void) */ cpus_read_lock(); mutex_lock(&mpam_list_lock); - mpam_enable_merge_features(); + do { + mpam_enable_merge_features(); - err = mpam_register_irqs(); - if (err) - pr_warn("Failed to register irqs: %d\n", err); + err = mpam_allocate_config(); + if (err) { + pr_err("Failed to allocate configuration arrays.\n"); + break; + } + err = mpam_register_irqs(); + if (err) { + pr_warn("Failed to register irqs: %d\n", err); + break; + } + } while (0); mutex_unlock(&mpam_list_lock); cpus_read_unlock(); @@ -1486,6 +1595,8 @@ static void mpam_reset_class(struct mpam_class *class) idx = srcu_read_lock(&mpam_srcu); list_for_each_entry_rcu(comp, &class->components, class_list) { + memset(comp->cfg, 0, (mpam_partid_max * sizeof(*comp->cfg))); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { mutex_lock(&ris->msc->lock); mpam_touch_msc(ris->msc, mpam_reset_ris, ris); @@ -1575,6 +1686,48 @@ static int mpam_msc_drv_remove(struct platform_device *pdev) return 0; } +struct mpam_write_config_arg { + struct mpam_msc_ris *ris; + struct mpam_component *comp; + u16 partid; +}; + +static int __write_config(void *arg) +{ + struct mpam_write_config_arg *c = arg; + + mpam_reprogram_ris_partid(c->ris, c->partid, &c->comp->cfg[c->partid]); + + return 0; +} + +/* TODO: split into write_config/sync_config */ +/* TODO: add config_dirty bitmap to drive sync_config */ +int mpam_apply_config(struct mpam_component *comp, u16 partid, + struct mpam_config *cfg) +{ + struct mpam_write_config_arg arg; + struct mpam_msc_ris *ris; + int idx; + + 
lockdep_assert_cpus_held(); + + comp->cfg[partid] = *cfg; + arg.comp = comp; + arg.partid = partid; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + arg.ris = ris; + mutex_lock(&ris->msc->lock); + mpam_touch_msc(ris->msc, __write_config, &arg); + mutex_unlock(&ris->msc->lock); + } + srcu_read_unlock(&mpam_srcu, idx); + + return 0; +} + static const struct of_device_id mpam_of_match[] = { { .compatible = "arm,mpam-msc", }, {}, diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index bc97d569a2bd..fa9386263dd5 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -102,11 +102,7 @@ struct mpam_props u16 num_mbwu_mon; }; -static inline bool mpam_has_feature(enum mpam_device_features feat, - struct mpam_props *props) -{ - return (1<features; -} +#define mpam_has_feature(_feat, x) ((1<<_feat) & (x)->features) static inline void mpam_set_feature(enum mpam_device_features feat, struct mpam_props *props) @@ -136,6 +132,15 @@ struct mpam_class struct list_head classes_list; }; +struct mpam_config { + /* Which configuration values are valid. 0 is used for reset */ + mpam_features_t features; + + u32 cpbm; + u32 mbw_pbm; + u16 mbw_max; +}; + struct mpam_component { u32 comp_id; @@ -145,6 +150,12 @@ struct mpam_component cpumask_t affinity; + /* + * Array of configuration values, indexed by partid. + * Read from cpuhp callbacks, hold the cpuhp lock when writing. + */ + struct mpam_config *cfg; + /* member of mpam_class:components */ struct list_head class_list; @@ -184,6 +195,9 @@ extern u8 mpam_pmg_max; void mpam_enable(struct work_struct *work); void mpam_disable(struct work_struct *work); +int mpam_apply_config(struct mpam_component *comp, u16 partid, + struct mpam_config *cfg); + /* * MPAM MSCs have the following register layout. 
See: * Arm Architecture Reference Manual Supplement - Memory System Resource -- Gitee From 6fbb605b3d242fb0ce95d5fce106f327e8c4b767 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 28 Feb 2019 18:57:21 +0000 Subject: [PATCH 0601/2138] arm_mpam: Probe and reset the rest of the features ANBZ: #8686 commit 137b0cba6abe9f249f25b0d5f32a693dffc89b9a morse-linux. MPAM supports more features than are going to be exposed to resctrl. For partid other than 0, the reset values of these controls isn't known. Discover the rest of the features so they can be reset to avoid any side effects when resctrl is in use. PARTID narrowing allows MSC/RIS to support less configuration space than is usable. If this feature is found on a class of device we are likely to use, then reduce the partid_max to make it usable. This allows us to map a PARTID to itself. CC: Rohit Mathew Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 91 +++++++++++++++++++++++++++ drivers/platform/mpam/mpam_internal.h | 10 ++- 2 files changed, 100 insertions(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 7da57310d52c..612c4b97d373 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -549,10 +549,20 @@ static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) int err; struct mpam_msc *msc = ris->msc; struct mpam_props *props = &ris->props; + struct mpam_class *class = ris->comp->class; lockdep_assert_held(&msc->lock); lockdep_assert_held(&msc->part_sel_lock); + /* Cache Capacity Partitioning */ + if (FIELD_GET(MPAMF_IDR_HAS_CCAP_PART, ris->idr)) { + u32 ccap_features = mpam_read_partsel_reg(msc, CCAP_IDR); + + props->cmax_wd = FIELD_GET(MPAMF_CCAP_IDR_CMAX_WD, 
ccap_features); + if (props->cmax_wd) + mpam_set_feature(mpam_feat_ccap_part, props); + } + /* Cache Portion partitioning */ if (FIELD_GET(MPAMF_IDR_HAS_CPOR_PART, ris->idr)) { u32 cpor_features = mpam_read_partsel_reg(msc, CPOR_IDR); @@ -575,6 +585,31 @@ static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) props->bwa_wd = FIELD_GET(MPAMF_MBW_IDR_BWA_WD, mbw_features); if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MAX, mbw_features)) mpam_set_feature(mpam_feat_mbw_max, props); + + if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MIN, mbw_features)) + mpam_set_feature(mpam_feat_mbw_min, props); + + if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_PROP, mbw_features)) + mpam_set_feature(mpam_feat_mbw_prop, props); + } + + /* Priority partitioning */ + if (FIELD_GET(MPAMF_IDR_HAS_PRI_PART, ris->idr)) { + u32 pri_features = mpam_read_partsel_reg(msc, PRI_IDR); + + props->intpri_wd = FIELD_GET(MPAMF_PRI_IDR_INTPRI_WD, pri_features); + if (props->intpri_wd && FIELD_GET(MPAMF_PRI_IDR_HAS_INTPRI, pri_features)) { + mpam_set_feature(mpam_feat_intpri_part, props); + if (FIELD_GET(MPAMF_PRI_IDR_INTPRI_0_IS_LOW, pri_features)) + mpam_set_feature(mpam_feat_intpri_part_0_low, props); + } + + props->dspri_wd = FIELD_GET(MPAMF_PRI_IDR_DSPRI_WD, pri_features); + if (props->dspri_wd && FIELD_GET(MPAMF_PRI_IDR_HAS_DSPRI, pri_features)) { + mpam_set_feature(mpam_feat_dspri_part, props); + if (FIELD_GET(MPAMF_PRI_IDR_DSPRI_0_IS_LOW, pri_features)) + mpam_set_feature(mpam_feat_dspri_part_0_low, props); + } } /* Performance Monitoring */ @@ -610,6 +645,21 @@ static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) mpam_set_feature(mpam_feat_msmon_mbwu_rwbw, props); } } + + /* + * RIS with PARTID narrowing don't have enough storage for one + * configuration per PARTID. If these are in a class we could use, + * reduce the supported partid_max to match the numer of intpartid. + * If the class is unknown, just ignore it. 
+ */ + if (FIELD_GET(MPAMF_IDR_HAS_PARTID_NRW, ris->idr) && + class->type != MPAM_CLASS_UNKNOWN) { + u32 nrwidr = mpam_read_partsel_reg(msc, PARTID_NRW_IDR); + u16 partid_max = FIELD_GET(MPAMF_PARTID_NRW_IDR_INTPARTID_MAX, nrwidr); + + mpam_set_feature(mpam_feat_partid_nrw, props); + msc->partid_max = min(msc->partid_max, partid_max); + } } static int mpam_msc_hw_probe(struct mpam_msc *msc) @@ -702,13 +752,21 @@ static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd) static void mpam_reprogram_ris_partid(struct mpam_msc_ris *ris, u16 partid, struct mpam_config *cfg) { + u32 pri_val = 0; + u16 cmax = MPAMCFG_CMAX_CMAX; struct mpam_msc *msc = ris->msc; u16 bwa_fract = MPAMCFG_MBW_MAX_MAX; struct mpam_props *rprops = &ris->props; + u16 dspri = GENMASK(rprops->dspri_wd, 0); + u16 intpri = GENMASK(rprops->intpri_wd, 0); spin_lock(&msc->part_sel_lock); __mpam_part_sel(ris->ris_idx, partid, msc); + if(mpam_has_feature(mpam_feat_partid_nrw, rprops)) + mpam_write_partsel_reg(msc, INTPARTID, + (MPAMCFG_PART_SEL_INTERNAL | partid)); + if (mpam_has_feature(mpam_feat_cpor_part, rprops)) { if (mpam_has_feature(mpam_feat_cpor_part, cfg)) mpam_write_partsel_reg(msc, CPBM, cfg->cpbm); @@ -737,6 +795,26 @@ static void mpam_reprogram_ris_partid(struct mpam_msc_ris *ris, u16 partid, if (mpam_has_feature(mpam_feat_mbw_prop, rprops)) mpam_write_partsel_reg(msc, MBW_PROP, bwa_fract); + + if (mpam_has_feature(mpam_feat_ccap_part, rprops)) + mpam_write_partsel_reg(msc, CMAX, cmax); + + if (mpam_has_feature(mpam_feat_intpri_part, rprops) || + mpam_has_feature(mpam_feat_dspri_part, rprops)) { + /* aces high? 
*/ + if (!mpam_has_feature(mpam_feat_intpri_part_0_low, rprops)) + intpri = 0; + if (!mpam_has_feature(mpam_feat_dspri_part_0_low, rprops)) + dspri = 0; + + if (mpam_has_feature(mpam_feat_intpri_part, rprops)) + pri_val |= FIELD_PREP(MPAMCFG_PRI_INTPRI, intpri); + if (mpam_has_feature(mpam_feat_dspri_part, rprops)) + pri_val |= FIELD_PREP(MPAMCFG_PRI_DSPRI, dspri); + + mpam_write_partsel_reg(msc, PRI, pri_val); + } + spin_unlock(&msc->part_sel_lock); } @@ -1285,6 +1363,19 @@ __resource_props_mismatch(struct mpam_msc_ris *ris, struct mpam_class *class) cprops->num_csu_mon = min(cprops->num_csu_mon, rprops->num_csu_mon); if (cprops->num_mbwu_mon != rprops->num_mbwu_mon) cprops->num_mbwu_mon = min(cprops->num_mbwu_mon, rprops->num_mbwu_mon); + + if (cprops->intpri_wd != rprops->intpri_wd) + cprops->intpri_wd = min(cprops->intpri_wd, rprops->intpri_wd); + if (cprops->dspri_wd != rprops->dspri_wd) + cprops->dspri_wd = min(cprops->dspri_wd, rprops->dspri_wd); + + /* {int,ds}pri may not have differing 0-low behaviour */ + if (mpam_has_feature(mpam_feat_intpri_part_0_low, cprops) != + mpam_has_feature(mpam_feat_intpri_part_0_low, rprops)) + mpam_clear_feature(mpam_feat_intpri_part, &cprops->features); + if (mpam_has_feature(mpam_feat_dspri_part_0_low, cprops) != + mpam_has_feature(mpam_feat_dspri_part_0_low, rprops)) + mpam_clear_feature(mpam_feat_dspri_part, &cprops->features); } /* diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index fa9386263dd5..d2319eefa565 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -70,7 +70,7 @@ struct mpam_msc * When we compact the supported features, we don't care what they are. * Storing them as a bitmap makes life easy. 
*/ -typedef u16 mpam_features_t; +typedef u32 mpam_features_t; /* Bits for mpam_features_t */ enum mpam_device_features { @@ -80,6 +80,10 @@ enum mpam_device_features { mpam_feat_mbw_min, mpam_feat_mbw_max, mpam_feat_mbw_prop, + mpam_feat_intpri_part, + mpam_feat_intpri_part_0_low, + mpam_feat_dspri_part, + mpam_feat_dspri_part_0_low, mpam_feat_msmon, mpam_feat_msmon_csu, mpam_feat_msmon_csu_capture, @@ -87,6 +91,7 @@ enum mpam_device_features { mpam_feat_msmon_mbwu_capture, mpam_feat_msmon_mbwu_rwbw, mpam_feat_msmon_capt, + mpam_feat_partid_nrw, MPAM_FEATURE_LAST, }; #define MPAM_ALL_FEATURES ((1< Date: Fri, 25 Jun 2021 12:53:12 +0100 Subject: [PATCH 0602/2138] arm_mpam: Add helpers to allocate monitors ANBZ: #8686 commit e7b9d1fbfb0478c896b4fb96c9b214d4acc3d49e morse-linux. MPAM's MSC support a number of monitors, each of which supports bandwidth counters, or cache-storage-utilisation counters. To use a counter, a monitor needs to be configured. Add helpers to allocate and free CSU or MBWU monitors. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 2 ++ drivers/platform/mpam/mpam_internal.h | 35 +++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 612c4b97d373..82dffe8c03ef 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -244,6 +244,8 @@ mpam_class_alloc(u8 level_idx, enum mpam_class_types type, gfp_t gfp) class->level = level_idx; class->type = type; INIT_LIST_HEAD_RCU(&class->classes_list); + ida_init(&class->ida_csu_mon); + ida_init(&class->ida_mbwu_mon); list_add_rcu(&class->classes_list, &mpam_classes); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index d2319eefa565..fd792f9fba86 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -138,6 +138,9 @@ struct mpam_class /* member of mpam_classes */ struct list_head classes_list; + + struct ida ida_csu_mon; + struct ida ida_mbwu_mon; }; struct mpam_config { @@ -191,6 +194,38 @@ struct mpam_msc_ris struct mpam_component *comp; }; +static inline int mpam_alloc_csu_mon(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_csu, cprops)) + return -EOPNOTSUPP; + + return ida_alloc_range(&class->ida_csu_mon, 0, cprops->num_csu_mon - 1, + GFP_KERNEL); +} + +static inline void mpam_free_csu_mon(struct mpam_class *class, int csu_mon) +{ + ida_free(&class->ida_csu_mon, csu_mon); +} + +static inline int mpam_alloc_mbwu_mon(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_mbwu, cprops)) + return -EOPNOTSUPP; + + return 
ida_alloc_range(&class->ida_mbwu_mon, 0, + cprops->num_mbwu_mon - 1, GFP_KERNEL); +} + +static inline void mpam_free_mbwu_mon(struct mpam_class *class, int mbwu_mon) +{ + ida_free(&class->ida_mbwu_mon, mbwu_mon); +} + /* List of all classes */ extern struct list_head mpam_classes; extern struct srcu_struct mpam_srcu; -- Gitee From 29c755eee4746d76a6e685c3aee72e01a8e6e3ec Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 24 Jun 2021 16:49:50 +0100 Subject: [PATCH 0603/2138] arm_mpam: Add mpam_msmon_read() to read monitor value ANBZ: #8686 commit 8a3a9a2ba3085adade0d7c7075c508a12c93e143 morse-linux. Reading a monitor involves configuring what you want to monitor, and reading the value. Components made up of multiple MSC may need values from each MSC. MSCs may take time to configure, returning 'not ready'. The maximum 'not ready' time should have been provided by firmware. Add mpam_msmon_read() to hide all this. If (one of) the MSC returns not ready, then wait the full timeout value before trying again. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 219 ++++++++++++++++++++++++++ drivers/platform/mpam/mpam_internal.h | 19 +++ 2 files changed, 238 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 82dffe8c03ef..3516528f2e14 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -123,6 +123,22 @@ static void __mpam_write_reg(struct mpam_msc *msc, u16 reg, u32 val) __mpam_write_reg(msc, MPAMCFG_##reg, val); \ }) +#define mpam_read_monsel_reg(msc, reg) \ +({ \ + u32 ____ret; \ + \ + lockdep_assert_held_once(&msc->mon_sel_lock); \ + ____ret = __mpam_read_reg(msc, MSMON_##reg); \ + \ + ____ret; \ +}) + +#define mpam_write_monsel_reg(msc, reg, val) \ +({ \ + lockdep_assert_held_once(&msc->mon_sel_lock); \ + __mpam_write_reg(msc, MSMON_##reg, val); \ +}) + static u64 mpam_msc_read_idr(struct mpam_msc *msc) { u64 idr_high = 0, idr_low; @@ -725,6 +741,209 @@ static int mpam_msc_hw_probe(struct mpam_msc *msc) return 0; } +struct mon_read +{ + struct mpam_msc_ris *ris; + struct mon_cfg *ctx; + enum mpam_device_features type; + u64 *val; + int err; +}; + +static void gen_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val, + u32 *flt_val) +{ + struct mon_cfg *ctx = m->ctx; + + switch (m->type) { + case mpam_feat_msmon_csu: + *ctl_val = MSMON_CFG_MBWU_CTL_TYPE_CSU; + break; + case mpam_feat_msmon_mbwu: + *ctl_val = MSMON_CFG_MBWU_CTL_TYPE_MBWU; + break; + default: + return; + } + + /* + * For CSU counters its implementation-defined what happens when not + * filtering by partid. 
+ */ + *ctl_val |= MSMON_CFG_x_CTL_MATCH_PARTID; + + *flt_val = FIELD_PREP(MSMON_CFG_MBWU_FLT_PARTID, ctx->partid); + if (m->ctx->match_pmg) { + *ctl_val |= MSMON_CFG_x_CTL_MATCH_PMG; + *flt_val |= FIELD_PREP(MSMON_CFG_MBWU_FLT_PMG, ctx->pmg); + } + + if (mpam_has_feature(mpam_feat_msmon_mbwu_rwbw, &m->ris->props)) + *flt_val |= FIELD_PREP(MSMON_CFG_MBWU_FLT_RWBW, ctx->opts); +} + +static void read_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val, + u32 *flt_val) +{ + struct mpam_msc *msc = m->ris->msc; + + switch (m->type) { + case mpam_feat_msmon_csu: + *ctl_val = mpam_read_monsel_reg(msc, CFG_CSU_CTL); + *flt_val = mpam_read_monsel_reg(msc, CFG_CSU_FLT); + break; + case mpam_feat_msmon_mbwu: + *ctl_val = mpam_read_monsel_reg(msc, CFG_MBWU_CTL); + *flt_val = mpam_read_monsel_reg(msc, CFG_MBWU_FLT); + break; + default: + return; + } +} + +static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val, + u32 flt_val) +{ + struct mpam_msc *msc = m->ris->msc; + + /* + * Write the ctl_val with the enable bit cleared, reset the counter, + * then enable counter. 
+ */ + switch (m->type) { + case mpam_feat_msmon_csu: + mpam_write_monsel_reg(msc, CFG_CSU_FLT, flt_val); + mpam_write_monsel_reg(msc, CFG_CSU_CTL, ctl_val); + mpam_write_monsel_reg(msc, CSU, 0); + mpam_write_monsel_reg(msc, CFG_CSU_CTL, ctl_val|MSMON_CFG_x_CTL_EN); + break; + case mpam_feat_msmon_mbwu: + mpam_write_monsel_reg(msc, CFG_MBWU_FLT, flt_val); + mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val); + mpam_write_monsel_reg(msc, MBWU, 0); + mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val|MSMON_CFG_x_CTL_EN); + break; + default: + return; + } +} + +static void __ris_msmon_read(void *arg) +{ + u64 now; + bool nrdy = false; + unsigned long flags; + struct mon_read *m = arg; + struct mon_cfg *ctx = m->ctx; + struct mpam_msc_ris *ris = m->ris; + struct mpam_msc *msc = m->ris->msc; + u32 mon_sel, ctl_val, flt_val, cur_ctl, cur_flt; + + lockdep_assert_held(&msc->lock); + + spin_lock_irqsave(&msc->mon_sel_lock, flags); + mon_sel = FIELD_PREP(MSMON_CFG_MON_SEL_MON_SEL, ctx->mon) | + FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx); + mpam_write_monsel_reg(msc, CFG_MON_SEL, mon_sel); + + /* + * Read the existing configuration to avoid re-writing the same values. + * This saves waiting for 'nrdy' on subsequent reads. 
+ */ + read_msmon_ctl_flt_vals(m, &cur_ctl, &cur_flt); + gen_msmon_ctl_flt_vals(m, &ctl_val, &flt_val); + if (cur_flt != flt_val || cur_ctl != (ctl_val | MSMON_CFG_x_CTL_EN)) + write_msmon_ctl_flt_vals(m, ctl_val, flt_val); + + switch (m->type) { + case mpam_feat_msmon_csu: + now = mpam_read_monsel_reg(msc, CSU); + break; + case mpam_feat_msmon_mbwu: + now = mpam_read_monsel_reg(msc, MBWU); + break; + default: + return; + } + spin_unlock_irqrestore(&msc->mon_sel_lock, flags); + + nrdy = now & MSMON___NRDY; + if (nrdy) { + m->err = -EBUSY; + return; + } + + now = FIELD_GET(MSMON___VALUE, now); + *(m->val) += now; +} + +static int _msmon_read(struct mpam_component *comp, struct mon_read *arg) +{ + int err, idx; + struct mpam_msc *msc; + struct mpam_msc_ris *ris; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + arg->ris = ris; + + msc = ris->msc; + mutex_lock(&msc->lock); + err = smp_call_function_any(&msc->accessibility, + __ris_msmon_read, arg, true); + mutex_unlock(&msc->lock); + if (!err && arg->err) + err = arg->err; + if (err) + break; + } + srcu_read_unlock(&mpam_srcu, idx); + + return err; +} + +int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, + enum mpam_device_features type, u64 *val) +{ + int err; + struct mon_read arg; + u64 wait_jiffies = 0; + struct mpam_props *cprops = &comp->class->props; + + might_sleep(); + + if (!mpam_is_enabled()) + return -EIO; + + if (!mpam_has_feature(type, cprops)) + return -EOPNOTSUPP; + + memset(&arg, 0, sizeof(arg)); + arg.ctx = ctx; + arg.type = type; + arg.val = val; + *val = 0; + + err = _msmon_read(comp, &arg); + if (err == -EBUSY) + wait_jiffies = usecs_to_jiffies(comp->class->nrdy_usec); + + while (wait_jiffies) + wait_jiffies = schedule_timeout_uninterruptible(wait_jiffies); + + if (err == -EBUSY) { + memset(&arg, 0, sizeof(arg)); + arg.ctx = ctx; + arg.type = type; + arg.val = val; + *val = 0; + + err = _msmon_read(comp, &arg); + } + + return err; +} 
+ static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd) { u32 num_words, msb; diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index fd792f9fba86..06a31e5d9610 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -62,6 +62,7 @@ struct mpam_msc * If needed, take msc->lock first. */ spinlock_t part_sel_lock; + spinlock_t mon_sel_lock; void __iomem * mapped_hwpage; size_t mapped_hwpage_sz; }; @@ -194,6 +195,21 @@ struct mpam_msc_ris struct mpam_component *comp; }; +/* The values for MSMON_CFG_MBWU_FLT.RWBW */ +enum mon_filter_options { + COUNT_BOTH = 0, + COUNT_WRITE = 1, + COUNT_READ = 2, +}; + +struct mon_cfg { + u16 mon; + u8 pmg; + bool match_pmg; + u32 partid; + enum mon_filter_options opts; +}; + static inline int mpam_alloc_csu_mon(struct mpam_class *class) { struct mpam_props *cprops = &class->props; @@ -241,6 +257,9 @@ void mpam_disable(struct work_struct *work); int mpam_apply_config(struct mpam_component *comp, u16 partid, struct mpam_config *cfg); +int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, + enum mpam_device_features, u64 *val); + /* * MPAM MSCs have the following register layout. See: * Arm Architecture Reference Manual Supplement - Memory System Resource -- Gitee From 160d217b7b7358ddcafbfc81eaf6760e8f42373f Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 8 Sep 2021 12:23:40 +0100 Subject: [PATCH 0604/2138] arm_mpam: Track bandwidth counter state for overflow and power management ANBZ: #8686 commit 1120466a35068928977b61c0fb6e5734e664f1ba morse-linux. Bandwidth counters need to run continuously to correctly reflect the bandwidth. The value read may be lower than the previous value read in the case of overflow and when the hardware is reset due to CPU hotplug. Add struct mbwu_state to track the bandwidth counter to allow overflow and power management to be handled. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 148 +++++++++++++++++++++++++- drivers/platform/mpam/mpam_internal.h | 52 ++++++--- 2 files changed, 181 insertions(+), 19 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 3516528f2e14..930e14471378 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -773,6 +773,7 @@ static void gen_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val, *ctl_val |= MSMON_CFG_x_CTL_MATCH_PARTID; *flt_val = FIELD_PREP(MSMON_CFG_MBWU_FLT_PARTID, ctx->partid); + *flt_val |= FIELD_PREP(MSMON_CFG_MBWU_FLT_RWBW, ctx->opts); if (m->ctx->match_pmg) { *ctl_val |= MSMON_CFG_x_CTL_MATCH_PMG; *flt_val |= FIELD_PREP(MSMON_CFG_MBWU_FLT_PMG, ctx->pmg); @@ -805,6 +806,7 @@ static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val, u32 flt_val) { struct mpam_msc *msc = m->ris->msc; + struct msmon_mbwu_state *mbwu_state; /* * Write the ctl_val with the enable bit cleared, reset the counter, @@ -822,21 +824,33 @@ static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val, mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val); mpam_write_monsel_reg(msc, MBWU, 0); mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val|MSMON_CFG_x_CTL_EN); + + mbwu_state = &m->ris->mbwu_state[m->ctx->mon]; + if (mbwu_state) + mbwu_state->prev_val = 0; + break; default: return; } } +static u64 mpam_msmon_overflow_val(struct mpam_msc_ris *ris) +{ + /* TODO: scaling, and long counters */ + return GENMASK_ULL(30, 0); +} + static void __ris_msmon_read(void *arg) { - u64 now; bool nrdy = false; unsigned long flags; struct mon_read *m = arg; + u64 now, overflow_val = 0; struct mon_cfg *ctx = m->ctx; struct mpam_msc_ris *ris = 
m->ris; struct mpam_msc *msc = m->ris->msc; + struct msmon_mbwu_state *mbwu_state; u32 mon_sel, ctl_val, flt_val, cur_ctl, cur_flt; lockdep_assert_held(&msc->lock); @@ -858,22 +872,41 @@ static void __ris_msmon_read(void *arg) switch (m->type) { case mpam_feat_msmon_csu: now = mpam_read_monsel_reg(msc, CSU); + nrdy = now & MSMON___NRDY; + now = FIELD_GET(MSMON___VALUE, now); break; case mpam_feat_msmon_mbwu: now = mpam_read_monsel_reg(msc, MBWU); + nrdy = now & MSMON___NRDY; + now = FIELD_GET(MSMON___VALUE, now); + + if (nrdy) + break; + + mbwu_state = &ris->mbwu_state[ctx->mon]; + if (!mbwu_state) + break; + + /* Add any pre-overflow value to the mbwu_state->val */ + if (mbwu_state->prev_val > now) + overflow_val = mpam_msmon_overflow_val(ris) - mbwu_state->prev_val; + + mbwu_state->prev_val = now; + mbwu_state->correction += overflow_val; + + /* Include bandwidth consumed before the last hardware reset */ + now += mbwu_state->correction; break; default: return; } spin_unlock_irqrestore(&msc->mon_sel_lock, flags); - nrdy = now & MSMON___NRDY; if (nrdy) { m->err = -EBUSY; return; } - now = FIELD_GET(MSMON___VALUE, now); *(m->val) += now; } @@ -1064,6 +1097,68 @@ static int mpam_reprogram_ris(void *_arg) return 0; } +static int mpam_restore_mbwu_state(void *_ris) +{ + int i; + struct mon_read mwbu_arg; + struct mpam_msc_ris *ris = _ris; + + lockdep_assert_held(&ris->msc->lock); + + for (i = 0; i < ris->props.num_mbwu_mon; i++) { + if (ris->mbwu_state[i].enabled) { + mwbu_arg.ris = ris; + mwbu_arg.ctx = &ris->mbwu_state[i].cfg; + mwbu_arg.type = mpam_feat_msmon_mbwu; + + __ris_msmon_read(&mwbu_arg); + } + } + + return 0; +} + +static int mpam_save_mbwu_state(void *arg) +{ + int i; + u64 val; + struct mon_cfg *cfg; + unsigned long flags; + u32 cur_flt, cur_ctl, mon_sel; + struct mpam_msc_ris *ris = arg; + struct mpam_msc *msc = ris->msc; + struct msmon_mbwu_state *mbwu_state; + + lockdep_assert_held(&msc->lock); + + for (i = 0; i < ris->props.num_mbwu_mon; i++) { + 
mbwu_state = &ris->mbwu_state[i]; + cfg = &mbwu_state->cfg; + + spin_lock_irqsave(&msc->mon_sel_lock, flags); + mon_sel = FIELD_PREP(MSMON_CFG_MON_SEL_MON_SEL, i) | + FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx); + mpam_write_monsel_reg(msc, CFG_MON_SEL, mon_sel); + + cur_flt = mpam_read_monsel_reg(msc, CFG_MBWU_FLT); + cur_ctl = mpam_read_monsel_reg(msc, CFG_MBWU_CTL); + mpam_write_monsel_reg(msc, CFG_MBWU_CTL, 0); + + val = mpam_read_monsel_reg(msc, MBWU); + mpam_write_monsel_reg(msc, MBWU, 0); + + cfg->mon = i; + cfg->pmg = FIELD_GET(MSMON_CFG_MBWU_FLT_PMG, cur_flt); + cfg->match_pmg = FIELD_GET(MSMON_CFG_x_CTL_MATCH_PMG, cur_ctl); + cfg->partid = FIELD_GET(MSMON_CFG_MBWU_FLT_PARTID, cur_flt); + mbwu_state->correction += val; + mbwu_state->enabled = FIELD_GET(MSMON_CFG_x_CTL_EN, cur_ctl); + spin_unlock_irqrestore(&msc->mon_sel_lock, flags); + } + + return 0; +} + /* * Called via smp_call_on_cpu() to prevent migration, while still being * pre-emptible. @@ -1125,6 +1220,9 @@ static void mpam_reset_msc(struct mpam_msc *msc, bool online) * for non-zero partid may be lost while the CPUs are offline. 
*/ ris->in_reset_state = online; + + if (mpam_is_enabled() && !online) + mpam_touch_msc(msc, &mpam_save_mbwu_state, ris); } srcu_read_unlock(&mpam_srcu, idx); } @@ -1156,6 +1254,9 @@ static void mpam_reprogram_msc(struct mpam_msc *msc) mpam_reprogram_ris_partid(ris, partid, cfg); } ris->in_reset_state = reset; + + if (mpam_has_feature(mpam_feat_msmon_mbwu, &ris->props)) + mpam_touch_msc(msc, &mpam_restore_mbwu_state, ris); } srcu_read_unlock(&mpam_srcu, idx); } @@ -1814,8 +1915,31 @@ static void mpam_unregister_irqs(void) cpus_read_unlock(); } +static void __destroy_component_cfg(struct mpam_component *comp) +{ + unsigned long flags; + struct mpam_msc_ris *ris; + struct msmon_mbwu_state *mbwu_state; + + kfree(comp->cfg); + list_for_each_entry(ris, &comp->ris, comp_list) { + mutex_lock(&ris->msc->lock); + spin_lock_irqsave(&ris->msc->mon_sel_lock, flags); + mbwu_state = ris->mbwu_state; + ris->mbwu_state = NULL; + spin_unlock_irqrestore(&ris->msc->mon_sel_lock, flags); + mutex_unlock(&ris->msc->lock); + + kfree(mbwu_state); + } +} + static int __allocate_component_cfg(struct mpam_component *comp) { + unsigned long flags; + struct mpam_msc_ris *ris; + struct msmon_mbwu_state *mbwu_state; + if (comp->cfg) return 0; @@ -1823,6 +1947,24 @@ static int __allocate_component_cfg(struct mpam_component *comp) if (!comp->cfg) return -ENOMEM; + list_for_each_entry(ris, &comp->ris, comp_list) { + if (!ris->props.num_mbwu_mon) + continue; + + mbwu_state = kcalloc(ris->props.num_mbwu_mon, + sizeof(*ris->mbwu_state), GFP_KERNEL); + if (!mbwu_state) { + __destroy_component_cfg(comp); + return -ENOMEM; + } + + mutex_lock(&ris->msc->lock); + spin_lock_irqsave(&ris->msc->mon_sel_lock, flags); + ris->mbwu_state = mbwu_state; + spin_unlock_irqrestore(&ris->msc->mon_sel_lock, flags); + mutex_unlock(&ris->msc->lock); + } + return 0; } diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 06a31e5d9610..e546a8612dab 100644 --- 
a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -175,8 +175,40 @@ struct mpam_component struct mpam_class *class; }; -struct mpam_msc_ris -{ +/* The values for MSMON_CFG_MBWU_FLT.RWBW */ +enum mon_filter_options { + COUNT_BOTH = 0, + COUNT_WRITE = 1, + COUNT_READ = 2, +}; + +struct mon_cfg { + u16 mon; + u8 pmg; + bool match_pmg; + u32 partid; + enum mon_filter_options opts; +}; + +/* + * Changes to enabled and cfg are protected by the msc->lock. + * Changes to prev_val and correction are protected by the msc's mon_sel_lock. + */ +struct msmon_mbwu_state { + bool enabled; + struct mon_cfg cfg; + + /* The value last read from the hardware. Used to detect overflow. */ + u64 prev_val; + + /* + * The value to add to the new reading to account for power management, + * and shifts to trigger the overflow interrupt. + */ + u64 correction; +}; + +struct mpam_msc_ris { u8 ris_idx; u64 idr; struct mpam_props props; @@ -193,21 +225,9 @@ struct mpam_msc_ris /* parents: */ struct mpam_msc *msc; struct mpam_component *comp; -}; -/* The values for MSMON_CFG_MBWU_FLT.RWBW */ -enum mon_filter_options { - COUNT_BOTH = 0, - COUNT_WRITE = 1, - COUNT_READ = 2, -}; - -struct mon_cfg { - u16 mon; - u8 pmg; - bool match_pmg; - u32 partid; - enum mon_filter_options opts; + /* msmon mbwu configuration is preserved over reset */ + struct msmon_mbwu_state *mbwu_state; }; static inline int mpam_alloc_csu_mon(struct mpam_class *class) -- Gitee From de7a4cacf40b1c949f41af10bdac71ea3d9558e1 Mon Sep 17 00:00:00 2001 From: Rohit Mathew Date: Tue, 7 Feb 2023 19:14:17 +0000 Subject: [PATCH 0605/2138] arm_mpam: Probe for long/lwd mbwu counters ANBZ: #8686 commit dde6e1a3721f18c9e955dd666afcd84eec30faef morse-linux. mpam v0.1 and versions above v1.0 support optional long counter for memory bandwidth monitoring. The MPAMF_MBWUMON_IDR register have fields indicating support for long counters. 
As of now, a 44 bit counter represented by HAS_LONG field (bit 30) and a 63 bit counter represented by LWD (bit 29) can be optionally integrated. Probe for these counters and set corresponding feature bits if any of these counters are present. Signed-off-by: Rohit Mathew Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 22 ++++++++++++++++++++++ drivers/platform/mpam/mpam_internal.h | 7 +++++++ 2 files changed, 29 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 930e14471378..c2c5dfa77083 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -653,6 +653,7 @@ static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) pr_err_once("Counters are not usable because not-ready timeout was not provided by firmware."); } if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_MBWU, msmon_features)) { + bool has_long; u32 mbwumonidr = mpam_read_partsel_reg(msc, MBWUMON_IDR); props->num_mbwu_mon = FIELD_GET(MPAMF_MBWUMON_IDR_NUM_MON, mbwumonidr); @@ -661,6 +662,27 @@ static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) if (FIELD_GET(MPAMF_MBWUMON_IDR_HAS_RWBW, mbwumonidr)) mpam_set_feature(mpam_feat_msmon_mbwu_rwbw, props); + + /* + * Treat long counter and its extension, lwd as mutually + * exclusive feature bits. Though these are dependent + * fields at the implementation level, there would never + * be a need for mpam_feat_msmon_mbwu_44counter (long + * counter) and mpam_feat_msmon_mbwu_63counter (lwd) + * bits to be set together. + * + * mpam_feat_msmon_mbwu isn't treated as an exclusive + * bit as this feature bit would be used as the "front + * facing feature bit" for any checks related to mbwu + * monitors. 
+ */ + has_long = FIELD_GET(MPAMF_MBWUMON_IDR_HAS_LONG, mbwumonidr); + if (props->num_mbwu_mon && has_long) { + if (FIELD_GET(MPAMF_MBWUMON_IDR_LWD, mbwumonidr)) + mpam_set_feature(mpam_feat_msmon_mbwu_63counter, props); + else + mpam_set_feature(mpam_feat_msmon_mbwu_44counter, props); + } } } diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index e546a8612dab..99790ba74768 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -88,7 +88,14 @@ enum mpam_device_features { mpam_feat_msmon, mpam_feat_msmon_csu, mpam_feat_msmon_csu_capture, + /* + * Having mpam_feat_msmon_mbwu set doesn't mean the regular 31 bit MBWU + * counter would be used. The exact counter used is decided based on the + * status of mpam_feat_msmon_mbwu_l/mpam_feat_msmon_mbwu_lwd as well. + */ mpam_feat_msmon_mbwu, + mpam_feat_msmon_mbwu_44counter, + mpam_feat_msmon_mbwu_63counter, mpam_feat_msmon_mbwu_capture, mpam_feat_msmon_mbwu_rwbw, mpam_feat_msmon_capt, -- Gitee From 8882eb02a24a45b6adf6b31639440b556ce030cd Mon Sep 17 00:00:00 2001 From: Rohit Mathew Date: Mon, 20 Feb 2023 16:06:39 +0000 Subject: [PATCH 0606/2138] arm_mpam: Use long MBWU counters if supported ANBZ: #8686 commit abab48aae75632bac08e42751734c48c3a024a32 morse-linux. If the 44 bit (long) or 63 bit (LWD) counters are detected on probing the RIS, use long/LWD counter instead of the regular 31 bit mbwu counter. Only 32bit accesses to the MSC are required to be supported by the spec, but these registers are 64bits. The lower half may overflow into the higher half between two 32bit reads. To avoid this, use a helper that reads the top half twice to check for overflow. 
Signed-off-by: Rohit Mathew [morse: merged multiple patches from Rohit] Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 86 ++++++++++++++++++++++++--- drivers/platform/mpam/mpam_internal.h | 7 ++- 2 files changed, 84 insertions(+), 9 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index c2c5dfa77083..7f1f848281be 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -772,6 +772,48 @@ struct mon_read int err; }; +static bool mpam_ris_has_mbwu_long_counter(struct mpam_msc_ris *ris) +{ + return (mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &ris->props) || + mpam_has_feature(mpam_feat_msmon_mbwu_44counter, &ris->props)); +} + +static u64 mpam_msc_read_mbwu_l(struct mpam_msc *msc) +{ + int retry = 3; + u32 mbwu_l_low; + u64 mbwu_l_high1, mbwu_l_high2; + + lockdep_assert_held_once(&msc->mon_sel_lock); + + WARN_ON_ONCE((MSMON_MBWU_L + sizeof(u64)) > msc->mapped_hwpage_sz); + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); + + mbwu_l_high2 = readl_relaxed(msc->mapped_hwpage + MSMON_MBWU_L + 4); + do { + mbwu_l_high1 = mbwu_l_high2; + mbwu_l_low = readl_relaxed(msc->mapped_hwpage + MSMON_MBWU_L); + mbwu_l_high2 = readl_relaxed(msc->mapped_hwpage + MSMON_MBWU_L + 4); + + retry--; + } while (mbwu_l_high1 != mbwu_l_high2 && retry > 0); + + if (mbwu_l_high2 == mbwu_l_high1) + return (mbwu_l_high1 << 32) | mbwu_l_low; + return MSMON___NRDY_L; +} + +static void mpam_msc_zero_mbwu_l(struct mpam_msc *msc) +{ + lockdep_assert_held_once(&msc->mon_sel_lock); + + WARN_ON_ONCE((MSMON_MBWU_L + sizeof(u64)) > msc->mapped_hwpage_sz); + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility)); + + 
writel_relaxed(0, msc->mapped_hwpage + MSMON_MBWU_L); + writel_relaxed(0, msc->mapped_hwpage + MSMON_MBWU_L + 4); +} + static void gen_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val, u32 *flt_val) { @@ -844,7 +886,12 @@ static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val, case mpam_feat_msmon_mbwu: mpam_write_monsel_reg(msc, CFG_MBWU_FLT, flt_val); mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val); - mpam_write_monsel_reg(msc, MBWU, 0); + + if (mpam_ris_has_mbwu_long_counter(m->ris)) + mpam_msc_zero_mbwu_l(m->ris->msc); + else + mpam_write_monsel_reg(msc, MBWU, 0); + mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val|MSMON_CFG_x_CTL_EN); mbwu_state = &m->ris->mbwu_state[m->ctx->mon]; @@ -859,8 +906,13 @@ static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val, static u64 mpam_msmon_overflow_val(struct mpam_msc_ris *ris) { - /* TODO: scaling, and long counters */ - return GENMASK_ULL(30, 0); + /* TODO: implement scaling counters */ + if (mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &ris->props)) + return GENMASK_ULL(62, 0); + else if (mpam_has_feature(mpam_feat_msmon_mbwu_44counter, &ris->props)) + return GENMASK_ULL(43, 0); + else + return GENMASK_ULL(30, 0); } static void __ris_msmon_read(void *arg) @@ -898,9 +950,22 @@ static void __ris_msmon_read(void *arg) now = FIELD_GET(MSMON___VALUE, now); break; case mpam_feat_msmon_mbwu: - now = mpam_read_monsel_reg(msc, MBWU); - nrdy = now & MSMON___NRDY; - now = FIELD_GET(MSMON___VALUE, now); + /* + * If long or lwd counters are supported, use them, else revert + * to the 32 bit counter. 
+ */ + if (mpam_ris_has_mbwu_long_counter(ris)) { + now = mpam_msc_read_mbwu_l(msc); + nrdy = now & MSMON___NRDY_L; + if (mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &ris->props)) + now = FIELD_GET(MSMON___LWD_VALUE, now); + else + now = FIELD_GET(MSMON___L_VALUE, now); + } else { + now = mpam_read_monsel_reg(msc, MBWU); + nrdy = now & MSMON___NRDY; + now = FIELD_GET(MSMON___VALUE, now); + } if (nrdy) break; @@ -1166,8 +1231,13 @@ static int mpam_save_mbwu_state(void *arg) cur_ctl = mpam_read_monsel_reg(msc, CFG_MBWU_CTL); mpam_write_monsel_reg(msc, CFG_MBWU_CTL, 0); - val = mpam_read_monsel_reg(msc, MBWU); - mpam_write_monsel_reg(msc, MBWU, 0); + if (mpam_ris_has_mbwu_long_counter(ris)) { + val = mpam_msc_read_mbwu_l(msc); + mpam_msc_zero_mbwu_l(msc); + } else { + val = mpam_read_monsel_reg(msc, MBWU); + mpam_write_monsel_reg(msc, MBWU, 0); + } cfg->mon = i; cfg->pmg = FIELD_GET(MSMON_CFG_MBWU_FLT_PMG, cur_flt); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 99790ba74768..cafcce3e9efb 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -330,6 +330,8 @@ int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, #define MSMON_CSU_CAPTURE 0x0848 /* last cache-usage value captured */ #define MSMON_MBWU 0x0860 /* current mem-bw usage value */ #define MSMON_MBWU_CAPTURE 0x0868 /* last mem-bw value captured */ +#define MSMON_MBWU_L 0x0880 /* current long mem-bw usage value */ +#define MSMON_MBWU_CAPTURE_L 0x0890 /* last long mem-bw value captured */ #define MSMON_CAPT_EVNT 0x0808 /* signal a capture event */ #define MPAMF_ESR 0x00F8 /* error status register */ #define MPAMF_ECR 0x00F0 /* error control register */ @@ -533,7 +535,10 @@ int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, */ #define MSMON___VALUE GENMASK(30, 0) #define MSMON___NRDY BIT(31) -#define MSMON_MBWU_L_VALUE GENMASK(62, 0) +#define MSMON___NRDY_L BIT(63) +#define 
MSMON___L_VALUE GENMASK(43, 0) +#define MSMON___LWD_VALUE GENMASK(62, 0) + /* * MSMON_CAPT_EVNT - Memory system performance monitoring capture event * generation register -- Gitee From 3330fada012f7016a700880ccf32b4026f08a736 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 10 Sep 2021 12:00:01 +0100 Subject: [PATCH 0607/2138] arm_mpam: Add helper to reset saved mbwu state ANBZ: #8686 commit ca01e27727c44fc0deef570f3e5bc8e05e16e092 morse-linux. resctrl expects to reset the bandwidth counters when the filesystem is mounted. To allow this, add a helper that clears the saved mbwu state. Instead of cross calling to each CPU that can access the component MSC to write to the counter, set a flag that causes it to be zero'd on the next read. This is easily done by forcing a configuration update. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3010 --- drivers/platform/mpam/mpam_devices.c | 44 +++++++++++++++++++++++---- drivers/platform/mpam/mpam_internal.h | 5 ++- 2 files changed, 42 insertions(+), 7 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 7f1f848281be..f9f22d1d698c 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -919,9 +919,11 @@ static void __ris_msmon_read(void *arg) { bool nrdy = false; unsigned long flags; + bool config_mismatch; struct mon_read *m = arg; u64 now, overflow_val = 0; struct mon_cfg *ctx = m->ctx; + bool reset_on_next_read = false; struct mpam_msc_ris *ris = m->ris; struct mpam_msc *msc = m->ris->msc; struct msmon_mbwu_state *mbwu_state; @@ -934,13 +936,24 @@ static void __ris_msmon_read(void *arg) FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx); mpam_write_monsel_reg(msc, CFG_MON_SEL, mon_sel); + if (m->type == mpam_feat_msmon_mbwu) { + 
mbwu_state = &ris->mbwu_state[ctx->mon]; + if (mbwu_state) { + reset_on_next_read = mbwu_state->reset_on_next_read; + mbwu_state->reset_on_next_read = false; + } + } + /* * Read the existing configuration to avoid re-writing the same values. * This saves waiting for 'nrdy' on subsequent reads. */ read_msmon_ctl_flt_vals(m, &cur_ctl, &cur_flt); gen_msmon_ctl_flt_vals(m, &ctl_val, &flt_val); - if (cur_flt != flt_val || cur_ctl != (ctl_val | MSMON_CFG_x_CTL_EN)) + config_mismatch = cur_flt != flt_val || + cur_ctl != (ctl_val | MSMON_CFG_x_CTL_EN); + + if (config_mismatch || reset_on_next_read) write_msmon_ctl_flt_vals(m, ctl_val, flt_val); switch (m->type) { @@ -970,7 +983,6 @@ static void __ris_msmon_read(void *arg) if (nrdy) break; - mbwu_state = &ris->mbwu_state[ctx->mon]; if (!mbwu_state) break; @@ -1064,6 +1076,30 @@ int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, return err; } +void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx) +{ + int idx; + unsigned long flags; + struct mpam_msc *msc; + struct mpam_msc_ris *ris; + + if (!mpam_is_enabled()) + return; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + if (!mpam_has_feature(mpam_feat_msmon_mbwu, &ris->props)) + continue; + + msc = ris->msc; + spin_lock_irqsave(&msc->mon_sel_lock, flags); + ris->mbwu_state[ctx->mon].correction = 0; + ris->mbwu_state[ctx->mon].reset_on_next_read = true; + spin_unlock_irqrestore(&msc->mon_sel_lock, flags); + } + srcu_read_unlock(&mpam_srcu, idx); +} + static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd) { u32 num_words, msb; @@ -1190,8 +1226,6 @@ static int mpam_restore_mbwu_state(void *_ris) struct mon_read mwbu_arg; struct mpam_msc_ris *ris = _ris; - lockdep_assert_held(&ris->msc->lock); - for (i = 0; i < ris->props.num_mbwu_mon; i++) { if (ris->mbwu_state[i].enabled) { mwbu_arg.ris = ris; @@ -1216,8 +1250,6 @@ static int mpam_save_mbwu_state(void *arg) struct mpam_msc 
*msc = ris->msc; struct msmon_mbwu_state *mbwu_state; - lockdep_assert_held(&msc->lock); - for (i = 0; i < ris->props.num_mbwu_mon; i++) { mbwu_state = &ris->mbwu_state[i]; cfg = &mbwu_state->cfg; diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index cafcce3e9efb..3f9478c90faf 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -199,10 +199,12 @@ struct mon_cfg { /* * Changes to enabled and cfg are protected by the msc->lock. - * Changes to prev_val and correction are protected by the msc's mon_sel_lock. + * Changes to reset_on_next_read, prev_val and correction are protected by the + * msc's mon_sel_lock. */ struct msmon_mbwu_state { bool enabled; + bool reset_on_next_read; struct mon_cfg cfg; /* The value last read from the hardware. Used to detect overflow. */ @@ -286,6 +288,7 @@ int mpam_apply_config(struct mpam_component *comp, u16 partid, int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, enum mpam_device_features, u64 *val); +void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx); /* * MPAM MSCs have the following register layout. See: -- Gitee From e236128f84c214a41123267edaac33358c9b6887 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 11 Jun 2019 17:02:09 +0100 Subject: [PATCH 0608/2138] arm_mpam: resctrl: Add boilerplate cpuhp and domain allocation ANBZ: #8686 commit 2d0f0357cb0a8c43d3d037ea9403a863dbb5fe9e morse-linux. resctrl has its own data structures to describe its resources. We can't use these directly as we play tricks with the 'MBA' resource, picking the MPAM controls or monitors that best apply. We may export the same component as both L3 and MBA. Add mpam_resctrl_exports[] as the array of class->resctrl mappings we are exporting, and add the cpuhp hooks that allocate and free the resctrl domain structures. While we're here, plumb in a few other obvious things. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/Makefile | 2 +- drivers/platform/mpam/mpam_devices.c | 12 ++ drivers/platform/mpam/mpam_internal.h | 15 ++ drivers/platform/mpam/mpam_resctrl.c | 237 ++++++++++++++++++++++++++ include/linux/arm_mpam.h | 4 + 5 files changed, 269 insertions(+), 1 deletion(-) create mode 100644 drivers/platform/mpam/mpam_resctrl.c diff --git a/drivers/platform/mpam/Makefile b/drivers/platform/mpam/Makefile index 8ad69bfa2aa2..37693be531c3 100644 --- a/drivers/platform/mpam/Makefile +++ b/drivers/platform/mpam/Makefile @@ -1 +1 @@ -obj-$(CONFIG_ARM_CPU_RESCTRL) += mpam_devices.o +obj-$(CONFIG_ARM_CPU_RESCTRL) += mpam_devices.o mpam_resctrl.o diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index f9f22d1d698c..28f6010df6dc 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -1411,6 +1411,9 @@ static int mpam_cpu_online(unsigned int cpu) } srcu_read_unlock(&mpam_srcu, idx); + if (mpam_is_enabled()) + mpam_resctrl_online_cpu(cpu); + return 0; } @@ -1470,6 +1473,9 @@ static int mpam_cpu_offline(unsigned int cpu) } srcu_read_unlock(&mpam_srcu, idx); + if (mpam_is_enabled()) + mpam_resctrl_offline_cpu(cpu); + return 0; } @@ -2140,6 +2146,12 @@ static void mpam_enable_once(void) mutex_unlock(&mpam_list_lock); cpus_read_unlock(); + if (!err) { + err = mpam_resctrl_setup(); + if (err) + pr_err("Failed to initialise resctrl: %d\n", err); + } + if (err) { schedule_work(&mpam_broken_work); return; diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 3f9478c90faf..c9d9abb87cff 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -239,6 +239,16 @@ struct 
mpam_msc_ris { struct msmon_mbwu_state *mbwu_state; }; +struct mpam_resctrl_dom { + struct mpam_component *comp; + struct rdt_domain resctrl_dom; +}; + +struct mpam_resctrl_res { + struct mpam_class *class; + struct rdt_resource resctrl_res; +}; + static inline int mpam_alloc_csu_mon(struct mpam_class *class) { struct mpam_props *cprops = &class->props; @@ -290,6 +300,11 @@ int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, enum mpam_device_features, u64 *val); void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx); +int mpam_resctrl_online_cpu(unsigned int cpu); +int mpam_resctrl_offline_cpu(unsigned int cpu); + +int mpam_resctrl_setup(void); + /* * MPAM MSCs have the following register layout. See: * Arm Architecture Reference Manual Supplement - Memory System Resource diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c new file mode 100644 index 000000000000..b9c1292ff630 --- /dev/null +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2021 Arm Ltd. + +#define pr_fmt(fmt) "mpam: resctrl: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mpam_internal.h" + +/* + * The classes we've picked to map to resctrl resources. + * Class pointer may be NULL. + */ +static struct mpam_resctrl_res mpam_resctrl_exports[RDT_NUM_RESOURCES]; + +static bool exposed_alloc_capable; +static bool exposed_mon_capable; + +bool resctrl_arch_alloc_capable(void) +{ + return exposed_alloc_capable; +} + +bool resctrl_arch_mon_capable(void) +{ + return exposed_mon_capable; +} + +/* + * MSC may raise an error interrupt if it sees an out or range partid/pmg, + * and go on to truncate the value. Regardless of what the hardware supports, + * only the system wide safe value is safe to use. 
+ */ +u32 resctrl_arch_get_num_closid(struct rdt_resource *ignored) +{ + return min((u32)mpam_partid_max + 1, (u32)RESCTRL_MAX_CLOSID); +} + +struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l) +{ + if (l >= RDT_NUM_RESOURCES) + return NULL; + + return &mpam_resctrl_exports[l].resctrl_res; +} + +static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) +{ + /* TODO: initialise the resctrl resources */ + + return 0; +} + +/* Called with the mpam classes lock held */ +int mpam_resctrl_setup(void) +{ + int err = 0; + struct mpam_resctrl_res *res; + enum resctrl_res_level i; + + cpus_read_lock(); + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = &mpam_resctrl_exports[i]; + INIT_LIST_HEAD(&res->resctrl_res.domains); + INIT_LIST_HEAD(&res->resctrl_res.evt_list); + res->resctrl_res.rid = i; + } + + /* TODO: pick MPAM classes to map to resctrl resources */ + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = &mpam_resctrl_exports[i]; + if (!res->class) + continue; // dummy resource + + err = mpam_resctrl_resource_init(res); + if (err) + break; + } + cpus_read_unlock(); + + if (!err && !exposed_alloc_capable && !exposed_mon_capable) + err = -EOPNOTSUPP; + + if (!err) { + if (!is_power_of_2(mpam_pmg_max + 1)) { + /* + * If not all the partid*pmg values are valid indexes, + * resctrl may allocate pmg that don't exist. This + * should cause an error interrupt. + */ + pr_warn("Number of PMG is not a power of 2! resctrl may misbehave"); + } + err = resctrl_init(); + } + + return err; +} + +static struct mpam_resctrl_dom * +mpam_resctrl_alloc_domain(unsigned int cpu, struct mpam_resctrl_res *res) +{ + struct mpam_resctrl_dom *dom; + struct mpam_class *class = res->class; + struct mpam_component *comp_iter, *comp; + + comp = NULL; + list_for_each_entry(comp_iter, &class->components, class_list) { + if (cpumask_test_cpu(cpu, &comp_iter->affinity)) { + comp = comp_iter; + break; + } + } + + /* cpu with unknown exported component? 
*/ + if (WARN_ON_ONCE(!comp)) + return ERR_PTR(-EINVAL); + + dom = kzalloc_node(sizeof(*dom), GFP_KERNEL, cpu_to_node(cpu)); + if (!dom) + return ERR_PTR(-ENOMEM); + + dom->comp = comp; + INIT_LIST_HEAD(&dom->resctrl_dom.list); + dom->resctrl_dom.id = comp->comp_id; + cpumask_set_cpu(cpu, &dom->resctrl_dom.cpu_mask); + + /* TODO: this list should be sorted */ + list_add_tail(&dom->resctrl_dom.list, &res->resctrl_res.domains); + + return dom; +} + +/* Like resctrl_get_domain_from_cpu(), but for offline CPUs */ +static struct mpam_resctrl_dom * +mpam_get_domain_from_cpu(int cpu, struct mpam_resctrl_res *res) +{ + struct rdt_domain *d; + struct mpam_resctrl_dom *dom; + + lockdep_assert_cpus_held(); + + list_for_each_entry(d, &res->resctrl_res.domains, list) { + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + if (cpumask_test_cpu(cpu, &dom->comp->affinity)) + return dom; + } + + return NULL; +} + +struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id) +{ + struct rdt_domain *d; + struct mpam_resctrl_dom *dom; + + lockdep_assert_cpus_held(); + + list_for_each_entry(d, &r->domains, list) { + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + if (dom->comp->comp_id == id) + return &dom->resctrl_dom; + } + + return NULL; +} + +int mpam_resctrl_online_cpu(unsigned int cpu) +{ + int i; + struct mpam_resctrl_dom *dom; + struct mpam_resctrl_res *res; + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = &mpam_resctrl_exports[i]; + + if (!res->class) + continue; // dummy_resource; + + dom = mpam_get_domain_from_cpu(cpu, res); + if (dom) { + cpumask_set_cpu(cpu, &dom->resctrl_dom.cpu_mask); + continue; + } + + dom = mpam_resctrl_alloc_domain(cpu, res); + if (IS_ERR(dom)) + return PTR_ERR(dom); + } + + return 0; +} + +int mpam_resctrl_offline_cpu(unsigned int cpu) +{ + int i; + struct rdt_domain *d; + struct mpam_resctrl_res *res; + struct mpam_resctrl_dom *dom; + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = 
&mpam_resctrl_exports[i]; + + if (!res->class) + continue; // dummy resource + + d = resctrl_get_domain_from_cpu(cpu, &res->resctrl_res); + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + /* The last one standing was ahead of us... */ + if (WARN_ON_ONCE(!d)) + continue; + + cpumask_clear_cpu(cpu, &d->cpu_mask); + + if (!cpumask_empty(&d->cpu_mask)) + continue; + + list_del(&d->list); + kfree(dom); + } + + return 0; +} diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 40e09b4d236b..27c3ad9912ef 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -39,4 +39,8 @@ int mpam_register_requestor(u16 partid_max, u8 pmg_max); int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, enum mpam_class_types type, u8 class_id, int component_id); + +bool resctrl_arch_alloc_capable(void); +bool resctrl_arch_mon_capable(void); + #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 8925b90f71fce2f414a57524495014a132140c77 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 12 Jun 2019 13:51:30 +0100 Subject: [PATCH 0609/2138] arm_mpam: resctrl: Pick the caches we will use as resctrl resources ANBZ: #8686 commit 6e4fe28163a54f1426e3433c51c462f6424f35f2 morse-linux. Systems with MPAM support may have a variety of control types at any point of their system layout. We can only expose certain types of control, and only if they exist at particular locations. Start with the well-known caches. These have to be depth 2 or 3 and support MPAM's cache portion bitmap controls, with a number of portions fewer than resctrl's limit. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 134 ++++++++++++++++++++++++++- include/linux/arm_mpam.h | 7 ++ 2 files changed, 137 insertions(+), 4 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index b9c1292ff630..d294db50a651 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -27,6 +27,7 @@ static struct mpam_resctrl_res mpam_resctrl_exports[RDT_NUM_RESOURCES]; static bool exposed_alloc_capable; static bool exposed_mon_capable; +static struct mpam_class *mbm_local_class; bool resctrl_arch_alloc_capable(void) { @@ -38,6 +39,11 @@ bool resctrl_arch_mon_capable(void) return exposed_mon_capable; } +bool resctrl_arch_is_mbm_local_enabled(void) +{ + return mbm_local_class; +} + /* * MSC may raise an error interrupt if it sees an out or range partid/pmg, * and go on to truncate the value. Regardless of what the hardware supports, @@ -56,14 +62,133 @@ struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l) return &mpam_resctrl_exports[l].resctrl_res; } +static bool cache_has_usable_cpor(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_cpor_part, cprops)) + return false; + + /* TODO: Scaling is not yet supported */ + return (class->props.cpbm_wd <= RESCTRL_MAX_CBM); +} + +static bool cache_has_usable_csu(struct mpam_class *class) +{ + struct mpam_props *cprops; + + if (!class) + return false; + + cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_csu, cprops)) + return false; + + /* + * CSU counters settle on the value, so we can get away with + * having only one. 
+ */ + if (!cprops->num_csu_mon) + return false; + + return (mpam_partid_max > 1) || (mpam_pmg_max != 0); +} + +bool resctrl_arch_is_llc_occupancy_enabled(void) +{ + return cache_has_usable_csu(mpam_resctrl_exports[RDT_RESOURCE_L3].class); +} + +/* Test whether we can export MPAM_CLASS_CACHE:{2,3}? */ +static void mpam_resctrl_pick_caches(void) +{ + int idx; + struct mpam_class *class; + struct mpam_resctrl_res *res; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(class, &mpam_classes, classes_list) { + bool has_cpor = cache_has_usable_cpor(class); + + if (class->type != MPAM_CLASS_CACHE) { + pr_debug("pick_caches: Class is not a cache\n"); + continue; + } + + if (class->level != 2 && class->level != 3) { + pr_debug("pick_caches: not L2 or L3\n"); + continue; + } + + if (class->level == 2 && !has_cpor) { + pr_debug("pick_caches: L2 missing CPOR\n"); + continue; + } else if (!has_cpor && !cache_has_usable_csu(class)) { + pr_debug("pick_caches: Cache misses CPOR and CSU\n"); + continue; + } + + if (!cpumask_equal(&class->affinity, cpu_possible_mask)) { + pr_debug("pick_caches: Class has missing CPUs\n"); + continue; + } + + if (class->level == 2) { + res = &mpam_resctrl_exports[RDT_RESOURCE_L2]; + res->resctrl_res.name = "L2"; + } else { + res = &mpam_resctrl_exports[RDT_RESOURCE_L3]; + res->resctrl_res.name = "L3"; + } + res->class = class; + } + srcu_read_unlock(&mpam_srcu, idx); +} + static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) { - /* TODO: initialise the resctrl resources */ + struct mpam_class *class = res->class; + struct rdt_resource *r = &res->resctrl_res; + + /* Is this one of the two well-known caches? 
*/ + if (res->resctrl_res.rid == RDT_RESOURCE_L2 || + res->resctrl_res.rid == RDT_RESOURCE_L3) { + /* TODO: Scaling is not yet supported */ + r->cache.cbm_len = class->props.cpbm_wd; + r->cache.arch_has_sparse_bitmasks = true; + + /* mpam_devices will reject empty bitmaps */ + r->cache.min_cbm_bits = 1; + + /* TODO: kill these properties off as they are derivatives */ + r->format_str = "%d=%0*x"; + r->fflags = RFTYPE_RES_CACHE; + r->default_ctrl = BIT_MASK(class->props.cpbm_wd) - 1; + r->data_width = (class->props.cpbm_wd + 3) / 4; + + /* + * Which bits are shared with other ...things... + * Unknown devices use partid-0 which uses all the bitmap + * fields. Until we configured the SMMU and GIC not to do this + * 'all the bits' is the correct answer here. + */ + r->cache.shareable_bits = r->default_ctrl; + + if (mpam_has_feature(mpam_feat_cpor_part, &class->props)) { + r->alloc_capable = true; + exposed_alloc_capable = true; + } + + if (class->level == 3 && cache_has_usable_csu(class)) { + r->mon_capable = true; + exposed_mon_capable = true; + } + } return 0; } -/* Called with the mpam classes lock held */ int mpam_resctrl_setup(void) { int err = 0; @@ -78,7 +203,7 @@ int mpam_resctrl_setup(void) res->resctrl_res.rid = i; } - /* TODO: pick MPAM classes to map to resctrl resources */ + mpam_resctrl_pick_caches(); for (i = 0; i < RDT_NUM_RESOURCES; i++) { res = &mpam_resctrl_exports[i]; @@ -103,7 +228,8 @@ int mpam_resctrl_setup(void) */ pr_warn("Number of PMG is not a power of 2! 
resctrl may misbehave"); } - err = resctrl_init(); + + /* TODO: call resctrl_init() */ } return err; diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 27c3ad9912ef..576bb97fa552 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -42,5 +42,12 @@ int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, bool resctrl_arch_alloc_capable(void); bool resctrl_arch_mon_capable(void); +bool resctrl_arch_is_llc_occupancy_enabled(void); +bool resctrl_arch_is_mbm_local_enabled(void); + +static inline bool resctrl_arch_is_mbm_total_enabled(void) +{ + return false; +} #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From bad78176cd70ee84dcd37e2189d11da47245312c Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 20 Aug 2021 15:28:42 +0100 Subject: [PATCH 0610/2138] arm_mpam: resctrl: Pick a value for num_rmid ANBZ: #8686 commit 8cf2449dc1536390ca1e875c5f39648cc711b0f4 morse-linux. After the changes to resctrl to support MPAM, num_rmid is only used as a value that is unfortunately exposed to user-space. For MPAM, this value doesn't mean anything, and whatever value we do expose will be wrong for some use cases. User-space may expect it can use this value to know how many 'extra' monitor groups it can create. e.g. on x86 if num_closid=4, num_rmid=8, then a total of 4 monitor groups can be created. If num_rmid were 2, then only 2 control groups could be created. For MPAM the number of pmg is very likely to be smaller than the number of partid, but this doesn't restrict the creation of control groups, as each control group has its own pmg space. Pick 1 if monitoring is supported. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index d294db50a651..f71f8c466817 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -180,10 +180,22 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) exposed_alloc_capable = true; } - if (class->level == 3 && cache_has_usable_csu(class)) { + if (class->level == 3 && cache_has_usable_csu(class)) r->mon_capable = true; - exposed_mon_capable = true; - } + } + + if (r->mon_capable) { + exposed_mon_capable = true; + + /* + * Unfortunately, num_rmid doesn't mean anything for + * mpam, and its exposed to user-space! + * num-rmid is supposed to mean the number of groups + * that can be created, both control or monitor groups. + * For mpam, each control group has its own pmg/rmid + * space. + */ + r->num_rmid = 1; } return 0; -- Gitee From 0ae68e9b9bb7811b4e46692d472b3c2929bff0c0 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 4 Mar 2019 15:15:25 +0000 Subject: [PATCH 0611/2138] arm_mpam: resctrl: Implement resctrl_arch_reset_resources() ANBZ: #8686 commit 57dd4f86c8c2cc7797427e9cf4120f6a385bd05c morse-linux. We already have a helper for resetting an mpam class. Hook it up to resctrl_arch_reset_resources(). 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_devices.c | 2 +- drivers/platform/mpam/mpam_internal.h | 2 ++ drivers/platform/mpam/mpam_resctrl.c | 27 +++++++++++++++++++++++++++ include/linux/arm_mpam.h | 3 +++ 4 files changed, 33 insertions(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 28f6010df6dc..13f48878c3d5 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -2177,7 +2177,7 @@ static void mpam_enable_once(void) READ_ONCE(mpam_partid_max) + 1, mpam_pmg_max + 1); } -static void mpam_reset_class(struct mpam_class *class) +void mpam_reset_class(struct mpam_class *class) { int idx; struct mpam_msc_ris *ris; diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index c9d9abb87cff..612d5f8c0568 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -293,6 +293,8 @@ extern u8 mpam_pmg_max; void mpam_enable(struct work_struct *work); void mpam_disable(struct work_struct *work); +void mpam_reset_class(struct mpam_class *class); + int mpam_apply_config(struct mpam_component *comp, u16 partid, struct mpam_config *cfg); diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index f71f8c466817..82325956a938 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -247,6 +247,33 @@ int mpam_resctrl_setup(void) return err; } +void resctrl_arch_reset_resources(void) +{ + int i, idx; + struct mpam_class *class; + struct mpam_resctrl_res *res; + + lockdep_assert_cpus_held(); + + if (!mpam_is_enabled()) + return; + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = 
&mpam_resctrl_exports[i]; + + if (!res->class) + continue; // dummy resource + + if (!res->resctrl_res.alloc_capable) + continue; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(class, &mpam_classes, classes_list) + mpam_reset_class(class); + srcu_read_unlock(&mpam_srcu, idx); + } +} + static struct mpam_resctrl_dom * mpam_resctrl_alloc_domain(unsigned int cpu, struct mpam_resctrl_res *res) { diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 576bb97fa552..97d4c8f076e4 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -50,4 +50,7 @@ static inline bool resctrl_arch_is_mbm_total_enabled(void) return false; } +/* reset cached configurations, then all devices */ +void resctrl_arch_reset_resources(void); + #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 7a4d967416e0edee2d07c888cfa38a0fe96814f1 Mon Sep 17 00:00:00 2001 From: James Morse Date: Mon, 4 Mar 2019 14:34:44 +0000 Subject: [PATCH 0612/2138] arm_mpam: resctrl: Add resctrl_arch_get_config() ANBZ: #8686 commit dd4535f45deab7aeff4068d61963a9b2cecda7eb morse-linux. Implement resctrl_arch_get_config() by testing the configuration for a CPOR bitmap. For any other configuration type return the default. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_devices.c | 3 ++ drivers/platform/mpam/mpam_resctrl.c | 44 ++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 13f48878c3d5..5ebb944ad9fc 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -2302,6 +2302,9 @@ int mpam_apply_config(struct mpam_component *comp, u16 partid, lockdep_assert_cpus_held(); + if (!memcmp(&comp->cfg[partid], cfg, sizeof(*cfg))) + return 0; + comp->cfg[partid] = *cfg; arg.comp = comp; arg.partid = partid; diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 82325956a938..cda8f08c0819 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -247,6 +247,50 @@ int mpam_resctrl_setup(void) return err; } +u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, enum resctrl_conf_type type) +{ + u32 partid; + struct mpam_config *cfg; + struct mpam_props *cprops; + struct mpam_resctrl_res *res; + struct mpam_resctrl_dom *dom; + enum mpam_device_features configured_by; + + lockdep_assert_cpus_held(); + + if (!mpam_is_enabled()) + return r->default_ctrl; + + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + cprops = &res->class->props; + + partid = resctrl_get_config_index(closid, type); + cfg = &dom->comp->cfg[partid]; + + switch (r->rid) { + case RDT_RESOURCE_L2: + case RDT_RESOURCE_L3: + configured_by = mpam_feat_cpor_part; + break; + default: + return -EINVAL; + } + + if (!r->alloc_capable || partid >= resctrl_arch_get_num_closid(r) || + 
!mpam_has_feature(configured_by, cfg)) + return r->default_ctrl; + + switch (configured_by) { + case mpam_feat_cpor_part: + /* TODO: Scaling is not yet supported */ + return cfg->cpbm; + default: + return -EINVAL; + } +} + void resctrl_arch_reset_resources(void) { int i, idx; -- Gitee From 1a4b54bbf70adc9fd4d6c2fa1c03a9b8928c0994 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 21 May 2021 12:19:36 +0100 Subject: [PATCH 0613/2138] arm_mpam: resctrl: Implement helpers to update configuration ANBZ: #8686 commit 72b1c4d7c4157d9d5ce7c20ea696630ff7cc5426 morse-linux. resctrl has two helpers for updating the configuration. resctrl_arch_update_one() updates a single value, and is used by the software-controller to apply feedback to the bandwidth controls, it has to be called on one of the CPUs in the resctrl:domain. resctrl_arch_update_domains() copies multiple staged configurations, it can be called from anywhere. Both helpers should update any changes to the underlying hardware. Implement resctrl_arch_update_domains() to use resctrl_arch_update_one(), which doesn't depend on being called on the right CPU. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_internal.h | 7 +-- drivers/platform/mpam/mpam_resctrl.c | 65 +++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 6 deletions(-) diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 612d5f8c0568..36c49cfb9271 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -119,12 +119,7 @@ struct mpam_props }; #define mpam_has_feature(_feat, x) ((1<<_feat) & (x)->features) - -static inline void mpam_set_feature(enum mpam_device_features feat, - struct mpam_props *props) -{ - props->features |= (1<features |= (1<<_feat)) static inline void mpam_clear_feature(enum mpam_device_features feat, mpam_features_t *supported) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index cda8f08c0819..ebd8aef84679 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -291,6 +291,71 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, } } +int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, enum resctrl_conf_type t, u32 cfg_val) +{ + u32 partid; + struct mpam_config cfg; + struct mpam_props *cprops; + struct mpam_resctrl_res *res; + struct mpam_resctrl_dom *dom; + + lockdep_assert_cpus_held(); + lockdep_assert_irqs_enabled(); + + /* + * NOTE: don't check the CPU as mpam_apply_config() doesn't care, + * and resctrl_arch_update_domains() depends on this. 
+ */ + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + cprops = &res->class->props; + + partid = resctrl_get_config_index(closid, t); + if (!r->alloc_capable || partid >= resctrl_arch_get_num_closid(r)) + return -EINVAL; + + switch (r->rid) { + case RDT_RESOURCE_L2: + case RDT_RESOURCE_L3: + /* TODO: Scaling is not yet supported */ + cfg.cpbm = cfg_val; + mpam_set_feature(mpam_feat_cpor_part, &cfg); + break; + default: + return -EINVAL; + } + + return mpam_apply_config(dom->comp, partid, &cfg); +} + +/* TODO: this is IPI heavy */ +int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) +{ + int err = 0; + struct rdt_domain *d; + enum resctrl_conf_type t; + struct resctrl_staged_config *cfg; + + lockdep_assert_cpus_held(); + lockdep_assert_irqs_enabled(); + + list_for_each_entry(d, &r->domains, list) { + for (t = 0; t < CDP_NUM_TYPES; t++) { + cfg = &d->staged_config[t]; + if (!cfg->have_new_ctrl) + continue; + + err = resctrl_arch_update_one(r, d, closid, t, + cfg->new_ctrl); + if (err) + return err; + } + } + + return err; +} + void resctrl_arch_reset_resources(void) { int i, idx; -- Gitee From 8fd4cfe94a9c6d4a4cea1c81125321167a336f74 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 25 Jun 2021 17:19:16 +0100 Subject: [PATCH 0614/2138] arm_mpam: resctrl: Add CDP emulation ANBZ: #8686 commit 72b4ce74ed8ed79999a91b088c926dddc0667941 morse-linux. Intel RDT's CDP feature allows the cache to use a different control value depending on whether the access was for instruction fetch or a data access. MPAM's equivalent feature is the other way up: the CPU assigns a different partid label to traffic depending on whether it was instruction fetch or a data access, which causes the cache to use a different control value based solely on the partid. MPAM can emulate CDP, with the side effect that the alternative partid is seen by all MSC, it can't be enabled per-MSC. 
Add the resctrl hooks to turn this on or off. Add the helpers that match a closid against a task, which need to be aware that the value written to hardware is not the same as the one resctrl is using. The context switch code needs to match the default resctrl group's value against a variable, as this value changes depending on whether CDP is in use. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- arch/arm64/include/asm/mpam.h | 3 +- drivers/platform/mpam/mpam_resctrl.c | 62 ++++++++++++++++++++++++++++ include/linux/arm_mpam.h | 13 ++++++ 3 files changed, 77 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h index 1d81a6f26acd..edae8c98fa28 100644 --- a/arch/arm64/include/asm/mpam.h +++ b/arch/arm64/include/asm/mpam.h @@ -4,6 +4,7 @@ #ifndef __ASM__MPAM_H #define __ASM__MPAM_H +#include #include #include #include @@ -108,7 +109,7 @@ static inline void mpam_thread_switch(struct task_struct *tsk) !static_branch_likely(&mpam_enabled)) return; - if (!regval) + if (regval == READ_ONCE(mpam_resctrl_default_group)) regval = READ_ONCE(per_cpu(arm64_mpam_default, cpu)); oldregval = READ_ONCE(per_cpu(arm64_mpam_current, cpu)); diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index ebd8aef84679..64372f5bf380 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -19,6 +19,8 @@ #include "mpam_internal.h" +u64 mpam_resctrl_default_group; + /* * The classes we've picked to map to resctrl resources. * Class pointer may be NULL. @@ -29,6 +31,12 @@ static bool exposed_alloc_capable; static bool exposed_mon_capable; static struct mpam_class *mbm_local_class; +/* + * MPAM emulates CDP by setting different PARTID in the I/D fields of MPAM1_EL1. 
+ * This applies globally to all traffic the CPU generates. + */ +static bool cdp_enabled; + bool resctrl_arch_alloc_capable(void) { return exposed_alloc_capable; @@ -44,6 +52,36 @@ bool resctrl_arch_is_mbm_local_enabled(void) return mbm_local_class; } +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level ignored) +{ + return cdp_enabled; +} + +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level ignored, bool enable) +{ + u64 regval; + u32 partid, partid_i, partid_d; + + cdp_enabled = enable; + + partid = RESCTRL_RESERVED_CLOSID; + + if (enable) { + partid_d = resctrl_get_config_index(partid, CDP_CODE); + partid_i = resctrl_get_config_index(partid, CDP_DATA); + regval = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid_d) | + FIELD_PREP(MPAM_SYSREG_PARTID_I, partid_i); + + } else { + regval = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid) | + FIELD_PREP(MPAM_SYSREG_PARTID_I, partid); + } + + WRITE_ONCE(mpam_resctrl_default_group, regval); + + return 0; +} + /* * MSC may raise an error interrupt if it sees an out or range partid/pmg, * and go on to truncate the value. 
Regardless of what the hardware supports, @@ -54,6 +92,30 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *ignored) return min((u32)mpam_partid_max + 1, (u32)RESCTRL_MAX_CLOSID); } +bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid) +{ + u64 regval = mpam_get_regval(tsk); + u32 tsk_closid = FIELD_GET(MPAM_SYSREG_PARTID_D, regval); + + if (cdp_enabled) + tsk_closid >>= 1; + + return tsk_closid == closid; +} + +/* The task's pmg is not unique, the partid must be considered too */ +bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 closid, u32 rmid) +{ + u64 regval = mpam_get_regval(tsk); + u32 tsk_closid = FIELD_GET(MPAM_SYSREG_PARTID_D, regval); + u32 tsk_rmid = FIELD_GET(MPAM_SYSREG_PMG_D, regval); + + if (cdp_enabled) + tsk_closid >>= 1; + + return (tsk_closid == closid) && (tsk_rmid == rmid); +} + struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l) { if (l >= RDT_NUM_RESOURCES) diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 97d4c8f076e4..e3921b0ab836 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -5,8 +5,17 @@ #define __LINUX_ARM_MPAM_H #include +#include #include +/* + * The value of the MPAM1_EL1 sysreg when a task is in the default group. + * This is used by the context switch code to use the resctrl CPU property + * instead. The value is modified when CDP is enabled/disabled by mounting + * the resctrl filesystem. 
+ */ +extern u64 mpam_resctrl_default_group; + struct mpam_msc; enum mpam_msc_iface { @@ -53,4 +62,8 @@ static inline bool resctrl_arch_is_mbm_total_enabled(void) /* reset cached configurations, then all devices */ void resctrl_arch_reset_resources(void); +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level ignored); +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level ignored, bool enable); +bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid); +bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 closid, u32 rmid); #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 7c01191e5a8685d9d525b48a5cf637463af347ac Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 25 Jun 2021 17:16:00 +0100 Subject: [PATCH 0615/2138] arm64: mpam: Add helpers to change a tasks and cpu mpam partid/pmg values ANBZ: #8686 commit e2951d7d49e760dc0c759775950ad9ae652cd59d morse-linux. Care must be taken when modifying the partid and pmg of a task, as writing these values may race with the task being scheduled in, and reading the modified values. Add helpers to set the task properties, and the cpu default value, and add the plumbing to the mpam driver that lets resctrl use them. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] [ use WARN_ON_ONCE() instead of BUG_ON() to check the validity of closid and rmid ] [ rename expired resctrl_sched_in() to resctrl_arch_sched_in() ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- arch/arm64/include/asm/mpam.h | 44 +++++++++++++++++++++ drivers/platform/mpam/mpam_resctrl.c | 58 ++++++++++++++++++++++++++++ include/linux/arm_mpam.h | 7 ++++ 3 files changed, 109 insertions(+) diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h index edae8c98fa28..9abe1fe58c34 100644 --- a/arch/arm64/include/asm/mpam.h +++ b/arch/arm64/include/asm/mpam.h @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -90,6 +91,35 @@ static inline void __init __enable_mpam_hcr(void) * A value in struct thread_info is used instead of struct task_struct as the * cpu's u64 register format is used, but struct task_struct has two u32'. 
*/ + static inline void mpam_set_cpu_defaults(int cpu, u16 partid_d, u16 partid_i, + u8 pmg_d, u8 pmg_i) +{ + u64 default_val; + + default_val = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid_d); + default_val |= FIELD_PREP(MPAM_SYSREG_PARTID_I, partid_i); + default_val |= FIELD_PREP(MPAM_SYSREG_PMG_D, pmg_d); + default_val |= FIELD_PREP(MPAM_SYSREG_PMG_I, pmg_i); + + WRITE_ONCE(per_cpu(arm64_mpam_default, cpu), default_val); +} + +static inline void mpam_set_task_partid_pmg(struct task_struct *tsk, + u16 partid_d, u16 partid_i, + u8 pmg_d, u8 pmg_i) +{ +#ifdef CONFIG_ARM64_MPAM + u64 regval; + + regval = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid_d); + regval |= FIELD_PREP(MPAM_SYSREG_PARTID_I, partid_i); + regval |= FIELD_PREP(MPAM_SYSREG_PMG_D, pmg_d); + regval |= FIELD_PREP(MPAM_SYSREG_PMG_I, pmg_i); + + WRITE_ONCE(task_thread_info(tsk)->mpam_partid_pmg, regval); +#endif +} + static inline u64 mpam_get_regval(struct task_struct *tsk) { #ifdef CONFIG_ARM64_MPAM @@ -99,6 +129,20 @@ static inline u64 mpam_get_regval(struct task_struct *tsk) #endif } +static inline void resctrl_arch_set_rmid(struct task_struct *tsk, u32 rmid) +{ +#ifdef CONFIG_ARM64_MPAM + u64 regval = mpam_get_regval(tsk); + + regval &= ~MPAM_SYSREG_PMG_D; + regval &= ~MPAM_SYSREG_PMG_I; + regval |= FIELD_PREP(MPAM_SYSREG_PMG_D, rmid); + regval |= FIELD_PREP(MPAM_SYSREG_PMG_I, rmid); + + WRITE_ONCE(task_thread_info(tsk)->mpam_partid_pmg, regval); +#endif +} + static inline void mpam_thread_switch(struct task_struct *tsk) { u64 oldregval; diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 64372f5bf380..225565ec4b04 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -92,6 +93,63 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *ignored) return min((u32)mpam_partid_max + 1, (u32)RESCTRL_MAX_CLOSID); } +void resctrl_arch_sched_in(struct 
task_struct *tsk) +{ + lockdep_assert_preemption_disabled(); + + mpam_thread_switch(tsk); +} + +void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, u32 pmg) +{ + if (WARN_ON_ONCE(closid > U16_MAX) || WARN_ON_ONCE(pmg > U8_MAX)) + return; + + if (!cdp_enabled) { + mpam_set_cpu_defaults(cpu, closid, closid, pmg, pmg); + } else { + /* + * When CDP is enabled, resctrl halves the closid range and we + * use odd/even partid for one closid. + */ + u32 partid_d = resctrl_get_config_index(closid, CDP_DATA); + u32 partid_i = resctrl_get_config_index(closid, CDP_CODE); + + mpam_set_cpu_defaults(cpu, partid_d, partid_i, pmg, pmg); + } +} + +void resctrl_arch_sync_cpu_defaults(void *info) +{ + struct resctrl_cpu_sync *r = info; + + lockdep_assert_preemption_disabled(); + + if (r) { + resctrl_arch_set_cpu_default_closid_rmid(smp_processor_id(), + r->closid, r->rmid); + } + + resctrl_arch_sched_in(current); +} + +void resctrl_arch_set_closid_rmid(struct task_struct *tsk, u32 closid, u32 rmid) +{ + + + if (WARN_ON_ONCE(closid > U16_MAX) || WARN_ON_ONCE(rmid > U8_MAX)) + return; + + if (!cdp_enabled) { + mpam_set_task_partid_pmg(tsk, closid, closid, rmid, rmid); + } else { + u32 partid_d = resctrl_get_config_index(closid, CDP_DATA); + u32 partid_i = resctrl_get_config_index(closid, CDP_CODE); + + mpam_set_task_partid_pmg(tsk, partid_d, partid_i, rmid, rmid); + } +} + bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid) { u64 regval = mpam_get_regval(tsk); diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index e3921b0ab836..95a960b6f9d7 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -16,6 +16,8 @@ */ extern u64 mpam_resctrl_default_group; +#include + struct mpam_msc; enum mpam_msc_iface { @@ -66,4 +68,9 @@ bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level ignored); int resctrl_arch_set_cdp_enabled(enum resctrl_res_level ignored, bool enable); bool resctrl_arch_match_closid(struct task_struct *tsk, u32 
closid); bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 closid, u32 rmid); +void resctrl_arch_set_cpu_default_closid(int cpu, u32 closid); +void resctrl_arch_set_closid_rmid(struct task_struct *tsk, u32 closid, u32 rmid); +void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, u32 pmg); +void resctrl_arch_sched_in(struct task_struct *tsk); + #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From dcd6fe6c730cecbc077ee3892094353fe32429ab Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Jul 2021 18:45:14 +0100 Subject: [PATCH 0616/2138] arm_mpam: resctrl: Add rmid index helpers ANBZ: #8686 commit f018052a6be7a89ea54c955f696027ecd885b248 morse-linux. Because MPAM's pmg aren't identical to RDT's rmid, resctrl handles some datastructrues by index. This allows x86 to map indexes to RMID, and MPAM to map them to partid-and-pmg. Add the helpers to do this. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] [ use WARN_ON_ONCE() instead of BUG_ON() to check the validity of closid_shift ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 30 ++++++++++++++++++++++++++++ include/linux/arm_mpam.h | 3 +++ 2 files changed, 33 insertions(+) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 225565ec4b04..1efefbf1313b 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -93,6 +93,36 @@ u32 resctrl_arch_get_num_closid(struct rdt_resource *ignored) return min((u32)mpam_partid_max + 1, (u32)RESCTRL_MAX_CLOSID); } +u32 resctrl_arch_system_num_rmid_idx(void) +{ + u8 closid_shift = fls(mpam_pmg_max); + u32 num_partid = resctrl_arch_get_num_closid(NULL); + + return num_partid << closid_shift; +} + +u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid) +{ + u8 closid_shift = fls(mpam_pmg_max); + 
+ if (WARN_ON_ONCE(closid_shift > 8)) + closid_shift = 8; + + return (closid << closid_shift) | rmid; +} + +void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid) +{ + u8 closid_shift = fls(mpam_pmg_max); + u32 pmg_mask = ~(~0 << closid_shift); + + if (WARN_ON_ONCE(closid_shift > 8)) + closid_shift = 8; + + *closid = idx >> closid_shift; + *rmid = idx & pmg_mask; +} + void resctrl_arch_sched_in(struct task_struct *tsk) { lockdep_assert_preemption_disabled(); diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 95a960b6f9d7..d41891df56d4 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -72,5 +72,8 @@ void resctrl_arch_set_cpu_default_closid(int cpu, u32 closid); void resctrl_arch_set_closid_rmid(struct task_struct *tsk, u32 closid, u32 rmid); void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, u32 pmg); void resctrl_arch_sched_in(struct task_struct *tsk); +u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid); +void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid); +u32 resctrl_arch_system_num_rmid_idx(void); #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 698e0ebe50528298343714a150e0002926540a02 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 27 Jul 2021 18:09:13 +0100 Subject: [PATCH 0617/2138] untested: arm_mpam: resctrl: Add support for MB resource ANBZ: #8686 commit 62e0c4c1568edc593aeb130a3f83e628999df009 morse-linux. resctrl supports 'MB', as a percentage throttling of traffic somewhere after the L3. This is the control that mba_sc uses, so ideally the class chosen should be as close as possible to the counters used for mba_local. MB's percentage control can be backed either with the fixed point fraction MBW_MAX or the bandwidth portion bitmap. Add helper to convert to/from percentages. One problem here is the value written is not the same as the value read back. This is deliberatly made visible to user-space. Another is the MBW_MAX fixed point fraction can't represent 100%. 
This is also exposed to user-space, as otherwise the values for a single-bit system is 100%, 0%, instead of 50%, 0%. The way CDP is emulated means MB controls need programming twice by the resctrl glue, as the bandwidth controls can be applied independently for data or instruction-fetch. This isn't how x86 behaves, and neither user-space nor resctrl support it. CC: Amit Singh Tomar Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 219 ++++++++++++++++++++++++++- 1 file changed, 216 insertions(+), 3 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 1efefbf1313b..b4be7f81e79b 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -53,9 +53,20 @@ bool resctrl_arch_is_mbm_local_enabled(void) return mbm_local_class; } -bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level ignored) +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level rid) { - return cdp_enabled; + switch (rid) { + case RDT_RESOURCE_L2: + case RDT_RESOURCE_L3: + return cdp_enabled; + case RDT_RESOURCE_MBA: + default: + /* + * x86's MBA control doesn't support CDP, so user-space doesn't + * expect it. + */ + return false; + } } int resctrl_arch_set_cdp_enabled(enum resctrl_res_level ignored, bool enable) @@ -83,6 +94,11 @@ int resctrl_arch_set_cdp_enabled(enum resctrl_res_level ignored, bool enable) return 0; } +static bool mpam_resctrl_hide_cdp(enum resctrl_res_level rid) +{ + return cdp_enabled && !resctrl_arch_get_cdp_enabled(rid); +} + /* * MSC may raise an error interrupt if it sees an out or range partid/pmg, * and go on to truncate the value. 
Regardless of what the hardware supports, @@ -250,6 +266,102 @@ bool resctrl_arch_is_llc_occupancy_enabled(void) return cache_has_usable_csu(mpam_resctrl_exports[RDT_RESOURCE_L3].class); } +static bool mba_class_use_mbw_part(struct mpam_props *cprops) +{ + /* TODO: Scaling is not yet supported */ + return (mpam_has_feature(mpam_feat_mbw_part, cprops) && + cprops->mbw_pbm_bits < MAX_MBA_BW); +} + +static bool class_has_usable_mba(struct mpam_props *cprops) +{ + if (mba_class_use_mbw_part(cprops) || + mpam_has_feature(mpam_feat_mbw_max, cprops)) + return true; + + return false; +} + +/* + * Calculate the percentage change from each implemented bit in the control + * This can return 0 when BWA_WD is greater than 6. (100 / (1<<7) == 0) + */ +static u32 get_mba_granularity(struct mpam_props *cprops) +{ + if (mba_class_use_mbw_part(cprops)) { + return MAX_MBA_BW / cprops->mbw_pbm_bits; + } else if (mpam_has_feature(mpam_feat_mbw_max, cprops)) { + /* + * bwa_wd is the number of bits implemented in the 0.xxx + * fixed point fraction. 1 bit is 50%, 2 is 25% etc. 
+ */ + return MAX_MBA_BW / (cprops->bwa_wd + 1); + } + + return 0; +} + +static u32 mbw_pbm_to_percent(unsigned long mbw_pbm, struct mpam_props *cprops) +{ + u32 bit, result = 0, granularity = get_mba_granularity(cprops); + + for_each_set_bit(bit, &mbw_pbm, cprops->mbw_pbm_bits % 32) { + result += granularity; + } + + return result; +} + +static u32 mbw_max_to_percent(u16 mbw_max, struct mpam_props *cprops) +{ + u8 bit; + u32 divisor = 2, value = 0; + + for (bit = 15; bit; bit--) { + if (mbw_max & BIT(bit)) + value += MAX_MBA_BW / divisor; + divisor <<= 1; + } + + return value; +} + +static u32 percent_to_mbw_pbm(u8 pc, struct mpam_props *cprops) +{ + u32 granularity = get_mba_granularity(cprops); + u8 num_bits = pc / granularity; + + if (!num_bits) + return 0; + + /* TODO: pick bits at random to avoid contention */ + return (1 << num_bits) - 1; +} + +static u16 percent_to_mbw_max(u8 pc, struct mpam_props *cprops) +{ + u8 bit; + u32 divisor = 2, value = 0; + + if (WARN_ON_ONCE(cprops->bwa_wd > 15)) + return MAX_MBA_BW; + + for (bit = 15; bit; bit--) { + if (pc >= MAX_MBA_BW / divisor) { + pc -= MAX_MBA_BW / divisor; + value |= BIT(bit); + } + divisor <<= 1; + + if (!pc || !(MAX_MBA_BW / divisor)) + break; + } + + value &= GENMASK(15, 15 - cprops->bwa_wd); + + return value; +} + /* Test whether we can export MPAM_CLASS_CACHE:{2,3}? 
*/ static void mpam_resctrl_pick_caches(void) { @@ -296,6 +408,44 @@ static void mpam_resctrl_pick_caches(void) srcu_read_unlock(&mpam_srcu, idx); } +static void mpam_resctrl_pick_mba(void) +{ + struct mpam_class *class, *candidate_class = NULL; + struct mpam_resctrl_res *res; + int idx; + + lockdep_assert_cpus_held(); + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(class, &mpam_classes, classes_list) { + struct mpam_props *cprops = &class->props; + + if (class->level < 3) + continue; + + if (!class_has_usable_mba(cprops)) + continue; + + if (!cpumask_equal(&class->affinity, cpu_possible_mask)) + continue; + + /* + * mba_sc reads the mbm_local counter, and waggles the MBA controls. + * mbm_local is implicitly part of the L3, pick a resouce to be MBA + * that as close as possible to the L3. + */ + if (!candidate_class || class->level < candidate_class->level) + candidate_class = class; + } + srcu_read_unlock(&mpam_srcu, idx); + + if (candidate_class) { + res = &mpam_resctrl_exports[RDT_RESOURCE_MBA]; + res->class = candidate_class; + res->resctrl_res.name = "MB"; + } +} + static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) { struct mpam_class *class = res->class; @@ -332,6 +482,27 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) if (class->level == 3 && cache_has_usable_csu(class)) r->mon_capable = true; + } else if (res->resctrl_res.rid == RDT_RESOURCE_MBA) { + struct mpam_props *cprops = &class->props; + + /* TODO: kill these properties off as they are derivatives */ + r->format_str = "%d=%0*u"; + r->fflags = RFTYPE_RES_MB; + r->default_ctrl = MAX_MBA_BW; + r->data_width = 3; + + r->membw.delay_linear = true; + r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED; + r->membw.bw_gran = get_mba_granularity(cprops); + + /* Round up to at least 1% */ + if (!r->membw.bw_gran) + r->membw.bw_gran = 1; + + if (class_has_usable_mba(cprops)) { + r->alloc_capable = true; + exposed_alloc_capable = true; + } } if 
(r->mon_capable) { @@ -366,6 +537,7 @@ int mpam_resctrl_setup(void) } mpam_resctrl_pick_caches(); + mpam_resctrl_pick_mba(); for (i = 0; i < RDT_NUM_RESOURCES; i++) { res = &mpam_resctrl_exports[i]; @@ -424,6 +596,15 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, case RDT_RESOURCE_L3: configured_by = mpam_feat_cpor_part; break; + case RDT_RESOURCE_MBA: + if (mba_class_use_mbw_part(cprops)) { + configured_by = mpam_feat_mbw_part; + break; + } else if (mpam_has_feature(mpam_feat_mbw_max, cprops)) { + configured_by = mpam_feat_mbw_max; + break; + } + fallthrough; default: return -EINVAL; } @@ -436,6 +617,11 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, case mpam_feat_cpor_part: /* TODO: Scaling is not yet supported */ return cfg->cpbm; + case mpam_feat_mbw_part: + /* TODO: Scaling is not yet supported */ + return mbw_pbm_to_percent(cfg->mbw_pbm, cprops); + case mpam_feat_mbw_max: + return mbw_max_to_percent(cfg->mbw_max, cprops); default: return -EINVAL; } @@ -444,6 +630,7 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type t, u32 cfg_val) { + int err; u32 partid; struct mpam_config cfg; struct mpam_props *cprops; @@ -472,11 +659,37 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d, cfg.cpbm = cfg_val; mpam_set_feature(mpam_feat_cpor_part, &cfg); break; + case RDT_RESOURCE_MBA: + if (mba_class_use_mbw_part(cprops)) { + cfg.mbw_pbm = percent_to_mbw_pbm(cfg_val, cprops); + mpam_set_feature(mpam_feat_mbw_part, &cfg); + break; + } else if (mpam_has_feature(mpam_feat_mbw_max, cprops)) { + cfg.mbw_max = percent_to_mbw_max(cfg_val, cprops); + mpam_set_feature(mpam_feat_mbw_max, &cfg); + break; + } + fallthrough; default: return -EINVAL; } - return mpam_apply_config(dom->comp, partid, &cfg); + /* + * When CDP is enabled, but the resource doesn't support it, 
we need to + * apply the same configuration to the other partid. + */ + if (mpam_resctrl_hide_cdp(r->rid)) { + partid = resctrl_get_config_index(closid, CDP_CODE); + err = mpam_apply_config(dom->comp, partid, &cfg); + if (err) + return err; + + partid = resctrl_get_config_index(closid, CDP_DATA); + return mpam_apply_config(dom->comp, partid, &cfg); + + } else { + return mpam_apply_config(dom->comp, partid, &cfg); + } } /* TODO: this is IPI heavy */ -- Gitee From a5c55d5578b9996b94c70a9a2cd1552457b25883 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 7 Sep 2021 17:21:42 +0100 Subject: [PATCH 0618/2138] untested: arm_mpam: resctrl: Add support for mbm counters ANBZ: #8686 commit a0ab3a6c26002494e8532b9e01859d36e89ca585 morse-linux. resctrl has two types of counters, NUMA-local and global. MPAM has only bandwidth counters, but the position of the MSC may mean it counts NUMA-local, or global traffic. But the topology information is not available. Apply a hueristic: the L2 or L3 supports bandwidth monitors, these are probably NUMA-local. If the memory controller supports bandwidth monitors, they are probably global. This selection is made from mpam_resctrl_resource_init(), which implies resources that can be used for resctrl controls exist and also have counters. This would be a problem on a platform that only supports monitoring. TODO: Add an extra pass of all classes to find the classes to use as bandwidth counters. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 52 +++++++++++++++++++++++++++- include/linux/arm_mpam.h | 6 +--- 2 files changed, 52 insertions(+), 6 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index b4be7f81e79b..4786be5dc4b4 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -31,6 +31,7 @@ static struct mpam_resctrl_res mpam_resctrl_exports[RDT_NUM_RESOURCES]; static bool exposed_alloc_capable; static bool exposed_mon_capable; static struct mpam_class *mbm_local_class; +static struct mpam_class *mbm_total_class; /* * MPAM emulates CDP by setting different PARTID in the I/D fields of MPAM1_EL1. @@ -53,6 +54,11 @@ bool resctrl_arch_is_mbm_local_enabled(void) return mbm_local_class; } +bool resctrl_arch_is_mbm_total_enabled(void) +{ + return mbm_total_class; +} + bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level rid) { switch (rid) { @@ -266,6 +272,24 @@ bool resctrl_arch_is_llc_occupancy_enabled(void) return cache_has_usable_csu(mpam_resctrl_exports[RDT_RESOURCE_L3].class); } +static bool class_has_usable_mbwu(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_mbwu, cprops)) + return false; + + /* + * resctrl expects the bandwidth counters to be free running, + * which means we need as many monitors as resctrl has + * control/monitor groups. 
+ */ + if (cprops->num_mbwu_mon < resctrl_arch_system_num_rmid_idx()) + return false; + + return (mpam_partid_max > 1) || (mpam_pmg_max != 0); +} + static bool mba_class_use_mbw_part(struct mpam_props *cprops) { /* TODO: Scaling is not yet supported */ @@ -450,10 +474,13 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) { struct mpam_class *class = res->class; struct rdt_resource *r = &res->resctrl_res; + bool has_mbwu = class_has_usable_mbwu(class); /* Is this one of the two well-known caches? */ if (res->resctrl_res.rid == RDT_RESOURCE_L2 || res->resctrl_res.rid == RDT_RESOURCE_L3) { + bool has_csu = cache_has_usable_csu(class); + /* TODO: Scaling is not yet supported */ r->cache.cbm_len = class->props.cpbm_wd; r->cache.arch_has_sparse_bitmasks = true; @@ -480,8 +507,25 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) exposed_alloc_capable = true; } - if (class->level == 3 && cache_has_usable_csu(class)) + /* + * MBWU counters may be 'local' or 'total' depending on where + * they are in the topology. Counters on caches are assumed to + * be local. If it's on the memory controller, its assumed to + * be global. + */ + if (has_mbwu && class->level >= 3) { + mbm_local_class = class; r->mon_capable = true; + } + + /* + * CSU counters only make sense on a cache. The file is called + * llc_occupancy, but its expected to the on the L3. 
+ */ + if (has_csu && class->type == MPAM_CLASS_CACHE && + class->level == 3) { + r->mon_capable = true; + } } else if (res->resctrl_res.rid == RDT_RESOURCE_MBA) { struct mpam_props *cprops = &class->props; @@ -503,6 +547,11 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) r->alloc_capable = true; exposed_alloc_capable = true; } + + if (has_mbwu && class->type == MPAM_CLASS_MEMORY) { + mbm_total_class = class; + r->mon_capable = true; + } } if (r->mon_capable) { @@ -538,6 +587,7 @@ int mpam_resctrl_setup(void) mpam_resctrl_pick_caches(); mpam_resctrl_pick_mba(); + /* TODO: mpam_resctrl_pick_counters(); */ for (i = 0; i < RDT_NUM_RESOURCES; i++) { res = &mpam_resctrl_exports[i]; diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index d41891df56d4..49416f22244a 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -55,11 +55,7 @@ bool resctrl_arch_alloc_capable(void); bool resctrl_arch_mon_capable(void); bool resctrl_arch_is_llc_occupancy_enabled(void); bool resctrl_arch_is_mbm_local_enabled(void); - -static inline bool resctrl_arch_is_mbm_total_enabled(void) -{ - return false; -} +bool resctrl_arch_is_mbm_total_enabled(void); /* reset cached configurations, then all devices */ void resctrl_arch_reset_resources(void); -- Gitee From 8712b50d2b44cc7851e68d654ad6c76d28356a4d Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 25 Jun 2021 13:29:39 +0100 Subject: [PATCH 0619/2138] arm_mpam: resctrl: Allow resctrl to allocate monitors ANBZ: #8686 commit 0e131acd37ebe32adab83e1d2200dd6205b7683f morse-linux. When resctrl wants to read a domain's 'QOS_L3_OCCUP', it needs to allocate a monitor on the corresponding resource. Monitors are allocated by class instead of component. Add helpers to do this. The MBM events depend on having their monitors allocated at init time so that they can be left running. The value USE_RMID_IDX is out of range for a monitor, and is used to indicate this behaviour. 
resctrl_arch_mon_ctx_alloc() is implemented to have a no_wait version and a waitqueue for callers that sleep. The no_wait version will later become an interface for the resctrl_pmu to use. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_internal.h | 3 ++ drivers/platform/mpam/mpam_resctrl.c | 72 +++++++++++++++++++++++++++ include/linux/arm_mpam.h | 4 ++ 3 files changed, 79 insertions(+) diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 36c49cfb9271..8d4bcc1f5642 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -17,6 +17,9 @@ DECLARE_STATIC_KEY_FALSE(mpam_enabled); +/* Value to indicate the allocated monitor is derived from the RMID index. */ +#define USE_RMID_IDX (U16_MAX + 1) + static inline bool mpam_is_enabled(void) { return static_branch_likely(&mpam_enabled); diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 4786be5dc4b4..8cb4707a4117 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -22,6 +22,8 @@ u64 mpam_resctrl_default_group; +DECLARE_WAIT_QUEUE_HEAD(resctrl_mon_ctx_waiters); + /* * The classes we've picked to map to resctrl resources. * Class pointer may be NULL. 
@@ -39,6 +41,10 @@ static struct mpam_class *mbm_total_class; */ static bool cdp_enabled; +/* A dummy mon context to use when the monitors were allocated up front */ +u32 __mon_is_rmid_idx = USE_RMID_IDX; +void *mon_is_rmid_idx = &__mon_is_rmid_idx; + bool resctrl_arch_alloc_capable(void) { return exposed_alloc_capable; @@ -234,6 +240,72 @@ struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l) return &mpam_resctrl_exports[l].resctrl_res; } +static void *resctrl_arch_mon_ctx_alloc_no_wait(struct rdt_resource *r, + int evtid) +{ + struct mpam_resctrl_res *res; + u32 *ret = kmalloc(sizeof(*ret), GFP_KERNEL); + + if (!ret) + return ERR_PTR(-ENOMEM); + + switch (evtid) { + case QOS_L3_OCCUP_EVENT_ID: + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + + *ret = mpam_alloc_csu_mon(res->class); + return ret; + case QOS_L3_MBM_LOCAL_EVENT_ID: + case QOS_L3_MBM_TOTAL_EVENT_ID: + return mon_is_rmid_idx; + } + + return ERR_PTR(-EOPNOTSUPP); +} + +void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid) +{ + DEFINE_WAIT(wait); + void *ret; + + might_sleep(); + + do { + prepare_to_wait(&resctrl_mon_ctx_waiters, &wait, + TASK_INTERRUPTIBLE); + ret = resctrl_arch_mon_ctx_alloc_no_wait(r, evtid); + if (PTR_ERR(ret) == -ENOSPC) + schedule(); + } while (PTR_ERR(ret) == -ENOSPC && !signal_pending(current)); + finish_wait(&resctrl_mon_ctx_waiters, &wait); + + return ret; +} + +void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, + void *arch_mon_ctx) +{ + struct mpam_resctrl_res *res; + u32 mon = *(u32 *)arch_mon_ctx; + + if (mon == USE_RMID_IDX) + return; + kfree(arch_mon_ctx); + arch_mon_ctx = NULL; + + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + + switch (evtid) { + case QOS_L3_OCCUP_EVENT_ID: + mpam_free_csu_mon(res->class, mon); + wake_up(&resctrl_mon_ctx_waiters); + return; + case QOS_L3_MBM_TOTAL_EVENT_ID: + case QOS_L3_MBM_LOCAL_EVENT_ID: + return; + } +} + static bool cache_has_usable_cpor(struct 
mpam_class *class) { struct mpam_props *cprops = &class->props; diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 49416f22244a..88000eb59c6f 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -72,4 +72,8 @@ u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid); void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid); u32 resctrl_arch_system_num_rmid_idx(void); +struct rdt_resource; +void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid); +void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, void *ctx); + #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From a004cf51545c740e80ac207eb0b106ca53a2cfcb Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 25 Jun 2021 16:36:58 +0100 Subject: [PATCH 0620/2138] arm_mpam: resctrl: Add resctrl_arch_rmid_read() and resctrl_arch_reset_rmid() ANBZ: #8686 commit f77ded5f6670639c76c0941dc6943d9e1b261b95 morse-linux. resctrl uses resctrl_arch_rmid_read() to read counters. CDP emulation means the counter may need reading twice to get both the I and D side allocations. The same goes for reset. Add the rounding helper for checking monitor values while we're here. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 79 ++++++++++++++++++++++++++++ include/linux/arm_mpam.h | 4 ++ 2 files changed, 83 insertions(+) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 8cb4707a4117..730ea6c6dffc 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -306,6 +306,85 @@ void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, } } +int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, u32 rmid, enum resctrl_event_id eventid, + u64 *val, void *arch_mon_ctx) +{ + int err; + u64 cdp_val; + struct mon_cfg cfg; + struct mpam_resctrl_dom *dom; + u32 mon = *(u32 *)arch_mon_ctx; + enum mpam_device_features type; + + resctrl_arch_rmid_read_context_check(); + + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + switch (eventid) { + case QOS_L3_OCCUP_EVENT_ID: + type = mpam_feat_msmon_csu; + break; + case QOS_L3_MBM_LOCAL_EVENT_ID: + case QOS_L3_MBM_TOTAL_EVENT_ID: + type = mpam_feat_msmon_mbwu; + break; + default: + return -EINVAL; + } + + cfg.mon = mon; + if (cfg.mon == USE_RMID_IDX) + cfg.mon = resctrl_arch_rmid_idx_encode(closid, rmid); + + cfg.match_pmg = true; + cfg.pmg = rmid; + + if (cdp_enabled) { + cfg.partid = closid << 1; + err = mpam_msmon_read(dom->comp, &cfg, type, val); + if (err) + return err; + + cfg.partid += 1; + err = mpam_msmon_read(dom->comp, &cfg, type, &cdp_val); + if (!err) + *val += cdp_val; + } else { + cfg.partid = closid; + err = mpam_msmon_read(dom->comp, &cfg, type, val); + } + + return err; +} + +void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, u32 rmid, enum resctrl_event_id eventid) +{ + struct 
mon_cfg cfg; + struct mpam_resctrl_dom *dom; + + if (eventid != QOS_L3_MBM_LOCAL_EVENT_ID) + return; + + cfg.mon = resctrl_arch_rmid_idx_encode(closid, rmid); + cfg.match_pmg = true; + cfg.pmg = rmid; + + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + if (cdp_enabled) { + cfg.partid = closid << 1; + mpam_msmon_reset_mbwu(dom->comp, &cfg); + + cfg.partid += 1; + mpam_msmon_reset_mbwu(dom->comp, &cfg); + } else { + cfg.partid = closid; + mpam_msmon_reset_mbwu(dom->comp, &cfg); + } +} + static bool cache_has_usable_cpor(struct mpam_class *class) { struct mpam_props *cprops = &class->props; diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 88000eb59c6f..abadaba0085f 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -50,6 +50,10 @@ int mpam_register_requestor(u16 partid_max, u8 pmg_max); int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, enum mpam_class_types type, u8 class_id, int component_id); +static inline unsigned int resctrl_arch_round_mon_val(unsigned int val) +{ + return val; +} bool resctrl_arch_alloc_capable(void); bool resctrl_arch_mon_capable(void); -- Gitee From 16b4ff89981a489bfa39656efca77587584c5b69 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 9 Mar 2023 14:01:42 +0000 Subject: [PATCH 0621/2138] untested: arm_mpam: resctrl: Allow monitors to be configured ANBZ: #8686 commit db0ac51f60675b6c4a54ccd24fa7198ec321c56d morse-linux. MPAM MSCs may have support for filtering reads or writes when monitoring traffic. Resctrl has a configuration bitmap for which kind of accesses should be monitored. Bridge the gap where possible. MPAM only has a read/write bit, so not all the combinations can be supported. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_devices.c | 26 +++++++++++ drivers/platform/mpam/mpam_internal.h | 9 ++++ drivers/platform/mpam/mpam_resctrl.c | 62 +++++++++++++++++++++++++++ 3 files changed, 97 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 5ebb944ad9fc..04e1e98e17cb 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -1076,6 +1076,32 @@ int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, return err; } +void mpam_msmon_reset_all_mbwu(struct mpam_component *comp) +{ + int idx, i; + unsigned long flags; + struct mpam_msc *msc; + struct mpam_msc_ris *ris; + + if (!mpam_is_enabled()) + return; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + if (!mpam_has_feature(mpam_feat_msmon_mbwu, &ris->props)) + continue; + + msc = ris->msc; + spin_lock_irqsave(&msc->mon_sel_lock, flags); + for (i = 0; i < ris->props.num_mbwu_mon; i++) { + ris->mbwu_state[i].correction = 0; + ris->mbwu_state[i].reset_on_next_read = true; + } + spin_unlock_irqrestore(&msc->mon_sel_lock, flags); + } + srcu_read_unlock(&mpam_srcu, idx); +} + void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx) { int idx; diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 8d4bcc1f5642..50d738d83047 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -20,6 +20,12 @@ DECLARE_STATIC_KEY_FALSE(mpam_enabled); /* Value to indicate the allocated monitor is derived from the RMID index. */ #define USE_RMID_IDX (U16_MAX + 1) +/* + * Only these event configuration bits are supported. 
MPAM can't know if + * data is being written back, these will show up as a write. + */ +#define MPAM_RESTRL_EVT_CONFIG_VALID (READS_TO_LOCAL_MEM | NON_TEMP_WRITE_TO_LOCAL_MEM) + static inline bool mpam_is_enabled(void) { return static_branch_likely(&mpam_enabled); @@ -240,6 +246,8 @@ struct mpam_msc_ris { struct mpam_resctrl_dom { struct mpam_component *comp; struct rdt_domain resctrl_dom; + + u32 mbm_local_evt_cfg; }; struct mpam_resctrl_res { @@ -299,6 +307,7 @@ int mpam_apply_config(struct mpam_component *comp, u16 partid, int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, enum mpam_device_features, u64 *val); void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx); +void mpam_msmon_reset_all_mbwu(struct mpam_component *comp); int mpam_resctrl_online_cpu(unsigned int cpu); int mpam_resctrl_offline_cpu(unsigned int cpu); diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 730ea6c6dffc..328837867d60 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -306,6 +306,18 @@ void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, } } +static enum mon_filter_options resctrl_evt_config_to_mpam(u32 local_evt_cfg) +{ + switch (local_evt_cfg) { + case READS_TO_LOCAL_MEM: + return COUNT_READ; + case NON_TEMP_WRITE_TO_LOCAL_MEM: + return COUNT_WRITE; + default: + return COUNT_BOTH; + } +} + int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, u32 closid, u32 rmid, enum resctrl_event_id eventid, u64 *val, void *arch_mon_ctx) @@ -339,6 +351,7 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, cfg.match_pmg = true; cfg.pmg = rmid; + cfg.opts = resctrl_evt_config_to_mpam(dom->mbm_local_evt_cfg); if (cdp_enabled) { cfg.partid = closid << 1; @@ -621,6 +634,54 @@ static void mpam_resctrl_pick_mba(void) } } +bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt) +{ + struct mpam_props *cprops; + + 
switch (evt) { + case QOS_L3_MBM_LOCAL_EVENT_ID: + if (!mbm_local_class) + return false; + cprops = &mbm_local_class->props; + + return mpam_has_feature(mpam_feat_msmon_mbwu_rwbw, cprops); + default: + return false; + } +} + +void resctrl_arch_mon_event_config_read(void *info) +{ + struct mpam_resctrl_dom *dom; + struct resctrl_mon_config_info *mon_info = info; + + dom = container_of(mon_info->d, struct mpam_resctrl_dom, resctrl_dom); + mon_info->mon_config = dom->mbm_local_evt_cfg & MAX_EVT_CONFIG_BITS; +} + +void resctrl_arch_mon_event_config_write(void *info) +{ + struct mpam_resctrl_dom *dom; + struct resctrl_mon_config_info *mon_info = info; + + if (mon_info->mon_config & ~MPAM_RESTRL_EVT_CONFIG_VALID) { + mon_info->err = -EOPNOTSUPP; + return; + } + + dom = container_of(mon_info->d, struct mpam_resctrl_dom, resctrl_dom); + dom->mbm_local_evt_cfg = mon_info->mon_config & MPAM_RESTRL_EVT_CONFIG_VALID; +} + +void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d) +{ + struct mpam_resctrl_dom *dom; + + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + dom->mbm_local_evt_cfg = MPAM_RESTRL_EVT_CONFIG_VALID; + mpam_msmon_reset_all_mbwu(dom->comp); +} + static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) { struct mpam_class *class = res->class; @@ -973,6 +1034,7 @@ mpam_resctrl_alloc_domain(unsigned int cpu, struct mpam_resctrl_res *res) dom->comp = comp; INIT_LIST_HEAD(&dom->resctrl_dom.list); dom->resctrl_dom.id = comp->comp_id; + dom->mbm_local_evt_cfg = MPAM_RESTRL_EVT_CONFIG_VALID; cpumask_set_cpu(cpu, &dom->resctrl_dom.cpu_mask); /* TODO: this list should be sorted */ -- Gitee From d6e83c9e78e0294c7163acfec2c78ea4db7253b5 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Jul 2021 18:48:23 +0100 Subject: [PATCH 0622/2138] arm_mpam: resctrl: Add empty definitions for pseudo lock ANBZ: #8686 commit 12329dd3015ff84e06b3044cfb506a555dba7b29 morse-linux. Pseudo lock isn't supported on arm64. 
Add empty definitions of the functions arm64 doesn't implement. Because the Kconfig option is not selected, none of these will be called. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- include/linux/arm_mpam.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index abadaba0085f..8897309c163d 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -80,4 +80,11 @@ struct rdt_resource; void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid); void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, void *ctx); +/* Pseudo lock is not supported by MPAM */ +static inline int resctrl_arch_pseudo_lock_fn(void *_plr) { return 0; } +static inline int resctrl_arch_measure_l2_residency(void *_plr) { return 0; } +static inline int resctrl_arch_measure_l3_residency(void *_plr) { return 0; } +static inline int resctrl_arch_measure_cycles_lat_fn(void *_plr) { return 0; } +static inline u64 resctrl_arch_get_prefetch_disable_bits(void) { return 0; } + #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 264211854011d1107d627b6d4f702dc0fa1580f7 Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 14 Jul 2021 15:34:16 +0100 Subject: [PATCH 0623/2138] arm_mpam: resctrl: Add empty definitions for fine-grained enables ANBZ: #8686 commit 0d96e8cc81e11d9ae1ce9392c5ff59e9db82339e morse-linux. resctrl has individual hooks to separately enable and disable the closid/partid and rmid/pmg context switching code. For MPAM this is all the same thing, as the value in struct task_struct is used to cache the value that should be written to hardware. arm64's context switching code is enabled once MPAM is usable, but doesn't touch the hardware unless the value has changed. Resctrl doesn't need to ask. 
Add empty definitions for these hoooks. Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- include/linux/arm_mpam.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 8897309c163d..f6b92060b811 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -87,4 +87,13 @@ static inline int resctrl_arch_measure_l3_residency(void *_plr) { return 0; } static inline int resctrl_arch_measure_cycles_lat_fn(void *_plr) { return 0; } static inline u64 resctrl_arch_get_prefetch_disable_bits(void) { return 0; } +/* + * The CPU configuration for MPAM is cheap to write, and is only written if it + * has changed. No need for fine grained enables. + */ +static inline void resctrl_arch_enable_mon(void) { } +static inline void resctrl_arch_disable_mon(void) { } +static inline void resctrl_arch_enable_alloc(void) { } +static inline void resctrl_arch_disable_alloc(void) { } + #endif /* __LINUX_ARM_MPAM_H */ -- Gitee From 4b53009c78e177f8f4596eaa3ed95f206e5514e0 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 2 Sep 2022 11:08:41 +0100 Subject: [PATCH 0624/2138] arm_mpam: resctrl: Add dummy definition for free running counters ANBZ: #8686 commit 8876ec5dfa36043b8e8c80de9d35f07495dbf77c morse-linux. resctrl expects RDT like counters that are free running. MPAM's counters don't behave like this as they need a monitor to be allocated first. Provide the helper that says whether free running counters are supported. Subsequent patches will make this more intelligent. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- include/linux/arm_mpam.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index f6b92060b811..239d27af9e32 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -55,6 +55,12 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val) return val; } +/* MPAM counters requires a monitor to be allocated */ +static inline bool resctrl_arch_event_is_free_running(enum resctrl_event_id evt) +{ + return false; +} + bool resctrl_arch_alloc_capable(void); bool resctrl_arch_mon_capable(void); bool resctrl_arch_is_llc_occupancy_enabled(void); -- Gitee From 48c1594b8eba8b069baebaeaab2c3d4915d7ef9a Mon Sep 17 00:00:00 2001 From: James Morse Date: Wed, 14 Jul 2021 15:40:17 +0100 Subject: [PATCH 0625/2138] arm64: mpam: Select ARCH_HAS_CPU_RESCTRL ANBZ: #8686 commit 01473b460f8b425cf15bc838864435ce79c366b4 morse-linux. Enough MPAM support is present to enable ARCH_HAS_CPU_RESCTRL. Let it rip^Wlink! 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] [ add missing SPDX-License-Identifier tag ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- arch/arm64/Kconfig | 3 ++- arch/arm64/include/asm/resctrl.h | 2 ++ drivers/platform/mpam/Kconfig | 4 +++- 3 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 arch/arm64/include/asm/resctrl.h diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 95c50f3ac290..7ff5e6becc9b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2016,7 +2016,8 @@ config ARM64_TLB_RANGE config ARM64_MPAM bool "Enable support for MPAM" select ACPI_MPAM if ACPI - select ARM_CPU_RESCTRL + select ARCH_HAS_CPU_RESCTRL + select RESCTRL_FS help Memory Partitioning and Monitoring is an optional extension that allows the CPUs to mark load and store transactions with diff --git a/arch/arm64/include/asm/resctrl.h b/arch/arm64/include/asm/resctrl.h new file mode 100644 index 000000000000..b506e95cf6e3 --- /dev/null +++ b/arch/arm64/include/asm/resctrl.h @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include diff --git a/drivers/platform/mpam/Kconfig b/drivers/platform/mpam/Kconfig index 13bd86fc5e58..75f5b2454fbe 100644 --- a/drivers/platform/mpam/Kconfig +++ b/drivers/platform/mpam/Kconfig @@ -2,5 +2,7 @@ # CPU resources, not containers or cgroups etc. config ARM_CPU_RESCTRL bool - depends on ARM64 + default y + depends on ARM64 && ARCH_HAS_CPU_RESCTRL + depends on MISC_FILESYSTEMS select RESCTRL_RMID_DEPENDS_ON_CLOSID -- Gitee From b1b37064f8d6b9503b7e56b4b78c156e4e6bfea9 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 5 Dec 2023 16:18:37 +0000 Subject: [PATCH 0626/2138] perf/arm-cmn: Stop claiming all the resources ANBZ: #8686 commit af8184da77bf79023a968bab12ad78de7f7311f1 morse-linux. 
Carl reports that when both the MPAM driver and CMN driver are built into the kernel, they fight over who can claim the resources associated with their registers. This prevents the second of these two drivers from probing. Currently the CMN PMU driver claims all the CMN registers. The MPAM registers are grouped together in a small number of pages, whereas the PMU registers that the CMN PMU driver uses appear throughout the CMN register space. Having the CMN driver claim all the resources is the wrong thing to do, and claiming individual registers here and there is not worthwhile. Instead, stop the CMN driver from claiming any resources as its registers are not grouped together. Reported-by: Carl Worth Tested-by: Carl Worth Signed-off-by: James Morse CC: Ilkka Koskinen [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] [ remove the redundant PTR_ERR macro ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/perf/arm-cmn.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index 7bd1733d7977..6f5c2739fe66 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -2435,6 +2435,7 @@ static int arm_cmn_probe(struct platform_device *pdev) struct arm_cmn *cmn; const char *name; static atomic_t id; + struct resource *cfg; int err, rootnode, this_id; cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL); @@ -2449,7 +2450,16 @@ static int arm_cmn_probe(struct platform_device *pdev) rootnode = arm_cmn600_acpi_probe(pdev, cmn); } else { rootnode = 0; - cmn->base = devm_platform_ioremap_resource(pdev, 0); + + /* + * Avoid registering resources as the PMUs registers are + * scattered through CMN, and may appear either side of + * registers for other 'devices'. (e.g. the MPAM MSC controls). 
+ */ + cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!cfg) + return -EINVAL; + cmn->base = devm_ioremap(&pdev->dev, cfg->start, resource_size(cfg)); if (IS_ERR(cmn->base)) return PTR_ERR(cmn->base); if (cmn->part == PART_CMN600) -- Gitee From a0ff3ac936326dee094f0827c8bc71bdf20d7d32 Mon Sep 17 00:00:00 2001 From: James Morse Date: Thu, 19 Aug 2021 15:06:55 +0100 Subject: [PATCH 0627/2138] arm_mpam: resctrl: Tell resctrl about cpu/domain online/offline ANBZ: #8686 commit f6d31defad669e42342bd5ae044e85eb9be239da morse-linux. Now that mpam links against resctrl, call the cpu and domain online/offline calls at the appropriate point. Signed-off-by: James Morse Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 328837867d60..976641a2af12 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -825,7 +825,7 @@ int mpam_resctrl_setup(void) pr_warn("Number of PMG is not a power of 2! 
resctrl may misbehave"); } - /* TODO: call resctrl_init() */ + err = resctrl_init(); } return err; @@ -1080,7 +1080,7 @@ struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id) int mpam_resctrl_online_cpu(unsigned int cpu) { - int i; + int i, err; struct mpam_resctrl_dom *dom; struct mpam_resctrl_res *res; @@ -1099,8 +1099,12 @@ int mpam_resctrl_online_cpu(unsigned int cpu) dom = mpam_resctrl_alloc_domain(cpu, res); if (IS_ERR(dom)) return PTR_ERR(dom); + err = resctrl_online_domain(&res->resctrl_res, &dom->resctrl_dom); + if (err) + return err; } + resctrl_online_cpu(cpu); return 0; } @@ -1111,6 +1115,8 @@ int mpam_resctrl_offline_cpu(unsigned int cpu) struct mpam_resctrl_res *res; struct mpam_resctrl_dom *dom; + resctrl_offline_cpu(cpu); + for (i = 0; i < RDT_NUM_RESOURCES; i++) { res = &mpam_resctrl_exports[i]; @@ -1129,6 +1135,7 @@ int mpam_resctrl_offline_cpu(unsigned int cpu) if (!cpumask_empty(&d->cpu_mask)) continue; + resctrl_offline_domain(&res->resctrl_res, &dom->resctrl_dom); list_del(&d->list); kfree(dom); } -- Gitee From 79c857aac4f23dfae70f5506eeeb6b319c58f9b6 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 2 Nov 2021 12:45:26 +0000 Subject: [PATCH 0628/2138] arm_mpam: resctrl: Call resctrl_exit() in the event of errors ANBZ: #8686 commit 44ac89e937ea85aae5ec363f003f50cb2791d3f9 morse-linux. All of MPAMs errors indicate a software bug, e.g. an out-of-bounds partid has been generated. When this happens, the mpam driver is disabled. If resctrl_init() succeeded, also call resctrl_exit() to remove resctrl. If the filesystem was mounted in its traditional place, it is no longer possible for processes to find it as the mount point has been removed. If the filesystem was mounted elsewhere, it will appear that all CPU and domains are offline. User-space will not be able to update the hardware. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_devices.c | 2 ++ drivers/platform/mpam/mpam_internal.h | 1 + drivers/platform/mpam/mpam_resctrl.c | 17 +++++++++++++++++ 3 files changed, 20 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 04e1e98e17cb..32532a918115 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -2240,6 +2240,8 @@ static irqreturn_t mpam_disable_thread(int irq, void *dev_id) } mutex_unlock(&mpam_cpuhp_state_lock); + mpam_resctrl_exit(); + static_branch_disable(&mpam_enabled); mpam_unregister_irqs(); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 50d738d83047..48554ff93e09 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -313,6 +313,7 @@ int mpam_resctrl_online_cpu(unsigned int cpu); int mpam_resctrl_offline_cpu(unsigned int cpu); int mpam_resctrl_setup(void); +void mpam_resctrl_exit(void); /* * MPAM MSCs have the following register layout. See: diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 976641a2af12..62bfa7d772d9 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -41,6 +41,12 @@ static struct mpam_class *mbm_total_class; */ static bool cdp_enabled; +/* + * If resctrl_init() succeeded, resctrl_exit() can be used to remove support + * for the filesystem in the event of an error. 
+ */ +static bool resctrl_enabled; + /* A dummy mon context to use when the monitors were allocated up front */ u32 __mon_is_rmid_idx = USE_RMID_IDX; void *mon_is_rmid_idx = &__mon_is_rmid_idx; @@ -826,11 +832,22 @@ int mpam_resctrl_setup(void) } err = resctrl_init(); + if (!err) + WRITE_ONCE(resctrl_enabled, true); } return err; } +void mpam_resctrl_exit(void) +{ + if (!READ_ONCE(resctrl_enabled)) + return; + + WRITE_ONCE(resctrl_enabled, false); + resctrl_exit(); +} + u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type type) { -- Gitee From 16f16f9655794ea4c0071c6f0528f67308c5ffa5 Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 13 Jul 2021 19:01:23 +0100 Subject: [PATCH 0629/2138] arm_mpam: resctrl: Update the rmid reallocation limit ANBZ: #8686 commit 0066c4921f837d64ba963d0b47a4271388aacd1e morse-linux. resctrl's limbo code needs to be told when the data left in a cache is small enough for the partid+pmg value to be re-allocated. x86 uses the cache size divded by the number of rmid users the cache may have. Do the same, but for the smallest cache, and with the number of partid-and-pmg users. Querying the cache size can't happen until after cacheinfo_sysfs_init() has run, so mpam_resctrl_setup() must wait until then. 
Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 51 ++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 62bfa7d772d9..4062c0f93f85 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -15,6 +15,7 @@ #include #include #include +#include #include @@ -47,6 +48,13 @@ static bool cdp_enabled; */ static bool resctrl_enabled; +/* + * mpam_resctrl_pick_caches() needs to know the size of the caches. cacheinfo + * populates this from a device_initcall(). mpam_resctrl_setup() must wait. + */ +static bool cacheinfo_ready; +static DECLARE_WAIT_QUEUE_HEAD(wait_cacheinfo_ready); + /* A dummy mon context to use when the monitors were allocated up front */ u32 __mon_is_rmid_idx = USE_RMID_IDX; void *mon_is_rmid_idx = &__mon_is_rmid_idx; @@ -404,6 +412,24 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, } } +/* + * The rmid realloc threshold should be for the smallest cache exposed to + * resctrl. 
+ */ +static void update_rmid_limits(unsigned int size) +{ + u32 num_unique_pmg = resctrl_arch_system_num_rmid_idx(); + + if (WARN_ON_ONCE(!size)) + return; + + if (resctrl_rmid_realloc_limit && size > resctrl_rmid_realloc_limit) + return; + + resctrl_rmid_realloc_limit = size; + resctrl_rmid_realloc_threshold = size / num_unique_pmg; +} + static bool cache_has_usable_cpor(struct mpam_class *class) { struct mpam_props *cprops = &class->props; @@ -560,11 +586,15 @@ static u16 percent_to_mbw_max(u8 pc, struct mpam_props *cprops) static void mpam_resctrl_pick_caches(void) { int idx; + unsigned int cache_size; struct mpam_class *class; struct mpam_resctrl_res *res; + lockdep_assert_cpus_held(); + idx = srcu_read_lock(&mpam_srcu); list_for_each_entry_rcu(class, &mpam_classes, classes_list) { + struct mpam_props *cprops = &class->props; bool has_cpor = cache_has_usable_cpor(class); if (class->type != MPAM_CLASS_CACHE) { @@ -590,6 +620,16 @@ static void mpam_resctrl_pick_caches(void) continue; } + /* Assume cache levels are the same size for all CPUs... 
*/ + cache_size = get_cpu_cacheinfo_size(smp_processor_id(), class->level); + if (!cache_size) { + pr_debug("pick_caches: Could not read cache size\n"); + continue; + } + + if (mpam_has_feature(mpam_feat_msmon_csu, cprops)) + update_rmid_limits(cache_size); + if (class->level == 2) { res = &mpam_resctrl_exports[RDT_RESOURCE_L2]; res->resctrl_res.name = "L2"; @@ -795,6 +835,8 @@ int mpam_resctrl_setup(void) struct mpam_resctrl_res *res; enum resctrl_res_level i; + wait_event(wait_cacheinfo_ready, cacheinfo_ready); + cpus_read_lock(); for (i = 0; i < RDT_NUM_RESOURCES; i++) { res = &mpam_resctrl_exports[i]; @@ -1159,3 +1201,12 @@ int mpam_resctrl_offline_cpu(unsigned int cpu) return 0; } + +static int __init __cacheinfo_ready(void) +{ + cacheinfo_ready = true; + wake_up(&wait_cacheinfo_ready); + + return 0; +} +device_initcall_sync(__cacheinfo_ready); -- Gitee From 7d5309dd2b0dbf365eb2d2a519acba1f472b3ef3 Mon Sep 17 00:00:00 2001 From: Amit Singh Tomar Date: Mon, 9 Jan 2023 17:03:59 +0530 Subject: [PATCH 0630/2138] fs/resctrl: Remove the limit on the number of CLOSID ANBZ: #8686 commit b530deed244d9b45f3bce3cccde91f6ed0ebf7ea morse-linux. At the moment, number of resource control group (user can create) is limited to 32. Remove the limit. ffs() returns '1' for bit 0, hence the existing code subtracts 1 from the index to get the CLOSID value. find_first_bit() returns the bit number which does not need adjusting. Signed-off-by: Amit Singh Tomar [ morse: fixed the off-by-one in the allocator and the wrong not-found value. Removed the limit. 
] Signed-off-by: James Morse [ cherry-picked from https://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git/log/?h=mpam/snapshot/v6.7-rc2 ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3011 --- drivers/platform/mpam/mpam_resctrl.c | 2 +- fs/resctrl/rdtgroup.c | 25 +++++++++++++------------ include/linux/resctrl.h | 5 ----- 3 files changed, 14 insertions(+), 18 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 4062c0f93f85..c7ebb4d37b47 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -132,7 +132,7 @@ static bool mpam_resctrl_hide_cdp(enum resctrl_res_level rid) */ u32 resctrl_arch_get_num_closid(struct rdt_resource *ignored) { - return min((u32)mpam_partid_max + 1, (u32)RESCTRL_MAX_CLOSID); + return mpam_partid_max + 1; } u32 resctrl_arch_system_num_rmid_idx(void) diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c index 936fc6e47386..ea969ddb1a9d 100644 --- a/fs/resctrl/rdtgroup.c +++ b/fs/resctrl/rdtgroup.c @@ -123,8 +123,8 @@ static bool resctrl_is_mbm_event(int e) } /* - * Trivial allocator for CLOSIDs. Since h/w only supports a small number, - * we can keep a bitmap of free CLOSIDs in a single integer. + * Trivial allocator for CLOSIDs. Use BITMAP APIs to manipulate a bitmap + * of free CLOSIDs. * * Using a global CLOSID across all resources has some advantages and * some drawbacks: @@ -137,7 +137,7 @@ static bool resctrl_is_mbm_event(int e) * - Our choices on how to configure each resource become progressively more * limited as the number of resources grows. 
*/ -static unsigned long closid_free_map; +static unsigned long *closid_free_map; static int closid_free_map_len; int closids_supported(void) @@ -148,16 +148,17 @@ int closids_supported(void) static void closid_init(void) { struct resctrl_schema *s; - u32 rdt_min_closid = 32; + u32 rdt_min_closid = ~0; /* Compute rdt_min_closid across all resources */ list_for_each_entry(s, &resctrl_schema_all, list) rdt_min_closid = min(rdt_min_closid, s->num_closid); - closid_free_map = BIT_MASK(rdt_min_closid) - 1; + closid_free_map = bitmap_alloc(rdt_min_closid, GFP_KERNEL); + bitmap_fill(closid_free_map, rdt_min_closid); /* RESCTRL_RESERVED_CLOSID is always reserved for the default group */ - __clear_bit(RESCTRL_RESERVED_CLOSID, &closid_free_map); + __clear_bit(RESCTRL_RESERVED_CLOSID, closid_free_map); closid_free_map_len = rdt_min_closid; } @@ -175,12 +176,12 @@ static int closid_alloc(void) return cleanest_closid; closid = cleanest_closid; } else { - closid = ffs(closid_free_map); - if (closid == 0) + closid = find_first_bit(closid_free_map, closid_free_map_len); + if (closid == closid_free_map_len) return -ENOSPC; - closid--; } - __clear_bit(closid, &closid_free_map); + + __clear_bit(closid, closid_free_map); return closid; } @@ -189,7 +190,7 @@ void closid_free(int closid) { lockdep_assert_held(&rdtgroup_mutex); - __set_bit(closid, &closid_free_map); + __set_bit(closid, closid_free_map); } /** @@ -203,7 +204,7 @@ bool closid_allocated(unsigned int closid) { lockdep_assert_held(&rdtgroup_mutex); - return !test_bit(closid, &closid_free_map); + return !test_bit(closid, closid_free_map); } /** diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 00cc0457af50..dc80ddab26cf 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -30,11 +30,6 @@ int proc_resctrl_show(struct seq_file *m, /* max value for struct rdt_domain's mbps_val */ #define MBA_MAX_MBPS U32_MAX -/* - * Resctrl uses a u32 as a closid bitmap. The maximum closid is 32. 
- */ -#define RESCTRL_MAX_CLOSID 32 - /* * Resctrl uses u32 to hold the user-space config. The maximum bitmap size is * 32. -- Gitee From 068049653a4505fd010e1baa7ecccd1e5db1f3fb Mon Sep 17 00:00:00 2001 From: Tyler Fanelli Date: Tue, 19 Sep 2023 22:40:01 -0400 Subject: [PATCH 0631/2138] docs/fuse-io: Document the usage of DIRECT_IO_ALLOW_MMAP ANBZ: #8700 commit 11ca77cdcca17cec909d2b97404ddacfec0acafd upstream. By default, shared mmap is disabled in FUSE DIRECT_IO mode. However, when the DIRECT_IO_ALLOW_MMAP flag is enabled in the FUSE_INIT reply, shared mmap is allowed. Signed-off-by: Tyler Fanelli Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- Documentation/filesystems/fuse-io.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Documentation/filesystems/fuse-io.rst b/Documentation/filesystems/fuse-io.rst index 255a368fe534..6464de4266ad 100644 --- a/Documentation/filesystems/fuse-io.rst +++ b/Documentation/filesystems/fuse-io.rst @@ -15,7 +15,8 @@ The direct-io mode can be selected with the FOPEN_DIRECT_IO flag in the FUSE_OPEN reply. In direct-io mode the page cache is completely bypassed for reads and writes. -No read-ahead takes place. Shared mmap is disabled. +No read-ahead takes place. Shared mmap is disabled by default. To allow shared +mmap, the FUSE_DIRECT_IO_ALLOW_MMAP flag may be enabled in the FUSE_INIT reply. In cached mode reads may be satisfied from the page cache, and data may be read-ahead by the kernel to fill the cache. The cache is always kept consistent -- Gitee From 4ef2db1b905b015dc6113e5641d923f8d0cecc8d Mon Sep 17 00:00:00 2001 From: Bernd Schubert Date: Tue, 22 Aug 2023 21:48:18 +0200 Subject: [PATCH 0632/2138] fuse: create helper function if DIO write needs exclusive lock ANBZ: #8700 commit 699cf8246ee4c2c524f18c2e395909d16e7fda1b upstream. 
This makes the code a bit easier to read and allows to more easily add more conditions when an exclusive lock is needed. Signed-off-by: Bernd Schubert Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/file.c | 63 +++++++++++++++++++++++++++++++++++--------------- 1 file changed, 45 insertions(+), 18 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index ceb9f7d23038..46a0c8e31e06 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1298,6 +1298,47 @@ static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii) return res; } +static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter) +{ + struct inode *inode = file_inode(iocb->ki_filp); + + return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode); +} + +/* + * @return true if an exclusive lock for direct IO writes is needed + */ +static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from) +{ + struct file *file = iocb->ki_filp; + struct fuse_file *ff = file->private_data; + struct inode *inode = file_inode(iocb->ki_filp); + + /* Server side has to advise that it supports parallel dio writes. */ + if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES)) + return true; + + /* + * Append will need to know the eventual EOF - always needs an + * exclusive lock. + */ + if (iocb->ki_flags & IOCB_APPEND) + return true; + + /* + * Combination of page access and direct-io is difficult, shared locks + * actually introduce a conflict. + */ + if (get_fuse_conn(inode)->direct_io_allow_mmap) + return true; + + /* Parallel dio beyond EOF is not supported, at least for now. 
*/ + if (fuse_io_past_eof(iocb, from)) + return true; + + return false; +} + static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; @@ -1557,26 +1598,12 @@ static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to) return res; } -static bool fuse_direct_write_extending_i_size(struct kiocb *iocb, - struct iov_iter *iter) -{ - struct inode *inode = file_inode(iocb->ki_filp); - - return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode); -} - static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); - struct file *file = iocb->ki_filp; - struct fuse_file *ff = file->private_data; struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); ssize_t res; - bool exclusive_lock = - !(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) || - get_fuse_conn(inode)->direct_io_allow_mmap || - iocb->ki_flags & IOCB_APPEND || - fuse_direct_write_extending_i_size(iocb, from); + bool exclusive_lock = fuse_dio_wr_exclusive_lock(iocb, from); /* * Take exclusive lock if @@ -1590,10 +1617,10 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) else { inode_lock_shared(inode); - /* A race with truncate might have come up as the decision for - * the lock type was done without holding the lock, check again. + /* + * Previous check was without any lock and might have raced. */ - if (fuse_direct_write_extending_i_size(iocb, from)) { + if (fuse_io_past_eof(iocb, from)) { inode_unlock_shared(inode); inode_lock(inode); exclusive_lock = true; -- Gitee From e8059729fb095e59f89f83b4c110ae5128a38cb6 Mon Sep 17 00:00:00 2001 From: Bernd Schubert Date: Sun, 24 Dec 2023 00:05:53 +0100 Subject: [PATCH 0633/2138] fuse: add fuse_dio_lock/unlock helper functions ANBZ: #8700 commit 9bbb6717dfd286a2861ca33273f4d7c3e65423b0 upstream. So far this is just a helper to remove complex locking logic out of fuse_direct_write_iter. 
Especially needed by the next patch in the series to that adds the fuse inode cache IO mode and adds in even more locking complexity. Signed-off-by: Bernd Schubert Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/file.c | 61 ++++++++++++++++++++++++++++---------------------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 46a0c8e31e06..004843dac8fa 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1339,6 +1339,37 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from return false; } +static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, + bool *exclusive) +{ + struct inode *inode = file_inode(iocb->ki_filp); + + *exclusive = fuse_dio_wr_exclusive_lock(iocb, from); + if (*exclusive) { + inode_lock(inode); + } else { + inode_lock_shared(inode); + /* + * Previous check was without inode lock and might have raced, + * check again. + */ + if (fuse_io_past_eof(iocb, from)) { + inode_unlock_shared(inode); + inode_lock(inode); + *exclusive = true; + } + } +} + +static void fuse_dio_unlock(struct inode *inode, bool exclusive) +{ + if (exclusive) { + inode_unlock(inode); + } else { + inode_unlock_shared(inode); + } +} + static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; @@ -1603,30 +1634,9 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file_inode(iocb->ki_filp); struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); ssize_t res; - bool exclusive_lock = fuse_dio_wr_exclusive_lock(iocb, from); - - /* - * Take exclusive lock if - * - Parallel direct writes are disabled - a user space decision - * - Parallel direct writes are enabled and i_size is being extended. - * - Shared mmap on direct_io file is supported (FUSE_DIRECT_IO_ALLOW_MMAP). 
- * This might not be needed at all, but needs further investigation. - */ - if (exclusive_lock) - inode_lock(inode); - else { - inode_lock_shared(inode); - - /* - * Previous check was without any lock and might have raced. - */ - if (fuse_io_past_eof(iocb, from)) { - inode_unlock_shared(inode); - inode_lock(inode); - exclusive_lock = true; - } - } + bool exclusive; + fuse_dio_lock(iocb, from, &exclusive); res = generic_write_checks(iocb, from); if (res > 0) { if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) { @@ -1637,10 +1647,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) fuse_write_update_attr(inode, iocb->ki_pos, res); } } - if (exclusive_lock) - inode_unlock(inode); - else - inode_unlock_shared(inode); + fuse_dio_unlock(inode, exclusive); return res; } -- Gitee From 44a340dc157d2f72beaca4a432d1018b98ad4f73 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 1 Feb 2024 13:48:59 +0200 Subject: [PATCH 0634/2138] fuse: factor out helper fuse_truncate_update_attr() ANBZ: #8700 commit 0c9d708953d02f74cea05a01cf3e2c8f5a9fbaf4 upstream. fuse_finish_open() is called from fuse_open_common() and from fuse_create_open(). In the latter case, the O_TRUNC flag is always cleared in finish_open()m before calling into fuse_finish_open(). Move the bits that update attribute cache post O_TRUNC open into a helper and call this helper from fuse_open_common() directly. 
Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/file.c | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 004843dac8fa..c1909fa07a94 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -204,30 +204,31 @@ void fuse_finish_open(struct inode *inode, struct file *file) else if (ff->open_flags & FOPEN_NONSEEKABLE) nonseekable_open(inode, file); - if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) { - struct fuse_inode *fi = get_fuse_inode(inode); - - spin_lock(&fi->lock); - fi->attr_version = atomic64_inc_return(&fc->attr_version); - i_size_write(inode, 0); - spin_unlock(&fi->lock); - file_update_time(file); - fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); - } if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache) fuse_link_write_file(file); } +static void fuse_truncate_update_attr(struct inode *inode, struct file *file) +{ + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_inode *fi = get_fuse_inode(inode); + + spin_lock(&fi->lock); + fi->attr_version = atomic64_inc_return(&fc->attr_version); + i_size_write(inode, 0); + spin_unlock(&fi->lock); + file_update_time(file); + fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); +} + int fuse_open_common(struct inode *inode, struct file *file, bool isdir) { struct fuse_mount *fm = get_fuse_mount(inode); struct fuse_conn *fc = fm->fc; int err; - bool is_wb_truncate = (file->f_flags & O_TRUNC) && - fc->atomic_o_trunc && - fc->writeback_cache; - bool dax_truncate = (file->f_flags & O_TRUNC) && - fc->atomic_o_trunc && FUSE_IS_DAX(inode); + bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc; + bool is_wb_truncate = is_truncate && fc->writeback_cache; + bool dax_truncate = is_truncate && FUSE_IS_DAX(inode); if (fuse_is_bad(inode)) return -EIO; @@ -250,15 +251,18 @@ int 
fuse_open_common(struct inode *inode, struct file *file, bool isdir) fuse_set_nowrite(inode); err = fuse_do_open(fm, get_node_id(inode), file, isdir); - if (!err) + if (!err) { fuse_finish_open(inode, file); + if (is_truncate) + fuse_truncate_update_attr(inode, file); + } if (is_wb_truncate || dax_truncate) fuse_release_nowrite(inode); if (!err) { struct fuse_file *ff = file->private_data; - if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) + if (is_truncate) truncate_pagecache(inode, 0); else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) invalidate_inode_pages2(inode->i_mapping); -- Gitee From eec8a15c2a5bb99d59a666188a1657ea475d3bba Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 1 Feb 2024 15:30:05 +0200 Subject: [PATCH 0635/2138] fuse: allocate ff->release_args only if release is needed ANBZ: #8700 commit e26ee4efbc79610b20e7abe9d96c87f33dacc1ff upstream. This removed the need to pass isdir argument to fuse_put_file(). Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/dir.c | 2 +- fs/fuse/file.c | 69 +++++++++++++++++++++++++++--------------------- fs/fuse/fuse_i.h | 2 +- 3 files changed, 41 insertions(+), 32 deletions(-) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 95f9913a3537..b08eb62639dc 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -634,7 +634,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, goto out_err; err = -ENOMEM; - ff = fuse_file_alloc(fm); + ff = fuse_file_alloc(fm, true); if (!ff) goto out_put_forget_req; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index c1909fa07a94..5b40353294da 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -55,7 +55,7 @@ struct fuse_release_args { struct inode *inode; }; -struct fuse_file *fuse_file_alloc(struct fuse_mount *fm) +struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release) { struct fuse_file *ff; @@ -64,11 +64,13 @@ struct 
fuse_file *fuse_file_alloc(struct fuse_mount *fm) return NULL; ff->fm = fm; - ff->release_args = kzalloc(sizeof(*ff->release_args), - GFP_KERNEL_ACCOUNT); - if (!ff->release_args) { - kfree(ff); - return NULL; + if (release) { + ff->release_args = kzalloc(sizeof(*ff->release_args), + GFP_KERNEL_ACCOUNT); + if (!ff->release_args) { + kfree(ff); + return NULL; + } } INIT_LIST_HEAD(&ff->write_entry); @@ -104,14 +106,14 @@ static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args, kfree(ra); } -static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir) +static void fuse_file_put(struct fuse_file *ff, bool sync) { if (refcount_dec_and_test(&ff->count)) { - struct fuse_args *args = &ff->release_args->args; + struct fuse_release_args *ra = ff->release_args; + struct fuse_args *args = (ra ? &ra->args : NULL); - if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) { - /* Do nothing when client does not implement 'open' */ - fuse_release_end(ff->fm, args, 0); + if (!args) { + /* Do nothing when server does not implement 'open' */ } else if (sync) { fuse_simple_request(ff->fm, args); fuse_release_end(ff->fm, args, 0); @@ -131,15 +133,16 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, struct fuse_conn *fc = fm->fc; struct fuse_file *ff; int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; + bool open = isdir ? !fc->no_opendir : !fc->no_open; - ff = fuse_file_alloc(fm); + ff = fuse_file_alloc(fm, open); if (!ff) return ERR_PTR(-ENOMEM); ff->fh = 0; /* Default for no-open */ ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0); - if (isdir ? 
!fc->no_opendir : !fc->no_open) { + if (open) { struct fuse_open_out outarg; int err; @@ -147,11 +150,13 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, if (!err) { ff->fh = outarg.fh; ff->open_flags = outarg.open_flags; - } else if (err != -ENOSYS) { fuse_file_free(ff); return ERR_PTR(err); } else { + /* No release needed */ + kfree(ff->release_args); + ff->release_args = NULL; if (isdir) fc->no_opendir = 1; else @@ -277,7 +282,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) } static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, - unsigned int flags, int opcode) + unsigned int flags, int opcode, bool sync) { struct fuse_conn *fc = ff->fm->fc; struct fuse_release_args *ra = ff->release_args; @@ -295,6 +300,9 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, wake_up_interruptible_all(&ff->poll_wait); + if (!ra) + return; + ra->inarg.fh = ff->fh; ra->inarg.flags = flags; ra->args.in_numargs = 1; @@ -304,6 +312,13 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, ra->args.nodeid = ff->nodeid; ra->args.force = true; ra->args.nocreds = true; + + /* + * Hold inode until release is finished. + * From fuse_sync_release() the refcount is 1 and everything's + * synchronous, so we are fine with not doing igrab() here. + */ + ra->inode = sync ? NULL : igrab(&fi->inode); } void fuse_file_release(struct inode *inode, struct fuse_file *ff, @@ -313,14 +328,12 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff, struct fuse_release_args *ra = ff->release_args; int opcode = isdir ? 
FUSE_RELEASEDIR : FUSE_RELEASE; - fuse_prepare_release(fi, ff, open_flags, opcode); + fuse_prepare_release(fi, ff, open_flags, opcode, false); - if (ff->flock) { + if (ra && ff->flock) { ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK; ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id); } - /* Hold inode until release is finished */ - ra->inode = igrab(inode); /* * Normally this will send the RELEASE request, however if @@ -331,7 +344,7 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff, * synchronous RELEASE is allowed (and desirable) in this case * because the server can be trusted not to screw up. */ - fuse_file_put(ff, ff->fm->fc->destroy, isdir); + fuse_file_put(ff, ff->fm->fc->destroy); } void fuse_release_common(struct file *file, bool isdir) @@ -366,12 +379,8 @@ void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, unsigned int flags) { WARN_ON(refcount_read(&ff->count) > 1); - fuse_prepare_release(fi, ff, flags, FUSE_RELEASE); - /* - * iput(NULL) is a no-op and since the refcount is 1 and everything's - * synchronous, we are fine with not doing igrab() here" - */ - fuse_file_put(ff, true, false); + fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true); + fuse_file_put(ff, true); } EXPORT_SYMBOL_GPL(fuse_sync_release); @@ -928,7 +937,7 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, put_page(page); } if (ia->ff) - fuse_file_put(ia->ff, false, false); + fuse_file_put(ia->ff, false); fuse_io_free(ia); } @@ -1704,7 +1713,7 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa) __free_page(ap->pages[i]); if (wpa->ia.ff) - fuse_file_put(wpa->ia.ff, false, false); + fuse_file_put(wpa->ia.ff, false); kfree(ap->pages); kfree(wpa); @@ -1952,7 +1961,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc) ff = __fuse_write_file_get(fi); err = fuse_flush_times(inode, ff); if (ff) - fuse_file_put(ff, false, false); + fuse_file_put(ff, false); return err; } @@ 
-2350,7 +2359,7 @@ static int fuse_writepages(struct address_space *mapping, fuse_writepages_send(&data); } if (data.ff) - fuse_file_put(data.ff, false, false); + fuse_file_put(data.ff, false); kfree(data.orig_pages); out: diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 29523925ff81..8969ba4d7898 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -1037,7 +1037,7 @@ void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos, */ int fuse_open_common(struct inode *inode, struct file *file, bool isdir); -struct fuse_file *fuse_file_alloc(struct fuse_mount *fm); +struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release); void fuse_file_free(struct fuse_file *ff); void fuse_finish_open(struct inode *inode, struct file *file); -- Gitee From 003c28cb7468f7f885997e2c0b3a365b99ea2dd7 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Fri, 2 Feb 2024 13:30:30 +0200 Subject: [PATCH 0636/2138] fuse: break up fuse_open_common() ANBZ: #8700 commit 7de64d521bf92396b7da8ae0600188ea5d75a4c9 upstream. fuse_open_common() has a lot of code relevant only for regular files and O_TRUNC in particular. Copy the little bit of remaining code into fuse_dir_open() and stop using this common helper for directory open. Also split out fuse_dir_finish_open() from fuse_finish_open() before we add inode io modes to fuse_finish_open(). 
Suggested-by: Miklos Szeredi Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/dir.c | 25 ++++++++++++++++++++++++- fs/fuse/file.c | 9 ++------- fs/fuse/fuse_i.h | 5 ----- 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index b08eb62639dc..5244bc200b74 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1635,7 +1635,30 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, static int fuse_dir_open(struct inode *inode, struct file *file) { - return fuse_open_common(inode, file, true); + struct fuse_mount *fm = get_fuse_mount(inode); + int err; + + if (fuse_is_bad(inode)) + return -EIO; + + err = generic_file_open(inode, file); + if (err) + return err; + + err = fuse_do_open(fm, get_node_id(inode), file, true); + if (!err) { + struct fuse_file *ff = file->private_data; + + /* + * Keep handling FOPEN_STREAM and FOPEN_NONSEEKABLE for + * directories for backward compatibility, though it's unlikely + * to be useful. 
+ */ + if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE)) + nonseekable_open(inode, file); + } + + return err; } static int fuse_dir_release(struct inode *inode, struct file *file) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 5b40353294da..cb3668b35f55 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -226,7 +226,7 @@ static void fuse_truncate_update_attr(struct inode *inode, struct file *file) fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); } -int fuse_open_common(struct inode *inode, struct file *file, bool isdir) +static int fuse_open(struct inode *inode, struct file *file) { struct fuse_mount *fm = get_fuse_mount(inode); struct fuse_conn *fc = fm->fc; @@ -255,7 +255,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) if (is_wb_truncate || dax_truncate) fuse_set_nowrite(inode); - err = fuse_do_open(fm, get_node_id(inode), file, isdir); + err = fuse_do_open(fm, get_node_id(inode), file, false); if (!err) { fuse_finish_open(inode, file); if (is_truncate) @@ -353,11 +353,6 @@ void fuse_release_common(struct file *file, bool isdir) (fl_owner_t) file, isdir); } -static int fuse_open(struct inode *inode, struct file *file) -{ - return fuse_open_common(inode, file, false); -} - static int fuse_release(struct inode *inode, struct file *file) { struct fuse_conn *fc = get_fuse_conn(inode); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 8969ba4d7898..16ba3ff7178f 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -1032,11 +1032,6 @@ void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos, size_t count, int opcode); -/** - * Send OPEN or OPENDIR request - */ -int fuse_open_common(struct inode *inode, struct file *file, bool isdir); - struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release); void fuse_file_free(struct fuse_file *ff); void fuse_finish_open(struct inode *inode, struct file *file); -- Gitee From cc36e399cf6d66935eb5f9abd67178d75865e4b5 Mon Sep 17 00:00:00 2001 From: 
Amir Goldstein Date: Thu, 1 Feb 2024 15:38:06 +0200 Subject: [PATCH 0637/2138] fuse: prepare for failing open response ANBZ: #8700 commit d2c487f150ae00e3cb9faf57aceacc584e0a130c upstream. In preparation for inode io modes, a server open response could fail due to conflicting inode io modes. Allow returning an error from fuse_finish_open() and handle the error in the callers. fuse_finish_open() is used as the callback of finish_open(), so that FMODE_OPENED will not be set if fuse_finish_open() fails. Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/dir.c | 8 +++++--- fs/fuse/file.c | 15 ++++++++++----- fs/fuse/fuse_i.h | 2 +- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 5244bc200b74..ca865e7c4b55 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -696,13 +696,15 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, d_instantiate(entry, inode); fuse_change_entry_timeout(entry, &outentry); fuse_dir_changed(dir); - err = finish_open(file, entry, generic_file_open); + err = generic_file_open(inode, file); + if (!err) { + file->private_data = ff; + err = finish_open(file, entry, fuse_finish_open); + } if (err) { fi = get_fuse_inode(inode); fuse_sync_release(fi, ff, flags); } else { - file->private_data = ff; - fuse_finish_open(inode, file); if (fm->fc->atomic_o_trunc && trunc) truncate_pagecache(inode, 0); else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index cb3668b35f55..4474176604fe 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -199,7 +199,7 @@ static void fuse_link_write_file(struct file *file) spin_unlock(&fi->lock); } -void fuse_finish_open(struct inode *inode, struct file *file) +int fuse_finish_open(struct inode *inode, struct file *file) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = 
get_fuse_conn(inode); @@ -211,6 +211,8 @@ void fuse_finish_open(struct inode *inode, struct file *file) if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache) fuse_link_write_file(file); + + return 0; } static void fuse_truncate_update_attr(struct inode *inode, struct file *file) @@ -229,7 +231,9 @@ static void fuse_truncate_update_attr(struct inode *inode, struct file *file) static int fuse_open(struct inode *inode, struct file *file) { struct fuse_mount *fm = get_fuse_mount(inode); + struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = fm->fc; + struct fuse_file *ff; int err; bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc; bool is_wb_truncate = is_truncate && fc->writeback_cache; @@ -257,16 +261,17 @@ static int fuse_open(struct inode *inode, struct file *file) err = fuse_do_open(fm, get_node_id(inode), file, false); if (!err) { - fuse_finish_open(inode, file); - if (is_truncate) + ff = file->private_data; + err = fuse_finish_open(inode, file); + if (err) + fuse_sync_release(fi, ff, file->f_flags); + else if (is_truncate) fuse_truncate_update_attr(inode, file); } if (is_wb_truncate || dax_truncate) fuse_release_nowrite(inode); if (!err) { - struct fuse_file *ff = file->private_data; - if (is_truncate) truncate_pagecache(inode, 0); else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 16ba3ff7178f..53d98141887c 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -1034,7 +1034,7 @@ void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos, struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release); void fuse_file_free(struct fuse_file *ff); -void fuse_finish_open(struct inode *inode, struct file *file); +int fuse_finish_open(struct inode *inode, struct file *file); void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, unsigned int flags); -- Gitee From c2227958901375dde68e551869e614d3f8bfc6f4 Mon Sep 17 00:00:00 2001 From: Amir 
Goldstein Date: Thu, 1 Feb 2024 16:26:15 +0200 Subject: [PATCH 0638/2138] fuse: introduce inode io modes ANBZ: #8700 commit cb098dd24bab8a315aa00bab1ccddb6be872156d upstream. The fuse inode io mode is determined by the mode of its open files/mmaps and parallel dio opens and expressed in the value of fi->iocachectr: > 0 - caching io: files open in caching mode or mmap on direct_io file < 0 - parallel dio: direct io mode with parallel dio writes enabled == 0 - direct io: no files open in caching mode and no files mmaped Note that iocachectr value of 0 might become positive or negative, while non-parallel dio is getting processed. direct_io mmap uses page cache, so first mmap will mark the file as ff->io_opened and increment fi->iocachectr to enter the caching io mode. If the server opens the file in caching mode while it is already open for parallel dio or vice versa the open fails. This allows executing parallel dio when inode is not in caching mode and no mmaps have been performed on the inode in question. 
Signed-off-by: Bernd Schubert Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/Makefile | 1 + fs/fuse/file.c | 15 +++++ fs/fuse/fuse_i.h | 17 ++++- fs/fuse/iomode.c | 158 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 189 insertions(+), 2 deletions(-) create mode 100644 fs/fuse/iomode.c diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile index 8dfb7f9c1f58..d2bad0c85d8c 100644 --- a/fs/fuse/Makefile +++ b/fs/fuse/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_VIRTIO_FS) += virtiofs.o obj-$(CONFIG_VIRT_FUSE) += virtfuse.o fuse-y := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o ioctl.o +fuse-y += iomode.o fuse-$(CONFIG_FUSE_DAX) += dax.o virtiofs-y := virtio_fs.o diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 4474176604fe..08055ea76c6b 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -112,6 +112,9 @@ static void fuse_file_put(struct fuse_file *ff, bool sync) struct fuse_release_args *ra = ff->release_args; struct fuse_args *args = (ra ? 
&ra->args : NULL); + if (ra && ra->inode) + fuse_file_io_release(ff, ra->inode); + if (!args) { /* Do nothing when server does not implement 'open' */ } else if (sync) { @@ -203,6 +206,11 @@ int fuse_finish_open(struct inode *inode, struct file *file) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = get_fuse_conn(inode); + int err; + + err = fuse_file_io_open(file, inode); + if (err) + return err; if (ff->open_flags & FOPEN_STREAM) stream_open(inode, file); @@ -2514,6 +2522,7 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = ff->fm->fc; + int rc; /* DAX mmap is superior to direct_io mmap */ if (FUSE_IS_DAX(file_inode(file))) @@ -2533,6 +2542,11 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) /* MAP_PRIVATE */ return generic_file_mmap(file, vma); } + + /* First mmap of direct_io file enters caching inode io mode. */ + rc = fuse_file_cached_io_start(file_inode(file), ff); + if (rc) + return rc; } if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) @@ -3301,6 +3315,7 @@ void fuse_init_file_inode(struct inode *inode, unsigned int flags) INIT_LIST_HEAD(&fi->write_files); INIT_LIST_HEAD(&fi->queued_writes); fi->writectr = 0; + fi->iocachectr = 0; init_waitqueue_head(&fi->page_waitq); fi->writepages = RB_ROOT; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 53d98141887c..e2a8eb822013 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -112,7 +112,7 @@ struct fuse_inode { u64 attr_version; union { - /* Write related fields (regular file only) */ + /* read/write io cache (regular file only) */ struct { /* Files usable in writepage. 
Protected by fi->lock */ struct list_head write_files; @@ -124,6 +124,9 @@ struct fuse_inode { * (FUSE_NOWRITE) means more writes are blocked */ int writectr; + /** Number of files/maps using page cache */ + int iocachectr; + /* Waitq for writepage completion */ wait_queue_head_t page_waitq; @@ -188,6 +191,8 @@ enum { FUSE_I_BAD, /* Has btime */ FUSE_I_BTIME, + /* Wants or already has page cache IO */ + FUSE_I_CACHE_IO_MODE, }; struct fuse_conn; @@ -245,6 +250,9 @@ struct fuse_file { /** Wait queue head for poll */ wait_queue_head_t poll_wait; + /** Does file hold a fi->iocachectr refcount? */ + enum { IOM_NONE, IOM_CACHED, IOM_UNCACHED } iomode; + /** Has flock been performed on this file? */ bool flock:1; }; @@ -1344,8 +1352,13 @@ int fuse_fileattr_get(struct dentry *dentry, struct fileattr *fa); int fuse_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa); -/* file.c */ +/* iomode.c */ +int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff); +int fuse_file_io_open(struct file *file, struct inode *inode); +void fuse_file_io_release(struct fuse_file *ff, struct inode *inode); + +/* file.c */ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, unsigned int open_flags, bool isdir); void fuse_file_release(struct inode *inode, struct fuse_file *ff, diff --git a/fs/fuse/iomode.c b/fs/fuse/iomode.c new file mode 100644 index 000000000000..a1a836b2aacc --- /dev/null +++ b/fs/fuse/iomode.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * FUSE inode io modes. + * + * Copyright (c) 2024 CTERA Networks. + */ + +#include "fuse_i.h" + +#include +#include +#include +#include + +/* + * Start cached io mode, where parallel dio writes are not allowed. 
+ */ +int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + int err = 0; + + /* There are no io modes if server does not implement open */ + if (!ff->release_args) + return 0; + + spin_lock(&fi->lock); + if (fi->iocachectr < 0) { + err = -ETXTBSY; + goto unlock; + } + WARN_ON(ff->iomode == IOM_UNCACHED); + if (ff->iomode == IOM_NONE) { + ff->iomode = IOM_CACHED; + if (fi->iocachectr == 0) + set_bit(FUSE_I_CACHE_IO_MODE, &fi->state); + fi->iocachectr++; + } +unlock: + spin_unlock(&fi->lock); + return err; +} + +static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + + spin_lock(&fi->lock); + WARN_ON(fi->iocachectr <= 0); + WARN_ON(ff->iomode != IOM_CACHED); + ff->iomode = IOM_NONE; + fi->iocachectr--; + if (fi->iocachectr == 0) + clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state); + spin_unlock(&fi->lock); +} + +/* Start strictly uncached io mode where cache access is not allowed */ +static int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + int err = 0; + + spin_lock(&fi->lock); + if (fi->iocachectr > 0) { + err = -ETXTBSY; + goto unlock; + } + WARN_ON(ff->iomode != IOM_NONE); + fi->iocachectr--; + ff->iomode = IOM_UNCACHED; +unlock: + spin_unlock(&fi->lock); + return err; +} + +static void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + + spin_lock(&fi->lock); + WARN_ON(fi->iocachectr >= 0); + WARN_ON(ff->iomode != IOM_UNCACHED); + ff->iomode = IOM_NONE; + fi->iocachectr++; + spin_unlock(&fi->lock); +} + +/* Request access to submit new io to inode via open file */ +int fuse_file_io_open(struct file *file, struct inode *inode) +{ + struct fuse_file *ff = file->private_data; + int err; + + /* + * io modes are not relevant with DAX and with server that does not + * implement 
open. + */ + if (FUSE_IS_DAX(inode) || !ff->release_args) + return 0; + + /* + * FOPEN_PARALLEL_DIRECT_WRITES requires FOPEN_DIRECT_IO. + */ + if (!(ff->open_flags & FOPEN_DIRECT_IO)) + ff->open_flags &= ~FOPEN_PARALLEL_DIRECT_WRITES; + + /* + * First parallel dio open denies caching inode io mode. + * First caching file open enters caching inode io mode. + * + * Note that if user opens a file open with O_DIRECT, but server did + * not specify FOPEN_DIRECT_IO, a later fcntl() could remove O_DIRECT, + * so we put the inode in caching mode to prevent parallel dio. + */ + if (ff->open_flags & FOPEN_DIRECT_IO) { + if (ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) + err = fuse_file_uncached_io_start(inode, ff); + else + return 0; + } else { + err = fuse_file_cached_io_start(inode, ff); + } + if (err) + goto fail; + + return 0; + +fail: + pr_debug("failed to open file in requested io mode (open_flags=0x%x, err=%i).\n", + ff->open_flags, err); + /* + * The file open mode determines the inode io mode. + * Using incorrect open mode is a server mistake, which results in + * user visible failure of open() with EIO error. + */ + return -EIO; +} + +/* No more pending io and no new io possible to inode via open/mmapped file */ +void fuse_file_io_release(struct fuse_file *ff, struct inode *inode) +{ + /* + * Last parallel dio close allows caching inode io mode. + * Last caching file close exits caching inode io mode. + */ + switch (ff->iomode) { + case IOM_NONE: + /* Nothing to do */ + break; + case IOM_UNCACHED: + fuse_file_uncached_io_end(inode, ff); + break; + case IOM_CACHED: + fuse_file_cached_io_end(inode, ff); + break; + } +} -- Gitee From 596807363e54cc7333802014849dc8248d772e6c Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Fri, 9 Feb 2024 16:54:37 +0200 Subject: [PATCH 0639/2138] fuse: allow parallel dio writes with FUSE_DIRECT_IO_ALLOW_MMAP ANBZ: #8700 commit 205c1d8026835746d8597e1aa70c370e014e83fa upstream. 
Instead of denying caching mode on parallel dio open, deny caching open only while parallel dio are in-progress and wait for in-progress parallel dio writes before entering inode caching io mode. This allows executing parallel dio when inode is not in caching mode even if shared mmap is allowed, but no mmaps have been performed on the inode in question. An mmap on direct_io file now waits for all in-progress parallel dio writes to complete, so parallel dio writes together with FUSE_DIRECT_IO_ALLOW_MMAP is enabled by this commit. Signed-off-by: Bernd Schubert Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3007 --- fs/fuse/file.c | 41 +++++++++++++++++++++++++++++------------ fs/fuse/fuse_i.h | 5 +++++ fs/fuse/iomode.c | 48 ++++++++++++++++++++++++++++++------------------ 3 files changed, 64 insertions(+), 30 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 08055ea76c6b..ef5a16c267b2 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1334,6 +1334,7 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from struct file *file = iocb->ki_filp; struct fuse_file *ff = file->private_data; struct inode *inode = file_inode(iocb->ki_filp); + struct fuse_inode *fi = get_fuse_inode(inode); /* Server side has to advise that it supports parallel dio writes. */ if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES)) @@ -1346,12 +1347,9 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from if (iocb->ki_flags & IOCB_APPEND) return true; - /* - * Combination of page access and direct-io is difficult, shared locks - * actually introduce a conflict. - */ - if (get_fuse_conn(inode)->direct_io_allow_mmap) - return true; + /* shared locks are not allowed with parallel page cache IO */ + if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state)) + return false; /* Parallel dio beyond EOF is not supported, at least for now. 
*/ if (fuse_io_past_eof(iocb, from)) @@ -1364,6 +1362,7 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, bool *exclusive) { struct inode *inode = file_inode(iocb->ki_filp); + struct fuse_file *ff = iocb->ki_filp->private_data; *exclusive = fuse_dio_wr_exclusive_lock(iocb, from); if (*exclusive) { @@ -1371,10 +1370,14 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, } else { inode_lock_shared(inode); /* - * Previous check was without inode lock and might have raced, - * check again. + * New parallal dio allowed only if inode is not in caching + * mode and denies new opens in caching mode. This check + * should be performed only after taking shared inode lock. + * Previous past eof check was without inode lock and might + * have raced, so check it again. */ - if (fuse_io_past_eof(iocb, from)) { + if (fuse_io_past_eof(iocb, from) || + fuse_file_uncached_io_start(inode, ff) != 0) { inode_unlock_shared(inode); inode_lock(inode); *exclusive = true; @@ -1382,11 +1385,16 @@ static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from, } } -static void fuse_dio_unlock(struct inode *inode, bool exclusive) +static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive) { + struct inode *inode = file_inode(iocb->ki_filp); + struct fuse_file *ff = iocb->ki_filp->private_data; + if (exclusive) { inode_unlock(inode); } else { + /* Allow opens in caching mode after last parallel dio end */ + fuse_file_uncached_io_end(inode, ff); inode_unlock_shared(inode); } } @@ -1668,7 +1676,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) fuse_write_update_attr(inode, iocb->ki_pos, res); } } - fuse_dio_unlock(inode, exclusive); + fuse_dio_unlock(iocb, exclusive); return res; } @@ -2528,6 +2536,10 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) if (FUSE_IS_DAX(file_inode(file))) return fuse_dax_mmap(file, vma); + /* + * FOPEN_DIRECT_IO handling is special compared to O_DIRECT, + 
* as does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP. + */ if (ff->open_flags & FOPEN_DIRECT_IO) { /* * Can't provide the coherency needed for MAP_SHARED @@ -2543,7 +2555,11 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) return generic_file_mmap(file, vma); } - /* First mmap of direct_io file enters caching inode io mode. */ + /* + * First mmap of direct_io file enters caching inode io mode. + * Also waits for parallel dio writers to go into serial mode + * (exclusive instead of shared lock). + */ rc = fuse_file_cached_io_start(file_inode(file), ff); if (rc) return rc; @@ -3317,6 +3333,7 @@ void fuse_init_file_inode(struct inode *inode, unsigned int flags) fi->writectr = 0; fi->iocachectr = 0; init_waitqueue_head(&fi->page_waitq); + init_waitqueue_head(&fi->direct_io_waitq); fi->writepages = RB_ROOT; if (IS_ENABLED(CONFIG_FUSE_DAX)) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index e2a8eb822013..695e3e993dc2 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -130,6 +130,9 @@ struct fuse_inode { /* Waitq for writepage completion */ wait_queue_head_t page_waitq; + /* waitq for direct-io completion */ + wait_queue_head_t direct_io_waitq; + /* List of writepage requestst (pending or sent) */ struct rb_root writepages; }; @@ -1354,6 +1357,8 @@ int fuse_fileattr_set(struct mnt_idmap *idmap, /* iomode.c */ int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff); +int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff); +void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff); int fuse_file_io_open(struct file *file, struct inode *inode); void fuse_file_io_release(struct fuse_file *ff, struct inode *inode); diff --git a/fs/fuse/iomode.c b/fs/fuse/iomode.c index a1a836b2aacc..ea47c76b9df1 100644 --- a/fs/fuse/iomode.c +++ b/fs/fuse/iomode.c @@ -13,21 +13,37 @@ #include /* - * Start cached io mode, where parallel dio writes are not allowed. 
+ * Return true if need to wait for new opens in caching mode. + */ +static inline bool fuse_is_io_cache_wait(struct fuse_inode *fi) +{ + return READ_ONCE(fi->iocachectr) < 0; +} + +/* + * Start cached io mode. + * + * Blocks new parallel dio writes and waits for the in-progress parallel dio + * writes to complete. */ int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff) { struct fuse_inode *fi = get_fuse_inode(inode); - int err = 0; /* There are no io modes if server does not implement open */ if (!ff->release_args) return 0; spin_lock(&fi->lock); - if (fi->iocachectr < 0) { - err = -ETXTBSY; - goto unlock; + /* + * Setting the bit advises new direct-io writes to use an exclusive + * lock - without it the wait below might be forever. + */ + while (fuse_is_io_cache_wait(fi)) { + set_bit(FUSE_I_CACHE_IO_MODE, &fi->state); + spin_unlock(&fi->lock); + wait_event(fi->direct_io_waitq, !fuse_is_io_cache_wait(fi)); + spin_lock(&fi->lock); } WARN_ON(ff->iomode == IOM_UNCACHED); if (ff->iomode == IOM_NONE) { @@ -36,9 +52,8 @@ int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff) set_bit(FUSE_I_CACHE_IO_MODE, &fi->state); fi->iocachectr++; } -unlock: spin_unlock(&fi->lock); - return err; + return 0; } static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff) @@ -56,7 +71,7 @@ static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff) } /* Start strictly uncached io mode where cache access is not allowed */ -static int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff) +int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff) { struct fuse_inode *fi = get_fuse_inode(inode); int err = 0; @@ -74,7 +89,7 @@ static int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff return err; } -static void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff) +void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff) { struct 
fuse_inode *fi = get_fuse_inode(inode); @@ -83,6 +98,8 @@ static void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff) WARN_ON(ff->iomode != IOM_UNCACHED); ff->iomode = IOM_NONE; fi->iocachectr++; + if (!fi->iocachectr) + wake_up(&fi->direct_io_waitq); spin_unlock(&fi->lock); } @@ -106,21 +123,16 @@ int fuse_file_io_open(struct file *file, struct inode *inode) ff->open_flags &= ~FOPEN_PARALLEL_DIRECT_WRITES; /* - * First parallel dio open denies caching inode io mode. * First caching file open enters caching inode io mode. * * Note that if user opens a file open with O_DIRECT, but server did * not specify FOPEN_DIRECT_IO, a later fcntl() could remove O_DIRECT, * so we put the inode in caching mode to prevent parallel dio. */ - if (ff->open_flags & FOPEN_DIRECT_IO) { - if (ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) - err = fuse_file_uncached_io_start(inode, ff); - else - return 0; - } else { - err = fuse_file_cached_io_start(inode, ff); - } + if (ff->open_flags & FOPEN_DIRECT_IO) + return 0; + + err = fuse_file_cached_io_start(inode, ff); if (err) goto fail; -- Gitee From 3b57260d123cabb5accfe3704dbd83ae5430017f Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Thu, 18 Jan 2024 19:46:37 +0800 Subject: [PATCH 0640/2138] anolis: irqchip/loongson-eiointc: Skip handling if there is no pending interrupt ANBZ: #8689 commit 3eece72ded7f ("irqchip/loongson-eiointc: Skip handling if there is no pending interrupt") It is one simple optimization in the interrupt dispatch function eiointc_irq_dispatch(). There are 256 IRQs supported for eiointc on Loongson-3A5000 and Loongson-2K2000 platform, 128 IRQs on Loongson-2K0500 platform, eiointc irq handler reads the bitmap and find pending irqs when irq happens. So there are several consecutive iocsr_read64 operations for the all bits to find all pending irqs. 
If the pending bitmap is zero, it means that there is no pending irq for the this irq bitmap range, we can skip handling to avoid some useless operations such as clearing hw ISR. Signed-off-by: Bibo Mao Acked-by: Huacai Chen Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- drivers/irqchip/irq-loongson-eiointc.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index 503870c7c1cb..8b1845a80968 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -213,6 +213,12 @@ static void eiointc_irq_dispatch(struct irq_desc *desc) for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) { pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3)); + + /* Skip handling if pending bitmap is zero */ + if (!pending) + continue; + + /* Clear the IRQs */ iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3)); while (pending) { int bit = __ffs(pending); -- Gitee From 3c739b121e01678c2ce1c29615ea7188094bd3ad Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Thu, 18 Jan 2024 19:52:46 +0800 Subject: [PATCH 0641/2138] anolis: irqchip/loongson-eiointc: Remove explicit interrupt affinity restore on resume ANBZ: #8689 commit 83c0708719f7 ("irqchip/loongson-eiointc: Remove explicit interrupt affinity restore on resume") During suspend all CPUs except CPU0 are hot-unpluged and all active interrupts are migrated to CPU0. On resume eiointc_router_init() affines all interrupts to CPU0, so the subsequent explicit interrupt affinity restore is redundant. Remove it. [ tglx: Rewrote changelog ] Signed-off-by: Bibo Mao Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240130082722.2912576-4-maobibo@loongson.cn -------------------------------- During suspend and resume, CPUs except CPU0 can be hot-unpluged and IRQs will be migrated to CPU0. 
So it is not necessary to restore irq affinity for eiointc irq controller when system resumes. This patch removes this piece of code about irq affinity restoring in function eiointc_resume(). Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- drivers/irqchip/irq-loongson-eiointc.c | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index 8b1845a80968..fb4f6a1d4318 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -325,23 +325,7 @@ static int eiointc_suspend(void) static void eiointc_resume(void) { - int i, j; - struct irq_desc *desc; - struct irq_data *irq_data; - eiointc_router_init(0); - - for (i = 0; i < nr_pics; i++) { - for (j = 0; j < eiointc_priv[0]->vec_count; j++) { - desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j); - if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) { - raw_spin_lock(&desc->lock); - irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc)); - eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0); - raw_spin_unlock(&desc->lock); - } - } - } } static struct syscore_ops eiointc_syscore_ops = { -- Gitee From 67808b61c05178a25643834ef285cf5be852b93b Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 6 Mar 2024 09:12:13 +0800 Subject: [PATCH 0642/2138] anolis: LoongArch: KVM: Start SW timer only when vcpu is blocking ANBZ: #8689 commit 8bc15d02d5fd ("LoongArch: KVM: Start SW timer only when vcpu is blocking") SW timer is enabled when vcpu thread is scheduled out, and it is to wake up vcpu from blocked queue. If vcpu thread is scheduled out but is not blocked, such as it is preempted by other threads, it is not necessary to enable SW timer. 
Since vcpu thread is still on running queue if it is preempted and SW timer is only to wake up vcpu on blocking queue, so SW timer is not useful in this situation. This patch enables SW timer only when vcpu is scheduled out and is blocking. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/kvm/timer.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index 111328f60872..b0dafe0611ab 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -93,7 +93,8 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu) /* * Freeze the soft-timer and sync the guest stable timer with it. */ - hrtimer_cancel(&vcpu->arch.swtimer); + if (kvm_vcpu_is_blocking(vcpu)) + hrtimer_cancel(&vcpu->arch.swtimer); /* * From LoongArch Reference Manual Volume 1 Chapter 7.6.2 @@ -168,26 +169,20 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu) * Here judge one-shot timer fired by checking whether TVAL is larger * than TCFG */ - if (ticks < cfg) { + if (ticks < cfg) delta = tick_to_ns(vcpu, ticks); - expire = ktime_add_ns(ktime_get(), delta); - vcpu->arch.expire = expire; + else + delta = 0; + + expire = ktime_add_ns(ktime_get(), delta); + vcpu->arch.expire = expire; + if (kvm_vcpu_is_blocking(vcpu)) { /* * HRTIMER_MODE_PINNED is suggested since vcpu may run in * the same physical cpu in next time */ hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED); - } else if (vcpu->stat.generic.blocking) { - /* - * Inject timer interrupt so that halt polling can dectect and exit. - * VCPU is scheduled out already and sleeps in rcuwait queue and - * will not poll pending events again. kvm_queue_irq() is not enough, - * hrtimer swtimer should be used here. 
- */ - expire = ktime_add_ns(ktime_get(), 10); - vcpu->arch.expire = expire; - hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED); } } -- Gitee From 2cb27bcb4159a37ce08976ba320b44a64b66a6ae Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 6 Mar 2024 09:12:13 +0800 Subject: [PATCH 0643/2138] anolis: LoongArch: KVM: Do not restart SW timer when it is expired ANBZ: #8689 commit f66228053e42 ("LoongArch: KVM: Do not restart SW timer when it is expired") LoongArch VCPUs have their own separate HW timers. SW timer is to wake up blocked vcpu thread, rather than HW timer emulation. When blocking vcpu scheduled out, SW timer is used to wakeup blocked vcpu thread and injects timer interrupt. It does not care about whether guest timer is in period mode or oneshot mode, and SW timer needs not to be restarted since vcpu has been woken. This patch does not restart SW timer when it is expired. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/kvm/timer.c | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index b0dafe0611ab..bcc6b6d063d9 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -23,24 +23,6 @@ static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick) return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz); } -/* - * Push timer forward on timeout. - * Handle an hrtimer event by push the hrtimer forward a period. 
- */ -static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu) -{ - unsigned long cfg, period; - - /* Add periodic tick to current expire time */ - cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG); - if (cfg & CSR_TCFG_PERIOD) { - period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL); - hrtimer_add_expires_ns(&vcpu->arch.swtimer, period); - return HRTIMER_RESTART; - } else - return HRTIMER_NORESTART; -} - /* Low level hrtimer wake routine */ enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer) { @@ -50,7 +32,7 @@ enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer) kvm_queue_irq(vcpu, INT_TI); rcuwait_wake_up(&vcpu->wait); - return kvm_count_timeout(vcpu); + return HRTIMER_NORESTART; } /* -- Gitee From 464d00112db817a752ba4ebdbe04a26b5b20beca Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 6 Mar 2024 09:12:13 +0800 Subject: [PATCH 0644/2138] anolis: LoongArch: KVM: Set reserved bits as zero in CPUCFG ANBZ: #8689 commit aebd3bd586c6 ("LoongArch: KVM: Set reserved bits as zero in CPUCFG") Supported CPUCFG information comes from function _kvm_get_cpucfg_mask(). A bit should be zero if it is reserved by HW or if it is not supported by KVM. Also LoongArch software page table walk feature defined in CPUCFG2_LSPW is supported by KVM, it should be enabled by default. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/kvm/vcpu.c | 33 ++++++++++++++++++++++++++------- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 36106922b5d7..3a8779065f73 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -304,11 +304,18 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) return -EINVAL; switch (id) { - case 2: + case LOONGARCH_CPUCFG0: + *v = GENMASK(31, 0); + return 0; + case LOONGARCH_CPUCFG1: + /* CPUCFG1_MSGINT is not supported by KVM */ + *v = GENMASK(25, 0); + return 0; + case LOONGARCH_CPUCFG2: /* CPUCFG2 features unconditionally supported by KVM */ *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV | - CPUCFG2_LAM; + CPUCFG2_LSPW | CPUCFG2_LAM; /* * For the ISA extensions listed below, if one is supported * by the host, then it is also supported by KVM. @@ -318,14 +325,26 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) if (cpu_has_lasx) *v |= CPUCFG2_LASX; + return 0; + case LOONGARCH_CPUCFG3: + *v = GENMASK(16, 0); + return 0; + case LOONGARCH_CPUCFG4: + case LOONGARCH_CPUCFG5: + *v = GENMASK(31, 0); + return 0; + case LOONGARCH_CPUCFG16: + *v = GENMASK(16, 0); + return 0; + case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20: + *v = GENMASK(30, 0); return 0; default: /* - * No restrictions on other valid CPUCFG IDs' values, but - * CPUCFG data is limited to 32 bits as the LoongArch ISA - * manual says (Volume 1, Section 2.2.10.5 "CPUCFG"). + * CPUCFG bits should be zero if reserved by HW or not + * supported by KVM. 
*/ - *v = U32_MAX; + *v = 0; return 0; } } @@ -344,7 +363,7 @@ static int kvm_check_cpucfg(int id, u64 val) return -EINVAL; switch (id) { - case 2: + case LOONGARCH_CPUCFG2: if (!(val & CPUCFG2_LLFTP)) /* Guests must have a constant timer */ return -EINVAL; -- Gitee From cea50fc807f9bdf3e49a2b191c5a3a200334f6cf Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Mon, 25 Mar 2024 14:37:44 +0800 Subject: [PATCH 0645/2138] anolis: LoongArch/smp: Refine some ipi functions on LoongArch platform ANBZ: #8689 It is code refine about ipi handling on LoongArch platform, there are three modifications. 1. Add generic function get_percpu_irq(), replacing some percpu irq functions such as get_ipi_irq()/get_pmc_irq()/get_timer_irq() with get_percpu_irq(). 2. Change definition about parameter action called by function loongson_send_ipi_single() and loongson_send_ipi_mask(), and it is defined as decimal encoding format at ipi sender side. Normal decimal encoding is used rather than binary bitmap encoding for ipi action, ipi hw sender uses decimal encoding code, and ipi receiver will get binary bitmap encoding, the ipi hw will convert it into bitmap in ipi message buffer. 3. Add structure smp_ops on LoongArch platform so that pv ipi can be used later. 
Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/hardirq.h | 4 ++ arch/loongarch/include/asm/irq.h | 11 +++++- arch/loongarch/include/asm/smp.h | 31 +++++++-------- arch/loongarch/kernel/irq.c | 22 +---------- arch/loongarch/kernel/perf_event.c | 14 +------ arch/loongarch/kernel/smp.c | 58 +++++++++++++++++++--------- arch/loongarch/kernel/time.c | 12 +----- 7 files changed, 72 insertions(+), 80 deletions(-) diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h index 0ef3b18f8980..9f0038e19c7f 100644 --- a/arch/loongarch/include/asm/hardirq.h +++ b/arch/loongarch/include/asm/hardirq.h @@ -12,6 +12,10 @@ extern void ack_bad_irq(unsigned int irq); #define ack_bad_irq ack_bad_irq +enum ipi_msg_type { + IPI_RESCHEDULE, + IPI_CALL_FUNCTION, +}; #define NR_IPI 2 typedef struct { diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index ed8e72db0dba..85a3315597b6 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -118,9 +118,18 @@ extern struct fwnode_handle *liointc_handle; extern struct fwnode_handle *pch_lpc_handle; extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; -extern irqreturn_t loongson_ipi_interrupt(int irq, void *dev); extern void fixup_irqs(void); +static inline int get_percpu_irq(int vector) +{ + struct irq_domain *d; + + d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); + if (d) + return irq_create_mapping(d, vector); + + return -EINVAL; +} #include #endif /* _ASM_IRQ_H */ diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h index f81e5f01d619..75d30529748c 100644 --- a/arch/loongarch/include/asm/smp.h +++ b/arch/loongarch/include/asm/smp.h @@ -12,6 +12,13 @@ #include #include +struct smp_ops { + void (*init_ipi)(void); + void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action); + void 
(*send_ipi_single)(int cpu, unsigned int action); +}; + +extern struct smp_ops smp_ops; extern int smp_num_siblings; extern int num_processors; extern int disabled_cpus; @@ -24,8 +31,6 @@ void loongson_prepare_cpus(unsigned int max_cpus); void loongson_boot_secondary(int cpu, struct task_struct *idle); void loongson_init_secondary(void); void loongson_smp_finish(void); -void loongson_send_ipi_single(int cpu, unsigned int action); -void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action); #ifdef CONFIG_HOTPLUG_CPU int loongson_cpu_disable(void); void loongson_cpu_die(unsigned int cpu); @@ -59,9 +64,12 @@ extern int __cpu_logical_map[NR_CPUS]; #define cpu_physical_id(cpu) cpu_logical_map(cpu) -#define SMP_BOOT_CPU 0x1 -#define SMP_RESCHEDULE 0x2 -#define SMP_CALL_FUNCTION 0x4 +#define ACTION_BOOT_CPU 0 +#define ACTION_RESCHEDULE 1 +#define ACTION_CALL_FUNCTION 2 +#define SMP_BOOT_CPU BIT(ACTION_BOOT_CPU) +#define SMP_RESCHEDULE BIT(ACTION_RESCHEDULE) +#define SMP_CALL_FUNCTION BIT(ACTION_CALL_FUNCTION) struct secondary_data { unsigned long stack; @@ -71,7 +79,8 @@ extern struct secondary_data cpuboot_data; extern asmlinkage void smpboot_entry(void); extern asmlinkage void start_secondary(void); - +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); extern void calculate_cpu_foreign_map(void); /* @@ -79,16 +88,6 @@ extern void calculate_cpu_foreign_map(void); */ extern void show_ipi_list(struct seq_file *p, int prec); -static inline void arch_send_call_function_single_ipi(int cpu) -{ - loongson_send_ipi_single(cpu, SMP_CALL_FUNCTION); -} - -static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) -{ - loongson_send_ipi_mask(mask, SMP_CALL_FUNCTION); -} - #ifdef CONFIG_HOTPLUG_CPU static inline int __cpu_disable(void) { diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index ebcdb573104a..dd447be23324 100644 --- 
a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -86,16 +86,6 @@ static void __init init_vec_parent_group(void) acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse); } -static int __init get_ipi_irq(void) -{ - struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); - - if (d) - return irq_create_mapping(d, INT_IPI); - - return -EINVAL; -} - #ifdef CONFIG_HOTPLUG_CPU static void handle_irq_affinity(void) { @@ -135,10 +125,6 @@ void fixup_irqs(void) void __init init_IRQ(void) { int i, ret; -#ifdef CONFIG_SMP - int r, ipi_irq; - static int ipi_dummy_dev; -#endif unsigned int order = get_order(IRQ_STACK_SIZE); struct page *page; @@ -154,13 +140,7 @@ void __init init_IRQ(void) irqchip_init(); } #ifdef CONFIG_SMP - ipi_irq = get_ipi_irq(); - if (ipi_irq < 0) - panic("IPI IRQ mapping failed\n"); - irq_set_percpu_devid(ipi_irq); - r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &ipi_dummy_dev); - if (r < 0) - panic("IPI IRQ request failed\n"); + smp_ops.init_ipi(); #endif for_each_possible_cpu(i) { diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c index cac7cba81b65..f86a4b838dd7 100644 --- a/arch/loongarch/kernel/perf_event.c +++ b/arch/loongarch/kernel/perf_event.c @@ -456,16 +456,6 @@ static void loongarch_pmu_disable(struct pmu *pmu) static DEFINE_MUTEX(pmu_reserve_mutex); static atomic_t active_events = ATOMIC_INIT(0); -static int get_pmc_irq(void) -{ - struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); - - if (d) - return irq_create_mapping(d, INT_PCOV); - - return -EINVAL; -} - static void reset_counters(void *arg); static int __hw_perf_event_init(struct perf_event *event); @@ -473,7 +463,7 @@ static void hw_perf_event_destroy(struct perf_event *event) { if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { on_each_cpu(reset_counters, NULL, 1); - free_irq(get_pmc_irq(), &loongarch_pmu); + free_irq(get_percpu_irq(INT_PCOV), 
&loongarch_pmu); mutex_unlock(&pmu_reserve_mutex); } } @@ -562,7 +552,7 @@ static int loongarch_pmu_event_init(struct perf_event *event) if (event->cpu >= 0 && !cpu_online(event->cpu)) return -ENODEV; - irq = get_pmc_irq(); + irq = get_percpu_irq(INT_PCOV); flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED; if (!atomic_inc_not_zero(&active_events)) { mutex_lock(&pmu_reserve_mutex); diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index b6cdde7aad69..da42a8e6ef3d 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -67,11 +67,6 @@ static cpumask_t cpu_core_setup_map; struct secondary_data cpuboot_data; static DEFINE_PER_CPU(int, cpu_state); -enum ipi_msg_type { - IPI_RESCHEDULE, - IPI_CALL_FUNCTION, -}; - static const char *ipi_types[NR_IPI] __tracepoint_string = { [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNCTION] = "Function call interrupts", @@ -191,24 +186,19 @@ static u32 ipi_read_clear(int cpu) static void ipi_write_action(int cpu, u32 action) { - unsigned int irq = 0; - - while ((irq = ffs(action))) { - uint32_t val = IOCSR_IPI_SEND_BLOCKING; + uint32_t val; - val |= (irq - 1); - val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT); - iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND); - action &= ~BIT(irq - 1); - } + val = IOCSR_IPI_SEND_BLOCKING | action; + val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT); + iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND); } -void loongson_send_ipi_single(int cpu, unsigned int action) +static void loongson_send_ipi_single(int cpu, unsigned int action) { ipi_write_action(cpu_logical_map(cpu), (u32)action); } -void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) +static void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) { unsigned int i; @@ -216,6 +206,16 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) ipi_write_action(cpu_logical_map(i), (u32)action); } +void 
arch_send_call_function_single_ipi(int cpu) +{ + smp_ops.send_ipi_single(cpu, ACTION_CALL_FUNCTION); +} + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + smp_ops.send_ipi_mask(mask, ACTION_CALL_FUNCTION); +} + /* * This function sends a 'reschedule' IPI to another CPU. * it goes straight through and wastes no time serializing @@ -223,11 +223,11 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) */ void arch_smp_send_reschedule(int cpu) { - loongson_send_ipi_single(cpu, SMP_RESCHEDULE); + smp_ops.send_ipi_single(cpu, ACTION_RESCHEDULE); } EXPORT_SYMBOL_GPL(arch_smp_send_reschedule); -irqreturn_t loongson_ipi_interrupt(int irq, void *dev) +static irqreturn_t loongson_ipi_interrupt(int irq, void *dev) { unsigned int action; unsigned int cpu = smp_processor_id(); @@ -247,6 +247,26 @@ irqreturn_t loongson_ipi_interrupt(int irq, void *dev) return IRQ_HANDLED; } +static void loongson_init_ipi(void) +{ + int r, ipi_irq; + + ipi_irq = get_percpu_irq(INT_IPI); + if (ipi_irq < 0) + panic("IPI IRQ mapping failed\n"); + + irq_set_percpu_devid(ipi_irq); + r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &irq_stat); + if (r < 0) + panic("IPI IRQ request failed\n"); +} + +struct smp_ops smp_ops = { + .init_ipi = loongson_init_ipi, + .send_ipi_single = loongson_send_ipi_single, + .send_ipi_mask = loongson_send_ipi_mask, +}; + static void __init fdt_smp_setup(void) { #ifdef CONFIG_OF @@ -324,7 +344,7 @@ void loongson_boot_secondary(int cpu, struct task_struct *idle) csr_mail_send(entry, cpu_logical_map(cpu), 0); - loongson_send_ipi_single(cpu, SMP_BOOT_CPU); + loongson_send_ipi_single(cpu, ACTION_BOOT_CPU); } /* diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index e7015f7b70e3..fd5354f9be7c 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -123,16 +123,6 @@ void sync_counter(void) csr_write64(init_offset, LOONGARCH_CSR_CNTC); } -static int get_timer_irq(void) -{ 
- struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); - - if (d) - return irq_create_mapping(d, INT_TI); - - return -EINVAL; -} - int constant_clockevent_init(void) { unsigned int cpu = smp_processor_id(); @@ -142,7 +132,7 @@ int constant_clockevent_init(void) static int irq = 0, timer_irq_installed = 0; if (!timer_irq_installed) { - irq = get_timer_irq(); + irq = get_percpu_irq(INT_TI); if (irq < 0) pr_err("Failed to map irq %d (timer)\n", irq); } -- Gitee From 2f0d2463969a086e9a5bc036f47d4662e6dc453a Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 19 Jan 2024 09:37:28 +0800 Subject: [PATCH 0646/2138] anolis: LoongArch: KVM: Add hypercall instruction emulation support ANBZ: #8689 On LoongArch system, there is hypercall instruction special for virtualization. When system executes this instruction on host side, there is illegal instruction exception reported, however it will trap into host when it is executed in VM mode. When hypercall is emulated, A0 register is set with value KVM_HCALL_INVALID_CODE, rather than inject EXCCODE_INE invalid instruction exception. So VM can continue executing the next code. 
Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/Kbuild | 1 - arch/loongarch/include/asm/kvm_para.h | 26 ++++++++++++++++++++++++++ arch/loongarch/include/uapi/asm/Kbuild | 2 -- arch/loongarch/kvm/exit.c | 10 ++++++++++ 4 files changed, 36 insertions(+), 3 deletions(-) create mode 100644 arch/loongarch/include/asm/kvm_para.h delete mode 100644 arch/loongarch/include/uapi/asm/Kbuild diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild index dede0b422cfb..27f66930ab6a 100644 --- a/arch/loongarch/include/asm/Kbuild +++ b/arch/loongarch/include/asm/Kbuild @@ -24,4 +24,3 @@ generic-y += poll.h generic-y += param.h generic-y += posix_types.h generic-y += resource.h -generic-y += kvm_para.h diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h new file mode 100644 index 000000000000..d48f993ae206 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_para.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_KVM_PARA_H +#define _ASM_LOONGARCH_KVM_PARA_H + +/* + * LoongArch hypercall return code + */ +#define KVM_HCALL_STATUS_SUCCESS 0 +#define KVM_HCALL_INVALID_CODE -1UL +#define KVM_HCALL_INVALID_PARAMETER -2UL + +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} + +static inline unsigned int kvm_arch_para_hints(void) +{ + return 0; +} + +static inline bool kvm_check_and_clear_guest_paused(void) +{ + return false; +} +#endif /* _ASM_LOONGARCH_KVM_PARA_H */ diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/loongarch/include/uapi/asm/Kbuild deleted file mode 100644 index 4aa680ca2e5f..000000000000 --- a/arch/loongarch/include/uapi/asm/Kbuild +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -generic-y += kvm_para.h diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index ed1d89d53e2e..923bbca9bd22 100644 --- 
a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -685,6 +685,15 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) +{ + update_pc(&vcpu->arch); + + /* Treat it as noop intruction, only set return value */ + vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; + return RESUME_GUEST; +} + /* * LoongArch KVM callback handling for unimplemented guest exiting */ @@ -716,6 +725,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { [EXCCODE_LSXDIS] = kvm_handle_lsx_disabled, [EXCCODE_LASXDIS] = kvm_handle_lasx_disabled, [EXCCODE_GSPR] = kvm_handle_gspr, + [EXCCODE_HVC] = kvm_handle_hypercall, }; int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault) -- Gitee From bac474cf89457ea79462e5d6995478e02a440142 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 6 Mar 2024 10:23:16 +0800 Subject: [PATCH 0647/2138] anolis: LoongArch: KVM: Add cpucfg area for kvm hypervisor ANBZ: #8689 Instruction cpucfg can be used to get processor features. And there is trap exception when it is executed in VM mode, and also it is to provide cpu features to VM. On real hardware cpucfg area 0 - 20 is used. Here one specified area 0x40000000 -- 0x400000ff is used for KVM hypervisor to provide PV features, and the area can be extended for other hypervisors in future. This area will never be used for real HW, it is only used by software. 
Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/inst.h | 1 + arch/loongarch/include/asm/loongarch.h | 10 +++++ arch/loongarch/kvm/exit.c | 59 +++++++++++++++++++------- 3 files changed, 54 insertions(+), 16 deletions(-) diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index 77a9fcf8e879..a43b6a1fe2f1 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -65,6 +65,7 @@ enum reg2_op { revbd_op = 0x0f, revh2w_op = 0x10, revhd_op = 0x11, + cpucfg_op = 0x1b, iocsrrdb_op = 0x19200, iocsrrdh_op = 0x19201, iocsrrdw_op = 0x19202, diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index a1774ba0167b..c471f471ae58 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -158,6 +158,16 @@ #define CPUCFG48_VFPU_CG BIT(2) #define CPUCFG48_RAM_CG BIT(3) +/* + * cpucfg index area: 0x40000000 -- 0x400000ff + * SW emulation for KVM hypervirsor + */ +#define CPUCFG_KVM_BASE 0x40000000UL +#define CPUCFG_KVM_SIZE 0x100 +#define CPUCFG_KVM_SIG CPUCFG_KVM_BASE +#define KVM_SIGNATURE "KVM\0" +#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4) + #ifndef __ASSEMBLY__ /* CSR */ diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 923bbca9bd22..a8d3b652d3ea 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -206,10 +206,50 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu) return EMULATE_DONE; } -static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) +static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) { int rd, rj; unsigned int index; + unsigned long plv; + + rd = inst.reg2_format.rd; + rj = inst.reg2_format.rj; + ++vcpu->stat.cpucfg_exits; + index = vcpu->arch.gprs[rj]; + + /* + * By LoongArch Reference Manual 2.2.10.5 + * Return value is 0 for undefined cpucfg index + * + * Disable 
preemption since hw gcsr is accessed + */ + preempt_disable(); + plv = kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD) >> CSR_CRMD_PLV_SHIFT; + switch (index) { + case 0 ... (KVM_MAX_CPUCFG_REGS - 1): + vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index]; + break; + case CPUCFG_KVM_SIG: + /* + * Cpucfg emulation between 0x40000000 -- 0x400000ff + * Return value with 0 if executed in user mode + */ + if ((plv & CSR_CRMD_PLV) == PLV_KERN) + vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE; + else + vcpu->arch.gprs[rd] = 0; + break; + default: + vcpu->arch.gprs[rd] = 0; + break; + } + + preempt_enable(); + return EMULATE_DONE; +} + +static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) +{ unsigned long curr_pc; larch_inst inst; enum emulation_result er = EMULATE_DONE; @@ -224,21 +264,8 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) er = EMULATE_FAIL; switch (((inst.word >> 24) & 0xff)) { case 0x0: /* CPUCFG GSPR */ - if (inst.reg2_format.opcode == 0x1B) { - rd = inst.reg2_format.rd; - rj = inst.reg2_format.rj; - ++vcpu->stat.cpucfg_exits; - index = vcpu->arch.gprs[rj]; - er = EMULATE_DONE; - /* - * By LoongArch Reference Manual 2.2.10.5 - * return value is 0 for undefined cpucfg index - */ - if (index < KVM_MAX_CPUCFG_REGS) - vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index]; - else - vcpu->arch.gprs[rd] = 0; - } + if (inst.reg2_format.opcode == cpucfg_op) + er = kvm_emu_cpucfg(vcpu, inst); break; case 0x4: /* CSR{RD,WR,XCHG} GSPR */ er = kvm_handle_csr(vcpu, inst); -- Gitee From ff3e031e142ce80535bda5609dbe2cb8f1da90f5 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 5 Jan 2024 16:20:34 +0800 Subject: [PATCH 0648/2138] anolis: LoongArch: KVM: Add vcpu search support from physical cpuid ANBZ: #8689 Physical cpuid is used for interrupt routing for irqchips such as ipi/msi/extioi interrupt controller. And physical cpuid is stored at CSR register LOONGARCH_CSR_CPUID, it can not be changed once vcpu is created and physical cpuid of two vcpus cannot be the same. 
Different irqchips have different size declaration about physical cpuid, max cpuid value for CSR LOONGARCH_CSR_CPUID on 3A5000 is 512, max cpuid supported by IPI hardware is 1024, 256 for extioi irqchip, and 65536 for MSI irqchip. The smallest value from all interrupt controllers is selected now, and the max cpuid size is defined as 256 by KVM which comes from extioi irqchip. Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_host.h | 26 ++++++++ arch/loongarch/include/asm/kvm_vcpu.h | 1 + arch/loongarch/kvm/vcpu.c | 93 ++++++++++++++++++++++++++- arch/loongarch/kvm/vm.c | 11 ++++ 4 files changed, 130 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 5bdb34b2c5d6..e5ba021679f4 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -64,6 +64,30 @@ struct kvm_world_switch { #define MAX_PGTABLE_LEVELS 4 +/* + * Physical cpu id is used for interrupt routing, there are different + * definitions about physical cpuid on different hardwares. + * For LOONGARCH_CSR_CPUID register, max cpuid size if 512 + * For IPI HW, max dest CPUID size 1024 + * For extioi interrupt controller, max dest CPUID size is 256 + * For MSI interrupt controller, max supported CPUID size is 65536 + * + * Currently max CPUID is defined as 256 for KVM hypervisor, in future + * it will be expanded to 4096, including 16 packages at most. 
And every + * package supports at most 256 vcpus + */ +#define KVM_MAX_PHYID 256 + +struct kvm_phyid_info { + struct kvm_vcpu *vcpu; + bool enabled; +}; + +struct kvm_phyid_map { + int max_phyid; + struct kvm_phyid_info phys_map[KVM_MAX_PHYID]; +}; + struct kvm_arch { /* Guest physical mm */ kvm_pte_t *pgd; @@ -71,6 +95,8 @@ struct kvm_arch { unsigned long invalid_ptes[MAX_PGTABLE_LEVELS]; unsigned int pte_shifts[MAX_PGTABLE_LEVELS]; unsigned int root_level; + spinlock_t phyid_map_lock; + struct kvm_phyid_map *phyid_map; s64 time_offset; struct kvm_context __percpu *vmcs; diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 0cb4fdb8a9b5..9f53950959da 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -81,6 +81,7 @@ void kvm_save_timer(struct kvm_vcpu *vcpu); void kvm_restore_timer(struct kvm_vcpu *vcpu); int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq); +struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid); /* * Loongarch KVM guest interrupt handling diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 3a8779065f73..b633fd28b8db 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -274,6 +274,95 @@ static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) return 0; } +static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val) +{ + int cpuid; + struct loongarch_csrs *csr = vcpu->arch.csr; + struct kvm_phyid_map *map; + + if (val >= KVM_MAX_PHYID) + return -EINVAL; + + cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT); + map = vcpu->kvm->arch.phyid_map; + spin_lock(&vcpu->kvm->arch.phyid_map_lock); + if (map->phys_map[cpuid].enabled) { + /* + * Cpuid is already set before + * Forbid changing different cpuid at runtime + */ + if (cpuid != val) { + /* + * Cpuid 0 is initial value for vcpu, maybe invalid + * unset value for vcpu + */ + if (cpuid) { + 
spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return -EINVAL; + } + } else { + /* Discard duplicated cpuid set */ + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return 0; + } + } + + if (map->phys_map[val].enabled) { + /* + * New cpuid is already set with other vcpu + * Forbid sharing the same cpuid between different vcpus + */ + if (map->phys_map[val].vcpu != vcpu) { + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return -EINVAL; + } + + /* Discard duplicated cpuid set operation*/ + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return 0; + } + + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val); + map->phys_map[val].enabled = true; + map->phys_map[val].vcpu = vcpu; + if (map->max_phyid < val) + map->max_phyid = val; + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return 0; +} + +struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid) +{ + struct kvm_phyid_map *map; + + if (cpuid >= KVM_MAX_PHYID) + return NULL; + + map = kvm->arch.phyid_map; + if (map->phys_map[cpuid].enabled) + return map->phys_map[cpuid].vcpu; + + return NULL; +} + +static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu) +{ + int cpuid; + struct loongarch_csrs *csr = vcpu->arch.csr; + struct kvm_phyid_map *map; + + map = vcpu->kvm->arch.phyid_map; + cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT); + if (cpuid >= KVM_MAX_PHYID) + return; + + if (map->phys_map[cpuid].enabled) { + map->phys_map[cpuid].vcpu = NULL; + map->phys_map[cpuid].enabled = false; + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, 0); + } +} + static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) { int ret = 0, gintc; @@ -291,7 +380,8 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc); return ret; - } + } else if (id == LOONGARCH_CSR_CPUID) + return kvm_set_cpuid(vcpu, val); kvm_write_sw_gcsr(csr, id, val); @@ -943,6 +1033,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) hrtimer_cancel(&vcpu->arch.swtimer); 
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); kfree(vcpu->arch.csr); + kvm_drop_cpuid(vcpu); /* * If the vCPU is freed and reused as another vCPU, we don't want the diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 0a37f6fa8f2d..6006a28653ad 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -30,6 +30,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (!kvm->arch.pgd) return -ENOMEM; + kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map), + GFP_KERNEL_ACCOUNT); + if (!kvm->arch.phyid_map) { + free_page((unsigned long)kvm->arch.pgd); + kvm->arch.pgd = NULL; + return -ENOMEM; + } + kvm_init_vmcs(kvm); kvm->arch.gpa_size = BIT(cpu_vabits - 1); kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1; @@ -44,6 +52,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) for (i = 0; i <= kvm->arch.root_level; i++) kvm->arch.pte_shifts[i] = PAGE_SHIFT + i * (PAGE_SHIFT - 3); + spin_lock_init(&kvm->arch.phyid_map_lock); return 0; } @@ -51,7 +60,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_destroy_vcpus(kvm); free_page((unsigned long)kvm->arch.pgd); + kvfree(kvm->arch.phyid_map); kvm->arch.pgd = NULL; + kvm->arch.phyid_map = NULL; } int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) -- Gitee From 9e8a56d78290b502eff83f43fb8ad8ffeed2f98f Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 1 Mar 2024 18:17:29 +0800 Subject: [PATCH 0649/2138] anolis: LoongArch: KVM: Add pv ipi support on kvm side ANBZ: #8689 On LoongArch system, ipi hw uses iocsr registers, there is one iocsr register access on ipi sending, and two iocsr access on ipi receiving which is ipi interrupt handler. On VM mode all iocsr accessing will cause VM to trap into hypervisor. So with one ipi hw notification there will be three times of trap. PV ipi is added for VM, hypercall instruction is used for ipi sender, and hypervisor will inject SWI to destination vcpu. 
During SWI interrupt handler, only estat CSR register is written to clear irq. Estat CSR register access will not trap into hypervisor. So with pv ipi supported, there is one trap with pv ipi sender, and no trap with ipi receiver, there is only one trap with ipi notification. Also this patch adds ipi multicast support, the method is similar with x86. With ipi multicast support, ipi notification can be sent to at most 128 vcpus at one time. It reduces trap times into hypervisor greatly. Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_host.h | 1 + arch/loongarch/include/asm/kvm_para.h | 130 +++++++++++++++++++++++++ arch/loongarch/include/asm/loongarch.h | 1 + arch/loongarch/kvm/exit.c | 76 ++++++++++++++- arch/loongarch/kvm/vcpu.c | 1 + 5 files changed, 207 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index e5ba021679f4..c31619ae1fb4 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -43,6 +43,7 @@ struct kvm_vcpu_stat { u64 idle_exits; u64 cpucfg_exits; u64 signal_exits; + u64 hypercall_exits; }; #define KVM_MEM_HUGEPAGE_CAPABLE (1UL << 0) diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index d48f993ae206..a82bffbbf8a1 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -2,6 +2,16 @@ #ifndef _ASM_LOONGARCH_KVM_PARA_H #define _ASM_LOONGARCH_KVM_PARA_H +/* + * Hypercall code field + */ +#define HYPERVISOR_KVM 1 +#define HYPERVISOR_VENDOR_SHIFT 8 +#define HYPERCALL_CODE(vendor, code) ((vendor << HYPERVISOR_VENDOR_SHIFT) + code) +#define KVM_HCALL_CODE_PV_SERVICE 0 +#define KVM_HCALL_PV_SERVICE HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_PV_SERVICE) +#define KVM_HCALL_FUNC_PV_IPI 1 + /* * LoongArch hypercall return code */ @@ -9,6 +19,126 @@ 
#define KVM_HCALL_INVALID_CODE -1UL #define KVM_HCALL_INVALID_PARAMETER -2UL +/* + * Hypercall interface for KVM hypervisor + * + * a0: function identifier + * a1-a6: args + * Return value will be placed in v0. + * Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6. + */ +static __always_inline long kvm_hypercall(u64 fid) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r" (fun) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r" (fun), "r" (a1) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall2(u64 fid, + unsigned long arg0, unsigned long arg1) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r" (fun), "r" (a1), "r" (a2) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall3(u64 fid, + unsigned long arg0, unsigned long arg1, unsigned long arg2) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + register unsigned long a3 asm("a3") = arg2; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r" (fun), "r" (a1), "r" (a2), "r" (a3) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall4(u64 fid, + unsigned long arg0, unsigned long arg1, unsigned long arg2, + unsigned long arg3) +{ + register long ret asm("v0"); + register 
unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + register unsigned long a3 asm("a3") = arg2; + register unsigned long a4 asm("a4") = arg3; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall5(u64 fid, + unsigned long arg0, unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + register unsigned long a3 asm("a3") = arg2; + register unsigned long a4 asm("a4") = arg3; + register unsigned long a5 asm("a5") = arg4; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5) + : "memory" + ); + + return ret; +} + + static inline unsigned int kvm_arch_para_features(void) { return 0; diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index c471f471ae58..35e7bd31fdc7 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -167,6 +167,7 @@ #define CPUCFG_KVM_SIG CPUCFG_KVM_BASE #define KVM_SIGNATURE "KVM\0" #define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4) +#define KVM_FEATURE_PV_IPI BIT(1) #ifndef __ASSEMBLY__ diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index a8d3b652d3ea..933879ad0ddc 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -239,6 +239,12 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) else vcpu->arch.gprs[rd] = 0; break; + case CPUCFG_KVM_FEATURE: + if ((plv & CSR_CRMD_PLV) == PLV_KERN) + vcpu->arch.gprs[rd] = KVM_FEATURE_PV_IPI; + else + vcpu->arch.gprs[rd] = 0; + break; default: 
vcpu->arch.gprs[rd] = 0; break; @@ -712,12 +718,78 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +static int kvm_pv_send_ipi(struct kvm_vcpu *vcpu) +{ + unsigned long ipi_bitmap; + unsigned int min, cpu, i; + struct kvm_vcpu *dest; + + min = vcpu->arch.gprs[LOONGARCH_GPR_A3]; + for (i = 0; i < 2; i++, min += BITS_PER_LONG) { + ipi_bitmap = vcpu->arch.gprs[LOONGARCH_GPR_A1 + i]; + if (!ipi_bitmap) + continue; + + cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG); + while (cpu < BITS_PER_LONG) { + dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min); + cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, + cpu + 1); + if (!dest) + continue; + + /* + * Send SWI0 to dest vcpu to emulate IPI interrupt + */ + kvm_queue_irq(dest, INT_SWI0); + kvm_vcpu_kick(dest); + } + } + + return 0; +} + +/* + * hypercall emulation always return to guest, Caller should check retval. + */ +static void kvm_handle_pv_service(struct kvm_vcpu *vcpu) +{ + unsigned long func = vcpu->arch.gprs[LOONGARCH_GPR_A0]; + long ret; + + switch (func) { + case KVM_HCALL_FUNC_PV_IPI: + kvm_pv_send_ipi(vcpu); + ret = KVM_HCALL_STATUS_SUCCESS; + break; + default: + ret = KVM_HCALL_INVALID_CODE; + break; + }; + + vcpu->arch.gprs[LOONGARCH_GPR_A0] = ret; +} + static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) { + larch_inst inst; + unsigned int code; + + inst.word = vcpu->arch.badi; + code = inst.reg0i15_format.immediate; update_pc(&vcpu->arch); - /* Treat it as noop intruction, only set return value */ - vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; + switch (code) { + case KVM_HCALL_PV_SERVICE: + vcpu->stat.hypercall_exits++; + kvm_handle_pv_service(vcpu); + break; + default: + /* Treat it as noop intruction, only set return value */ + vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; + break; + } + return RESUME_GUEST; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index b633fd28b8db..76f2086ab68b 100644 --- 
a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -19,6 +19,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { STATS_DESC_COUNTER(VCPU, idle_exits), STATS_DESC_COUNTER(VCPU, cpucfg_exits), STATS_DESC_COUNTER(VCPU, signal_exits), + STATS_DESC_COUNTER(VCPU, hypercall_exits) }; const struct kvm_stats_header kvm_vcpu_stats_header = { -- Gitee From 524b48c1881f2dbeb5dd55ce89ea733fd46a4816 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 1 Mar 2024 11:42:21 +0800 Subject: [PATCH 0650/2138] anolis: LoongArch: Add pv ipi support on guest kernel side ANBZ: #8689 PARAVIRT option and pv ipi is added on guest kernel side, function pv_ipi_init() is to add ipi sending and ipi receiving hooks. This function firstly checks whether system runs on VM mode. If kernel runs on VM mode, it will call function kvm_para_available() to detect current hypervisor type. Now only KVM type detection is supported, the paravirt function can work only if current hypervisor type is KVM, since there is only KVM supported on LoongArch now. PV IPI uses virtual IPI sender and virtual IPI receiver function. With virtual IPI sender, ipi message is stored in DDR memory rather than emulated HW. IPI multicast is supported, and 128 vcpus can receive IPIs at the same time like X86 KVM method. Hypercall method is used for IPI sending. With virtual IPI receiver, HW SW0 is used rather than real IPI HW. Since VCPU has separate HW SW0 like HW timer, there is no trap in IPI interrupt acknowledge. And IPI message is stored in DDR, no trap in get IPI message. 
Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/Kconfig | 9 ++ arch/loongarch/include/asm/hardirq.h | 1 + arch/loongarch/include/asm/paravirt.h | 27 ++++ .../include/asm/paravirt_api_clock.h | 10 ++ arch/loongarch/kernel/Makefile | 1 + arch/loongarch/kernel/irq.c | 2 +- arch/loongarch/kernel/paravirt.c | 151 ++++++++++++++++++ arch/loongarch/kernel/smp.c | 4 +- 8 files changed, 203 insertions(+), 2 deletions(-) create mode 100644 arch/loongarch/include/asm/paravirt.h create mode 100644 arch/loongarch/include/asm/paravirt_api_clock.h create mode 100644 arch/loongarch/kernel/paravirt.c diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index bad326ae58f2..7b82992af3c4 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -565,6 +565,15 @@ config CPU_HAS_PREFETCH bool default y +config PARAVIRT + bool "Enable paravirtualization code" + depends on AS_HAS_LVZ_EXTENSION + help + This changes the kernel so it can modify itself when it is run + under a hypervisor, potentially improving performance significantly + over full virtualization. However, when run without a hypervisor + the kernel is theoretically slower and slightly larger. 
+ config ARCH_SUPPORTS_KEXEC def_bool y diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h index 9f0038e19c7f..b26d596a73aa 100644 --- a/arch/loongarch/include/asm/hardirq.h +++ b/arch/loongarch/include/asm/hardirq.h @@ -21,6 +21,7 @@ enum ipi_msg_type { typedef struct { unsigned int ipi_irqs[NR_IPI]; unsigned int __softirq_pending; + atomic_t message ____cacheline_aligned_in_smp; } ____cacheline_aligned irq_cpustat_t; DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h new file mode 100644 index 000000000000..58f7b7b89f2c --- /dev/null +++ b/arch/loongarch/include/asm/paravirt.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_PARAVIRT_H +#define _ASM_LOONGARCH_PARAVIRT_H + +#ifdef CONFIG_PARAVIRT +#include +struct static_key; +extern struct static_key paravirt_steal_enabled; +extern struct static_key paravirt_steal_rq_enabled; + +u64 dummy_steal_clock(int cpu); +DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock); + +static inline u64 paravirt_steal_clock(int cpu) +{ + return static_call(pv_steal_clock)(cpu); +} + +int pv_ipi_init(void); +#else +static inline int pv_ipi_init(void) +{ + return 0; +} + +#endif // CONFIG_PARAVIRT +#endif diff --git a/arch/loongarch/include/asm/paravirt_api_clock.h b/arch/loongarch/include/asm/paravirt_api_clock.h new file mode 100644 index 000000000000..8a418f0b4fd5 --- /dev/null +++ b/arch/loongarch/include/asm/paravirt_api_clock.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ +#ifndef _ASM_API_CLOCK_H +#define _ASM_API_CLOCK_H + +#include + +#endif diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index 10ee5fc7ac3e..6c148ccea674 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -49,6 +49,7 @@ obj-$(CONFIG_MODULES) += 
module.o module-sections.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_PROC_FS) += proc.o +obj-$(CONFIG_PARAVIRT) += paravirt.o obj-$(CONFIG_SMP) += smp.o diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index dd447be23324..0a2243c8847a 100644 --- a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -151,5 +151,5 @@ void __init init_IRQ(void) per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE); } - set_csr_ecfg(ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC); + set_csr_ecfg(ECFGF_SIP0 | ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC); } diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c new file mode 100644 index 000000000000..9044ed62045c --- /dev/null +++ b/arch/loongarch/kernel/paravirt.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include + +struct static_key paravirt_steal_enabled; +struct static_key paravirt_steal_rq_enabled; + +static u64 native_steal_clock(int cpu) +{ + return 0; +} + +DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); + +#ifdef CONFIG_SMP +static void pv_send_ipi_single(int cpu, unsigned int action) +{ + unsigned int min, old; + irq_cpustat_t *info = &per_cpu(irq_stat, cpu); + + old = atomic_fetch_or(BIT(action), &info->message); + if (old) + return; + + min = cpu_logical_map(cpu); + kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, 1, 0, min); +} + +#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG) +static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action) +{ + unsigned int cpu, i, min = 0, max = 0, old; + __uint128_t bitmap = 0; + irq_cpustat_t *info; + + if (cpumask_empty(mask)) + return; + + action = BIT(action); + for_each_cpu(i, mask) { + info = &per_cpu(irq_stat, i); + old = atomic_fetch_or(action, &info->message); + if (old) + continue; + + cpu = cpu_logical_map(i); + if (!bitmap) { + min = max = cpu; + } else if (cpu > min && cpu < min + 
KVM_IPI_CLUSTER_SIZE) { + max = cpu > max ? cpu : max; + } else if (cpu < min && (max - cpu) < KVM_IPI_CLUSTER_SIZE) { + bitmap <<= min - cpu; + min = cpu; + } else { + /* + * Physical cpuid is sorted in ascending order ascend + * for the next mask calculation, send IPI here + * directly and skip the remainding cpus + */ + kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, + (unsigned long)bitmap, + (unsigned long)(bitmap >> BITS_PER_LONG), min); + min = max = cpu; + bitmap = 0; + } + __set_bit(cpu - min, (unsigned long *)&bitmap); + } + + if (bitmap) + kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, (unsigned long)bitmap, + (unsigned long)(bitmap >> BITS_PER_LONG), min); +} + +static irqreturn_t loongson_do_swi(int irq, void *dev) +{ + irq_cpustat_t *info; + long action; + + /* Clear swi interrupt */ + clear_csr_estat(1 << INT_SWI0); + info = this_cpu_ptr(&irq_stat); + action = atomic_xchg(&info->message, 0); + if (action & SMP_CALL_FUNCTION) { + generic_smp_call_function_interrupt(); + info->ipi_irqs[IPI_CALL_FUNCTION]++; + } + + if (action & SMP_RESCHEDULE) { + scheduler_ipi(); + info->ipi_irqs[IPI_RESCHEDULE]++; + } + + return IRQ_HANDLED; +} + +static void pv_init_ipi(void) +{ + int r, swi0; + + swi0 = get_percpu_irq(INT_SWI0); + if (swi0 < 0) + panic("SWI0 IRQ mapping failed\n"); + irq_set_percpu_devid(swi0); + r = request_percpu_irq(swi0, loongson_do_swi, "SWI0", &irq_stat); + if (r < 0) + panic("SWI0 IRQ request failed\n"); +} +#endif + +static bool kvm_para_available(void) +{ + static int hypervisor_type; + int config; + + if (!hypervisor_type) { + config = read_cpucfg(CPUCFG_KVM_SIG); + if (!memcmp(&config, KVM_SIGNATURE, 4)) + hypervisor_type = HYPERVISOR_KVM; + } + + return hypervisor_type == HYPERVISOR_KVM; +} + +int __init pv_ipi_init(void) +{ + int feature; + + if (!cpu_has_hypervisor) + return 0; + if (!kvm_para_available()) + return 0; + + /* + * check whether KVM hypervisor supports pv_ipi or not + */ + feature = read_cpucfg(CPUCFG_KVM_FEATURE); +#ifdef CONFIG_SMP + if 
(feature & KVM_FEATURE_PV_IPI) { + smp_ops.init_ipi = pv_init_ipi; + smp_ops.send_ipi_single = pv_send_ipi_single; + smp_ops.send_ipi_mask = pv_send_ipi_mask; + } +#endif + + return 1; +} diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index da42a8e6ef3d..974303b6084c 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -309,6 +310,7 @@ void __init loongson_smp_setup(void) cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package; cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package; + pv_ipi_init(); iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN); pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus); } @@ -353,7 +355,7 @@ void loongson_boot_secondary(int cpu, struct task_struct *idle) void loongson_init_secondary(void) { unsigned int cpu = smp_processor_id(); - unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | + unsigned int imask = ECFGF_SIP0 | ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER; change_csr_ecfg(ECFG0_IM, imask); -- Gitee From cc268942fb628461dcd996d147d8e102724608bf Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Sat, 2 Mar 2024 15:20:50 +0800 Subject: [PATCH 0651/2138] anolis: Documentation: KVM: Add hypercall for LoongArch ANBZ: #8689 Add documentation topic for using pv_virt when running as a guest on KVM hypervisor. 
Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- Documentation/virt/kvm/index.rst | 1 + .../virt/kvm/loongarch/hypercalls.rst | 79 +++++++++++++++++++ Documentation/virt/kvm/loongarch/index.rst | 10 +++ 3 files changed, 90 insertions(+) create mode 100644 Documentation/virt/kvm/loongarch/hypercalls.rst create mode 100644 Documentation/virt/kvm/loongarch/index.rst diff --git a/Documentation/virt/kvm/index.rst b/Documentation/virt/kvm/index.rst index ad13ec55ddfe..9ca5a45c2140 100644 --- a/Documentation/virt/kvm/index.rst +++ b/Documentation/virt/kvm/index.rst @@ -14,6 +14,7 @@ KVM s390/index ppc-pv x86/index + loongarch/index locking vcpu-requests diff --git a/Documentation/virt/kvm/loongarch/hypercalls.rst b/Documentation/virt/kvm/loongarch/hypercalls.rst new file mode 100644 index 000000000000..1679e48d67d2 --- /dev/null +++ b/Documentation/virt/kvm/loongarch/hypercalls.rst @@ -0,0 +1,79 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=================================== +The LoongArch paravirtual interface +=================================== + +KVM hypercalls use the HVCL instruction with code 0x100, and the hypercall +number is put in a0 and up to five arguments may be placed in a1-a5, the +return value is placed in v0 (alias with a0). + +The code for that interface can be found in arch/loongarch/kvm/* + +Querying for existence +====================== + +To find out if we're running on KVM or not, cpucfg can be used with index +CPUCFG_KVM_BASE (0x40000000), cpucfg range between 0x40000000 - 0x400000FF +is marked as a specially reserved range. All existing and future processors +will not implement any features in this range. + +When Linux is running on KVM, cpucfg with index CPUCFG_KVM_BASE (0x40000000) +returns magic string "KVM\0" + +Once you determined you're running under a PV capable KVM, you can now use +hypercalls as described below. 
+ +KVM hypercall ABI +================= + +Hypercall ABI on KVM is simple, only one scratch register a0 (v0) and at most +five generic registers used as input parameter. FP register and vector register +is not used for input register and should not be modified during hypercall. +Hypercall function can be inlined since there is only one scratch register. + +The parameters are as follows: + + ======== ================ ================ + Register IN OUT + ======== ================ ================ + a0 function number Return code + a1 1st parameter - + a2 2nd parameter - + a3 3rd parameter - + a4 4th parameter - + a5 5th parameter - + ======== ================ ================ + +Return codes can be as follows: + + ==== ========================= + Code Meaning + ==== ========================= + 0 Success + -1 Hypercall not implemented + -2 Hypercall parameter error + ==== ========================= + +KVM Hypercalls Documentation +============================ + +The template for each hypercall is: +1. Hypercall name +2. Purpose + +1. KVM_HCALL_FUNC_PV_IPI +------------------------ + +:Purpose: Send IPIs to multiple vCPUs. + +- a0: KVM_HCALL_FUNC_PV_IPI +- a1: lower part of the bitmap of destination physical CPUIDs +- a2: higher part of the bitmap of destination physical CPUIDs +- a3: the lowest physical CPUID in bitmap + +The hypercall lets a guest send multicast IPIs, with at most 128 +destinations per hypercall. The destinations are represented by a bitmap +contained in the first two arguments (a1 and a2). Bit 0 of a1 corresponds +to the physical CPUID in the third argument (a3), bit 1 corresponds to the +physical ID a3+1, and so on. diff --git a/Documentation/virt/kvm/loongarch/index.rst b/Documentation/virt/kvm/loongarch/index.rst new file mode 100644 index 000000000000..83387b4c5345 --- /dev/null +++ b/Documentation/virt/kvm/loongarch/index.rst @@ -0,0 +1,10 @@ +.. 
SPDX-License-Identifier: GPL-2.0 + +========================= +KVM for LoongArch systems +========================= + +.. toctree:: + :maxdepth: 2 + + hypercalls.rst -- Gitee From 35d34e415474fcc543f967f10c5058e03954a58f Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 6 Mar 2024 11:03:13 +0800 Subject: [PATCH 0652/2138] anolis: LoongArch: KVM: Add software breakpoint support ANBZ: #8689 When VM runs in kvm mode, system will not exit to host mode if executing general software breakpoint instruction, one trap exception happens in guest mode rather than host mode. In order to debug guest kernel on host side, one mechanism should be used to let vm exit to host mode. Here one special hypercall code is used for software breakpoint usage, vm exists to host mode and kvm hypervisor identifies the special hypercall code and sets exit_reason with KVM_EXIT_DEBUG, and then let qemu handle it. Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/inst.h | 1 + arch/loongarch/include/asm/kvm_host.h | 2 ++ arch/loongarch/include/asm/kvm_para.h | 2 ++ arch/loongarch/include/uapi/asm/kvm.h | 4 ++++ arch/loongarch/kvm/exit.c | 16 ++++++++++++++-- arch/loongarch/kvm/vcpu.c | 13 ++++++++++++- arch/loongarch/kvm/vm.c | 1 + 7 files changed, 36 insertions(+), 3 deletions(-) diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index a43b6a1fe2f1..1d43a781a2dd 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -12,6 +12,7 @@ #define INSN_NOP 0x03400000 #define INSN_BREAK 0x002a0000 +#define INSN_HVCL 0x002b8000 #define ADDR_IMMMASK_LU52ID 0xFFF0000000000000 #define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000 diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index c31619ae1fb4..3a92b6b024d5 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ 
b/arch/loongarch/include/asm/kvm_host.h @@ -31,6 +31,8 @@ #define KVM_HALT_POLL_NS_DEFAULT 500000 +#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ + KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP) struct kvm_vm_stat { struct kvm_vm_stat_generic generic; u64 pages; diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index a82bffbbf8a1..db4579923542 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -9,8 +9,10 @@ #define HYPERVISOR_VENDOR_SHIFT 8 #define HYPERCALL_CODE(vendor, code) ((vendor << HYPERVISOR_VENDOR_SHIFT) + code) #define KVM_HCALL_CODE_PV_SERVICE 0 +#define KVM_HCALL_CODE_SWDBG 1 #define KVM_HCALL_PV_SERVICE HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_PV_SERVICE) #define KVM_HCALL_FUNC_PV_IPI 1 +#define KVM_HCALL_SWDBG HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG) /* * LoongArch hypercall return code diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index 923d0bd38294..4cec8c16013c 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -15,10 +15,12 @@ */ #define __KVM_HAVE_READONLY_MEM +#define __KVM_HAVE_GUEST_DEBUG #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_DIRTY_LOG_PAGE_OFFSET 64 +#define KVM_GUESTDBG_USE_SW_BP 0x00010000 /* * for KVM_GET_REGS and KVM_SET_REGS */ @@ -74,6 +76,8 @@ struct kvm_fpu { #define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1) #define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2) +/* Debugging: Special instruction for software breakpoint */ +#define KVM_REG_LOONGARCH_DEBUG_INST (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3) #define LOONGARCH_REG_SHIFT 3 #define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 933879ad0ddc..19822813755d 100644 --- 
a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -774,23 +774,35 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) { larch_inst inst; unsigned int code; + int ret; inst.word = vcpu->arch.badi; code = inst.reg0i15_format.immediate; - update_pc(&vcpu->arch); + ret = RESUME_GUEST; switch (code) { case KVM_HCALL_PV_SERVICE: vcpu->stat.hypercall_exits++; kvm_handle_pv_service(vcpu); break; + case KVM_HCALL_SWDBG: + /* KVM_HC_SWDBG only in effective when SW_BP is enabled */ + if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { + vcpu->run->exit_reason = KVM_EXIT_DEBUG; + ret = RESUME_HOST; + } else + vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; + break; default: /* Treat it as noop intruction, only set return value */ vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; break; } - return RESUME_GUEST; + if (ret == RESUME_GUEST) + update_pc(&vcpu->arch); + + return ret; } /* diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 76f2086ab68b..f22d10228cd2 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -248,7 +248,15 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { - return -EINVAL; + if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) + return -EINVAL; + + if (dbg->control & KVM_GUESTDBG_ENABLE) + vcpu->guest_debug = dbg->control; + else + vcpu->guest_debug = 0; + + return 0; } static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) @@ -500,6 +508,9 @@ static int kvm_get_one_reg(struct kvm_vcpu *vcpu, case KVM_REG_LOONGARCH_COUNTER: *v = drdtime() + vcpu->kvm->arch.time_offset; break; + case KVM_REG_LOONGARCH_DEBUG_INST: + *v = INSN_HVCL + KVM_HCALL_SWDBG; + break; default: ret = -EINVAL; break; diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 6006a28653ad..06fd746b03b6 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -77,6 +77,7 
@@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_IMMEDIATE_EXIT: case KVM_CAP_IOEVENTFD: case KVM_CAP_MP_STATE: + case KVM_CAP_SET_GUEST_DEBUG: r = 1; break; case KVM_CAP_NR_VCPUS: -- Gitee From 35f37754e2605bf3b50725ef6fb3b2c7a06118fb Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 13 Mar 2024 17:48:26 +0800 Subject: [PATCH 0653/2138] anolis: irqchip/loongson-eiointc: Add virt extension support ANBZ: #8689 With virt eiointc, interrupt can be routed to 256 vcpus Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- drivers/irqchip/irq-loongson-eiointc.c | 43 +++++++++++++++++++++++--- 1 file changed, 38 insertions(+), 5 deletions(-) diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index fb4f6a1d4318..5c5462681c03 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -24,6 +24,16 @@ #define EIOINTC_REG_ISR 0x1800 #define EIOINTC_REG_ROUTE 0x1c00 +#define EXTIOI_VIRT_FEATURES 0x40000000 +#define EXTIOI_HAS_VIRT_EXTENSION 0 +#define EXTIOI_HAS_ENABLE_OPTION 1 +#define EXTIOI_HAS_INT_ENCODE 2 +#define EXTIOI_HAS_CPU_ENCODE 3 +#define EXTIOI_VIRT_CONFIG 0x40000004 +#define EXTIOI_ENABLE 1 +#define EXTIOI_ENABLE_INT_ENCODE 2 +#define EXTIOI_ENABLE_CPU_ENCODE 3 + #define VEC_REG_COUNT 4 #define VEC_COUNT_PER_REG 64 #define VEC_COUNT (VEC_REG_COUNT * VEC_COUNT_PER_REG) @@ -42,6 +52,7 @@ struct eiointc_priv { cpumask_t cpuspan_map; struct fwnode_handle *domain_handle; struct irq_domain *eiointc_domain; + bool cpu_encoded; }; static struct eiointc_priv *eiointc_priv[MAX_IO_PICS]; @@ -92,7 +103,16 @@ static DEFINE_RAW_SPINLOCK(affinity_lock); static void virt_extioi_set_irq_route(int irq, unsigned int cpu) { - iocsr_write8(cpu_logical_map(cpu), EIOINTC_REG_ROUTE + irq); + int data; + + /* + * get irq route info for continuous 4 vectors + * and set affinity for specified vector + 
*/ + data = iocsr_read32(EIOINTC_REG_ROUTE + (irq & ~3)); + data &= ~(0xff << ((irq & 3) * 8)); + data |= cpu_logical_map(cpu) << ((irq & 3) * 8); + iocsr_write32(data, EIOINTC_REG_ROUTE + (irq & ~3)); } static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) @@ -117,7 +137,7 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af vector = d->hwirq; regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2); - if (cpu_has_hypervisor) { + if (priv->cpu_encoded) { iocsr_write32(EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1F), regaddr); virt_extioi_set_irq_route(vector, cpu); iocsr_write32(EIOINTC_ALL_ENABLE, regaddr); @@ -182,8 +202,10 @@ static int eiointc_router_init(unsigned int cpu) for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) { /* Route to Node-0 Core-0 */ - if (index == 0) - bit = (cpu_has_hypervisor ? cpu_logical_map(0) : BIT(cpu_logical_map(0))); + if (eiointc_priv[index]->cpu_encoded) + bit = cpu_logical_map(0); + else if (index == 0) + bit = BIT(cpu_logical_map(0)); else bit = (eiointc_priv[index]->node << 4) | 1; @@ -384,7 +406,7 @@ static int __init acpi_cascade_irqdomain_init(void) static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq, u64 node_map) { - int i; + int i, val; node_map = node_map ? 
node_map : -1ULL; for_each_possible_cpu(i) { @@ -404,6 +426,17 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq, return -ENOMEM; } + if (cpu_has_hypervisor) { + val = iocsr_read32(EXTIOI_VIRT_FEATURES); + if (val & BIT(EXTIOI_HAS_CPU_ENCODE)) { + val = iocsr_read32(EXTIOI_VIRT_CONFIG); + val |= BIT(EXTIOI_ENABLE_CPU_ENCODE); + iocsr_write32(val, EXTIOI_VIRT_CONFIG); + priv->cpu_encoded = true; + pr_info("loongson-extioi: enable cpu encodig\n"); + } + } + eiointc_priv[nr_pics++] = priv; eiointc_router_init(0); irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv); -- Gitee From f4412cc5aea01289715c45e2f5fe77e60138779f Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 22 Mar 2024 16:24:10 +0800 Subject: [PATCH 0654/2138] anolis: LoongArch: KVM: Add steal time support in kvm side ANBZ: #8689 Steal time feature is added here in kvm side, VM can search supported features provided by KVM hypervisor, feature KVM_FEATURE_STEAL_TIME is added here. Like x86, steal time structure is saved in guest memory, one hypercall function KVM_HCALL_FUNC_NOTIFY is added to notify KVM to enable the feature. One cpu attr ioctl command KVM_LOONGARCH_VCPU_PVTIME_CTRL is added to save and restore base address of steal time structure when VM is migrated. 
Since it needs hypercall instruction emulation handling, and it is dependent on this patchset: https://lore.kernel.org/all/20240201031950.3225626-1-maobibo@loongson.cn/ Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_host.h | 7 ++ arch/loongarch/include/asm/kvm_para.h | 10 ++ arch/loongarch/include/asm/loongarch.h | 1 + arch/loongarch/include/uapi/asm/kvm.h | 4 + arch/loongarch/kvm/exit.c | 35 ++++++- arch/loongarch/kvm/vcpu.c | 122 +++++++++++++++++++++++++ 6 files changed, 174 insertions(+), 5 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 3a92b6b024d5..778874703483 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -30,6 +30,7 @@ #define KVM_PRIVATE_MEM_SLOTS 0 #define KVM_HALT_POLL_NS_DEFAULT 500000 +#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(1) #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP) @@ -197,6 +198,12 @@ struct kvm_vcpu_arch { struct kvm_mp_state mp_state; /* cpucfg */ u32 cpucfg[KVM_MAX_CPUCFG_REGS]; + /* paravirt steal time */ + struct { + u64 guest_addr; + u64 last_steal; + struct gfn_to_hva_cache cache; + } st; }; static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg) diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index db4579923542..032101b941d9 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -12,6 +12,7 @@ #define KVM_HCALL_CODE_SWDBG 1 #define KVM_HCALL_PV_SERVICE HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_PV_SERVICE) #define KVM_HCALL_FUNC_PV_IPI 1 +#define KVM_HCALL_FUNC_NOTIFY 2 #define KVM_HCALL_SWDBG HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG) /* @@ -21,6 +22,15 @@ #define KVM_HCALL_INVALID_CODE -1UL #define 
KVM_HCALL_INVALID_PARAMETER -2UL +#define KVM_STEAL_PHYS_VALID BIT_ULL(0) +#define KVM_STEAL_PHYS_MASK GENMASK_ULL(63, 6) +struct kvm_steal_time { + __u64 steal; + __u32 version; + __u32 flags; + __u32 pad[12]; +}; + /* * Hypercall interface for KVM hypervisor * diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 35e7bd31fdc7..33add67389ec 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -168,6 +168,7 @@ #define KVM_SIGNATURE "KVM\0" #define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4) #define KVM_FEATURE_PV_IPI BIT(1) +#define KVM_FEATURE_STEAL_TIME BIT(2) #ifndef __ASSEMBLY__ diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index 4cec8c16013c..9891ed93816a 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -83,7 +83,11 @@ struct kvm_fpu { #define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) #define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) #define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) + +/* Device Control API on vcpu fd */ #define KVM_LOONGARCH_VCPU_CPUCFG 0 +#define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1 +#define KVM_LOONGARCH_VCPU_PVTIME_GPA 0 struct kvm_debug_exit_arch { }; diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 19822813755d..13a9e58f463b 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -209,7 +209,7 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu) static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) { int rd, rj; - unsigned int index; + unsigned int index, ret; unsigned long plv; rd = inst.reg2_format.rd; @@ -240,10 +240,13 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) vcpu->arch.gprs[rd] = 0; break; case CPUCFG_KVM_FEATURE: - if ((plv & CSR_CRMD_PLV) == PLV_KERN) - vcpu->arch.gprs[rd] = KVM_FEATURE_PV_IPI; 
- else - vcpu->arch.gprs[rd] = 0; + ret = 0; + if ((plv & CSR_CRMD_PLV) == PLV_KERN) { + ret = KVM_FEATURE_PV_IPI; + if (sched_info_on()) + ret |= KVM_FEATURE_STEAL_TIME; + } + vcpu->arch.gprs[rd] = ret; break; default: vcpu->arch.gprs[rd] = 0; @@ -749,6 +752,25 @@ static int kvm_pv_send_ipi(struct kvm_vcpu *vcpu) return 0; } +static int kvm_save_notify(struct kvm_vcpu *vcpu) +{ + unsigned long id, data; + + id = vcpu->arch.gprs[LOONGARCH_GPR_A1]; + data = vcpu->arch.gprs[LOONGARCH_GPR_A2]; + switch (id) { + case KVM_FEATURE_STEAL_TIME: + vcpu->arch.st.guest_addr = data; + vcpu->arch.st.last_steal = current->sched_info.run_delay; + kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); + break; + default: + break; + }; + + return 0; +}; + /* * hypercall emulation always return to guest, Caller should check retval. */ @@ -762,6 +784,9 @@ static void kvm_handle_pv_service(struct kvm_vcpu *vcpu) kvm_pv_send_ipi(vcpu); ret = KVM_HCALL_STATUS_SUCCESS; break; + case KVM_HCALL_FUNC_NOTIFY: + ret = kvm_save_notify(vcpu); + break; default: ret = KVM_HCALL_INVALID_CODE; break; diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index f22d10228cd2..49c1172f0005 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -31,6 +31,115 @@ const struct kvm_stats_header kvm_vcpu_stats_header = { sizeof(kvm_vcpu_stats_desc), }; +static void kvm_update_stolen_time(struct kvm_vcpu *vcpu) +{ + struct kvm_steal_time __user *st; + struct gfn_to_hva_cache *ghc; + struct kvm_memslots *slots; + gpa_t gpa; + u64 steal; + u32 version; + + ghc = &vcpu->arch.st.cache; + gpa = vcpu->arch.st.guest_addr; + if (!(gpa & KVM_STEAL_PHYS_VALID)) + return; + + gpa &= KVM_STEAL_PHYS_MASK; + slots = kvm_memslots(vcpu->kvm); + if (slots->generation != ghc->generation || gpa != ghc->gpa) { + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, + sizeof(*st))) { + ghc->gpa = INVALID_GPA; + return; + } + } + + st = (struct kvm_steal_time __user *)ghc->hva; + unsafe_get_user(version, 
&st->version, out); + if (version & 1) + version += 1; + version += 1; + unsafe_put_user(version, &st->version, out); + /* Make sure st->version is written first */ + smp_wmb(); + + unsafe_get_user(steal, &st->steal, out); + steal += current->sched_info.run_delay - + vcpu->arch.st.last_steal; + vcpu->arch.st.last_steal = current->sched_info.run_delay; + unsafe_put_user(steal, &st->steal, out); + + /* Make sure st->steal is written first */ + smp_wmb(); + version += 1; + unsafe_put_user(version, &st->version, out); +out: + mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); +} + +static bool kvm_pvtime_supported(void) +{ + return !!sched_info_on(); +} + +static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + u64 __user *user = (u64 __user *)attr->addr; + struct kvm *kvm = vcpu->kvm; + u64 gpa; + int ret = 0; + int idx; + + if (!kvm_pvtime_supported() || + attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + return -ENXIO; + + if (get_user(gpa, user)) + return -EFAULT; + + /* Check the address is in a valid memslot */ + idx = srcu_read_lock(&kvm->srcu); + if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT))) + ret = -EINVAL; + srcu_read_unlock(&kvm->srcu, idx); + + if (!ret) + vcpu->arch.st.guest_addr = gpa; + + return ret; +} + +static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + u64 __user *user = (u64 __user *)attr->addr; + u64 gpa; + + if (!kvm_pvtime_supported() || + attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + return -ENXIO; + + gpa = vcpu->arch.st.guest_addr; + if (put_user(gpa, user)) + return -EFAULT; + + return 0; +} + +static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_LOONGARCH_VCPU_PVTIME_GPA: + if (kvm_pvtime_supported()) + return 0; + } + + return -ENXIO; +} + /* * kvm_check_requests - check and handle pending vCPU requests * @@ -48,6 +157,9 @@ static int 
kvm_check_requests(struct kvm_vcpu *vcpu) if (kvm_dirty_ring_check_request(vcpu)) return RESUME_HOST; + if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu)) + kvm_update_stolen_time(vcpu); + return RESUME_GUEST; } @@ -672,6 +784,9 @@ static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu, case KVM_LOONGARCH_VCPU_CPUCFG: ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr); break; + case KVM_LOONGARCH_VCPU_PVTIME_CTRL: + ret = kvm_loongarch_pvtime_has_attr(vcpu, attr); + break; default: break; } @@ -704,6 +819,9 @@ static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu, case KVM_LOONGARCH_VCPU_CPUCFG: ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr); break; + case KVM_LOONGARCH_VCPU_PVTIME_CTRL: + ret = kvm_loongarch_pvtime_get_attr(vcpu, attr); + break; default: break; } @@ -726,6 +844,9 @@ static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu, case KVM_LOONGARCH_VCPU_CPUCFG: ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr); break; + case KVM_LOONGARCH_VCPU_PVTIME_CTRL: + ret = kvm_loongarch_pvtime_set_attr(vcpu, attr); + break; default: break; } @@ -1084,6 +1205,7 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) /* Control guest page CCA attribute */ change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT); + kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); /* Don't bother restoring registers multiple times unless necessary */ if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) -- Gitee From c3a0c75f41ab489f8d000efc27120579bec2b2ce Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Fri, 22 Mar 2024 16:42:48 +0800 Subject: [PATCH 0655/2138] anolis: LoongArch: Add steal time support in guest side ANBZ: #8689 Percpu struct kvm_steal_time is added here, its size is 64 bytes and also defined as 64 bytes, so that the whole structure is in one physical page. When vcpu is onlined, function pv_register_steal_time() is called. This function will pass physical address of struct kvm_steal_time and tells hypervisor to enable steal time. 
When vcpu is offline, physical address is set as 0 and tells hypervisor to disable steal time. Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/paravirt.h | 5 + arch/loongarch/kernel/paravirt.c | 130 ++++++++++++++++++++++++++ arch/loongarch/kernel/time.c | 2 + 3 files changed, 137 insertions(+) diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h index 58f7b7b89f2c..fe27fb5e82b8 100644 --- a/arch/loongarch/include/asm/paravirt.h +++ b/arch/loongarch/include/asm/paravirt.h @@ -17,11 +17,16 @@ static inline u64 paravirt_steal_clock(int cpu) } int pv_ipi_init(void); +int __init pv_time_init(void); #else static inline int pv_ipi_init(void) { return 0; } +static inline int pv_time_init(void) +{ + return 0; +} #endif // CONFIG_PARAVIRT #endif diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index 9044ed62045c..56182c64ab38 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -5,10 +5,13 @@ #include #include #include +#include #include struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; +static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64); +static int has_steal_clock; static u64 native_steal_clock(int cpu) { @@ -17,6 +20,57 @@ static u64 native_steal_clock(int cpu) DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); +static bool steal_acc = true; +static int __init parse_no_stealacc(char *arg) +{ + steal_acc = false; + return 0; +} +early_param("no-steal-acc", parse_no_stealacc); + +static u64 para_steal_clock(int cpu) +{ + u64 steal; + struct kvm_steal_time *src; + int version; + + src = &per_cpu(steal_time, cpu); + do { + + version = src->version; + /* Make sure that the version is read before the steal */ + virt_rmb(); + steal = src->steal; + /* Make sure that the steal is read before the next 
version */ + virt_rmb(); + + } while ((version & 1) || (version != src->version)); + return steal; +} + +static int pv_register_steal_time(void) +{ + int cpu = smp_processor_id(); + struct kvm_steal_time *st; + unsigned long addr; + + if (!has_steal_clock) + return -EPERM; + + st = &per_cpu(steal_time, cpu); + addr = per_cpu_ptr_to_phys(st); + + /* The whole structure kvm_steal_time should be one page */ + if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) { + pr_warn("Illegal PV steal time addr %lx\n", addr); + return -EFAULT; + } + + addr |= KVM_STEAL_PHYS_VALID; + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr); + return 0; +} + #ifdef CONFIG_SMP static void pv_send_ipi_single(int cpu, unsigned int action) { @@ -110,6 +164,32 @@ static void pv_init_ipi(void) if (r < 0) panic("SWI0 IRQ request failed\n"); } + +static void pv_disable_steal_time(void) +{ + if (has_steal_clock) + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0); +} + +static int pv_cpu_online(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + pv_register_steal_time(); + local_irq_restore(flags); + return 0; +} + +static int pv_cpu_down_prepare(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + pv_disable_steal_time(); + local_irq_restore(flags); + return 0; +} #endif static bool kvm_para_available(void) @@ -149,3 +229,53 @@ int __init pv_ipi_init(void) return 1; } + +static void pv_cpu_reboot(void *unused) +{ + pv_disable_steal_time(); +} + +static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, + void *unused) +{ + on_each_cpu(pv_cpu_reboot, NULL, 1); + return NOTIFY_DONE; +} + +static struct notifier_block pv_reboot_nb = { + .notifier_call = pv_reboot_notify, +}; + +int __init pv_time_init(void) +{ + int feature; + + if (!cpu_has_hypervisor) + return 0; + if (!kvm_para_available()) + return 0; + + feature = read_cpucfg(CPUCFG_KVM_FEATURE); + if (!(feature & KVM_FEATURE_STEAL_TIME)) + return 0; + + 
has_steal_clock = 1; + if (pv_register_steal_time()) { + has_steal_clock = 0; + return 0; + } + + register_reboot_notifier(&pv_reboot_nb); + static_call_update(pv_steal_clock, para_steal_clock); + static_key_slow_inc(¶virt_steal_enabled); + if (steal_acc) + static_key_slow_inc(¶virt_steal_rq_enabled); + +#ifdef CONFIG_SMP + if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "loongarch/pv:online", + pv_cpu_online, pv_cpu_down_prepare) < 0) + pr_err("Failed to install cpu hotplug callbacks\n"); +#endif + pr_info("Using stolen time PV\n"); + return 0; +} diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index fd5354f9be7c..46d7d40c87e3 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -15,6 +15,7 @@ #include #include +#include #include u64 cpu_clock_freq; @@ -214,4 +215,5 @@ void __init time_init(void) constant_clockevent_init(); constant_clocksource_init(); + pv_time_init(); } -- Gitee From df6ccda08f40cf8a737305ea54bffa1aa27cbc4b Mon Sep 17 00:00:00 2001 From: Song Gao Date: Thu, 28 Mar 2024 17:10:08 +0800 Subject: [PATCH 0656/2138] anolis: LoongArch: KVM: Add PMU support ANBZ: #8689 Add PMU device emulation Signed-off-by: Song Gao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/2998 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_csr.h | 5 ++ arch/loongarch/include/asm/kvm_host.h | 14 ++++ arch/loongarch/include/asm/kvm_vcpu.h | 2 + arch/loongarch/include/asm/loongarch.h | 1 + arch/loongarch/kvm/exit.c | 7 ++ arch/loongarch/kvm/vcpu.c | 97 +++++++++++++++++++++++++- 6 files changed, 125 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h index 724ca8b7b401..476c9f620dd5 100644 --- a/arch/loongarch/include/asm/kvm_csr.h +++ b/arch/loongarch/include/asm/kvm_csr.h @@ -208,4 +208,9 @@ static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr, csr->csrs[gid] |= val & _mask; } +#define 
KVM_PMU_PLV_ENABLE (CSR_PERFCTRL_PLV0 | \ + CSR_PERFCTRL_PLV1 | \ + CSR_PERFCTRL_PLV2 | \ + CSR_PERFCTRL_PLV3) + #endif /* __ASM_LOONGARCH_KVM_CSR_H__ */ diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 778874703483..c146d2ebdb90 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -129,6 +129,7 @@ enum emulation_result { #define KVM_LARCH_LASX (0x1 << 2) #define KVM_LARCH_SWCSR_LATEST (0x1 << 3) #define KVM_LARCH_HWCSR_USABLE (0x1 << 4) +#define KVM_LARCH_PERF (0x1 << 5) struct kvm_vcpu_arch { /* @@ -204,6 +205,9 @@ struct kvm_vcpu_arch { u64 last_steal; struct gfn_to_hva_cache cache; } st; + /* Save host pmu csr */ + u64 perf_ctrl[4]; + u64 perf_cntr[4]; }; static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg) @@ -231,6 +235,16 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch) return arch->cpucfg[2] & CPUCFG2_LASX; } +static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[6] & CPUCFG6_PMP; +} + +static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch) +{ + return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT; +} + /* Debug: dump vcpu state */ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 9f53950959da..1da24994b838 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -75,6 +75,8 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { } static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { } #endif +int kvm_own_pmu(struct kvm_vcpu *vcpu); + void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); void kvm_reset_timer(struct kvm_vcpu *vcpu); void kvm_save_timer(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 33add67389ec..d0c50140bba9 
100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -119,6 +119,7 @@ #define CPUCFG6_PMP BIT(0) #define CPUCFG6_PAMVER GENMASK(3, 1) #define CPUCFG6_PMNUM GENMASK(7, 4) +#define CPUCFG6_PMNUM_SHIFT 4 #define CPUCFG6_PMBITS GENMASK(13, 8) #define CPUCFG6_UPM BIT(14) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 13a9e58f463b..8affc6d4a66e 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -83,6 +83,13 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) rj = inst.reg2csr_format.rj; csrid = inst.reg2csr_format.csr; + if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= LOONGARCH_CSR_PERFCNTR3) { + if (!kvm_own_pmu(vcpu)) { + vcpu->arch.pc -= 4; + return EMULATE_DONE; + } + } + /* Process CSR ops */ switch (rj) { case 0: /* process csrrd */ diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 49c1172f0005..685f2826d022 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -544,6 +544,12 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) case LOONGARCH_CPUCFG5: *v = GENMASK(31, 0); return 0; + case LOONGARCH_CPUCFG6: + if (cpu_has_pmp) + *v = GENMASK(14, 0); + else + *v = 0; + return 0; case LOONGARCH_CPUCFG16: *v = GENMASK(16, 0); return 0; @@ -562,7 +568,7 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) static int kvm_check_cpucfg(int id, u64 val) { - int ret; + int ret, host; u64 mask = 0; ret = _kvm_get_cpucfg_mask(id, &mask); @@ -588,6 +594,18 @@ static int kvm_check_cpucfg(int id, u64 val) /* LASX architecturally implies LSX and FP but val does not satisfy that */ return -EINVAL; return 0; + case LOONGARCH_CPUCFG6: + if (val & CPUCFG6_PMP) { + host = read_cpucfg(6); + if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS)) + /* Guest pmbits must be the same with host */ + return -EINVAL; + if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM)) + return -EINVAL; + if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM)) + 
return -EINVAL; + } + return 0; default: /* * Values for the other CPUCFG IDs are not being further validated @@ -767,6 +785,7 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, { switch (attr->attr) { case 2: + case 6: return 0; default: return -ENXIO; @@ -1067,6 +1086,77 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) preempt_enable(); } +int kvm_own_pmu(struct kvm_vcpu *vcpu) +{ + unsigned long val; + + if (!kvm_guest_has_pmu(&vcpu->arch)) + return -EINVAL; + + preempt_disable(); + val = read_csr_gcfg() & ~CSR_GCFG_GPERF; + val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; + write_csr_gcfg(val); + + vcpu->arch.aux_inuse |= KVM_LARCH_PERF; + preempt_enable(); + return 0; +} + +static void kvm_lose_pmu(struct kvm_vcpu *vcpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (!(vcpu->arch.aux_inuse & KVM_LARCH_PERF)) + return; + + /* save guest pmu csr */ + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); + kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL0, 0); + kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL1, 0); + kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL2, 0); + kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL3, 0); + /* Disable pmu access from guest */ + write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF); + + if (((kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3)) + & KVM_PMU_PLV_ENABLE) == 0) + vcpu->arch.aux_inuse &= ~KVM_LARCH_PERF; +} + +static void kvm_restore_pmu(struct kvm_vcpu *vcpu) +{ + unsigned long val; + struct loongarch_csrs *csr = 
vcpu->arch.csr; + + if (!(vcpu->arch.aux_inuse & KVM_LARCH_PERF)) + return; + + /* Set PM0-PM(num) to Guest */ + val = read_csr_gcfg() & ~CSR_GCFG_GPERF; + val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; + write_csr_gcfg(val); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); +} + + int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { int intr = (int)irq->irq; @@ -1205,6 +1295,10 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) /* Control guest page CCA attribute */ change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT); + + /* Restore hardware perf csr */ + kvm_restore_pmu(vcpu); + kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); /* Don't bother restoring registers multiple times unless necessary */ @@ -1290,6 +1384,7 @@ static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu) struct loongarch_csrs *csr = vcpu->arch.csr; kvm_lose_fpu(vcpu); + kvm_lose_pmu(vcpu); /* * Update CSR state from hardware if software CSR state is stale, -- Gitee From 4009a04058074b8dce1f8fd2e7f980f5836a6f5a Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Fri, 29 Mar 2024 10:30:47 +0800 Subject: [PATCH 0657/2138] anolis: config: open mpi3mr driver ANBZ: #8665 Open mpi3mr driver by default. 
Signed-off-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/2970 --- arch/arm64/configs/anolis-debug_defconfig | 2 +- arch/arm64/configs/anolis_defconfig | 2 +- arch/x86/configs/anolis-debug_defconfig | 2 +- arch/x86/configs/anolis_defconfig | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 8b932435df46..97230e6e79b5 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -2406,7 +2406,7 @@ CONFIG_SCSI_MPT3SAS=m CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 CONFIG_SCSI_MPT2SAS=m -# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_MPI3MR=m CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set # CONFIG_SCSI_BUSLOGIC is not set diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index ba7a0a1e15f4..6c0af4f2c954 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -2403,7 +2403,7 @@ CONFIG_SCSI_MPT3SAS=m CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 CONFIG_SCSI_MPT2SAS=m -# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_MPI3MR=m CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set # CONFIG_SCSI_BUSLOGIC is not set diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 05b04486b420..edbe050c1962 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -2504,7 +2504,7 @@ CONFIG_SCSI_MPT3SAS=m CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 CONFIG_SCSI_MPT2SAS=m -# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_MPI3MR=m CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set # CONFIG_SCSI_BUSLOGIC is not set diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index ca8ff01300ac..2845d0515d6d 100644 --- a/arch/x86/configs/anolis_defconfig +++ 
b/arch/x86/configs/anolis_defconfig @@ -2499,7 +2499,7 @@ CONFIG_SCSI_MPT3SAS=m CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 CONFIG_SCSI_MPT2SAS=m -# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_MPI3MR=m CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set # CONFIG_SCSI_BUSLOGIC is not set -- Gitee From 5f336f09960e3bf0b460befd03845c18918ae9ba Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 8 Sep 2023 20:09:00 -0400 Subject: [PATCH 0658/2138] anolis: KVM: SVM: Add support for different CSV guests to reuse the same ASID ANBZ: #8676 If user want to reuse one ASID for many CSV guests, he should provide a label (i.e. userid) and the length of the label when launch CSV guest. The reference count of the ASID will be increased if user launch a CSV guest with the label correspond to the ASID. When a CSV guest which launch with a label is destroyed, the reference count of the ASID correspond to the label will be decreased, and the ASID is freed only if the reference count becomes zero. The codes for reuse ASID is not compatible with CONFIG_CGROUP_MISC, we introduce CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID that depends on !CGROUP_MISC, the code take effect only when CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y. Make CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y as the default configure. 
Signed-off-by: hanliyang Reviewed-by: Shirong Hao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2976 --- arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + arch/x86/kvm/Kconfig | 10 ++ arch/x86/kvm/svm/sev.c | 146 +++++++++++++++++++++++- include/uapi/linux/kvm.h | 5 + 5 files changed, 162 insertions(+), 1 deletion(-) diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index edbe050c1962..92c4549d3a57 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -743,6 +743,7 @@ CONFIG_KVM_AMD_SEV=y CONFIG_KVM_SMM=y # CONFIG_KVM_XEN is not set CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y +CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y CONFIG_AS_AVX512=y CONFIG_AS_SHA1_NI=y CONFIG_AS_SHA256_NI=y diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 2845d0515d6d..392caf0e3e0a 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -738,6 +738,7 @@ CONFIG_KVM_AMD_SEV=y CONFIG_KVM_SMM=y # CONFIG_KVM_XEN is not set CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y +CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y CONFIG_AS_AVX512=y CONFIG_AS_SHA1_NI=y CONFIG_AS_SHA256_NI=y diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index ed90f148140d..463732963a15 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -154,4 +154,14 @@ config KVM_PROVE_MMU config KVM_EXTERNAL_WRITE_TRACKING bool +config KVM_SUPPORTS_CSV_REUSE_ASID + def_bool y + bool "Reuse the same ASID for different HYGON CSV guests" + depends on KVM_AMD_SEV && CPU_SUP_HYGON + depends on !CGROUP_MISC + help + Provide support for reuse the same ASID for difference HYGON + CSV guests, this allow the user to create more CSV guests on + HYGON CPUs with limited ASIDs. 
+ endif # VIRTUALIZATION diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 59163f97b553..b9f3378b5f36 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -90,6 +90,17 @@ struct enc_region { unsigned long size; }; +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID +#define ASID_USERID_LENGTH 20 +struct csv_asid_userid { + int refcnt; // reference count of the ASID + u32 userid_len; + char userid[ASID_USERID_LENGTH]; +}; + +static struct csv_asid_userid *csv_asid_userid_array; +#endif + /* Called with the sev_bitmap_lock held, or on shutdown */ static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid) { @@ -149,7 +160,11 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev) misc_cg_uncharge(type, sev->misc_cg, 1); } +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID +static int sev_asid_new(struct kvm_sev_info *sev, const char *userid, u32 userid_len) +#else static int sev_asid_new(struct kvm_sev_info *sev) +#endif { /* * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid. @@ -177,6 +192,34 @@ static int sev_asid_new(struct kvm_sev_info *sev) mutex_lock(&sev_bitmap_lock); +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* For Hygon CPU, check whether the userid exists */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + userid && userid_len) { + int i = !min_sev_asid ? 
1 : min_sev_asid; + + for (; i <= max_sev_asid; i++) { + /* skip ASIDs without correspond userid */ + if (!csv_asid_userid_array[i].userid_len) + continue; + + /* skip if length of userid is different */ + if (csv_asid_userid_array[i].userid_len != userid_len) + continue; + + if (!memcmp(csv_asid_userid_array[i].userid, + userid, userid_len)) { + pr_debug("Found reusable asid %d\n", i); + /* Increase reference count if userid exists */ + csv_asid_userid_array[i].refcnt++; + + mutex_unlock(&sev_bitmap_lock); + return i; + } + } + } +#endif + /* * No matter what the min_sev_asid is, all asids in range * [1, max_sev_asid] can be used for CSV2 guest on Hygon CPUs. @@ -197,6 +240,16 @@ static int sev_asid_new(struct kvm_sev_info *sev) __set_bit(asid, sev_asid_bitmap); +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* For Hygon CPU, initialize the new userid */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + userid && userid_len) { + memcpy(csv_asid_userid_array[asid].userid, userid, userid_len); + csv_asid_userid_array[asid].userid_len = userid_len; + csv_asid_userid_array[asid].refcnt = 1; + } +#endif + mutex_unlock(&sev_bitmap_lock); return asid; @@ -221,7 +274,25 @@ static void sev_asid_free(struct kvm_sev_info *sev) mutex_lock(&sev_bitmap_lock); +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* For Hygon CPU, decrease the reference count if userid exist */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + csv_asid_userid_array[sev->asid].userid_len) { + /* If reach here, reference count should large than 0. 
*/ + WARN_ON(csv_asid_userid_array[sev->asid].refcnt <= 0); + + if (--csv_asid_userid_array[sev->asid].refcnt == 0) { + __set_bit(sev->asid, sev_reclaim_asid_bitmap); + + memset(&csv_asid_userid_array[sev->asid], 0, + sizeof(struct csv_asid_userid)); + } + } else { + __set_bit(sev->asid, sev_reclaim_asid_bitmap); + } +#else __set_bit(sev->asid, sev_reclaim_asid_bitmap); +#endif for_each_possible_cpu(cpu) { sd = per_cpu_ptr(&svm_data, cpu); @@ -268,6 +339,11 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; int asid, ret; +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + struct kvm_csv_init params; + void *csv_blob = NULL; +#endif + if (kvm->created_vcpus) return -EINVAL; @@ -277,7 +353,43 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) sev->active = true; sev->es_active = argp->id == KVM_SEV_ES_INIT; + +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + memset(¶ms, 0, sizeof(params)); + + if (argp->data && + copy_from_user(¶ms, + (void __user *)(uintptr_t)argp->data, sizeof(params))) + return -EFAULT; + + if (params.userid_addr) { + if (params.len >= ASID_USERID_LENGTH) { + pr_err("Invalid length of userid %d > %d\n", + params.len, ASID_USERID_LENGTH); + return -EINVAL; + } + + csv_blob = psp_copy_user_blob(params.userid_addr, params.len); + if (IS_ERR(csv_blob)) { + pr_err("Copy userid failed, %llx (%u)\n", + params.userid_addr, params.len); + return PTR_ERR(csv_blob); + } + } + + asid = sev_asid_new(sev, (const char *)csv_blob, params.len); + + /* Free the @csv_blob to prevent memory leak */ + kfree(csv_blob); + csv_blob = NULL; + } else { + asid = sev_asid_new(sev, NULL, 0); + } +#else asid = sev_asid_new(sev); +#endif + if (asid < 0) goto e_no_asid; sev->asid = asid; @@ -297,6 +409,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) sev_asid_free(sev); sev->asid = 0; e_no_asid: +#ifdef 
CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + kfree(csv_blob); +#endif sev->es_active = false; sev->active = false; return ret; @@ -2503,7 +2618,25 @@ void __init sev_hardware_setup(void) } if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* Initialize CSV ASID reuse array */ + csv_asid_userid_array = kcalloc(nr_asids, + sizeof(struct csv_asid_userid), GFP_KERNEL); + if (!csv_asid_userid_array) { + bitmap_free(sev_asid_bitmap); + sev_asid_bitmap = NULL; + bitmap_free(sev_reclaim_asid_bitmap); + sev_reclaim_asid_bitmap = NULL; + goto out; + } +#endif + + /* Initialize buffer to accelerate migration of CSV/CSV2 guest */ if (alloc_trans_mempool()) { +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + kfree(csv_asid_userid_array); + csv_asid_userid_array = NULL; +#endif bitmap_free(sev_asid_bitmap); sev_asid_bitmap = NULL; bitmap_free(sev_reclaim_asid_bitmap); @@ -2590,8 +2723,12 @@ void sev_hardware_unsetup(void) /* No need to take sev_bitmap_lock, all VMs have been destroyed. 
*/ sev_flush_asids(1, max_sev_asid); - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { free_trans_mempool(); +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + kfree(csv_asid_userid_array); +#endif + } bitmap_free(sev_asid_bitmap); bitmap_free(sev_reclaim_asid_bitmap); @@ -2946,6 +3083,13 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu) /* Assign the asid allocated with this SEV guest */ svm->asid = asid; +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* If ASID is shared with other guests, then flush TLB before VMRUN */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + csv_asid_userid_array[asid].userid_len) + svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; +#endif + /* * Flush guest TLB: * diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 5c0859e7597f..61c5c6990801 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2064,6 +2064,11 @@ struct kvm_csv_command_batch { __u64 csv_batch_list_uaddr; }; +struct kvm_csv_init { + __u64 userid_addr; + __u32 len; +}; + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) -- Gitee From 96ef9a9f6cd3e9d087e04467ec5be5b32f86ef73 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 10:36:36 +0800 Subject: [PATCH 0659/2138] anolis: crypto: ccp: Define CSV3 key management command id ANBZ: #8681 Define Hygon CSV3 key management command id and structure. CSV3 is the technology for Hygon secure virtualization to improve security of guest with secure isolated memory technology in hardware. The command definition is available in CSV3 spec. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2995 --- .../arch/x86/hygon-secure-virtualization.rst | 101 +++++++++ drivers/crypto/ccp/sev-dev.c | 17 ++ include/linux/psp-csv.h | 191 ++++++++++++++++++ 3 files changed, 309 insertions(+) create mode 100644 Documentation/arch/x86/hygon-secure-virtualization.rst create mode 100644 include/linux/psp-csv.h diff --git a/Documentation/arch/x86/hygon-secure-virtualization.rst b/Documentation/arch/x86/hygon-secure-virtualization.rst new file mode 100644 index 000000000000..ab94107c91f5 --- /dev/null +++ b/Documentation/arch/x86/hygon-secure-virtualization.rst @@ -0,0 +1,101 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=========================== +HYGON Secure Virtualization +=========================== + +China Secure Virtualization (CSV) is a key virtualization feature on Hygon +processors. + +The 1st generation of CSV (CSV for short) is a secure virtualization technology +to provide memory encryption for the virtual machine (VM), each VM's memory is +encrypted by its unique encryption key which is managed by secure processor. + +The 2nd generation of CSV (CSV2 for short) provides security enhancement to CSV +by encrypting not only the VM's memory but also the vCPU's registers of the VM. + +The 3rd generation of CSV (CSV3 for short) is a more advanced secure +virtualization technology, it integrates secure processor, memory encryption and +memory isolation to provide the ability to protect guest's private data. The CSV3 +guest's context like CPU registers, control block and nested page table is accessed +only by the guest itself and the secure processor. Neither other guests nor the +host can tamper with the guest's context. + +The secure processor is a separate processor inside Hygon hardware. 
The firmware +running inside the secure processor performs activities in a secure way, such as +OVMF encryption, VM launch, secure memory management and nested page table +management etc. For more information, please see CSV spec and CSV3 spec from Hygon. + +A CSV guest is running in the memory that is encrypted with a dedicated encrypt +key which is set by the secure processor. And CSV guest's memory encrypt key is +unique from the others. A low latency crypto engine resides on Hygon hardware +to minimize the negative effect on memory bandwidth. In CSV guest, a guest private +page will be automatically decrypted when read from memory and encrypted when +written to memory. + +CSV3 provides an enhancement technology named memory isolation to improve the +security. A dedicated memory isolation hardware is built in Hygon hardware. Only +the secure processor has privilege to configure the isolation hardware. At the +BIOS stage, host will reserve several memory regions as secure which are protected +by the isolation hardware. The secure processor allocates the reserved secure +memory for CSV3 guest and marks the memory as dedicated for the current CSV3 +guest. Any memory access (read or write) to CSV3 guest's private memory outside +the guest will be blocked by isolation hardware. + +A CSV3 guest may declare some memory regions as shared to share data with the +host. When a page is set as shared, read/write on the page will bypass the +isolation hardware and the guest's shared memory can be accessed by the host. A +method named CSV3 secure call command is designed and CSV3 guest sends the secure +call command to the secure processor to change private memory to shared memory. +In the method, 2 dedicated pages are reserved at early stage of the guest. Any +read/write on the dedicated pages will trigger nested page fault. When NPF +happens, the host helps to issue an external command to the secure processor but +cannot tamper with the data in the guest's private memory. 
Then the secure +processor checks the fault address and handles the command if the address is +exactly the dedicated pages. + +Support for CSV can be determined through the CPUID instruction. The CPUID +function 0x8000001f reports information to CSV:: + + 0x8000001f[eax]: + Bit[1] indicates support for CSV + Bit[3] indicates support for CSV2 + Bit[30] indicates support for CSV3 + +If CSV is support, MSR 0xc0010131 can be used to determine if CSV is active:: + + 0xc0010131: + Bit[0] 0 = CSV is not active + 1 = CSV is active + Bit[1] 0 = CSV2 is not active + 1 = CSV2 is active + Bit[30] 0 = CSV3 is not active + 1 = CSV3 is active + +All CSV/CSV2's configurations must be enabled in CSV3. Linux can activate CSV3 by +default (CONFIG_HYGON_CSV=y, CONFIG_CMA=y). CSV3 guest's memory is managed by +CMA (Contiguous Memory Allocation). User must specify CSV3 total secure memory on +the linux kernel command line with csv_mem_size or csv_mem_percentage:: + + csv_mem_size=nn[MG] + [KNL,CSV] + Reserve specified CSV3 memory size in CMA. CSV3's memory will be + allocated from these CMAs. + For instance, csv_mem_size=40G, 40G memory is reserved for CSV3. + + csv_mem_percentage=nn + [KNL,CSV] + Reserve specified memory size which is prorated according to the + whole system memory size. CSV3 guest's memory will be allocated + from these CMAs. + For instance, csv_mem_percentage=60, means 60% system memory is + reserved for CSV3. + The maximum percentage is 80. And the default percentage is 0. + +Limitations +The reserved CSV3 memory within CMA cannot be used by kernel or any application that +may pin memory using long term gup during the application's life time. +For instance, if the whole system memory is 64G and 32G is reserved for CSV3 with +kernel command line csv_mem_percentage=50, only 32G memory is available for CSV/CSV2. +As a result, user will fail to run a CSV/CSV2 guest with memory size which exceeds +32G. 
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index ee42430b4ae3..893bafb42f81 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -186,6 +187,22 @@ static int sev_cmd_buffer_len(int cmd) return sizeof(struct csv_data_hgsc_cert_import); case CSV_CMD_RING_BUFFER: return sizeof(struct csv_data_ring_buffer); + case CSV3_CMD_LAUNCH_ENCRYPT_DATA: + return sizeof(struct csv3_data_launch_encrypt_data); + case CSV3_CMD_LAUNCH_ENCRYPT_VMCB: + return sizeof(struct csv3_data_launch_encrypt_vmcb); + case CSV3_CMD_UPDATE_NPT: + return sizeof(struct csv3_data_update_npt); + case CSV3_CMD_SET_SMR: + return sizeof(struct csv3_data_set_smr); + case CSV3_CMD_SET_SMCR: + return sizeof(struct csv3_data_set_smcr); + case CSV3_CMD_SET_GUEST_PRIVATE_MEMORY: + return sizeof(struct csv3_data_set_guest_private_memory); + case CSV3_CMD_DBG_READ_VMSA: + return sizeof(struct csv3_data_dbg_read_vmsa); + case CSV3_CMD_DBG_READ_MEM: + return sizeof(struct csv3_data_dbg_read_mem); default: break; } diff --git a/include/linux/psp-csv.h b/include/linux/psp-csv.h new file mode 100644 index 000000000000..960459375cd6 --- /dev/null +++ b/include/linux/psp-csv.h @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hygon Secure Virtualization feature CSV driver interface + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ */ + +#ifndef __PSP_CSV_H__ +#define __PSP_CSV_H__ + +#include + +/** + * Guest/platform management commands for CSV3 + */ +enum csv3_cmd { + /* Guest launch commands */ + CSV3_CMD_SET_GUEST_PRIVATE_MEMORY = 0x200, + CSV3_CMD_LAUNCH_ENCRYPT_DATA = 0x201, + CSV3_CMD_LAUNCH_ENCRYPT_VMCB = 0x202, + /* Guest NPT(Nested Page Table) management commands */ + CSV3_CMD_UPDATE_NPT = 0x203, + + /* Guest migration commands */ + CSV3_CMD_SEND_ENCRYPT_DATA = 0x210, + CSV3_CMD_SEND_ENCRYPT_CONTEXT = 0x211, + CSV3_CMD_RECEIVE_ENCRYPT_DATA = 0x212, + CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT = 0x213, + + /* Guest debug commands */ + CSV3_CMD_DBG_READ_VMSA = 0x220, + CSV3_CMD_DBG_READ_MEM = 0x221, + + /* Platform secure memory management commands */ + CSV3_CMD_SET_SMR = 0x230, + CSV3_CMD_SET_SMCR = 0x231, + + CSV3_CMD_MAX, +}; + +/** + * struct csv3_data_launch_encrypt_data - CSV3_CMD_LAUNCH_ENCRYPT_DATA command + * + * @handle: handle of the VM to update + * @gpa: guest address where data is copied + * @length: len of memory to be encrypted + * @data_blocks: memory regions to hold data page address + */ +struct csv3_data_launch_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 gpa; /* In */ + u32 length; /* In */ + u32 reserved1; /* In */ + u64 data_blocks[8]; /* In */ +} __packed; + +/** + * struct csv3_data_launch_encrypt_vmcb - CSV3_CMD_LAUNCH_ENCRYPT_VMCB command + * + * @handle: handle of the VM + * @vcpu_id: id of vcpu per vmsa/vmcb + * @vmsa_addr: memory address of initial vmsa data + * @vmsa_len: len of initial vmsa data + * @shadow_vmcb_addr: memory address of shadow vmcb data + * @shadow_vmcb_len: len of shadow vmcb data + * @secure_vmcb_addr: memory address of secure vmcb data + * @secure_vmcb_len: len of secure vmcb data + */ +struct csv3_data_launch_encrypt_vmcb { + u32 handle; /* In */ + u32 reserved; /* In */ + u32 vcpu_id; /* In */ + u32 reserved1; /* In */ + u64 vmsa_addr; /* In */ + u32 vmsa_len; /* In */ + u32 reserved2; /* In */ + u64 
shadow_vmcb_addr; /* In */ + u32 shadow_vmcb_len; /* In */ + u32 reserved3; /* In */ + u64 secure_vmcb_addr; /* Out */ + u32 secure_vmcb_len; /* Out */ +} __packed; + +/** + * struct csv3_data_update_npt - CSV3_CMD_UPDATE_NPT command + * + * @handle: handle assigned to the VM + * @error_code: nested page fault error code + * @gpa: guest page address where npf happens + * @spa: physical address which maps to gpa in host page table + * @level: page level which can be mapped in nested page table + * @page_attr: page attribute for gpa + * @page_attr_mask: which page attribute bit should be set + * @npages: number of pages from gpa is handled. + */ +struct csv3_data_update_npt { + u32 handle; /* In */ + u32 reserved; /* In */ + u32 error_code; /* In */ + u32 reserved1; /* In */ + u64 gpa; /* In */ + u64 spa; /* In */ + u64 level; /* In */ + u64 page_attr; /* In */ + u64 page_attr_mask; /* In */ + u32 npages; /* In/Out */ +} __packed; + +/** + * struct csv3_data_mem_region - define a memory region + * + * @base_address: base address of a memory region + * @size: size of memory region + */ +struct csv3_data_memory_region { + u64 base_address; /* In */ + u64 size; /* In */ +} __packed; + +/** + * struct csv3_data_set_guest_private_memory - CSV3_CMD_SET_GUEST_PRIVATE_MEMORY + * command parameters + * + * @handle: handle assigned to the VM + * @nregions: number of memory regions + * @regions_paddr: address of memory containing multiple memory regions + */ +struct csv3_data_set_guest_private_memory { + u32 handle; /* In */ + u32 nregions; /* In */ + u64 regions_paddr; /* In */ +} __packed; + +/** + * struct csv3_data_set_smr - CSV3_CMD_SET_SMR command parameters + * + * @smr_entry_size: size of SMR entry + * @nregions: number of memory regions + * @regions_paddr: address of memory containing multiple memory regions + */ +struct csv3_data_set_smr { + u32 smr_entry_size; /* In */ + u32 nregions; /* In */ + u64 regions_paddr; /* In */ +} __packed; + +/** + * struct 
csv3_data_set_smcr - CSV3_CMD_SET_SMCR command parameters + * + * @base_address: start address of SMCR memory + * @size: size of SMCR memory + */ +struct csv3_data_set_smcr { + u64 base_address; /* In */ + u64 size; /* In */ +} __packed; + +/** + * struct csv3_data_dbg_read_vmsa - CSV3_CMD_DBG_READ_VMSA command parameters + * + * @handle: handle assigned to the VM + * @spa: system physical address of memory to get vmsa of the specific vcpu + * @size: size of the host memory + * @vcpu_id: the specific vcpu + */ +struct csv3_data_dbg_read_vmsa { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 spa; /* In */ + u32 size; /* In */ + u32 vcpu_id; /* In */ +} __packed; + +/** + * struct csv3_data_dbg_read_mem - CSV3_CMD_DBG_READ_MEM command parameters + * + * @handle: handle assigned to the VM + * @gpa: guest physical address of the memory to access + * @spa: system physical address of memory to get data from gpa + * @size: size of guest memory to access + */ +struct csv3_data_dbg_read_mem { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 gpa; /* In */ + u64 spa; /* In */ + u32 size; /* In */ +} __packed; + +#endif -- Gitee From def55a9681a3f4ee5e72cf5250b35cec74addb17 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 10:52:56 +0800 Subject: [PATCH 0660/2138] anolis: x86/csv: Manage CSV3 guest's private memory by CMA ANBZ: #8681 The private memory of a CSV3 guest is isolated from VMM and has to be physically contiguous. CMA (Contiguous Memory Allocator) is a memory allocator within the kernel for contiguous physical memory. Use the CMA for the CSV3 private memory management. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2995 --- arch/x86/Kconfig | 23 ++ arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + arch/x86/include/asm/csv.h | 53 ++++ arch/x86/kernel/setup.c | 5 + arch/x86/mm/Makefile | 2 + arch/x86/mm/csv.c | 382 ++++++++++++++++++++++++ include/linux/cma.h | 1 + mm/cma.c | 28 +- mm/cma.h | 2 +- 10 files changed, 494 insertions(+), 4 deletions(-) create mode 100644 arch/x86/include/asm/csv.h create mode 100644 arch/x86/mm/csv.c diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a4846bc89600..d8d988e96e9a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2045,6 +2045,29 @@ config EFI_RUNTIME_MAP See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map. +config HYGON_CSV + bool "Hygon secure virtualization CSV support" + default y + depends on CPU_SUP_HYGON && AMD_MEM_ENCRYPT && CMA + help + Hygon CSV integrates secure processor, memory encryption and + memory isolation to provide the ability to protect guest's private + data. It has evolved from CSV, CSV2 to CSV3. + + For CSV, the guest's memory is encrypted. + + For CSV2, not only the guest's memory, but also the guest's vCPU + registers are encrypted, neither other guests nor the host can tamper + with the vCPU registers. + + For CSV3, the guest's context like vCPU registers, control block and + nested page table is accessed only by the guest itself and the secure + processor. Neither other guests nor the host can tamper with the + guest's context. + + Say Y here to enable support for the whole capbilities of Hygon secure + virtualization on hygon processor. 
+ source "kernel/Kconfig.hz" config ARCH_SUPPORTS_KEXEC diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 92c4549d3a57..e9f3e126b433 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -489,6 +489,7 @@ CONFIG_EFI_HANDOVER_PROTOCOL=y CONFIG_EFI_MIXED=y # CONFIG_EFI_FAKE_MEMMAP is not set CONFIG_EFI_RUNTIME_MAP=y +CONFIG_HYGON_CSV=y # CONFIG_HZ_100 is not set # CONFIG_HZ_250 is not set # CONFIG_HZ_300 is not set diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 392caf0e3e0a..caace118a265 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -486,6 +486,7 @@ CONFIG_EFI_HANDOVER_PROTOCOL=y CONFIG_EFI_MIXED=y # CONFIG_EFI_FAKE_MEMMAP is not set CONFIG_EFI_RUNTIME_MAP=y +CONFIG_HYGON_CSV=y # CONFIG_HZ_100 is not set # CONFIG_HZ_250 is not set # CONFIG_HZ_300 is not set diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h new file mode 100644 index 000000000000..68f55e1b857b --- /dev/null +++ b/arch/x86/include/asm/csv.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hygon China Secure Virtualization (CSV) + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ * + * Author: Jiang Xin + */ + +#ifndef __ASM_X86_CSV_H__ +#define __ASM_X86_CSV_H__ + +#ifndef __ASSEMBLY__ + +struct csv_mem { + uint64_t start; + uint64_t size; +}; + +#ifdef CONFIG_HYGON_CSV + +#define CSV_MR_ALIGN_BITS (28) + +extern struct csv_mem *csv_smr; +extern unsigned int csv_smr_num; + +void __init early_csv_reserve_mem(void); + +phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align); +void csv_release_to_contiguous(phys_addr_t pa, size_t size); + +uint32_t csv_get_smr_entry_shift(void); + +#else /* !CONFIG_HYGON_CSV */ + +#define csv_smr NULL +#define csv_smr_num 0U + +static inline void __init early_csv_reserve_mem(void) { } + +static inline phys_addr_t +csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align) { return 0; } +static inline void csv_release_to_contiguous(phys_addr_t pa, size_t size) { } + +static inline uint32_t csv_get_smr_entry_shift(void) { return 0; } + +#endif /* CONFIG_HYGON_CSV */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_X86_CSV_H__ */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 1716ef357439..93dc119c8e2e 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -55,6 +55,7 @@ #include #include #include +#include /* * max_low_pfn_mapped: highest directly mapped pfn < 4 GB @@ -1245,6 +1246,10 @@ void __init setup_arch(char **cmdline_p) early_acpi_boot_init(); initmem_init(); + + /* Try to reserve contiguous memory to support CSV3 */ + early_csv_reserve_mem(); + dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); if (boot_cpu_has(X86_FEATURE_GBPAGES)) diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index c80febc44cd2..166a0934d3e4 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -67,3 +67,5 @@ obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o + +obj-$(CONFIG_HYGON_CSV) += csv.o 
diff --git a/arch/x86/mm/csv.c b/arch/x86/mm/csv.c new file mode 100644 index 000000000000..fe5ca7ed4493 --- /dev/null +++ b/arch/x86/mm/csv.c @@ -0,0 +1,382 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon China Secure Virtualization (CSV) + * + * Copyright (C) Hygon Info Technologies Ltd. + * + * Author: Jiang Xin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) "CSV-CMA: " fmt + +#define NUM_SMR_ENTRIES (8 * 1024) +#define CSV_CMA_SHIFT PUD_SHIFT +#define CSV_CMA_SIZE (1 << CSV_CMA_SHIFT) +#define MIN_SMR_ENTRY_SHIFT 23 +#define CSV_SMR_INFO_SIZE (nr_node_ids * sizeof(struct csv_mem)) + +/* 0 percent of total memory by default*/ +static unsigned char csv_mem_percentage; +static unsigned long csv_mem_size; + +static int __init cmdline_parse_csv_mem_size(char *str) +{ + unsigned long size; + char *endp; + + if (str) { + size = memparse(str, &endp); + csv_mem_size = size; + if (!csv_mem_size) + csv_mem_percentage = 0; + } + + return 0; +} +early_param("csv_mem_size", cmdline_parse_csv_mem_size); + +static int __init cmdline_parse_csv_mem_percentage(char *str) +{ + unsigned char percentage; + int ret; + + if (!str) + return 0; + + ret = kstrtou8(str, 10, &percentage); + if (!ret) { + csv_mem_percentage = min_t(unsigned char, percentage, 80); + if (csv_mem_percentage != percentage) + pr_warn("csv_mem_percentage is limited to 80.\n"); + } else { + /* Disable CSV CMA. */ + csv_mem_percentage = 0; + pr_err("csv_mem_percentage is invalid. 
(0 - 80) is expected.\n"); + } + + return ret; +} +early_param("csv_mem_percentage", cmdline_parse_csv_mem_percentage); + +struct csv_mem *csv_smr; +EXPORT_SYMBOL_GPL(csv_smr); + +unsigned int csv_smr_num; +EXPORT_SYMBOL_GPL(csv_smr_num); + +struct csv_cma { + int fast; + struct cma *cma; +}; + +struct cma_array { + unsigned long count; + struct csv_cma csv_cma[]; +}; + +static unsigned int smr_entry_shift; +static struct cma_array *csv_contiguous_pernuma_area[MAX_NUMNODES]; + +static void csv_set_smr_entry_shift(unsigned int shift) +{ + smr_entry_shift = max_t(unsigned int, shift, MIN_SMR_ENTRY_SHIFT); + pr_info("SMR entry size is 0x%x\n", 1 << smr_entry_shift); +} + +unsigned int csv_get_smr_entry_shift(void) +{ + return smr_entry_shift; +} +EXPORT_SYMBOL_GPL(csv_get_smr_entry_shift); + +static unsigned long __init present_pages_in_node(int nid) +{ + unsigned long range_start_pfn, range_end_pfn; + unsigned long nr_present = 0; + int i; + + for_each_mem_pfn_range(i, nid, &range_start_pfn, &range_end_pfn, NULL) + nr_present += range_end_pfn - range_start_pfn; + + return nr_present; +} + +static phys_addr_t __init csv_early_percent_memory_on_node(int nid) +{ + return (present_pages_in_node(nid) * csv_mem_percentage / 100) << PAGE_SHIFT; +} + +static void __init csv_cma_reserve_mem(void) +{ + int node, i; + unsigned long size; + int idx = 0; + int count; + int cma_array_size; + unsigned long max_spanned_size = 0; + + csv_smr = memblock_alloc_node(CSV_SMR_INFO_SIZE, SMP_CACHE_BYTES, NUMA_NO_NODE); + if (!csv_smr) { + pr_err("Fail to allocate csv_smr\n"); + return; + } + + for_each_node_state(node, N_ONLINE) { + int ret; + char name[CMA_MAX_NAME]; + struct cma_array *array; + unsigned long spanned_size; + unsigned long start = 0, end = 0; + struct csv_cma *csv_cma; + + size = csv_early_percent_memory_on_node(node); + count = DIV_ROUND_UP(size, 1 << CSV_CMA_SHIFT); + if (!count) + continue; + + cma_array_size = count * sizeof(*csv_cma) + sizeof(*array); + array = 
memblock_alloc_node(cma_array_size, SMP_CACHE_BYTES, NUMA_NO_NODE); + if (!array) { + pr_err("Fail to allocate cma_array\n"); + continue; + } + + array->count = 0; + csv_contiguous_pernuma_area[node] = array; + + for (i = 0; i < count; i++) { + csv_cma = &array->csv_cma[i]; + csv_cma->fast = 1; + snprintf(name, sizeof(name), "csv-n%dc%d", node, i); + ret = cma_declare_contiguous_nid(0, CSV_CMA_SIZE, 0, + 1 << CSV_MR_ALIGN_BITS, PMD_SHIFT - PAGE_SHIFT, + false, name, &(csv_cma->cma), node); + if (ret) { + pr_warn("Fail to reserve memory size 0x%x node %d\n", + 1 << CSV_CMA_SHIFT, node); + break; + } + + if (start > cma_get_base(csv_cma->cma) || !start) + start = cma_get_base(csv_cma->cma); + + if (end < cma_get_base(csv_cma->cma) + cma_get_size(csv_cma->cma)) + end = cma_get_base(csv_cma->cma) + cma_get_size(csv_cma->cma); + } + + if (!i) + continue; + + array->count = i; + spanned_size = end - start; + if (spanned_size > max_spanned_size) + max_spanned_size = spanned_size; + + csv_smr[idx].start = start; + csv_smr[idx].size = end - start; + idx++; + + pr_info("Node %d - reserve size 0x%016lx, (expected size 0x%016lx)\n", + node, (unsigned long)i * CSV_CMA_SIZE, size); + } + + csv_smr_num = idx; + WARN_ON((max_spanned_size / NUM_SMR_ENTRIES) < 1); + if (likely((max_spanned_size / NUM_SMR_ENTRIES) >= 1)) + csv_set_smr_entry_shift(ilog2(max_spanned_size / NUM_SMR_ENTRIES - 1) + 1); +} + +/* + * Check whether host supports CSV3 in hygon platform. + * Called in the guest, it always returns false. 
+ */ +static bool __init csv3_check_cpu_support(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned long me_mask; + u64 msr; + bool csv3_enabled; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return false; + + if (sev_status) + return false; + + /* Check for the SME/CSV support leaf */ + eax = 0x80000000; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (eax < 0x8000001f) + return false; + +#define HYGON_SME_BIT BIT(0) +#define HYGON_CSV3_BIT BIT(30) + /* + * Check for the CSV feature: + * CPUID Fn8000_001F[EAX] + * - Bit 0 - SME support + * - Bit 1 - CSV support + * - Bit 3 - CSV2 support + * - Bit 30 - CSV3 support + */ + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (!(eax & HYGON_SME_BIT)) + return false; + + csv3_enabled = !!(eax & HYGON_CSV3_BIT); + + me_mask = 1UL << (ebx & 0x3f); + + /* No SME if Hypervisor bit is set */ + eax = 1; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (ecx & BIT(31)) + return false; + + /* For SME, check the SYSCFG MSR */ + msr = __rdmsr(MSR_AMD64_SYSCFG); + if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) + return false; + + return !!me_mask && csv3_enabled; +} + +#define CSV_CMA_AREAS 2458 + +void __init early_csv_reserve_mem(void) +{ + unsigned long total_pages; + + /* Only reserve memory on the host that enabled CSV3 feature */ + if (!csv3_check_cpu_support()) + return; + + if (cma_alloc_areas(CSV_CMA_AREAS)) + return; + + total_pages = PHYS_PFN(memblock_phys_mem_size()); + if (csv_mem_size) { + if (csv_mem_size < (total_pages << PAGE_SHIFT)) { + csv_mem_percentage = csv_mem_size * 100 / (total_pages << PAGE_SHIFT); + if (csv_mem_percentage > 80) + csv_mem_percentage = 80; /* Maximum percentage */ + } else + csv_mem_percentage = 80; /* Maximum percentage */ + } + + if (!csv_mem_percentage) { + pr_warn("Don't reserve any memory\n"); + return; + } + + csv_cma_reserve_mem(); +} + +phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align) 
+{ + int nid; + int nr_nodes; + struct page *page = NULL; + phys_addr_t phys_addr; + int count; + struct csv_cma *csv_cma; + int fast = 1; + + if (!nodes_allowed || size > CSV_CMA_SIZE) { + pr_err("Invalid params, size = 0x%lx, nodes_allowed = %p\n", + size, nodes_allowed); + return 0; + } + + align = min_t(unsigned int, align, get_order(CSV_CMA_SIZE)); +retry: + nr_nodes = nodes_weight(*nodes_allowed); + + /* Traverse from current node */ + nid = numa_node_id(); + if (!node_isset(nid, *nodes_allowed)) + nid = next_node_in(nid, *nodes_allowed); + + for (; nr_nodes > 0; nid = next_node_in(nid, *nodes_allowed), nr_nodes--) { + struct cma_array *array = csv_contiguous_pernuma_area[nid]; + + if (!array) + continue; + + count = array->count; + while (count) { + csv_cma = &array->csv_cma[count - 1]; + + /* + * The value check of csv_cma->fast is lockless, but + * that's ok as this don't affect functional correntness + * whatever the value of csv_cma->fast. + */ + if (fast && !csv_cma->fast) { + count--; + continue; + } + page = cma_alloc(csv_cma->cma, PAGE_ALIGN(size) >> PAGE_SHIFT, + align, true); + if (page) { + page->private = (unsigned long)csv_cma; + if (!csv_cma->fast) + csv_cma->fast = 1; + goto success; + } else + csv_cma->fast = 0; + + count--; + } + } + + if (fast) { + fast = 0; + goto retry; + } else { + pr_err("Fail to alloc secure memory(size = 0x%lx)\n", size); + return 0; + } + +success: + phys_addr = page_to_phys(page); + clflush_cache_range(__va(phys_addr), size); + + return phys_addr; +} +EXPORT_SYMBOL_GPL(csv_alloc_from_contiguous); + +void csv_release_to_contiguous(phys_addr_t pa, size_t size) +{ + struct csv_cma *csv_cma; + struct page *page = pfn_to_page(pa >> PAGE_SHIFT); + + WARN_ON(!page); + if (likely(page)) { + csv_cma = (struct csv_cma *)page->private; + WARN_ON(!csv_cma); + if (likely(csv_cma)) { + page->private = 0; + csv_cma->fast = 1; + cma_release(csv_cma->cma, page, PAGE_ALIGN(size) >> PAGE_SHIFT); + } + } +} 
+EXPORT_SYMBOL_GPL(csv_release_to_contiguous); diff --git a/include/linux/cma.h b/include/linux/cma.h index 63873b93deaa..4dadf9a05752 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -56,4 +56,5 @@ extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data); extern void cma_reserve_pages_on_error(struct cma *cma); +extern int __init cma_alloc_areas(unsigned int max_cma_size); #endif diff --git a/mm/cma.c b/mm/cma.c index ac363f16d392..5af7642e607b 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -36,7 +36,10 @@ #include "internal.h" #include "cma.h" -struct cma cma_areas[MAX_CMA_AREAS]; +static struct cma cma_areas_data[MAX_CMA_AREAS]; +static unsigned int cma_areas_size = MAX_CMA_AREAS; +struct cma *cma_areas = cma_areas_data; + unsigned cma_area_count; static DEFINE_MUTEX(cma_mutex); @@ -159,6 +162,25 @@ void __init cma_reserve_pages_on_error(struct cma *cma) cma->reserve_pages_on_error = true; } +int __init cma_alloc_areas(unsigned int max_cma_size) +{ + struct cma *data; + + if (max_cma_size <= MAX_CMA_AREAS) + return 0; + + if (cma_area_count || cma_areas != cma_areas_data) + return -EPERM; + + data = memblock_alloc(max_cma_size * sizeof(*cma_areas), SMP_CACHE_BYTES); + if (!data) + return -ENOMEM; + + cma_areas = data; + cma_areas_size = max_cma_size; + return 0; +} + /** * cma_init_reserved_mem() - create custom contiguous area from reserved memory * @base: Base address of the reserved area @@ -179,7 +201,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, struct cma *cma; /* Sanity checks */ - if (cma_area_count == ARRAY_SIZE(cma_areas)) { + if (cma_area_count == cma_areas_size) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } @@ -252,7 +274,7 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n", __func__, &size, &base, &limit, 
&alignment); - if (cma_area_count == ARRAY_SIZE(cma_areas)) { + if (cma_area_count == cma_areas_size) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } diff --git a/mm/cma.h b/mm/cma.h index 88a0595670b7..12aba820969c 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -33,7 +33,7 @@ struct cma { bool reserve_pages_on_error; }; -extern struct cma cma_areas[MAX_CMA_AREAS]; +extern struct cma *cma_areas; extern unsigned cma_area_count; static inline unsigned long cma_bitmap_maxno(struct cma *cma) -- Gitee From a7e149b4168b7525f5b12e0349e3fddf5fafcc38 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 11:03:41 +0800 Subject: [PATCH 0661/2138] anolis: crypto: ccp: Add SET_SMR/SET_SMCR commands for CSV3 ANBZ: #8681 Set guest memory regions in hygon hardware with SET_SMR command. Secure memory control region(SMCR) is a special memory region which is dedicated for CSV3 guest's meta data. SET_SMCR command is used to set SMCR memory in hygon hardware. Both SET_SMR and SET_SMCR should be issued early during platform initialization. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2995 --- drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/csv-dev.c | 101 +++++++++++++++++++++++++++++++++++ drivers/crypto/ccp/csv-dev.h | 31 +++++++++++ drivers/crypto/ccp/sev-dev.c | 15 ++++++ 4 files changed, 149 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/ccp/csv-dev.c create mode 100644 drivers/crypto/ccp/csv-dev.h diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 94c673805325..0da504999951 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -13,7 +13,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ tee-dev.o \ platform-access.o \ dbc.o \ - psp-ringbuf.o + psp-ringbuf.o \ + csv-dev.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ diff --git a/drivers/crypto/ccp/csv-dev.c b/drivers/crypto/ccp/csv-dev.c new file mode 100644 index 000000000000..b9a9ca4fa3c7 --- /dev/null +++ b/drivers/crypto/ccp/csv-dev.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Platform Security Processor (PSP) interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include "sev-dev.h" +#include "csv-dev.h" + +/* Function pointers for hooks */ +struct csv_hooks_table csv_hooks; + +#ifdef CONFIG_HYGON_CSV + +int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) +{ + int ret = 0; + unsigned int i = 0; + struct csv3_data_set_smr *cmd_set_smr; + struct csv3_data_set_smcr *cmd_set_smcr; + struct csv3_data_memory_region *smr_regions; + + if (!csv_smr || !csv_smr_num) + return -EINVAL; + + cmd_set_smr = kzalloc(sizeof(*cmd_set_smr), GFP_KERNEL); + if (!cmd_set_smr) + return -ENOMEM; + + smr_regions = kcalloc(csv_smr_num, sizeof(*smr_regions), GFP_KERNEL); + if (!smr_regions) { + ret = -ENOMEM; + goto e_free_cmd_set_smr; + } + + for (i = 0; i < csv_smr_num; i++) { + smr_regions[i].base_address = csv_smr[i].start; + smr_regions[i].size = csv_smr[i].size; + } + cmd_set_smr->smr_entry_size = 1 << csv_get_smr_entry_shift(); + cmd_set_smr->regions_paddr = __psp_pa(smr_regions); + cmd_set_smr->nregions = csv_smr_num; + ret = csv_hooks.sev_do_cmd(CSV3_CMD_SET_SMR, cmd_set_smr, error); + if (ret) { + pr_err("Fail to set SMR, ret %#x, error %#x\n", ret, *error); + goto e_free_smr_area; + } + + cmd_set_smcr = kzalloc(sizeof(*cmd_set_smcr), GFP_KERNEL); + if (!cmd_set_smcr) { + ret = -ENOMEM; + goto e_free_smr_area; + } + + cmd_set_smcr->base_address = csv_alloc_from_contiguous(1UL << CSV_MR_ALIGN_BITS, + &node_online_map, + get_order(1 << CSV_MR_ALIGN_BITS)); + if (!cmd_set_smcr->base_address) { + pr_err("Fail to alloc SMCR memory\n"); + ret = -ENOMEM; + goto e_free_cmd_set_smcr; + } + + cmd_set_smcr->size = 1UL << CSV_MR_ALIGN_BITS; + ret = csv_hooks.sev_do_cmd(CSV3_CMD_SET_SMCR, cmd_set_smcr, error); + if (ret) { + if (*error == SEV_RET_INVALID_COMMAND) + ret = 0; + else + pr_err("set smcr ret %#x, error %#x\n", ret, *error); + + csv_release_to_contiguous(cmd_set_smcr->base_address, + 1UL << CSV_MR_ALIGN_BITS); + } + +e_free_cmd_set_smcr: + kfree((void 
*)cmd_set_smcr); +e_free_smr_area: + kfree((void *)smr_regions); +e_free_cmd_set_smr: + kfree((void *)cmd_set_smr); + + if (ret) + dev_warn(sev->dev, + "CSV3: fail to set secure memory region, CSV3 support unavailable\n"); + + return ret; +} + +#endif /* CONFIG_HYGON_CSV */ diff --git a/drivers/crypto/ccp/csv-dev.h b/drivers/crypto/ccp/csv-dev.h new file mode 100644 index 000000000000..8865b945728b --- /dev/null +++ b/drivers/crypto/ccp/csv-dev.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) interface driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef __CSV_DEV_H__ +#define __CSV_DEV_H__ + +#include + +/* Hooks table: a table of function pointers filled in when psp init */ +extern struct csv_hooks_table { + int (*sev_do_cmd)(int cmd, void *data, int *psp_ret); +} csv_hooks; + +#ifdef CONFIG_HYGON_CSV + +int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error); + +#else /* !CONFIG_HYGON_CSV */ + +static inline int +csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) { return 0; } + +#endif /* CONFIG_HYGON_CSV */ + +#endif /* __CSV_DEV_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 893bafb42f81..ba4f9de8b783 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -33,6 +33,7 @@ #include "psp-dev.h" #include "sev-dev.h" +#include "csv-dev.h" #define DEVICE_NAME "sev" #define SEV_FW_FILE "amd/sev.fw" @@ -2375,6 +2376,13 @@ static void sev_exit(struct kref *ref) misc_dev = NULL; } +/* Code to set all of the function pointers for CSV. */ +static inline void csv_install_hooks(void) +{ + /* Install the hook functions for CSV. 
*/ + csv_hooks.sev_do_cmd = sev_do_cmd; +} + static int sev_misc_init(struct sev_device *sev) { struct device *dev = sev->dev; @@ -2404,6 +2412,9 @@ static int sev_misc_init(struct sev_device *sev) return ret; kref_init(&misc_dev->refcount); + + /* Install the hook functions for CSV */ + csv_install_hooks(); } else { kref_get(&misc_dev->refcount); } @@ -2566,6 +2577,10 @@ void sev_pci_init(void) if (!psp_init_on_probe) return; + /* Set SMR for HYGON CSV3 */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + csv_platform_cmd_set_secure_memory_region(sev, &error); + /* Initialize the platform */ rc = sev_platform_init(&error); if (rc) -- Gitee From 82dacb15ac20a5d1b199f50b38974b4d308ca537 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Sat, 16 Mar 2024 13:40:54 +0800 Subject: [PATCH 0662/2138] anolis: KVM: SEV: Pin SEV guest memory out of CMA area ANBZ: #8681 When pin_user_pages_fast pin SEV guest memory without FOLL_LONGTERM flag, the pinning pages may be in CMA area, which resulting in other applications may can't use the CMA area because the pinning pages can't be migrated. Add FOLL_LONGTERM flag to pin_user_pages_fast, which makes sure that we don't keep non_movable pages (due to page reference count) in CMA area. So CMA area can be allocated by other applications. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2995 --- arch/x86/kvm/svm/sev.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index b9f3378b5f36..35258bcc1b62 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -557,6 +557,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, unsigned long locked, lock_limit; struct page **pages; unsigned long first, last; + unsigned int flags = 0; int ret; lockdep_assert_held(&kvm->lock); @@ -589,8 +590,10 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, if (!pages) return ERR_PTR(-ENOMEM); + flags = write ? FOLL_WRITE : 0; + /* Pin the user virtual address. */ - npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages); + npinned = pin_user_pages_fast(uaddr, npages, flags | FOLL_LONGTERM, pages); if (npinned != npages) { pr_err("SEV: Failure locking %lu pages.\n", npages); ret = -ENOMEM; -- Gitee From a4db4040d21f144bd5ef3b8e330841ebbfc240b2 Mon Sep 17 00:00:00 2001 From: Cruz Zhao Date: Tue, 28 Feb 2023 09:42:36 +0000 Subject: [PATCH 0663/2138] anolis: sched/core: introduce ht-aware-quota ANBZ: #8648 With acpu accounting, we are able to assess how long the task is running with sibling idle and how long with sibling busy. To make the computing power of tasks stable, we need the tasks to execute a similar number of instructions in each scheduling cycle. To achieve this goal, we introduce ht-aware-quota. When a task is running with sibling idle, we consider the task to have executed more instructions, with a certain ratio, and the sibling idle time * ratio will be accounted into its cfs_rq_runtime, not just sibling idle time. The ratio can be configured from /sys/fs/cgroup//cpu.ht_ratio, unit: percentage, range: [100, 200], default: 100. 
As for now, ht-aware-quota is only valid for cookie'd tasks, as when the sibling is busy, we know what task is running. And sched_feat SCHED_CORE_HT_AWARE_QUOTA is required to be enabled. Signed-off-by: Cruz Zhao Reviewed-by: Yi Tao Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2956 --- include/linux/sched.h | 3 +++ kernel/sched/core.c | 49 +++++++++++++++++++++++++++++++++++++++ kernel/sched/core_sched.c | 3 ++- kernel/sched/fair.c | 29 +++++++++++++++++++++++ kernel/sched/features.h | 4 ++++ kernel/sched/sched.h | 8 +++++++ 6 files changed, 95 insertions(+), 1 deletion(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 99df651e6b11..ea17a90d15bd 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -593,6 +593,9 @@ struct sched_entity { */ struct sched_avg avg; #endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + unsigned int ht_ratio; +#endif }; struct sched_rt_entity { diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e51210cfbd7f..0dc08a7eab36 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -10598,6 +10598,9 @@ struct task_group *sched_create_group(struct task_group *parent) alloc_uclamp_sched_group(tg, parent); +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + tg->ht_ratio = 100; +#endif return tg; err: @@ -11485,6 +11488,38 @@ static int cpu_idle_write_s64(struct cgroup_subsys_state *css, } #endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) +static int cpu_ht_ratio_write(struct cgroup_subsys_state *css, + struct cftype *cftype, u64 ht_ratio) +{ + struct task_group *tg = css_tg(css); + int cpu; + + if (ht_ratio < 100 || ht_ratio > 200) + return -1; + + if (tg == &root_task_group) + return -1; + + tg->ht_ratio = ht_ratio; + for_each_online_cpu(cpu) { + struct sched_entity *se = tg->se[cpu]; + + se->ht_ratio = ht_ratio; + } + + return 0; +} + +static u64 cpu_ht_ratio_read(struct cgroup_subsys_state *css, + struct cftype 
*cft) +{ + struct task_group *tg = css_tg(css); + + return tg->ht_ratio; +} +#endif + static struct cftype cpu_legacy_files[] = { #ifdef CONFIG_FAIR_GROUP_SCHED { @@ -11553,6 +11588,13 @@ static struct cftype cpu_legacy_files[] = { .seq_show = cpu_uclamp_max_show, .write = cpu_uclamp_max_write, }, +#endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + { + .name = "ht_ratio", + .read_u64 = cpu_ht_ratio_read, + .write_u64 = cpu_ht_ratio_write, + }, #endif { } /* Terminate */ }; @@ -11780,6 +11822,13 @@ static struct cftype cpu_files[] = { .seq_show = cpu_uclamp_max_show, .write = cpu_uclamp_max_write, }, +#endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + { + .name = "ht_ratio", + .read_u64 = cpu_ht_ratio_read, + .write_u64 = cpu_ht_ratio_write, + }, #endif { } /* terminate */ }; diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c index f931992fc08e..924859051b5f 100644 --- a/kernel/sched/core_sched.c +++ b/kernel/sched/core_sched.c @@ -292,9 +292,10 @@ void __sched_core_account_sibidle(struct rq *rq) */ __account_sibidle_time(p, delta, delta_task, !!rq->core->core_forceidle_count); + account_ht_aware_quota(p, delta_task); } -out: +out:; #ifdef CONFIG_SCHED_ACPU for_each_cpu(i, smt_mask) { rq_i = cpu_rq(i); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index a76e8b4570d7..4d4d4de16c9f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -12674,6 +12674,32 @@ static int task_is_throttled_fair(struct task_struct *p, int cpu) #endif return throttled_hierarchy(cfs_rq); } + +#ifdef CONFIG_CFS_BANDWIDTH +void account_ht_aware_quota(struct task_struct *p, u64 delta) +{ + struct sched_entity *se; + unsigned int ht_ratio; + struct cfs_rq *cfs_rq; + + /* We only account ht_aware_quota for cookied task. 
*/ + if (sched_feat(SCHED_CORE_HT_AWARE_QUOTA) && p->core_cookie) { + se = &p->se; + cfs_rq = task_cfs_rq(p); + + if (se->parent) { + ht_ratio = se->parent->ht_ratio; + if (ht_ratio > 100 && ht_ratio <= 200) { + for_each_sched_entity(se) { + cfs_rq = cfs_rq_of(se); + account_cfs_rq_runtime(cfs_rq, + delta * (ht_ratio - 100) / 100); + } + } + } + } +} +#endif #else static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {} #endif @@ -12959,6 +12985,9 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) init_cfs_rq(cfs_rq); init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); init_entity_runnable_average(se); +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + se->ht_ratio = 100; +#endif } return 1; diff --git a/kernel/sched/features.h b/kernel/sched/features.h index f770168230ae..ee7fb7220ed8 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -89,3 +89,7 @@ SCHED_FEAT(UTIL_EST_FASTUP, true) SCHED_FEAT(LATENCY_WARN, false) SCHED_FEAT(HZ_BW, true) + +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) +SCHED_FEAT(SCHED_CORE_HT_AWARE_QUOTA, false) +#endif diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 91383033b76a..74fcbb36c538 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -420,6 +420,9 @@ struct task_group { /* Effective clamp values used for a task group */ struct uclamp_se uclamp[UCLAMP_CNT]; #endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + unsigned int ht_ratio; +#endif }; @@ -1337,6 +1340,11 @@ extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags); extern void sched_core_get(void); extern void sched_core_put(void); +#ifdef CONFIG_CFS_BANDWIDTH +extern void account_ht_aware_quota(struct task_struct *p, u64 delta); +#else +void account_ht_aware_quota(struct task_struct *p, u64 delta) {} +#endif #else /* !CONFIG_SCHED_CORE */ static inline bool sched_core_enabled(struct rq *rq) -- Gitee From 
49aad67e5a4b3103ae20d1a67e85123d1c32112a Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Mon, 20 Jan 2020 17:24:11 +0800 Subject: [PATCH 0664/2138] anolis: sched/isolation: dynamical CPU isolation support ANBZ: #8684 We have so many wild tasks under root cgroup, they come from anywhere and there are no good way to manage them properly. However, we don't want them to disturb our critical tasks, for example the IO process tasks in MOC environment. Currently we're using 'isolcpu' cmdline parameter to isolate CPUs, however this is a static config, so later when we want to release more CPU resources, reboot is required. This patch introduced a way to dynamically isolate CPUs from those wild tasks, give admin the capability of dynamical CPU resource arrangement. By 'echo CPULIST > /proc/dyn_isolcpus', the CPUs will be: * Isolated from the unbound userspace wild tasks * Get rid of schedule domain * Isolated from the unbound workqueue worker Reviewed-by: Shanpei Chen Signed-off-by: Michael Wang Acked-by: Michael Wang Reviewed-by: Yihao Wu Signed-off-by: Cruz Zhao --- include/linux/sched/isolation.h | 7 ++ kernel/cgroup/cpuset.c | 23 +++- kernel/sched/isolation.c | 188 ++++++++++++++++++++++++++++++++ 3 files changed, 215 insertions(+), 3 deletions(-) diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index fe1a46f30d24..bf538a280c82 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -55,6 +55,13 @@ static inline bool housekeeping_test_cpu(int cpu, enum hk_type type) static inline void housekeeping_init(void) { } #endif /* CONFIG_CPU_ISOLATION */ +#if defined(CONFIG_CPU_ISOLATION) && defined(CONFIG_CGROUP_SCHED) +DECLARE_STATIC_KEY_FALSE(dyn_isolcpus_enabled); +extern void wilds_cpus_allowed(struct cpumask *pmask); +#else +static inline void wilds_cpus_allowed(struct cpumask *pmask) {} +#endif + static inline bool housekeeping_cpu(int cpu, enum hk_type type) { #ifdef CONFIG_CPU_ISOLATION diff --git 
a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 3646426c69e2..5b54d3573c82 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -2646,9 +2646,10 @@ static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task) { lockdep_assert_held(&cpuset_mutex); - if (cs != &top_cpuset) + if (cs != &top_cpuset) { guarantee_online_cpus(task, cpus_attach); - else + wilds_cpus_allowed(cpus_attach); + } else cpumask_andnot(cpus_attach, task_cpu_possible_mask(task), cs->subparts_cpus); /* @@ -3477,8 +3478,24 @@ static void cpuset_fork(struct task_struct *task) rcu_read_unlock(); if (same_cs) { - if (cs == &top_cpuset) + if (cs == &top_cpuset) { + /* + * This is necessary since update_wilds_cpumask() + * could have missed the 'task', if it's parent is + * the last one on the iteratoration list, like: + * + * 1. 'task' dup old dyn_allowed from parent + * 2. update_wilds_cpumask() begin + * 3. new dyn_allowed applied to parent + * 4. update_wilds_cpumask() end + * 5. 'task' add into iteratoration list + * + * Fix this by redup current's allowed here if changed. + */ + if (!cpumask_equal(task->cpus_ptr, current->cpus_ptr)) + set_cpus_allowed_ptr(task, current->cpus_ptr); return; + } set_cpus_allowed_ptr(task, current->cpus_ptr); task->mems_allowed = current->mems_allowed; diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index 82e2f7fc7c26..7538e793e755 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -53,8 +53,32 @@ int housekeeping_any_cpu(enum hk_type type) } EXPORT_SYMBOL_GPL(housekeeping_any_cpu); +#ifdef CONFIG_CGROUP_SCHED +/* + * dyn_allowed -- allowed CPUs for wild tasks. + * + * dyn_isolated -- isolated CPUs for wild tasks. + * + * dyn_possible -- possible CPUs for dynamical isolation. 
+ */ +static cpumask_var_t dyn_allowed; +static cpumask_var_t dyn_isolated; +static cpumask_var_t dyn_possible; + +static bool dyn_isolcpus_ready; + +DEFINE_STATIC_KEY_FALSE(dyn_isolcpus_enabled); +EXPORT_SYMBOL_GPL(dyn_isolcpus_enabled); +#endif + const struct cpumask *housekeeping_cpumask(enum hk_type type) { +#ifdef CONFIG_CGROUP_SCHED + if (static_branch_unlikely(&dyn_isolcpus_enabled)) + if (BIT(type) & HK_FLAG_DOMAIN) + return dyn_allowed; +#endif + if (static_branch_unlikely(&housekeeping_overridden)) if (housekeeping.flags & BIT(type)) return housekeeping.cpumasks[type]; @@ -72,6 +96,12 @@ EXPORT_SYMBOL_GPL(housekeeping_affine); bool housekeeping_test_cpu(int cpu, enum hk_type type) { +#ifdef CONFIG_CGROUP_SCHED + if (static_branch_unlikely(&dyn_isolcpus_enabled)) + if (BIT(type) & HK_FLAG_DOMAIN) + return cpumask_test_cpu(cpu, dyn_allowed); +#endif + if (static_branch_unlikely(&housekeeping_overridden)) if (housekeeping.flags & BIT(type)) return cpumask_test_cpu(cpu, housekeeping.cpumasks[type]); @@ -79,10 +109,30 @@ bool housekeeping_test_cpu(int cpu, enum hk_type type) } EXPORT_SYMBOL_GPL(housekeeping_test_cpu); +#ifdef CONFIG_CGROUP_SCHED +static inline void free_dyn_masks(void) +{ + free_cpumask_var(dyn_allowed); + free_cpumask_var(dyn_isolated); + free_cpumask_var(dyn_possible); +} +#endif + void __init housekeeping_init(void) { enum hk_type type; +#ifdef CONFIG_CGROUP_SCHED + if (zalloc_cpumask_var(&dyn_allowed, GFP_KERNEL) && + zalloc_cpumask_var(&dyn_isolated, GFP_KERNEL) && + zalloc_cpumask_var(&dyn_possible, GFP_KERNEL)) { + cpumask_copy(dyn_allowed, cpu_possible_mask); + cpumask_copy(dyn_possible, cpu_possible_mask); + dyn_isolcpus_ready = true; + } else + free_dyn_masks(); +#endif + if (!housekeeping.flags) return; @@ -95,6 +145,12 @@ void __init housekeeping_init(void) /* We need at least one CPU to handle housekeeping work */ WARN_ON_ONCE(cpumask_empty(housekeeping.cpumasks[type])); } +#ifdef CONFIG_CGROUP_SCHED + if ((housekeeping.flags & 
HK_FLAG_DOMAIN) && type < HK_TYPE_MAX) { + cpumask_copy(dyn_allowed, housekeeping.cpumasks[type]); + cpumask_copy(dyn_possible, housekeeping.cpumasks[type]); + } +#endif } static void __init housekeeping_setup_type(enum hk_type type, @@ -244,3 +300,135 @@ static int __init housekeeping_isolcpus_setup(char *str) return housekeeping_setup(str, flags); } __setup("isolcpus=", housekeeping_isolcpus_setup); + +#ifdef CONFIG_CGROUP_SCHED +static int dyn_isolcpus_show(struct seq_file *s, void *p) +{ + seq_printf(s, "%*pbl\n", cpumask_pr_args(dyn_isolated)); + + return 0; +} + +static int dyn_isolcpus_open(struct inode *inode, struct file *file) +{ + return single_open(file, dyn_isolcpus_show, NULL); +} + +void wilds_cpus_allowed(struct cpumask *pmask) +{ + if (static_branch_unlikely(&dyn_isolcpus_enabled)) + cpumask_and(pmask, pmask, dyn_allowed); +} + +void update_wilds_cpumask(cpumask_var_t new_allowed, cpumask_var_t old_allowed) +{ + struct css_task_iter it; + struct task_struct *task; + struct task_group *tg = &root_task_group; + + css_task_iter_start(&tg->css, 0, &it); + while ((task = css_task_iter_next(&it))) { + if (task->flags & PF_KTHREAD) + continue; + + if (!cpumask_equal(task->cpus_ptr, old_allowed)) + continue; + + set_cpus_allowed_ptr(task, new_allowed); + } + css_task_iter_end(&it); +} + +static DEFINE_MUTEX(dyn_isolcpus_mutex); + +static ssize_t write_dyn_isolcpus(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int ret = count; + cpumask_var_t isolated; + cpumask_var_t new_allowed; + cpumask_var_t old_allowed; + + mutex_lock(&dyn_isolcpus_mutex); + + if (!zalloc_cpumask_var(&isolated, GFP_KERNEL)) { + ret = -ENOMEM; + goto out; + } + + if (!zalloc_cpumask_var(&new_allowed, GFP_KERNEL)) { + ret = -ENOMEM; + goto free_isolated; + } + + if (!zalloc_cpumask_var(&old_allowed, GFP_KERNEL)) { + ret = -ENOMEM; + goto free_new_allowed; + } + + if (cpumask_parselist_user(buf, count, isolated)) { + ret = -EINVAL; + goto free_all; + } + + 
if (!cpumask_subset(isolated, dyn_possible)) { + ret = -EINVAL; + goto free_all; + } + + /* At least reserve one for wild tasks to run */ + cpumask_andnot(new_allowed, dyn_possible, isolated); + if (!cpumask_intersects(new_allowed, cpu_online_mask)) { + ret = -EINVAL; + goto free_all; + } + + cpumask_copy(old_allowed, dyn_allowed); + cpumask_copy(dyn_allowed, new_allowed); + cpumask_copy(dyn_isolated, isolated); + + if (cpumask_empty(dyn_isolated)) + static_branch_disable(&dyn_isolcpus_enabled); + else + static_branch_enable(&dyn_isolcpus_enabled); + + update_wilds_cpumask(new_allowed, old_allowed); + + rebuild_sched_domains(); + workqueue_set_unbound_cpumask(new_allowed); + +free_all: + free_cpumask_var(old_allowed); +free_new_allowed: + free_cpumask_var(new_allowed); +free_isolated: + free_cpumask_var(isolated); +out: + mutex_unlock(&dyn_isolcpus_mutex); + + return ret; +} + +static const struct proc_ops proc_dyn_isolcpus_operations = { + .proc_open = dyn_isolcpus_open, + .proc_read = seq_read, + .proc_write = write_dyn_isolcpus, + .proc_lseek = noop_llseek, +}; + +static int __init dyn_isolcpus_init(void) +{ + if (dyn_isolcpus_ready && + !proc_create("dyn_isolcpus", 0200, NULL, + &proc_dyn_isolcpus_operations)) { + dyn_isolcpus_ready = false; + free_dyn_masks(); + } + + if (!dyn_isolcpus_ready) + pr_err("Initialize Dynamical Isolation Failed\n"); + + return 0; +} +early_initcall(dyn_isolcpus_init); +#endif -- Gitee From 64a4cb04ea4f5757508d94e1b3d07426a1109360 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Thu, 2 Feb 2023 16:54:45 +0800 Subject: [PATCH 0665/2138] anolis: sched/isolation: dynamically isolate all tasks ANBZ: #8684 Previously, only tasks in the root task group were isolated. But production proves universal isolation more useful, especially when ops engineer has limited control over the system. In order to prevent any tasks disturbing critical tasks, we choose to isolate all tasks, instead of wild tasks. 
Also dyn_isolcpus behaves more like isolcpus kernel cmdline now. Signed-off-by: Yihao Wu --- kernel/sched/isolation.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index 7538e793e755..91dd5b0a1a52 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -322,12 +322,10 @@ void wilds_cpus_allowed(struct cpumask *pmask) void update_wilds_cpumask(cpumask_var_t new_allowed, cpumask_var_t old_allowed) { - struct css_task_iter it; - struct task_struct *task; - struct task_group *tg = &root_task_group; + struct task_struct *g, *task; - css_task_iter_start(&tg->css, 0, &it); - while ((task = css_task_iter_next(&it))) { + rcu_read_lock(); + for_each_process_thread(g, task) { if (task->flags & PF_KTHREAD) continue; @@ -336,7 +334,7 @@ void update_wilds_cpumask(cpumask_var_t new_allowed, cpumask_var_t old_allowed) set_cpus_allowed_ptr(task, new_allowed); } - css_task_iter_end(&it); + rcu_read_unlock(); } static DEFINE_MUTEX(dyn_isolcpus_mutex); -- Gitee From 123afab0d4ff84e91b10b77238dd1de7c1029ee8 Mon Sep 17 00:00:00 2001 From: suhua Date: Fri, 16 Jun 2023 10:19:02 +0800 Subject: [PATCH 0666/2138] anolis: sched: Copy mask when dyn_isolcpus_ready is true ANBZ: #8684 In the process of initializing dyn_allowed and dyn_possible by housekeeping_init function, the check to see if the target cpumask memory space is successfully requested is missing. 
Signed-off-by: suhua Reviewed-by: Tianchen Ding Reviewed-by: Cruz Zhao Reviewed-by: Zelin Deng --- kernel/sched/isolation.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index 91dd5b0a1a52..6fdb7ed32ebf 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -146,7 +146,8 @@ void __init housekeeping_init(void) WARN_ON_ONCE(cpumask_empty(housekeeping.cpumasks[type])); } #ifdef CONFIG_CGROUP_SCHED - if ((housekeeping.flags & HK_FLAG_DOMAIN) && type < HK_TYPE_MAX) { + if (dyn_isolcpus_ready && (housekeeping.flags & HK_FLAG_DOMAIN) && + type < HK_TYPE_MAX) { cpumask_copy(dyn_allowed, housekeeping.cpumasks[type]); cpumask_copy(dyn_possible, housekeeping.cpumasks[type]); } -- Gitee From ea83c7dcfa8211d92e166715c3ca1bccbe67eab7 Mon Sep 17 00:00:00 2001 From: lishuo Date: Wed, 3 Apr 2024 09:28:52 +0000 Subject: [PATCH 0667/2138] anolis: DRM: Add Phytium Display Engine support. ANBZ: #8701 phytium inclusion category: feature CVE: NA --------------------------------------------------------- This is Phytium Display Engine support,DC/DP driver patch. 
Signed-off-by: Yang Xun Signed-off-by: Chen Baozi Signed-off-by: lishuo Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3006 --- .../devicetree/bindings/gpu/phytium,dc.yaml | 49 + .../devicetree/bindings/vendor-prefixes.yaml | 2 + drivers/gpu/drm/Kconfig | 2 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/phytium/Kconfig | 12 + drivers/gpu/drm/phytium/Makefile | 20 + drivers/gpu/drm/phytium/pe220x_dc.c | 255 ++ drivers/gpu/drm/phytium/pe220x_dc.h | 31 + drivers/gpu/drm/phytium/pe220x_dp.c | 514 ++++ drivers/gpu/drm/phytium/pe220x_dp.h | 14 + drivers/gpu/drm/phytium/pe220x_reg.h | 209 ++ drivers/gpu/drm/phytium/phytium_crtc.c | 484 +++ drivers/gpu/drm/phytium/phytium_crtc.h | 39 + drivers/gpu/drm/phytium/phytium_debugfs.c | 456 +++ drivers/gpu/drm/phytium/phytium_debugfs.h | 13 + drivers/gpu/drm/phytium/phytium_display_drv.c | 434 +++ drivers/gpu/drm/phytium/phytium_display_drv.h | 174 ++ drivers/gpu/drm/phytium/phytium_dp.c | 2639 +++++++++++++++++ drivers/gpu/drm/phytium/phytium_dp.h | 156 + drivers/gpu/drm/phytium/phytium_fb.c | 131 + drivers/gpu/drm/phytium/phytium_fb.h | 26 + drivers/gpu/drm/phytium/phytium_fbdev.c | 151 + drivers/gpu/drm/phytium/phytium_fbdev.h | 13 + drivers/gpu/drm/phytium/phytium_gem.c | 509 ++++ drivers/gpu/drm/phytium/phytium_gem.h | 42 + drivers/gpu/drm/phytium/phytium_panel.c | 420 +++ drivers/gpu/drm/phytium/phytium_panel.h | 46 + drivers/gpu/drm/phytium/phytium_pci.c | 387 +++ drivers/gpu/drm/phytium/phytium_pci.h | 26 + drivers/gpu/drm/phytium/phytium_plane.c | 632 ++++ drivers/gpu/drm/phytium/phytium_plane.h | 46 + drivers/gpu/drm/phytium/phytium_platform.c | 307 ++ drivers/gpu/drm/phytium/phytium_platform.h | 18 + drivers/gpu/drm/phytium/phytium_reg.h | 365 +++ drivers/gpu/drm/phytium/px210_dc.c | 326 ++ drivers/gpu/drm/phytium/px210_dc.h | 30 + drivers/gpu/drm/phytium/px210_dp.c | 920 ++++++ drivers/gpu/drm/phytium/px210_dp.h | 13 + drivers/gpu/drm/phytium/px210_reg.h | 349 +++ include/linux/pci_ids.h | 
2 + 40 files changed, 10263 insertions(+) create mode 100644 Documentation/devicetree/bindings/gpu/phytium,dc.yaml create mode 100644 drivers/gpu/drm/phytium/Kconfig create mode 100644 drivers/gpu/drm/phytium/Makefile create mode 100644 drivers/gpu/drm/phytium/pe220x_dc.c create mode 100644 drivers/gpu/drm/phytium/pe220x_dc.h create mode 100644 drivers/gpu/drm/phytium/pe220x_dp.c create mode 100644 drivers/gpu/drm/phytium/pe220x_dp.h create mode 100644 drivers/gpu/drm/phytium/pe220x_reg.h create mode 100644 drivers/gpu/drm/phytium/phytium_crtc.c create mode 100644 drivers/gpu/drm/phytium/phytium_crtc.h create mode 100644 drivers/gpu/drm/phytium/phytium_debugfs.c create mode 100644 drivers/gpu/drm/phytium/phytium_debugfs.h create mode 100644 drivers/gpu/drm/phytium/phytium_display_drv.c create mode 100644 drivers/gpu/drm/phytium/phytium_display_drv.h create mode 100644 drivers/gpu/drm/phytium/phytium_dp.c create mode 100644 drivers/gpu/drm/phytium/phytium_dp.h create mode 100644 drivers/gpu/drm/phytium/phytium_fb.c create mode 100644 drivers/gpu/drm/phytium/phytium_fb.h create mode 100644 drivers/gpu/drm/phytium/phytium_fbdev.c create mode 100644 drivers/gpu/drm/phytium/phytium_fbdev.h create mode 100644 drivers/gpu/drm/phytium/phytium_gem.c create mode 100644 drivers/gpu/drm/phytium/phytium_gem.h create mode 100644 drivers/gpu/drm/phytium/phytium_panel.c create mode 100644 drivers/gpu/drm/phytium/phytium_panel.h create mode 100644 drivers/gpu/drm/phytium/phytium_pci.c create mode 100644 drivers/gpu/drm/phytium/phytium_pci.h create mode 100644 drivers/gpu/drm/phytium/phytium_plane.c create mode 100644 drivers/gpu/drm/phytium/phytium_plane.h create mode 100644 drivers/gpu/drm/phytium/phytium_platform.c create mode 100644 drivers/gpu/drm/phytium/phytium_platform.h create mode 100644 drivers/gpu/drm/phytium/phytium_reg.h create mode 100644 drivers/gpu/drm/phytium/px210_dc.c create mode 100644 drivers/gpu/drm/phytium/px210_dc.h create mode 100644 
drivers/gpu/drm/phytium/px210_dp.c create mode 100644 drivers/gpu/drm/phytium/px210_dp.h create mode 100644 drivers/gpu/drm/phytium/px210_reg.h diff --git a/Documentation/devicetree/bindings/gpu/phytium,dc.yaml b/Documentation/devicetree/bindings/gpu/phytium,dc.yaml new file mode 100644 index 000000000000..5be348f6e23f --- /dev/null +++ b/Documentation/devicetree/bindings/gpu/phytium,dc.yaml @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/dc/snps,dc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Phytium Display Controller + +maintainers: + - Chen Baozi + +allOf: + - $ref: /schemas/dc/display-controller.yaml# + +properties: + compatible: + const: phytium,dc + + reg: + minItems: 1 + items: + - description: Offset and length of the memory mapped registers + + interrupts: + maxItems: 1 + + clocks: + minItems: 1 + items: + - description:Display controller reference clock source + +unevaluatedProperties: false + +required: + - compatible + - reg + - interrupts + +Example: + /memreserve/ 0xf4000000 0x4000000; // (optional) + + dc0@32000000 { + compatible = "phytium,dc"; + reg = <0x0 0x32000000 0x0 0x8000>, + <0x0 0xf4000000 0x0 0x4000000>; // (optional) + interrupts = ; + pipe_mask = 0x3 + edp_mask = 0x0; + }; diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index 12a16031d7b6..93258265c6b0 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -1047,6 +1047,8 @@ patternProperties: description: PHICOMM Co., Ltd. "^phytec,.*": description: PHYTEC Messtechnik GmbH + "^phytium,.*": + description: Phytium Technology Co., Ltd. 
"^picochip,.*": description: Picochip Ltd "^pine64,.*": diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 353ffa210f0e..2a89adbbf9fa 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -388,6 +388,8 @@ source "drivers/gpu/drm/solomon/Kconfig" source "drivers/gpu/drm/sprd/Kconfig" +source "drivers/gpu/drm/phytium/Kconfig" + config DRM_HYPERV tristate "DRM Support for Hyper-V synthetic video device" depends on DRM && PCI && MMU && HYPERV diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 12ad840d9e3a..017ff5a6ebe2 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -199,3 +199,4 @@ obj-y += solomon/ obj-$(CONFIG_DRM_SPRD) += sprd/ obj-$(CONFIG_DRM_LOONGSON) += loongson/ obj-$(CONFIG_HYDCU_FIXUP_HEADER) += hygon/hydcu-fixup-header/ +obj-$(CONFIG_DRM_PHYTIUM) += phytium/ diff --git a/drivers/gpu/drm/phytium/Kconfig b/drivers/gpu/drm/phytium/Kconfig new file mode 100644 index 000000000000..5f540962129a --- /dev/null +++ b/drivers/gpu/drm/phytium/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config DRM_PHYTIUM + tristate "DRM Support for Phytium Graphics Card" + depends on DRM && ARCH_PHYTIUM + select DRM_KMS_HELPER + select DRM_DISPLAY_HELPER + select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_HDCP_HELPER + help + Choose this option if you have a phytium graphics card. + This driver provides kernel mode setting and buffer management to userspace. 
diff --git a/drivers/gpu/drm/phytium/Makefile b/drivers/gpu/drm/phytium/Makefile new file mode 100644 index 000000000000..1f68cdcd80da --- /dev/null +++ b/drivers/gpu/drm/phytium/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only + +phytium-dc-drm-y := phytium_display_drv.o \ + phytium_plane.o \ + phytium_crtc.o \ + phytium_dp.o \ + phytium_fb.o \ + phytium_gem.o \ + phytium_fbdev.o \ + phytium_debugfs.o \ + px210_dp.o \ + phytium_panel.o \ + px210_dc.o \ + phytium_pci.o \ + pe220x_dp.o \ + pe220x_dc.o \ + phytium_platform.o + +obj-$(CONFIG_DRM_PHYTIUM) += phytium-dc-drm.o +CFLAGS_REMOVE_phytium_crtc.o += -mgeneral-regs-only diff --git a/drivers/gpu/drm/phytium/pe220x_dc.c b/drivers/gpu/drm/phytium/pe220x_dc.c new file mode 100644 index 000000000000..8f74199f9a47 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dc.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium Pe220x display controller DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "pe220x_reg.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +void pe220x_dc_hw_disable(struct drm_crtc *crtc); + +static const unsigned int pe220x_primary_formats[] = { + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_YUYV, + DRM_FORMAT_UYVY, + DRM_FORMAT_NV16, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, +}; + +static uint64_t pe220x_primary_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static uint64_t pe220x_cursor_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const unsigned int pe220x_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +void pe220x_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size) +{ + uint32_t config; + uint32_t group_offset = priv->address_transform_base; + + phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, + group_offset, PE220X_DC_ADDRESS_TRANSFORM_SRC_ADDR); + phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, + group_offset, PE220X_DC_ADDRESS_TRANSFORM_SIZE); + config = phytium_readl_reg(priv, group_offset, PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR); + 
phytium_writel_reg(priv, config, group_offset, PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR); +} + +void pe220x_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + int ret = 0; + + /* config pix clock */ + phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), + 0, PE220X_DC_CMD_REGISTER(phys_pipe)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(phys_pipe), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set pixel clock\n", __func__); +} + +void pe220x_dc_hw_reset(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + /* disable pixel clock for bmc mode */ + if (phys_pipe == 0) + pe220x_dc_hw_disable(crtc); + + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + config &= (~(DC0_CORE_RESET | DC1_CORE_RESET | AXI_RESET | AHB_RESET)); + + if (phys_pipe == 0) { + phytium_writel_reg(priv, config | DC0_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET | AHB_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + } else { + phytium_writel_reg(priv, config | DC1_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | 
DC1_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET | AHB_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + } +} + +void pe220x_dc_hw_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + /* clear framebuffer */ + phytium_writel_reg(priv, CLEAR_VALUE_BLACK, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config |= FRAMEBUFFER_CLEAR; + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + /* disable cursor */ + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + config = ((config & (~CURSOR_FORMAT_MASK)) | CURSOR_FORMAT_DISABLED); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + mdelay(20); + + /* reset pix clock */ + pe220x_dc_hw_config_pix_clock(crtc, 0); + + if (phys_pipe == 0) { + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | DC0_CORE_RESET, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config & (~DC0_CORE_RESET), 0, PE220X_DC_CLOCK_CONTROL); + } else { + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | DC1_CORE_RESET, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config & (~DC1_CORE_RESET), 0, 
PE220X_DC_CLOCK_CONTROL); + } + udelay(20); +} + +int pe220x_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count) +{ + int ret = 0; + + if (mode_cmd->modifier[count] != DRM_FORMAT_MOD_LINEAR) { + DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[count]); + ret = -EINVAL; + } + + return ret; +} + +void pe220x_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = pe220x_primary_formats_modifiers; + *formats = pe220x_primary_formats; + *format_count = ARRAY_SIZE(pe220x_primary_formats); +} + +void pe220x_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = pe220x_cursor_formats_modifiers; + *formats = pe220x_cursor_formats; + *format_count = ARRAY_SIZE(pe220x_cursor_formats); +} + +void pe220x_dc_hw_update_primary_hi_addr(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + + phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_Y_HI_ADDRESS); + + phytium_writel_reg(priv, (phytium_plane->iova[1] >> U_PREFIX_SHIFT) & U_PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_U_HI_ADDRESS); + + phytium_writel_reg(priv, (phytium_plane->iova[2] >> V_PREFIX_SHIFT) & V_PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_V_HI_ADDRESS); +} + +void pe220x_dc_hw_update_cursor_hi_addr(struct drm_plane *plane, uint64_t iova) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + int config; + + config = ((iova >> 
CURSOR_PREFIX_SHIFT) & CURSOR_PREFIX_MASK); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PE220X_DC_CURSOR_HI_ADDRESS); +} diff --git a/drivers/gpu/drm/phytium/pe220x_dc.h b/drivers/gpu/drm/phytium/pe220x_dc.h new file mode 100644 index 000000000000..f88a054cf0d0 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium Pe220x display controller DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PE220X_DC_H__ +#define __PE220X_DC_H__ + +#define PE220X_DC_PIX_CLOCK_MAX (594000) +#define PE220X_DC_HDISPLAY_MAX 3840 +#define PE220X_DC_VDISPLAY_MAX 2160 +#define PE220X_DC_ADDRESS_MASK 0x7f + +extern void pe220x_dc_hw_vram_init(struct phytium_display_private *priv, + resource_size_t vram_addr, + resource_size_t vram_size); +extern void pe220x_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock); +extern void pe220x_dc_hw_disable(struct drm_crtc *crtc); +extern int pe220x_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count); +extern void pe220x_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void pe220x_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void pe220x_dc_hw_update_primary_hi_addr(struct drm_plane *plane); +extern void pe220x_dc_hw_update_cursor_hi_addr(struct drm_plane *plane, uint64_t iova); +void pe220x_dc_hw_reset(struct drm_crtc *crtc); +#endif /* __PE220X_DC_H__ */ diff --git a/drivers/gpu/drm/phytium/pe220x_dp.c b/drivers/gpu/drm/phytium/pe220x_dp.c new file mode 100644 index 000000000000..54a6e8ac454b --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dp.c @@ -0,0 +1,514 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium display port DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include "phytium_display_drv.h" +#include "pe220x_reg.h" +#include "phytium_dp.h" +#include "pe220x_dp.h" + +static uint8_t pe220x_dp_source_lane_count[2] = {1, 1}; + +/* [reg][ling_rate 1.62->8.1] */ +static int vco_val[12][4] = { + {0x0509, 0x0509, 0x0509, 0x0509}, /* CP_PADJ */ + {0x0f00, 0x0f00, 0x0f00, 0x0f00}, /* CP_IADJ */ + {0x0F08, 0x0F08, 0x0F08, 0x0F08}, /* FILT_PADJ */ + {0x0061, 0x006C, 0x006C, 0x0051}, /* INTDIV */ + {0x3333, 0x0000, 0x0000, 0x0000}, /* FRACDIVL */ + {0x0000, 0x0000, 0x0000, 0x0000}, /* FRACDIVH */ + {0x0042, 0x0048, 0x0048, 0x0036}, /* HIGH_THR */ + {0x0002, 0x0002, 0x0002, 0x0002}, /* PDIAG_CTRL */ + {0x0c5e, 0x0c5e, 0x0c5e, 0x0c5e}, /* VCOCAL_PLLCNT_START */ + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, /* LOCK_PEFCNT */ + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, /* LOCK_PLLCNT_START */ + {0x0005, 0x0005, 0x0005, 0x0005}, /* LOCK_PLLCNT_THR */ +}; + +/* [link_rate][swing][emphasis] */ +static int mgnfs_val[4][4][4] = { + /* 1.62Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 2.7Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 5.4Gbps */ + { + {0x001f, 0x0013, 0x005, 0x0000}, + {0x0018, 0x006, 0x0000, 0x0000}, + {0x000c, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 8.1Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0013, 0x006, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +/* [link_rate][swing][emphasis] */ +static int cpost_val[4][4][4] = { + /* 1.62Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 2.7Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 
0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 5.4Gbps */ + { + {0x0005, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 8.1Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int pe220x_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, uint32_t link_rate) +{ + int port = phytium_dp->port%2; + int i = 0, data, tmp, tmp1, index = 0, mask = 0; + int timeout = 500, ret = 0; + + /* set pma powerdown */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A3_POWERDOWN3 << (i * A3_POWERDOWN3_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + /* lane pll disable */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + mask |= (((1<source_max_lane_count; i++) + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_EN(port), data); + + /* set pma power active */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A0_ACTIVE << (i * A0_ACTIVE_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + mask = PLL0_LOCK_DONE; + do { + mdelay(1); + timeout--; + tmp = phytium_phy_readl(phytium_dp, PE220X_PHY_PMA_CONTROL2(port)); + } while ((!(tmp & mask)) && timeout); + + if (timeout == 0) { + DRM_ERROR("dp(%d) phy pll lock failed\n", port); + ret = -1; + } + udelay(1); + + return ret; +} + +static void pe220x_dp_hw_set_phy_lane_setting(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set) +{ + int port = phytium_dp->port % 3; + int voltage_swing = 0; + int pre_emphasis = 0, link_rate_index = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + 
voltage_swing = 1; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + voltage_swing = 2; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + voltage_swing = 3; + break; + default: + voltage_swing = 0; + break; + } + + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_1: + pre_emphasis = 1; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + pre_emphasis = 2; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + pre_emphasis = 3; + break; + default: + pre_emphasis = 0; + break; + } + + switch (link_rate) { + case 810000: + link_rate_index = 3; + break; + case 540000: + link_rate_index = 2; + break; + case 270000: + link_rate_index = 1; + break; + case 162000: + link_rate_index = 0; + break; + default: + DRM_ERROR("phytium dp rate(%d) not support\n", link_rate); + link_rate_index = 2; + break; + } + + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DIAG_ACYA(port), LOCK); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_TXCC_CTRL(port), TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DRV(port), TX_DRV); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_MGNFS(port), + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_CPOST(port), + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DIAG_ACYA(port), UNLOCK); +} + +static int pe220x_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) +{ + int port = phytium_dp->port; + int i = 0, data, tmp, mask; + int timeout = 500, ret = 0; + + phytium_phy_writel(phytium_dp, PE220X_PHY_APB_RESET(port), APB_RESET); + phytium_phy_writel(phytium_dp, PE220X_PHY_PIPE_RESET(port), RESET); + + /* config lane to dp mode */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (LANE_BIT << (i * LANE_BIT_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_MODE(port), data); + + /* pll clock enable */ + data = 0; + for (i = 0; i < 
phytium_dp->source_max_lane_count; i++) + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_EN(port), data); + + /* config input 20 bit */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (BIT_20 << (i * BIT_20_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA_WIDTH(port), data); + + /* config lane active power state */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A0_ACTIVE << (i * A0_ACTIVE_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + /* link reset */ + phytium_phy_writel(phytium_dp, PE220X_PHY_LINK_RESET(port), LINK_RESET); + + phytium_phy_writel(phytium_dp, PE220X_PHY_SGMII_DPSEL_INIT(port), DP_SEL); + + /* config single link */ + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_CFG(port), SINGLE_LINK); + + /* pipe reset */ + phytium_phy_writel(phytium_dp, PE220X_PHY_PIPE_RESET(port), RESET_DEASSERT); + + mask = PLL0_LOCK_DONE; + do { + mdelay(1); + timeout--; + tmp = phytium_phy_readl(phytium_dp, PE220X_PHY_PMA_CONTROL2(port)); + } while ((!(tmp & mask)) && timeout); + + if (timeout == 0) { + DRM_ERROR("reset dp(%d) phy failed\n", port); + ret = -1; + } + udelay(1); + + return ret; +} + +static void pe220x_dp_hw_poweron_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweron panel\n", __func__); +} + +static void pe220x_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 
0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweroff panel\n", __func__); +} + +static void pe220x_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to enable backlight\n", __func__); +} + +static void pe220x_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to disable backlight\n", __func__); +} + +static uint32_t pe220x_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PE220X_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); + return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); +} + +static int pe220x_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int 
config = 0; + int ret = 0; + + if (level > PE220X_DP_BACKLIGHT_MAX) { + ret = -EINVAL; + goto out; + } + + config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); + phytium_writel_reg(priv, config, 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set backlight\n", __func__); +out: + return ret; +} + +bool pe220x_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) +{ + return false; +} + +int pe220x_dp_hw_reset(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, DP_RESET, group_offset, PE220X_DP_CONTROLLER_RESET); + udelay(500); + phytium_writel_reg(priv, AUX_CLK_DIVIDER_100, group_offset, PHYTIUM_DP_AUX_CLK_DIVIDER); + phytium_writel_reg(priv, SUPPORT_EDP_1_4, group_offset, PHYTIUM_EDP_CRC_ENABLE); + + return 0; +} + +uint8_t pe220x_dp_hw_get_source_lane_count(struct phytium_dp_device *phytium_dp) +{ + return pe220x_dp_source_lane_count[phytium_dp->port]; +} + +static struct phytium_dp_func pe220x_dp_funcs = { + .dp_hw_get_source_lane_count = pe220x_dp_hw_get_source_lane_count, + .dp_hw_reset = pe220x_dp_hw_reset, + .dp_hw_spread_is_enable = pe220x_dp_hw_spread_is_enable, + .dp_hw_set_backlight = pe220x_dp_hw_set_backlight, + .dp_hw_get_backlight = pe220x_dp_hw_get_backlight, + .dp_hw_disable_backlight = pe220x_dp_hw_disable_backlight, + .dp_hw_enable_backlight = pe220x_dp_hw_enable_backlight, + .dp_hw_poweroff_panel = pe220x_dp_hw_poweroff_panel, + .dp_hw_poweron_panel = pe220x_dp_hw_poweron_panel, + .dp_hw_init_phy = pe220x_dp_hw_init_phy, + .dp_hw_set_phy_lane_setting = pe220x_dp_hw_set_phy_lane_setting, + .dp_hw_set_phy_lane_and_rate = pe220x_dp_hw_set_phy_lane_and_rate, +}; + +void 
pe220x_dp_func_register(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->funcs = &pe220x_dp_funcs; +} diff --git a/drivers/gpu/drm/phytium/pe220x_dp.h b/drivers/gpu/drm/phytium/pe220x_dp.h new file mode 100644 index 000000000000..6b763d996631 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dp.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium display port DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PE220X_DP_H__ +#define __PE220X_DP_H__ + +#define PE220X_DP_BACKLIGHT_MAX 100 + +void pe220x_dp_func_register(struct phytium_dp_device *phytium_dp); +#endif /* __PE220X_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/pe220x_reg.h b/drivers/gpu/drm/phytium/pe220x_reg.h new file mode 100644 index 000000000000..88fc9c7383a5 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_reg.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium Pe220x display engine register + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PE220X_REG_H__ +#define __PE220X_REG_H__ + +#include "phytium_reg.h" + +/* dc register */ +#define PE220X_DC_CLOCK_CONTROL 0x0000 +#define DC1_CORE_RESET (1<<18) +#define DC0_CORE_RESET (1<<17) +#define AXI_RESET (1<<16) +#define AHB_RESET (1<<12) + +#define PE220X_DC_CMD_REGISTER(pipe) (PE220X_DC_BASE(0) + 0x00F0 + 0x4*(pipe)) +#define FLAG_REPLY (1<<31) +#define FLAG_REQUEST (1<<30) +#define CMD_PIXEL_CLOCK (0x0 << 28) +#define CMD_BACKLIGHT (0x1 << 28) +#define CMD_DC_DP_RESET (0x3 << 28) +#define BACKLIGHT_SHIFT 21 +#define BACKLIGHT_MASK 0x7f +#define BACKLIGHT_MAX 100 +#define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) +#define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) +#define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) +#define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) +#define PIXEL_CLOCK_MASK (0x1fffff) + +#define PE220X_DC_FRAMEBUFFER_Y_HI_ADDRESS 0x1404 +#define PREFIX_MASK 0xff +#define PREFIX_SHIFT 32 + +#define PE220X_DC_CURSOR_HI_ADDRESS 0x1490 +#define CURSOR_PREFIX_MASK 0xff +#define CURSOR_PREFIX_SHIFT 32 + +#define PE220X_DC_FRAMEBUFFER_U_HI_ADDRESS 0x1534 +#define U_PREFIX_MASK 0xff +#define U_PREFIX_SHIFT 32 + +#define PE220X_DC_FRAMEBUFFER_V_HI_ADDRESS 0x153c +#define V_PREFIX_MASK 0xff +#define V_PREFIX_SHIFT 32 + +/* dp register */ +#define PE220X_DP_CONTROLLER_RESET 0x0850 +#define DP_RESET 0x1 + +/* address transform register */ +#define PE220X_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x0 +#define SRC_ADDR_OFFSET 22 +#define SRC_ADDR_MASK 0xffffffffff + +#define PE220X_DC_ADDRESS_TRANSFORM_SIZE 0x4 +#define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) +#define SIZE_OFFSET 22 + +#define PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR 0x8 +#define DST_ADDR_OFFSET 22 + +#define PE220X_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 +#define DC_DP_RESET_STATUS(pipe) (1 << pipe) +#define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) + +#define PE220X_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c +#define BACKLIGHT_VALUE_MASK (0x7f) +#define BACKLIGHT_VALUE_SHIFT 16 + +/* 
phy register start */ +#define PE220X_PHY_BASE(pipe) (0x100000*pipe) + +#define PE220X_PHY_PIPE_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40254) +#define RESET 0x0 +#define RESET_DEASSERT 0x1 + +#define PE220X_PHY_MODE(pipe) (PE220X_PHY_BASE(pipe) + 0x40034) +#define LANE_BIT (0x3) +#define LANE_BIT_SHIFT 0x2 + +#define PE220X_PHY_LINK_CFG(pipe) (PE220X_PHY_BASE(pipe) + 0x40044) +#define LANE_MASTER 0x1 +#define LANE_MASTER_SHIFT 1 + +#define PE220X_PHY_PLL_EN(pipe) (PE220X_PHY_BASE(pipe) + 0x40214) +#define PLL_EN 0x1 +#define PLL_EN_SHIFT 1 + +#define PE220X_PHY_PMA_WIDTH(pipe) (PE220X_PHY_BASE(pipe) + 0x4021c) +#define BIT_20 0x5 +#define BIT_20_SHIFT 4 + +#define PE220X_PHY_PLL_SOURCE_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0x4004C) + +#define PE220X_PHY_PMA0_POWER(pipe) (PE220X_PHY_BASE(pipe) + 0x402bc) +#define A0_ACTIVE 0x1 +#define A0_ACTIVE_SHIFT 8 +#define A3_POWERDOWN3 0x8 +#define A3_POWERDOWN3_SHIFT 8 + +#define PE220X_PHY_LINK_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40258) +#define LINK_RESET 0x1 +#define LINK_RESET_MASK 0x1 +#define LINTK_RESET_SHIFT 0x1 + +#define PE220X_PHY_SGMII_DPSEL_INIT(pipe) (PE220X_PHY_BASE(pipe) + 0x40260) +#define DP_SEL 0x1 + +#define PE220X_PHY_APB_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40250) +#define APB_RESET 0x1 + +/* phy origin register */ +#define PE220X_PHY_PLL_CFG(pipe) (PE220X_PHY_BASE(pipe) + 0x30038) +#define SINGLE_LINK 0x0 + +#define PE220X_PHY_PMA_CONTROL(pipe) (PE220X_PHY_BASE(pipe) + 0x3800c) +#define CONTROL_ENABLE 0x1 +#define CONTROL_ENABLE_MASK 0x1 +#define CONTROL_ENABLE_SHIFT 0x1 + +#define PE220X_PHY_PMA_CONTROL2(pipe) (PE220X_PHY_BASE(pipe) + 0x38004) +#define PLL0_LOCK_DONE (0x1 << 6) + +#define PE220X_PHY_PLL0_CLK_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0X684) +#define PLL_LINK_RATE_162000 0xf01 +#define PLL_LINK_RATE_270000 0x701 +#define PLL_LINK_RATE_540000 0x301 +#define PLL_LINK_RATE_810000 0x200 + +#define PE220X_PHY_HSCLK0_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0x18398) +#define HSCLK_LINK_0 0x0 +#define 
HSCLK_LINK_1 0x1 + +#define PE220X_PHY_HSCLK0_DIV(pipe) (PE220X_PHY_BASE(pipe) + 0x1839c) +#define HSCLK_LINK_RATE_162000 0x2 +#define HSCLK_LINK_RATE_270000 0x1 +#define HSCLK_LINK_RATE_540000 0x0 +#define HSCLK_LINK_RATE_810000 0x0 + +#define PE220X_PHY_PLLDRC0_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x18394) +#define PLLDRC_LINK0 0x1 +#define PLLDRC_LINK1 0x9 + +#define PE220X_PHY_PLL0_DSM_M0(pipe) (PE220X_PHY_BASE(pipe) + 0x250) +#define PLL0_DSM_M0 0x4 +#define PE220X_PHY_PLL0_VCOCAL_START(pipe) (PE220X_PHY_BASE(pipe) + 0x218) +#define PLL0_VCOCAL_START 0xc5e +#define PE220X_PHY_PLL0_VCOCAL_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x208) +#define PLL0_VCOCAL_CTRL 0x3 + +#define PE220X_PHY_PLL0_CP_PADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x690) +#define PE220X_PHY_PLL0_CP_IADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x694) +#define PE220X_PHY_PLL0_CP_FILT_PADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x698) +#define PE220X_PHY_PLL0_INTDIV(pipe) (PE220X_PHY_BASE(pipe) + 0x240) +#define PE220X_PHY_PLL0_FRACDIVL(pipe) (PE220X_PHY_BASE(pipe) + 0x244) +#define PE220X_PHY_PLL0_FRACDIVH(pipe) (PE220X_PHY_BASE(pipe) + 0x248) +#define PE220X_PHY_PLL0_HIGH_THR(pipe) (PE220X_PHY_BASE(pipe) + 0x24c) +#define PE220X_PHY_PLL0_PDIAG_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x680) +#define PE220X_PHY_PLL0_VCOCAL_PLLCNT_START(pipe) (PE220X_PHY_BASE(pipe) + 0x220) +#define PE220X_PHY_PLL0_LOCK_PEFCNT(pipe) (PE220X_PHY_BASE(pipe) + 0x270) +#define PE220X_PHY_PLL0_LOCK_PLLCNT_START(pipe) (PE220X_PHY_BASE(pipe) + 0x278) +#define PE220X_PHY_PLL0_LOCK_PLLCNT_THR(pipe) (PE220X_PHY_BASE(pipe) + 0x27c) + +#define PE220X_PHY_PLL0_TX_PSC_A0(pipe) (PE220X_PHY_BASE(pipe) + 0x18400) +#define PLL0_TX_PSC_A0 0xfb +#define PE220X_PHY_PLL0_TX_PSC_A2(pipe) (PE220X_PHY_BASE(pipe) + 0x18408) +#define PLL0_TX_PSC_A2 0x4aa +#define PE220X_PHY_PLL0_TX_PSC_A3(pipe) (PE220X_PHY_BASE(pipe) + 0x1840c) +#define PLL0_TX_PSC_A3 0x4aa +#define PE220X_PHY_PLL0_RX_PSC_A0(pipe) (PE220X_PHY_BASE(pipe) + 0x28000) +#define PLL0_RX_PSC_A0 0x0 +#define 
PE220X_PHY_PLL0_RX_PSC_A2(pipe) (PE220X_PHY_BASE(pipe) + 0x28008) +#define PLL0_RX_PSC_A2 0x0 +#define PE220X_PHY_PLL0_RX_PSC_A3(pipe) (PE220X_PHY_BASE(pipe) + 0x2800C) +#define PLL0_RX_PSC_A3 0x0 +#define PE220X_PHY_PLL0_RX_PSC_CAL(pipe) (PE220X_PHY_BASE(pipe) + 0x28018) +#define PLL0_RX_PSC_CAL 0x0 + +#define PE220X_PHY_PLL0_XCVR_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x183a8) +#define PLL0_XCVR_CTRL 0xf + +#define PE220X_PHY_PLL0_RX_GCSM1_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28420) +#define PLL0_RX_GCSM1_CTRL 0x0 +#define PE220X_PHY_PLL0_RX_GCSM2_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28440) +#define PLL0_RX_GCSM2_CTRL 0x0 +#define PE220X_PHY_PLL0_RX_PERGCSM_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28460) +#define PLL0_RX_PERGCSM_CTRL 0x0 + +/* swing and emphasis */ +#define PE220X_PHY_PLL0_TX_DIAG_ACYA(pipe) (PE220X_PHY_BASE(pipe) + 0x1879c) +#define LOCK 1 +#define UNLOCK 0 + +#define PE220X_PHY_PLL0_TX_TXCC_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x18100) +#define TX_TXCC_CTRL 0x8a4 + +#define PE220X_PHY_PLL0_TX_DRV(pipe) (PE220X_PHY_BASE(pipe) + 0x18318) +#define TX_DRV 0x3 + +#define PE220X_PHY_PLL0_TX_MGNFS(pipe) (PE220X_PHY_BASE(pipe) + 0x18140) + +#define PE220X_PHY_PLL0_TX_CPOST(pipe) (PE220X_PHY_BASE(pipe) + 0x18130) + +#endif /* __PE220X_REG_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_crtc.c b/drivers/gpu/drm/phytium/phytium_crtc.c new file mode 100644 index 000000000000..628357837da6 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_crtc.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_dp.h" +#include "px210_dc.h" +#include "pe220x_dc.h" +#include "phytium_reg.h" + +#define MAXKERNELSIZE 9 +#define SUBPIXELINDEXBITS 5 +#define SUBPIXELCOUNT (1 << SUBPIXELINDEXBITS) +#define SUBPIXELLOADCOUNT (SUBPIXELCOUNT / 2 + 1) +#define WEIGHTSTATECOUNT (((SUBPIXELLOADCOUNT * MAXKERNELSIZE + 1) & ~1) / 2) +#define KERNELTABLESIZE (SUBPIXELLOADCOUNT * MAXKERNELSIZE * sizeof(uint16_t)) +#define PHYALIGN(n, align) (((n) + ((align) - 1)) & ~((align) - 1)) +#define KERNELSTATES (PHYALIGN(KERNELTABLESIZE + 4, 8)) +#define PHYPI 3.14159265358979323846f + +#define MATH_Add(X, Y) ((float)((X) + (Y))) +#define MATH_Multiply(X, Y) ((float)((X) * (Y))) +#define MATH_Divide(X, Y) ((float)((X) / (Y))) +#define MATH_DivideFromUInteger(X, Y) ((float)(X) / (float)(Y)) +#define MATH_I2Float(X) ((float)(X)) + +struct filter_blit_array { + uint8_t kernelSize; + uint32_t scaleFactor; + uint32_t *kernelStates; +}; + +static void phytium_crtc_gamma_set(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + uint32_t config = 0; + struct drm_crtc_state *state = crtc->state; + struct drm_color_lut *lut; + int i; + + if (state->gamma_lut) { + if (WARN((state->gamma_lut->length/sizeof(struct drm_color_lut) != GAMMA_INDEX_MAX), + "gamma size is not match\n")) + return; + lut = (struct drm_color_lut *)state->gamma_lut->data; + for (i = 0; i < GAMMA_INDEX_MAX; i++) { + phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); + config = ((lut[i].red >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; + config |= (((lut[i].green >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); + config |= (((lut[i].blue >> 6) & 
GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); + } + } +} + +static void phytium_crtc_gamma_init(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + uint32_t config = 0; + uint16_t *red, *green, *blue; + int i; + + if (WARN((crtc->gamma_size != GAMMA_INDEX_MAX), "gamma size is not match\n")) + return; + + red = crtc->gamma_store; + green = red + crtc->gamma_size; + blue = green + crtc->gamma_size; + + for (i = 0; i < GAMMA_INDEX_MAX; i++) { + phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX); + config = ((*red++ >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT; + config |= (((*green++ >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT); + config |= (((*blue++ >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA); + } +} + +static void phytium_crtc_destroy(struct drm_crtc *crtc) +{ + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + drm_crtc_cleanup(crtc); + kfree(phytium_crtc); +} + +struct drm_crtc_state * +phytium_crtc_atomic_duplicate_state(struct drm_crtc *crtc) +{ + struct phytium_crtc_state *phytium_crtc_state = NULL; + + phytium_crtc_state = kmemdup(crtc->state, sizeof(*phytium_crtc_state), + GFP_KERNEL); + if (!phytium_crtc_state) + return NULL; + __drm_atomic_helper_crtc_duplicate_state(crtc, + &phytium_crtc_state->base); + + return &phytium_crtc_state->base; +} + +void +phytium_crtc_atomic_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct phytium_crtc_state *phytium_crtc_state = + to_phytium_crtc_state(state); + + phytium_crtc_state = to_phytium_crtc_state(state); + __drm_atomic_helper_crtc_destroy_state(state); + kfree(phytium_crtc_state); +} + 
+static int phytium_enable_vblank(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + + phytium_writel_reg(priv, INT_ENABLE, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_INT_ENABLE); + + return 0; +} + +static void phytium_disable_vblank(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + + phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_INT_ENABLE); +} + +static const struct drm_crtc_funcs phytium_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = phytium_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = phytium_crtc_atomic_duplicate_state, + .atomic_destroy_state = phytium_crtc_atomic_destroy_state, + .enable_vblank = phytium_enable_vblank, + .disable_vblank = phytium_disable_vblank, +}; + +static void +phytium_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct drm_display_mode *mode = &crtc->state->adjusted_mode; + struct drm_connector_state *new_conn_state; + struct drm_connector *conn; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + int config = 0, i = 0; + + for_each_new_connector_in_state(state, conn, new_conn_state, i) { + if (new_conn_state->crtc != crtc) + continue; + + switch (conn->display_info.bpc) { + case 10: + phytium_crtc->bpc = DP_RGB101010; + break; + case 6: + phytium_crtc->bpc = DP_RGB666; + break; + default: + 
phytium_crtc->bpc = DP_RGB888; + break; + } + } + + /* config pix clock */ + phytium_crtc->dc_hw_config_pix_clock(crtc, mode->clock); + + config = ((mode->crtc_hdisplay & HDISPLAY_END_MASK) << HDISPLAY_END_SHIFT) + | ((mode->crtc_htotal&HDISPLAY_TOTAL_MASK) << HDISPLAY_TOTAL_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HDISPLAY); + config = ((mode->crtc_hsync_start & HSYNC_START_MASK) << HSYNC_START_SHIFT) + | ((mode->crtc_hsync_end & HSYNC_END_MASK) << HSYNC_END_SHIFT) + | HSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : HSYNC_NEGATIVE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HSYNC); + config = ((mode->crtc_vdisplay & VDISPLAY_END_MASK) << VDISPLAY_END_SHIFT) + | ((mode->crtc_vtotal & VDISPLAY_TOTAL_MASK) << VDISPLAY_TOTAL_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VDISPLAY); + config = ((mode->crtc_vsync_start & VSYNC_START_MASK) << VSYNC_START_SHIFT) + | ((mode->crtc_vsync_end & VSYNC_END_MASK) << VSYNC_END_SHIFT) + | VSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 
0 : VSYNC_NEGATIVE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VSYNC); + config = PANEL_DATAENABLE_ENABLE | PANEL_DATA_ENABLE | PANEL_CLOCK_ENABLE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_PANEL_CONFIG); + config = phytium_crtc->bpc | OUTPUT_DP; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_DP_CONFIG); + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->active) + config |= FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET; + else + config &= (~(FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET)); + + if (phytium_crtc->scale_enable) + config |= FRAMEBUFFER_SCALE_ENABLE; + else + config &= (~FRAMEBUFFER_SCALE_ENABLE); + + if (crtc->state->gamma_lut) + phytium_crtc_gamma_set(crtc); + else + phytium_crtc_gamma_init(crtc); + + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + drm_crtc_vblank_on(crtc); +} + +static void +phytium_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + drm_crtc_vblank_off(crtc); + phytium_crtc->dc_hw_disable(crtc); +} + +static void phytium_crtc_update_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, + const struct drm_display_mode *native_mode) +{ + if (native_mode->clock == drm_mode->clock && + native_mode->htotal == drm_mode->htotal && + native_mode->vtotal == drm_mode->vtotal) { + drm_mode->crtc_hdisplay = native_mode->crtc_hdisplay; + drm_mode->crtc_vdisplay = native_mode->crtc_vdisplay; + drm_mode->crtc_clock = native_mode->crtc_clock; + drm_mode->crtc_hblank_start = native_mode->crtc_hblank_start; + drm_mode->crtc_hblank_end = native_mode->crtc_hblank_end; + drm_mode->crtc_hsync_start = native_mode->crtc_hsync_start; + drm_mode->crtc_hsync_end = native_mode->crtc_hsync_end; + drm_mode->crtc_htotal = native_mode->crtc_htotal; + drm_mode->crtc_hskew = native_mode->crtc_hskew; + drm_mode->crtc_vblank_start = 
native_mode->crtc_vblank_start; + drm_mode->crtc_vblank_end = native_mode->crtc_vblank_end; + drm_mode->crtc_vsync_start = native_mode->crtc_vsync_start; + drm_mode->crtc_vsync_end = native_mode->crtc_vsync_end; + drm_mode->crtc_vtotal = native_mode->crtc_vtotal; + } +} + +static int +phytium_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + struct drm_plane_state *new_plane_state = NULL; + int ret = 0; + struct drm_connector *connector; + struct drm_connector_state *new_con_state; + uint32_t i; + struct phytium_dp_device *phytium_dp = NULL; + + for_each_new_connector_in_state(state, connector, new_con_state, i) { + if (new_con_state->crtc == crtc) { + phytium_dp = connector_to_dp_device(connector); + break; + } + } + if (phytium_dp) + phytium_crtc_update_timing_for_drm_display_mode(&crtc_state->adjusted_mode, + &phytium_dp->native_mode); + + new_plane_state = drm_atomic_get_new_plane_state(crtc_state->state, + crtc->primary); + if (crtc_state->enable && new_plane_state && !new_plane_state->crtc) { + ret = -EINVAL; + goto fail; + } + + return 0; +fail: + return ret; +} + +static void +phytium_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe, config; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + if (config & FRAMEBUFFER_RESET) { + phytium_writel_reg(priv, config | FRAMEBUFFER_VALID_PENDING, + group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } +} + +static void phytium_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + 
struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + struct phytium_crtc_state *phytium_crtc_state = NULL; + int phys_pipe = phytium_crtc->phys_pipe, config; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + DRM_DEBUG_KMS("crtc->state active:%d enable:%d\n", + crtc->state->active, crtc->state->enable); + phytium_crtc_state = to_phytium_crtc_state(crtc->state); + + if (crtc->state->color_mgmt_changed) + phytium_crtc_gamma_set(crtc); + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + phytium_writel_reg(priv, config&(~FRAMEBUFFER_VALID_PENDING), + group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->event) { + DRM_DEBUG_KMS("vblank->refcount:%d\n", + atomic_read(&dev->vblank[0].refcount)); + spin_lock_irq(&dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + else + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irq(&dev->event_lock); + } +} + +static enum drm_mode_status +phytium_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + + if (mode->crtc_clock > priv->info.crtc_clock_max) + return MODE_CLOCK_HIGH; + + if (mode->hdisplay > priv->info.hdisplay_max) + return MODE_BAD_HVALUE; + + if (mode->vdisplay > priv->info.vdisplay_max) + return MODE_BAD_VVALUE; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; + + return MODE_OK; +} + +static const struct drm_crtc_helper_funcs phytium_crtc_helper_funcs = { + .mode_valid = phytium_crtc_mode_valid, + .atomic_check = phytium_crtc_atomic_check, + .atomic_begin = phytium_crtc_atomic_begin, + .atomic_flush = phytium_crtc_atomic_flush, + .atomic_enable = phytium_crtc_atomic_enable, + .atomic_disable = phytium_crtc_atomic_disable, +}; + +void phytium_crtc_resume(struct drm_device *drm_dev) +{ + struct drm_crtc 
*crtc; + struct phytium_crtc *phytium_crtc = NULL; + + drm_for_each_crtc(crtc, drm_dev) { + phytium_crtc = to_phytium_crtc(crtc); + if (phytium_crtc->dc_hw_reset) + phytium_crtc->dc_hw_reset(crtc); + phytium_crtc_gamma_init(crtc); + } +} + +int phytium_crtc_init(struct drm_device *dev, int phys_pipe) +{ + struct phytium_crtc *phytium_crtc; + struct phytium_crtc_state *phytium_crtc_state; + struct phytium_plane *phytium_primary_plane = NULL; + struct phytium_plane *phytium_cursor_plane = NULL; + struct phytium_display_private *priv = dev->dev_private; + int ret; + + phytium_crtc = kzalloc(sizeof(*phytium_crtc), GFP_KERNEL); + if (!phytium_crtc) { + ret = -ENOMEM; + goto failed_malloc_crtc; + } + + phytium_crtc_state = kzalloc(sizeof(*phytium_crtc_state), GFP_KERNEL); + if (!phytium_crtc_state) { + ret = -ENOMEM; + goto failed_malloc_crtc_state; + } + + phytium_crtc_state->base.crtc = &phytium_crtc->base; + phytium_crtc->base.state = &phytium_crtc_state->base; + phytium_crtc->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_crtc->dc_hw_config_pix_clock = px210_dc_hw_config_pix_clock; + phytium_crtc->dc_hw_disable = px210_dc_hw_disable; + phytium_crtc->dc_hw_reset = NULL; + priv->dc_reg_base[phys_pipe] = PX210_DC_BASE(phys_pipe); + priv->dcreq_reg_base[phys_pipe] = PX210_DCREQ_BASE(phys_pipe); + priv->address_transform_base = PX210_ADDRESS_TRANSFORM_BASE; + } else if (IS_PE220X(priv)) { + phytium_crtc->dc_hw_config_pix_clock = pe220x_dc_hw_config_pix_clock; + phytium_crtc->dc_hw_disable = pe220x_dc_hw_disable; + phytium_crtc->dc_hw_reset = pe220x_dc_hw_reset; + priv->dc_reg_base[phys_pipe] = PE220X_DC_BASE(phys_pipe); + priv->dcreq_reg_base[phys_pipe] = 0x0; + priv->address_transform_base = PE220X_ADDRESS_TRANSFORM_BASE; + } + + phytium_primary_plane = phytium_primary_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_primary_plane)) { + ret = PTR_ERR(phytium_primary_plane); + DRM_ERROR("create primary plane failed, phys_pipe(%d)\n", phys_pipe); + goto 
failed_create_primary; + } + + phytium_cursor_plane = phytium_cursor_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_cursor_plane)) { + ret = PTR_ERR(phytium_cursor_plane); + DRM_ERROR("create cursor plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_create_cursor; + } + + ret = drm_crtc_init_with_planes(dev, &phytium_crtc->base, + &phytium_primary_plane->base, + &phytium_cursor_plane->base, + &phytium_crtc_funcs, + "phys_pipe %d", phys_pipe); + + if (ret) { + DRM_ERROR("init crtc with plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_crtc_init; + } + drm_crtc_helper_add(&phytium_crtc->base, &phytium_crtc_helper_funcs); + drm_crtc_vblank_reset(&phytium_crtc->base); + drm_mode_crtc_set_gamma_size(&phytium_crtc->base, GAMMA_INDEX_MAX); + drm_crtc_enable_color_mgmt(&phytium_crtc->base, 0, false, GAMMA_INDEX_MAX); + if (phytium_crtc->dc_hw_reset) + phytium_crtc->dc_hw_reset(&phytium_crtc->base); + phytium_crtc_gamma_init(&phytium_crtc->base); + + return 0; + +failed_crtc_init: +failed_create_cursor: + /* drm_mode_config_cleanup() will free any crtcs/planes already initialized */ +failed_create_primary: + kfree(phytium_crtc_state); +failed_malloc_crtc_state: + kfree(phytium_crtc); +failed_malloc_crtc: + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_crtc.h b/drivers/gpu/drm/phytium/phytium_crtc.h new file mode 100644 index 000000000000..78a841c1c684 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_crtc.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_CRTC_H__ +#define __PHYTIUM_CRTC_H__ + +struct phytium_crtc { + struct drm_crtc base; + int phys_pipe; + unsigned int bpc; + + /* scale */ + uint32_t src_width; + uint32_t src_height; + uint32_t dst_width; + uint32_t dst_height; + uint32_t dst_x; + uint32_t dst_y; + bool scale_enable; + bool reserve[3]; + + void (*dc_hw_config_pix_clock)(struct drm_crtc *crtc, int clock); + void (*dc_hw_disable)(struct drm_crtc *crtc); + void (*dc_hw_reset)(struct drm_crtc *crtc); +}; + +struct phytium_crtc_state { + struct drm_crtc_state base; +}; + +#define to_phytium_crtc(x) container_of(x, struct phytium_crtc, base) +#define to_phytium_crtc_state(x) container_of(x, struct phytium_crtc_state, base) + +void phytium_crtc_resume(struct drm_device *drm_dev); +int phytium_crtc_init(struct drm_device *dev, int pipe); +#endif /* __PHYTIUM_CRTC_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.c b/drivers/gpu/drm/phytium/phytium_debugfs.c new file mode 100644 index 000000000000..eedad22c1536 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_debugfs.c @@ -0,0 +1,456 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_dp.h" +#include "phytium_reg.h" + +const char *const mem_state[PHYTIUM_MEM_STATE_TYPE_COUNT] = { + "Memory_Vram_Total", + "Memory_Vram_Alloc", + "Memory_System_Carveout_Total", + "Memory_System_Carveout_Alloc", + "Memory_System_Alloc", +}; + +static ssize_t +phytium_dp_register_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + return len; +} + +static int phytium_dp_register_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_M_VID, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_M_VID)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_N_VID, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_N_VID)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_TRANSFER_UNIT_SIZE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_DATA_COUNT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_DATA_COUNT)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HTOTAL, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HRES, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HRES)); + seq_printf(m, "addr:h0x%08x h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSWIDTH, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH)); + seq_printf(m, "addr:h0x%08x 
h0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSTART, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSTART)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VTOTAL, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VRES, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VRES)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSWIDTH, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSTART, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSTART)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_POLARITY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC0, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC1, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_USER_SYNC_POLARITY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_VIDEO_STREAM_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SECONDARY_STREAM_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE)); + seq_puts(m, "audio:\n"); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_INPUT_SELECT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DIRECT_CLKDIV, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_COUNT, + phytium_readl_reg(priv, 
group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_MAP, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DATA_WINDOW, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_CATEGORY_CODE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_MAUD, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_MAUD)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_NAUD, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_NAUD)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CLOCK_MODE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_SOURCE_FORMAT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_AUDIO_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE)); + + return 0; +} + +static int phytium_dp_register_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_register_show, inode->i_private); +} + +static const struct file_operations phytium_dp_register_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_register_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_register_write, +}; + +static ssize_t +phytium_dp_trigger_train_fail_write(struct file *filp, + const char 
__user *ubuf, + size_t len, + loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + if (kstrtouint(tmp, 10, &phytium_dp->trigger_train_fail) != 0) + return -EINVAL; + + return len; +} + +static int phytium_dp_trigger_train_fail_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + seq_printf(m, "trigger_train_fail: %d\n", phytium_dp->trigger_train_fail); + seq_printf(m, "train_retry_count: %d\n", phytium_dp->train_retry_count); + + return 0; +} + +static int phytium_dp_trigger_train_fail_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_trigger_train_fail_show, inode->i_private); +} + +static const struct file_operations phytium_dp_trigger_train_fail_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_trigger_train_fail_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_trigger_train_fail_write, +}; + +static int phytium_edp_backlight_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!phytium_dp->is_edp) + return -ENODEV; + + mutex_lock(&phytium_dp->panel.panel_lock); + seq_printf(m, "backlight: %s\n", phytium_dp->panel.backlight_enabled?"enabled":"disabled"); + mutex_unlock(&phytium_dp->panel.panel_lock); + + return 0; +} + +static int phytium_edp_backlight_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_edp_backlight_show, inode->i_private); +} + +static const struct file_operations phytium_edp_backlight_fops = { 
+ .owner = THIS_MODULE, + .open = phytium_edp_backlight_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int phytium_edp_power_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!phytium_dp->is_edp) + return -ENODEV; + + mutex_lock(&phytium_dp->panel.panel_lock); + seq_printf(m, "power: %s\n", phytium_dp->panel.power_enabled?"enabled":"disabled"); + mutex_unlock(&phytium_dp->panel.panel_lock); + + return 0; +} + +static int phytium_edp_power_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_edp_power_show, inode->i_private); +} + +static const struct file_operations phytium_edp_power_fops = { + .owner = THIS_MODULE, + .open = phytium_edp_power_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +struct dpcd_block { + /* DPCD dump start address. */ + unsigned int offset; + /* DPCD dump end address, inclusive. If unset, .size will be used. */ + unsigned int end; + /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ + size_t size; + /* Only valid for eDP. 
*/ + bool edp; +}; + +static const struct dpcd_block phytium_dpcd_debug[] = { + { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, + { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, + { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, + { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, + { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, + { .offset = DP_SET_POWER }, + { .offset = DP_EDP_DPCD_REV }, + { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, + { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, + { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, + { .offset = DP_DEVICE_SERVICE_IRQ_VECTOR, .size = 1 }, + { .offset = DP_TEST_REQUEST, .end = DP_TEST_PATTERN }, +}; + +static int phytium_dpcd_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + uint8_t buf[16], i; + ssize_t err; + + if (connector->status != connector_status_connected) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(phytium_dpcd_debug); i++) { + const struct dpcd_block *b = &phytium_dpcd_debug[i]; + size_t size = b->end ? 
b->end - b->offset + 1 : (b->size ?: 1); + + if (WARN_ON(size > sizeof(buf))) + continue; + + err = drm_dp_dpcd_read(&phytium_dp->aux, b->offset, buf, size); + if (err <= 0) { + DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", + size, b->offset, err); + continue; + } + + seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); + } + + return 0; +} + +static int phytium_dpcd_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dpcd_show, inode->i_private); +} + +static const struct file_operations phytium_dpcd_fops = { + .owner = THIS_MODULE, + .open = phytium_dpcd_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t +phytium_dp_state_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + return len; +} + +static int phytium_dp_state_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + seq_printf(m, "port number: %d\n", phytium_dp->port); + seq_printf(m, "source_max_lane_count: %d\n", phytium_dp->source_max_lane_count); + seq_printf(m, "max_source_rates: %d\n", + phytium_dp->source_rates[phytium_dp->num_source_rates-1]); + if (connector->status == connector_status_connected) { + seq_printf(m, "sink_max_lane_count: %d\n", phytium_dp->sink_max_lane_count); + seq_printf(m, "max_sink_rates: %d\n", + phytium_dp->sink_rates[phytium_dp->num_sink_rates-1]); + seq_printf(m, "link_rate: %d\n", phytium_dp->link_rate); + seq_printf(m, "link_lane_count: %d\n", phytium_dp->link_lane_count); + seq_printf(m, "train_set[0]: %d\n", phytium_dp->train_set[0]); + seq_printf(m, "has_audio: %s\n", phytium_dp->has_audio?"yes":"no"); + } + + return 0; +} + +static int 
phytium_dp_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_state_show, inode->i_private); +} + +static const struct file_operations phytium_dp_state_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_state_write, +}; + +static const struct phytium_debugfs_files { + const char *name; + const struct file_operations *fops; +} phytium_debugfs_connector_files[] = { + {"dp_state", &phytium_dp_state_fops}, + {"dpcd", &phytium_dpcd_fops}, + {"dp_register", &phytium_dp_register_fops}, + {"dp_trigger_train_fail", &phytium_dp_trigger_train_fail_fops}, +}; + +static const struct phytium_debugfs_files phytium_edp_debugfs_connector_files[] = { + {"edp_power", &phytium_edp_power_fops}, + {"edp_backlight", &phytium_edp_backlight_fops}, +}; + +int phytium_debugfs_connector_add(struct drm_connector *connector) +{ + struct dentry *root = connector->debugfs_entry; + struct dentry *ent; + int i; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!root) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(phytium_debugfs_connector_files); i++) { + ent = debugfs_create_file(phytium_debugfs_connector_files[i].name, + 0644, + root, + connector, + phytium_debugfs_connector_files[i].fops); + if (!ent) + return -ENOMEM; + } + + if (phytium_dp->is_edp) + for (i = 0; i < ARRAY_SIZE(phytium_edp_debugfs_connector_files); i++) { + ent = debugfs_create_file(phytium_edp_debugfs_connector_files[i].name, + 0644, + root, + connector, + phytium_edp_debugfs_connector_files[i].fops); + if (!ent) + return -ENOMEM; + } + + return 0; +} + +static int phytium_mem_state_show(struct seq_file *m, void *data) +{ + struct phytium_display_private *priv = m->private; + uint8_t i; + + for (i = 0; i < ARRAY_SIZE(mem_state); i++) + seq_printf(m, "%-34s %10lld\n", mem_state[i], priv->mem_state[i]); + + return 0; +} + +static int 
phytium_mem_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_mem_state_show, inode->i_private); +} + +static const struct file_operations phytium_mem_state_fops = { + .owner = THIS_MODULE, + .open = phytium_mem_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct phytium_debugfs_files phytium_debugfs_display_files[] = { + {"mem_state", &phytium_mem_state_fops}, +}; + +int phytium_debugfs_display_register(struct phytium_display_private *priv) +{ + struct drm_minor *minor = priv->dev->primary; + struct dentry *root = minor->debugfs_root; + struct dentry *ent; + + if (!root) + return -ENODEV; + + ent = debugfs_create_file(phytium_debugfs_display_files[0].name, + 0644, + root, + priv, + phytium_debugfs_display_files[0].fops); + if (!ent) + return -ENOMEM; + + return 0; +} diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.h b/drivers/gpu/drm/phytium/phytium_debugfs.h new file mode 100644 index 000000000000..dc784bc557a7 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_debugfs.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_DEBUGFS_H__ +#define __PHYTIUM_DEBUGFS_H__ + +int phytium_debugfs_connector_add(struct drm_connector *connector); +int phytium_debugfs_display_register(struct phytium_display_private *priv); + +#endif /* __PHYTIUM_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.c b/drivers/gpu/drm/phytium/phytium_display_drv.c new file mode 100644 index 000000000000..60c7a20e7ca2 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_display_drv.c @@ -0,0 +1,434 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_plane.h" +#include "phytium_crtc.h" +#include "phytium_dp.h" +#include "phytium_gem.h" +#include "phytium_fb.h" +#include "phytium_fbdev.h" +#include "phytium_reg.h" +#include "phytium_pci.h" +#include "phytium_platform.h" +#include "phytium_debugfs.h" + +int dc_fake_mode_enable; +module_param(dc_fake_mode_enable, int, 0644); +MODULE_PARM_DESC(dc_fake_mode_enable, "Enable DC fake mode (0-disabled; 1-enabled; default-0)"); + +int dc_fast_training_check = 1; +module_param(dc_fast_training_check, int, 0644); +MODULE_PARM_DESC(dc_fast_training_check, "Check dp fast training (0-disabled; 1-enabled; default-1)"); + +int num_source_rates = 4; +module_param(num_source_rates, int, 0644); +MODULE_PARM_DESC(num_source_rates, "set the source max rates (1-1.62Gbps; 2-2.7Gbps; 3-5.4Gbps; 4-8.1Gbps; default-4)"); + +int source_max_lane_count = 4; +module_param(source_max_lane_count, int, 0644); +MODULE_PARM_DESC(source_max_lane_count, "set the source lane count (1-1lane; 2-2lane; 4-4lane; default-4)"); + +int link_dynamic_adjust; +module_param(link_dynamic_adjust, int, 0644); +MODULE_PARM_DESC(link_dynamic_adjust, "dynamic select the train pamameter according to the display mode (0-disabled; 1-enabled; default-1)"); + +int phytium_wait_cmd_done(struct phytium_display_private *priv, + uint32_t register_offset, + uint32_t request_bit, + uint32_t reply_bit) +{ + int timeout = 500, config = 0, ret = 0; + + do { + mdelay(1); + timeout--; + config = phytium_readl_reg(priv, 0, register_offset); + } while ((!(config & reply_bit)) && timeout); + + phytium_writel_reg(priv, config & (~request_bit), 0, register_offset); + + if (timeout == 0) { + DRM_ERROR("wait cmd reply timeout\n"); + ret = -EBUSY; + } else { + timeout = 500; + do { + mdelay(1); + timeout--; + config = phytium_readl_reg(priv, 0, register_offset); + } while 
((config & reply_bit) && timeout); + if (timeout == 0) { + DRM_ERROR("clear cmd timeout\n"); + ret = -EBUSY; + } + } + mdelay(5); + + return ret; +} + +static void phytium_irq_preinstall(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int i, status; + + for_each_pipe_masked(priv, i) { + status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); + phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE); + } +} + +static void phytium_irq_uninstall(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int i, status; + + for_each_pipe_masked(priv, i) { + status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); + phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE); + } +} + +static irqreturn_t phytium_display_irq_handler(int irq, void *data) +{ + struct drm_device *dev = data; + struct phytium_display_private *priv = dev->dev_private; + bool enabled = 0; + int i = 0, virt_pipe = 0; + irqreturn_t ret = IRQ_NONE, ret1 = IRQ_NONE; + + for_each_pipe_masked(priv, i) { + enabled = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS); + if (enabled & INT_STATUS) { + virt_pipe = phytium_get_virt_pipe(priv, i); + if (virt_pipe < 0) + return IRQ_NONE; + drm_handle_vblank(dev, virt_pipe); + ret = IRQ_HANDLED; + if (priv->dc_hw_clear_msi_irq) + priv->dc_hw_clear_msi_irq(priv, i); + } + } + + ret1 = phytium_dp_hpd_irq_handler(priv); + if (ret == IRQ_HANDLED || ret1 == IRQ_HANDLED) + return IRQ_HANDLED; + + return IRQ_NONE; +} + +static const struct drm_mode_config_funcs phytium_mode_funcs = { + .fb_create = phytium_fb_create, + .output_poll_changed = drm_fb_helper_output_poll_changed, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static void phytium_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + + 
drm_atomic_helper_commit_modeset_disables(dev, state); + drm_atomic_helper_commit_planes(dev, state, false); + drm_atomic_helper_commit_modeset_enables(dev, state); + drm_atomic_helper_commit_hw_done(state); + drm_atomic_helper_wait_for_flip_done(dev, state); + drm_atomic_helper_cleanup_planes(dev, state); +} + +static struct drm_mode_config_helper_funcs phytium_mode_config_helpers = { + .atomic_commit_tail = phytium_atomic_commit_tail, +}; + +static int phytium_modeset_init(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int i = 0, ret; + + drm_mode_config_init(dev); + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = 16384; + dev->mode_config.max_height = 16384; + dev->mode_config.cursor_width = 32; + dev->mode_config.cursor_height = 32; + + dev->mode_config.preferred_depth = 24; + dev->mode_config.prefer_shadow = 1; + dev->mode_config.fb_modifiers_not_supported = false; + + dev->mode_config.funcs = &phytium_mode_funcs; + dev->mode_config.helper_private = &phytium_mode_config_helpers; + + for_each_pipe_masked(priv, i) { + ret = phytium_crtc_init(dev, i); + if (ret) { + DRM_ERROR("phytium_crtc_init(pipe %d) return failed\n", i); + goto failed_crtc_init; + } + } + + for_each_pipe_masked(priv, i) { + ret = phytium_dp_init(dev, i); + if (ret) { + DRM_ERROR("phytium_dp_init(pipe %d) return failed\n", i); + goto failed_dp_init; + } + } + + drm_mode_config_reset(dev); + + return 0; +failed_dp_init: +failed_crtc_init: + drm_mode_config_cleanup(dev); + return ret; +} + +int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe) +{ + int i = 0; + int virt_pipe = 0; + + for_each_pipe_masked(priv, i) { + if (i != phys_pipe) + virt_pipe++; + else + return virt_pipe; + } + + DRM_ERROR("%s %d failed\n", __func__, phys_pipe); + return -EINVAL; +} + +int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe) +{ + int i = 0; + int tmp = 0; + + 
for_each_pipe_masked(priv, i) { + if (tmp != virt_pipe) + tmp++; + else + return i; + } + + DRM_ERROR("%s %d failed\n", __func__, virt_pipe); + return -EINVAL; +} + +static int phytium_display_load(struct drm_device *dev, unsigned long flags) +{ + struct phytium_display_private *priv = dev->dev_private; + int ret = 0; + + ret = drm_vblank_init(dev, priv->info.num_pipes); + if (ret) { + DRM_ERROR("vblank init failed\n"); + goto failed_vblank_init; + } + + ret = phytium_modeset_init(dev); + if (ret) { + DRM_ERROR("phytium_modeset_init failed\n"); + goto failed_modeset_init; + } + + if (priv->support_memory_type & MEMORY_TYPE_VRAM) + priv->vram_hw_init(priv); + + phytium_irq_preinstall(dev); + ret = request_irq(priv->irq, phytium_display_irq_handler, + IRQF_SHARED, dev->driver->name, dev); + if (ret) { + DRM_ERROR("install irq failed\n"); + goto failed_irq_install; + } + + ret = phytium_drm_fbdev_init(dev); + if (ret) + DRM_ERROR("failed to init dev\n"); + + phytium_debugfs_display_register(priv); + + return ret; + +failed_irq_install: + drm_mode_config_cleanup(dev); +failed_modeset_init: +failed_vblank_init: + return ret; +} + +static void phytium_display_unload(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + + phytium_drm_fbdev_fini(dev); + phytium_irq_uninstall(dev); + free_irq(priv->irq, dev); + drm_mode_config_cleanup(dev); +} + +static const struct drm_ioctl_desc phytium_ioctls[] = { + /* for test, none so far */ +}; + +static const struct file_operations phytium_drm_driver_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .compat_ioctl = drm_compat_ioctl, + .poll = drm_poll, + .read = drm_read, + .llseek = no_llseek, + .mmap = phytium_gem_mmap, +}; + +struct drm_driver phytium_display_drm_driver = { + .driver_features = DRIVER_HAVE_IRQ | + DRIVER_MODESET | + DRIVER_ATOMIC | + DRIVER_GEM, + .load = phytium_display_load, + .unload = phytium_display_unload, + 
.lastclose = drm_fb_helper_lastclose, + .gem_prime_import = drm_gem_prime_import, + .gem_prime_import_sg_table = phytium_gem_prime_import_sg_table, + .dumb_create = phytium_gem_dumb_create, + .ioctls = phytium_ioctls, + .num_ioctls = ARRAY_SIZE(phytium_ioctls), + .fops = &phytium_drm_driver_fops, + .name = DRV_NAME, + .desc = DRV_DESC, + .date = DRV_DATE, + .major = DRV_MAJOR, + .minor = DRV_MINOR, +}; + +static void phytium_display_shutdown(struct drm_device *dev) +{ + drm_atomic_helper_shutdown(dev); +} + +static int phytium_display_pm_suspend(struct drm_device *dev) +{ + struct drm_atomic_state *state; + struct phytium_display_private *priv = dev->dev_private; + int ret, ret1; + + phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); + state = drm_atomic_helper_suspend(dev); + if (IS_ERR(state)) { + DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", PTR_ERR(state)); + ret = PTR_ERR(state); + goto suspend_failed; + } + dev->mode_config.suspend_state = state; + ret = phytium_gem_suspend(dev); + if (ret) { + DRM_ERROR("phytium_gem_suspend failed: %d\n", ret); + goto gem_suspend_failed; + } + + return 0; + +gem_suspend_failed: + ret1 = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); + if (ret1) + DRM_ERROR("Failed to resume (%d)\n", ret1); + dev->mode_config.suspend_state = NULL; +suspend_failed: + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + phytium_dp_hpd_irq_setup(dev, true); + + return ret; +} + +static int phytium_display_pm_resume(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + int ret = 0; + + if (WARN_ON(!dev->mode_config.suspend_state)) + return -EINVAL; + + ret = phytium_dp_resume(dev); + if (ret) + return -EIO; + + phytium_crtc_resume(dev); + phytium_gem_resume(dev); + + if (priv->support_memory_type & MEMORY_TYPE_VRAM) + priv->vram_hw_init(priv); + + ret = drm_atomic_helper_resume(dev, 
dev->mode_config.suspend_state); + if (ret) { + DRM_ERROR("Failed to resume (%d)\n", ret); + return ret; + } + + dev->mode_config.suspend_state = NULL; + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + phytium_dp_hpd_irq_setup(dev, true); + + return 0; +} + +void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev) +{ + INIT_LIST_HEAD(&priv->gem_list_head); + spin_lock_init(&priv->hotplug_irq_lock); + INIT_WORK(&priv->hotplug_work, phytium_dp_hpd_work_func); + memset(priv->mem_state, 0, sizeof(priv->mem_state)); + priv->dev = dev; + priv->display_shutdown = phytium_display_shutdown; + priv->display_pm_suspend = phytium_display_pm_suspend; + priv->display_pm_resume = phytium_display_pm_resume; +} + +static int __init phytium_display_init(void) +{ + int ret = 0; + + ret = platform_driver_register(&phytium_platform_driver); + if (ret) + return ret; + + ret = pci_register_driver(&phytium_pci_driver); + + return ret; +} + +static void __exit phytium_display_exit(void) +{ + pci_unregister_driver(&phytium_pci_driver); + + platform_driver_unregister(&phytium_platform_driver); +} + +module_init(phytium_display_init); +module_exit(phytium_display_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Yang Xun "); +MODULE_DESCRIPTION("Phytium Display Controller"); diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.h b/drivers/gpu/drm/phytium/phytium_display_drv.h new file mode 100644 index 000000000000..70080dad8621 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_display_drv.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_DISPLAY_DRV_H__ +#define __PHYTIUM_DISPLAY_DRV_H__ + +#include +#include + +#define DEBUG_LOG 0 + +#define PHYTIUM_FORMAT_MAX_PLANE 3 +#define DP_MAX_DOWNSTREAM_PORTS 0x10 + +#define DRV_NAME "dc" +#define DRV_DESC "phytium dc" +#define DRV_DATE "20201220" +#define DRV_MAJOR 1 +#define DRV_MINOR 1 + +/* come from GPU */ +#define DRM_FORMAT_MOD_VENDOR_PHYTIUM 0x92 + +/* dc:mode0 8x8 16bpp gpu: FBCDC_8X8_V10 */ +#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC fourcc_mod_code(PHYTIUM, 21) +/* dc:mode3 8x4 32bpp gpu: FBCDC_16X4_v10 */ +#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC fourcc_mod_code(PHYTIUM, 22) + +#define PIPE_MASK_SHIFT 0x0 +#define PIPE_MASK_MASK 0x7 +#define EDP_MASK_SHIFT 0x3 +#define EDP_MASK_MASK 0x7 + +enum phytium_platform { + PHYTIUM_PLATFORM_UNINITIALIZED = 0, + PHYTIUM_PLATFORM_PX210, + PHYTIUM_PLATFORM_PE220X, +}; + +enum phytium_mem_state_type { + PHYTIUM_MEM_VRAM_TOTAL = 0, + PHYTIUM_MEM_VRAM_ALLOC, + PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL, + PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC, + PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC, + PHYTIUM_MEM_STATE_TYPE_COUNT, +}; + +#define MEMORY_TYPE_VRAM 0x1 +#define MEMORY_TYPE_SYSTEM_CARVEOUT 0x2 +#define MEMORY_TYPE_SYSTEM_UNIFIED 0x4 + +#define IS_PLATFORM(priv, p) ((priv)->info.platform_mask & BIT(p)) + +#define IS_PX210(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_PX210) +#define IS_PE220X(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_PE220X) + +struct phytium_device_info { + unsigned char platform_mask; + unsigned char pipe_mask; + unsigned char num_pipes; + unsigned char total_pipes; + unsigned char edp_mask; + unsigned int crtc_clock_max; + unsigned int hdisplay_max; + unsigned int vdisplay_max; + unsigned int backlight_max; + unsigned long address_mask; +}; + +struct phytium_display_private { + /* hw */ + void __iomem *regs; + void __iomem *vram_addr; + struct phytium_device_info info; + char support_memory_type; + char reserve[3]; + uint32_t dc_reg_base[3]; + uint32_t dcreq_reg_base[3]; + 
uint32_t dp_reg_base[3]; + uint32_t address_transform_base; + uint32_t phy_access_base[3]; + + /* drm */ + struct drm_device *dev; + int irq; + + /* fb_dev */ + struct drm_fb_helper fbdev_helper; + struct phytium_gem_object *fbdev_phytium_gem; + + int save_reg[3]; + struct list_head gem_list_head; + + struct work_struct hotplug_work; + spinlock_t hotplug_irq_lock; + + void (*vram_hw_init)(struct phytium_display_private *priv); + void (*display_shutdown)(struct drm_device *dev); + int (*display_pm_suspend)(struct drm_device *dev); + int (*display_pm_resume)(struct drm_device *dev); + void (*dc_hw_clear_msi_irq)(struct phytium_display_private *priv, uint32_t phys_pipe); + int (*dc_hw_fb_format_check)(const struct drm_mode_fb_cmd2 *mode_cmd, int count); + + struct gen_pool *memory_pool; + resource_size_t pool_phys_addr; + resource_size_t pool_size; + void *pool_virt_addr; + uint64_t mem_state[PHYTIUM_MEM_STATE_TYPE_COUNT]; + + /* DMA info */ + int dma_inited; + struct dma_chan *dma_chan; +}; + +static inline unsigned int +phytium_readl_reg(struct phytium_display_private *priv, uint32_t group_offset, uint32_t reg_offset) +{ + unsigned int data; + + data = readl(priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Read 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); +#endif + return data; +} + +static inline void +phytium_writel_reg(struct phytium_display_private *priv, uint32_t data, + uint32_t group_offset, uint32_t reg_offset) +{ + + writel(data, priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Write 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); +#endif +} + +static inline void +phytium_writeb_reg(struct phytium_display_private *priv, uint8_t data, + uint32_t group_offset, uint32_t reg_offset) +{ + writeb(data, priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Write 32'h%08x 8'h%08x\n", group_offset + reg_offset, data); +#endif +} + +#define for_each_pipe(__dev_priv, __p) \ + for ((__p) = 0; (__p) < 
__dev_priv->info.total_pipes; (__p)++) + +#define for_each_pipe_masked(__dev_priv, __p) \ + for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) \ + for_each_if((__dev_priv->info.pipe_mask) & BIT(__p)) + +int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe); +int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe); +int phytium_wait_cmd_done(struct phytium_display_private *priv, + uint32_t register_offset, + uint32_t request_bit, + uint32_t reply_bit); +void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev); + +extern struct drm_driver phytium_display_drm_driver; +extern int dc_fake_mode_enable; +extern int dc_fast_training_check; +extern int num_source_rates; +extern int source_max_lane_count; +extern int link_dynamic_adjust; + +#endif /* __PHYTIUM_DISPLAY_DRV_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_dp.c b/drivers/gpu/drm/phytium/phytium_dp.c new file mode 100644 index 000000000000..98a06ccbc48d --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_dp.c @@ -0,0 +1,2639 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_dp.h" +#include "phytium_debugfs.h" +#include "px210_dp.h" +#include "pe220x_dp.h" +#include "phytium_panel.h" +#include "phytium_reg.h" + +static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp); +static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged); +static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp); +static void phytium_edp_fini_connector(struct phytium_dp_device *phytium_dp); +static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp); +static void phytium_dp_audio_codec_fini(struct phytium_dp_device *phytium_dp); + +static int phytium_rate[] = {162000, 270000, 540000, 810000}; +static int codec_id = PHYTIUM_DP_AUDIO_ID; + +void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->phy_access_base[port]; + +#if DEBUG_LOG + pr_info("phy address write: 0x%x data:0x%x\n", address, data); +#endif + phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); + phytium_writel_reg(priv, data, group_offset, PHYTIUM_PHY_WRITE_DATA); + phytium_writel_reg(priv, ACCESS_WRITE, group_offset, PHYTIUM_PHY_ACCESS_CTRL); + udelay(10); +} + +uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->phy_access_base[port]; + uint32_t data; + + phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS); + phytium_writel_reg(priv, ACCESS_READ, group_offset, PHYTIUM_PHY_ACCESS_CTRL); + udelay(10); + data = 
phytium_readl_reg(priv, group_offset, PHYTIUM_PHY_READ_DATA); +#if DEBUG_LOG + pr_info("phy address read: 0x%x data:0x%x\n", address, data); +#endif + + return data; +} + +static int +phytium_dp_hw_aux_transfer_write(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + unsigned int i = 0, j = 0; + unsigned int cmd = 0; + unsigned int aux_status = 0, interrupt_status = 0; + unsigned char *data = msg->buffer; + int count_timeout = 0; + long ret = 0; + + for (i = 0; i < 3; i++) { + /* clear PX210_DP_INTERRUPT_RAW_STATUS */ + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS); + for (j = 0; j < msg->size; j++) + phytium_writeb_reg(priv, data[j], group_offset, PHYTIUM_DP_AUX_WRITE_FIFO); + + cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); + if (msg->size == 0) + cmd |= ADDRESS_ONLY; + else + cmd |= (msg->size-1) & BYTE_COUNT_MASK; + phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND); + + count_timeout = 0; + do { + mdelay(5); + interrupt_status = phytium_readl_reg(priv, group_offset, + PHYTIUM_DP_INTERRUPT_RAW_STATUS); + aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS); + if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) + || (interrupt_status & REPLY_TIMEOUT)) { + DRM_DEBUG_KMS("aux wait exit\n"); + break; + } + count_timeout++; + } while (count_timeout < 6); + + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + if (interrupt_status & REPLY_TIMEOUT) { + DRM_DEBUG_KMS("aux write reply timeout\n"); + continue; + } else if (aux_status & REPLY_ERROR) { + DRM_DEBUG_KMS("aux write reply error\n"); + continue; + } else if (aux_status & REPLY_RECEIVED) { + DRM_DEBUG_KMS("aux write reply received 
succussful\n"); + break; + } + } + + if (interrupt_status & REPLY_TIMEOUT) { + DRM_NOTE("aux(%d) write reply timeout\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if (aux_status & REPLY_ERROR) { + DRM_ERROR("aux(%d) write reply error\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { + DRM_ERROR("aux(%d) write reply no response\n", phytium_dp->port); + ret = -EIO; + goto out; + } + + msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE); + ret = msg->size; +out: + return ret; +} + +static int +phytium_dp_hw_aux_transfer_read(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + unsigned int i = 0; + unsigned int cmd = 0; + unsigned int aux_status = 0, interrupt_status = 0; + unsigned char *data = msg->buffer; + int count_timeout = 0; + long ret = 0; + + for (i = 0; i < 3; i++) { + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS); + cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); + if (msg->size == 0) + cmd |= ADDRESS_ONLY; + else + cmd |= ((msg->size-1) & BYTE_COUNT_MASK); + phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND); + + count_timeout = 0; + do { + mdelay(5); + interrupt_status = phytium_readl_reg(priv, group_offset, + PHYTIUM_DP_INTERRUPT_RAW_STATUS); + aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS); + if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) + || (interrupt_status & REPLY_TIMEOUT)) { + DRM_DEBUG_KMS("aux wait exit\n"); + break; + } + count_timeout++; + } while (count_timeout < 6); + + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + if (interrupt_status & REPLY_TIMEOUT) 
{ + DRM_DEBUG_KMS("aux read reply timeout\n"); + continue; + } else if (aux_status & REPLY_ERROR) { + DRM_DEBUG_KMS("aux read reply error\n"); + continue; + } else if (aux_status & REPLY_RECEIVED) { + DRM_DEBUG_KMS("aux read reply received succussful\n"); + break; + } + } + + if (interrupt_status & REPLY_TIMEOUT) { + DRM_NOTE("aux(%d) read reply timeout\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if (aux_status & REPLY_ERROR) { + DRM_ERROR("aux(%d) read reply error\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { + DRM_ERROR("aux(%d) read reply no response\n", phytium_dp->port); + ret = -EIO; + goto out; + } + + msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE); + ret = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA_COUNT); + + if (ret > msg->size) { + ret = msg->size; + } else if (ret != msg->size) { + DRM_DEBUG_KMS("aux read count error(ret:0x%lx != 0x%lx)\n", ret, msg->size); + ret = -EBUSY; + goto out; + } + + for (i = 0; i < ret; i++) + data[i] = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA); + +out: + return ret; +} + +static void phytium_get_native_mode(struct phytium_dp_device *phytium_dp) +{ + struct drm_display_mode *t, *mode; + struct drm_connector *connector = &phytium_dp->connector; + struct drm_display_mode *native_mode = &phytium_dp->native_mode; + + list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { + if (mode->type & DRM_MODE_TYPE_PREFERRED) { + if (mode->hdisplay != native_mode->hdisplay || + mode->vdisplay != native_mode->vdisplay) { + memcpy(native_mode, mode, sizeof(*mode)); + drm_mode_set_crtcinfo(native_mode, 0); + } + break; + } + } + + if (&mode->head == &connector->probed_modes) + native_mode->clock = 0; +} + +static int phytium_connector_add_common_modes(struct phytium_dp_device *phytium_dp) +{ + int i = 0, ret = 0; + struct drm_device *dev = phytium_dp->dev; + struct 
drm_display_mode *mode = NULL, *current_mode = NULL; + struct drm_display_mode *native_mode = &phytium_dp->native_mode; + bool mode_existed = false; + struct mode_size { + char name[DRM_DISPLAY_MODE_LEN]; + int w; + int h; + } common_mode[] = { + { "640x480", 640, 480}, + { "800x600", 800, 600}, + { "1024x768", 1024, 768}, + { "1280x720", 1280, 720}, + { "1280x800", 1280, 800}, + {"1280x1024", 1280, 1024}, + { "1440x900", 1440, 900}, + {"1680x1050", 1680, 1050}, + {"1600x1200", 1600, 1200}, + {"1920x1080", 1920, 1080}, + {"1920x1200", 1920, 1200} + }; + + if (native_mode->clock == 0) + return ret; + + for (i = 0; i < ARRAY_SIZE(common_mode); i++) { + mode_existed = false; + + if (common_mode[i].w > native_mode->hdisplay || + common_mode[i].h > native_mode->vdisplay || + (common_mode[i].w == native_mode->hdisplay && + common_mode[i].h == native_mode->vdisplay)) + continue; + + list_for_each_entry(current_mode, &phytium_dp->connector.probed_modes, head) { + if (common_mode[i].w == current_mode->hdisplay && + common_mode[i].h == current_mode->vdisplay) { + mode_existed = true; + break; + } + } + + if (mode_existed) + continue; + + mode = drm_mode_duplicate(dev, native_mode); + if (mode == NULL) + continue; + + mode->hdisplay = common_mode[i].w; + mode->vdisplay = common_mode[i].h; + mode->type &= ~DRM_MODE_TYPE_PREFERRED; + strscpy(mode->name, common_mode[i].name, DRM_DISPLAY_MODE_LEN); + drm_mode_probed_add(&phytium_dp->connector, mode); + ret++; + } + + return ret; +} + +static int phytium_connector_get_modes(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + struct edid *edid; + int ret = 0; + + if (phytium_dp->is_edp) + edid = phytium_dp->edp_edid; + else + edid = drm_get_edid(connector, &phytium_dp->aux.ddc); + + if (edid && drm_edid_is_valid(edid)) { + drm_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); + phytium_dp->has_audio = 
drm_detect_monitor_audio(edid); + phytium_get_native_mode(phytium_dp); + if (dc_fake_mode_enable) + ret += phytium_connector_add_common_modes(phytium_dp); + } else { + drm_connector_update_edid_property(connector, NULL); + phytium_dp->has_audio = false; + } + + if (!phytium_dp->is_edp) + kfree(edid); + + return ret; +} + +static struct drm_encoder *phytium_dp_best_encoder(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + return &phytium_dp->encoder; +} + +static const +struct drm_connector_helper_funcs phytium_connector_helper_funcs = { + .get_modes = phytium_connector_get_modes, + .best_encoder = phytium_dp_best_encoder, +}; + +static void phytium_dp_set_sink_rates(struct phytium_dp_device *phytium_dp) +{ + static const int dp_rates[] = {162000, 270000, 540000, 810000}; + int i, max_rate; + + max_rate = drm_dp_bw_code_to_link_rate(phytium_dp->dpcd[DP_MAX_LINK_RATE]); + for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { + if (dp_rates[i] > max_rate) + break; + phytium_dp->sink_rates[i] = dp_rates[i]; + } + phytium_dp->num_sink_rates = i; +} + +static int get_common_rates(const int *source_rates, int source_len, const int *sink_rates, + int sink_len, int *common_rates) +{ + int i = 0, j = 0, k = 0; + + while (i < source_len && j < sink_len) { + if (source_rates[i] == sink_rates[j]) { + if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) + return k; + common_rates[k] = source_rates[i]; + ++k; + ++i; + ++j; + } else if (source_rates[i] < sink_rates[j]) { + ++i; + } else { + ++j; + } + } + return k; +} + +static void phytium_dp_set_common_rates(struct phytium_dp_device *phytium_dp) +{ + WARN_ON(!phytium_dp->num_source_rates || !phytium_dp->num_sink_rates); + + phytium_dp->num_common_rates = get_common_rates(phytium_dp->source_rates, + phytium_dp->num_source_rates, + phytium_dp->sink_rates, + phytium_dp->num_sink_rates, + phytium_dp->common_rates); + + if (WARN_ON(phytium_dp->num_common_rates == 0)) { + 
phytium_dp->common_rates[0] = 162000; + phytium_dp->num_common_rates = 1; + } +} + +static bool phytium_dp_get_dpcd(struct phytium_dp_device *phytium_dp) +{ + int ret; + unsigned char sink_count = 0; + + /* get dpcd capability,but don't check data error; so check revision */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, 0x00, phytium_dp->dpcd, + sizeof(phytium_dp->dpcd)); + if (ret < 0) { + DRM_ERROR("port %d get DPCD capability fail\n", phytium_dp->port); + return false; + } + + if (phytium_dp->dpcd[DP_DPCD_REV] == 0) { + DRM_ERROR("DPCD data error: 0x%x\n", phytium_dp->dpcd[DP_DPCD_REV]); + return false; + } + + /* parse sink support link */ + phytium_dp_set_sink_rates(phytium_dp); + phytium_dp_set_common_rates(phytium_dp); + phytium_dp->sink_max_lane_count = drm_dp_max_lane_count(phytium_dp->dpcd); + phytium_dp->common_max_lane_count = min(phytium_dp->source_max_lane_count, + phytium_dp->sink_max_lane_count); + + /* get dpcd sink count */ + if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_SINK_COUNT, &sink_count) <= 0) { + DRM_ERROR("get DPCD sink_count fail\n"); + return false; + } + + phytium_dp->sink_count = DP_GET_SINK_COUNT(sink_count); + if (!phytium_dp->sink_count) { + DRM_ERROR("DPCD sink_count should not be zero\n"); + return false; + } + + if (!drm_dp_is_branch(phytium_dp->dpcd)) + return true; + + if (phytium_dp->dpcd[DP_DPCD_REV] == 0x10) + return true; + + /* get downstream port for branch device */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_DOWNSTREAM_PORT_0, + phytium_dp->downstream_ports, DP_MAX_DOWNSTREAM_PORTS); + if (ret < 0) { + DRM_ERROR("get DPCD DFP fail\n"); + return false; + } + + return true; +} + +static enum drm_connector_status +phytium_dp_detect_dpcd(struct phytium_dp_device *phytium_dp) +{ + if (!phytium_dp_get_dpcd(phytium_dp)) + return connector_status_disconnected; + + if (!drm_dp_is_branch(phytium_dp->dpcd)) + return connector_status_connected; + + if (phytium_dp->downstream_ports[0] & DP_DS_PORT_HPD) { + return 
phytium_dp->sink_count ? connector_status_connected + : connector_status_disconnected; + } + return connector_status_connected; +} + +static void phytium_get_adjust_train(struct phytium_dp_device *phytium_dp, + const uint8_t link_status[DP_LINK_STATUS_SIZE], uint8_t lane_count) +{ + unsigned char v = 0; + unsigned char p = 0; + int lane; + unsigned char voltage_max; + unsigned char preemph_max; + + /* find max value */ + for (lane = 0; lane < lane_count; lane++) { + uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); + uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + + if (this_v > v) + v = this_v; + if (this_p > p) + p = this_p; + } + voltage_max = DP_TRAIN_VOLTAGE_SWING_LEVEL_3; + if (v >= voltage_max) + v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; + + preemph_max = DP_TRAIN_PRE_EMPH_LEVEL_3; + if (p >= preemph_max) + p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + for (lane = 0; lane < 4; lane++) + phytium_dp->train_set[lane] = v | p; +} + +bool phytium_dp_coding_8b10b_need_enable(unsigned char test_pattern) +{ + switch (test_pattern) { + case PHYTIUM_PHY_TP_D10_2: + case PHYTIUM_PHY_TP_SYMBOL_ERROR: + case PHYTIUM_PHY_TP_CP2520_1: + case PHYTIUM_PHY_TP_CP2520_2: + case PHYTIUM_PHY_TP_CP2520_3: + return true; + case PHYTIUM_PHY_TP_PRBS7: + case PHYTIUM_PHY_TP_80BIT_CUSTOM: + return false; + default: + return false; + } +} + +bool phytium_dp_scrambled_need_enable(unsigned char test_pattern) +{ + switch (test_pattern) { + case PHYTIUM_PHY_TP_SYMBOL_ERROR: + case PHYTIUM_PHY_TP_CP2520_1: + case PHYTIUM_PHY_TP_CP2520_2: + case PHYTIUM_PHY_TP_CP2520_3: + return true; + case PHYTIUM_PHY_TP_D10_2: + case PHYTIUM_PHY_TP_PRBS7: + case PHYTIUM_PHY_TP_80BIT_CUSTOM: + return false; + default: + return false; + } +} + +static void phytium_dp_hw_set_lane_setting(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, + uint8_t train_set) +{ + phytium_dp->funcs->dp_hw_set_phy_lane_setting(phytium_dp, link_rate, 
train_set); +} + +static void phytium_dp_hw_set_link(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, + uint32_t link_rate) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0, retry = 3; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, lane_count, + group_offset, PHYTIUM_DP_LANE_COUNT_SET); + phytium_writel_reg(priv, + drm_dp_link_rate_to_bw_code(link_rate), + group_offset, PHYTIUM_DP_LINK_BW_SET); + + if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) + phytium_writel_reg(priv, ENHANCED_FRAME_ENABLE, + group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); + else + phytium_writel_reg(priv, ENHANCED_FRAME_DISABLE, + group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); + +try_again: + ret = phytium_dp->funcs->dp_hw_set_phy_lane_and_rate(phytium_dp, lane_count, link_rate); + if ((ret < 0) && retry) { + retry--; + goto try_again; + } +} + +static void phytium_dp_hw_set_test_pattern(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, + uint8_t test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, val = 0, tmp = 0, i; + uint32_t group_offset = priv->dp_reg_base[port]; + + if ((test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) + && custom_pattern && (custom_pattern_size > 0)) { + val = *(int *)custom_pattern; + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0); + val = *(int *)(custom_pattern + 4); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1); + val = *(short int *)(custom_pattern + 8); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2); + } + + if (test_pattern == PHYTIUM_PHY_TP_D10_2 || test_pattern == PHYTIUM_PHY_TP_PRBS7 + || test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) + phytium_writel_reg(priv, 
SCRAMBLING_DISABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + else + phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + + tmp = test_pattern - PHYTIUM_PHY_TP_NONE + TEST_PATTERN_NONE; + val = 0; + for (i = 0; i < lane_count; i++) + val |= (tmp << (TEST_PATTERN_LANE_SHIFT * i)); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_LINK_QUAL_PATTERN_SET); +} + +static void phytium_dp_hw_set_train_pattern(struct phytium_dp_device *phytium_dp, + uint8_t train_pattern) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, tmp = 0; + uint32_t group_offset = priv->dp_reg_base[port]; + + /* Scrambling is disabled for TPS1/TPS2/3 and enabled for TPS4 */ + if (train_pattern == DP_TRAINING_PATTERN_4 + || train_pattern == DP_TRAINING_PATTERN_DISABLE) { + phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + phytium_writel_reg(priv, SCRAMBLER_RESET, group_offset, + PHYTIUM_DP_FORCE_SCRAMBLER_RESET); + } else { + phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + } + switch (train_pattern) { + case DP_TRAINING_PATTERN_DISABLE: + tmp = TRAINING_OFF; + break; + case DP_TRAINING_PATTERN_1: + tmp = TRAINING_PATTERN_1; + break; + case DP_TRAINING_PATTERN_2: + tmp = TRAINING_PATTERN_2; + break; + case DP_TRAINING_PATTERN_3: + tmp = TRAINING_PATTERN_3; + break; + case DP_TRAINING_PATTERN_4: + tmp = TRAINING_PATTERN_4; + break; + default: + tmp = TRAINING_OFF; + break; + } + + phytium_writel_reg(priv, tmp, group_offset, PHYTIUM_DP_TRAINING_PATTERN_SET); +} + +void phytium_dp_hw_enable_audio(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int config = 0, config1, data_window = 0; + const struct dp_audio_n_m *n_m = NULL; + uint32_t group_offset 
= priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + + data_window = 90*(phytium_dp->link_rate)/100 + *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) + /phytium_dp->mode.clock/4; + + phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW); + + n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); + if (n_m == NULL) { + DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", + phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD); + } else { + phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD); + } + + config1 = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, + group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + phytium_writel_reg(priv, config1, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); +} + +static void phytium_dp_hw_audio_shutdown(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, + group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); +} + +static void phytium_dp_hw_audio_digital_mute(struct phytium_dp_device *phytium_dp, bool enable) +{ + struct phytium_display_private *priv = phytium_dp->dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + if (enable) + phytium_writel_reg(priv, 
CHANNEL_MUTE_ENABLE, + group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + else + phytium_writel_reg(priv, SEC_AUDIO_ENABLE, + group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); +} + +static int +phytium_dp_hw_audio_hw_params(struct phytium_dp_device *phytium_dp, struct audio_info audio_info) +{ + struct phytium_display_private *priv = phytium_dp->dev->dev_private; + int port = phytium_dp->port; + int ret = 0, data_window = 0; + const struct dp_audio_n_m *n_m = NULL; + uint32_t fs, ws, fs_accurac; + uint32_t group_offset = priv->dp_reg_base[port]; + + DRM_DEBUG_KMS("%s:set port%d sample_rate(%d) channels(%d) sample_width(%d)\n", + __func__, phytium_dp->port, audio_info.sample_rate, + audio_info.channels, audio_info.sample_width); + + phytium_writel_reg(priv, INPUT_SELECT_I2S, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT); + phytium_writel_reg(priv, APB_CLOCK/audio_info.sample_rate, + group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV); + phytium_writel_reg(priv, audio_info.channels & CHANNEL_MASK, + group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT); + phytium_writel_reg(priv, CHANNEL_MAP_DEFAULT, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP); + data_window = 90*(phytium_dp->link_rate)/100 + *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) + /phytium_dp->mode.clock/4; + phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW); + phytium_writel_reg(priv, 0xb5, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE); + + phytium_writel_reg(priv, CLOCK_MODE_SYNC, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE); + phytium_writel_reg(priv, CS_SOURCE_FORMAT_DEFAULT, + group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT); + + switch (audio_info.sample_rate) { + case 32000: + fs = ORIG_FREQ_32000; + fs_accurac = SAMPLING_FREQ_32000; + break; + case 44100: + fs = ORIG_FREQ_44100; + fs_accurac = SAMPLING_FREQ_44100; + break; + case 48000: + fs = ORIG_FREQ_48000; + fs_accurac = SAMPLING_FREQ_48000; + break; + case 96000: + fs = ORIG_FREQ_96000; + fs_accurac = SAMPLING_FREQ_96000; + break; + case 176400: 
+		fs = ORIG_FREQ_176400;
+		fs_accurac = SAMPLING_FREQ_176400;
+		break;
+	case 192000:
+		fs = ORIG_FREQ_192000;
+		fs_accurac = SAMPLING_FREQ_192000;
+		break;
+	default:
+		DRM_ERROR("dp not support sample_rate %d\n", audio_info.sample_rate);
+		goto out;
+	}
+
+	switch (audio_info.sample_width) {
+	case 16:
+		ws = WORD_LENGTH_16;
+		break;
+	case 18:
+		ws = WORD_LENGTH_18;
+		break;
+	case 20:
+		ws = WORD_LENGTH_20;
+		break;
+	case 24:
+		ws = WORD_LENGTH_24;
+		break;
+	default:
+		DRM_ERROR("dp not support sample_width %d\n", audio_info.sample_width);
+		goto out;
+	}
+
+	phytium_writel_reg(priv, ((fs&ORIG_FREQ_MASK)<<ORIG_FREQ_SHIFT)
+			   | ((ws&WORD_LENGTH_MASK)<<WORD_LENGTH_SHIFT),
+			   group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ);
+	phytium_writel_reg(priv, (fs_accurac&SAMPLING_FREQ_MASK)<<SAMPLING_FREQ_SHIFT,
+			   group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY);
+	n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, audio_info.sample_rate);
+	if (n_m == NULL) {
+		DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n",
+			 phytium_dp->link_rate, audio_info.sample_rate);
+		phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD);
+		phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD);
+
+	} else {
+		phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD);
+		phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD);
+	}
+	phytium_writel_reg(priv, SECONDARY_STREAM_ENABLE,
+			   group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+	phytium_dp->audio_info = audio_info;
+
+	return 0;
+
+out:
+	phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE,
+			   group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+
+	return ret;
+}
+
+void phytium_dp_hw_disable_video(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	phytium_writel_reg(priv, SST_MST_SOURCE_0_DISABLE,
+			   group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE);
+}
+
+bool phytium_dp_hw_video_is_enable(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port, config;
+	uint32_t group_offset = 
priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); + return config ? true : false; +} + +void phytium_dp_hw_enable_video(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, SST_MST_SOURCE_0_ENABLE, + group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); +} + +void phytium_dp_hw_config_video(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + unsigned long link_bw, date_rate = 0; + struct drm_display_info *display_info = &phytium_dp->connector.display_info; + unsigned char tu_size = 64; + unsigned long data_per_tu = 0; + int symbols_per_tu, frac_symbols_per_tu, symbol_count, udc, value; + + /* cal M/N and tu_size */ + phytium_writel_reg(priv, phytium_dp->mode.crtc_clock/10, group_offset, PHYTIUM_DP_M_VID); + phytium_writel_reg(priv, phytium_dp->link_rate/10, group_offset, PHYTIUM_DP_N_VID); + link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; + date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; + + /* mul 10 for register setting */ + data_per_tu = 10*tu_size * date_rate/link_bw; + symbols_per_tu = (data_per_tu/10)&0xff; + frac_symbols_per_tu = (data_per_tu%10*16/10) & 0xf; + phytium_writel_reg(priv, frac_symbols_per_tu<<24 | symbols_per_tu<<16 | tu_size, + group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE); + + symbol_count = (phytium_dp->mode.crtc_hdisplay*display_info->bpc*3 + 7)/8; + udc = (symbol_count + phytium_dp->link_lane_count - 1)/phytium_dp->link_lane_count; + phytium_writel_reg(priv, udc, group_offset, PHYTIUM_DP_DATA_COUNT); + 
+ /* config main stream attributes */ + phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal, + group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL); + phytium_writel_reg(priv, phytium_dp->mode.crtc_hdisplay, + group_offset, PHYTIUM_DP_MAIN_LINK_HRES); + phytium_writel_reg(priv, + phytium_dp->mode.crtc_hsync_end - phytium_dp->mode.crtc_hsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH); + phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal - phytium_dp->mode.crtc_hsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_HSTART); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal, + group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vdisplay, + group_offset, PHYTIUM_DP_MAIN_LINK_VRES); + phytium_writel_reg(priv, + phytium_dp->mode.crtc_vsync_end - phytium_dp->mode.crtc_vsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal - phytium_dp->mode.crtc_vsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_VSTART); + +/* sync polarity: the register encodes active-low, so positive-sync mode flags clear the LOW bits */ + value = 0; + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) + value = value & (~HSYNC_POLARITY_LOW); + else + value = value | HSYNC_POLARITY_LOW; + + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) + value = value & (~VSYNC_POLARITY_LOW); + else + value = value | VSYNC_POLARITY_LOW; + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY); + +/* MISC0: colour depth from connector bpc (6/8/10, default 8), RGB format, synchronous clock */ + switch (display_info->bpc) { + case 10: + value = (MISC0_BIT_DEPTH_10BIT << MISC0_BIT_DEPTH_OFFSET); + break; + case 6: + value = (MISC0_BIT_DEPTH_6BIT << MISC0_BIT_DEPTH_OFFSET); + break; + default: + value = (MISC0_BIT_DEPTH_8BIT << MISC0_BIT_DEPTH_OFFSET); + break; + } + value |= (MISC0_COMPONENT_FORMAT_RGB << MISC0_COMPONENT_FORMAT_SHIFT) + | MISC0_SYNCHRONOUS_CLOCK; + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1); + + value = USER_ODDEVEN_POLARITY_HIGH | USER_DATA_ENABLE_POLARITY_HIGH; + if 
(phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) + value = value | USER_HSYNC_POLARITY_HIGH; + else + value = value & (~USER_HSYNC_POLARITY_HIGH); + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) + value = value | USER_VSYNC_POLARITY_HIGH; + else + value = value & (~USER_VSYNC_POLARITY_HIGH); + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY); +} + +/* Disable the transmitter output, then soft-reset the link. Note the ordering is mirrored in enable_output below (reset before enable). */ +void phytium_dp_hw_disable_output(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, TRANSMITTER_OUTPUT_DISABLE, + group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); +} + +/* Soft-reset the link, then enable the transmitter output. */ +void phytium_dp_hw_enable_output(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); + phytium_writel_reg(priv, TRANSMITTER_OUTPUT_ENABLE, + group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); +} + +/* Enable virtual input source 0 for this port. */ +void phytium_dp_hw_enable_input_source(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, VIRTUAL_SOURCE_0_ENABLE, + group_offset, PHYTIUM_INPUT_SOURCE_ENABLE); +} + +/* Disable virtual input source 0 (write the inverted enable bit masked to the field width). */ +void phytium_dp_hw_disable_input_source(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + + phytium_writel_reg(priv, (~VIRTUAL_SOURCE_0_ENABLE)&VIRTUAL_SOURCE_0_ENABLE_MASK, + priv->dp_reg_base[port], 
PHYTIUM_INPUT_SOURCE_ENABLE); +} + +/* Return true if the transmitter output enable register reads non-zero. */ +bool phytium_dp_hw_output_is_enable(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + int config = 0; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); + return config ? true : false; +} + +/* Sample HPD state into phytium_dp->dp_hpd_state: latches event/irq bits from the raw-status register and the current connect level from the sink-HPD register. Reading INTERRUPT_STATUS is done purely for its read-to-clear side effect (see comment below). NOTE(review): hpd_event_state/hpd_irq_state are only ever set true here; they are presumably cleared by the consumer — confirm. */ +static void phytium_dp_hw_get_hpd_state(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t val = 0, raw_state = 0; + uint32_t group_offset = priv->dp_reg_base[port]; + + val = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_RAW_STATUS); + + /* maybe miss hpd, so used for clear PHYTIUM_DP_INTERRUPT_RAW_STATUS */ + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + raw_state = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SINK_HPD_STATE); + if (val & HPD_EVENT) + phytium_dp->dp_hpd_state.hpd_event_state = true; + + if (val & HPD_IRQ) + phytium_dp->dp_hpd_state.hpd_irq_state = true; + + if (raw_state & HPD_CONNECT) + phytium_dp->dp_hpd_state.hpd_raw_state = true; + else + phytium_dp->dp_hpd_state.hpd_raw_state = false; +} + +/* Mask/unmask HPD interrupts for one port: enable leaves only OTHER masked (event+irq delivered); disable masks event, irq and other. The software flag hpd_irq_enable mirrors the hardware mask. */ +void phytium_dp_hw_hpd_irq_setup(struct phytium_dp_device *phytium_dp, bool enable) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_dp->dp_hpd_state.hpd_irq_enable = enable; + if (enable) + phytium_writel_reg(priv, HPD_OTHER_MASK, group_offset, PHYTIUM_DP_INTERRUPT_MASK); + else + phytium_writel_reg(priv, HPD_IRQ_MASK|HPD_EVENT_MASK|HPD_OTHER_MASK, + group_offset, PHYTIUM_DP_INTERRUPT_MASK); +} + +/* One-time hardware init: record source rates/lane count, reset the controller and bring up the PHY via the per-SoC funcs table; caches the hardware spread-spectrum setting. Returns 0 or the first funcs-callback error. */ +int phytium_dp_hw_init(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + uint8_t count = 0; + 
+ phytium_dp->source_rates = phytium_rate; + phytium_dp->num_source_rates = num_source_rates; + count = phytium_dp->funcs->dp_hw_get_source_lane_count(phytium_dp); + phytium_dp->source_max_lane_count = count; + + ret = phytium_dp->funcs->dp_hw_reset(phytium_dp); + if (ret) + goto out; + ret = phytium_dp->funcs->dp_hw_init_phy(phytium_dp); + if (ret) + goto out; + + phytium_dp->fast_train_support = false; + phytium_dp->hw_spread_enable = phytium_dp->funcs->dp_hw_spread_is_enable(phytium_dp); + +out: + return ret; +} + +/* Read the sink's requested test lane count (DP_TEST_LANE_COUNT) and test link rate (DP_TEST_LINK_RATE, converted from BW code). Returns 0 on success, <=0 aux result on failure. */ +static int phytium_dp_dpcd_get_tp_link(struct phytium_dp_device *phytium_dp, + uint8_t *test_lane_count, + uint32_t *test_link_rate) +{ + uint8_t test_link_bw; + int ret; + + ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LANE_COUNT, + test_lane_count); + if (ret <= 0) { + DRM_DEBUG_KMS("test pattern Lane count read failed(%d)\n", ret); + goto failed; + } + + ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LINK_RATE, + &test_link_bw); + if (ret <= 0) { + DRM_DEBUG_KMS("test pattern link rate read failed(%d)\n", ret); + goto failed; + } + *test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); + + return 0; +failed: + return ret; +} + +/* Write the sink's link configuration: DP_LINK_BW_SET + lane count (with enhanced framing when the sink advertises it), then DP_DOWNSPREAD_CTRL (0.5% downspread only if the source hardware has spread enabled) and 8b/10b coding. Returns 0 or a negative aux error. */ +static int phytium_dp_dpcd_set_link(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, uint32_t link_rate) +{ + uint8_t link_config[2]; + int ret = 0; + + link_config[0] = drm_dp_link_rate_to_bw_code(link_rate); + link_config[1] = lane_count; + if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) + link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_LINK_BW_SET, link_config, 2); + if (ret < 0) { + DRM_NOTE("write dpcd DP_LINK_BW_SET fail: ret:%d\n", ret); + goto failed; + } + + if (phytium_dp->hw_spread_enable) + link_config[0] = DP_SPREAD_AMP_0_5; + else + link_config[0] = 0; + link_config[1] = DP_SET_ANSI_8B10B; + ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2); + if (ret < 0) { + DRM_ERROR("write DP_DOWNSPREAD_CTRL fail: 
ret:%d\n", ret); + goto failed; + } + + return 0; +failed: + return ret; +} + +/* Configure the sink for a PHY compliance test pattern: set/clear 8b10b coding, set scrambling via DP_TRAINING_PATTERN_SET depending on the pattern, then write the pattern to DP_LINK_QUAL_LANE0_SET. NOTE(review): the last error message says DP_TRAINING_PATTERN_SET but the failing write is DP_LINK_QUAL_LANE0_SET — copy-paste in the log string. */ +static int phytium_dp_dpcd_set_test_pattern(struct phytium_dp_device *phytium_dp, + uint8_t test_pattern) +{ + unsigned char value; + int ret; + + if (phytium_dp_coding_8b10b_need_enable(test_pattern)) + value = DP_SET_ANSI_8B10B; + else + value = 0; + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value); + if (ret < 0) { + DRM_ERROR("write DP_MAIN_LINK_CHANNEL_CODING_SET fail: ret:%d\n", ret); + goto failed; + } + + if (phytium_dp_scrambled_need_enable(test_pattern)) + value = DP_TRAINING_PATTERN_DISABLE; + else + value = (DP_TRAINING_PATTERN_DISABLE | DP_LINK_SCRAMBLING_DISABLE); + + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value); + if (ret < 0) { + DRM_ERROR("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret); + goto failed; + } + + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_LINK_QUAL_LANE0_SET, test_pattern); + if (ret < 0) { + DRM_ERROR("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret); + goto failed; + } + + return 0; +failed: + return ret; +} + +/* Write the training pattern to the sink; scrambling stays disabled for TPS1/2/3 (DP spec) and enabled for TPS4 or pattern-disable. */ +static int phytium_dp_dpcd_set_train_pattern(struct phytium_dp_device *phytium_dp, + uint8_t train_pattern) +{ + uint8_t value; + int ret; + + /* Scrambling is disabled for TPS1/2/3 and enabled for TPS4 */ + if (train_pattern == DP_TRAINING_PATTERN_4 || train_pattern == DP_TRAINING_PATTERN_DISABLE) + value = train_pattern; + else + value = (train_pattern | DP_LINK_SCRAMBLING_DISABLE); + + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value); + if (ret < 0) { + DRM_NOTE("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret); + goto failed; + } + + return 0; +failed: + return ret; +} + +/* Push four lane settings (voltage swing / pre-emphasis) to DP_TRAINING_LANE0_SET. NOTE(review): the train_set parameter is ignored — the write always uses phytium_dp->train_set; harmless while every caller passes that same field, but confirm intent. */ +static int +phytium_dp_dpcd_set_lane_setting(struct phytium_dp_device *phytium_dp, uint8_t *train_set) +{ + int ret = 0; + + ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_TRAINING_LANE0_SET, + phytium_dp->train_set, 4); + if (ret < 0) { + DRM_ERROR("write DP_TRAINING_LANE0_SET 
fail: ret:%d\n", ret); + return ret; + } + + return 0; +} + +/* Read DP_LANE0_1_STATUS..adjust-request and fold the sink's requested swing/pre-emphasis into phytium_dp->train_set via phytium_get_adjust_train(). */ +static int +phytium_dp_dpcd_get_adjust_request(struct phytium_dp_device *phytium_dp, uint8_t lane_count) +{ + int ret = 0; + uint8_t link_status[DP_LINK_STATUS_SIZE]; + + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + goto failed; + } + phytium_get_adjust_train(phytium_dp, link_status, lane_count); + + return 0; +failed: + return ret; +} + +/* Set sink power state via DP_SET_POWER (no-op on DPCD < 1.1). Power-up (D0) is retried up to 3 times with 20ms sleeps since sinks may need time to wake. */ +void phytium_dp_dpcd_sink_dpms(struct phytium_dp_device *phytium_dp, int mode) +{ + int ret, i; + + if (phytium_dp->dpcd[DP_DPCD_REV] < 0x11) + return; + if (mode != DRM_MODE_DPMS_ON) { + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D3); + } else { + for (i = 0; i < 3; i++) { + ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); + if (ret == 1) + break; + msleep(20); + } + } + + if (ret != 1) + DRM_DEBUG_KMS("failed to %s sink power state\n", + mode == DRM_MODE_DPMS_ON ? 
"enable" : "disable"); +} + +/* DP link-training phase 1 (clock recovery): program source+sink link rate/lane count, start with zeroed drive settings and TPS1, then loop reading DP_LANE0_1_STATUS and applying the sink's requested swing/pre-emphasis. Gives up after 5 tries at the same voltage or once max swing has been reached, per the DP spec CR sequence. NOTE(review): "revorery" typo lives in a runtime debug string and is deliberately left untouched here. */ +static bool phytium_dp_link_training_clock_recovery(struct phytium_dp_device *phytium_dp) +{ + int ret; + unsigned char voltage, max_vswing_tries; + int voltage_tries; + + /* clear the test pattern */ + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count, + PHYTIUM_PHY_TP_NONE, NULL, 0); + + /* config source and sink's link rate and lane count */ + phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate); + ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->link_lane_count, + phytium_dp->link_rate); + if (ret < 0) { + DRM_NOTE("phytium_dp_dpcd_set_link failed(ret=%d)\n", ret); + return false; + } + + /* config source's voltage swing and pre-emphasis(103-106) */ + memset(phytium_dp->train_set, 0, sizeof(phytium_dp->train_set)); + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + + /* config train pattern */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1); + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return false; + } + + /* config sink's voltage swing and pre-emphasis(103-106) */ + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + return false; + } + + voltage_tries = 1; + max_vswing_tries = 0; + for (;;) { + unsigned char link_status[DP_LINK_STATUS_SIZE]; + + drm_dp_link_train_clock_recovery_delay(&phytium_dp->aux, phytium_dp->dpcd); + /* get link status 0x202-0x207 */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + return false; + } + + if (drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("clock revorery ok\n"); + 
return true; + } + + if (voltage_tries == 5) { + DRM_DEBUG_KMS("Same voltage tried 5 times\n"); + return false; + } + + if (max_vswing_tries == 1) { + DRM_DEBUG_KMS("Max Voltage Swing reached\n"); + return false; + } + + voltage = phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count); + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + return false; + } + + if ((phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) + ++voltage_tries; + else + voltage_tries = 1; + + if (phytium_dp->train_set[0] & DP_TRAIN_MAX_SWING_REACHED) + ++max_vswing_tries; + + DRM_DEBUG_KMS("try train_set:0x%x voltage_tries:%d max_vswing_tries:%d\n", + phytium_dp->train_set[0], voltage_tries, max_vswing_tries); + } +} + +/* Pick the best channel-EQ training pattern the sink supports: TPS4 > TPS3 > TPS2, with debug notes when a high link rate lacks the matching TPS capability. */ +static unsigned int phytium_dp_get_training_pattern(struct phytium_dp_device *phytium_dp) +{ + bool sink_tps3, sink_tps4; + + sink_tps4 = drm_dp_tps4_supported(phytium_dp->dpcd); + if (sink_tps4) + return DP_TRAINING_PATTERN_4; + else if (phytium_dp->link_rate == 810000) + DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n"); + + sink_tps3 = drm_dp_tps3_supported(phytium_dp->dpcd); + if (sink_tps3) + return DP_TRAINING_PATTERN_3; + else if (phytium_dp->link_rate >= 540000) + DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); + + return DP_TRAINING_PATTERN_2; +} + +/* DP link-training phase 2 (channel equalization): keep the CR drive settings, switch to the chosen TPS, then poll link status up to 5 times, re-checking CR and applying adjust requests until EQ succeeds. Returns true on success. */ +static bool phytium_dp_link_training_channel_equalization(struct phytium_dp_device *phytium_dp) +{ + unsigned int training_pattern; + int tries, ret; + unsigned char link_status[DP_LINK_STATUS_SIZE]; + bool channel_eq = false; + + /* config source and sink's voltage swing and pre-emphasis(103-106), from clock 
recovery */ + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + return channel_eq; + } + + /* config source and sink's train_pattern x */ + training_pattern = phytium_dp_get_training_pattern(phytium_dp); + phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, training_pattern); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return channel_eq; + } + + for (tries = 0; tries < 5; tries++) { + drm_dp_link_train_channel_eq_delay(&phytium_dp->aux, phytium_dp->dpcd); + + /* get link status 0x202-0x207 */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + break; + } + + /* Make sure clock is still ok */ + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("CR check failed, cannot continue channel equalization\n"); + break; + } + + if (drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + channel_eq = true; + DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); + break; + } + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count); + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + break; + } + } + + /* Try 5 times, else fail and try at lower BW */ + if (tries == 5) + DRM_DEBUG_KMS("Channel equalization failed 5 times\n"); + + return channel_eq; +} + +/* Deferred retry path: mark the connector's link status BAD under the mode_config lock and emit a hotplug event so userspace re-probes with the fallback parameters. */ +static void phytium_dp_train_retry_work_fn(struct work_struct *work) +{ + struct phytium_dp_device *phytium_dp = train_retry_to_dp_device(work); + struct drm_connector *connector; + + connector = &phytium_dp->connector; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + mutex_lock(&connector->dev->mode_config.mutex); + drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); + mutex_unlock(&connector->dev->mode_config.mutex); + drm_kms_helper_hotplug_event(connector->dev); +} + +/* return index of rate in rates array, or -1 if not found */ +static int phytium_dp_rate_index(const int *rates, int len, int rate) +{ + int i; + + for (i = 0; i < len; i++) + if (rate == rates[i]) + return i; + + return -1; +} + +/* Choose fallback link parameters after a failed training. eDP: keep the same parameters and count a retry. DP: first step down the common link rate; once at the lowest rate, restore max rate and halve the lane count; when no reduction is left, reset to maximums, count a retry, and return -1. */ +int phytium_dp_get_link_train_fallback_values(struct phytium_dp_device *phytium_dp) +{ + int index, ret = 0; + + if (phytium_dp->is_edp) { + phytium_dp->train_retry_count++; + DRM_INFO("Retrying Link training for eDP(%d) with same parameters\n", + phytium_dp->port); + goto out; + } else { + index = phytium_dp_rate_index(phytium_dp->common_rates, + phytium_dp->num_common_rates, + phytium_dp->link_rate); + if (index > 0) { + phytium_dp->link_rate = phytium_dp->common_rates[index - 1]; + } else if (phytium_dp->link_lane_count > 1) { + phytium_dp->link_rate = phytium_dp->max_link_rate; + 
phytium_dp->link_lane_count = phytium_dp->link_lane_count >> 1; + } else { + phytium_dp->train_retry_count++; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_INFO("Retrying Link training for DP(%d) with maximal parameters\n", + phytium_dp->port); + ret = -1; + } + } + +out: + return ret; +} + +/* End training: write DP_TRAINING_PATTERN_DISABLE to source and sink. */ +static int +phytium_dp_stop_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret; + + /* config source and sink's train_pattern x: DP_TRAINING_PATTERN_DISABLE */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + if (ret < 0) { + DRM_NOTE("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return ret; + } + + return 0; +} + +/* Full link training: cycle output/input-source/video off, re-enable, power-cycle the sink, then run CR and channel-EQ. On success returns 0 (trigger_train_fail is a debug knob that forces the failure path). On failure, computes fallback parameters and schedules the retry worker while train_retry_count < 5; always returns -1 from the failure path. */ +int phytium_dp_start_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + + phytium_dp_hw_disable_output(phytium_dp); + phytium_dp_hw_disable_input_source(phytium_dp); + phytium_dp_hw_disable_video(phytium_dp); + phytium_dp_hw_enable_input_source(phytium_dp); + phytium_dp_hw_enable_output(phytium_dp); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_OFF); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_ON); + + if (!phytium_dp_link_training_clock_recovery(phytium_dp)) + goto failure_handling; + + if (!phytium_dp_link_training_channel_equalization(phytium_dp)) + goto failure_handling; + + ret = phytium_dp_stop_link_train(phytium_dp); + if (ret < 0) { + DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret); + goto out; + } + + if (phytium_dp->trigger_train_fail) { + phytium_dp->trigger_train_fail--; + goto failure_handling; + } + phytium_dp->train_retry_count = 0; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Pass at Link Rate = %d, Lane count = %d\n", + phytium_dp->connector.base.id, + phytium_dp->connector.name, phytium_dp->link_rate, + phytium_dp->link_lane_count); + + return 0; + +failure_handling: + 
DRM_INFO("[CONNECTOR:%d:%s] Link Training failed at Link Rate = %d, Lane count = %d", + phytium_dp->connector.base.id, + phytium_dp->connector.name, + phytium_dp->link_rate, phytium_dp->link_lane_count); + + ret = phytium_dp_stop_link_train(phytium_dp); + if (ret < 0) { + DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret); + goto out; + } + + phytium_dp_get_link_train_fallback_values(phytium_dp); + if (phytium_dp->train_retry_count < 5) + schedule_work(&phytium_dp->train_retry_work); + else + DRM_ERROR("DP(%d) Link Training Unsuccessful, and stop Training\n", + phytium_dp->port); + +out: + return -1; +} + +/* Decide whether retraining is needed: true if link status can't be read, the cached rate/lane count is invalid, CR or channel-EQ no longer holds, or the transmitter output is off. */ +static bool phytium_dp_needs_link_retrain(struct phytium_dp_device *phytium_dp) +{ + unsigned char link_status[DP_LINK_STATUS_SIZE]; + int ret = 0; + + /* get link status 0x202-0x207 */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + return true; + } + + if ((phytium_dp->link_rate == 0) || (phytium_dp->link_lane_count == 0)) { + DRM_DEBUG_KMS("link_rate(%d) or lane_count(%d) is invalid\n", + phytium_dp->link_rate, phytium_dp->link_lane_count); + return true; + } + + /* Make sure clock is still ok */ + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("Clock recovery check failed\n"); + return true; + } + + if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("Channel EQ check failed\n"); + return true; + } + + if (!phytium_dp_hw_output_is_enable(phytium_dp)) { + DRM_DEBUG_KMS("check DP output enable failed\n"); + return true; + } + return false; +} + +/* Read DP_DEVICE_SERVICE_IRQ_VECTOR; true only when exactly one byte was read. */ +static bool +phytium_dp_get_sink_irq(struct phytium_dp_device *phytium_dp, u8 *sink_irq_vector) +{ + return drm_dp_dpcd_readb(&phytium_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, + sink_irq_vector) == 1; +} + +/* Handle an automated PHY-pattern compliance test request: read the requested pattern (plus the 80-bit custom payload when asked for), mirror the requested link rate/lane count and drive settings to source and sink, then emit the pattern. Returns DP_TEST_ACK or DP_TEST_NAK. */ +static uint8_t phytium_dp_autotest_phy_pattern(struct phytium_dp_device *phytium_dp) +{ + union 
phytium_phy_tp phytium_phy_tp; + int ret; + unsigned char test_80_bit_pattern[ + (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - + DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0}; + unsigned char test_pattern; + unsigned int offset; + + offset = DP_PHY_TEST_PATTERN; + + ret = drm_dp_dpcd_read(&phytium_dp->aux, offset, + &phytium_phy_tp.raw, + sizeof(phytium_phy_tp)); + if (ret <= 0) { + DRM_DEBUG_KMS("Could not read DP_TEST_PHY_PATTERN\n"); + goto failed; + } + + test_pattern = phytium_phy_tp.bits.PATTERN; + + if (test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) { + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0, + test_80_bit_pattern, + sizeof(test_80_bit_pattern)); + if (ret <= 0) { + DRM_DEBUG_KMS("Could not read DP_TEST_PHY_PATTERN\n"); + goto failed; + } + } + + /* config source and sink's link rate and link count */ + ret = phytium_dp_dpcd_get_tp_link(phytium_dp, &phytium_dp->compliance.test_lane_count, + &phytium_dp->compliance.test_link_rate); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_get_tp_link fail: ret:%d\n", ret); + goto failed; + } + + phytium_dp_hw_set_link(phytium_dp, phytium_dp->compliance.test_lane_count, + phytium_dp->compliance.test_link_rate); + ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->compliance.test_lane_count, + phytium_dp->compliance.test_link_rate); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_link fail: ret:%d\n", ret); + goto failed_dpcd_set_link; + } + + /* config source and sink's lane setting: voltage swing and pre-emphasis */ + ret = phytium_dp_dpcd_get_adjust_request(phytium_dp, + phytium_dp->compliance.test_lane_count); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_get_adjust_request fail: ret:%d\n", ret); + goto failed_dpcd_get_adjust_request; + } + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->compliance.test_link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting 
fail: ret:%d\n", ret); + goto failed_dpcd_set_lane_setting; + } + + /* config test pattern */ + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count, + test_pattern, test_80_bit_pattern, + sizeof(test_80_bit_pattern)); + ret = phytium_dp_dpcd_set_test_pattern(phytium_dp, test_pattern); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_test_pattern fail: ret:%d\n", ret); + goto failed_dpcd_set_tp; + } + + return DP_TEST_ACK; + +failed_dpcd_set_tp: + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count, + PHYTIUM_PHY_TP_NONE, test_80_bit_pattern, + sizeof(test_80_bit_pattern)); +failed_dpcd_set_link: +failed_dpcd_set_lane_setting: +failed_dpcd_get_adjust_request: +failed: + return DP_TEST_NAK; +} + +/* Service DP_TEST_REQUEST from the sink: only the PHY test pattern is handled; the other automated tests are NAKed. Always writes the result to DP_TEST_RESPONSE. */ +static void phytium_dp_handle_test_request(struct phytium_dp_device *phytium_dp) +{ + uint8_t response = DP_TEST_NAK; + uint8_t request = 0; + int status; + + status = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_REQUEST, &request); + if (status <= 0) { + DRM_DEBUG_KMS("Could not read test request from sink\n"); + goto update_status; + } + + switch (request) { + case DP_TEST_LINK_TRAINING: + case DP_TEST_LINK_VIDEO_PATTERN: + case DP_TEST_LINK_EDID_READ: + DRM_DEBUG_KMS("Not support test request '%02x'\n", request); + response = DP_TEST_NAK; + break; + case DP_TEST_LINK_PHY_TEST_PATTERN: + DRM_DEBUG_KMS("PHY_PATTERN test requested\n"); + response = phytium_dp_autotest_phy_pattern(phytium_dp); + break; + default: + DRM_DEBUG_KMS("Invalid test request '%02x'\n", request); + break; + } + +update_status: + status = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TEST_RESPONSE, response); + if (status <= 0) + DRM_DEBUG_KMS("Could not write test response to sink\n"); + +} + +/* Long HPD pulse (connect/disconnect) handler. eDP is always connected. For DP: if the link is still good, report connected; otherwise probe DPCD, recompute max link parameters, retrain, and (after a 2ms settle) re-enable video if it was on. Returns a drm_connector_status value (declared int — NOTE(review) enum would be clearer). */ +static int phytium_dp_long_pulse(struct drm_connector *connector, bool hpd_raw_state) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + enum drm_connector_status status = connector->status; + bool video_enable = false; + 
uint32_t index = 0; + + if (phytium_dp->is_edp) + status = connector_status_connected; + else if (hpd_raw_state) { + if (!phytium_dp_needs_link_retrain(phytium_dp)) { + status = connector_status_connected; + goto out; + } + } else { + status = connector_status_disconnected; + goto out; + } + + if (!phytium_dp->is_edp) { + status = phytium_dp_detect_dpcd(phytium_dp); + if (status == connector_status_disconnected) + goto out; + + index = phytium_dp->num_common_rates-1; + phytium_dp->max_link_rate = phytium_dp->common_rates[index]; + phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", + phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); + + video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + + if (video_enable) { + mdelay(2); + phytium_dp_hw_enable_video(phytium_dp); + } + } + +out: + return status; +} + +/* Short HPD pulse (IRQ) handler: ack the sink's service-IRQ vector, dispatch automated-test requests, then retrain (preserving video state) if the link has degraded. */ +static int phytium_dp_short_pulse(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + enum drm_connector_status status = connector->status; + u8 sink_irq_vector = 0; + bool video_enable = false; + + /* handle the test pattern */ + if (phytium_dp_get_sink_irq(phytium_dp, &sink_irq_vector) && + sink_irq_vector != 0) { + drm_dp_dpcd_writeb(&phytium_dp->aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, + sink_irq_vector); + if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) + phytium_dp_handle_test_request(phytium_dp); + if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) + DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); + } + if (!phytium_dp_needs_link_retrain(phytium_dp)) { + status = connector_status_connected; + goto out; + } + + video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + if (video_enable) 
{ + mdelay(2); + phytium_dp_hw_enable_video(phytium_dp); + } + +out: + return status; +} + +/* Poll-style hotplug pass: re-detect every non-forced connector under the mode_config lock and fire one hotplug event if any status changed. */ +void phytium_dp_hpd_poll_handler(struct phytium_display_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + enum drm_connector_status old_status; + bool changed = false; + + mutex_lock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("running encoder hotplug poll functions\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->force) + continue; + old_status = connector->status; + connector->status = drm_helper_probe_detect(connector, NULL, false); + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + old, new); + changed = true; + } + } + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); +} + +/* Device-wide HPD irq setup. On enable, first scan (under the hotplug spinlock) for HPD activity that arrived while irqs were masked and run one poll pass to catch it, then program every encoder's HPD mask. */ +void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable) +{ + struct phytium_dp_device *phytium_dp; + struct drm_encoder *encoder; + struct phytium_display_private *priv = dev->dev_private; + bool handler = false; + bool hpd_raw_state_old = false; + + /* We might have missed any hotplugs that happened, so polling and handler */ + if (enable) { + spin_lock_irq(&priv->hotplug_irq_lock); + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (!phytium_dp->dp_hpd_state.hpd_irq_enable) { + hpd_raw_state_old = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state + || phytium_dp->dp_hpd_state.hpd_irq_state + || (hpd_raw_state_old != phytium_dp->dp_hpd_state.hpd_raw_state)) { + handler = true; + } + } + } + 
spin_unlock_irq(&priv->hotplug_irq_lock); + if (handler) + phytium_dp_hpd_poll_handler(priv); + } + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + phytium_dp_hw_hpd_irq_setup(phytium_dp, enable); + } +} + +/* Bottom half of HPD handling (scheduled from the irq handler): re-detect all connectors, emit a hotplug event on change, then re-enable HPD interrupts. */ +void phytium_dp_hpd_work_func(struct work_struct *work) +{ + struct phytium_display_private *priv = + container_of(work, struct phytium_display_private, hotplug_work); + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + enum drm_connector_status old_status; + bool changed = false; + + mutex_lock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("running encoder hotplug work functions\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->force) + continue; + old_status = connector->status; + connector->status = drm_helper_probe_detect(connector, NULL, false); + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + old, new); + changed = true; + } + } + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); + + phytium_dp_hpd_irq_setup(dev, true); +} + +/* HPD hard-irq handler: under the hotplug spinlock, latch per-port HPD state; if any port reported an event/irq, mask HPD interrupts and defer to hotplug_work. Returns IRQ_HANDLED only when work was scheduled. */ +irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv) +{ + struct drm_encoder *encoder = NULL; + struct phytium_dp_device *phytium_dp = NULL; + struct drm_device *dev = priv->dev; + bool handler = false; + + spin_lock(&priv->hotplug_irq_lock); + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (phytium_dp->dp_hpd_state.hpd_irq_enable) { + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state + || phytium_dp->dp_hpd_state.hpd_irq_state) { + 
handler = true; + } + } + } + spin_unlock(&priv->hotplug_irq_lock); + + if (handler) { + phytium_dp_hpd_irq_setup(dev, false); + schedule_work(&priv->hotplug_work); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + + +/* Cache whether the sink advertises no-AUX-handshake (fast) link training via DP_MAX_DOWNSPREAD bit. */ +static void phytium_dp_fast_link_train_detect(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->fast_train_support = !!(phytium_dp->dpcd[DP_MAX_DOWNSPREAD] + & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); + DRM_DEBUG_KMS("fast link training %s\n", + phytium_dp->fast_train_support ? "supported" : "unsupported"); +} + +/* Fast (no AUX handshake) link training: drive TPS1 then the best TPS with fixed delays instead of DPCD polling; when dc_fast_training_check is set, verify CR/EQ from link status afterwards. NOTE(review): function continues beyond this patch chunk — body below is truncated here. */ +bool phytium_dp_fast_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + unsigned int training_pattern; + + /* clear the test pattern */ + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count, + PHYTIUM_PHY_TP_NONE, NULL, 0); + + /* config source and sink's link rate and lane count */ + phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate); + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + + /* config train pattern */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1); + usleep_range(500, 600); + + training_pattern = phytium_dp_get_training_pattern(phytium_dp); + phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); + usleep_range(500, 600); + + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + if (dc_fast_training_check) { + unsigned char link_status[DP_LINK_STATUS_SIZE]; + + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + return false; + } + + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("check clock recovery failed\n"); + return false; + } + + if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + 
DRM_DEBUG_KMS("check channel equalization failed\n"); + return false; + } + } + + return true; +} + +static enum drm_connector_status +phytium_connector_detect(struct drm_connector *connector, bool force) +{ + enum drm_connector_status status = connector->status; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + bool hpd_event_state, hpd_irq_state, hpd_raw_state; + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + bool plugged = true; + + spin_lock_irq(&priv->hotplug_irq_lock); + hpd_event_state = phytium_dp->dp_hpd_state.hpd_event_state; + hpd_irq_state = phytium_dp->dp_hpd_state.hpd_irq_state; + hpd_raw_state = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp->dp_hpd_state.hpd_event_state = false; + phytium_dp->dp_hpd_state.hpd_irq_state = false; + spin_unlock_irq(&priv->hotplug_irq_lock); + + if (hpd_event_state) + status = phytium_dp_long_pulse(connector, hpd_raw_state); + + if (hpd_irq_state) + status = phytium_dp_short_pulse(connector); + + if (status == connector_status_unknown) + status = connector_status_disconnected; + + if ((!phytium_dp->is_edp) && (!hpd_raw_state)) + status = connector_status_disconnected; + + if (connector->status != status) { + if ((status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; + else + plugged = false; + + handle_plugged_change(phytium_dp, plugged); + } + + return status; +} + +static void +phytium_connector_destroy(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + drm_connector_cleanup(connector); + kfree(phytium_dp); +} + +static int +phytium_dp_connector_register(struct drm_connector *connector) +{ + int ret; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + phytium_dp_aux_init(phytium_dp); + if (phytium_dp->is_edp) { + phytium_edp_init_connector(phytium_dp); + ret = phytium_edp_backlight_device_register(phytium_dp); 
+ if (ret) + DRM_ERROR("failed to register port(%d) backlight device(ret=%d)\n", + phytium_dp->port, ret); + } + + ret = phytium_debugfs_connector_add(connector); + if (ret) + DRM_ERROR("failed to register phytium connector debugfs(ret=%d)\n", ret); + + return 0; +} + +static void +phytium_dp_connector_unregister(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (phytium_dp->is_edp) { + phytium_edp_backlight_device_unregister(phytium_dp); + phytium_edp_fini_connector(phytium_dp); + } + drm_dp_aux_unregister(&phytium_dp->aux); +} + +static const struct drm_connector_funcs phytium_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = phytium_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = phytium_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .late_register = phytium_dp_connector_register, + .early_unregister = phytium_dp_connector_unregister, +}; + +static void phytium_dp_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + struct phytium_dp_device *dp = encoder_to_dp_device(encoder); + + drm_mode_copy(&dp->mode, adjusted); +} + +static void phytium_edp_panel_poweron(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_poweron(&phytium_dp->panel); +} + +static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_poweroff(&phytium_dp->panel); +} + +static void phytium_edp_backlight_on(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_enable_backlight(&phytium_dp->panel); +} + +static void phytium_edp_backlight_off(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_disable_backlight(&phytium_dp->panel); +} + +static void phytium_encoder_disable(struct drm_encoder 
*encoder) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + + if (phytium_dp->is_edp) + phytium_edp_backlight_off(phytium_dp); + + phytium_dp_hw_disable_video(phytium_dp); + + mdelay(50); + + if (phytium_dp->is_edp) + phytium_edp_panel_poweroff(phytium_dp); +} + +void phytium_dp_adjust_link_train_parameter(struct phytium_dp_device *phytium_dp) +{ + struct drm_display_info *display_info = &phytium_dp->connector.display_info; + unsigned long link_bw, date_rate = 0, bs_limit, bs_request; + int rate = 0; + + bs_request = phytium_dp->mode.crtc_htotal/(phytium_dp->mode.crtc_clock/1000); + date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; + + for (;;) { + bs_limit = 8192 / (phytium_dp->link_rate/1000); + link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; + rate = 10 * date_rate / link_bw; + DRM_DEBUG_KMS("adjust link rate(%d), lane count(%d)\n", + phytium_dp->link_rate, phytium_dp->link_lane_count); + DRM_DEBUG_KMS("for crtc_clock(%d) bs_request(%ld) bs_limit(%ld) rate(%d)\n", + phytium_dp->mode.crtc_clock, bs_request, bs_limit, rate); + if ((link_dynamic_adjust && (bs_request < bs_limit) && rate < 10) || + ((!link_dynamic_adjust) && (rate < 10))) + break; + phytium_dp_get_link_train_fallback_values(phytium_dp); + } + + DRM_DEBUG_KMS("Try link training at Link Rate = %d, Lane count = %d\n", + phytium_dp->link_rate, phytium_dp->link_lane_count); +} + +static void phytium_encoder_enable(struct drm_encoder *encoder) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + int ret = 0; + + phytium_dp_hw_disable_video(phytium_dp); + + if (phytium_dp->is_edp) { + phytium_edp_panel_poweron(phytium_dp); + if (phytium_dp->fast_train_support) + phytium_dp_fast_link_train(phytium_dp); + else + ret = phytium_dp_start_link_train(phytium_dp); + mdelay(2); + phytium_dp_fast_link_train_detect(phytium_dp); + } else { + phytium_dp_adjust_link_train_parameter(phytium_dp); + ret = 
phytium_dp_start_link_train(phytium_dp); + mdelay(2); + } + + phytium_dp_hw_config_video(phytium_dp); + if (ret == 0) { + phytium_dp_hw_enable_video(phytium_dp); + if (phytium_dp->has_audio) + phytium_dp_hw_enable_audio(phytium_dp); + } + + if (phytium_dp->is_edp) + phytium_edp_backlight_on(phytium_dp); + +} + +enum drm_mode_status +phytium_encoder_mode_valid(struct drm_encoder *encoder, const struct drm_display_mode *mode) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + struct drm_display_info *display_info = &phytium_dp->connector.display_info; + unsigned int requested, actual; + + switch (display_info->bpc) { + case 10: + case 6: + case 8: + break; + default: + DRM_INFO("not support bpc(%d)\n", display_info->bpc); + display_info->bpc = 8; + break; + } + + if ((display_info->color_formats & DRM_COLOR_FORMAT_RGB444) == 0) { + DRM_INFO("not support color_format(%d)\n", display_info->color_formats); + display_info->color_formats = DRM_COLOR_FORMAT_RGB444; + } + + requested = mode->clock * display_info->bpc * 3 / 1000; + actual = phytium_dp->max_link_rate * phytium_dp->max_link_lane_count / 100; + actual = actual * 8 / 10; + if (requested >= actual) { + DRM_DEBUG_KMS("requested=%d, actual=%d, clock=%d\n", requested, actual, + mode->clock); + return MODE_CLOCK_HIGH; + } + + if (dc_fake_mode_enable && + (phytium_dp->native_mode.clock == mode->clock) && + (phytium_dp->native_mode.htotal == mode->htotal) && + (phytium_dp->native_mode.vtotal == mode->vtotal)) + return MODE_OK; + + if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) + return MODE_BAD_HVALUE; + + if ((mode->hdisplay == 1024) && (mode->clock > 78000)) + return MODE_BAD_HVALUE; + + if ((mode->hdisplay < 640) || (mode->vdisplay < 480)) + return MODE_BAD_HVALUE; + + return MODE_OK; +} + +static const struct drm_encoder_helper_funcs phytium_encoder_helper_funcs = { + .mode_set = phytium_dp_encoder_mode_set, + .disable = phytium_encoder_disable, + .enable = 
phytium_encoder_enable,
	.mode_valid = phytium_encoder_mode_valid,
};

/* Encoder teardown: tear down the audio codec device before the encoder
 * itself is removed from the DRM core.
 */
void phytium_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder);

	phytium_dp_audio_codec_fini(phytium_dp);
	drm_encoder_cleanup(encoder);
}

static const struct drm_encoder_funcs phytium_encoder_funcs = {
	.destroy = phytium_dp_encoder_destroy,
};

/* Audio M/N divider table, one row per (sample_rate, link_rate) pair.
 * Initializer order follows struct dp_audio_n_m: { sample_rate, link_rate,
 * m, n }.  Rows are grouped by link rate (162000, 270000, 540000, 810000
 * kHz — RBR/HBR/HBR2/HBR3).  NOTE(review): values look like the DP spec's
 * Maud/Naud table for async clock mode — confirm against the DP 1.4 spec.
 */
static const struct dp_audio_n_m phytium_dp_audio_n_m[] = {
	{ 32000, 162000, 1024, 10125 },
	{ 44100, 162000, 784, 5625 },
	{ 48000, 162000, 512, 3375 },
	{ 64000, 162000, 2048, 10125 },
	{ 88200, 162000, 1568, 5625 },
	{ 96000, 162000, 1024, 3375 },
	{ 128000, 162000, 4096, 10125 },
	{ 176400, 162000, 3136, 5625 },
	{ 192000, 162000, 2048, 3375 },
	{ 32000, 270000, 1024, 16875 },
	{ 44100, 270000, 784, 9375 },
	{ 48000, 270000, 512, 5625 },
	{ 64000, 270000, 2048, 16875 },
	{ 88200, 270000, 1568, 9375 },
	{ 96000, 270000, 1024, 5625 },
	{ 128000, 270000, 4096, 16875 },
	{ 176400, 270000, 3136, 9375 },
	{ 192000, 270000, 2048, 5625 },
	{ 32000, 540000, 1024, 33750 },
	{ 44100, 540000, 784, 18750 },
	{ 48000, 540000, 512, 11250 },
	{ 64000, 540000, 2048, 33750 },
	{ 88200, 540000, 1568, 18750 },
	{ 96000, 540000, 1024, 11250 },
	{ 128000, 540000, 4096, 33750 },
	{ 176400, 540000, 3136, 18750 },
	{ 192000, 540000, 2048, 11250 },
	{ 32000, 810000, 1024, 50625 },
	{ 44100, 810000, 784, 28125 },
	{ 48000, 810000, 512, 16875 },
	{ 64000, 810000, 2048, 50625 },
	{ 88200, 810000, 1568, 28125 },
	{ 96000, 810000, 1024, 16875 },
	{ 128000, 810000, 4096, 50625 },
	{ 176400, 810000, 3136, 28125 },
	{ 192000, 810000, 2048, 16875 },
};

/* hdmi-codec .get_eld hook: copy the connector's cached ELD into @buf,
 * bounded by both the ELD buffer size and the caller-supplied @len.
 */
static int phytium_dp_audio_get_eld(struct device *dev, void *data, u8 *buf, size_t len)
{
	struct phytium_dp_device *phytium_dp = data;

	memcpy(buf, phytium_dp->connector.eld, min(sizeof(phytium_dp->connector.eld), len));

	return 0;
}

static int phytium_dp_audio_mute_stream(struct device *dev,
void *data, bool enable, int direction) +{ + struct phytium_dp_device *phytium_dp = data; + + phytium_dp_hw_audio_digital_mute(phytium_dp, enable); + + return 0; +} + +const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(phytium_dp_audio_n_m); i++) { + if (sample_rate == phytium_dp_audio_n_m[i].sample_rate + && link_rate == phytium_dp_audio_n_m[i].link_rate) + return &phytium_dp_audio_n_m[i]; + } + + return NULL; +} + +static int phytium_dp_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct phytium_dp_device *phytium_dp = data; + int ret = 0; + struct audio_info audio_info = { + .sample_width = params->sample_width, + .sample_rate = params->sample_rate, + .channels = params->channels, + }; + + if (daifmt->fmt != HDMI_I2S) { + DRM_ERROR("invalid audio format %d\n", daifmt->fmt); + ret = -EINVAL; + goto failed; + } + + ret = phytium_dp_hw_audio_hw_params(phytium_dp, audio_info); + +failed: + return ret; +} + +static void phytium_dp_audio_shutdown(struct device *dev, void *data) +{ + struct phytium_dp_device *phytium_dp = data; + + phytium_dp_hw_audio_shutdown(phytium_dp); +} + +static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged) +{ + if (phytium_dp->plugged_cb && phytium_dp->codec_dev) + phytium_dp->plugged_cb(phytium_dp->codec_dev, plugged); +} + +static int phytium_dp_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct phytium_dp_device *phytium_dp = data; + bool plugged; + + phytium_dp->plugged_cb = fn; + phytium_dp->codec_dev = codec_dev; + + if ((phytium_dp->connector.status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; + else + plugged = false; + + handle_plugged_change(phytium_dp, plugged); + return 0; +} + + +static const struct hdmi_codec_ops phytium_audio_codec_ops = { + 
.hw_params = phytium_dp_audio_hw_params, + .audio_shutdown = phytium_dp_audio_shutdown, + .mute_stream = phytium_dp_audio_mute_stream, + .get_eld = phytium_dp_audio_get_eld, + .hook_plugged_cb = phytium_dp_audio_hook_plugged_cb, +}; + +static int phytium_dp_audio_codec_init(struct phytium_dp_device *phytium_dp) +{ + struct device *dev = phytium_dp->dev->dev; + struct hdmi_codec_pdata codec_data = { + .i2s = 1, + .spdif = 0, + .ops = &phytium_audio_codec_ops, + .max_i2s_channels = 2, + .data = phytium_dp, + }; + + phytium_dp->audio_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, + codec_id, + &codec_data, sizeof(codec_data)); + if (!PTR_ERR_OR_ZERO(phytium_dp->audio_pdev)) + codec_id += 1; + + return PTR_ERR_OR_ZERO(phytium_dp->audio_pdev); +} + +static void phytium_dp_audio_codec_fini(struct phytium_dp_device *phytium_dp) +{ + + if (!PTR_ERR_OR_ZERO(phytium_dp->audio_pdev)) + platform_device_unregister(phytium_dp->audio_pdev); + phytium_dp->audio_pdev = NULL; + codec_id -= 1; +} + +static long phytium_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) +{ + struct phytium_dp_device *phytium_dp = container_of(aux, struct phytium_dp_device, aux); + long ret = 0; + + DRM_DEBUG_KMS("msg->size: 0x%lx\n", msg->size); + + if (WARN_ON(msg->size > 16)) + return -E2BIG; + + switch (msg->request & ~DP_AUX_I2C_MOT) { + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE: + case DP_AUX_I2C_WRITE_STATUS_UPDATE: + ret = phytium_dp_hw_aux_transfer_write(phytium_dp, msg); + DRM_DEBUG_KMS("aux write reply:0x%x ret:0x%lx\n", msg->reply, ret); + break; + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ: + ret = phytium_dp_hw_aux_transfer_read(phytium_dp, msg); + DRM_DEBUG_KMS("aux read ret:0x%lx\n", ret); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp) +{ + drm_dp_aux_init(&phytium_dp->aux); + phytium_dp->aux.name = kasprintf(GFP_KERNEL, "dp-%d", phytium_dp->port); 
+ phytium_dp->aux.transfer = phytium_dp_aux_transfer; +} + +int phytium_get_encoder_crtc_mask(struct phytium_dp_device *phytium_dp, int port) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int i, mask = 0; + + for_each_pipe_masked(priv, i) { + if (i != port) + mask++; + else + break; + } + + return BIT(mask); +} + +static bool phytium_dp_is_edp(struct phytium_dp_device *phytium_dp, int port) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + + if (priv->info.edp_mask & BIT(port)) + return true; + else + return false; +} + +static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp) +{ + enum drm_connector_status status; + struct drm_connector *connector = &phytium_dp->connector; + + phytium_edp_panel_poweron(phytium_dp); + + status = phytium_dp_detect_dpcd(phytium_dp); + if (status == connector_status_disconnected) { + DRM_ERROR("detect edp dpcd failed\n"); + return false; + } + + phytium_dp->edp_edid = drm_get_edid(connector, &phytium_dp->aux.ddc); + if (!phytium_dp->edp_edid) { + DRM_ERROR("get edp edid failed\n"); + return false; + } + + connector->status = status; + phytium_dp->max_link_rate = phytium_dp->common_rates[phytium_dp->num_common_rates-1]; + phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", + phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); + + return true; +} + +static void phytium_edp_fini_connector(struct phytium_dp_device *phytium_dp) +{ + kfree(phytium_dp->edp_edid); + + phytium_dp->edp_edid = NULL; + phytium_edp_panel_poweroff(phytium_dp); +} + +int phytium_dp_resume(struct drm_device *drm_dev) +{ + struct phytium_dp_device *phytium_dp; + struct drm_encoder *encoder; + int ret = 0; + + 
drm_for_each_encoder(encoder, drm_dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (phytium_dp->is_edp) { + phytium_edp_backlight_off(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } + ret = phytium_dp_hw_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); + return -EIO; + } + } + + return 0; +} + +int phytium_dp_init(struct drm_device *dev, int port) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_dp_device *phytium_dp = NULL; + int ret, type; + + DRM_DEBUG_KMS("%s: port %d\n", __func__, port); + phytium_dp = kzalloc(sizeof(*phytium_dp), GFP_KERNEL); + if (!phytium_dp) { + ret = -ENOMEM; + goto failed_malloc_dp; + } + + phytium_dp->dev = dev; + phytium_dp->port = port; + + if (IS_PX210(priv)) { + px210_dp_func_register(phytium_dp); + priv->dp_reg_base[port] = PX210_DP_BASE(port); + priv->phy_access_base[port] = PX210_PHY_ACCESS_BASE(port); + } else if (IS_PE220X(priv)) { + pe220x_dp_func_register(phytium_dp); + priv->dp_reg_base[port] = PE220X_DP_BASE(port); + priv->phy_access_base[port] = PE220X_PHY_ACCESS_BASE(port); + } + + if (phytium_dp_is_edp(phytium_dp, port)) { + phytium_dp->is_edp = true; + type = DRM_MODE_CONNECTOR_eDP; + phytium_dp_panel_init_backlight_funcs(phytium_dp); + phytium_edp_backlight_off(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } else { + phytium_dp->is_edp = false; + type = DRM_MODE_CONNECTOR_DisplayPort; + } + + ret = phytium_dp_hw_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); + goto failed_init_dp; + } + + ret = drm_encoder_init(dev, &phytium_dp->encoder, + &phytium_encoder_funcs, + DRM_MODE_ENCODER_TMDS, "DP %d", port); + if (ret) { + DRM_ERROR("failed to initialize encoder with drm\n"); + goto failed_encoder_init; + } + drm_encoder_helper_add(&phytium_dp->encoder, &phytium_encoder_helper_funcs); + phytium_dp->encoder.possible_crtcs = phytium_get_encoder_crtc_mask(phytium_dp, port); + 
+ phytium_dp->connector.dpms = DRM_MODE_DPMS_OFF; + phytium_dp->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + ret = drm_connector_init(dev, &phytium_dp->connector, &phytium_connector_funcs, + type); + if (ret) { + DRM_ERROR("failed to initialize connector with drm\n"); + goto failed_connector_init; + } + drm_connector_helper_add(&phytium_dp->connector, &phytium_connector_helper_funcs); + drm_connector_attach_encoder(&phytium_dp->connector, &phytium_dp->encoder); + + ret = phytium_dp_audio_codec_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize audio codec\n"); + goto failed_connector_init; + } + + phytium_dp->train_retry_count = 0; + INIT_WORK(&phytium_dp->train_retry_work, phytium_dp_train_retry_work_fn); + drm_connector_register(&phytium_dp->connector); + + return 0; +failed_connector_init: +failed_encoder_init: +failed_init_dp: + kfree(phytium_dp); +failed_malloc_dp: + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_dp.h b/drivers/gpu/drm/phytium/phytium_dp.h new file mode 100644 index 000000000000..ada3f42a6868 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_dp.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_DP_H__ +#define __PHYTIUM_DP_H__ + +#include +#include +#include +#include + +struct phytium_dp_device; + +#include "phytium_panel.h" + +struct audio_info { + int sample_rate; + int channels; + int sample_width; +}; + +struct dp_audio_n_m { + int sample_rate; + int link_rate; + u16 m; + u16 n; +}; + +struct phytium_dp_compliance { + unsigned long test_type; + uint32_t test_link_rate; + u8 test_lane_count; + bool test_active; + u8 reserve[2]; +}; + +struct phytium_dp_func { + uint8_t (*dp_hw_get_source_lane_count)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_reset)(struct phytium_dp_device *phytium_dp); + bool (*dp_hw_spread_is_enable)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_set_backlight)(struct phytium_dp_device *phytium_dp, uint32_t level); + uint32_t (*dp_hw_get_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_disable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_enable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweroff_panel)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweron_panel)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_init_phy)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_set_phy_lane_setting)(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set); + int (*dp_hw_set_phy_lane_and_rate)(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate); +}; + +struct phytium_dp_hpd_state { + bool hpd_event_state; + bool hpd_irq_state; + bool hpd_raw_state; + bool hpd_irq_enable; +}; + +struct phytium_dp_device { + struct drm_device *dev; + struct drm_encoder encoder; + struct drm_connector connector; + int port; + struct drm_display_mode mode; + bool link_trained; + bool detect_done; + bool is_edp; + bool reserve0; + struct drm_dp_aux aux; + unsigned char dpcd[DP_RECEIVER_CAP_SIZE]; + uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; + unsigned char 
downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + unsigned char sink_count; + + int *source_rates; + int num_source_rates; + int sink_rates[DP_MAX_SUPPORTED_RATES]; + int num_sink_rates; + int common_rates[DP_MAX_SUPPORTED_RATES]; + int num_common_rates; + + int source_max_lane_count; + int sink_max_lane_count; + int common_max_lane_count; + + int max_link_rate; + int max_link_lane_count; + int link_rate; + int link_lane_count; + struct work_struct train_retry_work; + int train_retry_count; + uint32_t trigger_train_fail; + + unsigned char train_set[4]; + struct edid *edp_edid; + bool has_audio; + bool fast_train_support; + bool hw_spread_enable; + bool reserve[1]; + struct platform_device *audio_pdev; + struct audio_info audio_info; + hdmi_codec_plugged_cb plugged_cb; + struct device *codec_dev; + struct phytium_dp_compliance compliance; + struct phytium_dp_func *funcs; + struct phytium_dp_hpd_state dp_hpd_state; + + struct phytium_panel panel; + struct drm_display_mode native_mode; +}; + +union phytium_phy_tp { + struct { + /* DpcdPhyTestPatterns. This field is 2 bits for DP1.1 + * and 3 bits for DP1.2. 
+ */ + uint8_t PATTERN :3; + uint8_t RESERVED :5; + } bits; + uint8_t raw; +}; + +/* PHY test patterns + * The order of test patterns follows DPCD register PHY_TEST_PATTERN (0x248) + */ +enum phytium_dpcd_phy_tp { + PHYTIUM_PHY_TP_NONE = 0, + PHYTIUM_PHY_TP_D10_2, + PHYTIUM_PHY_TP_SYMBOL_ERROR, + PHYTIUM_PHY_TP_PRBS7, + PHYTIUM_PHY_TP_80BIT_CUSTOM, + PHYTIUM_PHY_TP_CP2520_1, + PHYTIUM_PHY_TP_CP2520_2, + PHYTIUM_PHY_TP_CP2520_3, +}; +#define PHYTIUM_DP_AUDIO_ID (('P' << 24) + ('H' << 16) + ('Y' << 8)) +#define encoder_to_dp_device(x) container_of(x, struct phytium_dp_device, encoder) +#define connector_to_dp_device(x) container_of(x, struct phytium_dp_device, connector) +#define panel_to_dp_device(x) container_of(x, struct phytium_dp_device, panel) +#define train_retry_to_dp_device(x) container_of(x, struct phytium_dp_device, train_retry_work) +void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data); +uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address); + +int phytium_dp_init(struct drm_device *dev, int pipe); +int phytium_dp_resume(struct drm_device *drm_dev); +void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable); +irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv); +void phytium_dp_hpd_work_func(struct work_struct *work); +const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate); +#endif /* __PHYTIUM_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_fb.c b/drivers/gpu/drm/phytium/phytium_fb.c new file mode 100644 index 000000000000..879065964729 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fb.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +static int +phytium_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file_priv, + unsigned int *handle) +{ + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + + return drm_gem_handle_create(file_priv, &phytium_fb->phytium_gem_obj[0]->base, handle); +} + +static void phytium_fb_destroy(struct drm_framebuffer *fb) +{ + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + int i, num_planes; + struct drm_gem_object *obj = NULL; + const struct drm_format_info *info; + + info = drm_format_info(fb->format->format); + num_planes = info ? info->num_planes : 1; + + for (i = 0; i < num_planes; i++) { + obj = &phytium_fb->phytium_gem_obj[i]->base; + if (obj) + drm_gem_object_put(obj); + } + + drm_framebuffer_cleanup(fb); + kfree(phytium_fb); +} + +static struct drm_framebuffer_funcs viv_fb_funcs = { + .create_handle = phytium_fb_create_handle, + .destroy = phytium_fb_destroy, +}; + +struct phytium_framebuffer * +phytium_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, + struct phytium_gem_object **phytium_gem_obj, unsigned int num_planes) +{ + struct phytium_framebuffer *phytium_fb; + int ret = 0, i; + + phytium_fb = kzalloc(sizeof(*phytium_fb), GFP_KERNEL); + if (!phytium_fb) + return ERR_PTR(-ENOMEM); + + drm_helper_mode_fill_fb_struct(dev, &phytium_fb->base, mode_cmd); + + ret = drm_framebuffer_init(dev, &phytium_fb->base, &viv_fb_funcs); + + if (ret) { + DRM_ERROR("Failed to initialize framebuffer: %d\n", ret); + kfree(phytium_fb); + return ERR_PTR(ret); + } + + for (i = 0; i < num_planes; i++) { + phytium_fb->phytium_gem_obj[i] = phytium_gem_obj[i]; + phytium_fb->base.obj[i] = &phytium_gem_obj[i]->base; + } + return phytium_fb; +} + +struct drm_framebuffer * +phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ 
+ int ret = 0, i, num_planes; + struct drm_gem_object *obj; + unsigned int hsub, vsub, size; + struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE] = {0}; + struct phytium_framebuffer *phytium_fb; + struct phytium_display_private *priv = dev->dev_private; + const struct drm_format_info *info; + + info = drm_format_info(mode_cmd->pixel_format); + hsub = info ? info->hsub : 1; + vsub = info ? info->vsub : 1; + num_planes = info ? info->num_planes : 1; + num_planes = min(num_planes, PHYTIUM_FORMAT_MAX_PLANE); + + for (i = 0; i < num_planes; i++) { + unsigned int height = mode_cmd->height / (i ? vsub : 1); + + size = height * mode_cmd->pitches[i] + mode_cmd->offsets[i]; + obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); + if (!obj) { + DRM_ERROR("Failed to lookup GEM object\n"); + ret = -ENXIO; + goto error; + } + + if (obj->size < size) { + drm_gem_object_put(obj); + ret = -EINVAL; + goto error; + } + + phytium_gem_obj[i] = to_phytium_gem_obj(obj); + + ret = priv->dc_hw_fb_format_check(mode_cmd, i); + if (ret < 0) + goto error; + } + + phytium_fb = phytium_fb_alloc(dev, mode_cmd, phytium_gem_obj, i); + if (IS_ERR(phytium_fb)) { + DRM_DEBUG_KMS("phytium_fb_alloc failed\n"); + ret = PTR_ERR(phytium_fb); + goto error; + } + + return &phytium_fb->base; +error: + for (i--; i >= 0; i--) + drm_gem_object_put(&phytium_gem_obj[i]->base); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/phytium/phytium_fb.h b/drivers/gpu/drm/phytium/phytium_fb.h new file mode 100644 index 000000000000..e096aa30ccb5 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fb.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
 */

#ifndef __PHYTIUM_FB_H__
#define __PHYTIUM_FB_H__

#include

/* Framebuffer wrapper: the DRM framebuffer plus one GEM object per plane
 * (up to PHYTIUM_FORMAT_MAX_PLANE).  The embedded base allows recovery of
 * the wrapper via to_phytium_framebuffer().
 */
struct phytium_framebuffer {
	struct drm_framebuffer base;
	struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE];
};

#define to_phytium_framebuffer(fb) container_of(fb, struct phytium_framebuffer, base)

/* Wrap @num_planes GEM objects in a registered framebuffer; returns the
 * new framebuffer or an ERR_PTR.  Ownership of the GEM references passes
 * to the framebuffer.
 */
struct phytium_framebuffer *phytium_fb_alloc(struct drm_device *dev,
					     const struct drm_mode_fb_cmd2 *mode_cmd,
					     struct phytium_gem_object **phytium_gem_obj,
					     unsigned int num_planes);

/* drm_mode_config_funcs.fb_create hook: look up the GEM handles named in
 * @mode_cmd, validate sizes/format, and build the framebuffer.
 */
struct drm_framebuffer *phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv,
					  const struct drm_mode_fb_cmd2 *mode_cmd);
#endif /* __PHYTIUM_FB_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.c b/drivers/gpu/drm/phytium/phytium_fbdev.c
new file mode 100644
index 000000000000..e929ad281724
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_fbdev.c
@@ -0,0 +1,151 @@
// SPDX-License-Identifier: GPL-2.0
/* Phytium display drm driver
 *
 * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include "phytium_display_drv.h"
+#include "phytium_gem.h"
+#include "phytium_fb.h"
+
+
+#define PHYTIUM_MAX_CONNECTOR	1
+#define helper_to_drm_private(x) container_of(x, struct phytium_display_private, fbdev_helper)
+
+/* Release the fbdev backing GEM object when the fb_info is destroyed. */
+static void phytium_fbdev_destroy(struct fb_info *info)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct phytium_display_private *priv = helper_to_drm_private(helper);
+
+	phytium_gem_free_object(&priv->fbdev_phytium_gem->base);
+}
+
+/* Map the fbdev backing GEM object into user space. */
+static int phytium_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct drm_fb_helper *helper = info->par;
+	struct phytium_display_private *priv = helper_to_drm_private(helper);
+
+	return phytium_gem_mmap_obj(&priv->fbdev_phytium_gem->base, vma);
+}
+
+static const struct fb_ops phytium_fbdev_ops = {
+	.owner = THIS_MODULE,
+	DRM_FB_HELPER_DEFAULT_OPS,
+	.fb_mmap = phytium_fbdev_mmap,
+	FB_DEFAULT_IOMEM_OPS,
+	.fb_destroy = phytium_fbdev_destroy,
+};
+
+/*
+ * .fb_probe callback: allocate the backing GEM object, wrap it in a DRM
+ * framebuffer and fill in the fb_info used by the fbdev emulation.
+ */
+static int
+phytium_drm_fbdev_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes)
+{
+	struct phytium_display_private *priv = helper_to_drm_private(helper);
+	struct drm_device *dev = helper->dev;
+	unsigned int bytes_per_pixel;
+	struct drm_mode_fb_cmd2 mode_cmd = {0};
+	struct phytium_framebuffer *phytium_fb = NULL;
+	struct fb_info *fbi = NULL;
+	struct drm_framebuffer *fb = NULL;
+	size_t size = 0;
+	int ret = 0;
+	unsigned long offset;
+
+	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.pitches[0] = ALIGN(sizes->surface_width * bytes_per_pixel, 128);
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
+	size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height);
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret < 0) {
+		DRM_ERROR("failed to get mutex lock\n");
+		return ret;
+	}
+
+	priv->fbdev_phytium_gem = phytium_gem_create_object(dev, size);
+	if (IS_ERR(priv->fbdev_phytium_gem)) {
+		/*
+		 * phytium_gem_create_object() returns ERR_PTR(), never NULL.
+		 * Drop the mutex on this path too; the original returned with
+		 * struct_mutex still held.
+		 */
+		ret = PTR_ERR(priv->fbdev_phytium_gem);
+		priv->fbdev_phytium_gem = NULL;
+		mutex_unlock(&dev->struct_mutex);
+		DRM_ERROR("failed to create gem object\n");
+		return ret;
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	fbi = drm_fb_helper_alloc_info(helper);
+	if (IS_ERR(fbi)) {
+		DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info.");
+		ret = PTR_ERR(fbi);
+		goto out;
+	}
+
+	phytium_fb = phytium_fb_alloc(dev, &mode_cmd, &priv->fbdev_phytium_gem, 1);
+	if (IS_ERR(phytium_fb)) {
+		DRM_DEV_ERROR(dev->dev, "Failed to alloc DRM framebuffer.\n");
+		ret = PTR_ERR(phytium_fb);
+		goto out;
+	}
+
+	helper->fb = &(phytium_fb->base);
+	fbi->par = helper;
+	fbi->fbops = &phytium_fbdev_ops;
+
+	fb = helper->fb;
+	drm_fb_helper_fill_info(fbi, helper, sizes);
+
+	offset = fbi->var.xoffset * bytes_per_pixel;
+	offset += fbi->var.yoffset * fb->pitches[0];
+	fbi->screen_base = priv->fbdev_phytium_gem->vaddr + offset;
+	fbi->screen_size = priv->fbdev_phytium_gem->base.size;
+	fbi->fix.smem_len = priv->fbdev_phytium_gem->base.size;
+	DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%pa offset=%ld size=%zu\n", fb->width, fb->height,
+		      fb->format->depth, &priv->fbdev_phytium_gem->iova, offset, size);
+	fbi->skip_vt_switch = true;
+
+	return 0;
+out:
+	phytium_gem_free_object(&priv->fbdev_phytium_gem->base);
+	priv->fbdev_phytium_gem = NULL;
+	return ret;
+}
+
+static const struct drm_fb_helper_funcs phytium_drm_fb_helper_funcs = {
+	.fb_probe = phytium_drm_fbdev_create,
+};
+
+/* Set up legacy fbdev emulation on top of the phytium KMS device. */
+int phytium_drm_fbdev_init(struct drm_device *dev)
+{
+	struct phytium_display_private *priv = dev->dev_private;
+	struct drm_fb_helper *helper;
+	int ret;
+
+	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+		return -EINVAL;
+
+	helper = &priv->fbdev_helper;
+	drm_fb_helper_prepare(dev, helper, 32, &phytium_drm_fb_helper_funcs);
+
+	ret = drm_fb_helper_init(dev, helper);
+	if (ret < 0) {
+		DRM_DEV_ERROR(dev->dev, "Failed to initialize drm fb helper -ret %d\n", ret);
+		return ret;
+	}
+
+	/* The original discarded this result and returned 0 unconditionally. */
+	ret = drm_fb_helper_initial_config(helper);
+	if (ret < 0) {
+		DRM_DEV_ERROR(dev->dev, "Failed to set initial hw config -ret %d\n", ret);
+		drm_fb_helper_fini(helper);
+		return ret;
+	}
+
+	return 0;
+}
+
+void
phytium_drm_fbdev_fini(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + struct drm_fb_helper *helper; + + helper = &priv->fbdev_helper; + drm_fb_helper_unregister_info(helper); + + if (helper->fb) + drm_framebuffer_put(helper->fb); + + drm_fb_helper_fini(helper); +} diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.h b/drivers/gpu/drm/phytium/phytium_fbdev.h new file mode 100644 index 000000000000..fe352557a4f9 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fbdev.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef _PHYTIUM_FBDEV_H +#define _PHYTIUM_FBDEV_H + +int phytium_drm_fbdev_init(struct drm_device *dev); +void phytium_drm_fbdev_fini(struct drm_device *dev); + +#endif /* _PHYTIUM_FBDEV_H */ diff --git a/drivers/gpu/drm/phytium/phytium_gem.c b/drivers/gpu/drm/phytium/phytium_gem.c new file mode 100644 index 000000000000..f470f769dce6 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_gem.c @@ -0,0 +1,509 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_gem.h" + +#define VRAM_POOL_ALLOC_ORDER 12 + +int phytium_memory_pool_alloc(struct phytium_display_private *priv, void **pvaddr, + phys_addr_t *phys_addr, uint64_t size) +{ + unsigned long vaddr; + + vaddr = gen_pool_alloc(priv->memory_pool, size); + if (!vaddr) + return -ENOMEM; + + *phys_addr = gen_pool_virt_to_phys(priv->memory_pool, vaddr); + + *pvaddr = (void *)vaddr; + return 0; +} + +void phytium_memory_pool_free(struct phytium_display_private *priv, void *vaddr, uint64_t size) +{ + gen_pool_free(priv->memory_pool, (unsigned long)vaddr, size); +} + +int phytium_memory_pool_init(struct device *dev, struct phytium_display_private *priv) +{ + int ret = 0; + + priv->memory_pool = gen_pool_create(VRAM_POOL_ALLOC_ORDER, -1); + if (priv->memory_pool == NULL) { + DRM_ERROR("fail to create memory pool\n"); + ret = -1; + goto failed_create_pool; + } + + ret = gen_pool_add_virt(priv->memory_pool, (unsigned long)priv->pool_virt_addr, + priv->pool_phys_addr, priv->pool_size, -1); + if (ret) { + DRM_ERROR("fail to add vram pool\n"); + ret = -1; + goto failed_add_pool_virt; + } + + return 0; + +failed_add_pool_virt: + gen_pool_destroy(priv->memory_pool); + +failed_create_pool: + return ret; +} + +void phytium_memory_pool_fini(struct device *dev, struct phytium_display_private *priv) +{ + gen_pool_destroy(priv->memory_pool); +} + +struct sg_table * +phytium_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + struct sg_table *sgt; + struct drm_device *dev = obj->dev; + int ret; + struct page *page = NULL; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + DRM_DEBUG_KMS("malloc sgt fail\n"); + return ERR_PTR(-ENOMEM); + } + + if ((phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) || + (phytium_gem_obj->memory_type == 
MEMORY_TYPE_SYSTEM_CARVEOUT)) { + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (ret) { + DRM_ERROR("failed to allocate sg\n"); + goto sgt_free; + } + page = phys_to_page(phytium_gem_obj->phys_addr); + sg_set_page(sgt->sgl, page, PAGE_ALIGN(phytium_gem_obj->size), 0); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + ret = dma_get_sgtable_attrs(dev->dev, sgt, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, phytium_gem_obj->size, + DMA_ATTR_WRITE_COMBINE); + if (ret) { + DRM_ERROR("failed to allocate sgt, %d\n", ret); + goto sgt_free; + } + } + + return sgt; +sgt_free: + kfree(sgt); + return ERR_PTR(ret); +} + +struct drm_gem_object * +phytium_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct phytium_gem_object *phytium_gem_obj = NULL; + struct scatterlist *s; + dma_addr_t expected; + int ret, i; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); + if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto failed_malloc; + } + + ret = drm_gem_object_init(dev, &phytium_gem_obj->base, attach->dmabuf->size); + if (ret) { + DRM_ERROR("failed to initialize drm gem object: %d\n", ret); + goto failed_object_init; + } + + expected = sg_dma_address(sgt->sgl); + for_each_sg(sgt->sgl, s, sgt->nents, i) { + if (sg_dma_address(s) != expected) { + DRM_ERROR("sg_table is not contiguous"); + ret = -EINVAL; + goto failed_check_continue; + } + expected = sg_dma_address(s) + sg_dma_len(s); + } + + phytium_gem_obj->iova = sg_dma_address(sgt->sgl); + phytium_gem_obj->sgt = sgt; + + return &phytium_gem_obj->base; +failed_check_continue: + drm_gem_object_release(&phytium_gem_obj->base); +failed_object_init: + kfree(phytium_gem_obj); +failed_malloc: + return ERR_PTR(ret); +} + +int phytium_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) +{ + struct phytium_gem_object *phytium_obj = to_phytium_gem_obj(obj); + + 
iosys_map_set_vaddr(map, phytium_obj->vaddr); + + return 0; +} + +void phytium_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map) +{ + +} + +static void phytium_dma_callback(void *callback_param) +{ + struct completion *comp = callback_param; + + complete(comp); +} + +int phytium_dma_transfer(struct drm_device *drm_dev, int dev_to_mem, void *addr, + dma_addr_t iova, uint64_t size) +{ + struct phytium_display_private *priv = drm_dev->dev_private; + struct dma_chan *dma_chan = priv->dma_chan; + struct sg_table st; + struct scatterlist *sgl; + int ret = 0, timeout; + uint32_t nents, i; + struct dma_slave_config cfg = {0}; + struct dma_async_tx_descriptor *desc; + struct completion comp; + enum dma_data_direction dir; + size_t min = 0; + + nents = DIV_ROUND_UP(size, PAGE_SIZE); + ret = sg_alloc_table(&st, nents, GFP_KERNEL); + if (ret) { + DRM_ERROR("failed to allocate sg_table\n"); + ret = -ENOMEM; + goto failed_sg_alloc_table; + } + + for_each_sg(st.sgl, sgl, st.nents, i) { + min = min_t(size_t, size, PAGE_SIZE - offset_in_page(addr)); + sg_set_page(sgl, vmalloc_to_page(addr), min, offset_in_page(addr)); + addr += min; + size -= min; + } + + memset(&cfg, 0, sizeof(cfg)); + if (dev_to_mem) { + cfg.direction = DMA_DEV_TO_MEM; + cfg.src_addr = iova; + cfg.dst_addr = 0; + dir = DMA_FROM_DEVICE; + } else { + cfg.direction = DMA_MEM_TO_DEV; + cfg.src_addr = 0; + cfg.dst_addr = iova; + dir = DMA_TO_DEVICE; + } + + dmaengine_slave_config(dma_chan, &cfg); + + nents = dma_map_sg(dma_chan->device->dev, st.sgl, st.nents, dir); + if (!nents) { + DRM_DEV_ERROR(drm_dev->dev, "failed to dma_map_sg for dmaengine\n"); + ret = -EINVAL; + goto failed_dma_map_sg; + } + st.nents = nents; + dma_sync_sg_for_device(dma_chan->device->dev, st.sgl, st.nents, dir); + + sgl = st.sgl; + desc = dmaengine_prep_slave_sg(dma_chan, + st.sgl, + st.nents, + cfg.direction, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + DRM_DEV_ERROR(drm_dev->dev, "failed to 
dmaengine_prep_slave_sg\n"); + ret = -EINVAL; + goto failed_prep_slave_sg; + } + init_completion(&comp); + desc->callback = phytium_dma_callback; + desc->callback_param = ∁ + + dmaengine_submit(desc); + dma_async_issue_pending(dma_chan); + + timeout = wait_for_completion_timeout(&comp, 2 * HZ); + if (timeout == 0) { + DRM_DEV_ERROR(drm_dev->dev, "wait for dma callback timeout\n"); + ret = -EIO; + } + dma_sync_sg_for_cpu(dma_chan->device->dev, st.sgl, st.nents, dir); + +failed_prep_slave_sg: + dma_unmap_sg(dma_chan->device->dev, st.sgl, st.nents, dir); +failed_dma_map_sg: + sg_free_table(&st); +failed_sg_alloc_table: + return ret; +} + +int phytium_gem_suspend(struct drm_device *drm_dev) +{ + struct phytium_display_private *priv = drm_dev->dev_private; + struct phytium_gem_object *phytium_gem_obj = NULL; + int ret = 0; + + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + continue; + + phytium_gem_obj->vaddr_save = vmalloc(phytium_gem_obj->size); + if (!phytium_gem_obj->vaddr_save) + goto malloc_failed; + + if (priv->dma_inited) + ret = phytium_dma_transfer(drm_dev, 1, phytium_gem_obj->vaddr_save, + phytium_gem_obj->iova, phytium_gem_obj->size); + + if ((!priv->dma_inited) || ret) + memcpy(phytium_gem_obj->vaddr_save, phytium_gem_obj->vaddr, + phytium_gem_obj->size); + } + + return 0; +malloc_failed: + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + continue; + + if (phytium_gem_obj->vaddr_save) { + vfree(phytium_gem_obj->vaddr_save); + phytium_gem_obj->vaddr_save = NULL; + } + } + return -ENOMEM; +} + +void phytium_gem_resume(struct drm_device *drm_dev) +{ + struct phytium_display_private *priv = drm_dev->dev_private; + struct phytium_gem_object *phytium_gem_obj = NULL; + + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + continue; + + 
memcpy(phytium_gem_obj->vaddr, phytium_gem_obj->vaddr_save, phytium_gem_obj->size); + vfree(phytium_gem_obj->vaddr_save); + phytium_gem_obj->vaddr_save = NULL; + } +} + +void phytium_gem_free_object(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + struct drm_device *dev = obj->dev; + struct phytium_display_private *priv = dev->dev_private; + uint64_t size = phytium_gem_obj->size; + + DRM_DEBUG_KMS("free phytium_gem_obj iova:0x%pa size:0x%lx\n", + &phytium_gem_obj->iova, phytium_gem_obj->size); + if (phytium_gem_obj->vaddr) { + if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) { + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + priv->mem_state[PHYTIUM_MEM_VRAM_ALLOC] -= size; + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + dma_unmap_page(dev->dev, phytium_gem_obj->iova, size, DMA_TO_DEVICE); + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC] -= size; + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + dma_free_attrs(dev->dev, size, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, 0); + priv->mem_state[PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC] -= size; + } + list_del(&phytium_gem_obj->list); + } else if (obj->import_attach) + drm_prime_gem_destroy(obj, phytium_gem_obj->sgt); + drm_gem_object_release(obj); + kfree(phytium_gem_obj); +} + +int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + int ret = 0; + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + unsigned long pfn = PHYS_PFN(phytium_gem_obj->phys_addr); + /* + * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the + * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map + * the whole buffer. 
+ */ + vm_flags_clear(vma, VM_PFNMAP); + vma->vm_pgoff = 0; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + + if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) { + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + ret = remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + ret = remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + ret = dma_mmap_attrs(obj->dev->dev, vma, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, vma->vm_end - vma->vm_start, 0); + } + if (ret) + drm_gem_vm_close(vma); + + return ret; +} + +int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret = 0; + + ret = drm_gem_mmap(filp, vma); + if (ret < 0) + return ret; + + return phytium_gem_mmap_obj(vma->vm_private_data, vma); +} + +static const struct vm_operations_struct phytium_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_gem_object_funcs phytium_drm_gem_object_funcs = { + .free = phytium_gem_free_object, + .get_sg_table = phytium_gem_prime_get_sg_table, + .vmap = phytium_gem_prime_vmap, + .vunmap = phytium_gem_prime_vunmap, + .vm_ops = &phytium_vm_ops, +}; + +struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size) +{ + struct phytium_gem_object *phytium_gem_obj = NULL; + struct phytium_display_private *priv = dev->dev_private; + struct page *page = NULL; + int ret = 0; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); + if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto error; + } + + ret = drm_gem_object_init(dev, &phytium_gem_obj->base, size); + if (ret) { + DRM_ERROR("failed to initialize drm gem object: %d\n", ret); + goto failed_object_init; + } + + if 
(priv->support_memory_type & MEMORY_TYPE_VRAM) { + ret = phytium_memory_pool_alloc(priv, &phytium_gem_obj->vaddr, + &phytium_gem_obj->phys_addr, size); + if (ret) { + DRM_ERROR("fail to allocate vram buffer with size %lx\n", size); + goto failed_dma_alloc; + } + phytium_gem_obj->iova = phytium_gem_obj->phys_addr; + phytium_gem_obj->memory_type = MEMORY_TYPE_VRAM; + priv->mem_state[PHYTIUM_MEM_VRAM_ALLOC] += size; + } else if (priv->support_memory_type & MEMORY_TYPE_SYSTEM_CARVEOUT) { + ret = phytium_memory_pool_alloc(priv, &phytium_gem_obj->vaddr, + &phytium_gem_obj->phys_addr, size); + if (ret) { + DRM_ERROR("fail to allocate carveout memory with size %lx\n", size); + goto failed_dma_alloc; + } + page = phys_to_page(phytium_gem_obj->phys_addr); + phytium_gem_obj->iova = dma_map_page(dev->dev, page, 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev->dev, phytium_gem_obj->iova)) { + DRM_ERROR("fail to dma map carveout memory with size %lx\n", size); + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + phytium_gem_obj->memory_type = MEMORY_TYPE_SYSTEM_CARVEOUT; + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC] += size; + } else if (priv->support_memory_type & MEMORY_TYPE_SYSTEM_UNIFIED) { + phytium_gem_obj->vaddr = dma_alloc_attrs(dev->dev, size, &phytium_gem_obj->iova, + GFP_KERNEL, 0); + if (!phytium_gem_obj->vaddr) { + DRM_ERROR("fail to allocate unified buffer with size %lx\n", size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + phytium_gem_obj->memory_type = MEMORY_TYPE_SYSTEM_UNIFIED; + priv->mem_state[PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC] += size; + } else { + DRM_ERROR("fail to allocate buffer with size %lx\n", size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + + phytium_gem_obj->base.funcs = &phytium_drm_gem_object_funcs; + + phytium_gem_obj->size = size; + list_add_tail(&phytium_gem_obj->list, &priv->gem_list_head); + DRM_DEBUG_KMS("phytium_gem_obj iova:0x%pa size:0x%lx\n", + 
&phytium_gem_obj->iova, phytium_gem_obj->size); + return phytium_gem_obj; + +failed_dma_alloc: + drm_gem_object_put(&phytium_gem_obj->base); + + return ERR_PTR(ret); +failed_object_init: + kfree(phytium_gem_obj); +error: + return ERR_PTR(ret); +} + +int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + int size = 0; + struct phytium_gem_object *phytium_gem_obj = NULL; + int ret = 0; + + args->pitch = ALIGN(args->width*DIV_ROUND_UP(args->bpp, 8), 128); + args->size = args->pitch * args->height; + size = PAGE_ALIGN(args->size); + phytium_gem_obj = phytium_gem_create_object(dev, size); + if (IS_ERR(phytium_gem_obj)) + return PTR_ERR(phytium_gem_obj); + ret = drm_gem_handle_create(file, &phytium_gem_obj->base, &args->handle); + if (ret) { + DRM_ERROR("failed to drm_gem_handle_create\n"); + goto failed_gem_handle; + } + drm_gem_object_put(&phytium_gem_obj->base); + + return 0; +failed_gem_handle: + phytium_gem_free_object(&phytium_gem_obj->base); + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_gem.h b/drivers/gpu/drm/phytium/phytium_gem.h new file mode 100644 index 000000000000..17c438e6e63c --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_gem.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_GEM_H__ +#define __PHYTIUM_GEM_H__ + +#include + +struct phytium_gem_object { + struct drm_gem_object base; + phys_addr_t phys_addr; + dma_addr_t iova; + void *vaddr; + unsigned long size; + struct sg_table *sgt; + char memory_type; + char reserve[3]; + struct list_head list; + void *vaddr_save; +}; + +#define to_phytium_gem_obj(obj) container_of(obj, struct phytium_gem_object, base) + +int phytium_memory_pool_init(struct device *dev, struct phytium_display_private *priv); +void phytium_memory_pool_fini(struct device *dev, struct phytium_display_private *priv); +int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma); +int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma); +void phytium_gem_free_object(struct drm_gem_object *obj); +struct sg_table *phytium_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object *phytium_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, struct sg_table *sgt); +void phytium_gem_free_object(struct drm_gem_object *obj); +int phytium_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, unsigned int handle); +struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size); +int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); +int phytium_gem_suspend(struct drm_device *drm_dev); +void phytium_gem_resume(struct drm_device *drm_dev); +#endif /* __PHYTIUM_GEM_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_panel.c b/drivers/gpu/drm/phytium/phytium_panel.c new file mode 100644 index 000000000000..1cd266e868b3 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_panel.c @@ -0,0 +1,420 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_dp.h" +#include "phytium_panel.h" + +static int +phytium_dp_aux_set_backlight(struct phytium_panel *panel, unsigned int level) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + unsigned char vals[2] = { 0x0 }; + + vals[0] = level; + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) { + vals[0] = (level & 0xFF00) >> 8; + vals[1] = (level & 0xFF); + } + + if (drm_dp_dpcd_write(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + vals, sizeof(vals)) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight level\n"); + return -EIO; + } + + return 0; +} + +static unsigned int phytium_dp_aux_get_backlight(struct phytium_panel *panel) +{ + unsigned char read_val[2] = { 0x0 }; + unsigned char level = 0; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (drm_dp_dpcd_read(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + &read_val, sizeof(read_val)) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_BRIGHTNESS_MSB); + return 0; + } + + level = read_val[0]; + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + level = (read_val[0] << 8 | read_val[1]); + + return level; +} + +static void set_aux_backlight_enable(struct phytium_panel *panel, bool enable) +{ + u8 reg_val = 0; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (!(phytium_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP)) + return; + + if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + ®_val) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_DISPLAY_CONTROL_REGISTER); + return; + } + + if (enable) + reg_val |= DP_EDP_BACKLIGHT_ENABLE; + else + reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE); + + if (drm_dp_dpcd_writeb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + reg_val) != 1) { + DRM_DEBUG_KMS("Failed to %s aux backlight\n", + 
enable ? "enable" : "disable"); + } +} + +static void phytium_dp_aux_enable_backlight(struct phytium_panel *panel) +{ + unsigned char dpcd_buf, new_dpcd_buf, edp_backlight_mode; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (drm_dp_dpcd_readb(&phytium_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_MODE_SET_REGISTER); + return; + } + + new_dpcd_buf = dpcd_buf; + edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + + switch (edp_backlight_mode) { + case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT: + new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; + break; + + /* Do nothing when it is already DPCD mode */ + case DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD: + default: + break; + } + + if (new_dpcd_buf != dpcd_buf) { + if (drm_dp_dpcd_writeb(&phytium_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight mode\n"); + } + } + + set_aux_backlight_enable(panel, true); + phytium_dp_aux_set_backlight(panel, panel->level); +} + +static void phytium_dp_aux_disable_backlight(struct phytium_panel *panel) +{ + set_aux_backlight_enable(panel, false); +} + +static void phytium_dp_aux_setup_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + phytium_dp->panel.max = 0xFFFF; + else + phytium_dp->panel.max = 0xFF; + + phytium_dp->panel.min = 0; + phytium_dp->panel.level = phytium_dp_aux_get_backlight(panel); + phytium_dp->panel.backlight_enabled = (phytium_dp->panel.level != 0); +} + +static void phytium_dp_hw_poweron_panel(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + 
phytium_dp->funcs->dp_hw_poweron_panel(phytium_dp); +} + +static void phytium_dp_hw_poweroff_panel(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_poweroff_panel(phytium_dp); +} + +static int +phytium_dp_hw_set_backlight(struct phytium_panel *panel, uint32_t level) +{ + int ret; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + ret = phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, level); + + return ret; +} + +static uint32_t phytium_dp_hw_get_backlight(struct phytium_panel *panel) +{ + uint32_t ret; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + ret = phytium_dp->funcs->dp_hw_get_backlight(phytium_dp); + + return ret; +} + +static void phytium_dp_hw_enable_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, phytium_dp->panel.level); + phytium_dp->funcs->dp_hw_enable_backlight(phytium_dp); +} + +static void phytium_dp_hw_disable_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_disable_backlight(phytium_dp); +} + +static void phytium_dp_hw_setup_backlight(struct phytium_panel *panel) +{ + struct drm_device *dev = panel->dev; + struct phytium_display_private *priv = dev->dev_private; + + panel->max = priv->info.backlight_max; + panel->min = 0; + panel->level = phytium_dp_hw_get_backlight(panel); +} + +void phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp) +{ + if (phytium_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && + (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) && + !(phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) { + DRM_DEBUG_KMS("AUX Backlight Control Supported!\n"); + phytium_dp->panel.setup_backlight = phytium_dp_aux_setup_backlight; + 
phytium_dp->panel.enable_backlight = phytium_dp_aux_enable_backlight; + phytium_dp->panel.disable_backlight = phytium_dp_aux_disable_backlight; + phytium_dp->panel.set_backlight = phytium_dp_aux_set_backlight; + phytium_dp->panel.get_backlight = phytium_dp_aux_get_backlight; + } else { + DRM_DEBUG_KMS("SE Backlight Control Supported!\n"); + phytium_dp->panel.setup_backlight = phytium_dp_hw_setup_backlight; + phytium_dp->panel.enable_backlight = phytium_dp_hw_enable_backlight; + phytium_dp->panel.disable_backlight = phytium_dp_hw_disable_backlight; + phytium_dp->panel.set_backlight = phytium_dp_hw_set_backlight; + phytium_dp->panel.get_backlight = phytium_dp_hw_get_backlight; + } + phytium_dp->panel.poweron = phytium_dp_hw_poweron_panel; + phytium_dp->panel.poweroff = phytium_dp_hw_poweroff_panel; + mutex_init(&phytium_dp->panel.panel_lock); + phytium_dp->panel.dev = phytium_dp->dev; + + /* Upper limits from eDP 1.3 spec */ + phytium_dp->panel.panel_power_up_delay = 210; /* t1_t3 */ + phytium_dp->panel.backlight_on_delay = 50; /* t7 */ + phytium_dp->panel.backlight_off_delay = 50; + phytium_dp->panel.panel_power_down_delay = 0; /* t10 */ + phytium_dp->panel.panel_power_cycle_delay = 510; /* t11 + t12 */ +} + +void phytium_dp_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->panel.setup_backlight = NULL; + phytium_dp->panel.enable_backlight = NULL; + phytium_dp->panel.disable_backlight = NULL; + phytium_dp->panel.set_backlight = NULL; + phytium_dp->panel.get_backlight = NULL; + phytium_dp->panel.poweron = NULL; + phytium_dp->panel.poweroff = NULL; +} + +void phytium_panel_enable_backlight(struct phytium_panel *panel) +{ + + if (panel->enable_backlight) { + mutex_lock(&panel->panel_lock); + msleep(panel->backlight_on_delay); + panel->enable_backlight(panel); + panel->backlight_enabled = true; + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_disable_backlight(struct phytium_panel *panel) +{ + if 
(panel->disable_backlight) { + mutex_lock(&panel->panel_lock); + panel->disable_backlight(panel); + panel->backlight_enabled = false; + msleep(panel->backlight_off_delay); + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_poweron(struct phytium_panel *panel) +{ + if (panel->poweron) { + mutex_lock(&panel->panel_lock); + panel->poweron(panel); + panel->power_enabled = true; + msleep(panel->panel_power_up_delay); + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_poweroff(struct phytium_panel *panel) +{ + if (panel->poweroff) { + mutex_lock(&panel->panel_lock); + msleep(panel->panel_power_down_delay); + panel->poweroff(panel); + panel->power_enabled = false; + mutex_unlock(&panel->panel_lock); + } +} + +static uint32_t phytium_scale(uint32_t source_val, + uint32_t source_min, uint32_t source_max, + uint32_t target_min, uint32_t target_max) +{ + uint64_t target_val; + + WARN_ON(source_min > source_max); + WARN_ON(target_min > target_max); + + /* defensive */ + source_val = clamp(source_val, source_min, source_max); + + /* avoid overflows */ + target_val = mul_u32_u32(source_val - source_min, target_max - target_min); + target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min); + target_val += target_min; + + return target_val; +} + +static inline uint32_t +phytium_scale_hw_to_user(struct phytium_panel *panel, uint32_t hw_level, uint32_t user_max) +{ + return phytium_scale(hw_level, panel->min, panel->max, + 0, user_max); +} + +static inline uint32_t +phytium_scale_user_to_hw(struct phytium_panel *panel, u32 user_level, u32 user_max) +{ + return phytium_scale(user_level, 0, user_max, + panel->min, panel->max); +} + +static int phytium_backlight_device_update_status(struct backlight_device *bd) +{ + struct phytium_panel *panel = bl_get_data(bd); + struct drm_device *dev = panel->dev; + uint32_t hw_level = 0; + int ret = 0; + + DRM_DEBUG_KMS("updating phytium_backlight, brightness=%d/%d\n", + bd->props.brightness, 
bd->props.max_brightness); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + hw_level = phytium_scale_user_to_hw(panel, bd->props.brightness, bd->props.max_brightness); + + if ((panel->set_backlight) && (panel->backlight_enabled)) { + mutex_lock(&panel->panel_lock); + ret = panel->set_backlight(panel, hw_level); + panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + return ret; +} + +static int phytium_backlight_device_get_brightness(struct backlight_device *bd) +{ + struct phytium_panel *panel = bl_get_data(bd); + struct drm_device *dev = panel->dev; + uint32_t hw_level = 0; + int ret; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + if (panel->get_backlight && panel->backlight_enabled) { + mutex_lock(&panel->panel_lock); + hw_level = panel->get_backlight(panel); + panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + ret = phytium_scale_hw_to_user(panel, hw_level, bd->props.max_brightness); + DRM_DEBUG_KMS("get phytium_backlight, brightness=%d/%d\n", + ret, bd->props.max_brightness); + + return ret; +} + +static const struct backlight_ops phytium_backlight_device_ops = { + .update_status = phytium_backlight_device_update_status, + .get_brightness = phytium_backlight_device_get_brightness, +}; + +int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp) +{ + struct backlight_properties props; + char bl_name[16]; + + if (phytium_dp->panel.setup_backlight) { + mutex_lock(&phytium_dp->panel.panel_lock); + phytium_dp->panel.setup_backlight(&phytium_dp->panel); + mutex_unlock(&phytium_dp->panel.panel_lock); + } else { + return -EINVAL; + } + + memset(&props, 0, sizeof(props)); + props.max_brightness = PHYTIUM_MAX_BL_LEVEL; + props.type = BACKLIGHT_RAW; + props.brightness = phytium_scale_hw_to_user(&phytium_dp->panel, phytium_dp->panel.level, + props.max_brightness); + 
snprintf(bl_name, sizeof(bl_name), "phytium_bl%d", phytium_dp->port); + + phytium_dp->panel.bl_device = + backlight_device_register(bl_name, + phytium_dp->connector.kdev, + &phytium_dp->panel, + &phytium_backlight_device_ops, + &props); + + if (IS_ERR(phytium_dp->panel.bl_device)) { + DRM_ERROR("Failed to register backlight: %ld\n", + PTR_ERR(phytium_dp->panel.bl_device)); + phytium_dp->panel.bl_device = NULL; + return -ENODEV; + } + + DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n", + phytium_dp->connector.name); + + return 0; +} + +void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp) +{ + if (phytium_dp->panel.bl_device) { + backlight_device_unregister(phytium_dp->panel.bl_device); + phytium_dp->panel.bl_device = NULL; + } +} diff --git a/drivers/gpu/drm/phytium/phytium_panel.h b/drivers/gpu/drm/phytium/phytium_panel.h new file mode 100644 index 000000000000..f9e2c7e65896 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_panel.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_PANEL_H__ +#define __PHYTIUM_PANEL_H__ +#include "phytium_dp.h" + +#define PHYTIUM_MAX_BL_LEVEL 0xFF + +struct phytium_panel { + struct drm_device *dev; + bool backlight_enabled; + bool power_enabled; + bool reserve1[2]; + unsigned int min; + unsigned int level; + unsigned int max; + struct backlight_device *bl_device; + void (*setup_backlight)(struct phytium_panel *panel); + uint32_t (*get_backlight)(struct phytium_panel *panel); + int (*set_backlight)(struct phytium_panel *panel, uint32_t level); + void (*disable_backlight)(struct phytium_panel *panel); + void (*enable_backlight)(struct phytium_panel *panel); + void (*poweron)(struct phytium_panel *panel); + void (*poweroff)(struct phytium_panel *panel); + struct mutex panel_lock; + uint32_t panel_power_up_delay; + uint32_t backlight_on_delay; + uint32_t backlight_off_delay; + uint32_t panel_power_down_delay; + uint32_t panel_power_cycle_delay; +}; + +void phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp); +void phytium_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp); +int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp); +void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp); +void phytium_panel_enable_backlight(struct phytium_panel *panel); +void phytium_panel_disable_backlight(struct phytium_panel *panel); +void phytium_panel_poweron(struct phytium_panel *panel); +void phytium_panel_poweroff(struct phytium_panel *panel); + +#endif /* __PHYTIUM_PANEL_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_pci.c b/drivers/gpu/drm/phytium/phytium_pci.c new file mode 100644 index 000000000000..f93ab85395c5 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_pci.c @@ -0,0 +1,387 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_pci.h" +#include "phytium_dp.h" +#include "phytium_gem.h" +#include "px210_dc.h" +#include "px210_dp.h" +#include "pe220x_dc.h" +#include "pe220x_dp.h" + +int dc_msi_enable; +module_param(dc_msi_enable, int, 0644); +MODULE_PARM_DESC(dc_msi_enable, "Enable DC msi interrupt (0-disabled; 1-enabled; default-0)"); + +void phytium_pci_vram_hw_init(struct phytium_display_private *priv) +{ + struct phytium_pci_private *pci_priv = to_pci_priv(priv); + + pci_priv->dc_hw_vram_init(priv, priv->pool_phys_addr, priv->pool_size); +} + +int phytium_pci_vram_init(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + int ret = 0; + + priv->pool_phys_addr = pci_resource_start(pdev, 2); + priv->pool_size = pci_resource_len(pdev, 2); + if ((priv->pool_phys_addr != 0) && (priv->pool_size != 0)) { + priv->pool_virt_addr = devm_ioremap_wc(&pdev->dev, priv->pool_phys_addr, + priv->pool_size); + if (priv->pool_virt_addr == NULL) { + DRM_ERROR("pci vram ioremap fail, addr:0x%llx, size:0x%llx\n", + priv->pool_phys_addr, priv->pool_size); + ret = -EINVAL; + goto failed_ioremap; + } + ret = phytium_memory_pool_init(&pdev->dev, priv); + if (ret) + goto failed_init_memory_pool; + + priv->mem_state[PHYTIUM_MEM_VRAM_TOTAL] = priv->pool_size; + priv->support_memory_type = MEMORY_TYPE_VRAM; + priv->vram_hw_init = phytium_pci_vram_hw_init; + } else { + DRM_DEBUG_KMS("not support vram\n"); + priv->pool_virt_addr = NULL; + priv->mem_state[PHYTIUM_MEM_VRAM_TOTAL] = 0; + priv->support_memory_type = MEMORY_TYPE_SYSTEM_UNIFIED; + priv->vram_hw_init = NULL; + } + + return 0; + +failed_init_memory_pool: + devm_iounmap(&pdev->dev, priv->pool_virt_addr); +failed_ioremap: + return ret; +} + +void phytium_pci_vram_fini(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + if (priv->support_memory_type == MEMORY_TYPE_VRAM) { + phytium_memory_pool_fini(&pdev->dev, priv); + devm_iounmap(&pdev->dev, 
priv->pool_virt_addr); + } +} + +static bool phytium_pci_dma_chan_filter(struct dma_chan *chan, void *param) +{ + struct phytium_dma_slave *s = param; + + if (s->dma_dev != chan->device->dev) + return false; + + if (s->chan_id == chan->chan_id) + return true; + else + return false; +} + +int phytium_pci_dma_init(struct phytium_display_private *priv) +{ + struct pci_dev *dma_dev, *gpu_dev; + struct drm_device *drm_dev = priv->dev; + dma_cap_mask_t mask; + struct phytium_dma_slave s; + int ret = 0; + u16 cmd; + + /* check px210 gpu enable */ + gpu_dev = pci_get_device(PCI_VENDOR_ID_PHYTIUM, 0xdc20, NULL); + if (!gpu_dev) { + DRM_INFO("failed to get gpu_dev\n"); + ret = -ENODEV; + goto failed; + } + + pci_read_config_word(gpu_dev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MASTER)) { + DRM_INFO("gpu_dev master is disabled\n"); + ret = -ENODEV; + goto failed; + } + + dma_dev = pci_get_device(PCI_VENDOR_ID_PHYTIUM, 0xdc3c, NULL); + if (!dma_dev) { + DRM_INFO("failed to get dma_dev\n"); + ret = -ENODEV; + goto failed; + } + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + s.dma_dev = &dma_dev->dev; + s.chan_id = 2; + priv->dma_chan = dma_request_channel(mask, phytium_pci_dma_chan_filter, &s); + if (!priv->dma_chan) { + DRM_DEV_ERROR(drm_dev->dev, "failed to request dma chan\n"); + ret = -EBUSY; + goto failed; + } + priv->dma_inited = 1; + +failed: + return ret; +} + +void phytium_pci_dma_fini(struct phytium_display_private *priv) +{ + if (priv->dma_inited) + dma_release_channel(priv->dma_chan); + priv->dma_inited = 0; + priv->dma_chan = NULL; +} + +static struct phytium_display_private* +phytium_pci_private_init(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = NULL; + struct phytium_pci_private *pci_priv = NULL; + struct phytium_device_info *phytium_info = (struct phytium_device_info *)ent->driver_data; + int i = 0; + resource_size_t io_addr, io_size; + + 
pci_priv = devm_kzalloc(&pdev->dev, sizeof(*pci_priv), GFP_KERNEL); + if (!pci_priv) { + DRM_ERROR("no memory to allocate for drm_display_private\n"); + goto failed_malloc_priv; + } + + memset(pci_priv, 0, sizeof(*pci_priv)); + priv = &pci_priv->base; + phytium_display_private_init(priv, dev); + + memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info)); + DRM_DEBUG_KMS("priv->info.num_pipes :%d\n", priv->info.num_pipes); + priv->info.pipe_mask = ((pdev->subsystem_device >> PIPE_MASK_SHIFT) & PIPE_MASK_MASK); + priv->info.edp_mask = ((pdev->subsystem_device >> EDP_MASK_SHIFT) & EDP_MASK_MASK); + priv->info.num_pipes = 0; + for_each_pipe_masked(priv, i) + priv->info.num_pipes++; + if (priv->info.num_pipes == 0) { + DRM_ERROR("num_pipes is zero, so exit init\n"); + goto failed_init_numpipe; + } + + io_addr = pci_resource_start(pdev, 0); + io_size = pci_resource_len(pdev, 0); + priv->regs = ioremap(io_addr, io_size); + if (priv->regs == NULL) { + DRM_ERROR("pci bar0 ioremap fail, addr:0x%llx, size:0x%llx\n", io_addr, io_size); + goto failed_ioremap; + } + + priv->irq = pdev->irq; + if (IS_PX210(priv)) { + pci_priv->dc_hw_vram_init = px210_dc_hw_vram_init; + priv->dc_hw_clear_msi_irq = px210_dc_hw_clear_msi_irq; + priv->dc_hw_fb_format_check = px210_dc_hw_fb_format_check; + } else if (IS_PE220X(priv)) { + pci_priv->dc_hw_vram_init = pe220x_dc_hw_vram_init; + priv->dc_hw_clear_msi_irq = NULL; + priv->dc_hw_fb_format_check = pe220x_dc_hw_fb_format_check; + } + + return priv; + +failed_ioremap: +failed_init_numpipe: + devm_kfree(&pdev->dev, pci_priv); +failed_malloc_priv: + return NULL; +} + +static void +phytium_pci_private_fini(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + struct phytium_pci_private *pci_priv = to_pci_priv(priv); + + if (priv->regs) + iounmap(priv->regs); + + devm_kfree(&pdev->dev, pci_priv); +} + +static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct phytium_display_private 
*priv = NULL; + struct drm_device *dev = NULL; + int ret = 0; + + dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev); + if (IS_ERR(dev)) { + DRM_ERROR("failed to allocate drm_device\n"); + return PTR_ERR(dev); + } + pci_set_drvdata(pdev, dev); + pci_set_master(pdev); + ret = pci_enable_device(pdev); + if (ret) { + DRM_ERROR("pci enable device fail\n"); + goto failed_enable_device; + } + + if (dc_msi_enable) { + ret = pci_enable_msi(pdev); + if (ret) + DRM_ERROR("pci enable msi fail\n"); + } + + dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); + + priv = phytium_pci_private_init(pdev, ent); + if (priv) + dev->dev_private = priv; + else + goto failed_pci_private_init; + + ret = phytium_pci_vram_init(pdev, priv); + if (ret) { + DRM_ERROR("failed to init pci vram\n"); + goto failed_pci_vram_init; + } + + ret = drm_dev_register(dev, 0); + if (ret) { + DRM_ERROR("failed to register drm dev\n"); + goto failed_register_drm; + } + + phytium_dp_hpd_irq_setup(dev, true); + + return 0; + +failed_register_drm: + phytium_pci_vram_fini(pdev, priv); +failed_pci_vram_init: + phytium_pci_private_fini(pdev, priv); +failed_pci_private_init: + if (pdev->msi_enabled) + pci_disable_msi(pdev); + pci_disable_device(pdev); +failed_enable_device: + pci_set_drvdata(pdev, NULL); + drm_dev_put(dev); + + return -1; +} + +static void phytium_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = dev->dev_private; + + phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); + drm_dev_unregister(dev); + phytium_pci_vram_fini(pdev, priv); + phytium_pci_private_fini(pdev, priv); + if (pdev->msi_enabled) + pci_disable_msi(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + drm_dev_put(dev); +} + +static void phytium_pci_shutdown(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = dev->dev_private; + + 
priv->display_shutdown(dev); +} + +static int phytium_pci_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = drm_dev->dev_private; + int ret = 0; + + if (IS_PX210(priv)) + phytium_pci_dma_init(priv); + + ret = priv->display_pm_suspend(drm_dev); + if (ret < 0) + goto out; + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + udelay(200); + +out: + return ret; +} + +static int phytium_pci_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = drm_dev->dev_private; + int ret = 0; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + pci_set_master(pdev); + + ret = priv->display_pm_resume(drm_dev); + if (IS_PX210(priv)) + phytium_pci_dma_fini(priv); + + return ret; +} + +static const struct dev_pm_ops phytium_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_pci_pm_suspend, phytium_pci_pm_resume) +}; + +static const struct phytium_device_info px210_info = { + .platform_mask = BIT(PHYTIUM_PLATFORM_PX210), + .total_pipes = 3, + .crtc_clock_max = PX210_DC_PIX_CLOCK_MAX, + .hdisplay_max = PX210_DC_HDISPLAY_MAX, + .vdisplay_max = PX210_DC_VDISPLAY_MAX, + .address_mask = PX210_DC_ADDRESS_MASK, + .backlight_max = PX210_DP_BACKLIGHT_MAX, +}; + +static const struct phytium_device_info pe220x_info = { + .platform_mask = BIT(PHYTIUM_PLATFORM_PE220X), + .total_pipes = 2, + .crtc_clock_max = PE220X_DC_PIX_CLOCK_MAX, + .hdisplay_max = PE220X_DC_HDISPLAY_MAX, + .vdisplay_max = PE220X_DC_VDISPLAY_MAX, + .address_mask = PE220X_DC_ADDRESS_MASK, + .backlight_max = PE220X_DP_BACKLIGHT_MAX, +}; + +static const struct pci_device_id phytium_display_pci_ids[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc22), (kernel_ulong_t)&px210_info }, + { PCI_VDEVICE(PHYTIUM, 
0xdc3e), (kernel_ulong_t)&pe220x_info }, + { /* End: all zeroes */ } +}; +MODULE_DEVICE_TABLE(pci, phytium_display_pci_ids); + +struct pci_driver phytium_pci_driver = { + .name = "phytium_display_pci", + .id_table = phytium_display_pci_ids, + .probe = phytium_pci_probe, + .remove = phytium_pci_remove, + .shutdown = phytium_pci_shutdown, + .driver.pm = &phytium_pci_pm_ops, +}; diff --git a/drivers/gpu/drm/phytium/phytium_pci.h b/drivers/gpu/drm/phytium/phytium_pci.h new file mode 100644 index 000000000000..92b08fcb0452 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_pci.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PCI_H__ +#define __PHYTIUM_PCI_H__ + +#include "phytium_display_drv.h" + +struct phytium_pci_private { + struct phytium_display_private base; + void (*dc_hw_vram_init)(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size); +}; + +struct phytium_dma_slave { + struct device *dma_dev; + u32 chan_id; +}; + +#define to_pci_priv(priv) container_of(priv, struct phytium_pci_private, base) + +extern struct pci_driver phytium_pci_driver; +#endif /* __PHYTIUM_PCI_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_plane.c b/drivers/gpu/drm/phytium/phytium_plane.c new file mode 100644 index 000000000000..9f35d57cd726 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_plane.c @@ -0,0 +1,632 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "phytium_display_drv.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" +#include "phytium_crtc.h" +#include "px210_dc.h" +#include "pe220x_dc.h" +#include "phytium_reg.h" + +#define PHYTIUM_CURS_W_SIZE 32 +#define PHYTIUM_CURS_H_SIZE 32 + +void phytium_plane_destroy(struct drm_plane *plane) +{ + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + + drm_plane_cleanup(plane); + kfree(phytium_plane); +} + +/** + * phytium_plane_atomic_get_property - fetch plane property value + * @plane: plane to fetch property for + * @state: state containing the property value + * @property: property to look up + * @val: pointer to write property value into + * + * The DRM core does not store shadow copies of properties for + * atomic-capable drivers. This entrypoint is used to fetch + * the current value of a driver-specific plane property. + */ +static int +phytium_plane_atomic_get_property(struct drm_plane *plane, + const struct drm_plane_state *state, + struct drm_property *property, + uint64_t *val) +{ + DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name); + return -EINVAL; +} + +/** + * phytium_plane_atomic_set_property - set plane property value + * @plane: plane to set property for + * @state: state to update property value in + * @property: property to set + * @val: value to set property to + * + * Writes the specified property value for a plane into the provided atomic + * state object. 
+ * + * Returns 0 on success, -EINVAL on unrecognized properties + */ +int +phytium_plane_atomic_set_property(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val) +{ + DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name); + return -EINVAL; +} + +struct drm_plane_state * +phytium_plane_atomic_duplicate_state(struct drm_plane *plane) +{ + struct drm_plane_state *state = NULL; + struct phytium_plane_state *phytium_state = NULL; + + phytium_state = kmemdup(plane->state, sizeof(*phytium_state), GFP_KERNEL); + + if (!phytium_state) + return NULL; + + state = &phytium_state->base; + if (state->fb) + drm_framebuffer_get(state->fb); + + state->fence = NULL; + state->commit = NULL; + + return state; +} + +void +phytium_plane_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) +{ + struct phytium_plane_state *phytium_state = to_phytium_plane_state(state); + + __drm_atomic_helper_plane_destroy_state(state); + kfree(phytium_state); +} + +const struct drm_plane_funcs phytium_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = phytium_plane_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_get_property = phytium_plane_atomic_get_property, + .atomic_set_property = phytium_plane_atomic_set_property, + .atomic_duplicate_state = phytium_plane_atomic_duplicate_state, + .atomic_destroy_state = phytium_plane_atomic_destroy_state, +}; + +static int +phytium_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *atomic_state) +{ + struct drm_plane_state *state = drm_atomic_get_new_plane_state(atomic_state, + plane); + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = state->crtc; + struct drm_crtc_state *crtc_state; + int src_x, src_y, src_w, src_h; + unsigned 
long base_offset; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + if ((!fb) || (!crtc)) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state->state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + if (phytium_crtc->scale_enable) + return -EINVAL; + if ((src_w != PHYTIUM_CURS_W_SIZE) || (src_h != PHYTIUM_CURS_W_SIZE)) { + DRM_INFO("Invalid cursor size(%d, %d)\n", src_w, src_h); + return -EINVAL; + } + } else if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + src_x = state->src_x >> 16; + src_y = state->src_y >> 16; + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + + base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; + if (base_offset & (priv->info.address_mask)) { + DRM_ERROR("fb base address is not aligned by 0x%lx byte\n", + priv->info.address_mask); + return -EINVAL; + } + + if (src_w != state->crtc_w || src_h != state->crtc_h) { + DRM_ERROR("scale not support: crtc_w(0x%x)/h(0x%x) src_w(0x%x)/h(0x%x)\n", + state->crtc_w, state->crtc_h, src_w, src_h); + return -EINVAL; + } + + if ((state->crtc_x < 0) || (state->crtc_y < 0)) { + DRM_ERROR("crtc_x(0x%x)/y(0x%x) of drm plane state is invalid\n", + state->crtc_x, state->crtc_y); + return -EINVAL; + } + + if ((state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay) + || (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)) { + DRM_ERROR("plane out of crtc region\n"); + return -EINVAL; + } + } + + return 0; +} + +static void phytium_dc_get_plane_parameter(struct drm_plane *plane) +{ + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + struct phytium_gem_object *phytium_gem_obj = NULL; + int i, num_planes = 0; + const struct drm_format_info *info; + + info = 
drm_format_info(fb->format->format); + num_planes = info ? info->num_planes : 1; + + for (i = 0; i < num_planes; i++) { + phytium_gem_obj = phytium_fb->phytium_gem_obj[i]; + phytium_plane->iova[i] = phytium_gem_obj->iova + fb->offsets[i]; + phytium_plane->size[i] = phytium_gem_obj->size - fb->offsets[i]; + + if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC) + phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE0; + else if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC) + phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE3; + else if (fb->modifier == DRM_FORMAT_MOD_LINEAR) + phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + else + phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + + if (i == 0) { + switch (fb->format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB2101010; + break; + + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB8888; + break; + + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB8888; + break; + + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_BGRA4444: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB4444; + break; + + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_BGRX4444: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB4444; + break; + + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB1555; + break; + + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB1555; 
+ break; + + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + phytium_plane->format = FRAMEBUFFER_FORMAT_RGB565; + break; + + case DRM_FORMAT_YUYV: + phytium_plane->format = FRAMEBUFFER_FORMAT_YUYV; + break; + + case DRM_FORMAT_UYVY: + phytium_plane->format = FRAMEBUFFER_FORMAT_UYVY; + break; + case DRM_FORMAT_NV16: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV16; + break; + case DRM_FORMAT_NV12: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; + break; + case DRM_FORMAT_NV21: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; + break; + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); + return; + } + + switch (fb->format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_RGB565: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_BGR565: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ABGR; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_RGBX5551: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_RGBA; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_BGRX4444: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_BGRX5551: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_BGRA; + phytium_plane->uv_swizzle = 
FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV12: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); + return; + } + } + } +} + +static void phytium_dc_primary_plane_update(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + int phys_pipe = phytium_plane->phys_pipe; + int src_x, src_y, crtc_x, crtc_y, crtc_w, crtc_h; + unsigned long base_offset; + int config; + + src_x = plane->state->src_x >> 16; + src_y = plane->state->src_y >> 16; + crtc_x = plane->state->crtc_x; + crtc_y = plane->state->crtc_y; + crtc_w = plane->state->crtc_w; + crtc_h = plane->state->crtc_h; + + if (phytium_plane->dc_hw_update_dcreq) + phytium_plane->dc_hw_update_dcreq(plane); + phytium_plane->dc_hw_update_primary_hi_addr(plane); + + /* config dc */ + /* Y */ + base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; + phytium_writel_reg(priv, (phytium_plane->iova[0] + base_offset) & ADDRESS_MASK, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[0], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE); + + /* U */ + phytium_writel_reg(priv, phytium_plane->iova[1] & 0xffffffff, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[1], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_STRIDE); + + /* V */ + phytium_writel_reg(priv, phytium_plane->iova[2] & 0xffffffff, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[2], 128), + 
priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_STRIDE); + + /* size */ + phytium_writel_reg(priv, (crtc_w & WIDTH_MASK) | ((crtc_h&HEIGHT_MASK) << HEIGHT_SHIFT), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_SIZE); + /* config */ + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config &= ~(FRAMEBUFFER_FORMAT_MASK << FRAMEBUFFER_FORMAT_SHIFT); + config |= (phytium_plane->format << FRAMEBUFFER_FORMAT_SHIFT); + config &= ~(1 << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config |= (phytium_plane->uv_swizzle << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_SWIZZLE_MASK << FRAMEBUFFER_SWIZZLE_SHIFT); + config |= (phytium_plane->swizzle << FRAMEBUFFER_SWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_TILE_MODE_MASK << FRAMEBUFFER_TILE_MODE_SHIFT); + config |= (phytium_plane->tiling[0] << FRAMEBUFFER_TILE_MODE_SHIFT); + config &= (~FRAMEBUFFER_CLEAR); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); +} + +static void phytium_dc_cursor_plane_update(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + int phys_pipe = phytium_plane->phys_pipe; + int config; + unsigned long iova; + + phytium_plane->enable = 1; + phytium_plane->cursor_hot_x = fb->hot_x; + phytium_plane->cursor_hot_y = fb->hot_y; + phytium_plane->cursor_x = plane->state->crtc_x + fb->hot_x; + phytium_plane->cursor_y = plane->state->crtc_y + fb->hot_y; + + config = CURSOR_FORMAT_ARGB8888 | + ((phytium_plane->cursor_hot_y & CURSOR_HOT_Y_MASK) << CURSOR_HOT_Y_SHIFT) | + ((phytium_plane->cursor_hot_x & CURSOR_HOT_X_MASK) << CURSOR_HOT_X_SHIFT); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + + config = ((phytium_plane->cursor_x & CURSOR_X_MASK) << CURSOR_X_SHIFT) | + 
((phytium_plane->cursor_y & CURSOR_Y_MASK) << CURSOR_Y_SHIFT); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_CURSOR_LOCATION); + iova = phytium_plane->iova[0]; + phytium_writel_reg(priv, iova & 0xffffffff, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_CURSOR_ADDRESS); + if (phytium_plane->dc_hw_update_cursor_hi_addr) + phytium_plane->dc_hw_update_cursor_hi_addr(plane, iova); +} + +static void phytium_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_framebuffer *fb, *old_fb; + + DRM_DEBUG_KMS("update plane: type=%d\n", plane->type); + if (!plane->state->crtc || !plane->state->fb) + return; + + fb = plane->state->fb; + old_fb = old_state->fb; + + if (fb) + drm_framebuffer_get(fb); + if (old_fb) + drm_framebuffer_put(old_fb); + + phytium_dc_get_plane_parameter(plane); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + phytium_dc_primary_plane_update(plane); + else if (plane->type == DRM_PLANE_TYPE_CURSOR) + phytium_dc_cursor_plane_update(plane); +} + +static void phytium_plane_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + int config; + struct drm_framebuffer *old_fb; + + old_fb = old_state->fb; + if (old_fb) + drm_framebuffer_put(old_fb); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + phytium_writel_reg(priv, CLEAR_VALUE_RED, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config |= FRAMEBUFFER_CLEAR; + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + 
PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { + phytium_writel_reg(priv, CURSOR_FORMAT_DISABLED, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + } +} + +const struct drm_plane_helper_funcs phytium_plane_helper_funcs = { + .prepare_fb = drm_gem_plane_helper_prepare_fb, + .atomic_check = phytium_plane_atomic_check, + .atomic_update = phytium_plane_atomic_update, + .atomic_disable = phytium_plane_atomic_disable, +}; + +struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int phys_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = NULL; + struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; + const uint32_t *formats = NULL; + uint32_t format_count; + const uint64_t *format_modifiers; + + phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); + if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + + phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); + if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } + phytium_plane_state->base.plane = &phytium_plane->base; + phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; + phytium_plane->base.state = &phytium_plane_state->base; + phytium_plane->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_plane->dc_hw_plane_get_format = px210_dc_hw_plane_get_primary_format; + phytium_plane->dc_hw_update_dcreq = px210_dc_hw_update_dcreq; + phytium_plane->dc_hw_update_primary_hi_addr = px210_dc_hw_update_primary_hi_addr; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } else if (IS_PE220X(priv)) { + phytium_plane->dc_hw_plane_get_format = pe220x_dc_hw_plane_get_primary_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = pe220x_dc_hw_update_primary_hi_addr; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } + + 
phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, formats, + format_count, + format_modifiers, + DRM_PLANE_TYPE_PRIMARY, "primary %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; + drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + + return phytium_plane; +failed_plane_init: + kfree(phytium_plane_state); +failed_malloc_plane_state: + kfree(phytium_plane); +failed_malloc_plane: + return ERR_PTR(ret); +} + +struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int phys_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = NULL; + struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; + const uint32_t *formats = NULL; + uint32_t format_count; + const uint64_t *format_modifiers; + + phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); + if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + + phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); + if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } + phytium_plane_state->base.plane = &phytium_plane->base; + phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; + phytium_plane->base.state = &phytium_plane_state->base; + phytium_plane->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_plane->dc_hw_plane_get_format = px210_dc_hw_plane_get_cursor_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = NULL; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } else if (IS_PE220X(priv)) { + phytium_plane->dc_hw_plane_get_format = pe220x_dc_hw_plane_get_cursor_format; + phytium_plane->dc_hw_update_dcreq = NULL; + 
phytium_plane->dc_hw_update_primary_hi_addr = NULL; + phytium_plane->dc_hw_update_cursor_hi_addr = pe220x_dc_hw_update_cursor_hi_addr; + } + + phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, + formats, format_count, + format_modifiers, + DRM_PLANE_TYPE_CURSOR, "cursor %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; + drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + + return phytium_plane; +failed_plane_init: + kfree(phytium_plane_state); +failed_malloc_plane_state: + kfree(phytium_plane); +failed_malloc_plane: + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/phytium/phytium_plane.h b/drivers/gpu/drm/phytium/phytium_plane.h new file mode 100644 index 000000000000..5527579b0348 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_plane.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_PLANE_H__ +#define __PHYTIUM_PLANE_H__ + +struct phytium_plane { + struct drm_plane base; + int phys_pipe; + unsigned long iova[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned long size[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned int format; + unsigned int tiling[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned int swizzle; + unsigned int uv_swizzle; + unsigned int rot_angle; + + /* only for cursor */ + bool enable; + bool reserve[3]; + unsigned int cursor_x; + unsigned int cursor_y; + unsigned int cursor_hot_x; + unsigned int cursor_hot_y; + + void (*dc_hw_plane_get_format)(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); + void (*dc_hw_update_dcreq)(struct drm_plane *plane); + void (*dc_hw_update_primary_hi_addr)(struct drm_plane *plane); + void (*dc_hw_update_cursor_hi_addr)(struct drm_plane *plane, uint64_t iova); +}; + +struct phytium_plane_state { + struct drm_plane_state base; +}; + +#define to_phytium_plane(x) container_of(x, struct phytium_plane, base) +#define to_phytium_plane_state(x) container_of(x, struct phytium_plane_state, base) + +struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int pipe); +struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int pipe); +#endif /* __PHYTIUM_PLANE_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_platform.c b/drivers/gpu/drm/phytium/phytium_platform.c new file mode 100644 index 000000000000..d28aadba7c30 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_platform.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium display engine DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_platform.h" +#include "phytium_dp.h" +#include "phytium_gem.h" +#include "pe220x_dc.h" +#include "pe220x_dp.h" + +int phytium_platform_carveout_mem_init(struct platform_device *pdev, + struct phytium_display_private *priv) +{ + struct resource *res; + int ret = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + priv->pool_size = resource_size(res); + priv->pool_phys_addr = res->start; + } + + if ((priv->pool_phys_addr != 0) && (priv->pool_size != 0)) { + priv->pool_virt_addr = ioremap_cache(priv->pool_phys_addr, priv->pool_size); + if (priv->pool_virt_addr == NULL) { + DRM_ERROR("failed to remap carveout mem(0x%llx)\n", priv->pool_phys_addr); + ret = -EINVAL; + goto failed_ioremap; + } + ret = phytium_memory_pool_init(&pdev->dev, priv); + if (ret) + goto failed_init_memory_pool; + + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL] = priv->pool_size; + priv->support_memory_type = MEMORY_TYPE_SYSTEM_CARVEOUT; + priv->vram_hw_init = NULL; + } else { + DRM_DEBUG_KMS("not support carveout memory\n"); + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL] = 0; + priv->support_memory_type = MEMORY_TYPE_SYSTEM_UNIFIED; + priv->vram_hw_init = NULL; + } + + return 0; + +failed_init_memory_pool: + iounmap(priv->pool_virt_addr); +failed_ioremap: + return ret; +} + +void phytium_platform_carveout_mem_fini(struct platform_device *pdev, + struct phytium_display_private *priv) +{ + if (priv->support_memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + phytium_memory_pool_fini(&pdev->dev, priv); + iounmap(priv->pool_virt_addr); + } +} + +static struct phytium_display_private * +phytium_platform_private_init(struct platform_device *pdev) +{ + struct drm_device *dev = dev_get_drvdata(&pdev->dev); + struct device_node *node; + struct fwnode_handle *np; + struct phytium_display_private *priv = NULL; + struct phytium_platform_private *platform_priv = NULL; + struct 
phytium_device_info *phytium_info = NULL; + int i = 0, ret = 0; + struct resource *res; + + platform_priv = devm_kzalloc(&pdev->dev, sizeof(*platform_priv), GFP_KERNEL); + if (!platform_priv) { + DRM_ERROR("no memory to allocate for phytium_platform_private\n"); + goto exit; + } + + memset(platform_priv, 0, sizeof(*platform_priv)); + priv = &platform_priv->base; + phytium_display_private_init(priv, dev); + + if (pdev->dev.of_node) { + phytium_info = (struct phytium_device_info *)of_device_get_match_data(&pdev->dev); + if (!phytium_info) { + DRM_ERROR("failed to get dts id data(phytium_info)\n"); + goto failed; + } + + memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info)); + node = pdev->dev.of_node; + ret = of_property_read_u8(node, "pipe_mask", &priv->info.pipe_mask); + if (ret < 0) { + dev_err(&pdev->dev, "missing pipe_mask property from dts\n"); + goto failed; + } + + ret = of_property_read_u8(node, "edp_mask", &priv->info.edp_mask); + if (ret < 0) { + dev_err(&pdev->dev, "missing edp_mask property from dts\n"); + goto failed; + } + } else if (has_acpi_companion(&pdev->dev)) { + phytium_info = (struct phytium_device_info *)acpi_device_get_match_data(&pdev->dev); + if (!phytium_info) { + DRM_ERROR("failed to get acpi id data(phytium_info)\n"); + goto failed; + } + + memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info)); + np = dev_fwnode(&(pdev->dev)); + ret = fwnode_property_read_u8(np, "pipe_mask", &priv->info.pipe_mask); + if (ret < 0) { + dev_err(&pdev->dev, "missing pipe_mask property from acpi\n"); + goto failed; + } + ret = fwnode_property_read_u8(np, "edp_mask", &priv->info.edp_mask); + if (ret < 0) { + dev_err(&pdev->dev, "missing edp_mask property from acpi\n"); + goto failed; + } + } + + priv->info.num_pipes = 0; + for_each_pipe_masked(priv, i) + priv->info.num_pipes++; + if (priv->info.num_pipes == 0) { + DRM_ERROR("num_pipes is zero, so exit init\n"); + goto failed; + } + + res = platform_get_resource(pdev, 
IORESOURCE_MEM, 0); + priv->regs = devm_ioremap_resource(&pdev->dev, res); + if (priv->regs == NULL) { + DRM_ERROR("ioremap fail, addr:0x%llx, size:0x%llx\n", res->start, res->end); + goto failed; + } + + priv->irq = platform_get_irq(pdev, 0); + if (priv->irq < 0) { + dev_err(&pdev->dev, "failed to get irq\n"); + goto failed; + } + + if (IS_PE220X(priv)) { + priv->dc_hw_clear_msi_irq = NULL; + priv->dc_hw_fb_format_check = pe220x_dc_hw_fb_format_check; + } + + return priv; + +failed: + devm_kfree(&pdev->dev, platform_priv); +exit: + return NULL; +} + +static void phytium_platform_private_fini(struct platform_device *pdev) +{ + struct drm_device *dev = dev_get_drvdata(&pdev->dev); + struct phytium_display_private *priv = dev->dev_private; + struct phytium_platform_private *platform_priv = to_platform_priv(priv); + + devm_kfree(&pdev->dev, platform_priv); +} + +static int phytium_platform_probe(struct platform_device *pdev) +{ + struct phytium_display_private *priv = NULL; + struct drm_device *dev = NULL; + int ret = 0; + + dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev); + if (IS_ERR(dev)) { + DRM_ERROR("failed to allocate drm_device\n"); + return PTR_ERR(dev); + } + + dev_set_drvdata(&pdev->dev, dev); + dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); + + priv = phytium_platform_private_init(pdev); + if (priv) + dev->dev_private = priv; + else + goto failed_platform_private_init; + + ret = phytium_platform_carveout_mem_init(pdev, priv); + if (ret) { + DRM_ERROR("failed to init system carveout memory\n"); + goto failed_carveout_mem_init; + } + + ret = drm_dev_register(dev, 0); + if (ret) { + DRM_ERROR("failed to register drm dev\n"); + goto failed_register_drm; + } + + phytium_dp_hpd_irq_setup(dev, true); + + return 0; + +failed_register_drm: + phytium_platform_carveout_mem_fini(pdev, priv); +failed_carveout_mem_init: + phytium_platform_private_fini(pdev); +failed_platform_private_init: + dev_set_drvdata(&pdev->dev, NULL); + drm_dev_put(dev); + return -1; +} 
+ +static int phytium_platform_remove(struct platform_device *pdev) +{ + struct drm_device *dev = dev_get_drvdata(&pdev->dev); + struct phytium_display_private *priv = dev->dev_private; + + phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); + drm_dev_unregister(dev); + phytium_platform_private_fini(pdev); + dev_set_drvdata(&pdev->dev, NULL); + drm_dev_put(dev); + + return 0; +} + +static void phytium_platform_shutdown(struct platform_device *pdev) +{ + struct drm_device *dev = dev_get_drvdata(&pdev->dev); + struct phytium_display_private *priv = dev->dev_private; + + priv->display_shutdown(dev); +} + +static int phytium_platform_pm_suspend(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + struct phytium_display_private *priv = drm_dev->dev_private; + + return priv->display_pm_suspend(drm_dev); +} + +static int phytium_platform_pm_resume(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + struct phytium_display_private *priv = drm_dev->dev_private; + + return priv->display_pm_resume(drm_dev); +} + +static const struct dev_pm_ops phytium_platform_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_platform_pm_suspend, phytium_platform_pm_resume) +}; + +static const struct phytium_device_info pe220x_info = { + .platform_mask = BIT(PHYTIUM_PLATFORM_PE220X), + .total_pipes = 2, + .crtc_clock_max = PE220X_DC_PIX_CLOCK_MAX, + .hdisplay_max = PE220X_DC_HDISPLAY_MAX, + .vdisplay_max = PE220X_DC_VDISPLAY_MAX, + .address_mask = PE220X_DC_ADDRESS_MASK, + .backlight_max = PE220X_DP_BACKLIGHT_MAX, +}; + +static const struct of_device_id display_of_match[] = { + { + .compatible = "phytium,dc", + .data = &pe220x_info, + }, + { } +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id display_acpi_ids[] = { + { + .id = "PHYT0015", + .driver_data = (kernel_ulong_t)&pe220x_info, + }, + {}, +}; + +MODULE_DEVICE_TABLE(acpi, display_acpi_ids); +#else +#define display_acpi_ids NULL +#endif + +struct 
platform_driver phytium_platform_driver = { + .driver = { + .name = "phytium_display_platform", + .of_match_table = of_match_ptr(display_of_match), + .acpi_match_table = ACPI_PTR(display_acpi_ids), + }, + .probe = phytium_platform_probe, + .remove = phytium_platform_remove, + .shutdown = phytium_platform_shutdown, + .driver.pm = &phytium_platform_pm_ops, +}; diff --git a/drivers/gpu/drm/phytium/phytium_platform.h b/drivers/gpu/drm/phytium/phytium_platform.h new file mode 100644 index 000000000000..42f6570b476f --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_platform.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PLATFORM_H__ +#define __PHYTIUM_PLATFORM_H__ + +struct phytium_platform_private { + struct phytium_display_private base; +}; + +#define to_platform_priv(priv) container_of(priv, struct phytium_platform_private, base) + +extern struct platform_driver phytium_platform_driver; + +#endif /* __PHYTIUM_PLATFORM_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_reg.h b/drivers/gpu/drm/phytium/phytium_reg.h new file mode 100644 index 000000000000..99ac9d4cb4d9 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_reg.h @@ -0,0 +1,365 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PHYTIUM_REG_H__ +#define __PHYTIUM_REG_H__ + +/******************************register base******************************************/ +#define PX210_PIPE_BASE(pipe) (0x8000*pipe) +#define PX210_DC_BASE(pipe) (PX210_PIPE_BASE(pipe) + 0x0000) +#define PX210_DCREQ_BASE(pipe) (PX210_PIPE_BASE(pipe) + 0x2000) +#define PX210_DP_BASE(pipe) (PX210_PIPE_BASE(pipe) + 0x3000) +#define PX210_ADDRESS_TRANSFORM_BASE 0x4000 +#define PX210_PHY_ACCESS_BASE(pipe) (PX210_PIPE_BASE(pipe) + 0x5000) + +#define PE220X_DC_BASE(pipe) (0x1000*pipe) +#define PE220X_DP_BASE(pipe) (0x4000 + 0x1000*pipe) +#define PE220X_ADDRESS_TRANSFORM_BASE 0x8000 +#define PE220X_PHY_ACCESS_BASE(pipe) (0x6000 + 0x1000*pipe) +/******************************register base end******************************************/ + +/******************************dc register start******************************************/ +#define PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS 0x1400 + #define ADDRESS_MASK 0xffffff80 +#define PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE 0x1408 +#define PHYTIUM_DC_PANEL_CONFIG 0x1418 + #define PANEL_DATAENABLE_ENABLE (1<<0) + #define PANEL_DATA_ENABLE (1<<4) + #define PANEL_CLOCK_ENABLE (1<<8) +#define PHYTIUM_DC_HDISPLAY 0x1430 + #define HDISPLAY_END_SHIFT 0 + #define HDISPLAY_END_MASK 0x7fff + #define HDISPLAY_TOTAL_SHIFT 16 + #define HDISPLAY_TOTAL_MASK 0x7fff +#define PHYTIUM_DC_HSYNC 0x1438 + #define HSYNC_START_SHIFT 0 + #define HSYNC_START_MASK 0x7fff + #define HSYNC_END_SHIFT 15 + #define HSYNC_END_MASK 0x7fff + #define HSYNC_PULSE_ENABLED (1<<30) + #define HSYNC_NEGATIVE (1<<31) +#define PHYTIUM_DC_VDISPLAY 0x1440 + #define VDISPLAY_END_SHIFT 0 + #define VDISPLAY_END_MASK 0x7fff + #define VDISPLAY_TOTAL_SHIFT 16 + #define VDISPLAY_TOTAL_MASK 0x7fff +#define PHYTIUM_DC_VSYNC 0x1448 + #define VSYNC_START_SHIFT 0 + #define VSYNC_START_MASK 0x7fff + #define VSYNC_END_SHIFT 15 + #define VSYNC_END_MASK 0x7fff + #define VSYNC_PULSE_ENABLED (1<<30) + #define VSYNC_NEGATIVE (1<<31) +#define 
PHYTIUM_DC_DISPLAY_CURRENT_LOCATION 0x1450 +#define PHYTIUM_DC_GAMMA_INDEX 0x1458 + #define GAMMA_INDEX_MAX 256 +#define PHYTIUM_DC_GAMMA_DATA 0x1460 + #define GAMMA_BLUE_SHIFT 0 + #define GAMMA_BLUE_MASK 0x3ff + #define GAMMA_GREEN_SHIFT 10 + #define GAMMA_GREEN_MASK 0x3ff + #define GAMMA_RED_SHIFT 20 + #define GAMMA_RED_MASK 0x3ff +#define PHYTIUM_DC_CURSOR_CONFIG 0x1468 + #define CURSOR_FORMAT_DISABLED 0x0 + #define CURSOR_FORMAT_MASKMODE 0x3 + #define CURSOR_FORMAT_ARGB8888 0x2 + #define CURSOR_FORMAT_MASK 0x3 + #define CURSOR_HOT_Y_SHIFT 8 + #define CURSOR_HOT_Y_MASK 0x1f + #define CURSOR_HOT_X_SHIFT 16 + #define CURSOR_HOT_X_MASK 0x1f +#define PHYTIUM_DC_CURSOR_ADDRESS 0x146c +#define PHYTIUM_DC_CURSOR_LOCATION 0x1470 + #define CURSOR_X_SHIFT 0 + #define CURSOR_X_MASK 0x7fff + #define CURSOR_Y_SHIFT 16 + #define CURSOR_Y_MASK 0x7fff +#define PHYTIUM_DC_CURSOR_BACKGROUND 0x1474 +#define PHYTIUM_DC_CURSOR_FOREGROUND 0x1478 +#define PHYTIUM_DC_INT_STATUS 0x147c + #define INT_STATUS 0x1 +#define PHYTIUM_DC_INT_ENABLE 0x1480 + #define INT_ENABLE 0x1 + #define INT_DISABLE 0x0 + +#define PHYTIUM_DC_FRAMEBUFFER_CONFIG 0x1518 + #define FRAMEBUFFER_OUTPUT BIT(0) + #define FRAMEBUFFER_GAMMA_ENABLE BIT(2) + #define FRAMEBUFFER_VALID_PENDING BIT(3) + #define FRAMEBUFFER_RESET BIT(4) + #define FRAMEBUFFER_PROGRESS BIT(6) + #define FRAMEBUFFER_ROT_ANGLE_SHIFT (11) + #define FRAMEBUFFER_ROT_ANGLE_MASK (0x7) + #define FRAMEBUFFER_ROT_ANGLE_ROT0 (0) + #define FRAMEBUFFER_ROT_ANGLE_FLIP_X (1) + #define FRAMEBUFFER_ROT_ANGLE_FLIP_Y (2) + #define FRAMEBUFFER_TILE_MODE_SHIFT (17) + #define FRAMEBUFFER_TILE_MODE_MASK (0x1f) + #define FRAMEBUFFER_LINEAR 0 + #define FRAMEBUFFER_TILE_MODE0 4 + #define FRAMEBUFFER_TILE_MODE3 7 + #define FRAMEBUFFER_FORMAT_SHIFT 26 + #define FRAMEBUFFER_FORMAT_MASK 0x3f + #define FRAMEBUFFER_FORMAT_XRGB4444 0x0 + #define FRAMEBUFFER_FORMAT_ARGB4444 0x1 + #define FRAMEBUFFER_FORMAT_XRGB1555 0x2 + #define FRAMEBUFFER_FORMAT_ARGB1555 0x3 + #define 
FRAMEBUFFER_FORMAT_RGB565 0x4 + #define FRAMEBUFFER_FORMAT_XRGB8888 0x5 + #define FRAMEBUFFER_FORMAT_ARGB8888 0x6 + #define FRAMEBUFFER_FORMAT_YUYV 0x7 + #define FRAMEBUFFER_FORMAT_UYVY 0x8 + #define FRAMEBUFFER_FORMAT_NV12 0x11 + #define FRAMEBUFFER_FORMAT_NV16 0x12 + #define FRAMEBUFFER_FORMAT_ARGB2101010 0x16 + #define FRAMEBUFFER_SWIZZLE_SHIFT 23 + #define FRAMEBUFFER_SWIZZLE_MASK 0x3 + #define FRAMEBUFFER_SWIZZLE_ARGB 0 + #define FRAMEBUFFER_SWIZZLE_RGBA 1 + #define FRAMEBUFFER_SWIZZLE_ABGR 2 + #define FRAMEBUFFER_SWIZZLE_BGRA 3 + #define FRAMEBUFFER_UVSWIZZLE_SHIFT 25 + #define FRAMEBUFFER_UVSWIZZLE_DISABLE 0 + #define FRAMEBUFFER_UVSWIZZLE_ENABLE 1 + #define FRAMEBUFFER_CLEAR BIT(8) + #define FRAMEBUFFER_SCALE_ENABLE BIT(22) +#define PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG 0x1520 + #define FRAMEBUFFER_FILTER_TAP 3 + #define FRAMEBUFFER_HORIZONTAL_FILTER_TAP 3 + #define FRAMEBUFFER_TAP 0x33 +#define PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS 0x1530 +#define PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS 0x1538 +#define PHYTIUM_DC_OVERLAY_CONFIG 0x1540 + #define PX210_DC_OVERLAY_ENABLE BIT(24) + +#define PHYTIUM_DC_FRAMEBUFFER_U_STRIDE 0x1800 +#define PHYTIUM_DC_FRAMEBUFFER_V_STRIDE 0x1808 +#define PHYTIUM_DC_FRAMEBUFFER_SIZE 0x1810 + #define WIDTH_SHIFT 0 + #define WIDTH_MASK 0x7fff + #define HEIGHT_SHIFT 15 + #define HEIGHT_MASK 0x7fff + +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X 0x1828 + #define SCALE_FACTOR_X_MASK 0x7fffffff +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y 0x1830 + #define SCALE_FACTOR_Y_MASK 0x7fffffff + #define SCALE_FACTOR_Y_MAX 0x3 + #define SCALE_FACTOR_SRC_OFFSET 16 + +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX 0x1838 + #define HORI_FILTER_INDEX 0x0 +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER 0x1a00 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX 0x1a08 + #define VERT_FILTER_INDEX 0x0 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER 0x1a10 +#define PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE 0x1a18 + #define CLEAR_VALUE_RED 0x00ff0000 + #define CLEAR_VALUE_GREEN 
0x0000ff00 + #define CLEAR_VALUE_BLACK 0x00000000 +#define PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET 0x1a20 + #define INITIALOFFSET (0x8000 | (0X8000 << 16)) +#define PHYTIUM_DC_DP_CONFIG 0x1cd0 + #define OUTPUT_DP (1<<3) + #define DP_RGB666 (0x1) + #define DP_RGB888 (0x2) + #define DP_RGB101010 (0x3) +/******************************dc register end********************************************/ + +/******************************phy access register****************************************/ +#define PHYTIUM_PHY_ACCESS_ADDRESS 0x0000 +#define PHYTIUM_PHY_WRITE_DATA 0x0004 +#define PHYTIUM_PHY_READ_DATA 0x0008 +#define PHYTIUM_PHY_ACCESS_CTRL 0x000c + #define ACCESS_WRITE (1<<0) + #define ACCESS_READ (1<<1) +/******************************phy access register end*************************************/ + +/******************************dp register start******************************************/ +#define PHYTIUM_DP_LINK_BW_SET 0x0000 +#define PHYTIUM_DP_LANE_COUNT_SET 0x0004 +#define PHYTIUM_DP_ENHANCED_FRAME_EN 0x0008 + #define ENHANCED_FRAME_ENABLE 0x1 + #define ENHANCED_FRAME_DISABLE 0x0 +#define PHYTIUM_DP_TRAINING_PATTERN_SET 0x000c + #define TRAINING_OFF 0x0 + #define TRAINING_PATTERN_1 0x1 + #define TRAINING_PATTERN_2 0x2 + #define TRAINING_PATTERN_3 0x3 + #define TRAINING_PATTERN_4 0x4 +#define PHYTIUM_DP_LINK_QUAL_PATTERN_SET 0x0010 + #define TEST_PATTERN_NONE 0x0 + #define TEST_PATTERN_D10_2 0x1 + #define TEST_PATTERN_SYMBOL_ERROR 0x2 + #define TEST_PATTERN_PRBS7 0x3 + #define TEST_PATTERN_80BIT_CUSTOM 0x4 + #define TEST_PATTERN_CP2520_1 0x5 + #define TEST_PATTERN_CP2520_2 0x6 + #define TEST_PATTERN_CP2520_3 0x7 + #define TEST_PATTERN_LANE_SHIFT 8 +#define PHYTIUM_DP_SCRAMBLING_DISABLE 0x0014 + #define SCRAMBLING_ENABLE 0x0 + #define SCRAMBLING_DISABLE 0x1 +#define PHYTIUM_DP_DOWNSPREAD_CTRL 0x0018 +#define PHYTIUM_DP_ALT_SCRAMBLER_RESET 0x001c +#define PHYTIUM_DP_HBR2_SCRAMBLER_RESET 0x0020 +#define PHYTIUM_DP_DISPLAYPORT_VERSION 0x0024 +#define 
PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0 0x0030 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1 0x0034 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2 0x0038 +#define PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE 0x0080 + #define TRANSMITTER_OUTPUT_ENABLE BIT(0) + #define TRANSMITTER_OUTPUT_DISABLE 0 +#define PHYTIUM_DP_VIDEO_STREAM_ENABLE 0x0084 + #define SST_MST_SOURCE_0_ENABLE BIT(0) + #define SST_MST_SOURCE_0_ENABLE_MASK 0x1 + #define SST_MST_SOURCE_0_DISABLE 0 +#define PHYTIUM_DP_SECONDARY_STREAM_ENABLE 0x0088 + #define SECONDARY_STREAM_ENABLE 0x1 + #define SECONDARY_STREAM_DISABLE 0x0 +#define PHYTIUM_DP_SEC_DATA_WINDOW 0x008C +#define PHYTIUM_DP_SOFT_RESET 0x0090 + #define LINK_SOFT_RESET (0x1 << 0) + #define VIDEO_SOFT_RESET (0x1 << 1) +#define PHYTIUM_INPUT_SOURCE_ENABLE 0x0094 + #define VIRTUAL_SOURCE_0_ENABLE BIT(0) + #define VIRTUAL_SOURCE_0_ENABLE_MASK 0x1 +#define PHYTIUM_DP_FORCE_SCRAMBLER_RESET 0x00C0 + #define SCRAMBLER_RESET BIT(0) +#define PHYTIUM_DP_SOURCE_CONTROL_STATUS 0x00C4 +#define PHYTIUM_DP_DATA_CONTROL 0x00C8 +#define PHYTIUM_DP_CORE_CAPABILITY 0x00F8 +#define PHYTIUM_DP_CORE_ID 0x00FC +#define PHYTIUM_DP_AUX_COMMAND 0x0100 + #define BYTE_COUNT_MASK 0xf + #define COMMAND_SHIFT 8 + #define COMMAND_MASK 0xf + #define ADDRESS_ONLY (1<<12) +#define PHYTIUM_DP_AUX_WRITE_FIFO 0x0104 +#define PHYTIUM_DP_AUX_ADDRESS 0x0108 +#define PHYTIUM_DP_AUX_CLK_DIVIDER 0x010C + #define AUX_CLK_DIVIDER 48 + #define AUX_CLK_DIVIDER_100 100 +#define PHYTIUM_DP_SINK_HPD_STATE 0x0128 + #define HPD_CONNECT 0x1 + #define HPD_DISCONNECT 0x0 +#define PHYTIUM_DP_INTERRUPT_RAW_STATUS 0x0130 + #define REPLY_TIMEOUT (1<<3) + #define DP_STATUS_REQUEST_IN_PROGRESS (1<<1) + #define HPD_STATE (0<<1) +#define PHYTIUM_DP_AUX_REPLY_DATA 0x0134 +#define PHYTIUM_DP_AUX_REPLY_CODE 0x0138 + #define AUX_NATIVE_ACK (0x0<<0) + #define AUX_NATIVE_NACK (0x1<<0) + #define AUX_NATIVE_DEFER (0x2<<0) + #define AUX_NATIVE_MASK (0x3 << 0) + #define AUX_I2C_ACK (0x0<<2) + #define AUX_I2C_NACK (0x1<<2) + #define 
AUX_I2C_DEFER (0x2<<2) + #define AUX_I2C_MASK (0x3 << 2) +#define PHYTIUM_DP_INTERRUPT_STATUS 0x0140 + #define HPD_IRQ (1<<1) + #define HPD_EVENT (1<<0) +#define PHYTIUM_DP_INTERRUPT_MASK 0x0144 + #define HPD_IRQ_MASK (1<<1) + #define HPD_EVENT_MASK (1<<0) + #define HPD_OTHER_MASK 0x3c +#define PHYTIUM_DP_AUX_REPLY_DATA_COUNT 0x0148 +#define PHYTIUM_DP_AUX_STATUS 0x014C + #define REPLY_RECEIVED 0x1 + #define REPLY_IN_PROGRESS 0x2 + #define REQUEST_IN_PROGRESS 0x4 + #define REPLY_ERROR 0x8 +#define PHYTIUM_DP_AUX_TIMER 0x0158 +#define PHYTIUM_DP_MAIN_LINK_HTOTAL 0x0180 +#define PHYTIUM_DP_MAIN_LINK_VTOTAL 0x0184 +#define PHYTIUM_DP_MAIN_LINK_POLARITY 0x0188 + #define VSYNC_POLARITY_LOW BIT(1) + #define HSYNC_POLARITY_LOW BIT(0) +#define PHYTIUM_DP_MAIN_LINK_HSWIDTH 0x018C +#define PHYTIUM_DP_MAIN_LINK_VSWIDTH 0x0190 +#define PHYTIUM_DP_MAIN_LINK_HRES 0x0194 +#define PHYTIUM_DP_MAIN_LINK_VRES 0x0198 +#define PHYTIUM_DP_MAIN_LINK_HSTART 0x019C +#define PHYTIUM_DP_MAIN_LINK_VSTART 0x01A0 +#define PHYTIUM_DP_MAIN_LINK_MISC0 0x01A4 + #define MISC0_SYNCHRONOUS_CLOCK BIT(0) + #define MISC0_BIT_DEPTH_OFFSET 5 + #define MISC0_BIT_DEPTH_6BIT 0x0 + #define MISC0_BIT_DEPTH_8BIT 0x1 + #define MISC0_BIT_DEPTH_10BIT 0x2 + #define MISC0_COMPONENT_FORMAT_SHIFT 1 + #define MISC0_COMPONENT_FORMAT_RGB 0x0 +#define PHYTIUM_DP_MAIN_LINK_MISC1 0x01A8 +#define PHYTIUM_DP_M_VID 0x01AC +#define PHYTIUM_DP_TRANSFER_UNIT_SIZE 0x01B0 +#define PHYTIUM_DP_N_VID 0x01B4 +#define PHYTIUM_DP_USER_PIXEL_WIDTH 0x01B8 +#define PHYTIUM_DP_DATA_COUNT 0x01BC +#define PHYTIUM_DP_INTERLACED 0x01C0 +#define PHYTIUM_DP_USER_SYNC_POLARITY 0x01C4 + #define USER_ODDEVEN_POLARITY_HIGH BIT(3) + #define USER_DATA_ENABLE_POLARITY_HIGH BIT(2) + #define USER_VSYNC_POLARITY_HIGH BIT(1) + #define USER_HSYNC_POLARITY_HIGH BIT(0) +#define PHYTIUM_DP_USER_CONTROL 0x01C8 +#define PHYTIUM_EDP_CRC_ENABLE 0x01D0 + #define SUPPORT_EDP_1_4 BIT(1) +#define PHYTIUM_EDP_CRC_RED 0x01D4 +#define PHYTIUM_EDP_CRC_GREEN 0x01D8 +#define 
PHYTIUM_EDP_CRC_BLUE 0x01DC +#define PHYTIUM_DP_SEC_AUDIO_ENABLE 0x0300 + #define SEC_AUDIO_ENABLE BIT(0) + #define CHANNEL_MUTE_ENABLE BIT(1) +#define PHYTIUM_DP_SEC_INPUT_SELECT 0x0304 + #define INPUT_SELECT_I2S 0x0 +#define PHYTIUM_DP_SEC_CHANNEL_COUNT 0x0308 + #define CHANNEL_2 0x2 + #define CHANNEL_2_LFE 0x3 + #define CHANNEL_5_1 0x6 + #define CHANNEL_7_1 0x7 + #define CHANNEL_MASK 0xf +#define PHYTIUM_DP_SEC_DIRECT_CLKDIV 0x030c + #define APB_CLOCK 48000000 +#define PHYTIUM_DP_SEC_MAUD 0x0318 +#define PHYTIUM_DP_SEC_NAUD 0x031c +#define PHYTIUM_DP_SEC_CLOCK_MODE 0x0320 + #define CLOCK_MODE_SYNC 0x1 +#define PHYTIUM_DP_SEC_CS_SOURCE_FORMAT 0x0340 + #define CS_SOURCE_FORMAT_DEFAULT 0x0 +#define PHYTIUM_DP_SEC_CS_CATEGORY_CODE 0x0344 +#define PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ 0x0348 + #define ORIG_FREQ_32000 0xc + #define ORIG_FREQ_44100 0xf + #define ORIG_FREQ_48000 0xd + #define ORIG_FREQ_88200 0x7 + #define ORIG_FREQ_96000 0x5 + #define ORIG_FREQ_176400 0x3 + #define ORIG_FREQ_192000 0x1 + #define ORIG_FREQ_MASK 0xf + #define ORIG_FREQ_SHIFT 0 + #define WORD_LENGTH_16 0x4 + #define WORD_LENGTH_18 0x2 + #define WORD_LENGTH_20 0xc + #define WORD_LENGTH_24 0xd + #define WORD_LENGTH_MASK 0xf + #define WORD_LENGTH_SHIFT 4 +#define PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY 0x034c // not used + #define SAMPLING_FREQ_32000 0xc + #define SAMPLING_FREQ_44100 0x0 + #define SAMPLING_FREQ_48000 0x4 + #define SAMPLING_FREQ_88200 0x1 + #define SAMPLING_FREQ_96000 0x5 + #define SAMPLING_FREQ_176400 0x3 + #define SAMPLING_FREQ_192000 0x7 + #define SAMPLING_FREQ_MASK 0xf + #define SAMPLING_FREQ_SHIFT 4 +#define PHYTIUM_DP_SEC_CHANNEL_MAP 0x035C + #define CHANNEL_MAP_DEFAULT 0x87654321 +/******************************dp register end********************************************/ + +#endif /* __PHYTIUM_REG_H__ */ diff --git a/drivers/gpu/drm/phytium/px210_dc.c b/drivers/gpu/drm/phytium/px210_dc.c new file mode 100644 index 000000000000..ae022f9fe3fb --- /dev/null +++ 
b/drivers/gpu/drm/phytium/px210_dc.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "px210_reg.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +static const unsigned int px210_primary_formats[] = { + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_YUYV, + DRM_FORMAT_UYVY, +}; + +static uint64_t px210_primary_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC, + DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC, + DRM_FORMAT_MOD_INVALID +}; + +static uint64_t px210_cursor_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const unsigned int px210_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +void px210_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size) +{ + uint32_t config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR); + if (config) + phytium_writel_reg(priv, config, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR); + + config = 
phytium_readl_reg(priv, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SIZE); + if (config) + phytium_writel_reg(priv, config, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SIZE); + + config = phytium_readl_reg(priv, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR); + if (config) + phytium_writel_reg(priv, config, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR); + + phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, + group_offset, PX210_DC_ADDRESS_TRANSFORM_SRC_ADDR); + phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, + group_offset, PX210_DC_ADDRESS_TRANSFORM_SIZE); + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DST_ADDR); + phytium_writel_reg(priv, config, group_offset, PX210_DC_ADDRESS_TRANSFORM_DST_ADDR); +} + +void px210_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe) +{ + phytium_writel_reg(priv, MSI_CLEAR, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_MSI_CLEAR); +} + +void px210_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; + int ret = 0; + + /* config pix clock */ + phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set pixel clock\n", __func__); +} + +void px210_dc_hw_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int reset_timeout = 100; + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + // 
reset dc + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe], + PX210_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL); + do { + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_IDLE); + if (config | IS_IDLE) + break; + mdelay(1); + reset_timeout--; + } while (reset_timeout); + + /* reset pix clock */ + px210_dc_hw_config_pix_clock(crtc, 0); + + // reset dc + reset_timeout = 100; + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe], + PX210_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL); + do { + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_IDLE); + if (config | IS_IDLE) + break; + mdelay(1); + reset_timeout--; + } while (reset_timeout); + + /* reset dcreq */ + phytium_writel_reg(priv, DCREQ_PLAN_A, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_PLAN); + phytium_writel_reg(priv, 0, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_CONTROL); + phytium_writel_reg(priv, DCREQ_RESET, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_RESET); + msleep(20); + phytium_writel_reg(priv, (~DCREQ_RESET)&DCREQ_RESET_MASK, + priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_RESET); +} + +int px210_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count) +{ + int ret = 0; + + switch (mode_cmd->modifier[count]) { + case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC: + switch (mode_cmd->pixel_format) { + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_BGRX4444: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case 
DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + break; + default: + DRM_ERROR("TILE_MODE0_FBCDC not support DRM_FORMAT %d", + mode_cmd->pixel_format); + ret = -EINVAL; + goto error; + } + break; + case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC: + switch (mode_cmd->pixel_format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + break; + default: + DRM_ERROR("TILE_MODE3_FBCDC not support DRM_FORMAT %d", + mode_cmd->pixel_format); + ret = -EINVAL; + goto error; + } + break; + case DRM_FORMAT_MOD_LINEAR: + break; + default: + DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[0]); + ret = -EINVAL; + goto error; + } + + return 0; +error: + return ret; +} + +void px210_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = px210_primary_formats_modifiers; + *formats = px210_primary_formats; + *format_count = ARRAY_SIZE(px210_primary_formats); +} + +void px210_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = px210_cursor_formats_modifiers; + *formats = px210_cursor_formats; + *format_count = ARRAY_SIZE(px210_cursor_formats); +} + +void px210_dc_hw_update_dcreq(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = 
phytium_plane->phys_pipe; + uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; + int config; + + if (phytium_plane->tiling[0] == FRAMEBUFFER_LINEAR) { + phytium_writel_reg(priv, DCREQ_MODE_LINEAR, + group_offset, PX210_DCREQ_PLANE0_CONFIG); + } else { + config = DCREQ_NO_LOSSY; + if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE0) + config |= DCREQ_TILE_TYPE_MODE0; + else if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE3) + config |= DCREQ_TILE_TYPE_MODE3; + else + config |= DCREQ_TILE_TYPE_MODE0; + + switch (phytium_plane->format) { + case FRAMEBUFFER_FORMAT_ARGB8888: + case FRAMEBUFFER_FORMAT_XRGB8888: + config |= DCREQ_COLOURFORMAT_BGRA8888; + break; + case FRAMEBUFFER_FORMAT_ARGB2101010: + config |= DCREQ_COLOURFORMAT_ARGB2101010; + break; + case FRAMEBUFFER_FORMAT_XRGB4444: + case FRAMEBUFFER_FORMAT_ARGB4444: + config |= DCREQ_COLOURFORMAT_ARGB4444; + break; + case FRAMEBUFFER_FORMAT_XRGB1555: + case FRAMEBUFFER_FORMAT_ARGB1555: + config |= DCREQ_COLOURFORMAT_ARGB1555; + break; + case FRAMEBUFFER_FORMAT_RGB565: + config |= DCREQ_COLOURFORMAT_RGB565; + break; + case FRAMEBUFFER_FORMAT_YUYV: + config |= DCREQ_COLOURFORMAT_YUYV; + break; + case FRAMEBUFFER_FORMAT_UYVY: + config |= DCREQ_COLOURFORMAT_UYVY; + break; + } + config |= DCREQ_ARGBSWIZZLE_ARGB; + config |= DCREQ_MODE_TILE; + phytium_writel_reg(priv, phytium_plane->iova[0] & 0xffffffff, + group_offset, PX210_DCREQ_PLANE0_ADDR_START); + phytium_writel_reg(priv, (phytium_plane->iova[0] + phytium_plane->size[0]) & + 0xffffffff, group_offset, PX210_DCREQ_PLANE0_ADDR_END); + phytium_writel_reg(priv, config, group_offset, PX210_DCREQ_PLANE0_CONFIG); + } +} + +void px210_dc_hw_update_primary_hi_addr(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + + phytium_writel_reg(priv, (phytium_plane->iova[0] >> 
PREFIX_SHIFT) & PREFIX_MASK, + priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_PIX_DMA_PREFIX); +} diff --git a/drivers/gpu/drm/phytium/px210_dc.h b/drivers/gpu/drm/phytium/px210_dc.h new file mode 100644 index 000000000000..1d8220faadc7 --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PX210_DC_H__ +#define __PX210_DC_H__ + +#define PX210_DC_PIX_CLOCK_MAX (594000) +#define PX210_DC_HDISPLAY_MAX 3840 +#define PX210_DC_VDISPLAY_MAX 2160 +#define PX210_DC_ADDRESS_MASK 0x7f + +extern void px210_dc_hw_vram_init(struct phytium_display_private *priv, + resource_size_t vram_addr, + resource_size_t vram_size); +extern void px210_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe); +extern void px210_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock); +extern void px210_dc_hw_disable(struct drm_crtc *crtc); +extern int px210_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count); +extern void px210_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void px210_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +void px210_dc_hw_update_dcreq(struct drm_plane *plane); +void px210_dc_hw_update_primary_hi_addr(struct drm_plane *plane); +#endif /* __PX210_DC_H__ */ diff --git a/drivers/gpu/drm/phytium/px210_dp.c b/drivers/gpu/drm/phytium/px210_dp.c new file mode 100644 index 000000000000..be3c520a3c09 --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dp.c @@ -0,0 +1,920 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include "phytium_display_drv.h" +#include "px210_reg.h" +#include "phytium_dp.h" +#include "px210_dp.h" + +static uint8_t px210_dp_source_lane_count[3] = {4, 4, 1}; + +/* [reg][ling_rate 1.62->8.1] */ +static int vco_val[12][4] = { + {0x0509, 0x0509, 0x0509, 0x0509}, // CP_PADJ + {0x0f00, 0x0f00, 0x0f00, 0x0f00}, // CP_IADJ + {0x0F08, 0x0F08, 0x0F08, 0x0F08}, // FILT_PADJ + {0x0061, 0x006C, 0x006C, 0x0051}, // INTDIV + {0x3333, 0x0000, 0x0000, 0x0000}, // FRACDIVL + {0x0000, 0x0000, 0x0000, 0x0000}, // FRACDIVH + {0x0042, 0x0048, 0x0048, 0x0036}, // HIGH_THR + {0x0002, 0x0002, 0x0002, 0x0002}, // PDIAG_CTRL + {0x0c5e, 0x0c5e, 0x0c5e, 0x0c5e}, // VCOCAL_PLLCNT_START + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PEFCNT + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PLLCNT_START + {0x0005, 0x0005, 0x0005, 0x0005}, // LOCK_PLLCNT_THR +}; + +static int mgnfs_val[4][4][4] = // [link_rate][swing][emphasis] +{ + /* 1.62Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 2.7Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 5.4Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0018, 0x006, 0x0000, 0x0000}, + {0x000c, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 8.1Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0013, 0x006, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int cpost_val[4][4][4] = // [link_rate][swing][emphasis] +{ + /* 1.62Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 2.7Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 
0x0000}, + }, + + /* 5.4Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 8.1Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int px210_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate) +{ + int port = phytium_dp->port%3; + int i = 0, data, tmp, tmp1, index = 0, mask; + int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { + /* set pma powerdown */ + data = 0; + mask = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (A3_POWERDOWN3 << i*A3_POWERDOWN3_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); + mask |= (((1<port%3; + int voltage_swing = 0; + int pre_emphasis = 0, link_rate_index = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + default: + voltage_swing = 0; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + voltage_swing = 1; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + voltage_swing = 2; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + voltage_swing = 3; + break; + } + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_0: + default: + pre_emphasis = 0; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_1: + pre_emphasis = 1; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + pre_emphasis = 2; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + pre_emphasis = 3; + break; + } + + switch (link_rate) { + case 810000: + link_rate_index = 3; + break; + case 540000: + link_rate_index = 2; + break; + case 270000: + link_rate_index = 1; + break; + case 162000: + 
link_rate_index = 0; + break; + default: + DRM_ERROR("phytium dp rate(%d) not support\n", link_rate); + link_rate_index = 2; + break; + } + + if (port == 0) { + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DIAG_ACYA, UNLOCK); + + } else if (port == 1) { + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_CPOST1, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DIAG_ACYA, UNLOCK); + } else { + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DIAG_ACYA, UNLOCK); + } +} + +static int px210_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) +{ + int port = 
phytium_dp->port; + int i = 0, data, tmp, mask; + int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { + phytium_phy_writel(phytium_dp, PX210_PHY0_APB_RESET, APB_RESET); + + phytium_phy_writel(phytium_dp, PX210_PHY0_PIPE_RESET, RESET); + + /* config lane to dp mode */ + data = 0; + mask = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (LANE_BIT << i*LANE_BIT_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (LANE_MASTER << i*LANE_MASTER_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (BIT_20 << i*BIT_20_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); + mask |= (((1<dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweron panel\n", __func__); +} + +static void px210_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweroff panel\n", __func__); +} + +static void px210_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = 
dev->dev_private; + int port = phytium_dp->port, ret = 0; + uint32_t group_offset = priv->dcreq_reg_base[port]; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to enable backlight\n", __func__); +} + +static void px210_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to disable backlight\n", __func__); +} + +static uint32_t px210_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); + return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); +} + +static int px210_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int config = 0; + int ret = 0; + + if (level > PX210_DP_BACKLIGHT_MAX) { + ret = -EINVAL; + goto out; + } + + config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); + phytium_writel_reg(priv, config, group_offset, PX210_DCREQ_CMD_REGISTER); + 
ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set backlight\n", __func__); + +out: + return ret; +} + +bool px210_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + + return ((config & DP_SPREAD_ENABLE(port)) ? true:false); +} + +int px210_dp_hw_reset(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int timeout = 100, config, ret = 0; + uint32_t group_offset = priv->address_transform_base; + uint32_t group_offset_dp = priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + config &= (~DC_DP_RESET_STATUS(port)); + + phytium_writel_reg(priv, config, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + phytium_writel_reg(priv, FLAG_REQUEST | CMD_DC_DP_RESET, + priv->dcreq_reg_base[port], PX210_DCREQ_CMD_REGISTER); + do { + mdelay(10); + timeout--; + config = phytium_readl_reg(priv, group_offset, + PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + if (config & DC_DP_RESET_STATUS(port)) + break; + } while (timeout); + if (timeout == 0) { + DRM_ERROR("reset dc/dp pipe(%d) failed\n", port); + ret = -1; + } + + phytium_writel_reg(priv, AUX_CLK_DIVIDER, group_offset_dp, PHYTIUM_DP_AUX_CLK_DIVIDER); + + return ret; +} + +uint8_t px210_dp_hw_get_source_lane_count(struct phytium_dp_device *phytium_dp) +{ + return px210_dp_source_lane_count[phytium_dp->port]; +} + +static struct phytium_dp_func px210_dp_funcs = { + .dp_hw_get_source_lane_count = px210_dp_hw_get_source_lane_count, 
+ .dp_hw_reset = px210_dp_hw_reset, + .dp_hw_spread_is_enable = px210_dp_hw_spread_is_enable, + .dp_hw_set_backlight = px210_dp_hw_set_backlight, + .dp_hw_get_backlight = px210_dp_hw_get_backlight, + .dp_hw_disable_backlight = px210_dp_hw_disable_backlight, + .dp_hw_enable_backlight = px210_dp_hw_enable_backlight, + .dp_hw_poweroff_panel = px210_dp_hw_poweroff_panel, + .dp_hw_poweron_panel = px210_dp_hw_poweron_panel, + .dp_hw_init_phy = px210_dp_hw_init_phy, + .dp_hw_set_phy_lane_setting = px210_dp_hw_set_phy_lane_setting, + .dp_hw_set_phy_lane_and_rate = px210_dp_hw_set_phy_lane_and_rate, +}; + +void px210_dp_func_register(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->funcs = &px210_dp_funcs; +} diff --git a/drivers/gpu/drm/phytium/px210_dp.h b/drivers/gpu/drm/phytium/px210_dp.h new file mode 100644 index 000000000000..f2436ace1845 --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dp.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PX210_DP_H__ +#define __PX210_DP_H__ + +#define PX210_DP_BACKLIGHT_MAX 100 + +void px210_dp_func_register(struct phytium_dp_device *phytium_dp); +#endif /* __PX210_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/px210_reg.h b/drivers/gpu/drm/phytium/px210_reg.h new file mode 100644 index 000000000000..e594fbc8d96f --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_reg.h @@ -0,0 +1,349 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PX210_REG_H__ +#define __PX210_REG_H__ + +#include "phytium_reg.h" + +/******************************dc register start******************************************/ +#define PX210_DC_CLOCK_CONTROL 0x0000 + #define SOFT_RESET (1<<12) +#define PX210_DC_CLOCK_IDLE 0x0004 + #define IS_IDLE (1<<16) +/******************************dc register end********************************************/ + +/******************************dcreq register start**************************************/ +#define PX210_DCREQ_PLANE0_ADDR_START 0x00 +#define PX210_DCREQ_PLANE0_ADDR_END 0x04 +#define PX210_DCREQ_PLANE1_ADDR_START 0x08 +#define PX210_DCREQ_PLANE1_ADDR_END 0x0c +#define PX210_DCREQ_PLANE0_CONFIG 0x10 + #define DCREQ_NO_LOSSY (0 << 0) + #define DCREQ_LOSSY (1 << 0) + #define DCREQ_TILE_TYPE_MASK (0x3 << 1) + #define DCREQ_TILE_TYPE_MODE0 (0x1 << 1) + #define DCREQ_TILE_TYPE_MODE3 (0x2 << 1) + #define DCREQ_COLOURFORMAT_MASK (0x7f << 8) + #define DCREQ_COLOURFORMAT_RGB565 (0x5 << 8) + #define DCREQ_COLOURFORMAT_ARGB1555 (0x4 << 8) + #define DCREQ_COLOURFORMAT_ARGB4444 (0x02 << 8) + #define DCREQ_COLOURFORMAT_BGRA8888 (0x29 << 8) + #define DCREQ_COLOURFORMAT_ARGB2101010 (0xe << 8) + #define DCREQ_COLOURFORMAT_YUYV (0x59 << 8) + #define DCREQ_COLOURFORMAT_UYVY (0x5b << 8) + #define DCREQ_ARGBSWIZZLE_MASK (0xf << 4) + #define DCREQ_ARGBSWIZZLE_ARGB (0X0 << 4) + #define DCREQ_ARGBSWIZZLE_BGRA (0XC << 4) + #define DCREQ_MODE_MASK (1 << 16) + #define DCREQ_MODE_LINEAR (0 << 16) + #define DCREQ_MODE_TILE (1 << 16) +#define PX210_DCREQ_PLANE1_CONFIG(pipe) 0x14 +#define PX210_DCREQ_PLANE0_CLEAR_COLOR_L 0x18 +#define PX210_DCREQ_PLANE0_CLEAR_COLOR_H 0x1C +#define PX210_DCREQ_PLANE1_CLEAR_COLOR_L 0x20 +#define PX210_DCREQ_PLANE1_CLEAR_COLOR_H 0x24 +#define PX210_DCREQ_CMD_REGISTER 0x38 + #define FLAG_REPLY (1<<31) + #define FLAG_REQUEST (1<<30) + #define CMD_PIXEL_CLOCK (0x0 << 28) + #define CMD_BACKLIGHT (0x1 << 28) + #define CMD_DC_DP_RESET (0x3 << 28) + #define 
BACKLIGHT_SHIFT 21 + #define BACKLIGHT_MASK 0x7f + #define BACKLIGHT_MAX 100 + #define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) + #define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) + #define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) + #define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) + #define PIXEL_CLOCK_MASK (0x1fffff) +#define PX210_DCREQ_FBCD_CLOCK_CONFIG 0x3c +#define PX210_DCREQ_PIX_DMA_PREFIX 0x50 + #define PREFIX_MASK 0xff + #define PREFIX_SHIFT 32 +#define PX210_DCREQ_FRAME_START 0x54 +#define PX210_DCREQ_FILTER_CONFIG 0x58 +#define PX210_DCREQ_CONTROL 0x5C + #define DC_REQ_ENABLE (1<<0) +#define PX210_DCREQ_MSI_CLEAR 0x60 + #define MSI_CLEAR 0x0 +#define PX210_DCREQ_RESET 0x68 + #define DCREQ_RESET (0x3 << 0) + #define DCREQ_RESET_MASK 0x3 +#define PX210_DCREQ_PLAN 0x94 + #define DCREQ_PLAN_A 0x0 + #define DCREQ_PLAN_B 0X5 +/******************************dcreq register end**************************************/ + +/******************************address transform register start**************************/ +#define PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR 0x0 +#define PX210_GPU_ADDRESS_TRANSFORM_SIZE 0x4 +#define PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR 0x8 + +#define PX210_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x24 + #define SRC_ADDR_OFFSET 22 + #define SRC_ADDR_MASK 0xffffffffff +#define PX210_DC_ADDRESS_TRANSFORM_SIZE 0x28 + #define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) + #define SIZE_OFFSET 22 +#define PX210_DC_ADDRESS_TRANSFORM_DST_ADDR 0x2c + #define DST_ADDR_OFFSET 22 +#define PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 + #define DC_DP_RESET_STATUS(pipe) (1 << pipe) + #define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) +#define PX210_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c + #define BACKLIGHT_VALUE_MASK (0x7f) + #define BACKLIGHT_VALUE_SHIFT 16 +/******************************address transform register end**************************/ + +/******************************phy register start******************************************/ +/* self define */ +#define 
PX210_PHY0_PIPE_RESET 0x40104 + #define RESET 0x0 + #define RESET_DEASSERT 0x1 +#define PX210_PHY1_PIPE_RESET 0x100100 + #define PHY1_PIPE_RESET 0x0 + #define PHY1_PIPE_RESET_DEASSERT 0x4 + +#define PX210_PHY1_EN_REFCLK 0x100070 + +#define PX210_PHY0_MODE 0x40088 + #define LANE_BIT (0x3) + #define LANE_BIT_SHIFT 0x2 +#define PX210_PHY1_SEL 0x100004 + #define PHY1_DP_LANE_BIT 0x1 + #define PHY1_DP_LANE_BIT_SHIFT 2 + +#define PX210_PHY0_LINK_CFG 0x40044 + #define LANE_MASTER 0x1 + #define LANE_MASTER_SHIFT 1 + +#define PX210_PHY0_PLL_EN 0x40010 + #define PLL_EN 0x1 + #define PLL_EN_SHIFT 1 +#define PX210_PHY0_PMA_WIDTH 0x40020 + #define BIT_20 0x5 + #define BIT_20_SHIFT 4 + +#define PX210_PHY0_PMA0_POWER 0x40014 +#define PX210_PHY0_PMA1_POWER 0x40018 + #define A0_ACTIVE 0x1 + #define A0_ACTIVE_SHIFT 8 + #define A3_POWERDOWN3 0x8 + #define A3_POWERDOWN3_SHIFT 8 + +#define PX210_PHY1_PMA_MISC 0x1000a0 + #define PHY1_PLL_EN 0x1 + #define PHY1_PLL_EN_MASK 1 + #define PHY1_PLL_EN_SHIFT 8 + #define PHY1_BIT_20 0x5 + #define PHY1_BIT_20_SHIFT 9 + #define PHY1_A0_ACTIVE 0x1 + #define PHY1_A0_ACTIVE_SHIFT 2 + #define PHY1_A0_ACTIVE_MASK 0x3f + #define PHY1_A3_POWERDOWN3 0x8 + #define PHY1_A3_POWERDOWN3_MASK 0x3f + #define PHY1_A3_POWERDOWN3_SHIFT 2 + +#define PX210_PHY0_LINK_RESET 0x40108 + #define LINK_RESET 0x1 + #define LINK_RESET_MASK 0x1 + #define LINTK_RESET_SHIFT 0x1 + +#define PX210_PHY0_APB_RESET 0x40100 + #define APB_RESET 0x1 +#define PX210_PHY1_APB_RESET 0x100104 + #define PHY1_APB_RESET 0x4 + +/* phy origin register */ +#define PX210_PHY0_PLL_CFG 0x30038 +#define PX210_PHY1_PLL_CFG 0xb0038 + #define SINGLE_LINK 0x0 + #define DOUBLE_LINK 0x2 + +#define PX210_PHY0_PMA_CONTROL 0x3800c +#define PX210_PHY1_PMA_CONTROL 0xb800c + #define CONTROL_ENABLE 0x1 + #define CONTROL_ENABLE_MASK 0x1 + #define CONTROL_ENABLE_SHIFT 0x1 + +#define PX210_PHY0_PMA_CONTROL2 0x38004 +#define PX210_PHY1_PMA_CONTROL2 0xb8004 + #define PLL0_LOCK_DONE (0x1 << 6) + #define PLL1_LOCK_DONE 
(0x1 << 7) + +#define PX210_PHY0_PLL0_CLK_SEL 0X684 +#define PX210_PHY0_PLL1_CLK_SEL 0x704 +#define PX210_PHY1_PLL_CLK_SEL 0X80684 + #define PLL_LINK_RATE_162000 0xf01 + #define PLL_LINK_RATE_270000 0x701 + #define PLL_LINK_RATE_540000 0x301 + #define PLL_LINK_RATE_810000 0x200 + +#define PX210_PHY0_HSCLK0_SEL 0x18398 +#define PX210_PHY0_HSCLK1_SEL 0x1a398 +#define PX210_PHY1_HSCLK_SEL 0x90398 + #define HSCLK_LINK_0 0x0 + #define HSCLK_LINK_1 0x1 + +#define PX210_PHY0_HSCLK0_DIV 0x1839c +#define PX210_PHY0_HSCLK1_DIV 0x1a39c +#define PX210_PHY1_HSCLK_DIV 0x9039c + #define HSCLK_LINK_RATE_162000 0x2 + #define HSCLK_LINK_RATE_270000 0x1 + #define HSCLK_LINK_RATE_540000 0x0 + #define HSCLK_LINK_RATE_810000 0x0 + +#define PX210_PHY0_PLLDRC0_CTRL 0x18394 +#define PX210_PHY0_PLLDRC1_CTRL 0x1a394 +#define PX210_PHY1_PLLDRC_CTRL 0x90394 + #define PLLDRC_LINK0 0x1 + #define PLLDRC_LINK1 0x9 + +#define PX210_PHY0_PLL0_DSM_M0 0x250 +#define PX210_PHY1_PLL0_DSM_M0 0x80250 + #define PLL0_DSM_M0 0x4 +#define PX210_PHY0_PLL0_VCOCAL_START 0x218 +#define PX210_PHY1_PLL0_VCOCAL_START 0x80218 + #define PLL0_VCOCAL_START 0xc5e +#define PX210_PHY0_PLL0_VCOCAL_CTRL 0x208 +#define PX210_PHY1_PLL0_VCOCAL_CTRL 0x80208 + #define PLL0_VCOCAL_CTRL 0x3 + +#define PX210_PHY0_PLL1_DSM_M0 0x350 + #define PLL1_DSM_M0 0x4 +#define PX210_PHY0_PLL1_VCOCAL_START 0x318 + #define PLL1_VCOCAL_START 0xc5e +#define PX210_PHY0_PLL1_VCOCAL_CTRL 0x308 + #define PLL1_VCOCAL_CTRL 0x3 + +#define PX210_PHY0_PLL0_CP_PADJ 0x690 +#define PX210_PHY0_PLL0_CP_IADJ 0x694 +#define PX210_PHY0_PLL0_CP_FILT_PADJ 0x698 +#define PX210_PHY0_PLL0_INTDIV 0x240 +#define PX210_PHY0_PLL0_FRACDIVL 0x244 +#define PX210_PHY0_PLL0_FRACDIVH 0x248 +#define PX210_PHY0_PLL0_HIGH_THR 0x24c +#define PX210_PHY0_PLL0_PDIAG_CTRL 0x680 +#define PX210_PHY0_PLL0_VCOCAL_PLLCNT_START 0x220 +#define PX210_PHY0_PLL0_LOCK_PEFCNT 0x270 +#define PX210_PHY0_PLL0_LOCK_PLLCNT_START 0x278 +#define PX210_PHY0_PLL0_LOCK_PLLCNT_THR 0x27c + +#define 
PX210_PHY0_PLL1_CP_PADJ 0x710 +#define PX210_PHY0_PLL1_CP_IADJ 0x714 +#define PX210_PHY0_PLL1_CP_FILT_PADJ 0x718 +#define PX210_PHY0_PLL1_INTDIV 0x340 +#define PX210_PHY0_PLL1_FRACDIVL 0x344 +#define PX210_PHY0_PLL1_FRACDIVH 0x348 +#define PX210_PHY0_PLL1_HIGH_THR 0x34c +#define PX210_PHY0_PLL1_PDIAG_CTRL 0x700 +#define PX210_PHY0_PLL1_VCOCAL_PLLCNT_START 0x320 +#define PX210_PHY0_PLL1_LOCK_PEFCNT 0x370 +#define PX210_PHY0_PLL1_LOCK_PLLCNT_START 0x378 +#define PX210_PHY0_PLL1_LOCK_PLLCNT_THR 0x37c + +#define PX210_PHY1_PLL0_CP_PADJ 0x80690 +#define PX210_PHY1_PLL0_CP_IADJ 0x80694 +#define PX210_PHY1_PLL0_CP_FILT_PADJ 0x80698 +#define PX210_PHY1_PLL0_INTDIV 0x80240 +#define PX210_PHY1_PLL0_FRACDIVL 0x80244 +#define PX210_PHY1_PLL0_FRACDIVH 0x80248 +#define PX210_PHY1_PLL0_HIGH_THR 0x8024c +#define PX210_PHY1_PLL0_PDIAG_CTRL 0x80680 +#define PX210_PHY1_PLL0_VCOCAL_PLLCNT_START 0x80220 +#define PX210_PHY1_PLL0_LOCK_PEFCNT 0x80270 +#define PX210_PHY1_PLL0_LOCK_PLLCNT_START 0x80278 +#define PX210_PHY1_PLL0_LOCK_PLLCNT_THR 0x8027c + +#define PX210_PHY0_PLL0_TX_PSC_A0 0x18400 +#define PX210_PHY1_PLL0_TX_PSC_A0 0x90400 + #define PLL0_TX_PSC_A0 0xfb +#define PX210_PHY0_PLL0_TX_PSC_A2 0x18408 +#define PX210_PHY1_PLL0_TX_PSC_A2 0x90408 + #define PLL0_TX_PSC_A2 0x4aa +#define PX210_PHY0_PLL0_TX_PSC_A3 0x1840c +#define PX210_PHY1_PLL0_TX_PSC_A3 0x9040c + #define PLL0_TX_PSC_A3 0x4aa +#define PX210_PHY0_PLL0_RX_PSC_A0 0x28000 +#define PX210_PHY1_PLL0_RX_PSC_A0 0xa0000 + #define PLL0_RX_PSC_A0 0x0 +#define PX210_PHY0_PLL0_RX_PSC_A2 0x28008 +#define PX210_PHY1_PLL0_RX_PSC_A2 0xa0008 + #define PLL0_RX_PSC_A2 0x0 +#define PX210_PHY0_PLL0_RX_PSC_A3 0x2800C +#define PX210_PHY1_PLL0_RX_PSC_A3 0xa000C + #define PLL0_RX_PSC_A3 0x0 +#define PX210_PHY0_PLL0_RX_PSC_CAL 0x28018 +#define PX210_PHY1_PLL0_RX_PSC_CAL 0xa0018 + #define PLL0_RX_PSC_CAL 0x0 + +#define PX210_PHY0_PLL1_TX_PSC_A0 0x1a400 + #define PLL1_TX_PSC_A0 0xfb +#define PX210_PHY0_PLL1_TX_PSC_A2 0x1a408 + #define PLL1_TX_PSC_A2 
0x4aa +#define PX210_PHY0_PLL1_TX_PSC_A3 0x1a40c + #define PLL1_TX_PSC_A3 0x4aa +#define PX210_PHY0_PLL1_RX_PSC_A0 0x2a000 + #define PLL1_RX_PSC_A0 0x0 +#define PX210_PHY0_PLL1_RX_PSC_A2 0x2a008 + #define PLL1_RX_PSC_A2 0x0 +#define PX210_PHY0_PLL1_RX_PSC_A3 0x2a00C + #define PLL1_RX_PSC_A3 0x0 +#define PX210_PHY0_PLL1_RX_PSC_CAL 0x2a018 + #define PLL1_RX_PSC_CAL 0x0 + +#define PX210_PHY0_PLL0_XCVR_CTRL 0x183a8 +#define PX210_PHY1_PLL0_XCVR_CTRL 0x903a8 + #define PLL0_XCVR_CTRL 0xf +#define PX210_PHY0_PLL1_XCVR_CTRL 0x1a3a8 + #define PLL1_XCVR_CTRL 0xf + +#define PX210_PHY0_PLL0_RX_GCSM1_CTRL 0x28420 +#define PX210_PHY1_PLL0_RX_GCSM1_CTRL 0xa0420 + #define PLL0_RX_GCSM1_CTRL 0x0 +#define PX210_PHY0_PLL0_RX_GCSM2_CTRL 0x28440 +#define PX210_PHY1_PLL0_RX_GCSM2_CTRL 0xa0440 + #define PLL0_RX_GCSM2_CTRL 0x0 +#define PX210_PHY0_PLL0_RX_PERGCSM_CTRL 0x28460 +#define PX210_PHY1_PLL0_RX_PERGCSM_CTRL 0xa0460 + #define PLL0_RX_PERGCSM_CTRL 0x0 + +#define PX210_PHY0_PLL1_RX_GCSM1_CTRL 0x2a420 + #define PLL1_RX_GCSM1_CTRL 0x0 +#define PX210_PHY0_PLL1_RX_GCSM2_CTRL 0x2a440 + #define PLL1_RX_GCSM2_CTRL 0x0 +#define PX210_PHY0_PLL1_RX_PERGCSM_CTRL 0x2a460 + #define PLL1_RX_PERGCSM_CTRL 0x0 + +/* swing and emphasis */ +#define PX210_PHY0_PLL0_TX_DIAG_ACYA 0x1879c +#define PX210_PHY0_PLL1_TX_DIAG_ACYA 0x1a79c +#define PX210_PHY1_PLL0_TX_DIAG_ACYA 0x9079c + #define LOCK 1 + #define UNLOCK 0 + +#define PX210_PHY0_PLL0_TX_TXCC_CTRL 0x18100 +#define PX210_PHY0_PLL1_TX_TXCC_CTRL 0x1a100 +#define PX210_PHY1_PLL0_TX_TXCC_CTRL 0x90100 + #define TX_TXCC_CTRL 0x8a4 + +#define PX210_PHY0_PLL0_TX_DRV 0x18318 +#define PX210_PHY0_PLL1_TX_DRV 0x1a318 +#define PX210_PHY1_PLL0_TX_DRV 0x90318 + #define TX_DRV 0x3 + +#define PX210_PHY0_PLL0_TX_MGNFS 0x18140 +#define PX210_PHY0_PLL1_TX_MGNFS 0x1a140 +#define PX210_PHY1_PLL0_TX_MGNFS 0x90140 + +#define PX210_PHY0_PLL0_TX_CPOST 0x18130 +#define PX210_PHY0_PLL1_TX_CPOST 0x1a130 +#define PX210_PHY0_PLL1_TX_CPOST1 0x1a13c +#define PX210_PHY1_PLL0_TX_CPOST 
0x90130 + +/******************************phy register end********************************************/ +#endif /* __PX210_REG_H__ */ diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index d0f900d4c35b..082cd30aba87 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -3225,4 +3225,6 @@ #define PCI_VENDOR_ID_NCUBE 0x10ff +#define PCI_VENDOR_ID_PHYTIUM 0x1db7 + #endif /* _LINUX_PCI_IDS_H */ -- Gitee From 3d466c75d607e096ccde628500ea352d41f368a5 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Wed, 3 Apr 2024 15:26:36 +0800 Subject: [PATCH 0668/2138] anolis: configs: adjust loongarch configs ANBZ: #8598 Enable some important kconfigs for better compatibility with x86 and arm architectures. Here are the adjusted configs: CONFIG_PSI=y CONFIG_LOCKUP_DETECTOR=y CONFIG_SOFTLOCKUP_DETECTOR=y CONFIG_HARDLOCKUP_DETECTOR=y CONFIG_DETECT_HUNG_TASK=y CONFIG_FUNCTION_TRACER=y CONFIG_DYNAMIC_FTRACE=y CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO_BTF=y CONFIG_BPF_JIT=y CONFIG_CRYPTO_SM4=y CONFIG_CRYPTO_SM4_GENERIC=y CONFIG_CRYPTO_SM3_GENERIC=y CONFIG_KPROBES=y CONFIG_KRETPROBES=y CONFIG_SCSI_SAS_ATA=y CONFIG_KVM=y CONFIG_ZSMALLOC=y CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y CONFIG_RTC_SYSTOHC=y CONFIG_ARCH_IOREMAP=y CONFIG_ARCH_WRITECOMBINE=y CONFIG_CPU_HAS_LBT=y CONFIG_DWMAC_LOONGSON=m These kconfigs are adjusted by follow steps: 1. `ARCH=loongarch CROSS_COMPILE=scripts/dummy-tools/ make anolis_defconfig` 2. 
`ARCH=loongarch CROSS_COMPILE=scripts/dummy-tools/ ./scripts/kconfig/merge_config.sh .config adjust-configs` Signed-off-by: Qiao Ma Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3002 --- arch/loongarch/configs/anolis-debug_defconfig | 6664 ++++++++++++++++- arch/loongarch/configs/anolis_defconfig | 6664 ++++++++++++++++- 2 files changed, 13312 insertions(+), 16 deletions(-) diff --git a/arch/loongarch/configs/anolis-debug_defconfig b/arch/loongarch/configs/anolis-debug_defconfig index db41cbf5efd4..365f27c124b4 100644 --- a/arch/loongarch/configs/anolis-debug_defconfig +++ b/arch/loongarch/configs/anolis-debug_defconfig @@ -1,147 +1,904 @@ # -## Automatically generated file; DO NOT EDIT. -## Linux/loongarch 6.6.7 Kernel Configuration -## +# Automatically generated file; DO NOT EDIT. +# Linux/loongarch 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" # CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +# CONFIG_KERNEL_ZSTD is not set +CONFIG_DEFAULT_INIT="" 
+CONFIG_DEFAULT_HOSTNAME="(none)" CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y + +# +# BPF subsystem +# CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set # CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_USERMODE_DRIVER=y +# CONFIG_BPF_PRELOAD is not set +# CONFIG_BPF_LSM is not set +# end of BPF subsystem + +CONFIG_PREEMPT_VOLUNTARY_BUILD=y +# CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +# CONFIG_SCHED_CORE is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +# CONFIG_PSI_DEFAULT_DISABLED is not set 
+# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +# CONFIG_IKCONFIG is not set +# CONFIG_IKHEADERS is not set CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y CONFIG_CHECKPOINT_RESTORE=y CONFIG_SCHED_AUTOGROUP=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" 
+CONFIG_SYSCTL=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN=y +CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW=y CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +# CONFIG_DEBUG_RSEQ is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y CONFIG_KEXEC=y CONFIG_CRASH_DUMP=y +# end of Kexec and crash features +# end of General setup + +CONFIG_LOONGARCH=y +CONFIG_64BIT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_L1_CACHE_SHIFT=6 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MACH_LOONGSON64=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_PGTABLE_3LEVEL=y +CONFIG_PGTABLE_LEVELS=3 +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_AS_HAS_EXPLICIT_RELOCS=y +CONFIG_AS_HAS_FCSR_CLASS=y +CONFIG_AS_HAS_LSX_EXTENSION=y +CONFIG_AS_HAS_LASX_EXTENSION=y +CONFIG_AS_HAS_LBT_EXTENSION=y +CONFIG_AS_HAS_LVZ_EXTENSION=y + +# +# Kernel type and options +# +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 
+CONFIG_SCHED_HRTICK=y +# CONFIG_4KB_3LEVEL is not set +# CONFIG_4KB_4LEVEL is not set +# CONFIG_16KB_2LEVEL is not set +CONFIG_16KB_3LEVEL=y +# CONFIG_64KB_2LEVEL is not set +# CONFIG_64KB_3LEVEL is not set +CONFIG_CMDLINE="" +CONFIG_CMDLINE_BOOTLOADER=y +# CONFIG_CMDLINE_EXTEND is not set +# CONFIG_CMDLINE_FORCE is not set +CONFIG_DMI=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_SCHED_SMT=y +CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y CONFIG_NR_CPUS=256 CONFIG_NUMA=y +CONFIG_NODES_SHIFT=6 +CONFIG_ARCH_FORCE_MAX_ORDER=11 +CONFIG_ARCH_IOREMAP=y +CONFIG_ARCH_WRITECOMBINE=y +CONFIG_ARCH_STRICT_ALIGN=y +CONFIG_CPU_HAS_FPU=y CONFIG_CPU_HAS_LSX=y CONFIG_CPU_HAS_LASX=y +CONFIG_CPU_HAS_LBT=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_ARCH_SELECTS_CRASH_DUMP=y +CONFIG_RELOCATABLE=y CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_BASE_MAX_OFFSET=0x01000000 +CONFIG_SECCOMP=y +# end of Kernel type and options + +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=12 +CONFIG_ARCH_MMAP_RND_BITS_MAX=18 +CONFIG_ARCH_SUPPORTS_UPROBES=y + +# +# Power management options +# +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y + +# +# CPU Frequency scaling +# CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set 
+# CONFIG_CPUFREQ_DT_PLATDEV is not set CONFIG_LOONGSON3_ACPI_CPUFREQ=y +# end of CPU Frequency scaling + +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +CONFIG_HIBERNATE_CALLBACKS=y CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_CPU_PM=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_SLEEP=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=y +CONFIG_ACPI_FAN=y CONFIG_ACPI_TAD=y CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_PROCESSOR=y CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y CONFIG_ACPI_HOTPLUG_MEMORY=y +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set +CONFIG_ACPI_NUMA=y +# CONFIG_ACPI_HMAT is not set +CONFIG_ACPI_WATCHDOG=y +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_PPTT=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +# end of Power management options + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y 
+CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m +CONFIG_KVM=y + +# +# General architecture-dependent options +# +CONFIG_GENERIC_ENTRY=y +CONFIG_KPROBES=y CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_KRETPROBE_ON_RETHOOK=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_MERGE_VMAS=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_TIF_NOHZ=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=12 +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y 
+CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +# CONFIG_COMPAT_32BIT_TIME is not set +CONFIG_ARCH_HAS_PHYS_TO_DMA=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT=0 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=m CONFIG_BLK_DEV_ZONED=y CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set CONFIG_BLK_WBT=y +CONFIG_BLK_WBT_MQ=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_FC_APPID is not set +# CONFIG_BLK_CGROUP_IOCOST is not set +# 
CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y 
+CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_TRYLOCK=y +CONFIG_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_INLINE_SPIN_LOCK=y +CONFIG_INLINE_SPIN_LOCK_BH=y +CONFIG_INLINE_SPIN_LOCK_IRQ=y +CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_LOCK=y +CONFIG_INLINE_READ_LOCK_BH=y +CONFIG_INLINE_READ_LOCK_IRQ=y +CONFIG_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_LOCK=y +CONFIG_INLINE_WRITE_LOCK_BH=y +CONFIG_INLINE_WRITE_LOCK_IRQ=y +CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_CK_KABI_RESERVE=y +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set +# 
CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="zstd" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y CONFIG_Z3FOLD=y +CONFIG_ZSMALLOC=y CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLUB_TINY is not set +CONFIG_SLAB_MERGE_DEFAULT=y CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + # CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set # CONFIG_COMPAT_BRK is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y 
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=19 +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_PAGE_IDLE_FLAG=y CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ZONE_DMA32=y +CONFIG_HMM_MIRROR=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MEMFD_CREATE=y +# CONFIG_ANON_VMA_NAME is not set CONFIG_USERFAULTFD=y +# CONFIG_LRU_GEN is not set +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +# CONFIG_DAMON is not set +# end of Data Access Monitoring +# end of Memory Management options + CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# CONFIG_PACKET=y CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y CONFIG_UNIX_DIAG=m CONFIG_TLS=m CONFIG_TLS_DEVICE=y CONFIG_TLS_TOE=y +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y CONFIG_XFRM_USER=y CONFIG_XFRM_INTERFACE=m CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m CONFIG_NET_KEY=m CONFIG_NET_KEY_MIGRATE=y +CONFIG_XFRM_ESPINTCP=y CONFIG_SMC=m CONFIG_SMC_DIAG=m CONFIG_XDP_SOCKETS=y CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_FIB_TRIE_STATS=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_MULTIPATH=y CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m CONFIG_NET_IPGRE=m CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y CONFIG_IP_MROUTE=y CONFIG_IP_MROUTE_MULTIPLE_TABLES=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y 
CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +CONFIG_NET_FOU=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_ESPINTCP=y CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m CONFIG_INET_UDP_DIAG=m CONFIG_INET_RAW_DIAG=m CONFIG_INET_DIAG_DESTROY=y CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m CONFIG_TCP_CONG_CUBIC=m +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m CONFIG_TCP_CONG_HSTCP=m CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m CONFIG_TCP_CONG_NV=m CONFIG_TCP_CONG_SCALABLE=m CONFIG_TCP_CONG_LP=m @@ -151,6 +908,8 @@ CONFIG_TCP_CONG_ILLINOIS=m CONFIG_TCP_CONG_DCTCP=m CONFIG_TCP_CONG_CDG=m CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_RENO=y +CONFIG_DEFAULT_TCP_CONG="reno" CONFIG_TCP_MD5SIG=y CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y @@ -163,9 +922,17 @@ CONFIG_INET6_ESPINTCP=y CONFIG_INET6_IPCOMP=m CONFIG_IPV6_MIP6=m CONFIG_IPV6_ILA=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m CONFIG_IPV6_GRE=m +CONFIG_IPV6_FOU=m +CONFIG_IPV6_FOU_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_IPV6_MROUTE=y CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y @@ -173,21 +940,53 @@ CONFIG_IPV6_PIMSM_V2=y CONFIG_IPV6_SEG6_LWTUNNEL=y CONFIG_IPV6_SEG6_HMAC=y CONFIG_IPV6_RPL_LWTUNNEL=y +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set CONFIG_NETLABEL=y CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y CONFIG_NETWORK_PHY_TIMESTAMPING=y CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# 
CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_ZONES=y +# CONFIG_NF_CONNTRACK_PROCFS is not set CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CONNTRACK_TIMEOUT=y CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m @@ -198,6 +997,16 @@ CONFIG_NF_CT_NETLINK=m CONFIG_NF_CT_NETLINK_TIMEOUT=m CONFIG_NF_CT_NETLINK_HELPER=m CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=y CONFIG_NF_TABLES_NETDEV=y @@ -214,41 +1023,67 @@ CONFIG_NFT_TUNNEL=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m CONFIG_NFT_FIB_INET=m CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m CONFIG_NFT_OSF=m CONFIG_NFT_TPROXY=m CONFIG_NFT_SYNPROXY=m +CONFIG_NF_DUP_NETDEV=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set CONFIG_NF_FLOW_TABLE_INET=m CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m CONFIG_NETFILTER_XT_SET=m + +# +# Xtables 
targets +# CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m CONFIG_NETFILTER_XT_TARGET_HMARK=m CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m CONFIG_NETFILTER_XT_TARGET_LED=m CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_SECMARK=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CGROUP=m @@ -260,11 +1095,14 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m CONFIG_NETFILTER_XT_MATCH_IPCOMP=m CONFIG_NETFILTER_XT_MATCH_IPRANGE=m CONFIG_NETFILTER_XT_MATCH_IPVS=m @@ -284,6 +1122,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m @@ -291,7 +1130,10 @@ 
CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m @@ -311,11 +1153,21 @@ CONFIG_IP_SET_LIST_SET=m CONFIG_IP_VS=m CONFIG_IP_VS_IPV6=y CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# CONFIG_IP_VS_PROTO_TCP=y CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y CONFIG_IP_VS_PROTO_ESP=y CONFIG_IP_VS_PROTO_AH=y CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# CONFIG_IP_VS_RR=m CONFIG_IP_VS_WRR=m CONFIG_IP_VS_LC=m @@ -329,13 +1181,43 @@ CONFIG_IP_VS_SH=m CONFIG_IP_VS_MH=m CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -356,8 +1238,20 @@ CONFIG_IP_NF_SECURITY=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m 
CONFIG_IP6_NF_MATCH_EUI64=m @@ -379,6 +1273,9 @@ CONFIG_IP6_NF_SECURITY=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m CONFIG_NF_TABLES_BRIDGE=m CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m @@ -405,19 +1302,43 @@ CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_BPFILTER=y +CONFIG_BPFILTER_UMH=m CONFIG_IP_DCCP=m +CONFIG_INET_DCCP_DIAG=m + +# +# DCCP CCIDs Configuration +# CONFIG_IP_DCCP_CCID2_DEBUG=y +CONFIG_IP_DCCP_CCID3=y CONFIG_IP_DCCP_CCID3_DEBUG=y +CONFIG_IP_DCCP_TFRC_LIB=y +CONFIG_IP_DCCP_TFRC_DEBUG=y +# end of DCCP CCIDs Configuration + +# +# DCCP Kernel Hacking +# CONFIG_IP_DCCP_DEBUG=y +# end of DCCP Kernel Hacking + +CONFIG_IP_SCTP=m CONFIG_SCTP_DBG_OBJCNT=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m CONFIG_RDS=m CONFIG_RDS_RDMA=m CONFIG_RDS_TCP=m CONFIG_RDS_DEBUG=y CONFIG_TIPC=m CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m CONFIG_ATM=m CONFIG_ATM_CLIP=m CONFIG_ATM_CLIP_NO_ICMP=y @@ -426,30 +1347,46 @@ CONFIG_ATM_MPOA=m CONFIG_ATM_BR2684=m CONFIG_ATM_BR2684_IPFILTER=y CONFIG_L2TP=m +# CONFIG_L2TP_DEBUGFS is not set CONFIG_L2TP_V3=y CONFIG_L2TP_IP=m CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y CONFIG_BRIDGE_VLAN_FILTERING=y CONFIG_BRIDGE_MRP=y +# CONFIG_BRIDGE_CFM is not set CONFIG_NET_DSA=m +# CONFIG_NET_DSA_TAG_NONE is not set CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM_COMMON=m CONFIG_NET_DSA_TAG_BRCM=m +# CONFIG_NET_DSA_TAG_BRCM_LEGACY is not set CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +# CONFIG_NET_DSA_TAG_HELLCREEK is not set CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA_COMMON=m CONFIG_NET_DSA_TAG_DSA=m 
CONFIG_NET_DSA_TAG_EDSA=m CONFIG_NET_DSA_TAG_MTK=m CONFIG_NET_DSA_TAG_KSZ=m CONFIG_NET_DSA_TAG_OCELOT=m +# CONFIG_NET_DSA_TAG_OCELOT_8021Q is not set CONFIG_NET_DSA_TAG_QCA=m CONFIG_NET_DSA_TAG_RTL4_A=m +# CONFIG_NET_DSA_TAG_RTL8_4 is not set +# CONFIG_NET_DSA_TAG_RZN1_A5PSW is not set CONFIG_NET_DSA_TAG_LAN9303=m CONFIG_NET_DSA_TAG_SJA1105=m CONFIG_NET_DSA_TAG_TRAILER=m +# CONFIG_NET_DSA_TAG_XRS700X is not set CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m CONFIG_LLC2=m CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m @@ -459,12 +1396,18 @@ CONFIG_X25=m CONFIG_LAPB=m CONFIG_PHONET=m CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set # CONFIG_6LOWPAN_NHC is not set CONFIG_IEEE802154=m CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_SOCKET=m CONFIG_IEEE802154_6LOWPAN=m CONFIG_MAC802154=m CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# CONFIG_NET_SCH_HTB=m CONFIG_NET_SCH_HFSC=m CONFIG_NET_SCH_PRIO=m @@ -476,6 +1419,7 @@ CONFIG_NET_SCH_TEQL=m CONFIG_NET_SCH_TBF=m CONFIG_NET_SCH_CBS=m CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_MQPRIO_LIB=m CONFIG_NET_SCH_TAPRIO=m CONFIG_NET_SCH_GRED=m CONFIG_NET_SCH_NETEM=m @@ -495,7 +1439,18 @@ CONFIG_NET_SCH_INGRESS=m CONFIG_NET_SCH_PLUG=m CONFIG_NET_SCH_ETS=m CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_FQ_PIE is not set +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y CONFIG_NET_CLS_BASIC=m CONFIG_NET_CLS_ROUTE4=m CONFIG_NET_CLS_FW=m @@ -508,11 +1463,13 @@ CONFIG_NET_CLS_BPF=m CONFIG_NET_CLS_FLOWER=m CONFIG_NET_CLS_MATCHALL=m CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 CONFIG_NET_EMATCH_CMP=m CONFIG_NET_EMATCH_NBYTE=m CONFIG_NET_EMATCH_U32=m CONFIG_NET_EMATCH_META=m CONFIG_NET_EMATCH_TEXT=m +# CONFIG_NET_EMATCH_CANID is not set CONFIG_NET_EMATCH_IPSET=m CONFIG_NET_EMATCH_IPT=m CONFIG_NET_CLS_ACT=y @@ 
-541,29 +1498,70 @@ CONFIG_NET_IFE_SKBMARK=m CONFIG_NET_IFE_SKBPRIO=m CONFIG_NET_IFE_SKBTCINDEX=m CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y CONFIG_DCB=y CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_BATMAN_V=y +CONFIG_BATMAN_ADV_BLA=y +CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y CONFIG_BATMAN_ADV_DEBUG=y +# CONFIG_BATMAN_ADV_TRACING is not set CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=y CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=y CONFIG_HSR=m +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y CONFIG_QRTR=m CONFIG_QRTR_TUN=m CONFIG_NET_NCSI=y CONFIG_NCSI_OEM_CMD_GET_MAC=y +# CONFIG_NCSI_OEM_CMD_KEEP_PHY is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# CONFIG_NET_PKTGEN=m +# CONFIG_NET_DROP_MONITOR is not set +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m +# CONFIG_CAN_J1939 is not set +# CONFIG_CAN_ISOTP is not set CONFIG_BT=m +CONFIG_BT_BREDR=y CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m @@ -572,95 +1570,514 @@ CONFIG_BT_BNEP_PROTO_FILTER=y CONFIG_BT_CMTP=m CONFIG_BT_HIDP=m CONFIG_BT_HS=y +CONFIG_BT_LE=y +CONFIG_BT_LE_L2CAP_ECRED=y +# CONFIG_BT_6LOWPAN is not set +# CONFIG_BT_LEDS is not set +# CONFIG_BT_MSFTEXT is not set +# CONFIG_BT_AOSPEXT is not set +CONFIG_BT_DEBUGFS=y +# CONFIG_BT_SELFTEST is not set + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m 
+CONFIG_BT_RTL=m CONFIG_BT_HCIBTUSB=m CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_POLL_SYNC=y # CONFIG_BT_HCIBTUSB_BCM is not set +# CONFIG_BT_HCIBTUSB_MTK is not set +CONFIG_BT_HCIBTUSB_RTL=y CONFIG_BT_HCIBTSDIO=m CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y CONFIG_BT_HCIUART_BCSP=y CONFIG_BT_HCIUART_ATH3K=y +# CONFIG_BT_HCIUART_INTEL is not set +# CONFIG_BT_HCIUART_AG6XX is not set CONFIG_BT_HCIBCM203X=m +# CONFIG_BT_HCIBCM4377 is not set CONFIG_BT_HCIBPA10X=m CONFIG_BT_HCIBFUSB=m CONFIG_BT_HCIVHCI=m CONFIG_BT_MRVL=m CONFIG_BT_MRVL_SDIO=m CONFIG_BT_ATH3K=m +# CONFIG_BT_MTKSDIO is not set +# CONFIG_BT_VIRTIO is not set +# end of Bluetooth device drivers + +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y CONFIG_CFG80211_WEXT=y CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUGFS is not set +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set CONFIG_NET_9P=y +CONFIG_NET_9P_FD=y CONFIG_NET_9P_VIRTIO=y +# CONFIG_NET_9P_RDMA is not set +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m 
+CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y CONFIG_PCIEAER=y CONFIG_PCIEAER_INJECT=m CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +# CONFIG_PCIE_EDR is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_ARCH_FALLBACKS=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set CONFIG_PCI_STUB=y CONFIG_PCI_PF_STUB=m +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y CONFIG_PCI_IOV=y +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCI_DYNAMIC_OF_NODES is not set +# CONFIG_PCIE_BUS_TUNE_OFF is not set +CONFIG_PCIE_BUS_DEFAULT=y +# CONFIG_PCIE_BUS_SAFE is not set +# CONFIG_PCIE_BUS_PERFORMANCE is not set +# CONFIG_PCIE_BUS_PEER2PEER is not set +CONFIG_VGA_ARB=y CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_ACPI=y +# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# +# CONFIG_PCI_FTPCI100 is not set +# CONFIG_PCI_HOST_GENERIC is not set +CONFIG_PCI_LOONGSON=y +# CONFIG_PCIE_MICROCHIP_HOST is not set +# CONFIG_PCIE_XILINX is not set + +# +# Cadence-based PCIe controllers +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence-based PCIe controllers + +# +# DesignWare-based PCIe controllers +# +# CONFIG_PCI_MESON is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set 
+# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# end of Mobiveil-based PCIe controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_CXL_BUS is not set CONFIG_PCCARD=m # CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y CONFIG_RAPIDIO=y CONFIG_RAPIDIO_TSI721=y +CONFIG_RAPIDIO_DISC_TIMEOUT=30 CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y +# CONFIG_RAPIDIO_DMA_ENGINE is not set +# CONFIG_RAPIDIO_DEBUG is not set CONFIG_RAPIDIO_ENUM_BASIC=m CONFIG_RAPIDIO_CHMAN=m CONFIG_RAPIDIO_MPORT_CDEV=m + +# +# RapidIO Switch drivers +# +# CONFIG_RAPIDIO_CPS_XX is not set +# CONFIG_RAPIDIO_CPS_GEN2 is not set +# CONFIG_RAPIDIO_RXS_GEN3 is not set +# end of RapidIO Switch drivers + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_FW_LOADER_COMPRESS=y +CONFIG_FW_LOADER_COMPRESS_XZ=y +# CONFIG_FW_LOADER_COMPRESS_ZSTD is not set +CONFIG_FW_CACHE=y +# CONFIG_FW_UPLOAD is not set +# end of Firmware loader + +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_SOC_BUS=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m 
+CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_MOXTET is not set +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# end of ARM System Control and Management Interface Protocol + +# CONFIG_FIRMWARE_MEMMAP is not set +CONFIG_DMIID=y CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y CONFIG_ISCSI_IBFT=m +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=m +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_GENERIC_STUB=y CONFIG_EFI_ZBOOT=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set CONFIG_EFI_CAPSULE_LOADER=m CONFIG_EFI_TEST=m +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# CONFIG_EFI_DISABLE_RUNTIME is not set +# CONFIG_EFI_COCO_SECRET is not set +# end of EFI (Extensible Firmware Interface) Support + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
+# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# CONFIG_MTD_CFI=m CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y CONFIG_MTD_CFI_INTELEXT=m CONFIG_MTD_CFI_AMDSTD=m CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m CONFIG_MTD_RAM=m CONFIG_MTD_ROM=m +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set CONFIG_MTD_BLOCK2MTD=m + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + CONFIG_MTD_SPI_NOR=m +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set 
+CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y +# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set CONFIG_MTD_UBI_GLUEBI=m CONFIG_MTD_UBI_BLOCK=y +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_RESERVED_MEM=y +# CONFIG_OF_OVERLAY is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y CONFIG_PARPORT=m CONFIG_PARPORT_PC=m CONFIG_PARPORT_SERIAL=m CONFIG_PARPORT_PC_FIFO=y +# CONFIG_PARPORT_PC_SUPERIO is not set CONFIG_PARPORT_1284=y +CONFIG_PARPORT_NOT_PC=y +CONFIG_PNP=y # CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set CONFIG_ZRAM=m +# CONFIG_ZRAM_DEF_COMP_LZORLE is not set CONFIG_ZRAM_DEF_COMP_ZSTD=y +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +# CONFIG_ZRAM_DEF_COMP_842 is not set +CONFIG_ZRAM_DEF_COMP="zstd" CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_ZRAM_MULTI_COMP is not set CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_UBLK is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m CONFIG_BLK_DEV_NVME=m CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_VERBOSE_ERRORS is not set +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=m CONFIG_NVME_RDMA=m CONFIG_NVME_FC=m CONFIG_NVME_TCP=m +# CONFIG_NVME_AUTH is not set 
CONFIG_NVME_TARGET=m CONFIG_NVME_TARGET_PASSTHRU=y CONFIG_NVME_TARGET_LOOP=m @@ -668,57 +2085,183 @@ CONFIG_NVME_TARGET_RDMA=m CONFIG_NVME_TARGET_FC=m CONFIG_NVME_TARGET_FCLOOP=m CONFIG_NVME_TARGET_TCP=m +# CONFIG_NVME_TARGET_AUTH is not set +# end of NVME Support + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +# CONFIG_ICS932S401 is not set CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set CONFIG_APDS9802ALS=m CONFIG_ISL29003=m CONFIG_ISL29020=m CONFIG_SENSORS_TSL2550=m CONFIG_SENSORS_BH1770=m CONFIG_SENSORS_APDS990X=m +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_DW_XDATA_PCIE is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +CONFIG_MISC_RTSX=m +# CONFIG_HISI_HIKEY_USB is not set +# CONFIG_OPEN_DICE is not set +# CONFIG_VCPU_STALL_DETECTOR is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_AT25 is not set CONFIG_EEPROM_LEGACY=m CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + CONFIG_SENSORS_LIS3_I2C=m +CONFIG_ALTERA_STAPL=m +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_BCM_VK is not set +# CONFIG_MISC_ALCOR_PCI is not set CONFIG_MISC_RTSX_PCI=m CONFIG_MISC_RTSX_USB=m CONFIG_UACCE=m CONFIG_PVPANIC=y +# CONFIG_PVPANIC_MMIO is not set +# CONFIG_PVPANIC_PCI is not set +# CONFIG_GP_PCI1XXXX is not set +# end of Misc devices + +# +# SCSI device support +# 
+CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI_COMMON=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=m +CONFIG_BLK_DEV_BSG=y CONFIG_CHR_DEV_SCH=m CONFIG_SCSI_ENCLOSURE=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set CONFIG_SCSI_CXGB4_ISCSI=m CONFIG_SCSI_BNX2_ISCSI=m CONFIG_SCSI_BNX2X_FCOE=m CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set CONFIG_SCSI_AACRAID=m +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set CONFIG_SCSI_MVSAS=y # CONFIG_SCSI_MVSAS_DEBUG is not set CONFIG_SCSI_MVSAS_TASKLET=y CONFIG_SCSI_MVUMI=y +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set CONFIG_MEGARAID_NEWGEN=y CONFIG_MEGARAID_MM=y CONFIG_MEGARAID_MAILBOX=y CONFIG_MEGARAID_LEGACY=y CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT3SAS=y +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 CONFIG_SCSI_MPT2SAS=m +# CONFIG_SCSI_MPI3MR is not set CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set CONFIG_LIBFC=m CONFIG_LIBFCOE=m CONFIG_FCOE=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# 
CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_PPA is not set +# CONFIG_SCSI_IMM is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set CONFIG_SCSI_QLOGIC_1280=m CONFIG_SCSI_QLA_FC=m CONFIG_TCM_QLA2XXX=m +# CONFIG_TCM_QLA2XXX_DEBUG is not set CONFIG_SCSI_QLA_ISCSI=m +# CONFIG_SCSI_LPFC is not set +# CONFIG_SCSI_EFCT is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set CONFIG_SCSI_VIRTIO=m CONFIG_SCSI_CHELSIO_FCOE=m CONFIG_SCSI_DH=y @@ -726,25 +2269,141 @@ CONFIG_SCSI_DH_RDAC=y CONFIG_SCSI_DH_HP_SW=y CONFIG_SCSI_DH_EMC=y CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + CONFIG_ATA=y +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_AHCI_DWC is not set +# CONFIG_AHCI_CEVA is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# CONFIG_ATA_PIIX=m +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# 
CONFIG_PATA_ARTOP is not set CONFIG_PATA_ATIIXP=y +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_OF_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set +# CONFIG_PATA_PARPORT is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set CONFIG_MD=y CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_BITMAP_FILE=y CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m CONFIG_MD_MULTIPATH=m CONFIG_MD_FAULTY=m +# CONFIG_MD_CLUSTER is not set CONFIG_BCACHE=m +# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +# CONFIG_BCACHE_ASYNC_REGISTRATION is not set +CONFIG_BLK_DEV_DM_BUILTIN=y CONFIG_BLK_DEV_DM=m +# CONFIG_DM_DEBUG is not set +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m 
+CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m CONFIG_DM_WRITECACHE=m +# CONFIG_DM_EBS is not set CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set CONFIG_DM_MIRROR=m CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_RAID=m @@ -752,33 +2411,57 @@ CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_MULTIPATH_HST is not set +# CONFIG_DM_MULTIPATH_IOA is not set CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set CONFIG_DM_SWITCH=m CONFIG_DM_LOG_WRITES=m CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_DM_AUDIT=y CONFIG_TARGET_CORE=m CONFIG_TCM_IBLOCK=m CONFIG_TCM_FILEIO=m CONFIG_TCM_PSCSI=m CONFIG_TCM_USER2=m CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set CONFIG_ISCSI_TARGET=m CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_SBP_TARGET is not set +# CONFIG_REMOTE_TARGET is not set CONFIG_FUSION=y CONFIG_FUSION_SPI=m +# CONFIG_FUSION_FC is not set CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 CONFIG_FUSION_CTL=m CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# CONFIG_FIREWIRE=m CONFIG_FIREWIRE_OHCI=m CONFIG_FIREWIRE_SBP2=m CONFIG_FIREWIRE_NET=m +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y CONFIG_BONDING=m CONFIG_DUMMY=m CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set CONFIG_NET_FC=y CONFIG_IFB=m CONFIG_NET_TEAM=m @@ -789,89 +2472,210 @@ CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y CONFIG_IPVLAN=m CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set +# CONFIG_AMT is not set CONFIG_MACSEC=m CONFIG_NETCONSOLE=m 
CONFIG_NETCONSOLE_DYNAMIC=y +# CONFIG_NETCONSOLE_EXTENDED_LOG is not set +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y CONFIG_NTB_NETDEV=m CONFIG_RIONET=m +CONFIG_RIONET_TX_SIZE=128 +CONFIG_RIONET_RX_SIZE=128 CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set CONFIG_VETH=m CONFIG_VIRTIO_NET=m CONFIG_NLMON=m CONFIG_NET_VRF=m CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set # CONFIG_ATM_DRIVERS is not set + +# +# Distributed Switch Architecture drivers +# +# CONFIG_B53 is not set +# CONFIG_NET_DSA_BCM_SF2 is not set +# CONFIG_NET_DSA_LOOP is not set +# CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK is not set +# CONFIG_NET_DSA_LANTIQ_GSWIP is not set +# CONFIG_NET_DSA_MT7530 is not set +# CONFIG_NET_DSA_MV88E6060 is not set +# CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON is not set +# CONFIG_NET_DSA_MV88E6XXX is not set +# CONFIG_NET_DSA_AR9331 is not set +# CONFIG_NET_DSA_QCA8K is not set +# CONFIG_NET_DSA_SJA1105 is not set +# CONFIG_NET_DSA_XRS700X_I2C is not set +# CONFIG_NET_DSA_XRS700X_MDIO is not set +# CONFIG_NET_DSA_REALTEK is not set +# CONFIG_NET_DSA_SMSC_LAN9303_I2C is not set +# CONFIG_NET_DSA_SMSC_LAN9303_MDIO is not set +# CONFIG_NET_DSA_VITESSE_VSC73XX_SPI is not set +# CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM is not set +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_ADAPTEC is not set # CONFIG_NET_VENDOR_AGERE is not set # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set # CONFIG_NET_VENDOR_AMAZON is not set # CONFIG_NET_VENDOR_AMD is not set # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ASIX=y +# CONFIG_SPI_AX88796C is not set # CONFIG_NET_VENDOR_ATHEROS is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set CONFIG_BNX2=y +CONFIG_CNIC=m CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# 
CONFIG_SYSTEMPORT is not set CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +CONFIG_NET_VENDOR_CADENCE=y +# CONFIG_MACB is not set # CONFIG_NET_VENDOR_CAVIUM is not set +CONFIG_NET_VENDOR_CHELSIO=y CONFIG_CHELSIO_T1=m CONFIG_CHELSIO_T1_1G=y CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_CHELSIO_TLS_DEVICE is not set # CONFIG_NET_VENDOR_CISCO is not set # CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set CONFIG_DNET=m # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set +CONFIG_NET_VENDOR_ENGLEDER=y +# CONFIG_TSNEP is not set # CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_FUNGIBLE=y +# CONFIG_FUN_ETH is not set +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_NET_VENDOR_HUAWEI=y # CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set CONFIG_E1000=m CONFIG_E1000E=m CONFIG_IGB=m +CONFIG_IGB_HWMON=y CONFIG_IGBVF=m CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y CONFIG_I40E=m CONFIG_I40E_DCB=y +CONFIG_IAVF=m CONFIG_I40EVF=m CONFIG_ICE=m +CONFIG_ICE_SWITCHDEV=y CONFIG_FM10K=m +# CONFIG_IGC is not set +# CONFIG_JME is not set +CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set +CONFIG_NET_VENDOR_LITEX=y +# CONFIG_LITEX_LITEETH is not set # CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y # CONFIG_MLX4_CORE_GEN2 is not set CONFIG_MLX5_CORE=m CONFIG_MLX5_FPGA=y CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_BRIDGE=y +CONFIG_MLX5_CLS_ACT=y 
+CONFIG_MLX5_TC_CT=y +CONFIG_MLX5_TC_SAMPLE=y +CONFIG_MLX5_CORE_EN_DCB=y CONFIG_MLX5_CORE_IPOIB=y +# CONFIG_MLX5_MACSEC is not set +# CONFIG_MLX5_EN_IPSEC is not set +# CONFIG_MLX5_EN_TLS is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLX5_SF is not set CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_MICROCHIP is not set # CONFIG_NET_VENDOR_MICROSEMI is not set +CONFIG_NET_VENDOR_MICROSOFT=y # CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_FEALNX is not set # CONFIG_NET_VENDOR_NI is not set # CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set # CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set CONFIG_ETHOC=m +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set # CONFIG_NET_VENDOR_QLOGIC is not set # CONFIG_NET_VENDOR_BROCADE is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y CONFIG_8139CP=m CONFIG_8139TOO=m # CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set CONFIG_R8169=m # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set @@ -882,23 +2686,54 @@ CONFIG_R8169=m # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_SOCIONEXT is not set +CONFIG_NET_VENDOR_STMICRO=y CONFIG_STMMAC_ETH=y +# CONFIG_STMMAC_SELFTESTS is not set +CONFIG_STMMAC_PLATFORM=y +# CONFIG_DWMAC_DWC_QOS_ETH is not set +CONFIG_DWMAC_GENERIC=y +# CONFIG_DWMAC_INTEL_PLAT is not set +CONFIG_DWMAC_LOONGSON=m +# CONFIG_STMMAC_PCI is not set # CONFIG_NET_VENDOR_SUN is not set # 
CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TI is not set +CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set # CONFIG_NET_VENDOR_VIA is not set +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_LIBWX=m CONFIG_NGBE=m CONFIG_TXGBE=m # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_XILINX is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLINK=y +CONFIG_PHYLIB=y +CONFIG_SWPHY=y CONFIG_LED_TRIGGER_PHY=y +CONFIG_PHYLIB_LEDS=y +CONFIG_FIXED_PHY=y CONFIG_SFP=y + +# +# MII PHY device drivers +# CONFIG_AMD_PHY=m +# CONFIG_ADIN_PHY is not set +# CONFIG_ADIN1100_PHY is not set CONFIG_AQUANTIA_PHY=m +# CONFIG_AX88796B_PHY is not set CONFIG_BROADCOM_PHY=m +# CONFIG_BCM54140_PHY is not set CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM84881_PHY is not set CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BCM_NET_PHYPTP=m CONFIG_CICADA_PHY=m CONFIG_CORTINA_PHY=m CONFIG_DAVICOM_PHY=m @@ -908,43 +2743,127 @@ CONFIG_INTEL_XWAY_PHY=m CONFIG_LSI_ET1011C_PHY=m CONFIG_MARVELL_PHY=m CONFIG_MARVELL_10G_PHY=y +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m CONFIG_MICROCHIP_T1_PHY=m CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is not set CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m CONFIG_RENESAS_PHY=m CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m CONFIG_STE10XP=m CONFIG_TERANETICS_PHY=m CONFIG_DP83822_PHY=m CONFIG_DP83TC811_PHY=m CONFIG_DP83848_PHY=m CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set CONFIG_VITESSE_PHY=m CONFIG_XILINX_GMII2RGMII=m CONFIG_MICREL_KS8995MA=m +# CONFIG_PSE_CONTROLLER is not 
set +CONFIG_CAN_DEV=m CONFIG_CAN_VCAN=m +# CONFIG_CAN_VXCAN is not set +CONFIG_CAN_NETLINK=y +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_CAN327 is not set +# CONFIG_CAN_FLEXCAN is not set +# CONFIG_CAN_GRCAN is not set +# CONFIG_CAN_KVASER_PCIEFD is not set CONFIG_CAN_SLCAN=m CONFIG_CAN_C_CAN=m CONFIG_CAN_C_CAN_PLATFORM=m CONFIG_CAN_C_CAN_PCI=m CONFIG_CAN_CC770=m +# CONFIG_CAN_CC770_ISA is not set CONFIG_CAN_CC770_PLATFORM=m +# CONFIG_CAN_CTUCANFD_PCI is not set +# CONFIG_CAN_CTUCANFD_PLATFORM is not set +# CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +# CONFIG_CAN_PEAK_PCIEFD is not set CONFIG_CAN_SJA1000=m CONFIG_CAN_EMS_PCI=m +# CONFIG_CAN_F81601 is not set CONFIG_CAN_KVASER_PCI=m CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y CONFIG_CAN_PLX_PCI=m +# CONFIG_CAN_SJA1000_ISA is not set CONFIG_CAN_SJA1000_PLATFORM=m CONFIG_CAN_SOFTING=m + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set +# CONFIG_CAN_MCP251XFD is not set +# end of CAN SPI interfaces + +# +# CAN USB interfaces +# CONFIG_CAN_8DEV_USB=m CONFIG_CAN_EMS_USB=m +# CONFIG_CAN_ESD_USB is not set +# CONFIG_CAN_ETAS_ES58X is not set +# CONFIG_CAN_F81604 is not set +# CONFIG_CAN_GS_USB is not set CONFIG_CAN_KVASER_USB=m +# CONFIG_CAN_MCBA_USB is not set CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set +# end of CAN USB interfaces + +# CONFIG_CAN_DEBUG_DEVICES is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_FWNODE_MDIO=y +CONFIG_OF_MDIO=y +CONFIG_ACPI_MDIO=y +CONFIG_MDIO_DEVRES=y CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_BCM_UNIMAC is not set +CONFIG_MDIO_CAVIUM=m +# CONFIG_MDIO_GPIO is not set +# CONFIG_MDIO_HISI_FEMAC is not set +CONFIG_MDIO_I2C=y +# CONFIG_MDIO_MVUSB is not set CONFIG_MDIO_MSCC_MIIM=m +# CONFIG_MDIO_OCTEON is not set +# CONFIG_MDIO_IPQ4019 is not set +# CONFIG_MDIO_IPQ8064 is not set CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +# 
CONFIG_MDIO_BUS_MUX_MMIOREG is not set + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=y +# end of PCS device drivers + +# CONFIG_PLIP is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -953,31 +2872,47 @@ CONFIG_PPP_MPPE=m CONFIG_PPP_MULTILINK=y CONFIG_PPPOATM=m CONFIG_PPPOE=m +# CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# CONFIG_PPPOE_HASH_BITS_8 is not set +CONFIG_PPPOE_HASH_BITS=4 CONFIG_PPTP=m CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m CONFIG_SLIP=m +CONFIG_SLHC=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y CONFIG_USB_CATC=m CONFIG_USB_KAWETH=m CONFIG_USB_PEGASUS=m CONFIG_USB_RTL8150=m CONFIG_USB_RTL8152=m CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m # CONFIG_USB_NET_AX8817X is not set # CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDCETHER=m CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m CONFIG_USB_NET_HUAWEI_CDC_NCM=m CONFIG_USB_NET_CDC_MBIM=m CONFIG_USB_NET_DM9601=m +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set CONFIG_USB_NET_SMSC75XX=m CONFIG_USB_NET_SMSC95XX=m CONFIG_USB_NET_GL620A=m # CONFIG_USB_NET_NET1080 is not set CONFIG_USB_NET_PLUSB=m CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m CONFIG_USB_ALI_M5632=y CONFIG_USB_AN2720=y # CONFIG_USB_BELKIN is not set @@ -990,41 +2925,155 @@ CONFIG_USB_NET_KALMIA=m CONFIG_USB_NET_QMI_WWAN=m CONFIG_USB_HSO=m CONFIG_USB_NET_INT51X1=m +# CONFIG_USB_CDC_PHONET is not set CONFIG_USB_IPHETH=m CONFIG_USB_SIERRA_NET=m CONFIG_USB_VL600=m CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set +CONFIG_USB_RTL8153_ECM=m +CONFIG_WLAN=y # CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K is not set +# CONFIG_ATH5K_PCI is not set +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m 
+CONFIG_ATH9K_BTCOEX_SUPPORT=y CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y CONFIG_ATH9K_AHB=y +# CONFIG_ATH9K_DEBUGFS is not set +# CONFIG_ATH9K_DYNACK is not set CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_RFKILL=y +# CONFIG_ATH9K_CHANNEL_CONTEXT is not set +CONFIG_ATH9K_PCOEM=y +# CONFIG_ATH9K_PCI_NO_EEPROM is not set CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +# CONFIG_ATH9K_HWRNG is not set +# CONFIG_CARL9170 is not set +# CONFIG_ATH6KL is not set +# CONFIG_AR5523 is not set +# CONFIG_WIL6210 is not set CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y CONFIG_ATH10K_PCI=m +# CONFIG_ATH10K_AHB is not set +# CONFIG_ATH10K_SDIO is not set +# CONFIG_ATH10K_USB is not set +# CONFIG_ATH10K_DEBUG is not set +# CONFIG_ATH10K_DEBUGFS is not set +# CONFIG_ATH10K_TRACING is not set +# CONFIG_WCN36XX is not set +# CONFIG_ATH11K is not set +# CONFIG_ATH12K is not set # CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +CONFIG_BRCMUTIL=m CONFIG_BRCMSMAC=m +CONFIG_BRCMSMAC_LEDS=y CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y CONFIG_BRCMFMAC_USB=y CONFIG_BRCMFMAC_PCIE=y +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set # CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y CONFIG_IWLDVM=m CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y + +# +# Debugging Options +# +# CONFIG_IWLWIFI_DEBUG is not set +CONFIG_IWLWIFI_DEVICE_TRACING=y +# end of Debugging Options + # CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_WLAN_VENDOR_MARVELL=y +# CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set CONFIG_MWIFIEX=m CONFIG_MWIFIEX_SDIO=m CONFIG_MWIFIEX_PCIE=m CONFIG_MWIFIEX_USB=m +# CONFIG_MWL8K is not set +CONFIG_WLAN_VENDOR_MEDIATEK=y CONFIG_MT7601U=m +CONFIG_MT76_CORE=m 
+CONFIG_MT76_LEDS=y +CONFIG_MT76_USB=m +CONFIG_MT76x02_LIB=m +CONFIG_MT76x02_USB=m +CONFIG_MT76x0_COMMON=m CONFIG_MT76x0U=m +# CONFIG_MT76x0E is not set +CONFIG_MT76x2_COMMON=m +# CONFIG_MT76x2E is not set CONFIG_MT76x2U=m +# CONFIG_MT7603E is not set +# CONFIG_MT7615E is not set +# CONFIG_MT7663U is not set +# CONFIG_MT7663S is not set +# CONFIG_MT7915E is not set +# CONFIG_MT7921E is not set +# CONFIG_MT7921S is not set +# CONFIG_MT7921U is not set +# CONFIG_MT7996E is not set +CONFIG_WLAN_VENDOR_MICROCHIP=y +# CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set +CONFIG_WLAN_VENDOR_PURELIFI=y +# CONFIG_PLFXLC is not set +CONFIG_WLAN_VENDOR_RALINK=y CONFIG_RT2X00=m +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +# CONFIG_RT2500USB is not set +# CONFIG_RT73USB is not set CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y CONFIG_RT2800USB_RT3573=y CONFIG_RT2800USB_RT53XX=y CONFIG_RT2800USB_RT55XX=y CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WLAN_VENDOR_REALTEK=y +# CONFIG_RTL8180 is not set +# CONFIG_RTL8187 is not set +CONFIG_RTL_CARDS=m CONFIG_RTL8192CE=m CONFIG_RTL8192SE=m CONFIG_RTL8192DE=m @@ -1034,29 +3083,77 @@ CONFIG_RTL8188EE=m CONFIG_RTL8192EE=m CONFIG_RTL8821AE=m CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m # CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m CONFIG_RTL8XXXU=m +# CONFIG_RTL8XXXU_UNTESTED is not set +# CONFIG_RTW88 is not set +# CONFIG_RTW89 is not set # CONFIG_WLAN_VENDOR_RSI is not set 
+CONFIG_WLAN_VENDOR_SILABS=y +# CONFIG_WFX is not set # CONFIG_WLAN_VENDOR_ST is not set # CONFIG_WLAN_VENDOR_TI is not set +CONFIG_WLAN_VENDOR_ZYDAS=y +# CONFIG_USB_ZD1201 is not set CONFIG_ZD1211RW=m +# CONFIG_ZD1211RW_DEBUG is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_QTNFMAC_PCIE is not set CONFIG_USB_NET_RNDIS_WLAN=m CONFIG_MAC80211_HWSIM=m +# CONFIG_VIRT_WIFI is not set CONFIG_WAN=y CONFIG_HDLC=m CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set CONFIG_HDLC_CISCO=m CONFIG_HDLC_FR=m CONFIG_HDLC_PPP=m +# CONFIG_HDLC_X25 is not set +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +# CONFIG_LAPBETHER is not set +CONFIG_IEEE802154_DRIVERS=m CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + CONFIG_VMXNET3=m CONFIG_FUJITSU_ES=m CONFIG_USB4_NET=m CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m CONFIG_ISDN=y +CONFIG_ISDN_CAPI=y +CONFIG_CAPI_TRACE=y +CONFIG_ISDN_CAPI_MIDDLEWARE=y CONFIG_MISDN=m CONFIG_MISDN_DSP=m CONFIG_MISDN_L1OIP=m + +# +# mISDN hardware drivers +# CONFIG_MISDN_HFCPCI=m CONFIG_MISDN_HFCMULTI=m CONFIG_MISDN_HFCUSB=m @@ -1065,161 +3162,777 @@ CONFIG_MISDN_SPEEDFAX=m CONFIG_MISDN_INFINEON=m CONFIG_MISDN_W6692=m CONFIG_MISDN_NETJET=m +CONFIG_MISDN_HDLC=m +CONFIG_MISDN_IPAC=m +CONFIG_MISDN_ISAR=m + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=y +# CONFIG_INPUT_MATRIXKMAP is not set +CONFIG_INPUT_VIVALDIFMAP=y + +# +# Userland interfaces +# CONFIG_INPUT_MOUSEDEV=y CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set CONFIG_KEYBOARD_XTKBD=m +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y CONFIG_MOUSE_SERIAL=m CONFIG_MOUSE_APPLETOUCH=m CONFIG_MOUSE_BCM5974=m CONFIG_MOUSE_CYAPA=m CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y CONFIG_MOUSE_ELAN_I2C_SMBUS=y CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set CONFIG_MOUSE_SYNAPTICS_I2C=m CONFIG_MOUSE_SYNAPTICS_USB=m +# 
CONFIG_INPUT_JOYSTICK is not set CONFIG_INPUT_TABLET=y CONFIG_TABLET_USB_ACECAD=m CONFIG_TABLET_USB_AIPTEK=m +# CONFIG_TABLET_USB_HANWANG is not set CONFIG_TABLET_USB_KBTAB=m +# CONFIG_TABLET_USB_PEGASUS is not set CONFIG_TABLET_SERIAL_WACOM4=m CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_AR1021_I2C is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP5 is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set +# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_ILITEK is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set CONFIG_TOUCHSCREEN_ELO=m CONFIG_TOUCHSCREEN_WACOM_W8001=m CONFIG_TOUCHSCREEN_WACOM_I2C=m +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MSG2638 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is 
not set +# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set +# CONFIG_TOUCHSCREEN_IMAGIS is not set +# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_WM97XX is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SUR40 is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +# CONFIG_TOUCHSCREEN_IQS5XX is not set +# CONFIG_TOUCHSCREEN_IQS7211 is not set +# CONFIG_TOUCHSCREEN_ZINITIX is not set +# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATMEL_CAPTOUCH is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_GPIO_VIBRA is not set CONFIG_INPUT_ATI_REMOTE2=m CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set CONFIG_INPUT_POWERMATE=m CONFIG_INPUT_YEALINK=m CONFIG_INPUT_CM109=m CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# 
CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +# CONFIG_INPUT_DA7280_HAPTICS is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_IQS269A is not set +# CONFIG_INPUT_IQS626A is not set +# CONFIG_INPUT_IQS7222 is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_RMI4_CORE=m CONFIG_RMI4_I2C=m CONFIG_RMI4_SPI=m CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y CONFIG_RMI4_F34=y +# CONFIG_RMI4_F3A is not set +# CONFIG_RMI4_F54 is not set CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y CONFIG_SERIO_SERPORT=m +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y CONFIG_SERIO_RAW=m CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_LEGACY_TIOCSTI=y +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_16550A_VARIANTS=y +# CONFIG_SERIAL_8250_FINTEK is not set CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCILIB=y +CONFIG_SERIAL_8250_PCI=y 
+CONFIG_SERIAL_8250_EXAR=y CONFIG_SERIAL_8250_NR_UARTS=16 CONFIG_SERIAL_8250_RUNTIME_UARTS=16 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_PCI1XXXX is not set CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_PERICOM=y +# CONFIG_SERIAL_OF_PLATFORM is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_SPRD is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set CONFIG_N_HDLC=m CONFIG_N_GSM=m CONFIG_NOZOMI=m +# CONFIG_NULL_TTY is not set +CONFIG_HVC_DRIVER=y +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set CONFIG_PPDEV=m CONFIG_VIRTIO_CONSOLE=y CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y CONFIG_IPMI_PANIC_EVENT=y CONFIG_IPMI_PANIC_STRING=y CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m CONFIG_IPMI_SSIF=m CONFIG_IPMI_WATCHDOG=m CONFIG_IPMI_POWEROFF=m CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_TIMERIOMEM=m +# CONFIG_HW_RANDOM_BA431 is not set CONFIG_HW_RANDOM_VIRTIO=m +# CONFIG_HW_RANDOM_CCTRNG is not set +# CONFIG_HW_RANDOM_XIPHERA is not set +# 
CONFIG_APPLICOM is not set +CONFIG_DEVMEM=y +CONFIG_DEVPORT=y +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=m +# CONFIG_TCG_TIS is not set CONFIG_TCG_TIS_SPI=m +# CONFIG_TCG_TIS_SPI_CR50 is not set +# CONFIG_TCG_TIS_I2C is not set +# CONFIG_TCG_TIS_I2C_CR50 is not set CONFIG_TCG_TIS_I2C_ATMEL=m CONFIG_TCG_TIS_I2C_INFINEON=m CONFIG_TCG_TIS_I2C_NUVOTON=m CONFIG_TCG_ATMEL=m CONFIG_TCG_INFINEON=m +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +CONFIG_TCG_TIS_ST33ZP24=m CONFIG_TCG_TIS_ST33ZP24_I2C=m CONFIG_TCG_TIS_ST33ZP24_SPI=m +# CONFIG_XILLYBUS is not set +# CONFIG_XILLYUSB is not set +# end of Character devices + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=y +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=y +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set CONFIG_I2C_AMD756=m CONFIG_I2C_AMD8111=m +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_I801 is not set CONFIG_I2C_ISCH=m CONFIG_I2C_PIIX4=y CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set CONFIG_I2C_SIS96X=m CONFIG_I2C_VIA=m CONFIG_I2C_VIAPRO=m +# CONFIG_I2C_ZHAOXIN is not set + +# +# ACPI drivers +# CONFIG_I2C_SCMI=m +# CONFIG_I2C_ZHAOXIN_SMBUS is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=y +# CONFIG_I2C_DESIGNWARE_SLAVE is not set CONFIG_I2C_DESIGNWARE_PLATFORM=y +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set CONFIG_I2C_GPIO=y +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set CONFIG_I2C_LS2X=m +# CONFIG_I2C_OCORES is not set CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_RK3X is not set CONFIG_I2C_SIMTEC=m +# CONFIG_I2C_XILINX is 
not set + +# +# External I2C/SMBus adapter drivers +# CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set CONFIG_I2C_PARPORT=m +# CONFIG_I2C_PCI1XXXX is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set CONFIG_I2C_TINY_USB=m CONFIG_I2C_VIPERBOARD=m + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_VIRTIO is not set +# end of I2C Hardware Bus support + CONFIG_I2C_STUB=m +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +CONFIG_SPI_MEM=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_CADENCE_XSPI is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +CONFIG_SPI_LOONGSON_CORE=y CONFIG_SPI_LOONGSON_PCI=y CONFIG_SPI_LOONGSON_PLATFORM=m +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PCI1XXXX is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_SN_F_OSPI is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# 
CONFIG_PPS_CLIENT_KTIMER is not set CONFIG_PPS_CLIENT_LDISC=m CONFIG_PPS_CLIENT_PARPORT=m CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y CONFIG_DP83640_PHY=m +# CONFIG_PTP_1588_CLOCK_INES is not set +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_CY8C95X0 is not set CONFIG_PINCTRL_LOONGSON2=y +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set +# CONFIG_PINCTRL_OCELOT is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_STMFX is not set +# CONFIG_PINCTRL_SX150X is not set + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_CADENCE is not set +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_LOGICVC is not set CONFIG_GPIO_LOONGSON_64BIT=y +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_SIFIVE is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_XILINX is not set +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not 
set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# CONFIG_GPIO_VIPERBOARD=m +# end of USB GPIO expanders + +# +# Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +# CONFIG_W1 is not set CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_GPIO is not set +# CONFIG_POWER_RESET_GPIO_RESTART is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_RESTART is not set +# CONFIG_POWER_RESET_SYSCON is not set +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# 
CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_MAX77976 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not set +# CONFIG_CHARGER_BQ256XX is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set CONFIG_SENSORS_AD7414=m CONFIG_SENSORS_AD7418=m CONFIG_SENSORS_ADM1025=m CONFIG_SENSORS_ADM1026=m CONFIG_SENSORS_ADM1029=m CONFIG_SENSORS_ADM1031=m +# CONFIG_SENSORS_ADM1177 is not set CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +# CONFIG_SENSORS_ADT7310 is not set CONFIG_SENSORS_ADT7410=m CONFIG_SENSORS_ADT7411=m CONFIG_SENSORS_ADT7462=m CONFIG_SENSORS_ADT7470=m CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set CONFIG_SENSORS_ASC7621=m +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set CONFIG_SENSORS_ATXP1=m +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set CONFIG_SENSORS_DS620=m CONFIG_SENSORS_DS1621=m CONFIG_SENSORS_I5K_AMB=m CONFIG_SENSORS_F71805F=m CONFIG_SENSORS_F71882FG=m CONFIG_SENSORS_F75375S=m +# CONFIG_SENSORS_FTSTEUTATES is not set CONFIG_SENSORS_GL518SM=m CONFIG_SENSORS_GL520SM=m CONFIG_SENSORS_G760A=m +# CONFIG_SENSORS_G762 is not set +# 
CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set CONFIG_SENSORS_IBMAEM=m CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_IIO_HWMON is not set CONFIG_SENSORS_IT87=m CONFIG_SENSORS_JC42=m +# CONFIG_SENSORS_POWR1220 is not set CONFIG_SENSORS_LINEAGE=m +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set CONFIG_SENSORS_LTC4151=m CONFIG_SENSORS_LTC4215=m +# CONFIG_SENSORS_LTC4222 is not set CONFIG_SENSORS_LTC4245=m +# CONFIG_SENSORS_LTC4260 is not set CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX127 is not set CONFIG_SENSORS_MAX16065=m CONFIG_SENSORS_MAX1619=m CONFIG_SENSORS_MAX1668=m CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set CONFIG_SENSORS_MAX6639=m CONFIG_SENSORS_MAX6650=m CONFIG_SENSORS_MAX6697=m +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MC34VR500 is not set CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +# CONFIG_SENSORS_ADCXX is not set CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set CONFIG_SENSORS_LM73=m CONFIG_SENSORS_LM75=m CONFIG_SENSORS_LM77=m @@ -1237,72 +3950,322 @@ CONFIG_SENSORS_LM95245=m CONFIG_SENSORS_PC87360=m CONFIG_SENSORS_PC87427=m CONFIG_SENSORS_NTC_THERMISTOR=m +# CONFIG_SENSORS_NCT6683 is not set +CONFIG_SENSORS_NCT6775_CORE=m CONFIG_SENSORS_NCT6775=m +# CONFIG_SENSORS_NCT6775_I2C is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not 
set CONFIG_SENSORS_PCF8591=m CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +# CONFIG_SENSORS_ACBEL_FSG032 is not set +# CONFIG_SENSORS_ADM1266 is not set CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_BEL_PFE is not set +# CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set +# CONFIG_SENSORS_FSP_3Y is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_DPS920AB is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR36021 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set CONFIG_SENSORS_LM25066=m +# CONFIG_SENSORS_LT7182S is not set CONFIG_SENSORS_LTC2978=m +# CONFIG_SENSORS_LTC3815 is not set +# CONFIG_SENSORS_MAX15301 is not set CONFIG_SENSORS_MAX16064=m +# CONFIG_SENSORS_MAX16601 is not set +# CONFIG_SENSORS_MAX20730 is not set +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX31785 is not set CONFIG_SENSORS_MAX34440=m CONFIG_SENSORS_MAX8688=m +# CONFIG_SENSORS_MP2888 is not set +# CONFIG_SENSORS_MP2975 is not set +# CONFIG_SENSORS_MP5023 is not set +# CONFIG_SENSORS_MPQ7932 is not set +# CONFIG_SENSORS_PIM4328 is not set +# CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set CONFIG_SENSORS_UCD9000=m CONFIG_SENSORS_UCD9200=m +# CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set CONFIG_SENSORS_ZL6100=m +# CONFIG_SENSORS_PWM_FAN is not set +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set CONFIG_SENSORS_SHT15=m CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +# CONFIG_SENSORS_SHTC1 is not set CONFIG_SENSORS_SIS5595=m 
CONFIG_SENSORS_DME1737=m CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set CONFIG_SENSORS_EMC6W201=m CONFIG_SENSORS_SMSC47M1=m CONFIG_SENSORS_SMSC47M192=m CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m CONFIG_SENSORS_SCH5627=m CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_ADC128D818 is not set CONFIG_SENSORS_ADS7828=m +# CONFIG_SENSORS_ADS7871 is not set CONFIG_SENSORS_AMC6821=m CONFIG_SENSORS_INA209=m CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set CONFIG_SENSORS_THMC50=m CONFIG_SENSORS_TMP102=m +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set CONFIG_SENSORS_TMP401=m CONFIG_SENSORS_TMP421=m +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set CONFIG_SENSORS_VIA686A=m CONFIG_SENSORS_VT1211=m CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set CONFIG_SENSORS_W83781D=m CONFIG_SENSORS_W83791D=m CONFIG_SENSORS_W83792D=m CONFIG_SENSORS_W83793=m CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set CONFIG_SENSORS_W83L785TS=m CONFIG_SENSORS_W83L786NG=m CONFIG_SENSORS_W83627HF=m CONFIG_SENSORS_W83627EHF=m + +# +# ACPI drivers +# CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +# CONFIG_THERMAL_GOV_USER_SPACE is not set +# CONFIG_CPU_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set CONFIG_THERMAL_EMULATION=y +# CONFIG_THERMAL_MMIO is not set +# CONFIG_GENERIC_ADC_THERMAL is 
not set CONFIG_LOONGSON2_THERMAL=m CONFIG_WATCHDOG=y CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# CONFIG_SOFT_WATCHDOG=m CONFIG_GPIO_WATCHDOG=m CONFIG_WDAT_WDT=m +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set CONFIG_ALIM7101_WDT=m CONFIG_I6300ESB_WDT=m +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# CONFIG_PCIPCWATCHDOG=m CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y CONFIG_BCMA_DRIVER_GMAC_CMN=y CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_MAX5970 is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_GATEWORKS_GSC is not set +# 
CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_LPC_ICH is not set +CONFIG_LPC_SCH=m +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77541 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set CONFIG_MFD_VIPERBOARD=m +# CONFIG_MFD_NTXEC is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK8XX_I2C is not set +# CONFIG_MFD_RK8XX_SPI is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set CONFIG_MFD_SM501=m CONFIG_MFD_SM501_GPIO=y +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# 
CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS65219 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TQMX86 is not set CONFIG_MFD_VX855=m +# CONFIG_MFD_LOCHNAGAR is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_ROHM_BD71828 is not set +# CONFIG_MFD_ROHM_BD957XMUF is not set +# CONFIG_MFD_STPMIC1 is not set +# CONFIG_MFD_STMFX is not set +# CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_QCOM_PM8008 is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set +# CONFIG_MFD_RSMU_I2C is not set +# CONFIG_MFD_RSMU_SPI is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set CONFIG_RC_CORE=m CONFIG_LIRC=y +CONFIG_RC_MAP=m CONFIG_RC_DECODERS=y CONFIG_IR_IMON_DECODER=m CONFIG_IR_JVC_DECODER=m @@ -1310,6 +4273,7 @@ CONFIG_IR_MCE_KBD_DECODER=m CONFIG_IR_NEC_DECODER=m CONFIG_IR_RC5_DECODER=m CONFIG_IR_RC6_DECODER=m +# CONFIG_IR_RCMM_DECODER is not set CONFIG_IR_SANYO_DECODER=m CONFIG_IR_SHARP_DECODER=m CONFIG_IR_SONY_DECODER=m @@ -1317,31 +4281,114 @@ CONFIG_IR_XMP_DECODER=m CONFIG_RC_DEVICES=y CONFIG_IR_ENE=m CONFIG_IR_FINTEK=m +# CONFIG_IR_GPIO_CIR is not set +# CONFIG_IR_GPIO_TX is not set +# CONFIG_IR_HIX5HD2 is not set +# CONFIG_IR_IGORPLUGUSB is not set CONFIG_IR_IGUANA=m CONFIG_IR_IMON=m CONFIG_IR_IMON_RAW=m CONFIG_IR_ITE_CIR=m CONFIG_IR_MCEUSB=m CONFIG_IR_NUVOTON=m 
+# CONFIG_IR_PWM_TX is not set CONFIG_IR_REDRAT3=m CONFIG_IR_SERIAL=m CONFIG_IR_SERIAL_TRANSMITTER=y +# CONFIG_IR_SPI is not set CONFIG_IR_STREAMZAP=m +# CONFIG_IR_TOY is not set CONFIG_IR_TTUSBIR=m CONFIG_RC_ATI_REMOTE=m +# CONFIG_RC_LOOPBACK is not set +# CONFIG_RC_XBOX_DVD is not set +CONFIG_CEC_CORE=m + +# +# CEC support +# +# CONFIG_MEDIA_CEC_RC is not set +CONFIG_MEDIA_CEC_SUPPORT=y +# CONFIG_CEC_CH7322 is not set CONFIG_USB_PULSE8_CEC=m CONFIG_USB_RAINSHADOW_CEC=m +# end of CEC support + CONFIG_MEDIA_SUPPORT=m -CONFIG_DVB_MAX_ADAPTERS=8 -CONFIG_MEDIA_USB_SUPPORT=y -CONFIG_USB_GSPCA=m +# CONFIG_MEDIA_SUPPORT_FILTER is not set +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set + +# +# Media device types +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +CONFIG_MEDIA_SDR_SUPPORT=y +CONFIG_MEDIA_PLATFORM_SUPPORT=y +CONFIG_MEDIA_TEST_SUPPORT=y +# end of Media device types + +# +# Media core support +# +CONFIG_VIDEO_DEV=m +CONFIG_MEDIA_CONTROLLER=y +CONFIG_DVB_CORE=m +# end of Media core support + +# +# Video4Linux options +# +CONFIG_VIDEO_V4L2_I2C=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEO_TUNER=m +CONFIG_V4L2_FWNODE=m +CONFIG_V4L2_ASYNC=m +# end of Video4Linux options + +# +# Media controller options +# +CONFIG_MEDIA_CONTROLLER_DVB=y +# end of Media controller options + +# +# Digital TV options +# +# CONFIG_DVB_MMAP is not set +CONFIG_DVB_NET=y +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_DVB_DYNAMIC_MINORS=y +# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set +# CONFIG_DVB_ULE_DEBUG is not set +# end of Digital TV options + +# +# Media drivers +# + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_GSPCA=m CONFIG_USB_GSPCA_BENQ=m CONFIG_USB_GSPCA_CONEX=m CONFIG_USB_GSPCA_CPIA1=m +# CONFIG_USB_GSPCA_DTCS033 is not set CONFIG_USB_GSPCA_ETOMS=m CONFIG_USB_GSPCA_FINEPIX=m 
CONFIG_USB_GSPCA_JEILINJ=m CONFIG_USB_GSPCA_JL2005BCD=m +# CONFIG_USB_GSPCA_KINECT is not set CONFIG_USB_GSPCA_KONICA=m CONFIG_USB_GSPCA_MARS=m CONFIG_USB_GSPCA_MR97310A=m @@ -1368,10 +4415,12 @@ CONFIG_USB_GSPCA_SQ905=m CONFIG_USB_GSPCA_SQ905C=m CONFIG_USB_GSPCA_SQ930X=m CONFIG_USB_GSPCA_STK014=m +# CONFIG_USB_GSPCA_STK1135 is not set CONFIG_USB_GSPCA_STV0680=m CONFIG_USB_GSPCA_SUNPLUS=m CONFIG_USB_GSPCA_T613=m CONFIG_USB_GSPCA_TOPRO=m +# CONFIG_USB_GSPCA_TOUPTEK is not set CONFIG_USB_GSPCA_TV8532=m CONFIG_USB_GSPCA_VC032X=m CONFIG_USB_GSPCA_VICAM=m @@ -1381,31 +4430,62 @@ CONFIG_USB_GL860=m CONFIG_USB_M5602=m CONFIG_USB_STV06XX=m CONFIG_USB_PWC=m +# CONFIG_USB_PWC_DEBUG is not set +CONFIG_USB_PWC_INPUT_EVDEV=y CONFIG_USB_S2255=m +# CONFIG_VIDEO_USBTV is not set CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y + +# +# Analog TV USB devices +# +# CONFIG_VIDEO_GO7007 is not set CONFIG_VIDEO_HDPVR=m CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_PVRUSB2_SYSFS=y +CONFIG_VIDEO_PVRUSB2_DVB=y +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set +# CONFIG_VIDEO_STK1160 is not set + +# +# Analog/digital TV USB devices +# CONFIG_VIDEO_AU0828=m +CONFIG_VIDEO_AU0828_V4L2=y +# CONFIG_VIDEO_AU0828_RC is not set + +# +# Digital TV USB devices +# +# CONFIG_DVB_AS102 is not set CONFIG_DVB_B2C2_FLEXCOP_USB=m +# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set CONFIG_DVB_USB_V2=m CONFIG_DVB_USB_AF9035=m CONFIG_DVB_USB_ANYSEE=m CONFIG_DVB_USB_AU6610=m CONFIG_DVB_USB_AZ6007=m CONFIG_DVB_USB_CE6230=m +# CONFIG_DVB_USB_DVBSKY is not set CONFIG_DVB_USB_EC168=m CONFIG_DVB_USB_GL861=m CONFIG_DVB_USB_LME2510=m CONFIG_DVB_USB_MXL111SF=m +# CONFIG_DVB_USB_ZD1301 is not set CONFIG_DVB_USB=m +# CONFIG_DVB_USB_DEBUG is not set CONFIG_DVB_USB_A800=m CONFIG_DVB_USB_AF9005=m CONFIG_DVB_USB_AF9005_REMOTE=m CONFIG_DVB_USB_AZ6027=m CONFIG_DVB_USB_CINERGY_T2=m CONFIG_DVB_USB_CXUSB=m +# CONFIG_DVB_USB_CXUSB_ANALOG is not set CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIB3000MC=m CONFIG_DVB_USB_DIBUSB_MB=m +# 
CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set CONFIG_DVB_USB_DIBUSB_MC=m CONFIG_DVB_USB_DIGITV=m CONFIG_DVB_USB_DTT200U=m @@ -1424,103 +4504,1042 @@ CONFIG_DVB_USB_VP7045=m CONFIG_SMS_USB_DRV=m CONFIG_DVB_TTUSB_BUDGET=m CONFIG_DVB_TTUSB_DEC=m + +# +# Webcam, TV (analog/digital) USB devices +# CONFIG_VIDEO_EM28XX=m +# CONFIG_VIDEO_EM28XX_V4L2 is not set CONFIG_VIDEO_EM28XX_ALSA=m CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_VIDEO_EM28XX_RC=m + +# +# Software defined radio USB devices +# +# CONFIG_USB_AIRSPY is not set +# CONFIG_USB_HACKRF is not set +# CONFIG_USB_MSI2500 is not set CONFIG_MEDIA_PCI_SUPPORT=y + +# +# Media capture support +# +# CONFIG_VIDEO_SOLO6X10 is not set +# CONFIG_VIDEO_TW5864 is not set +# CONFIG_VIDEO_TW68 is not set +# CONFIG_VIDEO_TW686X is not set +# CONFIG_VIDEO_ZORAN is not set + +# +# Media capture/analog TV support +# +# CONFIG_VIDEO_DT3155 is not set CONFIG_VIDEO_IVTV=m +# CONFIG_VIDEO_IVTV_ALSA is not set CONFIG_VIDEO_FB_IVTV=m +# CONFIG_VIDEO_HEXIUM_GEMINI is not set +# CONFIG_VIDEO_HEXIUM_ORION is not set +# CONFIG_VIDEO_MXB is not set + +# +# Media capture/analog/hybrid TV support +# CONFIG_VIDEO_BT848=m CONFIG_DVB_BT8XX=m CONFIG_VIDEO_CX18=m +# CONFIG_VIDEO_CX18_ALSA is not set CONFIG_VIDEO_CX23885=m CONFIG_MEDIA_ALTERA_CI=m +# CONFIG_VIDEO_CX25821 is not set CONFIG_VIDEO_CX88=m CONFIG_VIDEO_CX88_ALSA=m CONFIG_VIDEO_CX88_BLACKBIRD=m CONFIG_VIDEO_CX88_DVB=m # CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set +CONFIG_VIDEO_CX88_MPEG=m CONFIG_VIDEO_SAA7134=m CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_RC=y CONFIG_VIDEO_SAA7134_DVB=m CONFIG_VIDEO_SAA7164=m + +# +# Media digital TV PCI Adapters +# CONFIG_DVB_B2C2_FLEXCOP_PCI=m +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set CONFIG_DVB_DDBRIDGE=m +# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set CONFIG_DVB_DM1105=m CONFIG_MANTIS_CORE=m CONFIG_DVB_MANTIS=m CONFIG_DVB_HOPPER=m +# CONFIG_DVB_NETUP_UNIDVB is not set CONFIG_DVB_NGENE=m CONFIG_DVB_PLUTO2=m CONFIG_DVB_PT1=m +# CONFIG_DVB_PT3 is not set +# 
CONFIG_DVB_SMIPCIE is not set CONFIG_DVB_BUDGET_CORE=m CONFIG_DVB_BUDGET=m CONFIG_DVB_BUDGET_CI=m CONFIG_DVB_BUDGET_AV=m +# CONFIG_IPU_BRIDGE is not set +CONFIG_RADIO_ADAPTERS=m +# CONFIG_RADIO_MAXIRADIO is not set +# CONFIG_RADIO_SAA7706H is not set +# CONFIG_RADIO_SHARK is not set +# CONFIG_RADIO_SHARK2 is not set +# CONFIG_RADIO_SI4713 is not set +CONFIG_RADIO_TEA575X=m +# CONFIG_RADIO_TEA5764 is not set +# CONFIG_RADIO_TEF6862 is not set +# CONFIG_RADIO_WL1273 is not set +# CONFIG_USB_DSBR is not set +# CONFIG_USB_KEENE is not set +# CONFIG_USB_MA901 is not set +# CONFIG_USB_MR800 is not set +# CONFIG_USB_RAREMONO is not set +# CONFIG_RADIO_SI470X is not set +CONFIG_MEDIA_PLATFORM_DRIVERS=y +# CONFIG_V4L_PLATFORM_DRIVERS is not set +# CONFIG_SDR_PLATFORM_DRIVERS is not set +# CONFIG_DVB_PLATFORM_DRIVERS is not set +# CONFIG_V4L_MEM2MEM_DRIVERS is not set + +# +# Allegro DVT media platform drivers +# + +# +# Amlogic media platform drivers +# + +# +# Amphion drivers +# + +# +# Aspeed media platform drivers +# + +# +# Atmel media platform drivers +# + +# +# Cadence media platform drivers +# +# CONFIG_VIDEO_CADENCE_CSI2RX is not set +# CONFIG_VIDEO_CADENCE_CSI2TX is not set + +# +# Chips&Media media platform drivers +# + +# +# Intel media platform drivers +# + +# +# Marvell media platform drivers +# + +# +# Mediatek media platform drivers +# + +# +# Microchip Technology, Inc. 
media platform drivers +# + +# +# NVidia media platform drivers +# + +# +# NXP media platform drivers +# + +# +# Qualcomm media platform drivers +# + +# +# Renesas media platform drivers +# + +# +# Rockchip media platform drivers +# + +# +# Samsung media platform drivers +# + +# +# STMicroelectronics media platform drivers +# + +# +# Sunxi media platform drivers +# + +# +# Texas Instruments drivers +# + +# +# Verisilicon media platform drivers +# + +# +# VIA media platform drivers +# + +# +# Xilinx media platform drivers +# + +# +# MMC/SDIO DVB adapters +# CONFIG_SMS_SDIO_DRV=m +# CONFIG_V4L_TEST_DRIVERS is not set +# CONFIG_DVB_TEST_DRIVERS is not set + +# +# FireWire (IEEE 1394) Adapters +# CONFIG_DVB_FIREDTV=m +CONFIG_DVB_FIREDTV_INPUT=y +CONFIG_MEDIA_COMMON_OPTIONS=y + +# +# common driver options +# +CONFIG_CYPRESS_FIRMWARE=m +CONFIG_TTPCI_EEPROM=m +CONFIG_UVC_COMMON=m +CONFIG_VIDEO_CX2341X=m +CONFIG_VIDEO_TVEEPROM=m +CONFIG_DVB_B2C2_FLEXCOP=m +CONFIG_VIDEO_SAA7146=m +CONFIG_VIDEO_SAA7146_VV=m +CONFIG_SMS_SIANO_MDTV=m +CONFIG_SMS_SIANO_RC=y +# CONFIG_SMS_SIANO_DEBUGFS is not set +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_V4L2=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_VMALLOC=m +CONFIG_VIDEOBUF2_DMA_SG=m +CONFIG_VIDEOBUF2_DVB=m +# end of Media drivers + +# +# Media ancillary drivers +# +CONFIG_MEDIA_ATTACH=y +CONFIG_VIDEO_IR_I2C=m +CONFIG_VIDEO_CAMERA_SENSOR=y +# CONFIG_VIDEO_AR0521 is not set +# CONFIG_VIDEO_HI556 is not set +# CONFIG_VIDEO_HI846 is not set +# CONFIG_VIDEO_HI847 is not set +# CONFIG_VIDEO_IMX208 is not set +# CONFIG_VIDEO_IMX214 is not set +# CONFIG_VIDEO_IMX219 is not set +# CONFIG_VIDEO_IMX258 is not set +# CONFIG_VIDEO_IMX274 is not set +# CONFIG_VIDEO_IMX290 is not set +# CONFIG_VIDEO_IMX296 is not set +# CONFIG_VIDEO_IMX319 is not set +# CONFIG_VIDEO_IMX334 is not set +# CONFIG_VIDEO_IMX335 is not set +# CONFIG_VIDEO_IMX355 is not set +# CONFIG_VIDEO_IMX412 is not set +# CONFIG_VIDEO_IMX415 is not set +# CONFIG_VIDEO_MT9M001 is not 
set +# CONFIG_VIDEO_MT9M111 is not set +# CONFIG_VIDEO_MT9P031 is not set +# CONFIG_VIDEO_MT9T112 is not set +# CONFIG_VIDEO_MT9V011 is not set +# CONFIG_VIDEO_MT9V032 is not set +# CONFIG_VIDEO_MT9V111 is not set +# CONFIG_VIDEO_OG01A1B is not set +# CONFIG_VIDEO_OV01A10 is not set +# CONFIG_VIDEO_OV02A10 is not set +# CONFIG_VIDEO_OV08D10 is not set +# CONFIG_VIDEO_OV08X40 is not set +# CONFIG_VIDEO_OV13858 is not set +# CONFIG_VIDEO_OV13B10 is not set +# CONFIG_VIDEO_OV2640 is not set +# CONFIG_VIDEO_OV2659 is not set +# CONFIG_VIDEO_OV2680 is not set +# CONFIG_VIDEO_OV2685 is not set +# CONFIG_VIDEO_OV2740 is not set +# CONFIG_VIDEO_OV4689 is not set +# CONFIG_VIDEO_OV5640 is not set +# CONFIG_VIDEO_OV5645 is not set +# CONFIG_VIDEO_OV5647 is not set +# CONFIG_VIDEO_OV5648 is not set +# CONFIG_VIDEO_OV5670 is not set +# CONFIG_VIDEO_OV5675 is not set +# CONFIG_VIDEO_OV5693 is not set +# CONFIG_VIDEO_OV5695 is not set +# CONFIG_VIDEO_OV6650 is not set +# CONFIG_VIDEO_OV7251 is not set +# CONFIG_VIDEO_OV7640 is not set +# CONFIG_VIDEO_OV7670 is not set +# CONFIG_VIDEO_OV772X is not set +# CONFIG_VIDEO_OV7740 is not set +# CONFIG_VIDEO_OV8856 is not set +# CONFIG_VIDEO_OV8858 is not set +# CONFIG_VIDEO_OV8865 is not set +# CONFIG_VIDEO_OV9282 is not set +# CONFIG_VIDEO_OV9640 is not set +# CONFIG_VIDEO_OV9650 is not set +# CONFIG_VIDEO_OV9734 is not set +# CONFIG_VIDEO_RDACM20 is not set +# CONFIG_VIDEO_RDACM21 is not set +# CONFIG_VIDEO_RJ54N1 is not set +# CONFIG_VIDEO_S5C73M3 is not set +# CONFIG_VIDEO_S5K5BAF is not set +# CONFIG_VIDEO_S5K6A3 is not set +# CONFIG_VIDEO_ST_VGXY61 is not set +# CONFIG_VIDEO_CCS is not set +# CONFIG_VIDEO_ET8EK8 is not set + +# +# Lens drivers +# +# CONFIG_VIDEO_AD5820 is not set +# CONFIG_VIDEO_AK7375 is not set +# CONFIG_VIDEO_DW9714 is not set +# CONFIG_VIDEO_DW9719 is not set +# CONFIG_VIDEO_DW9768 is not set +# CONFIG_VIDEO_DW9807_VCM is not set +# end of Lens drivers + +# +# Flash devices +# +# CONFIG_VIDEO_ADP1653 is not 
set +# CONFIG_VIDEO_LM3560 is not set +# CONFIG_VIDEO_LM3646 is not set +# end of Flash devices + +# +# Audio decoders, processors and mixers +# +CONFIG_VIDEO_CS3308=m +CONFIG_VIDEO_CS5345=m +CONFIG_VIDEO_CS53L32A=m +CONFIG_VIDEO_MSP3400=m +# CONFIG_VIDEO_SONY_BTF_MPX is not set +# CONFIG_VIDEO_TDA1997X is not set +# CONFIG_VIDEO_TDA7432 is not set +# CONFIG_VIDEO_TDA9840 is not set +# CONFIG_VIDEO_TEA6415C is not set +# CONFIG_VIDEO_TEA6420 is not set +# CONFIG_VIDEO_TLV320AIC23B is not set +# CONFIG_VIDEO_TVAUDIO is not set +# CONFIG_VIDEO_UDA1342 is not set +CONFIG_VIDEO_VP27SMPX=m +CONFIG_VIDEO_WM8739=m +CONFIG_VIDEO_WM8775=m +# end of Audio decoders, processors and mixers + +# +# RDS decoders +# +# CONFIG_VIDEO_SAA6588 is not set +# end of RDS decoders + +# +# Video decoders +# +# CONFIG_VIDEO_ADV7180 is not set +# CONFIG_VIDEO_ADV7183 is not set +# CONFIG_VIDEO_ADV748X is not set +# CONFIG_VIDEO_ADV7604 is not set +# CONFIG_VIDEO_ADV7842 is not set +# CONFIG_VIDEO_BT819 is not set +# CONFIG_VIDEO_BT856 is not set +# CONFIG_VIDEO_BT866 is not set +# CONFIG_VIDEO_ISL7998X is not set +# CONFIG_VIDEO_KS0127 is not set +# CONFIG_VIDEO_ML86V7667 is not set +# CONFIG_VIDEO_SAA7110 is not set +CONFIG_VIDEO_SAA711X=m +# CONFIG_VIDEO_TC358743 is not set +# CONFIG_VIDEO_TC358746 is not set +# CONFIG_VIDEO_TVP514X is not set +# CONFIG_VIDEO_TVP5150 is not set +# CONFIG_VIDEO_TVP7002 is not set +# CONFIG_VIDEO_TW2804 is not set +# CONFIG_VIDEO_TW9903 is not set +# CONFIG_VIDEO_TW9906 is not set +# CONFIG_VIDEO_TW9910 is not set +# CONFIG_VIDEO_VPX3220 is not set + +# +# Video and audio decoders +# +CONFIG_VIDEO_SAA717X=m +CONFIG_VIDEO_CX25840=m +# end of Video decoders + +# +# Video encoders +# +# CONFIG_VIDEO_ADV7170 is not set +# CONFIG_VIDEO_ADV7175 is not set +# CONFIG_VIDEO_ADV7343 is not set +# CONFIG_VIDEO_ADV7393 is not set +# CONFIG_VIDEO_ADV7511 is not set +# CONFIG_VIDEO_AK881X is not set +CONFIG_VIDEO_SAA7127=m +# CONFIG_VIDEO_SAA7185 is not set +# 
CONFIG_VIDEO_THS8200 is not set +# end of Video encoders + +# +# Video improvement chips +# +CONFIG_VIDEO_UPD64031A=m +CONFIG_VIDEO_UPD64083=m +# end of Video improvement chips + +# +# Audio/Video compression chips +# +# CONFIG_VIDEO_SAA6752HS is not set +# end of Audio/Video compression chips + +# +# SDR tuner chips +# +# CONFIG_SDR_MAX2175 is not set +# end of SDR tuner chips + +# +# Miscellaneous helper chips +# +# CONFIG_VIDEO_I2C is not set +CONFIG_VIDEO_M52790=m +# CONFIG_VIDEO_ST_MIPID02 is not set +# CONFIG_VIDEO_THS7303 is not set +# end of Miscellaneous helper chips + +# +# Video serializers and deserializers +# +# CONFIG_VIDEO_DS90UB913 is not set +# CONFIG_VIDEO_DS90UB953 is not set +# CONFIG_VIDEO_DS90UB960 is not set +# end of Video serializers and deserializers + +# +# Media SPI Adapters +# +CONFIG_CXD2880_SPI_DRV=m +# CONFIG_VIDEO_GS1662 is not set +# end of Media SPI Adapters + +CONFIG_MEDIA_TUNER=m + +# +# Customize TV tuners +# +CONFIG_MEDIA_TUNER_E4000=m +CONFIG_MEDIA_TUNER_FC0011=m +CONFIG_MEDIA_TUNER_FC0012=m +CONFIG_MEDIA_TUNER_FC0013=m +CONFIG_MEDIA_TUNER_FC2580=m +CONFIG_MEDIA_TUNER_IT913X=m +CONFIG_MEDIA_TUNER_M88RS6000T=m +CONFIG_MEDIA_TUNER_MAX2165=m +CONFIG_MEDIA_TUNER_MC44S803=m +CONFIG_MEDIA_TUNER_MSI001=m +CONFIG_MEDIA_TUNER_MT2060=m +CONFIG_MEDIA_TUNER_MT2063=m +CONFIG_MEDIA_TUNER_MT20XX=m +CONFIG_MEDIA_TUNER_MT2131=m +CONFIG_MEDIA_TUNER_MT2266=m +CONFIG_MEDIA_TUNER_MXL301RF=m +CONFIG_MEDIA_TUNER_MXL5005S=m +CONFIG_MEDIA_TUNER_MXL5007T=m +CONFIG_MEDIA_TUNER_QM1D1B0004=m +CONFIG_MEDIA_TUNER_QM1D1C0042=m +CONFIG_MEDIA_TUNER_QT1010=m +CONFIG_MEDIA_TUNER_R820T=m +CONFIG_MEDIA_TUNER_SI2157=m +CONFIG_MEDIA_TUNER_SIMPLE=m +CONFIG_MEDIA_TUNER_TDA18212=m +CONFIG_MEDIA_TUNER_TDA18218=m +CONFIG_MEDIA_TUNER_TDA18250=m +CONFIG_MEDIA_TUNER_TDA18271=m +CONFIG_MEDIA_TUNER_TDA827X=m +CONFIG_MEDIA_TUNER_TDA8290=m +CONFIG_MEDIA_TUNER_TDA9887=m +CONFIG_MEDIA_TUNER_TEA5761=m +CONFIG_MEDIA_TUNER_TEA5767=m +CONFIG_MEDIA_TUNER_TUA9001=m 
+CONFIG_MEDIA_TUNER_XC2028=m +CONFIG_MEDIA_TUNER_XC4000=m +CONFIG_MEDIA_TUNER_XC5000=m +# end of Customize TV tuners + +# +# Customise DVB Frontends +# + +# +# Multistandard (satellite) frontends +# +CONFIG_DVB_MXL5XX=m +CONFIG_DVB_STB0899=m +CONFIG_DVB_STB6100=m +CONFIG_DVB_STV090x=m +CONFIG_DVB_STV0910=m +CONFIG_DVB_STV6110x=m +CONFIG_DVB_STV6111=m + +# +# Multistandard (cable + terrestrial) frontends +# +CONFIG_DVB_DRXK=m +CONFIG_DVB_MN88472=m +CONFIG_DVB_MN88473=m +CONFIG_DVB_SI2165=m +CONFIG_DVB_TDA18271C2DD=m + +# +# DVB-S (satellite) frontends +# +CONFIG_DVB_CX24110=m +CONFIG_DVB_CX24116=m +CONFIG_DVB_CX24117=m +CONFIG_DVB_CX24120=m +CONFIG_DVB_CX24123=m +CONFIG_DVB_DS3000=m +CONFIG_DVB_MB86A16=m +CONFIG_DVB_MT312=m +CONFIG_DVB_S5H1420=m +CONFIG_DVB_SI21XX=m +CONFIG_DVB_STB6000=m +CONFIG_DVB_STV0288=m +CONFIG_DVB_STV0299=m +CONFIG_DVB_STV0900=m +CONFIG_DVB_STV6110=m +CONFIG_DVB_TDA10071=m +CONFIG_DVB_TDA10086=m +CONFIG_DVB_TDA8083=m +CONFIG_DVB_TDA8261=m +CONFIG_DVB_TDA826X=m +CONFIG_DVB_TS2020=m +CONFIG_DVB_TUA6100=m +CONFIG_DVB_TUNER_CX24113=m +CONFIG_DVB_TUNER_ITD1000=m +CONFIG_DVB_VES1X93=m +CONFIG_DVB_ZL10036=m +CONFIG_DVB_ZL10039=m + +# +# DVB-T (terrestrial) frontends +# +CONFIG_DVB_CX22700=m +CONFIG_DVB_CX22702=m +CONFIG_DVB_CXD2820R=m +CONFIG_DVB_CXD2841ER=m +CONFIG_DVB_DIB3000MB=m +CONFIG_DVB_DIB3000MC=m +CONFIG_DVB_DIB7000M=m +CONFIG_DVB_DIB7000P=m +CONFIG_DVB_DIB9000=m +CONFIG_DVB_DRXD=m +CONFIG_DVB_EC100=m +CONFIG_DVB_GP8PSK_FE=m +CONFIG_DVB_L64781=m +CONFIG_DVB_MT352=m +CONFIG_DVB_NXT6000=m +CONFIG_DVB_S5H1432=m +CONFIG_DVB_SP887X=m +CONFIG_DVB_STV0367=m +CONFIG_DVB_TDA10048=m +CONFIG_DVB_TDA1004X=m +CONFIG_DVB_ZD1301_DEMOD=m +CONFIG_DVB_ZL10353=m +CONFIG_DVB_CXD2880=m + +# +# DVB-C (cable) frontends +# +CONFIG_DVB_STV0297=m +CONFIG_DVB_TDA10021=m +CONFIG_DVB_TDA10023=m +CONFIG_DVB_VES1820=m + +# +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends +# +CONFIG_DVB_AU8522=m +CONFIG_DVB_AU8522_DTV=m +CONFIG_DVB_AU8522_V4L=m 
+CONFIG_DVB_BCM3510=m +CONFIG_DVB_LG2160=m +CONFIG_DVB_LGDT3305=m +CONFIG_DVB_LGDT330X=m +CONFIG_DVB_MXL692=m +CONFIG_DVB_NXT200X=m +CONFIG_DVB_OR51132=m +CONFIG_DVB_OR51211=m +CONFIG_DVB_S5H1409=m +CONFIG_DVB_S5H1411=m + +# +# ISDB-T (terrestrial) frontends +# +CONFIG_DVB_DIB8000=m +CONFIG_DVB_MB86A20S=m +CONFIG_DVB_S921=m + +# +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends +# +CONFIG_DVB_MN88443X=m +CONFIG_DVB_TC90522=m + +# +# Digital terrestrial only tuners/PLL +# +CONFIG_DVB_PLL=m +CONFIG_DVB_TUNER_DIB0070=m +CONFIG_DVB_TUNER_DIB0090=m + +# +# SEC control devices for DVB-S +# +CONFIG_DVB_A8293=m +CONFIG_DVB_AF9033=m +CONFIG_DVB_ASCOT2E=m +CONFIG_DVB_ATBM8830=m +CONFIG_DVB_HELENE=m +CONFIG_DVB_HORUS3A=m +CONFIG_DVB_ISL6405=m +CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m +CONFIG_DVB_IX2505V=m +CONFIG_DVB_LGS8GL5=m +CONFIG_DVB_LGS8GXX=m +CONFIG_DVB_LNBH25=m +CONFIG_DVB_LNBH29=m +CONFIG_DVB_LNBP21=m +CONFIG_DVB_LNBP22=m +CONFIG_DVB_M88RS2000=m +CONFIG_DVB_TDA665x=m +CONFIG_DVB_DRX39XYJ=m + +# +# Common Interface (EN50221) controller drivers +# +CONFIG_DVB_CXD2099=m +CONFIG_DVB_SP2=m +# end of Customise DVB Frontends + +# +# Tools to develop new frontends +# +# CONFIG_DVB_DUMMY_FE is not set +# end of Media ancillary drivers + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set CONFIG_DRM=y +# CONFIG_DRM_DEBUG_MM is not set +CONFIG_DRM_KMS_HELPER=y +# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set +# CONFIG_DRM_DEBUG_MODESET_LOCK is not set +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y CONFIG_DRM_DP_AUX_CHARDEV=y CONFIG_DRM_DP_CEC=y +CONFIG_DRM_TTM=y +CONFIG_DRM_EXEC=m +CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m 
+CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_SHMEM_HELPER=y +CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# # CONFIG_DRM_I2C_CH7006 is not set # CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# CONFIG_DRM_KOMEDA is not set +# end of ARM devices + CONFIG_DRM_RADEON=m CONFIG_DRM_RADEON_USERPTR=y CONFIG_DRM_AMDGPU=m CONFIG_DRM_AMDGPU_SI=y CONFIG_DRM_AMDGPU_CIK=y CONFIG_DRM_AMDGPU_USERPTR=y +# CONFIG_DRM_AMDGPU_WERROR is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# CONFIG_DRM_AMD_DC_SI is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set CONFIG_DRM_VKMS=m CONFIG_DRM_UDL=m CONFIG_DRM_AST=y CONFIG_DRM_MGAG200=m CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_EDP is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set +# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LB035Q02 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# 
CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set +# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set +# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set +# CONFIG_DRM_PANEL_TPO_TPG110 is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_CHIPONE_ICN6211 is not set +# CONFIG_DRM_CHRONTEL_CH7033 is not set +# CONFIG_DRM_DISPLAY_CONNECTOR is not set +# CONFIG_DRM_ITE_IT6505 is not set +# CONFIG_DRM_LONTIUM_LT8912B is not set +# CONFIG_DRM_LONTIUM_LT9211 is not set +# CONFIG_DRM_LONTIUM_LT9611 is not set +# CONFIG_DRM_LONTIUM_LT9611UXC is not set +# CONFIG_DRM_ITE_IT66121 is not set +# CONFIG_DRM_LVDS_CODEC is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NWL_MIPI_DSI is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_PARADE_PS8640 is not set +# CONFIG_DRM_SAMSUNG_DSIM is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_SIMPLE_BRIDGE is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358762 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TOSHIBA_TC358768 is not set +# CONFIG_DRM_TOSHIBA_TC358775 is not set +# CONFIG_DRM_TI_DLPC3433 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TI_SN65DSI83 is not set +# 
CONFIG_DRM_TI_SN65DSI86 is not set +# CONFIG_DRM_TI_TPD12S015 is not set +# CONFIG_DRM_ANALOGIX_ANX6345 is not set +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_ANALOGIX_ANX7625 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_CDNS_MHDP8546 is not set +# end of Display Interface Bridges + CONFIG_DRM_LOONGSON=y +# CONFIG_DRM_ETNAVIV is not set +# CONFIG_DRM_LOGICVC is not set +# CONFIG_DRM_ARCPGU is not set CONFIG_DRM_BOCHS=m CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_SSD130X is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y +# CONFIG_HYDCU_FIXUP_HEADER is not set + +# +# Frame buffer Devices +# CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set CONFIG_FB_RADEON=y +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RADEON_BACKLIGHT=y +# CONFIG_FB_RADEON_DEBUG is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set 
+# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SM501 is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set CONFIG_FB_LS2K500=m +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_DEVICE=y +CONFIG_FB_DDC=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_IOMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set +# CONFIG_BACKLIGHT_PWM is not set +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set CONFIG_BACKLIGHT_LP855X=m +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_BACKLIGHT_LED is not set 
+# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# # CONFIG_VGA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_DRM_ACCEL is not set CONFIG_SOUND=y +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y CONFIG_SND=y +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_HWDEP=m +CONFIG_SND_SEQ_DEVICE=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y CONFIG_SND_OSSEMUL=y +# CONFIG_SND_MIXER_OSS is not set +# CONFIG_SND_PCM_OSS is not set +CONFIG_SND_PCM_TIMER=y CONFIG_SND_HRTIMER=m +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 # CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +CONFIG_SND_CTL_FAST_LOOKUP=y +# CONFIG_SND_DEBUG is not set +# CONFIG_SND_CTL_INPUT_VALIDATION is not set +CONFIG_SND_VMASTER=y +CONFIG_SND_CTL_LED=m CONFIG_SND_SEQUENCER=m CONFIG_SND_SEQ_DUMMY=m CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_SEQ_MIDI_EVENT=m +CONFIG_SND_SEQ_MIDI=m +CONFIG_SND_SEQ_MIDI_EMUL=m +CONFIG_SND_SEQ_VIRMIDI=m +# CONFIG_SND_SEQ_UMP is not set +CONFIG_SND_MPU401_UART=m +CONFIG_SND_OPL3_LIB=m +CONFIG_SND_OPL3_LIB_SEQ=m +CONFIG_SND_VX_LIB=m +CONFIG_SND_AC97_CODEC=m +CONFIG_SND_DRIVERS=y CONFIG_SND_DUMMY=m CONFIG_SND_ALOOP=m +# CONFIG_SND_PCMTEST is not set CONFIG_SND_VIRMIDI=m CONFIG_SND_MTPAV=m +# CONFIG_SND_MTS64 is not set +# CONFIG_SND_SERIAL_U16550 is not set CONFIG_SND_MPU401=m +# CONFIG_SND_PORTMAN2X4 is 
not set CONFIG_SND_AC97_POWER_SAVE=y CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 +CONFIG_SND_PCI=y CONFIG_SND_AD1889=m CONFIG_SND_ATIIXP=m CONFIG_SND_ATIIXP_MODEM=m CONFIG_SND_AU8810=m CONFIG_SND_AU8820=m CONFIG_SND_AU8830=m +# CONFIG_SND_AW2 is not set CONFIG_SND_BT87X=m CONFIG_SND_BT87X_OVERCLOCK=y CONFIG_SND_CA0106=m CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN_LIB=m CONFIG_SND_OXYGEN=m +# CONFIG_SND_CS4281 is not set CONFIG_SND_CS46XX=m +CONFIG_SND_CS46XX_NEW_DSP=y CONFIG_SND_CTXFI=m CONFIG_SND_DARLA20=m CONFIG_SND_GINA20=m @@ -1538,6 +5557,7 @@ CONFIG_SND_INDIGOIOX=m CONFIG_SND_INDIGODJX=m CONFIG_SND_ENS1370=m CONFIG_SND_ENS1371=m +# CONFIG_SND_FM801 is not set CONFIG_SND_HDSP=m CONFIG_SND_HDSPM=m CONFIG_SND_ICE1724=m @@ -1547,7 +5567,9 @@ CONFIG_SND_KORG1212=m CONFIG_SND_LOLA=m CONFIG_SND_LX6464ES=m CONFIG_SND_MIXART=m +# CONFIG_SND_NM256 is not set CONFIG_SND_PCXHR=m +# CONFIG_SND_RIPTIDE is not set CONFIG_SND_RME32=m CONFIG_SND_RME96=m CONFIG_SND_RME9652=m @@ -1555,35 +5577,68 @@ CONFIG_SND_VIA82XX=m CONFIG_SND_VIA82XX_MODEM=m CONFIG_SND_VIRTUOSO=m CONFIG_SND_VX222=m +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +CONFIG_SND_HDA=m +CONFIG_SND_HDA_GENERIC_LEDS=y CONFIG_SND_HDA_INTEL=m CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_RECONFIG=y CONFIG_SND_HDA_INPUT_BEEP=y CONFIG_SND_HDA_INPUT_BEEP_MODE=0 CONFIG_SND_HDA_PATCH_LOADER=y +# CONFIG_SND_HDA_SCODEC_CS35L41_I2C is not set +# CONFIG_SND_HDA_SCODEC_CS35L41_SPI is not set +# CONFIG_SND_HDA_SCODEC_CS35L56_I2C is not set +# CONFIG_SND_HDA_SCODEC_CS35L56_SPI is not set +# CONFIG_SND_HDA_SCODEC_TAS2781_I2C is not set CONFIG_SND_HDA_CODEC_REALTEK=m CONFIG_SND_HDA_CODEC_ANALOG=m CONFIG_SND_HDA_CODEC_SIGMATEL=m CONFIG_SND_HDA_CODEC_VIA=m CONFIG_SND_HDA_CODEC_HDMI=m CONFIG_SND_HDA_CODEC_CIRRUS=m +# CONFIG_SND_HDA_CODEC_CS8409 is not set CONFIG_SND_HDA_CODEC_CONEXANT=m CONFIG_SND_HDA_CODEC_CA0110=m CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CA0132_DSP=y CONFIG_SND_HDA_CODEC_CMEDIA=m CONFIG_SND_HDA_CODEC_SI3054=m 
+CONFIG_SND_HDA_GENERIC=m +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +# CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM is not set +# CONFIG_SND_HDA_CTL_DEV_ID is not set +# end of HD-Audio + +CONFIG_SND_HDA_CORE=m +CONFIG_SND_HDA_DSP_LOADER=y +CONFIG_SND_HDA_COMPONENT=y CONFIG_SND_HDA_PREALLOC_SIZE=512 +CONFIG_SND_INTEL_NHLT=y +CONFIG_SND_INTEL_DSP_CONFIG=m +CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m # CONFIG_SND_SPI is not set +CONFIG_SND_USB=y CONFIG_SND_USB_AUDIO=m +# CONFIG_SND_USB_AUDIO_MIDI_V2 is not set +CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y CONFIG_SND_USB_UA101=m CONFIG_SND_USB_CAIAQ=m CONFIG_SND_USB_CAIAQ_INPUT=y CONFIG_SND_USB_6FIRE=m CONFIG_SND_USB_HIFACE=m CONFIG_SND_BCD2000=m +CONFIG_SND_USB_LINE6=m CONFIG_SND_USB_POD=m CONFIG_SND_USB_PODHD=m CONFIG_SND_USB_TONEPORT=m CONFIG_SND_USB_VARIAX=m +CONFIG_SND_FIREWIRE=y +CONFIG_SND_FIREWIRE_LIB=m CONFIG_SND_DICE=m CONFIG_SND_OXFW=m CONFIG_SND_ISIGHT=m @@ -1594,36 +5649,306 @@ CONFIG_SND_FIREWIRE_TASCAM=m CONFIG_SND_FIREWIRE_MOTU=m CONFIG_SND_FIREFACE=m CONFIG_SND_SOC=m +# CONFIG_SND_SOC_ADI is not set +# CONFIG_SND_SOC_AMD_ACP is not set +# CONFIG_SND_AMD_ACP_CONFIG is not set +# CONFIG_SND_ATMEL_SOC is not set +# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set +# CONFIG_SND_DESIGNWARE_I2S is not set + +# +# SoC Audio for Freescale CPUs +# + +# +# Common SoC Audio options for Freescale CPUs: +# +# CONFIG_SND_SOC_FSL_ASRC is not set +# CONFIG_SND_SOC_FSL_SAI is not set +# CONFIG_SND_SOC_FSL_AUDMIX is not set +# CONFIG_SND_SOC_FSL_SSI is not set +# CONFIG_SND_SOC_FSL_SPDIF is not set +# CONFIG_SND_SOC_FSL_ESAI is not set +# CONFIG_SND_SOC_FSL_MICFIL is not set +# CONFIG_SND_SOC_FSL_XCVR is not set +# CONFIG_SND_SOC_IMX_AUDMUX is not set +# end of SoC Audio for Freescale CPUs + +# CONFIG_SND_SOC_CHV3_I2S is not set +# CONFIG_SND_I2S_HI6210_I2S is not set + +# +# SoC Audio for Loongson CPUs +# +# CONFIG_SND_SOC_LOONGSON_I2S_PCI is not set +# CONFIG_SND_SOC_LOONGSON_CARD is not set +# end of SoC Audio for Loongson CPUs + +# 
CONFIG_SND_SOC_IMG is not set +# CONFIG_SND_SOC_MTK_BTCVSD is not set +# CONFIG_SND_SOC_SOF_TOPLEVEL is not set + +# +# STMicroelectronics STM32 SOC audio support +# +# end of STMicroelectronics STM32 SOC audio support + +# CONFIG_SND_SOC_XILINX_I2S is not set +# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set +# CONFIG_SND_SOC_XILINX_SPDIF is not set +# CONFIG_SND_SOC_XTFPGA_I2S is not set +CONFIG_SND_SOC_I2C_AND_SPI=m + +# +# CODEC drivers +# +# CONFIG_SND_SOC_AC97_CODEC is not set +# CONFIG_SND_SOC_ADAU1372_I2C is not set +# CONFIG_SND_SOC_ADAU1372_SPI is not set +# CONFIG_SND_SOC_ADAU1701 is not set +# CONFIG_SND_SOC_ADAU1761_I2C is not set +# CONFIG_SND_SOC_ADAU1761_SPI is not set +# CONFIG_SND_SOC_ADAU7002 is not set +# CONFIG_SND_SOC_ADAU7118_HW is not set +# CONFIG_SND_SOC_ADAU7118_I2C is not set +# CONFIG_SND_SOC_AK4104 is not set +# CONFIG_SND_SOC_AK4118 is not set +# CONFIG_SND_SOC_AK4375 is not set +# CONFIG_SND_SOC_AK4458 is not set +# CONFIG_SND_SOC_AK4554 is not set +# CONFIG_SND_SOC_AK4613 is not set +# CONFIG_SND_SOC_AK4642 is not set +# CONFIG_SND_SOC_AK5386 is not set +# CONFIG_SND_SOC_AK5558 is not set +# CONFIG_SND_SOC_ALC5623 is not set +# CONFIG_SND_SOC_AUDIO_IIO_AUX is not set +# CONFIG_SND_SOC_AW8738 is not set +# CONFIG_SND_SOC_AW88395 is not set +# CONFIG_SND_SOC_AW88261 is not set +# CONFIG_SND_SOC_BD28623 is not set +# CONFIG_SND_SOC_BT_SCO is not set +# CONFIG_SND_SOC_CHV3_CODEC is not set +# CONFIG_SND_SOC_CS35L32 is not set +# CONFIG_SND_SOC_CS35L33 is not set +# CONFIG_SND_SOC_CS35L34 is not set +# CONFIG_SND_SOC_CS35L35 is not set +# CONFIG_SND_SOC_CS35L36 is not set +# CONFIG_SND_SOC_CS35L41_SPI is not set +# CONFIG_SND_SOC_CS35L41_I2C is not set +# CONFIG_SND_SOC_CS35L45_SPI is not set +# CONFIG_SND_SOC_CS35L45_I2C is not set +# CONFIG_SND_SOC_CS35L56_I2C is not set +# CONFIG_SND_SOC_CS35L56_SPI is not set +# CONFIG_SND_SOC_CS42L42 is not set +# CONFIG_SND_SOC_CS42L51_I2C is not set +# CONFIG_SND_SOC_CS42L52 is not set +# 
CONFIG_SND_SOC_CS42L56 is not set +# CONFIG_SND_SOC_CS42L73 is not set +# CONFIG_SND_SOC_CS42L83 is not set +# CONFIG_SND_SOC_CS4234 is not set +# CONFIG_SND_SOC_CS4265 is not set +# CONFIG_SND_SOC_CS4270 is not set +# CONFIG_SND_SOC_CS4271_I2C is not set +# CONFIG_SND_SOC_CS4271_SPI is not set +# CONFIG_SND_SOC_CS42XX8_I2C is not set +# CONFIG_SND_SOC_CS43130 is not set +# CONFIG_SND_SOC_CS4341 is not set +# CONFIG_SND_SOC_CS4349 is not set +# CONFIG_SND_SOC_CS53L30 is not set +# CONFIG_SND_SOC_CX2072X is not set +# CONFIG_SND_SOC_DA7213 is not set +# CONFIG_SND_SOC_DMIC is not set +# CONFIG_SND_SOC_ES7134 is not set +# CONFIG_SND_SOC_ES7241 is not set +# CONFIG_SND_SOC_ES8316 is not set +# CONFIG_SND_SOC_ES8326 is not set +# CONFIG_SND_SOC_ES8328_I2C is not set +# CONFIG_SND_SOC_ES8328_SPI is not set +# CONFIG_SND_SOC_GTM601 is not set +# CONFIG_SND_SOC_HDA is not set +# CONFIG_SND_SOC_ICS43432 is not set +# CONFIG_SND_SOC_IDT821034 is not set +# CONFIG_SND_SOC_INNO_RK3036 is not set +# CONFIG_SND_SOC_MAX98088 is not set +# CONFIG_SND_SOC_MAX98090 is not set +# CONFIG_SND_SOC_MAX98357A is not set +# CONFIG_SND_SOC_MAX98504 is not set +# CONFIG_SND_SOC_MAX9867 is not set +# CONFIG_SND_SOC_MAX98927 is not set +# CONFIG_SND_SOC_MAX98520 is not set +# CONFIG_SND_SOC_MAX98373_I2C is not set +# CONFIG_SND_SOC_MAX98388 is not set +# CONFIG_SND_SOC_MAX98390 is not set +# CONFIG_SND_SOC_MAX98396 is not set +# CONFIG_SND_SOC_MAX9860 is not set +# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set +# CONFIG_SND_SOC_PCM1681 is not set +# CONFIG_SND_SOC_PCM1789_I2C is not set +# CONFIG_SND_SOC_PCM179X_I2C is not set +# CONFIG_SND_SOC_PCM179X_SPI is not set +# CONFIG_SND_SOC_PCM186X_I2C is not set +# CONFIG_SND_SOC_PCM186X_SPI is not set +# CONFIG_SND_SOC_PCM3060_I2C is not set +# CONFIG_SND_SOC_PCM3060_SPI is not set +# CONFIG_SND_SOC_PCM3168A_I2C is not set +# CONFIG_SND_SOC_PCM3168A_SPI is not set +# CONFIG_SND_SOC_PCM5102A is not set +# CONFIG_SND_SOC_PCM512x_I2C is not set +# 
CONFIG_SND_SOC_PCM512x_SPI is not set +# CONFIG_SND_SOC_PEB2466 is not set +# CONFIG_SND_SOC_RK3328 is not set +# CONFIG_SND_SOC_RT5616 is not set +# CONFIG_SND_SOC_RT5631 is not set +# CONFIG_SND_SOC_RT5640 is not set +# CONFIG_SND_SOC_RT5659 is not set +# CONFIG_SND_SOC_RT9120 is not set +# CONFIG_SND_SOC_SGTL5000 is not set +# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set +# CONFIG_SND_SOC_SIMPLE_MUX is not set +# CONFIG_SND_SOC_SMA1303 is not set +# CONFIG_SND_SOC_SPDIF is not set +# CONFIG_SND_SOC_SRC4XXX_I2C is not set +# CONFIG_SND_SOC_SSM2305 is not set +# CONFIG_SND_SOC_SSM2518 is not set +# CONFIG_SND_SOC_SSM2602_SPI is not set +# CONFIG_SND_SOC_SSM2602_I2C is not set +# CONFIG_SND_SOC_SSM3515 is not set +# CONFIG_SND_SOC_SSM4567 is not set +# CONFIG_SND_SOC_STA32X is not set +# CONFIG_SND_SOC_STA350 is not set +# CONFIG_SND_SOC_STI_SAS is not set +# CONFIG_SND_SOC_TAS2552 is not set +# CONFIG_SND_SOC_TAS2562 is not set +# CONFIG_SND_SOC_TAS2764 is not set +# CONFIG_SND_SOC_TAS2770 is not set +# CONFIG_SND_SOC_TAS2780 is not set +# CONFIG_SND_SOC_TAS2781_I2C is not set +# CONFIG_SND_SOC_TAS5086 is not set +# CONFIG_SND_SOC_TAS571X is not set +# CONFIG_SND_SOC_TAS5720 is not set +# CONFIG_SND_SOC_TAS5805M is not set +# CONFIG_SND_SOC_TAS6424 is not set +# CONFIG_SND_SOC_TDA7419 is not set +# CONFIG_SND_SOC_TFA9879 is not set +# CONFIG_SND_SOC_TFA989X is not set +# CONFIG_SND_SOC_TLV320ADC3XXX is not set +# CONFIG_SND_SOC_TLV320AIC23_I2C is not set +# CONFIG_SND_SOC_TLV320AIC23_SPI is not set +# CONFIG_SND_SOC_TLV320AIC31XX is not set +# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set +# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set +# CONFIG_SND_SOC_TLV320AIC3X_I2C is not set +# CONFIG_SND_SOC_TLV320AIC3X_SPI is not set +# CONFIG_SND_SOC_TLV320ADCX140 is not set +# CONFIG_SND_SOC_TS3A227E is not set +# CONFIG_SND_SOC_TSCS42XX is not set +# CONFIG_SND_SOC_TSCS454 is not set +# CONFIG_SND_SOC_UDA1334 is not set +# CONFIG_SND_SOC_WM8510 is not set +# 
CONFIG_SND_SOC_WM8523 is not set +# CONFIG_SND_SOC_WM8524 is not set +# CONFIG_SND_SOC_WM8580 is not set +# CONFIG_SND_SOC_WM8711 is not set +# CONFIG_SND_SOC_WM8728 is not set +# CONFIG_SND_SOC_WM8731_I2C is not set +# CONFIG_SND_SOC_WM8731_SPI is not set +# CONFIG_SND_SOC_WM8737 is not set +# CONFIG_SND_SOC_WM8741 is not set +# CONFIG_SND_SOC_WM8750 is not set +# CONFIG_SND_SOC_WM8753 is not set +# CONFIG_SND_SOC_WM8770 is not set +# CONFIG_SND_SOC_WM8776 is not set +# CONFIG_SND_SOC_WM8782 is not set +# CONFIG_SND_SOC_WM8804_I2C is not set +# CONFIG_SND_SOC_WM8804_SPI is not set +# CONFIG_SND_SOC_WM8903 is not set +# CONFIG_SND_SOC_WM8904 is not set +# CONFIG_SND_SOC_WM8940 is not set +# CONFIG_SND_SOC_WM8960 is not set +# CONFIG_SND_SOC_WM8961 is not set +# CONFIG_SND_SOC_WM8962 is not set +# CONFIG_SND_SOC_WM8974 is not set +# CONFIG_SND_SOC_WM8978 is not set +# CONFIG_SND_SOC_WM8985 is not set +# CONFIG_SND_SOC_ZL38060 is not set +# CONFIG_SND_SOC_MAX9759 is not set +# CONFIG_SND_SOC_MT6351 is not set +# CONFIG_SND_SOC_MT6358 is not set +# CONFIG_SND_SOC_MT6660 is not set +# CONFIG_SND_SOC_NAU8315 is not set +# CONFIG_SND_SOC_NAU8540 is not set +# CONFIG_SND_SOC_NAU8810 is not set +# CONFIG_SND_SOC_NAU8821 is not set +# CONFIG_SND_SOC_NAU8822 is not set +# CONFIG_SND_SOC_NAU8824 is not set +# CONFIG_SND_SOC_TPA6130A2 is not set +# CONFIG_SND_SOC_LPASS_WSA_MACRO is not set +# CONFIG_SND_SOC_LPASS_VA_MACRO is not set +# CONFIG_SND_SOC_LPASS_RX_MACRO is not set +# CONFIG_SND_SOC_LPASS_TX_MACRO is not set +# end of CODEC drivers + +# CONFIG_SND_SIMPLE_CARD is not set +# CONFIG_SND_AUDIO_GRAPH_CARD is not set +# CONFIG_SND_AUDIO_GRAPH_CARD2 is not set +# CONFIG_SND_TEST_COMPONENT is not set +# CONFIG_SND_VIRTIO is not set +CONFIG_AC97_BUS=m +CONFIG_HID_SUPPORT=y +CONFIG_HID=y CONFIG_HID_BATTERY_STRENGTH=y CONFIG_HIDRAW=y CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set CONFIG_HID_ACRUX=m +# 
CONFIG_HID_ACRUX_FF is not set CONFIG_HID_APPLE=m CONFIG_HID_APPLEIR=m CONFIG_HID_ASUS=m CONFIG_HID_AUREAL=m CONFIG_HID_BELKIN=m CONFIG_HID_BETOP_FF=m +# CONFIG_HID_BIGBEN_FF is not set CONFIG_HID_CHERRY=m CONFIG_HID_CHICONY=m CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set CONFIG_HID_PRODIKEYS=m CONFIG_HID_CMEDIA=m +# CONFIG_HID_CP2112 is not set +# CONFIG_HID_CREATIVE_SB0540 is not set CONFIG_HID_CYPRESS=m CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set CONFIG_HID_ELAN=m CONFIG_HID_ELECOM=m CONFIG_HID_ELO=m +# CONFIG_HID_EVISION is not set CONFIG_HID_EZKEY=m +# CONFIG_HID_FT260 is not set CONFIG_HID_GEMBIRD=m CONFIG_HID_GFRM=m +# CONFIG_HID_GLORIOUS is not set CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_STADIA_FF is not set +# CONFIG_HID_VIVALDI is not set CONFIG_HID_GT683R=m CONFIG_HID_KEYTOUCH=m CONFIG_HID_KYE=m CONFIG_HID_UCLOGIC=m CONFIG_HID_WALTOP=m +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_VRC2 is not set +# CONFIG_HID_XIAOMI is not set CONFIG_HID_GYRATION=m CONFIG_HID_ICADE=m CONFIG_HID_ITE=m @@ -1631,69 +5956,169 @@ CONFIG_HID_JABRA=m CONFIG_HID_TWINHAN=m CONFIG_HID_KENSINGTON=m CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m CONFIG_HID_LENOVO=m +# CONFIG_HID_LETSKETCH is not set CONFIG_HID_LOGITECH=m CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m CONFIG_LOGITECH_FF=y CONFIG_LOGIRUMBLEPAD2_FF=y CONFIG_LOGIG940_FF=y +CONFIG_LOGIWHEELS_FF=y CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MEGAWORLD_FF is not set +# CONFIG_HID_REDRAGON is not set CONFIG_HID_MICROSOFT=m CONFIG_HID_MONTEREY=m CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NINTENDO is not set CONFIG_HID_NTI=m CONFIG_HID_NTRIG=y +# CONFIG_HID_NVIDIA_SHIELD is not set CONFIG_HID_ORTEK=m CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set CONFIG_HID_PENMOUNT=m CONFIG_HID_PETALYNX=m CONFIG_HID_PICOLCD=m +# CONFIG_HID_PICOLCD_FB is 
not set +# CONFIG_HID_PICOLCD_BACKLIGHT is not set +# CONFIG_HID_PICOLCD_LCD is not set +# CONFIG_HID_PICOLCD_LEDS is not set +# CONFIG_HID_PICOLCD_CIR is not set CONFIG_HID_PLANTRONICS=m +# CONFIG_HID_PXRC is not set +# CONFIG_HID_RAZER is not set CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set CONFIG_HID_ROCCAT=m CONFIG_HID_SAITEK=m CONFIG_HID_SAMSUNG=m +# CONFIG_HID_SEMITEK is not set +# CONFIG_HID_SIGMAMICRO is not set CONFIG_HID_SONY=m CONFIG_SONY_FF=y CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set CONFIG_HID_STEELSERIES=m CONFIG_HID_SUNPLUS=m CONFIG_HID_RMI=m CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set CONFIG_HID_TIVO=m CONFIG_HID_TOPSEED=m +# CONFIG_HID_TOPRE is not set CONFIG_HID_THINGM=m CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_U2FZERO is not set CONFIG_HID_WACOM=m CONFIG_HID_WIIMOTE=m CONFIG_HID_XINMO=m CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set CONFIG_HID_ZYDACRON=m CONFIG_HID_SENSOR_HUB=y CONFIG_HID_SENSOR_CUSTOM_SENSOR=m CONFIG_HID_ALPS=m +# CONFIG_HID_MCP2221 is not set +# end of Special HID drivers + +# +# HID-BPF support +# +# CONFIG_HID_BPF is not set +# end of HID-BPF support + +# +# USB HID support +# +CONFIG_USB_HID=y CONFIG_HID_PID=y CONFIG_USB_HIDDEV=y +# end of USB HID support + CONFIG_I2C_HID=m +# CONFIG_I2C_HID_ACPI is not set +# CONFIG_I2C_HID_OF is not set +# CONFIG_I2C_HID_OF_ELAN is not set +# CONFIG_I2C_HID_OF_GOODIX is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y CONFIG_USB_LED_TRIG=y +# CONFIG_USB_ULPI_BUS is not set +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB=y +CONFIG_USB_PCI=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# 
CONFIG_USB_OTG_PRODUCTLIST is not set +# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 CONFIG_USB_MON=y + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PCI_RENESAS is not set CONFIG_USB_XHCI_PLATFORM=m CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set CONFIG_USB_EHCI_HCD_PLATFORM=y +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y CONFIG_USB_STORAGE_DATAFAB=m CONFIG_USB_STORAGE_FREECOM=m CONFIG_USB_STORAGE_ISD200=m @@ -1707,12 +6132,40 @@ CONFIG_USB_STORAGE_KARMA=m CONFIG_USB_STORAGE_CYPRESS_ATACB=m CONFIG_USB_STORAGE_ENE_UB6250=m CONFIG_USB_UAS=m + +# +# USB Imaging devices +# CONFIG_USB_MDC800=m CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set + +# +# USB dual-mode controller drivers +# +# CONFIG_USB_CDNS_SUPPORT is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set CONFIG_USB_DWC2=y CONFIG_USB_DWC2_HOST=y + +# +# Gadget/Dual-role mode requires USB Gadget support to be enabled +# +# CONFIG_USB_DWC2_PERIPHERAL is not set +# CONFIG_USB_DWC2_DUAL_ROLE is not set +# CONFIG_USB_DWC2_PCI is not set +# CONFIG_USB_DWC2_DEBUG is 
not set +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# CONFIG_USB_SERIAL=m CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_SIMPLE is not set CONFIG_USB_SERIAL_AIRCABLE=m CONFIG_USB_SERIAL_ARK3116=m CONFIG_USB_SERIAL_BELKIN=m @@ -1728,6 +6181,7 @@ CONFIG_USB_SERIAL_IPAQ=m CONFIG_USB_SERIAL_IR=m CONFIG_USB_SERIAL_EDGEPORT=m CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set CONFIG_USB_SERIAL_F8153X=m CONFIG_USB_SERIAL_GARMIN=m CONFIG_USB_SERIAL_IPW=m @@ -1737,6 +6191,7 @@ CONFIG_USB_SERIAL_KEYSPAN=m CONFIG_USB_SERIAL_KLSI=m CONFIG_USB_SERIAL_KOBIL_SCT=m CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set CONFIG_USB_SERIAL_MOS7720=m CONFIG_USB_SERIAL_MOS7715_PARPORT=y CONFIG_USB_SERIAL_MOS7840=m @@ -1753,14 +6208,21 @@ CONFIG_USB_SERIAL_SIERRAWIRELESS=m CONFIG_USB_SERIAL_SYMBOL=m CONFIG_USB_SERIAL_TI=m CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_WWAN=m CONFIG_USB_SERIAL_OPTION=m CONFIG_USB_SERIAL_OMNINET=m CONFIG_USB_SERIAL_OPTICON=m CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set CONFIG_USB_SERIAL_SSU100=m CONFIG_USB_SERIAL_QT2=m CONFIG_USB_SERIAL_UPD78F0730=m +# CONFIG_USB_SERIAL_XR is not set CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# CONFIG_USB_USS720=m CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m @@ -1768,138 +6230,541 @@ CONFIG_USB_ADUTUX=m CONFIG_USB_SEVSEG=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set CONFIG_USB_IDMOUSE=m CONFIG_USB_APPLEDISPLAY=m +# CONFIG_APPLE_MFI_FASTCHARGE is not set CONFIG_USB_SISUSBVGA=m CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set CONFIG_USB_HSIC_USB3503=m +# 
CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set +# CONFIG_USB_ONBOARD_HUB is not set CONFIG_USB_ATM=m CONFIG_USB_SPEEDTOUCH=m CONFIG_USB_CXACRU=m CONFIG_USB_UEAGLEATM=m CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# end of USB Physical Layer drivers + CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 + +# +# USB Peripheral Controller +# +# CONFIG_USB_GR_UDC is not set +# CONFIG_USB_R8A66597 is not set +# CONFIG_USB_PXA27X is not set +# CONFIG_USB_MV_UDC is not set +# CONFIG_USB_MV_U3D is not set +# CONFIG_USB_SNP_UDC_PLAT is not set +# CONFIG_USB_M66592 is not set +# CONFIG_USB_BDC_UDC is not set +# CONFIG_USB_AMD5536UDC is not set +# CONFIG_USB_NET2272 is not set +# CONFIG_USB_NET2280 is not set +# CONFIG_USB_GOKU is not set +# CONFIG_USB_EG20T is not set +# CONFIG_USB_GADGET_XILINX is not set +# CONFIG_USB_MAX3420_UDC is not set +# CONFIG_USB_CDNS2_UDC is not set +# CONFIG_USB_DUMMY_HCD is not set +# end of USB Peripheral Controller + +# CONFIG_USB_CONFIGFS is not set + +# +# USB Gadget precomposed configurations +# +# CONFIG_USB_ZERO is not set +# CONFIG_USB_AUDIO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_GADGET_TARGET is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_MIDI_GADGET is not set +# CONFIG_USB_G_PRINTER is not set +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_NOKIA is not set +# CONFIG_USB_G_ACM_MS is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +# CONFIG_USB_G_WEBCAM is not set +# 
CONFIG_USB_RAW_GADGET is not set +# end of USB Gadget precomposed configurations + CONFIG_TYPEC=m CONFIG_TYPEC_TCPM=m CONFIG_TYPEC_TCPCI=m CONFIG_TYPEC_RT1711H=m +# CONFIG_TYPEC_TCPCI_MAXIM is not set CONFIG_TYPEC_FUSB302=m CONFIG_TYPEC_UCSI=m +# CONFIG_UCSI_CCG is not set CONFIG_UCSI_ACPI=m +# CONFIG_UCSI_STM32G0 is not set CONFIG_TYPEC_TPS6598X=m +# CONFIG_TYPEC_ANX7411 is not set +# CONFIG_TYPEC_RT1719 is not set +# CONFIG_TYPEC_HD3SS3220 is not set +# CONFIG_TYPEC_STUSB160X is not set +# CONFIG_TYPEC_WUSB3801 is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_FSA4480 is not set +# CONFIG_TYPEC_MUX_GPIO_SBU is not set CONFIG_TYPEC_MUX_PI3USB30532=m +# CONFIG_TYPEC_MUX_NB7VPQ904M is not set +# end of USB Type-C Multiplexer/DeMultiplexer Switch support + +# +# USB Type-C Alternate Mode drivers +# CONFIG_TYPEC_DP_ALTMODE=m +# CONFIG_TYPEC_NVIDIA_ALTMODE is not set +# end of USB Type-C Alternate Mode drivers + +CONFIG_USB_ROLE_SWITCH=y CONFIG_MMC=m +CONFIG_PWRSEQ_EMMC=m +# CONFIG_PWRSEQ_SD8787 is not set +CONFIG_PWRSEQ_SIMPLE=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y CONFIG_MMC_SDHCI_ACPI=m CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_OF_AT91 is not set +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +# CONFIG_MMC_SDHCI_CADENCE is not set +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_SDHCI_MILBEAUT is not set CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set CONFIG_MMC_CB710=m CONFIG_MMC_VIA_SDMMC=m CONFIG_MMC_VUB300=m CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set CONFIG_MMC_REALTEK_PCI=m CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_HSQ is not set +# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set 
CONFIG_MMC_SDHCI_XENON=m +# CONFIG_MMC_SDHCI_OMAP is not set +# CONFIG_MMC_SDHCI_AM654 is not set +# CONFIG_SCSI_UFSHCD is not set CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# CONFIG_MEMSTICK_TIFM_MS=m CONFIG_MEMSTICK_JMICRON_38X=m CONFIG_MEMSTICK_R592=m CONFIG_MEMSTICK_REALTEK_PCI=m CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_CLASS_MULTICOLOR is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AN30259A is not set +# CONFIG_LEDS_AW200XX is not set +# CONFIG_LEDS_AW2013 is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +# CONFIG_LEDS_EL15203000 is not set CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP50XX is not set +# CONFIG_LEDS_LP55XX_COMMON is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2606MVV is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# CONFIG_LEDS_BLINKM=m +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_SPI_BYTE is not set +# CONFIG_LEDS_LM3697 is not set 
+ +# +# Flash and Torch LED drivers +# + +# +# RGB LED drivers +# + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=m CONFIG_LEDS_TRIGGER_ONESHOT=m CONFIG_LEDS_TRIGGER_DISK=y +# CONFIG_LEDS_TRIGGER_MTD is not set CONFIG_LEDS_TRIGGER_HEARTBEAT=m CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# CONFIG_LEDS_TRIGGER_TRANSIENT=m CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set CONFIG_LEDS_TRIGGER_AUDIO=y +# CONFIG_LEDS_TRIGGER_TTY is not set + +# +# Simple LED drivers +# +# CONFIG_ACCESSIBILITY is not set CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_MAD=m CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y CONFIG_INFINIBAND_BNXT_RE=m CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +# CONFIG_INFINIBAND_ERDMA is not set +# CONFIG_INFINIBAND_IRDMA is not set CONFIG_MLX4_INFINIBAND=m CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_OCRDMA is not set CONFIG_INFINIBAND_VMWARE_PVRDMA=m CONFIG_RDMA_RXE=m +# CONFIG_RDMA_SIW is not set CONFIG_INFINIBAND_IPOIB=m CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set CONFIG_INFINIBAND_SRP=m CONFIG_INFINIBAND_SRPT=m CONFIG_INFINIBAND_ISER=m CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y -# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC 
interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set CONFIG_RTC_DRV_DS1374=m +# CONFIG_RTC_DRV_DS1374_WDT is not set CONFIG_RTC_DRV_DS1672=m +# CONFIG_RTC_DRV_HYM8563 is not set CONFIG_RTC_DRV_MAX6900=m +# CONFIG_RTC_DRV_NCT3018Y is not set CONFIG_RTC_DRV_RS5C372=m CONFIG_RTC_DRV_ISL1208=m CONFIG_RTC_DRV_ISL12022=m +# CONFIG_RTC_DRV_ISL12026 is not set CONFIG_RTC_DRV_X1205=m CONFIG_RTC_DRV_PCF8523=m +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set CONFIG_RTC_DRV_PCF8563=m CONFIG_RTC_DRV_PCF8583=m CONFIG_RTC_DRV_M41T80=m CONFIG_RTC_DRV_M41T80_WDT=y CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set CONFIG_RTC_DRV_FM3130=m +# CONFIG_RTC_DRV_RX8010 is not set CONFIG_RTC_DRV_RX8581=m CONFIG_RTC_DRV_RX8025=m CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set CONFIG_RTC_DRV_RV8803=m +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set CONFIG_RTC_DRV_RX4581=m +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +# CONFIG_RTC_DRV_PCF2127 is not set CONFIG_RTC_DRV_RV3029C2=m # CONFIG_RTC_DRV_RV3029_HWMON is not set +# CONFIG_RTC_DRV_RX6110 is not set + 
+# +# Platform RTC drivers +# CONFIG_RTC_DRV_DS1286=m CONFIG_RTC_DRV_DS1511=m CONFIG_RTC_DRV_DS1553=m +# CONFIG_RTC_DRV_DS1685_FAMILY is not set CONFIG_RTC_DRV_DS1742=m CONFIG_RTC_DRV_DS2404=m CONFIG_RTC_DRV_EFI=m CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set CONFIG_RTC_DRV_M48T35=m CONFIG_RTC_DRV_M48T59=m CONFIG_RTC_DRV_MSM6242=m CONFIG_RTC_DRV_RP5C01=m +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set CONFIG_RTC_DRV_LOONGSON=y +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +# CONFIG_RTC_DRV_GOLDFISH is not set CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_PLX_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set +# CONFIG_XILINX_ZYNQMP_DPDMA is not set +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=m CONFIG_DW_DMAC=m +# CONFIG_DW_DMAC_PCI is not set +# CONFIG_DW_EDMA is not set +# CONFIG_SF_PDMA is not set + +# +# DMA Clients +# CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +# CONFIG_DMABUF_DEBUG is not set +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# CONFIG_DMABUF_SYSFS_STATS is not set +# end of DMABUF options + +CONFIG_UIO=m CONFIG_UIO_CIF=m CONFIG_UIO_PDRV_GENIRQ=m CONFIG_UIO_DMEM_GENIRQ=m CONFIG_UIO_AEC=m CONFIG_UIO_SERCOS3=m CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y 
CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y CONFIG_VFIO_PCI=m +# CONFIG_MLX5_VFIO_PCI is not set +# end of VFIO support for PCI devices + +CONFIG_IRQ_BYPASS_MANAGER=m +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO_ANCHOR=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_PCI_LIB=y +CONFIG_VIRTIO_PCI_LIB_LEGACY=y +CONFIG_VIRTIO_MENU=y CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_INPUT=m CONFIG_VIRTIO_MMIO=m CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y CONFIG_VHOST_NET=m CONFIG_VHOST_SCSI=m CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set CONFIG_COMEDI=m +# CONFIG_COMEDI_DEBUG is not set +CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048 +CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480 +# CONFIG_COMEDI_MISC_DRIVERS is not set +# CONFIG_COMEDI_ISA_DRIVERS is not set CONFIG_COMEDI_PCI_DRIVERS=m CONFIG_COMEDI_8255_PCI=m +# CONFIG_COMEDI_ADDI_APCI_1032 is not set +# CONFIG_COMEDI_ADDI_APCI_1500 is not set +# CONFIG_COMEDI_ADDI_APCI_1516 is not set +# CONFIG_COMEDI_ADDI_APCI_1564 is not set +# CONFIG_COMEDI_ADDI_APCI_16XX is not set +# CONFIG_COMEDI_ADDI_APCI_2032 is not set +# CONFIG_COMEDI_ADDI_APCI_2200 is not set +# CONFIG_COMEDI_ADDI_APCI_3120 is not set +# CONFIG_COMEDI_ADDI_APCI_3501 is not set +# CONFIG_COMEDI_ADDI_APCI_3XXX is not set CONFIG_COMEDI_ADL_PCI6208=m CONFIG_COMEDI_ADL_PCI7X3X=m CONFIG_COMEDI_ADL_PCI8164=m @@ -1911,39 +6776,916 @@ CONFIG_COMEDI_ADV_PCI1723=m CONFIG_COMEDI_ADV_PCI1724=m CONFIG_COMEDI_ADV_PCI1760=m CONFIG_COMEDI_ADV_PCI_DIO=m +# CONFIG_COMEDI_AMPLC_DIO200_PCI is not set +# CONFIG_COMEDI_AMPLC_PC236_PCI is not set +# CONFIG_COMEDI_AMPLC_PC263_PCI is not 
set +# CONFIG_COMEDI_AMPLC_PCI224 is not set +# CONFIG_COMEDI_AMPLC_PCI230 is not set +# CONFIG_COMEDI_CONTEC_PCI_DIO is not set +# CONFIG_COMEDI_DAS08_PCI is not set +# CONFIG_COMEDI_DT3000 is not set +# CONFIG_COMEDI_DYNA_PCI10XX is not set +# CONFIG_COMEDI_GSC_HPDI is not set +# CONFIG_COMEDI_MF6X4 is not set +# CONFIG_COMEDI_ICP_MULTI is not set +# CONFIG_COMEDI_DAQBOARD2000 is not set +# CONFIG_COMEDI_JR3_PCI is not set +# CONFIG_COMEDI_KE_COUNTER is not set +# CONFIG_COMEDI_CB_PCIDAS64 is not set +# CONFIG_COMEDI_CB_PCIDAS is not set +# CONFIG_COMEDI_CB_PCIDDA is not set +# CONFIG_COMEDI_CB_PCIMDAS is not set +# CONFIG_COMEDI_CB_PCIMDDA is not set +# CONFIG_COMEDI_ME4000 is not set +# CONFIG_COMEDI_ME_DAQ is not set +# CONFIG_COMEDI_NI_6527 is not set +# CONFIG_COMEDI_NI_65XX is not set +# CONFIG_COMEDI_NI_660X is not set +# CONFIG_COMEDI_NI_670X is not set CONFIG_COMEDI_NI_LABPC_PCI=m CONFIG_COMEDI_NI_PCIDIO=m CONFIG_COMEDI_NI_PCIMIO=m +# CONFIG_COMEDI_RTD520 is not set +# CONFIG_COMEDI_S626 is not set +CONFIG_COMEDI_MITE=m +CONFIG_COMEDI_NI_TIOCMD=m +# CONFIG_COMEDI_USB_DRIVERS is not set +CONFIG_COMEDI_8254=m +CONFIG_COMEDI_8255=m +# CONFIG_COMEDI_8255_SA is not set +# CONFIG_COMEDI_KCOMEDILIB is not set +CONFIG_COMEDI_NI_LABPC=m +CONFIG_COMEDI_NI_TIO=m +CONFIG_COMEDI_NI_ROUTING=m +# CONFIG_COMEDI_TESTS is not set CONFIG_STAGING=y +# CONFIG_PRISM2_USB is not set +# CONFIG_RTL8192U is not set +# CONFIG_RTLLIB is not set +# CONFIG_RTL8723BS is not set +# CONFIG_R8712U is not set +# CONFIG_RTS5208 is not set +# CONFIG_VT6655 is not set +# CONFIG_VT6656 is not set + +# +# IIO staging drivers +# + +# +# Accelerometers +# +# CONFIG_ADIS16203 is not set +# CONFIG_ADIS16240 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD7816 is not set +# end of Analog to digital converters + +# +# Analog digital bi-direction converters +# +# CONFIG_ADT7316 is not set +# end of Analog digital bi-direction converters + +# +# Direct Digital 
Synthesis +# +# CONFIG_AD9832 is not set +# CONFIG_AD9834 is not set +# end of Direct Digital Synthesis + +# +# Network Analyzer, Impedance Converters +# +# CONFIG_AD5933 is not set +# end of Network Analyzer, Impedance Converters + +# +# Resolver to digital converters +# +# CONFIG_AD2S1210 is not set +# end of Resolver to digital converters +# end of IIO staging drivers + +# CONFIG_FB_SM750 is not set +# CONFIG_STAGING_MEDIA is not set +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_FB_TFT is not set +# CONFIG_KS7010 is not set +# CONFIG_PI433 is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_QLGE is not set +# CONFIG_VME_BUS is not set +CONFIG_LOONGARCH_PLATFORM_DEVICES=y +CONFIG_LOONGSON_LAPTOP=y +# CONFIG_GOLDFISH is not set +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y +# CONFIG_LMK04832 is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_AXI_CLKGEN is not set CONFIG_COMMON_CLK_LOONGSON2=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_RS9_PCIE is not set +# CONFIG_COMMON_CLK_SI521XX is not set +# CONFIG_COMMON_CLK_VC3 is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_VC7 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +# CONFIG_XILINX_VCU is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +# end of Clock Source drivers + +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set 
+CONFIG_IOMMU_DEFAULT_DMA_STRICT=y +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +# CONFIG_IOMMUFD is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# end of fujitsu SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# CONFIG_LITEX_SOC_CONTROLLER is not set +# end of Enable LiteX SoC Builder specific drivers + CONFIG_LOONGSON2_GUTS=y CONFIG_LOONGSON2_PM=y +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_PERFORMANCE=y CONFIG_DEVFREQ_GOV_POWERSAVE=y CONFIG_DEVFREQ_GOV_USERSPACE=y +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_PM_DEVFREQ_EVENT is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# CONFIG_IIO_BUFFER_DMA is not set +# CONFIG_IIO_BUFFER_DMAENGINE is not set +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set +# CONFIG_IIO_TRIGGERED_EVENT is not set + +# +# 
Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADXL313_I2C is not set +# CONFIG_ADXL313_SPI is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +# CONFIG_ADXL355_I2C is not set +# CONFIG_ADXL355_SPI is not set +# CONFIG_ADXL367_SPI is not set +# CONFIG_ADXL367_I2C is not set +# CONFIG_ADXL372_SPI is not set +# CONFIG_ADXL372_I2C is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set +# CONFIG_BMA400 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_BMI088_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD06 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +# CONFIG_FXLS8962AF_I2C is not set +# CONFIG_FXLS8962AF_SPI is not set CONFIG_HID_SENSOR_ACCEL_3D=m +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_IIO_KX022A_SPI is not set +# CONFIG_IIO_KX022A_I2C is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MSA311 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_SCA3300 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD4130 is not set +# CONFIG_AD7091R5 is not set +# CONFIG_AD7124 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7266 is not set +# CONFIG_AD7280 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7292 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set +# CONFIG_AD7606_IFACE_PARALLEL is not set +# CONFIG_AD7606_IFACE_SPI is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7768_1 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# 
CONFIG_AD7923 is not set +# CONFIG_AD7949 is not set +# CONFIG_AD799X is not set +# CONFIG_ADI_AXI_ADC is not set +# CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2496 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX11205 is not set +# CONFIG_MAX11410 is not set +# CONFIG_MAX1241 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set +# CONFIG_MCP3422 is not set +# CONFIG_MCP3911 is not set +# CONFIG_NAU7802 is not set +# CONFIG_RICHTEK_RTQ6056 is not set +# CONFIG_SD_ADC_MODULATOR is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7924 is not set +# CONFIG_TI_ADS1100 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8344 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_ADS124S08 is not set +# CONFIG_TI_ADS131E08 is not set +# CONFIG_TI_LMP92064 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_TI_TSC2046 is not set +# CONFIG_VF610_ADC is not set +# CONFIG_VIPERBOARD_ADC is not set +# CONFIG_XILINX_XADC is not set +# end of Analog to digital converters + +# +# Analog to digital and digital to analog converters +# +# CONFIG_AD74115 is not set +# CONFIG_AD74413R is not set +# end of Analog to digital and digital to analog converters + +# +# Analog Front Ends +# +# CONFIG_IIO_RESCALE is not set +# end of Analog Front Ends + +# +# Amplifiers +# +# CONFIG_AD8366 is not set +# CONFIG_ADA4250 is not set +# CONFIG_HMC425 is not set +# end of Amplifiers + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# 
CONFIG_AD7746 is not set +# end of Capacitance to digital converters + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_ATLAS_EZO_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_SCD30_CORE is not set +# CONFIG_SCD4X is not set +# CONFIG_SENSIRION_SGP30 is not set +# CONFIG_SENSIRION_SGP40 is not set +# CONFIG_SPS30_I2C is not set +# CONFIG_SENSEAIR_SUNRISE_CO2 is not set +# CONFIG_VZ89X is not set +# end of Chemical Sensors + +# +# Hid Sensor IIO Common +# +CONFIG_HID_SENSOR_IIO_COMMON=m +CONFIG_HID_SENSOR_IIO_TRIGGER=m +# end of Hid Sensor IIO Common + +# +# IIO SCMI Sensors +# +# end of IIO SCMI Sensors + +# +# SSP Sensor Common +# +# CONFIG_IIO_SSP_SENSORHUB is not set +# end of SSP Sensor Common + +# +# Digital to analog converters +# +# CONFIG_AD3552R is not set +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2688 is not set +# CONFIG_AD5686_SPI is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5766 is not set +# CONFIG_AD5770R is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7293 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# CONFIG_DPOT_DAC is not set +# CONFIG_DS4424 is not set +# CONFIG_LTC1660 is not set +# CONFIG_LTC2632 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MAX5522 is not set +# CONFIG_MAX5821 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4728 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set +# CONFIG_TI_DAC5571 is not set +# CONFIG_TI_DAC7311 is not set +# CONFIG_TI_DAC7612 is not set +# CONFIG_VF610_DAC is 
not set +# end of Digital to analog converters + +# +# IIO dummy driver +# +# end of IIO dummy driver + +# +# Filters +# +# CONFIG_ADMV8818 is not set +# end of Filters + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# CONFIG_AD9523 is not set +# end of Clock Generator/Distribution + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# CONFIG_ADF4350 is not set +# CONFIG_ADF4371 is not set +# CONFIG_ADF4377 is not set +# CONFIG_ADMV1013 is not set +# CONFIG_ADMV1014 is not set +# CONFIG_ADMV4420 is not set +# CONFIG_ADRF6780 is not set +# end of Phase-Locked Loop (PLL) frequency synthesizers +# end of Frequency Synthesizers DDS/PLL + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS290 is not set +# CONFIG_ADXRS450 is not set +# CONFIG_BMG160 is not set +# CONFIG_FXAS21002C is not set CONFIG_HID_SENSOR_GYRO_3D=m +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set +# end of Digital gyroscope sensors + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4403 is not set +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set +# end of Heart Rate Monitors +# end of Health Sensors + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# CONFIG_HDC2010 is not set CONFIG_HID_SENSOR_HUMIDITY=m +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set +# end of Humidity sensors + +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16460 is not set +# CONFIG_ADIS16475 is not set +# CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +# CONFIG_BOSCH_BNO055_I2C is not set +# CONFIG_FXOS8700_I2C is not set +# CONFIG_FXOS8700_SPI is not set +# 
CONFIG_KMX61 is not set +# CONFIG_INV_ICM42600_I2C is not set +# CONFIG_INV_ICM42600_SPI is not set +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set +# CONFIG_IIO_ST_LSM6DSX is not set +# CONFIG_IIO_ST_LSM9DS0 is not set +# end of Inertial measurement units + +# +# Light sensors +# +# CONFIG_ACPI_ALS is not set +# CONFIG_ADJD_S311 is not set +# CONFIG_ADUX1020 is not set +# CONFIG_AL3010 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_AS73211 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM3605 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP002 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set CONFIG_HID_SENSOR_ALS=m CONFIG_HID_SENSOR_PROX=m +# CONFIG_JSA1212 is not set +# CONFIG_ROHM_BU27008 is not set +# CONFIG_ROHM_BU27034 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LTRF216A is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_MAX44009 is not set +# CONFIG_NOA1305 is not set +# CONFIG_OPT3001 is not set +# CONFIG_OPT4001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2591 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VCNL4035 is not set +# CONFIG_VEML6030 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set +# end of Light sensors + +# +# Magnetometer sensors +# +# CONFIG_AK8974 is not set +# CONFIG_AK8975 is not 
set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set +# CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_SENSORS_RM3100_SPI is not set +# CONFIG_TI_TMAG5273 is not set +# CONFIG_YAMAHA_YAS530 is not set +# end of Magnetometer sensors + +# +# Multiplexers +# +# CONFIG_IIO_MUX is not set +# end of Multiplexers + +# +# Inclinometer sensors +# CONFIG_HID_SENSOR_INCLINOMETER_3D=m CONFIG_HID_SENSOR_DEVICE_ROTATION=m +# end of Inclinometer sensors + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set +# end of Triggers - standalone + +# +# Linear and angular position sensors +# +# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set +# end of Linear and angular position sensors + +# +# Digital potentiometers +# +# CONFIG_AD5110 is not set +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5432 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_MCP41010 is not set +# CONFIG_TPL0102 is not set +# CONFIG_X9250 is not set +# end of Digital potentiometers + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set +# end of Digital potentiostats + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +# CONFIG_DLHL60D is not set +# CONFIG_DPS310 is not set CONFIG_HID_SENSOR_PRESS=m +# CONFIG_HP03 is not set +# CONFIG_ICP10100 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MPRLS0025PA is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is 
not set +# CONFIG_ZPA2326 is not set +# end of Pressure sensors + +# +# Lightning sensors +# +# CONFIG_AS3935 is not set +# end of Lightning sensors + +# +# Proximity and distance sensors +# +# CONFIG_IRSD200 is not set +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_MB1232 is not set +# CONFIG_PING is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9310 is not set +# CONFIG_SX9324 is not set +# CONFIG_SX9360 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set +# CONFIG_VCNL3020 is not set +# CONFIG_VL53L0X_I2C is not set +# end of Proximity and distance sensors + +# +# Resolver to digital converters +# +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1200 is not set +# end of Resolver to digital converters + +# +# Temperature sensors +# +# CONFIG_LTC2983 is not set +# CONFIG_MAXIM_THERMOCOUPLE is not set CONFIG_HID_SENSOR_TEMP=m +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TMP117 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_MAX30208 is not set +# CONFIG_MAX31856 is not set +# CONFIG_MAX31865 is not set +# end of Temperature sensors + CONFIG_NTB=m +# CONFIG_NTB_MSI is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_EPF is not set +# CONFIG_NTB_SWITCHTEC is not set CONFIG_NTB_PINGPONG=m CONFIG_NTB_TOOL=m CONFIG_NTB_PERF=m CONFIG_NTB_TRANSPORT=m CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_ATMEL_TCB is not set +# CONFIG_PWM_CLK is not set +# CONFIG_PWM_DWC is not set +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_PCA9685 is not set +# CONFIG_PWM_XILINX is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +# CONFIG_AL_FIC is not set +# CONFIG_XILINX_INTC is not set +CONFIG_IRQ_LOONGARCH_CPU=y +CONFIG_LOONGSON_LIOINTC=y +CONFIG_LOONGSON_EIOINTC=y +CONFIG_LOONGSON_HTVEC=y +CONFIG_LOONGSON_PCH_PIC=y +CONFIG_LOONGSON_PCH_MSI=y +CONFIG_LOONGSON_PCH_LPC=y +# 
end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_SIMPLE is not set +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_CADENCE_TORRENT is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_DPHY_RX is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_CADENCE_SALVO is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_LAN966X_SERDES is not set +# CONFIG_PHY_CPCAP_USB is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_OCELOT_SERDES is not set +# CONFIG_PHY_SAMSUNG_USB2 is not set +# end of PHY Subsystem + CONFIG_POWERCAP=y +# CONFIG_DTPM is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_DWC_PCIE_PMU is not set +# end of Performance monitor support + +CONFIG_RAS=y CONFIG_USB4=m +# CONFIG_USB4_DEBUGFS_WRITE is not set +# CONFIG_USB4_DMA_TEST is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +# CONFIG_LIBNVDIMM is not set CONFIG_DAX=y CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_KMEM=m +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_RMEM is not set +# CONFIG_NVMEM_U_BOOT_ENV is not set + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_PM_OPP=y +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# end 
of Device Drivers + +# +# File systems +# +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -1951,105 +7693,289 @@ CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT3_FS=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set CONFIG_XFS_FS=y +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set CONFIG_GFS2_FS=m CONFIG_GFS2_FS_LOCKING_DLM=y CONFIG_OCFS2_FS=m +CONFIG_OCFS2_FS_O2CB=m +CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m +CONFIG_OCFS2_FS_STATS=y +CONFIG_OCFS2_DEBUG_MASKLOG=y +# CONFIG_OCFS2_DEBUG_FS is not set CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y CONFIG_FANOTIFY=y CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y CONFIG_QFMT_V1=m CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y CONFIG_AUTOFS_FS=y CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m +# CONFIG_VIRT_FUSE is not set 
CONFIG_OVERLAY_FS=y +CONFIG_OVERLAY_FS_REDIRECT_DIR=y # CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set CONFIG_OVERLAY_FS_INDEX=y CONFIG_OVERLAY_FS_XINO_AUTO=y CONFIG_OVERLAY_FS_METACOPY=y +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=y +CONFIG_NETFS_STATS=y CONFIG_FSCACHE=m CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +# CONFIG_CACHEFILES_ONDEMAND is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# CONFIG_ISO9660_FS=m CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_FAT_DEFAULT_CODEPAGE=936 CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" +# CONFIG_FAT_DEFAULT_UTF8 is not set CONFIG_EXFAT_FS=m +CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8" CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +# CONFIG_NTFS_RW is not set CONFIG_NTFS3_FS=m CONFIG_NTFS3_64BIT_CLUSTER=y CONFIG_NTFS3_LZX_XPRESS=y +# CONFIG_NTFS3_FS_POSIX_ACL is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_ARCH_SUPPORTS_HUGETLBFS=y CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y +# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set CONFIG_CONFIGFS_FS=y CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y CONFIG_ORANGEFS_FS=m +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set CONFIG_ECRYPT_FS=m CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m +# CONFIG_BEFS_FS is not set +# 
CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set CONFIG_UBIFS_FS=m CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_UBIFS_FS_ZSTD=y +# CONFIG_UBIFS_ATIME_SUPPORT is not set +CONFIG_UBIFS_FS_XATTR=y +CONFIG_UBIFS_FS_SECURITY=y +# CONFIG_UBIFS_FS_AUTHENTICATION is not set CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y CONFIG_SQUASHFS_LZ4=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set CONFIG_MINIX_FS=m +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set CONFIG_ROMFS_FS=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_MTD is not set +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +CONFIG_ROMFS_ON_BLOCK=y CONFIG_PSTORE=m +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +# CONFIG_PSTORE_RAM is not set +# CONFIG_PSTORE_BLK is not set CONFIG_SYSV_FS=m CONFIG_UFS_FS=m +# CONFIG_UFS_FS_WRITE is not set +# CONFIG_UFS_DEBUG is not set CONFIG_EROFS_FS=m +# CONFIG_EROFS_FS_DEBUG is not set +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y CONFIG_EROFS_FS_ZIP_LZMA=y +# CONFIG_EROFS_FS_ZIP_DEFLATE is not set CONFIG_EROFS_FS_PCPU_KTHREAD=y +CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI=y 
+CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y # CONFIG_NFS_V2 is not set CONFIG_NFS_V3=m CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set CONFIG_NFS_V4_1=y CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +# CONFIG_ROOT_NFS is not set +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y # CONFIG_NFS_DISABLE_UDP_SUPPORT is not set +CONFIG_NFS_V4_2_READ_PLUS=y CONFIG_NFSD=y +# CONFIG_NFSD_V2 is not set CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y CONFIG_NFSD_BLOCKLAYOUT=y CONFIG_NFSD_SCSILAYOUT=y CONFIG_NFSD_FLEXFILELAYOUT=y CONFIG_NFSD_V4_2_INTER_SSC=y CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=y +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m CONFIG_CEPH_FS=m CONFIG_CEPH_FSCACHE=y CONFIG_CEPH_FS_POSIX_ACL=y CONFIG_CEPH_FS_SECURITY_LABEL=y CONFIG_CIFS=m # CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y CONFIG_CIFS_UPCALL=y CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y # CONFIG_CIFS_DEBUG is not set CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=m +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set CONFIG_9P_FS=y +# CONFIG_9P_FS_POSIX_ACL is not set +# CONFIG_9P_FS_SECURITY is not set +CONFIG_NLS=y CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -2100,44 +8026,207 @@ 
CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_NLS_UTF8=y +CONFIG_NLS_UCS2_UTILS=m CONFIG_DLM=m CONFIG_DLM_DEBUG=y +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set CONFIG_PERSISTENT_KEYRINGS=y CONFIG_TRUSTED_KEYS=y +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set CONFIG_KEY_DH_OPERATIONS=y +# CONFIG_SECURITY_DMESG_RESTRICT is not set CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_INFINIBAND=y CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y CONFIG_LSM_MMAP_MIN_ADDR=65535 CONFIG_HARDENED_USERCOPY=y +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set CONFIG_SECURITY_APPARMOR=y +# CONFIG_SECURITY_APPARMOR_DEBUG is not set +CONFIG_SECURITY_APPARMOR_INTROSPECT_POLICY=y +CONFIG_SECURITY_APPARMOR_HASH=y +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y +CONFIG_SECURITY_APPARMOR_EXPORT_BINARY=y +CONFIG_SECURITY_APPARMOR_PARANOID_LOAD=y +# CONFIG_SECURITY_LOADPIN is not set CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set CONFIG_SECURITY_LOCKDOWN_LSM=y CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y +# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set +# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y CONFIG_INTEGRITY_SIGNATURE=y CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set 
+CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +# CONFIG_IMA_WRITE_POLICY is not set CONFIG_IMA_READ_POLICY=y CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +# CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY is not set +# CONFIG_IMA_BLACKLIST_KEYRING is not set CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_DISABLE_HTABLE is not set CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_APPARMOR is not set CONFIG_DEFAULT_SECURITY_DAC=y CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +# CONFIG_INIT_STACK_NONE is not set +# CONFIG_INIT_STACK_ALL_PATTERN is not set +CONFIG_INIT_STACK_ALL_ZERO=y +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# end of Hardening of kernel data structures 
+ +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_ENGINE=m +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set CONFIG_CRYPTO_SM2=y +# CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m @@ -2145,59 +8234,618 @@ CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m 
CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set CONFIG_CRYPTO_LRW=m +# CONFIG_CRYPTO_OFB is not set CONFIG_CRYPTO_PCBC=m +# CONFIG_CRYPTO_XTS is not set +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=y CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=y +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLY1305=m CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=y +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=m +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_LZO=m CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=y +# end of Compression + +# +# Random number generation +# CONFIG_CRYPTO_ANSI_CPRNG=m 
+CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +CONFIG_CRYPTO_KDF800108_CTR=y +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y CONFIG_CRYPTO_USER_API_HASH=y CONFIG_CRYPTO_USER_API_SKCIPHER=y CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y + +# +# Accelerated Cryptographic Algorithms for CPU (loongarch) +# CONFIG_CRYPTO_CRC32_LOONGARCH=m +# end of Accelerated Cryptographic Algorithms for CPU (loongarch) + +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set CONFIG_CRYPTO_DEV_CHELSIO=m CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" 
+# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set CONFIG_SECONDARY_TRUSTED_KEYRING=y CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" CONFIG_SYSTEM_REVOCATION_LIST=y +CONFIG_SYSTEM_REVOCATION_KEYS="" +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_PACKING=y +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=1 +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=m +CONFIG_CRC16=y CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=m CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set CONFIG_CRC7=m +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=m +CONFIG_842_DECOMPRESS=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=y 
+CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +# CONFIG_DMA_RESTRICTED_POOL is not set CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=16 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +# CONFIG_CPUMASK_OFFSTACK is not set +# CONFIG_FORCE_NR_CPUS is not set +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of 
Library routines + +CONFIG_GENERIC_LIB_ASHLDI3=y +CONFIG_GENERIC_LIB_ASHRDI3=y +CONFIG_GENERIC_LIB_LSHRDI3=y +CONFIG_GENERIC_LIB_CMPDI2=y +CONFIG_GENERIC_LIB_UCMPDI2=y +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# CONFIG_PRINTK_TIME=y CONFIG_PRINTK_CALLER=y +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 CONFIG_BOOT_PRINTK_DELAY=y CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set CONFIG_FRAME_WARN=4096 CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +# CONFIG_VMLINUX_MAP is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_UBSAN is not set +CONFIG_HAVE_KCSAN_COMPILER=y +# end of 
Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SHRINKER_DEBUG is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_STACKOVERFLOW is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_ARCH_DISABLE_KASAN_INLINE=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +CONFIG_HAVE_ARCH_KFENCE=y +# CONFIG_KFENCE is not set +# end of Memory Debugging + CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_SDEI_WATCHDOG is not set +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PERF is not set +CONFIG_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# # 
CONFIG_SCHED_DEBUG is not set +CONFIG_SCHED_INFO=y CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set # CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_RETHOOK=y +CONFIG_RETHOOK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y 
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FPROBE is not set +# CONFIG_FUNCTION_PROFILER is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +# CONFIG_OSNOISE_TRACER is not set +# CONFIG_TIMERLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_CC=y +# CONFIG_SYNTH_EVENTS is not set +# CONFIG_USER_EVENTS is not set +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_RV is not set +# CONFIG_SAMPLES is not set 
+CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y # CONFIG_STRICT_DEVMEM is not set + +# +# loongarch Debugging +# +# CONFIG_UNWINDER_GUESS is not set +CONFIG_UNWINDER_PROLOGUE=y +# end of loongarch Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FUNCTION_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set # CONFIG_RUNTIME_TESTING_MENU is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig index db41cbf5efd4..365f27c124b4 100644 --- a/arch/loongarch/configs/anolis_defconfig +++ b/arch/loongarch/configs/anolis_defconfig @@ -1,147 +1,904 @@ # -## Automatically generated file; DO NOT EDIT. -## Linux/loongarch 6.6.7 Kernel Configuration -## +# Automatically generated file; DO NOT EDIT. 
+# Linux/loongarch 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" # CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +# CONFIG_KERNEL_ZSTD is not set +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y 
+CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y + +# +# BPF subsystem +# CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set # CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_USERMODE_DRIVER=y +# CONFIG_BPF_PRELOAD is not set +# CONFIG_BPF_LSM is not set +# end of BPF subsystem + +CONFIG_PREEMPT_VOLUNTARY_BUILD=y +# CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +# CONFIG_SCHED_CORE is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +# CONFIG_PSI_DEFAULT_DISABLED is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +# CONFIG_IKCONFIG is not set +# CONFIG_IKHEADERS is not set CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y 
+CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y CONFIG_CHECKPOINT_RESTORE=y CONFIG_SCHED_AUTOGROUP=y CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN=y +CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW=y CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +# CONFIG_DEBUG_RSEQ is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y +# CONFIG_PC104 is not set + +# +# Kernel 
Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y CONFIG_KEXEC=y CONFIG_CRASH_DUMP=y +# end of Kexec and crash features +# end of General setup + +CONFIG_LOONGARCH=y +CONFIG_64BIT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_L1_CACHE_SHIFT=6 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MACH_LOONGSON64=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_PGTABLE_3LEVEL=y +CONFIG_PGTABLE_LEVELS=3 +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_AS_HAS_EXPLICIT_RELOCS=y +CONFIG_AS_HAS_FCSR_CLASS=y +CONFIG_AS_HAS_LSX_EXTENSION=y +CONFIG_AS_HAS_LASX_EXTENSION=y +CONFIG_AS_HAS_LBT_EXTENSION=y +CONFIG_AS_HAS_LVZ_EXTENSION=y + +# +# Kernel type and options +# +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +# CONFIG_4KB_3LEVEL is not set +# CONFIG_4KB_4LEVEL is not set +# CONFIG_16KB_2LEVEL is not set +CONFIG_16KB_3LEVEL=y +# CONFIG_64KB_2LEVEL is not set +# CONFIG_64KB_3LEVEL is not set +CONFIG_CMDLINE="" +CONFIG_CMDLINE_BOOTLOADER=y +# CONFIG_CMDLINE_EXTEND is not set +# CONFIG_CMDLINE_FORCE is not set +CONFIG_DMI=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_SCHED_SMT=y +CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y CONFIG_NR_CPUS=256 CONFIG_NUMA=y +CONFIG_NODES_SHIFT=6 +CONFIG_ARCH_FORCE_MAX_ORDER=11 +CONFIG_ARCH_IOREMAP=y +CONFIG_ARCH_WRITECOMBINE=y +CONFIG_ARCH_STRICT_ALIGN=y +CONFIG_CPU_HAS_FPU=y CONFIG_CPU_HAS_LSX=y CONFIG_CPU_HAS_LASX=y +CONFIG_CPU_HAS_LBT=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_ARCH_SELECTS_CRASH_DUMP=y +CONFIG_RELOCATABLE=y 
CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_BASE_MAX_OFFSET=0x01000000 +CONFIG_SECCOMP=y +# end of Kernel type and options + +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=12 +CONFIG_ARCH_MMAP_RND_BITS_MAX=18 +CONFIG_ARCH_SUPPORTS_UPROBES=y + +# +# Power management options +# +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y + +# +# CPU Frequency scaling +# CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_CPUFREQ_DT_PLATDEV is not set CONFIG_LOONGSON3_ACPI_CPUFREQ=y +# end of CPU Frequency scaling + +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +CONFIG_HIBERNATE_CALLBACKS=y CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_CPU_PM=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_SLEEP=y +# 
CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=y +CONFIG_ACPI_FAN=y CONFIG_ACPI_TAD=y CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_PROCESSOR=y CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y CONFIG_ACPI_HOTPLUG_MEMORY=y +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set +CONFIG_ACPI_NUMA=y +# CONFIG_ACPI_HMAT is not set +CONFIG_ACPI_WATCHDOG=y +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_PPTT=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +# end of Power management options + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m +CONFIG_KVM=y + +# +# General architecture-dependent options +# +CONFIG_GENERIC_ENTRY=y +CONFIG_KPROBES=y CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_KRETPROBE_ON_RETHOOK=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y 
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_MERGE_VMAS=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_TIF_NOHZ=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=12 +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +# CONFIG_COMPAT_32BIT_TIME is not set +CONFIG_ARCH_HAS_PHYS_TO_DMA=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT=0 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y 
CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=m CONFIG_BLK_DEV_ZONED=y CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set CONFIG_BLK_WBT=y +CONFIG_BLK_WBT_MQ=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_FC_APPID is not set +# CONFIG_BLK_CGROUP_IOCOST is not set +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# 
CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_TRYLOCK=y +CONFIG_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_INLINE_SPIN_LOCK=y +CONFIG_INLINE_SPIN_LOCK_BH=y +CONFIG_INLINE_SPIN_LOCK_IRQ=y +CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_LOCK=y +CONFIG_INLINE_READ_LOCK_BH=y +CONFIG_INLINE_READ_LOCK_IRQ=y +CONFIG_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_LOCK=y 
+CONFIG_INLINE_WRITE_LOCK_BH=y +CONFIG_INLINE_WRITE_LOCK_IRQ=y +CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_CK_KABI_RESERVE=y +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="zstd" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y CONFIG_Z3FOLD=y +CONFIG_ZSMALLOC=y CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLUB_TINY is not set +CONFIG_SLAB_MERGE_DEFAULT=y CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + # 
CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set # CONFIG_COMPAT_BRK is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=19 +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_PAGE_IDLE_FLAG=y CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ZONE_DMA32=y +CONFIG_HMM_MIRROR=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MEMFD_CREATE=y +# CONFIG_ANON_VMA_NAME is not set CONFIG_USERFAULTFD=y +# CONFIG_LRU_GEN is not set +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +# CONFIG_DAMON is not set +# end of Data Access Monitoring +# end of Memory Management options + CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y 
+ +# +# Networking options +# CONFIG_PACKET=y CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y CONFIG_UNIX_DIAG=m CONFIG_TLS=m CONFIG_TLS_DEVICE=y CONFIG_TLS_TOE=y +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y CONFIG_XFRM_USER=y CONFIG_XFRM_INTERFACE=m CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m CONFIG_NET_KEY=m CONFIG_NET_KEY_MIGRATE=y +CONFIG_XFRM_ESPINTCP=y CONFIG_SMC=m CONFIG_SMC_DIAG=m CONFIG_XDP_SOCKETS=y CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_FIB_TRIE_STATS=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_MULTIPATH=y CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m CONFIG_NET_IPGRE=m CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y CONFIG_IP_MROUTE=y CONFIG_IP_MROUTE_MULTIPLE_TABLES=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +CONFIG_NET_FOU=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_ESPINTCP=y CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m CONFIG_INET_UDP_DIAG=m CONFIG_INET_RAW_DIAG=m CONFIG_INET_DIAG_DESTROY=y CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m CONFIG_TCP_CONG_CUBIC=m +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m CONFIG_TCP_CONG_HSTCP=m CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m CONFIG_TCP_CONG_NV=m CONFIG_TCP_CONG_SCALABLE=m CONFIG_TCP_CONG_LP=m @@ -151,6 +908,8 @@ CONFIG_TCP_CONG_ILLINOIS=m CONFIG_TCP_CONG_DCTCP=m CONFIG_TCP_CONG_CDG=m CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_RENO=y +CONFIG_DEFAULT_TCP_CONG="reno" CONFIG_TCP_MD5SIG=y 
CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y @@ -163,9 +922,17 @@ CONFIG_INET6_ESPINTCP=y CONFIG_INET6_IPCOMP=m CONFIG_IPV6_MIP6=m CONFIG_IPV6_ILA=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m CONFIG_IPV6_GRE=m +CONFIG_IPV6_FOU=m +CONFIG_IPV6_FOU_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_IPV6_MROUTE=y CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y @@ -173,21 +940,53 @@ CONFIG_IPV6_PIMSM_V2=y CONFIG_IPV6_SEG6_LWTUNNEL=y CONFIG_IPV6_SEG6_HMAC=y CONFIG_IPV6_RPL_LWTUNNEL=y +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set CONFIG_NETLABEL=y CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y CONFIG_NETWORK_PHY_TIMESTAMPING=y CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_ZONES=y +# CONFIG_NF_CONNTRACK_PROCFS is not set CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CONNTRACK_TIMEOUT=y CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m @@ -198,6 +997,16 @@ 
CONFIG_NF_CT_NETLINK=m CONFIG_NF_CT_NETLINK_TIMEOUT=m CONFIG_NF_CT_NETLINK_HELPER=m CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=y CONFIG_NF_TABLES_NETDEV=y @@ -214,41 +1023,67 @@ CONFIG_NFT_TUNNEL=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m CONFIG_NFT_FIB_INET=m CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m CONFIG_NFT_OSF=m CONFIG_NFT_TPROXY=m CONFIG_NFT_SYNPROXY=m +CONFIG_NF_DUP_NETDEV=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set CONFIG_NF_FLOW_TABLE_INET=m CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m CONFIG_NETFILTER_XT_TARGET_HMARK=m CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m CONFIG_NETFILTER_XT_TARGET_LED=m CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m 
CONFIG_NETFILTER_XT_TARGET_SECMARK=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CGROUP=m @@ -260,11 +1095,14 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m CONFIG_NETFILTER_XT_MATCH_IPCOMP=m CONFIG_NETFILTER_XT_MATCH_IPRANGE=m CONFIG_NETFILTER_XT_MATCH_IPVS=m @@ -284,6 +1122,7 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m @@ -291,7 +1130,10 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m @@ -311,11 +1153,21 @@ CONFIG_IP_SET_LIST_SET=m CONFIG_IP_VS=m CONFIG_IP_VS_IPV6=y CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# CONFIG_IP_VS_PROTO_TCP=y CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y CONFIG_IP_VS_PROTO_ESP=y CONFIG_IP_VS_PROTO_AH=y CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# CONFIG_IP_VS_RR=m CONFIG_IP_VS_WRR=m CONFIG_IP_VS_LC=m @@ -329,13 +1181,43 @@ CONFIG_IP_VS_SH=m CONFIG_IP_VS_MH=m CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# 
+CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -356,8 +1238,20 @@ CONFIG_IP_NF_SECURITY=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -379,6 +1273,9 @@ CONFIG_IP6_NF_SECURITY=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m CONFIG_NF_TABLES_BRIDGE=m CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m @@ -405,19 +1302,43 @@ CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_BPFILTER=y +CONFIG_BPFILTER_UMH=m CONFIG_IP_DCCP=m +CONFIG_INET_DCCP_DIAG=m + +# +# DCCP CCIDs Configuration +# CONFIG_IP_DCCP_CCID2_DEBUG=y +CONFIG_IP_DCCP_CCID3=y CONFIG_IP_DCCP_CCID3_DEBUG=y +CONFIG_IP_DCCP_TFRC_LIB=y +CONFIG_IP_DCCP_TFRC_DEBUG=y +# end of DCCP CCIDs Configuration + +# +# DCCP Kernel Hacking +# CONFIG_IP_DCCP_DEBUG=y +# end of DCCP Kernel Hacking + +CONFIG_IP_SCTP=m CONFIG_SCTP_DBG_OBJCNT=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set 
CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m CONFIG_RDS=m CONFIG_RDS_RDMA=m CONFIG_RDS_TCP=m CONFIG_RDS_DEBUG=y CONFIG_TIPC=m CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m CONFIG_ATM=m CONFIG_ATM_CLIP=m CONFIG_ATM_CLIP_NO_ICMP=y @@ -426,30 +1347,46 @@ CONFIG_ATM_MPOA=m CONFIG_ATM_BR2684=m CONFIG_ATM_BR2684_IPFILTER=y CONFIG_L2TP=m +# CONFIG_L2TP_DEBUGFS is not set CONFIG_L2TP_V3=y CONFIG_L2TP_IP=m CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y CONFIG_BRIDGE_VLAN_FILTERING=y CONFIG_BRIDGE_MRP=y +# CONFIG_BRIDGE_CFM is not set CONFIG_NET_DSA=m +# CONFIG_NET_DSA_TAG_NONE is not set CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM_COMMON=m CONFIG_NET_DSA_TAG_BRCM=m +# CONFIG_NET_DSA_TAG_BRCM_LEGACY is not set CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +# CONFIG_NET_DSA_TAG_HELLCREEK is not set CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA_COMMON=m CONFIG_NET_DSA_TAG_DSA=m CONFIG_NET_DSA_TAG_EDSA=m CONFIG_NET_DSA_TAG_MTK=m CONFIG_NET_DSA_TAG_KSZ=m CONFIG_NET_DSA_TAG_OCELOT=m +# CONFIG_NET_DSA_TAG_OCELOT_8021Q is not set CONFIG_NET_DSA_TAG_QCA=m CONFIG_NET_DSA_TAG_RTL4_A=m +# CONFIG_NET_DSA_TAG_RTL8_4 is not set +# CONFIG_NET_DSA_TAG_RZN1_A5PSW is not set CONFIG_NET_DSA_TAG_LAN9303=m CONFIG_NET_DSA_TAG_SJA1105=m CONFIG_NET_DSA_TAG_TRAILER=m +# CONFIG_NET_DSA_TAG_XRS700X is not set CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m CONFIG_LLC2=m CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m @@ -459,12 +1396,18 @@ CONFIG_X25=m CONFIG_LAPB=m CONFIG_PHONET=m CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set # CONFIG_6LOWPAN_NHC is not set CONFIG_IEEE802154=m CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_SOCKET=m CONFIG_IEEE802154_6LOWPAN=m CONFIG_MAC802154=m CONFIG_NET_SCHED=y + +# +# 
Queueing/Scheduling +# CONFIG_NET_SCH_HTB=m CONFIG_NET_SCH_HFSC=m CONFIG_NET_SCH_PRIO=m @@ -476,6 +1419,7 @@ CONFIG_NET_SCH_TEQL=m CONFIG_NET_SCH_TBF=m CONFIG_NET_SCH_CBS=m CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_MQPRIO_LIB=m CONFIG_NET_SCH_TAPRIO=m CONFIG_NET_SCH_GRED=m CONFIG_NET_SCH_NETEM=m @@ -495,7 +1439,18 @@ CONFIG_NET_SCH_INGRESS=m CONFIG_NET_SCH_PLUG=m CONFIG_NET_SCH_ETS=m CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_FQ_PIE is not set +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y CONFIG_NET_CLS_BASIC=m CONFIG_NET_CLS_ROUTE4=m CONFIG_NET_CLS_FW=m @@ -508,11 +1463,13 @@ CONFIG_NET_CLS_BPF=m CONFIG_NET_CLS_FLOWER=m CONFIG_NET_CLS_MATCHALL=m CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 CONFIG_NET_EMATCH_CMP=m CONFIG_NET_EMATCH_NBYTE=m CONFIG_NET_EMATCH_U32=m CONFIG_NET_EMATCH_META=m CONFIG_NET_EMATCH_TEXT=m +# CONFIG_NET_EMATCH_CANID is not set CONFIG_NET_EMATCH_IPSET=m CONFIG_NET_EMATCH_IPT=m CONFIG_NET_CLS_ACT=y @@ -541,29 +1498,70 @@ CONFIG_NET_IFE_SKBMARK=m CONFIG_NET_IFE_SKBPRIO=m CONFIG_NET_IFE_SKBTCINDEX=m CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y CONFIG_DCB=y CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_BATMAN_V=y +CONFIG_BATMAN_ADV_BLA=y +CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y CONFIG_BATMAN_ADV_DEBUG=y +# CONFIG_BATMAN_ADV_TRACING is not set CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=y CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=y CONFIG_HSR=m +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y CONFIG_QRTR=m CONFIG_QRTR_TUN=m CONFIG_NET_NCSI=y 
CONFIG_NCSI_OEM_CMD_GET_MAC=y +# CONFIG_NCSI_OEM_CMD_KEEP_PHY is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# CONFIG_NET_PKTGEN=m +# CONFIG_NET_DROP_MONITOR is not set +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m +# CONFIG_CAN_J1939 is not set +# CONFIG_CAN_ISOTP is not set CONFIG_BT=m +CONFIG_BT_BREDR=y CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m @@ -572,95 +1570,514 @@ CONFIG_BT_BNEP_PROTO_FILTER=y CONFIG_BT_CMTP=m CONFIG_BT_HIDP=m CONFIG_BT_HS=y +CONFIG_BT_LE=y +CONFIG_BT_LE_L2CAP_ECRED=y +# CONFIG_BT_6LOWPAN is not set +# CONFIG_BT_LEDS is not set +# CONFIG_BT_MSFTEXT is not set +# CONFIG_BT_AOSPEXT is not set +CONFIG_BT_DEBUGFS=y +# CONFIG_BT_SELFTEST is not set + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_RTL=m CONFIG_BT_HCIBTUSB=m CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_POLL_SYNC=y # CONFIG_BT_HCIBTUSB_BCM is not set +# CONFIG_BT_HCIBTUSB_MTK is not set +CONFIG_BT_HCIBTUSB_RTL=y CONFIG_BT_HCIBTSDIO=m CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y CONFIG_BT_HCIUART_BCSP=y CONFIG_BT_HCIUART_ATH3K=y +# CONFIG_BT_HCIUART_INTEL is not set +# CONFIG_BT_HCIUART_AG6XX is not set CONFIG_BT_HCIBCM203X=m +# CONFIG_BT_HCIBCM4377 is not set CONFIG_BT_HCIBPA10X=m CONFIG_BT_HCIBFUSB=m CONFIG_BT_HCIVHCI=m CONFIG_BT_MRVL=m CONFIG_BT_MRVL_SDIO=m CONFIG_BT_ATH3K=m +# CONFIG_BT_MTKSDIO is not set +# CONFIG_BT_VIRTIO is not set +# end of Bluetooth device drivers + +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y CONFIG_CFG80211=m 
+# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y CONFIG_CFG80211_WEXT=y CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUGFS is not set +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set CONFIG_NET_9P=y +CONFIG_NET_9P_FD=y CONFIG_NET_9P_VIRTIO=y +# CONFIG_NET_9P_RDMA is not set +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y CONFIG_PCIEAER=y CONFIG_PCIEAER_INJECT=m CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +# CONFIG_PCIE_EDR is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_ARCH_FALLBACKS=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# 
CONFIG_PCI_REALLOC_ENABLE_AUTO is not set CONFIG_PCI_STUB=y CONFIG_PCI_PF_STUB=m +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y CONFIG_PCI_IOV=y +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCI_DYNAMIC_OF_NODES is not set +# CONFIG_PCIE_BUS_TUNE_OFF is not set +CONFIG_PCIE_BUS_DEFAULT=y +# CONFIG_PCIE_BUS_SAFE is not set +# CONFIG_PCIE_BUS_PERFORMANCE is not set +# CONFIG_PCIE_BUS_PEER2PEER is not set +CONFIG_VGA_ARB=y CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_ACPI=y +# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# +# CONFIG_PCI_FTPCI100 is not set +# CONFIG_PCI_HOST_GENERIC is not set +CONFIG_PCI_LOONGSON=y +# CONFIG_PCIE_MICROCHIP_HOST is not set +# CONFIG_PCIE_XILINX is not set + +# +# Cadence-based PCIe controllers +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence-based PCIe controllers + +# +# DesignWare-based PCIe controllers +# +# CONFIG_PCI_MESON is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# end of Mobiveil-based PCIe controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_CXL_BUS is not set CONFIG_PCCARD=m # CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y CONFIG_RAPIDIO=y CONFIG_RAPIDIO_TSI721=y +CONFIG_RAPIDIO_DISC_TIMEOUT=30 CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y +# CONFIG_RAPIDIO_DMA_ENGINE is not set +# CONFIG_RAPIDIO_DEBUG is not set CONFIG_RAPIDIO_ENUM_BASIC=m CONFIG_RAPIDIO_CHMAN=m CONFIG_RAPIDIO_MPORT_CDEV=m + +# +# 
RapidIO Switch drivers +# +# CONFIG_RAPIDIO_CPS_XX is not set +# CONFIG_RAPIDIO_CPS_GEN2 is not set +# CONFIG_RAPIDIO_RXS_GEN3 is not set +# end of RapidIO Switch drivers + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_FW_LOADER_COMPRESS=y +CONFIG_FW_LOADER_COMPRESS_XZ=y +# CONFIG_FW_LOADER_COMPRESS_ZSTD is not set +CONFIG_FW_CACHE=y +# CONFIG_FW_UPLOAD is not set +# end of Firmware loader + +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_SOC_BUS=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_MOXTET is not set +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# end of ARM System Control and Management Interface Protocol + +# CONFIG_FIRMWARE_MEMMAP is not set +CONFIG_DMIID=y CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y CONFIG_ISCSI_IBFT=m +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=m +# 
CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_GENERIC_STUB=y CONFIG_EFI_ZBOOT=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set CONFIG_EFI_CAPSULE_LOADER=m CONFIG_EFI_TEST=m +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# CONFIG_EFI_DISABLE_RUNTIME is not set +# CONFIG_EFI_COCO_SECRET is not set +# end of EFI (Extensible Firmware Interface) Support + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
+# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# CONFIG_MTD_CFI=m CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y CONFIG_MTD_CFI_INTELEXT=m CONFIG_MTD_CFI_AMDSTD=m CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m CONFIG_MTD_RAM=m CONFIG_MTD_ROM=m +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set CONFIG_MTD_BLOCK2MTD=m + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + CONFIG_MTD_SPI_NOR=m +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set 
+CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y +# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set CONFIG_MTD_UBI_GLUEBI=m CONFIG_MTD_UBI_BLOCK=y +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_RESERVED_MEM=y +# CONFIG_OF_OVERLAY is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y CONFIG_PARPORT=m CONFIG_PARPORT_PC=m CONFIG_PARPORT_SERIAL=m CONFIG_PARPORT_PC_FIFO=y +# CONFIG_PARPORT_PC_SUPERIO is not set CONFIG_PARPORT_1284=y +CONFIG_PARPORT_NOT_PC=y +CONFIG_PNP=y # CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set CONFIG_ZRAM=m +# CONFIG_ZRAM_DEF_COMP_LZORLE is not set CONFIG_ZRAM_DEF_COMP_ZSTD=y +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +# CONFIG_ZRAM_DEF_COMP_842 is not set +CONFIG_ZRAM_DEF_COMP="zstd" CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_ZRAM_MULTI_COMP is not set CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_UBLK is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m CONFIG_BLK_DEV_NVME=m CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_VERBOSE_ERRORS is not set +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=m CONFIG_NVME_RDMA=m CONFIG_NVME_FC=m CONFIG_NVME_TCP=m +# CONFIG_NVME_AUTH is not set 
CONFIG_NVME_TARGET=m CONFIG_NVME_TARGET_PASSTHRU=y CONFIG_NVME_TARGET_LOOP=m @@ -668,57 +2085,183 @@ CONFIG_NVME_TARGET_RDMA=m CONFIG_NVME_TARGET_FC=m CONFIG_NVME_TARGET_FCLOOP=m CONFIG_NVME_TARGET_TCP=m +# CONFIG_NVME_TARGET_AUTH is not set +# end of NVME Support + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +# CONFIG_ICS932S401 is not set CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set CONFIG_APDS9802ALS=m CONFIG_ISL29003=m CONFIG_ISL29020=m CONFIG_SENSORS_TSL2550=m CONFIG_SENSORS_BH1770=m CONFIG_SENSORS_APDS990X=m +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_DW_XDATA_PCIE is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +CONFIG_MISC_RTSX=m +# CONFIG_HISI_HIKEY_USB is not set +# CONFIG_OPEN_DICE is not set +# CONFIG_VCPU_STALL_DETECTOR is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_AT25 is not set CONFIG_EEPROM_LEGACY=m CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + CONFIG_SENSORS_LIS3_I2C=m +CONFIG_ALTERA_STAPL=m +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_BCM_VK is not set +# CONFIG_MISC_ALCOR_PCI is not set CONFIG_MISC_RTSX_PCI=m CONFIG_MISC_RTSX_USB=m CONFIG_UACCE=m CONFIG_PVPANIC=y +# CONFIG_PVPANIC_MMIO is not set +# CONFIG_PVPANIC_PCI is not set +# CONFIG_GP_PCI1XXXX is not set +# end of Misc devices + +# +# SCSI device support +# 
+CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI_COMMON=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# CONFIG_BLK_DEV_SD=m CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=m +CONFIG_BLK_DEV_BSG=y CONFIG_CHR_DEV_SCH=m CONFIG_SCSI_ENCLOSURE=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set CONFIG_SCSI_CXGB4_ISCSI=m CONFIG_SCSI_BNX2_ISCSI=m CONFIG_SCSI_BNX2X_FCOE=m CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set CONFIG_SCSI_AACRAID=m +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set CONFIG_SCSI_MVSAS=y # CONFIG_SCSI_MVSAS_DEBUG is not set CONFIG_SCSI_MVSAS_TASKLET=y CONFIG_SCSI_MVUMI=y +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set CONFIG_MEGARAID_NEWGEN=y CONFIG_MEGARAID_MM=y CONFIG_MEGARAID_MAILBOX=y CONFIG_MEGARAID_LEGACY=y CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT3SAS=y +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 CONFIG_SCSI_MPT2SAS=m +# CONFIG_SCSI_MPI3MR is not set CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set CONFIG_LIBFC=m CONFIG_LIBFCOE=m CONFIG_FCOE=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# 
CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_PPA is not set +# CONFIG_SCSI_IMM is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set CONFIG_SCSI_QLOGIC_1280=m CONFIG_SCSI_QLA_FC=m CONFIG_TCM_QLA2XXX=m +# CONFIG_TCM_QLA2XXX_DEBUG is not set CONFIG_SCSI_QLA_ISCSI=m +# CONFIG_SCSI_LPFC is not set +# CONFIG_SCSI_EFCT is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set CONFIG_SCSI_VIRTIO=m CONFIG_SCSI_CHELSIO_FCOE=m CONFIG_SCSI_DH=y @@ -726,25 +2269,141 @@ CONFIG_SCSI_DH_RDAC=y CONFIG_SCSI_DH_HP_SW=y CONFIG_SCSI_DH_EMC=y CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + CONFIG_ATA=y +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_AHCI_DWC is not set +# CONFIG_AHCI_CEVA is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# CONFIG_ATA_PIIX=m +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# 
CONFIG_PATA_ARTOP is not set CONFIG_PATA_ATIIXP=y +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_OF_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set +# CONFIG_PATA_PARPORT is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set CONFIG_MD=y CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_BITMAP_FILE=y CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m CONFIG_MD_MULTIPATH=m CONFIG_MD_FAULTY=m +# CONFIG_MD_CLUSTER is not set CONFIG_BCACHE=m +# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +# CONFIG_BCACHE_ASYNC_REGISTRATION is not set +CONFIG_BLK_DEV_DM_BUILTIN=y CONFIG_BLK_DEV_DM=m +# CONFIG_DM_DEBUG is not set +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m 
+CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m CONFIG_DM_WRITECACHE=m +# CONFIG_DM_EBS is not set CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set CONFIG_DM_MIRROR=m CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_RAID=m @@ -752,33 +2411,57 @@ CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_MULTIPATH_HST is not set +# CONFIG_DM_MULTIPATH_IOA is not set CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set CONFIG_DM_SWITCH=m CONFIG_DM_LOG_WRITES=m CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_DM_AUDIT=y CONFIG_TARGET_CORE=m CONFIG_TCM_IBLOCK=m CONFIG_TCM_FILEIO=m CONFIG_TCM_PSCSI=m CONFIG_TCM_USER2=m CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set CONFIG_ISCSI_TARGET=m CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_SBP_TARGET is not set +# CONFIG_REMOTE_TARGET is not set CONFIG_FUSION=y CONFIG_FUSION_SPI=m +# CONFIG_FUSION_FC is not set CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 CONFIG_FUSION_CTL=m CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# CONFIG_FIREWIRE=m CONFIG_FIREWIRE_OHCI=m CONFIG_FIREWIRE_SBP2=m CONFIG_FIREWIRE_NET=m +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y CONFIG_BONDING=m CONFIG_DUMMY=m CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set CONFIG_NET_FC=y CONFIG_IFB=m CONFIG_NET_TEAM=m @@ -789,89 +2472,210 @@ CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y CONFIG_IPVLAN=m CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set +# CONFIG_AMT is not set CONFIG_MACSEC=m CONFIG_NETCONSOLE=m 
CONFIG_NETCONSOLE_DYNAMIC=y +# CONFIG_NETCONSOLE_EXTENDED_LOG is not set +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y CONFIG_NTB_NETDEV=m CONFIG_RIONET=m +CONFIG_RIONET_TX_SIZE=128 +CONFIG_RIONET_RX_SIZE=128 CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set CONFIG_VETH=m CONFIG_VIRTIO_NET=m CONFIG_NLMON=m CONFIG_NET_VRF=m CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set # CONFIG_ATM_DRIVERS is not set + +# +# Distributed Switch Architecture drivers +# +# CONFIG_B53 is not set +# CONFIG_NET_DSA_BCM_SF2 is not set +# CONFIG_NET_DSA_LOOP is not set +# CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK is not set +# CONFIG_NET_DSA_LANTIQ_GSWIP is not set +# CONFIG_NET_DSA_MT7530 is not set +# CONFIG_NET_DSA_MV88E6060 is not set +# CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON is not set +# CONFIG_NET_DSA_MV88E6XXX is not set +# CONFIG_NET_DSA_AR9331 is not set +# CONFIG_NET_DSA_QCA8K is not set +# CONFIG_NET_DSA_SJA1105 is not set +# CONFIG_NET_DSA_XRS700X_I2C is not set +# CONFIG_NET_DSA_XRS700X_MDIO is not set +# CONFIG_NET_DSA_REALTEK is not set +# CONFIG_NET_DSA_SMSC_LAN9303_I2C is not set +# CONFIG_NET_DSA_SMSC_LAN9303_MDIO is not set +# CONFIG_NET_DSA_VITESSE_VSC73XX_SPI is not set +# CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM is not set +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_ADAPTEC is not set # CONFIG_NET_VENDOR_AGERE is not set # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set # CONFIG_NET_VENDOR_AMAZON is not set # CONFIG_NET_VENDOR_AMD is not set # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ASIX=y +# CONFIG_SPI_AX88796C is not set # CONFIG_NET_VENDOR_ATHEROS is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set CONFIG_BNX2=y +CONFIG_CNIC=m CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# 
CONFIG_SYSTEMPORT is not set CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +CONFIG_NET_VENDOR_CADENCE=y +# CONFIG_MACB is not set # CONFIG_NET_VENDOR_CAVIUM is not set +CONFIG_NET_VENDOR_CHELSIO=y CONFIG_CHELSIO_T1=m CONFIG_CHELSIO_T1_1G=y CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_CHELSIO_TLS_DEVICE is not set # CONFIG_NET_VENDOR_CISCO is not set # CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set CONFIG_DNET=m # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set +CONFIG_NET_VENDOR_ENGLEDER=y +# CONFIG_TSNEP is not set # CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_FUNGIBLE=y +# CONFIG_FUN_ETH is not set +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_NET_VENDOR_HUAWEI=y # CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set CONFIG_E1000=m CONFIG_E1000E=m CONFIG_IGB=m +CONFIG_IGB_HWMON=y CONFIG_IGBVF=m CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y CONFIG_I40E=m CONFIG_I40E_DCB=y +CONFIG_IAVF=m CONFIG_I40EVF=m CONFIG_ICE=m +CONFIG_ICE_SWITCHDEV=y CONFIG_FM10K=m +# CONFIG_IGC is not set +# CONFIG_JME is not set +CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set +CONFIG_NET_VENDOR_LITEX=y +# CONFIG_LITEX_LITEETH is not set # CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y # CONFIG_MLX4_CORE_GEN2 is not set CONFIG_MLX5_CORE=m CONFIG_MLX5_FPGA=y CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_BRIDGE=y +CONFIG_MLX5_CLS_ACT=y 
+CONFIG_MLX5_TC_CT=y +CONFIG_MLX5_TC_SAMPLE=y +CONFIG_MLX5_CORE_EN_DCB=y CONFIG_MLX5_CORE_IPOIB=y +# CONFIG_MLX5_MACSEC is not set +# CONFIG_MLX5_EN_IPSEC is not set +# CONFIG_MLX5_EN_TLS is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLX5_SF is not set CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_MICROCHIP is not set # CONFIG_NET_VENDOR_MICROSEMI is not set +CONFIG_NET_VENDOR_MICROSOFT=y # CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_FEALNX is not set # CONFIG_NET_VENDOR_NI is not set # CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set # CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set CONFIG_ETHOC=m +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set # CONFIG_NET_VENDOR_QLOGIC is not set # CONFIG_NET_VENDOR_BROCADE is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y CONFIG_8139CP=m CONFIG_8139TOO=m # CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set CONFIG_R8169=m # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set @@ -882,23 +2686,54 @@ CONFIG_R8169=m # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_SOCIONEXT is not set +CONFIG_NET_VENDOR_STMICRO=y CONFIG_STMMAC_ETH=y +# CONFIG_STMMAC_SELFTESTS is not set +CONFIG_STMMAC_PLATFORM=y +# CONFIG_DWMAC_DWC_QOS_ETH is not set +CONFIG_DWMAC_GENERIC=y +# CONFIG_DWMAC_INTEL_PLAT is not set +CONFIG_DWMAC_LOONGSON=m +# CONFIG_STMMAC_PCI is not set # CONFIG_NET_VENDOR_SUN is not set # 
CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TI is not set +CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set # CONFIG_NET_VENDOR_VIA is not set +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_LIBWX=m CONFIG_NGBE=m CONFIG_TXGBE=m # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_XILINX is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLINK=y +CONFIG_PHYLIB=y +CONFIG_SWPHY=y CONFIG_LED_TRIGGER_PHY=y +CONFIG_PHYLIB_LEDS=y +CONFIG_FIXED_PHY=y CONFIG_SFP=y + +# +# MII PHY device drivers +# CONFIG_AMD_PHY=m +# CONFIG_ADIN_PHY is not set +# CONFIG_ADIN1100_PHY is not set CONFIG_AQUANTIA_PHY=m +# CONFIG_AX88796B_PHY is not set CONFIG_BROADCOM_PHY=m +# CONFIG_BCM54140_PHY is not set CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM84881_PHY is not set CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BCM_NET_PHYPTP=m CONFIG_CICADA_PHY=m CONFIG_CORTINA_PHY=m CONFIG_DAVICOM_PHY=m @@ -908,43 +2743,127 @@ CONFIG_INTEL_XWAY_PHY=m CONFIG_LSI_ET1011C_PHY=m CONFIG_MARVELL_PHY=m CONFIG_MARVELL_10G_PHY=y +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m CONFIG_MICROCHIP_T1_PHY=m CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is not set CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m CONFIG_RENESAS_PHY=m CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m CONFIG_STE10XP=m CONFIG_TERANETICS_PHY=m CONFIG_DP83822_PHY=m CONFIG_DP83TC811_PHY=m CONFIG_DP83848_PHY=m CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set CONFIG_VITESSE_PHY=m CONFIG_XILINX_GMII2RGMII=m CONFIG_MICREL_KS8995MA=m +# CONFIG_PSE_CONTROLLER is not 
set +CONFIG_CAN_DEV=m CONFIG_CAN_VCAN=m +# CONFIG_CAN_VXCAN is not set +CONFIG_CAN_NETLINK=y +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_CAN327 is not set +# CONFIG_CAN_FLEXCAN is not set +# CONFIG_CAN_GRCAN is not set +# CONFIG_CAN_KVASER_PCIEFD is not set CONFIG_CAN_SLCAN=m CONFIG_CAN_C_CAN=m CONFIG_CAN_C_CAN_PLATFORM=m CONFIG_CAN_C_CAN_PCI=m CONFIG_CAN_CC770=m +# CONFIG_CAN_CC770_ISA is not set CONFIG_CAN_CC770_PLATFORM=m +# CONFIG_CAN_CTUCANFD_PCI is not set +# CONFIG_CAN_CTUCANFD_PLATFORM is not set +# CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +# CONFIG_CAN_PEAK_PCIEFD is not set CONFIG_CAN_SJA1000=m CONFIG_CAN_EMS_PCI=m +# CONFIG_CAN_F81601 is not set CONFIG_CAN_KVASER_PCI=m CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y CONFIG_CAN_PLX_PCI=m +# CONFIG_CAN_SJA1000_ISA is not set CONFIG_CAN_SJA1000_PLATFORM=m CONFIG_CAN_SOFTING=m + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set +# CONFIG_CAN_MCP251XFD is not set +# end of CAN SPI interfaces + +# +# CAN USB interfaces +# CONFIG_CAN_8DEV_USB=m CONFIG_CAN_EMS_USB=m +# CONFIG_CAN_ESD_USB is not set +# CONFIG_CAN_ETAS_ES58X is not set +# CONFIG_CAN_F81604 is not set +# CONFIG_CAN_GS_USB is not set CONFIG_CAN_KVASER_USB=m +# CONFIG_CAN_MCBA_USB is not set CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set +# end of CAN USB interfaces + +# CONFIG_CAN_DEBUG_DEVICES is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_FWNODE_MDIO=y +CONFIG_OF_MDIO=y +CONFIG_ACPI_MDIO=y +CONFIG_MDIO_DEVRES=y CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_BCM_UNIMAC is not set +CONFIG_MDIO_CAVIUM=m +# CONFIG_MDIO_GPIO is not set +# CONFIG_MDIO_HISI_FEMAC is not set +CONFIG_MDIO_I2C=y +# CONFIG_MDIO_MVUSB is not set CONFIG_MDIO_MSCC_MIIM=m +# CONFIG_MDIO_OCTEON is not set +# CONFIG_MDIO_IPQ4019 is not set +# CONFIG_MDIO_IPQ8064 is not set CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +# 
CONFIG_MDIO_BUS_MUX_MMIOREG is not set + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=y +# end of PCS device drivers + +# CONFIG_PLIP is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -953,31 +2872,47 @@ CONFIG_PPP_MPPE=m CONFIG_PPP_MULTILINK=y CONFIG_PPPOATM=m CONFIG_PPPOE=m +# CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# CONFIG_PPPOE_HASH_BITS_8 is not set +CONFIG_PPPOE_HASH_BITS=4 CONFIG_PPTP=m CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m CONFIG_SLIP=m +CONFIG_SLHC=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y CONFIG_USB_CATC=m CONFIG_USB_KAWETH=m CONFIG_USB_PEGASUS=m CONFIG_USB_RTL8150=m CONFIG_USB_RTL8152=m CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m # CONFIG_USB_NET_AX8817X is not set # CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDCETHER=m CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m CONFIG_USB_NET_HUAWEI_CDC_NCM=m CONFIG_USB_NET_CDC_MBIM=m CONFIG_USB_NET_DM9601=m +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set CONFIG_USB_NET_SMSC75XX=m CONFIG_USB_NET_SMSC95XX=m CONFIG_USB_NET_GL620A=m # CONFIG_USB_NET_NET1080 is not set CONFIG_USB_NET_PLUSB=m CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m CONFIG_USB_ALI_M5632=y CONFIG_USB_AN2720=y # CONFIG_USB_BELKIN is not set @@ -990,41 +2925,155 @@ CONFIG_USB_NET_KALMIA=m CONFIG_USB_NET_QMI_WWAN=m CONFIG_USB_HSO=m CONFIG_USB_NET_INT51X1=m +# CONFIG_USB_CDC_PHONET is not set CONFIG_USB_IPHETH=m CONFIG_USB_SIERRA_NET=m CONFIG_USB_VL600=m CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set +CONFIG_USB_RTL8153_ECM=m +CONFIG_WLAN=y # CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K is not set +# CONFIG_ATH5K_PCI is not set +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m 
+CONFIG_ATH9K_BTCOEX_SUPPORT=y CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y CONFIG_ATH9K_AHB=y +# CONFIG_ATH9K_DEBUGFS is not set +# CONFIG_ATH9K_DYNACK is not set CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_RFKILL=y +# CONFIG_ATH9K_CHANNEL_CONTEXT is not set +CONFIG_ATH9K_PCOEM=y +# CONFIG_ATH9K_PCI_NO_EEPROM is not set CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +# CONFIG_ATH9K_HWRNG is not set +# CONFIG_CARL9170 is not set +# CONFIG_ATH6KL is not set +# CONFIG_AR5523 is not set +# CONFIG_WIL6210 is not set CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y CONFIG_ATH10K_PCI=m +# CONFIG_ATH10K_AHB is not set +# CONFIG_ATH10K_SDIO is not set +# CONFIG_ATH10K_USB is not set +# CONFIG_ATH10K_DEBUG is not set +# CONFIG_ATH10K_DEBUGFS is not set +# CONFIG_ATH10K_TRACING is not set +# CONFIG_WCN36XX is not set +# CONFIG_ATH11K is not set +# CONFIG_ATH12K is not set # CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +CONFIG_BRCMUTIL=m CONFIG_BRCMSMAC=m +CONFIG_BRCMSMAC_LEDS=y CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y CONFIG_BRCMFMAC_USB=y CONFIG_BRCMFMAC_PCIE=y +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set # CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y CONFIG_IWLDVM=m CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y + +# +# Debugging Options +# +# CONFIG_IWLWIFI_DEBUG is not set +CONFIG_IWLWIFI_DEVICE_TRACING=y +# end of Debugging Options + # CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_WLAN_VENDOR_MARVELL=y +# CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set CONFIG_MWIFIEX=m CONFIG_MWIFIEX_SDIO=m CONFIG_MWIFIEX_PCIE=m CONFIG_MWIFIEX_USB=m +# CONFIG_MWL8K is not set +CONFIG_WLAN_VENDOR_MEDIATEK=y CONFIG_MT7601U=m +CONFIG_MT76_CORE=m 
+CONFIG_MT76_LEDS=y +CONFIG_MT76_USB=m +CONFIG_MT76x02_LIB=m +CONFIG_MT76x02_USB=m +CONFIG_MT76x0_COMMON=m CONFIG_MT76x0U=m +# CONFIG_MT76x0E is not set +CONFIG_MT76x2_COMMON=m +# CONFIG_MT76x2E is not set CONFIG_MT76x2U=m +# CONFIG_MT7603E is not set +# CONFIG_MT7615E is not set +# CONFIG_MT7663U is not set +# CONFIG_MT7663S is not set +# CONFIG_MT7915E is not set +# CONFIG_MT7921E is not set +# CONFIG_MT7921S is not set +# CONFIG_MT7921U is not set +# CONFIG_MT7996E is not set +CONFIG_WLAN_VENDOR_MICROCHIP=y +# CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set +CONFIG_WLAN_VENDOR_PURELIFI=y +# CONFIG_PLFXLC is not set +CONFIG_WLAN_VENDOR_RALINK=y CONFIG_RT2X00=m +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +# CONFIG_RT2500USB is not set +# CONFIG_RT73USB is not set CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y CONFIG_RT2800USB_RT3573=y CONFIG_RT2800USB_RT53XX=y CONFIG_RT2800USB_RT55XX=y CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WLAN_VENDOR_REALTEK=y +# CONFIG_RTL8180 is not set +# CONFIG_RTL8187 is not set +CONFIG_RTL_CARDS=m CONFIG_RTL8192CE=m CONFIG_RTL8192SE=m CONFIG_RTL8192DE=m @@ -1034,29 +3083,77 @@ CONFIG_RTL8188EE=m CONFIG_RTL8192EE=m CONFIG_RTL8821AE=m CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m # CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m CONFIG_RTL8XXXU=m +# CONFIG_RTL8XXXU_UNTESTED is not set +# CONFIG_RTW88 is not set +# CONFIG_RTW89 is not set # CONFIG_WLAN_VENDOR_RSI is not set 
+CONFIG_WLAN_VENDOR_SILABS=y +# CONFIG_WFX is not set # CONFIG_WLAN_VENDOR_ST is not set # CONFIG_WLAN_VENDOR_TI is not set +CONFIG_WLAN_VENDOR_ZYDAS=y +# CONFIG_USB_ZD1201 is not set CONFIG_ZD1211RW=m +# CONFIG_ZD1211RW_DEBUG is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_QTNFMAC_PCIE is not set CONFIG_USB_NET_RNDIS_WLAN=m CONFIG_MAC80211_HWSIM=m +# CONFIG_VIRT_WIFI is not set CONFIG_WAN=y CONFIG_HDLC=m CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set CONFIG_HDLC_CISCO=m CONFIG_HDLC_FR=m CONFIG_HDLC_PPP=m +# CONFIG_HDLC_X25 is not set +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +# CONFIG_LAPBETHER is not set +CONFIG_IEEE802154_DRIVERS=m CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + CONFIG_VMXNET3=m CONFIG_FUJITSU_ES=m CONFIG_USB4_NET=m CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m CONFIG_ISDN=y +CONFIG_ISDN_CAPI=y +CONFIG_CAPI_TRACE=y +CONFIG_ISDN_CAPI_MIDDLEWARE=y CONFIG_MISDN=m CONFIG_MISDN_DSP=m CONFIG_MISDN_L1OIP=m + +# +# mISDN hardware drivers +# CONFIG_MISDN_HFCPCI=m CONFIG_MISDN_HFCMULTI=m CONFIG_MISDN_HFCUSB=m @@ -1065,161 +3162,777 @@ CONFIG_MISDN_SPEEDFAX=m CONFIG_MISDN_INFINEON=m CONFIG_MISDN_W6692=m CONFIG_MISDN_NETJET=m +CONFIG_MISDN_HDLC=m +CONFIG_MISDN_IPAC=m +CONFIG_MISDN_ISAR=m + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=y +# CONFIG_INPUT_MATRIXKMAP is not set +CONFIG_INPUT_VIVALDIFMAP=y + +# +# Userland interfaces +# CONFIG_INPUT_MOUSEDEV=y CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set CONFIG_KEYBOARD_XTKBD=m +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y CONFIG_MOUSE_SERIAL=m CONFIG_MOUSE_APPLETOUCH=m CONFIG_MOUSE_BCM5974=m CONFIG_MOUSE_CYAPA=m CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y CONFIG_MOUSE_ELAN_I2C_SMBUS=y CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set CONFIG_MOUSE_SYNAPTICS_I2C=m CONFIG_MOUSE_SYNAPTICS_USB=m +# 
CONFIG_INPUT_JOYSTICK is not set CONFIG_INPUT_TABLET=y CONFIG_TABLET_USB_ACECAD=m CONFIG_TABLET_USB_AIPTEK=m +# CONFIG_TABLET_USB_HANWANG is not set CONFIG_TABLET_USB_KBTAB=m +# CONFIG_TABLET_USB_PEGASUS is not set CONFIG_TABLET_SERIAL_WACOM4=m CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_AR1021_I2C is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP5 is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set +# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_ILITEK is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set CONFIG_TOUCHSCREEN_ELO=m CONFIG_TOUCHSCREEN_WACOM_W8001=m CONFIG_TOUCHSCREEN_WACOM_I2C=m +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MSG2638 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is 
not set +# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set +# CONFIG_TOUCHSCREEN_IMAGIS is not set +# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_WM97XX is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SUR40 is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +# CONFIG_TOUCHSCREEN_IQS5XX is not set +# CONFIG_TOUCHSCREEN_IQS7211 is not set +# CONFIG_TOUCHSCREEN_ZINITIX is not set +# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATMEL_CAPTOUCH is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_GPIO_VIBRA is not set CONFIG_INPUT_ATI_REMOTE2=m CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set CONFIG_INPUT_POWERMATE=m CONFIG_INPUT_YEALINK=m CONFIG_INPUT_CM109=m CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# 
CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +# CONFIG_INPUT_DA7280_HAPTICS is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_IQS269A is not set +# CONFIG_INPUT_IQS626A is not set +# CONFIG_INPUT_IQS7222 is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_RMI4_CORE=m CONFIG_RMI4_I2C=m CONFIG_RMI4_SPI=m CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y CONFIG_RMI4_F34=y +# CONFIG_RMI4_F3A is not set +# CONFIG_RMI4_F54 is not set CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y CONFIG_SERIO_SERPORT=m +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y CONFIG_SERIO_RAW=m CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_LEGACY_TIOCSTI=y +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_16550A_VARIANTS=y +# CONFIG_SERIAL_8250_FINTEK is not set CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCILIB=y +CONFIG_SERIAL_8250_PCI=y 
+CONFIG_SERIAL_8250_EXAR=y CONFIG_SERIAL_8250_NR_UARTS=16 CONFIG_SERIAL_8250_RUNTIME_UARTS=16 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_PCI1XXXX is not set CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_PERICOM=y +# CONFIG_SERIAL_OF_PLATFORM is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_SPRD is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set CONFIG_N_HDLC=m CONFIG_N_GSM=m CONFIG_NOZOMI=m +# CONFIG_NULL_TTY is not set +CONFIG_HVC_DRIVER=y +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set CONFIG_PPDEV=m CONFIG_VIRTIO_CONSOLE=y CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y CONFIG_IPMI_PANIC_EVENT=y CONFIG_IPMI_PANIC_STRING=y CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m CONFIG_IPMI_SSIF=m CONFIG_IPMI_WATCHDOG=m CONFIG_IPMI_POWEROFF=m CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_TIMERIOMEM=m +# CONFIG_HW_RANDOM_BA431 is not set CONFIG_HW_RANDOM_VIRTIO=m +# CONFIG_HW_RANDOM_CCTRNG is not set +# CONFIG_HW_RANDOM_XIPHERA is not set +# 
CONFIG_APPLICOM is not set +CONFIG_DEVMEM=y +CONFIG_DEVPORT=y +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=m +# CONFIG_TCG_TIS is not set CONFIG_TCG_TIS_SPI=m +# CONFIG_TCG_TIS_SPI_CR50 is not set +# CONFIG_TCG_TIS_I2C is not set +# CONFIG_TCG_TIS_I2C_CR50 is not set CONFIG_TCG_TIS_I2C_ATMEL=m CONFIG_TCG_TIS_I2C_INFINEON=m CONFIG_TCG_TIS_I2C_NUVOTON=m CONFIG_TCG_ATMEL=m CONFIG_TCG_INFINEON=m +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +CONFIG_TCG_TIS_ST33ZP24=m CONFIG_TCG_TIS_ST33ZP24_I2C=m CONFIG_TCG_TIS_ST33ZP24_SPI=m +# CONFIG_XILLYBUS is not set +# CONFIG_XILLYUSB is not set +# end of Character devices + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=y +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=y +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set CONFIG_I2C_AMD756=m CONFIG_I2C_AMD8111=m +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_I801 is not set CONFIG_I2C_ISCH=m CONFIG_I2C_PIIX4=y CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set CONFIG_I2C_SIS96X=m CONFIG_I2C_VIA=m CONFIG_I2C_VIAPRO=m +# CONFIG_I2C_ZHAOXIN is not set + +# +# ACPI drivers +# CONFIG_I2C_SCMI=m +# CONFIG_I2C_ZHAOXIN_SMBUS is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=y +# CONFIG_I2C_DESIGNWARE_SLAVE is not set CONFIG_I2C_DESIGNWARE_PLATFORM=y +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set CONFIG_I2C_GPIO=y +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set CONFIG_I2C_LS2X=m +# CONFIG_I2C_OCORES is not set CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_RK3X is not set CONFIG_I2C_SIMTEC=m +# CONFIG_I2C_XILINX is 
not set + +# +# External I2C/SMBus adapter drivers +# CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set CONFIG_I2C_PARPORT=m +# CONFIG_I2C_PCI1XXXX is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set CONFIG_I2C_TINY_USB=m CONFIG_I2C_VIPERBOARD=m + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_VIRTIO is not set +# end of I2C Hardware Bus support + CONFIG_I2C_STUB=m +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +CONFIG_SPI_MEM=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_CADENCE_XSPI is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +CONFIG_SPI_LOONGSON_CORE=y CONFIG_SPI_LOONGSON_PCI=y CONFIG_SPI_LOONGSON_PLATFORM=m +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PCI1XXXX is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_SN_F_OSPI is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# 
CONFIG_PPS_CLIENT_KTIMER is not set CONFIG_PPS_CLIENT_LDISC=m CONFIG_PPS_CLIENT_PARPORT=m CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y CONFIG_DP83640_PHY=m +# CONFIG_PTP_1588_CLOCK_INES is not set +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_CY8C95X0 is not set CONFIG_PINCTRL_LOONGSON2=y +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set +# CONFIG_PINCTRL_OCELOT is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_STMFX is not set +# CONFIG_PINCTRL_SX150X is not set + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_CADENCE is not set +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_LOGICVC is not set CONFIG_GPIO_LOONGSON_64BIT=y +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_SIFIVE is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_XILINX is not set +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not 
set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# CONFIG_GPIO_VIPERBOARD=m +# end of USB GPIO expanders + +# +# Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +# CONFIG_W1 is not set CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_GPIO is not set +# CONFIG_POWER_RESET_GPIO_RESTART is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_RESTART is not set +# CONFIG_POWER_RESET_SYSCON is not set +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# 
CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_MAX77976 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not set +# CONFIG_CHARGER_BQ256XX is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set CONFIG_SENSORS_AD7414=m CONFIG_SENSORS_AD7418=m CONFIG_SENSORS_ADM1025=m CONFIG_SENSORS_ADM1026=m CONFIG_SENSORS_ADM1029=m CONFIG_SENSORS_ADM1031=m +# CONFIG_SENSORS_ADM1177 is not set CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +# CONFIG_SENSORS_ADT7310 is not set CONFIG_SENSORS_ADT7410=m CONFIG_SENSORS_ADT7411=m CONFIG_SENSORS_ADT7462=m CONFIG_SENSORS_ADT7470=m CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set CONFIG_SENSORS_ASC7621=m +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set CONFIG_SENSORS_ATXP1=m +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set CONFIG_SENSORS_DS620=m CONFIG_SENSORS_DS1621=m CONFIG_SENSORS_I5K_AMB=m CONFIG_SENSORS_F71805F=m CONFIG_SENSORS_F71882FG=m CONFIG_SENSORS_F75375S=m +# CONFIG_SENSORS_FTSTEUTATES is not set CONFIG_SENSORS_GL518SM=m CONFIG_SENSORS_GL520SM=m CONFIG_SENSORS_G760A=m +# CONFIG_SENSORS_G762 is not set +# 
CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set CONFIG_SENSORS_IBMAEM=m CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_IIO_HWMON is not set CONFIG_SENSORS_IT87=m CONFIG_SENSORS_JC42=m +# CONFIG_SENSORS_POWR1220 is not set CONFIG_SENSORS_LINEAGE=m +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set CONFIG_SENSORS_LTC4151=m CONFIG_SENSORS_LTC4215=m +# CONFIG_SENSORS_LTC4222 is not set CONFIG_SENSORS_LTC4245=m +# CONFIG_SENSORS_LTC4260 is not set CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX127 is not set CONFIG_SENSORS_MAX16065=m CONFIG_SENSORS_MAX1619=m CONFIG_SENSORS_MAX1668=m CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set CONFIG_SENSORS_MAX6639=m CONFIG_SENSORS_MAX6650=m CONFIG_SENSORS_MAX6697=m +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MC34VR500 is not set CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +# CONFIG_SENSORS_ADCXX is not set CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set CONFIG_SENSORS_LM73=m CONFIG_SENSORS_LM75=m CONFIG_SENSORS_LM77=m @@ -1237,72 +3950,322 @@ CONFIG_SENSORS_LM95245=m CONFIG_SENSORS_PC87360=m CONFIG_SENSORS_PC87427=m CONFIG_SENSORS_NTC_THERMISTOR=m +# CONFIG_SENSORS_NCT6683 is not set +CONFIG_SENSORS_NCT6775_CORE=m CONFIG_SENSORS_NCT6775=m +# CONFIG_SENSORS_NCT6775_I2C is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not 
set CONFIG_SENSORS_PCF8591=m CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +# CONFIG_SENSORS_ACBEL_FSG032 is not set +# CONFIG_SENSORS_ADM1266 is not set CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_BEL_PFE is not set +# CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set +# CONFIG_SENSORS_FSP_3Y is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_DPS920AB is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR36021 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set CONFIG_SENSORS_LM25066=m +# CONFIG_SENSORS_LT7182S is not set CONFIG_SENSORS_LTC2978=m +# CONFIG_SENSORS_LTC3815 is not set +# CONFIG_SENSORS_MAX15301 is not set CONFIG_SENSORS_MAX16064=m +# CONFIG_SENSORS_MAX16601 is not set +# CONFIG_SENSORS_MAX20730 is not set +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX31785 is not set CONFIG_SENSORS_MAX34440=m CONFIG_SENSORS_MAX8688=m +# CONFIG_SENSORS_MP2888 is not set +# CONFIG_SENSORS_MP2975 is not set +# CONFIG_SENSORS_MP5023 is not set +# CONFIG_SENSORS_MPQ7932 is not set +# CONFIG_SENSORS_PIM4328 is not set +# CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set CONFIG_SENSORS_UCD9000=m CONFIG_SENSORS_UCD9200=m +# CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set CONFIG_SENSORS_ZL6100=m +# CONFIG_SENSORS_PWM_FAN is not set +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set CONFIG_SENSORS_SHT15=m CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +# CONFIG_SENSORS_SHTC1 is not set CONFIG_SENSORS_SIS5595=m 
CONFIG_SENSORS_DME1737=m CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set CONFIG_SENSORS_EMC6W201=m CONFIG_SENSORS_SMSC47M1=m CONFIG_SENSORS_SMSC47M192=m CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m CONFIG_SENSORS_SCH5627=m CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_ADC128D818 is not set CONFIG_SENSORS_ADS7828=m +# CONFIG_SENSORS_ADS7871 is not set CONFIG_SENSORS_AMC6821=m CONFIG_SENSORS_INA209=m CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set CONFIG_SENSORS_THMC50=m CONFIG_SENSORS_TMP102=m +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set CONFIG_SENSORS_TMP401=m CONFIG_SENSORS_TMP421=m +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set CONFIG_SENSORS_VIA686A=m CONFIG_SENSORS_VT1211=m CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set CONFIG_SENSORS_W83781D=m CONFIG_SENSORS_W83791D=m CONFIG_SENSORS_W83792D=m CONFIG_SENSORS_W83793=m CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set CONFIG_SENSORS_W83L785TS=m CONFIG_SENSORS_W83L786NG=m CONFIG_SENSORS_W83627HF=m CONFIG_SENSORS_W83627EHF=m + +# +# ACPI drivers +# CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +# CONFIG_THERMAL_GOV_USER_SPACE is not set +# CONFIG_CPU_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set CONFIG_THERMAL_EMULATION=y +# CONFIG_THERMAL_MMIO is not set +# CONFIG_GENERIC_ADC_THERMAL is 
not set CONFIG_LOONGSON2_THERMAL=m CONFIG_WATCHDOG=y CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# CONFIG_SOFT_WATCHDOG=m CONFIG_GPIO_WATCHDOG=m CONFIG_WDAT_WDT=m +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set CONFIG_ALIM7101_WDT=m CONFIG_I6300ESB_WDT=m +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# CONFIG_PCIPCWATCHDOG=m CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y CONFIG_BCMA_DRIVER_GMAC_CMN=y CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_MAX5970 is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_GATEWORKS_GSC is not set +# 
CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_LPC_ICH is not set +CONFIG_LPC_SCH=m +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77541 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set CONFIG_MFD_VIPERBOARD=m +# CONFIG_MFD_NTXEC is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK8XX_I2C is not set +# CONFIG_MFD_RK8XX_SPI is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set CONFIG_MFD_SM501=m CONFIG_MFD_SM501_GPIO=y +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# 
CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS65219 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TQMX86 is not set CONFIG_MFD_VX855=m +# CONFIG_MFD_LOCHNAGAR is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_ROHM_BD71828 is not set +# CONFIG_MFD_ROHM_BD957XMUF is not set +# CONFIG_MFD_STPMIC1 is not set +# CONFIG_MFD_STMFX is not set +# CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_QCOM_PM8008 is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set +# CONFIG_MFD_RSMU_I2C is not set +# CONFIG_MFD_RSMU_SPI is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set CONFIG_RC_CORE=m CONFIG_LIRC=y +CONFIG_RC_MAP=m CONFIG_RC_DECODERS=y CONFIG_IR_IMON_DECODER=m CONFIG_IR_JVC_DECODER=m @@ -1310,6 +4273,7 @@ CONFIG_IR_MCE_KBD_DECODER=m CONFIG_IR_NEC_DECODER=m CONFIG_IR_RC5_DECODER=m CONFIG_IR_RC6_DECODER=m +# CONFIG_IR_RCMM_DECODER is not set CONFIG_IR_SANYO_DECODER=m CONFIG_IR_SHARP_DECODER=m CONFIG_IR_SONY_DECODER=m @@ -1317,31 +4281,114 @@ CONFIG_IR_XMP_DECODER=m CONFIG_RC_DEVICES=y CONFIG_IR_ENE=m CONFIG_IR_FINTEK=m +# CONFIG_IR_GPIO_CIR is not set +# CONFIG_IR_GPIO_TX is not set +# CONFIG_IR_HIX5HD2 is not set +# CONFIG_IR_IGORPLUGUSB is not set CONFIG_IR_IGUANA=m CONFIG_IR_IMON=m CONFIG_IR_IMON_RAW=m CONFIG_IR_ITE_CIR=m CONFIG_IR_MCEUSB=m CONFIG_IR_NUVOTON=m 
+# CONFIG_IR_PWM_TX is not set CONFIG_IR_REDRAT3=m CONFIG_IR_SERIAL=m CONFIG_IR_SERIAL_TRANSMITTER=y +# CONFIG_IR_SPI is not set CONFIG_IR_STREAMZAP=m +# CONFIG_IR_TOY is not set CONFIG_IR_TTUSBIR=m CONFIG_RC_ATI_REMOTE=m +# CONFIG_RC_LOOPBACK is not set +# CONFIG_RC_XBOX_DVD is not set +CONFIG_CEC_CORE=m + +# +# CEC support +# +# CONFIG_MEDIA_CEC_RC is not set +CONFIG_MEDIA_CEC_SUPPORT=y +# CONFIG_CEC_CH7322 is not set CONFIG_USB_PULSE8_CEC=m CONFIG_USB_RAINSHADOW_CEC=m +# end of CEC support + CONFIG_MEDIA_SUPPORT=m -CONFIG_DVB_MAX_ADAPTERS=8 -CONFIG_MEDIA_USB_SUPPORT=y -CONFIG_USB_GSPCA=m +# CONFIG_MEDIA_SUPPORT_FILTER is not set +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set + +# +# Media device types +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +CONFIG_MEDIA_SDR_SUPPORT=y +CONFIG_MEDIA_PLATFORM_SUPPORT=y +CONFIG_MEDIA_TEST_SUPPORT=y +# end of Media device types + +# +# Media core support +# +CONFIG_VIDEO_DEV=m +CONFIG_MEDIA_CONTROLLER=y +CONFIG_DVB_CORE=m +# end of Media core support + +# +# Video4Linux options +# +CONFIG_VIDEO_V4L2_I2C=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEO_TUNER=m +CONFIG_V4L2_FWNODE=m +CONFIG_V4L2_ASYNC=m +# end of Video4Linux options + +# +# Media controller options +# +CONFIG_MEDIA_CONTROLLER_DVB=y +# end of Media controller options + +# +# Digital TV options +# +# CONFIG_DVB_MMAP is not set +CONFIG_DVB_NET=y +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_DVB_DYNAMIC_MINORS=y +# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set +# CONFIG_DVB_ULE_DEBUG is not set +# end of Digital TV options + +# +# Media drivers +# + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_GSPCA=m CONFIG_USB_GSPCA_BENQ=m CONFIG_USB_GSPCA_CONEX=m CONFIG_USB_GSPCA_CPIA1=m +# CONFIG_USB_GSPCA_DTCS033 is not set CONFIG_USB_GSPCA_ETOMS=m CONFIG_USB_GSPCA_FINEPIX=m 
CONFIG_USB_GSPCA_JEILINJ=m CONFIG_USB_GSPCA_JL2005BCD=m +# CONFIG_USB_GSPCA_KINECT is not set CONFIG_USB_GSPCA_KONICA=m CONFIG_USB_GSPCA_MARS=m CONFIG_USB_GSPCA_MR97310A=m @@ -1368,10 +4415,12 @@ CONFIG_USB_GSPCA_SQ905=m CONFIG_USB_GSPCA_SQ905C=m CONFIG_USB_GSPCA_SQ930X=m CONFIG_USB_GSPCA_STK014=m +# CONFIG_USB_GSPCA_STK1135 is not set CONFIG_USB_GSPCA_STV0680=m CONFIG_USB_GSPCA_SUNPLUS=m CONFIG_USB_GSPCA_T613=m CONFIG_USB_GSPCA_TOPRO=m +# CONFIG_USB_GSPCA_TOUPTEK is not set CONFIG_USB_GSPCA_TV8532=m CONFIG_USB_GSPCA_VC032X=m CONFIG_USB_GSPCA_VICAM=m @@ -1381,31 +4430,62 @@ CONFIG_USB_GL860=m CONFIG_USB_M5602=m CONFIG_USB_STV06XX=m CONFIG_USB_PWC=m +# CONFIG_USB_PWC_DEBUG is not set +CONFIG_USB_PWC_INPUT_EVDEV=y CONFIG_USB_S2255=m +# CONFIG_VIDEO_USBTV is not set CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y + +# +# Analog TV USB devices +# +# CONFIG_VIDEO_GO7007 is not set CONFIG_VIDEO_HDPVR=m CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_PVRUSB2_SYSFS=y +CONFIG_VIDEO_PVRUSB2_DVB=y +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set +# CONFIG_VIDEO_STK1160 is not set + +# +# Analog/digital TV USB devices +# CONFIG_VIDEO_AU0828=m +CONFIG_VIDEO_AU0828_V4L2=y +# CONFIG_VIDEO_AU0828_RC is not set + +# +# Digital TV USB devices +# +# CONFIG_DVB_AS102 is not set CONFIG_DVB_B2C2_FLEXCOP_USB=m +# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set CONFIG_DVB_USB_V2=m CONFIG_DVB_USB_AF9035=m CONFIG_DVB_USB_ANYSEE=m CONFIG_DVB_USB_AU6610=m CONFIG_DVB_USB_AZ6007=m CONFIG_DVB_USB_CE6230=m +# CONFIG_DVB_USB_DVBSKY is not set CONFIG_DVB_USB_EC168=m CONFIG_DVB_USB_GL861=m CONFIG_DVB_USB_LME2510=m CONFIG_DVB_USB_MXL111SF=m +# CONFIG_DVB_USB_ZD1301 is not set CONFIG_DVB_USB=m +# CONFIG_DVB_USB_DEBUG is not set CONFIG_DVB_USB_A800=m CONFIG_DVB_USB_AF9005=m CONFIG_DVB_USB_AF9005_REMOTE=m CONFIG_DVB_USB_AZ6027=m CONFIG_DVB_USB_CINERGY_T2=m CONFIG_DVB_USB_CXUSB=m +# CONFIG_DVB_USB_CXUSB_ANALOG is not set CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIB3000MC=m CONFIG_DVB_USB_DIBUSB_MB=m +# 
CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set CONFIG_DVB_USB_DIBUSB_MC=m CONFIG_DVB_USB_DIGITV=m CONFIG_DVB_USB_DTT200U=m @@ -1424,103 +4504,1042 @@ CONFIG_DVB_USB_VP7045=m CONFIG_SMS_USB_DRV=m CONFIG_DVB_TTUSB_BUDGET=m CONFIG_DVB_TTUSB_DEC=m + +# +# Webcam, TV (analog/digital) USB devices +# CONFIG_VIDEO_EM28XX=m +# CONFIG_VIDEO_EM28XX_V4L2 is not set CONFIG_VIDEO_EM28XX_ALSA=m CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_VIDEO_EM28XX_RC=m + +# +# Software defined radio USB devices +# +# CONFIG_USB_AIRSPY is not set +# CONFIG_USB_HACKRF is not set +# CONFIG_USB_MSI2500 is not set CONFIG_MEDIA_PCI_SUPPORT=y + +# +# Media capture support +# +# CONFIG_VIDEO_SOLO6X10 is not set +# CONFIG_VIDEO_TW5864 is not set +# CONFIG_VIDEO_TW68 is not set +# CONFIG_VIDEO_TW686X is not set +# CONFIG_VIDEO_ZORAN is not set + +# +# Media capture/analog TV support +# +# CONFIG_VIDEO_DT3155 is not set CONFIG_VIDEO_IVTV=m +# CONFIG_VIDEO_IVTV_ALSA is not set CONFIG_VIDEO_FB_IVTV=m +# CONFIG_VIDEO_HEXIUM_GEMINI is not set +# CONFIG_VIDEO_HEXIUM_ORION is not set +# CONFIG_VIDEO_MXB is not set + +# +# Media capture/analog/hybrid TV support +# CONFIG_VIDEO_BT848=m CONFIG_DVB_BT8XX=m CONFIG_VIDEO_CX18=m +# CONFIG_VIDEO_CX18_ALSA is not set CONFIG_VIDEO_CX23885=m CONFIG_MEDIA_ALTERA_CI=m +# CONFIG_VIDEO_CX25821 is not set CONFIG_VIDEO_CX88=m CONFIG_VIDEO_CX88_ALSA=m CONFIG_VIDEO_CX88_BLACKBIRD=m CONFIG_VIDEO_CX88_DVB=m # CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set +CONFIG_VIDEO_CX88_MPEG=m CONFIG_VIDEO_SAA7134=m CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_RC=y CONFIG_VIDEO_SAA7134_DVB=m CONFIG_VIDEO_SAA7164=m + +# +# Media digital TV PCI Adapters +# CONFIG_DVB_B2C2_FLEXCOP_PCI=m +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set CONFIG_DVB_DDBRIDGE=m +# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set CONFIG_DVB_DM1105=m CONFIG_MANTIS_CORE=m CONFIG_DVB_MANTIS=m CONFIG_DVB_HOPPER=m +# CONFIG_DVB_NETUP_UNIDVB is not set CONFIG_DVB_NGENE=m CONFIG_DVB_PLUTO2=m CONFIG_DVB_PT1=m +# CONFIG_DVB_PT3 is not set +# 
CONFIG_DVB_SMIPCIE is not set CONFIG_DVB_BUDGET_CORE=m CONFIG_DVB_BUDGET=m CONFIG_DVB_BUDGET_CI=m CONFIG_DVB_BUDGET_AV=m +# CONFIG_IPU_BRIDGE is not set +CONFIG_RADIO_ADAPTERS=m +# CONFIG_RADIO_MAXIRADIO is not set +# CONFIG_RADIO_SAA7706H is not set +# CONFIG_RADIO_SHARK is not set +# CONFIG_RADIO_SHARK2 is not set +# CONFIG_RADIO_SI4713 is not set +CONFIG_RADIO_TEA575X=m +# CONFIG_RADIO_TEA5764 is not set +# CONFIG_RADIO_TEF6862 is not set +# CONFIG_RADIO_WL1273 is not set +# CONFIG_USB_DSBR is not set +# CONFIG_USB_KEENE is not set +# CONFIG_USB_MA901 is not set +# CONFIG_USB_MR800 is not set +# CONFIG_USB_RAREMONO is not set +# CONFIG_RADIO_SI470X is not set +CONFIG_MEDIA_PLATFORM_DRIVERS=y +# CONFIG_V4L_PLATFORM_DRIVERS is not set +# CONFIG_SDR_PLATFORM_DRIVERS is not set +# CONFIG_DVB_PLATFORM_DRIVERS is not set +# CONFIG_V4L_MEM2MEM_DRIVERS is not set + +# +# Allegro DVT media platform drivers +# + +# +# Amlogic media platform drivers +# + +# +# Amphion drivers +# + +# +# Aspeed media platform drivers +# + +# +# Atmel media platform drivers +# + +# +# Cadence media platform drivers +# +# CONFIG_VIDEO_CADENCE_CSI2RX is not set +# CONFIG_VIDEO_CADENCE_CSI2TX is not set + +# +# Chips&Media media platform drivers +# + +# +# Intel media platform drivers +# + +# +# Marvell media platform drivers +# + +# +# Mediatek media platform drivers +# + +# +# Microchip Technology, Inc. 
media platform drivers +# + +# +# NVidia media platform drivers +# + +# +# NXP media platform drivers +# + +# +# Qualcomm media platform drivers +# + +# +# Renesas media platform drivers +# + +# +# Rockchip media platform drivers +# + +# +# Samsung media platform drivers +# + +# +# STMicroelectronics media platform drivers +# + +# +# Sunxi media platform drivers +# + +# +# Texas Instruments drivers +# + +# +# Verisilicon media platform drivers +# + +# +# VIA media platform drivers +# + +# +# Xilinx media platform drivers +# + +# +# MMC/SDIO DVB adapters +# CONFIG_SMS_SDIO_DRV=m +# CONFIG_V4L_TEST_DRIVERS is not set +# CONFIG_DVB_TEST_DRIVERS is not set + +# +# FireWire (IEEE 1394) Adapters +# CONFIG_DVB_FIREDTV=m +CONFIG_DVB_FIREDTV_INPUT=y +CONFIG_MEDIA_COMMON_OPTIONS=y + +# +# common driver options +# +CONFIG_CYPRESS_FIRMWARE=m +CONFIG_TTPCI_EEPROM=m +CONFIG_UVC_COMMON=m +CONFIG_VIDEO_CX2341X=m +CONFIG_VIDEO_TVEEPROM=m +CONFIG_DVB_B2C2_FLEXCOP=m +CONFIG_VIDEO_SAA7146=m +CONFIG_VIDEO_SAA7146_VV=m +CONFIG_SMS_SIANO_MDTV=m +CONFIG_SMS_SIANO_RC=y +# CONFIG_SMS_SIANO_DEBUGFS is not set +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_V4L2=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_VMALLOC=m +CONFIG_VIDEOBUF2_DMA_SG=m +CONFIG_VIDEOBUF2_DVB=m +# end of Media drivers + +# +# Media ancillary drivers +# +CONFIG_MEDIA_ATTACH=y +CONFIG_VIDEO_IR_I2C=m +CONFIG_VIDEO_CAMERA_SENSOR=y +# CONFIG_VIDEO_AR0521 is not set +# CONFIG_VIDEO_HI556 is not set +# CONFIG_VIDEO_HI846 is not set +# CONFIG_VIDEO_HI847 is not set +# CONFIG_VIDEO_IMX208 is not set +# CONFIG_VIDEO_IMX214 is not set +# CONFIG_VIDEO_IMX219 is not set +# CONFIG_VIDEO_IMX258 is not set +# CONFIG_VIDEO_IMX274 is not set +# CONFIG_VIDEO_IMX290 is not set +# CONFIG_VIDEO_IMX296 is not set +# CONFIG_VIDEO_IMX319 is not set +# CONFIG_VIDEO_IMX334 is not set +# CONFIG_VIDEO_IMX335 is not set +# CONFIG_VIDEO_IMX355 is not set +# CONFIG_VIDEO_IMX412 is not set +# CONFIG_VIDEO_IMX415 is not set +# CONFIG_VIDEO_MT9M001 is not 
set +# CONFIG_VIDEO_MT9M111 is not set +# CONFIG_VIDEO_MT9P031 is not set +# CONFIG_VIDEO_MT9T112 is not set +# CONFIG_VIDEO_MT9V011 is not set +# CONFIG_VIDEO_MT9V032 is not set +# CONFIG_VIDEO_MT9V111 is not set +# CONFIG_VIDEO_OG01A1B is not set +# CONFIG_VIDEO_OV01A10 is not set +# CONFIG_VIDEO_OV02A10 is not set +# CONFIG_VIDEO_OV08D10 is not set +# CONFIG_VIDEO_OV08X40 is not set +# CONFIG_VIDEO_OV13858 is not set +# CONFIG_VIDEO_OV13B10 is not set +# CONFIG_VIDEO_OV2640 is not set +# CONFIG_VIDEO_OV2659 is not set +# CONFIG_VIDEO_OV2680 is not set +# CONFIG_VIDEO_OV2685 is not set +# CONFIG_VIDEO_OV2740 is not set +# CONFIG_VIDEO_OV4689 is not set +# CONFIG_VIDEO_OV5640 is not set +# CONFIG_VIDEO_OV5645 is not set +# CONFIG_VIDEO_OV5647 is not set +# CONFIG_VIDEO_OV5648 is not set +# CONFIG_VIDEO_OV5670 is not set +# CONFIG_VIDEO_OV5675 is not set +# CONFIG_VIDEO_OV5693 is not set +# CONFIG_VIDEO_OV5695 is not set +# CONFIG_VIDEO_OV6650 is not set +# CONFIG_VIDEO_OV7251 is not set +# CONFIG_VIDEO_OV7640 is not set +# CONFIG_VIDEO_OV7670 is not set +# CONFIG_VIDEO_OV772X is not set +# CONFIG_VIDEO_OV7740 is not set +# CONFIG_VIDEO_OV8856 is not set +# CONFIG_VIDEO_OV8858 is not set +# CONFIG_VIDEO_OV8865 is not set +# CONFIG_VIDEO_OV9282 is not set +# CONFIG_VIDEO_OV9640 is not set +# CONFIG_VIDEO_OV9650 is not set +# CONFIG_VIDEO_OV9734 is not set +# CONFIG_VIDEO_RDACM20 is not set +# CONFIG_VIDEO_RDACM21 is not set +# CONFIG_VIDEO_RJ54N1 is not set +# CONFIG_VIDEO_S5C73M3 is not set +# CONFIG_VIDEO_S5K5BAF is not set +# CONFIG_VIDEO_S5K6A3 is not set +# CONFIG_VIDEO_ST_VGXY61 is not set +# CONFIG_VIDEO_CCS is not set +# CONFIG_VIDEO_ET8EK8 is not set + +# +# Lens drivers +# +# CONFIG_VIDEO_AD5820 is not set +# CONFIG_VIDEO_AK7375 is not set +# CONFIG_VIDEO_DW9714 is not set +# CONFIG_VIDEO_DW9719 is not set +# CONFIG_VIDEO_DW9768 is not set +# CONFIG_VIDEO_DW9807_VCM is not set +# end of Lens drivers + +# +# Flash devices +# +# CONFIG_VIDEO_ADP1653 is not 
set +# CONFIG_VIDEO_LM3560 is not set +# CONFIG_VIDEO_LM3646 is not set +# end of Flash devices + +# +# Audio decoders, processors and mixers +# +CONFIG_VIDEO_CS3308=m +CONFIG_VIDEO_CS5345=m +CONFIG_VIDEO_CS53L32A=m +CONFIG_VIDEO_MSP3400=m +# CONFIG_VIDEO_SONY_BTF_MPX is not set +# CONFIG_VIDEO_TDA1997X is not set +# CONFIG_VIDEO_TDA7432 is not set +# CONFIG_VIDEO_TDA9840 is not set +# CONFIG_VIDEO_TEA6415C is not set +# CONFIG_VIDEO_TEA6420 is not set +# CONFIG_VIDEO_TLV320AIC23B is not set +# CONFIG_VIDEO_TVAUDIO is not set +# CONFIG_VIDEO_UDA1342 is not set +CONFIG_VIDEO_VP27SMPX=m +CONFIG_VIDEO_WM8739=m +CONFIG_VIDEO_WM8775=m +# end of Audio decoders, processors and mixers + +# +# RDS decoders +# +# CONFIG_VIDEO_SAA6588 is not set +# end of RDS decoders + +# +# Video decoders +# +# CONFIG_VIDEO_ADV7180 is not set +# CONFIG_VIDEO_ADV7183 is not set +# CONFIG_VIDEO_ADV748X is not set +# CONFIG_VIDEO_ADV7604 is not set +# CONFIG_VIDEO_ADV7842 is not set +# CONFIG_VIDEO_BT819 is not set +# CONFIG_VIDEO_BT856 is not set +# CONFIG_VIDEO_BT866 is not set +# CONFIG_VIDEO_ISL7998X is not set +# CONFIG_VIDEO_KS0127 is not set +# CONFIG_VIDEO_ML86V7667 is not set +# CONFIG_VIDEO_SAA7110 is not set +CONFIG_VIDEO_SAA711X=m +# CONFIG_VIDEO_TC358743 is not set +# CONFIG_VIDEO_TC358746 is not set +# CONFIG_VIDEO_TVP514X is not set +# CONFIG_VIDEO_TVP5150 is not set +# CONFIG_VIDEO_TVP7002 is not set +# CONFIG_VIDEO_TW2804 is not set +# CONFIG_VIDEO_TW9903 is not set +# CONFIG_VIDEO_TW9906 is not set +# CONFIG_VIDEO_TW9910 is not set +# CONFIG_VIDEO_VPX3220 is not set + +# +# Video and audio decoders +# +CONFIG_VIDEO_SAA717X=m +CONFIG_VIDEO_CX25840=m +# end of Video decoders + +# +# Video encoders +# +# CONFIG_VIDEO_ADV7170 is not set +# CONFIG_VIDEO_ADV7175 is not set +# CONFIG_VIDEO_ADV7343 is not set +# CONFIG_VIDEO_ADV7393 is not set +# CONFIG_VIDEO_ADV7511 is not set +# CONFIG_VIDEO_AK881X is not set +CONFIG_VIDEO_SAA7127=m +# CONFIG_VIDEO_SAA7185 is not set +# 
CONFIG_VIDEO_THS8200 is not set +# end of Video encoders + +# +# Video improvement chips +# +CONFIG_VIDEO_UPD64031A=m +CONFIG_VIDEO_UPD64083=m +# end of Video improvement chips + +# +# Audio/Video compression chips +# +# CONFIG_VIDEO_SAA6752HS is not set +# end of Audio/Video compression chips + +# +# SDR tuner chips +# +# CONFIG_SDR_MAX2175 is not set +# end of SDR tuner chips + +# +# Miscellaneous helper chips +# +# CONFIG_VIDEO_I2C is not set +CONFIG_VIDEO_M52790=m +# CONFIG_VIDEO_ST_MIPID02 is not set +# CONFIG_VIDEO_THS7303 is not set +# end of Miscellaneous helper chips + +# +# Video serializers and deserializers +# +# CONFIG_VIDEO_DS90UB913 is not set +# CONFIG_VIDEO_DS90UB953 is not set +# CONFIG_VIDEO_DS90UB960 is not set +# end of Video serializers and deserializers + +# +# Media SPI Adapters +# +CONFIG_CXD2880_SPI_DRV=m +# CONFIG_VIDEO_GS1662 is not set +# end of Media SPI Adapters + +CONFIG_MEDIA_TUNER=m + +# +# Customize TV tuners +# +CONFIG_MEDIA_TUNER_E4000=m +CONFIG_MEDIA_TUNER_FC0011=m +CONFIG_MEDIA_TUNER_FC0012=m +CONFIG_MEDIA_TUNER_FC0013=m +CONFIG_MEDIA_TUNER_FC2580=m +CONFIG_MEDIA_TUNER_IT913X=m +CONFIG_MEDIA_TUNER_M88RS6000T=m +CONFIG_MEDIA_TUNER_MAX2165=m +CONFIG_MEDIA_TUNER_MC44S803=m +CONFIG_MEDIA_TUNER_MSI001=m +CONFIG_MEDIA_TUNER_MT2060=m +CONFIG_MEDIA_TUNER_MT2063=m +CONFIG_MEDIA_TUNER_MT20XX=m +CONFIG_MEDIA_TUNER_MT2131=m +CONFIG_MEDIA_TUNER_MT2266=m +CONFIG_MEDIA_TUNER_MXL301RF=m +CONFIG_MEDIA_TUNER_MXL5005S=m +CONFIG_MEDIA_TUNER_MXL5007T=m +CONFIG_MEDIA_TUNER_QM1D1B0004=m +CONFIG_MEDIA_TUNER_QM1D1C0042=m +CONFIG_MEDIA_TUNER_QT1010=m +CONFIG_MEDIA_TUNER_R820T=m +CONFIG_MEDIA_TUNER_SI2157=m +CONFIG_MEDIA_TUNER_SIMPLE=m +CONFIG_MEDIA_TUNER_TDA18212=m +CONFIG_MEDIA_TUNER_TDA18218=m +CONFIG_MEDIA_TUNER_TDA18250=m +CONFIG_MEDIA_TUNER_TDA18271=m +CONFIG_MEDIA_TUNER_TDA827X=m +CONFIG_MEDIA_TUNER_TDA8290=m +CONFIG_MEDIA_TUNER_TDA9887=m +CONFIG_MEDIA_TUNER_TEA5761=m +CONFIG_MEDIA_TUNER_TEA5767=m +CONFIG_MEDIA_TUNER_TUA9001=m 
+CONFIG_MEDIA_TUNER_XC2028=m +CONFIG_MEDIA_TUNER_XC4000=m +CONFIG_MEDIA_TUNER_XC5000=m +# end of Customize TV tuners + +# +# Customise DVB Frontends +# + +# +# Multistandard (satellite) frontends +# +CONFIG_DVB_MXL5XX=m +CONFIG_DVB_STB0899=m +CONFIG_DVB_STB6100=m +CONFIG_DVB_STV090x=m +CONFIG_DVB_STV0910=m +CONFIG_DVB_STV6110x=m +CONFIG_DVB_STV6111=m + +# +# Multistandard (cable + terrestrial) frontends +# +CONFIG_DVB_DRXK=m +CONFIG_DVB_MN88472=m +CONFIG_DVB_MN88473=m +CONFIG_DVB_SI2165=m +CONFIG_DVB_TDA18271C2DD=m + +# +# DVB-S (satellite) frontends +# +CONFIG_DVB_CX24110=m +CONFIG_DVB_CX24116=m +CONFIG_DVB_CX24117=m +CONFIG_DVB_CX24120=m +CONFIG_DVB_CX24123=m +CONFIG_DVB_DS3000=m +CONFIG_DVB_MB86A16=m +CONFIG_DVB_MT312=m +CONFIG_DVB_S5H1420=m +CONFIG_DVB_SI21XX=m +CONFIG_DVB_STB6000=m +CONFIG_DVB_STV0288=m +CONFIG_DVB_STV0299=m +CONFIG_DVB_STV0900=m +CONFIG_DVB_STV6110=m +CONFIG_DVB_TDA10071=m +CONFIG_DVB_TDA10086=m +CONFIG_DVB_TDA8083=m +CONFIG_DVB_TDA8261=m +CONFIG_DVB_TDA826X=m +CONFIG_DVB_TS2020=m +CONFIG_DVB_TUA6100=m +CONFIG_DVB_TUNER_CX24113=m +CONFIG_DVB_TUNER_ITD1000=m +CONFIG_DVB_VES1X93=m +CONFIG_DVB_ZL10036=m +CONFIG_DVB_ZL10039=m + +# +# DVB-T (terrestrial) frontends +# +CONFIG_DVB_CX22700=m +CONFIG_DVB_CX22702=m +CONFIG_DVB_CXD2820R=m +CONFIG_DVB_CXD2841ER=m +CONFIG_DVB_DIB3000MB=m +CONFIG_DVB_DIB3000MC=m +CONFIG_DVB_DIB7000M=m +CONFIG_DVB_DIB7000P=m +CONFIG_DVB_DIB9000=m +CONFIG_DVB_DRXD=m +CONFIG_DVB_EC100=m +CONFIG_DVB_GP8PSK_FE=m +CONFIG_DVB_L64781=m +CONFIG_DVB_MT352=m +CONFIG_DVB_NXT6000=m +CONFIG_DVB_S5H1432=m +CONFIG_DVB_SP887X=m +CONFIG_DVB_STV0367=m +CONFIG_DVB_TDA10048=m +CONFIG_DVB_TDA1004X=m +CONFIG_DVB_ZD1301_DEMOD=m +CONFIG_DVB_ZL10353=m +CONFIG_DVB_CXD2880=m + +# +# DVB-C (cable) frontends +# +CONFIG_DVB_STV0297=m +CONFIG_DVB_TDA10021=m +CONFIG_DVB_TDA10023=m +CONFIG_DVB_VES1820=m + +# +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends +# +CONFIG_DVB_AU8522=m +CONFIG_DVB_AU8522_DTV=m +CONFIG_DVB_AU8522_V4L=m 
+CONFIG_DVB_BCM3510=m +CONFIG_DVB_LG2160=m +CONFIG_DVB_LGDT3305=m +CONFIG_DVB_LGDT330X=m +CONFIG_DVB_MXL692=m +CONFIG_DVB_NXT200X=m +CONFIG_DVB_OR51132=m +CONFIG_DVB_OR51211=m +CONFIG_DVB_S5H1409=m +CONFIG_DVB_S5H1411=m + +# +# ISDB-T (terrestrial) frontends +# +CONFIG_DVB_DIB8000=m +CONFIG_DVB_MB86A20S=m +CONFIG_DVB_S921=m + +# +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends +# +CONFIG_DVB_MN88443X=m +CONFIG_DVB_TC90522=m + +# +# Digital terrestrial only tuners/PLL +# +CONFIG_DVB_PLL=m +CONFIG_DVB_TUNER_DIB0070=m +CONFIG_DVB_TUNER_DIB0090=m + +# +# SEC control devices for DVB-S +# +CONFIG_DVB_A8293=m +CONFIG_DVB_AF9033=m +CONFIG_DVB_ASCOT2E=m +CONFIG_DVB_ATBM8830=m +CONFIG_DVB_HELENE=m +CONFIG_DVB_HORUS3A=m +CONFIG_DVB_ISL6405=m +CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m +CONFIG_DVB_IX2505V=m +CONFIG_DVB_LGS8GL5=m +CONFIG_DVB_LGS8GXX=m +CONFIG_DVB_LNBH25=m +CONFIG_DVB_LNBH29=m +CONFIG_DVB_LNBP21=m +CONFIG_DVB_LNBP22=m +CONFIG_DVB_M88RS2000=m +CONFIG_DVB_TDA665x=m +CONFIG_DVB_DRX39XYJ=m + +# +# Common Interface (EN50221) controller drivers +# +CONFIG_DVB_CXD2099=m +CONFIG_DVB_SP2=m +# end of Customise DVB Frontends + +# +# Tools to develop new frontends +# +# CONFIG_DVB_DUMMY_FE is not set +# end of Media ancillary drivers + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set CONFIG_DRM=y +# CONFIG_DRM_DEBUG_MM is not set +CONFIG_DRM_KMS_HELPER=y +# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set +# CONFIG_DRM_DEBUG_MODESET_LOCK is not set +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y CONFIG_DRM_DP_AUX_CHARDEV=y CONFIG_DRM_DP_CEC=y +CONFIG_DRM_TTM=y +CONFIG_DRM_EXEC=m +CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m 
+CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_SHMEM_HELPER=y +CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# # CONFIG_DRM_I2C_CH7006 is not set # CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# CONFIG_DRM_KOMEDA is not set +# end of ARM devices + CONFIG_DRM_RADEON=m CONFIG_DRM_RADEON_USERPTR=y CONFIG_DRM_AMDGPU=m CONFIG_DRM_AMDGPU_SI=y CONFIG_DRM_AMDGPU_CIK=y CONFIG_DRM_AMDGPU_USERPTR=y +# CONFIG_DRM_AMDGPU_WERROR is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# CONFIG_DRM_AMD_DC_SI is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set CONFIG_DRM_VKMS=m CONFIG_DRM_UDL=m CONFIG_DRM_AST=y CONFIG_DRM_MGAG200=m CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_EDP is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set +# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LB035Q02 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# 
CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set +# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set +# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set +# CONFIG_DRM_PANEL_TPO_TPG110 is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_CHIPONE_ICN6211 is not set +# CONFIG_DRM_CHRONTEL_CH7033 is not set +# CONFIG_DRM_DISPLAY_CONNECTOR is not set +# CONFIG_DRM_ITE_IT6505 is not set +# CONFIG_DRM_LONTIUM_LT8912B is not set +# CONFIG_DRM_LONTIUM_LT9211 is not set +# CONFIG_DRM_LONTIUM_LT9611 is not set +# CONFIG_DRM_LONTIUM_LT9611UXC is not set +# CONFIG_DRM_ITE_IT66121 is not set +# CONFIG_DRM_LVDS_CODEC is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NWL_MIPI_DSI is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_PARADE_PS8640 is not set +# CONFIG_DRM_SAMSUNG_DSIM is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_SIMPLE_BRIDGE is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358762 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TOSHIBA_TC358768 is not set +# CONFIG_DRM_TOSHIBA_TC358775 is not set +# CONFIG_DRM_TI_DLPC3433 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TI_SN65DSI83 is not set +# 
CONFIG_DRM_TI_SN65DSI86 is not set +# CONFIG_DRM_TI_TPD12S015 is not set +# CONFIG_DRM_ANALOGIX_ANX6345 is not set +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_ANALOGIX_ANX7625 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_CDNS_MHDP8546 is not set +# end of Display Interface Bridges + CONFIG_DRM_LOONGSON=y +# CONFIG_DRM_ETNAVIV is not set +# CONFIG_DRM_LOGICVC is not set +# CONFIG_DRM_ARCPGU is not set CONFIG_DRM_BOCHS=m CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_SSD130X is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y +# CONFIG_HYDCU_FIXUP_HEADER is not set + +# +# Frame buffer Devices +# CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set CONFIG_FB_RADEON=y +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RADEON_BACKLIGHT=y +# CONFIG_FB_RADEON_DEBUG is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set 
+# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SM501 is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set CONFIG_FB_LS2K500=m +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_DEVICE=y +CONFIG_FB_DDC=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_IOMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set +# CONFIG_BACKLIGHT_PWM is not set +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set CONFIG_BACKLIGHT_LP855X=m +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_BACKLIGHT_LED is not set 
+# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# # CONFIG_VGA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_DRM_ACCEL is not set CONFIG_SOUND=y +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y CONFIG_SND=y +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_HWDEP=m +CONFIG_SND_SEQ_DEVICE=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y CONFIG_SND_OSSEMUL=y +# CONFIG_SND_MIXER_OSS is not set +# CONFIG_SND_PCM_OSS is not set +CONFIG_SND_PCM_TIMER=y CONFIG_SND_HRTIMER=m +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 # CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +CONFIG_SND_CTL_FAST_LOOKUP=y +# CONFIG_SND_DEBUG is not set +# CONFIG_SND_CTL_INPUT_VALIDATION is not set +CONFIG_SND_VMASTER=y +CONFIG_SND_CTL_LED=m CONFIG_SND_SEQUENCER=m CONFIG_SND_SEQ_DUMMY=m CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_SEQ_MIDI_EVENT=m +CONFIG_SND_SEQ_MIDI=m +CONFIG_SND_SEQ_MIDI_EMUL=m +CONFIG_SND_SEQ_VIRMIDI=m +# CONFIG_SND_SEQ_UMP is not set +CONFIG_SND_MPU401_UART=m +CONFIG_SND_OPL3_LIB=m +CONFIG_SND_OPL3_LIB_SEQ=m +CONFIG_SND_VX_LIB=m +CONFIG_SND_AC97_CODEC=m +CONFIG_SND_DRIVERS=y CONFIG_SND_DUMMY=m CONFIG_SND_ALOOP=m +# CONFIG_SND_PCMTEST is not set CONFIG_SND_VIRMIDI=m CONFIG_SND_MTPAV=m +# CONFIG_SND_MTS64 is not set +# CONFIG_SND_SERIAL_U16550 is not set CONFIG_SND_MPU401=m +# CONFIG_SND_PORTMAN2X4 is 
not set CONFIG_SND_AC97_POWER_SAVE=y CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 +CONFIG_SND_PCI=y CONFIG_SND_AD1889=m CONFIG_SND_ATIIXP=m CONFIG_SND_ATIIXP_MODEM=m CONFIG_SND_AU8810=m CONFIG_SND_AU8820=m CONFIG_SND_AU8830=m +# CONFIG_SND_AW2 is not set CONFIG_SND_BT87X=m CONFIG_SND_BT87X_OVERCLOCK=y CONFIG_SND_CA0106=m CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN_LIB=m CONFIG_SND_OXYGEN=m +# CONFIG_SND_CS4281 is not set CONFIG_SND_CS46XX=m +CONFIG_SND_CS46XX_NEW_DSP=y CONFIG_SND_CTXFI=m CONFIG_SND_DARLA20=m CONFIG_SND_GINA20=m @@ -1538,6 +5557,7 @@ CONFIG_SND_INDIGOIOX=m CONFIG_SND_INDIGODJX=m CONFIG_SND_ENS1370=m CONFIG_SND_ENS1371=m +# CONFIG_SND_FM801 is not set CONFIG_SND_HDSP=m CONFIG_SND_HDSPM=m CONFIG_SND_ICE1724=m @@ -1547,7 +5567,9 @@ CONFIG_SND_KORG1212=m CONFIG_SND_LOLA=m CONFIG_SND_LX6464ES=m CONFIG_SND_MIXART=m +# CONFIG_SND_NM256 is not set CONFIG_SND_PCXHR=m +# CONFIG_SND_RIPTIDE is not set CONFIG_SND_RME32=m CONFIG_SND_RME96=m CONFIG_SND_RME9652=m @@ -1555,35 +5577,68 @@ CONFIG_SND_VIA82XX=m CONFIG_SND_VIA82XX_MODEM=m CONFIG_SND_VIRTUOSO=m CONFIG_SND_VX222=m +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +CONFIG_SND_HDA=m +CONFIG_SND_HDA_GENERIC_LEDS=y CONFIG_SND_HDA_INTEL=m CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_RECONFIG=y CONFIG_SND_HDA_INPUT_BEEP=y CONFIG_SND_HDA_INPUT_BEEP_MODE=0 CONFIG_SND_HDA_PATCH_LOADER=y +# CONFIG_SND_HDA_SCODEC_CS35L41_I2C is not set +# CONFIG_SND_HDA_SCODEC_CS35L41_SPI is not set +# CONFIG_SND_HDA_SCODEC_CS35L56_I2C is not set +# CONFIG_SND_HDA_SCODEC_CS35L56_SPI is not set +# CONFIG_SND_HDA_SCODEC_TAS2781_I2C is not set CONFIG_SND_HDA_CODEC_REALTEK=m CONFIG_SND_HDA_CODEC_ANALOG=m CONFIG_SND_HDA_CODEC_SIGMATEL=m CONFIG_SND_HDA_CODEC_VIA=m CONFIG_SND_HDA_CODEC_HDMI=m CONFIG_SND_HDA_CODEC_CIRRUS=m +# CONFIG_SND_HDA_CODEC_CS8409 is not set CONFIG_SND_HDA_CODEC_CONEXANT=m CONFIG_SND_HDA_CODEC_CA0110=m CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CA0132_DSP=y CONFIG_SND_HDA_CODEC_CMEDIA=m CONFIG_SND_HDA_CODEC_SI3054=m 
+CONFIG_SND_HDA_GENERIC=m +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +# CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM is not set +# CONFIG_SND_HDA_CTL_DEV_ID is not set +# end of HD-Audio + +CONFIG_SND_HDA_CORE=m +CONFIG_SND_HDA_DSP_LOADER=y +CONFIG_SND_HDA_COMPONENT=y CONFIG_SND_HDA_PREALLOC_SIZE=512 +CONFIG_SND_INTEL_NHLT=y +CONFIG_SND_INTEL_DSP_CONFIG=m +CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m # CONFIG_SND_SPI is not set +CONFIG_SND_USB=y CONFIG_SND_USB_AUDIO=m +# CONFIG_SND_USB_AUDIO_MIDI_V2 is not set +CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y CONFIG_SND_USB_UA101=m CONFIG_SND_USB_CAIAQ=m CONFIG_SND_USB_CAIAQ_INPUT=y CONFIG_SND_USB_6FIRE=m CONFIG_SND_USB_HIFACE=m CONFIG_SND_BCD2000=m +CONFIG_SND_USB_LINE6=m CONFIG_SND_USB_POD=m CONFIG_SND_USB_PODHD=m CONFIG_SND_USB_TONEPORT=m CONFIG_SND_USB_VARIAX=m +CONFIG_SND_FIREWIRE=y +CONFIG_SND_FIREWIRE_LIB=m CONFIG_SND_DICE=m CONFIG_SND_OXFW=m CONFIG_SND_ISIGHT=m @@ -1594,36 +5649,306 @@ CONFIG_SND_FIREWIRE_TASCAM=m CONFIG_SND_FIREWIRE_MOTU=m CONFIG_SND_FIREFACE=m CONFIG_SND_SOC=m +# CONFIG_SND_SOC_ADI is not set +# CONFIG_SND_SOC_AMD_ACP is not set +# CONFIG_SND_AMD_ACP_CONFIG is not set +# CONFIG_SND_ATMEL_SOC is not set +# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set +# CONFIG_SND_DESIGNWARE_I2S is not set + +# +# SoC Audio for Freescale CPUs +# + +# +# Common SoC Audio options for Freescale CPUs: +# +# CONFIG_SND_SOC_FSL_ASRC is not set +# CONFIG_SND_SOC_FSL_SAI is not set +# CONFIG_SND_SOC_FSL_AUDMIX is not set +# CONFIG_SND_SOC_FSL_SSI is not set +# CONFIG_SND_SOC_FSL_SPDIF is not set +# CONFIG_SND_SOC_FSL_ESAI is not set +# CONFIG_SND_SOC_FSL_MICFIL is not set +# CONFIG_SND_SOC_FSL_XCVR is not set +# CONFIG_SND_SOC_IMX_AUDMUX is not set +# end of SoC Audio for Freescale CPUs + +# CONFIG_SND_SOC_CHV3_I2S is not set +# CONFIG_SND_I2S_HI6210_I2S is not set + +# +# SoC Audio for Loongson CPUs +# +# CONFIG_SND_SOC_LOONGSON_I2S_PCI is not set +# CONFIG_SND_SOC_LOONGSON_CARD is not set +# end of SoC Audio for Loongson CPUs + +# 
CONFIG_SND_SOC_IMG is not set +# CONFIG_SND_SOC_MTK_BTCVSD is not set +# CONFIG_SND_SOC_SOF_TOPLEVEL is not set + +# +# STMicroelectronics STM32 SOC audio support +# +# end of STMicroelectronics STM32 SOC audio support + +# CONFIG_SND_SOC_XILINX_I2S is not set +# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set +# CONFIG_SND_SOC_XILINX_SPDIF is not set +# CONFIG_SND_SOC_XTFPGA_I2S is not set +CONFIG_SND_SOC_I2C_AND_SPI=m + +# +# CODEC drivers +# +# CONFIG_SND_SOC_AC97_CODEC is not set +# CONFIG_SND_SOC_ADAU1372_I2C is not set +# CONFIG_SND_SOC_ADAU1372_SPI is not set +# CONFIG_SND_SOC_ADAU1701 is not set +# CONFIG_SND_SOC_ADAU1761_I2C is not set +# CONFIG_SND_SOC_ADAU1761_SPI is not set +# CONFIG_SND_SOC_ADAU7002 is not set +# CONFIG_SND_SOC_ADAU7118_HW is not set +# CONFIG_SND_SOC_ADAU7118_I2C is not set +# CONFIG_SND_SOC_AK4104 is not set +# CONFIG_SND_SOC_AK4118 is not set +# CONFIG_SND_SOC_AK4375 is not set +# CONFIG_SND_SOC_AK4458 is not set +# CONFIG_SND_SOC_AK4554 is not set +# CONFIG_SND_SOC_AK4613 is not set +# CONFIG_SND_SOC_AK4642 is not set +# CONFIG_SND_SOC_AK5386 is not set +# CONFIG_SND_SOC_AK5558 is not set +# CONFIG_SND_SOC_ALC5623 is not set +# CONFIG_SND_SOC_AUDIO_IIO_AUX is not set +# CONFIG_SND_SOC_AW8738 is not set +# CONFIG_SND_SOC_AW88395 is not set +# CONFIG_SND_SOC_AW88261 is not set +# CONFIG_SND_SOC_BD28623 is not set +# CONFIG_SND_SOC_BT_SCO is not set +# CONFIG_SND_SOC_CHV3_CODEC is not set +# CONFIG_SND_SOC_CS35L32 is not set +# CONFIG_SND_SOC_CS35L33 is not set +# CONFIG_SND_SOC_CS35L34 is not set +# CONFIG_SND_SOC_CS35L35 is not set +# CONFIG_SND_SOC_CS35L36 is not set +# CONFIG_SND_SOC_CS35L41_SPI is not set +# CONFIG_SND_SOC_CS35L41_I2C is not set +# CONFIG_SND_SOC_CS35L45_SPI is not set +# CONFIG_SND_SOC_CS35L45_I2C is not set +# CONFIG_SND_SOC_CS35L56_I2C is not set +# CONFIG_SND_SOC_CS35L56_SPI is not set +# CONFIG_SND_SOC_CS42L42 is not set +# CONFIG_SND_SOC_CS42L51_I2C is not set +# CONFIG_SND_SOC_CS42L52 is not set +# 
CONFIG_SND_SOC_CS42L56 is not set +# CONFIG_SND_SOC_CS42L73 is not set +# CONFIG_SND_SOC_CS42L83 is not set +# CONFIG_SND_SOC_CS4234 is not set +# CONFIG_SND_SOC_CS4265 is not set +# CONFIG_SND_SOC_CS4270 is not set +# CONFIG_SND_SOC_CS4271_I2C is not set +# CONFIG_SND_SOC_CS4271_SPI is not set +# CONFIG_SND_SOC_CS42XX8_I2C is not set +# CONFIG_SND_SOC_CS43130 is not set +# CONFIG_SND_SOC_CS4341 is not set +# CONFIG_SND_SOC_CS4349 is not set +# CONFIG_SND_SOC_CS53L30 is not set +# CONFIG_SND_SOC_CX2072X is not set +# CONFIG_SND_SOC_DA7213 is not set +# CONFIG_SND_SOC_DMIC is not set +# CONFIG_SND_SOC_ES7134 is not set +# CONFIG_SND_SOC_ES7241 is not set +# CONFIG_SND_SOC_ES8316 is not set +# CONFIG_SND_SOC_ES8326 is not set +# CONFIG_SND_SOC_ES8328_I2C is not set +# CONFIG_SND_SOC_ES8328_SPI is not set +# CONFIG_SND_SOC_GTM601 is not set +# CONFIG_SND_SOC_HDA is not set +# CONFIG_SND_SOC_ICS43432 is not set +# CONFIG_SND_SOC_IDT821034 is not set +# CONFIG_SND_SOC_INNO_RK3036 is not set +# CONFIG_SND_SOC_MAX98088 is not set +# CONFIG_SND_SOC_MAX98090 is not set +# CONFIG_SND_SOC_MAX98357A is not set +# CONFIG_SND_SOC_MAX98504 is not set +# CONFIG_SND_SOC_MAX9867 is not set +# CONFIG_SND_SOC_MAX98927 is not set +# CONFIG_SND_SOC_MAX98520 is not set +# CONFIG_SND_SOC_MAX98373_I2C is not set +# CONFIG_SND_SOC_MAX98388 is not set +# CONFIG_SND_SOC_MAX98390 is not set +# CONFIG_SND_SOC_MAX98396 is not set +# CONFIG_SND_SOC_MAX9860 is not set +# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set +# CONFIG_SND_SOC_PCM1681 is not set +# CONFIG_SND_SOC_PCM1789_I2C is not set +# CONFIG_SND_SOC_PCM179X_I2C is not set +# CONFIG_SND_SOC_PCM179X_SPI is not set +# CONFIG_SND_SOC_PCM186X_I2C is not set +# CONFIG_SND_SOC_PCM186X_SPI is not set +# CONFIG_SND_SOC_PCM3060_I2C is not set +# CONFIG_SND_SOC_PCM3060_SPI is not set +# CONFIG_SND_SOC_PCM3168A_I2C is not set +# CONFIG_SND_SOC_PCM3168A_SPI is not set +# CONFIG_SND_SOC_PCM5102A is not set +# CONFIG_SND_SOC_PCM512x_I2C is not set +# 
CONFIG_SND_SOC_PCM512x_SPI is not set +# CONFIG_SND_SOC_PEB2466 is not set +# CONFIG_SND_SOC_RK3328 is not set +# CONFIG_SND_SOC_RT5616 is not set +# CONFIG_SND_SOC_RT5631 is not set +# CONFIG_SND_SOC_RT5640 is not set +# CONFIG_SND_SOC_RT5659 is not set +# CONFIG_SND_SOC_RT9120 is not set +# CONFIG_SND_SOC_SGTL5000 is not set +# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set +# CONFIG_SND_SOC_SIMPLE_MUX is not set +# CONFIG_SND_SOC_SMA1303 is not set +# CONFIG_SND_SOC_SPDIF is not set +# CONFIG_SND_SOC_SRC4XXX_I2C is not set +# CONFIG_SND_SOC_SSM2305 is not set +# CONFIG_SND_SOC_SSM2518 is not set +# CONFIG_SND_SOC_SSM2602_SPI is not set +# CONFIG_SND_SOC_SSM2602_I2C is not set +# CONFIG_SND_SOC_SSM3515 is not set +# CONFIG_SND_SOC_SSM4567 is not set +# CONFIG_SND_SOC_STA32X is not set +# CONFIG_SND_SOC_STA350 is not set +# CONFIG_SND_SOC_STI_SAS is not set +# CONFIG_SND_SOC_TAS2552 is not set +# CONFIG_SND_SOC_TAS2562 is not set +# CONFIG_SND_SOC_TAS2764 is not set +# CONFIG_SND_SOC_TAS2770 is not set +# CONFIG_SND_SOC_TAS2780 is not set +# CONFIG_SND_SOC_TAS2781_I2C is not set +# CONFIG_SND_SOC_TAS5086 is not set +# CONFIG_SND_SOC_TAS571X is not set +# CONFIG_SND_SOC_TAS5720 is not set +# CONFIG_SND_SOC_TAS5805M is not set +# CONFIG_SND_SOC_TAS6424 is not set +# CONFIG_SND_SOC_TDA7419 is not set +# CONFIG_SND_SOC_TFA9879 is not set +# CONFIG_SND_SOC_TFA989X is not set +# CONFIG_SND_SOC_TLV320ADC3XXX is not set +# CONFIG_SND_SOC_TLV320AIC23_I2C is not set +# CONFIG_SND_SOC_TLV320AIC23_SPI is not set +# CONFIG_SND_SOC_TLV320AIC31XX is not set +# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set +# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set +# CONFIG_SND_SOC_TLV320AIC3X_I2C is not set +# CONFIG_SND_SOC_TLV320AIC3X_SPI is not set +# CONFIG_SND_SOC_TLV320ADCX140 is not set +# CONFIG_SND_SOC_TS3A227E is not set +# CONFIG_SND_SOC_TSCS42XX is not set +# CONFIG_SND_SOC_TSCS454 is not set +# CONFIG_SND_SOC_UDA1334 is not set +# CONFIG_SND_SOC_WM8510 is not set +# 
CONFIG_SND_SOC_WM8523 is not set +# CONFIG_SND_SOC_WM8524 is not set +# CONFIG_SND_SOC_WM8580 is not set +# CONFIG_SND_SOC_WM8711 is not set +# CONFIG_SND_SOC_WM8728 is not set +# CONFIG_SND_SOC_WM8731_I2C is not set +# CONFIG_SND_SOC_WM8731_SPI is not set +# CONFIG_SND_SOC_WM8737 is not set +# CONFIG_SND_SOC_WM8741 is not set +# CONFIG_SND_SOC_WM8750 is not set +# CONFIG_SND_SOC_WM8753 is not set +# CONFIG_SND_SOC_WM8770 is not set +# CONFIG_SND_SOC_WM8776 is not set +# CONFIG_SND_SOC_WM8782 is not set +# CONFIG_SND_SOC_WM8804_I2C is not set +# CONFIG_SND_SOC_WM8804_SPI is not set +# CONFIG_SND_SOC_WM8903 is not set +# CONFIG_SND_SOC_WM8904 is not set +# CONFIG_SND_SOC_WM8940 is not set +# CONFIG_SND_SOC_WM8960 is not set +# CONFIG_SND_SOC_WM8961 is not set +# CONFIG_SND_SOC_WM8962 is not set +# CONFIG_SND_SOC_WM8974 is not set +# CONFIG_SND_SOC_WM8978 is not set +# CONFIG_SND_SOC_WM8985 is not set +# CONFIG_SND_SOC_ZL38060 is not set +# CONFIG_SND_SOC_MAX9759 is not set +# CONFIG_SND_SOC_MT6351 is not set +# CONFIG_SND_SOC_MT6358 is not set +# CONFIG_SND_SOC_MT6660 is not set +# CONFIG_SND_SOC_NAU8315 is not set +# CONFIG_SND_SOC_NAU8540 is not set +# CONFIG_SND_SOC_NAU8810 is not set +# CONFIG_SND_SOC_NAU8821 is not set +# CONFIG_SND_SOC_NAU8822 is not set +# CONFIG_SND_SOC_NAU8824 is not set +# CONFIG_SND_SOC_TPA6130A2 is not set +# CONFIG_SND_SOC_LPASS_WSA_MACRO is not set +# CONFIG_SND_SOC_LPASS_VA_MACRO is not set +# CONFIG_SND_SOC_LPASS_RX_MACRO is not set +# CONFIG_SND_SOC_LPASS_TX_MACRO is not set +# end of CODEC drivers + +# CONFIG_SND_SIMPLE_CARD is not set +# CONFIG_SND_AUDIO_GRAPH_CARD is not set +# CONFIG_SND_AUDIO_GRAPH_CARD2 is not set +# CONFIG_SND_TEST_COMPONENT is not set +# CONFIG_SND_VIRTIO is not set +CONFIG_AC97_BUS=m +CONFIG_HID_SUPPORT=y +CONFIG_HID=y CONFIG_HID_BATTERY_STRENGTH=y CONFIG_HIDRAW=y CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set CONFIG_HID_ACRUX=m +# 
CONFIG_HID_ACRUX_FF is not set CONFIG_HID_APPLE=m CONFIG_HID_APPLEIR=m CONFIG_HID_ASUS=m CONFIG_HID_AUREAL=m CONFIG_HID_BELKIN=m CONFIG_HID_BETOP_FF=m +# CONFIG_HID_BIGBEN_FF is not set CONFIG_HID_CHERRY=m CONFIG_HID_CHICONY=m CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set CONFIG_HID_PRODIKEYS=m CONFIG_HID_CMEDIA=m +# CONFIG_HID_CP2112 is not set +# CONFIG_HID_CREATIVE_SB0540 is not set CONFIG_HID_CYPRESS=m CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set CONFIG_HID_ELAN=m CONFIG_HID_ELECOM=m CONFIG_HID_ELO=m +# CONFIG_HID_EVISION is not set CONFIG_HID_EZKEY=m +# CONFIG_HID_FT260 is not set CONFIG_HID_GEMBIRD=m CONFIG_HID_GFRM=m +# CONFIG_HID_GLORIOUS is not set CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_STADIA_FF is not set +# CONFIG_HID_VIVALDI is not set CONFIG_HID_GT683R=m CONFIG_HID_KEYTOUCH=m CONFIG_HID_KYE=m CONFIG_HID_UCLOGIC=m CONFIG_HID_WALTOP=m +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_VRC2 is not set +# CONFIG_HID_XIAOMI is not set CONFIG_HID_GYRATION=m CONFIG_HID_ICADE=m CONFIG_HID_ITE=m @@ -1631,69 +5956,169 @@ CONFIG_HID_JABRA=m CONFIG_HID_TWINHAN=m CONFIG_HID_KENSINGTON=m CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m CONFIG_HID_LENOVO=m +# CONFIG_HID_LETSKETCH is not set CONFIG_HID_LOGITECH=m CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m CONFIG_LOGITECH_FF=y CONFIG_LOGIRUMBLEPAD2_FF=y CONFIG_LOGIG940_FF=y +CONFIG_LOGIWHEELS_FF=y CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MEGAWORLD_FF is not set +# CONFIG_HID_REDRAGON is not set CONFIG_HID_MICROSOFT=m CONFIG_HID_MONTEREY=m CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NINTENDO is not set CONFIG_HID_NTI=m CONFIG_HID_NTRIG=y +# CONFIG_HID_NVIDIA_SHIELD is not set CONFIG_HID_ORTEK=m CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set CONFIG_HID_PENMOUNT=m CONFIG_HID_PETALYNX=m CONFIG_HID_PICOLCD=m +# CONFIG_HID_PICOLCD_FB is 
not set +# CONFIG_HID_PICOLCD_BACKLIGHT is not set +# CONFIG_HID_PICOLCD_LCD is not set +# CONFIG_HID_PICOLCD_LEDS is not set +# CONFIG_HID_PICOLCD_CIR is not set CONFIG_HID_PLANTRONICS=m +# CONFIG_HID_PXRC is not set +# CONFIG_HID_RAZER is not set CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set CONFIG_HID_ROCCAT=m CONFIG_HID_SAITEK=m CONFIG_HID_SAMSUNG=m +# CONFIG_HID_SEMITEK is not set +# CONFIG_HID_SIGMAMICRO is not set CONFIG_HID_SONY=m CONFIG_SONY_FF=y CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set CONFIG_HID_STEELSERIES=m CONFIG_HID_SUNPLUS=m CONFIG_HID_RMI=m CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set CONFIG_HID_TIVO=m CONFIG_HID_TOPSEED=m +# CONFIG_HID_TOPRE is not set CONFIG_HID_THINGM=m CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_U2FZERO is not set CONFIG_HID_WACOM=m CONFIG_HID_WIIMOTE=m CONFIG_HID_XINMO=m CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set CONFIG_HID_ZYDACRON=m CONFIG_HID_SENSOR_HUB=y CONFIG_HID_SENSOR_CUSTOM_SENSOR=m CONFIG_HID_ALPS=m +# CONFIG_HID_MCP2221 is not set +# end of Special HID drivers + +# +# HID-BPF support +# +# CONFIG_HID_BPF is not set +# end of HID-BPF support + +# +# USB HID support +# +CONFIG_USB_HID=y CONFIG_HID_PID=y CONFIG_USB_HIDDEV=y +# end of USB HID support + CONFIG_I2C_HID=m +# CONFIG_I2C_HID_ACPI is not set +# CONFIG_I2C_HID_OF is not set +# CONFIG_I2C_HID_OF_ELAN is not set +# CONFIG_I2C_HID_OF_GOODIX is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y CONFIG_USB_LED_TRIG=y +# CONFIG_USB_ULPI_BUS is not set +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB=y +CONFIG_USB_PCI=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# 
CONFIG_USB_OTG_PRODUCTLIST is not set +# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 CONFIG_USB_MON=y + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PCI_RENESAS is not set CONFIG_USB_XHCI_PLATFORM=m CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set CONFIG_USB_EHCI_HCD_PLATFORM=y +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y CONFIG_USB_STORAGE_DATAFAB=m CONFIG_USB_STORAGE_FREECOM=m CONFIG_USB_STORAGE_ISD200=m @@ -1707,12 +6132,40 @@ CONFIG_USB_STORAGE_KARMA=m CONFIG_USB_STORAGE_CYPRESS_ATACB=m CONFIG_USB_STORAGE_ENE_UB6250=m CONFIG_USB_UAS=m + +# +# USB Imaging devices +# CONFIG_USB_MDC800=m CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set + +# +# USB dual-mode controller drivers +# +# CONFIG_USB_CDNS_SUPPORT is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set CONFIG_USB_DWC2=y CONFIG_USB_DWC2_HOST=y + +# +# Gadget/Dual-role mode requires USB Gadget support to be enabled +# +# CONFIG_USB_DWC2_PERIPHERAL is not set +# CONFIG_USB_DWC2_DUAL_ROLE is not set +# CONFIG_USB_DWC2_PCI is not set +# CONFIG_USB_DWC2_DEBUG is 
not set +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# CONFIG_USB_SERIAL=m CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_SIMPLE is not set CONFIG_USB_SERIAL_AIRCABLE=m CONFIG_USB_SERIAL_ARK3116=m CONFIG_USB_SERIAL_BELKIN=m @@ -1728,6 +6181,7 @@ CONFIG_USB_SERIAL_IPAQ=m CONFIG_USB_SERIAL_IR=m CONFIG_USB_SERIAL_EDGEPORT=m CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set CONFIG_USB_SERIAL_F8153X=m CONFIG_USB_SERIAL_GARMIN=m CONFIG_USB_SERIAL_IPW=m @@ -1737,6 +6191,7 @@ CONFIG_USB_SERIAL_KEYSPAN=m CONFIG_USB_SERIAL_KLSI=m CONFIG_USB_SERIAL_KOBIL_SCT=m CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set CONFIG_USB_SERIAL_MOS7720=m CONFIG_USB_SERIAL_MOS7715_PARPORT=y CONFIG_USB_SERIAL_MOS7840=m @@ -1753,14 +6208,21 @@ CONFIG_USB_SERIAL_SIERRAWIRELESS=m CONFIG_USB_SERIAL_SYMBOL=m CONFIG_USB_SERIAL_TI=m CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_WWAN=m CONFIG_USB_SERIAL_OPTION=m CONFIG_USB_SERIAL_OMNINET=m CONFIG_USB_SERIAL_OPTICON=m CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set CONFIG_USB_SERIAL_SSU100=m CONFIG_USB_SERIAL_QT2=m CONFIG_USB_SERIAL_UPD78F0730=m +# CONFIG_USB_SERIAL_XR is not set CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# CONFIG_USB_USS720=m CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m @@ -1768,138 +6230,541 @@ CONFIG_USB_ADUTUX=m CONFIG_USB_SEVSEG=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set CONFIG_USB_IDMOUSE=m CONFIG_USB_APPLEDISPLAY=m +# CONFIG_APPLE_MFI_FASTCHARGE is not set CONFIG_USB_SISUSBVGA=m CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set CONFIG_USB_HSIC_USB3503=m +# 
CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set +# CONFIG_USB_ONBOARD_HUB is not set CONFIG_USB_ATM=m CONFIG_USB_SPEEDTOUCH=m CONFIG_USB_CXACRU=m CONFIG_USB_UEAGLEATM=m CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# end of USB Physical Layer drivers + CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 + +# +# USB Peripheral Controller +# +# CONFIG_USB_GR_UDC is not set +# CONFIG_USB_R8A66597 is not set +# CONFIG_USB_PXA27X is not set +# CONFIG_USB_MV_UDC is not set +# CONFIG_USB_MV_U3D is not set +# CONFIG_USB_SNP_UDC_PLAT is not set +# CONFIG_USB_M66592 is not set +# CONFIG_USB_BDC_UDC is not set +# CONFIG_USB_AMD5536UDC is not set +# CONFIG_USB_NET2272 is not set +# CONFIG_USB_NET2280 is not set +# CONFIG_USB_GOKU is not set +# CONFIG_USB_EG20T is not set +# CONFIG_USB_GADGET_XILINX is not set +# CONFIG_USB_MAX3420_UDC is not set +# CONFIG_USB_CDNS2_UDC is not set +# CONFIG_USB_DUMMY_HCD is not set +# end of USB Peripheral Controller + +# CONFIG_USB_CONFIGFS is not set + +# +# USB Gadget precomposed configurations +# +# CONFIG_USB_ZERO is not set +# CONFIG_USB_AUDIO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_GADGET_TARGET is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_MIDI_GADGET is not set +# CONFIG_USB_G_PRINTER is not set +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_NOKIA is not set +# CONFIG_USB_G_ACM_MS is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +# CONFIG_USB_G_WEBCAM is not set +# 
CONFIG_USB_RAW_GADGET is not set +# end of USB Gadget precomposed configurations + CONFIG_TYPEC=m CONFIG_TYPEC_TCPM=m CONFIG_TYPEC_TCPCI=m CONFIG_TYPEC_RT1711H=m +# CONFIG_TYPEC_TCPCI_MAXIM is not set CONFIG_TYPEC_FUSB302=m CONFIG_TYPEC_UCSI=m +# CONFIG_UCSI_CCG is not set CONFIG_UCSI_ACPI=m +# CONFIG_UCSI_STM32G0 is not set CONFIG_TYPEC_TPS6598X=m +# CONFIG_TYPEC_ANX7411 is not set +# CONFIG_TYPEC_RT1719 is not set +# CONFIG_TYPEC_HD3SS3220 is not set +# CONFIG_TYPEC_STUSB160X is not set +# CONFIG_TYPEC_WUSB3801 is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_FSA4480 is not set +# CONFIG_TYPEC_MUX_GPIO_SBU is not set CONFIG_TYPEC_MUX_PI3USB30532=m +# CONFIG_TYPEC_MUX_NB7VPQ904M is not set +# end of USB Type-C Multiplexer/DeMultiplexer Switch support + +# +# USB Type-C Alternate Mode drivers +# CONFIG_TYPEC_DP_ALTMODE=m +# CONFIG_TYPEC_NVIDIA_ALTMODE is not set +# end of USB Type-C Alternate Mode drivers + +CONFIG_USB_ROLE_SWITCH=y CONFIG_MMC=m +CONFIG_PWRSEQ_EMMC=m +# CONFIG_PWRSEQ_SD8787 is not set +CONFIG_PWRSEQ_SIMPLE=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y CONFIG_MMC_SDHCI_ACPI=m CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_OF_AT91 is not set +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +# CONFIG_MMC_SDHCI_CADENCE is not set +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_SDHCI_MILBEAUT is not set CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set CONFIG_MMC_CB710=m CONFIG_MMC_VIA_SDMMC=m CONFIG_MMC_VUB300=m CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set CONFIG_MMC_REALTEK_PCI=m CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_HSQ is not set +# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set 
CONFIG_MMC_SDHCI_XENON=m +# CONFIG_MMC_SDHCI_OMAP is not set +# CONFIG_MMC_SDHCI_AM654 is not set +# CONFIG_SCSI_UFSHCD is not set CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# CONFIG_MEMSTICK_TIFM_MS=m CONFIG_MEMSTICK_JMICRON_38X=m CONFIG_MEMSTICK_R592=m CONFIG_MEMSTICK_REALTEK_PCI=m CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_CLASS_MULTICOLOR is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AN30259A is not set +# CONFIG_LEDS_AW200XX is not set +# CONFIG_LEDS_AW2013 is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +# CONFIG_LEDS_EL15203000 is not set CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP50XX is not set +# CONFIG_LEDS_LP55XX_COMMON is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2606MVV is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# CONFIG_LEDS_BLINKM=m +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_SPI_BYTE is not set +# CONFIG_LEDS_LM3697 is not set 
+ +# +# Flash and Torch LED drivers +# + +# +# RGB LED drivers +# + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=m CONFIG_LEDS_TRIGGER_ONESHOT=m CONFIG_LEDS_TRIGGER_DISK=y +# CONFIG_LEDS_TRIGGER_MTD is not set CONFIG_LEDS_TRIGGER_HEARTBEAT=m CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# CONFIG_LEDS_TRIGGER_TRANSIENT=m CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set CONFIG_LEDS_TRIGGER_AUDIO=y +# CONFIG_LEDS_TRIGGER_TTY is not set + +# +# Simple LED drivers +# +# CONFIG_ACCESSIBILITY is not set CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_MAD=m CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y CONFIG_INFINIBAND_BNXT_RE=m CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +# CONFIG_INFINIBAND_ERDMA is not set +# CONFIG_INFINIBAND_IRDMA is not set CONFIG_MLX4_INFINIBAND=m CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_OCRDMA is not set CONFIG_INFINIBAND_VMWARE_PVRDMA=m CONFIG_RDMA_RXE=m +# CONFIG_RDMA_SIW is not set CONFIG_INFINIBAND_IPOIB=m CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set CONFIG_INFINIBAND_SRP=m CONFIG_INFINIBAND_SRPT=m CONFIG_INFINIBAND_ISER=m CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y -# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC 
interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set CONFIG_RTC_DRV_DS1374=m +# CONFIG_RTC_DRV_DS1374_WDT is not set CONFIG_RTC_DRV_DS1672=m +# CONFIG_RTC_DRV_HYM8563 is not set CONFIG_RTC_DRV_MAX6900=m +# CONFIG_RTC_DRV_NCT3018Y is not set CONFIG_RTC_DRV_RS5C372=m CONFIG_RTC_DRV_ISL1208=m CONFIG_RTC_DRV_ISL12022=m +# CONFIG_RTC_DRV_ISL12026 is not set CONFIG_RTC_DRV_X1205=m CONFIG_RTC_DRV_PCF8523=m +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set CONFIG_RTC_DRV_PCF8563=m CONFIG_RTC_DRV_PCF8583=m CONFIG_RTC_DRV_M41T80=m CONFIG_RTC_DRV_M41T80_WDT=y CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set CONFIG_RTC_DRV_FM3130=m +# CONFIG_RTC_DRV_RX8010 is not set CONFIG_RTC_DRV_RX8581=m CONFIG_RTC_DRV_RX8025=m CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set CONFIG_RTC_DRV_RV8803=m +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set CONFIG_RTC_DRV_RX4581=m +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +# CONFIG_RTC_DRV_PCF2127 is not set CONFIG_RTC_DRV_RV3029C2=m # CONFIG_RTC_DRV_RV3029_HWMON is not set +# CONFIG_RTC_DRV_RX6110 is not set + 
+# +# Platform RTC drivers +# CONFIG_RTC_DRV_DS1286=m CONFIG_RTC_DRV_DS1511=m CONFIG_RTC_DRV_DS1553=m +# CONFIG_RTC_DRV_DS1685_FAMILY is not set CONFIG_RTC_DRV_DS1742=m CONFIG_RTC_DRV_DS2404=m CONFIG_RTC_DRV_EFI=m CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set CONFIG_RTC_DRV_M48T35=m CONFIG_RTC_DRV_M48T59=m CONFIG_RTC_DRV_MSM6242=m CONFIG_RTC_DRV_RP5C01=m +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set CONFIG_RTC_DRV_LOONGSON=y +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +# CONFIG_RTC_DRV_GOLDFISH is not set CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_PLX_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set +# CONFIG_XILINX_ZYNQMP_DPDMA is not set +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=m CONFIG_DW_DMAC=m +# CONFIG_DW_DMAC_PCI is not set +# CONFIG_DW_EDMA is not set +# CONFIG_SF_PDMA is not set + +# +# DMA Clients +# CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +# CONFIG_DMABUF_DEBUG is not set +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# CONFIG_DMABUF_SYSFS_STATS is not set +# end of DMABUF options + +CONFIG_UIO=m CONFIG_UIO_CIF=m CONFIG_UIO_PDRV_GENIRQ=m CONFIG_UIO_DMEM_GENIRQ=m CONFIG_UIO_AEC=m CONFIG_UIO_SERCOS3=m CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y 
CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y CONFIG_VFIO_PCI=m +# CONFIG_MLX5_VFIO_PCI is not set +# end of VFIO support for PCI devices + +CONFIG_IRQ_BYPASS_MANAGER=m +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO_ANCHOR=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_PCI_LIB=y +CONFIG_VIRTIO_PCI_LIB_LEGACY=y +CONFIG_VIRTIO_MENU=y CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_INPUT=m CONFIG_VIRTIO_MMIO=m CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y CONFIG_VHOST_NET=m CONFIG_VHOST_SCSI=m CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set CONFIG_COMEDI=m +# CONFIG_COMEDI_DEBUG is not set +CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048 +CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480 +# CONFIG_COMEDI_MISC_DRIVERS is not set +# CONFIG_COMEDI_ISA_DRIVERS is not set CONFIG_COMEDI_PCI_DRIVERS=m CONFIG_COMEDI_8255_PCI=m +# CONFIG_COMEDI_ADDI_APCI_1032 is not set +# CONFIG_COMEDI_ADDI_APCI_1500 is not set +# CONFIG_COMEDI_ADDI_APCI_1516 is not set +# CONFIG_COMEDI_ADDI_APCI_1564 is not set +# CONFIG_COMEDI_ADDI_APCI_16XX is not set +# CONFIG_COMEDI_ADDI_APCI_2032 is not set +# CONFIG_COMEDI_ADDI_APCI_2200 is not set +# CONFIG_COMEDI_ADDI_APCI_3120 is not set +# CONFIG_COMEDI_ADDI_APCI_3501 is not set +# CONFIG_COMEDI_ADDI_APCI_3XXX is not set CONFIG_COMEDI_ADL_PCI6208=m CONFIG_COMEDI_ADL_PCI7X3X=m CONFIG_COMEDI_ADL_PCI8164=m @@ -1911,39 +6776,916 @@ CONFIG_COMEDI_ADV_PCI1723=m CONFIG_COMEDI_ADV_PCI1724=m CONFIG_COMEDI_ADV_PCI1760=m CONFIG_COMEDI_ADV_PCI_DIO=m +# CONFIG_COMEDI_AMPLC_DIO200_PCI is not set +# CONFIG_COMEDI_AMPLC_PC236_PCI is not set +# CONFIG_COMEDI_AMPLC_PC263_PCI is not 
set +# CONFIG_COMEDI_AMPLC_PCI224 is not set +# CONFIG_COMEDI_AMPLC_PCI230 is not set +# CONFIG_COMEDI_CONTEC_PCI_DIO is not set +# CONFIG_COMEDI_DAS08_PCI is not set +# CONFIG_COMEDI_DT3000 is not set +# CONFIG_COMEDI_DYNA_PCI10XX is not set +# CONFIG_COMEDI_GSC_HPDI is not set +# CONFIG_COMEDI_MF6X4 is not set +# CONFIG_COMEDI_ICP_MULTI is not set +# CONFIG_COMEDI_DAQBOARD2000 is not set +# CONFIG_COMEDI_JR3_PCI is not set +# CONFIG_COMEDI_KE_COUNTER is not set +# CONFIG_COMEDI_CB_PCIDAS64 is not set +# CONFIG_COMEDI_CB_PCIDAS is not set +# CONFIG_COMEDI_CB_PCIDDA is not set +# CONFIG_COMEDI_CB_PCIMDAS is not set +# CONFIG_COMEDI_CB_PCIMDDA is not set +# CONFIG_COMEDI_ME4000 is not set +# CONFIG_COMEDI_ME_DAQ is not set +# CONFIG_COMEDI_NI_6527 is not set +# CONFIG_COMEDI_NI_65XX is not set +# CONFIG_COMEDI_NI_660X is not set +# CONFIG_COMEDI_NI_670X is not set CONFIG_COMEDI_NI_LABPC_PCI=m CONFIG_COMEDI_NI_PCIDIO=m CONFIG_COMEDI_NI_PCIMIO=m +# CONFIG_COMEDI_RTD520 is not set +# CONFIG_COMEDI_S626 is not set +CONFIG_COMEDI_MITE=m +CONFIG_COMEDI_NI_TIOCMD=m +# CONFIG_COMEDI_USB_DRIVERS is not set +CONFIG_COMEDI_8254=m +CONFIG_COMEDI_8255=m +# CONFIG_COMEDI_8255_SA is not set +# CONFIG_COMEDI_KCOMEDILIB is not set +CONFIG_COMEDI_NI_LABPC=m +CONFIG_COMEDI_NI_TIO=m +CONFIG_COMEDI_NI_ROUTING=m +# CONFIG_COMEDI_TESTS is not set CONFIG_STAGING=y +# CONFIG_PRISM2_USB is not set +# CONFIG_RTL8192U is not set +# CONFIG_RTLLIB is not set +# CONFIG_RTL8723BS is not set +# CONFIG_R8712U is not set +# CONFIG_RTS5208 is not set +# CONFIG_VT6655 is not set +# CONFIG_VT6656 is not set + +# +# IIO staging drivers +# + +# +# Accelerometers +# +# CONFIG_ADIS16203 is not set +# CONFIG_ADIS16240 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD7816 is not set +# end of Analog to digital converters + +# +# Analog digital bi-direction converters +# +# CONFIG_ADT7316 is not set +# end of Analog digital bi-direction converters + +# +# Direct Digital 
Synthesis +# +# CONFIG_AD9832 is not set +# CONFIG_AD9834 is not set +# end of Direct Digital Synthesis + +# +# Network Analyzer, Impedance Converters +# +# CONFIG_AD5933 is not set +# end of Network Analyzer, Impedance Converters + +# +# Resolver to digital converters +# +# CONFIG_AD2S1210 is not set +# end of Resolver to digital converters +# end of IIO staging drivers + +# CONFIG_FB_SM750 is not set +# CONFIG_STAGING_MEDIA is not set +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_FB_TFT is not set +# CONFIG_KS7010 is not set +# CONFIG_PI433 is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_QLGE is not set +# CONFIG_VME_BUS is not set +CONFIG_LOONGARCH_PLATFORM_DEVICES=y +CONFIG_LOONGSON_LAPTOP=y +# CONFIG_GOLDFISH is not set +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y +# CONFIG_LMK04832 is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_AXI_CLKGEN is not set CONFIG_COMMON_CLK_LOONGSON2=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_RS9_PCIE is not set +# CONFIG_COMMON_CLK_SI521XX is not set +# CONFIG_COMMON_CLK_VC3 is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_VC7 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +# CONFIG_XILINX_VCU is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +# end of Clock Source drivers + +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set 
+CONFIG_IOMMU_DEFAULT_DMA_STRICT=y +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +# CONFIG_IOMMUFD is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# end of fujitsu SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# CONFIG_LITEX_SOC_CONTROLLER is not set +# end of Enable LiteX SoC Builder specific drivers + CONFIG_LOONGSON2_GUTS=y CONFIG_LOONGSON2_PM=y +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_PERFORMANCE=y CONFIG_DEVFREQ_GOV_POWERSAVE=y CONFIG_DEVFREQ_GOV_USERSPACE=y +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_PM_DEVFREQ_EVENT is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# CONFIG_IIO_BUFFER_DMA is not set +# CONFIG_IIO_BUFFER_DMAENGINE is not set +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set +# CONFIG_IIO_TRIGGERED_EVENT is not set + +# +# 
Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADXL313_I2C is not set +# CONFIG_ADXL313_SPI is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +# CONFIG_ADXL355_I2C is not set +# CONFIG_ADXL355_SPI is not set +# CONFIG_ADXL367_SPI is not set +# CONFIG_ADXL367_I2C is not set +# CONFIG_ADXL372_SPI is not set +# CONFIG_ADXL372_I2C is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set +# CONFIG_BMA400 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_BMI088_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD06 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +# CONFIG_FXLS8962AF_I2C is not set +# CONFIG_FXLS8962AF_SPI is not set CONFIG_HID_SENSOR_ACCEL_3D=m +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_IIO_KX022A_SPI is not set +# CONFIG_IIO_KX022A_I2C is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MSA311 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_SCA3300 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD4130 is not set +# CONFIG_AD7091R5 is not set +# CONFIG_AD7124 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7266 is not set +# CONFIG_AD7280 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7292 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set +# CONFIG_AD7606_IFACE_PARALLEL is not set +# CONFIG_AD7606_IFACE_SPI is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7768_1 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# 
CONFIG_AD7923 is not set +# CONFIG_AD7949 is not set +# CONFIG_AD799X is not set +# CONFIG_ADI_AXI_ADC is not set +# CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2496 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX11205 is not set +# CONFIG_MAX11410 is not set +# CONFIG_MAX1241 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set +# CONFIG_MCP3422 is not set +# CONFIG_MCP3911 is not set +# CONFIG_NAU7802 is not set +# CONFIG_RICHTEK_RTQ6056 is not set +# CONFIG_SD_ADC_MODULATOR is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7924 is not set +# CONFIG_TI_ADS1100 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8344 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_ADS124S08 is not set +# CONFIG_TI_ADS131E08 is not set +# CONFIG_TI_LMP92064 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_TI_TSC2046 is not set +# CONFIG_VF610_ADC is not set +# CONFIG_VIPERBOARD_ADC is not set +# CONFIG_XILINX_XADC is not set +# end of Analog to digital converters + +# +# Analog to digital and digital to analog converters +# +# CONFIG_AD74115 is not set +# CONFIG_AD74413R is not set +# end of Analog to digital and digital to analog converters + +# +# Analog Front Ends +# +# CONFIG_IIO_RESCALE is not set +# end of Analog Front Ends + +# +# Amplifiers +# +# CONFIG_AD8366 is not set +# CONFIG_ADA4250 is not set +# CONFIG_HMC425 is not set +# end of Amplifiers + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# 
CONFIG_AD7746 is not set +# end of Capacitance to digital converters + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_ATLAS_EZO_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_SCD30_CORE is not set +# CONFIG_SCD4X is not set +# CONFIG_SENSIRION_SGP30 is not set +# CONFIG_SENSIRION_SGP40 is not set +# CONFIG_SPS30_I2C is not set +# CONFIG_SENSEAIR_SUNRISE_CO2 is not set +# CONFIG_VZ89X is not set +# end of Chemical Sensors + +# +# Hid Sensor IIO Common +# +CONFIG_HID_SENSOR_IIO_COMMON=m +CONFIG_HID_SENSOR_IIO_TRIGGER=m +# end of Hid Sensor IIO Common + +# +# IIO SCMI Sensors +# +# end of IIO SCMI Sensors + +# +# SSP Sensor Common +# +# CONFIG_IIO_SSP_SENSORHUB is not set +# end of SSP Sensor Common + +# +# Digital to analog converters +# +# CONFIG_AD3552R is not set +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2688 is not set +# CONFIG_AD5686_SPI is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5766 is not set +# CONFIG_AD5770R is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7293 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# CONFIG_DPOT_DAC is not set +# CONFIG_DS4424 is not set +# CONFIG_LTC1660 is not set +# CONFIG_LTC2632 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MAX5522 is not set +# CONFIG_MAX5821 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4728 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set +# CONFIG_TI_DAC5571 is not set +# CONFIG_TI_DAC7311 is not set +# CONFIG_TI_DAC7612 is not set +# CONFIG_VF610_DAC is 
not set +# end of Digital to analog converters + +# +# IIO dummy driver +# +# end of IIO dummy driver + +# +# Filters +# +# CONFIG_ADMV8818 is not set +# end of Filters + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# CONFIG_AD9523 is not set +# end of Clock Generator/Distribution + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# CONFIG_ADF4350 is not set +# CONFIG_ADF4371 is not set +# CONFIG_ADF4377 is not set +# CONFIG_ADMV1013 is not set +# CONFIG_ADMV1014 is not set +# CONFIG_ADMV4420 is not set +# CONFIG_ADRF6780 is not set +# end of Phase-Locked Loop (PLL) frequency synthesizers +# end of Frequency Synthesizers DDS/PLL + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS290 is not set +# CONFIG_ADXRS450 is not set +# CONFIG_BMG160 is not set +# CONFIG_FXAS21002C is not set CONFIG_HID_SENSOR_GYRO_3D=m +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set +# end of Digital gyroscope sensors + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4403 is not set +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set +# end of Heart Rate Monitors +# end of Health Sensors + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# CONFIG_HDC2010 is not set CONFIG_HID_SENSOR_HUMIDITY=m +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set +# end of Humidity sensors + +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16460 is not set +# CONFIG_ADIS16475 is not set +# CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +# CONFIG_BOSCH_BNO055_I2C is not set +# CONFIG_FXOS8700_I2C is not set +# CONFIG_FXOS8700_SPI is not set +# 
CONFIG_KMX61 is not set +# CONFIG_INV_ICM42600_I2C is not set +# CONFIG_INV_ICM42600_SPI is not set +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set +# CONFIG_IIO_ST_LSM6DSX is not set +# CONFIG_IIO_ST_LSM9DS0 is not set +# end of Inertial measurement units + +# +# Light sensors +# +# CONFIG_ACPI_ALS is not set +# CONFIG_ADJD_S311 is not set +# CONFIG_ADUX1020 is not set +# CONFIG_AL3010 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_AS73211 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM3605 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP002 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set CONFIG_HID_SENSOR_ALS=m CONFIG_HID_SENSOR_PROX=m +# CONFIG_JSA1212 is not set +# CONFIG_ROHM_BU27008 is not set +# CONFIG_ROHM_BU27034 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LTRF216A is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_MAX44009 is not set +# CONFIG_NOA1305 is not set +# CONFIG_OPT3001 is not set +# CONFIG_OPT4001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2591 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VCNL4035 is not set +# CONFIG_VEML6030 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set +# end of Light sensors + +# +# Magnetometer sensors +# +# CONFIG_AK8974 is not set +# CONFIG_AK8975 is not 
set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set +# CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_SENSORS_RM3100_SPI is not set +# CONFIG_TI_TMAG5273 is not set +# CONFIG_YAMAHA_YAS530 is not set +# end of Magnetometer sensors + +# +# Multiplexers +# +# CONFIG_IIO_MUX is not set +# end of Multiplexers + +# +# Inclinometer sensors +# CONFIG_HID_SENSOR_INCLINOMETER_3D=m CONFIG_HID_SENSOR_DEVICE_ROTATION=m +# end of Inclinometer sensors + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set +# end of Triggers - standalone + +# +# Linear and angular position sensors +# +# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set +# end of Linear and angular position sensors + +# +# Digital potentiometers +# +# CONFIG_AD5110 is not set +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5432 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_MCP41010 is not set +# CONFIG_TPL0102 is not set +# CONFIG_X9250 is not set +# end of Digital potentiometers + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set +# end of Digital potentiostats + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +# CONFIG_DLHL60D is not set +# CONFIG_DPS310 is not set CONFIG_HID_SENSOR_PRESS=m +# CONFIG_HP03 is not set +# CONFIG_ICP10100 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MPRLS0025PA is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is 
not set +# CONFIG_ZPA2326 is not set +# end of Pressure sensors + +# +# Lightning sensors +# +# CONFIG_AS3935 is not set +# end of Lightning sensors + +# +# Proximity and distance sensors +# +# CONFIG_IRSD200 is not set +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_MB1232 is not set +# CONFIG_PING is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9310 is not set +# CONFIG_SX9324 is not set +# CONFIG_SX9360 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set +# CONFIG_VCNL3020 is not set +# CONFIG_VL53L0X_I2C is not set +# end of Proximity and distance sensors + +# +# Resolver to digital converters +# +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1200 is not set +# end of Resolver to digital converters + +# +# Temperature sensors +# +# CONFIG_LTC2983 is not set +# CONFIG_MAXIM_THERMOCOUPLE is not set CONFIG_HID_SENSOR_TEMP=m +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TMP117 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_MAX30208 is not set +# CONFIG_MAX31856 is not set +# CONFIG_MAX31865 is not set +# end of Temperature sensors + CONFIG_NTB=m +# CONFIG_NTB_MSI is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_EPF is not set +# CONFIG_NTB_SWITCHTEC is not set CONFIG_NTB_PINGPONG=m CONFIG_NTB_TOOL=m CONFIG_NTB_PERF=m CONFIG_NTB_TRANSPORT=m CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_ATMEL_TCB is not set +# CONFIG_PWM_CLK is not set +# CONFIG_PWM_DWC is not set +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_PCA9685 is not set +# CONFIG_PWM_XILINX is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +# CONFIG_AL_FIC is not set +# CONFIG_XILINX_INTC is not set +CONFIG_IRQ_LOONGARCH_CPU=y +CONFIG_LOONGSON_LIOINTC=y +CONFIG_LOONGSON_EIOINTC=y +CONFIG_LOONGSON_HTVEC=y +CONFIG_LOONGSON_PCH_PIC=y +CONFIG_LOONGSON_PCH_MSI=y +CONFIG_LOONGSON_PCH_LPC=y +# 
end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_SIMPLE is not set +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_CADENCE_TORRENT is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_DPHY_RX is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_CADENCE_SALVO is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_LAN966X_SERDES is not set +# CONFIG_PHY_CPCAP_USB is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_OCELOT_SERDES is not set +# CONFIG_PHY_SAMSUNG_USB2 is not set +# end of PHY Subsystem + CONFIG_POWERCAP=y +# CONFIG_DTPM is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_DWC_PCIE_PMU is not set +# end of Performance monitor support + +CONFIG_RAS=y CONFIG_USB4=m +# CONFIG_USB4_DEBUGFS_WRITE is not set +# CONFIG_USB4_DMA_TEST is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +# CONFIG_LIBNVDIMM is not set CONFIG_DAX=y CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_KMEM=m +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_RMEM is not set +# CONFIG_NVMEM_U_BOOT_ENV is not set + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_PM_OPP=y +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# end 
of Device Drivers + +# +# File systems +# +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -1951,105 +7693,289 @@ CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT3_FS=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set CONFIG_XFS_FS=y +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set CONFIG_GFS2_FS=m CONFIG_GFS2_FS_LOCKING_DLM=y CONFIG_OCFS2_FS=m +CONFIG_OCFS2_FS_O2CB=m +CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m +CONFIG_OCFS2_FS_STATS=y +CONFIG_OCFS2_DEBUG_MASKLOG=y +# CONFIG_OCFS2_DEBUG_FS is not set CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y CONFIG_FANOTIFY=y CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y CONFIG_QFMT_V1=m CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y CONFIG_AUTOFS_FS=y CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m +# CONFIG_VIRT_FUSE is not set 
CONFIG_OVERLAY_FS=y +CONFIG_OVERLAY_FS_REDIRECT_DIR=y # CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set CONFIG_OVERLAY_FS_INDEX=y CONFIG_OVERLAY_FS_XINO_AUTO=y CONFIG_OVERLAY_FS_METACOPY=y +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=y +CONFIG_NETFS_STATS=y CONFIG_FSCACHE=m CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +# CONFIG_CACHEFILES_ONDEMAND is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# CONFIG_ISO9660_FS=m CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_FAT_DEFAULT_CODEPAGE=936 CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" +# CONFIG_FAT_DEFAULT_UTF8 is not set CONFIG_EXFAT_FS=m +CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8" CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +# CONFIG_NTFS_RW is not set CONFIG_NTFS3_FS=m CONFIG_NTFS3_64BIT_CLUSTER=y CONFIG_NTFS3_LZX_XPRESS=y +# CONFIG_NTFS3_FS_POSIX_ACL is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_ARCH_SUPPORTS_HUGETLBFS=y CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y +# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set CONFIG_CONFIGFS_FS=y CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y CONFIG_ORANGEFS_FS=m +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set CONFIG_ECRYPT_FS=m CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m +# CONFIG_BEFS_FS is not set +# 
CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set CONFIG_UBIFS_FS=m CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_UBIFS_FS_ZSTD=y +# CONFIG_UBIFS_ATIME_SUPPORT is not set +CONFIG_UBIFS_FS_XATTR=y +CONFIG_UBIFS_FS_SECURITY=y +# CONFIG_UBIFS_FS_AUTHENTICATION is not set CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y CONFIG_SQUASHFS_LZ4=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set CONFIG_MINIX_FS=m +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set CONFIG_ROMFS_FS=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_MTD is not set +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +CONFIG_ROMFS_ON_BLOCK=y CONFIG_PSTORE=m +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +# CONFIG_PSTORE_RAM is not set +# CONFIG_PSTORE_BLK is not set CONFIG_SYSV_FS=m CONFIG_UFS_FS=m +# CONFIG_UFS_FS_WRITE is not set +# CONFIG_UFS_DEBUG is not set CONFIG_EROFS_FS=m +# CONFIG_EROFS_FS_DEBUG is not set +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y CONFIG_EROFS_FS_ZIP_LZMA=y +# CONFIG_EROFS_FS_ZIP_DEFLATE is not set CONFIG_EROFS_FS_PCPU_KTHREAD=y +CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI=y 
+CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y # CONFIG_NFS_V2 is not set CONFIG_NFS_V3=m CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set CONFIG_NFS_V4_1=y CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +# CONFIG_ROOT_NFS is not set +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y # CONFIG_NFS_DISABLE_UDP_SUPPORT is not set +CONFIG_NFS_V4_2_READ_PLUS=y CONFIG_NFSD=y +# CONFIG_NFSD_V2 is not set CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y CONFIG_NFSD_BLOCKLAYOUT=y CONFIG_NFSD_SCSILAYOUT=y CONFIG_NFSD_FLEXFILELAYOUT=y CONFIG_NFSD_V4_2_INTER_SSC=y CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=y +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m CONFIG_CEPH_FS=m CONFIG_CEPH_FSCACHE=y CONFIG_CEPH_FS_POSIX_ACL=y CONFIG_CEPH_FS_SECURITY_LABEL=y CONFIG_CIFS=m # CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y CONFIG_CIFS_UPCALL=y CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y # CONFIG_CIFS_DEBUG is not set CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=m +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set CONFIG_9P_FS=y +# CONFIG_9P_FS_POSIX_ACL is not set +# CONFIG_9P_FS_SECURITY is not set +CONFIG_NLS=y CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -2100,44 +8026,207 @@ 
CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_NLS_UTF8=y +CONFIG_NLS_UCS2_UTILS=m CONFIG_DLM=m CONFIG_DLM_DEBUG=y +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set CONFIG_PERSISTENT_KEYRINGS=y CONFIG_TRUSTED_KEYS=y +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set CONFIG_KEY_DH_OPERATIONS=y +# CONFIG_SECURITY_DMESG_RESTRICT is not set CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_INFINIBAND=y CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y CONFIG_LSM_MMAP_MIN_ADDR=65535 CONFIG_HARDENED_USERCOPY=y +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set CONFIG_SECURITY_APPARMOR=y +# CONFIG_SECURITY_APPARMOR_DEBUG is not set +CONFIG_SECURITY_APPARMOR_INTROSPECT_POLICY=y +CONFIG_SECURITY_APPARMOR_HASH=y +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y +CONFIG_SECURITY_APPARMOR_EXPORT_BINARY=y +CONFIG_SECURITY_APPARMOR_PARANOID_LOAD=y +# CONFIG_SECURITY_LOADPIN is not set CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set CONFIG_SECURITY_LOCKDOWN_LSM=y CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y +# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set +# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y CONFIG_INTEGRITY_SIGNATURE=y CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set 
+CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +# CONFIG_IMA_WRITE_POLICY is not set CONFIG_IMA_READ_POLICY=y CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +# CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY is not set +# CONFIG_IMA_BLACKLIST_KEYRING is not set CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_DISABLE_HTABLE is not set CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_APPARMOR is not set CONFIG_DEFAULT_SECURITY_DAC=y CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +# CONFIG_INIT_STACK_NONE is not set +# CONFIG_INIT_STACK_ALL_PATTERN is not set +CONFIG_INIT_STACK_ALL_ZERO=y +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# end of Hardening of kernel data structures 
+ +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_ENGINE=m +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set CONFIG_CRYPTO_SM2=y +# CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m @@ -2145,59 +8234,618 @@ CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m 
CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set CONFIG_CRYPTO_LRW=m +# CONFIG_CRYPTO_OFB is not set CONFIG_CRYPTO_PCBC=m +# CONFIG_CRYPTO_XTS is not set +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=y CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=y +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLY1305=m CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=y +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=m +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_LZO=m CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=y +# end of Compression + +# +# Random number generation +# CONFIG_CRYPTO_ANSI_CPRNG=m 
+CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +CONFIG_CRYPTO_KDF800108_CTR=y +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y CONFIG_CRYPTO_USER_API_HASH=y CONFIG_CRYPTO_USER_API_SKCIPHER=y CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y + +# +# Accelerated Cryptographic Algorithms for CPU (loongarch) +# CONFIG_CRYPTO_CRC32_LOONGARCH=m +# end of Accelerated Cryptographic Algorithms for CPU (loongarch) + +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set CONFIG_CRYPTO_DEV_CHELSIO=m CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" 
+# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set CONFIG_SECONDARY_TRUSTED_KEYRING=y CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" CONFIG_SYSTEM_REVOCATION_LIST=y +CONFIG_SYSTEM_REVOCATION_KEYS="" +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_PACKING=y +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=1 +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=m +CONFIG_CRC16=y CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=m CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set CONFIG_CRC7=m +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=m +CONFIG_842_DECOMPRESS=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=y 
+CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +# CONFIG_DMA_RESTRICTED_POOL is not set CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=16 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +# CONFIG_CPUMASK_OFFSTACK is not set +# CONFIG_FORCE_NR_CPUS is not set +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of 
Library routines + +CONFIG_GENERIC_LIB_ASHLDI3=y +CONFIG_GENERIC_LIB_ASHRDI3=y +CONFIG_GENERIC_LIB_LSHRDI3=y +CONFIG_GENERIC_LIB_CMPDI2=y +CONFIG_GENERIC_LIB_UCMPDI2=y +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# CONFIG_PRINTK_TIME=y CONFIG_PRINTK_CALLER=y +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 CONFIG_BOOT_PRINTK_DELAY=y CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set CONFIG_FRAME_WARN=4096 CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +# CONFIG_VMLINUX_MAP is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_UBSAN is not set +CONFIG_HAVE_KCSAN_COMPILER=y +# end of 
Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SHRINKER_DEBUG is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_STACKOVERFLOW is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_ARCH_DISABLE_KASAN_INLINE=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +CONFIG_HAVE_ARCH_KFENCE=y +# CONFIG_KFENCE is not set +# end of Memory Debugging + CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_SDEI_WATCHDOG is not set +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PERF is not set +CONFIG_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# # 
CONFIG_SCHED_DEBUG is not set +CONFIG_SCHED_INFO=y CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set # CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_RETHOOK=y +CONFIG_RETHOOK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y 
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FPROBE is not set +# CONFIG_FUNCTION_PROFILER is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +# CONFIG_OSNOISE_TRACER is not set +# CONFIG_TIMERLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_CC=y +# CONFIG_SYNTH_EVENTS is not set +# CONFIG_USER_EVENTS is not set +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_RV is not set +# CONFIG_SAMPLES is not set 
+CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y # CONFIG_STRICT_DEVMEM is not set + +# +# loongarch Debugging +# +# CONFIG_UNWINDER_GUESS is not set +CONFIG_UNWINDER_PROLOGUE=y +# end of loongarch Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FUNCTION_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set # CONFIG_RUNTIME_TESTING_MENU is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking -- Gitee From 0c7b6e7f91fa02d54dc20f251ea97d23c0861392 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Fri, 22 Jul 2022 16:23:56 +0800 Subject: [PATCH 0669/2138] anolis: arm_mpam: Fix L3 cache size display error in resctrl fs ANBZ: #8686 The value of L3 cache in the size file is 0 now. $ cat /sys/fs/resctrl/size MB:0=100;1=100 L3:0=0;1=0 We fix it in this patch. 
Signed-off-by: Xin Hao Tested-by: Shawn Wang Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/564 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_resctrl.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index c7ebb4d37b47..6d25e98a757d 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -739,6 +739,8 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) res->resctrl_res.rid == RDT_RESOURCE_L3) { bool has_csu = cache_has_usable_csu(class); + r->cache_level = class->level; + /* TODO: Scaling is not yet supported */ r->cache.cbm_len = class->props.cpbm_wd; r->cache.arch_has_sparse_bitmasks = true; -- Gitee From 31ead5e66bd19b9d767543301b8e787e2f92c344 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Mon, 25 Jul 2022 11:33:06 +0800 Subject: [PATCH 0670/2138] anolis: arm_mpam: Remove the zero padding for MB percentage showing in schemata ANBZ: #8686 Zero padding is used for MB percentage showing in the schemata file on ARM64 machines, which is different from the x86_64 machines. This patch removes it. 
Signed-off-by: Shawn Wang Signed-off-by: Xin Hao Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/564 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_resctrl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 6d25e98a757d..41ae848b6f61 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -790,7 +790,7 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) struct mpam_props *cprops = &class->props; /* TODO: kill these properties off as they are derivatives */ - r->format_str = "%d=%0*u"; + r->format_str = "%d=%*u"; r->fflags = RFTYPE_RES_MB; r->default_ctrl = MAX_MBA_BW; r->data_width = 3; -- Gitee From feb73550d1344da40c7519a9d097a2e409412533 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Fri, 22 Jul 2022 17:36:53 +0800 Subject: [PATCH 0671/2138] anolis: arm_mpam: Fix MB default percentage error in schemata ANBZ: #8686 When creating a new group, the default percentage of MB resource in schemata is 97 instead of 100. This patch fixes it. 
Signed-off-by: Shawn Wang Signed-off-by: Xin Hao Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/564 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_resctrl.c | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 41ae848b6f61..dfecac759f41 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -539,10 +539,17 @@ static u32 mbw_max_to_percent(u16 mbw_max, struct mpam_props *cprops) for (bit = 15; bit; bit--) { if (mbw_max & BIT(bit)) - value += MAX_MBA_BW / divisor; + /* + * Left shift by 16 bits to preserve the precision of + * the division operation. + */ + value += (MAX_MBA_BW << 16) / divisor; divisor <<= 1; } + /* Use the upper bound of the fixed-point fraction. */ + value = (value + (MAX_MBA_BW << (16 - cprops->bwa_wd))) >> 16; + return value; } @@ -561,23 +568,29 @@ static u32 percent_to_mbw_pbm(u8 pc, struct mpam_props *cprops) static u16 percent_to_mbw_max(u8 pc, struct mpam_props *cprops) { u8 bit; - u32 divisor = 2, value = 0; + u32 pc_ls, divisor = 2, value = 0; if (WARN_ON_ONCE(cprops->bwa_wd > 15)) return MAX_MBA_BW; + /* + * Left shift by 16 bits to preserve the precision of the division + * operation. 
+ */ + pc_ls = (u32) pc << 16; + for (bit = 15; bit; bit--) { - if (pc >= MAX_MBA_BW / divisor) { - pc -= MAX_MBA_BW / divisor; + if (pc_ls >= (MAX_MBA_BW << 16) / divisor) { + pc_ls -= (MAX_MBA_BW << 16) / divisor; value |= BIT(bit); } divisor <<= 1; - if (!pc || !(MAX_MBA_BW / divisor)) + if (!pc_ls || !((MAX_MBA_BW << 16) / divisor)) break; } - value &= GENMASK(15, 15 - cprops->bwa_wd); + value &= GENMASK(15, 15 - cprops->bwa_wd + 1); return value; } -- Gitee From 7ab055ba90a40e6e2246057e48fc819ed49f1451 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Mon, 25 Jul 2022 18:00:07 +0800 Subject: [PATCH 0672/2138] anolis: arm_mpam: Fix the inaccurate MB granularity in resctrl info directory ANBZ: #8686 There is a problem with how the current MB granularity is calculated. By definition, cprops->bwa_wd should be used as an exponential rather than a linear value. Besides, since the minimum granularity cannot be less than 1% with current representation, when the granularity is too fine, 1 is returned. Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_resctrl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index dfecac759f41..c53b89557b4d 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -515,7 +515,7 @@ static u32 get_mba_granularity(struct mpam_props *cprops) * bwa_wd is the number of bits implemented in the 0.xxx * fixed point fraction. 1 bit is 50%, 2 is 25% etc. 
*/ - return MAX_MBA_BW / (cprops->bwa_wd + 1); + return max_t(u32, 1, (MAX_MBA_BW / BIT(cprops->bwa_wd))); } return 0; -- Gitee From c3de790fff4d1c99f7f78145b43daee3beef9f9f Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Wed, 27 Jul 2022 11:13:26 +0800 Subject: [PATCH 0673/2138] anolis: arm_mpam: Limit MB percentage in schemata to multiples of granularity ANBZ: #8686 The current setting of MB percentage is not tied to MB granularity. This patch sets MB percentage to a multiple of the bandwidth granularity, and limits the minimum bandwidth to the granularity value. Signed-off-by: Shawn Wang Signed-off-by: Xin Hao Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/564 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_resctrl.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index c53b89557b4d..06af2d77b93e 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -568,11 +568,17 @@ static u32 percent_to_mbw_pbm(u8 pc, struct mpam_props *cprops) static u16 percent_to_mbw_max(u8 pc, struct mpam_props *cprops) { u8 bit; - u32 pc_ls, divisor = 2, value = 0; + u32 granularity, pc_ls, divisor = 2, value = 0; if (WARN_ON_ONCE(cprops->bwa_wd > 15)) return MAX_MBA_BW; + /* Set the pc value to be a multiple of granularity. */ + granularity = get_mba_granularity(cprops); + pc = roundup(pc, (u8) granularity); + if (pc > 100) + pc = 100; + /* * Left shift by 16 bits to preserve the precision of the division * operation. 
@@ -811,6 +817,7 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) r->membw.delay_linear = true; r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED; r->membw.bw_gran = get_mba_granularity(cprops); + r->membw.min_bw = r->membw.bw_gran; /* Round up to at least 1% */ if (!r->membw.bw_gran) -- Gitee From 207507aa7dc67186d6fcba75d62b3ca760ea560f Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Wed, 24 Aug 2022 16:41:57 +0800 Subject: [PATCH 0674/2138] anolis: arm_mpam: Fix the problem that error interrupt handling cannot exit normally ANBZ: #8686 Current MPAM driver uses threaded irq when handling shared error interrupt. When an error occurs, the handler thread calls free_irq() to unregister MPAM's irqs while free_irq() needs to wait for the handler thread to exit, which causes a deadlock. To fix this problem, we change the bottom half from threaded irq to workqueue. Signed-off-by: Shawn Wang Signed-off-by: Xin Hao Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/656 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_devices.c | 33 ++++++++++------------------ 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 32532a918115..f5f23725d13a 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -1964,13 +1964,15 @@ static irqreturn_t __mpam_irq_handler(int irq, struct mpam_msc *msc) pr_err("error irq from msc:%u '%s', partid:%u, pmg: %u, ris: %u\n", msc->id, mpam_errcode_names[errcode], partid, pmg, ris); - if (irq_is_percpu(irq)) { - mpam_disable_msc_ecr(msc); - schedule_work(&mpam_broken_work); - return IRQ_HANDLED; - } + /* + * To prevent this interrupt from repeatedly cancelling the scheduled + * work to disable mpam, disable the error interrupt. 
+ */ + mpam_disable_msc_ecr(msc); - return IRQ_WAKE_THREAD; + schedule_work(&mpam_broken_work); + + return IRQ_HANDLED; } static irqreturn_t mpam_ppi_handler(int irq, void *dev_id) @@ -1987,8 +1989,6 @@ static irqreturn_t mpam_spi_handler(int irq, void *dev_id) return __mpam_irq_handler(irq, msc); } -static irqreturn_t mpam_disable_thread(int irq, void *dev_id); - static int mpam_register_irqs(void) { int err, irq; @@ -2018,11 +2018,9 @@ static int mpam_register_irqs(void) true); mutex_unlock(&msc->lock); } else { - err = devm_request_threaded_irq(&msc->pdev->dev, irq, - &mpam_spi_handler, - &mpam_disable_thread, - IRQF_SHARED, - "mpam:msc:error", msc); + err = devm_request_irq(&msc->pdev->dev, irq, + &mpam_spi_handler, IRQF_SHARED, + "mpam:msc:error", msc); if (err) return err; } @@ -2228,7 +2226,7 @@ void mpam_reset_class(struct mpam_class *class) * All of MPAMs errors indicate a software bug, restore any modified * controls to their reset values. */ -static irqreturn_t mpam_disable_thread(int irq, void *dev_id) +void mpam_disable(struct work_struct *ignored) { int idx; struct mpam_class *class; @@ -2250,13 +2248,6 @@ static irqreturn_t mpam_disable_thread(int irq, void *dev_id) list_for_each_entry_rcu(class, &mpam_classes, classes_list) mpam_reset_class(class); srcu_read_unlock(&mpam_srcu, idx); - - return IRQ_HANDLED; -} - -void mpam_disable(struct work_struct *ignored) -{ - mpam_disable_thread(0, NULL); } /* -- Gitee From 5acf7dc213393d338bb8a4a9dbffb8c4b15a593f Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Mon, 28 Nov 2022 20:22:26 +0800 Subject: [PATCH 0675/2138] anolis: arm_mpam: Clear mpam_msc properly when driver probing failed ANBZ: #8686 When mpam_msc_drv_probe() fails to call mpam_acpi_parse_resources(), the current implementation will not delete the failed msc device from mpam_all_msc list. It will cause the following error when adding a new msc device to mpam_all_msc: list_add corruption. 
next->prev should be prev (ffff8000120a3228), but was ffff040000d99080. (next=ffff040000d99080) ... Call trace: __list_add_valid+0x98/Oxb0 mpam_msc_drv_probe+0x374/0x508 platform_drv_probe+0x58/0xa8 really_probe+0xc0/0x420 __driver_probe_device+0x114/0x188 river_probe_device+0x44/0xf8 ... Fix it by calling mpam_msc_drv_remove() when driver probing failed. Signed-off-by: Shawn Wang Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/925 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_devices.c | 43 ++++++++++++++-------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index f5f23725d13a..12015951a63d 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -1688,6 +1688,24 @@ static void mpam_pcc_rx_callback(struct mbox_client *cl, void *msg) /* TODO: wake up tasks blocked on this MSC's PCC channel */ } +static int mpam_msc_drv_remove(struct platform_device *pdev) +{ + struct mpam_msc *msc = platform_get_drvdata(pdev); + + if (!msc) + return 0; + + mutex_lock(&mpam_list_lock); + mpam_num_msc--; + platform_set_drvdata(pdev, NULL); + list_del_rcu(&msc->glbl_list); + mpam_msc_destroy(msc); + synchronize_srcu(&mpam_srcu); + mutex_unlock(&mpam_list_lock); + + return 0; +} + static int mpam_msc_drv_probe(struct platform_device *pdev) { int err; @@ -1733,7 +1751,6 @@ static int mpam_msc_drv_probe(struct platform_device *pdev) err = mpam_msc_setup_error_irq(msc); if (err) { - devm_kfree(&pdev->dev, msc); msc = ERR_PTR(err); break; } @@ -1749,7 +1766,6 @@ static int mpam_msc_drv_probe(struct platform_device *pdev) &msc_res); if (IS_ERR(io)) { pr_err("Failed to map MSC base address\n"); - devm_kfree(&pdev->dev, msc); err = PTR_ERR(io); break; } @@ -1766,7 +1782,6 @@ static int mpam_msc_drv_probe(struct platform_device *pdev) 
msc->pcc_subspace_id); if (IS_ERR(msc->pcc_chan)) { pr_err("Failed to request MSC PCC channel\n"); - devm_kfree(&pdev->dev, msc); err = PTR_ERR(msc->pcc_chan); break; } @@ -1777,7 +1792,6 @@ static int mpam_msc_drv_probe(struct platform_device *pdev) if (IS_ERR(io)) { pr_err("Failed to map MSC base address\n"); pcc_mbox_free_channel(msc->pcc_chan); - devm_kfree(&pdev->dev, msc); err = PTR_ERR(io); break; } @@ -1801,6 +1815,9 @@ static int mpam_msc_drv_probe(struct platform_device *pdev) err = mpam_dt_parse_resources(msc, plat_data); } + if (err) + mpam_msc_drv_remove(pdev); + if (!err && fw_num_msc == mpam_num_msc) mpam_register_cpuhp_callbacks(&mpam_discovery_cpu_online); @@ -2277,24 +2294,6 @@ void mpam_enable(struct work_struct *work) mpam_enable_once(); } -static int mpam_msc_drv_remove(struct platform_device *pdev) -{ - struct mpam_msc *msc = platform_get_drvdata(pdev); - - if (!msc) - return 0; - - mutex_lock(&mpam_list_lock); - mpam_num_msc--; - platform_set_drvdata(pdev, NULL); - list_del_rcu(&msc->glbl_list); - mpam_msc_destroy(msc); - synchronize_srcu(&mpam_srcu); - mutex_unlock(&mpam_list_lock); - - return 0; -} - struct mpam_write_config_arg { struct mpam_msc_ris *ris; struct mpam_component *comp; -- Gitee From 7172d56f91bbee8516b0a7c16ae27fae72cefa40 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Mon, 28 Nov 2022 19:34:01 +0800 Subject: [PATCH 0676/2138] anolis: arm_mpam: Fix wrong MMIO space size for msc platform device ANBZ: #8686 Current end field in struct resource for MPAM MSC's MMIO space is assigned a wrong value, which will cause resource_size() to return a size that is 1 greater than the real size. Fix it. However, current MSC MMIO space size in Yitian-710's BIOS is 0x1000, which will cause some registers cannot be accessed, such as MPAMCFG_CPBM. To avoid register access error, force the MMIO space size to be equal to or greater than MPAM_MIN_MMIO_SIZE. Since current maximum register offset is 0x2000, assign MPAM_MIN_MMIO_SIZE to 0x3000. 
Note that this patch will make MPAM on Yitian-710 unavailable, so you need to upgrade the BIOS to B137 or above. Signed-off-by: Shawn Wang Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/925 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_devices.c | 7 ++++++- drivers/platform/mpam/mpam_internal.h | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 12015951a63d..ad641f21d301 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -1769,8 +1769,13 @@ static int mpam_msc_drv_probe(struct platform_device *pdev) err = PTR_ERR(io); break; } - msc->mapped_hwpage_sz = msc_res->end - msc_res->start; + msc->mapped_hwpage_sz = msc_res->end - msc_res->start + 1; msc->mapped_hwpage = io; + if (msc->mapped_hwpage_sz < MPAM_MIN_MMIO_SIZE) { + pr_err("MSC MMIO space size is too small\n"); + err = -EINVAL; + break; + } } else if (msc->iface == MPAM_IFACE_PCC) { msc->pcc_cl.dev = &pdev->dev; msc->pcc_cl.rx_callback = mpam_pcc_rx_callback; diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 48554ff93e09..014524e5fa4f 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -321,6 +321,7 @@ void mpam_resctrl_exit(void); * Partitioning and Monitoring (MPAM), for Armv8-A. 
DDI 0598A.a */ #define MPAM_ARCHITECTURE_V1 0x10 +#define MPAM_MIN_MMIO_SIZE 0x3000 /* Memory mapped control pages: */ /* ID Register offsets in the memory mapped page */ -- Gitee From b42ba553329d6e093c05fac32d5af671a36fa20a Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Tue, 17 Jan 2023 14:40:01 +0800 Subject: [PATCH 0677/2138] anolis: arm_mpam: Fix the wrong PARTID_MAX and PMG_MAX usage ANBZ: #8686 The PARTID_MAX and PMG_MAX fields in MPAM system and MMIO registers mean the largest index that can be used instead of the total number. Fix the wrong usage of these values. Besides, add READ_ONCE() to prevent compiler from optimizing mpam_pmg_max reading. Signed-off-by: Shawn Wang Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/1090 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_devices.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index ad641f21d301..e22540111d5b 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -1240,7 +1240,7 @@ static int mpam_reprogram_ris(void *_arg) spin_lock(&partid_max_lock); partid_max = mpam_partid_max; spin_unlock(&partid_max_lock); - for (partid = 0; partid < partid_max; partid++) + for (partid = 0; partid <= partid_max; partid++) mpam_reprogram_ris_partid(ris, partid, cfg); return 0; @@ -1396,7 +1396,7 @@ static void mpam_reprogram_msc(struct mpam_msc *msc) } reset = true; - for (partid = 0; partid < mpam_partid_max; partid++) { + for (partid = 0; partid <= mpam_partid_max; partid++) { cfg = &ris->comp->cfg[partid]; if (cfg->features) reset = false; @@ -2119,7 +2119,7 @@ static int __allocate_component_cfg(struct mpam_component *comp) if (comp->cfg) return 0; - comp->cfg = kcalloc(mpam_partid_max, sizeof(*comp->cfg), GFP_KERNEL); + comp->cfg = kcalloc(mpam_partid_max + 1, 
sizeof(*comp->cfg), GFP_KERNEL); if (!comp->cfg) return -ENOMEM; @@ -2220,7 +2220,7 @@ static void mpam_enable_once(void) mpam_register_cpuhp_callbacks(mpam_cpu_online); pr_info("MPAM enabled with %u partid and %u pmg\n", - READ_ONCE(mpam_partid_max) + 1, mpam_pmg_max + 1); + READ_ONCE(mpam_partid_max) + 1, READ_ONCE(mpam_pmg_max) + 1); } void mpam_reset_class(struct mpam_class *class) @@ -2231,7 +2231,7 @@ void mpam_reset_class(struct mpam_class *class) idx = srcu_read_lock(&mpam_srcu); list_for_each_entry_rcu(comp, &class->components, class_list) { - memset(comp->cfg, 0, (mpam_partid_max * sizeof(*comp->cfg))); + memset(comp->cfg, 0, ((mpam_partid_max + 1) * sizeof(*comp->cfg))); list_for_each_entry_rcu(ris, &comp->ris, comp_list) { mutex_lock(&ris->msc->lock); -- Gitee From ef8d774cf57748500130c68bde6484b187ba037a Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Tue, 1 Aug 2023 22:59:59 +0800 Subject: [PATCH 0678/2138] anolis: arm_mpam: Fix schemata's display error of MB when CDP is enabled ANBZ: #8686 If the "MB" in a non-default ctrlmon group is changed when resctrl filesystem is mounted with cdp option, the schemata may display a value that is not consistent with the expected setting. The reason for this error is that MB resource only uses CDP_NONE as its resctrl_conf_type, that makes resctrl_get_config_index return a wrong PARTID when CDP is enabled. The right PARTID should be gotten by CDP_CODE and CDP_DATA. Since the MB setting will keep the same for both code and data, here only use the PARTID of CDP_CODE to get the real setting of MB resource if CDP is enabled. 
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/1983 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_resctrl.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 06af2d77b93e..bc26aaeebc76 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -931,7 +931,10 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); cprops = &res->class->props; - partid = resctrl_get_config_index(closid, type); + if (mpam_resctrl_hide_cdp(r->rid)) + partid = resctrl_get_config_index(closid, CDP_CODE); + else + partid = resctrl_get_config_index(closid, type); cfg = &dom->comp->cfg[partid]; switch (r->rid) { -- Gitee From 5f00f9ed84b98b8692b188aabff89f4f1eefc67c Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Tue, 2 Apr 2024 15:06:34 +0800 Subject: [PATCH 0679/2138] anolis: arm_mpam: Fix the wrong checking when using USE_RMID_IDX ANBZ: #8686 The mon field in struct mon_cfg is of type u16, whose max value is less than USE_RMID_IDX macro. When mon is assigned to USE_RMID_IDX, the value of cfg.mon will always be 0. The if statement will always be false, which is incorrect. Fix it by comparing the original u32 mon to USE_RMID_IDX first. 
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_resctrl.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index bc26aaeebc76..25cf64386b64 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -359,9 +359,10 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, return -EINVAL; } - cfg.mon = mon; - if (cfg.mon == USE_RMID_IDX) + if (mon == USE_RMID_IDX) cfg.mon = resctrl_arch_rmid_idx_encode(closid, rmid); + else + cfg.mon = mon; cfg.match_pmg = true; cfg.pmg = rmid; -- Gitee From 2f97ab09bad74b5b2b5411a2468dc740c783bfcf Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Wed, 24 Jan 2024 18:05:47 +0800 Subject: [PATCH 0680/2138] anolis: ACPI / MPAM: Avoid MPAM MSC has the same identifier ANBZ: #8686 Use an extra msc_num as the MSC device id instead of msc->identifier, since MSC nodes with different types in MPAM ACPI table may have the same identifier value. 
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/acpi/arm64/mpam.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c index 8a63449f27b5..c1f9cafaf1e2 100644 --- a/drivers/acpi/arm64/mpam.c +++ b/drivers/acpi/arm64/mpam.c @@ -194,6 +194,7 @@ static int __init _parse_table(struct acpi_table_header *table) struct resource res[3]; char uid[16]; u32 acpi_id; + int msc_num = 0; table_end = (char *)table + table->length; @@ -220,7 +221,12 @@ static int __init _parse_table(struct acpi_table_header *table) memset(res, 0, sizeof(res)); memset(props, 0, sizeof(props)); - pdev = platform_device_alloc("mpam_msc", tbl_msc->identifier); + /* + * Use an extra msc_num instead of msc->identifier, since MSC + * nodes with different types in MPAM ACPI table may have the + * same id value. + */ + pdev = platform_device_alloc("mpam_msc", msc_num++); if (IS_ERR(pdev)) { err = PTR_ERR(pdev); break; -- Gitee From 51812247d99c86c037c85983c8449025c08c3b0e Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Tue, 9 May 2023 15:01:57 +0800 Subject: [PATCH 0681/2138] anolis: arm_mpam: Identify different types of machines for MPAM implementation specific features ANBZ: #8686 ARM MPAM allows different machines have different MPAM implementation-specific features. To avoid affecting MPAM standard features and distinguish different machines, introduce a new variable mpam_current_machine as a machine identifier, which is based on the information from MPAM ACPI table or the device tree. Now only Yitian710 is supported. Machines without specific features are not affected. 
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2661 [ refactor the code for kernel-6.6 ] Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/acpi/arm64/mpam.c | 67 +++++++++++++++++++++++++++- drivers/platform/mpam/mpam_devices.c | 13 ++++++ include/linux/arm_mpam.h | 15 +++++++ 3 files changed, 93 insertions(+), 2 deletions(-) diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c index c1f9cafaf1e2..92ecfc614d14 100644 --- a/drivers/acpi/arm64/mpam.c +++ b/drivers/acpi/arm64/mpam.c @@ -22,6 +22,26 @@ #define ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER (1<<3) #define ACPI_MPAM_MSC_IRQ_AFFINITY_VALID (1<<4) +/* Use OEM info in MPAM ACPI table to distinguish different machine types */ +struct acpi_mpam_machine_oem_info { + enum mpam_machine_type type; + char signature[ACPI_NAMESEG_SIZE + 1]; + u8 revision; + char oem_id[ACPI_OEM_ID_SIZE + 1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; + u32 oem_revision; +}; + +static struct acpi_mpam_machine_oem_info acpi_mpam_machines[MPAM_NUM_MACHINE_TYPES] = { + [MPAM_YITIAN710] = { + .signature = "YMPM", + .revision = 0, + .oem_id = "PTG ", + .oem_table_id = "PTG01 ", + .oem_revision = 0, + }, +}; + static bool frob_irq(struct platform_device *pdev, int intid, u32 flags, int *irq, u32 processor_container_uid) { @@ -297,18 +317,34 @@ static int __init _parse_table(struct acpi_table_header *table) static struct acpi_table_header *get_table(void) { struct acpi_table_header *table; + enum mpam_machine_type mtype; acpi_status status; if (acpi_disabled || !mpam_cpus_have_feature()) return NULL; - status = acpi_get_table(ACPI_SIG_MPAM, 0, &table); + mtype = acpi_mpam_get_machine_type(); + + if (mtype != MPAM_DEFAULT_MACHINE) + status = acpi_get_table(acpi_mpam_machines[mtype].signature, 0, &table); + else + status = acpi_get_table(ACPI_SIG_MPAM, 0, &table); if (ACPI_FAILURE(status)) return NULL; - if (table->revision != 
1) + if (mtype == MPAM_DEFAULT_MACHINE && table->revision != 1) return NULL; + /* + * Kunpeng's MPAM ACPI adopts an older version of MPAM ACPI, so + * this MPAM ACPI driver is not suitable for Kunpeng platform. + * Skip it. + */ + if (!strncmp(table->oem_id, "HISI", 4)) { + acpi_put_table(table); + return NULL; + } + return table; } @@ -367,6 +403,33 @@ int acpi_mpam_count_msc(void) return ret; } +enum mpam_machine_type acpi_mpam_get_machine_type(void) +{ + struct acpi_table_header *table; + enum mpam_machine_type ret; + acpi_status status; + int i; + + ret = MPAM_DEFAULT_MACHINE; + + for (i = MPAM_DEFAULT_MACHINE + 1; i < MPAM_NUM_MACHINE_TYPES; i++) { + status = acpi_get_table(acpi_mpam_machines[i].signature, 0, &table); + if (ACPI_FAILURE(status)) + continue; + + if (!memcmp(acpi_mpam_machines[i].oem_id, table->oem_id, ACPI_OEM_ID_SIZE) && + !memcmp(acpi_mpam_machines[i].oem_table_id, table->oem_table_id, + ACPI_OEM_TABLE_ID_SIZE) && + acpi_mpam_machines[i].oem_revision == table->oem_revision) { + ret = i; + } + + acpi_put_table(table); + } + + return ret; +} + /* * Call after ACPI devices have been created, which happens behind acpi_scan_init() * called from subsys_initcall(). PCC requires the mailbox driver, which is diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index e22540111d5b..3351dc9d1b1b 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -45,6 +45,8 @@ static LIST_HEAD(mpam_all_msc); struct srcu_struct mpam_srcu; +enum mpam_machine_type mpam_current_machine; + /* MPAM isn't available until all the MSC have been probed. 
*/ static u32 mpam_num_msc; @@ -1561,6 +1563,12 @@ static int mpam_msc_setup_error_irq(struct mpam_msc *msc) return 0; } +static enum mpam_machine_type mpam_dt_get_machine_type(void) +{ + /* FIXME: not supported yet */ + return MPAM_DEFAULT_MACHINE; +} + static int mpam_dt_count_msc(void) { int count = 0; @@ -2386,6 +2394,11 @@ static int __init mpam_msc_driver_init(void) init_srcu_struct(&mpam_srcu); + if (!acpi_disabled) + mpam_current_machine = acpi_mpam_get_machine_type(); + else + mpam_current_machine = mpam_dt_get_machine_type(); + /* * If the MPAM CPU interface is not implemented, or reserved by * firmware, there is no point touching the rest of the hardware. diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 239d27af9e32..5423a2eff810 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -31,11 +31,22 @@ enum mpam_class_types { MPAM_CLASS_UNKNOWN, /* Everything else, e.g. SMMU */ }; +enum mpam_machine_type { + MPAM_DEFAULT_MACHINE, + MPAM_YITIAN710, + + MPAM_NUM_MACHINE_TYPES, +}; + +/* Machine identifier which can be used for vendor-specific MPAM features */ +extern enum mpam_machine_type mpam_current_machine; + #ifdef CONFIG_ACPI_MPAM /* Parse the ACPI description of resources entries for this MSC. 
*/ int acpi_mpam_parse_resources(struct mpam_msc *msc, struct acpi_mpam_msc_node *tbl_msc); int acpi_mpam_count_msc(void); +enum mpam_machine_type acpi_mpam_get_machine_type(void); #else static inline int acpi_mpam_parse_resources(struct mpam_msc *msc, struct acpi_mpam_msc_node *tbl_msc) @@ -43,6 +54,10 @@ static inline int acpi_mpam_parse_resources(struct mpam_msc *msc, return -EINVAL; } static inline int acpi_mpam_count_msc(void) { return -EINVAL; } +static inline enum mpam_machine_type acpi_mpam_get_machine_type(void) +{ + return MPAM_DEFAULT_MACHINE; +} #endif int mpam_register_requestor(u16 partid_max, u8 pmg_max); -- Gitee From cef3d42db0588eb862e18aea2c00b85d36b53e21 Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 26 Feb 2021 20:21:43 +0800 Subject: [PATCH 0682/2138] openEuler: ACPI / PPTT: Filthy hack to find _a_ backwards reference in the PPTT [ROTTEN] ANBZ: #8686 commit 0ed11dc41fe828a3d2b69220347b3a2ed9795ba9 openEuler. hulk inclusion category: feature feature: ARM MPAM support bugzilla: 48265 CVE: NA -------------------------------- The alpha MPAM table contains a pointer to the PPTT cache, which it expects to be unique, which isn't guaranteed. Ideally we'd take a cache-id, but the hardware doesn't have a suitable property, instead arm64 will generate an id from the cpu affinity ids. To find the cache id we need to find the cacheinfo structure, which we can do if we have a pptt cpu_node (different to the cache node), as this is the fw_token used to match the Processor Container that contains all the CPUs that share this cache. How can we find the expected-to-be-unique cpu_node from the cache_node? ... add acpi_pptt_find_cache_backwards() to find a PPTT processor node given a PPTT cache node. This is totally broken as many processor nodes may point at the same PPTT cache indicating different instances of the cache. (e.g. all the L1 caches are the same shape, but they aren't the same cache). 
This only works if you cooked your PPTT table to look like this. Signed-off-by: James Morse # ... but its still GPLv2 Signed-off-by: Wang ShaoBo Reviewed-by: Xie XiuQi Signed-off-by: Yang Yingliang Reviewed-by: Cheng Jian Signed-off-by: Zheng Zengkai Signed-off-by: Xin Hao Reviewed-by: Baolin Wang Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/acpi/pptt.c | 48 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/acpi.h | 4 ++++ 2 files changed, 52 insertions(+) diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index 201df60e4a79..66b637502595 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -296,6 +296,54 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he return NULL; } +/* + * acpi_pptt_find_cache_backwards() - Given a PPTT cache find a processor node + * that points to it. This lets us find a cacheinfo node by fw_token, but + * is totally broken as many processor node may point at the same PPTT + * cache indicating different instances of the cache. (e.g. all the L1 + * caches are the same shape, but they aren't the same cache). + * This only works if you cooked your PPTT table to look like this. 
+ */ +struct acpi_pptt_processor * +acpi_pptt_find_cache_backwards(struct acpi_table_header *table_hdr, + struct acpi_pptt_cache *cache) +{ + struct acpi_pptt_processor *cpu_node; + struct acpi_subtable_header *entry; + struct acpi_subtable_header *res; + unsigned long table_end; + u32 proc_sz; + int i; + + table_end = (unsigned long)table_hdr + table_hdr->length; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + proc_sz = sizeof(struct acpi_pptt_processor *); + + /* find the processor structure which points at with this cpuid */ + while ((unsigned long)entry + proc_sz < table_end) { + if (entry->length == 0) { + pr_warn("Invalid zero length subtable\n"); + break; + } + + cpu_node = (struct acpi_pptt_processor *)entry; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, + entry->length); + + if (cpu_node->header.type != ACPI_PPTT_TYPE_PROCESSOR) + continue; + + for (i = 0; i < cpu_node->number_of_priv_resources; i++) { + res = acpi_get_pptt_resource(table_hdr, cpu_node, i); + if (&cache->header == res) + return cpu_node; + } + } + + return NULL; +} + /* parent_node points into the table, but the table isn't provided. 
*/ static void acpi_pptt_get_child_cpus(struct acpi_pptt_processor *parent_node, cpumask_t *cpus) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index fc4cc82128f0..3b5d2eef29eb 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1565,4 +1565,8 @@ static inline void acpi_device_notify(struct device *dev) { } static inline void acpi_device_notify_remove(struct device *dev) { } #endif +struct acpi_pptt_processor * +acpi_pptt_find_cache_backwards(struct acpi_table_header *table_hdr, + struct acpi_pptt_cache *cache); + #endif /*_LINUX_ACPI_H*/ -- Gitee From 5ee6998b32a708e99546178d9c75bb5b85216e57 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Thu, 14 Jul 2022 14:53:12 +0800 Subject: [PATCH 0683/2138] anolis: ACPI / PPTT: Add a helper to fill a cpumask from a cache with specific id and level ANBZ: #8686 Function `acpi_pptt_get_cpumask_from_cache_id()` assume each cache has different cache id. But in the implementation, caches in different levels may have the same id. So we add a new helper function to get the cpumask of a cache with specific id and level. Signed-off-by: Shawn Wang Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/acpi/pptt.c | 60 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/acpi.h | 8 ++++++ 2 files changed, 68 insertions(+) diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index 66b637502595..fa40ed82c198 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -1134,3 +1134,63 @@ int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus) acpi_put_table(table); return 0; } + +/** + * acpi_pptt_get_cpumask_from_cache_id_and_level() - Get the cpus associated with the + * cache specified by id and level + * @cache_id: The id field of the unified cache + * @cache_level: The level of the unified cache + * @cpus: Where to buidl the cpumask + * + * Determine which CPUs are below this cache in the PPTT. 
This allows the property + * to be found even if the CPUs are offline. + * + * The PPTT table must be rev 3 or later, + * + * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found. + * Otherwise returns 0 and sets the cpus in the provided cpumask. + */ +int acpi_pptt_get_cpumask_from_cache_id_and_level(u32 cache_id, u32 cache_level, + cpumask_t *cpus) +{ + u32 acpi_cpu_id; + acpi_status status; + int cpu; + struct acpi_table_header *table; + struct acpi_pptt_cache *cache_node; + struct acpi_pptt_processor *cpu_node; + + cpumask_clear(cpus); + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + acpi_pptt_warn_missing(); + return -ENOENT; + } + + if (table->revision < 3) { + acpi_put_table(table); + return -ENOENT; + } + + for_each_possible_cpu(cpu) { + acpi_cpu_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node) + continue; + + cache_node = acpi_find_cache_node(table, acpi_cpu_id, + ACPI_PPTT_CACHE_TYPE_UNIFIED, + cache_level, &cpu_node); + + if (!cache_node) + continue; + + cpu_node = acpi_pptt_find_cache_backwards(table, cache_node); + if (cpu_node->acpi_processor_id == cache_id) + cpumask_set_cpu(cpu, cpus); + } + + acpi_put_table(table); + return 0; +} diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 3b5d2eef29eb..487fc3f49e6e 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1498,6 +1498,8 @@ int find_acpi_cpu_topology_hetero_id(unsigned int cpu); int find_acpi_cache_level_from_id(u32 cache_id); int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus); int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus); +int acpi_pptt_get_cpumask_from_cache_id_and_level(u32 cache_id, u32 cache_level, + cpumask_t *cpus); #else static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) { @@ -1533,6 +1535,12 @@ static inline int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, { return -EINVAL; } +static inline 
int acpi_pptt_get_cpumask_from_cache_id_and_level(u32 cache_id, + u32 cache_level, + cpumask_t *cpus) +{ + return -EINVAL; +} #endif #ifdef CONFIG_ARM64 -- Gitee From cb68f92fb57daa9ac530b82cf47b591c1ede765b Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Wed, 27 Jul 2022 15:42:18 +0800 Subject: [PATCH 0684/2138] anolis: ACPI / PPTT: Downgrade the revision requirement in acpi_pptt_get_cpumask_from_cache_id_and_level ANBZ: #8686 Since function `acpi_pptt_get_cpumask_from_cache_id_and_level()` does not really use the cache id in PPTT, in order to maintain compatibility with the old revision, we downgrade the revision requirement from 3 to 2. Signed-off-by: Shawn Wang Signed-off-by: Xin Hao Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/580 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/acpi/pptt.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index fa40ed82c198..56b53d45dadd 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -1168,7 +1168,11 @@ int acpi_pptt_get_cpumask_from_cache_id_and_level(u32 cache_id, u32 cache_level, return -ENOENT; } - if (table->revision < 3) { + /* + * FIXME: Since this function does not actually use the cache id in the + * PPTT table, we downgrade the revision requirement. + */ + if (table->revision < 2) { acpi_put_table(table); return -ENOENT; } -- Gitee From a323f9105179d45eb38a4ca97de71ca348d6fabd Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Tue, 23 Jan 2024 13:35:33 +0800 Subject: [PATCH 0685/2138] anolis: arm_mpam: Add cache msc info parsing for Yitian710 specific MPAM ACPI table ANBZ: #8686 Since Yitian710 uses an specific MPAM ACPI table for cache msc, which may contains the same cache id at different cache levels. So it is conflict with the code of git://git.kernel.org/pub/scm/linux/kernel/git/morse/linux.git. 
So we identify the current machine and use some specific functions to parse the cache level and its cpumasks for Yitian710 platform. Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2661 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/acpi/arm64/mpam.c | 17 +++++++++++++---- drivers/platform/mpam/mpam_devices.c | 6 +++++- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c index 92ecfc614d14..7a4bd70972c2 100644 --- a/drivers/acpi/arm64/mpam.c +++ b/drivers/acpi/arm64/mpam.c @@ -124,10 +124,19 @@ static int acpi_mpam_parse_resource(struct mpam_msc *msc, switch (res->locator_type) { case ACPI_MPAM_LOCATION_TYPE_PROCESSOR_CACHE: cache_id = res->locator.cache_locator.cache_reference; - level = find_acpi_cache_level_from_id(cache_id); - if (level < 0) { - pr_err_once("Bad level for cache with id %u\n", cache_id); - return level; + if (mpam_current_machine == MPAM_YITIAN710) { + /* + * YITIAN710's BIOS doesn't support find level from + * cache id. Since it only supports L3 cache, use a + * fixed value, 3. 
+ */ + level = 3; + } else { + level = find_acpi_cache_level_from_id(cache_id); + if (level < 0) { + pr_err_once("Bad level for cache with id %u\n", cache_id); + return level; + } } return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE, level, cache_id); diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 3351dc9d1b1b..b6672b7881f7 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -367,8 +367,12 @@ static int get_cpumask_from_cache_id(u32 cache_id, u32 cache_level, int iter_cache_id; struct device_node *iter; - if (!acpi_disabled) + if (!acpi_disabled) { + if (mpam_current_machine == MPAM_YITIAN710) + return acpi_pptt_get_cpumask_from_cache_id_and_level( + cache_id, cache_level, affinity); return acpi_pptt_get_cpumask_from_cache_id(cache_id, affinity); + } for_each_possible_cpu(cpu) { iter = of_get_cpu_node(cpu, NULL); -- Gitee From 93947d759c62b8dd27d818f6216e96c2978ac8fe Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Mon, 8 May 2023 16:05:32 +0800 Subject: [PATCH 0686/2138] anolis: arm_mpam: Add supportion for implementation-defined MB monitoring ANBZ: #8686 Since Yitian710 uses an implementation-defined MB monitoring feature, introduce a new feature named mpam_feat_impl_msmon_mbwu to differentiate it from the standard MB monitoring feature. 
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2661 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/acpi/arm64/mpam.c | 4 ++ drivers/platform/mpam/mpam_devices.c | 56 ++++++++++++++++++++++++++- drivers/platform/mpam/mpam_internal.h | 8 ++++ 3 files changed, 66 insertions(+), 2 deletions(-) diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c index 7a4bd70972c2..153ef041abf0 100644 --- a/drivers/acpi/arm64/mpam.c +++ b/drivers/acpi/arm64/mpam.c @@ -22,6 +22,8 @@ #define ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER (1<<3) #define ACPI_MPAM_MSC_IRQ_AFFINITY_VALID (1<<4) +int ddrc_freq; + /* Use OEM info in MPAM ACPI table to distinguish different machine types */ struct acpi_mpam_machine_oem_info { enum mpam_machine_type type; @@ -141,6 +143,8 @@ static int acpi_mpam_parse_resource(struct mpam_msc *msc, return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE, level, cache_id); case ACPI_MPAM_LOCATION_TYPE_MEMORY: + if (mpam_current_machine == MPAM_YITIAN710) + ddrc_freq = res->locator.memory_locator.reserved; return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_MEMORY, 255, res->locator.memory_locator.proximity_domain); default: diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index b6672b7881f7..0827f2c61f91 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -35,6 +35,8 @@ #include "mpam_internal.h" +extern int ddrc_freq; + /* * mpam_list_lock protects the SRCU lists when writing. 
Once the * mpam_enabled key is enabled these lists are read-only, @@ -692,6 +694,10 @@ static void mpam_ris_hw_probe(struct mpam_msc_ris *ris) } } + if (FIELD_GET(MPAMF_IDR_HAS_IMPL_IDR, ris->idr)) + if (mpam_current_machine == MPAM_YITIAN710 && class->type == MPAM_CLASS_MEMORY) + mpam_set_feature(mpam_feat_impl_msmon_mbwu, props); + /* * RIS with PARTID narrowing don't have enough storage for one * configuration per PARTID. If these are in a class we could use, @@ -1015,6 +1021,45 @@ static void __ris_msmon_read(void *arg) *(m->val) += now; } +static void __ris_impl_msmon_read(void *arg) +{ + unsigned long flags; + struct mon_read *m = arg; + u64 mb_val = 0; + struct mon_cfg *ctx = m->ctx; + struct mpam_msc *msc = m->ris->msc; + u32 custom_reg_base_addr, cycle, val; + + lockdep_assert_held(&msc->lock); + if (m->type != mpam_feat_impl_msmon_mbwu) + return; + + /* Other machine can extend this function */ + if (mpam_current_machine != MPAM_YITIAN710) + return; + + spin_lock_irqsave(&msc->part_sel_lock, flags); + + __mpam_write_reg(msc, MPAMCFG_PART_SEL, ctx->mon); + + custom_reg_base_addr = __mpam_read_reg(msc, MPAMF_IMPL_IDR); + + cycle = __mpam_read_reg(msc, custom_reg_base_addr + MPAMF_CUST_WINDW_OFFSET); + val = __mpam_read_reg(msc, custom_reg_base_addr + MPAMF_CUST_MBWC_OFFSET); + + spin_unlock_irqrestore(&msc->part_sel_lock, flags); + + if (val & MSMON___NRDY) { + m->err = -EBUSY; + return; + } + + mb_val = MBWU_GET(val); + + mb_val = mb_val * 32 * ddrc_freq * 1000000 / cycle; /* B/s */ + *(m->val) += mb_val; +} + static int _msmon_read(struct mpam_component *comp, struct mon_read *arg) { int err, idx; @@ -1027,8 +1072,15 @@ static int _msmon_read(struct mpam_component *comp, struct mon_read *arg) msc = ris->msc; mutex_lock(&msc->lock); - err = smp_call_function_any(&msc->accessibility, - __ris_msmon_read, arg, true); + if (arg->type == mpam_feat_msmon_csu || + arg->type == mpam_feat_msmon_mbwu) + err = smp_call_function_any(&msc->accessibility, + 
__ris_msmon_read, arg, true); + else if (arg->type == mpam_feat_impl_msmon_mbwu) + err = smp_call_function_any(&msc->accessibility, + __ris_impl_msmon_read, arg, true); + else + err = -EOPNOTSUPP; mutex_unlock(&msc->lock); if (!err && arg->err) err = arg->err; diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h index 014524e5fa4f..d84413e5e031 100644 --- a/drivers/platform/mpam/mpam_internal.h +++ b/drivers/platform/mpam/mpam_internal.h @@ -108,6 +108,7 @@ enum mpam_device_features { mpam_feat_msmon_mbwu_capture, mpam_feat_msmon_mbwu_rwbw, mpam_feat_msmon_capt, + mpam_feat_impl_msmon_mbwu, mpam_feat_partid_nrw, MPAM_FEATURE_LAST, }; @@ -574,4 +575,11 @@ void mpam_resctrl_exit(void); */ #define MSMON_CAPT_EVNT_NOW BIT(0) +/* Used for PTG Yitian710 specific MB monitoring feature */ +#define MBWU_MASK GENMASK(23, 0) +#define MBWU_WINWD_MAX GENMASK(22, 0) +#define MBWU_GET(v) ((v) & MBWU_MASK) +#define MPAMF_CUST_MBWC_OFFSET 0x08 +#define MPAMF_CUST_WINDW_OFFSET 0x0C + #endif /* MPAM_INTERNAL_H */ -- Gitee From 14142e2b60424467983dfb2a030313ac83d203e5 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Mon, 8 May 2023 16:20:42 +0800 Subject: [PATCH 0687/2138] anolis: fs/resctrl: Add a new resctrl monitoring event to get MB in Bps ANBZ: #8686 Some platforms like Yitian710 can get the memory bandwidth of a specific PARTID in Bps directly, while current resctrl file system only support mbm_{local,total}_bytes as counters in bytes. Add a new resctrl monitoring event mbm_Bps to support this feature. To avoid introducing a new interface, remains the name "mbm_local_bytes" instead of "mbm_Bps" as before. 
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2661 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- arch/x86/include/asm/resctrl.h | 5 +++++ arch/x86/kernel/cpu/resctrl/monitor.c | 2 ++ drivers/platform/mpam/mpam_resctrl.c | 29 +++++++++++++++++++++++++++ fs/resctrl/monitor.c | 16 +++++++++++++++ fs/resctrl/rdtgroup.c | 5 +++-- include/linux/arm_mpam.h | 1 + include/linux/resctrl_types.h | 3 +++ 7 files changed, 59 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 746431c66fc4..f159bddbec51 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -99,6 +99,11 @@ static inline bool resctrl_arch_is_mbm_local_enabled(void) return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID)); } +static inline bool resctrl_arch_is_mbm_bps_enabled(void) +{ + return false; +} + /* * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR * diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 02fb9d87479a..e0cc1b499279 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -134,6 +134,8 @@ static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom, return &hw_dom->arch_mbm_total[rmid]; case QOS_L3_MBM_LOCAL_EVENT_ID: return &hw_dom->arch_mbm_local[rmid]; + default: + break; } /* Never expect to get here */ diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c index 25cf64386b64..98b3b1baa91e 100644 --- a/drivers/platform/mpam/mpam_resctrl.c +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -35,6 +35,7 @@ static bool exposed_alloc_capable; static bool exposed_mon_capable; static struct mpam_class *mbm_local_class; static struct mpam_class *mbm_total_class; +static struct mpam_class *mbm_bps_class; /* * MPAM emulates CDP by setting different PARTID in the 
I/D fields of MPAM1_EL1. @@ -79,6 +80,11 @@ bool resctrl_arch_is_mbm_total_enabled(void) return mbm_total_class; } +bool resctrl_arch_is_mbm_bps_enabled(void) +{ + return mbm_bps_class; +} + bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level rid) { switch (rid) { @@ -272,6 +278,10 @@ static void *resctrl_arch_mon_ctx_alloc_no_wait(struct rdt_resource *r, case QOS_L3_MBM_LOCAL_EVENT_ID: case QOS_L3_MBM_TOTAL_EVENT_ID: return mon_is_rmid_idx; + case QOS_MC_MBM_BPS_EVENT_ID: + if (mpam_current_machine == MPAM_YITIAN710) + return mon_is_rmid_idx; + return ERR_PTR(-EOPNOTSUPP); } return ERR_PTR(-EOPNOTSUPP); @@ -316,6 +326,7 @@ void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, return; case QOS_L3_MBM_TOTAL_EVENT_ID: case QOS_L3_MBM_LOCAL_EVENT_ID: + case QOS_MC_MBM_BPS_EVENT_ID: return; } } @@ -355,6 +366,10 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, case QOS_L3_MBM_TOTAL_EVENT_ID: type = mpam_feat_msmon_mbwu; break; + case QOS_MC_MBM_BPS_EVENT_ID: + if (mpam_current_machine == MPAM_YITIAN710) + type = mpam_feat_impl_msmon_mbwu; + break; default: return -EINVAL; } @@ -487,6 +502,16 @@ static bool class_has_usable_mbwu(struct mpam_class *class) return (mpam_partid_max > 1) || (mpam_pmg_max != 0); } +static bool class_has_usable_impl_mbwu(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_impl_msmon_mbwu, cprops)) + return false; + + return true; +} + static bool mba_class_use_mbw_part(struct mpam_props *cprops) { /* TODO: Scaling is not yet supported */ @@ -832,6 +857,10 @@ static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res) if (has_mbwu && class->type == MPAM_CLASS_MEMORY) { mbm_total_class = class; r->mon_capable = true; + } else if (class_has_usable_impl_mbwu(class)) { + r->mon_capable = true; + if (mpam_current_machine == MPAM_YITIAN710) + mbm_bps_class = class; } } diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c index 
06f660dfd929..51baa0f71b65 100644 --- a/fs/resctrl/monitor.c +++ b/fs/resctrl/monitor.c @@ -790,6 +790,11 @@ static struct mon_evt mbm_local_event = { .evtid = QOS_L3_MBM_LOCAL_EVENT_ID, }; +static struct mon_evt mbm_bps_event = { + .name = "mbm_local_bytes", + .evtid = QOS_MC_MBM_BPS_EVENT_ID, +}; + /* * Initialize the event list for the resource. * @@ -809,6 +814,14 @@ static void l3_mon_evt_init(struct rdt_resource *r) list_add_tail(&mbm_local_event.list, &r->evt_list); } +static void mc_mon_evt_init(struct rdt_resource *r) +{ + INIT_LIST_HEAD(&r->evt_list); + + if (resctrl_arch_is_mbm_bps_enabled()) + list_add_tail(&mbm_bps_event.list, &r->evt_list); +} + int resctrl_mon_resource_init(void) { struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); @@ -832,6 +845,9 @@ int resctrl_mon_resource_init(void) mbm_config_rftype_init("mbm_local_bytes_config"); } + r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + mc_mon_evt_init(r); + return 0; } diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c index ea969ddb1a9d..e39f22453d84 100644 --- a/fs/resctrl/rdtgroup.c +++ b/fs/resctrl/rdtgroup.c @@ -113,13 +113,14 @@ void rdt_staged_configs_clear(void) static bool resctrl_is_mbm_enabled(void) { return (resctrl_arch_is_mbm_total_enabled() || - resctrl_arch_is_mbm_local_enabled()); + resctrl_arch_is_mbm_local_enabled() || + resctrl_arch_is_mbm_bps_enabled()); } static bool resctrl_is_mbm_event(int e) { return (e >= QOS_L3_MBM_TOTAL_EVENT_ID && - e <= QOS_L3_MBM_LOCAL_EVENT_ID); + e <= QOS_MC_MBM_BPS_EVENT_ID); } /* diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h index 5423a2eff810..660776491941 100644 --- a/include/linux/arm_mpam.h +++ b/include/linux/arm_mpam.h @@ -81,6 +81,7 @@ bool resctrl_arch_mon_capable(void); bool resctrl_arch_is_llc_occupancy_enabled(void); bool resctrl_arch_is_mbm_local_enabled(void); bool resctrl_arch_is_mbm_total_enabled(void); +bool resctrl_arch_is_mbm_bps_enabled(void); /* reset cached configurations, then 
all devices */ void resctrl_arch_reset_resources(void); diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h index fe0b10b589c0..a0d8694be783 100644 --- a/include/linux/resctrl_types.h +++ b/include/linux/resctrl_types.h @@ -93,6 +93,9 @@ enum resctrl_event_id { QOS_L3_OCCUP_EVENT_ID = 0x01, QOS_L3_MBM_TOTAL_EVENT_ID = 0x02, QOS_L3_MBM_LOCAL_EVENT_ID = 0x03, + QOS_MC_MBM_BPS_EVENT_ID = 0x04, }; +#define RESCTRL_MAX_EVENT_NUM 4 + #endif /* __LINUX_RESCTRL_TYPES_H */ -- Gitee From def1a4c64b59aa5dd64bc2a55377b45fe06bc163 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Tue, 16 May 2023 17:58:10 +0800 Subject: [PATCH 0688/2138] anolis: arm_mpam: Maximize Yitian710's MB monitoring window width ANBZ: #8686 To improve the vendor-defined MB monitoring accuracy on Yitian710, maximize the MB monitoring window width on reprogramming. Signed-off-by: Shawn Wang Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Reviewed-by: Baolin Wang Reviewed-by: Xin Hao Link: https://gitee.com/anolis/cloud-kernel/pulls/1643 [ add machine type checking ] Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- drivers/platform/mpam/mpam_devices.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 0827f2c61f91..e1a1dee8578c 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -1220,6 +1220,7 @@ static void mpam_reprogram_ris_partid(struct mpam_msc_ris *ris, u16 partid, struct mpam_props *rprops = &ris->props; u16 dspri = GENMASK(rprops->dspri_wd, 0); u16 intpri = GENMASK(rprops->intpri_wd, 0); + u32 custom_reg_base_addr; spin_lock(&msc->part_sel_lock); __mpam_part_sel(ris->ris_idx, partid, msc); @@ -1276,6 +1277,15 @@ static void mpam_reprogram_ris_partid(struct mpam_msc_ris *ris, u16 partid, mpam_write_partsel_reg(msc, PRI, pri_val); } + if (FIELD_GET(MPAMF_IDR_HAS_IMPL_IDR, ris->idr)) { + if (mpam_current_machine 
== MPAM_YITIAN710) { + custom_reg_base_addr = __mpam_read_reg(msc, MPAMF_IMPL_IDR); + __mpam_write_reg(msc, custom_reg_base_addr + + MPAMF_CUST_WINDW_OFFSET, + MBWU_WINWD_MAX); + } + } + spin_unlock(&msc->part_sel_lock); } -- Gitee From 97433c0f96b8d03fdfa3c9486ff7f79359321a80 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Sun, 7 Apr 2024 10:42:41 +0800 Subject: [PATCH 0689/2138] anolis: arm_mpam: Fix kernel boot failure when the firmware does not support MPAM ANBZ: #8686 Accessing MPAM registers is forbidden when the firmware disables MPAM, even if MPAM is shown in ID_AA64PFR0_EL1 register. In such firmware, whether MPAM is disabled can only be known by the MPAM ACPI table. Since some MPAM registers are accessed before parsing the MPAM table in current implementation, when the kernel is booted from a firmware disabling MPAM, boot failure will occur. Fix it by removing all MPAM registers accessing before the MPAM table is parsed. Signed-off-by: Shawn Wang Reviewed-by: Xin Hao Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/780 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- arch/arm64/include/asm/cpu.h | 1 - arch/arm64/include/asm/cpufeature.h | 6 --- arch/arm64/kernel/cpufeature.c | 62 +--------------------------- arch/arm64/kernel/cpuinfo.c | 4 -- arch/arm64/kernel/mpam.c | 10 ----- drivers/platform/mpam/mpam_devices.c | 40 +++++++++++++----- 6 files changed, 31 insertions(+), 92 deletions(-) diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h index 1cb5bafd9238..e749838b9c5d 100644 --- a/arch/arm64/include/asm/cpu.h +++ b/arch/arm64/include/asm/cpu.h @@ -47,7 +47,6 @@ struct cpuinfo_arm64 { u64 reg_revidr; u64 reg_gmid; u64 reg_smidr; - u64 reg_mpamidr; u64 reg_id_aa64dfr0; u64 reg_id_aa64dfr1; diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index e873848ad9d9..24c2564268e5 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ 
b/arch/arm64/include/asm/cpufeature.h @@ -838,12 +838,6 @@ static inline bool system_supports_tlb_range(void) cpus_have_const_cap(ARM64_HAS_TLB_RANGE); } -static inline bool cpus_support_mpam(void) -{ - return IS_ENABLED(CONFIG_ARM64_MPAM) && - cpus_have_final_cap(ARM64_MPAM); -} - int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt); bool try_emulate_mrs(struct pt_regs *regs, u32 isn); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 8ec1c9dd3644..148986926ed9 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -84,7 +84,6 @@ #include #include #include -#include #include #include #include @@ -624,18 +623,6 @@ static const struct arm64_ftr_bits ftr_smcr[] = { ARM64_FTR_END, }; -static const struct arm64_ftr_bits ftr_mpamidr[] = { - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, - MPAMIDR_PMG_MAX_SHIFT, MPAMIDR_PMG_MAX_LEN, 0), /* PMG_MAX */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, - MPAMIDR_VPMR_MAX_SHIFT, MPAMIDR_VPMR_MAX_LEN, 0), /* VPMR_MAX */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, - MPAMIDR_HAS_HCR_SHIFT, 1, 0), /* HAS_HCR */ - ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, - MPAMIDR_PARTID_MAX_SHIFT, MPAMIDR_PARTID_MAX_LEN, 0), /* PARTID_MAX */ - ARM64_FTR_END, -}; - /* * Common ftr bits for a 32bit register with all hidden, strict * attributes, with 4bit feature fields and a default safe value of @@ -752,9 +739,6 @@ static const struct __ftr_reg_entry { ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr), ARM64_FTR_REG(SYS_SMCR_EL1, ftr_smcr), - /* Op1 = 0, CRn = 10, CRm = 4 */ - ARM64_FTR_REG(SYS_MPAMIDR_EL1, ftr_mpamidr), - /* Op1 = 1, CRn = 0, CRm = 0 */ ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid), @@ -1074,9 +1058,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info) vec_init_vq_map(ARM64_VEC_SME); } - if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) - init_cpu_ftr_reg(SYS_MPAMIDR_EL1, info->reg_mpamidr); - if 
(id_aa64pfr1_mte(info->reg_id_aa64pfr1)) init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid); @@ -1336,11 +1317,6 @@ void update_cpu_features(int cpu, vec_update_vq_map(ARM64_VEC_SME); } - if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) { - taint |= check_update_ftr_reg(SYS_MPAMIDR_EL1, cpu, - info->reg_mpamidr, boot->reg_mpamidr); - } - /* * The kernel uses the LDGM/STGM instructions and the number of tags * they read/write depends on the GMID_EL1.BS field. Check that the @@ -2274,39 +2250,6 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap) return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT); } -static bool __maybe_unused -test_has_mpam(const struct arm64_cpu_capabilities *entry, int scope) -{ - if (!has_cpuid_feature(entry, scope)) - return false; - - /* Check firmware actually enabled MPAM on this cpu. */ - return (read_sysreg_s(SYS_MPAM1_EL1) & MPAM_SYSREG_EN); -} - -static void __maybe_unused -cpu_enable_mpam(const struct arm64_cpu_capabilities *entry) -{ - /* - * Access by the kernel (at EL1) should use the reserved PARTID - * which is configured unrestricted. This avoids priority-inversion - * where latency sensitive tasks have to wait for a task that has - * been throttled to release the lock. 
- */ - write_sysreg_s(0, SYS_MPAM1_EL1); -} - -static void mpam_extra_caps(void) -{ - u64 idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1); - - if (!IS_ENABLED(CONFIG_ARM64_MPAM)) - return; - - if (idr & MPAMIDR_HAS_HCR) - __enable_mpam_hcr(); -} - static const struct arm64_cpu_capabilities arm64_features[] = { { .capability = ARM64_ALWAYS_BOOT, @@ -2792,8 +2735,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .desc = "Memory Partitioning And Monitoring", .type = ARM64_CPUCAP_SYSTEM_FEATURE, .capability = ARM64_MPAM, - .matches = test_has_mpam, - .cpu_enable = cpu_enable_mpam, + .matches = has_cpuid_feature, ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, MPAM, 1) }, #endif @@ -3445,8 +3387,6 @@ void __init setup_cpu_features(void) if (!cwg) pr_warn("No Cache Writeback Granule information, assuming %d\n", ARCH_DMA_MINALIGN); - - mpam_extra_caps(); } static int enable_mismatched_32bit_el0(unsigned int cpu) diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 1b1fe0f58a86..98fda8500535 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -460,10 +460,6 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) __cpuinfo_store_cpu_32bit(&info->aarch32); - if (IS_ENABLED(CONFIG_ARM64_MPAM) && - id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) - info->reg_mpamidr = read_cpuid(MPAMIDR_EL1); - cpuinfo_detect_icache_policy(info); } diff --git a/arch/arm64/kernel/mpam.c b/arch/arm64/kernel/mpam.c index 02f43334f078..134b44118553 100644 --- a/arch/arm64/kernel/mpam.c +++ b/arch/arm64/kernel/mpam.c @@ -11,13 +11,3 @@ DEFINE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); DEFINE_STATIC_KEY_FALSE(mpam_enabled); DEFINE_PER_CPU(u64, arm64_mpam_default); DEFINE_PER_CPU(u64, arm64_mpam_current); - -static int __init arm64_mpam_register_cpus(void) -{ - u64 mpamidr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1); - u16 partid_max = FIELD_GET(MPAMIDR_PARTID_MAX, mpamidr); - u8 pmg_max = 
FIELD_GET(MPAMIDR_PMG_MAX, mpamidr); - - return mpam_register_requestor(partid_max, pmg_max); -} -arch_initcall(arm64_mpam_register_cpus) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index e1a1dee8578c..906f8a6b6940 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -2451,9 +2451,19 @@ static void mpam_dt_create_foundling_msc(void) } } +static int __init arm64_mpam_register_cpus(void) +{ + u64 mpamidr = read_sysreg_s(SYS_MPAMIDR_EL1); + u16 partid_max = FIELD_GET(MPAMIDR_PARTID_MAX, mpamidr); + u8 pmg_max = FIELD_GET(MPAMIDR_PMG_MAX, mpamidr); + + return mpam_register_requestor(partid_max, pmg_max); +} + static int __init mpam_msc_driver_init(void) { bool mpam_not_available = false; + int err; if (!mpam_cpus_have_feature()) return -EOPNOTSUPP; @@ -2465,6 +2475,26 @@ static int __init mpam_msc_driver_init(void) else mpam_current_machine = mpam_dt_get_machine_type(); + if (!acpi_disabled) + fw_num_msc = acpi_mpam_count_msc(); + else + fw_num_msc = mpam_dt_count_msc(); + + if (fw_num_msc <= 0) { + pr_err("No MSC devices found in firmware\n"); + return -EINVAL; + } + + /* + * Access MPAM system registers after MPAM ACPI table is parsed, since + * some BIOSs disable MPAM system registers accessing but export MPAM in + * ID_AA64PFR0_EL1. So we can only rely on the MPAM ACPI table to + * determine whether MPAM feature is enabled. + */ + err = arm64_mpam_register_cpus(); + if (err) + return err; + /* * If the MPAM CPU interface is not implemented, or reserved by * firmware, there is no point touching the rest of the hardware. 
@@ -2477,16 +2507,6 @@ static int __init mpam_msc_driver_init(void) if (mpam_not_available) return 0; - if (!acpi_disabled) - fw_num_msc = acpi_mpam_count_msc(); - else - fw_num_msc = mpam_dt_count_msc(); - - if (fw_num_msc <= 0) { - pr_err("No MSC devices found in firmware\n"); - return -EINVAL; - } - if (acpi_disabled) mpam_dt_create_foundling_msc(); -- Gitee From 3a5df0b78e5ec53dfe9721694de6d5e9519bb69c Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Wed, 15 Nov 2023 15:39:44 +0800 Subject: [PATCH 0690/2138] anolis: KVM: arm64: Only access MPAM registers when MPAM is enabled ANBZ: #8686 The commit dda408b60b40 ("KVM: arm64: Trap guest accesses to the MPAM registers") will access MPAM registers like MPAM2_EL2 when mpam_cpus_have_feature() returns true. However, mpam_cpus_have_feature() only checks the MPAM bits in ID_AA64PFR0_EL1. But with some BIOSes, it is also necessary to add a check for the existence of the MPAM ACPI table. Otherwise, access to these registers will cause the system to crash. To fix this error, add a condition on mpam_enabled. mpam_enabled is only true if the MPAM ACPI table exists and the host kernel enables MPAM. 
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2437 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- arch/arm64/kernel/image-vars.h | 1 + arch/arm64/kvm/hyp/include/hyp/switch.h | 4 ++-- arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 6 ++++-- arch/arm64/kvm/hyp/nvhe/switch.c | 3 ++- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index d10d3fed31d9..6999668e9ecf 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -67,6 +67,7 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors); /* Additional static keys for cpufeatures */ #ifdef CONFIG_ARM64_MPAM KVM_NVHE_ALIAS(arm64_mpam_has_hcr); +KVM_NVHE_ALIAS(mpam_enabled); #endif /* Static keys which are set if a vGIC trap should be handled in hyp. */ diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 657320f453e6..da30acce6308 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -177,7 +177,7 @@ static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu) { u64 r = MPAM_SYSREG_TRAP_MPAM0_EL1 | MPAM_SYSREG_TRAP_MPAM1_EL1; - if (!mpam_cpus_have_feature()) + if (!mpam_cpus_have_feature() || !static_branch_likely(&mpam_enabled)) return; /* trap guest access to MPAMIDR_EL1 */ @@ -193,7 +193,7 @@ static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu) static inline void __deactivate_traps_mpam(void) { - if (!mpam_cpus_have_feature()) + if (!mpam_cpus_have_feature() || !static_branch_likely(&mpam_enabled)) return; write_sysreg_s(0, SYS_MPAM2_EL2); diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h index c8767abd693e..8e99f66b377b 100644 --- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h @@ -250,7 +250,8 @@ static inline void 
__sysreg32_restore_state(struct kvm_vcpu *vcpu) */ static inline void __mpam_guest_load(void) { - if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature()) + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature() && + static_branch_likely(&mpam_enabled)) write_sysreg_el1(read_sysreg_s(SYS_MPAM0_EL1), SYS_MPAM1); } @@ -264,7 +265,8 @@ static inline void __mpam_guest_put(void) u64 val, mask = MPAM_SYSREG_PMG_D | MPAM_SYSREG_PMG_I | MPAM_SYSREG_PARTID_D | MPAM_SYSREG_PARTID_I; - if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature()) { + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature() && + static_branch_likely(&mpam_enabled)) { val = FIELD_GET(mask, read_sysreg_s(SYS_MPAM2_EL2)); write_sysreg_el1(val, SYS_MPAM1); } diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index c2118f658e22..04b7f83c2ae3 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -245,7 +245,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) /* Use the host thread's partid and pmg for world switch */ static void __mpam_copy_el1_to_el2(void) { - if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature()) + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature() && + static_branch_likely(&mpam_enabled)) write_sysreg_s(read_sysreg_s(SYS_MPAM1_EL1), SYS_MPAM2_EL2); } -- Gitee From fb204e16978d0cf4c2c152ae9ae36cba3f58fee4 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Sun, 7 Apr 2024 14:25:42 +0800 Subject: [PATCH 0691/2138] anolis: configs: arm64: Enable ARM64_MPAM ANBZ: #8686 Enable ARM64_MPAM to use it for cache and memory bandwidth resource partitioning and monitoring. 
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3012 --- arch/arm64/configs/anolis-debug_defconfig | 1 + arch/arm64/configs/anolis_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 97230e6e79b5..1c039289682b 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -500,6 +500,7 @@ CONFIG_AS_HAS_LDAPR=y CONFIG_ARM64_AMU_EXTN=y CONFIG_AS_HAS_ARMV8_4=y CONFIG_ARM64_TLB_RANGE=y +CONFIG_ARM64_MPAM=y # end of ARMv8.4 architectural features # diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 6c0af4f2c954..4b995ee48cd6 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -498,6 +498,7 @@ CONFIG_AS_HAS_LDAPR=y CONFIG_ARM64_AMU_EXTN=y CONFIG_AS_HAS_ARMV8_4=y CONFIG_ARM64_TLB_RANGE=y +CONFIG_ARM64_MPAM=y # end of ARMv8.4 architectural features # -- Gitee From c74539a335ab8eca26e10b1438fe3714711e19b1 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Fri, 7 Jul 2023 10:47:37 +0800 Subject: [PATCH 0692/2138] anolis: sched/isolation: fix a memory leak in procfs ANBZ: #8684 kmemleak reported: unreferenced object 0xffff002aadb62380 (size 128): comm "stress-ng-procf", pid 20578, jiffies 4296227961 (age 75018.420s) hex dump (first 32 bytes): 70 51 95 10 00 a0 ff ff 30 52 95 10 00 a0 ff ff pQ......0R...... d0 51 95 10 00 a0 ff ff a0 df 22 10 00 a0 ff ff .Q........"..... 
backtrace: [<00000000197d68ab>] kmem_cache_alloc_trace+0x1e0/0x460 [<0000000030298c46>] single_open+0x58/0x1ac [<00000000bde8ff2a>] dyn_isolcpus_open+0x24/0x2c [<00000000b2d1b210>] proc_reg_open+0x2a8/0x4b0 [<0000000070f0cdd4>] do_dentry_open+0x3bc/0xe54 [<000000004e13fb43>] vfs_open+0x94/0xd0 [<000000006631615d>] do_open+0x538/0x904 [<000000007cd55e85>] path_openat+0x1b4/0x3d4 [<00000000aee11823>] do_filp_open+0x140/0x310 [<00000000ddf652cc>] do_sys_openat2+0x124/0x330 [<00000000b88efbfc>] __arm64_sys_openat+0x13c/0x1c4 [<000000008f6ca77d>] el0_svc_common+0x154/0x520 [<0000000055a96fd0>] do_el0_svc+0xac/0xd4 [<00000000ea0094cf>] el0_svc+0x1c/0x30 [<0000000033b4e46c>] el0_sync_handler+0xa8/0xac [<00000000f389ba95>] el0_sync+0x168/0x180 unreferenced object 0xffff0041fd058db8 (size 232): comm "stress-ng-procf", pid 20578, jiffies 4296227961 (age 75018.420s) hex dump (first 32 bytes): 00 10 3b 9e 40 00 ff ff 00 10 00 00 00 00 00 00 ..;.@........... 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ backtrace: [<00000000c9b5f866>] kmem_cache_alloc+0x1d4/0x43c [<0000000057ea497b>] seq_open+0x54/0x150 [<00000000c1a5ea00>] single_open+0xf0/0x1ac [<00000000bde8ff2a>] dyn_isolcpus_open+0x24/0x2c [<00000000b2d1b210>] proc_reg_open+0x2a8/0x4b0 [<0000000070f0cdd4>] do_dentry_open+0x3bc/0xe54 [<000000004e13fb43>] vfs_open+0x94/0xd0 [<000000006631615d>] do_open+0x538/0x904 [<000000007cd55e85>] path_openat+0x1b4/0x3d4 [<00000000aee11823>] do_filp_open+0x140/0x310 [<00000000ddf652cc>] do_sys_openat2+0x124/0x330 [<00000000b88efbfc>] __arm64_sys_openat+0x13c/0x1c4 [<000000008f6ca77d>] el0_svc_common+0x154/0x520 [<0000000055a96fd0>] do_el0_svc+0xac/0xd4 [<00000000ea0094cf>] el0_svc+0x1c/0x30 [<0000000033b4e46c>] el0_sync_handler+0xa8/0xac Because proc_dyn_isolcpus_operations is not set proc_release handler. Fix it. 
Fixes: 1d4479f87314 ("anolis: sched/isolation: dynamical CPU isolation support") Signed-off-by: Tianchen Ding Reviewed-by: Cruz Link: https://gitee.com/anolis/cloud-kernel/pulls/3017 --- kernel/sched/isolation.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index 6fdb7ed32ebf..33dcd35605ab 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -413,6 +413,7 @@ static const struct proc_ops proc_dyn_isolcpus_operations = { .proc_read = seq_read, .proc_write = write_dyn_isolcpus, .proc_lseek = noop_llseek, + .proc_release = single_release, }; static int __init dyn_isolcpus_init(void) -- Gitee From 9b7631fbee9808a4f3336d2b54181c1821c8b128 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Tue, 29 Aug 2023 11:13:57 +0100 Subject: [PATCH 0693/2138] Documentation: ABI: debugfs-driver-qat: fix fw_counters path ANBZ: #8589 commit 7ba98583448b7a0dbfa8121c7be642651e0abd61 upstream. Intel-SIG: commit 7ba98583448b Documentation: ABI: debugfs-driver-qat: fix fw_counters path Backport to support Intel QAT in-tree driver The debugfs description for fw_counters reports an incorrect path indicating a qat folder that does not exist. Fix it. 
Fixes: 865b50fe6ea8 ("crypto: qat - add fw_counters debugfs file") Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- Documentation/ABI/testing/debugfs-driver-qat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat index 6731ffacc5f0..3f9b4f708051 100644 --- a/Documentation/ABI/testing/debugfs-driver-qat +++ b/Documentation/ABI/testing/debugfs-driver-qat @@ -1,4 +1,4 @@ -What: /sys/kernel/debug/qat__/qat/fw_counters +What: /sys/kernel/debug/qat__/fw_counters Date: November 2023 KernelVersion: 6.6 Contact: qat-linux@intel.com -- Gitee From fb06449793bfaef3c126417088333c7d32d2c051 Mon Sep 17 00:00:00 2001 From: Jinjie Ruan Date: Wed, 30 Aug 2023 15:54:51 +0800 Subject: [PATCH 0694/2138] crypto: qat - Use list_for_each_entry() helper ANBZ: #8589 commit 65029eec5ceba7d847f27171cdddb046bdc3a069 upstream. Intel-SIG: commit 65029eec5ceb crypto: qat - Use list_for_each_entry() helper Backport to support Intel QAT in-tree driver Convert list_for_each() to list_for_each_entry() so that the list_itr list_head pointer and list_entry() call are no longer needed, which can reduce a few lines of code. No functional changed. 
Signed-off-by: Jinjie Ruan Reviewed-by: Andy Shevchenko Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../crypto/intel/qat/qat_common/adf_init.c | 24 +++++-------------- 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 0f9e2d59ce38..b4cf605ccf3e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -61,7 +61,6 @@ int adf_service_unregister(struct service_hndl *service) static int adf_dev_init(struct adf_accel_dev *accel_dev) { struct service_hndl *service; - struct list_head *list_itr; struct adf_hw_device_data *hw_data = accel_dev->hw_device; int ret; @@ -140,8 +139,7 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev) * This is to facilitate any ordering dependencies between services * prior to starting any of the accelerators. 
*/ - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { dev_err(&GET_DEV(accel_dev), "Failed to initialise service %s\n", @@ -168,7 +166,6 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; - struct list_head *list_itr; int ret; set_bit(ADF_STATUS_STARTING, &accel_dev->status); @@ -212,8 +209,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) adf_heartbeat_start(accel_dev); - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_START)) { dev_err(&GET_DEV(accel_dev), "Failed to start service %s\n", @@ -264,7 +260,6 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; - struct list_head *list_itr; bool wait = false; int ret; @@ -289,8 +284,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) qat_comp_algs_unregister(); clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status); - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (!test_bit(accel_dev->accel_id, service->start_status)) continue; ret = service->event_hld(accel_dev, ADF_EVENT_STOP); @@ -327,7 +321,6 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; - struct list_head *list_itr; if (!hw_data) { dev_err(&GET_DEV(accel_dev), @@ -349,8 +342,7 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) &accel_dev->status); } - list_for_each(list_itr, &service_table) 
{ - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (!test_bit(accel_dev->accel_id, service->init_status)) continue; if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) @@ -387,10 +379,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) { struct service_hndl *service; - struct list_head *list_itr; - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) dev_err(&GET_DEV(accel_dev), "Failed to restart service %s.\n", @@ -402,10 +392,8 @@ int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) { struct service_hndl *service; - struct list_head *list_itr; - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) dev_err(&GET_DEV(accel_dev), "Failed to restart service %s.\n", -- Gitee From a8a9ef2a3cb154de1dc697987ee749de48280123 Mon Sep 17 00:00:00 2001 From: Justin Stitt Date: Wed, 13 Sep 2023 00:51:05 +0000 Subject: [PATCH 0695/2138] crypto: qat - refactor deprecated strncpy ANBZ: #8589 commit 3102bbcdcd3c945ef0bcea498d3a0c6384536d6c upstream. Intel-SIG: commit 3102bbcdcd3c crypto: qat - refactor deprecated strncpy Backport to support Intel QAT in-tree driver `strncpy` is deprecated for use on NUL-terminated destination strings [1]. We should prefer more robust and less ambiguous string interfaces. `buf` is expected to be NUL-terminated for its eventual use in `kstrtoul()` and NUL-padding is not required. Due to the above, a suitable replacement is `strscpy` [2] due to the fact that it guarantees NUL-termination on the destination buffer. 
Link: https://www.kernel.org/doc/html/latest/process/deprecated.html#strncpy-on-nul-terminated-strings [1] Link: https://manpages.debian.org/testing/linux-manual-4.8/strscpy.9.en.html [2] Link: https://github.com/KSPP/linux/issues/90 Cc: linux-hardening@vger.kernel.org Signed-off-by: Justin Stitt Acked-by: Giovanni Cabiddu Reviewed-by: Kees Cook Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/qat_uclo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index 4bd150d1441a..e27ea7e28c51 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -200,7 +200,7 @@ static int qat_uclo_parse_num(char *str, unsigned int *num) unsigned long ae = 0; int i; - strncpy(buf, str, 15); + strscpy(buf, str, sizeof(buf)); for (i = 0; i < 16; i++) { if (!isdigit(buf[i])) { buf[i] = '\0'; -- Gitee From 4bdedc49744601ea527a584b628c34e68fa72805 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Thu, 14 Sep 2023 10:55:46 +0100 Subject: [PATCH 0696/2138] crypto: qat - do not shadow error code ANBZ: #8589 commit c362a58e8da7828cf1501e1af9d43cd6c9641c5b upstream. Intel-SIG: commit c362a58e8da7 crypto: qat - do not shadow error code Backport to support Intel QAT in-tree driver Do not shadow the return code from adf_dev_down() in the error path of the DEV_DOWN command. 
Signed-off-by: Giovanni Cabiddu Reviewed-by: Adam Guerin Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index 8f04b0d3c5ac..f4a89f7ed4e9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -61,8 +61,8 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, } ret = adf_dev_down(accel_dev, true); - if (ret < 0) - return -EINVAL; + if (ret) + return ret; break; case DEV_UP: -- Gitee From 1ace87bad90b7274921bd5ef16da206124d92512 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 22 Sep 2023 10:54:33 -0700 Subject: [PATCH 0697/2138] crypto: qat - Annotate struct adf_fw_counters with __counted_by ANBZ: #8589 commit 141f12be09ac693e2384a7999f6782c7750c30a5 upstream. Intel-SIG: commit 141f12be09ac crypto: qat - Annotate struct adf_fw_counters with __counted_by Backport to support Intel QAT in-tree driver Prepare for the coming implementation by GCC and Clang of the __counted_by attribute. Flexible array members annotated with __counted_by can have their accesses bounds-checked at run-time checking via CONFIG_UBSAN_BOUNDS (for array indexing) and CONFIG_FORTIFY_SOURCE (for strcpy/memcpy-family functions). As found with Coccinelle[1], add __counted_by for struct adf_fw_counters. [1] https://github.com/kees/kernel-tools/blob/trunk/coccinelle/examples/counted_by.cocci Cc: Giovanni Cabiddu Cc: Herbert Xu Cc: "David S. 
Miller" Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: Tom Rix Cc: Adam Guerin Cc: Lucas Segarra Fernandez Cc: Andy Shevchenko Cc: qat-linux@intel.com Cc: linux-crypto@vger.kernel.org Cc: llvm@lists.linux.dev Signed-off-by: Kees Cook Reviewed-by: Gustavo A. R. Silva Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_fw_counters.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c index cb6e09ef5c9f..6abe4736eab8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c +++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c @@ -34,7 +34,7 @@ struct adf_ae_counters { struct adf_fw_counters { u16 ae_count; - struct adf_ae_counters ae_counters[]; + struct adf_ae_counters ae_counters[] __counted_by(ae_count); }; static void adf_fw_counters_parse_ae_values(struct adf_ae_counters *ae_counters, u32 ae, -- Gitee From 9b88a3f00e5a49e264336fe1c9613d86799c5b89 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 30 Aug 2023 17:55:02 +0800 Subject: [PATCH 0698/2138] crypto: qat - Remove zlib-deflate ANBZ: #8589 commit e9dd20e0e5f62d01d9404db2cf9824d1faebcf71 upstream. Intel-SIG: commit e9dd20e0e5f6 crypto: qat - Remove zlib-deflate Backport to support Intel QAT in-tree driver Remove the implementation of zlib-deflate because it is completely unused in the kernel. 
Signed-off-by: Herbert Xu Reviewed-by: Ard Biesheuvel [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_common/qat_comp_algs.c | 129 +----------------- 1 file changed, 1 insertion(+), 128 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c index b533984906ec..bf8c0ee62917 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c @@ -109,69 +109,6 @@ static void qat_comp_resubmit(struct work_struct *work) acomp_request_complete(areq, ret); } -static int parse_zlib_header(u16 zlib_h) -{ - int ret = -EINVAL; - __be16 header; - u8 *header_p; - u8 cmf, flg; - - header = cpu_to_be16(zlib_h); - header_p = (u8 *)&header; - - flg = header_p[0]; - cmf = header_p[1]; - - if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K) - return ret; - - if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE) - return ret; - - if (flg & QAT_RFC_1950_DICT_MASK) - return ret; - - return 0; -} - -static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req, - void *resp) -{ - struct acomp_req *areq = qat_req->acompress_req; - enum direction dir = qat_req->dir; - __be32 qat_produced_adler; - - qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp)); - - if (dir == COMPRESSION) { - __be16 zlib_header; - - zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR); - scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1); - areq->dlen += QAT_RFC_1950_HDR_SIZE; - - scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen, - QAT_RFC_1950_FOOTER_SIZE, 1); - areq->dlen += QAT_RFC_1950_FOOTER_SIZE; - } else { - __be32 decomp_adler; - int footer_offset; - int consumed; - - consumed = qat_comp_get_consumed_ctr(resp); - footer_offset = consumed + QAT_RFC_1950_HDR_SIZE; - if 
(footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen) - return -EBADMSG; - - scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset, - QAT_RFC_1950_FOOTER_SIZE, 0); - - if (qat_produced_adler != decomp_adler) - return -EBADMSG; - } - return 0; -} - static void qat_comp_generic_callback(struct qat_compression_req *qat_req, void *resp) { @@ -293,18 +230,6 @@ static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm) memset(ctx, 0, sizeof(*ctx)); } -static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm) -{ - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); - struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); - int ret; - - ret = qat_comp_alg_init_tfm(acomp_tfm); - ctx->qat_comp_callback = &qat_comp_rfc1950_callback; - - return ret; -} - static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir, unsigned int shdr, unsigned int sftr, unsigned int dhdr, unsigned int dftr) @@ -400,43 +325,6 @@ static int qat_comp_alg_decompress(struct acomp_req *req) return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0); } -static int qat_comp_alg_rfc1950_compress(struct acomp_req *req) -{ - if (!req->dst && req->dlen != 0) - return -EINVAL; - - if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE) - return -EINVAL; - - return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, - QAT_RFC_1950_HDR_SIZE, - QAT_RFC_1950_FOOTER_SIZE); -} - -static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req) -{ - struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req); - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); - struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); - struct adf_accel_dev *accel_dev = ctx->inst->accel_dev; - u16 zlib_header; - int ret; - - if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE) - return -EBADMSG; - - scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0); - - ret = 
parse_zlib_header(zlib_header); - if (ret) { - dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n"); - return ret; - } - - return qat_comp_alg_compress_decompress(req, DECOMPRESSION, QAT_RFC_1950_HDR_SIZE, - QAT_RFC_1950_FOOTER_SIZE, 0, 0); -} - static struct acomp_alg qat_acomp[] = { { .base = { .cra_name = "deflate", @@ -452,22 +340,7 @@ static struct acomp_alg qat_acomp[] = { { .decompress = qat_comp_alg_decompress, .dst_free = sgl_free, .reqsize = sizeof(struct qat_compression_req), -}, { - .base = { - .cra_name = "zlib-deflate", - .cra_driver_name = "qat_zlib_deflate", - .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_ctxsize = sizeof(struct qat_compression_ctx), - .cra_module = THIS_MODULE, - }, - .init = qat_comp_alg_rfc1950_init_tfm, - .exit = qat_comp_alg_exit_tfm, - .compress = qat_comp_alg_rfc1950_compress, - .decompress = qat_comp_alg_rfc1950_decompress, - .dst_free = sgl_free, - .reqsize = sizeof(struct qat_compression_req), -} }; +}}; int qat_comp_algs_register(void) { -- Gitee From 2efb846d45421ba006c106a7a71bc30dce39631a Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Mon, 2 Oct 2023 09:51:09 +0100 Subject: [PATCH 0699/2138] crypto: qat - add namespace to driver ANBZ: #8589 commit 4999999ed7e099fcc2476c8b3a245c4c2c9026c0 upstream. Intel-SIG: commit 4999999ed7e0 crypto: qat - add namespace to driver Backport to support Intel QAT in-tree driver Create CRYPTO_QAT namespace for symbols exported by the qat_common module and import those in the QAT drivers. It will reduce the global namespace crowdedness and potential misuse or the API. This does not introduce any functional change. 
Suggested-by: Andy Shevchenko Signed-off-by: Giovanni Cabiddu Reviewed-by: Lucas Segarra Fernandez Reviewed-by: Andy Shevchenko Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 1 + drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c | 1 + drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c | 1 + drivers/crypto/intel/qat/qat_c62x/adf_drv.c | 1 + drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c | 1 + drivers/crypto/intel/qat/qat_common/Makefile | 1 + drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c | 1 + drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c | 1 + 8 files changed, 8 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index f6f9e20f74b5..b49a7960bc91 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -469,3 +469,4 @@ MODULE_FIRMWARE(ADF_402XX_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); MODULE_SOFTDEP("pre: crypto-intel_qat"); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c index 468c9102093f..956a4c85609a 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c @@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_C3XXX_FW); MODULE_FIRMWARE(ADF_C3XXX_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c index d5a0ecca9d0b..a8de9cd09c05 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c @@ -226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); 
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c index 0186921be936..ad0ca4384998 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c @@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_C62X_FW); MODULE_FIRMWARE(ADF_C62X_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c index c9ae6c0d0dca..53b8ddb63364 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c @@ -226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 8dbf146de3fa..a3c611264caf 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o +ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CRYPTO_QAT intel_qat-objs := adf_cfg.o \ adf_isr.o \ adf_ctl_drv.o \ diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c index 1e748e8ce12d..40b456b8035b 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c @@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_DH895XCC_FW); MODULE_FIRMWARE(ADF_DH895XCC_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c 
b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c index fefb85ceaeb9..d59cb1ba2ad5 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c @@ -226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); -- Gitee From 0664ddd26ee3882255f50f0c89ed06cec303f878 Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Wed, 4 Oct 2023 12:09:19 +0200 Subject: [PATCH 0700/2138] crypto: qat - refactor included headers ANBZ: #8589 commit 756762decc604a5ac5c041f23dd447c5e691f459 upstream. Intel-SIG: commit 756762decc60 crypto: qat - refactor included headers Backport to support Intel QAT in-tree driver Include kernel.h for GENMASK(), kstrtobool() and types. Add forward declaration for struct adf_accel_dev. Remove unneeded include. This change doesn't introduce any function change. Signed-off-by: Lucas Segarra Fernandez Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c | 2 ++ drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c index 34c6cd8e27c0..b0e60471163c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c @@ -2,6 +2,8 @@ /* Copyright(c) 2022 Intel Corporation */ #include #include +#include + #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_gen4_pm.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h index c2768762cca3..39d37b352b45 100644 --- 
a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h @@ -3,7 +3,9 @@ #ifndef ADF_GEN4_PM_H #define ADF_GEN4_PM_H -#include "adf_accel_devices.h" +#include + +struct adf_accel_dev; /* Power management registers */ #define ADF_GEN4_PM_HOST_MSG (0x50A01C) -- Gitee From 11c808c7cad89435379971ad6e67b0a79940b6ca Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Wed, 4 Oct 2023 12:09:20 +0200 Subject: [PATCH 0701/2138] crypto: qat - add pm_status debugfs file ANBZ: #8589 commit e079231676e05d6c88fba4585db1ac399a790b63 upstream. Intel-SIG: commit e079231676e0 crypto: qat - add pm_status debugfs file Backport to support Intel QAT in-tree driver QAT devices implement a mechanism that allows them to go autonomously to a low power state depending on the load. Expose power management info by providing the "pm_status" file under debugfs. This includes PM state, PM event log, PM event counters, PM HW CSRs, per-resource type constrain counters and per-domain power gating status specific to the QAT device. This information is retrieved from (1) the FW by means of ICP_QAT_FW_PM_INFO command, (2) CSRs and (3) counters collected by the device driver. In addition, add logic to keep track and report power management event interrupts and acks/nacks sent to FW to allow/prevent state transitions. 
Signed-off-by: Lucas Segarra Fernandez Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- Documentation/ABI/testing/debugfs-driver-qat | 9 + drivers/crypto/intel/qat/qat_common/Makefile | 2 + .../intel/qat/qat_common/adf_accel_devices.h | 13 + .../crypto/intel/qat/qat_common/adf_admin.c | 27 ++ .../intel/qat/qat_common/adf_common_drv.h | 1 + .../crypto/intel/qat/qat_common/adf_dbgfs.c | 3 + .../crypto/intel/qat/qat_common/adf_gen4_pm.c | 24 +- .../crypto/intel/qat/qat_common/adf_gen4_pm.h | 46 +++ .../qat/qat_common/adf_gen4_pm_debugfs.c | 265 ++++++++++++++++++ .../intel/qat/qat_common/adf_pm_dbgfs.c | 48 ++++ .../intel/qat/qat_common/adf_pm_dbgfs.h | 12 + .../qat/qat_common/icp_qat_fw_init_admin.h | 35 +++ 12 files changed, 480 insertions(+), 5 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat index 3f9b4f708051..0656f27d1042 100644 --- a/Documentation/ABI/testing/debugfs-driver-qat +++ b/Documentation/ABI/testing/debugfs-driver-qat @@ -59,3 +59,12 @@ Description: (RO) Read returns the device health status. The driver does not monitor for Heartbeat. It is left for a user to poll the status periodically. + +What: /sys/kernel/debug/qat__/pm_status +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: (RO) Read returns power management information specific to the + QAT device. + + This attribute is only available for qat_4xxx devices. 
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index a3c611264caf..2f0330651622 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -33,8 +33,10 @@ intel_qat-objs := adf_cfg.o \ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ adf_fw_counters.o \ + adf_gen4_pm_debugfs.o \ adf_heartbeat.o \ adf_heartbeat_dbgfs.o \ + adf_pm_dbgfs.o \ adf_dbgfs.o intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 79d5a1535eda..36c6a6bf4a66 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -292,6 +292,18 @@ struct adf_dc_data { dma_addr_t ovf_buff_p; }; +struct adf_pm { + struct dentry *debugfs_pm_status; + bool present; + int idle_irq_counters; + int throttle_irq_counters; + int fw_irq_counters; + int host_ack_counter; + int host_nack_counter; + ssize_t (*print_pm_status)(struct adf_accel_dev *accel_dev, + char __user *buf, size_t count, loff_t *pos); +}; + struct adf_accel_dev { struct adf_etr_data *transport; struct adf_hw_device_data *hw_device; @@ -299,6 +311,7 @@ struct adf_accel_dev { struct adf_fw_loader_data *fw_loader; struct adf_admin_comms *admin; struct adf_dc_data *dc_data; + struct adf_pm power_management; struct list_head crypto_list; struct list_head compression_list; unsigned long status; diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 194d64d4b99a..2d45167b48a0 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -379,6 +379,33 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay) return adf_send_admin(accel_dev, &req, &resp, ae_mask); } +int 
adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, + size_t buff_size) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct icp_qat_fw_init_admin_req req = { }; + struct icp_qat_fw_init_admin_resp resp; + u32 ae_mask = hw_data->admin_ae_mask; + int ret; + + /* Query pm info via init/admin cmd */ + if (!accel_dev->admin) { + dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n"); + return -EFAULT; + } + + req.cmd_id = ICP_QAT_FW_PM_INFO; + req.init_cfg_sz = buff_size; + req.init_cfg_ptr = p_state_addr; + + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + dev_err(&GET_DEV(accel_dev), + "Failed to query power-management info\n"); + + return ret; +} + int adf_init_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin; diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 79ff7982378d..46dd81074166 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -95,6 +95,7 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); +int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c index 056fc59b5ae6..9da1424ac44d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c @@ -8,6 +8,7 @@ #include "adf_dbgfs.h" #include "adf_fw_counters.h" 
#include "adf_heartbeat_dbgfs.h" +#include "adf_pm_dbgfs.h" /** * adf_dbgfs_init() - add persistent debugfs entries @@ -54,6 +55,7 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev) if (!accel_dev->is_vf) { adf_fw_counters_dbgfs_add(accel_dev); adf_heartbeat_dbgfs_add(accel_dev); + adf_pm_dbgfs_add(accel_dev); } } @@ -64,6 +66,7 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev) void adf_dbgfs_rm(struct adf_accel_dev *accel_dev) { if (!accel_dev->is_vf) { + adf_pm_dbgfs_rm(accel_dev); adf_heartbeat_dbgfs_rm(accel_dev); adf_fw_counters_dbgfs_rm(accel_dev); } diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c index b0e60471163c..c663d3a20c5b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c @@ -12,11 +12,6 @@ #include "adf_gen4_hw_data.h" #include "adf_cfg.h" -enum qat_pm_host_msg { - PM_NO_CHANGE = 0, - PM_SET_MIN, -}; - struct adf_gen4_pm_data { struct work_struct pm_irq_work; struct adf_accel_dev *accel_dev; @@ -27,6 +22,7 @@ static int send_host_msg(struct adf_accel_dev *accel_dev) { char pm_idle_support_cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {}; void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + struct adf_pm *pm = &accel_dev->power_management; bool pm_idle_support; u32 msg; int ret; @@ -41,6 +37,11 @@ static int send_host_msg(struct adf_accel_dev *accel_dev) if (ret) pm_idle_support = true; + if (pm_idle_support) + pm->host_ack_counter++; + else + pm->host_nack_counter++; + /* Send HOST_MSG */ msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, pm_idle_support ? 
PM_SET_MIN : PM_NO_CHANGE); @@ -61,17 +62,27 @@ static void pm_bh_handler(struct work_struct *work) container_of(work, struct adf_gen4_pm_data, pm_irq_work); struct adf_accel_dev *accel_dev = pm_data->accel_dev; void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + struct adf_pm *pm = &accel_dev->power_management; u32 pm_int_sts = pm_data->pm_int_sts; u32 val; /* PM Idle interrupt */ if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) { + pm->idle_irq_counters++; /* Issue host message to FW */ if (send_host_msg(accel_dev)) dev_warn_ratelimited(&GET_DEV(accel_dev), "Failed to send host msg to FW\n"); } + /* PM throttle interrupt */ + if (pm_int_sts & ADF_GEN4_PM_THR_STS) + pm->throttle_irq_counters++; + + /* PM fw interrupt */ + if (pm_int_sts & ADF_GEN4_PM_FW_INT_STS) + pm->fw_irq_counters++; + /* Clear interrupt status */ ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts); @@ -131,6 +142,9 @@ int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev) if (ret) return ret; + /* Initialize PM internal data */ + adf_gen4_init_dev_pm_data(accel_dev); + /* Enable default PM interrupts: IDLE, THROTTLE */ val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT); val |= ADF_GEN4_PM_INT_EN_DEFAULT; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h index 39d37b352b45..a49352b79a7a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h @@ -7,6 +7,11 @@ struct adf_accel_dev; +enum qat_pm_host_msg { + PM_NO_CHANGE = 0, + PM_SET_MIN, +}; + /* Power management registers */ #define ADF_GEN4_PM_HOST_MSG (0x50A01C) @@ -41,7 +46,48 @@ struct adf_accel_dev; #define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7) #define ADF_GEN4_PM_DEFAULT_IDLE_SUPPORT (0x1) +/* PM CSRs fields masks */ +#define ADF_GEN4_PM_DOMAIN_POWER_GATED_MASK GENMASK(15, 0) +#define ADF_GEN4_PM_SSM_PM_ENABLE_MASK GENMASK(15, 0) +#define ADF_GEN4_PM_IDLE_FILTER_MASK GENMASK(5, 3) +#define ADF_GEN4_PM_IDLE_ENABLE_MASK BIT(2) 
+#define ADF_GEN4_PM_ENABLE_PM_MASK BIT(21) +#define ADF_GEN4_PM_ENABLE_PM_IDLE_MASK BIT(22) +#define ADF_GEN4_PM_ENABLE_DEEP_PM_IDLE_MASK BIT(23) +#define ADF_GEN4_PM_CURRENT_WP_MASK GENMASK(19, 11) +#define ADF_GEN4_PM_CPM_PM_STATE_MASK GENMASK(22, 20) +#define ADF_GEN4_PM_PENDING_WP_MASK GENMASK(31, 23) +#define ADF_GEN4_PM_THR_VALUE_MASK GENMASK(6, 4) +#define ADF_GEN4_PM_MIN_PWR_ACK_MASK BIT(7) +#define ADF_GEN4_PM_MIN_PWR_ACK_PENDING_MASK BIT(17) +#define ADF_GEN4_PM_CPR_ACTIVE_COUNT_MASK BIT(0) +#define ADF_GEN4_PM_CPR_MANAGED_COUNT_MASK BIT(0) +#define ADF_GEN4_PM_XLT_ACTIVE_COUNT_MASK BIT(1) +#define ADF_GEN4_PM_XLT_MANAGED_COUNT_MASK BIT(1) +#define ADF_GEN4_PM_DCPR_ACTIVE_COUNT_MASK GENMASK(3, 2) +#define ADF_GEN4_PM_DCPR_MANAGED_COUNT_MASK GENMASK(3, 2) +#define ADF_GEN4_PM_PKE_ACTIVE_COUNT_MASK GENMASK(8, 4) +#define ADF_GEN4_PM_PKE_MANAGED_COUNT_MASK GENMASK(8, 4) +#define ADF_GEN4_PM_WAT_ACTIVE_COUNT_MASK GENMASK(13, 9) +#define ADF_GEN4_PM_WAT_MANAGED_COUNT_MASK GENMASK(13, 9) +#define ADF_GEN4_PM_WCP_ACTIVE_COUNT_MASK GENMASK(18, 14) +#define ADF_GEN4_PM_WCP_MANAGED_COUNT_MASK GENMASK(18, 14) +#define ADF_GEN4_PM_UCS_ACTIVE_COUNT_MASK GENMASK(20, 19) +#define ADF_GEN4_PM_UCS_MANAGED_COUNT_MASK GENMASK(20, 19) +#define ADF_GEN4_PM_CPH_ACTIVE_COUNT_MASK GENMASK(24, 21) +#define ADF_GEN4_PM_CPH_MANAGED_COUNT_MASK GENMASK(24, 21) +#define ADF_GEN4_PM_ATH_ACTIVE_COUNT_MASK GENMASK(28, 25) +#define ADF_GEN4_PM_ATH_MANAGED_COUNT_MASK GENMASK(28, 25) + int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev); bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev); +#ifdef CONFIG_DEBUG_FS +void adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev); +#else +static inline void adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev) +{ +} +#endif /* CONFIG_DEBUG_FS */ + #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c new file mode 100644 index 
000000000000..5114759287c6 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_gen4_pm.h" +#include "icp_qat_fw_init_admin.h" + +/* + * This is needed because a variable is used to index the mask at + * pm_scnprint_table(), making it not compile time constant, so the compile + * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled. + */ +#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1)) + +#define PM_INFO_MEMBER_OFF(member) \ + (offsetof(struct icp_qat_fw_init_admin_pm_info, member) / sizeof(u32)) + +#define PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, _mask_) \ +{ \ + .reg_offset = PM_INFO_MEMBER_OFF(_reg_), \ + .key = __stringify(_field_), \ + .field_mask = _mask_, \ +} + +#define PM_INFO_REGSET_ENTRY32(_reg_, _field_) \ + PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, GENMASK(31, 0)) + +#define PM_INFO_REGSET_ENTRY(_reg_, _field_) \ + PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, ADF_GEN4_PM_##_field_##_MASK) + +#define PM_INFO_MAX_KEY_LEN 21 + +struct pm_status_row { + int reg_offset; + u32 field_mask; + const char *key; +}; + +static struct pm_status_row pm_fuse_rows[] = { + PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM), + PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE), + PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_DEEP_PM_IDLE), +}; + +static struct pm_status_row pm_info_rows[] = { + PM_INFO_REGSET_ENTRY(pm.status, CPM_PM_STATE), + PM_INFO_REGSET_ENTRY(pm.status, PENDING_WP), + PM_INFO_REGSET_ENTRY(pm.status, CURRENT_WP), + PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_ENABLE), + PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_FILTER), + PM_INFO_REGSET_ENTRY(pm.main, MIN_PWR_ACK), + PM_INFO_REGSET_ENTRY(pm.thread, MIN_PWR_ACK_PENDING), + PM_INFO_REGSET_ENTRY(pm.main, THR_VALUE), +}; + +static struct pm_status_row 
pm_ssm_rows[] = { + PM_INFO_REGSET_ENTRY(ssm.pm_enable, SSM_PM_ENABLE), + PM_INFO_REGSET_ENTRY32(ssm.active_constraint, ACTIVE_CONSTRAINT), + PM_INFO_REGSET_ENTRY(ssm.pm_domain_status, DOMAIN_POWER_GATED), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, ATH_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, CPH_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, PKE_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, CPR_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, DCPR_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, UCS_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, XLT_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, WAT_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, WCP_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, ATH_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, CPH_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, PKE_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, CPR_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, DCPR_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, UCS_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, XLT_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, WAT_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, WCP_MANAGED_COUNT), +}; + +static struct pm_status_row pm_log_rows[] = { + PM_INFO_REGSET_ENTRY32(event_counters.host_msg, HOST_MSG_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.sys_pm, SYS_PM_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.local_ssm, SSM_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.timer, TIMER_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.unknown, UNKNOWN_EVENT_COUNT), +}; + +static struct pm_status_row pm_event_rows[ICP_QAT_NUMBER_OF_PM_EVENTS] = { + PM_INFO_REGSET_ENTRY32(event_log[0], EVENT0), + PM_INFO_REGSET_ENTRY32(event_log[1], EVENT1), + PM_INFO_REGSET_ENTRY32(event_log[2], 
EVENT2), + PM_INFO_REGSET_ENTRY32(event_log[3], EVENT3), + PM_INFO_REGSET_ENTRY32(event_log[4], EVENT4), + PM_INFO_REGSET_ENTRY32(event_log[5], EVENT5), + PM_INFO_REGSET_ENTRY32(event_log[6], EVENT6), + PM_INFO_REGSET_ENTRY32(event_log[7], EVENT7), +}; + +static struct pm_status_row pm_csrs_rows[] = { + PM_INFO_REGSET_ENTRY32(pm.fw_init, CPM_PM_FW_INIT), + PM_INFO_REGSET_ENTRY32(pm.status, CPM_PM_STATUS), + PM_INFO_REGSET_ENTRY32(pm.main, CPM_PM_MASTER_FW), + PM_INFO_REGSET_ENTRY32(pm.pwrreq, CPM_PM_PWRREQ), +}; + +static int pm_scnprint_table(char *buff, struct pm_status_row *table, + u32 *pm_info_regs, size_t buff_size, int table_len, + bool lowercase) +{ + char key[PM_INFO_MAX_KEY_LEN]; + int wr = 0; + int i; + + for (i = 0; i < table_len; i++) { + if (lowercase) + string_lower(key, table[i].key); + else + string_upper(key, table[i].key); + + wr += scnprintf(&buff[wr], buff_size - wr, "%s: %#x\n", key, + field_get(table[i].field_mask, + pm_info_regs[table[i].reg_offset])); + } + + return wr; +} + +static int pm_scnprint_table_upper_keys(char *buff, struct pm_status_row *table, + u32 *pm_info_regs, size_t buff_size, + int table_len) +{ + return pm_scnprint_table(buff, table, pm_info_regs, buff_size, + table_len, false); +} + +static int pm_scnprint_table_lower_keys(char *buff, struct pm_status_row *table, + u32 *pm_info_regs, size_t buff_size, + int table_len) +{ + return pm_scnprint_table(buff, table, pm_info_regs, buff_size, + table_len, true); +} + +static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE); + +static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev, + char __user *buf, size_t count, + loff_t *pos) +{ + void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + struct adf_pm *pm = &accel_dev->power_management; + struct icp_qat_fw_init_admin_pm_info *pm_info; + dma_addr_t p_state_addr; + u32 *pm_info_regs; + char *pm_kv; + int len = 0; + u32 val; + int ret; + + pm_info = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pm_info) + 
return -ENOMEM; + + pm_kv = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pm_kv) { + ret = -ENOMEM; + goto out_free; + } + + p_state_addr = dma_map_single(&GET_DEV(accel_dev), pm_info, PAGE_SIZE, + DMA_FROM_DEVICE); + ret = dma_mapping_error(&GET_DEV(accel_dev), p_state_addr); + if (ret) + goto out_free; + + /* Query PM info from QAT FW */ + ret = adf_get_pm_info(accel_dev, p_state_addr, PAGE_SIZE); + dma_unmap_single(&GET_DEV(accel_dev), p_state_addr, PAGE_SIZE, + DMA_FROM_DEVICE); + if (ret) + goto out_free; + + pm_info_regs = (u32 *)pm_info; + + /* Fusectl related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "----------- PM Fuse info ---------\n"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_fuse_rows)); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "max_pwrreq: %#x\n", + pm_info->max_pwrreq); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "min_pwrreq: %#x\n", + pm_info->min_pwrreq); + + /* PM related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "------------ PM Info ------------\n"); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "power_level: %s\n", + pm_info->pwr_state == PM_SET_MIN ? 
"min" : "max"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_info_rows)); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: STATIC\n"); + + /* SSM related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "----------- SSM_PM Info ----------\n"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_ssm_rows)); + + /* Log related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "------------- PM Log -------------\n"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_log_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_log_rows)); + + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_event_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_event_rows)); + + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "idle_irq_count: %#x\n", + pm->idle_irq_counters); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "fw_irq_count: %#x\n", + pm->fw_irq_counters); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "throttle_irq_count: %#x\n", pm->throttle_irq_counters); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "host_ack_count: %#x\n", + pm->host_ack_counter); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "host_nack_count: %#x\n", + pm->host_nack_counter); + + /* CSRs content */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "----------- HW PM CSRs -----------\n"); + len += pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_csrs_rows)); + + val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "CPM_PM_HOST_MSG: %#x\n", val); + val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "CPM_PM_INTERRUPT: %#x\n", val); + ret = simple_read_from_buffer(buf, count, pos, pm_kv, len); + +out_free: + kfree(pm_info); + kfree(pm_kv); + return ret; +} + +void 
adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev) +{ + accel_dev->power_management.print_pm_status = adf_gen4_print_pm_status; + accel_dev->power_management.present = true; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c new file mode 100644 index 000000000000..f0a13c190196 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_pm_dbgfs.h" + +static ssize_t pm_status_read(struct file *f, char __user *buf, size_t count, + loff_t *pos) +{ + struct adf_accel_dev *accel_dev = file_inode(f)->i_private; + struct adf_pm pm = accel_dev->power_management; + + if (pm.print_pm_status) + return pm.print_pm_status(accel_dev, buf, count, pos); + + return count; +} + +static const struct file_operations pm_status_fops = { + .owner = THIS_MODULE, + .read = pm_status_read, +}; + +void adf_pm_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + struct adf_pm *pm = &accel_dev->power_management; + + if (!pm->present || !pm->print_pm_status) + return; + + pm->debugfs_pm_status = debugfs_create_file("pm_status", 0400, + accel_dev->debugfs_dir, + accel_dev, &pm_status_fops); +} + +void adf_pm_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + struct adf_pm *pm = &accel_dev->power_management; + + if (!pm->present) + return; + + debugfs_remove(pm->debugfs_pm_status); + pm->debugfs_pm_status = NULL; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h new file mode 100644 index 000000000000..83632e5aa097 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_PM_DBGFS_H_ +#define ADF_PM_DBGFS_H_ + +struct adf_accel_dev; + +void 
adf_pm_dbgfs_rm(struct adf_accel_dev *accel_dev); +void adf_pm_dbgfs_add(struct adf_accel_dev *accel_dev); + +#endif /* ADF_PM_DBGFS_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index 019a6443834e..2ebbec75d778 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -20,6 +20,7 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13, ICP_QAT_FW_TIMER_GET = 19, ICP_QAT_FW_PM_STATE_CONFIG = 128, + ICP_QAT_FW_PM_INFO = 129, }; enum icp_qat_fw_init_admin_resp_status { @@ -108,4 +109,38 @@ struct icp_qat_fw_init_admin_resp { #define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC +#define ICP_QAT_NUMBER_OF_PM_EVENTS 8 + +struct icp_qat_fw_init_admin_pm_info { + __u16 max_pwrreq; + __u16 min_pwrreq; + __u16 resvrd1; + __u8 pwr_state; + __u8 resvrd2; + __u32 fusectl0; + struct_group(event_counters, + __u32 sys_pm; + __u32 host_msg; + __u32 unknown; + __u32 local_ssm; + __u32 timer; + ); + __u32 event_log[ICP_QAT_NUMBER_OF_PM_EVENTS]; + struct_group(pm, + __u32 fw_init; + __u32 pwrreq; + __u32 status; + __u32 main; + __u32 thread; + ); + struct_group(ssm, + __u32 pm_enable; + __u32 pm_active_status; + __u32 pm_managed_status; + __u32 pm_domain_status; + __u32 active_constraint; + ); + __u32 resvrd3[6]; +}; + #endif -- Gitee From 5fa4f611cb447ca401711cfd0a1ef31c96726850 Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Wed, 4 Oct 2023 12:36:42 +0200 Subject: [PATCH 0702/2138] crypto: qat - add cnv_errors debugfs file ANBZ: #8589 commit d807f0240c713bdd7c81a7e212f2feb0b5cd6725 upstream. Intel-SIG: commit d807f0240c71 crypto: qat - add cnv_errors debugfs file Backport to support Intel QAT in-tree driver The Compress and Verify (CnV) feature check and ensures data integrity in the compression operation. 
The implementation of CnV keeps a record of the CnV errors that have occurred since the driver was loaded. Expose CnV error stats by providing the "cnv_errors" file under debugfs. This includes the number of errors detected up to now and the type of the last error. The error count is provided on a per Acceleration Engine basis and it is reset every time the driver is loaded. Signed-off-by: Lucas Segarra Fernandez Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- Documentation/ABI/testing/debugfs-driver-qat | 13 + drivers/crypto/intel/qat/qat_common/Makefile | 1 + .../intel/qat/qat_common/adf_accel_devices.h | 1 + .../crypto/intel/qat/qat_common/adf_admin.c | 21 ++ .../intel/qat/qat_common/adf_cnv_dbgfs.c | 299 ++++++++++++++++++ .../intel/qat/qat_common/adf_cnv_dbgfs.h | 11 + .../intel/qat/qat_common/adf_common_drv.h | 1 + .../crypto/intel/qat/qat_common/adf_dbgfs.c | 3 + .../qat/qat_common/icp_qat_fw_init_admin.h | 5 + 9 files changed, 355 insertions(+) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat index 0656f27d1042..b2db010d851e 100644 --- a/Documentation/ABI/testing/debugfs-driver-qat +++ b/Documentation/ABI/testing/debugfs-driver-qat @@ -68,3 +68,16 @@ Description: (RO) Read returns power management information specific to the QAT device. This attribute is only available for qat_4xxx devices. + +What: /sys/kernel/debug/qat__/cnv_errors +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: (RO) Read returns, for each Acceleration Engine (AE), the number + of errors and the type of the last error detected by the device + when performing verified compression. 
+ Reported counters:: + + : Number of Compress and Verify (CnV) errors and type + of the last CnV error detected by Acceleration + Engine N. diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 2f0330651622..bb3b2516e6c6 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -33,6 +33,7 @@ intel_qat-objs := adf_cfg.o \ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ adf_fw_counters.o \ + adf_cnv_dbgfs.o \ adf_gen4_pm_debugfs.o \ adf_heartbeat.o \ adf_heartbeat_dbgfs.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 36c6a6bf4a66..3674904d0527 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -318,6 +318,7 @@ struct adf_accel_dev { atomic_t ref_count; struct dentry *debugfs_dir; struct dentry *fw_cntr_dbgfile; + struct dentry *cnv_dbgfile; struct list_head list; struct module *owner; struct adf_accel_pci accel_pci_dev; diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 2d45167b48a0..3a04e743497f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -406,6 +406,27 @@ int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, return ret; } +int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, + u16 *latest_err) +{ + struct icp_qat_fw_init_admin_req req = { }; + struct icp_qat_fw_init_admin_resp resp; + int ret; + + req.cmd_id = ICP_QAT_FW_CNV_STATS_GET; + + ret = adf_put_admin_msg_sync(accel_dev, ae, &req, &resp); + if (ret) + return ret; + if (resp.status) + return -EPROTONOSUPPORT; + + *err_cnt = resp.error_count; + *latest_err = resp.latest_error; + + return ret; +} + int adf_init_admin_comms(struct 
adf_accel_dev *accel_dev) { struct adf_admin_comms *admin; diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c new file mode 100644 index 000000000000..aa5b6ff1dfb4 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_cnv_dbgfs.h" +#include "qat_compression.h" + +#define CNV_DEBUGFS_FILENAME "cnv_errors" +#define CNV_MIN_PADDING 16 + +#define CNV_ERR_INFO_MASK GENMASK(11, 0) +#define CNV_ERR_TYPE_MASK GENMASK(15, 12) +#define CNV_SLICE_ERR_MASK GENMASK(7, 0) +#define CNV_SLICE_ERR_SIGN_BIT_INDEX 7 +#define CNV_DELTA_ERR_SIGN_BIT_INDEX 11 + +enum cnv_error_type { + CNV_ERR_TYPE_NONE, + CNV_ERR_TYPE_CHECKSUM, + CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH, + CNV_ERR_TYPE_DECOMPRESSION, + CNV_ERR_TYPE_TRANSLATION, + CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH, + CNV_ERR_TYPE_UNKNOWN, + CNV_ERR_TYPES_COUNT +}; + +#define CNV_ERROR_TYPE_GET(latest_err) \ + min_t(u16, u16_get_bits(latest_err, CNV_ERR_TYPE_MASK), CNV_ERR_TYPE_UNKNOWN) + +#define CNV_GET_DELTA_ERR_INFO(latest_error) \ + sign_extend32(latest_error, CNV_DELTA_ERR_SIGN_BIT_INDEX) + +#define CNV_GET_SLICE_ERR_INFO(latest_error) \ + sign_extend32(latest_error, CNV_SLICE_ERR_SIGN_BIT_INDEX) + +#define CNV_GET_DEFAULT_ERR_INFO(latest_error) \ + u16_get_bits(latest_error, CNV_ERR_INFO_MASK) + +enum cnv_fields { + CNV_ERR_COUNT, + CNV_LATEST_ERR, + CNV_FIELDS_COUNT +}; + +static const char * const cnv_field_names[CNV_FIELDS_COUNT] = { + [CNV_ERR_COUNT] = "Total Errors", + [CNV_LATEST_ERR] = "Last Error", +}; + +static const char * const cnv_error_names[CNV_ERR_TYPES_COUNT] = { + [CNV_ERR_TYPE_NONE] = "No Error", + [CNV_ERR_TYPE_CHECKSUM] = "Checksum Error", + [CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH] = "Length Error-P", + 
[CNV_ERR_TYPE_DECOMPRESSION] = "Decomp Error", + [CNV_ERR_TYPE_TRANSLATION] = "Xlat Error", + [CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH] = "Length Error-C", + [CNV_ERR_TYPE_UNKNOWN] = "Unknown Error", +}; + +struct ae_cnv_errors { + u16 ae; + u16 err_cnt; + u16 latest_err; + bool is_comp_ae; +}; + +struct cnv_err_stats { + u16 ae_count; + struct ae_cnv_errors ae_cnv_errors[]; +}; + +static s16 get_err_info(u8 error_type, u16 latest) +{ + switch (error_type) { + case CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH: + case CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH: + return CNV_GET_DELTA_ERR_INFO(latest); + case CNV_ERR_TYPE_DECOMPRESSION: + case CNV_ERR_TYPE_TRANSLATION: + return CNV_GET_SLICE_ERR_INFO(latest); + default: + return CNV_GET_DEFAULT_ERR_INFO(latest); + } +} + +static void *qat_cnv_errors_seq_start(struct seq_file *sfile, loff_t *pos) +{ + struct cnv_err_stats *err_stats = sfile->private; + + if (*pos == 0) + return SEQ_START_TOKEN; + + if (*pos > err_stats->ae_count) + return NULL; + + return &err_stats->ae_cnv_errors[*pos - 1]; +} + +static void *qat_cnv_errors_seq_next(struct seq_file *sfile, void *v, + loff_t *pos) +{ + struct cnv_err_stats *err_stats = sfile->private; + + (*pos)++; + + if (*pos > err_stats->ae_count) + return NULL; + + return &err_stats->ae_cnv_errors[*pos - 1]; +} + +static void qat_cnv_errors_seq_stop(struct seq_file *sfile, void *v) +{ +} + +static int qat_cnv_errors_seq_show(struct seq_file *sfile, void *v) +{ + struct ae_cnv_errors *ae_errors; + unsigned int i; + s16 err_info; + u8 err_type; + + if (v == SEQ_START_TOKEN) { + seq_puts(sfile, "AE "); + for (i = 0; i < CNV_FIELDS_COUNT; ++i) + seq_printf(sfile, " %*s", CNV_MIN_PADDING, + cnv_field_names[i]); + } else { + ae_errors = v; + + if (!ae_errors->is_comp_ae) + return 0; + + err_type = CNV_ERROR_TYPE_GET(ae_errors->latest_err); + err_info = get_err_info(err_type, ae_errors->latest_err); + + seq_printf(sfile, "%d:", ae_errors->ae); + seq_printf(sfile, " %*d", CNV_MIN_PADDING, 
ae_errors->err_cnt); + seq_printf(sfile, "%*s [%d]", CNV_MIN_PADDING, + cnv_error_names[err_type], err_info); + } + seq_putc(sfile, '\n'); + + return 0; +} + +static const struct seq_operations qat_cnv_errors_sops = { + .start = qat_cnv_errors_seq_start, + .next = qat_cnv_errors_seq_next, + .stop = qat_cnv_errors_seq_stop, + .show = qat_cnv_errors_seq_show, +}; + +/** + * cnv_err_stats_alloc() - Get CNV stats for the provided device. + * @accel_dev: Pointer to a QAT acceleration device + * + * Allocates and populates table of CNV errors statistics for each non-admin AE + * available through the supplied acceleration device. The caller becomes the + * owner of such memory and is responsible for the deallocation through a call + * to kfree(). + * + * Returns: a pointer to a dynamically allocated struct cnv_err_stats on success + * or a negative value on error. + */ +static struct cnv_err_stats *cnv_err_stats_alloc(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct cnv_err_stats *err_stats; + unsigned long ae_count; + unsigned long ae_mask; + size_t err_stats_size; + unsigned long ae; + unsigned int i; + u16 latest_err; + u16 err_cnt; + int ret; + + if (!adf_dev_started(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "QAT Device not started\n"); + return ERR_PTR(-EBUSY); + } + + /* Ignore the admin AEs */ + ae_mask = hw_data->ae_mask & ~hw_data->admin_ae_mask; + ae_count = hweight_long(ae_mask); + if (unlikely(!ae_count)) + return ERR_PTR(-EINVAL); + + err_stats_size = struct_size(err_stats, ae_cnv_errors, ae_count); + err_stats = kmalloc(err_stats_size, GFP_KERNEL); + if (!err_stats) + return ERR_PTR(-ENOMEM); + + err_stats->ae_count = ae_count; + + i = 0; + for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) { + ret = adf_get_cnv_stats(accel_dev, ae, &err_cnt, &latest_err); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), + "Failed to get CNV stats for ae %ld, [%d].\n", + ae, ret); + 
err_stats->ae_cnv_errors[i++].is_comp_ae = false; + continue; + } + err_stats->ae_cnv_errors[i].is_comp_ae = true; + err_stats->ae_cnv_errors[i].latest_err = latest_err; + err_stats->ae_cnv_errors[i].err_cnt = err_cnt; + err_stats->ae_cnv_errors[i].ae = ae; + i++; + } + + return err_stats; +} + +static int qat_cnv_errors_file_open(struct inode *inode, struct file *file) +{ + struct adf_accel_dev *accel_dev = inode->i_private; + struct seq_file *cnv_errors_seq_file; + struct cnv_err_stats *cnv_err_stats; + int ret; + + cnv_err_stats = cnv_err_stats_alloc(accel_dev); + if (IS_ERR(cnv_err_stats)) + return PTR_ERR(cnv_err_stats); + + ret = seq_open(file, &qat_cnv_errors_sops); + if (unlikely(ret)) { + kfree(cnv_err_stats); + return ret; + } + + cnv_errors_seq_file = file->private_data; + cnv_errors_seq_file->private = cnv_err_stats; + return ret; +} + +static int qat_cnv_errors_file_release(struct inode *inode, struct file *file) +{ + struct seq_file *cnv_errors_seq_file = file->private_data; + + kfree(cnv_errors_seq_file->private); + cnv_errors_seq_file->private = NULL; + + return seq_release(inode, file); +} + +static const struct file_operations qat_cnv_fops = { + .owner = THIS_MODULE, + .open = qat_cnv_errors_file_open, + .read = seq_read, + .llseek = seq_lseek, + .release = qat_cnv_errors_file_release, +}; + +static ssize_t no_comp_file_read(struct file *f, char __user *buf, size_t count, + loff_t *pos) +{ + char *file_msg = "No engine configured for comp\n"; + + return simple_read_from_buffer(buf, count, pos, file_msg, + strlen(file_msg)); +} + +static const struct file_operations qat_cnv_no_comp_fops = { + .owner = THIS_MODULE, + .read = no_comp_file_read, +}; + +void adf_cnv_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + const struct file_operations *fops; + void *data; + + if (adf_hw_dev_has_compression(accel_dev)) { + fops = &qat_cnv_fops; + data = accel_dev; + } else { + fops = &qat_cnv_no_comp_fops; + data = NULL; + } + + accel_dev->cnv_dbgfile = 
debugfs_create_file(CNV_DEBUGFS_FILENAME, 0400, + accel_dev->debugfs_dir, + data, fops); +} + +void adf_cnv_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + debugfs_remove(accel_dev->cnv_dbgfile); + accel_dev->cnv_dbgfile = NULL; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h new file mode 100644 index 000000000000..b02b0961c433 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_CNV_DBG_H +#define ADF_CNV_DBG_H + +struct adf_accel_dev; + +void adf_cnv_dbgfs_add(struct adf_accel_dev *accel_dev); +void adf_cnv_dbgfs_rm(struct adf_accel_dev *accel_dev); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 46dd81074166..18a382508542 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -96,6 +96,7 @@ int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size); +int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c index 9da1424ac44d..4f0df367c9e1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c @@ -5,6 +5,7 @@ #include "adf_accel_devices.h" #include "adf_cfg.h" #include 
"adf_common_drv.h" +#include "adf_cnv_dbgfs.h" #include "adf_dbgfs.h" #include "adf_fw_counters.h" #include "adf_heartbeat_dbgfs.h" @@ -56,6 +57,7 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev) adf_fw_counters_dbgfs_add(accel_dev); adf_heartbeat_dbgfs_add(accel_dev); adf_pm_dbgfs_add(accel_dev); + adf_cnv_dbgfs_add(accel_dev); } } @@ -66,6 +68,7 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev) void adf_dbgfs_rm(struct adf_accel_dev *accel_dev) { if (!accel_dev->is_vf) { + adf_cnv_dbgfs_rm(accel_dev); adf_pm_dbgfs_rm(accel_dev); adf_heartbeat_dbgfs_rm(accel_dev); adf_fw_counters_dbgfs_rm(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index 2ebbec75d778..9e5ce419d875 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -19,6 +19,7 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_DC_CHAIN_INIT = 11, ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13, ICP_QAT_FW_TIMER_GET = 19, + ICP_QAT_FW_CNV_STATS_GET = 20, ICP_QAT_FW_PM_STATE_CONFIG = 128, ICP_QAT_FW_PM_INFO = 129, }; @@ -65,6 +66,10 @@ struct icp_qat_fw_init_admin_resp { __u16 version_major_num; }; __u32 extended_features; + struct { + __u16 error_count; + __u16 latest_error; + }; }; __u64 opaque_data; union { -- Gitee From 3d1c5c2f30eacc495ae1f97cac6949893ed1bbd9 Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:45 +0100 Subject: [PATCH 0703/2138] crypto: qat - add infrastructure for error reporting ANBZ: #8589 commit 93b2f7de7db598b0fe429948c739c212f8316330 upstream. Intel-SIG: commit 93b2f7de7db5 crypto: qat - add infrastructure for error reporting Backport to support Intel QAT in-tree driver Add infrastructure for enabling, disabling and reporting errors in the QAT driver. 
This adds a new structure, adf_ras_ops, to adf_hw_device_data that contains the following methods: - enable_ras_errors(): allows to enable RAS errors at device initialization. - disable_ras_errors(): allows to disable RAS errors at device shutdown. - handle_interrupt(): allows to detect if there is an error and report if a reset is required. This is executed immediately after the error is reported, in the context of an ISR. An initial, empty, implementation of the methods above is provided for QAT GEN4. Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 2 ++ drivers/crypto/intel/qat/qat_common/Makefile | 1 + .../intel/qat/qat_common/adf_accel_devices.h | 8 ++++++ .../intel/qat/qat_common/adf_gen4_ras.c | 26 +++++++++++++++++++ .../intel/qat/qat_common/adf_gen4_ras.h | 10 +++++++ .../crypto/intel/qat/qat_common/adf_init.c | 6 +++++ drivers/crypto/intel/qat/qat_common/adf_isr.c | 18 +++++++++++++ 7 files changed, 71 insertions(+) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 403f07371445..3151435350c4 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -10,6 +10,7 @@ #include #include #include +#include "adf_gen4_ras.h" #include #include "adf_4xxx_hw_data.h" #include "icp_qat_hw.h" @@ -571,6 +572,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen4_init_dc_ops(&hw_data->dc_ops); + 
adf_gen4_init_ras_ops(&hw_data->ras_ops); } void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data) diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index bb3b2516e6c6..9ba2f8aa1e81 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -19,6 +19,7 @@ intel_qat-objs := adf_cfg.o \ adf_gen4_pm.o \ adf_gen2_dc.o \ adf_gen4_dc.o \ + adf_gen4_ras.o \ adf_gen4_timer.o \ adf_clock.o \ qat_crypto.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 3674904d0527..eb43a6cfa99e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -152,6 +152,13 @@ struct adf_accel_dev; struct adf_etr_data; struct adf_etr_ring_data; +struct adf_ras_ops { + void (*enable_ras_errors)(struct adf_accel_dev *accel_dev); + void (*disable_ras_errors)(struct adf_accel_dev *accel_dev); + bool (*handle_interrupt)(struct adf_accel_dev *accel_dev, + bool *reset_required); +}; + struct adf_pfvf_ops { int (*enable_comms)(struct adf_accel_dev *accel_dev); u32 (*get_pf2vf_offset)(u32 i); @@ -215,6 +222,7 @@ struct adf_hw_device_data { struct adf_pfvf_ops pfvf_ops; struct adf_hw_csr_ops csr_ops; struct adf_dc_ops dc_ops; + struct adf_ras_ops ras_ops; const char *fw_name; const char *fw_mmp_name; u32 fuses; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c new file mode 100644 index 000000000000..0bf243a51527 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include "adf_common_drv.h" +#include "adf_gen4_ras.h" + +static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) +{ +} + +static void adf_gen4_disable_ras(struct adf_accel_dev 
*accel_dev) +{ +} + +static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, + bool *reset_required) +{ + return false; +} + +void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops) +{ + ras_ops->enable_ras_errors = adf_gen4_enable_ras; + ras_ops->disable_ras_errors = adf_gen4_disable_ras; + ras_ops->handle_interrupt = adf_gen4_handle_interrupt; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_ras_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h new file mode 100644 index 000000000000..2765d3529c0d --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_GEN4_RAS_H_ +#define ADF_GEN4_RAS_H_ + +struct adf_ras_ops; + +void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops); + +#endif /* ADF_GEN4_RAS_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index b4cf605ccf3e..4cf49f52d4dd 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -119,6 +119,9 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev) } set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); + if (hw_data->ras_ops.enable_ras_errors) + hw_data->ras_ops.enable_ras_errors(accel_dev); + hw_data->enable_ints(accel_dev); hw_data->enable_error_correction(accel_dev); @@ -353,6 +356,9 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) clear_bit(accel_dev->accel_id, service->init_status); } + if (hw_data->ras_ops.disable_ras_errors) + hw_data->ras_ops.disable_ras_errors(accel_dev); + adf_heartbeat_shutdown(accel_dev); hw_data->disable_iov(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c index 2aba194a7c29..3557a0d6dea2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_isr.c +++ 
b/drivers/crypto/intel/qat/qat_common/adf_isr.c @@ -132,6 +132,21 @@ static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev) return false; } +static bool adf_handle_ras_int(struct adf_accel_dev *accel_dev) +{ + struct adf_ras_ops *ras_ops = &accel_dev->hw_device->ras_ops; + bool reset_required; + + if (ras_ops->handle_interrupt && + ras_ops->handle_interrupt(accel_dev, &reset_required)) { + if (reset_required) + dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n"); + return true; + } + + return false; +} + static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) { struct adf_accel_dev *accel_dev = dev_ptr; @@ -145,6 +160,9 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) if (adf_handle_pm_int(accel_dev)) return IRQ_HANDLED; + if (adf_handle_ras_int(accel_dev)) + return IRQ_HANDLED; + dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", accel_dev->accel_id); -- Gitee From 9431d1b517c58e7ff5ec0ffb928e4cf9a8c5842a Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:46 +0100 Subject: [PATCH 0704/2138] crypto: qat - add reporting of correctable errors for QAT GEN4 ANBZ: #8589 commit df8c184b77a9c6d52e6c7627bbcb902cdc4d2171 upstream. Intel-SIG: commit df8c184b77a9 crypto: qat - add reporting of correctable errors for QAT GEN4 Backport to support Intel QAT in-tree driver Add logic to detect and report correctable errors in QAT GEN4 devices. This includes (1) enabling, disabling and handling error reported through the ERRSOU0 register and (2) logic to log the errors in the system log. 
Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_common/adf_gen4_ras.c | 64 ++++++++++++++++++- .../intel/qat/qat_common/adf_gen4_ras.h | 11 ++++ 2 files changed, 74 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 0bf243a51527..4fbaadbe480e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -1,20 +1,82 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2023 Intel Corporation */ #include "adf_common_drv.h" +#include "adf_gen4_hw_data.h" #include "adf_gen4_ras.h" +static void enable_errsou_reporting(void __iomem *csr) +{ + /* Enable correctable error reporting in ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, 0); +} + +static void disable_errsou_reporting(void __iomem *csr) +{ + /* Disable correctable error reporting in ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, ADF_GEN4_ERRSOU0_BIT); +} + +static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask; + + /* Enable Acceleration Engine correctable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, ae_mask); +} + +static void disable_ae_error_reporting(void __iomem *csr) +{ + /* Disable Acceleration Engine correctable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, 0); +} + static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) { + void __iomem *csr = adf_get_pmisc_base(accel_dev); + + enable_errsou_reporting(csr); + enable_ae_error_reporting(accel_dev, csr); } static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) { + void __iomem *csr = adf_get_pmisc_base(accel_dev); 
+ + disable_errsou_reporting(csr); + disable_ae_error_reporting(csr); +} + +static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 aecorrerr = ADF_CSR_RD(csr, ADF_GEN4_HIAECORERRLOG_CPP0); + + aecorrerr &= GET_HW_DATA(accel_dev)->ae_mask; + + dev_warn(&GET_DEV(accel_dev), + "Correctable error detected in AE: 0x%x\n", + aecorrerr); + + /* Clear interrupt from ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOG_CPP0, aecorrerr); } static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required) { - return false; + void __iomem *csr = adf_get_pmisc_base(accel_dev); + u32 errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU0); + bool handled = false; + + *reset_required = false; + + if (errsou & ADF_GEN4_ERRSOU0_BIT) { + adf_gen4_process_errsou0(accel_dev, csr); + handled = true; + } + + return handled; } void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h index 2765d3529c0d..e6c4dfbb2389 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h @@ -3,8 +3,19 @@ #ifndef ADF_GEN4_RAS_H_ #define ADF_GEN4_RAS_H_ +#include + struct adf_ras_ops; +/* ERRSOU0 Correctable error mask*/ +#define ADF_GEN4_ERRSOU0_BIT BIT(0) + +/* HI AE Correctable error log */ +#define ADF_GEN4_HIAECORERRLOG_CPP0 0x41A308 + +/* HI AE Correctable error log enable */ +#define ADF_GEN4_HIAECORERRLOGENABLE_CPP0 0x41A318 + void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops); #endif /* ADF_GEN4_RAS_H_ */ -- Gitee From c1d1c52fd4dff541131589c8b9126fb2d20e5ed2 Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:47 +0100 Subject: [PATCH 0705/2138] crypto: qat - add reporting of errors from ERRSOU1 for QAT GEN4 ANBZ: #8589 commit 4926e89d19b0631d8f5f5f292c4caf0f0de08f4f upstream. 
Intel-SIG: commit 4926e89d19b0 crypto: qat - add reporting of errors from ERRSOU1 for QAT GEN4 Backport to support Intel QAT in-tree driver Add logic to detect and report uncorrectable errors reported through the ERRSOU1 register in QAT GEN4 devices. This also introduces the adf_dev_err_mask structure as part of adf_hw_device_data which will allow to provide different error masks per device generation. Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 6 + .../intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 2 + .../intel/qat/qat_common/adf_accel_devices.h | 6 + .../intel/qat/qat_common/adf_gen4_ras.c | 289 ++++++++++++++++++ .../intel/qat/qat_common/adf_gen4_ras.h | 190 ++++++++++++ 5 files changed, 493 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 3151435350c4..944ca89a7717 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -505,6 +505,11 @@ static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) return fw_config[obj_num].ae_mask; } +static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) +{ + dev_err_mask->cppagentcmdpar_mask = ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK; +} + void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) { hw_data->dev_class = &adf_4xxx_class; @@ -569,6 +574,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->get_hb_clock = get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + adf_gen4_set_err_mask(&hw_data->dev_err_mask); adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); 
adf_gen4_init_dc_ops(&hw_data->dc_ops); diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h index bb3d95a8fb21..7695b4e7277e 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -28,6 +28,8 @@ #define ADF_4XXX_ACCELENGINES_MASK (0x1FF) #define ADF_4XXX_ADMIN_AE_MASK (0x100) +#define ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK 0x1F + #define ADF_4XXX_ETR_MAX_BANKS 64 /* MSIX interrupt */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index eb43a6cfa99e..c173873b3e2b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -176,6 +176,10 @@ struct adf_dc_ops { void (*build_deflate_ctx)(void *ctx); }; +struct adf_dev_err_mask { + u32 cppagentcmdpar_mask; +}; + struct adf_hw_device_data { struct adf_hw_device_class *dev_class; u32 (*get_accel_mask)(struct adf_hw_device_data *self); @@ -223,6 +227,7 @@ struct adf_hw_device_data { struct adf_hw_csr_ops csr_ops; struct adf_dc_ops dc_ops; struct adf_ras_ops ras_ops; + struct adf_dev_err_mask dev_err_mask; const char *fw_name; const char *fw_mmp_name; u32 fuses; @@ -271,6 +276,7 @@ struct adf_hw_device_data { #define GET_SRV_TYPE(accel_dev, idx) \ (((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \ & ADF_SRV_TYPE_MASK) +#define GET_ERR_MASK(accel_dev) (&GET_HW_DATA(accel_dev)->dev_err_mask) #define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines) #define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops) #define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 4fbaadbe480e..59ae5a574091 100644 --- 
a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -8,12 +8,18 @@ static void enable_errsou_reporting(void __iomem *csr) { /* Enable correctable error reporting in ERRSOU0 */ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, 0); + + /* Enable uncorrectable error reporting in ERRSOU1 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, 0); } static void disable_errsou_reporting(void __iomem *csr) { /* Disable correctable error reporting in ERRSOU0 */ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, ADF_GEN4_ERRSOU0_BIT); + + /* Disable uncorrectable error reporting in ERRSOU1 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, ADF_GEN4_ERRSOU1_BITMASK); } static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, @@ -23,12 +29,73 @@ static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, /* Enable Acceleration Engine correctable error reporting */ ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, ae_mask); + + /* Enable Acceleration Engine uncorrectable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0, ae_mask); } static void disable_ae_error_reporting(void __iomem *csr) { /* Disable Acceleration Engine correctable error reporting */ ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, 0); + + /* Disable Acceleration Engine uncorrectable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0, 0); +} + +static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + /* Enable HI CPP Agents Command Parity Error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, + err_mask->cppagentcmdpar_mask); +} + +static void disable_cpp_error_reporting(void __iomem *csr) +{ + /* Disable HI CPP Agents Command Parity Error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, 0); +} + +static void enable_ti_ri_error_reporting(void __iomem *csr) +{ + /* Enable RI Memory error reporting 
*/ + ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, + ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK | + ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK); + + /* Enable IOSF Primary Command Parity error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RIMISCCTL, ADF_GEN4_RIMISCSTS_BIT); + + /* Enable TI Internal Memory Parity Error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, 0); +} + +static void disable_ti_ri_error_reporting(void __iomem *csr) +{ + /* Disable RI Memory error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, 0); + + /* Disable IOSF Primary Command Parity error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RIMISCCTL, 0); + + /* Disable TI Internal Memory Parity Error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_ERR_MASK, + ADF_GEN4_TI_CI_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK, + ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK, + ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK, + ADF_GEN4_TI_CD_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, + ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK); } static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) @@ -37,6 +104,8 @@ static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) enable_errsou_reporting(csr); enable_ae_error_reporting(accel_dev, csr); + enable_cpp_error_reporting(accel_dev, csr); + enable_ti_ri_error_reporting(csr); } static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) @@ -45,6 +114,8 @@ static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) disable_errsou_reporting(csr); disable_ae_error_reporting(csr); + disable_cpp_error_reporting(csr); + disable_ti_ri_error_reporting(csr); } static void adf_gen4_process_errsou0(struct 
adf_accel_dev *accel_dev, @@ -62,6 +133,218 @@ static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev, ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOG_CPP0, aecorrerr); } +static bool adf_handle_cpp_aeunc(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 aeuncorerr; + + if (!(errsou & ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT)) + return false; + + aeuncorerr = ADF_CSR_RD(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0); + aeuncorerr &= GET_HW_DATA(accel_dev)->ae_mask; + + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error detected in AE: 0x%x\n", + aeuncorerr); + + ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0, aeuncorerr); + + return false; +} + +static bool adf_handle_cppcmdparerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 cmdparerr; + + if (!(errsou & ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT)) + return false; + + cmdparerr = ADF_CSR_RD(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG); + cmdparerr &= err_mask->cppagentcmdpar_mask; + + dev_err(&GET_DEV(accel_dev), + "HI CPP agent command parity error: 0x%x\n", + cmdparerr); + + ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG, cmdparerr); + + return true; +} + +static bool adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 rimem_parerr_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT)) + return false; + + rimem_parerr_sts = ADF_CSR_RD(csr, ADF_GEN4_RIMEM_PARERR_STS); + rimem_parerr_sts &= ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK | + ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK; + + if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK) + dev_err(&GET_DEV(accel_dev), + "RI Memory Parity uncorrectable error: 0x%x\n", + rimem_parerr_sts); + + if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "RI Memory Parity fatal error: 0x%x\n", + rimem_parerr_sts); + reset_required = true; 
+ } + + ADF_CSR_WR(csr, ADF_GEN4_RIMEM_PARERR_STS, rimem_parerr_sts); + + return reset_required; +} + +static bool adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_ci_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_ci_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_CI_PAR_STS); + ti_ci_par_sts &= ADF_GEN4_TI_CI_PAR_STS_BITMASK; + + if (ti_ci_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI Memory Parity Error: 0x%x\n", ti_ci_par_sts); + ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_STS, ti_ci_par_sts); + } + + return false; +} + +static bool adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_pullfub_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_pullfub_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS); + ti_pullfub_par_sts &= ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK; + + if (ti_pullfub_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI Pull Parity Error: 0x%x\n", ti_pullfub_par_sts); + + ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS, + ti_pullfub_par_sts); + } + + return false; +} + +static bool adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_pushfub_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_pushfub_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS); + ti_pushfub_par_sts &= ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK; + + if (ti_pushfub_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI Push Parity Error: 0x%x\n", ti_pushfub_par_sts); + + ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS, + ti_pushfub_par_sts); + } + + return false; +} + +static bool adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_cd_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_cd_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_CD_PAR_STS); + 
ti_cd_par_sts &= ADF_GEN4_TI_CD_PAR_STS_BITMASK; + + if (ti_cd_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI CD Parity Error: 0x%x\n", ti_cd_par_sts); + + ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_STS, ti_cd_par_sts); + } + + return false; +} + +static bool adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_trnsb_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_trnsb_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_TRNSB_PAR_STS); + ti_trnsb_par_sts &= ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK; + + if (ti_trnsb_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI TRNSB Parity Error: 0x%x\n", ti_trnsb_par_sts); + + ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_STS, ti_trnsb_par_sts); + } + + return false; +} + +static bool adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 rimiscsts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + rimiscsts = ADF_CSR_RD(csr, ADF_GEN4_RIMISCSTS); + rimiscsts &= ADF_GEN4_RIMISCSTS_BIT; + + dev_err(&GET_DEV(accel_dev), + "Command Parity error detected on IOSFP: 0x%x\n", + rimiscsts); + + ADF_CSR_WR(csr, ADF_GEN4_RIMISCSTS, rimiscsts); + + return true; +} + +static void adf_gen4_process_errsou1(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou, + bool *reset_required) +{ + *reset_required |= adf_handle_cpp_aeunc(accel_dev, csr, errsou); + *reset_required |= adf_handle_cppcmdparerr(accel_dev, csr, errsou); + *reset_required |= adf_handle_ri_mem_par_err(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_ci_par_sts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_pullfub_par_sts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_pushfub_par_sts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_cd_par_sts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ti_trnsb_par_sts(accel_dev, csr, errsou); + *reset_required |= 
adf_handle_iosfp_cmd_parerr(accel_dev, csr, errsou); +} + static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required) { @@ -76,6 +359,12 @@ static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, handled = true; } + errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU1); + if (errsou & ADF_GEN4_ERRSOU1_BITMASK) { + adf_gen4_process_errsou1(accel_dev, csr, errsou, reset_required); + handled = true; + } + return handled; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h index e6c4dfbb2389..67a85cc74a44 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h @@ -15,6 +15,196 @@ struct adf_ras_ops; /* HI AE Correctable error log enable */ #define ADF_GEN4_HIAECORERRLOGENABLE_CPP0 0x41A318 +#define ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT BIT(0) +#define ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT BIT(1) +#define ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT BIT(2) +#define ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT BIT(3) +#define ADF_GEN4_ERRSOU1_RIMISCSTS_BIT BIT(4) + +#define ADF_GEN4_ERRSOU1_BITMASK ( \ + (ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT) | \ + (ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT) | \ + (ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT) | \ + (ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT) | \ + (ADF_GEN4_ERRSOU1_RIMISCSTS_BIT)) + +/* HI AE Uncorrectable error log */ +#define ADF_GEN4_HIAEUNCERRLOG_CPP0 0x41A300 + +/* HI AE Uncorrectable error log enable */ +#define ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0 0x41A320 + +/* HI CPP Agent Command parity error log */ +#define ADF_GEN4_HICPPAGENTCMDPARERRLOG 0x41A310 + +/* HI CPP Agent Command parity error logging enable */ +#define ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE 0x41A314 + +/* RI Memory parity error status register */ +#define ADF_GEN4_RIMEM_PARERR_STS 0x41B128 + +/* RI Memory parity error reporting enable */ +#define ADF_GEN4_RI_MEM_PAR_ERR_EN0 0x41B12C + +/* + * RI 
Memory parity error mask + * BIT(0) - BIT(3) - ri_iosf_pdata_rxq[0:3] parity error + * BIT(4) - ri_tlq_phdr parity error + * BIT(5) - ri_tlq_pdata parity error + * BIT(6) - ri_tlq_nphdr parity error + * BIT(7) - ri_tlq_npdata parity error + * BIT(8) - BIT(9) - ri_tlq_cplhdr[0:1] parity error + * BIT(10) - BIT(17) - ri_tlq_cpldata[0:7] parity error + * BIT(18) - set this bit to 1 to enable logging status to ri_mem_par_err_sts0 + * BIT(19) - ri_cds_cmd_fifo parity error + * BIT(20) - ri_obc_ricpl_fifo parity error + * BIT(21) - ri_obc_tiricpl_fifo parity error + * BIT(22) - ri_obc_cppcpl_fifo parity error + * BIT(23) - ri_obc_pendcpl_fifo parity error + * BIT(24) - ri_cpp_cmd_fifo parity error + * BIT(25) - ri_cds_ticmd_fifo parity error + * BIT(26) - riti_cmd_fifo parity error + * BIT(27) - ri_int_msixtbl parity error + * BIT(28) - ri_int_imstbl parity error + * BIT(30) - ri_kpt_fuses parity error + */ +#define ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(5) | \ + BIT(7) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \ + BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | \ + BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | \ + BIT(26) | BIT(27) | BIT(28) | BIT(30)) + +#define ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK \ + (BIT(4) | BIT(6) | BIT(8) | BIT(9)) + +/* TI CI parity status */ +#define ADF_GEN4_TI_CI_PAR_STS 0x50060C + +/* TI CI parity reporting mask */ +#define ADF_GEN4_TI_CI_PAR_ERR_MASK 0x500608 + +/* + * TI CI parity status mask + * BIT(0) - CdCmdQ_sts patiry error status + * BIT(1) - CdDataQ_sts parity error status + * BIT(3) - CPP_SkidQ_sts parity error status + * BIT(7) - CPP_SkidQ_sc_sts parity error status + */ +#define ADF_GEN4_TI_CI_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(3) | BIT(7)) + +/* TI PULLFUB parity status */ +#define ADF_GEN4_TI_PULL0FUB_PAR_STS 0x500618 + +/* TI PULLFUB parity error reporting mask */ +#define ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK 0x500614 + +/* + * TI PULLFUB parity 
status mask + * BIT(0) - TrnPullReqQ_sts parity status + * BIT(1) - TrnSharedDataQ_sts parity status + * BIT(2) - TrnPullReqDataQ_sts parity status + * BIT(4) - CPP_CiPullReqQ_sts parity status + * BIT(5) - CPP_TrnPullReqQ_sts parity status + * BIT(6) - CPP_PullidQ_sts parity status + * BIT(7) - CPP_WaitDataQ_sts parity status + * BIT(8) - CPP_CdDataQ_sts parity status + * BIT(9) - CPP_TrnDataQP0_sts parity status + * BIT(10) - BIT(11) - CPP_TrnDataQRF[00:01]_sts parity status + * BIT(12) - CPP_TrnDataQP1_sts parity status + * BIT(13) - BIT(14) - CPP_TrnDataQRF[10:11]_sts parity status + */ +#define ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | \ + BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14)) + +/* TI PUSHUB parity status */ +#define ADF_GEN4_TI_PUSHFUB_PAR_STS 0x500630 + +/* TI PUSHFUB parity error reporting mask */ +#define ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK 0x50062C + +/* + * TI PUSHUB parity status mask + * BIT(0) - SbPushReqQ_sts parity status + * BIT(1) - BIT(2) - SbPushDataQ[0:1]_sts parity status + * BIT(4) - CPP_CdPushReqQ_sts parity status + * BIT(5) - BIT(6) - CPP_CdPushDataQ[0:1]_sts parity status + * BIT(7) - CPP_SbPushReqQ_sts parity status + * BIT(8) - CPP_SbPushDataQP_sts parity status + * BIT(9) - BIT(10) - CPP_SbPushDataQRF[0:1]_sts parity status + */ +#define ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | \ + BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10)) + +/* TI CD parity status */ +#define ADF_GEN4_TI_CD_PAR_STS 0x50063C + +/* TI CD parity error mask */ +#define ADF_GEN4_TI_CD_PAR_ERR_MASK 0x500638 + +/* + * TI CD parity status mask + * BIT(0) - BIT(15) - CtxMdRam[0:15]_sts parity status + * BIT(16) - Leaf2ClusterRam_sts parity status + * BIT(17) - BIT(18) - Ring2LeafRam[0:1]_sts parity status + * BIT(19) - VirtualQ_sts parity status + * BIT(20) - DtRdQ_sts parity status + * BIT(21) - DtWrQ_sts parity status + * BIT(22) - 
RiCmdQ_sts parity status + * BIT(23) - BypassQ_sts parity status + * BIT(24) - DtRdQ_sc_sts parity status + * BIT(25) - DtWrQ_sc_sts parity status + */ +#define ADF_GEN4_TI_CD_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \ + BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \ + BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \ + BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25)) + +/* TI TRNSB parity status */ +#define ADF_GEN4_TI_TRNSB_PAR_STS 0x500648 + +/* TI TRNSB Parity error reporting mask */ +#define ADF_GEN4_TI_TRNSB_PAR_ERR_MASK 0x500644 + +/* + * TI TRNSB parity status mask + * BIT(0) - TrnPHdrQP_sts parity status + * BIT(1) - TrnPHdrQRF_sts parity status + * BIT(2) - TrnPDataQP_sts parity status + * BIT(3) - BIT(6) - TrnPDataQRF[0:3]_sts parity status + * BIT(7) - TrnNpHdrQP_sts parity status + * BIT(8) - BIT(9) - TrnNpHdrQRF[0:1]_sts parity status + * BIT(10) - TrnCplHdrQ_sts parity status + * BIT(11) - TrnPutObsReqQ_sts parity status + * BIT(12) - TrnPushReqQ_sts parity status + * BIT(13) - SbSplitIdRam_sts parity status + * BIT(14) - SbReqCountQ_sts parity status + * BIT(15) - SbCplTrkRam_sts parity status + * BIT(16) - SbGetObsReqQ_sts parity status + * BIT(17) - SbEpochIdQ_sts parity status + * BIT(18) - SbAtCplHdrQ_sts parity status + * BIT(19) - SbAtCplDataQ_sts parity status + * BIT(20) - SbReqCountRam_sts parity status + * BIT(21) - SbAtCplHdrQ_sc_sts parity status + */ +#define ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \ + BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | \ + BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | \ + BIT(19) | BIT(20) | BIT(21)) + +/* Status register to log misc error on RI */ +#define ADF_GEN4_RIMISCSTS 0x41B1B8 + +/* Status control register to log misc RI error */ +#define ADF_GEN4_RIMISCCTL 0x41B1BC + +/* Command Parity error detected on IOSFP Command to QAT */ 
+#define ADF_GEN4_RIMISCSTS_BIT BIT(0) void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops); -- Gitee From d03b4ee65ccec8d2dd0f51f073cfd73d5f25ebe3 Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:48 +0100 Subject: [PATCH 0706/2138] crypto: qat - add handling of errors from ERRSOU2 for QAT GEN4 ANBZ: #8589 commit 895f7d532c843f49e0b6dc8341bb911b26da4731 upstream. Intel-SIG: commit 895f7d532c84 crypto: qat - add handling of errors from ERRSOU2 for QAT GEN4 Backport to support Intel QAT in-tree driver Add logic to detect, report and handle uncorrectable errors reported through the ERRSOU2 register in QAT GEN4 devices. Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 5 + .../intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 15 + .../intel/qat/qat_common/adf_accel_devices.h | 6 + .../intel/qat/qat_common/adf_gen4_ras.c | 709 ++++++++++++++++++ .../intel/qat/qat_common/adf_gen4_ras.h | 320 ++++++++ 5 files changed, 1055 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 944ca89a7717..4775df841982 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -508,6 +508,11 @@ static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) { dev_err_mask->cppagentcmdpar_mask = ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK; + dev_err_mask->parerr_ath_cph_mask = ADF_4XXX_PARITYERRORMASK_ATH_CPH_MASK; + dev_err_mask->parerr_cpr_xlt_mask = ADF_4XXX_PARITYERRORMASK_CPR_XLT_MASK; + dev_err_mask->parerr_dcpr_ucs_mask = ADF_4XXX_PARITYERRORMASK_DCPR_UCS_MASK; + dev_err_mask->parerr_pke_mask = 
ADF_4XXX_PARITYERRORMASK_PKE_MASK; + dev_err_mask->ssmfeatren_mask = ADF_4XXX_SSMFEATREN_MASK; } void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h index 7695b4e7277e..efd5dadc19ed 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -29,6 +29,21 @@ #define ADF_4XXX_ADMIN_AE_MASK (0x100) #define ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK 0x1F +#define ADF_4XXX_PARITYERRORMASK_ATH_CPH_MASK 0xF000F +#define ADF_4XXX_PARITYERRORMASK_CPR_XLT_MASK 0x10001 +#define ADF_4XXX_PARITYERRORMASK_DCPR_UCS_MASK 0x30007 +#define ADF_4XXX_PARITYERRORMASK_PKE_MASK 0x3F + +/* + * SSMFEATREN bit mask + * BIT(4) - enables parity detection on CPP + * BIT(12) - enables the logging of push/pull data errors + * in pperr register + * BIT(16) - BIT(23) - enable parity detection on SPPs + */ +#define ADF_4XXX_SSMFEATREN_MASK \ + (BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | \ + BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23)) #define ADF_4XXX_ETR_MAX_BANKS 64 diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index c173873b3e2b..c8492d792c0e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -178,6 +178,12 @@ struct adf_dc_ops { struct adf_dev_err_mask { u32 cppagentcmdpar_mask; + u32 parerr_ath_cph_mask; + u32 parerr_cpr_xlt_mask; + u32 parerr_dcpr_ucs_mask; + u32 parerr_pke_mask; + u32 parerr_wat_wcp_mask; + u32 ssmfeatren_mask; }; struct adf_hw_device_data { diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 59ae5a574091..877abed683d8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ 
b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -11,15 +11,30 @@ static void enable_errsou_reporting(void __iomem *csr) /* Enable uncorrectable error reporting in ERRSOU1 */ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, 0); + + /* + * Enable uncorrectable error reporting in ERRSOU2 + * but disable PM interrupt and CFC attention interrupt by default + */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, + ADF_GEN4_ERRSOU2_PM_INT_BIT | + ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK); } static void disable_errsou_reporting(void __iomem *csr) { + u32 val = 0; + /* Disable correctable error reporting in ERRSOU0 */ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, ADF_GEN4_ERRSOU0_BIT); /* Disable uncorrectable error reporting in ERRSOU1 */ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, ADF_GEN4_ERRSOU1_BITMASK); + + /* Disable uncorrectable error reporting in ERRSOU2 */ + val = ADF_CSR_RD(csr, ADF_GEN4_ERRMSK2); + val |= ADF_GEN4_ERRSOU2_DIS_BITMASK; + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, val); } static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, @@ -51,12 +66,18 @@ static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev, /* Enable HI CPP Agents Command Parity Error Reporting */ ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, err_mask->cppagentcmdpar_mask); + + ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_CTRL, + ADF_GEN4_CPP_CFC_ERR_CTRL_BITMASK); } static void disable_cpp_error_reporting(void __iomem *csr) { /* Disable HI CPP Agents Command Parity Error Reporting */ ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, 0); + + ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_CTRL, + ADF_GEN4_CPP_CFC_ERR_CTRL_DIS_BITMASK); } static void enable_ti_ri_error_reporting(void __iomem *csr) @@ -98,6 +119,138 @@ static void disable_ti_ri_error_reporting(void __iomem *csr) ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK); } +static void enable_rf_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + /* Enable RF parity error in Shared 
RAM */ + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE, 0); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP, 0); +} + +static void disable_rf_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + /* Disable RF Parity Error reporting in Shared RAM */ + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC, + ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH, + err_mask->parerr_ath_cph_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT, + err_mask->parerr_cpr_xlt_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS, + err_mask->parerr_dcpr_ucs_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE, + err_mask->parerr_pke_mask); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP, + err_mask->parerr_wat_wcp_mask); +} + +static void enable_ssm_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 val = 0; + + /* Enable SSM interrupts */ + ADF_CSR_WR(csr, ADF_GEN4_INTMASKSSM, 0); + + /* Enable shared memory error detection & correction */ + val = ADF_CSR_RD(csr, ADF_GEN4_SSMFEATREN); + val |= err_mask->ssmfeatren_mask; + ADF_CSR_WR(csr, ADF_GEN4_SSMFEATREN, val); + + /* Enable SER detection in SER_err_ssmsh register */ + ADF_CSR_WR(csr, ADF_GEN4_SER_EN_SSMSH, + ADF_GEN4_SER_EN_SSMSH_BITMASK); + + /* Enable SSM soft parity error */ + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_ATH_CPH, 0); + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_CPR_XLT, 0); + ADF_CSR_WR(csr, 
ADF_GEN4_SPPPARERRMSK_DCPR_UCS, 0); + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_PKE, 0); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_WAT_WCP, 0); + + /* Enable slice hang interrupt reporting */ + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_ATH_CPH, 0); + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_CPR_XLT, 0); + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_DCPR_UCS, 0); + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_PKE, 0); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_WAT_WCP, 0); +} + +static void disable_ssm_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 val = 0; + + /* Disable SSM interrupts */ + ADF_CSR_WR(csr, ADF_GEN4_INTMASKSSM, + ADF_GEN4_INTMASKSSM_BITMASK); + + /* Disable shared memory error detection & correction */ + val = ADF_CSR_RD(csr, ADF_GEN4_SSMFEATREN); + val &= ADF_GEN4_SSMFEATREN_DIS_BITMASK; + ADF_CSR_WR(csr, ADF_GEN4_SSMFEATREN, val); + + /* Disable SER detection in SER_err_ssmsh register */ + ADF_CSR_WR(csr, ADF_GEN4_SER_EN_SSMSH, 0); + + /* Disable SSM soft parity error */ + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_ATH_CPH, + err_mask->parerr_ath_cph_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_CPR_XLT, + err_mask->parerr_cpr_xlt_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_DCPR_UCS, + err_mask->parerr_dcpr_ucs_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_PKE, + err_mask->parerr_pke_mask); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_WAT_WCP, + err_mask->parerr_wat_wcp_mask); + + /* Disable slice hang interrupt reporting */ + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_ATH_CPH, + err_mask->parerr_ath_cph_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_CPR_XLT, + err_mask->parerr_cpr_xlt_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_DCPR_UCS, + err_mask->parerr_dcpr_ucs_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_PKE, + err_mask->parerr_pke_mask); + + if 
(err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_WAT_WCP, + err_mask->parerr_wat_wcp_mask); +} + static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) { void __iomem *csr = adf_get_pmisc_base(accel_dev); @@ -106,6 +259,8 @@ static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) enable_ae_error_reporting(accel_dev, csr); enable_cpp_error_reporting(accel_dev, csr); enable_ti_ri_error_reporting(csr); + enable_rf_error_reporting(accel_dev, csr); + enable_ssm_error_reporting(accel_dev, csr); } static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) @@ -116,6 +271,8 @@ static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) disable_ae_error_reporting(csr); disable_cpp_error_reporting(csr); disable_ti_ri_error_reporting(csr); + disable_rf_error_reporting(accel_dev, csr); + disable_ssm_error_reporting(accel_dev, csr); } static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev, @@ -345,6 +502,552 @@ static void adf_gen4_process_errsou1(struct adf_accel_dev *accel_dev, *reset_required |= adf_handle_iosfp_cmd_parerr(accel_dev, csr, errsou); } +static bool adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_UERRSSMSH); + reg &= ADF_GEN4_UERRSSMSH_BITMASK; + + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error on ssm shared memory: 0x%x\n", + reg); + + ADF_CSR_WR(csr, ADF_GEN4_UERRSSMSH, reg); + + return false; +} + +static bool adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_CERRSSMSH); + reg &= ADF_GEN4_CERRSSMSH_ERROR_BIT; + + dev_warn(&GET_DEV(accel_dev), + "Correctable error on ssm shared memory: 0x%x\n", + reg); + + ADF_CSR_WR(csr, ADF_GEN4_CERRSSMSH, reg); + + return 
false; +} + +static bool adf_handle_pperr_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_PPERR_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_PPERR); + reg &= ADF_GEN4_PPERR_BITMASK; + + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error CPP transaction on memory target: 0x%x\n", + reg); + + ADF_CSR_WR(csr, ADF_GEN4_PPERR, reg); + + return false; +} + +static void adf_poll_slicehang_csr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 slice_hang_offset, + char *slice_name) +{ + u32 slice_hang_reg = ADF_CSR_RD(csr, slice_hang_offset); + + if (!slice_hang_reg) + return; + + dev_err(&GET_DEV(accel_dev), + "Slice %s hang error encountered\n", slice_name); +} + +static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT)) + return false; + + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_ATH_CPH, "ath_cph"); + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_CPR_XLT, "cpr_xlt"); + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_DCPR_UCS, "dcpr_ucs"); + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_PKE, "pke"); + + if (err_mask->parerr_wat_wcp_mask) + adf_poll_slicehang_csr(accel_dev, csr, + ADF_GEN4_SLICEHANGSTATUS_WAT_WCP, + "ath_cph"); + + return false; +} + +static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + bool reset_required = false; + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error ATH_CPH: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH, reg); + + 
reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error CPR_XLT: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error DCPR_UCS: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error PKE: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE, reg); + + reset_required = true; + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error WAT_WCP: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP, reg); + + reset_required = true; + } + } + + return reset_required; +} + +static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err ATH_CPH: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err CPR_XLT: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT, reg); + } + + reg = ADF_CSR_RD(csr, 
ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err DCPR_UCS: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err PKE: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE, reg); + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err WAT_WCP: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP, reg); + } + } + + return false; +} + +static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + bool reset_required = false; + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error ATH_CPH: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error CPR_XLT: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error DCPR_UCS: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + 
dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error PKE: 0x%x\n", + reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE, reg); + + reset_required = true; + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error WAT_WCP: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP, reg); + + reset_required = true; + } + } + + return reset_required; +} + +static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err ATH_CPH: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err CPR_XLT: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err DCPR_UCS: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err PKE: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE, reg); + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err WAT_WCP: 0x%x\n", reg); + + ADF_CSR_WR(csr, 
ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP, + reg); + } + } + + return false; +} + +static bool adf_handle_spppar_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + bool reset_required; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT)) + return false; + + reset_required = adf_handle_spp_pullcmd_err(accel_dev, csr); + reset_required |= adf_handle_spp_pulldata_err(accel_dev, csr); + reset_required |= adf_handle_spp_pushcmd_err(accel_dev, csr); + reset_required |= adf_handle_spp_pushdata_err(accel_dev, csr); + + return reset_required; +} + +static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + bool reset_required = false; + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR); + reg &= ADF_GEN4_SSMCPPERR_FATAL_BITMASK | ADF_GEN4_SSMCPPERR_UNCERR_BITMASK; + if (reg & ADF_GEN4_SSMCPPERR_FATAL_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "Fatal SSM CPP parity error: 0x%x\n", reg); + + reset_required = true; + } + + if (reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK) + dev_err(&GET_DEV(accel_dev), + "non-Fatal SSM CPP parity error: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SSMCPPERR, reg); + + return reset_required; +} + +static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC); + reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT; + if (reg) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT); + reg &= 
err_mask->parerr_cpr_xlt_mask; + if (reg) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg); + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg); + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP, + reg); + } + + dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported"); + + return false; +} + +static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + bool reset_required = false; + u32 reg; + + if (!(iastatssm & (ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT | + ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT))) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH); + reg &= ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK | + ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK | + ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK; + if (reg & ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "Fatal SER_SSMSH_ERR: 0x%x\n", reg); + + reset_required = true; + } + + if (reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK) + dev_err(&GET_DEV(accel_dev), + "non-fatal SER_SSMSH_ERR: 0x%x\n", reg); + + if (reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK) + dev_warn(&GET_DEV(accel_dev), + "Correctable SER_SSMSH_ERR: 0x%x\n", reg); + + ADF_CSR_WR(csr, ADF_GEN4_SER_ERR_SSMSH, reg); + + return reset_required; +} + +static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 iastatssm = ADF_CSR_RD(csr, ADF_GEN4_IAINTSTATSSM); + bool reset_required; + + iastatssm &= ADF_GEN4_IAINTSTATSSM_BITMASK; + if (!iastatssm) + return false; + + reset_required 
= adf_handle_uerrssmsh(accel_dev, csr, iastatssm); + reset_required |= adf_handle_cerrssmsh(accel_dev, csr, iastatssm); + reset_required |= adf_handle_pperr_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm); + reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm); + + ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm); + + return reset_required; +} + +static bool adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + if (!(errsou & ADF_GEN4_ERRSOU2_SSM_ERR_BIT)) + return false; + + return adf_handle_iaintstatssm(accel_dev, csr); +} + +static bool adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 reg; + + if (!(errsou & ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_CPP_CFC_ERR_STATUS); + if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_DATAPAR_BIT) { + dev_err(&GET_DEV(accel_dev), + "CPP_CFC_ERR: data parity: 0x%x", reg); + } + + if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_CMDPAR_BIT) { + dev_err(&GET_DEV(accel_dev), + "CPP_CFC_ERR: command parity: 0x%x", reg); + + reset_required = true; + } + + if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_MERR_BIT) { + dev_err(&GET_DEV(accel_dev), + "CPP_CFC_ERR: multiple errors: 0x%x", reg); + + reset_required = true; + } + + ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_STATUS_CLR, + ADF_GEN4_CPP_CFC_ERR_STATUS_CLR_BITMASK); + + return reset_required; +} + +static void adf_gen4_process_errsou2(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou, + bool *reset_required) +{ + *reset_required |= adf_handle_ssm(accel_dev, csr, errsou); + *reset_required |= adf_handle_cpp_cfc_err(accel_dev, csr, errsou); +} + static 
bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required) { @@ -365,6 +1068,12 @@ static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, handled = true; } + errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU2); + if (errsou & ADF_GEN4_ERRSOU2_BITMASK) { + adf_gen4_process_errsou2(accel_dev, csr, errsou, reset_required); + handled = true; + } + return handled; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h index 67a85cc74a44..65c1b7925444 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h @@ -203,6 +203,326 @@ struct adf_ras_ops; /* Status control register to log misc RI error */ #define ADF_GEN4_RIMISCCTL 0x41B1BC +/* + * ERRSOU2 bit mask + * BIT(0) - SSM Interrupt Mask + * BIT(1) - CFC on CPP. ORed of CFC Push error and Pull error + * BIT(2) - BIT(4) - CPP attention interrupts, deprecated on gen4 devices + * BIT(18) - PM interrupt + */ +#define ADF_GEN4_ERRSOU2_SSM_ERR_BIT BIT(0) +#define ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT BIT(1) +#define ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK \ + (BIT(2) | BIT(3) | BIT(4)) + +#define ADF_GEN4_ERRSOU2_PM_INT_BIT BIT(18) + +#define ADF_GEN4_ERRSOU2_BITMASK \ + (ADF_GEN4_ERRSOU2_SSM_ERR_BIT | \ + ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT) + +#define ADF_GEN4_ERRSOU2_DIS_BITMASK \ + (ADF_GEN4_ERRSOU2_SSM_ERR_BIT | \ + ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT | \ + ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK) + +#define ADF_GEN4_IAINTSTATSSM 0x28 + +/* IAINTSTATSSM error bit mask definitions */ +#define ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT BIT(0) +#define ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT BIT(1) +#define ADF_GEN4_IAINTSTATSSM_PPERR_BIT BIT(2) +#define ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT BIT(3) +#define ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT BIT(4) +#define ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT BIT(5) +#define ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT BIT(6) 
+#define ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT BIT(7) +#define ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT BIT(8) + +#define ADF_GEN4_IAINTSTATSSM_BITMASK \ + (ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT | \ + ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT | \ + ADF_GEN4_IAINTSTATSSM_PPERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT | \ + ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT) + +#define ADF_GEN4_UERRSSMSH 0x18 + +/* + * UERRSSMSH error bit masks definitions + * + * BIT(0) - Indicates one uncorrectable error + * BIT(15) - Indicates multiple uncorrectable errors + * in device shared memory + */ +#define ADF_GEN4_UERRSSMSH_BITMASK (BIT(0) | BIT(15)) + +#define ADF_GEN4_UERRSSMSHAD 0x1C + +#define ADF_GEN4_CERRSSMSH 0x10 + +/* + * CERRSSMSH error bit + * BIT(0) - Indicates one correctable error + */ +#define ADF_GEN4_CERRSSMSH_ERROR_BIT BIT(0) + +#define ADF_GEN4_CERRSSMSHAD 0x14 + +/* SSM error handling features enable register */ +#define ADF_GEN4_SSMFEATREN 0x198 + +/* + * Disable SSM error detection and reporting features + * enabled by device driver on RAS initialization + * + * following bits should be cleared : + * BIT(4) - Disable parity for CPP parity + * BIT(12) - Disable logging push/pull data error in pperr register. + * BIT(16) - BIT(23) - Disable parity for SPPs + * BIT(24) - BIT(27) - Disable parity for SPPs, if it's supported on the device. 
+ */ +#define ADF_GEN4_SSMFEATREN_DIS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(5) | BIT(6) | BIT(7) | \ + BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(13) | BIT(14) | BIT(15)) + +#define ADF_GEN4_INTMASKSSM 0x0 + +/* + * Error reporting mask in INTMASKSSM + * BIT(0) - Shared memory uncorrectable interrupt mask + * BIT(1) - Shared memory correctable interrupt mask + * BIT(2) - PPERR interrupt mask + * BIT(3) - CPP parity error Interrupt mask + * BIT(4) - SSM interrupt generated by SER correctable error mask + * BIT(5) - SSM interrupt generated by SER uncorrectable error + * - not stop and scream - mask + */ +#define ADF_GEN4_INTMASKSSM_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5)) + +/* CPP push or pull error */ +#define ADF_GEN4_PPERR 0x8 + +#define ADF_GEN4_PPERR_BITMASK (BIT(0) | BIT(1)) + +#define ADF_GEN4_PPERRID 0xC + +/* Slice hang handling related registers */ +#define ADF_GEN4_SLICEHANGSTATUS_ATH_CPH 0x84 +#define ADF_GEN4_SLICEHANGSTATUS_CPR_XLT 0x88 +#define ADF_GEN4_SLICEHANGSTATUS_DCPR_UCS 0x90 +#define ADF_GEN4_SLICEHANGSTATUS_WAT_WCP 0x8C +#define ADF_GEN4_SLICEHANGSTATUS_PKE 0x94 + +#define ADF_GEN4_SHINTMASKSSM_ATH_CPH 0xF0 +#define ADF_GEN4_SHINTMASKSSM_CPR_XLT 0xF4 +#define ADF_GEN4_SHINTMASKSSM_DCPR_UCS 0xFC +#define ADF_GEN4_SHINTMASKSSM_WAT_WCP 0xF8 +#define ADF_GEN4_SHINTMASKSSM_PKE 0x100 + +/* SPP pull cmd parity err_*slice* CSR */ +#define ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH 0x1A4 +#define ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT 0x1A8 +#define ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS 0x1B0 +#define ADF_GEN4_SPPPULLCMDPARERR_PKE 0x1B4 +#define ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP 0x1AC + +/* SPP pull data parity err_*slice* CSR */ +#define ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH 0x1BC +#define ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT 0x1C0 +#define ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS 0x1C8 +#define ADF_GEN4_SPPPULLDATAPARERR_PKE 0x1CC +#define ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP 0x1C4 + +/* SPP push cmd parity err_*slice* CSR */ +#define 
ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH 0x1D4 +#define ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT 0x1D8 +#define ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS 0x1E0 +#define ADF_GEN4_SPPPUSHCMDPARERR_PKE 0x1E4 +#define ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP 0x1DC + +/* SPP push data parity err_*slice* CSR */ +#define ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH 0x1EC +#define ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT 0x1F0 +#define ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS 0x1F8 +#define ADF_GEN4_SPPPUSHDATAPARERR_PKE 0x1FC +#define ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP 0x1F4 + +/* Accelerator SPP parity error mask registers */ +#define ADF_GEN4_SPPPARERRMSK_ATH_CPH 0x204 +#define ADF_GEN4_SPPPARERRMSK_CPR_XLT 0x208 +#define ADF_GEN4_SPPPARERRMSK_DCPR_UCS 0x210 +#define ADF_GEN4_SPPPARERRMSK_PKE 0x214 +#define ADF_GEN4_SPPPARERRMSK_WAT_WCP 0x20C + +#define ADF_GEN4_SSMCPPERR 0x224 + +/* + * Uncorrectable error mask in SSMCPPERR + * BIT(0) - indicates CPP command parity error + * BIT(1) - indicates CPP Main Push PPID parity error + * BIT(2) - indicates CPP Main ePPID parity error + * BIT(3) - indicates CPP Main push data parity error + * BIT(4) - indicates CPP Main Pull PPID parity error + * BIT(5) - indicates CPP target pull data parity error + */ +#define ADF_GEN4_SSMCPPERR_FATAL_BITMASK \ + (BIT(0) | BIT(1) | BIT(4)) + +#define ADF_GEN4_SSMCPPERR_UNCERR_BITMASK \ + (BIT(2) | BIT(3) | BIT(5)) + +#define ADF_GEN4_SSMSOFTERRORPARITY_SRC 0x9C +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC 0xB8 + +#define ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH 0xA0 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH 0xBC + +#define ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT 0xA4 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT 0xC0 + +#define ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS 0xAC +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS 0xC8 + +#define ADF_GEN4_SSMSOFTERRORPARITY_PKE 0xB0 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE 0xCC + +#define ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP 0xA8 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP 0xC4 + +/* RF parity 
error detected in SharedRAM */ +#define ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT BIT(0) + +#define ADF_GEN4_SER_ERR_SSMSH 0x44C + +/* + * Fatal error mask in SER_ERR_SSMSH + * BIT(0) - Indicates an uncorrectable error has occurred in the + * accelerator controller command RFs + * BIT(2) - Parity error occurred in the bank SPP fifos + * BIT(3) - Indicates Parity error occurred in following fifos in + * the design + * BIT(4) - Parity error occurred in flops in the design + * BIT(5) - Uncorrectable error has occurred in the + * target push and pull data register flop + * BIT(7) - Indicates Parity error occurred in the Resource Manager + * pending lock request fifos + * BIT(8) - Indicates Parity error occurred in the Resource Manager + * MECTX command queues logic + * BIT(9) - Indicates Parity error occurred in the Resource Manager + * MECTX sigdone fifo flops + * BIT(10) - Indicates an uncorrectable error has occurred in the + * Resource Manager MECTX command RFs + * BIT(14) - Parity error occurred in Buffer Manager sigdone FIFO + */ + #define ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK \ + (BIT(0) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(7) | \ + BIT(8) | BIT(9) | BIT(10) | BIT(14)) + +/* + * Uncorrectable error mask in SER_ERR_SSMSH + * BIT(12) Parity error occurred in Buffer Manager pool 0 + * BIT(13) Parity error occurred in Buffer Manager pool 1 + */ +#define ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK \ + (BIT(12) | BIT(13)) + +/* + * Correctable error mask in SER_ERR_SSMSH + * BIT(1) - Indicates a correctable Error has occurred + * in the slice controller command RFs + * BIT(6) - Indicates a correctable Error has occurred in + * the target push and pull data RFs + * BIT(11) - Indicates an correctable Error has occurred in + * the Resource Manager MECTX command RFs + */ +#define ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK \ + (BIT(1) | BIT(6) | BIT(11)) + +/* SSM shared memory SER error reporting mask */ +#define ADF_GEN4_SER_EN_SSMSH 0x450 + +/* + * SSM SER error reporting mask in 
SER_en_err_ssmsh + * BIT(0) - Enables uncorrectable Error detection in : + * 1) slice controller command RFs. + * 2) target push/pull data registers + * BIT(1) - Enables correctable Error detection in : + * 1) slice controller command RFs + * 2) target push/pull data registers + * BIT(2) - Enables Parity error detection in + * 1) bank SPP fifos + * 2) gen4_pull_id_queue + * 3) gen4_push_id_queue + * 4) AE_pull_sigdn_fifo + * 5) DT_push_sigdn_fifo + * 6) slx_push_sigdn_fifo + * 7) secure_push_cmd_fifo + * 8) secure_pull_cmd_fifo + * 9) Head register in FIFO wrapper + * 10) current_cmd in individual push queue + * 11) current_cmd in individual pull queue + * 12) push_command_rxp arbitrated in ssm_push_cmd_queues + * 13) pull_command_rxp arbitrated in ssm_pull_cmd_queues + * BIT(3) - Enables uncorrectable Error detection in + * the resource manager mectx cmd RFs. + * BIT(4) - Enables correctable error detection in the Resource Manager + * mectx command RFs + * BIT(5) - Enables Parity error detection in + * 1) resource manager lock request fifo + * 2) mectx cmdqueues logic + * 3) mectx sigdone fifo + * BIT(6) - Enables Parity error detection in Buffer Manager pools + * and sigdone fifo + */ +#define ADF_GEN4_SER_EN_SSMSH_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6)) + +#define ADF_GEN4_CPP_CFC_ERR_STATUS 0x640C04 + +/* + * BIT(1) - Indicates multiple CPP CFC errors + * BIT(7) - Indicates CPP CFC command parity error type + * BIT(8) - Indicated CPP CFC data parity error type + */ +#define ADF_GEN4_CPP_CFC_ERR_STATUS_MERR_BIT BIT(1) +#define ADF_GEN4_CPP_CFC_ERR_STATUS_CMDPAR_BIT BIT(7) +#define ADF_GEN4_CPP_CFC_ERR_STATUS_DATAPAR_BIT BIT(8) + +/* + * BIT(0) - Enables CFC to detect and log push/pull data error + * BIT(1) - Enables CFC to generate interrupt to PCIEP for CPP error + * BIT(4) - When 1 Parity detection is disabled + * BIT(5) - When 1 Parity detection is disabled on CPP command bus + * BIT(6) - When 1 Parity detection is disabled 
on CPP push/pull bus + * BIT(9) - When 1 RF parity error detection is disabled + */ +#define ADF_GEN4_CPP_CFC_ERR_CTRL_BITMASK (BIT(0) | BIT(1)) + +#define ADF_GEN4_CPP_CFC_ERR_CTRL_DIS_BITMASK \ + (BIT(4) | BIT(5) | BIT(6) | BIT(9) | BIT(10)) + +#define ADF_GEN4_CPP_CFC_ERR_CTRL 0x640C00 + +/* + * BIT(0) - Clears bit(0) of ADF_GEN4_CPP_CFC_ERR_STATUS + * when an error is reported on CPP + * BIT(1) - Clears bit(1) of ADF_GEN4_CPP_CFC_ERR_STATUS + * when multiple errors are reported on CPP + * BIT(2) - Clears bit(2) of ADF_GEN4_CPP_CFC_ERR_STATUS + * when attention interrupt is reported + */ +#define ADF_GEN4_CPP_CFC_ERR_STATUS_CLR_BITMASK (BIT(0) | BIT(1) | BIT(2)) +#define ADF_GEN4_CPP_CFC_ERR_STATUS_CLR 0x640C08 + +#define ADF_GEN4_CPP_CFC_ERR_PPID_LO 0x640C0C +#define ADF_GEN4_CPP_CFC_ERR_PPID_HI 0x640C10 + /* Command Parity error detected on IOSFP Command to QAT */ #define ADF_GEN4_RIMISCSTS_BIT BIT(0) -- Gitee From 72eb0b225946315dce383dfa5f7d1614a18cb6c8 Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:49 +0100 Subject: [PATCH 0707/2138] crypto: qat - add handling of compression related errors for QAT GEN4 ANBZ: #8589 commit b67bf7babe36c6c15623ec22ed13ec9069a6cf37 upstream. Intel-SIG: commit b67bf7babe36 crypto: qat - add handling of compression related errors for QAT GEN4 Backport to support Intel QAT in-tree driver Add logic to detect, report and handle correctable and uncorrectable errors related to the compression hardware. These are detected through the EXPRPSSMXLT, EXPRPSSMCPR and EXPRPSSMDCPR registers. 
Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_common/adf_gen4_ras.c | 76 ++++++++++++++++++- .../intel/qat/qat_common/adf_gen4_ras.h | 76 +++++++++++++++++++ 2 files changed, 151 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 877abed683d8..285b755e13be 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -996,13 +996,87 @@ static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, return reset_required; } +static bool adf_handle_exprpssmcmpr(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMCPR); + + reg &= ADF_GEN4_EXPRPSSMCPR_UNCERR_BITMASK; + if (!reg) + return false; + + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error exception in SSM CMP: 0x%x", reg); + + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMCPR, reg); + + return false; +} + +static bool adf_handle_exprpssmxlt(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMXLT); + + reg &= ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK | + ADF_GEN4_EXPRPSSMXLT_CERR_BIT; + if (!reg) + return false; + + if (reg & ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK) + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error exception in SSM XLT: 0x%x", reg); + + if (reg & ADF_GEN4_EXPRPSSMXLT_CERR_BIT) + dev_warn(&GET_DEV(accel_dev), + "Correctable error exception in SSM XLT: 0x%x", reg); + + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMXLT, reg); + + return false; +} + +static bool adf_handle_exprpssmdcpr(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 reg; + int i; + + for (i = 0; i < ADF_GEN4_DCPR_SLICES_NUM; i++) { + reg = ADF_CSR_RD(csr, 
ADF_GEN4_EXPRPSSMDCPR(i)); + reg &= ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK | + ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK; + if (!reg) + continue; + + if (reg & ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK) + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error exception in SSM DCMP: 0x%x", reg); + + if (reg & ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK) + dev_warn(&GET_DEV(accel_dev), + "Correctable error exception in SSM DCMP: 0x%x", reg); + + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMDCPR(i), reg); + } + + return false; +} + static bool adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 errsou) { + bool reset_required; + if (!(errsou & ADF_GEN4_ERRSOU2_SSM_ERR_BIT)) return false; - return adf_handle_iaintstatssm(accel_dev, csr); + reset_required = adf_handle_iaintstatssm(accel_dev, csr); + reset_required |= adf_handle_exprpssmcmpr(accel_dev, csr); + reset_required |= adf_handle_exprpssmxlt(accel_dev, csr); + reset_required |= adf_handle_exprpssmdcpr(accel_dev, csr); + + return reset_required; } static bool adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h index 65c1b7925444..e3583c3ed827 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h @@ -523,6 +523,82 @@ struct adf_ras_ops; #define ADF_GEN4_CPP_CFC_ERR_PPID_LO 0x640C0C #define ADF_GEN4_CPP_CFC_ERR_PPID_HI 0x640C10 +/* Exception reporting in QAT SSM CMP */ +#define ADF_GEN4_EXPRPSSMCPR 0x2000 + +/* + * Uncorrectable error mask in EXPRPSSMCPR + * BIT(2) - Hard fatal error + * BIT(16) - Parity error detected in CPR Push FIFO + * BIT(17) - Parity error detected in CPR Pull FIFO + * BIT(18) - Parity error detected in CPR Hash Table + * BIT(19) - Parity error detected in CPR History Buffer Copy 0 + * BIT(20) - Parity error detected in CPR History Buffer Copy 1 + * BIT(21) - Parity error detected in CPR History Buffer Copy 2 + * BIT(22) - 
Parity error detected in CPR History Buffer Copy 3 + * BIT(23) - Parity error detected in CPR History Buffer Copy 4 + * BIT(24) - Parity error detected in CPR History Buffer Copy 5 + * BIT(25) - Parity error detected in CPR History Buffer Copy 6 + * BIT(26) - Parity error detected in CPR History Buffer Copy 7 + */ +#define ADF_GEN4_EXPRPSSMCPR_UNCERR_BITMASK \ + (BIT(2) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \ + BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26)) + +/* Exception reporting in QAT SSM XLT */ +#define ADF_GEN4_EXPRPSSMXLT 0xA000 + +/* + * Uncorrectable error mask in EXPRPSSMXLT + * BIT(2) - If set, an Uncorrectable Error event occurred + * BIT(16) - Parity error detected in XLT Push FIFO + * BIT(17) - Parity error detected in XLT Pull FIFO + * BIT(18) - Parity error detected in XLT HCTB0 + * BIT(19) - Parity error detected in XLT HCTB1 + * BIT(20) - Parity error detected in XLT HCTB2 + * BIT(21) - Parity error detected in XLT HCTB3 + * BIT(22) - Parity error detected in XLT CBCL + * BIT(23) - Parity error detected in XLT LITPTR + */ +#define ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK \ + (BIT(2) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | \ + BIT(22) | BIT(23)) + +/* + * Correctable error mask in EXPRPSSMXLT + * BIT(3) - Correctable error event occurred. 
+ */ +#define ADF_GEN4_EXPRPSSMXLT_CERR_BIT BIT(3) + +/* Exception reporting in QAT SSM DCMP */ +#define ADF_GEN4_EXPRPSSMDCPR(_n_) (0x12000 + (_n_) * 0x80) + +/* + * Uncorrectable error mask in EXPRPSSMDCPR + * BIT(2) - Even hard fatal error + * BIT(4) - Odd hard fatal error + * BIT(6) - decode soft error + * BIT(16) - Parity error detected in CPR Push FIFO + * BIT(17) - Parity error detected in CPR Pull FIFO + * BIT(18) - Parity error detected in the Input Buffer + * BIT(19) - symbuf0parerr + * Parity error detected in CPR Push FIFO + * BIT(20) - symbuf1parerr + * Parity error detected in CPR Push FIFO + */ +#define ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK \ + (BIT(2) | BIT(4) | BIT(6) | BIT(16) | BIT(17) | \ + BIT(18) | BIT(19) | BIT(20)) + +/* + * Correctable error mask in EXPRPSSMDCPR + * BIT(3) - Even ecc correctable error + * BIT(5) - Odd ecc correctable error + */ +#define ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK (BIT(3) | BIT(5)) + +#define ADF_GEN4_DCPR_SLICES_NUM 3 + /* Command Parity error detected on IOSFP Command to QAT */ #define ADF_GEN4_RIMISCSTS_BIT BIT(0) -- Gitee From ffd759b94dba14ce36b213a7edda8f5b61c07938 Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:50 +0100 Subject: [PATCH 0708/2138] crypto: qat - add adf_get_aram_base() helper function ANBZ: #8589 commit 86df79c3a40a0085555aaa475b4b16c8728ef952 upstream. Intel-SIG: commit 86df79c3a40a crypto: qat - add adf_get_aram_base() helper function Backport to support Intel QAT in-tree driver Add the function adf_get_aram_base() which allows to return the base address of the aram bar. 
Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_common_drv.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 18a382508542..d9342634f9c1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -248,4 +248,14 @@ static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev) return pmisc->virt_addr; } +static inline void __iomem *adf_get_aram_base(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *param; + + param = &GET_BARS(accel_dev)[hw_data->get_sram_bar_id(hw_data)]; + + return param->virt_addr; +} + #endif -- Gitee From 8cdc6bb71a3296bbe31986ccd77ce77fea80a06f Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:51 +0100 Subject: [PATCH 0709/2138] crypto: qat - add handling of errors from ERRSOU3 for QAT GEN4 ANBZ: #8589 commit 22289dc95833c6584aea1f4e8ab9f4f1641bb076 upstream. Intel-SIG: commit 22289dc95833 crypto: qat - add handling of errors from ERRSOU3 for QAT GEN4 Backport to support Intel QAT in-tree driver Add logic to detect, report and handle uncorrectable errors reported through the ERRSOU3 register in QAT GEN4 devices. 
Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_common/adf_gen4_ras.c | 256 ++++++++++++++++++ .../intel/qat/qat_common/adf_gen4_ras.h | 218 +++++++++++++++ 2 files changed, 474 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 285b755e13be..8ba9c9bdb89b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -19,6 +19,14 @@ static void enable_errsou_reporting(void __iomem *csr) ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, ADF_GEN4_ERRSOU2_PM_INT_BIT | ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK); + + /* + * Enable uncorrectable error reporting in ERRSOU3 + * but disable RLT error interrupt and VFLR notify interrupt by default + */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, + ADF_GEN4_ERRSOU3_RLTERROR_BIT | + ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT); } static void disable_errsou_reporting(void __iomem *csr) @@ -35,6 +43,9 @@ static void disable_errsou_reporting(void __iomem *csr) val = ADF_CSR_RD(csr, ADF_GEN4_ERRMSK2); val |= ADF_GEN4_ERRSOU2_DIS_BITMASK; ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, val); + + /* Disable uncorrectable error reporting in ERRSOU3 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_ERRSOU3_BITMASK); } static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, @@ -82,6 +93,8 @@ static void disable_cpp_error_reporting(void __iomem *csr) static void enable_ti_ri_error_reporting(void __iomem *csr) { + u32 reg; + /* Enable RI Memory error reporting */ ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK | @@ -96,10 +109,26 @@ static void enable_ti_ri_error_reporting(void __iomem *csr) ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK, 0); ADF_CSR_WR(csr, 
ADF_GEN4_TI_CD_PAR_ERR_MASK, 0); ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, 0); + + /* Enable error handling in RI, TI CPP interface control registers */ + ADF_CSR_WR(csr, ADF_GEN4_RICPPINTCTL, ADF_GEN4_RICPPINTCTL_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_TICPPINTCTL, ADF_GEN4_TICPPINTCTL_BITMASK); + + /* + * Enable error detection and reporting in TIMISCSTS + * with bits 1, 2 and 30 value preserved + */ + reg = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL); + reg &= ADF_GEN4_TIMSCCTL_RELAY_BITMASK; + reg |= ADF_GEN4_TIMISCCTL_BIT; + ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, reg); } static void disable_ti_ri_error_reporting(void __iomem *csr) { + u32 reg; + /* Disable RI Memory error reporting */ ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, 0); @@ -117,6 +146,19 @@ static void disable_ti_ri_error_reporting(void __iomem *csr) ADF_GEN4_TI_CD_PAR_STS_BITMASK); ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK); + + /* Disable error handling in RI, TI CPP interface control registers */ + ADF_CSR_WR(csr, ADF_GEN4_RICPPINTCTL, 0); + + ADF_CSR_WR(csr, ADF_GEN4_TICPPINTCTL, 0); + + /* + * Disable error detection and reporting in TIMISCSTS + * with bits 1, 2 and 30 value preserved + */ + reg = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL); + reg &= ADF_GEN4_TIMSCCTL_RELAY_BITMASK; + ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, reg); } static void enable_rf_error_reporting(struct adf_accel_dev *accel_dev, @@ -251,8 +293,32 @@ static void disable_ssm_error_reporting(struct adf_accel_dev *accel_dev, err_mask->parerr_wat_wcp_mask); } +static void enable_aram_error_reporting(void __iomem *csr) +{ + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERRUERR_EN, + ADF_GEN4_REG_ARAMCERRUERR_EN_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, + ADF_GEN4_REG_ARAMCERR_EN_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, + ADF_GEN4_REG_ARAMUERR_EN_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, + ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK); +} + +static void disable_aram_error_reporting(void 
__iomem *csr) +{ + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERRUERR_EN, 0); + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, 0); + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, 0); + ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, 0); +} + static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) { + void __iomem *aram_csr = adf_get_aram_base(accel_dev); void __iomem *csr = adf_get_pmisc_base(accel_dev); enable_errsou_reporting(csr); @@ -261,10 +327,12 @@ static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) enable_ti_ri_error_reporting(csr); enable_rf_error_reporting(accel_dev, csr); enable_ssm_error_reporting(accel_dev, csr); + enable_aram_error_reporting(aram_csr); } static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) { + void __iomem *aram_csr = adf_get_aram_base(accel_dev); void __iomem *csr = adf_get_pmisc_base(accel_dev); disable_errsou_reporting(csr); @@ -273,6 +341,7 @@ static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) disable_ti_ri_error_reporting(csr); disable_rf_error_reporting(accel_dev, csr); disable_ssm_error_reporting(accel_dev, csr); + disable_aram_error_reporting(aram_csr); } static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev, @@ -1122,9 +1191,190 @@ static void adf_gen4_process_errsou2(struct adf_accel_dev *accel_dev, *reset_required |= adf_handle_cpp_cfc_err(accel_dev, csr, errsou); } +static bool adf_handle_timiscsts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 timiscsts; + + if (!(errsou & ADF_GEN4_ERRSOU3_TIMISCSTS_BIT)) + return false; + + timiscsts = ADF_CSR_RD(csr, ADF_GEN4_TIMISCSTS); + + dev_err(&GET_DEV(accel_dev), + "Fatal error in Transmit Interface: 0x%x\n", timiscsts); + + return true; +} + +static bool adf_handle_ricppintsts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ricppintsts; + + if (!(errsou & ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK)) + return false; + + ricppintsts = ADF_CSR_RD(csr, ADF_GEN4_RICPPINTSTS); + ricppintsts &= 
ADF_GEN4_RICPPINTSTS_BITMASK; + + dev_err(&GET_DEV(accel_dev), + "RI CPP Uncorrectable Error: 0x%x\n", ricppintsts); + + ADF_CSR_WR(csr, ADF_GEN4_RICPPINTSTS, ricppintsts); + + return false; +} + +static bool adf_handle_ticppintsts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ticppintsts; + + if (!(errsou & ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK)) + return false; + + ticppintsts = ADF_CSR_RD(csr, ADF_GEN4_TICPPINTSTS); + ticppintsts &= ADF_GEN4_TICPPINTSTS_BITMASK; + + dev_err(&GET_DEV(accel_dev), + "TI CPP Uncorrectable Error: 0x%x\n", ticppintsts); + + ADF_CSR_WR(csr, ADF_GEN4_TICPPINTSTS, ticppintsts); + + return false; +} + +static bool adf_handle_aramcerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 aram_cerr; + + if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT)) + return false; + + aram_cerr = ADF_CSR_RD(csr, ADF_GEN4_REG_ARAMCERR); + aram_cerr &= ADF_GEN4_REG_ARAMCERR_BIT; + + dev_warn(&GET_DEV(accel_dev), + "ARAM correctable error : 0x%x\n", aram_cerr); + + aram_cerr |= ADF_GEN4_REG_ARAMCERR_EN_BITMASK; + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, aram_cerr); + + return false; +} + +static bool adf_handle_aramuerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 aramuerr; + + if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT)) + return false; + + aramuerr = ADF_CSR_RD(csr, ADF_GEN4_REG_ARAMUERR); + aramuerr &= ADF_GEN4_REG_ARAMUERR_ERROR_BIT | + ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT; + + if (!aramuerr) + return false; + + if (aramuerr & ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT) { + dev_err(&GET_DEV(accel_dev), + "ARAM multiple uncorrectable errors: 0x%x\n", aramuerr); + + reset_required = true; + } else { + dev_err(&GET_DEV(accel_dev), + "ARAM uncorrectable error: 0x%x\n", aramuerr); + } + + aramuerr |= ADF_GEN4_REG_ARAMUERR_EN_BITMASK; + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, aramuerr); + + return reset_required; +} + +static bool 
adf_handle_reg_cppmemtgterr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 cppmemtgterr; + + if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT)) + return false; + + cppmemtgterr = ADF_CSR_RD(csr, ADF_GEN4_REG_CPPMEMTGTERR); + cppmemtgterr &= ADF_GEN4_REG_CPPMEMTGTERR_BITMASK | + ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT; + if (!cppmemtgterr) + return false; + + if (cppmemtgterr & ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT) { + dev_err(&GET_DEV(accel_dev), + "Misc memory target multiple uncorrectable errors: 0x%x\n", + cppmemtgterr); + + reset_required = true; + } else { + dev_err(&GET_DEV(accel_dev), + "Misc memory target uncorrectable error: 0x%x\n", cppmemtgterr); + } + + cppmemtgterr |= ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK; + + ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, cppmemtgterr); + + return reset_required; +} + +static bool adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 i; + u32 max_rp_num = GET_HW_DATA(accel_dev)->num_banks; + + if (!(errsou & ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT)) + return false; + + for (i = 0; i < max_rp_num; i++) { + u32 atufaultstatus = ADF_CSR_RD(csr, ADF_GEN4_ATUFAULTSTATUS(i)); + + atufaultstatus &= ADF_GEN4_ATUFAULTSTATUS_BIT; + + if (atufaultstatus) { + dev_err(&GET_DEV(accel_dev), + "Ring Pair (%u) ATU detected fault: 0x%x\n", i, + atufaultstatus); + + ADF_CSR_WR(csr, ADF_GEN4_ATUFAULTSTATUS(i), atufaultstatus); + } + } + + return false; +} + +static void adf_gen4_process_errsou3(struct adf_accel_dev *accel_dev, + void __iomem *csr, void __iomem *aram_csr, + u32 errsou, bool *reset_required) +{ + *reset_required |= adf_handle_timiscsts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ricppintsts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ticppintsts(accel_dev, csr, errsou); + *reset_required |= adf_handle_aramcerr(accel_dev, aram_csr, errsou); + *reset_required |= adf_handle_aramuerr(accel_dev, 
aram_csr, errsou); + *reset_required |= adf_handle_reg_cppmemtgterr(accel_dev, aram_csr, errsou); + *reset_required |= adf_handle_atufaultstatus(accel_dev, csr, errsou); +} + static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, bool *reset_required) { + void __iomem *aram_csr = adf_get_aram_base(accel_dev); void __iomem *csr = adf_get_pmisc_base(accel_dev); u32 errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU0); bool handled = false; @@ -1148,6 +1398,12 @@ static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, handled = true; } + errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU3); + if (errsou & ADF_GEN4_ERRSOU3_BITMASK) { + adf_gen4_process_errsou3(accel_dev, csr, aram_csr, errsou, reset_required); + handled = true; + } + return handled; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h index e3583c3ed827..53352083cd12 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h @@ -599,6 +599,224 @@ struct adf_ras_ops; #define ADF_GEN4_DCPR_SLICES_NUM 3 +/* + * ERRSOU3 bit masks + * BIT(0) - indicates error Response Order Overflow and/or BME error + * BIT(1) - indicates RI push/pull error + * BIT(2) - indicates TI push/pull error + * BIT(3) - indicates ARAM correctable error + * BIT(4) - indicates ARAM uncorrectable error + * BIT(5) - indicates TI pull parity error + * BIT(6) - indicates RI push parity error + * BIT(7) - indicates VFLR interrupt + * BIT(8) - indicates ring pair interrupts for ATU detected fault + * BIT(9) - indicates error when accessing RLT block + */ +#define ADF_GEN4_ERRSOU3_TIMISCSTS_BIT BIT(0) +#define ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK (BIT(1) | BIT(6)) +#define ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK (BIT(2) | BIT(5)) +#define ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT BIT(3) +#define ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT BIT(4) +#define ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT BIT(7) +#define 
ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT BIT(8) +#define ADF_GEN4_ERRSOU3_RLTERROR_BIT BIT(9) + +#define ADF_GEN4_ERRSOU3_BITMASK ( \ + (ADF_GEN4_ERRSOU3_TIMISCSTS_BIT) | \ + (ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK) | \ + (ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK) | \ + (ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT) | \ + (ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT) | \ + (ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT) | \ + (ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT) | \ + (ADF_GEN4_ERRSOU3_RLTERROR_BIT)) + +/* TI Misc status register */ +#define ADF_GEN4_TIMISCSTS 0x50054C + +/* TI Misc error reporting mask */ +#define ADF_GEN4_TIMISCCTL 0x500548 + +/* + * TI Misc error reporting control mask + * BIT(0) - Enables error detection and logging in TIMISCSTS register + * BIT(1) - It has effect only when SRIOV enabled, this bit is 0 by default + * BIT(2) - Enables the D-F-x counter within the dispatch arbiter + * to start based on the command triggered from + * BIT(30) - Disables VFLR functionality + * By setting this bit will revert to CPM1.x functionality + * bits 1, 2 and 30 value should be preserved and not meant to be changed + * within RAS. 
+ */ +#define ADF_GEN4_TIMISCCTL_BIT BIT(0) +#define ADF_GEN4_TIMSCCTL_RELAY_BITMASK (BIT(1) | BIT(2) | BIT(30)) + +/* RI CPP interface status register */ +#define ADF_GEN4_RICPPINTSTS 0x41A330 + +/* + * Uncorrectable error mask in RICPPINTSTS register + * BIT(0) - RI asserted the CPP error signal during a push + * BIT(1) - RI detected the CPP error signal asserted during a pull + * BIT(2) - RI detected a push data parity error + * BIT(3) - RI detected a push valid parity error + */ +#define ADF_GEN4_RICPPINTSTS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3)) + +/* RI CPP interface status register control */ +#define ADF_GEN4_RICPPINTCTL 0x41A32C + +/* + * Control bit mask for RICPPINTCTL register + * BIT(0) - value of 1 enables error detection and reporting + * on the RI CPP Push interface + * BIT(1) - value of 1 enables error detection and reporting + * on the RI CPP Pull interface + * BIT(2) - value of 1 enables error detection and reporting + * on the RI Parity + * BIT(3) - value of 1 enable checking parity on CPP + * BIT(4) - value of 1 enables the stop feature of the stop and stream + * for all RI CPP Command RFs + */ +#define ADF_GEN4_RICPPINTCTL_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4)) + +/* Push ID of the command which triggered the transaction error on RI */ +#define ADF_GEN4_RIERRPUSHID 0x41A334 + +/* Pull ID of the command which triggered the transaction error on RI */ +#define ADF_GEN4_RIERRPULLID 0x41A338 + +/* TI CPP interface status register */ +#define ADF_GEN4_TICPPINTSTS 0x50053C + +/* + * Uncorrectable error mask in TICPPINTSTS register + * BIT(0) - value of 1 indicates that the TI asserted + * the CPP error signal during a push + * BIT(1) - value of 1 indicates that the TI detected + * the CPP error signal asserted during a pull + * BIT(2) - value of 1 indicates that the TI detected + * a pull data parity error + */ +#define ADF_GEN4_TICPPINTSTS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2)) + +/* TI CPP interface status register 
control */ +#define ADF_GEN4_TICPPINTCTL 0x500538 + +/* + * Control bit mask for TICPPINTCTL register + * BIT(0) - value of 1 enables error detection and reporting on + * the TI CPP Push interface + * BIT(1) - value of 1 enables error detection and reporting on + * the TI CPP Push interface + * BIT(2) - value of 1 enables parity error detection and logging on + * the TI CPP Pull interface + * BIT(3) - value of 1 enables CPP CMD and Pull Data parity checking + * BIT(4) - value of 1 enables TI stop part of stop and scream mode on + * CPP/RF Parity error + */ +#define ADF_GEN4_TICPPINTCTL_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4)) + +/* Push ID of the command which triggered the transaction error on TI */ +#define ADF_GEN4_TIERRPUSHID 0x500540 + +/* Pull ID of the command which triggered the transaction error on TI */ +#define ADF_GEN4_TIERRPULLID 0x500544 + +/* Correctable error in ARAM agent register */ +#define ADF_GEN4_REG_ARAMCERR 0x1700 + +#define ADF_GEN4_REG_ARAMCERR_BIT BIT(0) + +/* + * Correctable error enablement in ARAM bit mask + * BIT(3) - enable ARAM RAM to fix and log correctable error + * BIT(26) - enables ARAM agent to generate interrupt for correctable error + */ +#define ADF_GEN4_REG_ARAMCERR_EN_BITMASK (BIT(3) | BIT(26)) + +/* Correctable error address in ARAM agent register */ +#define ADF_GEN4_REG_ARAMCERRAD 0x1708 + +/* Uncorrectable error in ARAM agent register */ +#define ADF_GEN4_REG_ARAMUERR 0x1704 + +/* + * ARAM error bit mask + * BIT(0) - indicates error logged in ARAMCERR or ARAMUCERR + * BIT(18) - indicates uncorrectable multiple errors in ARAM agent + */ +#define ADF_GEN4_REG_ARAMUERR_ERROR_BIT BIT(0) +#define ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT BIT(18) + +/* + * Uncorrectable error enablement in ARAM bit mask + * BIT(3) - enable ARAM RAM to fix and log uncorrectable error + * BIT(19) - enables ARAM agent to generate interrupt for uncorrectable error + */ +#define ADF_GEN4_REG_ARAMUERR_EN_BITMASK (BIT(3) | BIT(19)) + 
+/* Unorrectable error address in ARAM agent register */ +#define ADF_GEN4_REG_ARAMUERRAD 0x170C + +/* Uncorrectable error transaction push/pull ID registers*/ +#define ADF_GEN4_REG_ERRPPID_LO 0x1714 +#define ADF_GEN4_REG_ERRPPID_HI 0x1718 + +/* ARAM ECC block error enablement */ +#define ADF_GEN4_REG_ARAMCERRUERR_EN 0x1808 + +/* + * ARAM ECC block error control bit masks + * BIT(0) - enable ARAM CD ECC block error detecting + * BIT(1) - enable ARAM pull request ECC error detecting + * BIT(2) - enable ARAM command dispatch ECC error detecting + * BIT(3) - enable ARAM read datapath push ECC error detecting + * BIT(4) - enable ARAM read datapath pull ECC error detecting + * BIT(5) - enable ARAM RMW ECC error detecting + * BIT(6) - enable ARAM write datapath RMW ECC error detecting + * BIT(7) - enable ARAM write datapath ECC error detecting + */ +#define ADF_GEN4_REG_ARAMCERRUERR_EN_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | \ + BIT(5) | BIT(6) | BIT(7)) + +/* ARAM misc memory target error registers*/ +#define ADF_GEN4_REG_CPPMEMTGTERR 0x1710 + +/* + * ARAM misc memory target error bit masks + * BIT(0) - indicates an error in ARAM target memory + * BIT(1) - indicates multiple errors in ARAM target memory + * BIT(4) - indicates pull error in ARAM target memory + * BIT(5) - indicates parity pull error in ARAM target memory + * BIT(6) - indicates push error in ARAM target memory + */ +#define ADF_GEN4_REG_CPPMEMTGTERR_BITMASK \ + (BIT(0) | BIT(4) | BIT(5) | BIT(6)) + +#define ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT BIT(1) + +/* + * ARAM misc memory target error enablement mask + * BIT(2) - enables CPP memory to detect and log push/pull data error + * BIT(7) - enables push/pull error to generate interrupts to RI + * BIT(8) - enables ARAM to check parity on pull data and CPP command buses + * BIT(9) - enables ARAM to autopush to AE when push/parity error is detected + * on lookaside DT + */ +#define ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK \ + (BIT(2) | 
BIT(7) | BIT(8) | BIT(9)) + +/* ATU fault status register */ +#define ADF_GEN4_ATUFAULTSTATUS(i) (0x506000 + ((i) * 0x4)) + +#define ADF_GEN4_ATUFAULTSTATUS_BIT BIT(0) + /* Command Parity error detected on IOSFP Command to QAT */ #define ADF_GEN4_RIMISCSTS_BIT BIT(0) -- Gitee From 54042c90d8c012ae7d0bd94b9f47acde011e8693 Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:52 +0100 Subject: [PATCH 0710/2138] crypto: qat - add error counters ANBZ: #8589 commit 532d7f6bc458042571752168bcb5e1fdc576b8c4 upstream. Intel-SIG: commit 532d7f6bc458 crypto: qat - add error counters Backport to support Intel QAT in-tree driver Introduce ras counters interface for counting QAT specific device errors and expose them through the newly created qat_ras sysfs group attribute. This adds the following attributes: - errors_correctable: number of correctable errors - errors_nonfatal: number of uncorrectable non fatal errors - errors_fatal: number of uncorrectable fatal errors - reset_error_counters: resets all counters These counters are initialized during device bring up and cleared during device shutdown and are applicable only to QAT GEN4 devices. 
Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../ABI/testing/sysfs-driver-qat_ras | 41 +++++++ drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 1 + drivers/crypto/intel/qat/qat_common/Makefile | 1 + .../intel/qat/qat_common/adf_accel_devices.h | 14 +++ .../crypto/intel/qat/qat_common/adf_init.c | 3 + .../qat/qat_common/adf_sysfs_ras_counters.c | 112 ++++++++++++++++++ .../qat/qat_common/adf_sysfs_ras_counters.h | 27 +++++ 7 files changed, 199 insertions(+) create mode 100644 Documentation/ABI/testing/sysfs-driver-qat_ras create mode 100644 drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h diff --git a/Documentation/ABI/testing/sysfs-driver-qat_ras b/Documentation/ABI/testing/sysfs-driver-qat_ras new file mode 100644 index 000000000000..176dea1e9c0a --- /dev/null +++ b/Documentation/ABI/testing/sysfs-driver-qat_ras @@ -0,0 +1,41 @@ +What: /sys/bus/pci/devices//qat_ras/errors_correctable +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: (RO) Reports the number of correctable errors detected by the device. + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_ras/errors_nonfatal +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: (RO) Reports the number of non fatal errors detected by the device. + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_ras/errors_fatal +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: (RO) Reports the number of fatal errors detected by the device. + + This attribute is only available for qat_4xxx devices. 
+ +What: /sys/bus/pci/devices//qat_ras/reset_error_counters +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: (WO) Write to resets all error counters of a device. + + The following example reports how to reset the counters:: + + # echo 1 > /sys/bus/pci/devices//qat_ras/reset_error_counters + # cat /sys/bus/pci/devices//qat_ras/errors_correctable + 0 + # cat /sys/bus/pci/devices//qat_ras/errors_nonfatal + 0 + # cat /sys/bus/pci/devices//qat_ras/errors_fatal + 0 + + This attribute is only available for qat_4xxx devices. diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index b49a7960bc91..7d0587d6ec4e 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -418,6 +418,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err; } + accel_dev->ras_errors.enabled = true; adf_dbgfs_init(accel_dev); ret = adf_dev_up(accel_dev, true); diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 9ba2f8aa1e81..fcf74c0dc534 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -13,6 +13,7 @@ intel_qat-objs := adf_cfg.o \ adf_admin.o \ adf_hw_arbiter.o \ adf_sysfs.o \ + adf_sysfs_ras_counters.o \ adf_gen2_hw_data.o \ adf_gen2_config.o \ adf_gen4_hw_data.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index c8492d792c0e..1c11d90bd9f3 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -7,6 +7,7 @@ #include #include #include +#include #include "adf_cfg_common.h" #include "adf_pfvf_msg.h" @@ -81,6 +82,18 @@ enum dev_sku_info { DEV_SKU_UNKNOWN, }; +enum ras_errors { + ADF_RAS_CORR, + ADF_RAS_UNCORR, + ADF_RAS_FATAL, + ADF_RAS_ERRORS, +}; + 
+struct adf_error_counters { + atomic_t counter[ADF_RAS_ERRORS]; + bool enabled; +}; + static inline const char *get_sku_info(enum dev_sku_info info) { switch (info) { @@ -361,6 +374,7 @@ struct adf_accel_dev { u8 pf_compat_ver; } vf; }; + struct adf_error_counters ras_errors; struct mutex state_lock; /* protect state of the device */ bool is_vf; u32 accel_id; diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 4cf49f52d4dd..ef51c4d028d2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -9,6 +9,7 @@ #include "adf_common_drv.h" #include "adf_dbgfs.h" #include "adf_heartbeat.h" +#include "adf_sysfs_ras_counters.h" static LIST_HEAD(service_table); static DEFINE_MUTEX(service_lock); @@ -245,6 +246,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status); adf_dbgfs_add(accel_dev); + adf_sysfs_start_ras(accel_dev); return 0; } @@ -271,6 +273,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) return; adf_dbgfs_rm(accel_dev); + adf_sysfs_stop_ras(accel_dev); clear_bit(ADF_STATUS_STARTING, &accel_dev->status); clear_bit(ADF_STATUS_STARTED, &accel_dev->status); diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c new file mode 100644 index 000000000000..cffe2d722995 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include +#include + +#include "adf_common_drv.h" +#include "adf_sysfs_ras_counters.h" + +static ssize_t errors_correctable_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + unsigned long counter; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if 
(!accel_dev) + return -EINVAL; + + counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_CORR); + return scnprintf(buf, PAGE_SIZE, "%ld\n", counter); +} + +static ssize_t errors_nonfatal_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + unsigned long counter; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_UNCORR); + return scnprintf(buf, PAGE_SIZE, "%ld\n", counter); +} + +static ssize_t errors_fatal_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + unsigned long counter; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_FATAL); + return scnprintf(buf, PAGE_SIZE, "%ld\n", counter); +} + +static ssize_t reset_error_counters_store(struct device *dev, + struct device_attribute *dev_attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + + if (buf[0] != '1' || count != 2) + return -EINVAL; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors); + + return count; +} + +static DEVICE_ATTR_RO(errors_correctable); +static DEVICE_ATTR_RO(errors_nonfatal); +static DEVICE_ATTR_RO(errors_fatal); +static DEVICE_ATTR_WO(reset_error_counters); + +static struct attribute *qat_ras_attrs[] = { + &dev_attr_errors_correctable.attr, + &dev_attr_errors_nonfatal.attr, + &dev_attr_errors_fatal.attr, + &dev_attr_reset_error_counters.attr, + NULL, +}; + +static struct attribute_group qat_ras_group = { + .attrs = qat_ras_attrs, + .name = "qat_ras", +}; + +void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->ras_errors.enabled) + return; + + ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors); + + 
if (device_add_group(&GET_DEV(accel_dev), &qat_ras_group)) + dev_err(&GET_DEV(accel_dev), + "Failed to create qat_ras attribute group.\n"); +} + +void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->ras_errors.enabled) + return; + + device_remove_group(&GET_DEV(accel_dev), &qat_ras_group); + + ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h new file mode 100644 index 000000000000..99e9d9cf57f8 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_RAS_H +#define ADF_RAS_H + +#include +#include + +struct adf_accel_dev; + +void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev); +void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev); + +#define ADF_RAS_ERR_CTR_READ(ras_errors, ERR) \ + atomic_read(&(ras_errors).counter[ERR]) + +#define ADF_RAS_ERR_CTR_CLEAR(ras_errors) \ + do { \ + for (int err = 0; err < ADF_RAS_ERRORS; ++err) \ + atomic_set(&(ras_errors).counter[err], 0); \ + } while (0) + +#define ADF_RAS_ERR_CTR_INC(ras_errors, ERR) \ + atomic_inc(&(ras_errors).counter[ERR]) + +#endif /* ADF_RAS_H */ -- Gitee From f49c02a7f9d21436d6ca37a2b8d2ea6b8127d76f Mon Sep 17 00:00:00 2001 From: Shashank Gupta Date: Fri, 20 Oct 2023 11:32:53 +0100 Subject: [PATCH 0711/2138] crypto: qat - count QAT GEN4 errors ANBZ: #8589 commit 99b1c9826e481c3ebe6e7d905b7a0edf853639fd upstream. Intel-SIG: commit 99b1c9826e48 crypto: qat - count QAT GEN4 errors Backport to support Intel QAT in-tree driver Add logic to count correctable, non fatal and fatal error for QAT GEN4 devices. These counters are reported through sysfs attributes in the group qat_ras. 
Signed-off-by: Shashank Gupta Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_common/adf_gen4_ras.c | 182 ++++++++++++++++-- 1 file changed, 166 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 8ba9c9bdb89b..048c24607939 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -3,6 +3,9 @@ #include "adf_common_drv.h" #include "adf_gen4_hw_data.h" #include "adf_gen4_ras.h" +#include "adf_sysfs_ras_counters.h" + +#define BITS_PER_REG(_n_) (sizeof(_n_) * BITS_PER_BYTE) static void enable_errsou_reporting(void __iomem *csr) { @@ -355,6 +358,8 @@ static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev, "Correctable error detected in AE: 0x%x\n", aecorrerr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + /* Clear interrupt from ERRSOU0 */ ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOG_CPP0, aecorrerr); } @@ -374,6 +379,8 @@ static bool adf_handle_cpp_aeunc(struct adf_accel_dev *accel_dev, "Uncorrectable error detected in AE: 0x%x\n", aeuncorerr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0, aeuncorerr); return false; @@ -395,6 +402,8 @@ static bool adf_handle_cppcmdparerr(struct adf_accel_dev *accel_dev, "HI CPP agent command parity error: 0x%x\n", cmdparerr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG, cmdparerr); return true; @@ -413,15 +422,18 @@ static bool adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, rimem_parerr_sts &= ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK | ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK; - if (rimem_parerr_sts & 
ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK) + if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK) { dev_err(&GET_DEV(accel_dev), "RI Memory Parity uncorrectable error: 0x%x\n", rimem_parerr_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK) { dev_err(&GET_DEV(accel_dev), "RI Memory Parity fatal error: 0x%x\n", rimem_parerr_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); reset_required = true; } @@ -445,6 +457,7 @@ static bool adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "TI Memory Parity Error: 0x%x\n", ti_ci_par_sts); ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_STS, ti_ci_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); } return false; @@ -467,6 +480,8 @@ static bool adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev, ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS, ti_pullfub_par_sts); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); } return false; @@ -487,6 +502,8 @@ static bool adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "TI Push Parity Error: 0x%x\n", ti_pushfub_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS, ti_pushfub_par_sts); } @@ -509,6 +526,8 @@ static bool adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "TI CD Parity Error: 0x%x\n", ti_cd_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_STS, ti_cd_par_sts); } @@ -530,6 +549,8 @@ static bool adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "TI TRNSB Parity Error: 0x%x\n", ti_trnsb_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_STS, ti_trnsb_par_sts); } @@ -551,6 +572,8 @@ static bool 
adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev, "Command Parity error detected on IOSFP: 0x%x\n", rimiscsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_RIMISCSTS, rimiscsts); return true; @@ -586,6 +609,8 @@ static bool adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev, "Uncorrectable error on ssm shared memory: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_UERRSSMSH, reg); return false; @@ -606,6 +631,8 @@ static bool adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev, "Correctable error on ssm shared memory: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + ADF_CSR_WR(csr, ADF_GEN4_CERRSSMSH, reg); return false; @@ -626,6 +653,8 @@ static bool adf_handle_pperr_err(struct adf_accel_dev *accel_dev, "Uncorrectable error CPP transaction on memory target: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_PPERR, reg); return false; @@ -642,6 +671,8 @@ static void adf_poll_slicehang_csr(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "Slice %s hang error encountered\n", slice_name); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); } static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev, @@ -682,6 +713,8 @@ static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull command fatal error ATH_CPH: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH, reg); reset_required = true; @@ -693,6 +726,8 @@ static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull command fatal error CPR_XLT: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT, reg); reset_required = true; @@ -704,6 
+739,8 @@ static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull command fatal error DCPR_UCS: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS, reg); reset_required = true; @@ -715,6 +752,8 @@ static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull command fatal error PKE: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE, reg); reset_required = true; @@ -727,6 +766,8 @@ static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull command fatal error WAT_WCP: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP, reg); reset_required = true; @@ -748,6 +789,8 @@ static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull data err ATH_CPH: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH, reg); } @@ -757,6 +800,8 @@ static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull data err CPR_XLT: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT, reg); } @@ -766,6 +811,8 @@ static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull data err DCPR_UCS: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS, reg); } @@ -775,6 +822,8 @@ static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull data err PKE: 0x%x\n", reg); + 
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE, reg); } @@ -785,6 +834,8 @@ static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP pull data err WAT_WCP: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP, reg); } } @@ -805,6 +856,8 @@ static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push command fatal error ATH_CPH: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH, reg); reset_required = true; @@ -816,6 +869,8 @@ static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push command fatal error CPR_XLT: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT, reg); reset_required = true; @@ -827,6 +882,8 @@ static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push command fatal error DCPR_UCS: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS, reg); reset_required = true; @@ -839,6 +896,8 @@ static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, "SPP push command fatal error PKE: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE, reg); reset_required = true; @@ -851,6 +910,8 @@ static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push command fatal error WAT_WCP: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP, reg); reset_required = true; @@ -872,6 +933,8 @@ static bool 
adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push data err ATH_CPH: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH, reg); } @@ -881,6 +944,8 @@ static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push data err CPR_XLT: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT, reg); } @@ -890,6 +955,8 @@ static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push data err DCPR_UCS: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS, reg); } @@ -899,6 +966,8 @@ static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push data err PKE: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE, reg); } @@ -909,6 +978,8 @@ static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "SPP push data err WAT_WCP: 0x%x\n", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP, reg); } @@ -936,8 +1007,11 @@ static bool adf_handle_spppar_err(struct adf_accel_dev *accel_dev, static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 iastatssm) { + u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR); + u32 bits_num = BITS_PER_REG(reg); bool reset_required = false; - u32 reg; + unsigned long errs_bits; + u32 bit_iterator; if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT)) return false; @@ -948,12 +1022,22 @@ static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "Fatal SSM CPP parity error: 
0x%x\n", reg); + errs_bits = reg & ADF_GEN4_SSMCPPERR_FATAL_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } reset_required = true; } - if (reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK) + if (reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK) { dev_err(&GET_DEV(accel_dev), "non-Fatal SSM CPP parity error: 0x%x\n", reg); + errs_bits = reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK; + + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + } ADF_CSR_WR(csr, ADF_GEN4_SSMCPPERR, reg); @@ -971,35 +1055,47 @@ static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC); reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT; - if (reg) + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg); + } reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH); reg &= err_mask->parerr_ath_cph_mask; - if (reg) + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg); + } reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT); reg &= err_mask->parerr_cpr_xlt_mask; - if (reg) + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg); + } reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS); reg &= err_mask->parerr_dcpr_ucs_mask; - if (reg) + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg); + } reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE); reg &= err_mask->parerr_pke_mask; - if (reg) + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg); + } if (err_mask->parerr_wat_wcp_mask) { reg = ADF_CSR_RD(csr, 
ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP); reg &= err_mask->parerr_wat_wcp_mask; - if (reg) + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP, reg); + } } dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported"); @@ -1010,8 +1106,11 @@ static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 iastatssm) { + u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH); + u32 bits_num = BITS_PER_REG(reg); bool reset_required = false; - u32 reg; + unsigned long errs_bits; + u32 bit_iterator; if (!(iastatssm & (ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT | ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT))) @@ -1025,17 +1124,34 @@ static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "Fatal SER_SSMSH_ERR: 0x%x\n", reg); + errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } + reset_required = true; } - if (reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK) + if (reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK) { dev_err(&GET_DEV(accel_dev), "non-fatal SER_SSMSH_ERR: 0x%x\n", reg); - if (reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK) + errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + } + + if (reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK) { dev_warn(&GET_DEV(accel_dev), "Correctable SER_SSMSH_ERR: 0x%x\n", reg); + errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + } + } + ADF_CSR_WR(csr, ADF_GEN4_SER_ERR_SSMSH, reg); return reset_required; @@ -1077,6 +1193,8 @@ static bool 
adf_handle_exprpssmcmpr(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "Uncorrectable error exception in SSM CMP: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMCPR, reg); return false; @@ -1092,14 +1210,20 @@ static bool adf_handle_exprpssmxlt(struct adf_accel_dev *accel_dev, if (!reg) return false; - if (reg & ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK) + if (reg & ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK) { dev_err(&GET_DEV(accel_dev), "Uncorrectable error exception in SSM XLT: 0x%x", reg); - if (reg & ADF_GEN4_EXPRPSSMXLT_CERR_BIT) + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + if (reg & ADF_GEN4_EXPRPSSMXLT_CERR_BIT) { dev_warn(&GET_DEV(accel_dev), "Correctable error exception in SSM XLT: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + } + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMXLT, reg); return false; @@ -1118,14 +1242,20 @@ static bool adf_handle_exprpssmdcpr(struct adf_accel_dev *accel_dev, if (!reg) continue; - if (reg & ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK) + if (reg & ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK) { dev_err(&GET_DEV(accel_dev), "Uncorrectable error exception in SSM DCMP: 0x%x", reg); - if (reg & ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK) + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + if (reg & ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK) { dev_warn(&GET_DEV(accel_dev), "Correctable error exception in SSM DCMP: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + } + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMDCPR(i), reg); } @@ -1161,11 +1291,13 @@ static bool adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_DATAPAR_BIT) { dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: data parity: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); } if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_CMDPAR_BIT) { dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: command parity: 0x%x", reg); 
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); reset_required = true; } @@ -1173,6 +1305,7 @@ static bool adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_MERR_BIT) { dev_err(&GET_DEV(accel_dev), "CPP_CFC_ERR: multiple errors: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); reset_required = true; } @@ -1204,6 +1337,8 @@ static bool adf_handle_timiscsts(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "Fatal error in Transmit Interface: 0x%x\n", timiscsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + return true; } @@ -1221,6 +1356,8 @@ static bool adf_handle_ricppintsts(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "RI CPP Uncorrectable Error: 0x%x\n", ricppintsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_RICPPINTSTS, ricppintsts); return false; @@ -1240,6 +1377,8 @@ static bool adf_handle_ticppintsts(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "TI CPP Uncorrectable Error: 0x%x\n", ticppintsts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_TICPPINTSTS, ticppintsts); return false; @@ -1259,6 +1398,8 @@ static bool adf_handle_aramcerr(struct adf_accel_dev *accel_dev, dev_warn(&GET_DEV(accel_dev), "ARAM correctable error : 0x%x\n", aram_cerr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + aram_cerr |= ADF_GEN4_REG_ARAMCERR_EN_BITMASK; ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, aram_cerr); @@ -1286,10 +1427,14 @@ static bool adf_handle_aramuerr(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "ARAM multiple uncorrectable errors: 0x%x\n", aramuerr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + reset_required = true; } else { dev_err(&GET_DEV(accel_dev), "ARAM uncorrectable error: 0x%x\n", aramuerr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); } aramuerr |= 
ADF_GEN4_REG_ARAMUERR_EN_BITMASK; @@ -1319,10 +1464,13 @@ static bool adf_handle_reg_cppmemtgterr(struct adf_accel_dev *accel_dev, "Misc memory target multiple uncorrectable errors: 0x%x\n", cppmemtgterr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + reset_required = true; } else { dev_err(&GET_DEV(accel_dev), "Misc memory target uncorrectable error: 0x%x\n", cppmemtgterr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); } cppmemtgterr |= ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK; @@ -1351,6 +1499,8 @@ static bool adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev, "Ring Pair (%u) ATU detected fault: 0x%x\n", i, atufaultstatus); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_ATUFAULTSTATUS(i), atufaultstatus); } } -- Gitee From e384f3ee0914c599cc0d89ba0a278c7b2e7e0d4c Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Fri, 20 Oct 2023 15:49:24 +0200 Subject: [PATCH 0712/2138] crypto: qat - move admin api ANBZ: #8589 commit 8e6857f76dafba874593107f9e5c20030c5956ed upstream. Intel-SIG: commit 8e6857f76daf crypto: qat - move admin api Backport to support Intel QAT in-tree driver The admin API is growing and deserves its own include. Move it from adf_common_drv.h to adf_admin.h. 
Signed-off-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 1 + .../intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 1 + .../intel/qat/qat_c62x/adf_c62x_hw_data.c | 1 + .../crypto/intel/qat/qat_common/adf_admin.c | 1 + .../crypto/intel/qat/qat_common/adf_admin.h | 19 +++++++++++++++++++ .../crypto/intel/qat/qat_common/adf_clock.c | 1 + .../intel/qat/qat_common/adf_cnv_dbgfs.c | 1 + .../intel/qat/qat_common/adf_common_drv.h | 10 ---------- .../intel/qat/qat_common/adf_fw_counters.c | 1 + .../crypto/intel/qat/qat_common/adf_gen4_pm.c | 1 + .../qat/qat_common/adf_gen4_pm_debugfs.c | 1 + .../intel/qat/qat_common/adf_gen4_timer.c | 1 + .../intel/qat/qat_common/adf_heartbeat.c | 1 + .../qat/qat_common/adf_heartbeat_dbgfs.c | 1 + .../qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 1 + 15 files changed, 32 insertions(+), 10 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_admin.h diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 4775df841982..44b43865e714 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -2,6 +2,7 @@ /* Copyright(c) 2020 - 2021 Intel Corporation */ #include #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index 9c00c441b602..a882e0ea2279 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2021 Intel Corporation */ #include +#include #include #include #include diff 
--git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index 355a781693eb..48cf3eb7c734 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2021 Intel Corporation */ #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 3a04e743497f..15ffda582334 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -7,6 +7,7 @@ #include #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "adf_heartbeat.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h b/drivers/crypto/intel/qat/qat_common/adf_admin.h new file mode 100644 index 000000000000..03507ec3a51d --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_ADMIN +#define ADF_ADMIN + +struct adf_accel_dev; + +int adf_init_admin_comms(struct adf_accel_dev *accel_dev); +void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); +int adf_send_admin_init(struct adf_accel_dev *accel_dev); +int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps); +int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); +int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); +int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); +int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); +int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size); +int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, 
u16 ae, u16 *err_cnt, u16 *latest_err); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.c b/drivers/crypto/intel/qat/qat_common/adf_clock.c index eae44969dc84..cf89f57de2a7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_clock.c +++ b/drivers/crypto/intel/qat/qat_common/adf_clock.c @@ -10,6 +10,7 @@ #include #include #include +#include "adf_admin.h" #include "adf_accel_devices.h" #include "adf_clock.h" #include "adf_common_drv.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c index aa5b6ff1dfb4..07119c487da0 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c @@ -6,6 +6,7 @@ #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_cnv_dbgfs.h" #include "qat_compression.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index d9342634f9c1..f06188033a93 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -87,16 +87,6 @@ void adf_reset_flr(struct adf_accel_dev *accel_dev); void adf_dev_restore(struct adf_accel_dev *accel_dev); int adf_init_aer(void); void adf_exit_aer(void); -int adf_init_admin_comms(struct adf_accel_dev *accel_dev); -void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); -int adf_send_admin_init(struct adf_accel_dev *accel_dev); -int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps); -int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); -int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); -int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); -int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); -int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t 
p_state_addr, size_t buff_size); -int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c index 6abe4736eab8..98fb7ccfed9f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c +++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c @@ -9,6 +9,7 @@ #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_fw_counters.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c index c663d3a20c5b..5dafd9a270db 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c @@ -5,6 +5,7 @@ #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_gen4_pm.h" #include "adf_cfg_strings.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c index 5114759287c6..ee0b5079de3e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c @@ -6,6 +6,7 @@ #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_gen4_pm.h" #include "icp_qat_fw_init_admin.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c index 646c57922fcd..35ccb91d6ec1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c @@ -9,6 +9,7 @@ #include #include +#include "adf_admin.h" #include "adf_accel_devices.h" #include 
"adf_common_drv.h" #include "adf_gen4_timer.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c index beef9a5f6c75..13f48d2f6da8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c @@ -12,6 +12,7 @@ #include #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_cfg.h" #include "adf_cfg_strings.h" #include "adf_clock.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c index 803cbfd838f0..2661af6a2ef6 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c @@ -8,6 +8,7 @@ #include #include #include +#include "adf_admin.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_heartbeat.h" diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index 0e40897cc983..ac04662ca806 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2021 Intel Corporation */ #include +#include #include #include #include -- Gitee From 75b83bb02f576979acae43ad0731147a4747cefb Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 20 Oct 2023 15:49:25 +0200 Subject: [PATCH 0713/2138] units: Add BYTES_PER_*BIT ANBZ: #8589 commit e8eed5f7366f1f5decb694168bd06fb59ef6b12c upstream. Intel-SIG: commit e8eed5f7366f units: Add BYTES_PER_*BIT Backport to support Intel QAT in-tree driver There is going to be a new user of the BYTES_PER_[K/M/G]BIT definition besides possibly existing ones. Add them to the header. 
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- include/linux/units.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/units.h b/include/linux/units.h index 2793a41e73a2..ff1bd6b5f5b3 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -31,6 +31,10 @@ #define MICROWATT_PER_MILLIWATT 1000UL #define MICROWATT_PER_WATT 1000000UL +#define BYTES_PER_KBIT (KILO / BITS_PER_BYTE) +#define BYTES_PER_MBIT (MEGA / BITS_PER_BYTE) +#define BYTES_PER_GBIT (GIGA / BITS_PER_BYTE) + #define ABSOLUTE_ZERO_MILLICELSIUS -273150 static inline long milli_kelvin_to_millicelsius(long t) -- Gitee From e7121a646dd03717666de1730aed981cd18b93ec Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 20 Oct 2023 15:49:26 +0200 Subject: [PATCH 0714/2138] crypto: qat - add bits.h to icp_qat_hw.h ANBZ: #8589 commit 02e7f67c47269135f41650ac1b693034e3e8f507 upstream. Intel-SIG: commit 02e7f67c4726 crypto: qat - add bits.h to icp_qat_hw.h Backport to support Intel QAT in-tree driver Some enums use the macro BIT. Include bits.h as it is missing. 
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/icp_qat_hw.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h index 0c8883e2ccc6..eb2ef225bcee 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h @@ -3,6 +3,8 @@ #ifndef _ICP_QAT_HW_H_ #define _ICP_QAT_HW_H_ +#include + enum icp_qat_hw_ae_id { ICP_QAT_HW_AE_0 = 0, ICP_QAT_HW_AE_1 = 1, -- Gitee From 001a93b06df1ec6b398067855e083d2a020b0ebb Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 20 Oct 2023 15:49:27 +0200 Subject: [PATCH 0715/2138] crypto: qat - add retrieval of fw capabilities ANBZ: #8589 commit c7fd53796dbd09c3ef55032925bc7f8f238f9405 upstream. Intel-SIG: commit c7fd53796dbd crypto: qat - add retrieval of fw capabilities Backport to support Intel QAT in-tree driver The QAT firmware provides a mechanism to retrieve its capabilities through the init admin interface. Add logic to retrieve the firmware capability mask from the firmware through the init/admin channel. This mask reports if the power management, telemetry and rate limiting features are supported. The fw capabilities are stored in the accel_dev structure and are used to detect if a certain feature is supported by the firmware loaded in the device. This is supported only by devices which have an admin AE. 
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_common/adf_accel_devices.h | 1 + .../crypto/intel/qat/qat_common/adf_admin.c | 23 +++++++++++++++++++ .../qat/qat_common/icp_qat_fw_init_admin.h | 3 +++ 3 files changed, 27 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 1c11d90bd9f3..908959288ce5 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -253,6 +253,7 @@ struct adf_hw_device_data { u32 straps; u32 accel_capabilities_mask; u32 extended_dc_capabilities; + u16 fw_capabilities; u32 clock_frequency; u32 instance_id; u16 accel_mask; diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 15ffda582334..50e054ba2c33 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -310,6 +310,26 @@ static bool is_dcc_enabled(struct adf_accel_dev *accel_dev) return !strcmp(services, "dcc"); } +static int adf_get_fw_capabilities(struct adf_accel_dev *accel_dev, u16 *caps) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + int ret; + + if (!ae_mask) + return 0; + + req.cmd_id = ICP_QAT_FW_CAPABILITIES_GET; + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + return ret; + + *caps = resp.fw_capabilities; + + return 0; +} + /** * adf_send_admin_init() - Function sends init message to FW * @accel_dev: Pointer to acceleration device. 
@@ -320,6 +340,7 @@ static bool is_dcc_enabled(struct adf_accel_dev *accel_dev) */ int adf_send_admin_init(struct adf_accel_dev *accel_dev) { + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); u32 dc_capabilities = 0; int ret; @@ -340,6 +361,8 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev) } accel_dev->hw_device->extended_dc_capabilities = dc_capabilities; + adf_get_fw_capabilities(accel_dev, &hw_data->fw_capabilities); + return adf_init_ae(accel_dev); } EXPORT_SYMBOL_GPL(adf_send_admin_init); diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index 9e5ce419d875..e4de9a30e0bd 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -16,6 +16,7 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_HEARTBEAT_SYNC = 7, ICP_QAT_FW_HEARTBEAT_GET = 8, ICP_QAT_FW_COMP_CAPABILITY_GET = 9, + ICP_QAT_FW_CRYPTO_CAPABILITY_GET = 10, ICP_QAT_FW_DC_CHAIN_INIT = 11, ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13, ICP_QAT_FW_TIMER_GET = 19, @@ -109,10 +110,12 @@ struct icp_qat_fw_init_admin_resp { __u32 unsuccessful_count; __u64 resrvd8; }; + __u16 fw_capabilities; }; } __packed; #define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC +#define ICP_QAT_FW_CAPABILITIES_GET ICP_QAT_FW_CRYPTO_CAPABILITY_GET #define ICP_QAT_NUMBER_OF_PM_EVENTS 8 -- Gitee From a799c6b3a3a70d6fff12b4874c4ac75a0b375c43 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 20 Oct 2023 15:49:28 +0200 Subject: [PATCH 0716/2138] crypto: qat - add rate limiting feature to qat_4xxx ANBZ: #8589 commit d9fb8408376e70a903d06ac86e42e0d0f44a5785 upstream. Intel-SIG: commit d9fb8408376e crypto: qat - add rate limiting feature to qat_4xxx Backport to support Intel QAT in-tree driver The Rate Limiting (RL) feature allows to control the rate of requests that can be submitted on a ring pair (RP). 
This allows sharing a QAT device among multiple users while ensuring a guaranteed throughput. The driver provides a mechanism that allows users to set policies, that are programmed to the device. The device is then enforcing those policies. Configuration of RL is accomplished through entities called SLAs (Service Level Agreement). Each SLA object gets a unique identifier and defines the limitations for a single service across up to four ring pairs (RPs count allocated to a single VF). The rate is determined using two fields: * CIR (Committed Information Rate), i.e., the guaranteed rate. * PIR (Peak Information Rate), i.e., the maximum rate achievable when the device has available resources. The rate values are expressed in permille scale i.e. 0-1000. Ring pair selection is achieved by providing a 64-bit mask, where each bit corresponds to one of the ring pairs. This adds an interface and logic that allow to add, update, retrieve and remove an SLA. Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 20 + .../intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 13 +- drivers/crypto/intel/qat/qat_common/Makefile | 2 + .../intel/qat/qat_common/adf_accel_devices.h | 3 + .../crypto/intel/qat/qat_common/adf_admin.c | 47 + .../crypto/intel/qat/qat_common/adf_admin.h | 8 + .../intel/qat/qat_common/adf_gen4_hw_data.h | 7 + .../crypto/intel/qat/qat_common/adf_init.c | 10 + drivers/crypto/intel/qat/qat_common/adf_rl.c | 1159 +++++++++++++++++ drivers/crypto/intel/qat/qat_common/adf_rl.h | 169 +++ .../intel/qat/qat_common/adf_rl_admin.c | 97 ++ .../intel/qat/qat_common/adf_rl_admin.h | 18 + .../qat/qat_common/icp_qat_fw_init_admin.h | 38 + 13 files changed, 1590 insertions(+), 1 deletion(-) create mode 100644 
drivers/crypto/intel/qat/qat_common/adf_rl.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_rl.h create mode 100644 drivers/crypto/intel/qat/qat_common/adf_rl_admin.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_rl_admin.h diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 44b43865e714..b4be33346cbd 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -320,6 +320,24 @@ static u32 get_heartbeat_clock(struct adf_hw_device_data *self) return ADF_4XXX_KPT_COUNTER_FREQ; } +static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) +{ + rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET; + rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET; + rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET; + rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET; + rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET; + + rl_data->pcie_scale_div = ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV; + rl_data->pcie_scale_mul = ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL; + rl_data->dcpr_correction = ADF_4XXX_RL_DCPR_CORRECTION; + rl_data->max_tp[ADF_SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM; + rl_data->max_tp[ADF_SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM; + rl_data->max_tp[ADF_SVC_DC] = ADF_4XXX_RL_MAX_TP_DC; + rl_data->scan_interval = ADF_4XXX_RL_SCANS_PER_SEC; + rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF; +} + static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) { struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR]; @@ -579,12 +597,14 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->stop_timer = adf_gen4_timer_stop; hw_data->get_hb_clock = get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + hw_data->clock_frequency = ADF_4XXX_AE_FREQ; adf_gen4_set_err_mask(&hw_data->dev_err_mask); adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); 
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen4_init_dc_ops(&hw_data->dc_ops); adf_gen4_init_ras_ops(&hw_data->ras_ops); + adf_init_rl_data(&hw_data->rl_data); } void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h index efd5dadc19ed..33423295e90f 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -82,8 +82,19 @@ #define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin" #define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin" +/* RL constants */ +#define ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV 100 +#define ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL 102 +#define ADF_4XXX_RL_DCPR_CORRECTION 1 +#define ADF_4XXX_RL_SCANS_PER_SEC 954 +#define ADF_4XXX_RL_MAX_TP_ASYM 173750UL +#define ADF_4XXX_RL_MAX_TP_SYM 95000UL +#define ADF_4XXX_RL_MAX_TP_DC 45000UL +#define ADF_4XXX_RL_SLICE_REF 1000UL + /* Clocks frequency */ -#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) +#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) +#define ADF_4XXX_AE_FREQ (1000 * HZ_PER_MHZ) /* qat_4xxx fuse bits are different from old GENs, redefine them */ enum icp_qat_4xxx_slice_mask { diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index fcf74c0dc534..5f09dfd4798b 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -29,6 +29,8 @@ intel_qat-objs := adf_cfg.o \ qat_algs.o \ qat_asym_algs.o \ qat_algs_send.o \ + adf_rl.o \ + adf_rl_admin.o \ qat_uclo.o \ qat_hal.o \ qat_bl.o diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 908959288ce5..30c2b15ff801 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -9,6 +9,7 @@ #include #include 
#include "adf_cfg_common.h" +#include "adf_rl.h" #include "adf_pfvf_msg.h" #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" @@ -247,6 +248,7 @@ struct adf_hw_device_data { struct adf_dc_ops dc_ops; struct adf_ras_ops ras_ops; struct adf_dev_err_mask dev_err_mask; + struct adf_rl_hw_data rl_data; const char *fw_name; const char *fw_mmp_name; u32 fuses; @@ -358,6 +360,7 @@ struct adf_accel_dev { struct adf_accel_pci accel_pci_dev; struct adf_timer *timer; struct adf_heartbeat *heartbeat; + struct adf_rl *rate_limiting; union { struct { /* protects VF2PF interrupts access */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 50e054ba2c33..54b673ec2362 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -330,6 +330,53 @@ static int adf_get_fw_capabilities(struct adf_accel_dev *accel_dev, u16 *caps) return 0; } +int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_slice_cnt *slices) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + int ret; + + req.cmd_id = ICP_QAT_FW_RL_INIT; + + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + return ret; + + memcpy(slices, &resp.slices, sizeof(*slices)); + + return 0; +} + +int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_req *req) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + + /* + * req struct filled in rl implementation. 
Used commands + * ICP_QAT_FW_RL_ADD for a new SLA + * ICP_QAT_FW_RL_UPDATE for update SLA + */ + return adf_send_admin(accel_dev, req, &resp, ae_mask); +} + +int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + + req.cmd_id = ICP_QAT_FW_RL_REMOVE; + req.node_id = node_id; + req.node_type = node_type; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); +} + /** * adf_send_admin_init() - Function sends init message to FW * @accel_dev: Pointer to acceleration device. diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h b/drivers/crypto/intel/qat/qat_common/adf_admin.h index 03507ec3a51d..55cbcbc66c9f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.h +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h @@ -3,6 +3,8 @@ #ifndef ADF_ADMIN #define ADF_ADMIN +#include "icp_qat_fw_init_admin.h" + struct adf_accel_dev; int adf_init_admin_comms(struct adf_accel_dev *accel_dev); @@ -12,6 +14,12 @@ int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); +int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_slice_cnt *slices); +int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_req *req); +int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type); int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size); int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err); 
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 02d7a019ebf8..1813fe1d5a06 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -139,6 +139,13 @@ do { \ /* Number of heartbeat counter pairs */ #define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE +/* Rate Limiting */ +#define ADF_GEN4_RL_R2L_OFFSET 0x508000 +#define ADF_GEN4_RL_L2C_OFFSET 0x509000 +#define ADF_GEN4_RL_C2S_OFFSET 0x508818 +#define ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800 +#define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804 + void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index ef51c4d028d2..81c39f3d07e1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -9,6 +9,7 @@ #include "adf_common_drv.h" #include "adf_dbgfs.h" #include "adf_heartbeat.h" +#include "adf_rl.h" #include "adf_sysfs_ras_counters.h" static LIST_HEAD(service_table); @@ -137,6 +138,9 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev) } adf_heartbeat_init(accel_dev); + ret = adf_rl_init(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; /* * Subservice initialisation is divided into two stages: init and start. 
@@ -212,6 +216,9 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) } adf_heartbeat_start(accel_dev); + ret = adf_rl_start(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_START)) { @@ -272,6 +279,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) return; + adf_rl_stop(accel_dev); adf_dbgfs_rm(accel_dev); adf_sysfs_stop_ras(accel_dev); @@ -359,6 +367,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) clear_bit(accel_dev->accel_id, service->init_status); } + adf_rl_exit(accel_dev); + if (hw_data->ras_ops.disable_ras_errors) hw_data->ras_ops.disable_ras_errors(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c new file mode 100644 index 000000000000..88a03105b52a --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -0,0 +1,1159 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#define dev_fmt(fmt) "RateLimiting: " fmt + +#include +#include + +#include +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_rl_admin.h" +#include "adf_rl.h" + +#define RL_TOKEN_GRANULARITY_PCIEIN_BUCKET 0U +#define RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET 0U +#define RL_TOKEN_PCIE_SIZE 64 +#define RL_TOKEN_ASYM_SIZE 1024 +#define RL_CSR_SIZE 4U +#define RL_CAPABILITY_MASK GENMASK(6, 4) +#define RL_CAPABILITY_VALUE 0x70 +#define RL_VALIDATE_NON_ZERO(input) ((input) == 0) +#define ROOT_MASK GENMASK(1, 0) +#define CLUSTER_MASK GENMASK(3, 0) +#define LEAF_MASK GENMASK(5, 0) + +static int validate_user_input(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in, + bool is_update) +{ + const unsigned long rp_mask = sla_in->rp_mask; + size_t rp_mask_size; + int i, cnt; + + if (sla_in->pir < sla_in->cir) { + 
dev_notice(&GET_DEV(accel_dev), + "PIR must be >= CIR, setting PIR to CIR\n"); + sla_in->pir = sla_in->cir; + } + + if (!is_update) { + cnt = 0; + rp_mask_size = sizeof(sla_in->rp_mask) * BITS_PER_BYTE; + for_each_set_bit(i, &rp_mask, rp_mask_size) { + if (++cnt > RL_RP_CNT_PER_LEAF_MAX) { + dev_notice(&GET_DEV(accel_dev), + "Too many ring pairs selected for this SLA\n"); + return -EINVAL; + } + } + + if (sla_in->srv >= ADF_SVC_NONE) { + dev_notice(&GET_DEV(accel_dev), + "Wrong service type\n"); + return -EINVAL; + } + + if (sla_in->type > RL_LEAF) { + dev_notice(&GET_DEV(accel_dev), + "Wrong node type\n"); + return -EINVAL; + } + + if (sla_in->parent_id < RL_PARENT_DEFAULT_ID || + sla_in->parent_id >= RL_NODES_CNT_MAX) { + dev_notice(&GET_DEV(accel_dev), + "Wrong parent ID\n"); + return -EINVAL; + } + } + + return 0; +} + +static int validate_sla_id(struct adf_accel_dev *accel_dev, int sla_id) +{ + struct rl_sla *sla; + + if (sla_id <= RL_SLA_EMPTY_ID || sla_id >= RL_NODES_CNT_MAX) { + dev_notice(&GET_DEV(accel_dev), "Provided ID is out of bounds\n"); + return -EINVAL; + } + + sla = accel_dev->rate_limiting->sla[sla_id]; + + if (!sla) { + dev_notice(&GET_DEV(accel_dev), "SLA with provided ID does not exist\n"); + return -EINVAL; + } + + if (sla->type != RL_LEAF) { + dev_notice(&GET_DEV(accel_dev), "This ID is reserved for internal use\n"); + return -EINVAL; + } + + return 0; +} + +/** + * find_parent() - Find the parent for a new SLA + * @rl_data: pointer to ratelimiting data + * @sla_in: pointer to user input data for a new SLA + * + * Function returns a pointer to the parent SLA. If the parent ID is provided + * as input in the user data, then such ID is validated and the parent SLA + * is returned. + * Otherwise, it returns the default parent SLA (root or cluster) for + * the new object. 
+ * + * Return: + * * Pointer to the parent SLA object + * * NULL - when parent cannot be found + */ +static struct rl_sla *find_parent(struct adf_rl *rl_data, + struct adf_rl_sla_input_data *sla_in) +{ + int input_parent_id = sla_in->parent_id; + struct rl_sla *root = NULL; + struct rl_sla *parent_sla; + int i; + + if (sla_in->type == RL_ROOT) + return NULL; + + if (input_parent_id > RL_PARENT_DEFAULT_ID) { + parent_sla = rl_data->sla[input_parent_id]; + /* + * SLA can be a parent if it has the same service as the child + * and its type is higher in the hierarchy, + * for example the parent type of a LEAF must be a CLUSTER. + */ + if (parent_sla && parent_sla->srv == sla_in->srv && + parent_sla->type == sla_in->type - 1) + return parent_sla; + + return NULL; + } + + /* If input_parent_id is not valid, get root for this service type. */ + for (i = 0; i < RL_ROOT_MAX; i++) { + if (rl_data->root[i] && rl_data->root[i]->srv == sla_in->srv) { + root = rl_data->root[i]; + break; + } + } + + if (!root) + return NULL; + + /* + * If the type of this SLA is cluster, then return the root. + * Otherwise, find the default (i.e. first) cluster for this service. 
+ */ + if (sla_in->type == RL_CLUSTER) + return root; + + for (i = 0; i < RL_CLUSTER_MAX; i++) { + if (rl_data->cluster[i] && rl_data->cluster[i]->parent == root) + return rl_data->cluster[i]; + } + + return NULL; +} + +static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_srv) +{ + switch (rl_srv) { + case ADF_SVC_ASYM: + return ASYM; + case ADF_SVC_SYM: + return SYM; + case ADF_SVC_DC: + return COMP; + default: + return UNUSED; + } +} + +/** + * get_sla_arr_of_type() - Returns a pointer to SLA type specific array + * @rl_data: pointer to ratelimiting data + * @type: SLA type + * @sla_arr: pointer to variable where requested pointer will be stored + * + * Return: Max number of elements allowed for the returned array + */ +static u32 get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, + struct rl_sla ***sla_arr) +{ + switch (type) { + case RL_LEAF: + *sla_arr = rl_data->leaf; + return RL_LEAF_MAX; + case RL_CLUSTER: + *sla_arr = rl_data->cluster; + return RL_CLUSTER_MAX; + case RL_ROOT: + *sla_arr = rl_data->root; + return RL_ROOT_MAX; + default: + *sla_arr = NULL; + return 0; + } +} + +static bool is_service_enabled(struct adf_accel_dev *accel_dev, + enum adf_base_services rl_srv) +{ + enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(rl_srv); + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u8 rps_per_bundle = hw_data->num_banks_per_vf; + int i; + + for (i = 0; i < rps_per_bundle; i++) { + if (GET_SRV_TYPE(accel_dev, i) == arb_srv) + return true; + } + + return false; +} + +/** + * prepare_rp_ids() - Creates an array of ring pair IDs from bitmask + * @accel_dev: pointer to acceleration device structure + * @sla: SLA object data where result will be written + * @rp_mask: bitmask of ring pair IDs + * + * Function tries to convert provided bitmap to an array of IDs. It checks if + * RPs aren't in use, are assigned to SLA service or if a number of provided + * IDs is not too big. 
If successful, writes the result into the field + * sla->ring_pairs_cnt. + * + * Return: + * * 0 - ok + * * -EINVAL - ring pairs array cannot be created from provided mask + */ +static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla, + const unsigned long rp_mask) +{ + enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(sla->srv); + u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf; + bool *rp_in_use = accel_dev->rate_limiting->rp_in_use; + size_t rp_cnt_max = ARRAY_SIZE(sla->ring_pairs_ids); + u16 rp_id_max = GET_HW_DATA(accel_dev)->num_banks; + u16 cnt = 0; + u16 rp_id; + + for_each_set_bit(rp_id, &rp_mask, rp_id_max) { + if (cnt >= rp_cnt_max) { + dev_notice(&GET_DEV(accel_dev), + "Assigned more ring pairs than supported"); + return -EINVAL; + } + + if (rp_in_use[rp_id]) { + dev_notice(&GET_DEV(accel_dev), + "RP %u already assigned to other SLA", rp_id); + return -EINVAL; + } + + if (GET_SRV_TYPE(accel_dev, rp_id % rps_per_bundle) != arb_srv) { + dev_notice(&GET_DEV(accel_dev), + "RP %u does not support SLA service", rp_id); + return -EINVAL; + } + + sla->ring_pairs_ids[cnt++] = rp_id; + } + + sla->ring_pairs_cnt = cnt; + + return 0; +} + +static void mark_rps_usage(struct rl_sla *sla, bool *rp_in_use, bool used) +{ + u16 rp_id; + int i; + + for (i = 0; i < sla->ring_pairs_cnt; i++) { + rp_id = sla->ring_pairs_ids[i]; + rp_in_use[rp_id] = used; + } +} + +static void assign_rps_to_leaf(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u32 base_offset = hw_data->rl_data.r2l_offset; + u32 node_id = clear ? 
0U : (sla->node_id & LEAF_MASK); + u32 offset; + int i; + + for (i = 0; i < sla->ring_pairs_cnt; i++) { + offset = base_offset + (RL_CSR_SIZE * sla->ring_pairs_ids[i]); + ADF_CSR_WR(pmisc_addr, offset, node_id); + } +} + +static void assign_leaf_to_cluster(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u32 base_offset = hw_data->rl_data.l2c_offset; + u32 node_id = sla->node_id & LEAF_MASK; + u32 parent_id = clear ? 0U : (sla->parent->node_id & CLUSTER_MASK); + u32 offset; + + offset = base_offset + (RL_CSR_SIZE * node_id); + ADF_CSR_WR(pmisc_addr, offset, parent_id); +} + +static void assign_cluster_to_root(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u32 base_offset = hw_data->rl_data.c2s_offset; + u32 node_id = sla->node_id & CLUSTER_MASK; + u32 parent_id = clear ? 
0U : (sla->parent->node_id & ROOT_MASK); + u32 offset; + + offset = base_offset + (RL_CSR_SIZE * node_id); + ADF_CSR_WR(pmisc_addr, offset, parent_id); +} + +static void assign_node_to_parent(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear_assignment) +{ + switch (sla->type) { + case RL_LEAF: + assign_rps_to_leaf(accel_dev, sla, clear_assignment); + assign_leaf_to_cluster(accel_dev, sla, clear_assignment); + break; + case RL_CLUSTER: + assign_cluster_to_root(accel_dev, sla, clear_assignment); + break; + default: + break; + } +} + +/** + * can_parent_afford_sla() - Verifies if parent allows to create an SLA + * @sla_in: pointer to user input data for a new SLA + * @sla_parent: pointer to parent SLA object + * @sla_cir: current child CIR value (only for update) + * @is_update: request is a update + * + * Algorithm verifies if parent has enough remaining budget to take assignment + * of a child with provided parameters. In update case current CIR value must be + * returned to budget first. + * PIR value cannot exceed the PIR assigned to parent. 
+ * + * Return: + * * true - SLA can be created + * * false - SLA cannot be created + */ +static bool can_parent_afford_sla(struct adf_rl_sla_input_data *sla_in, + struct rl_sla *sla_parent, u32 sla_cir, + bool is_update) +{ + u32 rem_cir = sla_parent->rem_cir; + + if (is_update) + rem_cir += sla_cir; + + if (sla_in->cir > rem_cir || sla_in->pir > sla_parent->pir) + return false; + + return true; +} + +/** + * can_node_afford_update() - Verifies if SLA can be updated with input data + * @sla_in: pointer to user input data for a new SLA + * @sla: pointer to SLA object selected for update + * + * Algorithm verifies if a new CIR value is big enough to satisfy currently + * assigned child SLAs and if PIR can be updated + * + * Return: + * * true - SLA can be updated + * * false - SLA cannot be updated + */ +static bool can_node_afford_update(struct adf_rl_sla_input_data *sla_in, + struct rl_sla *sla) +{ + u32 cir_in_use = sla->cir - sla->rem_cir; + + /* new CIR cannot be smaller then currently consumed value */ + if (cir_in_use > sla_in->cir) + return false; + + /* PIR of root/cluster cannot be reduced in node with assigned children */ + if (sla_in->pir < sla->pir && sla->type != RL_LEAF && cir_in_use > 0) + return false; + + return true; +} + +static bool is_enough_budget(struct adf_rl *rl_data, struct rl_sla *sla, + struct adf_rl_sla_input_data *sla_in, + bool is_update) +{ + u32 max_val = rl_data->device_data->scale_ref; + struct rl_sla *parent = sla->parent; + bool ret = true; + + if (sla_in->cir > max_val || sla_in->pir > max_val) + ret = false; + + switch (sla->type) { + case RL_LEAF: + ret &= can_parent_afford_sla(sla_in, parent, sla->cir, + is_update); + break; + case RL_CLUSTER: + ret &= can_parent_afford_sla(sla_in, parent, sla->cir, + is_update); + + if (is_update) + ret &= can_node_afford_update(sla_in, sla); + + break; + case RL_ROOT: + if (is_update) + ret &= can_node_afford_update(sla_in, sla); + + break; + default: + ret = false; + break; + } + + return 
ret; +} + +static void update_budget(struct rl_sla *sla, u32 old_cir, bool is_update) +{ + switch (sla->type) { + case RL_LEAF: + if (is_update) + sla->parent->rem_cir += old_cir; + + sla->parent->rem_cir -= sla->cir; + sla->rem_cir = 0; + break; + case RL_CLUSTER: + if (is_update) { + sla->parent->rem_cir += old_cir; + sla->rem_cir = sla->cir - (old_cir - sla->rem_cir); + } else { + sla->rem_cir = sla->cir; + } + + sla->parent->rem_cir -= sla->cir; + break; + case RL_ROOT: + if (is_update) + sla->rem_cir = sla->cir - (old_cir - sla->rem_cir); + else + sla->rem_cir = sla->cir; + break; + default: + break; + } +} + +/** + * get_next_free_sla_id() - finds next free ID in the SLA array + * @rl_data: Pointer to ratelimiting data structure + * + * Return: + * * 0 : RL_NODES_CNT_MAX - correct ID + * * -ENOSPC - all SLA slots are in use + */ +static int get_next_free_sla_id(struct adf_rl *rl_data) +{ + int i = 0; + + while (i < RL_NODES_CNT_MAX && rl_data->sla[i++]) + ; + + if (i == RL_NODES_CNT_MAX) + return -ENOSPC; + + return i - 1; +} + +/** + * get_next_free_node_id() - finds next free ID in the array of that node type + * @rl_data: Pointer to ratelimiting data structure + * @sla: Pointer to SLA object for which the ID is searched + * + * Return: + * * 0 : RL_[NODE_TYPE]_MAX - correct ID + * * -ENOSPC - all slots of that type are in use + */ +static int get_next_free_node_id(struct adf_rl *rl_data, struct rl_sla *sla) +{ + struct adf_hw_device_data *hw_device = GET_HW_DATA(rl_data->accel_dev); + int max_id, i, step, rp_per_leaf; + struct rl_sla **sla_list; + + rp_per_leaf = hw_device->num_banks / hw_device->num_banks_per_vf; + + /* + * Static nodes mapping: + * root0 - cluster[0,4,8,12] - leaf[0-15] + * root1 - cluster[1,5,9,13] - leaf[16-31] + * root2 - cluster[2,6,10,14] - leaf[32-47] + */ + switch (sla->type) { + case RL_LEAF: + i = sla->srv * rp_per_leaf; + step = 1; + max_id = i + rp_per_leaf; + sla_list = rl_data->leaf; + break; + case RL_CLUSTER: + i = 
sla->srv; + step = 4; + max_id = RL_CLUSTER_MAX; + sla_list = rl_data->cluster; + break; + case RL_ROOT: + return sla->srv; + default: + return -EINVAL; + } + + while (i < max_id && sla_list[i]) + i += step; + + if (i >= max_id) + return -ENOSPC; + + return i; +} + +u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type) +{ + struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u64 avail_slice_cycles, allocated_tokens; + + if (!sla_val) + return 0; + + avail_slice_cycles = hw_data->clock_frequency; + + switch (svc_type) { + case ADF_SVC_ASYM: + avail_slice_cycles *= device_data->slices.pke_cnt; + break; + case ADF_SVC_SYM: + avail_slice_cycles *= device_data->slices.cph_cnt; + break; + case ADF_SVC_DC: + avail_slice_cycles *= device_data->slices.dcpr_cnt; + break; + default: + break; + } + + do_div(avail_slice_cycles, device_data->scan_interval); + allocated_tokens = avail_slice_cycles * sla_val; + do_div(allocated_tokens, device_data->scale_ref); + + return allocated_tokens; +} + +u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type) +{ + struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u64 allocated_ae_cycles, avail_ae_cycles; + + if (!sla_val) + return 0; + + avail_ae_cycles = hw_data->clock_frequency; + avail_ae_cycles *= hw_data->get_num_aes(hw_data) - 1; + do_div(avail_ae_cycles, device_data->scan_interval); + + sla_val *= device_data->max_tp[svc_type]; + sla_val /= device_data->scale_ref; + + allocated_ae_cycles = (sla_val * avail_ae_cycles); + do_div(allocated_ae_cycles, device_data->max_tp[svc_type]); + + return allocated_ae_cycles; +} + +u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type, bool is_bw_out) +{ + struct 
adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; + u64 sla_to_bytes, allocated_bw, sla_scaled; + + if (!sla_val) + return 0; + + sla_to_bytes = sla_val; + sla_to_bytes *= device_data->max_tp[svc_type]; + do_div(sla_to_bytes, device_data->scale_ref); + + sla_to_bytes *= (svc_type == ADF_SVC_ASYM) ? RL_TOKEN_ASYM_SIZE : + BYTES_PER_MBIT; + if (svc_type == ADF_SVC_DC && is_bw_out) + sla_to_bytes *= device_data->slices.dcpr_cnt - + device_data->dcpr_correction; + + sla_scaled = sla_to_bytes * device_data->pcie_scale_mul; + do_div(sla_scaled, device_data->pcie_scale_div); + allocated_bw = sla_scaled; + do_div(allocated_bw, RL_TOKEN_PCIE_SIZE); + do_div(allocated_bw, device_data->scan_interval); + + return allocated_bw; +} + +/** + * add_new_sla_entry() - creates a new SLA object and fills it with user data + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data for a new SLA + * @sla_out: Pointer to variable that will contain the address of a new + * SLA object if the operation succeeds + * + * Return: + * * 0 - ok + * * -ENOMEM - memory allocation failed + * * -EINVAL - invalid user input + * * -ENOSPC - all available SLAs are in use + */ +static int add_new_sla_entry(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in, + struct rl_sla **sla_out) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla *sla; + int ret = 0; + + sla = kzalloc(sizeof(*sla), GFP_KERNEL); + if (!sla) { + ret = -ENOMEM; + goto ret_err; + } + *sla_out = sla; + + if (!is_service_enabled(accel_dev, sla_in->srv)) { + dev_notice(&GET_DEV(accel_dev), + "Provided service is not enabled\n"); + ret = -EINVAL; + goto ret_err; + } + + sla->srv = sla_in->srv; + sla->type = sla_in->type; + ret = get_next_free_node_id(rl_data, sla); + if (ret < 0) { + dev_notice(&GET_DEV(accel_dev), + "Exceeded number of available nodes for that service\n"); + goto ret_err; + } + sla->node_id = ret; + + ret = 
get_next_free_sla_id(rl_data); + if (ret < 0) { + dev_notice(&GET_DEV(accel_dev), + "Allocated maximum SLAs number\n"); + goto ret_err; + } + sla->sla_id = ret; + + sla->parent = find_parent(rl_data, sla_in); + if (!sla->parent && sla->type != RL_ROOT) { + if (sla_in->parent_id != RL_PARENT_DEFAULT_ID) + dev_notice(&GET_DEV(accel_dev), + "Provided parent ID does not exist or cannot be parent for this SLA."); + else + dev_notice(&GET_DEV(accel_dev), + "Unable to find parent node for this service. Is service enabled?"); + ret = -EINVAL; + goto ret_err; + } + + if (sla->type == RL_LEAF) { + ret = prepare_rp_ids(accel_dev, sla, sla_in->rp_mask); + if (!sla->ring_pairs_cnt || ret) { + dev_notice(&GET_DEV(accel_dev), + "Unable to find ring pairs to assign to the leaf"); + if (!ret) + ret = -EINVAL; + + goto ret_err; + } + } + + return 0; + +ret_err: + kfree(sla); + *sla_out = NULL; + + return ret; +} + +static int initialize_default_nodes(struct adf_accel_dev *accel_dev) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct adf_rl_hw_data *device_data = rl_data->device_data; + struct adf_rl_sla_input_data sla_in = { }; + int ret = 0; + int i; + + /* Init root for each enabled service */ + sla_in.type = RL_ROOT; + sla_in.parent_id = RL_PARENT_DEFAULT_ID; + + for (i = 0; i < ADF_SVC_NONE; i++) { + if (!is_service_enabled(accel_dev, i)) + continue; + + sla_in.cir = device_data->scale_ref; + sla_in.pir = sla_in.cir; + sla_in.srv = i; + + ret = adf_rl_add_sla(accel_dev, &sla_in); + if (ret) + return ret; + } + + /* Init default cluster for each root */ + sla_in.type = RL_CLUSTER; + for (i = 0; i < ADF_SVC_NONE; i++) { + if (!rl_data->root[i]) + continue; + + sla_in.cir = rl_data->root[i]->cir; + sla_in.pir = sla_in.cir; + sla_in.srv = rl_data->root[i]->srv; + + ret = adf_rl_add_sla(accel_dev, &sla_in); + if (ret) + return ret; + } + + return 0; +} + +static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla) +{ + bool *rp_in_use = rl_data->rp_in_use; + 
struct rl_sla **sla_type_arr = NULL; + int i, sla_id, node_id; + u32 old_cir; + + sla_id = sla->sla_id; + node_id = sla->node_id; + old_cir = sla->cir; + sla->cir = 0; + sla->pir = 0; + + for (i = 0; i < sla->ring_pairs_cnt; i++) + rp_in_use[sla->ring_pairs_ids[i]] = false; + + update_budget(sla, old_cir, true); + get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); + assign_node_to_parent(rl_data->accel_dev, sla, true); + adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type); + mark_rps_usage(sla, rl_data->rp_in_use, false); + + kfree(sla); + rl_data->sla[sla_id] = NULL; + sla_type_arr[node_id] = NULL; +} + +/** + * add_update_sla() - handles the creation and the update of an SLA + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data for a new/updated SLA + * @is_update: flag to indicate if this is an update or an add operation + * + * Return: + * * 0 - ok + * * -ENOMEM - memory allocation failed + * * -EINVAL - user input data cannot be used to create SLA + * * -ENOSPC - all available SLAs are in use + */ +static int add_update_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in, bool is_update) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla **sla_type_arr = NULL; + struct rl_sla *sla = NULL; + u32 old_cir = 0; + int ret; + + if (!sla_in) { + dev_warn(&GET_DEV(accel_dev), + "SLA input data pointer is missing\n"); + ret = -EFAULT; + goto ret_err; + } + + /* Input validation */ + ret = validate_user_input(accel_dev, sla_in, is_update); + if (ret) + goto ret_err; + + mutex_lock(&rl_data->rl_lock); + + if (is_update) { + ret = validate_sla_id(accel_dev, sla_in->sla_id); + if (ret) + goto ret_err; + + sla = rl_data->sla[sla_in->sla_id]; + old_cir = sla->cir; + } else { + ret = add_new_sla_entry(accel_dev, sla_in, &sla); + if (ret) + goto ret_err; + } + + if (!is_enough_budget(rl_data, sla, sla_in, is_update)) { + dev_notice(&GET_DEV(accel_dev), + "Input value 
exceeds the remaining budget%s\n", + is_update ? " or more budget is already in use" : ""); + ret = -EINVAL; + goto ret_err; + } + sla->cir = sla_in->cir; + sla->pir = sla_in->pir; + + /* Apply SLA */ + assign_node_to_parent(accel_dev, sla, false); + ret = adf_rl_send_admin_add_update_msg(accel_dev, sla, is_update); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "Failed to apply an SLA\n"); + goto ret_err; + } + update_budget(sla, old_cir, is_update); + + if (!is_update) { + mark_rps_usage(sla, rl_data->rp_in_use, true); + get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); + sla_type_arr[sla->node_id] = sla; + rl_data->sla[sla->sla_id] = sla; + } + + sla_in->sla_id = sla->sla_id; + goto ret_ok; + +ret_err: + if (!is_update) { + sla_in->sla_id = -1; + kfree(sla); + } +ret_ok: + mutex_unlock(&rl_data->rl_lock); + return ret; +} + +/** + * adf_rl_add_sla() - handles the creation of an SLA + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data required to add an SLA + * + * Return: + * * 0 - ok + * * -ENOMEM - memory allocation failed + * * -EINVAL - invalid user input + * * -ENOSPC - all available SLAs are in use + */ +int adf_rl_add_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in) +{ + return add_update_sla(accel_dev, sla_in, false); +} + +/** + * adf_rl_update_sla() - handles the update of an SLA + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data required to update an SLA + * + * Return: + * * 0 - ok + * * -EINVAL - user input data cannot be used to update SLA + */ +int adf_rl_update_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in) +{ + return add_update_sla(accel_dev, sla_in, true); +} + +/** + * adf_rl_get_sla() - returns an existing SLA data + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user data where SLA info will be stored + * + * The sla_id for which data are requested should 
be set in sla_id structure + * + * Return: + * * 0 - ok + * * -EINVAL - provided sla_id does not exist + */ +int adf_rl_get_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in) +{ + struct rl_sla *sla; + int ret, i; + + ret = validate_sla_id(accel_dev, sla_in->sla_id); + if (ret) + return ret; + + sla = accel_dev->rate_limiting->sla[sla_in->sla_id]; + sla_in->type = sla->type; + sla_in->srv = sla->srv; + sla_in->cir = sla->cir; + sla_in->pir = sla->pir; + sla_in->rp_mask = 0U; + if (sla->parent) + sla_in->parent_id = sla->parent->sla_id; + else + sla_in->parent_id = RL_PARENT_DEFAULT_ID; + + for (i = 0; i < sla->ring_pairs_cnt; i++) + sla_in->rp_mask |= BIT(sla->ring_pairs_ids[i]); + + return 0; +} + +/** + * adf_rl_get_capability_remaining() - returns the remaining SLA value (CIR) for + * selected service or provided sla_id + * @accel_dev: pointer to acceleration device structure + * @srv: service ID for which capability is requested + * @sla_id: ID of the cluster or root to which we want assign a new SLA + * + * Check if the provided SLA id is valid. If it is and the service matches + * the requested service and the type is cluster or root, return the remaining + * capability. + * If the provided ID does not match the service or type, return the remaining + * capacity of the default cluster for that service. 
+ * + * Return: + * * Positive value - correct remaining value + * * -EINVAL - algorithm cannot find a remaining value for provided data + */ +int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev, + enum adf_base_services srv, int sla_id) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla *sla = NULL; + int i; + + if (srv >= ADF_SVC_NONE) + return -EINVAL; + + if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) { + sla = rl_data->sla[sla_id]; + + if (sla->srv == srv && sla->type <= RL_CLUSTER) + goto ret_ok; + } + + for (i = 0; i < RL_CLUSTER_MAX; i++) { + if (!rl_data->cluster[i]) + continue; + + if (rl_data->cluster[i]->srv == srv) { + sla = rl_data->cluster[i]; + goto ret_ok; + } + } + + return -EINVAL; +ret_ok: + return sla->rem_cir; +} + +/** + * adf_rl_remove_sla() - removes provided sla_id + * @accel_dev: pointer to acceleration device structure + * @sla_id: ID of the cluster or root to which we want to assign a new SLA + * + * Return: + * * 0 - ok + * * -EINVAL - wrong sla_id or it still has assigned children + */ +int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla *sla; + int ret = 0; + + mutex_lock(&rl_data->rl_lock); + ret = validate_sla_id(accel_dev, sla_id); + if (ret) + goto err_ret; + + sla = rl_data->sla[sla_id]; + + if (sla->type < RL_LEAF && sla->rem_cir != sla->cir) { + dev_notice(&GET_DEV(accel_dev), + "To remove parent SLA all its children must be removed first"); + ret = -EINVAL; + goto err_ret; + } + + clear_sla(rl_data, sla); + +err_ret: + mutex_unlock(&rl_data->rl_lock); + return ret; +} + +/** + * adf_rl_remove_sla_all() - removes all SLAs from device + * @accel_dev: pointer to acceleration device structure + * @incl_default: set to true if default SLAs also should be removed + */ +void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default) +{ + struct adf_rl *rl_data = 
accel_dev->rate_limiting; + int end_type = incl_default ? RL_ROOT : RL_LEAF; + struct rl_sla **sla_type_arr = NULL; + u32 max_id; + int i, j; + + mutex_lock(&rl_data->rl_lock); + + /* Unregister and remove all SLAs */ + for (j = RL_LEAF; j >= end_type; j--) { + max_id = get_sla_arr_of_type(rl_data, j, &sla_type_arr); + + for (i = 0; i < max_id; i++) { + if (!sla_type_arr[i]) + continue; + + clear_sla(rl_data, sla_type_arr[i]); + } + } + + mutex_unlock(&rl_data->rl_lock); +} + +int adf_rl_init(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_rl_hw_data *rl_hw_data = &hw_data->rl_data; + struct adf_rl *rl; + int ret = 0; + + /* Validate device parameters */ + if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_ASYM]) || + RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_SYM]) || + RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_DC]) || + RL_VALIDATE_NON_ZERO(rl_hw_data->scan_interval) || + RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_div) || + RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_mul) || + RL_VALIDATE_NON_ZERO(rl_hw_data->scale_ref)) { + ret = -EOPNOTSUPP; + goto err_ret; + } + + rl = kzalloc(sizeof(*rl), GFP_KERNEL); + if (!rl) { + ret = -ENOMEM; + goto err_ret; + } + + mutex_init(&rl->rl_lock); + rl->device_data = &accel_dev->hw_device->rl_data; + rl->accel_dev = accel_dev; + accel_dev->rate_limiting = rl; + +err_ret: + return ret; +} + +int adf_rl_start(struct adf_accel_dev *accel_dev) +{ + struct adf_rl_hw_data *rl_hw_data = &GET_HW_DATA(accel_dev)->rl_data; + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities; + int ret; + + if (!accel_dev->rate_limiting) { + ret = -EOPNOTSUPP; + goto ret_err; + } + + if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) { + dev_info(&GET_DEV(accel_dev), "not supported\n"); + ret = -EOPNOTSUPP; + goto ret_free; + } + + ADF_CSR_WR(pmisc_addr, rl_hw_data->pciin_tb_offset, + 
RL_TOKEN_GRANULARITY_PCIEIN_BUCKET); + ADF_CSR_WR(pmisc_addr, rl_hw_data->pciout_tb_offset, + RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET); + + ret = adf_rl_send_admin_init_msg(accel_dev, &rl_hw_data->slices); + if (ret) { + dev_err(&GET_DEV(accel_dev), "initialization failed\n"); + goto ret_free; + } + + ret = initialize_default_nodes(accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "failed to initialize default SLAs\n"); + goto ret_sla_rm; + } + + return 0; + +ret_sla_rm: + adf_rl_remove_sla_all(accel_dev, true); +ret_free: + kfree(accel_dev->rate_limiting); + accel_dev->rate_limiting = NULL; +ret_err: + return ret; +} + +void adf_rl_stop(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->rate_limiting) + return; + + adf_rl_remove_sla_all(accel_dev, true); +} + +void adf_rl_exit(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->rate_limiting) + return; + + kfree(accel_dev->rate_limiting); + accel_dev->rate_limiting = NULL; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h new file mode 100644 index 000000000000..1ccb6613c92e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_RL_H_ +#define ADF_RL_H_ + +#include +#include + +struct adf_accel_dev; + +#define RL_ROOT_MAX 4 +#define RL_CLUSTER_MAX 16 +#define RL_LEAF_MAX 64 +#define RL_NODES_CNT_MAX (RL_ROOT_MAX + RL_CLUSTER_MAX + RL_LEAF_MAX) +#define RL_RP_CNT_PER_LEAF_MAX 4U +#define RL_RP_CNT_MAX 64 +#define RL_SLA_EMPTY_ID -1 +#define RL_PARENT_DEFAULT_ID -1 + +enum rl_node_type { + RL_ROOT, + RL_CLUSTER, + RL_LEAF, +}; + +enum adf_base_services { + ADF_SVC_ASYM = 0, + ADF_SVC_SYM, + ADF_SVC_DC, + ADF_SVC_NONE, +}; + +/** + * struct adf_rl_sla_input_data - ratelimiting user input data structure + * @rp_mask: 64 bit bitmask of ring pair IDs which will be assigned to SLA. + * Eg. 
0x5 -> RP0 and RP2 assigned; 0xA005 -> RP0,2,13,15 assigned. + * @sla_id: ID of current SLA for operations update, rm, get. For the add + * operation, this field will be updated with the ID of the newly + * added SLA + * @parent_id: ID of the SLA to which the current one should be assigned. + * Set to -1 to refer to the default parent. + * @cir: Committed information rate. Rate guaranteed to be achieved. Input value + * is expressed in permille scale, i.e. 1000 refers to the maximum + * device throughput for a selected service. + * @pir: Peak information rate. Maximum rate available that the SLA can achieve. + * Input value is expressed in permille scale, i.e. 1000 refers to + * the maximum device throughput for a selected service. + * @type: SLA type: root, cluster, node + * @srv: Service associated to the SLA: asym, sym dc. + * + * This structure is used to perform operations on an SLA. + * Depending on the operation, some of the parameters are ignored. + * The following list reports which parameters should be set for each operation. 
+ * - add: all except sla_id + * - update: cir, pir, sla_id + * - rm: sla_id + * - rm_all: - + * - get: sla_id + * - get_capability_rem: srv, sla_id + */ +struct adf_rl_sla_input_data { + u64 rp_mask; + int sla_id; + int parent_id; + unsigned int cir; + unsigned int pir; + enum rl_node_type type; + enum adf_base_services srv; +}; + +struct rl_slice_cnt { + u8 dcpr_cnt; + u8 pke_cnt; + u8 cph_cnt; +}; + +struct adf_rl_hw_data { + u32 scale_ref; + u32 scan_interval; + u32 r2l_offset; + u32 l2c_offset; + u32 c2s_offset; + u32 pciin_tb_offset; + u32 pciout_tb_offset; + u32 pcie_scale_mul; + u32 pcie_scale_div; + u32 dcpr_correction; + u32 max_tp[RL_ROOT_MAX]; + struct rl_slice_cnt slices; +}; + +/** + * struct adf_rl - ratelimiting data structure + * @accel_dev: pointer to acceleration device data + * @device_data: pointer to rate limiting data specific to a device type (or revision) + * @sla: array of pointers to SLA objects + * @root: array of pointers to root type SLAs, element number reflects node_id + * @cluster: array of pointers to cluster type SLAs, element number reflects node_id + * @leaf: array of pointers to leaf type SLAs, element number reflects node_id + * @rp_in_use: array of ring pair IDs already used in one of SLAs + * @rl_lock: mutex object which is protecting data in this structure + * @input: structure which is used for holding the data received from user + */ +struct adf_rl { + struct adf_accel_dev *accel_dev; + struct adf_rl_hw_data *device_data; + /* mapping sla_id to SLA objects */ + struct rl_sla *sla[RL_NODES_CNT_MAX]; + struct rl_sla *root[RL_ROOT_MAX]; + struct rl_sla *cluster[RL_CLUSTER_MAX]; + struct rl_sla *leaf[RL_LEAF_MAX]; + bool rp_in_use[RL_RP_CNT_MAX]; + /* Mutex protecting writing to SLAs lists */ + struct mutex rl_lock; +}; + +/** + * struct rl_sla - SLA object data structure + * @parent: pointer to the parent SLA (root/cluster) + * @type: SLA type + * @srv: service associated with this SLA + * @sla_id: ID of the SLA, used as 
element number in SLA array and as identifier + * shared with the user + * @node_id: ID of node, each of SLA type have a separate ID list + * @cir: committed information rate + * @pir: peak information rate (PIR >= CIR) + * @rem_cir: if this SLA is a parent then this field represents a remaining + * value to be used by child SLAs. + * @ring_pairs_ids: array with numeric ring pairs IDs assigned to this SLA + * @ring_pairs_cnt: number of assigned ring pairs listed in the array above + */ +struct rl_sla { + struct rl_sla *parent; + enum rl_node_type type; + enum adf_base_services srv; + u32 sla_id; + u32 node_id; + u32 cir; + u32 pir; + u32 rem_cir; + u16 ring_pairs_ids[RL_RP_CNT_PER_LEAF_MAX]; + u16 ring_pairs_cnt; +}; + +int adf_rl_add_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in); +int adf_rl_update_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in); +int adf_rl_get_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in); +int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev, + enum adf_base_services srv, int sla_id); +int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id); +void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default); + +int adf_rl_init(struct adf_accel_dev *accel_dev); +int adf_rl_start(struct adf_accel_dev *accel_dev); +void adf_rl_stop(struct adf_accel_dev *accel_dev); +void adf_rl_exit(struct adf_accel_dev *accel_dev); + +u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type, bool is_bw_out); +u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type); +u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type); + +#endif /* ADF_RL_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c new file 
mode 100644 index 000000000000..698a14f4ce66 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include + +#include "adf_admin.h" +#include "adf_accel_devices.h" +#include "adf_rl_admin.h" + +static void +prep_admin_req_msg(struct rl_sla *sla, dma_addr_t dma_addr, + struct icp_qat_fw_init_admin_sla_config_params *fw_params, + struct icp_qat_fw_init_admin_req *req, bool is_update) +{ + req->cmd_id = is_update ? ICP_QAT_FW_RL_UPDATE : ICP_QAT_FW_RL_ADD; + req->init_cfg_ptr = dma_addr; + req->init_cfg_sz = sizeof(*fw_params); + req->node_id = sla->node_id; + req->node_type = sla->type; + req->rp_count = sla->ring_pairs_cnt; + req->svc_type = sla->srv; +} + +static void +prep_admin_req_params(struct adf_accel_dev *accel_dev, struct rl_sla *sla, + struct icp_qat_fw_init_admin_sla_config_params *fw_params) +{ + fw_params->pcie_in_cir = + adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, false); + fw_params->pcie_in_pir = + adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, false); + fw_params->pcie_out_cir = + adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, true); + fw_params->pcie_out_pir = + adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, true); + + fw_params->slice_util_cir = + adf_rl_calculate_slice_tokens(accel_dev, sla->cir, sla->srv); + fw_params->slice_util_pir = + adf_rl_calculate_slice_tokens(accel_dev, sla->pir, sla->srv); + + fw_params->ae_util_cir = + adf_rl_calculate_ae_cycles(accel_dev, sla->cir, sla->srv); + fw_params->ae_util_pir = + adf_rl_calculate_ae_cycles(accel_dev, sla->pir, sla->srv); + + memcpy(fw_params->rp_ids, sla->ring_pairs_ids, + sizeof(sla->ring_pairs_ids)); +} + +int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev, + struct rl_slice_cnt *slices_int) +{ + struct icp_qat_fw_init_admin_slice_cnt slices_resp = { }; + int ret; + + ret = adf_send_admin_rl_init(accel_dev, 
&slices_resp); + if (ret) + return ret; + + slices_int->dcpr_cnt = slices_resp.dcpr_cnt; + slices_int->pke_cnt = slices_resp.pke_cnt; + /* For symmetric crypto, slice tokens are relative to the UCS slice */ + slices_int->cph_cnt = slices_resp.ucs_cnt; + + return 0; +} + +int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool is_update) +{ + struct icp_qat_fw_init_admin_sla_config_params *fw_params; + struct icp_qat_fw_init_admin_req req = { }; + dma_addr_t dma_addr; + int ret; + + fw_params = dma_alloc_coherent(&GET_DEV(accel_dev), sizeof(*fw_params), + &dma_addr, GFP_KERNEL); + if (!fw_params) + return -ENOMEM; + + prep_admin_req_params(accel_dev, sla, fw_params); + prep_admin_req_msg(sla, dma_addr, fw_params, &req, is_update); + ret = adf_send_admin_rl_add_update(accel_dev, &req); + + dma_free_coherent(&GET_DEV(accel_dev), sizeof(*fw_params), fw_params, + dma_addr); + + return ret; +} + +int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type) +{ + return adf_send_admin_rl_delete(accel_dev, node_id, node_type); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h new file mode 100644 index 000000000000..dd5419b7e896 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_RL_ADMIN_H_ +#define ADF_RL_ADMIN_H_ + +#include + +#include "adf_rl.h" + +int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev, + struct rl_slice_cnt *slices_int); +int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool is_update); +int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type); + +#endif /* ADF_RL_ADMIN_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h 
b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index e4de9a30e0bd..cd418b51d9f3 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -5,6 +5,8 @@ #include "icp_qat_fw.h" +#define RL_MAX_RP_IDS 16 + enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_INIT_AE = 0, ICP_QAT_FW_TRNG_ENABLE = 1, @@ -19,10 +21,14 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_CRYPTO_CAPABILITY_GET = 10, ICP_QAT_FW_DC_CHAIN_INIT = 11, ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13, + ICP_QAT_FW_RL_INIT = 15, ICP_QAT_FW_TIMER_GET = 19, ICP_QAT_FW_CNV_STATS_GET = 20, ICP_QAT_FW_PM_STATE_CONFIG = 128, ICP_QAT_FW_PM_INFO = 129, + ICP_QAT_FW_RL_ADD = 134, + ICP_QAT_FW_RL_UPDATE = 135, + ICP_QAT_FW_RL_REMOVE = 136, }; enum icp_qat_fw_init_admin_resp_status { @@ -30,6 +36,30 @@ enum icp_qat_fw_init_admin_resp_status { ICP_QAT_FW_INIT_RESP_STATUS_FAIL }; +struct icp_qat_fw_init_admin_slice_cnt { + __u8 cpr_cnt; + __u8 xlt_cnt; + __u8 dcpr_cnt; + __u8 pke_cnt; + __u8 wat_cnt; + __u8 wcp_cnt; + __u8 ucs_cnt; + __u8 cph_cnt; + __u8 ath_cnt; +}; + +struct icp_qat_fw_init_admin_sla_config_params { + __u32 pcie_in_cir; + __u32 pcie_in_pir; + __u32 pcie_out_cir; + __u32 pcie_out_pir; + __u32 slice_util_cir; + __u32 slice_util_pir; + __u32 ae_util_cir; + __u32 ae_util_pir; + __u16 rp_ids[RL_MAX_RP_IDS]; +}; + struct icp_qat_fw_init_admin_req { __u16 init_cfg_sz; __u8 resrvd1; @@ -49,6 +79,13 @@ struct icp_qat_fw_init_admin_req { struct { __u32 heartbeat_ticks; }; + struct { + __u16 node_id; + __u8 node_type; + __u8 svc_type; + __u8 resrvd5[3]; + __u8 rp_count; + }; __u32 idle_filter; }; @@ -110,6 +147,7 @@ struct icp_qat_fw_init_admin_resp { __u32 unsuccessful_count; __u64 resrvd8; }; + struct icp_qat_fw_init_admin_slice_cnt slices; __u16 fw_capabilities; }; } __packed; -- Gitee From 910df10110117ef46448c4ac1c78da2775c2d150 Mon Sep 17 00:00:00 2001 From: Ciunas Bennett Date: Fri, 20 Oct 2023 15:49:29 +0200 Subject: 
[PATCH 0717/2138] crypto: qat - add rate limiting sysfs interface ANBZ: #8589 commit db74e16258198094701f18ab4da3410c44ffdb2e upstream. Intel-SIG: commit db74e1625819 crypto: qat - add rate limiting sysfs interface Backport to support Intel QAT in-tree driver Add an interface for the rate limiting feature which allows to add, remove and modify a QAT SLA (Service Level Agreement). This adds a new sysfs attribute group, `qat_rl`, which can be accessed from /sys/bus/pci/devices/ with the following hierarchy: |-+ qat_rl |---- id (RW) # SLA identifier |---- cir (RW) # Committed Information Rate |---- pir (RW) # Peak Information Rate |---- srv (RW) # Service to be rate limited |---- rp (RW) (HEX) # Ring pairs to be rate limited |---- cap_rem (RW) # Remaining capability for a service |---- sla_op (WO) # Allows to perform an operation on an SLA The API works by setting the appropriate RW attributes and then issuing a command through the `sla_op`. For example, to create an SLA, a user needs to input the necessary data into the attributes cir, pir, srv and rp and then write into `sla_op` the command `add` to execute the operation. The API also provides `cap_rem` attribute to get information about the remaining device capability within a certain service which is required when setting an SLA. 
Signed-off-by: Ciunas Bennett Reviewed-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- Documentation/ABI/testing/sysfs-driver-qat_rl | 226 +++++++++ drivers/crypto/intel/qat/qat_common/Makefile | 1 + drivers/crypto/intel/qat/qat_common/adf_rl.c | 10 + drivers/crypto/intel/qat/qat_common/adf_rl.h | 7 + .../intel/qat/qat_common/adf_sysfs_rl.c | 451 ++++++++++++++++++ .../intel/qat/qat_common/adf_sysfs_rl.h | 11 + 6 files changed, 706 insertions(+) create mode 100644 Documentation/ABI/testing/sysfs-driver-qat_rl create mode 100644 drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h diff --git a/Documentation/ABI/testing/sysfs-driver-qat_rl b/Documentation/ABI/testing/sysfs-driver-qat_rl new file mode 100644 index 000000000000..8c282ae3155d --- /dev/null +++ b/Documentation/ABI/testing/sysfs-driver-qat_rl @@ -0,0 +1,226 @@ +What: /sys/bus/pci/devices//qat_rl/sla_op +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (WO) This attribute is used to perform an operation on an SLA. + The supported operations are: add, update, rm, rm_all, and get. + + Input values must be filled through the associated attribute in + this group before a write to this file. + If the operation completes successfully, the associated + attributes will be updated. + The associated attributes are: cir, pir, srv, rp, and id. + + Supported operations: + + * add: Creates a new SLA with the provided inputs from user. 
+ * Inputs: cir, pir, srv, and rp + * Output: id + + * get: Returns the configuration of the specified SLA in id attribute + * Inputs: id + * Outputs: cir, pir, srv, and rp + + * update: Updates the SLA with new values set in the following attributes + * Inputs: id, cir, and pir + + * rm: Removes the specified SLA in the id attribute. + * Inputs: id + + * rm_all: Removes all the configured SLAs. + * Inputs: None + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_rl/rp +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) When read, reports the current assigned ring pairs for the + queried SLA. + When written to, configures the ring pairs associated to a new SLA. + + The value is a 64-bit bit mask and is written/displayed in hex. + Each bit of this mask represents a single ring pair i.e., + bit 1 == ring pair id 0; bit 3 == ring pair id 2. + + Selected ring pairs must be assigned to a single service, + i.e. the one provided with the srv attribute. The service + assigned to a certain ring pair can be checked by querying + the attribute qat/rp2srv. + + The maximum number of ring pairs is 4 per SLA. + + Applicability in sla_op: + + * WRITE: add operation + * READ: get operation + + Example usage:: + + ## Read + # echo 4 > /sys/bus/pci/devices//qat_rl/id + # cat /sys/bus/pci/devices//qat_rl/rp + 0x5 + + ## Write + # echo 0x5 > /sys/bus/pci/devices//qat_rl/rp + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_rl/id +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) If written to, the value is used to retrieve a particular + SLA and operate on it. + This is valid only for the following operations: update, rm, + and get. + A read of this attribute is only guaranteed to have correct data + after creation of an SLA. 
+ + Applicability in sla_op: + + * WRITE: rm and update operations + * READ: add and get operations + + Example usage:: + + ## Read + ## Set attributes e.g. cir, pir, srv, etc + # echo "add" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/id + 4 + + ## Write + # echo 7 > /sys/bus/pci/devices//qat_rl/id + # echo "get" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/rp + 0x5 ## ring pair ID 0 and ring pair ID 2 + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_rl/cir +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) Committed information rate (CIR). Rate guaranteed to be + achieved by a particular SLA. The value is expressed in + permille scale, i.e. 1000 refers to the maximum device + throughput for a selected service. + + After sending a "get" to sla_op, this will be populated with the + CIR for that queried SLA. + Write to this file before sending an "add/update" sla_op, to set + the SLA to the specified value. + + Applicability in sla_op: + + * WRITE: add and update operations + * READ: get operation + + Example usage:: + + ## Write + # echo 500 > /sys/bus/pci/devices//qat_rl/cir + # echo "add" /sys/bus/pci/devices//qat_rl/sla_op + + ## Read + # echo 4 > /sys/bus/pci/devices//qat_rl/id + # echo "get" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/cir + 500 + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_rl/pir +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) Peak information rate (PIR). The maximum rate that can be + achieved by that particular SLA. An SLA can reach a value + between CIR and PIR when the device is not fully utilized by + requests from other users (assigned to different SLAs). + + After sending a "get" to sla_op, this will be populated with the + PIR for that queried SLA. 
+ Write to this file before sending an "add/update" sla_op, to set + the SLA to the specified value. + + Applicability in sla_op: + + * WRITE: add and update operations + * READ: get operation + + Example usage:: + + ## Write + # echo 750 > /sys/bus/pci/devices//qat_rl/pir + # echo "add" > /sys/bus/pci/devices//qat_rl/sla_op + + ## Read + # echo 4 > /sys/bus/pci/devices//qat_rl/id + # echo "get" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/pir + 750 + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_rl/srv +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) Service (SRV). Represents the service (sym, asym, dc) + associated to an SLA. + Can be written to or queried to set/show the SRV type for an SLA. + The SRV attribute is used to specify the SRV type before adding + an SLA. After an SLA is configured, reports the service + associated to that SLA. + + Applicability in sla_op: + + * WRITE: add and update operations + * READ: get operation + + Example usage:: + + ## Write + # echo "dc" > /sys/bus/pci/devices//qat_rl/srv + # echo "add" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/id + 4 + + ## Read + # echo 4 > /sys/bus/pci/devices//qat_rl/id + # echo "get" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/srv + dc + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_rl/cap_rem +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) This file will return the remaining capability for a + particular service/sla. This is the remaining value that a new + SLA can be set to or a current SLA can be increased with. 
+ + Example usage:: + + # echo "asym" > /sys/bus/pci/devices//qat_rl/cap_rem + # cat /sys/bus/pci/devices//qat_rl/cap_rem + 250 + # echo 250 > /sys/bus/pci/devices//qat_rl/cir + # echo "add" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/cap_rem + 0 + + This attribute is only available for qat_4xxx devices. diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 5f09dfd4798b..779a8aa0b8d2 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -31,6 +31,7 @@ intel_qat-objs := adf_cfg.o \ qat_algs_send.o \ adf_rl.o \ adf_rl_admin.o \ + adf_sysfs_rl.o \ qat_uclo.o \ qat_hal.o \ qat_bl.o diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c index 88a03105b52a..86e3e2152b1b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -16,6 +16,7 @@ #include "adf_common_drv.h" #include "adf_rl_admin.h" #include "adf_rl.h" +#include "adf_sysfs_rl.h" #define RL_TOKEN_GRANULARITY_PCIEIN_BUCKET 0U #define RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET 0U @@ -1130,8 +1131,16 @@ int adf_rl_start(struct adf_accel_dev *accel_dev) goto ret_sla_rm; } + ret = adf_sysfs_rl_add(accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), "failed to add sysfs interface\n"); + goto ret_sysfs_rm; + } + return 0; +ret_sysfs_rm: + adf_sysfs_rl_rm(accel_dev); ret_sla_rm: adf_rl_remove_sla_all(accel_dev, true); ret_free: @@ -1146,6 +1155,7 @@ void adf_rl_stop(struct adf_accel_dev *accel_dev) if (!accel_dev->rate_limiting) return; + adf_sysfs_rl_rm(accel_dev); adf_rl_remove_sla_all(accel_dev, true); } diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h index 1ccb6613c92e..eb5a330f8543 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.h +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h @@ -75,6 +75,12 @@ struct 
rl_slice_cnt { u8 cph_cnt; }; +struct adf_rl_interface_data { + struct adf_rl_sla_input_data input; + enum adf_base_services cap_rem_srv; + struct rw_semaphore lock; +}; + struct adf_rl_hw_data { u32 scale_ref; u32 scan_interval; @@ -113,6 +119,7 @@ struct adf_rl { bool rp_in_use[RL_RP_CNT_MAX]; /* Mutex protecting writing to SLAs lists */ struct mutex rl_lock; + struct adf_rl_interface_data user_input; }; /** diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c new file mode 100644 index 000000000000..abf9c52474ec --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#define dev_fmt(fmt) "RateLimiting: " fmt + +#include +#include +#include +#include + +#include "adf_common_drv.h" +#include "adf_rl.h" +#include "adf_sysfs_rl.h" + +#define GET_RL_STRUCT(accel_dev) ((accel_dev)->rate_limiting->user_input) + +enum rl_ops { + ADD, + UPDATE, + RM, + RM_ALL, + GET, +}; + +enum rl_params { + RP_MASK, + ID, + CIR, + PIR, + SRV, + CAP_REM_SRV, +}; + +static const char *const rl_services[] = { + [ADF_SVC_ASYM] = "asym", + [ADF_SVC_SYM] = "sym", + [ADF_SVC_DC] = "dc", +}; + +static const char *const rl_operations[] = { + [ADD] = "add", + [UPDATE] = "update", + [RM] = "rm", + [RM_ALL] = "rm_all", + [GET] = "get", +}; + +static int set_param_u(struct device *dev, enum rl_params param, u64 set) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret = 0; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_write(&data->lock); + switch (param) { + case RP_MASK: + data->input.rp_mask = set; + break; + case CIR: + data->input.cir = set; + break; + case PIR: + data->input.pir = set; + break; + case SRV: + data->input.srv = set; + break; + case CAP_REM_SRV: + data->cap_rem_srv = 
set; + break; + default: + ret = -EINVAL; + break; + } + up_write(&data->lock); + + return ret; +} + +static int set_param_s(struct device *dev, enum rl_params param, int set) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev || param != ID) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_write(&data->lock); + data->input.sla_id = set; + up_write(&data->lock); + + return 0; +} + +static int get_param_u(struct device *dev, enum rl_params param, u64 *get) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret = 0; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_read(&data->lock); + switch (param) { + case RP_MASK: + *get = data->input.rp_mask; + break; + case CIR: + *get = data->input.cir; + break; + case PIR: + *get = data->input.pir; + break; + case SRV: + *get = data->input.srv; + break; + default: + ret = -EINVAL; + } + up_read(&data->lock); + + return ret; +} + +static int get_param_s(struct device *dev, enum rl_params param) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret = 0; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_read(&data->lock); + if (param == ID) + ret = data->input.sla_id; + up_read(&data->lock); + + return ret; +} + +static ssize_t rp_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, RP_MASK, &get); + if (ret) + return ret; + + return sysfs_emit(buf, "%#llx\n", get); +} + +static ssize_t rp_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err; + u64 val; + + err = kstrtou64(buf, 16, &val); + if (err) + return err; + + err = set_param_u(dev, 
RP_MASK, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(rp); + +static ssize_t id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%d\n", get_param_s(dev, ID)); +} + +static ssize_t id_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err; + int val; + + err = kstrtoint(buf, 10, &val); + if (err) + return err; + + err = set_param_s(dev, ID, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(id); + +static ssize_t cir_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, CIR, &get); + if (ret) + return ret; + + return sysfs_emit(buf, "%llu\n", get); +} + +static ssize_t cir_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int err; + + err = kstrtouint(buf, 10, &val); + if (err) + return err; + + err = set_param_u(dev, CIR, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(cir); + +static ssize_t pir_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, PIR, &get); + if (ret) + return ret; + + return sysfs_emit(buf, "%llu\n", get); +} + +static ssize_t pir_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int err; + + err = kstrtouint(buf, 10, &val); + if (err) + return err; + + err = set_param_u(dev, PIR, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(pir); + +static ssize_t srv_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, SRV, &get); + if (ret) + return ret; + + if (get == ADF_SVC_NONE) + return -EINVAL; + + return sysfs_emit(buf, "%s\n", rl_services[get]); +} + +static ssize_t srv_store(struct device *dev, struct device_attribute 
*attr, + const char *buf, size_t count) +{ + unsigned int val; + int ret; + + ret = sysfs_match_string(rl_services, buf); + if (ret < 0) + return ret; + + val = ret; + ret = set_param_u(dev, SRV, val); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR_RW(srv); + +static ssize_t cap_rem_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret, rem_cap; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_read(&data->lock); + rem_cap = adf_rl_get_capability_remaining(accel_dev, data->cap_rem_srv, + RL_SLA_EMPTY_ID); + up_read(&data->lock); + if (rem_cap < 0) + return rem_cap; + + ret = sysfs_emit(buf, "%u\n", rem_cap); + + return ret; +} + +static ssize_t cap_rem_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int ret; + + ret = sysfs_match_string(rl_services, buf); + if (ret < 0) + return ret; + + val = ret; + ret = set_param_u(dev, CAP_REM_SRV, val); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR_RW(cap_rem); + +static ssize_t sla_op_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + ret = sysfs_match_string(rl_operations, buf); + if (ret < 0) + return ret; + + down_write(&data->lock); + switch (ret) { + case ADD: + data->input.parent_id = RL_PARENT_DEFAULT_ID; + data->input.type = RL_LEAF; + data->input.sla_id = 0; + ret = adf_rl_add_sla(accel_dev, &data->input); + if (ret) + goto err_free_lock; + break; + case UPDATE: + ret = adf_rl_update_sla(accel_dev, &data->input); + if (ret) + goto err_free_lock; + break; + 
case RM: + ret = adf_rl_remove_sla(accel_dev, data->input.sla_id); + if (ret) + goto err_free_lock; + break; + case RM_ALL: + adf_rl_remove_sla_all(accel_dev, false); + break; + case GET: + ret = adf_rl_get_sla(accel_dev, &data->input); + if (ret) + goto err_free_lock; + break; + default: + ret = -EINVAL; + goto err_free_lock; + } + up_write(&data->lock); + + return count; + +err_free_lock: + up_write(&data->lock); + + return ret; +} +static DEVICE_ATTR_WO(sla_op); + +static struct attribute *qat_rl_attrs[] = { + &dev_attr_rp.attr, + &dev_attr_id.attr, + &dev_attr_cir.attr, + &dev_attr_pir.attr, + &dev_attr_srv.attr, + &dev_attr_cap_rem.attr, + &dev_attr_sla_op.attr, + NULL, +}; + +static struct attribute_group qat_rl_group = { + .attrs = qat_rl_attrs, + .name = "qat_rl", +}; + +int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev) +{ + struct adf_rl_interface_data *data; + int ret; + + data = &GET_RL_STRUCT(accel_dev); + + ret = device_add_group(&GET_DEV(accel_dev), &qat_rl_group); + if (ret) + dev_err(&GET_DEV(accel_dev), + "Failed to create qat_rl attribute group\n"); + + data->cap_rem_srv = ADF_SVC_NONE; + data->input.srv = ADF_SVC_NONE; + + return ret; +} + +void adf_sysfs_rl_rm(struct adf_accel_dev *accel_dev) +{ + device_remove_group(&GET_DEV(accel_dev), &qat_rl_group); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h new file mode 100644 index 000000000000..22d36aa8a757 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_SYSFS_RL_H_ +#define ADF_SYSFS_RL_H_ + +struct adf_accel_dev; + +int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev); +void adf_sysfs_rl_rm(struct adf_accel_dev *accel_dev); + +#endif /* ADF_SYSFS_RL_H_ */ -- Gitee From 383c94a04395399994136658a22ad726503974ff Mon Sep 17 00:00:00 2001 From: Ciunas Bennett Date: Fri, 20 Oct 2023 
15:49:30 +0200 Subject: [PATCH 0718/2138] crypto: qat - add rp2svc sysfs attribute ANBZ: #8589 commit dbc8876dd873a6ac5e3191b419d2de5ca613165f upstream. Intel-SIG: commit dbc8876dd873 crypto: qat - add rp2svc sysfs attribute Backport to support Intel QAT in-tree driver Add the attribute `rp2svc` to the `qat` attribute group. This provides a way for a user to query a specific ring pair for the type of service that is currently configured for. When read, the service will be returned for the defined ring pair. When written to this value will be stored as the ring pair to return the service of. Signed-off-by: Ciunas Bennett Reviewed-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- Documentation/ABI/testing/sysfs-driver-qat | 32 +++++++++ .../intel/qat/qat_common/adf_accel_devices.h | 6 ++ .../crypto/intel/qat/qat_common/adf_sysfs.c | 66 +++++++++++++++++++ 3 files changed, 104 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat index 96834d103a09..f24a5ddca94b 100644 --- a/Documentation/ABI/testing/sysfs-driver-qat +++ b/Documentation/ABI/testing/sysfs-driver-qat @@ -95,3 +95,35 @@ Description: (RW) This configuration option provides a way to force the device i 0 This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat/rp2srv +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) This attribute provides a way for a user to query a + specific ring pair for the type of service that it is currently + configured for. + + When written to, the value is cached and used to perform the + read operation. Allowed values are in the range 0 to N-1, where + N is the max number of ring pairs supported by a device. 
This + can be queried using the attribute qat/num_rps. + + A read returns the service associated to the ring pair queried. + + The values are: + + * dc: the ring pair is configured for running compression services + * sym: the ring pair is configured for running symmetric crypto + services + * asym: the ring pair is configured for running asymmetric crypto + services + + Example usage:: + + # echo 1 > /sys/bus/pci/devices//qat/rp2srv + # cat /sys/bus/pci/devices//qat/rp2srv + sym + + This attribute is only available for qat_4xxx devices. diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 30c2b15ff801..4ff5729a3496 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -340,6 +340,11 @@ struct adf_pm { char __user *buf, size_t count, loff_t *pos); }; +struct adf_sysfs { + int ring_num; + struct rw_semaphore lock; /* protects access to the fields in this struct */ +}; + struct adf_accel_dev { struct adf_etr_data *transport; struct adf_hw_device_data *hw_device; @@ -361,6 +366,7 @@ struct adf_accel_dev { struct adf_timer *timer; struct adf_heartbeat *heartbeat; struct adf_rl *rate_limiting; + struct adf_sysfs sysfs; union { struct { /* protects VF2PF interrupts access */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index f4a89f7ed4e9..9317127128a9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -8,6 +8,8 @@ #include "adf_cfg_services.h" #include "adf_common_drv.h" +#define UNSET_RING_NUM -1 + static const char * const state_operations[] = { [DEV_DOWN] = "down", [DEV_UP] = "up", @@ -205,10 +207,72 @@ static DEVICE_ATTR_RW(pm_idle_enabled); static DEVICE_ATTR_RW(state); static DEVICE_ATTR_RW(cfg_services); +static ssize_t rp2srv_show(struct device *dev, struct device_attribute 
*attr, + char *buf) +{ + struct adf_hw_device_data *hw_data; + struct adf_accel_dev *accel_dev; + enum adf_cfg_service_type svc; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + hw_data = GET_HW_DATA(accel_dev); + + if (accel_dev->sysfs.ring_num == UNSET_RING_NUM) + return -EINVAL; + + down_read(&accel_dev->sysfs.lock); + svc = GET_SRV_TYPE(accel_dev, accel_dev->sysfs.ring_num % + hw_data->num_banks_per_vf); + up_read(&accel_dev->sysfs.lock); + + switch (svc) { + case COMP: + return sysfs_emit(buf, "%s\n", ADF_CFG_DC); + case SYM: + return sysfs_emit(buf, "%s\n", ADF_CFG_SYM); + case ASYM: + return sysfs_emit(buf, "%s\n", ADF_CFG_ASYM); + default: + break; + } + return -EINVAL; +} + +static ssize_t rp2srv_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + int ring, num_rings, ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + ret = kstrtouint(buf, 10, &ring); + if (ret) + return ret; + + num_rings = GET_MAX_BANKS(accel_dev); + if (ring >= num_rings) { + dev_err(&GET_DEV(accel_dev), + "Device does not support more than %u ring pairs\n", + num_rings); + return -EINVAL; + } + + down_write(&accel_dev->sysfs.lock); + accel_dev->sysfs.ring_num = ring; + up_write(&accel_dev->sysfs.lock); + + return count; +} +static DEVICE_ATTR_RW(rp2srv); + static struct attribute *qat_attrs[] = { &dev_attr_state.attr, &dev_attr_cfg_services.attr, &dev_attr_pm_idle_enabled.attr, + &dev_attr_rp2srv.attr, NULL, }; @@ -227,6 +291,8 @@ int adf_sysfs_init(struct adf_accel_dev *accel_dev) "Failed to create qat attribute group: %d\n", ret); } + accel_dev->sysfs.ring_num = UNSET_RING_NUM; + return ret; } EXPORT_SYMBOL_GPL(adf_sysfs_init); -- Gitee From 624344ee3d8e15828c6b79b1774405a5ae212b36 Mon Sep 17 00:00:00 2001 From: Ciunas Bennett Date: Fri, 20 Oct 2023 15:49:31 +0200 Subject: [PATCH 0719/2138] crypto: qat - add num_rps sysfs attribute ANBZ: 
#8589 commit 71fed09b49c168435fc28d57870007495475d946 upstream. Intel-SIG: commit 71fed09b49c1 crypto: qat - add num_rps sysfs attribute Backport to support Intel QAT in-tree driver Add the attribute `num_rps` to the `qat` attribute group. This returns the number of ring pairs that a single device has. This allows to know the maximum value that can be set to the attribute `rp2svc`. Signed-off-by: Ciunas Bennett Reviewed-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Reviewed-by: Tero Kristo Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- Documentation/ABI/testing/sysfs-driver-qat | 14 ++++++++++++++ drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 14 ++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat index f24a5ddca94b..bbf329cf0d67 100644 --- a/Documentation/ABI/testing/sysfs-driver-qat +++ b/Documentation/ABI/testing/sysfs-driver-qat @@ -127,3 +127,17 @@ Description: sym This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat/num_rps +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RO) Returns the number of ring pairs that a single device has. + + Example usage:: + + # cat /sys/bus/pci/devices//qat/num_rps + 64 + + This attribute is only available for qat_4xxx devices. 
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index 9317127128a9..ddffc98119c6 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -268,11 +268,25 @@ static ssize_t rp2srv_store(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RW(rp2srv); +static ssize_t num_rps_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + return sysfs_emit(buf, "%u\n", GET_MAX_BANKS(accel_dev)); +} +static DEVICE_ATTR_RO(num_rps); + static struct attribute *qat_attrs[] = { &dev_attr_state.attr, &dev_attr_cfg_services.attr, &dev_attr_pm_idle_enabled.attr, &dev_attr_rp2srv.attr, + &dev_attr_num_rps.attr, NULL, }; -- Gitee From c01d75ef44ddd5f4834a13312dc5d4e91d0ed14d Mon Sep 17 00:00:00 2001 From: Xingui Yang Date: Tue, 5 Sep 2023 02:48:33 +0000 Subject: [PATCH 0720/2138] seq_file: add helper macro to define attribute for rw file ANBZ: #8589 commit 9cba82bba500e3ce875381350f289cfb3aa633ba upstream. Intel-SIG: commit 9cba82bba500 seq_file: add helper macro to define attribute for rw file Backport to support Intel QAT in-tree driver Patch series "Add helper macro DEFINE_SHOW_STORE_ATTRIBUTE() at seq_file.c", v6. We already own DEFINE_SHOW_ATTRIBUTE() helper macro for defining attribute for read-only file, but we found many of drivers also want a helper macro for read-write file too. So we add this helper macro to reduce duplicated code. This patch (of 3): We already own DEFINE_SHOW_ATTRIBUTE() helper macro for defining attribute for read-only file, but many of drivers want a helper macro for read-write file too. So we add DEFINE_SHOW_STORE_ATTRIBUTE() helper to reduce duplicated code. 
Link: https://lkml.kernel.org/r/20230905024835.43219-1-yangxingui@huawei.com Link: https://lkml.kernel.org/r/20230905024835.43219-2-yangxingui@huawei.com Signed-off-by: Luo Jiaxing Co-developed-by: Xingui Yang Signed-off-by: Xingui Yang Reviewed-by: Andy Shevchenko Cc: Al Viro Cc: Animesh Manna Cc: Anshuman Gupta Cc: Damien Le Moal Cc: Felipe Balbi Cc: Greg Kroah-Hartman Cc: Himanshu Madhani Cc: James Bottomley Cc: John Garry Cc: Martin K. Petersen Cc: Uma Shankar Cc: Xiang Chen Cc: Zeng Tao Signed-off-by: Andrew Morton [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- include/linux/seq_file.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 386ab580b839..234bcdb1fba4 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -207,6 +207,21 @@ static const struct file_operations __name ## _fops = { \ .release = single_release, \ } +#define DEFINE_SHOW_STORE_ATTRIBUTE(__name) \ +static int __name ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, __name ## _show, inode->i_private); \ +} \ + \ +static const struct file_operations __name ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __name ## _open, \ + .read = seq_read, \ + .write = __name ## _write, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + #define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ { \ -- Gitee From 0728d8d30e7935bb3a03cde3551a5d61901cef1b Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 28 Nov 2023 19:44:03 +0200 Subject: [PATCH 0721/2138] units: add missing header ANBZ: #8589 commit 8e92157d7f6190c86bfd6144a409001469827100 upstream. Intel-SIG: commit 8e92157d7f61 units: add missing header Backport to support Intel QAT in-tree driver BITS_PER_BYTE is defined in bits.h. 
Link: https://lkml.kernel.org/r/20231128174404.393393-1-andriy.shevchenko@linux.intel.com Fixes: e8eed5f7366f ("units: Add BYTES_PER_*BIT") Signed-off-by: Andy Shevchenko Reviewed-by: Randy Dunlap Cc: Damian Muszynski Cc: Rasmus Villemoes Cc: Herbert Xu Signed-off-by: Andrew Morton [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- include/linux/units.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/units.h b/include/linux/units.h index ff1bd6b5f5b3..45110daaf8d3 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -2,6 +2,7 @@ #ifndef _LINUX_UNITS_H #define _LINUX_UNITS_H +#include #include /* Metric prefixes in accordance with Système international (d'unités) */ -- Gitee From 21613da1bf80b1a1014a48373786b5492db78d8f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 31 Oct 2023 11:58:32 +0300 Subject: [PATCH 0722/2138] crypto: qat - prevent underflow in rp2srv_store() ANBZ: #8589 commit e53c741303a59ee1682e11f61b7772863e02526d upstream. Intel-SIG: commit e53c741303a5 crypto: qat - prevent underflow in rp2srv_store() Backport to support Intel QAT in-tree driver The "ring" variable has an upper bounds check but nothing checks for negatives. This code uses kstrtouint() already and it was obviously intended to be declared as unsigned int. Make it so. 
Fixes: dbc8876dd873 ("crypto: qat - add rp2svc sysfs attribute") Signed-off-by: Dan Carpenter Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index ddffc98119c6..6f0b3629da13 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -242,7 +242,8 @@ static ssize_t rp2srv_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct adf_accel_dev *accel_dev; - int ring, num_rings, ret; + int num_rings, ret; + unsigned int ring; accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) -- Gitee From e6b1f297612bd5ce20612227224d2d0b14ce9c68 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Tue, 21 Nov 2023 17:59:45 +0100 Subject: [PATCH 0723/2138] crypto: qat - add sysfs_added flag for ras ANBZ: #8589 commit 65089000ba8c2ae713ccac6603319143f3e1c08b upstream. Intel-SIG: commit 65089000ba8c crypto: qat - add sysfs_added flag for ras Backport to support Intel QAT in-tree driver The qat_ras sysfs attribute group is registered within the adf_dev_start() function, alongside other driver components. If any of the functions preceding the group registration fails, the adf_dev_start() function returns, and the caller, to undo the operation, invokes adf_dev_stop() followed by adf_dev_shutdown(). However, the current flow lacks information about whether the registration of the qat_ras attribute group was successful or not. 
In cases where this condition is encountered, an error similar to the following might be reported: 4xxx 0000:6b:00.0: Starting device qat_dev0 4xxx 0000:6b:00.0: qat_dev0 started 9 acceleration engines 4xxx 0000:6b:00.0: Failed to send init message 4xxx 0000:6b:00.0: Failed to start device qat_dev0 sysfs group 'qat_ras' not found for kobject '0000:6b:00.0' ... sysfs_remove_groups+0x29/0x50 adf_sysfs_stop_ras+0x4b/0x80 [intel_qat] adf_dev_stop+0x43/0x1d0 [intel_qat] adf_dev_down+0x4b/0x150 [intel_qat] ... 4xxx 0000:6b:00.0: qat_dev0 stopped 9 acceleration engines 4xxx 0000:6b:00.0: Resetting device qat_dev0 To prevent attempting to remove attributes from a group that has not been added yet, a flag named 'sysfs_added' is introduced. This flag is set to true upon the successful registration of the attribute group. Fixes: 532d7f6bc458 ("crypto: qat - add error counters") Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Ahsan Atta Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 1 + .../crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 4ff5729a3496..9d5fdd529a2e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -92,6 +92,7 @@ enum ras_errors { struct adf_error_counters { atomic_t counter[ADF_RAS_ERRORS]; + bool sysfs_added; bool enabled; }; diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c index cffe2d722995..e97c67c87b3c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c +++ 
b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c @@ -99,6 +99,8 @@ void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev) if (device_add_group(&GET_DEV(accel_dev), &qat_ras_group)) dev_err(&GET_DEV(accel_dev), "Failed to create qat_ras attribute group.\n"); + + accel_dev->ras_errors.sysfs_added = true; } void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev) @@ -106,7 +108,10 @@ void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev) if (!accel_dev->ras_errors.enabled) return; - device_remove_group(&GET_DEV(accel_dev), &qat_ras_group); + if (accel_dev->ras_errors.sysfs_added) { + device_remove_group(&GET_DEV(accel_dev), &qat_ras_group); + accel_dev->ras_errors.sysfs_added = false; + } ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors); } -- Gitee From 415a74661cf97858653dde1627df336e36e98d9c Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Tue, 21 Nov 2023 18:02:23 +0100 Subject: [PATCH 0724/2138] crypto: qat - add sysfs_added flag for rate limiting ANBZ: #8589 commit d71fdd0f3c278c7f132c3a522645ebf9157edd6d upstream. Intel-SIG: commit d71fdd0f3c27 crypto: qat - add sysfs_added flag for rate limiting Backport to support Intel QAT in-tree driver The qat_rl sysfs attribute group is registered within the adf_dev_start() function, alongside other driver components. If any of the functions preceding the group registration fails, the adf_dev_start() function returns, and the caller, to undo the operation, invokes adf_dev_stop() followed by adf_dev_shutdown(). However, the current flow lacks information about whether the registration of the qat_rl attribute group was successful or not. In cases where this condition is encountered, an error similar to the following might be reported: 4xxx 0000:6b:00.0: Starting device qat_dev0 4xxx 0000:6b:00.0: qat_dev0 started 9 acceleration engines 4xxx 0000:6b:00.0: Failed to send init message 4xxx 0000:6b:00.0: Failed to start device qat_dev0 sysfs group 'qat_rl' not found for kobject '0000:6b:00.0' ... 
sysfs_remove_groups+0x2d/0x50 adf_sysfs_rl_rm+0x44/0x70 [intel_qat] adf_rl_stop+0x2d/0xb0 [intel_qat] adf_dev_stop+0x33/0x1d0 [intel_qat] adf_dev_down+0xf1/0x150 [intel_qat] ... 4xxx 0000:6b:00.0: qat_dev0 stopped 9 acceleration engines 4xxx 0000:6b:00.0: Resetting device qat_dev0 To prevent attempting to remove attributes from a group that has not been added yet, a flag named 'sysfs_added' is introduced. This flag is set to true upon the successful registration of the attribute group. Fixes: d9fb8408376e ("crypto: qat - add rate limiting feature to qat_4xxx") Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Ahsan Atta Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_rl.h | 1 + drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h index eb5a330f8543..269c6656fb90 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.h +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h @@ -79,6 +79,7 @@ struct adf_rl_interface_data { struct adf_rl_sla_input_data input; enum adf_base_services cap_rem_srv; struct rw_semaphore lock; + bool sysfs_added; }; struct adf_rl_hw_data { diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c index abf9c52474ec..bedb514d4e30 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c @@ -441,11 +441,19 @@ int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev) data->cap_rem_srv = ADF_SVC_NONE; data->input.srv = ADF_SVC_NONE; + data->sysfs_added = true; return ret; } void adf_sysfs_rl_rm(struct adf_accel_dev *accel_dev) { + struct adf_rl_interface_data *data; + + data = &GET_RL_STRUCT(accel_dev); + 
if (!data->sysfs_added) + return; + device_remove_group(&GET_DEV(accel_dev), &qat_rl_group); + data->sysfs_added = false; } -- Gitee From 3b25d892c3709d9391163075e5e6aa24ed0e0427 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Tue, 28 Nov 2023 18:37:32 +0100 Subject: [PATCH 0725/2138] crypto: qat - fix error path in add_update_sla() ANBZ: #8589 commit 6627f03c21cb7001ae4dbbfb7a8514516d02331c upstream. Intel-SIG: commit 6627f03c21cb crypto: qat - fix error path in add_update_sla() Backport to support Intel QAT in-tree driver The input argument `sla_in` is a pointer to a structure that contains the parameters of the SLA which is being added or updated. If this pointer is NULL, the function should return an error as the data required for the algorithm is not available. By mistake, the logic jumps to the error path which dereferences the pointer. This results in a warnings reported by the static analyzer Smatch when executed without a database: drivers/crypto/intel/qat/qat_common/adf_rl.c:871 add_update_sla() error: we previously assumed 'sla_in' could be null (see line 812) This issue was not found in internal testing as the pointer cannot be NULL. The function add_update_sla() is only called (indirectly) by the rate limiting sysfs interface implementation in adf_sysfs_rl.c which ensures that the data structure is allocated and valid. This is also proven by the fact that Smatch executed with a database does not report such error. Fix it by returning with error if the pointer `sla_in` is NULL. 
Fixes: d9fb8408376e ("crypto: qat - add rate limiting feature to qat_4xxx") Reported-by: Dan Carpenter Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_rl.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c index 86e3e2152b1b..f2de3cd7d05d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -812,8 +812,7 @@ static int add_update_sla(struct adf_accel_dev *accel_dev, if (!sla_in) { dev_warn(&GET_DEV(accel_dev), "SLA input data pointer is missing\n"); - ret = -EFAULT; - goto ret_err; + return -EFAULT; } /* Input validation */ -- Gitee From 5dd0105a2e92d06b531934e82565974b576adaf2 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Tue, 28 Nov 2023 18:39:30 +0100 Subject: [PATCH 0726/2138] crypto: qat - fix mutex ordering in adf_rl ANBZ: #8589 commit 487caa8d5ef9a9a27b092c5790d529a7a0c24f8b upstream. Intel-SIG: commit 487caa8d5ef9 crypto: qat - fix mutex ordering in adf_rl Backport to support Intel QAT in-tree driver If the function validate_user_input() returns an error, the error path attempts to unlock an unacquired mutex. Acquire the mutex before calling validate_user_input(). This is not strictly necessary but simplifies the code. 
Fixes: d9fb8408376e ("crypto: qat - add rate limiting feature to qat_4xxx") Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_rl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c index f2de3cd7d05d..de1b214dba1f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -815,13 +815,13 @@ static int add_update_sla(struct adf_accel_dev *accel_dev, return -EFAULT; } + mutex_lock(&rl_data->rl_lock); + /* Input validation */ ret = validate_user_input(accel_dev, sla_in, is_update); if (ret) goto ret_err; - mutex_lock(&rl_data->rl_lock); - if (is_update) { ret = validate_sla_id(accel_dev, sla_in->sla_id); if (ret) -- Gitee From f1f0845818098b0bd4dbb9134a8ddb690802d746 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Tue, 28 Nov 2023 19:17:25 +0000 Subject: [PATCH 0727/2138] crypto: qat - add NULL pointer check ANBZ: #8589 commit a643212c9f28d09225c3792c316bc4aaf6be4a68 upstream. Intel-SIG: commit a643212c9f28 crypto: qat - add NULL pointer check Backport to support Intel QAT in-tree driver There is a possibility that the function adf_devmgr_pci_to_accel_dev() might return a NULL pointer. Add a NULL pointer check in the function rp2srv_show(). 
Fixes: dbc8876dd873 ("crypto: qat - add rp2svc sysfs attribute") Signed-off-by: Giovanni Cabiddu Reviewed-by: Ahsan Atta Reviewed-by: David Guckian Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index 6f0b3629da13..d450dad32c9e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -215,6 +215,9 @@ static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr, enum adf_cfg_service_type svc; accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + hw_data = GET_HW_DATA(accel_dev); if (accel_dev->sysfs.ring_num == UNSET_RING_NUM) -- Gitee From 2832adbe8613a6915ba3bb1d2a64b59e0dff24c7 Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Fri, 15 Dec 2023 05:01:45 -0500 Subject: [PATCH 0728/2138] crypto: qat - change signature of uof_get_num_objs() ANBZ: #8589 commit b34bd0fd563df763ccca998b3d5fc824c536c28a upstream. Intel-SIG: commit b34bd0fd563d crypto: qat - change signature of uof_get_num_objs() Backport to support Intel QAT in-tree driver Add accel_dev as parameter of the function uof_get_num_objs(). This is in preparation for the introduction of the QAT 420xx driver as it will allow to reconfigure the ae_mask when a configuration that does not require all AEs is loaded on the device. This does not introduce any functional change. 
Signed-off-by: Jie Wang Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 2 +- drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 2 +- drivers/crypto/intel/qat/qat_common/adf_accel_engine.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index b4be33346cbd..489793593b59 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -390,7 +390,7 @@ static int adf_init_device(struct adf_accel_dev *accel_dev) return ret; } -static u32 uof_get_num_objs(void) +static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) { return ARRAY_SIZE(adf_fw_cy_config); } diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 9d5fdd529a2e..33de8855fd66 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -241,7 +241,7 @@ struct adf_hw_device_data { void (*reset_device)(struct adf_accel_dev *accel_dev); void (*set_msix_rttable)(struct adf_accel_dev *accel_dev); const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); - u32 (*uof_get_num_objs)(void); + u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev); u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); int (*dev_config)(struct adf_accel_dev *accel_dev); struct adf_pfvf_ops pfvf_ops; diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c index 6be064dc64c8..4b5d0350fc2e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c +++ 
b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c @@ -19,7 +19,7 @@ static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr, int i; loader = loader_data->fw_loader; - num_objs = hw_device->uof_get_num_objs(); + num_objs = hw_device->uof_get_num_objs(accel_dev); for (i = 0; i < num_objs; i++) { obj_name = hw_device->uof_get_name(accel_dev, i); -- Gitee From 03d6c0a8a5d3d5820eb12eeaeb1ab2447798105a Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Fri, 15 Dec 2023 05:01:46 -0500 Subject: [PATCH 0729/2138] crypto: qat - relocate portions of qat_4xxx code ANBZ: #8589 commit de51d22364921dcdb28ef34cd6276c38e126b899 upstream. Intel-SIG: commit de51d2236492 crypto: qat - relocate portions of qat_4xxx code Backport to support Intel QAT in-tree driver Move logic that is common between QAT GEN4 accelerators to the qat_common folder. This includes addresses of CSRs, setters and configuration logic. When moved, functions and defines have been renamed from 4XXX to GEN4. Code specific to the device is moved to the file adf_gen4_hw_data.c. Code related to configuration is moved to the newly created adf_gen4_config.c. This does not introduce any functional change. 
Signed-off-by: Jie Wang Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 188 ++---------- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 52 ---- drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 277 +---------------- drivers/crypto/intel/qat/qat_common/Makefile | 1 + .../intel/qat/qat_common/adf_gen4_config.c | 287 ++++++++++++++++++ .../intel/qat/qat_common/adf_gen4_config.h | 11 + .../intel/qat/qat_common/adf_gen4_hw_data.c | 148 +++++++++ .../intel/qat/qat_common/adf_gen4_hw_data.h | 72 +++++ 8 files changed, 552 insertions(+), 484 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_config.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_config.h diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 489793593b59..0da8ee847c0f 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -120,11 +121,6 @@ static struct adf_hw_device_class adf_4xxx_class = { .instances = 0, }; -static u32 get_accel_mask(struct adf_hw_device_data *self) -{ - return ADF_4XXX_ACCELERATORS_MASK; -} - static u32 get_ae_mask(struct adf_hw_device_data *self) { u32 me_disable = self->fuses; @@ -132,55 +128,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self) return ~me_disable & ADF_4XXX_ACCELENGINES_MASK; } -static u32 get_num_accels(struct adf_hw_device_data *self) -{ - return ADF_4XXX_MAX_ACCELERATORS; -} - -static u32 get_num_aes(struct adf_hw_device_data *self) -{ - if (!self || !self->ae_mask) - return 0; - - return hweight32(self->ae_mask); -} - -static u32 get_misc_bar_id(struct adf_hw_device_data *self) -{ - return 
ADF_4XXX_PMISC_BAR; -} - -static u32 get_etr_bar_id(struct adf_hw_device_data *self) -{ - return ADF_4XXX_ETR_BAR; -} - -static u32 get_sram_bar_id(struct adf_hw_device_data *self) -{ - return ADF_4XXX_SRAM_BAR; -} - -/* - * The vector routing table is used to select the MSI-X entry to use for each - * interrupt source. - * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts. - * The final entry corresponds to VF2PF or error interrupts. - * This vector table could be used to configure one MSI-X entry to be shared - * between multiple interrupt sources. - * - * The default routing is set to have a one to one correspondence between the - * interrupt source and the MSI-X entry used. - */ -static void set_msix_default_rttable(struct adf_accel_dev *accel_dev) -{ - void __iomem *csr; - int i; - - csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; - for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++) - ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i); -} - static u32 get_accel_cap(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev; @@ -189,7 +136,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) u32 fusectl1; /* Read accelerator capabilities mask */ - pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1); capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CIPHER | @@ -204,27 +151,27 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) ICP_ACCEL_CAPABILITIES_AES_V2; /* A set bit in fusectl1 means the feature is OFF in this SKU */ - if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } - if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) { + if (fusectl1 & 
ICP_ACCEL_GEN4_MASK_UCS_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } - if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } - if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4; } @@ -234,7 +181,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) ICP_ACCEL_CAPABILITIES_SM2 | ICP_ACCEL_CAPABILITIES_ECEDMONT; - if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) { capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2; capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; @@ -245,7 +192,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; - if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) { capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION; capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION; @@ -281,11 +228,6 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) } } -static enum dev_sku_info get_sku(struct adf_hw_device_data *self) -{ - return DEV_SKU_1; -} - static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) { switch (adf_get_service_enabled(accel_dev)) { @@ -298,28 +240,6 @@ static const u32 *adf_get_arbiter_mapping(struct 
adf_accel_dev *accel_dev) } } -static void get_arb_info(struct arb_info *arb_info) -{ - arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG; - arb_info->arb_offset = ADF_4XXX_ARB_OFFSET; - arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET; -} - -static void get_admin_info(struct admin_info *admin_csrs_info) -{ - admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET; - admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET; - admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET; -} - -static u32 get_heartbeat_clock(struct adf_hw_device_data *self) -{ - /* - * 4XXX uses KPT counter for HB - */ - return ADF_4XXX_KPT_COUNTER_FREQ; -} - static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) { rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET; @@ -338,58 +258,6 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF; } -static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) -{ - struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR]; - void __iomem *csr = misc_bar->virt_addr; - - /* Enable all in errsou3 except VFLR notification on host */ - ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY); -} - -static void adf_enable_ints(struct adf_accel_dev *accel_dev) -{ - void __iomem *addr; - - addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; - - /* Enable bundle interrupts */ - ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0); - ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0); - - /* Enable misc interrupts */ - ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0); -} - -static int adf_init_device(struct adf_accel_dev *accel_dev) -{ - void __iomem *addr; - u32 status; - u32 csr; - int ret; - - addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; - - /* Temporarily mask PM interrupt */ - csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2); - csr |= ADF_GEN4_PM_SOU; - ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr); - - /* Set DRV_ACTIVE bit to 
power up the device */ - ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE); - - /* Poll status register to make sure the device is powered up */ - ret = read_poll_timeout(ADF_CSR_RD, status, - status & ADF_GEN4_PM_INIT_STATE, - ADF_GEN4_PM_POLL_DELAY_US, - ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr, - ADF_GEN4_PM_STATUS); - if (ret) - dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); - - return ret; -} - static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) { return ARRAY_SIZE(adf_fw_cy_config); @@ -538,37 +406,37 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) { hw_data->dev_class = &adf_4xxx_class; hw_data->instance_id = adf_4xxx_class.instances++; - hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS; - hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF; - hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK; - hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS; + hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS; + hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF; + hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK; + hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS; hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES; hw_data->num_logical_accel = 1; - hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET; - hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK; + hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK; hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP; hw_data->alloc_irq = adf_isr_resource_alloc; hw_data->free_irq = adf_isr_resource_free; - hw_data->enable_error_correction = adf_enable_error_correction; - hw_data->get_accel_mask = get_accel_mask; + hw_data->enable_error_correction = adf_gen4_enable_error_correction; + hw_data->get_accel_mask = adf_gen4_get_accel_mask; hw_data->get_ae_mask = get_ae_mask; - hw_data->get_num_accels = get_num_accels; - hw_data->get_num_aes = get_num_aes; - hw_data->get_sram_bar_id = get_sram_bar_id; - 
hw_data->get_etr_bar_id = get_etr_bar_id; - hw_data->get_misc_bar_id = get_misc_bar_id; - hw_data->get_arb_info = get_arb_info; - hw_data->get_admin_info = get_admin_info; + hw_data->get_num_accels = adf_gen4_get_num_accels; + hw_data->get_num_aes = adf_gen4_get_num_aes; + hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id; + hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id; + hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id; + hw_data->get_arb_info = adf_gen4_get_arb_info; + hw_data->get_admin_info = adf_gen4_get_admin_info; hw_data->get_accel_cap = get_accel_cap; - hw_data->get_sku = get_sku; + hw_data->get_sku = adf_gen4_get_sku; hw_data->init_admin_comms = adf_init_admin_comms; hw_data->exit_admin_comms = adf_exit_admin_comms; hw_data->send_admin_init = adf_send_admin_init; hw_data->init_arb = adf_init_arb; hw_data->exit_arb = adf_exit_arb; hw_data->get_arb_mapping = adf_get_arbiter_mapping; - hw_data->enable_ints = adf_enable_ints; - hw_data->init_device = adf_init_device; + hw_data->enable_ints = adf_gen4_enable_ints; + hw_data->init_device = adf_gen4_init_device; hw_data->reset_device = adf_reset_flr; hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK; switch (dev_id) { @@ -585,7 +453,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) } hw_data->uof_get_num_objs = uof_get_num_objs; hw_data->uof_get_ae_mask = uof_get_ae_mask; - hw_data->set_msix_rttable = set_msix_default_rttable; + hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; hw_data->get_ring_to_svc_map = get_ring_to_svc_map; hw_data->disable_iov = adf_disable_sriov; @@ -595,7 +463,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->dev_config = adf_gen4_dev_config; hw_data->start_timer = adf_gen4_timer_start; hw_data->stop_timer = adf_gen4_timer_stop; - hw_data->get_hb_clock = get_heartbeat_clock; + hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; 
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; hw_data->clock_frequency = ADF_4XXX_AE_FREQ; diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h index 33423295e90f..76388363ea87 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -6,25 +6,8 @@ #include #include -/* PCIe configuration space */ -#define ADF_4XXX_SRAM_BAR 0 -#define ADF_4XXX_PMISC_BAR 1 -#define ADF_4XXX_ETR_BAR 2 -#define ADF_4XXX_RX_RINGS_OFFSET 1 -#define ADF_4XXX_TX_RINGS_MASK 0x1 -#define ADF_4XXX_MAX_ACCELERATORS 1 #define ADF_4XXX_MAX_ACCELENGINES 9 -#define ADF_4XXX_BAR_MASK (BIT(0) | BIT(2) | BIT(4)) -/* Physical function fuses */ -#define ADF_4XXX_FUSECTL0_OFFSET (0x2C8) -#define ADF_4XXX_FUSECTL1_OFFSET (0x2CC) -#define ADF_4XXX_FUSECTL2_OFFSET (0x2D0) -#define ADF_4XXX_FUSECTL3_OFFSET (0x2D4) -#define ADF_4XXX_FUSECTL4_OFFSET (0x2D8) -#define ADF_4XXX_FUSECTL5_OFFSET (0x2DC) - -#define ADF_4XXX_ACCELERATORS_MASK (0x1) #define ADF_4XXX_ACCELENGINES_MASK (0x1FF) #define ADF_4XXX_ADMIN_AE_MASK (0x100) @@ -45,28 +28,6 @@ (BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | \ BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23)) -#define ADF_4XXX_ETR_MAX_BANKS 64 - -/* MSIX interrupt */ -#define ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET (0x41A040) -#define ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET (0x41A044) -#define ADF_4XXX_SMIAPF_MASK_OFFSET (0x41A084) -#define ADF_4XXX_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04)) - -/* Bank and ring configuration */ -#define ADF_4XXX_NUM_RINGS_PER_BANK 2 -#define ADF_4XXX_NUM_BANKS_PER_VF 4 - -/* Arbiter configuration */ -#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) -#define ADF_4XXX_ARB_OFFSET (0x0) -#define ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET (0x400) - -/* Admin Interface Reg Offset */ -#define ADF_4XXX_ADMINMSGUR_OFFSET (0x500574) -#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578) -#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970) 
- /* Firmware Binaries */ #define ADF_4XXX_FW "qat_4xxx.bin" #define ADF_4XXX_MMP "qat_4xxx_mmp.bin" @@ -93,22 +54,9 @@ #define ADF_4XXX_RL_SLICE_REF 1000UL /* Clocks frequency */ -#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) #define ADF_4XXX_AE_FREQ (1000 * HZ_PER_MHZ) -/* qat_4xxx fuse bits are different from old GENs, redefine them */ -enum icp_qat_4xxx_slice_mask { - ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0), - ICP_ACCEL_4XXX_MASK_AUTH_SLICE = BIT(1), - ICP_ACCEL_4XXX_MASK_PKE_SLICE = BIT(2), - ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3), - ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4), - ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5), - ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(7), -}; - void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id); void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data); -int adf_gen4_dev_config(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index 7d0587d6ec4e..d26564cebdec 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -8,13 +8,10 @@ #include #include #include -#include +#include +#include #include "adf_4xxx_hw_data.h" -#include "adf_cfg_services.h" -#include "qat_compression.h" -#include "qat_crypto.h" -#include "adf_transport_access_macros.h" static const struct pci_device_id adf_pci_tbl[] = { { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), }, @@ -35,270 +32,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) adf_devmgr_rm_dev(accel_dev, NULL); } -static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev) -{ - const char *config; - int ret; - - config = accel_dev->accel_id % 2 ? 
ADF_CFG_DC : ADF_CFG_CY; - - ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); - if (ret) - return ret; - - /* Default configuration is crypto only for even devices - * and compression for odd devices - */ - ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, config, - ADF_STR); - if (ret) - return ret; - - adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS); - - return 0; -} - -static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) -{ - char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; - int banks = GET_MAX_BANKS(accel_dev); - int cpus = num_online_cpus(); - unsigned long bank, val; - int instances; - int ret; - int i; - - if (adf_hw_dev_has_crypto(accel_dev)) - instances = min(cpus, banks / 2); - else - instances = 0; - - for (i = 0; i < instances; i++) { - val = i; - bank = i * 2; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &bank, ADF_DEC); - if (ret) - goto err; - - bank += 1; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &bank, ADF_DEC); - if (ret) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, - i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); - val = 128; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 512; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - snprintf(key, 
sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 1; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 1; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = ADF_COALESCING_DEF_TIME; - snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); - ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", - key, &val, ADF_DEC); - if (ret) - goto err; - } - - val = i; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC); - if (ret) - goto err; - - return 0; -err: - dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n"); - return ret; -} - -static int adf_comp_dev_config(struct adf_accel_dev *accel_dev) -{ - char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; - int banks = GET_MAX_BANKS(accel_dev); - int cpus = num_online_cpus(); - unsigned long val; - int instances; - int ret; - int i; - - if (adf_hw_dev_has_compression(accel_dev)) - instances = min(cpus, banks); - else - instances = 0; - - for (i = 0; i < instances; i++) { - val = i; - snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 512; - snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i); - ret = adf_cfg_add_key_value_param(accel_dev, 
ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 1; - snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = ADF_COALESCING_DEF_TIME; - snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); - ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", - key, &val, ADF_DEC); - if (ret) - goto err; - } - - val = i; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC); - if (ret) - goto err; - - return 0; -err: - dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n"); - return ret; -} - -static int adf_no_dev_config(struct adf_accel_dev *accel_dev) -{ - unsigned long val; - int ret; - - val = 0; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC); - if (ret) - return ret; - - return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC); -} - -int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) -{ - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; - int ret; - - ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); - if (ret) - goto err; - - ret = adf_cfg_section_add(accel_dev, "Accelerator0"); - if (ret) - goto err; - - ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, services); - if (ret) - goto err; - - ret = sysfs_match_string(adf_cfg_services, services); - if (ret < 0) - goto err; - - switch (ret) { - case SVC_CY: - case SVC_CY2: - ret = adf_crypto_dev_config(accel_dev); - break; - case SVC_DC: - case SVC_DCC: - ret = adf_comp_dev_config(accel_dev); - break; - default: - ret = adf_no_dev_config(accel_dev); - break; - } - - if (ret) - goto err; - - set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); - - return ret; 
- -err: - dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n"); - return ret; -} - static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct adf_accel_dev *accel_dev; @@ -348,7 +81,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); - pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data); @@ -381,7 +114,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err; } - ret = adf_cfg_dev_init(accel_dev); + ret = adf_gen4_cfg_dev_init(accel_dev); if (ret) { dev_err(&pdev->dev, "Failed to initialize configuration.\n"); goto out_err; @@ -396,7 +129,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } /* Find and map all the device's BARS */ - bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK; + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK; ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev)); if (ret) { diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 779a8aa0b8d2..928de6997155 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -16,6 +16,7 @@ intel_qat-objs := adf_cfg.o \ adf_sysfs_ras_counters.o \ adf_gen2_hw_data.o \ adf_gen2_config.o \ + adf_gen4_config.o \ adf_gen4_hw_data.o \ adf_gen4_pm.o \ adf_gen2_dc.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c new file mode 100644 index 000000000000..fe1f3d727dc5 --- /dev/null +++ 
b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c @@ -0,0 +1,287 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include "adf_accel_devices.h" +#include "adf_cfg.h" +#include "adf_cfg_services.h" +#include "adf_cfg_strings.h" +#include "adf_common_drv.h" +#include "adf_gen4_config.h" +#include "adf_heartbeat.h" +#include "adf_transport_access_macros.h" +#include "qat_compression.h" +#include "qat_crypto.h" + +static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) +{ + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + int banks = GET_MAX_BANKS(accel_dev); + int cpus = num_online_cpus(); + unsigned long bank, val; + int instances; + int ret; + int i; + + if (adf_hw_dev_has_crypto(accel_dev)) + instances = min(cpus, banks / 2); + else + instances = 0; + + for (i = 0; i < instances; i++) { + val = i; + bank = i * 2; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &bank, ADF_DEC); + if (ret) + goto err; + + bank += 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &bank, ADF_DEC); + if (ret) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, + i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); + val = 128; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + 
val = 0; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = ADF_COALESCING_DEF_TIME; + snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); + ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", + key, &val, ADF_DEC); + if (ret) + goto err; + } + + val = i; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + goto err; + + return 0; +err: + dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n"); + return ret; +} + +static int adf_comp_dev_config(struct adf_accel_dev *accel_dev) +{ + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + int banks = GET_MAX_BANKS(accel_dev); + int cpus = num_online_cpus(); + unsigned long val; + int instances; + int ret; + int i; + + if (adf_hw_dev_has_compression(accel_dev)) + instances = min(cpus, banks); + else + instances = 0; + + for (i = 0; i < instances; i++) { + val = i; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i); + ret = 
adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 1; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = ADF_COALESCING_DEF_TIME; + snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); + ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", + key, &val, ADF_DEC); + if (ret) + goto err; + } + + val = i; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); + if (ret) + goto err; + + return 0; +err: + dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n"); + return ret; +} + +static int adf_no_dev_config(struct adf_accel_dev *accel_dev) +{ + unsigned long val; + int ret; + + val = 0; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + return ret; + + return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); +} + +/** + * adf_gen4_dev_config() - create dev config required to create instances + * + * @accel_dev: Pointer to acceleration device. + * + * Function creates device configuration required to create instances + * + * Return: 0 on success, error code otherwise. 
+ */ +int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) +{ + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + int ret; + + ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); + if (ret) + goto err; + + ret = adf_cfg_section_add(accel_dev, "Accelerator0"); + if (ret) + goto err; + + ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, services); + if (ret) + goto err; + + ret = sysfs_match_string(adf_cfg_services, services); + if (ret < 0) + goto err; + + switch (ret) { + case SVC_CY: + case SVC_CY2: + ret = adf_crypto_dev_config(accel_dev); + break; + case SVC_DC: + case SVC_DCC: + ret = adf_comp_dev_config(accel_dev); + break; + default: + ret = adf_no_dev_config(accel_dev); + break; + } + + if (ret) + goto err; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + + return ret; + +err: + dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n"); + return ret; +} +EXPORT_SYMBOL_GPL(adf_gen4_dev_config); + +int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev) +{ + const char *config; + int ret; + + config = accel_dev->accel_id % 2 ? 
ADF_CFG_DC : ADF_CFG_CY; + + ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); + if (ret) + return ret; + + /* Default configuration is crypto only for even devices + * and compression for odd devices + */ + ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, config, + ADF_STR); + if (ret) + return ret; + + adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS); + + return 0; +} +EXPORT_SYMBOL_GPL(adf_gen4_cfg_dev_init); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h new file mode 100644 index 000000000000..bb87655f69a8 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_GEN4_CONFIG_H_ +#define ADF_GEN4_CONFIG_H_ + +#include "adf_accel_devices.h" + +int adf_gen4_dev_config(struct adf_accel_dev *accel_dev); +int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 3148a62938fd..ee08b34876dd 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -4,6 +4,7 @@ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_gen4_hw_data.h" +#include "adf_gen4_pm.h" static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) { @@ -102,6 +103,131 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) } EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops); +u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self) +{ + return ADF_GEN4_ACCELERATORS_MASK; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_accel_mask); + +u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self) +{ + return ADF_GEN4_MAX_ACCELERATORS; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_num_accels); + +u32 
adf_gen4_get_num_aes(struct adf_hw_device_data *self) +{ + if (!self || !self->ae_mask) + return 0; + + return hweight32(self->ae_mask); +} +EXPORT_SYMBOL_GPL(adf_gen4_get_num_aes); + +u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN4_PMISC_BAR; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_misc_bar_id); + +u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN4_ETR_BAR; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_etr_bar_id); + +u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN4_SRAM_BAR; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_sram_bar_id); + +enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self) +{ + return DEV_SKU_1; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_sku); + +void adf_gen4_get_arb_info(struct arb_info *arb_info) +{ + arb_info->arb_cfg = ADF_GEN4_ARB_CONFIG; + arb_info->arb_offset = ADF_GEN4_ARB_OFFSET; + arb_info->wt2sam_offset = ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_arb_info); + +void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info) +{ + admin_csrs_info->mailbox_offset = ADF_GEN4_MAILBOX_BASE_OFFSET; + admin_csrs_info->admin_msg_ur = ADF_GEN4_ADMINMSGUR_OFFSET; + admin_csrs_info->admin_msg_lr = ADF_GEN4_ADMINMSGLR_OFFSET; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_admin_info); + +u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self) +{ + /* + * GEN4 uses KPT counter for HB + */ + return ADF_GEN4_KPT_COUNTER_FREQ; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_heartbeat_clock); + +void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev) +{ + struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR]; + void __iomem *csr = misc_bar->virt_addr; + + /* Enable all in errsou3 except VFLR notification on host */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY); +} +EXPORT_SYMBOL_GPL(adf_gen4_enable_error_correction); + +void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr; + + addr = 
(&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr; + + /* Enable bundle interrupts */ + ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET, 0); + ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET, 0); + + /* Enable misc interrupts */ + ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_MASK_OFFSET, 0); +} +EXPORT_SYMBOL_GPL(adf_gen4_enable_ints); + +int adf_gen4_init_device(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr; + u32 status; + u32 csr; + int ret; + + addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr; + + /* Temporarily mask PM interrupt */ + csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2); + csr |= ADF_GEN4_PM_SOU; + ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr); + + /* Set DRV_ACTIVE bit to power up the device */ + ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE); + + /* Poll status register to make sure the device is powered up */ + ret = read_poll_timeout(ADF_CSR_RD, status, + status & ADF_GEN4_PM_INIT_STATE, + ADF_GEN4_PM_POLL_DELAY_US, + ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr, + ADF_GEN4_PM_STATUS); + if (ret) + dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); + + return ret; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_device); + static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper, u32 *lower) { @@ -135,6 +261,28 @@ void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer); +/* + * The vector routing table is used to select the MSI-X entry to use for each + * interrupt source. + * The first ADF_GEN4_ETR_MAX_BANKS entries correspond to ring interrupts. + * The final entry corresponds to VF2PF or error interrupts. + * This vector table could be used to configure one MSI-X entry to be shared + * between multiple interrupt sources. + * + * The default routing is set to have a one to one correspondence between the + * interrupt source and the MSI-X entry used. 
+ */ +void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr; + int i; + + csr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr; + for (i = 0; i <= ADF_GEN4_ETR_MAX_BANKS; i++) + ADF_CSR_WR(csr, ADF_GEN4_MSIX_RTTABLE_OFFSET(i), i); +} +EXPORT_SYMBOL_GPL(adf_gen4_set_msix_default_rttable); + int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev) { return 0; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 1813fe1d5a06..b42fb8048c04 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -3,9 +3,55 @@ #ifndef ADF_GEN4_HW_CSR_DATA_H_ #define ADF_GEN4_HW_CSR_DATA_H_ +#include + #include "adf_accel_devices.h" #include "adf_cfg_common.h" +/* PCIe configuration space */ +#define ADF_GEN4_BAR_MASK (BIT(0) | BIT(2) | BIT(4)) +#define ADF_GEN4_SRAM_BAR 0 +#define ADF_GEN4_PMISC_BAR 1 +#define ADF_GEN4_ETR_BAR 2 + +/* Clocks frequency */ +#define ADF_GEN4_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) + +/* Physical function fuses */ +#define ADF_GEN4_FUSECTL0_OFFSET 0x2C8 +#define ADF_GEN4_FUSECTL1_OFFSET 0x2CC +#define ADF_GEN4_FUSECTL2_OFFSET 0x2D0 +#define ADF_GEN4_FUSECTL3_OFFSET 0x2D4 +#define ADF_GEN4_FUSECTL4_OFFSET 0x2D8 +#define ADF_GEN4_FUSECTL5_OFFSET 0x2DC + +/* Accelerators */ +#define ADF_GEN4_ACCELERATORS_MASK 0x1 +#define ADF_GEN4_MAX_ACCELERATORS 1 + +/* MSIX interrupt */ +#define ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET 0x41A040 +#define ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET 0x41A044 +#define ADF_GEN4_SMIAPF_MASK_OFFSET 0x41A084 +#define ADF_GEN4_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04)) + +/* Bank and ring configuration */ +#define ADF_GEN4_NUM_RINGS_PER_BANK 2 +#define ADF_GEN4_NUM_BANKS_PER_VF 4 +#define ADF_GEN4_ETR_MAX_BANKS 64 +#define ADF_GEN4_RX_RINGS_OFFSET 1 +#define ADF_GEN4_TX_RINGS_MASK 0x1 + +/* Arbiter configuration */ +#define ADF_GEN4_ARB_CONFIG 
(BIT(31) | BIT(6) | BIT(0)) +#define ADF_GEN4_ARB_OFFSET 0x0 +#define ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET 0x400 + +/* Admin Interface Reg Offset */ +#define ADF_GEN4_ADMINMSGUR_OFFSET 0x500574 +#define ADF_GEN4_ADMINMSGLR_OFFSET 0x500578 +#define ADF_GEN4_MAILBOX_BASE_OFFSET 0x600970 + /* Transport access */ #define ADF_BANK_INT_SRC_SEL_MASK 0x44UL #define ADF_RING_CSR_RING_CONFIG 0x1000 @@ -147,6 +193,32 @@ do { \ #define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804 void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); + +enum icp_qat_gen4_slice_mask { + ICP_ACCEL_GEN4_MASK_CIPHER_SLICE = BIT(0), + ICP_ACCEL_GEN4_MASK_AUTH_SLICE = BIT(1), + ICP_ACCEL_GEN4_MASK_PKE_SLICE = BIT(2), + ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE = BIT(3), + ICP_ACCEL_GEN4_MASK_UCS_SLICE = BIT(4), + ICP_ACCEL_GEN4_MASK_EIA3_SLICE = BIT(5), + ICP_ACCEL_GEN4_MASK_SMX_SLICE = BIT(7), +}; + +void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev); +void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev); +u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self); +void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info); +void adf_gen4_get_arb_info(struct arb_info *arb_info); +u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self); +u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self); +u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self); +u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self); +u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self); +enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self); +u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self); +int adf_gen4_init_device(struct adf_accel_dev *accel_dev); void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); +void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev); +void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); 
#endif -- Gitee From b00479b98c11690bdde30e7d91aa8ae127e90d25 Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Fri, 15 Dec 2023 05:01:47 -0500 Subject: [PATCH 0730/2138] crypto: qat - move fw config related structures ANBZ: #8589 commit 98a4f29fba0ffc1f1b026d9cb717fbe7edd66ffe upstream. Intel-SIG: commit 98a4f29fba0f crypto: qat - move fw config related structures Backport to support Intel QAT in-tree driver Relocate the structures adf_fw_objs and adf_fw_config from the file adf_4xxx_hw_data.c to the newly created adf_fw_config.h. These structures will be used by new device drivers. This does not introduce any functional change. Signed-off-by: Jie Wang Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 13 +------------ .../intel/qat/qat_common/adf_fw_config.h | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 12 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_fw_config.h diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 0da8ee847c0f..a8ab40db7b28 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -21,13 +22,6 @@ #define ADF_AE_GROUP_1 GENMASK(7, 4) #define ADF_AE_GROUP_2 BIT(8) -enum adf_fw_objs { - ADF_FW_SYM_OBJ, - ADF_FW_ASYM_OBJ, - ADF_FW_DC_OBJ, - ADF_FW_ADMIN_OBJ, -}; - static const char * const adf_4xxx_fw_objs[] = { [ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ, [ADF_FW_ASYM_OBJ] = ADF_4XXX_ASYM_OBJ, @@ -42,11 +36,6 @@ static const char * const adf_402xx_fw_objs[] = { [ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ, }; -struct adf_fw_config { - u32 ae_mask; - enum adf_fw_objs obj; -}; - static const struct adf_fw_config 
adf_fw_cy_config[] = { {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h new file mode 100644 index 000000000000..4f86696800c9 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_FW_CONFIG_H_ +#define ADF_FW_CONFIG_H_ + +enum adf_fw_objs { + ADF_FW_SYM_OBJ, + ADF_FW_ASYM_OBJ, + ADF_FW_DC_OBJ, + ADF_FW_ADMIN_OBJ, +}; + +struct adf_fw_config { + u32 ae_mask; + enum adf_fw_objs obj; +}; + +#endif -- Gitee From 23141e9918ac966c7ee8549e69437adfe29237ff Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Fri, 15 Dec 2023 05:01:48 -0500 Subject: [PATCH 0731/2138] crypto: qat - add support for 420xx devices ANBZ: #8589 commit fcf60f4bcf54952cc14d14178c358be222dbeb43 upstream. Intel-SIG: commit fcf60f4bcf54 crypto: qat - add support for 420xx devices Backport to support Intel QAT in-tree driver Add support for 420xx devices by including a new device driver that supports such devices, updates to the firmware loader and capabilities. Compared to 4xxx devices, 420xx devices have more acceleration engines (16 service engines and 1 admin) and support the wireless cipher algorithms ZUC and Snow 3G. 
Signed-off-by: Jie Wang Co-developed-by: Dong Xie Signed-off-by: Dong Xie Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/Kconfig | 11 + drivers/crypto/intel/qat/Makefile | 1 + drivers/crypto/intel/qat/qat_420xx/Makefile | 4 + .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 552 ++++++++++++++++++ .../intel/qat/qat_420xx/adf_420xx_hw_data.h | 55 ++ drivers/crypto/intel/qat/qat_420xx/adf_drv.c | 202 +++++++ .../intel/qat/qat_common/adf_accel_devices.h | 3 + .../intel/qat/qat_common/adf_cfg_common.h | 1 + .../intel/qat/qat_common/adf_gen4_hw_data.h | 2 + .../crypto/intel/qat/qat_common/icp_qat_hw.h | 14 +- .../intel/qat/qat_common/icp_qat_uclo.h | 2 +- drivers/crypto/intel/qat/qat_common/qat_hal.c | 6 +- .../crypto/intel/qat/qat_common/qat_uclo.c | 1 + 13 files changed, 849 insertions(+), 5 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_420xx/Makefile create mode 100644 drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c create mode 100644 drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h create mode 100644 drivers/crypto/intel/qat/qat_420xx/adf_drv.c diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig index 1220cc86f910..c120f6715a09 100644 --- a/drivers/crypto/intel/qat/Kconfig +++ b/drivers/crypto/intel/qat/Kconfig @@ -59,6 +59,17 @@ config CRYPTO_DEV_QAT_4XXX To compile this as a module, choose M here: the module will be called qat_4xxx. +config CRYPTO_DEV_QAT_420XX + tristate "Support for Intel(R) QAT_420XX" + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) + select CRYPTO_DEV_QAT + help + Support for Intel(R) QuickAssist Technology QAT_420xx + for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_420xx. 
+ config CRYPTO_DEV_QAT_DH895xCCVF tristate "Support for Intel(R) DH895xCC Virtual Function" depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile index 258c8a626ce0..235b69f4f3f7 100644 --- a/drivers/crypto/intel/qat/Makefile +++ b/drivers/crypto/intel/qat/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/ obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/ obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/ +obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/ obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/ diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile new file mode 100644 index 000000000000..a90fbe00b3c8 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +ccflags-y := -I $(srctree)/$(src)/../qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o +qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c new file mode 100644 index 000000000000..d296eb18db3c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_420xx_hw_data.h" +#include "icp_qat_hw.h" + +#define ADF_AE_GROUP_0 GENMASK(3, 0) +#define ADF_AE_GROUP_1 GENMASK(7, 4) +#define ADF_AE_GROUP_2 GENMASK(11, 8) +#define ADF_AE_GROUP_3 GENMASK(15, 12) +#define ADF_AE_GROUP_4 BIT(16) + +static const char * const 
adf_420xx_fw_objs[] = { + [ADF_FW_SYM_OBJ] = ADF_420XX_SYM_OBJ, + [ADF_FW_ASYM_OBJ] = ADF_420XX_ASYM_OBJ, + [ADF_FW_DC_OBJ] = ADF_420XX_DC_OBJ, + [ADF_FW_ADMIN_OBJ] = ADF_420XX_ADMIN_OBJ, +}; + +static const struct adf_fw_config adf_fw_cy_config[] = { + {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_dc_config[] = { + {ADF_AE_GROUP_1, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_sym_config[] = { + {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_asym_config[] = { + {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_asym_dc_config[] = { + {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_sym_dc_config[] = { + {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_dcc_config[] = { + {ADF_AE_GROUP_1, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +/* Worker thread to service arbiter mappings */ +static const u32 default_thrd_to_arb_map[ADF_420XX_MAX_ACCELENGINES] = { + 0x00000055, 0x00000055, 0x00000055, 0x00000055, + 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, + 0x00000055, 
0x00000055, 0x00000055, 0x00000055, + 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, + 0x0 +}; + +static const u32 thrd_to_arb_map_asym[ADF_420XX_MAX_ACCELENGINES] = { + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, + 0x0 +}; + +static const u32 thrd_to_arb_map_sym[ADF_420XX_MAX_ACCELENGINES] = { + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x0 +}; + +static const u32 thrd_to_arb_map_asym_dc[ADF_420XX_MAX_ACCELENGINES] = { + 0x00000055, 0x00000055, 0x00000055, 0x00000055, + 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, + 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, + 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, + 0x0 +}; + +static const u32 thrd_to_arb_map_sym_dc[ADF_420XX_MAX_ACCELENGINES] = { + 0x00000055, 0x00000055, 0x00000055, 0x00000055, + 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, + 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0 +}; + +static const u32 thrd_to_arb_map_dc[ADF_420XX_MAX_ACCELENGINES] = { + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, + 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0 +}; + +static const u32 thrd_to_arb_map_dcc[ADF_420XX_MAX_ACCELENGINES] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0 +}; + +static struct adf_hw_device_class adf_420xx_class = { + .name = ADF_420XX_DEVICE_NAME, + .type = DEV_420XX, + .instances = 0, +}; + +static u32 get_ae_mask(struct adf_hw_device_data *self) +{ + u32 
me_disable = self->fuses; + + return ~me_disable & ADF_420XX_ACCELENGINES_MASK; +} + +static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) +{ + switch (adf_get_service_enabled(accel_dev)) { + case SVC_CY: + case SVC_CY2: + return ARRAY_SIZE(adf_fw_cy_config); + case SVC_DC: + return ARRAY_SIZE(adf_fw_dc_config); + case SVC_DCC: + return ARRAY_SIZE(adf_fw_dcc_config); + case SVC_SYM: + return ARRAY_SIZE(adf_fw_sym_config); + case SVC_ASYM: + return ARRAY_SIZE(adf_fw_asym_config); + case SVC_ASYM_DC: + case SVC_DC_ASYM: + return ARRAY_SIZE(adf_fw_asym_dc_config); + case SVC_SYM_DC: + case SVC_DC_SYM: + return ARRAY_SIZE(adf_fw_sym_dc_config); + default: + return 0; + } +} + +static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev) +{ + switch (adf_get_service_enabled(accel_dev)) { + case SVC_CY: + case SVC_CY2: + return adf_fw_cy_config; + case SVC_DC: + return adf_fw_dc_config; + case SVC_DCC: + return adf_fw_dcc_config; + case SVC_SYM: + return adf_fw_sym_config; + case SVC_ASYM: + return adf_fw_asym_config; + case SVC_ASYM_DC: + case SVC_DC_ASYM: + return adf_fw_asym_dc_config; + case SVC_SYM_DC: + case SVC_DC_SYM: + return adf_fw_sym_dc_config; + default: + return NULL; + } +} + +static void update_ae_mask(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + const struct adf_fw_config *fw_config; + u32 config_ae_mask = 0; + u32 ae_mask, num_objs; + int i; + + ae_mask = get_ae_mask(hw_data); + + /* Modify the AE mask based on the firmware configuration loaded */ + fw_config = get_fw_config(accel_dev); + num_objs = uof_get_num_objs(accel_dev); + + config_ae_mask |= ADF_420XX_ADMIN_AE_MASK; + for (i = 0; i < num_objs; i++) + config_ae_mask |= fw_config[i].ae_mask; + + hw_data->ae_mask = ae_mask & config_ae_mask; +} + +static u32 get_accel_cap(struct adf_accel_dev *accel_dev) +{ + u32 capabilities_sym, capabilities_asym, capabilities_dc; + struct pci_dev *pdev = 
accel_dev->accel_pci_dev.pci_dev; + u32 capabilities_dcc; + u32 fusectl1; + + /* As a side effect, update ae_mask based on configuration */ + update_ae_mask(accel_dev); + + /* Read accelerator capabilities mask */ + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1); + + capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | + ICP_ACCEL_CAPABILITIES_CIPHER | + ICP_ACCEL_CAPABILITIES_AUTHENTICATION | + ICP_ACCEL_CAPABILITIES_SHA3 | + ICP_ACCEL_CAPABILITIES_SHA3_EXT | + ICP_ACCEL_CAPABILITIES_HKDF | + ICP_ACCEL_CAPABILITIES_CHACHA_POLY | + ICP_ACCEL_CAPABILITIES_AESGCM_SPC | + ICP_ACCEL_CAPABILITIES_SM3 | + ICP_ACCEL_CAPABILITIES_SM4 | + ICP_ACCEL_CAPABILITIES_AES_V2 | + ICP_ACCEL_CAPABILITIES_ZUC | + ICP_ACCEL_CAPABILITIES_ZUC_256 | + ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT | + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN; + + /* A set bit in fusectl1 means the feature is OFF in this SKU */ + if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC; + capabilities_sym &= 
~ICP_ACCEL_CAPABILITIES_ZUC_256; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE) + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; + + capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | + ICP_ACCEL_CAPABILITIES_SM2 | + ICP_ACCEL_CAPABILITIES_ECEDMONT; + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) { + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2; + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; + } + + capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | + ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) { + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; + } + + switch (adf_get_service_enabled(accel_dev)) { + case SVC_CY: + case SVC_CY2: + return capabilities_sym | capabilities_asym; + case SVC_DC: + return capabilities_dc; + case SVC_DCC: + /* + * Sym capabilities are available for chaining operations, + * but sym crypto instances cannot be supported + */ + capabilities_dcc = capabilities_dc | capabilities_sym; + capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + return capabilities_dcc; + case SVC_SYM: + return capabilities_sym; + case SVC_ASYM: + return capabilities_asym; + case SVC_ASYM_DC: + case SVC_DC_ASYM: + return capabilities_asym | capabilities_dc; + case SVC_SYM_DC: + case SVC_DC_SYM: + return capabilities_sym | capabilities_dc; + default: + return 0; + } +} + +static const u32 
*adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) +{ + switch (adf_get_service_enabled(accel_dev)) { + case SVC_ASYM: + return thrd_to_arb_map_asym; + case SVC_SYM: + return thrd_to_arb_map_sym; + case SVC_DC: + return thrd_to_arb_map_dc; + case SVC_DCC: + return thrd_to_arb_map_dcc; + case SVC_ASYM_DC: + case SVC_DC_ASYM: + return thrd_to_arb_map_asym_dc; + case SVC_DC_SYM: + case SVC_SYM_DC: + return thrd_to_arb_map_sym_dc; + default: + return default_thrd_to_arb_map; + } +} + +static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) +{ + rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET; + rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET; + rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET; + rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET; + rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET; + + rl_data->pcie_scale_div = ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV; + rl_data->pcie_scale_mul = ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL; + rl_data->dcpr_correction = ADF_420XX_RL_DCPR_CORRECTION; + rl_data->max_tp[ADF_SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM; + rl_data->max_tp[ADF_SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM; + rl_data->max_tp[ADF_SVC_DC] = ADF_420XX_RL_MAX_TP_DC; + rl_data->scan_interval = ADF_420XX_RL_SCANS_PER_SEC; + rl_data->scale_ref = ADF_420XX_RL_SLICE_REF; +} + +enum adf_rp_groups { + RP_GROUP_0 = 0, + RP_GROUP_1, + RP_GROUP_COUNT +}; + +static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) +{ + enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { }; + const struct adf_fw_config *fw_config; + u16 ring_to_svc_map; + int i, j; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return 0; + + for (i = 0; i < RP_GROUP_COUNT; i++) { + switch (fw_config[i].ae_mask) { + case ADF_AE_GROUP_0: + j = RP_GROUP_0; + break; + case ADF_AE_GROUP_1: + j = RP_GROUP_1; + break; + default: + return 0; + } + + switch (fw_config[i].obj) { + case ADF_FW_SYM_OBJ: + rps[j] = SYM; + break; + case ADF_FW_ASYM_OBJ: + rps[j] = ASYM; + break; + case 
ADF_FW_DC_OBJ: + rps[j] = COMP; + break; + default: + rps[j] = 0; + break; + } + } + + ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | + rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; + + return ring_to_svc_map; +} + +static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, + const char * const fw_objs[], int num_objs) +{ + const struct adf_fw_config *fw_config; + int id; + + fw_config = get_fw_config(accel_dev); + if (fw_config) + id = fw_config[obj_num].obj; + else + id = -EINVAL; + + if (id < 0 || id > num_objs) + return NULL; + + return fw_objs[id]; +} + +static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + int num_fw_objs = ARRAY_SIZE(adf_420xx_fw_objs); + + return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs); +} + +static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return 0; + + return fw_config[obj_num].ae_mask; +} + +static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) +{ + dev_err_mask->cppagentcmdpar_mask = ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK; + dev_err_mask->parerr_ath_cph_mask = ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK; + dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK; + dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK; + dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK; + dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK; +} + +void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) +{ + hw_data->dev_class = &adf_420xx_class; + hw_data->instance_id = adf_420xx_class.instances++; + hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS; + hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF; + 
hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK; + hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS; + hw_data->num_engines = ADF_420XX_MAX_ACCELENGINES; + hw_data->num_logical_accel = 1; + hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK; + hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP; + hw_data->alloc_irq = adf_isr_resource_alloc; + hw_data->free_irq = adf_isr_resource_free; + hw_data->enable_error_correction = adf_gen4_enable_error_correction; + hw_data->get_accel_mask = adf_gen4_get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = adf_gen4_get_num_accels; + hw_data->get_num_aes = adf_gen4_get_num_aes; + hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id; + hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id; + hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id; + hw_data->get_arb_info = adf_gen4_get_arb_info; + hw_data->get_admin_info = adf_gen4_get_admin_info; + hw_data->get_accel_cap = get_accel_cap; + hw_data->get_sku = adf_gen4_get_sku; + hw_data->init_admin_comms = adf_init_admin_comms; + hw_data->exit_admin_comms = adf_exit_admin_comms; + hw_data->send_admin_init = adf_send_admin_init; + hw_data->init_arb = adf_init_arb; + hw_data->exit_arb = adf_exit_arb; + hw_data->get_arb_mapping = adf_get_arbiter_mapping; + hw_data->enable_ints = adf_gen4_enable_ints; + hw_data->init_device = adf_gen4_init_device; + hw_data->reset_device = adf_reset_flr; + hw_data->admin_ae_mask = ADF_420XX_ADMIN_AE_MASK; + hw_data->fw_name = ADF_420XX_FW; + hw_data->fw_mmp_name = ADF_420XX_MMP; + hw_data->uof_get_name = uof_get_name_420xx; + hw_data->uof_get_num_objs = uof_get_num_objs; + hw_data->uof_get_ae_mask = uof_get_ae_mask; + hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; + hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; + hw_data->get_ring_to_svc_map = get_ring_to_svc_map; + hw_data->disable_iov = adf_disable_sriov; + hw_data->ring_pair_reset = 
adf_gen4_ring_pair_reset; + hw_data->enable_pm = adf_gen4_enable_pm; + hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; + hw_data->dev_config = adf_gen4_dev_config; + hw_data->start_timer = adf_gen4_timer_start; + hw_data->stop_timer = adf_gen4_timer_stop; + hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; + hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + hw_data->clock_frequency = ADF_420XX_AE_FREQ; + + adf_gen4_set_err_mask(&hw_data->dev_err_mask); + adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); + adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); + adf_gen4_init_dc_ops(&hw_data->dc_ops); + adf_gen4_init_ras_ops(&hw_data->ras_ops); + adf_init_rl_data(&hw_data->rl_data); +} + +void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; +} diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h new file mode 100644 index 000000000000..99abbfc14820 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_420XX_HW_DATA_H_ +#define ADF_420XX_HW_DATA_H_ + +#include + +#define ADF_420XX_MAX_ACCELENGINES 17 + +#define ADF_420XX_ACCELENGINES_MASK 0x1FFFF +#define ADF_420XX_ADMIN_AE_MASK 0x10000 + +#define ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK (0xFF) +#define ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK (0xFF00FF) +#define ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK (0x10001) +#define ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK (0xF0007) +#define ADF_420XX_PARITYERRORMASK_PKE_MASK (0xFFF) +#define ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK (0x3FF03FF) + +/* + * SSMFEATREN bit mask + * BIT(4) - enables parity detection on CPP + * BIT(12) - enables the logging of push/pull data errors + * in pperr register + * BIT(16) - BIT(27) - enable parity detection on SPPs + */ +#define ADF_420XX_SSMFEATREN_MASK \ + (BIT(4) | BIT(12) | BIT(16) | 
BIT(17) | BIT(18) | BIT(19) | BIT(20) | \ + BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27)) + +/* Firmware Binaries */ +#define ADF_420XX_FW "qat_420xx.bin" +#define ADF_420XX_MMP "qat_420xx_mmp.bin" +#define ADF_420XX_SYM_OBJ "qat_420xx_sym.bin" +#define ADF_420XX_DC_OBJ "qat_420xx_dc.bin" +#define ADF_420XX_ASYM_OBJ "qat_420xx_asym.bin" +#define ADF_420XX_ADMIN_OBJ "qat_420xx_admin.bin" + +/* RL constants */ +#define ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV 100 +#define ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL 102 +#define ADF_420XX_RL_DCPR_CORRECTION 1 +#define ADF_420XX_RL_SCANS_PER_SEC 954 +#define ADF_420XX_RL_MAX_TP_ASYM 173750UL +#define ADF_420XX_RL_MAX_TP_SYM 95000UL +#define ADF_420XX_RL_MAX_TP_DC 40000UL +#define ADF_420XX_RL_SLICE_REF 1000UL + +/* Clocks frequency */ +#define ADF_420XX_AE_FREQ (1000 * HZ_PER_MHZ) + +void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id); +void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data); + +#endif diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c new file mode 100644 index 000000000000..2a3598409eeb --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "adf_420xx_hw_data.h" + +static const struct pci_device_id adf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), }, + { } +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + if (accel_dev->hw_device) { + adf_clean_hw_data_420xx(accel_dev->hw_device); + accel_dev->hw_device = NULL; + } + adf_dbgfs_exit(accel_dev); + adf_cfg_dev_remove(accel_dev); + adf_devmgr_rm_dev(accel_dev, NULL); +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct 
adf_accel_dev *accel_dev; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + unsigned int i, bar_nr; + unsigned long bar_mask; + struct adf_bar *bar; + int ret; + + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + /* + * If the accelerator is connected to a node with no memory + * there is no point in using the accelerator since the remote + * memory transaction will be very slow. + */ + dev_err(&pdev->dev, "Invalid NUMA configuration.\n"); + return -EINVAL; + } + + accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL); + if (!accel_dev) + return -ENOMEM; + + INIT_LIST_HEAD(&accel_dev->crypto_list); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + + /* + * Add accel device to accel table + * This should be called before adf_cleanup_accel is called + */ + if (adf_devmgr_add_dev(accel_dev, NULL)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + return -EFAULT; + } + + accel_dev->owner = THIS_MODULE; + /* Allocate and initialise device hardware meta-data structure */ + hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + + accel_dev->hw_device = hw_data; + adf_init_hw_data_420xx(accel_dev->hw_device, ent->device); + + pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + /* If the device has no acceleration engines then ignore it */ + if (!hw_data->accel_mask || !hw_data->ae_mask || + (~hw_data->ae_mask & 0x01)) { + dev_err(&pdev->dev, "No acceleration units found.\n"); + ret = -EFAULT; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + 
if (ret) + goto out_err; + + /* Enable PCI device */ + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "Can't enable PCI device.\n"); + goto out_err; + } + + /* Set DMA identifier */ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "No usable DMA configuration.\n"); + goto out_err; + } + + ret = adf_gen4_cfg_dev_init(accel_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize configuration.\n"); + goto out_err; + } + + /* Get accelerator capabilities mask */ + hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); + if (!hw_data->accel_capabilities_mask) { + dev_err(&pdev->dev, "Failed to get capabilities mask.\n"); + ret = -EINVAL; + goto out_err; + } + + /* Find and map all the device's BARS */ + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK; + + ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "Failed to map pci regions.\n"); + goto out_err; + } + + i = 0; + for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) { + bar = &accel_pci_dev->pci_bars[i++]; + bar->virt_addr = pcim_iomap_table(pdev)[bar_nr]; + } + + pci_set_master(pdev); + + if (pci_save_state(pdev)) { + dev_err(&pdev->dev, "Failed to save pci state.\n"); + ret = -ENOMEM; + goto out_err; + } + + accel_dev->ras_errors.enabled = true; + adf_dbgfs_init(accel_dev); + + ret = adf_dev_up(accel_dev, true); + if (ret) + goto out_err_dev_stop; + + ret = adf_sysfs_init(accel_dev); + if (ret) + goto out_err_dev_stop; + + return ret; + +out_err_dev_stop: + adf_dev_down(accel_dev, false); +out_err: + adf_cleanup_accel(accel_dev); + return ret; +} + +static void adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + adf_dev_down(accel_dev, false); + adf_cleanup_accel(accel_dev); +} + +static struct pci_driver 
adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_420XX_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, + .sriov_configure = adf_sriov_configure, + .err_handler = &adf_err_handler, +}; + +module_pci_driver(adf_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Intel"); +MODULE_FIRMWARE(ADF_420XX_FW); +MODULE_FIRMWARE(ADF_420XX_MMP); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); +MODULE_SOFTDEP("pre: crypto-intel_qat"); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 33de8855fd66..7df6336ddd62 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -19,12 +19,15 @@ #define ADF_C3XXX_DEVICE_NAME "c3xxx" #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf" #define ADF_4XXX_DEVICE_NAME "4xxx" +#define ADF_420XX_DEVICE_NAME "420xx" #define ADF_4XXX_PCI_DEVICE_ID 0x4940 #define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941 #define ADF_401XX_PCI_DEVICE_ID 0x4942 #define ADF_401XXIOV_PCI_DEVICE_ID 0x4943 #define ADF_402XX_PCI_DEVICE_ID 0x4944 #define ADF_402XXIOV_PCI_DEVICE_ID 0x4945 +#define ADF_420XX_PCI_DEVICE_ID 0x4946 +#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947 #define ADF_DEVICE_FUSECTL_OFFSET 0x40 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C #define ADF_DEVICE_FUSECTL_MASK 0x80000000 diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h index 6e5de1dab97b..89df3888d7ea 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h @@ -47,6 +47,7 @@ enum adf_device_type { DEV_C3XXX, DEV_C3XXXVF, DEV_4XXX, + DEV_420XX, }; struct adf_dev_status_info { diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index b42fb8048c04..051ad20581a6 100644 --- 
a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -202,6 +202,8 @@ enum icp_qat_gen4_slice_mask { ICP_ACCEL_GEN4_MASK_UCS_SLICE = BIT(4), ICP_ACCEL_GEN4_MASK_EIA3_SLICE = BIT(5), ICP_ACCEL_GEN4_MASK_SMX_SLICE = BIT(7), + ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE = BIT(8), + ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE = BIT(9), }; void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h index eb2ef225bcee..b8f1c4ffb8b5 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h @@ -18,7 +18,12 @@ enum icp_qat_hw_ae_id { ICP_QAT_HW_AE_9 = 9, ICP_QAT_HW_AE_10 = 10, ICP_QAT_HW_AE_11 = 11, - ICP_QAT_HW_AE_DELIMITER = 12 + ICP_QAT_HW_AE_12 = 12, + ICP_QAT_HW_AE_13 = 13, + ICP_QAT_HW_AE_14 = 14, + ICP_QAT_HW_AE_15 = 15, + ICP_QAT_HW_AE_16 = 16, + ICP_QAT_HW_AE_DELIMITER = 17 }; enum icp_qat_hw_qat_id { @@ -95,7 +100,7 @@ enum icp_qat_capabilities_mask { /* Bits 10-11 are currently reserved */ ICP_ACCEL_CAPABILITIES_HKDF = BIT(12), ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13), - /* Bit 14 is currently reserved */ + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN = BIT(14), ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15), ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16), ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17), @@ -107,7 +112,10 @@ enum icp_qat_capabilities_mask { ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23), ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24), ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25), - ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26) + ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26), + /* Bits 27-28 are currently reserved */ + ICP_ACCEL_CAPABILITIES_ZUC_256 = BIT(29), + ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT = BIT(30), }; #define QAT_AUTH_MODE_BITPOS 4 diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h 
b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h index 69482abdb8b9..e28241bdd0f4 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h @@ -7,7 +7,7 @@ #define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000 #define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000 #define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000 -#define ICP_QAT_UCLO_MAX_AE 12 +#define ICP_QAT_UCLO_MAX_AE 17 #define ICP_QAT_UCLO_MAX_CTX 8 #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) #define ICP_QAT_UCLO_MAX_USTORE 0x4000 diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c index cbb946a80076..317cafa9d11f 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_hal.c +++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c @@ -697,12 +697,16 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle, case ADF_4XXX_PCI_DEVICE_ID: case ADF_401XX_PCI_DEVICE_ID: case ADF_402XX_PCI_DEVICE_ID: + case ADF_420XX_PCI_DEVICE_ID: handle->chip_info->mmp_sram_size = 0; handle->chip_info->nn = false; handle->chip_info->lm2lm3 = true; handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X; handle->chip_info->icp_rst_csr = ICP_RESET_CPP0; - handle->chip_info->icp_rst_mask = 0x100015; + if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID) + handle->chip_info->icp_rst_mask = 0x100155; + else + handle->chip_info->icp_rst_mask = 0x100015; handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0; handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX; handle->chip_info->wakeup_event_val = 0x80000000; diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index e27ea7e28c51..ad2c64af7427 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -733,6 +733,7 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) case ADF_4XXX_PCI_DEVICE_ID: case 
ADF_401XX_PCI_DEVICE_ID: case ADF_402XX_PCI_DEVICE_ID: + case ADF_420XX_PCI_DEVICE_ID: return ICP_QAT_AC_4XXX_A_DEV_TYPE; default: pr_err("QAT: unsupported device 0x%x\n", -- Gitee From e28089da826b2b0ed1677b7b6f2c811daeb28a29 Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Fri, 22 Dec 2023 11:35:05 +0100 Subject: [PATCH 0732/2138] crypto: qat - include pci.h for GET_DEV() ANBZ: #8589 commit b6e4b6eb1e6393580482581470a3a08c15ab977b upstream. Intel-SIG: commit b6e4b6eb1e63 crypto: qat - include pci.h for GET_DEV() Backport to support Intel QAT in-tree driver GET_DEV() macro expansion relies on struct pci_dev being defined. Include at adf_accel_devices.h. Signed-off-by: Lucas Segarra Fernandez Reviewed-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 7df6336ddd62..fc7786d71e96 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include "adf_cfg_common.h" -- Gitee From e2ea979da6268b39aa8c224fe6cbf7ddd605325b Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Fri, 22 Dec 2023 11:35:06 +0100 Subject: [PATCH 0733/2138] crypto: qat - add admin msgs for telemetry ANBZ: #8589 commit 7f06679dd54a331d750e5d6f6f04a9df2eba72ff upstream. Intel-SIG: commit 7f06679dd54a crypto: qat - add admin msgs for telemetry Backport to support Intel QAT in-tree driver Extend the admin interface with two new public APIs to enable and disable the telemetry feature: adf_send_admin_tl_start() and adf_send_admin_tl_stop(). 
The first, sends to the firmware, through the ICP_QAT_FW_TL_START message, the IO address where the firmware will write telemetry metrics and a list of ring pairs (maximum 4) to be monitored. It returns the number of accelerators of each type supported by this hardware. After this message is sent, the firmware starts periodically reporting telemetry data by writing into the dma buffer specified as input. The second, sends the admin message ICP_QAT_FW_TL_STOP which stops the reporting of telemetry data. This patch is based on earlier work done by Wojciech Ziemba. Signed-off-by: Lucas Segarra Fernandez Reviewed-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../crypto/intel/qat/qat_common/adf_admin.c | 37 +++++++++++++++++++ .../crypto/intel/qat/qat_common/adf_admin.h | 4 ++ .../qat/qat_common/icp_qat_fw_init_admin.h | 10 +++++ 3 files changed, 51 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 54b673ec2362..acad526eb741 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -498,6 +498,43 @@ int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, return ret; } +int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev, + dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes, + struct icp_qat_fw_init_admin_slice_cnt *slice_count) +{ + u32 ae_mask = GET_HW_DATA(accel_dev)->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + int ret; + + req.cmd_id = ICP_QAT_FW_TL_START; + req.init_cfg_ptr = tl_dma_addr; + req.init_cfg_sz = layout_sz; + + if (rp_indexes) + memcpy(&req.rp_indexes, rp_indexes, sizeof(req.rp_indexes)); + + ret = adf_send_admin(accel_dev, &req, &resp, 
ae_mask); + if (ret) + return ret; + + memcpy(slice_count, &resp.slices, sizeof(*slice_count)); + + return 0; +} + +int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + u32 ae_mask = hw_data->admin_ae_mask; + + req.cmd_id = ICP_QAT_FW_TL_STOP; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); +} + int adf_init_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin; diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h b/drivers/crypto/intel/qat/qat_common/adf_admin.h index 55cbcbc66c9f..647c8e196752 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.h +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h @@ -23,5 +23,9 @@ int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id, int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size); int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err); +int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev, + dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes, + struct icp_qat_fw_init_admin_slice_cnt *slice_count); +int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index cd418b51d9f3..63cf18e2a4e5 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -29,6 +29,8 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_RL_ADD = 134, ICP_QAT_FW_RL_UPDATE = 135, ICP_QAT_FW_RL_REMOVE = 136, + ICP_QAT_FW_TL_START = 137, + ICP_QAT_FW_TL_STOP = 138, }; enum icp_qat_fw_init_admin_resp_status { @@ -36,6 +38,13 @@ 
enum icp_qat_fw_init_admin_resp_status { ICP_QAT_FW_INIT_RESP_STATUS_FAIL }; +struct icp_qat_fw_init_admin_tl_rp_indexes { + __u8 rp_num_index_0; + __u8 rp_num_index_1; + __u8 rp_num_index_2; + __u8 rp_num_index_3; +}; + struct icp_qat_fw_init_admin_slice_cnt { __u8 cpr_cnt; __u8 xlt_cnt; @@ -87,6 +96,7 @@ struct icp_qat_fw_init_admin_req { __u8 rp_count; }; __u32 idle_filter; + struct icp_qat_fw_init_admin_tl_rp_indexes rp_indexes; }; __u32 resrvd4; -- Gitee From f486d11b44b17da6b12fa015a63096758cc821e7 Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Fri, 22 Dec 2023 11:35:07 +0100 Subject: [PATCH 0734/2138] crypto: qat - add support for device telemetry ANBZ: #8589 commit 69e7649f7cc2aaa7889174456d39319a623c1a18 upstream. Intel-SIG: commit 69e7649f7cc2 crypto: qat - add support for device telemetry Backport to support Intel QAT in-tree driver Expose through debugfs device telemetry data for QAT GEN4 devices. This allows to gather metrics about the performance and the utilization of a device. In particular, statistics on (1) the utilization of the PCIe channel, (2) address translation, when SVA is enabled and (3) the internal engines for crypto and data compression. If telemetry is supported by the firmware, the driver allocates a DMA region and a circular buffer. When telemetry is enabled, through the `control` attribute in debugfs, the driver sends to the firmware, via the admin interface, the `TL_START` command. This triggers the device to periodically gather telemetry data from hardware registers and write it into the DMA memory region. The device writes into the shared region every second. The driver, every 500ms, snapshots the DMA shared region into the circular buffer. This is then used to compute basic metric (min/max/average) on each counter, every time the `device_data` attribute is queried. Telemetry counters are exposed through debugfs in the folder /sys/kernel/debug/qat__/telemetry. 
For details, refer to debugfs-driver-qat_telemetry in Documentation/ABI. This patch is based on earlier work done by Wojciech Ziemba. Signed-off-by: Lucas Segarra Fernandez Reviewed-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../ABI/testing/debugfs-driver-qat_telemetry | 103 ++++ .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 2 + .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 2 + drivers/crypto/intel/qat/qat_common/Makefile | 3 + .../intel/qat/qat_common/adf_accel_devices.h | 4 + .../crypto/intel/qat/qat_common/adf_dbgfs.c | 3 + .../crypto/intel/qat/qat_common/adf_gen4_tl.c | 118 ++++ .../crypto/intel/qat/qat_common/adf_gen4_tl.h | 121 +++++ .../crypto/intel/qat/qat_common/adf_init.c | 12 + .../intel/qat/qat_common/adf_telemetry.c | 271 ++++++++++ .../intel/qat/qat_common/adf_telemetry.h | 92 ++++ .../intel/qat/qat_common/adf_tl_debugfs.c | 502 ++++++++++++++++++ .../intel/qat/qat_common/adf_tl_debugfs.h | 106 ++++ 13 files changed, 1339 insertions(+) create mode 100644 Documentation/ABI/testing/debugfs-driver-qat_telemetry create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h create mode 100644 drivers/crypto/intel/qat/qat_common/adf_telemetry.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_telemetry.h create mode 100644 drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h diff --git a/Documentation/ABI/testing/debugfs-driver-qat_telemetry b/Documentation/ABI/testing/debugfs-driver-qat_telemetry new file mode 100644 index 000000000000..24532365387c --- /dev/null +++ b/Documentation/ABI/testing/debugfs-driver-qat_telemetry @@ -0,0 +1,103 @@ +What: /sys/kernel/debug/qat__/telemetry/control +Date: March 2024 +KernelVersion: 6.8 
+Contact: qat-linux@intel.com +Description: (RW) Enables/disables the reporting of telemetry metrics. + + Allowed values to write: + ======================== + * 0: disable telemetry + * 1: enable telemetry + * 2, 3, 4: enable telemetry and calculate minimum, maximum + and average for each counter over 2, 3 or 4 samples + + Returned values: + ================ + * 1-4: telemetry is enabled and running + * 0: telemetry is disabled + + Example. + + Writing '3' to this file starts the collection of + telemetry metrics. Samples are collected every second and + stored in a circular buffer of size 3. These values are then + used to calculate the minimum, maximum and average for each + counter. After enabling, counters can be retrieved through + the ``device_data`` file:: + + echo 3 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/control + + Writing '0' to this file stops the collection of telemetry + metrics:: + + echo 0 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/control + + This attribute is only available for qat_4xxx devices. + +What: /sys/kernel/debug/qat__/telemetry/device_data +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (RO) Reports device telemetry counters. + Reads report metrics about performance and utilization of + a QAT device: + + ======================= ======================================== + Field Description + ======================= ======================================== + sample_cnt number of acquisitions of telemetry data + from the device. Reads are performed + every 1000 ms. 
+ pci_trans_cnt number of PCIe partial transactions + max_rd_lat maximum logged read latency [ns] (could + be any read operation) + rd_lat_acc_avg average read latency [ns] + max_gp_lat max get to put latency [ns] (only takes + samples for AE0) + gp_lat_acc_avg average get to put latency [ns] + bw_in PCIe, write bandwidth [Mbps] + bw_out PCIe, read bandwidth [Mbps] + at_page_req_lat_avg Address Translator(AT), average page + request latency [ns] + at_trans_lat_avg AT, average page translation latency [ns] + at_max_tlb_used AT, maximum uTLB used + util_cpr utilization of Compression slice N [%] + exec_cpr execution count of Compression slice N + util_xlt utilization of Translator slice N [%] + exec_xlt execution count of Translator slice N + util_dcpr utilization of Decompression slice N [%] + exec_dcpr execution count of Decompression slice N + util_pke utilization of PKE N [%] + exec_pke execution count of PKE N + util_ucs utilization of UCS slice N [%] + exec_ucs execution count of UCS slice N + util_wat utilization of Wireless Authentication + slice N [%] + exec_wat execution count of Wireless Authentication + slice N + util_wcp utilization of Wireless Cipher slice N [%] + exec_wcp execution count of Wireless Cipher slice N + util_cph utilization of Cipher slice N [%] + exec_cph execution count of Cipher slice N + util_ath utilization of Authentication slice N [%] + exec_ath execution count of Authentication slice N + ======================= ======================================== + + The telemetry report file can be read with the following command:: + + cat /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/device_data + + If ``control`` is set to 1, only the current values of the + counters are displayed:: + + + + If ``control`` is 2, 3 or 4, counters are displayed in the + following format:: + + + + If a device lacks a specific accelerator, the corresponding + attribute is not reported. + + This attribute is only available for qat_4xxx devices. 
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index d296eb18db3c..a7730d8057d6 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "adf_420xx_hw_data.h" #include "icp_qat_hw.h" @@ -543,6 +544,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen4_init_dc_ops(&hw_data->dc_ops); adf_gen4_init_ras_ops(&hw_data->ras_ops); + adf_gen4_init_tl_data(&hw_data->tl_data); adf_init_rl_data(&hw_data->rl_data); } diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index a8ab40db7b28..9c0e5a72d8eb 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -15,6 +15,7 @@ #include #include "adf_gen4_ras.h" #include +#include #include "adf_4xxx_hw_data.h" #include "icp_qat_hw.h" @@ -461,6 +462,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen4_init_dc_ops(&hw_data->dc_ops); adf_gen4_init_ras_ops(&hw_data->ras_ops); + adf_gen4_init_tl_data(&hw_data->tl_data); adf_init_rl_data(&hw_data->rl_data); } diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 928de6997155..6908727bff3b 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -41,9 +41,12 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ adf_fw_counters.o \ adf_cnv_dbgfs.o \ adf_gen4_pm_debugfs.o \ + adf_gen4_tl.o \ adf_heartbeat.o \ adf_heartbeat_dbgfs.o \ adf_pm_dbgfs.o \ + adf_telemetry.o \ + adf_tl_debugfs.o \ adf_dbgfs.o intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o 
adf_pfvf_utils.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index fc7786d71e96..b274ebc799c9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -11,6 +11,7 @@ #include #include "adf_cfg_common.h" #include "adf_rl.h" +#include "adf_telemetry.h" #include "adf_pfvf_msg.h" #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" @@ -254,6 +255,7 @@ struct adf_hw_device_data { struct adf_ras_ops ras_ops; struct adf_dev_err_mask dev_err_mask; struct adf_rl_hw_data rl_data; + struct adf_tl_hw_data tl_data; const char *fw_name; const char *fw_mmp_name; u32 fuses; @@ -308,6 +310,7 @@ struct adf_hw_device_data { #define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops) #define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops) #define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops) +#define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data #define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev struct adf_admin_comms; @@ -356,6 +359,7 @@ struct adf_accel_dev { struct adf_cfg_device_data *cfg; struct adf_fw_loader_data *fw_loader; struct adf_admin_comms *admin; + struct adf_telemetry *telemetry; struct adf_dc_data *dc_data; struct adf_pm power_management; struct list_head crypto_list; diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c index 4f0df367c9e1..4c11ad1ebcf0 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c @@ -10,6 +10,7 @@ #include "adf_fw_counters.h" #include "adf_heartbeat_dbgfs.h" #include "adf_pm_dbgfs.h" +#include "adf_tl_debugfs.h" /** * adf_dbgfs_init() - add persistent debugfs entries @@ -58,6 +59,7 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev) adf_heartbeat_dbgfs_add(accel_dev); adf_pm_dbgfs_add(accel_dev); 
adf_cnv_dbgfs_add(accel_dev); + adf_tl_dbgfs_add(accel_dev); } } @@ -68,6 +70,7 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev) void adf_dbgfs_rm(struct adf_accel_dev *accel_dev) { if (!accel_dev->is_vf) { + adf_tl_dbgfs_rm(accel_dev); adf_cnv_dbgfs_rm(accel_dev); adf_pm_dbgfs_rm(accel_dev); adf_heartbeat_dbgfs_rm(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c new file mode 100644 index 000000000000..4efbe6bc651c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Intel Corporation. */ +#include +#include + +#include "adf_gen4_tl.h" +#include "adf_telemetry.h" +#include "adf_tl_debugfs.h" + +#define ADF_GEN4_TL_DEV_REG_OFF(reg) ADF_TL_DEV_REG_OFF(reg, gen4) + +#define ADF_GEN4_TL_SL_UTIL_COUNTER(_name) \ + ADF_TL_COUNTER("util_" #_name, \ + ADF_TL_SIMPLE_COUNT, \ + ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_util, gen4)) + +#define ADF_GEN4_TL_SL_EXEC_COUNTER(_name) \ + ADF_TL_COUNTER("exec_" #_name, \ + ADF_TL_SIMPLE_COUNT, \ + ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_exec_cnt, gen4)) + +/* Device level counters. */ +static const struct adf_tl_dbg_counter dev_counters[] = { + /* PCIe partial transactions. */ + ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_pci_trans_cnt)), + /* Max read latency[ns]. */ + ADF_TL_COUNTER(MAX_RD_LAT_NAME, ADF_TL_COUNTER_NS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_lat_max)), + /* Read latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(RD_LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_cmpl_cnt)), + /* Max get to put latency[ns]. */ + ADF_TL_COUNTER(MAX_LAT_NAME, ADF_TL_COUNTER_NS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_gp_lat_max)), + /* Get to put latency average[ns]. 
*/ + ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_gp_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_ae_put_cnt)), + /* PCIe write bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_bw_in)), + /* PCIe read bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_bw_out)), + /* Page request latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(PAGE_REQ_LAT_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_page_req_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_page_req_cnt)), + /* Page translation latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(AT_TRANS_LAT_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_trans_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_trans_lat_cnt)), + /* Maximum uTLB used. */ + ADF_TL_COUNTER(AT_MAX_UTLB_USED_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_max_tlb_used)), +}; + +/* Slice utilization counters. */ +static const struct adf_tl_dbg_counter sl_util_counters[ADF_TL_SL_CNT_COUNT] = { + /* Compression slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(cpr), + /* Translator slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(xlt), + /* Decompression slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(dcpr), + /* PKE utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(pke), + /* Wireless Authentication slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(wat), + /* Wireless Cipher slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(wcp), + /* UCS slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(ucs), + /* Cipher slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(cph), + /* Authentication slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(ath), +}; + +/* Slice execution counters. */ +static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = { + /* Compression slice execution count. 
*/ + ADF_GEN4_TL_SL_EXEC_COUNTER(cpr), + /* Translator slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(xlt), + /* Decompression slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(dcpr), + /* PKE execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(pke), + /* Wireless Authentication slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(wat), + /* Wireless Cipher slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(wcp), + /* UCS slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(ucs), + /* Cipher slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(cph), + /* Authentication slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(ath), +}; + +void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) +{ + tl_data->layout_sz = ADF_GEN4_TL_LAYOUT_SZ; + tl_data->slice_reg_sz = ADF_GEN4_TL_SLICE_REG_SZ; + tl_data->num_hbuff = ADF_GEN4_TL_NUM_HIST_BUFFS; + tl_data->msg_cnt_off = ADF_GEN4_TL_MSG_CNT_OFF; + tl_data->cpp_ns_per_cycle = ADF_GEN4_CPP_NS_PER_CYCLE; + tl_data->bw_units_to_bytes = ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES; + + tl_data->dev_counters = dev_counters; + tl_data->num_dev_counters = ARRAY_SIZE(dev_counters); + tl_data->sl_util_counters = sl_util_counters; + tl_data->sl_exec_counters = sl_exec_counters; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h new file mode 100644 index 000000000000..feb2eecf24cf --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2023 Intel Corporation. */ +#ifndef ADF_GEN4_TL_H +#define ADF_GEN4_TL_H + +#include +#include + +struct adf_tl_hw_data; + +/* Computation constants. */ +#define ADF_GEN4_CPP_NS_PER_CYCLE 2 +#define ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES 64 + +/* Maximum aggregation time. Value in milliseconds. */ +#define ADF_GEN4_TL_MAX_AGGR_TIME_MS 4000 +/* Num of buffers to store historic values. 
*/ +#define ADF_GEN4_TL_NUM_HIST_BUFFS \ + (ADF_GEN4_TL_MAX_AGGR_TIME_MS / ADF_TL_DATA_WR_INTERVAL_MS) + +/* Max number of HW resources of one type. */ +#define ADF_GEN4_TL_MAX_SLICES_PER_TYPE 24 + +/** + * struct adf_gen4_tl_slice_data_regs - HW slice data as populated by FW. + * @reg_tm_slice_exec_cnt: Slice execution count. + * @reg_tm_slice_util: Slice utilization. + */ +struct adf_gen4_tl_slice_data_regs { + __u32 reg_tm_slice_exec_cnt; + __u32 reg_tm_slice_util; +}; + +#define ADF_GEN4_TL_SLICE_REG_SZ sizeof(struct adf_gen4_tl_slice_data_regs) + +/** + * struct adf_gen4_tl_device_data_regs - This structure stores device telemetry + * counter values as are being populated periodically by device. + * @reg_tl_rd_lat_acc: read latency accumulator + * @reg_tl_gp_lat_acc: get-put latency accumulator + * @reg_tl_at_page_req_lat_acc: AT/DevTLB page request latency accumulator + * @reg_tl_at_trans_lat_acc: DevTLB transaction latency accumulator + * @reg_tl_re_acc: accumulated ring empty time + * @reg_tl_pci_trans_cnt: PCIe partial transactions + * @reg_tl_rd_lat_max: maximum logged read latency + * @reg_tl_rd_cmpl_cnt: read requests completed count + * @reg_tl_gp_lat_max: maximum logged get to put latency + * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings + * @reg_tl_bw_in: PCIe write bandwidth + * @reg_tl_bw_out: PCIe read bandwidth + * @reg_tl_at_page_req_cnt: DevTLB page requests count + * @reg_tl_at_trans_lat_cnt: DevTLB transaction latency samples count + * @reg_tl_at_max_tlb_used: maximum uTLB used + * @reg_tl_re_cnt: ring empty time samples count + * @reserved: reserved + * @ath_slices: array of Authentication slices utilization registers + * @cph_slices: array of Cipher slices utilization registers + * @cpr_slices: array of Compression slices utilization registers + * @xlt_slices: array of Translator slices utilization registers + * @dcpr_slices: array of Decompression slices utilization registers + * @pke_slices: array of PKE slices 
utilization registers + * @ucs_slices: array of UCS slices utilization registers + * @wat_slices: array of Wireless Authentication slices utilization registers + * @wcp_slices: array of Wireless Cipher slices utilization registers + */ +struct adf_gen4_tl_device_data_regs { + __u64 reg_tl_rd_lat_acc; + __u64 reg_tl_gp_lat_acc; + __u64 reg_tl_at_page_req_lat_acc; + __u64 reg_tl_at_trans_lat_acc; + __u64 reg_tl_re_acc; + __u32 reg_tl_pci_trans_cnt; + __u32 reg_tl_rd_lat_max; + __u32 reg_tl_rd_cmpl_cnt; + __u32 reg_tl_gp_lat_max; + __u32 reg_tl_ae_put_cnt; + __u32 reg_tl_bw_in; + __u32 reg_tl_bw_out; + __u32 reg_tl_at_page_req_cnt; + __u32 reg_tl_at_trans_lat_cnt; + __u32 reg_tl_at_max_tlb_used; + __u32 reg_tl_re_cnt; + __u32 reserved; + struct adf_gen4_tl_slice_data_regs ath_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs cph_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs cpr_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs xlt_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs dcpr_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs pke_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs ucs_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs wat_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs wcp_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; +}; + +/** + * struct adf_gen4_tl_layout - This structure represents entire telemetry + * counters data: Device + 4 Ring Pairs as are being populated periodically + * by device. 
+ * @tl_device_data_regs: structure of device telemetry registers + * @reserved1: reserved + * @reg_tl_msg_cnt: telemetry messages counter + * @reserved: reserved + */ +struct adf_gen4_tl_layout { + struct adf_gen4_tl_device_data_regs tl_device_data_regs; + __u32 reserved1[14]; + __u32 reg_tl_msg_cnt; + __u32 reserved; +}; + +#define ADF_GEN4_TL_LAYOUT_SZ sizeof(struct adf_gen4_tl_layout) +#define ADF_GEN4_TL_MSG_CNT_OFF offsetof(struct adf_gen4_tl_layout, reg_tl_msg_cnt) + +#ifdef CONFIG_DEBUG_FS +void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data); +#else +static inline void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) +{ +} +#endif /* CONFIG_DEBUG_FS */ +#endif /* ADF_GEN4_TL_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 81c39f3d07e1..f43ae9111553 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -11,6 +11,7 @@ #include "adf_heartbeat.h" #include "adf_rl.h" #include "adf_sysfs_ras_counters.h" +#include "adf_telemetry.h" static LIST_HEAD(service_table); static DEFINE_MUTEX(service_lock); @@ -142,6 +143,10 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev) if (ret && ret != -EOPNOTSUPP) return ret; + ret = adf_tl_init(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; + /* * Subservice initialisation is divided into two stages: init and start. 
* This is to facilitate any ordering dependencies between services @@ -220,6 +225,10 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) if (ret && ret != -EOPNOTSUPP) return ret; + ret = adf_tl_start(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_START)) { dev_err(&GET_DEV(accel_dev), @@ -279,6 +288,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) return; + adf_tl_stop(accel_dev); adf_rl_stop(accel_dev); adf_dbgfs_rm(accel_dev); adf_sysfs_stop_ras(accel_dev); @@ -374,6 +384,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) adf_heartbeat_shutdown(accel_dev); + adf_tl_shutdown(accel_dev); + hw_data->disable_iov(accel_dev); if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c new file mode 100644 index 000000000000..05c476d58895 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Intel Corporation. 
*/ +#define dev_fmt(fmt) "Telemetry: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "adf_admin.h" +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_telemetry.h" + +#define TL_IS_ZERO(input) ((input) == 0) + +static bool is_tl_supported(struct adf_accel_dev *accel_dev) +{ + u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities; + + return fw_caps & TL_CAPABILITY_BIT; +} + +static int validate_tl_data(struct adf_tl_hw_data *tl_data) +{ + if (!tl_data->dev_counters || + TL_IS_ZERO(tl_data->num_dev_counters) || + !tl_data->sl_util_counters || + !tl_data->sl_exec_counters) + return -EOPNOTSUPP; + + return 0; +} + +static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct device *dev = &GET_DEV(accel_dev); + size_t regs_sz = tl_data->layout_sz; + struct adf_telemetry *telemetry; + int node = dev_to_node(dev); + void *tl_data_regs; + unsigned int i; + + telemetry = kzalloc_node(sizeof(*telemetry), GFP_KERNEL, node); + if (!telemetry) + return -ENOMEM; + + telemetry->regs_hist_buff = kmalloc_array(tl_data->num_hbuff, + sizeof(*telemetry->regs_hist_buff), + GFP_KERNEL); + if (!telemetry->regs_hist_buff) + goto err_free_tl; + + telemetry->regs_data = dma_alloc_coherent(dev, regs_sz, + &telemetry->regs_data_p, + GFP_KERNEL); + if (!telemetry->regs_data) + goto err_free_regs_hist_buff; + + for (i = 0; i < tl_data->num_hbuff; i++) { + tl_data_regs = kzalloc_node(regs_sz, GFP_KERNEL, node); + if (!tl_data_regs) + goto err_free_dma; + + telemetry->regs_hist_buff[i] = tl_data_regs; + } + + accel_dev->telemetry = telemetry; + + return 0; + +err_free_dma: + dma_free_coherent(dev, regs_sz, telemetry->regs_data, + telemetry->regs_data_p); + + while (i--) + kfree(telemetry->regs_hist_buff[i]); + +err_free_regs_hist_buff: + kfree(telemetry->regs_hist_buff); +err_free_tl: + kfree(telemetry); + + return 
-ENOMEM; +} + +static void adf_tl_free_mem(struct adf_accel_dev *accel_dev) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + size_t regs_sz = tl_data->layout_sz; + unsigned int i; + + for (i = 0; i < tl_data->num_hbuff; i++) + kfree(telemetry->regs_hist_buff[i]); + + dma_free_coherent(dev, regs_sz, telemetry->regs_data, + telemetry->regs_data_p); + + kfree(telemetry->regs_hist_buff); + kfree(telemetry); + accel_dev->telemetry = NULL; +} + +static unsigned long get_next_timeout(void) +{ + return msecs_to_jiffies(ADF_TL_TIMER_INT_MS); +} + +static void snapshot_regs(struct adf_telemetry *telemetry, size_t size) +{ + void *dst = telemetry->regs_hist_buff[telemetry->hb_num]; + void *src = telemetry->regs_data; + + memcpy(dst, src, size); +} + +static void tl_work_handler(struct work_struct *work) +{ + struct delayed_work *delayed_work; + struct adf_telemetry *telemetry; + struct adf_tl_hw_data *tl_data; + u32 msg_cnt, old_msg_cnt; + size_t layout_sz; + u32 *regs_data; + size_t id; + + delayed_work = to_delayed_work(work); + telemetry = container_of(delayed_work, struct adf_telemetry, work_ctx); + tl_data = &GET_TL_DATA(telemetry->accel_dev); + regs_data = telemetry->regs_data; + + id = tl_data->msg_cnt_off / sizeof(*regs_data); + layout_sz = tl_data->layout_sz; + + if (!atomic_read(&telemetry->state)) { + cancel_delayed_work_sync(&telemetry->work_ctx); + return; + } + + msg_cnt = regs_data[id]; + old_msg_cnt = msg_cnt; + if (msg_cnt == telemetry->msg_cnt) + goto out; + + mutex_lock(&telemetry->regs_hist_lock); + + snapshot_regs(telemetry, layout_sz); + + /* Check if data changed while updating it */ + msg_cnt = regs_data[id]; + if (old_msg_cnt != msg_cnt) + snapshot_regs(telemetry, layout_sz); + + telemetry->msg_cnt = msg_cnt; + telemetry->hb_num++; + telemetry->hb_num %= telemetry->hbuffs; + + mutex_unlock(&telemetry->regs_hist_lock); + +out: + 
adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout()); +} + +int adf_tl_halt(struct adf_accel_dev *accel_dev) +{ + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + int ret; + + cancel_delayed_work_sync(&telemetry->work_ctx); + atomic_set(&telemetry->state, 0); + + ret = adf_send_admin_tl_stop(accel_dev); + if (ret) + dev_err(dev, "failed to stop telemetry\n"); + + return ret; +} + +int adf_tl_run(struct adf_accel_dev *accel_dev, int state) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + size_t layout_sz = tl_data->layout_sz; + int ret; + + ret = adf_send_admin_tl_start(accel_dev, telemetry->regs_data_p, + layout_sz, NULL, &telemetry->slice_cnt); + if (ret) { + dev_err(dev, "failed to start telemetry\n"); + return ret; + } + + telemetry->hbuffs = state; + atomic_set(&telemetry->state, state); + + adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout()); + + return 0; +} + +int adf_tl_init(struct adf_accel_dev *accel_dev) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct device *dev = &GET_DEV(accel_dev); + struct adf_telemetry *telemetry; + int ret; + + ret = validate_tl_data(tl_data); + if (ret) + return ret; + + ret = adf_tl_alloc_mem(accel_dev); + if (ret) { + dev_err(dev, "failed to initialize: %d\n", ret); + return ret; + } + + telemetry = accel_dev->telemetry; + telemetry->accel_dev = accel_dev; + + mutex_init(&telemetry->wr_lock); + mutex_init(&telemetry->regs_hist_lock); + INIT_DELAYED_WORK(&telemetry->work_ctx, tl_work_handler); + + return 0; +} + +int adf_tl_start(struct adf_accel_dev *accel_dev) +{ + struct device *dev = &GET_DEV(accel_dev); + + if (!accel_dev->telemetry) + return -EOPNOTSUPP; + + if (!is_tl_supported(accel_dev)) { + dev_info(dev, "feature not supported by FW\n"); + adf_tl_free_mem(accel_dev); + return 
-EOPNOTSUPP; + } + + return 0; +} + +void adf_tl_stop(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->telemetry) + return; + + if (atomic_read(&accel_dev->telemetry->state)) + adf_tl_halt(accel_dev); +} + +void adf_tl_shutdown(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->telemetry) + return; + + adf_tl_free_mem(accel_dev); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h new file mode 100644 index 000000000000..08de17621467 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2023 Intel Corporation. */ +#ifndef ADF_TELEMETRY_H +#define ADF_TELEMETRY_H + +#include +#include +#include +#include + +#include "icp_qat_fw_init_admin.h" + +struct adf_accel_dev; +struct adf_tl_dbg_counter; +struct dentry; + +#define ADF_TL_SL_CNT_COUNT \ + (sizeof(struct icp_qat_fw_init_admin_slice_cnt) / sizeof(__u8)) + +#define TL_CAPABILITY_BIT BIT(1) +/* Interval within device writes data to DMA region. Value in milliseconds. */ +#define ADF_TL_DATA_WR_INTERVAL_MS 1000 +/* Interval within timer interrupt should be handled. Value in milliseconds. 
*/ +#define ADF_TL_TIMER_INT_MS (ADF_TL_DATA_WR_INTERVAL_MS / 2) + +struct adf_tl_hw_data { + size_t layout_sz; + size_t slice_reg_sz; + size_t msg_cnt_off; + const struct adf_tl_dbg_counter *dev_counters; + const struct adf_tl_dbg_counter *sl_util_counters; + const struct adf_tl_dbg_counter *sl_exec_counters; + u8 num_hbuff; + u8 cpp_ns_per_cycle; + u8 bw_units_to_bytes; + u8 num_dev_counters; +}; + +struct adf_telemetry { + struct adf_accel_dev *accel_dev; + atomic_t state; + u32 hbuffs; + int hb_num; + u32 msg_cnt; + dma_addr_t regs_data_p; /* bus address for DMA mapping */ + void *regs_data; /* virtual address for DMA mapping */ + /** + * @regs_hist_buff: array of pointers to copies of the last @hbuffs + * values of @regs_data + */ + void **regs_hist_buff; + struct dentry *dbg_dir; + /** + * @regs_hist_lock: protects from race conditions between write and read + * to the copies referenced by @regs_hist_buff + */ + struct mutex regs_hist_lock; + /** + * @wr_lock: protects from concurrent writes to debugfs telemetry files + */ + struct mutex wr_lock; + struct delayed_work work_ctx; + struct icp_qat_fw_init_admin_slice_cnt slice_cnt; +}; + +#ifdef CONFIG_DEBUG_FS +int adf_tl_init(struct adf_accel_dev *accel_dev); +int adf_tl_start(struct adf_accel_dev *accel_dev); +void adf_tl_stop(struct adf_accel_dev *accel_dev); +void adf_tl_shutdown(struct adf_accel_dev *accel_dev); +int adf_tl_run(struct adf_accel_dev *accel_dev, int state); +int adf_tl_halt(struct adf_accel_dev *accel_dev); +#else +static inline int adf_tl_init(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static inline int adf_tl_start(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static inline void adf_tl_stop(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_tl_shutdown(struct adf_accel_dev *accel_dev) +{ +} +#endif /* CONFIG_DEBUG_FS */ +#endif /* ADF_TELEMETRY_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c 
b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c new file mode 100644 index 000000000000..accb46d6ea3c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c @@ -0,0 +1,502 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Intel Corporation. */ +#define dev_fmt(fmt) "Telemetry debugfs: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_telemetry.h" +#include "adf_tl_debugfs.h" + +#define TL_VALUE_MIN_PADDING 20 +#define TL_KEY_MIN_PADDING 23 + +static int tl_collect_values_u32(struct adf_telemetry *telemetry, + size_t counter_offset, u64 *arr) +{ + unsigned int samples, hb_idx, i; + u32 *regs_hist_buff; + u32 counter_val; + + samples = min(telemetry->msg_cnt, telemetry->hbuffs); + hb_idx = telemetry->hb_num + telemetry->hbuffs - samples; + + mutex_lock(&telemetry->regs_hist_lock); + + for (i = 0; i < samples; i++) { + regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs]; + counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)]; + arr[i] = counter_val; + hb_idx++; + } + + mutex_unlock(&telemetry->regs_hist_lock); + + return samples; +} + +static int tl_collect_values_u64(struct adf_telemetry *telemetry, + size_t counter_offset, u64 *arr) +{ + unsigned int samples, hb_idx, i; + u64 *regs_hist_buff; + u64 counter_val; + + samples = min(telemetry->msg_cnt, telemetry->hbuffs); + hb_idx = telemetry->hb_num + telemetry->hbuffs - samples; + + mutex_lock(&telemetry->regs_hist_lock); + + for (i = 0; i < samples; i++) { + regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs]; + counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)]; + arr[i] = counter_val; + hb_idx++; + } + + mutex_unlock(&telemetry->regs_hist_lock); + + return samples; +} + +/** + * avg_array() - Return average of values within an array. + * @array: Array of values. + * @len: Number of elements. 
+ * + * This algorithm computes average of an array without running into overflow. + * + * Return: average of values. + */ +#define avg_array(array, len) ( \ +{ \ + typeof(&(array)[0]) _array = (array); \ + __unqual_scalar_typeof(_array[0]) _x = 0; \ + __unqual_scalar_typeof(_array[0]) _y = 0; \ + __unqual_scalar_typeof(_array[0]) _a, _b; \ + typeof(len) _len = (len); \ + size_t _i; \ + \ + for (_i = 0; _i < _len; _i++) { \ + _a = _array[_i]; \ + _b = do_div(_a, _len); \ + _x += _a; \ + if (_y >= _len - _b) { \ + _x++; \ + _y -= _len - _b; \ + } else { \ + _y += _b; \ + } \ + } \ + do_div(_y, _len); \ + (_x + _y); \ +}) + +/* Calculation function for simple counter. */ +static int tl_calc_count(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u64 *hist_vals; + int sample_cnt; + int ret = 0; + + hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals), + GFP_KERNEL); + if (!hist_vals) + return -ENOMEM; + + memset(vals, 0, sizeof(*vals)); + sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals); + if (!sample_cnt) + goto out_free_hist_vals; + + vals->curr = hist_vals[sample_cnt - 1]; + vals->min = min_array(hist_vals, sample_cnt); + vals->max = max_array(hist_vals, sample_cnt); + vals->avg = avg_array(hist_vals, sample_cnt); + +out_free_hist_vals: + kfree(hist_vals); + return ret; +} + +/* Convert CPP bus cycles to ns. 
*/ +static int tl_cycles_to_ns(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle; + int ret; + + ret = tl_calc_count(telemetry, ctr, vals); + if (ret) + return ret; + + vals->curr *= cpp_ns_per_cycle; + vals->min *= cpp_ns_per_cycle; + vals->max *= cpp_ns_per_cycle; + vals->avg *= cpp_ns_per_cycle; + + return 0; +} + +/* + * Compute latency cumulative average with division of accumulated value + * by sample count. Returned value is in ns. + */ +static int tl_lat_acc_avg(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle; + u8 num_hbuff = tl_data->num_hbuff; + int sample_cnt, i; + u64 *hist_vals; + u64 *hist_cnt; + int ret = 0; + + hist_vals = kmalloc_array(num_hbuff, sizeof(*hist_vals), GFP_KERNEL); + if (!hist_vals) + return -ENOMEM; + + hist_cnt = kmalloc_array(num_hbuff, sizeof(*hist_cnt), GFP_KERNEL); + if (!hist_cnt) { + ret = -ENOMEM; + goto out_free_hist_vals; + } + + memset(vals, 0, sizeof(*vals)); + sample_cnt = tl_collect_values_u64(telemetry, ctr->offset1, hist_vals); + if (!sample_cnt) + goto out_free_hist_cnt; + + tl_collect_values_u32(telemetry, ctr->offset2, hist_cnt); + + for (i = 0; i < sample_cnt; i++) { + /* Avoid division by 0 if count is 0. 
*/ + if (hist_cnt[i]) + hist_vals[i] = div_u64(hist_vals[i] * cpp_ns_per_cycle, + hist_cnt[i]); + else + hist_vals[i] = 0; + } + + vals->curr = hist_vals[sample_cnt - 1]; + vals->min = min_array(hist_vals, sample_cnt); + vals->max = max_array(hist_vals, sample_cnt); + vals->avg = avg_array(hist_vals, sample_cnt); + +out_free_hist_cnt: + kfree(hist_cnt); +out_free_hist_vals: + kfree(hist_vals); + return ret; +} + +/* Convert HW raw bandwidth units to Mbps. */ +static int tl_bw_hw_units_to_mbps(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u16 bw_hw_2_bits = tl_data->bw_units_to_bytes * BITS_PER_BYTE; + u64 *hist_vals; + int sample_cnt; + int ret = 0; + + hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals), + GFP_KERNEL); + if (!hist_vals) + return -ENOMEM; + + memset(vals, 0, sizeof(*vals)); + sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals); + if (!sample_cnt) + goto out_free_hist_vals; + + vals->curr = div_u64(hist_vals[sample_cnt - 1] * bw_hw_2_bits, MEGA); + vals->min = div_u64(min_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA); + vals->max = div_u64(max_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA); + vals->avg = div_u64(avg_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA); + +out_free_hist_vals: + kfree(hist_vals); + return ret; +} + +static void tl_seq_printf_counter(struct adf_telemetry *telemetry, + struct seq_file *s, const char *name, + struct adf_tl_dbg_aggr_values *vals) +{ + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, name); + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->curr); + if (atomic_read(&telemetry->state) > 1) { + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->min); + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->max); + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->avg); + } + seq_puts(s, "\n"); +} + +static int 
tl_calc_and_print_counter(struct adf_telemetry *telemetry, + struct seq_file *s, + const struct adf_tl_dbg_counter *ctr, + const char *name) +{ + const char *counter_name = name ? name : ctr->name; + enum adf_tl_counter_type type = ctr->type; + struct adf_tl_dbg_aggr_values vals; + int ret; + + switch (type) { + case ADF_TL_SIMPLE_COUNT: + ret = tl_calc_count(telemetry, ctr, &vals); + break; + case ADF_TL_COUNTER_NS: + ret = tl_cycles_to_ns(telemetry, ctr, &vals); + break; + case ADF_TL_COUNTER_NS_AVG: + ret = tl_lat_acc_avg(telemetry, ctr, &vals); + break; + case ADF_TL_COUNTER_MBPS: + ret = tl_bw_hw_units_to_mbps(telemetry, ctr, &vals); + break; + default: + return -EINVAL; + } + + if (ret) + return ret; + + tl_seq_printf_counter(telemetry, s, counter_name, &vals); + + return 0; +} + +static int tl_print_sl_counter(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct seq_file *s, u8 cnt_id) +{ + size_t sl_regs_sz = GET_TL_DATA(telemetry->accel_dev).slice_reg_sz; + struct adf_tl_dbg_counter slice_ctr; + size_t offset_inc = cnt_id * sl_regs_sz; + char cnt_name[MAX_COUNT_NAME_SIZE]; + + snprintf(cnt_name, MAX_COUNT_NAME_SIZE, "%s%d", ctr->name, cnt_id); + slice_ctr = *ctr; + slice_ctr.offset1 += offset_inc; + + return tl_calc_and_print_counter(telemetry, s, &slice_ctr, cnt_name); +} + +static int tl_calc_and_print_sl_counters(struct adf_accel_dev *accel_dev, + struct seq_file *s, u8 cnt_type, u8 cnt_id) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + const struct adf_tl_dbg_counter *sl_tl_util_counters; + const struct adf_tl_dbg_counter *sl_tl_exec_counters; + const struct adf_tl_dbg_counter *ctr; + int ret; + + sl_tl_util_counters = tl_data->sl_util_counters; + sl_tl_exec_counters = tl_data->sl_exec_counters; + + ctr = &sl_tl_util_counters[cnt_type]; + + ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + 
"invalid slice utilization counter type\n"); + return ret; + } + + ctr = &sl_tl_exec_counters[cnt_type]; + + ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "invalid slice execution counter type\n"); + return ret; + } + + return 0; +} + +static void tl_print_msg_cnt(struct seq_file *s, u32 msg_cnt) +{ + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, SNAPSHOT_CNT_MSG); + seq_printf(s, "%*u\n", TL_VALUE_MIN_PADDING, msg_cnt); +} + +static int tl_print_dev_data(struct adf_accel_dev *accel_dev, + struct seq_file *s) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + const struct adf_tl_dbg_counter *dev_tl_counters; + u8 num_dev_counters = tl_data->num_dev_counters; + u8 *sl_cnt = (u8 *)&telemetry->slice_cnt; + const struct adf_tl_dbg_counter *ctr; + unsigned int i; + int ret; + u8 j; + + if (!atomic_read(&telemetry->state)) { + dev_info(&GET_DEV(accel_dev), "not enabled\n"); + return -EPERM; + } + + dev_tl_counters = tl_data->dev_counters; + + tl_print_msg_cnt(s, telemetry->msg_cnt); + + /* Print device level telemetry. */ + for (i = 0; i < num_dev_counters; i++) { + ctr = &dev_tl_counters[i]; + ret = tl_calc_and_print_counter(telemetry, s, ctr, NULL); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "invalid counter type\n"); + return ret; + } + } + + /* Print per slice telemetry. 
*/ + for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) { + for (j = 0; j < sl_cnt[i]; j++) { + ret = tl_calc_and_print_sl_counters(accel_dev, s, i, j); + if (ret) + return ret; + } + } + + return 0; +} + +static int tl_dev_data_show(struct seq_file *s, void *unused) +{ + struct adf_accel_dev *accel_dev = s->private; + + if (!accel_dev) + return -EINVAL; + + return tl_print_dev_data(accel_dev, s); +} +DEFINE_SHOW_ATTRIBUTE(tl_dev_data); + +static int tl_control_show(struct seq_file *s, void *unused) +{ + struct adf_accel_dev *accel_dev = s->private; + + if (!accel_dev) + return -EINVAL; + + seq_printf(s, "%d\n", atomic_read(&accel_dev->telemetry->state)); + + return 0; +} + +static ssize_t tl_control_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct seq_file *seq_f = file->private_data; + struct adf_accel_dev *accel_dev; + struct adf_telemetry *telemetry; + struct adf_tl_hw_data *tl_data; + struct device *dev; + u32 input; + int ret; + + accel_dev = seq_f->private; + if (!accel_dev) + return -EINVAL; + + tl_data = &GET_TL_DATA(accel_dev); + telemetry = accel_dev->telemetry; + dev = &GET_DEV(accel_dev); + + mutex_lock(&telemetry->wr_lock); + + ret = kstrtou32_from_user(userbuf, count, 10, &input); + if (ret) + goto unlock_and_exit; + + if (input > tl_data->num_hbuff) { + dev_info(dev, "invalid control input\n"); + ret = -EINVAL; + goto unlock_and_exit; + } + + /* If input is 0, just stop telemetry. */ + if (!input) { + ret = adf_tl_halt(accel_dev); + if (!ret) + ret = count; + + goto unlock_and_exit; + } + + /* If TL is already enabled, stop it. 
*/ + if (atomic_read(&telemetry->state)) { + dev_info(dev, "already enabled, restarting.\n"); + ret = adf_tl_halt(accel_dev); + if (ret) + goto unlock_and_exit; + } + + ret = adf_tl_run(accel_dev, input); + if (ret) + goto unlock_and_exit; + + ret = count; + +unlock_and_exit: + mutex_unlock(&telemetry->wr_lock); + return ret; +} +DEFINE_SHOW_STORE_ATTRIBUTE(tl_control); + +void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct dentry *parent = accel_dev->debugfs_dir; + struct dentry *dir; + + if (!telemetry) + return; + + dir = debugfs_create_dir("telemetry", parent); + accel_dev->telemetry->dbg_dir = dir; + debugfs_create_file("device_data", 0444, dir, accel_dev, &tl_dev_data_fops); + debugfs_create_file("control", 0644, dir, accel_dev, &tl_control_fops); +} + +void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct dentry *dbg_dir; + + if (!telemetry) + return; + + dbg_dir = telemetry->dbg_dir; + + debugfs_remove_recursive(dbg_dir); + + if (atomic_read(&telemetry->state)) + adf_tl_halt(accel_dev); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h new file mode 100644 index 000000000000..b2e8f1912c16 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2023 Intel Corporation. 
*/ +#ifndef ADF_TL_DEBUGFS_H +#define ADF_TL_DEBUGFS_H + +#include + +struct adf_accel_dev; + +#define MAX_COUNT_NAME_SIZE 32 +#define SNAPSHOT_CNT_MSG "sample_cnt" +#define RP_NUM_INDEX "rp_num" +#define PCI_TRANS_CNT_NAME "pci_trans_cnt" +#define MAX_RD_LAT_NAME "max_rd_lat" +#define RD_LAT_ACC_NAME "rd_lat_acc_avg" +#define MAX_LAT_NAME "max_gp_lat" +#define LAT_ACC_NAME "gp_lat_acc_avg" +#define BW_IN_NAME "bw_in" +#define BW_OUT_NAME "bw_out" +#define PAGE_REQ_LAT_NAME "at_page_req_lat_avg" +#define AT_TRANS_LAT_NAME "at_trans_lat_avg" +#define AT_MAX_UTLB_USED_NAME "at_max_tlb_used" +#define AT_GLOB_DTLB_HIT_NAME "at_glob_devtlb_hit" +#define AT_GLOB_DTLB_MISS_NAME "at_glob_devtlb_miss" +#define AT_PAYLD_DTLB_HIT_NAME "tl_at_payld_devtlb_hit" +#define AT_PAYLD_DTLB_MISS_NAME "tl_at_payld_devtlb_miss" + +#define ADF_TL_DATA_REG_OFF(reg, qat_gen) \ + offsetof(struct adf_##qat_gen##_tl_layout, reg) + +#define ADF_TL_DEV_REG_OFF(reg, qat_gen) \ + (ADF_TL_DATA_REG_OFF(tl_device_data_regs, qat_gen) + \ + offsetof(struct adf_##qat_gen##_tl_device_data_regs, reg)) + +#define ADF_TL_SLICE_REG_OFF(slice, reg, qat_gen) \ + (ADF_TL_DEV_REG_OFF(slice##_slices[0], qat_gen) + \ + offsetof(struct adf_##qat_gen##_tl_slice_data_regs, reg)) + +/** + * enum adf_tl_counter_type - telemetry counter types + * @ADF_TL_COUNTER_UNSUPPORTED: unsupported counter + * @ADF_TL_SIMPLE_COUNT: simple counter + * @ADF_TL_COUNTER_NS: latency counter, value in ns + * @ADF_TL_COUNTER_NS_AVG: accumulated average latency counter, value in ns + * @ADF_TL_COUNTER_MBPS: bandwidth, value in MBps + */ +enum adf_tl_counter_type { + ADF_TL_COUNTER_UNSUPPORTED, + ADF_TL_SIMPLE_COUNT, + ADF_TL_COUNTER_NS, + ADF_TL_COUNTER_NS_AVG, + ADF_TL_COUNTER_MBPS, +}; + +/** + * struct adf_tl_dbg_counter - telemetry counter definition + * @name: name of the counter as printed in the report + * @adf_tl_counter_type: type of the counter + * @offset1: offset of 1st register + * @offset2: offset of 2nd optional register + 
*/ +struct adf_tl_dbg_counter { + const char *name; + enum adf_tl_counter_type type; + size_t offset1; + size_t offset2; +}; + +#define ADF_TL_COUNTER(_name, _type, _offset) \ +{ .name = _name, \ + .type = _type, \ + .offset1 = _offset \ +} + +#define ADF_TL_COUNTER_LATENCY(_name, _type, _offset1, _offset2) \ +{ .name = _name, \ + .type = _type, \ + .offset1 = _offset1, \ + .offset2 = _offset2 \ +} + +/* Telemetry counter aggregated values. */ +struct adf_tl_dbg_aggr_values { + u64 curr; + u64 min; + u64 max; + u64 avg; +}; + +/** + * adf_tl_dbgfs_add() - Add telemetry's debug fs entries. + * @accel_dev: Pointer to acceleration device. + * + * Creates telemetry's debug fs folder and attributes in QAT debug fs root. + */ +void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev); + +/** + * adf_tl_dbgfs_rm() - Remove telemetry's debug fs entries. + * @accel_dev: Pointer to acceleration device. + * + * Removes telemetry's debug fs folder and attributes from QAT debug fs root. + */ +void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev); + +#endif /* ADF_TL_DEBUGFS_H */ -- Gitee From 211f2a654047f4b37af010c3145f0aa963a997bd Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Fri, 22 Dec 2023 11:35:08 +0100 Subject: [PATCH 0735/2138] crypto: qat - add support for ring pair level telemetry ANBZ: #8589 commit eb52707716e3f2cdf16f4e95e3a800cca190504f upstream. Intel-SIG: commit eb52707716e3 crypto: qat - add support for ring pair level telemetry Backport to support Intel QAT in-tree driver Expose through debugfs ring pair telemetry data for QAT GEN4 devices. This allows to gather metrics about the PCIe channel and device TLB for a selected ring pair. It is possible to monitor maximum 4 ring pairs at the time per device. For details, refer to debugfs-driver-qat_telemetry in Documentation/ABI. This patch is based on earlier work done by Wojciech Ziemba. 
Signed-off-by: Lucas Segarra Fernandez Reviewed-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../ABI/testing/debugfs-driver-qat_telemetry | 125 +++++++++++ .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 1 + .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 1 + .../intel/qat/qat_common/adf_accel_devices.h | 1 + .../intel/qat/qat_common/adf_gen4_hw_data.h | 1 + .../crypto/intel/qat/qat_common/adf_gen4_tl.c | 35 +++ .../crypto/intel/qat/qat_common/adf_gen4_tl.h | 41 +++- .../intel/qat/qat_common/adf_telemetry.c | 23 +- .../intel/qat/qat_common/adf_telemetry.h | 7 + .../intel/qat/qat_common/adf_tl_debugfs.c | 208 ++++++++++++++++++ .../intel/qat/qat_common/adf_tl_debugfs.h | 11 + 11 files changed, 449 insertions(+), 5 deletions(-) diff --git a/Documentation/ABI/testing/debugfs-driver-qat_telemetry b/Documentation/ABI/testing/debugfs-driver-qat_telemetry index 24532365387c..eacee2072088 100644 --- a/Documentation/ABI/testing/debugfs-driver-qat_telemetry +++ b/Documentation/ABI/testing/debugfs-driver-qat_telemetry @@ -101,3 +101,128 @@ Description: (RO) Reports device telemetry counters. attribute is not reported. This attribute is only available for qat_4xxx devices. + +What: /sys/kernel/debug/qat__/telemetry/rp__data +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (RW) Selects up to 4 Ring Pairs (RP) to monitor, one per file, + and report telemetry counters related to each. + + Allowed values to write: + ======================== + * 0 to ````: + Ring pair to be monitored. The value of ``num_rps`` can be + retrieved through ``/sys/bus/pci/devices//qat/num_rps``. + See Documentation/ABI/testing/sysfs-driver-qat. 
+ + Reads report metrics about performance and utilization of + the selected RP: + + ======================= ======================================== + Field Description + ======================= ======================================== + sample_cnt number of acquisitions of telemetry data + from the device. Reads are performed + every 1000 ms + rp_num RP number associated with slot + service_type service associated to the RP + pci_trans_cnt number of PCIe partial transactions + gp_lat_acc_avg average get to put latency [ns] + bw_in PCIe, write bandwidth [Mbps] + bw_out PCIe, read bandwidth [Mbps] + at_glob_devtlb_hit Message descriptor DevTLB hit rate + at_glob_devtlb_miss Message descriptor DevTLB miss rate + tl_at_payld_devtlb_hit Payload DevTLB hit rate + tl_at_payld_devtlb_miss Payload DevTLB miss rate + ======================= ======================================== + + Example. + + Writing the value '32' to the file ``rp_C_data`` starts the + collection of telemetry metrics for ring pair 32:: + + echo 32 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/rp_C_data + + Once a ring pair is selected, statistics can be read accessing + the file:: + + cat /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/rp_C_data + + If ``control`` is set to 1, only the current values of the + counters are displayed:: + + + + If ``control`` is 2, 3 or 4, counters are displayed in the + following format:: + + + + + On QAT GEN4 devices there are 64 RPs on a PF, so the allowed + values are 0..63. This number is absolute to the device. 
+ If Virtual Functions (VF) are used, the ring pair number can + be derived from the Bus, Device, Function of the VF: + + ============ ====== ====== ====== ====== + PCI BDF/VF RP0 RP1 RP2 RP3 + ============ ====== ====== ====== ====== + 0000:6b:0.1 RP 0 RP 1 RP 2 RP 3 + 0000:6b:0.2 RP 4 RP 5 RP 6 RP 7 + 0000:6b:0.3 RP 8 RP 9 RP 10 RP 11 + 0000:6b:0.4 RP 12 RP 13 RP 14 RP 15 + 0000:6b:0.5 RP 16 RP 17 RP 18 RP 19 + 0000:6b:0.6 RP 20 RP 21 RP 22 RP 23 + 0000:6b:0.7 RP 24 RP 25 RP 26 RP 27 + 0000:6b:1.0 RP 28 RP 29 RP 30 RP 31 + 0000:6b:1.1 RP 32 RP 33 RP 34 RP 35 + 0000:6b:1.2 RP 36 RP 37 RP 38 RP 39 + 0000:6b:1.3 RP 40 RP 41 RP 42 RP 43 + 0000:6b:1.4 RP 44 RP 45 RP 46 RP 47 + 0000:6b:1.5 RP 48 RP 49 RP 50 RP 51 + 0000:6b:1.6 RP 52 RP 53 RP 54 RP 55 + 0000:6b:1.7 RP 56 RP 57 RP 58 RP 59 + 0000:6b:2.0 RP 60 RP 61 RP 62 RP 63 + ============ ====== ====== ====== ====== + + The mapping is only valid for the BDFs of VFs on the host. + + + The service provided on a ring-pair varies depending on the + configuration. The configuration for a given device can be + queried and set using ``cfg_services``. + See Documentation/ABI/testing/sysfs-driver-qat for details. + + The following table reports how ring pairs are mapped to VFs + on the PF 0000:6b:0.0 configured for `sym;asym` or `asym;sym`: + + =========== ============ =========== ============ =========== + PCI BDF/VF RP0/service RP1/service RP2/service RP3/service + =========== ============ =========== ============ =========== + 0000:6b:0.1 RP 0 asym RP 1 sym RP 2 asym RP 3 sym + 0000:6b:0.2 RP 4 asym RP 5 sym RP 6 asym RP 7 sym + 0000:6b:0.3 RP 8 asym RP 9 sym RP10 asym RP11 sym + ... ... ... ... ... + =========== ============ =========== ============ =========== + + All VFs follow the same pattern. 
+ + + The following table reports how ring pairs are mapped to VFs on + the PF 0000:6b:0.0 configured for `dc`: + + =========== ============ =========== ============ =========== + PCI BDF/VF RP0/service RP1/service RP2/service RP3/service + =========== ============ =========== ============ =========== + 0000:6b:0.1 RP 0 dc RP 1 dc RP 2 dc RP 3 dc + 0000:6b:0.2 RP 4 dc RP 5 dc RP 6 dc RP 7 dc + 0000:6b:0.3 RP 8 dc RP 9 dc RP10 dc RP11 dc + ... ... ... ... ... + =========== ============ =========== ============ =========== + + The mapping of a RP to a service can be retrieved using + ``rp2srv`` from sysfs. + See Documentation/ABI/testing/sysfs-driver-qat for details. + + This attribute is only available for qat_4xxx devices. diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index a7730d8057d6..5edce27db864 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -520,6 +520,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->init_device = adf_gen4_init_device; hw_data->reset_device = adf_reset_flr; hw_data->admin_ae_mask = ADF_420XX_ADMIN_AE_MASK; + hw_data->num_rps = ADF_GEN4_MAX_RPS; hw_data->fw_name = ADF_420XX_FW; hw_data->fw_mmp_name = ADF_420XX_MMP; hw_data->uof_get_name = uof_get_name_420xx; diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 9c0e5a72d8eb..548b66ec771f 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -429,6 +429,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->init_device = adf_gen4_init_device; hw_data->reset_device = adf_reset_flr; hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK; + hw_data->num_rps = ADF_GEN4_MAX_RPS; switch (dev_id) { case ADF_402XX_PCI_DEVICE_ID: 
hw_data->fw_name = ADF_402XX_FW; diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index b274ebc799c9..db671879b1f8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -278,6 +278,7 @@ struct adf_hw_device_data { u8 num_logical_accel; u8 num_engines; u32 num_hb_ctrs; + u8 num_rps; }; /* CSR write macro */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 051ad20581a6..46a782ba456f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -36,6 +36,7 @@ #define ADF_GEN4_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04)) /* Bank and ring configuration */ +#define ADF_GEN4_MAX_RPS 64 #define ADF_GEN4_NUM_RINGS_PER_BANK 2 #define ADF_GEN4_NUM_BANKS_PER_VF 4 #define ADF_GEN4_ETR_MAX_BANKS 64 diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c index 4efbe6bc651c..7fc7a77f6aed 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c @@ -9,6 +9,8 @@ #define ADF_GEN4_TL_DEV_REG_OFF(reg) ADF_TL_DEV_REG_OFF(reg, gen4) +#define ADF_GEN4_TL_RP_REG_OFF(reg) ADF_TL_RP_REG_OFF(reg, gen4) + #define ADF_GEN4_TL_SL_UTIL_COUNTER(_name) \ ADF_TL_COUNTER("util_" #_name, \ ADF_TL_SIMPLE_COUNT, \ @@ -101,11 +103,42 @@ static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = { ADF_GEN4_TL_SL_EXEC_COUNTER(ath), }; +/* Ring pair counters. */ +static const struct adf_tl_dbg_counter rp_counters[] = { + /* PCIe partial transactions. */ + ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_pci_trans_cnt)), + /* Get to put latency average[ns]. 
*/ + ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_gp_lat_acc), + ADF_GEN4_TL_RP_REG_OFF(reg_tl_ae_put_cnt)), + /* PCIe write bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_bw_in)), + /* PCIe read bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_bw_out)), + /* Message descriptor DevTLB hit rate. */ + ADF_TL_COUNTER(AT_GLOB_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_hit)), + /* Message descriptor DevTLB miss rate. */ + ADF_TL_COUNTER(AT_GLOB_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_miss)), + /* Payload DevTLB hit rate. */ + ADF_TL_COUNTER(AT_PAYLD_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_hit)), + /* Payload DevTLB miss rate. */ + ADF_TL_COUNTER(AT_PAYLD_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_miss)), +}; + void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) { tl_data->layout_sz = ADF_GEN4_TL_LAYOUT_SZ; tl_data->slice_reg_sz = ADF_GEN4_TL_SLICE_REG_SZ; + tl_data->rp_reg_sz = ADF_GEN4_TL_RP_REG_SZ; tl_data->num_hbuff = ADF_GEN4_TL_NUM_HIST_BUFFS; + tl_data->max_rp = ADF_GEN4_TL_MAX_RP_NUM; tl_data->msg_cnt_off = ADF_GEN4_TL_MSG_CNT_OFF; tl_data->cpp_ns_per_cycle = ADF_GEN4_CPP_NS_PER_CYCLE; tl_data->bw_units_to_bytes = ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES; @@ -114,5 +147,7 @@ void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) tl_data->num_dev_counters = ARRAY_SIZE(dev_counters); tl_data->sl_util_counters = sl_util_counters; tl_data->sl_exec_counters = sl_exec_counters; + tl_data->rp_counters = rp_counters; + tl_data->num_rp_counters = ARRAY_SIZE(rp_counters); } EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h index 
feb2eecf24cf..32df4163beb9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h @@ -21,6 +21,9 @@ struct adf_tl_hw_data; /* Max number of HW resources of one type. */ #define ADF_GEN4_TL_MAX_SLICES_PER_TYPE 24 +/* Max number of simultaneously monitored ring pairs. */ +#define ADF_GEN4_TL_MAX_RP_NUM 4 + /** * struct adf_gen4_tl_slice_data_regs - HW slice data as populated by FW. * @reg_tm_slice_exec_cnt: Slice execution count. @@ -92,18 +95,52 @@ struct adf_gen4_tl_device_data_regs { struct adf_gen4_tl_slice_data_regs wcp_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; }; +/** + * struct adf_gen4_tl_ring_pair_data_regs - This structure stores Ring Pair + * telemetry counter values as are being populated periodically by device. + * @reg_tl_gp_lat_acc: get-put latency accumulator + * @reserved: reserved + * @reg_tl_pci_trans_cnt: PCIe partial transactions + * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings + * @reg_tl_bw_in: PCIe write bandwidth + * @reg_tl_bw_out: PCIe read bandwidth + * @reg_tl_at_glob_devtlb_hit: Message descriptor DevTLB hit rate + * @reg_tl_at_glob_devtlb_miss: Message descriptor DevTLB miss rate + * @reg_tl_at_payld_devtlb_hit: Payload DevTLB hit rate + * @reg_tl_at_payld_devtlb_miss: Payload DevTLB miss rate + * @reg_tl_re_cnt: ring empty time samples count + * @reserved1: reserved + */ +struct adf_gen4_tl_ring_pair_data_regs { + __u64 reg_tl_gp_lat_acc; + __u64 reserved; + __u32 reg_tl_pci_trans_cnt; + __u32 reg_tl_ae_put_cnt; + __u32 reg_tl_bw_in; + __u32 reg_tl_bw_out; + __u32 reg_tl_at_glob_devtlb_hit; + __u32 reg_tl_at_glob_devtlb_miss; + __u32 reg_tl_at_payld_devtlb_hit; + __u32 reg_tl_at_payld_devtlb_miss; + __u32 reg_tl_re_cnt; + __u32 reserved1; +}; + +#define ADF_GEN4_TL_RP_REG_SZ sizeof(struct adf_gen4_tl_ring_pair_data_regs) + /** * struct adf_gen4_tl_layout - This structure represents entire telemetry * counters data: Device + 4 Ring Pairs as are being 
populated periodically * by device. * @tl_device_data_regs: structure of device telemetry registers - * @reserved1: reserved + * @tl_ring_pairs_data_regs: array of ring pairs telemetry registers * @reg_tl_msg_cnt: telemetry messages counter * @reserved: reserved */ struct adf_gen4_tl_layout { struct adf_gen4_tl_device_data_regs tl_device_data_regs; - __u32 reserved1[14]; + struct adf_gen4_tl_ring_pair_data_regs + tl_ring_pairs_data_regs[ADF_GEN4_TL_MAX_RP_NUM]; __u32 reg_tl_msg_cnt; __u32 reserved; }; diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c index 05c476d58895..2ff714d11bd2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c @@ -33,7 +33,9 @@ static int validate_tl_data(struct adf_tl_hw_data *tl_data) if (!tl_data->dev_counters || TL_IS_ZERO(tl_data->num_dev_counters) || !tl_data->sl_util_counters || - !tl_data->sl_exec_counters) + !tl_data->sl_exec_counters || + !tl_data->rp_counters || + TL_IS_ZERO(tl_data->num_rp_counters)) return -EOPNOTSUPP; return 0; @@ -53,11 +55,17 @@ static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev) if (!telemetry) return -ENOMEM; + telemetry->rp_num_indexes = kmalloc_array(tl_data->max_rp, + sizeof(*telemetry->rp_num_indexes), + GFP_KERNEL); + if (!telemetry->rp_num_indexes) + goto err_free_tl; + telemetry->regs_hist_buff = kmalloc_array(tl_data->num_hbuff, sizeof(*telemetry->regs_hist_buff), GFP_KERNEL); if (!telemetry->regs_hist_buff) - goto err_free_tl; + goto err_free_rp_indexes; telemetry->regs_data = dma_alloc_coherent(dev, regs_sz, &telemetry->regs_data_p, @@ -86,6 +94,8 @@ static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev) err_free_regs_hist_buff: kfree(telemetry->regs_hist_buff); +err_free_rp_indexes: + kfree(telemetry->rp_num_indexes); err_free_tl: kfree(telemetry); @@ -107,6 +117,7 @@ static void adf_tl_free_mem(struct adf_accel_dev *accel_dev) 
telemetry->regs_data_p); kfree(telemetry->regs_hist_buff); + kfree(telemetry->rp_num_indexes); kfree(telemetry); accel_dev->telemetry = NULL; } @@ -196,7 +207,8 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state) int ret; ret = adf_send_admin_tl_start(accel_dev, telemetry->regs_data_p, - layout_sz, NULL, &telemetry->slice_cnt); + layout_sz, telemetry->rp_num_indexes, + &telemetry->slice_cnt); if (ret) { dev_err(dev, "failed to start telemetry\n"); return ret; @@ -213,8 +225,10 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state) int adf_tl_init(struct adf_accel_dev *accel_dev) { struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + u8 max_rp = GET_TL_DATA(accel_dev).max_rp; struct device *dev = &GET_DEV(accel_dev); struct adf_telemetry *telemetry; + unsigned int i; int ret; ret = validate_tl_data(tl_data); @@ -234,6 +248,9 @@ int adf_tl_init(struct adf_accel_dev *accel_dev) mutex_init(&telemetry->regs_hist_lock); INIT_DELAYED_WORK(&telemetry->work_ctx, tl_work_handler); + for (i = 0; i < max_rp; i++) + telemetry->rp_num_indexes[i] = ADF_TL_RP_REGS_DISABLED; + return 0; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h index 08de17621467..9be81cd3b886 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h @@ -23,17 +23,23 @@ struct dentry; /* Interval within timer interrupt should be handled. Value in milliseconds. 
*/ #define ADF_TL_TIMER_INT_MS (ADF_TL_DATA_WR_INTERVAL_MS / 2) +#define ADF_TL_RP_REGS_DISABLED (0xff) + struct adf_tl_hw_data { size_t layout_sz; size_t slice_reg_sz; + size_t rp_reg_sz; size_t msg_cnt_off; const struct adf_tl_dbg_counter *dev_counters; const struct adf_tl_dbg_counter *sl_util_counters; const struct adf_tl_dbg_counter *sl_exec_counters; + const struct adf_tl_dbg_counter *rp_counters; u8 num_hbuff; u8 cpp_ns_per_cycle; u8 bw_units_to_bytes; u8 num_dev_counters; + u8 num_rp_counters; + u8 max_rp; }; struct adf_telemetry { @@ -50,6 +56,7 @@ struct adf_telemetry { */ void **regs_hist_buff; struct dentry *dbg_dir; + u8 *rp_num_indexes; /** * @regs_hist_lock: protects from race conditions between write and read * to the copies referenced by @regs_hist_buff diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c index accb46d6ea3c..c8241f5a0a26 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -14,11 +15,13 @@ #include #include "adf_accel_devices.h" +#include "adf_cfg_strings.h" #include "adf_telemetry.h" #include "adf_tl_debugfs.h" #define TL_VALUE_MIN_PADDING 20 #define TL_KEY_MIN_PADDING 23 +#define TL_RP_SRV_UNKNOWN "Unknown" static int tl_collect_values_u32(struct adf_telemetry *telemetry, size_t counter_offset, u64 *arr) @@ -470,11 +473,210 @@ static ssize_t tl_control_write(struct file *file, const char __user *userbuf, } DEFINE_SHOW_STORE_ATTRIBUTE(tl_control); +static int get_rp_index_from_file(const struct file *f, u8 *rp_id, u8 rp_num) +{ + char alpha; + u8 index; + int ret; + + ret = sscanf(f->f_path.dentry->d_name.name, ADF_TL_RP_REGS_FNAME, &alpha); + if (ret != 1) + return -EINVAL; + + index = ADF_TL_DBG_RP_INDEX_ALPHA(alpha); + *rp_id = index; + + return 0; +} + +static int adf_tl_dbg_change_rp_index(struct 
adf_accel_dev *accel_dev, + unsigned int new_rp_num, + unsigned int rp_regs_index) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + unsigned int i; + u8 curr_state; + int ret; + + if (new_rp_num >= hw_data->num_rps) { + dev_info(dev, "invalid Ring Pair number selected\n"); + return -EINVAL; + } + + for (i = 0; i < hw_data->tl_data.max_rp; i++) { + if (telemetry->rp_num_indexes[i] == new_rp_num) { + dev_info(dev, "RP nr: %d is already selected in slot rp_%c_data\n", + new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(i)); + return 0; + } + } + + dev_dbg(dev, "selecting RP nr %u into slot rp_%c_data\n", + new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index)); + + curr_state = atomic_read(&telemetry->state); + + if (curr_state) { + ret = adf_tl_halt(accel_dev); + if (ret) + return ret; + + telemetry->rp_num_indexes[rp_regs_index] = new_rp_num; + + ret = adf_tl_run(accel_dev, curr_state); + if (ret) + return ret; + } else { + telemetry->rp_num_indexes[rp_regs_index] = new_rp_num; + } + + return 0; +} + +static void tl_print_rp_srv(struct adf_accel_dev *accel_dev, struct seq_file *s, + u8 rp_idx) +{ + u32 banks_per_vf = GET_HW_DATA(accel_dev)->num_banks_per_vf; + enum adf_cfg_service_type svc; + + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_SERVICE_TYPE); + + svc = GET_SRV_TYPE(accel_dev, rp_idx % banks_per_vf); + switch (svc) { + case COMP: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_DC); + break; + case SYM: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_SYM); + break; + case ASYM: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_ASYM); + break; + default: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, TL_RP_SRV_UNKNOWN); + break; + } +} + +static int tl_print_rp_data(struct adf_accel_dev *accel_dev, struct seq_file *s, + u8 rp_regs_index) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry 
= accel_dev->telemetry; + const struct adf_tl_dbg_counter *rp_tl_counters; + u8 num_rp_counters = tl_data->num_rp_counters; + size_t rp_regs_sz = tl_data->rp_reg_sz; + struct adf_tl_dbg_counter ctr; + unsigned int i; + u8 rp_idx; + int ret; + + if (!atomic_read(&telemetry->state)) { + dev_info(&GET_DEV(accel_dev), "not enabled\n"); + return -EPERM; + } + + rp_tl_counters = tl_data->rp_counters; + rp_idx = telemetry->rp_num_indexes[rp_regs_index]; + + if (rp_idx == ADF_TL_RP_REGS_DISABLED) { + dev_info(&GET_DEV(accel_dev), "no RP number selected in rp_%c_data\n", + ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index)); + return -EPERM; + } + + tl_print_msg_cnt(s, telemetry->msg_cnt); + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_NUM_INDEX); + seq_printf(s, "%*d\n", TL_VALUE_MIN_PADDING, rp_idx); + tl_print_rp_srv(accel_dev, s, rp_idx); + + for (i = 0; i < num_rp_counters; i++) { + ctr = rp_tl_counters[i]; + ctr.offset1 += rp_regs_sz * rp_regs_index; + ctr.offset2 += rp_regs_sz * rp_regs_index; + ret = tl_calc_and_print_counter(telemetry, s, &ctr, NULL); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), + "invalid RP counter type\n"); + return ret; + } + } + + return 0; +} + +static int tl_rp_data_show(struct seq_file *s, void *unused) +{ + struct adf_accel_dev *accel_dev = s->private; + u8 rp_regs_index; + u8 max_rp; + int ret; + + if (!accel_dev) + return -EINVAL; + + max_rp = GET_TL_DATA(accel_dev).max_rp; + ret = get_rp_index_from_file(s->file, &rp_regs_index, max_rp); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n"); + return ret; + } + + return tl_print_rp_data(accel_dev, s, rp_regs_index); +} + +static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct seq_file *seq_f = file->private_data; + struct adf_accel_dev *accel_dev; + struct adf_telemetry *telemetry; + unsigned int new_rp_num; + u8 rp_regs_index; + u8 max_rp; + int ret; + + accel_dev = seq_f->private; + if (!accel_dev) + return 
-EINVAL; + + telemetry = accel_dev->telemetry; + max_rp = GET_TL_DATA(accel_dev).max_rp; + + mutex_lock(&telemetry->wr_lock); + + ret = get_rp_index_from_file(file, &rp_regs_index, max_rp); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n"); + goto unlock_and_exit; + } + + ret = kstrtou32_from_user(userbuf, count, 10, &new_rp_num); + if (ret) + goto unlock_and_exit; + + ret = adf_tl_dbg_change_rp_index(accel_dev, new_rp_num, rp_regs_index); + if (ret) + goto unlock_and_exit; + + ret = count; + +unlock_and_exit: + mutex_unlock(&telemetry->wr_lock); + return ret; +} +DEFINE_SHOW_STORE_ATTRIBUTE(tl_rp_data); + void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev) { struct adf_telemetry *telemetry = accel_dev->telemetry; struct dentry *parent = accel_dev->debugfs_dir; + u8 max_rp = GET_TL_DATA(accel_dev).max_rp; + char name[ADF_TL_RP_REGS_FNAME_SIZE]; struct dentry *dir; + unsigned int i; if (!telemetry) return; @@ -483,6 +685,12 @@ void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev) accel_dev->telemetry->dbg_dir = dir; debugfs_create_file("device_data", 0444, dir, accel_dev, &tl_dev_data_fops); debugfs_create_file("control", 0644, dir, accel_dev, &tl_control_fops); + + for (i = 0; i < max_rp; i++) { + snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME, + ADF_TL_DBG_RP_ALPHA_INDEX(i)); + debugfs_create_file(name, 0644, dir, accel_dev, &tl_rp_data_fops); + } } void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev) diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h index b2e8f1912c16..11cc9eae19b3 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h +++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h @@ -24,6 +24,13 @@ struct adf_accel_dev; #define AT_GLOB_DTLB_MISS_NAME "at_glob_devtlb_miss" #define AT_PAYLD_DTLB_HIT_NAME "tl_at_payld_devtlb_hit" #define AT_PAYLD_DTLB_MISS_NAME "tl_at_payld_devtlb_miss" +#define RP_SERVICE_TYPE "service_type" + +#define 
ADF_TL_DBG_RP_ALPHA_INDEX(index) ((index) + 'A') +#define ADF_TL_DBG_RP_INDEX_ALPHA(alpha) ((alpha) - 'A') + +#define ADF_TL_RP_REGS_FNAME "rp_%c_data" +#define ADF_TL_RP_REGS_FNAME_SIZE 16 #define ADF_TL_DATA_REG_OFF(reg, qat_gen) \ offsetof(struct adf_##qat_gen##_tl_layout, reg) @@ -36,6 +43,10 @@ struct adf_accel_dev; (ADF_TL_DEV_REG_OFF(slice##_slices[0], qat_gen) + \ offsetof(struct adf_##qat_gen##_tl_slice_data_regs, reg)) +#define ADF_TL_RP_REG_OFF(reg, qat_gen) \ + (ADF_TL_DATA_REG_OFF(tl_ring_pairs_data_regs[0], qat_gen) + \ + offsetof(struct adf_##qat_gen##_tl_ring_pair_data_regs, reg)) + /** * enum adf_tl_counter_type - telemetry counter types * @ADF_TL_COUNTER_UNSUPPORTED: unsupported counter -- Gitee From 73f594d0b99ae83f53cf2964f2633269ba6e71ae Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 22 Dec 2023 14:15:35 +0100 Subject: [PATCH 0736/2138] crypto: qat - generate dynamically arbiter mappings ANBZ: #8589 commit 5da6a2d5353e0e234f12ccacaf6f50656cc33278 upstream. Intel-SIG: commit 5da6a2d5353e crypto: qat - generate dynamically arbiter mappings Backport to support Intel QAT in-tree driver The thread-to-arbiter mapping describes which arbiter can assign jobs to an acceleration engine thread. The existing mappings are functionally correct, but hardcoded and not optimized. Replace the static mappings with an algorithm that generates optimal mappings, based on the loaded configuration. The logic has been made common so that it can be shared between all QAT GEN4 devices. 
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 131 +++++++----------- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 110 ++++++++++----- .../intel/qat/qat_common/adf_accel_devices.h | 4 + .../intel/qat/qat_common/adf_gen4_hw_data.c | 90 ++++++++++++ .../intel/qat/qat_common/adf_gen4_hw_data.h | 12 ++ 5 files changed, 235 insertions(+), 112 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 5edce27db864..a87d29ae724f 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -25,6 +25,10 @@ #define ADF_AE_GROUP_3 GENMASK(15, 12) #define ADF_AE_GROUP_4 BIT(16) +#define ENA_THD_MASK_ASYM GENMASK(1, 0) +#define ENA_THD_MASK_SYM GENMASK(3, 0) +#define ENA_THD_MASK_DC GENMASK(1, 0) + static const char * const adf_420xx_fw_objs[] = { [ADF_FW_SYM_OBJ] = ADF_420XX_SYM_OBJ, [ADF_FW_ASYM_OBJ] = ADF_420XX_ASYM_OBJ, @@ -83,62 +87,6 @@ static const struct adf_fw_config adf_fw_dcc_config[] = { {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, }; -/* Worker thread to service arbiter mappings */ -static const u32 default_thrd_to_arb_map[ADF_420XX_MAX_ACCELENGINES] = { - 0x00000055, 0x00000055, 0x00000055, 0x00000055, - 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, - 0x00000055, 0x00000055, 0x00000055, 0x00000055, - 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, - 0x0 -}; - -static const u32 thrd_to_arb_map_asym[ADF_420XX_MAX_ACCELENGINES] = { - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x0 -}; - -static const u32 
thrd_to_arb_map_sym[ADF_420XX_MAX_ACCELENGINES] = { - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, - 0x0 -}; - -static const u32 thrd_to_arb_map_asym_dc[ADF_420XX_MAX_ACCELENGINES] = { - 0x00000055, 0x00000055, 0x00000055, 0x00000055, - 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, - 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, - 0x000000AA, 0x000000AA, 0x000000AA, 0x000000AA, - 0x0 -}; - -static const u32 thrd_to_arb_map_sym_dc[ADF_420XX_MAX_ACCELENGINES] = { - 0x00000055, 0x00000055, 0x00000055, 0x00000055, - 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, - 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, 0x0000AAAA, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0 -}; - -static const u32 thrd_to_arb_map_dc[ADF_420XX_MAX_ACCELENGINES] = { - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0 -}; - -static const u32 thrd_to_arb_map_dcc[ADF_420XX_MAX_ACCELENGINES] = { - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0 -}; static struct adf_hw_device_class adf_420xx_class = { .name = ADF_420XX_DEVICE_NAME, @@ -346,24 +294,11 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) { - switch (adf_get_service_enabled(accel_dev)) { - case SVC_ASYM: - return thrd_to_arb_map_asym; - case SVC_SYM: - return thrd_to_arb_map_sym; - case SVC_DC: - return thrd_to_arb_map_dc; - case SVC_DCC: - return thrd_to_arb_map_dcc; - case SVC_ASYM_DC: - case SVC_DC_ASYM: - return thrd_to_arb_map_asym_dc; - case SVC_DC_SYM: - case SVC_SYM_DC: - return 
thrd_to_arb_map_sym_dc; - default: - return default_thrd_to_arb_map; - } + if (adf_gen4_init_thd2arb_map(accel_dev)) + dev_warn(&GET_DEV(accel_dev), + "Generate of the thread to arbiter map failed"); + + return GET_HW_DATA(accel_dev)->thd_to_arb_map; } static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) @@ -384,11 +319,47 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) rl_data->scale_ref = ADF_420XX_RL_SLICE_REF; } -enum adf_rp_groups { - RP_GROUP_0 = 0, - RP_GROUP_1, - RP_GROUP_COUNT -}; +static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask) +{ + switch (ae_mask) { + case ADF_AE_GROUP_0: + return RP_GROUP_0; + case ADF_AE_GROUP_1: + case ADF_AE_GROUP_3: + return RP_GROUP_1; + case ADF_AE_GROUP_2: + if (get_fw_config(accel_dev) == adf_fw_cy_config) + return RP_GROUP_0; + else + return RP_GROUP_1; + default: + dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized"); + return -EINVAL; + } +} + +static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + switch (fw_config[obj_num].obj) { + case ADF_FW_ASYM_OBJ: + return ENA_THD_MASK_ASYM; + case ADF_FW_SYM_OBJ: + return ENA_THD_MASK_SYM; + case ADF_FW_DC_OBJ: + return ENA_THD_MASK_DC; + default: + return ADF_GEN4_ENA_THD_MASK_ERROR; + } +} static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) { @@ -526,6 +497,8 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->uof_get_name = uof_get_name_420xx; hw_data->uof_get_num_objs = uof_get_num_objs; hw_data->uof_get_ae_mask = uof_get_ae_mask; + hw_data->get_rp_group = get_rp_group; + hw_data->get_ena_thd_mask = get_ena_thd_mask; hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; 
hw_data->get_ring_to_svc_map = get_ring_to_svc_map; diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 548b66ec771f..a6b5b2d8f96f 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -23,6 +23,11 @@ #define ADF_AE_GROUP_1 GENMASK(7, 4) #define ADF_AE_GROUP_2 BIT(8) +#define ENA_THD_MASK_ASYM GENMASK(1, 0) +#define ENA_THD_MASK_ASYM_401XX GENMASK(5, 0) +#define ENA_THD_MASK_SYM GENMASK(6, 0) +#define ENA_THD_MASK_DC GENMASK(1, 0) + static const char * const adf_4xxx_fw_objs[] = { [ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ, [ADF_FW_ASYM_OBJ] = ADF_4XXX_ASYM_OBJ, @@ -86,25 +91,6 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config)) static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config)); static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config)); -/* Worker thread to service arbiter mappings */ -static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = { - 0x5555555, 0x5555555, 0x5555555, 0x5555555, - 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, - 0x0 -}; - -static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = { - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x0 -}; - -static const u32 thrd_to_arb_map_dcc[ADF_4XXX_MAX_ACCELENGINES] = { - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, - 0x0 -}; - static struct adf_hw_device_class adf_4xxx_class = { .name = ADF_4XXX_DEVICE_NAME, .type = DEV_4XXX, @@ -220,14 +206,11 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) { - switch (adf_get_service_enabled(accel_dev)) { - case SVC_DC: - return thrd_to_arb_map_dc; - case SVC_DCC: - return thrd_to_arb_map_dcc; - default: - return default_thrd_to_arb_map; - 
} + if (adf_gen4_init_thd2arb_map(accel_dev)) + dev_warn(&GET_DEV(accel_dev), + "Generate of the thread to arbiter map failed"); + + return GET_HW_DATA(accel_dev)->thd_to_arb_map; } static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) @@ -278,11 +261,64 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev } } -enum adf_rp_groups { - RP_GROUP_0 = 0, - RP_GROUP_1, - RP_GROUP_COUNT -}; +static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask) +{ + switch (ae_mask) { + case ADF_AE_GROUP_0: + return RP_GROUP_0; + case ADF_AE_GROUP_1: + return RP_GROUP_1; + default: + dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized"); + return -EINVAL; + } +} + +static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + switch (fw_config[obj_num].obj) { + case ADF_FW_ASYM_OBJ: + return ENA_THD_MASK_ASYM; + case ADF_FW_SYM_OBJ: + return ENA_THD_MASK_SYM; + case ADF_FW_DC_OBJ: + return ENA_THD_MASK_DC; + default: + return ADF_GEN4_ENA_THD_MASK_ERROR; + } +} + +static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + switch (fw_config[obj_num].obj) { + case ADF_FW_ASYM_OBJ: + return ENA_THD_MASK_ASYM_401XX; + case ADF_FW_SYM_OBJ: + return ENA_THD_MASK_SYM; + case ADF_FW_DC_OBJ: + return ENA_THD_MASK_DC; + default: + return ADF_GEN4_ENA_THD_MASK_ERROR; + } +} static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) { @@ -436,14 +472,22 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->fw_mmp_name = 
ADF_402XX_MMP; hw_data->uof_get_name = uof_get_name_402xx; break; - + case ADF_401XX_PCI_DEVICE_ID: + hw_data->fw_name = ADF_4XXX_FW; + hw_data->fw_mmp_name = ADF_4XXX_MMP; + hw_data->uof_get_name = uof_get_name_4xxx; + hw_data->get_ena_thd_mask = get_ena_thd_mask_401xx; + break; default: hw_data->fw_name = ADF_4XXX_FW; hw_data->fw_mmp_name = ADF_4XXX_MMP; hw_data->uof_get_name = uof_get_name_4xxx; + hw_data->get_ena_thd_mask = get_ena_thd_mask; + break; } hw_data->uof_get_num_objs = uof_get_num_objs; hw_data->uof_get_ae_mask = uof_get_ae_mask; + hw_data->get_rp_group = get_rp_group; hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; hw_data->get_ring_to_svc_map = get_ring_to_svc_map; diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index db671879b1f8..a16c7e6edc65 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -13,6 +13,7 @@ #include "adf_rl.h" #include "adf_telemetry.h" #include "adf_pfvf_msg.h" +#include "icp_qat_hw.h" #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" @@ -248,6 +249,8 @@ struct adf_hw_device_data { const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev); u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); + int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask); + u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); int (*dev_config)(struct adf_accel_dev *accel_dev); struct adf_pfvf_ops pfvf_ops; struct adf_hw_csr_ops csr_ops; @@ -270,6 +273,7 @@ struct adf_hw_device_data { u32 admin_ae_mask; u16 tx_rings_mask; u16 ring_to_svc_map; + u32 thd_to_arb_map[ICP_QAT_HW_AE_DELIMITER]; u8 tx_rx_gap; u8 num_banks; u16 num_banks_per_vf; diff --git 
a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index ee08b34876dd..9985683056d5 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -2,6 +2,7 @@ /* Copyright(c) 2020 Intel Corporation */ #include #include "adf_accel_devices.h" +#include "adf_cfg_services.h" #include "adf_common_drv.h" #include "adf_gen4_hw_data.h" #include "adf_gen4_pm.h" @@ -340,3 +341,92 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) return ret; } EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset); + +static const u32 thrd_to_arb_map_dcc[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0 +}; + +static const u16 rp_group_to_arb_mask[] = { + [RP_GROUP_0] = 0x5, + [RP_GROUP_1] = 0xA, +}; + +static bool is_single_service(int service_id) +{ + switch (service_id) { + case SVC_DC: + case SVC_SYM: + case SVC_ASYM: + return true; + case SVC_CY: + case SVC_CY2: + case SVC_DCC: + case SVC_ASYM_DC: + case SVC_DC_ASYM: + case SVC_SYM_DC: + case SVC_DC_SYM: + default: + return false; + } +} + +int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u32 *thd2arb_map = hw_data->thd_to_arb_map; + unsigned int ae_cnt, worker_obj_cnt, i, j; + unsigned long ae_mask, thds_mask; + int srv_id, rp_group; + u32 thd2arb_map_base; + u16 arb_mask; + + if (!hw_data->get_rp_group || !hw_data->get_ena_thd_mask || + !hw_data->get_num_aes || !hw_data->uof_get_num_objs || + !hw_data->uof_get_ae_mask) + return -EFAULT; + + srv_id = adf_get_service_enabled(accel_dev); + if (srv_id < 0) + return srv_id; + + ae_cnt = hw_data->get_num_aes(hw_data); + worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) - + 
ADF_GEN4_ADMIN_ACCELENGINES; + + if (srv_id == SVC_DCC) { + memcpy(thd2arb_map, thrd_to_arb_map_dcc, + array_size(sizeof(*thd2arb_map), ae_cnt)); + return 0; + } + + for (i = 0; i < worker_obj_cnt; i++) { + ae_mask = hw_data->uof_get_ae_mask(accel_dev, i); + rp_group = hw_data->get_rp_group(accel_dev, ae_mask); + thds_mask = hw_data->get_ena_thd_mask(accel_dev, i); + thd2arb_map_base = 0; + + if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0) + return -EINVAL; + + if (thds_mask == ADF_GEN4_ENA_THD_MASK_ERROR) + return -EINVAL; + + if (is_single_service(srv_id)) + arb_mask = rp_group_to_arb_mask[RP_GROUP_0] | + rp_group_to_arb_mask[RP_GROUP_1]; + else + arb_mask = rp_group_to_arb_mask[rp_group]; + + for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE) + thd2arb_map_base |= arb_mask << (j * 4); + + for_each_set_bit(j, &ae_mask, ae_cnt) + thd2arb_map[j] = thd2arb_map_base; + } + return 0; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 46a782ba456f..7d8a774cadc8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -28,6 +28,7 @@ /* Accelerators */ #define ADF_GEN4_ACCELERATORS_MASK 0x1 #define ADF_GEN4_MAX_ACCELERATORS 1 +#define ADF_GEN4_ADMIN_ACCELENGINES 1 /* MSIX interrupt */ #define ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET 0x41A040 @@ -193,6 +194,9 @@ do { \ #define ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800 #define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804 +/* Arbiter threads mask with error value */ +#define ADF_GEN4_ENA_THD_MASK_ERROR GENMASK(ADF_NUM_THREADS_PER_AE, 0) + void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); enum icp_qat_gen4_slice_mask { @@ -207,6 +211,12 @@ enum icp_qat_gen4_slice_mask { ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE = BIT(9), }; +enum adf_gen4_rp_groups { + RP_GROUP_0, + RP_GROUP_1, + RP_GROUP_COUNT +}; + 
void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev); void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev); u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self); @@ -224,4 +234,6 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev); void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); +int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev); + #endif -- Gitee From 07bef9e961218cf24782bb0398e21e52d3d46f38 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 19 Jan 2024 17:12:38 +0100 Subject: [PATCH 0737/2138] crypto: qat - fix arbiter mapping generation algorithm for QAT 402xx ANBZ: #8589 commit e1d54d153fc3e697b841999df7cbad51492def8e upstream. Intel-SIG: commit e1d54d153fc3 crypto: qat - fix arbiter mapping generation algorithm for QAT 402xx Backport to support Intel QAT in-tree driver The commit "crypto: qat - generate dynamically arbiter mappings" introduced a regression on qat_402xx devices. This is reported when the driver probes the device, as indicated by the following error messages: 4xxx 0000:0b:00.0: enabling device (0140 -> 0142) 4xxx 0000:0b:00.0: Generate of the thread to arbiter map failed 4xxx 0000:0b:00.0: Direct firmware load for qat_402xx_mmp.bin failed with error -2 The root cause of this issue was the omission of a necessary function pointer required by the mapping algorithm during the implementation. Fix it by adding the missing function pointer. 
Fixes: 5da6a2d5353e ("crypto: qat - generate dynamically arbiter mappings") Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index a6b5b2d8f96f..824cd7186320 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -471,6 +471,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->fw_name = ADF_402XX_FW; hw_data->fw_mmp_name = ADF_402XX_MMP; hw_data->uof_get_name = uof_get_name_402xx; + hw_data->get_ena_thd_mask = get_ena_thd_mask; break; case ADF_401XX_PCI_DEVICE_ID: hw_data->fw_name = ADF_4XXX_FW; -- Gitee From 98f85a81d41a5d21b68978399f92ad66f05b2bd6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 3 Jan 2024 17:26:02 +0100 Subject: [PATCH 0738/2138] crypto: qat - avoid memcpy() overflow warning ANBZ: #8589 commit 23a22e831ed4e6aa0831312e8cc8b7c60a657f60 upstream. 
Intel-SIG: commit 23a22e831ed4 crypto: qat - avoid memcpy() overflow warning Backport to support Intel QAT in-tree driver The use of array_size() leads gcc to assume the memcpy() can have a larger limit than actually possible, which triggers a string fortification warning: In file included from include/linux/string.h:296, from include/linux/bitmap.h:12, from include/linux/cpumask.h:12, from include/linux/sched.h:16, from include/linux/delay.h:23, from include/linux/iopoll.h:12, from drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c:3: In function 'fortify_memcpy_chk', inlined from 'adf_gen4_init_thd2arb_map' at drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c:401:3: include/linux/fortify-string.h:579:4: error: call to '__write_overflow_field' declared with attribute warning: detected write beyond size of field (1st parameter); maybe use struct_group()? [-Werror=attribute-warning] 579 | __write_overflow_field(p_size_field, size); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ include/linux/fortify-string.h:588:4: error: call to '__read_overflow2_field' declared with attribute warning: detected read beyond size of field (2nd parameter); maybe use struct_group()? [-Werror=attribute-warning] 588 | __read_overflow2_field(q_size_field, size); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add an explicit range check to avoid this. 
Fixes: 5da6a2d5353e ("crypto: qat - generate dynamically arbiter mappings") Signed-off-by: Arnd Bergmann Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 9985683056d5..f752653ccb47 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -398,6 +398,9 @@ int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) ADF_GEN4_ADMIN_ACCELENGINES; if (srv_id == SVC_DCC) { + if (ae_cnt > ICP_QAT_HW_AE_DELIMITER) + return -EINVAL; + memcpy(thd2arb_map, thrd_to_arb_map_dcc, array_size(sizeof(*thd2arb_map), ae_cnt)); return 0; -- Gitee From 6205791313dc23170b0ef92580f1fb6665522997 Mon Sep 17 00:00:00 2001 From: Erick Archer Date: Sun, 21 Jan 2024 17:40:43 +0100 Subject: [PATCH 0739/2138] crypto: qat - use kcalloc_node() instead of kzalloc_node() ANBZ: #8589 commit 4da3bc65d218605557696109e42cfeee666d601f upstream. Intel-SIG: commit 4da3bc65d218 crypto: qat - use kcalloc_node() instead of kzalloc_node() Backport to support Intel QAT in-tree driver As noted in the "Deprecated Interfaces, Language Features, Attributes, and Conventions" documentation [1], size calculations (especially multiplication) should not be performed in memory allocator (or similar) function arguments due to the risk of them overflowing. This could lead to values wrapping around and a smaller allocation being made than the caller was expecting. Using those allocations could lead to linear overflows of heap memory and other misbehaviors. So, use the purpose specific kcalloc_node() function instead of the argument count * size in the kzalloc_node() function. 
Link: https://www.kernel.org/doc/html/next/process/deprecated.html#open-coded-arithmetic-in-allocator-arguments [1] Link: https://github.com/KSPP/linux/issues/162 Signed-off-by: Erick Archer Reviewed-by: Gustavo A. R. Silva Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_isr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c index 3557a0d6dea2..a13d9885d60f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c @@ -272,7 +272,7 @@ static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev) if (!accel_dev->pf.vf_info) msix_num_entries += hw_data->num_banks; - irqs = kzalloc_node(msix_num_entries * sizeof(*irqs), + irqs = kcalloc_node(msix_num_entries, sizeof(*irqs), GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); if (!irqs) return -ENOMEM; -- Gitee From 7aef82de02585cd87b97589ca7427e07148c35da Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 2 Feb 2024 18:53:16 +0800 Subject: [PATCH 0740/2138] crypto: qat - add heartbeat error simulator ANBZ: #8589 commit e2b67859ab6efd4458bda1baaee20331a367d995 upstream. Intel-SIG: commit e2b67859ab6e crypto: qat - add heartbeat error simulator Backport to support Intel QAT in-tree driver Add a mechanism that allows to inject a heartbeat error for testing purposes. A new attribute `inject_error` is added to debugfs for each QAT device. Upon a write on this attribute, the driver will inject an error on the device which can then be detected by the heartbeat feature. Errors are breaking the device functionality thus they require a device reset in order to be recovered. 
This functionality is not compiled by default, to enable it CRYPTO_DEV_QAT_ERROR_INJECTION must be set. Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Reviewed-by: Lucas Segarra Fernandez Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Signed-off-by: Mun Chun Yep Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- Documentation/ABI/testing/debugfs-driver-qat | 26 +++++++ drivers/crypto/intel/qat/Kconfig | 14 ++++ drivers/crypto/intel/qat/qat_common/Makefile | 2 + .../intel/qat/qat_common/adf_common_drv.h | 1 + .../intel/qat/qat_common/adf_heartbeat.c | 6 -- .../intel/qat/qat_common/adf_heartbeat.h | 18 +++++ .../qat/qat_common/adf_heartbeat_dbgfs.c | 52 +++++++++++++ .../qat/qat_common/adf_heartbeat_inject.c | 76 +++++++++++++++++++ .../intel/qat/qat_common/adf_hw_arbiter.c | 25 ++++++ 9 files changed, 214 insertions(+), 6 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat index b2db010d851e..bd6793760f29 100644 --- a/Documentation/ABI/testing/debugfs-driver-qat +++ b/Documentation/ABI/testing/debugfs-driver-qat @@ -81,3 +81,29 @@ Description: (RO) Read returns, for each Acceleration Engine (AE), the number : Number of Compress and Verify (CnV) errors and type of the last CnV error detected by Acceleration Engine N. + +What: /sys/kernel/debug/qat__/heartbeat/inject_error +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (WO) Write to inject an error that simulates an heartbeat + failure. This is to be used for testing purposes. + + After writing this file, the driver stops arbitration on a + random engine and disables the fetching of heartbeat counters. 
+ If a workload is running on the device, a job submitted to the + accelerator might not get a response and a read of the + `heartbeat/status` attribute might report -1, i.e. device + unresponsive. + The error is unrecoverable thus the device must be restarted to + restore its functionality. + + This attribute is available only when the kernel is built with + CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION=y. + + A write of 1 enables error injection. + + The following example shows how to enable error injection:: + + # cd /sys/kernel/debug/qat__ + # echo 1 > heartbeat/inject_error diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig index c120f6715a09..02fb8abe4e6e 100644 --- a/drivers/crypto/intel/qat/Kconfig +++ b/drivers/crypto/intel/qat/Kconfig @@ -106,3 +106,17 @@ config CRYPTO_DEV_QAT_C62XVF To compile this as a module, choose M here: the module will be called qat_c62xvf. + +config CRYPTO_DEV_QAT_ERROR_INJECTION + bool "Support for Intel(R) QAT Devices Heartbeat Error Injection" + depends on CRYPTO_DEV_QAT + depends on DEBUG_FS + help + Enables a mechanism that allows to inject a heartbeat error on + Intel(R) QuickAssist devices for testing purposes. + + This is intended for developer use only. + If unsure, say N. 
+ + This functionality is available via debugfs entry of the Intel(R) + QuickAssist device diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 6908727bff3b..5915cde8a7aa 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -53,3 +53,5 @@ intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \ adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \ adf_gen2_pfvf.o adf_gen4_pfvf.o + +intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index f06188033a93..0baae42deb3a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -90,6 +90,7 @@ void adf_exit_aer(void); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); +int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr); int adf_dev_get(struct adf_accel_dev *accel_dev); void adf_dev_put(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c index 13f48d2f6da8..f88b1bc6857e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c @@ -23,12 +23,6 @@ #define ADF_HB_EMPTY_SIG 0xA5A5A5A5 -/* Heartbeat counter pair */ -struct hb_cnt_pair { - __u16 resp_heartbeat_cnt; - __u16 req_heartbeat_cnt; -}; - static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev) { u64 curr_time = adf_clock_get_current_time(); diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h index b22e3cb29798..24c3f4f24c86 
100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h @@ -19,6 +19,12 @@ enum adf_device_heartbeat_status { HB_DEV_UNSUPPORTED, }; +/* Heartbeat counter pair */ +struct hb_cnt_pair { + __u16 resp_heartbeat_cnt; + __u16 req_heartbeat_cnt; +}; + struct adf_heartbeat { unsigned int hb_sent_counter; unsigned int hb_failed_counter; @@ -35,6 +41,9 @@ struct adf_heartbeat { struct dentry *cfg; struct dentry *sent; struct dentry *failed; +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + struct dentry *inject_error; +#endif } dbgfs; }; @@ -51,6 +60,15 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev, enum adf_device_heartbeat_status *hb_status); void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev); +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION +int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev); +#else +static inline int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev) +{ + return -EPERM; +} +#endif + #else static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c index 2661af6a2ef6..5cd6c2d6f90a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c @@ -155,6 +155,43 @@ static const struct file_operations adf_hb_cfg_fops = { .write = adf_hb_cfg_write, }; +static ssize_t adf_hb_error_inject_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct adf_accel_dev *accel_dev = file->private_data; + size_t written_chars; + char buf[3]; + int ret; + + /* last byte left as string termination */ + if (count != 2) + return -EINVAL; + + written_chars = simple_write_to_buffer(buf, sizeof(buf) - 1, + ppos, user_buf, count); + if (buf[0] != '1') + return -EINVAL; + + ret = adf_heartbeat_inject_error(accel_dev); + 
if (ret) { + dev_err(&GET_DEV(accel_dev), + "Heartbeat error injection failed with status %d\n", + ret); + return ret; + } + + dev_info(&GET_DEV(accel_dev), "Heartbeat error injection enabled\n"); + + return written_chars; +} + +static const struct file_operations adf_hb_error_inject_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = adf_hb_error_inject_write, +}; + void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev) { struct adf_heartbeat *hb = accel_dev->heartbeat; @@ -171,6 +208,17 @@ void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev) &hb->hb_failed_counter, &adf_hb_stats_fops); hb->dbgfs.cfg = debugfs_create_file("config", 0600, hb->dbgfs.base_dir, accel_dev, &adf_hb_cfg_fops); + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION)) { + struct dentry *inject_error __maybe_unused; + + inject_error = debugfs_create_file("inject_error", 0200, + hb->dbgfs.base_dir, accel_dev, + &adf_hb_error_inject_fops); +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + hb->dbgfs.inject_error = inject_error; +#endif + } } EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_add); @@ -189,6 +237,10 @@ void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev) hb->dbgfs.failed = NULL; debugfs_remove(hb->dbgfs.cfg); hb->dbgfs.cfg = NULL; +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + debugfs_remove(hb->dbgfs.inject_error); + hb->dbgfs.inject_error = NULL; +#endif debugfs_remove(hb->dbgfs.base_dir); hb->dbgfs.base_dir = NULL; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c new file mode 100644 index 000000000000..a3b474bdef6c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include + +#include "adf_admin.h" +#include "adf_common_drv.h" +#include "adf_heartbeat.h" + +#define MAX_HB_TICKS 0xFFFFFFFF + +static int adf_hb_set_timer_to_max(struct 
adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + + accel_dev->heartbeat->hb_timer = 0; + + if (hw_data->stop_timer) + hw_data->stop_timer(accel_dev); + + return adf_send_admin_hb_timer(accel_dev, MAX_HB_TICKS); +} + +static void adf_set_hb_counters_fail(struct adf_accel_dev *accel_dev, u32 ae, + u32 thr) +{ + struct hb_cnt_pair *stats = accel_dev->heartbeat->dma.virt_addr; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + const size_t max_aes = hw_device->get_num_aes(hw_device); + const size_t hb_ctrs = hw_device->num_hb_ctrs; + size_t thr_id = ae * hb_ctrs + thr; + u16 num_rsp = stats[thr_id].resp_heartbeat_cnt; + + /* + * Inject live.req != live.rsp and live.rsp == last.rsp + * to trigger the heartbeat error detection + */ + stats[thr_id].req_heartbeat_cnt++; + stats += (max_aes * hb_ctrs); + stats[thr_id].resp_heartbeat_cnt = num_rsp; +} + +int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + const size_t max_aes = hw_device->get_num_aes(hw_device); + const size_t hb_ctrs = hw_device->num_hb_ctrs; + u32 rand, rand_ae, rand_thr; + unsigned long ae_mask; + int ret; + + ae_mask = hw_device->ae_mask; + + do { + /* Ensure we have a valid ae */ + get_random_bytes(&rand, sizeof(rand)); + rand_ae = rand % max_aes; + } while (!test_bit(rand_ae, &ae_mask)); + + get_random_bytes(&rand, sizeof(rand)); + rand_thr = rand % hb_ctrs; + + /* Increase the heartbeat timer to prevent FW updating HB counters */ + ret = adf_hb_set_timer_to_max(accel_dev); + if (ret) + return ret; + + /* Configure worker threads to stop processing any packet */ + ret = adf_disable_arb_thd(accel_dev, rand_ae, rand_thr); + if (ret) + return ret; + + /* Change HB counters memory to simulate a hang */ + adf_set_hb_counters_fail(accel_dev, rand_ae, rand_thr); + + return 0; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c 
b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c index dd9a31c20bc9..f93d9cca70ce 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c +++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c @@ -99,3 +99,28 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev) csr_ops->write_csr_ring_srv_arb_en(csr, i, 0); } EXPORT_SYMBOL_GPL(adf_exit_arb); + +int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr) +{ + void __iomem *csr = accel_dev->transport->banks[0].csr_addr; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + const u32 *thd_2_arb_cfg; + struct arb_info info; + u32 ae_thr_map; + + if (ADF_AE_STRAND0_THREAD == thr || ADF_AE_STRAND1_THREAD == thr) + thr = ADF_AE_ADMIN_THREAD; + + hw_data->get_arb_info(&info); + thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev); + if (!thd_2_arb_cfg) + return -EFAULT; + + /* Disable scheduling for this particular AE and thread */ + ae_thr_map = *(thd_2_arb_cfg + ae); + ae_thr_map &= ~(GENMASK(3, 0) << (thr * BIT(2))); + + WRITE_CSR_ARB_WT2SAM(csr, info.arb_offset, info.wt2sam_offset, ae, + ae_thr_map); + return 0; +} -- Gitee From 2ccc56f35c87cbd26f7f2c1407e20b6853fb4c17 Mon Sep 17 00:00:00 2001 From: Furong Zhou Date: Fri, 2 Feb 2024 18:53:17 +0800 Subject: [PATCH 0741/2138] crypto: qat - add fatal error notify method ANBZ: #8589 commit ae508d7afb753f7576c435226e32b9535b7f8b10 upstream. Intel-SIG: commit ae508d7afb75 crypto: qat - add fatal error notify method Backport to support Intel QAT in-tree driver Add error notify method to report a fatal error event to all the subsystems registered. In addition expose an API, adf_notify_fatal_error(), that allows to trigger a fatal error notification asynchronously in the context of a workqueue. This will be invoked when a fatal error is detected by the ISR or through Heartbeat. 
Signed-off-by: Furong Zhou Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Mun Chun Yep Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_aer.c | 30 +++++++++++++++++++ .../intel/qat/qat_common/adf_common_drv.h | 3 ++ .../crypto/intel/qat/qat_common/adf_init.c | 12 ++++++++ 3 files changed, 45 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index af495a6f039f..5c7af930067a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -8,6 +8,11 @@ #include "adf_accel_devices.h" #include "adf_common_drv.h" +struct adf_fatal_error_data { + struct adf_accel_dev *accel_dev; + struct work_struct work; +}; + static struct workqueue_struct *device_reset_wq; static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, @@ -172,6 +177,31 @@ const struct pci_error_handlers adf_err_handler = { }; EXPORT_SYMBOL_GPL(adf_err_handler); +static void adf_notify_fatal_error_worker(struct work_struct *work) +{ + struct adf_fatal_error_data *wq_data = + container_of(work, struct adf_fatal_error_data, work); + struct adf_accel_dev *accel_dev = wq_data->accel_dev; + + adf_error_notifier(accel_dev); + kfree(wq_data); +} + +int adf_notify_fatal_error(struct adf_accel_dev *accel_dev) +{ + struct adf_fatal_error_data *wq_data; + + wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC); + if (!wq_data) + return -ENOMEM; + + wq_data->accel_dev = accel_dev; + INIT_WORK(&wq_data->work, adf_notify_fatal_error_worker); + adf_misc_wq_queue_work(&wq_data->work); + + return 0; +} + int adf_init_aer(void) { device_reset_wq = alloc_workqueue("qat_device_reset_wq", diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h 
b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 0baae42deb3a..8c062d5a8db2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -40,6 +40,7 @@ enum adf_event { ADF_EVENT_SHUTDOWN, ADF_EVENT_RESTARTING, ADF_EVENT_RESTARTED, + ADF_EVENT_FATAL_ERROR, }; struct service_hndl { @@ -60,6 +61,8 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev); void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); void adf_clean_vf_map(bool); +int adf_notify_fatal_error(struct adf_accel_dev *accel_dev); +void adf_error_notifier(struct adf_accel_dev *accel_dev); int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf); void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index f43ae9111553..74f0818c0703 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -433,6 +433,18 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) return 0; } +void adf_error_notifier(struct adf_accel_dev *accel_dev) +{ + struct service_hndl *service; + + list_for_each_entry(service, &service_table, list) { + if (service->event_hld(accel_dev, ADF_EVENT_FATAL_ERROR)) + dev_err(&GET_DEV(accel_dev), + "Failed to send error event to %s.\n", + service->name); + } +} + static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev) { char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; -- Gitee From db6cffaecf55b7f5501eb64608746830597025f9 Mon Sep 17 00:00:00 2001 From: Furong Zhou Date: Fri, 2 Feb 2024 18:53:18 +0800 Subject: [PATCH 0742/2138] crypto: qat - disable arbitration before reset ANBZ: #8589 commit 758a0087db98fa23a3597289dbf3643ba9db2700 upstream. 
Intel-SIG: commit 758a0087db98 crypto: qat - disable arbitration before reset Backport to support Intel QAT in-tree driver Disable arbitration to avoid new requests to be processed before resetting a device. This is needed so that new requests are not fetched when an error is detected. Signed-off-by: Furong Zhou Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Mun Chun Yep Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_aer.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index 5c7af930067a..fbf70921156a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -182,8 +182,16 @@ static void adf_notify_fatal_error_worker(struct work_struct *work) struct adf_fatal_error_data *wq_data = container_of(work, struct adf_fatal_error_data, work); struct adf_accel_dev *accel_dev = wq_data->accel_dev; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; adf_error_notifier(accel_dev); + + if (!accel_dev->is_vf) { + /* Disable arbitration to stop processing of new requests */ + if (hw_device->exit_arb) + hw_device->exit_arb(accel_dev); + } + kfree(wq_data); } -- Gitee From 01c1550b2c28588ecdd8ab29dd525e85163cabf4 Mon Sep 17 00:00:00 2001 From: Mun Chun Yep Date: Fri, 2 Feb 2024 18:53:19 +0800 Subject: [PATCH 0743/2138] crypto: qat - update PFVF protocol for recovery ANBZ: #8589 commit ec26f8e6c784ae391e69b19f4738d7196ed7794d upstream. Intel-SIG: commit ec26f8e6c784 crypto: qat - update PFVF protocol for recovery Backport to support Intel QAT in-tree driver Update the PFVF logic to handle restart and recovery. 
This adds the following functions: * adf_pf2vf_notify_fatal_error(): allows the PF to notify VFs that the device detected a fatal error and requires a reset. This sends to VF the event `ADF_PF2VF_MSGTYPE_FATAL_ERROR`. * adf_pf2vf_wait_for_restarting_complete(): allows the PF to wait for `ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE` events from active VFs before proceeding with a reset. * adf_pf2vf_notify_restarted(): enables the PF to notify VFs with an `ADF_PF2VF_MSGTYPE_RESTARTED` event after recovery, indicating that the device is back to normal. This prompts VF drivers switch back to use the accelerator for workload processing. These changes improve the communication and synchronization between PF and VF drivers during system restart and recovery processes. Signed-off-by: Mun Chun Yep Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_common/adf_accel_devices.h | 1 + drivers/crypto/intel/qat/qat_common/adf_aer.c | 3 + .../intel/qat/qat_common/adf_pfvf_msg.h | 7 +- .../intel/qat/qat_common/adf_pfvf_pf_msg.c | 64 ++++++++++++++++++- .../intel/qat/qat_common/adf_pfvf_pf_msg.h | 21 ++++++ .../intel/qat/qat_common/adf_pfvf_pf_proto.c | 8 +++ .../intel/qat/qat_common/adf_pfvf_vf_proto.c | 6 ++ .../crypto/intel/qat/qat_common/adf_sriov.c | 1 + 8 files changed, 109 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index a16c7e6edc65..4a3c36aaa7ca 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -332,6 +332,7 @@ struct adf_accel_vf_info { struct ratelimit_state vf2pf_ratelimit; u32 vf_nr; bool init; + bool restarting; u8 vf_compat_ver; }; diff --git 
a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index fbf70921156a..0510051f5f43 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -7,6 +7,7 @@ #include #include "adf_accel_devices.h" #include "adf_common_drv.h" +#include "adf_pfvf_pf_msg.h" struct adf_fatal_error_data { struct adf_accel_dev *accel_dev; @@ -190,6 +191,8 @@ static void adf_notify_fatal_error_worker(struct work_struct *work) /* Disable arbitration to stop processing of new requests */ if (hw_device->exit_arb) hw_device->exit_arb(accel_dev); + if (accel_dev->pf.vf_info) + adf_pf2vf_notify_fatal_error(accel_dev); } kfree(wq_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h index 204a42438992..d1b3ef9cadac 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h @@ -99,6 +99,8 @@ enum pf2vf_msgtype { ADF_PF2VF_MSGTYPE_RESTARTING = 0x01, ADF_PF2VF_MSGTYPE_VERSION_RESP = 0x02, ADF_PF2VF_MSGTYPE_BLKMSG_RESP = 0x03, + ADF_PF2VF_MSGTYPE_FATAL_ERROR = 0x04, + ADF_PF2VF_MSGTYPE_RESTARTED = 0x05, /* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */ ADF_PF2VF_MSGTYPE_RP_RESET_RESP = 0x10, }; @@ -112,6 +114,7 @@ enum vf2pf_msgtype { ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ = 0x07, ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ = 0x08, ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ = 0x09, + ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE = 0x0a, /* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. 
*/ ADF_VF2PF_MSGTYPE_RP_RESET = 0x10, }; @@ -124,8 +127,10 @@ enum pfvf_compatibility_version { ADF_PFVF_COMPAT_FAST_ACK = 0x03, /* Ring to service mapping support for non-standard mappings */ ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04, + /* Fallback compat */ + ADF_PFVF_COMPAT_FALLBACK = 0x05, /* Reference to the latest version */ - ADF_PFVF_COMPAT_THIS_VERSION = 0x04, + ADF_PFVF_COMPAT_THIS_VERSION = 0x05, }; /* PF->VF Version Response */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c index 14c069f0d71a..0e31f4b41844 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c @@ -1,21 +1,83 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2015 - 2021 Intel Corporation */ +#include #include #include "adf_accel_devices.h" #include "adf_pfvf_msg.h" #include "adf_pfvf_pf_msg.h" #include "adf_pfvf_pf_proto.h" +#define ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY 100 +#define ADF_VF_SHUTDOWN_RETRY 100 + void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev) { struct adf_accel_vf_info *vf; struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING }; int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n"); for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { - if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg)) + vf->restarting = false; + if (!vf->init) + continue; + if (adf_send_pf2vf_msg(accel_dev, i, msg)) dev_err(&GET_DEV(accel_dev), "Failed to send restarting msg to VF%d\n", i); + else if (vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK) + vf->restarting = true; + } +} + +void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev) +{ + int num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + int i, retries = ADF_VF_SHUTDOWN_RETRY; + struct adf_accel_vf_info *vf; + bool vf_running; + + 
dev_dbg(&GET_DEV(accel_dev), "pf2vf wait for restarting complete\n"); + do { + vf_running = false; + for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) + if (vf->restarting) + vf_running = true; + if (!vf_running) + break; + msleep(ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY); + } while (--retries); + + if (vf_running) + dev_warn(&GET_DEV(accel_dev), "Some VFs are still running\n"); +} + +void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev) +{ + struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTED }; + int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + struct adf_accel_vf_info *vf; + + dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarted\n"); + for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { + if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK && + adf_send_pf2vf_msg(accel_dev, i, msg)) + dev_err(&GET_DEV(accel_dev), + "Failed to send restarted msg to VF%d\n", i); + } +} + +void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev) +{ + struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_FATAL_ERROR }; + int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + struct adf_accel_vf_info *vf; + + dev_dbg(&GET_DEV(accel_dev), "pf2vf notify fatal error\n"); + for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { + if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK && + adf_send_pf2vf_msg(accel_dev, i, msg)) + dev_err(&GET_DEV(accel_dev), + "Failed to send fatal error msg to VF%d\n", i); } } diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h index e8982d1ac896..f203d88c919c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h @@ -5,7 +5,28 @@ #include "adf_accel_devices.h" +#if defined(CONFIG_PCI_IOV) void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev); +void adf_pf2vf_wait_for_restarting_complete(struct 
adf_accel_dev *accel_dev); +void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev); +void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev); +#else +static inline void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev) +{ +} +#endif typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev, u8 *buffer, u8 compat); diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c index 388e58bcbcaf..9ab93fbfefde 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c @@ -291,6 +291,14 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, vf_info->init = false; } break; + case ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE: + { + dev_dbg(&GET_DEV(accel_dev), + "Restarting Complete received from VF%d\n", vf_nr); + vf_info->restarting = false; + vf_info->init = false; + } + break; case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ: case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ: case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ: diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c index 1015155b6374..dc284a089c88 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c @@ -308,6 +308,12 @@ static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev, adf_pf2vf_handle_pf_restarting(accel_dev); return false; + case ADF_PF2VF_MSGTYPE_RESTARTED: + dev_dbg(&GET_DEV(accel_dev), "Restarted message received from PF\n"); + return true; + case ADF_PF2VF_MSGTYPE_FATAL_ERROR: + 
dev_err(&GET_DEV(accel_dev), "Fatal error received from PF\n"); + return true; case ADF_PF2VF_MSGTYPE_VERSION_RESP: case ADF_PF2VF_MSGTYPE_BLKMSG_RESP: case ADF_PF2VF_MSGTYPE_RP_RESET_RESP: diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c index f44025bb6f99..cb2a9830f192 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c @@ -103,6 +103,7 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) return; adf_pf2vf_notify_restarting(accel_dev); + adf_pf2vf_wait_for_restarting_complete(accel_dev); pci_disable_sriov(accel_to_pci_dev(accel_dev)); /* Disable VF to PF interrupts */ -- Gitee From 82e86ea765fea50815e0a3582322d02d23a76083 Mon Sep 17 00:00:00 2001 From: Mun Chun Yep Date: Fri, 2 Feb 2024 18:53:20 +0800 Subject: [PATCH 0744/2138] crypto: qat - re-enable sriov after pf reset ANBZ: #8589 commit 4469f9b2346834085fe4478ee1a851ee1de8ccb2 upstream. Intel-SIG: commit 4469f9b23468 crypto: qat - re-enable sriov after pf reset Backport to support Intel QAT in-tree driver When a Physical Function (PF) is reset, SR-IOV gets disabled, making the associated Virtual Functions (VFs) unavailable. Even after reset and using pci_restore_state, VFs remain uncreated because the numvfs still at 0. Therefore, it's necessary to reconfigure SR-IOV to re-enable VFs. This commit introduces the ADF_SRIOV_ENABLED configuration flag to cache the SR-IOV enablement state. SR-IOV is only re-enabled if it was previously configured. This commit also introduces a dedicated workqueue without `WQ_MEM_RECLAIM` flag for enabling SR-IOV during Heartbeat and CPM error resets, preventing workqueue flushing warning. This patch is based on earlier work done by Shashank Gupta. 
Signed-off-by: Mun Chun Yep Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_aer.c | 40 ++++++++++++++++++- .../intel/qat/qat_common/adf_cfg_strings.h | 1 + .../intel/qat/qat_common/adf_common_drv.h | 5 +++ .../crypto/intel/qat/qat_common/adf_sriov.c | 37 +++++++++++++++-- 4 files changed, 79 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index 0510051f5f43..983138ce1b77 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -15,6 +15,7 @@ struct adf_fatal_error_data { }; static struct workqueue_struct *device_reset_wq; +static struct workqueue_struct *device_sriov_wq; static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, pci_channel_state_t state) @@ -43,6 +44,13 @@ struct adf_reset_dev_data { struct work_struct reset_work; }; +/* sriov dev data */ +struct adf_sriov_dev_data { + struct adf_accel_dev *accel_dev; + struct completion compl; + struct work_struct sriov_work; +}; + void adf_reset_sbr(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_to_pci_dev(accel_dev); @@ -88,11 +96,22 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev) } } +static void adf_device_sriov_worker(struct work_struct *work) +{ + struct adf_sriov_dev_data *sriov_data = + container_of(work, struct adf_sriov_dev_data, sriov_work); + + adf_reenable_sriov(sriov_data->accel_dev); + complete(&sriov_data->compl); +} + static void adf_device_reset_worker(struct work_struct *work) { struct adf_reset_dev_data *reset_data = container_of(work, struct adf_reset_dev_data, reset_work); struct adf_accel_dev *accel_dev = reset_data->accel_dev; + unsigned long wait_jiffies = 
msecs_to_jiffies(10000); + struct adf_sriov_dev_data sriov_data; adf_dev_restarting_notify(accel_dev); if (adf_dev_restart(accel_dev)) { @@ -103,6 +122,14 @@ static void adf_device_reset_worker(struct work_struct *work) WARN(1, "QAT: device restart failed. Device is unusable\n"); return; } + + sriov_data.accel_dev = accel_dev; + init_completion(&sriov_data.compl); + INIT_WORK(&sriov_data.sriov_work, adf_device_sriov_worker); + queue_work(device_sriov_wq, &sriov_data.sriov_work); + if (wait_for_completion_timeout(&sriov_data.compl, wait_jiffies)) + adf_pf2vf_notify_restarted(accel_dev); + adf_dev_restarted_notify(accel_dev); clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); @@ -217,7 +244,14 @@ int adf_init_aer(void) { device_reset_wq = alloc_workqueue("qat_device_reset_wq", WQ_MEM_RECLAIM, 0); - return !device_reset_wq ? -EFAULT : 0; + if (!device_reset_wq) + return -EFAULT; + + device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0); + if (!device_sriov_wq) + return -EFAULT; + + return 0; } void adf_exit_aer(void) @@ -225,4 +259,8 @@ void adf_exit_aer(void) if (device_reset_wq) destroy_workqueue(device_reset_wq); device_reset_wq = NULL; + + if (device_sriov_wq) + destroy_workqueue(device_sriov_wq); + device_sriov_wq = NULL; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h index 322b76903a73..e015ad6cace2 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h @@ -49,5 +49,6 @@ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY #define ADF_ACCEL_STR "Accelerator%d" #define ADF_HEARTBEAT_TIMER "HeartbeatTimer" +#define ADF_SRIOV_ENABLED "SriovEnabled" #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 8c062d5a8db2..10891c9da6e7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ 
b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -192,6 +192,7 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work, #if defined(CONFIG_PCI_IOV) int adf_sriov_configure(struct pci_dev *pdev, int numvfs); void adf_disable_sriov(struct adf_accel_dev *accel_dev); +void adf_reenable_sriov(struct adf_accel_dev *accel_dev); void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask); void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev); bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev); @@ -212,6 +213,10 @@ static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) { } +static inline void adf_reenable_sriov(struct adf_accel_dev *accel_dev) +{ +} + static inline int adf_init_pf_wq(void) { return 0; diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c index cb2a9830f192..87a70c00c41e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c @@ -60,7 +60,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) /* This ptr will be populated when VFs will be created */ vf_info->accel_dev = accel_dev; vf_info->vf_nr = i; - vf_info->vf_compat_ver = 0; mutex_init(&vf_info->pf2vf_lock); ratelimit_state_init(&vf_info->vf2pf_ratelimit, @@ -84,6 +83,32 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) return pci_enable_sriov(pdev, totalvfs); } +void adf_reenable_sriov(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + unsigned long val = 0; + + if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_SRIOV_ENABLED, cfg)) + return; + + if (!accel_dev->pf.vf_info) + return; + + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC)) + return; + + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC)) + 
return; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + dev_dbg(&pdev->dev, "Re-enabling SRIOV\n"); + adf_enable_sriov(accel_dev); +} + /** * adf_disable_sriov() - Disable SRIOV for the device * @accel_dev: Pointer to accel device. @@ -116,8 +141,10 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) mutex_destroy(&vf->pf2vf_lock); - kfree(accel_dev->pf.vf_info); - accel_dev->pf.vf_info = NULL; + if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) { + kfree(accel_dev->pf.vf_info); + accel_dev->pf.vf_info = NULL; + } } EXPORT_SYMBOL_GPL(adf_disable_sriov); @@ -195,6 +222,10 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs) if (ret) return ret; + val = 1; + adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED, + &val, ADF_DEC); + return numvfs; } EXPORT_SYMBOL_GPL(adf_sriov_configure); -- Gitee From e53abbadddeefd3b7a944c5ba28103bca4988bcb Mon Sep 17 00:00:00 2001 From: Mun Chun Yep Date: Fri, 2 Feb 2024 18:53:21 +0800 Subject: [PATCH 0745/2138] crypto: qat - add fatal error notification ANBZ: #8589 commit 2aaa1995a94a3187e52ddb9f127fa1307ee8ad00 upstream. Intel-SIG: commit 2aaa1995a94a crypto: qat - add fatal error notification Backport to support Intel QAT in-tree driver Notify a fatal error condition and optionally reset the device in the following cases: * if the device reports an uncorrectable fatal error through an interrupt * if the heartbeat feature detects that the device is not responding This patch is based on earlier work done by Shashank Gupta. 
Signed-off-by: Mun Chun Yep Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_heartbeat.c | 3 +++ drivers/crypto/intel/qat/qat_common/adf_isr.c | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c index f88b1bc6857e..fe8428d4ff39 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c @@ -229,6 +229,9 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev, "Heartbeat ERROR: QAT is not responding.\n"); *hb_status = HB_DEV_UNRESPONSIVE; hb->hb_failed_counter++; + if (adf_notify_fatal_error(accel_dev)) + dev_err(&GET_DEV(accel_dev), + "Failed to notify fatal error\n"); return; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c index a13d9885d60f..020d213f4c99 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c @@ -139,8 +139,13 @@ static bool adf_handle_ras_int(struct adf_accel_dev *accel_dev) if (ras_ops->handle_interrupt && ras_ops->handle_interrupt(accel_dev, &reset_required)) { - if (reset_required) + if (reset_required) { dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n"); + if (adf_notify_fatal_error(accel_dev)) + dev_err(&GET_DEV(accel_dev), + "Failed to notify fatal error\n"); + } + return true; } -- Gitee From 0dbc82fb2138a86e1d1cd4b6d5f3fa583eb5e3b7 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 2 Feb 2024 18:53:22 +0800 Subject: [PATCH 0746/2138] crypto: qat - add auto reset on error ANBZ: #8589 commit f5419a4239af8b3951f990c83d0d8c865a485475 upstream. 
Intel-SIG: commit f5419a4239af crypto: qat - add auto reset on error Backport to support Intel QAT in-tree driver Expose the `auto_reset` sysfs attribute to configure the driver to reset the device when a fatal error is detected. When auto reset is enabled, the driver resets the device when it detects either an heartbeat failure or a fatal error through an interrupt. This patch is based on earlier work done by Shashank Gupta. Signed-off-by: Damian Muszynski Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Mun Chun Yep Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- Documentation/ABI/testing/sysfs-driver-qat | 20 ++++++++++ .../intel/qat/qat_common/adf_accel_devices.h | 1 + drivers/crypto/intel/qat/qat_common/adf_aer.c | 11 +++++- .../intel/qat/qat_common/adf_common_drv.h | 1 + .../crypto/intel/qat/qat_common/adf_sysfs.c | 37 +++++++++++++++++++ 5 files changed, 69 insertions(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat index bbf329cf0d67..6778f1fea874 100644 --- a/Documentation/ABI/testing/sysfs-driver-qat +++ b/Documentation/ABI/testing/sysfs-driver-qat @@ -141,3 +141,23 @@ Description: 64 This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat/auto_reset +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (RW) Reports the current state of the autoreset feature + for a QAT device + + Write to the attribute to enable or disable device auto reset. + + Device auto reset is disabled by default. + + The values are:: + + * 1/Yy/on: auto reset enabled. If the device encounters an + unrecoverable error, it will be reset automatically. + * 0/Nn/off: auto reset disabled. If the device encounters an + unrecoverable error, it will not be reset. 
+ + This attribute is only available for qat_4xxx devices. diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 4a3c36aaa7ca..0f26aa976c8c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -402,6 +402,7 @@ struct adf_accel_dev { struct adf_error_counters ras_errors; struct mutex state_lock; /* protect state of the device */ bool is_vf; + bool autoreset_on_error; u32 accel_id; }; #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index 983138ce1b77..2d7d30c231a9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -205,6 +205,14 @@ const struct pci_error_handlers adf_err_handler = { }; EXPORT_SYMBOL_GPL(adf_err_handler); +int adf_dev_autoreset(struct adf_accel_dev *accel_dev) +{ + if (accel_dev->autoreset_on_error) + return adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_ASYNC); + + return 0; +} + static void adf_notify_fatal_error_worker(struct work_struct *work) { struct adf_fatal_error_data *wq_data = @@ -216,10 +224,11 @@ static void adf_notify_fatal_error_worker(struct work_struct *work) if (!accel_dev->is_vf) { /* Disable arbitration to stop processing of new requests */ - if (hw_device->exit_arb) + if (accel_dev->autoreset_on_error && hw_device->exit_arb) hw_device->exit_arb(accel_dev); if (accel_dev->pf.vf_info) adf_pf2vf_notify_fatal_error(accel_dev); + adf_dev_autoreset(accel_dev); } kfree(wq_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 10891c9da6e7..57328249c89e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -87,6 +87,7 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev); extern const struct 
pci_error_handlers adf_err_handler; void adf_reset_sbr(struct adf_accel_dev *accel_dev); void adf_reset_flr(struct adf_accel_dev *accel_dev); +int adf_dev_autoreset(struct adf_accel_dev *accel_dev); void adf_dev_restore(struct adf_accel_dev *accel_dev); int adf_init_aer(void); void adf_exit_aer(void); diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index d450dad32c9e..4e7f70d4049d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -204,6 +204,42 @@ static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute } static DEVICE_ATTR_RW(pm_idle_enabled); +static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + char *auto_reset; + struct adf_accel_dev *accel_dev; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + auto_reset = accel_dev->autoreset_on_error ? 
"on" : "off"; + + return sysfs_emit(buf, "%s\n", auto_reset); +} + +static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + bool enabled = false; + int ret; + + ret = kstrtobool(buf, &enabled); + if (ret) + return ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + accel_dev->autoreset_on_error = enabled; + + return count; +} +static DEVICE_ATTR_RW(auto_reset); + static DEVICE_ATTR_RW(state); static DEVICE_ATTR_RW(cfg_services); @@ -291,6 +327,7 @@ static struct attribute *qat_attrs[] = { &dev_attr_pm_idle_enabled.attr, &dev_attr_rp2srv.attr, &dev_attr_num_rps.attr, + &dev_attr_auto_reset.attr, NULL, }; -- Gitee From 83f6a7df479f577176136ccf3350caac73b6848c Mon Sep 17 00:00:00 2001 From: Furong Zhou Date: Fri, 2 Feb 2024 18:53:23 +0800 Subject: [PATCH 0747/2138] crypto: qat - limit heartbeat notifications ANBZ: #8589 commit 750fa7c20e60926431ec50d63899771ffcd9fd5c upstream. Intel-SIG: commit 750fa7c20e60 crypto: qat - limit heartbeat notifications Backport to support Intel QAT in-tree driver When the driver detects an heartbeat failure, it starts the recovery flow. Set a limit so that the number of events is limited in case the heartbeat status is read too frequently. 
Signed-off-by: Furong Zhou Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Mun Chun Yep Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../crypto/intel/qat/qat_common/adf_heartbeat.c | 17 ++++++++++++++--- .../crypto/intel/qat/qat_common/adf_heartbeat.h | 3 +++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c index fe8428d4ff39..b19aa1ef8eee 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c @@ -205,6 +205,19 @@ static int adf_hb_get_status(struct adf_accel_dev *accel_dev) return ret; } +static void adf_heartbeat_reset(struct adf_accel_dev *accel_dev) +{ + u64 curr_time = adf_clock_get_current_time(); + u64 time_since_reset = curr_time - accel_dev->heartbeat->last_hb_reset_time; + + if (time_since_reset < ADF_CFG_HB_RESET_MS) + return; + + accel_dev->heartbeat->last_hb_reset_time = curr_time; + if (adf_notify_fatal_error(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to notify fatal error\n"); +} + void adf_heartbeat_status(struct adf_accel_dev *accel_dev, enum adf_device_heartbeat_status *hb_status) { @@ -229,9 +242,7 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev, "Heartbeat ERROR: QAT is not responding.\n"); *hb_status = HB_DEV_UNRESPONSIVE; hb->hb_failed_counter++; - if (adf_notify_fatal_error(accel_dev)) - dev_err(&GET_DEV(accel_dev), - "Failed to notify fatal error\n"); + adf_heartbeat_reset(accel_dev); return; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h index 24c3f4f24c86..16fdfb48b196 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h 
@@ -13,6 +13,8 @@ struct dentry; #define ADF_CFG_HB_TIMER_DEFAULT_MS 500 #define ADF_CFG_HB_COUNT_THRESHOLD 3 +#define ADF_CFG_HB_RESET_MS 5000 + enum adf_device_heartbeat_status { HB_DEV_UNRESPONSIVE = 0, HB_DEV_ALIVE, @@ -30,6 +32,7 @@ struct adf_heartbeat { unsigned int hb_failed_counter; unsigned int hb_timer; u64 last_hb_check_time; + u64 last_hb_reset_time; bool ctrs_cnt_checked; struct hb_dma_addr { dma_addr_t phy_addr; -- Gitee From 49d5378afa4e34cef55293375e976c80b4ccd4ce Mon Sep 17 00:00:00 2001 From: Mun Chun Yep Date: Fri, 2 Feb 2024 18:53:24 +0800 Subject: [PATCH 0748/2138] crypto: qat - improve aer error reset handling ANBZ: #8589 commit 9567d3dc760931afc38f7f1144c66dd8c4b8c680 upstream. Intel-SIG: commit 9567d3dc7609 crypto: qat - improve aer error reset handling Backport to support Intel QAT in-tree driver Rework the AER reset and recovery flow to take into account root port integrated devices that gets reset between the error detected and the slot reset callbacks. In adf_error_detected() the devices is gracefully shut down. The worker threads are disabled, the error conditions are notified to listeners and through PFVF comms and finally the device is reset as part of adf_dev_down(). In adf_slot_reset(), the device is brought up again. If SRIOV VFs were enabled before reset, these are re-enabled and VFs are notified of restarting through PFVF comms. 
Signed-off-by: Mun Chun Yep Reviewed-by: Ahsan Atta Reviewed-by: Markas Rapoportas Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_aer.c | 26 ++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index 2d7d30c231a9..04260f61d042 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -33,6 +33,19 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, return PCI_ERS_RESULT_DISCONNECT; } + set_bit(ADF_STATUS_RESTARTING, &accel_dev->status); + if (accel_dev->hw_device->exit_arb) { + dev_dbg(&pdev->dev, "Disabling arbitration\n"); + accel_dev->hw_device->exit_arb(accel_dev); + } + adf_error_notifier(accel_dev); + adf_pf2vf_notify_fatal_error(accel_dev); + adf_dev_restarting_notify(accel_dev); + adf_pf2vf_notify_restarting(accel_dev); + adf_pf2vf_wait_for_restarting_complete(accel_dev); + pci_clear_master(pdev); + adf_dev_down(accel_dev, false); + return PCI_ERS_RESULT_NEED_RESET; } @@ -181,14 +194,25 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev) { struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + int res = 0; if (!accel_dev) { pr_err("QAT: Can't find acceleration device\n"); return PCI_ERS_RESULT_DISCONNECT; } - if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC)) + + if (!pdev->is_busmaster) + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + res = adf_dev_up(accel_dev, false); + if (res && res != -EALREADY) return PCI_ERS_RESULT_DISCONNECT; + adf_reenable_sriov(accel_dev); + adf_pf2vf_notify_restarted(accel_dev); + 
adf_dev_restarted_notify(accel_dev); + clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); return PCI_ERS_RESULT_RECOVERED; } -- Gitee From dc378d5c39c9ae648bb82d481cc7dd57788d9f5c Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 9 Feb 2024 13:42:07 +0100 Subject: [PATCH 0749/2138] crypto: qat - change SLAs cleanup flow at shutdown ANBZ: #8589 commit c2304e1a0b8051a60d4eb9c99a1c509d90380ae5 upstream. Intel-SIG: commit c2304e1a0b80 crypto: qat - change SLAs cleanup flow at shutdown Backport to support Intel QAT in-tree driver The implementation of the Rate Limiting (RL) feature includes the cleanup of all SLAs during device shutdown. For each SLA, the firmware is notified of the removal through an admin message, the data structures that take into account the budgets are updated and the memory is freed. However, this explicit cleanup is not necessary as (1) the device is reset, and the firmware state is lost and (2) all RL data structures are freed anyway. In addition, if the device is unresponsive, for example after a PCI AER error is detected, the admin interface might not be available. This might slow down the shutdown sequence and cause a timeout in the recovery flows which in turn makes the driver believe that the device is not recoverable. Fix by replacing the explicit SLAs removal with just a free of the SLA data structures. 
Fixes: d9fb8408376e ("crypto: qat - add rate limiting feature to qat_4xxx") Cc: Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_rl.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c index de1b214dba1f..d4f2db3c53d8 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -788,6 +788,24 @@ static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla) sla_type_arr[node_id] = NULL; } +static void free_all_sla(struct adf_accel_dev *accel_dev) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + int sla_id; + + mutex_lock(&rl_data->rl_lock); + + for (sla_id = 0; sla_id < RL_NODES_CNT_MAX; sla_id++) { + if (!rl_data->sla[sla_id]) + continue; + + kfree(rl_data->sla[sla_id]); + rl_data->sla[sla_id] = NULL; + } + + mutex_unlock(&rl_data->rl_lock); +} + /** * add_update_sla() - handles the creation and the update of an SLA * @accel_dev: pointer to acceleration device structure @@ -1155,7 +1173,7 @@ void adf_rl_stop(struct adf_accel_dev *accel_dev) return; adf_sysfs_rl_rm(accel_dev); - adf_rl_remove_sla_all(accel_dev, true); + free_all_sla(accel_dev); } void adf_rl_exit(struct adf_accel_dev *accel_dev) -- Gitee From b39e60cab7224af2a5ad9b2da58f618307669923 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Mon, 12 Feb 2024 13:05:09 +0000 Subject: [PATCH 0750/2138] Documentation: qat: fix auto_reset section ANBZ: #8589 commit 2ecd43413d7668d67b9b8a56f882aa1ea12b8a62 upstream. Intel-SIG: commit 2ecd43413d76 Documentation: qat: fix auto_reset section Backport to support Intel QAT in-tree driver Remove unneeded colon in the auto_reset section. 
This resolves the following errors when building the documentation: Documentation/ABI/testing/sysfs-driver-qat:146: ERROR: Unexpected indentation. Documentation/ABI/testing/sysfs-driver-qat:146: WARNING: Block quote ends without a blank line; unexpected unindent. Fixes: f5419a4239af ("crypto: qat - add auto reset on error") Reported-by: Stephen Rothwell Closes: https://lore.kernel.org/linux-kernel/20240212144830.70495d07@canb.auug.org.au/T/ Signed-off-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- Documentation/ABI/testing/sysfs-driver-qat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat index 6778f1fea874..96020fb051c3 100644 --- a/Documentation/ABI/testing/sysfs-driver-qat +++ b/Documentation/ABI/testing/sysfs-driver-qat @@ -153,7 +153,7 @@ Description: (RW) Reports the current state of the autoreset feature Device auto reset is disabled by default. - The values are:: + The values are: * 1/Yy/on: auto reset enabled. If the device encounters an unrecoverable error, it will be reset automatically. -- Gitee From e444a42b432fc4acbef5ec5c75d8532d2f446c4e Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 13 Feb 2024 21:09:41 +0300 Subject: [PATCH 0751/2138] crypto: qat - uninitialized variable in adf_hb_error_inject_write() ANBZ: #8589 commit bcc06e1b3dadc76140203753a08979374c965ada upstream. Intel-SIG: commit bcc06e1b3dad crypto: qat - uninitialized variable in adf_hb_error_inject_write() Backport to support Intel QAT in-tree driver There are a few issues in this code. If *ppos is non-zero then the first part of the buffer is not initialized. We never initialize the last character of the buffer. The return is not checked so it's possible that none of the buffer is initialized. 
This is debugfs code which is root only and the impact of these bugs is very small. However, it's still worth fixing. To fix this: 1) Check that *ppos is zero. 2) Use copy_from_user() instead of simple_write_to_buffer(). 3) Explicitly add a NUL terminator. Fixes: e2b67859ab6e ("crypto: qat - add heartbeat error simulator") Signed-off-by: Dan Carpenter Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c index 5cd6c2d6f90a..cccdff24b48d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c @@ -160,16 +160,17 @@ static ssize_t adf_hb_error_inject_write(struct file *file, size_t count, loff_t *ppos) { struct adf_accel_dev *accel_dev = file->private_data; - size_t written_chars; char buf[3]; int ret; /* last byte left as string termination */ - if (count != 2) + if (*ppos != 0 || count != 2) return -EINVAL; - written_chars = simple_write_to_buffer(buf, sizeof(buf) - 1, - ppos, user_buf, count); + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + buf[count] = '\0'; + if (buf[0] != '1') return -EINVAL; @@ -183,7 +184,7 @@ static ssize_t adf_hb_error_inject_write(struct file *file, dev_info(&GET_DEV(accel_dev), "Heartbeat error injection enabled\n"); - return written_chars; + return count; } static const struct file_operations adf_hb_error_inject_fops = { -- Gitee From 0a1bcbface20e055f66d9b8b3f63ef2695ced3ea Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Fri, 16 Feb 2024 15:19:55 +0000 Subject: [PATCH 0752/2138] crypto: qat - remove unused macros in qat_comp_alg.c ANBZ: #8589 commit 
dfff0e35fa5dd84ae75052ba129b0219d83e46dc upstream. Intel-SIG: commit dfff0e35fa5d crypto: qat - remove unused macros in qat_comp_alg.c Backport to support Intel QAT in-tree driver As a result of the removal of qat_zlib_deflate, some defines where not removed. Remove them. This is to fix the following warning when compiling the QAT driver using the clang compiler with CC=clang W=2: drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:21:9: warning: macro is not used [-Wunused-macros] 21 | #define QAT_RFC_1950_CM_OFFSET 4 | ^ drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:16:9: warning: macro is not used [-Wunused-macros] 16 | #define QAT_RFC_1950_HDR_SIZE 2 | ^ drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:17:9: warning: macro is not used [-Wunused-macros] 17 | #define QAT_RFC_1950_FOOTER_SIZE 4 | ^ drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:22:9: warning: macro is not used [-Wunused-macros] 22 | #define QAT_RFC_1950_DICT_MASK 0x20 | ^ drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:18:9: warning: macro is not used [-Wunused-macros] 18 | #define QAT_RFC_1950_CM_DEFLATE 8 | ^ drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:20:9: warning: macro is not used [-Wunused-macros] 20 | #define QAT_RFC_1950_CM_MASK 0x0f | ^ drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:23:9: warning: macro is not used [-Wunused-macros] 23 | #define QAT_RFC_1950_COMP_HDR 0x785e | ^ drivers/crypto/intel/qat/qat_common/qat_comp_algs.c:19:9: warning: macro is not used [-Wunused-macros] 19 | #define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7 | ^ Fixes: e9dd20e0e5f6 ("crypto: qat - Remove zlib-deflate") Signed-off-by: Adam Guerin Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/qat_comp_algs.c | 9 --------- 1 file changed, 9 deletions(-) diff --git 
a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c index bf8c0ee62917..2ba4aa22e092 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c @@ -13,15 +13,6 @@ #include "qat_compression.h" #include "qat_algs_send.h" -#define QAT_RFC_1950_HDR_SIZE 2 -#define QAT_RFC_1950_FOOTER_SIZE 4 -#define QAT_RFC_1950_CM_DEFLATE 8 -#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7 -#define QAT_RFC_1950_CM_MASK 0x0f -#define QAT_RFC_1950_CM_OFFSET 4 -#define QAT_RFC_1950_DICT_MASK 0x20 -#define QAT_RFC_1950_COMP_HDR 0x785e - static DEFINE_MUTEX(algs_lock); static unsigned int active_devs; -- Gitee From c01c9862c95f8fa162663c50d5cebda8712434c4 Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Fri, 16 Feb 2024 15:19:56 +0000 Subject: [PATCH 0753/2138] crypto: qat - removed unused macro in adf_cnv_dbgfs.c ANBZ: #8589 commit 9a5dcada14d5e027856a1bc38443e54111438da6 upstream. Intel-SIG: commit 9a5dcada14d5 crypto: qat - removed unused macro in adf_cnv_dbgfs.c Backport to support Intel QAT in-tree driver This macro was added but never used, remove it. 
This is to fix the following warning when compiling the QAT driver using the clang compiler with CC=clang W=2: drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c:19:9: warning: macro is not used [-Wunused-macros] 19 | #define CNV_SLICE_ERR_MASK GENMASK(7, 0) | ^ Fixes: d807f0240c71 ("crypto: qat - add cnv_errors debugfs file") Signed-off-by: Adam Guerin Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c index 07119c487da0..627953a72d47 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c @@ -16,7 +16,6 @@ #define CNV_ERR_INFO_MASK GENMASK(11, 0) #define CNV_ERR_TYPE_MASK GENMASK(15, 12) -#define CNV_SLICE_ERR_MASK GENMASK(7, 0) #define CNV_SLICE_ERR_SIGN_BIT_INDEX 7 #define CNV_DELTA_ERR_SIGN_BIT_INDEX 11 -- Gitee From 89be98bbc80156e9f607f0726b39ff0c0716a329 Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Fri, 16 Feb 2024 15:19:58 +0000 Subject: [PATCH 0754/2138] crypto: qat - remove double initialization of value ANBZ: #8589 commit a66cf93ab33853f17b8cc33a99263dd0a383a1a1 upstream. Intel-SIG: commit a66cf93ab338 crypto: qat - remove double initialization of value Backport to support Intel QAT in-tree driver Remove double initialization of the reg variable. 
This is to fix the following warning when compiling the QAT driver using clang scan-build: drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c:1010:6: warning: Value stored to 'reg' during its initialization is never read [deadcode.DeadStores] 1010 | u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR); | ^~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c:1109:6: warning: Value stored to 'reg' during its initialization is never read [deadcode.DeadStores] 1109 | u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH); | ^~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Fixes: 99b1c9826e48 ("crypto: qat - count QAT GEN4 errors") Signed-off-by: Adam Guerin Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 048c24607939..2dd3772bf58a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -1007,8 +1007,7 @@ static bool adf_handle_spppar_err(struct adf_accel_dev *accel_dev, static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 iastatssm) { - u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR); - u32 bits_num = BITS_PER_REG(reg); + u32 reg, bits_num = BITS_PER_REG(reg); bool reset_required = false; unsigned long errs_bits; u32 bit_iterator; @@ -1106,8 +1105,7 @@ static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 iastatssm) { - u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH); - u32 bits_num = BITS_PER_REG(reg); + u32 reg, bits_num = BITS_PER_REG(reg); bool reset_required 
= false; unsigned long errs_bits; u32 bit_iterator; -- Gitee From bd3f046339189d1a5f989eb2cb73dc1a2df112b0 Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Fri, 16 Feb 2024 15:19:59 +0000 Subject: [PATCH 0755/2138] crypto: qat - remove unnecessary description from comment ANBZ: #8589 commit ff391345141e727320ca906e6928c6a1f14e7e37 upstream. Intel-SIG: commit ff391345141e crypto: qat - remove unnecessary description from comment Backport to support Intel QAT in-tree driver Remove extra description from comments as it is not required. This is to fix the following warning when compiling the QAT driver using the clang compiler with CC=clang W=2: drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c:65: warning: contents before sections drivers/crypto/intel/qat/qat_common/adf_isr.c:380: warning: contents before sections drivers/crypto/intel/qat/qat_common/adf_vf_isr.c:298: warning: contents before sections Signed-off-by: Adam Guerin Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c | 4 ++-- drivers/crypto/intel/qat/qat_common/adf_isr.c | 2 -- drivers/crypto/intel/qat/qat_common/adf_vf_isr.c | 2 -- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c index 86ee36feefad..f07b748795f7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c @@ -60,10 +60,10 @@ static int adf_get_vf_real_id(u32 fake) /** * adf_clean_vf_map() - Cleans VF id mapings - * - * Function cleans internal ids for virtual functions. * @vf: flag indicating whether mappings is cleaned * for vfs only or for vfs and pfs + * + * Function cleans internal ids for virtual functions. 
*/ void adf_clean_vf_map(bool vf) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c index 020d213f4c99..cae1aee5479a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c @@ -380,8 +380,6 @@ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc); /** * adf_init_misc_wq() - Init misc workqueue * - * Function init workqueue 'qat_misc_wq' for general purpose. - * * Return: 0 on success, error code otherwise. */ int __init adf_init_misc_wq(void) diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c index b05c3957a160..cdbb2d687b1b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c @@ -293,8 +293,6 @@ EXPORT_SYMBOL_GPL(adf_flush_vf_wq); /** * adf_init_vf_wq() - Init workqueue for VF * - * Function init workqueue 'adf_vf_stop_wq' for VF. - * * Return: 0 on success, error code otherwise. */ int __init adf_init_vf_wq(void) -- Gitee From 258047ba431770ab43aff01ce6dc28df78c01d4e Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Fri, 16 Feb 2024 15:20:00 +0000 Subject: [PATCH 0756/2138] crypto: qat - fix comment structure ANBZ: #8589 commit bca79b9f5639b2fd4692904bce696291336e0246 upstream. Intel-SIG: commit bca79b9f5639 crypto: qat - fix comment structure Backport to support Intel QAT in-tree driver Move comment description to the same line as the function name. 
This is to fix the following warning when compiling the QAT driver using the clang compiler with CC=clang W=2: drivers/crypto/intel/qat/qat_common/qat_crypto.c:108: warning: missing initial short description on line: * qat_crypto_vf_dev_config() Signed-off-by: Adam Guerin Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_common/qat_crypto.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_crypto.c b/drivers/crypto/intel/qat/qat_common/qat_crypto.c index 40c8e74d1cf9..101c6ea41673 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/intel/qat/qat_common/qat_crypto.c @@ -105,8 +105,8 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) } /** - * qat_crypto_vf_dev_config() - * create dev config required to create crypto inst. + * qat_crypto_vf_dev_config() - create dev config required to create + * crypto inst. * * @accel_dev: Pointer to acceleration device. * -- Gitee From b2863d049ddb49cbb4c32fa694ff42e9621bfabd Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 16 Feb 2024 18:21:55 +0100 Subject: [PATCH 0757/2138] crypto: qat - fix ring to service map for dcc in 420xx ANBZ: #8589 commit a20a6060e0dd57fecaf55487985aef28bd08c6bf upstream. Intel-SIG: commit a20a6060e0dd crypto: qat - fix ring to service map for dcc in 420xx Backport to support Intel QAT in-tree driver If a device is configured for data compression chaining (dcc), half of the engines are loaded with the symmetric crypto image and the rest are loaded with the compression image. However, in such configuration all rings can handle compression requests. Fix the ring to service mapping so that when a device is configured for dcc, the ring to service mapping reports that all rings in a bank can be used for compression. 
Fixes: fcf60f4bcf54 ("crypto: qat - add support for 420xx devices") Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index a87d29ae724f..7909b51e97c3 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -372,6 +372,13 @@ static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) if (!fw_config) return 0; + /* If dcc, all rings handle compression requests */ + if (adf_get_service_enabled(accel_dev) == SVC_DCC) { + for (i = 0; i < RP_GROUP_COUNT; i++) + rps[i] = COMP; + goto set_mask; + } + for (i = 0; i < RP_GROUP_COUNT; i++) { switch (fw_config[i].ae_mask) { case ADF_AE_GROUP_0: @@ -400,6 +407,7 @@ static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) } } +set_mask: ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | -- Gitee From f10e8e25dc88cd0879be390ab9f38d3ee7a0ca5b Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Fri, 16 Feb 2024 18:21:56 +0100 Subject: [PATCH 0758/2138] crypto: qat - make ring to service map common for QAT GEN4 ANBZ: #8589 commit ed3d95fe788dec7c23bb20b41f8af47cbce04715 upstream. Intel-SIG: commit ed3d95fe788d crypto: qat - make ring to service map common for QAT GEN4 Backport to support Intel QAT in-tree driver The function get_ring_to_svc_map() is present in both 420xx and 4xxx drivers. Rework the logic to make it generic to GEN4 devices and move it to qat_common/adf_gen4_hw_data.c. 
Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 72 +++++-------------- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 72 +++++-------------- .../intel/qat/qat_common/adf_accel_devices.h | 1 + .../intel/qat/qat_common/adf_gen4_hw_data.c | 56 +++++++++++++++ .../intel/qat/qat_common/adf_gen4_hw_data.h | 1 + 5 files changed, 90 insertions(+), 112 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 7909b51e97c3..1102c47f8293 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -361,61 +361,6 @@ static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num) } } -static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) -{ - enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { }; - const struct adf_fw_config *fw_config; - u16 ring_to_svc_map; - int i, j; - - fw_config = get_fw_config(accel_dev); - if (!fw_config) - return 0; - - /* If dcc, all rings handle compression requests */ - if (adf_get_service_enabled(accel_dev) == SVC_DCC) { - for (i = 0; i < RP_GROUP_COUNT; i++) - rps[i] = COMP; - goto set_mask; - } - - for (i = 0; i < RP_GROUP_COUNT; i++) { - switch (fw_config[i].ae_mask) { - case ADF_AE_GROUP_0: - j = RP_GROUP_0; - break; - case ADF_AE_GROUP_1: - j = RP_GROUP_1; - break; - default: - return 0; - } - - switch (fw_config[i].obj) { - case ADF_FW_SYM_OBJ: - rps[j] = SYM; - break; - case ADF_FW_ASYM_OBJ: - rps[j] = ASYM; - break; - case ADF_FW_DC_OBJ: - rps[j] = COMP; - break; - default: - rps[j] = 0; - break; - } - } - -set_mask: - ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | - 
rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; - - return ring_to_svc_map; -} - static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, const char * const fw_objs[], int num_objs) { @@ -441,6 +386,20 @@ static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_n return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs); } +static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return -EINVAL; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return -EINVAL; + + return fw_config[obj_num].obj; +} + static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) { const struct adf_fw_config *fw_config; @@ -504,12 +463,13 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->fw_mmp_name = ADF_420XX_MMP; hw_data->uof_get_name = uof_get_name_420xx; hw_data->uof_get_num_objs = uof_get_num_objs; + hw_data->uof_get_obj_type = uof_get_obj_type; hw_data->uof_get_ae_mask = uof_get_ae_mask; hw_data->get_rp_group = get_rp_group; hw_data->get_ena_thd_mask = get_ena_thd_mask; hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; - hw_data->get_ring_to_svc_map = get_ring_to_svc_map; + hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map; hw_data->disable_iov = adf_disable_sriov; hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; hw_data->enable_pm = adf_gen4_enable_pm; diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 824cd7186320..a9e389077db2 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -320,61 +320,6 @@ static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 
obj_num) } } -static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) -{ - enum adf_cfg_service_type rps[RP_GROUP_COUNT]; - const struct adf_fw_config *fw_config; - u16 ring_to_svc_map; - int i, j; - - fw_config = get_fw_config(accel_dev); - if (!fw_config) - return 0; - - /* If dcc, all rings handle compression requests */ - if (adf_get_service_enabled(accel_dev) == SVC_DCC) { - for (i = 0; i < RP_GROUP_COUNT; i++) - rps[i] = COMP; - goto set_mask; - } - - for (i = 0; i < RP_GROUP_COUNT; i++) { - switch (fw_config[i].ae_mask) { - case ADF_AE_GROUP_0: - j = RP_GROUP_0; - break; - case ADF_AE_GROUP_1: - j = RP_GROUP_1; - break; - default: - return 0; - } - - switch (fw_config[i].obj) { - case ADF_FW_SYM_OBJ: - rps[j] = SYM; - break; - case ADF_FW_ASYM_OBJ: - rps[j] = ASYM; - break; - case ADF_FW_DC_OBJ: - rps[j] = COMP; - break; - default: - rps[j] = 0; - break; - } - } - -set_mask: - ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | - rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; - - return ring_to_svc_map; -} - static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, const char * const fw_objs[], int num_objs) { @@ -407,6 +352,20 @@ static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_n return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs); } +static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return -EINVAL; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return -EINVAL; + + return fw_config[obj_num].obj; +} + static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) { const struct adf_fw_config *fw_config; @@ -487,11 +446,12 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) break; } 
hw_data->uof_get_num_objs = uof_get_num_objs; + hw_data->uof_get_obj_type = uof_get_obj_type; hw_data->uof_get_ae_mask = uof_get_ae_mask; hw_data->get_rp_group = get_rp_group; hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; - hw_data->get_ring_to_svc_map = get_ring_to_svc_map; + hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map; hw_data->disable_iov = adf_disable_sriov; hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; hw_data->enable_pm = adf_gen4_enable_pm; diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 0f26aa976c8c..08658c3a01e9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -248,6 +248,7 @@ struct adf_hw_device_data { void (*set_msix_rttable)(struct adf_accel_dev *accel_dev); const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev); + int (*uof_get_obj_type)(struct adf_accel_dev *accel_dev, u32 obj_num); u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask); u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index f752653ccb47..d28e1921940a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -4,6 +4,7 @@ #include "adf_accel_devices.h" #include "adf_cfg_services.h" #include "adf_common_drv.h" +#include "adf_fw_config.h" #include "adf_gen4_hw_data.h" #include "adf_gen4_pm.h" @@ -433,3 +434,58 @@ int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) return 0; } EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map); + +u16 
adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { }; + unsigned int ae_mask, start_id, worker_obj_cnt, i; + u16 ring_to_svc_map; + int rp_group; + + if (!hw_data->get_rp_group || !hw_data->uof_get_ae_mask || + !hw_data->uof_get_obj_type || !hw_data->uof_get_num_objs) + return 0; + + /* If dcc, all rings handle compression requests */ + if (adf_get_service_enabled(accel_dev) == SVC_DCC) { + for (i = 0; i < RP_GROUP_COUNT; i++) + rps[i] = COMP; + goto set_mask; + } + + worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) - + ADF_GEN4_ADMIN_ACCELENGINES; + start_id = worker_obj_cnt - RP_GROUP_COUNT; + + for (i = start_id; i < worker_obj_cnt; i++) { + ae_mask = hw_data->uof_get_ae_mask(accel_dev, i); + rp_group = hw_data->get_rp_group(accel_dev, ae_mask); + if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0) + return 0; + + switch (hw_data->uof_get_obj_type(accel_dev, i)) { + case ADF_FW_SYM_OBJ: + rps[rp_group] = SYM; + break; + case ADF_FW_ASYM_OBJ: + rps[rp_group] = ASYM; + break; + case ADF_FW_DC_OBJ: + rps[rp_group] = COMP; + break; + default: + rps[rp_group] = 0; + break; + } + } + +set_mask: + ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | + rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; + + return ring_to_svc_map; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 7d8a774cadc8..c6e80df5a85a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -235,5 +235,6 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); void adf_gen4_set_msix_default_rttable(struct 
adf_accel_dev *accel_dev); void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev); +u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev); #endif -- Gitee From d232dad5d628f03ea3f660bb80b1cd579adc22ce Mon Sep 17 00:00:00 2001 From: Aichun Shi Date: Tue, 26 Mar 2024 15:48:22 +0800 Subject: [PATCH 0759/2138] x86: configs: Add Intel QuickAssist Technology(QAT) kernel config ANBZ: #8589 Intel-SIG: no upstream x86: configs: Add Intel QuickAssist Technology(QAT) kernel config Backport to support Intel QAT in-tree driver Signed-off-by: Aichun Shi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2954 --- arch/x86/configs/anolis-debug_defconfig | 4 +++- arch/x86/configs/anolis_defconfig | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index e9f3e126b433..707eaf6a15ef 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -7436,10 +7436,12 @@ CONFIG_CRYPTO_DEV_QAT=m CONFIG_CRYPTO_DEV_QAT_DH895xCC=m CONFIG_CRYPTO_DEV_QAT_C3XXX=m CONFIG_CRYPTO_DEV_QAT_C62X=m -# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +CONFIG_CRYPTO_DEV_QAT_4XXX=m +# CONFIG_CRYPTO_DEV_QAT_420XX is not set CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m CONFIG_CRYPTO_DEV_QAT_C62XVF=m +# CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION is not set CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_VIRTIO is not set # CONFIG_CRYPTO_DEV_SAFEXCEL is not set diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index caace118a265..ad45ec0bbcbf 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -7427,10 +7427,12 @@ CONFIG_CRYPTO_DEV_QAT=m CONFIG_CRYPTO_DEV_QAT_DH895xCC=m CONFIG_CRYPTO_DEV_QAT_C3XXX=m CONFIG_CRYPTO_DEV_QAT_C62X=m -# CONFIG_CRYPTO_DEV_QAT_4XXX is not set 
+CONFIG_CRYPTO_DEV_QAT_4XXX=m +# CONFIG_CRYPTO_DEV_QAT_420XX is not set CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m CONFIG_CRYPTO_DEV_QAT_C62XVF=m +# CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION is not set CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_VIRTIO is not set # CONFIG_CRYPTO_DEV_SAFEXCEL is not set -- Gitee From ea81c2bdfa17127ed076ada81e5aa08fa104dc5c Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 13 Sep 2023 20:42:12 +0800 Subject: [PATCH 0760/2138] KVM: x86: Consolidate flags for __linearize() ANBZ: #8355 commit 7b0dd9430cf0c1ae19645d2a6608a5fb57faffe4 upstream. Consolidate @write and @fetch of __linearize() into a set of flags so that additional flags can be added without needing more/new boolean parameters, to precisely identify the access type. No functional change intended. Intel-SIG: commit 7b0dd9430cf0 KVM: x86: Consolidate flags for __linearize() Backport KVM Linear Address Masking (LAM) support. Signed-off-by: Binbin Wu Reviewed-by: Chao Gao Acked-by: Kai Huang Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-2-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/kvm/emulate.c | 21 +++++++++++---------- arch/x86/kvm/kvm_emulate.h | 4 ++++ 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 2673cd5c46cb..87ee1802166a 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -687,8 +687,8 @@ static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size) static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned *max_size, unsigned size, - bool write, bool fetch, - enum x86emul_mode mode, ulong *linear) + enum x86emul_mode mode, ulong *linear, + unsigned int flags) { struct 
desc_struct desc; bool usable; @@ -717,11 +717,11 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, if (!usable) goto bad; /* code segment in protected mode or read-only data segment */ - if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) - || !(desc.type & 2)) && write) + if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) && + (flags & X86EMUL_F_WRITE)) goto bad; /* unreadable code segment */ - if (!fetch && (desc.type & 8) && !(desc.type & 2)) + if (!(flags & X86EMUL_F_FETCH) && (desc.type & 8) && !(desc.type & 2)) goto bad; lim = desc_limit_scaled(&desc); if (!(desc.type & 8) && (desc.type & 4)) { @@ -757,8 +757,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt, ulong *linear) { unsigned max_size; - return __linearize(ctxt, addr, &max_size, size, write, false, - ctxt->mode, linear); + return __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear, + write ? X86EMUL_F_WRITE : 0); } static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst) @@ -771,7 +771,8 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst) if (ctxt->op_bytes != sizeof(unsigned long)) addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1); - rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear); + rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear, + X86EMUL_F_FETCH); if (rc == X86EMUL_CONTINUE) ctxt->_eip = addr.ea; return rc; @@ -907,8 +908,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) * boundary check itself. Instead, we use max_size to check * against op_size. 
*/ - rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode, - &linear); + rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear, + X86EMUL_F_FETCH); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index be7aeb9b8ea3..e24c8ac7b930 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -88,6 +88,10 @@ struct x86_instruction_info { #define X86EMUL_IO_NEEDED 5 /* IO is needed to complete emulation */ #define X86EMUL_INTERCEPTED 6 /* Intercepted by nested VMCB/VMCS */ +/* x86-specific emulation flags */ +#define X86EMUL_F_WRITE BIT(0) +#define X86EMUL_F_FETCH BIT(1) + struct x86_emulate_ops { void (*vm_bugged)(struct x86_emulate_ctxt *ctxt); /* -- Gitee From 8cf4bea007af39c2c6a5e3efe878e732ef3412b7 Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 13 Sep 2023 20:42:14 +0800 Subject: [PATCH 0761/2138] KVM: x86: Add an emulation flag for implicit system access ANBZ: #8355 commit 3963c52df42231f72277cd138994ac94f1183d2b upstream. Add an emulation flag X86EMUL_F_IMPLICIT to identify implicit system access in instruction emulation. Don't bother wiring up any usage at this point, as Linear Address Space Separation (LASS) will be the first "real" consumer of the flag and LASS support will require dedicated hooks, i.e. there aren't any existing calls where passing X86EMUL_F_IMPLICIT is meaningful. Add the IMPLICIT flag even though there's no imminent usage so that Linear Address Masking (LAM) support can reference the flag to document that addresses for implicit accesses aren't untagged. Intel-SIG: commit 3963c52df422 KVM: x86: Add an emulation flag for implicit system access Backport KVM Linear Address Masking (LAM) support. 
Signed-off-by: Binbin Wu Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-4-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/kvm/kvm_emulate.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index e24c8ac7b930..65fc7ef5ca3d 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -91,6 +91,7 @@ struct x86_instruction_info { /* x86-specific emulation flags */ #define X86EMUL_F_WRITE BIT(0) #define X86EMUL_F_FETCH BIT(1) +#define X86EMUL_F_IMPLICIT BIT(2) struct x86_emulate_ops { void (*vm_bugged)(struct x86_emulate_ctxt *ctxt); -- Gitee From d2ce045b2262bb01c116db8a8e9eaea285ace631 Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 13 Sep 2023 20:42:15 +0800 Subject: [PATCH 0762/2138] KVM: x86: Add X86EMUL_F_INVLPG and pass it in em_invlpg() ANBZ: #8355 commit 538ac9a92d669c4ccfc64739a32efab2793cea1d upstream. Add an emulation flag X86EMUL_F_INVLPG, which is used to identify an instruction that does TLB invalidation without true memory access. Only invlpg & invlpga implemented in emulator belong to this kind. invlpga doesn't need additional information for emulation. Just pass the flag to em_invlpg(). Linear Address Masking (LAM) and Linear Address Space Separation (LASS) don't apply to addresses that are inputs to TLB invalidation. The flag will be consumed to support LAM/LASS virtualization. Intel-SIG: commit 538ac9a92d66 KVM: x86: Add X86EMUL_F_INVLPG and pass it in em_invlpg() Backport KVM Linear Address Masking (LAM) support. 
Signed-off-by: Binbin Wu Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-5-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/kvm/emulate.c | 4 +++- arch/x86/kvm/kvm_emulate.h | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 87ee1802166a..ceec8c5f9687 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -3440,8 +3440,10 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt) { int rc; ulong linear; + unsigned int max_size; - rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); + rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode, + &linear, X86EMUL_F_INVLPG); if (rc == X86EMUL_CONTINUE) ctxt->ops->invlpg(ctxt, linear); /* Disable writeback. */ diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index 65fc7ef5ca3d..8bd9b23543cc 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -92,6 +92,7 @@ struct x86_instruction_info { #define X86EMUL_F_WRITE BIT(0) #define X86EMUL_F_FETCH BIT(1) #define X86EMUL_F_IMPLICIT BIT(2) +#define X86EMUL_F_INVLPG BIT(3) struct x86_emulate_ops { void (*vm_bugged)(struct x86_emulate_ctxt *ctxt); -- Gitee From 40c6ebd0acb708af7b8e79187c80f3483af21a2b Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 13 Sep 2023 20:42:16 +0800 Subject: [PATCH 0763/2138] KVM: x86/mmu: Drop non-PA bits when getting GFN for guest's PGD ANBZ: #8355 commit a130066f74008858ac425b7497d231742474a0ea upstream. Drop non-PA bits when getting GFN for guest's PGD with the maximum theoretical mask for guest MAXPHYADDR. Do it unconditionally because it's harmless for 32-bit guests, querying 64-bit mode would be more expensive, and for EPT the mask isn't tied to guest mode. 
Using PT_BASE_ADDR_MASK would be technically wrong (PAE paging has 64-bit elements _except_ for CR3, which has only 32 valid bits), it wouldn't matter in practice though. Opportunistically use GENMASK_ULL() to define __PT_BASE_ADDR_MASK. Intel-SIG: commit a130066f7400 KVM: x86/mmu: Drop non-PA bits when getting GFN for guest's PGD Backport KVM Linear Address Masking (LAM) support. Signed-off-by: Binbin Wu Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-6-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/kvm/mmu/mmu.c | 2 +- arch/x86/kvm/mmu/mmu_internal.h | 1 + arch/x86/kvm/mmu/paging_tmpl.h | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index ff85526a9d48..3517d7763fbc 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3774,7 +3774,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) hpa_t root; root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu); - root_gfn = root_pgd >> PAGE_SHIFT; + root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT; if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) { mmu->root.hpa = kvm_mmu_get_dummy_root(); diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index decc1f153669..68f8564d85a9 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -13,6 +13,7 @@ #endif /* Page table builder macros common to shadow (host) PTEs and guest PTEs. 
*/ +#define __PT_BASE_ADDR_MASK GENMASK_ULL(51, 12) #define __PT_LEVEL_SHIFT(level, bits_per_level) \ (PAGE_SHIFT + ((level) - 1) * (bits_per_level)) #define __PT_INDEX(address, level, bits_per_level) \ diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index c6b2c52aceac..ab0ed5d66e0c 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -62,7 +62,7 @@ #endif /* Common logic, but per-type values. These also need to be undefined. */ -#define PT_BASE_ADDR_MASK ((pt_element_t)(((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))) +#define PT_BASE_ADDR_MASK ((pt_element_t)__PT_BASE_ADDR_MASK) #define PT_LVL_ADDR_MASK(lvl) __PT_LVL_ADDR_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS) #define PT_LVL_OFFSET_MASK(lvl) __PT_LVL_OFFSET_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS) #define PT_INDEX(addr, lvl) __PT_INDEX(addr, lvl, PT_LEVEL_BITS) -- Gitee From 3f84de2ceea8d66417b924bcf8701c4f0563eb9c Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 13 Sep 2023 20:42:17 +0800 Subject: [PATCH 0764/2138] KVM: x86: Add & use kvm_vcpu_is_legal_cr3() to check CR3's legality ANBZ: #8355 commit 2c49db455ee27c72a680c9e4fad1c12433902ee3 upstream. Add and use kvm_vcpu_is_legal_cr3() to check CR3's legality to provide a clear distinction between CR3 and GPA checks. This will allow exempting bits from kvm_vcpu_is_legal_cr3() without affecting general GPA checks, e.g. for upcoming features that will use high bits in CR3 for feature enabling. No functional change intended. Intel-SIG: commit 2c49db455ee2 KVM: x86: Add & use kvm_vcpu_is_legal_cr3() to check CR3's legality Backport KVM Linear Address Masking (LAM) support. 
Signed-off-by: Binbin Wu Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-7-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/kvm/cpuid.h | 5 +++++ arch/x86/kvm/svm/nested.c | 4 ++-- arch/x86/kvm/vmx/nested.c | 4 ++-- arch/x86/kvm/x86.c | 4 ++-- 4 files changed, 11 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 110dae2b0e49..90de5ef1edff 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -289,4 +289,9 @@ static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, vcpu->arch.governed_features.enabled); } +static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) +{ + return kvm_vcpu_is_legal_gpa(vcpu, cr3); +} + #endif diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index acf22bd99efc..634edce60d56 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -300,7 +300,7 @@ static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu, if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) { if (CC(!(save->cr4 & X86_CR4_PAE)) || CC(!(save->cr0 & X86_CR0_PE)) || - CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3))) + CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3))) return false; } @@ -509,7 +509,7 @@ static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu) static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_npt, bool reload_pdptrs) { - if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) + if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3))) return -EINVAL; if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) && diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index d3e346a574f1..b2eb25e637a0 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -1086,7 +1086,7 @@ static int 
nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, bool reload_pdptrs, enum vm_entry_failure_code *entry_failure_code) { - if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) { + if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3))) { *entry_failure_code = ENTRY_FAIL_DEFAULT; return -EINVAL; } @@ -2927,7 +2927,7 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || - CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3))) + CC(!kvm_vcpu_is_legal_cr3(vcpu, vmcs12->host_cr3))) return -EINVAL; if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f5abd4848400..5683a2d3f399 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1284,7 +1284,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) * stuff CR3, e.g. for RSM emulation, and there is no guarantee that * the current vCPU mode is accurate. */ - if (kvm_vcpu_is_illegal_gpa(vcpu, cr3)) + if (!kvm_vcpu_is_legal_cr3(vcpu, cr3)) return 1; if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3)) @@ -11527,7 +11527,7 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) */ if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) return false; - if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) + if (!kvm_vcpu_is_legal_cr3(vcpu, sregs->cr3)) return false; } else { /* -- Gitee From 627623021be24fe14decb8ef8dadb0d920911813 Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 13 Sep 2023 20:42:18 +0800 Subject: [PATCH 0765/2138] KVM: x86: Remove kvm_vcpu_is_illegal_gpa() ANBZ: #8355 commit 9c8021d4ae85f1531230fc33653e06e9f1fdb7f1 upstream. Remove kvm_vcpu_is_illegal_gpa() and use !kvm_vcpu_is_legal_gpa() instead. 
The "illegal" helper actually predates the "legal" helper, the only reason the "illegal" variant wasn't removed by commit 4bda0e97868a ("KVM: x86: Add a helper to check for a legal GPA") was to avoid code churn. Now that CR3 has a dedicated helper, there are fewer callers, and so the code churn isn't that much of a deterrent. No functional change intended. Intel-SIG: commit 9c8021d4ae85 KVM: x86: Remove kvm_vcpu_is_illegal_gpa() Backport KVM Linear Address Masking (LAM) support. Signed-off-by: Binbin Wu Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-8-binbin.wu@linux.intel.com [sean: provide a bit of history in the changelog] Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/kvm/cpuid.h | 5 ----- arch/x86/kvm/vmx/nested.c | 2 +- arch/x86/kvm/vmx/vmx.c | 2 +- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 90de5ef1edff..b8ab3a7d3381 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -48,11 +48,6 @@ static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) return !(gpa & vcpu->arch.reserved_gpa_bits); } -static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) -{ - return !kvm_vcpu_is_legal_gpa(vcpu, gpa); -} - static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, gpa_t alignment) { diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index b2eb25e637a0..a94ae4045d65 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2732,7 +2732,7 @@ static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) } /* Reserved bits should not be set */ - if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) + if (CC(!kvm_vcpu_is_legal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) return 
false; /* AD, if set, should be supported */ diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index c23f811694c4..6a9c98a4a9f1 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5800,7 +5800,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) * would also use advanced VM-exit information for EPT violations to * reconstruct the page fault error code. */ - if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa))) + if (unlikely(allow_smaller_maxphyaddr && !kvm_vcpu_is_legal_gpa(vcpu, gpa))) return kvm_emulate_instruction(vcpu, 0); return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); -- Gitee From be0d99f8c975b600262a77ce689c08fbf7d74fc7 Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 13 Sep 2023 20:42:19 +0800 Subject: [PATCH 0766/2138] KVM: x86: Introduce get_untagged_addr() in kvm_x86_ops and call it in emulator ANBZ: #8355 commit 37a41847b770c722e98ace72f3851fb49b360c08 upstream. Introduce a new interface get_untagged_addr() to kvm_x86_ops to untag the metadata from linear address. Call the interface in linearization of instruction emulator for 64-bit mode. When enabled feature like Intel Linear Address Masking (LAM) or AMD Upper Address Ignore (UAI), linear addresses may be tagged with metadata that needs to be dropped prior to canonicality checks, i.e. the metadata is ignored. Introduce get_untagged_addr() to kvm_x86_ops to hide the vendor specific code, as sadly LAM and UAI have different semantics. Pass the emulator flags to allow vendor specific implementation to precisely identify the access type (LAM doesn't untag certain accesses). Intel-SIG: commit 37a41847b770 KVM: x86: Introduce get_untagged_addr() in kvm_x86_ops and call it in emulator Backport KVM Linear Address Masking (LAM) support. 
Signed-off-by: Binbin Wu Reviewed-by: Chao Gao Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-9-binbin.wu@linux.intel.com [sean: massage changelog] Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log and resolve the conflict ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/include/asm/kvm-x86-ops.h | 1 + arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/emulate.c | 2 +- arch/x86/kvm/kvm_emulate.h | 3 +++ arch/x86/kvm/x86.c | 10 ++++++++++ 5 files changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index b54e72a0100b..0c540ac3872e 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -135,6 +135,7 @@ KVM_X86_OP(msr_filter_changed) KVM_X86_OP(complete_emulated_msr) KVM_X86_OP(vcpu_deliver_sipi_vector) KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons); +KVM_X86_OP_OPTIONAL(get_untagged_addr) KVM_X86_OP_OPTIONAL(vm_attestation) KVM_X86_OP_OPTIONAL(control_pre_system_reset) KVM_X86_OP_OPTIONAL(control_post_system_reset) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 6a3ae64dfd06..e93a85779423 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1752,6 +1752,8 @@ struct kvm_x86_ops { */ unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu); + gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags); + int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len); int (*control_pre_system_reset)(struct kvm *kvm); int (*control_post_system_reset)(struct kvm *kvm); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index ceec8c5f9687..e223043ef5b2 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -701,7 +701,7 @@ static __always_inline int __linearize(struct 
x86_emulate_ctxt *ctxt, *max_size = 0; switch (mode) { case X86EMUL_MODE_PROT64: - *linear = la; + *linear = la = ctxt->ops->get_untagged_addr(ctxt, la, flags); va_bits = ctxt_virt_addr_bits(ctxt); if (!__is_canonical_address(la, va_bits)) goto bad; diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index 8bd9b23543cc..e6d149825169 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -230,6 +230,9 @@ struct x86_emulate_ops { int (*leave_smm)(struct x86_emulate_ctxt *ctxt); void (*triple_fault)(struct x86_emulate_ctxt *ctxt); int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr); + + gva_t (*get_untagged_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr, + unsigned int flags); }; /* Type, address-of, and value of an instruction's operand. */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5683a2d3f399..713ca894779b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8359,6 +8359,15 @@ static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt) kvm_vm_bugged(kvm); } +static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt, + gva_t addr, unsigned int flags) +{ + if (!kvm_x86_ops.get_untagged_addr) + return addr; + + return static_call(kvm_x86_get_untagged_addr)(emul_to_vcpu(ctxt), addr, flags); +} + static const struct x86_emulate_ops emulate_ops = { .vm_bugged = emulator_vm_bugged, .read_gpr = emulator_read_gpr, @@ -8403,6 +8412,7 @@ static const struct x86_emulate_ops emulate_ops = { .leave_smm = emulator_leave_smm, .triple_fault = emulator_triple_fault, .set_xcr = emulator_set_xcr, + .get_untagged_addr = emulator_get_untagged_addr, }; static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) -- Gitee From 8e3c24fcd743091a3f9ccba9096cce6c8c8fde13 Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 13 Sep 2023 20:42:20 +0800 Subject: [PATCH 0767/2138] KVM: x86: Untag addresses for LAM emulation where applicable ANBZ: #8355 commit b39bd520a60c667a339e315ce7a3de2f7178f6e3 
upstream. Stub in vmx_get_untagged_addr() and wire up calls from the emulator (via get_untagged_addr()) and "direct" calls from various VM-Exit handlers in VMX where LAM untagging is supposed to be applied. Defer implementing the guts of vmx_get_untagged_addr() to future patches purely to make the changes easier to consume. LAM is active only for 64-bit linear addresses and several types of accesses are exempted. - Cases need to untag address (handled in get_vmx_mem_address()) Operand(s) of VMX instructions and INVPCID. Operand(s) of SGX ENCLS. - Cases LAM doesn't apply to (no change needed) Operand of INVLPG. Linear address in INVPCID descriptor. Linear address in INVVPID descriptor. BASEADDR specified in SECS of ECREATE. Note: - LAM doesn't apply to write to control registers or MSRs - LAM masking is applied before walking page tables, i.e. the faulting linear address in CR2 doesn't contain the metadata. - The guest linear address saved in VMCS doesn't contain metadata. Intel-SIG: commit b39bd520a60c KVM: x86: Untag addresses for LAM emulation where applicable Backport KVM Linear Address Masking (LAM) support. 
Signed-off-by: Binbin Wu Reviewed-by: Chao Gao Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-10-binbin.wu@linux.intel.com [sean: massage changelog] Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/kvm/vmx/nested.c | 5 +++++ arch/x86/kvm/vmx/sgx.c | 1 + arch/x86/kvm/vmx/vmx.c | 7 +++++++ arch/x86/kvm/vmx/vmx.h | 2 ++ arch/x86/kvm/x86.c | 4 ++++ 5 files changed, 19 insertions(+) diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index a94ae4045d65..4872ac288f51 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -5027,6 +5027,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, else *ret = off; + *ret = vmx_get_untagged_addr(vcpu, *ret, 0); /* Long mode: #GP(0)/#SS(0) if the memory address is in a * non-canonical form. This is the only check on the memory * destination for long mode! @@ -5850,6 +5851,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) vpid02 = nested_get_vpid02(vcpu); switch (type) { case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: + /* + * LAM doesn't apply to addresses that are inputs to TLB + * invalidation. 
+ */ if (!operand.vpid || is_noncanonical_address(operand.gla, vcpu)) return nested_vmx_fail(vcpu, diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c index 3e822e582497..6fef01e0536e 100644 --- a/arch/x86/kvm/vmx/sgx.c +++ b/arch/x86/kvm/vmx/sgx.c @@ -37,6 +37,7 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset, if (!IS_ALIGNED(*gva, alignment)) { fault = true; } else if (likely(is_64_bit_mode(vcpu))) { + *gva = vmx_get_untagged_addr(vcpu, *gva, 0); fault = is_noncanonical_address(*gva, vcpu); } else { *gva &= 0xffffffff; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 6a9c98a4a9f1..406205115bcc 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -8251,6 +8251,11 @@ static void vmx_vm_destroy(struct kvm *kvm) free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm)); } +gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags) +{ + return gva; +} + static struct kvm_x86_ops vmx_x86_ops __initdata = { .name = KBUILD_MODNAME, @@ -8391,6 +8396,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .complete_emulated_msr = kvm_complete_insn_gp, .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector, + + .get_untagged_addr = vmx_get_untagged_addr, }; static unsigned int vmx_handle_intel_pt_intr(void) diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 6be1627d888e..cbbe5122cfa6 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -421,6 +421,8 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type); u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu); u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu); +gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags); + static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool value) { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 713ca894779b..676efdcefaeb 100644 --- a/arch/x86/kvm/x86.c 
+++ b/arch/x86/kvm/x86.c @@ -13461,6 +13461,10 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) switch (type) { case INVPCID_TYPE_INDIV_ADDR: + /* + * LAM doesn't apply to addresses that are inputs to TLB + * invalidation. + */ if ((!pcid_enabled && (operand.pcid != 0)) || is_noncanonical_address(operand.gla, vcpu)) { kvm_inject_gp(vcpu, 0); -- Gitee From 05e5c2ef6e567715a048d77018d574e8e596f4a1 Mon Sep 17 00:00:00 2001 From: Robert Hoo Date: Wed, 13 Sep 2023 20:42:21 +0800 Subject: [PATCH 0768/2138] KVM: x86: Virtualize LAM for supervisor pointer ANBZ: #8355 commit 93d1c9f498a7505e0e0a0198f3b3d7f97fcc5fa6 upstream. Add support to allow guests to set the new CR4 control bit for LAM and add implementation to get untagged address for supervisor pointers. LAM modifies the canonicality check applied to 64-bit linear addresses for data accesses, allowing software to use of the untranslated address bits for metadata and masks the metadata bits before using them as linear addresses to access memory. LAM uses CR4.LAM_SUP (bit 28) to configure and enable LAM for supervisor pointers. It also changes VMENTER to allow the bit to be set in VMCS's HOST_CR4 and GUEST_CR4 to support virtualization. Note CR4.LAM_SUP is allowed to be set even not in 64-bit mode, but it will not take effect since LAM only applies to 64-bit linear addresses. Move CR4.LAM_SUP out of CR4_RESERVED_BITS, its reservation depends on vcpu supporting LAM or not. Leave it intercepted to prevent guest from setting the bit if LAM is not exposed to guest as well as to avoid vmread every time when KVM fetches its value, with the expectation that guest won't toggle the bit frequently. Set CR4.LAM_SUP bit in the emulated IA32_VMX_CR4_FIXED1 MSR for guests to allow guests to enable LAM for supervisor pointers in nested VMX operation. Hardware is not required to do TLB flush when CR4.LAM_SUP toggled, KVM doesn't need to emulate TLB flush based on it. 
There's no other features or vmx_exec_controls connection, and no other code needed in {kvm,vmx}_set_cr4(). Skip address untag for instruction fetches (which includes branch targets), operand of INVLPG instructions, and implicit system accesses, all of which are not subject to untagging. Note, get_untagged_addr() isn't invoked for implicit system accesses as there is no reason to do so, but check the flag anyways for documentation purposes. Intel-SIG: commit 93d1c9f498a7 KVM: x86: Virtualize LAM for supervisor pointer Backport KVM Linear Address Masking (LAM) support. Signed-off-by: Robert Hoo Co-developed-by: Binbin Wu Signed-off-by: Binbin Wu Reviewed-by: Chao Gao Reviewed-by: Kai Huang Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-11-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/include/asm/kvm_host.h | 3 ++- arch/x86/kvm/vmx/vmx.c | 39 ++++++++++++++++++++++++++++++++- arch/x86/kvm/x86.h | 2 ++ 3 files changed, 42 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index e93a85779423..213d17d35ce3 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -125,7 +125,8 @@ | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \ | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \ | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \ - | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP)) + | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \ + | X86_CR4_LAM_SUP)) #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 406205115bcc..d1849264e2fe 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7699,6 +7699,9 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) 
cr4_fixed1_update(X86_CR4_UMIP, ecx, feature_bit(UMIP)); cr4_fixed1_update(X86_CR4_LA57, ecx, feature_bit(LA57)); + entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 1); + cr4_fixed1_update(X86_CR4_LAM_SUP, eax, feature_bit(LAM)); + #undef cr4_fixed1_update } @@ -8251,9 +8254,43 @@ static void vmx_vm_destroy(struct kvm *kvm) free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm)); } +/* + * Note, the SDM states that the linear address is masked *after* the modified + * canonicality check, whereas KVM masks (untags) the address and then performs + * a "normal" canonicality check. Functionally, the two methods are identical, + * and when the masking occurs relative to the canonicality check isn't visible + * to software, i.e. KVM's behavior doesn't violate the SDM. + */ gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags) { - return gva; + int lam_bit; + + if (flags & (X86EMUL_F_FETCH | X86EMUL_F_IMPLICIT | X86EMUL_F_INVLPG)) + return gva; + + if (!is_64_bit_mode(vcpu)) + return gva; + + /* + * Bit 63 determines if the address should be treated as user address + * or a supervisor address. + */ + if (!(gva & BIT_ULL(63))) { + /* KVM doesn't yet virtualize LAM_U{48,57}. */ + return gva; + } else { + if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP)) + return gva; + + lam_bit = kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 56 : 47; + } + + /* + * Untag the address by sign-extending the lam_bit, but NOT to bit 63. + * Bit 63 is retained from the raw virtual address so that untagging + * doesn't change a user access to a supervisor access, and vice versa. 
+ */ + return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63)); } static struct kvm_x86_ops vmx_x86_ops __initdata = { diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 1e7be1f6ab29..53e883721e71 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -529,6 +529,8 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type); __reserved_bits |= X86_CR4_VMXE; \ if (!__cpu_has(__c, X86_FEATURE_PCID)) \ __reserved_bits |= X86_CR4_PCIDE; \ + if (!__cpu_has(__c, X86_FEATURE_LAM)) \ + __reserved_bits |= X86_CR4_LAM_SUP; \ __reserved_bits; \ }) -- Gitee From 2dab99aa00859e19f20ffe4ef337263bd844a819 Mon Sep 17 00:00:00 2001 From: Robert Hoo Date: Wed, 13 Sep 2023 20:42:22 +0800 Subject: [PATCH 0769/2138] KVM: x86: Virtualize LAM for user pointer ANBZ: #8355 commit 3098e6eca88e543ea0d190d1fa72b1c047bb3e7d upstream. Add support to allow guests to set the new CR3 control bits for Linear Address Masking (LAM) and add implementation to get untagged address for user pointers. LAM modifies the canonical check for 64-bit linear addresses, allowing software to use the masked/ignored address bits for metadata. Hardware masks off the metadata bits before using the linear addresses to access memory. LAM uses two new CR3 non-address bits, LAM_U48 (bit 62) and LAM_U57 (bit 61), to configure LAM for user pointers. LAM also changes VMENTER to allow both bits to be set in VMCS's HOST_CR3 and GUEST_CR3 for virtualization. When EPT is on, CR3 is not trapped by KVM and it's up to the guest to set any of the two LAM control bits. However, when EPT is off, the actual CR3 used by the guest is generated from the shadow MMU root which is different from the CR3 that is *set* by the guest, and KVM needs to manually apply any active control bits to VMCS's GUEST_CR3 based on the cached CR3 *seen* by the guest. KVM manually checks guest's CR3 to make sure it points to a valid guest physical address (i.e. to support smaller MAXPHYSADDR in the guest). 
Extend this check to allow the two LAM control bits to be set. After check, LAM bits of guest CR3 will be stripped off to extract guest physical address. In case of nested, for a guest which supports LAM, both VMCS12's HOST_CR3 and GUEST_CR3 are allowed to have the new LAM control bits set, i.e. when L0 enters L1 to emulate a VMEXIT from L2 to L1 or when L0 enters L2 directly. KVM also manually checks VMCS12's HOST_CR3 and GUEST_CR3 being valid physical address. Extend such check to allow the new LAM control bits too. Note, LAM doesn't have a global control bit to turn on/off LAM completely, but purely depends on hardware's CPUID to determine it can be enabled or not. That means, when EPT is on, even when KVM doesn't expose LAM to guest, the guest can still set LAM control bits in CR3 w/o causing problem. This is an unfortunate virtualization hole. KVM could choose to intercept CR3 in this case and inject fault but this would hurt performance when running a normal VM w/o LAM support. This is undesirable. Just choose to let the guest do such illegal thing as the worst case is guest being killed when KVM eventually find out such illegal behaviour and that the guest is misbehaving. Intel-SIG: commit 3098e6eca88e KVM: x86: Virtualize LAM for user pointer Backport KVM Linear Address Masking (LAM) support. 
Suggested-by: Sean Christopherson Signed-off-by: Robert Hoo Co-developed-by: Binbin Wu Signed-off-by: Binbin Wu Reviewed-by: Kai Huang Reviewed-by: Chao Gao Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-12-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/kvm/cpuid.h | 4 ++++ arch/x86/kvm/mmu.h | 9 +++++++++ arch/x86/kvm/vmx/vmx.c | 12 +++++++++--- 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index b8ab3a7d3381..389590eb8791 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -286,6 +286,10 @@ static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { + if (kvm_cpu_cap_has(X86_FEATURE_LAM) && + guest_cpuid_has(vcpu, X86_FEATURE_LAM)) + cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57); + return kvm_vcpu_is_legal_gpa(vcpu, cr3); } diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 253fb2093d5d..e700f1f854ae 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -146,6 +146,15 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu) return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu)); } +static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu) +{ + if (!kvm_cpu_cap_has(X86_FEATURE_LAM) || + !guest_cpuid_has(vcpu, X86_FEATURE_LAM)) + return 0; + + return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57); +} + static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu) { u64 root_hpa = vcpu->arch.mmu->root.hpa; diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d1849264e2fe..9c2c53ce7370 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3413,7 +3413,8 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t 
root_hpa, update_guest_cr3 = false; vmx_ept_load_pdptrs(vcpu); } else { - guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu); + guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu) | + kvm_get_active_cr3_lam_bits(vcpu); } if (update_guest_cr3) @@ -8264,6 +8265,7 @@ static void vmx_vm_destroy(struct kvm *kvm) gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags) { int lam_bit; + unsigned long cr3_bits; if (flags & (X86EMUL_F_FETCH | X86EMUL_F_IMPLICIT | X86EMUL_F_INVLPG)) return gva; @@ -8276,8 +8278,12 @@ gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags * or a supervisor address. */ if (!(gva & BIT_ULL(63))) { - /* KVM doesn't yet virtualize LAM_U{48,57}. */ - return gva; + cr3_bits = kvm_get_active_cr3_lam_bits(vcpu); + if (!(cr3_bits & (X86_CR3_LAM_U57 | X86_CR3_LAM_U48))) + return gva; + + /* LAM_U48 is ignored if LAM_U57 is set. */ + lam_bit = cr3_bits & X86_CR3_LAM_U57 ? 56 : 47; } else { if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP)) return gva; -- Gitee From c8848d0b181a6e71166dcb0206fd2407973340cf Mon Sep 17 00:00:00 2001 From: Robert Hoo Date: Wed, 13 Sep 2023 20:42:23 +0800 Subject: [PATCH 0770/2138] KVM: x86: Advertise and enable LAM (user and supervisor) ANBZ: #8355 commit 703d794cb8cb28c07b22c1c845f5c4d4c419aff7 upstream. LAM is enumerated by CPUID.7.1:EAX.LAM[bit 26]. Advertise the feature to userspace and enable it as the final step after the LAM virtualization support for supervisor and user pointers. SGX LAM support is not advertised yet. SGX LAM support is enumerated in SGX's own CPUID and there's no hard requirement that it must be supported when LAM is reported in CPUID leaf 0x7. Intel-SIG: commit 703d794cb8cb KVM: x86: Advertise and enable LAM (user and supervisor) Backport KVM Linear Address Masking (LAM) support. 
Signed-off-by: Robert Hoo Signed-off-by: Binbin Wu Reviewed-by: Jingqi Liu Reviewed-by: Chao Gao Reviewed-by: Kai Huang Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-13-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/kvm/cpuid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 7e6763c2bc01..4b7b41ebb5c9 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -691,7 +691,7 @@ void kvm_set_cpu_caps(void) kvm_cpu_cap_mask(CPUID_7_1_EAX, F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) | F(FZRM) | F(FSRS) | F(FSRC) | - F(AMX_FP16) | F(AVX_IFMA) + F(AMX_FP16) | F(AVX_IFMA) | F(LAM) ); kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX, -- Gitee From 0873f44dffaa6eec8199c028e561c0e66a1a9cb3 Mon Sep 17 00:00:00 2001 From: Binbin Wu Date: Wed, 13 Sep 2023 20:42:24 +0800 Subject: [PATCH 0771/2138] KVM: x86: Use KVM-governed feature framework to track "LAM enabled" ANBZ: #8355 commit 183bdd161c2b773a62f01d1c030f5a3a5b7c33b5 upstream. Use the governed feature framework to track if Linear Address Masking (LAM) is "enabled", i.e. if LAM can be used by the guest. Using the framework to avoid the relative expensive call guest_cpuid_has() during cr3 and vmexit handling paths for LAM. No functional change intended. Intel-SIG: commit 183bdd161c2b KVM: x86: Use KVM-governed feature framework to track "LAM enabled" Backport KVM Linear Address Masking (LAM) support. 
Signed-off-by: Binbin Wu Tested-by: Xuelian Guo Link: https://lore.kernel.org/r/20230913124227.12574-14-binbin.wu@linux.intel.com Signed-off-by: Sean Christopherson [ Zhiquan Li: amend commit log ] Signed-off-by: Zhiquan Li Reviewed-by: Xuchun Shang Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/2805 --- arch/x86/kvm/cpuid.h | 3 +-- arch/x86/kvm/governed_features.h | 1 + arch/x86/kvm/mmu.h | 3 +-- arch/x86/kvm/vmx/vmx.c | 1 + 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 389590eb8791..af4c0456bfb7 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -286,8 +286,7 @@ static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) { - if (kvm_cpu_cap_has(X86_FEATURE_LAM) && - guest_cpuid_has(vcpu, X86_FEATURE_LAM)) + if (guest_can_use(vcpu, X86_FEATURE_LAM)) cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57); return kvm_vcpu_is_legal_gpa(vcpu, cr3); diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h index 423a73395c10..ad463b1ed4e4 100644 --- a/arch/x86/kvm/governed_features.h +++ b/arch/x86/kvm/governed_features.h @@ -16,6 +16,7 @@ KVM_GOVERNED_X86_FEATURE(PAUSEFILTER) KVM_GOVERNED_X86_FEATURE(PFTHRESHOLD) KVM_GOVERNED_X86_FEATURE(VGIF) KVM_GOVERNED_X86_FEATURE(VNMI) +KVM_GOVERNED_X86_FEATURE(LAM) #undef KVM_GOVERNED_X86_FEATURE #undef KVM_GOVERNED_FEATURE diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index e700f1f854ae..f04cc5ade1cd 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -148,8 +148,7 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu) static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu) { - if (!kvm_cpu_cap_has(X86_FEATURE_LAM) || - !guest_cpuid_has(vcpu, X86_FEATURE_LAM)) + if (!guest_can_use(vcpu, X86_FEATURE_LAM)) return 0; return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | 
X86_CR3_LAM_U57); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 9c2c53ce7370..88f4443ef775 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7789,6 +7789,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES); kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX); + kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LAM); vmx_setup_uret_msrs(vmx); -- Gitee From 800bbf24020c9d24ce8182fcbfc17c9113863412 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Mon, 26 Feb 2024 11:54:35 +0800 Subject: [PATCH 0772/2138] fuse: add support for explicit export disabling ANBZ: #8702 commit e022f6a1c711ab6d76e9e59dce77e2b25df75076 upstream. open_by_handle_at(2) can fail with -ESTALE with a valid handle returned by a previous name_to_handle_at(2) for evicted fuse inodes, which is especially common when entry_valid_timeout is 0, e.g. when the fuse daemon is in "cache=none" mode. The time sequence is like: name_to_handle_at(2) # succeed evict fuse inode open_by_handle_at(2) # fail The root cause is that, with 0 entry_valid_timeout, the dput() called in name_to_handle_at(2) will trigger iput -> evict(), which will send FUSE_FORGET to the daemon. The following open_by_handle_at(2) will send a new FUSE_LOOKUP request upon inode cache miss since the previous inode eviction. Then the fuse daemon may fail the FUSE_LOOKUP request with -ENOENT as the cached metadata of the requested inode has already been cleaned up during the previous FUSE_FORGET. The returned -ENOENT is treated as -ESTALE when open_by_handle_at(2) returns. This confuses the application somehow, as open_by_handle_at(2) fails when the previous name_to_handle_at(2) succeeds. The returned errno is also confusing as the requested file is not deleted and already there. It is reasonable to fail name_to_handle_at(2) early in this case, after which the application can fallback to open(2) to access files. 
Since this issue typically appears when entry_valid_timeout is 0 which is configured by the fuse daemon, the fuse daemon is the right person to explicitly disable the export when required. Also considering FUSE_EXPORT_SUPPORT actually indicates the support for lookups of "." and "..", and there are existing fuse daemons supporting export without FUSE_EXPORT_SUPPORT set, for compatibility, we add a new INIT flag for such purpose. Reviewed-by: Amir Goldstein Signed-off-by: Jingbo Xu Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3008 --- fs/fuse/inode.c | 11 ++++++++++- include/uapi/linux/fuse.h | 5 +++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 943e584c9672..43b1ffb63b5d 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -1141,6 +1141,11 @@ static struct dentry *fuse_get_parent(struct dentry *child) return parent; } +/* only for fid encoding; no support for file handle */ +static const struct export_operations fuse_export_fid_operations = { + .encode_fh = fuse_encode_fh, +}; + static const struct export_operations fuse_export_operations = { .fh_to_dentry = fuse_fh_to_dentry, .fh_to_parent = fuse_fh_to_parent, @@ -1315,6 +1320,8 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args, fc->create_supp_group = 1; if (flags & FUSE_DIRECT_IO_ALLOW_MMAP) fc->direct_io_allow_mmap = 1; + if (flags & FUSE_NO_EXPORT_SUPPORT) + fm->sb->s_export_op = &fuse_export_fid_operations; } else { ra_pages = fc->max_read / PAGE_SIZE; fc->no_lock = 1; @@ -1361,7 +1368,8 @@ void fuse_send_init(struct fuse_mount *fm) FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA | FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT | FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP | - FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP; + FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP | + FUSE_NO_EXPORT_SUPPORT; #ifdef CONFIG_FUSE_DAX 
if (fm->fc->dax) flags |= FUSE_MAP_ALIGNMENT; @@ -1556,6 +1564,7 @@ static int fuse_fill_super_submount(struct super_block *sb, sb->s_bdi = bdi_get(parent_sb->s_bdi); sb->s_xattr = parent_sb->s_xattr; + sb->s_export_op = parent_sb->s_export_op; sb->s_time_gran = parent_sb->s_time_gran; sb->s_blocksize = parent_sb->s_blocksize; sb->s_blocksize_bits = parent_sb->s_blocksize_bits; diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index e7418d15fe39..33b56d9e4803 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -211,6 +211,9 @@ * 7.39 * - add FUSE_DIRECT_IO_ALLOW_MMAP * - add FUSE_STATX and related structures + * + * 7.40 + * - add FUSE_NO_EXPORT_SUPPORT init flag */ #ifndef _LINUX_FUSE_H @@ -410,6 +413,7 @@ struct fuse_file_lock { * symlink and mknod (single group that matches parent) * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode. + * FUSE_NO_EXPORT_SUPPORT: explicitly disable export support */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -449,6 +453,7 @@ struct fuse_file_lock { #define FUSE_CREATE_SUPP_GROUP (1ULL << 34) #define FUSE_HAS_EXPIRE_ONLY (1ULL << 35) #define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36) +#define FUSE_NO_EXPORT_SUPPORT (1ULL << 38) /* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ #define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP -- Gitee From d496abf37324f1697c26531735dd7909697a01fb Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Sat, 18 Apr 2020 19:35:55 +0800 Subject: [PATCH 0773/2138] anolis: sched: add kconfig SCHED_SLI ANBZ: #8657 This introduces the new bool kconfig SCHED_SLI, determining whether the scheduler SLI feature should be built-in or not. 
Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- init/Kconfig | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/init/Kconfig b/init/Kconfig index ccebe67eed59..143335723252 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1133,6 +1133,21 @@ config CGROUP_DEVICE Provides a cgroup controller implementing whitelists for devices which a process in the cgroup can mknod or open. +config SCHED_SLI + bool "cgroup CPU usage and additional scheduler statistics" + depends on CGROUP_CPUACCT + depends on FAIR_GROUP_SCHED + default Y + help + This accounts CPU time spent by tasks in a cgroup into "usr%" "sys%" + "idle" "steal%" "irq%" "softirq%" "guest%". And this exports + nr_migrations, nr_running, nr_uninterruptible of a cgroup. + + The corresponding interface is cpuacct.proc_stat. + + Note CPU usage requires cpuacct and cpu cgroup subsys to be mounted + together to take effect. + config CGROUP_CPUACCT bool "Simple CPU accounting controller" help -- Gitee From 8d2d062a91e4cfe1b890cecbbb98170394c7d737 Mon Sep 17 00:00:00 2001 From: Yi Tao Date: Mon, 11 Mar 2024 15:29:03 +0800 Subject: [PATCH 0774/2138] anolis: configs: enable CONFIG_SCHED_SLI ANBZ: #8657 Enable CONFIG_SCHED_SLI by default. 
Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- arch/arm64/configs/anolis-debug_defconfig | 1 + arch/arm64/configs/anolis_defconfig | 1 + arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + 4 files changed, 4 insertions(+) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 1c039289682b..a9dd69260bb5 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -195,6 +195,7 @@ CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y +CONFIG_SCHED_SLI=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 4b995ee48cd6..1935cdb56003 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -194,6 +194,7 @@ CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y +CONFIG_SCHED_SLI=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 707eaf6a15ef..ba7a304585a4 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -214,6 +214,7 @@ CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y +CONFIG_SCHED_SLI=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index ad45ec0bbcbf..1595b3a3616d 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -213,6 +213,7 @@ CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y +CONFIG_SCHED_SLI=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y -- Gitee From e9c4fadfd81049902984f993fb9c8c6baf75a149 Mon Sep 
17 00:00:00 2001 From: Xunlei Pang Date: Thu, 3 Jun 2021 15:14:39 +0800 Subject: [PATCH 0775/2138] anolis: sched: Maintain "nr_uninterruptible" in runqueue ANBZ: #8657 It's relatively easy to maintain nr_uninterruptible in scheduler compared to doing it in cpuacct, we assume that "cpu,cpuacct" are bound together, so that it can be used for per-cgroup load. This will be needed to calculate per-cgroup load average later. Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- include/linux/cgroup.h | 2 ++ include/linux/sched.h | 4 ++++ kernel/cgroup/cgroup.c | 6 ++++++ kernel/sched/core.c | 26 ++++++++++++++++++++++++-- kernel/sched/fair.c | 13 +++++++++++++ kernel/sched/rt.c | 13 +++++++++++++ kernel/sched/sched.h | 14 +++++++++++++- 7 files changed, 75 insertions(+), 3 deletions(-) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b307013b9c6c..f1219e29d309 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -121,6 +121,8 @@ void cgroup_file_show(struct cgroup_file *cfile, bool show); int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry); int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *tsk); +extern struct cgroup_subsys_state *global_cgroup_css(struct cgroup *cgrp, + int ssid); void cgroup_fork(struct task_struct *p); extern int cgroup_can_fork(struct task_struct *p, diff --git a/include/linux/sched.h b/include/linux/sched.h index ea17a90d15bd..db0620a1b7c7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -132,6 +132,10 @@ struct user_event_mm; #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0) #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0) #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0) +#define task_contributes_to_load(task) \ + ((READ_ONCE((task)->__state) & TASK_UNINTERRUPTIBLE) != 0 && \ + 
(READ_ONCE((task)->__state) & TASK_FROZEN) == 0 && \ + (READ_ONCE((task)->__state) & TASK_NOLOAD) == 0) /* * Special states are those that do not use the normal wait-loop pattern. See diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 1e8078e73762..97f2bd6dc314 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -493,6 +493,12 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp, return &cgrp->self; } +struct cgroup_subsys_state *global_cgroup_css(struct cgroup *cgrp, + int ssid) +{ + return cgroup_css(cgrp, cgroup_subsys[(ssid)]); +} + /** * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss * @cgrp: the cgroup of interest diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0dc08a7eab36..a4a7722678de 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2132,6 +2132,12 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) p->sched_class->dequeue_task(rq, p, flags); } +static void update_nr_uninterruptible(struct task_struct *tsk, long inc) +{ + if (tsk->sched_class->update_nr_uninterruptible) + tsk->sched_class->update_nr_uninterruptible(tsk, inc); +} + void activate_task(struct rq *rq, struct task_struct *p, int flags) { if (task_on_rq_migrating(p)) @@ -3785,8 +3791,10 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, lockdep_assert_rq_held(rq); - if (p->sched_contributes_to_load) + if (p->sched_contributes_to_load) { + update_nr_uninterruptible(p, -1); rq->nr_uninterruptible--; + } #ifdef CONFIG_SMP if (wake_flags & WF_MIGRATED) @@ -6800,8 +6808,10 @@ static void __sched notrace __schedule(unsigned int sched_mode) !(prev_state & TASK_NOLOAD) && !(prev_state & TASK_FROZEN); - if (prev->sched_contributes_to_load) + if (prev->sched_contributes_to_load) { + update_nr_uninterruptible(prev, 1); rq->nr_uninterruptible++; + } /* * __schedule() ttwu() @@ -10724,8 +10734,20 @@ void sched_move_task(struct task_struct *tsk) if 
(running) put_prev_task(rq, tsk); + /* decrease old group */ + if ((!queued && task_contributes_to_load(tsk)) || + (READ_ONCE(tsk->__state) == TASK_WAKING && + tsk->sched_contributes_to_load)) + update_nr_uninterruptible(tsk, -1); + sched_change_group(tsk, group); + /* increase new group after change */ + if ((!queued && task_contributes_to_load(tsk)) || + (READ_ONCE(tsk->__state) == TASK_WAKING && + tsk->sched_contributes_to_load)) + update_nr_uninterruptible(tsk, 1); + if (queued) enqueue_task(rq, tsk, queue_flags); if (running) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 4d4d4de16c9f..e8cf46db8397 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -13230,6 +13230,16 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task return rr_interval; } +#ifdef CONFIG_SCHED_SLI +static void update_nr_uninterruptible_fair(struct task_struct *p, long inc) +{ + struct sched_entity *se = &p->se; + + for_each_sched_entity(se) + cfs_rq_of(se)->nr_uninterruptible += inc; +} +#endif + /* * All the scheduling class methods: */ @@ -13277,6 +13287,9 @@ DEFINE_SCHED_CLASS(fair) = { #ifdef CONFIG_SCHED_CORE .task_is_throttled = task_is_throttled_fair, #endif +#ifdef CONFIG_SCHED_SLI + .update_nr_uninterruptible = update_nr_uninterruptible_fair, +#endif #ifdef CONFIG_UCLAMP_TASK .uclamp_enabled = 1, diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index b89223a97316..ec6d87169794 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2695,6 +2695,16 @@ static int task_is_throttled_rt(struct task_struct *p, int cpu) } #endif +#ifdef CONFIG_SCHED_SLI +static void update_nr_uninterruptible_rt(struct task_struct *p, long inc) +{ + struct sched_rt_entity *se = &p->rt; + + for_each_sched_rt_entity(se) + rt_rq_of_se(se)->nr_uninterruptible += inc; +} +#endif + DEFINE_SCHED_CLASS(rt) = { .enqueue_task = enqueue_task_rt, @@ -2731,6 +2741,9 @@ DEFINE_SCHED_CLASS(rt) = { #ifdef CONFIG_SCHED_CORE .task_is_throttled = 
task_is_throttled_rt, #endif +#ifdef CONFIG_SCHED_SLI + .update_nr_uninterruptible = update_nr_uninterruptible_rt, +#endif #ifdef CONFIG_UCLAMP_TASK .uclamp_enabled = 1, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 74fcbb36c538..be96b37b9b80 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -660,6 +660,8 @@ struct cfs_rq { #endif #endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ + + unsigned long nr_uninterruptible; }; static inline int rt_bandwidth_enabled(void) @@ -706,6 +708,8 @@ struct rt_rq { struct rq *rq; struct task_group *tg; #endif + + unsigned long nr_uninterruptible; }; static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) @@ -1028,7 +1032,14 @@ struct rq { struct task_struct *idle; struct task_struct *stop; unsigned long next_balance; - struct mm_struct *prev_mm; + + /* + * Frequent writing to prev_mm and clock_update_flags on local + * CPU causes cacheline containing idle to be invalidated on + * other CPUs. Put prev_mm and sequential fields on a new + * cacheline to fix it. + */ + struct mm_struct *prev_mm ____cacheline_aligned; unsigned int clock_update_flags; u64 clock; @@ -2327,6 +2338,7 @@ struct sched_class { #ifdef CONFIG_SCHED_CORE int (*task_is_throttled)(struct task_struct *p, int cpu); #endif + void (*update_nr_uninterruptible)(struct task_struct *p, long inc); }; static inline void put_prev_task(struct rq *rq, struct task_struct *prev) -- Gitee From 3c3d66143a1e51501288531d6aeeafbe9c58fade Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Thu, 3 Jun 2021 16:23:39 +0800 Subject: [PATCH 0776/2138] anolis: cpuacct: export cpuacct.proc_stat interface ANBZ: #8657 Add the cgroup file "cpuacct.proc_stat", we'll export per-cgroup cpu usages and some other scheduler statistics in this interface. 
Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/proc/stat.c | 2 +- include/linux/sched.h | 5 ++ kernel/sched/cpuacct.c | 148 +++++++++++++++++++++++++++++++++++++++++ kernel/sched/sched.h | 5 ++ 4 files changed, 159 insertions(+), 1 deletion(-) diff --git a/fs/proc/stat.c b/fs/proc/stat.c index da60956b2915..ce8a751185f5 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -38,7 +38,7 @@ u64 get_idle_time(struct kernel_cpustat *kcs, int cpu) return idle; } -static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu) +u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu) { u64 iowait, iowait_usecs = -1ULL; diff --git a/include/linux/sched.h b/include/linux/sched.h index db0620a1b7c7..b5017419e3fc 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2513,4 +2513,9 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } extern void sched_set_stop_task(int cpu, struct task_struct *stop); +struct cpuacct_usage_result { + u64 user, nice, system, irq, softirq; + u64 steal, iowait, idle, guest, guest_nice; +}; + #endif diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 0de9dda09949..67b6c7817397 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -20,11 +20,17 @@ static const char * const cpuacct_stat_desc[] = { [CPUACCT_STAT_SYSTEM] = "system", }; +struct cpuacct_prev_cputime { + struct prev_cputime prev_cputime1; /* utime and stime */ + struct prev_cputime prev_cputime2; /* user and nice */ +} ____cacheline_aligned; + /* track CPU usage of a group of tasks and its child groups */ struct cpuacct { struct cgroup_subsys_state css; /* cpuusage holds pointer to a u64-type object on every CPU */ u64 __percpu *cpuusage; + struct cpuacct_prev_cputime __percpu *prev_cputime; struct kernel_cpustat __percpu *cpustat; }; @@ -45,8 +51,10 @@ static inline struct cpuacct *parent_ca(struct cpuacct *ca) } static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage); +static 
DEFINE_PER_CPU(struct cpuacct_prev_cputime, root_cpuacct_prev_cputime); static struct cpuacct root_cpuacct = { .cpustat = &kernel_cpustat, + .prev_cputime = &root_cpuacct_prev_cputime, .cpuusage = &root_cpuacct_cpuusage, }; @@ -55,6 +63,7 @@ static struct cgroup_subsys_state * cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) { struct cpuacct *ca; + int i; if (!parent_css) return &root_cpuacct.css; @@ -71,8 +80,21 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) if (!ca->cpustat) goto out_free_cpuusage; + ca->prev_cputime = alloc_percpu(struct cpuacct_prev_cputime); + if (!ca->prev_cputime) + goto out_free_cpustat; + + for_each_possible_cpu(i) { + prev_cputime_init( + &per_cpu_ptr(ca->prev_cputime, i)->prev_cputime1); + prev_cputime_init( + &per_cpu_ptr(ca->prev_cputime, i)->prev_cputime2); + } + return &ca->css; +out_free_cpustat: + free_percpu(ca->cpustat); out_free_cpuusage: free_percpu(ca->cpuusage); out_free_ca: @@ -86,6 +108,7 @@ static void cpuacct_css_free(struct cgroup_subsys_state *css) { struct cpuacct *ca = css_ca(css); + free_percpu(ca->prev_cputime); free_percpu(ca->cpustat); free_percpu(ca->cpuusage); kfree(ca); @@ -289,6 +312,125 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v) return 0; } +#ifdef CONFIG_SCHED_SLI +#ifndef arch_idle_time +#define arch_idle_time(cpu) 0 +#endif + +static inline struct task_group *cgroup_tg(struct cgroup *cgrp) +{ + return container_of(global_cgroup_css(cgrp, cpu_cgrp_id), + struct task_group, css); +} + +static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, + struct task_group *tg, struct cpuacct_usage_result *res) +{ + struct kernel_cpustat *kcpustat; + u64 *cpuusage; + struct cpuacct_prev_cputime *prev_cputime; + struct task_cputime cputime; + u64 tick_user, tick_nice, tick_sys, left, right; + struct sched_entity *se; + + kcpustat = per_cpu_ptr(ca->cpustat, cpu); + if (unlikely(!tg)) { + memset(res, 0, sizeof(*res)); + return; + } + + cpuusage = 
per_cpu_ptr(ca->cpuusage, cpu); + + se = tg->se[cpu]; + prev_cputime = per_cpu_ptr(ca->prev_cputime, cpu); + tick_user = kcpustat->cpustat[CPUTIME_USER]; + tick_nice = kcpustat->cpustat[CPUTIME_NICE]; + tick_sys = kcpustat->cpustat[CPUTIME_SYSTEM]; + + /* Calculate system run time */ + cputime.sum_exec_runtime = *cpuusage; + cputime.utime = tick_user + tick_nice; + cputime.stime = tick_sys; + cputime_adjust(&cputime, &prev_cputime->prev_cputime1, &left, &right); + res->system = right; + + /* Calculate user and nice run time */ + cputime.sum_exec_runtime = left; /* user + nice */ + cputime.utime = tick_user; + cputime.stime = tick_nice; + cputime_adjust(&cputime, &prev_cputime->prev_cputime2, &left, &right); + res->user = left; + res->nice = right; + + res->irq = kcpustat->cpustat[CPUTIME_IRQ]; + res->softirq = kcpustat->cpustat[CPUTIME_SOFTIRQ]; + if (se) + res->steal = __schedstats_from_se(se)->wait_sum; + else + res->steal = 0; + res->guest = res->guest_nice = 0; /* currently always 0 */ +} + +static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) +{ + struct cpuacct *ca = css_ca(seq_css(sf)); + struct cgroup *cgrp = seq_css(sf)->cgroup; + u64 user, nice, system, idle, iowait, irq, softirq, steal, guest; + int cpu; + + user = nice = system = idle = iowait = + irq = softirq = steal = guest = 0; + + if (ca != &root_cpuacct) { + struct cpuacct_usage_result res; + + for_each_possible_cpu(cpu) { + rcu_read_lock(); + __cpuacct_get_usage_result(ca, cpu, + cgroup_tg(cgrp), &res); + rcu_read_unlock(); + + user += res.user; + nice += res.nice; + system += res.system; + irq += res.irq; + softirq += res.softirq; + steal += res.steal; + guest += res.guest; + iowait += res.iowait; + idle += res.idle; + } + } else { + struct kernel_cpustat *kcpustat; + + for_each_possible_cpu(cpu) { + kcpustat = per_cpu_ptr(ca->cpustat, cpu); + user += kcpustat->cpustat[CPUTIME_USER]; + nice += kcpustat->cpustat[CPUTIME_NICE]; + system += kcpustat->cpustat[CPUTIME_SYSTEM]; + irq += 
kcpustat->cpustat[CPUTIME_IRQ]; + softirq += kcpustat->cpustat[CPUTIME_SOFTIRQ]; + guest += kcpustat->cpustat[CPUTIME_GUEST]; + idle += get_idle_time(kcpustat, cpu); + iowait += get_iowait_time(kcpustat, cpu); + steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + } + } + + seq_printf(sf, "user %lld\n", nsec_to_clock_t(user)); + seq_printf(sf, "nice %lld\n", nsec_to_clock_t(nice)); + seq_printf(sf, "system %lld\n", nsec_to_clock_t(system)); + seq_printf(sf, "idle %lld\n", nsec_to_clock_t(idle)); + seq_printf(sf, "iowait %lld\n", nsec_to_clock_t(iowait)); + seq_printf(sf, "irq %lld\n", nsec_to_clock_t(irq)); + seq_printf(sf, "softirq %lld\n", nsec_to_clock_t(softirq)); + seq_printf(sf, "steal %lld\n", nsec_to_clock_t(steal)); + seq_printf(sf, "guest %lld\n", nsec_to_clock_t(guest)); + + return 0; +} +#endif + static struct cftype files[] = { { .name = "usage", @@ -323,6 +465,12 @@ static struct cftype files[] = { .name = "stat", .seq_show = cpuacct_stats_show, }, +#ifdef CONFIG_SCHED_SLI + { + .name = "proc_stat", + .seq_show = cpuacct_proc_stats_show, + }, +#endif { } /* terminate */ }; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index be96b37b9b80..04c75dc3294c 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3572,4 +3572,9 @@ static inline void init_sched_mm_cid(struct task_struct *t) { } extern u64 avg_vruntime(struct cfs_rq *cfs_rq); extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); +#ifdef CONFIG_SCHED_SLI +extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); +extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); +#endif + #endif /* _KERNEL_SCHED_SCHED_H */ -- Gitee From 1e03a6f3e2513186092d810b0add66193efedf51 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Wed, 6 Nov 2019 20:13:02 +0800 Subject: [PATCH 0777/2138] anolis: cpuacct/proc_stat: Consider isolcpus ANBZ: #8657 When "isolcpus=" is passed, skip all its accountings. 
Signed-off-by: Xunlei Pang Tested-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 67b6c7817397..8ae703c442a1 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -385,6 +385,9 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) struct cpuacct_usage_result res; for_each_possible_cpu(cpu) { + if (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN)) + continue; + rcu_read_lock(); __cpuacct_get_usage_result(ca, cpu, cgroup_tg(cgrp), &res); -- Gitee From efc0b6bf5b6a1fea0db7f4710962e0f09030f248 Mon Sep 17 00:00:00 2001 From: Shanpei Chen Date: Thu, 3 Jun 2021 18:44:29 +0800 Subject: [PATCH 0778/2138] anolis: sched/cputime: Fix guest cputime of cpuacct.proc_stat ANBZ: #8657 For container only cases, since guest cputime is always 0, we don't calculate it and return 0 directly before. However, when running vm inside a cgroup, we expect the cgroup to maintain guest cputime correctly. 
Signed-off-by: Shanpei Chen Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 5 ++++- kernel/sched/cputime.c | 6 ++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 8ae703c442a1..b3f33c8ae160 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -368,7 +368,8 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, res->steal = __schedstats_from_se(se)->wait_sum; else res->steal = 0; - res->guest = res->guest_nice = 0; /* currently always 0 */ + res->guest = kcpustat->cpustat[CPUTIME_GUEST]; + res->guest_nice = kcpustat->cpustat[CPUTIME_GUEST_NICE]; } static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) @@ -400,6 +401,7 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) softirq += res.softirq; steal += res.steal; guest += res.guest; + guest += res.guest_nice; iowait += res.iowait; idle += res.idle; } @@ -414,6 +416,7 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) irq += kcpustat->cpustat[CPUTIME_IRQ]; softirq += kcpustat->cpustat[CPUTIME_SOFTIRQ]; guest += kcpustat->cpustat[CPUTIME_GUEST]; + guest += kcpustat->cpustat[CPUTIME_GUEST_NICE]; idle += get_idle_time(kcpustat, cpu); iowait += get_iowait_time(kcpustat, cpu); steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 6b8da47b5ade..8685c8e019f8 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -142,8 +142,6 @@ void account_user_time(struct task_struct *p, u64 cputime) */ void account_guest_time(struct task_struct *p, u64 cputime) { - u64 *cpustat = kcpustat_this_cpu->cpustat; - /* Add guest time to process. */ p->utime += cputime; account_group_user_time(p, cputime); @@ -152,10 +150,10 @@ void account_guest_time(struct task_struct *p, u64 cputime) /* Add guest time to cpustat. 
*/ if (task_nice(p) > 0) { task_group_account_field(p, CPUTIME_NICE, cputime); - cpustat[CPUTIME_GUEST_NICE] += cputime; + task_group_account_field(p, CPUTIME_GUEST_NICE, cputime); } else { task_group_account_field(p, CPUTIME_USER, cputime); - cpustat[CPUTIME_GUEST] += cputime; + task_group_account_field(p, CPUTIME_GUEST, cputime); } } -- Gitee From eca3b6d97589c3902e8a3ff83e80e3bbb7acb028 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Fri, 17 Jan 2020 01:03:44 +0800 Subject: [PATCH 0779/2138] anolis: cpuacct: Export nr_running & nr_uninterruptible ANBZ: #8657 cpu cgroup's nr_running and nr_uninterruptible are useful for troubleshooting. Export them in cpuacct.proc_stat. Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 101 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index b3f33c8ae160..7e8b40df52de 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -323,6 +323,95 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp) struct task_group, css); } +static inline unsigned long nr_uninterruptible(void) +{ + unsigned long i, sum = 0; + + for_each_possible_cpu(i) + sum += cpu_rq(i)->nr_uninterruptible; + + /* + * Since we read the counters lockless, it might be slightly + * inaccurate. 
Do not allow it to go below zero though: + */ + if (unlikely((long)sum < 0)) + sum = 0; + + return sum; +} + +#ifdef CONFIG_CFS_BANDWIDTH +static inline bool tg_cfs_throttled(struct task_group *tg, int cpu) +{ + return tg->cfs_rq[cpu]->throttle_count; +} +#else +static inline bool tg_cfs_throttled(struct task_group *tg, int cpu) +{ + return false; +} +#endif + +#ifdef CONFIG_RT_GROUP_SCHED +static inline bool tg_rt_throttled(struct task_group *tg, int cpu) +{ + return tg->rt_rq[cpu]->rt_throttled && !tg->rt_rq[cpu]->rt_nr_boosted; +} +#endif + +static unsigned long ca_running(struct cpuacct *ca, int cpu) +{ + unsigned long nr_running = 0; + struct cgroup *cgrp = ca->css.cgroup; + struct task_group *tg; + + /* Make sure it is only called for non-root cpuacct */ + if (ca == &root_cpuacct) + return 0; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + if (unlikely(!tg)) + goto out; + + if (!tg_cfs_throttled(tg, cpu)) + nr_running += tg->cfs_rq[cpu]->h_nr_running; +#ifdef CONFIG_RT_GROUP_SCHED + if (!tg_rt_throttled(tg, cpu)) + nr_running += tg->rt_rq[cpu]->rt_nr_running; +#endif + /* SCHED_DEADLINE doesn't support cgroup yet */ + +out: + rcu_read_unlock(); + return nr_running; +} + +static unsigned long ca_uninterruptible(struct cpuacct *ca, int cpu) +{ + unsigned long nr = 0; + struct cgroup *cgrp = ca->css.cgroup; + struct task_group *tg; + + /* Make sure it is only called for non-root cpuacct */ + if (ca == &root_cpuacct) + return nr; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + if (unlikely(!tg)) + goto out_rcu_unlock; + + nr = tg->cfs_rq[cpu]->nr_uninterruptible; +#ifdef CONFIG_RT_GROUP_SCHED + nr += tg->rt_rq[cpu]->nr_uninterruptible; +#endif + +out_rcu_unlock: + rcu_read_unlock(); + return nr; +} + static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, struct task_group *tg, struct cpuacct_usage_result *res) { @@ -377,6 +466,7 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) struct cpuacct *ca = css_ca(seq_css(sf)); struct cgroup 
*cgrp = seq_css(sf)->cgroup; u64 user, nice, system, idle, iowait, irq, softirq, steal, guest; + unsigned long nr_run = 0, nr_uninter = 0; int cpu; user = nice = system = idle = iowait = @@ -404,6 +494,9 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) guest += res.guest_nice; iowait += res.iowait; idle += res.idle; + + nr_run += ca_running(ca, cpu); + nr_uninter += ca_uninterruptible(ca, cpu); } } else { struct kernel_cpustat *kcpustat; @@ -421,6 +514,9 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) iowait += get_iowait_time(kcpustat, cpu); steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; } + + nr_run = nr_running(); + nr_uninter = nr_uninterruptible(); } seq_printf(sf, "user %lld\n", nsec_to_clock_t(user)); @@ -433,6 +529,11 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) seq_printf(sf, "steal %lld\n", nsec_to_clock_t(steal)); seq_printf(sf, "guest %lld\n", nsec_to_clock_t(guest)); + seq_printf(sf, "nr_running %lld\n", (u64)nr_run); + if ((long) nr_uninter < 0) + nr_uninter = 0; + seq_printf(sf, "nr_uninterruptible %lld\n", (u64)nr_uninter); + return 0; } #endif -- Gitee From 2b5a3521c1226816f40e28fc3a87881b1c547fd2 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Fri, 4 Jun 2021 11:18:37 +0800 Subject: [PATCH 0780/2138] anolis: sched: Introduce per-cgroup idle accounting ANBZ: #8657 Since we concern idle, let's take idle as the center state. And omit transition between other stats. Below is the state transition graph: sleep->deque +-----------+ cpumask +-------+ exit->deque +-------+ |ineffective|-------- | idle | <-----------|running| +-----------+ +-------+ +-------+ ^ | unthrtl child -> deque | | wake -> deque | |thrtl chlid -> enque migrate -> deque | |migrate -> enque | v +-------+ | steal | +-------+ We conclude idle state condition as: !se->on_rq && !my_q->throttled && cpu allowed. 
From this graph and condition, we can hook (de|en)queue_task_fair update_cpumasks_hier, (un|)throttle_cfs_rq to account idle state. In the hooked functions, we also check the conditions, to avoid accounting unwanted cpu clocks. Signed-off-by: Yihao Wu Signed-off-by: Shanpei Chen Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- include/linux/cgroup.h | 12 +++++ include/linux/sched.h | 4 ++ kernel/cgroup/cpuset.c | 16 +++++++ kernel/sched/cpuacct.c | 100 +++++++++++++++++++++++++++++++++++++++-- kernel/sched/fair.c | 29 +++++++++++- kernel/sched/sched.h | 5 +++ 6 files changed, 161 insertions(+), 5 deletions(-) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index f1219e29d309..5eda25d7c3a5 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -857,4 +857,16 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {} #endif /* CONFIG_CGROUP_BPF */ +#ifdef CONFIG_SCHED_SLI +void cpuacct_cpuset_changed(struct cgroup *cgrp, + struct cpumask *effective, struct cpumask *new_added); +void cgroup_idle_end(struct sched_entity *se); +void cgroup_idle_start(struct sched_entity *se); +#else +static inline void cpuacct_cpuset_changed(struct cgroup *cgrp, + struct cpumask *effective, struct cpumask *new_added) { } +static inline void cgroup_idle_end(struct sched_entity *se) { } +static inline void cgroup_idle_start(struct sched_entity *se) { } +#endif + #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index b5017419e3fc..f61560ee2800 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -575,6 +575,10 @@ struct sched_entity { s64 vlag; u64 slice; + u64 cg_idle_start; + u64 cg_idle_sum; + seqlock_t idle_seqlock; + u64 nr_migrations; #ifdef CONFIG_FAIR_GROUP_SCHED diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 5b54d3573c82..a35e17da6993 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1601,6 +1601,8 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, 
int cmd, return 0; } +static struct cpumask added, deleted, old_cpus; + /* * update_cpumasks_hier() flags */ @@ -1657,6 +1659,11 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, parent->child_ecpus_count--; } + if (cpumask_empty(cp->effective_cpus)) + cpumask_copy(&old_cpus, parent->effective_cpus); + else + cpumask_copy(&old_cpus, cp->effective_cpus); + /* * Skip the whole subtree if * 1) the cpumask remains the same, @@ -1748,8 +1755,16 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, WARN_ON(!is_in_v2_mode() && !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); + /* add = new - old = new & (~old) */ + cpumask_andnot(&added, tmp->new_cpus, &old_cpus); + cpuacct_cpuset_changed(cs->css.cgroup, NULL, &added); + update_tasks_cpumask(cp, tmp->new_cpus); + /* deleted = old - new = old & (~new) */ + cpumask_andnot(&deleted, &old_cpus, tmp->new_cpus); + cpuacct_cpuset_changed(cs->css.cgroup, &deleted, NULL); + /* * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE * from parent if current cpuset isn't a valid partition root @@ -3329,6 +3344,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) cs->effective_mems = parent->mems_allowed; cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); cpumask_copy(cs->effective_cpus, parent->cpus_allowed); + cpuacct_cpuset_changed(cs->css.cgroup, NULL, cs->effective_cpus); spin_unlock_irq(&callback_lock); out_unlock: mutex_unlock(&cpuset_mutex); diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 7e8b40df52de..12a455867d0a 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -412,6 +412,84 @@ static unsigned long ca_uninterruptible(struct cpuacct *ca, int cpu) return nr; } +void cgroup_idle_start(struct sched_entity *se) +{ + unsigned long flags; + u64 clock; + + if (!schedstat_enabled()) + return; + + clock = __rq_clock_broken(se->cfs_rq->rq); + + local_irq_save(flags); + + write_seqlock(&se->idle_seqlock); + 
__schedstat_set(se->cg_idle_start, clock); + write_sequnlock(&se->idle_seqlock); + + local_irq_restore(flags); +} + +void cgroup_idle_end(struct sched_entity *se) +{ + unsigned long flags; + u64 clock; + u64 idle_start; + + if (!schedstat_enabled()) + return; + + clock = __rq_clock_broken(se->cfs_rq->rq); + + local_irq_save(flags); + + write_seqlock(&se->idle_seqlock); + idle_start = schedstat_val(se->cg_idle_start); + __schedstat_add(se->cg_idle_sum, clock - idle_start); + __schedstat_set(se->cg_idle_start, 0); + write_sequnlock(&se->idle_seqlock); + + local_irq_restore(flags); +} + +void cpuacct_cpuset_changed(struct cgroup *cgrp, struct cpumask *deleted, + struct cpumask *added) +{ + struct task_group *tg; + struct sched_entity *se; + int cpu; + + if (!schedstat_enabled()) + return; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + + if (!tg) { + rcu_read_unlock(); + return; + } + + if (added) { + /* Mark newly added cpus as newly-idle */ + for_each_cpu(cpu, added) { + se = tg->se[cpu]; + cgroup_idle_start(se); + } + } + + if (deleted) { + /* Mark ineffective_cpus as idle-invalid */ + for_each_cpu(cpu, deleted) { + se = tg->se[cpu]; + cgroup_idle_end(se); + } + } + + rcu_read_unlock(); +} + static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, struct task_group *tg, struct cpuacct_usage_result *res) { @@ -453,10 +531,26 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, res->irq = kcpustat->cpustat[CPUTIME_IRQ]; res->softirq = kcpustat->cpustat[CPUTIME_SOFTIRQ]; - if (se) - res->steal = __schedstats_from_se(se)->wait_sum; - else + + if (se && schedstat_enabled()) { + unsigned int seq; + u64 idle_start; + u64 clock = cpu_clock(cpu); + + do { + seq = read_seqbegin(&se->idle_seqlock); + res->idle = schedstat_val(se->cg_idle_sum); + idle_start = schedstat_val(se->cg_idle_start); + clock = cpu_clock(cpu); + if (idle_start && clock > idle_start) + res->idle += clock - idle_start; + } while (read_seqretry(&se->idle_seqlock, seq)); + 
res->steal = 0; + } else { + res->idle = res->iowait = res->steal = 0; + } + res->guest = kcpustat->cpustat[CPUTIME_GUEST]; res->guest_nice = kcpustat->cpustat[CPUTIME_GUEST_NICE]; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index e8cf46db8397..fc75505a1e22 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5819,6 +5819,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) if (!se->on_rq) goto done; + if (se->my_q != cfs_rq) + cgroup_idle_start(se); + dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); if (cfs_rq_is_idle(group_cfs_rq(se))) @@ -5867,6 +5870,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) { + struct cfs_rq *bottom_cfs_rq = cfs_rq; struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; @@ -5910,6 +5914,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) if (se->on_rq) break; + + if (se->my_q != bottom_cfs_rq) + cgroup_idle_end(se); enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); if (cfs_rq_is_idle(group_cfs_rq(se))) @@ -6796,6 +6803,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) cfs_rq = cfs_rq_of(se); enqueue_entity(cfs_rq, se, flags); + if (!entity_is_task(se)) + cgroup_idle_end(se); + cfs_rq->h_nr_running++; cfs_rq->idle_h_nr_running += idle_h_nr_running; @@ -6803,8 +6813,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) idle_h_nr_running = 1; /* end evaluation on encountering a throttled cfs_rq */ - if (cfs_rq_throttled(cfs_rq)) + if (cfs_rq_throttled(cfs_rq)) { +#ifdef CONFIG_FAIR_GROUP_SCHED + if (cfs_rq->nr_running == 1) + cgroup_idle_end(se->parent); +#endif goto enqueue_throttle; + } flags = ENQUEUE_WAKEUP; } @@ -6874,6 +6889,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) cfs_rq = cfs_rq_of(se); dequeue_entity(cfs_rq, se, flags); + if (!entity_is_task(se)) + cgroup_idle_start(se); + cfs_rq->h_nr_running--; cfs_rq->idle_h_nr_running 
-= idle_h_nr_running; @@ -6881,8 +6899,13 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) idle_h_nr_running = 1; /* end evaluation on encountering a throttled cfs_rq */ - if (cfs_rq_throttled(cfs_rq)) + if (cfs_rq_throttled(cfs_rq)) { +#ifdef CONFIG_FAIR_GROUP_SCHED + if (!cfs_rq->nr_running) + cgroup_idle_start(se->parent); +#endif goto dequeue_throttle; + } /* Don't dequeue parent if it has other entities besides us */ if (cfs_rq->load.weight) { @@ -13072,6 +13095,8 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, /* guarantee group entities always have weight */ update_load_set(&se->load, NICE_0_LOAD); se->parent = parent; + seqlock_init(&se->idle_seqlock); + se->cg_idle_start = cpu_clock(cpu); } static DEFINE_MUTEX(shares_mutex); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 04c75dc3294c..5fb45db0b744 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1504,6 +1504,11 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) extern void update_rq_clock(struct rq *rq); +static inline u64 __rq_clock_broken(struct rq *rq) +{ + return READ_ONCE(rq->clock); +} + /* * rq::clock_update_flags bits * -- Gitee From 828580bfe59ee06f7608a4777a7f16a4a00a72e8 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Fri, 4 Jun 2021 14:32:01 +0800 Subject: [PATCH 0781/2138] anolis: sched: Introduce per-cgroup steal accounting ANBZ: #8657 From the previous patch. We know there are 4 possible states. Since steal state's transition is complex. We choose to account its supplement. steal = elapse - idle - sum_exec_raw - ineffective Where elapse is the time since the cgroup is created. sum_exec_raw is the running time including IRQ time. ineffective is the total time that the cpuacct-binded cpuset doesn't allow this cpu for the cgroup. 
Signed-off-by: Yihao Wu Signed-off-by: Shanpei Chen Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- include/linux/sched.h | 6 ++++++ kernel/sched/cpuacct.c | 22 ++++++++++++++++++++-- kernel/sched/fair.c | 16 +++++++++++++++- 3 files changed, 41 insertions(+), 3 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index f61560ee2800..f4dcd6cbf1c3 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -575,8 +575,14 @@ struct sched_entity { s64 vlag; u64 slice; + /* irq time is included */ + u64 exec_start_raw; + u64 sum_exec_raw; u64 cg_idle_start; u64 cg_idle_sum; + u64 cg_init_time; + u64 cg_ineffective_sum; + u64 cg_ineffective_start; seqlock_t idle_seqlock; u64 nr_migrations; diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 12a455867d0a..339b1c4069d4 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -476,6 +476,10 @@ void cpuacct_cpuset_changed(struct cgroup *cgrp, struct cpumask *deleted, for_each_cpu(cpu, added) { se = tg->se[cpu]; cgroup_idle_start(se); + __schedstat_add(se->cg_ineffective_sum, + __rq_clock_broken(cpu_rq(cpu)) - + se->cg_ineffective_start); + __schedstat_set(se->cg_ineffective_start, 0); } } @@ -484,6 +488,9 @@ void cpuacct_cpuset_changed(struct cgroup *cgrp, struct cpumask *deleted, for_each_cpu(cpu, deleted) { se = tg->se[cpu]; cgroup_idle_end(se); + /* Use __rq_clock_broken to avoid warning */ + __schedstat_set(se->cg_ineffective_start, + __rq_clock_broken(cpu_rq(cpu))); } } @@ -534,8 +541,8 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, if (se && schedstat_enabled()) { unsigned int seq; - u64 idle_start; - u64 clock = cpu_clock(cpu); + u64 idle_start, ineff, ineff_start, elapse, complement; + u64 clock; do { seq = read_seqbegin(&se->idle_seqlock); @@ -546,7 +553,18 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, res->idle += clock - idle_start; } while (read_seqretry(&se->idle_seqlock, seq)); + ineff = 
schedstat_val(se->cg_ineffective_sum); + ineff_start = schedstat_val(se->cg_ineffective_start); + if (ineff_start) + __schedstat_add(ineff, clock - ineff_start); + res->steal = 0; + + elapse = clock - schedstat_val(se->cg_init_time); + complement = res->idle + se->sum_exec_raw + ineff; + if (elapse > complement) + res->steal = elapse - complement; + } else { res->idle = res->iowait = res->steal = 0; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fc75505a1e22..b8f50fcf5da2 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1159,6 +1159,18 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq) } #endif /* CONFIG_SMP */ +static inline void +update_exec_raw(struct cfs_rq *cfs_rq, struct sched_entity *curr) +{ + u64 now = rq_clock(rq_of(cfs_rq)); + + curr->sum_exec_raw += now - curr->exec_start_raw; + curr->exec_start_raw = now; +} + +/* + * Update the current task's runtime statistics. + */ static s64 update_curr_se(struct rq *rq, struct sched_entity *curr) { u64 now = rq_clock_task(rq); @@ -1227,6 +1239,7 @@ static void update_curr(struct cfs_rq *cfs_rq) update_curr_task(task_of(curr), delta_exec); account_cfs_rq_runtime(cfs_rq, delta_exec); + update_exec_raw(cfs_rq, curr); } static void update_curr_fair(struct rq *rq) @@ -1353,6 +1366,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) * We are starting a new run period: */ se->exec_start = rq_clock_task(rq_of(cfs_rq)); + se->exec_start_raw = rq_clock(rq_of(cfs_rq)); } /************************************************** @@ -13096,7 +13110,7 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, update_load_set(&se->load, NICE_0_LOAD); se->parent = parent; seqlock_init(&se->idle_seqlock); - se->cg_idle_start = cpu_clock(cpu); + se->cg_idle_start = se->cg_init_time = cpu_clock(cpu); } static DEFINE_MUTEX(shares_mutex); -- Gitee From fd7b98cc0758b2c77991c1a1435c7f436ef553b9 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Fri, 4 Jun 2021 16:31:35 +0800 
Subject: [PATCH 0782/2138] anolis: sched: Introduce per-cgroup iowait accounting ANBZ: #8657 We account iowait when the cgroup's se is idle, and it has blocked task on the hierarchy of se->my_q. To achieve this, we also add cg_nr_running to track the hierarchical number of blocked tasks. We do it when a blocked task wakes up or a task is blocked. Signed-off-by: Yihao Wu Signed-off-by: Shanpei Chen Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- include/linux/sched.h | 4 ++++ kernel/sched/core.c | 3 +++ kernel/sched/cpuacct.c | 26 +++++++++++++++++++++-- kernel/sched/fair.c | 47 ++++++++++++++++++++++++++++++++++++++++++ kernel/sched/sched.h | 6 ++++++ 5 files changed, 84 insertions(+), 2 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index f4dcd6cbf1c3..149ec0e03277 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -581,9 +581,13 @@ struct sched_entity { u64 cg_idle_start; u64 cg_idle_sum; u64 cg_init_time; + u64 cg_nr_iowait; + u64 cg_iowait_sum; + u64 cg_iowait_start; u64 cg_ineffective_sum; u64 cg_ineffective_start; seqlock_t idle_seqlock; + spinlock_t iowait_lock; u64 nr_migrations; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a4a7722678de..d60d9a02bc8d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3804,6 +3804,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, if (p->in_iowait) { delayacct_blkio_end(p); atomic_dec(&task_rq(p)->nr_iowait); + update_nr_iowait(p, -1); } activate_task(rq, p, en_flags); @@ -4367,6 +4368,7 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) if (p->in_iowait) { delayacct_blkio_end(p); atomic_dec(&task_rq(p)->nr_iowait); + update_nr_iowait(p, -1); } wake_flags |= WF_MIGRATED; @@ -6828,6 +6830,7 @@ static void __sched notrace __schedule(unsigned int sched_mode) if (prev->in_iowait) { atomic_inc(&rq->nr_iowait); + update_nr_iowait(prev, 1); delayacct_blkio_start(); } } diff --git a/kernel/sched/cpuacct.c 
b/kernel/sched/cpuacct.c index 339b1c4069d4..093d16cf3daa 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -428,6 +428,11 @@ void cgroup_idle_start(struct sched_entity *se) __schedstat_set(se->cg_idle_start, clock); write_sequnlock(&se->idle_seqlock); + spin_lock(&se->iowait_lock); + if (schedstat_val(se->cg_nr_iowait)) + __schedstat_set(se->cg_iowait_start, clock); + spin_unlock(&se->iowait_lock); + local_irq_restore(flags); } @@ -435,7 +440,7 @@ void cgroup_idle_end(struct sched_entity *se) { unsigned long flags; u64 clock; - u64 idle_start; + u64 idle_start, iowait_start; if (!schedstat_enabled()) return; @@ -450,6 +455,14 @@ void cgroup_idle_end(struct sched_entity *se) __schedstat_set(se->cg_idle_start, 0); write_sequnlock(&se->idle_seqlock); + spin_lock(&se->iowait_lock); + if (schedstat_val(se->cg_nr_iowait)) { + iowait_start = schedstat_val(se->cg_iowait_start); + __schedstat_add(se->cg_iowait_sum, clock - iowait_start); + __schedstat_set(se->cg_iowait_start, 0); + } + spin_unlock(&se->iowait_lock); + local_irq_restore(flags); } @@ -541,8 +554,9 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, if (se && schedstat_enabled()) { unsigned int seq; + unsigned long flags; u64 idle_start, ineff, ineff_start, elapse, complement; - u64 clock; + u64 clock, iowait_start; do { seq = read_seqbegin(&se->idle_seqlock); @@ -558,6 +572,13 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, if (ineff_start) __schedstat_add(ineff, clock - ineff_start); + spin_lock_irqsave(&se->iowait_lock, flags); + res->iowait = schedstat_val(se->cg_iowait_sum); + iowait_start = schedstat_val(se->cg_iowait_start); + if (iowait_start) + __schedstat_add(res->iowait, clock - iowait_start); + spin_unlock_irqrestore(&se->iowait_lock, flags); + res->steal = 0; elapse = clock - schedstat_val(se->cg_init_time); @@ -565,6 +586,7 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, if (elapse > complement) res->steal = 
elapse - complement; + res->idle -= res->iowait; } else { res->idle = res->iowait = res->steal = 0; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b8f50fcf5da2..d2e020621d4c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -12957,6 +12957,46 @@ void init_cfs_rq(struct cfs_rq *cfs_rq) } #ifdef CONFIG_FAIR_GROUP_SCHED + +#ifdef CONFIG_SCHED_SLI +static void update_nr_iowait_fair(struct task_struct *p, long inc) +{ + unsigned long flags; + struct sched_entity *se = p->se.parent; + u64 clock; + + if (!schedstat_enabled()) + return; + + clock = __rq_clock_broken(cpu_rq(task_cpu(p))); + + for_each_sched_entity(se) { + /* + * Avoid locking rq->lock from try_to_wakeup hot path, in + * the price of poor consistency among cgroup hierarchy, + * which we can tolerate. + * While accessing se->on_rq does need to hold rq->lock. We + * already do, because when inc==1, the caller is __schedule + * and task_move_group_fair + */ + spin_lock_irqsave(&se->iowait_lock, flags); + if (!se->on_rq && !schedstat_val(se->cg_nr_iowait) && inc > 0) + __schedstat_set(se->cg_iowait_start, clock); + if (schedstat_val(se->cg_iowait_start) > 0 && + schedstat_val(se->cg_nr_iowait) + inc == 0) { + __schedstat_add(se->cg_iowait_sum, clock - + schedstat_val(se->cg_iowait_start)); + __schedstat_set(se->cg_iowait_start, 0); + } + __schedstat_add(se->cg_nr_iowait, inc); + spin_unlock_irqrestore(&se->iowait_lock, flags); + } +} +#else +static void update_nr_iowait_fair(struct task_struct *p, long inc) {} +#endif + + static void task_change_group_fair(struct task_struct *p) { /* @@ -12966,6 +13006,9 @@ static void task_change_group_fair(struct task_struct *p) if (READ_ONCE(p->__state) == TASK_NEW) return; + if (p->in_iowait) + update_nr_iowait_fair(p, -1); + detach_task_cfs_rq(p); #ifdef CONFIG_SMP @@ -12974,6 +13017,8 @@ static void task_change_group_fair(struct task_struct *p) #endif set_task_rq(p, task_cpu(p)); attach_task_cfs_rq(p); + if (p->in_iowait) + 
update_nr_iowait_fair(p, 1); } void free_fair_sched_group(struct task_group *tg) @@ -13110,6 +13155,7 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, update_load_set(&se->load, NICE_0_LOAD); se->parent = parent; seqlock_init(&se->idle_seqlock); + spin_lock_init(&se->iowait_lock); se->cg_idle_start = se->cg_init_time = cpu_clock(cpu); } @@ -13328,6 +13374,7 @@ DEFINE_SCHED_CLASS(fair) = { #endif #ifdef CONFIG_SCHED_SLI .update_nr_uninterruptible = update_nr_uninterruptible_fair, + .update_nr_iowait = update_nr_iowait_fair, #endif #ifdef CONFIG_UCLAMP_TASK diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 5fb45db0b744..8031f2463c0f 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2344,6 +2344,7 @@ struct sched_class { int (*task_is_throttled)(struct task_struct *p, int cpu); #endif void (*update_nr_uninterruptible)(struct task_struct *p, long inc); + void (*update_nr_iowait)(struct task_struct *p, long inc); }; static inline void put_prev_task(struct rq *rq, struct task_struct *prev) @@ -2357,6 +2358,11 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next) next->sched_class->set_next_task(rq, next, false); } +static inline void update_nr_iowait(struct task_struct *p, long inc) +{ + if (p->sched_class->update_nr_iowait) + p->sched_class->update_nr_iowait(p, inc); +} /* * Helper to define a sched_class instance; each one is placed in a separate -- Gitee From 33e2619473bd74f1626149160d090147d3871e5d Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Fri, 10 Sep 2021 20:44:15 +0800 Subject: [PATCH 0783/2138] anolis: sched: Fix cg_nr_iowait race condition ANBZ: #8657 Consider p->in_iowait and p->on_rq. Intuitively, they seem to be exclusive. But there are actually exact two exceptions: 1) and 3) in the time series: +----------------+ | |io_schedule_... 
| | |in_iowait=1 | | +----------------+ | | | 1) sees in_iowait=1 & on_rq=1 | +-rq_lock--------+ | | | | | __schedule out:| | | nr_iowait ++ | | | deactivate | | | on_rq = 0 | | +----------------+ | | | 2) sees in_iowait=1 & on_rq=0 | +----------------+ | | try_to_wake_up:| | | on_rq = 1 | | | nr_iowait -- | | +----------------+ | | | 3) sees in_iowait=1 & on_rq=1 | | +-rq_lock--------+ | | __schedule in: | | +----------------+ | +----------------+ | |in_iowait=0 | | +----------------+ | In case 1, there's a window between updating in_iowait and on_rq. And this window is not protected by rq->lock or p->pi_lock. And case 3 it's more obvious. on_rq is updated by try_to_wake_up, while in_iowait is updated by io_schedule in the near future. case 2 is a trivial one (meaning it's not buggy) In conclusion, there's no guarantee atomically updating on_rq and in_iowait. Next, we concentrate on nr_iowait, and take task_move_group_fair into consideration too. ==== case 1 ==== without bugfix: old cgroup new cgroup | task_move_group_fair -- -> -1 ++ -> 1 | __schedule out ++ -> 2 v try_to_wake_up -- -> 1 with bugfix: old cgroup new cgroup | task_move_group_fair | __schedule out ++ -> 1 v try_to_wake_up -- -> 0 ==== case 3 ==== without bugfix: old cgroup new cgroup | __schedule out ++ -> 1 | try_to_wake_up -- -> 0 v task_move_group_fair -- -> -1 ++ -> 1 with bugfix: old cgroup new cgroup | __schedule out ++ -> 1 | try_to_wake_up -- -> 0 v task_move_group_fair The bugfix is simple, use !on_rq to filter out case 1 and 3, since they are false-positive cases and should not update nr_iowait. 
Signed-off-by: Yihao Wu Signed-off-by: Yi Tao --- kernel/sched/fair.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index d2e020621d4c..7e5935df8862 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -13006,7 +13006,20 @@ static void task_change_group_fair(struct task_struct *p) if (READ_ONCE(p->__state) == TASK_NEW) return; - if (p->in_iowait) + /* + * p->in_iowait is obvious. If p is in_iowait, we should transfer + * iowait to the new cgroup, otherwise try_to_wake_up will decrease + * from the new cgroup, leaving old cgroup's nr_iowait to be 1, and + * new cgroup's nr_iowait to be -1 + * + * !p->on_rq is necessary too, because iowait and on_rq are not + * updated at the same time. After try_to_wake_up, p->in_iowait + * remains 1, while on_rq becomes 1. In this case, p is not at all + * in_iowait already, so don't be stupid to transfer nr_iowait. + * Similarly, when io_schedule, there's a window between setting + * p->in_iowait to 1 and setting p->on_rq to 0, don't either. + */ + if (p->in_iowait && !p->on_rq) update_nr_iowait_fair(p, -1); detach_task_cfs_rq(p); @@ -13017,7 +13030,8 @@ static void task_change_group_fair(struct task_struct *p) #endif set_task_rq(p, task_cpu(p)); attach_task_cfs_rq(p); - if (p->in_iowait) + /* Same as above */ + if (p->in_iowait && !p->on_rq) update_nr_iowait_fair(p, 1); } -- Gitee From 3e13d2b753f62b7fab94e91b78bbd0c4fea1cd26 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Fri, 4 Jun 2021 16:57:15 +0800 Subject: [PATCH 0784/2138] anolis: configs: Enable rich container ANBZ: #8657 Enable CONFIG_RICH_CONTAINER in all configure files, for both x86 and arm64. 
It is off by default, to turn it on: echo 1 > /proc/sys/kernel/rich_container_enable Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- arch/arm64/configs/anolis-debug_defconfig | 1 + arch/arm64/configs/anolis_defconfig | 1 + arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + init/Kconfig | 14 ++++++++++++++ 5 files changed, 18 insertions(+) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index a9dd69260bb5..65ebc3694ef2 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -196,6 +196,7 @@ CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y CONFIG_SCHED_SLI=y +CONFIG_RICH_CONTAINER=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 1935cdb56003..a563537ca907 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -195,6 +195,7 @@ CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y CONFIG_SCHED_SLI=y +CONFIG_RICH_CONTAINER=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index ba7a304585a4..6f4e977693dd 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -215,6 +215,7 @@ CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y CONFIG_SCHED_SLI=y +CONFIG_RICH_CONTAINER=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 1595b3a3616d..47269a2f1c76 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -214,6 +214,7 @@ CONFIG_CPUSETS=y CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y CONFIG_SCHED_SLI=y +CONFIG_RICH_CONTAINER=y 
CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y diff --git a/init/Kconfig b/init/Kconfig index 143335723252..41d0e2546ac0 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1148,6 +1148,20 @@ config SCHED_SLI Note CPU usage requires cpuacct and cpu cgroup subsys to be mounted together to take effect. +config RICH_CONTAINER + bool "Alibaba rich container" + depends on CGROUP_CPUACCT + depends on CFS_BANDWIDTH + depends on CPUSETS + default n + help + Make containers feel like VMs. Change the following interface + to reflect the information from containers, like: + "/proc/cpuinfo", "/proc/meminfo", "/sys/devices/system/cpu/online". + Then tools(i.e. top, free) can work in containers as in VMs. + Note that it requires "cpu,cpuacct,cpuset" shared v1 hierarchy to + work properly. + config CGROUP_CPUACCT bool "Simple CPU accounting controller" help -- Gitee From 05a8f67e1037eb248ed903bbd7419b5f5390d664 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Fri, 4 Jun 2021 17:19:11 +0800 Subject: [PATCH 0785/2138] anolis: x86: cpuinfo: Add cpuinfo support for rich container ANBZ: #8657 Make /proc/cpuinfo container aware. E.g. cpuset.cpus is 4-7, then it will show as the faked cpu0~cpu3 from the rich container. Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- arch/x86/kernel/cpu/proc.c | 32 +++++++++++++++------- include/linux/pid_namespace.h | 12 +++++++++ include/linux/sched.h | 13 +++++++++ kernel/sched/cpuacct.c | 50 +++++++++++++++++++++++++++++++++++ 4 files changed, 97 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index a0f81db51eac..2941134c47da 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -17,14 +17,22 @@ extern const char * const x86_vmx_flags[NVMXINTS*32]; * Get CPU information for use by the procfs. 
*/ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, - unsigned int cpu) + unsigned int cpu, unsigned int index, + bool rich_container, unsigned int total) { #ifdef CONFIG_SMP - seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); - seq_printf(m, "siblings\t: %d\n", - cpumask_weight(topology_core_cpumask(cpu))); - seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); - seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + if (rich_container) { + seq_puts(m, "physical id\t: 0\n"); + seq_printf(m, "siblings\t: %d\n", total); + seq_printf(m, "core id\t\t: %d\n", index); + seq_printf(m, "cpu cores\t: %d\n", total); + } else { + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); + seq_printf(m, "siblings\t: %d\n", + cpumask_weight(topology_core_cpumask(cpu))); + seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + } seq_printf(m, "apicid\t\t: %d\n", c->apicid); seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); #endif @@ -63,16 +71,20 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) static int show_cpuinfo(struct seq_file *m, void *v) { struct cpuinfo_x86 *c = v; - unsigned int cpu; + unsigned int cpu, index, total; int i; + bool rich_container = false; + + index = cpu = c->cpu_index; + if (check_rich_container(cpu, &index, &rich_container, &total)) + return 0; - cpu = c->cpu_index; seq_printf(m, "processor\t: %u\n" "vendor_id\t: %s\n" "cpu family\t: %d\n" "model\t\t: %u\n" "model name\t: %s\n", - cpu, + index, c->x86_vendor_id[0] ? 
c->x86_vendor_id : "unknown", c->x86, c->x86_model, @@ -95,7 +107,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (c->x86_cache_size) seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size); - show_cpuinfo_core(m, c, cpu); + show_cpuinfo_core(m, c, cpu, index, rich_container, total); show_cpuinfo_misc(m, c); seq_puts(m, "flags\t\t:"); diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index f9f9931e02d6..ea8b24c3b4ec 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -123,4 +123,16 @@ static inline bool task_is_in_init_pid_ns(struct task_struct *tsk) return task_active_pid_ns(tsk) == &init_pid_ns; } +#ifdef CONFIG_RICH_CONTAINER +static inline bool in_rich_container(struct task_struct *tsk) +{ + return !task_is_in_init_pid_ns(tsk) && child_cpuacct(tsk); +} +#else +static inline bool in_rich_container(struct task_struct *tsk) +{ + return false; +} +#endif + #endif /* _LINUX_PID_NS_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 149ec0e03277..fe295618778b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2532,4 +2532,17 @@ struct cpuacct_usage_result { u64 steal, iowait, idle, guest, guest_nice; }; +#ifdef CONFIG_RICH_CONTAINER +bool child_cpuacct(struct task_struct *tsk); +bool check_rich_container(unsigned int cpu, unsigned int *index, + bool *rich_container, unsigned int *total); + +#else +static inline bool check_rich_container(unsigned int cpu, unsigned int *index, + bool *rich_container, unsigned int *total) +{ + return false; +} +#endif + #endif diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 093d16cf3daa..1daefd079ea5 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -750,3 +750,53 @@ struct cgroup_subsys cpuacct_cgrp_subsys = { .legacy_cftypes = files, .early_init = true, }; + +#ifdef CONFIG_RICH_CONTAINER +bool child_cpuacct(struct task_struct *tsk) +{ + struct cpuacct *ca = task_ca(tsk); + + if (ca && ca != 
&root_cpuacct) + return true; + + return false; +} + +bool check_rich_container(unsigned int cpu, unsigned int *index, + bool *rich_container, unsigned int *total) +{ + struct cpumask cpuset_allowed; + struct task_struct *init_tsk; + bool in_rich; + int i, id = 0; + + rcu_read_lock(); + in_rich = in_rich_container(current); + rcu_read_unlock(); + if (!in_rich) + return false; + + *rich_container = true; + + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + cpuset_cpus_allowed(init_tsk, &cpuset_allowed); + put_task_struct(init_tsk); + + *total = cpumask_weight(&cpuset_allowed); + if (cpumask_test_cpu(cpu, &cpuset_allowed)) { + for_each_cpu(i, &cpuset_allowed) { + if (i == cpu) + break; + id++; + } + *index = id; + return false; + } + + /* Hide this cpu in the container */ + return true; +} +#endif -- Gitee From c45fcd8053ea3bcbe44096021f20ed2e354d2f1a Mon Sep 17 00:00:00 2001 From: zou cao Date: Mon, 23 Nov 2020 10:56:02 +0800 Subject: [PATCH 0786/2138] anolis: arm64: cpuinfo: Add cpuinfo support for rich container ANBZ: #8657 add arm64 cpuinfo support for rich container Signed-off-by: zou cao Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- arch/arm64/kernel/cpuinfo.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 98fda8500535..07ee88abfa0d 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -178,17 +178,24 @@ static int c_show(struct seq_file *m, void *v) { int i, j; bool compat = personality(current->personality) == PER_LINUX32; + unsigned int cpu, index, total; + bool rich_container = false; for_each_online_cpu(i) { struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); u32 midr = cpuinfo->reg_midr; + index = cpu = i; + + if (check_rich_container(cpu, &index, &rich_container, &total)) + continue; + /* * glibc reads /proc/cpuinfo to determine the 
number of * online processors, looking for lines beginning with * "processor". Give glibc what it expects. */ - seq_printf(m, "processor\t: %d\n", i); + seq_printf(m, "processor\t: %d\n", index); if (compat) seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n", MIDR_REVISION(midr), COMPAT_ELF_PLATFORM); -- Gitee From d41a6d70d346406d48aa90854fbe1d1c0cbb7066 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Mon, 23 Nov 2020 10:45:55 +0800 Subject: [PATCH 0787/2138] anolis: sysfs/cpu: Add online cpus support for rich container ANBZ: #8657 Make /sys/devices/system/cpu/online container aware. E.g. cpuset.cpus is 4-7, then it will show "4-7" in the rich container. Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- drivers/base/cpu.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index ef427ee787a9..925dacc2d266 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include "base.h" @@ -216,8 +218,31 @@ static ssize_t show_cpus_attr(struct device *dev, char *buf) { struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); + struct cpumask cpuset_allowed; + struct task_struct *init_tsk; + bool rich_container; + + rcu_read_lock(); + rich_container = in_rich_container(current); + if (rich_container) { + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + } else { + init_tsk = NULL; + } + rcu_read_unlock(); + + if (rich_container && !strcmp(attr->attr.name, "online")) + cpuset_cpus_allowed(init_tsk, &cpuset_allowed); + else + cpumask_copy(&cpuset_allowed, ca->map); + + if (init_tsk) + put_task_struct(init_tsk); - return cpumap_print_to_pagebuf(true, buf, ca->map); + return cpumap_print_to_pagebuf(true, buf, &cpuset_allowed); } #define _CPU_ATTR(name, map) \ -- Gitee From 
fb7c97210a760e821d31c75c26bccc54327978f2 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Sat, 5 Jun 2021 00:30:06 +0800 Subject: [PATCH 0788/2138] anolis: pidns: Support rich container switch on/off ANBZ: #8657 Introduce "/proc/sys/kernel/rich_container_enable" to control rich container at runtime. Default off. Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- include/linux/pid_namespace.h | 4 ++++ kernel/pid_namespace.c | 4 ++++ kernel/sysctl.c | 11 +++++++++++ 3 files changed, 19 insertions(+) diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index ea8b24c3b4ec..a71999081213 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -124,8 +124,12 @@ static inline bool task_is_in_init_pid_ns(struct task_struct *tsk) } #ifdef CONFIG_RICH_CONTAINER +extern int sysctl_rich_container_enable; static inline bool in_rich_container(struct task_struct *tsk) { + if (sysctl_rich_container_enable == 0) + return false; + return !task_is_in_init_pid_ns(tsk) && child_cpuacct(tsk); } #else diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index e9b2bb260ee6..3d488f64a72d 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -25,6 +25,10 @@ #include #include "pid_sysctl.h" +#ifdef CONFIG_RICH_CONTAINER +int sysctl_rich_container_enable; +#endif + static DEFINE_MUTEX(pid_caches_mutex); static struct kmem_cache *pid_ns_cachep; /* Write once array, filled from the beginning. 
*/ diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 47bdd8216fc5..6d0bcc3c205e 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2079,6 +2079,17 @@ static struct ctl_table kern_table[] = { .extra2 = SYSCTL_ONE, }, #endif /* CONFIG_SCHED_ACPU*/ +#ifdef CONFIG_RICH_CONTAINER + { + .procname = "rich_container_enable", + .data = &sysctl_rich_container_enable, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +#endif { } }; -- Gitee From 8ed3ec4c9e5a7f9f9edaa034cca4d39792b1f825 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Sun, 6 Jun 2021 15:54:11 +0800 Subject: [PATCH 0789/2138] anolis: proc/stat: Add top support for rich container ANBZ: #8657 Add in_rich_container() helper to support rich container. Make /proc/stat container aware. E.g. cpuset.cpus is 4-7, then it will show 4-cpu top data in the rich container. Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/proc/stat.c | 178 ++++++++++++++++++++++++++++++----------- include/linux/sched.h | 11 +++ kernel/sched/cpuacct.c | 17 ++++ 3 files changed, 158 insertions(+), 48 deletions(-) diff --git a/fs/proc/stat.c b/fs/proc/stat.c index ce8a751185f5..d2ec9dcddb31 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #ifndef arch_irq_stat_cpu #define arch_irq_stat_cpu(cpu) 0 @@ -81,13 +83,18 @@ static void show_all_irqs(struct seq_file *p) static int show_stat(struct seq_file *p, void *v) { - int i, j; + int i, j, seq = 0; u64 user, nice, system, idle, iowait, irq, softirq, steal; u64 guest, guest_nice; u64 sum = 0; u64 sum_softirq = 0; unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; struct timespec64 boottime; + struct cpumask cpuset_allowed; + unsigned int nr_runnable = 0; + struct task_struct *init_tsk = NULL; + struct cpuacct_usage_result res; + bool rich_container; user = nice = system = idle = iowait = irq = softirq = steal = 0; 
@@ -96,24 +103,54 @@ static int show_stat(struct seq_file *p, void *v) /* shift boot timestamp according to the timens offset */ timens_sub_boottime(&boottime); + rcu_read_lock(); + rich_container = in_rich_container(current); + if (rich_container) { + /* fix btime in containers */ + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + boottime.tv_sec += init_tsk->start_time / NSEC_PER_SEC; + + cpuset_cpus_allowed(init_tsk, &cpuset_allowed); + for_each_cpu(i, &cpuset_allowed) { + cpuacct_get_usage_result(init_tsk, i, &res); + user += res.user; + nice += res.nice; + system += res.system; + idle += res.idle; + iowait += res.iowait; + irq += res.irq; + softirq += res.softirq; + steal += res.steal; + guest += res.guest; + guest_nice += res.guest_nice; + } + } else { + for_each_possible_cpu(i) { + struct kernel_cpustat kcpustat; + u64 *cpustat = kcpustat.cpustat; + + kcpustat_cpu_fetch(&kcpustat, i); + + user += cpustat[CPUTIME_USER]; + nice += cpustat[CPUTIME_NICE]; + system += cpustat[CPUTIME_SYSTEM]; + idle += get_idle_time(&kcpustat, i); + iowait += get_iowait_time(&kcpustat, i); + irq += cpustat[CPUTIME_IRQ]; + softirq += cpustat[CPUTIME_SOFTIRQ]; + steal += cpustat[CPUTIME_STEAL]; + guest += cpustat[CPUTIME_GUEST]; + guest_nice += cpustat[CPUTIME_GUEST_NICE]; + } + } + rcu_read_unlock(); + for_each_possible_cpu(i) { - struct kernel_cpustat kcpustat; - u64 *cpustat = kcpustat.cpustat; - - kcpustat_cpu_fetch(&kcpustat, i); - - user += cpustat[CPUTIME_USER]; - nice += cpustat[CPUTIME_NICE]; - system += cpustat[CPUTIME_SYSTEM]; - idle += get_idle_time(&kcpustat, i); - iowait += get_iowait_time(&kcpustat, i); - irq += cpustat[CPUTIME_IRQ]; - softirq += cpustat[CPUTIME_SOFTIRQ]; - steal += cpustat[CPUTIME_STEAL]; - guest += cpustat[CPUTIME_GUEST]; - guest_nice += cpustat[CPUTIME_GUEST_NICE]; - sum += kstat_cpu_irqs_sum(i); - sum += arch_irq_stat_cpu(i); + sum += 
kstat_cpu_irqs_sum(i); + sum += arch_irq_stat_cpu(i); for (j = 0; j < NR_SOFTIRQS; j++) { unsigned int softirq_stat = kstat_softirqs_cpu(j, i); @@ -136,40 +173,85 @@ static int show_stat(struct seq_file *p, void *v) seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice)); seq_putc(p, '\n'); - for_each_online_cpu(i) { - struct kernel_cpustat kcpustat; - u64 *cpustat = kcpustat.cpustat; - - kcpustat_cpu_fetch(&kcpustat, i); - - /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ - user = cpustat[CPUTIME_USER]; - nice = cpustat[CPUTIME_NICE]; - system = cpustat[CPUTIME_SYSTEM]; - idle = get_idle_time(&kcpustat, i); - iowait = get_iowait_time(&kcpustat, i); - irq = cpustat[CPUTIME_IRQ]; - softirq = cpustat[CPUTIME_SOFTIRQ]; - steal = cpustat[CPUTIME_STEAL]; - guest = cpustat[CPUTIME_GUEST]; - guest_nice = cpustat[CPUTIME_GUEST_NICE]; - seq_printf(p, "cpu%d", i); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(user)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(system)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice)); - seq_putc(p, '\n'); + rcu_read_lock(); + if (rich_container) { + for_each_cpu(i, &cpuset_allowed) { + cpuacct_get_usage_result(init_tsk, i, &res); + + seq_printf(p, "cpu%d", seq++); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.user)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.nice)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.system)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.idle)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.iowait)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.irq)); + 
seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.softirq)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.steal)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.guest)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.guest_nice)); + seq_putc(p, '\n'); + } + } else { + for_each_online_cpu(i) { + struct kernel_cpustat kcpustat; + u64 *cpustat = kcpustat.cpustat; + + kcpustat_cpu_fetch(&kcpustat, i); + + /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ + user = cpustat[CPUTIME_USER]; + nice = cpustat[CPUTIME_NICE]; + system = cpustat[CPUTIME_SYSTEM]; + idle = get_idle_time(&kcpustat, i); + iowait = get_iowait_time(&kcpustat, i); + irq = cpustat[CPUTIME_IRQ]; + softirq = cpustat[CPUTIME_SOFTIRQ]; + steal = cpustat[CPUTIME_STEAL]; + guest = cpustat[CPUTIME_GUEST]; + guest_nice = cpustat[CPUTIME_GUEST_NICE]; + + seq_printf(p, "cpu%d", i); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(user)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(system)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(guest_nice)); + seq_putc(p, '\n'); + } } + rcu_read_unlock(); + seq_put_decimal_ull(p, "intr ", (unsigned long long)sum); show_all_irqs(p); + rcu_read_lock(); + if (rich_container) { + for_each_cpu(i, &cpuset_allowed) + nr_runnable += task_ca_running(init_tsk, i); + } else + nr_runnable = nr_running(); + rcu_read_unlock(); + + if (rich_container) + put_task_struct(init_tsk); + seq_printf(p, "\nctxt %llu\n" "btime %llu\n" @@ -179,7 +261,7 @@ static int show_stat(struct seq_file *p, void *v) nr_context_switches(), (unsigned long long)boottime.tv_sec, total_forks, - nr_running(), 
+ nr_runnable, nr_iowait()); seq_put_decimal_ull(p, "softirq ", (unsigned long long)sum_softirq); diff --git a/include/linux/sched.h b/include/linux/sched.h index fe295618778b..0a79e0dce188 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2534,10 +2534,21 @@ struct cpuacct_usage_result { #ifdef CONFIG_RICH_CONTAINER bool child_cpuacct(struct task_struct *tsk); +void cpuacct_get_usage_result(struct task_struct *tsk, int cpu, + struct cpuacct_usage_result *res); +unsigned long task_ca_running(struct task_struct *tsk, int cpu); bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total); #else +static inline void cpuacct_get_usage_result(struct task_struct *tsk, + int cpu, struct cpuacct_usage_result *res) { } + +static inline unsigned long task_ca_running(struct task_struct *tsk, int cpu) +{ + return 0; +} + static inline bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total) { diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 1daefd079ea5..18f6d31ffbbe 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -317,6 +317,13 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v) #define arch_idle_time(cpu) 0 #endif +static unsigned long ca_running(struct cpuacct *ca, int cpu); + +unsigned long task_ca_running(struct task_struct *tsk, int cpu) +{ + return ca_running(task_ca(tsk), cpu); +} + static inline struct task_group *cgroup_tg(struct cgroup *cgrp) { return container_of(global_cgroup_css(cgrp, cpu_cgrp_id), @@ -595,6 +602,16 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, res->guest_nice = kcpustat->cpustat[CPUTIME_GUEST_NICE]; } +void cpuacct_get_usage_result(struct task_struct *tsk, int cpu, + struct cpuacct_usage_result *res) +{ + struct cpuacct *ca = task_ca(tsk); + struct cgroup *cgrp = ca->css.cgroup; + struct task_group *tg = cgroup_tg(cgrp); + + __cpuacct_get_usage_result(ca, cpu, 
tg, res); +} + static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) { struct cpuacct *ca = css_ca(seq_css(sf)); -- Gitee From 437412a89ae3fe3bf98dba9dcb8278905c34c3f5 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Sun, 6 Jun 2021 17:05:24 +0800 Subject: [PATCH 0790/2138] anolis: proc/loadavg: Add load support for rich container ANBZ: #8657 Fetch nr_running and nr_uninterruptible from scheduler(cpu cgroup) and use them to calculate per-cgroup load like system global load. Note that calc_cgroup_load() walks the whole hierarchy and calculate load periodically, it can cause overhead if too many cpu cgroups. Make /proc/loadavg container aware. "uptime" command will show the per-cgroup load of the rich container. Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/proc/loadavg.c | 26 ++++++++++++-- include/linux/sched.h | 5 +++ kernel/sched/cpuacct.c | 81 ++++++++++++++++++++++++++++++++++++++++++ kernel/sched/loadavg.c | 2 ++ kernel/sched/sched.h | 3 ++ 5 files changed, 115 insertions(+), 2 deletions(-) diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index 817981e57223..7205049d2935 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c @@ -9,19 +9,41 @@ #include #include #include +#include #include "internal.h" static int loadavg_proc_show(struct seq_file *m, void *v) { unsigned long avnrun[3]; + unsigned int nr_R = 0; + struct cpumask cpuset_allowed; + int i; - get_avenrun(avnrun, FIXED_1/200, 0); + rcu_read_lock(); + if (in_rich_container(current)) { + struct task_struct *init_tsk; + + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + + get_cgroup_avenrun(init_tsk, avnrun, FIXED_1/200, 0); + cpuset_cpus_allowed(init_tsk, &cpuset_allowed); + for_each_cpu(i, &cpuset_allowed) + nr_R += task_ca_running(init_tsk, i); + put_task_struct(init_tsk); + } else { + get_avenrun(avnrun, FIXED_1/200, 0); + nr_R = nr_running(); + } + 
rcu_read_unlock(); seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %u/%d %d\n", LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]), LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), - nr_running(), nr_threads, + nr_R, nr_threads, idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); return 0; } diff --git a/include/linux/sched.h b/include/linux/sched.h index 0a79e0dce188..0c133b3a04a5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2537,6 +2537,8 @@ bool child_cpuacct(struct task_struct *tsk); void cpuacct_get_usage_result(struct task_struct *tsk, int cpu, struct cpuacct_usage_result *res); unsigned long task_ca_running(struct task_struct *tsk, int cpu); +void get_cgroup_avenrun(struct task_struct *tsk, unsigned long *loads, + unsigned long offset, int shift); bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total); @@ -2549,6 +2551,9 @@ static inline unsigned long task_ca_running(struct task_struct *tsk, int cpu) return 0; } +static inline void get_cgroup_avenrun(struct task_struct *tsk, + unsigned long *loads, unsigned long offset, int shift) { } + static inline bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total) { diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 18f6d31ffbbe..61c6e962cc81 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -32,6 +32,7 @@ struct cpuacct { u64 __percpu *cpuusage; struct cpuacct_prev_cputime __percpu *prev_cputime; struct kernel_cpustat __percpu *cpustat; + unsigned long avenrun[3]; }; static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css) @@ -319,6 +320,23 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v) static unsigned long ca_running(struct cpuacct *ca, int cpu); +static void __get_cgroup_avenrun(struct cpuacct *ca, unsigned long *loads, + unsigned long offset, int shift) +{ + unsigned long *avenrun; + + avenrun = 
ca->avenrun; + loads[0] = (avenrun[0] + offset) << shift; + loads[1] = (avenrun[1] + offset) << shift; + loads[2] = (avenrun[2] + offset) << shift; +} + +void get_cgroup_avenrun(struct task_struct *tsk, unsigned long *loads, + unsigned long offset, int shift) +{ + __get_cgroup_avenrun(task_ca(tsk), loads, offset, shift); +} + unsigned long task_ca_running(struct task_struct *tsk, int cpu) { return ca_running(task_ca(tsk), cpu); @@ -517,6 +535,57 @@ void cpuacct_cpuset_changed(struct cgroup *cgrp, struct cpumask *deleted, rcu_read_unlock(); } +static void cpuacct_calc_load(struct cpuacct *acct) +{ + if (acct != &root_cpuacct) { + long active = 0; + int cpu; + + for_each_possible_cpu(cpu) { + active += ca_running(acct, cpu); + active += ca_uninterruptible(acct, cpu); + } + active = active > 0 ? active * FIXED_1 : 0; + acct->avenrun[0] = calc_load(acct->avenrun[0], EXP_1, active); + acct->avenrun[1] = calc_load(acct->avenrun[1], EXP_5, active); + acct->avenrun[2] = calc_load(acct->avenrun[2], EXP_15, active); + } else { + acct->avenrun[0] = avenrun[0]; + acct->avenrun[1] = avenrun[1]; + acct->avenrun[2] = avenrun[2]; + } +} + +/* + * Currently we walk the whole cpuacct tree to perform per-cgroup + * load calculation, but it can cause overhead if there're too many + * cgroups. + * + * TODO: A better way to avoid the possible overhead. + * Consider NO_HZ. 
+ */ +void calc_cgroup_load(void) +{ + struct cgroup_subsys_state *css; + struct cpuacct *acct; + + rcu_read_lock(); + css_for_each_descendant_pre(css, &root_cpuacct.css) { + acct = NULL; + if (css && css_tryget(css)) + acct = container_of(css, struct cpuacct, css); + rcu_read_unlock(); + if (acct) { + cpuacct_calc_load(acct); + css_put(&acct->css); + } + rcu_read_lock(); + if (!css) + break; + } + rcu_read_unlock(); +} + static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, struct task_group *tg, struct cpuacct_usage_result *res) { @@ -617,6 +686,7 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) struct cpuacct *ca = css_ca(seq_css(sf)); struct cgroup *cgrp = seq_css(sf)->cgroup; u64 user, nice, system, idle, iowait, irq, softirq, steal, guest; + unsigned long load, avnrun[3]; unsigned long nr_run = 0, nr_uninter = 0; int cpu; @@ -649,6 +719,8 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) nr_run += ca_running(ca, cpu); nr_uninter += ca_uninterruptible(ca, cpu); } + + __get_cgroup_avenrun(ca, avnrun, FIXED_1/200, 0); } else { struct kernel_cpustat *kcpustat; @@ -668,6 +740,8 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) nr_run = nr_running(); nr_uninter = nr_uninterruptible(); + + get_avenrun(avnrun, FIXED_1/200, 0); } seq_printf(sf, "user %lld\n", nsec_to_clock_t(user)); @@ -680,6 +754,13 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) seq_printf(sf, "steal %lld\n", nsec_to_clock_t(steal)); seq_printf(sf, "guest %lld\n", nsec_to_clock_t(guest)); + load = LOAD_INT(avnrun[0]) * 100 + LOAD_FRAC(avnrun[0]); + seq_printf(sf, "load average(1min) %lld\n", (u64)load); + load = LOAD_INT(avnrun[1]) * 100 + LOAD_FRAC(avnrun[1]); + seq_printf(sf, "load average(5min) %lld\n", (u64)load); + load = LOAD_INT(avnrun[2]) * 100 + LOAD_FRAC(avnrun[2]); + seq_printf(sf, "load average(15min) %lld\n", (u64)load); + seq_printf(sf, "nr_running %lld\n", (u64)nr_run); if ((long) nr_uninter < 0) 
nr_uninter = 0; diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index 52c8f8226b0d..fc4bee183fce 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -371,6 +371,8 @@ void calc_global_load(void) WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ); + calc_cgroup_load(); + /* * In case we went to NO_HZ for multiple LOAD_FREQ intervals * catch up in bulk. diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 8031f2463c0f..c597b62d4526 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3586,6 +3586,9 @@ extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); #ifdef CONFIG_SCHED_SLI extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); +void calc_cgroup_load(void); +#else +static inline void calc_cgroup_load(void) { } #endif #endif /* _KERNEL_SCHED_SCHED_H */ -- Gitee From aa1dc20645262e29acd0ea5fb6bbd7b654807d70 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Sun, 6 Jun 2021 17:53:57 +0800 Subject: [PATCH 0791/2138] anolis: proc/uptime: Add uptime support for rich container ANBZ: #8657 Make /proc/uptime container aware. "uptime" in the container will show the time elapsed based on the container's initial task creation time. 
Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/proc/uptime.c | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index b5343d209381..95622fd62885 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include "internal.h" static int uptime_proc_show(struct seq_file *m, void *v) @@ -17,16 +19,37 @@ static int uptime_proc_show(struct seq_file *m, void *v) u32 rem; int i; + ktime_get_boottime_ts64(&uptime); + timens_add_boottime(&uptime); + idle_nsec = 0; - for_each_possible_cpu(i) { - struct kernel_cpustat kcs; - kcpustat_cpu_fetch(&kcs, i); - idle_nsec += get_idle_time(&kcs, i); - } + rcu_read_lock(); + if (in_rich_container(current)) { + struct task_struct *init_tsk; + struct cpuacct_usage_result res; - ktime_get_boottime_ts64(&uptime); - timens_add_boottime(&uptime); + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + + for_each_possible_cpu(i) { + cpuacct_get_usage_result(init_tsk, i, &res); + idle_nsec += res.idle; + } + uptime = timespec64_sub(uptime, + ns_to_timespec64(init_tsk->start_time)); + put_task_struct(init_tsk); + } else { + for_each_possible_cpu(i) { + struct kernel_cpustat kcs; + + kcpustat_cpu_fetch(&kcs, i); + idle_nsec += get_idle_time(&kcs, i); + } + } + rcu_read_unlock(); idle.tv_sec = div_u64_rem(idle_nsec, NSEC_PER_SEC, &rem); idle.tv_nsec = rem; -- Gitee From 3bb06bfba989d8a6b74d85ba40064bb04b1e7120 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Sun, 6 Jun 2021 18:13:07 +0800 Subject: [PATCH 0792/2138] anolis: pidstat: Add task uptime support for rich container ANBZ: #8657 Make /proc/pid/stat task uptime container aware. "ps" in the container will show the time elapsed based on the container's initial task creation time. 
Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/proc/array.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/fs/proc/array.c b/fs/proc/array.c index 34a47fb0c57f..d7142ab120df 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -484,6 +484,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, int exit_code = task->exit_code; struct signal_struct *sig = task->signal; unsigned int seq = 1; + struct task_struct *init_tsk; state = *get_task_state(task); vsize = eip = esp = 0; @@ -586,6 +587,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, start_time = nsec_to_clock_t(timens_add_boottime_ns(task->start_boottime)); + /* + * While uptime in container is fixed to container start time, + * task start time need to be fixed too, otherwise wrong start + * time will show in "ps". + */ + rcu_read_lock(); + if (in_rich_container(current)) { + init_tsk = task_active_pid_ns(current)->child_reaper; + start_time -= nsec_to_clock_t(init_tsk->start_boottime); + } + rcu_read_unlock(); + seq_put_decimal_ull(m, "", pid_nr_ns(pid, ns)); seq_puts(m, " ("); proc_task_name(m, task, false); -- Gitee From e6de70a2920f9e9b13fb753072b2be3fa777a588 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Fri, 2 Aug 2019 18:12:09 +0800 Subject: [PATCH 0793/2138] anolis: fs,quota: Restrict privileged hardlimit in rich container ANBZ: #8657 For CAP_SYS_RESOURCE privileged users, fs quota's hardlimit is ignored. But in alibaba rich container, we expect all users including privileged ones having its quota limited. 
Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/quota/dquot.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 67562c78e57d..f4bb4accdd77 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -83,6 +83,7 @@ #include "../internal.h" /* ugh */ #include +#include /* * There are five quota SMP locks: @@ -1293,6 +1294,13 @@ static void flush_warnings(struct dquot_warn *warn) static int ignore_hardlimit(struct dquot *dquot) { struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; + bool rich_container; + + rcu_read_lock(); + rich_container = in_rich_container(current); + rcu_read_unlock(); + if (rich_container) + return 0; return capable(CAP_SYS_RESOURCE) && (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || -- Gitee From 4aff18fa48994a94764c6e5bc4ed3b94f76e5a81 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Sun, 6 Jun 2021 19:02:10 +0800 Subject: [PATCH 0794/2138] anolis: sched: Add SLI switch for cpuacct ANBZ: #8657 Add an interface to switch on/off some heavy calculation of CPU SLI features. This interface allows user to control of which cpuacct SLI needs to be tracked. Huge overhead can be reduced, when there are too many cgroups. The switch is on for rich containers and pod cgroups by default. 
Signed-off-by: Yihao Wu Signed-off-by: Shanpei Chen Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- include/linux/sched.h | 6 +++ kernel/fork.c | 2 + kernel/sched/cpuacct.c | 111 +++++++++++++++++++++++++++++++++-------- 3 files changed, 98 insertions(+), 21 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 0c133b3a04a5..4e0591536738 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2561,4 +2561,10 @@ static inline bool check_rich_container(unsigned int cpu, unsigned int *index, } #endif +#ifdef CONFIG_SCHED_SLI +void create_rich_container_reaper(struct task_struct *tsk); +#else +static inline void create_rich_container_reaper(struct task_struct *tsk) { } +#endif + #endif diff --git a/kernel/fork.c b/kernel/fork.c index f31cd315aad5..73e3e6e4b5b9 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2733,6 +2733,8 @@ __latent_entropy struct task_struct *copy_process( proc_fork_connector(p); sched_post_fork(p); cgroup_post_fork(p, args); + if (likely(p->pid) && is_child_reaper(pid)) + create_rich_container_reaper(p); perf_event_fork(p); trace_task_newtask(p, clone_flags); diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 61c6e962cc81..0f9f16b979ca 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -32,6 +32,10 @@ struct cpuacct { u64 __percpu *cpuusage; struct cpuacct_prev_cputime __percpu *prev_cputime; struct kernel_cpustat __percpu *cpustat; +#ifdef CONFIG_SCHED_SLI + struct list_head sli_list; + bool sli_enabled; +#endif unsigned long avenrun[3]; }; @@ -59,6 +63,57 @@ static struct cpuacct root_cpuacct = { .cpuusage = &root_cpuacct_cpuusage, }; +#ifdef CONFIG_SCHED_SLI +static DEFINE_SPINLOCK(sli_ca_lock); +LIST_HEAD(sli_ca_list); + +static void ca_enable_sli(struct cpuacct *ca, bool val) +{ + spin_lock(&sli_ca_lock); + if (val && !READ_ONCE(ca->sli_enabled)) + list_add_tail_rcu(&ca->sli_list, &sli_ca_list); + else if (!val && READ_ONCE(ca->sli_enabled)) + 
list_del_rcu(&ca->sli_list); + WRITE_ONCE(ca->sli_enabled, val); + spin_unlock(&sli_ca_lock); +} + +void create_rich_container_reaper(struct task_struct *tsk) +{ + struct cpuacct *ca; + struct cpuacct *parent_ca; + struct cgroup_subsys_state *css; + + if (thread_group_leader(tsk)) { + rcu_read_lock(); + css = task_css(tsk, cpuacct_cgrp_id); + ca = css_ca(css); + if (!ca || !in_rich_container(tsk)) { + rcu_read_unlock(); + return; + } + + ca_enable_sli(ca, true); + parent_ca = css_ca(css->parent); + if (parent_ca && parent_ca != &root_cpuacct) + ca_enable_sli(parent_ca, true); + rcu_read_unlock(); + } +} + +static int enable_sli_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + ca_enable_sli(css_ca(css), !!val); + return 0; +} + +static u64 enable_sli_read(struct cgroup_subsys_state *css, struct cftype *cft) +{ + return READ_ONCE(css_ca(css)->sli_enabled); +} +#endif + /* Create a new CPU accounting group */ static struct cgroup_subsys_state * cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) @@ -85,6 +140,10 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) if (!ca->prev_cputime) goto out_free_cpustat; +#ifdef CONFIG_SCHED_SLI + INIT_LIST_HEAD(&ca->sli_list); +#endif + for_each_possible_cpu(i) { prev_cputime_init( &per_cpu_ptr(ca->prev_cputime, i)->prev_cputime1); @@ -104,6 +163,13 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) return ERR_PTR(-ENOMEM); } +#ifdef CONFIG_SCHED_SLI +static void cpuacct_css_offline(struct cgroup_subsys_state *css) +{ + ca_enable_sli(css_ca(css), false); +} +#endif + /* Destroy an existing CPU accounting group */ static void cpuacct_css_free(struct cgroup_subsys_state *css) { @@ -557,32 +623,16 @@ static void cpuacct_calc_load(struct cpuacct *acct) } /* - * Currently we walk the whole cpuacct tree to perform per-cgroup - * load calculation, but it can cause overhead if there're too many - * cgroups. - * - * TODO: A better way to avoid the possible overhead. - * Consider NO_HZ. 
+ * We walk cpuacct whose SLI is enabled to perform per-cgroup load calculation + * the overhead is acceptable if SLI is not enabled for most of the cgroups. */ void calc_cgroup_load(void) { - struct cgroup_subsys_state *css; - struct cpuacct *acct; + struct cpuacct *ca; rcu_read_lock(); - css_for_each_descendant_pre(css, &root_cpuacct.css) { - acct = NULL; - if (css && css_tryget(css)) - acct = container_of(css, struct cpuacct, css); - rcu_read_unlock(); - if (acct) { - cpuacct_calc_load(acct); - css_put(&acct->css); - } - rcu_read_lock(); - if (!css) - break; - } + list_for_each_entry_rcu(ca, &sli_ca_list, sli_list) + cpuacct_calc_load(ca); rcu_read_unlock(); } @@ -809,6 +859,11 @@ static struct cftype files[] = { .name = "proc_stat", .seq_show = cpuacct_proc_stats_show, }, + { + .name = "enable_sli", + .read_u64 = enable_sli_read, + .write_u64 = enable_sli_write + }, #endif { } /* terminate */ }; @@ -842,9 +897,23 @@ void cpuacct_account_field(struct task_struct *tsk, int index, u64 val) __this_cpu_add(ca->cpustat->cpustat[index], val); } +static void cpuacct_cgroup_attach(struct cgroup_taskset *tset) +{ + struct task_struct *task; + struct cgroup_subsys_state *css; + + cgroup_taskset_for_each(task, css, tset) + if (task->pid && is_child_reaper(task_pid(task))) + create_rich_container_reaper(task); +} + struct cgroup_subsys cpuacct_cgrp_subsys = { .css_alloc = cpuacct_css_alloc, .css_free = cpuacct_css_free, +#ifdef CONFIG_SCHED_SLI + .css_offline = cpuacct_css_offline, +#endif + .attach = cpuacct_cgroup_attach, .legacy_cftypes = files, .early_init = true, }; -- Gitee From 91e80c1f1fe4af75cad1207b8d3e84bfd9db565a Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Sun, 6 Jun 2021 19:13:58 +0800 Subject: [PATCH 0795/2138] anolis: sched: introduce asynchronous cgroup load calculation. ANBZ: #8657 Nowadays there are too much cgroups with deep hierarchy, to calculate the load for all of them together take a very long time, with irq disabled. 
This leads to a big performance issue in our production, so move the work into a dedicated kthread. Although the work still takes a long time, it won't disable irq and preemption is still possible. The side effect of this approach is that the load calc period of cpuacct will not be precisely 5HZ; however, since the calculation itself takes too long, either way has this problem anyway. The feature is enabled by default, and can be turned off with: echo 0 > /proc/async_load_calc Signed-off-by: Michael Wang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 156 +++++++++++++++++++++++++++++++++++++++++ kernel/sched/loadavg.c | 3 +- kernel/sched/sched.h | 5 ++ 3 files changed, 163 insertions(+), 1 deletion(-) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 0f9f16b979ca..983d97d050a6 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -35,6 +35,7 @@ struct cpuacct { #ifdef CONFIG_SCHED_SLI struct list_head sli_list; bool sli_enabled; + u64 next_load_update; #endif unsigned long avenrun[3]; }; @@ -918,6 +919,161 @@ struct cgroup_subsys cpuacct_cgrp_subsys = { .early_init = true, }; +#ifdef CONFIG_SCHED_SLI +static DEFINE_STATIC_KEY_FALSE(async_load_calc); + +bool async_load_calc_enabled(void) +{ + return static_branch_likely(&async_load_calc); +} + +static int async_load_calc_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", async_load_calc_enabled()); + return 0; +} + +static int async_load_calc_open(struct inode *inode, struct file *file) +{ + return single_open(file, async_load_calc_show, NULL); +} + +static void async_calc_cgroup_load(void) +{ + int cnt; + struct cpuacct *ca; + +again: + cnt = 1; + rcu_read_lock(); + list_for_each_entry_rcu(ca, &sli_ca_list, sli_list) { + unsigned long next_update = ca->next_load_update; + + /* + * Need per ca check since after break the list + * could have been changed, otherwise the loop + * will be endless. 
+ */ + if (time_before(jiffies, next_update + 10)) + continue; + + cpuacct_calc_load(ca); + ca->next_load_update = jiffies + LOAD_FREQ; + + /* Take a break for every 100 ca */ + if (cnt++ >= 100) { + rcu_read_unlock(); + cond_resched(); + goto again; + } + } + rcu_read_unlock(); +} + +int load_calc_func(void *unsed) +{ + unsigned long next_update = jiffies + LOAD_FREQ; + + while (!kthread_should_stop()) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ/5); + set_current_state(TASK_RUNNING); + + if (time_before(jiffies, next_update + 10)) + continue; + + async_calc_cgroup_load(); + next_update += LOAD_FREQ; + } + + return 0; +} + +static struct task_struct *load_calc_p; + +static int mod_async_load_calc(bool enable) +{ + if (enable == async_load_calc_enabled()) + return 0; + + if (enable) { + load_calc_p = kthread_create(load_calc_func, NULL, "load_calc"); + if (!load_calc_p) + return -ENOMEM; + + wake_up_process(load_calc_p); + static_branch_enable(&async_load_calc); + } else { + kthread_stop(load_calc_p); + load_calc_p = NULL; + + static_branch_disable(&async_load_calc); + } + + return 0; +} + +static DEFINE_MUTEX(load_calc_mutex); + +static ssize_t async_load_calc_write(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + char val = -1; + int ret = 0; + + if (count < 1 || *ppos) { + ret = -EINVAL; + goto out; + } + + if (copy_from_user(&val, ubuf, 1)) { + ret = -EFAULT; + goto out; + } + + mutex_lock(&load_calc_mutex); + + switch (val) { + case '0': + ret = mod_async_load_calc(false); + break; + case '1': + ret = mod_async_load_calc(true); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&load_calc_mutex); +out: + return ret ? 
ret : count; +} + +static const struct proc_ops async_load_calc_opt = { + .proc_open = async_load_calc_open, + .proc_read = seq_read, + .proc_write = async_load_calc_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + +static int __init async_load_calc_init(void) +{ + if (!proc_create("async_load_calc", 0600, NULL, + &async_load_calc_opt)) { + pr_err("Failed to register async_load_calc interface\n"); + return 0; + } + + if (mod_async_load_calc(true)) + pr_err("Failed to enable async_load_calc\n"); + + return 0; +} +late_initcall_sync(async_load_calc_init); +#endif + #ifdef CONFIG_RICH_CONTAINER bool child_cpuacct(struct task_struct *tsk) { diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index fc4bee183fce..c11a84d4676d 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -371,7 +371,8 @@ void calc_global_load(void) WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ); - calc_cgroup_load(); + if (!async_load_calc_enabled()) + calc_cgroup_load(); /* * In case we went to NO_HZ for multiple LOAD_FREQ intervals diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c597b62d4526..0b77d8567d71 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3587,8 +3587,13 @@ extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); void calc_cgroup_load(void); +bool async_load_calc_enabled(void); #else static inline void calc_cgroup_load(void) { } +static inline bool async_load_calc_enabled(void) +{ + return false; +} #endif #endif /* _KERNEL_SCHED_SCHED_H */ -- Gitee From 37c135f62bbb6223c36a1f84ac6623a035a17934 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Fri, 4 Jun 2021 18:47:36 +0800 Subject: [PATCH 0796/2138] anolis: meminfo: Add meminfo support for rich container ANBZ: #8657 Make /proc/meminfo container aware. 
"free" command will show the memory cgroup information of the rich container. Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/proc/meminfo.c | 137 ++++++++++++++++++++++++------------- include/linux/memcontrol.h | 9 +++ include/linux/vmstat.h | 17 +++++ mm/memcontrol.c | 75 ++++++++++++++++++++ 4 files changed, 192 insertions(+), 46 deletions(-) diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 45af9a989d40..e3246083c8ce 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -20,6 +20,7 @@ #include #include #include "internal.h" +#include void __attribute__((weak)) arch_report_meminfo(struct seq_file *m) { @@ -35,43 +36,89 @@ static int meminfo_proc_show(struct seq_file *m, void *v) { struct sysinfo i; unsigned long committed; - long cached; - long available; - unsigned long pages[NR_LRU_LISTS]; unsigned long sreclaimable, sunreclaim; int lru; - si_meminfo(&i); - si_swapinfo(&i); - committed = vm_memory_committed(); - - cached = global_node_page_state(NR_FILE_PAGES) - - total_swapcache_pages() - i.bufferram; - if (cached < 0) - cached = 0; + struct mem_cgroup *memcg = NULL; + struct sysinfo_ext ext; + +#ifdef CONFIG_MEMCG + rcu_read_lock(); + if (in_rich_container(current)) { + struct task_struct *init_tsk; + + /* + * current may be in a subcgroup, use reaper instead. + * We assume the reaper always be in the container's + * top group. 
+ */ + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + + memcg = mem_cgroup_from_task(init_tsk); + if (mem_cgroup_is_root(memcg)) + memcg = NULL; + else + css_get(&memcg->css); + put_task_struct(init_tsk); + } + rcu_read_unlock(); +#endif - for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) - pages[lru] = global_node_page_state(NR_LRU_BASE + lru); + if (!memcg) { + si_meminfo(&i); + si_swapinfo(&i); - available = si_mem_available(); + ext.cached = global_node_page_state(NR_FILE_PAGES) - + total_swapcache_pages() - i.bufferram; + if (ext.cached < 0) + ext.cached = 0; + + for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) { + ext.lrupages[lru] = + global_node_page_state(NR_LRU_BASE + lru); + } + ext.available = si_mem_available(); + ext.file_dirty = global_node_page_state(NR_FILE_DIRTY); + ext.writeback = global_node_page_state(NR_WRITEBACK); + ext.anon_mapped = global_node_page_state(NR_ANON_MAPPED); + ext.file_mapped = global_node_page_state(NR_FILE_MAPPED); + ext.slab_reclaimable = + global_node_page_state(NR_SLAB_RECLAIMABLE_B); + ext.slab_unreclaimable = + global_node_page_state(NR_SLAB_UNRECLAIMABLE_B); + ext.kernel_stack_kb = + global_node_page_state(NR_KERNEL_STACK_KB); + ext.writeback_temp = global_node_page_state(NR_WRITEBACK_TEMP); + ext.anon_thps = global_node_page_state(NR_ANON_THPS); + ext.shmem_thps = global_node_page_state(NR_SHMEM_THPS); + ext.shmem_pmd_mapped = + global_node_page_state(NR_SHMEM_PMDMAPPED); + } else { + memcg_meminfo(memcg, &i, &ext); + } + + committed = percpu_counter_read_positive(&vm_committed_as); sreclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B); sunreclaim = global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B); show_val_kb(m, "MemTotal: ", i.totalram); show_val_kb(m, "MemFree: ", i.freeram); - show_val_kb(m, "MemAvailable: ", available); + show_val_kb(m, "MemAvailable: ", ext.available); show_val_kb(m, "Buffers: ", 
i.bufferram); - show_val_kb(m, "Cached: ", cached); + show_val_kb(m, "Cached: ", ext.cached); show_val_kb(m, "SwapCached: ", total_swapcache_pages()); - show_val_kb(m, "Active: ", pages[LRU_ACTIVE_ANON] + - pages[LRU_ACTIVE_FILE]); - show_val_kb(m, "Inactive: ", pages[LRU_INACTIVE_ANON] + - pages[LRU_INACTIVE_FILE]); - show_val_kb(m, "Active(anon): ", pages[LRU_ACTIVE_ANON]); - show_val_kb(m, "Inactive(anon): ", pages[LRU_INACTIVE_ANON]); - show_val_kb(m, "Active(file): ", pages[LRU_ACTIVE_FILE]); - show_val_kb(m, "Inactive(file): ", pages[LRU_INACTIVE_FILE]); - show_val_kb(m, "Unevictable: ", pages[LRU_UNEVICTABLE]); + show_val_kb(m, "Active: ", ext.lrupages[LRU_ACTIVE_ANON] + + ext.lrupages[LRU_ACTIVE_FILE]); + show_val_kb(m, "Inactive: ", ext.lrupages[LRU_INACTIVE_ANON] + + ext.lrupages[LRU_INACTIVE_FILE]); + show_val_kb(m, "Active(anon): ", ext.lrupages[LRU_ACTIVE_ANON]); + show_val_kb(m, "Inactive(anon): ", ext.lrupages[LRU_INACTIVE_ANON]); + show_val_kb(m, "Active(file): ", ext.lrupages[LRU_ACTIVE_FILE]); + show_val_kb(m, "Inactive(file): ", ext.lrupages[LRU_INACTIVE_FILE]); + show_val_kb(m, "Unevictable: ", ext.lrupages[LRU_UNEVICTABLE]); show_val_kb(m, "Mlocked: ", global_zone_page_state(NR_MLOCK)); #ifdef CONFIG_HIGHMEM @@ -95,22 +142,19 @@ static int meminfo_proc_show(struct seq_file *m, void *v) (unsigned long)atomic_read(&zswap_stored_pages) << (PAGE_SHIFT - 10)); #endif - show_val_kb(m, "Dirty: ", - global_node_page_state(NR_FILE_DIRTY)); - show_val_kb(m, "Writeback: ", - global_node_page_state(NR_WRITEBACK)); - show_val_kb(m, "AnonPages: ", - global_node_page_state(NR_ANON_MAPPED)); - show_val_kb(m, "Mapped: ", - global_node_page_state(NR_FILE_MAPPED)); + show_val_kb(m, "Dirty: ", ext.file_dirty); + show_val_kb(m, "Writeback: ", ext.writeback); + show_val_kb(m, "AnonPages: ", ext.anon_mapped); + show_val_kb(m, "Mapped: ", ext.file_mapped); show_val_kb(m, "Shmem: ", i.sharedram); show_val_kb(m, "KReclaimable: ", sreclaimable + 
global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE)); - show_val_kb(m, "Slab: ", sreclaimable + sunreclaim); - show_val_kb(m, "SReclaimable: ", sreclaimable); - show_val_kb(m, "SUnreclaim: ", sunreclaim); - seq_printf(m, "KernelStack: %8lu kB\n", - global_node_page_state(NR_KERNEL_STACK_KB)); + show_val_kb(m, "Slab: ", + ext.slab_reclaimable + ext.slab_unreclaimable); + + show_val_kb(m, "SReclaimable: ", ext.slab_reclaimable); + show_val_kb(m, "SUnreclaim: ", ext.slab_unreclaimable); + seq_printf(m, "KernelStack: %8lu kB\n", ext.kernel_stack_kb); #ifdef CONFIG_SHADOW_CALL_STACK seq_printf(m, "ShadowCallStack:%8lu kB\n", global_node_page_state(NR_KERNEL_SCS_KB)); @@ -123,8 +167,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) show_val_kb(m, "NFS_Unstable: ", 0); show_val_kb(m, "Bounce: ", global_zone_page_state(NR_BOUNCE)); - show_val_kb(m, "WritebackTmp: ", - global_node_page_state(NR_WRITEBACK_TEMP)); + show_val_kb(m, "WritebackTmp: ", ext.writeback_temp); show_val_kb(m, "CommitLimit: ", vm_commit_limit()); show_val_kb(m, "Committed_AS: ", committed); seq_printf(m, "VmallocTotal: %8lu kB\n", @@ -141,12 +184,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE - show_val_kb(m, "AnonHugePages: ", - global_node_page_state(NR_ANON_THPS)); - show_val_kb(m, "ShmemHugePages: ", - global_node_page_state(NR_SHMEM_THPS)); - show_val_kb(m, "ShmemPmdMapped: ", - global_node_page_state(NR_SHMEM_PMDMAPPED)); + show_val_kb(m, "AnonHugePages: ", ext.anon_thps * HPAGE_PMD_NR); + show_val_kb(m, "ShmemHugePages: ", ext.shmem_thps * HPAGE_PMD_NR); + show_val_kb(m, "ShmemPmdMapped: ", ext.shmem_pmd_mapped * HPAGE_PMD_NR); show_val_kb(m, "FileHugePages: ", global_node_page_state(NR_FILE_THPS)); show_val_kb(m, "FilePmdMapped: ", @@ -168,6 +208,11 @@ static int meminfo_proc_show(struct seq_file *m, void *v) arch_report_meminfo(m); +#ifdef CONFIG_MEMCG + if (memcg) + css_put(&memcg->css); +#endif + return 0; } diff --git 
a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b1fdb1554f2f..3e7448d71c96 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1150,6 +1150,9 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned); +void memcg_meminfo(struct mem_cgroup *memcg, + struct sysinfo *info, struct sysinfo_ext *ext); + #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 @@ -1566,6 +1569,12 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) { } +static inline void +memcg_meminfo(struct mem_cgroup *memcg, + struct sysinfo *info, struct sysinfo_ext *ext) +{ +} + static inline void split_page_memcg(struct page *head, unsigned int nr) { } diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 3219b368db79..b2c512708086 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -34,6 +34,23 @@ struct reclaim_stat { unsigned nr_lazyfree_fail; }; +struct sysinfo_ext { + unsigned long lrupages[NR_LRU_LISTS]; + unsigned long cached; + unsigned long available; + unsigned long file_dirty; + unsigned long writeback; + unsigned long anon_mapped; + unsigned long file_mapped; + unsigned long slab_reclaimable; + unsigned long slab_unreclaimable; + unsigned long kernel_stack_kb; + unsigned long writeback_temp; + unsigned long anon_thps; + unsigned long shmem_thps; + unsigned long shmem_pmd_mapped; +}; + enum writeback_stat_item { NR_DIRTY_THRESHOLD, NR_DIRTY_BG_THRESHOLD, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 8adc1af822c4..c2951593221b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -7982,4 +7982,79 @@ static int __init mem_cgroup_swap_init(void) } subsys_initcall(mem_cgroup_swap_init); +void memcg_meminfo(struct mem_cgroup *memcg, + struct sysinfo *info, struct sysinfo_ext *ext) +{ + struct mem_cgroup *iter; + unsigned long limit, memsw_limit, usage, totalram_pages_tmp; + unsigned long pagecache, memcg_wmark, swap_size; + int i; + + 
ext->cached = memcg_page_state(memcg, NR_FILE_PAGES); + ext->file_dirty = memcg_page_state(memcg, NR_FILE_DIRTY); + ext->writeback = memcg_page_state(memcg, NR_WRITEBACK); + ext->anon_mapped = memcg_page_state(memcg, NR_ANON_MAPPED); + ext->file_mapped = memcg_page_state(memcg, NR_FILE_MAPPED); + ext->slab_reclaimable = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B); + ext->slab_unreclaimable = + memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B); + ext->kernel_stack_kb = memcg_page_state(memcg, NR_KERNEL_STACK_KB); + ext->writeback_temp = 0; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + ext->anon_thps = memcg_page_state(memcg, NR_ANON_THPS); +#endif + ext->shmem_thps = 0; + ext->shmem_pmd_mapped = 0; + + swap_size = memcg_page_state(memcg, MEMCG_SWAP); + limit = memsw_limit = PAGE_COUNTER_MAX; + for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) { + limit = min(limit, iter->memory.max); + memsw_limit = min(memsw_limit, iter->memsw.max); + } + usage = mem_cgroup_usage(memcg, false); + totalram_pages_tmp = totalram_pages(); + info->totalram = limit > totalram_pages_tmp ? totalram_pages_tmp : limit; + info->sharedram = memcg_page_state(memcg, NR_SHMEM); + info->freeram = info->totalram - usage; + /* these are not accounted by memcg yet */ + /* if give bufferram the global value, free may show a quite + * large number in the ±buffers/caches row, the reason is + * it's equal to group_used - global_buffer - group_cached, + * if global_buffer > group_used, we get a rewind large value. 
+ */ + info->bufferram = 0; + info->totalhigh = totalhigh_pages(); + info->freehigh = nr_free_highpages(); + info->mem_unit = PAGE_SIZE; + + /* fill in swinfo */ + si_swapinfo(info); + if (memsw_limit < info->totalswap) + info->totalswap = memsw_limit; + info->freeswap = info->totalswap - swap_size; + + for (i = 0; i < NR_LRU_LISTS; i++) + ext->lrupages[i] = memcg_page_state(memcg, NR_LRU_BASE + i); + + /* Like what si_mem_available() does */ + + // TODO: memcg_wmark depends on background async page reclaim, waiting + // for it. + + //memcg_wmark = memcg->memory.wmark_high; + //if (memcg->wmark_ratio && info->totalram > memcg_wmark) + // memcg_wmark = info->totalram - memcg_wmark; + //else + // memcg_wmark = 0; + memcg_wmark = 0; + + pagecache = ext->lrupages[LRU_ACTIVE_FILE] + + ext->lrupages[LRU_INACTIVE_FILE]; + pagecache -= min(pagecache / 2, memcg_wmark); + ext->available = info->freeram + pagecache; + ext->available += ext->slab_reclaimable - + min(ext->slab_reclaimable / 2, memcg_wmark); +} + #endif /* CONFIG_SWAP */ -- Gitee From 82ccd3a1a9478f71604ad414745b4abde6482bbd Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Tue, 8 Jun 2021 18:42:07 +0800 Subject: [PATCH 0797/2138] anolis: sched/fair: Add parent_wait_contrib statistics ANBZ: #8657 Add a new field "parent_wait_contrib" in struct sched_statistics, it means parent wait_sum during this se running, i.e. preempted by other groups outside the parent group. For a cgroup se: "sum_exec_runtime" stands for "ON CPU" time. "sum_exec_runtime + wait_sum" stands for "Serve" time. "wait_sum" stand for "Queue" time. "parent_wait_contrib" stands for "Queue other" time. "wait_sum - parent_wait_contrib" stands for "Queue self" time. This is useful for containers collision analysis. 
Signed-off-by: Xunlei Pang Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- include/linux/sched.h | 2 ++ kernel/sched/debug.c | 2 ++ kernel/sched/fair.c | 35 +++++++++++++++++++++++++++++++++-- 3 files changed, 37 insertions(+), 2 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 4e0591536738..6c4036c9c27b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -516,6 +516,8 @@ struct sched_statistics { u64 wait_max; u64 wait_count; u64 wait_sum; + u64 parent_wait_sum_base; + u64 parent_wait_contrib; u64 iowait_count; u64 iowait_sum; diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 0baa877597df..075750fbf7b1 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -519,6 +519,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group PN_SCHEDSTAT(slice_max); PN_SCHEDSTAT(wait_max); PN_SCHEDSTAT(wait_sum); + PN_SCHEDSTAT(parent_wait_contrib); P_SCHEDSTAT(wait_count); } @@ -1022,6 +1023,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, PN_SCHEDSTAT(slice_max); PN_SCHEDSTAT(wait_max); PN_SCHEDSTAT(wait_sum); + PN_SCHEDSTAT(parent_wait_contrib); P_SCHEDSTAT(wait_count); PN_SCHEDSTAT(iowait_sum); P_SCHEDSTAT(iowait_count); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7e5935df8862..6da066561111 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1250,8 +1250,10 @@ static void update_curr_fair(struct rq *rq) static inline void update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) { - struct sched_statistics *stats; + struct sched_statistics *stats, *pstats; struct task_struct *p = NULL; + u64 parent_wait_sum, delta, clock = rq_clock(rq_of(cfs_rq)); + struct sched_entity *pse = parent_entity(se); if (!schedstat_enabled()) return; @@ -1262,13 +1264,28 @@ update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) p = task_of(se); 
__update_stats_wait_start(rq_of(cfs_rq), p, stats); + + if (!pse) + return; + + pstats = __schedstats_from_se(pse); + + if (schedstat_val(pstats->wait_start)) + delta = clock - schedstat_val(pstats->wait_start); + else + delta = 0; + parent_wait_sum = schedstat_val(pstats->wait_sum) + delta; + __schedstat_set(stats->parent_wait_sum_base, parent_wait_sum); } static inline void update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) { - struct sched_statistics *stats; + struct sched_statistics *stats, *pstats; struct task_struct *p = NULL; + struct sched_entity *pse = parent_entity(se); + u64 parent_wait_sum, clock = rq_clock(rq_of(cfs_rq)); + u64 delta; if (!schedstat_enabled()) return; @@ -1288,6 +1305,20 @@ update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) p = task_of(se); __update_stats_wait_end(rq_of(cfs_rq), p, stats); + + if (!pse) + return; + + pstats = __schedstats_from_se(pse); + + /* pick_next_task_fair() can update parent wait_start to 0 */ + if (schedstat_val(pstats->wait_start)) + delta = clock - schedstat_val(pstats->wait_start); + else + delta = 0; + parent_wait_sum = schedstat_val(pstats->wait_sum) + delta; + delta = parent_wait_sum - schedstat_val(stats->parent_wait_sum_base); + __schedstat_add(stats->parent_wait_contrib, delta); } static inline void -- Gitee From e66181967e65ede6ac8c4ccc12ee864ab7787854 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Tue, 8 Jun 2021 18:10:25 +0800 Subject: [PATCH 0798/2138] anolis: sched/fair: Add sched_cfs_statistics to export some ANBZ: #8657 Export the following cfs statistics of cgroups: cat cpuacct.sched_cfs_statistics [Serve time] [On CPU time] [Queue other time] [Queue sibling time] [Queue max time] These values include throttle time exported by "cgroup/cpu/cpu.stat". 
Signed-off-by: Xunlei Pang Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 45 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 983d97d050a6..b7d2e32c2387 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -819,6 +819,47 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) return 0; } + +static int cpuacct_sched_cfs_show(struct seq_file *sf, void *v) +{ + struct cgroup *cgrp = seq_css(sf)->cgroup; + struct task_group *tg = cgroup_tg(cgrp); + struct sched_entity *se; + struct sched_statistics *stats; + int cpu; + u64 wait_max = 0, wait_sum = 0, wait_sum_other = 0, exec_sum = 0; + + if (!schedstat_enabled()) + goto out_show; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + if (unlikely(!tg)) { + WARN_ONCE(1, "cgroup \"cpu,cpuacct\" are not bound together"); + goto rcu_unlock_show; + } + + for_each_online_cpu(cpu) { + se = tg->se[cpu]; + if (!se) + continue; + stats = __schedstats_from_se(se); + exec_sum += schedstat_val(se->sum_exec_runtime); + wait_sum_other += + schedstat_val(stats->parent_wait_contrib); + wait_sum += schedstat_val(stats->wait_sum); + wait_max = max(wait_max, schedstat_val(stats->wait_max)); + } +rcu_unlock_show: + rcu_read_unlock(); +out_show: + /* [Serve time] [On CPU time] [Queue other time] [Queue sibling time] [Queue max time] */ + seq_printf(sf, "%lld %lld %lld %lld %lld\n", + exec_sum + wait_sum, exec_sum, wait_sum_other, + wait_sum - wait_sum_other, wait_max); + + return 0; +} #endif static struct cftype files[] = { @@ -865,6 +906,10 @@ static struct cftype files[] = { .read_u64 = enable_sli_read, .write_u64 = enable_sli_write }, + { + .name = "sched_cfs_statistics", + .seq_show = cpuacct_sched_cfs_show, + }, #endif { } /* terminate */ }; -- Gitee From 5cb86bc68132c130b19858c0bb1435bef15280bc Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Tue, 8 Jun 2021 19:42:36 +0800 
Subject: [PATCH 0799/2138] anolis: cpuacct: make cpuacct record nr_migrations ANBZ: #8657 This patch makes cpuacct to be able to monitor the number of across-cpu-migrations. Output as follows: [root@caspar /sys/fs/cgroup/cpuacct] # cat cpuacct.proc_stat user 7727 nice 4 nr_migrations 48432 Signed-off-by: Zhu Yanhai Signed-off-by: Caspar Zhang Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/core.c | 1 + kernel/sched/cpuacct.c | 44 ++++++++++++++++++++++++++++++++++++++++++ kernel/sched/sched.h | 2 ++ 3 files changed, 47 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d60d9a02bc8d..79d3dc1c21f4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3407,6 +3407,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) p->se.nr_migrations++; rseq_migrate(p); sched_mm_cid_migrate_from(p); + task_ca_increase_nr_migrations(p); perf_event_task_migrate(p); } diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index b7d2e32c2387..f66127f79c39 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -25,6 +25,13 @@ struct cpuacct_prev_cputime { struct prev_cputime prev_cputime2; /* user and nice */ } ____cacheline_aligned; +#ifdef CONFIG_SCHED_SLI +/* Maintain various statistics */ +struct cpuacct_alistats { + u64 nr_migrations; +} ____cacheline_aligned; +#endif + /* track CPU usage of a group of tasks and its child groups */ struct cpuacct { struct cgroup_subsys_state css; @@ -33,6 +40,7 @@ struct cpuacct { struct cpuacct_prev_cputime __percpu *prev_cputime; struct kernel_cpustat __percpu *cpustat; #ifdef CONFIG_SCHED_SLI + struct cpuacct_alistats __percpu *alistats; struct list_head sli_list; bool sli_enabled; u64 next_load_update; @@ -58,12 +66,32 @@ static inline struct cpuacct *parent_ca(struct cpuacct *ca) static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage); static DEFINE_PER_CPU(struct cpuacct_prev_cputime, root_cpuacct_prev_cputime); +#ifdef CONFIG_SCHED_SLI 
+static DEFINE_PER_CPU(struct cpuacct_alistats, root_alistats); +#endif + static struct cpuacct root_cpuacct = { .cpustat = &kernel_cpustat, .prev_cputime = &root_cpuacct_prev_cputime, .cpuusage = &root_cpuacct_cpuusage, +#ifdef CONFIG_SCHED_SLI + .alistats = &root_alistats, +#endif }; +#ifdef CONFIG_SCHED_SLI +void task_ca_increase_nr_migrations(struct task_struct *tsk) +{ + struct cpuacct *ca; + + rcu_read_lock(); + ca = task_ca(tsk); + if (ca) + this_cpu_ptr(ca->alistats)->nr_migrations++; + rcu_read_unlock(); +} +#endif + #ifdef CONFIG_SCHED_SLI static DEFINE_SPINLOCK(sli_ca_lock); LIST_HEAD(sli_ca_list); @@ -143,6 +171,10 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) #ifdef CONFIG_SCHED_SLI INIT_LIST_HEAD(&ca->sli_list); + + ca->alistats = alloc_percpu(struct cpuacct_alistats); + if (!ca->alistats) + goto out_free_pre_cputime; #endif for_each_possible_cpu(i) { @@ -154,6 +186,8 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) return &ca->css; +out_free_pre_cputime: + free_percpu(ca->prev_cputime); out_free_cpustat: free_percpu(ca->cpustat); out_free_cpuusage: @@ -179,6 +213,9 @@ static void cpuacct_css_free(struct cgroup_subsys_state *css) free_percpu(ca->prev_cputime); free_percpu(ca->cpustat); free_percpu(ca->cpuusage); +#ifdef CONFIG_SCHED_SLI + free_percpu(ca->alistats); +#endif kfree(ca); } @@ -737,6 +774,8 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) struct cpuacct *ca = css_ca(seq_css(sf)); struct cgroup *cgrp = seq_css(sf)->cgroup; u64 user, nice, system, idle, iowait, irq, softirq, steal, guest; + u64 nr_migrations = 0; + struct cpuacct_alistats *alistats; unsigned long load, avnrun[3]; unsigned long nr_run = 0, nr_uninter = 0; int cpu; @@ -767,6 +806,8 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) iowait += res.iowait; idle += res.idle; + alistats = per_cpu_ptr(ca->alistats, cpu); + nr_migrations += alistats->nr_migrations; nr_run += ca_running(ca, cpu); nr_uninter += 
ca_uninterruptible(ca, cpu); } @@ -787,6 +828,8 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) idle += get_idle_time(kcpustat, cpu); iowait += get_iowait_time(kcpustat, cpu); steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + alistats = per_cpu_ptr(ca->alistats, cpu); + nr_migrations += alistats->nr_migrations; } nr_run = nr_running(); @@ -816,6 +859,7 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) if ((long) nr_uninter < 0) nr_uninter = 0; seq_printf(sf, "nr_uninterruptible %lld\n", (u64)nr_uninter); + seq_printf(sf, "nr_migrations %lld\n", (u64)nr_migrations); return 0; } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 0b77d8567d71..61a6571819cc 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3586,9 +3586,11 @@ extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); #ifdef CONFIG_SCHED_SLI extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); +extern void task_ca_increase_nr_migrations(struct task_struct *tsk); void calc_cgroup_load(void); bool async_load_calc_enabled(void); #else +static inline void task_ca_increase_nr_migrations(struct task_struct *tsk) { } static inline void calc_cgroup_load(void) { } static inline bool async_load_calc_enabled(void) { -- Gitee From a9e85350f2b6c5cceaf1920d7156ac78df57686f Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Wed, 9 Jun 2021 10:39:58 +0800 Subject: [PATCH 0800/2138] anolis: Introduce cfs scheduling latency histograms ANBZ: #8657 Export wait_latency in "cpuacct.wait_latency", which indicates the time that tasks in a cpuacct cgroup wait on a cfs_rq to be scheduled. This is like "perf sched", but it gives smaller overhead. So it can be used as monitor constantly. wait_latency is useful to debug application's high RT problem. It can tell if it's caused by scheduling or not. 
If it is, loadavg can tell if it's caused by bad scheduling bahaviour or system overloads. System admins can also use wait_latency to define SLA. To ensure SLA is guaranteed, there are various ways to decrease wait_latency. This feature is disabled by default for performance concerns. It can be switched on dynamically by "echo 0 > /proc/cpusli/sched_lat_enable" Example: $ cat /sys/fs/cgroup/cpuacct/a/cpuacct.wait_latency 0-1ms: 4139 1-4ms: 317 4-7ms: 568 7-10ms: 0 10-100ms: 42324 100-500ms: 9131 500-1000ms: 95 1000-5000ms: 134 5000-10000ms: 0 >=10000ms: 0 total(ms): 4256455 Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 255 ++++++++++++++++++++++++++++++++++++++++- kernel/sched/fair.c | 6 +- kernel/sched/sched.h | 3 + 3 files changed, 262 insertions(+), 2 deletions(-) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index f66127f79c39..0e60d1ad84fd 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -32,6 +32,72 @@ struct cpuacct_alistats { } ____cacheline_aligned; #endif +enum sched_lat_stat_item { + SCHED_LAT_WAIT, + SCHED_LAT_NR_STAT +}; + +/* + * [0, 1ms) + * [1, 4ms) + * [4, 7ms) + * [7, 10ms) + * [10, 100ms) + * [100, 500ms) + * [500, 1000ms) + * [1000, 5000ms) + * [5000, 10000ms) + * [10000ms, INF) + * total(ms) + */ +/* Scheduler latency histogram distribution, in milliseconds */ +enum sched_lat_count_t { + SCHED_LAT_0_1, + SCHED_LAT_1_4, + SCHED_LAT_4_7, + SCHED_LAT_7_10, + SCHED_LAT_10_100, + SCHED_LAT_100_500, + SCHED_LAT_500_1000, + SCHED_LAT_1000_5000, + SCHED_LAT_5000_10000, + SCHED_LAT_10000_INF, + SCHED_LAT_TOTAL, + SCHED_LAT_NR_COUNT, +}; + +struct sched_cgroup_lat_stat_cpu { + unsigned long item[SCHED_LAT_NR_STAT][SCHED_LAT_NR_COUNT]; +}; + +static inline enum sched_lat_count_t get_sched_lat_count_idx(u64 msecs) +{ + enum sched_lat_count_t idx; + + if (msecs < 1) + idx = SCHED_LAT_0_1; + else if (msecs < 4) + idx = SCHED_LAT_1_4; + else if (msecs < 7) + idx = 
SCHED_LAT_4_7; + else if (msecs < 10) + idx = SCHED_LAT_7_10; + else if (msecs < 100) + idx = SCHED_LAT_10_100; + else if (msecs < 500) + idx = SCHED_LAT_100_500; + else if (msecs < 1000) + idx = SCHED_LAT_500_1000; + else if (msecs < 5000) + idx = SCHED_LAT_1000_5000; + else if (msecs < 10000) + idx = SCHED_LAT_5000_10000; + else + idx = SCHED_LAT_10000_INF; + + return idx; +} + /* track CPU usage of a group of tasks and its child groups */ struct cpuacct { struct cgroup_subsys_state css; @@ -41,6 +107,7 @@ struct cpuacct { struct kernel_cpustat __percpu *cpustat; #ifdef CONFIG_SCHED_SLI struct cpuacct_alistats __percpu *alistats; + struct sched_cgroup_lat_stat_cpu __percpu *lat_stat_cpu; struct list_head sli_list; bool sli_enabled; u64 next_load_update; @@ -68,6 +135,7 @@ static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage); static DEFINE_PER_CPU(struct cpuacct_prev_cputime, root_cpuacct_prev_cputime); #ifdef CONFIG_SCHED_SLI static DEFINE_PER_CPU(struct cpuacct_alistats, root_alistats); +static DEFINE_PER_CPU(struct sched_cgroup_lat_stat_cpu, root_lat_stat_cpu); #endif static struct cpuacct root_cpuacct = { @@ -76,10 +144,83 @@ static struct cpuacct root_cpuacct = { .cpuusage = &root_cpuacct_cpuusage, #ifdef CONFIG_SCHED_SLI .alistats = &root_alistats, + .lat_stat_cpu = &root_lat_stat_cpu, #endif }; #ifdef CONFIG_SCHED_SLI +static DEFINE_STATIC_KEY_TRUE(cpuacct_no_sched_lat); +static int cpuacct_sched_lat_enabled_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", !static_key_enabled(&cpuacct_no_sched_lat)); + return 0; +} + +static int cpuacct_sched_lat_enabled_open(struct inode *inode, + struct file *file) +{ + return single_open(file, cpuacct_sched_lat_enabled_show, NULL); +} + +static ssize_t cpuacct_sched_lat_enabled_write(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + char val = -1; + int ret = count; + + if (count < 1 || *ppos) { + ret = -EINVAL; + goto out; + } + + if (copy_from_user(&val, ubuf, 1)) { + ret = 
-EFAULT; + goto out; + } + + switch (val) { + case '0': + static_branch_enable(&cpuacct_no_sched_lat); + break; + case '1': + static_branch_disable(&cpuacct_no_sched_lat); + break; + default: + ret = -EINVAL; + } + +out: + return ret; +} + +static const struct proc_ops cpuacct_sched_lat_enabled_fops = { + .proc_open = cpuacct_sched_lat_enabled_open, + .proc_read = seq_read, + .proc_write = cpuacct_sched_lat_enabled_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + +static int __init init_cpuacct_sched_lat_enabled(void) +{ + struct proc_dir_entry *ca_dir, *sched_lat_enabled_file; + + ca_dir = proc_mkdir("cpusli", NULL); + if (!ca_dir) + return -ENOMEM; + + sched_lat_enabled_file = proc_create("sched_lat_enabled", 0600, + ca_dir, &cpuacct_sched_lat_enabled_fops); + if (!sched_lat_enabled_file) { + remove_proc_entry("cpusli", NULL); + return -ENOMEM; + } + + return 0; +} +device_initcall(init_cpuacct_sched_lat_enabled); + void task_ca_increase_nr_migrations(struct task_struct *tsk) { struct cpuacct *ca; @@ -90,6 +231,25 @@ void task_ca_increase_nr_migrations(struct task_struct *tsk) this_cpu_ptr(ca->alistats)->nr_migrations++; rcu_read_unlock(); } + +void cpuacct_update_latency(struct task_struct *tsk, u64 delta) +{ + enum sched_lat_count_t idx; + struct cpuacct *ca; + unsigned int msecs; + + if (static_branch_likely(&cpuacct_no_sched_lat)) + return; + + rcu_read_lock(); + ca = task_ca(tsk); + msecs = delta >> 20; /* Proximately to speed up */ + idx = get_sched_lat_count_idx(msecs); + this_cpu_inc(ca->lat_stat_cpu->item[SCHED_LAT_WAIT][idx]); + this_cpu_add(ca->lat_stat_cpu->item[SCHED_LAT_WAIT][SCHED_LAT_TOTAL], + delta); + rcu_read_unlock(); +} #endif #ifdef CONFIG_SCHED_SLI @@ -170,11 +330,16 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) goto out_free_cpustat; #ifdef CONFIG_SCHED_SLI + ca->lat_stat_cpu = alloc_percpu(struct sched_cgroup_lat_stat_cpu); + if (!ca->lat_stat_cpu) + goto out_free_pre_cputime; + + 
INIT_LIST_HEAD(&ca->sli_list); ca->alistats = alloc_percpu(struct cpuacct_alistats); if (!ca->alistats) - goto out_free_pre_cputime; + goto out_free_lat_stat_cpu; #endif for_each_possible_cpu(i) { @@ -186,8 +351,12 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) return &ca->css; +#ifdef CONFIG_SCHED_SLI +out_free_lat_stat_cpu: + free_percpu(ca->lat_stat_cpu); out_free_pre_cputime: free_percpu(ca->prev_cputime); +#endif out_free_cpustat: free_percpu(ca->cpustat); out_free_cpuusage: @@ -215,6 +384,7 @@ static void cpuacct_css_free(struct cgroup_subsys_state *css) free_percpu(ca->cpuusage); #ifdef CONFIG_SCHED_SLI free_percpu(ca->alistats); + free_percpu(ca->lat_stat_cpu); #endif kfree(ca); } @@ -904,6 +1074,83 @@ static int cpuacct_sched_cfs_show(struct seq_file *sf, void *v) return 0; } + +#define SCHED_LAT_STAT_SMP_WRITE(name, sidx) \ +static void smp_write_##name(void *info) \ +{ \ + struct cpuacct *ca = (struct cpuacct *)info; \ + int i; \ + \ + for (i = SCHED_LAT_0_1; i < SCHED_LAT_NR_COUNT; i++) \ + this_cpu_write(ca->lat_stat_cpu->item[sidx][i], 0); \ +} \ + +SCHED_LAT_STAT_SMP_WRITE(sched_wait_latency, SCHED_LAT_WAIT); + +smp_call_func_t smp_sched_lat_write_funcs[] = { + smp_write_sched_wait_latency +}; + +static int sched_lat_stat_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct cpuacct *ca = css_ca(css); + enum sched_lat_stat_item idx = cft->private; + smp_call_func_t func = smp_sched_lat_write_funcs[idx]; + + if (val != 0) + return -EINVAL; + + func((void *)ca); + smp_call_function(func, (void *)ca, 1); + + return 0; +} + +static u64 sched_lat_stat_gather(struct cpuacct *ca, + enum sched_lat_stat_item sidx, + enum sched_lat_count_t cidx) +{ + u64 sum = 0; + int cpu; + + for_each_possible_cpu(cpu) + sum += per_cpu_ptr(ca->lat_stat_cpu, cpu)->item[sidx][cidx]; + + return sum; +} + +static int sched_lat_stat_show(struct seq_file *sf, void *v) +{ + struct cpuacct *ca = css_ca(seq_css(sf)); + enum 
sched_lat_stat_item s = seq_cft(sf)->private; + + /* CFS scheduling latency cgroup and task histgrams */ + seq_printf(sf, "0-1ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_0_1)); + seq_printf(sf, "1-4ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_1_4)); + seq_printf(sf, "4-7ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_4_7)); + seq_printf(sf, "7-10ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_7_10)); + seq_printf(sf, "10-100ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_10_100)); + seq_printf(sf, "100-500ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_100_500)); + seq_printf(sf, "500-1000ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_500_1000)); + seq_printf(sf, "1000-5000ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_1000_5000)); + seq_printf(sf, "5000-10000ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_5000_10000)); + seq_printf(sf, ">=10000ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_10000_INF)); + seq_printf(sf, "total(ms): \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_TOTAL) / 1000000); + + return 0; +} #endif static struct cftype files[] = { @@ -954,6 +1201,12 @@ static struct cftype files[] = { .name = "sched_cfs_statistics", .seq_show = cpuacct_sched_cfs_show, }, + { + .name = "wait_latency", + .private = SCHED_LAT_WAIT, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, #endif { } /* terminate */ }; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6da066561111..d40e00995b16 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1292,6 +1292,8 @@ update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) stats = __schedstats_from_se(se); + delta = clock - schedstat_val(stats->wait_start); + /* * When the sched_schedstat changes from 0 to 1, some sched se * maybe already in the runqueue, the se->statistics.wait_start @@ -1301,8 +1303,10 @@ update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct 
sched_entity *se) if (unlikely(!schedstat_val(stats->wait_start))) return; - if (entity_is_task(se)) + if (entity_is_task(se)) { p = task_of(se); + cpuacct_update_latency(p, delta); + } __update_stats_wait_end(rq_of(cfs_rq), p, stats); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 61a6571819cc..c77a28fe02f3 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3587,10 +3587,13 @@ extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); extern void task_ca_increase_nr_migrations(struct task_struct *tsk); +void cpuacct_update_latency(struct task_struct *tsk, u64 delta); void calc_cgroup_load(void); bool async_load_calc_enabled(void); #else static inline void task_ca_increase_nr_migrations(struct task_struct *tsk) { } +static inline void cpuacct_update_latency(struct task_struct *tsk, + u64 delta) { } static inline void calc_cgroup_load(void) { } static inline bool async_load_calc_enabled(void) { -- Gitee From 645d12f5ce52f680e3a465ad10deff2fcc351af8 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Wed, 9 Jun 2021 11:03:59 +0800 Subject: [PATCH 0801/2138] anolis: sched: Add cgroup-level blocked time histograms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #8657 This patch measures time that tasks in cpuacct cgroup blocks. There are two types: blocked due to IO, and others like locks. And they are exported in"cpuacct.ioblock_latency" and "cpuacct.block_latency" respectively. According to histogram, we know the detailed distribution of the duration. And according to total(ms), we know the percentage of time tasks spent off rq, waiting for resources: (△ioblock_latency.total(ms) + △block_latency.total(ms)) / △wall_time The interface output format is identical to cpuacct.wait_latency. 
Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 52 +++++++++++++++++++++++++++++++++++++++++- kernel/sched/sched.h | 3 +++ kernel/sched/stats.c | 1 + 3 files changed, 55 insertions(+), 1 deletion(-) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 0e60d1ad84fd..34b40e7b6d57 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -34,6 +34,8 @@ struct cpuacct_alistats { enum sched_lat_stat_item { SCHED_LAT_WAIT, + SCHED_LAT_BLOCK, + SCHED_LAT_IOBLOCK, SCHED_LAT_NR_STAT }; @@ -232,6 +234,34 @@ void task_ca_increase_nr_migrations(struct task_struct *tsk) rcu_read_unlock(); } +void task_ca_update_block(struct task_struct *tsk, u64 runtime) +{ + int idx; + enum sched_lat_stat_item s; + struct cpuacct *ca; + unsigned int msecs; + + if (static_branch_likely(&cpuacct_no_sched_lat)) + return; + + rcu_read_lock(); + ca = task_ca(tsk); + if (!ca) { + rcu_read_unlock(); + return; + } + if (tsk->in_iowait) + s = SCHED_LAT_IOBLOCK; + else + s = SCHED_LAT_BLOCK; + + msecs = runtime >> 20; /* Proximately to speed up */ + idx = get_sched_lat_count_idx(msecs); + this_cpu_inc(ca->lat_stat_cpu->item[s][idx]); + this_cpu_add(ca->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], runtime); + rcu_read_unlock(); +} + void cpuacct_update_latency(struct task_struct *tsk, u64 delta) { enum sched_lat_count_t idx; @@ -243,6 +273,10 @@ void cpuacct_update_latency(struct task_struct *tsk, u64 delta) rcu_read_lock(); ca = task_ca(tsk); + if (!ca) { + rcu_read_unlock(); + return; + } msecs = delta >> 20; /* Proximately to speed up */ idx = get_sched_lat_count_idx(msecs); this_cpu_inc(ca->lat_stat_cpu->item[SCHED_LAT_WAIT][idx]); @@ -1086,9 +1120,13 @@ static void smp_write_##name(void *info) \ } \ SCHED_LAT_STAT_SMP_WRITE(sched_wait_latency, SCHED_LAT_WAIT); +SCHED_LAT_STAT_SMP_WRITE(sched_block_latency, SCHED_LAT_BLOCK); +SCHED_LAT_STAT_SMP_WRITE(sched_ioblock_latency, SCHED_LAT_IOBLOCK); smp_call_func_t 
smp_sched_lat_write_funcs[] = { - smp_write_sched_wait_latency + smp_write_sched_wait_latency, + smp_write_sched_block_latency, + smp_write_sched_ioblock_latency }; static int sched_lat_stat_write(struct cgroup_subsys_state *css, @@ -1207,6 +1245,18 @@ static struct cftype files[] = { .write_u64 = sched_lat_stat_write, .seq_show = sched_lat_stat_show }, + { + .name = "block_latency", + .private = SCHED_LAT_BLOCK, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "ioblock_latency", + .private = SCHED_LAT_IOBLOCK, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, #endif { } /* terminate */ }; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c77a28fe02f3..c529f5be0084 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3588,12 +3588,15 @@ extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); extern void task_ca_increase_nr_migrations(struct task_struct *tsk); void cpuacct_update_latency(struct task_struct *tsk, u64 delta); +void task_ca_update_block(struct task_struct *tsk, u64 runtime); void calc_cgroup_load(void); bool async_load_calc_enabled(void); #else static inline void task_ca_increase_nr_migrations(struct task_struct *tsk) { } static inline void cpuacct_update_latency(struct task_struct *tsk, u64 delta) { } +static inline void task_ca_update_block(struct task_struct *tsk, + u64 runtime) { } static inline void calc_cgroup_load(void) { } static inline bool async_load_calc_enabled(void) { diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c index 966f4eacfe51..e2214965f26c 100644 --- a/kernel/sched/stats.c +++ b/kernel/sched/stats.c @@ -85,6 +85,7 @@ void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p, if (p) { if (p->in_iowait) { + task_ca_update_block(p, delta); __schedstat_add(stats->iowait_sum, delta); __schedstat_inc(stats->iowait_count); trace_sched_stat_iowait(p, 
delta); -- Gitee From fd0b13bed53df8aaa26b3767062786bef834bf21 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Wed, 9 Jun 2021 14:12:44 +0800 Subject: [PATCH 0802/2138] anolis: sched: Add cgroup's scheduling latency histograms ANBZ: #8657 This patch adds cpuacct.cgroup_wait_latency interface. It exports the histogram of the sched entity's schedule latency. Unlike wait_latency, the sched entity is a cgroup rather than task. This is useful when tasks are not directly clustered under one cgroup. For examples: cgroup1 --- cgroupA --- task1 --- cgroupB --- task2 cgroup2 --- cgroupC --- task3 --- cgroupD --- task4 This is a common cgroup hierarchy used by many applications. With cgroup_wait_latency, we can just read from cgroup1 to know aggregated wait latency information of task1 and task2. The interface output format is identical to cpuacct.wait_latency. Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 37 +++++++++++++++++++++++++++++++------ kernel/sched/fair.c | 6 +++--- kernel/sched/sched.h | 4 ++-- 3 files changed, 36 insertions(+), 11 deletions(-) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 34b40e7b6d57..5c562b2fee25 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -36,6 +36,7 @@ enum sched_lat_stat_item { SCHED_LAT_WAIT, SCHED_LAT_BLOCK, SCHED_LAT_IOBLOCK, + SCHED_LAT_CGROUP_WAIT, SCHED_LAT_NR_STAT }; @@ -117,6 +118,12 @@ struct cpuacct { unsigned long avenrun[3]; }; +static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) +{ + return container_of(global_cgroup_css(cgrp, cpuacct_cgrp_id), + struct cpuacct, css); +} + static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css) { return css ? 
container_of(css, struct cpuacct, css) : NULL; @@ -262,26 +269,36 @@ void task_ca_update_block(struct task_struct *tsk, u64 runtime) rcu_read_unlock(); } -void cpuacct_update_latency(struct task_struct *tsk, u64 delta) +void cpuacct_update_latency(struct sched_entity *se, u64 delta) { - enum sched_lat_count_t idx; + int idx; + enum sched_lat_stat_item s; struct cpuacct *ca; unsigned int msecs; + struct task_group *tg; if (static_branch_likely(&cpuacct_no_sched_lat)) return; + tg = se->cfs_rq->tg; + if (task_group_is_autogroup(tg)) + return; + rcu_read_lock(); - ca = task_ca(tsk); + ca = cgroup_ca(tg->css.cgroup); if (!ca) { rcu_read_unlock(); return; } + if (entity_is_task(se)) + s = SCHED_LAT_WAIT; + else + s = SCHED_LAT_CGROUP_WAIT; + msecs = delta >> 20; /* Proximately to speed up */ idx = get_sched_lat_count_idx(msecs); - this_cpu_inc(ca->lat_stat_cpu->item[SCHED_LAT_WAIT][idx]); - this_cpu_add(ca->lat_stat_cpu->item[SCHED_LAT_WAIT][SCHED_LAT_TOTAL], - delta); + this_cpu_inc(ca->lat_stat_cpu->item[s][idx]); + this_cpu_add(ca->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], delta); rcu_read_unlock(); } #endif @@ -1120,11 +1137,13 @@ static void smp_write_##name(void *info) \ } \ SCHED_LAT_STAT_SMP_WRITE(sched_wait_latency, SCHED_LAT_WAIT); +SCHED_LAT_STAT_SMP_WRITE(sched_wait_cgroup_latency, SCHED_LAT_CGROUP_WAIT); SCHED_LAT_STAT_SMP_WRITE(sched_block_latency, SCHED_LAT_BLOCK); SCHED_LAT_STAT_SMP_WRITE(sched_ioblock_latency, SCHED_LAT_IOBLOCK); smp_call_func_t smp_sched_lat_write_funcs[] = { smp_write_sched_wait_latency, + smp_write_sched_wait_cgroup_latency, smp_write_sched_block_latency, smp_write_sched_ioblock_latency }; @@ -1245,6 +1264,12 @@ static struct cftype files[] = { .write_u64 = sched_lat_stat_write, .seq_show = sched_lat_stat_show }, + { + .name = "cgroup_wait_latency", + .private = SCHED_LAT_CGROUP_WAIT, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, { .name = "block_latency", .private = SCHED_LAT_BLOCK, diff --git 
a/kernel/sched/fair.c b/kernel/sched/fair.c index d40e00995b16..62cd0ef8ed8d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1303,10 +1303,10 @@ update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) if (unlikely(!schedstat_val(stats->wait_start))) return; - if (entity_is_task(se)) { + if (entity_is_task(se)) p = task_of(se); - cpuacct_update_latency(p, delta); - } + + cpuacct_update_latency(se, delta); __update_stats_wait_end(rq_of(cfs_rq), p, stats); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c529f5be0084..d9ad5b939e50 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3587,13 +3587,13 @@ extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); extern void task_ca_increase_nr_migrations(struct task_struct *tsk); -void cpuacct_update_latency(struct task_struct *tsk, u64 delta); +void cpuacct_update_latency(struct sched_entity *se, u64 delta); void task_ca_update_block(struct task_struct *tsk, u64 runtime); void calc_cgroup_load(void); bool async_load_calc_enabled(void); #else static inline void task_ca_increase_nr_migrations(struct task_struct *tsk) { } -static inline void cpuacct_update_latency(struct task_struct *tsk, +static inline void cpuacct_update_latency(struct sched_entity *se, u64 delta) { } static inline void task_ca_update_block(struct task_struct *tsk, u64 runtime) { } -- Gitee From 608fe37290c8f5492c4c35ca3a2e672285cae1c2 Mon Sep 17 00:00:00 2001 From: Yihao Wu Date: Thu, 18 Jun 2020 18:30:20 +0800 Subject: [PATCH 0803/2138] anolis: sched: Add "nr" to sched latency histogram ANBZ: #8657 Sometimes histogram is not precise enough because each sample is roughly accounted into a histogram bar. And average latency is more pratical for some users. 
This patch adds a "nr" field in 4 latency histogram interfaces, so lat(avg) = total(ms) / nr And compared to histogram, average latency is better to be used as a SLI because of simplicity. Example $ cat /sys/fs/cgroup/cpuacct/a/cpuacct.wait_latency 0-1ms: 4139 1-4ms: 317 4-7ms: 568 7-10ms: 0 10-100ms: 42324 100-500ms: 9131 500-1000ms: 95 1000-5000ms: 134 5000-10000ms: 0 >=10000ms: 0 total(ms): 4256455 nr: 182128 Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 5c562b2fee25..aaff1693edd3 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -66,6 +66,7 @@ enum sched_lat_count_t { SCHED_LAT_5000_10000, SCHED_LAT_10000_INF, SCHED_LAT_TOTAL, + SCHED_LAT_NR, SCHED_LAT_NR_COUNT, }; @@ -265,6 +266,7 @@ void task_ca_update_block(struct task_struct *tsk, u64 runtime) msecs = runtime >> 20; /* Proximately to speed up */ idx = get_sched_lat_count_idx(msecs); this_cpu_inc(ca->lat_stat_cpu->item[s][idx]); + this_cpu_inc(ca->lat_stat_cpu->item[s][SCHED_LAT_NR]); this_cpu_add(ca->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], runtime); rcu_read_unlock(); } @@ -298,6 +300,7 @@ void cpuacct_update_latency(struct sched_entity *se, u64 delta) msecs = delta >> 20; /* Proximately to speed up */ idx = get_sched_lat_count_idx(msecs); this_cpu_inc(ca->lat_stat_cpu->item[s][idx]); + this_cpu_inc(ca->lat_stat_cpu->item[s][SCHED_LAT_NR]); this_cpu_add(ca->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], delta); rcu_read_unlock(); } @@ -1205,6 +1208,8 @@ static int sched_lat_stat_show(struct seq_file *sf, void *v) sched_lat_stat_gather(ca, s, SCHED_LAT_10000_INF)); seq_printf(sf, "total(ms): \t%llu\n", sched_lat_stat_gather(ca, s, SCHED_LAT_TOTAL) / 1000000); + seq_printf(sf, "nr: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_NR)); return 0; } -- Gitee From d36e56f769c96d3a51cd98ba8580c5a3317e7b93 Mon Sep 17 00:00:00 2001 From: 
Yihao Wu Date: Thu, 21 May 2020 13:13:06 +0800 Subject: [PATCH 0804/2138] anolis: sched: Finer grain of sched latency ANBZ: #8657 Many samples are between 10ms-50ms. To display more informative distribution of latency, divide 10ms-50ms into 5 parts uniformly. Example: $ cat /sys/fs/cgroup/cpuacct/a/cpuacct.wait_latency 0-1ms: 59726433 1-4ms: 167 4-7ms: 0 7-10ms: 0 10-20ms: 5 20-30ms: 0 30-40ms: 3 40-50ms: 0 50-100ms: 0 100-500ms: 0 500-1000ms: 0 1000-5000ms: 0 5000-10000ms: 0 >=10000ms: 0 total(ms): 45554 nr: 59726600 Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index aaff1693edd3..3f264a22a28d 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -59,7 +59,11 @@ enum sched_lat_count_t { SCHED_LAT_1_4, SCHED_LAT_4_7, SCHED_LAT_7_10, - SCHED_LAT_10_100, + SCHED_LAT_10_20, + SCHED_LAT_20_30, + SCHED_LAT_30_40, + SCHED_LAT_40_50, + SCHED_LAT_50_100, SCHED_LAT_100_500, SCHED_LAT_500_1000, SCHED_LAT_1000_5000, @@ -86,8 +90,16 @@ static inline enum sched_lat_count_t get_sched_lat_count_idx(u64 msecs) idx = SCHED_LAT_4_7; else if (msecs < 10) idx = SCHED_LAT_7_10; + else if (msecs < 20) + idx = SCHED_LAT_10_20; + else if (msecs < 30) + idx = SCHED_LAT_20_30; + else if (msecs < 40) + idx = SCHED_LAT_30_40; + else if (msecs < 50) + idx = SCHED_LAT_40_50; else if (msecs < 100) - idx = SCHED_LAT_10_100; + idx = SCHED_LAT_50_100; else if (msecs < 500) idx = SCHED_LAT_100_500; else if (msecs < 1000) @@ -1194,8 +1206,16 @@ static int sched_lat_stat_show(struct seq_file *sf, void *v) sched_lat_stat_gather(ca, s, SCHED_LAT_4_7)); seq_printf(sf, "7-10ms: \t%llu\n", sched_lat_stat_gather(ca, s, SCHED_LAT_7_10)); - seq_printf(sf, "10-100ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_10_100)); + seq_printf(sf, "10-20ms: \t%llu\n", + sched_lat_stat_gather(ca, s, 
SCHED_LAT_10_20)); + seq_printf(sf, "20-30ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_20_30)); + seq_printf(sf, "30-40ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_30_40)); + seq_printf(sf, "40-50ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_40_50)); + seq_printf(sf, "50-100ms: \t%llu\n", + sched_lat_stat_gather(ca, s, SCHED_LAT_50_100)); seq_printf(sf, "100-500ms: \t%llu\n", sched_lat_stat_gather(ca, s, SCHED_LAT_100_500)); seq_printf(sf, "500-1000ms: \t%llu\n", -- Gitee From 36bc9b1fd3065c6ba6bf8bedbd9451d47a5d869a Mon Sep 17 00:00:00 2001 From: Erwei Deng Date: Thu, 27 Aug 2020 16:47:42 +0800 Subject: [PATCH 0805/2138] anolis: sched: get_sched_lat_count_idx optimization ANBZ: #8657 Optimize the get_sched_lat_count_idx function. The raw function use too many if-else branches which could consume more time and bring a little performance loss. I use another method that use less if-else branches and do some test for the performance improvement. I generate 10000 random numbers in each different ranges and run these two methods recording the total running time(us). See the result table below: --------------------------------------- range | raw | own | perf --------------------------------------- [0, 10) | 163 | 57 | +65.03% [0, 50) | 209 | 81 | +61.24% [0, 100) | 174 | 131 | +24.71% [0, 1000) | 192 | 73 | +61.98% [0, 10000) | 203 | 79 | +61.08% [0, 100000) | 141 | 69 | +51.06% We can see that our own method displays the better result. 
Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- kernel/sched/cpuacct.c | 44 +++++++++++++----------------------------- 1 file changed, 13 insertions(+), 31 deletions(-) diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 3f264a22a28d..a15a8e761188 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -80,38 +80,20 @@ struct sched_cgroup_lat_stat_cpu { static inline enum sched_lat_count_t get_sched_lat_count_idx(u64 msecs) { - enum sched_lat_count_t idx; - if (msecs < 1) - idx = SCHED_LAT_0_1; - else if (msecs < 4) - idx = SCHED_LAT_1_4; - else if (msecs < 7) - idx = SCHED_LAT_4_7; - else if (msecs < 10) - idx = SCHED_LAT_7_10; - else if (msecs < 20) - idx = SCHED_LAT_10_20; - else if (msecs < 30) - idx = SCHED_LAT_20_30; - else if (msecs < 40) - idx = SCHED_LAT_30_40; - else if (msecs < 50) - idx = SCHED_LAT_40_50; - else if (msecs < 100) - idx = SCHED_LAT_50_100; - else if (msecs < 500) - idx = SCHED_LAT_100_500; - else if (msecs < 1000) - idx = SCHED_LAT_500_1000; - else if (msecs < 5000) - idx = SCHED_LAT_1000_5000; - else if (msecs < 10000) - idx = SCHED_LAT_5000_10000; - else - idx = SCHED_LAT_10000_INF; - - return idx; + return SCHED_LAT_0_1; + if (msecs < 10) + return SCHED_LAT_0_1 + (msecs + 2) / 3; + if (msecs < 50) + return SCHED_LAT_7_10 + msecs / 10; + if (msecs < 100) + return SCHED_LAT_50_100; + if (msecs < 1000) + return SCHED_LAT_100_500 + (msecs / 500); + if (msecs < 10000) + return SCHED_LAT_1000_5000 + (msecs / 5000); + + return SCHED_LAT_10000_INF; } /* track CPU usage of a group of tasks and its child groups */ -- Gitee From b736d174aef9b0b827a356ab6ca92ff939600797 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Wed, 9 Jun 2021 17:54:47 +0800 Subject: [PATCH 0806/2138] anolis: sched: Introduce load 1/5/15 for running tasks ANBZ: #8657 Traditional load 1/5/15 includes both running and uninterruptible tasks, sometimes we need to distinguish the two types of loads, thus adding a separate load 1/5/15 for running 
tasks only. Signed-off-by: Xunlei Pang Signed-off-by: Yihao Wu Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- fs/proc/loadavg.c | 2 +- include/linux/sched.h | 5 +- include/linux/sched/loadavg.h | 10 ++++ kernel/sched/core.c | 8 ++++ kernel/sched/cpuacct.c | 54 +++++++++++++++++---- kernel/sched/loadavg.c | 89 +++++++++++++++++++++++++++++++++-- kernel/sched/sched.h | 13 +++++ 7 files changed, 163 insertions(+), 18 deletions(-) diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index 7205049d2935..73d956336bfa 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c @@ -27,8 +27,8 @@ static int loadavg_proc_show(struct seq_file *m, void *v) init_tsk = task_active_pid_ns(current)->child_reaper; get_task_struct(init_tsk); read_unlock(&tasklist_lock); + get_cgroup_avenrun(init_tsk, avnrun, FIXED_1/200, 0, false); - get_cgroup_avenrun(init_tsk, avnrun, FIXED_1/200, 0); cpuset_cpus_allowed(init_tsk, &cpuset_allowed); for_each_cpu(i, &cpuset_allowed) nr_R += task_ca_running(init_tsk, i); diff --git a/include/linux/sched.h b/include/linux/sched.h index 6c4036c9c27b..cf03b57c117c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2540,7 +2540,7 @@ void cpuacct_get_usage_result(struct task_struct *tsk, int cpu, struct cpuacct_usage_result *res); unsigned long task_ca_running(struct task_struct *tsk, int cpu); void get_cgroup_avenrun(struct task_struct *tsk, unsigned long *loads, - unsigned long offset, int shift); + unsigned long offset, int shift, bool running); bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total); @@ -2554,7 +2554,8 @@ static inline unsigned long task_ca_running(struct task_struct *tsk, int cpu) } static inline void get_cgroup_avenrun(struct task_struct *tsk, - unsigned long *loads, unsigned long offset, int shift) { } + unsigned long *loads, unsigned long offset, + int shift, bool running) { } static inline bool check_rich_container(unsigned int cpu, unsigned int *index, bool 
*rich_container, unsigned int *total) diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h index 83ec54b65e79..20165894027d 100644 --- a/include/linux/sched/loadavg.h +++ b/include/linux/sched/loadavg.h @@ -13,8 +13,18 @@ * 11 bit fractions. */ extern unsigned long avenrun[]; /* Load averages */ +extern unsigned long avenrun_r[]; /* R load averages */ + extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); +#ifdef CONFIG_SCHED_SLI +extern void get_avenrun_r(unsigned long *loads, unsigned long offset, + int shift); +#else +static inline void get_avenrun_r(unsigned long *loads, unsigned long offset, + int shift) { } +#endif + #define FSHIFT 11 /* nr of bits of precision */ #define FIXED_1 (1<__lock); rq->nr_running = 0; rq->calc_load_active = 0; +#ifdef CONFIG_SCHED_SLI + rq->calc_load_active_r = 0; +#endif rq->calc_load_update = jiffies + LOAD_FREQ; init_cfs_rq(&rq->cfs); init_rt_rq(&rq->rt); diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index a15a8e761188..ef183fb41911 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -111,6 +111,9 @@ struct cpuacct { u64 next_load_update; #endif unsigned long avenrun[3]; +#ifdef CONFIG_SCHED_SLI + unsigned long avenrun_r[3]; +#endif }; static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) @@ -397,6 +400,10 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) &per_cpu_ptr(ca->prev_cputime, i)->prev_cputime2); } + ca->avenrun[0] = ca->avenrun[1] = ca->avenrun[2] = 0; +#ifdef CONFIG_SCHED_SLI + ca->avenrun_r[0] = ca->avenrun_r[1] = ca->avenrun_r[2] = 0; +#endif return &ca->css; #ifdef CONFIG_SCHED_SLI @@ -643,20 +650,24 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v) static unsigned long ca_running(struct cpuacct *ca, int cpu); static void __get_cgroup_avenrun(struct cpuacct *ca, unsigned long *loads, - unsigned long offset, int shift) + unsigned long offset, int shift, bool running) { unsigned long *avenrun; - avenrun = 
ca->avenrun; + if (running) + avenrun = ca->avenrun_r; + else + avenrun = ca->avenrun; + loads[0] = (avenrun[0] + offset) << shift; loads[1] = (avenrun[1] + offset) << shift; loads[2] = (avenrun[2] + offset) << shift; } void get_cgroup_avenrun(struct task_struct *tsk, unsigned long *loads, - unsigned long offset, int shift) + unsigned long offset, int shift, bool running) { - __get_cgroup_avenrun(task_ca(tsk), loads, offset, shift); + __get_cgroup_avenrun(task_ca(tsk), loads, offset, shift, running); } unsigned long task_ca_running(struct task_struct *tsk, int cpu) @@ -859,22 +870,36 @@ void cpuacct_cpuset_changed(struct cgroup *cgrp, struct cpumask *deleted, static void cpuacct_calc_load(struct cpuacct *acct) { - if (acct != &root_cpuacct) { - long active = 0; - int cpu; + long active = 0, active_r = 0, nr_r; + int cpu; + if (acct != &root_cpuacct) { for_each_possible_cpu(cpu) { - active += ca_running(acct, cpu); + nr_r = ca_running(acct, cpu); + active += nr_r; + active_r += nr_r; active += ca_uninterruptible(acct, cpu); } active = active > 0 ? active * FIXED_1 : 0; acct->avenrun[0] = calc_load(acct->avenrun[0], EXP_1, active); acct->avenrun[1] = calc_load(acct->avenrun[1], EXP_5, active); acct->avenrun[2] = calc_load(acct->avenrun[2], EXP_15, active); + + active_r = active_r > 0 ? 
active_r * FIXED_1 : 0; + acct->avenrun_r[0] = calc_load(acct->avenrun_r[0], + EXP_1, active_r); + acct->avenrun_r[1] = calc_load(acct->avenrun_r[1], + EXP_5, active_r); + acct->avenrun_r[2] = calc_load(acct->avenrun_r[2], + EXP_15, active_r); } else { acct->avenrun[0] = avenrun[0]; acct->avenrun[1] = avenrun[1]; acct->avenrun[2] = avenrun[2]; + + acct->avenrun_r[0] = avenrun_r[0]; + acct->avenrun_r[1] = avenrun_r[1]; + acct->avenrun_r[2] = avenrun_r[2]; } } @@ -994,7 +1019,7 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) u64 user, nice, system, idle, iowait, irq, softirq, steal, guest; u64 nr_migrations = 0; struct cpuacct_alistats *alistats; - unsigned long load, avnrun[3]; + unsigned long load, avnrun[3], avnrun_r[3]; unsigned long nr_run = 0, nr_uninter = 0; int cpu; @@ -1030,7 +1055,8 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) nr_uninter += ca_uninterruptible(ca, cpu); } - __get_cgroup_avenrun(ca, avnrun, FIXED_1/200, 0); + __get_cgroup_avenrun(ca, avnrun, FIXED_1/200, 0, false); + __get_cgroup_avenrun(ca, avnrun_r, FIXED_1/200, 0, true); } else { struct kernel_cpustat *kcpustat; @@ -1054,6 +1080,7 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) nr_uninter = nr_uninterruptible(); get_avenrun(avnrun, FIXED_1/200, 0); + get_avenrun_r(avnrun_r, FIXED_1/200, 0); } seq_printf(sf, "user %lld\n", nsec_to_clock_t(user)); @@ -1079,6 +1106,13 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) seq_printf(sf, "nr_uninterruptible %lld\n", (u64)nr_uninter); seq_printf(sf, "nr_migrations %lld\n", (u64)nr_migrations); + load = LOAD_INT(avnrun_r[0]) * 100 + LOAD_FRAC(avnrun_r[0]); + seq_printf(sf, "running load average(1min) %lld\n", (u64)load); + load = LOAD_INT(avnrun_r[1]) * 100 + LOAD_FRAC(avnrun_r[1]); + seq_printf(sf, "running load average(5min) %lld\n", (u64)load); + load = LOAD_INT(avnrun_r[2]) * 100 + LOAD_FRAC(avnrun_r[2]); + seq_printf(sf, "running load average(15min) %lld\n", 
(u64)load); + return 0; } diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index c11a84d4676d..13761c5d1bfb 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -56,8 +56,10 @@ /* Variables and functions for calc_load */ atomic_long_t calc_load_tasks; -unsigned long calc_load_update; +atomic_long_t calc_load_tasks_r; unsigned long avenrun[3]; +unsigned long avenrun_r[3]; +unsigned long calc_load_update; EXPORT_SYMBOL(avenrun); /* should be removed */ /** @@ -90,6 +92,29 @@ long calc_load_fold_active(struct rq *this_rq, long adjust) return delta; } +#ifdef CONFIG_SCHED_SLI +void get_avenrun_r(unsigned long *loads, unsigned long offset, int shift) +{ + loads[0] = (avenrun_r[0] + offset) << shift; + loads[1] = (avenrun_r[1] + offset) << shift; + loads[2] = (avenrun_r[2] + offset) << shift; +} + +long calc_load_fold_active_r(struct rq *this_rq, long adjust) +{ + long nr_active, delta = 0; + + nr_active = this_rq->nr_running - adjust; + + if (nr_active != this_rq->calc_load_active_r) { + delta = nr_active - this_rq->calc_load_active_r; + this_rq->calc_load_active_r = nr_active; + } + + return delta; +} +#endif + /** * fixed_power_int - compute: x^n, in O(log n) time * @@ -203,6 +228,9 @@ calc_load_n(unsigned long load, unsigned long exp, * When making the ILB scale, we should try to pull this in as well. 
*/ static atomic_long_t calc_load_nohz[2]; +#ifdef CONFIG_SCHED_SLI +static atomic_long_t calc_load_nohz_r[2]; +#endif static int calc_load_idx; static inline int calc_load_write_idx(void) @@ -233,13 +261,17 @@ static inline int calc_load_read_idx(void) static void calc_load_nohz_fold(struct rq *rq) { long delta; + int idx = calc_load_write_idx(); delta = calc_load_fold_active(rq, 0); - if (delta) { - int idx = calc_load_write_idx(); - + if (delta) atomic_long_add(delta, &calc_load_nohz[idx]); - } + +#ifdef CONFIG_SCHED_SLI + delta = calc_load_fold_active_r(rq, 0); + if (delta) + atomic_long_add(delta, &calc_load_nohz_r[idx]); +#endif } void calc_load_nohz_start(void) @@ -291,6 +323,19 @@ static long calc_load_nohz_read(void) return delta; } +#ifdef CONFIG_SCHED_SLI +static long calc_load_nohz_r_read(void) +{ + int idx = calc_load_read_idx(); + long delta = 0; + + if (atomic_long_read(&calc_load_nohz_r[idx])) + delta = atomic_long_xchg(&calc_load_nohz_r[idx], 0); + + return delta; +} +#endif + /* * NO_HZ can leave us missing all per-CPU ticks calling * calc_load_fold_active(), but since a NO_HZ CPU folds its delta into @@ -320,6 +365,16 @@ static void calc_global_nohz(void) avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); +#ifdef CONFIG_SCHED_SLI + /* Calc avenrun_r */ + active = atomic_long_read(&calc_load_tasks_r); + active = active > 0 ? 
active * FIXED_1 : 0; + + avenrun_r[0] = calc_load_n(avenrun_r[0], EXP_1, active, n); + avenrun_r[1] = calc_load_n(avenrun_r[1], EXP_5, active, n); + avenrun_r[2] = calc_load_n(avenrun_r[2], EXP_15, active, n); +#endif + WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ); } @@ -336,6 +391,7 @@ static void calc_global_nohz(void) #else /* !CONFIG_NO_HZ_COMMON */ static inline long calc_load_nohz_read(void) { return 0; } +static inline long calc_load_nohz_r_read(void) { return 0; } static inline void calc_global_nohz(void) { } #endif /* CONFIG_NO_HZ_COMMON */ @@ -369,6 +425,23 @@ void calc_global_load(void) avenrun[1] = calc_load(avenrun[1], EXP_5, active); avenrun[2] = calc_load(avenrun[2], EXP_15, active); +#ifdef CONFIG_SCHED_SLI + /* + * Calculate load 1/5/15 for running tasks only. We do not + * invent common functions to keep the same layout as upstream. + */ + delta = calc_load_nohz_r_read(); + if (delta) + atomic_long_add(delta, &calc_load_tasks_r); + + active = atomic_long_read(&calc_load_tasks_r); + active = active > 0 ? 
active * FIXED_1 : 0; + + avenrun_r[0] = calc_load(avenrun_r[0], EXP_1, active); + avenrun_r[1] = calc_load(avenrun_r[1], EXP_5, active); + avenrun_r[2] = calc_load(avenrun_r[2], EXP_15, active); +#endif + WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ); if (!async_load_calc_enabled()) @@ -396,5 +469,11 @@ void calc_global_load_tick(struct rq *this_rq) if (delta) atomic_long_add(delta, &calc_load_tasks); +#ifdef CONFIG_SCHED_SLI + delta = calc_load_fold_active_r(this_rq, 0); + if (delta) + atomic_long_add(delta, &calc_load_tasks_r); +#endif + this_rq->calc_load_update += LOAD_FREQ; } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d9ad5b939e50..663b66266845 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -115,12 +115,22 @@ extern __read_mostly int scheduler_running; extern unsigned long calc_load_update; extern atomic_long_t calc_load_tasks; +extern atomic_long_t calc_load_tasks_r; extern unsigned int sysctl_sched_child_runs_first; extern void calc_global_load_tick(struct rq *this_rq); extern long calc_load_fold_active(struct rq *this_rq, long adjust); +#ifdef CONFIG_SCHED_SLI +extern long calc_load_fold_active_r(struct rq *this_rq, long adjust); +#else +static inline long calc_load_fold_active_r(struct rq *this_rq, long adjust) +{ + return 0; +} +#endif + extern void call_trace_sched_update_nr_running(struct rq *rq, int count); extern unsigned int sysctl_sched_rt_period; @@ -1126,6 +1136,9 @@ struct rq { /* calc_load related fields */ unsigned long calc_load_update; long calc_load_active; +#ifdef CONFIG_SCHED_SLI + long calc_load_active_r; +#endif #ifdef CONFIG_SCHED_HRTICK #ifdef CONFIG_SMP -- Gitee From 56e81f5d158eb517b3f0ae4d451b07ebf0bf2521 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Tue, 15 Jun 2021 11:17:39 +0800 Subject: [PATCH 0807/2138] anolis: make the rich container support k8s. ANBZ: #8657 For k8s, multiple containers share one pid_namespace, the child reaper lives in one special "pause" container among them. 
Then we should use current other than the child reaper to get the information, thus it deserves a runtime interface. Introduce "/proc/sys/kernel/rich_container_source": - 0 means to use cgroups of "current" by default. - 1 means to use cgroups of "child reaper". Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- drivers/base/cpu.c | 14 +----- fs/proc/loadavg.c | 8 +-- fs/proc/meminfo.c | 19 +------ fs/proc/stat.c | 10 ++-- fs/proc/uptime.c | 4 +- include/linux/memcontrol.h | 9 ++++ include/linux/pid_namespace.h | 8 +++ include/linux/sched.h | 40 +++++++++++---- kernel/cgroup/cpuset.c | 24 +++++++++ kernel/pid_namespace.c | 1 + kernel/sched/cpuacct.c | 94 ++++++++++++++++++++++++----------- kernel/sysctl.c | 10 ++++ mm/memcontrol.c | 34 +++++++++++++ 13 files changed, 199 insertions(+), 76 deletions(-) diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 925dacc2d266..91d16e663099 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -219,29 +219,17 @@ static ssize_t show_cpus_attr(struct device *dev, { struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); struct cpumask cpuset_allowed; - struct task_struct *init_tsk; bool rich_container; rcu_read_lock(); rich_container = in_rich_container(current); - if (rich_container) { - read_lock(&tasklist_lock); - init_tsk = task_active_pid_ns(current)->child_reaper; - get_task_struct(init_tsk); - read_unlock(&tasklist_lock); - } else { - init_tsk = NULL; - } rcu_read_unlock(); if (rich_container && !strcmp(attr->attr.name, "online")) - cpuset_cpus_allowed(init_tsk, &cpuset_allowed); + rich_container_get_cpuset_cpus(&cpuset_allowed); else cpumask_copy(&cpuset_allowed, ca->map); - if (init_tsk) - put_task_struct(init_tsk); - return cpumap_print_to_pagebuf(true, buf, &cpuset_allowed); } diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index 73d956336bfa..7a7e443a58c8 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c @@ -22,16 +22,18 @@ static int loadavg_proc_show(struct 
seq_file *m, void *v) rcu_read_lock(); if (in_rich_container(current)) { struct task_struct *init_tsk; + enum rich_container_source from; read_lock(&tasklist_lock); init_tsk = task_active_pid_ns(current)->child_reaper; get_task_struct(init_tsk); read_unlock(&tasklist_lock); - get_cgroup_avenrun(init_tsk, avnrun, FIXED_1/200, 0, false); - cpuset_cpus_allowed(init_tsk, &cpuset_allowed); + rich_container_source(&from); + rich_container_get_avenrun(from, init_tsk, avnrun, FIXED_1/200, 0, false); + rich_container_get_cpuset_cpus(&cpuset_allowed); for_each_cpu(i, &cpuset_allowed) - nr_R += task_ca_running(init_tsk, i); + nr_R += rich_container_get_running(from, init_tsk, i); put_task_struct(init_tsk); } else { get_avenrun(avnrun, FIXED_1/200, 0); diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index e3246083c8ce..fc14e63d0c15 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -45,24 +45,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) #ifdef CONFIG_MEMCG rcu_read_lock(); if (in_rich_container(current)) { - struct task_struct *init_tsk; - - /* - * current may be in a subcgroup, use reaper instead. - * We assume the reaper always be in the container's - * top group. 
- */ - read_lock(&tasklist_lock); - init_tsk = task_active_pid_ns(current)->child_reaper; - get_task_struct(init_tsk); - read_unlock(&tasklist_lock); - - memcg = mem_cgroup_from_task(init_tsk); - if (mem_cgroup_is_root(memcg)) - memcg = NULL; - else - css_get(&memcg->css); - put_task_struct(init_tsk); + memcg = rich_container_get_memcg(); } rcu_read_unlock(); #endif diff --git a/fs/proc/stat.c b/fs/proc/stat.c index d2ec9dcddb31..9c1d734f0069 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -94,6 +94,7 @@ static int show_stat(struct seq_file *p, void *v) unsigned int nr_runnable = 0; struct task_struct *init_tsk = NULL; struct cpuacct_usage_result res; + enum rich_container_source from; bool rich_container; user = nice = system = idle = iowait = @@ -113,9 +114,10 @@ static int show_stat(struct seq_file *p, void *v) read_unlock(&tasklist_lock); boottime.tv_sec += init_tsk->start_time / NSEC_PER_SEC; - cpuset_cpus_allowed(init_tsk, &cpuset_allowed); + rich_container_get_cpuset_cpus(&cpuset_allowed); + rich_container_source(&from); for_each_cpu(i, &cpuset_allowed) { - cpuacct_get_usage_result(init_tsk, i, &res); + rich_container_get_usage(from, init_tsk, i, &res); user += res.user; nice += res.nice; system += res.system; @@ -176,7 +178,7 @@ static int show_stat(struct seq_file *p, void *v) rcu_read_lock(); if (rich_container) { for_each_cpu(i, &cpuset_allowed) { - cpuacct_get_usage_result(init_tsk, i, &res); + rich_container_get_usage(from, init_tsk, i, &res); seq_printf(p, "cpu%d", seq++); seq_put_decimal_ull(p, " ", @@ -244,7 +246,7 @@ static int show_stat(struct seq_file *p, void *v) rcu_read_lock(); if (rich_container) { for_each_cpu(i, &cpuset_allowed) - nr_runnable += task_ca_running(init_tsk, i); + nr_runnable += rich_container_get_running(from, init_tsk, i); } else nr_runnable = nr_running(); rcu_read_unlock(); diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index 95622fd62885..591909b4d111 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c @@ -26,6 
+26,7 @@ static int uptime_proc_show(struct seq_file *m, void *v) rcu_read_lock(); if (in_rich_container(current)) { + enum rich_container_source from; struct task_struct *init_tsk; struct cpuacct_usage_result res; @@ -34,8 +35,9 @@ static int uptime_proc_show(struct seq_file *m, void *v) get_task_struct(init_tsk); read_unlock(&tasklist_lock); + rich_container_source(&from); for_each_possible_cpu(i) { - cpuacct_get_usage_result(init_tsk, i, &res); + rich_container_get_usage(from, init_tsk, i, &res); idle_nsec += res.idle; } uptime = timespec64_sub(uptime, diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 3e7448d71c96..2986a43acaef 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1153,6 +1153,15 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, void memcg_meminfo(struct mem_cgroup *memcg, struct sysinfo *info, struct sysinfo_ext *ext); +#ifdef CONFIG_RICH_CONTAINER +struct mem_cgroup *rich_container_get_memcg(void); +#else +static inline struct mem_cgroup *rich_container_get_memcg(void) +{ + return NULL; +} +#endif + #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index a71999081213..d913d86d29d8 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -125,6 +125,8 @@ static inline bool task_is_in_init_pid_ns(struct task_struct *tsk) #ifdef CONFIG_RICH_CONTAINER extern int sysctl_rich_container_enable; +extern int sysctl_rich_container_source; + static inline bool in_rich_container(struct task_struct *tsk) { if (sysctl_rich_container_enable == 0) @@ -132,11 +134,17 @@ static inline bool in_rich_container(struct task_struct *tsk) return !task_is_in_init_pid_ns(tsk) && child_cpuacct(tsk); } + +void rich_container_get_cpuset_cpus(struct cpumask *pmask); #else static inline bool in_rich_container(struct task_struct *tsk) { return false; } + +static inline void 
rich_container_get_cpuset_cpus(struct cpumask *pmask) +{ +} #endif #endif /* _LINUX_PID_NS_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index cf03b57c117c..c4a72d16bf1a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2534,28 +2534,50 @@ struct cpuacct_usage_result { u64 steal, iowait, idle, guest, guest_nice; }; +enum rich_container_source { + RICH_CONTAINER_REAPER, + RICH_CONTAINER_CURRENT, +}; + #ifdef CONFIG_RICH_CONTAINER +void rich_container_source(enum rich_container_source *from); bool child_cpuacct(struct task_struct *tsk); -void cpuacct_get_usage_result(struct task_struct *tsk, int cpu, +void rich_container_get_usage(enum rich_container_source from, + struct task_struct *reaper, int cpu, struct cpuacct_usage_result *res); -unsigned long task_ca_running(struct task_struct *tsk, int cpu); -void get_cgroup_avenrun(struct task_struct *tsk, unsigned long *loads, +unsigned long rich_container_get_running(enum rich_container_source from, + struct task_struct *reaper, int cpu); +void rich_container_get_avenrun(enum rich_container_source from, + struct task_struct *reaper, unsigned long *loads, unsigned long offset, int shift, bool running); bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total); #else -static inline void cpuacct_get_usage_result(struct task_struct *tsk, - int cpu, struct cpuacct_usage_result *res) { } +static inline void +rich_container_source(enum rich_container_source *from) +{ +} -static inline unsigned long task_ca_running(struct task_struct *tsk, int cpu) +static inline void +rich_container_get_usage(enum rich_container_source from, + struct task_struct *reaper, int cpu, + struct cpuacct_usage_result *res) +{ +} + +static inline unsigned long +rich_container_get_running(enum rich_container_source from, + struct task_struct *reaper, int cpu) { return 0; } -static inline void get_cgroup_avenrun(struct task_struct *tsk, - unsigned long *loads, unsigned 
long offset, - int shift, bool running) { } +static inline void rich_container_get_avenrun(enum rich_container_source from, + struct task_struct *reaper, unsigned long *loads, + unsigned long offset, int shift, bool running) +{ +} static inline bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index a35e17da6993..72050e6c0224 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -44,6 +44,7 @@ #include #include #include +#include DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key); DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key); @@ -4003,6 +4004,29 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) spin_unlock_irqrestore(&callback_lock, flags); } +#ifdef CONFIG_RICH_CONTAINER +void rich_container_get_cpuset_cpus(struct cpumask *pmask) +{ + unsigned long flags; + struct task_struct *p; + + rcu_read_lock(); + if (sysctl_rich_container_source == 1) { + read_lock(&tasklist_lock); + p = task_active_pid_ns(current)->child_reaper; + read_unlock(&tasklist_lock); + + } else { + p = current; + } + + spin_lock_irqsave(&callback_lock, flags); + guarantee_online_cpus(p, pmask); + spin_unlock_irqrestore(&callback_lock, flags); + rcu_read_unlock(); +} +#endif + /** * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe. 
* @tsk: pointer to task_struct with which the scheduler is struggling diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 3d488f64a72d..9d54ed62f36f 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -27,6 +27,7 @@ #ifdef CONFIG_RICH_CONTAINER int sysctl_rich_container_enable; +int sysctl_rich_container_source; /* 0 - current; 1 - child_reaper */ #endif static DEFINE_MUTEX(pid_caches_mutex); diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index ef183fb41911..a1a0e0a23ed9 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -664,17 +664,6 @@ static void __get_cgroup_avenrun(struct cpuacct *ca, unsigned long *loads, loads[2] = (avenrun[2] + offset) << shift; } -void get_cgroup_avenrun(struct task_struct *tsk, unsigned long *loads, - unsigned long offset, int shift, bool running) -{ - __get_cgroup_avenrun(task_ca(tsk), loads, offset, shift, running); -} - -unsigned long task_ca_running(struct task_struct *tsk, int cpu) -{ - return ca_running(task_ca(tsk), cpu); -} - static inline struct task_group *cgroup_tg(struct cgroup *cgrp) { return container_of(global_cgroup_css(cgrp, cpu_cgrp_id), @@ -1002,16 +991,6 @@ static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, res->guest_nice = kcpustat->cpustat[CPUTIME_GUEST_NICE]; } -void cpuacct_get_usage_result(struct task_struct *tsk, int cpu, - struct cpuacct_usage_result *res) -{ - struct cpuacct *ca = task_ca(tsk); - struct cgroup *cgrp = ca->css.cgroup; - struct task_group *tg = cgroup_tg(cgrp); - - __cpuacct_get_usage_result(ca, cpu, tg, res); -} - static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) { struct cpuacct *ca = css_ca(seq_css(sf)); @@ -1543,11 +1522,11 @@ bool child_cpuacct(struct task_struct *tsk) return false; } + bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total) { struct cpumask cpuset_allowed; - struct task_struct *init_tsk; bool in_rich; int i, id = 0; @@ -1559,12 
+1538,7 @@ bool check_rich_container(unsigned int cpu, unsigned int *index, *rich_container = true; - read_lock(&tasklist_lock); - init_tsk = task_active_pid_ns(current)->child_reaper; - get_task_struct(init_tsk); - read_unlock(&tasklist_lock); - cpuset_cpus_allowed(init_tsk, &cpuset_allowed); - put_task_struct(init_tsk); + rich_container_get_cpuset_cpus(&cpuset_allowed); *total = cpumask_weight(&cpuset_allowed); if (cpumask_test_cpu(cpu, &cpuset_allowed)) { @@ -1580,4 +1554,68 @@ bool check_rich_container(unsigned int cpu, unsigned int *index, /* Hide this cpu in the container */ return true; } + +void rich_container_source(enum rich_container_source *from) +{ + if (sysctl_rich_container_source == 1) + *from = RICH_CONTAINER_REAPER; + else + *from = RICH_CONTAINER_CURRENT; +} + +void rich_container_get_usage(enum rich_container_source from, + struct task_struct *reaper, int cpu, + struct cpuacct_usage_result *res) +{ + struct cpuacct *ca_src; + struct task_group *tg; + + rcu_read_lock(); + /* To avoid iterating css for every cpu */ + if (likely(from == RICH_CONTAINER_REAPER)) + ca_src = task_ca(reaper); + else + ca_src = task_ca(current); + + tg = cgroup_tg(ca_src->css.cgroup); + __cpuacct_get_usage_result(ca_src, cpu, tg, res); + rcu_read_unlock(); +} + +unsigned long rich_container_get_running(enum rich_container_source from, + struct task_struct *reaper, int cpu) +{ + struct cpuacct *ca_src; + unsigned long nr; + + rcu_read_lock(); + /* To avoid iterating css for every cpu */ + if (likely(from == RICH_CONTAINER_REAPER)) + ca_src = task_ca(reaper); + else + ca_src = task_ca(current); + + nr = ca_running(ca_src, cpu); + rcu_read_unlock(); + + return nr; +} + +void rich_container_get_avenrun(enum rich_container_source from, + struct task_struct *reaper, unsigned long *loads, + unsigned long offset, int shift, bool running) +{ + struct cpuacct *ca_src; + + rcu_read_lock(); + /* To avoid iterating css for every cpu */ + if (likely(from == RICH_CONTAINER_REAPER)) + 
ca_src = task_ca(reaper); + else + ca_src = task_ca(current); + + __get_cgroup_avenrun(ca_src, loads, offset, shift, running); + rcu_read_unlock(); +} + #endif diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 6d0bcc3c205e..15aaa24bf595 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -63,6 +63,7 @@ #include #include #include +#include #include "../lib/kstrtox.h" @@ -2089,6 +2090,15 @@ static struct ctl_table kern_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, + { + .procname = "rich_container_source", + .data = &sysctl_rich_container_source, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, #endif { } }; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c2951593221b..e6a0d5034741 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -64,6 +64,7 @@ #include #include #include +#include #include "internal.h" #include #include @@ -7982,6 +7983,39 @@ static int __init mem_cgroup_swap_init(void) } subsys_initcall(mem_cgroup_swap_init); +#endif /* CONFIG_MEMCG_SWAP */ + +#ifdef CONFIG_RICH_CONTAINER +static inline struct mem_cgroup *css_memcg(struct cgroup_subsys_state *css) +{ + return css ? 
container_of(css, struct mem_cgroup, css) : NULL; +} + +/* with rcu lock held */ +struct mem_cgroup *rich_container_get_memcg(void) +{ + struct cgroup_subsys_state *css; + struct mem_cgroup *memcg_src; + + if (sysctl_rich_container_source == 1) + css = NULL; + else + css = task_css(current, memory_cgrp_id); + + if (css) { + memcg_src = css_memcg(css); + } else { + read_lock(&tasklist_lock); + memcg_src = mem_cgroup_from_task(task_active_pid_ns(current)->child_reaper); + read_unlock(&tasklist_lock); + } + + if (css_tryget(&memcg_src->css)) + return memcg_src; + else + return NULL; +} + void memcg_meminfo(struct mem_cgroup *memcg, struct sysinfo *info, struct sysinfo_ext *ext) { -- Gitee From 02f8eb675d54d02b8add124905032bb784650f62 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Tue, 15 Jun 2021 15:00:35 +0800 Subject: [PATCH 0808/2138] anolis: cpuinfo: Add cpuinfo support of cpu quota and cpu share ANBZ: #8657 lxcfs supports rich container cpuinfo from cpu quota, also sigma cpushare has the requirement of getting cpuinfo from cpu.shares. Thus, introduce new knobs to support these: /proc/sys/kernel/rich_container_cpuinfo_source - 0 uses cpu quota "quota/period" by default. It will fall back to use cpuset.cpus if quota not set. - 1 uses cpuset.cpus. - 2 uses cpushare "cpu.shares/1024" (1024 can be configured below) /proc/sys/kernel/rich_container_cpuinfo_sharesbase - when rich_container_cpuinfo_source is 2, this is the divisor. 
Note that, after faking cpuinfo in dockers, it's impossible to make /proc/stat match them accordingly, but we only care about the following stats not /proc/stat: /proc/cpuinfo, sysfs online and /proc/meminfo Signed-off-by: Xunlei Pang Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- drivers/base/cpu.c | 13 +++++- include/linux/pid_namespace.h | 15 +++++++ include/linux/sched.h | 9 +++- kernel/sched/core.c | 4 +- kernel/sched/cpuacct.c | 79 ++++++++++++++++++++++++++++++++++- kernel/sched/sched.h | 3 ++ kernel/sysctl.c | 17 ++++++++ 7 files changed, 134 insertions(+), 6 deletions(-) diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 91d16e663099..d186a8b17a4c 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -219,14 +219,23 @@ static ssize_t show_cpus_attr(struct device *dev, { struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); struct cpumask cpuset_allowed; + struct task_struct __maybe_unused *scenario; bool rich_container; rcu_read_lock(); rich_container = in_rich_container(current); rcu_read_unlock(); - if (rich_container && !strcmp(attr->attr.name, "online")) - rich_container_get_cpuset_cpus(&cpuset_allowed); + if (rich_container && !strcmp(attr->attr.name, "online")) { + read_lock(&tasklist_lock); + scenario = rich_container_get_scenario(); + get_task_struct(scenario); + read_unlock(&tasklist_lock); + + rich_container_get_cpus(scenario, &cpuset_allowed); + + put_task_struct(scenario); + } else cpumask_copy(&cpuset_allowed, ca->map); diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index d913d86d29d8..9da7d0da722c 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -126,6 +126,16 @@ static inline bool task_is_in_init_pid_ns(struct task_struct *tsk) #ifdef CONFIG_RICH_CONTAINER extern int sysctl_rich_container_enable; extern int sysctl_rich_container_source; +extern int sysctl_rich_container_cpuinfo_source; +extern unsigned int sysctl_rich_container_cpuinfo_sharesbase; + 
+static inline struct task_struct *rich_container_get_scenario(void) +{ + if (sysctl_rich_container_source == 1) + return task_active_pid_ns(current)->child_reaper; + + return current; +} static inline bool in_rich_container(struct task_struct *tsk) { @@ -145,6 +155,11 @@ static inline bool in_rich_container(struct task_struct *tsk) static inline void rich_container_get_cpuset_cpus(struct cpumask *pmask) { } + +static inline struct task_struct *rich_container_get_scenario(void) +{ + return NULL; +} #endif #endif /* _LINUX_PID_NS_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index c4a72d16bf1a..7839b5feba6b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2553,7 +2553,9 @@ void rich_container_get_avenrun(enum rich_container_source from, bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total); -#else +void rich_container_get_cpus(struct task_struct *tsk, struct cpumask *pmask); + +#else /* CONFIG_RICH_CONTAINER */ static inline void rich_container_source(enum rich_container_source *from) { @@ -2584,6 +2586,11 @@ static inline bool check_rich_container(unsigned int cpu, unsigned int *index, { return false; } + +static inline +void rich_container_get_cpus(struct task_struct *tsk, struct cpumask *pmask) +{ +} #endif #ifdef CONFIG_SCHED_SLI diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 052cb417ee60..8dcf9fa66e24 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -11194,7 +11194,7 @@ static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } -static long tg_get_cfs_quota(struct task_group *tg) +long tg_get_cfs_quota(struct task_group *tg) { u64 quota_us; @@ -11222,7 +11222,7 @@ static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } -static long tg_get_cfs_period(struct task_group *tg) +long 
tg_get_cfs_period(struct task_group *tg) { u64 cfs_period_us; diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index a1a0e0a23ed9..71abcc0b79c5 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -1512,6 +1512,77 @@ late_initcall_sync(async_load_calc_init); #endif #ifdef CONFIG_RICH_CONTAINER + +/* 0 - cpu quota; 1 - cpuset.cpus; 2 - cpu.shares */ +int sysctl_rich_container_cpuinfo_source; +/* when cpu.shares */ +unsigned int sysctl_rich_container_cpuinfo_sharesbase = 1024; + +static inline struct task_group *css_tg(struct cgroup_subsys_state *css) +{ + return css ? container_of(css, struct task_group, css) : NULL; +} + +static inline struct task_group *task_tg(struct task_struct *tsk) +{ + return css_tg(task_css(tsk, cpu_cgrp_id)); +} + +void rich_container_get_cpus(struct task_struct *tsk, struct cpumask *pmask) +{ + struct task_group *tg; + int i, cpus; + + /* cfs quota source */ + if (sysctl_rich_container_cpuinfo_source == 0) { + long quota, period; + + rcu_read_lock(); + tg = task_tg(tsk); + quota = tg_get_cfs_quota(tg); + period = tg_get_cfs_period(tg); + rcu_read_unlock(); + + if (quota == -1) { + /* Fallback to use cpuset.cpus if quota not set */ + goto cpuset_source; + } else { + /* period can't be 0 */ + cpus = (quota + period - 1) / period; + cpus = clamp(cpus, 1, (int)num_online_cpus()); + cpumask_clear(pmask); + for (i = 0; i < cpus; i++) + cpumask_set_cpu(i, pmask); + } + + return; + } + + /* cpu.shares source */ + if (sysctl_rich_container_cpuinfo_source == 2) { + unsigned long shares; + + rcu_read_lock(); + tg = task_tg(tsk); + shares = scale_load_down(tg->shares); + rcu_read_unlock(); + + /* sysctl_rich_container_cpuinfo_sharesbase can't be 0 */ + cpus = (shares + sysctl_rich_container_cpuinfo_sharesbase - 1) / + sysctl_rich_container_cpuinfo_sharesbase; + cpus = clamp(cpus, 1, (int)num_online_cpus()); + cpumask_clear(pmask); + for (i = 0; i < cpus; i++) + cpumask_set_cpu(i, pmask); + + return; + } + +cpuset_source: + 
/* cpuset.cpus source */ + cpuset_cpus_allowed(tsk, pmask); +} + bool child_cpuacct(struct task_struct *tsk) { struct cpuacct *ca = task_ca(tsk); @@ -1527,6 +1598,7 @@ bool check_rich_container(unsigned int cpu, unsigned int *index, bool *rich_container, unsigned int *total) { struct cpumask cpuset_allowed; + struct task_struct __maybe_unused *scenario; bool in_rich; int i, id = 0; @@ -1538,7 +1610,12 @@ bool check_rich_container(unsigned int cpu, unsigned int *index, *rich_container = true; - rich_container_get_cpuset_cpus(&cpuset_allowed); + read_lock(&tasklist_lock); + scenario = rich_container_get_scenario(); + get_task_struct(scenario); + read_unlock(&tasklist_lock); + rich_container_get_cpus(scenario, &cpuset_allowed); + put_task_struct(scenario); *total = cpumask_weight(&cpuset_allowed); if (cpumask_test_cpu(cpu, &cpuset_allowed)) { diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 663b66266845..82e1616838fa 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3617,4 +3617,7 @@ static inline bool async_load_calc_enabled(void) } #endif +long tg_get_cfs_quota(struct task_group *tg); +long tg_get_cfs_period(struct task_group *tg); + #endif /* _KERNEL_SCHED_SCHED_H */ diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 15aaa24bf595..777baff4d527 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2099,6 +2099,23 @@ static struct ctl_table kern_table[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, + { + .procname = "rich_container_cpuinfo_source", + .data = &sysctl_rich_container_cpuinfo_source, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_TWO, + }, + { + .procname = "rich_container_cpuinfo_sharesbase", + .data = &sysctl_rich_container_cpuinfo_sharesbase, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_douintvec_minmax, + .extra1 = SYSCTL_TWO, + }, #endif { } }; -- Gitee From ba1d5001220ba82b736cebc72d142ce8ce7ca9dd Mon Sep 17 
00:00:00 2001 From: Erwei Deng Date: Wed, 15 Dec 2021 10:10:54 +0800 Subject: [PATCH 0809/2138] anolis: Kconfig: RICH_CONTAINER should select SCHED_SLI. ANBZ: #8657 The SCHED_SLI should be setted automatically when RICH_CONTAINER is setted. If RICH_CONTAINER is opened and SCHED_SLI closed, it makes no sense to open the rich container. Signed-off-by: Erwei Deng Signed-off-by: Yi Tao --- init/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/init/Kconfig b/init/Kconfig index 41d0e2546ac0..065310947818 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1153,6 +1153,7 @@ config RICH_CONTAINER depends on CGROUP_CPUACCT depends on CFS_BANDWIDTH depends on CPUSETS + select SCHED_SLI default n help Make containers feel like VMs. Change the following interface -- Gitee From 1342783e901f59c3c484949d8d0d82807f86873a Mon Sep 17 00:00:00 2001 From: Yi Tao Date: Wed, 6 Sep 2023 12:06:24 +0800 Subject: [PATCH 0810/2138] anolis: sched/fair: Support sched_cfs_statistics in cgroup v2 ANBZ: #8657 Export the following cfs statistics of cgroups: cat cpu.sched_cfs_statistics [Serve time] [On CPU time] [Queue other time] [Queue sibling time] [Queue max time] Signed-off-by: Yi Tao --- kernel/sched/core.c | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8dcf9fa66e24..f654ce6f33c2 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -11802,6 +11802,41 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, } #endif +#ifdef CONFIG_SCHED_SLI +static int cpu_sched_cfs_show(struct seq_file *sf, void *v) +{ + struct task_group *tg = css_tg(seq_css(sf)); + struct sched_entity *se; + struct sched_statistics *stats; + int cpu; + u64 wait_max = 0, wait_sum = 0, wait_sum_other = 0, exec_sum = 0; + + if (!schedstat_enabled()) + goto out_show; + + rcu_read_lock(); + for_each_online_cpu(cpu) { + se = tg->se[cpu]; + if (!se) + continue; + stats = __schedstats_from_se(se); + exec_sum += 
schedstat_val(se->sum_exec_runtime); + wait_sum_other += + schedstat_val(stats->parent_wait_contrib); + wait_sum += schedstat_val(stats->wait_sum); + wait_max = max(wait_max, schedstat_val(stats->wait_max)); + } + rcu_read_unlock(); +out_show: + /* [Serve time] [On CPU time] [Queue other time] [Queue sibling time] [Queue max time] */ + seq_printf(sf, "%lld %lld %lld %lld %lld\n", + exec_sum + wait_sum, exec_sum, wait_sum_other, + wait_sum - wait_sum_other, wait_max); + + return 0; +} +#endif + static struct cftype cpu_files[] = { #ifdef CONFIG_FAIR_GROUP_SCHED { @@ -11863,6 +11898,12 @@ static struct cftype cpu_files[] = { .read_u64 = cpu_ht_ratio_read, .write_u64 = cpu_ht_ratio_write, }, +#endif +#ifdef CONFIG_SCHED_SLI + { + .name = "sched_cfs_statistics", + .seq_show = cpu_sched_cfs_show, + }, #endif { } /* terminate */ }; -- Gitee From 70f044b59fe3cfe1129dd4d483e99c287c0c4120 Mon Sep 17 00:00:00 2001 From: Yi Tao Date: Tue, 12 Sep 2023 17:03:19 +0800 Subject: [PATCH 0811/2138] anolis: sched, cpuacct: Move scheduling latency data from cpuacct to task_group ANBZ: #8657 Because cgroup v2 disables the cpuacct controller, to support retrieving scheduling latency data in cgroup v2, move the relevant structures from cpuacct to task_group. Signed-off-by: Yi Tao --- kernel/sched/core.c | 280 ++++++++++++++++++++++++++++++++++++- kernel/sched/cpuacct.c | 308 +---------------------------------------- kernel/sched/fair.c | 2 +- kernel/sched/sched.h | 61 +++++++- kernel/sched/stats.c | 2 +- 5 files changed, 339 insertions(+), 314 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f654ce6f33c2..890371afa034 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -10106,11 +10106,19 @@ int in_sched_functions(unsigned long addr) } #ifdef CONFIG_CGROUP_SCHED +#ifdef CONFIG_SCHED_SLI +static DEFINE_PER_CPU(struct sched_cgroup_lat_stat_cpu, root_lat_stat_cpu); +#endif + /* * Default task group. * Every task in system belongs to this group at bootup. 
*/ -struct task_group root_task_group; +struct task_group root_task_group = { +#ifdef CONFIG_SCHED_SLI + .lat_stat_cpu = &root_lat_stat_cpu, +#endif +}; LIST_HEAD(task_groups); /* Cacheline aligned slab cache for task_group */ @@ -10584,6 +10592,12 @@ static void sched_free_group(struct task_group *tg) free_fair_sched_group(tg); free_rt_sched_group(tg); autogroup_free(tg); + +#ifdef CONFIG_SCHED_SLI + if (tg->lat_stat_cpu) + free_percpu(tg->lat_stat_cpu); +#endif + kmem_cache_free(task_group_cache, tg); } @@ -10618,6 +10632,12 @@ struct task_group *sched_create_group(struct task_group *parent) if (!alloc_rt_sched_group(tg, parent)) goto err; +#ifdef CONFIG_SCHED_SLI + tg->lat_stat_cpu = alloc_percpu(struct sched_cgroup_lat_stat_cpu); + if (!tg->lat_stat_cpu) + goto err; +#endif + alloc_uclamp_sched_group(tg, parent); #if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) @@ -11803,6 +11823,264 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, #endif #ifdef CONFIG_SCHED_SLI +static DEFINE_STATIC_KEY_TRUE(cpu_no_sched_lat); +static int cpu_sched_lat_enabled_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", !static_key_enabled(&cpu_no_sched_lat)); + return 0; +} + +static int cpu_sched_lat_enabled_open(struct inode *inode, + struct file *file) +{ + return single_open(file, cpu_sched_lat_enabled_show, NULL); +} + +static ssize_t cpu_sched_lat_enabled_write(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + char val = -1; + int ret = count; + + if (count < 1 || *ppos) { + ret = -EINVAL; + goto out; + } + + if (copy_from_user(&val, ubuf, 1)) { + ret = -EFAULT; + goto out; + } + + switch (val) { + case '0': + static_branch_enable(&cpu_no_sched_lat); + break; + case '1': + static_branch_disable(&cpu_no_sched_lat); + break; + default: + ret = -EINVAL; + } + +out: + return ret; +} + +static const struct proc_ops cpu_sched_lat_enabled_fops = { + .proc_open = cpu_sched_lat_enabled_open, + .proc_read = seq_read, + 
.proc_write = cpu_sched_lat_enabled_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + +static int __init init_cpu_sched_lat_enabled(void) +{ + struct proc_dir_entry *ca_dir, *sched_lat_enabled_file; + + ca_dir = proc_mkdir("cpusli", NULL); + if (!ca_dir) + return -ENOMEM; + + sched_lat_enabled_file = proc_create("sched_lat_enabled", 0600, + ca_dir, &cpu_sched_lat_enabled_fops); + if (!sched_lat_enabled_file) { + remove_proc_entry("cpusli", NULL); + return -ENOMEM; + } + + return 0; +} +device_initcall(init_cpu_sched_lat_enabled); + +static inline enum sched_lat_count_t get_sched_lat_count_idx(u64 msecs) +{ + if (msecs < 1) + return SCHED_LAT_0_1; + if (msecs < 10) + return SCHED_LAT_0_1 + (msecs + 2) / 3; + if (msecs < 50) + return SCHED_LAT_7_10 + msecs / 10; + if (msecs < 100) + return SCHED_LAT_50_100; + if (msecs < 1000) + return SCHED_LAT_100_500 + (msecs / 500); + if (msecs < 10000) + return SCHED_LAT_1000_5000 + (msecs / 5000); + + return SCHED_LAT_10000_INF; +} + +struct task_group *cgroup_tg(struct cgroup *cgrp) +{ + return container_of(global_cgroup_css(cgrp, cpu_cgrp_id), + struct task_group, css); +} + +void task_cpu_update_block(struct task_struct *tsk, u64 runtime) +{ + int idx; + enum sched_lat_stat_item s; + struct task_group *tg; + unsigned int msecs; + + if (static_branch_likely(&cpu_no_sched_lat)) + return; + + rcu_read_lock(); + tg = css_tg(task_css(tsk, cpu_cgrp_id)); + if (!tg) { + rcu_read_unlock(); + return; + } + if (tsk->in_iowait) + s = SCHED_LAT_IOBLOCK; + else + s = SCHED_LAT_BLOCK; + + msecs = runtime >> 20; /* Proximately to speed up */ + idx = get_sched_lat_count_idx(msecs); + this_cpu_inc(tg->lat_stat_cpu->item[s][idx]); + this_cpu_inc(tg->lat_stat_cpu->item[s][SCHED_LAT_NR]); + this_cpu_add(tg->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], runtime); + rcu_read_unlock(); +} + +void cpu_update_latency(struct sched_entity *se, u64 delta) +{ + int idx; + enum sched_lat_stat_item s; + unsigned int msecs; + struct 
task_group *tg; + + if (static_branch_likely(&cpu_no_sched_lat)) + return; + + rcu_read_lock(); + tg = se->cfs_rq->tg; + if (!tg) { + rcu_read_unlock(); + return; + } + if (entity_is_task(se)) + s = SCHED_LAT_WAIT; + else + s = SCHED_LAT_CGROUP_WAIT; + + msecs = delta >> 20; /* Proximately to speed up */ + idx = get_sched_lat_count_idx(msecs); + this_cpu_inc(tg->lat_stat_cpu->item[s][idx]); + this_cpu_inc(tg->lat_stat_cpu->item[s][SCHED_LAT_NR]); + this_cpu_add(tg->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], delta); + rcu_read_unlock(); +} + +#define SCHED_LAT_STAT_SMP_WRITE(name, sidx) \ +static void smp_write_##name(void *info) \ +{ \ + struct task_group *tg = (struct task_group *)info; \ + int i; \ + \ + for (i = SCHED_LAT_0_1; i < SCHED_LAT_NR_COUNT; i++) \ + this_cpu_write(tg->lat_stat_cpu->item[sidx][i], 0); \ +} \ + +SCHED_LAT_STAT_SMP_WRITE(sched_wait_latency, SCHED_LAT_WAIT); +SCHED_LAT_STAT_SMP_WRITE(sched_wait_cgroup_latency, SCHED_LAT_CGROUP_WAIT); +SCHED_LAT_STAT_SMP_WRITE(sched_block_latency, SCHED_LAT_BLOCK); +SCHED_LAT_STAT_SMP_WRITE(sched_ioblock_latency, SCHED_LAT_IOBLOCK); + +smp_call_func_t smp_sched_lat_write_funcs[] = { + smp_write_sched_wait_latency, + smp_write_sched_block_latency, + smp_write_sched_ioblock_latency, + smp_write_sched_wait_cgroup_latency +}; + +int sched_lat_stat_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct cgroup *cgrp = css->cgroup; + struct task_group *tg = cgroup_tg(cgrp); + enum sched_lat_stat_item idx = cft->private; + smp_call_func_t func = smp_sched_lat_write_funcs[idx]; + + if (unlikely(!tg)) { + WARN_ONCE(1, "cgroup \"cpu,cpuacct\" are not bound together"); + return -EOPNOTSUPP; + } + + if (val != 0) + return -EINVAL; + + func((void *)tg); + smp_call_function(func, (void *)tg, 1); + + return 0; +} + +static u64 sched_lat_stat_gather(struct task_group *tg, + enum sched_lat_stat_item sidx, + enum sched_lat_count_t cidx) +{ + u64 sum = 0; + int cpu; + + for_each_possible_cpu(cpu) + sum 
+= per_cpu_ptr(tg->lat_stat_cpu, cpu)->item[sidx][cidx]; + + return sum; +} + +int sched_lat_stat_show(struct seq_file *sf, void *v) +{ + struct task_group *tg = cgroup_tg(seq_css(sf)->cgroup); + enum sched_lat_stat_item s = seq_cft(sf)->private; + + if (unlikely(!tg)) { + WARN_ONCE(1, "cgroup \"cpu,cpuacct\" are not bound together"); + return -EOPNOTSUPP; + } + + /* CFS scheduling latency cgroup and task histgrams */ + seq_printf(sf, "0-1ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_0_1)); + seq_printf(sf, "1-4ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_1_4)); + seq_printf(sf, "4-7ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_4_7)); + seq_printf(sf, "7-10ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_7_10)); + seq_printf(sf, "10-20ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_10_20)); + seq_printf(sf, "20-30ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_20_30)); + seq_printf(sf, "30-40ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_30_40)); + seq_printf(sf, "40-50ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_40_50)); + seq_printf(sf, "50-100ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_50_100)); + seq_printf(sf, "100-500ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_100_500)); + seq_printf(sf, "500-1000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_500_1000)); + seq_printf(sf, "1000-5000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_1000_5000)); + seq_printf(sf, "5000-10000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_5000_10000)); + seq_printf(sf, ">=10000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_10000_INF)); + seq_printf(sf, "total(ms): \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_TOTAL) / 1000000); + seq_printf(sf, "nr: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_NR)); + + return 0; +} + static int cpu_sched_cfs_show(struct seq_file *sf, void *v) { struct task_group *tg = css_tg(seq_css(sf)); diff --git 
a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 71abcc0b79c5..6d87a617d00e 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -32,69 +32,6 @@ struct cpuacct_alistats { } ____cacheline_aligned; #endif -enum sched_lat_stat_item { - SCHED_LAT_WAIT, - SCHED_LAT_BLOCK, - SCHED_LAT_IOBLOCK, - SCHED_LAT_CGROUP_WAIT, - SCHED_LAT_NR_STAT -}; - -/* - * [0, 1ms) - * [1, 4ms) - * [4, 7ms) - * [7, 10ms) - * [10, 100ms) - * [100, 500ms) - * [500, 1000ms) - * [1000, 5000ms) - * [5000, 10000ms) - * [10000ms, INF) - * total(ms) - */ -/* Scheduler latency histogram distribution, in milliseconds */ -enum sched_lat_count_t { - SCHED_LAT_0_1, - SCHED_LAT_1_4, - SCHED_LAT_4_7, - SCHED_LAT_7_10, - SCHED_LAT_10_20, - SCHED_LAT_20_30, - SCHED_LAT_30_40, - SCHED_LAT_40_50, - SCHED_LAT_50_100, - SCHED_LAT_100_500, - SCHED_LAT_500_1000, - SCHED_LAT_1000_5000, - SCHED_LAT_5000_10000, - SCHED_LAT_10000_INF, - SCHED_LAT_TOTAL, - SCHED_LAT_NR, - SCHED_LAT_NR_COUNT, -}; - -struct sched_cgroup_lat_stat_cpu { - unsigned long item[SCHED_LAT_NR_STAT][SCHED_LAT_NR_COUNT]; -}; - -static inline enum sched_lat_count_t get_sched_lat_count_idx(u64 msecs) -{ - if (msecs < 1) - return SCHED_LAT_0_1; - if (msecs < 10) - return SCHED_LAT_0_1 + (msecs + 2) / 3; - if (msecs < 50) - return SCHED_LAT_7_10 + msecs / 10; - if (msecs < 100) - return SCHED_LAT_50_100; - if (msecs < 1000) - return SCHED_LAT_100_500 + (msecs / 500); - if (msecs < 10000) - return SCHED_LAT_1000_5000 + (msecs / 5000); - - return SCHED_LAT_10000_INF; -} /* track CPU usage of a group of tasks and its child groups */ struct cpuacct { @@ -105,7 +42,6 @@ struct cpuacct { struct kernel_cpustat __percpu *cpustat; #ifdef CONFIG_SCHED_SLI struct cpuacct_alistats __percpu *alistats; - struct sched_cgroup_lat_stat_cpu __percpu *lat_stat_cpu; struct list_head sli_list; bool sli_enabled; u64 next_load_update; @@ -142,7 +78,6 @@ static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage); static DEFINE_PER_CPU(struct cpuacct_prev_cputime, 
root_cpuacct_prev_cputime); #ifdef CONFIG_SCHED_SLI static DEFINE_PER_CPU(struct cpuacct_alistats, root_alistats); -static DEFINE_PER_CPU(struct sched_cgroup_lat_stat_cpu, root_lat_stat_cpu); #endif static struct cpuacct root_cpuacct = { @@ -151,82 +86,10 @@ static struct cpuacct root_cpuacct = { .cpuusage = &root_cpuacct_cpuusage, #ifdef CONFIG_SCHED_SLI .alistats = &root_alistats, - .lat_stat_cpu = &root_lat_stat_cpu, #endif }; #ifdef CONFIG_SCHED_SLI -static DEFINE_STATIC_KEY_TRUE(cpuacct_no_sched_lat); -static int cpuacct_sched_lat_enabled_show(struct seq_file *m, void *v) -{ - seq_printf(m, "%d\n", !static_key_enabled(&cpuacct_no_sched_lat)); - return 0; -} - -static int cpuacct_sched_lat_enabled_open(struct inode *inode, - struct file *file) -{ - return single_open(file, cpuacct_sched_lat_enabled_show, NULL); -} - -static ssize_t cpuacct_sched_lat_enabled_write(struct file *file, - const char __user *ubuf, - size_t count, loff_t *ppos) -{ - char val = -1; - int ret = count; - - if (count < 1 || *ppos) { - ret = -EINVAL; - goto out; - } - - if (copy_from_user(&val, ubuf, 1)) { - ret = -EFAULT; - goto out; - } - - switch (val) { - case '0': - static_branch_enable(&cpuacct_no_sched_lat); - break; - case '1': - static_branch_disable(&cpuacct_no_sched_lat); - break; - default: - ret = -EINVAL; - } - -out: - return ret; -} - -static const struct proc_ops cpuacct_sched_lat_enabled_fops = { - .proc_open = cpuacct_sched_lat_enabled_open, - .proc_read = seq_read, - .proc_write = cpuacct_sched_lat_enabled_write, - .proc_lseek = seq_lseek, - .proc_release = single_release, -}; - -static int __init init_cpuacct_sched_lat_enabled(void) -{ - struct proc_dir_entry *ca_dir, *sched_lat_enabled_file; - - ca_dir = proc_mkdir("cpusli", NULL); - if (!ca_dir) - return -ENOMEM; - - sched_lat_enabled_file = proc_create("sched_lat_enabled", 0600, - ca_dir, &cpuacct_sched_lat_enabled_fops); - if (!sched_lat_enabled_file) { - remove_proc_entry("cpusli", NULL); - return -ENOMEM; - } - - 
return 0; -} -device_initcall(init_cpuacct_sched_lat_enabled); void task_ca_increase_nr_migrations(struct task_struct *tsk) { @@ -239,68 +102,6 @@ void task_ca_increase_nr_migrations(struct task_struct *tsk) rcu_read_unlock(); } -void task_ca_update_block(struct task_struct *tsk, u64 runtime) -{ - int idx; - enum sched_lat_stat_item s; - struct cpuacct *ca; - unsigned int msecs; - - if (static_branch_likely(&cpuacct_no_sched_lat)) - return; - - rcu_read_lock(); - ca = task_ca(tsk); - if (!ca) { - rcu_read_unlock(); - return; - } - if (tsk->in_iowait) - s = SCHED_LAT_IOBLOCK; - else - s = SCHED_LAT_BLOCK; - - msecs = runtime >> 20; /* Proximately to speed up */ - idx = get_sched_lat_count_idx(msecs); - this_cpu_inc(ca->lat_stat_cpu->item[s][idx]); - this_cpu_inc(ca->lat_stat_cpu->item[s][SCHED_LAT_NR]); - this_cpu_add(ca->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], runtime); - rcu_read_unlock(); -} - -void cpuacct_update_latency(struct sched_entity *se, u64 delta) -{ - int idx; - enum sched_lat_stat_item s; - struct cpuacct *ca; - unsigned int msecs; - struct task_group *tg; - - if (static_branch_likely(&cpuacct_no_sched_lat)) - return; - - tg = se->cfs_rq->tg; - if (task_group_is_autogroup(tg)) - return; - - rcu_read_lock(); - ca = cgroup_ca(tg->css.cgroup); - if (!ca) { - rcu_read_unlock(); - return; - } - if (entity_is_task(se)) - s = SCHED_LAT_WAIT; - else - s = SCHED_LAT_CGROUP_WAIT; - - msecs = delta >> 20; /* Proximately to speed up */ - idx = get_sched_lat_count_idx(msecs); - this_cpu_inc(ca->lat_stat_cpu->item[s][idx]); - this_cpu_inc(ca->lat_stat_cpu->item[s][SCHED_LAT_NR]); - this_cpu_add(ca->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], delta); - rcu_read_unlock(); -} #endif #ifdef CONFIG_SCHED_SLI @@ -381,16 +182,11 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) goto out_free_cpustat; #ifdef CONFIG_SCHED_SLI - ca->lat_stat_cpu = alloc_percpu(struct sched_cgroup_lat_stat_cpu); - if (!ca->lat_stat_cpu) - goto out_free_pre_cputime; - - 
INIT_LIST_HEAD(&ca->sli_list); ca->alistats = alloc_percpu(struct cpuacct_alistats); if (!ca->alistats) - goto out_free_lat_stat_cpu; + goto out_free_pre_cputime; #endif for_each_possible_cpu(i) { @@ -407,8 +203,6 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) return &ca->css; #ifdef CONFIG_SCHED_SLI -out_free_lat_stat_cpu: - free_percpu(ca->lat_stat_cpu); out_free_pre_cputime: free_percpu(ca->prev_cputime); #endif @@ -439,7 +233,6 @@ static void cpuacct_css_free(struct cgroup_subsys_state *css) free_percpu(ca->cpuusage); #ifdef CONFIG_SCHED_SLI free_percpu(ca->alistats); - free_percpu(ca->lat_stat_cpu); #endif kfree(ca); } @@ -664,12 +457,6 @@ static void __get_cgroup_avenrun(struct cpuacct *ca, unsigned long *loads, loads[2] = (avenrun[2] + offset) << shift; } -static inline struct task_group *cgroup_tg(struct cgroup *cgrp) -{ - return container_of(global_cgroup_css(cgrp, cpu_cgrp_id), - struct task_group, css); -} - static inline unsigned long nr_uninterruptible(void) { unsigned long i, sum = 0; @@ -1135,99 +922,6 @@ static int cpuacct_sched_cfs_show(struct seq_file *sf, void *v) return 0; } - -#define SCHED_LAT_STAT_SMP_WRITE(name, sidx) \ -static void smp_write_##name(void *info) \ -{ \ - struct cpuacct *ca = (struct cpuacct *)info; \ - int i; \ - \ - for (i = SCHED_LAT_0_1; i < SCHED_LAT_NR_COUNT; i++) \ - this_cpu_write(ca->lat_stat_cpu->item[sidx][i], 0); \ -} \ - -SCHED_LAT_STAT_SMP_WRITE(sched_wait_latency, SCHED_LAT_WAIT); -SCHED_LAT_STAT_SMP_WRITE(sched_wait_cgroup_latency, SCHED_LAT_CGROUP_WAIT); -SCHED_LAT_STAT_SMP_WRITE(sched_block_latency, SCHED_LAT_BLOCK); -SCHED_LAT_STAT_SMP_WRITE(sched_ioblock_latency, SCHED_LAT_IOBLOCK); - -smp_call_func_t smp_sched_lat_write_funcs[] = { - smp_write_sched_wait_latency, - smp_write_sched_wait_cgroup_latency, - smp_write_sched_block_latency, - smp_write_sched_ioblock_latency -}; - -static int sched_lat_stat_write(struct cgroup_subsys_state *css, - struct cftype *cft, u64 val) -{ - struct cpuacct *ca 
= css_ca(css); - enum sched_lat_stat_item idx = cft->private; - smp_call_func_t func = smp_sched_lat_write_funcs[idx]; - - if (val != 0) - return -EINVAL; - - func((void *)ca); - smp_call_function(func, (void *)ca, 1); - - return 0; -} - -static u64 sched_lat_stat_gather(struct cpuacct *ca, - enum sched_lat_stat_item sidx, - enum sched_lat_count_t cidx) -{ - u64 sum = 0; - int cpu; - - for_each_possible_cpu(cpu) - sum += per_cpu_ptr(ca->lat_stat_cpu, cpu)->item[sidx][cidx]; - - return sum; -} - -static int sched_lat_stat_show(struct seq_file *sf, void *v) -{ - struct cpuacct *ca = css_ca(seq_css(sf)); - enum sched_lat_stat_item s = seq_cft(sf)->private; - - /* CFS scheduling latency cgroup and task histgrams */ - seq_printf(sf, "0-1ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_0_1)); - seq_printf(sf, "1-4ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_1_4)); - seq_printf(sf, "4-7ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_4_7)); - seq_printf(sf, "7-10ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_7_10)); - seq_printf(sf, "10-20ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_10_20)); - seq_printf(sf, "20-30ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_20_30)); - seq_printf(sf, "30-40ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_30_40)); - seq_printf(sf, "40-50ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_40_50)); - seq_printf(sf, "50-100ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_50_100)); - seq_printf(sf, "100-500ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_100_500)); - seq_printf(sf, "500-1000ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_500_1000)); - seq_printf(sf, "1000-5000ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_1000_5000)); - seq_printf(sf, "5000-10000ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_5000_10000)); - seq_printf(sf, ">=10000ms: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_10000_INF)); - seq_printf(sf, 
"total(ms): \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_TOTAL) / 1000000); - seq_printf(sf, "nr: \t%llu\n", - sched_lat_stat_gather(ca, s, SCHED_LAT_NR)); - - return 0; -} #endif static struct cftype files[] = { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 62cd0ef8ed8d..f64862a1978d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1306,7 +1306,7 @@ update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) if (entity_is_task(se)) p = task_of(se); - cpuacct_update_latency(se, delta); + cpu_update_latency(se, delta); __update_stats_wait_end(rq_of(cfs_rq), p, stats); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 82e1616838fa..c6d22cef8dd6 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -348,6 +348,52 @@ struct rt_rq; extern struct list_head task_groups; +enum sched_lat_stat_item { + SCHED_LAT_WAIT, + SCHED_LAT_BLOCK, + SCHED_LAT_IOBLOCK, + SCHED_LAT_CGROUP_WAIT, + SCHED_LAT_NR_STAT +}; + +/* + * [0, 1ms) + * [1, 4ms) + * [4, 7ms) + * [7, 10ms) + * [10, 100ms) + * [100, 500ms) + * [500, 1000ms) + * [1000, 5000ms) + * [5000, 10000ms) + * [10000ms, INF) + * total(ms) + */ +/* Scheduler latency histogram distribution, in milliseconds */ +enum sched_lat_count_t { + SCHED_LAT_0_1, + SCHED_LAT_1_4, + SCHED_LAT_4_7, + SCHED_LAT_7_10, + SCHED_LAT_10_20, + SCHED_LAT_20_30, + SCHED_LAT_30_40, + SCHED_LAT_40_50, + SCHED_LAT_50_100, + SCHED_LAT_100_500, + SCHED_LAT_500_1000, + SCHED_LAT_1000_5000, + SCHED_LAT_5000_10000, + SCHED_LAT_10000_INF, + SCHED_LAT_TOTAL, + SCHED_LAT_NR, + SCHED_LAT_NR_COUNT, +}; + +struct sched_cgroup_lat_stat_cpu { + unsigned long item[SCHED_LAT_NR_STAT][SCHED_LAT_NR_COUNT]; +}; + struct cfs_bandwidth { #ifdef CONFIG_CFS_BANDWIDTH raw_spinlock_t lock; @@ -434,6 +480,9 @@ struct task_group { unsigned int ht_ratio; #endif +#ifdef CONFIG_SCHED_SLI + struct sched_cgroup_lat_stat_cpu __percpu *lat_stat_cpu; +#endif }; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -3600,15 +3649,19 @@ 
extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); extern void task_ca_increase_nr_migrations(struct task_struct *tsk); -void cpuacct_update_latency(struct sched_entity *se, u64 delta); -void task_ca_update_block(struct task_struct *tsk, u64 runtime); +void cpu_update_latency(struct sched_entity *se, u64 delta); +void task_cpu_update_block(struct task_struct *tsk, u64 runtime); void calc_cgroup_load(void); bool async_load_calc_enabled(void); +struct task_group *cgroup_tg(struct cgroup *cgrp); +int sched_lat_stat_show(struct seq_file *sf, void *v); +int sched_lat_stat_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val); #else static inline void task_ca_increase_nr_migrations(struct task_struct *tsk) { } -static inline void cpuacct_update_latency(struct sched_entity *se, +static inline void cpu_update_latency(struct sched_entity *se, u64 delta) { } -static inline void task_ca_update_block(struct task_struct *tsk, +static inline void task_cpu_update_block(struct task_struct *tsk, u64 runtime) { } static inline void calc_cgroup_load(void) { } static inline bool async_load_calc_enabled(void) diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c index e2214965f26c..6fd147039ebd 100644 --- a/kernel/sched/stats.c +++ b/kernel/sched/stats.c @@ -85,7 +85,7 @@ void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p, if (p) { if (p->in_iowait) { - task_ca_update_block(p, delta); + task_cpu_update_block(p, delta); __schedstat_add(stats->iowait_sum, delta); __schedstat_inc(stats->iowait_count); trace_sched_stat_iowait(p, delta); -- Gitee From dcf06decee086a082d018ee698281bca182b9300 Mon Sep 17 00:00:00 2001 From: Yi Tao Date: Tue, 12 Sep 2023 17:39:56 +0800 Subject: [PATCH 0812/2138] anolis: sched: Support latency histograms in cpu controller ANBZ: #8657 Export latency histograms in cpu 
controller. The relevant interface names are as follows: cpu.wait_latency cpu.cgroup_wait_latency cpu.block_latency cpu.ioblock_latency Signed-off-by: Yi Tao --- kernel/sched/core.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 890371afa034..7b76f57ee83a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -12182,6 +12182,30 @@ static struct cftype cpu_files[] = { .name = "sched_cfs_statistics", .seq_show = cpu_sched_cfs_show, }, + { + .name = "wait_latency", + .private = SCHED_LAT_WAIT, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "cgroup_wait_latency", + .private = SCHED_LAT_CGROUP_WAIT, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "block_latency", + .private = SCHED_LAT_BLOCK, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "ioblock_latency", + .private = SCHED_LAT_IOBLOCK, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, #endif { } /* terminate */ }; -- Gitee From 36c271ba6cd774be6a2103f43199d37101a113d1 Mon Sep 17 00:00:00 2001 From: "Carrie.Cai" Date: Sun, 7 Apr 2024 15:51:32 +0800 Subject: [PATCH 0813/2138] anolis: crypto:update Mont-TSSE driver for new function ANBZ: #8710 Mont-TSSE(TM) is a high speed crypto algorithm accelerator, it support SM2/3/4, AES and SHA algorithms. The new function support users to update firmware file by sysfs.Besides, the coding standard problems have been fixed in Mont-TSSE driver. 
Performance: SM4-CBC: 179Gbps; SM3-HASH:195Gbps; SM2-SIGN:955223ops;SM2-VERIFY:345385ops; SM2-ENC:207253ops; SM2-DEC:239520ops Signed-off-by: Carrie.Cai Reviewed-by: Tianjia Zhang Link: https://gitee.com/anolis/cloud-kernel/pulls/3014 --- drivers/crypto/montage/tsse/tsse_dev.h | 3 +- drivers/crypto/montage/tsse/tsse_dev_drv.c | 65 +++++++- drivers/crypto/montage/tsse/tsse_dev_mgr.c | 30 +--- drivers/crypto/montage/tsse/tsse_fw_service.c | 103 ++++++------ drivers/crypto/montage/tsse/tsse_fw_service.h | 6 +- drivers/crypto/montage/tsse/tsse_ipc.c | 153 +++++++++--------- drivers/crypto/montage/tsse/tsse_ipc.h | 15 +- drivers/crypto/montage/tsse/tsse_service.c | 8 +- 8 files changed, 202 insertions(+), 181 deletions(-) diff --git a/drivers/crypto/montage/tsse/tsse_dev.h b/drivers/crypto/montage/tsse/tsse_dev.h index d1dafee61300..c16d2ae7c414 100644 --- a/drivers/crypto/montage/tsse/tsse_dev.h +++ b/drivers/crypto/montage/tsse/tsse_dev.h @@ -18,6 +18,7 @@ #define TSSE_PCI_MAX_BARS 4 #define TSSE_FW_VERSION_LEN 32 + struct tsse_bar { void __iomem *virt_addr; resource_size_t addr; @@ -58,6 +59,7 @@ struct tsse_dev { void *mbx_hw; const struct firmware *fw; char fw_version[TSSE_FW_VERSION_LEN]; + bool fw_version_exist; }; #define TSSEDEV_TO_DEV(tssedev) (&((tssedev)->tsse_pci_dev.pci_dev->dev)) #define TSSE_DEV_BARS(tssedev) ((tssedev)->tsse_pci_dev.bars) @@ -72,7 +74,6 @@ int tsse_devmgr_add_dev(struct tsse_dev *tsse_dev); void tsse_devmgr_rm_dev(struct tsse_dev *tdev); int tsse_prepare_restart_dev(struct tsse_dev *tdev); int tsse_start_dev(struct tsse_dev *tdev); -struct tsse_dev *get_tssedev(int id); static inline struct tsse_dev *pci_to_tsse_dev(struct pci_dev *pci_dev) { diff --git a/drivers/crypto/montage/tsse/tsse_dev_drv.c b/drivers/crypto/montage/tsse/tsse_dev_drv.c index 9e914576a129..86c619d64f5e 100644 --- a/drivers/crypto/montage/tsse/tsse_dev_drv.c +++ b/drivers/crypto/montage/tsse/tsse_dev_drv.c @@ -11,18 +11,25 @@ #include #include #include +#include 
#include "tsse_dev_drv.h" #include "tsse_vuart.h" #include "tsse_ipc.h" #include "tsse_fw_service.h" +#define CLUSTER_SLOT_CONFIG_OFFSET 0x5780000 +#define QPAIR_SETTING_OFFSET 0x50000 +#define BAR_START 2 +#define BAR_END 4 + static DEFINE_IDA(tsse_ida); static inline void tsse_qpair_enable_pf(struct tsse_dev *tdev, bool enable) { writel(enable ? 1 : 0, - TSSE_DEV_BARS(tdev)[2].virt_addr + 0x5780000 + 0x50000); + TSSE_DEV_BARS(tdev)[2].virt_addr + + CLUSTER_SLOT_CONFIG_OFFSET + QPAIR_SETTING_OFFSET); } static int tsse_sriov_disable(struct tsse_dev *tdev) { @@ -107,6 +114,40 @@ static int tsse_sriov_configure(struct pci_dev *pdev, int num_vfs_param) return num_vfs_param; } +/** + * tsse_image_load_store() - This function will be called when user + * writes string to /sys/bus/pci/devices/.../tsse_image_load. + * Driver will always loads /lib/firmware/tsse_firmware.bin. + * @dev: device + * @attr: device attribute + * @buf: string that user writes + * @count: string length that user writes + * Return: the number of bytes used from the buffer, here it is just the count argument. 
+ */ +static ssize_t tsse_image_load_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = NULL; + struct tsse_dev *tdev = NULL; + + pdev = container_of(dev, struct pci_dev, dev); + if (pdev) + tdev = pci_to_tsse_dev(pdev); + if (buf && count && tdev) { + tsse_dev_info(tdev, "receive command to load firmware %s\n", TSSE_FIRMWARE); + if (!tsse_fw_load(pdev, TSSE_FIRMWARE, &tdev->fw)) { + if (!get_firmware_version(tdev->fw, tdev->fw_version)) + tdev->fw_version_exist = true; + if (tsse_fw_manual_load_ipc(pdev)) + dev_err(&pdev->dev, "%s %d: firmware update failed\n", + __func__, __LINE__); + } + } + return count; +} + +DEVICE_ATTR_WO(tsse_image_load); + static int device_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int status = 0; @@ -163,7 +204,7 @@ static int device_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_err; } - for (bar = 2; bar < 4;) { + for (bar = BAR_START; bar < BAR_END;) { TSSE_DEV_BARS(tdev)[bar].addr = pci_resource_start(pdev, bar); TSSE_DEV_BARS(tdev)[bar].size = pci_resource_len(pdev, bar); TSSE_DEV_BARS(tdev) @@ -219,9 +260,13 @@ static int device_probe(struct pci_dev *pdev, const struct pci_device_id *id) status = -EFAULT; goto out_err_port_init; } + + tdev->fw_version_exist = false; /* Its result not break driver init process */ - if (!tsse_fw_load(pdev)) - get_firmware_version((char *)tdev->fw->data, tdev->fw->size, tdev->fw_version); + if (!tsse_fw_load(pdev, TSSE_FIRMWARE, &tdev->fw)) { + if (!get_firmware_version(tdev->fw, tdev->fw_version)) + tdev->fw_version_exist = true; + } if (tsse_ipc_init(pdev)) { dev_err(&pdev->dev, @@ -231,13 +276,22 @@ static int device_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_err_ipc; } + if (sysfs_create_file(&pdev->dev.kobj, &dev_attr_tsse_image_load.attr)) { + dev_err(&pdev->dev, + "%s %d: sysfs_create_file failed for tsse image load.\n", + __func__, __LINE__); + status = -EFAULT; 
+ goto out_err_image_load; + } + tsse_dev_info(tdev, "successful\n"); pci_read_config_dword(pdev, 0x720, &tmp_val); tsse_dev_dbg(tdev, "the value of FILTER_MASK_2_REG is 0x%x\n", tmp_val); return 0; - +out_err_image_load: + tsse_ipc_deinit(tdev); out_err_ipc: vuart_uninit_port(pdev); out_err_port_init: @@ -261,6 +315,7 @@ static void device_remove(struct pci_dev *pdev) release_firmware(tdev->fw); tdev->fw = NULL; } + sysfs_remove_file(&pdev->dev.kobj, &dev_attr_tsse_image_load.attr); tsse_ipc_deinit(tdev); vuart_uninit_port(pdev); tsse_devmgr_rm_dev(tdev); diff --git a/drivers/crypto/montage/tsse/tsse_dev_mgr.c b/drivers/crypto/montage/tsse/tsse_dev_mgr.c index 159f75c8f46f..39553eb96832 100644 --- a/drivers/crypto/montage/tsse/tsse_dev_mgr.c +++ b/drivers/crypto/montage/tsse/tsse_dev_mgr.c @@ -34,12 +34,6 @@ static inline void tsse_list_add(struct list_head *new, struct list_head *prev, WRITE_ONCE(prev->next, new); } -static inline void tsse_list_add_tail(struct list_head *new, - struct list_head *head) -{ - tsse_list_add(new, head->prev, head); -} - static int tsse_dev_pf_get(struct tsse_dev *vf_tsse_dev) { int ret = 0; @@ -104,7 +98,7 @@ void tsse_dev_put(struct tsse_dev *tdev) } } -int tsse_stop_dev(struct tsse_dev *tdev, bool busy_exit) +static int tsse_stop_dev(struct tsse_dev *tdev, bool busy_exit) { int times, max_retry = 150; @@ -172,13 +166,11 @@ int tsse_start_dev(struct tsse_dev *tdev) clear_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); return ret; } -EXPORT_SYMBOL_GPL(tsse_start_dev); int tsse_prepare_restart_dev(struct tsse_dev *tdev) { return tsse_stop_dev(tdev, false); } -EXPORT_SYMBOL_GPL(tsse_prepare_restart_dev); void tsse_devmgr_rm_dev(struct tsse_dev *tdev) { @@ -186,7 +178,6 @@ void tsse_devmgr_rm_dev(struct tsse_dev *tdev) tsse_dev_free_irq_vectors(tdev); msleep(300); } -EXPORT_SYMBOL_GPL(tsse_devmgr_rm_dev); int tsse_devmgr_add_dev(struct tsse_dev *tdev) { @@ -203,27 +194,8 @@ int tsse_devmgr_add_dev(struct tsse_dev *tdev) } return ret; } 
-EXPORT_SYMBOL_GPL(tsse_devmgr_add_dev); struct list_head *tsse_devmgr_get_head(void) { return &tsse_dev_table; } - -struct tsse_dev *get_tssedev(int id) -{ - struct list_head *itr; - struct tsse_dev *ptr; - - mutex_lock(&tsse_dev_table_lock); - - list_for_each(itr, &tsse_dev_table) { - ptr = list_entry(itr, struct tsse_dev, list); - break; - } - - mutex_unlock(&tsse_dev_table_lock); - - return ptr; -} -EXPORT_SYMBOL_GPL(get_tssedev); diff --git a/drivers/crypto/montage/tsse/tsse_fw_service.c b/drivers/crypto/montage/tsse/tsse_fw_service.c index fc3907a7c503..486352bc8f84 100644 --- a/drivers/crypto/montage/tsse/tsse_fw_service.c +++ b/drivers/crypto/montage/tsse/tsse_fw_service.c @@ -21,25 +21,28 @@ #include "tsse_service.h" #define SEARCH_PATTERN "MT_CFG_BUILD_VERSION_DETAIL" -#define SEARCH_PATTERN_LEN 28 +#define SPACE_CH ' ' -int fw_send_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg) +static int fw_send_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg) { u8 *h2d; u32 int_reg; - u32 rc; mutex_lock(&tsseipc->list_lock); int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); if ((int_reg & IPC_REGISTER_INT_SET) != 0) { - rc = -1; mutex_unlock(&tsseipc->list_lock); - return rc; + return -EFAULT; + } + if (msg->header.i_len < sizeof(struct ipc_header) + + sizeof(struct msg_info) + sizeof(struct fw_load)) { + dev_err(tsseipc->dev, "msg format error\n"); + return -EFAULT; } h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET); memcpy_toio(h2d, msg, sizeof(struct ipc_header)); - memcpy_toio(h2d + sizeof(struct ipc_header), (u32 *)msg->i_data, + memcpy_toio(h2d + sizeof(struct ipc_header), (u8 *)msg->i_data, msg->header.i_len - sizeof(struct ipc_header)); writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); @@ -48,41 +51,30 @@ int fw_send_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg) return 0; } -void fw_free(void *msg_t) -{ - struct tsse_msg *tssemsg; - struct ipc_msg *payload; - - payload = (struct ipc_msg *)msg_t; - tssemsg = 
container_of(payload, struct tsse_msg, ipc_payload); - - kvfree(tssemsg); -} - -int get_firmware_version(char *fw_buffer, uint32_t buffer_len, char *fw_version) +/** + * get_firmware_version() - Get version information from firmware + * @fw: firmware pointer + * @fw_version_out: firmware version string output + * Return: 0 on success, error code otherwise + */ +int get_firmware_version(const struct firmware *fw, char *fw_version_out) { - char *pattern; - char *space_ch = " "; + const char *pattern = SEARCH_PATTERN; + const uint8_t *fw_buffer = fw->data; uint32_t pattern_i = 0, buffer_i = 0; - uint32_t pattern_len = SEARCH_PATTERN_LEN - 1; // Not include "\0" + uint32_t pattern_len = strlen(pattern); // Not include "\0" uint32_t version_start = 0; uint32_t version_len = 0; - pattern = kzalloc(SEARCH_PATTERN_LEN, GFP_KERNEL); - if (!pattern) - return -1; - - snprintf(pattern, SEARCH_PATTERN_LEN, SEARCH_PATTERN); - - while (buffer_i < buffer_len) { - if (pattern[pattern_i] == fw_buffer[buffer_i]) { + while (buffer_i < fw->size) { + if (pattern[pattern_i] == (char) fw_buffer[buffer_i]) { buffer_i++; pattern_i++; } if (pattern_i == pattern_len) { break; // pattern found - } else if ((buffer_i < buffer_len) && - (pattern[pattern_i] != fw_buffer[buffer_i])) { + } else if ((buffer_i < fw->size) && + (pattern[pattern_i] != (char) fw_buffer[buffer_i])) { // mismatch after pattern_i matches if (pattern_i != 0) { // since the pattern has no common prefix, when mismatch, @@ -93,22 +85,29 @@ int get_firmware_version(char *fw_buffer, uint32_t buffer_len, char *fw_version) } } } - kfree(pattern); if (pattern_i == pattern_len) { buffer_i++; version_start = buffer_i; - while (buffer_i < buffer_len) { - if (fw_buffer[buffer_i] == space_ch[0]) { + while (buffer_i < fw->size) { + if (fw_buffer[buffer_i] == SPACE_CH) { version_len = buffer_i - version_start; - strscpy(fw_version, fw_buffer + version_start, version_len + 1); + if (version_len >= TSSE_FW_VERSION_LEN - 1) + version_len = 
TSSE_FW_VERSION_LEN - 2; + strscpy(fw_version_out, fw_buffer + version_start, version_len + 1); return 0; } buffer_i++; } } - return -1; + return -EINVAL; } +/** + * fw_service() - Firmware service to handle IPC message from mainCPU. + * It will write init or manual load firmware to PCIe BAR and send message back. + * @tsseipc_t: pointer to a structure used for IPC + * @msg_t: pointer to IPC message + */ void fw_service(void *tsseipc_t, void *msg_t) { void __iomem *fw; @@ -120,16 +119,15 @@ void fw_service(void *tsseipc_t, void *msg_t) struct ipc_msg *msg = (struct ipc_msg *)msg_t; task_offset = sizeof(struct msg_info); - fw_task = (struct fw_load *)(msg->i_data + - task_offset / sizeof(uint32_t)); - + fw_task = (struct fw_load *)((uint8_t *)msg->i_data + task_offset); tdev = pci_to_tsse_dev(tsseipc->pdev); + if (!tdev || !tdev->fw) { fw_task->result = 1; fw_task->size = 0; dev_info(tsseipc->dev, "firmware loading failed\n"); - fw_send_msg(tsseipc, msg); - fw_free(msg); + if (fw_send_msg(tsseipc, msg)) + dev_err(tsseipc->dev, "notify device failed\n"); return; } @@ -140,24 +138,33 @@ void fw_service(void *tsseipc_t, void *msg_t) memcpy_toio((u8 *)fw, tdev->fw->data, size); dev_info(tsseipc->dev, "firmware loading done\n"); - fw_send_msg(tsseipc, msg); - fw_free(msg); + if (fw_send_msg(tsseipc, msg)) + dev_err(tsseipc->dev, "notify device failed\n"); - dev_info(tsseipc->dev, "firmware version: %s\n", tdev->fw_version); + if (tdev->fw_version_exist) + dev_info(tsseipc->dev, "firmware version: %s\n", tdev->fw_version); if (tdev->fw) { release_firmware(tdev->fw); tdev->fw = NULL; + memset(tdev->fw_version, 0, TSSE_FW_VERSION_LEN); + tdev->fw_version_exist = false; } } -int tsse_fw_load(struct pci_dev *pdev) +/** + * tsse_fw_load() - Load firmware from /lib/firmware + * @pdev: pci device + * @name: firmware file name + * @fw: pointer to firmware pointer + * Return: 0 on success, error code otherwise + */ +int tsse_fw_load(struct pci_dev *pdev, const char *name, const 
struct firmware **fw) { int result; - struct tsse_dev *tdev = pci_to_tsse_dev(pdev); - result = request_firmware(&tdev->fw, TSSE_FIRMWARE, &pdev->dev); + result = request_firmware(fw, name, &pdev->dev); if (result) - dev_err(&pdev->dev, "%s failed\n", __func__); + dev_err(&pdev->dev, "%s failed for %s\n", __func__, name); return result; } diff --git a/drivers/crypto/montage/tsse/tsse_fw_service.h b/drivers/crypto/montage/tsse/tsse_fw_service.h index 973ca6a0bce9..706ea6d29769 100644 --- a/drivers/crypto/montage/tsse/tsse_fw_service.h +++ b/drivers/crypto/montage/tsse/tsse_fw_service.h @@ -8,10 +8,12 @@ #ifndef __TSSE_FW_SERVICE_H__ #define __TSSE_FW_SERVICE_H__ +#include + #define FW_BASE 0x7000000 #define TSSE_FIRMWARE "tsse_firmware.bin" void fw_service(void *tsseipc_t, void *msg_t); -int tsse_fw_load(struct pci_dev *pdev); -int get_firmware_version(char *fw_buffer, uint32_t buffer_len, char *fw_version); +int tsse_fw_load(struct pci_dev *pdev, const char *name, const struct firmware **fw); +int get_firmware_version(const struct firmware *fw, char *fw_version_out); #endif diff --git a/drivers/crypto/montage/tsse/tsse_ipc.c b/drivers/crypto/montage/tsse/tsse_ipc.c index 0f92c096f211..b75ca97db6b6 100644 --- a/drivers/crypto/montage/tsse/tsse_ipc.c +++ b/drivers/crypto/montage/tsse/tsse_ipc.c @@ -14,60 +14,43 @@ #include "tsse_dev.h" #include "tsse_service.h" -struct tsse_msg *get_msginf(void __iomem *d2h) +/** + * get_msginf() - Create ipc_msg and read message from BAR. + * Return the pointer to ipc_msg, the caller is responsible for free it. 
+ * @d2h: device2host memory pointer + * Return: new ipc_msg pointer, which points to message read from device + */ +static struct ipc_msg *get_msginf(void __iomem *d2h) { - uint32_t u_len; - struct tsse_msg *tssemsg; + uint32_t u_len = 0; + struct ipc_msg *msg = NULL; + uint8_t *device_msg_data = NULL; struct ipc_header *ipc_info = (struct ipc_header *)d2h; + // The memory layout in d2h should at least contains: + // ipc_header, msg_info and fw_load (message body) + if (ipc_info->i_len < sizeof(struct ipc_header) + + sizeof(struct msg_info) + sizeof(struct fw_load)) { + pr_info("%s(): msg format error\n", __func__); + return NULL; + } u_len = ipc_info->i_len - sizeof(struct ipc_header); - - tssemsg = (struct tsse_msg *)(kzalloc(sizeof(struct tsse_msg) + u_len, + msg = (struct ipc_msg *)(kzalloc(sizeof(struct ipc_msg) + u_len, GFP_ATOMIC)); - - if (!tssemsg) { - pr_info("%s(): tssemsg kzalloc failed\n", __func__); + if (!msg) { + pr_info("%s(): ipc_msg kzalloc failed\n", __func__); return NULL; } - tssemsg->ipc_payload.header.inst_id = ipc_info->inst_id; - tssemsg->ipc_payload.header.tgid = ipc_info->tgid; - tssemsg->ipc_payload.header.i_len = ipc_info->i_len; - - return tssemsg; -} - -void ipc_recieve_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg) -{ - uint32_t u_len = msg->header.i_len - sizeof(struct ipc_header); - uint32_t *msg_data = NULL; - void __iomem *d2h = tsseipc->virt_addr + MAIN2HOST_IPC_OFFSET; - - msg_data = (uint32_t *)(d2h + sizeof(struct ipc_header)); - memcpy_fromio(msg->i_data, msg_data, u_len); - return; - -} - -int msg_rout(struct tsse_ipc *tsseipc, struct tsse_msg *tssemsg) -{ - int ret = 0; - struct ipc_msg *msg; - struct msg_info *info; - uint32_t msg_class; + msg->header.inst_id = ipc_info->inst_id; + msg->header.tgid = ipc_info->tgid; + msg->header.i_len = ipc_info->i_len; - msg = &tssemsg->ipc_payload; + device_msg_data = (uint8_t *)(d2h + sizeof(struct ipc_header)); + memcpy_fromio((uint8_t *)msg->i_data, device_msg_data, u_len); - 
ipc_recieve_msg(tsseipc, msg); - info = (struct msg_info *)msg->i_data; - msg_class = info->msg_class; - if (msg_class == IPC_MESSAGE_BOOT) { - service_rout(tsseipc, msg); - return 0; - } - - return ret; + return msg; } static irqreturn_t tsse_ipc_d2h_irqhandler(int irq, void *dev_id) @@ -90,70 +73,59 @@ bool check_send_enbit(struct tsse_ipc *tsseipc) else return false; } -EXPORT_SYMBOL(check_send_enbit); void notify_device(struct tsse_ipc *tsseipc) { writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); return; -} -EXPORT_SYMBOL(notify_device); - -void ipc_send_msg(struct tsse_ipc *tsseipc, struct ipc_data *msg) -{ - u8 *h2d = NULL; - - h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET); - memcpy_toio(h2d, msg, sizeof(struct ipc_header)); - memcpy_toio(h2d + sizeof(struct ipc_header), (u32 *)msg->i_ptr, - msg->header.i_len - sizeof(struct ipc_header)); - return; - } -void ipc_hw_init(struct tsse_ipc *hw_ipc) +/** + * ipc_hw_init()- Enable main2host interrupt, cleanup interrupt + * set value in host2main and main2host. 
+ * @hw_ipc: pointer to a structure used for IPC + */ +static void ipc_hw_init(struct tsse_ipc *hw_ipc) { writel(0x1, hw_ipc->virt_addr + MAIN2HOST_INTR_ENABLE_OFFSET); writel(0x0, hw_ipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); writel(0x0, hw_ipc->virt_addr + MAIN2HOST_INTR_SET_OFFSET); } -int ipc_init_msg(struct tsse_ipc *tsseipc) +static int ipc_init_msg(struct tsse_ipc *tsseipc) { u8 *h2d; u32 int_reg; - u32 rc; u32 cmd_len; + u32 i_len; struct ipc_msg *msg; struct msg_info *info_msg; - msg = (struct ipc_msg *)(kzalloc( - sizeof(struct ipc_msg) + sizeof(struct msg_info), GFP_ATOMIC)); + cmd_len = sizeof(uint32_t); + i_len = sizeof(struct ipc_header) + sizeof(struct msg_info) + cmd_len; + msg = (struct ipc_msg *)(kzalloc(i_len, GFP_ATOMIC)); if (!msg) { pr_info("%s(): msg kzalloc failed\n", __func__); - return -1; + return -EFAULT; } - cmd_len = sizeof(uint32_t); - msg->header.i_len = - sizeof(struct ipc_header) + sizeof(struct msg_info) + cmd_len; + msg->header.i_len = i_len; info_msg = (struct msg_info *)msg->i_data; info_msg->msg_class = IPC_MESSAGE_BASIC; - *(msg->i_data + sizeof(struct msg_info) / 4) = IPC_BASIC_CMD_HOST_INIT; + *(uint32_t *)((uint8_t *)msg->i_data + sizeof(struct msg_info)) = IPC_BASIC_CMD_HOST_INIT; mutex_lock(&tsseipc->list_lock); int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); if ((int_reg & IPC_REGISTER_INT_SET) != 0) { - rc = -1; mutex_unlock(&tsseipc->list_lock); kfree(msg); - return rc; + return -EFAULT; } h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET); memcpy_toio(h2d, msg, sizeof(struct ipc_header)); - memcpy_toio(h2d + sizeof(struct ipc_header), (u32 *)msg->i_data, + memcpy_toio(h2d + sizeof(struct ipc_header), (u8 *)msg->i_data, sizeof(struct msg_info) + sizeof(uint32_t)); writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET); @@ -168,13 +140,15 @@ static void tsse_ipc_bh_handler(unsigned long data) struct tsse_ipc *tsseipc = (struct tsse_ipc *)data; void __iomem *d2h_payload = tsseipc->virt_addr 
+ MAIN2HOST_IPC_OFFSET; - struct tsse_msg *msg_tsse = get_msginf(d2h_payload); + struct ipc_msg *msg = get_msginf(d2h_payload); - if (!msg_tsse) { + if (!msg) { dev_err(tsseipc->dev, "get_msginf is NULL\n"); return; } - msg_rout(tsseipc, msg_tsse); + if (service_rout(tsseipc, msg)) + dev_err(tsseipc->dev, "illegal message class\n"); + kfree(msg); } int tsse_ipc_init(struct pci_dev *pdev) @@ -198,12 +172,18 @@ int tsse_ipc_init(struct pci_dev *pdev) rc = request_threaded_irq(pci_irq_vector(pdev, 0), NULL, tsse_ipc_d2h_irqhandler, IRQF_SHARED, "pf-ipc", ipc); + if (rc) { + dev_err(&pdev->dev, "request_threaded_irq failed\n"); + return rc; + } ipc_hw_init(ipc); - ipc_init_msg(ipc); - + rc = ipc_init_msg(ipc); + if (rc) { + dev_err(&pdev->dev, "ipc_init_msg failed\n"); + tsse_ipc_deinit(tdev); + } return rc; } -EXPORT_SYMBOL_GPL(tsse_ipc_init); void tsse_ipc_deinit(void *tdev_t) { @@ -214,8 +194,23 @@ void tsse_ipc_deinit(void *tdev_t) tdev = tdev_t; tsseipc = tdev->ipc; pdev = tsseipc->pdev; - free_irq(pci_irq_vector(pdev, 0), tdev->ipc); - return; + if (tsseipc) { + free_irq(pci_irq_vector(pdev, 0), tdev->ipc); + tdev->ipc = NULL; + } +} +int tsse_fw_manual_load_ipc(struct pci_dev *pdev) +{ + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + struct tsse_ipc *ipc = tdev->ipc; + int rc = -EFAULT; + + if (ipc) { + ipc_hw_init(ipc); + rc = ipc_init_msg(ipc); + if (rc) + dev_err(&pdev->dev, "ipc_init_msg failed\n"); + } + return rc; } -EXPORT_SYMBOL_GPL(tsse_ipc_deinit); diff --git a/drivers/crypto/montage/tsse/tsse_ipc.h b/drivers/crypto/montage/tsse/tsse_ipc.h index 59dcbf6eafc4..82f8df71c983 100644 --- a/drivers/crypto/montage/tsse/tsse_ipc.h +++ b/drivers/crypto/montage/tsse/tsse_ipc.h @@ -38,11 +38,11 @@ enum IPC_BASIC_CMD { IPC_BASIC_CMD_HOST_INIT = 0x1, - IPC_BASIC_CMD_PING = 0x2, + IPC_BASIC_CMD_PING = 0x2 }; enum IPC_BOOT_CMD { - IPC_BOOT_CMD_GET_FIRMWARE = 0x1, + IPC_BOOT_CMD_GET_FIRMWARE = 0x1 }; enum IPC_MESSAGE_CLASS { @@ -62,11 +62,6 @@ struct ipc_header { 
uint32_t reserved[2]; }; -struct ipc_data { - struct ipc_header header; - void *i_ptr; -}; - struct ipc_msg { struct ipc_header header; uint32_t i_data[]; @@ -92,11 +87,6 @@ struct ipc_layout { struct msg_info info; }; -struct tsse_msg { - struct list_head list; - struct ipc_msg ipc_payload; -}; - struct tsse_ipc { struct device *dev; struct pci_dev *pdev; @@ -107,6 +97,7 @@ struct tsse_ipc { int tsse_ipc_init(struct pci_dev *pdev); void tsse_ipc_deinit(void *tdev); +int tsse_fw_manual_load_ipc(struct pci_dev *pdev); bool check_send_enbit(struct tsse_ipc *tsseipc); void notify_device(struct tsse_ipc *tsseipc); #endif diff --git a/drivers/crypto/montage/tsse/tsse_service.c b/drivers/crypto/montage/tsse/tsse_service.c index 64121a655803..e4be85535b77 100644 --- a/drivers/crypto/montage/tsse/tsse_service.c +++ b/drivers/crypto/montage/tsse/tsse_service.c @@ -5,14 +5,13 @@ * Copyright © 2023 Montage Technology. All rights reserved. */ #include -#include "tsse_ipc.h" -#include "tsse_fw_service.h" +#include "tsse_service.h" int service_rout(struct tsse_ipc *tsseipc, struct ipc_msg *msg) { struct msg_info *info; uint32_t msg_class; - int ret; + int ret = 0; info = (struct msg_info *)msg->i_data; msg_class = info->msg_class; @@ -25,6 +24,5 @@ int service_rout(struct tsse_ipc *tsseipc, struct ipc_msg *msg) ret = -EINVAL; break; } - return 0; - + return ret; } -- Gitee From d96b8c8b446dbf17f496fe7cc4c7bbd00be2a4d8 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Wed, 11 Aug 2021 11:31:59 +0800 Subject: [PATCH 0814/2138] anolis: efi: cper: print raw data info of estatus for Yitian SoC ANBZ: #8642 To report error type more clearly, add raw data of estatus. The raw data follows any Generic Error Data Entries. And it includes two levels of information. The top level is a raw data header structure which include its type, subtype and the count of ras_reg_common. The next level is one or more ras_reg_common structures providing the detail specific information about the error. 
Now, we just dump the raw data. Signed-off-by: Shuai Xue Reviewed-by: luanshi Reviewed-by: Baolin Wang [Ruidong: use pr_info instead of printk to fix checkpatch warning] Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/firmware/efi/cper.c | 28 +++++++++++++++++ include/acpi/ghes.h | 60 +++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 35c37f667781..2860a1efb133 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -630,6 +630,9 @@ void cper_estatus_print(const char *pfx, int sec_no = 0; char newpfx[64]; __u16 severity; + struct raw_data_header *r_data_header; + struct ras_reg_common *reg_common; + int sub_record_no = 0; severity = estatus->error_severity; if (severity == CPER_SEV_CORRECTED) @@ -643,6 +646,31 @@ void cper_estatus_print(const char *pfx, cper_estatus_print_section(newpfx, gdata, sec_no); sec_no++; } + + r_data_header = (struct raw_data_header *)((void *)estatus + + estatus->raw_data_offset); + /* + * ONLY processor, CMN, GIC, and SMMU has raw error data which follow + * any Generic Error Data Entries. The raw error data format is vendor + * implementation defined. 
+ */ + if (!r_data_header->ras_count) + return; + + pr_info("%s type: 0x%x, ras_count: %d\n", pfx, r_data_header->type, + r_data_header->ras_count); + + apei_estatus_for_each_raw_reg_common(r_data_header, reg_common) { + pr_info("%s sub_type: 0x%x\n", pfx, + r_data_header->sub_type[sub_record_no]); + pr_info("%s fr: 0x%llx, ctrl: 0x%llx, status: 0x%llx, addr: 0x%llx\n", + pfx, reg_common->fr, reg_common->ctrl, + reg_common->status, reg_common->addr); + pr_info("%s misc0: 0x%llx, misc1: 0x%llx, misc2: 0x%llx, misc3: 0x%llx\n", + pfx, reg_common->misc0, reg_common->misc1, + reg_common->misc2, reg_common->misc3); + sub_record_no++; + } } EXPORT_SYMBOL_GPL(cper_estatus_print); diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index be1dd4c1a917..af0fe2873b3a 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -133,4 +133,64 @@ static inline int ghes_notify_sea(void) { return -ENOENT; } struct notifier_block; extern void ghes_register_report_chain(struct notifier_block *nb); extern void ghes_unregister_report_chain(struct notifier_block *nb); + +#pragma pack(1) +struct raw_data_header { + uint32_t signature; /* 'r' 'a' 'w' 'd' */ + uint8_t type; + uint8_t ras_count; + /* one record may have multiple sub-record (up to 6) */ + uint8_t sub_type[6]; +}; + +struct ras_reg_common { + uint64_t fr; + uint64_t ctrl; + uint64_t status; + uint64_t addr; + uint64_t misc0; + uint64_t misc1; + uint64_t misc2; + uint64_t misc3; +}; + +enum ras_type { + ERR_TYPE_GENERIC = 0x40, + ERR_TYPE_CORE = 0x41, + ERR_TYPE_GIC = 0x42, + ERR_TYPE_CMN = 0x43, + ERR_TYPE_SMMU = 0x44, + ERR_TYPE_DDR = 0x50, + ERR_TYPE_PCI = 0x60 +}; +enum cmn_node_type { + NODE_TYPE_DVM = 0x1, + NODE_TYPE_CFG = 0x2, + NODE_TYPE_DTC = 0x3, + NODE_TYPE_HN_I = 0x4, + NODE_TYPE_HN_F = 0x5, + NODE_TYPE_XP = 0x6, + NODE_TYPE_SBSX = 0x7, + NODE_TYPE_MPAM_S = 0x8, + NODE_TYPE_MPAM_NS = 0x9, + NODE_TYPE_RN_I = 0xA, + NODE_TYPE_RN_D = 0xD, + NODE_TYPE_RN_SAM = 0xF, + NODE_TYPE_HN_P = 0x11, + /* Coherent 
Multichip Link (CML) node types */ + NODE_TYPE_CML_BASE = 0x100, + NODE_TYPE_CXRA = 0x100, + NODE_TYPE_CXHA = 0x101, + NODE_TYPE_CXLA = 0x102, + NODE_TYPE_CCRA = 0x103, + NODE_TYPE_CCHA = 0x104, + NODE_TYPE_CCLA = 0x105, +}; +#pragma pack() + +#define apei_estatus_for_each_raw_reg_common(r_data_header, reg_common) \ + for (reg_common = (struct ras_reg_common *)(r_data_header + 1); \ + (void *)(reg_common) - (void *)(r_data_header + 1) < r_data_header->ras_count; \ + reg_common = (((void *)(reg_common)) + 1)) + #endif /* GHES_H */ -- Gitee From 53cda24bfd8f9b959c453162058402aeaee2857e Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Thu, 10 Feb 2022 21:46:47 +0800 Subject: [PATCH 0815/2138] anolis: efi: cper: print error type string of raw data info ANBZ: #8642 The error type number of raw data info is not readable, so print the error type string. Signed-off-by: Shuai Xue Reviewed-by: luanshi Reviewed-by: Baolin Wang Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/firmware/efi/cper.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 2860a1efb133..d4559af9705b 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -623,6 +623,20 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata pr_err(FW_WARN "error section length is too small\n"); } +static char *cper_raw_err_type_str(u64 type) +{ + switch (type) { + case 0x40: return "GENERIC"; + case 0x41: return "CORE"; + case 0x42: return "GIC"; + case 0x43: return "CMN"; + case 0x44: return "SMMU"; + case 0x50: return "DDR"; + case 0x60: return "PCI"; + default: return "Reserved"; + } +} + void cper_estatus_print(const char *pfx, const struct acpi_hest_generic_status *estatus) { @@ -657,7 +671,8 @@ void cper_estatus_print(const char *pfx, if (!r_data_header->ras_count) return; - pr_info("%s type: 0x%x, ras_count: %d\n", pfx, 
r_data_header->type, + pr_info("%s type: %s (0x%x), ras_count:%d\n", pfx, + cper_raw_err_type_str(r_data_header->type), r_data_header->type, r_data_header->ras_count); apei_estatus_for_each_raw_reg_common(r_data_header, reg_common) { -- Gitee From 7af37f2db14860e11be053e2b4b69634eef0e311 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Tue, 9 Aug 2022 12:58:07 +0800 Subject: [PATCH 0816/2138] anolis: efi: cper: move Yitian specific raw data handling into CONFIG_YITIAN_CPER_RAWDATA ANBZ: #8642 Raw data is part of estatus and its format is vendor defined. Add a separate config CONFIG_YITIAN_CPER_RAWDATA to enable or disable raw data for Yitian 710 and move Yitian specific raw data handling into CONFIG_YITIAN_CPER_RAWDATA. Signed-off-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/638 Reviewed-by: Baolin Wang Reviewed-by: Bixuan Cui [Ruidong: Modify Kconfig help description to fix checkpath warning] Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/firmware/efi/Kconfig | 9 +++++++++ drivers/firmware/efi/cper.c | 4 ++++ include/acpi/ghes.h | 3 +++ 3 files changed, 16 insertions(+) diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 138491a4b494..46d1a358aabb 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -301,3 +301,12 @@ config UEFI_CPER_X86 bool depends on UEFI_CPER && X86 default y + +config YITIAN_CPER_RAWDATA + bool "Print Yitian custom raw data about platform error info" + depends on EFI && ACPI + help + Allow print Yitian custom raw data about platform error info, + including CMN, GIC, SMMU, DDR, etc. It gathers more useful error + information from hardware, which helps to debug and test RAS + feature. 
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index d4559af9705b..5909deb0b176 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -623,6 +623,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata pr_err(FW_WARN "error section length is too small\n"); } +#ifdef CONFIG_YITIAN_CPER_RAWDATA static char *cper_raw_err_type_str(u64 type) { switch (type) { @@ -636,6 +637,7 @@ static char *cper_raw_err_type_str(u64 type) default: return "Reserved"; } } +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ void cper_estatus_print(const char *pfx, const struct acpi_hest_generic_status *estatus) @@ -661,6 +663,7 @@ void cper_estatus_print(const char *pfx, sec_no++; } +#ifdef CONFIG_YITIAN_CPER_RAWDATA r_data_header = (struct raw_data_header *)((void *)estatus + estatus->raw_data_offset); /* @@ -686,6 +689,7 @@ void cper_estatus_print(const char *pfx, reg_common->misc2, reg_common->misc3); sub_record_no++; } +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ } EXPORT_SYMBOL_GPL(cper_estatus_print); diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index af0fe2873b3a..04269449927e 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -134,6 +134,7 @@ struct notifier_block; extern void ghes_register_report_chain(struct notifier_block *nb); extern void ghes_unregister_report_chain(struct notifier_block *nb); +#ifdef CONFIG_YITIAN_CPER_RAWDATA #pragma pack(1) struct raw_data_header { uint32_t signature; /* 'r' 'a' 'w' 'd' */ @@ -163,6 +164,7 @@ enum ras_type { ERR_TYPE_DDR = 0x50, ERR_TYPE_PCI = 0x60 }; + enum cmn_node_type { NODE_TYPE_DVM = 0x1, NODE_TYPE_CFG = 0x2, @@ -192,5 +194,6 @@ enum cmn_node_type { for (reg_common = (struct ras_reg_common *)(r_data_header + 1); \ (void *)(reg_common) - (void *)(r_data_header + 1) < r_data_header->ras_count; \ reg_common = (((void *)(reg_common)) + 1)) +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ #endif /* GHES_H */ -- Gitee From a6c29e90b02d8d88847b5c50072f4321467f6482 
Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Wed, 3 Aug 2022 21:13:37 +0800 Subject: [PATCH 0817/2138] anolis: efi: cper: check Yitian raw data signature ANBZ: #8642 The raw data supported now is only applied to Yitan 710 SoC, add signature check before dumping any info. Signed-off-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/638 Reviewed-by: Baolin Wang Reviewed-by: Bixuan Cui Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/firmware/efi/cper.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 5909deb0b176..69f97b273064 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -666,6 +666,16 @@ void cper_estatus_print(const char *pfx, #ifdef CONFIG_YITIAN_CPER_RAWDATA r_data_header = (struct raw_data_header *)((void *)estatus + estatus->raw_data_offset); + if (estatus->raw_data_length < sizeof(struct raw_data_header)) + return; + +#define YITIAN_SIGNATURE_16(A, B) ((A) | (B << 8)) +#define YITIAN_SIGNATURE_32(A, B, C, D) \ + (YITIAN_SIGNATURE_16(A, B) | (YITIAN_SIGNATURE_16(C, D) << 16)) + + if (r_data_header->signature != YITIAN_SIGNATURE_32('r', 'a', 'w', 'd')) + return; + /* * ONLY processor, CMN, GIC, and SMMU has raw error data which follow * any Generic Error Data Entries. The raw error data format is vendor -- Gitee From 0730832931cf07257e481b9e860a5f160a01809b Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Wed, 20 Jul 2022 18:52:27 +0800 Subject: [PATCH 0818/2138] anolis: efi: cper: refactor raw data handling for Yitian ANBZ: #8642 The raw data format name for Yitan 710 is misleading, rename with yitian prefix firstly. Refactor core and uncore raw data handler to a separate function so that we could add DDR raw data handler easily later. There should be no functional change as a result of this patch. 
Signed-off-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/638 Reviewed-by: Baolin Wang Reviewed-by: Bixuan Cui Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/firmware/efi/cper.c | 115 +++++++++++++++++++++--------------- include/acpi/ghes.h | 16 ++--- 2 files changed, 77 insertions(+), 54 deletions(-) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 69f97b273064..bcc509c21388 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -624,19 +624,78 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata } #ifdef CONFIG_YITIAN_CPER_RAWDATA -static char *cper_raw_err_type_str(u64 type) +static char *yitian_raw_err_type_str(u64 type) { switch (type) { - case 0x40: return "GENERIC"; - case 0x41: return "CORE"; - case 0x42: return "GIC"; - case 0x43: return "CMN"; - case 0x44: return "SMMU"; - case 0x50: return "DDR"; - case 0x60: return "PCI"; + case ERR_TYPE_GENERIC: return "GENERIC"; + case ERR_TYPE_CORE: return "CORE"; + case ERR_TYPE_GIC: return "GIC"; + case ERR_TYPE_CMN: return "CMN"; + case ERR_TYPE_SMMU: return "SMMU"; + case ERR_TYPE_DDR: return "DDR"; + case ERR_TYPE_PCI: return "PCI"; default: return "Reserved"; } } + +void yitian_platform_raw_data_print(const char *pfx, + struct yitian_raw_data_header *header) +{ + struct yitian_ras_common_reg *common_reg; + int sub_record_no = 0; + + yitian_estatus_for_each_raw_reg_common(header, common_reg) { + pr_info("%s sub_type: 0x%x\n", pfx, + header->sub_type[sub_record_no]); + pr_info("%s fr: 0x%llx, ctrl: 0x%llx, status: 0x%llx, addr: 0x%llx\n", + pfx, common_reg->fr, common_reg->ctrl, + common_reg->status, common_reg->addr); + pr_info("%s misc0: 0x%llx, misc1: 0x%llx, misc2: 0x%llx, misc3: 0x%llx\n", + pfx, common_reg->misc0, common_reg->misc1, + common_reg->misc2, common_reg->misc3); + sub_record_no++; + } +} + +void yitian_raw_data_print(const char *pfx, + const struct 
acpi_hest_generic_status *estatus) +{ + struct yitian_raw_data_header *header; + + if (estatus->raw_data_length < sizeof(*header)) + return; + + header = (struct yitian_raw_data_header *)((void *)estatus + + estatus->raw_data_offset); + +#define YITIAN_SIGNATURE_16(A, B) ((A) | (B << 8)) +#define YITIAN_SIGNATURE_32(A, B, C, D) \ + (YITIAN_SIGNATURE_16(A, B) | (YITIAN_SIGNATURE_16(C, D) << 16)) + + if (header->signature != YITIAN_SIGNATURE_32('r', 'a', 'w', 'd')) + return; + + /* + * ONLY processor, CMN, GIC, and SMMU has raw error data which follow + * any Generic Error Data Entries. The raw error data format is vendor + * implementation defined. + */ + if (!header->common_reg_nr) + return; + + pr_info("%s type: %s (0x%x), common_reg_nr:%d\n", pfx, + yitian_raw_err_type_str(header->type), header->type, + header->common_reg_nr); + + switch (header->type) { + case ERR_TYPE_CORE: + case ERR_TYPE_GIC: + case ERR_TYPE_CMN: + case ERR_TYPE_SMMU: + yitian_platform_raw_data_print(pfx, header); + break; + } +} #endif /* CONFIG_YITIAN_CPER_RAWDATA */ void cper_estatus_print(const char *pfx, @@ -646,9 +705,6 @@ void cper_estatus_print(const char *pfx, int sec_no = 0; char newpfx[64]; __u16 severity; - struct raw_data_header *r_data_header; - struct ras_reg_common *reg_common; - int sub_record_no = 0; severity = estatus->error_severity; if (severity == CPER_SEV_CORRECTED) @@ -664,41 +720,8 @@ void cper_estatus_print(const char *pfx, } #ifdef CONFIG_YITIAN_CPER_RAWDATA - r_data_header = (struct raw_data_header *)((void *)estatus + - estatus->raw_data_offset); - if (estatus->raw_data_length < sizeof(struct raw_data_header)) - return; - -#define YITIAN_SIGNATURE_16(A, B) ((A) | (B << 8)) -#define YITIAN_SIGNATURE_32(A, B, C, D) \ - (YITIAN_SIGNATURE_16(A, B) | (YITIAN_SIGNATURE_16(C, D) << 16)) - - if (r_data_header->signature != YITIAN_SIGNATURE_32('r', 'a', 'w', 'd')) - return; - - /* - * ONLY processor, CMN, GIC, and SMMU has raw error data which follow - * any Generic Error 
Data Entries. The raw error data format is vendor - * implementation defined. - */ - if (!r_data_header->ras_count) - return; - - pr_info("%s type: %s (0x%x), ras_count:%d\n", pfx, - cper_raw_err_type_str(r_data_header->type), r_data_header->type, - r_data_header->ras_count); - - apei_estatus_for_each_raw_reg_common(r_data_header, reg_common) { - pr_info("%s sub_type: 0x%x\n", pfx, - r_data_header->sub_type[sub_record_no]); - pr_info("%s fr: 0x%llx, ctrl: 0x%llx, status: 0x%llx, addr: 0x%llx\n", - pfx, reg_common->fr, reg_common->ctrl, - reg_common->status, reg_common->addr); - pr_info("%s misc0: 0x%llx, misc1: 0x%llx, misc2: 0x%llx, misc3: 0x%llx\n", - pfx, reg_common->misc0, reg_common->misc1, - reg_common->misc2, reg_common->misc3); - sub_record_no++; - } + if (estatus->raw_data_length) + yitian_raw_data_print(pfx, estatus); #endif /* CONFIG_YITIAN_CPER_RAWDATA */ } EXPORT_SYMBOL_GPL(cper_estatus_print); diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index 04269449927e..9f236d61521c 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -136,15 +136,15 @@ extern void ghes_unregister_report_chain(struct notifier_block *nb); #ifdef CONFIG_YITIAN_CPER_RAWDATA #pragma pack(1) -struct raw_data_header { +struct yitian_raw_data_header { uint32_t signature; /* 'r' 'a' 'w' 'd' */ uint8_t type; - uint8_t ras_count; + uint8_t common_reg_nr; /* one record may have multiple sub-record (up to 6) */ uint8_t sub_type[6]; }; -struct ras_reg_common { +struct yitian_ras_common_reg { uint64_t fr; uint64_t ctrl; uint64_t status; @@ -155,7 +155,7 @@ struct ras_reg_common { uint64_t misc3; }; -enum ras_type { +enum yitian_ras_type { ERR_TYPE_GENERIC = 0x40, ERR_TYPE_CORE = 0x41, ERR_TYPE_GIC = 0x42, @@ -190,10 +190,10 @@ enum cmn_node_type { }; #pragma pack() -#define apei_estatus_for_each_raw_reg_common(r_data_header, reg_common) \ - for (reg_common = (struct ras_reg_common *)(r_data_header + 1); \ - (void *)(reg_common) - (void *)(r_data_header + 1) < 
r_data_header->ras_count; \ - reg_common = (((void *)(reg_common)) + 1)) +#define yitian_estatus_for_each_raw_reg_common(header, reg) \ + for (reg = (struct yitian_ras_common_reg *)(header + 1); \ + (void *)(reg) - (void *)(header + 1) < header->common_reg_nr; \ + reg = (((void *)(reg)) + 1)) #endif /* CONFIG_YITIAN_CPER_RAWDATA */ #endif /* GHES_H */ -- Gitee From a4eaf33820e54f9a872476953bc8b462d0683cd4 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Wed, 3 Aug 2022 21:23:26 +0800 Subject: [PATCH 0819/2138] anolis: efi: cper: add DDR raw data support for Yitian ANBZ: #8642 Firmware reports error information for DDR hardware error through ddr_raw_data, which following any Generic Error Data Entries in ACPI Platform Error Interfaces (APEI). The ddr_raw_data describe exception information, including: - exception: source of synchronous or asynchronous exception - system registers (optional): When a hardware error is consumed by core, synchronous external abort exception will be raised. The system registers of core provides the exception information, e.g. ELR_ELX for preferred exception link address, ESR_ELx for exception syndrome. Firmware reports system registers only when synchronous external abort occurs. - ECC registers (optional): A wide range of information about the detected errors can be obtained by reading the ECC error reporting registers from DDR controller. Firmware reports ECC registers only when ECC error occurs. 
Signed-off-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/638 Reviewed-by: Baolin Wang Reviewed-by: Bixuan Cui [Ruidong: Modify printk to pr_info to fix checkpatch warning] Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/firmware/efi/cper.c | 49 +++++++++++++++++++++++++++++++++++++ include/acpi/ghes.h | 37 ++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index bcc509c21388..b3c40759fb06 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -657,6 +657,52 @@ void yitian_platform_raw_data_print(const char *pfx, } } +static void yitian_ddr_raw_data_print(const char *pfx, + struct yitian_raw_data_header *header) +{ + struct yitian_ddr_raw_data *data; + + data = (struct yitian_ddr_raw_data *)(header + 1); + + switch (data->ex_type) { + case 0x1: + pr_info("%s Synchronous Exception taken in EL%d\n", pfx, data->el_nr); + break; + case 0x2: + pr_info("%s Interrupt: %d\n", pfx, data->intr); + break; + case 0x3: + pr_info("%s SError\n", pfx); + break; + default: + pr_info("%s Unknown interrupt type\n", pfx); + } + + /* System regs is valid only when it's a synchronous exception */ + if (data->ex_type == 1) { + struct yitian_ddr_sys_reg *sys_regs = &data->sys_regs; + + pr_info("%s ESR: 0x%llx, ELR: 0x%llx, FAR: 0x%llx, SCR: 0x%llx, SCTLR: 0x%llx, LR: 0x%llx\n", + pfx, sys_regs->esr, sys_regs->elr, sys_regs->far, + sys_regs->scr, sys_regs->sctlr, sys_regs->lr); + } + + /* ECC Data is valid only when it's a ECC error */ + if (data->err_type == 1) { + struct yitian_ddr_ecc_data *ecc_data = &data->ecc_data; + + pr_info("%s ECCERRCNT: 0x%x, ECCSTAT: 0x%x, ADVECCSTAT: 0x%x, ECCSYMBOL: 0x%x, ECCERRCNTSTAT: 0x%x, ECCERRCNT0: 0x%x, ECCERRCNT1: 0x%x, ECCCADDR0: 0x%x, ECCCADDR1: 0x%x, ECCCDATA0: 0x%x, ECCCDATA1: 0x%x, ECCUADDR0: 0x%x, ECCUADDR1: 0x%x, ECCUDATA0: 0x%x, ECCUDATA1: 0x%x\n", + pfx, ecc_data->eccerrcnt, 
ecc_data->eccstat, + ecc_data->adveccstat, ecc_data->eccsymbol, + ecc_data->eccerrcntstat, ecc_data->eccerrcnt0, + ecc_data->eccerrcnt1, ecc_data->ecccaddr0, + ecc_data->ecccaddr1, ecc_data->ecccdata0, + ecc_data->ecccdata1, ecc_data->eccuaddr0, + ecc_data->eccuaddr1, ecc_data->eccudata0, + ecc_data->eccudata1); + } +} + void yitian_raw_data_print(const char *pfx, const struct acpi_hest_generic_status *estatus) { @@ -694,6 +740,9 @@ void yitian_raw_data_print(const char *pfx, case ERR_TYPE_SMMU: yitian_platform_raw_data_print(pfx, header); break; + case ERR_TYPE_DDR: + yitian_ddr_raw_data_print(pfx, header); + break; } } #endif /* CONFIG_YITIAN_CPER_RAWDATA */ diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index 9f236d61521c..a7d853c78da3 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -188,6 +188,43 @@ enum cmn_node_type { NODE_TYPE_CCHA = 0x104, NODE_TYPE_CCLA = 0x105, }; + +struct yitian_ddr_sys_reg { + uint64_t esr; + uint64_t elr; + uint64_t far; + uint64_t scr; + uint64_t sctlr; + uint64_t lr; +}; + +struct yitian_ddr_ecc_data { + uint32_t eccerrcnt; + uint32_t eccstat; + uint32_t adveccstat; + uint32_t eccsymbol; + uint32_t eccerrcntstat; + uint32_t eccerrcnt0; + uint32_t eccerrcnt1; + uint32_t ecccaddr0; + uint32_t ecccaddr1; + uint32_t ecccdata0; + uint32_t ecccdata1; + uint32_t eccuaddr0; + uint32_t eccuaddr1; + uint32_t eccudata0; + uint32_t eccudata1; +}; + +struct yitian_ddr_raw_data { + uint32_t intr; /* interrupt num, valid for interrupt only, for exception intr=0 */ + uint8_t ex_type; /* 1:sync exception 2:interrupt 3:Serror */ + uint8_t el_nr; /* error el, only valid for ex_type==1, 0:el0 1:el1 2:el2 */ + uint8_t err_type; /* 1:ecc 2:CA parity 3:R/W CRC */ + struct yitian_ddr_sys_reg sys_regs; /* Only valid for ex_type==1 */ + struct yitian_ddr_ecc_data ecc_data; /* Only valid for err_type==1 */ +}; + #pragma pack() #define yitian_estatus_for_each_raw_reg_common(header, reg) \ -- Gitee From 
51aedfe1b693cbdc7ac97a850fb800198d9ec45a Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Thu, 13 Oct 2022 16:54:20 +0800 Subject: [PATCH 0820/2138] anolis: efi: cper: fix raw data register iteration ANBZ: #8642 One raw data record may have multiple sub-records. The for_each macro only iterates the first record now. Compare sub_record_no with common_reg_nr when iterates the sub-records. Fixes: acb1ce6cffb8 ("anolis: efi: cper: print raw data info of estatus for Yitian SoC") Signed-off-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/767 Reviewed-by: Baolin Wang [Ruidong: Modify printk to pr_info to fix checkpatch warning] Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/firmware/efi/cper.c | 3 +-- include/acpi/ghes.h | 6 +++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index b3c40759fb06..c60793101a7d 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -644,7 +644,7 @@ void yitian_platform_raw_data_print(const char *pfx, struct yitian_ras_common_reg *common_reg; int sub_record_no = 0; - yitian_estatus_for_each_raw_reg_common(header, common_reg) { + yitian_estatus_for_each_raw_reg_common(header, common_reg, sub_record_no) { pr_info("%s sub_type: 0x%x\n", pfx, header->sub_type[sub_record_no]); pr_info("%s fr: 0x%llx, ctrl: 0x%llx, status: 0x%llx, addr: 0x%llx\n", @@ -653,7 +653,6 @@ void yitian_platform_raw_data_print(const char *pfx, pr_info("%s misc0: 0x%llx, misc1: 0x%llx, misc2: 0x%llx, misc3: 0x%llx\n", pfx, common_reg->misc0, common_reg->misc1, common_reg->misc2, common_reg->misc3); - sub_record_no++; } } diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index a7d853c78da3..82eba57ac423 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -227,10 +227,10 @@ struct yitian_ddr_raw_data { #pragma pack() -#define yitian_estatus_for_each_raw_reg_common(header, reg) \ +#define 
yitian_estatus_for_each_raw_reg_common(header, reg, nr) \ for (reg = (struct yitian_ras_common_reg *)(header + 1); \ - (void *)(reg) - (void *)(header + 1) < header->common_reg_nr; \ - reg = (((void *)(reg)) + 1)) + nr < header->common_reg_nr; \ + reg++, nr++) #endif /* CONFIG_YITIAN_CPER_RAWDATA */ #endif /* GHES_H */ -- Gitee From 9693b5da09bb0f8e7807c0f1dbe023dcdef6bb1a Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Thu, 1 Dec 2022 19:32:56 +0800 Subject: [PATCH 0821/2138] anolis: efi: cper: refactor raw data header check to a function ANBZ: #8642 Move yitian raw data header sanity check in a separate function yitian_estatus_check_header() so that it could be used in GHES error handling later. There should be no functional change as a result of this patch. Signed-off-by: Shuai Xue Signed-off-by: Bixuan Cui Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/1019 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/acpi/apei/apei-internal.h | 4 ++++ drivers/firmware/efi/cper.c | 21 +++++++++++++++++---- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index 67c2c3b959e1..448370641d1d 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h @@ -131,3 +131,7 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus) int apei_osc_setup(void); #endif + +#ifdef CONFIG_YITIAN_CPER_RAWDATA +bool yitian_estatus_check_header(const struct acpi_hest_generic_status *estatus); +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index c60793101a7d..30a62a97ae98 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -702,13 +702,12 @@ static void yitian_ddr_raw_data_print(const char *pfx, } } -void yitian_raw_data_print(const char *pfx, - const struct acpi_hest_generic_status *estatus) +bool 
yitian_estatus_check_header(const struct acpi_hest_generic_status *estatus) { struct yitian_raw_data_header *header; if (estatus->raw_data_length < sizeof(*header)) - return; + return false; header = (struct yitian_raw_data_header *)((void *)estatus + estatus->raw_data_offset); @@ -718,7 +717,7 @@ void yitian_raw_data_print(const char *pfx, (YITIAN_SIGNATURE_16(A, B) | (YITIAN_SIGNATURE_16(C, D) << 16)) if (header->signature != YITIAN_SIGNATURE_32('r', 'a', 'w', 'd')) - return; + return false; /* * ONLY processor, CMN, GIC, and SMMU has raw error data which follow @@ -726,8 +725,22 @@ void yitian_raw_data_print(const char *pfx, * implementation defined. */ if (!header->common_reg_nr) + return false; + + return true; +} + +void yitian_raw_data_print(const char *pfx, + const struct acpi_hest_generic_status *estatus) +{ + struct yitian_raw_data_header *header; + + if (!yitian_estatus_check_header(estatus)) return; + header = (struct yitian_raw_data_header *)((void *)estatus + + estatus->raw_data_offset); + pr_info("%s type: %s (0x%x), common_reg_nr:%d\n", pfx, yitian_raw_err_type_str(header->type), header->type, header->common_reg_nr); -- Gitee From a4e4a37b050e26b01a7e4063b910b50b0918be66 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Thu, 10 Aug 2023 15:58:26 +0800 Subject: [PATCH 0822/2138] anolis: efi: cper: add ARM64 dependence for YITIAN_CPER_RAWDATA ANBZ: #8642 Add ARM64 dependence for YITIAN_CPER_RAWDATA to avoid it being enabled on other platforms. 
Signed-off-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2046 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/firmware/efi/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 46d1a358aabb..b76f9df4885a 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -304,7 +304,7 @@ config UEFI_CPER_X86 config YITIAN_CPER_RAWDATA bool "Print Yitian custom raw data about platform error info" - depends on EFI && ACPI + depends on EFI && ACPI && ARM64 help Allow print Yitian custom raw data about platform error info, including CMN, GIC, SMMU, DDR, etc. It gathers more useful error -- Gitee From 2faee9230e3283df4bcd9e26dcf781f05313e418 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Tue, 31 Jan 2023 12:59:18 +0800 Subject: [PATCH 0823/2138] anolis: ACPI: APEI: set memory failure flags as MF_ACTION_REQUIRED on synchronous events ANBZ: #8642 There are two major types of uncorrected error (UC) : - Action Required: The error is detected and the processor already consumes the memory. OS requires to take action (for example, offline failure page/kill failure thread) to recover this uncorrectable error. - Action Optional: The error is detected out of processor execution context. Some data in the memory are corrupted. But the data have not been consumed. OS is optional to take action to recover this uncorrectable error. For X86 platform, we can easily distinguish between these two types based on the MCA Bank. While for arm64 platform, the memory failure flags for all UCs which severity are GHES_SEV_RECOVERABLE are set as 0, a.k.a, Action Optional now. The main challenge is to tell whether APEI delivers signals synchronously. There are no hint in industry standard APEI. On Yitian 710, we have all hardware RAS information in custom DDR raw data. 
yitian_ddr_raw_data::ex_type indicates the signal is a synchronous exception, interrupt, or SError. Set memory failure flags as MF_ACTION_REQUIRED on synchronous events. Signed-off-by: Shuai Xue Signed-off-by: Bixuan Cui Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/1124 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/acpi/apei/ghes.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index ab2a82cb1b0b..4f9ee26e01d9 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -673,6 +673,33 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata, schedule_work(&entry->work); } +#ifdef CONFIG_YITIAN_CPER_RAWDATA +/* + * Check if the event is synchronous exception by Yitian DDR Raw data + * NOTE: only works for Yitian 710 now + */ +static bool is_sync_event(const struct acpi_hest_generic_status *estatus) +{ + struct yitian_raw_data_header *header; + struct yitian_ddr_raw_data *data; + + if (!yitian_estatus_check_header(estatus)) + return false; + + header = (struct yitian_raw_data_header *)((void *)estatus + + estatus->raw_data_offset); + if (header->type != ERR_TYPE_DDR) + return false; + + data = (struct yitian_ddr_raw_data *)(header + 1); + /* 1 for synchronous exception */ + if (data->ex_type == 1) + return true; + + return false; +} +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ + static bool ghes_do_proc(struct ghes *ghes, const struct acpi_hest_generic_status *estatus) { @@ -685,6 +712,10 @@ static bool ghes_do_proc(struct ghes *ghes, bool sync = is_hest_sync_notify(ghes); sev = ghes_severity(estatus->error_severity); +#ifdef CONFIG_YITIAN_CPER_RAWDATA + if (estatus->raw_data_length) + sync = is_sync_event(estatus); +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ apei_estatus_for_each_section(estatus, gdata) { sec_type = (guid_t *)gdata->section_type; sec_sev = 
ghes_severity(gdata->error_severity);
--
Gitee

From 6fe402c78e55fc11eb15439d43fb32f78d4545ef Mon Sep 17 00:00:00 2001
From: Shuai Xue
Date: Tue, 31 Jan 2023 14:03:03 +0800
Subject: [PATCH 0824/2138] anolis: ACPI: APEI: handle synchronous exceptions
 in task work

ANBZ: #8642

On Arm64 platform, errors could be signaled by asynchronous interrupt,
e.g. when an error is detected by a background scrubber, or signaled by
synchronous exception, e.g. when an uncorrected error is consumed. Both
synchronous and asynchronous errors are queued and handled by a
dedicated kthread in workqueue.

commit 7f17b4a121d0 ("ACPI: APEI: Kick the memory_failure() queue for
synchronous errors") keeps track of whether memory_failure() work was
queued, and makes task_work pending to flush out the workqueue so that
the work for a synchronous error is processed before returning to
user-space. The trick ensures that the corrupted page is unmapped and
poisoned. And after returning to user-space, the task starts at the
current instruction, which triggers a page fault, and the kernel will
send SIGBUS due to VM_FAULT_HWPOISON.

Although the task could be killed by the page fault, the memory failure
is handled in a kthread context, so the hwpoison-aware mechanisms, e.g.
PF_MCE_EARLY, early kill, do not work as expected. For example,
hwpoison-aware user-space processes like QEMU set PF_MCE_EARLY through
prctl while initializing the SIGBUS handler. Then the early_kill mode of
memory_failure() will send the wrong si_code by SIGBUS signal: the
actual user-space process accessing the corrupt memory location will be
collected by find_early_kill_thread(), and then SIGBUS with the
BUS_MCEERR_AO si_code is sent to that process instead of BUS_MCEERR_AR.
KVM uses the si_code BUS_MCEERR_AO for 'action optional' early
notifications, and BUS_MCEERR_AR for 'action required' synchronous/late
notifications.
To this end, separate synchronous and asynchronous error handling into different paths like X86 platform does: - task work for synchronous error. - and workqueue for asynchronous error. The task work function memory_failure_cb() includes three parts: - poison the page and unmap it, then send SIGBUS with appropriate si_code to the process which accessing the page in memory failure work. - free the task work struct. - send SIGBUS to current if memory failure fails. Signed-off-by: Shuai Xue Signed-off-by: Bixuan Cui Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/1124 [Ruidong: remove unnecessary else] Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/acpi/apei/ghes.c | 66 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 4f9ee26e01d9..64b6193e2475 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -483,9 +483,62 @@ static void ghes_kick_task_work(struct callback_head *head) gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len); } +/* + * Tasks can handle task_work: + * + * - All user task: run task work before return to user. + */ +static bool should_add_task_work(struct task_struct *task) +{ + if (task->mm) + return true; + + return false; +} + +/** + * struct mce_task_work - for synchronous RAS event + * + * @twork: callback_head for task work + * @pfn: page frame number of corrupted page + * @flags: fine tune action taken + * + * Structure to pass task work to be handled before + * ret_to_user via task_work_add(). 
+ */ +struct mce_task_work { + struct callback_head twork; + u64 pfn; + int flags; +}; + +static void memory_failure_cb(struct callback_head *twork) +{ + int rc; + struct mce_task_work *twcb = + container_of(twork, struct mce_task_work, twork); + + rc = memory_failure(twcb->pfn, twcb->flags); + kfree(twcb); + + if (!rc) + return; + /* + * -EHWPOISON from memory_failure() means that it already sent SIGBUS + * to the current process with the proper error info, so no need to + * send SIGBUS here again. + */ + if (rc == -EHWPOISON) + return; + + pr_err("Memory error not recovered"); + force_sig(SIGBUS); +} + static bool ghes_do_memory_failure(u64 physical_addr, int flags) { unsigned long pfn; + struct mce_task_work *twcb; if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE)) return false; @@ -498,7 +551,20 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags) return false; } + if (flags == MF_ACTION_REQUIRED && should_add_task_work(current)) { + twcb = kmalloc(sizeof(*twcb), GFP_ATOMIC); + if (!twcb) + return false; + + twcb->pfn = pfn; + twcb->flags = flags; + init_task_work(&twcb->twork, memory_failure_cb); + task_work_add(current, &twcb->twork, TWA_RESUME); + return false; + } + memory_failure_queue(pfn, flags); + return true; } -- Gitee From 8499f5f55a88e5f807d50ac14a0f6303a483c00f Mon Sep 17 00:00:00 2001 From: Yao Hongbo Date: Mon, 7 Mar 2022 14:11:47 +0800 Subject: [PATCH 0825/2138] anolis: pci: Add a quirk for ALIBABA yitian710 to recover fatal aer ANBZ: #8642 On Alibaba yitian710 Soc, the Hardware does always clear pcie config space and some key registers between resetting the secondary bus. This results in the OS cannot recover the fatal pcie error, which causes unexpected system error finally. Luckily, it seems a simple save/restore of these regs during the bus reset can fix the issues. 
Signed-off-by: Yao Hongbo Reviewed-by: Baolin Wang Reviewed-by: luanshi Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/pci/pci.c | 70 +++++++++++++++++++++++++++++++++++++++++++- drivers/pci/quirks.c | 15 ++++++++++ include/linux/pci.h | 11 +++++++ 3 files changed, 95 insertions(+), 1 deletion(-) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 3d57d1ec15b4..1dd6cda0d242 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -5245,6 +5245,65 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type) PCIE_RESET_READY_POLL_MS - delay); } +static void pci_save_yitian710_regs(struct pci_dev *dev, + struct pci_saved_regs *saved) +{ + int i; + u16 ctrl, ctrl2; + + /* if not yitian 710, should return here */ + if (!dev->broken_bus_reset) + return; + + /* save pcie type1 config space header*/ + for (i = 0; i < 16; i++) + pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); + + pci_read_config_word(dev, PCI_EXP_DEVCTL, &ctrl); + pci_read_config_word(dev, PCI_EXP_DEVCTL2, &ctrl2); + + saved->mps = (ctrl & PCI_EXP_DEVCTL_PAYLOAD) >> 5; + saved->mrrs = (ctrl & PCI_EXP_DEVCTL_READRQ) >> 12; + saved->comp_timeout_val = ctrl2 & PCI_EXP_DEVCTL2_COMP_TIMEOUT; + saved->comp_timeout_dis = (ctrl2 & PCI_EXP_DEVCTL2_COMP_TMOUT_DIS) >> 4; + if (dev->acs_cap) + pci_read_config_dword(dev, dev->acs_cap + PCI_ACS_CAP, + &saved->acs_cap_ctrl); + + pci_read_config_dword(dev, PCI_EXP_SLTCTL, &saved->slot_ctrl_status); +} + +static void pci_restore_yitian710_regs(struct pci_dev *dev, + struct pci_saved_regs *saved) +{ + u16 regval; + + if (!dev->broken_bus_reset) + return; + + /* restore pcie type1 config space header */ + pci_restore_config_space_range(dev, 0, 15, 0, false); + + regval = (saved->mps) << 5; + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_PAYLOAD, regval); + regval = (saved->mrrs) << 12; + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + 
PCI_EXP_DEVCTL_READRQ, regval); + regval = saved->comp_timeout_val; + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_COMP_TIMEOUT, regval); + regval = (saved->comp_timeout_dis) << 4; + pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL2, + PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, regval); + + if (dev->acs_cap) + pci_write_config_dword(dev, dev->acs_cap + PCI_ACS_CAP, + saved->acs_cap_ctrl); + + pci_write_config_dword(dev, PCI_EXP_SLTCTL, saved->slot_ctrl_status); +} + void pci_reset_secondary_bus(struct pci_dev *dev) { u16 ctrl; @@ -5277,9 +5336,18 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev) */ int pci_bridge_secondary_bus_reset(struct pci_dev *dev) { + int rc; + struct pci_saved_regs saved = { }; + + /* save key regs for yitian710 during bus rest*/ + pci_save_yitian710_regs(dev, &saved); + pcibios_reset_secondary_bus(dev); + rc = pci_bridge_wait_for_secondary_bus(dev, "bus reset"); - return pci_bridge_wait_for_secondary_bus(dev, "bus reset"); + /* restore regs for yitian710*/ + pci_restore_yitian710_regs(dev, &saved); + return rc; } EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 00bdb5fd8218..5b1dbdca3253 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -6283,3 +6283,18 @@ static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev) pdev->d3cold_delay = 1000; } DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec); +/* + * On Alibaba yitian710 Soc, the Hardware does always clear pcie config space + * and some key registers between resetting the secondary bus. This results in + * the OS cannot recover the fatal pcie error, which causes unexpected system + * error finally. + * + * Luckily, it seems a simple save/restore of these regs during the bus reset + * can fix the issues. 
+ */ +static void quirk_save_yitian710_regs(struct pci_dev *dev) +{ + dev->broken_bus_reset = 1; +} +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ALIBABA, 0x8000, + PCI_CLASS_BRIDGE_PCI, 8, quirk_save_yitian710_regs); diff --git a/include/linux/pci.h b/include/linux/pci.h index f75eb4d3e30c..30f379ebe027 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -312,6 +312,16 @@ struct pci_vpd { u8 cap; }; +/* The structure describes the regs to be saved for yitian710 SoC. */ +struct pci_saved_regs { + u8 mps; + u8 mrrs; + u8 comp_timeout_val; + u8 comp_timeout_dis; + u32 acs_cap_ctrl; + u32 slot_ctrl_status; /* should be the last register to restore */ +}; + struct irq_affinity; struct pcie_link_state; struct pci_sriov; @@ -465,6 +475,7 @@ struct pci_dev { unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */ unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */ unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? */ + unsigned int broken_bus_reset:1; /* Abnormal bus reset */ pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ -- Gitee From 5e41f975e3152e0f0a6f92bd8dfc25138a7658f3 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Wed, 1 Mar 2023 13:27:13 +0800 Subject: [PATCH 0826/2138] anolis: pci: fix quirk for Yitian 710 to support AER fatal error recovery ANBZ: #8642 When a fatal error has been detected, the AER driver will reset the PCIe physical link to recover the unreliable link. It is a hot reset triggered by setting secondary bus reset (Bit 6) in bridge control register and resulting in a link down. (NOTE: this type of link down will not be reported by a suprise link down error) However, on the Alibaba Yitian 710 SoC, the hardware clears the PCIe config space and some key registers after a link down, preventing the OS from recovering the error and resulting in an unexpected SError. 
Commit ae314b6cdbc3 ("anolis: pci: Add a quirk for ALIBABA yitian710 to recover fatal aer") adds a quirk to address the SError problem but fatal errors are still not recovered. The root cause is that the cleared registers are not saved/restored correctly. To fix this issue, pcie_capability_read_word() and similar interfaces are used to proform the save/restore procedures. In addition, Root Error Command Register (Offset 2Ch in Advanced Error Reporting Extended Capability) is also cleared so that save/restore procedures are applied to it. Fixes: f89afed29adb ("anolis: pci: Add a quirk for ALIBABA yitian710 to recover fatal aer") Signed-off-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/1508Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/1384 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- drivers/pci/pci.c | 44 +++++++++++++++++++++----------------------- include/linux/pci.h | 10 +++++----- 2 files changed, 26 insertions(+), 28 deletions(-) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 1dd6cda0d242..29e1d3ba869a 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -5249,7 +5249,6 @@ static void pci_save_yitian710_regs(struct pci_dev *dev, struct pci_saved_regs *saved) { int i; - u16 ctrl, ctrl2; /* if not yitian 710, should return here */ if (!dev->broken_bus_reset) @@ -5259,49 +5258,48 @@ static void pci_save_yitian710_regs(struct pci_dev *dev, for (i = 0; i < 16; i++) pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); - pci_read_config_word(dev, PCI_EXP_DEVCTL, &ctrl); - pci_read_config_word(dev, PCI_EXP_DEVCTL2, &ctrl2); + pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &saved->dev_ctrl); + pcie_capability_read_word(dev, PCI_EXP_RTCTL, &saved->root_ctrl); + pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &saved->dev_ctrl2); - saved->mps = (ctrl & PCI_EXP_DEVCTL_PAYLOAD) >> 5; - saved->mrrs = (ctrl & PCI_EXP_DEVCTL_READRQ) >> 12; - 
saved->comp_timeout_val = ctrl2 & PCI_EXP_DEVCTL2_COMP_TIMEOUT; - saved->comp_timeout_dis = (ctrl2 & PCI_EXP_DEVCTL2_COMP_TMOUT_DIS) >> 4; if (dev->acs_cap) pci_read_config_dword(dev, dev->acs_cap + PCI_ACS_CAP, &saved->acs_cap_ctrl); + if (dev->aer_cap) + pci_read_config_dword(dev, dev->aer_cap + PCI_ERR_ROOT_COMMAND, + &saved->root_err_cmd); - pci_read_config_dword(dev, PCI_EXP_SLTCTL, &saved->slot_ctrl_status); + pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &saved->slot_ctrl); } static void pci_restore_yitian710_regs(struct pci_dev *dev, struct pci_saved_regs *saved) { - u16 regval; - if (!dev->broken_bus_reset) return; /* restore pcie type1 config space header */ pci_restore_config_space_range(dev, 0, 15, 0, false); - regval = (saved->mps) << 5; - pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_PAYLOAD, regval); - regval = (saved->mrrs) << 12; - pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_READRQ, regval); - regval = saved->comp_timeout_val; - pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL2, - PCI_EXP_DEVCTL2_COMP_TIMEOUT, regval); - regval = (saved->comp_timeout_dis) << 4; - pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL2, - PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, regval); + /* + * restore Device Control, Root Control Register and Device Control 2 + * in PCI Express Capability + */ + pcie_capability_write_word(dev, PCI_EXP_DEVCTL, saved->dev_ctrl); + pcie_capability_write_word(dev, PCI_EXP_RTCTL, saved->root_ctrl); + pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, saved->dev_ctrl2); + /* restore ACS Capability Register */ if (dev->acs_cap) pci_write_config_dword(dev, dev->acs_cap + PCI_ACS_CAP, saved->acs_cap_ctrl); + /* restore AER Root Error Command Register */ + if (dev->aer_cap) + pci_write_config_dword(dev, dev->aer_cap + PCI_ERR_ROOT_COMMAND, + saved->root_err_cmd); - pci_write_config_dword(dev, PCI_EXP_SLTCTL, saved->slot_ctrl_status); + /* restore Slot Control Register */ + 
pcie_capability_write_word(dev, PCI_EXP_SLTCTL, saved->slot_ctrl); } void pci_reset_secondary_bus(struct pci_dev *dev) diff --git a/include/linux/pci.h b/include/linux/pci.h index 30f379ebe027..7b2404e7f540 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -314,12 +314,12 @@ struct pci_vpd { /* The structure describes the regs to be saved for yitian710 SoC. */ struct pci_saved_regs { - u8 mps; - u8 mrrs; - u8 comp_timeout_val; - u8 comp_timeout_dis; + u16 dev_ctrl; + u16 dev_ctrl2; u32 acs_cap_ctrl; - u32 slot_ctrl_status; /* should be the last register to restore */ + u32 root_err_cmd; + u16 root_ctrl; + u16 slot_ctrl; /* should be the last register to restore */ }; struct irq_affinity; -- Gitee From 263323544c7c778342429efdc0da7f3fb2837eab Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Mon, 8 May 2023 09:44:32 +0800 Subject: [PATCH 0827/2138] uaccess: add generic fallback version of copy_mc_to_user() ANBZ: #8642 cherry-picked from https://lore.kernel.org/lkml/20230508014436.198717-1-tongtiangen@huawei.com/. x86/powerpc has its implementation of copy_mc_to_user(), we add generic fallback in include/linux/uaccess.h to prepare for other architectures to enable CONFIG_ARCH_HAS_COPY_MC. 
Signed-off-by: Tong Tiangen Acked-by: Michael Ellerman Signed-off-by: Ruidong Tian Reviewed-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2131 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- arch/x86/include/asm/uaccess.h | 1 + include/linux/uaccess.h | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 3a7755c1a441..3db67f44063b 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -497,6 +497,7 @@ copy_mc_to_kernel(void *to, const void *from, unsigned len); unsigned long __must_check copy_mc_to_user(void __user *to, const void *from, unsigned len); +#define copy_mc_to_user copy_mc_to_user #endif /* diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 3064314f4832..550287c92990 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -205,6 +205,15 @@ copy_mc_to_kernel(void *dst, const void *src, size_t cnt) } #endif +#ifndef copy_mc_to_user +static inline unsigned long __must_check +copy_mc_to_user(void *dst, const void *src, size_t cnt) +{ + check_object_size(src, cnt, true); + return raw_copy_to_user(dst, src, cnt); +} +#endif + static __always_inline void pagefault_disabled_inc(void) { current->pagefault_disabled++; -- Gitee From 478e51895783c0ce4adb0b32b53586a3c97ebb73 Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Mon, 8 May 2023 09:44:33 +0800 Subject: [PATCH 0828/2138] arm64: add support for machine check error safe ANBZ: #8642 cherry-picked from https://lore.kernel.org/lkml/20230508014436.198717-1-tongtiangen@huawei.com/. For the arm64 kernel, when it processes hardware memory errors for synchronous notifications (do_sea()), if the error is consumed within the kernel, the current processing is to panic. However, it is not optimal. 
Take uaccess for example, if the uaccess operation fails due to memory error, only the user process will be affected. Killing the user process and isolating the corrupt page is a better choice. This patch only enable machine error check framework and adds an exception fixup before the kernel panic in do_sea(). Signed-off-by: Tong Tiangen Signed-off-by: Ruidong Tian Reviewed-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2131 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/extable.h | 1 + arch/arm64/mm/extable.c | 16 ++++++++++++++++ arch/arm64/mm/fault.c | 29 ++++++++++++++++++++++++++++- 4 files changed, 46 insertions(+), 1 deletion(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7ff5e6becc9b..df023d477f4b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -21,6 +21,7 @@ config ARM64 select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_CURRENT_STACK_POINTER + select ARCH_HAS_COPY_MC if ACPI_APEI_GHES select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DMA_PREP_COHERENT diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h index 72b0e71cc3de..f80ebd0addfd 100644 --- a/arch/arm64/include/asm/extable.h +++ b/arch/arm64/include/asm/extable.h @@ -46,4 +46,5 @@ bool ex_handler_bpf(const struct exception_table_entry *ex, #endif /* !CONFIG_BPF_JIT */ bool fixup_exception(struct pt_regs *regs); +bool fixup_exception_mc(struct pt_regs *regs); #endif diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index 228d681a8715..478e639f8680 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -76,3 +76,19 @@ bool fixup_exception(struct pt_regs *regs) BUG(); } + +bool fixup_exception_mc(struct pt_regs *regs) +{ + const struct exception_table_entry *ex; + + ex = 
search_exception_tables(instruction_pointer(regs)); + if (!ex) + return false; + + /* + * This is not complete, More Machine check safe extable type can + * be processed here. + */ + + return false; +} diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 2e5d1e238af9..a6d1c333719f 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -728,6 +728,31 @@ static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs) return 1; /* "fault" */ } +static bool arm64_do_kernel_sea(unsigned long addr, unsigned int esr, + struct pt_regs *regs, int sig, int code) +{ + if (!IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC)) + return false; + + if (user_mode(regs)) + return false; + + if (apei_claim_sea(regs) < 0) + return false; + + if (!fixup_exception_mc(regs)) + return false; + + if (current->flags & PF_KTHREAD) + return true; + + set_thread_esr(0, esr); + arm64_force_sig_fault(sig, code, addr, + "Uncorrected memory error on access to user memory\n"); + + return true; +} + static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf; @@ -753,7 +778,9 @@ static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) */ siaddr = untagged_addr(far); } - arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); + + if (!arm64_do_kernel_sea(siaddr, esr, regs, inf->sig, inf->code)) + arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); return 0; } -- Gitee From 4bcbda9f1bffc96df3f436283f46f337b3af158f Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Mon, 8 May 2023 09:44:34 +0800 Subject: [PATCH 0829/2138] arm64: add uaccess to machine check safe ANBZ: #8642 cherry-picked from https://lore.kernel.org/lkml/20230508014436.198717-1-tongtiangen@huawei.com/. If user process access memory fails due to hardware memory error, only the relevant processes are affected, so it is more reasonable to kill the user process and isolate the corrupt page than to panic the kernel. 
Signed-off-by: Tong Tiangen Signed-off-by: Ruidong Tian Reviewed-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2131 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- arch/arm64/mm/extable.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index 478e639f8680..28ec35e3d210 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -85,10 +85,10 @@ bool fixup_exception_mc(struct pt_regs *regs) if (!ex) return false; - /* - * This is not complete, More Machine check safe extable type can - * be processed here. - */ + switch (ex->type) { + case EX_TYPE_UACCESS_ERR_ZERO: + return ex_handler_uaccess_err_zero(ex, regs); + } return false; } -- Gitee From e7dd10cbf510fb57d5de1404482d1ff5f2e3fe39 Mon Sep 17 00:00:00 2001 From: Tong Tiangen Date: Mon, 8 May 2023 09:44:36 +0800 Subject: [PATCH 0830/2138] arm64: support copy_mc_[user]_highpage() ANBZ: #8642 cherry-picked from https://lore.kernel.org/lkml/20230508014436.198717-1-tongtiangen@huawei.com/. Currently, many scenarios that can tolerate memory errors when copying page have been supported in the kernel[1][2][3], all of which are implemented by copy_mc_[user]_highpage(). arm64 should also support this mechanism. Due to mte, arm64 needs to have its own copy_mc_[user]_highpage() architecture implementation, macros __HAVE_ARCH_COPY_MC_HIGHPAGE and __HAVE_ARCH_COPY_MC_USER_HIGHPAGE have been added to control it. Add new helper copy_mc_page() which provide a page copy implementation with machine check safe. The copy_mc_page() in copy_mc_page.S is largely borrows from copy_page() in copy_page.S and the main difference is copy_mc_page() add extable entry to every load/store insn to support machine check safe. Add new extable type EX_TYPE_COPY_MC_PAGE_ERR_ZERO which used in copy_mc_page(). 
[1]a873dfe1032a ("mm, hwpoison: try to recover from copy-on write faults") [2]5f2500b93cc9 ("mm/khugepaged: recover from poisoned anonymous memory") [3]6b970599e807 ("mm: hwpoison: support recovery from ksm_might_need_to_copy()") Signed-off-by: Tong Tiangen Signed-off-by: Ruidong Tian Reviewed-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2131 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/2949 --- arch/arm64/include/asm/asm-extable.h | 15 +++++ arch/arm64/include/asm/assembler.h | 4 ++ arch/arm64/include/asm/mte.h | 5 ++ arch/arm64/include/asm/page.h | 10 ++++ arch/arm64/lib/Makefile | 2 + arch/arm64/lib/copy_mc_page.S | 89 ++++++++++++++++++++++++++++ arch/arm64/lib/mte.S | 27 +++++++++ arch/arm64/mm/copypage.c | 59 ++++++++++++++++-- arch/arm64/mm/extable.c | 7 ++- include/linux/highmem.h | 4 ++ 10 files changed, 213 insertions(+), 9 deletions(-) create mode 100644 arch/arm64/lib/copy_mc_page.S diff --git a/arch/arm64/include/asm/asm-extable.h b/arch/arm64/include/asm/asm-extable.h index 980d1dd8e1a3..819044fefbe7 100644 --- a/arch/arm64/include/asm/asm-extable.h +++ b/arch/arm64/include/asm/asm-extable.h @@ -10,6 +10,7 @@ #define EX_TYPE_UACCESS_ERR_ZERO 2 #define EX_TYPE_KACCESS_ERR_ZERO 3 #define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4 +#define EX_TYPE_COPY_MC_PAGE_ERR_ZERO 5 /* Data fields for EX_TYPE_UACCESS_ERR_ZERO */ #define EX_DATA_REG_ERR_SHIFT 0 @@ -51,6 +52,16 @@ #define _ASM_EXTABLE_UACCESS(insn, fixup) \ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, wzr, wzr) +#define _ASM_EXTABLE_COPY_MC_PAGE_ERR_ZERO(insn, fixup, err, zero) \ + __ASM_EXTABLE_RAW(insn, fixup, \ + EX_TYPE_COPY_MC_PAGE_ERR_ZERO, \ + ( \ + EX_DATA_REG(ERR, err) | \ + EX_DATA_REG(ZERO, zero) \ + )) + +#define _ASM_EXTABLE_COPY_MC_PAGE(insn, fixup) \ + _ASM_EXTABLE_COPY_MC_PAGE_ERR_ZERO(insn, fixup, wzr, wzr) /* * Create an exception table entry for uaccess `insn`, which will branch to `fixup` * when an unhandled fault is 
taken. @@ -59,6 +70,10 @@ _ASM_EXTABLE_UACCESS(\insn, \fixup) .endm + .macro _asm_extable_copy_mc_page, insn, fixup + _ASM_EXTABLE_COPY_MC_PAGE(\insn, \fixup) + .endm + /* * Create an exception table entry for `insn` if `fixup` is provided. Otherwise * do nothing. diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 376a980f2bad..547ab2f85888 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -154,6 +154,10 @@ lr .req x30 // link register #define CPU_LE(code...) code #endif +#define CPY_MC(l, x...) \ +9999: x; \ + _asm_extable_copy_mc_page 9999b, l + /* * Define a macro that constructs a 64-bit value by concatenating two * 32-bit registers. Note that on big endian systems the order of the diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h index 4cedbaa16f41..79474232d413 100644 --- a/arch/arm64/include/asm/mte.h +++ b/arch/arm64/include/asm/mte.h @@ -93,6 +93,7 @@ void mte_zero_clear_page_tags(void *addr); void mte_sync_tags(pte_t pte); void mte_copy_page_tags(void *kto, const void *kfrom); void mte_thread_init_user(void); +int mte_copy_mc_page_tags(void *kto, const void *kfrom); void mte_thread_switch(struct task_struct *next); void mte_cpu_setup(void); void mte_suspend_enter(void); @@ -131,6 +132,10 @@ static inline void mte_copy_page_tags(void *kto, const void *kfrom) static inline void mte_thread_init_user(void) { } +static inline int mte_copy_mc_page_tags(void *kto, const void *kfrom) +{ + return 0; +} static inline void mte_thread_switch(struct task_struct *next) { } diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 2312e6ee595f..62bdc843e3e7 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -36,6 +36,16 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, void tag_clear_highpage(struct page *to); #define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE +#ifdef CONFIG_ARCH_HAS_COPY_MC +int 
copy_mc_page(void *to, const void *from); +int copy_mc_highpage(struct page *to, struct page *from); +#define __HAVE_ARCH_COPY_MC_HIGHPAGE + +int copy_mc_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); +#define __HAVE_ARCH_COPY_MC_USER_HIGHPAGE +#endif + #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 29490be2546b..a2fd865b816d 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -15,6 +15,8 @@ endif lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o +lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc_page.o + obj-$(CONFIG_CRC32) += crc32.o obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o diff --git a/arch/arm64/lib/copy_mc_page.S b/arch/arm64/lib/copy_mc_page.S new file mode 100644 index 000000000000..656d831ef4b8 --- /dev/null +++ b/arch/arm64/lib/copy_mc_page.S @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Copy a page from src to dest (both are page aligned) with machine check + * + * Parameters: + * x0 - dest + * x1 - src + * Returns: + * x0 - Return 0 if copy success, or -EFAULT if anything goes wrong + * while copying. + */ +SYM_FUNC_START(__pi_copy_mc_page) +alternative_if ARM64_HAS_NO_HW_PREFETCH + // Prefetch three cache lines ahead. 
+ prfm pldl1strm, [x1, #128] + prfm pldl1strm, [x1, #256] + prfm pldl1strm, [x1, #384] +alternative_else_nop_endif + +CPY_MC(9998f, ldp x2, x3, [x1]) +CPY_MC(9998f, ldp x4, x5, [x1, #16]) +CPY_MC(9998f, ldp x6, x7, [x1, #32]) +CPY_MC(9998f, ldp x8, x9, [x1, #48]) +CPY_MC(9998f, ldp x10, x11, [x1, #64]) +CPY_MC(9998f, ldp x12, x13, [x1, #80]) +CPY_MC(9998f, ldp x14, x15, [x1, #96]) +CPY_MC(9998f, ldp x16, x17, [x1, #112]) + + add x0, x0, #256 + add x1, x1, #128 +1: + tst x0, #(PAGE_SIZE - 1) + +alternative_if ARM64_HAS_NO_HW_PREFETCH + prfm pldl1strm, [x1, #384] +alternative_else_nop_endif + +CPY_MC(9998f, stnp x2, x3, [x0, #-256]) +CPY_MC(9998f, ldp x2, x3, [x1]) +CPY_MC(9998f, stnp x4, x5, [x0, #16 - 256]) +CPY_MC(9998f, ldp x4, x5, [x1, #16]) +CPY_MC(9998f, stnp x6, x7, [x0, #32 - 256]) +CPY_MC(9998f, ldp x6, x7, [x1, #32]) +CPY_MC(9998f, stnp x8, x9, [x0, #48 - 256]) +CPY_MC(9998f, ldp x8, x9, [x1, #48]) +CPY_MC(9998f, stnp x10, x11, [x0, #64 - 256]) +CPY_MC(9998f, ldp x10, x11, [x1, #64]) +CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256]) +CPY_MC(9998f, ldp x12, x13, [x1, #80]) +CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256]) +CPY_MC(9998f, ldp x14, x15, [x1, #96]) +CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256]) +CPY_MC(9998f, ldp x16, x17, [x1, #112]) + + add x0, x0, #128 + add x1, x1, #128 + + b.ne 1b + +CPY_MC(9998f, stnp x2, x3, [x0, #-256]) +CPY_MC(9998f, stnp x4, x5, [x0, #16 - 256]) +CPY_MC(9998f, stnp x6, x7, [x0, #32 - 256]) +CPY_MC(9998f, stnp x8, x9, [x0, #48 - 256]) +CPY_MC(9998f, stnp x10, x11, [x0, #64 - 256]) +CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256]) +CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256]) +CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256]) + + mov x0, #0 + ret + +9998: mov x0, #-EFAULT + ret + +SYM_FUNC_END(__pi_copy_mc_page) +SYM_FUNC_ALIAS(copy_mc_page, __pi_copy_mc_page) +EXPORT_SYMBOL(copy_mc_page) diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S index 5018ac03b6bf..2b748e83f6cf 100644 --- a/arch/arm64/lib/mte.S +++ 
b/arch/arm64/lib/mte.S @@ -80,6 +80,33 @@ SYM_FUNC_START(mte_copy_page_tags) ret SYM_FUNC_END(mte_copy_page_tags) +/* + * Copy the tags from the source page to the destination one with machine check safe + * x0 - address of the destination page + * x1 - address of the source page + * Returns: + * x0 - Return 0 if copy success, or + * -EFAULT if anything goes wrong while copying. + */ +SYM_FUNC_START(mte_copy_mc_page_tags) + mov x2, x0 + mov x3, x1 + multitag_transfer_size x5, x6 +1: +CPY_MC(2f, ldgm x4, [x3]) +CPY_MC(2f, stgm x4, [x2]) + add x2, x2, x5 + add x3, x3, x5 + tst x2, #(PAGE_SIZE - 1) + b.ne 1b + + mov x0, #0 + ret + +2: mov x0, #-EFAULT + ret +SYM_FUNC_END(mte_copy_mc_page_tags) + /* * Read tags from a user buffer (one tag per byte) and set the corresponding * tags at the given kernel address. Used by PTRACE_POKEMTETAGS. diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index a7bb20055ce0..b062c925daa4 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -14,6 +14,21 @@ #include #include +static int do_mte(struct page *to, struct page *from, void *kto, void *kfrom, bool mc) +{ + int ret = 0; + + if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) { + set_bit(PG_mte_tagged, &to->flags); + if (mc) + ret = mte_copy_mc_page_tags(kto, kfrom); + else + mte_copy_page_tags(kto, kfrom); + } + + return ret; +} + void copy_highpage(struct page *to, struct page *from) { void *kto = page_address(to); @@ -24,12 +39,7 @@ void copy_highpage(struct page *to, struct page *from) if (kasan_hw_tags_enabled()) page_kasan_tag_reset(to); - if (system_supports_mte() && page_mte_tagged(from)) { - /* It's a new page, shouldn't have been tagged yet */ - WARN_ON_ONCE(!try_page_mte_tagging(to)); - mte_copy_page_tags(kto, kfrom); - set_page_mte_tagged(to); - } + do_mte(to, from, kto, kfrom, false); } EXPORT_SYMBOL(copy_highpage); @@ -40,3 +50,40 @@ void copy_user_highpage(struct page *to, struct page *from, flush_dcache_page(to); } 
EXPORT_SYMBOL_GPL(copy_user_highpage); + +#ifdef CONFIG_ARCH_HAS_COPY_MC +/* + * Return -EFAULT if anything goes wrong while copying page or mte. + */ +int copy_mc_highpage(struct page *to, struct page *from) +{ + void *kto = page_address(to); + void *kfrom = page_address(from); + int ret; + + ret = copy_mc_page(kto, kfrom); + if (ret) + return -EFAULT; + + ret = do_mte(to, from, kto, kfrom, true); + if (ret) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL(copy_mc_highpage); + +int copy_mc_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + int ret; + + ret = copy_mc_highpage(to, from); + + if (!ret) + flush_dcache_page(to); + + return ret; +} +EXPORT_SYMBOL_GPL(copy_mc_user_highpage); +#endif diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index 28ec35e3d210..bdc81518d207 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -16,7 +16,7 @@ get_ex_fixup(const struct exception_table_entry *ex) return ((unsigned long)&ex->fixup + ex->fixup); } -static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex, +static bool ex_handler_fixup_err_zero(const struct exception_table_entry *ex, struct pt_regs *regs) { int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data); @@ -69,7 +69,7 @@ bool fixup_exception(struct pt_regs *regs) return ex_handler_bpf(ex, regs); case EX_TYPE_UACCESS_ERR_ZERO: case EX_TYPE_KACCESS_ERR_ZERO: - return ex_handler_uaccess_err_zero(ex, regs); + return ex_handler_fixup_err_zero(ex, regs); case EX_TYPE_LOAD_UNALIGNED_ZEROPAD: return ex_handler_load_unaligned_zeropad(ex, regs); } @@ -87,7 +87,8 @@ bool fixup_exception_mc(struct pt_regs *regs) switch (ex->type) { case EX_TYPE_UACCESS_ERR_ZERO: - return ex_handler_uaccess_err_zero(ex, regs); + case EX_TYPE_COPY_MC_PAGE_ERR_ZERO: + return ex_handler_fixup_err_zero(ex, regs); } return false; diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 75607d4ba26c..bf4ea9f2f457 100644 --- 
a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -371,19 +371,23 @@ static inline int copy_mc_highpage(struct page *to, struct page *from) return ret; } #else +#ifndef __HAVE_ARCH_COPY_MC_USER_HIGHPAGE static inline int copy_mc_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { copy_user_highpage(to, from, vaddr, vma); return 0; } +#endif +#ifndef __HAVE_ARCH_COPY_MC_HIGHPAGE static inline int copy_mc_highpage(struct page *to, struct page *from) { copy_highpage(to, from); return 0; } #endif +#endif static inline void memcpy_page(struct page *dst_page, size_t dst_off, struct page *src_page, size_t src_off, -- Gitee From f2259a55bb2fa75e947e685632dee4b9c5743adc Mon Sep 17 00:00:00 2001 From: chench00 Date: Tue, 2 Apr 2024 11:01:42 +0800 Subject: [PATCH 0831/2138] anolis: crypto: command co-processor: Add another mailbox interrupt support for PSP sending command to X86 ANBZ: #8670 The existing kernel supports only interrupt for the mailbox interface for X86 sending commands to PSP and PSP to ack, e.g. the SEV commands. However, some PSP-based security modules in Hygon CPU, such as TPCM and TDM(Trusted Dynamic Measuring), needs sending commands/notifications proactively to X86 core via interrupt and a 2nd mailbox interface. Similar to the existing one, the 2nd mailbox consists of a 32-bits command register and two 32-bits data registers. The PSP interrupt handling needs to add this interrupt support; besides, in order to support user defined command handler, a callback registration function is also provided. Up to 16 command callbacks is supported, which are indexed by command IDs. Currently, command ID 0 is assigned to TPCM and 1 to TDM, while others are reserved. 
Signed-off-by: chench00 Reviewed-by: Tianjia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2986 --- drivers/crypto/ccp/Kconfig | 7 +++ drivers/crypto/ccp/psp-dev.c | 106 ++++++++++++++++++++++++++++++++++- drivers/crypto/ccp/psp-dev.h | 5 ++ drivers/crypto/ccp/sp-dev.h | 5 ++ drivers/crypto/ccp/sp-pci.c | 16 +++++- include/linux/psp-sev.h | 10 ++++ 6 files changed, 147 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 9d5d3312f8e3..d62d628fef20 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -53,6 +53,13 @@ config HYGON_GM help Hygon GM ccp driver +config HYGON_PSP2CPU_CMD + bool "Hygon PSP2CPU Command Interface" + default y + depends on CRYPTO_DEV_SP_PSP + help + Hygon PSP2CPU Command Support + config CRYPTO_DEV_CCP_DEBUGFS bool "Enable CCP Internals in DebugFS" default n diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index c110ae79d93f..47de733084b1 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -137,6 +137,102 @@ static irqreturn_t psp_irq_handler(int irq, void *data) return IRQ_HANDLED; } +#ifdef CONFIG_HYGON_PSP2CPU_CMD +static DEFINE_SPINLOCK(p2c_notifier_lock); +static p2c_notifier_t p2c_notifiers[P2C_NOTIFIERS_MAX] = {NULL}; +int psp_register_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)) +{ + int ret = -ENODEV; + unsigned long flags; + + spin_lock_irqsave(&p2c_notifier_lock, flags); + if (cmd_id < P2C_NOTIFIERS_MAX && !p2c_notifiers[cmd_id]) { + p2c_notifiers[cmd_id] = notifier; + ret = 0; + } + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(psp_register_cmd_notifier); + +int psp_unregister_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)) +{ + int ret = -ENODEV; + unsigned long flags; + + spin_lock_irqsave(&p2c_notifier_lock, flags); + if (cmd_id < P2C_NOTIFIERS_MAX && 
p2c_notifiers[cmd_id] == notifier) { + p2c_notifiers[cmd_id] = NULL; + ret = 0; + } + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(psp_unregister_cmd_notifier); + +#define PSP2CPU_MAX_LOOP 100 +static irqreturn_t psp_irq_handler_hygon(int irq, void *data) +{ + struct psp_device *psp = data; + struct sev_device *sev = psp->sev_irq_data; + unsigned int status; + int reg; + unsigned long flags; + int count = 0; + uint32_t p2c_cmd; + uint32_t p2c_lo_data; + uint32_t p2c_hi_data; + uint64_t p2c_data; + + /* Read the interrupt status: */ + status = ioread32(psp->io_regs + psp->vdata->intsts_reg); + + while (status && (count++ < PSP2CPU_MAX_LOOP)) { + /* Clear the interrupt status by writing the same value we read. */ + iowrite32(status, psp->io_regs + psp->vdata->intsts_reg); + + /* Check if it is command completion: */ + if (status & SEV_CMD_COMPLETE) { + /* Check if it is SEV command completion: */ + reg = ioread32(psp->io_regs + psp->vdata->sev->cmdresp_reg); + if (reg & PSP_CMDRESP_RESP) { + sev->int_rcvd = 1; + wake_up(&sev->int_queue); + } + } + + if (status & PSP_X86_CMD) { + /* Check if it is P2C command completion: */ + reg = ioread32(psp->io_regs + psp->vdata->p2c_cmdresp_reg); + if (!(reg & PSP_CMDRESP_RESP)) { + p2c_lo_data = ioread32(psp->io_regs + + psp->vdata->p2c_cmdbuff_addr_lo_reg); + p2c_hi_data = ioread32(psp->io_regs + + psp->vdata->p2c_cmdbuff_addr_hi_reg); + p2c_data = (((uint64_t)(p2c_hi_data) << 32) + + ((uint64_t)(p2c_lo_data))); + p2c_cmd = (uint32_t)(reg & SEV_CMDRESP_IOC); + if (p2c_cmd < P2C_NOTIFIERS_MAX) { + spin_lock_irqsave(&p2c_notifier_lock, flags); + if (p2c_notifiers[p2c_cmd]) + p2c_notifiers[p2c_cmd](p2c_cmd, p2c_data); + + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + } + + reg |= PSP_CMDRESP_RESP; + iowrite32(reg, psp->io_regs + psp->vdata->p2c_cmdresp_reg); + } + } + status = ioread32(psp->io_regs + psp->vdata->intsts_reg); + } + + return IRQ_HANDLED; +} +#endif + static void 
hygon_fixup_psp_caps(struct psp_device *psp) { if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) @@ -599,7 +695,15 @@ int psp_dev_init(struct sp_device *sp) } /* Request an irq */ - ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); + if (pdev->vendor == PCI_VENDOR_ID_HYGON) { +#ifdef CONFIG_HYGON_PSP2CPU_CMD + ret = sp_request_psp_irq(psp->sp, psp_irq_handler_hygon, psp->name, psp); +#else + ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); +#endif + } else { + ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); + } if (ret) { dev_err(dev, "psp: unable to allocate an IRQ\n"); goto e_err; diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index b0a7bf42e552..694bb3faf8be 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -33,6 +33,11 @@ #define MAX_PSP_NAME_LEN 16 +#ifdef CONFIG_HYGON_PSP2CPU_CMD +#define PSP_X86_CMD BIT(2) +#define P2C_NOTIFIERS_MAX 16 +#endif + extern struct psp_device *psp_master; typedef void (*psp_irq_handler_t)(int, void *, unsigned int); diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 2329ad524b49..d04d9743b680 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -76,6 +76,11 @@ struct psp_vdata { const unsigned int intsts_reg; const unsigned int bootloader_info_reg; const unsigned int platform_features; +#ifdef CONFIG_HYGON_PSP2CPU_CMD + const unsigned int p2c_cmdresp_reg; + const unsigned int p2c_cmdbuff_addr_lo_reg; + const unsigned int p2c_cmdbuff_addr_hi_reg; +#endif }; /* Structure to hold SP device data */ diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 5185555a74a7..4f6a0507f7cd 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -129,9 +129,13 @@ static umode_t psp_firmware_is_visible(struct kobject *kobj, struct attribute *a if (!psp) return 0; - +#ifdef CONFIG_X86 + if (attr == &dev_attr_bootloader_version.attr && + 
psp->vdata->bootloader_info_reg && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) +#else if (attr == &dev_attr_bootloader_version.attr && psp->vdata->bootloader_info_reg) +#endif val = ioread32(psp->io_regs + psp->vdata->bootloader_info_reg); if (attr == &dev_attr_tee_version.attr && @@ -459,6 +463,11 @@ static const struct psp_vdata pspv1 = { .feature_reg = 0x105fc, /* C2PMSG_63 */ .inten_reg = 0x10610, /* P2CMSG_INTEN */ .intsts_reg = 0x10614, /* P2CMSG_INTSTS */ +#ifdef CONFIG_HYGON_PSP2CPU_CMD + .p2c_cmdresp_reg = 0x105e8, + .p2c_cmdbuff_addr_lo_reg = 0x105ec, + .p2c_cmdbuff_addr_hi_reg = 0x105f0, +#endif }; static const struct psp_vdata pspv2 = { @@ -509,6 +518,11 @@ static const struct psp_vdata psp_csvv1 = { .feature_reg = 0x105fc, .inten_reg = 0x10670, .intsts_reg = 0x10674, +#ifdef CONFIG_HYGON_PSP2CPU_CMD + .p2c_cmdresp_reg = 0x105e8, + .p2c_cmdbuff_addr_lo_reg = 0x105ec, + .p2c_cmdbuff_addr_hi_reg = 0x105f0, +#endif }; #endif diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 1536d0057738..74086c114184 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -677,6 +677,16 @@ struct vpsp_ret { #define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) #define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) +#ifdef CONFIG_HYGON_PSP2CPU_CMD + +typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); + +int psp_register_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)); + +int psp_unregister_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)); + +#endif + #ifdef CONFIG_CRYPTO_DEV_SP_PSP int psp_do_cmd(int cmd, void *data, int *psp_ret); -- Gitee From 79b78c5814088fb2557245da3510405143bcda37 Mon Sep 17 00:00:00 2001 From: chench00 Date: Mon, 1 Apr 2024 11:16:34 +0800 Subject: [PATCH 0832/2138] anolis: crypto: tdm: Add Hygon TDM driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: 
#8670 TDM (Trusted Dynamic Measurement) is a module designed and implemented by HYGON in its X86 CPU's embedded secure processor, providing a dynamic measurement service to the X86 side for memory that needs to be protected, e.g. the memory area where kernel code resides. With this new feature, the goal of protecting any specified memory dynamically at runtime can be achieved. When the protected memory is modified illegally, TDM will detect the event immediately and give an alarm in the form of an exception; meanwhile, the abnormal information is recorded inside the TDM for subsequent audit or remote attestation. The TDM driver mainly implements the following functions: (1) Send the required memory block information and configuration information to the TDM device for protection; (2) Manage the further distribution of exceptions when TDM detects illegal memory modification and an exception is triggered; (3) Record abnormal information for subsequent audit or attestation. Signed-off-by: chench00 Reviewed-by: Tianjia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2986 --- drivers/crypto/ccp/Kconfig | 8 + drivers/crypto/ccp/Makefile | 1 + drivers/crypto/ccp/tdm_hygon.c | 1549 ++++++++++++++++++++++++++++++++ drivers/crypto/ccp/tdm_hygon.h | 501 +++++++++++ 4 files changed, 2059 insertions(+) create mode 100644 drivers/crypto/ccp/tdm_hygon.c create mode 100644 drivers/crypto/ccp/tdm_hygon.h diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index d62d628fef20..30902232acce 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -60,6 +60,14 @@ config HYGON_PSP2CPU_CMD help Hygon PSP2CPU Command Support +config TDM_HYGON + tristate "Hygon TDM Interface" + default y + depends on CRYPTO_DEV_CCP_DD + depends on HYGON_PSP2CPU_CMD + help + Hygon TDM driver + config CRYPTO_DEV_CCP_DEBUGFS bool "Enable CCP Internals in DebugFS" default n diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 
0da504999951..0c66b4d5792d 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -25,6 +25,7 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-des3.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o +obj-$(CONFIG_TDM_HYGON) += tdm_hygon.o $(obj)/ccp_sm2_sign.asn1.o: $(obj)/ccp_sm2_sign.asn1.c $(obj)/ccp_sm2_sign.asn1.h $(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h diff --git a/drivers/crypto/ccp/tdm_hygon.c b/drivers/crypto/ccp/tdm_hygon.c new file mode 100644 index 000000000000..56927265841e --- /dev/null +++ b/drivers/crypto/ccp/tdm_hygon.c @@ -0,0 +1,1549 @@ +/* + * The Hygon TDM CPU-to-PSP communication driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tdm_hygon.h" + +#define TDM_CMD_ID_MAX 16 +#define TDM2PSP_CMD(id) (0x110 | (id)) +#define TDM_P2C_CMD_ID 1 +#define TDM_C2P_CMD_SIZE (3*PAGE_SIZE) +#define TDM_KFIFO_SIZE 1024 + +#define TDM_IOC_TYPE 'D' +#define TDM_CMD_LEN_LIMIT (1U << 12) + +struct context_message { + uint32_t flag; + uint32_t pid; + uint8_t comm[16]; + uint8_t module_name[64]; +}; + +struct tdm_task_head { + struct list_head head; + rwlock_t lock; +}; + +struct tdm_task_ctx { + uint32_t task_id; + uint32_t cmd_ctx_flag; + measure_exception_handler_t handler; + struct list_head list; +}; + +static struct tdm_task_head dyn_head; +static unsigned int p2c_cmd_id = TDM_P2C_CMD_ID; +static struct task_struct *kthread; +static DECLARE_KFIFO(kfifo_error_task, unsigned char, TDM_KFIFO_SIZE); +static spinlock_t kfifo_lock; + +static int list_check_exist(uint32_t task_id) +{ + int found = 0; + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct 
tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) { + found = 1; + break; + } + } + read_unlock(lock); + + return found; +} + +static int list_enqueue(void *entry) +{ + int ret = 0; + struct list_head *head, *entry_list = NULL; + rwlock_t *lock = NULL; + + if (!entry) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + entry_list = &(((struct tdm_task_ctx *)entry)->list); + + write_lock(lock); + if (entry_list) + list_add_tail(entry_list, head); + write_unlock(lock); + +end: + return 0; +} + +static __maybe_unused int list_print(void) +{ + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + pr_info("id: %d ", task_node->task_id); + } + read_unlock(lock); + pr_info("\n"); + + return 0; +} + +static int measure_exception_handling_thread(void *data) +{ + int ret = 0; + int copied = 0; + uint32_t error_task_id = 0xffffffff; + struct measure_status task_measure_status; + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + pr_info("Thread started for measurement exception handler dispatching...\n"); + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + + while (!kfifo_is_empty(&kfifo_error_task)) { + copied = kfifo_out_spinlocked(&kfifo_error_task, + (unsigned char *)&error_task_id, sizeof(uint32_t), &kfifo_lock); + if (copied != sizeof(uint32_t)) { + ret = -DYN_ERR_API; + pr_err("kfifio_out exception,return\n"); + goto end; + } + + read_lock(lock); + 
list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == error_task_id) + break; + } + read_unlock(lock); + + if (!task_node) { + ret = -DYN_NULL_POINTER; + pr_err("task_node is null,return\n"); + goto end; + } + + if (task_node->task_id == error_task_id) { + if (task_node->handler) { + pr_info("-----Measurement exception handler dispatching " + "thread------\n"); + pr_info("Measurement exception received for task %d\n", + error_task_id); + pr_info("Step1: Query PSP for task %d status to confirm " + "the error.\n", error_task_id); + pr_info("Step2: Error confirmed, CALL measurement " + "exception handler.\n"); + ret = psp_query_measure_status(error_task_id, + &task_measure_status); + if (ret) { + pr_err("task_id %d status query failed\n", + error_task_id); + goto end; + } + + if (task_measure_status.error == MER_ERR) { + /*error--1 normal--0 */ + pr_info("Error detected for task %d, " + "action TODO!\n", error_task_id); + pr_info("----Measurement exception handler----\n"); + task_node->handler(error_task_id); + pr_info("Exit measurement exception handler.\n"); + } else { + pr_info("No error detected for task %d, please " + "check it again!\n", error_task_id); + } + } else { + pr_err("task %d's callback function is not registered, " + "please check it\n", error_task_id); + } + } + } + } +end: + return ret; +} + +static int tdm_interrupt_handler(uint32_t id, uint64_t data) +{ + if (kthread) { + kfifo_in_spinlocked(&kfifo_error_task, (unsigned char *)&data, sizeof(uint32_t), + &kfifo_lock); + wake_up_process(kthread); + } + + return 0; +} + +static int tdm_do_cmd(unsigned int cmd_id, void *cmd_data, int *error) +{ + if (cmd_id >= TDM_CMD_ID_MAX) { + pr_err("%s cmd_id %u beyond limit\n", __func__, cmd_id); + return -DYN_BEYOND_MAX; + } + + return psp_do_cmd(TDM2PSP_CMD(cmd_id), cmd_data, error); +} + +static int calc_task_context_hash(struct context_message context_msg, uint8_t *hash) +{ + int ret = 0; + struct crypto_shash *shash = NULL; 
+ + if (!hash) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + shash = crypto_alloc_shash("sha256", 0, 0); + if (IS_ERR(shash)) { + pr_err("can't alloc hash\n"); + return -DYN_ERR_API; + } + + { + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + + ret = crypto_shash_init(sdesc); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_init failed\n"); + goto end; + } + + if (context_msg.flag & CONTEXT_CHECK_PID) { + ret = crypto_shash_update(sdesc, (uint8_t *)&context_msg.pid, + sizeof(context_msg.pid)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + if (context_msg.flag & CONTEXT_CHECK_COMM) { + ret = crypto_shash_update(sdesc, context_msg.comm, + strlen(context_msg.comm)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + if (context_msg.flag & CONTEXT_CHECK_MODNAME) { + ret = crypto_shash_update(sdesc, context_msg.module_name, + strlen(context_msg.module_name)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + ret = crypto_shash_final(sdesc, hash); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_final failed\n"); + goto free_shash; + } + } + +free_shash: + crypto_free_shash(shash); +end: + return ret; +} + +static int tdm_get_cmd_context_hash(uint32_t flag, uint8_t *hash) +{ + int ret = 0; + struct context_message ctx_msg = {0}; + unsigned long return_address = 0; +#if IS_BUILTIN(CONFIG_TDM_HYGON) + struct module *p_module = NULL; +#elif IS_ENABLED(CONFIG_KALLSYMS) + char symbol_buf[128] = {0}; + int symbol_len = 0; + char *symbol_begin = NULL; + char *symbol_end = NULL; +#endif + + if (!hash) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + ctx_msg.flag = flag; + ctx_msg.pid = current->pid; + memcpy(ctx_msg.comm, current->comm, sizeof(current->comm)); + + return_address = CALLER_ADDR1; + if (return_address) 
{ +#if IS_BUILTIN(CONFIG_TDM_HYGON) + p_module = __module_address(return_address); + // caller is module + if (p_module) + memcpy(ctx_msg.module_name, p_module->name, sizeof(p_module->name)); + // caller is build-in + else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#elif IS_ENABLED(CONFIG_KALLSYMS) + symbol_len = sprint_symbol((char *)symbol_buf, return_address); + if (!symbol_len) { + ret = -DYN_ERR_API; + pr_err("sprint_symbol failed\n"); + goto end; + } + symbol_begin = strchr((char *)symbol_buf, '['); + if (!symbol_begin) { + ret = -DYN_NULL_POINTER; + pr_err("module name is not exist\n"); + goto end; + } + symbol_end = strchr((char *)symbol_buf, ']'); + if (!symbol_end) { + ret = -DYN_NULL_POINTER; + pr_err("module name is not exist\n"); + goto end; + } + symbol_begin++; + if (symbol_end - symbol_begin) + memcpy(ctx_msg.module_name, symbol_begin, symbol_end - symbol_begin); + else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#endif + } else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); + + ret = calc_task_context_hash(ctx_msg, hash); + if (ret) { + pr_err("calc_task_context_hash failed\n"); + goto end; + } + +end: + return ret; +} + +static int tdm_verify_phy_addr_valid(struct addr_range_info *range) +{ + int ret = 0; +#if IS_BUILTIN(CONFIG_TDM_HYGON) + int i; + uint64_t phy_addr_start, phy_addr_end; + + for (i = 0; i < range->count; i++) { + phy_addr_start = __sme_clr(range->addr[i].addr_start); + phy_addr_end = __sme_clr(range->addr[i].addr_start + range->addr[i].length); + + if ((PHYS_PFN(phy_addr_start) >= max_pfn) || (PHYS_PFN(phy_addr_end) >= max_pfn)) { + pr_err("phy_addr or length beyond max_pfn\n"); + ret = -DYN_ERR_MEM; + break; + } + } +#else + pr_warn("TDM: Can't get max_pfn, skip physical address check\n"); +#endif + + return ret; +} + +/* Convert the virtual address to physics address,then judge whether it is + * 
continuous physics memory + */ +static int ptable_virt_to_phy(uint64_t vaddr, struct addr_info *p_addr_info, uint64_t *left_convert) +{ + int ret = 0; + unsigned int level = 0; + pte_t *pte; + uint64_t local_page_mask = 0; + uint64_t local_page_size = 0; + uint64_t now_base = vaddr; + uint64_t last_phy_addr = 0; + uint64_t last_phy_len = 0; + uint64_t now_phy_addr = 0; + + pte = lookup_address(now_base, &level); + if (!pte) { + ret = -DYN_ERR_MEM; + pr_err("lookup_address failed!\n"); + goto end; + } + + local_page_size = page_level_size(level); + local_page_mask = page_level_mask(level); + + switch (level) { + case PG_LEVEL_4K: + p_addr_info->addr_start = (uint64_t)((pte_val(*pte) & local_page_mask & ~_PAGE_NX) + + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_2M: + p_addr_info->addr_start = (uint64_t)((pmd_val(*(pmd_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_1G: + p_addr_info->addr_start = (uint64_t)((pud_val(*(pud_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + default: + pr_err("page table level is not supported!\n"); + return -DYN_ERR_MEM; + } + + if ((p_addr_info->addr_start & ~local_page_mask) == 0) { + /*|--------------page_size-------------------|*/ + /*|-------*left_convert-------|*/ + if (*left_convert < local_page_size) { + p_addr_info->length = *left_convert; + *left_convert = 0; + } + /*|--------------page_size-------------------|-----*/ + /*|---------------------*left_convert-----------------------|*/ + else { + p_addr_info->length = local_page_size; + now_base += local_page_size; + *left_convert -= local_page_size; + } + } else { + /*|--------------page_size-------------------|------*/ + /* |-------*left_convert---------|*/ + if ((p_addr_info->addr_start + *left_convert) < + ((p_addr_info->addr_start & local_page_mask) + local_page_size)) { + p_addr_info->length = *left_convert; + *left_convert = 0; + } + 
/*|--------------page_size-------------------|........*/ + /* |-----------------*left_convert-----------------|*/ + else { + p_addr_info->length = (p_addr_info->addr_start & local_page_mask) + + local_page_size - p_addr_info->addr_start; + now_base += p_addr_info->length; + *left_convert -= p_addr_info->length; + } + } + + last_phy_len = p_addr_info->length; + last_phy_addr = p_addr_info->addr_start; + + while (*left_convert) { + pte = lookup_address(now_base, &level); + if (!pte) { + ret = -DYN_ERR_MEM; + pr_err("lookup_address failed!\n"); + goto end; + } + + switch (level) { + case PG_LEVEL_4K: + now_phy_addr = (uint64_t)((pte_val(*pte) & local_page_mask & ~_PAGE_NX) + + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_2M: + now_phy_addr = (uint64_t)((pmd_val(*(pmd_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_1G: + now_phy_addr = (uint64_t)((pud_val(*(pud_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + default: + pr_err("page table level is not supported!\n"); + return -DYN_ERR_MEM; + } + + /*not continuous memory*/ + if ((last_phy_addr + last_phy_len) != now_phy_addr) + break; + + if (*left_convert < local_page_size) { + p_addr_info->length += *left_convert; + *left_convert = 0; + } else { + p_addr_info->length += local_page_size; + now_base += local_page_size; + *left_convert -= local_page_size; + last_phy_addr = now_phy_addr; + last_phy_len = local_page_size; + } + } + +end: + return ret; +} + +int psp_get_fw_info(struct tdm_version *version) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_fw_cmd *fw_cmd = NULL; + struct tdm_fw_resp *fw_resp = NULL; + + if (!version) { + ret = -DYN_NULL_POINTER; + pr_err("version is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + 
goto end; + } + + fw_cmd = (struct tdm_fw_cmd *)tdm_cmdresp_data; + fw_cmd->cmd_type = TDM_FW_VERSION; + + ret = tdm_do_cmd(0, (void *)fw_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + + if (error) { + ret = -error; + pr_err("get_fw_info exception error: 0x%x\n", error); + goto free_cmdresp; + } + + fw_resp = (struct tdm_fw_resp *)tdm_cmdresp_data; + memcpy(version, &fw_resp->version, sizeof(struct tdm_version)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_get_fw_info); + +int psp_create_measure_task(struct addr_range_info *range, struct measure_data *data, + uint32_t flag, struct authcode_2b *code) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_create_cmd *create_cmd = NULL; + struct tdm_create_resp *create_resp = NULL; + uint32_t addr_range_info_len = 0; + struct addr_range_info *paddr_range_info = NULL; + uint32_t info_index = 0; + uint64_t now_base_vaddr = 0; + uint64_t tf_left_size = 0; + uint32_t count = 0; + + if (!range) { + ret = -DYN_NULL_POINTER; + pr_err("range is null pointer\n"); + goto end; + } + if (!data) { + ret = -DYN_NULL_POINTER; + pr_err("data is null pointer\n"); + goto end; + } + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (range->count > RANGE_CNT_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("range->count %d is beyond RANGE_CNT_MAX %d\n", range->count, RANGE_CNT_MAX); + goto end; + } + if (range->count == 0) { + ret = -DYN_ERR_SIZE_SMALL; + pr_err("range->count is zero!\n"); + goto end; + } + + /*create task by vaddr*/ + if (flag & TASK_CREATE_VADDR) { + paddr_range_info = kzalloc(sizeof(struct addr_range_info) + + RANGE_CNT_MAX * sizeof(struct addr_info), GFP_KERNEL); + if (!paddr_range_info) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc 
for paddr_range_info failed\n"); + goto end; + } + + now_base_vaddr = range->addr[0].addr_start; + tf_left_size = range->addr[0].length; + while (tf_left_size && (count++ < RANGE_CNT_MAX + 1)) { + ret = ptable_virt_to_phy(now_base_vaddr, + &paddr_range_info->addr[info_index], &tf_left_size); + if (ret) { + pr_err("address convert failed!\n"); + goto free_paddr_range_info; + } + + now_base_vaddr = now_base_vaddr + + paddr_range_info->addr[info_index++].length; + if (info_index > RANGE_CNT_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("info_index: %d is beyond %d\n", info_index, RANGE_CNT_MAX); + goto free_paddr_range_info; + } + } + + paddr_range_info->count = info_index; + addr_range_info_len = paddr_range_info->count * sizeof(struct addr_info) + + sizeof(struct addr_range_info); + } else { + /*check if physics address valid*/ + ret = tdm_verify_phy_addr_valid(range); + if (ret) { + pr_err("range address is abnormal!\n"); + goto end; + } + addr_range_info_len = range->count * sizeof(struct addr_info) + + sizeof(struct addr_range_info); + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto free_paddr_range_info; + } + + create_cmd = (struct tdm_create_cmd *)tdm_cmdresp_data; + create_cmd->cmd_type = TDM_TASK_CREATE; + create_cmd->cmd_ctx_flag = flag; + + memcpy(&create_cmd->m_data, data, sizeof(struct measure_data)); + create_cmd->authcode_len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : code->len; + + ret = tdm_get_cmd_context_hash(flag, create_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + if (flag & TASK_CREATE_VADDR) + memcpy(&create_cmd->range_info, paddr_range_info, addr_range_info_len); + else + memcpy(&create_cmd->range_info, range, addr_range_info_len); + + ret = tdm_do_cmd(0, (void *)create_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("create_measure_task exception error: 0x%x\n", error); + goto free_cmdresp; + } + + create_resp = (struct tdm_create_resp *)tdm_cmdresp_data; + code->len = create_resp->authcode_len; + code->len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : code->len; + memcpy(&code->val[0], &create_resp->authcode_val[0], code->len); + + head = &dyn_head.head; + task_node = kzalloc(sizeof(struct tdm_task_ctx), GFP_KERNEL); + if (!task_node) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", sizeof(struct tdm_task_ctx)); + goto free_cmdresp; + } + + task_node->task_id = create_resp->task_id; + task_node->handler = NULL; + task_node->cmd_ctx_flag = flag; + + ret = list_enqueue(task_node); + if (ret) { + pr_err("task %d enqueue failed!!!\n", task_node->task_id); + goto free_task_node; + } + + kfree(tdm_cmdresp_data); + if (flag & TASK_CREATE_VADDR) + kfree(paddr_range_info); + + return task_node->task_id; + +free_task_node: + kfree(task_node); +free_cmdresp: + kfree(tdm_cmdresp_data); +free_paddr_range_info: + if (flag & TASK_CREATE_VADDR) + kfree(paddr_range_info); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_create_measure_task); + +int psp_query_measure_status(uint32_t task_id, struct measure_status *status) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_query_cmd *query_cmd = NULL; + struct tdm_query_resp *query_resp = NULL; + + if (!status) { + ret = 
-DYN_NULL_POINTER; + pr_err("status is null pointer\n"); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + query_cmd = (struct tdm_query_cmd *)tdm_cmdresp_data; + query_cmd->cmd_type = TDM_TASK_QUERY; + query_cmd->task_id = task_id; + + ret = tdm_do_cmd(0, query_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + query_resp = (struct tdm_query_resp *)tdm_cmdresp_data; + memcpy(status, &query_resp->m_status, sizeof(struct measure_status)); +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_query_measure_status); + +int psp_register_measure_exception_handler(uint32_t task_id, struct authcode_2b *code, + measure_exception_handler_t handler) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_register_cmd *register_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + /* check if task_id is registered already */ + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if 
(task_node->task_id == task_id) { + if ((handler && task_node->handler)) { + pr_err("task %d is registered already\n", task_id); + read_unlock(lock); + return -DYN_EEXIST; + } + break; + /* task_node will be used for next context */ + } + } + read_unlock(lock); + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + register_cmd = (struct tdm_register_cmd *)tdm_cmdresp_data; + temp_cmd = ®ister_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_VERIFY_AUTH; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, register_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + write_lock(lock); + task_node->handler = handler; + write_unlock(lock); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_register_measure_exception_handler); + +int psp_destroy_measure_task(uint32_t task_id, struct authcode_2b *code) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_destroy_cmd *destroy_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len 
%d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + if (task_node->cmd_ctx_flag & TASK_ATTR_NO_UPDATE) { + pr_warn("Task %d is not allowed to destroy!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + destroy_cmd = (struct tdm_destroy_cmd *)tdm_cmdresp_data; + temp_cmd = &destroy_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_DESTROY; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, destroy_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + if (task_node->handler) { + write_lock(lock); + task_node->handler = NULL; + write_unlock(lock); + } + + write_lock(lock); + list_del(&task_node->list); + write_unlock(lock); + + kfree(task_node); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_destroy_measure_task); + +int psp_update_measure_task(uint32_t task_id, struct authcode_2b *code, + struct measure_update_data *data) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_update_cmd *update_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!data) { + ret = -DYN_NULL_POINTER; + pr_err("data is null pointer\n"); + goto end; + } + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + if (task_node->cmd_ctx_flag & TASK_ATTR_NO_UPDATE) { + pr_warn("Task 
%d is not allowed to update!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + update_cmd = (struct tdm_update_cmd *)tdm_cmdresp_data; + temp_cmd = &update_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_UPDATE; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + memcpy(&update_cmd->update_data, data, sizeof(struct measure_update_data)); + + ret = tdm_do_cmd(0, tdm_cmdresp_data, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_update_measure_task); + +int psp_startstop_measure_task(uint32_t task_id, struct authcode_2b *code, bool start) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_startstop_cmd *startstop_cmd = NULL; + struct tdm_startstop_resp *startstop_resp = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if 
(!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + startstop_cmd = (struct tdm_startstop_cmd *)tdm_cmdresp_data; + temp_cmd = &startstop_cmd->cmd; + temp_cmd->cmd_type = start ? TDM_TASK_START : TDM_TASK_STOP; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + if ((temp_cmd->cmd_type == TDM_TASK_STOP) && (task_node->cmd_ctx_flag & + TASK_ATTR_NO_UPDATE)) { + pr_warn("Task %d is not allowed to stop!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto free_cmdresp; + } + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, startstop_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + startstop_resp = (struct tdm_startstop_resp *)tdm_cmdresp_data; + + kfree(tdm_cmdresp_data); + + return startstop_resp->m_status.status; + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_startstop_measure_task); + +int tdm_export_cert(uint32_t key_usage_id, struct tdm_cert *cert) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_export_cert_cmd 
*cert_cmd = NULL; + struct tdm_export_cert_resp *cert_resp = NULL; + + if (!cert) { + ret = -DYN_NULL_POINTER; + pr_err("cert is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + cert_cmd = (struct tdm_export_cert_cmd *)tdm_cmdresp_data; + cert_cmd->cmd_type = TDM_EXPORT_CERT; + cert_cmd->key_usage_id = key_usage_id; + + ret = tdm_do_cmd(0, (void *)cert_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + cert_resp = (struct tdm_export_cert_resp *)tdm_cmdresp_data; + memcpy(cert, &cert_resp->cert, sizeof(struct tdm_cert)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_export_cert); + +int tdm_get_report(uint32_t task_id, struct task_selection_2b *selection, + struct data_2b *user_supplied_data, uint8_t report_type, uint32_t key_usage_id, + uint8_t *report_buffer, uint32_t *length) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_get_report_cmd *report_cmd = NULL; + struct tdm_report *report_resp = NULL; + uint32_t needed_length = 0; + + if (!user_supplied_data) { + ret = -DYN_NULL_POINTER; + pr_err("user_supplied_data is null pointer\n"); + goto end; + } + if (!report_buffer) { + ret = -DYN_NULL_POINTER; + pr_err("report_buffer is null pointer\n"); + goto end; + } + if (!length) { + ret = -DYN_NULL_POINTER; + pr_err("length is null pointer\n"); + goto end; + } + if ((report_type != TDM_REPORT_SUMMARY) && (report_type != TDM_REPORT_DETAIL)) { + ret = -DYN_ERR_REPORT_TYPE; + pr_err("invalid report_type\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if 
(!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + report_cmd = (struct tdm_get_report_cmd *)tdm_cmdresp_data; + + report_cmd->cmd_type = TDM_GET_REPORT; + report_cmd->task_id = task_id; + if (task_id == TDM_TASK_ALL) { + if (!selection) { + ret = -DYN_NULL_POINTER; + pr_err("selection is null pointer\n"); + goto end; + } + report_cmd->selection_len = selection->len; + report_cmd->selection_len = (report_cmd->selection_len > TDM_MAX_TASK_BITMAP) ? + TDM_MAX_TASK_BITMAP : report_cmd->selection_len; + memcpy(&report_cmd->selection_bitmap[0], &selection->bitmap[0], + report_cmd->selection_len); + } + + report_cmd->user_data_len = (user_supplied_data->len > TDM_MAX_NONCE_SIZE) ? + TDM_MAX_NONCE_SIZE : user_supplied_data->len; + memcpy(&report_cmd->user_data_val[0], &user_supplied_data->val[0], + report_cmd->user_data_len); + report_cmd->report_type = report_type; + report_cmd->key_usage_id = key_usage_id; + + ret = tdm_do_cmd(0, (void *)report_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + report_resp = (struct tdm_report *)tdm_cmdresp_data; + if (report_type == TDM_REPORT_SUMMARY) + needed_length = sizeof(struct tdm_report) + sizeof(struct tdm_report_sig); + else + needed_length = sizeof(struct tdm_report) + + report_resp->task_nums * sizeof(struct tdm_detail_task_status) + + sizeof(struct tdm_report_sig); + + if (needed_length > *length) { + pr_warn("needed_length %d is beyond length %d\n", needed_length, *length); + *length = needed_length; + ret = -DYN_ERR_SIZE_SMALL; + } else { + memcpy(report_buffer, report_resp, needed_length); + } + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_get_report); + +int tdm_get_vpcr_audit(struct 
pcr_select pcr, struct tpm2b_digest *digest, + struct tdm_pcr_value_2b *pcr_values) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_get_vpcr_cmd *vpcr_cmd = NULL; + struct tdm_get_vpcr_resp *vpcr_resp = NULL; + + if (!digest) { + ret = -DYN_NULL_POINTER; + pr_err("digest is null pointer\n"); + goto end; + } + if (!pcr_values) { + ret = -DYN_NULL_POINTER; + pr_err("pcr_values is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + vpcr_cmd = (struct tdm_get_vpcr_cmd *)tdm_cmdresp_data; + + vpcr_cmd->cmd_type = TDM_VPCR_AUDIT; + memcpy(&vpcr_cmd->pcr, &pcr, sizeof(struct pcr_select)); + + ret = tdm_do_cmd(0, (void *)vpcr_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + vpcr_resp = (struct tdm_get_vpcr_resp *)tdm_cmdresp_data; + memcpy(digest, &vpcr_resp->digest, sizeof(struct tpm2b_digest)); + pcr_values->task_nums = vpcr_resp->pcr_values.task_nums; + memcpy(&pcr_values->task_data[0], &vpcr_resp->pcr_values.task_data[0], + pcr_values->task_nums * sizeof(struct tdm_task_data)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_get_vpcr_audit); + +static long tdm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + int ret = 0; + void __user *argp = (void __user *)arg; + unsigned int tdm_cmd = 0; + unsigned char *temp_cmd_data = NULL; + struct task_selection_2b *selection = NULL; + struct data_2b *data = NULL; + uint32_t data_to_user_len = 0; + uint16_t selection_len = 0; + uint16_t user_data_len = 0; + struct tdm_get_report_cmd *report_cmd = NULL; + struct tdm_user_report_cmd *user_report_cmd 
= NULL; + uint32_t needed_length = 0; + struct tdm_get_vpcr_cmd *vpcr_cmd = NULL; + struct tdm_get_vpcr_resp *vpcr_resp = NULL; + uint32_t pcr_num = 0; + + if (_IOC_TYPE(ioctl) != TDM_IOC_TYPE) { + ret = -EINVAL; + pr_err("ioctl 0x%08x is invalid\n", ioctl); + goto end; + } + + temp_cmd_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!temp_cmd_data) { + ret = -ENOMEM; + pr_err("kzalloc for size 0x%lx failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + tdm_cmd = _IOC_NR(ioctl); + + switch (tdm_cmd) { + case USER_EXPORT_CERT: + ret = tdm_export_cert(TDM_AK_USAGE_ID, (struct tdm_cert *)temp_cmd_data); + if (ret) { + pr_err("Execute tdm export cert command failed!\n"); + goto free_mem; + } + data_to_user_len = sizeof(struct tdm_cert); + break; + + case USER_GET_REPORT: + if (copy_from_user(temp_cmd_data, argp, sizeof(struct tdm_user_report_cmd))) { + pr_err("%s copy from user failed\n", __func__); + ret = -EFAULT; + goto end; + } + + user_report_cmd = (struct tdm_user_report_cmd *)temp_cmd_data; + needed_length = user_report_cmd->needed_length; + report_cmd = &user_report_cmd->report_cmd; + selection_len = report_cmd->selection_len > TDM_MAX_TASK_BITMAP ? + TDM_MAX_TASK_BITMAP : report_cmd->selection_len; + + selection = kzalloc(sizeof(struct task_selection_2b) + + selection_len * sizeof(uint8_t), GFP_KERNEL); + if (!selection) { + ret = -ENOMEM; + pr_err("kzalloc failed\n"); + goto free_mem; + } + + selection->len = selection_len; + memcpy(&selection->bitmap[0], &report_cmd->selection_bitmap[0], selection->len); + + user_data_len = report_cmd->user_data_len > TDM_MAX_NONCE_SIZE ? 
+ TDM_MAX_NONCE_SIZE : report_cmd->user_data_len; + data = kzalloc(sizeof(struct data_2b) + + user_data_len * sizeof(uint8_t), GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + pr_err("kzalloc failed\n"); + goto free_mem; + } + + data->len = user_data_len; + memcpy(&data->val[0], &report_cmd->user_data_val[0], data->len); + + ret = tdm_get_report(report_cmd->task_id, selection, data, report_cmd->report_type, + report_cmd->key_usage_id, temp_cmd_data, &needed_length); + if (ret) { + pr_err("Execute tdm report command failed!\n"); + goto free_mem; + } + + data_to_user_len = needed_length; + break; + + case USER_VPCR_AUDIT: + if (copy_from_user(temp_cmd_data, argp, sizeof(struct tdm_get_vpcr_cmd))) { + pr_err("%s copy from user failed\n", __func__); + ret = -EFAULT; + goto end; + } + + vpcr_cmd = (struct tdm_get_vpcr_cmd *)temp_cmd_data; + vpcr_resp = (struct tdm_get_vpcr_resp *)temp_cmd_data; + pcr_num = vpcr_cmd->pcr.pcr; + + ret = tdm_get_vpcr_audit(vpcr_cmd->pcr, &vpcr_resp->digest, &vpcr_resp->pcr_values); + if (ret) { + pr_err("Execute tdm vpcr audit command failed!\n"); + goto free_mem; + } + + vpcr_resp->pcr = pcr_num; + data_to_user_len = sizeof(struct tdm_get_vpcr_resp) + + vpcr_resp->pcr_values.task_nums * sizeof(struct tdm_task_data); + break; + + case USER_SHOW_DEVICE: + ret = psp_get_fw_info(&((struct tdm_show_device *)temp_cmd_data)->version); + if (ret) { + pr_err("firmware version get failed!\n"); + goto free_mem; + } + + data_to_user_len = sizeof(struct tdm_show_device); + break; + + default: + pr_err("invalid tdm_cmd: %d from user\n", tdm_cmd); + ret = -EINVAL; + goto free_mem; + } + + if (copy_to_user(argp, temp_cmd_data, data_to_user_len)) { + pr_err("%s copy to user failed\n", __func__); + ret = -EFAULT; + goto free_mem; + } + +free_mem: + kfree(temp_cmd_data); + kfree(selection); + kfree(data); +end: + return ret; +} + +static const struct file_operations tdm_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = tdm_ioctl, +}; + +static struct 
miscdevice misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "tdm", + .fops = &tdm_fops, +}; + +static int __init hygon_tdm_init(void) +{ + int ret = 0; + + INIT_KFIFO(kfifo_error_task); + INIT_LIST_HEAD(&dyn_head.head); + rwlock_init(&dyn_head.lock); + spin_lock_init(&kfifo_lock); + + ret = psp_register_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + if (ret) { + pr_err("notifier function registration failed\n"); + return ret; + } + + kthread = kthread_create(measure_exception_handling_thread, NULL, + "measure_exception_handling_thread"); + if (IS_ERR(kthread)) { + pr_err("kthread_create fail\n"); + ret = PTR_ERR(kthread); + return ret; + } + + wake_up_process(kthread); + pr_info("TDM driver loaded successfully!\n"); + + return misc_register(&misc); +} + +static void __exit hygon_tdm_exit(void) +{ + if (kthread) { + kthread_stop(kthread); + kthread = NULL; + } + + psp_unregister_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + + misc_deregister(&misc); +} + +MODULE_AUTHOR("niuyongwen@hygon.cn"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.7"); +MODULE_DESCRIPTION("The dynamic measure driver"); + +/* + * hygon_tdm_init must be done after ccp module init. + * That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). + */ +device_initcall_sync(hygon_tdm_init); +module_exit(hygon_tdm_exit); diff --git a/drivers/crypto/ccp/tdm_hygon.h b/drivers/crypto/ccp/tdm_hygon.h new file mode 100644 index 000000000000..ac5638986103 --- /dev/null +++ b/drivers/crypto/ccp/tdm_hygon.h @@ -0,0 +1,501 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * The Hygon TDM CPU-to-PSP communication driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/* Change log: + * Version: 0.7 (fw version 1.4) + * 1.Adjust the TDM driver to accommodate multiple versions of the kernel. + * Version: 0.6 (fw version 1.4) + * 1.remove psp_get_fw_info from hygon_tdm_init, add tdm show device support to ioctl for hag. + * Version: 0.5 (fw version 1.4) + * 1.add support for hanging machine when task exception with special attribute. + * Version: 0.4 (fw version 1.3) + * 1.add vpcr support. + * 2.add task create by vaddr. + * Version: 0.3 (fw version 1.2) + * 1.add remote authentication support. + */ +#ifndef __TDM_HYGON_H__ +#define __TDM_HYGON_H__ + +#include +#include + +#define MIN_VPCR 10 +#define MAX_VPCR 16 + +/*Macro definition for measurement*/ +#define TDM_MAX_TASK_BITMAP 16 +#define TDM_MAX_NONCE_SIZE 32 + +#define RANGE_CNT_MAX 0x80 +#define MEASURE_TASK_MAX 100 +#define AUTHCODE_MAX 16 +#define AUTH_TRY_DELAY 1 + +#define HASH_ALGO_SM3 0 +#define HASH_ALGO_SHA1 1 +#define HASH_ALGO_SHA256 2 +#define HASH_ALGO_SHA384 3 +#define HASH_ALGO_SHA512 4 + +#define SM3_256_DIGEST_SIZE 32 +#define SHA1_DIGEST_SIZE 20 +#define SHA256_DIGEST_SIZE 32 +#define SHA384_DIGEST_SIZE 48 +#define SHA512_DIGEST_SIZE 64 + +#define CONTEXT_CHECK_PID 0x1 +#define CONTEXT_CHECK_COMM 0x2 +#define CONTEXT_CHECK_MODNAME 0x4 +#define TASK_ATTR_NO_UPDATE 0x10000 +#define TASK_SUPPORT_VPCR 0x20000 +#define TASK_CREATE_VADDR 0x40000 +#define TASK_EXCEPTION_CRASH 0x80000 + +#define MEASURE_UPDATE_ALGO 0x1 +#define MEASURE_UPDATE_EXPECTED_MEASUREMENT 0x2 + +/*Macro definition for tdm certificate*/ +#define TDM_MAX_CHIP_ID_LEN 40 +#define TDM_CURVE_SM2_ID 0x3 +#define TDM_PUBKEY_LEN 32 +#define TDM_MAX_USER_ID_LEN 126 +#define TDM_SIG_LEN 32 +#define TDM_HEADER_AND_PUBKEY_LEN 284 + +/*Macro definition for tdm report*/ +#define TDM_TASK_ALL 0xffffffff +#define TDM_REPORT_SUMMARY 0 +#define TDM_REPORT_DETAIL 1 + +/* CPU to psp command declaration */ +enum C2P_CMD_TYPE { + TDM_TASK_CREATE = 0x0, + TDM_TASK_VERIFY_AUTH, + TDM_TASK_QUERY, + 
TDM_TASK_DESTROY, + TDM_TASK_UPDATE, + TDM_TASK_STOP, + TDM_TASK_START, + TDM_FW_VERSION, + TDM_EXPORT_CERT, + TDM_GET_REPORT, + TDM_VPCR_AUDIT, + TDM_MAX_CMD +}; + +/* User interaction command declaration */ +enum USER_CMD_TYPE { + USER_EXPORT_CERT = 0x80, + USER_GET_REPORT, + USER_VPCR_AUDIT, + USER_SHOW_DEVICE, + USER_MAX_CMD +}; + +/*Public usage id definition for tdm certificate*/ +enum _tdm_key_usage_id { + TDM_INVALID_USAGE_ID = 0x1000, + TDM_CEK_USAGE_ID = 0x1004, + TDM_AK_USAGE_ID = 0x2001, + TDM_MAX_USAGE_ID +}; + +/*Public status and type declaration*/ +enum TDM_TASK_STATUS { + DYN_INIT = 0x0, + DYN_TO_RUN, + DYN_RUN, + DYN_TO_STOP, + DYN_STOP +}; + +enum TDM_MEASURE_STATUS { + MER_NORMAL = 0x0, + MER_ERR +}; + +enum DYN_ERROR_TYPE { + DYN_NORMAL = 0x0, + DYN_NOT_EXIST, + DYN_AUTH_FAIL, + DYN_STATUS_NOT_SUIT, + DYN_BEYOND_MAX, + DYN_DA_PERIOD, + DYN_NULL_POINTER, + DYN_ERR_API, + DYN_EEXIST, + DYN_ERR_MEM, + DYN_ERR_AUTH_LEN, + DYN_ERR_KEY_ID, + DYN_NO_ALLOW_UPDATE, + DYN_ERR_HASH_ALGO, + DYN_ERR_REPORT_TYPE, + DYN_ERR_SIZE_SMALL, + DYN_ERR_ADDR_MAPPING, + DYN_ERR_PCR_NUM, + DYN_ERR_ORIG_TPM_PCR, + DYN_MAX_ERR_TYPE +}; + +/*Data structure declaration for measurement*/ +struct addr_info { + uint64_t addr_start; + uint64_t length; +} __packed; + +struct addr_range_info { + uint32_t count; + struct addr_info addr[]; +} __packed; + +struct measure_data { + uint32_t hash_algo; + uint8_t expected_measurement[32]; + uint32_t period_ms; + uint32_t pcr; +} __packed; + +struct authcode_2b { + uint16_t len; + uint8_t val[]; +} __packed; + +struct measure_status { + uint8_t status; + uint8_t error; + uint64_t count; +} __packed; + +struct measure_update_data { + uint32_t update_flag; + uint32_t algo; + uint8_t expected_measurement[32]; +} __packed; + +struct da_status { + uint64_t err_time; + uint16_t interval_time; + uint16_t err_cnt; +} __packed; + +struct tdm_version { + uint8_t api_major; + uint8_t api_minor; + uint32_t buildId; + uint32_t task_max; + uint32_t 
range_max_per_task; +} __packed; + +struct task_selection_2b { + uint16_t len; + uint8_t bitmap[]; +}; + +struct data_2b { + uint16_t len; + uint8_t val[]; +}; + +/*Data structure declaration for vpcr*/ +struct pcr_select { + uint16_t hash; + uint32_t pcr; +} __packed; + +union tpmu_ha { + uint8_t sha1[SHA1_DIGEST_SIZE]; + uint8_t sha256[SHA256_DIGEST_SIZE]; + uint8_t sha384[SHA384_DIGEST_SIZE]; + uint8_t sha512[SHA512_DIGEST_SIZE]; + uint8_t sm3_256[SM3_256_DIGEST_SIZE]; +}; + +struct tpm2b_digest { + uint16_t size; + uint8_t buffer[sizeof(union tpmu_ha)]; +} __packed; + +struct tdm_task_data { + uint32_t task_id; + uint8_t hash[32]; +} __packed; + +struct tdm_pcr_value_2b { + uint32_t task_nums; + struct tdm_task_data task_data[]; +} __packed; + +/*Data structure declaration for tdm certificate*/ +struct _tdm_ecc_pubkey { + uint32_t curve_id; + uint8_t pubkey_qx[TDM_PUBKEY_LEN]; + uint8_t pubkey_qy[TDM_PUBKEY_LEN]; + uint16_t user_id_len; + uint8_t user_id[TDM_MAX_USER_ID_LEN]; +} __packed; + +struct _tdm_ecc_signature { + uint8_t sig_r[TDM_SIG_LEN]; + uint8_t sig_s[TDM_SIG_LEN]; +} __packed; + +/* + ************************ Hygon TDM Certificate - ECC256*************************** + *|00h |31:0 |VERSION |Certificate version. 0... | + *|04h |7:0 |- |Reserved. Set to zero | + *|06h |7:0 |CHIP_ID_LEN | | + *|08h |319:0 |CHIP_ID |Unique ID of every chip. | + *|30h |31:0 |KEY_USAGE_ID |Usage id of the key. | + *|34h |63:0 |- |Reserved. Set to zero. | + *|3Ch |31:0 |CURVE_ID |ECC curve id | + *|40h |255:0 |Qx |Public key Qx | + *|60h |255:0 |Qy |Public key Qy | + *|80h |7:0 |USER_ID_LEN |GM user id len | + *|82h |1007:0 |USER_ID |GM user id | + *|100h|223:0 |- |Reserved. Set to zero. | + *|11Ch|31:0 |SIG1_KEY_USAGE_ID|Key type for sig1. | + *|120h|255:0 |SIG1_R |Signature R of key1. | + *|140h|255:0 |SIG1_S |Signature S of key1. | + *|160h|223:0 |- |Reserved. Set to zero | + *|17Ch|31:0 |SIG2_KEY_USAGE_ID|Key type for sig2. 
 | + *|180h|255:0 |SIG2_R |Signature R of key2. | + *|1A0h|255:0 |SIG2_S |Signature S of key2. | + ************************************************************************************* + */ +struct tdm_cert { + uint32_t version; + uint8_t reserved_0[2]; + uint16_t chip_id_len; + uint8_t chip_id[TDM_MAX_CHIP_ID_LEN]; + uint32_t key_usage_id; + uint8_t reserved_1[8]; + struct _tdm_ecc_pubkey ecc_pubkey; + uint8_t reserved_2[28]; + uint32_t sig1_key_usage_id; + struct _tdm_ecc_signature ecc_sig1; + uint8_t reserved_3[28]; + uint32_t sig2_key_usage_id; + struct _tdm_ecc_signature ecc_sig2; +} __packed; + +/*Data structure declaration for tdm measurement report*/ +/* + ******************** Hygon TDM Report for Single Task - ECC256*********************** + *|+(00h) |31:0 |TASK_ID |Measured task ID | + *|+(04h) |31:0 |PERIOD_MS |Measured period time for the related task | + *|+(08h) |63:0 |MEASURED_COUNT |Measured count for the related task | + *|+(10h) |31:0 |LAST_MEASURE_ELAPSED_MS|Measured time for last measurement. | + *|+(14h) |95:0 |- |Reserved. Set to zero | + *|+(20h) |255:0 |MEASURED_HASH |Measured hash for the related task. | + ************************************************************************************* + */ +struct tdm_detail_task_status { + uint32_t task_id; + uint32_t period_ms; + uint64_t measured_count; + uint32_t last_measure_elapsed_ms; + uint8_t reserved[12]; + uint8_t measured_hash[32]; +} __packed; + +/* + ************************ Hygon TDM Report - ECC256*************************** + *|00h |31:0 |VERSION |Certificate version. 0... | + *|04h |31:0 |FW_VERSION |Firmware version,BUILD_ID | + *|08h |7:0 |REPORT_TYPE |Summary report:0, Detailed report:1 | + *|09h |39:0 |- |Reserved. Set to zero. | + *|0Eh |15:0 |TASK_NUMS |ALL task numbers. | + *|10h |127:0 |TASK_BITMAP |ALL task bitmap. | + *|20h |127:0 |TASK_ERROR_BITMAP |Bitmap for error tasks | + *|30h |127:0 |TASK_RUNNING_BITMAP|Bitmap for running tasks | + *|40h |239:0 |- |Reserved. Set to zero. 
| + *|5Eh |15:0 |USER_DATA_LEN |User supplied data length. | + *|60h |255:0 |USER_DATA |User supplied data. | + *|80h |255:0 |AGGREGATE_HASH |Aggregate hash for tasks | + ************************************************************************************* + */ +struct tdm_report { + uint32_t version; + uint32_t fw_version; + uint8_t report_type; + uint8_t reserved_0[5]; + uint16_t task_nums; + uint8_t task_bitmap[TDM_MAX_TASK_BITMAP]; + uint8_t task_error_bitmap[TDM_MAX_TASK_BITMAP]; + uint8_t task_running_bitmap[TDM_MAX_TASK_BITMAP]; + uint8_t reserved_1[30]; + uint16_t user_supplied_data_len; + uint8_t user_supplied_data[TDM_MAX_NONCE_SIZE]; + uint8_t aggregate_hash[32]; + struct tdm_detail_task_status detailed_task_status[]; +} __packed; + +/* + ************************ Hygon TDM Report Signature - ECC256************************* + *|A0h |223:0 |- |Reserved. Set to zero | + *|BCh |31:0 |SIG_KEY_USAGE_ID |Key type for sig. | + *|C0h |255:0 |SIG_R |Signature R of key. | + *|E0h |255:0 |SIG_S |Signature S of key. 
| + ************************************************************************************* + */ +struct tdm_report_sig { + uint8_t reserved[28]; + uint32_t sig_key_usage_id; + uint8_t sig_r[TDM_SIG_LEN]; + uint8_t sig_s[TDM_SIG_LEN]; +} __packed; + +/*Data structure declaration for tdm command/response interface*/ +/* + * The following commands use this structure: + * psp_register_measure_exception_handler + * psp_destroy_measure_task + * psp_update_measure_task + * psp_startstop_measure_task + */ +struct tdm_common_cmd { + uint32_t cmd_type; + uint32_t task_id; + uint16_t code_len; + uint8_t code_val[AUTHCODE_MAX]; + uint8_t context_hash[32]; +} __packed; + +/*TASK_CREATE*/ +struct tdm_create_cmd { + uint32_t cmd_type; + uint32_t cmd_ctx_flag; + struct measure_data m_data; + uint16_t authcode_len; + uint8_t context_hash[32]; + struct addr_range_info range_info; +} __packed; + +struct tdm_create_resp { + uint32_t task_id; + uint16_t authcode_len; + uint8_t authcode_val[AUTHCODE_MAX]; +} __packed; + +/*TASK_VERIFY_AUTH*/ +struct tdm_register_cmd { + struct tdm_common_cmd cmd; +} __packed; + +/*TASK_QUERY*/ +struct tdm_query_cmd { + uint32_t cmd_type; + uint32_t task_id; +} __packed; + +struct tdm_query_resp { + struct measure_status m_status; +} __packed; + +/*TASK_DESTROY*/ +struct tdm_destroy_cmd { + struct tdm_common_cmd cmd; +} __packed; + +/*TASK_UPDATE*/ +struct tdm_update_cmd { + struct tdm_common_cmd cmd; + struct measure_update_data update_data; +} __packed; + +/*TASK_STOP,TASK_START*/ +struct tdm_startstop_cmd { + struct tdm_common_cmd cmd; +} __packed; + +struct tdm_startstop_resp { + struct measure_status m_status; +} __packed; + +/*TDM_VERSION*/ +struct tdm_fw_cmd { + uint32_t cmd_type; +} __packed; + +struct tdm_fw_resp { + struct tdm_version version; +} __packed; + +/*TDM_EXPORT_CERT*/ +struct tdm_export_cert_cmd { + uint32_t cmd_type; + uint32_t key_usage_id; +} __packed; + +struct tdm_export_cert_resp { + struct tdm_cert cert; +} __packed; + 
 +/*TDM_GET_REPORT*/ +struct tdm_get_report_cmd { + uint32_t cmd_type; + uint32_t task_id; + uint16_t selection_len; + uint8_t selection_bitmap[TDM_MAX_TASK_BITMAP]; + uint16_t user_data_len; + uint8_t user_data_val[TDM_MAX_NONCE_SIZE]; + uint8_t report_type; + uint32_t key_usage_id; +} __packed; + +/* Response: + * struct tdm_report measure_report; + * struct tdm_report_sig measure_report_sig; + */ + +struct tdm_user_report_cmd { + struct tdm_get_report_cmd report_cmd; + uint32_t needed_length; +} __packed; + +/*TDM_VPCR_AUDIT*/ +struct tdm_get_vpcr_cmd { + uint32_t cmd_type; + struct pcr_select pcr; +} __packed; + +struct tdm_get_vpcr_resp { + uint32_t pcr; + struct tpm2b_digest digest; + struct tdm_pcr_value_2b pcr_values; +} __packed; + +struct tdm_show_device { + struct tdm_version version; +} __packed; + +/*Public api definition for tdm*/ +typedef int (*measure_exception_handler_t)(uint32_t task_id); + +int psp_get_fw_info(struct tdm_version *version); +int psp_create_measure_task(struct addr_range_info *range, struct measure_data *data, + uint32_t flag, struct authcode_2b *code); +int psp_query_measure_status(uint32_t task_id, struct measure_status *status); +int psp_register_measure_exception_handler(uint32_t task_id, struct authcode_2b *code, + measure_exception_handler_t handler); +int psp_destroy_measure_task(uint32_t task_id, struct authcode_2b *code); +int psp_update_measure_task(uint32_t task_id, struct authcode_2b *code, + struct measure_update_data *data); +int psp_startstop_measure_task(uint32_t task_id, struct authcode_2b *code, bool start); +int tdm_export_cert(uint32_t key_usage_id, struct tdm_cert *cert); +int tdm_get_report(uint32_t task_id, struct task_selection_2b *selection, + struct data_2b *user_supplied_data, uint8_t report_type, uint32_t key_usage_id, + uint8_t *report_buffer, uint32_t *length); +int tdm_get_vpcr_audit(struct pcr_select pcr, struct tpm2b_digest *digest, + struct tdm_pcr_value_2b *pcr_values); + +#endif /* 
__TDM_HYGON_H__*/ -- Gitee From 321399524c51422529c39f23bb77c3120bd91ea3 Mon Sep 17 00:00:00 2001 From: chench00 Date: Mon, 1 Apr 2024 11:21:07 +0800 Subject: [PATCH 0833/2138] anolis: alinux: tpm: add Hygon TPM2 driver ANBZ: #8670 Hygon CPU implemented a firmware-based TPM2 device, which runs on its internal secure processor named PSP. The device is fully compatible with TCG TPM2.0 spec (part 1 ~ 4) in the commands level, but underlying uses a unique private interface in the form of some hardware mailbox between X86 cores and PSP, which is for sure different from the TIS or CRB interfaces defined in the PTP spec. As such, to support this device we need a specialized driver which handles the basic send and receive operations required by the kernel TPM core layer. ACPI device info passed from underlying BIOS indicates the device presence by setting the _HID field (see TCG ACPI Specification, Family 1.2 and 2.0, Chapter 8 "ACPI Device") to "HYGT0101", which distinguishes it from the rest of devices. If the BIOS does not support this setting, the driver will not be activated and thus has no impact to the system at all. Signed-off-by: chench00 Reviewed-by: Tianjia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2986 --- drivers/char/tpm/Kconfig | 12 +++ drivers/char/tpm/Makefile | 1 + drivers/char/tpm/tpm_hygon.c | 186 +++++++++++++++++++++++++++++++++++ 3 files changed, 199 insertions(+) create mode 100644 drivers/char/tpm/tpm_hygon.c diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 927088b2c3d3..746661ded992 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -210,5 +210,17 @@ config TCG_FTPM_TEE help This driver proxies for firmware TPM running in TEE. 
+config TCG_HYGON + tristate "Hygon TPM Interface" + depends on ACPI + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_SP_PSP + default y + help + If you want to make Hygon TPM support available, say Yes and + it will be accessible from within Linux. To compile this + driver as a module, choose M here; the module will be called + tpm_hygon. + source "drivers/char/tpm/st33zp24/Kconfig" endif # TCG_TPM diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 0222b1ddb310..ccce74915160 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -42,3 +42,4 @@ obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o obj-$(CONFIG_TCG_CRB) += tpm_crb.o obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o +obj-$(CONFIG_TCG_HYGON) += tpm_hygon.o diff --git a/drivers/char/tpm/tpm_hygon.c b/drivers/char/tpm/tpm_hygon.c new file mode 100644 index 000000000000..37e2e1f19c8d --- /dev/null +++ b/drivers/char/tpm/tpm_hygon.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The Hygon TPM2.0 device driver. + * + * Copyright (C) 2020 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tpm.h" + +#define TPM2PSP_CMD(id) (0x100 | (id)) +#define MAX_TPM_BUF_LEN 4096 +#define MAX_CMD_BUF_LEN (MAX_TPM_BUF_LEN + sizeof(u32) + sizeof(u32)) + +struct tpm_hygon_priv { + u8 priv_buf[MAX_CMD_BUF_LEN]; +}; + +/* + * tpm header struct name is different in different kernel versions. + * so redefine it for driver porting. 
+ */ +struct tpm_header_t { + __be16 tag; + __be32 length; + union { + __be32 ordinal; + __be32 return_code; + }; +} __packed; + +static int tpm_c_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret = 0; + struct tpm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + struct tpm_header_t *header = (void *)(priv->priv_buf + sizeof(u32) + sizeof(u32)); + u32 len = be32_to_cpu(header->length); + + if (len > count) { + ret = -E2BIG; + goto out; + } + + if (len > 0) + memmove(buf, (u8 *)header, len); + + ret = len; + +out: + return ret; +} + +static int tpm_c_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret, error; + struct tpm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + u32 buf_size = cpu_to_be32(sizeof(priv->priv_buf)); + u32 cmd_size = cpu_to_be32((u32)count); + u8 *p = priv->priv_buf; + + *(u32 *)p = buf_size; + p += sizeof(buf_size); + *(u32 *)p = cmd_size; + p += sizeof(cmd_size); + memmove(p, buf, count); + + ret = psp_do_cmd(TPM2PSP_CMD(0), priv->priv_buf, &error); + if (ret) { + pr_err("%s: sev do cmd error, %d\n", __func__, error); + ret = -EIO; + } + + return ret; +} + +static const struct tpm_class_ops tpm_c_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = tpm_c_recv, + .send = tpm_c_send, +}; + +static int hygon_tpm2_acpi_add(struct acpi_device *device) +{ + int ret; + struct tpm_chip *chip; + struct tpm_hygon_priv *priv; + struct device *dev = &device->dev; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err; + } + + chip = tpmm_chip_alloc(dev, &tpm_c_ops); + if (IS_ERR(chip)) { + pr_err("tpmm_chip_alloc fail\n"); + ret = PTR_ERR(chip); + goto err; + } + + dev_set_drvdata(&chip->dev, priv); + + chip->flags |= TPM_CHIP_FLAG_TPM2; + chip->flags |= TPM_CHIP_FLAG_IRQ; + + ret = tpm_chip_register(chip); + if (ret) { + pr_err("tpm_chip_register fail\n"); + goto err; + } + + pr_info("Hygon TPM2 detected\n"); + + return 0; + +err: + return ret; +} + +static void 
hygon_tpm2_acpi_remove(struct acpi_device *device) +{ + struct device *dev = &device->dev; + struct tpm_chip *chip = dev_get_drvdata(dev); + + tpm_chip_unregister(chip); + + pr_info("Hygon TPM2 removed\n"); +} + +static SIMPLE_DEV_PM_OPS(tpm_hygon_pm, tpm_pm_suspend, tpm_pm_resume); + +static const struct acpi_device_id hygon_tpm2_device_ids[] = { + {"HYGT0101", 0}, + {"", 0}, +}; + +MODULE_DEVICE_TABLE(acpi, hygon_tpm2_device_ids); + +static struct acpi_driver hygon_tpm2_acpi_driver = { + .name = "tpm_hygon", + .ids = hygon_tpm2_device_ids, + .ops = { + .add = hygon_tpm2_acpi_add, + .remove = hygon_tpm2_acpi_remove, + }, + .drv = { + .pm = &tpm_hygon_pm, + }, +}; + +static int __init hygon_tpm2_init(void) +{ + return acpi_bus_register_driver(&hygon_tpm2_acpi_driver); +} + +static void __exit hygon_tpm2_exit(void) +{ + acpi_bus_unregister_driver(&hygon_tpm2_acpi_driver); +} + +/* + * hygon_tpm2_init must be done after ccp module init, but before + * ima module init. That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). 
+ */ +device_initcall_sync(hygon_tpm2_init); +module_exit(hygon_tpm2_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("mayuanchen (mayuanchen@hygon.cn)"); +MODULE_DESCRIPTION("TPM2 device driver for Hygon PSP"); -- Gitee From 3d863c1ee28ee1d43d65ca50679b18c9789f587a Mon Sep 17 00:00:00 2001 From: chench00 Date: Mon, 1 Apr 2024 11:23:54 +0800 Subject: [PATCH 0834/2138] anolis: crypto: tdm: Compile the tdm driver into ccp.ko ANBZ: #8670 The TDM driver is no longer a separate ko module, we compile it into ccp.ko Signed-off-by: chench00 Reviewed-by: Tianjia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2986 --- drivers/crypto/ccp/Kconfig | 4 +- drivers/crypto/ccp/Makefile | 2 +- drivers/crypto/ccp/psp-dev.c | 16 ++++ drivers/crypto/ccp/{tdm_hygon.c => tdm-dev.c} | 89 ++++++++++++++----- drivers/crypto/ccp/{tdm_hygon.h => tdm-dev.h} | 9 +- 5 files changed, 92 insertions(+), 28 deletions(-) rename drivers/crypto/ccp/{tdm_hygon.c => tdm-dev.c} (97%) rename drivers/crypto/ccp/{tdm_hygon.h => tdm-dev.h} (98%) diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 30902232acce..4d5566936262 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -60,8 +60,8 @@ config HYGON_PSP2CPU_CMD help Hygon PSP2CPU Command Support -config TDM_HYGON - tristate "Hygon TDM Interface" +config TDM_DEV_HYGON + bool "Hygon TDM Interface" default y depends on CRYPTO_DEV_CCP_DD depends on HYGON_PSP2CPU_CMD diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 0c66b4d5792d..4550a22f7c63 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -16,6 +16,7 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ psp-ringbuf.o \ csv-dev.o +ccp-$(CONFIG_TDM_DEV_HYGON) += tdm-dev.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-aes.o \ @@ -25,7 +26,6 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-des3.o \ ccp-crypto-rsa.o \ 
ccp-crypto-sha.o -obj-$(CONFIG_TDM_HYGON) += tdm_hygon.o $(obj)/ccp_sm2_sign.asn1.o: $(obj)/ccp_sm2_sign.asn1.c $(obj)/ccp_sm2_sign.asn1.h $(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 47de733084b1..1566b955730e 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -21,6 +21,9 @@ #include "tee-dev.h" #include "platform-access.h" #include "dbc.h" +#ifdef CONFIG_TDM_DEV_HYGON +#include "tdm-dev.h" +#endif struct psp_device *psp_master; @@ -330,6 +333,14 @@ static int psp_init(struct psp_device *psp) if (psp->vdata->platform_access) psp_init_platform_access(psp); +#ifdef CONFIG_TDM_DEV_HYGON + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + ret = tdm_dev_init(); + if (ret) + return ret; + } +#endif + return 0; } @@ -749,6 +760,11 @@ void psp_dev_destroy(struct sp_device *sp) if (!psp) return; +#ifdef CONFIG_TDM_DEV_HYGON + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + tdm_dev_destroy(); +#endif + sev_dev_destroy(psp); tee_dev_destroy(psp); diff --git a/drivers/crypto/ccp/tdm_hygon.c b/drivers/crypto/ccp/tdm-dev.c similarity index 97% rename from drivers/crypto/ccp/tdm_hygon.c rename to drivers/crypto/ccp/tdm-dev.c index 56927265841e..99f6e8f7416d 100644 --- a/drivers/crypto/ccp/tdm_hygon.c +++ b/drivers/crypto/ccp/tdm-dev.c @@ -22,7 +22,12 @@ #include #include #include -#include "tdm_hygon.h" +#include "tdm-dev.h" + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) "tdm: " fmt #define TDM_CMD_ID_MAX 16 #define TDM2PSP_CMD(id) (0x110 | (id)) @@ -57,6 +62,9 @@ static unsigned int p2c_cmd_id = TDM_P2C_CMD_ID; static struct task_struct *kthread; static DECLARE_KFIFO(kfifo_error_task, unsigned char, TDM_KFIFO_SIZE); static spinlock_t kfifo_lock; +static int tdm_support; +static int tdm_init_flag; +static int tdm_destroy_flag; static int list_check_exist(uint32_t task_id) { @@ -303,7 +311,7 @@ static int tdm_get_cmd_context_hash(uint32_t 
flag, uint8_t *hash) int ret = 0; struct context_message ctx_msg = {0}; unsigned long return_address = 0; -#if IS_BUILTIN(CONFIG_TDM_HYGON) +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) struct module *p_module = NULL; #elif IS_ENABLED(CONFIG_KALLSYMS) char symbol_buf[128] = {0}; @@ -324,7 +332,7 @@ static int tdm_get_cmd_context_hash(uint32_t flag, uint8_t *hash) return_address = CALLER_ADDR1; if (return_address) { -#if IS_BUILTIN(CONFIG_TDM_HYGON) +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) p_module = __module_address(return_address); // caller is module if (p_module) @@ -375,7 +383,7 @@ static int tdm_get_cmd_context_hash(uint32_t flag, uint8_t *hash) static int tdm_verify_phy_addr_valid(struct addr_range_info *range) { int ret = 0; -#if IS_BUILTIN(CONFIG_TDM_HYGON) +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) int i; uint64_t phy_addr_start, phy_addr_end; @@ -520,6 +528,29 @@ static int ptable_virt_to_phy(uint64_t vaddr, struct addr_info *p_addr_info, uin return ret; } +int psp_check_tdm_support(void) +{ + int ret = 0; + struct tdm_version version; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (tdm_support) + goto end; + + ret = psp_get_fw_info(&version); + if (ret) { + tdm_support = 0; + goto end; + } + + tdm_support = 1; + } + +end: + return tdm_support; +} +EXPORT_SYMBOL_GPL(psp_check_tdm_support); + int psp_get_fw_info(struct tdm_version *version) { int ret = 0; @@ -552,7 +583,7 @@ int psp_get_fw_info(struct tdm_version *version) if (error) { ret = -error; - pr_err("get_fw_info exception error: 0x%x\n", error); + pr_warn("get_fw_info exception: 0x%x\n", error); goto free_cmdresp; } @@ -1493,10 +1524,13 @@ static struct miscdevice misc = { .fops = &tdm_fops, }; -static int __init hygon_tdm_init(void) +int tdm_dev_init(void) { int ret = 0; + if (tdm_init_flag) + return 0; + INIT_KFIFO(kfifo_error_task); INIT_LIST_HEAD(&dyn_head.head); rwlock_init(&dyn_head.lock); @@ -1513,17 +1547,38 @@ static int __init hygon_tdm_init(void) if (IS_ERR(kthread)) { 
pr_err("kthread_create fail\n"); ret = PTR_ERR(kthread); - return ret; + goto unreg; } wake_up_process(kthread); + + ret = misc_register(&misc); + if (ret) { + pr_err("misc_register for tdm failed\n"); + goto stop_kthread; + } + + tdm_init_flag = 1; pr_info("TDM driver loaded successfully!\n"); - return misc_register(&misc); + return ret; + +stop_kthread: + if (kthread) { + kthread_stop(kthread); + kthread = NULL; + } +unreg: + psp_unregister_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + + return ret; } -static void __exit hygon_tdm_exit(void) +int tdm_dev_destroy(void) { + if (tdm_destroy_flag) + goto end; + if (kthread) { kthread_stop(kthread); kthread = NULL; @@ -1532,18 +1587,8 @@ static void __exit hygon_tdm_exit(void) psp_unregister_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); misc_deregister(&misc); + tdm_destroy_flag = 1; +end: + return 0; } -MODULE_AUTHOR("niuyongwen@hygon.cn"); -MODULE_LICENSE("GPL"); -MODULE_VERSION("0.7"); -MODULE_DESCRIPTION("The dynamic measure driver"); - -/* - * hygon_tdm_init must be done after ccp module init. - * That's why we use a device_initcall_sync which is - * called after all the device_initcall(includes ccp) but before the - * late_initcall(includes ima). - */ -device_initcall_sync(hygon_tdm_init); -module_exit(hygon_tdm_exit); diff --git a/drivers/crypto/ccp/tdm_hygon.h b/drivers/crypto/ccp/tdm-dev.h similarity index 98% rename from drivers/crypto/ccp/tdm_hygon.h rename to drivers/crypto/ccp/tdm-dev.h index ac5638986103..afc4761a7e81 100644 --- a/drivers/crypto/ccp/tdm_hygon.h +++ b/drivers/crypto/ccp/tdm-dev.h @@ -22,8 +22,8 @@ * Version: 0.3 (fw version 1.2) * 1.add remote authentication support. 
*/ -#ifndef __TDM_HYGON_H__ -#define __TDM_HYGON_H__ +#ifndef __TDM_DEV_H__ +#define __TDM_DEV_H__ #include #include @@ -481,6 +481,7 @@ struct tdm_show_device { /*Public api definition for tdm*/ typedef int (*measure_exception_handler_t)(uint32_t task_id); +int psp_check_tdm_support(void); int psp_get_fw_info(struct tdm_version *version); int psp_create_measure_task(struct addr_range_info *range, struct measure_data *data, uint32_t flag, struct authcode_2b *code); @@ -498,4 +499,6 @@ int tdm_get_report(uint32_t task_id, struct task_selection_2b *selection, int tdm_get_vpcr_audit(struct pcr_select pcr, struct tpm2b_digest *digest, struct tdm_pcr_value_2b *pcr_values); -#endif /* __TDM_HYGON_H__*/ +int tdm_dev_init(void); +int tdm_dev_destroy(void); +#endif /* __TDM_DEV_H__*/ -- Gitee From a73e109a6f41f50c4406dbfeaa2b57635f96d9b6 Mon Sep 17 00:00:00 2001 From: chench00 Date: Mon, 1 Apr 2024 11:28:24 +0800 Subject: [PATCH 0835/2138] anolis: crypto: tdm: Support dynamic protection for SCT and IDT by HYGON TDM ANBZ: #8670 tdm_kernel_guard is an application that uses HYGON TDM technology to protect important data in the kernel. Through this application, the dynamic protection of SCT and IDT is completed in the system. 
In the future, more protection objects can be expanded based on this application Signed-off-by: chench00 Reviewed-by: Tianjia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2986 --- drivers/crypto/ccp/Kconfig | 11 + drivers/crypto/ccp/Makefile | 1 + drivers/crypto/ccp/tdm-kernel-guard.c | 352 ++++++++++++++++++++++++++ 3 files changed, 364 insertions(+) create mode 100644 drivers/crypto/ccp/tdm-kernel-guard.c diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 4d5566936262..702b4c6761fd 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -68,6 +68,17 @@ config TDM_DEV_HYGON help Hygon TDM driver +config TDM_KERNEL_GUARD + tristate "Hygon TDM kernel guard" + default y + depends on TDM_DEV_HYGON + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_SM3 + help + The key part of kernel is protected by TDM technology, SCT and IDT + are protected by default, and others are added later according to the + requirements. + config CRYPTO_DEV_CCP_DEBUGFS bool "Enable CCP Internals in DebugFS" default n diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 4550a22f7c63..088d53009824 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -26,6 +26,7 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-des3.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o +obj-$(CONFIG_TDM_KERNEL_GUARD) += tdm-kernel-guard.o $(obj)/ccp_sm2_sign.asn1.o: $(obj)/ccp_sm2_sign.asn1.c $(obj)/ccp_sm2_sign.asn1.h $(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h diff --git a/drivers/crypto/ccp/tdm-kernel-guard.c b/drivers/crypto/ccp/tdm-kernel-guard.c new file mode 100644 index 000000000000..c3afe888ea04 --- /dev/null +++ b/drivers/crypto/ccp/tdm-kernel-guard.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * The Hygon TDM KERNEL GUARD module driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include "tdm-dev.h" + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +static int eh_obj = -1; +module_param(eh_obj, int, 0644); +MODULE_PARM_DESC(eh_obj, "security enhance object for TDM"); + +/* Objects are protected by TDM now + * SCT: 0 + * IDT: 1 + */ +enum ENHANCE_OBJS { + SCT = 0, + IDT, + MAX_OBJ +}; + +static char *obj_names[MAX_OBJ] = { + "SCT", + "IDT", +}; + +struct tdm_security_enhance { + uint64_t vaddr; + uint32_t size; + struct addr_range_info *mem_range; + struct authcode_2b *authcode; + struct measure_data mdata; + uint32_t context; + uint32_t task_id; + char *obj_name; +} __packed; + +static struct tdm_security_enhance eh_objs[MAX_OBJ]; + +static int tdm_regi_callback_handler(uint32_t task_id) +{ + int i = 0; + int ret = 0; + + for (i = 0; i < MAX_OBJ; i++) { + if (task_id == eh_objs[i].task_id) { + pr_warn("Obj: %s, Task:%d, corruption detected!\n", eh_objs[i].obj_name, + task_id); + pr_warn("Please check if it's intended, or your machine may be on danger!\n"); + break; + } + } + return ret; +} + +static int calc_expected_hash(uint8_t *base_addr, uint32_t size, uint8_t *hash) +{ + int ret = 0; + struct crypto_shash *shash = NULL; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + return ret; + } + + { + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + ret = crypto_shash_init(sdesc); + if (ret) { + pr_err("crypto_shash_init failed\n"); + ret = -1; + goto out; + } + + ret = crypto_shash_update(sdesc, base_addr, size); + if (ret) { + pr_err("crypto_shash_update failed\n"); + ret = -1; + goto out; + } + + ret = crypto_shash_final(sdesc, hash); + if (ret) { + pr_err("crypto_shash_final 
failed\n"); + ret = -1; + goto out; + } + } + +out: + crypto_free_shash(shash); + return ret; +} + +static int tdm_task_create_and_run(struct tdm_security_enhance *data) +{ + int ret = 0; + int task_status = 0; + + data->task_id = psp_create_measure_task(data->mem_range, &data->mdata, data->context, + data->authcode); + if (data->task_id < 0) { + ret = data->task_id < 0; + pr_err("create measurement task failed with 0x%x!\n", data->task_id); + goto end; + } + + ret = psp_register_measure_exception_handler(data->task_id, data->authcode, + tdm_regi_callback_handler); + if (ret < 0) { + pr_err("task_id %d callback function register failed with 0x%x\n", data->task_id, + ret); + goto release_task; + } + + task_status = psp_startstop_measure_task(data->task_id, data->authcode, true); + if (task_status < 0) { + ret = task_status; + pr_err("task_id %d start failed with 0x%x\n", data->task_id, ret); + goto release_task; + } + + return ret; + +release_task: + psp_destroy_measure_task(data->task_id, data->authcode); +end: + return ret; +} + +int tdm_service_run(struct tdm_security_enhance *data) +{ + int ret = 0; + struct addr_range_info *addr_range = NULL; + + // Allocate memory for addr_range + addr_range = kzalloc(sizeof(struct addr_range_info) + sizeof(struct addr_info), GFP_KERNEL); + if (!addr_range) { + ret = -DYN_ERR_MEM; + pr_err("addr_range kzalloc memory failed\n"); + goto end; + } + + // Fill in addr_range + addr_range->count = 1; + addr_range->addr[0].addr_start = data->vaddr; + addr_range->addr[0].length = data->size; + data->mem_range = addr_range; + + // Context configuration + data->context |= TASK_CREATE_VADDR; + + // Allocate memory for authcode + data->authcode = kzalloc(sizeof(struct authcode_2b) + AUTHCODE_MAX, GFP_KERNEL); + if (!data->authcode) { + ret = -DYN_ERR_MEM; + pr_err("authcode_2b kzalloc memory failed\n"); + goto free_addr_range_info; + } + + data->authcode->len = AUTHCODE_MAX; + + // Measurement data configuration + data->mdata.hash_algo = 
HASH_ALGO_SM3; + data->mdata.period_ms = 0; + ret = calc_expected_hash((uint8_t *)data->vaddr, data->size, + data->mdata.expected_measurement); + if (ret) { + pr_err("calculate expected hash failed!\n"); + goto free_authcode; + } + + // Create and start tdm task + ret = tdm_task_create_and_run(data); + if (ret) { + pr_err("tdm_task_create_and_run failed!\n"); + goto free_authcode; + } + + return ret; + +free_authcode: + kfree(data->authcode); + data->authcode = NULL; +free_addr_range_info: + kfree(data->mem_range); + data->mem_range = NULL; +end: + return ret; +} + +int tdm_service_exit(struct tdm_security_enhance *data) +{ + int ret = 0; + int task_status = 0; + + task_status = psp_startstop_measure_task(data->task_id, data->authcode, false); + if (task_status < 0) { + ret = task_status; + pr_err("task_id %d stop failed with 0x%x\n", data->task_id, ret); + goto end; + } + + // Waiting for the task to end + msleep(40); + + psp_destroy_measure_task(data->task_id, data->authcode); + + kfree(data->authcode); + data->authcode = NULL; + kfree(data->mem_range); + data->mem_range = NULL; +end: + return ret; +} + +#if !IS_BUILTIN(CONFIG_TDM_KERNEL_GUARD) +static int p_tmp_kprobe_handler(struct kprobe *p_ri, struct pt_regs *p_regs) +{ + return 0; +} + +unsigned long kprobe_symbol_address_byname(const char *name) +{ + int p_ret; + struct kprobe p_kprobe; + unsigned long addr = 0; + + memset(&p_kprobe, 0, sizeof(p_kprobe)); + + p_kprobe.pre_handler = p_tmp_kprobe_handler; + p_kprobe.symbol_name = name; + + p_ret = register_kprobe(&p_kprobe); + if (p_ret < 0) { + pr_err("register_kprobe error [%d] :(\n", p_ret); + return 0; + } + + addr = (unsigned long)p_kprobe.addr; + unregister_kprobe(&p_kprobe); + + return addr; +} +#endif + +static int __init kernel_security_enhance_init(void) +{ + int i = 0; + int ret = 0; + unsigned long *sct_addr; + struct desc_ptr idtr; +#if !IS_BUILTIN(CONFIG_TDM_KERNEL_GUARD) + unsigned long (*f_kallsyms_lookup_name)(const char *); + + 
f_kallsyms_lookup_name = (unsigned long (*)(const char *))kprobe_symbol_address_byname( + "kallsyms_lookup_name"); + if (!f_kallsyms_lookup_name) { + ret = -DYN_ERR_API; + pr_err("kprobe_symbol_address_byname failed!"); + goto end; + } + + sct_addr = (unsigned long *)f_kallsyms_lookup_name("sys_call_table"); +#else + + sct_addr = (unsigned long *)kallsyms_lookup_name("sys_call_table"); +#endif + if (!sct_addr) { + ret = -DYN_ERR_API; + pr_err("kallsyms_lookup_name for sys_call_table failed!"); + goto end; + } + + asm("sidt %0":"=m"(idtr)); + + if (!psp_check_tdm_support()) + return 0; + + for (i = 0; i < MAX_OBJ; i++) { + memset(&eh_objs[i], 0, sizeof(eh_objs[i])); + eh_objs[i].context = CONTEXT_CHECK_MODNAME; + eh_objs[i].obj_name = obj_names[i]; + } + + if ((eh_obj == -1) || (eh_obj & (1 << SCT))) { + eh_objs[SCT].vaddr = (uint64_t)sct_addr; + eh_objs[SCT].size = NR_syscalls * sizeof(char *); + } + if ((eh_obj == -1) || (eh_obj & (1 << IDT))) { + eh_objs[IDT].vaddr = idtr.address; + eh_objs[IDT].size = idtr.size; + } + + for (i = 0; i < MAX_OBJ; i++) { + if (eh_objs[i].vaddr) + tdm_service_run(&eh_objs[i]); + } + + pr_info("Hygon TDM guard load successfully!\n"); + +end: + return ret; +} + +static void __exit kernel_security_enhance_exit(void) +{ + int i = 0; + + if (!psp_check_tdm_support()) + return; + + for (i = 0; i < MAX_OBJ; i++) { + if (eh_objs[i].vaddr) + tdm_service_exit(&eh_objs[i]); + } + pr_info("Hygon TDM guard unload successfully!\n"); +} + +MODULE_AUTHOR("niuyongwen@hygon.cn"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.1"); +MODULE_DESCRIPTION("Kernel security enhancement module by TDM"); + +/* + * kernel_security_enhance_init must be done after ccp module init. + * That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). 
+ */ +device_initcall_sync(kernel_security_enhance_init); +module_exit(kernel_security_enhance_exit); -- Gitee From c3c7d8f1025bfea929ba1468f821dfdf1bcde65f Mon Sep 17 00:00:00 2001 From: chench00 Date: Mon, 1 Apr 2024 11:31:45 +0800 Subject: [PATCH 0836/2138] anolis: alinux: tcm: add Hygon TCM2 driver ANBZ: #8670 Hygon CPU implemented a firmware-based TCM2 device, which runs on its internal secure processor named PSP. The device underlying uses an unique private interface in the form of some hardware mailbox between X86 cores and PSP, which is for sure different from the TIS or CRB interfaces defined in the PTP spec. As such, to support this device we need a specialized driver which handles the basic send and receive operations required by the kernel TPM core layer. ACPI device info passed from underlying BIOS indicates the device presence by setting the _HID field to "HYGT0201", which distinguishes it from the rest of devices. If the BIOS does not support this setting, the driver will not be activated and thus has no impact to the system at all. Signed-off-by: chench00 Reviewed-by: Tianjia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2986 --- drivers/char/tpm/Kconfig | 12 ++ drivers/char/tpm/Makefile | 1 + drivers/char/tpm/tcm_hygon.c | 243 +++++++++++++++++++++++++++++++++++ 3 files changed, 256 insertions(+) create mode 100644 drivers/char/tpm/tcm_hygon.c diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 746661ded992..301284e07603 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -222,5 +222,17 @@ config TCG_HYGON driver as a module, choose M here; the module will be called tpm_hygon. +config TCM_HYGON + tristate "Hygon TCM Interface" + depends on ACPI + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_SP_PSP + default y + help + If you want to make Hygon TCM support available, say Yes and + it will be accessible from within Linux. 
To compile this + driver as a module, choose M here; the module will be called + tcm_hygon. + source "drivers/char/tpm/st33zp24/Kconfig" endif # TCG_TPM diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index ccce74915160..8f868c9b9ce7 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -43,3 +43,4 @@ obj-$(CONFIG_TCG_CRB) += tpm_crb.o obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o obj-$(CONFIG_TCG_HYGON) += tpm_hygon.o +obj-$(CONFIG_TCM_HYGON) += tcm_hygon.o diff --git a/drivers/char/tpm/tcm_hygon.c b/drivers/char/tpm/tcm_hygon.c new file mode 100644 index 000000000000..ef63d1a0a902 --- /dev/null +++ b/drivers/char/tpm/tcm_hygon.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The Hygon TCM2.0 device driver. + * + * Copyright (C) 2023 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
 */

/*
 * NOTE(review): the #include targets below were lost when this chunk was
 * extracted (angle-bracket contents stripped); the bare lines are kept
 * as-is.  TODO: restore the header names from the original tree.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "tpm.h"

/* PSP mailbox command id for TCM requests (base 0x100 + sub-id). */
#define TCM2PSP_CMD(id) (0x100 | (id))
/* Size of the single bounce buffer shared between send and recv. */
#define MAX_TCM_BUF_LEN 4096

/* Per-chip private state: one buffer reused for both request and reply. */
struct tcm_hygon_priv {
	u8 priv_buf[MAX_TCM_BUF_LEN];
};

/* On-wire TCM command/response header; all fields are big-endian. */
struct tcm_header_t {
	__be16 tag;
	__be32 length;
	union {
		__be32 ordinal;       /* command code (request) */
		__be32 return_code;   /* status (response) */
	};
} __packed;

/*
 * Copy the response left in priv_buf by the preceding send back to the
 * TPM core.  The reply sits after two u32 prefix words (buffer size and
 * command size written by tcm_c_send); its total length comes from the
 * big-endian header.  Returns the number of bytes copied, or -E2BIG if
 * the reply does not fit in @count.
 */
static int tcm_c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int ret = 0;
	struct tcm_hygon_priv *priv = dev_get_drvdata(&chip->dev);
	struct tcm_header_t *header = (void *)(priv->priv_buf + sizeof(u32) + sizeof(u32));
	u32 len = be32_to_cpu(header->length);

	if (len > count) {
		ret = -E2BIG;
		goto out;
	}

	if (len > 0)
		memmove(buf, (u8 *)header, len);

	ret = len;

out:
	return ret;
}

/*
 * Marshal a TCM command into priv_buf -- [total buf size][cmd size][cmd]
 * with both prefix words stored big-endian -- and hand it to the PSP
 * synchronously.  The PSP writes its reply into the same buffer, which
 * tcm_c_recv() later reads.  Returns 0 on success, -E2BIG if the command
 * does not fit, -EIO on a PSP error.
 */
static int tcm_c_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	int ret, error;
	struct tcm_hygon_priv *priv = dev_get_drvdata(&chip->dev);
	u32 buf_size = sizeof(priv->priv_buf);
	u32 cmd_size = (u32)count;
	u8 *p = priv->priv_buf;

	if (buf_size - sizeof(u32) - sizeof(u32) < count) {
		ret = -E2BIG;
		goto out;
	}

	*(u32 *)p = cpu_to_be32(buf_size);
	p += sizeof(buf_size);
	*(u32 *)p = cpu_to_be32(cmd_size);
	p += sizeof(cmd_size);
	memmove(p, buf, count);

	ret = psp_do_cmd(TCM2PSP_CMD(0), priv->priv_buf, &error);
	if (ret) {
		pr_err("%s: psp do cmd error, %d\n", __func__, error);
		ret = -EIO;
	}

out:
	return ret;
}

static const struct tpm_class_ops tcm_c_ops = {
	.flags = TPM_OPS_AUTO_STARTUP,
	.recv = tcm_c_recv,
	.send = tcm_c_send,
};

/* Remove the securityfs event-log files; see the workaround note below. */
static void tcm_bios_log_teardown(struct tpm_chip *chip)
{
	int i;
	struct inode *inode;

	/* securityfs_remove currently doesn't take care of handling sync
	 * between removal and opening of pseudo files. To handle this, a
	 * workaround is added by making i_private = NULL here during removal
	 * and to check it during open(), both within inode_lock()/unlock().
	 * This design ensures that open() either safely gets kref or fails.
	 */
	for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
		if (chip->bios_dir[i]) {
			inode = d_inode(chip->bios_dir[i]);
			inode_lock(inode);
			inode->i_private = NULL;
			inode_unlock(inode);
			securityfs_remove(chip->bios_dir[i]);
		}
	}
}

/*
 * Local unregister path mirroring tpm_chip_unregister(), used to unwind
 * a partially completed add (after the devs node was renamed to tcmrm%d).
 */
static void tcm_chip_unregister(struct tpm_chip *chip)
{
	if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
		hwrng_unregister(&chip->hwrng);
	tcm_bios_log_teardown(chip);
	cdev_del(&chip->cdevs);
	put_device(&chip->devs);
	cdev_device_del(&chip->cdev, &chip->dev);
}

/*
 * ACPI probe: allocate the chip, register it with the TPM core, then
 * rename the device nodes from tpm%d/tpmrm%d to tcm%d/tcmrm%d.  The
 * rename of the resource-manager node requires a device_del/device_add
 * cycle because its name was set during tpm_chip_register().
 */
static int hygon_tcm2_acpi_add(struct acpi_device *device)
{
	int ret;
	struct tpm_chip *chip;
	struct tcm_hygon_priv *priv;
	struct device *dev = &device->dev;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err;
	}

	chip = tpmm_chip_alloc(dev, &tcm_c_ops);
	if (IS_ERR(chip)) {
		pr_err("tcmm_chip_alloc fail\n");
		ret = PTR_ERR(chip);
		goto err;
	}

	ret = dev_set_name(&chip->dev, "tcm%d", chip->dev_num);
	if (ret) {
		pr_err("tcm device set name fail\n");
		goto err;
	}

	dev_set_drvdata(&chip->dev, priv);

	chip->flags |= TPM_CHIP_FLAG_TPM2;
	chip->flags |= TPM_CHIP_FLAG_IRQ;

	ret = tpm_chip_register(chip);
	if (ret) {
		pr_err("tcm chip_register fail\n");
		goto err;
	}

	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
		device_del(&chip->devs);
		ret = dev_set_name(&chip->devs, "tcmrm%d", chip->dev_num);
		if (ret) {
			pr_err("tcmrm device set name fail\n");
			goto err_dev;
		}
		ret = device_add(&chip->devs);
		if (ret) {
			pr_err("devs add fail\n");
			goto err_dev;
		}
	}

	pr_info("Hygon TCM2 detected\n");

	return 0;

err_dev:
	tcm_chip_unregister(chip);

err:
	return ret;
}

/* ACPI remove: unregister the chip via the generic TPM core path. */
static void hygon_tcm2_acpi_remove(struct acpi_device *device)
{
	struct device *dev = &device->dev;
	struct tpm_chip *chip = dev_get_drvdata(dev);

	tpm_chip_unregister(chip);

	pr_info("Hygon TCM2 removed\n");
}

+static SIMPLE_DEV_PM_OPS(tcm_hygon_pm, tpm_pm_suspend, tpm_pm_resume); + +static const struct acpi_device_id hygon_tcm2_device_ids[] = { + {"HYGT0201", 0}, + {"", 0}, +}; + +MODULE_DEVICE_TABLE(acpi, hygon_tcm2_device_ids); + +static struct acpi_driver hygon_tcm2_acpi_driver = { + .name = "tcm_hygon", + .ids = hygon_tcm2_device_ids, + .ops = { + .add = hygon_tcm2_acpi_add, + .remove = hygon_tcm2_acpi_remove, + }, + .drv = { + .pm = &tcm_hygon_pm, + }, +}; + +static int __init hygon_tcm2_init(void) +{ + return acpi_bus_register_driver(&hygon_tcm2_acpi_driver); +} + +static void __exit hygon_tcm2_exit(void) +{ + acpi_bus_unregister_driver(&hygon_tcm2_acpi_driver); +} + +/* + * hygon_tcm2_init must be done after ccp module init, but before + * ima module init. That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). + */ +device_initcall_sync(hygon_tcm2_init); +module_exit(hygon_tcm2_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("mayuanchen (mayuanchen@hygon.cn)"); +MODULE_DESCRIPTION("TCM2 device driver for Hygon PSP"); -- Gitee From 4d3c1b113e32a2fe0ff9b454ebf94e96424922ad Mon Sep 17 00:00:00 2001 From: chench00 Date: Sun, 7 Apr 2024 14:14:44 +0800 Subject: [PATCH 0837/2138] anolis: crypto: command co-processor: Add config to anolis_defconfig ANBZ: #8670 Signed-off-by: chench00 Reviewed-by: Tianjia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2986 --- arch/x86/configs/anolis-debug_defconfig | 5 +++++ arch/x86/configs/anolis_defconfig | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 6f4e977693dd..2619e84b4914 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -7430,6 +7430,11 @@ CONFIG_CRYPTO_DEV_CCP_DD=m CONFIG_CRYPTO_DEV_SP_CCP=y CONFIG_CRYPTO_DEV_CCP_CRYPTO=m CONFIG_CRYPTO_DEV_SP_PSP=y 
+CONFIG_HYGON_PSP2CPU_CMD=y +CONFIG_TCG_HYGON=m +CONFIG_TCM_HYGON=m +CONFIG_TDM_DEV_HYGON=y +CONFIG_TDM_KERNEL_GUARD=m CONFIG_HYGON_GM=y # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 47269a2f1c76..ee0afda89125 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -7421,6 +7421,11 @@ CONFIG_CRYPTO_DEV_CCP_DD=m CONFIG_CRYPTO_DEV_SP_CCP=y CONFIG_CRYPTO_DEV_CCP_CRYPTO=m CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_HYGON_PSP2CPU_CMD=y +CONFIG_TCG_HYGON=m +CONFIG_TCM_HYGON=m +CONFIG_TDM_DEV_HYGON=y +CONFIG_TDM_KERNEL_GUARD=m CONFIG_HYGON_GM=y # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m -- Gitee From e57c96b3d86fac9f2d8bc7b2ed20e538b201ed97 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 12:06:54 +0800 Subject: [PATCH 0838/2138] anolis: KVM: Define CSV3 key management command id ANBZ: #8683 Define Hygon CSV3 key management command id and structure. The command definition is available in Hygon CSV3 spec. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- include/uapi/linux/kvm.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 61c5c6990801..6d8833ac456e 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2311,4 +2311,25 @@ struct kvm_s390_zpci_op { /* flags for kvm_s390_zpci_op->u.reg_aen.flags */ #define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0) +/* CSV3 command */ +enum csv3_cmd_id { + KVM_CSV3_NR_MIN = 0xc0, + + KVM_CSV3_INIT = KVM_CSV3_NR_MIN, + KVM_CSV3_LAUNCH_ENCRYPT_DATA, + KVM_CSV3_LAUNCH_ENCRYPT_VMCB, + + KVM_CSV3_NR_MAX, +}; + +struct kvm_csv3_init_data { + __u64 nodemask; +}; + +struct kvm_csv3_launch_encrypt_data { + __u64 gpa; + __u64 uaddr; + __u32 len; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From e9ef83262d59f28368459e68771225804324f416 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 12:13:58 +0800 Subject: [PATCH 0839/2138] anolis: KVM: SVM: CSV: Add KVM_CSV3_INIT command ANBZ: #8683 The command initializes the CSV3 guest's context. The firmware should be initialized before we issue any CSV3 guest commands. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/kvm/Makefile | 2 + arch/x86/kvm/svm/csv.c | 110 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 23 +++++++++ arch/x86/kvm/svm/svm.c | 4 ++ 4 files changed, 139 insertions(+) create mode 100644 arch/x86/kvm/svm/csv.c create mode 100644 arch/x86/kvm/svm/csv.h diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index c88c8847dfd1..a70ae9e2ad1a 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -41,6 +41,8 @@ obj-$(CONFIG_KVM) += kvm.o obj-$(CONFIG_KVM_INTEL) += kvm-intel.o obj-$(CONFIG_KVM_AMD) += kvm-amd.o +kvm-amd-$(CONFIG_HYGON_CSV) += svm/csv.o + AFLAGS_svm/vmenter.o := -iquote $(obj) $(obj)/svm/vmenter.o: $(obj)/kvm-asm-offsets.h diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c new file mode 100644 index 000000000000..7944d84e49bb --- /dev/null +++ b/arch/x86/kvm/svm/csv.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * CSV driver for KVM + * + * HYGON CSV support + * + * Copyright (C) Hygon Info Technologies Ltd. 
 */

/*
 * NOTE(review): the #include targets below were lost when this chunk was
 * extracted (angle-bracket contents stripped); the bare lines are kept
 * as-is.  TODO: restore the header names from the original tree.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "kvm_cache_regs.h"
#include "svm.h"
#include "csv.h"
#include "x86.h"

#undef pr_fmt
#define pr_fmt(fmt) "CSV: " fmt

/* Per-VM CSV3 state, appended to the SEV info of the enclosing kvm_svm. */
struct kvm_csv_info {
	struct kvm_sev_info *sev;

	bool csv3_active; /* CSV3 enabled guest */
	unsigned long nodemask; /* Nodemask where CSV3 guest's memory resides */
};

/* VM container used when CSV is active: kvm_svm plus the CSV info above. */
struct kvm_svm_csv {
	struct kvm_svm kvm_svm;
	struct kvm_csv_info csv_info;
};

/* Copy of the vendor ops taken at init; used to chain to the SEV paths. */
static struct kvm_x86_ops csv_x86_ops;

static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm)
{
	return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm);
}

/*
 * KVM_CSV3_INIT handler: mark the guest as CSV3 and record the nodemask
 * from userspace.  Requires SEV-ES to already be active and rejects a
 * second init.  Returns 0 on success, -EINVAL/-EFAULT on error.
 */
static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
	struct kvm_csv3_init_data params;

	if (unlikely(csv->csv3_active))
		return -EINVAL;

	if (unlikely(!sev->es_active))
		return -EINVAL;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(params)))
		return -EFAULT;

	csv->csv3_active = true;
	csv->sev = sev;
	csv->nodemask = (unsigned long)params.nodemask;

	return 0;
}

/*
 * mem_enc ioctl dispatcher installed over the original SVM op: CSV3
 * commands are handled here under kvm->lock; anything else is forwarded
 * to the saved original handler (which takes the lock itself, hence the
 * unlock before chaining).
 */
static int csv_mem_enc_op(struct kvm *kvm, void __user *argp)
{
	struct kvm_sev_cmd sev_cmd;
	int r = -EINVAL;

	if (!argp)
		return 0;

	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
		return -EFAULT;

	mutex_lock(&kvm->lock);

	switch (sev_cmd.id) {
	case KVM_CSV3_INIT:
		r = csv3_guest_init(kvm, &sev_cmd);
		break;
	default:
		mutex_unlock(&kvm->lock);
		if (likely(csv_x86_ops.mem_enc_ioctl))
			r = csv_x86_ops.mem_enc_ioctl(kvm, argp);
		goto out;
	}

	mutex_unlock(&kvm->lock);

	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
		r = -EFAULT;

out:
	return r;
}

/*
 * Hook CSV support into the vendor ops when the CPU advertises CSV3:
 * save the original ops for chaining, then override mem_enc_ioctl and
 * grow vm_size to hold struct kvm_svm_csv.
 */
void __init csv_init(struct kvm_x86_ops *ops)
{
	if (boot_cpu_has(X86_FEATURE_CSV3)) {
		memcpy(&csv_x86_ops, ops, sizeof(struct
kvm_x86_ops)); + + ops->mem_enc_ioctl = csv_mem_enc_op; + ops->vm_size = sizeof(struct kvm_svm_csv); + } +} diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h new file mode 100644 index 000000000000..df5cf9ea9422 --- /dev/null +++ b/arch/x86/kvm/svm/csv.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * CSV driver for KVM + * + * HYGON CSV support + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#ifndef __SVM_CSV_H +#define __SVM_CSV_H + +#ifdef CONFIG_HYGON_CSV + +void __init csv_init(struct kvm_x86_ops *ops); + +#else /* !CONFIG_HYGON_CSV */ + +static inline void __init csv_init(struct kvm_x86_ops *ops) { } + +#endif /* CONFIG_HYGON_CSV */ + +#endif diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index e10264b375be..800d40670d76 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -48,6 +48,7 @@ #include "svm.h" #include "svm_ops.h" +#include "csv.h" #include "kvm_onhyperv.h" #include "svm_onhyperv.h" @@ -5485,6 +5486,9 @@ static int __init svm_init(void) if (!kvm_is_svm_supported()) return -EOPNOTSUPP; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + csv_init(&svm_x86_ops); + r = kvm_x86_vendor_init(&svm_init_ops); if (r) return r; -- Gitee From deb52ac1a7dde992a1dda9e2e4fc5d22f4000e49 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Thu, 10 Aug 2023 14:54:45 +0800 Subject: [PATCH 0840/2138] anolis: KVM: SVM: CSV: Add KVM_CSV3_LAUNCH_ENCRYPT_DATA command ANBZ: #8683 The command is used to load and encrypt data in CSV3 guest's private memory. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/kvm/svm/csv.c | 288 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 288 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 7944d84e49bb..3b6f98e05651 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -24,10 +24,19 @@ #undef pr_fmt #define pr_fmt(fmt) "CSV: " fmt +struct encrypt_data_block { + struct { + u64 npages: 12; + u64 pfn: 52; + } entry[512]; +}; + struct kvm_csv_info { struct kvm_sev_info *sev; bool csv3_active; /* CSV3 enabled guest */ + + struct list_head smr_list; /* List of guest secure memory regions */ unsigned long nodemask; /* Nodemask where CSV3 guest's memory resides */ }; @@ -36,6 +45,12 @@ struct kvm_svm_csv { struct kvm_csv_info csv_info; }; +struct secure_memory_region { + struct list_head list; + u64 npages; + u64 hpa; +}; + static struct kvm_x86_ops csv_x86_ops; static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm) @@ -43,6 +58,35 @@ static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm) return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm); } +static bool csv3_guest(struct kvm *kvm) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + + return sev_es_guest(kvm) && csv->csv3_active; +} + +static int __csv_issue_cmd(int fd, int id, void *data, int *error) +{ + struct fd f; + int ret; + + f = fdget(fd); + if (!f.file) + return -EBADF; + + ret = sev_issue_cmd_external_user(f.file, id, data, error); + + fdput(f); + return ret; +} + +static int csv_issue_cmd(struct kvm *kvm, int id, void *data, int *error) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + + return __csv_issue_cmd(sev->fd, id, data, error); +} + static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -66,6 +110,247 @@ static int csv3_guest_init(struct 
kvm *kvm, struct kvm_sev_cmd *argp) return 0; } +static int csv3_set_guest_private_memory(struct kvm *kvm) +{ + struct kvm_memslots *slots = kvm_memslots(kvm); + struct kvm_memory_slot *memslot; + struct secure_memory_region *smr; + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_set_guest_private_memory *set_guest_private_memory; + struct csv3_data_memory_region *regions; + nodemask_t nodemask; + nodemask_t *nodemask_ptr; + + LIST_HEAD(tmp_list); + struct list_head *pos, *q; + u32 i = 0, count = 0, remainder; + int ret = 0, error; + u64 size = 0, nr_smr = 0, nr_pages = 0; + u32 smr_entry_shift; + int bkt; + + unsigned int flags = FOLL_HWPOISON; + int npages; + struct page *page; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + nodes_clear(nodemask); + for_each_set_bit(i, &csv->nodemask, BITS_PER_LONG) + if (i < MAX_NUMNODES) + node_set(i, nodemask); + + nodemask_ptr = csv->nodemask ? &nodemask : &node_online_map; + + set_guest_private_memory = kzalloc(sizeof(*set_guest_private_memory), + GFP_KERNEL_ACCOUNT); + if (!set_guest_private_memory) + return -ENOMEM; + + regions = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!regions) { + kfree(set_guest_private_memory); + return -ENOMEM; + } + + /* Get guest secure memory size */ + kvm_for_each_memslot(memslot, bkt, slots) { + npages = get_user_pages_unlocked(memslot->userspace_addr, 1, + &page, flags); + if (npages != 1) + continue; + + nr_pages += memslot->npages; + + put_page(page); + } + + /* + * NPT secure memory size + * + * PTEs_entries = nr_pages + * PDEs_entries = nr_pages / 512 + * PDPEs_entries = nr_pages / (512 * 512) + * PML4Es_entries = nr_pages / (512 * 512 * 512) + * + * Totals_entries = nr_pages + nr_pages / 512 + nr_pages / (512 * 512) + + * nr_pages / (512 * 512 * 512) <= nr_pages + nr_pages / 256 + * + * Total_NPT_size = (Totals_entries / 512) * PAGE_SIZE = ((nr_pages + + * nr_pages / 256) / 512) * PAGE_SIZE = 
nr_pages * 8 + nr_pages / 32 + * <= nr_pages * 9 + * + */ + smr_entry_shift = csv_get_smr_entry_shift(); + size = ALIGN((nr_pages << PAGE_SHIFT), 1UL << smr_entry_shift) + + ALIGN(nr_pages * 9, 1UL << smr_entry_shift); + nr_smr = size >> smr_entry_shift; + remainder = nr_smr; + for (i = 0; i < nr_smr; i++) { + smr = kzalloc(sizeof(*smr), GFP_KERNEL_ACCOUNT); + if (!smr) { + ret = -ENOMEM; + goto e_free_smr; + } + + smr->hpa = csv_alloc_from_contiguous((1UL << smr_entry_shift), + nodemask_ptr, + get_order(1 << smr_entry_shift)); + if (!smr->hpa) { + kfree(smr); + ret = -ENOMEM; + goto e_free_smr; + } + + smr->npages = ((1UL << smr_entry_shift) >> PAGE_SHIFT); + list_add_tail(&smr->list, &tmp_list); + + regions[count].size = (1UL << smr_entry_shift); + regions[count].base_address = smr->hpa; + count++; + + if (count >= (PAGE_SIZE / sizeof(regions[0])) || (remainder == count)) { + set_guest_private_memory->nregions = count; + set_guest_private_memory->handle = sev->handle; + set_guest_private_memory->regions_paddr = __sme_pa(regions); + + /* set secury memory region for launch enrypt data */ + ret = csv_issue_cmd(kvm, CSV3_CMD_SET_GUEST_PRIVATE_MEMORY, + set_guest_private_memory, &error); + if (ret) + goto e_free_smr; + + memset(regions, 0, PAGE_SIZE); + remainder -= count; + count = 0; + } + } + + list_splice(&tmp_list, &csv->smr_list); + + goto done; + +e_free_smr: + if (!list_empty(&tmp_list)) { + list_for_each_safe(pos, q, &tmp_list) { + smr = list_entry(pos, struct secure_memory_region, list); + if (smr) { + csv_release_to_contiguous(smr->hpa, + smr->npages << PAGE_SHIFT); + list_del(&smr->list); + kfree(smr); + } + } + } +done: + kfree(set_guest_private_memory); + kfree(regions); + return ret; +} + +static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct kvm_csv3_launch_encrypt_data params; + struct csv3_data_launch_encrypt_data *encrypt_data = NULL; + struct 
encrypt_data_block *blocks = NULL; + u8 *data = NULL; + u32 offset; + u32 num_entries, num_entries_in_block; + u32 num_blocks, num_blocks_max; + u32 i, n; + unsigned long pfn, pfn_sme_mask; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) { + ret = -EFAULT; + goto exit; + } + + if ((params.len & ~PAGE_MASK) || !params.len || !params.uaddr) { + ret = -EINVAL; + goto exit; + } + + /* Allocate all the guest memory from CMA */ + ret = csv3_set_guest_private_memory(kvm); + if (ret) + goto exit; + + num_entries = params.len / PAGE_SIZE; + num_entries_in_block = ARRAY_SIZE(blocks->entry); + num_blocks = (num_entries + num_entries_in_block - 1) / num_entries_in_block; + num_blocks_max = ARRAY_SIZE(encrypt_data->data_blocks); + + if (num_blocks >= num_blocks_max) { + ret = -EINVAL; + goto exit; + } + + data = vzalloc(params.len); + if (!data) { + ret = -ENOMEM; + goto exit; + } + if (copy_from_user(data, (void __user *)params.uaddr, params.len)) { + ret = -EFAULT; + goto data_free; + } + + blocks = vzalloc(num_blocks * sizeof(*blocks)); + if (!blocks) { + ret = -ENOMEM; + goto data_free; + } + + for (offset = 0, i = 0, n = 0; offset < params.len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + data); + pfn_sme_mask = __sme_set(pfn << PAGE_SHIFT) >> PAGE_SHIFT; + if (offset && ((blocks[n].entry[i].pfn + 1) == pfn_sme_mask)) + blocks[n].entry[i].npages += 1; + else { + if (offset) { + i = (i + 1) % num_entries_in_block; + n = (i == 0) ? 
(n + 1) : n; + } + blocks[n].entry[i].pfn = pfn_sme_mask; + blocks[n].entry[i].npages = 1; + } + } + + encrypt_data = kzalloc(sizeof(*encrypt_data), GFP_KERNEL); + if (!encrypt_data) { + ret = -ENOMEM; + goto block_free; + } + + encrypt_data->handle = csv->sev->handle; + encrypt_data->length = params.len; + encrypt_data->gpa = params.gpa; + for (i = 0; i <= n; i++) { + encrypt_data->data_blocks[i] = + __sme_set(vmalloc_to_pfn((void *)blocks + i * sizeof(*blocks)) << PAGE_SHIFT); + } + + clflush_cache_range(data, params.len); + ret = csv_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_DATA, + encrypt_data, &argp->error); + + kfree(encrypt_data); +block_free: + vfree(blocks); +data_free: + vfree(data); +exit: + return ret; +} + static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -83,6 +368,9 @@ static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_CSV3_INIT: r = csv3_guest_init(kvm, &sev_cmd); break; + case KVM_CSV3_LAUNCH_ENCRYPT_DATA: + r = csv3_launch_encrypt_data(kvm, &sev_cmd); + break; default: mutex_unlock(&kvm->lock); if (likely(csv_x86_ops.mem_enc_ioctl)) -- Gitee From 9bbc334d07d54111ac92a1553672ac293a9acd55 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Thu, 10 Aug 2023 15:00:59 +0800 Subject: [PATCH 0841/2138] anolis: KVM: SVM: CSV: Add KVM_CSV3_LAUNCH_ENCRYPT_VMCB command ANBZ: #8683 The command is used to get secure VMCB physical address which is allocated in private memory by firmware. Besides, shadow VMCB physical address will be updated in secure VMCB. Also the firmware creates a new private page for guest's VMSA per vcpu. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/kvm/svm/csv.c | 69 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 3b6f98e05651..8cdf01a2f128 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -65,6 +65,26 @@ static bool csv3_guest(struct kvm *kvm) return sev_es_guest(kvm) && csv->csv3_active; } +static int csv_sync_vmsa(struct vcpu_svm *svm) +{ + struct sev_es_save_area *save = svm->sev_es.vmsa; + + /* Check some debug related fields before encrypting the VMSA */ + if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) + return -EINVAL; + + memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); + + /* Sync registgers per spec. */ + save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; + save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; + save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; + save->xcr0 = svm->vcpu.arch.xcr0; + save->xss = svm->vcpu.arch.ia32_xss; + + return 0; +} + static int __csv_issue_cmd(int fd, int id, void *data, int *error) { struct fd f; @@ -351,6 +371,52 @@ static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_launch_encrypt_vmcb *encrypt_vmcb = NULL; + struct kvm_vcpu *vcpu; + int ret = 0; + unsigned long i = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + encrypt_vmcb = kzalloc(sizeof(*encrypt_vmcb), GFP_KERNEL); + if (!encrypt_vmcb) { + ret = -ENOMEM; + goto exit; + } + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + ret = csv_sync_vmsa(svm); + if (ret) + goto e_free; + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + clflush_cache_range(svm->vmcb, PAGE_SIZE); + encrypt_vmcb->handle = 
csv->sev->handle; + encrypt_vmcb->vcpu_id = i; + encrypt_vmcb->vmsa_addr = __sme_pa(svm->sev_es.vmsa); + encrypt_vmcb->vmsa_len = PAGE_SIZE; + encrypt_vmcb->shadow_vmcb_addr = __sme_pa(svm->vmcb); + encrypt_vmcb->shadow_vmcb_len = PAGE_SIZE; + ret = csv_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_VMCB, + encrypt_vmcb, &argp->error); + if (ret) + goto e_free; + + svm->current_vmcb->pa = encrypt_vmcb->secure_vmcb_addr; + svm->vcpu.arch.guest_state_protected = true; + } + +e_free: + kfree(encrypt_vmcb); +exit: + return ret; +} + static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -371,6 +437,9 @@ static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_CSV3_LAUNCH_ENCRYPT_DATA: r = csv3_launch_encrypt_data(kvm, &sev_cmd); break; + case KVM_CSV3_LAUNCH_ENCRYPT_VMCB: + r = csv3_launch_encrypt_vmcb(kvm, &sev_cmd); + break; default: mutex_unlock(&kvm->lock); if (likely(csv_x86_ops.mem_enc_ioctl)) -- Gitee From 295310a88adf1a00d271870ad9be6ef23e45c105 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Thu, 10 Aug 2023 15:07:10 +0800 Subject: [PATCH 0842/2138] anolis: KVM: SVM: CSV: Manage CSV3 guest's nested page table ANBZ: #8683 CSV3 guest's nested page table is managed by firmware. All the guest memory is private by default. The firmware maps guest's private memory in nested page table in advance. CSV3 guest may declare some memory regions as shared. It needs to send secure call command with specified memory region to firmware, then firmware frees the private pages which is mapped to the memory region. When guest access the specified memory region by then, nested page fault happens. When nested page fault happens, host needs to issue an external command UPDATE_NPT to firmware. Then firmware helps to map the specified shared pages in nested page table. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/kvm/svm/csv.c | 482 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 482 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 8cdf01a2f128..b64619528726 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -31,11 +31,41 @@ struct encrypt_data_block { } entry[512]; }; +union csv3_page_attr { + struct { + u64 reserved: 1; + u64 rw: 1; + u64 reserved1: 49; + u64 mmio: 1; + u64 reserved2: 12; + }; + u64 val; +}; + +enum csv3_pg_level { + CSV3_PG_LEVEL_NONE, + CSV3_PG_LEVEL_4K, + CSV3_PG_LEVEL_2M, + CSV3_PG_LEVEL_NUM +}; + +struct shared_page_block { + struct list_head list; + struct page **pages; + u64 count; +}; + struct kvm_csv_info { struct kvm_sev_info *sev; bool csv3_active; /* CSV3 enabled guest */ + /* List of shared pages */ + u64 total_shared_page_count; + struct list_head shared_pages_list; + void *cached_shared_page_block; + struct mutex shared_page_block_lock; + struct list_head smr_list; /* List of guest secure memory regions */ unsigned long nodemask; /* Nodemask where CSV3 guest's memory resides */ }; @@ -58,6 +88,24 @@ static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm) return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm); } +static int to_csv3_pg_level(int level) +{ + int ret; + + switch (level) { + case PG_LEVEL_4K: + ret = CSV3_PG_LEVEL_4K; + break; + case PG_LEVEL_2M: + ret = CSV3_PG_LEVEL_2M; + break; + default: + ret = CSV3_PG_LEVEL_NONE; + } + + return ret; +} + static bool csv3_guest(struct kvm *kvm) { struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; @@ -107,6 +155,16 @@ static int csv_issue_cmd(struct kvm *kvm, int id, void *data, int *error) return __csv_issue_cmd(sev->fd, id, data, error); } +static inline void csv3_init_update_npt(struct csv3_data_update_npt *update_npt, + gpa_t gpa, u32 error, u32 handle) +{ + 
memset(update_npt, 0x00, sizeof(*update_npt)); + + update_npt->gpa = gpa & PAGE_MASK; + update_npt->error_code = error; + update_npt->handle = handle; +} + static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -127,9 +185,20 @@ static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) csv->sev = sev; csv->nodemask = (unsigned long)params.nodemask; + INIT_LIST_HEAD(&csv->shared_pages_list); + INIT_LIST_HEAD(&csv->smr_list); + mutex_init(&csv->shared_page_block_lock); + return 0; } +static bool csv3_is_mmio_pfn(kvm_pfn_t pfn) +{ + return !e820__mapped_raw_any(pfn_to_hpa(pfn), + pfn_to_hpa(pfn + 1) - 1, + E820_TYPE_RAM); +} + static int csv3_set_guest_private_memory(struct kvm *kvm) { struct kvm_memslots *slots = kvm_memslots(kvm); @@ -417,6 +486,416 @@ static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, + unsigned long npages) +{ + gfn_t gfn; + gfn_t gfn_end; + + gfn = gpa >> PAGE_SHIFT; + gfn_end = gfn + npages; +#ifdef KVM_HAVE_MMU_RWLOCK + write_lock(&vcpu->kvm->mmu_lock); +#else + spin_lock(&vcpu->kvm->mmu_lock); +#endif + for (; gfn < gfn_end; gfn++) + kvm_vcpu_mark_page_dirty(vcpu, gfn); +#ifdef KVM_HAVE_MMU_RWLOCK + write_unlock(&vcpu->kvm->mmu_lock); +#else + spin_unlock(&vcpu->kvm->mmu_lock); +#endif +} + +static int csv3_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code) +{ + int r = 0; + struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); + union csv3_page_attr page_attr = {.mmio = 1}; + union csv3_page_attr page_attr_mask = {.mmio = 1}; + struct csv3_data_update_npt *update_npt; + int psp_ret; + + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); + if (!update_npt) { + r = -ENOMEM; + goto exit; + } + + csv3_init_update_npt(update_npt, gpa, error_code, + kvm_svm->sev_info.handle); + update_npt->page_attr = page_attr.val; + 
update_npt->page_attr_mask = page_attr_mask.val; + update_npt->level = CSV3_PG_LEVEL_4K; + + r = csv_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, update_npt, &psp_ret); + + if (psp_ret != SEV_RET_SUCCESS) + r = -EFAULT; + + kfree(update_npt); +exit: + return r; +} + +static int __csv3_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, + u32 error_code, struct kvm_memory_slot *slot, + int *psp_ret_ptr, kvm_pfn_t pfn, u32 level) +{ + int r = 0; + struct csv3_data_update_npt *update_npt; + struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); + int psp_ret = 0; + + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); + if (!update_npt) { + r = -ENOMEM; + goto exit; + } + + csv3_init_update_npt(update_npt, gpa, error_code, + kvm_svm->sev_info.handle); + + update_npt->spa = pfn << PAGE_SHIFT; + update_npt->level = level; + + if (!csv3_is_mmio_pfn(pfn)) + update_npt->spa |= sme_me_mask; + + r = csv_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, update_npt, &psp_ret); + + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + kvm_flush_remote_tlbs(vcpu->kvm); + + csv3_mark_page_dirty(vcpu, update_npt->gpa, update_npt->npages); + + if (psp_ret_ptr) + *psp_ret_ptr = psp_ret; + + kfree(update_npt); +exit: + return r; +} + +static int csv3_pin_shared_memory(struct kvm_vcpu *vcpu, + struct kvm_memory_slot *slot, gfn_t gfn, + kvm_pfn_t *pfn) +{ + struct page **pages, *page; + u64 hva; + int npinned; + kvm_pfn_t tmp_pfn; + struct kvm *kvm = vcpu->kvm; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct shared_page_block *shared_page_block = NULL; + u64 npages = PAGE_SIZE / sizeof(struct page *); + bool write = !(slot->flags & KVM_MEM_READONLY); + + tmp_pfn = __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, write, + NULL, NULL); + if (unlikely(is_error_pfn(tmp_pfn))) + return -ENOMEM; + + if (csv3_is_mmio_pfn(tmp_pfn)) { + *pfn = tmp_pfn; + return 0; + } + + if (!page_maybe_dma_pinned(pfn_to_page(tmp_pfn))) { + kvm_release_pfn_clean(tmp_pfn); + if (csv->total_shared_page_count % npages 
== 0) { + shared_page_block = kzalloc(sizeof(*shared_page_block), + GFP_KERNEL_ACCOUNT); + if (!shared_page_block) + return -ENOMEM; + + pages = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!pages) { + kfree(shared_page_block); + return -ENOMEM; + } + + shared_page_block->pages = pages; + list_add_tail(&shared_page_block->list, + &csv->shared_pages_list); + csv->cached_shared_page_block = shared_page_block; + } else { + shared_page_block = csv->cached_shared_page_block; + pages = shared_page_block->pages; + } + + hva = __gfn_to_hva_memslot(slot, gfn); + npinned = pin_user_pages_fast(hva, 1, FOLL_WRITE | FOLL_LONGTERM, + &page); + if (npinned != 1) { + if (shared_page_block->count == 0) { + list_del(&shared_page_block->list); + kfree(pages); + kfree(shared_page_block); + } + return -ENOMEM; + } + + pages[csv->total_shared_page_count % npages] = page; + shared_page_block->count++; + csv->total_shared_page_count++; + *pfn = page_to_pfn(page); + } else { + kvm_release_pfn_clean(tmp_pfn); + *pfn = tmp_pfn; + } + + return 0; +} + +static int __pfn_mapping_level(struct kvm *kvm, gfn_t gfn, + const struct kvm_memory_slot *slot) +{ + int level = PG_LEVEL_4K; + unsigned long hva; + unsigned long flags; + pgd_t pgd; + p4d_t p4d; + pud_t pud; + pmd_t pmd; + + /* + * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() + * is not solely for performance, it's also necessary to avoid the + * "writable" check in __gfn_to_hva_many(), which will always fail on + * read-only memslots due to gfn_to_hva() assuming writes. Earlier + * page fault steps have already verified the guest isn't writing a + * read-only memslot. + */ + hva = __gfn_to_hva_memslot(slot, gfn); + + /* + * Disable IRQs to prevent concurrent tear down of host page tables, + * e.g. if the primary MMU promotes a P*D to a huge page and then frees + * the original page table. + */ + local_irq_save(flags); + + /* + * Read each entry once. 
As above, a non-leaf entry can be promoted to + * a huge page _during_ this walk. Re-reading the entry could send the + * walk into the weeks, e.g. p*d_large() returns false (sees the old + * value) and then p*d_offset() walks into the target huge page instead + * of the old page table (sees the new value). + */ + pgd = READ_ONCE(*pgd_offset(kvm->mm, hva)); + if (pgd_none(pgd)) + goto out; + + p4d = READ_ONCE(*p4d_offset(&pgd, hva)); + if (p4d_none(p4d) || !p4d_present(p4d)) + goto out; + + pud = READ_ONCE(*pud_offset(&p4d, hva)); + if (pud_none(pud) || !pud_present(pud)) + goto out; + + if (pud_large(pud)) { + level = PG_LEVEL_1G; + goto out; + } + + pmd = READ_ONCE(*pmd_offset(&pud, hva)); + if (pmd_none(pmd) || !pmd_present(pmd)) + goto out; + + if (pmd_large(pmd)) + level = PG_LEVEL_2M; + +out: + local_irq_restore(flags); + return level; +} + +static int csv3_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn, + struct kvm_memory_slot *slot) +{ + int level; + int page_num; + gfn_t gfn_base; + + if (csv3_is_mmio_pfn(pfn)) { + level = PG_LEVEL_4K; + goto end; + } + + if (!PageCompound(pfn_to_page(pfn))) { + level = PG_LEVEL_4K; + goto end; + } + + level = PG_LEVEL_2M; + page_num = KVM_PAGES_PER_HPAGE(level); + gfn_base = gfn & ~(page_num - 1); + + /* + * 2M aligned guest address in memslot. + */ + if ((gfn_base < slot->base_gfn) || + (gfn_base + page_num > slot->base_gfn + slot->npages)) { + level = PG_LEVEL_4K; + goto end; + } + + /* + * hva in memslot is 2M aligned. + */ + if (__gfn_to_hva_memslot(slot, gfn_base) & ~PMD_MASK) { + level = PG_LEVEL_4K; + goto end; + } + + level = __pfn_mapping_level(vcpu->kvm, gfn, slot); + + /* + * Firmware supports 2M/4K level. + */ + level = level > PG_LEVEL_2M ? 
PG_LEVEL_2M : level; + +end: + return to_csv3_pg_level(level); +} + +static int csv3_page_fault(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, + gfn_t gfn, u32 error_code) +{ + int ret = 0; + int psp_ret = 0; + int level; + kvm_pfn_t pfn; + struct kvm_csv_info *csv = &to_kvm_svm_csv(vcpu->kvm)->csv_info; + + if (error_code & PFERR_PRESENT_MASK) + level = CSV3_PG_LEVEL_4K; + else { + mutex_lock(&csv->shared_page_block_lock); + ret = csv3_pin_shared_memory(vcpu, slot, gfn, &pfn); + mutex_unlock(&csv->shared_page_block_lock); + if (ret) + goto exit; + + level = csv3_mapping_level(vcpu, gfn, pfn, slot); + } + + ret = __csv3_page_fault(vcpu, gfn << PAGE_SHIFT, error_code, slot, + &psp_ret, pfn, level); + + if (psp_ret != SEV_RET_SUCCESS) + ret = -EFAULT; +exit: + return ret; +} + +static void csv_vm_destroy(struct kvm *kvm) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct list_head *head = &csv->shared_pages_list; + struct list_head *pos, *q; + struct shared_page_block *shared_page_block; + struct kvm_vcpu *vcpu; + unsigned long i = 0; + + struct list_head *smr_head = &csv->smr_list; + struct secure_memory_region *smr; + + if (csv3_guest(kvm)) { + mutex_lock(&csv->shared_page_block_lock); + if (!list_empty(head)) { + list_for_each_safe(pos, q, head) { + shared_page_block = list_entry(pos, + struct shared_page_block, list); + unpin_user_pages(shared_page_block->pages, + shared_page_block->count); + kfree(shared_page_block->pages); + csv->total_shared_page_count -= + shared_page_block->count; + list_del(&shared_page_block->list); + kfree(shared_page_block); + } + } + mutex_unlock(&csv->shared_page_block_lock); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + svm->current_vmcb->pa = __sme_pa(svm->vmcb); + } + } + + if (likely(csv_x86_ops.vm_destroy)) + csv_x86_ops.vm_destroy(kvm); + + if (!csv3_guest(kvm)) + return; + + /* free secure memory region */ + if (!list_empty(smr_head)) { + list_for_each_safe(pos, q, 
smr_head) { + smr = list_entry(pos, struct secure_memory_region, list); + if (smr) { + csv_release_to_contiguous(smr->hpa, smr->npages << PAGE_SHIFT); + list_del(&smr->list); + kfree(smr); + } + } + } +} + +static int csv3_handle_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, + u32 error_code) +{ + gfn_t gfn = gpa_to_gfn(gpa); + struct kvm_memory_slot *slot = gfn_to_memslot(vcpu->kvm, gfn); + int ret; + int r = -EIO; + + if (kvm_is_visible_memslot(slot)) + ret = csv3_page_fault(vcpu, slot, gfn, error_code); + else + ret = csv3_mmio_page_fault(vcpu, gpa, error_code); + + if (!ret) + r = 1; + + return r; +} + +static int csv_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) +{ + struct vcpu_svm *svm = to_svm(vcpu); + u32 exit_code = svm->vmcb->control.exit_code; + int ret = -EIO; + + /* + * NPF for csv3 is dedicated. + */ + if (csv3_guest(vcpu->kvm) && exit_code == SVM_EXIT_NPF) { + gpa_t gpa = __sme_clr(svm->vmcb->control.exit_info_2); + u64 error_code = svm->vmcb->control.exit_info_1; + + ret = csv3_handle_page_fault(vcpu, gpa, error_code); + } else { + if (likely(csv_x86_ops.handle_exit)) + ret = csv_x86_ops.handle_exit(vcpu, exit_fastpath); + } + + return ret; +} + +static void csv_guest_memory_reclaimed(struct kvm *kvm) +{ + if (!csv3_guest(kvm)) { + if (likely(csv_x86_ops.guest_memory_reclaimed)) + csv_x86_ops.guest_memory_reclaimed(kvm); + } +} + static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -462,6 +941,9 @@ void __init csv_init(struct kvm_x86_ops *ops) memcpy(&csv_x86_ops, ops, sizeof(struct kvm_x86_ops)); ops->mem_enc_ioctl = csv_mem_enc_op; + ops->vm_destroy = csv_vm_destroy; ops->vm_size = sizeof(struct kvm_svm_csv); + ops->handle_exit = csv_handle_exit; + ops->guest_memory_reclaimed = csv_guest_memory_reclaimed; } } -- Gitee From 02f3822000d774a498425f9221da662c69074070 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 15:27:59 +0800 Subject: [PATCH 0843/2138] anolis: 
x86/boot/compressed/64: Add CSV3 guest detection ANBZ: #8683 Check if CSV3 guest is active at boot compressed stage. It checks HYGON hardware with CPUID 0x00000000 and bit30 of MSR 0xc0010131. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/boot/compressed/Makefile | 1 + arch/x86/boot/compressed/csv.c | 37 ++++++++++++++++++++++++++++++ arch/x86/boot/compressed/csv.h | 23 +++++++++++++++++++ arch/x86/boot/compressed/head_64.S | 10 ++++++++ arch/x86/boot/compressed/misc.h | 1 + arch/x86/kernel/csv-shared.c | 16 +++++++++++++ 6 files changed, 88 insertions(+) create mode 100644 arch/x86/boot/compressed/csv.c create mode 100644 arch/x86/boot/compressed/csv.h create mode 100644 arch/x86/kernel/csv-shared.c diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 658e9ec065c4..872bb46f4640 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -108,6 +108,7 @@ ifdef CONFIG_X86_64 vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o vmlinux-objs-y += $(obj)/pgtable_64.o vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o + vmlinux-objs-$(CONFIG_HYGON_CSV) += $(obj)/csv.o endif vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c new file mode 100644 index 000000000000..79ffb8746d17 --- /dev/null +++ b/arch/x86/boot/compressed/csv.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon CSV Support + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ */ + +#include "misc.h" + +#include "../../kernel/csv-shared.c" + +static unsigned int csv3_enabled __section(".data"); + +void csv_set_status(void) +{ + unsigned int eax; + unsigned int ebx; + unsigned int ecx; + unsigned int edx; + + eax = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + + /* HygonGenuine */ + if (ebx == CPUID_VENDOR_HygonGenuine_ebx && + ecx == CPUID_VENDOR_HygonGenuine_ecx && + edx == CPUID_VENDOR_HygonGenuine_edx && + sme_me_mask) { + unsigned long low, high; + + asm volatile("rdmsr\n" : "=a" (low), "=d" (high) : + "c" (MSR_AMD64_SEV)); + + if (low & MSR_CSV3_ENABLED) + csv3_enabled = 1; + } +} diff --git a/arch/x86/boot/compressed/csv.h b/arch/x86/boot/compressed/csv.h new file mode 100644 index 000000000000..2331d4ade97f --- /dev/null +++ b/arch/x86/boot/compressed/csv.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hygon CSV header for early boot related functions. + * + * Copyright (C) Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef BOOT_COMPRESSED_CSV_H +#define BOOT_COMPRESSED_CSV_H + +#ifdef CONFIG_HYGON_CSV + +void csv_set_status(void); + +#else + +static inline void csv_set_status(void) { } + +#endif + +#endif /* BOOT_COMPRESSED_CSV_H */ diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 1dcb794c5479..215d74c1a6d9 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -397,6 +397,16 @@ SYM_CODE_START(startup_64) movq %r15, %rdi call sev_enable #endif +#ifdef CONFIG_HYGON_CSV + /* + * Check CSV active status. The CSV and CSV2 guest are indicated by + * MSR_AMD64_SEV_ENABLED_BIT and MSR_AMD64_SEV_ES_ENABLED_BIT in MSR + * register 0xc0010131, respectively. + * The CSV3 guest is indicated by MSR_CSV3_ENABLED in MSR register + * 0xc0010131. 
+ */ + call csv_set_status +#endif /* Preserve only the CR4 bits that must be preserved, and clear the rest */ movq %cr4, %rax diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index aae1a2db4251..674433c522ed 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -37,6 +37,7 @@ #include #include "tdx.h" +#include "csv.h" #define BOOT_CTYPE_H #include diff --git a/arch/x86/kernel/csv-shared.c b/arch/x86/kernel/csv-shared.c new file mode 100644 index 000000000000..e46f873fd69d --- /dev/null +++ b/arch/x86/kernel/csv-shared.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon CSV support + * + * This file is shared between decompression boot code and running + * linux kernel. + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#define CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 +#define CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 +#define CPUID_VENDOR_HygonGenuine_edx 0x6e65476e + +#define MSR_CSV3_ENABLED_BIT 30 +#define MSR_CSV3_ENABLED BIT_ULL(MSR_CSV3_ENABLED_BIT) -- Gitee From b821d8ad0be382abbc25c1e92a3a179d91caca86 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 15:39:50 +0800 Subject: [PATCH 0844/2138] anolis: x86/boot/compressed/64: Init CSV3 secure call pages ANBZ: #8683 CSV3 secure call is a method to communicate with the dedicated secure processor that host cannot tamper with. We declare two dedicated pages named secure call pages to hold the command which guest wants to send to the secure processor. The secure processor always sets only one page of the two as present in nested page table. Read/write action on the two pages will triger NPF then host must issue an external command to the secure processor. The secure processor gets the guest's command if the fault address is secure call page. CSV3 secure call command is used to set specified memory as shared or private in usual. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/boot/compressed/csv.c | 30 +++++ arch/x86/boot/compressed/csv.h | 2 + arch/x86/boot/compressed/head_64.S | 10 ++ arch/x86/kernel/csv-shared.c | 199 +++++++++++++++++++++++++++++ 4 files changed, 241 insertions(+) diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c index 79ffb8746d17..cad545271cc3 100644 --- a/arch/x86/boot/compressed/csv.c +++ b/arch/x86/boot/compressed/csv.c @@ -7,9 +7,39 @@ #include "misc.h" +#undef __init +#undef __initdata +#undef __pa +#define __init +#define __initdata +#define __pa(x) ((unsigned long)(x)) + #include "../../kernel/csv-shared.c" static unsigned int csv3_enabled __section(".data"); +static unsigned int csv3_secure_call_init; + +/* Invoke it before jump to real kernel in case secure call pages are not mapped + * in the identity page table. + * + * If no #VC happens, there is no identity mapping in page table for secure call + * pages. And page fault is not supported in the early stage when real kernel is + * running. As a result, CSV3 guest will shutdown when access secure call pages + * by then. + */ +void csv_init_secure_call_pages(void *boot_params) +{ + if (!csv3_enabled || csv3_secure_call_init) + return; + + /* + * boot_params may be not sanitized, but it's OK to access e820_table + * field. 
+ */ + csv3_scan_secure_call_pages(boot_params); + csv3_early_secure_call(0, 0, CSV3_SECURE_CMD_RESET); + csv3_secure_call_init = 1; +} void csv_set_status(void) { diff --git a/arch/x86/boot/compressed/csv.h b/arch/x86/boot/compressed/csv.h index 2331d4ade97f..3a2196b328c6 100644 --- a/arch/x86/boot/compressed/csv.h +++ b/arch/x86/boot/compressed/csv.h @@ -13,10 +13,12 @@ #ifdef CONFIG_HYGON_CSV void csv_set_status(void); +void csv_init_secure_call_pages(void *boot_params); #else static inline void csv_set_status(void) { } +static inline void csv_init_secure_call_pages(void *boot_params) { } #endif diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 215d74c1a6d9..e02a88b880f1 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -478,6 +478,16 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated) movq %r15, %rdi call initialize_identity_maps +#ifdef CONFIG_HYGON_CSV + /* + * If running as a CSV3 guest, secure call pages must be mapped in + * the identity page table before jumping to the decompressed kernel. + * Scan secure call pages here in safe. + */ + movq %r15, %rdi + call csv_init_secure_call_pages +#endif + /* * Do the extraction, and jump to the new kernel.. */ diff --git a/arch/x86/kernel/csv-shared.c b/arch/x86/kernel/csv-shared.c index e46f873fd69d..fd55e570bbbb 100644 --- a/arch/x86/kernel/csv-shared.c +++ b/arch/x86/kernel/csv-shared.c @@ -8,9 +8,208 @@ * Copyright (C) Hygon Info Technologies Ltd. */ +#include + #define CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 #define CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 #define CPUID_VENDOR_HygonGenuine_edx 0x6e65476e #define MSR_CSV3_ENABLED_BIT 30 #define MSR_CSV3_ENABLED BIT_ULL(MSR_CSV3_ENABLED_BIT) + +/* + ****************************** CSV3 secure call ******************************* + * + * CSV3 guest is based on hygon secure isolated virualization feature. An secure + * processor which resides in hygon SOC manages guest's private memory. 
The + * secure processor allocates or frees private memory for CSV3 guest and manages + * CSV3 guest's nested page table. + * + * As the secure processor is considered as a PCI device in host, CSV3 guest can + * not communicate with it directly. Howerver, CSV3 guest must request the secure + * processor to change its physical memory between private memory and shared + * memory. CSV3 secure call command is a method used to communicate with secure + * processor that host cannot tamper with the data in CSV3 guest. Host can only + * perform an external command to notify the secure processor to handle the + * pending guest's command. + * + * CSV3 secure call pages: + * Secure call pages are two dedicated pages that reserved by BIOS. We define + * secure call pages as page A and page B. During guest launch stage, the secure + * processor will parse the address of secure call pages. The secure processor + * maps the two pages with same private memory page in NPT. The secure processor + * always set one page as present and another page as non-present in NPT. + + * CSV3 secure call main work flow: + * If we write the guest's commands in one page then read them from another page, + * nested page fault happens and the guest exits to host. Then host will perform + * an external command with the gpa(page A or page B) to the secure processor. + * The secure processor checks that the gpa in NPF belongs to secure call pages, + * read the guest's command to handle, then switch the present bit between the + * two pages. + * + * guest page A guest page B + * | | + * ____|______________|____ + * | | + * | nested page table | + * |______________________| + * \ / + * \ / + * \ / + * \ / + * \ / + * secure memory page + * + * CSV3_SECURE_CMD_ENC: + * CSV3 guest declares a specifid memory range as secure. By default, all of + * CSV3 guest's memory mapped as secure. 
+ * The secure processor allocate a block of secure memory and map the memory + * in CSV3 guest's NPT with the specified guest physical memory range in CSV3 + * secure call. + * + * CSV3_SECURE_CMD_DEC: + * CSV3 guest declares a specified memory range as shared. + * The secure processor save the guest physical memory range in its own ram + * and free the range in CSV3 guest's NPT. When CSV3 guest access the memory, + * a new nested page fault happens. + * + * CSV3_SECURE_CMD_RESET: + * CSV3 guest switches all of the shared memory to secure. + * The secure processor resets all the shared memory in CSV3 guest's NPT and + * clears the saved shared memory range. Then the secure process allocates + * secure memory to map in CSV3 guest's NPT. + * + * CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE: + * CSV3 guest wants to change the secure call pages. + * The secure processor re-init the secure call context. + */ +enum csv3_secure_command_type { + CSV3_SECURE_CMD_ENC = 1, + CSV3_SECURE_CMD_DEC, + CSV3_SECURE_CMD_RESET, + CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE, +}; + +/* + * Secure call page fields. + * Secure call page size is 4KB always. We define CSV3 secure call page structure + * as below. + * guid: Must be in the first 128 bytes of the page. Its value should be + * (0xceba2fa59a5d926ful, 0xa556555d276b21abul) always. + * cmd_type: Command to be issued to the secure processor. + * nums: number of entries in the command. + * base_address:Start address of the memory range. + * size: Size of the memory range. + */ +#define SECURE_CALL_ENTRY_MAX (254) + +/* size of secure call cmd is 4KB. */ +struct csv3_secure_call_cmd { + union { + u8 guid[16]; + u64 guid_64[2]; + }; + u32 cmd_type; + u32 nums; + u64 unused; + struct { + u64 base_address; + u64 size; + } entry[SECURE_CALL_ENTRY_MAX]; +}; + +/* csv3 secure call guid, do not change the value. 
*/ +#define CSV3_SECURE_CALL_GUID_LOW 0xceba2fa59a5d926ful +#define CSV3_SECURE_CALL_GUID_HIGH 0xa556555d276b21abul + +static u64 csv3_boot_sc_page_a __initdata = -1ul; +static u64 csv3_boot_sc_page_b __initdata = -1ul; +static u32 early_page_idx __initdata; + +/** + * csv3_scan_secure_call_pages - try to find the secure call pages. + * @boot_params: boot parameters where e820_table resides. + * + * The secure call pages are reserved by BIOS. We scan all the reserved pages + * to check the CSV3 secure call guid bytes. + */ +void __init csv3_scan_secure_call_pages(struct boot_params *boot_params) +{ + struct boot_e820_entry *entry; + struct csv3_secure_call_cmd *sc_page; + u64 offset; + u64 addr; + u8 i; + u8 table_num; + int count = 0; + + if (!boot_params) + return; + + if (csv3_boot_sc_page_a != -1ul && csv3_boot_sc_page_b != -1ul) + return; + + table_num = min_t(u8, boot_params->e820_entries, + E820_MAX_ENTRIES_ZEROPAGE); + entry = &boot_params->e820_table[0]; + for (i = 0; i < table_num; i++) { + if (entry[i].type != E820_TYPE_RESERVED) + continue; + + addr = entry[i].addr & PAGE_MASK; + for (offset = 0; offset < entry[i].size; offset += PAGE_SIZE) { + sc_page = (void *)(addr + offset); + if (sc_page->guid_64[0] == CSV3_SECURE_CALL_GUID_LOW && + sc_page->guid_64[1] == CSV3_SECURE_CALL_GUID_HIGH) { + if (count == 0) + csv3_boot_sc_page_a = addr + offset; + else if (count == 1) + csv3_boot_sc_page_b = addr + offset; + count++; + } + if (count >= 2) + return; + } + } +} + +/** + * csv3_early_secure_call - issue early secure call command at the stage where + * identity page table is created. + * @base_address: Start address of the specified memory range. + * @num_pages: number of the specific pages. + * @cmd_type: Secure call cmd type. 
+ */ +void __init csv3_early_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul) + return; + + /* identity mapping at the stage. */ + page_rd = (void *)(early_page_idx ? csv3_boot_sc_page_a : csv3_boot_sc_page_b); + page_wr = (void *)(early_page_idx ? csv3_boot_sc_page_b : csv3_boot_sc_page_a); + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. + */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + early_page_idx ^= 1; +} -- Gitee From 7b3a22b88d1954a6175bc32879d70e1706e29aa2 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 09:26:46 +0800 Subject: [PATCH 0845/2138] anolis: x86/boot/compressed/64: Add CSV3 update page attr(private/shared) ANBZ: #8683 The function is needed to set encrypted page as private or set decrypted page as shared at the stage where identity page table is created. By default, all memory is set as private. CSV3 guest's NPT is managed by the secure processor. The secure processor must perform the correct action for private/shared memory. The secure processor manages the guest's secure isolated memory which cannot be accessed by other guest or host. As CSV3 feaure, CSV3 guest's encrypted memory maps to secure isolated memory and decrypted memory which is shared with host maps to normal memory. At the stage of kernel decompressing, only GHCB page is set as shared. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/boot/compressed/csv.c | 14 ++++++++++++++ arch/x86/boot/compressed/csv.h | 5 +++++ arch/x86/boot/compressed/ident_map_64.c | 3 +++ 3 files changed, 22 insertions(+) diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c index cad545271cc3..ae6ca1484048 100644 --- a/arch/x86/boot/compressed/csv.c +++ b/arch/x86/boot/compressed/csv.c @@ -19,6 +19,20 @@ static unsigned int csv3_enabled __section(".data"); static unsigned int csv3_secure_call_init; +void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr) +{ + if (!csv3_enabled) + return; + + if ((set | clr) & _PAGE_ENC) { + if (set & _PAGE_ENC) + csv3_early_secure_call(__pa(address), 1, CSV3_SECURE_CMD_ENC); + + if (clr & _PAGE_ENC) + csv3_early_secure_call(__pa(address), 1, CSV3_SECURE_CMD_DEC); + } +} + /* Invoke it before jump to real kernel in case secure call pages are not mapped * in the identity page table. 
* diff --git a/arch/x86/boot/compressed/csv.h b/arch/x86/boot/compressed/csv.h index 3a2196b328c6..8b8a33551895 100644 --- a/arch/x86/boot/compressed/csv.h +++ b/arch/x86/boot/compressed/csv.h @@ -15,11 +15,16 @@ void csv_set_status(void); void csv_init_secure_call_pages(void *boot_params); +void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr); + #else static inline void csv_set_status(void) { } static inline void csv_init_secure_call_pages(void *boot_params) { } +static inline void csv_update_page_attr(unsigned long address, + pteval_t set, pteval_t clr) { } + #endif #endif /* BOOT_COMPRESSED_CSV_H */ diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c index aead80ec70a0..a7b4148a943f 100644 --- a/arch/x86/boot/compressed/ident_map_64.c +++ b/arch/x86/boot/compressed/ident_map_64.c @@ -298,6 +298,9 @@ static int set_clr_page_flags(struct x86_mapping_info *info, if ((set | clr) & _PAGE_ENC) { clflush_page(address); + /* On CSV3, notify secure processor to manage page attr changes */ + csv_update_page_attr(address, set, clr); + /* * If the encryption attribute is being cleared, change the page state * to shared in the RMP table. -- Gitee From 14383fb62d7067fc61134682b9e7078890fd4f41 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 09:32:56 +0800 Subject: [PATCH 0846/2138] anolis: x86/kernel: Add CSV3 early update(enc/dec)/reset memory helpers ANBZ: #8683 The functions are needed to set memory as private/shared memory or reset all memory as private memory at the stage where the identity mapping page table is available. Generally, at early runtime of the decompressed kernel, it needs to obtain CSV3 secure call pages then reset all memory as private before switching to new kernel page table. Otherwise, prior shared memory regions will be wrongly used and private data in guest may be accessed maliciously. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/include/asm/csv.h | 12 +++++++ arch/x86/kernel/Makefile | 2 ++ arch/x86/kernel/csv.c | 68 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+) create mode 100644 arch/x86/kernel/csv.c diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index 68f55e1b857b..1fa9b4396c68 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -32,6 +32,12 @@ void csv_release_to_contiguous(phys_addr_t pa, size_t size); uint32_t csv_get_smr_entry_shift(void); +bool csv3_active(void); + +void __init csv_early_reset_memory(struct boot_params *bp); +void __init csv_early_update_memory_enc(u64 vaddr, u64 pages); +void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); + #else /* !CONFIG_HYGON_CSV */ #define csv_smr NULL @@ -46,6 +52,12 @@ static inline void csv_release_to_contiguous(phys_addr_t pa, size_t size) { } static inline uint32_t csv_get_smr_entry_shift(void) { return 0; } +static inline bool csv3_active(void) { return false; } + +static inline void __init csv_early_reset_memory(struct boot_params *bp) { } +static inline void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) { } +static inline void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) { } + #endif /* CONFIG_HYGON_CSV */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 0000325ab98f..c25d40cbbdbe 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -160,3 +160,5 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_MMCONF_FAM10H) += mmconf-fam10h_64.o obj-y += vsmp_64.o endif + +obj-$(CONFIG_HYGON_CSV) += csv.o diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c new file mode 100644 index 000000000000..a90246e31ae5 --- /dev/null +++ b/arch/x86/kernel/csv.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON CSV support + * + 
* Copyright (C) Hygon Info Technologies Ltd. + */ + +#include +#include +#include +#include +#include + +#include "../mm/mm_internal.h" +#include "csv-shared.c" + +u32 vendor_ebx __section(".data") = 0; +u32 vendor_ecx __section(".data") = 0; +u32 vendor_edx __section(".data") = 0; + +struct secure_call_pages { + struct csv3_secure_call_cmd page_a; + struct csv3_secure_call_cmd page_b; +}; + +bool noinstr csv3_active(void) +{ + if (vendor_ebx == 0 || vendor_ecx == 0 || vendor_edx == 0) { + u32 eax = 0; + + native_cpuid(&eax, &vendor_ebx, &vendor_ecx, &vendor_edx); + } + + /* HygonGenuine */ + if (vendor_ebx == CPUID_VENDOR_HygonGenuine_ebx && + vendor_ecx == CPUID_VENDOR_HygonGenuine_ecx && + vendor_edx == CPUID_VENDOR_HygonGenuine_edx) + return !!(sev_status & MSR_CSV3_ENABLED); + else + return false; +} + +void __init csv_early_reset_memory(struct boot_params *bp) +{ + if (!csv3_active()) + return; + + csv3_scan_secure_call_pages(bp); + csv3_early_secure_call(0, 0, CSV3_SECURE_CMD_RESET); +} + +void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) +{ + if (!csv3_active()) + return; + + if (pages) + csv3_early_secure_call(__pa(vaddr), pages, CSV3_SECURE_CMD_DEC); +} + +void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) +{ + if (!csv3_active()) + return; + + if (pages) + csv3_early_secure_call(__pa(vaddr), pages, CSV3_SECURE_CMD_ENC); +} -- Gitee From fa7dae806e2cb68007f23585249f64a35fc5aac5 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 09:36:33 +0800 Subject: [PATCH 0847/2138] anolis: x86/kernel: Set bss decrypted memory as shared in CSV3 guest ANBZ: #8683 Guest kernel declares bss decrypted memory section to share data with host. In CSV3 guest, the decrypted memory must be set as shared. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/kernel/head64.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index ecedc296b303..369342de9c4e 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -42,6 +42,7 @@ #include #include #include +#include /* * Manage page tables very early on. @@ -160,6 +161,14 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv i = pmd_index(vaddr); pmd[i] -= sme_get_me_mask(); } + + /* On CSV3, move the shared pages out of isolated memory region. */ + if (csv3_active()) { + vaddr = (unsigned long)__start_bss_decrypted; + csv_early_reset_memory(bp); + csv_early_update_memory_dec((unsigned long)vaddr, + (vaddr_end - vaddr) >> PAGE_SHIFT); + } } /* -- Gitee From 2da45e67fa68928f386f34719a452b6453b9aaeb Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 09:42:09 +0800 Subject: [PATCH 0848/2138] anolis: x86: Update memory shared/private attribute in early boot for CSV3 guest ANBZ: #8683 Add functions to change the memory shared/private attribute in early boot code. When CSV3 is active, the decrypted memory must be mapped to normal (non-isolated) memory in nested page table so that hypervisor and guest can access shared data. But in-place encrypt/decrypt action on the memory is not applicable in CSV3 as CSV3 guest's private page will not be changed to shared page until the secure processor update NPT. Also new secure call pages should be initialized for per cpu to support multiple cpu secure call commands simultaneously. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/include/asm/csv.h | 5 + arch/x86/kernel/csv.c | 186 ++++++++++++++++++++++++++++++++++ arch/x86/mm/mem_encrypt_amd.c | 14 +++ 3 files changed, 205 insertions(+) diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index 1fa9b4396c68..0cc4e48b9ade 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -38,6 +38,8 @@ void __init csv_early_reset_memory(struct boot_params *bp); void __init csv_early_update_memory_enc(u64 vaddr, u64 pages); void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); +void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc); + #else /* !CONFIG_HYGON_CSV */ #define csv_smr NULL @@ -58,6 +60,9 @@ static inline void __init csv_early_reset_memory(struct boot_params *bp) { } static inline void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) { } static inline void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) { } +static inline void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, + bool enc) { } + #endif /* CONFIG_HYGON_CSV */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c index a90246e31ae5..9f3b0f3d3cd9 100644 --- a/arch/x86/kernel/csv.c +++ b/arch/x86/kernel/csv.c @@ -23,6 +23,15 @@ struct secure_call_pages { struct csv3_secure_call_cmd page_b; }; +static u32 csv3_percpu_secure_call_init __initdata; +static u32 early_secure_call_page_idx __initdata; + +static DEFINE_PER_CPU(struct secure_call_pages*, secure_call_data); +static DEFINE_PER_CPU(int, secure_call_page_idx); + +typedef void (*csv3_secure_call_func)(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type); + bool noinstr csv3_active(void) { if (vendor_ebx == 0 || vendor_ecx == 0 || vendor_edx == 0) { @@ -66,3 +75,180 @@ void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) if (pages) 
csv3_early_secure_call(__pa(vaddr), pages, CSV3_SECURE_CMD_ENC); } + +static void __init csv3_alloc_secure_call_data(int cpu) +{ + struct secure_call_pages *data; + + data = memblock_alloc(sizeof(*data), PAGE_SIZE); + if (!data) + panic("Can't allocate CSV3 secure all data"); + + per_cpu(secure_call_data, cpu) = data; +} + +static void __init csv3_secure_call_update_table(void) +{ + int cpu; + struct secure_call_pages *data; + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (!csv3_active()) + return; + + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, PAGE_SIZE); + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, PAGE_SIZE); + + while (1) { + page_wr->cmd_type = CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE; + page_wr->nums = 0; + + /* initialize per-cpu secure call pages */ + for_each_possible_cpu(cpu) { + if (cpu >= SECURE_CALL_ENTRY_MAX) + panic("csv does not support cpus > %d\n", + SECURE_CALL_ENTRY_MAX); + csv3_alloc_secure_call_data(cpu); + data = per_cpu(secure_call_data, cpu); + per_cpu(secure_call_page_idx, cpu) = 0; + page_wr->entry[cpu].base_address = __pa(data); + page_wr->entry[cpu].size = PAGE_SIZE * 2; + page_wr->nums++; + } + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. + */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE) + break; + } + + early_memunmap(page_rd, PAGE_SIZE); + early_memunmap(page_wr, PAGE_SIZE); +} + +/** + * __csv3_early_secure_call - issue secure call command at the stage where new + * kernel page table is created and early identity page + * table is deprecated . + * @base_address: Start address of the specified memory range. + * @num_pages: number of the specific pages. + * @cmd_type: Secure call cmd type. 
+ */ +static void __init __csv3_early_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul) + return; + + if (!csv3_percpu_secure_call_init) { + csv3_secure_call_update_table(); + csv3_percpu_secure_call_init = 1; + } + + if (early_secure_call_page_idx == 0) { + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, + PAGE_SIZE); + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, + PAGE_SIZE); + } else { + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, + PAGE_SIZE); + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, + PAGE_SIZE); + } + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. + */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + + early_memunmap(page_rd, PAGE_SIZE); + early_memunmap(page_wr, PAGE_SIZE); + + early_secure_call_page_idx ^= 1; +} + + +static void __csv3_memory_enc_dec(csv3_secure_call_func secure_call, u64 vaddr, + u64 pages, bool enc) +{ + u64 vaddr_end, vaddr_next; + u64 psize, pmask; + u64 last_paddr, paddr; + u64 last_psize = 0; + pte_t *kpte; + int level; + enum csv3_secure_command_type cmd_type; + + cmd_type = enc ? 
CSV3_SECURE_CMD_ENC : CSV3_SECURE_CMD_DEC; + vaddr_next = vaddr; + vaddr_end = vaddr + (pages << PAGE_SHIFT); + for (; vaddr < vaddr_end; vaddr = vaddr_next) { + kpte = lookup_address(vaddr, &level); + if (!kpte || pte_none(*kpte)) { + panic("invalid pte, vaddr 0x%llx\n", vaddr); + goto out; + } + + psize = page_level_size(level); + pmask = page_level_mask(level); + + vaddr_next = (vaddr & pmask) + psize; + paddr = ((pte_pfn(*kpte) << PAGE_SHIFT) & pmask) + + (vaddr & ~pmask); + psize -= (vaddr & ~pmask); + + if (vaddr_end - vaddr < psize) + psize = vaddr_end - vaddr; + if (last_psize == 0 || (last_paddr + last_psize) == paddr) { + last_paddr = (last_psize == 0 ? paddr : last_paddr); + last_psize += psize; + } else { + secure_call(last_paddr, last_psize >> PAGE_SHIFT, + cmd_type); + last_paddr = paddr; + last_psize = psize; + } + } + + if (last_psize) + secure_call(last_paddr, last_psize >> PAGE_SHIFT, cmd_type); + +out: + return; +} + +void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc) +{ + u64 npages; + + if (!csv3_active()) + return; + + npages = (size + (vaddr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT; + __csv3_memory_enc_dec(__csv3_early_secure_call, vaddr & PAGE_MASK, + npages, enc); +} diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index 1873a65b5655..9645bf5d6f95 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "mm_internal.h" @@ -377,6 +378,9 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) */ clflush_cache_range(__va(pa), size); + if (csv3_active()) + goto skip_in_place_enc_dec; + /* Encrypt/decrypt the contents in-place */ if (enc) { sme_early_encrypt(pa, size); @@ -390,6 +394,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1); } +skip_in_place_enc_dec: /* Change the page encryption mask. 
*/ new_pte = pfn_pte(pfn, new_prot); set_pte_atomic(kpte, new_pte); @@ -469,6 +474,15 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr, early_set_mem_enc_dec_hypercall(start, size, enc); out: __flush_tlb_all(); + + /* + * On CSV3, the shared and private page attr changes should be managed + * by secure processor. Private pages live in isolated memory region, + * while shared pages live out of isolated memory region. + */ + if (csv3_active()) + csv_early_memory_enc_dec(vaddr_end - size, size, enc); + return ret; } -- Gitee From 6a5ee8eda2d1c325bb00b6812cd2a29ec3fd1602 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 09:47:23 +0800 Subject: [PATCH 0849/2138] anolis: x86: Add support for changing the memory attribute for CSV3 guest ANBZ: #8683 Add support for changing the memory to private or shared memory for multiple pages if CSV3 is active. When CSV3 guest wants to share data with host like SWIOTLB or change the unused shared memory to private memory, it must perform an secure call command to the secure processor to update mapping in nested page table. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/include/asm/csv.h | 4 +++ arch/x86/kernel/csv.c | 52 +++++++++++++++++++++++++++++++++++ arch/x86/mm/mem_encrypt_amd.c | 8 ++++++ 3 files changed, 64 insertions(+) diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index 0cc4e48b9ade..30e8a89ce8c0 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -40,6 +40,8 @@ void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc); +void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc); + #else /* !CONFIG_HYGON_CSV */ #define csv_smr NULL @@ -63,6 +65,8 @@ static inline void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) { } static inline void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc) { } +static inline void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) { } + #endif /* CONFIG_HYGON_CSV */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c index 9f3b0f3d3cd9..c0ad12aa94f3 100644 --- a/arch/x86/kernel/csv.c +++ b/arch/x86/kernel/csv.c @@ -191,6 +191,50 @@ static void __init __csv3_early_secure_call(u64 base_address, u64 num_pages, early_secure_call_page_idx ^= 1; } +static void csv3_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + u32 cmd_ack; + struct secure_call_pages *data; + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + int page_idx; + int cpu; + + preempt_disable(); + + cpu = smp_processor_id(); + data = per_cpu(secure_call_data, cpu); + page_idx = per_cpu(secure_call_page_idx, cpu); + + if (page_idx == 0) { + page_rd = &data->page_a; + page_wr = &data->page_b; + } else { + page_rd = &data->page_b; + page_wr = &data->page_a; + } + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + 
page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the smp_mb below. + */ + smp_mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + + per_cpu(secure_call_page_idx, cpu) ^= 1; + preempt_enable(); +} static void __csv3_memory_enc_dec(csv3_secure_call_func secure_call, u64 vaddr, u64 pages, bool enc) @@ -252,3 +296,11 @@ void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc) __csv3_memory_enc_dec(__csv3_early_secure_call, vaddr & PAGE_MASK, npages, enc); } + +void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) +{ + if (!csv3_active()) + return; + + __csv3_memory_enc_dec(csv3_secure_call, vaddr & PAGE_MASK, pages, enc); +} diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index 9645bf5d6f95..f7d88ad030b9 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -345,6 +345,14 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc); + /* + * On CSV3, the shared and private page attr changes should be managed + * by secure processor. Private pages live in isolated memory region, + * while shared pages live out of isolated memory region. + */ + if (csv3_active()) + csv_memory_enc_dec(vaddr, npages, enc); + return true; } -- Gitee From 1aa190083600334b77263d01fa1548a75c20cc50 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 13:20:27 +0800 Subject: [PATCH 0850/2138] anolis: x86/mm: Print CSV3 info into kernel log ANBZ: #8683 Print Hygon secure virtualization feature. Add CSV3 info in feature list if CSV3 is active. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2996 --- arch/x86/mm/mem_encrypt.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index b97261dfd13d..054f6113be67 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -12,6 +12,7 @@ #include #include #include +#include /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */ bool force_dma_unencrypted(struct device *dev) @@ -58,6 +59,9 @@ static void print_hygon_cc_feature_info(void) /* Encrypted Register State */ if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) pr_info(" HYGON CSV2"); + + if (csv3_active()) + pr_info(" HYGON CSV3"); } static void print_mem_encrypt_feature_info(void) -- Gitee From 83fd74e681088e655d43e4cafd3ff32200f8bdba Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 13:25:38 +0800 Subject: [PATCH 0851/2138] anolis: mm/cma: add API to enable concurrent allocation from the CMA ANBZ: #8687 The mutex prevents allocating CMA memory concurently, and it's removed and reverted back and forth, refer to patch 60a60e32cf91 and 60a60e32cf91 from mainline. To solve the awkward dilemma, an API to enable concurrency is added, it's up to user to decide whether their CMA can handle concurrent allocations. 
Signed-off-by: Yangwencheng Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2997 --- include/linux/cma.h | 1 + mm/cma.c | 14 ++++++++++++-- mm/cma.h | 1 + 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/include/linux/cma.h b/include/linux/cma.h index 4dadf9a05752..326ec54b8efa 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -57,4 +57,5 @@ extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data) extern void cma_reserve_pages_on_error(struct cma *cma); extern int __init cma_alloc_areas(unsigned int max_cma_size); +extern void cma_enable_concurrency(struct cma *cma); #endif diff --git a/mm/cma.c b/mm/cma.c index 5af7642e607b..304a4e69180c 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -492,10 +492,12 @@ struct page *cma_alloc(struct cma *cma, unsigned long count, spin_unlock_irq(&cma->lock); pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); - mutex_lock(&cma_mutex); + if (!cma->no_mutex) + mutex_lock(&cma_mutex); ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, GFP_KERNEL | (no_warn ? 
__GFP_NOWARN : 0)); - mutex_unlock(&cma_mutex); + if (!cma->no_mutex) + mutex_unlock(&cma_mutex); if (ret == 0) { page = pfn_to_page(pfn); break; @@ -609,3 +611,11 @@ int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data) return 0; } + +void cma_enable_concurrency(struct cma *cma) +{ + if (!cma) + return; + + cma->no_mutex = true; +} diff --git a/mm/cma.h b/mm/cma.h index 12aba820969c..50275c1d98cc 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -16,6 +16,7 @@ struct cma { unsigned long *bitmap; unsigned int order_per_bit; /* Order of pages represented by one bit */ spinlock_t lock; + bool no_mutex; #ifdef CONFIG_CMA_DEBUGFS struct hlist_head mem_head; spinlock_t mem_head_lock; -- Gitee From fa2a22e41cef63e8370271d3bc32cd256dbc48ed Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 13:28:39 +0800 Subject: [PATCH 0852/2138] anolis: x86/mm: CSV allows CMA allocation concurrently ANBZ: #8687 CSV allows CMA allocation concurrently. Signed-off-by: Yangwencheng Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2997 --- arch/x86/mm/csv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/mm/csv.c b/arch/x86/mm/csv.c index fe5ca7ed4493..09f2cb7b358a 100644 --- a/arch/x86/mm/csv.c +++ b/arch/x86/mm/csv.c @@ -168,6 +168,7 @@ static void __init csv_cma_reserve_mem(void) 1 << CSV_CMA_SHIFT, node); break; } + cma_enable_concurrency(csv_cma->cma); if (start > cma_get_base(csv_cma->cma) || !start) start = cma_get_base(csv_cma->cma); -- Gitee From 9f034096db2f1b64415a5c1e8beb197f476780ed Mon Sep 17 00:00:00 2001 From: Wang Yinfeng Date: Tue, 22 Feb 2022 22:13:07 +0800 Subject: [PATCH 0853/2138] anolis: ipmi_si: Phytium S2500 workaround for MMIO-based IPMI ANBZ: #8712 phytium inclusion category: bugfix CVE: NA -------------------------------- The system would hang up when the Phytium S2500 communicates with some BMCs after several rounds of transactions, unless we reset the 
controller timeout counter manually by calling firmware through SMC. Signed-off-by: Wang Yinfeng Signed-off-by: Chen Baozi Signed-off-by: Jiakun Shuai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3016 --- drivers/char/ipmi/ipmi_si_mem_io.c | 76 ++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c index 86b92e93a70d..dc6cf7d89fea 100644 --- a/drivers/char/ipmi/ipmi_si_mem_io.c +++ b/drivers/char/ipmi/ipmi_si_mem_io.c @@ -3,9 +3,77 @@ #include #include "ipmi_si.h" +#ifdef CONFIG_ARCH_PHYTIUM +#include + +#define CTL_RST_FUNC_ID 0xC2000011 + +static bool apply_phytium2500_workaround; + +struct ipmi_workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE + 1]; +}; + +#ifdef CONFIG_ACPI +static struct ipmi_workaround_oem_info wa_info[] = { + { + .oem_id = "KPSVVJ", + } +}; +#endif + +static void ipmi_check_phytium_workaround(void) +{ +#ifdef CONFIG_ACPI + struct acpi_table_header tbl; + int i; + + if (ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_DSDT, 0, &tbl))) + return; + + for (i = 0; i < ARRAY_SIZE(wa_info); i++) { + if (strncmp(wa_info[i].oem_id, tbl.oem_id, ACPI_OEM_ID_SIZE)) + continue; + + apply_phytium2500_workaround = true; + break; + } +#endif +} + +static void ctl_smc(unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3) +{ + struct arm_smccc_res res; + + arm_smccc_smc(arg0, arg1, arg2, arg3, 0, 0, 0, 0, &res); + if (res.a0 != 0) + pr_err("Error: Firmware call SMC reset Failed: %d, addr: 0x%lx\n", + (int)res.a0, arg2); +} + +static void ctl_timeout_reset(void) +{ + ctl_smc(CTL_RST_FUNC_ID, 0x1, 0x28100208, 0x1); + ctl_smc(CTL_RST_FUNC_ID, 0x1, 0x2810020C, 0x1); +} + +static inline void ipmi_phytium_workaround(void) +{ + if (apply_phytium2500_workaround) + ctl_timeout_reset(); +} + +#else +static inline void ipmi_check_phytium_workaround(void) {} +static inline void ipmi_phytium_workaround(void) {} +#endif + 
static unsigned char intf_mem_inb(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return readb((io->addr)+(offset * io->regspacing)); } @@ -18,6 +86,8 @@ static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset, static unsigned char intf_mem_inw(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -31,6 +101,8 @@ static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset, static unsigned char intf_mem_inl(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -44,6 +116,8 @@ static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset, #ifdef readq static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -81,6 +155,8 @@ int ipmi_si_mem_setup(struct si_sm_io *io) if (!addr) return -ENODEV; + ipmi_check_phytium_workaround(); + /* * Figure out the actual readb/readw/readl/etc routine to use based * upon the register size. -- Gitee From 4abf40d2afd5fb06bdb4d1fd3b379144c8caa9c4 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Fri, 15 Mar 2024 14:28:48 +0800 Subject: [PATCH 0854/2138] anolis: crypto: ccp: Define CSV3 migration command id ANBZ: #8688 Define CSV3 migration command id and structure. The command definition is available in CSV3 spec. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3004 --- drivers/crypto/ccp/sev-dev.c | 8 +++ include/linux/psp-csv.h | 97 ++++++++++++++++++++++++++++++++++++ 2 files changed, 105 insertions(+) diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index ba4f9de8b783..e5f41abb0129 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -204,6 +204,14 @@ static int sev_cmd_buffer_len(int cmd) return sizeof(struct csv3_data_dbg_read_vmsa); case CSV3_CMD_DBG_READ_MEM: return sizeof(struct csv3_data_dbg_read_mem); + case CSV3_CMD_SEND_ENCRYPT_DATA: + return sizeof(struct csv3_data_send_encrypt_data); + case CSV3_CMD_SEND_ENCRYPT_CONTEXT: + return sizeof(struct csv3_data_send_encrypt_context); + case CSV3_CMD_RECEIVE_ENCRYPT_DATA: + return sizeof(struct csv3_data_receive_encrypt_data); + case CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT: + return sizeof(struct csv3_data_receive_encrypt_context); default: break; } diff --git a/include/linux/psp-csv.h b/include/linux/psp-csv.h index 960459375cd6..2da1adea8d33 100644 --- a/include/linux/psp-csv.h +++ b/include/linux/psp-csv.h @@ -188,4 +188,101 @@ struct csv3_data_dbg_read_mem { u32 size; /* In */ } __packed; +/** + * struct csv3_data_send_encrypt_data - SEND_ENCRYPT_DATA command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @guest_block: physical address containing multiple guest address + * @guest_len: len of guest block + * @flag: flag of send encrypt data + * 0x00000000: migrate pages in guest block + * 0x00000001: set readonly of pages in guest block + * others: invalid + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_send_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In 
*/ + u32 hdr_len; /* In/Out */ + u32 reserved1; /* In */ + u64 guest_block; /* In */ + u32 guest_len; /* In */ + u32 flag; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In/Out */ +} __packed; + +/** + * struct csv3_data_send_encrypt_context - SEND_ENCRYPT_CONTEXT command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_send_encrypt_context { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In/Out */ + u32 reserved1; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In/Out */ +} __packed; + +/** + * struct csv3_data_receive_encrypt_data - RECEIVE_ENCRYPT_DATA command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header blob + * @hdr_len: len of packet header + * @guest_block: system physical address containing multiple guest address + * @guest_len: len of guest block memory region + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_receive_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved1; /* In */ + u64 guest_block; /* In */ + u32 guest_len; /* In */ + u32 reserved2; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In */ +} __packed; + +/** + * struct csv3_data_receive_encrypt_context - RECEIVE_ENCRYPT_CONTEXT command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + * @shadow_vmcb_block: 
physical address of a page containing multiple shadow vmcb address + * @secure_vmcb_block: physical address of a page containing multiple secure vmcb address + * @vmcb_block_len: len of shadow/secure vmcb block + */ +struct csv3_data_receive_encrypt_context { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved1; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In */ + u32 reserved2; /* In */ + u64 shadow_vmcb_block; /* In */ + u64 secure_vmcb_block; /* In */ + u32 vmcb_block_len; /* In */ +} __packed; + #endif -- Gitee From 98a0144d6e4c94f3db088e738dd2b803f8606ee4 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Wed, 25 Oct 2023 10:01:57 +0800 Subject: [PATCH 0855/2138] anolis: KVM: SVM: CSV: Add KVM_CSV3_SEND_ENCRYPT_DATA command ANBZ: #8688 The command is used for encrypting the guest memory page using the encryption context. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3004 --- arch/x86/kvm/svm/csv.c | 171 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 10 +++ 2 files changed, 181 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index b64619528726..454ba28f7702 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -42,6 +43,18 @@ union csv3_page_attr { u64 val; }; +struct guest_paddr_block { + struct { + u64 share: 1; + u64 reserved: 11; + u64 gfn: 52; + } entry[512]; +}; + +struct trans_paddr_block { + u64 trans_paddr[512]; +}; + enum csv3_pg_level { CSV3_PG_LEVEL_NONE, CSV3_PG_LEVEL_4K, @@ -486,6 +499,161 @@ static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +/* Userspace wants to query either header or trans length. 
*/ +static int +csv3_send_encrypt_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv3_send_encrypt_data *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_data data; + int ret; + + memset(&data, 0, sizeof(data)); + data.handle = sev->handle; + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, &data, &argp->error); + + params->hdr_len = data.hdr_len; + params->trans_len = data.trans_len; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, sizeof(*params))) + ret = -EFAULT; + + return ret; +} + +#define CSV3_SEND_ENCRYPT_DATA_MIGRATE_PAGE 0x00000000 +#define CSV3_SEND_ENCRYPT_DATA_SET_READONLY 0x00000001 +static int csv3_send_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_data data; + struct kvm_csv3_send_encrypt_data params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct guest_paddr_block *guest_block; + unsigned long pfn; + u32 offset; + int ret = 0; + int i; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return csv3_send_encrypt_data_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.guest_addr_data || + !params.guest_addr_len || !params.hdr_uaddr) + return -EINVAL; + + if (params.guest_addr_len > sizeof(*guest_block)) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + if ((params.trans_len & PAGE_MASK) == 0 || + (params.trans_len & ~PAGE_MASK) != 0) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + guest_block = 
kzalloc(sizeof(*guest_block), GFP_KERNEL_ACCOUNT); + if (!guest_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + + if (copy_from_user(guest_block, + (void __user *)(uintptr_t)params.guest_addr_data, + params.guest_addr_len)) { + ret = -EFAULT; + goto e_free_guest_block; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_guest_block; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + + data.guest_block = __psp_pa(guest_block); + data.guest_len = params.guest_addr_len; + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(guest_block, PAGE_SIZE); + + data.flag = CSV3_SEND_ENCRYPT_DATA_SET_READONLY; + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, &data, &argp->error); + if (ret) + goto e_free_trans_data; + + kvm_flush_remote_tlbs(kvm); + + data.flag = CSV3_SEND_ENCRYPT_DATA_MIGRATE_PAGE; + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, &data, &argp->error); + if (ret) + goto e_free_trans_data; + + ret = -EFAULT; + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) + goto e_free_trans_data; + + /* copy guest address block to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.guest_addr_data, + guest_block, params.guest_addr_len)) + goto e_free_trans_data; + + /* copy packet header to userspace. 
*/ + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) + goto e_free_trans_data; + + ret = 0; +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_guest_block: + kfree(guest_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, unsigned long npages) { @@ -919,6 +1087,9 @@ static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_CSV3_LAUNCH_ENCRYPT_VMCB: r = csv3_launch_encrypt_vmcb(kvm, &sev_cmd); break; + case KVM_CSV3_SEND_ENCRYPT_DATA: + r = csv3_send_encrypt_data(kvm, &sev_cmd); + break; default: mutex_unlock(&kvm->lock); if (likely(csv_x86_ops.mem_enc_ioctl)) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 6d8833ac456e..ec36a2314b6b 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2318,6 +2318,7 @@ enum csv3_cmd_id { KVM_CSV3_INIT = KVM_CSV3_NR_MIN, KVM_CSV3_LAUNCH_ENCRYPT_DATA, KVM_CSV3_LAUNCH_ENCRYPT_VMCB, + KVM_CSV3_SEND_ENCRYPT_DATA, KVM_CSV3_NR_MAX, }; @@ -2332,4 +2333,13 @@ struct kvm_csv3_launch_encrypt_data { __u32 len; }; +struct kvm_csv3_send_encrypt_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_addr_data; + __u32 guest_addr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From 2839e2415f7ccd19a3bca5b33801c619c58ae1bf Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Wed, 25 Oct 2023 10:05:56 +0800 Subject: [PATCH 0856/2138] anolis: KVM: SVM: CSV: Add KVM_CSV3_SEND_ENCRYPT_CONTEXT command ANBZ: #8688 The command is used for encrypting the guest cpu context using the encryption context. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3004 --- arch/x86/kvm/svm/csv.c | 118 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 +++ 2 files changed, 126 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 454ba28f7702..755918d74fb3 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -654,6 +654,121 @@ static int csv3_send_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +/* Userspace wants to query either header or trans length. */ +static int +csv3_send_encrypt_context_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv3_send_encrypt_context *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_context data; + int ret; + + memset(&data, 0, sizeof(data)); + data.handle = sev->handle; + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, &data, &argp->error); + + params->hdr_len = data.hdr_len; + params->trans_len = data.trans_len; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, sizeof(*params))) + ret = -EFAULT; + + return ret; +} + +static int csv3_send_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_context data; + struct kvm_csv3_send_encrypt_context params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + unsigned long pfn; + unsigned long i; + u32 offset; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return csv3_send_encrypt_context_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.hdr_uaddr) + return -EINVAL; + + if (params.trans_len 
> ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.handle = sev->handle; + + /* flush hdr, trans data, trans block, secure VMSAs */ + wbinvd_on_all_cpus(); + + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, &data, &argp->error); + + if (ret) + goto e_free_trans_data; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + /* copy packet header to userspace. 
*/ + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, unsigned long npages) { @@ -1090,6 +1205,9 @@ static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_CSV3_SEND_ENCRYPT_DATA: r = csv3_send_encrypt_data(kvm, &sev_cmd); break; + case KVM_CSV3_SEND_ENCRYPT_CONTEXT: + r = csv3_send_encrypt_context(kvm, &sev_cmd); + break; default: mutex_unlock(&kvm->lock); if (likely(csv_x86_ops.mem_enc_ioctl)) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index ec36a2314b6b..665f2d68a59b 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2319,6 +2319,7 @@ enum csv3_cmd_id { KVM_CSV3_LAUNCH_ENCRYPT_DATA, KVM_CSV3_LAUNCH_ENCRYPT_VMCB, KVM_CSV3_SEND_ENCRYPT_DATA, + KVM_CSV3_SEND_ENCRYPT_CONTEXT, KVM_CSV3_NR_MAX, }; @@ -2342,4 +2343,11 @@ struct kvm_csv3_send_encrypt_data { __u32 trans_len; }; +struct kvm_csv3_send_encrypt_context { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From 5baf8d5d4ba7cf09d4684f71955427113e7414de Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Wed, 25 Oct 2023 10:24:11 +0800 Subject: [PATCH 0857/2138] anolis: KVM: SVM: CSV: Add KVM_CSV3_RECEIVE_ENCRYPT_DATA command ANBZ: #8688 The command is used for copying the incoming buffer into the CSV3 guest's private memory. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3004 --- arch/x86/kvm/svm/csv.c | 122 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 10 ++++ 2 files changed, 132 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 755918d74fb3..d5620606cde2 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -769,6 +769,125 @@ static int csv3_send_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int csv3_receive_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_receive_encrypt_data data; + struct kvm_csv3_receive_encrypt_data params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct guest_paddr_block *guest_block; + unsigned long pfn; + int i; + u32 offset; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (unlikely(list_empty(&csv->smr_list))) { + /* Allocate all the guest memory from CMA */ + ret = csv3_set_guest_private_memory(kvm); + if (ret) + goto exit; + } + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_addr_data || !params.guest_addr_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + if (params.guest_addr_len > sizeof(*guest_block)) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + if (copy_from_user(hdr, + (void __user *)(uintptr_t)params.hdr_uaddr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + guest_block = 
kzalloc(sizeof(*guest_block), GFP_KERNEL_ACCOUNT); + if (!guest_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + + if (copy_from_user(guest_block, + (void __user *)(uintptr_t)params.guest_addr_data, + params.guest_addr_len)) { + ret = -EFAULT; + goto e_free_guest_block; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_guest_block; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + if (copy_from_user(trans_data, + (void __user *)(uintptr_t)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.guest_block = __psp_pa(guest_block); + data.guest_len = params.guest_addr_len; + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(guest_block, PAGE_SIZE); + ret = csv_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_DATA, &data, + &argp->error); + +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_guest_block: + kfree(guest_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, unsigned long npages) { @@ -1208,6 +1327,9 @@ static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_CSV3_SEND_ENCRYPT_CONTEXT: r = csv3_send_encrypt_context(kvm, &sev_cmd); break; + case KVM_CSV3_RECEIVE_ENCRYPT_DATA: + r = csv3_receive_encrypt_data(kvm, &sev_cmd); + break; default: 
mutex_unlock(&kvm->lock); if (likely(csv_x86_ops.mem_enc_ioctl)) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 665f2d68a59b..a79c56e49d18 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2320,6 +2320,7 @@ enum csv3_cmd_id { KVM_CSV3_LAUNCH_ENCRYPT_VMCB, KVM_CSV3_SEND_ENCRYPT_DATA, KVM_CSV3_SEND_ENCRYPT_CONTEXT, + KVM_CSV3_RECEIVE_ENCRYPT_DATA, KVM_CSV3_NR_MAX, }; @@ -2350,4 +2351,13 @@ struct kvm_csv3_send_encrypt_context { __u32 trans_len; }; +struct kvm_csv3_receive_encrypt_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_addr_data; + __u32 guest_addr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From 655eb0e55ec4fb01a559bce2867e3277fddc736d Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Wed, 25 Oct 2023 10:29:50 +0800 Subject: [PATCH 0858/2138] anolis: KVM: SVM: CSV: Add KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT command ANBZ: #8688 The command is used for copying the incoming context into the CSV3 guest's private memory. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3004 --- arch/x86/kvm/svm/csv.c | 147 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 +++ 2 files changed, 155 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index d5620606cde2..9b9d86169537 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -55,6 +55,10 @@ struct trans_paddr_block { u64 trans_paddr[512]; }; +struct vmcb_paddr_block { + u64 vmcb_paddr[512]; +}; + enum csv3_pg_level { CSV3_PG_LEVEL_NONE, CSV3_PG_LEVEL_4K, @@ -888,6 +892,146 @@ static int csv3_receive_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int csv3_receive_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_receive_encrypt_context data; + struct kvm_csv3_receive_encrypt_context params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct vmcb_paddr_block *shadow_vmcb_block; + struct vmcb_paddr_block *secure_vmcb_block; + unsigned long pfn; + u32 offset; + int ret = 0; + struct kvm_vcpu *vcpu; + unsigned long i; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + if (!params.trans_uaddr || !params.trans_len || + !params.hdr_uaddr || !params.hdr_len) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + if (copy_from_user(hdr, + (void __user *)(uintptr_t)params.hdr_uaddr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto 
e_free_hdr; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + if (copy_from_user(trans_data, + (void __user *)(uintptr_t)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + secure_vmcb_block = kzalloc(sizeof(*secure_vmcb_block), + GFP_KERNEL_ACCOUNT); + if (!secure_vmcb_block) { + ret = -ENOMEM; + goto e_free_trans_data; + } + + shadow_vmcb_block = kzalloc(sizeof(*shadow_vmcb_block), + GFP_KERNEL_ACCOUNT); + if (!shadow_vmcb_block) { + ret = -ENOMEM; + goto e_free_secure_vmcb_block; + } + + memset(&data, 0, sizeof(data)); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + if (i >= ARRAY_SIZE(shadow_vmcb_block->vmcb_paddr)) { + ret = -EINVAL; + goto e_free_shadow_vmcb_block; + } + shadow_vmcb_block->vmcb_paddr[i] = __sme_pa(svm->vmcb); + data.vmcb_block_len += sizeof(shadow_vmcb_block->vmcb_paddr[0]); + } + + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.shadow_vmcb_block = __psp_pa(shadow_vmcb_block); + data.secure_vmcb_block = __psp_pa(secure_vmcb_block); + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(shadow_vmcb_block, PAGE_SIZE); + clflush_cache_range(secure_vmcb_block, PAGE_SIZE); + + ret = csv_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT, &data, + &argp->error); + if (ret) + goto e_free_shadow_vmcb_block; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + if (i >= ARRAY_SIZE(secure_vmcb_block->vmcb_paddr)) { + ret = -EINVAL; + goto 
e_free_shadow_vmcb_block; + } + + svm->current_vmcb->pa = secure_vmcb_block->vmcb_paddr[i]; + svm->vcpu.arch.guest_state_protected = true; + } + +e_free_shadow_vmcb_block: + kfree(shadow_vmcb_block); +e_free_secure_vmcb_block: + kfree(secure_vmcb_block); +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, unsigned long npages) { @@ -1330,6 +1474,9 @@ static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) case KVM_CSV3_RECEIVE_ENCRYPT_DATA: r = csv3_receive_encrypt_data(kvm, &sev_cmd); break; + case KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT: + r = csv3_receive_encrypt_context(kvm, &sev_cmd); + break; default: mutex_unlock(&kvm->lock); if (likely(csv_x86_ops.mem_enc_ioctl)) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index a79c56e49d18..def7a0ee9717 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2321,6 +2321,7 @@ enum csv3_cmd_id { KVM_CSV3_SEND_ENCRYPT_DATA, KVM_CSV3_SEND_ENCRYPT_CONTEXT, KVM_CSV3_RECEIVE_ENCRYPT_DATA, + KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT, KVM_CSV3_NR_MAX, }; @@ -2360,4 +2361,11 @@ struct kvm_csv3_receive_encrypt_data { __u32 trans_len; }; +struct kvm_csv3_receive_encrypt_context { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ -- Gitee From 64ccfea99404b20b547f4fdfa5e95451a544f0f4 Mon Sep 17 00:00:00 2001 From: Zhao Chen Date: Tue, 9 Jan 2024 17:24:42 +0800 Subject: [PATCH 0859/2138] fuse: Introduce a new notification type for resend pending requests ANBZ: #8703 commit 760eac73f9f69aa28fcb3050b4946c2dcc656d12 upstream. When a FUSE daemon panics and failover, we aim to minimize the impact on applications by reusing the existing FUSE connection. During this process, another daemon is employed to preserve the FUSE connection's file descriptor. 
The new started FUSE Daemon will takeover the fd and continue to provide service. However, it is possible for some inflight requests to be lost and never returned. As a result, applications awaiting replies would become stuck forever. To address this, we can resend these pending requests to the new started FUSE daemon. This patch introduces a new notification type "FUSE_NOTIFY_RESEND", which can trigger resending of the pending requests, ensuring they are properly processed again. Signed-off-by: Zhao Chen Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3009 --- fs/fuse/dev.c | 56 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/fuse.h | 2 ++ 2 files changed, 58 insertions(+) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 8573d79ef29c..d26799ba19ea 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1777,6 +1777,59 @@ static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size, return err; } +/* + * Resending all processing queue requests. + * + * During a FUSE daemon panics and failover, it is possible for some inflight + * requests to be lost and never returned. As a result, applications awaiting + * replies would become stuck forever. To address this, we can use notification + * to trigger resending of these pending requests to the FUSE daemon, ensuring + * they are properly processed again. + * + * Please note that this strategy is applicable only to idempotent requests or + * if the FUSE daemon takes careful measures to avoid processing duplicated + * non-idempotent requests. 
+ */ +static void fuse_resend(struct fuse_conn *fc) +{ + struct fuse_dev *fud; + struct fuse_req *req, *next; + struct fuse_iqueue *fiq = &fc->iq; + LIST_HEAD(to_queue); + unsigned int i; + + spin_lock(&fc->lock); + if (!fc->connected) { + spin_unlock(&fc->lock); + return; + } + + list_for_each_entry(fud, &fc->devices, entry) { + struct fuse_pqueue *fpq = &fud->pq; + + spin_lock(&fpq->lock); + for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) + list_splice_tail_init(&fpq->processing[i], &to_queue); + spin_unlock(&fpq->lock); + } + spin_unlock(&fc->lock); + + list_for_each_entry_safe(req, next, &to_queue, list) { + __set_bit(FR_PENDING, &req->flags); + } + + spin_lock(&fiq->lock); + /* iq and pq requests are both oldest to newest */ + list_splice(&to_queue, &fiq->pending); + fiq->ops->wake_pending_and_unlock(fiq); +} + +static int fuse_notify_resend(struct fuse_conn *fc) +{ + fuse_resend(fc); + return 0; +} + static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, unsigned int size, struct fuse_copy_state *cs) { @@ -1802,6 +1855,9 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, case FUSE_NOTIFY_DELETE: return fuse_notify_delete(fc, size, cs); + case FUSE_NOTIFY_RESEND: + return fuse_notify_resend(fc); + default: fuse_copy_finish(cs); return -EINVAL; diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 33b56d9e4803..aeeecf0fba63 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -214,6 +214,7 @@ * * 7.40 * - add FUSE_NO_EXPORT_SUPPORT init flag + * - add FUSE_NOTIFY_RESEND */ #ifndef _LINUX_FUSE_H @@ -640,6 +641,7 @@ enum fuse_notify_code { FUSE_NOTIFY_STORE = 4, FUSE_NOTIFY_RETRIEVE = 5, FUSE_NOTIFY_DELETE = 6, + FUSE_NOTIFY_RESEND = 7, FUSE_NOTIFY_CODE_MAX, }; -- Gitee From ea2d923cc08165cb06c377c06ef1263f88b31cf9 Mon Sep 17 00:00:00 2001 From: Zhao Chen Date: Tue, 9 Jan 2024 17:24:43 +0800 Subject: [PATCH 0860/2138] fuse: Use the high bit of request ID for indicating resend requests 
ANBZ: #8703 commit 9e7f5296f475ba5ab887ae3e55b922e17e99752b upstream. Some FUSE daemons want to know if the received request is a resend request. The high bit of the fuse request ID is utilized for indicating this, enabling the receiver to perform appropriate handling. The init flag "FUSE_HAS_RESEND" is added to indicate this feature. Signed-off-by: Zhao Chen Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3009 --- fs/fuse/dev.c | 2 ++ fs/fuse/inode.c | 2 +- include/uapi/linux/fuse.h | 13 ++++++++++++- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index d26799ba19ea..3e2acd253ed2 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1816,6 +1816,8 @@ static void fuse_resend(struct fuse_conn *fc) list_for_each_entry_safe(req, next, &to_queue, list) { __set_bit(FR_PENDING, &req->flags); + /* mark the request as resend request */ + req->in.h.unique |= FUSE_UNIQUE_RESEND; } spin_lock(&fiq->lock); diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 43b1ffb63b5d..95c79f83f3ab 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -1369,7 +1369,7 @@ void fuse_send_init(struct fuse_mount *fm) FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT | FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP | FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP | - FUSE_NO_EXPORT_SUPPORT; + FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND; #ifdef CONFIG_FUSE_DAX if (fm->fc->dax) flags |= FUSE_MAP_ALIGNMENT; diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index aeeecf0fba63..8da8e72ed313 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -214,7 +214,7 @@ * * 7.40 * - add FUSE_NO_EXPORT_SUPPORT init flag - * - add FUSE_NOTIFY_RESEND + * - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag */ #ifndef _LINUX_FUSE_H @@ -415,6 +415,8 @@ struct fuse_file_lock { * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation * 
FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode. * FUSE_NO_EXPORT_SUPPORT: explicitly disable export support + * FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit + * of the request ID indicates resend requests */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -455,6 +457,7 @@ struct fuse_file_lock { #define FUSE_HAS_EXPIRE_ONLY (1ULL << 35) #define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36) #define FUSE_NO_EXPORT_SUPPORT (1ULL << 38) +#define FUSE_HAS_RESEND (1ULL << 39) /* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ #define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP @@ -967,6 +970,14 @@ struct fuse_fallocate_in { uint32_t padding; }; +/** + * FUSE request unique ID flag + * + * Indicates whether this is a resend request. The receiver should handle this + * request accordingly. + */ +#define FUSE_UNIQUE_RESEND (1ULL << 63) + struct fuse_in_header { uint32_t len; uint32_t opcode; -- Gitee From a1aad3b8cb016effac7ae4dec717969a5198a918 Mon Sep 17 00:00:00 2001 From: Xu Ji Date: Thu, 12 Oct 2023 11:46:20 +0800 Subject: [PATCH 0861/2138] anolis: fuse: increase FUSE_MAX_MAX_PAGES limit ANBZ: #8703 Set FUSE_MAX_MAX_PAGES to 1024, we can send read/write requests with max size of 4MB. 
Signed-off-by: Xu Ji Reviewed-by: Jingbo Xu Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3009 --- fs/fuse/fuse_i.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 695e3e993dc2..554fed2c8320 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -37,7 +37,7 @@ #define FUSE_DEFAULT_MAX_PAGES_PER_REQ 32 /** Maximum of max_pages received in init_out */ -#define FUSE_MAX_MAX_PAGES 256 +#define FUSE_MAX_MAX_PAGES 1024 /** Bias for fi->writectr, meaning new writepages must not be sent */ #define FUSE_NOWRITE INT_MIN -- Gitee From 9d78bcc44cc006528fe14bb5e6df86581d1f2c9a Mon Sep 17 00:00:00 2001 From: Yifei Zhang Date: Sat, 21 Oct 2023 23:26:09 +0800 Subject: [PATCH 0862/2138] anolis: fuse: make fuse support configuring delete_stale feature ANBZ: #8703 by default, fuse always caches dentry for performance, which results in fuse daemon always saving fd created by lookup, and if the file is not deleted through the fuse file system, this will cause fd in fuse daemon to always reference the deleted file and cannot be released. our system can avoid this problem by delete_stale and re-lookup every time instead of caching dentry. virtiofs already supports this, but fuse doesn't enable this by default. This PR is to make the delete_stale configurable. 
Signed-off-by: Yifei Zhang Reviewed-by: Jingbo Xu Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3009 --- fs/fuse/inode.c | 5 ++++- include/uapi/linux/fuse.h | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 95c79f83f3ab..74c05299dcbd 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -1322,6 +1322,8 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args, fc->direct_io_allow_mmap = 1; if (flags & FUSE_NO_EXPORT_SUPPORT) fm->sb->s_export_op = &fuse_export_fid_operations; + if (flags & FUSE_DELETE_STALE) + fc->delete_stale = 1; } else { ra_pages = fc->max_read / PAGE_SIZE; fc->no_lock = 1; @@ -1369,7 +1371,8 @@ void fuse_send_init(struct fuse_mount *fm) FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT | FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP | FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP | - FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND; + FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | + FUSE_DELETE_STALE; #ifdef CONFIG_FUSE_DAX if (fm->fc->dax) flags |= FUSE_MAP_ALIGNMENT; diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 8da8e72ed313..730e620286a7 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -462,6 +462,9 @@ struct fuse_file_lock { /* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ #define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP +#define FUSE_DELETE_STALE (1ULL << 58) +/* The 59th bit is left to FUSE_DIO_SHARED_MMAP */ + /** * CUSE INIT request/reply flags * -- Gitee From 5858e2a2d46deff5f4009cc3b58dfcf78725cf92 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 8 Apr 2024 19:39:55 +0800 Subject: [PATCH 0863/2138] anolis: x86/delay: add support for Zhaoxin ZXPAUSE instruction ANBZ: #7809 ZXPAUSE instructs the processor to enter an implementation-dependent optimized state. 
The instruction execution wakes up when the time-stamp counter reaches or exceeds the implicit EDX:EAX 64-bit input value. The instruction execution also wakes up due to the expiration of the operating system time-limit or by an external interrupt. ZXPAUSE is available on processors with X86_FEATURE_ZXPAUSE. ZXPAUSE allows the processor to enter a light-weight power/performance optimized state (C0.1 state) for a period specified by the instruction or until the system time limit. MSR_ZX_PAUSE_CONTROL MSR register allows the OS to enable/disable C0.2 on the processor and to set the maximum time the processor can reside in C0.1 or C0.2. By default C0.2 is disabled. A sysfs interface to adjust the time and the C0.2 enablement is provided in a follow up change. Signed-off-by: leoliu-oc Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2710 [Fixes conflicts: use cpufeatures' word 23] Signed-off-by: Qinyun Tan --- MAINTAINERS | 5 + arch/x86/include/asm/cpufeature.h | 7 +- arch/x86/include/asm/cpufeatures.h | 5 +- arch/x86/include/asm/delay.h | 1 + arch/x86/include/asm/disabled-features.h | 3 +- arch/x86/include/asm/msr-index.h | 11 + arch/x86/include/asm/mwait.h | 15 ++ arch/x86/include/asm/required-features.h | 3 +- arch/x86/kernel/cpu/Makefile | 1 + arch/x86/kernel/cpu/centaur.c | 3 + arch/x86/kernel/cpu/zhaoxin.c | 3 + arch/x86/kernel/cpu/zxpause.c | 238 ++++++++++++++++++ arch/x86/kernel/time.c | 2 + arch/x86/lib/delay.c | 27 ++ tools/arch/x86/include/asm/cpufeatures.h | 5 +- .../arch/x86/include/asm/disabled-features.h | 5 +- tools/arch/x86/include/asm/msr-index.h | 11 + .../arch/x86/include/asm/required-features.h | 5 +- 18 files changed, 342 insertions(+), 8 deletions(-) create mode 100644 arch/x86/kernel/cpu/zxpause.c diff --git a/MAINTAINERS b/MAINTAINERS index 58647078de3f..c1023cec8044 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23901,6 +23901,11 @@ L: linux-hwmon@vger.kernel.org S: Maintained F: drivers/hwmon/zhaoxin-cputemp.c +ZHAOXIN 
ZXPAUSE INSTRUCTION SUPPORT +M: LeoLiu-oc +S: Maintained +F: arch/x86/kernel/cpu/zxpause.c + ZONEFS FILESYSTEM M: Damien Le Moal M: Naohiro Aota diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index b59e37c4ecb4..c5b1d083bc6b 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -35,6 +35,7 @@ enum cpuid_leafs CPUID_8000_0021_EAX, CPUID_LNX_5, CPUID_8C86_0000_EDX, + CPUID_C000_0006_EAX, NR_CPUID_WORDS, }; @@ -96,8 +97,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 22, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 23, feature_bit) || \ REQUIRED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 23)) + BUILD_BUG_ON_ZERO(NCAPINTS != 24)) #define DISABLED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ @@ -123,8 +125,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 22, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 23, feature_bit) || \ DISABLED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 23)) + BUILD_BUG_ON_ZERO(NCAPINTS != 24)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 
1 : \ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 42cef8da01b4..dc1811133da0 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 23 /* N 32-bit words worth of info */ +#define NCAPINTS 24 /* N 32-bit words worth of info */ #define NBUGINTS 2 /* N 32-bit bug flags */ /* @@ -485,6 +485,9 @@ #define X86_FEATURE_SM3 (22*32 + 1) /* SM3 instructions */ #define X86_FEATURE_SM4 (22*32 + 2) /* SM4 instructions */ +/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000006, word 23 */ +#define X86_FEATURE_ZXPAUSE (23*32 + 0) /* ZHAOXIN ZXPAUSE */ + /* * Extended auxiliary flags: Linux defined - for features scattered in various * CPUID levels like 0x80000022, etc and Linux defined features. diff --git a/arch/x86/include/asm/delay.h b/arch/x86/include/asm/delay.h index 630891d25819..4dbb3fea67fb 100644 --- a/arch/x86/include/asm/delay.h +++ b/arch/x86/include/asm/delay.h @@ -7,6 +7,7 @@ void __init use_tsc_delay(void); void __init use_tpause_delay(void); +void __init use_zxpause_delay(void); void use_mwaitx_delay(void); #endif /* _ASM_X86_DELAY_H */ diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index b108e656fa5b..c1e800b636f4 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -145,6 +145,7 @@ #define DISABLED_MASK20 0 #define DISABLED_MASK21 0 #define DISABLED_MASK22 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23) +#define DISABLED_MASK23 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 24) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 8a07cfa720eb..4df030ee0e23 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -75,12 +75,23 @@ #define MSR_IA32_UMWAIT_CONTROL 0xe1 
#define MSR_IA32_UMWAIT_CONTROL_C02_DISABLE BIT(0) #define MSR_IA32_UMWAIT_CONTROL_RESERVED BIT(1) + +#define MSR_ZX_PAUSE_CONTROL 0x187f +#define MSR_ZX_PAUSE_CONTROL_C02_DISABLE BIT(0) +#define MSR_ZX_PAUSE_CONTROL_RESERVED BIT(1) + /* * The time field is bit[31:2], but representing a 32bit value with * bit[1:0] zero. */ #define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U) +/* + * The time field is bit[31:2], but representing a 32bit value with + * bit[1:0] zero. + */ +#define MSR_ZX_PAUSE_CONTROL_TIME_MASK (~0x03U) + /* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */ #define MSR_IA32_CORE_CAPS 0x000000cf #define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2 diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index bae83810505b..3aa7f98683e3 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -26,6 +26,8 @@ #define TPAUSE_C01_STATE 1 #define TPAUSE_C02_STATE 0 +#define ZXPAUSE_C01_STATE 1 + static __always_inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) { @@ -148,4 +150,17 @@ static inline void __tpause(u32 ecx, u32 edx, u32 eax) #endif } +/* + * Caller can specify whether to enter C0.1 (low latency, less + * power saving) or C0.2 state (saves more power, but longer wakeup + * latency). This may be overridden by the ZX_PAUSE_CONTROL MSR + * which can force requests for C0.2 to be downgraded to C0.1. 
+ */ +static inline void __zxpause(u32 ecx, u32 edx, u32 eax) +{ + /* "zxpause %ecx, %edx, %eax;" */ + asm volatile(".byte 0xf2, 0x0f, 0xa6, 0xd0\t\n" + : + : "c"(ecx), "d"(edx), "a"(eax)); +} #endif /* _ASM_X86_MWAIT_H */ diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index 76953f757f3c..6a3de575bec6 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -101,6 +101,7 @@ #define REQUIRED_MASK20 0 #define REQUIRED_MASK21 0 #define REQUIRED_MASK22 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23) +#define REQUIRED_MASK23 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 24) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 4350f6bfc064..dec6b0d9e711 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -25,6 +25,7 @@ obj-y += bugs.o obj-y += aperfmperf.o obj-y += cpuid-deps.o obj-y += umwait.o +obj-$(CONFIG_CPU_SUP_ZHAOXIN) += zxpause.o obj-$(CONFIG_PROC_FS) += proc.o obj-y += capflags.o powerflags.o diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index ad6982391bc9..b15bcf21ac7b 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -117,6 +117,9 @@ static void early_init_centaur(struct cpuinfo_x86 *c) if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); + if (cpuid_eax(0xC0000000) >= 0xC0000006) + c->x86_capability[CPUID_C000_0006_EAX] = cpuid_eax(0xC0000006); + if (detect_extended_topology_early(c) < 0) detect_ht_early(c); } diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c index f9a65b57a6bd..8e4201ad1d23 100644 --- a/arch/x86/kernel/cpu/zhaoxin.c +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -86,6 +86,9 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c) if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) 
set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); + if (cpuid_eax(0xC0000000) >= 0xC0000006) + c->x86_capability[CPUID_C000_0006_EAX] = cpuid_eax(0xC0000006); + if (detect_extended_topology_early(c) < 0) detect_ht_early(c); } diff --git a/arch/x86/kernel/cpu/zxpause.c b/arch/x86/kernel/cpu/zxpause.c new file mode 100644 index 000000000000..7f55f5d9e8c0 --- /dev/null +++ b/arch/x86/kernel/cpu/zxpause.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include +#include + +#define ZXPAUSE_C02_ENABLE 0 + +#define ZXPAUSE_CTRL_VAL(max_time, c02_disable) \ + (((max_time) & MSR_ZX_PAUSE_CONTROL_TIME_MASK) | \ + ((c02_disable) & MSR_ZX_PAUSE_CONTROL_C02_DISABLE)) + +/* + * Cache ZX_PAUSE_CONTROL MSR. This is a systemwide control. By default, + * zxpause max time is 100000 in TSC-quanta and C0.2 is enabled + */ +static u32 zxpause_control_cached = ZXPAUSE_CTRL_VAL(100000, ZXPAUSE_C02_ENABLE); + +/* + * Cache the original ZX_PAUSE_CONTROL MSR value which is configured by + * hardware or BIOS before kernel boot. + */ +static u32 orig_zxpause_control_cached __ro_after_init; + +/* + * Serialize access to zxpause_control_cached and ZX_PAUSE_CONTROL MSR in + * the sysfs write functions. + */ +static DEFINE_MUTEX(zxpause_lock); + +static void zxpause_update_control_msr(void *unused) +{ + lockdep_assert_irqs_disabled(); + wrmsr(MSR_ZX_PAUSE_CONTROL, READ_ONCE(zxpause_control_cached), 0); +} + +/* + * The CPU hotplug callback sets the control MSR to the global control + * value. + * + * Disable interrupts so the read of zxpause_control_cached and the WRMSR + * are protected against a concurrent sysfs write. Otherwise the sysfs + * write could update the cached value after it had been read on this CPU + * and issue the IPI before the old value had been written. The IPI would + * interrupt, write the new value and after return from IPI the previous + * value would be written by this CPU. 
+ * + * With interrupts disabled the upcoming CPU either sees the new control + * value or the IPI is updating this CPU to the new control value after + * interrupts have been reenabled. + */ +static int zxpause_cpu_online(unsigned int cpu) +{ + local_irq_disable(); + zxpause_update_control_msr(NULL); + local_irq_enable(); + return 0; +} + +/* + * The CPU hotplug callback sets the control MSR to the original control + * value. + */ +static int zxpause_cpu_offline(unsigned int cpu) +{ + /* + * This code is protected by the CPU hotplug already and + * orig_zxpause_control_cached is never changed after it caches + * the original control MSR value in zxpause_init(). So there + * is no race condition here. + */ + wrmsr(MSR_ZX_PAUSE_CONTROL, orig_zxpause_control_cached, 0); + + return 0; +} + +/* + * On resume, restore ZX_PAUSE_CONTROL MSR on the boot processor which + * is the only active CPU at this time. The MSR is set up on the APs via the + * CPU hotplug callback. + * + * This function is invoked on resume from suspend and hibernation. On + * resume from suspend the restore should be not required, but we neither + * trust the firmware nor does it matter if the same value is written + * again. + */ +static void zxpause_syscore_resume(void) +{ + zxpause_update_control_msr(NULL); +} + +static struct syscore_ops zxpause_syscore_ops = { + .resume = zxpause_syscore_resume, +}; + +/* sysfs interface */ + +/* + * When bit 0 in ZX_PAUSE_CONTROL MSR is 1, C0.2 is disabled. + * Otherwise, C0.2 is enabled. 
+ */ +static inline bool zxpause_ctrl_c02_enabled(u32 ctrl) +{ + return !(ctrl & MSR_ZX_PAUSE_CONTROL_C02_DISABLE); +} + +static inline u32 zxpause_ctrl_max_time(u32 ctrl) +{ + return ctrl & MSR_ZX_PAUSE_CONTROL_TIME_MASK; +} + +static inline void zxpause_update_control(u32 maxtime, bool c02_enable) +{ + u32 ctrl = maxtime & MSR_ZX_PAUSE_CONTROL_TIME_MASK; + + if (!c02_enable) + ctrl |= MSR_ZX_PAUSE_CONTROL_C02_DISABLE; + + WRITE_ONCE(zxpause_control_cached, ctrl); + /* Propagate to all CPUs */ + on_each_cpu(zxpause_update_control_msr, NULL, 1); +} + +static ssize_t +enable_c02_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + u32 ctrl = READ_ONCE(zxpause_control_cached); + + return sprintf(buf, "%d\n", zxpause_ctrl_c02_enabled(ctrl)); +} + +static ssize_t enable_c02_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + bool c02_enable; + u32 ctrl; + int ret; + + ret = kstrtobool(buf, &c02_enable); + if (ret) + return ret; + + mutex_lock(&zxpause_lock); + + ctrl = READ_ONCE(zxpause_control_cached); + if (c02_enable != zxpause_ctrl_c02_enabled(ctrl)) + zxpause_update_control(ctrl, c02_enable); + + mutex_unlock(&zxpause_lock); + + return count; +} +static DEVICE_ATTR_RW(enable_c02); + +static ssize_t +max_time_show(struct device *kobj, struct device_attribute *attr, char *buf) +{ + u32 ctrl = READ_ONCE(zxpause_control_cached); + + return sprintf(buf, "%u\n", zxpause_ctrl_max_time(ctrl)); +} + +static ssize_t max_time_store(struct device *kobj, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 max_time, ctrl; + int ret; + + ret = kstrtou32(buf, 0, &max_time); + if (ret) + return ret; + + /* bits[1:0] must be zero */ + if (max_time & ~MSR_ZX_PAUSE_CONTROL_TIME_MASK) + return -EINVAL; + + mutex_lock(&zxpause_lock); + + ctrl = READ_ONCE(zxpause_control_cached); + if (max_time != zxpause_ctrl_max_time(ctrl)) + zxpause_update_control(max_time, zxpause_ctrl_c02_enabled(ctrl)); + + 
mutex_unlock(&zxpause_lock); + + return count; +} +static DEVICE_ATTR_RW(max_time); + +static struct attribute *zxpause_attrs[] = { + &dev_attr_enable_c02.attr, + &dev_attr_max_time.attr, + NULL +}; + +static struct attribute_group zxpause_attr_group = { + .attrs = zxpause_attrs, + .name = "zxpause_control", +}; + +static int __init zxpause_init(void) +{ + struct device *dev; + int ret; + + if (!boot_cpu_has(X86_FEATURE_ZXPAUSE)) + return -ENODEV; + + /* + * Cache the original control MSR value before the control MSR is + * changed. This is the only place where orig_zxpause_control_cached + * is modified. + */ + rdmsrl(MSR_ZX_PAUSE_CONTROL, orig_zxpause_control_cached); + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "zxpause:online", + zxpause_cpu_online, zxpause_cpu_offline); + if (ret < 0) { + /* + * On failure, the control MSR on all CPUs has the + * original control value. + */ + return ret; + } + + register_syscore_ops(&zxpause_syscore_ops); + + /* + * Add zxpause control interface. Ignore failure, so at least the + * default values are set up in case the machine manages to boot. 
+ */ + dev = bus_get_dev_root(&cpu_subsys); + return sysfs_create_group(&dev->kobj, &zxpause_attr_group); +} +device_initcall(zxpause_init); diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index 52e1f3f0b361..06289c254a0e 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -84,6 +84,8 @@ static __init void x86_late_time_init(void) if (static_cpu_has(X86_FEATURE_WAITPKG)) use_tpause_delay(); + else if (static_cpu_has(X86_FEATURE_ZXPAUSE)) + use_zxpause_delay(); } /* diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 0e65d00e2339..3946badbd78f 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -117,6 +117,27 @@ static void delay_halt_tpause(u64 start, u64 cycles) __tpause(TPAUSE_C02_STATE, edx, eax); } +/* + * On ZHAOXIN the ZXPAUSE instruction waits until any of: + * 1) the delta of TSC counter exceeds the value provided in EDX:EAX + * 2) global timeout in ZX_PAUSE_CONTROL is exceeded + * 3) an external interrupt occurs + */ +static void delay_halt_zxpause(u64 unused, u64 cycles) +{ + u64 until = cycles; + u32 eax, edx; + + eax = lower_32_bits(until); + edx = upper_32_bits(until); + + /* + * Hard code the deeper (C0.1) sleep state because exit latency is + * small compared to the "microseconds" that usleep() will delay. + */ + __zxpause(ZXPAUSE_C01_STATE, edx, eax); +} + /* * On some AMD platforms, MWAITX has a configurable 32-bit timer, that * counts with TSC frequency. 
The input value is the number of TSC cycles @@ -183,6 +204,12 @@ void __init use_tpause_delay(void) delay_fn = delay_halt; } +void __init use_zxpause_delay(void) +{ + delay_halt_fn = delay_halt_zxpause; + delay_fn = delay_halt; +} + void use_mwaitx_delay(void) { delay_halt_fn = delay_halt_mwaitx; diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 845a4023ba44..bc82ca1bb346 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 21 /* N 32-bit words worth of info */ +#define NCAPINTS 23 /* N 32-bit words worth of info */ #define NBUGINTS 2 /* N 32-bit bug flags */ /* @@ -443,6 +443,9 @@ #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */ #define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */ +/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000006, word 23 */ +#define X86_FEATURE_ZXPAUSE (23*32 + 0) /* ZHAOXIN ZXPAUSE */ + /* * BUG word(s) */ diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h index fafe9be7a6f4..be3fef5e80ba 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h @@ -131,6 +131,9 @@ #define DISABLED_MASK18 0 #define DISABLED_MASK19 0 #define DISABLED_MASK20 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) +#define DISABLED_MASK21 0 +#define DISABLED_MASK22 0 +#define DISABLED_MASK23 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 24) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 1d111350197f..06c4386a620b 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -72,12 +72,23 @@ #define MSR_IA32_UMWAIT_CONTROL 0xe1 #define MSR_IA32_UMWAIT_CONTROL_C02_DISABLE 
BIT(0) #define MSR_IA32_UMWAIT_CONTROL_RESERVED BIT(1) + +#define MSR_ZX_PAUSE_CONTROL 0x187f +#define MSR_ZX_PAUSE_CONTROL_C02_DISABLE BIT(0) +#define MSR_ZX_PAUSE_CONTROL_RESERVED BIT(1) + /* * The time field is bit[31:2], but representing a 32bit value with * bit[1:0] zero. */ #define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U) +/* + * The time field is bit[31:2], but representing a 32bit value with + * bit[1:0] zero. + */ +#define MSR_ZX_PAUSE_CONTROL_TIME_MASK (~0x03U) + /* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */ #define MSR_IA32_CORE_CAPS 0x000000cf #define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2 diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h index 7ba1726b71c7..6a3de575bec6 100644 --- a/tools/arch/x86/include/asm/required-features.h +++ b/tools/arch/x86/include/asm/required-features.h @@ -99,6 +99,9 @@ #define REQUIRED_MASK18 0 #define REQUIRED_MASK19 0 #define REQUIRED_MASK20 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) +#define REQUIRED_MASK21 0 +#define REQUIRED_MASK22 0 +#define REQUIRED_MASK23 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 24) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ -- Gitee From 197f59a3fb43c67bb0828c7d4d06e3d01103cf7f Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 8 Apr 2024 17:25:52 +0800 Subject: [PATCH 0864/2138] anolis: KVM: x86: Introduce support for Zhaoxin ZXPAUSE instruction ANBZ: #7809 This patch introduces support for the ZXPAUSE instruction, a new addition akin to Intel's TPAUSE. Two primary distinctions set apart ZXPAUSE from TPAUSE: 1. ZXPAUSE utilizes a delta tsc, determined from the lesser value between (MSR_ZX_PAUSE_CONTROL[31:2] << 2) and the EDX:EAX input to the ZXPAUSE instruction, subtracted from the current tsc value. In contrast, TPAUSE employs a target tsc, computed from the lesser value between (MSR_IA32_UMWAIT_CONTROL[31:2] << 2) and the EDX:EAX input to the TPAUSE instruction. 2. 
As of now, ZXPAUSE exclusively supports the C0.1 optimization state, whereas TPAUSE potentially extends support to both C0.1 and C0.2. Successful integration of this patch hinges on QEMU's backing for ZXPAUSE, a contribution we're currently forwarding to QEMU. It also requires the preceding patch in this patchset, which offers Linux kernel support for ZXPAUSE. The choice of the name "vmx->msr_ia32_umwait_control" is deliberate. In patches for other Linux versions (e.g., 5.5), a "vmx->msr_ia32_umwait_control" already exists. By sharing this variable name with Intel, it ensures compatibility. The difference is merely software-based and poses no real-world conflicts. Currently, if the Guest writes to the ZXPAUSE/TPAUSE CONTROL MSR, we simply bypass the WRMSR instruction. If the Guest attempts to use ZXPAUSE/TPAUSE to transition the vCPU into an optimized state, it will succeed, with the duration of the optimized state being the value passed in EDX:EAX. Of course, this state can be interrupted by external interrupts and other events specified in the specification. 
Signed-off-by: leoliu-oc Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2710 --- arch/x86/include/asm/msr-index.h | 7 +++ arch/x86/include/asm/vmx.h | 7 +++ arch/x86/include/asm/vmxfeatures.h | 6 ++- arch/x86/kernel/cpu/feat_ctl.c | 10 ++++ arch/x86/kvm/cpuid.c | 12 ++++- arch/x86/kvm/reverse_cpuid.h | 1 + arch/x86/kvm/vmx/capabilities.h | 7 +++ arch/x86/kvm/vmx/vmcs.h | 2 + arch/x86/kvm/vmx/vmx.c | 68 +++++++++++++++++++++++++- arch/x86/kvm/vmx/vmx.h | 19 +++++++ arch/x86/kvm/x86.c | 6 ++- tools/arch/x86/include/asm/msr-index.h | 7 +++ 12 files changed, 147 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 4df030ee0e23..edb0f0a2c57b 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -785,6 +785,13 @@ #define MSR_VIA_RNG 0x0000110b #define MSR_VIA_BCR2 0x00001147 +/* + * Zhaoxin extend VMCS capabilities: + * bit 0: exec-cntl3 VMCS field. + */ +#define MSR_ZX_EXT_VMCS_CAPS 0x1675 +#define MSR_ZX_VMCS_EXEC_CTL3 BIT(0) + /* Transmeta defined MSRs */ #define MSR_TMTA_LONGRUN_CTRL 0x80868010 #define MSR_TMTA_LONGRUN_FLAGS 0x80868011 diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 0e73616b82f3..3a4f60f19de3 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -84,6 +84,11 @@ */ #define TERTIARY_EXEC_IPI_VIRT VMCS_CONTROL_BIT(IPI_VIRT) +/* + * Definitions of Zhaoxin Tertiary Processor-Based VM-Execution Controls. 
+ */ +#define ZX_TERTIARY_EXEC_GUEST_ZXPAUSE VMCS_CONTROL_BIT(GUEST_ZXPAUSE) + #define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING) #define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING) #define PIN_BASED_VIRTUAL_NMIS VMCS_CONTROL_BIT(VIRTUAL_NMIS) @@ -235,6 +240,7 @@ enum vmcs_field { TERTIARY_VM_EXEC_CONTROL_HIGH = 0x00002035, PID_POINTER_TABLE = 0x00002042, PID_POINTER_TABLE_HIGH = 0x00002043, + ZXPAUSE_VMEXIT_TSC = 0x00002200, GUEST_PHYSICAL_ADDRESS = 0x00002400, GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, VMCS_LINK_POINTER = 0x00002800, @@ -284,6 +290,7 @@ enum vmcs_field { PLE_GAP = 0x00004020, PLE_WINDOW = 0x00004022, NOTIFY_WINDOW = 0x00004024, + ZX_TERTIARY_VM_EXEC_CONTROL = 0x00004200, VM_INSTRUCTION_ERROR = 0x00004400, VM_EXIT_REASON = 0x00004402, VM_EXIT_INTR_INFO = 0x00004404, diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h index c6a7eed03914..ba209bdf57d9 100644 --- a/arch/x86/include/asm/vmxfeatures.h +++ b/arch/x86/include/asm/vmxfeatures.h @@ -5,7 +5,7 @@ /* * Defines VMX CPU feature bits */ -#define NVMXINTS 5 /* N 32-bit words worth of info */ +#define NVMXINTS 6 /* N 32-bit words worth of info */ /* * Note: If the comment begins with a quoted string, that string is used @@ -89,4 +89,8 @@ /* Tertiary Processor-Based VM-Execution Controls, word 3 */ #define VMX_FEATURE_IPI_VIRT ( 3*32+ 4) /* Enable IPI virtualization */ + +/* Zhaoxin Tertiary Processor-Based VM-Execution Controls, word 4 */ +#define VMX_FEATURE_GUEST_ZXPAUSE (4*32 + 0) /* zxpause instruction in guest mode */ + #endif /* _ASM_X86_VMXFEATURES_H */ diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c index 03851240c3e3..3e0fbf510f1c 100644 --- a/arch/x86/kernel/cpu/feat_ctl.c +++ b/arch/x86/kernel/cpu/feat_ctl.c @@ -17,6 +17,7 @@ enum vmx_feature_leafs { SECONDARY_CTLS, TERTIARY_CTLS_LOW, TERTIARY_CTLS_HIGH, + ZX_TERTIARY_CTLS, NR_VMX_FEATURE_WORDS, }; @@ -97,6 +98,15 @@ static void init_vmx_capabilities(struct 
cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_EPT_AD); if (c->vmx_capability[MISC_FEATURES] & VMX_F(VPID)) set_cpu_cap(c, X86_FEATURE_VPID); + /* + * Initialize Zhaoxin Tertiary Exec Control feature flags. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + rdmsr_safe(MSR_ZX_EXT_VMCS_CAPS, &supported, &ign); + if (supported & MSR_ZX_VMCS_EXEC_CTL3) + c->vmx_capability[ZX_TERTIARY_CTLS] |= VMX_F(GUEST_ZXPAUSE); + } } #endif /* CONFIG_X86_VMX_FEATURE_NAMES */ diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 4b7b41ebb5c9..a9f265216582 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -812,6 +812,9 @@ void kvm_set_cpu_caps(void) F(PMM) | F(PMM_EN) ); + /* Zhaoxin 0xC0000006 leaf */ + kvm_cpu_cap_mask(CPUID_C000_0006_EAX, 0 /* bit0: zxpause */ | 0 /* bit1 HMAC */); + /* * Hide RDTSCP and RDPID if either feature is reported as supported but * probing MSR_TSC_AUX failed. This is purely a sanity check and @@ -1325,17 +1328,22 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) } /*Add support for Centaur's CPUID instruction*/ case 0xC0000000: - /*Just support up to 0xC0000004 now*/ - entry->eax = min(entry->eax, 0xC0000004); + /* Extended to 0xC0000006 */ + entry->eax = min(entry->eax, 0xC0000006); break; case 0xC0000001: cpuid_entry_override(entry, CPUID_C000_0001_EDX); break; + case 0xC0000006: + cpuid_entry_override(entry, CPUID_C000_0006_EAX); + break; + case 3: /* Processor serial number */ case 5: /* MONITOR/MWAIT */ case 0xC0000002: case 0xC0000003: case 0xC0000004: + case 0xC0000005: default: entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index 2f4e155080ba..cd2d6abe4762 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -90,6 +90,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_0021_EAX] = {0x80000021, 0, 
CPUID_EAX}, [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX}, [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX}, + [CPUID_C000_0006_EAX] = {0xc0000006, 0, CPUID_EAX}, }; /* diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 41a4533f9989..631e65a21228 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -60,6 +60,7 @@ struct vmcs_config { u32 pin_based_exec_ctrl; u32 cpu_based_exec_ctrl; u32 cpu_based_2nd_exec_ctrl; + u32 zx_cpu_based_3rd_exec_ctrl; u64 cpu_based_3rd_exec_ctrl; u32 vmexit_ctrl; u32 vmentry_ctrl; @@ -255,6 +256,12 @@ static inline bool cpu_has_vmx_xsaves(void) SECONDARY_EXEC_ENABLE_XSAVES; } +static inline bool cpu_has_vmx_zxpause(void) +{ + return vmcs_config.zx_cpu_based_3rd_exec_ctrl & + ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; +} + static inline bool cpu_has_vmx_waitpkg(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h index 7c1996b433e2..4eabed8e5813 100644 --- a/arch/x86/kvm/vmx/vmcs.h +++ b/arch/x86/kvm/vmx/vmcs.h @@ -50,7 +50,9 @@ struct vmcs_controls_shadow { u32 pin; u32 exec; u32 secondary_exec; + u32 zx_tertiary_exec; u64 tertiary_exec; + u64 zx_vmexit_tsc; }; /* diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 88f4443ef775..a21bff07374c 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -218,6 +218,8 @@ int __read_mostly pt_mode = PT_MODE_SYSTEM; module_param(pt_mode, int, S_IRUGO); #endif +static u32 zx_ext_vmcs_cap; + static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); static DEFINE_MUTEX(vmx_l1d_flush_mutex); @@ -2019,7 +2021,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_UMWAIT_CONTROL: if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) return 1; - + msr_info->data = vmx->msr_ia32_umwait_control; + break; + case MSR_ZX_PAUSE_CONTROL: + if (!msr_info->host_initiated && 
!vmx_guest_zxpause_enabled(vmx)) + return 1; msr_info->data = vmx->msr_ia32_umwait_control; break; case MSR_IA32_SPEC_CTRL: @@ -2279,7 +2285,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) /* The reserved bit 1 and non-32 bit [63:32] should be zero */ if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) return 1; + vmx->msr_ia32_umwait_control = data; + break; + case MSR_ZX_PAUSE_CONTROL: + if (!msr_info->host_initiated && !vmx_guest_zxpause_enabled(vmx)) + return 1; + /* The reserved bit 1 and non-32 bit [63:32] should be zero */ + if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) + return 1; vmx->msr_ia32_umwait_control = data; break; case MSR_IA32_SPEC_CTRL: @@ -2737,6 +2751,10 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf, vmcs_conf->vmentry_ctrl = _vmentry_control; vmcs_conf->misc = misc_msr; + /* Setup Zhaoxin exec-cntl3 VMCS field. */ + if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) + vmcs_conf->zx_cpu_based_3rd_exec_ctrl |= ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; + #if IS_ENABLED(CONFIG_HYPERV) if (enlightened_vmcs) evmcs_sanitize_exec_ctrls(vmcs_conf); @@ -4530,6 +4548,28 @@ static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx) return exec_control; } +static u32 vmx_zx_tertiary_exec_control(struct vcpu_vmx *vmx) +{ + struct kvm_vcpu *vcpu = &vmx->vcpu; + u32 exec_control = vmcs_config.zx_cpu_based_3rd_exec_ctrl; + + /* + * Show errors if Qemu wants to enable guest_zxpause while + * vmx not support it. + */ + if (guest_cpuid_has(vcpu, X86_FEATURE_ZXPAUSE)) { + if (!cpu_has_vmx_zxpause()) + pr_err("VMX not support guest_zxpause!\n"); + else + exec_control |= ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; + } else + exec_control &= ~ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; + + /* enable other features here */ + + return exec_control; +} + /* * Adjust a single secondary execution control bit to intercept/allow an * instruction in the guest. 
This is usually done based on whether or not a @@ -4736,6 +4776,11 @@ static void init_vmcs(struct vcpu_vmx *vmx) if (cpu_has_secondary_exec_ctrls()) secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx)); + if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) { + zx_tertiary_exec_controls_set(vmx, vmx_zx_tertiary_exec_control(vmx)); + zx_vmexit_tsc_controls_set(vmx, 0); + } + if (cpu_has_tertiary_exec_ctrls()) tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx)); @@ -6270,6 +6315,13 @@ void dump_vmcs(struct kvm_vcpu *vcpu) else tertiary_exec_control = 0; + pr_err("*** Zhaoxin Specific Fields ***\n"); + if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) { + pr_err("Zhaoxin TertiaryExec Cntl = 0x%016x\n", + vmcs_read32(ZX_TERTIARY_VM_EXEC_CONTROL)); + pr_err("ZXPAUSE Saved TSC = 0x%016llx\n", vmcs_read64(ZXPAUSE_VMEXIT_TSC)); + } + pr_err("VMCS %p, last attempted VM-entry on CPU %d\n", vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu); pr_err("*** Guest State ***\n"); @@ -7797,6 +7849,11 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vmcs_set_secondary_exec_control(vmx, vmx_secondary_exec_control(vmx)); + if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) { + zx_tertiary_exec_controls_set(vmx, vmx_zx_tertiary_exec_control(vmx)); + zx_vmexit_tsc_controls_set(vmx, 0); + } + if (guest_can_use(vcpu, X86_FEATURE_VMX)) vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX | @@ -7947,6 +8004,9 @@ static __init void vmx_set_cpu_caps(void) if (cpu_has_vmx_waitpkg()) kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG); + + if (cpu_has_vmx_zxpause()) + kvm_cpu_cap_check_and_set(X86_FEATURE_ZXPAUSE); } static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) @@ -8516,6 +8576,12 @@ static __init int hardware_setup(void) unsigned long host_bndcfgs; struct desc_ptr dt; int r; + u32 ign; + + /* Caches Zhaoxin extend VMCS capabilities. 
*/ + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) + rdmsr_safe(MSR_ZX_EXT_VMCS_CAPS, &zx_ext_vmcs_cap, &ign); store_idt(&dt); host_idt_base = dt.address; diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index cbbe5122cfa6..d5b654770d4b 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -581,6 +581,17 @@ static inline u8 vmx_get_rvi(void) #define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL \ (TERTIARY_EXEC_IPI_VIRT) +#define KVM_REQUIRED_VMX_ZX_TERTIARY_VM_EXEC_CONTROL 0 +#define KVM_OPTIONAL_VMX_ZX_TERTIARY_VM_EXEC_CONTROL \ + (ZX_TERTIARY_EXEC_GUEST_ZXPAUSE) + +/* + * We shouldn't rw zxpause_vmexit_tsc vmcs field in this + * way, try to use another way in the future. + */ +#define KVM_REQUIRED_VMX_ZXPAUSE_VMEXIT_TSC 0 +#define KVM_OPTIONAL_VMX_ZXPAUSE_VMEXIT_TSC 1 + #define BUILD_CONTROLS_SHADOW(lname, uname, bits) \ static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val) \ { \ @@ -613,6 +624,8 @@ BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64) +BUILD_CONTROLS_SHADOW(zx_tertiary_exec, ZX_TERTIARY_VM_EXEC_CONTROL, 32) +BUILD_CONTROLS_SHADOW(zx_vmexit_tsc, ZXPAUSE_VMEXIT_TSC, 64) /* * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the @@ -715,6 +728,12 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx) SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; } +static inline bool vmx_guest_zxpause_enabled(struct vcpu_vmx *vmx) +{ + return zx_tertiary_exec_controls_get(vmx) & + ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; +} + static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu) { if (!enable_ept) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 676efdcefaeb..804dfa0c3202 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1462,8 
+1462,8 @@ static const u32 msrs_to_save_base[] = { MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, MSR_IA32_UMWAIT_CONTROL, - MSR_IA32_XFD, MSR_IA32_XFD_ERR, + MSR_ZX_PAUSE_CONTROL, }; static const u32 msrs_to_save_pmu[] = { @@ -7173,6 +7173,10 @@ static void kvm_probe_msr_to_save(u32 msr_index) if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG)) return; break; + case MSR_ZX_PAUSE_CONTROL: + if (!kvm_cpu_cap_has(X86_FEATURE_ZXPAUSE)) + return; + break; case MSR_IA32_RTIT_CTL: case MSR_IA32_RTIT_STATUS: if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 06c4386a620b..3456f6deca51 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -764,6 +764,13 @@ #define MSR_TMTA_LRTI_READOUT 0x80868018 #define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a +/* + * Zhaoxin extend VMCS capabilities: + * bit 0: exec-cntl3 VMCS field. + */ +#define MSR_ZX_EXT_VMCS_CAPS 0x1675 +#define MSR_ZX_VMCS_EXEC_CTL3 BIT(0) + /* Intel defined MSRs. */ #define MSR_IA32_P5_MC_ADDR 0x00000000 #define MSR_IA32_P5_MC_TYPE 0x00000001 -- Gitee From 391bcbda719398ea23563acac815440bb478d643 Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Tue, 14 Nov 2023 15:23:06 +0800 Subject: [PATCH 0865/2138] anolis: arm64: reposition mapping flag to make them visible ANBZ: #8540 If splitting linear mapping is enabled, mapping flags can be reused in other places. Thus they should be repositioned to support dynamicly splitting kernel page table. 
Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/include/asm/mmu.h | 4 ++++ arch/arm64/mm/mmu.c | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 94b68850cb9f..41e393e5c7fd 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -12,6 +12,10 @@ #define USER_ASID_FLAG (UL(1) << USER_ASID_BIT) #define TTBR_ASID_MASK (UL(0xffff) << 48) +#define NO_BLOCK_MAPPINGS BIT(0) +#define NO_CONT_MAPPINGS BIT(1) +#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */ + #ifndef __ASSEMBLY__ #include diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 3e26d444569e..1e637c1e65eb 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -41,10 +41,6 @@ #include #include -#define NO_BLOCK_MAPPINGS BIT(0) -#define NO_CONT_MAPPINGS BIT(1) -#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */ - int idmap_t0sz __ro_after_init; #if VA_BITS > 48 -- Gitee From ff0f0e16444f2a96a2ce80cd1da125c1b6e87b15 Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Tue, 14 Nov 2023 15:30:29 +0800 Subject: [PATCH 0866/2138] anolis: arm64: add page table entry tlb flush helper ANBZ: #8540 Current tlb flush helper __flush_tlb_kernel_pgtable() would flush all entries including non-leaf PUD/PMD, which was not needed. Thus add new tlbflush helper that only flushes the leaf PUD/PMD/PTE entry. 
Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/include/asm/tlbflush.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index b73baaf8ae47..557a8fdaf6c3 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -229,6 +229,10 @@ static inline unsigned long get_trans_granule(void) * determined by 'stride' and only affect any walk-cache entries * if 'last_level' is equal to false. * + * __flush_tlb_kernel_pgtable_entry(addr) + * Invalidate a single kernel mapping for address "addr" on all + * CPUs. Must be called if the corresponding page table is + * last_level entry. * * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented * on top of these routines, since that is our interface to the mmu_gather @@ -481,6 +485,20 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr) dsb(ish); isb(); } + +/* + * Used to invalidate the TLB entries to the last level page table + * (pud/pmd/pte). + */ +static inline void __flush_tlb_kernel_pgtable_entry(unsigned long kaddr) +{ + unsigned long addr = __TLBI_VADDR(kaddr, 0); + + dsb(ishst); + __tlbi(vaale1is, addr); + dsb(ish); + isb(); +} #endif #endif -- Gitee From f87e17923a67e2ea9c938d8fc217ef6a12f10f98 Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Wed, 15 Nov 2023 15:14:56 +0800 Subject: [PATCH 0867/2138] anolis: arm64: support splitting the block mapping kernel page table ANBZ: #8540 When the linear mapping range adopts block or contiguous mapping, the kernel can no longer support to modify the attribute of kernel page table in PTE granularity. Scenarios like RODATA/KFENCE/Crash kernel/Memory failure needs the capability to modify the page table attribute in PTE granularity to ensure the correctness and reliability. 
However, if the linear mapping range is mapped by PTE level, the ratio of TLB miss would increase obviously and the performance of programs would decrease when accessing the linear mapping areas. Currently, the performance and the attribute control of PTE level are incompatible. In practice, changing the page table attributes of PTE granularity is a low-frequency and occasional behavior, which gives us the opportunity to selectively split the block/contiguous mapping into PTE granularity to achieve both requirements above. This patch provides splitting the block/contiguous mapping dynamicly. To avoid the behavior of TLB prefetch, the process of splitting follows break-before-make principle to avoid TLB conflict. Meanwhile, if cleared the kernel page table entry, it is necessary to avoid accessing corresponding address that might be cleared before. In the system boot stage, only one CPU is working. There is no need to consider other CPUs access the cleared range simultaneously. In addition, when initializing the kernel page table, we should avoid the situation that the physical range that the cleared page table entry pointed to contains the physical address of the page table entry. It would cause a kernel page fault if dynamically splitting the page table entry later. 
Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/include/asm/mmu.h | 1 + arch/arm64/include/asm/pgtable.h | 9 + arch/arm64/mm/mmu.c | 353 ++++++++++++++++++++++++++++++- 3 files changed, 359 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 41e393e5c7fd..b290d04db44b 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -76,6 +76,7 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot); extern void mark_linear_text_alias_ro(void); extern bool kaslr_requires_kpti(void); +extern void split_linear_mapping(unsigned long virt, phys_addr_t size, pgprot_t prot); #define INIT_MM_CONTEXT(name) \ .pgd = init_pg_dir, diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 07bdf5dd8ebe..a0a07932d6e6 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -256,6 +256,11 @@ static inline pmd_t pmd_mkcont(pmd_t pmd) return __pmd(pmd_val(pmd) | PMD_SECT_CONT); } +static inline pmd_t pmd_mknoncont(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) & ~PMD_SECT_CONT); +} + static inline pte_t pte_mkdevmap(pte_t pte) { return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL)); @@ -491,6 +496,7 @@ static inline int pmd_trans_huge(pmd_t pmd) #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) +#define pmd_exec(pmd) (!(pmd_val(pmd) & PMD_TABLE_PXN)) static inline pmd_t pmd_mkinvalid(pmd_t pmd) { @@ -685,6 +691,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) #define pud_valid(pud) pte_valid(pud_pte(pud)) #define pud_user(pud) pte_user(pud_pte(pud)) #define pud_user_exec(pud) pte_user_exec(pud_pte(pud)) +#define pud_exec(pud) (!(pud_val(pud) & 
PUD_TABLE_PXN)) static inline void set_pud(pud_t *pudp, pud_t pud) { @@ -752,6 +759,7 @@ static inline pmd_t *pud_pgtable(pud_t pud) #define p4d_none(p4d) (!p4d_val(p4d)) #define p4d_bad(p4d) (!(p4d_val(p4d) & 2)) #define p4d_present(p4d) (p4d_val(p4d)) +#define p4d_exec(p4d) (!(p4d_val(p4d) & P4D_TABLE_PXN)) static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) { @@ -798,6 +806,7 @@ static inline pud_t *p4d_pgtable(p4d_t p4d) #define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;}) /* Match pud_offset folding in */ +#define pud_offset_phys(dir, addr) NULL #define pud_set_fixmap(addr) NULL #define pud_set_fixmap_offset(pgdp, addr) ((pud_t *)pgdp) #define pud_clear_fixmap() diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 1e637c1e65eb..6474a2945f8a 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -165,6 +165,49 @@ bool pgattr_change_is_safe(u64 old, u64 new) return ((old ^ new) & ~mask) == 0; } +/* + * If the physical address of block-mapping pud/pmd or contiguous mapping pmd/pte + * entry is located in the physical range it points to, clearing the entry would + * cause the corresponding physcial range can not be accessed any longer. The + * remapping process of this range can not be done because of inaccessible. + * For this case, it should be mapped with PTE level when initializing the page + * table. 
+ */ +static bool should_clear_cont_pte(pmd_t *pmdp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pte_offset_phys(pmdp, addr); + + return (pa >> CONT_PTE_SHIFT) == (phys >> CONT_PTE_SHIFT); +} + +static bool should_clear_cont_pmd(pud_t *pudp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pmd_offset_phys(pudp, addr); + + return (pa >> CONT_PMD_SHIFT) == (phys >> CONT_PMD_SHIFT); +} + +static bool should_split_pmd(pud_t *pudp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pmd_offset_phys(pudp, addr); + + return (pa >> PMD_SHIFT) == (phys >> PMD_SHIFT); +} + +#ifndef __PAGETABLE_PUD_FOLDED +static bool should_split_pud(p4d_t *p4dp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pud_offset_phys(p4dp, addr); + + return (pa >> PUD_SHIFT) == (phys >> PUD_SHIFT); +} +#else +static bool should_split_pud(p4d_t *p4dp, unsigned long addr, phys_addr_t phys) +{ + return false; +} +#endif + static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot) { @@ -219,7 +262,8 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, /* use a contiguous mapping if the range is suitably aligned */ if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) && - (flags & NO_CONT_MAPPINGS) == 0) + (flags & NO_CONT_MAPPINGS) == 0 && + !should_clear_cont_pte(pmdp, addr, phys)) __prot = __pgprot(pgprot_val(prot) | PTE_CONT); init_pte(pmdp, addr, next, phys, __prot); @@ -236,6 +280,14 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, pmd_t *pmdp; pmdp = pmd_set_fixmap_offset(pudp, addr); + /* + * the physical address of PMDs with contiguous flag might locate in the + * physical range they point to. Thus clear the CONT flag earlier to + * avoid inaccessiable situation. 
+ */ + if (should_clear_cont_pmd(pudp, addr, phys)) + prot = __pgprot(pgprot_val(prot) & ~PTE_CONT); + do { pmd_t old_pmd = READ_ONCE(*pmdp); @@ -243,7 +295,8 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, /* try section mapping first */ if (((addr | next | phys) & ~PMD_MASK) == 0 && - (flags & NO_BLOCK_MAPPINGS) == 0) { + (flags & NO_BLOCK_MAPPINGS) == 0 && + !should_split_pmd(pudp, addr, phys)) { pmd_set_huge(pmdp, phys, prot); /* @@ -336,11 +389,14 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, next = pud_addr_end(addr, end); /* - * For 4K granule only, attempt to put down a 1GB block + * For 4K granule only, attempt to put down a 1GB block. If the + * physical address of pudp is included in the range where + * itself points to, split the block of pudp earlier. */ if (pud_sect_supported() && ((addr | next | phys) & ~PUD_MASK) == 0 && - (flags & NO_BLOCK_MAPPINGS) == 0) { + (flags & NO_BLOCK_MAPPINGS) == 0 && + !should_split_pud(p4dp, addr, phys)) { pud_set_huge(pudp, phys, prot); /* @@ -1486,3 +1542,292 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte { set_pte_at(vma->vm_mm, addr, ptep, pte); } + +static void clear_cont_pte_mapping(pmd_t *pmdp, unsigned long addr, + unsigned long end) +{ + pte_t *ptep, *sptep, pte; + unsigned long saddr, next; + int i; + + /* + * Clear the CONT flag of ptes at the input range. CONT flag should be + * cleared at the granularity of CONT_PTE. 
+ */ + addr &= CONT_PTE_MASK; + if (end & ~CONT_PTE_MASK) + end = (end + CONT_PTE_SIZE) & CONT_PTE_MASK; + + do { + pgprot_t prot; + unsigned long pfn; + + saddr = addr; + next = pte_cont_addr_end(addr, end); + ptep = pte_offset_kernel(pmdp, addr); + pte = READ_ONCE(*ptep); + + if (pte_none(pte)) + continue; + + if (pte_cont(READ_ONCE(*ptep))) { + sptep = ptep; + prot = pte_pgprot(pte_mknoncont(pte)); + pfn = pte_pfn(pte); + + /* + * Changing the bit of contiguous entries requires to + * follow Break-Before-Make approach. See ARM DDI + * 0487A.k_iss10775, "Misprogramming of the Contiguous bit", + * page D4-1762. + */ + for (i = 0; i < CONT_PTES; i++, ptep++) + pte_clear(&init_mm, addr, ptep); + + for (i = 0; i < CONT_PTES; i++, saddr += PAGE_SIZE) + __flush_tlb_kernel_pgtable_entry(saddr); + + for (i = 0; i < CONT_PTES; i++, sptep++, pfn++) + set_pte(sptep, pfn_pte(pfn, prot)); + } + } while (addr = next, addr < end); +} + +static void clear_cont_pmd_mapping(pud_t *pudp, unsigned long addr, + unsigned long end) +{ + pmd_t *pmdp, *spmdp, pmd; + unsigned long saddr, next; + int i; + + addr &= CONT_PMD_MASK; + if (end & ~CONT_PMD_MASK) + end = (end + CONT_PMD_SIZE) & CONT_PMD_MASK; + + do { + pgprot_t prot; + unsigned long pfn, pfn_offset = PMD_SIZE >> PAGE_SHIFT; + + saddr = addr; + next = pmd_cont_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd)); + + if (pte_cont(pmd_pte(pmd))) { + spmdp = pmdp; + prot = pte_pgprot(pmd_pte(pmd_mknoncont(pmd))); + pfn = pmd_pfn(pmd); + + for (i = 0; i < CONT_PMDS; i++, pmdp++) + pmd_clear(pmdp); + + for (i = 0; i < CONT_PMDS; i++, saddr += PMD_SIZE) + __flush_tlb_kernel_pgtable_entry(saddr); + + for (i = 0; i < CONT_PMDS; i++, spmdp++, pfn += pfn_offset) + set_pmd(spmdp, pfn_pmd(pfn, prot)); + } + } while (addr = next, addr < end); +} + +static void split_pmd_mapping(pud_t *pudp, unsigned long addr, unsigned long end, + pgprot_t prot, int 
flags) +{ + pmd_t *pmdp, pmd, split_pmd; + unsigned long next; + int new_flags = 0; + + /* + * Clear the contiguous pmd if there is any splitting request located in + * the corresponding range. + */ + if (flags & NO_CONT_MAPPINGS) + clear_cont_pmd_mapping(pudp, addr, end); + + do { + next = pmd_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd)); + + if (!pmd_exec(pmd)) + flags |= NO_EXEC_MAPPINGS; + + if (pmd_sect(pmd)) { + phys_addr_t phys, pte_phys; + pgprot_t orig_prot; + + phys = __virt_to_phys(addr); + + /* + * Get the original protections except PMD_SECT. + */ + orig_prot = __pgprot(pgprot_val(pte_pgprot(pmd_pte(pmd))) | + PMD_TYPE_TABLE); + + /* + * Allocate a new pmd page to re-initialize + * corresponding ptes. + */ + pte_phys = pgd_pgtable_alloc(PAGE_SHIFT); + split_pmd = pfn_pmd(__phys_to_pfn(pte_phys), orig_prot); + + /* + * If addr/next is not PMD aligned, create contiguous + * mapping at the rest of specific split range. + */ + if (addr & ~PMD_MASK) + alloc_init_cont_pte(&split_pmd, addr & PMD_MASK, addr, + phys & PMD_MASK, prot, + pgd_pgtable_alloc, new_flags); + if (next & ~PMD_MASK) + alloc_init_cont_pte(&split_pmd, next, + (next + PMD_SIZE) & PMD_MASK, + phys + next - addr, prot, + pgd_pgtable_alloc, new_flags); + + alloc_init_cont_pte(&split_pmd, addr, next, phys, prot, + pgd_pgtable_alloc, flags); + + /* + * Obey the break-before-make rule to split the page + * table, otherwise it might trigger CONSTRAINED + * UNPREDICTABLE behaviors because TLB conflict. Thus + * clear the original pmd entry and flush it, then set + * the newly allocated pmd page. 
+ */ + pmd_clear(pmdp); + __flush_tlb_kernel_pgtable_entry(addr); + set_pmd(pmdp, split_pmd); + } else { + clear_cont_pte_mapping(pmdp, addr, next); + } + } while (addr = next, addr < end); +} + +static void split_pud_mapping(p4d_t *p4dp, unsigned long addr, unsigned long end, + pgprot_t prot, int flags) +{ + pud_t *pudp, pud, split_pud; + unsigned long next; + int new_flags = 0; + + do { + next = pud_addr_end(addr, end); + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + + if (pud_none(pud)) + continue; + + WARN_ON(!pud_present(pud)); + + if (!pud_exec(pud)) + flags |= NO_EXEC_MAPPINGS; + + if (pud_sect(pud)) { + phys_addr_t phys, pmd_phys; + pgprot_t orig_prot; + + phys = __virt_to_phys(addr); + + orig_prot = __pgprot(pgprot_val(pte_pgprot(pud_pte(pud))) | + PUD_TYPE_TABLE); + + pmd_phys = pgd_pgtable_alloc(PMD_SHIFT); + split_pud = pfn_pud(__phys_to_pfn(pmd_phys), orig_prot); + + /* + * If addr/next is not PUD aligned, create block and + * contiguous mapping at the rest of specific split range. + */ + if (addr & ~PUD_MASK) + alloc_init_cont_pmd(&split_pud, addr & PUD_MASK, + addr, phys & PUD_MASK, + prot, pgd_pgtable_alloc, new_flags); + if (next & ~PUD_MASK) + alloc_init_cont_pmd(&split_pud, next, + (next + PUD_SIZE) & PUD_MASK, + phys + next - addr, + prot, pgd_pgtable_alloc, new_flags); + + alloc_init_cont_pmd(&split_pud, addr, next, phys, prot, + pgd_pgtable_alloc, flags); + + /* + * Obey the break-before-make rule to split the page + * table, otherwise it might trigger CONSTRAINED + * UNPREDICTABLE behaviors because TLB conflict. Thus + * clear the original pud entry and flush it, then set + * the newly allocated pud page. 
+ */ + pud_clear(pudp); + __flush_tlb_kernel_pgtable_entry(addr); + set_pud(pudp, split_pud); + } else { + split_pmd_mapping(pudp, addr, next, prot, flags); + } + } while (addr = next, addr < end); +} + +static void split_p4d_mapping(pgd_t *pgdp, unsigned long addr, unsigned long end, + pgprot_t prot, int flags) +{ + p4d_t *p4dp, p4d; + unsigned long next; + + do { + next = p4d_addr_end(addr, end); + p4dp = p4d_offset(pgdp, addr); + p4d = READ_ONCE(*p4dp); + + if (p4d_none(p4d)) + continue; + + WARN_ON(!p4d_present(p4d)); + +#if CONFIG_PGTABLE_LEVELS > 3 + /* + * If the original p4d mapping is not executable, remain it even + * splitting. + */ + if (!p4d_exec(p4d)) + flags |= NO_EXEC_MAPPINGS; +#endif + + split_pud_mapping(p4dp, addr, next, prot, flags); + } while (addr = next, addr < end); +} + +void split_linear_mapping(unsigned long virt, phys_addr_t size, pgprot_t prot) +{ + pgd_t *pgdp, pgd; + unsigned long addr, next, end; + int flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; + + addr = virt & PAGE_MASK; + end = PAGE_ALIGN(virt + size); + prot = pgprot_tagged(prot); + + do { + next = pgd_addr_end(addr, end); + pgdp = pgd_offset_k(addr); + pgd = READ_ONCE(*pgdp); + + if (pgd_none(pgd)) + continue; + + WARN_ON(!pgd_present(pgd)); + + split_p4d_mapping(pgdp, addr, next, prot, flags); + } while (addr = next, addr < end); +} -- Gitee From 2857cb220f0ab18df235812bea9a49c40b9c427a Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Wed, 15 Nov 2023 15:33:52 +0800 Subject: [PATCH 0868/2138] anolis: arm64: support splitting page tables after system init ANBZ: #8540 RODATA, KFENCE and memory failure require dynamically splitting of page tables after system boot. After system boot stage, other CPUs may access the cleared page table while splitting the page table, and the stop-machine mechanism needs to be used to avoid the access of other CPUs. 
Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/include/asm/mmu.h | 3 ++- arch/arm64/mm/mmu.c | 46 ++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index b290d04db44b..bad28b274467 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -77,7 +77,8 @@ extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot); extern void mark_linear_text_alias_ro(void); extern bool kaslr_requires_kpti(void); extern void split_linear_mapping(unsigned long virt, phys_addr_t size, pgprot_t prot); - +extern void split_linear_mapping_after_init(unsigned long virt, phys_addr_t size, + pgprot_t prot); #define INIT_MM_CONTEXT(name) \ .pgd = init_pg_dir, diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 6474a2945f8a..8cda2a3d25cf 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -71,6 +72,15 @@ EXPORT_SYMBOL(empty_zero_page); static DEFINE_SPINLOCK(swapper_pgdir_lock); static DEFINE_MUTEX(fixmap_lock); +static DEFINE_MUTEX(split_linear_mapping_lock); + +static struct split_memory_params { + unsigned long virt; + phys_addr_t size; + pgprot_t prot; + + atomic_t cpu_count; +} split_memory_param; void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd) { @@ -1831,3 +1841,39 @@ void split_linear_mapping(unsigned long virt, phys_addr_t size, pgprot_t prot) split_p4d_mapping(pgdp, addr, next, prot, flags); } while (addr = next, addr < end); } + +static int __split_linear_mapping_after_init(void *data) +{ + struct split_memory_params *param = data; + + if (atomic_inc_return(¶m->cpu_count) == 1) { + split_linear_mapping(param->virt, param->size, param->prot); + atomic_inc(¶m->cpu_count); + } else { + while (atomic_read(¶m->cpu_count) <= num_online_cpus()) + cpu_relax(); + } + return 0; +} + +/* 
+ * When splitting the kernel page table through the Break-Before-Make principle, + * other CPUs might access address that mapped by a cleared entry before + * remapping. Thus the stop machine is used to avoid kernel page fault + * caused by inter-CPU synchronization. + */ +void split_linear_mapping_after_init(unsigned long virt, phys_addr_t size, + pgprot_t prot) + +{ + mutex_lock(&split_linear_mapping_lock); + + split_memory_param.virt = virt; + split_memory_param.size = size; + split_memory_param.prot = prot; + atomic_set(&split_memory_param.cpu_count, 0); + + stop_machine(__split_linear_mapping_after_init, &split_memory_param, cpu_online_mask); + + mutex_unlock(&split_linear_mapping_lock); +} -- Gitee From 2345e385165997d11d72bedb52d2e3a982db337d Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Wed, 15 Nov 2023 15:48:55 +0800 Subject: [PATCH 0869/2138] anolis: arm64: support splitting page table of rodata_full enabled ANBZ: #8540 If rodata_full is enabled, each page mapped by the linear mapping range can be set the attribute to Read-Only according to the PTE. Thus, the linear mapping range has to be mapped by all PTE-level to support that. However, changing the attribute to Read-Only is not a frequent event. Splitting the corresponding page is more suitable while guaranteeing the performance. 
Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/include/asm/set_memory.h | 2 ++ arch/arm64/mm/mmu.c | 4 ++-- arch/arm64/mm/pageattr.c | 19 +++++++++++++++++++ 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h index 0f740b781187..20fb7b1d5423 100644 --- a/arch/arm64/include/asm/set_memory.h +++ b/arch/arm64/include/asm/set_memory.h @@ -8,6 +8,8 @@ bool can_set_direct_map(void); #define can_set_direct_map can_set_direct_map +bool can_set_block_and_cont_map(void); + int set_memory_valid(unsigned long addr, int numpages, int enable); int set_direct_map_invalid_noflush(struct page *page); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 8cda2a3d25cf..633cc234d921 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -649,7 +649,7 @@ static void __init map_mem(pgd_t *pgdp) early_kfence_pool = arm64_kfence_alloc_pool(); - if (can_set_direct_map()) + if (!can_set_block_and_cont_map()) flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; /* @@ -1375,7 +1375,7 @@ int arch_add_memory(int nid, u64 start, u64 size, VM_BUG_ON(!mhp_range_allowed(start, size, true)); - if (can_set_direct_map()) + if (!can_set_block_and_cont_map()) flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 0a62f458c5cb..55a6e79bc0cd 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -33,6 +33,15 @@ bool can_set_direct_map(void) arm64_kfence_can_set_direct_map(); } +/* + * If rodata_full is enabled, the mapping of linear mapping range can also be + * block & cont mapping, here decouples the rodata_full and debug_pagealloc. 
+ */ +bool can_set_block_and_cont_map(void) +{ + return !debug_pagealloc_enabled() && !arm64_kfence_can_set_direct_map(); +} + static int change_page_range(pte_t *ptep, unsigned long addr, void *data) { struct page_change_data *cdata = data; @@ -108,6 +117,16 @@ static int change_memory_common(unsigned long addr, int numpages, if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY || pgprot_val(clear_mask) == PTE_RDONLY)) { for (i = 0; i < area->nr_pages; i++) { + unsigned long virt = (unsigned long)page_address(area->pages[i]); + + /* + * Only split the linear mapping when the attribute is + * changed to read only. Other situations do not suffer + * the mapping type. + */ + if (pgprot_val(set_mask) == PTE_RDONLY && can_set_block_and_cont_map()) + split_linear_mapping_after_init(virt, PAGE_SIZE, PAGE_KERNEL); + __change_memory_common((u64)page_address(area->pages[i]), PAGE_SIZE, set_mask, clear_mask); } -- Gitee From 2a16a59b87d998d1110762b87edec1109eb756ad Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Thu, 16 Nov 2023 10:44:25 +0800 Subject: [PATCH 0870/2138] anolis: arm64: support splitting the dynamic allocation range of kfence ANBZ: #8540 Kfence provides the function of dynamically allocating the kfence pool. However, in arm64, the function is supported only the linear mapping range is all PTE-level mapped. Based on splitting dynamically, the kfence pool allocation no longer relies on all PTE-level mapped linear mapping range, but just split the range it allocated into PTE-level. 
Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/include/asm/kfence.h | 13 ++++++++++++- arch/arm64/mm/pageattr.c | 2 +- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h index e5f86bbf4348..df7ebc9fc416 100644 --- a/arch/arm64/include/asm/kfence.h +++ b/arch/arm64/include/asm/kfence.h @@ -14,7 +14,18 @@ static inline bool arch_kfence_init_pool(struct kfence_pool_area *kpa) { - return can_set_direct_map(); + unsigned long addr = (unsigned long)kpa->addr; + + if (!can_set_block_and_cont_map()) + return false; + + /* + * If the allocated range is block and contiguous mapping, split it + * to pte level before re-initializing kfence pages. + */ + split_linear_mapping_after_init(addr, kpa->pool_size, PAGE_KERNEL); + + return true; } static inline bool kfence_protect_page(unsigned long addr, bool protect) diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 55a6e79bc0cd..d6fe70b2089a 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -39,7 +39,7 @@ bool can_set_direct_map(void) */ bool can_set_block_and_cont_map(void) { - return !debug_pagealloc_enabled() && !arm64_kfence_can_set_direct_map(); + return !debug_pagealloc_enabled(); } static int change_page_range(pte_t *ptep, unsigned long addr, void *data) -- Gitee From acd947b489f299d35558f7d429dd6d5d807e7dad Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Thu, 16 Nov 2023 11:46:16 +0800 Subject: [PATCH 0871/2138] anolis: arm64: replace can_set_direct_map by splitting linear mapping ANBZ: #8540 Currently, can_set_direct_map has no callers any more, replace it with another function to indicate the linear region can be split. 
Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/include/asm/kfence.h | 11 +++-------- arch/arm64/include/asm/set_memory.h | 3 --- arch/arm64/mm/pageattr.c | 25 +++++++------------------ include/linux/set_memory.h | 12 ------------ 4 files changed, 10 insertions(+), 41 deletions(-) diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h index df7ebc9fc416..44994e2a6d88 100644 --- a/arch/arm64/include/asm/kfence.h +++ b/arch/arm64/include/asm/kfence.h @@ -8,10 +8,13 @@ #ifndef __ASM_KFENCE_H #define __ASM_KFENCE_H +#ifdef CONFIG_KFENCE #include #include +extern bool kfence_early_init; + static inline bool arch_kfence_init_pool(struct kfence_pool_area *kpa) { unsigned long addr = (unsigned long)kpa->addr; @@ -37,14 +40,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) static inline bool arch_kfence_free_pool(unsigned long addr) { return false; } -#ifdef CONFIG_KFENCE -extern bool kfence_early_init; -static inline bool arm64_kfence_can_set_direct_map(void) -{ - return !kfence_early_init; -} -#else /* CONFIG_KFENCE */ -static inline bool arm64_kfence_can_set_direct_map(void) { return false; } #endif /* CONFIG_KFENCE */ #endif /* __ASM_KFENCE_H */ diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h index 20fb7b1d5423..3f5d866b98d0 100644 --- a/arch/arm64/include/asm/set_memory.h +++ b/arch/arm64/include/asm/set_memory.h @@ -5,9 +5,6 @@ #include -bool can_set_direct_map(void); -#define can_set_direct_map can_set_direct_map - bool can_set_block_and_cont_map(void); int set_memory_valid(unsigned long addr, int numpages, int enable); diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index d6fe70b2089a..d20986e457a7 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -20,19 +20,6 @@ struct page_change_data { bool rodata_full __ro_after_init = 
IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED); -bool can_set_direct_map(void) -{ - /* - * rodata_full and DEBUG_PAGEALLOC require linear map to be - * mapped at page granularity, so that it is possible to - * protect/unprotect single pages. - * - * KFENCE pool requires page-granular mapping if initialized late. - */ - return rodata_full || debug_pagealloc_enabled() || - arm64_kfence_can_set_direct_map(); -} - /* * If rodata_full is enabled, the mapping of linear mapping range can also be * block & cont mapping, here decouples the rodata_full and debug_pagealloc. @@ -188,8 +175,9 @@ int set_direct_map_invalid_noflush(struct page *page) .clear_mask = __pgprot(PTE_VALID), }; - if (!can_set_direct_map()) - return 0; + if (can_set_block_and_cont_map()) + split_linear_mapping_after_init((unsigned long)page_address(page), + PAGE_SIZE, PAGE_KERNEL); return apply_to_page_range(&init_mm, (unsigned long)page_address(page), @@ -203,8 +191,9 @@ int set_direct_map_default_noflush(struct page *page) .clear_mask = __pgprot(PTE_RDONLY), }; - if (!can_set_direct_map()) - return 0; + if (can_set_block_and_cont_map()) + split_linear_mapping_after_init((unsigned long)page_address(page), + PAGE_SIZE, PAGE_KERNEL); return apply_to_page_range(&init_mm, (unsigned long)page_address(page), @@ -214,7 +203,7 @@ int set_direct_map_default_noflush(struct page *page) #ifdef CONFIG_DEBUG_PAGEALLOC void __kernel_map_pages(struct page *page, int numpages, int enable) { - if (!can_set_direct_map()) + if (can_set_block_and_cont_map()) return; set_memory_valid((unsigned long)page_address(page), numpages, enable); diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 95ac8398ee72..7ca06b42672d 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -38,18 +38,6 @@ static inline bool kernel_page_present(struct page *page) { return true; } -#else /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */ -/* - * Some architectures, e.g. 
ARM64 can disable direct map modifications at - * boot time. Let them overrive this query. - */ -#ifndef can_set_direct_map -static inline bool can_set_direct_map(void) -{ - return true; -} -#define can_set_direct_map can_set_direct_map -#endif #endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */ #ifdef CONFIG_X86_64 -- Gitee From e17ce2dca735ffaa9ccec091207ebbae717af2d5 Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Wed, 9 Nov 2022 11:17:05 +0800 Subject: [PATCH 0872/2138] anolis: arm64: add the helper to set no-present page table ANBZ: #8540 Add a helper set_memory_np to make the page table corresponding to the virtual address not present. Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/mm/pageattr.c | 12 ++++++++++++ include/asm-generic/set_memory.h | 2 +- include/linux/set_memory.h | 1 + 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index d20986e457a7..801ac339298a 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -168,6 +168,18 @@ int set_memory_valid(unsigned long addr, int numpages, int enable) __pgprot(PTE_VALID)); } +int set_memory_np(unsigned long addr, int numpages) +{ + /* + * If the addr belongs to linear mapping range, split it to pte level + * before changing the attribute of the page table. 
+ */ + if (can_set_block_and_cont_map() && __is_lm_address(addr)) + split_linear_mapping_after_init(addr, PAGE_SIZE * numpages, PAGE_KERNEL); + + return set_memory_valid(addr, numpages, 0); +} + int set_direct_map_invalid_noflush(struct page *page) { struct page_change_data data = { diff --git a/include/asm-generic/set_memory.h b/include/asm-generic/set_memory.h index c86abf6bc7ba..caad5193913c 100644 --- a/include/asm-generic/set_memory.h +++ b/include/asm-generic/set_memory.h @@ -9,5 +9,5 @@ int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); - +int set_memory_np(unsigned long addr, int numpages); #endif diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 7ca06b42672d..e77a3345d20b 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -12,6 +12,7 @@ static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_np(unsigned long addr, int numpages) { return 0; } #endif #ifndef set_memory_rox -- Gitee From 22b23bbff2b52447c2813ba62809e7a057194f62 Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Thu, 16 Nov 2023 17:46:50 +0800 Subject: [PATCH 0873/2138] anolis: mm: avoid speculative access after memory failure ANBZ: #8540 Memory failure would pass an interrupt when the hardware detects a memory page corrupted, then the kernel needs to unmap uncorrectable pages to avoid access and allocation. However, in the implementation like arm64, the uncorrectable page is unmapped only in the page table of user-space process, the page table of the linear mapping range is not considered. 
It incurs the cpu will continue to accept errors through interrupts because of speculative access. For that, the uncorrectable page needs to be set to invalid to avoid speculative access. If the linear mapping is block mapping, we should firstly split it to pte level. Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- mm/memory-failure.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 9018a1162efc..c400b15b4d41 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -60,6 +60,7 @@ #include #include #include +#include #include "swap.h" #include "internal.h" #include "ras/ras_event.h" @@ -2454,10 +2455,17 @@ static void memory_failure_work_func(struct work_struct *work) raw_spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); if (!gotten) break; - if (entry.flags & MF_SOFT_OFFLINE) + if (entry.flags & MF_SOFT_OFFLINE) { soft_offline_page(entry.pfn, entry.flags); - else - memory_failure(entry.pfn, entry.flags); + } else if (!memory_failure(entry.pfn, entry.flags)) { + /* + * If the pfn reported by ghes can not be recovered, set + * the corresponding page table of linear mapping range + * to be non-present, which avoids the speculative + * access of corrupted memory. + */ + set_memory_np((unsigned long)page_to_virt(pfn_to_page(entry.pfn)), 1); + } } } -- Gitee From ada7270b3f358885d79c996225cb88bd7b1b7087 Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Thu, 7 Dec 2023 10:21:13 +0800 Subject: [PATCH 0874/2138] anolis: arm64: use XN table mapping attributes for the kfence region ANBZ: #8540 Kfence region is located in the linear region, thus we also need to set PXN attributes for all table entries in the kfence region. 
Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2885 --- arch/arm64/mm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 633cc234d921..1884c70f359b 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -617,7 +617,7 @@ static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) /* KFENCE pool needs page-level mapping. */ __map_memblock(pgdp, kfence_pool, kfence_pool + kfence_pool_size, pgprot_tagged(PAGE_KERNEL), - NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); + NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS | NO_EXEC_MAPPINGS); memblock_clear_nomap(kfence_pool, kfence_pool_size); __kfence_pool_early_init = phys_to_virt(kfence_pool); } -- Gitee From 61dae4b91c4e22acb05d1a7e61247b1d09d2d9de Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 8 Apr 2024 14:23:31 +0800 Subject: [PATCH 0875/2138] anolis: Add early quirk to identify kh-40000 ANBZ: #8663 Identify kh-40000 platforms by specific PCI device's version number. 
Signed-off-by: leoliu-oc Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2966 --- arch/x86/kernel/early-quirks.c | 19 +++++++++++++++++++ include/linux/dma-map-ops.h | 10 ++++++++++ 2 files changed, 29 insertions(+) diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index a6c1867fc7aa..ddb857d94ed9 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -28,6 +29,7 @@ #include #include #include +#include static void __init fix_hypertransport_config(int num, int slot, int func) { @@ -685,6 +687,19 @@ static void __init apple_airport_reset(int bus, int slot, int func) early_iounmap(mmio, BCM4331_MMIO_SIZE); } +bool is_zhaoxin_kh40000; + +static void quirk_zhaoxin_dma_patch(int num, int slot, int func) +{ + u8 revision; + + revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); + if (revision == 0x10) { + is_zhaoxin_kh40000 = true; + pr_info("zhaoxin direct dma patch enabled\n"); + } +} + #define QFLAG_APPLY_ONCE 0x1 #define QFLAG_APPLIED 0x2 #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) @@ -728,6 +743,10 @@ static struct chipset early_qrk[] __initdata = { PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, { PCI_VENDOR_ID_BROADCOM, 0x4331, PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset}, + { PCI_VENDOR_ID_ZHAOXIN, 0x1001, PCI_CLASS_BRIDGE_HOST, + PCI_BASE_CLASS_BRIDGE, QFLAG_APPLY_ONCE, quirk_zhaoxin_dma_patch }, + { PCI_VENDOR_ID_ZHAOXIN, 0x345B, PCI_CLASS_BRIDGE_HOST, + PCI_BASE_CLASS_BRIDGE, QFLAG_APPLY_ONCE, quirk_zhaoxin_dma_patch }, {} }; diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index f2fc203fb8a1..f8451912178a 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -509,4 +509,14 @@ pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, } #endif /* CONFIG_PCI_P2PDMA */ +#if defined CONFIG_PCI 
&& defined CONFIG_X86 + +extern bool is_zhaoxin_kh40000; + +#else + +bool __weak is_zhaoxin_kh40000; + +#endif + #endif /* _LINUX_DMA_MAP_OPS_H */ -- Gitee From 3588434bc6991b04a61f972797031594831f47a2 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 8 Apr 2024 20:35:27 +0800 Subject: [PATCH 0876/2138] anolis: Add kh40000_direct_dma_ops for KH-40000 platform ANBZ: #8663 Add 'kh40000_direct_dma_ops' to replace 'direct_dma_ops' for KH-40000 platform. For coherent DMA access, memory can be allocated only from the memory node of the node where the device resides. For streaming DMA access, add a PCI read operation at the end of DMA access. Signed-off-by: leoliu-oc Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2966 --- .../admin-guide/kernel-parameters.txt | 5 + arch/x86/kernel/Makefile | 1 + arch/x86/kernel/early-quirks.c | 1 + arch/x86/kernel/zhaoxin_kh40000.c | 176 ++++++++++++++++++ include/linux/dma-map-ops.h | 1 + kernel/dma/contiguous.c | 3 + 6 files changed, 187 insertions(+) create mode 100644 arch/x86/kernel/zhaoxin_kh40000.c diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 302697c9397b..96f2afba6d90 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2316,6 +2316,11 @@ isapnp= [ISAPNP] Format: ,,, + zhaoxin_patch_bitmask= + [X86] Bitmask for Zhaoxin Platform's patch. + bit 0: enable KH-40000 dma patch's node check function + + isolcpus= [KNL,SMP,ISOL] Isolate a given set of CPUs from disturbance. 
[Deprecated - use cpusets instead] Format: [flag-list,] diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index c25d40cbbdbe..2b433325ca8f 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -159,6 +159,7 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_MMCONF_FAM10H) += mmconf-fam10h_64.o obj-y += vsmp_64.o + obj-$(CONFIG_PCI) += zhaoxin_kh40000.o endif obj-$(CONFIG_HYGON_CSV) += csv.o diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index ddb857d94ed9..b5f5e0916894 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -696,6 +696,7 @@ static void quirk_zhaoxin_dma_patch(int num, int slot, int func) revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); if (revision == 0x10) { is_zhaoxin_kh40000 = true; + dma_ops = &kh40000_dma_direct_ops; pr_info("zhaoxin direct dma patch enabled\n"); } } diff --git a/arch/x86/kernel/zhaoxin_kh40000.c b/arch/x86/kernel/zhaoxin_kh40000.c new file mode 100644 index 000000000000..c477b18892fa --- /dev/null +++ b/arch/x86/kernel/zhaoxin_kh40000.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "../../../kernel/dma/direct.h" + +/*** + * usage: + * set "zhaoxin_patch_bitmask=" in cmdline + * value description: + * bit 0: enable(1) node check or not(0). 
default 1 + */ +enum { + ZHAOXIN_P2CW_NODE_CHECK = BIT(0), + ZHAOXIN_PATCH_CODE_MAX = ZHAOXIN_P2CW_NODE_CHECK, +}; + +#define ZHAOXIN_PATCH_CODE_DEFAULT ZHAOXIN_P2CW_NODE_CHECK + +unsigned long zhaoxin_patch_code = ZHAOXIN_PATCH_CODE_DEFAULT; + +static int __init zhaoxin_patch_code_setup(char *str) +{ + int err = kstrtoul(str, 0, &zhaoxin_patch_code); + + if (err || (zhaoxin_patch_code > ZHAOXIN_PATCH_CODE_MAX)) { + pr_err("cmdline 'zhaoxin_patch_bitmask=%s' inappropriate\n", + str); + return err; + } + + if (ZHAOXIN_P2CW_NODE_CHECK | zhaoxin_patch_code) + pr_info("zhaoxin dma patch node check is enabled\n"); + + return 0; +} +__setup("zhaoxin_patch_bitmask=", zhaoxin_patch_code_setup); + +static struct pci_dev *kh40000_get_pci_dev(struct device *dev) +{ + if (dev_is_pci(dev)) + return to_pci_dev(dev); + + if (dev->parent) + return kh40000_get_pci_dev(dev->parent); + + return NULL; +} + +static void kh40000_sync_single_dma_for_cpu(struct device *dev, dma_addr_t paddr, + enum dma_data_direction dir, bool is_iommu) +{ + u8 vid; + struct pci_dev *pci; + u64 dma_mask = *dev->dma_mask; + + /* check direction */ + if ((dir != DMA_FROM_DEVICE) && (dir != DMA_BIDIRECTIONAL)) + return; + + /* check dma capability */ + if (dma_mask <= DMA_BIT_MASK(32)) + return; + + /* check device type */ + pci = kh40000_get_pci_dev(dev); + if (pci == NULL) + return; + + /* get real physical address */ + if (is_iommu) { + struct iommu_domain *domain = iommu_get_dma_domain(dev); + + paddr = iommu_iova_to_phys(domain, paddr); + if (!paddr) + return; + } + + /* check node or not */ + if ((zhaoxin_patch_code & ZHAOXIN_P2CW_NODE_CHECK) + && pfn_to_nid(PFN_DOWN(paddr)) == dev_to_node(dev)) + return; + + /* flush data by one pci read cycle */ + pci_read_config_byte(pci, PCI_VENDOR_ID, &vid); +} + +/* zhaoxin kh-40000 direct dma ops */ +static void *kh40000_dma_direct_alloc(struct device *dev, size_t size, + dma_addr_t *addr, gfp_t gfp, unsigned long attrs) +{ + if (dev->coherent_dma_mask > 
DMA_BIT_MASK(32)) + gfp |= __GFP_THISNODE; + + return dma_direct_alloc(dev, size, addr, gfp, attrs); +} + +static void kh40000_dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); + dma_direct_unmap_page(dev, addr, size, dir, attrs); +} + +static void kh40000_dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 0); + + dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir); +} + +static void kh40000_dma_direct_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); + dma_direct_sync_single_for_cpu(dev, addr, size, dir); +} + +static void kh40000_dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 0); + + dma_direct_unmap_sg(dev, sgl, nents, dir, attrs); +} + +static void kh40000_dma_direct_unmap_resource(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); +} + +const struct dma_map_ops kh40000_dma_direct_ops = { + .flags = DMA_F_PCI_P2PDMA_SUPPORTED, + .alloc = kh40000_dma_direct_alloc, + .sync_sg_for_cpu = kh40000_dma_direct_sync_sg_for_cpu, + .unmap_page = kh40000_dma_direct_unmap_page, + .sync_single_for_cpu = kh40000_dma_direct_sync_single_for_cpu, + .unmap_sg = kh40000_dma_direct_unmap_sg, + .unmap_resource = kh40000_dma_direct_unmap_resource, + .dma_supported = dma_direct_supported, + .free = dma_direct_free, + .alloc_pages = 
dma_direct_alloc_pages, + .free_pages = dma_direct_free_pages, + .sync_single_for_device = dma_direct_sync_single_for_device, + .sync_sg_for_device = dma_direct_sync_sg_for_device, + .get_required_mask = dma_direct_get_required_mask, + .max_mapping_size = dma_direct_max_mapping_size, + .mmap = dma_direct_mmap, + .get_sgtable = dma_direct_get_sgtable, + .map_page = dma_direct_map_page, + .map_sg = dma_direct_map_sg, + .map_resource = dma_direct_map_resource, +}; diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index f8451912178a..aa2255e1b9a1 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -512,6 +512,7 @@ pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, #if defined CONFIG_PCI && defined CONFIG_X86 extern bool is_zhaoxin_kh40000; +extern const struct dma_map_ops kh40000_dma_direct_ops; #else diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c index f005c66f378c..8b860c7ecabc 100644 --- a/kernel/dma/contiguous.c +++ b/kernel/dma/contiguous.c @@ -224,6 +224,9 @@ void __init dma_contiguous_reserve(phys_addr_t limit) dma_numa_cma_reserve(); + if (is_zhaoxin_kh40000) + return; + pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit); if (size_cmdline != -1) { -- Gitee From f3e36d87557375c9eb4f097e2a66dd1f55b940fc Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 8 Apr 2024 14:25:45 +0800 Subject: [PATCH 0877/2138] anolis: Add kh40000_iommu_dma_ops for KH-40000 platform ANBZ: #8663 Add 'kh40000_iommu_dma_ops' to replace 'intel_dma_ops' for KH-40000 platform. For coherent DMA access, memory can be allocated only from the memory node of the node where the device resides. For streaming DMA access, add a PCI read operation at the end of DMA access. 
Signed-off-by: leoliu-oc Reviewed-by: Guanjun Link: https://gitee.com/anolis/cloud-kernel/pulls/2966 --- arch/x86/kernel/zhaoxin_kh40000.c | 175 ++++++++++++++++++++++++++++++ drivers/iommu/intel/iommu.c | 3 + include/linux/dma-map-ops.h | 6 + 3 files changed, 184 insertions(+) diff --git a/arch/x86/kernel/zhaoxin_kh40000.c b/arch/x86/kernel/zhaoxin_kh40000.c index c477b18892fa..e8dd3bd43e72 100644 --- a/arch/x86/kernel/zhaoxin_kh40000.c +++ b/arch/x86/kernel/zhaoxin_kh40000.c @@ -174,3 +174,178 @@ const struct dma_map_ops kh40000_dma_direct_ops = { .map_sg = dma_direct_map_sg, .map_resource = dma_direct_map_resource, }; + +/* zhaoxin kh-40000 iommu dma ops */ +static const struct dma_map_ops *iommu_dma_ops; + +static void *kh40000_iommu_dma_alloc(struct device *dev, size_t size, + dma_addr_t *addr, gfp_t gfp, unsigned long attrs) +{ + gfp |= __GFP_THISNODE; + + return iommu_dma_ops->alloc(dev, size, addr, gfp, attrs); +} + +static void kh40000_iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle, unsigned long attrs) +{ + iommu_dma_ops->free(dev, size, cpu_addr, handle, attrs); +} + +static struct page *kh40000_dma_common_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) +{ + return iommu_dma_ops->alloc_pages(dev, size, dma_handle, dir, gfp); +} + +static void kh40000_dma_common_free_pages(struct device *dev, size_t size, struct page *page, + dma_addr_t dma_handle, enum dma_data_direction dir) +{ + iommu_dma_ops->free_pages(dev, size, page, dma_handle, dir); +} + +static struct sg_table *kh40000_iommu_dma_alloc_noncontiguous(struct device *dev, + size_t size, enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs) +{ + return iommu_dma_ops->alloc_noncontiguous(dev, size, dir, gfp, attrs); +} + +static void kh40000_iommu_dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir) +{ + return 
iommu_dma_ops->free_noncontiguous(dev, size, sgt, dir); +} + +static int kh40000_iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ + return iommu_dma_ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); +} + +static void kh40000_iommu_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); + iommu_dma_ops->unmap_page(dev, addr, size, dir, attrs); +} + +static int kh40000_iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ + return iommu_dma_ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); +} + +static dma_addr_t kh40000_iommu_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + return iommu_dma_ops->map_page(dev, page, offset, size, dir, attrs); +} + +static int kh40000_iommu_dma_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + return iommu_dma_ops->map_sg(dev, sgl, nents, dir, attrs); +} + +static void kh40000_iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nelems, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 1); + iommu_dma_ops->unmap_sg(dev, sgl, nelems, dir, attrs); +} + +static void kh40000_iommu_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); + iommu_dma_ops->sync_single_for_cpu(dev, addr, size, dir); +} + +static void kh40000_iommu_dma_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum 
dma_data_direction dir) +{ + iommu_dma_ops->sync_single_for_device(dev, addr, size, dir); +} + +static void kh40000_iommu_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nelems, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 1); + iommu_dma_ops->sync_sg_for_cpu(dev, sgl, nelems, dir); +} + +static void kh40000_iommu_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir) +{ + iommu_dma_ops->sync_sg_for_device(dev, sgl, nelems, dir); +} + +static dma_addr_t kh40000_iommu_dma_map_resource(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + return iommu_dma_ops->map_resource(dev, phys, size, dir, attrs); +} + +static void kh40000_iommu_dma_unmap_resource(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); + iommu_dma_ops->unmap_resource(dev, addr, size, dir, attrs); +} + +static unsigned long kh40000_iommu_dma_get_merge_boundary(struct device *dev) +{ + return iommu_dma_ops->get_merge_boundary(dev); +} + +static size_t kh40000_iommu_dma_opt_mapping_size(void) +{ + return iommu_dma_ops->opt_mapping_size(); +} + +const struct dma_map_ops kh40000_dma_iommu_ops = { + .flags = DMA_F_PCI_P2PDMA_SUPPORTED, + .alloc = kh40000_iommu_dma_alloc, + .free = kh40000_iommu_dma_free, + .unmap_page = kh40000_iommu_dma_unmap_page, + .alloc_pages = kh40000_dma_common_alloc_pages, + .free_pages = kh40000_dma_common_free_pages, + .alloc_noncontiguous = kh40000_iommu_dma_alloc_noncontiguous, + .free_noncontiguous = kh40000_iommu_dma_free_noncontiguous, + .mmap = kh40000_iommu_dma_mmap, + .get_sgtable = kh40000_iommu_dma_get_sgtable, + .map_page = kh40000_iommu_dma_map_page, + .map_sg = kh40000_iommu_dma_map_sg, + .unmap_sg = 
kh40000_iommu_dma_unmap_sg, + .sync_single_for_cpu = kh40000_iommu_dma_sync_single_for_cpu, + .sync_single_for_device = kh40000_iommu_dma_sync_single_for_device, + .sync_sg_for_cpu = kh40000_iommu_dma_sync_sg_for_cpu, + .sync_sg_for_device = kh40000_iommu_dma_sync_sg_for_device, + .map_resource = kh40000_iommu_dma_map_resource, + .unmap_resource = kh40000_iommu_dma_unmap_resource, + .get_merge_boundary = kh40000_iommu_dma_get_merge_boundary, + .opt_mapping_size = kh40000_iommu_dma_opt_mapping_size, +}; + +void kh40000_set_iommu_dma_ops(struct device *dev) +{ + if (dev->dma_ops) { + iommu_dma_ops = dev->dma_ops; + set_dma_ops(dev, &kh40000_dma_iommu_ops); + pr_info_once("zhaoxin iommu dma patch enabled\n"); + } +} diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index d6381c00bb8d..ffc528ea7285 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4445,6 +4445,9 @@ static void intel_iommu_probe_finalize(struct device *dev) { set_dma_ops(dev, NULL); iommu_setup_dma_ops(dev, 0, U64_MAX); + + if (is_zhaoxin_kh40000) + kh40000_set_iommu_dma_ops(dev); } static void intel_iommu_get_resv_regions(struct device *device, diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index aa2255e1b9a1..0ce2ae6c944d 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -513,10 +513,16 @@ pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, extern bool is_zhaoxin_kh40000; extern const struct dma_map_ops kh40000_dma_direct_ops; +void kh40000_set_iommu_dma_ops(struct device *dev); #else bool __weak is_zhaoxin_kh40000; +static inline void kh40000_set_iommu_dma_ops(struct device *dev) +{ + +} + #endif -- Gitee From 11a28bcbfbd735e20a3e1c1016c8c56740b2c261 Mon Sep 17 00:00:00 2001 From: Shuai Xue Date: Mon, 22 May 2023 16:29:33 +0800 Subject: [PATCH 0878/2138] anolis: add AER config dependency sanity check to fix build error ANBZ: #8642 The AER cached capability position is 
defined only when CONFIG_PCIEAER is selected on. Commit ce6b528f9aee ("anolis: pci: fix quirk for Yitian 710 to support AER fatal error recovery") add a quirk to save/restore PCIe AER Capability register without CONFIG_PCIEAER sanity check. As a result, the kernel build fails when configuration is based on 'x86_64_defconfig' To fix it, add AER config dependency sanity check. Fixes: 92154a20416b ("anolis: pci: fix quirk for Yitian 710 to support AER fatal error recovery") Fixes: c172093138d6 ("anolis: pci: Add a quirk for ALIBABA yitian710 to recover fatal aer") Signed-off-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/2086 Signed-off-by: Ruidong Tian Link: https://gitee.com/anolis/cloud-kernel/pulls/3031 --- drivers/pci/pci.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 29e1d3ba869a..4a5f5c9d189d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -5265,9 +5265,12 @@ static void pci_save_yitian710_regs(struct pci_dev *dev, if (dev->acs_cap) pci_read_config_dword(dev, dev->acs_cap + PCI_ACS_CAP, &saved->acs_cap_ctrl); + +#ifdef CONFIG_PCIEAER if (dev->aer_cap) pci_read_config_dword(dev, dev->aer_cap + PCI_ERR_ROOT_COMMAND, &saved->root_err_cmd); +#endif pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &saved->slot_ctrl); } @@ -5293,10 +5296,13 @@ static void pci_restore_yitian710_regs(struct pci_dev *dev, if (dev->acs_cap) pci_write_config_dword(dev, dev->acs_cap + PCI_ACS_CAP, saved->acs_cap_ctrl); + +#ifdef CONFIG_PCIEAER /* restore AER Root Error Command Register */ if (dev->aer_cap) pci_write_config_dword(dev, dev->aer_cap + PCI_ERR_ROOT_COMMAND, saved->root_err_cmd); +#endif /* restore Slot Control Register */ pcie_capability_write_word(dev, PCI_EXP_SLTCTL, saved->slot_ctrl); -- Gitee From 86c4de5169bf618133cba14a8d079a89bd24e4a8 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Wed, 10 Apr 2024 15:24:57 +0800 Subject: [PATCH 0879/2138] anolis: configs: adjust some 
driver related kconfigs ANBZ: #8598 Adjust follow kconfigs. For arm64: CONFIG_I2C=y CONFIG_DRM_PHYTIUM=m For x86: CONFIG_I2C=y CONFIG_VIRTIO_MMIO=m Signed-off-by: Qiao Ma Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3039 --- arch/arm64/configs/anolis-debug_defconfig | 65 ++++++++++++++++++----- arch/arm64/configs/anolis_defconfig | 65 ++++++++++++++++++----- arch/x86/configs/anolis-debug_defconfig | 56 ++++++++++++------- arch/x86/configs/anolis_defconfig | 56 ++++++++++++------- 4 files changed, 178 insertions(+), 64 deletions(-) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 65ebc3694ef2..13db02910f8b 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm64 6.6.7 Kernel Configuration +# Linux/arm64 6.6.25 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" CONFIG_CC_IS_GCC=y @@ -171,7 +171,7 @@ CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y CONFIG_CC_HAS_INT128=y CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" -CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_GCC10_NO_ARRAY_BOUNDS=y CONFIG_CC_NO_ARRAY_BOUNDS=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_NUMA_BALANCING=y @@ -399,7 +399,9 @@ CONFIG_ARM64_ERRATUM_2067961=y CONFIG_ARM64_ERRATUM_2441009=y CONFIG_ARM64_ERRATUM_2457168=y CONFIG_ARM64_ERRATUM_2645198=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD=y CONFIG_ARM64_ERRATUM_2966298=y +CONFIG_ARM64_ERRATUM_3117295=y CONFIG_CAVIUM_ERRATUM_22375=y CONFIG_CAVIUM_ERRATUM_23144=y CONFIG_CAVIUM_ERRATUM_23154=y @@ -679,6 +681,7 @@ CONFIG_ACPI_IORT=y CONFIG_ACPI_GTDT=y CONFIG_ACPI_AGDI=y CONFIG_ACPI_APMT=y +CONFIG_ACPI_MPAM=y CONFIG_ACPI_PPTT=y CONFIG_ACPI_PCC=y # CONFIG_ACPI_FFH is not set @@ -807,6 +810,7 @@ CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y CONFIG_STRICT_KERNEL_RWX=y 
CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_CPU_RESCTRL=y CONFIG_HAVE_ARCH_COMPILER_H=y CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y CONFIG_ARCH_USE_MEMREMAP_PROT=y @@ -2066,6 +2070,7 @@ CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y CONFIG_UEFI_CPER=y CONFIG_UEFI_CPER_ARM=y +# CONFIG_YITIAN_CPER_RAWDATA is not set CONFIG_ARM_PSCI_FW=y # CONFIG_ARM_PSCI_CHECKER is not set CONFIG_HAVE_ARM_SMCCC=y @@ -2320,10 +2325,6 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y # end of Texas Instruments shared transport line discipline # CONFIG_SENSORS_LIS3_I2C is not set - -# -# Altera FPGA firmware download module (requires I2C) -# # CONFIG_ALTERA_STAPL is not set # CONFIG_VMWARE_VMCI is not set # CONFIG_GENWQE is not set @@ -3356,7 +3357,8 @@ CONFIG_TCG_CRB=y # # I2C support # -CONFIG_I2C=m +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y CONFIG_I2C_BOARDINFO=y CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m @@ -3509,7 +3511,6 @@ CONFIG_SPI_QUP=y # CONFIG_SPI_XCOMM is not set # CONFIG_SPI_XILINX is not set CONFIG_SPI_XLP=m -# CONFIG_SPI_ZYNQMP_GQSPI is not set # CONFIG_SPI_AMD is not set # @@ -3567,6 +3568,7 @@ CONFIG_GENERIC_PINCONF=y # CONFIG_PINCTRL_OCELOT is not set # CONFIG_PINCTRL_SINGLE is not set # CONFIG_PINCTRL_STMFX is not set +# CONFIG_PINCTRL_SX150X is not set CONFIG_PINCTRL_MSM=y # CONFIG_PINCTRL_IPQ5018 is not set # CONFIG_PINCTRL_IPQ5332 is not set @@ -4069,7 +4071,11 @@ CONFIG_BCMA_DRIVER_GPIO=y # CONFIG_MFD_CORE=m # CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set # CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set # CONFIG_MFD_ATMEL_FLEXCOM is not set # CONFIG_MFD_ATMEL_HLCDC is not set # CONFIG_MFD_BCM590XX is not set @@ -4078,7 +4084,10 @@ CONFIG_MFD_CORE=m # CONFIG_MFD_CS42L43_I2C is not set # CONFIG_MFD_MADERA is not set # CONFIG_MFD_MAX5970 is not set +# CONFIG_PMIC_DA903X is not set # CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# 
CONFIG_MFD_DA9055 is not set # CONFIG_MFD_DA9062 is not set # CONFIG_MFD_DA9063 is not set # CONFIG_MFD_DA9150 is not set @@ -4096,12 +4105,19 @@ CONFIG_MFD_CORE=m # CONFIG_MFD_KEMPLD is not set # CONFIG_MFD_88PM800 is not set # CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77541 is not set +# CONFIG_MFD_MAX77620 is not set # CONFIG_MFD_MAX77650 is not set # CONFIG_MFD_MAX77686 is not set # CONFIG_MFD_MAX77693 is not set # CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX77843 is not set # CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set # CONFIG_MFD_MT6360 is not set # CONFIG_MFD_MT6370 is not set # CONFIG_MFD_MT6397 is not set @@ -4119,38 +4135,55 @@ CONFIG_MFD_CORE=m # CONFIG_MFD_RT4831 is not set # CONFIG_MFD_RT5033 is not set # CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RC5T583 is not set # CONFIG_MFD_RK8XX_I2C is not set # CONFIG_MFD_RK8XX_SPI is not set # CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set # CONFIG_MFD_SI476X_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_MFD_SKY81452 is not set # CONFIG_MFD_STMPE is not set CONFIG_MFD_SYSCON=y -# CONFIG_MFD_TI_AM335X_TSCADC is not set # CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set # CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set # CONFIG_TPS6105X is not set # CONFIG_TPS65010 is not set # CONFIG_TPS6507X is not set # CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set # CONFIG_MFD_TPS65217 is not set # CONFIG_MFD_TI_LP873X is not set # CONFIG_MFD_TI_LP87565 is not set # CONFIG_MFD_TPS65218 is not set # CONFIG_MFD_TPS65219 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set # CONFIG_MFD_TPS65912_I2C is not set # CONFIG_MFD_TPS65912_SPI is not set # CONFIG_MFD_TPS6594_I2C is not set # CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is 
not set # CONFIG_MFD_WL1273_CORE is not set # CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set # CONFIG_MFD_TQMX86 is not set # CONFIG_MFD_VX855 is not set +# CONFIG_MFD_LOCHNAGAR is not set # CONFIG_MFD_ARIZONA_I2C is not set # CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set # CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set # CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_ROHM_BD71828 is not set +# CONFIG_MFD_ROHM_BD957XMUF is not set +# CONFIG_MFD_STPMIC1 is not set # CONFIG_MFD_STMFX is not set # CONFIG_MFD_ATC260X_I2C is not set # CONFIG_MFD_QCOM_PM8008 is not set @@ -4440,8 +4473,10 @@ CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_DRM_TIDSS is not set # CONFIG_DRM_GUD is not set # CONFIG_DRM_SSD130X is not set +CONFIG_DRM_PHYTIUM=m # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y +# CONFIG_HYDCU_FIXUP_HEADER is not set # # Frame buffer Devices @@ -5002,8 +5037,6 @@ CONFIG_MMC_CQHCI=m CONFIG_MMC_TOSHIBA_PCI=m CONFIG_MMC_MTK=m # CONFIG_MMC_SDHCI_XENON is not set -# CONFIG_MMC_SDHCI_OMAP is not set -# CONFIG_MMC_SDHCI_AM654 is not set # CONFIG_SCSI_UFSHCD is not set CONFIG_MEMSTICK=m # CONFIG_MEMSTICK_DEBUG is not set @@ -5230,7 +5263,7 @@ CONFIG_RTC_DRV_RS5C348=m CONFIG_RTC_DRV_MAX6902=m CONFIG_RTC_DRV_PCF2123=m CONFIG_RTC_DRV_MCP795=m -CONFIG_RTC_I2C_AND_SPI=m +CONFIG_RTC_I2C_AND_SPI=y # # SPI and I2C RTC drivers @@ -5435,6 +5468,7 @@ CONFIG_CHROME_PLATFORMS=y # CONFIG_CROS_HPS_I2C is not set # CONFIG_CHROMEOS_PRIVACY_SCREEN is not set # CONFIG_MELLANOX_PLATFORM is not set +CONFIG_ARM_CPU_RESCTRL=y CONFIG_SURFACE_PLATFORMS=y # CONFIG_SURFACE_3_POWER_OPREGION is not set # CONFIG_SURFACE_GPE is not set @@ -5981,6 +6015,7 @@ CONFIG_PROC_VMCORE_DEVICE_DUMP=y CONFIG_PROC_SYSCTL=y CONFIG_PROC_PAGE_MONITOR=y CONFIG_PROC_CHILDREN=y +CONFIG_PROC_CPU_RESCTRL=y CONFIG_KERNFS=y CONFIG_SYSFS=y CONFIG_TMPFS=y @@ -6034,6 +6069,8 @@ 
CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 # CONFIG_HPFS_FS is not set # CONFIG_QNX4FS_FS is not set # CONFIG_QNX6FS_FS is not set +CONFIG_RESCTRL_FS=y +CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID=y # CONFIG_ROMFS_FS is not set CONFIG_PSTORE=y CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 @@ -6531,6 +6568,7 @@ CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m # CONFIG_CRYPTO_DEV_QAT_C3XXX is not set # CONFIG_CRYPTO_DEV_QAT_C62X is not set # CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_420XX is not set # CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set # CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set # CONFIG_CRYPTO_DEV_QAT_C62XVF is not set @@ -6746,6 +6784,7 @@ CONFIG_SG_POOL=y CONFIG_ARCH_HAS_PMEM_API=y CONFIG_MEMREGION=y CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_COPY_MC=y CONFIG_ARCH_STACKWALK=y CONFIG_STACKDEPOT=y CONFIG_STACKDEPOT_ALWAYS_INIT=y @@ -7009,8 +7048,6 @@ CONFIG_DEBUG_NOTIFIERS=y # CONFIG_DEBUG_MAPLE_TREE is not set # end of Debug kernel data structures -CONFIG_DEBUG_CREDENTIALS=y - # # RCU Debugging # diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index a563537ca907..0c4270891c76 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm64 6.6.7 Kernel Configuration +# Linux/arm64 6.6.25 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" CONFIG_CC_IS_GCC=y @@ -170,7 +170,7 @@ CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y CONFIG_CC_HAS_INT128=y CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" -CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_GCC10_NO_ARRAY_BOUNDS=y CONFIG_CC_NO_ARRAY_BOUNDS=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_NUMA_BALANCING=y @@ -397,7 +397,9 @@ CONFIG_ARM64_ERRATUM_2067961=y CONFIG_ARM64_ERRATUM_2441009=y CONFIG_ARM64_ERRATUM_2457168=y CONFIG_ARM64_ERRATUM_2645198=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD=y CONFIG_ARM64_ERRATUM_2966298=y +CONFIG_ARM64_ERRATUM_3117295=y CONFIG_CAVIUM_ERRATUM_22375=y CONFIG_CAVIUM_ERRATUM_23144=y CONFIG_CAVIUM_ERRATUM_23154=y @@ -676,6 +678,7 @@ CONFIG_ACPI_IORT=y CONFIG_ACPI_GTDT=y CONFIG_ACPI_AGDI=y CONFIG_ACPI_APMT=y +CONFIG_ACPI_MPAM=y CONFIG_ACPI_PPTT=y CONFIG_ACPI_PCC=y # CONFIG_ACPI_FFH is not set @@ -804,6 +807,7 @@ CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y CONFIG_STRICT_KERNEL_RWX=y CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_CPU_RESCTRL=y CONFIG_HAVE_ARCH_COMPILER_H=y CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y CONFIG_ARCH_USE_MEMREMAP_PROT=y @@ -2063,6 +2067,7 @@ CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y CONFIG_UEFI_CPER=y CONFIG_UEFI_CPER_ARM=y +# CONFIG_YITIAN_CPER_RAWDATA is not set CONFIG_ARM_PSCI_FW=y # CONFIG_ARM_PSCI_CHECKER is not set CONFIG_HAVE_ARM_SMCCC=y @@ -2317,10 +2322,6 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y # end of Texas Instruments shared transport line discipline # CONFIG_SENSORS_LIS3_I2C is not set - -# -# Altera FPGA firmware download module (requires I2C) -# # CONFIG_ALTERA_STAPL is not set # CONFIG_VMWARE_VMCI is not set # CONFIG_GENWQE is not set @@ -3353,7 +3354,8 @@ CONFIG_TCG_CRB=y # # I2C support # -CONFIG_I2C=m +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y CONFIG_I2C_BOARDINFO=y CONFIG_I2C_COMPAT=y 
CONFIG_I2C_CHARDEV=m @@ -3506,7 +3508,6 @@ CONFIG_SPI_QUP=y # CONFIG_SPI_XCOMM is not set # CONFIG_SPI_XILINX is not set CONFIG_SPI_XLP=m -# CONFIG_SPI_ZYNQMP_GQSPI is not set # CONFIG_SPI_AMD is not set # @@ -3564,6 +3565,7 @@ CONFIG_GENERIC_PINCONF=y # CONFIG_PINCTRL_OCELOT is not set # CONFIG_PINCTRL_SINGLE is not set # CONFIG_PINCTRL_STMFX is not set +# CONFIG_PINCTRL_SX150X is not set CONFIG_PINCTRL_MSM=y # CONFIG_PINCTRL_IPQ5018 is not set # CONFIG_PINCTRL_IPQ5332 is not set @@ -4066,7 +4068,11 @@ CONFIG_BCMA_DRIVER_GPIO=y # CONFIG_MFD_CORE=m # CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set # CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set # CONFIG_MFD_ATMEL_FLEXCOM is not set # CONFIG_MFD_ATMEL_HLCDC is not set # CONFIG_MFD_BCM590XX is not set @@ -4075,7 +4081,10 @@ CONFIG_MFD_CORE=m # CONFIG_MFD_CS42L43_I2C is not set # CONFIG_MFD_MADERA is not set # CONFIG_MFD_MAX5970 is not set +# CONFIG_PMIC_DA903X is not set # CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set # CONFIG_MFD_DA9062 is not set # CONFIG_MFD_DA9063 is not set # CONFIG_MFD_DA9150 is not set @@ -4093,12 +4102,19 @@ CONFIG_MFD_CORE=m # CONFIG_MFD_KEMPLD is not set # CONFIG_MFD_88PM800 is not set # CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77541 is not set +# CONFIG_MFD_MAX77620 is not set # CONFIG_MFD_MAX77650 is not set # CONFIG_MFD_MAX77686 is not set # CONFIG_MFD_MAX77693 is not set # CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX77843 is not set # CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set # CONFIG_MFD_MT6360 is not set # CONFIG_MFD_MT6370 is not set # CONFIG_MFD_MT6397 is not set @@ -4116,38 +4132,55 @@ CONFIG_MFD_CORE=m # CONFIG_MFD_RT4831 is not set # CONFIG_MFD_RT5033 is not set # 
CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RC5T583 is not set # CONFIG_MFD_RK8XX_I2C is not set # CONFIG_MFD_RK8XX_SPI is not set # CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set # CONFIG_MFD_SI476X_CORE is not set # CONFIG_MFD_SM501 is not set # CONFIG_MFD_SKY81452 is not set # CONFIG_MFD_STMPE is not set CONFIG_MFD_SYSCON=y -# CONFIG_MFD_TI_AM335X_TSCADC is not set # CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set # CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set # CONFIG_TPS6105X is not set # CONFIG_TPS65010 is not set # CONFIG_TPS6507X is not set # CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set # CONFIG_MFD_TPS65217 is not set # CONFIG_MFD_TI_LP873X is not set # CONFIG_MFD_TI_LP87565 is not set # CONFIG_MFD_TPS65218 is not set # CONFIG_MFD_TPS65219 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set # CONFIG_MFD_TPS65912_I2C is not set # CONFIG_MFD_TPS65912_SPI is not set # CONFIG_MFD_TPS6594_I2C is not set # CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set # CONFIG_MFD_WL1273_CORE is not set # CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set # CONFIG_MFD_TQMX86 is not set # CONFIG_MFD_VX855 is not set +# CONFIG_MFD_LOCHNAGAR is not set # CONFIG_MFD_ARIZONA_I2C is not set # CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set # CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set # CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_ROHM_BD71828 is not set +# CONFIG_MFD_ROHM_BD957XMUF is not set +# CONFIG_MFD_STPMIC1 is not set # CONFIG_MFD_STMFX is not set # CONFIG_MFD_ATC260X_I2C is not set # CONFIG_MFD_QCOM_PM8008 is not set @@ -4437,8 +4470,10 @@ CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_DRM_TIDSS is not set # CONFIG_DRM_GUD is not set # CONFIG_DRM_SSD130X is not set +CONFIG_DRM_PHYTIUM=m # CONFIG_DRM_LEGACY is not set 
CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y +# CONFIG_HYDCU_FIXUP_HEADER is not set # # Frame buffer Devices @@ -4999,8 +5034,6 @@ CONFIG_MMC_CQHCI=m CONFIG_MMC_TOSHIBA_PCI=m CONFIG_MMC_MTK=m # CONFIG_MMC_SDHCI_XENON is not set -# CONFIG_MMC_SDHCI_OMAP is not set -# CONFIG_MMC_SDHCI_AM654 is not set # CONFIG_SCSI_UFSHCD is not set CONFIG_MEMSTICK=m # CONFIG_MEMSTICK_DEBUG is not set @@ -5227,7 +5260,7 @@ CONFIG_RTC_DRV_RS5C348=m CONFIG_RTC_DRV_MAX6902=m CONFIG_RTC_DRV_PCF2123=m CONFIG_RTC_DRV_MCP795=m -CONFIG_RTC_I2C_AND_SPI=m +CONFIG_RTC_I2C_AND_SPI=y # # SPI and I2C RTC drivers @@ -5431,6 +5464,7 @@ CONFIG_CHROME_PLATFORMS=y # CONFIG_CROS_HPS_I2C is not set # CONFIG_CHROMEOS_PRIVACY_SCREEN is not set # CONFIG_MELLANOX_PLATFORM is not set +CONFIG_ARM_CPU_RESCTRL=y CONFIG_SURFACE_PLATFORMS=y # CONFIG_SURFACE_3_POWER_OPREGION is not set # CONFIG_SURFACE_GPE is not set @@ -5977,6 +6011,7 @@ CONFIG_PROC_VMCORE_DEVICE_DUMP=y CONFIG_PROC_SYSCTL=y CONFIG_PROC_PAGE_MONITOR=y CONFIG_PROC_CHILDREN=y +CONFIG_PROC_CPU_RESCTRL=y CONFIG_KERNFS=y CONFIG_SYSFS=y CONFIG_TMPFS=y @@ -6030,6 +6065,8 @@ CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 # CONFIG_HPFS_FS is not set # CONFIG_QNX4FS_FS is not set # CONFIG_QNX6FS_FS is not set +CONFIG_RESCTRL_FS=y +CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID=y # CONFIG_ROMFS_FS is not set CONFIG_PSTORE=y CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 @@ -6527,6 +6564,7 @@ CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m # CONFIG_CRYPTO_DEV_QAT_C3XXX is not set # CONFIG_CRYPTO_DEV_QAT_C62X is not set # CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_420XX is not set # CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set # CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set # CONFIG_CRYPTO_DEV_QAT_C62XVF is not set @@ -6741,6 +6779,7 @@ CONFIG_SG_POOL=y CONFIG_ARCH_HAS_PMEM_API=y CONFIG_MEMREGION=y CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_COPY_MC=y CONFIG_ARCH_STACKWALK=y CONFIG_STACKDEPOT=y CONFIG_SBITMAP=y @@ -6958,8 +6997,6 @@ CONFIG_DEBUG_LIST=y # CONFIG_DEBUG_MAPLE_TREE is 
not set # end of Debug kernel data structures -# CONFIG_DEBUG_CREDENTIALS is not set - # # RCU Debugging # diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 2619e84b4914..b62162d8c262 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 6.6.7 Kernel Configuration +# Linux/x86 6.6.25 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" CONFIG_CC_IS_GCC=y @@ -190,7 +190,7 @@ CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y CONFIG_CC_HAS_INT128=y CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" -CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_GCC10_NO_ARRAY_BOUNDS=y CONFIG_CC_NO_ARRAY_BOUNDS=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_NUMA_BALANCING=y @@ -454,7 +454,6 @@ CONFIG_X86_DIRECT_GBPAGES=y CONFIG_X86_CPA_STATISTICS=y CONFIG_X86_MEM_ENCRYPT=y CONFIG_AMD_MEM_ENCRYPT=y -# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set CONFIG_NUMA=y CONFIG_AMD_NUMA=y CONFIG_X86_64_ACPI_NUMA=y @@ -549,6 +548,7 @@ CONFIG_CPU_IBRS_ENTRY=y CONFIG_CPU_SRSO=y # CONFIG_SLS is not set # CONFIG_GDS_FORCE_MITIGATION is not set +CONFIG_MITIGATION_RFDS=y CONFIG_ARCH_HAS_ADD_PAGES=y # @@ -882,6 +882,7 @@ CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y CONFIG_STRICT_KERNEL_RWX=y CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_CPU_RESCTRL=y CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y CONFIG_ARCH_USE_MEMREMAP_PROT=y CONFIG_LOCK_EVENT_COUNTS=y @@ -1869,7 +1870,6 @@ CONFIG_BT_BNEP_MC_FILTER=y CONFIG_BT_BNEP_PROTO_FILTER=y CONFIG_BT_CMTP=m CONFIG_BT_HIDP=m -CONFIG_BT_HS=y CONFIG_BT_LE=y CONFIG_BT_LE_L2CAP_ECRED=y # CONFIG_BT_6LOWPAN is not set @@ -2415,10 +2415,6 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y # end of Texas Instruments shared transport line discipline CONFIG_SENSORS_LIS3_I2C=m - -# -# Altera FPGA firmware download module (requires I2C) -# 
CONFIG_ALTERA_STAPL=m CONFIG_INTEL_MEI=m CONFIG_INTEL_MEI_ME=m @@ -3784,6 +3780,8 @@ CONFIG_TCG_INFINEON=m # CONFIG_TCG_XEN is not set CONFIG_TCG_CRB=y # CONFIG_TCG_VTPM_PROXY is not set +CONFIG_TCG_HYGON=m +CONFIG_TCM_HYGON=m CONFIG_TCG_TIS_ST33ZP24=m CONFIG_TCG_TIS_ST33ZP24_I2C=m # CONFIG_TCG_TIS_ST33ZP24_SPI is not set @@ -3795,7 +3793,8 @@ CONFIG_TELCLOCK=m # # I2C support # -CONFIG_I2C=m +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y CONFIG_I2C_BOARDINFO=y CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m @@ -3922,7 +3921,6 @@ CONFIG_SPI_MASTER=y # CONFIG_SPI_MXIC is not set # CONFIG_SPI_XCOMM is not set # CONFIG_SPI_XILINX is not set -# CONFIG_SPI_ZYNQMP_GQSPI is not set # CONFIG_SPI_AMD is not set # @@ -3978,6 +3976,7 @@ CONFIG_GENERIC_PINCONF=y # CONFIG_PINCTRL_AMD is not set # CONFIG_PINCTRL_CY8C95X0 is not set # CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SX150X is not set # # Intel pinctrl drivers @@ -4514,13 +4513,19 @@ CONFIG_BCMA_DRIVER_GPIO=y # Multifunction device drivers # CONFIG_MFD_CORE=y +# CONFIG_MFD_AS3711 is not set # CONFIG_MFD_SMPRO is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set # CONFIG_MFD_BCM590XX is not set # CONFIG_MFD_BD9571MWV is not set # CONFIG_MFD_AXP20X_I2C is not set # CONFIG_MFD_CS42L43_I2C is not set # CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set # CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set # CONFIG_MFD_DA9062 is not set # CONFIG_MFD_DA9063 is not set # CONFIG_MFD_DA9150 is not set @@ -4540,9 +4545,15 @@ CONFIG_MFD_INTEL_LPSS_PCI=m # CONFIG_MFD_KEMPLD is not set # CONFIG_MFD_88PM800 is not set # CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77541 is not set # CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set # CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set # 
CONFIG_MFD_MT6360 is not set # CONFIG_MFD_MT6370 is not set # CONFIG_MFD_MT6397 is not set @@ -4557,30 +4568,40 @@ CONFIG_MFD_VIPERBOARD=m # CONFIG_MFD_RT4831 is not set # CONFIG_MFD_RT5033 is not set # CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RC5T583 is not set # CONFIG_MFD_SI476X_CORE is not set CONFIG_MFD_SM501=m CONFIG_MFD_SM501_GPIO=y # CONFIG_MFD_SKY81452 is not set # CONFIG_MFD_SYSCON is not set -# CONFIG_MFD_TI_AM335X_TSCADC is not set # CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set # CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set # CONFIG_TPS6105X is not set # CONFIG_TPS65010 is not set # CONFIG_TPS6507X is not set # CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set # CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set # CONFIG_MFD_TPS65912_I2C is not set # CONFIG_MFD_TPS65912_SPI is not set # CONFIG_MFD_TPS6594_I2C is not set # CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set # CONFIG_MFD_WL1273_CORE is not set # CONFIG_MFD_LM3533 is not set # CONFIG_MFD_TQMX86 is not set CONFIG_MFD_VX855=m # CONFIG_MFD_ARIZONA_I2C is not set # CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set # CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set # CONFIG_MFD_WM8994 is not set # CONFIG_MFD_ATC260X_I2C is not set # CONFIG_MFD_INTEL_M10_BMC_SPI is not set @@ -4821,6 +4842,7 @@ CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y CONFIG_DRM_PRIVACY_SCREEN=y +# CONFIG_HYDCU_FIXUP_HEADER is not set # # Frame buffer Devices @@ -5621,7 +5643,7 @@ CONFIG_RTC_DRV_EM3027=m # CONFIG_RTC_DRV_MAX6902 is not set # CONFIG_RTC_DRV_PCF2123 is not set # CONFIG_RTC_DRV_MCP795 is not set -CONFIG_RTC_I2C_AND_SPI=m +CONFIG_RTC_I2C_AND_SPI=y # # SPI and I2C RTC drivers @@ -5764,7 +5786,7 @@ CONFIG_VIRTIO_PMEM=m CONFIG_VIRTIO_BALLOON=m 
CONFIG_VIRTIO_MEM=m CONFIG_VIRTIO_INPUT=m -CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO=m CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y CONFIG_VIRTIO_DMA_SHARED_BUFFER=m # CONFIG_VDPA is not set @@ -6919,6 +6941,8 @@ CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 # CONFIG_HPFS_FS is not set # CONFIG_QNX4FS_FS is not set # CONFIG_QNX6FS_FS is not set +CONFIG_RESCTRL_FS=y +CONFIG_RESCTRL_FS_PSEUDO_LOCK=y # CONFIG_ROMFS_FS is not set CONFIG_PSTORE=y CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 @@ -7430,12 +7454,10 @@ CONFIG_CRYPTO_DEV_CCP_DD=m CONFIG_CRYPTO_DEV_SP_CCP=y CONFIG_CRYPTO_DEV_CCP_CRYPTO=m CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_HYGON_GM=y CONFIG_HYGON_PSP2CPU_CMD=y -CONFIG_TCG_HYGON=m -CONFIG_TCM_HYGON=m CONFIG_TDM_DEV_HYGON=y CONFIG_TDM_KERNEL_GUARD=m -CONFIG_HYGON_GM=y # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m @@ -7906,8 +7928,6 @@ CONFIG_DEBUG_NOTIFIERS=y # CONFIG_DEBUG_MAPLE_TREE is not set # end of Debug kernel data structures -CONFIG_DEBUG_CREDENTIALS=y - # # RCU Debugging # diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index ee0afda89125..52662a6020b9 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/x86 6.6.7 Kernel Configuration +# Linux/x86 6.6.25 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" CONFIG_CC_IS_GCC=y @@ -189,7 +189,7 @@ CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y CONFIG_CC_HAS_INT128=y CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" -CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_GCC10_NO_ARRAY_BOUNDS=y CONFIG_CC_NO_ARRAY_BOUNDS=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_NUMA_BALANCING=y @@ -451,7 +451,6 @@ CONFIG_X86_DIRECT_GBPAGES=y CONFIG_X86_CPA_STATISTICS=y CONFIG_X86_MEM_ENCRYPT=y CONFIG_AMD_MEM_ENCRYPT=y -# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set CONFIG_NUMA=y CONFIG_AMD_NUMA=y CONFIG_X86_64_ACPI_NUMA=y @@ -546,6 +545,7 @@ CONFIG_CPU_IBRS_ENTRY=y CONFIG_CPU_SRSO=y # CONFIG_SLS is not set # CONFIG_GDS_FORCE_MITIGATION is not set +CONFIG_MITIGATION_RFDS=y CONFIG_ARCH_HAS_ADD_PAGES=y # @@ -877,6 +877,7 @@ CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y CONFIG_STRICT_KERNEL_RWX=y CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y CONFIG_STRICT_MODULE_RWX=y +CONFIG_ARCH_HAS_CPU_RESCTRL=y CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y CONFIG_ARCH_USE_MEMREMAP_PROT=y # CONFIG_LOCK_EVENT_COUNTS is not set @@ -1864,7 +1865,6 @@ CONFIG_BT_BNEP_MC_FILTER=y CONFIG_BT_BNEP_PROTO_FILTER=y CONFIG_BT_CMTP=m CONFIG_BT_HIDP=m -CONFIG_BT_HS=y CONFIG_BT_LE=y CONFIG_BT_LE_L2CAP_ECRED=y # CONFIG_BT_6LOWPAN is not set @@ -2410,10 +2410,6 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y # end of Texas Instruments shared transport line discipline CONFIG_SENSORS_LIS3_I2C=m - -# -# Altera FPGA firmware download module (requires I2C) -# CONFIG_ALTERA_STAPL=m CONFIG_INTEL_MEI=m CONFIG_INTEL_MEI_ME=m @@ -3778,6 +3774,8 @@ CONFIG_TCG_INFINEON=m # CONFIG_TCG_XEN is not set CONFIG_TCG_CRB=y # CONFIG_TCG_VTPM_PROXY is not set +CONFIG_TCG_HYGON=m +CONFIG_TCM_HYGON=m CONFIG_TCG_TIS_ST33ZP24=m CONFIG_TCG_TIS_ST33ZP24_I2C=m # CONFIG_TCG_TIS_ST33ZP24_SPI is not set @@ -3789,7 +3787,8 @@ CONFIG_TELCLOCK=m # # I2C support # -CONFIG_I2C=m 
+CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y CONFIG_I2C_BOARDINFO=y CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=m @@ -3916,7 +3915,6 @@ CONFIG_SPI_MASTER=y # CONFIG_SPI_MXIC is not set # CONFIG_SPI_XCOMM is not set # CONFIG_SPI_XILINX is not set -# CONFIG_SPI_ZYNQMP_GQSPI is not set # CONFIG_SPI_AMD is not set # @@ -3972,6 +3970,7 @@ CONFIG_GENERIC_PINCONF=y # CONFIG_PINCTRL_AMD is not set # CONFIG_PINCTRL_CY8C95X0 is not set # CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_SX150X is not set # # Intel pinctrl drivers @@ -4508,13 +4507,19 @@ CONFIG_BCMA_DRIVER_GPIO=y # Multifunction device drivers # CONFIG_MFD_CORE=y +# CONFIG_MFD_AS3711 is not set # CONFIG_MFD_SMPRO is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set # CONFIG_MFD_BCM590XX is not set # CONFIG_MFD_BD9571MWV is not set # CONFIG_MFD_AXP20X_I2C is not set # CONFIG_MFD_CS42L43_I2C is not set # CONFIG_MFD_MADERA is not set +# CONFIG_PMIC_DA903X is not set # CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set # CONFIG_MFD_DA9062 is not set # CONFIG_MFD_DA9063 is not set # CONFIG_MFD_DA9150 is not set @@ -4534,9 +4539,15 @@ CONFIG_MFD_INTEL_LPSS_PCI=m # CONFIG_MFD_KEMPLD is not set # CONFIG_MFD_88PM800 is not set # CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set # CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77541 is not set # CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set # CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set # CONFIG_MFD_MT6360 is not set # CONFIG_MFD_MT6370 is not set # CONFIG_MFD_MT6397 is not set @@ -4551,30 +4562,40 @@ CONFIG_MFD_VIPERBOARD=m # CONFIG_MFD_RT4831 is not set # CONFIG_MFD_RT5033 is not set # CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RC5T583 is not set # CONFIG_MFD_SI476X_CORE is not set CONFIG_MFD_SM501=m CONFIG_MFD_SM501_GPIO=y # CONFIG_MFD_SKY81452 is not set # CONFIG_MFD_SYSCON 
is not set -# CONFIG_MFD_TI_AM335X_TSCADC is not set # CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set # CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set # CONFIG_TPS6105X is not set # CONFIG_TPS65010 is not set # CONFIG_TPS6507X is not set # CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set # CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set # CONFIG_MFD_TPS65912_I2C is not set # CONFIG_MFD_TPS65912_SPI is not set # CONFIG_MFD_TPS6594_I2C is not set # CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set # CONFIG_MFD_WL1273_CORE is not set # CONFIG_MFD_LM3533 is not set # CONFIG_MFD_TQMX86 is not set CONFIG_MFD_VX855=m # CONFIG_MFD_ARIZONA_I2C is not set # CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set # CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set # CONFIG_MFD_WM8994 is not set # CONFIG_MFD_ATC260X_I2C is not set # CONFIG_MFD_INTEL_M10_BMC_SPI is not set @@ -4815,6 +4836,7 @@ CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y CONFIG_DRM_PRIVACY_SCREEN=y +# CONFIG_HYDCU_FIXUP_HEADER is not set # # Frame buffer Devices @@ -5615,7 +5637,7 @@ CONFIG_RTC_DRV_EM3027=m # CONFIG_RTC_DRV_MAX6902 is not set # CONFIG_RTC_DRV_PCF2123 is not set # CONFIG_RTC_DRV_MCP795 is not set -CONFIG_RTC_I2C_AND_SPI=m +CONFIG_RTC_I2C_AND_SPI=y # # SPI and I2C RTC drivers @@ -5757,7 +5779,7 @@ CONFIG_VIRTIO_PMEM=m CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_MEM=m CONFIG_VIRTIO_INPUT=m -CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO=m CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y CONFIG_VIRTIO_DMA_SHARED_BUFFER=m # CONFIG_VDPA is not set @@ -6908,6 +6930,8 @@ CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 # CONFIG_HPFS_FS is not set # CONFIG_QNX4FS_FS is not set # CONFIG_QNX6FS_FS is not set +CONFIG_RESCTRL_FS=y +CONFIG_RESCTRL_FS_PSEUDO_LOCK=y # CONFIG_ROMFS_FS is 
not set CONFIG_PSTORE=y CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 @@ -7421,12 +7445,10 @@ CONFIG_CRYPTO_DEV_CCP_DD=m CONFIG_CRYPTO_DEV_SP_CCP=y CONFIG_CRYPTO_DEV_CCP_CRYPTO=m CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_HYGON_GM=y CONFIG_HYGON_PSP2CPU_CMD=y -CONFIG_TCG_HYGON=m -CONFIG_TCM_HYGON=m CONFIG_TDM_DEV_HYGON=y CONFIG_TDM_KERNEL_GUARD=m -CONFIG_HYGON_GM=y # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m @@ -7850,8 +7872,6 @@ CONFIG_DEBUG_LIST=y # CONFIG_DEBUG_MAPLE_TREE is not set # end of Debug kernel data structures -# CONFIG_DEBUG_CREDENTIALS is not set - # # RCU Debugging # -- Gitee From e9fc054739b620c9e599f66c1e400dc954f6bea3 Mon Sep 17 00:00:00 2001 From: wangkaiyuan Date: Tue, 12 Mar 2024 17:33:30 +0800 Subject: [PATCH 0880/2138] anolis: drm/inspur/inspur-drm: Add inspur drm driver Add Inspur DRM driver for Inspur BMC SoC. ANBZ: #8520 Signed-off-by: wangkaiyuan Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/3046 --- arch/arm64/configs/anolis-debug_defconfig | 1 + arch/arm64/configs/anolis_defconfig | 1 + arch/loongarch/configs/anolis-debug_defconfig | 1 + arch/loongarch/configs/anolis_defconfig | 1 + arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + drivers/gpu/drm/Kconfig | 2 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/inspur/Kconfig | 5 + drivers/gpu/drm/inspur/Makefile | 4 + drivers/gpu/drm/inspur/inspur-drm/Kconfig | 8 + drivers/gpu/drm/inspur/inspur-drm/Makefile | 3 + .../gpu/drm/inspur/inspur-drm/inspur_drm_de.c | 484 ++++++++++++++++++ .../drm/inspur/inspur-drm/inspur_drm_drv.c | 404 +++++++++++++++ .../drm/inspur/inspur-drm/inspur_drm_drv.h | 86 ++++ .../drm/inspur/inspur-drm/inspur_drm_regs.h | 209 ++++++++ .../drm/inspur/inspur-drm/inspur_drm_vdac.c | 105 ++++ .../gpu/drm/inspur/inspur-drm/inspur_ttm.c | 19 + 18 files changed, 1336 insertions(+) create mode 100644 drivers/gpu/drm/inspur/Kconfig create mode 100644 
drivers/gpu/drm/inspur/Makefile create mode 100644 drivers/gpu/drm/inspur/inspur-drm/Kconfig create mode 100644 drivers/gpu/drm/inspur/inspur-drm/Makefile create mode 100644 drivers/gpu/drm/inspur/inspur-drm/inspur_drm_de.c create mode 100644 drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.c create mode 100644 drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.h create mode 100644 drivers/gpu/drm/inspur/inspur-drm/inspur_drm_regs.h create mode 100644 drivers/gpu/drm/inspur/inspur-drm/inspur_drm_vdac.c create mode 100644 drivers/gpu/drm/inspur/inspur-drm/inspur_ttm.c diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index 13db02910f8b..aeb6bc8aa483 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -4477,6 +4477,7 @@ CONFIG_DRM_PHYTIUM=m # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y # CONFIG_HYDCU_FIXUP_HEADER is not set +CONFIG_DRM_INSPUR=m # # Frame buffer Devices diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 0c4270891c76..2940c4369081 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -4474,6 +4474,7 @@ CONFIG_DRM_PHYTIUM=m # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y # CONFIG_HYDCU_FIXUP_HEADER is not set +CONFIG_DRM_INSPUR=m # # Frame buffer Devices diff --git a/arch/loongarch/configs/anolis-debug_defconfig b/arch/loongarch/configs/anolis-debug_defconfig index 365f27c124b4..e13de1212610 100644 --- a/arch/loongarch/configs/anolis-debug_defconfig +++ b/arch/loongarch/configs/anolis-debug_defconfig @@ -5345,6 +5345,7 @@ CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y # CONFIG_HYDCU_FIXUP_HEADER is not set +CONFIG_DRM_INSPUR=m # # Frame buffer Devices diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig index 365f27c124b4..e13de1212610 100644 
--- a/arch/loongarch/configs/anolis_defconfig +++ b/arch/loongarch/configs/anolis_defconfig @@ -5345,6 +5345,7 @@ CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y # CONFIG_HYDCU_FIXUP_HEADER is not set +CONFIG_DRM_INSPUR=m # # Frame buffer Devices diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index b62162d8c262..ff275d4fdbc1 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -4843,6 +4843,7 @@ CONFIG_DRM_CIRRUS_QEMU=m CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y CONFIG_DRM_PRIVACY_SCREEN=y # CONFIG_HYDCU_FIXUP_HEADER is not set +CONFIG_DRM_INSPUR=m # # Frame buffer Devices diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 52662a6020b9..753475d33452 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -4837,6 +4837,7 @@ CONFIG_DRM_CIRRUS_QEMU=m CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y CONFIG_DRM_PRIVACY_SCREEN=y # CONFIG_HYDCU_FIXUP_HEADER is not set +CONFIG_DRM_INSPUR=m # # Frame buffer Devices diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 2a89adbbf9fa..1c17d051c98f 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -390,6 +390,8 @@ source "drivers/gpu/drm/sprd/Kconfig" source "drivers/gpu/drm/phytium/Kconfig" +source "drivers/gpu/drm/inspur/Kconfig" + config DRM_HYPERV tristate "DRM Support for Hyper-V synthetic video device" depends on DRM && PCI && MMU && HYPERV diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 017ff5a6ebe2..68d2eefe2c25 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -200,3 +200,4 @@ obj-$(CONFIG_DRM_SPRD) += sprd/ obj-$(CONFIG_DRM_LOONGSON) += loongson/ obj-$(CONFIG_HYDCU_FIXUP_HEADER) += hygon/hydcu-fixup-header/ obj-$(CONFIG_DRM_PHYTIUM) += phytium/ +obj-$(CONFIG_DRM_INSPUR) += inspur/ diff --git a/drivers/gpu/drm/inspur/Kconfig 
b/drivers/gpu/drm/inspur/Kconfig new file mode 100644 index 000000000000..9ee949fc6936 --- /dev/null +++ b/drivers/gpu/drm/inspur/Kconfig @@ -0,0 +1,5 @@ +# License: GPL-2.0 +# +# inspur drm device configuration. + +source "drivers/gpu/drm/inspur/inspur-drm/Kconfig" diff --git a/drivers/gpu/drm/inspur/Makefile b/drivers/gpu/drm/inspur/Makefile new file mode 100644 index 000000000000..9fd0eb7a1035 --- /dev/null +++ b/drivers/gpu/drm/inspur/Makefile @@ -0,0 +1,4 @@ +# +# Makefile for inspur drm drivers. + +obj-$(CONFIG_DRM_INSPUR) += inspur-drm/ diff --git a/drivers/gpu/drm/inspur/inspur-drm/Kconfig b/drivers/gpu/drm/inspur/inspur-drm/Kconfig new file mode 100644 index 000000000000..c060825d6116 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/Kconfig @@ -0,0 +1,8 @@ +config DRM_INSPUR + tristate "DRM Support for Inspur BMC" + depends on DRM && PCI && MMU + select DRM_KMS_HELPER + select DRM_VRAM_HELPER + help + Choose this option if you have a Inspur soc chipset.If M is selected the + module will be called inspur - drm. 
diff --git a/drivers/gpu/drm/inspur/inspur-drm/Makefile b/drivers/gpu/drm/inspur/inspur-drm/Makefile new file mode 100644 index 000000000000..be54bb9e51d0 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/Makefile @@ -0,0 +1,3 @@ +inspur-drm-y := inspur_drm_drv.o inspur_drm_de.o inspur_drm_vdac.o inspur_ttm.o + +obj-$(CONFIG_DRM_INSPUR) += inspur-drm.o diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_de.c b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_de.c new file mode 100644 index 000000000000..fae1014e5d59 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_de.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include +#include + +#include +#include + + +#include "inspur_drm_drv.h" +#include "inspur_drm_regs.h" + +struct inspur_dislay_pll_config { + unsigned long hdisplay; + unsigned long vdisplay; + u32 pll1_config_value; + u32 pll2_config_value; +}; + +static const struct inspur_dislay_pll_config inspur_pll_table[] = { + { 640, 480, CRT_PLL1_NS_25MHZ, CRT_PLL2_NS_25MHZ }, + { 800, 600, CRT_PLL1_NS_40MHZ, CRT_PLL2_NS_40MHZ }, + { 1024, 768, CRT_PLL1_NS_65MHZ, CRT_PLL2_NS_65MHZ }, + { 1280, 1024, CRT_PLL1_NS_108MHZ, CRT_PLL2_NS_108MHZ }, + { 1920, 1080, CRT_PLL1_NS_148MHZ, CRT_PLL2_NS_148MHZ }, +}; + +#define PADDING(align, data) (((data) + (align) - 1) & (~((align) - 1))) + +static int inspur_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *atom_state) +{ + struct drm_plane_state *state = drm_atomic_get_new_plane_state(atom_state, plane); + struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = state->crtc; + struct drm_crtc_state *crtc_state; + u32 src_w = state->src_w >> 16; + u32 src_h = state->src_h >> 16; + + if (!crtc || !fb) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state->state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (src_w != state->crtc_w || src_h != state->crtc_h) { + DRM_DEBUG_ATOMIC("scale not support\n"); + 
return -EINVAL; + } + + if (state->crtc_x < 0 || state->crtc_y < 0) { + DRM_DEBUG_ATOMIC("crtc_x/y of drm_plane state is invalid\n"); + return -EINVAL; + } + + if (!crtc_state->enable) + return 0; + + if (state->crtc_x + state->crtc_w > + crtc_state->adjusted_mode.hdisplay || + state->crtc_y + state->crtc_h > + crtc_state->adjusted_mode.vdisplay) { + DRM_DEBUG_ATOMIC("visible portion of plane is invalid\n"); + return -EINVAL; + } + + if (state->fb->pitches[0] % 16 != 0) { + DRM_DEBUG_ATOMIC("wrong stride with 16-byte aligned\n"); + return -EINVAL; + } + + return 0; +} + +static void inspur_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *old_state) +{ + struct drm_plane_state *state = plane->state; + u32 reg; + int ret; + s64 gpu_addr = 0; + unsigned int line_l; + struct inspur_drm_private *priv = plane->dev->dev_private; + struct drm_gem_vram_object *gbo; + + if (!state->fb) + return; + + gbo = drm_gem_vram_of_gem(state->fb->obj[0]); + + ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); + if (ret) { + DRM_ERROR("failed to pin bo: %d", ret); + return; + } + gpu_addr = drm_gem_vram_offset(gbo); + if (gpu_addr < 0) { + drm_gem_vram_unpin(gbo); + return; + } + + writel(gpu_addr, priv->mmio + INSPUR_CRT_FB_ADDRESS); + + reg = state->fb->width * (state->fb->format->cpp[0]); + + line_l = state->fb->pitches[0]; + writel(INSPUR_FIELD(INSPUR_CRT_FB_WIDTH_WIDTH, reg) | + INSPUR_FIELD(INSPUR_CRT_FB_WIDTH_OFFS, line_l), + priv->mmio + INSPUR_CRT_FB_WIDTH); + + /* SET PIXEL FORMAT */ + reg = readl(priv->mmio + INSPUR_CRT_DISP_CTL); + reg &= ~INSPUR_CRT_DISP_CTL_FORMAT_MASK; + reg |= INSPUR_FIELD(INSPUR_CRT_DISP_CTL_FORMAT, + state->fb->format->cpp[0] * 8 / 16); + writel(reg, priv->mmio + INSPUR_CRT_DISP_CTL); +} + +static const u32 channel_formats1[] = { + DRM_FORMAT_RGB565, DRM_FORMAT_BGR565, DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBA8888, DRM_FORMAT_BGRA8888, DRM_FORMAT_ARGB8888, + 
DRM_FORMAT_ABGR8888 +}; + +static struct drm_plane_funcs inspur_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static const struct drm_plane_helper_funcs inspur_plane_helper_funcs = { + .atomic_check = inspur_plane_atomic_check, + .atomic_update = inspur_plane_atomic_update, +}; + +static struct drm_plane *inspur_plane_init(struct inspur_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_plane *plane; + int ret = 0; + + plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL); + if (!plane) { + DRM_ERROR("failed to alloc memory when init plane\n"); + return ERR_PTR(-ENOMEM); + } + ret = drm_universal_plane_init(dev, plane, 1, &inspur_plane_funcs, + channel_formats1, + ARRAY_SIZE(channel_formats1), + NULL, DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) { + DRM_ERROR("failed to init plane: %d\n", ret); + return ERR_PTR(ret); + } + + drm_plane_helper_add(plane, &inspur_plane_helper_funcs); + return plane; +} + +static void inspur_crtc_dpms(struct drm_crtc *crtc, int dpms) +{ + struct inspur_drm_private *priv = crtc->dev->dev_private; + unsigned int reg; + + reg = readl(priv->mmio + INSPUR_CRT_DISP_CTL); + reg &= ~INSPUR_CRT_DISP_CTL_DPMS_MASK; + reg |= INSPUR_FIELD(INSPUR_CRT_DISP_CTL_DPMS, dpms); + reg &= ~INSPUR_CRT_DISP_CTL_TIMING_MASK; + if (dpms == INSPUR_CRT_DPMS_ON) + reg |= INSPUR_CRT_DISP_CTL_TIMING(1); + writel(reg, priv->mmio + INSPUR_CRT_DISP_CTL); +} + +static void inspur_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *old_state) +{ + unsigned int reg; + struct inspur_drm_private *priv = crtc->dev->dev_private; + + inspur_set_power_mode(priv, INSPUR_PW_MODE_CTL_MODE_MODE0); + + /* Enable display power gate & LOCALMEM power gate */ + 
reg = readl(priv->mmio + INSPUR_CURRENT_GATE); + reg &= ~INSPUR_CURR_GATE_LOCALMEM_MASK; + reg &= ~INSPUR_CURR_GATE_DISPLAY_MASK; + reg |= INSPUR_CURR_GATE_LOCALMEM(1); + reg |= INSPUR_CURR_GATE_DISPLAY(1); + inspur_set_current_gate(priv, reg); + inspur_crtc_dpms(crtc, INSPUR_CRT_DPMS_ON); +} + +static void inspur_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *old_state) +{ + unsigned int reg; + struct inspur_drm_private *priv = crtc->dev->dev_private; + + inspur_crtc_dpms(crtc, INSPUR_CRT_DPMS_OFF); + + inspur_set_power_mode(priv, INSPUR_PW_MODE_CTL_MODE_SLEEP); + + /* Enable display power gate & LOCALMEM power gate */ + reg = readl(priv->mmio + INSPUR_CURRENT_GATE); + reg &= ~INSPUR_CURR_GATE_LOCALMEM_MASK; + reg &= ~INSPUR_CURR_GATE_DISPLAY_MASK; + reg |= INSPUR_CURR_GATE_LOCALMEM(0); + reg |= INSPUR_CURR_GATE_DISPLAY(0); + inspur_set_current_gate(priv, reg); +} + +static enum drm_mode_status +inspur_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + int i = 0; + int vrefresh = drm_mode_vrefresh(mode); + + if (vrefresh < 59 || vrefresh > 61) + return MODE_NOCLOCK; + + for (i = 0; i < ARRAY_SIZE(inspur_pll_table); i++) { + if (inspur_pll_table[i].hdisplay == mode->hdisplay && + inspur_pll_table[i].vdisplay == mode->vdisplay) + return MODE_OK; + } + + return MODE_BAD; +} + +static void set_vclock_inspur(struct drm_device *dev, unsigned long pll) +{ + u32 val; + struct inspur_drm_private *priv = dev->dev_private; + + val = readl(priv->mmio + CRT_PLL1_NS); + val &= ~(CRT_PLL1_NS_OUTER_BYPASS(1)); + writel(val, priv->mmio + CRT_PLL1_NS); + + val = CRT_PLL1_NS_INTER_BYPASS(1) | CRT_PLL1_NS_POWERON(1); + writel(val, priv->mmio + CRT_PLL1_NS); + + writel(pll, priv->mmio + CRT_PLL1_NS); + + usleep_range(1000, 2000); + + val = pll & ~(CRT_PLL1_NS_POWERON(1)); + writel(val, priv->mmio + CRT_PLL1_NS); + + usleep_range(1000, 2000); + + val &= ~(CRT_PLL1_NS_INTER_BYPASS(1)); + writel(val, priv->mmio + CRT_PLL1_NS); + + 
usleep_range(1000, 2000); + + val |= CRT_PLL1_NS_OUTER_BYPASS(1); + writel(val, priv->mmio + CRT_PLL1_NS); +} + +static void get_pll_config(unsigned long x, unsigned long y, + u32 *pll1, u32 *pll2) +{ + int i; + int count = ARRAY_SIZE(inspur_pll_table); + + for (i = 0; i < count; i++) { + if (inspur_pll_table[i].hdisplay == x && + inspur_pll_table[i].vdisplay == y) { + *pll1 = inspur_pll_table[i].pll1_config_value; + *pll2 = inspur_pll_table[i].pll2_config_value; + return; + } + } + + /* if found none, we use default value */ + *pll1 = CRT_PLL1_NS_25MHZ; + *pll2 = CRT_PLL2_NS_25MHZ; +} + +/* + * This function takes care the extra registers and bit fields required to + * setup a mode in board. + * Explanation about Display Control register: + * FPGA only supports 7 predefined pixel clocks, and clock select is + * in bit 4:0 of new register 0x802a8. + */ +static unsigned int display_ctrl_adjust(struct drm_device *dev, + struct drm_display_mode *mode, + unsigned int ctrl) +{ + unsigned long x, y; + u32 pll1; /* bit[31:0] of PLL */ + u32 pll2; /* bit[63:32] of PLL */ + struct inspur_drm_private *priv = dev->dev_private; + + x = mode->hdisplay; + y = mode->vdisplay; + + get_pll_config(x, y, &pll1, &pll2); + writel(pll2, priv->mmio + CRT_PLL2_NS); + set_vclock_inspur(dev, pll1); + + /* + * inspur has to set up the top-left and bottom-right + * registers as well. + * Note that normal chip only use those two register for + * auto-centering mode. + */ + writel(INSPUR_FIELD(INSPUR_CRT_AUTO_CENTERING_TL_TOP, 0) | + INSPUR_FIELD(INSPUR_CRT_AUTO_CENTERING_TL_LEFT, 0), + priv->mmio + INSPUR_CRT_AUTO_CENTERING_TL); + + writel(INSPUR_FIELD(INSPUR_CRT_AUTO_CENTERING_BR_BOTTOM, y - 1) | + INSPUR_FIELD(INSPUR_CRT_AUTO_CENTERING_BR_RIGHT, x - 1), + priv->mmio + INSPUR_CRT_AUTO_CENTERING_BR); + + /* + * Assume common fields in ctrl have been properly set before + * calling this function. + * This function only sets the extra fields in ctrl. 
+ */ + + /* Set bit 25 of display controller: Select CRT or VGA clock */ + ctrl &= ~INSPUR_CRT_DISP_CTL_CRTSELECT_MASK; + ctrl &= ~INSPUR_CRT_DISP_CTL_CLOCK_PHASE_MASK; + + ctrl |= INSPUR_CRT_DISP_CTL_CRTSELECT(INSPUR_CRTSELECT_CRT); + + /* clock_phase_polarity is 0 */ + ctrl |= INSPUR_CRT_DISP_CTL_CLOCK_PHASE(0); + + writel(ctrl, priv->mmio + INSPUR_CRT_DISP_CTL); + + return ctrl; +} + +static void inspur_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + unsigned int val; + struct drm_display_mode *mode = &crtc->state->mode; + struct drm_device *dev = crtc->dev; + struct inspur_drm_private *priv = dev->dev_private; + int width = mode->hsync_end - mode->hsync_start; + int height = mode->vsync_end - mode->vsync_start; + + //writel(format_pll_reg(), priv->mmio + INSPUR_CRT_PLL_CTRL); + writel(INSPUR_FIELD(INSPUR_CRT_HORZ_TOTAL_TOTAL, mode->htotal - 1) | + INSPUR_FIELD(INSPUR_CRT_HORZ_TOTAL_DISP_END, mode->hdisplay - 1), + priv->mmio + INSPUR_CRT_HORZ_TOTAL); + + writel(INSPUR_FIELD(INSPUR_CRT_HORZ_SYNC_WIDTH, width) | + INSPUR_FIELD(INSPUR_CRT_HORZ_SYNC_START, mode->hsync_start - 1), + priv->mmio + INSPUR_CRT_HORZ_SYNC); + + writel(INSPUR_FIELD(INSPUR_CRT_VERT_TOTAL_TOTAL, mode->vtotal - 1) | + INSPUR_FIELD(INSPUR_CRT_VERT_TOTAL_DISP_END, mode->vdisplay - 1), + priv->mmio + INSPUR_CRT_VERT_TOTAL); + + writel(INSPUR_FIELD(INSPUR_CRT_VERT_SYNC_HEIGHT, height) | + INSPUR_FIELD(INSPUR_CRT_VERT_SYNC_START, mode->vsync_start - 1), + priv->mmio + INSPUR_CRT_VERT_SYNC); + + val = INSPUR_FIELD(INSPUR_CRT_DISP_CTL_VSYNC_PHASE, 0); + val |= INSPUR_FIELD(INSPUR_CRT_DISP_CTL_HSYNC_PHASE, 0); + val |= INSPUR_CRT_DISP_CTL_TIMING(1); + val |= INSPUR_CRT_DISP_CTL_PLANE(1); + + display_ctrl_adjust(dev, mode, val); +} + +static void inspur_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *old_state) +{ + unsigned int reg; + struct drm_device *dev = crtc->dev; + struct inspur_drm_private *priv = dev->dev_private; + + inspur_set_power_mode(priv, 
INSPUR_PW_MODE_CTL_MODE_MODE0); + + /* Enable display power gate & LOCALMEM power gate */ + reg = readl(priv->mmio + INSPUR_CURRENT_GATE); + reg &= ~INSPUR_CURR_GATE_DISPLAY_MASK; + reg &= ~INSPUR_CURR_GATE_LOCALMEM_MASK; + reg |= INSPUR_CURR_GATE_DISPLAY(1); + reg |= INSPUR_CURR_GATE_LOCALMEM(1); + inspur_set_current_gate(priv, reg); + + /* We can add more initialization as needed. */ +} + +static void inspur_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *old_state) + +{ + unsigned long flags; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + if (crtc->state->event) + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); +} + +static int inspur_crtc_enable_vblank(struct drm_crtc *crtc) +{ + struct inspur_drm_private *priv = crtc->dev->dev_private; + + writel(INSPUR_RAW_INTERRUPT_EN_VBLANK(1), + priv->mmio + INSPUR_RAW_INTERRUPT_EN); + + return 0; +} + +static void inspur_crtc_disable_vblank(struct drm_crtc *crtc) +{ + struct inspur_drm_private *priv = crtc->dev->dev_private; + + writel(INSPUR_RAW_INTERRUPT_EN_VBLANK(0), + priv->mmio + INSPUR_RAW_INTERRUPT_EN); +} + +static const struct drm_crtc_funcs inspur_crtc_funcs = { + .page_flip = drm_atomic_helper_page_flip, + .set_config = drm_atomic_helper_set_config, + .destroy = drm_crtc_cleanup, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .enable_vblank = inspur_crtc_enable_vblank, + .disable_vblank = inspur_crtc_disable_vblank, + +}; + +static const struct drm_crtc_helper_funcs inspur_crtc_helper_funcs = { + .mode_set_nofb = inspur_crtc_mode_set_nofb, + .atomic_begin = inspur_crtc_atomic_begin, + .atomic_flush = inspur_crtc_atomic_flush, + .atomic_enable = inspur_crtc_atomic_enable, + .atomic_disable = inspur_crtc_atomic_disable, + .mode_valid = inspur_crtc_mode_valid, +}; + 
+int inspur_de_init(struct inspur_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_crtc *crtc; + struct drm_plane *plane; + int ret; + + plane = inspur_plane_init(priv); + if (IS_ERR(plane)) { + DRM_ERROR("failed to create plane: %ld\n", PTR_ERR(plane)); + return PTR_ERR(plane); + } + + crtc = devm_kzalloc(dev->dev, sizeof(*crtc), GFP_KERNEL); + if (!crtc) { + DRM_ERROR("failed to alloc memory when init crtc\n"); + return -ENOMEM; + } + + ret = drm_crtc_init_with_planes(dev, crtc, plane, + NULL, &inspur_crtc_funcs, NULL); + if (ret) { + DRM_ERROR("failed to init crtc: %d\n", ret); + return ret; + } + + ret = drm_mode_crtc_set_gamma_size(crtc, 256); + if (ret) { + DRM_ERROR("failed to set gamma size: %d\n", ret); + return ret; + } + drm_crtc_helper_add(crtc, &inspur_crtc_helper_funcs); + + return 0; +} diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.c b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.c new file mode 100644 index 000000000000..c522ca90b00c --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.c @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include "inspur_drm_drv.h" +#include "inspur_drm_regs.h" + +#define MEM_SIZE_RESERVE4KVM 0x200000 + +DEFINE_DRM_GEM_FOPS(inspur_fops); +irqreturn_t inspur_drm_interrupt(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *)arg; + struct inspur_drm_private *priv = + (struct inspur_drm_private *)dev->dev_private; + u32 status; + + status = readl(priv->mmio + INSPUR_RAW_INTERRUPT); + + if (status & INSPUR_RAW_INTERRUPT_VBLANK(1)) { + writel(INSPUR_RAW_INTERRUPT_VBLANK(1), + priv->mmio + INSPUR_RAW_INTERRUPT); + drm_handle_vblank(dev, 0); + } + + return IRQ_HANDLED; +} + +static struct drm_driver inspur_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | + DRIVER_ATOMIC | DRIVER_HAVE_IRQ, + + .fops = &inspur_fops, + .name = "inspur", + .date 
= "20240201", + .desc = "inspur drm driver", + .major = 3, + .minor = 0, + .dumb_create = inspur_dumb_create, + .dumb_map_offset = drm_gem_ttm_dumb_map_offset, +}; + +static int __maybe_unused inspur_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct inspur_drm_private *priv = drm_dev->dev_private; + + drm_kms_helper_poll_disable(drm_dev); + priv->suspend_state = drm_atomic_helper_suspend(drm_dev); + if (IS_ERR(priv->suspend_state)) { + DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", + PTR_ERR(priv->suspend_state)); + drm_kms_helper_poll_enable(drm_dev); + return PTR_ERR(priv->suspend_state); + } + + return 0; +} + +static int __maybe_unused inspur_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct inspur_drm_private *priv = drm_dev->dev_private; + + drm_atomic_helper_resume(drm_dev, priv->suspend_state); + drm_kms_helper_poll_enable(drm_dev); + + return 0; +} + +static const struct dev_pm_ops inspur_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(inspur_pm_suspend, + inspur_pm_resume) +}; + +static int inspur_kms_init(struct inspur_drm_private *priv) +{ + int ret; + + drm_mode_config_init(priv->dev); + priv->mode_config_initialized = true; + + priv->dev->mode_config.min_width = 0; + priv->dev->mode_config.min_height = 0; + priv->dev->mode_config.max_width = 1920; + priv->dev->mode_config.max_height = 1200; + priv->dev->mode_config.preferred_depth = 32; + priv->dev->mode_config.prefer_shadow = 1; + priv->dev->mode_config.funcs = (void *)&inspur_mode_funcs; + + ret = inspur_de_init(priv); + if (ret) { + DRM_ERROR("failed to init de: %d\n", ret); + return ret; + } + + ret = inspur_vdac_init(priv); + if (ret) { + DRM_ERROR("failed to init vdac: %d\n", ret); + return ret; + } + + return 0; +} + +static void inspur_kms_fini(struct inspur_drm_private *priv) +{ + if (priv->mode_config_initialized) { + 
drm_mode_config_cleanup(priv->dev); + priv->mode_config_initialized = false; + } +} + +/* + * It can operate in one of three modes: 0, 1 or Sleep. + */ +void inspur_set_power_mode(struct inspur_drm_private *priv, + unsigned int power_mode) +{ + unsigned int control_value = 0; + void __iomem *mmio = priv->mmio; + unsigned int input = 1; + + if (power_mode > INSPUR_PW_MODE_CTL_MODE_SLEEP) + return; + + if (power_mode == INSPUR_PW_MODE_CTL_MODE_SLEEP) + input = 0; + + control_value = readl(mmio + INSPUR_POWER_MODE_CTRL); + control_value &= ~(INSPUR_PW_MODE_CTL_MODE_MASK | + INSPUR_PW_MODE_CTL_OSC_INPUT_MASK); + control_value |= INSPUR_FIELD(INSPUR_PW_MODE_CTL_MODE, power_mode); + control_value |= INSPUR_FIELD(INSPUR_PW_MODE_CTL_OSC_INPUT, input); + writel(control_value, mmio + INSPUR_POWER_MODE_CTRL); +} + +void inspur_set_current_gate(struct inspur_drm_private *priv, unsigned int gate) +{ + unsigned int gate_reg; + unsigned int mode; + void __iomem *mmio = priv->mmio; + + /* Get current power mode. */ + mode = (readl(mmio + INSPUR_POWER_MODE_CTRL) & + INSPUR_PW_MODE_CTL_MODE_MASK) >> INSPUR_PW_MODE_CTL_MODE_SHIFT; + + switch (mode) { + case INSPUR_PW_MODE_CTL_MODE_MODE0: + gate_reg = INSPUR_MODE0_GATE; + break; + + case INSPUR_PW_MODE_CTL_MODE_MODE1: + gate_reg = INSPUR_MODE1_GATE; + break; + + default: + gate_reg = INSPUR_MODE0_GATE; + break; + } + writel(gate, mmio + gate_reg); +} + +static void inspur_hw_config(struct inspur_drm_private *priv) +{ + unsigned int reg; + + /* On hardware reset, power mode 0 is default. */ + inspur_set_power_mode(priv, INSPUR_PW_MODE_CTL_MODE_MODE0); + + /* Enable display power gate & LOCALMEM power gate */ + reg = readl(priv->mmio + INSPUR_CURRENT_GATE); + reg &= ~INSPUR_CURR_GATE_DISPLAY_MASK; + reg &= ~INSPUR_CURR_GATE_LOCALMEM_MASK; + reg |= INSPUR_CURR_GATE_DISPLAY(1); + reg |= INSPUR_CURR_GATE_LOCALMEM(1); + + inspur_set_current_gate(priv, reg); + + /* + * Reset the memory controller. 
If the memory controller + * is not reset in chip,the system might hang when sw accesses + * the memory.The memory should be resetted after + * changing the MXCLK. + */ + reg = readl(priv->mmio + INSPUR_MISC_CTRL); + reg &= ~INSPUR_MSCCTL_LOCALMEM_RESET_MASK; + reg |= INSPUR_MSCCTL_LOCALMEM_RESET(0); + writel(reg, priv->mmio + INSPUR_MISC_CTRL); + + reg &= ~INSPUR_MSCCTL_LOCALMEM_RESET_MASK; + reg |= INSPUR_MSCCTL_LOCALMEM_RESET(1); + + writel(reg, priv->mmio + INSPUR_MISC_CTRL); +} + +static int inspur_hw_map(struct inspur_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct pci_dev *pdev = to_pci_dev(dev->dev); + resource_size_t addr, size, ioaddr, iosize; + + ioaddr = pci_resource_start(pdev, 1); + iosize = pci_resource_len(pdev, 1); + priv->mmio = devm_ioremap(dev->dev, ioaddr, iosize); + if (!priv->mmio) { + DRM_ERROR("Cannot map mmio region\n"); + return -ENOMEM; + } + + addr = pci_resource_start(pdev, 0); + size = pci_resource_len(pdev, 0); + priv->fb_map = devm_ioremap(dev->dev, addr, size); + if (!priv->fb_map) { + DRM_ERROR("Cannot map framebuffer\n"); + return -ENOMEM; + } + priv->fb_base = addr; + priv->fb_size = size - MEM_SIZE_RESERVE4KVM; + + return 0; +} + +static void inspur_hw_unmap(struct inspur_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + + if (priv->mmio) { + devm_iounmap(dev->dev, priv->mmio); + priv->mmio = NULL; + } + + if (priv->fb_map) { + devm_iounmap(dev->dev, priv->fb_map); + priv->fb_map = NULL; + } +} + +static int inspur_hw_init(struct inspur_drm_private *priv) +{ + int ret; + + ret = inspur_hw_map(priv); + if (ret) + return ret; + + inspur_hw_config(priv); + + return 0; +} + +void inspur_unload(struct drm_device *dev) +{ + struct inspur_drm_private *priv = dev->dev_private; + struct pci_dev *pdev = to_pci_dev(dev->dev); + + drm_atomic_helper_shutdown(dev); + + free_irq(pdev->irq, dev); + + inspur_kms_fini(priv); + inspur_hw_unmap(priv); + pci_disable_msi(to_pci_dev(dev->dev)); + dev->dev_private = 
NULL; + +} + +int inspur_load(struct drm_device *dev, unsigned long flags) +{ + struct inspur_drm_private *priv; + struct pci_dev *pdev = to_pci_dev(dev->dev); + int ret; + + priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + DRM_ERROR("no memory to allocate for inspur_drm_private\n"); + return -ENOMEM; + } + dev->dev_private = priv; + priv->dev = dev; + + ret = inspur_hw_init(priv); + if (ret) + goto err; + + ret = + drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), + priv->fb_size); + if (ret) { + drm_err(dev, "Error initializing VRAM MM; %d\n", ret); + goto err; + } + ret = inspur_kms_init(priv); + if (ret) + goto err; + + /* reset all the states of crtc/plane/encoder/connector */ + drm_mode_config_reset(dev); + + return 0; + +err: + inspur_unload(dev); + DRM_ERROR("failed to initialize drm driver: %d\n", ret); + return ret; +} + +static int inspur_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int ret = 0; + struct drm_device *dev; + + ret = + drm_aperture_remove_conflicting_pci_framebuffers(pdev, + &inspur_driver); + if (ret) + return ret; + + dev = drm_dev_alloc(&inspur_driver, &pdev->dev); + if (IS_ERR(dev)) { + DRM_ERROR("failed to allocate drm_device\n"); + return PTR_ERR(dev); + } + + pci_set_drvdata(pdev, dev); + ret = pci_enable_device(pdev); + if (ret) { + drm_err(dev, "failed to enable pci device: %d\n", ret); + return ret; + } + ret = inspur_load(dev, ent->driver_data); + if (ret) + goto err_return; + + ret = drm_dev_register(dev, ent->driver_data); + if (ret) + goto err_inspur_driver_unload; + + drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth); + + return 0; +err_inspur_driver_unload: + inspur_unload(dev); +err_return: + return ret; +} + +static void inspur_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + drm_put_dev(dev); + pci_disable_device(pdev); +} + +static void inspur_pci_shutdown(struct pci_dev *pdev) +{ + inspur_pci_remove(pdev); +} 
+ +static struct pci_device_id inspur_pci_table[] = { + { 0x1bd4, 0x0750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 0, } +}; + +static struct pci_driver inspur_pci_driver = { + .name = "inspur-drm", + .id_table = inspur_pci_table, + .probe = inspur_pci_probe, + .remove = inspur_pci_remove, + .shutdown = inspur_pci_shutdown, + .driver.pm = &inspur_pm_ops, +}; + +static int __init inspur_init(void) +{ + return pci_register_driver(&inspur_pci_driver); +} + +static void __exit inspur_exit(void) +{ + return pci_unregister_driver(&inspur_pci_driver); +} + +module_init(inspur_init); +module_exit(inspur_exit); + +MODULE_DEVICE_TABLE(pci, inspur_pci_table); +MODULE_AUTHOR(""); +MODULE_DESCRIPTION("DRM Driver for InspurBMC"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("3.0"); diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.h b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.h new file mode 100644 index 000000000000..d47f1fbc4ad0 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_drv.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef INSPUR_DRM_DRV_H +#define INSPUR_DRM_DRV_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +struct drm_device; +struct drm_gem_object; + +#define inspur_framebuffer drm_framebuffer +#define BPP16_RED 0x0000f800 +#define BPP16_GREEN 0x000007e0 +#define BPP16_BLUE 0x0000001f +#define BPP16_WHITE 0x0000ffff +#define BPP16_GRAY 0x00008410 +#define BPP16_YELLOW 0x0000ffe0 +#define BPP16_CYAN 0x000007ff +#define BPP16_PINK 0x0000f81f +#define BPP16_BLACK 0x00000000 +struct inspur_fbdev { + struct drm_fb_helper helper; + struct inspur_framebuffer *fb; + int size; +}; + +struct inspur_cursor { + struct drm_gem_vram_object *gbo[2]; + unsigned int next_index; +}; + +struct inspur_drm_private { + /* hw */ + void __iomem *mmio; + void __iomem *fb_map; + unsigned long fb_base; + unsigned long fb_size; + + /* drm */ + struct 
drm_device *dev; + + bool mode_config_initialized; + struct drm_atomic_state *suspend_state; + + /* fbdev */ + struct inspur_fbdev *fbdev; + + /* hw cursor */ + struct inspur_cursor cursor; +}; + +#define to_inspur_framebuffer(x) container_of(x, struct inspur_framebuffer, fb) + +void inspur_set_power_mode(struct inspur_drm_private *priv, + unsigned int power_mode); +void inspur_set_current_gate(struct inspur_drm_private *priv, + unsigned int gate); +int inspur_load(struct drm_device *dev, unsigned long flags); +void inspur_unload(struct drm_device *dev); + +int inspur_de_init(struct inspur_drm_private *priv); +int inspur_vdac_init(struct inspur_drm_private *priv); + +int inspur_gem_create(struct drm_device *dev, u32 size, bool iskernel, + struct drm_gem_object **obj); + +int inspur_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); + +extern const struct drm_mode_config_funcs inspur_mode_funcs; + +#endif diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_regs.h b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_regs.h new file mode 100644 index 000000000000..1b845440ba44 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_regs.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef INSPUR_DRM_HW_H +#define INSPUR_DRM_HW_H + +/* register definition */ +#define INSPUR_MISC_CTRL 0x4 + +#define INSPUR_MSCCTL_LOCALMEM_RESET(x) ((x) << 6) +#define INSPUR_MSCCTL_LOCALMEM_RESET_MASK 0x40 + +#define INSPUR_CURRENT_GATE 0x000040 +#define INSPUR_CURR_GATE_DISPLAY(x) ((x) << 2) +#define INSPUR_CURR_GATE_DISPLAY_MASK 0x4 + +#define INSPUR_CURR_GATE_LOCALMEM(x) ((x) << 1) +#define INSPUR_CURR_GATE_LOCALMEM_MASK 0x2 + +#define INSPUR_MODE0_GATE 0x000044 +#define INSPUR_MODE1_GATE 0x000048 +#define INSPUR_POWER_MODE_CTRL 0x00004C + +#define INSPUR_PW_MODE_CTL_OSC_INPUT(x) ((x) << 3) +#define INSPUR_PW_MODE_CTL_OSC_INPUT_MASK 0x8 + +#define INSPUR_PW_MODE_CTL_MODE(x) ((x) << 0) +#define 
INSPUR_PW_MODE_CTL_MODE_MASK 0x03 +#define INSPUR_PW_MODE_CTL_MODE_SHIFT 0 + +#define INSPUR_PW_MODE_CTL_MODE_MODE0 0 +#define INSPUR_PW_MODE_CTL_MODE_MODE1 1 +#define INSPUR_PW_MODE_CTL_MODE_SLEEP 2 + +//#define INSPUR_CRT_PLL_CTRL 0x000060 + +#define INSPUR_PLL_CTRL_BYPASS(x) ((x) << 18) +#define INSPUR_PLL_CTRL_BYPASS_MASK 0x40000 + +#define INSPUR_PLL_CTRL_POWER(x) ((x) << 17) +#define INSPUR_PLL_CTRL_POWER_MASK 0x20000 + +#define INSPUR_PLL_CTRL_INPUT(x) ((x) << 16) +#define INSPUR_PLL_CTRL_INPUT_MASK 0x10000 + +#define INSPUR_PLL_CTRL_POD(x) ((x) << 14) +#define INSPUR_PLL_CTRL_POD_MASK 0xC000 + +#define INSPUR_PLL_CTRL_OD(x) ((x) << 12) +#define INSPUR_PLL_CTRL_OD_MASK 0x3000 + +#define INSPUR_PLL_CTRL_N(x) ((x) << 8) +#define INSPUR_PLL_CTRL_N_MASK 0xF00 + +#define INSPUR_PLL_CTRL_M(x) ((x) << 0) +#define INSPUR_PLL_CTRL_M_MASK 0xFF + +#define INSPUR_CRT_DISP_CTL 0x80200 + +#define INSPUR_CRT_DISP_CTL_DPMS(x) ((x) << 30) +#define INSPUR_CRT_DISP_CTL_DPMS_MASK 0xc0000000 + +#define INSPUR_CRT_DPMS_ON 0 +#define INSPUR_CRT_DPMS_OFF 3 + +#define INSPUR_CRT_DISP_CTL_CRTSELECT(x) ((x) << 25) +#define INSPUR_CRT_DISP_CTL_CRTSELECT_MASK 0x2000000 + +#define INSPUR_CRTSELECT_CRT 1 + +#define INSPUR_CRT_DISP_CTL_CLOCK_PHASE(x) ((x) << 14) +#define INSPUR_CRT_DISP_CTL_CLOCK_PHASE_MASK 0x4000 + +#define INSPUR_CRT_DISP_CTL_VSYNC_PHASE(x) ((x) << 13) +#define INSPUR_CRT_DISP_CTL_VSYNC_PHASE_MASK 0x2000 + +#define INSPUR_CRT_DISP_CTL_HSYNC_PHASE(x) ((x) << 12) +#define INSPUR_CRT_DISP_CTL_HSYNC_PHASE_MASK 0x1000 + +#define INSPUR_CRT_DISP_CTL_TIMING(x) ((x) << 8) +#define INSPUR_CRT_DISP_CTL_TIMING_MASK 0x100 + +#define INSPUR_CRT_DISP_CTL_PLANE(x) ((x) << 2) +#define INSPUR_CRT_DISP_CTL_PLANE_MASK 4 + +#define INSPUR_CRT_DISP_CTL_FORMAT(x) ((x) << 0) +#define INSPUR_CRT_DISP_CTL_FORMAT_MASK 0x03 + +#define INSPUR_CRT_FB_ADDRESS 0x080204 + +#define INSPUR_CRT_FB_WIDTH 0x080208 +#define INSPUR_CRT_FB_WIDTH_WIDTH(x) ((x) << 16) +#define INSPUR_CRT_FB_WIDTH_WIDTH_MASK 
0x3FFF0000 +#define INSPUR_CRT_FB_WIDTH_OFFS(x) ((x) << 0) +#define INSPUR_CRT_FB_WIDTH_OFFS_MASK 0x3FFF + +#define INSPUR_CRT_HORZ_TOTAL 0x08020C +#define INSPUR_CRT_HORZ_TOTAL_TOTAL(x) ((x) << 16) +#define INSPUR_CRT_HORZ_TOTAL_TOTAL_MASK 0xFFF0000 + +#define INSPUR_CRT_HORZ_TOTAL_DISP_END(x) ((x) << 0) +#define INSPUR_CRT_HORZ_TOTAL_DISP_END_MASK 0xFFF + +#define INSPUR_CRT_HORZ_SYNC 0x080210 +#define INSPUR_CRT_HORZ_SYNC_WIDTH(x) ((x) << 16) +#define INSPUR_CRT_HORZ_SYNC_WIDTH_MASK 0xFF0000 + +#define INSPUR_CRT_HORZ_SYNC_START(x) ((x) << 0) +#define INSPUR_CRT_HORZ_SYNC_START_MASK 0xFFF + +#define INSPUR_CRT_VERT_TOTAL 0x080214 +#define INSPUR_CRT_VERT_TOTAL_TOTAL(x) ((x) << 16) +#define INSPUR_CRT_VERT_TOTAL_TOTAL_MASK 0x7FFF0000 + +#define INSPUR_CRT_VERT_TOTAL_DISP_END(x) ((x) << 0) +#define INSPUR_CRT_VERT_TOTAL_DISP_END_MASK 0x7FF + +#define INSPUR_CRT_VERT_SYNC 0x080218 +#define INSPUR_CRT_VERT_SYNC_HEIGHT(x) ((x) << 16) +#define INSPUR_CRT_VERT_SYNC_HEIGHT_MASK 0x3F0000 + +#define INSPUR_CRT_VERT_SYNC_START(x) ((x) << 0) +#define INSPUR_CRT_VERT_SYNC_START_MASK 0x7FF + +/* Hardware Cursor */ +#define INSPUR_HWC_ADDRESS 0x080230 +#define INSPUR_HWC_ADDRESS_ENABLE(x) ((x) << 31) +#define INSPUR_HWC_ADDRESS_ENABLE_MASK 0x80000000 +#define INSPUR_HWC_ADDRESS_ADDRESS(x) ((x) << 0) +#define INSPUR_HWC_ADDRESS_ADDRESS_MASK 0xFFFFFFF + +#define INSPUR_HWC_LOCATION 0x080234 +#define INSPUR_HWC_LOCATION_TOP(x) ((x) << 27) +#define INSPUR_HWC_LOCATION_TOP_MASK 0x8000000 +#define INSPUR_HWC_LOCATION_Y(x) ((x) << 16) +#define INSPUR_HWC_LOCATION_Y_MASK 0x7FF0000 +#define INSPUR_HWC_LOCATION_LEFT(x) ((x) << 11) +#define INSPUR_HWC_LOCATION_LEFT_MASK 0x800 +#define INSPUR_HWC_LOCATION_X(x) ((x) << 0) +#define INSPUR_HWC_LOCATION_X_MASK 0x7FF + +#define INSPUR_HWC_COLOR_12 0x080238 +#define INSPUR_HWC_COLOR_12_2_RGB(x) ((x) << 16) +#define INSPUR_HWC_COLOR_12_2_RGB_MASK 0xFFFF0000 +#define INSPUR_HWC_COLOR_12_1_RGB(x) ((x) << 0) +#define INSPUR_HWC_COLOR_12_1_RGB_MASK 
0xFFFF + +#define INSPUR_HWC_COLOR_3 0x08023C +#define INSPUR_HWC_COLOR_3_RGB(x) ((x) << 0) +#define INSPUR_HWC_COLOR_3_RGB_MASK 0xFFFF + +/* Auto Centering */ +#define INSPUR_CRT_AUTO_CENTERING_TL 0x080280 +#define INSPUR_CRT_AUTO_CENTERING_TL_TOP(x) ((x) << 16) +#define INSPUR_CRT_AUTO_CENTERING_TL_TOP_MASK 0x7FF0000 + +#define INSPUR_CRT_AUTO_CENTERING_TL_LEFT(x) ((x) << 0) +#define INSPUR_CRT_AUTO_CENTERING_TL_LEFT_MASK 0x7FF + +#define INSPUR_CRT_AUTO_CENTERING_BR 0x080284 +#define INSPUR_CRT_AUTO_CENTERING_BR_BOTTOM(x) ((x) << 16) +#define INSPUR_CRT_AUTO_CENTERING_BR_BOTTOM_MASK 0x7FF0000 + +#define INSPUR_CRT_AUTO_CENTERING_BR_RIGHT(x) ((x) << 0) +#define INSPUR_CRT_AUTO_CENTERING_BR_RIGHT_MASK 0x7FF + +/* register to control panel output */ +#define INSPUR_DISPLAY_CONTROL_HISILE 0x80288 +#define INSPUR_DISPLAY_CONTROL_FPVDDEN(x) ((x) << 0) +#define INSPUR_DISPLAY_CONTROL_PANELDATE(x) ((x) << 1) +#define INSPUR_DISPLAY_CONTROL_FPEN(x) ((x) << 2) +#define INSPUR_DISPLAY_CONTROL_VBIASEN(x) ((x) << 3) + +#define INSPUR_RAW_INTERRUPT 0x80290 +#define INSPUR_RAW_INTERRUPT_VBLANK(x) ((x) << 2) +#define INSPUR_RAW_INTERRUPT_VBLANK_MASK 0x4 + +#define INSPUR_RAW_INTERRUPT_EN 0x80298 +#define INSPUR_RAW_INTERRUPT_EN_VBLANK(x) ((x) << 2) +#define INSPUR_RAW_INTERRUPT_EN_VBLANK_MASK 0x4 + +/* register and values for PLL control */ +#define CRT_PLL1_NS 0x802a8 +#define CRT_PLL1_NS_OUTER_BYPASS(x) ((x) << 30) +#define CRT_PLL1_NS_INTER_BYPASS(x) ((x) << 29) +#define CRT_PLL1_NS_POWERON(x) ((x) << 24) + +#define CRT_PLL1_NS_25MHZ 0x00006691 //640x480 +#define CRT_PLL1_NS_40MHZ 0x00004580 //800x600 +#define CRT_PLL1_NS_65MHZ 0x00002568 //1024x768 +#define CRT_PLL1_NS_83MHZ 0x000027bb //1280x800 +#define CRT_PLL1_NS_106MHZ 0x000027ef //1440x900 +#define CRT_PLL1_NS_108MHZ 0x000027f2 //1280x1024 +#define CRT_PLL1_NS_146MHZ 0x00001575 //1680x1050 +#define CRT_PLL1_NS_148MHZ 0x0000145f //1920x1080 +#define CRT_PLL1_NS_193MHZ 0x000018f7 //1920x1200 + +#define CRT_PLL2_NS 
0x802ac +#define CRT_PLL2_NS_25MHZ 0x0 +#define CRT_PLL2_NS_40MHZ 0x0 +#define CRT_PLL2_NS_65MHZ 0x0 +#define CRT_PLL2_NS_83MHZ 0x0 +#define CRT_PLL2_NS_106MHZ 0x0 +#define CRT_PLL2_NS_108MHZ 0x0 +#define CRT_PLL2_NS_146MHZ 0x0 +#define CRT_PLL2_NS_148MHZ 0x0 +#define CRT_PLL2_NS_193MHZ 0x0 + +#define INSPUR_FIELD(field, value) (field(value) & field##_MASK) +#endif diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_vdac.c b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_vdac.c new file mode 100644 index 000000000000..4b31d82b00f8 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_drm_vdac.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +#include "inspur_drm_drv.h" +#include "inspur_drm_regs.h" + +static int inspur_connector_get_modes(struct drm_connector *connector) +{ + int count; + + count = drm_add_modes_noedid(connector, + connector->dev->mode_config.max_width, + connector->dev->mode_config.max_height); + drm_set_preferred_mode(connector, 1024, 768); + return count; +} + +static int inspur_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static const struct drm_connector_helper_funcs inspur_connector_helper_funcs = { + .get_modes = inspur_connector_get_modes, + .mode_valid = inspur_connector_mode_valid, +}; + +static const struct drm_connector_funcs inspur_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static void inspur_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adj_mode) +{ + u32 reg; + struct drm_device *dev = encoder->dev; + struct inspur_drm_private *priv = dev->dev_private; + + reg = readl(priv->mmio + 
INSPUR_DISPLAY_CONTROL_HISILE); + reg |= INSPUR_DISPLAY_CONTROL_FPVDDEN(1); + reg |= INSPUR_DISPLAY_CONTROL_PANELDATE(1); + reg |= INSPUR_DISPLAY_CONTROL_FPEN(1); + reg |= INSPUR_DISPLAY_CONTROL_VBIASEN(1); + writel(reg, priv->mmio + INSPUR_DISPLAY_CONTROL_HISILE); +} + +static const struct drm_encoder_helper_funcs inspur_encoder_helper_funcs = { + .mode_set = inspur_encoder_mode_set, +}; + +static const struct drm_encoder_funcs inspur_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +int inspur_vdac_init(struct inspur_drm_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_encoder *encoder; + struct drm_connector *connector; + int ret; + + encoder = devm_kzalloc(dev->dev, sizeof(*encoder), GFP_KERNEL); + if (!encoder) { + DRM_ERROR("failed to alloc memory when init encoder\n"); + return -ENOMEM; + } + + encoder->possible_crtcs = 0x1; + ret = drm_encoder_init(dev, encoder, &inspur_encoder_funcs, + DRM_MODE_ENCODER_DAC, NULL); + if (ret) { + DRM_ERROR("failed to init encoder: %d\n", ret); + return ret; + } + + drm_encoder_helper_add(encoder, &inspur_encoder_helper_funcs); + + connector = devm_kzalloc(dev->dev, sizeof(*connector), GFP_KERNEL); + if (!connector) { + DRM_ERROR("failed to alloc memory when init connector\n"); + return -ENOMEM; + } + + ret = drm_connector_init(dev, connector, + &inspur_connector_funcs, + DRM_MODE_CONNECTOR_VGA); + if (ret) { + DRM_ERROR("failed to init connector: %d\n", ret); + return ret; + } + drm_connector_helper_add(connector, &inspur_connector_helper_funcs); + + drm_connector_register(connector); + drm_connector_attach_encoder(connector, encoder); + return 0; +} diff --git a/drivers/gpu/drm/inspur/inspur-drm/inspur_ttm.c b/drivers/gpu/drm/inspur/inspur-drm/inspur_ttm.c new file mode 100644 index 000000000000..1c9acc776102 --- /dev/null +++ b/drivers/gpu/drm/inspur/inspur-drm/inspur_ttm.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#include "inspur_drm_drv.h" + +int 
inspur_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + + return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args); +} + +const struct drm_mode_config_funcs inspur_mode_funcs = { + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, + .fb_create = drm_gem_fb_create, + .mode_valid = drm_vram_helper_mode_valid, +}; -- Gitee From 106935a826c50925940620b5e4889cacefeb3d71 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Tue, 2 Apr 2024 21:54:10 +0800 Subject: [PATCH 0881/2138] anolis: kvm: add kvm_arch_hypercall hook interface ANBZ: #8699 Signed-off-by: xiongmengbiao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3000 --- arch/x86/include/asm/kvm-x86-ops.h | 1 + arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/hygon/psp.c | 2 +- arch/x86/kvm/svm/svm.c | 17 +++++++++++++++++ arch/x86/kvm/x86.c | 4 +++- 5 files changed, 23 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 0c540ac3872e..348b78389406 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -137,6 +137,7 @@ KVM_X86_OP(vcpu_deliver_sipi_vector) KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons); KVM_X86_OP_OPTIONAL(get_untagged_addr) KVM_X86_OP_OPTIONAL(vm_attestation) +KVM_X86_OP_OPTIONAL(arch_hypercall) KVM_X86_OP_OPTIONAL(control_pre_system_reset) KVM_X86_OP_OPTIONAL(control_post_system_reset) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 213d17d35ce3..ac6599d54c87 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1758,6 +1758,7 @@ struct kvm_x86_ops { int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len); int (*control_pre_system_reset)(struct kvm *kvm); int (*control_post_system_reset)(struct kvm *kvm); + int (*arch_hypercall)(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 a2, u64 
a3); }; struct kvm_x86_nested_ops { diff --git a/arch/x86/kvm/hygon/psp.c b/arch/x86/kvm/hygon/psp.c index 9181ec2406ec..3d33afd4e644 100644 --- a/arch/x86/kvm/hygon/psp.c +++ b/arch/x86/kvm/hygon/psp.c @@ -629,4 +629,4 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, /* return psp_ret to guest */ kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)); return ret; -} +} EXPORT_SYMBOL_GPL(kvm_pv_psp_op); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 800d40670d76..37e4ed47d22a 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5040,6 +5040,22 @@ static int svm_vm_init(struct kvm *kvm) return 0; } +static int kvm_hygon_arch_hypercall(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 a2, u64 a3) +{ + int ret = 0; + + switch (nr) { + case KVM_HC_PSP_OP: + ret = kvm_pv_psp_op(kvm, a0, a1, a2, a3); + break; + + default: + ret = -KVM_ENOSYS; + break; + } + return ret; +} + static struct kvm_x86_ops svm_x86_ops __initdata = { .name = KBUILD_MODNAME, @@ -5175,6 +5191,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .vm_attestation = sev_vm_attestation, .control_pre_system_reset = csv_control_pre_system_reset, .control_post_system_reset = csv_control_post_system_reset, + .arch_hypercall = kvm_hygon_arch_hypercall, }; /* diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 804dfa0c3202..7a044c4427f3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9966,7 +9966,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) ret = static_call(kvm_x86_vm_attestation)(vcpu->kvm, a0, a1); break; case KVM_HC_PSP_OP: - ret = kvm_pv_psp_op(vcpu->kvm, a0, a1, a2, a3); + ret = -KVM_ENOSYS; + if (kvm_x86_ops.arch_hypercall) + ret = static_call(kvm_x86_arch_hypercall)(vcpu->kvm, nr, a0, a1, a2, a3); break; default: ret = -KVM_ENOSYS; -- Gitee From 556848bae39c90eec698c40aa00711ee6415b2d3 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Wed, 3 Apr 2024 11:19:18 +0800 Subject: [PATCH 0882/2138] 
anolis: crypto: ccp: Eliminate dependence of the kvm module on the ccp module ANBZ: #8699 Because the KVM module calls certain interfaces from the ccp module, such as vpsp_try_do_cmd, it is necessary to load the ccp module before loading kvm. However, on CPUs other than Hygon, the ccp module might not be loaded, which would prevent the kvm module from loading. Therefore, we use function hooks to call functions from the ccp module. Now the module dependencies are as follows: [root@anolis ~]# lsmod | grep kvm kvm_amd 200704 0 kvm 1339392 1 kvm_amd ccp 352256 1 kvm_amd irqbypass 12288 2 vfio_pci_core,kvm Signed-off-by: xiongmengbiao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3000 --- arch/x86/include/asm/kvm_host.h | 2 - arch/x86/kvm/Makefile | 2 +- arch/x86/kvm/svm/svm.c | 7 ++- drivers/crypto/ccp/Makefile | 3 +- .../hygon/psp.c => drivers/crypto/ccp/vpsp.c | 48 +++++++++---------- include/linux/psp-sev.h | 14 ++++++ 6 files changed, 47 insertions(+), 29 deletions(-) rename arch/x86/kvm/hygon/psp.c => drivers/crypto/ccp/vpsp.c (90%) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index ac6599d54c87..508d6dccb3c4 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2151,8 +2151,6 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, unsigned long ipi_bitmap_high, u32 min, unsigned long icr, int op_64_bit); -int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, - gpa_t psp_ret_gpa, gpa_t table_gpa); int kvm_add_user_return_msr(u32 msr); int kvm_find_user_return_msr(u32 msr); diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index a70ae9e2ad1a..39bbce9b1685 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -12,7 +12,7 @@ include $(srctree)/virt/kvm/Makefile.kvm kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ hyperv.o 
debugfs.o mmu/mmu.o mmu/page_track.o \ - mmu/spte.o hygon/psp.o + mmu/spte.o ifdef CONFIG_HYPERV kvm-y += kvm_onhyperv.o diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 37e4ed47d22a..7c5a7bd1a7ff 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5043,10 +5043,15 @@ static int svm_vm_init(struct kvm *kvm) static int kvm_hygon_arch_hypercall(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 a2, u64 a3) { int ret = 0; + struct kvm_vpsp vpsp = { + .kvm = kvm, + .write_guest = kvm_write_guest, + .read_guest = kvm_read_guest + }; switch (nr) { case KVM_HC_PSP_OP: - ret = kvm_pv_psp_op(kvm, a0, a1, a2, a3); + ret = kvm_pv_psp_op(&vpsp, a0, a1, a2, a3); break; default: diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 088d53009824..70bab9cbe3d5 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -14,7 +14,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ platform-access.o \ dbc.o \ psp-ringbuf.o \ - csv-dev.o + csv-dev.o \ + vpsp.o ccp-$(CONFIG_TDM_DEV_HYGON) += tdm-dev.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o diff --git a/arch/x86/kvm/hygon/psp.c b/drivers/crypto/ccp/vpsp.c similarity index 90% rename from arch/x86/kvm/hygon/psp.c rename to drivers/crypto/ccp/vpsp.c index 3d33afd4e644..3f18530c5353 100644 --- a/arch/x86/kvm/hygon/psp.c +++ b/drivers/crypto/ccp/vpsp.c @@ -185,7 +185,7 @@ static gpa_t get_gpa_from_hva(struct gpa2hva_tbls *g2h, void *hva) * newly allocated hva(host virtual address) and updates the mapping * relationship in the parent memory */ -static int guest_multiple_level_gpa_replace(struct kvm *kvm, +static int guest_multiple_level_gpa_replace(struct kvm_vpsp *vpsp, struct map_tbl *tbl, struct gpa2hva_tbls *g2h) { int ret = 0; @@ -200,7 +200,7 @@ static int guest_multiple_level_gpa_replace(struct kvm *kvm, return -ENOMEM; /* get child gpa from parent gpa */ - if (unlikely(kvm_read_guest(kvm, tbl->parent_pa + tbl->offset, + if 
(unlikely(vpsp->read_guest(vpsp->kvm, tbl->parent_pa + tbl->offset, &sub_paddr, sizeof(sub_paddr)))) { pr_err("[%s]: kvm_read_guest for parent gpa failed\n", __func__); @@ -209,7 +209,7 @@ static int guest_multiple_level_gpa_replace(struct kvm *kvm, } /* copy child block data from gpa to hva */ - if (unlikely(kvm_read_guest(kvm, sub_paddr, (void *)tbl->hva, + if (unlikely(vpsp->read_guest(vpsp->kvm, sub_paddr, (void *)tbl->hva, tbl->size))) { pr_err("[%s]: kvm_read_guest for sub_data failed\n", __func__); @@ -249,7 +249,7 @@ static int guest_multiple_level_gpa_replace(struct kvm *kvm, * address) back to the memory corresponding to the gpa, and restores * the mapping relationship in the original parent memory */ -static int guest_multiple_level_gpa_restore(struct kvm *kvm, +static int guest_multiple_level_gpa_restore(struct kvm_vpsp *vpsp, struct map_tbl *tbl, struct gpa2hva_tbls *g2h) { int ret = 0; @@ -266,7 +266,7 @@ static int guest_multiple_level_gpa_restore(struct kvm *kvm, } /* copy child block data from hva to gpa */ - if (unlikely(kvm_write_guest(kvm, sub_gpa, (void *)tbl->hva, + if (unlikely(vpsp->write_guest(vpsp->kvm, sub_gpa, (void *)tbl->hva, tbl->size))) { pr_err("[%s]: kvm_write_guest for sub_gpa failed\n", __func__); @@ -300,7 +300,7 @@ static int guest_multiple_level_gpa_restore(struct kvm *kvm, * executes upper-layer abstract interfaces, including replacing and * restoring two sub-processing functions */ -static int guest_addr_map_table_op(struct kvm *kvm, struct gpa2hva_tbls *g2h, +static int guest_addr_map_table_op(struct kvm_vpsp *vpsp, struct gpa2hva_tbls *g2h, struct addr_map_tbls *map_tbls, int op) { int ret = 0; @@ -321,7 +321,7 @@ static int guest_addr_map_table_op(struct kvm *kvm, struct gpa2hva_tbls *g2h, } /* restore new pa of kva with the gpa from guest */ - if (unlikely(guest_multiple_level_gpa_restore(kvm, + if (unlikely(guest_multiple_level_gpa_restore(vpsp, &map_tbls->tbl[i], g2h))) { pr_err("[%s]: guest_multiple_level_gpa_restore 
failed\n", __func__); @@ -352,7 +352,7 @@ static int guest_addr_map_table_op(struct kvm *kvm, struct gpa2hva_tbls *g2h, } /* replace the gpa from guest with the new pa of kva */ - if (unlikely(guest_multiple_level_gpa_replace(kvm, + if (unlikely(guest_multiple_level_gpa_replace(vpsp, &map_tbls->tbl[i], g2h))) { pr_err("[%s]: guest_multiple_level_gpa_replace failed\n", __func__); @@ -390,7 +390,7 @@ static void kvm_pv_psp_mem_free(struct gpa2hva_tbls *g2h, struct addr_map_tbls * information in the command buffer, the processed data will be * used to interact with the psp device */ -static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, +static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, gpa_t table_gpa, struct vpsp_hbuf_wrapper *hbuf) { int ret = 0; @@ -402,7 +402,7 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, struct gpa2hva_tbls *g2h = NULL; uint32_t g2h_tbl_size; - if (unlikely(kvm_read_guest(kvm, data_gpa, &psp_head, + if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, &psp_head, sizeof(struct psp_cmdresp_head)))) return -EFAULT; @@ -411,14 +411,14 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, if (!data) return -ENOMEM; - if (unlikely(kvm_read_guest(kvm, data_gpa, data, data_size))) { + if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, data, data_size))) { ret = -EFAULT; goto end; } if (table_gpa) { /* parse address map table from guest */ - if (unlikely(kvm_read_guest(kvm, table_gpa, &map_head, + if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, &map_head, sizeof(struct addr_map_tbls)))) { pr_err("[%s]: kvm_read_guest for map_head failed\n", __func__); @@ -434,7 +434,7 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, goto end; } - if (unlikely(kvm_read_guest(kvm, table_gpa, map_tbls, + if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, map_tbls, map_tbl_size))) { pr_err("[%s]: kvm_read_guest for map_tbls failed\n", __func__); @@ -460,7 +460,7 @@ static int 
kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, goto end; } - if (guest_addr_map_table_op(kvm, g2h, map_tbls, 0)) { + if (guest_addr_map_table_op(vpsp, g2h, map_tbls, 0)) { pr_err("[%s]: guest_addr_map_table_op for replacing failed\n", __func__); ret = -EFAULT; @@ -484,13 +484,13 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, * pointer of the mapping table when the command has finished * interacting with the psp device */ -static int kvm_pv_psp_cmd_post_op(struct kvm *kvm, gpa_t data_gpa, +static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, struct vpsp_hbuf_wrapper *hbuf) { int ret = 0; if (hbuf->map_tbls) { - if (guest_addr_map_table_op(kvm, hbuf->g2h_tbls, + if (guest_addr_map_table_op(vpsp, hbuf->g2h_tbls, hbuf->map_tbls, 1)) { pr_err("[%s]: guest_addr_map_table_op for restoring failed\n", __func__); @@ -500,7 +500,7 @@ static int kvm_pv_psp_cmd_post_op(struct kvm *kvm, gpa_t data_gpa, } /* restore cmdresp's buffer from context */ - if (unlikely(kvm_write_guest(kvm, data_gpa, hbuf->data, + if (unlikely(vpsp->write_guest(vpsp->kvm, data_gpa, hbuf->data, hbuf->data_size))) { pr_err("[%s]: kvm_write_guest for cmdresp data failed\n", __func__); @@ -526,7 +526,7 @@ static int cmd_type_is_tkm(int cmd) /* * The primary implementation interface of virtual PSP in kernel mode */ -int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, +int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, gpa_t table_gpa) { int ret = 0; @@ -540,21 +540,21 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, // only tkm cmd need vid if (cmd_type_is_tkm(vcmd->cmd_id)) { // check the permission to use the default vid when no vid is set - ret = vpsp_get_vid(&vid, kvm->userspace_pid); + ret = vpsp_get_vid(&vid, vpsp->kvm->userspace_pid); if (ret && !vpsp_get_default_vid_permission()) { pr_err("[%s]: not allowed tkm command without vid\n", __func__); return 
-EFAULT; } } - if (unlikely(kvm_read_guest(kvm, psp_ret_gpa, &psp_ret, + if (unlikely(vpsp->read_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)))) return -EFAULT; switch (psp_ret.status) { case VPSP_INIT: /* multilevel pointer replace*/ - ret = kvm_pv_psp_cmd_pre_op(kvm, data_gpa, table_gpa, &hbuf); + ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, table_gpa, &hbuf); if (unlikely(ret)) { psp_ret.status = VPSP_FINISH; pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n", @@ -581,7 +581,7 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, ret = 0; } else if (psp_ret.status == VPSP_FINISH) { /* restore multilevel pointer data */ - ret = kvm_pv_psp_cmd_post_op(kvm, data_gpa, &hbuf); + ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &hbuf); if (unlikely(ret)) { pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", __func__); @@ -609,7 +609,7 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, ret = 0; } else if (psp_ret.status == VPSP_FINISH) { /* restore multilevel pointer data */ - ret = kvm_pv_psp_cmd_post_op(kvm, data_gpa, + ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &g_hbuf_wrap[prio][index]); if (unlikely(ret)) { pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", @@ -627,6 +627,6 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, } end: /* return psp_ret to guest */ - kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)); + vpsp->write_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)); return ret; } EXPORT_SYMBOL_GPL(kvm_pv_psp_op); diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 74086c114184..9a144026f89a 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -13,6 +13,7 @@ #define __PSP_SEV_H__ #include +#include #define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */ @@ -671,6 +672,12 @@ struct vpsp_ret { u32 status : 2; }; +struct kvm_vpsp { + struct kvm *kvm; + int (*write_guest)(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long 
len); + int (*read_guest)(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); +}; + #define PSP_VID_MASK 0xff #define PSP_VID_SHIFT 56 #define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) @@ -827,6 +834,9 @@ int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) int vpsp_get_vid(uint32_t *vid, pid_t pid); int vpsp_get_default_vid_permission(void); + +int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, + gpa_t table_gpa); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int @@ -874,6 +884,10 @@ vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) { r static inline int vpsp_get_default_vid_permission(void) { return -ENODEV; } + +static inline int +kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, + gpa_t psp_ret_gpa, gpa_t table_gpa) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ -- Gitee From 9408517e45d5bf81d6a2b36a43a96fa252633f51 Mon Sep 17 00:00:00 2001 From: yangdepei Date: Mon, 8 Apr 2024 17:09:39 +0800 Subject: [PATCH 0883/2138] anolis: bugfix: crypto: ccp: remove repeated sm4-hs mode ANBZ: #8582 remove the repeated sm4-hs mode definition, otherwise, it will caused ccp-crypto module load err in the following version of kernel-6.6, eg. 
6.6.20 Signed-off-by: yangdepei Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3052 --- drivers/crypto/ccp/ccp-crypto-sm4-hygon.c | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c index 0d1c750ff118..2328a9f87218 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c @@ -205,15 +205,6 @@ static struct ccp_sm4_def sm4_algs[] = { .ivsize = 0, .alg_defaults = &ccp_sm4_defaults, }, - { - .mode = CCP_SM4_ALG_MODE_ECB_HS, - .version = CCP_VERSION(5, 0), - .name = "ecb(sm4)", - .driver_name = "ecb-sm4-hs-ccp", - .blocksize = SM4_BLOCK_SIZE, - .ivsize = 0, - .alg_defaults = &ccp_sm4_defaults, - }, { .mode = CCP_SM4_ALG_MODE_CBC, .version = CCP_VERSION(5, 0), @@ -232,15 +223,6 @@ static struct ccp_sm4_def sm4_algs[] = { .ivsize = SM4_BLOCK_SIZE, .alg_defaults = &ccp_sm4_defaults, }, - { - .mode = CCP_SM4_ALG_MODE_CBC_HS, - .version = CCP_VERSION(5, 0), - .name = "cbc(sm4)", - .driver_name = "cbc-sm4-hs-ccp", - .blocksize = SM4_BLOCK_SIZE, - .ivsize = SM4_BLOCK_SIZE, - .alg_defaults = &ccp_sm4_defaults, - }, { .mode = CCP_SM4_ALG_MODE_OFB, .version = CCP_VERSION(5, 0), -- Gitee From 1f9d7663b96ee4665213efede15a14e02784b889 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Tue, 16 Apr 2024 11:26:43 +0800 Subject: [PATCH 0884/2138] anolis: fs/resctrl: Remove mbm_Bps features checking from resctrl_is_mbm_{enabled,event} ANBZ: #8763 A new mbm_Bps monitoring features is introduced by commit 09d14609221e ("anolis: fs/resctrl: Add a new resctrl monitoring event to get MB in Bps"). Since it is not a non-decreasing counter, but an instantaneous value in a hardware window, it should not be treated as true in function resctrl_is_mbm_{enabled,event}. 
Fixes: 09d14609221e ("anolis: fs/resctrl: Add a new resctrl monitoring event to get MB in Bps") Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3060 --- fs/resctrl/rdtgroup.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c index e39f22453d84..ea969ddb1a9d 100644 --- a/fs/resctrl/rdtgroup.c +++ b/fs/resctrl/rdtgroup.c @@ -113,14 +113,13 @@ void rdt_staged_configs_clear(void) static bool resctrl_is_mbm_enabled(void) { return (resctrl_arch_is_mbm_total_enabled() || - resctrl_arch_is_mbm_local_enabled() || - resctrl_arch_is_mbm_bps_enabled()); + resctrl_arch_is_mbm_local_enabled()); } static bool resctrl_is_mbm_event(int e) { return (e >= QOS_L3_MBM_TOTAL_EVENT_ID && - e <= QOS_MC_MBM_BPS_EVENT_ID); + e <= QOS_L3_MBM_LOCAL_EVENT_ID); } /* -- Gitee From 37b6378ab8cb3774491733778206bfd41db8c803 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Tue, 16 Apr 2024 09:45:55 +0800 Subject: [PATCH 0885/2138] anolis: fs/resctrl: Cancel the delayed checking works when resctrl is umounted ANBZ: #8763 In the following scenario, an MPAM error interrupt with MSMONCFG_ID_RANGE error code has been triggered: mount -t resctrl resctrl /sys/fs/resctrl # create the maximum ctrl&mon groups umount /sys/fs/resctrl mount -t resctrl resctrl -o cdp /sys/fs/resctrl To keep all the unused rmids clean, a delayed work cqm_handle_limbo() periodically checks and recycles the dirty rmids in the background, even when the resctrl is umounted. When remounting restrl with cdp enabled, the `cdp_enabled` will be set true and the actual PARTID will correspond to twice the closid. Then the delayed work could access the out-of-range PARTID by resctrl_arch_rmid_read(). Another delayed work mbm_handle_overflow() has the same problem. To alleviate this issue, cancel the delayed works when resctrl is umounted. 
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3060 --- fs/resctrl/rdtgroup.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c index ea969ddb1a9d..643ea199c428 100644 --- a/fs/resctrl/rdtgroup.c +++ b/fs/resctrl/rdtgroup.c @@ -2792,6 +2792,9 @@ static void rmdir_all_sub(void) static void rdt_kill_sb(struct super_block *sb) { + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); + struct rdt_domain *d; + cpus_read_lock(); mutex_lock(&rdtgroup_mutex); @@ -2801,6 +2804,20 @@ static void rdt_kill_sb(struct super_block *sb) resctrl_arch_reset_resources(); rmdir_all_sub(); + + /* + * When resctrl is umounted, forcefully cancel delayed works since the + * new mount option may be changed. + */ + list_for_each_entry(d, &l3->domains, list) { + if (resctrl_is_mbm_enabled()) + cancel_delayed_work(&d->mbm_over); + if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) { + __check_limbo(d, true); + cancel_delayed_work(&d->cqm_limbo); + } + } + rdt_pseudo_lock_release(); rdtgroup_default.mode = RDT_MODE_SHAREABLE; schemata_list_destroy(); -- Gitee From b03d521e10efb42bffb2278b4e6778aa3dc74bf3 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Tue, 16 Apr 2024 11:40:38 +0800 Subject: [PATCH 0886/2138] anolis: arm_mpam: Fix the ris field parsing typo in error irq handler ANBZ: #8763 The ris field in MPAMF_ESR should be parsed from MPAMF_ESR_RIS instead of MPAMF_ESR_PMG. Fix this typo. 
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3060 --- drivers/platform/mpam/mpam_devices.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c index 906f8a6b6940..0134263c88af 100644 --- a/drivers/platform/mpam/mpam_devices.c +++ b/drivers/platform/mpam/mpam_devices.c @@ -2055,7 +2055,7 @@ static irqreturn_t __mpam_irq_handler(int irq, struct mpam_msc *msc) partid = FIELD_GET(MPAMF_ESR_PARTID_OR_MON, reg); pmg = FIELD_GET(MPAMF_ESR_PMG, reg); - ris = FIELD_GET(MPAMF_ESR_PMG, reg); + ris = FIELD_GET(MPAMF_ESR_RIS, reg); pr_err("error irq from msc:%u '%s', partid:%u, pmg: %u, ris: %u\n", msc->id, mpam_errcode_names[errcode], partid, pmg, ris); -- Gitee From d68c5629d16f0e528cc344b2b5e33bb99ff2709d Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 17 Apr 2024 15:25:43 +0800 Subject: [PATCH 0887/2138] anolis: loongarch: fix spec to support compiling vmlinuz.efi ANBZ: #8779 The current spec only supports compilation of elf vmlinux, and grub2 can only support the efi stub kernel, so the spec is fixed to compile vmlinuz in efi format to support kernel startup. 
Signed-off-by: Jing Zhang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3071 --- anolis/rpm/kernel.spec.template | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template index 109e09ae53fe..56a4c7051b6d 100644 --- a/anolis/rpm/kernel.spec.template +++ b/anolis/rpm/kernel.spec.template @@ -142,9 +142,9 @@ %ifarch loongarch64 %define all_arch_configs %{name}-%{version}-loongarch64*.config %define asmarch loongarch -%define make_target vmlinux +%define make_target vmlinuz.efi %define hdrarch loongarch -%define kernel_image vmlinux +%define kernel_image arch/loongarch/boot/vmlinuz.efi %endif # To temporarily exclude an architecture from being built, add it to -- Gitee From b22871bdaa0a5fa886973f23eb1d8d8da8cef3c0 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 23 Apr 2024 10:43:39 +0800 Subject: [PATCH 0888/2138] anolis: Fix the warning message when uninstalling the kernel ANBZ: #8620 Remove modules.builtin.alias.bin after built kernel, the same as commit: 375333772562("anolis: spec: remove modules.builtin.alias.bin after built kernel") Then change the files copied to the /boot/ directory to %ghost to avoid warnings about repeatedly deleting these files when the kernel is uninstalled. 
Signed-off-by: Jing Zhang Reviewed-by: Qiao Ma Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/3083 --- anolis/rpm/kernel.spec.template | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template index 56a4c7051b6d..2e2a4558dcc9 100644 --- a/anolis/rpm/kernel.spec.template +++ b/anolis/rpm/kernel.spec.template @@ -976,7 +976,7 @@ BuildKernel() { # remove files that will be auto generated by depmod at rpm -i time pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/ - rm -f modules.{alias*,builtin.bin,dep*,*map,symbols*,devname,softdep} + rm -f modules.{alias*,builtin.bin,builtin.alias.bin,dep*,*map,symbols*,devname,softdep} popd # Copy the System.map file for depmod to use, and create a backup of the @@ -1010,7 +1010,7 @@ BuildKernel() { # remove files that will be auto generated by depmod at rpm -i time pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/ - rm -f modules.{alias*,builtin.bin,dep*,*map,symbols*,devname,softdep} + rm -f modules.{alias*,builtin.bin,builtin.alias.bin,dep*,*map,symbols*,devname,softdep} popd # Cleanup @@ -1558,12 +1558,12 @@ fi %ghost /%{image_install_path}/dtb-%{KVERREL}%{?2:+%{2}} \ %endif\ %attr(0600, root, root) /lib/modules/%{KVERREL}%{?2:+%{2}}/System.map\ -%attr(0600, root, root) /boot/System.map-%{KVERREL}%{?2:+%{2}}\ +%ghost %attr(0600, root, root) /boot/System.map-%{KVERREL}%{?2:+%{2}}\ /lib/modules/%{KVERREL}%{?2:+%{2}}/symvers.gz\ /lib/modules/%{KVERREL}%{?2:+%{2}}/config\ -%attr(0600, root, root) /boot/symvers-%{KVERREL}%{?2:+%{2}}.gz\ -%attr(0600, root, root) /boot/initramfs-%{KVERREL}%{?2:+%{2}}.img\ -%attr(0644, root, root) /boot/config-%{KVERREL}%{?2:+%{2}}\ +%ghost %attr(0600, root, root) /boot/symvers-%{KVERREL}%{?2:+%{2}}.gz\ +%ghost %attr(0600, root, root) /boot/initramfs-%{KVERREL}%{?2:+%{2}}.img\ +%ghost %attr(0644, root, root) /boot/config-%{KVERREL}%{?2:+%{2}}\ %dir /lib/modules\ %dir 
/lib/modules/%{KVERREL}%{?2:+%{2}}\ /lib/modules/%{KVERREL}%{?2:+%{2}}/kernel\ -- Gitee From e227360abe322a413eaa64c6cba8b7ebd3be98db Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Mon, 29 Apr 2024 17:05:14 +0800 Subject: [PATCH 0889/2138] anolis: kfence: Fix the check about sample_interval ANBZ: #8499 num should be signed long to prevent user change sample_interval across from positive to negative (and vice versa). Fix it. Fixes: e61ac77f426a ("anolis: kfence: enhance kfence for 6.6") Signed-off-by: Tianchen Ding Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3113 --- mm/kfence/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/kfence/core.c b/mm/kfence/core.c index d5329b1c560b..0e0219aa3565 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -91,7 +91,7 @@ DEFINE_STATIC_KEY_TRUE(kfence_order0_page); static void kfence_enable_late(void); static int param_set_sample_interval(const char *val, const struct kernel_param *kp) { - unsigned long num; + long num; int ret = kstrtol(val, 0, &num); if (ret < 0) -- Gitee From aa11183d0ff2fa72e2d7af72824c6a5ec160173c Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Mon, 29 Apr 2024 17:09:55 +0800 Subject: [PATCH 0890/2138] anolis: kfence: Fix a race condition about slab_want_init_on_free() ANBZ: #8499 When porting kfence from 5.10, we wrongly handled the new upstream feature about shortening the critical zone about meta->lock. Fix it. 
Fixes: e61ac77f426a ("anolis: kfence: enhance kfence for 6.6") Signed-off-by: Tianchen Ding Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3113 --- mm/kfence/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 0e0219aa3565..2fe1b509778a 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -988,7 +988,7 @@ static inline bool __free_meta(void *addr, struct kfence_metadata *meta, bool zo * data is still there, and after a use-after-free is detected, we * unprotect the page, so the data is still accessible. */ - if (!zombie && unlikely(slab_want_init_on_free(meta->cache))) + if (!zombie && unlikely(init)) memzero_explicit(addr, meta->size); } -- Gitee From d04f09a4985784dd0f190aeecfb36f179b4834f6 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Wed, 27 Mar 2024 16:17:17 +0500 Subject: [PATCH 0891/2138] x86/selftests: Skip the tests if prerequisites aren't fulfilled ANBZ: #8853 commit 99c84311e35f9399bdce666f6306a048e2a5b404 linux-next Skip instead of failing when prerequisite conditions aren't fulfilled, such as invalid xstate values etc. Make the tests show as 'SKIP' when run: make -C tools/testing/selftests/ TARGETS=x86 run_tests ... # timeout set to 45 # selftests: x86: amx_64 # # xstate cpuid: invalid tile data size/offset: 0/0 ok 42 selftests: x86: amx_64 # SKIP # timeout set to 45 # selftests: x86: lam_64 # # Unsupported LAM feature! ok 43 selftests: x86: lam_64 # SKIP ... In the AMX test, Move away from check_cpuid_xsave() and start using arch_prctl() to find out if AMX support is present or not. In the kernels where AMX isn't present, arch_prctl() returns -EINVAL, hence it is backward compatible. Signed-off-by: Muhammad Usama Anjum Signed-off-by: Ingo Molnar Reviewed-by: Chang S. Bae Reviewed-by: Binbin Wu Acked-by: Kirill A. 
Shutemov Link: https://lore.kernel.org/r/20240327111720.3509180-1-usama.anjum@collabora.com Signed-off-by: Zelin Deng Reviewed-by: Guanjun Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3125 --- tools/testing/selftests/x86/amx.c | 27 ++++++++++----------------- tools/testing/selftests/x86/lam.c | 2 +- 2 files changed, 11 insertions(+), 18 deletions(-) diff --git a/tools/testing/selftests/x86/amx.c b/tools/testing/selftests/x86/amx.c index d884fd69dd51..95aad6d8849b 100644 --- a/tools/testing/selftests/x86/amx.c +++ b/tools/testing/selftests/x86/amx.c @@ -103,21 +103,6 @@ static void clearhandler(int sig) #define CPUID_LEAF1_ECX_XSAVE_MASK (1 << 26) #define CPUID_LEAF1_ECX_OSXSAVE_MASK (1 << 27) -static inline void check_cpuid_xsave(void) -{ - uint32_t eax, ebx, ecx, edx; - - /* - * CPUID.1:ECX.XSAVE[bit 26] enumerates general - * support for the XSAVE feature set, including - * XGETBV. - */ - __cpuid_count(1, 0, eax, ebx, ecx, edx); - if (!(ecx & CPUID_LEAF1_ECX_XSAVE_MASK)) - fatal_error("cpuid: no CPU xsave support"); - if (!(ecx & CPUID_LEAF1_ECX_OSXSAVE_MASK)) - fatal_error("cpuid: no OS xsave support"); -} static uint32_t xbuf_size; @@ -350,6 +335,7 @@ enum expected_result { FAIL_EXPECTED, SUCCESS_EXPECTED }; /* arch_prctl() and sigaltstack() test */ +#define ARCH_GET_XCOMP_SUPP 0x1021 #define ARCH_GET_XCOMP_PERM 0x1022 #define ARCH_REQ_XCOMP_PERM 0x1023 @@ -928,8 +914,15 @@ static void test_ptrace(void) int main(void) { - /* Check hardware availability at first */ - check_cpuid_xsave(); + unsigned long features; + long rc; + + rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_SUPP, &features); + if (rc || (features & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE) { + ksft_print_msg("no AMX support\n"); + return KSFT_SKIP; + } + check_cpuid_xtiledata(); init_stashed_xsave(); diff --git a/tools/testing/selftests/x86/lam.c b/tools/testing/selftests/x86/lam.c index 8f9b06d9ce03..edc14b15da34 100644 --- a/tools/testing/selftests/x86/lam.c +++ 
b/tools/testing/selftests/x86/lam.c @@ -1183,7 +1183,7 @@ int main(int argc, char **argv) if (!cpu_has_lam()) { ksft_print_msg("Unsupported LAM feature!\n"); - return -1; + return KSFT_SKIP; } while ((c = getopt(argc, argv, "ht:")) != -1) { -- Gitee From 6b667b960e45a3482e2d289bbfdfc3443910cc98 Mon Sep 17 00:00:00 2001 From: Liu Wei Date: Tue, 7 May 2024 11:40:37 +0800 Subject: [PATCH 0892/2138] anolis: block: use %px to print request in rq_hang ANBZ: #8947 In function blk_mq_debugfs_rq_hang_show, an incorrect format string when printing the request. "%p" is plain pointer, which is hashed to prevent leaking information about the kernel memory layout, so change it to "%px" Signed-off-by: Liu Wei Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3129 --- block/blk-mq-debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 271535f56bd2..f42314c86377 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -354,7 +354,7 @@ static void blk_mq_debugfs_rq_hang_show(struct seq_file *m, struct request *rq) struct bio_vec *bvec; struct bvec_iter_all iter_all; - seq_printf(m, "%p {.op=", rq); + seq_printf(m, "%px {.op=", rq); if (strcmp(op_str, "UNKNOWN") == 0) seq_printf(m, "%u", op); else -- Gitee From aefda738b8dcde515372c6f099224f3e557fb4c6 Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Mon, 29 Apr 2024 16:05:52 +0800 Subject: [PATCH 0893/2138] irqchip/loongson-pch-pic: Update interrupt registration policy ANBZ: #8927 commit 234a557e28b9142e07eae21083a04fffef83ee8d upstream The current code is using a fixed mapping between the LS7A interrupt source and the HT interrupt vector. This prevents the utilization of the full interrupt vector space and therefore limits the number of interrupt source in a system. Replace the fixed mapping with a dynamic mapping which allocates a vector when an interrupt source is set up. 
This avoids that unused sources prevent vectors from being used for other devices. Introduce a mapping table in struct pch_pic, where each interrupt source will allocate an index as a 'hwirq' number from the table in the order of application and set table value as interrupt source number. This hwirq number will be configured as vector in the HT interrupt controller. For an interrupt source, the validity period of the obtained hwirq will last until the system reset. Co-developed-by: Biao Dong Signed-off-by: Biao Dong Co-developed-by: Tianyang Zhang Signed-off-by: Tianyang Zhang Signed-off-by: Baoqi Zhang Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240422093830.27212-1-zhangtianyang@loongson.cn Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3136 --- drivers/irqchip/irq-loongson-pch-pic.c | 76 ++++++++++++++++++++------ 1 file changed, 59 insertions(+), 17 deletions(-) diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index 3b150b6121fc..1f244e9de9be 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -33,6 +33,7 @@ #define PIC_COUNT (PIC_COUNT_PER_REG * PIC_REG_COUNT) #define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG) #define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG) +#define PIC_UNDEF_VECTOR 255 #define PIC_COUNT_PER_REG64 64 #define PIC_REG64_COUNT 1 #define PIC_REG64_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG64) @@ -50,12 +51,19 @@ struct pch_pic { u32 saved_vec_en[PIC_REG_COUNT]; u32 saved_vec_pol[PIC_REG_COUNT]; u32 saved_vec_edge[PIC_REG_COUNT]; + u8 table[PIC_COUNT]; + int inuse; }; static struct pch_pic *pch_pic_priv[MAX_IO_PICS]; struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; +static inline u8 hwirq_to_bit(struct pch_pic *priv, int hirq) +{ + return priv->table[hirq]; +} + struct irq_domain *get_pchpic_irq_domain(void) { return pch_pic_priv[0]->pic_domain; @@ -89,45 +97,47 @@ static void 
pch_pic_mask_irq(struct irq_data *d) { struct pch_pic *priv = irq_data_get_irq_chip_data(d); - pch_pic_bitset(priv, PCH_PIC_MASK, d->hwirq); + pch_pic_bitset(priv, PCH_PIC_MASK, hwirq_to_bit(priv, d->hwirq)); irq_chip_mask_parent(d); } static void pch_pic_unmask_irq(struct irq_data *d) { struct pch_pic *priv = irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); - writeq(BIT(PIC_REG64_BIT(d->hwirq)), - priv->base + PCH_PIC_CLR + PIC_REG64_IDX(d->hwirq) * 8); + writel(BIT(PIC_REG_BIT(bit)), + priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4); irq_chip_unmask_parent(d); - pch_pic_bitclr(priv, PCH_PIC_MASK, d->hwirq); + pch_pic_bitclr(priv, PCH_PIC_MASK, bit); } static int pch_pic_set_type(struct irq_data *d, unsigned int type) { struct pch_pic *priv = irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); int ret = 0; switch (type) { case IRQ_TYPE_EDGE_RISING: - pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitset(priv, PCH_PIC_EDGE, bit); + pch_pic_bitclr(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_edge_irq); break; case IRQ_TYPE_EDGE_FALLING: - pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitset(priv, PCH_PIC_EDGE, bit); + pch_pic_bitset(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_edge_irq); break; case IRQ_TYPE_LEVEL_HIGH: - pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitclr(priv, PCH_PIC_EDGE, bit); + pch_pic_bitclr(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_level_irq); break; case IRQ_TYPE_LEVEL_LOW: - pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq); - pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq); + pch_pic_bitclr(priv, PCH_PIC_EDGE, bit); + pch_pic_bitset(priv, PCH_PIC_POL, bit); irq_set_handler_locked(d, handle_level_irq); break; default: @@ -142,11 +152,12 @@ static void pch_pic_ack_irq(struct irq_data *d) { 
unsigned int reg; struct pch_pic *priv = irq_data_get_irq_chip_data(d); + int bit = hwirq_to_bit(priv, d->hwirq); - reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4); - if (reg & BIT(PIC_REG_BIT(d->hwirq))) { - writeq(BIT(PIC_REG64_BIT(d->hwirq)), - priv->base + PCH_PIC_CLR + PIC_REG64_IDX(d->hwirq) * 8); + reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(bit) * 4); + if (reg & BIT(PIC_REG_BIT(bit))) { + writel(BIT(PIC_REG_BIT(bit)), + priv->base + PCH_PIC_CLR + PIC_REG_IDX(bit) * 4); } irq_chip_ack_parent(d); } @@ -168,6 +179,8 @@ static int pch_pic_domain_translate(struct irq_domain *d, { struct pch_pic *priv = d->host_data; struct device_node *of_node = to_of_node(fwspec->fwnode); + unsigned long flags; + int i; if (of_node) { if (fwspec->param_count < 2) @@ -180,12 +193,33 @@ static int pch_pic_domain_translate(struct irq_domain *d, return -EINVAL; *hwirq = fwspec->param[0] - priv->gsi_base; + if (fwspec->param_count > 1) *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; else *type = IRQ_TYPE_NONE; } + raw_spin_lock_irqsave(&priv->pic_lock, flags); + /* Check pic-table to confirm if the hwirq has been assigned */ + for (i = 0; i < priv->inuse; i++) { + if (priv->table[i] == *hwirq) { + *hwirq = i; + break; + } + } + if (i == priv->inuse) { + /* Assign a new hwirq in pic-table */ + if (priv->inuse >= PIC_COUNT) { + pr_err("pch-pic domain has no free vectors\n"); + raw_spin_unlock_irqrestore(&priv->pic_lock, flags); + return -EINVAL; + } + priv->table[priv->inuse] = *hwirq; + *hwirq = priv->inuse++; + } + raw_spin_unlock_irqrestore(&priv->pic_lock, flags); + return 0; } @@ -203,6 +237,9 @@ static int pch_pic_alloc(struct irq_domain *domain, unsigned int virq, if (err) return err; + /* Write vector ID */ + writeb(priv->ht_vec_base + hwirq, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, hwirq))); + parent_fwspec.fwnode = domain->parent->fwnode; parent_fwspec.param_count = 1; parent_fwspec.param[0] = hwirq + priv->ht_vec_base; @@ -231,7 +268,7 @@ 
static void pch_pic_reset(struct pch_pic *priv) for (i = 0; i < PIC_COUNT; i++) { /* Write vector ID */ - writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(i)); + writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, i))); /* Hardcode route to HT0 Lo */ writeb(1, priv->base + PCH_INT_ROUTE(i)); } @@ -295,6 +332,7 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base, u32 gsi_base) { struct pch_pic *priv; + int i; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) @@ -305,6 +343,10 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base, if (!priv->base) goto free_priv; + priv->inuse = 0; + for (i = 0; i < PIC_COUNT; i++) + priv->table[i] = PIC_UNDEF_VECTOR; + priv->ht_vec_base = vec_base; priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1; priv->gsi_base = gsi_base; -- Gitee From 7505e4213d5be97cefe85cfbb85d703697e38554 Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Tue, 7 May 2024 20:04:25 +0800 Subject: [PATCH 0894/2138] anolis: io_uring: revert create_io_thread share flag and restrict percpu_sq_thread share scope. ANBZ: #8960 We re-introduced the percpu_sq_thread feature in patch 8aefafb37 (anolis: io_uring: re-add sqthread percpu polling support). However, when using the readv/writev opcode, which needs to share usermode 'mm', it causes an error. The reason is that we should ensure the sq worker thread needs the SHARE_VM flag at its creation time. In the original patch, we avoid sharing it because we want this feature to share one sqthread between not only 'threads' but also 'processes'. Now it is confirmed that the larger sharing scope cannot be achieved in the current io_uring and io_thread design architecture. Thus, what we need to do is: 1.Just restore create_io_thread's implementation. 2.Forbid percpu_sq_thread sharing between processes. 
Signed-off-by: Ferry Meng Reviewed-by: Joseph Qi Reviewed-by: Yi Tao Link: https://gitee.com/anolis/cloud-kernel/pulls/3138 --- include/linux/sched/task.h | 3 +-- io_uring/io-wq.c | 4 ++-- io_uring/sqpoll.c | 17 ++++++----------- kernel/fork.c | 21 ++++++--------------- 4 files changed, 15 insertions(+), 30 deletions(-) diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index d2d46728da3e..a23af225c898 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -94,8 +94,7 @@ extern void exit_itimers(struct task_struct *); extern pid_t kernel_clone(struct kernel_clone_args *kargs); struct task_struct *copy_process(struct pid *pid, int trace, int node, struct kernel_clone_args *args); -struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node, - bool unshare); +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node); struct task_struct *fork_idle(int); extern pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name, unsigned long flags); diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index eb426621603e..a1e31723c9ed 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -780,7 +780,7 @@ static void create_worker_cont(struct callback_head *cb) worker = container_of(cb, struct io_worker, create_work); clear_bit_unlock(0, &worker->create_state); wq = worker->wq; - tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE, false); + tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE); if (!IS_ERR(tsk)) { io_init_new_worker(wq, worker, tsk); io_worker_release(worker); @@ -849,7 +849,7 @@ static bool create_io_worker(struct io_wq *wq, int index) if (index == IO_WQ_ACCT_BOUND) set_bit(IO_WORKER_F_BOUND, &worker->flags); - tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE, false); + tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE); if (!IS_ERR(tsk)) { io_init_new_worker(wq, worker, tsk); } else if (!io_should_retry_thread(worker, PTR_ERR(tsk))) { diff --git 
a/io_uring/sqpoll.c b/io_uring/sqpoll.c index ff6d7d847dfe..1e1096f4858d 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -194,6 +194,10 @@ static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, mutex_lock(&percpu_sqd_lock); sqd = *per_cpu_ptr(percpu_sqd, p->sq_thread_cpu); if (sqd) { + if (sqd->task_tgid != current->tgid) { + mutex_unlock(&percpu_sqd_lock); + return ERR_PTR(-EPERM); + } refcount_inc(&sqd->refs); mutex_unlock(&percpu_sqd_lock); *percpu_found = true; @@ -266,16 +270,8 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd) if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) || signal_pending(current)) { mutex_unlock(&sqd->lock); - if (signal_pending(current)) { + if (signal_pending(current)) did_sig = get_signal(&ksig); - if (did_sig && sqd->sq_cpu != -1 && - refcount_read(&sqd->refs) != 0) { - mutex_lock(&percpu_sqd_lock); - if (*per_cpu_ptr(percpu_sqd, sqd->sq_cpu) == sqd) - did_sig = false; - mutex_unlock(&percpu_sqd_lock); - } - } cond_resched(); mutex_lock(&sqd->lock); sqd->sq_cpu = raw_smp_processor_id(); @@ -517,8 +513,7 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, sqd->task_pid = current->pid; sqd->task_tgid = current->tgid; - tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE, - !!(ctx->flags & IORING_SETUP_SQPOLL_PERCPU)); + tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE); if (IS_ERR(tsk)) { ret = PTR_ERR(tsk); goto err_sqpoll; diff --git a/kernel/fork.c b/kernel/fork.c index 73e3e6e4b5b9..50cca073320e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2851,22 +2851,13 @@ struct task_struct * __init fork_idle(int cpu) * The returned task is inactive, and the caller must fire it up through * wake_up_new_task(p). All signals are blocked in the created task. */ -struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node, - bool unshare) -{ - unsigned long flags = unshare ? 
0 : (CLONE_FS|CLONE_FILES| - CLONE_SIGHAND|CLONE_THREAD| - CLONE_IO|CLONE_VM); - /* we use 'unshare' flag to try to create an independent io_thread, - * 'unshare' describes whether child share parent's mm directly (with - * refcount add one), or it should copy mm/files when copy_process(). - * By setting this flag, the io_thread won't share parent's mm - * directly, but can be shared among different tasks, and looks more - * reasonably. - */ +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node) +{ + unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| + CLONE_IO; struct kernel_clone_args args = { - .flags = ((lower_32_bits(flags) | CLONE_UNTRACED) - & ~CSIGNAL), + .flags = ((lower_32_bits(flags) | CLONE_VM | + CLONE_UNTRACED) & ~CSIGNAL), .exit_signal = (lower_32_bits(flags) & CSIGNAL), .fn = fn, .fn_arg = arg, -- Gitee From deac51f8e9d82f9fb4832e6de612bda1e004a581 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Oct 2023 23:48:59 +0200 Subject: [PATCH 0895/2138] tools: Sync if_link uapi header ANBZ: #8818 commit 5c1b994de4be8a27afa3281be2ff58b38e8bc50c upstream. Sync if_link uapi header to the latest version as we need the refresher in tooling for netkit device. Given it's been a while since the last sync and the diff is fairly big, it has been done as its own commit. 
Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20231024214904.29825-3-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau Signed-off-by: Yuanhe Shu Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3143 --- tools/include/uapi/linux/if_link.h | 141 +++++++++++++++++++++++++++++ 1 file changed, 141 insertions(+) diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 39e659c83cfd..a0aa05a28cf2 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -211,6 +211,9 @@ struct rtnl_link_stats { * @rx_nohandler: Number of packets received on the interface * but dropped by the networking stack because the device is * not designated to receive packets (e.g. backup link in a bond). + * + * @rx_otherhost_dropped: Number of packets dropped due to mismatch + * in destination MAC address. */ struct rtnl_link_stats64 { __u64 rx_packets; @@ -243,6 +246,23 @@ struct rtnl_link_stats64 { __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; + + __u64 rx_otherhost_dropped; +}; + +/* Subset of link stats useful for in-HW collection. Meaning of the fields is as + * for struct rtnl_link_stats64. 
+ */ +struct rtnl_hw_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; }; /* The struct should be in sync with struct ifmap */ @@ -350,7 +370,13 @@ enum { IFLA_GRO_MAX_SIZE, IFLA_TSO_MAX_SIZE, IFLA_TSO_MAX_SEGS, + IFLA_ALLMULTI, /* Allmulti count: > 0 means acts ALLMULTI */ + + IFLA_DEVLINK_PORT, + IFLA_GSO_IPV4_MAX_SIZE, + IFLA_GRO_IPV4_MAX_SIZE, + IFLA_DPLL_PIN, __IFLA_MAX }; @@ -539,6 +565,12 @@ enum { IFLA_BRPORT_MRP_IN_OPEN, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT, + IFLA_BRPORT_LOCKED, + IFLA_BRPORT_MAB, + IFLA_BRPORT_MCAST_N_GROUPS, + IFLA_BRPORT_MCAST_MAX_GROUPS, + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS, + IFLA_BRPORT_BACKUP_NHID, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -716,7 +748,79 @@ enum ipvlan_mode { #define IPVLAN_F_PRIVATE 0x01 #define IPVLAN_F_VEPA 0x02 +/* Tunnel RTM header */ +struct tunnel_msg { + __u8 family; + __u8 flags; + __u16 reserved2; + __u32 ifindex; +}; + +/* netkit section */ +enum netkit_action { + NETKIT_NEXT = -1, + NETKIT_PASS = 0, + NETKIT_DROP = 2, + NETKIT_REDIRECT = 7, +}; + +enum netkit_mode { + NETKIT_L2, + NETKIT_L3, +}; + +enum { + IFLA_NETKIT_UNSPEC, + IFLA_NETKIT_PEER_INFO, + IFLA_NETKIT_PRIMARY, + IFLA_NETKIT_POLICY, + IFLA_NETKIT_PEER_POLICY, + IFLA_NETKIT_MODE, + __IFLA_NETKIT_MAX, +}; +#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1) + /* VXLAN section */ + +/* include statistics in the dump */ +#define TUNNEL_MSG_FLAG_STATS 0x01 + +#define TUNNEL_MSG_VALID_USER_FLAGS TUNNEL_MSG_FLAG_STATS + +/* Embedded inside VXLAN_VNIFILTER_ENTRY_STATS */ +enum { + VNIFILTER_ENTRY_STATS_UNSPEC, + VNIFILTER_ENTRY_STATS_RX_BYTES, + VNIFILTER_ENTRY_STATS_RX_PKTS, + VNIFILTER_ENTRY_STATS_RX_DROPS, + VNIFILTER_ENTRY_STATS_RX_ERRORS, + VNIFILTER_ENTRY_STATS_TX_BYTES, + VNIFILTER_ENTRY_STATS_TX_PKTS, + VNIFILTER_ENTRY_STATS_TX_DROPS, + 
VNIFILTER_ENTRY_STATS_TX_ERRORS, + VNIFILTER_ENTRY_STATS_PAD, + __VNIFILTER_ENTRY_STATS_MAX +}; +#define VNIFILTER_ENTRY_STATS_MAX (__VNIFILTER_ENTRY_STATS_MAX - 1) + +enum { + VXLAN_VNIFILTER_ENTRY_UNSPEC, + VXLAN_VNIFILTER_ENTRY_START, + VXLAN_VNIFILTER_ENTRY_END, + VXLAN_VNIFILTER_ENTRY_GROUP, + VXLAN_VNIFILTER_ENTRY_GROUP6, + VXLAN_VNIFILTER_ENTRY_STATS, + __VXLAN_VNIFILTER_ENTRY_MAX +}; +#define VXLAN_VNIFILTER_ENTRY_MAX (__VXLAN_VNIFILTER_ENTRY_MAX - 1) + +enum { + VXLAN_VNIFILTER_UNSPEC, + VXLAN_VNIFILTER_ENTRY, + __VXLAN_VNIFILTER_MAX +}; +#define VXLAN_VNIFILTER_MAX (__VXLAN_VNIFILTER_MAX - 1) + enum { IFLA_VXLAN_UNSPEC, IFLA_VXLAN_ID, @@ -748,6 +852,8 @@ enum { IFLA_VXLAN_GPE, IFLA_VXLAN_TTL_INHERIT, IFLA_VXLAN_DF, + IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */ + IFLA_VXLAN_LOCALBYPASS, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) @@ -781,6 +887,7 @@ enum { IFLA_GENEVE_LABEL, IFLA_GENEVE_TTL_INHERIT, IFLA_GENEVE_DF, + IFLA_GENEVE_INNER_PROTO_INHERIT, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) @@ -826,6 +933,8 @@ enum { IFLA_GTP_FD1, IFLA_GTP_PDP_HASHSIZE, IFLA_GTP_ROLE, + IFLA_GTP_CREATE_SOCKETS, + IFLA_GTP_RESTART_COUNT, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) @@ -1162,6 +1271,17 @@ enum { #define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1)) +enum { + IFLA_STATS_GETSET_UNSPEC, + IFLA_STATS_GET_FILTERS, /* Nest of IFLA_STATS_LINK_xxx, each a u32 with + * a filter mask for the corresponding group. + */ + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, /* 0 or 1 as u8 */ + __IFLA_STATS_GETSET_MAX, +}; + +#define IFLA_STATS_GETSET_MAX (__IFLA_STATS_GETSET_MAX - 1) + /* These are embedded into IFLA_STATS_LINK_XSTATS: * [IFLA_STATS_LINK_XSTATS] * -> [LINK_XSTATS_TYPE_xxx] @@ -1179,10 +1299,21 @@ enum { enum { IFLA_OFFLOAD_XSTATS_UNSPEC, IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */ + IFLA_OFFLOAD_XSTATS_HW_S_INFO, /* HW stats info. 
A nest */ + IFLA_OFFLOAD_XSTATS_L3_STATS, /* struct rtnl_hw_stats64 */ __IFLA_OFFLOAD_XSTATS_MAX }; #define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1) +enum { + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, /* u8 */ + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, /* u8 */ + __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX, +}; +#define IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX \ + (__IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX - 1) + /* XDP section */ #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) @@ -1281,4 +1412,14 @@ enum { #define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1) +/* DSA section */ + +enum { + IFLA_DSA_UNSPEC, + IFLA_DSA_MASTER, + __IFLA_DSA_MAX, +}; + +#define IFLA_DSA_MAX (__IFLA_DSA_MAX - 1) + #endif /* _UAPI_LINUX_IF_LINK_H */ -- Gitee From 784136481bd4a5bd6db57020cb7b7b88876f8690 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Thu, 9 May 2024 09:37:33 +0800 Subject: [PATCH 0896/2138] anolis: check cgroup v1 for memcg_blkcg_tree operations ANBZ: #8973 Currently parameter 'cgwb_v1' can be setup unconditionally. Take the following abnormal case into consideration: System administrator configures both 'cgwb_v1' and 'systemd.unified_cgroup_hierarchy=1' in command line by mistake, so we use cgroup v2 after boot in fact. Though we'll check if current kernel is under cgroup v2 in inode_cgwb_enabled(), we still allocate, insert and delete links for memcg_blkcg_tree since we only check parameter 'cgwb_v1'. This seems no actual harm, but it is entirely unnecessary and wasty. So restrict these operations only under cgroup v1. Since bdi initialization is before enabling cgroup subsys, so we'll still create debug file bdi_wb_link but without any links in above abnormal case. 
Signed-off-by: Joseph Qi Reviewed-by: Jingbo Xu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3144 --- mm/backing-dev.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/backing-dev.c b/mm/backing-dev.c index f032314fcbf2..67d71ce4472d 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -564,7 +564,7 @@ int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links) struct memcg_blkcg_link *link; int i; - if (!cgwb_v1) + if (!cgroup_writeback_support_v1()) return 0; for (i = 0; i < count; i++) { @@ -594,7 +594,7 @@ void insert_memcg_blkcg_link(struct cgroup_subsys *ss, struct cgroup_subsys_state *memcg_css; int err; - if (!cgwb_v1) + if (!cgroup_writeback_support_v1()) return; if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id) @@ -682,7 +682,7 @@ static void delete_blkcg_link(struct cgroup_subsys_state *blkcg_css) void delete_memcg_blkcg_link(struct cgroup_subsys *ss, struct cgroup_subsys_state *css) { - if (!cgwb_v1) + if (!cgroup_writeback_support_v1()) return; if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id) -- Gitee From 8176055b049ed713fdfc87e3c4d072312ae0a227 Mon Sep 17 00:00:00 2001 From: Yang Jihong Date: Mon, 4 Sep 2023 02:33:38 +0000 Subject: [PATCH 0897/2138] perf record: Track sideband events for all CPUs when tracing selected CPUs ANBZ: #8889 commit 74b4f3ecdf64b62446abfb36669b3d40a42d34eb upstream. User space tasks can migrate between CPUs, we need to track side-band events for all CPUs. The specific scenarios are as follows: CPU0 CPU1 perf record -C 0 start taskA starts to be created and executed -> PERF_RECORD_COMM and PERF_RECORD_MMAP events only deliver to CPU1 ...... | migrate to CPU0 | Running on CPU0 <----------/ ... perf record -C 0 stop Now perf samples the PC of taskA. However, perf does not record the PERF_RECORD_COMM and PERF_RECORD_MMAP events of taskA. Therefore, the comm and symbols of taskA cannot be parsed. 
The solution is to record sideband events for all CPUs when tracing selected CPUs. Because this modifies the default behavior, add related comments to the perf record man page. The sys_perf_event_open invoked is as follows: # perf --debug verbose=3 record -e cpu-clock -C 1 true Opening: cpu-clock ------------------------------------------------------------ perf_event_attr: type 1 (PERF_TYPE_SOFTWARE) size 136 config 0 (PERF_COUNT_SW_CPU_CLOCK) { sample_period, sample_freq } 4000 sample_type IP|TID|TIME|CPU|PERIOD|IDENTIFIER read_format ID|LOST disabled 1 inherit 1 freq 1 sample_id_all 1 exclude_guest 1 ------------------------------------------------------------ sys_perf_event_open: pid -1 cpu 1 group_fd -1 flags 0x8 = 5 Opening: dummy:u ------------------------------------------------------------ perf_event_attr: type 1 (PERF_TYPE_SOFTWARE) size 136 config 0x9 (PERF_COUNT_SW_DUMMY) { sample_period, sample_freq } 1 sample_type IP|TID|TIME|CPU|IDENTIFIER read_format ID|LOST inherit 1 exclude_kernel 1 exclude_hv 1 mmap 1 comm 1 task 1 sample_id_all 1 exclude_guest 1 mmap2 1 comm_exec 1 ksymbol 1 bpf_event 1 ------------------------------------------------------------ sys_perf_event_open: pid -1 cpu 0 group_fd -1 flags 0x8 = 6 sys_perf_event_open: pid -1 cpu 1 group_fd -1 flags 0x8 = 7 sys_perf_event_open: pid -1 cpu 2 group_fd -1 flags 0x8 = 9 sys_perf_event_open: pid -1 cpu 3 group_fd -1 flags 0x8 = 10 sys_perf_event_open: pid -1 cpu 4 group_fd -1 flags 0x8 = 11 sys_perf_event_open: pid -1 cpu 5 group_fd -1 flags 0x8 = 12 sys_perf_event_open: pid -1 cpu 6 group_fd -1 flags 0x8 = 13 sys_perf_event_open: pid -1 cpu 7 group_fd -1 flags 0x8 = 14 Signed-off-by: Yang Jihong Tested-by: Ravi Bangoria Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Anshuman Khandual Cc: Ian Rogers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Richter Link: 
https://lore.kernel.org/r/20230904023340.12707-5-yangjihong1@huawei.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3133 --- tools/perf/Documentation/perf-record.txt | 3 +++ tools/perf/builtin-record.c | 30 +++++++++++++++++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index d5217be012d7..1889f66addf2 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -374,6 +374,9 @@ comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0- In per-thread mode with inheritance mode on (default), samples are captured only when the thread executes on the designated CPUs. Default is to monitor all CPUs. +User space tasks can migrate between CPUs, so when tracing selected CPUs, +a dummy event is created to track sideband for all CPUs. + -B:: --no-buildid:: Do not save the build ids of binaries in the perf.data files. This skips diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index b94ae33a343c..47406ca3ce4f 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -906,10 +906,30 @@ static int record__config_off_cpu(struct record *rec) return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts); } +static bool record__tracking_system_wide(struct record *rec) +{ + struct evlist *evlist = rec->evlist; + struct evsel *evsel; + + /* + * If non-dummy evsel exists, system_wide sideband is need to + * help parse sample information. + * For example, PERF_EVENT_MMAP event to help parse symbol, + * and PERF_EVENT_COMM event to help parse task executable name. 
+ */ + evlist__for_each_entry(evlist, evsel) { + if (!evsel__is_dummy_event(evsel)) + return true; + } + + return false; +} + static int record__config_tracking_events(struct record *rec) { struct record_opts *opts = &rec->opts; struct evlist *evlist = rec->evlist; + bool system_wide = false; struct evsel *evsel; /* @@ -919,7 +939,15 @@ static int record__config_tracking_events(struct record *rec) */ if (opts->target.initial_delay || target__has_cpu(&opts->target) || perf_pmus__num_core_pmus() > 1) { - evsel = evlist__findnew_tracking_event(evlist, false); + + /* + * User space tasks can migrate between CPUs, so when tracing + * selected CPUs, sideband for all CPUs is still needed. + */ + if (!!opts->target.cpu_list && record__tracking_system_wide(rec)) + system_wide = true; + + evsel = evlist__findnew_tracking_event(evlist, system_wide); if (!evsel) return -ENOMEM; -- Gitee From 1456d3d796807e06c443c3340a674024da5e3b06 Mon Sep 17 00:00:00 2001 From: Yang Jihong Date: Mon, 4 Sep 2023 02:33:39 +0000 Subject: [PATCH 0898/2138] perf test: Add test case for record sideband events ANBZ: #8889 commit 23b97c7ee963f1d007c035e76ba7e3a4fd1259e6 upstream. 
Add a new test case to record sideband events for all CPUs when tracing selected CPUs Test result: # ./perf test list 2>&1 | grep 'perf record sideband tests' 95: perf record sideband tests # ./perf test 95 95: perf record sideband tests : Ok Signed-off-by: Yang Jihong Tested-by: Ravi Bangoria Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Anshuman Khandual Cc: Ian Rogers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Richter Link: https://lore.kernel.org/r/20230904023340.12707-6-yangjihong1@huawei.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3133 --- tools/perf/tests/shell/record_sideband.sh | 58 +++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100755 tools/perf/tests/shell/record_sideband.sh diff --git a/tools/perf/tests/shell/record_sideband.sh b/tools/perf/tests/shell/record_sideband.sh new file mode 100755 index 000000000000..5024a7ce0c51 --- /dev/null +++ b/tools/perf/tests/shell/record_sideband.sh @@ -0,0 +1,58 @@ +#!/bin/sh +# perf record sideband tests +# SPDX-License-Identifier: GPL-2.0 + +set -e + +err=0 +perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX) + +cleanup() +{ + rm -rf ${perfdata} + trap - EXIT TERM INT +} + +trap_cleanup() +{ + cleanup + exit 1 +} +trap trap_cleanup EXIT TERM INT + +can_cpu_wide() +{ + if ! 
perf record -o ${perfdata} -BN --no-bpf-event -C $1 true 2>&1 >/dev/null + then + echo "record sideband test [Skipped cannot record cpu$1]" + err=2 + fi + + rm -f ${perfdata} + return $err +} + +test_system_wide_tracking() +{ + # Need CPU 0 and CPU 1 + can_cpu_wide 0 || return 0 + can_cpu_wide 1 || return 0 + + # Record on CPU 0 a task running on CPU 1 + perf record -BN --no-bpf-event -o ${perfdata} -C 0 -- taskset --cpu-list 1 true + + # Should get MMAP events from CPU 1 + mmap_cnt=`perf script -i ${perfdata} --show-mmap-events -C 1 2>/dev/null | grep MMAP | wc -l` + + if [ ${mmap_cnt} -gt 0 ] ; then + return 0 + fi + + echo "Failed to record MMAP events on CPU 1 when tracing CPU 0" + return 1 +} + +test_system_wide_tracking + +cleanup +exit $err -- Gitee From 11709583220ddde1d6aa1fa59a5cc2b2ee112431 Mon Sep 17 00:00:00 2001 From: Yang Jihong Date: Mon, 4 Sep 2023 02:33:40 +0000 Subject: [PATCH 0899/2138] perf test: Add perf_event_attr test for record dummy event ANBZ: #8889 commit d50ad02cb39a5fe1d0c02b3b51e8a2a37464c54a upstream. If only dummy event is recorded, tracking event is not needed. Add this test scenario. 
Test result: # ./perf test list 2>&1 | grep 'Setup struct perf_event_attr' 17: Setup struct perf_event_attr # ./perf test 17 -v 17: Setup struct perf_event_attr : --- start --- test child forked, pid 720198 running './tests/attr/test-record-dummy-C0' test child finished with 0 ---- end ---- Setup struct perf_event_attr: Ok Signed-off-by: Yang Jihong Tested-by: Ravi Bangoria Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Anshuman Khandual Cc: Ian Rogers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Richter Link: https://lore.kernel.org/r/20230904023340.12707-7-yangjihong1@huawei.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3133 --- tools/perf/tests/attr/test-record-dummy-C0 | 55 ++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 tools/perf/tests/attr/test-record-dummy-C0 diff --git a/tools/perf/tests/attr/test-record-dummy-C0 b/tools/perf/tests/attr/test-record-dummy-C0 new file mode 100644 index 000000000000..83ca4e373acd --- /dev/null +++ b/tools/perf/tests/attr/test-record-dummy-C0 @@ -0,0 +1,55 @@ +[config] +command = record +args = --no-bpf-event -e dummy -C 0 kill >/dev/null 2>&1 +ret = 1 + +[event] +fd=1 +group_fd=-1 +cpu=0 +pid=-1 +flags=8 +type=1 +size=136 +config=9 +sample_period=4000 +# PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME | +# PERF_SAMPLE_PERIOD +# + PERF_SAMPLE_CPU added by -C 0 +sample_type=391 +read_format=4 +disabled=0 +inherit=1 +pinned=0 +exclusive=0 +exclude_user=0 +exclude_kernel=0 +exclude_hv=0 +exclude_idle=0 +mmap=1 +comm=1 +freq=1 +inherit_stat=0 +enable_on_exec=0 +task=1 +watermark=0 +precise_ip=0 +mmap_data=0 +sample_id_all=1 +exclude_host=0 +exclude_guest=1 +exclude_callchain_kernel=0 +exclude_callchain_user=0 +mmap2=1 +comm_exec=1 +context_switch=0 +write_backward=0 +namespaces=0 
+use_clockid=0 +wakeup_events=0 +bp_type=0 +config1=0 +config2=0 +branch_sample_type=0 +sample_regs_user=0 +sample_stack_user=0 -- Gitee From ebf85f8890b2d2f5e3ce91f4fcbcb1b4c122880e Mon Sep 17 00:00:00 2001 From: Yang Jihong Date: Sat, 16 Sep 2023 09:16:41 +0000 Subject: [PATCH 0900/2138] perf test: Fix test-record-dummy-C0 failure for supported PERF_FORMAT_LOST feature kernel ANBZ: #8889 commit a132b784db68b543fd2745973cd8b5edf8e9bde4 upstream. For kernel that supports PERF_FORMAT_LOST, attr->read_format has PERF_FORMAT_LOST bit. Update expected value of attr->read_format of test-record-dummy-C0 for this scenario. Before: # ./perf test 17 -vv 17: Setup struct perf_event_attr : --- start --- test child forked, pid 1609441 running './tests/attr/test-record-dummy-C0' 'PERF_TEST_ATTR=/tmp/tmpm3s60aji ./perf record -o /tmp/tmpm3s60aji/perf.data --no-bpf-event -e dummy -C 0 kill >/dev/null 2>&1' ret '1', expected '1' expected read_format=4, got 20 FAILED './tests/attr/test-record-dummy-C0' - match failure test child finished with -1 ---- end ---- Setup struct perf_event_attr: FAILED! 
After: # ./perf test 17 -vv 17: Setup struct perf_event_attr : --- start --- test child forked, pid 1609441 running './tests/attr/test-record-dummy-C0' 'PERF_TEST_ATTR=/tmp/tmppa9vxcb7 ./perf record -o /tmp/tmppa9vxcb7/perf.data --no-bpf-event -e dummy -C 0 kill >/dev/null 2>&1' ret '1', expected '1' test child finished with 0 ---- end ---- Setup struct perf_event_attr: Ok Reported-and-Tested-by: Namhyung Kim Signed-off-by: Yang Jihong Link: https://lore.kernel.org/r/20230916091641.776031-1-yangjihong1@huawei.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Peng Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3148 --- tools/perf/tests/attr/test-record-dummy-C0 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/attr/test-record-dummy-C0 b/tools/perf/tests/attr/test-record-dummy-C0 index 83ca4e373acd..576ec48b3aaf 100644 --- a/tools/perf/tests/attr/test-record-dummy-C0 +++ b/tools/perf/tests/attr/test-record-dummy-C0 @@ -17,7 +17,7 @@ sample_period=4000 # PERF_SAMPLE_PERIOD # + PERF_SAMPLE_CPU added by -C 0 sample_type=391 -read_format=4 +read_format=4|20 disabled=0 inherit=1 pinned=0 -- Gitee From e26fdadd2529a910cec525359efab9a15d5b5394 Mon Sep 17 00:00:00 2001 From: yangdepei Date: Tue, 16 Apr 2024 20:15:13 +0800 Subject: [PATCH 0901/2138] anolis:ccp: ccp-crypto support sm2 on Hygon generation 4th CPU ANBZ: #8582 1. support sm2 on 4th cpu 2. create new ccp-dev-v5.c file for hygon ccp only 3. 
restore original ccp-dev-v5.c file Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3119 --- drivers/crypto/ccp/Kconfig | 1 + drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/ccp-dev-v5.c | 326 +------ drivers/crypto/ccp/ccp-dev.h | 14 +- drivers/crypto/ccp/hygon/ccp-dev-v5.c | 1236 +++++++++++++++++++++++++ drivers/crypto/ccp/sp-pci.c | 27 +- 6 files changed, 1281 insertions(+), 326 deletions(-) create mode 100644 drivers/crypto/ccp/hygon/ccp-dev-v5.c diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 702b4c6761fd..726f1a6025eb 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -50,6 +50,7 @@ config HYGON_GM bool "Hygon GM (sm2/sm3/sm4) Interface" default y depends on CRYPTO_DEV_CCP_CRYPTO && X86_64 + select CRYPTO_SM3_GENERIC help Hygon GM ccp driver diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 70bab9cbe3d5..69534353c8d4 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -5,7 +5,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \ ccp-ops.o \ ccp-dev-v3.o \ ccp-dev-v5.o \ - ccp-dmaengine.o + ccp-dmaengine.o \ + hygon/ccp-dev-v5.o ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o ccp-$(CONFIG_PCI) += sp-pci.o ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index e5c129c3e049..7b73332d6aa1 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -131,28 +131,6 @@ union ccp_function { u16 type:2; u16 mode:3; } ecc; - struct { - u16 rand:1; - u16 rsvd:11; - u16 mode:3; - } sm2; - struct { - u16 rsvd:10; - u16 type:4; - u16 rsvd2:1; - } sm3; - struct { - u16 rsvd:7; - u16 encrypt:1; - u16 mode:4; - u16 select:1; - u16 rsvd2:2; - } sm4; - struct { - u16 size:7; - u16 encrypt:1; - u16 step:7; - } sm4_ctr; u16 raw; }; @@ -173,15 +151,6 @@ union 
ccp_function { #define CCP_PT_BITWISE(p) ((p)->pt.bitwise) #define CCP_ECC_MODE(p) ((p)->ecc.mode) #define CCP_ECC_AFFINE(p) ((p)->ecc.one) -#define CCP_SM2_RAND(p) ((p)->sm2.rand) -#define CCP_SM2_MODE(p) ((p)->sm2.mode) -#define CCP_SM3_TYPE(p) ((p)->sm3.type) -#define CCP_SM4_ENCRYPT(p) ((p)->sm4.encrypt) -#define CCP_SM4_MODE(p) ((p)->sm4.mode) -#define CCP_SM4_SELECT(p) ((p)->sm4.select) -#define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt) -#define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step) -#define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -217,8 +186,6 @@ union ccp_function { #define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) #define CCP5_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo) #define CCP5_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi) -#define CCP5_CMD_SM3_LO(p) ((p)->dw4.sm3_len_lo) -#define CCP5_CMD_SM3_HI(p) ((p)->dw5.sm3_len_hi) /* Word 6/7 */ #define CCP5_CMD_DW6(p) ((p)->key_lo) @@ -227,17 +194,6 @@ union ccp_function { #define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) #define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) -static inline unsigned int command_per_queue(void) -{ -#ifdef CONFIG_HYGON_GM - return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? 
- HYGON_COMMANDS_PER_QUEUE : - COMMANDS_PER_QUEUE; -#else - return COMMANDS_PER_QUEUE; -#endif -} - static inline u32 low_address(unsigned long addr) { return (u64)addr & 0x0ffffffff; @@ -251,86 +207,15 @@ static inline u32 high_address(unsigned long addr) static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) { unsigned int head_idx, n; - u32 head_lo, queue_start, command_per_q; + u32 head_lo, queue_start; - command_per_q = command_per_queue(); queue_start = low_address(cmd_q->qdma_tail); head_lo = ioread32(cmd_q->reg_head_lo); head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); - n = head_idx + command_per_q - cmd_q->qidx - 1; + n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1; - return n % command_per_q; /* Always one unused spot */ -} - -static int ccp5_do_multi_cmds(struct ccp5_desc *desc, - struct ccp_cmd_queue *cmd_q) -{ - u32 *mP; - __le32 *dP; - int i; - u32 command_per_q; - - command_per_q = command_per_queue(); - - cmd_q->total_ops++; - - if (CCP5_CMD_SOC(desc)) { - CCP5_CMD_IOC(desc) = 1; - CCP5_CMD_SOC(desc) = 0; - } - - mutex_lock(&cmd_q->q_mutex); - - mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; - dP = (__le32 *) desc; - for (i = 0; i < 8; i++) - mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ - - cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q; - - mutex_unlock(&cmd_q->q_mutex); - - return 0; -} - -static int ccp5_do_run_cmd(struct ccp_op *op) -{ - struct ccp_cmd_queue *cmd_q = op->cmd_q; - u32 tail; - int ret = 0; - - mutex_lock(&cmd_q->q_mutex); - - /* The data used by this command must be flushed to memory */ - wmb(); - - /* Write the new tail address back to the queue register */ - tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); - iowrite32(tail, cmd_q->reg_tail_lo); - - /* Turn the queue back on using our cached control register */ - iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); - mutex_unlock(&cmd_q->q_mutex); - - if (op->ioc) { - /* Wait for the job to complete */ - ret = 
wait_event_interruptible(cmd_q->int_queue, - cmd_q->int_rcvd); - if (ret || cmd_q->cmd_error) { - /* Log the error and flush the queue by - * moving the head pointer - */ - if (cmd_q->cmd_error) - ccp_log_error(cmd_q->ccp, cmd_q->cmd_error); - iowrite32(tail, cmd_q->reg_head_lo); - if (!ret) - ret = -EIO; - } - cmd_q->int_rcvd = 0; - } - - return ret; + return n % COMMANDS_PER_QUEUE; /* Always one unused spot */ } static int ccp5_do_cmd(struct ccp5_desc *desc, @@ -338,11 +223,10 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, { __le32 *mP; u32 *dP; - u32 tail, command_per_q; + u32 tail; int i; int ret = 0; - command_per_q = command_per_queue(); cmd_q->total_ops++; if (CCP5_CMD_SOC(desc)) { @@ -356,7 +240,7 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, for (i = 0; i < 8; i++) mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ - cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q; + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; /* The data used by this command must be flushed to memory */ wmb(); @@ -700,163 +584,6 @@ static int ccp5_perform_ecc(struct ccp_op *op) return ccp5_do_cmd(&desc, op->cmd_q); } -static int ccp5_perform_sm2(struct ccp_op *op) -{ - struct ccp5_desc desc; - union ccp_function function; - struct ccp_dma_info *saddr = &op->src.u.dma; - struct ccp_dma_info *daddr = &op->dst.u.dma; - - op->cmd_q->total_sm2_ops++; - - memset(&desc, 0, Q_DESC_SIZE); - - CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM2; - - CCP5_CMD_SOC(&desc) = 0; - CCP5_CMD_IOC(&desc) = 1; - CCP5_CMD_INIT(&desc) = 1; - CCP5_CMD_EOM(&desc) = 1; - CCP5_CMD_PROT(&desc) = 0; - - function.raw = 0; - CCP_SM2_RAND(&function) = op->u.sm2.rand; - CCP_SM2_MODE(&function) = op->u.sm2.mode; - CCP5_CMD_FUNCTION(&desc) = function.raw; - - /* Length of source data must match with mode */ - CCP5_CMD_LEN(&desc) = saddr->length; - CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(saddr); - CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(saddr); - CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - - CCP5_CMD_DST_LO(&desc) = 
ccp_addr_lo(daddr); - CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(daddr); - CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - - return ccp5_do_cmd(&desc, op->cmd_q); -} - -static int ccp5_perform_sm3(struct ccp_op *op) -{ - struct ccp5_desc desc; - union ccp_function function; - - op->cmd_q->total_sm3_ops++; - - memset(&desc, 0, Q_DESC_SIZE); - - CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM3; - - CCP5_CMD_SOC(&desc) = op->soc; - CCP5_CMD_IOC(&desc) = op->ioc; - CCP5_CMD_INIT(&desc) = op->init; - CCP5_CMD_EOM(&desc) = op->eom; - CCP5_CMD_PROT(&desc) = 0; - - function.raw = 0; - CCP_SM3_TYPE(&function) = op->u.sm3.type; - CCP5_CMD_FUNCTION(&desc) = function.raw; - - CCP5_CMD_LEN(&desc) = op->src.u.dma.length; - - CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); - CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); - CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; - - if (op->eom) { - CCP5_CMD_SM3_LO(&desc) = lower_32_bits(op->u.sm3.msg_bits); - CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); - } - - return ccp5_do_multi_cmds(&desc, op->cmd_q); -} - -static int ccp5_perform_sm4(struct ccp_op *op) -{ - struct ccp5_desc desc; - union ccp_function function; - u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; - - op->cmd_q->total_sm4_ops++; - - memset(&desc, 0, Q_DESC_SIZE); - - CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4; - - CCP5_CMD_SOC(&desc) = op->soc; - CCP5_CMD_IOC(&desc) = op->ioc; - CCP5_CMD_INIT(&desc) = op->init; - CCP5_CMD_EOM(&desc) = op->eom; - CCP5_CMD_PROT(&desc) = 0; - - function.raw = 0; - CCP_SM4_ENCRYPT(&function) = op->u.sm4.action; - CCP_SM4_MODE(&function) = op->u.sm4.mode; - CCP_SM4_SELECT(&function) = op->u.sm4.select; - CCP5_CMD_FUNCTION(&desc) = function.raw; - - CCP5_CMD_LEN(&desc) = op->src.u.dma.length; - - CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); - CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); - CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - CCP5_CMD_LSB_ID(&desc) = 
op->sb_ctx; - - CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); - CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); - CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - - CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); - CCP5_CMD_KEY_HI(&desc) = 0; - CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; - - return ccp5_do_multi_cmds(&desc, op->cmd_q); -} - -static int ccp5_perform_sm4_ctr(struct ccp_op *op) -{ - struct ccp5_desc desc; - union ccp_function function; - u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; - - op->cmd_q->total_sm4_ctr_ops++; - - memset(&desc, 0, Q_DESC_SIZE); - - CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_CTR; - - CCP5_CMD_SOC(&desc) = op->soc; - CCP5_CMD_IOC(&desc) = op->ioc; - CCP5_CMD_INIT(&desc) = op->init; - CCP5_CMD_EOM(&desc) = op->eom; - CCP5_CMD_PROT(&desc) = 0; - - function.raw = 0; - CCP_SM4_CTR_SIZE(&function) = op->u.sm4_ctr.size; - CCP_SM4_CTR_ENCRYPT(&function) = op->u.sm4_ctr.action; - CCP_SM4_CTR_STEP(&function) = op->u.sm4_ctr.step; - CCP5_CMD_FUNCTION(&desc) = function.raw; - - CCP5_CMD_LEN(&desc) = op->src.u.dma.length; - - CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); - CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); - CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; - - CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); - CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); - CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - - CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); - CCP5_CMD_KEY_HI(&desc) = 0; - CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; - - return ccp5_do_multi_cmds(&desc, op->cmd_q); -} - static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -866,7 +593,6 @@ static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) /* Build a bit mask to know which LSBs this queue has access to. * Don't bother with segment 0 as it has special privileges. 
*/ - status >>= LSB_REGION_WIDTH; for (j = 1; j < MAX_LSB_CNT; j++) { if (status & q_mask) bitmap_set(cmd_q->lsbmask, j, 1); @@ -1018,7 +744,7 @@ static void ccp5_irq_bh(unsigned long data) status = ioread32(cmd_q->reg_interrupt_status); - if (status & SUPPORTED_INTERRUPTS) { + if (status) { cmd_q->int_status = status; cmd_q->q_status = ioread32(cmd_q->reg_status); cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); @@ -1027,9 +753,10 @@ static void ccp5_irq_bh(unsigned long data) if ((status & INT_ERROR) && !cmd_q->cmd_error) cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); + cmd_q->int_rcvd = 1; + /* Acknowledge the interrupt and wake the kthread */ iowrite32(status, cmd_q->reg_interrupt_status); - cmd_q->int_rcvd = 1; wake_up_interruptible(&cmd_q->int_queue); } } @@ -1057,7 +784,7 @@ static int ccp5_init(struct ccp_device *ccp) char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; unsigned int qmr, i; u64 status; - u32 status_lo, status_hi, command_per_q, queue_size_val; + u32 status_lo, status_hi; int ret; /* Find available queues */ @@ -1074,9 +801,6 @@ static int ccp5_init(struct ccp_device *ccp) return 1; } - command_per_q = command_per_queue(); - queue_size_val = QUEUE_SIZE_VAL(command_per_q); - for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { if (!(qmr & (1 << i))) continue; @@ -1103,7 +827,7 @@ static int ccp5_init(struct ccp_device *ccp) /* Page alignment satisfies our needs for N <= 128 */ BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); - cmd_q->qsize = Q_SIZE(command_per_q, Q_DESC_SIZE); + cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, &cmd_q->qbase_dma, GFP_KERNEL); @@ -1190,7 +914,7 @@ static int ccp5_init(struct ccp_device *ccp) cmd_q = &ccp->cmd_q[i]; cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); - cmd_q->qcontrol |= queue_size_val << CMD5_Q_SHIFT; + cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT; cmd_q->qdma_tail = cmd_q->qbase_dma; dma_addr_lo = low_address(cmd_q->qdma_tail); @@ 
-1338,26 +1062,6 @@ static void ccp5_destroy(struct ccp_device *ccp) } } -static int ccp5_get_trng_mask_param(void) -{ - /* According to spec description for SM4 high secure module, - * which need 64 bytes data, so the initialize times of writing - * mask register must be 16 or a multiple of 16. - * - * The AES algorithem need 48 bytes, so the initialize times will - * be 12 or a multiple of 12. - */ - -#ifdef CONFIG_HYGON_GM - /* for sm4 HS */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) - return 16; -#endif - - /* for AES HS */ - return 12; -} - static void ccp5_config(struct ccp_device *ccp) { /* Public side */ @@ -1368,13 +1072,12 @@ static void ccp5other_config(struct ccp_device *ccp) { int i; u32 rnd; - int len = ccp5_get_trng_mask_param(); /* We own all of the queues on the NTB CCP */ iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); - for (i = 0; i < len; i++) { + for (i = 0; i < 12; i++) { rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); } @@ -1400,11 +1103,6 @@ static const struct ccp_actions ccp5_actions = { .rsa = ccp5_perform_rsa, .passthru = ccp5_perform_passthru, .ecc = ccp5_perform_ecc, - .sm2 = ccp5_perform_sm2, - .sm3 = ccp5_perform_sm3, - .sm4 = ccp5_perform_sm4, - .sm4_ctr = ccp5_perform_sm4_ctr, - .run_cmd = ccp5_do_run_cmd, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index e1aa68f4044c..46518c80f8ca 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -101,13 +101,12 @@ #define CMD5_Q_SHIFT 3 #define COMMANDS_PER_QUEUE 16 -#define HYGON_COMMANDS_PER_QUEUE 8192 +#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \ + CMD5_Q_SIZE) +#define Q_PTR_MASK (2 << (QUEUE_SIZE_VAL + 5) - 1) #define Q_DESC_SIZE sizeof(struct ccp5_desc) - -#define QUEUE_SIZE_VAL(c) ((ffs((c)) - 2) & CMD5_Q_SIZE) -#define 
Q_PTR_MASK(c) (2 << (QUEUE_SIZE_VAL((c)) + 5) - 1) -#define Q_SIZE(c, n) ((c)*(n)) +#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n)) #define INT_COMPLETION 0x1 #define INT_ERROR 0x2 @@ -362,6 +361,9 @@ struct ccp_device { bool use_tasklet; struct tasklet_struct irq_tasklet; + /* This flag mark if the ccp support both sm2 and ecc function */ + uint32_t support_sm2_ecc; + /* I/O area used for device communication. The register mapping * starts at an offset into the mapped bar. * The CMD_REQx registers and the Delete_Cmd_Queue_Job register @@ -709,5 +711,7 @@ extern const struct ccp_vdata ccpv3_platform; extern const struct ccp_vdata ccpv3; extern const struct ccp_vdata ccpv5a; extern const struct ccp_vdata ccpv5b; +extern const struct ccp_vdata ccpv5a_hygon; +extern const struct ccp_vdata ccpv5b_hygon; #endif diff --git a/drivers/crypto/ccp/hygon/ccp-dev-v5.c b/drivers/crypto/ccp/hygon/ccp-dev-v5.c new file mode 100644 index 000000000000..35e9fc5135d0 --- /dev/null +++ b/drivers/crypto/ccp/hygon/ccp-dev-v5.c @@ -0,0 +1,1236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Secure Processor interface driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Depei Yang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include + +#include "../ccp-dev.h" + +/* Allocate the requested number of contiguous LSB slots + * from the LSB bitmap. Look in the private range for this + * queue first; failing that, check the public area. + * If no space is available, wait around. 
+ * Return: first slot number + */ +static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count) +{ + struct ccp_device *ccp; + int start; + + /* First look at the map for the queue */ + if (cmd_q->lsb >= 0) { + start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap, + LSB_SIZE, + 0, count, 0); + if (start < LSB_SIZE) { + bitmap_set(cmd_q->lsbmap, start, count); + return start + cmd_q->lsb * LSB_SIZE; + } + } + + /* No joy; try to get an entry from the shared blocks */ + ccp = cmd_q->ccp; + for (;;) { + mutex_lock(&ccp->sb_mutex); + + start = (u32)bitmap_find_next_zero_area(ccp->lsbmap, + MAX_LSB_CNT * LSB_SIZE, + 0, + count, 0); + if (start <= MAX_LSB_CNT * LSB_SIZE) { + bitmap_set(ccp->lsbmap, start, count); + + mutex_unlock(&ccp->sb_mutex); + return start; + } + + ccp->sb_avail = 0; + + mutex_unlock(&ccp->sb_mutex); + + /* Wait for KSB entries to become available */ + if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) + return 0; + } +} + + +/* Free a number of LSB slots from the bitmap, starting at + * the indicated starting slot number. 
+ */ +static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start, + unsigned int count) +{ + if (!start) + return; + + if (cmd_q->lsb == start) { + /* An entry from the private LSB */ + bitmap_clear(cmd_q->lsbmap, start, count); + } else { + /* From the shared LSBs */ + struct ccp_device *ccp = cmd_q->ccp; + + mutex_lock(&ccp->sb_mutex); + bitmap_clear(ccp->lsbmap, start, count); + ccp->sb_avail = 1; + mutex_unlock(&ccp->sb_mutex); + wake_up_interruptible_all(&ccp->sb_queue); + } +} + +/* Hygon CCP version 5: Union to define the function field (cmd_reg1/dword0) */ +union ccp_function { + struct { + u16 byteswap:2; + u16 bitwise:3; + u16 reflect:2; + u16 rsvd:8; + } pt; + struct { + u16 rand:1; + u16 rsvd:10; + u16 mode:3; + u16 ecc_mode:1; + } sm2_ecc; + struct { + u16 rand:1; + u16 rsvd:11; + u16 mode:3; + } sm2; + struct { + u16 rsvd:10; + u16 type:4; + u16 rsvd2:1; + } sm3; + struct { + u16 rsvd:7; + u16 encrypt:1; + u16 mode:4; + u16 select:1; + u16 rsvd2:2; + } sm4; + struct { + u16 size:7; + u16 encrypt:1; + u16 step:7; + } sm4_ctr; + u16 raw; +}; + +#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap) +#define CCP_PT_BITWISE(p) ((p)->pt.bitwise) + +#define CCP_SM2_RAND(p) ((p)->sm2.rand) +#define CCP_SM2_MODE(p) ((p)->sm2.mode) + +/* For ccp support both sm2 and ecc */ +#define CCP_SM2_ECC_RAND(p) ((p)->sm2_ecc.rand) +#define CCP_SM2_ECC_MODE(p) ((p)->sm2_ecc.mode) +#define CCP_SM2_ECC_ECC_MODE(p) ((p)->sm2_ecc.ecc_mode) + +#define CCP_SM3_TYPE(p) ((p)->sm3.type) +#define CCP_SM4_ENCRYPT(p) ((p)->sm4.encrypt) +#define CCP_SM4_MODE(p) ((p)->sm4.mode) +#define CCP_SM4_SELECT(p) ((p)->sm4.select) +#define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt) +#define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step) +#define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size) + +/* Word 0 */ +#define CCP5_CMD_DW0(p) ((p)->dw0) +#define CCP5_CMD_SOC(p) (CCP5_CMD_DW0(p).soc) +#define CCP5_CMD_IOC(p) (CCP5_CMD_DW0(p).ioc) +#define CCP5_CMD_INIT(p) (CCP5_CMD_DW0(p).init) +#define 
CCP5_CMD_EOM(p) (CCP5_CMD_DW0(p).eom) +#define CCP5_CMD_FUNCTION(p) (CCP5_CMD_DW0(p).function) +#define CCP5_CMD_ENGINE(p) (CCP5_CMD_DW0(p).engine) +#define CCP5_CMD_PROT(p) (CCP5_CMD_DW0(p).prot) + +/* Word 1 */ +#define CCP5_CMD_DW1(p) ((p)->length) +#define CCP5_CMD_LEN(p) (CCP5_CMD_DW1(p)) + +/* Word 2 */ +#define CCP5_CMD_DW2(p) ((p)->src_lo) +#define CCP5_CMD_SRC_LO(p) (CCP5_CMD_DW2(p)) + +/* Word 3 */ +#define CCP5_CMD_DW3(p) ((p)->dw3) +#define CCP5_CMD_SRC_MEM(p) ((p)->dw3.src_mem) +#define CCP5_CMD_SRC_HI(p) ((p)->dw3.src_hi) +#define CCP5_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id) +#define CCP5_CMD_FIX_SRC(p) ((p)->dw3.fixed) + +/* Words 4/5 */ +#define CCP5_CMD_DW4(p) ((p)->dw4) +#define CCP5_CMD_DST_LO(p) (CCP5_CMD_DW4(p).dst_lo) +#define CCP5_CMD_DW5(p) ((p)->dw5.fields.dst_hi) +#define CCP5_CMD_DST_HI(p) (CCP5_CMD_DW5(p)) +#define CCP5_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem) +#define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) +#define CCP5_CMD_SM3_LO(p) ((p)->dw4.sm3_len_lo) +#define CCP5_CMD_SM3_HI(p) ((p)->dw5.sm3_len_hi) + +/* Word 6/7 */ +#define CCP5_CMD_DW6(p) ((p)->key_lo) +#define CCP5_CMD_KEY_LO(p) (CCP5_CMD_DW6(p)) +#define CCP5_CMD_DW7(p) ((p)->dw7) +#define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) +#define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) + +#define CCP5_COMMANDS_PER_QUEUE 8192 +#define CCP5_QUEUE_SIZE_VAL ((ffs(CCP5_COMMANDS_PER_QUEUE) - 2) & \ + CMD5_Q_SIZE) +#define CCP5_Q_PTR_MASK (2 << (CCP5_QUEUE_SIZE_VAL + 5) - 1) +#define CCP5_Q_SIZE(n) (CCP5_COMMANDS_PER_QUEUE * (n)) + +/* indicates whether there is ECC engine for Hygon CCP */ +#define RI_ECC_PRESENT 0x0400 + +/** + * Hygon CCP from 4th generation support both sm2 & ecc, + * but its input content is different from previous version. + * the previous requries only one src buffer which include + * hash + key. Now, hash and key should passed separately. 
To + * compatible with previous driver, we parse hash and key + * from src buffer which same as previous input + */ +#define SM2_ECC_OPERAND_LEN 32 +#define SM2_ECC_KG_SRC_SIZE 32 +#define SM2_ECC_LP_SRC_SIZE 32 +#define SM2_ECC_SIGN_SRC_SIZE 64 +#define SM2_ECC_VERIFY_SRC_SIZE 96 + +static inline int ccp5_get_keyinfo(struct ccp_op *op, dma_addr_t *kaddr, u32 *slen) +{ + struct ccp_dma_info *sinfo = &op->src.u.dma; + dma_addr_t saddr = sinfo->address + sinfo->offset; + int ret = 0; + + switch (op->u.sm2.mode) { + case CCP_SM2_MODE_SIGN: + *kaddr = saddr + SM2_ECC_OPERAND_LEN; + *slen = SM2_ECC_SIGN_SRC_SIZE; + break; + case CCP_SM2_MODE_VERIFY: + *kaddr = saddr + SM2_ECC_VERIFY_SRC_SIZE; + *slen = SM2_ECC_VERIFY_SRC_SIZE; + break; + case CCP_SM2_MODE_KG: + *kaddr = 0; /* unused for KG */ + *slen = SM2_ECC_KG_SRC_SIZE; + break; + case CCP_SM2_MODE_LP: + *kaddr = saddr + SM2_ECC_OPERAND_LEN; + *slen = SM2_ECC_LP_SRC_SIZE; + break; + default: + pr_err("Invalid sm2 operation, mode = %d\n", op->u.sm2.mode); + ret = -EINVAL; + break; + } + + return ret; +} + +static inline u32 low_address(unsigned long addr) +{ + return (u64)addr & 0x0ffffffff; +} + +static inline u32 high_address(unsigned long addr) +{ + return ((u64)addr >> 32) & 0x00000ffff; +} + +static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) +{ + unsigned int head_idx, n; + u32 head_lo, queue_start; + + queue_start = low_address(cmd_q->qdma_tail); + head_lo = ioread32(cmd_q->reg_head_lo); + head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); + + n = head_idx + CCP5_COMMANDS_PER_QUEUE - cmd_q->qidx - 1; + + return n % CCP5_COMMANDS_PER_QUEUE; /* Always one unused spot */ +} + +static int ccp5_do_multi_cmds(struct ccp5_desc *desc, + struct ccp_cmd_queue *cmd_q) +{ + u32 *mP; + __le32 *dP; + int i; + + cmd_q->total_ops++; + + if (CCP5_CMD_SOC(desc)) { + CCP5_CMD_IOC(desc) = 1; + CCP5_CMD_SOC(desc) = 0; + } + + mutex_lock(&cmd_q->q_mutex); + + mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; + 
dP = (__le32 *) desc; + for (i = 0; i < 8; i++) + mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ + + cmd_q->qidx = (cmd_q->qidx + 1) % CCP5_COMMANDS_PER_QUEUE; + + mutex_unlock(&cmd_q->q_mutex); + + return 0; +} + +static int ccp5_do_run_cmd(struct ccp_op *op) +{ + struct ccp_cmd_queue *cmd_q = op->cmd_q; + u32 tail; + int ret = 0; + + mutex_lock(&cmd_q->q_mutex); + + /* The data used by this command must be flushed to memory */ + wmb(); + + /* Write the new tail address back to the queue register */ + tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); + iowrite32(tail, cmd_q->reg_tail_lo); + + /* Turn the queue back on using our cached control register */ + iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); + mutex_unlock(&cmd_q->q_mutex); + + if (op->ioc) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + /* Log the error and flush the queue by + * moving the head pointer + */ + if (cmd_q->cmd_error) + ccp_log_error(cmd_q->ccp, cmd_q->cmd_error); + iowrite32(tail, cmd_q->reg_head_lo); + if (!ret) + ret = -EIO; + } + cmd_q->int_rcvd = 0; + } + + return ret; +} + +static int ccp5_do_cmd(struct ccp5_desc *desc, + struct ccp_cmd_queue *cmd_q) +{ + __le32 *mP; + u32 *dP; + u32 tail; + int i; + int ret = 0; + + cmd_q->total_ops++; + + if (CCP5_CMD_SOC(desc)) { + CCP5_CMD_IOC(desc) = 1; + CCP5_CMD_SOC(desc) = 0; + } + mutex_lock(&cmd_q->q_mutex); + + mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx]; + dP = (u32 *)desc; + for (i = 0; i < 8; i++) + mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ + + cmd_q->qidx = (cmd_q->qidx + 1) % CCP5_COMMANDS_PER_QUEUE; + + /* The data used by this command must be flushed to memory */ + wmb(); + + /* Write the new tail address back to the queue register */ + tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); + iowrite32(tail, cmd_q->reg_tail_lo); + + /* Turn the queue back on using our cached control 
register */ + iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); + mutex_unlock(&cmd_q->q_mutex); + + if (CCP5_CMD_IOC(desc)) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + /* Log the error and flush the queue by + * moving the head pointer + */ + if (cmd_q->cmd_error) + ccp_log_error(cmd_q->ccp, + cmd_q->cmd_error); + iowrite32(tail, cmd_q->reg_head_lo); + if (!ret) + ret = -EIO; + } + cmd_q->int_rcvd = 0; + } + + return ret; +} + +static int ccp5_perform_sm2(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + struct ccp_dma_info *saddr = &op->src.u.dma; + struct ccp_dma_info *daddr = &op->dst.u.dma; + dma_addr_t kaddr; + unsigned int slen = saddr->length; + int ret = 0; + + op->cmd_q->total_sm2_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM2; + + CCP5_CMD_SOC(&desc) = 0; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 1; + CCP5_CMD_EOM(&desc) = 1; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + + /* + * ccp support both sm2 and ecc, the rand,mode filed are different + * with previous, and run on ecc or sm2 also should be indicated + */ + if (op->cmd_q->ccp->support_sm2_ecc) { + ret = ccp5_get_keyinfo(op, &kaddr, &slen); + if (ret) + return ret; + + CCP_SM2_ECC_RAND(&function) = op->u.sm2.rand; + CCP_SM2_ECC_MODE(&function) = op->u.sm2.mode; + CCP_SM2_ECC_ECC_MODE(&function) = 0; /* 0: SM2 1: ECC */ + } else { + CCP_SM2_RAND(&function) = op->u.sm2.rand; + CCP_SM2_MODE(&function) = op->u.sm2.mode; + } + + CCP5_CMD_FUNCTION(&desc) = function.raw; + + /* Length of source data must match with mode */ + CCP5_CMD_LEN(&desc) = slen; + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(saddr); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(saddr); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(daddr); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(daddr); + CCP5_CMD_DST_MEM(&desc) 
= CCP_MEMTYPE_SYSTEM; + + if (op->cmd_q->ccp->support_sm2_ecc && + op->u.sm2.mode != CCP_SM2_MODE_KG) { + CCP5_CMD_KEY_LO(&desc) = low_address(kaddr); + CCP5_CMD_KEY_HI(&desc) = high_address(kaddr); + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + } + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_sm3(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + + op->cmd_q->total_sm3_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM3; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM3_TYPE(&function) = op->u.sm3.type; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + if (op->eom) { + CCP5_CMD_SM3_LO(&desc) = lower_32_bits(op->u.sm3.msg_bits); + CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); + } + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_ENCRYPT(&function) = op->u.sm4.action; + CCP_SM4_MODE(&function) = op->u.sm4.mode; + CCP_SM4_SELECT(&function) = op->u.sm4.select; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + 
CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4_ctr(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ctr_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_CTR; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_CTR_SIZE(&function) = op->u.sm4_ctr.size; + CCP_SM4_CTR_ENCRYPT(&function) = op->u.sm4_ctr.action; + CCP_SM4_CTR_STEP(&function) = op->u.sm4_ctr.step; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_passthru(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + struct ccp_dma_info *saddr = &op->src.u.dma; + struct ccp_dma_info *daddr = &op->dst.u.dma; + + + op->cmd_q->total_pt_ops++; 
+ + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU; + + CCP5_CMD_SOC(&desc) = 0; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 0; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap; + CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + /* Length of source data is always 256 bytes */ + if (op->src.type == CCP_MEMTYPE_SYSTEM) + CCP5_CMD_LEN(&desc) = saddr->length; + else + CCP5_CMD_LEN(&desc) = daddr->length; + + if (op->src.type == CCP_MEMTYPE_SYSTEM) { + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) + CCP5_CMD_LSB_ID(&desc) = op->sb_key; + } else { + u32 key_addr = op->src.u.sb * CCP_SB_BYTES; + + CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_SRC_HI(&desc) = 0; + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB; + } + + if (op->dst.type == CCP_MEMTYPE_SYSTEM) { + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + } else { + u32 key_addr = op->dst.u.sb * CCP_SB_BYTES; + + CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_DST_HI(&desc) = 0; + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB; + } + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_aes(struct ccp_op *op) +{ + pr_err("AES function not implement!"); + return -EPERM; +} + +static int ccp5_perform_xts_aes(struct ccp_op *op) +{ + pr_err("XTS-AES function not implement!"); + return -EPERM; +} + +static int ccp5_perform_sha(struct ccp_op *op) +{ + pr_err("SHA function not implement!"); + return -EPERM; +} + +static int ccp5_perform_des3(struct ccp_op *op) +{ + pr_err("DES3 function not implement!"); + return -EPERM; +} + +static int 
ccp5_perform_rsa(struct ccp_op *op) +{ + pr_err("RSA function not implement!"); + return -EPERM; +} + +static int ccp5_perform_ecc(struct ccp_op *op) +{ + pr_err("ECC function not implement!"); + return -EPERM; +} + +static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) +{ + int q_mask = 1 << cmd_q->id; + int queues = 0; + int j; + + /* Build a bit mask to know which LSBs this queue has access to. + * Don't bother with segment 0 as it has special privileges. + */ + status >>= LSB_REGION_WIDTH; + for (j = 1; j < MAX_LSB_CNT; j++) { + if (status & q_mask) + bitmap_set(cmd_q->lsbmask, j, 1); + status >>= LSB_REGION_WIDTH; + } + queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); + dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n", + cmd_q->id, queues); + + return queues ? 0 : -EINVAL; +} + +static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp, + int lsb_cnt, int n_lsbs, + unsigned long *lsb_pub) +{ + DECLARE_BITMAP(qlsb, MAX_LSB_CNT); + int bitno; + int qlsb_wgt; + int i; + + /* For each queue: + * If the count of potential LSBs available to a queue matches the + * ordinal given to us in lsb_cnt: + * Copy the mask of possible LSBs for this queue into "qlsb"; + * For each bit in qlsb, see if the corresponding bit in the + * aggregation mask is set; if so, we have a match. + * If we have a match, clear the bit in the aggregation to + * mark it as no longer available. + * If there is no match, clear the bit in qlsb and keep looking. 
+ */ + for (i = 0; i < ccp->cmd_q_count; i++) { + struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; + + qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); + + if (qlsb_wgt == lsb_cnt) { + bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT); + + bitno = find_first_bit(qlsb, MAX_LSB_CNT); + while (bitno < MAX_LSB_CNT) { + if (test_bit(bitno, lsb_pub)) { + /* We found an available LSB + * that this queue can access + */ + cmd_q->lsb = bitno; + bitmap_clear(lsb_pub, bitno, 1); + dev_dbg(ccp->dev, + "Queue %d gets LSB %d\n", + i, bitno); + break; + } + bitmap_clear(qlsb, bitno, 1); + bitno = find_first_bit(qlsb, MAX_LSB_CNT); + } + if (bitno >= MAX_LSB_CNT) + return -EINVAL; + n_lsbs--; + } + } + return n_lsbs; +} + +/* For each queue, from the most- to least-constrained: + * find an LSB that can be assigned to the queue. If there are N queues that + * can only use M LSBs, where N > M, fail; otherwise, every queue will get a + * dedicated LSB. Remaining LSB regions become a shared resource. + * If we have fewer LSBs than queues, all LSB regions become shared resources. + */ +static int ccp_assign_lsbs(struct ccp_device *ccp) +{ + DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT); + DECLARE_BITMAP(qlsb, MAX_LSB_CNT); + int n_lsbs = 0; + int bitno; + int i, lsb_cnt; + int rc = 0; + + bitmap_zero(lsb_pub, MAX_LSB_CNT); + + /* Create an aggregate bitmap to get a total count of available LSBs */ + for (i = 0; i < ccp->cmd_q_count; i++) + bitmap_or(lsb_pub, + lsb_pub, ccp->cmd_q[i].lsbmask, + MAX_LSB_CNT); + + n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT); + + if (n_lsbs >= ccp->cmd_q_count) { + /* We have enough LSBS to give every queue a private LSB. + * Brute force search to start with the queues that are more + * constrained in LSB choice. When an LSB is privately + * assigned, it is removed from the public mask. + * This is an ugly N squared algorithm with some optimization. 
+ */ + for (lsb_cnt = 1; + n_lsbs && (lsb_cnt <= MAX_LSB_CNT); + lsb_cnt++) { + rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs, + lsb_pub); + if (rc < 0) + return -EINVAL; + n_lsbs = rc; + } + } + + rc = 0; + /* What's left of the LSBs, according to the public mask, now become + * shared. Any zero bits in the lsb_pub mask represent an LSB region + * that can't be used as a shared resource, so mark the LSB slots for + * them as "in use". + */ + bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT); + + bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT); + while (bitno < MAX_LSB_CNT) { + bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE); + bitmap_set(qlsb, bitno, 1); + bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT); + } + + return rc; +} + +static void ccp5_disable_queue_interrupts(struct ccp_device *ccp) +{ + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) + iowrite32(0x0, ccp->cmd_q[i].reg_int_enable); +} + +static void ccp5_enable_queue_interrupts(struct ccp_device *ccp) +{ + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) + iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable); +} + +static void ccp5_irq_bh(unsigned long data) +{ + struct ccp_device *ccp = (struct ccp_device *)data; + u32 status; + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) { + struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; + + status = ioread32(cmd_q->reg_interrupt_status); + + if (status & SUPPORTED_INTERRUPTS) { + cmd_q->int_status = status; + cmd_q->q_status = ioread32(cmd_q->reg_status); + cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); + + /* On error, only save the first error value */ + if ((status & INT_ERROR) && !cmd_q->cmd_error) + cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); + + /* Acknowledge the interrupt and wake the kthread */ + iowrite32(status, cmd_q->reg_interrupt_status); + cmd_q->int_rcvd = 1; + wake_up_interruptible(&cmd_q->int_queue); + } + } + ccp5_enable_queue_interrupts(ccp); +} + +static irqreturn_t ccp5_irq_handler(int 
irq, void *data) +{ + struct ccp_device *ccp = (struct ccp_device *)data; + + ccp5_disable_queue_interrupts(ccp); + ccp->total_interrupts++; + if (ccp->use_tasklet) + tasklet_schedule(&ccp->irq_tasklet); + else + ccp5_irq_bh((unsigned long)ccp); + return IRQ_HANDLED; +} + +static int ccp5_init(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + struct ccp_cmd_queue *cmd_q; + struct dma_pool *dma_pool; + char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; + unsigned int qmr, i; + u64 status; + u32 status_lo, status_hi; + int ret; + + /* Find available queues */ + qmr = ioread32(ccp->io_regs + Q_MASK_REG); + /* + * Check for a access to the registers. If this read returns + * 0xffffffff, it's likely that the system is running a broken + * BIOS which disallows access to the device. Stop here and fail + * the initialization (but not the load, as the PSP could get + * properly initialized). + */ + if (qmr == 0xffffffff) { + dev_notice(dev, "ccp: unable to access the device: you might be running a broken BIOS.\n"); + return 1; + } + + /* check if ccp support both sm2 and ecc. 
*/ + ccp->support_sm2_ecc = !!(ioread32(ccp->io_regs + CMD5_PSP_CCP_VERSION) + & RI_ECC_PRESENT); + + for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { + if (!(qmr & (1 << i))) + continue; + + /* Allocate a dma pool for this queue */ + snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d", + ccp->name, i); + dma_pool = dma_pool_create(dma_pool_name, dev, + CCP_DMAPOOL_MAX_SIZE, + CCP_DMAPOOL_ALIGN, 0); + if (!dma_pool) { + dev_err(dev, "unable to allocate dma pool\n"); + ret = -ENOMEM; + goto e_pool; + } + + cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; + ccp->cmd_q_count++; + + cmd_q->ccp = ccp; + cmd_q->id = i; + cmd_q->dma_pool = dma_pool; + mutex_init(&cmd_q->q_mutex); + + /* Page alignment satisfies our needs for N <= 128 */ + BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); + cmd_q->qsize = CCP5_Q_SIZE(Q_DESC_SIZE); + cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, + &cmd_q->qbase_dma, + GFP_KERNEL); + if (!cmd_q->qbase) { + dev_err(dev, "unable to allocate command queue\n"); + ret = -ENOMEM; + goto e_pool; + } + + cmd_q->qidx = 0; + /* Preset some register values and masks that are queue + * number dependent + */ + cmd_q->reg_control = ccp->io_regs + + CMD5_Q_STATUS_INCR * (i + 1); + cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE; + cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE; + cmd_q->reg_int_enable = cmd_q->reg_control + + CMD5_Q_INT_ENABLE_BASE; + cmd_q->reg_interrupt_status = cmd_q->reg_control + + CMD5_Q_INTERRUPT_STATUS_BASE; + cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE; + cmd_q->reg_int_status = cmd_q->reg_control + + CMD5_Q_INT_STATUS_BASE; + cmd_q->reg_dma_status = cmd_q->reg_control + + CMD5_Q_DMA_STATUS_BASE; + cmd_q->reg_dma_read_status = cmd_q->reg_control + + CMD5_Q_DMA_READ_STATUS_BASE; + cmd_q->reg_dma_write_status = cmd_q->reg_control + + CMD5_Q_DMA_WRITE_STATUS_BASE; + + init_waitqueue_head(&cmd_q->int_queue); + + dev_dbg(dev, "queue #%u available\n", i); + } + + if 
(ccp->cmd_q_count == 0) { + dev_notice(dev, "no command queues available\n"); + ret = 1; + goto e_pool; + } + + /* Turn off the queues and disable interrupts until ready */ + ccp5_disable_queue_interrupts(ccp); + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + cmd_q->qcontrol = 0; /* Start with nothing */ + iowrite32(cmd_q->qcontrol, cmd_q->reg_control); + + ioread32(cmd_q->reg_int_status); + ioread32(cmd_q->reg_status); + + /* Clear the interrupt status */ + iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status); + } + + dev_dbg(dev, "Requesting an IRQ...\n"); + /* Request an irq */ + ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp); + if (ret) { + dev_err(dev, "unable to allocate an IRQ\n"); + goto e_pool; + } + /* Initialize the ISR tasklet */ + if (ccp->use_tasklet) + tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh, + (unsigned long)ccp); + + dev_dbg(dev, "Loading LSB map...\n"); + /* Copy the private LSB mask to the public registers */ + status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); + status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); + iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET); + iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET); + status = ((u64)status_hi<<30) | (u64)status_lo; + + dev_dbg(dev, "Configuring virtual queues...\n"); + /* Configure size of each virtual queue accessible to host */ + for (i = 0; i < ccp->cmd_q_count; i++) { + u32 dma_addr_lo; + u32 dma_addr_hi; + + cmd_q = &ccp->cmd_q[i]; + + cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); + cmd_q->qcontrol |= CCP5_QUEUE_SIZE_VAL << CMD5_Q_SHIFT; + + cmd_q->qdma_tail = cmd_q->qbase_dma; + dma_addr_lo = low_address(cmd_q->qdma_tail); + iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo); + iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo); + + dma_addr_hi = high_address(cmd_q->qdma_tail); + cmd_q->qcontrol |= (dma_addr_hi << 16); + iowrite32(cmd_q->qcontrol, cmd_q->reg_control); + + /* Find 
the LSB regions accessible to the queue */ + ccp_find_lsb_regions(cmd_q, status); + cmd_q->lsb = -1; /* Unassigned value */ + } + + dev_dbg(dev, "Assigning LSBs...\n"); + ret = ccp_assign_lsbs(ccp); + if (ret) { + dev_err(dev, "Unable to assign LSBs (%d)\n", ret); + goto e_irq; + } + + /* Optimization: pre-allocate LSB slots for each queue */ + for (i = 0; i < ccp->cmd_q_count; i++) { + ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2); + ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2); + } + + dev_dbg(dev, "Starting threads...\n"); + /* Create a kthread for each queue */ + for (i = 0; i < ccp->cmd_q_count; i++) { + struct task_struct *kthread; + + cmd_q = &ccp->cmd_q[i]; + + kthread = kthread_run(ccp_cmd_queue_thread, cmd_q, + "%s-q%u", ccp->name, cmd_q->id); + if (IS_ERR(kthread)) { + dev_err(dev, "error creating queue thread (%ld)\n", + PTR_ERR(kthread)); + ret = PTR_ERR(kthread); + goto e_kthread; + } + + cmd_q->kthread = kthread; + } + + dev_dbg(dev, "Enabling interrupts...\n"); + ccp5_enable_queue_interrupts(ccp); + + dev_dbg(dev, "Registering device...\n"); + /* Put this on the unit list to make it available */ + ccp_add_device(ccp); + + ret = ccp_register_rng(ccp); + if (ret) + goto e_kthread; + + /* Register the DMA engine support */ + ret = ccp_dmaengine_register(ccp); + if (ret) + goto e_hwrng; + +#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS + /* Set up debugfs entries */ + ccp5_debugfs_setup(ccp); +#endif + + return 0; + +e_hwrng: + ccp_unregister_rng(ccp); + +e_kthread: + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + +e_irq: + sp_free_ccp_irq(ccp->sp, ccp); + +e_pool: + for (i = 0; i < ccp->cmd_q_count; i++) + dma_pool_destroy(ccp->cmd_q[i].dma_pool); + + return ret; +} + +static void ccp5_destroy(struct ccp_device *ccp) +{ + struct ccp_cmd_queue *cmd_q; + struct ccp_cmd *cmd; + unsigned int i; + + /* Unregister the DMA engine */ + ccp_dmaengine_unregister(ccp); + + /* Unregister the 
RNG */ + ccp_unregister_rng(ccp); + + /* Remove this device from the list of available units first */ + ccp_del_device(ccp); + +#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS + /* We're in the process of tearing down the entire driver; + * when all the devices are gone clean up debugfs + */ + if (ccp_present()) + ccp5_debugfs_destroy(); +#endif + + /* Disable and clear interrupts */ + ccp5_disable_queue_interrupts(ccp); + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + /* Turn off the run bit */ + iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control); + + /* Clear the interrupt status */ + iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status); + ioread32(cmd_q->reg_int_status); + ioread32(cmd_q->reg_status); + } + + /* Stop the queue kthreads */ + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + + sp_free_ccp_irq(ccp->sp, ccp); + + /* Flush the cmd and backlog queue */ + while (!list_empty(&ccp->cmd)) { + /* Invoke the callback directly with an error code */ + cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); + list_del(&cmd->entry); + cmd->callback(cmd->data, -ENODEV); + } + while (!list_empty(&ccp->backlog)) { + /* Invoke the callback directly with an error code */ + cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); + list_del(&cmd->entry); + cmd->callback(cmd->data, -ENODEV); + } +} + +static void ccp5_config(struct ccp_device *ccp) +{ + /* Public side */ + iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); +} + +static void ccp5other_config(struct ccp_device *ccp) +{ + int i; + u32 rnd; + + /* We own all of the queues on the NTB CCP */ + + iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); + iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); + + /* According to spec description for SM4 high secure module, + * which need 64 bytes data, so the initialize times of writing + * mask register must be 16 or a multiple of 16. 
The AES algorithm needs 48 bytes
.ccp_vdata = &ccpv5a, + .ccp_vdata = &ccpv5a_hygon, +#endif +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &pspv1, +#endif + }, + { /* 10 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5b_hygon, +#endif + }, + { /* 11 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5a_hygon, #endif #ifdef CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &psp_csvv1, @@ -612,11 +627,11 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] }, { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, - { PCI_VDEVICE(HYGON, 0x1456), (kernel_ulong_t)&dev_vdata[1] }, - { PCI_VDEVICE(HYGON, 0x1468), (kernel_ulong_t)&dev_vdata[2] }, - { PCI_VDEVICE(HYGON, 0x1486), (kernel_ulong_t)&dev_vdata[9] }, - { PCI_VDEVICE(HYGON, 0x14b8), (kernel_ulong_t)&dev_vdata[2] }, - { PCI_VDEVICE(HYGON, 0x14a6), (kernel_ulong_t)&dev_vdata[9] }, + { PCI_VDEVICE(HYGON, 0x1456), (kernel_ulong_t)&dev_vdata[9] }, + { PCI_VDEVICE(HYGON, 0x1468), (kernel_ulong_t)&dev_vdata[10] }, + { PCI_VDEVICE(HYGON, 0x1486), (kernel_ulong_t)&dev_vdata[11] }, + { PCI_VDEVICE(HYGON, 0x14b8), (kernel_ulong_t)&dev_vdata[10] }, + { PCI_VDEVICE(HYGON, 0x14a6), (kernel_ulong_t)&dev_vdata[11] }, /* Last entry must be zero */ { 0, } }; -- Gitee From 6a8fe61cbbefb4383f34fafac84ff6b6faec30a4 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Thu, 25 Apr 2024 19:17:41 +0800 Subject: [PATCH 0902/2138] anolis: Add mediated ccp driver support for hygon crypto technology. ANBZ: #8582 Add the hct.ko driver module to support the HYGON Cryptography Technology (HCT) Engine, which also supports CCP virtualization. 
Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3119 --- arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + drivers/crypto/ccp/Kconfig | 13 + drivers/crypto/ccp/Makefile | 2 + drivers/crypto/ccp/hygon/hct.c | 2185 +++++++++++++++++++++++ 5 files changed, 2202 insertions(+) create mode 100644 drivers/crypto/ccp/hygon/hct.c diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index ff275d4fdbc1..58c6d3184e13 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -7459,6 +7459,7 @@ CONFIG_HYGON_GM=y CONFIG_HYGON_PSP2CPU_CMD=y CONFIG_TDM_DEV_HYGON=y CONFIG_TDM_KERNEL_GUARD=m +CONFIG_CRYPTO_DEV_HCT=m # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 753475d33452..f04bbb4b7d28 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -7450,6 +7450,7 @@ CONFIG_HYGON_GM=y CONFIG_HYGON_PSP2CPU_CMD=y CONFIG_TDM_DEV_HYGON=y CONFIG_TDM_KERNEL_GUARD=m +CONFIG_CRYPTO_DEV_HCT=m # CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 726f1a6025eb..7115bf3028d4 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -69,6 +69,19 @@ config TDM_DEV_HYGON help Hygon TDM driver +config CRYPTO_DEV_HCT + tristate "HCT CCP device" + default m + depends on X86_64 + select VFIO_MDEV + help + Provides hygon crypto technology ccp device driver. + Support virtualize ccp devices based on mediated devices. + Support multi-process and virtual machines. + Support host-noiommu mode memory encryption function. 
+ Support compiling hct.ko when mdev module is disabled. + If you choose 'M' here, this module will be called hct ccp. + config TDM_KERNEL_GUARD tristate "Hygon TDM kernel guard" default y diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 69534353c8d4..88086af2412e 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -19,6 +19,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ vpsp.o ccp-$(CONFIG_TDM_DEV_HYGON) += tdm-dev.o +obj-$(CONFIG_CRYPTO_DEV_HCT) += hygon/hct.o + obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-aes.o \ diff --git a/drivers/crypto/ccp/hygon/hct.c b/drivers/crypto/ccp/hygon/hct.c new file mode 100644 index 000000000000..dd386fec2b07 --- /dev/null +++ b/drivers/crypto/ccp/hygon/hct.c @@ -0,0 +1,2185 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright (c) 2022 HYGON Corporation . All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * VERSION_STRING modification instructions: + * 0.1 -- support hct/mdev mode. + * 0.2 -- supoort qemu virtualization. 
+ */ + +#undef pr_fmt +#define pr_fmt(fmt) "hct: " fmt + +#define VERSION_STRING "0.2" +#define DRIVER_AUTHOR "HYGON Corporation" +#define VERSION_SIZE 16 + +#define MCCP_CLASS_NAME "hct" +#define MCCP_NAME "hct" +#define MCCP_STRING_LEN 16 + +#define MCCP_CONFIG_SPACE_SIZE 0xff + +#define MCCP_VFIO_PCI_OFFSET_SHIFT 40 +#define MCCP_VFIO_PCI_OFFSET_TO_INDEX(off) \ + (off >> MCCP_VFIO_PCI_OFFSET_SHIFT) +#define MCCP_VFIO_PCI_INDEX_TO_OFFSET(index) \ + ((u64)(index) << MCCP_VFIO_PCI_OFFSET_SHIFT) +#define MCCP_VFIO_PCI_OFFSET_MASK \ + (((u64)(1) << MCCP_VFIO_PCI_OFFSET_SHIFT) - 1) +#define vdev_to_mdev_state(vdev) \ + container_of((vdev), struct mdev_state, vdev) + +#define MCCP_SHARE_IOC_TYPE 'C' +#define MCCP_SHARE_OP 0x01 +#define MCCP_SHARE_OP_DMA_MAP 0x01 +#define MCCP_SHARE_OP_DMA_UNMAP_ALL 0x02 +#define MCCP_SHARE_OP_GET_ID 0x03 +#define MCCP_SHARE_OP_GET_PASID 0x04 +#define MCCP_SHARE_OP_DMA_UNMAP 0x05 +#define MCCP_SHARE_OP_GET_VERSION 0x06 + +#define MCCP_SHARE_IOMMU_MAGIC 0x3d6a9c5728633b9e + +#define PCI_RESOURCE_BAR2 2 +#define MCCP_DEV_ID_SIZE 8 + +/* fixed iova range for ccp dma. 
*/ +#define MCCP_DMA_IOVA_OFFSET 0 +#define MCCP_DMA_IOVA_SIZE (1ul << 30) + +#define MCCP_INSTANCE_MAX 1024 +#define MCCP_INSTANCE_OFFSET 8 +#define MCCP_INSTANCE_MASK (~((1u << MCCP_INSTANCE_OFFSET) - 1)) +#define MCCP_PASID_SIZE (1 << 8) +#define MCCP_IOVA_MAX_SLOT 1024 +#define MCCP_DEV_MAX 16 +#define MCCP_DEV_QUEUE_MAX 8 +#define MCCP_DEV_QUEUE 5 +#define MCCP_QUEUES_MAX (MCCP_DEV_MAX * MCCP_DEV_QUEUE_MAX) +#define MCCP_QUEUE_NEED_INIT 0x01 +#define MCCP_SHARED_SIZE (MCCP_DEV_MAX * PAGE_SIZE) + +#define MCCP_MSIX_ENTRY_SIZE 2 +#define MCCP_NTB_VECTOR_NUM 1 +#define MCCP_PSP_VECTOR_NUM 2 +#define MCCP_GET_QUEUE_FLAG (0x55) +#define MCCP_PUT_QUEUE_FLAG (0xAA) +#define IRQ_EVENT_SIGNAL (1UL) +#define IRQ_EVENT_SIGFAL (0xFF) + +#define Q_MASK_REG 0x0000 +#define MCMD_Q_STATUS_INCR 0x1000 +#define MCMD_Q_TAIL_LO_BASE 0x0004 +#define MCMD_Q_HEAD_LO_BASE 0x0008 +#define MCMD_Q_INT_ENABLE_BASE 0x000C +#define MCMD_Q_INTERRUPT_STATUS_BASE 0x0010 +#define MCMD_Q_STATUS_BASE 0x0100 +#define MCMD_Q_INT_STATUS_BASE 0x0104 + +#define INT_COMPLETION 0x1 +#define INT_ERROR 0x2 +#define INT_QUEUE_STOPPED 0x4 +#define INT_EMPTY_QUEUE 0x8 +#define SUPPORTED_INTERRUPTS (INT_COMPLETION | INT_ERROR) +#define MCMD_Q_ERROR(__qs) ((__qs) & 0x0000003f) + +#define PHY_ADDR_MASK 0x7FFFFFFFFFFF + +struct hct_shared_cfg { + unsigned int iova_slot[MCCP_IOVA_MAX_SLOT]; + unsigned int ccp_queue_state[MCCP_QUEUES_MAX]; + unsigned int ccps_ref[MCCP_DEV_MAX]; + unsigned int ccps_ref_lock; + int rsvd1[15]; + u64 qidx[MCCP_QUEUES_MAX]; + unsigned int ccp_state[MCCP_DEV_MAX]; +} __aligned(PAGE_SIZE); + +struct hct_dev_ctrl { + unsigned char op; + unsigned char rsvd[3]; + union { + unsigned char version[VERSION_SIZE]; + unsigned int id; + struct { + unsigned long vaddr; + unsigned long iova; + unsigned long size; + }; + }; +}; + +struct hct_dma { + struct list_head next; + unsigned long vaddr; + unsigned long iova; + size_t size; + struct page **pages; + unsigned long npages; + unsigned int 
pfnmap_flag; +}; + +/* record the register address related to interrupt */ +struct hct_cmd_queue { + void __iomem *reg_control; + void __iomem *reg_tail_lo; + void __iomem *reg_head_lo; + void __iomem *reg_int_enable; + void __iomem *reg_interrupt_status; + void __iomem *reg_status; + void __iomem *reg_int_status; + struct mutex q_lock; + DECLARE_KFIFO_PTR(ectx_fifo, struct eventfd_ctx *); +} ____cacheline_aligned; + +struct hct_dev_ctx { + struct hct_cmd_queue cmd_q[MCCP_DEV_QUEUE_MAX]; + struct tasklet_struct irq_tasklet; + char devname[MCCP_STRING_LEN]; + void __iomem *io_regs; /* for BAR2 memory address */ + u32 q_count; + int irq; +} ____cacheline_aligned; + +struct hct_iommu { + unsigned long magic; + struct mutex lock; + struct pci_dev *pdev; + struct hct_dev_ctx dev_ctx; + unsigned long id; + unsigned long ref; +}; + +static struct hct_data { + struct hct_iommu iommu[MCCP_DEV_MAX]; + struct mutex lock; + unsigned long bitmap; + struct iommu_domain *domain; + int prot; + dma_addr_t dma_share_iova; + size_t dma_share_size; + unsigned long dma_share_ref; + unsigned long mdev_ref; + unsigned long ids[BITS_TO_LONGS(MCCP_INSTANCE_MAX)]; +} hct_data; + +static struct hct_share_cfg { + long ref; + struct mutex lock; + struct page *pages[MCCP_DEV_MAX]; + u64 pagecount; + void *vaddr; + u64 size; +} hct_share; + +static struct hct_dev { + dev_t vd_devt; + struct class *vd_class; + struct cdev vd_cdev; + struct device dev; + struct mdev_parent mdev_parent; +} hct_dev; + +struct mdev_region_info { + u64 start; + u64 phys_start; + u32 size; + u64 vfio_offset; +}; + +struct mdev_state { + struct vfio_device vdev; + struct mutex ops_lock; + struct mdev_device *mdev; + struct hct_iommu *iommu; + struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS]; + struct list_head next; + struct vfio_device_info dev_info; + unsigned long ref; + struct eventfd_ctx *trigger[MCCP_DEV_QUEUE_MAX]; + u8 efd_start; + u8 efd_count; +}; + +struct mdev_type hct_mdev_type = { + .sysfs_name = 
"1", + .pretty_name = "hct mdev type" +}; +struct mdev_type *hct_mdev_types[] = { + &hct_mdev_type +}; + +static void hct_cmd_queue_enable_interrupt(struct hct_dev_ctx *dev_ctx) +{ + unsigned int i; + + for (i = 0; i < dev_ctx->q_count; i++) + iowrite32(SUPPORTED_INTERRUPTS, dev_ctx->cmd_q[i].reg_int_enable); +} + +static void hct_cmd_queue_disable_interrupt(struct hct_dev_ctx *dev_ctx) +{ + unsigned int i; + + for (i = 0; i < dev_ctx->q_count; i++) + iowrite32(0x00, dev_ctx->cmd_q[i].reg_int_enable); +} + +static void hct_cmd_queue_intr_task(unsigned long data) +{ + struct hct_dev_ctx *dev_ctx = (struct hct_dev_ctx *)data; + u32 i, err, status; + + hct_cmd_queue_disable_interrupt(dev_ctx); + + for (i = 0; i < dev_ctx->q_count; i++) { + struct hct_cmd_queue *cmd_q = &dev_ctx->cmd_q[i]; + struct eventfd_ctx *trigger; + + status = ioread32(cmd_q->reg_interrupt_status); + if (status) { + if (status & INT_ERROR) { + /* print interrupt numbers for debug */ + err = ioread32(cmd_q->reg_status); + pr_err("Irq fail, errcode = %d.\n", MCMD_Q_ERROR(err)); + while (kfifo_get(&cmd_q->ectx_fifo, &trigger)) + eventfd_signal(trigger, IRQ_EVENT_SIGFAL); + } else { + while (kfifo_get(&cmd_q->ectx_fifo, &trigger)) + eventfd_signal(trigger, IRQ_EVENT_SIGNAL); + } + + iowrite32(status, cmd_q->reg_interrupt_status); + } + } + + hct_cmd_queue_enable_interrupt(dev_ctx); +} + +static irqreturn_t hct_cmd_queue_intr_handler(int irq, void *arg) +{ + struct hct_dev_ctx *dev_ctx = (struct hct_dev_ctx *)arg; + + tasklet_schedule(&dev_ctx->irq_tasklet); + return IRQ_HANDLED; +} + +static int hct_dev_cmd_queue_init(struct pci_dev *pdev, struct hct_dev_ctx *dev_ctx, int idx) +{ + struct hct_cmd_queue *cmd_q; + unsigned long addr, len; + unsigned int retval, qmr; + int i, ret; + + if (!pdev || !dev_ctx) + return -EINVAL; + + memset(dev_ctx, 0, sizeof(*dev_ctx)); + + ret = pci_enable_device(pdev); + if (ret) + return -EINVAL; + + addr = pci_resource_start(pdev, PCI_RESOURCE_BAR2); + len = 
pci_resource_len(pdev, PCI_RESOURCE_BAR2); + dev_ctx->io_regs = ioremap(addr, len); + if (!dev_ctx->io_regs) + return -ENOMEM; + + pci_set_master(pdev); + retval = pci_alloc_irq_vectors(pdev, 1, MCCP_MSIX_ENTRY_SIZE, PCI_IRQ_MSIX); + if (retval != MCCP_NTB_VECTOR_NUM && retval != MCCP_PSP_VECTOR_NUM) + return -ENOMEM; + + snprintf(dev_ctx->devname, MCCP_STRING_LEN, "hct-ccp-%d", idx); + dev_ctx->irq = pci_irq_vector(pdev, retval - 1); + /* To request_irq, the fourth parameter dev_name must be global + * variable or static variable. + */ + ret = request_irq(dev_ctx->irq, hct_cmd_queue_intr_handler, 0, dev_ctx->devname, dev_ctx); + if (ret) { + pci_free_irq_vectors(pdev); + dev_ctx->irq = 0; + return ret; + } + + tasklet_init(&dev_ctx->irq_tasklet, hct_cmd_queue_intr_task, (unsigned long)dev_ctx); + + qmr = ioread32(dev_ctx->io_regs + Q_MASK_REG); + if (qmr == 0) { + iowrite32(0x1f, dev_ctx->io_regs + Q_MASK_REG); + qmr = ioread32(dev_ctx->io_regs + Q_MASK_REG); + } + for (i = 0; i < MCCP_DEV_QUEUE_MAX; i++) { + if (!(qmr & (1 << i))) + continue; + + cmd_q = &dev_ctx->cmd_q[dev_ctx->q_count++]; + + mutex_init(&cmd_q->q_lock); + ret = kfifo_alloc(&cmd_q->ectx_fifo, MCCP_INSTANCE_MAX, GFP_KERNEL); + if (ret) + return -ENOMEM; + + cmd_q->reg_control = dev_ctx->io_regs + MCMD_Q_STATUS_INCR * (i + 1); + cmd_q->reg_tail_lo = cmd_q->reg_control + MCMD_Q_TAIL_LO_BASE; + cmd_q->reg_head_lo = cmd_q->reg_control + MCMD_Q_HEAD_LO_BASE; + cmd_q->reg_int_enable = cmd_q->reg_control + MCMD_Q_INT_ENABLE_BASE; + cmd_q->reg_interrupt_status = cmd_q->reg_control + MCMD_Q_INTERRUPT_STATUS_BASE; + cmd_q->reg_status = cmd_q->reg_control + MCMD_Q_STATUS_BASE; + cmd_q->reg_int_status = cmd_q->reg_control + MCMD_Q_INT_STATUS_BASE; + } + + return (dev_ctx->q_count >= 0) ? 
0 : -1; +} + +static int hct_iommu_alloc(struct pci_dev *pdev) +{ + unsigned long i; + int ret = -EINVAL; + + mutex_lock(&hct_data.lock); + + i = find_first_zero_bit(&hct_data.bitmap, MCCP_DEV_MAX); + if (i != MCCP_DEV_MAX) + bitmap_set(&hct_data.bitmap, i, 1); + + if (device_iommu_capable(&pdev->dev, IOMMU_CAP_CACHE_COHERENCY)) + hct_data.prot |= IOMMU_CACHE; + + mutex_unlock(&hct_data.lock); + + if (i == MCCP_DEV_MAX) + return -EINVAL; + + ret = iommu_attach_device(hct_data.domain, &pdev->dev); + if (ret) { + mutex_lock(&hct_data.lock); + bitmap_clear(&hct_data.bitmap, i, 1); + mutex_unlock(&hct_data.lock); + } else { + mutex_lock(&hct_data.iommu[i].lock); + hct_data.iommu[i].pdev = pdev; + hct_data.iommu[i].id = i; + hct_data.iommu[i].ref = 0; + hct_data.iommu[i].magic = MCCP_SHARE_IOMMU_MAGIC; + pci_set_drvdata(pdev, &hct_data.iommu[i]); + + ret = hct_dev_cmd_queue_init(pdev, &hct_data.iommu[i].dev_ctx, i); + mutex_unlock(&hct_data.iommu[i].lock); + } + + return ret; +} + +static void hct_iommu_free(struct hct_iommu *iommu) +{ + struct iommu_domain *domain; + + if (!iommu || iommu->magic != MCCP_SHARE_IOMMU_MAGIC) + return; + + domain = iommu_get_domain_for_dev(&iommu->pdev->dev); + + mutex_lock(&iommu->lock); + if (iommu->pdev && domain == hct_data.domain) + iommu_detach_device(domain, &iommu->pdev->dev); + iommu->pdev = NULL; + iommu->magic = 0; + mutex_unlock(&iommu->lock); + + mutex_lock(&hct_data.lock); + if (iommu->id < MCCP_DEV_MAX) + bitmap_clear(&hct_data.bitmap, iommu->id, 1); + mutex_unlock(&hct_data.lock); +} + +static int handle_pci_cfg_read(struct mdev_state *mdev_state, int offset, + __le32 *val, int count) +{ + u32 tmp_val = 0; + int ret = -EINVAL; + struct pci_dev *pdev = mdev_state->iommu->pdev; + + if (!mdev_state->mdev || !pdev) { + pr_err("hct: invalid dev or pdev\n"); + return ret; + } + + if (count == 1) { + u8 tmp; + + ret = pci_user_read_config_byte(pdev, offset, &tmp); + tmp_val = tmp; + } else if (count == 2) { + u16 tmp; + + ret = 
pci_user_read_config_word(pdev, offset, &tmp); + tmp_val = tmp; + } else if (count == 4) + ret = pci_user_read_config_dword(pdev, offset, &tmp_val); + + *val = cpu_to_le32(tmp_val); + + return ret; +} + +static int handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset, + u8 *buf, u32 count) +{ + u32 tmp_val = le32_to_cpu(*(u32 *)buf); + int ret = -EINVAL; + struct pci_dev *pdev = mdev_state->iommu->pdev; + + + if (!mdev_state->mdev || !pdev) { + pr_err("hct: invalid dev or pdev\n"); + return ret; + } + + if (count == 1) + ret = pci_user_write_config_byte(pdev, offset, tmp_val); + else if (count == 2) + ret = pci_user_write_config_word(pdev, offset, tmp_val); + else if (count == 4) + ret = pci_user_write_config_dword(pdev, offset, tmp_val); + + return ret; +} + +static ssize_t hct_access(struct mdev_device *mdev, u8 *buf, size_t count, + loff_t pos, bool is_write) +{ + struct mdev_state *mdev_state; + unsigned int index; + loff_t offset; + int ret = 0; + + if (!mdev || !buf) + return -EINVAL; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) { + pr_err("%s mdev_state not found\n", __func__); + return -EINVAL; + } + + mutex_lock(&mdev_state->ops_lock); + + index = MCCP_VFIO_PCI_OFFSET_TO_INDEX(pos); + offset = pos & MCCP_VFIO_PCI_OFFSET_MASK; + switch (index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + if (is_write) + ret = handle_pci_cfg_write(mdev_state, offset, buf, count); + else + ret = handle_pci_cfg_read(mdev_state, offset, (__le32 *)buf, count); + break; + default: + ret = -1; + } + + if (!ret) + ret = count; + + mutex_unlock(&mdev_state->ops_lock); + + return ret; +} + +static int hct_mdev_state_init(struct mdev_state *mdev_state) +{ + unsigned long *bitmap = &hct_data.bitmap; + struct hct_iommu *iommu = hct_data.iommu; + unsigned long ref = -1ul; + int i, n = -1; + int ret = 0; + + if (!mdev_state) + return -EINVAL; + + mutex_init(&mdev_state->ops_lock); + + mutex_lock(&hct_data.lock); + if (hct_data.mdev_ref > 0) { + 
mutex_unlock(&hct_data.lock); + return -EBUSY; + } + + for (i = 0; i < MCCP_DEV_MAX; i++) { + if (test_bit(i, bitmap)) { + if (ref > iommu[i].ref) { + n = i; + ref = iommu[i].ref; + } + } + } + + if (n >= 0 && n < MCCP_DEV_MAX) { + mdev_state->iommu = &iommu[n]; + mdev_state->ref = iommu[n].ref++; + } else + ret = -EINVAL; + mutex_unlock(&hct_data.lock); + + return ret; +} + +static int hct_init_dev(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = vdev_to_mdev_state(vdev); + struct mdev_device *mdev = to_mdev_device(vdev->dev); + int ret = 0; + + if (!mdev || !mdev_state) + return -EINVAL; + + ret = hct_mdev_state_init(mdev_state); + if (ret) + return ret; + + mdev_state->mdev = mdev; + return 0; +} + +static void hct_release_dev(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = vdev_to_mdev_state(vdev); + struct mdev_device *mdev = to_mdev_device(vdev->dev); + + mutex_lock(&hct_data.lock); + if (hct_data.mdev_ref > 0) + pr_warn("The mdev device is in use.\n"); + else { + mdev_state->iommu->ref--; + dev_set_drvdata(&mdev->dev, NULL); + } + mutex_unlock(&hct_data.lock); +} + +static ssize_t hct_read(struct vfio_device *vdev, char __user *buf, + size_t count, loff_t *ppos) +{ + struct mdev_device *mdev = to_mdev_device(vdev->dev); + unsigned int done = 0; + int ret; + u32 val; + size_t filled; + + while (count) { + if (count >= 4 && !(*ppos % 4)) { + ret = hct_access(mdev, (u8 *)&val, sizeof(u32), *ppos, false); + if (ret <= 0) + goto read_err; + + if (copy_to_user(buf, &val, sizeof(u32))) + goto read_err; + + filled = 4; + } else if (count >= 2 && !(*ppos % 2)) { + ret = hct_access(mdev, (u8 *)&val, sizeof(u16), *ppos, false); + if (ret <= 0) + goto read_err; + + if (copy_to_user(buf, &val, sizeof(u16))) + goto read_err; + + filled = 2; + } else { + ret = hct_access(mdev, (u8 *)&val, sizeof(u8), *ppos, false); + if (ret <= 0) + goto read_err; + + if (copy_to_user(buf, &val, sizeof(u8))) + goto read_err; + + filled = 1; + } + + count -= 
filled; + done += filled; + *ppos += filled; + buf += filled; + } + + return done; + +read_err: + return -EFAULT; +} + +static ssize_t hct_write(struct vfio_device *vdev, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct mdev_device *mdev = to_mdev_device(vdev->dev); + unsigned int done = 0; + int ret; + u64 val; + u8 idx; + + while (count) { + size_t filled; + + if (count == MCCP_DEV_ID_SIZE && *ppos == MCCP_GET_QUEUE_FLAG) { + struct mdev_state *mdev_state; + struct hct_dev_ctx *dev_ctx; + struct hct_cmd_queue *cmd_q; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) + goto write_err; + + if (copy_from_user(&val, buf, sizeof(u64)) || + val >= MCCP_DEV_QUEUE_MAX || + val < mdev_state->efd_start) + goto write_err; + + idx = val - mdev_state->efd_start; + dev_ctx = &mdev_state->iommu->dev_ctx; + cmd_q = &dev_ctx->cmd_q[idx]; + + mutex_lock(&cmd_q->q_lock); + if (kfifo_avail(&cmd_q->ectx_fifo)) + kfifo_put(&cmd_q->ectx_fifo, mdev_state->trigger[idx]); + mutex_unlock(&cmd_q->q_lock); + + filled = MCCP_DEV_ID_SIZE; + } else if (count >= 4 && !(*ppos % 4)) { + if (copy_from_user(&val, buf, sizeof(u32))) + goto write_err; + + ret = hct_access(mdev, (u8 *)&val, sizeof(u32), *ppos, true); + if (ret <= 0) + goto write_err; + + filled = 4; + } else if (count >= 2 && !(*ppos % 2)) { + if (copy_from_user(&val, buf, sizeof(u16))) + goto write_err; + + ret = hct_access(mdev, (u8 *)&val, sizeof(u16), *ppos, true); + if (ret <= 0) + goto write_err; + + filled = 2; + } else { + if (copy_from_user(&val, buf, sizeof(u8))) + goto write_err; + + ret = hct_access(mdev, (u8 *)&val, sizeof(u8), *ppos, true); + if (ret <= 0) + goto write_err; + + filled = 1; + } + count -= filled; + done += filled; + *ppos += filled; + buf += filled; + } + + return done; +write_err: + return -EFAULT; +} + +static int hct_get_region_info(struct mdev_device *mdev, + struct vfio_region_info *region_info, + u16 *cap_type_id, void **cap_type) +{ + struct mdev_state *mdev_state = 
NULL; + struct pci_dev *pdev = NULL; + unsigned int size = 0; + u32 bar_index; + + if (!mdev) + return -EINVAL; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) + return -ENODEV; + + bar_index = region_info->index; + if (bar_index >= VFIO_PCI_NUM_REGIONS) + return -EINVAL; + + pdev = mdev_state->iommu->pdev; + mutex_lock(&mdev_state->ops_lock); + + switch (bar_index) { + case VFIO_PCI_CONFIG_REGION_INDEX: + size = pdev->cfg_size; + break; + case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: + size = pci_resource_len(pdev, bar_index); + break; + default: + size = 0; + break; + } + + mdev_state->region_info[bar_index].size = size; + mdev_state->region_info[bar_index].vfio_offset = + MCCP_VFIO_PCI_INDEX_TO_OFFSET(bar_index); + + region_info->size = size; + region_info->offset = MCCP_VFIO_PCI_INDEX_TO_OFFSET(bar_index); + region_info->flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE; + if (size >= PAGE_SIZE) + region_info->flags |= VFIO_REGION_INFO_FLAG_MMAP; + + mutex_unlock(&mdev_state->ops_lock); + return 0; +} + +static int hct_get_irq_info(struct mdev_device *mdev, + struct vfio_irq_info *irq_info) +{ + switch (irq_info->index) { + case VFIO_PCI_INTX_IRQ_INDEX: + case VFIO_PCI_MSI_IRQ_INDEX: + case VFIO_PCI_MSIX_IRQ_INDEX: + case VFIO_PCI_REQ_IRQ_INDEX: + break; + + default: + return -EINVAL; + } + + irq_info->flags = VFIO_IRQ_INFO_EVENTFD; + irq_info->count = 1; + + if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX) + irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE | + VFIO_IRQ_INFO_AUTOMASKED); + else + irq_info->flags |= VFIO_IRQ_INFO_NORESIZE; + + return 0; +} + +static int hct_get_device_info(struct mdev_device *mdev, + struct vfio_device_info *dev_info) +{ + dev_info->flags = VFIO_DEVICE_FLAGS_PCI; + dev_info->num_regions = VFIO_PCI_NUM_REGIONS; + dev_info->num_irqs = VFIO_PCI_NUM_IRQS; + + return 0; +} + +/* each ccp vq corresponding to one eventfd */ +static int hct_set_irq_efds(struct mdev_device *mdev, + struct 
vfio_irq_set *hdr, + void *data) +{ + struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev); + int *fd = (int *)data; + int i; + + if (!mdev_state || !data) + return -EINVAL; + + if (hdr->index != VFIO_PCI_MSIX_IRQ_INDEX) + return -EINVAL; + + if ((hdr->flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) != VFIO_IRQ_SET_ACTION_TRIGGER) + return -EINVAL; + + if (hdr->start + hdr->count > MCCP_DEV_QUEUE_MAX) + return -EINVAL; + + mdev_state->efd_start = hdr->start; + for (i = 0; i < hdr->count; i++) { + struct eventfd_ctx *trigger; + + trigger = eventfd_ctx_fdget(fd[i]); + if (IS_ERR(trigger)) + return -1; + + mdev_state->trigger[mdev_state->efd_count++] = trigger; + } + + return 0; +} + +static int hct_reset(struct mdev_device *mdev) +{ + struct mdev_state *mdev_state = NULL; + + if (!mdev) + return -EINVAL; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) + return -EINVAL; + + return 0; +} + +static long hct_ioctl(struct vfio_device *vdev, unsigned int cmd, + unsigned long arg) +{ + struct mdev_device *mdev = to_mdev_device(vdev->dev); + struct mdev_state *mdev_state = NULL; + unsigned long minsz; + int ret = 0; + + if (!mdev) + return -EINVAL; + + mdev_state = dev_get_drvdata(&mdev->dev); + if (!mdev_state) + return -ENODEV; + + if (!mdev_state->iommu || !mdev_state->iommu->pdev) + return -EIO; + + switch (cmd) { + case VFIO_DEVICE_GET_INFO: + { + struct vfio_device_info info; + + minsz = offsetofend(struct vfio_device_info, num_irqs); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + ret = hct_get_device_info(mdev, &info); + if (ret) + return ret; + + memcpy(&mdev_state->dev_info, &info, sizeof(info)); + + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + + return 0; + } + case VFIO_DEVICE_GET_REGION_INFO: + { + struct vfio_region_info info; + u16 cap_type_id = 0; + void *cap_type = NULL; + + minsz = offsetofend(struct vfio_region_info, offset); + + if 
(copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + ret = hct_get_region_info(mdev, &info, &cap_type_id, + &cap_type); + if (ret) + return ret; + + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + + return 0; + } + + case VFIO_DEVICE_GET_IRQ_INFO: + { + struct vfio_irq_info info; + + minsz = offsetofend(struct vfio_irq_info, count); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if ((info.argsz < minsz) || + (info.index >= mdev_state->dev_info.num_irqs)) + return -EINVAL; + + ret = hct_get_irq_info(mdev, &info); + if (ret) + return ret; + + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + + return 0; + } + case VFIO_DEVICE_SET_IRQS: + { + struct vfio_irq_set hdr; + u8 *data = NULL; + size_t data_size = 0; + + minsz = offsetofend(struct vfio_irq_set, count); + + if (copy_from_user(&hdr, (void __user *)arg, minsz)) + return -EFAULT; + + ret = vfio_set_irqs_validate_and_prepare(&hdr, mdev_state->dev_info.num_irqs, + mdev_state->dev_info.num_irqs, &data_size); + if (ret) + return ret; + + if (data_size) { + data = memdup_user((void __user *)(arg + minsz), data_size); + if (IS_ERR(data)) + return PTR_ERR(data); + } + + mutex_lock(&mdev_state->ops_lock); + ret = hct_set_irq_efds(mdev, &hdr, data); + mutex_unlock(&mdev_state->ops_lock); + kfree(data); + + return ret; + } + case VFIO_DEVICE_RESET: + return hct_reset(mdev); + } + return -ENOTTY; +} + +static int hct_open(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = vdev_to_mdev_state(vdev); + + if (!mdev_state) + return -ENODEV; + + if (!mdev_state->iommu || !mdev_state->iommu->pdev) + return -EIO; + + mutex_lock(&hct_data.lock); + hct_data.mdev_ref++; + mutex_unlock(&hct_data.lock); + + return 0; +} + +static void hct_close(struct vfio_device *vdev) +{ + struct mdev_state *mdev_state = vdev_to_mdev_state(vdev); + int i; + + if (!mdev_state || !mdev_state->iommu) 
+ return; + + for (i = 0; i < mdev_state->efd_count; i++) + eventfd_ctx_put(mdev_state->trigger[i]); + mdev_state->efd_count = 0; + + mutex_lock(&hct_data.lock); + hct_data.mdev_ref--; + mutex_unlock(&hct_data.lock); +} + +static ssize_t address_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mdev_state *mdev_state = dev_get_drvdata(dev); + struct pci_dev *pdev = NULL; + ssize_t size; + + if (!mdev_state || !mdev_state->iommu) + goto exit; + + mutex_lock(&mdev_state->iommu->lock); + if (!mdev_state->iommu->pdev || + mdev_state->iommu->magic != MCCP_SHARE_IOMMU_MAGIC) { + mutex_unlock(&mdev_state->iommu->lock); + goto exit; + } + pdev = mdev_state->iommu->pdev; + size = sprintf(buf, "%04x:%02x:%02x.%x", + pci_domain_nr(pdev->bus), + pdev->bus->number, + 0x00ff & (pdev->devfn >> 8), + 0x00ff & pdev->devfn); + mutex_unlock(&mdev_state->iommu->lock); + return size; + +exit: + return sprintf(buf, "\n"); +} + +static ssize_t id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mdev_state *mdev_state = dev_get_drvdata(dev); + ssize_t size; + + if (!mdev_state || !mdev_state->iommu) + goto exit; + + mutex_lock(&mdev_state->iommu->lock); + if (!mdev_state->iommu->pdev || + mdev_state->iommu->magic != MCCP_SHARE_IOMMU_MAGIC) { + mutex_unlock(&mdev_state->iommu->lock); + goto exit; + } + + size = sprintf(buf, "%lu", mdev_state->iommu->id); + mutex_unlock(&mdev_state->iommu->lock); + return size; + +exit: + return sprintf(buf, "\n"); +} + +static ssize_t idx_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mdev_state *mdev_state = dev_get_drvdata(dev); + ssize_t size; + + if (!mdev_state || !mdev_state->iommu) + goto exit; + + mutex_lock(&mdev_state->iommu->lock); + if (!mdev_state->iommu->pdev || + mdev_state->iommu->magic != MCCP_SHARE_IOMMU_MAGIC) { + mutex_unlock(&mdev_state->iommu->lock); + goto exit; + } + + size = sprintf(buf, "%lu", mdev_state->ref); + 
mutex_unlock(&mdev_state->iommu->lock); + return size; + +exit: + return sprintf(buf, "\n"); +} + +static DEVICE_ATTR_RO(address); +static DEVICE_ATTR_RO(id); +static DEVICE_ATTR_RO(idx); + +static struct attribute *mdev_dev_attrs[] = { + &dev_attr_address.attr, + &dev_attr_id.attr, + &dev_attr_idx.attr, + NULL, +}; + +static const struct attribute_group mdev_dev_group = { + .name = "vendor", + .attrs = mdev_dev_attrs, +}; + +static const struct attribute_group *hct_mdev_groups[] = { + &mdev_dev_group, + NULL, +}; + +static void hct_mmap_open(struct vm_area_struct *vma) +{ + zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start); +} + +static void hct_mmap_close(struct vm_area_struct *vma) +{ +} + +static vm_fault_t hct_mmap_fault(struct vm_fault *vmf) +{ + vm_fault_t ret = VM_FAULT_NOPAGE; + struct vm_area_struct *vma = vmf->vma; + + if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, pgprot_decrypted(vma->vm_page_prot))) + ret = VM_FAULT_SIGBUS; + return ret; +} + +static const struct vm_operations_struct hct_mmap_ops = { + .open = hct_mmap_open, + .close = hct_mmap_close, + .fault = hct_mmap_fault, +}; + +static int hct_mmap(struct vfio_device *vdev, struct vm_area_struct *vma) +{ + struct mdev_device *mdev = to_mdev_device(vdev->dev); + struct mdev_state *mds = dev_get_drvdata(&mdev->dev); + struct pci_dev *pdev = mds->iommu->pdev; + unsigned int index; + + index = vma->vm_pgoff >> (40 - PAGE_SHIFT); + vma->vm_private_data = mdev; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_pgoff = pci_resource_start(pdev, index) >> PAGE_SHIFT; + vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); + vma->vm_ops = &hct_mmap_ops; + return 0; +} + +static const struct vfio_device_ops hct_mdev_ops = { + .init = hct_init_dev, + .release = hct_release_dev, + .open_device = hct_open, + .close_device = hct_close, + .read = hct_read, + .write = hct_write, + .ioctl = hct_ioctl, + .mmap = hct_mmap, + 
.bind_iommufd = vfio_iommufd_emulated_bind, + .unbind_iommufd = vfio_iommufd_emulated_unbind, + .attach_ioas = vfio_iommufd_emulated_attach_ioas, + .detach_ioas = vfio_iommufd_emulated_detach_ioas, +}; + +static int hct_mdev_probe(struct mdev_device *mdev) +{ + struct mdev_state *mdev_state = NULL; + int ret; + + if (!mdev) + return -EINVAL; + + mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev, + &hct_mdev_ops); + if (IS_ERR(mdev_state)) + return PTR_ERR(mdev_state); + + ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev); + if (ret) { + vfio_put_device(&mdev_state->vdev); + return ret; + } + + dev_set_drvdata(&mdev->dev, mdev_state); + return 0; +} + +static void hct_mdev_remove(struct mdev_device *mdev) +{ + struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev); + + vfio_unregister_group_dev(&mdev_state->vdev); + vfio_put_device(&mdev_state->vdev); +} + +static unsigned int hct_mdev_get_available(struct mdev_type *mtype) +{ + return MCCP_INSTANCE_MAX; +} + +static ssize_t hct_mdev_show_description(struct mdev_type *mtype, char *buf) +{ + return sprintf(buf, "This is HYGON CCP device!"); +} + +struct mdev_driver hct_mdev_driver = { + .device_api = VFIO_DEVICE_API_PCI_STRING, + .driver = { + .name = "hct_mdev", + .owner = THIS_MODULE, + .mod_name = KBUILD_MODNAME, + .dev_groups = hct_mdev_groups, + }, + .probe = hct_mdev_probe, + .remove = hct_mdev_remove, + .get_available = hct_mdev_get_available, + .show_description = hct_mdev_show_description, +}; + +struct hct_private { + struct list_head head; + struct mutex lock; + unsigned int id; +}; + +static int hct_share_open(struct inode *inode, struct file *file) +{ + int ret = 0; + struct hct_private *private; + unsigned int id; + + private = kzalloc(sizeof(*private), GFP_KERNEL); + if (!private) + return -ENOMEM; + + mutex_lock(&hct_data.lock); + bitmap_set(hct_data.ids, 0, 1); + id = (unsigned int)find_first_zero_bit(hct_data.ids, MCCP_INSTANCE_MAX); + if (id < MCCP_INSTANCE_MAX) + 
bitmap_set(hct_data.ids, id, 1); + mutex_unlock(&hct_data.lock); + + if (id >= MCCP_INSTANCE_MAX) { + kfree(private); + return -EBUSY; + } + + mutex_lock(&hct_share.lock); + hct_share.ref++; + hct_share.pagecount = MCCP_DEV_MAX; + mutex_unlock(&hct_share.lock); + + file->private_data = private; + private->id = id << MCCP_INSTANCE_OFFSET; + INIT_LIST_HEAD(&private->head); + mutex_init(&private->lock); + + return ret; +} + +static bool is_invalid_reserved_pfn(unsigned long pfn) +{ + if (pfn_valid(pfn)) + return PageReserved(pfn_to_page(pfn)); + + return true; +} + +static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, + unsigned long vaddr, unsigned long *pfn, + bool write_fault) +{ + int ret; + + ret = follow_pfn(vma, vaddr, pfn); + if (ret) { + bool unlocked = false; + + ret = fixup_user_fault(mm, vaddr, + FAULT_FLAG_REMOTE | + (write_fault ? FAULT_FLAG_WRITE : 0), + &unlocked); + + if (unlocked) + return -EAGAIN; + + if (ret) + return ret; + + ret = follow_pfn(vma, vaddr, pfn); + } + + return ret; +} + +static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, + int prot, unsigned long *pfn) +{ + struct page *page[1]; + struct vm_area_struct *vma; + unsigned int flags = 0; + int ret; + + if (prot & IOMMU_WRITE) + flags |= FOLL_WRITE; + + mmap_read_lock(mm); + ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM, + page, NULL); + if (ret == 1) { + *pfn = page_to_pfn(page[0]); + ret = 0; + goto done; + } + + vaddr = untagged_addr(vaddr); + +retry: + vma = find_vma_intersection(mm, vaddr, vaddr + 1); + + if (vma && vma->vm_flags & VM_PFNMAP) { + ret = follow_fault_pfn(vma, mm, vaddr, pfn, prot & IOMMU_WRITE); + if (ret == -EAGAIN) + goto retry; + + if (!ret && !is_invalid_reserved_pfn(*pfn)) + ret = -EFAULT; + } +done: + mmap_read_unlock(mm); + + return ret; +} + +struct page **hct_pin_memory(struct hct_private *private, unsigned long uaddr, + unsigned long ulen, unsigned long *n) +{ + unsigned long npages, size; + int 
npinned; + struct page **pages; + unsigned long first, last; + + if (ulen == 0 || uaddr + ulen < uaddr) + return NULL; + + first = (uaddr & PAGE_MASK) >> PAGE_SHIFT; + last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT; + npages = (last - first + 1); + + if (WARN_ON_ONCE(npages > INT_MAX)) + return NULL; + + size = npages * sizeof(struct page *); + if (size > PAGE_SIZE) + pages = vmalloc(size); + else + pages = kmalloc(size, GFP_KERNEL); + + if (!pages) + return NULL; + + /* Pin the user virtual address. */ + npinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages); + if (npinned != npages) + goto err; + + *n = npages; + return pages; + +err: + if (npinned > 0) + unpin_user_pages(pages, npinned); + kvfree(pages); + return NULL; +} + +static void hct_unpin_memory(struct hct_private *private, struct page **pages, + unsigned long npages) +{ + unpin_user_pages(pages, npages); + kvfree(pages); +} + +static inline int is_dma_share(dma_addr_t dma_iova, size_t dma_size) +{ + if (dma_iova >= MCCP_DMA_IOVA_OFFSET && + dma_iova + dma_size <= MCCP_DMA_IOVA_OFFSET + MCCP_DMA_IOVA_SIZE) + return 1; + else + return 0; +} + +static int hct_add_dma_share_unsafe(dma_addr_t dma_iova, size_t dma_size) +{ + int ret = 0; + + if (!is_dma_share(dma_iova, dma_size)) + return 0; + + if (!hct_data.dma_share_size) { + hct_data.dma_share_iova = dma_iova; + hct_data.dma_share_size = dma_size; + } + + if (dma_iova != hct_data.dma_share_iova || + dma_size != hct_data.dma_share_size) + ret = -EINVAL; + else + hct_data.dma_share_ref++; + + return ret; +} + +static int hct_unmap_dma_share_unsafe(dma_addr_t dma_iova, size_t dma_size) +{ + if (!is_dma_share(dma_iova, dma_size)) + return -EINVAL; + + if (hct_data.dma_share_size) { + if (hct_data.dma_share_iova == dma_iova && + hct_data.dma_share_size == dma_size) + hct_data.dma_share_ref--; + + if (hct_data.dma_share_ref == 0) { + iommu_unmap(hct_data.domain, hct_data.dma_share_iova, + hct_data.dma_share_size); + hct_data.dma_share_size = 0; 
+ } + } + + return 0; +} + +static int hct_iommu_iova_check_unsafe(dma_addr_t dma_iova, size_t dma_size, + phys_addr_t phys_addr, + struct iommu_domain *domain) +{ + dma_addr_t iova; + int ret = 0; + size_t mapped = 0; + + iova = dma_iova; + while (iova < dma_iova + dma_size) { + phys_addr_t phys; + + phys = iommu_iova_to_phys(domain, iova); + if (phys) { + if ((phys_addr & PHY_ADDR_MASK) != (phys & PHY_ADDR_MASK)) { + pr_err("iova=0x%llx phys_addr=0x%llx phys=0x%llx, check fail.\n", + iova, phys_addr, phys); + ret = -1; + break; + } + mapped += PAGE_SIZE; + } + iova += PAGE_SIZE; + phys_addr += PAGE_SIZE; + } + + if (ret == 0 && mapped == dma_size) + ret = 1; + + return ret; +} + +static unsigned long get_num_contig_pages(unsigned long idx, + struct page **inpages, unsigned long npages) +{ + unsigned long paddr, next_paddr; + unsigned long i = idx + 1, pages = 1; + + /* find the number of contiguous pages starting from idx */ + paddr = page_to_phys(inpages[idx]); + while (i < npages) { + next_paddr = page_to_phys(inpages[i++]); + if ((paddr + PAGE_SIZE) == next_paddr) { + pages++; + paddr = next_paddr; + continue; + } + break; + } + + return pages; +} + +static struct hct_dma *hct_find_dma(struct hct_private *private, + dma_addr_t start, size_t size) +{ + struct hct_dma *dma, *tmp; + + list_for_each_entry_safe(dma, tmp, &private->head, next) { + if (dma->iova <= start && + dma->iova + dma->size >= start + size) + return dma; + } + + return NULL; +} + +/* + * Turns out AMD IOMMU has a page table bug where it won't map large pages + * to a region that previously mapped smaller pages. This should be fixed + * soon, so this is just a temporary workaround to break mappings down into + * PAGE_SIZE. Better to map smaller pages than nothing. 
+ */ +static int map_try_harder(struct iommu_domain *domain, dma_addr_t iova, + unsigned long pfn, long npage, int prot) +{ + long i; + int ret = 0; + + for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) { + ret = iommu_map(domain, iova, + (phys_addr_t)pfn << PAGE_SHIFT, + PAGE_SIZE, prot, GFP_KERNEL); + if (ret) + break; + } + + for (; i < npage && i > 0; i--, iova -= PAGE_SIZE) + iommu_unmap(domain, iova, PAGE_SIZE); + + return ret; +} + +/* + * only handle io-memory [vm_flags | VM_PFNMAP == true] + */ +static int hct_iommu_pfnmap(struct hct_private *private, struct hct_dma *dma) +{ + unsigned long pfn; + unsigned long vaddr; + dma_addr_t iova; + size_t mapped_size = 0; + size_t size; + int ret = 0; + + if (!private || !dma) + return -EINVAL; + + dma->pfnmap_flag = 1; + vaddr = dma->vaddr; + iova = dma->iova; + size = dma->size; + + mutex_lock(&hct_data.lock); + while (size) { + ret = vaddr_get_pfn(current->mm, vaddr, hct_data.prot, &pfn); + if (ret) + goto map_fail; + + ret = iommu_map(hct_data.domain, iova, + (phys_addr_t)pfn << PAGE_SHIFT, + 1 << PAGE_SHIFT, hct_data.prot, + GFP_KERNEL); + if (ret) + goto map_fail; + + size -= 1 << PAGE_SHIFT; + vaddr += 1 << PAGE_SHIFT; + iova += 1 << PAGE_SHIFT; + mapped_size += 1 << PAGE_SHIFT; + } + mutex_unlock(&hct_data.lock); + + list_add(&dma->next, &private->head); + return 0; + +map_fail: + mutex_unlock(&hct_data.lock); + iommu_unmap(hct_data.domain, dma->iova, mapped_size); + return ret; +} + +static int hct_iommu_map(struct hct_private *private, unsigned long vaddr, + dma_addr_t dma_iova, size_t dma_size) +{ + struct hct_dma *dma; + struct page **pages; + unsigned long n, i, npages; + dma_addr_t iova, iova_end, iova_next; + int ret = 0; + size_t mapped_size = 0; + size_t iova_size = dma_size; + + if (!dma_size || (vaddr | dma_iova | dma_size) & (PAGE_SIZE - 1)) + return -EINVAL; + + if (hct_find_dma(private, dma_iova, dma_size)) + return 0; + + dma = kzalloc(sizeof(*dma), GFP_KERNEL); + if (!dma) + return 
-ENOMEM; + + pages = hct_pin_memory(private, vaddr, dma_size, &n); + if (!pages) { + /* We will think the vm_flags includes VM_PFNMAP. */ + dma->vaddr = vaddr; + dma->iova = dma_iova; + dma->size = dma_size; + ret = hct_iommu_pfnmap(private, dma); + if (ret) + kfree(dma); + return ret; + } + + dma->vaddr = vaddr; + dma->iova = dma_iova; + dma->pages = pages; + dma->size = dma_size; + dma->npages = n; + + iova = dma_iova; + iova_end = dma_iova + dma_size; + iova_size = dma_size; + + mutex_lock(&hct_data.lock); + for (i = 0; iova < iova_end && i < n; iova = iova_next, i += npages) { + size_t len; + phys_addr_t phys; + + npages = get_num_contig_pages(i, pages, n); + + /* When the value of npages is 524288, the value of npages * PAGE_SIZE + * will be 0x80000000 (bit31 is 1). + * When the value of npages is greater than 524288, if the type of len is int, + * the len will be a negative value. + */ + len = min_t(size_t, (npages * PAGE_SIZE), iova_size); + phys = page_to_phys(pages[i]); + + iova_size -= len; + iova_next = iova + len; + + ret = hct_iommu_iova_check_unsafe(iova, len, phys, hct_data.domain); + if (ret < 0) { + ret = -EBUSY; + goto map_fail; + } else if (ret > 0) { + ret = 0; + continue; + } + + ret = iommu_map(hct_data.domain, iova, phys, len, hct_data.prot, GFP_KERNEL); + if (ret) { + if (ret == -EBUSY) + ret = map_try_harder(hct_data.domain, iova, + phys >> PAGE_SHIFT, + len >> PAGE_SHIFT, + hct_data.prot); + if (ret) + goto map_fail; + } + mapped_size += len; + cond_resched(); + } + + ret = hct_add_dma_share_unsafe(dma_iova, dma_size); + if (ret) + goto map_fail; + + mutex_unlock(&hct_data.lock); + list_add(&dma->next, &private->head); + return 0; +map_fail: + if (mapped_size) + iommu_unmap(hct_data.domain, dma_iova, mapped_size); + mutex_unlock(&hct_data.lock); + hct_unpin_memory(private, pages, n); + kfree(dma); + return ret; +} + +static void hct_iommu_unmap(struct hct_private *private, + dma_addr_t iova, size_t size) +{ + struct iommu_domain *domain = 
hct_data.domain; + struct hct_dma *dma; + + if (!size || (iova | size) & (PAGE_SIZE - 1)) + return; + + dma = hct_find_dma(private, iova, size); + if (!dma) + return; + + mutex_lock(&hct_data.lock); + iommu_unmap(domain, dma->iova, dma->size); + if (dma->pfnmap_flag == 0) + hct_unpin_memory(private, dma->pages, dma->npages); + list_del(&dma->next); + kfree(dma); + mutex_unlock(&hct_data.lock); +} + +static void hct_iommu_unmap_all(struct hct_private *private) +{ + struct iommu_domain *domain = hct_data.domain; + struct hct_dma *dma, *tmp; + + mutex_lock(&hct_data.lock); + list_for_each_entry_safe(dma, tmp, &private->head, next) { + if (hct_unmap_dma_share_unsafe(dma->iova, dma->size)) + iommu_unmap(domain, dma->iova, dma->size); + if (dma->pfnmap_flag == 0) + hct_unpin_memory(private, dma->pages, dma->npages); + cond_resched(); + list_del(&dma->next); + kfree(dma); + } + mutex_unlock(&hct_data.lock); +} + +static struct page *hct_get_page(pgoff_t page_idx) +{ + u64 *node; + + mutex_lock(&hct_share.lock); + if (!hct_share.pages[page_idx]) { + hct_share.pages[page_idx] = + alloc_pages(GFP_HIGHUSER | __GFP_ZERO, 0); + if (!hct_share.pages[page_idx]) { + mutex_unlock(&hct_share.lock); + return NULL; + } + } + get_page(hct_share.pages[page_idx]); + + node = page_to_virt(hct_share.pages[page_idx]) + PAGE_SIZE - 8; + *node = hct_data.iommu[page_idx].pdev->dev.numa_node; + mutex_unlock(&hct_share.lock); + + return hct_share.pages[page_idx]; +} + +static void hct_put_pages(void) +{ + int i; + + for (i = 0; i < hct_share.pagecount; i++) { + if (!hct_share.pages[i]) + continue; + + put_page(hct_share.pages[i]); + hct_share.pages[i] = NULL; + } +} + +/* Clear status information when exiting abnormally. 
*/ +static void hct_clear_shared_lock_memory(unsigned int gid) +{ + int *base; + int *queue_lck; + int dev_idx; + int queue_idx; + + for (dev_idx = 0; dev_idx < MCCP_DEV_MAX && + hct_share.pages[dev_idx]; dev_idx++) { + base = (int *)page_to_virt(hct_share.pages[dev_idx]); + for (queue_idx = 0; queue_idx < MCCP_DEV_QUEUE; queue_idx++) { + queue_lck = base + queue_idx; + if (*queue_lck == gid) + *queue_lck = 0; /* vq userid will be changed. */ + } + } +} + +static long hct_share_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + struct hct_dev_ctrl dev_ctrl; + unsigned int cmd_id; + unsigned int len; + unsigned int pasid; + int ret = 0; + struct hct_private *private = file->private_data; + + if (_IOC_TYPE(ioctl) != MCCP_SHARE_IOC_TYPE) + return -EINVAL; + + cmd_id = _IOC_NR(ioctl); + len = _IOC_SIZE(ioctl); + + if (cmd_id != MCCP_SHARE_OP) + return -EINVAL; + + if (len != sizeof(dev_ctrl)) + return -EINVAL; + + if (copy_from_user(&dev_ctrl, (void __user *)arg, sizeof(dev_ctrl))) + return -EINVAL; + + mutex_lock(&private->lock); + switch (dev_ctrl.op) { + case MCCP_SHARE_OP_DMA_MAP: + ret = hct_iommu_map(private, dev_ctrl.vaddr, dev_ctrl.iova, dev_ctrl.size); + break; + case MCCP_SHARE_OP_DMA_UNMAP: + hct_iommu_unmap(private, dev_ctrl.iova, dev_ctrl.size); + ret = 0; + break; + case MCCP_SHARE_OP_DMA_UNMAP_ALL: + hct_iommu_unmap_all(private); + ret = 0; + break; + case MCCP_SHARE_OP_GET_ID: + dev_ctrl.id = private->id; + if (copy_to_user((void __user *)arg, &dev_ctrl, sizeof(dev_ctrl))) + ret = -EINVAL; + else + ret = 0; + break; + case MCCP_SHARE_OP_GET_PASID: + /* The different virtual machines is distinguished through pasid. 
*/ + pasid = private->id >> MCCP_INSTANCE_OFFSET; + if (pasid >= MCCP_PASID_SIZE) { + ret = -EINVAL; + break; + } + + dev_ctrl.id = pasid; + if (copy_to_user((void __user *)arg, &dev_ctrl, sizeof(dev_ctrl))) + ret = -EINVAL; + break; + case MCCP_SHARE_OP_GET_VERSION: + memcpy(dev_ctrl.version, VERSION_STRING, sizeof(VERSION_STRING)); + if (copy_to_user((void __user *)arg, &dev_ctrl, sizeof(dev_ctrl))) + ret = -EINVAL; + break; + default: + ret = -EINVAL; + break; + } + mutex_unlock(&private->lock); + + return ret; +} + +static int hct_share_close(struct inode *inode, struct file *file) +{ + struct hct_private *private = file->private_data; + unsigned int id = private->id >> MCCP_INSTANCE_OFFSET; + + mutex_lock(&hct_share.lock); + /* For the vm scenario, the hct_share.vaddr value is NULL. */ + if (hct_share.vaddr) { + struct hct_shared_cfg *cfg = hct_share.vaddr; + int i; + + if (private->id == cfg->ccps_ref_lock) + cfg->ccps_ref_lock = 0; + + for (i = 0; i < MCCP_DEV_MAX; i++) + if (private->id == (MCCP_INSTANCE_MASK & cfg->ccp_state[i])) + cfg->ccp_state[i] = 0; + + for (i = 0; i < MCCP_QUEUES_MAX; i++) + if (private->id == cfg->ccp_queue_state[i]) + cfg->ccp_queue_state[i] = MCCP_QUEUE_NEED_INIT; + + for (i = 0; i < MCCP_IOVA_MAX_SLOT; i++) + if (private->id == cfg->iova_slot[i]) + cfg->iova_slot[i] = 0; + } + + hct_clear_shared_lock_memory(private->id); + + hct_share.ref--; + if (!hct_share.ref) { + hct_put_pages(); + if (hct_share.vaddr) + memset(hct_share.vaddr, 0x00, hct_share.size); + } + mutex_unlock(&hct_share.lock); + + mutex_lock(&hct_data.lock); + if (id < MCCP_INSTANCE_MAX) + bitmap_clear(hct_data.ids, id, 1); + mutex_unlock(&hct_data.lock); + + mutex_lock(&private->lock); + hct_iommu_unmap_all(private); + mutex_unlock(&private->lock); + + kfree(private); + return 0; +} + +static vm_fault_t hct_cdev_vma_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + pgoff_t page_idx = (vmf->address - vma->vm_start) >> PAGE_SHIFT; + + if 
(page_idx >= hct_share.pagecount) + return VM_FAULT_SIGBUS; + + vmf->page = hct_get_page(page_idx); + if (!vmf->page) + return VM_FAULT_SIGBUS; + + return 0; +} + +static const struct vm_operations_struct hct_cdev_vm_ops = { + .fault = hct_cdev_vma_fault, +}; + +static int hct_share_mmap(struct file *file, struct vm_area_struct *vma) +{ + unsigned long len; + int ret = 0; + + mutex_lock(&hct_share.lock); + len = vma->vm_end - vma->vm_start; + if (len == MCCP_SHARED_SIZE) { + /* The required size for vm is 64KB, + * and will follow the pagefault process. + */ + vma->vm_ops = &hct_cdev_vm_ops; + goto exit; + } + + if (unlikely(!hct_share.vaddr)) { + hct_share.size = (vma->vm_end - vma->vm_start); + hct_share.vaddr = kzalloc(hct_share.size, GFP_KERNEL); + } + + if (!hct_share.vaddr) { + ret = -ENOMEM; + goto exit; + } + + if (hct_share.size != (vma->vm_end - vma->vm_start)) { + ret = -EINVAL; + pr_err("invalid hct share size\n"); + goto exit; + } + + ret = remap_pfn_range(vma, vma->vm_start, + virt_to_phys(hct_share.vaddr) >> PAGE_SHIFT, + hct_share.size, + vma->vm_page_prot); +exit: + mutex_unlock(&hct_share.lock); + return ret; +} + +static const struct file_operations hct_share_fops = { + .owner = THIS_MODULE, + .open = hct_share_open, + .release = hct_share_close, + .mmap = hct_share_mmap, + .unlocked_ioctl = hct_share_ioctl, +}; + +static struct miscdevice hct_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "hct_share", + .fops = &hct_share_fops, + .mode = 0666, +}; + +static int hct_share_init(void) +{ + int i; + int ret; + + memset(&hct_data, 0x00, sizeof(hct_data)); + mutex_init(&hct_data.lock); + + for (i = 0; i < MCCP_DEV_MAX; i++) + mutex_init(&hct_data.iommu[i].lock); + + ret = misc_register(&hct_misc); + if (!ret) { + hct_data.domain = iommu_domain_alloc(&pci_bus_type); + if (!hct_data.domain) { + misc_deregister(&hct_misc); + if (!pci_bus_type.iommu_ops) { + pr_err("iommu is disabled\n"); + return -ENODEV; + } + return -ENOMEM; + } + hct_data.prot = 
IOMMU_READ | IOMMU_WRITE; + } + + return ret; +} + +static void hct_share_exit(void) +{ + int i; + struct hct_iommu *iommu; + struct iommu_domain *domain; + struct pci_dev *pdev; + + mutex_lock(&hct_data.lock); + for (i = 0; i < MCCP_DEV_MAX; i++) { + iommu = &hct_data.iommu[i]; + pdev = iommu->pdev; + if (pdev) { + domain = iommu_get_domain_for_dev(&pdev->dev); + if (domain == hct_data.domain) + iommu_detach_device(domain, &pdev->dev); + } + } + mutex_unlock(&hct_data.lock); + + if (hct_data.domain) + iommu_domain_free(hct_data.domain); + + misc_deregister(&hct_misc); + kfree(hct_share.vaddr); +} + +static int hct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + return hct_iommu_alloc(pdev); +} + +static void hct_pci_remove(struct pci_dev *pdev) +{ + struct hct_iommu *iommu; + struct hct_dev_ctx *dev_ctx; + int i; + + iommu = pci_get_drvdata(pdev); + if (!iommu) { + pci_set_drvdata(pdev, NULL); + return; + } + + dev_ctx = &iommu->dev_ctx; + for (i = 0; i < dev_ctx->q_count; i++) + kfifo_free(&dev_ctx->cmd_q[i].ectx_fifo); + + if (dev_ctx->io_regs) + iounmap(dev_ctx->io_regs); + if (dev_ctx->irq) { + tasklet_kill(&dev_ctx->irq_tasklet); + free_irq(dev_ctx->irq, dev_ctx); + dev_ctx->irq = 0; + pci_free_irq_vectors(pdev); + pci_clear_master(pdev); + pci_disable_device(pdev); + } + hct_iommu_free(iommu); + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver hct_pci_driver = { + .name = "hct", + .id_table = NULL, + .probe = hct_pci_probe, + .remove = hct_pci_remove, +}; + +static const struct file_operations hct_vd_fops = { + .owner = THIS_MODULE, +}; + +static void hct_device_release(struct device *dev) +{ + dev_dbg(dev, "hct: released\n"); +} + +#define CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 +#define CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 +#define CPUID_VENDOR_HygonGenuine_edx 0x6e65476e + +static int __init hct_dev_init(void) +{ + int ret = 0; + u32 vendor_ebx = 0; + u32 vendor_ecx = 0; + u32 vendor_edx = 0; + u32 vendor_eax = 0; + + 
cpuid(0, &vendor_eax, &vendor_ebx, &vendor_ecx, &vendor_edx); + + /* HygonGenuine */ + if (!(vendor_ebx == CPUID_VENDOR_HygonGenuine_ebx && + vendor_ecx == CPUID_VENDOR_HygonGenuine_ecx && + vendor_edx == CPUID_VENDOR_HygonGenuine_edx)) { + pr_err("Not hygon hardware\n"); + return -1; + } + + ret = mdev_register_driver(&hct_mdev_driver); + if (ret) + return ret; + + memset(&hct_dev, 0, sizeof(hct_dev)); + + ret = alloc_chrdev_region(&hct_dev.vd_devt, 0, MINORMASK + 1, + MCCP_NAME); + + if (ret < 0) { + pr_err("Error: failed to register hct_dev, err:%d\n", ret); + goto failed0; + } + + cdev_init(&hct_dev.vd_cdev, &hct_vd_fops); + cdev_add(&hct_dev.vd_cdev, hct_dev.vd_devt, MINORMASK + 1); + + hct_dev.vd_class = class_create(MCCP_CLASS_NAME); + if (IS_ERR(hct_dev.vd_class)) { + pr_err("Error: failed to register hct_dev class\n"); + ret = PTR_ERR(hct_dev.vd_class); + goto failed1; + } + + hct_dev.dev.class = hct_dev.vd_class; + hct_dev.dev.release = hct_device_release; + dev_set_name(&hct_dev.dev, "%s", MCCP_NAME); + hct_dev.dev.devt = hct_dev.vd_devt; + + ret = device_register(&hct_dev.dev); + if (ret) + goto failed2; + + ret = mdev_register_parent(&hct_dev.mdev_parent, &hct_dev.dev, + &hct_mdev_driver, hct_mdev_types, + ARRAY_SIZE(hct_mdev_types)); + if (ret) + goto failed3; + + ret = hct_share_init(); + if (ret) + goto failed4; + + memset(&hct_share, 0x00, sizeof(hct_share)); + mutex_init(&hct_share.lock); + + ret = pci_register_driver(&hct_pci_driver); + if (ret) + goto failed5; + + goto all_done; + +failed5: + hct_share_exit(); + +failed4: + mdev_unregister_parent(&hct_dev.mdev_parent); + +failed3: + device_unregister(&hct_dev.dev); + +failed2: + class_destroy(hct_dev.vd_class); + +failed1: + cdev_del(&hct_dev.vd_cdev); + unregister_chrdev_region(hct_dev.vd_devt, MINORMASK + 1); + +failed0: + mdev_unregister_driver(&hct_mdev_driver); + +all_done: + return ret; +} + +static void __exit hct_dev_exit(void) +{ + hct_share_exit(); + hct_dev.dev.bus = NULL; + 
mdev_unregister_parent(&hct_dev.mdev_parent); + + device_unregister(&hct_dev.dev); + cdev_del(&hct_dev.vd_cdev); + unregister_chrdev_region(hct_dev.vd_devt, MINORMASK + 1); + class_destroy(hct_dev.vd_class); + mdev_unregister_driver(&hct_mdev_driver); + hct_dev.vd_class = NULL; + + pci_unregister_driver(&hct_pci_driver); +} + +module_init(hct_dev_init) +module_exit(hct_dev_exit) + +MODULE_LICENSE("GPL"); +MODULE_VERSION(VERSION_STRING); +MODULE_AUTHOR(DRIVER_AUTHOR); -- Gitee From 50dab1d5e295c7d43712794e6f972ffe519275f3 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Thu, 25 Apr 2024 19:36:07 +0800 Subject: [PATCH 0903/2138] anolis: HCT supporting memory encryption host ANBZ: #8582 1. hct support run vfio-noiommu mode on memmory-encrypt host 2. change wb attribute for the bar memory of hct virtual machine. Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3119 --- drivers/crypto/ccp/hygon/hct.c | 132 ++++++++++++++++++++++++++++++++- 1 file changed, 131 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/hygon/hct.c b/drivers/crypto/ccp/hygon/hct.c index dd386fec2b07..392371af865a 100644 --- a/drivers/crypto/ccp/hygon/hct.c +++ b/drivers/crypto/ccp/hygon/hct.c @@ -27,18 +27,21 @@ #include #include #include +#include #include /** * VERSION_STRING modification instructions: * 0.1 -- support hct/mdev mode. * 0.2 -- supoort qemu virtualization. + * 0.3 -- support host-noiommu mode memory encryption function, + * and performance optimization in virtual machines (enable caching). 
*/ #undef pr_fmt #define pr_fmt(fmt) "hct: " fmt -#define VERSION_STRING "0.2" +#define VERSION_STRING "0.3" #define DRIVER_AUTHOR "HYGON Corporation" #define VERSION_SIZE 16 @@ -67,6 +70,11 @@ #define MCCP_SHARE_OP_DMA_UNMAP 0x05 #define MCCP_SHARE_OP_GET_VERSION 0x06 +#define MCCP_NOIOMMU_IOC_TYPE MCCP_SHARE_IOC_TYPE +#define MCCP_NOIOMMU_OP MCCP_SHARE_OP +#define MCCP_NOIOMMU_SET_MEMORY_WB 0x01 +#define MCCP_NOIOMMU_GET_SME_ACTIVE 0x02 + #define MCCP_SHARE_IOMMU_MAGIC 0x3d6a9c5728633b9e #define PCI_RESOURCE_BAR2 2 @@ -130,11 +138,16 @@ struct hct_dev_ctrl { union { unsigned char version[VERSION_SIZE]; unsigned int id; + unsigned long sme_mask; struct { unsigned long vaddr; unsigned long iova; unsigned long size; }; + struct { + unsigned long vt_addr; + unsigned int nr_pages; + }; }; }; @@ -2064,6 +2077,115 @@ static void hct_device_release(struct device *dev) dev_dbg(dev, "hct: released\n"); } +/* set the flags PAT, PCT and PWT of page all to 0 + * for obtaining cache properties. + */ +void hct_noiommu_set_memory_wb(unsigned long address) +{ + pgd_t *pgd = current->mm->pgd + pgd_index(address); + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + pte_t old_pte; + pte_t new_pte; + pgprot_t new_prot; + unsigned long pfn; + + if (pgd_none(*pgd)) { + pr_err("pgd val shouldn't be none\n"); + return; + } + + p4d = p4d_offset(pgd, address); + if (p4d_none(*p4d)) { + pr_err("p4d val shouldn't be none\n"); + return; + } + + pud = pud_offset(p4d, address); + if (pud_none(*pud) || pud_large(*pud) || !pud_present(*pud)) { + pr_err("pud val is invalid.\n"); + return; + } + + pmd = pmd_offset(pud, address); + if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd)) { + pr_err("pmd val is invalid.\n"); + return; + } + + pte = pte_offset_kernel(pmd, address); + if (pte_none(*pte)) { + pr_err("pte val shouldn't be none\n"); + return; + } + + old_pte = *pte; + pfn = pte_pfn(old_pte); + new_prot = pte_pgprot(old_pte); + pgprot_val(new_prot) &= ~(_PAGE_PAT | _PAGE_PCD | 
_PAGE_PWT); + new_pte = pfn_pte(pfn, new_prot); + set_pte_atomic(pte, new_pte); +} + +static DEFINE_MUTEX(hct_noiommu_lock); +static long hct_noiommu_ioctl(struct file *file, + unsigned int ioctl, unsigned long arg) +{ + struct hct_dev_ctrl ctrl; + unsigned int cmd_id; + unsigned int len; + int ret = 0; + + if (_IOC_TYPE(ioctl) != MCCP_NOIOMMU_IOC_TYPE) + return -EINVAL; + + cmd_id = _IOC_NR(ioctl); + len = _IOC_SIZE(ioctl); + + if (cmd_id != MCCP_SHARE_OP) + return -EINVAL; + + if (len != sizeof(ctrl)) + return -EINVAL; + + if (copy_from_user(&ctrl, (void __user *)arg, sizeof(ctrl))) + return -EINVAL; + + mutex_lock(&hct_noiommu_lock); + switch (ctrl.op) { + case MCCP_NOIOMMU_SET_MEMORY_WB: + while (ctrl.nr_pages && ctrl.nr_pages--) { + hct_noiommu_set_memory_wb(ctrl.vt_addr); + ctrl.vt_addr += PAGE_SIZE; + } + break; + case MCCP_NOIOMMU_GET_SME_ACTIVE: + ctrl.sme_mask = sme_me_mask; + if (copy_to_user((void __user *)arg, &ctrl, sizeof(ctrl))) + ret = -EINVAL; + break; + default: + ret = -EINVAL; + break; + } + mutex_unlock(&hct_noiommu_lock); + + return ret; +} + +const struct file_operations hct_noiommu_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = hct_noiommu_ioctl, +}; + +struct miscdevice hct_noiommu_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "hct_noiommu", + .fops = &hct_noiommu_fops, +}; + #define CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 #define CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 #define CPUID_VENDOR_HygonGenuine_edx 0x6e65476e @@ -2086,6 +2208,9 @@ static int __init hct_dev_init(void) return -1; } + if (!iommu_present(&pci_bus_type)) + return misc_register(&hct_noiommu_misc); + ret = mdev_register_driver(&hct_mdev_driver); if (ret) return ret; @@ -2163,6 +2288,11 @@ static int __init hct_dev_init(void) static void __exit hct_dev_exit(void) { + if (!iommu_present(&pci_bus_type)) { + misc_deregister(&hct_noiommu_misc); + return; + } + hct_share_exit(); hct_dev.dev.bus = NULL; mdev_unregister_parent(&hct_dev.mdev_parent); -- Gitee From 
9d717850abe500fb8b30222a1c7f93a1cb0ce5f5 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Thu, 25 Apr 2024 19:41:51 +0800 Subject: [PATCH 0904/2138] anolis: bugifx: fix build issue when the module mdev is disabled. ANBZ: #8582 Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3119 --- drivers/crypto/ccp/hygon/hct.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/ccp/hygon/hct.c b/drivers/crypto/ccp/hygon/hct.c index 392371af865a..d2e179a2406b 100644 --- a/drivers/crypto/ccp/hygon/hct.c +++ b/drivers/crypto/ccp/hygon/hct.c @@ -23,12 +23,14 @@ #include #include #include -#include #include #include #include #include #include +#if IS_ENABLED(CONFIG_VFIO_MDEV) +#include +#endif /** * VERSION_STRING modification instructions: @@ -36,12 +38,13 @@ * 0.2 -- supoort qemu virtualization. * 0.3 -- support host-noiommu mode memory encryption function, * and performance optimization in virtual machines (enable caching). + * 0.4 -- support compiling hct.ko when mdev module is disabled. */ #undef pr_fmt #define pr_fmt(fmt) "hct: " fmt -#define VERSION_STRING "0.3" +#define VERSION_STRING "0.4" #define DRIVER_AUTHOR "HYGON Corporation" #define VERSION_SIZE 16 @@ -192,6 +195,7 @@ struct hct_iommu { unsigned long ref; }; +#if IS_ENABLED(CONFIG_VFIO_MDEV) static struct hct_data { struct hct_iommu iommu[MCCP_DEV_MAX]; struct mutex lock; @@ -2076,6 +2080,7 @@ static void hct_device_release(struct device *dev) { dev_dbg(dev, "hct: released\n"); } +#endif /* IS_ENABLED(CONFIG_VFIO_MDEV) */ /* set the flags PAT, PCT and PWT of page all to 0 * for obtaining cache properties. 
@@ -2192,7 +2197,7 @@ struct miscdevice hct_noiommu_misc = { static int __init hct_dev_init(void) { - int ret = 0; + int __maybe_unused ret = 0; u32 vendor_ebx = 0; u32 vendor_ecx = 0; u32 vendor_edx = 0; @@ -2208,6 +2213,7 @@ static int __init hct_dev_init(void) return -1; } +#if IS_ENABLED(CONFIG_VFIO_MDEV) if (!iommu_present(&pci_bus_type)) return misc_register(&hct_noiommu_misc); @@ -2284,10 +2290,15 @@ static int __init hct_dev_init(void) all_done: return ret; +#else + pr_info("The module mdev is disabled.\n"); + return misc_register(&hct_noiommu_misc); +#endif } static void __exit hct_dev_exit(void) { +#if IS_ENABLED(CONFIG_VFIO_MDEV) if (!iommu_present(&pci_bus_type)) { misc_deregister(&hct_noiommu_misc); return; @@ -2305,6 +2316,9 @@ static void __exit hct_dev_exit(void) hct_dev.vd_class = NULL; pci_unregister_driver(&hct_pci_driver); +#else + misc_deregister(&hct_noiommu_misc); +#endif } module_init(hct_dev_init) -- Gitee From 78cef423c071aff8125b718600d5d58859f78ba7 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Thu, 25 Apr 2024 19:46:13 +0800 Subject: [PATCH 0905/2138] anolis: Change the maximum number of supported ccps from 16 to 48. ANBZ: #8582 Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3119 --- drivers/crypto/ccp/hygon/hct.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/hygon/hct.c b/drivers/crypto/ccp/hygon/hct.c index d2e179a2406b..3a29cd953943 100644 --- a/drivers/crypto/ccp/hygon/hct.c +++ b/drivers/crypto/ccp/hygon/hct.c @@ -39,12 +39,13 @@ * 0.3 -- support host-noiommu mode memory encryption function, * and performance optimization in virtual machines (enable caching). * 0.4 -- support compiling hct.ko when mdev module is disabled. + * 0.5 -- change the maximum number of supported ccps from 16 to 48. 
*/ #undef pr_fmt #define pr_fmt(fmt) "hct: " fmt -#define VERSION_STRING "0.4" +#define VERSION_STRING "0.5" #define DRIVER_AUTHOR "HYGON Corporation" #define VERSION_SIZE 16 @@ -92,7 +93,7 @@ #define MCCP_INSTANCE_MASK (~((1u << MCCP_INSTANCE_OFFSET) - 1)) #define MCCP_PASID_SIZE (1 << 8) #define MCCP_IOVA_MAX_SLOT 1024 -#define MCCP_DEV_MAX 16 +#define MCCP_DEV_MAX 48 #define MCCP_DEV_QUEUE_MAX 8 #define MCCP_DEV_QUEUE 5 #define MCCP_QUEUES_MAX (MCCP_DEV_MAX * MCCP_DEV_QUEUE_MAX) -- Gitee From 853d76d476b5a150aa9d37a3fe67eb8fe8a3a996 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Mon, 6 May 2024 14:31:25 +0800 Subject: [PATCH 0906/2138] anolis: support 1024 processes simutaneously in the hct-mdev mode. ANBZ: #8582 Signed-off-by: Yabin Li Signed-off-by: yangdepei Reviewed-by: Tianjia Zhang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3119 --- drivers/crypto/ccp/hygon/hct.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/ccp/hygon/hct.c b/drivers/crypto/ccp/hygon/hct.c index 3a29cd953943..719b3287d151 100644 --- a/drivers/crypto/ccp/hygon/hct.c +++ b/drivers/crypto/ccp/hygon/hct.c @@ -1250,7 +1250,6 @@ static int hct_share_open(struct inode *inode, struct file *file) return -ENOMEM; mutex_lock(&hct_data.lock); - bitmap_set(hct_data.ids, 0, 1); id = (unsigned int)find_first_zero_bit(hct_data.ids, MCCP_INSTANCE_MAX); if (id < MCCP_INSTANCE_MAX) bitmap_set(hct_data.ids, id, 1); @@ -1267,7 +1266,13 @@ static int hct_share_open(struct inode *inode, struct file *file) mutex_unlock(&hct_share.lock); file->private_data = private; - private->id = id << MCCP_INSTANCE_OFFSET; + /* + * At user space, each process is assigned a different number + * which cannot be 0, as the identifier for the process. + * The number is assigned by id, so the value of id needs to + * start from 1, and cannot be 0. 
+ */ + private->id = (++id) << MCCP_INSTANCE_OFFSET; INIT_LIST_HEAD(&private->head); mutex_init(&private->lock); @@ -1891,7 +1896,7 @@ static int hct_share_close(struct inode *inode, struct file *file) mutex_unlock(&hct_share.lock); mutex_lock(&hct_data.lock); - if (id < MCCP_INSTANCE_MAX) + if (--id < MCCP_INSTANCE_MAX) bitmap_clear(hct_data.ids, id, 1); mutex_unlock(&hct_data.lock); -- Gitee From 1e61237cd8399d4df6a295ceb7ad6f2c265b26f4 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 22 Feb 2024 07:12:11 +0800 Subject: [PATCH 0907/2138] selftests: net: veth: test syncing GRO and XDP state while device is down MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #9028 commit 1a825e4cdf457b7aef7ebbc2f1206654f5beb150 upstream. Test that we keep GRO flag in sync when XDP is disabled while the device is closed. Signed-off-by: Jakub Kicinski Reviewed-by: Toke Høiland-Jørgensen Signed-off-by: David S. Miller Signed-off-by: Xiao Long Signed-off-by: Philo Lu Reviewed-by: D. 
Wythe Link: https://gitee.com/anolis/cloud-kernel/pulls/3178 --- tools/testing/selftests/net/veth.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tools/testing/selftests/net/veth.sh b/tools/testing/selftests/net/veth.sh index 27574bbf2d63..5ae85def0739 100755 --- a/tools/testing/selftests/net/veth.sh +++ b/tools/testing/selftests/net/veth.sh @@ -246,6 +246,20 @@ ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on chk_gro " - aggregation with TSO off" 1 cleanup +create_ns +ip -n $NS_DST link set dev veth$DST up +ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp +chk_gro_flag "gro vs xdp while down - gro flag on" $DST on +ip -n $NS_DST link set dev veth$DST down +chk_gro_flag " - after down" $DST on +ip -n $NS_DST link set dev veth$DST xdp off +chk_gro_flag " - after xdp off" $DST off +ip -n $NS_DST link set dev veth$DST up +chk_gro_flag " - after up" $DST off +ip -n $NS_SRC link set dev veth$SRC xdp object ${BPF_FILE} section xdp +chk_gro_flag " - after peer xdp" $DST off +cleanup + create_ns chk_channels "default channels" $DST 1 1 -- Gitee From 7f22f1872712592e6e9fe3734c7fbc6fb5657ecf Mon Sep 17 00:00:00 2001 From: Ignat Korchagin Date: Thu, 14 Mar 2024 02:37:59 +0800 Subject: [PATCH 0908/2138] selftests: net: veth: test the ability to independently manipulate GRO and XDP ANBZ: #9028 commit ba5a6476e3866c97e2c85f64b0c7dfb8fbdda18a upstream. We should be able to independently flip either XDP or GRO states and toggling one should not affect the other. Adjust other tests as well that had implicit expectation that GRO would be automatically enabled. Signed-off-by: Ignat Korchagin Signed-off-by: David S. Miller Signed-off-by: Xiao Long Signed-off-by: Philo Lu Reviewed-by: D. 
Wythe Link: https://gitee.com/anolis/cloud-kernel/pulls/3178 --- tools/testing/selftests/net/udpgro_fwd.sh | 4 ++++ tools/testing/selftests/net/veth.sh | 24 ++++++++++++++++++++--- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh index f4549e6894dd..83ed987cff34 100755 --- a/tools/testing/selftests/net/udpgro_fwd.sh +++ b/tools/testing/selftests/net/udpgro_fwd.sh @@ -217,6 +217,7 @@ for family in 4 6; do cleanup create_ns + ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on ip netns exec $NS_DST ethtool -K veth$DST rx-gro-list on run_test "GRO frag list" $BM_NET$DST 1 0 cleanup @@ -227,6 +228,7 @@ for family in 4 6; do # use NAT to circumvent GRO FWD check create_ns ip -n $NS_DST addr add dev veth$DST $BM_NET$DST_NAT/$SUFFIX + ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on ip netns exec $NS_DST $IPT -t nat -I PREROUTING -d $BM_NET$DST_NAT \ -j DNAT --to-destination $BM_NET$DST @@ -240,6 +242,7 @@ for family in 4 6; do cleanup create_vxlan_pair + ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on ip netns exec $NS_DST ethtool -K veth$DST rx-gro-list on run_test "GRO frag list over UDP tunnel" $OL_NET$DST 10 10 cleanup @@ -247,6 +250,7 @@ for family in 4 6; do # use NAT to circumvent GRO FWD check create_vxlan_pair ip -n $NS_DST addr add dev $VXDEV$DST $OL_NET$DST_NAT/$SUFFIX + ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on ip netns exec $NS_DST $IPT -t nat -I PREROUTING -d $OL_NET$DST_NAT \ -j DNAT --to-destination $OL_NET$DST diff --git a/tools/testing/selftests/net/veth.sh b/tools/testing/selftests/net/veth.sh index 5ae85def0739..3a394b43e274 100755 --- a/tools/testing/selftests/net/veth.sh +++ b/tools/testing/selftests/net/veth.sh @@ -249,9 +249,9 @@ 
cleanup create_ns ip -n $NS_DST link set dev veth$DST up ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp -chk_gro_flag "gro vs xdp while down - gro flag on" $DST on +chk_gro_flag "gro vs xdp while down - gro flag off" $DST off ip -n $NS_DST link set dev veth$DST down -chk_gro_flag " - after down" $DST on +chk_gro_flag " - after down" $DST off ip -n $NS_DST link set dev veth$DST xdp off chk_gro_flag " - after xdp off" $DST off ip -n $NS_DST link set dev veth$DST up @@ -260,6 +260,21 @@ ip -n $NS_SRC link set dev veth$SRC xdp object ${BPF_FILE} section xdp chk_gro_flag " - after peer xdp" $DST off cleanup +create_ns +ip -n $NS_DST link set dev veth$DST up +ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp +ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on +chk_gro_flag "gro vs xdp while down - gro flag on" $DST on +ip -n $NS_DST link set dev veth$DST down +chk_gro_flag " - after down" $DST on +ip -n $NS_DST link set dev veth$DST xdp off +chk_gro_flag " - after xdp off" $DST on +ip -n $NS_DST link set dev veth$DST up +chk_gro_flag " - after up" $DST on +ip -n $NS_SRC link set dev veth$SRC xdp object ${BPF_FILE} section xdp +chk_gro_flag " - after peer xdp" $DST on +cleanup + create_ns chk_channels "default channels" $DST 1 1 @@ -327,11 +342,14 @@ if [ $CPUS -gt 2 ]; then fi ip -n $NS_DST link set dev veth$DST xdp object ${BPF_FILE} section xdp 2>/dev/null -chk_gro_flag "with xdp attached - gro flag" $DST on +chk_gro_flag "with xdp attached - gro flag" $DST off chk_gro_flag " - peer gro flag" $SRC off chk_tso_flag " - tso flag" $SRC off chk_tso_flag " - peer tso flag" $DST on ip netns exec $NS_DST ethtool -K veth$DST rx-udp-gro-forwarding on +chk_gro " - no aggregation" 10 +ip netns exec $NS_DST ethtool -K veth$DST generic-receive-offload on +chk_gro_flag " - gro flag with GRO on" $DST on chk_gro " - aggregation" 1 -- Gitee From 95662ad98c2604598b3ffad7e3964b1a09213eaa Mon Sep 17 00:00:00 2001 From: 
hanliyang Date: Tue, 14 May 2024 16:00:03 +0800 Subject: [PATCH 0909/2138] anolis: crypto: ccp: Return -ENODEV if Hygon PSP is not configured with CSV capability ANBZ: #9023 The rules to determine psp_master is not exactly the same between AMD ASP and Hygon PSP. If a Hygon PSP is not configured with CSV capability, it should not be the psp_master. Fixes: 39e18cb04c1f ("anolis: crypto: ccp: Fixup the capability of Hygon PSP during initialization") Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3174 --- drivers/crypto/ccp/psp-dev.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 1566b955730e..b4aea8dbdc28 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -236,11 +236,15 @@ static irqreturn_t psp_irq_handler_hygon(int irq, void *data) } #endif -static void hygon_fixup_psp_caps(struct psp_device *psp) +static int hygon_fixup_psp_caps(struct psp_device *psp) { - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) - psp->capability &= ~(PSP_CAPABILITY_TEE | - PSP_CAPABILITY_PSP_SECURITY_REPORTING); + /* the hygon psp is unavailable if bit0 cleared in feature reg */ + if (!(psp->capability & PSP_CAPABILITY_SEV)) + return -ENODEV; + + psp->capability &= ~(PSP_CAPABILITY_TEE | + PSP_CAPABILITY_PSP_SECURITY_REPORTING); + return 0; } static unsigned int psp_get_capability(struct psp_device *psp) @@ -263,8 +267,13 @@ static unsigned int psp_get_capability(struct psp_device *psp) /* * Fix capability of Hygon psp, the meaning of Hygon psp feature * register is not exactly the same as AMD. + * Return -ENODEV directly if hygon psp not configured with CSV + * capability. 
*/ - hygon_fixup_psp_caps(psp); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (hygon_fixup_psp_caps(psp)) + return -ENODEV; + } /* Detect if TSME and SME are both enabled */ if (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING && -- Gitee From 3d48bddf1519180aff456ddeff287336525f0f74 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 18 Dec 2023 23:39:32 +0000 Subject: [PATCH 0910/2138] kselftest/arm64: Don't probe the current VL for unsupported vector types ANBZ: #8856 commit 9a802ddb2123e5adec394d35cd539cc0b15bc830 upstream. The vec-syscfg selftest verifies that setting the VL of the currently tested vector type does not disrupt the VL of the other vector type. To do this it records the current vector length for each type but neglects to guard this with a check for that vector type actually being supported. Add one, using a helper function which we also update all the other instances of this pattern. Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20231218-kselftest-arm64-vec-syscfg-rdvl-v1-1-0ac22d47e81f@kernel.org Signed-off-by: Will Deacon Signed-off-by: Ruidong Tian Reviewed-by: Baolin Wang Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3182 --- tools/testing/selftests/arm64/fp/vec-syscfg.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/arm64/fp/vec-syscfg.c b/tools/testing/selftests/arm64/fp/vec-syscfg.c index 5f648b97a06f..ea9c7d47790f 100644 --- a/tools/testing/selftests/arm64/fp/vec-syscfg.c +++ b/tools/testing/selftests/arm64/fp/vec-syscfg.c @@ -66,6 +66,11 @@ static struct vec_data vec_data[] = { }, }; +static bool vec_type_supported(struct vec_data *data) +{ + return getauxval(data->hwcap_type) & data->hwcap; +} + static int stdio_read_integer(FILE *f, const char *what, int *val) { int n = 0; @@ -564,8 +569,11 @@ static void prctl_set_all_vqs(struct vec_data *data) return; } - for (i = 0; i < ARRAY_SIZE(vec_data); i++) + for (i = 0; i 
< ARRAY_SIZE(vec_data); i++) { + if (!vec_type_supported(&vec_data[i])) + continue; orig_vls[i] = vec_data[i].rdvl(); + } for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) { vl = sve_vl_from_vq(vq); @@ -594,7 +602,7 @@ static void prctl_set_all_vqs(struct vec_data *data) if (&vec_data[i] == data) continue; - if (!(getauxval(vec_data[i].hwcap_type) & vec_data[i].hwcap)) + if (!vec_type_supported(&vec_data[i])) continue; if (vec_data[i].rdvl() != orig_vls[i]) { @@ -765,7 +773,7 @@ int main(void) struct vec_data *data = &vec_data[i]; unsigned long supported; - supported = getauxval(data->hwcap_type) & data->hwcap; + supported = vec_type_supported(data); if (!supported) all_supported = false; -- Gitee From 8d7188fd6e198d333d48a09915fcf58248c69d02 Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Wed, 22 May 2024 11:53:04 +0800 Subject: [PATCH 0911/2138] anolis: LoongArch: Limit min pci msi-x/msi vector number There need limit pci msi-x/msi vector number when request more than 32 vectors on loongarch. 
ANBZ: #9104 Signed-off-by: Juxin Gao Signed-off-by: Hongchen Zhang Link: https://gitee.com/anolis/cloud-kernel/pulls/3246 --- drivers/pci/msi/msi.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c index c429f9cce441..eeb6c5d0299a 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -430,8 +430,10 @@ int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, int rc; #ifdef CONFIG_LOONGARCH - if (maxvec > 32) + if (maxvec > 32) { maxvec = pci_irq_numbers; + minvec = min_t(int, pci_irq_numbers, minvec); + } #endif if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0) @@ -809,8 +811,10 @@ int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int int hwsize, rc, nvec = maxvec; #ifdef CONFIG_LOONGARCH - if (maxvec > 32) + if (maxvec > 32) { nvec = pci_irq_numbers; + minvec = min_t(int, pci_irq_numbers, minvec); + } #endif if (maxvec < minvec) -- Gitee From b01d2659f25c581bd4083923db462ae019f2f4b5 Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Wed, 22 May 2024 17:00:24 +0800 Subject: [PATCH 0912/2138] anolis: Change vmlinuz.efi to vmlinux for loongarch64 ANBZ: #9129 Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3251 --- anolis/rpm/kernel.spec.template | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template index 2e2a4558dcc9..1f9ab51bd728 100644 --- a/anolis/rpm/kernel.spec.template +++ b/anolis/rpm/kernel.spec.template @@ -142,9 +142,9 @@ %ifarch loongarch64 %define all_arch_configs %{name}-%{version}-loongarch64*.config %define asmarch loongarch -%define make_target vmlinuz.efi +%define make_target vmlinux %define hdrarch loongarch -%define kernel_image arch/loongarch/boot/vmlinuz.efi +%define kernel_image vmlinux %endif # To temporarily exclude an architecture from being built, add it to -- Gitee From 
534b316e9e823acb5b25945111856ce826a4db4a Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Fri, 17 May 2024 16:36:58 +0800 Subject: [PATCH 0913/2138] anolis: spec: add grubby config after install kernel ANBZ: #9066 Add grubby config for crashkernel after install kernel Signed-off-by: Jing Zhang Reviewed-by: Qiao Ma Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/3213 --- anolis/rpm/kernel.spec.template | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template index 1f9ab51bd728..89c843aa78c2 100644 --- a/anolis/rpm/kernel.spec.template +++ b/anolis/rpm/kernel.spec.template @@ -1356,6 +1356,11 @@ then\ %{_sbindir}/weak-modules --add-kernel %{KVERREL}%{?1:+%{1}} || exit $?\ fi\ /bin/kernel-install add %{KVERREL}%{?1:+%{1}} /lib/modules/%{KVERREL}%{?1:+%{1}}/vmlinuz || exit $?\ +%ifarch aarch64 \ +grubby --update-kernel /boot/vmlinuz-%{KVERREL}%{?1:+%{1}} --args="cgroup.memory=nokmem crashkernel=0M-2G:0M,2G-64G:256M,64G-:384M iommu.passthrough=1 iommu.strict=0 nospectre_bhb ssbd=force-off"\ +%else \ +grubby --update-kernel /boot/vmlinuz-%{KVERREL}%{?1:+%{1}} --args="cgroup.memory=nokmem crashkernel=0M-2G:0M,2G-8G:192M,8G-:256M"\ +%endif \ %{nil} # -- Gitee From d86c9723536721b42fa18e16183c5bff257ee24e Mon Sep 17 00:00:00 2001 From: Hongchen Zhang Date: Wed, 24 Apr 2024 18:00:22 +0800 Subject: [PATCH 0914/2138] anolis: LoongArch: fix KASLR can not be disabled by nokaslr when boot from old BPI ANBZ: #9175 After this patch,KASLR is really disabled when nokaslr passed from boot parameter for old BPI. 
Signed-off-by: Hongchen Zhang Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3263 --- arch/loongarch/kernel/legacy_boot.h | 3 +++ arch/loongarch/kernel/relocate.c | 6 +++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/kernel/legacy_boot.h b/arch/loongarch/kernel/legacy_boot.h index 982bf9b1de72..104d8c53bd2d 100644 --- a/arch/loongarch/kernel/legacy_boot.h +++ b/arch/loongarch/kernel/legacy_boot.h @@ -3,6 +3,7 @@ #define __LEGACY_BOOT_H_ #include #include +#include #define ADDRESS_TYPE_SYSRAM 1 #define ADDRESS_TYPE_RESERVED 2 #define ADDRESS_TYPE_ACPI 3 @@ -87,4 +88,6 @@ extern int __init pch_msi_parse_madt(union acpi_subtable_headers *header, const unsigned long end); extern struct irq_domain *get_pchpic_irq_domain(void); + +extern __init void fw_init_cmdline(unsigned long argc, unsigned long cmdp); #endif diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c index 0eddd4a66b87..14e5bb8e57c3 100644 --- a/arch/loongarch/kernel/relocate.c +++ b/arch/loongarch/kernel/relocate.c @@ -16,6 +16,7 @@ #include #include #include +#include "legacy_boot.h" #define RELOCATED(x) ((void *)((long)x + reloc_offset)) #define RELOCATED_KASLR(x) ((void *)((long)x + random_offset)) @@ -173,7 +174,10 @@ unsigned long __init relocate_kernel(void) void *location_new = _text; /* Default to original kernel start */ char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */ - strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE); + if (fw_arg0 < 2) + strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE); + else + fw_init_cmdline(fw_arg0, TO_CACHE(fw_arg1)); /* OLD BPI parameters */ #ifdef CONFIG_RANDOMIZE_BASE location_new = determine_relocation_address(); -- Gitee From daa5f7f80f7bfb9ff0cd4e4a92fab2119b80413c Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Thu, 9 May 2024 20:21:53 +0800 Subject: [PATCH 0915/2138] fuse: set FR_PENDING 
atomically in fuse_resend() ANBZ: #9180 commit 42815f8ac54c5113bf450ec4b7ccc5b62af0f6a7 upstream. When fuse_resend() moves the requests from processing lists to pending list, it uses __set_bit() to set FR_PENDING bit in req->flags. Using __set_bit() is not safe, because other functions may update req->flags concurrently (e.g., request_wait_answer() may call set_bit(FR_INTERRUPTED, &flags)). Fix it by using set_bit() instead. Fixes: 760eac73f9f6 ("fuse: Introduce a new notification type for resend pending requests") Signed-off-by: Hou Tao Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3271 --- fs/fuse/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 3e2acd253ed2..fb0293add851 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1815,7 +1815,7 @@ static void fuse_resend(struct fuse_conn *fc) spin_unlock(&fc->lock); list_for_each_entry_safe(req, next, &to_queue, list) { - __set_bit(FR_PENDING, &req->flags); + set_bit(FR_PENDING, &req->flags); /* mark the request as resend request */ req->in.h.unique |= FUSE_UNIQUE_RESEND; } -- Gitee From cbc7c3e435eaff2e640273765791eee89f8a47f8 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Thu, 9 May 2024 20:21:54 +0800 Subject: [PATCH 0916/2138] fuse: clear FR_SENT when re-adding requests into pending list ANBZ: #9180 commit 246014876d782bbf2e652267482cd2e799fb5fcd upstream. The following warning was reported by lee bruce: ------------[ cut here ]------------ WARNING: CPU: 0 PID: 8264 at fs/fuse/dev.c:300 fuse_request_end+0x685/0x7e0 fs/fuse/dev.c:300 Modules linked in: CPU: 0 PID: 8264 Comm: ab2 Not tainted 6.9.0-rc7 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996) RIP: 0010:fuse_request_end+0x685/0x7e0 fs/fuse/dev.c:300 ...... 
Call Trace: fuse_dev_do_read.constprop.0+0xd36/0x1dd0 fs/fuse/dev.c:1334 fuse_dev_read+0x166/0x200 fs/fuse/dev.c:1367 call_read_iter include/linux/fs.h:2104 [inline] new_sync_read fs/read_write.c:395 [inline] vfs_read+0x85b/0xba0 fs/read_write.c:476 ksys_read+0x12f/0x260 fs/read_write.c:619 do_syscall_x64 arch/x86/entry/common.c:52 [inline] do_syscall_64+0xce/0x260 arch/x86/entry/common.c:83 entry_SYSCALL_64_after_hwframe+0x77/0x7f ...... The warning is due to the FUSE_NOTIFY_RESEND notify sent by the write() syscall in the reproducer program and it happens as follows: (1) calls fuse_dev_read() to read the INIT request The read succeeds. During the read, bit FR_SENT will be set on the request. (2) calls fuse_dev_write() to send an USE_NOTIFY_RESEND notify The resend notify will resend all processing requests, so the INIT request is moved from processing list to pending list again. (3) calls fuse_dev_read() with an invalid output address fuse_dev_read() will try to copy the same INIT request to the output address, but it will fail due to the invalid address, so the INIT request is ended and triggers the warning in fuse_request_end(). Fix it by clearing FR_SENT when re-adding requests into pending list. 
Acked-by: Miklos Szeredi Reported-by: xingwei lee Reported-by: yue sun Closes: https://lore.kernel.org/linux-fsdevel/58f13e47-4765-fce4-daf4-dffcc5ae2330@huaweicloud.com/T/#m091614e5ea2af403b259e7cea6a49e51b9ee07a7 Fixes: 760eac73f9f6 ("fuse: Introduce a new notification type for resend pending requests") Signed-off-by: Hou Tao Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3271 --- fs/fuse/dev.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index fb0293add851..60f0bd6b0099 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1816,6 +1816,7 @@ static void fuse_resend(struct fuse_conn *fc) list_for_each_entry_safe(req, next, &to_queue, list) { set_bit(FR_PENDING, &req->flags); + clear_bit(FR_SENT, &req->flags); /* mark the request as resend request */ req->in.h.unique |= FUSE_UNIQUE_RESEND; } -- Gitee From 44735f822b147fe4a990bc681b71f1b4e5b86f58 Mon Sep 17 00:00:00 2001 From: Hongchen Zhang Date: Thu, 23 May 2024 10:15:04 +0800 Subject: [PATCH 0917/2138] anolis: Revert "LoongArch: kdump: Add high memory reservation" ANBZ: #9163 This reverts commit 63601a110e3c57cea9d460d5a1800593c8fdda01 There is no need to reserve extra memory to the crash kernel, as if the kdump test failed, we should increase the size of the crash kernel instead of reserve high memory for user. 
Signed-off-by: Hongchen Zhang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3268 --- arch/loongarch/kernel/setup.c | 49 +++-------------------------------- 1 file changed, 3 insertions(+), 46 deletions(-) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 0f07b41f9e61..23b248f24695 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -386,49 +386,13 @@ static void __init bootcmdline_init(char **cmdline_p) * memory area used by the previous production kernel should be reserved to * avoid destroy to the captured data. */ -static void reserve_oldmem_region(int node, unsigned long s0, unsigned long e0) +static void reserve_oldmem_region(void) { #ifdef CONFIG_CRASH_DUMP - unsigned long s1, e1; - if (!is_kdump_kernel()) return; - if ((e0 - s0) > (SZ_1G >> PAGE_SHIFT)) - e0 = e0 - (SZ_512M >> PAGE_SHIFT); - - /* crashmem_start is crashk_res reserved by primary production kernel */ - s1 = PFN_UP(crashmem_start); - e1 = PFN_DOWN(crashmem_start + crashmem_size); - - if (s1 == 0) - return; - - if (node == 0) { - memblock_reserve(PFN_PHYS(s0), (s1 - s0) << PAGE_SHIFT); - memblock_reserve(PFN_PHYS(e1), (e0 - e1) << PAGE_SHIFT); - } else { - memblock_reserve(PFN_PHYS(s0), (e0 - s0) << PAGE_SHIFT); - } -#endif -} - -/* Traditionally, LoongArch's contiguous low memory is 256M, so crashkernel=X@Y is - * unable to be large enough in some cases. 
Thus, if the total memory of a node - * is more than 1GB, we reserve the top 512MB for the capture kernel - */ -static void reserve_crashm_region(int node, unsigned long s0, unsigned long e0) -{ -#ifdef CONFIG_KEXEC - if (crashk_res.start == crashk_res.end) - return; - - if ((e0 - s0) <= (SZ_1G >> PAGE_SHIFT)) - return; - - s0 = e0 - (SZ_512M >> PAGE_SHIFT); - - memblock_reserve(PFN_PHYS(s0), (e0 - s0) << PAGE_SHIFT); + memblock_cap_memory_range(crashmem_start, crashmem_size); #endif } @@ -471,16 +435,9 @@ static void __init check_kernel_sections_mem(void) */ static void __init arch_mem_init(char **cmdline_p) { - unsigned int node; - unsigned long start_pfn, end_pfn; - arch_reserve_vmcore(); arch_parse_crashkernel(); - for_each_online_node(node) { - get_pfn_range_for_nid(node, &start_pfn, &end_pfn); - reserve_crashm_region(node, start_pfn, end_pfn); - reserve_oldmem_region(node, start_pfn, end_pfn); - } + reserve_oldmem_region(); if (usermem) pr_info("User-defined physical RAM map overwrite\n"); -- Gitee From c5b45b07df3941854f136c615ab6d2aba90986fb Mon Sep 17 00:00:00 2001 From: Hongchen Zhang Date: Thu, 23 May 2024 10:15:05 +0800 Subject: [PATCH 0918/2138] anolis: Revert "LoongArch: Fix kdump failure on v40 interface specification" ANBZ: #9163 This reverts commit c9a5527a4071c59723e99fbfec02ff980352d2f5. This patch is only a work around, and the root cause is efi map table error. Therefore, we fix the problem of efi map table error at the following patch instead of adding this work around. 
Signed-off-by: Hongchen Zhang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3268 --- arch/loongarch/kernel/setup.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 23b248f24695..7c2622ce69ea 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -398,6 +398,10 @@ static void reserve_oldmem_region(void) void __init platform_init(void) { + arch_reserve_vmcore(); + arch_parse_crashkernel(); + reserve_oldmem_region(); + #ifdef CONFIG_ACPI_TABLE_UPGRADE acpi_table_upgrade(); #endif @@ -435,10 +439,6 @@ static void __init check_kernel_sections_mem(void) */ static void __init arch_mem_init(char **cmdline_p) { - arch_reserve_vmcore(); - arch_parse_crashkernel(); - reserve_oldmem_region(); - if (usermem) pr_info("User-defined physical RAM map overwrite\n"); -- Gitee From 0d535da1f74dca48fb3a82e99ef0ef98f2f9075a Mon Sep 17 00:00:00 2001 From: Hongchen Zhang Date: Thu, 23 May 2024 10:15:06 +0800 Subject: [PATCH 0919/2138] anolis: Revert "LoongArch: kdump: Add memory reservation for old kernel" ANBZ: #9163 This reverts commit beda9aa1f7bba0f0a68d64565c1b97917da3e3d6. In the current state, elfcorehdr will not be destroyed, because it is allocated at the memory reserved for crash kernel, so this patch is not required. 
Signed-off-by: Hongchen Zhang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3268 --- arch/loongarch/kernel/setup.c | 45 ++++++++--------------------------- 1 file changed, 10 insertions(+), 35 deletions(-) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 7c2622ce69ea..83d8e7662b06 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -72,8 +72,6 @@ EXPORT_SYMBOL(cpu_data); struct loongson_board_info b_info; static const char dmi_empty_string[] = " "; -static phys_addr_t crashmem_start, crashmem_size; - /* * Setup information * @@ -211,15 +209,6 @@ static int __init early_parse_mem(char *p) return -EINVAL; } - start = 0; - size = memparse(p, &p); - if (*p == '@') - start = memparse(p + 1, &p); - else { - pr_err("Invalid format!\n"); - return -EINVAL; - } - /* * If a user specifies memory size, we * blow away any automatically generated @@ -227,14 +216,16 @@ static int __init early_parse_mem(char *p) */ if (usermem == 0) { usermem = 1; - if (!strstr(boot_command_line, "elfcorehdr")) { - memblock_remove(memblock_start_of_DRAM(), - memblock_end_of_DRAM() - memblock_start_of_DRAM()); - } else { - crashmem_start = start; - crashmem_size = size; - return 0; - } + memblock_remove(memblock_start_of_DRAM(), + memblock_end_of_DRAM() - memblock_start_of_DRAM()); + } + start = 0; + size = memparse(p, &p); + if (*p == '@') + start = memparse(p + 1, &p); + else { + pr_err("Invalid format!\n"); + return -EINVAL; } if (!IS_ENABLED(CONFIG_NUMA)) @@ -381,26 +372,10 @@ static void __init bootcmdline_init(char **cmdline_p) *cmdline_p = boot_command_line; } -/* - * After the kdump operation is performed to enter the capture kernel, the - * memory area used by the previous production kernel should be reserved to - * avoid destroy to the captured data. 
- */ -static void reserve_oldmem_region(void) -{ -#ifdef CONFIG_CRASH_DUMP - if (!is_kdump_kernel()) - return; - - memblock_cap_memory_range(crashmem_start, crashmem_size); -#endif -} - void __init platform_init(void) { arch_reserve_vmcore(); arch_parse_crashkernel(); - reserve_oldmem_region(); #ifdef CONFIG_ACPI_TABLE_UPGRADE acpi_table_upgrade(); -- Gitee From 3f5ad90d69a9f1fe7fd7ca2779c001bebc3283ff Mon Sep 17 00:00:00 2001 From: Hongchen Zhang Date: Thu, 23 May 2024 10:15:07 +0800 Subject: [PATCH 0920/2138] anolis: LoongArch: fix efi map page table error ANBZ: #9163 There should map all efi used memory, not the kernel usable memory. Signed-off-by: Yun Liu Signed-off-by: Hongchen Zhang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3268 --- arch/loongarch/kernel/efi.c | 39 +++++++++++++++++++++++++++++-------- 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index c0fad2d75460..459583c985be 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -69,8 +69,10 @@ static int __init efimap_populate_hugepages( if (pud_none(*pud)) { void *p = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); - if (!p) + if (!p) { + pr_err("can not alloc efimap huge pages!\n"); return -1; + } pmd_init(p); pud_populate(&init_mm, pud, p); } @@ -88,7 +90,8 @@ static void __init efi_map_pgt(void) { unsigned long node; unsigned long start, end; - unsigned long start_pfn, end_pfn; + efi_memory_desc_t *md; + u32 mem_type; pgd_efi = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); if (!pgd_efi) { @@ -105,13 +108,33 @@ static void __init efi_map_pgt(void) /* MMIO Registers, Uncached */ efimap_populate_hugepages(SZ_256M | (node << 44), SZ_512M | (node << 44), PAGE_KERNEL_SUC); + } - get_pfn_range_for_nid(node, &start_pfn, &end_pfn); - start = ALIGN_DOWN(start_pfn << PAGE_SHIFT, PMD_SIZE); - end = ALIGN(end_pfn << PAGE_SHIFT, PMD_SIZE); - - /* System memory, Cached */ - 
efimap_populate_hugepages(node ? start : SZ_512M, end, PAGE_KERNEL); + /* Parse memory information */ + for_each_efi_memory_desc(md) { + mem_type = md->type; + start = ALIGN_DOWN(md->phys_addr, PMD_SIZE); + end = ALIGN(start + (md->num_pages << EFI_PAGE_SHIFT), PMD_SIZE); + node = start >> 44; + + switch (mem_type) { + case EFI_LOADER_CODE: + case EFI_LOADER_DATA: + case EFI_BOOT_SERVICES_CODE: + case EFI_BOOT_SERVICES_DATA: + case EFI_PAL_CODE: + case EFI_UNUSABLE_MEMORY: + case EFI_ACPI_RECLAIM_MEMORY: + case EFI_RESERVED_TYPE: + case EFI_RUNTIME_SERVICES_CODE: + case EFI_RUNTIME_SERVICES_DATA: + efimap_populate_hugepages(node ? start : SZ_512M, end, PAGE_KERNEL); + break; + case EFI_MEMORY_MAPPED_IO: + case EFI_MEMORY_MAPPED_IO_PORT_SPACE: + efimap_populate_hugepages(node ? start : SZ_512M, end, PAGE_KERNEL_SUC); + break; + } } } -- Gitee From 4a10f1ed2b544e5bc8031e5aba8a55beba618e9d Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Mon, 16 Oct 2023 16:42:53 +0100 Subject: [PATCH 0921/2138] net: phylink: provide mac_get_caps() method ANBZ: #9164 commit b6f9774719e5601b32f47021b40fee446b356490 upstream. Provide a new method, mac_get_caps() to get the MAC capabilities for the specified interface mode. This is for MACs which have special requirements, such as not supporting half-duplex in certain interface modes, and will replace the validate() method. 
Signed-off-by: Russell King (Oracle) Link: https://lore.kernel.org/r/E1qsPk5-009wiX-G5@rmk-PC.armlinux.org.uk Signed-off-by: Jakub Kicinski Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- Documentation/networking/sfp-phylink.rst | 7 +++++++ drivers/net/phy/phylink.c | 14 +++++++++++--- include/linux/phylink.h | 15 +++++++++++++++ 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/Documentation/networking/sfp-phylink.rst b/Documentation/networking/sfp-phylink.rst index 55b65f607a64..b069d34d7f5c 100644 --- a/Documentation/networking/sfp-phylink.rst +++ b/Documentation/networking/sfp-phylink.rst @@ -200,6 +200,13 @@ this documentation. when the in-band link state changes - otherwise the link will never come up. + The :c:func:`mac_get_caps` method is optional, and if provided should + return the phylink MAC capabilities that are supported for the passed + ``interface`` mode. In general, there is no need to implement this method. + Phylink will use these capabilities in combination with permissible + capabilities for ``interface`` to determine the allowable ethtool link + modes. + The :c:func:`validate` method should mask the supplied supported mask, and ``state->advertising`` with the supported ethtool link modes. 
These are the new ethtool link modes, so bitmask operations must be diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index b5f012619e42..00e6a5723230 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -657,6 +657,7 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl, unsigned long *supported, struct phylink_link_state *state) { + unsigned long capabilities; struct phylink_pcs *pcs; int ret; @@ -696,10 +697,17 @@ static int phylink_validate_mac_and_pcs(struct phylink *pl, } /* Then validate the link parameters with the MAC */ - if (pl->mac_ops->validate) + if (pl->mac_ops->validate) { pl->mac_ops->validate(pl->config, supported, state); - else - phylink_generic_validate(pl->config, supported, state); + } else { + if (pl->mac_ops->mac_get_caps) + capabilities = pl->mac_ops->mac_get_caps(pl->config, + state->interface); + else + capabilities = pl->config->mac_capabilities; + + phylink_validate_mask_caps(supported, state, capabilities); + } return phylink_is_empty_linkmode(supported) ? -EINVAL : 0; } diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 2b886ea654bb..0798198a09ef 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -228,6 +228,7 @@ void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed); /** * struct phylink_mac_ops - MAC operations structure. * @validate: Validate and update the link configuration. + * @mac_get_caps: Get MAC capabilities for interface mode. * @mac_select_pcs: Select a PCS for the interface mode. * @mac_prepare: prepare for a major reconfiguration of the interface. * @mac_config: configure the MAC for the selected mode and state. 
@@ -241,6 +242,8 @@ struct phylink_mac_ops { void (*validate)(struct phylink_config *config, unsigned long *supported, struct phylink_link_state *state); + unsigned long (*mac_get_caps)(struct phylink_config *config, + phy_interface_t interface); struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config, phy_interface_t interface); int (*mac_prepare)(struct phylink_config *config, unsigned int mode, @@ -292,6 +295,18 @@ struct phylink_mac_ops { */ void validate(struct phylink_config *config, unsigned long *supported, struct phylink_link_state *state); +/** + * mac_get_caps: Get MAC capabilities for interface mode. + * @config: a pointer to a &struct phylink_config. + * @interface: PHY interface mode. + * + * Optional method. When not provided, config->mac_capabilities will be used. + * When implemented, this returns the MAC capabilities for the specified + * interface mode where there is some special handling required by the MAC + * driver (e.g. not supporting half-duplex in certain interface modes.) + */ +unsigned long mac_get_caps(struct phylink_config *config, + phy_interface_t interface); /** * mac_select_pcs: Select a PCS for the interface mode. * @config: a pointer to a &struct phylink_config. -- Gitee From eea1f7e4a6dc09e85dafbe7dcd9c18e6e5742bc8 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Fri, 19 Apr 2024 12:03:05 +0300 Subject: [PATCH 0922/2138] net: stmmac: Rename phylink_get_caps() callback to update_caps() ANBZ: #9164 commit dc144baeb4fbfa0d91ce9c3875307566f58704ec upstream. Since recent commits the stmmac_ops::phylink_get_caps() callback has no longer been responsible for the phylink MAC capabilities getting, but merely updates the MAC capabilities in the mac_device_info::link::caps field. Rename the callback to comply with the what the method does now. 
Signed-off-by: Serge Semin Reviewed-by: Romain Gantois Signed-off-by: Paolo Abeni Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 8 ++++---- drivers/net/ethernet/stmicro/stmmac/hwif.h | 8 ++++---- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6 +++--- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index a9837985a483..bdb4f527289d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -69,7 +69,7 @@ static void dwmac4_core_init(struct mac_device_info *hw, init_waitqueue_head(&priv->tstamp_busy_wait); } -static void dwmac4_phylink_get_caps(struct stmmac_priv *priv) +static void dwmac4_update_caps(struct stmmac_priv *priv) { if (priv->plat->tx_queues_to_use > 1) priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD); @@ -1161,7 +1161,7 @@ static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no, const struct stmmac_ops dwmac4_ops = { .core_init = dwmac4_core_init, - .phylink_get_caps = dwmac4_phylink_get_caps, + .update_caps = dwmac4_update_caps, .set_mac = stmmac_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, @@ -1204,7 +1204,7 @@ const struct stmmac_ops dwmac4_ops = { const struct stmmac_ops dwmac410_ops = { .core_init = dwmac4_core_init, - .phylink_get_caps = dwmac4_phylink_get_caps, + .update_caps = dwmac4_update_caps, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, .rx_queue_enable = dwmac4_rx_queue_enable, @@ -1253,7 +1253,7 @@ const struct stmmac_ops dwmac410_ops = { const struct stmmac_ops dwmac510_ops = { .core_init = dwmac4_core_init, - .phylink_get_caps = dwmac4_phylink_get_caps, + .update_caps = dwmac4_update_caps, .set_mac = stmmac_dwmac4_set_mac, .rx_ipc = dwmac4_rx_ipc_enable, 
.rx_queue_enable = dwmac4_rx_queue_enable, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 47fb8e1646c2..ee9a7d98648b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -300,8 +300,8 @@ struct stmmac_est; struct stmmac_ops { /* MAC core initialization */ void (*core_init)(struct mac_device_info *hw, struct net_device *dev); - /* Get phylink capabilities */ - void (*phylink_get_caps)(struct stmmac_priv *priv); + /* Update MAC capabilities */ + void (*update_caps)(struct stmmac_priv *priv); /* Enable the MAC RX/TX */ void (*set_mac)(void __iomem *ioaddr, bool enable); /* Enable and verify that the IPC module is supported */ @@ -423,8 +423,8 @@ struct stmmac_ops { #define stmmac_core_init(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, core_init, __args) -#define stmmac_mac_phylink_get_caps(__priv) \ - stmmac_do_void_callback(__priv, mac, phylink_get_caps, __priv) +#define stmmac_mac_update_caps(__priv) \ + stmmac_do_void_callback(__priv, mac, update_caps, __priv) #define stmmac_mac_set(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, set_mac, __args) #define stmmac_rx_ipc(__priv, __args...) 
\ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f649d1bd25c5..7f4425aba675 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1225,8 +1225,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) xpcs_get_interfaces(priv->hw->xpcs, priv->phylink_config.supported_interfaces); - /* Get the MAC specific capabilities */ - stmmac_mac_phylink_get_caps(priv); + /* Refresh the MAC-specific capabilities */ + stmmac_mac_update_caps(priv); priv->phylink_config.mac_capabilities = priv->hw->link.caps; @@ -7208,7 +7208,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) priv->rss.table[i] = ethtool_rxfh_indir_default(i, rx_cnt); - stmmac_mac_phylink_get_caps(priv); + stmmac_mac_update_caps(priv); priv->phylink_config.mac_capabilities = priv->hw->link.caps; -- Gitee From a7c177e8e811a637a2815bf9bbb9464d69a4ea43 Mon Sep 17 00:00:00 2001 From: Serge Semin Date: Fri, 19 Apr 2024 12:03:06 +0300 Subject: [PATCH 0923/2138] net: stmmac: Move MAC caps init to phylink MAC caps getter ANBZ: #9164 commit f951a64922a8576975673c42985a89a798e19b4e upstream. After a set of recent fixes the stmmac_phy_setup() and stmmac_reinit_queues() methods have turned to having some duplicated code. Let's get rid from the duplication by moving the MAC-capabilities initialization to the PHYLINK MAC-capabilities getter. The getter is called during each network device interface open/close cycle. So the MAC-capabilities will be initialized in generic device open procedure and in case of the Tx/Rx queues re-initialization as the original code semantics implies. 
Signed-off-by: Serge Semin Reviewed-by: Romain Gantois Signed-off-by: Paolo Abeni Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 36 +++++++++---------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7f4425aba675..78a8a3fccb31 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -936,6 +936,22 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) priv->pause, tx_cnt); } +static unsigned long stmmac_mac_get_caps(struct phylink_config *config, + phy_interface_t interface) +{ + struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + + /* Refresh the MAC-specific capabilities */ + stmmac_mac_update_caps(priv); + + config->mac_capabilities = priv->hw->link.caps; + + if (priv->plat->max_speed) + phylink_limit_mac_speed(config, priv->plat->max_speed); + + return config->mac_capabilities; +} + static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { @@ -1105,6 +1121,7 @@ static void stmmac_mac_link_up(struct phylink_config *config, } static const struct phylink_mac_ops stmmac_phylink_mac_ops = { + .mac_get_caps = stmmac_mac_get_caps, .mac_select_pcs = stmmac_mac_select_pcs, .mac_config = stmmac_mac_config, .mac_link_down = stmmac_mac_link_down, @@ -1204,7 +1221,6 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) int mode = priv->plat->phy_interface; struct fwnode_handle *fwnode; struct phylink *phylink; - int max_speed; priv->phylink_config.dev = &priv->dev->dev; priv->phylink_config.type = PHYLINK_NETDEV; @@ -1225,15 +1241,6 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) xpcs_get_interfaces(priv->hw->xpcs, priv->phylink_config.supported_interfaces); - /* Refresh the MAC-specific 
capabilities */ - stmmac_mac_update_caps(priv); - - priv->phylink_config.mac_capabilities = priv->hw->link.caps; - - max_speed = priv->plat->max_speed; - if (max_speed) - phylink_limit_mac_speed(&priv->phylink_config, max_speed); - fwnode = priv->plat->port_node; if (!fwnode) fwnode = dev_fwnode(priv->device); @@ -7194,7 +7201,6 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) { struct stmmac_priv *priv = netdev_priv(dev); int ret = 0, i; - int max_speed; if (netif_running(dev)) stmmac_release(dev); @@ -7208,14 +7214,6 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) priv->rss.table[i] = ethtool_rxfh_indir_default(i, rx_cnt); - stmmac_mac_update_caps(priv); - - priv->phylink_config.mac_capabilities = priv->hw->link.caps; - - max_speed = priv->plat->max_speed; - if (max_speed) - phylink_limit_mac_speed(&priv->phylink_config, max_speed); - stmmac_napi_add(dev); if (netif_running(dev)) -- Gitee From 0e98544f84ae93bd51c42765234096bea81db46d Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Thu, 25 Apr 2024 21:01:54 +0800 Subject: [PATCH 0924/2138] anolis: net: stmmac: Move the atds flag to the stmmac_dma_cfg structure ANBZ: #9164 Alternate Descriptor Size (ATDS) is a part of the DMA-configs together with the PBL, ALL, AEME, etc so the structure is the most suitable place for it. 
Signed-off-by: Feiyang Chen Signed-off-by: Yinggang Gu Signed-off-by: Yanteng Si Reviewed-by: Serge Semin Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 2 +- drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 4 ++-- drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 2 +- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 2 +- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 2 +- drivers/net/ethernet/stmicro/stmmac/hwif.h | 3 +-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5 ++--- include/linux/stmmac.h | 1 + 8 files changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 63998d65fef8..031a83678c71 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -299,7 +299,7 @@ static int sun8i_dwmac_dma_reset(void __iomem *ioaddr) * Called from stmmac via stmmac_dma_ops->init */ static void sun8i_dwmac_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) + struct stmmac_dma_cfg *dma_cfg) { writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN); writel(0x1FFFFFF, ioaddr + EMAC_INT_STA); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index daf79cdbd3ec..bb82ee9b855f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -71,7 +71,7 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) } static void dwmac1000_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) + struct stmmac_dma_cfg *dma_cfg) { u32 value = readl(ioaddr + DMA_BUS_MODE); int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; @@ -98,7 +98,7 @@ static void dwmac1000_dma_init(void __iomem *ioaddr, if 
(dma_cfg->mixed_burst) value |= DMA_BUS_MODE_MB; - if (atds) + if (dma_cfg->atds) value |= DMA_BUS_MODE_ATDS; if (dma_cfg->aal) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index dea270f60cc3..f861babc06f9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -19,7 +19,7 @@ #include "dwmac_dma.h" static void dwmac100_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) + struct stmmac_dma_cfg *dma_cfg) { /* Enable Application Access by writing to DMA CSR0 */ writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT), diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 7805a66a0bc0..22a044d93e17 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -153,7 +153,7 @@ static void dwmac410_dma_init_channel(struct stmmac_priv *priv, } static void dwmac4_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) + struct stmmac_dma_cfg *dma_cfg) { u32 value = readl(ioaddr + DMA_SYS_BUS_MODE); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index dd2ab6185c40..7840bc403788 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -20,7 +20,7 @@ static int dwxgmac2_dma_reset(void __iomem *ioaddr) } static void dwxgmac2_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg, int atds) + struct stmmac_dma_cfg *dma_cfg) { u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE); diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index ee9a7d98648b..9e7d30e2c59e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -167,8 
+167,7 @@ struct dma_features; struct stmmac_dma_ops { /* DMA core initialization */ int (*reset)(void __iomem *ioaddr); - void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, - int atds); + void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg); void (*init_chan)(struct stmmac_priv *priv, void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg, u32 chan); void (*init_rx_chan)(struct stmmac_priv *priv, void __iomem *ioaddr, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 78a8a3fccb31..1d2f3e214f97 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2944,7 +2944,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) struct stmmac_rx_queue *rx_q; struct stmmac_tx_queue *tx_q; u32 chan = 0; - int atds = 0; int ret = 0; if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { @@ -2953,7 +2952,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) } if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) - atds = 1; + priv->plat->dma_cfg->atds = 1; ret = stmmac_reset(priv, priv->ioaddr); if (ret) { @@ -2962,7 +2961,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) } /* DMA Configuration */ - stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); + stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg); if (priv->plat->axi) stmmac_axi(priv, priv->ioaddr, priv->plat->axi); diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 42ff5a4de8ee..89f92847edd4 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -100,6 +100,7 @@ struct stmmac_dma_cfg { bool eame; bool multi_msi_en; bool dche; + bool atds; }; #define AXI_BLEN 7 -- Gitee From 1d7247a4436077f200fe187913890950a8c3f306 Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Thu, 25 Apr 2024 21:01:55 +0800 Subject: [PATCH 0925/2138] anolis: net: stmmac: Add multi-channel support ANBZ: 
#9164 DW GMAC v3.x multi-channels feature is implemented as multiple sets of the same CSRs. Here is only preliminary support, it will be useful for the driver further evolution and for the users having multi-channel DWGMAC v3.x devices. Signed-off-by: Feiyang Chen Signed-off-by: Yinggang Gu Signed-off-by: Yanteng Si Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- .../net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 2 +- .../ethernet/stmicro/stmmac/dwmac1000_dma.c | 32 ++++++++++--------- .../net/ethernet/stmicro/stmmac/dwmac_dma.h | 20 ++++++++++-- .../net/ethernet/stmicro/stmmac/dwmac_lib.c | 30 ++++++++--------- drivers/net/ethernet/stmicro/stmmac/hwif.h | 2 +- .../net/ethernet/stmicro/stmmac/stmmac_main.c | 6 ++-- 6 files changed, 55 insertions(+), 37 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 031a83678c71..1fa6406f2dd4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -395,7 +395,7 @@ static void sun8i_dwmac_dma_start_tx(struct stmmac_priv *priv, writel(v, ioaddr + EMAC_TX_CTL1); } -static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr) +static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan) { u32 v; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index bb82ee9b855f..f161ec9ac490 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -70,15 +70,17 @@ static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) writel(value, ioaddr + DMA_AXI_BUS_MODE); } -static void dwmac1000_dma_init(void __iomem *ioaddr, - struct stmmac_dma_cfg *dma_cfg) +static void dwmac1000_dma_init_channel(struct stmmac_priv *priv, + void __iomem *ioaddr, + struct stmmac_dma_cfg 
*dma_cfg, u32 chan) { - u32 value = readl(ioaddr + DMA_BUS_MODE); int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + u32 value; - /* - * Set the DMA PBL (Programmable Burst Length) mode. + value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan)); + + /* Set the DMA PBL (Programmable Burst Length) mode. * * Note: before stmmac core 3.50 this mode bit was 4xPBL, and * post 3.5 mode bit acts as 8*PBL. @@ -104,10 +106,10 @@ static void dwmac1000_dma_init(void __iomem *ioaddr, if (dma_cfg->aal) value |= DMA_BUS_MODE_AAL; - writel(value, ioaddr + DMA_BUS_MODE); + writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan)); /* Mask interrupts by writing to CSR7 */ - writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(chan)); } static void dwmac1000_dma_init_rx(struct stmmac_priv *priv, @@ -116,7 +118,7 @@ static void dwmac1000_dma_init_rx(struct stmmac_priv *priv, dma_addr_t dma_rx_phy, u32 chan) { /* RX descriptor base address list must be written into DMA CSR3 */ - writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR); + writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RCV_BASE_ADDR(chan)); } static void dwmac1000_dma_init_tx(struct stmmac_priv *priv, @@ -125,7 +127,7 @@ static void dwmac1000_dma_init_tx(struct stmmac_priv *priv, dma_addr_t dma_tx_phy, u32 chan) { /* TX descriptor base address list must be written into DMA CSR4 */ - writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR); + writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan)); } static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) @@ -153,7 +155,7 @@ static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv, void __iomem *ioaddr, int mode, u32 channel, int fifosz, u8 qmode) { - u32 csr6 = readl(ioaddr + DMA_CONTROL); + u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel)); if (mode == SF_DMA_MODE) { pr_debug("GMAC: enable RX store and forward mode\n"); @@ -175,14 +177,14 @@ 
static void dwmac1000_dma_operation_mode_rx(struct stmmac_priv *priv, /* Configure flow control based on rx fifo size */ csr6 = dwmac1000_configure_fc(csr6, fifosz); - writel(csr6, ioaddr + DMA_CONTROL); + writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel)); } static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv, void __iomem *ioaddr, int mode, u32 channel, int fifosz, u8 qmode) { - u32 csr6 = readl(ioaddr + DMA_CONTROL); + u32 csr6 = readl(ioaddr + DMA_CHAN_CONTROL(channel)); if (mode == SF_DMA_MODE) { pr_debug("GMAC: enable TX store and forward mode\n"); @@ -209,7 +211,7 @@ static void dwmac1000_dma_operation_mode_tx(struct stmmac_priv *priv, csr6 |= DMA_CONTROL_TTC_256; } - writel(csr6, ioaddr + DMA_CONTROL); + writel(csr6, ioaddr + DMA_CHAN_CONTROL(channel)); } static void dwmac1000_dump_dma_regs(struct stmmac_priv *priv, @@ -271,12 +273,12 @@ static int dwmac1000_get_hw_feature(void __iomem *ioaddr, static void dwmac1000_rx_watchdog(struct stmmac_priv *priv, void __iomem *ioaddr, u32 riwt, u32 queue) { - writel(riwt, ioaddr + DMA_RX_WATCHDOG); + writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue)); } const struct stmmac_dma_ops dwmac1000_dma_ops = { .reset = dwmac_dma_reset, - .init = dwmac1000_dma_init, + .init_chan = dwmac1000_dma_init_channel, .init_rx_chan = dwmac1000_dma_init_rx, .init_tx_chan = dwmac1000_dma_init_tx, .axi = dwmac1000_dma_axi, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index 72672391675f..363a85469594 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -22,6 +22,23 @@ #define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ #define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ +/* Following DMA defines are channels oriented */ +#define DMA_CHAN_BASE_OFFSET 0x100 + +static inline u32 dma_chan_base_addr(u32 base, u32 chan) +{ + return base + chan * DMA_CHAN_BASE_OFFSET; +} + +#define 
DMA_CHAN_XMT_POLL_DEMAND(chan) dma_chan_base_addr(DMA_XMT_POLL_DEMAND, chan) +#define DMA_CHAN_INTR_ENA(chan) dma_chan_base_addr(DMA_INTR_ENA, chan) +#define DMA_CHAN_CONTROL(chan) dma_chan_base_addr(DMA_CONTROL, chan) +#define DMA_CHAN_STATUS(chan) dma_chan_base_addr(DMA_STATUS, chan) +#define DMA_CHAN_BUS_MODE(chan) dma_chan_base_addr(DMA_BUS_MODE, chan) +#define DMA_CHAN_RCV_BASE_ADDR(chan) dma_chan_base_addr(DMA_RCV_BASE_ADDR, chan) +#define DMA_CHAN_TX_BASE_ADDR(chan) dma_chan_base_addr(DMA_TX_BASE_ADDR, chan) +#define DMA_CHAN_RX_WATCHDOG(chan) dma_chan_base_addr(DMA_RX_WATCHDOG, chan) + /* SW Reset */ #define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ @@ -152,7 +169,7 @@ #define NUM_DWMAC1000_DMA_REGS 23 #define NUM_DWMAC4_DMA_REGS 27 -void dwmac_enable_dma_transmission(void __iomem *ioaddr); +void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan); void dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx); void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -168,5 +185,4 @@ void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, struct stmmac_extra_stats *x, u32 chan, u32 dir); int dwmac_dma_reset(void __iomem *ioaddr); - #endif /* __DWMAC_DMA_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 85e18f9a22f9..4846bf49c576 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -28,65 +28,65 @@ int dwmac_dma_reset(void __iomem *ioaddr) } /* CSR1 enables the transmit DMA to check for new descriptor */ -void dwmac_enable_dma_transmission(void __iomem *ioaddr) +void dwmac_enable_dma_transmission(void __iomem *ioaddr, u32 chan) { - writel(1, ioaddr + DMA_XMT_POLL_DEMAND); + writel(1, ioaddr + DMA_CHAN_XMT_POLL_DEMAND(chan)); } void 
dwmac_enable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx) { - u32 value = readl(ioaddr + DMA_INTR_ENA); + u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); if (rx) value |= DMA_INTR_DEFAULT_RX; if (tx) value |= DMA_INTR_DEFAULT_TX; - writel(value, ioaddr + DMA_INTR_ENA); + writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan)); } void dwmac_disable_dma_irq(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx) { - u32 value = readl(ioaddr + DMA_INTR_ENA); + u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan)); if (rx) value &= ~DMA_INTR_DEFAULT_RX; if (tx) value &= ~DMA_INTR_DEFAULT_TX; - writel(value, ioaddr + DMA_INTR_ENA); + writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan)); } void dwmac_dma_start_tx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CONTROL); + u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); value |= DMA_CONTROL_ST; - writel(value, ioaddr + DMA_CONTROL); + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); } void dwmac_dma_stop_tx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CONTROL); + u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); value &= ~DMA_CONTROL_ST; - writel(value, ioaddr + DMA_CONTROL); + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); } void dwmac_dma_start_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CONTROL); + u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); value |= DMA_CONTROL_SR; - writel(value, ioaddr + DMA_CONTROL); + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); } void dwmac_dma_stop_rx(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan) { - u32 value = readl(ioaddr + DMA_CONTROL); + u32 value = readl(ioaddr + DMA_CHAN_CONTROL(chan)); value &= ~DMA_CONTROL_SR; - writel(value, ioaddr + DMA_CONTROL); + writel(value, ioaddr + DMA_CHAN_CONTROL(chan)); } #ifdef DWMAC_DMA_DEBUG @@ -165,7 +165,7 @@ int 
dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; /* read the status register (CSR5) */ - u32 intr_status = readl(ioaddr + DMA_STATUS); + u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); #ifdef DWMAC_DMA_DEBUG /* Enable it to monitor DMA rx/tx status in case of critical problems */ diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 9e7d30e2c59e..eb4003ca7f5b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -189,7 +189,7 @@ struct stmmac_dma_ops { /* To track extra statistic (if supported) */ void (*dma_diagnostic_fr)(struct stmmac_extra_stats *x, void __iomem *ioaddr); - void (*enable_dma_transmission) (void __iomem *ioaddr); + void (*enable_dma_transmission)(void __iomem *ioaddr, u32 chan); void (*enable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr, u32 chan, bool rx, bool tx); void (*disable_dma_irq)(struct stmmac_priv *priv, void __iomem *ioaddr, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 1d2f3e214f97..80acff8b69e9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2512,7 +2512,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) true, priv->mode, true, true, xdp_desc.len); - stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); entry = tx_q->cur_tx; @@ -4644,7 +4644,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); - stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_enable_dma_transmission(priv, priv->ioaddr, 
queue); stmmac_flush_tx_descriptors(priv, queue); stmmac_tx_timer_arm(priv, queue); @@ -4863,7 +4863,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, u64_stats_update_end(&txq_stats->q_syncp); } - stmmac_enable_dma_transmission(priv, priv->ioaddr); + stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); tx_q->cur_tx = entry; -- Gitee From ef7ef9806986c14b1a69ac77753625a8a08f512e Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Thu, 25 Apr 2024 21:01:56 +0800 Subject: [PATCH 0926/2138] anolis: net: stmmac: Export dwmac1000_dma_ops ANBZ: #9164 The loongson gnet will call it in the future. Signed-off-by: Feiyang Chen Signed-off-by: Yinggang Gu Signed-off-by: Yanteng Si Reviewed-by: Serge Semin Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index f161ec9ac490..66c0c22908b1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -296,3 +296,4 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = { .get_hw_feature = dwmac1000_get_hw_feature, .rx_watchdog = dwmac1000_rx_watchdog, }; +EXPORT_SYMBOL_GPL(dwmac1000_dma_ops); -- Gitee From 82715e4ec00f9264ff0ba1c5267ee292202f1699 Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Thu, 25 Apr 2024 21:04:35 +0800 Subject: [PATCH 0927/2138] anolis: net: stmmac: dwmac-loongson: Drop useless platform data ANBZ: #9164 The multicast_filter_bins is initialized twice, it should be 256, let's drop the first useless assignment. 
Signed-off-by: Feiyang Chen Signed-off-by: Yinggang Gu Signed-off-by: Yanteng Si Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index ee3604f58def..ae09d172d9c6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -15,9 +15,6 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat) plat->has_gmac = 1; plat->force_sf_dma_mode = 1; - /* Set default value for multicast hash bins */ - plat->multicast_filter_bins = HASH_TABLE_SIZE; - /* Set default value for unicast filter entries */ plat->unicast_filter_entries = 1; -- Gitee From 0d93cf932da96481922831be13ab5c294fe75afe Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Thu, 25 Apr 2024 21:04:36 +0800 Subject: [PATCH 0928/2138] anolis: net: stmmac: dwmac-loongson: Use PCI_DEVICE_DATA() macro for device identification ANBZ: #9164 Just use PCI_DEVICE_DATA() macro for device identification, No changes to function functionality. 
Signed-off-by: Feiyang Chen Signed-off-by: Yinggang Gu Signed-off-by: Yanteng Si Reviewed-by: Serge Semin Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index ae09d172d9c6..494c0b16926b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -9,6 +9,8 @@ #include #include "stmmac.h" +#define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03 + static int loongson_default_data(struct plat_stmmacenet_data *plat) { plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ @@ -213,7 +215,7 @@ static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend, loongson_dwmac_resume); static const struct pci_device_id loongson_dwmac_id_table[] = { - { PCI_VDEVICE(LOONGSON, 0x7a03) }, + { PCI_DEVICE_DATA(LOONGSON, GMAC, NULL) }, {} }; MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table); -- Gitee From e2a22509625af665869effd1693d9246e370de4c Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Thu, 25 Apr 2024 21:04:37 +0800 Subject: [PATCH 0929/2138] anolis: net: stmmac: dwmac-loongson: Split up the platform data initialization ANBZ: #9164 Based on IP core classification, loongson has two types of network devices: GMAC and GNET. GMAC's ip_core id is 0x35/0x37, while GNET's ip_core id is 0x37/0x10. Device tables: device type pci_id snps_id channel ls2k1000 gmac 7a03 0x35/0x37 1 ls7a1000 gmac 7a03 0x35/0x37 1 ls2k2000 gnet 7a13 0x10 8 ls7a2000 gnet 7a13 0x37 1 The GMAC device only has a MAC chip inside and needs an external PHY chip; To later distinguish 8-channel gnet devices from single-channel gnet/gmac devices, move rx_queues_to_use loongson_default_data to loongson_dwmac_probe(). 
Also move mac_interface to loongson_default_data(). Signed-off-by: Feiyang Chen Signed-off-by: Yinggang Gu Signed-off-by: Yanteng Si Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- .../ethernet/stmicro/stmmac/dwmac-loongson.c | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index 494c0b16926b..9f208f84c1e7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -11,22 +11,20 @@ #define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03 -static int loongson_default_data(struct plat_stmmacenet_data *plat) +static void loongson_default_data(struct plat_stmmacenet_data *plat) { plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ plat->has_gmac = 1; plat->force_sf_dma_mode = 1; + plat->mac_interface = PHY_INTERFACE_MODE_GMII; + /* Set default value for unicast filter entries */ plat->unicast_filter_entries = 1; /* Set the maxmtu to a default of JUMBO_LEN */ plat->maxmtu = JUMBO_LEN; - /* Set default number of RX and TX queues to use */ - plat->tx_queues_to_use = 1; - plat->rx_queues_to_use = 1; - /* Disable Priority config by default */ plat->tx_queues_cfg[0].use_prio = false; plat->rx_queues_cfg[0].use_prio = false; @@ -44,6 +42,12 @@ static int loongson_default_data(struct plat_stmmacenet_data *plat) plat->dma_cfg->pblx8 = true; plat->multicast_filter_bins = 256; +} + +static int loongson_gmac_data(struct plat_stmmacenet_data *plat) +{ + loongson_default_data(plat); + return 0; } @@ -112,11 +116,10 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id } plat->phy_interface = phy_mode; - plat->mac_interface = PHY_INTERFACE_MODE_GMII; pci_set_master(pdev); - loongson_default_data(plat); + loongson_gmac_data(plat); pci_enable_msi(pdev); memset(&res, 0, sizeof(res)); 
res.addr = pcim_iomap_table(pdev)[0]; @@ -141,6 +144,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id goto err_disable_msi; } + plat->tx_queues_to_use = 1; + plat->rx_queues_to_use = 1; + ret = stmmac_dvr_probe(&pdev->dev, plat, &res); if (ret) goto err_disable_msi; -- Gitee From 286d0f6d396e18787e95154583836bda3a92bf84 Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Thu, 25 Apr 2024 21:06:11 +0800 Subject: [PATCH 0930/2138] anolis: net: stmmac: dwmac-loongson: Add phy mask for Loongson GMAC ANBZ: #9164 The phy mask of gmac(and gnet) is 0. Signed-off-by: Feiyang Chen Signed-off-by: Yinggang Gu Signed-off-by: Yanteng Si Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index 9f208f84c1e7..f7618edf4a3a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -48,6 +48,8 @@ static int loongson_gmac_data(struct plat_stmmacenet_data *plat) { loongson_default_data(plat); + plat->mdio_bus_data->phy_mask = 0; + return 0; } -- Gitee From 2f70a154d354fe4c0b497d87cf77c88fb4d76367 Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Thu, 25 Apr 2024 21:06:12 +0800 Subject: [PATCH 0931/2138] anolis: net: stmmac: dwmac-loongson: Add phy_interface for Loongson GMAC ANBZ: #9164 The mac_interface of gmac is PHY_INTERFACE_MODE_RGMII_ID. 
Signed-off-by: Feiyang Chen Signed-off-by: Yinggang Gu Signed-off-by: Yanteng Si Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index f7618edf4a3a..e989cb835340 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -49,6 +49,7 @@ static int loongson_gmac_data(struct plat_stmmacenet_data *plat) loongson_default_data(plat); plat->mdio_bus_data->phy_mask = 0; + plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; return 0; } -- Gitee From 957e1107bcb9d6c98916fe15dcad0a4b4f2fc8f3 Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Thu, 25 Apr 2024 21:10:35 +0800 Subject: [PATCH 0932/2138] anolis: net: stmmac: dwmac-loongson: Add full PCI support ANBZ: #9164 Current dwmac-loongson only support LS2K in the "probed with PCI and configured with DT" manner. Add LS7A support on which the devices are fully PCI (non-DT). Others: LS2K is a SoC and LS7A is a bridge chip. 
Signed-off-by: Feiyang Chen Signed-off-by: Yinggang Gu Signed-off-by: Yanteng Si Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- .../ethernet/stmicro/stmmac/dwmac-loongson.c | 113 ++++++++++-------- 1 file changed, 65 insertions(+), 48 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index e989cb835340..1022bceaa680 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -11,8 +11,17 @@ #define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03 -static void loongson_default_data(struct plat_stmmacenet_data *plat) +struct stmmac_pci_info { + int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat); +}; + +static void loongson_default_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) { + /* Get bus_id, this can be overloaded later */ + plat->bus_id = (pci_domain_nr(pdev->bus) << 16) | + PCI_DEVID(pdev->bus->number, pdev->devfn); + plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ plat->has_gmac = 1; plat->force_sf_dma_mode = 1; @@ -44,9 +53,10 @@ static void loongson_default_data(struct plat_stmmacenet_data *plat) plat->multicast_filter_bins = 256; } -static int loongson_gmac_data(struct plat_stmmacenet_data *plat) +static int loongson_gmac_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) { - loongson_default_data(plat); + loongson_default_data(pdev, plat); plat->mdio_bus_data->phy_mask = 0; plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; @@ -54,20 +64,20 @@ static int loongson_gmac_data(struct plat_stmmacenet_data *plat) return 0; } +static struct stmmac_pci_info loongson_gmac_pci_info = { + .setup = loongson_gmac_data, +}; + static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct plat_stmmacenet_data *plat; + int ret, i, bus_id, phy_mode; + struct stmmac_pci_info *info; 
struct stmmac_resources res; struct device_node *np; - int ret, i, phy_mode; np = dev_of_node(&pdev->dev); - if (!np) { - pr_info("dwmac_loongson_pci: No OF node\n"); - return -ENODEV; - } - plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); if (!plat) return -ENOMEM; @@ -78,12 +88,6 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id if (!plat->mdio_bus_data) return -ENOMEM; - plat->mdio_node = of_get_child_by_name(np, "mdio"); - if (plat->mdio_node) { - dev_info(&pdev->dev, "Found MDIO subnode\n"); - plat->mdio_bus_data->needs_reset = true; - } - plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL); if (!plat->dma_cfg) { ret = -ENOMEM; @@ -107,46 +111,59 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id break; } - plat->bus_id = of_alias_get_id(np, "ethernet"); - if (plat->bus_id < 0) - plat->bus_id = pci_dev_id(pdev); + pci_set_master(pdev); - phy_mode = device_get_phy_mode(&pdev->dev); - if (phy_mode < 0) { - dev_err(&pdev->dev, "phy_mode not found\n"); - ret = phy_mode; + info = (struct stmmac_pci_info *)id->driver_data; + ret = info->setup(pdev, plat); + if (ret) goto err_disable_device; - } - plat->phy_interface = phy_mode; - - pci_set_master(pdev); + if (np) { + plat->mdio_node = of_get_child_by_name(np, "mdio"); + if (plat->mdio_node) { + dev_info(&pdev->dev, "Found MDIO subnode\n"); + plat->mdio_bus_data->needs_reset = true; + } + + bus_id = of_alias_get_id(np, "ethernet"); + if (bus_id >= 0) + plat->bus_id = bus_id; + + phy_mode = device_get_phy_mode(&pdev->dev); + if (phy_mode < 0) { + dev_err(&pdev->dev, "phy_mode not found\n"); + ret = phy_mode; + goto err_disable_device; + } + plat->phy_interface = phy_mode; + + res.irq = of_irq_get_byname(np, "macirq"); + if (res.irq < 0) { + dev_err(&pdev->dev, "IRQ macirq not found\n"); + ret = -ENODEV; + goto err_disable_msi; + } + + res.wol_irq = of_irq_get_byname(np, "eth_wake_irq"); + if (res.wol_irq < 0) { + 
dev_info(&pdev->dev, "IRQ eth_wake_irq not found, using macirq\n"); + res.wol_irq = res.irq; + } + + res.lpi_irq = of_irq_get_byname(np, "eth_lpi"); + if (res.lpi_irq < 0) { + dev_err(&pdev->dev, "IRQ eth_lpi not found\n"); + ret = -ENODEV; + goto err_disable_msi; + } + } else { + res.irq = pdev->irq; + } - loongson_gmac_data(plat); pci_enable_msi(pdev); memset(&res, 0, sizeof(res)); res.addr = pcim_iomap_table(pdev)[0]; - res.irq = of_irq_get_byname(np, "macirq"); - if (res.irq < 0) { - dev_err(&pdev->dev, "IRQ macirq not found\n"); - ret = -ENODEV; - goto err_disable_msi; - } - - res.wol_irq = of_irq_get_byname(np, "eth_wake_irq"); - if (res.wol_irq < 0) { - dev_info(&pdev->dev, "IRQ eth_wake_irq not found, using macirq\n"); - res.wol_irq = res.irq; - } - - res.lpi_irq = of_irq_get_byname(np, "eth_lpi"); - if (res.lpi_irq < 0) { - dev_err(&pdev->dev, "IRQ eth_lpi not found\n"); - ret = -ENODEV; - goto err_disable_msi; - } - plat->tx_queues_to_use = 1; plat->rx_queues_to_use = 1; @@ -224,7 +241,7 @@ static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend, loongson_dwmac_resume); static const struct pci_device_id loongson_dwmac_id_table[] = { - { PCI_DEVICE_DATA(LOONGSON, GMAC, NULL) }, + { PCI_DEVICE_DATA(LOONGSON, GMAC, &loongson_gmac_pci_info) }, {} }; MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table); -- Gitee From 67a6e22d4a6891a1cfd44586877a7a12c0da7910 Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Thu, 25 Apr 2024 21:10:36 +0800 Subject: [PATCH 0933/2138] anolis: net: stmmac: dwmac-loongson: Add loongson_dwmac_config_legacy ANBZ: #9164 Move res._irq to loongson_dwmac_config_legacy(). No function changes. 
Signed-off-by: Feiyang Chen Signed-off-by: Yinggang Gu Signed-off-by: Yanteng Si Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- .../ethernet/stmicro/stmmac/dwmac-loongson.c | 56 +++++++++++-------- 1 file changed, 34 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index 1022bceaa680..df5899bec91a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -68,6 +68,38 @@ static struct stmmac_pci_info loongson_gmac_pci_info = { .setup = loongson_gmac_data, }; +static int loongson_dwmac_config_legacy(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res, + struct device_node *np) +{ + if (np) { + res->irq = of_irq_get_byname(np, "macirq"); + if (res->irq < 0) { + dev_err(&pdev->dev, "IRQ macirq not found\n"); + return -ENODEV; + } + + res->wol_irq = of_irq_get_byname(np, "eth_wake_irq"); + if (res->wol_irq < 0) { + dev_info(&pdev->dev, + "IRQ eth_wake_irq not found, using macirq\n"); + res->wol_irq = res->irq; + } + + res->lpi_irq = of_irq_get_byname(np, "eth_lpi"); + if (res->lpi_irq < 0) { + dev_err(&pdev->dev, "IRQ eth_lpi not found\n"); + return -ENODEV; + } + } else { + res->irq = pdev->irq; + res->wol_irq = res->irq; + } + + return 0; +} + static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct plat_stmmacenet_data *plat; @@ -136,28 +168,6 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id goto err_disable_device; } plat->phy_interface = phy_mode; - - res.irq = of_irq_get_byname(np, "macirq"); - if (res.irq < 0) { - dev_err(&pdev->dev, "IRQ macirq not found\n"); - ret = -ENODEV; - goto err_disable_msi; - } - - res.wol_irq = of_irq_get_byname(np, "eth_wake_irq"); - if (res.wol_irq < 0) { - dev_info(&pdev->dev, "IRQ eth_wake_irq 
not found, using macirq\n"); - res.wol_irq = res.irq; - } - - res.lpi_irq = of_irq_get_byname(np, "eth_lpi"); - if (res.lpi_irq < 0) { - dev_err(&pdev->dev, "IRQ eth_lpi not found\n"); - ret = -ENODEV; - goto err_disable_msi; - } - } else { - res.irq = pdev->irq; } pci_enable_msi(pdev); @@ -167,6 +177,8 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id plat->tx_queues_to_use = 1; plat->rx_queues_to_use = 1; + ret = loongson_dwmac_config_legacy(pdev, plat, &res, np); + ret = stmmac_dvr_probe(&pdev->dev, plat, &res); if (ret) goto err_disable_msi; -- Gitee From 834583cc71d8da0d8a5efdc1304df6bc67f6771d Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Thu, 25 Apr 2024 21:10:37 +0800 Subject: [PATCH 0934/2138] anolis: net: stmmac: dwmac-loongson: Fixed failure to set network speed to 1000. ANBZ: #9164 GNET devices with dev revision 0x00 do not support manually setting the speed to 1000. Signed-off-by: Feiyang Chen Signed-off-by: Yinggang Gu Signed-off-by: Yanteng Si Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 8 ++++++++ drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 6 ++++++ include/linux/stmmac.h | 1 + 3 files changed, 15 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index df5899bec91a..a16bba389417 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -10,6 +10,7 @@ #include "stmmac.h" #define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03 +#define PCI_DEVICE_ID_LOONGSON_GNET 0x7a13 struct stmmac_pci_info { int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat); @@ -179,6 +180,13 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id ret = loongson_dwmac_config_legacy(pdev, plat, &res, np); + /* GNET devices with dev 
revision 0x00 do not support manually + * setting the speed to 1000. + */ + if (pdev->device == PCI_DEVICE_ID_LOONGSON_GNET && + pdev->revision == 0x00) + plat->flags |= STMMAC_FLAG_DISABLE_FORCE_1000; + ret = stmmac_dvr_probe(&pdev->dev, plat, &res); if (ret) goto err_disable_msi; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 521b1b5ffebb..cf83cc95169b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -412,6 +412,12 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev, return 0; } + if (priv->plat->flags & STMMAC_FLAG_DISABLE_FORCE_1000) { + if (cmd->base.speed == SPEED_1000 && + cmd->base.autoneg != AUTONEG_ENABLE) + return -EOPNOTSUPP; + } + return phylink_ethtool_ksettings_set(priv->phylink, cmd); } diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 89f92847edd4..e286ebd32fe6 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -221,6 +221,7 @@ struct dwmac4_addrs { #define STMMAC_FLAG_RX_CLK_RUNS_IN_LPI BIT(10) #define STMMAC_FLAG_EN_TX_LPI_CLOCKGATING BIT(11) #define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY BIT(12) +#define STMMAC_FLAG_DISABLE_FORCE_1000 BIT(13) struct plat_stmmacenet_data { int bus_id; -- Gitee From 0e168f4bfc8c9165c64940d6b410b7116898303c Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Thu, 25 Apr 2024 21:11:36 +0800 Subject: [PATCH 0935/2138] anolis: net: stmmac: dwmac-loongson: Add Loongson GNET support ANBZ: #9164 There are two types of Loongson DWGMAC. The first type shares the same register definitions and has similar logic as dwmac1000. The second type uses several different register definitions, we think it is necessary to distinguish rx and tx, so we split these bits into two. 
Simply put, we split some single-bit fields into pairs of bits:

  Name                  Tx         Rx
  DMA_INTR_ENA_NIE = 0x00040000 | 0x00020000;
  DMA_INTR_ENA_AIE = 0x00010000 | 0x00008000;
  DMA_STATUS_NIS   = 0x00040000 | 0x00020000;
  DMA_STATUS_AIS   = 0x00010000 | 0x00008000;
  DMA_STATUS_FBI   = 0x00002000 | 0x00001000;

Therefore, when using them, the TX and RX bits must be set at the same time.

How to use them:

1. Create the Loongson GNET-specific stmmac_dma_ops.dma_interrupt()
   stmmac_dma_ops.init_chan() methods in the dwmac-loongson.c driver.
   Adding all the Loongson-specific macros
2. Create a Loongson GNET-specific platform setup method with the next
   semantics:
   + allocate stmmac_dma_ops instance and initialize it with
     dwmac1000_dma_ops.
   + override the stmmac_dma_ops.{dma_interrupt, init_chan} with the
     pointers to the methods defined in 2.
   + allocate mac_device_info instance and initialize the
     mac_device_info.dma field with a pointer to the new stmmac_dma_ops
     instance.
   + initialize mac_device_info in a way it's done in dwmac1000_setup().
3. Initialize plat_stmmacenet_data.setup() with the pointer to the
   method created in 2.

GNET features:
  Speeds: 10/100/1000Mbps
  DMA-descriptors type: enhanced
  L3/L4 filters availability: support
  VLAN hash table filter: support
  PHY-interface: GMII
  Remote Wake-up support: support
  Mac Management Counters (MMC): support
  Number of additional MAC addresses: 5
  MAC Hash-based filter: support
  Number of hash table size: 256
  DMA channel number: 0x10 device is 8 and 0x37 device is 1

Others:
  GNET integrates both MAC and PHY chips inside.
  GNET device: LS2K2000, LS7A2000, the chip connection between the mac
  and phy of these devices is not normal and requires two rounds of
  negotiation;
  LS7A2000 does not support half-duplex and multi-channel;
  To enable multi-channel on LS2K2000, you need to turn off hardware
  checksum.

**Note**: Currently, only the LS2K2000's synopsys_id is 0x10, while the
synopsys_id of other devices are 0x37.
Signed-off-by: Feiyang Chen Signed-off-by: Yinggang Gu Signed-off-by: Yanteng Si Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- drivers/net/ethernet/stmicro/stmmac/common.h | 1 + .../ethernet/stmicro/stmmac/dwmac-loongson.c | 381 +++++++++++++++++- 2 files changed, 371 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 4dbc076f72d6..f9a3f3321e59 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -29,6 +29,7 @@ /* Synopsys Core versions */ #define DWMAC_CORE_3_40 0x34 #define DWMAC_CORE_3_50 0x35 +#define DWMAC_CORE_3_70 0x37 #define DWMAC_CORE_4_00 0x40 #define DWMAC_CORE_4_10 0x41 #define DWMAC_CORE_5_00 0x50 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index a16bba389417..68de90c44feb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -8,9 +8,71 @@ #include #include #include "stmmac.h" +#include "dwmac_dma.h" +#include "dwmac1000.h" + +/* Normal Loongson Tx Summary */ +#define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000 +/* Normal Loongson Rx Summary */ +#define DMA_INTR_ENA_NIE_RX_LOONGSON 0x00020000 + +#define DMA_INTR_NORMAL_LOONGSON (DMA_INTR_ENA_NIE_TX_LOONGSON | \ + DMA_INTR_ENA_NIE_RX_LOONGSON | \ + DMA_INTR_ENA_RIE | DMA_INTR_ENA_TIE) + +/* Abnormal Loongson Tx Summary */ +#define DMA_INTR_ENA_AIE_TX_LOONGSON 0x00010000 +/* Abnormal Loongson Rx Summary */ +#define DMA_INTR_ENA_AIE_RX_LOONGSON 0x00008000 + +#define DMA_INTR_ABNORMAL_LOONGSON (DMA_INTR_ENA_AIE_TX_LOONGSON | \ + DMA_INTR_ENA_AIE_RX_LOONGSON | \ + DMA_INTR_ENA_FBE | DMA_INTR_ENA_UNE) + +#define DMA_INTR_DEFAULT_MASK_LOONGSON (DMA_INTR_NORMAL_LOONGSON | \ + DMA_INTR_ABNORMAL_LOONGSON) + +/* Normal Loongson Tx Interrupt Summary */ +#define 
DMA_STATUS_NIS_TX_LOONGSON 0x00040000 +/* Normal Loongson Rx Interrupt Summary */ +#define DMA_STATUS_NIS_RX_LOONGSON 0x00020000 + +/* Abnormal Loongson Tx Interrupt Summary */ +#define DMA_STATUS_AIS_TX_LOONGSON 0x00010000 +/* Abnormal Loongson Rx Interrupt Summary */ +#define DMA_STATUS_AIS_RX_LOONGSON 0x00008000 + +/* Fatal Loongson Tx Bus Error Interrupt */ +#define DMA_STATUS_FBI_TX_LOONGSON 0x00002000 +/* Fatal Loongson Rx Bus Error Interrupt */ +#define DMA_STATUS_FBI_RX_LOONGSON 0x00001000 + +#define DMA_STATUS_MSK_COMMON_LOONGSON (DMA_STATUS_NIS_TX_LOONGSON | \ + DMA_STATUS_NIS_RX_LOONGSON | \ + DMA_STATUS_AIS_TX_LOONGSON | \ + DMA_STATUS_AIS_RX_LOONGSON | \ + DMA_STATUS_FBI_TX_LOONGSON | \ + DMA_STATUS_FBI_RX_LOONGSON) + +#define DMA_STATUS_MSK_RX_LOONGSON (DMA_STATUS_ERI | DMA_STATUS_RWT | \ + DMA_STATUS_RPS | DMA_STATUS_RU | \ + DMA_STATUS_RI | DMA_STATUS_OVF | \ + DMA_STATUS_MSK_COMMON_LOONGSON) + +#define DMA_STATUS_MSK_TX_LOONGSON (DMA_STATUS_ETI | DMA_STATUS_UNF | \ + DMA_STATUS_TJT | DMA_STATUS_TU | \ + DMA_STATUS_TPS | DMA_STATUS_TI | \ + DMA_STATUS_MSK_COMMON_LOONGSON) #define PCI_DEVICE_ID_LOONGSON_GMAC 0x7a03 #define PCI_DEVICE_ID_LOONGSON_GNET 0x7a13 +#define LOONGSON_DWMAC_CORE_1_00 0x10 /* Loongson custom IP */ +#define CHANNEL_NUM 8 + +struct loongson_data { + u32 gmac_verion; + struct device *dev; +}; struct stmmac_pci_info { int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat); @@ -69,6 +131,168 @@ static struct stmmac_pci_info loongson_gmac_pci_info = { .setup = loongson_gmac_data, }; +static void loongson_gnet_dma_init_channel(struct stmmac_priv *priv, + void __iomem *ioaddr, + struct stmmac_dma_cfg *dma_cfg, + u32 chan) +{ + int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl; + int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl; + u32 value; + + value = readl(ioaddr + DMA_CHAN_BUS_MODE(chan)); + + if (dma_cfg->pblx8) + value |= DMA_BUS_MODE_MAXPBL; + + value |= DMA_BUS_MODE_USP; + value &= ~(DMA_BUS_MODE_PBL_MASK | 
DMA_BUS_MODE_RPBL_MASK); + value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT); + value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT); + + /* Set the Fixed burst mode */ + if (dma_cfg->fixed_burst) + value |= DMA_BUS_MODE_FB; + + /* Mixed Burst has no effect when fb is set */ + if (dma_cfg->mixed_burst) + value |= DMA_BUS_MODE_MB; + + if (dma_cfg->atds) + value |= DMA_BUS_MODE_ATDS; + + if (dma_cfg->aal) + value |= DMA_BUS_MODE_AAL; + + writel(value, ioaddr + DMA_CHAN_BUS_MODE(chan)); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_INTR_DEFAULT_MASK_LOONGSON, ioaddr + + DMA_CHAN_INTR_ENA(chan)); +} + +static int loongson_gnet_dma_interrupt(struct stmmac_priv *priv, + void __iomem *ioaddr, + struct stmmac_extra_stats *x, + u32 chan, u32 dir) +{ + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); + u32 abnor_intr_status; + u32 nor_intr_status; + u32 fb_intr_status; + u32 intr_status; + int ret = 0; + + /* read the status register (CSR5) */ + intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan)); + + if (dir == DMA_DIR_RX) + intr_status &= DMA_STATUS_MSK_RX_LOONGSON; + else if (dir == DMA_DIR_TX) + intr_status &= DMA_STATUS_MSK_TX_LOONGSON; + + nor_intr_status = intr_status & (DMA_STATUS_NIS_TX_LOONGSON | + DMA_STATUS_NIS_RX_LOONGSON); + abnor_intr_status = intr_status & (DMA_STATUS_AIS_TX_LOONGSON | + DMA_STATUS_AIS_RX_LOONGSON); + fb_intr_status = intr_status & (DMA_STATUS_FBI_TX_LOONGSON | + DMA_STATUS_FBI_RX_LOONGSON); + + /* ABNORMAL interrupts */ + if (unlikely(abnor_intr_status)) { + if (unlikely(intr_status & DMA_STATUS_UNF)) { + ret = tx_hard_error_bump_tc; + x->tx_undeflow_irq++; + } + if (unlikely(intr_status & DMA_STATUS_TJT)) + x->tx_jabber_irq++; + if (unlikely(intr_status & DMA_STATUS_OVF)) + x->rx_overflow_irq++; + if (unlikely(intr_status & DMA_STATUS_RU)) + x->rx_buf_unav_irq++; + if (unlikely(intr_status & DMA_STATUS_RPS)) + x->rx_process_stopped_irq++; + if (unlikely(intr_status & DMA_STATUS_RWT)) + x->rx_watchdog_irq++; + if 
(unlikely(intr_status & DMA_STATUS_ETI)) + x->tx_early_irq++; + if (unlikely(intr_status & DMA_STATUS_TPS)) { + x->tx_process_stopped_irq++; + ret = tx_hard_error; + } + if (unlikely(fb_intr_status)) { + x->fatal_bus_error_irq++; + ret = tx_hard_error; + } + } + /* TX/RX NORMAL interrupts */ + if (likely(nor_intr_status)) { + if (likely(intr_status & DMA_STATUS_RI)) { + u32 value = readl(ioaddr + DMA_INTR_ENA); + /* to schedule NAPI on real RIE event. */ + if (likely(value & DMA_INTR_ENA_RIE)) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); + ret |= handle_rx; + } + } + if (likely(intr_status & DMA_STATUS_TI)) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); + ret |= handle_tx; + } + if (unlikely(intr_status & DMA_STATUS_ERI)) + x->rx_early_irq++; + } + /* Optional hardware blocks, interrupts should be disabled */ + if (unlikely(intr_status & + (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI))) + pr_warn("%s: unexpected status %08x\n", __func__, intr_status); + + /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ + writel((intr_status & 0x7ffff), ioaddr + DMA_CHAN_STATUS(chan)); + + return ret; +} + +static void loongson_gnet_fix_speed(void *priv, unsigned int speed, + unsigned int mode) +{ + struct loongson_data *ld = (struct loongson_data *)priv; + struct net_device *ndev = dev_get_drvdata(ld->dev); + struct stmmac_priv *ptr = netdev_priv(ndev); + + /* The controller and PHY don't work well together. + * We need to use the PS bit to check if the controller's status + * is correct and reset PHY if necessary. + * MAC_CTRL_REG.15 is defined by the GMAC_CONTROL_PS macro. 
+ */ + if (speed == SPEED_1000) { + if (readl(ptr->ioaddr + MAC_CTRL_REG) & + GMAC_CONTROL_PS) + /* Word around hardware bug, restart autoneg */ + phy_restart_aneg(ndev->phydev); + } +} + +static int loongson_gnet_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + loongson_default_data(pdev, plat); + + plat->phy_interface = PHY_INTERFACE_MODE_GMII; + plat->mdio_bus_data->phy_mask = ~(u32)BIT(2); + plat->fix_mac_speed = loongson_gnet_fix_speed; + + return 0; +} + +static struct stmmac_pci_info loongson_gnet_pci_info = { + .setup = loongson_gnet_data, +}; + static int loongson_dwmac_config_legacy(struct pci_dev *pdev, struct plat_stmmacenet_data *plat, struct stmmac_resources *res, @@ -101,12 +325,126 @@ static int loongson_dwmac_config_legacy(struct pci_dev *pdev, return 0; } +static int loongson_dwmac_config_msi(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat, + struct stmmac_resources *res, + struct device_node *np) +{ + int i, ret, vecs; + + vecs = roundup_pow_of_two(CHANNEL_NUM * 2 + 1); + ret = pci_alloc_irq_vectors(pdev, vecs, vecs, PCI_IRQ_MSI); + if (ret < 0) { + dev_info(&pdev->dev, + "MSI enable failed, Fallback to legacy interrupt\n"); + return loongson_dwmac_config_legacy(pdev, plat, res, np); + } + + res->irq = pci_irq_vector(pdev, 0); + res->wol_irq = 0; + + /* INT NAME | MAC | CH7 rx | CH7 tx | ... | CH0 rx | CH0 tx | + * --------- ----- -------- -------- ... -------- -------- + * IRQ NUM | 0 | 1 | 2 | ... 
| 15 | 16 | + */ + for (i = 0; i < CHANNEL_NUM; i++) { + res->rx_irq[CHANNEL_NUM - 1 - i] = + pci_irq_vector(pdev, 1 + i * 2); + res->tx_irq[CHANNEL_NUM - 1 - i] = + pci_irq_vector(pdev, 2 + i * 2); + } + + plat->flags |= STMMAC_FLAG_MULTI_MSI_EN; + + return 0; +} + +static struct mac_device_info *loongson_dwmac_setup(void *apriv) +{ + struct stmmac_priv *priv = apriv; + struct mac_device_info *mac; + struct stmmac_dma_ops *dma; + struct loongson_data *ld; + struct pci_dev *pdev; + + ld = priv->plat->bsp_priv; + pdev = to_pci_dev(priv->device); + + mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL); + if (!mac) + return NULL; + + dma = devm_kzalloc(priv->device, sizeof(*dma), GFP_KERNEL); + if (!dma) + return NULL; + + /* The original IP-core version is 0x37 in all Loongson GNET + * (ls2k2000 and ls7a2000), but the GNET HW designers have changed the + * GMAC_VERSION.SNPSVER field to the custom 0x10 value on the Loongson + * ls2k2000 MAC to emphasize the differences: multiple DMA-channels, + * AV feature and GMAC_INT_STATUS CSR flags layout. Get back the + * original value so the correct HW-interface would be selected. + */ + if (ld->gmac_verion == LOONGSON_DWMAC_CORE_1_00) { + priv->synopsys_id = DWMAC_CORE_3_70; + *dma = dwmac1000_dma_ops; + dma->init_chan = loongson_gnet_dma_init_channel; + dma->dma_interrupt = loongson_gnet_dma_interrupt; + mac->dma = dma; + } + + mac->mac = &dwmac1000_ops; + priv->dev->priv_flags |= IFF_UNICAST_FLT; + + /* Pre-initialize the respective "mac" fields as it's done in + * dwmac1000_setup() + */ + mac->pcsr = priv->ioaddr; + mac->multicast_filter_bins = priv->plat->multicast_filter_bins; + mac->unicast_filter_entries = priv->plat->unicast_filter_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + /* The GMAC devices with PCI ID 0x7a03 does not support any pause mode. + * The GNET devices without CORE ID 0x10 does not support half-duplex. 
+ */ + if (pdev->device == PCI_DEVICE_ID_LOONGSON_GMAC) { + mac->link.caps = MAC_10 | MAC_100 | MAC_1000; + } else { + if (ld->gmac_verion == LOONGSON_DWMAC_CORE_1_00) + mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000; + else + mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10FD | MAC_100FD | MAC_1000FD; + } + + mac->link.duplex = GMAC_CONTROL_DM; + mac->link.speed10 = GMAC_CONTROL_PS; + mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES; + mac->link.speed1000 = 0; + mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES; + mac->mii.addr = GMAC_MII_ADDR; + mac->mii.data = GMAC_MII_DATA; + mac->mii.addr_shift = 11; + mac->mii.addr_mask = 0x0000F800; + mac->mii.reg_shift = 6; + mac->mii.reg_mask = 0x000007C0; + mac->mii.clk_csr_shift = 2; + mac->mii.clk_csr_mask = GENMASK(5, 2); + + return mac; +} + static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct plat_stmmacenet_data *plat; int ret, i, bus_id, phy_mode; struct stmmac_pci_info *info; struct stmmac_resources res; + struct loongson_data *ld; struct device_node *np; np = dev_of_node(&pdev->dev); @@ -122,10 +460,12 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id return -ENOMEM; plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL); - if (!plat->dma_cfg) { - ret = -ENOMEM; - goto err_put_node; - } + if (!plat->dma_cfg) + return -ENOMEM; + + ld = devm_kzalloc(&pdev->dev, sizeof(*ld), GFP_KERNEL); + if (!ld) + return -ENOMEM; /* Enable pci device */ ret = pci_enable_device(pdev); @@ -171,14 +511,34 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id plat->phy_interface = phy_mode; } - pci_enable_msi(pdev); + plat->bsp_priv = ld; + plat->setup = loongson_dwmac_setup; + ld->dev = &pdev->dev; + memset(&res, 0, sizeof(res)); res.addr = pcim_iomap_table(pdev)[0]; + ld->gmac_verion = readl(res.addr + GMAC_VERSION) & 0xff; + + switch 
(ld->gmac_verion) { + case LOONGSON_DWMAC_CORE_1_00: + plat->rx_queues_to_use = CHANNEL_NUM; + plat->tx_queues_to_use = CHANNEL_NUM; + + /* Only channel 0 supports checksum, + * so turn off checksum to enable multiple channels. + */ + for (i = 1; i < CHANNEL_NUM; i++) + plat->tx_queues_cfg[i].coe_unsupported = 1; - plat->tx_queues_to_use = 1; - plat->rx_queues_to_use = 1; + ret = loongson_dwmac_config_msi(pdev, plat, &res, np); + break; + default: /* 0x35 device and 0x37 device. */ + plat->tx_queues_to_use = 1; + plat->rx_queues_to_use = 1; - ret = loongson_dwmac_config_legacy(pdev, plat, &res, np); + ret = loongson_dwmac_config_legacy(pdev, plat, &res, np); + break; + } /* GNET devices with dev revision 0x00 do not support manually * setting the speed to 1000. @@ -189,12 +549,10 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id ret = stmmac_dvr_probe(&pdev->dev, plat, &res); if (ret) - goto err_disable_msi; + goto err_disable_device; return ret; -err_disable_msi: - pci_disable_msi(pdev); err_disable_device: pci_disable_device(pdev); err_put_node: @@ -262,6 +620,7 @@ static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend, static const struct pci_device_id loongson_dwmac_id_table[] = { { PCI_DEVICE_DATA(LOONGSON, GMAC, &loongson_gmac_pci_info) }, + { PCI_DEVICE_DATA(LOONGSON, GNET, &loongson_gnet_pci_info) }, {} }; MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table); -- Gitee From 7ead418468f396c48438be070bc25174060bcd9b Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Thu, 25 Apr 2024 21:11:37 +0800 Subject: [PATCH 0936/2138] anolis: net: stmmac: dwmac-loongson: Move disable_force flag to _gnet_date ANBZ: #9164 We've already introduced loongson_gnet_data(), so the STMMAC_FLAG_DISABLE_FORCE_1000 should be take away from loongson_dwmac_probe(). 
Signed-off-by: Feiyang Chen Signed-off-by: Yinggang Gu Signed-off-by: Yanteng Si Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- .../net/ethernet/stmicro/stmmac/dwmac-loongson.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index 68de90c44feb..dea02de030e6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -286,6 +286,12 @@ static int loongson_gnet_data(struct pci_dev *pdev, plat->mdio_bus_data->phy_mask = ~(u32)BIT(2); plat->fix_mac_speed = loongson_gnet_fix_speed; + /* GNET devices with dev revision 0x00 do not support manually + * setting the speed to 1000. + */ + if (pdev->revision == 0x00) + plat->flags |= STMMAC_FLAG_DISABLE_FORCE_1000; + return 0; } @@ -540,13 +546,6 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id break; } - /* GNET devices with dev revision 0x00 do not support manually - * setting the speed to 1000. - */ - if (pdev->device == PCI_DEVICE_ID_LOONGSON_GNET && - pdev->revision == 0x00) - plat->flags |= STMMAC_FLAG_DISABLE_FORCE_1000; - ret = stmmac_dvr_probe(&pdev->dev, plat, &res); if (ret) goto err_disable_device; -- Gitee From b71feea088f9209e94c3b6f47505f1dd67b2e6e9 Mon Sep 17 00:00:00 2001 From: Yanteng Si Date: Thu, 25 Apr 2024 21:11:38 +0800 Subject: [PATCH 0937/2138] anolis: net: stmmac: dwmac-loongson: Add loongson module author ANBZ: #9164 Add Yanteng Si as MODULE_AUTHOR of Loongson DWMAC PCI driver. 
Signed-off-by: Feiyang Chen Signed-off-by: Yinggang Gu Signed-off-by: Yanteng Si Signed-off-by: Ming Wang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3269 --- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index dea02de030e6..f0eebed751f3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -638,4 +638,5 @@ module_pci_driver(loongson_dwmac_driver); MODULE_DESCRIPTION("Loongson DWMAC PCI driver"); MODULE_AUTHOR("Qing Zhang "); +MODULE_AUTHOR("Yanteng Si "); MODULE_LICENSE("GPL v2"); -- Gitee From 35b2c11f997b9b5b384c7c6fe72cd0ffa7615da4 Mon Sep 17 00:00:00 2001 From: yuan0927 Date: Fri, 24 May 2024 09:15:40 +0800 Subject: [PATCH 0938/2138] anolis: drm/phytium: Bugfix Xorg startup for ps23xx when using pe2201 bmc card ANBZ: #8989 When the pe2201 bmc card is detected on ps23xx SoCs, map the card's graphics memory to device attributes to avoid unnecessary trouble. 
Signed-off-by: yuan0927 Signed-off-by: WangHao Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/3260 --- drivers/gpu/drm/phytium/phytium_display_drv.c | 21 ++++++++++-- drivers/gpu/drm/phytium/phytium_display_drv.h | 3 +- drivers/gpu/drm/phytium/phytium_gem.c | 25 +++++++++----- drivers/gpu/drm/phytium/phytium_pci.c | 33 ++++++++++++++++--- 4 files changed, 67 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.c b/drivers/gpu/drm/phytium/phytium_display_drv.c index 60c7a20e7ca2..31c080573414 100644 --- a/drivers/gpu/drm/phytium/phytium_display_drv.c +++ b/drivers/gpu/drm/phytium/phytium_display_drv.c @@ -249,7 +249,7 @@ static int phytium_display_load(struct drm_device *dev, unsigned long flags) goto failed_modeset_init; } - if (priv->support_memory_type & MEMORY_TYPE_VRAM) + if (priv->support_memory_type & (MEMORY_TYPE_VRAM_WC | MEMORY_TYPE_VRAM_DEVICE)) priv->vram_hw_init(priv); phytium_irq_preinstall(dev); @@ -285,8 +285,25 @@ static void phytium_display_unload(struct drm_device *dev) drm_mode_config_cleanup(dev); } +/* phytium display specific ioctls + * The device specific ioctl range is 0x40 to 0x79. + */ +#define DRM_PHYTIUM_VRAM_TYPE_DEVICE 0x0 +#define DRM_IOCTL_PHYTIUM_VRAM_TYPE_DEVICE DRM_IO(DRM_COMMAND_BASE\ + + DRM_PHYTIUM_VRAM_TYPE_DEVICE) + +static int phytium_ioctl_check_vram_device(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct phytium_display_private *priv = dev->dev_private; + + return ((priv->support_memory_type == MEMORY_TYPE_VRAM_DEVICE) ? 
1 : 0); +} + static const struct drm_ioctl_desc phytium_ioctls[] = { /* for test, none so far */ + DRM_IOCTL_DEF_DRV(PHYTIUM_VRAM_TYPE_DEVICE, phytium_ioctl_check_vram_device, + DRM_AUTH|DRM_UNLOCKED), }; static const struct file_operations phytium_drm_driver_fops = { @@ -378,7 +395,7 @@ static int phytium_display_pm_resume(struct drm_device *dev) phytium_crtc_resume(dev); phytium_gem_resume(dev); - if (priv->support_memory_type & MEMORY_TYPE_VRAM) + if (priv->support_memory_type & (MEMORY_TYPE_VRAM_WC | MEMORY_TYPE_VRAM_DEVICE)) priv->vram_hw_init(priv); ret = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state); diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.h b/drivers/gpu/drm/phytium/phytium_display_drv.h index 70080dad8621..9038bf6ebd8c 100644 --- a/drivers/gpu/drm/phytium/phytium_display_drv.h +++ b/drivers/gpu/drm/phytium/phytium_display_drv.h @@ -49,9 +49,10 @@ enum phytium_mem_state_type { PHYTIUM_MEM_STATE_TYPE_COUNT, }; -#define MEMORY_TYPE_VRAM 0x1 +#define MEMORY_TYPE_VRAM_WC 0x1 #define MEMORY_TYPE_SYSTEM_CARVEOUT 0x2 #define MEMORY_TYPE_SYSTEM_UNIFIED 0x4 +#define MEMORY_TYPE_VRAM_DEVICE 0x8 #define IS_PLATFORM(priv, p) ((priv)->info.platform_mask & BIT(p)) diff --git a/drivers/gpu/drm/phytium/phytium_gem.c b/drivers/gpu/drm/phytium/phytium_gem.c index f470f769dce6..da873a9de811 100644 --- a/drivers/gpu/drm/phytium/phytium_gem.c +++ b/drivers/gpu/drm/phytium/phytium_gem.c @@ -87,7 +87,8 @@ phytium_gem_prime_get_sg_table(struct drm_gem_object *obj) return ERR_PTR(-ENOMEM); } - if ((phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) || + if ((phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM_WC) || + (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM_DEVICE) || (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT)) { ret = sg_alloc_table(sgt, 1, GFP_KERNEL); if (ret) { @@ -272,7 +273,8 @@ int phytium_gem_suspend(struct drm_device *drm_dev) int ret = 0; list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { - if 
(phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + if ((phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM_WC) && + (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM_DEVICE)) continue; phytium_gem_obj->vaddr_save = vmalloc(phytium_gem_obj->size); @@ -291,7 +293,8 @@ int phytium_gem_suspend(struct drm_device *drm_dev) return 0; malloc_failed: list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { - if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + if ((phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM_WC) && + (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM_DEVICE)) continue; if (phytium_gem_obj->vaddr_save) { @@ -308,7 +311,8 @@ void phytium_gem_resume(struct drm_device *drm_dev) struct phytium_gem_object *phytium_gem_obj = NULL; list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { - if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + if ((phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM_WC) && + (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM_DEVICE)) continue; memcpy(phytium_gem_obj->vaddr, phytium_gem_obj->vaddr_save, phytium_gem_obj->size); @@ -327,7 +331,8 @@ void phytium_gem_free_object(struct drm_gem_object *obj) DRM_DEBUG_KMS("free phytium_gem_obj iova:0x%pa size:0x%lx\n", &phytium_gem_obj->iova, phytium_gem_obj->size); if (phytium_gem_obj->vaddr) { - if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) { + if ((phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM_WC) || + (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM_DEVICE)) { phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); priv->mem_state[PHYTIUM_MEM_VRAM_ALLOC] -= size; } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { @@ -360,10 +365,14 @@ int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma) vma->vm_pgoff = 0; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) { + if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM_WC) { vma->vm_page_prot = 
pgprot_writecombine(vma->vm_page_prot); ret = remap_pfn_range(vma, vma->vm_start, pfn, vma->vm_end - vma->vm_start, vma->vm_page_prot); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM_DEVICE) { + vma->vm_page_prot = pgprot_device(vma->vm_page_prot); + ret = remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, vma->vm_page_prot); } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { ret = remap_pfn_range(vma, vma->vm_start, pfn, vma->vm_end - vma->vm_start, vma->vm_page_prot); @@ -421,7 +430,7 @@ struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, uns goto failed_object_init; } - if (priv->support_memory_type & MEMORY_TYPE_VRAM) { + if (priv->support_memory_type & (MEMORY_TYPE_VRAM_WC | MEMORY_TYPE_VRAM_DEVICE)) { ret = phytium_memory_pool_alloc(priv, &phytium_gem_obj->vaddr, &phytium_gem_obj->phys_addr, size); if (ret) { @@ -429,7 +438,7 @@ struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, uns goto failed_dma_alloc; } phytium_gem_obj->iova = phytium_gem_obj->phys_addr; - phytium_gem_obj->memory_type = MEMORY_TYPE_VRAM; + phytium_gem_obj->memory_type = priv->support_memory_type; priv->mem_state[PHYTIUM_MEM_VRAM_ALLOC] += size; } else if (priv->support_memory_type & MEMORY_TYPE_SYSTEM_CARVEOUT) { ret = phytium_memory_pool_alloc(priv, &phytium_gem_obj->vaddr, diff --git a/drivers/gpu/drm/phytium/phytium_pci.c b/drivers/gpu/drm/phytium/phytium_pci.c index f93ab85395c5..7ed2f58a2942 100644 --- a/drivers/gpu/drm/phytium/phytium_pci.c +++ b/drivers/gpu/drm/phytium/phytium_pci.c @@ -27,6 +27,24 @@ void phytium_pci_vram_hw_init(struct phytium_display_private *priv) pci_priv->dc_hw_vram_init(priv, priv->pool_phys_addr, priv->pool_size); } +static bool phytium_pci_host_is_5c01(struct pci_bus *bus) +{ + struct pci_bus *child = bus; + struct pci_dev *root = NULL; + + while (child) { + if (child->parent->parent) + child = child->parent; + else + break; + } + + root = 
child->self; + if ((root->vendor == 0x1db7) && (root->device == 0x5c01)) + return true; + return false; +} + int phytium_pci_vram_init(struct pci_dev *pdev, struct phytium_display_private *priv) { int ret = 0; @@ -34,8 +52,15 @@ int phytium_pci_vram_init(struct pci_dev *pdev, struct phytium_display_private * priv->pool_phys_addr = pci_resource_start(pdev, 2); priv->pool_size = pci_resource_len(pdev, 2); if ((priv->pool_phys_addr != 0) && (priv->pool_size != 0)) { - priv->pool_virt_addr = devm_ioremap_wc(&pdev->dev, priv->pool_phys_addr, - priv->pool_size); + if ((pdev->device == 0xdc3e) && phytium_pci_host_is_5c01(pdev->bus)) { + priv->pool_virt_addr = devm_ioremap(&pdev->dev, priv->pool_phys_addr, + priv->pool_size); + priv->support_memory_type = MEMORY_TYPE_VRAM_DEVICE; + } else { + priv->pool_virt_addr = devm_ioremap_wc(&pdev->dev, priv->pool_phys_addr, + priv->pool_size); + priv->support_memory_type = MEMORY_TYPE_VRAM_WC; + } if (priv->pool_virt_addr == NULL) { DRM_ERROR("pci vram ioremap fail, addr:0x%llx, size:0x%llx\n", priv->pool_phys_addr, priv->pool_size); @@ -47,7 +72,6 @@ int phytium_pci_vram_init(struct pci_dev *pdev, struct phytium_display_private * goto failed_init_memory_pool; priv->mem_state[PHYTIUM_MEM_VRAM_TOTAL] = priv->pool_size; - priv->support_memory_type = MEMORY_TYPE_VRAM; priv->vram_hw_init = phytium_pci_vram_hw_init; } else { DRM_DEBUG_KMS("not support vram\n"); @@ -67,7 +91,8 @@ int phytium_pci_vram_init(struct pci_dev *pdev, struct phytium_display_private * void phytium_pci_vram_fini(struct pci_dev *pdev, struct phytium_display_private *priv) { - if (priv->support_memory_type == MEMORY_TYPE_VRAM) { + if ((priv->support_memory_type == MEMORY_TYPE_VRAM_WC) || + (priv->support_memory_type == MEMORY_TYPE_VRAM_DEVICE)) { phytium_memory_pool_fini(&pdev->dev, priv); devm_iounmap(&pdev->dev, priv->pool_virt_addr); } -- Gitee From 40ac75f5d304ea11d5fa97af92b9dd3cd8af3dbb Mon Sep 17 00:00:00 2001 From: yuan0927 Date: Fri, 24 May 2024 09:18:08 
+0800 Subject: [PATCH 0939/2138] anolis: drm/ast: Fixed display error for ps23xx when using ast bmc card ANBZ: #8989 When the ast bmc card is detected on ps23xx SoCs, change the card's vram to uncache mode to avoid unnecessary trouble. Signed-off-by: yuan0927 Signed-off-by: WangHao Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/3260 --- drivers/gpu/drm/ast/ast_drv.c | 20 ++++++++++++++++++++ drivers/gpu/drm/ast/ast_drv.h | 3 +++ drivers/gpu/drm/ast/ast_mm.c | 34 +++++++++++++++++++++++++++++++++- 3 files changed, 56 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index e1224ef4ad83..529357d1a333 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -50,11 +50,31 @@ module_param_named(modeset, ast_modeset, int, 0400); DEFINE_DRM_GEM_FOPS(ast_fops); +#define DRM_AST_VRAM_TYPE_DEVICE 0x0 +#define DRM_IOCTL_AST_VRAM_TYPE_DEVICE DRM_IO(DRM_COMMAND_BASE\ + + DRM_AST_VRAM_TYPE_DEVICE) + +static int ast_ioctl_check_5c01_device(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct ast_device *ast = to_ast_device(dev); + + return ast->is_5c01_device ? 
1 : 0; +} + +static const struct drm_ioctl_desc ast_ioctls[] = { + /* for test, none so far */ + DRM_IOCTL_DEF_DRV(AST_VRAM_TYPE_DEVICE, ast_ioctl_check_5c01_device, + DRM_AUTH|DRM_UNLOCKED), +}; + static const struct drm_driver ast_driver = { .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, + .ioctls = ast_ioctls, + .num_ioctls = ARRAY_SIZE(ast_ioctls), .fops = &ast_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index f7053f2972bb..b7534f6fbfff 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -231,6 +231,9 @@ struct ast_device { } bmc; } output; + struct ttm_device *bdev; + + bool is_5c01_device; bool support_wide_screen; enum { ast_use_p2a, diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c index bc174bd933b9..f48df729c811 100644 --- a/drivers/gpu/drm/ast/ast_mm.c +++ b/drivers/gpu/drm/ast/ast_mm.c @@ -30,6 +30,12 @@ #include #include +#include + +#include +#include +#include +#include #include "ast_drv.h" @@ -71,6 +77,25 @@ static u32 ast_get_vram_size(struct ast_device *ast) return vram_size; } +static bool ast_pci_host_is_5c01(struct pci_bus *bus) +{ + struct pci_bus *child = bus; + struct pci_dev *root = NULL; + + while (child) { + if (child->parent->parent) + child = child->parent; + else + break; + } + + root = child->self; + + if ((root->vendor == 0x1db7) && (root->device == 0x5c01)) + return true; + return false; +} + int ast_mm_init(struct ast_device *ast) { struct drm_device *dev = &ast->base; @@ -87,7 +112,14 @@ int ast_mm_init(struct ast_device *ast) vram_size = ast_get_vram_size(ast); - ast->vram = devm_ioremap_wc(dev->dev, base, vram_size); + if (ast_pci_host_is_5c01(pdev->bus)) { + ast->is_5c01_device = true; + ast->vram = devm_ioremap(dev->dev, base, vram_size); + } else { + ast->is_5c01_device = false; + ast->vram = devm_ioremap_wc(dev->dev, base, vram_size); + } + if (!ast->vram) return -ENOMEM; -- 
Gitee From d620ea264fc11e1eed555a3a59796ef1bd7b30d3 Mon Sep 17 00:00:00 2001 From: Jiakun Shuai Date: Thu, 30 May 2024 14:04:56 +0800 Subject: [PATCH 0940/2138] anolis: drm/phytium: Replace default efi fb0 with dc fb ANBZ: #9176 phytium inclusion category: bugfix CVE: NA When enable efi fb, replace the original efi fb0 when loading the dc driver. commit: 8a7a237c819a Signed-off-by: WangHao Signed-off-by: Yang Xun Signed-off-by: Wang Yinfeng Signed-off-by: Jiakun Shuai Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/3266 --- drivers/gpu/drm/phytium/phytium_pci.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/gpu/drm/phytium/phytium_pci.c b/drivers/gpu/drm/phytium/phytium_pci.c index 7ed2f58a2942..e849830540a1 100644 --- a/drivers/gpu/drm/phytium/phytium_pci.c +++ b/drivers/gpu/drm/phytium/phytium_pci.c @@ -6,6 +6,7 @@ #include #include +#include #include #include "phytium_display_drv.h" #include "phytium_pci.h" @@ -238,12 +239,30 @@ phytium_pci_private_fini(struct pci_dev *pdev, struct phytium_display_private *p devm_kfree(&pdev->dev, pci_priv); } +static int phytium_remove_conflicting_framebuffers(struct pci_dev *pdev) +{ + resource_size_t base, size; + + base = pci_resource_start(pdev, 2); + size = pci_resource_len(pdev, 2); + + return drm_aperture_remove_conflicting_framebuffers(base, size, + &phytium_display_drm_driver); + +} + static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct phytium_display_private *priv = NULL; struct drm_device *dev = NULL; int ret = 0; + ret = phytium_remove_conflicting_framebuffers(pdev); + if (ret) { + DRM_ERROR("failed to remove conflicting phytium framebuffers\n"); + return ret; + } + dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev); if (IS_ERR(dev)) { DRM_ERROR("failed to allocate drm_device\n"); -- Gitee From 5e6986db5935c44196d98d9fa505d2aea694e086 Mon Sep 17 00:00:00 2001 From: Peter-Jan Gootzen Date: Wed, 1 May 2024 
17:38:16 +0200 Subject: [PATCH 0941/2138] virtio-fs: limit number of request queues ANBZ: #9236 commit 103c2de111bf32f7c36a0ce8f638b114a37e0b76 upstream. Virtio-fs devices might allocate significant resources to virtio queues such as CPU cores that busy poll on the queue. The device indicates how many request queues it can support and the driver should initialize the number of queues that they want to utilize. In this patch we limit the number of initialized request queues to the number of CPUs, to limit the resource consumption on the device-side and to prepare for the upcoming multi-queue patch. Signed-off-by: Peter-Jan Gootzen Signed-off-by: Yoray Zack Suggested-by: Max Gurtovoy Reviewed-by: Max Gurtovoy Reviewed-by: Stefan Hajnoczi Acked-by: Michael S. Tsirkin Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3278 --- fs/fuse/virtio_fs.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index d84dacbdce2c..74b92be8f7f1 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -704,6 +704,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev, if (fs->num_request_queues == 0) return -EINVAL; + /* Truncate nr of request queues to nr_cpu_id */ + fs->num_request_queues = min_t(unsigned int, fs->num_request_queues, + nr_cpu_ids); fs->nvqs = VQ_REQUEST + fs->num_request_queues; fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL); if (!fs->vqs) -- Gitee From 81deb551f3b861c1b792ca59cc8a85da3bafd6bc Mon Sep 17 00:00:00 2001 From: Peter-Jan Gootzen Date: Wed, 1 May 2024 17:38:17 +0200 Subject: [PATCH 0942/2138] virtio-fs: add multi-queue support ANBZ: #9236 commit 529395d2ae6456c556405016ea0c43081fe607f3 upstream. This commit creates a multi-queue mapping at device bring-up. 
The driver first attempts to use the existing MSI-X interrupt affinities (previously disabled), and if not present, will distribute the request queues evenly over the CPUs. If the latter fails as well, all CPUs are mapped to request queue zero. When a request is handed from FUSE to the virtio-fs device driver, the driver will use the current CPU to index into the multi-queue mapping and determine the optimal request queue to use. We measured the performance of this patch with the fio benchmarking tool, increasing the number of queues results in a significant speedup for both read and write operations, demonstrating the effectiveness of multi-queue support. Host: - Dell PowerEdge R760 - CPU: Intel(R) Xeon(R) Gold 6438M, 128 cores - VM: KVM with 32 cores Virtio-fs device: - BlueField-3 DPU - CPU: ARM Cortex-A78AE, 16 cores - One thread per queue, each busy polling on one request queue - Each queue is 1024 descriptors deep Workload: - fio, sequential read or write, ioengine=libaio, numjobs=32, 4GiB file per job, iodepth=8, bs=256KiB, runtime=30s Performance Results: +===========================+==========+===========+ | Number of queues | Fio read | Fio write | +===========================+==========+===========+ | 1 request queue (GiB/s) | 6.1 | 4.6 | +---------------------------+----------+-----------+ | 8 request queues (GiB/s) | 25.8 | 10.3 | +---------------------------+----------+-----------+ | 16 request queues (GiB/s) | 30.9 | 19.5 | +---------------------------+----------+-----------+ | 32 request queue (GiB/s) | 33.2 | 22.6 | +---------------------------+----------+-----------+ | Speedup | 5.5x | 5x | +---------------=-----------+----------+-----------+ Signed-off-by: Peter-Jan Gootzen Signed-off-by: Yoray Zack Signed-off-by: Max Gurtovoy Reviewed-by: Stefan Hajnoczi Acked-by: Michael S. 
Tsirkin Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3278 --- fs/fuse/virtio_fs.c | 70 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 62 insertions(+), 8 deletions(-) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 74b92be8f7f1..624997d8fc50 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include #include #include @@ -63,6 +65,8 @@ struct virtio_fs { unsigned int num_request_queues; /* number of request queues */ struct dax_device *dax_dev; + unsigned int *mq_map; /* index = cpu id, value = request vq id */ + /* DAX memory window where file contents are mapped */ void *window_kaddr; phys_addr_t window_phys_addr; @@ -165,6 +169,7 @@ static void release_virtio_fs_obj(struct kref *ref) { struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount); + kfree(vfs->mq_map); kfree(vfs->vqs); kfree(vfs); } @@ -659,6 +664,44 @@ static void virtio_fs_requests_done_work(struct work_struct *work) } } +static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs) +{ + const struct cpumask *mask, *masks; + unsigned int q, cpu; + + /* First attempt to map using existing transport layer affinities + * e.g. 
PCIe MSI-X + */ + if (!vdev->config->get_vq_affinity) + goto fallback; + + for (q = 0; q < fs->num_request_queues; q++) { + mask = vdev->config->get_vq_affinity(vdev, VQ_REQUEST + q); + if (!mask) + goto fallback; + + for_each_cpu(cpu, mask) + fs->mq_map[cpu] = q; + } + + return; +fallback: + /* Attempt to map evenly in groups over the CPUs */ + masks = group_cpus_evenly(fs->num_request_queues); + /* If even this fails we default to all CPUs use queue zero */ + if (!masks) { + for_each_possible_cpu(cpu) + fs->mq_map[cpu] = 0; + return; + } + + for (q = 0; q < fs->num_request_queues; q++) { + for_each_cpu(cpu, &masks[q]) + fs->mq_map[cpu] = q; + } + kfree(masks); +} + /* Virtqueue interrupt handler */ static void virtio_fs_vq_done(struct virtqueue *vq) { @@ -695,6 +738,11 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev, { struct virtqueue **vqs; vq_callback_t **callbacks; + /* Specify pre_vectors to ensure that the queues before the + * request queues (e.g. hiprio) don't claim any of the CPUs in + * the multi-queue mapping and interrupt affinities + */ + struct irq_affinity desc = { .pre_vectors = VQ_REQUEST }; const char **names; unsigned int i; int ret = 0; @@ -716,7 +764,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev, callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]), GFP_KERNEL); names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL); - if (!vqs || !callbacks || !names) { + fs->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*fs->mq_map), GFP_KERNEL, + dev_to_node(&vdev->dev)); + if (!vqs || !callbacks || !names || !fs->mq_map) { ret = -ENOMEM; goto out; } @@ -736,7 +786,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev, names[i] = fs->vqs[i].name; } - ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL); + ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, &desc); if (ret < 0) goto out; @@ -748,8 +798,10 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev, 
kfree(names); kfree(callbacks); kfree(vqs); - if (ret) + if (ret) { kfree(fs->vqs); + kfree(fs->mq_map); + } return ret; } @@ -889,7 +941,7 @@ static int virtio_fs_probe(struct virtio_device *vdev) if (ret < 0) goto out; - /* TODO vq affinity */ + virtio_fs_map_queues(vdev, fs); ret = virtio_fs_setup_dax(vdev, fs); if (ret < 0) @@ -1237,7 +1289,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq) __releases(fiq->lock) { - unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */ + unsigned int queue_id; struct virtio_fs *fs; struct fuse_req *req; struct virtio_fs_vq *fsvq; @@ -1251,11 +1303,13 @@ __releases(fiq->lock) spin_unlock(&fiq->lock); fs = fiq->priv; + queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()]; - pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n", - __func__, req->in.h.opcode, req->in.h.unique, + pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u queue_id %u\n", + __func__, req->in.h.opcode, req->in.h.unique, req->in.h.nodeid, req->in.h.len, - fuse_len_args(req->args->out_numargs, req->args->out_args)); + fuse_len_args(req->args->out_numargs, req->args->out_args), + queue_id); fsvq = &fs->vqs[queue_id]; ret = virtio_fs_enqueue_req(fsvq, req, false); -- Gitee From db7344f7024c4b1a72b339b0255a9eae91fa44b0 Mon Sep 17 00:00:00 2001 From: Hongzhen Luo Date: Tue, 9 Apr 2024 19:30:22 +0800 Subject: [PATCH 0943/2138] erofs: derive fsid from on-disk UUID for .statfs() if possible ANBZ: #9245 commit 1872df8dcd87bd1e623e1a70076d08636b9c473d upstream Use the superblock's UUID to generate the fsid when it's non-null. 
Reviewed-by: Gao Xiang Reviewed-by: Jingbo Xu Signed-off-by: Hongzhen Luo Link: https://lore.kernel.org/r/20240409113022.74720-1-hongzhen@linux.alibaba.com Signed-off-by: Gao Xiang Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3287 --- fs/erofs/super.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 649f5a2d9a85..7eb7efb577e8 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -909,22 +909,20 @@ static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct erofs_sb_info *sbi = EROFS_SB(sb); - u64 id = 0; - - if (!erofs_is_fscache_mode(sb)) - id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = sb->s_magic; buf->f_bsize = sb->s_blocksize; buf->f_blocks = sbi->total_blocks; buf->f_bfree = buf->f_bavail = 0; - buf->f_files = ULLONG_MAX; buf->f_ffree = ULLONG_MAX - sbi->inos; - buf->f_namelen = EROFS_NAME_LEN; - buf->f_fsid = u64_to_fsid(id); + if (uuid_is_null(&sb->s_uuid)) + buf->f_fsid = u64_to_fsid(erofs_is_fscache_mode(sb) ? 
0 : + huge_encode_dev(sb->s_bdev->bd_dev)); + else + buf->f_fsid = uuid_to_fsid(sb->s_uuid.b); return 0; } -- Gitee From 168fab5cf09a4d3d8cb387fa524cb89bec61d78a Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Tue, 28 May 2024 11:33:31 +0800 Subject: [PATCH 0944/2138] anolis: LoongArch: Fix secondary bridge routing errors ANBZ: #9241 Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3280 --- drivers/irqchip/irq-loongson-pch-pic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index 1f244e9de9be..79bc3d132657 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -268,7 +268,7 @@ static void pch_pic_reset(struct pch_pic *priv) for (i = 0; i < PIC_COUNT; i++) { /* Write vector ID */ - writeb(priv->ht_vec_base + i, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, i))); + writeb(i, priv->base + PCH_INT_HTVEC(hwirq_to_bit(priv, i))); /* Hardcode route to HT0 Lo */ writeb(1, priv->base + PCH_INT_ROUTE(i)); } -- Gitee From 9ef2a85c8ae1abac0cc9d3edd9c562e09206bc1b Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 4 Jun 2024 15:15:28 +0800 Subject: [PATCH 0945/2138] anolis: KVM: SVM: Return 0 if Non-CSV2 virtual machine try to rd/wr MSR_AMD64_SEV_ES_GHCB from host-side ANBZ: #9278 state_test/smm_test selftests are failing on HYGON with: "KVM_GET_MSRS failed, r: 86 (failed on MSR c0010130)" In order to support live migration for Hygon CSV2 guest, the commit 54378dcde7a5 ("anolis: KVM: SVM: Export MSR_AMD64_SEV_ES_GHCB to userspace for CSV2 guest") support to emulate KVM_AMD64_SEV_ES_GHCB. But it doesn't handling the return code well if the virtual machine is not a CSV2 guest. 
Fixes: 54378dcde7a5 ("anolis: KVM: SVM: Export MSR_AMD64_SEV_ES_GHCB to userspace for CSV2 guest") Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3316 --- arch/x86/kvm/svm/svm.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 7c5a7bd1a7ff..3e0cfbd269e7 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2976,10 +2976,15 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) * Only support userspace get/set from/to * vmcb.control.ghcb_gpa */ - if (!msr_info->host_initiated || - !sev_es_guest(svm->vcpu.kvm)) + if (!msr_info->host_initiated) return 1; + /* Filling the data as 0 if it's not a Hygon CSV2 guest */ + if (!sev_es_guest(svm->vcpu.kvm)) { + msr_info->data = 0; + return 0; + } + msr_info->data = svm->vmcb->control.ghcb_gpa; /* Only set status bits when using GHCB page protocol */ @@ -3242,10 +3247,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) * Only support userspace get/set from/to * vmcb.control.ghcb_gpa */ - if (!msr->host_initiated || - !sev_es_guest(svm->vcpu.kvm)) + if (!msr->host_initiated) return 1; + /* + * Ignore write to this MSR if it's not a Hygon CSV2 + * guest. + */ + if (!sev_es_guest(svm->vcpu.kvm)) + return 0; + /* * Value 0 means uninitialized userspace MSR data, * userspace need get the initial MSR data afterwards. -- Gitee From 451230f72599e6a8200ac32aa82045f547210608 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Tue, 21 May 2024 15:23:18 +0800 Subject: [PATCH 0946/2138] anolis: configs: define and add kconfig constraints ANBZ: #8598 This commit add the constraints for kconfig, it defines some rules that kconfig file must or should obey. 
Signed-off-by: Qiao Ma Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3236 --- .../examination/L0-MANDATORY/arm64.config | 394 +++++++++++++++++ .../examination/L0-MANDATORY/loongarch.config | 348 +++++++++++++++ .../examination/L0-MANDATORY/sw_64.config | 343 +++++++++++++++ .../examination/L0-MANDATORY/x86.config | 397 ++++++++++++++++++ .../examination/L1-RECOMMEND/arm64.config | 139 ++++++ .../examination/L1-RECOMMEND/loongarch.config | 103 +++++ .../examination/L1-RECOMMEND/sw_64.config | 86 ++++ .../examination/L1-RECOMMEND/x86.config | 139 ++++++ anolis/configs/examination/README.md | 33 ++ 9 files changed, 1982 insertions(+) create mode 100644 anolis/configs/examination/L0-MANDATORY/arm64.config create mode 100644 anolis/configs/examination/L0-MANDATORY/loongarch.config create mode 100644 anolis/configs/examination/L0-MANDATORY/sw_64.config create mode 100644 anolis/configs/examination/L0-MANDATORY/x86.config create mode 100644 anolis/configs/examination/L1-RECOMMEND/arm64.config create mode 100644 anolis/configs/examination/L1-RECOMMEND/loongarch.config create mode 100644 anolis/configs/examination/L1-RECOMMEND/sw_64.config create mode 100644 anolis/configs/examination/L1-RECOMMEND/x86.config create mode 100644 anolis/configs/examination/README.md diff --git a/anolis/configs/examination/L0-MANDATORY/arm64.config b/anolis/configs/examination/L0-MANDATORY/arm64.config new file mode 100644 index 000000000000..eab0cbcff378 --- /dev/null +++ b/anolis/configs/examination/L0-MANDATORY/arm64.config @@ -0,0 +1,394 @@ +# UNLIMITED CONFIG_LSM +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +# CHOICE CONFIG_NODES_SHIFT 6/8/10 +# RANGE CONFIG_NR_CPUS 1024,8192 +CONFIG_64BIT=y +CONFIG_ACPI=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_THERMAL=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_AIO=y +CONFIG_ARCH_HISI=y +CONFIG_ARM64=y 
+CONFIG_ARM64_CNP=y +CONFIG_ARM64_E0PD=y +CONFIG_ARM64_EPAN=y +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_ARM64_PSEUDO_NMI=y +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_SME=y +CONFIG_ARM64_SVE=y +CONFIG_ARM_CCN=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +CONFIG_ARM_PMU=y +CONFIG_ARM_SMMU=y +CONFIG_ARM_SMMU_V3=y +CONFIG_ARM_SMMU_V3_PMU=m +CONFIG_ARM_SPE_PMU=m +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ATA=m +CONFIG_AUDIT=y +CONFIG_AUTOFS_FS=y +CONFIG_AUXILIARY_BUS=y +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BLK_CGROUP=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_DM=m +CONFIG_BLK_DEV_INITRD=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_BLK_DEV_NVME=m +CONFIG_BLK_DEV_SD=m +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLOCK=y +CONFIG_BONDING=m +CONFIG_BPF=y +CONFIG_BPF_JIT=y +CONFIG_BPF_SYSCALL=y +CONFIG_BRIDGE=m +CONFIG_BUG=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_SCHED=y +CONFIG_COMMON_CLK=y +CONFIG_COMPACTION=y +CONFIG_COREDUMP=y +CONFIG_CPUSETS=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_IDLE=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_PM=y +CONFIG_CRASH_CORE=y +CONFIG_CRASH_DUMP=y +CONFIG_CRYPTO=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SM2=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +CONFIG_CRYPTO_SM4=m 
+CONFIG_CRYPTO_SM4_GENERIC=m +CONFIG_DAX=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_BTF=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEVMEM=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DMADEVICES=y +CONFIG_DMA_ENGINE=y +CONFIG_DMI=y +CONFIG_DNOTIFY=y +CONFIG_DNS_RESOLVER=m +CONFIG_DRM_HISI_HIBMC=m +CONFIG_DYNAMIC_FTRACE=y +CONFIG_EDAC=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_ELFCORE=y +CONFIG_ELF_CORE=y +CONFIG_EPOLL=y +CONFIG_EROFS_FS=m +CONFIG_ETHTOOL_NETLINK=y +CONFIG_EVENTFD=y +CONFIG_EVM=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_FANOTIFY=y +CONFIG_FAT_FS=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FHANDLE=y +CONFIG_FILE_LOCKING=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FREEZER=y +CONFIG_FSNOTIFY=y +CONFIG_FS_DAX=y +CONFIG_FTRACE=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_FUSE_FS=m +CONFIG_FUTEX=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_IPI=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIO_HISI=m +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_HDMI=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_HISILICON_LPC=y +CONFIG_HISI_PCIE_PMU=m +CONFIG_HISI_PMU=m +CONFIG_HOTPLUG_CPU=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HWMON=y +CONFIG_HW_RANDOM=y +CONFIG_I2C=y +CONFIG_IMA=y +CONFIG_INET=y +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INFINIBAND=m +CONFIG_INPUT=y +CONFIG_INPUT_KEYBOARD=y +CONFIG_INPUT_MOUSE=y +CONFIG_INTEGRITY=y +CONFIG_IOMMU_SUPPORT=y +CONFIG_IOSCHED_BFQ=y +CONFIG_IO_URING=y +CONFIG_IPC_NS=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_SI=m +CONFIG_IPV6=y +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_ISO9660_FS=m +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KERNFS=y +CONFIG_KEXEC=y +CONFIG_KEXEC_CORE=y +CONFIG_KEXEC_FILE=y +CONFIG_KEYS=y +CONFIG_KPROBES=y 
+CONFIG_KRETPROBES=y +CONFIG_KVM=y +CONFIG_KVM_MMIO=y +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_MD=y +CONFIG_MEMBARRIER=y +CONFIG_MEMCG=y +CONFIG_MEMFD_CREATE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MIGRATION=y +CONFIG_MISC_FILESYSTEMS=y +CONFIG_MMU=y +CONFIG_MODULES=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MULTIUSER=y +CONFIG_NAMESPACES=y +CONFIG_NET=y +CONFIG_NETDEVICES=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETLINK_DIAG=m +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NET_CLS=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_CORE=y +CONFIG_NET_NS=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_TEAM=m +CONFIG_NFSD=m +CONFIG_NFSD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_NFS_FS=m +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_V4=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_NAT=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NF_TABLES_IPV6=y +CONFIG_NLS=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_UTF8=m +CONFIG_NO_HZ=y +CONFIG_NO_HZ_COMMON=y +CONFIG_NTFS3_FS=m +CONFIG_NUMA=y +CONFIG_NUMA_BALANCING=y +CONFIG_NVME_CORE=m +CONFIG_NVME_TARGET=m +CONFIG_OVERLAY_FS=m +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_PAGE_COUNTER=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PARAVIRT=y +CONFIG_PCI=y +CONFIG_PCIEAER=y +CONFIG_PCIEASPM=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIE_EDR=y +CONFIG_PCI_ATS=y +CONFIG_PCI_HISI=y +CONFIG_PCI_HOST_GENERIC=y +CONFIG_PCI_IOV=y +CONFIG_PCI_MSI=y +CONFIG_PERF_EVENTS=y +CONFIG_PID_NS=y +CONFIG_PM=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_VMCORE=y +CONFIG_PSI=y +CONFIG_PSTORE=y +CONFIG_PVPANIC=y +CONFIG_QUOTA=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RAS=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RELOCATABLE=y 
+CONFIG_RPS=y +CONFIG_RTC_CLASS=y +CONFIG_SATA_AHCI=m +CONFIG_SCHED_MC=y +CONFIG_SCHED_SMT=y +CONFIG_SCSI=y +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_HISI_SAS_PCI=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SECCOMP=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIO=y +CONFIG_SHMEM=y +CONFIG_SIGNALFD=y +CONFIG_SLUB=y +CONFIG_SMC=m +CONFIG_SMP=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_SPARSEMEM=y +CONFIG_SPI=y +CONFIG_SPI_HISI_KUNPENG=m +CONFIG_SQUASHFS=m +CONFIG_STACKPROTECTOR=y +CONFIG_STACKTRACE=y +CONFIG_SUNRPC=m +CONFIG_SWAP=y +CONFIG_SYN_COOKIES=y +CONFIG_SYSCTL=y +CONFIG_SYSFS=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_TAP=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=y +CONFIG_THERMAL=y +CONFIG_THREAD_INFO_IN_TASK=y +CONFIG_TIMERFD=y +CONFIG_TLS=m +CONFIG_TMPFS=y +CONFIG_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TREE_RCU=y +CONFIG_TRUSTED_KEYS=y +CONFIG_TTY=y +CONFIG_TUN=m +CONFIG_UIO=m +CONFIG_UNIX=y +CONFIG_UPROBES=y +CONFIG_USB=y +CONFIG_USB_SUPPORT=y +CONFIG_USERFAULTFD=y +CONFIG_USER_NS=y +CONFIG_UTS_NS=y +CONFIG_VETH=m +CONFIG_VFAT_FS=m +CONFIG_VFIO=m +CONFIG_VFIO_PCI=m +CONFIG_VHOST=m +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=m +CONFIG_VIRTIO=m +CONFIG_VIRTIO_FS=m +CONFIG_VIRTIO_MEM=m +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_NET=m +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTUALIZATION=y +CONFIG_VLAN_8021Q=m +CONFIG_VMAP_STACK=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_VSOCKETS=m +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_VXLAN=m +CONFIG_WATCHDOG=y +CONFIG_XDP_SOCKETS=y +CONFIG_XFRM=y +CONFIG_XFRM_USER=y +CONFIG_XFS_FS=m +CONFIG_XPS=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DMA=y +CONFIG_ZRAM=m diff --git a/anolis/configs/examination/L0-MANDATORY/loongarch.config b/anolis/configs/examination/L0-MANDATORY/loongarch.config 
new file mode 100644 index 000000000000..7ffa35318f42 --- /dev/null +++ b/anolis/configs/examination/L0-MANDATORY/loongarch.config @@ -0,0 +1,348 @@ +# UNLIMITED CONFIG_LSM +## CONFIG_NFS_FSCACHE=y +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_NODES_SHIFT=6 +CONFIG_NR_CPUS=256 +CONFIG_64BIT=y +CONFIG_ACPI=y +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_THERMAL=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_AIO=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ATA=y +CONFIG_AUDIT=y +CONFIG_AUTOFS_FS=y +CONFIG_AUXILIARY_BUS=y +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BLK_CGROUP=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_DM=m +CONFIG_BLK_DEV_INITRD=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_BLK_DEV_NVME=m +CONFIG_BLK_DEV_SD=m +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLOCK=y +CONFIG_BONDING=m +CONFIG_BPF=y +CONFIG_BPF_JIT=y +CONFIG_BPF_SYSCALL=y +CONFIG_BRIDGE=m +CONFIG_BUG=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_SCHED=y +CONFIG_COMMON_CLK=y +CONFIG_COMPACTION=y +CONFIG_COREDUMP=y +CONFIG_CPUSETS=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_PM=y +CONFIG_CRASH_CORE=y +CONFIG_CRASH_DUMP=y +CONFIG_CRYPTO=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SM2=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +CONFIG_CRYPTO_SM4=y 
+CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_DAX=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_BTF=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEVMEM=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DMADEVICES=y +CONFIG_DMA_ENGINE=y +CONFIG_DMI=y +CONFIG_DNOTIFY=y +CONFIG_DNS_RESOLVER=y +CONFIG_DYNAMIC_FTRACE=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_ELFCORE=y +CONFIG_ELF_CORE=y +CONFIG_EPOLL=y +CONFIG_EROFS_FS=m +CONFIG_ETHTOOL_NETLINK=y +CONFIG_EVENTFD=y +CONFIG_EVM=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_FANOTIFY=y +CONFIG_FAT_FS=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FHANDLE=y +CONFIG_FILE_LOCKING=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FREEZER=y +CONFIG_FSNOTIFY=y +CONFIG_FTRACE=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_FUSE_FS=m +CONFIG_FUTEX=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GPIO_ACPI=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_HDMI=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_HOTPLUG_CPU=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HWMON=y +CONFIG_HW_RANDOM=y +CONFIG_I2C=y +CONFIG_IMA=y +CONFIG_INET=y +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INFINIBAND=m +CONFIG_INPUT=y +CONFIG_INPUT_KEYBOARD=y +CONFIG_INPUT_MOUSE=y +CONFIG_INTEGRITY=y +CONFIG_IOMMU_SUPPORT=y +CONFIG_IOSCHED_BFQ=y +CONFIG_IO_URING=y +CONFIG_IPC_NS=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_SI=m +CONFIG_IPV6=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_ISO9660_FS=m +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KERNFS=y +CONFIG_KEXEC=y +CONFIG_KEXEC_CORE=y +CONFIG_KEYS=y +CONFIG_KPROBES=y +CONFIG_KRETPROBES=y +CONFIG_KVM=y +CONFIG_KVM_MMIO=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_LOONGARCH=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_MD=y +CONFIG_MEMBARRIER=y +CONFIG_MEMCG=y +CONFIG_MEMFD_CREATE=y 
+CONFIG_MEMORY_HOTPLUG=y +CONFIG_MIGRATION=y +CONFIG_MISC_FILESYSTEMS=y +CONFIG_MMU=y +CONFIG_MODULES=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MULTIUSER=y +CONFIG_NAMESPACES=y +CONFIG_NET=y +CONFIG_NETDEVICES=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETLINK_DIAG=m +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NET_CLS=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_CORE=y +CONFIG_NET_NS=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_TEAM=m +CONFIG_NFSD=y +CONFIG_NFSD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_NFS_FS=y +CONFIG_NFS_V4=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_NAT=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NF_TABLES_IPV6=y +CONFIG_NLS=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_UTF8=y +CONFIG_NO_HZ=y +CONFIG_NO_HZ_COMMON=y +CONFIG_NTFS3_FS=m +CONFIG_NUMA=y +CONFIG_NUMA_BALANCING=y +CONFIG_NVME_CORE=m +CONFIG_NVME_TARGET=m +CONFIG_OVERLAY_FS=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_PAGE_COUNTER=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PCI=y +CONFIG_PCIEAER=y +CONFIG_PCIEASPM=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCI_ATS=y +CONFIG_PCI_IOV=y +CONFIG_PCI_MSI=y +CONFIG_PERF_EVENTS=y +CONFIG_PID_NS=y +CONFIG_PM=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_VMCORE=y +CONFIG_PSI=y +CONFIG_PSTORE=m +CONFIG_PVPANIC=y +CONFIG_QUOTA=y +CONFIG_RAS=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RELOCATABLE=y +CONFIG_RPS=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_SYSTOHC=y +CONFIG_SATA_AHCI=y +CONFIG_SCHED_SMT=y +CONFIG_SCSI=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SECCOMP=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIO=y +CONFIG_SHMEM=y +CONFIG_SIGNALFD=y 
+CONFIG_SLUB=y
+CONFIG_SMC=m
+CONFIG_SMP=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_SPARSEMEM=y
+CONFIG_SPI=y
+CONFIG_SQUASHFS=m
+CONFIG_STACKPROTECTOR=y
+CONFIG_STACKTRACE=y
+CONFIG_SUNRPC=y
+CONFIG_SWAP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_SYSFS_SYSCALL=y
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_TAP=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_CUBIC=m
+CONFIG_THERMAL=y
+CONFIG_TIMERFD=y
+CONFIG_TLS=m
+CONFIG_TMPFS=y
+CONFIG_TRACEPOINTS=y
+CONFIG_TRACING=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TREE_RCU=y
+CONFIG_TRUSTED_KEYS=y
+CONFIG_TTY=y
+CONFIG_TUN=m
+CONFIG_UIO=m
+CONFIG_UNIX=y
+CONFIG_UNWINDER_PROLOGUE=y
+CONFIG_UPROBES=y
+CONFIG_USB=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USERFAULTFD=y
+CONFIG_USER_NS=y
+CONFIG_UTS_NS=y
+CONFIG_VETH=m
+CONFIG_VFAT_FS=m
+CONFIG_VFIO=m
+CONFIG_VFIO_PCI=m
+CONFIG_VHOST=m
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_FS=m
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_NET=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PCI_LEGACY=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_VSOCKETS=m
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VXLAN=m
+CONFIG_WATCHDOG=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+CONFIG_XFS_FS=y
+CONFIG_XPS=y
+CONFIG_ZONE_DMA32=y
+CONFIG_ZRAM=m
diff --git a/anolis/configs/examination/L0-MANDATORY/sw_64.config b/anolis/configs/examination/L0-MANDATORY/sw_64.config
new file mode 100644
index 000000000000..62c44debb191
--- /dev/null
+++ b/anolis/configs/examination/L0-MANDATORY/sw_64.config
@@ -0,0 +1,343 @@
+# UNLIMITED CONFIG_LSM
+CONFIG_NODES_SHIFT=7
+CONFIG_NR_CPUS=512
+CONFIG_SW64=y
+CONFIG_64BIT=y
+CONFIG_ACPI=y
+CONFIG_ACPI_IPMI=m
+CONFIG_ACPI_NUMA=y
+CONFIG_ACPI_PCI_SLOT=y
+CONFIG_ADVISE_SYSCALLS=y
+CONFIG_AIO=y
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ATA=y
+CONFIG_AUDIT=y
+CONFIG_AUTOFS_FS=y
+CONFIG_AUXILIARY_BUS=y
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_SCRIPT=y
+CONFIG_BLK_CGROUP=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_DM=m +CONFIG_BLK_DEV_INITRD=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_BLK_DEV_NVME=m +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLOCK=y +CONFIG_BONDING=m +CONFIG_BPF=y +CONFIG_BPF_JIT=y +CONFIG_BPF_SYSCALL=y +CONFIG_BRIDGE=m +CONFIG_BUG=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_SCHED=y +CONFIG_COMMON_CLK=y +CONFIG_COMPACTION=y +CONFIG_COREDUMP=y +CONFIG_CPUSETS=y +CONFIG_CPU_FREQ=y +CONFIG_CRASH_CORE=y +CONFIG_CRYPTO=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SM2=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_DAX=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEEP_MEMCPY=y +CONFIG_DEEP_MEMSET=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEVMEM=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DMADEVICES=y +CONFIG_DMA_ENGINE=y +CONFIG_DMI=y +CONFIG_DNOTIFY=y +CONFIG_DNS_RESOLVER=m +CONFIG_DYNAMIC_FTRACE=y +CONFIG_EFI=y +CONFIG_ELFCORE=y +CONFIG_ELF_CORE=y +CONFIG_EPOLL=y +CONFIG_EROFS_FS=m +CONFIG_ETHTOOL_NETLINK=y +CONFIG_EVENTFD=y +CONFIG_EVM=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_FANOTIFY=y +CONFIG_FAT_FS=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FHANDLE=y +CONFIG_FILE_LOCKING=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FREEZER=y 
+CONFIG_FSNOTIFY=y +CONFIG_FTRACE=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_FUSE_FS=m +CONFIG_FUTEX=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GPIO_ACPI=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_HDMI=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_HOTPLUG_CPU=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HWMON=y +CONFIG_HW_RANDOM=y +CONFIG_I2C=y +CONFIG_IMA=y +CONFIG_INET=y +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INFINIBAND=m +CONFIG_INPUT=y +CONFIG_INPUT_KEYBOARD=y +CONFIG_INPUT_MOUSE=y +CONFIG_INTEGRITY=y +CONFIG_IOMMU_SUPPORT=y +CONFIG_IOSCHED_BFQ=y +CONFIG_IO_URING=y +CONFIG_IPC_NS=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_SI=m +CONFIG_IPV6=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_ISO9660_FS=m +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KERNFS=y +CONFIG_KEXEC=y +CONFIG_KEXEC_CORE=y +CONFIG_KEYS=y +CONFIG_KPROBES=y +CONFIG_KRETPROBES=y +CONFIG_KVM=y +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_LOONGARCH=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_MD=y +CONFIG_MEMBARRIER=y +CONFIG_MEMCG=y +CONFIG_MEMFD_CREATE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MIGRATION=y +CONFIG_MISC_FILESYSTEMS=y +CONFIG_MMU=y +CONFIG_MODULES=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MULTIUSER=y +CONFIG_NAMESPACES=y +CONFIG_NET=y +CONFIG_NETDEVICES=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETLINK_DIAG=m +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NET_CLS=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_CORE=y +CONFIG_NET_NS=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_TEAM=m +CONFIG_NFSD=m +CONFIG_NFSD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_NFS_FS=m +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_V4=m 
+CONFIG_NF_CONNTRACK=m +CONFIG_NF_NAT=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NF_TABLES_IPV6=y +CONFIG_NLS=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_UTF8=m +CONFIG_NONCACHE_PAGE=y +CONFIG_NO_HZ=y +CONFIG_NO_HZ_COMMON=y +CONFIG_NUMA=y +CONFIG_NUMA_BALANCING=y +CONFIG_NVME_CORE=y +CONFIG_NVME_TARGET=y +CONFIG_OVERLAY_FS=m +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_PAGE_COUNTER=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PCI=y +CONFIG_PCIEAER=y +CONFIG_PCIEASPM=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIE_EDR=y +CONFIG_PCI_ATS=y +CONFIG_PCI_IOV=y +CONFIG_PCI_MSI=y +CONFIG_PERF_EVENTS=y +CONFIG_PID_NS=y +CONFIG_PM=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_VMCORE=y +CONFIG_PSI=y +CONFIG_PSTORE=m +CONFIG_PVPANIC=y +CONFIG_QUOTA=y +CONFIG_RAS=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RELOCATABLE=y +CONFIG_RPS=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_SYSTOHC=y +CONFIG_SATA_AHCI=y +CONFIG_SCHED_SMT=y +CONFIG_SCSI=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SECCOMP=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIO=y +CONFIG_SHMEM=y +CONFIG_SIGNALFD=y +CONFIG_SLUB=y +CONFIG_SMC=m +CONFIG_SMP=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_SPARSEMEM=y +CONFIG_SPI=y +CONFIG_SQUASHFS=m +CONFIG_STACKTRACE=y +CONFIG_SUNRPC=m +CONFIG_SW64_CHIP3=y +CONFIG_SW64_CPUAUTOPLUG=y +CONFIG_SW64_CPUFREQ=y +CONFIG_SWAP=y +CONFIG_SYN_COOKIES=y +CONFIG_SYSCTL=y +CONFIG_SYSFS=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_TAP=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=m +CONFIG_THERMAL=y +CONFIG_TIMERFD=y +CONFIG_TLS=m +CONFIG_TMPFS=y +CONFIG_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TREE_RCU=y 
+CONFIG_TRUSTED_KEYS=y +CONFIG_TTY=y +CONFIG_TUN=y +CONFIG_UIO=m +CONFIG_UNIX=y +CONFIG_UPROBES=y +CONFIG_USB=y +CONFIG_USB_SUPPORT=y +CONFIG_USERFAULTFD=y +CONFIG_USER_NS=y +CONFIG_UTS_NS=y +CONFIG_VETH=m +CONFIG_VFAT_FS=m +CONFIG_VFIO=m +CONFIG_VFIO_PCI=m +CONFIG_VHOST=m +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=m +CONFIG_VIRTIO=y +CONFIG_VIRTIO_FS=m +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_NET=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTUALIZATION=y +CONFIG_VLAN_8021Q=m +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_VSOCKETS=m +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_VXLAN=m +CONFIG_WATCHDOG=y +CONFIG_XDP_SOCKETS=y +CONFIG_XFRM=y +CONFIG_XFRM_USER=y +CONFIG_XFS_FS=y +CONFIG_XPS=y +CONFIG_ZONE_DMA32=y +CONFIG_ZRAM=m diff --git a/anolis/configs/examination/L0-MANDATORY/x86.config b/anolis/configs/examination/L0-MANDATORY/x86.config new file mode 100644 index 000000000000..2025290f6549 --- /dev/null +++ b/anolis/configs/examination/L0-MANDATORY/x86.config @@ -0,0 +1,397 @@ +# UNLIMITED CONFIG_LSM +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +# CHOICE CONFIG_NODES_SHIFT 6/8/10 +# RANGE CONFIG_NR_CPUS 1024,8192 + +## CONFIG_SPECULATION_MITIGATIONS has been renamed to CONFIG_CPU_MITIGATIONS on linux stable linux-6.6.y, +## so make these two configs exclusive. 
+# EXCLUSIVE y CONFIG_SPECULATION_MITIGATIONS CONFIG_CPU_MITIGATIONS + +CONFIG_64BIT=y +CONFIG_ACPI=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_THERMAL=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_AIO=y +CONFIG_AMD_MEM_ENCRYPT=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ATA=m +CONFIG_AUDIT=y +CONFIG_AUTOFS_FS=y +CONFIG_AUXILIARY_BUS=y +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BLK_CGROUP=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_DM=m +CONFIG_BLK_DEV_INITRD=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_BLK_DEV_NVME=m +CONFIG_BLK_DEV_SD=m +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLOCK=y +CONFIG_BONDING=m +CONFIG_BPF=y +CONFIG_BPF_JIT=y +CONFIG_BPF_SYSCALL=y +CONFIG_BRIDGE=m +CONFIG_BUG=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_BPF=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_SCHED=y +CONFIG_COMMON_CLK=y +CONFIG_COMPACTION=y +CONFIG_COREDUMP=y +CONFIG_CPUSETS=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_IDLE=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_ZHAOXIN=y +CONFIG_CRASH_CORE=y +CONFIG_CRASH_DUMP=y +CONFIG_CRYPTO=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_SHA256=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SIMD=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SM2=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y 
+CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SM4_GENERIC=m +CONFIG_DAX=y +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_BTF=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEVMEM=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DMADEVICES=y +CONFIG_DMA_ENGINE=y +CONFIG_DMI=y +CONFIG_DNOTIFY=y +CONFIG_DNS_RESOLVER=m +CONFIG_DYNAMIC_FTRACE=y +CONFIG_EDAC=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_ELFCORE=y +CONFIG_ELF_CORE=y +CONFIG_EPOLL=y +CONFIG_EROFS_FS=m +CONFIG_ETHTOOL_NETLINK=y +CONFIG_EVENTFD=y +CONFIG_EVM=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_FANOTIFY=y +CONFIG_FAT_FS=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FHANDLE=y +CONFIG_FILE_LOCKING=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FREEZER=y +CONFIG_FSNOTIFY=y +CONFIG_FS_DAX=y +CONFIG_FTRACE=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_FUSE_FS=m +CONFIG_FUTEX=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GPIO_ACPI=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_HDMI=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_HOTPLUG_CPU=y +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_HPET_TIMER=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HWMON=y +CONFIG_HW_RANDOM=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_I2C=y +CONFIG_IA32_FEAT_CTL=y +CONFIG_IMA=y +CONFIG_INET=y +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INFINIBAND=m +CONFIG_INPUT=y +CONFIG_INPUT_KEYBOARD=y +CONFIG_INPUT_MOUSE=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_INTEGRITY=y +CONFIG_INTEL_IOMMU=y +CONFIG_IOMMU_SUPPORT=y +CONFIG_IOSCHED_BFQ=y +CONFIG_IO_URING=y +CONFIG_IPC_NS=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_SI=m +CONFIG_IPV6=y +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_IRQ_REMAP=y +CONFIG_ISO9660_FS=m +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_KERNFS=y +CONFIG_KEXEC=y +CONFIG_KEXEC_CORE=y +CONFIG_KEXEC_FILE=y +CONFIG_KEYS=y 
+CONFIG_KPROBES=y +CONFIG_KRETPROBES=y +CONFIG_KVM=m +CONFIG_KVM_AMD=m +CONFIG_KVM_GUEST=y +CONFIG_KVM_INTEL=m +CONFIG_KVM_MMIO=y +CONFIG_LIVEPATCH=y +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_MD=y +CONFIG_MEMBARRIER=y +CONFIG_MEMCG=y +CONFIG_MEMFD_CREATE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MICROCODE=y +CONFIG_MIGRATION=y +CONFIG_MISC_FILESYSTEMS=y +CONFIG_MMU=y +CONFIG_MODULES=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MULTIUSER=y +CONFIG_NAMESPACES=y +CONFIG_NET=y +CONFIG_NETDEVICES=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETLINK_DIAG=m +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NET_CLS=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_CORE=y +CONFIG_NET_NS=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_TEAM=m +CONFIG_NFSD=m +CONFIG_NFSD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_NFS_FS=m +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_V4=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_NAT=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NF_TABLES_IPV6=y +CONFIG_NLS=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_UTF8=m +CONFIG_NO_HZ=y +CONFIG_NO_HZ_COMMON=y +CONFIG_NTFS3_FS=m +CONFIG_NUMA=y +CONFIG_NUMA_BALANCING=y +CONFIG_NVME_CORE=m +CONFIG_NVME_TARGET=m +CONFIG_OVERLAY_FS=m +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_PAGE_COUNTER=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_CLOCK=y +CONFIG_PCI=y +CONFIG_PCIEAER=y +CONFIG_PCIEASPM=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIE_EDR=y +CONFIG_PCI_ATS=y +CONFIG_PCI_IOV=y +CONFIG_PCI_MSI=y +CONFIG_PERF_EVENTS=y +CONFIG_PID_NS=y +CONFIG_PM=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_VMCORE=y +CONFIG_PSI=y +CONFIG_PSTORE=y +CONFIG_PVPANIC=y 
+CONFIG_QUOTA=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RAS=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RELOCATABLE=y +CONFIG_RPS=y +CONFIG_RTC_CLASS=y +CONFIG_SATA_AHCI=m +CONFIG_SCHED_MC=y +CONFIG_SCHED_SMT=y +CONFIG_SCSI=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SECCOMP=y +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIO=y +CONFIG_SHMEM=y +CONFIG_SIGNALFD=y +CONFIG_SLUB=y +CONFIG_SMC=m +CONFIG_SMP=y +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_SPARSEMEM=y +CONFIG_SPI=y +CONFIG_SQUASHFS=m +CONFIG_STACKPROTECTOR=y +CONFIG_STACKTRACE=y +CONFIG_SUNRPC=m +CONFIG_SWAP=y +CONFIG_SYN_COOKIES=y +CONFIG_SYSCTL=y +CONFIG_SYSFS=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_TAP=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=y +CONFIG_THERMAL=y +CONFIG_THREAD_INFO_IN_TASK=y +CONFIG_TIMERFD=y +CONFIG_TLS=m +CONFIG_TMPFS=y +CONFIG_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TREE_RCU=y +CONFIG_TRUSTED_KEYS=y +CONFIG_TTY=y +CONFIG_TUN=m +CONFIG_UIO=m +CONFIG_UNIX=y +CONFIG_UPROBES=y +CONFIG_USB=y +CONFIG_USB_SUPPORT=y +CONFIG_USERFAULTFD=y +CONFIG_USER_NS=y +CONFIG_UTS_NS=y +CONFIG_VETH=m +CONFIG_VFAT_FS=m +CONFIG_VFIO=m +CONFIG_VFIO_PCI=m +CONFIG_VGA_CONSOLE=y +CONFIG_VHOST=m +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=m +CONFIG_VIRTIO=y +CONFIG_VIRTIO_FS=m +CONFIG_VIRTIO_MEM=m +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_NET=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTUALIZATION=y +CONFIG_VLAN_8021Q=m +CONFIG_VMAP_STACK=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_VSOCKETS=m +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_VXLAN=m +CONFIG_WATCHDOG=y +CONFIG_X86=y +CONFIG_X86_64=y +CONFIG_X86_64_SMP=y +CONFIG_X86_CPUID=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_MCE=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MSR=y +CONFIG_X86_SGX=y +CONFIG_X86_TSC=y 
+CONFIG_X86_X2APIC=y +CONFIG_XDP_SOCKETS=y +CONFIG_XFRM=y +CONFIG_XFRM_USER=y +CONFIG_XFS_FS=m +CONFIG_XPS=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DMA=y +CONFIG_ZRAM=m diff --git a/anolis/configs/examination/L1-RECOMMEND/arm64.config b/anolis/configs/examination/L1-RECOMMEND/arm64.config new file mode 100644 index 000000000000..7146c58229ef --- /dev/null +++ b/anolis/configs/examination/L1-RECOMMEND/arm64.config @@ -0,0 +1,139 @@ +# UNLIMITED CONFIG_BUILD_SALT +# CHOICE CONFIG_HZ 100/250/1000 +# CONFIG_ARM64_64K_PAGES is not set +# EXCLUSIVE y CONFIG_ARM64_4K_PAGES CONFIG_ARM64_64K_PAGES +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PGTABLE_LEVELS=4 +# UNLIMITED CONFIG_SYSTEM_TRUSTED_KEYS +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_SEA=y +CONFIG_ACPI_CPPC_CPUFREQ=m +CONFIG_ACPI_HMAT=y +CONFIG_ARCH_PHYTIUM=y +CONFIG_ARM64_AMU_EXTN=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PMEM=y +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM_GIC_PHYTIUM_2500=y +CONFIG_ARM_SMMU_V3_PMU=m +CONFIG_ARM_SMMU_V3_SVA=y +CONFIG_BASE_FULL=y +CONFIG_BLK_PM=y +CONFIG_BNX2=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BTRFS_FS=m +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_CHELSIO_T4=m +CONFIG_CIFS=m +CONFIG_CONFIGFS_FS=y +CONFIG_CORESIGHT=m +CONFIG_CRC16=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_DEV_HISI_HPRE=m +CONFIG_CRYPTO_DEV_HISI_QM=m +CONFIG_CRYPTO_DEV_HISI_SEC2=m +CONFIG_CRYPTO_DEV_HISI_SEC=m +CONFIG_CRYPTO_DEV_HISI_TRNG=m +CONFIG_CRYPTO_DEV_HISI_ZIP=m +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_SM3_ARM64_CE=m +CONFIG_CUSE=m +CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +CONFIG_DAMON=y +CONFIG_DRM=m +CONFIG_DRM_PHYTIUM=m +CONFIG_E1000=m +CONFIG_EXT3_FS=m +CONFIG_EXT4_FS=m +CONFIG_FCOE=m +CONFIG_FSCACHE=m +CONFIG_GENERIC_PHY=y +CONFIG_HINIC=m +CONFIG_HISI_THERMAL=m +CONFIG_HNS3=m +CONFIG_HNS=m 
+CONFIG_I2C_HISI=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_IGB=m +CONFIG_INET_MPTCP_DIAG=m +CONFIG_INFINIBAND_HNS=m +CONFIG_INFINIBAND_HNS_HIP08=y +CONFIG_IO_STRICT_DEVMEM=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_RR=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_JBD2=m +CONFIG_KSM=y +CONFIG_KUNPENG_HCCS=m +CONFIG_LIVEPATCH=y +CONFIG_MACVLAN=m +CONFIG_MEGARAID_SAS=m +CONFIG_MEMCG_KMEM=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MPTCP=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_MTD=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NET_ACT_GACT=m +CONFIG_NET_ACT_POLICE=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_V3=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NGBE=m +CONFIG_NO_HZ_FULL=y +CONFIG_NTB=m +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_TCP=m +CONFIG_PROC_PID_CPUSET=y +CONFIG_PROFILING=y +CONFIG_RATIONAL=y +CONFIG_RESET_HISI=y +CONFIG_RSEQ=y +CONFIG_RTC_INTF_DEV=y +CONFIG_RTC_SYSTOHC=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_CLUSTER=y +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_SPI_HISI_SFC_V3XX=m +CONFIG_SPI_MASTER=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_TASKSTATS=y +CONFIG_TCP_CONG_BBR=m +CONFIG_TXGBE=m +CONFIG_UACCE=m +CONFIG_USB_ACM=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_STORAGE=m +CONFIG_USB_XHCI_HCD=y +CONFIG_VFIO_PLATFORM=m +CONFIG_VIRTIO_BLK=m +CONFIG_VIRTIO_CONSOLE=m +CONFIG_VIRTIO_MMIO=m +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_ZSMALLOC=y diff --git a/anolis/configs/examination/L1-RECOMMEND/loongarch.config b/anolis/configs/examination/L1-RECOMMEND/loongarch.config new file mode 100644 index 000000000000..d842bed7dca5 --- /dev/null +++ b/anolis/configs/examination/L1-RECOMMEND/loongarch.config @@ -0,0 +1,103 @@ +CONFIG_ARCH_FORCE_MAX_ORDER=11 +# UNLIMITED CONFIG_BUILD_SALT +# UNLIMITED CONFIG_EXT3_FS 
+CONFIG_HZ=250 +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PGTABLE_LEVELS=3 +# UNLIMITED CONFIG_RANDOMIZE_BASE +# UNLIMITED CONFIG_SYSTEM_TRUSTED_KEYS +# EXCLUSIVE y CONFIG_16KB_2LEVEL CONFIG_16KB_3LEVEL CONFIG_4KB_3LEVEL CONFIG_4KB_4LEVEL CONFIG_64KB_2LEVEL CONFIG_64KB_3LEVEL +CONFIG_ARCH_IOREMAP=y +CONFIG_ARCH_STRICT_ALIGN=y +CONFIG_ARCH_WRITECOMBINE=y +CONFIG_BASE_FULL=y +CONFIG_BLK_PM=y +CONFIG_BNX2=y +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BTRFS_FS=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_CHELSIO_T4=m +CONFIG_CIFS=m +CONFIG_CONFIGFS_FS=y +CONFIG_CPU_HAS_LASX=y +CONFIG_CPU_HAS_LBT=y +CONFIG_CPU_HAS_LSX=y +CONFIG_CRC16=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRC32_LOONGARCH=m +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_LZO=m +CONFIG_CUSE=m +CONFIG_DRM=y +CONFIG_DRM_LOONGSON=y +CONFIG_DWMAC_LOONGSON=m +CONFIG_E1000=m +CONFIG_EXT4_FS=y +CONFIG_FB_LS2K500=m +CONFIG_FCOE=m +CONFIG_FSCACHE=m +CONFIG_GPIO_LOONGSON_64BIT=y +CONFIG_I2C_LS2X=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_IGB=m +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_RR=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_JBD2=y +CONFIG_KSM=y +CONFIG_LIVEPATCH=y +CONFIG_MACVLAN=m +CONFIG_MEGARAID_SAS=m +CONFIG_MEMCG_KMEM=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MPTCP=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_MTD=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NET_ACT_GACT=m +CONFIG_NET_ACT_POLICE=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_V3=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NGBE=m +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_TCP=m +CONFIG_PROC_PID_CPUSET=y +CONFIG_PROFILING=y +CONFIG_RATIONAL=y +CONFIG_RSEQ=y +CONFIG_RTC_DRV_LOONGSON=y +CONFIG_RTC_INTF_DEV=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPT3SAS=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_SPI_LOONGSON_PCI=y 
+CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_TASKSTATS=y +CONFIG_TCP_CONG_BBR=m +CONFIG_TXGBE=m +CONFIG_USB_ACM=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_STORAGE=m +CONFIG_USB_XHCI_HCD=y +CONFIG_VIRTIO_BLK=m +CONFIG_VIRTIO_CONSOLE=y +CONFIG_VIRTIO_MMIO=m +CONFIG_ZSMALLOC=y diff --git a/anolis/configs/examination/L1-RECOMMEND/sw_64.config b/anolis/configs/examination/L1-RECOMMEND/sw_64.config new file mode 100644 index 000000000000..09b5b503d843 --- /dev/null +++ b/anolis/configs/examination/L1-RECOMMEND/sw_64.config @@ -0,0 +1,86 @@ +# UNLIMITED CONFIG_BUILD_SALT +CONFIG_HZ=250 +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_PGTABLE_LEVELS=4 +# UNLIMITED CONFIG_RANDOMIZE_BASE +# UNLIMITED CONFIG_SYSTEM_TRUSTED_KEYS +CONFIG_BASE_FULL=y +CONFIG_BLK_PM=y +CONFIG_BNX2=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BTRFS_FS=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_CHELSIO_T4=m +CONFIG_CIFS=m +CONFIG_CONFIGFS_FS=y +CONFIG_CRC16=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_LZO=m +CONFIG_CUSE=m +CONFIG_DRM=y +CONFIG_E1000=m +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS=y +CONFIG_FCOE=m +CONFIG_FSCACHE=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_IGB=m +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_RR=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_JBD2=y +CONFIG_KSM=y +CONFIG_LIVEPATCH=y +CONFIG_MACVLAN=m +CONFIG_MEGARAID_SAS=m +CONFIG_MEMCG_KMEM=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MPTCP=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_MTD=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NET_ACT_GACT=m +CONFIG_NET_ACT_POLICE=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_V3=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NGBE=m +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_TCP=m +CONFIG_PROC_PID_CPUSET=y +CONFIG_PROFILING=y +CONFIG_RATIONAL=y +CONFIG_RSEQ=y +CONFIG_RTC_INTF_DEV=y +CONFIG_SCHED_AUTOGROUP=y 
+CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_TASKSTATS=y +CONFIG_TCP_CONG_BBR=m +CONFIG_TXGBE=m +CONFIG_USB_ACM=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_STORAGE=y +CONFIG_USB_XHCI_HCD=y +CONFIG_VIRTIO_BLK=m +CONFIG_VIRTIO_CONSOLE=m +CONFIG_VIRTIO_MMIO=y +CONFIG_ZSMALLOC=y diff --git a/anolis/configs/examination/L1-RECOMMEND/x86.config b/anolis/configs/examination/L1-RECOMMEND/x86.config new file mode 100644 index 000000000000..1bfc8b392b01 --- /dev/null +++ b/anolis/configs/examination/L1-RECOMMEND/x86.config @@ -0,0 +1,139 @@ +# UNLIMITED CONFIG_BUILD_SALT +# CHOICE CONFIG_HZ 100/250/1000 +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_PGTABLE_LEVELS=5 +# UNLIMITED CONFIG_SYSTEM_TRUSTED_KEYS +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_HMAT=y +CONFIG_BASE_FULL=y +CONFIG_BLK_PM=y +CONFIG_BNX2=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BTRFS_FS=m +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_CHELSIO_T4=m +CONFIG_CIFS=m +CONFIG_COMPAT=y +CONFIG_CONFIGFS_FS=y +CONFIG_CRC16=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_ZHAOXIN=m +CONFIG_CRYPTO_DEV_ZHAOXIN_AES=m +CONFIG_CRYPTO_DEV_ZHAOXIN_SHA=m +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_SM3_AVX_X86_64=m +CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m +CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m +CONFIG_CUSE=m +CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +CONFIG_DAMON=y +CONFIG_DRM=m +CONFIG_E1000=m +CONFIG_EXT3_FS=m +CONFIG_EXT4_FS=m +CONFIG_FCOE=m +CONFIG_FSCACHE=m +CONFIG_HINIC=m +CONFIG_HW_RANDOM_ZHAOXIN=m +CONFIG_I2C_ZHAOXIN=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_IGB=m +CONFIG_INET_MPTCP_DIAG=m 
+CONFIG_INTEL_IDLE=y +CONFIG_INTEL_IDXD_BUS=m +CONFIG_INTEL_IFS=m +CONFIG_INTEL_PMC_CORE=m +CONFIG_INTEL_PMT_CLASS=m +CONFIG_INTEL_TPMI=m +CONFIG_IO_STRICT_DEVMEM=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_RR=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_JBD2=m +CONFIG_KSM=y +CONFIG_MACVLAN=m +CONFIG_MEGARAID_SAS=m +CONFIG_MEMCG_KMEM=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_EN=m +CONFIG_MLX5_CORE=m +CONFIG_MLX5_CORE_EN=y +CONFIG_MPTCP=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_MTD=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NET_ACT_GACT=m +CONFIG_NET_ACT_POLICE=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_V3=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NGBE=m +CONFIG_NO_HZ_FULL=y +CONFIG_NTB=m +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_TCP=m +CONFIG_PARAVIRT_SPINLOCKS=y +CONFIG_PINCTRL_KX7000=m +CONFIG_PINCTRL_ZHAOXIN=m +CONFIG_PROC_PID_CPUSET=y +CONFIG_PROFILING=y +CONFIG_RATIONAL=y +CONFIG_RSEQ=y +CONFIG_RTC_INTF_DEV=y +CONFIG_RTC_SYSTOHC=y +CONFIG_SATA_ZHAOXIN=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_CLUSTER=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_TASKSTATS=y +CONFIG_TCP_CONG_BBR=m +CONFIG_TXGBE=m +CONFIG_UACCE=m +CONFIG_UNWINDER_ORC=y +CONFIG_USB_ACM=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_STORAGE=m +CONFIG_USB_XHCI_HCD=y +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_MDEV=m +CONFIG_VIRTIO_BLK=m +CONFIG_VIRTIO_CONSOLE=m +CONFIG_VIRTIO_MMIO=m +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_X86_CMOV=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_DIRECT_GBPAGES=y +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +CONFIG_X86_INTEL_PSTATE=y +CONFIG_X86_IOPL_IOPERM=y +CONFIG_X86_MPPARSE=y +CONFIG_X86_VSYSCALL_EMULATION=y +CONFIG_ZSMALLOC=y diff --git a/anolis/configs/examination/README.md b/anolis/configs/examination/README.md new file mode 
100644 index 000000000000..78480608a03f --- /dev/null +++ b/anolis/configs/examination/README.md @@ -0,0 +1,33 @@ +# 背景 +本文档用于存放 kconfig 的检查规则,以便检查 kconfig 的是否有违背规则。 + +# 目录组织 +- L0-MANDATORY/,用于存放**必须**遵守的 kconfig 规则,如果违反则视为失败 +- L1-RECOMMEND/,用于存放**推荐**遵守的 kconfig 规则,如果违反则会告警 +- {L0-MANDATORY, L1-RECOMMEND}/{x86/arm64/loongarch/sw_64}.config,对应 x86、arm64、龙芯、申威平台的 kconfig 规则 + +# 规则文件说明 +文件的每一行存放一个规则,具体如下: +1. `CONFIG_FOO=value` +CONFIG_FOO 必须出现在 config 文件中,且值必须为 value + +2. `# CONFIG_FOO is not set` +CONFIG_FOO 必须出现在 config 文件中,其值必须为 not set + +3. `# UNLIMITED CONFIG_FOO` +对 CONFIG_FOO 不做要求 + +4. `# CHOICE CONFIG_FOO a/b/c` +CONFIG_FOO 必须出现在 config 文件中,值必须在 a/b/c 中选择一个 + +5. `# RANGE CONFIG_FOO a,b` +CONFIG_FOO 必须出现在 config 文件中,值为整型,且必须在 [a, b] 这个范围内 + +6. `# EXCLUSIVE value CONFIG_FOO1 [CONFIG_FOO2 ...]` +CONFIG_FOO1, CONFIG_FOO2 等列表中,有且只有一个能出现在 config 文件中,且值必须为 value + +7. `## xxxx` +此行为注释 + +# 使用方式 +在 clone 该仓库后,执行 `cd anolis; make dist-configs-check` 命令即可。 -- Gitee From 728fc2c95028a6005c902e054ee257b47ffca0a3 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Tue, 21 May 2024 15:24:01 +0800 Subject: [PATCH 0947/2138] anolis: configs: add scripts to check kconfig files ANBZ: #8598 This commit add scripts to check the kconfig files obey constraints or not. 
Use follow command to check: cd anolis; make dist-configs-check Signed-off-by: Qiao Ma Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3236 --- anolis/Makefile | 3 + .../examination/anolis_kconfig_check.py | 337 ++++++++++++++++++ anolis/configs/examination/configs-check.sh | 44 +++ 3 files changed, 384 insertions(+) create mode 100644 anolis/configs/examination/anolis_kconfig_check.py create mode 100644 anolis/configs/examination/configs-check.sh diff --git a/anolis/Makefile b/anolis/Makefile index 069ceab2c498..8c650c547ddd 100644 --- a/anolis/Makefile +++ b/anolis/Makefile @@ -26,6 +26,9 @@ dist-genrpmtree: dist-check dist-rpms: dist-genrpmtree dist-check sh buildpkg.sh +dist-configs-check: + sh configs/examination/configs-check.sh + clean: rm -rf $(DIST_OUTPUT) diff --git a/anolis/configs/examination/anolis_kconfig_check.py b/anolis/configs/examination/anolis_kconfig_check.py new file mode 100644 index 000000000000..de85fc659287 --- /dev/null +++ b/anolis/configs/examination/anolis_kconfig_check.py @@ -0,0 +1,337 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# +# The core script for ANCK kconfig checking. +# It is not recommended to call directly. 
+# +# Copyright (C) 2024 Qiao Ma + +import argparse, re +from typing import List, Type, Dict, Tuple +from enum import Enum + +def die(args: str): + print(args) + exit(1) + +def default_args_func(args): + pass + +class Config(): + name: str + value: str + + def __init__(self, name, value) -> None: + self.name = name + self.value = value + + @staticmethod + def from_text(line: str) -> Type["Config"] : + RE_CONFIG_SET = r'^(CONFIG_\w+)=(.*)$' + RE_CONFIG_NOT_SET = r'^# (CONFIG_\w+) is not set$' + + if re.match(RE_CONFIG_SET, line): + obj = re.match(RE_CONFIG_SET, line) + return Config(name=obj.group(1), value=obj.group(2)) + elif re.match(RE_CONFIG_NOT_SET, line): + obj = re.match(RE_CONFIG_NOT_SET, line) + return Config(name=obj.group(1), value="n") + return None + +class ConfigList(): + configs: Dict[str, Config] + + def __init__(self) -> None: + self.configs = {} + + @staticmethod + def from_file(file: str) -> Type["ConfigList"]: + confs = ConfigList() + with open(file) as f: + for line in f.readlines(): + conf = Config.from_text(line) + if conf is None: + continue + confs.configs[conf.name] = conf + return confs + + def get(self, name) -> Type["Config"]: + return self.configs.get(name, None) + +ResultKind = Enum("ResultKind", ("SUCCESS", "MISS", "WRONG_VALUE", "NOT_IN_CHOICE", "NOT_IN_RANGE", "EXCLUSIVE_ERROR")) +RuleLevel = Enum("RuleLevel", ("L0_MANDATORY", "L1_RECOMMEND")) + +class CheckResult(): + name: str + kind: ResultKind + level: RuleLevel + value: str + + def __init__(self, level: RuleLevel, kind: ResultKind, name: str, text: str) -> None: + self.level = level + self.kind = kind + self.name = name + self.text = text + + def is_fatal_error(self): + return self.kind != ResultKind.SUCCESS and self.level == RuleLevel.L0_MANDATORY + + def __str__(self) -> str: + if self.kind == ResultKind.SUCCESS: + return "" + if self.level == RuleLevel.L0_MANDATORY: + return f"ERROR: {self.text}\n" + return f"WARNING: {self.text}\n" + + @staticmethod + def success(): + 
return CheckResult(RuleLevel.L0_MANDATORY, ResultKind.SUCCESS, "", "") + + @staticmethod + def miss(level: RuleLevel, name: str): + return CheckResult(level, ResultKind.MISS, name, f"missed: {name}") + + @staticmethod + def group_miss(level: RuleLevel, confs: List[str]): + conf_list = " ".join(confs) + return CheckResult(level, ResultKind.MISS, "", f"missed: none of follow configs exist {conf_list}") + + @staticmethod + def wrong_value(level: RuleLevel, name: str, expected: str, real: str): + return CheckResult(level, ResultKind.WRONG_VALUE, name, + f"wrong_value: {name}, expected: {expected}, real: {real}") + + @staticmethod + def not_in_choice(level: RuleLevel, name: str, real_value: str, values: List[str]): + str_values = ",".join(values) + return CheckResult(level, ResultKind.NOT_IN_CHOICE, name, + f"not_in_choice: {name} {real_value} not in [{str_values}]") + + @staticmethod + def not_in_range(level: RuleLevel, name: str, real_value: int, start: int, end: int): + return CheckResult(level, ResultKind.NOT_IN_RANGE, name, f"not_in_range: {name} {real_value} not in range [{start}, {end}]") + + @staticmethod + def exlusive_error(level: RuleLevel, confs: List[str]): + str_confs = ",".join(confs) + return CheckResult(level, ResultKind.EXCLUSIVE_ERROR, "", f"exclusive error: expected only one appears, but follow configs appears: {str_confs}") + +class Rule(): + subclasses = [] + + def __init_subclass__(cls, **kwargs): + super().__init_subclass__(**kwargs) + Rule.subclasses.append(cls) + + @staticmethod + def try_parse(line: str, level: RuleLevel): + raise NotImplementedError + + def check(self, line: str, level: RuleLevel): + raise NotImplementedError + + @staticmethod + def parse(line: str, level: RuleLevel): + for subclass in Rule.subclasses: + result = subclass.try_parse(line, level) + if result is not None: + return result + die(f"cannot parse : {line}") + +class RuleList(): + rules: List[Rule] + + def __init__(self): + self.rules = [] + + @staticmethod + def 
from_file(path: str, level: RuleLevel) -> Type["RuleList"]: + rl = RuleList() + with open(path) as f: + for line in f.readlines(): + line = line.strip() + if line == "" or line.startswith("##"): + continue + rule = Rule.parse(line, level) + rl.rules.append(rule) + return rl + + def check(self, confs: ConfigList) -> List[CheckResult]: + results : List[CheckResult] = [] + for rule in self.rules: + res = rule.check(confs) + results.append(res) + return results + + def merge(self, rhs: ConfigList): + self.rules.extend(rhs.rules) + +class ValueRule(Rule): + conf: Config + level: RuleLevel + + @staticmethod + def try_parse(line: str, level: RuleLevel): + rule = ValueRule() + conf = Config.from_text(line) + if conf is None: + return None + rule.conf = conf + rule.level = level + return rule + + def check(self, confs: ConfigList): + name = self.conf.name + conf = confs.get(name) + if conf is None: + return CheckResult.miss(self.level, self.conf.name) + if conf.value != self.conf.value: + return CheckResult.wrong_value(self.level, name, self.conf.value, conf.value) + return CheckResult.success() + +class UnlimitedRule(Rule): + @staticmethod + def try_parse(line: str, level: RuleLevel): + RE_CONF_UNLIMITED = r'^# UNLIMITED CONFIG_\w+$' + if not re.match(RE_CONF_UNLIMITED, line): + return None + return UnlimitedRule() + + def check(self, confs: ConfigList): + return CheckResult.success() + +class ChoiceRule(Rule): + name: str + values: List[str] + + def __init__(self, level, name, values) -> None: + self.level = level + self.name = name + self.values = values + + @staticmethod + def try_parse(line: str, level: RuleLevel): + RE_CONF_CHOICE = r'^#\s*CHOICE\s+(CONFIG_\w+)\s+([\w,\/]+)$' + obj = re.match(RE_CONF_CHOICE, line) + if obj is None: + return None + name = obj.group(1) + values = obj.group(2) + return ChoiceRule(level, name, values.split("/")) + + def check(self, confs: ConfigList): + conf = confs.get(self.name) + if conf is None: + return CheckResult.miss(self.level, 
self.name) + if conf.value not in self.values: + return CheckResult.not_in_choice(self.level, self.name, conf.value, self.values) + return CheckResult.success() + +class RangeRule(Rule): + level: RuleLevel + name: str + start: int + end: int + + def __init__(self, level: RuleLevel, name: str, start: int, end: int) -> None: + self.level = level + self.name = name + self.start = start + self.end = end + + @staticmethod + def try_parse(line: str, level: RuleLevel): + RE_CONF_RANGE = r'^#\s*RANGE\s+(CONFIG_\w+)\s+(\d+)\,(\d+)$' + obj = re.match(RE_CONF_RANGE, line) + if obj is None: + return None + return RangeRule(level, obj.group(1), int(obj.group(2)), int(obj.group(3))) + + def check(self, confs: ConfigList): + conf = confs.get(self.name) + if conf is None: + return CheckResult.miss(self.level, self.name) + val = int(conf.value) + if val <= self.end and val >= self.start: + return CheckResult.success() + return CheckResult.not_in_range(self.level, self.name, val, self.start, self.end) + +class ExclusiveRule(Rule): + level: RuleLevel + value: str + confs: List[str] + + def __init__(self, level: RuleLevel, value: str, confs: List[str]) -> None: + self.level = level + self.value = value + self.confs = confs + + @staticmethod + def try_parse(line: str, level: RuleLevel): + """# EXCLUSIVE value CONFIG_XXX [CONFIG_XXX ...]""" + RE_CONF_RANGE = r'^#\s*EXCLUSIVE\s+(\w+)\s+(.*)$' + obj = re.match(RE_CONF_RANGE, line) + if obj is None: + return None + value = obj.group(1) + confs = obj.group(2).split() + if len(confs) == 0: + return None + return ExclusiveRule(level, value, confs) + + def check(self, confs: ConfigList): + appears : List[Config] = [] + for name in self.confs: + conf = confs.get(name) + if conf is not None and conf.value != 'n': + appears.append(conf) + if len(appears) == 0: + return CheckResult.group_miss(self.level, appears) + if len(appears) != 1: + return CheckResult.exlusive_error(self.level, [x.name for x in appears]) + if appears[0].value != self.value: 
+ return CheckResult.wrong_value(self.level, appears[0].name, self.value, appears[0].value) + return CheckResult.success() + +def level_of(l: str) -> RuleLevel: + if l == "L0-MANDATORY": + return RuleLevel.L0_MANDATORY + elif l == "L1-RECOMMEND": + return RuleLevel.L1_RECOMMEND + die(f"unknown level {l}") + +def do_check(args): + confs = ConfigList.from_file(args.config) + rules = RuleList() + + if len(args.rules) != len(args.level): + die("the num of level and rules do not match") + + for i, rule_file in enumerate(args.rules): + rules.merge(RuleList.from_file(rule_file, level_of(args.level[i]))) + results = rules.check(confs) + + fatal_error = False + result_text = "" + for r in results: + result_text += str(r) + fatal_error = fatal_error or r.is_fatal_error() + + if result_text == "": + result_text = "PASS\n" + print(result_text) + exit(fatal_error) + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='check configs') + parser.set_defaults(func=default_args_func) + subparsers = parser.add_subparsers() + + checker = subparsers.add_parser("check") + checker.add_argument("--rules", action='append', default=[], help="the kconfig checking rule files") + checker.add_argument("--level", action='append', default=[], help="the kconfig checking rule files") + checker.add_argument("config", help="the config files to be checked") + checker.set_defaults(func=do_check) + + args = parser.parse_args() + args.func(args) diff --git a/anolis/configs/examination/configs-check.sh b/anolis/configs/examination/configs-check.sh new file mode 100644 index 000000000000..fff2579be94a --- /dev/null +++ b/anolis/configs/examination/configs-check.sh @@ -0,0 +1,44 @@ +#! /bin/bash +# check kconfigs obey constraints or not. +# it is called from Makefile, do not run it directly. 
+# +# usage: +# - check for only one arch: +# ARCH=${arch} make dist-configs-check +# available archs are: x86, arm64, loongarch +# - check for all arch: +# make dist-configs-check + +SCRIPT_DIR=$(realpath $(dirname $0)) + +final_exit_status=0 + +function check_arch() { + local arch=$1 + echo "* Checking configs for arch: $arch" + python3 ${SCRIPT_DIR}/anolis_kconfig_check.py check \ + --rules ${SCRIPT_DIR}/L0-MANDATORY/${arch}.config \ + --level L0-MANDATORY \ + --rules ${SCRIPT_DIR}/L1-RECOMMEND/${arch}.config \ + --level L1-RECOMMEND \ + ${SCRIPT_DIR}/../../../arch/${arch}/configs/anolis_defconfig + + local ret=$? + if [ $final_exit_status -eq 0 ]; then + final_exit_status=$ret + fi +} + +# arch sw_64 is not available now +arch_list=("x86" "arm64" "loongarch") + +if [ -n "${ARCH}" ]; then + arch_list=(${ARCH}) +fi + +for arch in ${arch_list[@]} +do + check_arch $arch +done + +exit $final_exit_status -- Gitee From 68917c9b899b7b139e4b439b6e56b3f73ccbe318 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Mon, 8 Apr 2024 11:51:39 +0800 Subject: [PATCH 0948/2138] powercap: intel_rapl: Sort header files ANBZ: #9256 commit 72b8b94155d957f82697802555d53c142d82dece upstream. Sort header files alphabetically. Intel-SIG: commit 72b8b94155d9 powercap: intel_rapl: Sort header files Backport TPMI based RAPL PMU support for GNR and future Xeons. Signed-off-by: Zhang Rui Signed-off-by: Rafael J. 
Wysocki [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3293 --- drivers/powercap/intel_rapl_common.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index f1de4111e98d..e98e56a7cb1e 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -5,27 +5,27 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include +#include +#include +#include +#include #include -#include #include -#include -#include -#include #include -#include -#include -#include -#include +#include +#include #include -#include -#include #include -#include +#include +#include +#include +#include -#include #include #include +#include /* bitmasks for RAPL MSRs, used by primitive access functions */ #define ENERGY_STATUS_MASK 0xffffffff -- Gitee From c5a81e830174d319670f67d86040dc78a3ae0404 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 28 Apr 2024 17:24:26 +0800 Subject: [PATCH 0949/2138] powercap: intel_rapl: Introduce APIs for PMU support ANBZ: #9256 commit 575024a8aa7cf1dff49b94092f774ed1c90586be upstream. Introduce two new APIs rapl_package_add_pmu()/rapl_package_remove_pmu(). RAPL driver can invoke these APIs to expose its supported energy counters via perf PMU. The new RAPL PMU is fully compatible with current MSR RAPL PMU, including using the same PMU name and events name/id/unit/scale, etc. For example, use below command perf stat -e power/energy-pkg/ -e power/energy-ram/ FOO to get the energy consumption if power/energy-pkg/ and power/energy-ram/ events are available in the "perf list" output. This does not introduce any conflict because TPMI RAPL is the only user of these APIs currently, and it never co-exists with MSR RAPL. 
Note that RAPL Packages can be probed/removed dynamically, and the events supported by each TPMI RAPL device can be different. Thus the RAPL PMU support is done on demand, which means 1. PMU is registered only if it is needed by a RAPL Package. PMU events for unsupported counters are not exposed. 2. PMU is unregistered and registered when a new RAPL Package is probed and supports new counters that are not supported by current PMU. For example, on a dual-package system using TPMI RAPL, it is possible that Package 1 behaves as TPMI domain root and supports Psys domain. In this case, register PMU without Psys event when probing Package 0, and re-register the PMU with Psys event when probing Package 1. 3. PMU is unregistered when all registered RAPL Packages don't need PMU. Intel-SIG: commit 575024a8aa7c powercap: intel_rapl: Introduce APIs for PMU support Backport TPMI based RAPL PMU support for GNR and future Xeons. Signed-off-by: Zhang Rui Signed-off-by: Rafael J. Wysocki [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3293 --- drivers/powercap/intel_rapl_common.c | 582 +++++++++++++++++++++++++++ include/linux/intel_rapl.h | 32 ++ 2 files changed, 614 insertions(+) diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index e98e56a7cb1e..3350054632f1 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -15,6 +15,8 @@ #include #include #include +#include +#include #include #include #include @@ -1505,6 +1507,586 @@ static int rapl_detect_domains(struct rapl_package *rp) return 0; } +#ifdef CONFIG_PERF_EVENTS + +/* + * Support for RAPL PMU + * + * Register a PMU if any of the registered RAPL Packages have the requirement + * of exposing its energy counters via Perf PMU. 
+ * + * PMU Name: + * power + * + * Events: + * Name Event id RAPL Domain + * energy_cores 0x01 RAPL_DOMAIN_PP0 + * energy_pkg 0x02 RAPL_DOMAIN_PACKAGE + * energy_ram 0x03 RAPL_DOMAIN_DRAM + * energy_gpu 0x04 RAPL_DOMAIN_PP1 + * energy_psys 0x05 RAPL_DOMAIN_PLATFORM + * + * Unit: + * Joules + * + * Scale: + * 2.3283064365386962890625e-10 + * The same RAPL domain in different RAPL Packages may have different + * energy units. Use 2.3283064365386962890625e-10 (2^-32) Joules as + * the fixed unit for all energy counters, and covert each hardware + * counter increase to N times of PMU event counter increases. + * + * This is fully compatible with the current MSR RAPL PMU. This means that + * userspace programs like turbostat can use the same code to handle RAPL Perf + * PMU, no matter what RAPL Interface driver (MSR/TPMI, etc) is running + * underlying on the platform. + * + * Note that RAPL Packages can be probed/removed dynamically, and the events + * supported by each TPMI RAPL device can be different. Thus the RAPL PMU + * support is done on demand, which means + * 1. PMU is registered only if it is needed by a RAPL Package. PMU events for + * unsupported counters are not exposed. + * 2. PMU is unregistered and registered when a new RAPL Package is probed and + * supports new counters that are not supported by current PMU. + * 3. PMU is unregistered when all registered RAPL Packages don't need PMU. 
+ */ + +struct rapl_pmu { + struct pmu pmu; /* Perf PMU structure */ + u64 timer_ms; /* Maximum expiration time to avoid counter overflow */ + unsigned long domain_map; /* Events supported by current registered PMU */ + bool registered; /* Whether the PMU has been registered or not */ +}; + +static struct rapl_pmu rapl_pmu; + +/* PMU helpers */ + +static int get_pmu_cpu(struct rapl_package *rp) +{ + int cpu; + + if (!rp->has_pmu) + return nr_cpu_ids; + + /* Only TPMI RAPL is supported for now */ + if (rp->priv->type != RAPL_IF_TPMI) + return nr_cpu_ids; + + /* TPMI RAPL uses any CPU in the package for PMU */ + for_each_online_cpu(cpu) + if (topology_physical_package_id(cpu) == rp->id) + return cpu; + + return nr_cpu_ids; +} + +static bool is_rp_pmu_cpu(struct rapl_package *rp, int cpu) +{ + if (!rp->has_pmu) + return false; + + /* Only TPMI RAPL is supported for now */ + if (rp->priv->type != RAPL_IF_TPMI) + return false; + + /* TPMI RAPL uses any CPU in the package for PMU */ + return topology_physical_package_id(cpu) == rp->id; +} + +static struct rapl_package_pmu_data *event_to_pmu_data(struct perf_event *event) +{ + struct rapl_package *rp = event->pmu_private; + + return &rp->pmu_data; +} + +/* PMU event callbacks */ + +static u64 event_read_counter(struct perf_event *event) +{ + struct rapl_package *rp = event->pmu_private; + u64 val; + int ret; + + /* Return 0 for unsupported events */ + if (event->hw.idx < 0) + return 0; + + ret = rapl_read_data_raw(&rp->domains[event->hw.idx], ENERGY_COUNTER, false, &val); + + /* Return 0 for failed read */ + if (ret) + return 0; + + return val; +} + +static void __rapl_pmu_event_start(struct perf_event *event) +{ + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + event->hw.state = 0; + + list_add_tail(&event->active_entry, &data->active_list); + + local64_set(&event->hw.prev_count, event_read_counter(event)); + if (++data->n_active == 
1) + hrtimer_start(&data->hrtimer, data->timer_interval, + HRTIMER_MODE_REL_PINNED); +} + +static void rapl_pmu_event_start(struct perf_event *event, int mode) +{ + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + unsigned long flags; + + raw_spin_lock_irqsave(&data->lock, flags); + __rapl_pmu_event_start(event); + raw_spin_unlock_irqrestore(&data->lock, flags); +} + +static u64 rapl_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + u64 prev_raw_count, new_raw_count; + s64 delta, sdelta; + + /* + * Follow the generic code to drain hwc->prev_count. + * The loop is not expected to run for multiple times. + */ + prev_raw_count = local64_read(&hwc->prev_count); + do { + new_raw_count = event_read_counter(event); + } while (!local64_try_cmpxchg(&hwc->prev_count, + &prev_raw_count, new_raw_count)); + + + /* + * Now we have the new raw value and have updated the prev + * timestamp already. We can now calculate the elapsed delta + * (event-)time and add that to the generic event. + */ + delta = new_raw_count - prev_raw_count; + + /* + * Scale delta to smallest unit (2^-32) + * users must then scale back: count * 1/(1e9*2^32) to get Joules + * or use ldexp(count, -32). 
+ * Watts = Joules/Time delta + */ + sdelta = delta * data->scale[event->hw.flags]; + + local64_add(sdelta, &event->count); + + return new_raw_count; +} + +static void rapl_pmu_event_stop(struct perf_event *event, int mode) +{ + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + + raw_spin_lock_irqsave(&data->lock, flags); + + /* Mark event as deactivated and stopped */ + if (!(hwc->state & PERF_HES_STOPPED)) { + WARN_ON_ONCE(data->n_active <= 0); + if (--data->n_active == 0) + hrtimer_cancel(&data->hrtimer); + + list_del(&event->active_entry); + + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + } + + /* Check if update of sw counter is necessary */ + if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + /* + * Drain the remaining delta count out of a event + * that we are disabling: + */ + rapl_event_update(event); + hwc->state |= PERF_HES_UPTODATE; + } + + raw_spin_unlock_irqrestore(&data->lock, flags); +} + +static int rapl_pmu_event_add(struct perf_event *event, int mode) +{ + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + + raw_spin_lock_irqsave(&data->lock, flags); + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (mode & PERF_EF_START) + __rapl_pmu_event_start(event); + + raw_spin_unlock_irqrestore(&data->lock, flags); + + return 0; +} + +static void rapl_pmu_event_del(struct perf_event *event, int flags) +{ + rapl_pmu_event_stop(event, PERF_EF_UPDATE); +} + +/* RAPL PMU event ids, same as shown in sysfs */ +enum perf_rapl_events { + PERF_RAPL_PP0 = 1, /* all cores */ + PERF_RAPL_PKG, /* entire package */ + PERF_RAPL_RAM, /* DRAM */ + PERF_RAPL_PP1, /* gpu */ + PERF_RAPL_PSYS, /* psys */ + PERF_RAPL_MAX +}; +#define RAPL_EVENT_MASK GENMASK(7, 0) + +static const int event_to_domain[PERF_RAPL_MAX] = { + [PERF_RAPL_PP0] = RAPL_DOMAIN_PP0, + 
[PERF_RAPL_PKG] = RAPL_DOMAIN_PACKAGE, + [PERF_RAPL_RAM] = RAPL_DOMAIN_DRAM, + [PERF_RAPL_PP1] = RAPL_DOMAIN_PP1, + [PERF_RAPL_PSYS] = RAPL_DOMAIN_PLATFORM, +}; + +static int rapl_pmu_event_init(struct perf_event *event) +{ + struct rapl_package *pos, *rp = NULL; + u64 cfg = event->attr.config & RAPL_EVENT_MASK; + int domain, idx; + + /* Only look at RAPL events */ + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* Check for supported events only */ + if (!cfg || cfg >= PERF_RAPL_MAX) + return -EINVAL; + + if (event->cpu < 0) + return -EINVAL; + + /* Find out which Package the event belongs to */ + list_for_each_entry(pos, &rapl_packages, plist) { + if (is_rp_pmu_cpu(pos, event->cpu)) { + rp = pos; + break; + } + } + if (!rp) + return -ENODEV; + + /* Find out which RAPL Domain the event belongs to */ + domain = event_to_domain[cfg]; + + event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; + event->pmu_private = rp; /* Which package */ + event->hw.flags = domain; /* Which domain */ + + event->hw.idx = -1; + /* Find out the index in rp->domains[] to get domain pointer */ + for (idx = 0; idx < rp->nr_domains; idx++) { + if (rp->domains[idx].id == domain) { + event->hw.idx = idx; + break; + } + } + + return 0; +} + +static void rapl_pmu_event_read(struct perf_event *event) +{ + rapl_event_update(event); +} + +static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) +{ + struct rapl_package_pmu_data *data = + container_of(hrtimer, struct rapl_package_pmu_data, hrtimer); + struct perf_event *event; + unsigned long flags; + + if (!data->n_active) + return HRTIMER_NORESTART; + + raw_spin_lock_irqsave(&data->lock, flags); + + list_for_each_entry(event, &data->active_list, active_entry) + rapl_event_update(event); + + raw_spin_unlock_irqrestore(&data->lock, flags); + + hrtimer_forward_now(hrtimer, data->timer_interval); + + return HRTIMER_RESTART; +} + +/* PMU sysfs attributes */ + +/* + * There are no default events, but we need to create 
"events" group (with + * empty attrs) before updating it with detected events. + */ +static struct attribute *attrs_empty[] = { + NULL, +}; + +static struct attribute_group pmu_events_group = { + .name = "events", + .attrs = attrs_empty, +}; + +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rapl_package *rp; + cpumask_var_t cpu_mask; + int cpu; + int ret; + + if (!alloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; + + cpus_read_lock(); + + cpumask_clear(cpu_mask); + + /* Choose a cpu for each RAPL Package */ + list_for_each_entry(rp, &rapl_packages, plist) { + cpu = get_pmu_cpu(rp); + if (cpu < nr_cpu_ids) + cpumask_set_cpu(cpu, cpu_mask); + } + cpus_read_unlock(); + + ret = cpumap_print_to_pagebuf(true, buf, cpu_mask); + + free_cpumask_var(cpu_mask); + + return ret; +} + +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static struct attribute_group pmu_cpumask_group = { + .attrs = pmu_cpumask_attrs, +}; + +PMU_FORMAT_ATTR(event, "config:0-7"); +static struct attribute *pmu_format_attr[] = { + &format_attr_event.attr, + NULL +}; + +static struct attribute_group pmu_format_group = { + .name = "format", + .attrs = pmu_format_attr, +}; + +static const struct attribute_group *pmu_attr_groups[] = { + &pmu_events_group, + &pmu_cpumask_group, + &pmu_format_group, + NULL +}; + +#define RAPL_EVENT_ATTR_STR(_name, v, str) \ +static struct perf_pmu_events_attr event_attr_##v = { \ + .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \ + .event_str = str, \ +} + +RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"); +RAPL_EVENT_ATTR_STR(energy-pkg, rapl_pkg, "event=0x02"); +RAPL_EVENT_ATTR_STR(energy-ram, rapl_ram, "event=0x03"); +RAPL_EVENT_ATTR_STR(energy-gpu, rapl_gpu, "event=0x04"); +RAPL_EVENT_ATTR_STR(energy-psys, rapl_psys, "event=0x05"); + +RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_unit_cores, "Joules"); 
+RAPL_EVENT_ATTR_STR(energy-pkg.unit, rapl_unit_pkg, "Joules"); +RAPL_EVENT_ATTR_STR(energy-ram.unit, rapl_unit_ram, "Joules"); +RAPL_EVENT_ATTR_STR(energy-gpu.unit, rapl_unit_gpu, "Joules"); +RAPL_EVENT_ATTR_STR(energy-psys.unit, rapl_unit_psys, "Joules"); + +RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_scale_cores, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_scale_pkg, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_scale_ram, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_scale_gpu, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-psys.scale, rapl_scale_psys, "2.3283064365386962890625e-10"); + +#define RAPL_EVENT_GROUP(_name, domain) \ +static struct attribute *pmu_attr_##_name[] = { \ + &event_attr_rapl_##_name.attr.attr, \ + &event_attr_rapl_unit_##_name.attr.attr, \ + &event_attr_rapl_scale_##_name.attr.attr, \ + NULL \ +}; \ +static umode_t is_visible_##_name(struct kobject *kobj, struct attribute *attr, int event) \ +{ \ + return rapl_pmu.domain_map & BIT(domain) ? 
attr->mode : 0; \ +} \ +static struct attribute_group pmu_group_##_name = { \ + .name = "events", \ + .attrs = pmu_attr_##_name, \ + .is_visible = is_visible_##_name, \ +} + +RAPL_EVENT_GROUP(cores, RAPL_DOMAIN_PP0); +RAPL_EVENT_GROUP(pkg, RAPL_DOMAIN_PACKAGE); +RAPL_EVENT_GROUP(ram, RAPL_DOMAIN_DRAM); +RAPL_EVENT_GROUP(gpu, RAPL_DOMAIN_PP1); +RAPL_EVENT_GROUP(psys, RAPL_DOMAIN_PLATFORM); + +static const struct attribute_group *pmu_attr_update[] = { + &pmu_group_cores, + &pmu_group_pkg, + &pmu_group_ram, + &pmu_group_gpu, + &pmu_group_psys, + NULL +}; + +static int rapl_pmu_update(struct rapl_package *rp) +{ + int ret = 0; + + /* Return if PMU already covers all events supported by current RAPL Package */ + if (rapl_pmu.registered && !(rp->domain_map & (~rapl_pmu.domain_map))) + goto end; + + /* Unregister previous registered PMU */ + if (rapl_pmu.registered) + perf_pmu_unregister(&rapl_pmu.pmu); + + rapl_pmu.registered = false; + rapl_pmu.domain_map |= rp->domain_map; + + memset(&rapl_pmu.pmu, 0, sizeof(struct pmu)); + rapl_pmu.pmu.attr_groups = pmu_attr_groups; + rapl_pmu.pmu.attr_update = pmu_attr_update; + rapl_pmu.pmu.task_ctx_nr = perf_invalid_context; + rapl_pmu.pmu.event_init = rapl_pmu_event_init; + rapl_pmu.pmu.add = rapl_pmu_event_add; + rapl_pmu.pmu.del = rapl_pmu_event_del; + rapl_pmu.pmu.start = rapl_pmu_event_start; + rapl_pmu.pmu.stop = rapl_pmu_event_stop; + rapl_pmu.pmu.read = rapl_pmu_event_read; + rapl_pmu.pmu.module = THIS_MODULE; + rapl_pmu.pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT; + ret = perf_pmu_register(&rapl_pmu.pmu, "power", -1); + if (ret) { + pr_info("Failed to register PMU\n"); + return ret; + } + + rapl_pmu.registered = true; +end: + rp->has_pmu = true; + return ret; +} + +int rapl_package_add_pmu(struct rapl_package *rp) +{ + struct rapl_package_pmu_data *data = &rp->pmu_data; + int idx; + + if (rp->has_pmu) + return -EEXIST; + + guard(cpus_read_lock)(); + + for (idx = 0; idx < rp->nr_domains; idx++) { 
+ struct rapl_domain *rd = &rp->domains[idx]; + int domain = rd->id; + u64 val; + + if (!test_bit(domain, &rp->domain_map)) + continue; + + /* + * The RAPL PMU granularity is 2^-32 Joules + * data->scale[]: times of 2^-32 Joules for each ENERGY COUNTER increase + */ + val = rd->energy_unit * (1ULL << 32); + do_div(val, ENERGY_UNIT_SCALE * 1000000); + data->scale[domain] = val; + + if (!rapl_pmu.timer_ms) { + struct rapl_primitive_info *rpi = get_rpi(rp, ENERGY_COUNTER); + + /* + * Calculate the timer rate: + * Use reference of 200W for scaling the timeout to avoid counter + * overflows. + * + * max_count = rpi->mask >> rpi->shift + 1 + * max_energy_pj = max_count * rd->energy_unit + * max_time_sec = (max_energy_pj / 1000000000) / 200w + * + * rapl_pmu.timer_ms = max_time_sec * 1000 / 2 + */ + val = (rpi->mask >> rpi->shift) + 1; + val *= rd->energy_unit; + do_div(val, 1000000 * 200 * 2); + rapl_pmu.timer_ms = val; + + pr_debug("%llu ms overflow timer\n", rapl_pmu.timer_ms); + } + + pr_debug("Domain %s: hw unit %lld * 2^-32 Joules\n", rd->name, data->scale[domain]); + } + + /* Initialize per package PMU data */ + raw_spin_lock_init(&data->lock); + INIT_LIST_HEAD(&data->active_list); + data->timer_interval = ms_to_ktime(rapl_pmu.timer_ms); + hrtimer_init(&data->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + data->hrtimer.function = rapl_hrtimer_handle; + + return rapl_pmu_update(rp); +} +EXPORT_SYMBOL_GPL(rapl_package_add_pmu); + +void rapl_package_remove_pmu(struct rapl_package *rp) +{ + struct rapl_package *pos; + + if (!rp->has_pmu) + return; + + guard(cpus_read_lock)(); + + list_for_each_entry(pos, &rapl_packages, plist) { + /* PMU is still needed */ + if (pos->has_pmu && pos != rp) + return; + } + + perf_pmu_unregister(&rapl_pmu.pmu); + memset(&rapl_pmu, 0, sizeof(struct rapl_pmu)); +} +EXPORT_SYMBOL_GPL(rapl_package_remove_pmu); +#endif + /* called from CPU hotplug notifier, hotplug lock held */ void rapl_remove_package_cpuslocked(struct rapl_package *rp) { 
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h index f3196f82fd8a..c0397423d3a8 100644 --- a/include/linux/intel_rapl.h +++ b/include/linux/intel_rapl.h @@ -158,6 +158,26 @@ struct rapl_if_priv { void *rpi; }; +#ifdef CONFIG_PERF_EVENTS +/** + * struct rapl_package_pmu_data: Per package data for PMU support + * @scale: Scale of 2^-32 Joules for each energy counter increase. + * @lock: Lock to protect n_active and active_list. + * @n_active: Number of active events. + * @active_list: List of active events. + * @timer_interval: Maximum timer expiration time before counter overflow. + * @hrtimer: Periodically update the counter to prevent overflow. + */ +struct rapl_package_pmu_data { + u64 scale[RAPL_DOMAIN_MAX]; + raw_spinlock_t lock; + int n_active; + struct list_head active_list; + ktime_t timer_interval; + struct hrtimer hrtimer; +}; +#endif + /* maximum rapl package domain name: package-%d-die-%d */ #define PACKAGE_DOMAIN_NAME_LENGTH 30 @@ -176,6 +196,10 @@ struct rapl_package { struct cpumask cpumask; char name[PACKAGE_DOMAIN_NAME_LENGTH]; struct rapl_if_priv *priv; +#ifdef CONFIG_PERF_EVENTS + bool has_pmu; + struct rapl_package_pmu_data pmu_data; +#endif }; struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv, @@ -188,4 +212,12 @@ struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu); void rapl_remove_package(struct rapl_package *rp); +#ifdef CONFIG_PERF_EVENTS +int rapl_package_add_pmu(struct rapl_package *rp); +void rapl_package_remove_pmu(struct rapl_package *rp); +#else +static inline int rapl_package_add_pmu(struct rapl_package *rp) { return 0; } +static inline void rapl_package_remove_pmu(struct rapl_package *rp) { } +#endif + #endif /* __INTEL_RAPL_H__ */ -- Gitee From 13cf4b87978be1bdaab90c17f71083b43db61442 Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 28 Apr 2024 
17:24:27 +0800 Subject: [PATCH 0950/2138] powercap: intel_rapl_tpmi: Enable PMU support ANBZ: #9256 commit 963a9ad3c589dc0f922697faea53c69098083945 upstream. Enable RAPL PMU support for TPMI RAPL driver. Intel-SIG: commit 963a9ad3c589 powercap: intel_rapl_tpmi: Enable PMU support Backport TPMI based RAPL PMU support for GNR and future Xeons. Signed-off-by: Zhang Rui Signed-off-by: Rafael J. Wysocki [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3293 --- drivers/powercap/intel_rapl_tpmi.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c index 1c48dba0ba96..645fd1dc51a9 100644 --- a/drivers/powercap/intel_rapl_tpmi.c +++ b/drivers/powercap/intel_rapl_tpmi.c @@ -313,6 +313,8 @@ static int intel_rapl_tpmi_probe(struct auxiliary_device *auxdev, goto err; } + rapl_package_add_pmu(trp->rp); + auxiliary_set_drvdata(auxdev, trp); return 0; @@ -325,6 +327,7 @@ static void intel_rapl_tpmi_remove(struct auxiliary_device *auxdev) { struct tpmi_rapl_package *trp = auxiliary_get_drvdata(auxdev); + rapl_package_remove_pmu(trp->rp); rapl_remove_package(trp->rp); trp_release(trp); } -- Gitee From 1f0d1014f11c5fbdf640df7b68890c6c1dc24321 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Sat, 23 Sep 2023 17:23:47 -0700 Subject: [PATCH 0951/2138] dmaengine: idxd: rate limit printk in misc interrupt thread ANBZ: #9252 commit 555921feb2ac03d88647ccc62015e68f157c30a2 upstream. Add rate limit to the dev_warn() call in the misc interrupt thread. This limits dmesg getting spammed if a descriptor submitter is spamming bad descriptors with invalid completion records and resulting the errors being continuously reported by the misc interrupt handling thread. Intel-SIG: commit 555921feb2ac dmaengine: idxd: rate limit printk in misc interrupt thread. Incremental backporting patches for DSA/IAA on Intel Xeon platform. 
Reported-by: Sanjay Kumar Signed-off-by: Dave Jiang Reviewed-by: Fenghua Yu Acked-by: Lijun Pan Link: https://lore.kernel.org/r/20230924002347.1117757-1-fenghua.yu@intel.com Signed-off-by: Vinod Koul [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/dma/idxd/irq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index 7efc85b5bad9..27c93a245d12 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -433,8 +433,8 @@ irqreturn_t idxd_misc_thread(int vec, void *data) val |= IDXD_INTC_ERR; for (i = 0; i < 4; i++) - dev_warn(dev, "err[%d]: %#16.16llx\n", - i, idxd->sw_err.bits[i]); + dev_warn_ratelimited(dev, "err[%d]: %#16.16llx\n", + i, idxd->sw_err.bits[i]); err = true; } -- Gitee From 8f10528b1623bb2fa279534a40d6fa1d5df8c358 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Fri, 8 Sep 2023 13:10:45 -0700 Subject: [PATCH 0952/2138] dmaengine: idxd: add wq driver name support for accel-config user tool ANBZ: #9252 commit 7af1e0aceeb321cbc90fcf6fa0bec8870290377f upstream. With the possibility of multiple wq drivers that can be bound to the wq, the user config tool accel-config needs a way to know which wq driver to bind to the wq. Introduce per wq driver_name sysfs attribute where the user can indicate the driver to be bound to the wq. This allows accel-config to just bind to the driver using wq->driver_name. Intel-SIG: commit 7af1e0aceeb3 dmaengine: idxd: add wq driver name support for accel-config user tool. Incremental backporting patches for DSA/IAA on Intel Xeon platform. 
Signed-off-by: Dave Jiang Signed-off-by: Tom Zanussi Reviewed-by: Fenghua Yu Acked-by: Vinod Koul Link: https://lore.kernel.org/r/20230908201045.4115614-1-fenghua.yu@intel.com Signed-off-by: Vinod Koul [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- .../ABI/stable/sysfs-driver-dma-idxd | 6 ++++ drivers/dma/idxd/cdev.c | 7 ++++ drivers/dma/idxd/dma.c | 6 ++++ drivers/dma/idxd/idxd.h | 9 +++++ drivers/dma/idxd/sysfs.c | 34 +++++++++++++++++++ include/uapi/linux/idxd.h | 1 + 6 files changed, 63 insertions(+) diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd index 825e619250bf..f2ec42949a54 100644 --- a/Documentation/ABI/stable/sysfs-driver-dma-idxd +++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd @@ -270,6 +270,12 @@ Description: Shows the operation capability bits displayed in bitmap format correlates to the operations allowed. It's visible only on platforms that support the capability. +What: /sys/bus/dsa/devices/wq./driver_name +Date: Sept 8, 2023 +KernelVersion: 6.7.0 +Contact: dmaengine@vger.kernel.org +Description: Name of driver to be bounded to the wq. 
+ What: /sys/bus/dsa/devices/engine./group_id Date: Oct 25, 2019 KernelVersion: 5.6.0 diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index c18633ad8455..7ddf5f933475 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -584,6 +584,7 @@ void idxd_wq_del_cdev(struct idxd_wq *wq) static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) { + struct device *dev = &idxd_dev->conf_dev; struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); struct idxd_device *idxd = wq->idxd; int rc; @@ -611,6 +612,12 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); + if (!idxd_wq_driver_name_match(wq, dev)) { + idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; + rc = -ENODEV; + goto wq_err; + } + wq->wq = create_workqueue(dev_name(wq_confdev(wq))); if (!wq->wq) { rc = -ENOMEM; diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index 07623fb0f52f..47a01893cfdb 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -306,6 +306,12 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) return -ENXIO; mutex_lock(&wq->wq_lock); + if (!idxd_wq_driver_name_match(wq, dev)) { + idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; + rc = -ENODEV; + goto err; + } + wq->type = IDXD_WQT_KERNEL; rc = drv_enable_wq(wq); diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index bea10c5cdb76..fcbb8caea899 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -159,6 +159,8 @@ struct idxd_cdev { int minor; }; +#define DRIVER_NAME_SIZE 128 + #define IDXD_ALLOCATED_BATCH_SIZE 128U #define WQ_NAME_SIZE 1024 #define WQ_TYPE_SIZE 10 @@ -227,6 +229,8 @@ struct idxd_wq { /* Lock to protect upasid_xa access. 
*/ struct mutex uc_lock; struct xarray upasid_xa; + + char driver_name[DRIVER_NAME_SIZE + 1]; }; struct idxd_engine { @@ -648,6 +652,11 @@ static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wq wqcfg->max_batch_shift = max_batch_shift; } +static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev) +{ + return (strncmp(wq->driver_name, dev->driver->name, strlen(dev->driver->name)) == 0); +} + int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv, struct module *module, const char *mod_name); #define idxd_driver_register(driver) \ diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 1fd5a93045f7..3a5ce477a81a 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -1282,6 +1282,39 @@ static ssize_t wq_op_config_store(struct device *dev, struct device_attribute *a static struct device_attribute dev_attr_wq_op_config = __ATTR(op_config, 0644, wq_op_config_show, wq_op_config_store); +static ssize_t wq_driver_name_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = confdev_to_wq(dev); + + return sysfs_emit(buf, "%s\n", wq->driver_name); +} + +static ssize_t wq_driver_name_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_wq *wq = confdev_to_wq(dev); + char *input, *pos; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (strlen(buf) > DRIVER_NAME_SIZE || strlen(buf) == 0) + return -EINVAL; + + input = kstrndup(buf, count, GFP_KERNEL); + if (!input) + return -ENOMEM; + + pos = strim(input); + memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1); + sprintf(wq->driver_name, "%s", pos); + kfree(input); + return count; +} + +static struct device_attribute dev_attr_wq_driver_name = + __ATTR(driver_name, 0644, wq_driver_name_show, wq_driver_name_store); + static struct attribute *idxd_wq_attributes[] = { &dev_attr_wq_clients.attr, &dev_attr_wq_state.attr, @@ -1301,6 
+1334,7 @@ static struct attribute *idxd_wq_attributes[] = { &dev_attr_wq_occupancy.attr, &dev_attr_wq_enqcmds_retries.attr, &dev_attr_wq_op_config.attr, + &dev_attr_wq_driver_name.attr, NULL, }; diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index 606b52e88ce3..3d1987e1bb2d 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -31,6 +31,7 @@ enum idxd_scmd_stat { IDXD_SCMD_WQ_IRQ_ERR = 0x80100000, IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000, IDXD_SCMD_DEV_EVL_ERR = 0x80120000, + IDXD_SCMD_WQ_NO_DRV_NAME = 0x80200000, }; #define IDXD_SCMD_SOFTERR_MASK 0x80000000 -- Gitee From 5546e3f27881dceecc9d4411e2637a0fe7199a54 Mon Sep 17 00:00:00 2001 From: Guanjun Date: Mon, 11 Dec 2023 13:37:04 +0800 Subject: [PATCH 0953/2138] dmaengine: idxd: Fix incorrect descriptions for GRPCFG register ANBZ: #9252 commit 0c154698a0fc32957d00c6009d5389e086dc8acf upstream. Fix incorrect descriptions for the GRPCFG register which has three sub-registers (GRPWQCFG, GRPENGCFG and GRPFLGCFG). No functional changes Intel-SIG: commit 0c154698a0fc dmaengine: idxd: Fix incorrect descriptions for GRPCFG register. Incremental backporting patches for DSA/IAA on Intel Xeon platform. 
Signed-off-by: Guanjun Reviewed-by: Dave Jiang Reviewed-by: Fenghua Yu Acked-by: Lijun Pan Link: https://lore.kernel.org/r/20231211053704.2725417-3-guanjun@linux.alibaba.com Signed-off-by: Vinod Koul [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/dma/idxd/registers.h | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index cfbcd1adb1d1..e16dbf9ab324 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -437,12 +437,14 @@ union wqcfg { /* * This macro calculates the offset into the GRPCFG register * idxd - struct idxd * - * n - wq id - * ofs - the index of the 32b dword for the config register + * n - group id + * ofs - the index of the 64b qword for the config register * - * The WQCFG register block is divided into groups per each wq. The n index - * allows us to move to the register group that's for that particular wq. - * Each register is 32bits. The ofs gives us the number of register to access. + * The GRPCFG register block is divided into three sub-registers, which + * are GRPWQCFG, GRPENGCFG and GRPFLGCFG. The n index allows us to move + * to the register block that contains the three sub-registers. + * Each register block is 64bits. And the ofs gives us the offset + * within the GRPWQCFG register to access. */ #define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\ (n) * GRPCFG_SIZE + sizeof(u64) * (ofs)) -- Gitee From 7c1ccf25338a4eea84e5256feac61744a8b3bd6d Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Tue, 5 Dec 2023 15:25:17 -0600 Subject: [PATCH 0954/2138] dmaengine: idxd: add external module driver support for dsa_bus_type ANBZ: #9252 commit 15a611015224c6698801ab9a2c2321742c39174e upstream. Add support to allow an external driver to be registered to the dsa_bus_type and also auto-loaded. 
Intel-SIG: commit 15a611015224 dmaengine: idxd: add external module driver support for dsa_bus_type. Incremental backporting patches for DSA/IAA on Intel Xeon platform. Signed-off-by: Dave Jiang Signed-off-by: Tom Zanussi Reviewed-by: Fenghua Yu Acked-by: Vinod Koul Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/dma/idxd/bus.c | 6 ++++++ drivers/dma/idxd/idxd.h | 3 +++ 2 files changed, 9 insertions(+) diff --git a/drivers/dma/idxd/bus.c b/drivers/dma/idxd/bus.c index 6f84621053c6..0c9e689a2e77 100644 --- a/drivers/dma/idxd/bus.c +++ b/drivers/dma/idxd/bus.c @@ -67,11 +67,17 @@ static void idxd_config_bus_remove(struct device *dev) idxd_drv->remove(idxd_dev); } +static int idxd_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) +{ + return add_uevent_var(env, "MODALIAS=" IDXD_DEVICES_MODALIAS_FMT, 0); +} + struct bus_type dsa_bus_type = { .name = "dsa", .match = idxd_config_bus_match, .probe = idxd_config_bus_probe, .remove = idxd_config_bus_remove, + .uevent = idxd_bus_uevent, }; EXPORT_SYMBOL_GPL(dsa_bus_type); diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index fcbb8caea899..3809dc0304c0 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -657,6 +657,9 @@ static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *d return (strncmp(wq->driver_name, dev->driver->name, strlen(dev->driver->name)) == 0); } +#define MODULE_ALIAS_IDXD_DEVICE(type) MODULE_ALIAS("idxd:t" __stringify(type) "*") +#define IDXD_DEVICES_MODALIAS_FMT "idxd:t%d" + int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv, struct module *module, const char *mod_name); #define idxd_driver_register(driver) \ -- Gitee From 67c477a3c01560ff495032df2a232ead4e8b9f02 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 5 Dec 2023 15:25:18 -0600 Subject: [PATCH 0955/2138] dmaengine: 
idxd: Rename drv_enable/disable_wq to idxd_drv_enable/disable_wq, and export ANBZ: #9252 commit d7ad915d817c8ce07a6101292497dddbab0429a3 upstream. Rename drv_enable_wq and drv_disable_wq to idxd_drv_enable_wq and idxd_drv_disable_wq respectively, so that they're no longer too generic to be exported. This also matches existing naming within the idxd driver. And to allow idxd sub-drivers to enable and disable wqs, export them. Intel-SIG: commit d7ad915d817c dmaengine: idxd: Rename drv_enable/disable_wq to idxd_drv_enable/disable_wq, and export. Incremental backporting patches for DSA/IAA on Intel Xeon platform. Signed-off-by: Tom Zanussi Reviewed-by: Dave Jiang Reviewed-by: Fenghua Yu Acked-by: Vinod Koul Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/dma/idxd/cdev.c | 6 +++--- drivers/dma/idxd/device.c | 6 ++++-- drivers/dma/idxd/dma.c | 6 +++--- drivers/dma/idxd/idxd.h | 4 ++-- 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 7ddf5f933475..9be5de8f51a1 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -625,7 +625,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) } wq->type = IDXD_WQT_USER; - rc = drv_enable_wq(wq); + rc = idxd_drv_enable_wq(wq); if (rc < 0) goto err; @@ -640,7 +640,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) return 0; err_cdev: - drv_disable_wq(wq); + idxd_drv_disable_wq(wq); err: destroy_workqueue(wq->wq); wq->type = IDXD_WQT_NONE; @@ -655,7 +655,7 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); idxd_wq_del_cdev(wq); - drv_disable_wq(wq); + idxd_drv_disable_wq(wq); wq->type = IDXD_WQT_NONE; destroy_workqueue(wq->wq); wq->wq = NULL; diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 542d340552dd..c852343a59a7 100644 --- 
a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -1357,7 +1357,7 @@ int idxd_wq_request_irq(struct idxd_wq *wq) return rc; } -int drv_enable_wq(struct idxd_wq *wq) +int idxd_drv_enable_wq(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; @@ -1489,8 +1489,9 @@ int drv_enable_wq(struct idxd_wq *wq) err: return rc; } +EXPORT_SYMBOL_NS_GPL(idxd_drv_enable_wq, IDXD); -void drv_disable_wq(struct idxd_wq *wq) +void idxd_drv_disable_wq(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; @@ -1510,6 +1511,7 @@ void drv_disable_wq(struct idxd_wq *wq) wq->type = IDXD_WQT_NONE; wq->client_count = 0; } +EXPORT_SYMBOL_NS_GPL(idxd_drv_disable_wq, IDXD); int idxd_device_drv_probe(struct idxd_dev *idxd_dev) { diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index 47a01893cfdb..e7043e235408 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -314,7 +314,7 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) wq->type = IDXD_WQT_KERNEL; - rc = drv_enable_wq(wq); + rc = idxd_drv_enable_wq(wq); if (rc < 0) { dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc); rc = -ENXIO; @@ -333,7 +333,7 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) return 0; err_dma: - drv_disable_wq(wq); + idxd_drv_disable_wq(wq); err: wq->type = IDXD_WQT_NONE; mutex_unlock(&wq->wq_lock); @@ -347,7 +347,7 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); __idxd_wq_quiesce(wq); idxd_unregister_dma_channel(wq); - drv_disable_wq(wq); + idxd_drv_disable_wq(wq); mutex_unlock(&wq->wq_lock); } diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 3809dc0304c0..8de470422e13 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -687,8 +687,8 @@ void idxd_unmask_error_interrupts(struct idxd_device *idxd); /* device control */ int idxd_device_drv_probe(struct idxd_dev *idxd_dev); void 
idxd_device_drv_remove(struct idxd_dev *idxd_dev); -int drv_enable_wq(struct idxd_wq *wq); -void drv_disable_wq(struct idxd_wq *wq); +int idxd_drv_enable_wq(struct idxd_wq *wq); +void idxd_drv_disable_wq(struct idxd_wq *wq); int idxd_device_init_reset(struct idxd_device *idxd); int idxd_device_enable(struct idxd_device *idxd); int idxd_device_disable(struct idxd_device *idxd); -- Gitee From 2f045e7bf3004a7dd39fb46eaf9ed1ffa3e7e496 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 5 Dec 2023 15:25:19 -0600 Subject: [PATCH 0956/2138] dmaengine: idxd: Export descriptor management functions ANBZ: #9252 commit 8621f99bde2c3232ec254bae67f91bc6ca7b02b9 upstream. To allow idxd sub-drivers to access the descriptor management functions, export them. Intel-SIG: commit 8621f99bde2c dmaengine: idxd: Export descriptor management functions. Incremental backporting patches for DSA/IAA on Intel Xeon platform. Signed-off-by: Tom Zanussi Reviewed-by: Dave Jiang Reviewed-by: Fenghua Yu Acked-by: Vinod Koul Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/dma/idxd/submit.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c index 3f922518e3a5..7daa6ac26076 100644 --- a/drivers/dma/idxd/submit.c +++ b/drivers/dma/idxd/submit.c @@ -61,6 +61,7 @@ struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype) return __get_desc(wq, idx, cpu); } +EXPORT_SYMBOL_NS_GPL(idxd_alloc_desc, IDXD); void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc) { @@ -69,6 +70,7 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc) desc->cpu = -1; sbitmap_queue_clear(&wq->sbq, desc->id, cpu); } +EXPORT_SYMBOL_NS_GPL(idxd_free_desc, IDXD); static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, struct idxd_desc *desc) @@ -215,3 +217,4 @@ int 
idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) percpu_ref_put(&wq->wq_active); return 0; } +EXPORT_SYMBOL_NS_GPL(idxd_submit_desc, IDXD); -- Gitee From 1b2087d60f56a3e5ef69cd864472a1e32cb3c44f Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 5 Dec 2023 15:25:20 -0600 Subject: [PATCH 0957/2138] dmaengine: idxd: Export wq resource management functions ANBZ: #9252 commit 86d3a34144fd634861d4401903ac9a21bb87f025 upstream. To allow idxd sub-drivers to access the wq resource management functions, export them. Intel-SIG: commit 86d3a34144fd dmaengine: idxd: Export wq resource management functions. Incremental backporting patches for DSA/IAA on Intel Xeon platform. Signed-off-by: Tom Zanussi Reviewed-by: Dave Jiang Reviewed-by: Fenghua Yu Acked-by: Vinod Koul Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/dma/idxd/device.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index c852343a59a7..556ee6c1a4c4 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -161,6 +161,7 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq) free_hw_descs(wq); return rc; } +EXPORT_SYMBOL_NS_GPL(idxd_wq_alloc_resources, IDXD); void idxd_wq_free_resources(struct idxd_wq *wq) { @@ -174,6 +175,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq) dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr); sbitmap_queue_free(&wq->sbq); } +EXPORT_SYMBOL_NS_GPL(idxd_wq_free_resources, IDXD); int idxd_wq_enable(struct idxd_wq *wq) { @@ -405,6 +407,7 @@ int idxd_wq_init_percpu_ref(struct idxd_wq *wq) reinit_completion(&wq->wq_resurrect); return 0; } +EXPORT_SYMBOL_NS_GPL(idxd_wq_init_percpu_ref, IDXD); void __idxd_wq_quiesce(struct idxd_wq *wq) { @@ -414,6 +417,7 @@ void __idxd_wq_quiesce(struct idxd_wq *wq) complete_all(&wq->wq_resurrect); 
wait_for_completion(&wq->wq_dead); } +EXPORT_SYMBOL_NS_GPL(__idxd_wq_quiesce, IDXD); void idxd_wq_quiesce(struct idxd_wq *wq) { @@ -421,6 +425,7 @@ void idxd_wq_quiesce(struct idxd_wq *wq) __idxd_wq_quiesce(wq); mutex_unlock(&wq->wq_lock); } +EXPORT_SYMBOL_NS_GPL(idxd_wq_quiesce, IDXD); /* Device control bits */ static inline bool idxd_is_enabled(struct idxd_device *idxd) -- Gitee From 9c849bd74dec22665ad0ae2ad630042d89540bf1 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 5 Dec 2023 15:25:21 -0600 Subject: [PATCH 0958/2138] dmaengine: idxd: Add wq private data accessors ANBZ: #9252 commit 786d0e7f183ac1c1aef1801c2110f7582f0a6a83 upstream. Add the accessors idxd_wq_set_private() and idxd_wq_get_private() allowing users to set and retrieve a private void * associated with an idxd_wq. The private data is stored in the idxd_dev.conf_dev associated with each idxd_wq. Intel-SIG: commit 786d0e7f183a dmaengine: idxd: Add wq private data accessors. Incremental backporting patches for DSA/IAA on Intel Xeon platform. Signed-off-by: Tom Zanussi Reviewed-by: Fenghua Yu Acked-by: Vinod Koul Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/dma/idxd/idxd.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 8de470422e13..a05ed56e6913 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -620,6 +620,16 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq) return wq->client_count; }; +static inline void idxd_wq_set_private(struct idxd_wq *wq, void *private) +{ + dev_set_drvdata(wq_confdev(wq), private); +} + +static inline void *idxd_wq_get_private(struct idxd_wq *wq) +{ + return dev_get_drvdata(wq_confdev(wq)); +} + /* * Intel IAA does not support batch processing. 
* The max batch size of device, max batch size of wq and -- Gitee From 73427435e7af85f6bdcd9e44d19bf8fcefc8f8af Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 5 Dec 2023 15:25:22 -0600 Subject: [PATCH 0959/2138] dmaengine: idxd: add callback support for iaa crypto ANBZ: #9252 commit aa8d18becc0c14aa3eb46d6d1b81450446e11b87 upstream. Create a lightweight callback interface to allow idxd sub-drivers to be notified when work sent to idxd wqs has completed. For a sub-driver to be notified of work completion, it needs to: - Set the descriptor's 'Request Completion Interrupt' (IDXD_OP_FLAG_RCI) - Set the sub-driver desc_complete() callback when registering the sub-driver e.g.: struct idxd_device_driver my_drv = { .probe = my_probe, .desc_complete = my_complete, } - Set the sub-driver-specific context in the sub-driver's descriptor e.g: idxd_desc->crypto.req = req; idxd_desc->crypto.tfm = tfm; idxd_desc->crypto.src_addr = src_addr; idxd_desc->crypto.dst_addr = dst_addr; When the work completes and the completion irq fires, idxd will invoke the desc_complete() callback with pointers to the descriptor, context, and completion_type. Intel-SIG: commit aa8d18becc0c dmaengine: idxd: add callback support for iaa crypto. Incremental backporting patches for DSA/IAA on Intel Xeon platform. 
Signed-off-by: Dave Jiang Signed-off-by: Tom Zanussi Reviewed-by: Fenghua Yu Acked-by: Vinod Koul Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/dma/idxd/device.c | 2 +- drivers/dma/idxd/dma.c | 3 +- drivers/dma/idxd/idxd.h | 62 ++++++++++++++++++++++++++++++++------- drivers/dma/idxd/irq.c | 12 ++++---- drivers/dma/idxd/submit.c | 6 ++-- 5 files changed, 65 insertions(+), 20 deletions(-) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 556ee6c1a4c4..c41ef195eeb9 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -1278,7 +1278,7 @@ static void idxd_flush_pending_descs(struct idxd_irq_entry *ie) tx = &desc->txd; tx->callback = NULL; tx->callback_result = NULL; - idxd_dma_complete_txd(desc, ctype, true); + idxd_dma_complete_txd(desc, ctype, true, NULL, NULL); } } diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index e7043e235408..cd835eabd31b 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -22,7 +22,7 @@ static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c) void idxd_dma_complete_txd(struct idxd_desc *desc, enum idxd_complete_type comp_type, - bool free_desc) + bool free_desc, void *ctx, u32 *status) { struct idxd_device *idxd = desc->wq->idxd; struct dma_async_tx_descriptor *tx; @@ -359,6 +359,7 @@ static enum idxd_dev_type dev_types[] = { struct idxd_device_driver idxd_dmaengine_drv = { .probe = idxd_dmaengine_drv_probe, .remove = idxd_dmaengine_drv_remove, + .desc_complete = idxd_dma_complete_txd, .name = "dmaengine", .type = dev_types, }; diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index a05ed56e6913..32a1fef9adb1 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include "registers.h" @@ -57,11 +58,23 @@ enum idxd_type { #define 
IDXD_ENQCMDS_RETRIES 32 #define IDXD_ENQCMDS_MAX_RETRIES 64 +enum idxd_complete_type { + IDXD_COMPLETE_NORMAL = 0, + IDXD_COMPLETE_ABORT, + IDXD_COMPLETE_DEV_FAIL, +}; + +struct idxd_desc; + struct idxd_device_driver { const char *name; enum idxd_dev_type *type; int (*probe)(struct idxd_dev *idxd_dev); void (*remove)(struct idxd_dev *idxd_dev); + void (*desc_complete)(struct idxd_desc *desc, + enum idxd_complete_type comp_type, + bool free_desc, + void *ctx, u32 *status); struct device_driver drv; }; @@ -174,12 +187,6 @@ enum idxd_op_type { IDXD_OP_NONBLOCK = 1, }; -enum idxd_complete_type { - IDXD_COMPLETE_NORMAL = 0, - IDXD_COMPLETE_ABORT, - IDXD_COMPLETE_DEV_FAIL, -}; - struct idxd_dma_chan { struct dma_chan chan; struct idxd_wq *wq; @@ -380,6 +387,14 @@ static inline unsigned int evl_size(struct idxd_device *idxd) return idxd->evl->size * evl_ent_size(idxd); } +struct crypto_ctx { + struct acomp_req *req; + struct crypto_tfm *tfm; + dma_addr_t src_addr; + dma_addr_t dst_addr; + bool compress; +}; + /* IDXD software descriptor */ struct idxd_desc { union { @@ -392,7 +407,10 @@ struct idxd_desc { struct iax_completion_record *iax_completion; }; dma_addr_t compl_dma; - struct dma_async_tx_descriptor txd; + union { + struct dma_async_tx_descriptor txd; + struct crypto_ctx crypto; + }; struct llist_node llnode; struct list_head list; int id; @@ -419,6 +437,15 @@ enum idxd_completion_status { #define idxd_dev_to_idxd(idxd_dev) container_of(idxd_dev, struct idxd_device, idxd_dev) #define idxd_dev_to_wq(idxd_dev) container_of(idxd_dev, struct idxd_wq, idxd_dev) +static inline struct idxd_device_driver *wq_to_idxd_drv(struct idxd_wq *wq) +{ + struct device *dev = wq_confdev(wq); + struct idxd_device_driver *idxd_drv = + container_of(dev->driver, struct idxd_device_driver, drv); + + return idxd_drv; +} + static inline struct idxd_device *confdev_to_idxd(struct device *dev) { struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); @@ -680,6 +707,24 @@ void 
idxd_driver_unregister(struct idxd_device_driver *idxd_drv); #define module_idxd_driver(__idxd_driver) \ module_driver(__idxd_driver, idxd_driver_register, idxd_driver_unregister) +void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc); +void idxd_dma_complete_txd(struct idxd_desc *desc, + enum idxd_complete_type comp_type, + bool free_desc, void *ctx, u32 *status); + +static inline void idxd_desc_complete(struct idxd_desc *desc, + enum idxd_complete_type comp_type, + bool free_desc) +{ + struct idxd_device_driver *drv; + u32 status; + + drv = wq_to_idxd_drv(desc->wq); + if (drv->desc_complete) + drv->desc_complete(desc, comp_type, free_desc, + &desc->txd, &status); +} + int idxd_register_bus_type(void); void idxd_unregister_bus_type(void); int idxd_register_devices(struct idxd_device *idxd); @@ -733,14 +778,11 @@ int idxd_wq_request_irq(struct idxd_wq *wq); /* submission */ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc); struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype); -void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc); int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc); /* dmaengine */ int idxd_register_dma_device(struct idxd_device *idxd); void idxd_unregister_dma_device(struct idxd_device *idxd); -void idxd_dma_complete_txd(struct idxd_desc *desc, - enum idxd_complete_type comp_type, bool free_desc); /* cdev */ int idxd_cdev_register(void); diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index 27c93a245d12..fc049c9c9892 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -123,7 +123,7 @@ static void idxd_abort_invalid_int_handle_descs(struct idxd_irq_entry *ie) list_for_each_entry_safe(d, t, &flist, list) { list_del(&d->list); - idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true); + idxd_desc_complete(d, IDXD_COMPLETE_ABORT, true); } } @@ -533,7 +533,7 @@ static void idxd_int_handle_resubmit_work(struct work_struct *work) */ if (rc != 
-EAGAIN) { desc->completion->status = IDXD_COMP_DESC_ABORT; - idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, false); + idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, false); } idxd_free_desc(wq, desc); } @@ -574,11 +574,11 @@ static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry) * and 0xff, which DSA_COMP_STATUS_MASK can mask out. */ if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) { - idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true); + idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true); continue; } - idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true); + idxd_desc_complete(desc, IDXD_COMPLETE_NORMAL, true); } else { spin_lock(&irq_entry->list_lock); list_add_tail(&desc->list, @@ -619,11 +619,11 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry) list_del(&desc->list); if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) { - idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true); + idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true); continue; } - idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true); + idxd_desc_complete(desc, IDXD_COMPLETE_NORMAL, true); } } diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c index 7daa6ac26076..817a564413b0 100644 --- a/drivers/dma/idxd/submit.c +++ b/drivers/dma/idxd/submit.c @@ -127,7 +127,8 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, spin_unlock(&ie->list_lock); if (found) - idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false); + idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false, + NULL, NULL); /* * completing the descriptor will return desc to allocator and @@ -137,7 +138,8 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie, */ list_for_each_entry_safe(d, t, &flist, list) { list_del_init(&d->list); - idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true); + idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, true, + NULL, NULL); } } -- Gitee From 
e9454eccf136777e6afc0080a9a8eb8b4c1f8a5b Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 5 Dec 2023 15:25:23 -0600 Subject: [PATCH 0960/2138] crypto: iaa - Add IAA Compression Accelerator Documentation ANBZ: #9252 commit 8ccc257b29a183c42830aa854ed7b50fa22f8731 upstream. Because the IAA Compression Accelerator requires significant user setup in order to be used properly, this adds documentation on the iaa_crypto driver including setup, usage, and examples. Intel-SIG: commit 8ccc257b29a1 crypto: iaa - Add IAA Compression Accelerator Documentation. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- .../driver-api/crypto/iaa/iaa-crypto.rst | 824 ++++++++++++++++++ Documentation/driver-api/crypto/iaa/index.rst | 20 + Documentation/driver-api/crypto/index.rst | 20 + Documentation/driver-api/index.rst | 1 + 4 files changed, 865 insertions(+) create mode 100644 Documentation/driver-api/crypto/iaa/iaa-crypto.rst create mode 100644 Documentation/driver-api/crypto/iaa/index.rst create mode 100644 Documentation/driver-api/crypto/index.rst diff --git a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst new file mode 100644 index 000000000000..de587cf9cbed --- /dev/null +++ b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst @@ -0,0 +1,824 @@ +.. SPDX-License-Identifier: GPL-2.0 + +========================================= +IAA Compression Accelerator Crypto Driver +========================================= + +Tom Zanussi + +The IAA crypto driver supports compression/decompression compatible +with the DEFLATE compression standard described in RFC 1951, which is +the compression/decompression algorithm exported by this module. 
+ +The IAA hardware spec can be found here: + + https://cdrdv2.intel.com/v1/dl/getContent/721858 + +The iaa_crypto driver is designed to work as a layer underneath +higher-level compression devices such as zswap. + +Users can select IAA compress/decompress acceleration by specifying +one of the supported IAA compression algorithms in whatever facility +allows compression algorithms to be selected. + +For example, a zswap device can select the IAA 'fixed' mode +represented by selecting the 'deflate-iaa' crypto compression +algorithm:: + + # echo deflate-iaa > /sys/module/zswap/parameters/compressor + +This will tell zswap to use the IAA 'fixed' compression mode for all +compresses and decompresses. + +Currently, there is only one compression modes available, 'fixed' +mode. + +The 'fixed' compression mode implements the compression scheme +specified by RFC 1951 and is given the crypto algorithm name +'deflate-iaa'. (Because the IAA hardware has a 4k history-window +limitation, only buffers <= 4k, or that have been compressed using a +<= 4k history window, are technically compliant with the deflate spec, +which allows for a window of up to 32k. Because of this limitation, +the IAA fixed mode deflate algorithm is given its own algorithm name +rather than simply 'deflate'). + + +Config options and other setup +============================== + +The IAA crypto driver is available via menuconfig using the following +path:: + + Cryptographic API -> Hardware crypto devices -> Support for Intel(R) IAA Compression Accelerator + +In the configuration file the option called CONFIG_CRYPTO_DEV_IAA_CRYPTO. + +The IAA crypto driver also supports statistics, which are available +via menuconfig using the following path:: + + Cryptographic API -> Hardware crypto devices -> Support for Intel(R) IAA Compression -> Enable Intel(R) IAA Compression Accelerator Statistics + +In the configuration file the option called CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS. 
+ +The following config options should also be enabled:: + + CONFIG_IRQ_REMAP=y + CONFIG_INTEL_IOMMU=y + CONFIG_INTEL_IOMMU_SVM=y + CONFIG_PCI_ATS=y + CONFIG_PCI_PRI=y + CONFIG_PCI_PASID=y + CONFIG_INTEL_IDXD=m + CONFIG_INTEL_IDXD_SVM=y + +IAA is one of the first Intel accelerator IPs that can work in +conjunction with the Intel IOMMU. There are multiple modes that exist +for testing. Based on IOMMU configuration, there are 3 modes:: + + - Scalable + - Legacy + - No IOMMU + + +Scalable mode +------------- + +Scalable mode supports Shared Virtual Memory (SVM or SVA). It is +entered when using the kernel boot commandline:: + + intel_iommu=on,sm_on + +with VT-d turned on in BIOS. + +With scalable mode, both shared and dedicated workqueues are available +for use. + +For scalable mode, the following BIOS settings should be enabled:: + + Socket Configuration > IIO Configuration > Intel VT for Directed I/O (VT-d) > Intel VT for Directed I/O + + Socket Configuration > IIO Configuration > PCIe ENQCMD > ENQCMDS + + +Legacy mode +----------- + +Legacy mode is entered when using the kernel boot commandline:: + + intel_iommu=off + +or VT-d is not turned on in BIOS. + +If you have booted into Linux and not sure if VT-d is on, do a "dmesg +| grep -i dmar". If you don't see a number of DMAR devices enumerated, +most likely VT-d is not on. + +With legacy mode, only dedicated workqueues are available for use. + + +No IOMMU mode +------------- + +No IOMMU mode is entered when using the kernel boot commandline:: + + iommu=off. + +With no IOMMU mode, only dedicated workqueues are available for use. + + +Usage +===== + +accel-config +------------ + +When loaded, the iaa_crypto driver automatically creates a default +configuration and enables it, and assigns default driver attributes. 
+If a different configuration or set of driver attributes is required, +the user must first disable the IAA devices and workqueues, reset the +configuration, and then re-register the deflate-iaa algorithm with the +crypto subsystem by removing and reinserting the iaa_crypto module. + +The :ref:`iaa_disable_script` in the 'Use Cases' +section below can be used to disable the default configuration. + +See :ref:`iaa_default_config` below for details of the default +configuration. + +More likely than not, however, and because of the complexity and +configurability of the accelerator devices, the user will want to +configure the device and manually enable the desired devices and +workqueues. + +The userspace tool to help doing that is called accel-config. Using +accel-config to configure device or loading a previously saved config +is highly recommended. The device can be controlled via sysfs +directly but comes with the warning that you should do this ONLY if +you know exactly what you are doing. The following sections will not +cover the sysfs interface but assumes you will be using accel-config. + +The :ref:`iaa_sysfs_config` section in the appendix below can be +consulted for the sysfs interface details if interested. + +The accel-config tool along with instructions for building it can be +found here: + + https://github.com/intel/idxd-config/#readme + +Typical usage +------------- + +In order for the iaa_crypto module to actually do any +compression/decompression work on behalf of a facility, one or more +IAA workqueues need to be bound to the iaa_crypto driver. 
+ +For instance, here's an example of configuring an IAA workqueue and +binding it to the iaa_crypto driver (note that device names are +specified as 'iax' rather than 'iaa' - this is because upstream still +has the old 'iax' device naming in place) :: + + # configure wq1.0 + + accel-config config-wq --group-id=0 --mode=dedicated --type=kernel --name="iaa_crypto" --device_name="crypto" iax1/wq1.0 + + # enable IAA device iax1 + + accel-config enable-device iax1 + + # enable wq1.0 on IAX device iax1 + + accel-config enable-wq iax1/wq1.0 + +Whenever a new workqueue is bound to or unbound from the iaa_crypto +driver, the available workqueues are 'rebalanced' such that work +submitted from a particular CPU is given to the most appropriate +workqueue available. Current best practice is to configure and bind +at least one workqueue for each IAA device, but as long as there is at +least one workqueue configured and bound to any IAA device in the +system, the iaa_crypto driver will work, albeit most likely not as +efficiently. + +The IAA crypto algorigthms is operational and compression and +decompression operations are fully enabled following the successful +binding of the first IAA workqueue to the iaa_crypto driver. + +Similarly, the IAA crypto algorithm is not operational and compression +and decompression operations are disabled following the unbinding of +the last IAA worqueue to the iaa_crypto driver. + +As a result, the IAA crypto algorithms and thus the IAA hardware are +only available when one or more workques are bound to the iaa_crypto +driver. + +When there are no IAA workqueues bound to the driver, the IAA crypto +algorithms can be unregistered by removing the module. + + +Driver attributes +----------------- + +There are a couple user-configurable driver attributes that can be +used to configure various modes of operation. They're listed below, +along with their default values. 
To set any of these attributes, echo +the appropriate values to the attribute file located under +/sys/bus/dsa/drivers/crypto/ + +The attribute settings at the time the IAA algorithms are registered +are captured in each algorithm's crypto_ctx and used for all compresses +and decompresses when using that algorithm. + +The available attributes are: + + - verify_compress + + Toggle compression verification. If set, each compress will be + internally decompressed and the contents verified, returning error + codes if unsuccessful. This can be toggled with 0/1:: + + echo 0 > /sys/bus/dsa/drivers/crypto/verify_compress + + The default setting is '1' - verify all compresses. + + - sync_mode + + Select mode to be used to wait for completion of each compresses + and decompress operation. + + The crypto async interface support implemented by iaa_crypto + provides an implementation that satisfies the interface but does + so in a synchronous manner - it fills and submits the IDXD + descriptor and then loops around waiting for it to complete before + returning. This isn't a problem at the moment, since all existing + callers (e.g. zswap) wrap any asynchronous callees in a + synchronous wrapper anyway. + + The iaa_crypto driver does however provide true asynchronous + support for callers that can make use of it. In this mode, it + fills and submits the IDXD descriptor, then returns immediately + with -EINPROGRESS. The caller can then either poll for completion + itself, which requires specific code in the caller which currently + nothing in the upstream kernel implements, or go to sleep and wait + for an interrupt signaling completion. This latter mode is + supported by current users in the kernel such as zswap via + synchronous wrappers. Although it is supported this mode is + significantly slower than the synchronous mode that does the + polling in the iaa_crypto driver previously mentioned. 
+ + This mode can be enabled by writing 'async_irq' to the sync_mode + iaa_crypto driver attribute:: + + echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode + + Async mode without interrupts (caller must poll) can be enabled by + writing 'async' to it:: + + echo async > /sys/bus/dsa/drivers/crypto/sync_mode + + The mode that does the polling in the iaa_crypto driver can be + enabled by writing 'sync' to it:: + + echo sync > /sys/bus/dsa/drivers/crypto/sync_mode + + The default mode is 'sync'. + +.. _iaa_default_config: + +IAA Default Configuration +------------------------- + +When the iaa_crypto driver is loaded, each IAA device has a single +work queue configured for it, with the following attributes:: + + mode "dedicated" + threshold 0 + size Total WQ Size from WQCAP + priority 10 + type IDXD_WQT_KERNEL + group 0 + name "iaa_crypto" + driver_name "crypto" + +The devices and workqueues are also enabled and therefore the driver +is ready to be used without any additional configuration. + +The default driver attributes in effect when the driver is loaded are:: + + sync_mode "sync" + verify_compress 1 + +In order to change either the device/work queue or driver attributes, +the enabled devices and workqueues must first be disabled. In order +to have the new configuration applied to the deflate-iaa crypto +algorithm, it needs to be re-registered by removing and reinserting +the iaa_crypto module. The :ref:`iaa_disable_script` in the 'Use +Cases' section below can be used to disable the default configuration. + +Statistics +========== + +If the optional debugfs statistics support is enabled, the IAA crypto +driver will generate statistics which can be accessed in debugfs at:: + + # ls -al /sys/kernel/debug/iaa-crypto/ + total 0 + drwxr-xr-x 2 root root 0 Mar 3 09:35 . + drwx------ 47 root root 0 Mar 3 09:35 .. 
+ -rw-r--r-- 1 root root 0 Mar 3 09:35 max_acomp_delay_ns + -rw-r--r-- 1 root root 0 Mar 3 09:35 max_adecomp_delay_ns + -rw-r--r-- 1 root root 0 Mar 3 09:35 max_comp_delay_ns + -rw-r--r-- 1 root root 0 Mar 3 09:35 max_decomp_delay_ns + -rw-r--r-- 1 root root 0 Mar 3 09:35 stats_reset + -rw-r--r-- 1 root root 0 Mar 3 09:35 total_comp_bytes_out + -rw-r--r-- 1 root root 0 Mar 3 09:35 total_comp_calls + -rw-r--r-- 1 root root 0 Mar 3 09:35 total_decomp_bytes_in + -rw-r--r-- 1 root root 0 Mar 3 09:35 total_decomp_calls + -rw-r--r-- 1 root root 0 Mar 3 09:35 wq_stats + +Most of the above statisticss are self-explanatory. The wq_stats file +shows per-wq stats, a set for each iaa device and wq in addition to +some global stats:: + + # cat wq_stats + global stats: + total_comp_calls: 100 + total_decomp_calls: 100 + total_comp_bytes_out: 22800 + total_decomp_bytes_in: 22800 + total_completion_einval_errors: 0 + total_completion_timeout_errors: 0 + total_completion_comp_buf_overflow_errors: 0 + + iaa device: + id: 1 + n_wqs: 1 + comp_calls: 0 + comp_bytes: 0 + decomp_calls: 0 + decomp_bytes: 0 + wqs: + name: iaa_crypto + comp_calls: 0 + comp_bytes: 0 + decomp_calls: 0 + decomp_bytes: 0 + + iaa device: + id: 3 + n_wqs: 1 + comp_calls: 0 + comp_bytes: 0 + decomp_calls: 0 + decomp_bytes: 0 + wqs: + name: iaa_crypto + comp_calls: 0 + comp_bytes: 0 + decomp_calls: 0 + decomp_bytes: 0 + + iaa device: + id: 5 + n_wqs: 1 + comp_calls: 100 + comp_bytes: 22800 + decomp_calls: 100 + decomp_bytes: 22800 + wqs: + name: iaa_crypto + comp_calls: 100 + comp_bytes: 22800 + decomp_calls: 100 + decomp_bytes: 22800 + +Writing 0 to 'stats_reset' resets all the stats, including the +per-device and per-wq stats:: + + # echo 0 > stats_reset + # cat wq_stats + global stats: + total_comp_calls: 0 + total_decomp_calls: 0 + total_comp_bytes_out: 0 + total_decomp_bytes_in: 0 + total_completion_einval_errors: 0 + total_completion_timeout_errors: 0 + total_completion_comp_buf_overflow_errors: 0 + ... 
+ + +Use cases +========= + +Simple zswap test +----------------- + +For this example, the kernel should be configured according to the +dedicated mode options described above, and zswap should be enabled as +well:: + + CONFIG_ZSWAP=y + +This is a simple test that uses iaa_compress as the compressor for a +swap (zswap) device. It sets up the zswap device and then uses the +memory_memadvise program listed below to forcibly swap out and in a +specified number of pages, demonstrating both compress and decompress. + +The zswap test expects the work queues for each IAA device on the +system to be configured properly as a kernel workqueue with a +workqueue driver_name of "crypto". + +The first step is to make sure the iaa_crypto module is loaded:: + + modprobe iaa_crypto + +If the IAA devices and workqueues haven't previously been disabled and +reconfigured, then the default configuration should be in place and no +further IAA configuration is necessary. See :ref:`iaa_default_config` +below for details of the default configuration. 
+ +If the default configuration is in place, you should see the iaa +devices and wq0s enabled:: + + # cat /sys/bus/dsa/devices/iax1/state + enabled + # cat /sys/bus/dsa/devices/iax1/wq1.0/state + enabled + +To demonstrate that the following steps work as expected, these +commands can be used to enable debug output:: + + # echo -n 'module iaa_crypto +p' > /sys/kernel/debug/dynamic_debug/control + # echo -n 'module idxd +p' > /sys/kernel/debug/dynamic_debug/control + +Use the following commands to enable zswap:: + + # echo 0 > /sys/module/zswap/parameters/enabled + # echo 50 > /sys/module/zswap/parameters/max_pool_percent + # echo deflate-iaa > /sys/module/zswap/parameters/compressor + # echo zsmalloc > /sys/module/zswap/parameters/zpool + # echo 1 > /sys/module/zswap/parameters/enabled + # echo 0 > /sys/module/zswap/parameters/same_filled_pages_enabled + # echo 100 > /proc/sys/vm/swappiness + # echo never > /sys/kernel/mm/transparent_hugepage/enabled + # echo 1 > /proc/sys/vm/overcommit_memory + +Now you can run the zswap workload you want to measure.
For +example, using the memory_memadvise code below, the following command +will swap in and out 100 pages:: + + ./memory_madvise 100 + + Allocating 100 pages to swap in/out + Swapping out 100 pages + Swapping in 100 pages + Swapped out and in 100 pages + +You should see something like the following in the dmesg output:: + + [ 404.202972] idxd 0000:e7:02.0: iaa_comp_acompress: dma_map_sg, src_addr 223925c000, nr_sgs 1, req->src 00000000ee7cb5e6, req->slen 4096, sg_dma_len(sg) 4096 + [ 404.202973] idxd 0000:e7:02.0: iaa_comp_acompress: dma_map_sg, dst_addr 21dadf8000, nr_sgs 1, req->dst 000000008d6acea8, req->dlen 4096, sg_dma_len(sg) 8192 + [ 404.202975] idxd 0000:e7:02.0: iaa_compress: desc->src1_addr 223925c000, desc->src1_size 4096, desc->dst_addr 21dadf8000, desc->max_dst_size 4096, desc->src2_addr 2203543000, desc->src2_size 1568 + [ 404.202981] idxd 0000:e7:02.0: iaa_compress_verify: (verify) desc->src1_addr 21dadf8000, desc->src1_size 228, desc->dst_addr 223925c000, desc->max_dst_size 4096, desc->src2_addr 0, desc->src2_size 0 + ... + +Now that basic functionality has been demonstrated, the defaults can +be erased and replaced with a different configuration. To do that, +first disable zswap:: + + # echo lzo > /sys/module/zswap/parameters/compressor + # swapoff -a + # echo 0 > /sys/module/zswap/parameters/accept_threshold_percent + # echo 0 > /sys/module/zswap/parameters/max_pool_percent + # echo 0 > /sys/module/zswap/parameters/enabled + # echo 0 > /sys/module/zswap/parameters/enabled + +Then run the :ref:`iaa_disable_script` in the 'Use Cases' section +below to disable the default configuration. + +Finally turn swap back on:: + + # swapon -a + +Following all that the IAA device(s) can now be re-configured and +enabled as desired for further testing. Below is one example. + +The zswap test expects the work queues for each IAA device on the +system to be configured properly as a kernel workqueue with a +workqueue driver_name of "crypto". 
+ +The below script automatically does that:: + + #!/bin/bash + + echo "IAA devices:" + lspci -d:0cfe + echo "# IAA devices:" + lspci -d:0cfe | wc -l + + # + # count iaa instances + # + iaa_dev_id="0cfe" + num_iaa=$(lspci -d:${iaa_dev_id} | wc -l) + echo "Found ${num_iaa} IAA instances" + + # + # disable iaa wqs and devices + # + echo "Disable IAA" + + for ((i = 1; i < ${num_iaa} * 2; i += 2)); do + echo disable wq iax${i}/wq${i}.0 + accel-config disable-wq iax${i}/wq${i}.0 + echo disable iaa iax${i} + accel-config disable-device iax${i} + done + + echo "End Disable IAA" + + # + # configure iaa wqs and devices + # + echo "Configure IAA" + for ((i = 1; i < ${num_iaa} * 2; i += 2)); do + accel-config config-wq --group-id=0 --mode=dedicated --size=128 --priority=10 --type=kernel --name="iaa_crypto" --driver_name="crypto" iax${i}/wq${i}.0 + done + + echo "End Configure IAA" + + # + # enable iaa wqs and devices + # + echo "Enable IAA" + + for ((i = 1; i < ${num_iaa} * 2; i += 2)); do + echo enable iaa iax${i} + accel-config enable-device iax${i} + echo enable wq iax${i}/wq${i}.0 + accel-config enable-wq iax${i}/wq${i}.0 + done + + echo "End Enable IAA" + +When the workqueues are bound to the iaa_crypto driver, you should +see something similar to the following in dmesg output if you've +enabled debug output (echo -n 'module iaa_crypto +p' > +/sys/kernel/debug/dynamic_debug/control):: + + [ 60.752344] idxd 0000:f6:02.0: add_iaa_wq: added wq 000000004068d14d to iaa 00000000c9585ba2, n_wq 1 + [ 60.752346] iaa_crypto: rebalance_wq_table: nr_nodes=2, nr_cpus 160, nr_iaa 8, cpus_per_iaa 20 + [ 60.752347] iaa_crypto: rebalance_wq_table: iaa=0 + [ 60.752349] idxd 0000:6a:02.0: request_iaa_wq: getting wq from iaa_device 0000000042d7bc52 (0) + [ 60.752350] idxd 0000:6a:02.0: request_iaa_wq: returning unused wq 00000000c8bb4452 (0) from iaa device 0000000042d7bc52 (0) + [ 60.752352] iaa_crypto: rebalance_wq_table: assigned wq for cpu=0, node=0 = wq 00000000c8bb4452 + [ 60.752354]
iaa_crypto: rebalance_wq_table: iaa=0 + [ 60.752355] idxd 0000:6a:02.0: request_iaa_wq: getting wq from iaa_device 0000000042d7bc52 (0) + [ 60.752356] idxd 0000:6a:02.0: request_iaa_wq: returning unused wq 00000000c8bb4452 (0) from iaa device 0000000042d7bc52 (0) + [ 60.752358] iaa_crypto: rebalance_wq_table: assigned wq for cpu=1, node=0 = wq 00000000c8bb4452 + [ 60.752359] iaa_crypto: rebalance_wq_table: iaa=0 + [ 60.752360] idxd 0000:6a:02.0: request_iaa_wq: getting wq from iaa_device 0000000042d7bc52 (0) + [ 60.752361] idxd 0000:6a:02.0: request_iaa_wq: returning unused wq 00000000c8bb4452 (0) from iaa device 0000000042d7bc52 (0) + [ 60.752362] iaa_crypto: rebalance_wq_table: assigned wq for cpu=2, node=0 = wq 00000000c8bb4452 + [ 60.752364] iaa_crypto: rebalance_wq_table: iaa=0 + . + . + . + +Once the workqueues and devices have been enabled, the IAA crypto +algorithms are enabled and available. When the IAA crypto algorithms +have been successfully enabled, you should see the following dmesg +output:: + + [ 64.893759] iaa_crypto: iaa_crypto_enable: iaa_crypto now ENABLED + +Now run the following zswap-specific setup commands to have zswap use +the 'fixed' compression mode:: + + echo 0 > /sys/module/zswap/parameters/enabled + echo 50 > /sys/module/zswap/parameters/max_pool_percent + echo deflate-iaa > /sys/module/zswap/parameters/compressor + echo zsmalloc > /sys/module/zswap/parameters/zpool + echo 1 > /sys/module/zswap/parameters/enabled + echo 0 > /sys/module/zswap/parameters/same_filled_pages_enabled + + echo 100 > /proc/sys/vm/swappiness + echo never > /sys/kernel/mm/transparent_hugepage/enabled + echo 1 > /proc/sys/vm/overcommit_memory + +Finally, you can now run the zswap workload you want to measure. 
For +example, using the code below, the following command will swap in and +out 100 pages:: + + ./memory_madvise 100 + + Allocating 100 pages to swap in/out + Swapping out 100 pages + Swapping in 100 pages + Swapped out and in 100 pages + +You should see something like the following in the dmesg output if +you've enabled debug output (echo -n 'module iaa_crypto +p' > +/sys/kernel/debug/dynamic_debug/control):: + + [ 404.202972] idxd 0000:e7:02.0: iaa_comp_acompress: dma_map_sg, src_addr 223925c000, nr_sgs 1, req->src 00000000ee7cb5e6, req->slen 4096, sg_dma_len(sg) 4096 + [ 404.202973] idxd 0000:e7:02.0: iaa_comp_acompress: dma_map_sg, dst_addr 21dadf8000, nr_sgs 1, req->dst 000000008d6acea8, req->dlen 4096, sg_dma_len(sg) 8192 + [ 404.202975] idxd 0000:e7:02.0: iaa_compress: desc->src1_addr 223925c000, desc->src1_size 4096, desc->dst_addr 21dadf8000, desc->max_dst_size 4096, desc->src2_addr 2203543000, desc->src2_size 1568 + [ 404.202981] idxd 0000:e7:02.0: iaa_compress_verify: (verify) desc->src1_addr 21dadf8000, desc->src1_size 228, desc->dst_addr 223925c000, desc->max_dst_size 4096, desc->src2_addr 0, desc->src2_size 0 + [ 409.203227] idxd 0000:e7:02.0: iaa_comp_adecompress: dma_map_sg, src_addr 21ddd8b100, nr_sgs 1, req->src 0000000084adab64, req->slen 228, sg_dma_len(sg) 228 + [ 409.203235] idxd 0000:e7:02.0: iaa_comp_adecompress: dma_map_sg, dst_addr 21ee3dc000, nr_sgs 1, req->dst 000000004e2990d0, req->dlen 4096, sg_dma_len(sg) 4096 + [ 409.203239] idxd 0000:e7:02.0: iaa_decompress: desc->src1_addr 21ddd8b100, desc->src1_size 228, desc->dst_addr 21ee3dc000, desc->max_dst_size 4096, desc->src2_addr 0, desc->src2_size 0 + [ 409.203254] idxd 0000:e7:02.0: iaa_comp_adecompress: dma_map_sg, src_addr 21ddd8b100, nr_sgs 1, req->src 0000000084adab64, req->slen 228, sg_dma_len(sg) 228 + [ 409.203256] idxd 0000:e7:02.0: iaa_comp_adecompress: dma_map_sg, dst_addr 21f1551000, nr_sgs 1, req->dst 000000004e2990d0, req->dlen 4096, sg_dma_len(sg) 4096 + [ 409.203257] idxd 
0000:e7:02.0: iaa_decompress: desc->src1_addr 21ddd8b100, desc->src1_size 228, desc->dst_addr 21f1551000, desc->max_dst_size 4096, desc->src2_addr 0, desc->src2_size 0 + +In order to unregister the IAA crypto algorithms, and register new +ones using different parameters, any users of the current algorithm +should be stopped and the IAA workqueues and devices disabled. + +In the case of zswap, remove the IAA crypto algorithm as the +compressor and turn off swap (to remove all references to +iaa_crypto):: + + echo lzo > /sys/module/zswap/parameters/compressor + swapoff -a + + echo 0 > /sys/module/zswap/parameters/accept_threshold_percent + echo 0 > /sys/module/zswap/parameters/max_pool_percent + echo 0 > /sys/module/zswap/parameters/enabled + +Once zswap is disabled and no longer using iaa_crypto, the IAA wqs and +devices can be disabled. + +.. _iaa_disable_script: + +IAA disable script +------------------ + +The below script automatically does that:: + + #!/bin/bash + + echo "IAA devices:" + lspci -d:0cfe + echo "# IAA devices:" + lspci -d:0cfe | wc -l + + # + # count iaa instances + # + iaa_dev_id="0cfe" + num_iaa=$(lspci -d:${iaa_dev_id} | wc -l) + echo "Found ${num_iaa} IAA instances" + + # + # disable iaa wqs and devices + # + echo "Disable IAA" + + for ((i = 1; i < ${num_iaa} * 2; i += 2)); do + echo disable wq iax${i}/wq${i}.0 + accel-config disable-wq iax${i}/wq${i}.0 + echo disable iaa iax${i} + accel-config disable-device iax${i} + done + + echo "End Disable IAA" + +Finally, at this point the iaa_crypto module can be removed, which +will unregister the current IAA crypto algorithms:: + + rmmod iaa_crypto + + +memory_madvise.c (gcc -o memory_memadvise memory_madvise.c):: + + #include <stdio.h> + #include <stdlib.h> + #include <string.h> + #include <unistd.h> + #include <sys/mman.h> + #include <linux/mman.h> + + #ifndef MADV_PAGEOUT + #define MADV_PAGEOUT 21 /* force pages out immediately */ + #endif + + #define PG_SZ 4096 + + int main(int argc, char **argv) + { + int i, nr_pages = 1; + int64_t *dump_ptr; + char *addr, *a; + int
loop = 1; + + if (argc > 1) + nr_pages = atoi(argv[1]); + + printf("Allocating %d pages to swap in/out\n", nr_pages); + + /* allocate pages */ + addr = mmap(NULL, nr_pages * PG_SZ, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *addr = 1; + + /* initialize data in page to all '*' chars */ + memset(addr, '*', nr_pages * PG_SZ); + + printf("Swapping out %d pages\n", nr_pages); + + /* Tell kernel to swap it out */ + madvise(addr, nr_pages * PG_SZ, MADV_PAGEOUT); + + while (loop > 0) { + /* Wait for swap out to finish */ + sleep(5); + + a = addr; + + printf("Swapping in %d pages\n", nr_pages); + + /* Access the page ... this will swap it back in again */ + for (i = 0; i < nr_pages; i++) { + if (a[0] != '*') { + printf("Bad data from decompress!!!!!\n"); + + dump_ptr = (int64_t *)a; + for (int j = 0; j < 100; j++) { + printf(" page %d data: %#llx\n", i, *dump_ptr); + dump_ptr++; + } + } + + a += PG_SZ; + } + + loop --; + } + + printf("Swapped out and in %d pages\n", nr_pages); + +Appendix +======== + +.. _iaa_sysfs_config: + +IAA sysfs config interface +-------------------------- + +Below is a description of the IAA sysfs interface, which as mentioned +in the main document, should only be used if you know exactly what you +are doing. Even then, there's no compelling reason to use it directly +since accel-config can do everything the sysfs interface can and in +fact accel-config is based on it under the covers. + +The 'IAA config path' is /sys/bus/dsa/devices and contains +subdirectories representing each IAA device, workqueue, engine, and +group. Note that in the sysfs interface, the IAA devices are actually +named using iax e.g. iax1, iax3, etc. (Note that IAA devices are the +odd-numbered devices; the even-numbered devices are DSA devices and +can be ignored for IAA). + +The 'IAA device bind path' is /sys/bus/dsa/drivers/idxd/bind and is +the file that is written to enable an IAA device. 
+ +The 'IAA workqueue bind path' is /sys/bus/dsa/drivers/crypto/bind and +is the file that is written to enable an IAA workqueue. + +Similarly /sys/bus/dsa/drivers/idxd/unbind and +/sys/bus/dsa/drivers/crypto/unbind are used to disable IAA devices and +workqueues. + +The basic sequence of commands needed to set up the IAA devices and +workqueues is: + +For each device:: + 1) Disable any workqueues enabled on the device. For example to + disable workques 0 and 1 on IAA device 3:: + + # echo wq3.0 > /sys/bus/dsa/drivers/crypto/unbind + # echo wq3.1 > /sys/bus/dsa/drivers/crypto/unbind + + 2) Disable the device. For example to disable IAA device 3:: + + # echo iax3 > /sys/bus/dsa/drivers/idxd/unbind + + 3) configure the desired workqueues. For example, to configure + workqueue 3 on IAA device 3:: + + # echo dedicated > /sys/bus/dsa/devices/iax3/wq3.3/mode + # echo 128 > /sys/bus/dsa/devices/iax3/wq3.3/size + # echo 0 > /sys/bus/dsa/devices/iax3/wq3.3/group_id + # echo 10 > /sys/bus/dsa/devices/iax3/wq3.3/priority + # echo "kernel" > /sys/bus/dsa/devices/iax3/wq3.3/type + # echo "iaa_crypto" > /sys/bus/dsa/devices/iax3/wq3.3/name + # echo "crypto" > /sys/bus/dsa/devices/iax3/wq3.3/driver_name + + 4) Enable the device. For example to enable IAA device 3:: + + # echo iax3 > /sys/bus/dsa/drivers/idxd/bind + + 5) Enable the desired workqueues on the device. For example to + enable workques 0 and 1 on IAA device 3:: + + # echo wq3.0 > /sys/bus/dsa/drivers/crypto/bind + # echo wq3.1 > /sys/bus/dsa/drivers/crypto/bind diff --git a/Documentation/driver-api/crypto/iaa/index.rst b/Documentation/driver-api/crypto/iaa/index.rst new file mode 100644 index 000000000000..aa6837e27264 --- /dev/null +++ b/Documentation/driver-api/crypto/iaa/index.rst @@ -0,0 +1,20 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================================= +IAA (Intel Analytics Accelerator) +================================= + +IAA provides hardware compression and decompression via the crypto +API. 
+ +.. toctree:: + :maxdepth: 1 + + iaa-crypto + +.. only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/driver-api/crypto/index.rst b/Documentation/driver-api/crypto/index.rst new file mode 100644 index 000000000000..fb9709b98bea --- /dev/null +++ b/Documentation/driver-api/crypto/index.rst @@ -0,0 +1,20 @@ +.. SPDX-License-Identifier: GPL-2.0 + +============== +Crypto Drivers +============== + +Documentation for crypto drivers that may need more involved setup and +configuration. + +.. toctree:: + :maxdepth: 1 + + iaa/index + +.. only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst index 1e16a40da3ba..f0f8f521f65b 100644 --- a/Documentation/driver-api/index.rst +++ b/Documentation/driver-api/index.rst @@ -114,6 +114,7 @@ available subsections can be seen below. zorro hte/index wmi + crypto/index .. only:: subproject and html -- Gitee From a082dd5afeff21859ff166dc4f80f152a9fad960 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 5 Dec 2023 15:25:24 -0600 Subject: [PATCH 0961/2138] crypto: iaa - Add Intel IAA Compression Accelerator crypto driver core ANBZ: #9252 commit ea7a5cbb43696cfacf73e61916d1860ac30b5b2f upstream. The Intel Analytics Accelerator (IAA) is a hardware accelerator that provides very high thoughput compression/decompression compatible with the DEFLATE compression standard described in RFC 1951, which is the compression/decompression algorithm exported by this module. Users can select IAA compress/decompress acceleration by specifying one of the deflate-iaa* algorithms as the compression algorithm to use by whatever facility allows asynchronous compression algorithms to be selected. 
For example, zswap can select the IAA fixed deflate algorithm 'deflate-iaa' via: # echo deflate-iaa > /sys/module/zswap/parameters/compressor This patch adds iaa_crypto as an idxd sub-driver and tracks iaa devices and workqueues as they are probed or removed. [ Based on work originally by George Powley, Jing Lin and Kyung Min Park ] Intel-SIG: commit ea7a5cbb4369 crypto: iaa - Add Intel IAA Compression Accelerator crypto driver core. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- MAINTAINERS | 7 + drivers/crypto/intel/Kconfig | 1 + drivers/crypto/intel/Makefile | 1 + drivers/crypto/intel/iaa/Kconfig | 10 + drivers/crypto/intel/iaa/Makefile | 10 + drivers/crypto/intel/iaa/iaa_crypto.h | 30 ++ drivers/crypto/intel/iaa/iaa_crypto_main.c | 323 +++++++++++++++++++++ 7 files changed, 382 insertions(+) create mode 100644 drivers/crypto/intel/iaa/Kconfig create mode 100644 drivers/crypto/intel/iaa/Makefile create mode 100644 drivers/crypto/intel/iaa/iaa_crypto.h create mode 100644 drivers/crypto/intel/iaa/iaa_crypto_main.c diff --git a/MAINTAINERS b/MAINTAINERS index c1023cec8044..8e90e3cfc5f6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10572,6 +10572,13 @@ S: Supported Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ F: drivers/dma/ioat* +INTEL IAA CRYPTO DRIVER +M: Tom Zanussi +L: linux-crypto@vger.kernel.org +S: Supported +F: Documentation/driver-api/crypto/iaa/iaa-crypto.rst +F: drivers/crypto/intel/iaa/* + INTEL IDLE DRIVER M: Jacob Pan M: Len Brown diff --git a/drivers/crypto/intel/Kconfig b/drivers/crypto/intel/Kconfig index 3d90c87d4094..f38cd62a3f67 100644 --- a/drivers/crypto/intel/Kconfig +++ b/drivers/crypto/intel/Kconfig @@ -3,3 +3,4 @@ source "drivers/crypto/intel/keembay/Kconfig" source 
"drivers/crypto/intel/ixp4xx/Kconfig" source "drivers/crypto/intel/qat/Kconfig" +source "drivers/crypto/intel/iaa/Kconfig" diff --git a/drivers/crypto/intel/Makefile b/drivers/crypto/intel/Makefile index b3d0352ae188..2f56f6d34cf0 100644 --- a/drivers/crypto/intel/Makefile +++ b/drivers/crypto/intel/Makefile @@ -3,3 +3,4 @@ obj-y += keembay/ obj-y += ixp4xx/ obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/ +obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) += iaa/ diff --git a/drivers/crypto/intel/iaa/Kconfig b/drivers/crypto/intel/iaa/Kconfig new file mode 100644 index 000000000000..fcccb6ff7e29 --- /dev/null +++ b/drivers/crypto/intel/iaa/Kconfig @@ -0,0 +1,10 @@ +config CRYPTO_DEV_IAA_CRYPTO + tristate "Support for Intel(R) IAA Compression Accelerator" + depends on CRYPTO_DEFLATE + depends on INTEL_IDXD + default n + help + This driver supports acceleration for compression and + decompression with the Intel Analytics Accelerator (IAA) + hardware using the cryptographic API. If you choose 'M' + here, the module will be called iaa_crypto. diff --git a/drivers/crypto/intel/iaa/Makefile b/drivers/crypto/intel/iaa/Makefile new file mode 100644 index 000000000000..03859431c897 --- /dev/null +++ b/drivers/crypto/intel/iaa/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for IAA crypto device drivers +# + +ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE=IDXD + +obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) := iaa_crypto.o + +iaa_crypto-y := iaa_crypto_main.o diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h new file mode 100644 index 000000000000..5d1fff7f4b8e --- /dev/null +++ b/drivers/crypto/intel/iaa/iaa_crypto.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Intel Corporation. All rights rsvd. 
*/ + +#ifndef __IAA_CRYPTO_H__ +#define __IAA_CRYPTO_H__ + +#include +#include +#include + +#define IDXD_SUBDRIVER_NAME "crypto" + +/* Representation of IAA workqueue */ +struct iaa_wq { + struct list_head list; + struct idxd_wq *wq; + + struct iaa_device *iaa_device; +}; + +/* Representation of IAA device with wqs, populated by probe */ +struct iaa_device { + struct list_head list; + struct idxd_device *idxd; + + int n_wq; + struct list_head wqs; +}; + +#endif diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c new file mode 100644 index 000000000000..9a662ae49106 --- /dev/null +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "idxd.h" +#include "iaa_crypto.h" + +#ifdef pr_fmt +#undef pr_fmt +#endif + +#define pr_fmt(fmt) "idxd: " IDXD_SUBDRIVER_NAME ": " fmt + +/* number of iaa instances probed */ +static unsigned int nr_iaa; + +static LIST_HEAD(iaa_devices); +static DEFINE_MUTEX(iaa_devices_lock); + +static struct iaa_device *iaa_device_alloc(void) +{ + struct iaa_device *iaa_device; + + iaa_device = kzalloc(sizeof(*iaa_device), GFP_KERNEL); + if (!iaa_device) + return NULL; + + INIT_LIST_HEAD(&iaa_device->wqs); + + return iaa_device; +} + +static void iaa_device_free(struct iaa_device *iaa_device) +{ + struct iaa_wq *iaa_wq, *next; + + list_for_each_entry_safe(iaa_wq, next, &iaa_device->wqs, list) { + list_del(&iaa_wq->list); + kfree(iaa_wq); + } + + kfree(iaa_device); +} + +static bool iaa_has_wq(struct iaa_device *iaa_device, struct idxd_wq *wq) +{ + struct iaa_wq *iaa_wq; + + list_for_each_entry(iaa_wq, &iaa_device->wqs, list) { + if (iaa_wq->wq == wq) + return true; + } + + return false; +} + +static struct iaa_device *add_iaa_device(struct idxd_device *idxd) +{ + struct iaa_device *iaa_device; 
+ + iaa_device = iaa_device_alloc(); + if (!iaa_device) + return NULL; + + iaa_device->idxd = idxd; + + list_add_tail(&iaa_device->list, &iaa_devices); + + nr_iaa++; + + return iaa_device; +} + +static void del_iaa_device(struct iaa_device *iaa_device) +{ + list_del(&iaa_device->list); + + iaa_device_free(iaa_device); + + nr_iaa--; +} + +static int add_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq, + struct iaa_wq **new_wq) +{ + struct idxd_device *idxd = iaa_device->idxd; + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + struct iaa_wq *iaa_wq; + + iaa_wq = kzalloc(sizeof(*iaa_wq), GFP_KERNEL); + if (!iaa_wq) + return -ENOMEM; + + iaa_wq->wq = wq; + iaa_wq->iaa_device = iaa_device; + idxd_wq_set_private(wq, iaa_wq); + + list_add_tail(&iaa_wq->list, &iaa_device->wqs); + + iaa_device->n_wq++; + + if (new_wq) + *new_wq = iaa_wq; + + dev_dbg(dev, "added wq %d to iaa device %d, n_wq %d\n", + wq->id, iaa_device->idxd->id, iaa_device->n_wq); + + return 0; +} + +static void del_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq) +{ + struct idxd_device *idxd = iaa_device->idxd; + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + struct iaa_wq *iaa_wq; + + list_for_each_entry(iaa_wq, &iaa_device->wqs, list) { + if (iaa_wq->wq == wq) { + list_del(&iaa_wq->list); + iaa_device->n_wq--; + + dev_dbg(dev, "removed wq %d from iaa_device %d, n_wq %d, nr_iaa %d\n", + wq->id, iaa_device->idxd->id, + iaa_device->n_wq, nr_iaa); + + if (iaa_device->n_wq == 0) + del_iaa_device(iaa_device); + break; + } + } +} + +static int save_iaa_wq(struct idxd_wq *wq) +{ + struct iaa_device *iaa_device, *found = NULL; + struct idxd_device *idxd; + struct pci_dev *pdev; + struct device *dev; + int ret = 0; + + list_for_each_entry(iaa_device, &iaa_devices, list) { + if (iaa_device->idxd == wq->idxd) { + idxd = iaa_device->idxd; + pdev = idxd->pdev; + dev = &pdev->dev; + /* + * Check to see that we don't already have this wq. 
+ * Shouldn't happen but we don't control probing. + */ + if (iaa_has_wq(iaa_device, wq)) { + dev_dbg(dev, "same wq probed multiple times for iaa_device %p\n", + iaa_device); + goto out; + } + + found = iaa_device; + + ret = add_iaa_wq(iaa_device, wq, NULL); + if (ret) + goto out; + + break; + } + } + + if (!found) { + struct iaa_device *new_device; + struct iaa_wq *new_wq; + + new_device = add_iaa_device(wq->idxd); + if (!new_device) { + ret = -ENOMEM; + goto out; + } + + ret = add_iaa_wq(new_device, wq, &new_wq); + if (ret) { + del_iaa_device(new_device); + goto out; + } + } + + if (WARN_ON(nr_iaa == 0)) + return -EINVAL; +out: + return 0; +} + +static void remove_iaa_wq(struct idxd_wq *wq) +{ + struct iaa_device *iaa_device; + + list_for_each_entry(iaa_device, &iaa_devices, list) { + if (iaa_has_wq(iaa_device, wq)) { + del_iaa_wq(iaa_device, wq); + break; + } + } +} + +static int iaa_crypto_probe(struct idxd_dev *idxd_dev) +{ + struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); + struct idxd_device *idxd = wq->idxd; + struct idxd_driver_data *data = idxd->data; + struct device *dev = &idxd_dev->conf_dev; + int ret = 0; + + if (idxd->state != IDXD_DEV_ENABLED) + return -ENXIO; + + if (data->type != IDXD_TYPE_IAX) + return -ENODEV; + + mutex_lock(&wq->wq_lock); + + if (!idxd_wq_driver_name_match(wq, dev)) { + dev_dbg(dev, "wq %d.%d driver_name match failed: wq driver_name %s, dev driver name %s\n", + idxd->id, wq->id, wq->driver_name, dev->driver->name); + idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; + ret = -ENODEV; + goto err; + } + + wq->type = IDXD_WQT_KERNEL; + + ret = idxd_drv_enable_wq(wq); + if (ret < 0) { + dev_dbg(dev, "enable wq %d.%d failed: %d\n", + idxd->id, wq->id, ret); + ret = -ENXIO; + goto err; + } + + mutex_lock(&iaa_devices_lock); + + ret = save_iaa_wq(wq); + if (ret) + goto err_save; + + mutex_unlock(&iaa_devices_lock); +out: + mutex_unlock(&wq->wq_lock); + + return ret; + +err_save: + idxd_drv_disable_wq(wq); +err: + wq->type = IDXD_WQT_NONE; + 
+ goto out; +} + +static void iaa_crypto_remove(struct idxd_dev *idxd_dev) +{ + struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); + + idxd_wq_quiesce(wq); + + mutex_lock(&wq->wq_lock); + mutex_lock(&iaa_devices_lock); + + remove_iaa_wq(wq); + idxd_drv_disable_wq(wq); + + mutex_unlock(&iaa_devices_lock); + mutex_unlock(&wq->wq_lock); +} + +static enum idxd_dev_type dev_types[] = { + IDXD_DEV_WQ, + IDXD_DEV_NONE, +}; + +static struct idxd_device_driver iaa_crypto_driver = { + .probe = iaa_crypto_probe, + .remove = iaa_crypto_remove, + .name = IDXD_SUBDRIVER_NAME, + .type = dev_types, +}; + +static int __init iaa_crypto_init_module(void) +{ + int ret = 0; + + ret = idxd_driver_register(&iaa_crypto_driver); + if (ret) { + pr_debug("IAA wq sub-driver registration failed\n"); + goto out; + } + + pr_debug("initialized\n"); +out: + return ret; +} + +static void __exit iaa_crypto_cleanup_module(void) +{ + idxd_driver_unregister(&iaa_crypto_driver); + + pr_debug("cleaned up\n"); +} + +MODULE_IMPORT_NS(IDXD); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_IDXD_DEVICE(0); +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("IAA Compression Accelerator Crypto Driver"); + +module_init(iaa_crypto_init_module); +module_exit(iaa_crypto_cleanup_module); -- Gitee From 98c6e015779485c05014237d36d3390880592f11 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 5 Dec 2023 15:25:25 -0600 Subject: [PATCH 0962/2138] crypto: iaa - Add per-cpu workqueue table with rebalancing ANBZ: #9252 commit f57bf3f78377d66af89a6d0c6d926ffb1f590b5d upstream. The iaa compression/decompression algorithms in later patches need a way to retrieve an appropriate IAA workqueue depending on how close the associated IAA device is to the current cpu. For this purpose, add a per-cpu array of workqueues such that an appropriate workqueue can be retrieved by simply accessing the per-cpu array. 
Whenever a new workqueue is bound to or unbound from the iaa_crypto driver, the available workqueues are 'rebalanced' such that work submitted from a particular CPU is given to the most appropriate workqueue available. There currently isn't any way for the user to tweak the way this is done internally - if necessary, knobs can be added later for that purpose. Current best practice is to configure and bind at least one workqueue for each IAA device, but as long as there is at least one workqueue configured and bound to any IAA device in the system, the iaa_crypto driver will work, albeit most likely not as efficiently. [ Based on work originally by George Powley, Jing Lin and Kyung Min Park ] Intel-SIG: commit f57bf3f78377 crypto: iaa - Add per-cpu workqueue table with rebalancing. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto.h | 7 + drivers/crypto/intel/iaa/iaa_crypto_main.c | 222 +++++++++++++++++++++ 2 files changed, 229 insertions(+) diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h index 5d1fff7f4b8e..c25546fa87f7 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto.h +++ b/drivers/crypto/intel/iaa/iaa_crypto.h @@ -27,4 +27,11 @@ struct iaa_device { struct list_head wqs; }; +struct wq_table_entry { + struct idxd_wq **wqs; + int max_wqs; + int n_wqs; + int cur_wq; +}; + #endif diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 9a662ae49106..925b8fea0d06 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -22,6 +22,46 @@ /* number of iaa instances probed */ static unsigned int nr_iaa; +static unsigned int nr_cpus; +static unsigned int nr_nodes; +static 
unsigned int nr_cpus_per_node; + +/* Number of physical cpus sharing each iaa instance */ +static unsigned int cpus_per_iaa; + +/* Per-cpu lookup table for balanced wqs */ +static struct wq_table_entry __percpu *wq_table; + +static void wq_table_add(int cpu, struct idxd_wq *wq) +{ + struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu); + + if (WARN_ON(entry->n_wqs == entry->max_wqs)) + return; + + entry->wqs[entry->n_wqs++] = wq; + + pr_debug("%s: added iaa wq %d.%d to idx %d of cpu %d\n", __func__, + entry->wqs[entry->n_wqs - 1]->idxd->id, + entry->wqs[entry->n_wqs - 1]->id, entry->n_wqs - 1, cpu); +} + +static void wq_table_free_entry(int cpu) +{ + struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu); + + kfree(entry->wqs); + memset(entry, 0, sizeof(*entry)); +} + +static void wq_table_clear_entry(int cpu) +{ + struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu); + + entry->n_wqs = 0; + entry->cur_wq = 0; + memset(entry->wqs, 0, entry->max_wqs * sizeof(struct idxd_wq *)); +} static LIST_HEAD(iaa_devices); static DEFINE_MUTEX(iaa_devices_lock); @@ -141,6 +181,53 @@ static void del_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq) } } +static void clear_wq_table(void) +{ + int cpu; + + for (cpu = 0; cpu < nr_cpus; cpu++) + wq_table_clear_entry(cpu); + + pr_debug("cleared wq table\n"); +} + +static void free_wq_table(void) +{ + int cpu; + + for (cpu = 0; cpu < nr_cpus; cpu++) + wq_table_free_entry(cpu); + + free_percpu(wq_table); + + pr_debug("freed wq table\n"); +} + +static int alloc_wq_table(int max_wqs) +{ + struct wq_table_entry *entry; + int cpu; + + wq_table = alloc_percpu(struct wq_table_entry); + if (!wq_table) + return -ENOMEM; + + for (cpu = 0; cpu < nr_cpus; cpu++) { + entry = per_cpu_ptr(wq_table, cpu); + entry->wqs = kcalloc(max_wqs, sizeof(struct wq *), GFP_KERNEL); + if (!entry->wqs) { + free_wq_table(); + return -ENOMEM; + } + + entry->max_wqs = max_wqs; + } + + pr_debug("initialized wq table\n"); + + return 0; +} + static 
int save_iaa_wq(struct idxd_wq *wq) { struct iaa_device *iaa_device, *found = NULL; @@ -193,6 +280,8 @@ static int save_iaa_wq(struct idxd_wq *wq) if (WARN_ON(nr_iaa == 0)) return -EINVAL; + + cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa; out: return 0; } @@ -207,6 +296,116 @@ static void remove_iaa_wq(struct idxd_wq *wq) break; } } + + if (nr_iaa) + cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa; + else + cpus_per_iaa = 0; +} + +static int wq_table_add_wqs(int iaa, int cpu) +{ + struct iaa_device *iaa_device, *found_device = NULL; + int ret = 0, cur_iaa = 0, n_wqs_added = 0; + struct idxd_device *idxd; + struct iaa_wq *iaa_wq; + struct pci_dev *pdev; + struct device *dev; + + list_for_each_entry(iaa_device, &iaa_devices, list) { + idxd = iaa_device->idxd; + pdev = idxd->pdev; + dev = &pdev->dev; + + if (cur_iaa != iaa) { + cur_iaa++; + continue; + } + + found_device = iaa_device; + dev_dbg(dev, "getting wq from iaa_device %d, cur_iaa %d\n", + found_device->idxd->id, cur_iaa); + break; + } + + if (!found_device) { + found_device = list_first_entry_or_null(&iaa_devices, + struct iaa_device, list); + if (!found_device) { + pr_debug("couldn't find any iaa devices with wqs!\n"); + ret = -EINVAL; + goto out; + } + cur_iaa = 0; + + idxd = found_device->idxd; + pdev = idxd->pdev; + dev = &pdev->dev; + dev_dbg(dev, "getting wq from only iaa_device %d, cur_iaa %d\n", + found_device->idxd->id, cur_iaa); + } + + list_for_each_entry(iaa_wq, &found_device->wqs, list) { + wq_table_add(cpu, iaa_wq->wq); + pr_debug("rebalance: added wq for cpu=%d: iaa wq %d.%d\n", + cpu, iaa_wq->wq->idxd->id, iaa_wq->wq->id); + n_wqs_added++; + }; + + if (!n_wqs_added) { + pr_debug("couldn't find any iaa wqs!\n"); + ret = -EINVAL; + goto out; + } +out: + return ret; +} + +/* + * Rebalance the wq table so that given a cpu, it's easy to find the + * closest IAA instance. 
The idea is to try to choose the most + * appropriate IAA instance for a caller and spread available + * workqueues around to clients. + */ +static void rebalance_wq_table(void) +{ + const struct cpumask *node_cpus; + int node, cpu, iaa = -1; + + if (nr_iaa == 0) + return; + + pr_debug("rebalance: nr_nodes=%d, nr_cpus %d, nr_iaa %d, cpus_per_iaa %d\n", + nr_nodes, nr_cpus, nr_iaa, cpus_per_iaa); + + clear_wq_table(); + + if (nr_iaa == 1) { + for (cpu = 0; cpu < nr_cpus; cpu++) { + if (WARN_ON(wq_table_add_wqs(0, cpu))) { + pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu); + return; + } + } + + return; + } + + for_each_online_node(node) { + node_cpus = cpumask_of_node(node); + + for (cpu = 0; cpu < nr_cpus_per_node; cpu++) { + int node_cpu = cpumask_nth(cpu, node_cpus); + + if ((cpu % cpus_per_iaa) == 0) + iaa++; + + if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) { + pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu); + return; + } + } + } } static int iaa_crypto_probe(struct idxd_dev *idxd_dev) @@ -215,6 +414,7 @@ static int iaa_crypto_probe(struct idxd_dev *idxd_dev) struct idxd_device *idxd = wq->idxd; struct idxd_driver_data *data = idxd->data; struct device *dev = &idxd_dev->conf_dev; + bool first_wq = false; int ret = 0; if (idxd->state != IDXD_DEV_ENABLED) @@ -245,10 +445,19 @@ static int iaa_crypto_probe(struct idxd_dev *idxd_dev) mutex_lock(&iaa_devices_lock); + if (list_empty(&iaa_devices)) { + ret = alloc_wq_table(wq->idxd->max_wqs); + if (ret) + goto err_alloc; + first_wq = true; + } + ret = save_iaa_wq(wq); if (ret) goto err_save; + rebalance_wq_table(); + mutex_unlock(&iaa_devices_lock); out: mutex_unlock(&wq->wq_lock); @@ -256,6 +465,10 @@ static int iaa_crypto_probe(struct idxd_dev *idxd_dev) return ret; err_save: + if (first_wq) + free_wq_table(); +err_alloc: + mutex_unlock(&iaa_devices_lock); idxd_drv_disable_wq(wq); err: wq->type = IDXD_WQT_NONE; @@ -273,7 +486,12 @@ static void iaa_crypto_remove(struct idxd_dev 
*idxd_dev) mutex_lock(&iaa_devices_lock); remove_iaa_wq(wq); + idxd_drv_disable_wq(wq); + rebalance_wq_table(); + + if (nr_iaa == 0) + free_wq_table(); mutex_unlock(&iaa_devices_lock); mutex_unlock(&wq->wq_lock); @@ -295,6 +513,10 @@ static int __init iaa_crypto_init_module(void) { int ret = 0; + nr_cpus = num_online_cpus(); + nr_nodes = num_online_nodes(); + nr_cpus_per_node = nr_cpus / nr_nodes; + ret = idxd_driver_register(&iaa_crypto_driver); if (ret) { pr_debug("IAA wq sub-driver registration failed\n"); -- Gitee From 2e04ca3e4dc5572aabbdd26ccfc0dbfca5fb3022 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 5 Dec 2023 15:25:26 -0600 Subject: [PATCH 0963/2138] crypto: iaa - Add compression mode management along with fixed mode ANBZ: #9252 commit b190447e0fa3ef7355480d641d078962e03768b4 upstream. Define an in-kernel API for adding and removing compression modes, which can be used by kernel modules or other kernel code that implements IAA compression modes. Also add a separate file, iaa_crypto_comp_fixed.c, containing huffman tables generated for the IAA 'fixed' compression mode. Future compression modes can be added in a similar fashion. One or more crypto compression algorithms will be created for each compression mode, each of which can be selected as the compression algorithm to be used by a particular facility. Intel-SIG: commit b190447e0fa3 crypto: iaa - Add compression mode management along with fixed mode. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/Makefile | 2 +- drivers/crypto/intel/iaa/iaa_crypto.h | 85 +++++ .../crypto/intel/iaa/iaa_crypto_comp_fixed.c | 92 +++++ drivers/crypto/intel/iaa/iaa_crypto_main.c | 327 +++++++++++++++++- 4 files changed, 504 insertions(+), 2 deletions(-) create mode 100644 drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c diff --git a/drivers/crypto/intel/iaa/Makefile b/drivers/crypto/intel/iaa/Makefile index 03859431c897..cc87feffd059 100644 --- a/drivers/crypto/intel/iaa/Makefile +++ b/drivers/crypto/intel/iaa/Makefile @@ -7,4 +7,4 @@ ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE=IDXD obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) := iaa_crypto.o -iaa_crypto-y := iaa_crypto_main.o +iaa_crypto-y := iaa_crypto_main.o iaa_crypto_comp_fixed.o diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h index c25546fa87f7..33e68f9d3d02 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto.h +++ b/drivers/crypto/intel/iaa/iaa_crypto.h @@ -10,6 +10,11 @@ #define IDXD_SUBDRIVER_NAME "crypto" +#define IAA_COMP_MODES_MAX 2 + +#define FIXED_HDR 0x2 +#define FIXED_HDR_SIZE 3 + /* Representation of IAA workqueue */ struct iaa_wq { struct list_head list; @@ -18,11 +23,23 @@ struct iaa_wq { struct iaa_device *iaa_device; }; +struct iaa_device_compression_mode { + const char *name; + + struct aecs_comp_table_record *aecs_comp_table; + struct aecs_decomp_table_record *aecs_decomp_table; + + dma_addr_t aecs_comp_table_dma_addr; + dma_addr_t aecs_decomp_table_dma_addr; +}; + /* Representation of IAA device with wqs, populated by probe */ struct iaa_device { struct list_head list; struct idxd_device *idxd; + struct iaa_device_compression_mode *compression_modes[IAA_COMP_MODES_MAX]; + int n_wq; struct list_head wqs; }; @@ -34,4 +51,72 @@ 
struct wq_table_entry { int cur_wq; }; +#define IAA_AECS_ALIGN 32 + +/* + * Analytics Engine Configuration and State (AECS) contains parameters and + * internal state of the analytics engine. + */ +struct aecs_comp_table_record { + u32 crc; + u32 xor_checksum; + u32 reserved0[5]; + u32 num_output_accum_bits; + u8 output_accum[256]; + u32 ll_sym[286]; + u32 reserved1; + u32 reserved2; + u32 d_sym[30]; + u32 reserved_padding[2]; +} __packed; + +/* AECS for decompress */ +struct aecs_decomp_table_record { + u32 crc; + u32 xor_checksum; + u32 low_filter_param; + u32 high_filter_param; + u32 output_mod_idx; + u32 drop_init_decomp_out_bytes; + u32 reserved[36]; + u32 output_accum_data[2]; + u32 out_bits_valid; + u32 bit_off_indexing; + u32 input_accum_data[64]; + u8 size_qw[32]; + u32 decomp_state[1220]; +} __packed; + +int iaa_aecs_init_fixed(void); +void iaa_aecs_cleanup_fixed(void); + +typedef int (*iaa_dev_comp_init_fn_t) (struct iaa_device_compression_mode *mode); +typedef int (*iaa_dev_comp_free_fn_t) (struct iaa_device_compression_mode *mode); + +struct iaa_compression_mode { + const char *name; + u32 *ll_table; + int ll_table_size; + u32 *d_table; + int d_table_size; + u32 *header_table; + int header_table_size; + u16 gen_decomp_table_flags; + iaa_dev_comp_init_fn_t init; + iaa_dev_comp_free_fn_t free; +}; + +int add_iaa_compression_mode(const char *name, + const u32 *ll_table, + int ll_table_size, + const u32 *d_table, + int d_table_size, + const u8 *header_table, + int header_table_size, + u16 gen_decomp_table_flags, + iaa_dev_comp_init_fn_t init, + iaa_dev_comp_free_fn_t free); + +void remove_iaa_compression_mode(const char *name); + #endif diff --git a/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c new file mode 100644 index 000000000000..45cf5d74f0fb --- /dev/null +++ b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Intel 
Corporation. All rights rsvd. */ + +#include "idxd.h" +#include "iaa_crypto.h" + +/* + * Fixed Huffman tables the IAA hardware requires to implement RFC-1951. + */ +static const u32 fixed_ll_sym[286] = { + 0x40030, 0x40031, 0x40032, 0x40033, 0x40034, 0x40035, 0x40036, 0x40037, + 0x40038, 0x40039, 0x4003A, 0x4003B, 0x4003C, 0x4003D, 0x4003E, 0x4003F, + 0x40040, 0x40041, 0x40042, 0x40043, 0x40044, 0x40045, 0x40046, 0x40047, + 0x40048, 0x40049, 0x4004A, 0x4004B, 0x4004C, 0x4004D, 0x4004E, 0x4004F, + 0x40050, 0x40051, 0x40052, 0x40053, 0x40054, 0x40055, 0x40056, 0x40057, + 0x40058, 0x40059, 0x4005A, 0x4005B, 0x4005C, 0x4005D, 0x4005E, 0x4005F, + 0x40060, 0x40061, 0x40062, 0x40063, 0x40064, 0x40065, 0x40066, 0x40067, + 0x40068, 0x40069, 0x4006A, 0x4006B, 0x4006C, 0x4006D, 0x4006E, 0x4006F, + 0x40070, 0x40071, 0x40072, 0x40073, 0x40074, 0x40075, 0x40076, 0x40077, + 0x40078, 0x40079, 0x4007A, 0x4007B, 0x4007C, 0x4007D, 0x4007E, 0x4007F, + 0x40080, 0x40081, 0x40082, 0x40083, 0x40084, 0x40085, 0x40086, 0x40087, + 0x40088, 0x40089, 0x4008A, 0x4008B, 0x4008C, 0x4008D, 0x4008E, 0x4008F, + 0x40090, 0x40091, 0x40092, 0x40093, 0x40094, 0x40095, 0x40096, 0x40097, + 0x40098, 0x40099, 0x4009A, 0x4009B, 0x4009C, 0x4009D, 0x4009E, 0x4009F, + 0x400A0, 0x400A1, 0x400A2, 0x400A3, 0x400A4, 0x400A5, 0x400A6, 0x400A7, + 0x400A8, 0x400A9, 0x400AA, 0x400AB, 0x400AC, 0x400AD, 0x400AE, 0x400AF, + 0x400B0, 0x400B1, 0x400B2, 0x400B3, 0x400B4, 0x400B5, 0x400B6, 0x400B7, + 0x400B8, 0x400B9, 0x400BA, 0x400BB, 0x400BC, 0x400BD, 0x400BE, 0x400BF, + 0x48190, 0x48191, 0x48192, 0x48193, 0x48194, 0x48195, 0x48196, 0x48197, + 0x48198, 0x48199, 0x4819A, 0x4819B, 0x4819C, 0x4819D, 0x4819E, 0x4819F, + 0x481A0, 0x481A1, 0x481A2, 0x481A3, 0x481A4, 0x481A5, 0x481A6, 0x481A7, + 0x481A8, 0x481A9, 0x481AA, 0x481AB, 0x481AC, 0x481AD, 0x481AE, 0x481AF, + 0x481B0, 0x481B1, 0x481B2, 0x481B3, 0x481B4, 0x481B5, 0x481B6, 0x481B7, + 0x481B8, 0x481B9, 0x481BA, 0x481BB, 0x481BC, 0x481BD, 0x481BE, 0x481BF, + 0x481C0, 0x481C1, 
0x481C2, 0x481C3, 0x481C4, 0x481C5, 0x481C6, 0x481C7, + 0x481C8, 0x481C9, 0x481CA, 0x481CB, 0x481CC, 0x481CD, 0x481CE, 0x481CF, + 0x481D0, 0x481D1, 0x481D2, 0x481D3, 0x481D4, 0x481D5, 0x481D6, 0x481D7, + 0x481D8, 0x481D9, 0x481DA, 0x481DB, 0x481DC, 0x481DD, 0x481DE, 0x481DF, + 0x481E0, 0x481E1, 0x481E2, 0x481E3, 0x481E4, 0x481E5, 0x481E6, 0x481E7, + 0x481E8, 0x481E9, 0x481EA, 0x481EB, 0x481EC, 0x481ED, 0x481EE, 0x481EF, + 0x481F0, 0x481F1, 0x481F2, 0x481F3, 0x481F4, 0x481F5, 0x481F6, 0x481F7, + 0x481F8, 0x481F9, 0x481FA, 0x481FB, 0x481FC, 0x481FD, 0x481FE, 0x481FF, + 0x38000, 0x38001, 0x38002, 0x38003, 0x38004, 0x38005, 0x38006, 0x38007, + 0x38008, 0x38009, 0x3800A, 0x3800B, 0x3800C, 0x3800D, 0x3800E, 0x3800F, + 0x38010, 0x38011, 0x38012, 0x38013, 0x38014, 0x38015, 0x38016, 0x38017, + 0x400C0, 0x400C1, 0x400C2, 0x400C3, 0x400C4, 0x400C5 +}; + +static const u32 fixed_d_sym[30] = { + 0x28000, 0x28001, 0x28002, 0x28003, 0x28004, 0x28005, 0x28006, 0x28007, + 0x28008, 0x28009, 0x2800A, 0x2800B, 0x2800C, 0x2800D, 0x2800E, 0x2800F, + 0x28010, 0x28011, 0x28012, 0x28013, 0x28014, 0x28015, 0x28016, 0x28017, + 0x28018, 0x28019, 0x2801A, 0x2801B, 0x2801C, 0x2801D +}; + +static int init_fixed_mode(struct iaa_device_compression_mode *mode) +{ + struct aecs_comp_table_record *comp_table = mode->aecs_comp_table; + u32 bfinal = 1; + u32 offset; + + /* Configure aecs table using fixed Huffman table */ + comp_table->crc = 0; + comp_table->xor_checksum = 0; + offset = comp_table->num_output_accum_bits / 8; + comp_table->output_accum[offset] = FIXED_HDR | bfinal; + comp_table->num_output_accum_bits = FIXED_HDR_SIZE; + + return 0; +} + +int iaa_aecs_init_fixed(void) +{ + int ret; + + ret = add_iaa_compression_mode("fixed", + fixed_ll_sym, + sizeof(fixed_ll_sym), + fixed_d_sym, + sizeof(fixed_d_sym), + NULL, 0, 0, + init_fixed_mode, NULL); + if (!ret) + pr_debug("IAA fixed compression mode initialized\n"); + + return ret; +} + +void iaa_aecs_cleanup_fixed(void) +{ + 
remove_iaa_compression_mode("fixed"); +} diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 925b8fea0d06..4ec7a9269243 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -66,6 +66,299 @@ static void wq_table_clear_entry(int cpu) static LIST_HEAD(iaa_devices); static DEFINE_MUTEX(iaa_devices_lock); +static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX]; + +static int find_empty_iaa_compression_mode(void) +{ + int i = -EINVAL; + + for (i = 0; i < IAA_COMP_MODES_MAX; i++) { + if (iaa_compression_modes[i]) + continue; + break; + } + + return i; +} + +static struct iaa_compression_mode *find_iaa_compression_mode(const char *name, int *idx) +{ + struct iaa_compression_mode *mode; + int i; + + for (i = 0; i < IAA_COMP_MODES_MAX; i++) { + mode = iaa_compression_modes[i]; + if (!mode) + continue; + + if (!strcmp(mode->name, name)) { + *idx = i; + return iaa_compression_modes[i]; + } + } + + return NULL; +} + +static void free_iaa_compression_mode(struct iaa_compression_mode *mode) +{ + kfree(mode->name); + kfree(mode->ll_table); + kfree(mode->d_table); + kfree(mode->header_table); + + kfree(mode); +} + +/* + * IAA Compression modes are defined by an ll_table, a d_table, and an + * optional header_table. These tables are typically generated and + * captured using statistics collected from running actual + * compress/decompress workloads. + * + * A module or other kernel code can add and remove compression modes + * with a given name using the exported @add_iaa_compression_mode() + * and @remove_iaa_compression_mode functions. + * + * When a new compression mode is added, the tables are saved in a + * global compression mode list. When IAA devices are added, a + * per-IAA device dma mapping is created for each IAA device, for each + * compression mode. 
These are the tables used to do the actual + * compression/deccompression and are unmapped if/when the devices are + * removed. Currently, compression modes must be added before any + * device is added, and removed after all devices have been removed. + */ + +/** + * remove_iaa_compression_mode - Remove an IAA compression mode + * @name: The name the compression mode will be known as + * + * Remove the IAA compression mode named @name. + */ +void remove_iaa_compression_mode(const char *name) +{ + struct iaa_compression_mode *mode; + int idx; + + mutex_lock(&iaa_devices_lock); + + if (!list_empty(&iaa_devices)) + goto out; + + mode = find_iaa_compression_mode(name, &idx); + if (mode) { + free_iaa_compression_mode(mode); + iaa_compression_modes[idx] = NULL; + } +out: + mutex_unlock(&iaa_devices_lock); +} +EXPORT_SYMBOL_GPL(remove_iaa_compression_mode); + +/** + * add_iaa_compression_mode - Add an IAA compression mode + * @name: The name the compression mode will be known as + * @ll_table: The ll table + * @ll_table_size: The ll table size in bytes + * @d_table: The d table + * @d_table_size: The d table size in bytes + * @header_table: Optional header table + * @header_table_size: Optional header table size in bytes + * @gen_decomp_table_flags: Otional flags used to generate the decomp table + * @init: Optional callback function to init the compression mode data + * @free: Optional callback function to free the compression mode data + * + * Add a new IAA compression mode named @name. + * + * Returns 0 if successful, errcode otherwise. 
+ */ +int add_iaa_compression_mode(const char *name, + const u32 *ll_table, + int ll_table_size, + const u32 *d_table, + int d_table_size, + const u8 *header_table, + int header_table_size, + u16 gen_decomp_table_flags, + iaa_dev_comp_init_fn_t init, + iaa_dev_comp_free_fn_t free) +{ + struct iaa_compression_mode *mode; + int idx, ret = -ENOMEM; + + mutex_lock(&iaa_devices_lock); + + if (!list_empty(&iaa_devices)) { + ret = -EBUSY; + goto out; + } + + mode = kzalloc(sizeof(*mode), GFP_KERNEL); + if (!mode) + goto out; + + mode->name = kstrdup(name, GFP_KERNEL); + if (!mode->name) + goto free; + + if (ll_table) { + mode->ll_table = kzalloc(ll_table_size, GFP_KERNEL); + if (!mode->ll_table) + goto free; + memcpy(mode->ll_table, ll_table, ll_table_size); + mode->ll_table_size = ll_table_size; + } + + if (d_table) { + mode->d_table = kzalloc(d_table_size, GFP_KERNEL); + if (!mode->d_table) + goto free; + memcpy(mode->d_table, d_table, d_table_size); + mode->d_table_size = d_table_size; + } + + if (header_table) { + mode->header_table = kzalloc(header_table_size, GFP_KERNEL); + if (!mode->header_table) + goto free; + memcpy(mode->header_table, header_table, header_table_size); + mode->header_table_size = header_table_size; + } + + mode->gen_decomp_table_flags = gen_decomp_table_flags; + + mode->init = init; + mode->free = free; + + idx = find_empty_iaa_compression_mode(); + if (idx < 0) + goto free; + + pr_debug("IAA compression mode %s added at idx %d\n", + mode->name, idx); + + iaa_compression_modes[idx] = mode; + + ret = 0; +out: + mutex_unlock(&iaa_devices_lock); + + return ret; +free: + free_iaa_compression_mode(mode); + goto out; +} +EXPORT_SYMBOL_GPL(add_iaa_compression_mode); + +static void free_device_compression_mode(struct iaa_device *iaa_device, + struct iaa_device_compression_mode *device_mode) +{ + size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN; + struct device *dev = &iaa_device->idxd->pdev->dev; + + kfree(device_mode->name); + + if 
(device_mode->aecs_comp_table) + dma_free_coherent(dev, size, device_mode->aecs_comp_table, + device_mode->aecs_comp_table_dma_addr); + if (device_mode->aecs_decomp_table) + dma_free_coherent(dev, size, device_mode->aecs_decomp_table, + device_mode->aecs_decomp_table_dma_addr); + + kfree(device_mode); +} + +static int init_device_compression_mode(struct iaa_device *iaa_device, + struct iaa_compression_mode *mode, + int idx, struct idxd_wq *wq) +{ + size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN; + struct device *dev = &iaa_device->idxd->pdev->dev; + struct iaa_device_compression_mode *device_mode; + int ret = -ENOMEM; + + device_mode = kzalloc(sizeof(*device_mode), GFP_KERNEL); + if (!device_mode) + return -ENOMEM; + + device_mode->name = kstrdup(mode->name, GFP_KERNEL); + if (!device_mode->name) + goto free; + + device_mode->aecs_comp_table = dma_alloc_coherent(dev, size, + &device_mode->aecs_comp_table_dma_addr, GFP_KERNEL); + if (!device_mode->aecs_comp_table) + goto free; + + device_mode->aecs_decomp_table = dma_alloc_coherent(dev, size, + &device_mode->aecs_decomp_table_dma_addr, GFP_KERNEL); + if (!device_mode->aecs_decomp_table) + goto free; + + /* Add Huffman table to aecs */ + memset(device_mode->aecs_comp_table, 0, sizeof(*device_mode->aecs_comp_table)); + memcpy(device_mode->aecs_comp_table->ll_sym, mode->ll_table, mode->ll_table_size); + memcpy(device_mode->aecs_comp_table->d_sym, mode->d_table, mode->d_table_size); + + if (mode->init) { + ret = mode->init(device_mode); + if (ret) + goto free; + } + + /* mode index should match iaa_compression_modes idx */ + iaa_device->compression_modes[idx] = device_mode; + + pr_debug("IAA %s compression mode initialized for iaa device %d\n", + mode->name, iaa_device->idxd->id); + + ret = 0; +out: + return ret; +free: + pr_debug("IAA %s compression mode initialization failed for iaa device %d\n", + mode->name, iaa_device->idxd->id); + + free_device_compression_mode(iaa_device, device_mode); + 
goto out; +} + +static int init_device_compression_modes(struct iaa_device *iaa_device, + struct idxd_wq *wq) +{ + struct iaa_compression_mode *mode; + int i, ret = 0; + + for (i = 0; i < IAA_COMP_MODES_MAX; i++) { + mode = iaa_compression_modes[i]; + if (!mode) + continue; + + ret = init_device_compression_mode(iaa_device, mode, i, wq); + if (ret) + break; + } + + return ret; +} + +static void remove_device_compression_modes(struct iaa_device *iaa_device) +{ + struct iaa_device_compression_mode *device_mode; + int i; + + for (i = 0; i < IAA_COMP_MODES_MAX; i++) { + device_mode = iaa_device->compression_modes[i]; + if (!device_mode) + continue; + + free_device_compression_mode(iaa_device, device_mode); + iaa_device->compression_modes[i] = NULL; + if (iaa_compression_modes[i]->free) + iaa_compression_modes[i]->free(device_mode); + } +} + static struct iaa_device *iaa_device_alloc(void) { struct iaa_device *iaa_device; @@ -120,8 +413,21 @@ static struct iaa_device *add_iaa_device(struct idxd_device *idxd) return iaa_device; } +static int init_iaa_device(struct iaa_device *iaa_device, struct iaa_wq *iaa_wq) +{ + int ret = 0; + + ret = init_device_compression_modes(iaa_device, iaa_wq->wq); + if (ret) + return ret; + + return ret; +} + static void del_iaa_device(struct iaa_device *iaa_device) { + remove_device_compression_modes(iaa_device); + list_del(&iaa_device->list); iaa_device_free(iaa_device); @@ -276,6 +582,13 @@ static int save_iaa_wq(struct idxd_wq *wq) del_iaa_device(new_device); goto out; } + + ret = init_iaa_device(new_device, new_wq); + if (ret) { + del_iaa_wq(new_device, new_wq->wq); + del_iaa_device(new_device); + goto out; + } } if (WARN_ON(nr_iaa == 0)) @@ -517,20 +830,32 @@ static int __init iaa_crypto_init_module(void) nr_nodes = num_online_nodes(); nr_cpus_per_node = nr_cpus / nr_nodes; + ret = iaa_aecs_init_fixed(); + if (ret < 0) { + pr_debug("IAA fixed compression mode init failed\n"); + goto out; + } + ret = 
idxd_driver_register(&iaa_crypto_driver); if (ret) { pr_debug("IAA wq sub-driver registration failed\n"); - goto out; + goto err_driver_reg; } pr_debug("initialized\n"); out: return ret; + +err_driver_reg: + iaa_aecs_cleanup_fixed(); + + goto out; } static void __exit iaa_crypto_cleanup_module(void) { idxd_driver_unregister(&iaa_crypto_driver); + iaa_aecs_cleanup_fixed(); pr_debug("cleaned up\n"); } -- Gitee From d02ed73ec611fbe2748a8f3739bd1af24bcdfa93 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 5 Dec 2023 15:25:27 -0600 Subject: [PATCH 0964/2138] crypto: iaa - Add support for deflate-iaa compression algorithm ANBZ: #9252 commit 2ec6761df889fdf896fde761abd447596dd8f8c2 upstream. This patch registers the deflate-iaa deflate compression algorithm and hooks it up to the IAA hardware using the 'fixed' compression mode introduced in the previous patch. Because the IAA hardware has a 4k history-window limitation, only buffers <= 4k, or that have been compressed using a <= 4k history window, are technically compliant with the deflate spec, which allows for a window of up to 32k. Because of this limitation, the IAA fixed mode deflate algorithm is given its own algorithm name, 'deflate-iaa'. With this change, the deflate-iaa crypto algorithm is registered and operational, and compression and decompression operations are fully enabled following the successful binding of the first IAA workqueue to the iaa_crypto sub-driver. when there are no IAA workqueues bound to the driver, the IAA crypto algorithm can be unregistered by removing the module. A new iaa_crypto 'verify_compress' driver attribute is also added, allowing the user to toggle compression verification. If set, each compress will be internally decompressed and the contents verified, returning error codes if unsuccessful. This can be toggled with 0/1: echo 0 > /sys/bus/dsa/drivers/crypto/verify_compress The default setting is '1' - verify all compresses. 
The verify_compress value setting at the time the algorithm is registered is captured in the algorithm's crypto_ctx and used for all compresses when using the algorithm. [ Based on work originally by George Powley, Jing Lin and Kyung Min Park ] Intel-SIG: commit 2ec6761df889 crypto: iaa - Add support for deflate-iaa compression algorithm. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- crypto/testmgr.c | 10 + drivers/crypto/intel/iaa/iaa_crypto.h | 36 + drivers/crypto/intel/iaa/iaa_crypto_main.c | 1051 +++++++++++++++++++- 3 files changed, 1079 insertions(+), 18 deletions(-) diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 216878c8bc3d..b6d924e0ff59 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4819,6 +4819,16 @@ static const struct alg_test_desc alg_test_descs[] = { .decomp = __VECS(deflate_decomp_tv_template) } } + }, { + .alg = "deflate-iaa", + .test = alg_test_comp, + .fips_allowed = 1, + .suite = { + .comp = { + .comp = __VECS(deflate_comp_tv_template), + .decomp = __VECS(deflate_decomp_tv_template) + } + } }, { .alg = "dh", .test = alg_test_kpp, diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h index 33e68f9d3d02..4c6b0f5a6b50 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto.h +++ b/drivers/crypto/intel/iaa/iaa_crypto.h @@ -10,15 +10,42 @@ #define IDXD_SUBDRIVER_NAME "crypto" +#define IAA_DECOMP_ENABLE BIT(0) +#define IAA_DECOMP_FLUSH_OUTPUT BIT(1) +#define IAA_DECOMP_CHECK_FOR_EOB BIT(2) +#define IAA_DECOMP_STOP_ON_EOB BIT(3) +#define IAA_DECOMP_SUPPRESS_OUTPUT BIT(9) + +#define IAA_COMP_FLUSH_OUTPUT BIT(1) +#define IAA_COMP_APPEND_EOB BIT(2) + +#define IAA_COMPLETION_TIMEOUT 1000000 + +#define IAA_ANALYTICS_ERROR 0x0a +#define IAA_ERROR_DECOMP_BUF_OVERFLOW 0x0b +#define 
IAA_ERROR_COMP_BUF_OVERFLOW 0x19 +#define IAA_ERROR_WATCHDOG_EXPIRED 0x24 + #define IAA_COMP_MODES_MAX 2 #define FIXED_HDR 0x2 #define FIXED_HDR_SIZE 3 +#define IAA_COMP_FLAGS (IAA_COMP_FLUSH_OUTPUT | \ + IAA_COMP_APPEND_EOB) + +#define IAA_DECOMP_FLAGS (IAA_DECOMP_ENABLE | \ + IAA_DECOMP_FLUSH_OUTPUT | \ + IAA_DECOMP_CHECK_FOR_EOB | \ + IAA_DECOMP_STOP_ON_EOB) + /* Representation of IAA workqueue */ struct iaa_wq { struct list_head list; + struct idxd_wq *wq; + int ref; + bool remove; struct iaa_device *iaa_device; }; @@ -119,4 +146,13 @@ int add_iaa_compression_mode(const char *name, void remove_iaa_compression_mode(const char *name); +enum iaa_mode { + IAA_MODE_FIXED, +}; + +struct iaa_compression_ctx { + enum iaa_mode mode; + bool verify_compress; +}; + #endif diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 4ec7a9269243..94a3dcd4e73c 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "idxd.h" #include "iaa_crypto.h" @@ -20,6 +21,8 @@ #define pr_fmt(fmt) "idxd: " IDXD_SUBDRIVER_NAME ": " fmt +#define IAA_ALG_PRIORITY 300 + /* number of iaa instances probed */ static unsigned int nr_iaa; static unsigned int nr_cpus; @@ -29,9 +32,28 @@ static unsigned int nr_cpus_per_node; /* Number of physical cpus sharing each iaa instance */ static unsigned int cpus_per_iaa; +static struct crypto_comp *deflate_generic_tfm; + /* Per-cpu lookup table for balanced wqs */ static struct wq_table_entry __percpu *wq_table; +static struct idxd_wq *wq_table_next_wq(int cpu) +{ + struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu); + + if (++entry->cur_wq >= entry->n_wqs) + entry->cur_wq = 0; + + if (!entry->wqs[entry->cur_wq]) + return NULL; + + pr_debug("%s: returning wq at idx %d (iaa wq %d.%d) from cpu %d\n", __func__, + entry->cur_wq, entry->wqs[entry->cur_wq]->idxd->id, + entry->wqs[entry->cur_wq]->id, 
cpu); + + return entry->wqs[entry->cur_wq]; +} + static void wq_table_add(int cpu, struct idxd_wq *wq) { struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu); @@ -66,6 +88,40 @@ static void wq_table_clear_entry(int cpu) static LIST_HEAD(iaa_devices); static DEFINE_MUTEX(iaa_devices_lock); +/* If enabled, IAA hw crypto algos are registered, unavailable otherwise */ +static bool iaa_crypto_enabled; +static bool iaa_crypto_registered; + +/* Verify results of IAA compress or not */ +static bool iaa_verify_compress = true; + +static ssize_t verify_compress_show(struct device_driver *driver, char *buf) +{ + return sprintf(buf, "%d\n", iaa_verify_compress); +} + +static ssize_t verify_compress_store(struct device_driver *driver, + const char *buf, size_t count) +{ + int ret = -EBUSY; + + mutex_lock(&iaa_devices_lock); + + if (iaa_crypto_enabled) + goto out; + + ret = kstrtobool(buf, &iaa_verify_compress); + if (ret) + goto out; + + ret = count; +out: + mutex_unlock(&iaa_devices_lock); + + return ret; +} +static DRIVER_ATTR_RW(verify_compress); + static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX]; static int find_empty_iaa_compression_mode(void) @@ -250,6 +306,12 @@ int add_iaa_compression_mode(const char *name, } EXPORT_SYMBOL_GPL(add_iaa_compression_mode); +static struct iaa_device_compression_mode * +get_iaa_device_compression_mode(struct iaa_device *iaa_device, int idx) +{ + return iaa_device->compression_modes[idx]; +} + static void free_device_compression_mode(struct iaa_device *iaa_device, struct iaa_device_compression_mode *device_mode) { @@ -268,6 +330,86 @@ static void free_device_compression_mode(struct iaa_device *iaa_device, kfree(device_mode); } +#define IDXD_OP_FLAG_AECS_RW_TGLS 0x400000 +#define IAX_AECS_DEFAULT_FLAG (IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC) +#define IAX_AECS_COMPRESS_FLAG (IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS) +#define IAX_AECS_DECOMPRESS_FLAG (IAX_AECS_DEFAULT_FLAG | 
IDXD_OP_FLAG_RD_SRC2_AECS) +#define IAX_AECS_GEN_FLAG (IAX_AECS_DEFAULT_FLAG | \ + IDXD_OP_FLAG_WR_SRC2_AECS_COMP | \ + IDXD_OP_FLAG_AECS_RW_TGLS) + +static int check_completion(struct device *dev, + struct iax_completion_record *comp, + bool compress, + bool only_once); + +static int decompress_header(struct iaa_device_compression_mode *device_mode, + struct iaa_compression_mode *mode, + struct idxd_wq *wq) +{ + dma_addr_t src_addr, src2_addr; + struct idxd_desc *idxd_desc; + struct iax_hw_desc *desc; + struct device *dev; + int ret = 0; + + idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); + if (IS_ERR(idxd_desc)) + return PTR_ERR(idxd_desc); + + desc = idxd_desc->iax_hw; + + dev = &wq->idxd->pdev->dev; + + src_addr = dma_map_single(dev, (void *)mode->header_table, + mode->header_table_size, DMA_TO_DEVICE); + dev_dbg(dev, "%s: mode->name %s, src_addr %llx, dev %p, src %p, slen %d\n", + __func__, mode->name, src_addr, dev, + mode->header_table, mode->header_table_size); + if (unlikely(dma_mapping_error(dev, src_addr))) { + dev_dbg(dev, "dma_map_single err, exiting\n"); + ret = -ENOMEM; + return ret; + } + + desc->flags = IAX_AECS_GEN_FLAG; + desc->opcode = IAX_OPCODE_DECOMPRESS; + + desc->src1_addr = (u64)src_addr; + desc->src1_size = mode->header_table_size; + + src2_addr = device_mode->aecs_decomp_table_dma_addr; + desc->src2_addr = (u64)src2_addr; + desc->src2_size = 1088; + dev_dbg(dev, "%s: mode->name %s, src2_addr %llx, dev %p, src2_size %d\n", + __func__, mode->name, desc->src2_addr, dev, desc->src2_size); + desc->max_dst_size = 0; // suppressed output + + desc->decompr_flags = mode->gen_decomp_table_flags; + + desc->priv = 1; + + desc->completion_addr = idxd_desc->compl_dma; + + ret = idxd_submit_desc(wq, idxd_desc); + if (ret) { + pr_err("%s: submit_desc failed ret=0x%x\n", __func__, ret); + goto out; + } + + ret = check_completion(dev, idxd_desc->iax_completion, false, false); + if (ret) + dev_dbg(dev, "%s: mode->name %s check_completion failed ret=%d\n", 
+ __func__, mode->name, ret); + else + dev_dbg(dev, "%s: mode->name %s succeeded\n", __func__, + mode->name); +out: + dma_unmap_single(dev, src_addr, 1088, DMA_TO_DEVICE); + + return ret; +} + static int init_device_compression_mode(struct iaa_device *iaa_device, struct iaa_compression_mode *mode, int idx, struct idxd_wq *wq) @@ -300,6 +442,14 @@ static int init_device_compression_mode(struct iaa_device *iaa_device, memcpy(device_mode->aecs_comp_table->ll_sym, mode->ll_table, mode->ll_table_size); memcpy(device_mode->aecs_comp_table->d_sym, mode->d_table, mode->d_table_size); + if (mode->header_table) { + ret = decompress_header(device_mode, mode, wq); + if (ret) { + pr_debug("iaa header decompression failed: ret=%d\n", ret); + goto free; + } + } + if (mode->init) { ret = mode->init(device_mode); if (ret) @@ -372,18 +522,6 @@ static struct iaa_device *iaa_device_alloc(void) return iaa_device; } -static void iaa_device_free(struct iaa_device *iaa_device) -{ - struct iaa_wq *iaa_wq, *next; - - list_for_each_entry_safe(iaa_wq, next, &iaa_device->wqs, list) { - list_del(&iaa_wq->list); - kfree(iaa_wq); - } - - kfree(iaa_device); -} - static bool iaa_has_wq(struct iaa_device *iaa_device, struct idxd_wq *wq) { struct iaa_wq *iaa_wq; @@ -426,12 +564,8 @@ static int init_iaa_device(struct iaa_device *iaa_device, struct iaa_wq *iaa_wq) static void del_iaa_device(struct iaa_device *iaa_device) { - remove_device_compression_modes(iaa_device); - list_del(&iaa_device->list); - iaa_device_free(iaa_device); - nr_iaa--; } @@ -497,6 +631,86 @@ static void clear_wq_table(void) pr_debug("cleared wq table\n"); } +static void free_iaa_device(struct iaa_device *iaa_device) +{ + if (!iaa_device) + return; + + remove_device_compression_modes(iaa_device); + kfree(iaa_device); +} + +static void __free_iaa_wq(struct iaa_wq *iaa_wq) +{ + struct iaa_device *iaa_device; + + if (!iaa_wq) + return; + + iaa_device = iaa_wq->iaa_device; + if (iaa_device->n_wq == 0) + 
free_iaa_device(iaa_wq->iaa_device); +} + +static void free_iaa_wq(struct iaa_wq *iaa_wq) +{ + struct idxd_wq *wq; + + __free_iaa_wq(iaa_wq); + + wq = iaa_wq->wq; + + kfree(iaa_wq); + idxd_wq_set_private(wq, NULL); +} + +static int iaa_wq_get(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct iaa_wq *iaa_wq; + int ret = 0; + + spin_lock(&idxd->dev_lock); + iaa_wq = idxd_wq_get_private(wq); + if (iaa_wq && !iaa_wq->remove) { + iaa_wq->ref++; + idxd_wq_get(wq); + } else { + ret = -ENODEV; + } + spin_unlock(&idxd->dev_lock); + + return ret; +} + +static int iaa_wq_put(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct iaa_wq *iaa_wq; + bool free = false; + int ret = 0; + + spin_lock(&idxd->dev_lock); + iaa_wq = idxd_wq_get_private(wq); + if (iaa_wq) { + iaa_wq->ref--; + if (iaa_wq->ref == 0 && iaa_wq->remove) { + idxd_wq_set_private(wq, NULL); + free = true; + } + idxd_wq_put(wq); + } else { + ret = -ENODEV; + } + spin_unlock(&idxd->dev_lock); + if (free) { + __free_iaa_wq(iaa_wq); + kfree(iaa_wq); + } + + return ret; +} + static void free_wq_table(void) { int cpu; @@ -580,6 +794,7 @@ static int save_iaa_wq(struct idxd_wq *wq) ret = add_iaa_wq(new_device, wq, &new_wq); if (ret) { del_iaa_device(new_device); + free_iaa_device(new_device); goto out; } @@ -587,6 +802,7 @@ static int save_iaa_wq(struct idxd_wq *wq) if (ret) { del_iaa_wq(new_device, new_wq->wq); del_iaa_device(new_device); + free_iaa_wq(new_wq); goto out; } } @@ -721,6 +937,729 @@ static void rebalance_wq_table(void) } } +static inline int check_completion(struct device *dev, + struct iax_completion_record *comp, + bool compress, + bool only_once) +{ + char *op_str = compress ? 
"compress" : "decompress"; + int ret = 0; + + while (!comp->status) { + if (only_once) + return -EAGAIN; + cpu_relax(); + } + + if (comp->status != IAX_COMP_SUCCESS) { + if (comp->status == IAA_ERROR_WATCHDOG_EXPIRED) { + ret = -ETIMEDOUT; + dev_dbg(dev, "%s timed out, size=0x%x\n", + op_str, comp->output_size); + goto out; + } + + if (comp->status == IAA_ANALYTICS_ERROR && + comp->error_code == IAA_ERROR_COMP_BUF_OVERFLOW && compress) { + ret = -E2BIG; + dev_dbg(dev, "compressed > uncompressed size," + " not compressing, size=0x%x\n", + comp->output_size); + goto out; + } + + if (comp->status == IAA_ERROR_DECOMP_BUF_OVERFLOW) { + ret = -EOVERFLOW; + goto out; + } + + ret = -EINVAL; + dev_dbg(dev, "iaa %s status=0x%x, error=0x%x, size=0x%x\n", + op_str, comp->status, comp->error_code, comp->output_size); + print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, 8, 1, comp, 64, 0); + + goto out; + } +out: + return ret; +} + +static int deflate_generic_decompress(struct acomp_req *req) +{ + void *src, *dst; + int ret; + + src = kmap_local_page(sg_page(req->src)) + req->src->offset; + dst = kmap_local_page(sg_page(req->dst)) + req->dst->offset; + + ret = crypto_comp_decompress(deflate_generic_tfm, + src, req->slen, dst, &req->dlen); + + kunmap_local(src); + kunmap_local(dst); + + return ret; +} + +static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, + struct idxd_wq *wq, + dma_addr_t src_addr, unsigned int slen, + dma_addr_t dst_addr, unsigned int *dlen, + u32 *compression_crc, + bool disable_async) +{ + struct iaa_device_compression_mode *active_compression_mode; + struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); + struct iaa_device *iaa_device; + struct idxd_desc *idxd_desc; + struct iax_hw_desc *desc; + struct idxd_device *idxd; + struct iaa_wq *iaa_wq; + struct pci_dev *pdev; + struct device *dev; + int ret = 0; + + iaa_wq = idxd_wq_get_private(wq); + iaa_device = iaa_wq->iaa_device; + idxd = iaa_device->idxd; + pdev = idxd->pdev; + dev 
= &pdev->dev; + + active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode); + + idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); + if (IS_ERR(idxd_desc)) { + dev_dbg(dev, "idxd descriptor allocation failed\n"); + dev_dbg(dev, "iaa compress failed: ret=%ld\n", PTR_ERR(idxd_desc)); + return PTR_ERR(idxd_desc); + } + desc = idxd_desc->iax_hw; + + desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | + IDXD_OP_FLAG_RD_SRC2_AECS | IDXD_OP_FLAG_CC; + desc->opcode = IAX_OPCODE_COMPRESS; + desc->compr_flags = IAA_COMP_FLAGS; + desc->priv = 1; + + desc->src1_addr = (u64)src_addr; + desc->src1_size = slen; + desc->dst_addr = (u64)dst_addr; + desc->max_dst_size = *dlen; + desc->src2_addr = active_compression_mode->aecs_comp_table_dma_addr; + desc->src2_size = sizeof(struct aecs_comp_table_record); + desc->completion_addr = idxd_desc->compl_dma; + + dev_dbg(dev, "%s: compression mode %s," + " desc->src1_addr %llx, desc->src1_size %d," + " desc->dst_addr %llx, desc->max_dst_size %d," + " desc->src2_addr %llx, desc->src2_size %d\n", __func__, + active_compression_mode->name, + desc->src1_addr, desc->src1_size, desc->dst_addr, + desc->max_dst_size, desc->src2_addr, desc->src2_size); + + ret = idxd_submit_desc(wq, idxd_desc); + if (ret) { + dev_dbg(dev, "submit_desc failed ret=%d\n", ret); + goto err; + } + + ret = check_completion(dev, idxd_desc->iax_completion, true, false); + if (ret) { + dev_dbg(dev, "check_completion failed ret=%d\n", ret); + goto err; + } + + *dlen = idxd_desc->iax_completion->output_size; + + *compression_crc = idxd_desc->iax_completion->crc; + + idxd_free_desc(wq, idxd_desc); +out: + return ret; +err: + idxd_free_desc(wq, idxd_desc); + dev_dbg(dev, "iaa compress failed: ret=%d\n", ret); + + goto out; +} + +static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq, + struct acomp_req *req, + dma_addr_t *src_addr, dma_addr_t *dst_addr) +{ + int ret = 0; + int nr_sgs; + + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), 
DMA_FROM_DEVICE); + dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); + + nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "verify: couldn't map src sg for iaa device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + goto out; + } + *src_addr = sg_dma_address(req->src); + dev_dbg(dev, "verify: dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," + " req->slen %d, sg_dma_len(sg) %d\n", *src_addr, nr_sgs, + req->src, req->slen, sg_dma_len(req->src)); + + nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "verify: couldn't map dst sg for iaa device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE); + goto out; + } + *dst_addr = sg_dma_address(req->dst); + dev_dbg(dev, "verify: dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," + " req->dlen %d, sg_dma_len(sg) %d\n", *dst_addr, nr_sgs, + req->dst, req->dlen, sg_dma_len(req->dst)); +out: + return ret; +} + +static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, + struct idxd_wq *wq, + dma_addr_t src_addr, unsigned int slen, + dma_addr_t dst_addr, unsigned int *dlen, + u32 compression_crc) +{ + struct iaa_device_compression_mode *active_compression_mode; + struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); + struct iaa_device *iaa_device; + struct idxd_desc *idxd_desc; + struct iax_hw_desc *desc; + struct idxd_device *idxd; + struct iaa_wq *iaa_wq; + struct pci_dev *pdev; + struct device *dev; + int ret = 0; + + iaa_wq = idxd_wq_get_private(wq); + iaa_device = iaa_wq->iaa_device; + idxd = iaa_device->idxd; + pdev = idxd->pdev; + dev = &pdev->dev; + + active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode); + + idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); + 
if (IS_ERR(idxd_desc)) { + dev_dbg(dev, "idxd descriptor allocation failed\n"); + dev_dbg(dev, "iaa compress failed: ret=%ld\n", + PTR_ERR(idxd_desc)); + return PTR_ERR(idxd_desc); + } + desc = idxd_desc->iax_hw; + + /* Verify (optional) - decompress and check crc, suppress dest write */ + + desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC; + desc->opcode = IAX_OPCODE_DECOMPRESS; + desc->decompr_flags = IAA_DECOMP_FLAGS | IAA_DECOMP_SUPPRESS_OUTPUT; + desc->priv = 1; + + desc->src1_addr = (u64)dst_addr; + desc->src1_size = *dlen; + desc->dst_addr = (u64)src_addr; + desc->max_dst_size = slen; + desc->completion_addr = idxd_desc->compl_dma; + + dev_dbg(dev, "(verify) compression mode %s," + " desc->src1_addr %llx, desc->src1_size %d," + " desc->dst_addr %llx, desc->max_dst_size %d," + " desc->src2_addr %llx, desc->src2_size %d\n", + active_compression_mode->name, + desc->src1_addr, desc->src1_size, desc->dst_addr, + desc->max_dst_size, desc->src2_addr, desc->src2_size); + + ret = idxd_submit_desc(wq, idxd_desc); + if (ret) { + dev_dbg(dev, "submit_desc (verify) failed ret=%d\n", ret); + goto err; + } + + ret = check_completion(dev, idxd_desc->iax_completion, false, false); + if (ret) { + dev_dbg(dev, "(verify) check_completion failed ret=%d\n", ret); + goto err; + } + + if (compression_crc != idxd_desc->iax_completion->crc) { + ret = -EINVAL; + dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:" + " comp=0x%x, decomp=0x%x\n", compression_crc, + idxd_desc->iax_completion->crc); + print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, + 8, 1, idxd_desc->iax_completion, 64, 0); + goto err; + } + + idxd_free_desc(wq, idxd_desc); +out: + return ret; +err: + idxd_free_desc(wq, idxd_desc); + dev_dbg(dev, "iaa compress failed: ret=%d\n", ret); + + goto out; +} + +static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, + struct idxd_wq *wq, + dma_addr_t src_addr, unsigned int slen, + dma_addr_t dst_addr, unsigned int *dlen, + bool 
disable_async) +{ + struct iaa_device_compression_mode *active_compression_mode; + struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); + struct iaa_device *iaa_device; + struct idxd_desc *idxd_desc; + struct iax_hw_desc *desc; + struct idxd_device *idxd; + struct iaa_wq *iaa_wq; + struct pci_dev *pdev; + struct device *dev; + int ret = 0; + + iaa_wq = idxd_wq_get_private(wq); + iaa_device = iaa_wq->iaa_device; + idxd = iaa_device->idxd; + pdev = idxd->pdev; + dev = &pdev->dev; + + active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode); + + idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); + if (IS_ERR(idxd_desc)) { + dev_dbg(dev, "idxd descriptor allocation failed\n"); + dev_dbg(dev, "iaa decompress failed: ret=%ld\n", + PTR_ERR(idxd_desc)); + return PTR_ERR(idxd_desc); + } + desc = idxd_desc->iax_hw; + + desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC; + desc->opcode = IAX_OPCODE_DECOMPRESS; + desc->max_dst_size = PAGE_SIZE; + desc->decompr_flags = IAA_DECOMP_FLAGS; + desc->priv = 1; + + desc->src1_addr = (u64)src_addr; + desc->dst_addr = (u64)dst_addr; + desc->max_dst_size = *dlen; + desc->src1_size = slen; + desc->completion_addr = idxd_desc->compl_dma; + + dev_dbg(dev, "%s: decompression mode %s," + " desc->src1_addr %llx, desc->src1_size %d," + " desc->dst_addr %llx, desc->max_dst_size %d," + " desc->src2_addr %llx, desc->src2_size %d\n", __func__, + active_compression_mode->name, + desc->src1_addr, desc->src1_size, desc->dst_addr, + desc->max_dst_size, desc->src2_addr, desc->src2_size); + + ret = idxd_submit_desc(wq, idxd_desc); + if (ret) { + dev_dbg(dev, "submit_desc failed ret=%d\n", ret); + goto err; + } + + ret = check_completion(dev, idxd_desc->iax_completion, false, false); + if (ret) { + dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret); + if (idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) { + pr_warn("%s: falling back to deflate-generic decompress, " + "analytics error code 
%x\n", __func__, + idxd_desc->iax_completion->error_code); + ret = deflate_generic_decompress(req); + if (ret) { + dev_dbg(dev, "%s: deflate-generic failed ret=%d\n", + __func__, ret); + goto err; + } + } else { + goto err; + } + } else { + req->dlen = idxd_desc->iax_completion->output_size; + } + + *dlen = req->dlen; + + idxd_free_desc(wq, idxd_desc); +out: + return ret; +err: + idxd_free_desc(wq, idxd_desc); + dev_dbg(dev, "iaa decompress failed: ret=%d\n", ret); + + goto out; +} + +static int iaa_comp_acompress(struct acomp_req *req) +{ + struct iaa_compression_ctx *compression_ctx; + struct crypto_tfm *tfm = req->base.tfm; + dma_addr_t src_addr, dst_addr; + bool disable_async = false; + int nr_sgs, cpu, ret = 0; + struct iaa_wq *iaa_wq; + u32 compression_crc; + struct idxd_wq *wq; + struct device *dev; + int order = -1; + + compression_ctx = crypto_tfm_ctx(tfm); + + if (!iaa_crypto_enabled) { + pr_debug("iaa_crypto disabled, not compressing\n"); + return -ENODEV; + } + + if (!req->src || !req->slen) { + pr_debug("invalid src, not compressing\n"); + return -EINVAL; + } + + cpu = get_cpu(); + wq = wq_table_next_wq(cpu); + put_cpu(); + if (!wq) { + pr_debug("no wq configured for cpu=%d\n", cpu); + return -ENODEV; + } + + ret = iaa_wq_get(wq); + if (ret) { + pr_debug("no wq available for cpu=%d\n", cpu); + return -ENODEV; + } + + iaa_wq = idxd_wq_get_private(wq); + + if (!req->dst) { + gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL : GFP_ATOMIC; + + /* incompressible data will always be < 2 * slen */ + req->dlen = 2 * req->slen; + order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE); + req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL); + if (!req->dst) { + ret = -ENOMEM; + order = -1; + goto out; + } + disable_async = true; + } + + dev = &wq->idxd->pdev->dev; + + nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "couldn't map src sg for iaa device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + goto out; + } + src_addr = sg_dma_address(req->src); + dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," + " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, + req->src, req->slen, sg_dma_len(req->src)); + + nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "couldn't map dst sg for iaa device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + goto err_map_dst; + } + dst_addr = sg_dma_address(req->dst); + dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," + " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, + req->dst, req->dlen, sg_dma_len(req->dst)); + + ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, + &req->dlen, &compression_crc, disable_async); + if (ret == -EINPROGRESS) + return ret; + + if (!ret && compression_ctx->verify_compress) { + ret = iaa_remap_for_verify(dev, iaa_wq, req, &src_addr, &dst_addr); + if (ret) { + dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret); + goto out; + } + + ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen, + dst_addr, &req->dlen, compression_crc); + if (ret) + dev_dbg(dev, "asynchronous compress verification failed ret=%d\n", ret); + + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE); + 
dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE); + + goto out; + } + + if (ret) + dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret); + + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); +err_map_dst: + dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); +out: + iaa_wq_put(wq); + + if (order >= 0) + sgl_free_order(req->dst, order); + + return ret; +} + +static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) +{ + gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + GFP_KERNEL : GFP_ATOMIC; + struct crypto_tfm *tfm = req->base.tfm; + dma_addr_t src_addr, dst_addr; + int nr_sgs, cpu, ret = 0; + struct iaa_wq *iaa_wq; + struct device *dev; + struct idxd_wq *wq; + int order = -1; + + cpu = get_cpu(); + wq = wq_table_next_wq(cpu); + put_cpu(); + if (!wq) { + pr_debug("no wq configured for cpu=%d\n", cpu); + return -ENODEV; + } + + ret = iaa_wq_get(wq); + if (ret) { + pr_debug("no wq available for cpu=%d\n", cpu); + return -ENODEV; + } + + iaa_wq = idxd_wq_get_private(wq); + + dev = &wq->idxd->pdev->dev; + + nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "couldn't map src sg for iaa device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + goto out; + } + src_addr = sg_dma_address(req->src); + dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," + " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, + req->src, req->slen, sg_dma_len(req->src)); + + req->dlen = 4 * req->slen; /* start with ~avg comp rato */ +alloc_dest: + order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE); + req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL); + if (!req->dst) { + ret = -ENOMEM; + order = -1; + goto out; + } + + nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "couldn't map dst sg for iaa 
device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + goto err_map_dst; + } + + dst_addr = sg_dma_address(req->dst); + dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," + " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, + req->dst, req->dlen, sg_dma_len(req->dst)); + ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, + dst_addr, &req->dlen, true); + if (ret == -EOVERFLOW) { + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); + req->dlen *= 2; + if (req->dlen > CRYPTO_ACOMP_DST_MAX) + goto err_map_dst; + goto alloc_dest; + } + + if (ret != 0) + dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret); + + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); +err_map_dst: + dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); +out: + iaa_wq_put(wq); + + if (order >= 0) + sgl_free_order(req->dst, order); + + return ret; +} + +static int iaa_comp_adecompress(struct acomp_req *req) +{ + struct crypto_tfm *tfm = req->base.tfm; + dma_addr_t src_addr, dst_addr; + int nr_sgs, cpu, ret = 0; + struct iaa_wq *iaa_wq; + struct device *dev; + struct idxd_wq *wq; + + if (!iaa_crypto_enabled) { + pr_debug("iaa_crypto disabled, not decompressing\n"); + return -ENODEV; + } + + if (!req->src || !req->slen) { + pr_debug("invalid src, not decompressing\n"); + return -EINVAL; + } + + if (!req->dst) + return iaa_comp_adecompress_alloc_dest(req); + + cpu = get_cpu(); + wq = wq_table_next_wq(cpu); + put_cpu(); + if (!wq) { + pr_debug("no wq configured for cpu=%d\n", cpu); + return -ENODEV; + } + + ret = iaa_wq_get(wq); + if (ret) { + pr_debug("no wq available for cpu=%d\n", cpu); + return -ENODEV; + } + + iaa_wq = idxd_wq_get_private(wq); + + dev = &wq->idxd->pdev->dev; + + nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "couldn't map src sg for iaa device %d," + " wq %d: ret=%d\n", 
iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + goto out; + } + src_addr = sg_dma_address(req->src); + dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," + " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, + req->src, req->slen, sg_dma_len(req->src)); + + nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); + if (nr_sgs <= 0 || nr_sgs > 1) { + dev_dbg(dev, "couldn't map dst sg for iaa device %d," + " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, + iaa_wq->wq->id, ret); + ret = -EIO; + goto err_map_dst; + } + dst_addr = sg_dma_address(req->dst); + dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," + " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, + req->dst, req->dlen, sg_dma_len(req->dst)); + + ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, + dst_addr, &req->dlen, false); + if (ret == -EINPROGRESS) + return ret; + + if (ret != 0) + dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret); + + dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); +err_map_dst: + dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); +out: + iaa_wq_put(wq); + + return ret; +} + +static void compression_ctx_init(struct iaa_compression_ctx *ctx) +{ + ctx->verify_compress = iaa_verify_compress; +} + +static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm) +{ + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); + struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); + + compression_ctx_init(ctx); + + ctx->mode = IAA_MODE_FIXED; + + return 0; +} + +static void dst_free(struct scatterlist *sgl) +{ + /* + * Called for req->dst = NULL cases but we free elsewhere + * using sgl_free_order(). 
+ */ +} + +static struct acomp_alg iaa_acomp_fixed_deflate = { + .init = iaa_comp_init_fixed, + .compress = iaa_comp_acompress, + .decompress = iaa_comp_adecompress, + .dst_free = dst_free, + .base = { + .cra_name = "deflate", + .cra_driver_name = "deflate-iaa", + .cra_ctxsize = sizeof(struct iaa_compression_ctx), + .cra_module = THIS_MODULE, + .cra_priority = IAA_ALG_PRIORITY, + } +}; + +static int iaa_register_compression_device(void) +{ + int ret; + + ret = crypto_register_acomp(&iaa_acomp_fixed_deflate); + if (ret) { + pr_err("deflate algorithm acomp fixed registration failed (%d)\n", ret); + goto out; + } + + iaa_crypto_registered = true; +out: + return ret; +} + +static int iaa_unregister_compression_device(void) +{ + if (iaa_crypto_registered) + crypto_unregister_acomp(&iaa_acomp_fixed_deflate); + + return 0; +} + static int iaa_crypto_probe(struct idxd_dev *idxd_dev) { struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); @@ -738,6 +1677,11 @@ static int iaa_crypto_probe(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); + if (idxd_wq_get_private(wq)) { + mutex_unlock(&wq->wq_lock); + return -EBUSY; + } + if (!idxd_wq_driver_name_match(wq, dev)) { dev_dbg(dev, "wq %d.%d driver_name match failed: wq driver_name %s, dev driver name %s\n", idxd->id, wq->id, wq->driver_name, dev->driver->name); @@ -771,12 +1715,28 @@ static int iaa_crypto_probe(struct idxd_dev *idxd_dev) rebalance_wq_table(); + if (first_wq) { + iaa_crypto_enabled = true; + ret = iaa_register_compression_device(); + if (ret != 0) { + iaa_crypto_enabled = false; + dev_dbg(dev, "IAA compression device registration failed\n"); + goto err_register; + } + try_module_get(THIS_MODULE); + + pr_info("iaa_crypto now ENABLED\n"); + } + mutex_unlock(&iaa_devices_lock); out: mutex_unlock(&wq->wq_lock); return ret; +err_register: + remove_iaa_wq(wq); + free_iaa_wq(idxd_wq_get_private(wq)); err_save: if (first_wq) free_wq_table(); @@ -792,6 +1752,9 @@ static int iaa_crypto_probe(struct idxd_dev *idxd_dev) static 
void iaa_crypto_remove(struct idxd_dev *idxd_dev) { struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); + struct idxd_device *idxd = wq->idxd; + struct iaa_wq *iaa_wq; + bool free = false; idxd_wq_quiesce(wq); @@ -800,12 +1763,38 @@ static void iaa_crypto_remove(struct idxd_dev *idxd_dev) remove_iaa_wq(wq); + spin_lock(&idxd->dev_lock); + iaa_wq = idxd_wq_get_private(wq); + if (!iaa_wq) { + spin_unlock(&idxd->dev_lock); + pr_err("%s: no iaa_wq available to remove\n", __func__); + goto out; + } + + if (iaa_wq->ref) { + iaa_wq->remove = true; + } else { + wq = iaa_wq->wq; + idxd_wq_set_private(wq, NULL); + free = true; + } + spin_unlock(&idxd->dev_lock); + if (free) { + __free_iaa_wq(iaa_wq); + kfree(iaa_wq); + } + idxd_drv_disable_wq(wq); rebalance_wq_table(); - if (nr_iaa == 0) + if (nr_iaa == 0) { + iaa_crypto_enabled = false; free_wq_table(); + module_put(THIS_MODULE); + pr_info("iaa_crypto now DISABLED\n"); + } +out: mutex_unlock(&iaa_devices_lock); mutex_unlock(&wq->wq_lock); } @@ -830,10 +1819,19 @@ static int __init iaa_crypto_init_module(void) nr_nodes = num_online_nodes(); nr_cpus_per_node = nr_cpus / nr_nodes; + if (crypto_has_comp("deflate-generic", 0, 0)) + deflate_generic_tfm = crypto_alloc_comp("deflate-generic", 0, 0); + + if (IS_ERR_OR_NULL(deflate_generic_tfm)) { + pr_err("IAA could not alloc %s tfm: errcode = %ld\n", + "deflate-generic", PTR_ERR(deflate_generic_tfm)); + return -ENOMEM; + } + ret = iaa_aecs_init_fixed(); if (ret < 0) { pr_debug("IAA fixed compression mode init failed\n"); - goto out; + goto err_aecs_init; } ret = idxd_driver_register(&iaa_crypto_driver); @@ -842,20 +1840,37 @@ static int __init iaa_crypto_init_module(void) goto err_driver_reg; } + ret = driver_create_file(&iaa_crypto_driver.drv, + &driver_attr_verify_compress); + if (ret) { + pr_debug("IAA verify_compress attr creation failed\n"); + goto err_verify_attr_create; + } + pr_debug("initialized\n"); out: return ret; +err_verify_attr_create: + 
idxd_driver_unregister(&iaa_crypto_driver); err_driver_reg: iaa_aecs_cleanup_fixed(); +err_aecs_init: + crypto_free_comp(deflate_generic_tfm); goto out; } static void __exit iaa_crypto_cleanup_module(void) { + if (iaa_unregister_compression_device()) + pr_debug("IAA compression device unregister failed\n"); + + driver_remove_file(&iaa_crypto_driver.drv, + &driver_attr_verify_compress); idxd_driver_unregister(&iaa_crypto_driver); iaa_aecs_cleanup_fixed(); + crypto_free_comp(deflate_generic_tfm); pr_debug("cleaned up\n"); } -- Gitee From 642a069b881d2246729f8469248fee63bd42bd39 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 5 Dec 2023 15:25:28 -0600 Subject: [PATCH 0965/2138] crypto: iaa - Add irq support for the crypto async interface ANBZ: #9252 commit 09646c98d0bfed47930d9eb0d66c323fae70a5e0 upstream. The existing iaa crypto async support provides an implementation that satisfies the interface but does so in a synchronous manner - it fills and submits the IDXD descriptor and then waits for it to complete before returning. This isn't a problem at the moment, since all existing callers (e.g. zswap) wrap any asynchronous callees in a synchronous wrapper anyway. This change makes the iaa crypto async implementation truly asynchronous: it fills and submits the IDXD descriptor, then returns immediately with -EINPROGRESS. It also sets the descriptor's 'request completion irq' bit and sets up a callback with the IDXD driver which is called when the operation completes and the irq fires. The existing callers such as zswap use synchronous wrappers to deal with -EINPROGRESS and so work as expected without any changes. 
This mode can be enabled by writing 'async_irq' to the sync_mode iaa_crypto driver attribute: echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode Async mode without interrupts (caller must poll) can be enabled by writing 'async' to it: echo async > /sys/bus/dsa/drivers/crypto/sync_mode The default sync mode can be enabled by writing 'sync' to it: echo sync > /sys/bus/dsa/drivers/crypto/sync_mode The sync_mode value setting at the time the IAA algorithms are registered is captured in each algorithm's crypto_ctx and used for all compresses and decompresses when using a given algorithm. Intel-SIG: commit 09646c98d0bf crypto: iaa - Add irq support for the crypto async interface. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto.h | 2 + drivers/crypto/intel/iaa/iaa_crypto_main.c | 266 ++++++++++++++++++++- 2 files changed, 266 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h index 4c6b0f5a6b50..de014ac53adb 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto.h +++ b/drivers/crypto/intel/iaa/iaa_crypto.h @@ -153,6 +153,8 @@ enum iaa_mode { struct iaa_compression_ctx { enum iaa_mode mode; bool verify_compress; + bool async_mode; + bool use_irq; }; #endif diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 94a3dcd4e73c..2f90a43500f0 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -122,6 +122,102 @@ static ssize_t verify_compress_store(struct device_driver *driver, } static DRIVER_ATTR_RW(verify_compress); +/* + * The iaa crypto driver supports three 'sync' methods determining how + * compressions and decompressions are performed: + * + 
* - sync: the compression or decompression completes before + * returning. This is the mode used by the async crypto + * interface when the sync mode is set to 'sync' and by + * the sync crypto interface regardless of setting. + * + * - async: the compression or decompression is submitted and returns + * immediately. Completion interrupts are not used so + * the caller is responsible for polling the descriptor + * for completion. This mode is applicable to only the + * async crypto interface and is ignored for anything + * else. + * + * - async_irq: the compression or decompression is submitted and + * returns immediately. Completion interrupts are + * enabled so the caller can wait for the completion and + * yield to other threads. When the compression or + * decompression completes, the completion is signaled + * and the caller awakened. This mode is applicable to + * only the async crypto interface and is ignored for + * anything else. + * + * These modes can be set using the iaa_crypto sync_mode driver + * attribute. + */ + +/* Use async mode */ +static bool async_mode; +/* Use interrupts */ +static bool use_irq; + +/** + * set_iaa_sync_mode - Set IAA sync mode + * @name: The name of the sync mode + * + * Make the IAA sync mode named @name the current sync mode used by + * compression/decompression. 
+ */ + +static int set_iaa_sync_mode(const char *name) +{ + int ret = 0; + + if (sysfs_streq(name, "sync")) { + async_mode = false; + use_irq = false; + } else if (sysfs_streq(name, "async")) { + async_mode = true; + use_irq = false; + } else if (sysfs_streq(name, "async_irq")) { + async_mode = true; + use_irq = true; + } else { + ret = -EINVAL; + } + + return ret; +} + +static ssize_t sync_mode_show(struct device_driver *driver, char *buf) +{ + int ret = 0; + + if (!async_mode && !use_irq) + ret = sprintf(buf, "%s\n", "sync"); + else if (async_mode && !use_irq) + ret = sprintf(buf, "%s\n", "async"); + else if (async_mode && use_irq) + ret = sprintf(buf, "%s\n", "async_irq"); + + return ret; +} + +static ssize_t sync_mode_store(struct device_driver *driver, + const char *buf, size_t count) +{ + int ret = -EBUSY; + + mutex_lock(&iaa_devices_lock); + + if (iaa_crypto_enabled) + goto out; + + ret = set_iaa_sync_mode(buf); + if (ret == 0) + ret = count; +out: + mutex_unlock(&iaa_devices_lock); + + return ret; +} +static DRIVER_ATTR_RW(sync_mode); + static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX]; static int find_empty_iaa_compression_mode(void) @@ -1001,6 +1097,111 @@ static int deflate_generic_decompress(struct acomp_req *req) return ret; } +static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq, + struct acomp_req *req, + dma_addr_t *src_addr, dma_addr_t *dst_addr); + +static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, + struct idxd_wq *wq, + dma_addr_t src_addr, unsigned int slen, + dma_addr_t dst_addr, unsigned int *dlen, + u32 compression_crc); + +static void iaa_desc_complete(struct idxd_desc *idxd_desc, + enum idxd_complete_type comp_type, + bool free_desc, void *__ctx, + u32 *status) +{ + struct iaa_device_compression_mode *active_compression_mode; + struct iaa_compression_ctx *compression_ctx; + struct crypto_ctx *ctx = __ctx; + struct iaa_device *iaa_device; + struct idxd_device 
*idxd; + struct iaa_wq *iaa_wq; + struct pci_dev *pdev; + struct device *dev; + int ret, err = 0; + + compression_ctx = crypto_tfm_ctx(ctx->tfm); + + iaa_wq = idxd_wq_get_private(idxd_desc->wq); + iaa_device = iaa_wq->iaa_device; + idxd = iaa_device->idxd; + pdev = idxd->pdev; + dev = &pdev->dev; + + active_compression_mode = get_iaa_device_compression_mode(iaa_device, + compression_ctx->mode); + dev_dbg(dev, "%s: compression mode %s," + " ctx->src_addr %llx, ctx->dst_addr %llx\n", __func__, + active_compression_mode->name, + ctx->src_addr, ctx->dst_addr); + + ret = check_completion(dev, idxd_desc->iax_completion, + ctx->compress, false); + if (ret) { + dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret); + if (!ctx->compress && + idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) { + pr_warn("%s: falling back to deflate-generic decompress, " + "analytics error code %x\n", __func__, + idxd_desc->iax_completion->error_code); + ret = deflate_generic_decompress(ctx->req); + if (ret) { + dev_dbg(dev, "%s: deflate-generic failed ret=%d\n", + __func__, ret); + err = -EIO; + goto err; + } + } else { + err = -EIO; + goto err; + } + } else { + ctx->req->dlen = idxd_desc->iax_completion->output_size; + } + + if (ctx->compress && compression_ctx->verify_compress) { + dma_addr_t src_addr, dst_addr; + u32 compression_crc; + + compression_crc = idxd_desc->iax_completion->crc; + + ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr); + if (ret) { + dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret); + err = -EIO; + goto out; + } + + ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr, + ctx->req->slen, dst_addr, &ctx->req->dlen, + compression_crc); + if (ret) { + dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret); + err = -EIO; + } + + dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_TO_DEVICE); + dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_FROM_DEVICE); + 
+ goto out; + } +err: + dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_FROM_DEVICE); + dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_TO_DEVICE); +out: + if (ret != 0) + dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret); + + if (ctx->req->base.complete) + acomp_request_complete(ctx->req, err); + + if (free_desc) + idxd_free_desc(idxd_desc->wq, idxd_desc); + iaa_wq_put(idxd_desc->wq); +} + static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, @@ -1049,6 +1250,22 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, desc->src2_size = sizeof(struct aecs_comp_table_record); desc->completion_addr = idxd_desc->compl_dma; + if (ctx->use_irq && !disable_async) { + desc->flags |= IDXD_OP_FLAG_RCI; + + idxd_desc->crypto.req = req; + idxd_desc->crypto.tfm = tfm; + idxd_desc->crypto.src_addr = src_addr; + idxd_desc->crypto.dst_addr = dst_addr; + idxd_desc->crypto.compress = true; + + dev_dbg(dev, "%s use_async_irq: compression mode %s," + " src_addr %llx, dst_addr %llx\n", __func__, + active_compression_mode->name, + src_addr, dst_addr); + } else if (ctx->async_mode && !disable_async) + req->base.data = idxd_desc; + dev_dbg(dev, "%s: compression mode %s," " desc->src1_addr %llx, desc->src1_size %d," " desc->dst_addr %llx, desc->max_dst_size %d," @@ -1063,6 +1280,12 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, goto err; } + if (ctx->async_mode && !disable_async) { + ret = -EINPROGRESS; + dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); + goto out; + } + ret = check_completion(dev, idxd_desc->iax_completion, true, false); if (ret) { dev_dbg(dev, "check_completion failed ret=%d\n", ret); @@ -1073,7 +1296,8 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, *compression_crc = idxd_desc->iax_completion->crc; - idxd_free_desc(wq, idxd_desc); + if (!ctx->async_mode) + idxd_free_desc(wq, 
idxd_desc); out: return ret; err: @@ -1256,6 +1480,22 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, desc->src1_size = slen; desc->completion_addr = idxd_desc->compl_dma; + if (ctx->use_irq && !disable_async) { + desc->flags |= IDXD_OP_FLAG_RCI; + + idxd_desc->crypto.req = req; + idxd_desc->crypto.tfm = tfm; + idxd_desc->crypto.src_addr = src_addr; + idxd_desc->crypto.dst_addr = dst_addr; + idxd_desc->crypto.compress = false; + + dev_dbg(dev, "%s: use_async_irq compression mode %s," + " src_addr %llx, dst_addr %llx\n", __func__, + active_compression_mode->name, + src_addr, dst_addr); + } else if (ctx->async_mode && !disable_async) + req->base.data = idxd_desc; + dev_dbg(dev, "%s: decompression mode %s," " desc->src1_addr %llx, desc->src1_size %d," " desc->dst_addr %llx, desc->max_dst_size %d," @@ -1270,6 +1510,12 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, goto err; } + if (ctx->async_mode && !disable_async) { + ret = -EINPROGRESS; + dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); + goto out; + } + ret = check_completion(dev, idxd_desc->iax_completion, false, false); if (ret) { dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret); @@ -1292,7 +1538,8 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, *dlen = req->dlen; - idxd_free_desc(wq, idxd_desc); + if (!ctx->async_mode) + idxd_free_desc(wq, idxd_desc); out: return ret; err: @@ -1601,6 +1848,8 @@ static int iaa_comp_adecompress(struct acomp_req *req) static void compression_ctx_init(struct iaa_compression_ctx *ctx) { ctx->verify_compress = iaa_verify_compress; + ctx->async_mode = async_mode; + ctx->use_irq = use_irq; } static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm) @@ -1809,6 +2058,7 @@ static struct idxd_device_driver iaa_crypto_driver = { .remove = iaa_crypto_remove, .name = IDXD_SUBDRIVER_NAME, .type = dev_types, + .desc_complete = iaa_desc_complete, }; static int __init 
iaa_crypto_init_module(void) @@ -1847,10 +2097,20 @@ static int __init iaa_crypto_init_module(void) goto err_verify_attr_create; } + ret = driver_create_file(&iaa_crypto_driver.drv, + &driver_attr_sync_mode); + if (ret) { + pr_debug("IAA sync mode attr creation failed\n"); + goto err_sync_attr_create; + } + pr_debug("initialized\n"); out: return ret; +err_sync_attr_create: + driver_remove_file(&iaa_crypto_driver.drv, + &driver_attr_verify_compress); err_verify_attr_create: idxd_driver_unregister(&iaa_crypto_driver); err_driver_reg: @@ -1866,6 +2126,8 @@ static void __exit iaa_crypto_cleanup_module(void) if (iaa_unregister_compression_device()) pr_debug("IAA compression device unregister failed\n"); + driver_remove_file(&iaa_crypto_driver.drv, + &driver_attr_sync_mode); driver_remove_file(&iaa_crypto_driver.drv, &driver_attr_verify_compress); idxd_driver_unregister(&iaa_crypto_driver); -- Gitee From 70e810fa350716274cebb8c4a7b8a64f8a11901a Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 5 Dec 2023 15:25:29 -0600 Subject: [PATCH 0966/2138] crypto: iaa - Add IAA Compression Accelerator stats ANBZ: #9252 commit 93382a91632a5d88bb9bb0ff1fea872fe87f5dc2 upstream. Add support for optional debugfs statistics support for the IAA Compression Accelerator. This is enabled by the kernel config item: CRYPTO_DEV_IAA_CRYPTO_STATS When enabled, the IAA crypto driver will generate statistics which can be accessed at /sys/kernel/debug/iaa-crypto/. See Documentation/driver-api/crypto/iax/iax-crypto.rst for details. Intel-SIG: commit 93382a91632a crypto: iaa - Add IAA Compression Accelerator stats. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/Kconfig | 9 + drivers/crypto/intel/iaa/Makefile | 2 + drivers/crypto/intel/iaa/iaa_crypto.h | 13 + drivers/crypto/intel/iaa/iaa_crypto_main.c | 39 ++- drivers/crypto/intel/iaa/iaa_crypto_stats.c | 313 ++++++++++++++++++++ drivers/crypto/intel/iaa/iaa_crypto_stats.h | 53 ++++ 6 files changed, 427 insertions(+), 2 deletions(-) create mode 100644 drivers/crypto/intel/iaa/iaa_crypto_stats.c create mode 100644 drivers/crypto/intel/iaa/iaa_crypto_stats.h diff --git a/drivers/crypto/intel/iaa/Kconfig b/drivers/crypto/intel/iaa/Kconfig index fcccb6ff7e29..d53f4b1d494f 100644 --- a/drivers/crypto/intel/iaa/Kconfig +++ b/drivers/crypto/intel/iaa/Kconfig @@ -8,3 +8,12 @@ config CRYPTO_DEV_IAA_CRYPTO decompression with the Intel Analytics Accelerator (IAA) hardware using the cryptographic API. If you choose 'M' here, the module will be called iaa_crypto. + +config CRYPTO_DEV_IAA_CRYPTO_STATS + bool "Enable Intel(R) IAA Compression Accelerator Statistics" + depends on CRYPTO_DEV_IAA_CRYPTO + default n + help + Enable statistics for the IAA compression accelerator. + These include per-device and per-workqueue statistics in + addition to global driver statistics. 
diff --git a/drivers/crypto/intel/iaa/Makefile b/drivers/crypto/intel/iaa/Makefile index cc87feffd059..b64b208d2344 100644 --- a/drivers/crypto/intel/iaa/Makefile +++ b/drivers/crypto/intel/iaa/Makefile @@ -8,3 +8,5 @@ ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE=IDXD obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) := iaa_crypto.o iaa_crypto-y := iaa_crypto_main.o iaa_crypto_comp_fixed.o + +iaa_crypto-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS) += iaa_crypto_stats.o diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h index de014ac53adb..014420f7beb0 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto.h +++ b/drivers/crypto/intel/iaa/iaa_crypto.h @@ -48,6 +48,11 @@ struct iaa_wq { bool remove; struct iaa_device *iaa_device; + + u64 comp_calls; + u64 comp_bytes; + u64 decomp_calls; + u64 decomp_bytes; }; struct iaa_device_compression_mode { @@ -69,6 +74,11 @@ struct iaa_device { int n_wq; struct list_head wqs; + + u64 comp_calls; + u64 comp_bytes; + u64 decomp_calls; + u64 decomp_bytes; }; struct wq_table_entry { @@ -157,4 +167,7 @@ struct iaa_compression_ctx { bool use_irq; }; +extern struct list_head iaa_devices; +extern struct mutex iaa_devices_lock; + #endif diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 2f90a43500f0..eafa2dd7a5bb 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -14,6 +14,7 @@ #include "idxd.h" #include "iaa_crypto.h" +#include "iaa_crypto_stats.h" #ifdef pr_fmt #undef pr_fmt @@ -85,8 +86,8 @@ static void wq_table_clear_entry(int cpu) memset(entry->wqs, 0, entry->max_wqs * sizeof(struct idxd_wq *)); } -static LIST_HEAD(iaa_devices); -static DEFINE_MUTEX(iaa_devices_lock); +LIST_HEAD(iaa_devices); +DEFINE_MUTEX(iaa_devices_lock); /* If enabled, IAA hw crypto algos are registered, unavailable otherwise */ static bool iaa_crypto_enabled; @@ -1052,6 +1053,7 @@ static inline int 
check_completion(struct device *dev, ret = -ETIMEDOUT; dev_dbg(dev, "%s timed out, size=0x%x\n", op_str, comp->output_size); + update_completion_timeout_errs(); goto out; } @@ -1061,6 +1063,7 @@ static inline int check_completion(struct device *dev, dev_dbg(dev, "compressed > uncompressed size," " not compressing, size=0x%x\n", comp->output_size); + update_completion_comp_buf_overflow_errs(); goto out; } @@ -1073,6 +1076,7 @@ static inline int check_completion(struct device *dev, dev_dbg(dev, "iaa %s status=0x%x, error=0x%x, size=0x%x\n", op_str, comp->status, comp->error_code, comp->output_size); print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, 8, 1, comp, 64, 0); + update_completion_einval_errs(); goto out; } @@ -1094,6 +1098,8 @@ static int deflate_generic_decompress(struct acomp_req *req) kunmap_local(src); kunmap_local(dst); + update_total_sw_decomp_calls(); + return ret; } @@ -1161,6 +1167,15 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc, ctx->req->dlen = idxd_desc->iax_completion->output_size; } + /* Update stats */ + if (ctx->compress) { + update_total_comp_bytes_out(ctx->req->dlen); + update_wq_comp_bytes(iaa_wq->wq, ctx->req->dlen); + } else { + update_total_decomp_bytes_in(ctx->req->dlen); + update_wq_decomp_bytes(iaa_wq->wq, ctx->req->dlen); + } + if (ctx->compress && compression_ctx->verify_compress) { dma_addr_t src_addr, dst_addr; u32 compression_crc; @@ -1280,6 +1295,10 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, goto err; } + /* Update stats */ + update_total_comp_calls(); + update_wq_comp_calls(wq); + if (ctx->async_mode && !disable_async) { ret = -EINPROGRESS; dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); @@ -1294,6 +1313,10 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, *dlen = idxd_desc->iax_completion->output_size; + /* Update stats */ + update_total_comp_bytes_out(*dlen); + update_wq_comp_bytes(wq, *dlen); + *compression_crc = 
idxd_desc->iax_completion->crc; if (!ctx->async_mode) @@ -1510,6 +1533,10 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, goto err; } + /* Update stats */ + update_total_decomp_calls(); + update_wq_decomp_calls(wq); + if (ctx->async_mode && !disable_async) { ret = -EINPROGRESS; dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); @@ -1540,6 +1567,10 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, if (!ctx->async_mode) idxd_free_desc(wq, idxd_desc); + + /* Update stats */ + update_total_decomp_bytes_in(slen); + update_wq_decomp_bytes(wq, slen); out: return ret; err: @@ -2104,6 +2135,9 @@ static int __init iaa_crypto_init_module(void) goto err_sync_attr_create; } + if (iaa_crypto_debugfs_init()) + pr_warn("debugfs init failed, stats not available\n"); + pr_debug("initialized\n"); out: return ret; @@ -2126,6 +2160,7 @@ static void __exit iaa_crypto_cleanup_module(void) if (iaa_unregister_compression_device()) pr_debug("IAA compression device unregister failed\n"); + iaa_crypto_debugfs_cleanup(); driver_remove_file(&iaa_crypto_driver.drv, &driver_attr_sync_mode); driver_remove_file(&iaa_crypto_driver.drv, diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c new file mode 100644 index 000000000000..0279edc6194e --- /dev/null +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2021 Intel Corporation. All rights rsvd. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "../../dma/idxd/idxd.h" +#include +#include +#include "iaa_crypto.h" +#include "iaa_crypto_stats.h" + +static u64 total_comp_calls; +static u64 total_decomp_calls; +static u64 total_sw_decomp_calls; +static u64 max_comp_delay_ns; +static u64 max_decomp_delay_ns; +static u64 max_acomp_delay_ns; +static u64 max_adecomp_delay_ns; +static u64 total_comp_bytes_out; +static u64 total_decomp_bytes_in; +static u64 total_completion_einval_errors; +static u64 total_completion_timeout_errors; +static u64 total_completion_comp_buf_overflow_errors; + +static struct dentry *iaa_crypto_debugfs_root; + +void update_total_comp_calls(void) +{ + total_comp_calls++; +} + +void update_total_comp_bytes_out(int n) +{ + total_comp_bytes_out += n; +} + +void update_total_decomp_calls(void) +{ + total_decomp_calls++; +} + +void update_total_sw_decomp_calls(void) +{ + total_sw_decomp_calls++; +} + +void update_total_decomp_bytes_in(int n) +{ + total_decomp_bytes_in += n; +} + +void update_completion_einval_errs(void) +{ + total_completion_einval_errors++; +} + +void update_completion_timeout_errs(void) +{ + total_completion_timeout_errors++; +} + +void update_completion_comp_buf_overflow_errs(void) +{ + total_completion_comp_buf_overflow_errors++; +} + +void update_max_comp_delay_ns(u64 start_time_ns) +{ + u64 time_diff; + + time_diff = ktime_get_ns() - start_time_ns; + + if (time_diff > max_comp_delay_ns) + max_comp_delay_ns = time_diff; +} + +void update_max_decomp_delay_ns(u64 start_time_ns) +{ + u64 time_diff; + + time_diff = ktime_get_ns() - start_time_ns; + + if (time_diff > max_decomp_delay_ns) + max_decomp_delay_ns = time_diff; +} + +void update_max_acomp_delay_ns(u64 start_time_ns) +{ + u64 time_diff; + + time_diff = ktime_get_ns() - start_time_ns; + + if (time_diff > max_acomp_delay_ns) + max_acomp_delay_ns = time_diff; +} + +void update_max_adecomp_delay_ns(u64 
start_time_ns) +{ + u64 time_diff; + + time_diff = ktime_get_ns() - start_time_ns; + + if (time_diff > max_adecomp_delay_ns) + + max_adecomp_delay_ns = time_diff; +} + +void update_wq_comp_calls(struct idxd_wq *idxd_wq) +{ + struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); + + wq->comp_calls++; + wq->iaa_device->comp_calls++; +} + +void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) +{ + struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); + + wq->comp_bytes += n; + wq->iaa_device->comp_bytes += n; +} + +void update_wq_decomp_calls(struct idxd_wq *idxd_wq) +{ + struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); + + wq->decomp_calls++; + wq->iaa_device->decomp_calls++; +} + +void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) +{ + struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); + + wq->decomp_bytes += n; + wq->iaa_device->decomp_bytes += n; +} + +static void reset_iaa_crypto_stats(void) +{ + total_comp_calls = 0; + total_decomp_calls = 0; + total_sw_decomp_calls = 0; + max_comp_delay_ns = 0; + max_decomp_delay_ns = 0; + max_acomp_delay_ns = 0; + max_adecomp_delay_ns = 0; + total_comp_bytes_out = 0; + total_decomp_bytes_in = 0; + total_completion_einval_errors = 0; + total_completion_timeout_errors = 0; + total_completion_comp_buf_overflow_errors = 0; +} + +static void reset_wq_stats(struct iaa_wq *wq) +{ + wq->comp_calls = 0; + wq->comp_bytes = 0; + wq->decomp_calls = 0; + wq->decomp_bytes = 0; +} + +static void reset_device_stats(struct iaa_device *iaa_device) +{ + struct iaa_wq *iaa_wq; + + iaa_device->comp_calls = 0; + iaa_device->comp_bytes = 0; + iaa_device->decomp_calls = 0; + iaa_device->decomp_bytes = 0; + + list_for_each_entry(iaa_wq, &iaa_device->wqs, list) + reset_wq_stats(iaa_wq); +} + +static void wq_show(struct seq_file *m, struct iaa_wq *iaa_wq) +{ + seq_printf(m, " name: %s\n", iaa_wq->wq->name); + seq_printf(m, " comp_calls: %llu\n", iaa_wq->comp_calls); + seq_printf(m, " comp_bytes: %llu\n", iaa_wq->comp_bytes); + seq_printf(m, " 
decomp_calls: %llu\n", iaa_wq->decomp_calls); + seq_printf(m, " decomp_bytes: %llu\n\n", iaa_wq->decomp_bytes); +} + +static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device) +{ + struct iaa_wq *iaa_wq; + + seq_puts(m, "iaa device:\n"); + seq_printf(m, " id: %d\n", iaa_device->idxd->id); + seq_printf(m, " n_wqs: %d\n", iaa_device->n_wq); + seq_printf(m, " comp_calls: %llu\n", iaa_device->comp_calls); + seq_printf(m, " comp_bytes: %llu\n", iaa_device->comp_bytes); + seq_printf(m, " decomp_calls: %llu\n", iaa_device->decomp_calls); + seq_printf(m, " decomp_bytes: %llu\n", iaa_device->decomp_bytes); + seq_puts(m, " wqs:\n"); + + list_for_each_entry(iaa_wq, &iaa_device->wqs, list) + wq_show(m, iaa_wq); +} + +static void global_stats_show(struct seq_file *m) +{ + seq_puts(m, "global stats:\n"); + seq_printf(m, " total_comp_calls: %llu\n", total_comp_calls); + seq_printf(m, " total_decomp_calls: %llu\n", total_decomp_calls); + seq_printf(m, " total_sw_decomp_calls: %llu\n", total_sw_decomp_calls); + seq_printf(m, " total_comp_bytes_out: %llu\n", total_comp_bytes_out); + seq_printf(m, " total_decomp_bytes_in: %llu\n", total_decomp_bytes_in); + seq_printf(m, " total_completion_einval_errors: %llu\n", + total_completion_einval_errors); + seq_printf(m, " total_completion_timeout_errors: %llu\n", + total_completion_timeout_errors); + seq_printf(m, " total_completion_comp_buf_overflow_errors: %llu\n\n", + total_completion_comp_buf_overflow_errors); +} + +static int wq_stats_show(struct seq_file *m, void *v) +{ + struct iaa_device *iaa_device; + + mutex_lock(&iaa_devices_lock); + + global_stats_show(m); + + list_for_each_entry(iaa_device, &iaa_devices, list) + device_stats_show(m, iaa_device); + + mutex_unlock(&iaa_devices_lock); + + return 0; +} + +static int iaa_crypto_stats_reset(void *data, u64 value) +{ + struct iaa_device *iaa_device; + + reset_iaa_crypto_stats(); + + mutex_lock(&iaa_devices_lock); + + list_for_each_entry(iaa_device, &iaa_devices, 
list) + reset_device_stats(iaa_device); + + mutex_unlock(&iaa_devices_lock); + + return 0; +} + +static int wq_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, wq_stats_show, file); +} + +static const struct file_operations wq_stats_fops = { + .open = wq_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +DEFINE_DEBUGFS_ATTRIBUTE(wq_stats_reset_fops, NULL, iaa_crypto_stats_reset, "%llu\n"); + +int __init iaa_crypto_debugfs_init(void) +{ + if (!debugfs_initialized()) + return -ENODEV; + + iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL); + if (!iaa_crypto_debugfs_root) + return -ENOMEM; + + debugfs_create_u64("max_comp_delay_ns", 0644, + iaa_crypto_debugfs_root, &max_comp_delay_ns); + debugfs_create_u64("max_decomp_delay_ns", 0644, + iaa_crypto_debugfs_root, &max_decomp_delay_ns); + debugfs_create_u64("max_acomp_delay_ns", 0644, + iaa_crypto_debugfs_root, &max_comp_delay_ns); + debugfs_create_u64("max_adecomp_delay_ns", 0644, + iaa_crypto_debugfs_root, &max_decomp_delay_ns); + debugfs_create_u64("total_comp_calls", 0644, + iaa_crypto_debugfs_root, &total_comp_calls); + debugfs_create_u64("total_decomp_calls", 0644, + iaa_crypto_debugfs_root, &total_decomp_calls); + debugfs_create_u64("total_sw_decomp_calls", 0644, + iaa_crypto_debugfs_root, &total_sw_decomp_calls); + debugfs_create_u64("total_comp_bytes_out", 0644, + iaa_crypto_debugfs_root, &total_comp_bytes_out); + debugfs_create_u64("total_decomp_bytes_in", 0644, + iaa_crypto_debugfs_root, &total_decomp_bytes_in); + debugfs_create_file("wq_stats", 0644, iaa_crypto_debugfs_root, NULL, + &wq_stats_fops); + debugfs_create_file("stats_reset", 0644, iaa_crypto_debugfs_root, NULL, + &wq_stats_reset_fops); + + return 0; +} + +void __exit iaa_crypto_debugfs_cleanup(void) +{ + debugfs_remove_recursive(iaa_crypto_debugfs_root); +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.h 
b/drivers/crypto/intel/iaa/iaa_crypto_stats.h new file mode 100644 index 000000000000..c10b87b86fa4 --- /dev/null +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */ + +#ifndef __CRYPTO_DEV_IAA_CRYPTO_STATS_H__ +#define __CRYPTO_DEV_IAA_CRYPTO_STATS_H__ + +#if defined(CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS) +int iaa_crypto_debugfs_init(void); +void iaa_crypto_debugfs_cleanup(void); + +void update_total_comp_calls(void); +void update_total_comp_bytes_out(int n); +void update_total_decomp_calls(void); +void update_total_sw_decomp_calls(void); +void update_total_decomp_bytes_in(int n); +void update_max_comp_delay_ns(u64 start_time_ns); +void update_max_decomp_delay_ns(u64 start_time_ns); +void update_max_acomp_delay_ns(u64 start_time_ns); +void update_max_adecomp_delay_ns(u64 start_time_ns); +void update_completion_einval_errs(void); +void update_completion_timeout_errs(void); +void update_completion_comp_buf_overflow_errs(void); + +void update_wq_comp_calls(struct idxd_wq *idxd_wq); +void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n); +void update_wq_decomp_calls(struct idxd_wq *idxd_wq); +void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n); + +#else +static inline int iaa_crypto_debugfs_init(void) { return 0; } +static inline void iaa_crypto_debugfs_cleanup(void) {} + +static inline void update_total_comp_calls(void) {} +static inline void update_total_comp_bytes_out(int n) {} +static inline void update_total_decomp_calls(void) {} +static inline void update_total_sw_decomp_calls(void) {} +static inline void update_total_decomp_bytes_in(int n) {} +static inline void update_max_comp_delay_ns(u64 start_time_ns) {} +static inline void update_max_decomp_delay_ns(u64 start_time_ns) {} +static inline void update_max_acomp_delay_ns(u64 start_time_ns) {} +static inline void update_max_adecomp_delay_ns(u64 start_time_ns) {} +static inline void 
update_completion_einval_errs(void) {} +static inline void update_completion_timeout_errs(void) {} +static inline void update_completion_comp_buf_overflow_errs(void) {} + +static inline void update_wq_comp_calls(struct idxd_wq *idxd_wq) {} +static inline void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) {} +static inline void update_wq_decomp_calls(struct idxd_wq *idxd_wq) {} +static inline void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) {} + +#endif // CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS + +#endif -- Gitee From 62de61cc0efa26fff041d4a3e856a79d704c550f Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Tue, 5 Dec 2023 15:25:30 -0600 Subject: [PATCH 0967/2138] dmaengine: idxd: Add support for device/wq defaults ANBZ: #9252 commit 979f6ded93ac5ca0fec2b4c5b7b668c8a2a65e1b upstream. Add a load_device_defaults() function pointer to struct idxd_driver_data, which if defined, will be called when an idxd device is probed and will allow the idxd device to be configured with default values. The load_device_defaults() function is passed an idxd device to work with to set specific device attributes. Also add a load_device_defaults() implementation IAA devices; future patches would add default functions for other device types such as DSA. The way idxd device probing works, if the device configuration is valid at that point e.g. at least one workqueue and engine is properly configured then the device will be enabled and ready to go. 
The IAA implementation, idxd_load_iaa_device_defaults(), configures a single workqueue (wq0) for each device with the following default values: mode "dedicated" threshold 0 size Total WQ Size from WQCAP priority 10 type IDXD_WQT_KERNEL group 0 name "iaa_crypto" driver_name "crypto" Note that this now adds another configuration step for any users that want to configure their own devices/workqueus with something different in that they'll first need to disable (in the case of IAA) wq0 and the device itself before they can set their own attributes and re-enable, since they've been already been auto-enabled. Note also that in order for the new configuration to be applied to the deflate-iaa crypto algorithm the iaa_crypto module needs to unregister the old version, which is accomplished by removing the iaa_crypto module, and re-registering it with the new configuration by reinserting the iaa_crypto module. Intel-SIG: commit 979f6ded93ac dmaengine: idxd: Add support for device/wq defaults. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Signed-off-by: Tom Zanussi Reviewed-by: Dave Jiang Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/dma/idxd/Makefile | 2 +- drivers/dma/idxd/defaults.c | 53 +++++++++++++++++++++++++++++++++++++ drivers/dma/idxd/idxd.h | 4 +++ drivers/dma/idxd/init.c | 7 +++++ 4 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 drivers/dma/idxd/defaults.c diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile index c5e679070e46..2b4a0d406e1e 100644 --- a/drivers/dma/idxd/Makefile +++ b/drivers/dma/idxd/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_INTEL_IDXD_BUS) += idxd_bus.o idxd_bus-y := bus.o obj-$(CONFIG_INTEL_IDXD) += idxd.o -idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o +idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o debugfs.o defaults.o idxd-$(CONFIG_INTEL_IDXD_PERFMON) += perfmon.o diff --git a/drivers/dma/idxd/defaults.c b/drivers/dma/idxd/defaults.c new file mode 100644 index 000000000000..c607ae8dd12c --- /dev/null +++ b/drivers/dma/idxd/defaults.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2023 Intel Corporation. All rights rsvd. 
*/ +#include +#include "idxd.h" + +int idxd_load_iaa_device_defaults(struct idxd_device *idxd) +{ + struct idxd_engine *engine; + struct idxd_group *group; + struct idxd_wq *wq; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return 0; + + wq = idxd->wqs[0]; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + /* set mode to "dedicated" */ + set_bit(WQ_FLAG_DEDICATED, &wq->flags); + wq->threshold = 0; + + /* only setting up 1 wq, so give it all the wq space */ + wq->size = idxd->max_wq_size; + + /* set priority to 10 */ + wq->priority = 10; + + /* set type to "kernel" */ + wq->type = IDXD_WQT_KERNEL; + + /* set wq group to 0 */ + group = idxd->groups[0]; + wq->group = group; + group->num_wqs++; + + /* set name to "iaa_crypto" */ + memset(wq->name, 0, WQ_NAME_SIZE + 1); + strscpy(wq->name, "iaa_crypto", WQ_NAME_SIZE + 1); + + /* set driver_name to "crypto" */ + memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1); + strscpy(wq->driver_name, "crypto", DRIVER_NAME_SIZE + 1); + + engine = idxd->engines[0]; + + /* set engine group to 0 */ + engine->group = idxd->groups[0]; + engine->group->num_engines++; + + return 0; +} diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 32a1fef9adb1..eb73cabb4ad0 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -277,6 +277,8 @@ struct idxd_dma_dev { struct dma_device dma; }; +typedef int (*load_device_defaults_fn_t) (struct idxd_device *idxd); + struct idxd_driver_data { const char *name_prefix; enum idxd_type type; @@ -287,6 +289,7 @@ struct idxd_driver_data { int cr_status_off; int cr_result_off; bool user_submission_safe; + load_device_defaults_fn_t load_device_defaults; }; struct idxd_evl { @@ -732,6 +735,7 @@ void idxd_unregister_devices(struct idxd_device *idxd); void idxd_wqs_quiesce(struct idxd_device *idxd); bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc); void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count); +int idxd_load_iaa_device_defaults(struct 
idxd_device *idxd); /* device interrupt control */ irqreturn_t idxd_misc_thread(int vec, void *data); diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 786afb256b6e..a7295943fa22 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -61,6 +61,7 @@ static struct idxd_driver_data idxd_driver_data[] = { .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */ .cr_status_off = offsetof(struct iax_completion_record, status), .cr_result_off = offsetof(struct iax_completion_record, error_code), + .load_device_defaults = idxd_load_iaa_device_defaults, }, }; @@ -756,6 +757,12 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err; } + if (data->load_device_defaults) { + rc = data->load_device_defaults(idxd); + if (rc) + dev_warn(dev, "IDXD loading device defaults failed\n"); + } + rc = idxd_register_devices(idxd); if (rc) { dev_err(dev, "IDXD sysfs setup failed\n"); -- Gitee From 84bdf5e682527e27c425ffc78164d708247ade04 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Mon, 18 Dec 2023 14:47:14 -0600 Subject: [PATCH 0968/2138] crypto: iaa - Change desc->priv to 0 ANBZ: #9252 commit 98bb0dd15133a66ce4d1079193367c536b9e3fec upstream. In order for shared workqeues to work properly, desc->priv should be set to 0 rather than 1. The need for this is described in commit f5ccf55e1028 (dmaengine/idxd: Re-enable kernel workqueue under DMA API), so we need to make IAA consistent with IOMMU settings, otherwise we get: [ 141.948389] IOMMU: dmar15: Page request in Privilege Mode [ 141.948394] dmar15: Invalid page request: 2000026a100101 ffffb167 Dedicated workqueues ignore this field and are unaffected. Intel-SIG: commit 98bb0dd15133 crypto: iaa - Change desc->priv to 0. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Signed-off-by: Tom Zanussi Reviewed-by: Dave Jiang Reviewed-by: Fenghua Yu Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index eafa2dd7a5bb..5093361b0107 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -484,7 +484,7 @@ static int decompress_header(struct iaa_device_compression_mode *device_mode, desc->decompr_flags = mode->gen_decomp_table_flags; - desc->priv = 1; + desc->priv = 0; desc->completion_addr = idxd_desc->compl_dma; @@ -1255,7 +1255,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, IDXD_OP_FLAG_RD_SRC2_AECS | IDXD_OP_FLAG_CC; desc->opcode = IAX_OPCODE_COMPRESS; desc->compr_flags = IAA_COMP_FLAGS; - desc->priv = 1; + desc->priv = 0; desc->src1_addr = (u64)src_addr; desc->src1_size = slen; @@ -1409,7 +1409,7 @@ static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC; desc->opcode = IAX_OPCODE_DECOMPRESS; desc->decompr_flags = IAA_DECOMP_FLAGS | IAA_DECOMP_SUPPRESS_OUTPUT; - desc->priv = 1; + desc->priv = 0; desc->src1_addr = (u64)dst_addr; desc->src1_size = *dlen; @@ -1495,7 +1495,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, desc->opcode = IAX_OPCODE_DECOMPRESS; desc->max_dst_size = PAGE_SIZE; desc->decompr_flags = IAA_DECOMP_FLAGS; - desc->priv = 1; + desc->priv = 0; desc->src1_addr = (u64)src_addr; desc->dst_addr = (u64)dst_addr; -- Gitee From 514f9bf694b6630ae9c6ffd34ecb6d2940837211 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Mon, 18 Dec 2023 14:47:15 -0600 Subject: [PATCH 0969/2138] crypto: iaa - Remove unneeded 
newline in update_max_adecomp_delay_ns() ANBZ: #9252 commit 5c3fadc83ee9f6747c8f0f81cd9e5591eb003360 upstream. Remove a stray newline in update_max_adecomp_delay_ns(). Intel-SIG: commit 5c3fadc83ee9 crypto: iaa - Remove unneeded newline in update_max_adecomp_delay_ns(). Backporting patches for Intel IAA crypto driver on Intel Xeon platform. Reported-by: Christophe JAILLET Signed-off-by: Tom Zanussi Reviewed-by: Dave Jiang Reviewed-by: Fenghua Yu Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto_stats.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c index 0279edc6194e..2e3b7b73af20 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_stats.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c @@ -109,7 +109,6 @@ void update_max_adecomp_delay_ns(u64 start_time_ns) time_diff = ktime_get_ns() - start_time_ns; if (time_diff > max_adecomp_delay_ns) - max_adecomp_delay_ns = time_diff; } -- Gitee From d3a2c4ea2b7c131f76b17b3ba3e85de806a1afdf Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Tue, 19 Dec 2023 14:15:20 +0800 Subject: [PATCH 0970/2138] crypto: iaa - remove unneeded semicolon ANBZ: #9252 commit 38f56101b8733b6d334c836c119c79d49b958a3f upstream. No functional modification involved. ./drivers/crypto/intel/iaa/iaa_crypto_main.c:979:2-3: Unneeded semicolon. Intel-SIG: commit 38f56101b873 crypto: iaa - remove unneeded semicolon. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=7772 Signed-off-by: Jiapeng Chong Acked-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 5093361b0107..abef4b5d5e7e 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -976,7 +976,7 @@ static int wq_table_add_wqs(int iaa, int cpu) pr_debug("rebalance: added wq for cpu=%d: iaa wq %d.%d\n", cpu, iaa_wq->wq->idxd->id, iaa_wq->wq->id); n_wqs_added++; - }; + } if (!n_wqs_added) { pr_debug("couldn't find any iaa wqs!\n"); -- Gitee From 13b9a202624afad6bb64526d482ec8c16a086d88 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Wed, 27 Dec 2023 14:28:32 -0600 Subject: [PATCH 0971/2138] crypto: iaa - Account for cpu-less numa nodes ANBZ: #9252 commit b8910630c967ffee582289451ddb5f9f19c26872 upstream. In some configurations e.g. systems with CXL, a numa node can have 0 cpus and cpumask_nth() will return a cpu value that doesn't exist, which will result in an attempt to add an entry to the wq table at a bad index. To fix this, when iterating the cpus for a node, skip any node that doesn't have cpus. Also, as a precaution, add a warning and bail if cpumask_nth() returns a nonexistent cpu. Intel-SIG: commit b8910630c967 crypto: iaa - Account for cpu-less numa nodes. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Reported-by: Zhang, Rex Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index abef4b5d5e7e..dfd3baf0a8d8 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -1017,12 +1017,17 @@ static void rebalance_wq_table(void) return; } - for_each_online_node(node) { + for_each_node_with_cpus(node) { node_cpus = cpumask_of_node(node); for (cpu = 0; cpu < nr_cpus_per_node; cpu++) { int node_cpu = cpumask_nth(cpu, node_cpus); + if (WARN_ON(node_cpu >= nr_cpu_ids)) { + pr_debug("node_cpu %d doesn't exist!\n", node_cpu); + return; + } + if ((cpu % cpus_per_iaa) == 0) iaa++; @@ -2095,9 +2100,15 @@ static struct idxd_device_driver iaa_crypto_driver = { static int __init iaa_crypto_init_module(void) { int ret = 0; + int node; nr_cpus = num_online_cpus(); - nr_nodes = num_online_nodes(); + for_each_node_with_cpus(node) + nr_nodes++; + if (!nr_nodes) { + pr_err("IAA couldn't find any nodes with cpus\n"); + return -ENODEV; + } nr_cpus_per_node = nr_cpus / nr_nodes; if (crypto_has_comp("deflate-generic", 0, 0)) -- Gitee From b5256d81477887520e49136a539ab7d6bd3e9c4b Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 19 Dec 2023 20:33:50 +0100 Subject: [PATCH 0972/2138] dmaengine: idxd: Remove usage of the deprecated ida_simple_xx() API ANBZ: #9252 commit 1075ee66a8c19bfa375b19c236fd6a22a867f138 upstream. ida_alloc() and ida_free() should be preferred to the deprecated ida_simple_get() and ida_simple_remove(). This is less verbose. Note that the upper limit of ida_simple_get() is exclusive, but the one of ida_alloc_range() is inclusive. 
Sothis change allows one more device. MINORMASK is ((1U << MINORBITS) - 1), so allowing MINORMASK as a maximum value makes sense. It is also consistent with other "ida_.*MINORMASK" and "ida_*MINOR()" usages. Intel-SIG: commit 1075ee66a8c1 dmaengine: idxd: Remove usage of the deprecated ida_simple_xx() API. Incremental backporting patches for DSA/IAA on Intel Xeon platform. Signed-off-by: Christophe JAILLET Reviewed-by: Fenghua Yu Acked-by: Lijun Pan Link: https://lore.kernel.org/r/ac991f5f42112fa782a881d391d447529cbc4a23.1702967302.git.christophe.jaillet@wanadoo.fr Signed-off-by: Vinod Koul [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/dma/idxd/cdev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 9be5de8f51a1..73407e577527 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -165,7 +165,7 @@ static void idxd_cdev_dev_release(struct device *dev) struct idxd_wq *wq = idxd_cdev->wq; cdev_ctx = &ictx[wq->idxd->data->type]; - ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor); + ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor); kfree(idxd_cdev); } @@ -539,7 +539,7 @@ int idxd_wq_add_cdev(struct idxd_wq *wq) cdev = &idxd_cdev->cdev; dev = cdev_dev(idxd_cdev); cdev_ctx = &ictx[wq->idxd->data->type]; - minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL); + minor = ida_alloc_max(&cdev_ctx->minor_ida, MINORMASK, GFP_KERNEL); if (minor < 0) { kfree(idxd_cdev); return minor; -- Gitee From c8e8eff881ceb7606df707d357311a4d05338d0f Mon Sep 17 00:00:00 2001 From: "Ricardo B. Marliere" Date: Tue, 13 Feb 2024 11:43:15 -0300 Subject: [PATCH 0973/2138] dmaengine: idxd: make dsa_bus_type const ANBZ: #9252 commit cf497f3585f944ff42cc2c84453569f37a1bd6b9 upstream. 
Since commit d492cc2573a0 ("driver core: device.h: make struct bus_type a const *"), the driver core can properly handle constant struct bus_type, move the dsa_bus_type variable to be a constant structure as well, placing it into read-only memory which can not be modified at runtime. Intel-SIG: commit cf497f3585f9 dmaengine: idxd: make dsa_bus_type const. Incremental backporting patches for DSA/IAA on Intel Xeon platform. Cc: Greg Kroah-Hartman Suggested-by: Greg Kroah-Hartman Signed-off-by: "Ricardo B. Marliere" Reviewed-by: Dave Jiang Reviewed-by: Fenghua Yu Reviewed-by: Greg Kroah-Hartman Link: https://lore.kernel.org/r/20240213-bus_cleanup-idxd-v1-1-c3e703675387@marliere.net Signed-off-by: Vinod Koul [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/dma/idxd/bus.c | 2 +- drivers/dma/idxd/idxd.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/idxd/bus.c b/drivers/dma/idxd/bus.c index 0c9e689a2e77..b83b27e04f2a 100644 --- a/drivers/dma/idxd/bus.c +++ b/drivers/dma/idxd/bus.c @@ -72,7 +72,7 @@ static int idxd_bus_uevent(const struct device *dev, struct kobj_uevent_env *env return add_uevent_var(env, "MODALIAS=" IDXD_DEVICES_MODALIAS_FMT, 0); } -struct bus_type dsa_bus_type = { +const struct bus_type dsa_bus_type = { .name = "dsa", .match = idxd_config_bus_match, .probe = idxd_config_bus_probe, diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index eb73cabb4ad0..4a35abad006c 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -518,7 +518,7 @@ static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable) iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); } -extern struct bus_type dsa_bus_type; +extern const struct bus_type dsa_bus_type; extern bool support_enqcmd; extern struct ida idxd_ida; -- Gitee From e0dbded62aedfa70867dfa153e458f7e2d6c63a5 Mon Sep 17 00:00:00 2001 From: 
"Ricardo B. Marliere" Date: Mon, 19 Feb 2024 08:46:56 -0300 Subject: [PATCH 0974/2138] dmaengine: idxd: constify the struct device_type usage ANBZ: #9252 commit 1e0a2852a134833f6827de15cd62ea0ed19f1b60 upstream. Since commit aed65af1cc2f ("drivers: make device_type const"), the driver core can properly handle constant struct device_type. Move the dsa_device_type, iax_device_type, idxd_wq_device_type, idxd_cdev_file_type, idxd_cdev_device_type and idxd_group_device_type variables to be constant structures as well, placing it into read-only memory which can not be modified at runtime. Intel-SIG: commit 1e0a2852a134 dmaengine: idxd: constify the struct device_type usage. Incremental backporting patches for DSA/IAA on Intel Xeon platform. Cc: Greg Kroah-Hartman Signed-off-by: "Ricardo B. Marliere" Reviewed-by: Fenghua Yu Link: https://lore.kernel.org/r/20240219-device_cleanup-dmaengine-v1-1-9f72f3cf3587@marliere.net Signed-off-by: Vinod Koul [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/dma/idxd/cdev.c | 4 ++-- drivers/dma/idxd/idxd.h | 12 ++++++------ drivers/dma/idxd/sysfs.c | 10 +++++----- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 73407e577527..fd9bbee4cc42 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -152,7 +152,7 @@ static void idxd_file_dev_release(struct device *dev) mutex_unlock(&wq->wq_lock); } -static struct device_type idxd_cdev_file_type = { +static const struct device_type idxd_cdev_file_type = { .name = "idxd_file", .release = idxd_file_dev_release, .groups = cdev_file_attribute_groups, @@ -169,7 +169,7 @@ static void idxd_cdev_dev_release(struct device *dev) kfree(idxd_cdev); } -static struct device_type idxd_cdev_device_type = { +static const struct device_type idxd_cdev_device_type = { .name = "idxd_cdev", .release = idxd_cdev_dev_release, }; 
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 4a35abad006c..868b724a3b75 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -282,7 +282,7 @@ typedef int (*load_device_defaults_fn_t) (struct idxd_device *idxd); struct idxd_driver_data { const char *name_prefix; enum idxd_type type; - struct device_type *dev_type; + const struct device_type *dev_type; int compl_size; int align; int evl_cr_off; @@ -522,11 +522,11 @@ extern const struct bus_type dsa_bus_type; extern bool support_enqcmd; extern struct ida idxd_ida; -extern struct device_type dsa_device_type; -extern struct device_type iax_device_type; -extern struct device_type idxd_wq_device_type; -extern struct device_type idxd_engine_device_type; -extern struct device_type idxd_group_device_type; +extern const struct device_type dsa_device_type; +extern const struct device_type iax_device_type; +extern const struct device_type idxd_wq_device_type; +extern const struct device_type idxd_engine_device_type; +extern const struct device_type idxd_group_device_type; static inline bool is_dsa_dev(struct idxd_dev *idxd_dev) { diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 3a5ce477a81a..f706eae0e76b 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -91,7 +91,7 @@ static void idxd_conf_engine_release(struct device *dev) kfree(engine); } -struct device_type idxd_engine_device_type = { +const struct device_type idxd_engine_device_type = { .name = "engine", .release = idxd_conf_engine_release, .groups = idxd_engine_attribute_groups, @@ -577,7 +577,7 @@ static void idxd_conf_group_release(struct device *dev) kfree(group); } -struct device_type idxd_group_device_type = { +const struct device_type idxd_group_device_type = { .name = "group", .release = idxd_conf_group_release, .groups = idxd_group_attribute_groups, @@ -1392,7 +1392,7 @@ static void idxd_conf_wq_release(struct device *dev) kfree(wq); } -struct device_type idxd_wq_device_type = { 
+const struct device_type idxd_wq_device_type = { .name = "wq", .release = idxd_conf_wq_release, .groups = idxd_wq_attribute_groups, @@ -1821,13 +1821,13 @@ static void idxd_conf_device_release(struct device *dev) kfree(idxd); } -struct device_type dsa_device_type = { +const struct device_type dsa_device_type = { .name = "dsa", .release = idxd_conf_device_release, .groups = idxd_attribute_groups, }; -struct device_type iax_device_type = { +const struct device_type iax_device_type = { .name = "iax", .release = idxd_conf_device_release, .groups = idxd_attribute_groups, -- Gitee From 32d7dcd432b5f44835e195c646e2465189fc495c Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Mon, 8 Jan 2024 16:53:48 -0600 Subject: [PATCH 0975/2138] crypto: iaa - Remove header table code ANBZ: #9252 commit 3274819b3c81c18c01e3c0e0ea726870ec237ac0 upstream. The header table and related code is currently unused - it was included and used for canned mode, but canned mode has been removed, so this code can be safely removed as well. This indirectly fixes a bug reported by Dan Carpenter. Intel-SIG: commit 3274819b3c81 crypto: iaa - Remove header table code. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Reported-by: Dan Carpenter Closes: https://lore.kernel.org/linux-crypto/b2e0bd974981291e16882686a2b9b1db3986abe4.camel@linux.intel.com/T/#m4403253d6a4347a925fab4fc1cdb4ef7c095fb86 Signed-off-by: Tom Zanussi Reviewed-by: Dave Jiang Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto.h | 25 ---- .../crypto/intel/iaa/iaa_crypto_comp_fixed.c | 1 - drivers/crypto/intel/iaa/iaa_crypto_main.c | 108 +----------------- 3 files changed, 3 insertions(+), 131 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h index 014420f7beb0..2524091a5f70 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto.h +++ b/drivers/crypto/intel/iaa/iaa_crypto.h @@ -59,10 +59,8 @@ struct iaa_device_compression_mode { const char *name; struct aecs_comp_table_record *aecs_comp_table; - struct aecs_decomp_table_record *aecs_decomp_table; dma_addr_t aecs_comp_table_dma_addr; - dma_addr_t aecs_decomp_table_dma_addr; }; /* Representation of IAA device with wqs, populated by probe */ @@ -107,23 +105,6 @@ struct aecs_comp_table_record { u32 reserved_padding[2]; } __packed; -/* AECS for decompress */ -struct aecs_decomp_table_record { - u32 crc; - u32 xor_checksum; - u32 low_filter_param; - u32 high_filter_param; - u32 output_mod_idx; - u32 drop_init_decomp_out_bytes; - u32 reserved[36]; - u32 output_accum_data[2]; - u32 out_bits_valid; - u32 bit_off_indexing; - u32 input_accum_data[64]; - u8 size_qw[32]; - u32 decomp_state[1220]; -} __packed; - int iaa_aecs_init_fixed(void); void iaa_aecs_cleanup_fixed(void); @@ -136,9 +117,6 @@ struct iaa_compression_mode { int ll_table_size; u32 *d_table; int d_table_size; - u32 *header_table; - int header_table_size; - u16 gen_decomp_table_flags; iaa_dev_comp_init_fn_t init; iaa_dev_comp_free_fn_t free; }; @@ -148,9 +126,6 @@ int add_iaa_compression_mode(const char 
*name, int ll_table_size, const u32 *d_table, int d_table_size, - const u8 *header_table, - int header_table_size, - u16 gen_decomp_table_flags, iaa_dev_comp_init_fn_t init, iaa_dev_comp_free_fn_t free); diff --git a/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c index 45cf5d74f0fb..19d9a333ac49 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c @@ -78,7 +78,6 @@ int iaa_aecs_init_fixed(void) sizeof(fixed_ll_sym), fixed_d_sym, sizeof(fixed_d_sym), - NULL, 0, 0, init_fixed_mode, NULL); if (!ret) pr_debug("IAA fixed compression mode initialized\n"); diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index dfd3baf0a8d8..39a5fc905c4d 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -258,16 +258,14 @@ static void free_iaa_compression_mode(struct iaa_compression_mode *mode) kfree(mode->name); kfree(mode->ll_table); kfree(mode->d_table); - kfree(mode->header_table); kfree(mode); } /* - * IAA Compression modes are defined by an ll_table, a d_table, and an - * optional header_table. These tables are typically generated and - * captured using statistics collected from running actual - * compress/decompress workloads. + * IAA Compression modes are defined by an ll_table and a d_table. + * These tables are typically generated and captured using statistics + * collected from running actual compress/decompress workloads. 
* * A module or other kernel code can add and remove compression modes * with a given name using the exported @add_iaa_compression_mode() @@ -315,9 +313,6 @@ EXPORT_SYMBOL_GPL(remove_iaa_compression_mode); * @ll_table_size: The ll table size in bytes * @d_table: The d table * @d_table_size: The d table size in bytes - * @header_table: Optional header table - * @header_table_size: Optional header table size in bytes - * @gen_decomp_table_flags: Otional flags used to generate the decomp table * @init: Optional callback function to init the compression mode data * @free: Optional callback function to free the compression mode data * @@ -330,9 +325,6 @@ int add_iaa_compression_mode(const char *name, int ll_table_size, const u32 *d_table, int d_table_size, - const u8 *header_table, - int header_table_size, - u16 gen_decomp_table_flags, iaa_dev_comp_init_fn_t init, iaa_dev_comp_free_fn_t free) { @@ -370,16 +362,6 @@ int add_iaa_compression_mode(const char *name, mode->d_table_size = d_table_size; } - if (header_table) { - mode->header_table = kzalloc(header_table_size, GFP_KERNEL); - if (!mode->header_table) - goto free; - memcpy(mode->header_table, header_table, header_table_size); - mode->header_table_size = header_table_size; - } - - mode->gen_decomp_table_flags = gen_decomp_table_flags; - mode->init = init; mode->free = free; @@ -420,10 +402,6 @@ static void free_device_compression_mode(struct iaa_device *iaa_device, if (device_mode->aecs_comp_table) dma_free_coherent(dev, size, device_mode->aecs_comp_table, device_mode->aecs_comp_table_dma_addr); - if (device_mode->aecs_decomp_table) - dma_free_coherent(dev, size, device_mode->aecs_decomp_table, - device_mode->aecs_decomp_table_dma_addr); - kfree(device_mode); } @@ -440,73 +418,6 @@ static int check_completion(struct device *dev, bool compress, bool only_once); -static int decompress_header(struct iaa_device_compression_mode *device_mode, - struct iaa_compression_mode *mode, - struct idxd_wq *wq) -{ - dma_addr_t 
src_addr, src2_addr; - struct idxd_desc *idxd_desc; - struct iax_hw_desc *desc; - struct device *dev; - int ret = 0; - - idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); - if (IS_ERR(idxd_desc)) - return PTR_ERR(idxd_desc); - - desc = idxd_desc->iax_hw; - - dev = &wq->idxd->pdev->dev; - - src_addr = dma_map_single(dev, (void *)mode->header_table, - mode->header_table_size, DMA_TO_DEVICE); - dev_dbg(dev, "%s: mode->name %s, src_addr %llx, dev %p, src %p, slen %d\n", - __func__, mode->name, src_addr, dev, - mode->header_table, mode->header_table_size); - if (unlikely(dma_mapping_error(dev, src_addr))) { - dev_dbg(dev, "dma_map_single err, exiting\n"); - ret = -ENOMEM; - return ret; - } - - desc->flags = IAX_AECS_GEN_FLAG; - desc->opcode = IAX_OPCODE_DECOMPRESS; - - desc->src1_addr = (u64)src_addr; - desc->src1_size = mode->header_table_size; - - src2_addr = device_mode->aecs_decomp_table_dma_addr; - desc->src2_addr = (u64)src2_addr; - desc->src2_size = 1088; - dev_dbg(dev, "%s: mode->name %s, src2_addr %llx, dev %p, src2_size %d\n", - __func__, mode->name, desc->src2_addr, dev, desc->src2_size); - desc->max_dst_size = 0; // suppressed output - - desc->decompr_flags = mode->gen_decomp_table_flags; - - desc->priv = 0; - - desc->completion_addr = idxd_desc->compl_dma; - - ret = idxd_submit_desc(wq, idxd_desc); - if (ret) { - pr_err("%s: submit_desc failed ret=0x%x\n", __func__, ret); - goto out; - } - - ret = check_completion(dev, idxd_desc->iax_completion, false, false); - if (ret) - dev_dbg(dev, "%s: mode->name %s check_completion failed ret=%d\n", - __func__, mode->name, ret); - else - dev_dbg(dev, "%s: mode->name %s succeeded\n", __func__, - mode->name); -out: - dma_unmap_single(dev, src_addr, 1088, DMA_TO_DEVICE); - - return ret; -} - static int init_device_compression_mode(struct iaa_device *iaa_device, struct iaa_compression_mode *mode, int idx, struct idxd_wq *wq) @@ -529,24 +440,11 @@ static int init_device_compression_mode(struct iaa_device *iaa_device, if 
(!device_mode->aecs_comp_table) goto free; - device_mode->aecs_decomp_table = dma_alloc_coherent(dev, size, - &device_mode->aecs_decomp_table_dma_addr, GFP_KERNEL); - if (!device_mode->aecs_decomp_table) - goto free; - /* Add Huffman table to aecs */ memset(device_mode->aecs_comp_table, 0, sizeof(*device_mode->aecs_comp_table)); memcpy(device_mode->aecs_comp_table->ll_sym, mode->ll_table, mode->ll_table_size); memcpy(device_mode->aecs_comp_table->d_sym, mode->d_table, mode->d_table_size); - if (mode->header_table) { - ret = decompress_header(device_mode, mode, wq); - if (ret) { - pr_debug("iaa header decompression failed: ret=%d\n", ret); - goto free; - } - } - if (mode->init) { ret = mode->init(device_mode); if (ret) -- Gitee From 96974aeaf54017b2df5a161b156a306cc7ee5784 Mon Sep 17 00:00:00 2001 From: Minjie Du Date: Tue, 9 Jan 2024 10:19:14 +0800 Subject: [PATCH 0976/2138] crypto: iaa - Remove unnecessary debugfs_create_dir() error check in iaa_crypto_debugfs_init() ANBZ: #9252 commit cc342dba0d39f226f4a5e26194404c3785481470 upstream. This patch removes the debugfs_create_dir() error checking in iaa_crypto_debugfs_init(). Because the debugfs_create_dir() is developed in a way that the caller can safely handle the errors that occur during the creation of DebugFS nodes. Intel-SIG: commit cc342dba0d39 crypto: iaa - Remove unnecessary debugfs_create_dir() error check in iaa_crypto_debugfs_init(). Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Signed-off-by: Minjie Du Acked-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto_stats.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c index 2e3b7b73af20..cbf87d0effe3 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_stats.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c @@ -275,8 +275,6 @@ int __init iaa_crypto_debugfs_init(void) return -ENODEV; iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL); - if (!iaa_crypto_debugfs_root) - return -ENOMEM; debugfs_create_u64("max_comp_delay_ns", 0644, iaa_crypto_debugfs_root, &max_comp_delay_ns); -- Gitee From d4e75921be3c3dfa873e7e3d24569fc247aaa89b Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Sun, 25 Feb 2024 14:11:33 -0600 Subject: [PATCH 0977/2138] crypto: iaa - Fix async_disable descriptor leak ANBZ: #9252 commit 262534ddc88dfea7474ed18adfecf856e4fbe054 upstream. The disable_async paths of iaa_compress/decompress() don't free idxd descriptors in the async_disable case. Currently this only happens in the testcases where req->dst is set to null. Add a test to free them in those paths. Intel-SIG: commit 262534ddc88d crypto: iaa - Fix async_disable descriptor leak. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 39a5fc905c4d..85ee4c965ccf 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -1222,7 +1222,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, *compression_crc = idxd_desc->iax_completion->crc; - if (!ctx->async_mode) + if (!ctx->async_mode || disable_async) idxd_free_desc(wq, idxd_desc); out: return ret; @@ -1468,7 +1468,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, *dlen = req->dlen; - if (!ctx->async_mode) + if (!ctx->async_mode || disable_async) idxd_free_desc(wq, idxd_desc); /* Update stats */ -- Gitee From 3e7bcf3258cf3d7ebe61023cae1d88732be2e3a9 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Sun, 25 Feb 2024 14:11:34 -0600 Subject: [PATCH 0978/2138] crypto: iaa - Fix comp/decomp delay statistics ANBZ: #9252 commit cdb083e73d632afcc5a931d31bb37445580f4bfb upstream. The comp/decomp delay statistics currently have no callers; somehow they were dropped during refactoring. There originally were also two sets, one for the async algorithm, the other for the synchronous version. Because the synchronous algorithm was dropped, one set should be removed. To keep it consistent with the rest of the stats, and since there's no ambiguity, remove the acomp/adecomp versions. Also add back the callers. Intel-SIG: commit cdb083e73d63 crypto: iaa - Fix comp/decomp delay statistics. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Reported-by: Rex Zhang Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 9 +++++++ drivers/crypto/intel/iaa/iaa_crypto_stats.c | 28 --------------------- drivers/crypto/intel/iaa/iaa_crypto_stats.h | 8 +++--- 3 files changed, 13 insertions(+), 32 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 85ee4c965ccf..b54f93c64033 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -1494,6 +1494,7 @@ static int iaa_comp_acompress(struct acomp_req *req) u32 compression_crc; struct idxd_wq *wq; struct device *dev; + u64 start_time_ns; int order = -1; compression_ctx = crypto_tfm_ctx(tfm); @@ -1567,8 +1568,10 @@ static int iaa_comp_acompress(struct acomp_req *req) " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, req->dst, req->dlen, sg_dma_len(req->dst)); + start_time_ns = iaa_get_ts(); ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, &req->dlen, &compression_crc, disable_async); + update_max_comp_delay_ns(start_time_ns); if (ret == -EINPROGRESS) return ret; @@ -1615,6 +1618,7 @@ static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) struct iaa_wq *iaa_wq; struct device *dev; struct idxd_wq *wq; + u64 start_time_ns; int order = -1; cpu = get_cpu(); @@ -1671,8 +1675,10 @@ static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, req->dst, req->dlen, sg_dma_len(req->dst)); + start_time_ns = iaa_get_ts(); ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, dst_addr, &req->dlen, true); + update_max_decomp_delay_ns(start_time_ns); if (ret == -EOVERFLOW) { dma_unmap_sg(dev, req->dst, sg_nents(req->dst), 
DMA_FROM_DEVICE); req->dlen *= 2; @@ -1703,6 +1709,7 @@ static int iaa_comp_adecompress(struct acomp_req *req) int nr_sgs, cpu, ret = 0; struct iaa_wq *iaa_wq; struct device *dev; + u64 start_time_ns; struct idxd_wq *wq; if (!iaa_crypto_enabled) { @@ -1762,8 +1769,10 @@ static int iaa_comp_adecompress(struct acomp_req *req) " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, req->dst, req->dlen, sg_dma_len(req->dst)); + start_time_ns = iaa_get_ts(); ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, dst_addr, &req->dlen, false); + update_max_decomp_delay_ns(start_time_ns); if (ret == -EINPROGRESS) return ret; diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c index cbf87d0effe3..c9f83af4b307 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_stats.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c @@ -22,8 +22,6 @@ static u64 total_decomp_calls; static u64 total_sw_decomp_calls; static u64 max_comp_delay_ns; static u64 max_decomp_delay_ns; -static u64 max_acomp_delay_ns; -static u64 max_adecomp_delay_ns; static u64 total_comp_bytes_out; static u64 total_decomp_bytes_in; static u64 total_completion_einval_errors; @@ -92,26 +90,6 @@ void update_max_decomp_delay_ns(u64 start_time_ns) max_decomp_delay_ns = time_diff; } -void update_max_acomp_delay_ns(u64 start_time_ns) -{ - u64 time_diff; - - time_diff = ktime_get_ns() - start_time_ns; - - if (time_diff > max_acomp_delay_ns) - max_acomp_delay_ns = time_diff; -} - -void update_max_adecomp_delay_ns(u64 start_time_ns) -{ - u64 time_diff; - - time_diff = ktime_get_ns() - start_time_ns; - - if (time_diff > max_adecomp_delay_ns) - max_adecomp_delay_ns = time_diff; -} - void update_wq_comp_calls(struct idxd_wq *idxd_wq) { struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); @@ -151,8 +129,6 @@ static void reset_iaa_crypto_stats(void) total_sw_decomp_calls = 0; max_comp_delay_ns = 0; max_decomp_delay_ns = 0; - max_acomp_delay_ns = 0; - max_adecomp_delay_ns = 0; 
total_comp_bytes_out = 0; total_decomp_bytes_in = 0; total_completion_einval_errors = 0; @@ -280,10 +256,6 @@ int __init iaa_crypto_debugfs_init(void) iaa_crypto_debugfs_root, &max_comp_delay_ns); debugfs_create_u64("max_decomp_delay_ns", 0644, iaa_crypto_debugfs_root, &max_decomp_delay_ns); - debugfs_create_u64("max_acomp_delay_ns", 0644, - iaa_crypto_debugfs_root, &max_comp_delay_ns); - debugfs_create_u64("max_adecomp_delay_ns", 0644, - iaa_crypto_debugfs_root, &max_decomp_delay_ns); debugfs_create_u64("total_comp_calls", 0644, iaa_crypto_debugfs_root, &total_comp_calls); debugfs_create_u64("total_decomp_calls", 0644, diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.h b/drivers/crypto/intel/iaa/iaa_crypto_stats.h index c10b87b86fa4..c916ca83f070 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_stats.h +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.h @@ -15,8 +15,6 @@ void update_total_sw_decomp_calls(void); void update_total_decomp_bytes_in(int n); void update_max_comp_delay_ns(u64 start_time_ns); void update_max_decomp_delay_ns(u64 start_time_ns); -void update_max_acomp_delay_ns(u64 start_time_ns); -void update_max_adecomp_delay_ns(u64 start_time_ns); void update_completion_einval_errs(void); void update_completion_timeout_errs(void); void update_completion_comp_buf_overflow_errs(void); @@ -26,6 +24,8 @@ void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n); void update_wq_decomp_calls(struct idxd_wq *idxd_wq); void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n); +static inline u64 iaa_get_ts(void) { return ktime_get_ns(); } + #else static inline int iaa_crypto_debugfs_init(void) { return 0; } static inline void iaa_crypto_debugfs_cleanup(void) {} @@ -37,8 +37,6 @@ static inline void update_total_sw_decomp_calls(void) {} static inline void update_total_decomp_bytes_in(int n) {} static inline void update_max_comp_delay_ns(u64 start_time_ns) {} static inline void update_max_decomp_delay_ns(u64 start_time_ns) {} -static inline void 
update_max_acomp_delay_ns(u64 start_time_ns) {} -static inline void update_max_adecomp_delay_ns(u64 start_time_ns) {} static inline void update_completion_einval_errs(void) {} static inline void update_completion_timeout_errs(void) {} static inline void update_completion_comp_buf_overflow_errs(void) {} @@ -48,6 +46,8 @@ static inline void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) {} static inline void update_wq_decomp_calls(struct idxd_wq *idxd_wq) {} static inline void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) {} +static inline u64 iaa_get_ts(void) { return 0; } + #endif // CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS #endif -- Gitee From 10db2d876d9e8066f91f11bd13a36196dc4dd1df Mon Sep 17 00:00:00 2001 From: Barry Song Date: Thu, 29 Feb 2024 23:14:49 +1300 Subject: [PATCH 0979/2138] crypto: iaa - fix the missing CRYPTO_ALG_ASYNC in cra_flags ANBZ: #9252 commit 30dd94dba350043a32cfe9cb478ed621aae3c5c9 upstream. Add the missing CRYPTO_ALG_ASYNC flag since intel iaa driver works asynchronously. Intel-SIG: commit 30dd94dba350 crypto: iaa - fix the missing CRYPTO_ALG_ASYNC in cra_flags. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Signed-off-by: Barry Song Acked-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index b54f93c64033..1cd304de5388 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -1823,6 +1823,7 @@ static struct acomp_alg iaa_acomp_fixed_deflate = { .base = { .cra_name = "deflate", .cra_driver_name = "deflate-iaa", + .cra_flags = CRYPTO_ALG_ASYNC, .cra_ctxsize = sizeof(struct iaa_compression_ctx), .cra_module = THIS_MODULE, .cra_priority = IAA_ALG_PRIORITY, -- Gitee From 458c7b012ef5631ba1503fbd80c1e6e18e44be1d Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Thu, 21 Mar 2024 16:08:45 -0500 Subject: [PATCH 0980/2138] crypto: iaa - Fix nr_cpus < nr_iaa case ANBZ: #9252 commit 5a7e89d3315d1be86aff8a8bf849023cda6547f7 upstream. If nr_cpus < nr_iaa, the calculated cpus_per_iaa will be 0, which causes a divide-by-0 in rebalance_wq_table(). Make sure cpus_per_iaa is 1 in that case, and also in the nr_iaa == 0 case, even though cpus_per_iaa is never used if nr_iaa == 0, for paranoia. Intel-SIG: commit 5a7e89d3315d crypto: iaa - Fix nr_cpus < nr_iaa case. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Cc: # v6.8+ Reported-by: Jerry Snitselaar Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 1cd304de5388..b2191ade9011 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -806,6 +806,8 @@ static int save_iaa_wq(struct idxd_wq *wq) return -EINVAL; cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa; + if (!cpus_per_iaa) + cpus_per_iaa = 1; out: return 0; } @@ -821,10 +823,12 @@ static void remove_iaa_wq(struct idxd_wq *wq) } } - if (nr_iaa) + if (nr_iaa) { cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa; - else - cpus_per_iaa = 0; + if (!cpus_per_iaa) + cpus_per_iaa = 1; + } else + cpus_per_iaa = 1; } static int wq_table_add_wqs(int iaa, int cpu) -- Gitee From 4653ba43d9e86a43a5558f5b189f11781124e880 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Mon, 4 Mar 2024 15:20:08 -0600 Subject: [PATCH 0981/2138] crypto: iaa - fix decomp_bytes_in stats ANBZ: #9252 commit 19b0ed5ddc8b554d1dfc34f870dc56e706ed205a upstream. Decomp stats should use slen, not dlen. Change both the global and per-wq stats to use the correct value. Intel-SIG: commit 19b0ed5ddc8b crypto: iaa - fix decomp_bytes_in stats. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index b2191ade9011..cc4200579e54 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -1079,8 +1079,8 @@ static void iaa_desc_complete(struct idxd_desc *idxd_desc, update_total_comp_bytes_out(ctx->req->dlen); update_wq_comp_bytes(iaa_wq->wq, ctx->req->dlen); } else { - update_total_decomp_bytes_in(ctx->req->dlen); - update_wq_decomp_bytes(iaa_wq->wq, ctx->req->dlen); + update_total_decomp_bytes_in(ctx->req->slen); + update_wq_decomp_bytes(iaa_wq->wq, ctx->req->slen); } if (ctx->compress && compression_ctx->verify_compress) { -- Gitee From fd3322761811c3585237933116fa46a23d227b47 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Mon, 4 Mar 2024 15:20:09 -0600 Subject: [PATCH 0982/2138] crypto: iaa - Remove comp/decomp delay statistics ANBZ: #9252 commit 956cb8a37039306379a1a926ccb1b55e08ffae80 upstream. As part of the simplification/cleanup of the iaa statistics, remove the comp/decomp delay statistics. They're actually not really useful and can be/are being more flexibly generated using standard kernel tracing infrastructure. Intel-SIG: commit 956cb8a37039 crypto: iaa - Remove comp/decomp delay statistics. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 9 ------- drivers/crypto/intel/iaa/iaa_crypto_stats.c | 28 --------------------- drivers/crypto/intel/iaa/iaa_crypto_stats.h | 8 ------ 3 files changed, 45 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index cc4200579e54..6229b24b0d35 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -1498,7 +1498,6 @@ static int iaa_comp_acompress(struct acomp_req *req) u32 compression_crc; struct idxd_wq *wq; struct device *dev; - u64 start_time_ns; int order = -1; compression_ctx = crypto_tfm_ctx(tfm); @@ -1572,10 +1571,8 @@ static int iaa_comp_acompress(struct acomp_req *req) " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, req->dst, req->dlen, sg_dma_len(req->dst)); - start_time_ns = iaa_get_ts(); ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, &req->dlen, &compression_crc, disable_async); - update_max_comp_delay_ns(start_time_ns); if (ret == -EINPROGRESS) return ret; @@ -1622,7 +1619,6 @@ static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) struct iaa_wq *iaa_wq; struct device *dev; struct idxd_wq *wq; - u64 start_time_ns; int order = -1; cpu = get_cpu(); @@ -1679,10 +1675,8 @@ static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, req->dst, req->dlen, sg_dma_len(req->dst)); - start_time_ns = iaa_get_ts(); ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, dst_addr, &req->dlen, true); - update_max_decomp_delay_ns(start_time_ns); if (ret == -EOVERFLOW) { dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); req->dlen *= 2; @@ 
-1713,7 +1707,6 @@ static int iaa_comp_adecompress(struct acomp_req *req) int nr_sgs, cpu, ret = 0; struct iaa_wq *iaa_wq; struct device *dev; - u64 start_time_ns; struct idxd_wq *wq; if (!iaa_crypto_enabled) { @@ -1773,10 +1766,8 @@ static int iaa_comp_adecompress(struct acomp_req *req) " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, req->dst, req->dlen, sg_dma_len(req->dst)); - start_time_ns = iaa_get_ts(); ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, dst_addr, &req->dlen, false); - update_max_decomp_delay_ns(start_time_ns); if (ret == -EINPROGRESS) return ret; diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c index c9f83af4b307..7820062a91e5 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_stats.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c @@ -20,8 +20,6 @@ static u64 total_comp_calls; static u64 total_decomp_calls; static u64 total_sw_decomp_calls; -static u64 max_comp_delay_ns; -static u64 max_decomp_delay_ns; static u64 total_comp_bytes_out; static u64 total_decomp_bytes_in; static u64 total_completion_einval_errors; @@ -70,26 +68,6 @@ void update_completion_comp_buf_overflow_errs(void) total_completion_comp_buf_overflow_errors++; } -void update_max_comp_delay_ns(u64 start_time_ns) -{ - u64 time_diff; - - time_diff = ktime_get_ns() - start_time_ns; - - if (time_diff > max_comp_delay_ns) - max_comp_delay_ns = time_diff; -} - -void update_max_decomp_delay_ns(u64 start_time_ns) -{ - u64 time_diff; - - time_diff = ktime_get_ns() - start_time_ns; - - if (time_diff > max_decomp_delay_ns) - max_decomp_delay_ns = time_diff; -} - void update_wq_comp_calls(struct idxd_wq *idxd_wq) { struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); @@ -127,8 +105,6 @@ static void reset_iaa_crypto_stats(void) total_comp_calls = 0; total_decomp_calls = 0; total_sw_decomp_calls = 0; - max_comp_delay_ns = 0; - max_decomp_delay_ns = 0; total_comp_bytes_out = 0; total_decomp_bytes_in = 0; 
total_completion_einval_errors = 0; @@ -252,10 +228,6 @@ int __init iaa_crypto_debugfs_init(void) iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL); - debugfs_create_u64("max_comp_delay_ns", 0644, - iaa_crypto_debugfs_root, &max_comp_delay_ns); - debugfs_create_u64("max_decomp_delay_ns", 0644, - iaa_crypto_debugfs_root, &max_decomp_delay_ns); debugfs_create_u64("total_comp_calls", 0644, iaa_crypto_debugfs_root, &total_comp_calls); debugfs_create_u64("total_decomp_calls", 0644, diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.h b/drivers/crypto/intel/iaa/iaa_crypto_stats.h index c916ca83f070..3787a5f507eb 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_stats.h +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.h @@ -13,8 +13,6 @@ void update_total_comp_bytes_out(int n); void update_total_decomp_calls(void); void update_total_sw_decomp_calls(void); void update_total_decomp_bytes_in(int n); -void update_max_comp_delay_ns(u64 start_time_ns); -void update_max_decomp_delay_ns(u64 start_time_ns); void update_completion_einval_errs(void); void update_completion_timeout_errs(void); void update_completion_comp_buf_overflow_errs(void); @@ -24,8 +22,6 @@ void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n); void update_wq_decomp_calls(struct idxd_wq *idxd_wq); void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n); -static inline u64 iaa_get_ts(void) { return ktime_get_ns(); } - #else static inline int iaa_crypto_debugfs_init(void) { return 0; } static inline void iaa_crypto_debugfs_cleanup(void) {} @@ -35,8 +31,6 @@ static inline void update_total_comp_bytes_out(int n) {} static inline void update_total_decomp_calls(void) {} static inline void update_total_sw_decomp_calls(void) {} static inline void update_total_decomp_bytes_in(int n) {} -static inline void update_max_comp_delay_ns(u64 start_time_ns) {} -static inline void update_max_decomp_delay_ns(u64 start_time_ns) {} static inline void update_completion_einval_errs(void) {} static inline void 
update_completion_timeout_errs(void) {} static inline void update_completion_comp_buf_overflow_errs(void) {} @@ -46,8 +40,6 @@ static inline void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) {} static inline void update_wq_decomp_calls(struct idxd_wq *idxd_wq) {} static inline void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) {} -static inline u64 iaa_get_ts(void) { return 0; } - #endif // CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS #endif -- Gitee From f68817ff3eb1ecbc252706c85c6b3aeef4379694 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Mon, 4 Mar 2024 15:20:10 -0600 Subject: [PATCH 0983/2138] crypto: iaa - Add global_stats file and remove individual stat files ANBZ: #9252 commit c21fb22df63d51bb26d34023b0d9651a15442eb6 upstream. Currently, the wq_stats output also includes the global stats, while the individual global stats are also available as separate debugfs files. Since these are all read-only, there's really no reason to have them as separate files, especially since we already display them as global stats in the wq_stats. It makes more sense to just add a separate global_stats file to display those, and remove them from the wq_stats, as well as removing the individual stats files. Intel-SIG: commit c21fb22df63d crypto: iaa - Add global_stats file and remove individual stat files. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- .../driver-api/crypto/iaa/iaa-crypto.rst | 76 +++++++++++-------- drivers/crypto/intel/iaa/iaa_crypto_stats.c | 30 ++++---- 2 files changed, 61 insertions(+), 45 deletions(-) diff --git a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst index de587cf9cbed..7b28aef39ba0 100644 --- a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst +++ b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst @@ -321,33 +321,30 @@ driver will generate statistics which can be accessed in debugfs at:: # ls -al /sys/kernel/debug/iaa-crypto/ total 0 - drwxr-xr-x 2 root root 0 Mar 3 09:35 . - drwx------ 47 root root 0 Mar 3 09:35 .. - -rw-r--r-- 1 root root 0 Mar 3 09:35 max_acomp_delay_ns - -rw-r--r-- 1 root root 0 Mar 3 09:35 max_adecomp_delay_ns - -rw-r--r-- 1 root root 0 Mar 3 09:35 max_comp_delay_ns - -rw-r--r-- 1 root root 0 Mar 3 09:35 max_decomp_delay_ns - -rw-r--r-- 1 root root 0 Mar 3 09:35 stats_reset - -rw-r--r-- 1 root root 0 Mar 3 09:35 total_comp_bytes_out - -rw-r--r-- 1 root root 0 Mar 3 09:35 total_comp_calls - -rw-r--r-- 1 root root 0 Mar 3 09:35 total_decomp_bytes_in - -rw-r--r-- 1 root root 0 Mar 3 09:35 total_decomp_calls - -rw-r--r-- 1 root root 0 Mar 3 09:35 wq_stats - -Most of the above statisticss are self-explanatory. The wq_stats file -shows per-wq stats, a set for each iaa device and wq in addition to -some global stats:: + drwxr-xr-x 2 root root 0 Mar 3 07:55 . + drwx------ 53 root root 0 Mar 3 07:55 .. 
+ -rw-r--r-- 1 root root 0 Mar 3 07:55 global_stats + -rw-r--r-- 1 root root 0 Mar 3 07:55 stats_reset + -rw-r--r-- 1 root root 0 Mar 3 07:55 wq_stats - # cat wq_stats +The global_stats file shows a set of global statistics collected since +the driver has been loaded or reset:: + + # cat global_stats global stats: - total_comp_calls: 100 - total_decomp_calls: 100 - total_comp_bytes_out: 22800 - total_decomp_bytes_in: 22800 + total_comp_calls: 4300 + total_decomp_calls: 4164 + total_sw_decomp_calls: 0 + total_comp_bytes_out: 5993989 + total_decomp_bytes_in: 5993989 total_completion_einval_errors: 0 total_completion_timeout_errors: 0 - total_completion_comp_buf_overflow_errors: 0 + total_completion_comp_buf_overflow_errors: 136 + +The wq_stats file shows per-wq stats, a set for each iaa device and wq +in addition to some global stats:: + # cat wq_stats iaa device: id: 1 n_wqs: 1 @@ -379,21 +376,36 @@ some global stats:: iaa device: id: 5 n_wqs: 1 - comp_calls: 100 - comp_bytes: 22800 - decomp_calls: 100 - decomp_bytes: 22800 + comp_calls: 1360 + comp_bytes: 1999776 + decomp_calls: 0 + decomp_bytes: 0 wqs: name: iaa_crypto - comp_calls: 100 - comp_bytes: 22800 - decomp_calls: 100 - decomp_bytes: 22800 + comp_calls: 1360 + comp_bytes: 1999776 + decomp_calls: 0 + decomp_bytes: 0 + + iaa device: + id: 7 + n_wqs: 1 + comp_calls: 2940 + comp_bytes: 3994213 + decomp_calls: 4164 + decomp_bytes: 5993989 + wqs: + name: iaa_crypto + comp_calls: 2940 + comp_bytes: 3994213 + decomp_calls: 4164 + decomp_bytes: 5993989 + ... 
-Writing 0 to 'stats_reset' resets all the stats, including the +Writing to 'stats_reset' resets all the stats, including the per-device and per-wq stats:: - # echo 0 > stats_reset + # echo 1 > stats_reset # cat wq_stats global stats: total_comp_calls: 0 diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c index 7820062a91e5..0f225bdf2279 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_stats.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c @@ -159,7 +159,7 @@ static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device) wq_show(m, iaa_wq); } -static void global_stats_show(struct seq_file *m) +static int global_stats_show(struct seq_file *m, void *v) { seq_puts(m, "global stats:\n"); seq_printf(m, " total_comp_calls: %llu\n", total_comp_calls); @@ -173,6 +173,8 @@ static void global_stats_show(struct seq_file *m) total_completion_timeout_errors); seq_printf(m, " total_completion_comp_buf_overflow_errors: %llu\n\n", total_completion_comp_buf_overflow_errors); + + return 0; } static int wq_stats_show(struct seq_file *m, void *v) @@ -181,8 +183,6 @@ static int wq_stats_show(struct seq_file *m, void *v) mutex_lock(&iaa_devices_lock); - global_stats_show(m); - list_for_each_entry(iaa_device, &iaa_devices, list) device_stats_show(m, iaa_device); @@ -219,6 +219,18 @@ static const struct file_operations wq_stats_fops = { .release = single_release, }; +static int global_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, global_stats_show, file); +} + +static const struct file_operations global_stats_fops = { + .open = global_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + DEFINE_DEBUGFS_ATTRIBUTE(wq_stats_reset_fops, NULL, iaa_crypto_stats_reset, "%llu\n"); int __init iaa_crypto_debugfs_init(void) @@ -228,16 +240,8 @@ int __init iaa_crypto_debugfs_init(void) iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL); - 
debugfs_create_u64("total_comp_calls", 0644, - iaa_crypto_debugfs_root, &total_comp_calls); - debugfs_create_u64("total_decomp_calls", 0644, - iaa_crypto_debugfs_root, &total_decomp_calls); - debugfs_create_u64("total_sw_decomp_calls", 0644, - iaa_crypto_debugfs_root, &total_sw_decomp_calls); - debugfs_create_u64("total_comp_bytes_out", 0644, - iaa_crypto_debugfs_root, &total_comp_bytes_out); - debugfs_create_u64("total_decomp_bytes_in", 0644, - iaa_crypto_debugfs_root, &total_decomp_bytes_in); + debugfs_create_file("global_stats", 0644, iaa_crypto_debugfs_root, NULL, + &global_stats_fops); debugfs_create_file("wq_stats", 0644, iaa_crypto_debugfs_root, NULL, &wq_stats_fops); debugfs_create_file("stats_reset", 0644, iaa_crypto_debugfs_root, NULL, -- Gitee From 8b844174667b60e9a707a6264cd096510d04acb1 Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Mon, 4 Mar 2024 15:20:11 -0600 Subject: [PATCH 0984/2138] crypto: iaa - Change iaa statistics to atomic64_t ANBZ: #9252 commit 43698cd6c02ddcf3b4ffb34e5da0be3e801027fe upstream. Change all the iaa statistics to use atomic64_t instead of the current u64, to avoid potentially inconsistent counts. Intel-SIG: commit 43698cd6c02d crypto: iaa - Change iaa statistics to atomic64_t. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto.h | 16 +-- drivers/crypto/intel/iaa/iaa_crypto_stats.c | 125 +++++++++++--------- 2 files changed, 77 insertions(+), 64 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h index 2524091a5f70..56985e395263 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto.h +++ b/drivers/crypto/intel/iaa/iaa_crypto.h @@ -49,10 +49,10 @@ struct iaa_wq { struct iaa_device *iaa_device; - u64 comp_calls; - u64 comp_bytes; - u64 decomp_calls; - u64 decomp_bytes; + atomic64_t comp_calls; + atomic64_t comp_bytes; + atomic64_t decomp_calls; + atomic64_t decomp_bytes; }; struct iaa_device_compression_mode { @@ -73,10 +73,10 @@ struct iaa_device { int n_wq; struct list_head wqs; - u64 comp_calls; - u64 comp_bytes; - u64 decomp_calls; - u64 decomp_bytes; + atomic64_t comp_calls; + atomic64_t comp_bytes; + atomic64_t decomp_calls; + atomic64_t decomp_bytes; }; struct wq_table_entry { diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c index 0f225bdf2279..f5cc3d29ca19 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_stats.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c @@ -17,117 +17,117 @@ #include "iaa_crypto.h" #include "iaa_crypto_stats.h" -static u64 total_comp_calls; -static u64 total_decomp_calls; -static u64 total_sw_decomp_calls; -static u64 total_comp_bytes_out; -static u64 total_decomp_bytes_in; -static u64 total_completion_einval_errors; -static u64 total_completion_timeout_errors; -static u64 total_completion_comp_buf_overflow_errors; +static atomic64_t total_comp_calls; +static atomic64_t total_decomp_calls; +static atomic64_t total_sw_decomp_calls; +static atomic64_t total_comp_bytes_out; +static atomic64_t total_decomp_bytes_in; +static 
atomic64_t total_completion_einval_errors; +static atomic64_t total_completion_timeout_errors; +static atomic64_t total_completion_comp_buf_overflow_errors; static struct dentry *iaa_crypto_debugfs_root; void update_total_comp_calls(void) { - total_comp_calls++; + atomic64_inc(&total_comp_calls); } void update_total_comp_bytes_out(int n) { - total_comp_bytes_out += n; + atomic64_add(n, &total_comp_bytes_out); } void update_total_decomp_calls(void) { - total_decomp_calls++; + atomic64_inc(&total_decomp_calls); } void update_total_sw_decomp_calls(void) { - total_sw_decomp_calls++; + atomic64_inc(&total_sw_decomp_calls); } void update_total_decomp_bytes_in(int n) { - total_decomp_bytes_in += n; + atomic64_add(n, &total_decomp_bytes_in); } void update_completion_einval_errs(void) { - total_completion_einval_errors++; + atomic64_inc(&total_completion_einval_errors); } void update_completion_timeout_errs(void) { - total_completion_timeout_errors++; + atomic64_inc(&total_completion_timeout_errors); } void update_completion_comp_buf_overflow_errs(void) { - total_completion_comp_buf_overflow_errors++; + atomic64_inc(&total_completion_comp_buf_overflow_errors); } void update_wq_comp_calls(struct idxd_wq *idxd_wq) { struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); - wq->comp_calls++; - wq->iaa_device->comp_calls++; + atomic64_inc(&wq->comp_calls); + atomic64_inc(&wq->iaa_device->comp_calls); } void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) { struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); - wq->comp_bytes += n; - wq->iaa_device->comp_bytes += n; + atomic64_add(n, &wq->comp_bytes); + atomic64_add(n, &wq->iaa_device->comp_bytes); } void update_wq_decomp_calls(struct idxd_wq *idxd_wq) { struct iaa_wq *wq = idxd_wq_get_private(idxd_wq); - wq->decomp_calls++; - wq->iaa_device->decomp_calls++; + atomic64_inc(&wq->decomp_calls); + atomic64_inc(&wq->iaa_device->decomp_calls); } void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) { struct iaa_wq *wq = 
idxd_wq_get_private(idxd_wq); - wq->decomp_bytes += n; - wq->iaa_device->decomp_bytes += n; + atomic64_add(n, &wq->decomp_bytes); + atomic64_add(n, &wq->iaa_device->decomp_bytes); } static void reset_iaa_crypto_stats(void) { - total_comp_calls = 0; - total_decomp_calls = 0; - total_sw_decomp_calls = 0; - total_comp_bytes_out = 0; - total_decomp_bytes_in = 0; - total_completion_einval_errors = 0; - total_completion_timeout_errors = 0; - total_completion_comp_buf_overflow_errors = 0; + atomic64_set(&total_comp_calls, 0); + atomic64_set(&total_decomp_calls, 0); + atomic64_set(&total_sw_decomp_calls, 0); + atomic64_set(&total_comp_bytes_out, 0); + atomic64_set(&total_decomp_bytes_in, 0); + atomic64_set(&total_completion_einval_errors, 0); + atomic64_set(&total_completion_timeout_errors, 0); + atomic64_set(&total_completion_comp_buf_overflow_errors, 0); } static void reset_wq_stats(struct iaa_wq *wq) { - wq->comp_calls = 0; - wq->comp_bytes = 0; - wq->decomp_calls = 0; - wq->decomp_bytes = 0; + atomic64_set(&wq->comp_calls, 0); + atomic64_set(&wq->comp_bytes, 0); + atomic64_set(&wq->decomp_calls, 0); + atomic64_set(&wq->decomp_bytes, 0); } static void reset_device_stats(struct iaa_device *iaa_device) { struct iaa_wq *iaa_wq; - iaa_device->comp_calls = 0; - iaa_device->comp_bytes = 0; - iaa_device->decomp_calls = 0; - iaa_device->decomp_bytes = 0; + atomic64_set(&iaa_device->comp_calls, 0); + atomic64_set(&iaa_device->comp_bytes, 0); + atomic64_set(&iaa_device->decomp_calls, 0); + atomic64_set(&iaa_device->decomp_bytes, 0); list_for_each_entry(iaa_wq, &iaa_device->wqs, list) reset_wq_stats(iaa_wq); @@ -136,10 +136,14 @@ static void reset_device_stats(struct iaa_device *iaa_device) static void wq_show(struct seq_file *m, struct iaa_wq *iaa_wq) { seq_printf(m, " name: %s\n", iaa_wq->wq->name); - seq_printf(m, " comp_calls: %llu\n", iaa_wq->comp_calls); - seq_printf(m, " comp_bytes: %llu\n", iaa_wq->comp_bytes); - seq_printf(m, " decomp_calls: %llu\n", 
iaa_wq->decomp_calls); - seq_printf(m, " decomp_bytes: %llu\n\n", iaa_wq->decomp_bytes); + seq_printf(m, " comp_calls: %llu\n", + atomic64_read(&iaa_wq->comp_calls)); + seq_printf(m, " comp_bytes: %llu\n", + atomic64_read(&iaa_wq->comp_bytes)); + seq_printf(m, " decomp_calls: %llu\n", + atomic64_read(&iaa_wq->decomp_calls)); + seq_printf(m, " decomp_bytes: %llu\n\n", + atomic64_read(&iaa_wq->decomp_bytes)); } static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device) @@ -149,10 +153,14 @@ static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device) seq_puts(m, "iaa device:\n"); seq_printf(m, " id: %d\n", iaa_device->idxd->id); seq_printf(m, " n_wqs: %d\n", iaa_device->n_wq); - seq_printf(m, " comp_calls: %llu\n", iaa_device->comp_calls); - seq_printf(m, " comp_bytes: %llu\n", iaa_device->comp_bytes); - seq_printf(m, " decomp_calls: %llu\n", iaa_device->decomp_calls); - seq_printf(m, " decomp_bytes: %llu\n", iaa_device->decomp_bytes); + seq_printf(m, " comp_calls: %llu\n", + atomic64_read(&iaa_device->comp_calls)); + seq_printf(m, " comp_bytes: %llu\n", + atomic64_read(&iaa_device->comp_bytes)); + seq_printf(m, " decomp_calls: %llu\n", + atomic64_read(&iaa_device->decomp_calls)); + seq_printf(m, " decomp_bytes: %llu\n", + atomic64_read(&iaa_device->decomp_bytes)); seq_puts(m, " wqs:\n"); list_for_each_entry(iaa_wq, &iaa_device->wqs, list) @@ -162,17 +170,22 @@ static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device) static int global_stats_show(struct seq_file *m, void *v) { seq_puts(m, "global stats:\n"); - seq_printf(m, " total_comp_calls: %llu\n", total_comp_calls); - seq_printf(m, " total_decomp_calls: %llu\n", total_decomp_calls); - seq_printf(m, " total_sw_decomp_calls: %llu\n", total_sw_decomp_calls); - seq_printf(m, " total_comp_bytes_out: %llu\n", total_comp_bytes_out); - seq_printf(m, " total_decomp_bytes_in: %llu\n", total_decomp_bytes_in); + seq_printf(m, " total_comp_calls: %llu\n", + 
atomic64_read(&total_comp_calls)); + seq_printf(m, " total_decomp_calls: %llu\n", + atomic64_read(&total_decomp_calls)); + seq_printf(m, " total_sw_decomp_calls: %llu\n", + atomic64_read(&total_sw_decomp_calls)); + seq_printf(m, " total_comp_bytes_out: %llu\n", + atomic64_read(&total_comp_bytes_out)); + seq_printf(m, " total_decomp_bytes_in: %llu\n", + atomic64_read(&total_decomp_bytes_in)); seq_printf(m, " total_completion_einval_errors: %llu\n", - total_completion_einval_errors); + atomic64_read(&total_completion_einval_errors)); seq_printf(m, " total_completion_timeout_errors: %llu\n", - total_completion_timeout_errors); + atomic64_read(&total_completion_timeout_errors)); seq_printf(m, " total_completion_comp_buf_overflow_errors: %llu\n\n", - total_completion_comp_buf_overflow_errors); + atomic64_read(&total_completion_comp_buf_overflow_errors)); return 0; } -- Gitee From 238bd2814bd1b7e5f9cf3d67a96bb0bcf195fd1c Mon Sep 17 00:00:00 2001 From: Jerry Snitselaar Date: Thu, 21 Mar 2024 16:08:46 -0500 Subject: [PATCH 0985/2138] crypto: iaa - Fix some errors in IAA documentation ANBZ: #9252 commit 616ce45c150fa65683c8c1b1f2e2ac930462868d upstream. This cleans up the following issues I ran into when trying to use the scripts and commands in the iaa-crypto.rst document. - Fix incorrect arguments being passed to accel-config config-wq. - Replace --device_name with --driver-name. - Replace --driver_name with --driver-name. - Replace --size with --wq-size. - Add missing --priority argument. - Add missing accel-config config-engine command after the config-wq commands. - Fix wq name passed to accel-config config-wq. - Add rmmod/modprobe of iaa_crypto to script that disables, then enables all devices and workqueues to avoid enable-wq failing with -EEXIST when trying to register to compression algorithm. - Fix device name in cases where iaa was used instead of iax. Intel-SIG: commit 616ce45c150f crypto: iaa - Fix some errors in IAA documentation. 
Backporting patches for Intel IAA crypto driver on Intel Xeon platform. Cc: Jonathan Corbet Cc: linux-crypto@vger.kernel.org Cc: linux-doc@vger.kernel.org Signed-off-by: Jerry Snitselaar Reviewed-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- .../driver-api/crypto/iaa/iaa-crypto.rst | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst index 7b28aef39ba0..f4fba897d931 100644 --- a/Documentation/driver-api/crypto/iaa/iaa-crypto.rst +++ b/Documentation/driver-api/crypto/iaa/iaa-crypto.rst @@ -179,7 +179,9 @@ has the old 'iax' device naming in place) :: # configure wq1.0 - accel-config config-wq --group-id=0 --mode=dedicated --type=kernel --name="iaa_crypto" --device_name="crypto" iax1/wq1.0 + accel-config config-wq --group-id=0 --mode=dedicated --type=kernel --priority=10 --name="iaa_crypto" --driver-name="crypto" iax1/wq1.0 + + accel-config config-engine iax1/engine1.0 --group-id=0 # enable IAA device iax1 @@ -548,12 +550,20 @@ The below script automatically does that:: echo "End Disable IAA" + echo "Reload iaa_crypto module" + + rmmod iaa_crypto + modprobe iaa_crypto + + echo "End Reload iaa_crypto module" + # # configure iaa wqs and devices # echo "Configure IAA" for ((i = 1; i < ${num_iaa} * 2; i += 2)); do - accel-config config-wq --group-id=0 --mode=dedicated --size=128 --priority=10 --type=kernel --name="iaa_crypto" --driver_name="crypto" iax${i}/wq${i} + accel-config config-wq --group-id=0 --mode=dedicated --wq-size=128 --priority=10 --type=kernel --name="iaa_crypto" --driver-name="crypto" iax${i}/wq${i}.0 + accel-config config-engine iax${i}/engine${i}.0 --group-id=0 done echo "End Configure IAA" @@ -564,10 +574,10 @@ The below script automatically does that:: echo "Enable IAA" for 
((i = 1; i < ${num_iaa} * 2; i += 2)); do - echo enable iaa iaa${i} - accel-config enable-device iaa${i} - echo enable wq iaa${i}/wq${i}.0 - accel-config enable-wq iaa${i}/wq${i}.0 + echo enable iaa iax${i} + accel-config enable-device iax${i} + echo enable wq iax${i}/wq${i}.0 + accel-config enable-wq iax${i}/wq${i}.0 done echo "End Enable IAA" -- Gitee From 062ee9c7cea060b635f91d8211d6e8c20931969c Mon Sep 17 00:00:00 2001 From: Tom Zanussi Date: Fri, 5 Apr 2024 13:57:30 -0500 Subject: [PATCH 0986/2138] crypto: iaa - Use cpumask_weight() when rebalancing ANBZ: #9252 commit 8f0e0cf74ccef41b383daddcf5447bba655031b3 upstream. If some cpus are offlined, or if the node mask is smaller than expected, the 'nonexistent cpu' warning in rebalance_wq_table() may be erroneously triggered. Use cpumask_weight() to make sure we only iterate over the exact number of cpus in the mask. Also use num_possible_cpus() instead of num_online_cpus() to make sure all slots in the wq table are initialized. Intel-SIG: commit 8f0e0cf74cce crypto: iaa - Use cpumask_weight() when rebalancing. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Signed-off-by: Tom Zanussi Signed-off-by: Herbert Xu [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/crypto/intel/iaa/iaa_crypto_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index 6229b24b0d35..814fb2c31626 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -922,7 +922,7 @@ static void rebalance_wq_table(void) for_each_node_with_cpus(node) { node_cpus = cpumask_of_node(node); - for (cpu = 0; cpu < nr_cpus_per_node; cpu++) { + for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) { int node_cpu = cpumask_nth(cpu, node_cpus); if (WARN_ON(node_cpu >= nr_cpu_ids)) { @@ -2005,7 +2005,7 @@ static int __init iaa_crypto_init_module(void) int ret = 0; int node; - nr_cpus = num_online_cpus(); + nr_cpus = num_possible_cpus(); for_each_node_with_cpus(node) nr_nodes++; if (!nr_nodes) { -- Gitee From 1f6d7cb5ffdac8efafdae849b8769cde05e00ce1 Mon Sep 17 00:00:00 2001 From: Jerry Snitselaar Date: Fri, 5 Apr 2024 14:39:41 -0700 Subject: [PATCH 0987/2138] dmaengine: idxd: Check for driver name match before sva user feature ANBZ: #9252 commit c863062cf8250d8330859fc1d730b2aed3313bcd upstream. Currently if the user driver is probed on a workqueue configured for another driver with SVA not enabled on the system, it will print out a number of probe failing messages like the following: [ 264.831140] user: probe of wq13.0 failed with error -95 On some systems, such as GNR, the number of messages can reach over 100. Move the SVA feature check to be after the driver name match check. Intel-SIG: commit c863062cf825 dmaengine: idxd: Check for driver name match before sva user feature. Incremental backporting patches for DSA/IAA on Intel Xeon platform. 
Cc: Vinod Koul Cc: dmaengine@vger.kernel.org Cc: linux-kernel@vger.kernel.org Reviewed-by: Fenghua Yu Reviewed-by: Dave Jiang Signed-off-by: Jerry Snitselaar Link: https://lore.kernel.org/r/20240405213941.3629709-1-jsnitsel@redhat.com Signed-off-by: Vinod Koul [ Xiaochen Shen: amend commit log ] Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- drivers/dma/idxd/cdev.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index fd9bbee4cc42..57f1bf2ab20b 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -592,6 +592,14 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) if (idxd->state != IDXD_DEV_ENABLED) return -ENXIO; + mutex_lock(&wq->wq_lock); + + if (!idxd_wq_driver_name_match(wq, dev)) { + idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; + rc = -ENODEV; + goto wq_err; + } + /* * User type WQ is enabled only when SVA is enabled for two reasons: * - If no IOMMU or IOMMU Passthrough without SVA, userspace @@ -607,14 +615,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) dev_dbg(&idxd->pdev->dev, "User type WQ cannot be enabled without SVA.\n"); - return -EOPNOTSUPP; - } - - mutex_lock(&wq->wq_lock); - - if (!idxd_wq_driver_name_match(wq, dev)) { - idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; - rc = -ENODEV; + rc = -EOPNOTSUPP; goto wq_err; } -- Gitee From cdaafd779828bf2ca34287908df23544a3c9f17b Mon Sep 17 00:00:00 2001 From: Xiaochen Shen Date: Wed, 29 May 2024 19:04:40 +0800 Subject: [PATCH 0988/2138] anolis: configs: Add kernel config for Intel IAA crypto driver ANBZ: #9252 Add kernel config for Intel IAA compression accelerator crypto driver. Intel-SIG: no upstream anolis: configs: Add kernel config for Intel IAA crypto driver. Backporting patches for Intel IAA crypto driver on Intel Xeon platform. 
Signed-off-by: Xiaochen Shen Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3291 --- arch/x86/configs/anolis_defconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index f04bbb4b7d28..524d5326c893 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -7464,6 +7464,8 @@ CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m CONFIG_CRYPTO_DEV_QAT_C62XVF=m # CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION is not set +CONFIG_CRYPTO_DEV_IAA_CRYPTO=m +CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS=y CONFIG_CRYPTO_DEV_CHELSIO=m # CONFIG_CRYPTO_DEV_VIRTIO is not set # CONFIG_CRYPTO_DEV_SAFEXCEL is not set -- Gitee From 54d791d613094d61c7a2e1cf2678e9ab083656b2 Mon Sep 17 00:00:00 2001 From: xiaohuihui-bzwx-kj Date: Fri, 17 May 2024 01:46:50 -0700 Subject: [PATCH 0989/2138] anolis: drivers/bzwx: add Chengdu BeiZhongWangXin Technology N5/N6 Series Network Card Driver ANBZ: #9074 add Chengdu BeiZhongWangXin Technology N5/N6 Series Network Card Driver Signed-off-by: xiaohuihui-bzwx-kj Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/3214 --- arch/arm64/configs/anolis_defconfig | 4 + arch/x86/configs/anolis_defconfig | 4 + drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/bzwx/Kconfig | 21 + drivers/net/ethernet/bzwx/Makefile | 6 + drivers/net/ethernet/bzwx/nce/Kconfig | 36 + drivers/net/ethernet/bzwx/nce/Makefile | 35 + drivers/net/ethernet/bzwx/nce/comm/common.h | 262 ++ drivers/net/ethernet/bzwx/nce/comm/feature.h | 77 + drivers/net/ethernet/bzwx/nce/comm/mailbox.h | 147 + drivers/net/ethernet/bzwx/nce/comm/reg.h | 255 ++ drivers/net/ethernet/bzwx/nce/comm/txrx.c | 1556 ++++++++ drivers/net/ethernet/bzwx/nce/comm/txrx.h | 476 +++ drivers/net/ethernet/bzwx/nce/comm/version.h | 9 + drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h | 468 +++ .../net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c | 628 
++++ .../net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h | 149 + .../net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c | 2397 ++++++++++++ .../net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h | 69 + drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c | 1602 ++++++++ drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h | 319 ++ .../net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c | 1623 ++++++++ .../net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h | 29 + .../ethernet/bzwx/nce/ne6x/ne6x_interrupt.c | 700 ++++ .../ethernet/bzwx/nce/ne6x/ne6x_interrupt.h | 27 + .../net/ethernet/bzwx/nce/ne6x/ne6x_main.c | 3111 ++++++++++++++++ .../net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c | 250 ++ .../net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h | 39 + .../net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h | 36 + .../net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c | 171 + .../net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h | 14 + drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c | 1620 ++++++++ drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h | 249 ++ .../net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c | 444 +++ .../net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h | 11 + .../ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c | 2388 ++++++++++++ .../ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h | 163 + .../net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h | 555 +++ .../bzwx/nce/ne6x_vf/ne6xvf_debugfs.c | 305 ++ .../bzwx/nce/ne6x_vf/ne6xvf_ethtool.c | 846 +++++ .../bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h | 23 + .../ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c | 3310 +++++++++++++++++ .../ethernet/bzwx/nce/ne6x_vf/ne6xvf_osdep.h | 22 + .../ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c | 160 + .../ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h | 11 + .../bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c | 1125 ++++++ .../bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h | 123 + 48 files changed, 25877 insertions(+) create mode 100644 drivers/net/ethernet/bzwx/Kconfig create mode 100644 drivers/net/ethernet/bzwx/Makefile create mode 100644 drivers/net/ethernet/bzwx/nce/Kconfig create mode 100644 drivers/net/ethernet/bzwx/nce/Makefile create mode 100644 
drivers/net/ethernet/bzwx/nce/comm/common.h create mode 100644 drivers/net/ethernet/bzwx/nce/comm/feature.h create mode 100644 drivers/net/ethernet/bzwx/nce/comm/mailbox.h create mode 100644 drivers/net/ethernet/bzwx/nce/comm/reg.h create mode 100644 drivers/net/ethernet/bzwx/nce/comm/txrx.c create mode 100644 drivers/net/ethernet/bzwx/nce/comm/txrx.h create mode 100644 drivers/net/ethernet/bzwx/nce/comm/version.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_main.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h create mode 100644 
drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_debugfs.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_osdep.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c create mode 100644 drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig index 2940c4369081..93c4f1d5c89d 100644 --- a/arch/arm64/configs/anolis_defconfig +++ b/arch/arm64/configs/anolis_defconfig @@ -2740,6 +2740,10 @@ CONFIG_CHELSIO_IPSEC_INLINE=m CONFIG_NET_VENDOR_DAVICOM=y # CONFIG_DM9051 is not set CONFIG_DNET=m +CONFIG_NET_VENDOR_BZWX=y +CONFIG_NCE=m +CONFIG_NE6X=m +CONFIG_NE6XVF=m # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 524d5326c893..d150c33702a7 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -2835,6 +2835,10 @@ CONFIG_ENIC=m CONFIG_NET_VENDOR_DAVICOM=y # CONFIG_DM9051 is not set CONFIG_DNET=m +CONFIG_NET_VENDOR_BZWX=y +CONFIG_NCE=m +CONFIG_NE6X=m +CONFIG_NE6XVF=m CONFIG_NET_VENDOR_DEC=y # CONFIG_NET_TULIP is not set # CONFIG_NET_VENDOR_DLINK is not set diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 5a274b99f299..ceca838cb86a 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -69,6 +69,7 @@ config DNET To compile this driver as a module, choose M here: the module will be called dnet. 
+source "drivers/net/ethernet/bzwx/Kconfig" source "drivers/net/ethernet/dec/Kconfig" source "drivers/net/ethernet/dlink/Kconfig" source "drivers/net/ethernet/emulex/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 0d872d4efcd1..d24786d26214 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -104,3 +104,4 @@ obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/ +obj-$(CONFIG_NET_VENDOR_BZWX) += bzwx/ diff --git a/drivers/net/ethernet/bzwx/Kconfig b/drivers/net/ethernet/bzwx/Kconfig new file mode 100644 index 000000000000..5cc757ceba64 --- /dev/null +++ b/drivers/net/ethernet/bzwx/Kconfig @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# BeiZhongWangXin device configuration +# + +config NET_VENDOR_BZWX + bool "BeiZhongWangXin devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about BeiZhongWangXin devices. If you say Y, you will be asked + for your specific device in the following questions. + +if NET_VENDOR_BZWX + +source "drivers/net/ethernet/bzwx/nce/Kconfig" + +endif # NET_VENDOR_BZWX diff --git a/drivers/net/ethernet/bzwx/Makefile b/drivers/net/ethernet/bzwx/Makefile new file mode 100644 index 000000000000..05273f2858c5 --- /dev/null +++ b/drivers/net/ethernet/bzwx/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the BeiZhongWangXin network device drivers. 
+# + +obj-$(CONFIG_NCE) += nce/ diff --git a/drivers/net/ethernet/bzwx/nce/Kconfig b/drivers/net/ethernet/bzwx/nce/Kconfig new file mode 100644 index 000000000000..694c1108f8b4 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/Kconfig @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# BeiZhongWangXin device configuration + + +config NCE + tristate "BeiZhongWangXin Ethernet Connection N5/N6 Series Support" + depends on PCI + help + This selects the drivers support BeiZhongWangXin Ethernet Connection N5/N6 Series devices. + +if NCE + +config NE6X + tristate "BeiZhongWangXin Ethernet Connection N5/N6 Series Support" + default n + depends on PCI_MSI + help + This driver supports BeiZhongWangXin Ethernet Connection N5/N6 Series + of devices. + + To compile this driver as a module, choose M here. + The module will be called ncepf. + +config NE6XVF + tristate "BeiZhongWangXin Ethernet Connection N5/N6 Series Virtual Function support" + depends on PCI_MSI + depends on NE6X + help + This driver supports virtual functions for BeiZhongWangXin Ethernet Connection N5/N6 Series + Virtual Function devices. + + To compile this driver as a module, choose M here. The module + will be called ncevf. + +endif #NCE diff --git a/drivers/net/ethernet/bzwx/nce/Makefile b/drivers/net/ethernet/bzwx/nce/Makefile new file mode 100644 index 000000000000..5ec82cec67b3 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/Makefile @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the BeiZhongWangXin network device drivers. 
+# + +ccflags-y += -I$(srctree)/drivers/net/ethernet/bzwx/nce/comm +ccflags-y += -I$(srctree)/drivers/net/ethernet/bzwx/nce/ne6x +ccflags-y += -I$(srctree)/drivers/net/ethernet/bzwx/nce/ne6x_vf +subdir-ccflags-y += -I$(src)/comm +subdir-ccflags-y += -I$(src)/ne6x +subdir-ccflags-y += -I$(src)/ne6x_vf + +obj-$(CONFIG_NE6X) += ncepf.o +ncepf-objs := comm/txrx.o \ + ne6x/ne6x_main.o \ + ne6x/ne6x_ethtool.o \ + ne6x/ne6x_procfs.o \ + ne6x/ne6x_netlink.o \ + ne6x/ne6x_interrupt.o \ + ne6x/ne6x_reg.o \ + ne6x/ne6x_dev.o \ + ne6x/ne6x_txrx.o + +ncepf-$(CONFIG_DEBUG_FS) += ne6x/ne6x_debugfs.o +ncepf-$(CONFIG_PCI_IOV) += ne6x/ne6x_virtchnl_pf.o +ncepf-$(CONFIG_RFS_ACCEL) += ne6x/ne6x_arfs.o + +obj-$(CONFIG_NE6XVF) += ncevf.o +ncevf-objs := comm/txrx.o \ + ne6x_vf/ne6xvf_main.o \ + ne6x_vf/ne6xvf_ethtool.o \ + ne6x_vf/ne6xvf_virtchnl.o \ + ne6x_vf/ne6xvf_txrx.o + +ncevf-$(CONFIG_DEBUG_FS) += ne6x_vf/ne6xvf_debugfs.o diff --git a/drivers/net/ethernet/bzwx/nce/comm/common.h b/drivers/net/ethernet/bzwx/nce/comm/common.h new file mode 100644 index 000000000000..b3c35edbf124 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/common.h @@ -0,0 +1,262 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_COMMON_H +#define _NE6X_COMMON_H + +#define NE6X_MAX_U64 0xFFFFFFFFFFFFFFFFULL + +#define NE6X_MODULE_TYPE_TOTAL_BYTE 3 + +#define NE6X_AQ_LINK_UP 0x1ULL +#define NE6X_AQ_AN_COMPLETED BIT(0) + +#define PCI_VENDOR_ID_BZWX 0xD20C + +struct ne6x_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_miss; + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ + u64 rx_malform; + u64 tx_malform; +}; + +enum ne6x_phy_type { + NE6X_PHY_TYPE_UNKNOWN = 0, + NE6X_PHY_TYPE_10GBASE = 1, + NE6X_PHY_TYPE_25GBASE, + NE6X_PHY_TYPE_40GBASE, + NE6X_PHY_TYPE_100GBASE, + NE6X_PHY_TYPE_200GBASE, +}; + +#define NE6X_LINK_SPEED_10GB_SHIFT 0x1 +#define NE6X_LINK_SPEED_40GB_SHIFT 0x2 +#define NE6X_LINK_SPEED_25GB_SHIFT 0x3 +#define NE6X_LINK_SPEED_100GB_SHIFT 0x4 +#define NE6X_LINK_SPEED_200GB_SHIFT 0x5 + +enum ne6x_sdk_link_speed { + NE6X_LINK_SPEED_UNKNOWN = 0, + NE6X_LINK_SPEED_10GB = BIT(NE6X_LINK_SPEED_10GB_SHIFT), + NE6X_LINK_SPEED_40GB = BIT(NE6X_LINK_SPEED_40GB_SHIFT), + NE6X_LINK_SPEED_25GB = BIT(NE6X_LINK_SPEED_25GB_SHIFT), + NE6X_LINK_SPEED_100GB = BIT(NE6X_LINK_SPEED_100GB_SHIFT), + NE6X_LINK_SPEED_200GB = BIT(NE6X_LINK_SPEED_200GB_SHIFT), +}; + +struct ne6x_link_status { + u64 phy_type_low; + u64 phy_type_high; + + u16 max_frame_size; + u16 req_speeds; + u8 topo_media_conflict; + u8 link_cfg_err; + u8 lse_ena; /* Link Status Event notification */ + u8 link_info; + u8 an_info; + u8 ext_info; + u8 fec_info; + u8 pacing; + u32 link_speed; + u8 module_type[NE6X_MODULE_TYPE_TOTAL_BYTE]; +}; + +struct ne6x_mac_info { + u8 perm_addr[ETH_ALEN]; +}; + +struct ne6x_link_info { + u32 link; + u32 speed; +}; + +enum ne6x_media_type { + NE6X_MEDIA_UNKNOWN = 0, + NE6X_MEDIA_FIBER, + NE6X_MEDIA_BASET, + 
NE6X_MEDIA_BACKPLANE, + NE6X_MEDIA_DA, + NE6X_MEDIA_AUI, +}; + +struct ne6x_phy_info { + struct ne6x_link_status link_info; + struct ne6x_link_status link_info_old; + u64 phy_type_low; + u64 phy_type_high; + enum ne6x_media_type media_type; + u8 get_link_info; + u16 curr_user_speed_req; +}; + +struct ne6x_port_info { + struct ne6x_hw *hw; /* back pointer to HW instance */ + + u8 lport; + u8 hw_port_id; /* hardware port id */ + u8 hw_trunk_id; + u32 hw_queue_base_old; + u32 hw_queue_base; + u32 hw_max_queue; + + u32 queue; /* current used queue */ + struct ne6x_link_info link_status; + struct ne6x_mac_info mac; + struct ne6x_phy_info phy; +}; + +struct ne6x_bus_info { + u16 domain_num; + u16 device; + u8 func; + u8 bus_num; +}; + +struct ne6x_mbx_snap_buffer_data { + u8 state : 4; + u8 len : 4; + u8 type; + u8 data[6]; +}; + +/* Structure to track messages sent by VFs on mailbox: + * 1. vf_cntr : a counter array of VFs to track the number of + * asynchronous messages sent by each VF + * 2. vfcntr_len : number of entries in VF counter array + */ +struct ne6x_mbx_vf_counter { + u32 *vf_cntr; + u32 vfcntr_len; +}; + +/* Enum defining the different states of the mailbox snapshot in the + * PF-VF mailbox overflow detection algorithm. The + * snapshot can be in + * states: + * 1. NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT - generate a new static snapshot + * within + * the mailbox buffer. + * 2. NE6X_MAL_VF_DETECT_STATE_TRAVERSE - iterate through the mailbox snaphot + * 3. + * NE6X_MAL_VF_DETECT_STATE_DETECT - track the messages sent per VF via the + * mailbox and mark any VFs sending more + * messages than the threshold limit set. + * 4. NE6X_MAL_VF_DETECT_STATE_INVALID - Invalid mailbox state set to + * 0xFFFFFFFF. 
+ */ +enum ne6x_mbx_snapshot_state { + NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT = 0, + NE6X_MAL_VF_DETECT_STATE_TRAVERSE, + NE6X_MAL_VF_DETECT_STATE_DETECT, + NE6X_MAL_VF_DETECT_STATE_INVALID = 0xF, +}; + +struct ne6x_mbx_snapshot { + enum ne6x_mbx_snapshot_state state; + struct ne6x_mbx_vf_counter mbx_vf; +}; + +enum virtchnl_vf_config_codes { + VIRTCHNL_VF_CONFIG_TRUST = 0, + VIRTCHNL_VF_CONFIG_FORCE_LINK = 1, +}; + +struct virtchnl_vf_config { + u8 type; + u8 data[5]; +}; + +enum ne6x_adapter_state { + NE6X_ADPT_DOWN, + NE6X_ADPT_NEEDS_RESTART, + NE6X_ADPT_NETDEV_ALLOCD, + NE6X_ADPT_NETDEV_REGISTERED, + NE6X_ADPT_UMAC_FLTR_CHANGED, + NE6X_ADPT_MMAC_FLTR_CHANGED, + NE6X_ADPT_VLAN_FLTR_CHANGED, + NE6X_ADPT_PROMISC_CHANGED, + NE6X_ADPT_RELEASING, + NE6X_ADPT_RECOVER, + NE6X_ADPT_DOWN_REQUESTED, + NE6X_ADPT_OPEN, + NE6X_ADPT_NBITS /* must be last */ +}; + +struct ne6x_adapt_comm { + u16 port_info; + DECLARE_BITMAP(state, NE6X_ADPT_NBITS); +}; + +struct ne6x_vlan { + u16 tpid; + u16 vid; + u8 prio; +}; + +struct ne6x_vf_vlan { + u16 vid; + u16 tpid; +}; + +struct ne6x_macvlan { + struct list_head list; + struct net_device *vdev; + u8 mac[ETH_ALEN]; +}; + +/* values for UPT1_RSSConf.hashFunc */ +enum { + NE6X_RSS_HASH_TYPE_NONE = 0x0, + NE6X_RSS_HASH_TYPE_IPV4 = 0x01, + NE6X_RSS_HASH_TYPE_IPV4_TCP = 0x02, + NE6X_RSS_HASH_TYPE_IPV6 = 0x04, + NE6X_RSS_HASH_TYPE_IPV6_TCP = 0x08, + NE6X_RSS_HASH_TYPE_IPV4_UDP = 0x10, + NE6X_RSS_HASH_TYPE_IPV6_UDP = 0x20, +}; + +enum { + NE6X_RSS_HASH_FUNC_NONE = 0x0, + NE6X_RSS_HASH_FUNC_TOEPLITZ = 0x01, +}; + +#define NE6X_RSS_MAX_KEY_SIZE 40 +#define NE6X_RSS_MAX_IND_TABLE_SIZE 128 + +struct ne6x_rss_info { + u16 hash_type; + u16 hash_func; + u16 hash_key_size; + u16 ind_table_size; + u8 hash_key[NE6X_RSS_MAX_KEY_SIZE]; + u8 ind_table[NE6X_RSS_MAX_IND_TABLE_SIZE]; +}; + +#define NE6X_VF_VLAN(vid, tpid) ((struct ne6x_vf_vlan){vid, tpid}) + +#ifndef readq +static inline u64 readq(void __iomem *addr) +{ + return readl(addr) + ((u64)readl(addr 
+ 4) << 32); +} + +static inline void writeq(u64 val, void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} +#endif + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/feature.h b/drivers/net/ethernet/bzwx/nce/comm/feature.h new file mode 100644 index 000000000000..482b4d2d1d39 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/feature.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_FEATURE_H +#define _NE6X_FEATURE_H + +#define NE6X_F_RSS BIT(0) +#define NE6X_F_PROMISC BIT(1) +#define NE6X_F_RX_IPV4_CKSUM BIT(2) +#define NE6X_F_RX_UDP_CKSUM BIT(3) +#define NE6X_F_RX_TCP_CKSUM BIT(4) +#define NE6X_F_RX_SCTP_CKSUM BIT(5) +#define NE6X_F_RX_VLAN_STRIP BIT(6) +#define NE6X_F_RX_QINQ_STRIP BIT(7) +#define NE6X_F_RX_VLAN_FILTER BIT(8) +#define NE6X_F_LRO BIT(9) +#define NE6X_F_RX_DISABLE BIT(10) +#define NE6X_F_RX_FW_LLDP BIT(11) +#define NE6X_F_RX_ALLMULTI BIT(12) +#define NE6X_F_FLOW_STEERING BIT(15) +#define NE6X_F_TX_VLAN BIT(16) +#define NE6X_F_TX_IP_CKSUM BIT(17) +#define NE6X_F_TX_TCP_CKSUM BIT(18) +#define NE6X_F_TX_UDP_CKSUM BIT(19) +#define NE6X_F_TX_SCTP_CKSUM BIT(20) +#define NE6X_F_TX_TCP_SEG BIT(21) +#define NE6X_F_TX_UDP_SEG BIT(22) +#define NE6X_F_TX_QINQ BIT(23) +#define NE6X_F_TX_NIC_SWITCH BIT(24) +#define NE6X_F_TX_MAC_LEARN BIT(25) +#define NE6X_F_TX_DISABLE BIT(26) +#define NE6X_F_TX_QOSBANDWIDTH BIT(27) +#define NE6X_F_TX_UDP_TNL_SEG BIT(28) +#define NE6X_F_TX_UDP_TNL_CSUM BIT(29) + +#define NE6X_OFFLOAD_RSS NE6X_F_RSS +#define NE6X_OFFLOAD_RXCSUM (NE6X_F_RX_IPV4_CKSUM | \ + NE6X_F_RX_UDP_CKSUM | \ + NE6X_F_RX_TCP_CKSUM | \ + NE6X_F_RX_SCTP_CKSUM) +#define NE6X_OFFLOAD_TXCSUM (NE6X_F_TX_IP_CKSUM | \ + NE6X_F_TX_TCP_CKSUM | \ + NE6X_F_TX_UDP_CKSUM | \ + NE6X_F_TX_UDP_TNL_CSUM) + +#define NE6X_OFFLOAD_LRO NE6X_F_LRO +#define NE6X_OFFLOAD_TSO NE6X_F_TX_TCP_SEG +#define NE6X_OFFLOAD_UFO NE6X_F_TX_UDP_SEG +#define 
NE6X_OFFLOAD_SCTP_CSUM NE6X_F_TX_SCTP_CKSUM + +#define NE6X_OFFLOAD_RXD_VLAN (NE6X_F_RX_VLAN_STRIP | \ + NE6X_F_RX_QINQ_STRIP | \ + NE6X_F_RX_VLAN_FILTER) +#define NE6X_OFFLOAD_TXD_VLAN (NE6X_F_TX_VLAN | NE6X_F_TX_QINQ) +#define NE6X_OFFLOAD_L2 NE6X_F_TX_NIC_SWITCH + +#define NE6X_F_SMART_ENABLED BIT(0) +#define NE6X_F_SRIOV_ENABLED BIT(1) +#define NE6X_F_SWITCH_ENABLED BIT(2) +#define NE6X_F_L2FDB_LEARN_ENABLED BIT(3) +#define NE6X_F_VLAN_ENABLED BIT(4) +#define NE6X_F_WHITELIST_ENABLED BIT(5) +#define NE6X_F_DDOS_ENABLED BIT(6) +#define NE6X_F_TRUST_VLAN_ENABLED BIT(7) +#define NE6X_F_S_ROCE_ICRC_ENABLED BIT(8) + +#define NE6X_F_ACK_FLOOD BIT(0) +#define NE6X_F_PUSH_ACK_FLOOD BIT(1) +#define NE6X_F_SYN_ACK_FLOOD BIT(2) +#define NE6X_F_FIN_FLOOD BIT(3) +#define NE6X_F_RST_FLOOD BIT(4) +#define NE6X_F_PUSH_SYN_ACK_FLOOD BIT(5) +#define NE6X_F_UDP_FLOOD BIT(6) +#define NE6X_F_ICMP_FLOOD BIT(7) +#define NE6X_F_FRAGMENT_FLOOD BIT(8) + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/mailbox.h b/drivers/net/ethernet/bzwx/nce/comm/mailbox.h new file mode 100644 index 000000000000..85ae76b1321f --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/mailbox.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_COMM_MAILBOX_H +#define _NE6X_COMM_MAILBOX_H + +enum virtchnl_ops { + VIRTCHNL_OP_UNKNOWN = 0, + VIRTCHNL_OP_VERSION = 1, + VIRTCHNL_OP_RESET_VF = 2, + VIRTCHNL_OP_GET_VF_RESOURCES = 3, + VIRTCHNL_OP_CONFIG_TX_QUEUE = 4, + VIRTCHNL_OP_CONFIG_RX_QUEUE = 5, + VIRTCHNL_OP_CONFIG_ADPT_QUEUES = 6, + VIRTCHNL_OP_CONFIG_IRQ_MAP = 7, + VIRTCHNL_OP_ENABLE_QUEUES = 8, + VIRTCHNL_OP_DISABLE_QUEUES = 9, + VIRTCHNL_OP_ADD_ETH_ADDR = 10, + VIRTCHNL_OP_DEL_ETH_ADDR = 11, + VIRTCHNL_OP_ADD_VLAN = 12, + VIRTCHNL_OP_DEL_VLAN = 13, + /* promiscuous mode / unicast promisc / multicast promisc */ + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, + VIRTCHNL_OP_EVENT = 17, /* link state */ + VIRTCHNL_OP_SET_VF_ADDR = 18, + VIRTCHNL_OP_VF_CONFIG = 19, + VIRTCHNL_OP_CONFIG_OFFLOAD = 27, + VIRTCHNL_OP_GET_VF_FEATURE = 28, + VIRTCHNL_OP_REQUEST_QUEUES = 29, + VIRTCHNL_OP_CONFIG_RSS = 30, + VIRTCHNL_OP_GET_PORT_STATUS = 31, + VIRTCHNL_OP_CHANGED_RSS = 32, + VIRTCHNL_OP_SET_VF_STATE = 33, + VIRTCHNL_OP_SET_FAST_MDOE = 34, + VIRTCHNL_OP_CONFIG_VLAN = 40, + VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD = 41, + VIRTCHNL_OP_CONFIG_MTU = 42, + VIRTCHNL_OP_CONFIG_FLOW_CTRL = 43, + + VIRTCHNL_OP_MAX, +}; + +static char local_error_buffer[64]; +static inline const char *ne6x_opcode_str(enum virtchnl_ops opcode) +{ + sprintf(local_error_buffer, "__OPCODE_UNKNOWN_OPCODE(%d)", opcode); + switch (opcode) { + case VIRTCHNL_OP_VERSION: + return "__OPCODE_GET_VERSION"; + case VIRTCHNL_OP_RESET_VF: + return "__OPCODE_RESET_VF"; + case VIRTCHNL_OP_GET_VF_RESOURCES: + return "__OPCODE_GET_VF_RESOURCES"; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + return "__OPCODE_CONFIG_TX_QUEUE"; + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + return "__OPCODE_INIT_EXTENDED_CAPS"; + case VIRTCHNL_OP_CONFIG_ADPT_QUEUES: + return "__OPCODE_CONFIG_ADPT_QUEUES"; + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + return "__OPCODE_CONFIG_IRQ_MAP"; + case VIRTCHNL_OP_ENABLE_QUEUES: + return "__OPCODE_ENABLE_QUEUES"; + case VIRTCHNL_OP_DISABLE_QUEUES: + return 
"__OPCODE_DISABLE_QUEUES"; + case VIRTCHNL_OP_ADD_ETH_ADDR: + return "__OPCODE_ADD_ETH_ADDR"; + case VIRTCHNL_OP_DEL_ETH_ADDR: + return "__OPCODE_DEL_ETH_ADDR"; + case VIRTCHNL_OP_ADD_VLAN: + return "__OPCODE_ADD_VLAN"; + case VIRTCHNL_OP_DEL_VLAN: + return "__OPCODE_DEL_VLAN"; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + return "__OPCODE_CONFIG_PROMISCUOUS_MODE"; + case VIRTCHNL_OP_EVENT: + return "__OPCODE_EVENT"; + case VIRTCHNL_OP_CONFIG_RSS: + return "__OPCODE_CONFIG_RSS"; + case VIRTCHNL_OP_CHANGED_RSS: + return "__OP_CHANGED_RSS"; + case VIRTCHNL_OP_CONFIG_OFFLOAD: + return "__OPCODE_CONFIGURE_OFFLOAD"; + case VIRTCHNL_OP_GET_VF_FEATURE: + return "VIRTCHNL_OP_GET_VF_FEATURE"; + case VIRTCHNL_OP_REQUEST_QUEUES: + return "__OPCODE_REQUEST_QUEUES"; + case VIRTCHNL_OP_GET_PORT_STATUS: + return "__OP_GET_PORT_STATUS"; + case VIRTCHNL_OP_SET_VF_ADDR: + return "__OPCODE_SET_VF_ADDR"; + case VIRTCHNL_OP_VF_CONFIG: + return "__VIRTCHNL_OP_VF_CONFIG"; + case VIRTCHNL_OP_SET_VF_STATE: + return "__VIRTCHNL_OP_SET_VF_STATE"; + case VIRTCHNL_OP_SET_FAST_MDOE: + return "__VIRTCHNL_OP_SET_FAST_MDOE"; + case VIRTCHNL_OP_CONFIG_VLAN: + return "__VIRTCHNL_OP_CONFIG_VLAN"; + case VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD: + return "__VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD"; + case VIRTCHNL_OP_CONFIG_MTU: + return "__VIRTCHNL_OP_CONFIG_MTU"; + case VIRTCHNL_OP_CONFIG_FLOW_CTRL: + return "__VIRTCHNL_OP_CONFIG_FLOW_CTRL"; + default: + return local_error_buffer; + } +} + +/* Error Codes */ +enum virtchnl_status_code { + VIRTCHNL_STATUS_SUCCESS = 0, + VIRTCHNL_STATUS_ERR_PARAM = -5, + VIRTCHNL_STATUS_ERR_NO_MEMORY = -18, + VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38, + VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39, + VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40, + VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR = -53, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED = -64, +}; + +static inline const char *ne6x_mbox_status_str(enum virtchnl_status_code opcode) +{ + switch (opcode) { + case VIRTCHNL_STATUS_SUCCESS: + return 
"__STATUS_SUCCESS"; + case VIRTCHNL_STATUS_ERR_PARAM: + return "__STATUS_ERR_PARAM"; + case VIRTCHNL_STATUS_ERR_NO_MEMORY: + return "__STATUS_ERR_NO_MEMORY"; + case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH: + return "__STATUS_ERR_OPCODE_MISMATCH"; + case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR: + return "__STATUS_ERR_CQP_COMPL_ERROR"; + case VIRTCHNL_STATUS_ERR_INVALID_VF_ID: + return "__STATUS_ERR_INVALID_VF_ID"; + case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR: + return "__STATUS_ERR_ADMIN_QUEUE_ERROR"; + case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED: + return "__STATUS_ERR_NOT_SUPPORTED"; + default: + return "__STATUS_UNKNOWN"; + } +} + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/reg.h b/drivers/net/ethernet/bzwx/nce/comm/reg.h new file mode 100644 index 000000000000..15a745bb06f3 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/reg.h @@ -0,0 +1,255 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_COMM_REG_H +#define _NE6X_COMM_REG_H + +#include + +#define NE6X_BAR2_VP_TDQ(__vp, __reg) \ + ((((__vp) & 0x7f) << 12) | (0 << 11) | (((__reg) & 0xff) << 3)) +#define NE6X_BAR2_VP_RDQ(__vp, __reg) \ + ((((__vp) & 0x7f) << 12) | (1 << 11) | (((__reg) & 0xff) << 3)) + +/* CIU */ +#define NE6X_VP_BASE_ADDR 0x0 +#define NE6X_VPINT_DYN_CTLN(_VPID, _OFFSET) \ + (((_VPID) << 12) + ((_OFFSET) << 4)) /* _i=0...64 * Reset: PFR */ +#define NE6X_PF_BASE_ADDR 0x138ULL +#define NE6X_PFINT_DYN_CTLN(_PFID, _OFFSET) \ + (((NE6X_PF_BASE_ADDR + (_PFID)) << 12) + ((_OFFSET) << 4)) + /* _i=0...7 */ /* Reset: PFR */ + +#define NE6X_VP_INT 0x00 +#define NE6X_VP_INT_SET 0x01 +#define NE6X_VP_INT_MASK 0x02 +#define NE6X_VP_CQ_INTSHIFT 16 +#define NE6X_CQ_BASE_ADDR 0x03 +#define NE6X_CQ_HD_POINTER 0x04 +#define NE6X_CQ_CFG 0x05 +#define NE6X_RQ_BASE_ADDR 0x07 +#define NE6X_RQ_CFG 0x08 +#define NE6X_RQ_TAIL_POINTER 0x09 +#define NE6X_VP_RELOAD 0x0a +#define NE6X_SQ_BASE_ADDR 0x0b +#define NE6X_SQ_CFG 0x0c 
+#define NE6X_SQ_TAIL_POINTER 0x0d +#define NE6X_CQ_TAIL_POINTER 0x11 +#define NE6X_RQ_BUFF_OFST 0x12 +#define NE6X_RQ_HD_POINTER 0x13 +#define NE6X_SQ_BUFF_OFST 0x14 +#define NE6X_SQ_HD_POINTER 0x15 +#define NE6X_RQ_OFST 0x16 +#define NE6X_SQ_OFST 0x17 +#define NE6X_RQ_BLOCK_CFG 0x1b +#define NE6X_SQ_METER_CFG0 0x1c +#define NE6X_SQ_METER_CFG1 0x1d +#define NE6X_SQ_METER_CFG2 0x1e +#define NE6X_SQ_METER_CFG3 0x1f +#define NE6X_INT_CFG 0x21 +#define NE6X_CIU_TIME_OUT_CFG 0x45 +#define NE6X_ALL_CQ_CFG 0x46 +#define NE6X_ALL_SQ_CFG 0x47 +#define NE6X_ALL_RQ_CFG 0x48 +#define NE6X_MERGE_CFG 0x49 +#define NE6X_BFD_RECV_CNT 0x4a +#define NE6X_ETH_RECV_CNT 0x4b + +#define NE6X_PF_CON_ADDR(_OFST) \ + (((NE6X_PF_BASE_ADDR) << 12) + ((_OFST) << 4)) +#define NE6X_PF_MAILBOX_DATA 0x40 +#define NE6X_VF_MAILBOX_DATA 0x80 +#define NE6X_PF_MAILBOX_ADDR(_VP) \ + (((NE6X_PF_BASE_ADDR) << 12) + ((NE6X_PF_MAILBOX_DATA + (_VP)) << 4)) +#define NE6X_VF_MAILBOX_ADDR(_VP) \ + (((NE6X_PF_BASE_ADDR) << 12) + ((NE6X_VF_MAILBOX_DATA + (_VP)) << 4)) +#define NE6X_PF_DB_INT_REQ 0xC0 +#define NE6X_PF_DB_INT_ACK 0xC1 +#define NE6X_PF_DB_DREQ_INT 0xC2 +#define NE6X_PF_DB_DREQ_INT_SET 0xC3 +#define NE6X_PF_DB_DREQ_INT_MASK 0xC4 +#define NE6X_PF_DB_DACK_INT 0xC5 +#define NE6X_PF_DB_DACK_INT_SET 0xC6 +#define NE6X_PF_DB_DACK_INT_MASK 0xC7 + +union ne6x_vp_int { + struct vp_int { + u64 csr_ciu_int_vp : 64; + } reg; + u64 val; +}; + +union ne6x_vp_int_mask { + struct vp_int_mask { + u64 csr_ciu_mask_vp : 64; + } reg; + u64 val; +}; + +union ne6x_cq_base_addr { + struct cq_base_addr { + u64 csr_cq_base_addr_vp : 64; + } reg; + u64 val; +}; + +union ne6x_cq_cfg { + struct cq_cfg { + u64 csr_cq_len_vp : 16; + u64 csr_cq_merge_time_vp : 16; + u64 csr_cq_merge_size_vp : 4; + u64 rsv0 : 28; + } reg; + u64 val; +}; + +union ne6x_rq_base_addr { + struct rq_base_addr { + u64 csr_rq_base_addr_vp : 64; + } reg; + u64 val; +}; + +union ne6x_rq_cfg { + struct rq_cfg { + u64 csr_rq_len_vp : 16; + u64 
csr_rdq_pull_en : 1; + u64 csr_rqevt_write_back_vp : 1; + u64 csr_recv_pd_type_vp : 2; + u64 csr_recv_pd_revers_en : 1; + u64 rsv0 : 11; + u64 rsv1 : 32; + } reg; + u64 val; +}; + +union ne6x_sq_base_addr { + struct sq_base_addr { + u64 csr_sq_base_addr_vp : 64; + } reg; + u64 val; +}; + +union ne6x_sq_cfg { + struct sq_cfg { + u64 csr_sq_len_vp : 16; + u64 csr_tdq_pull_en : 1; + u64 csr_sqevt_write_back_vp : 1; + u64 csr_send_pd_revers_en : 1; + u64 rsv0 : 13; + u64 rsv1 : 32; + } reg; + u64 val; +}; + +union ne6x_rq_block_cfg { + struct rq_block_cfg { + u64 csr_rdq_mop_len : 16; + u64 csr_rdq_sop_len : 16; + u64 rsv0 : 32; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg0 { + struct sq_meter_cfg0 { + u64 csr_meter_pkt_token_num_vp : 16; + u64 csr_meter_ipg_len_vp : 8; + u64 csr_meter_refresh_en_vp : 1; + u64 csr_meter_rate_limit_en_vp : 1; + u64 csr_meter_packet_mode_vp : 1; + u64 reserved : 37; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg1 { + struct sq_meter_cfg1 { + u64 csr_meter_refresh_count_vp : 28; + u64 reserved : 4; + u64 csr_meter_refresh_interval_vp : 32; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg2 { + struct sq_meter_cfg2 { + u64 csr_meter_resume_threshold_vp : 32; + u64 reserved : 32; + } reg; + u64 val; +}; + +union ne6x_sq_meter_cfg3 { + struct sq_meter_cfg3 { + u64 csr_meter_pause_threshold_vp : 32; + u64 reserved : 32; + } reg; + u64 val; +}; + +union ne6x_int_cfg { + struct int_cfg { + u64 csr_sq_hdle_half_int_cnt_vp : 16; + u64 csr_rq_hdle_half_int_cnt_vp : 16; + u64 csr_cq_hdle_half_int_cnt_vp : 16; + u64 rsv0 : 16; + } reg; + u64 val; +}; + +union ne6x_ciu_time_out_cfg { + struct ciu_time_out_cfg { + u64 csr_int_timer_out_cnt : 12; + u64 rsv0 : 52; + } reg; + u64 val; +}; + +union ne6x_all_cq_cfg { + struct all_cq_cfg { + u64 csr_allcq_merge_size : 4; + u64 rsv0 : 4; + u64 csr_allcq_wt_rr_cnt : 7; + u64 csr_allcq_wt_rr_flag : 1; + u64 rsv1 : 48; + } reg; + u64 val; +}; + +union ne6x_all_sq_cfg { + struct all_sq_cfg { + u64 
csr_allsq_wb_trigger_info : 8; + u64 csr_allsq_csum_zero_negate : 1; + u64 csr_allsq_pull_merge_cfg : 5; + u64 rsv0 : 50; + } reg; + u64 val; +}; + +union ne6x_all_rq_cfg { + struct all_rq_cfg { + u64 csr_allrq_wb_trigger_info : 8; + u64 csr_allrq_pull_merge_cfg : 5; + u64 rsv0 : 51; + } reg; + u64 val; +}; + +union ne6x_merge_cfg { + struct merge_cfg { + u64 csr_merge_clk_cnt : 16; + u64 rsv0 : 48; + } reg; + u64 val; +}; + +union ne6x_eth_recv_cnt { + struct eth_recv_cnt { + u64 csr_eth_pkt_drop_cnt : 32; + u64 csr_eth_rdq_drop_cnt : 32; + } reg; + u64 val; +}; + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/txrx.c b/drivers/net/ethernet/bzwx/nce/comm/txrx.c new file mode 100644 index 000000000000..d5b386250835 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/txrx.c @@ -0,0 +1,1556 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "txrx.h" + +int ne6x_setup_tx_descriptors(struct ne6x_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + int bi_size; + + if (!dev) + return -ENOMEM; + + /* warn if we are about to overwrite the pointer */ + WARN_ON(tx_ring->tx_buf); + bi_size = sizeof(struct ne6x_tx_buf) * tx_ring->count; + tx_ring->tx_buf = kzalloc(bi_size, GFP_KERNEL); + if (!tx_ring->tx_buf) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct ne6x_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tx_ring->size); + goto err; + } + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->cq_last_expect = 0; + + return 0; + +err: + kfree(tx_ring->tx_buf); + tx_ring->tx_buf = NULL; + + return -ENOMEM; +} + +int ne6x_setup_cq_descriptors(struct ne6x_ring *cq_ring) +{ + struct device *dev = cq_ring->dev; + + if (!dev) + 
return -ENOMEM; + + /* round up to nearest 4K */ + cq_ring->size = cq_ring->count * sizeof(struct ne6x_cq_desc); + cq_ring->size = ALIGN(cq_ring->size, 4096); + cq_ring->desc = dma_alloc_coherent(dev, cq_ring->size, &cq_ring->dma, GFP_KERNEL); + if (!cq_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + cq_ring->size); + goto err; + } + + cq_ring->next_to_use = 0; + cq_ring->next_to_clean = 0; + + return 0; + +err: + return -ENOMEM; +} + +int ne6x_setup_tg_descriptors(struct ne6x_ring *tg_ring) +{ + struct device *dev = tg_ring->dev; + + if (!dev) + return -ENOMEM; + + /* round up to nearest 4K */ + tg_ring->size = tg_ring->count * sizeof(struct ne6x_tx_tag); + tg_ring->size = ALIGN(tg_ring->size, 4096); + tg_ring->desc = dma_alloc_coherent(dev, tg_ring->size, &tg_ring->dma, GFP_KERNEL); + if (!tg_ring->desc) { + dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n", + tg_ring->size); + goto err; + } + + tg_ring->next_to_use = 0; + tg_ring->next_to_clean = 0; + + return 0; + +err: + return -ENOMEM; +} + +int ne6x_setup_rx_descriptors(struct ne6x_ring *rx_ring) +{ + struct device *dev = rx_ring->dev; + int err = -ENOMEM; + int bi_size; + + /* warn if we are about to overwrite the pointer */ + WARN_ON(rx_ring->rx_buf); + bi_size = sizeof(struct ne6x_rx_buf) * rx_ring->count; + rx_ring->rx_buf = kzalloc(bi_size, GFP_KERNEL); + if (!rx_ring->rx_buf) + goto err; + + u64_stats_init(&rx_ring->syncp); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ne6x_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->cq_last_expect = 0; + + return 0; + +err: + kfree(rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + + return err; +} + +int ne6x_setup_tx_sgl(struct 
ne6x_ring *tx_ring) +{ + struct device *dev = tx_ring->dev; + + if (!dev) + return -ENOMEM; + tx_ring->sgl = kzalloc(sizeof(*tx_ring->sgl), GFP_KERNEL); + + if (!tx_ring->sgl) + goto err; + + return 0; +err: + return -ENOMEM; +} + +static inline unsigned int ne6x_txd_use_count(unsigned int size) +{ + return ((size * 85) >> 20) + 1; +} + +bool __ne6x_chk_linearize(struct sk_buff *skb); +static inline bool ne6x_chk_linearize(struct sk_buff *skb, int count) +{ + /* Both TSO and single send will work if count is less than 8 */ + if (likely(count < NE6X_MAX_BUFFER_TXD)) + return false; + + if (skb_is_gso(skb)) + return __ne6x_chk_linearize(skb); + + /* we can support up to 8 data buffers for a single send */ + return count != NE6X_MAX_BUFFER_TXD; +} + +int __ne6x_maybe_stop_tx(struct ne6x_ring *tx_ring, int size); + +static inline int ne6x_maybe_stop_tx(struct ne6x_ring *tx_ring, int size) +{ + if (likely(NE6X_DESC_UNUSED(tx_ring) >= size)) + return 0; + + return __ne6x_maybe_stop_tx(tx_ring, size); +} + +static inline bool ne6x_rx_is_programming_status(u8 status) +{ + return status & 0x20; +} + +static void ne6x_reuse_rx_page(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *old_buff) +{ + u16 nta = rx_ring->next_to_alloc; + struct ne6x_rx_buf *new_buff; + + new_buff = &rx_ring->rx_buf[nta]; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_buff->dma = old_buff->dma; + new_buff->page = old_buff->page; + new_buff->page_offset = old_buff->page_offset; + new_buff->pagecnt_bias = old_buff->pagecnt_bias; +} + +static void ne6x_clean_programming_status(struct ne6x_ring *rx_ring, + union ne6x_rx_desc *rx_desc, + u8 status) +{ + u32 ntc = rx_ring->next_to_clean; + struct ne6x_rx_buf *rx_buffer; + + /* fetch, update, and store next to clean */ + rx_buffer = &rx_ring->rx_buf[ntc++]; + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(NE6X_RX_DESC(rx_ring, ntc)); + + /* place unused page back on the ring */ + ne6x_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; +} + +static struct ne6x_rx_buf *ne6x_get_rx_buffer(struct ne6x_ring *rx_ring, const unsigned int size) +{ + struct ne6x_rx_buf *rx_buffer; + + rx_buffer = &rx_ring->rx_buf[rx_ring->next_to_clean]; + prefetchw(rx_buffer->page); + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, size, + DMA_FROM_DEVICE); + + /* We have pulled a buffer for use, so decrement pagecnt_bias */ + rx_buffer->pagecnt_bias--; + + return rx_buffer; +} + +static void ne6x_add_rx_frag(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *rx_buffer, + struct sk_buff *skb, unsigned int size) +{ +#if (PAGE_SIZE < 8192) + unsigned int truesize = ne6x_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(size); +#endif + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, rx_buffer->page_offset, + size, truesize); + + /* page is being used so we must update the page offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif +} + +static struct sk_buff *ne6x_construct_skb(struct ne6x_ring *rx_ring, + struct ne6x_rx_buf *rx_buffer, + unsigned int size) +{ + void *page_addr = page_address(rx_buffer->page) + rx_buffer->page_offset; +#if (PAGE_SIZE < 8192) + unsigned int truesize = ne6x_rx_pg_size(rx_ring) / 2; +#else + unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(size); +#endif + unsigned int headlen; + struct sk_buff *skb; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES)); +#endif + + /* allocate a skb 
to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, NE6X_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + /* Determine available headroom for copy */ + headlen = size; + if (headlen > NE6X_RX_HDR_SIZE) + headlen = eth_get_headlen(skb->dev, page_addr, NE6X_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, headlen), page_addr, ALIGN(headlen, sizeof(long))); + + /* update all of the pointers */ + size -= headlen; + if (size) { + skb_add_rx_frag(skb, 0, rx_buffer->page, rx_buffer->page_offset + headlen, size, + truesize); + + /* buffer is used by skb, update page_offset */ +#if (PAGE_SIZE < 8192) + rx_buffer->page_offset ^= truesize; +#else + rx_buffer->page_offset += truesize; +#endif + } else { + /* buffer is unused, reset bias back to rx_buffer */ + rx_buffer->pagecnt_bias++; + } + + return skb; +} + +static inline bool ne6x_page_is_reusable(struct page *page) +{ + return (page_to_nid(page) == numa_mem_id()) && !page_is_pfmemalloc(page); +} + +static bool ne6x_can_reuse_rx_page(struct ne6x_rx_buf *rx_buffer) +{ + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; + struct page *page = rx_buffer->page; + + /* Is any reuse possible? */ + if (unlikely(!ne6x_page_is_reusable(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely((page_count(page) - pagecnt_bias) > 1)) + return false; +#else +#define NE6X_LAST_OFFSET (SKB_WITH_OVERHEAD(PAGE_SIZE) - NE6X_RXBUFFER_4096) + if (rx_buffer->page_offset > NE6X_LAST_OFFSET) + return false; +#endif + + /* If we have drained the page fragment pool we need to update + * the pagecnt_bias and page count so that we fully restock the + * number of references the driver holds. 
+ */ + if (unlikely(pagecnt_bias == 1)) { + page_ref_add(page, USHRT_MAX - 1); + rx_buffer->pagecnt_bias = USHRT_MAX; + } + + return true; +} + +static void ne6x_put_rx_buffer(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *rx_buffer) +{ + if (ne6x_can_reuse_rx_page(rx_buffer)) { + /* hand second half of page back to the ring */ + ne6x_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, ne6x_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, NE6X_RX_DMA_ATTR); + __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; +} + +static inline bool ne6x_test_staterr(union ne6x_rx_desc *rx_desc, const u8 stat_err_bits) +{ + return !!(rx_desc->wb.u.val & stat_err_bits); +} + +static bool ne6x_is_non_eop(struct ne6x_ring *rx_ring, union ne6x_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(NE6X_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ +#define NE6X_RXD_EOF BIT(NE6X_RX_DESC_STATUS_EOF_SHIFT) + if (likely(ne6x_test_staterr(rx_desc, NE6X_RXD_EOF))) + return false; + + rx_ring->rx_stats.non_eop_descs++; + rx_desc->wb.u.val = 0; + + return true; +} + +static bool ne6x_cleanup_headers(struct ne6x_ring *rx_ring, struct sk_buff *skb, + union ne6x_rx_desc *rx_desc) +{ + if (unlikely(ne6x_test_staterr(rx_desc, BIT(NE6X_RX_DESC_STATUS_ERR_SHIFT)))) { + dev_kfree_skb_any(skb); + rx_ring->rx_stats.rx_mem_error++; + return true; + } + + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; + + return false; +} + +static inline void ne6x_rx_hash(struct ne6x_ring *ring, union ne6x_rx_desc *rx_desc, + struct sk_buff *skb, struct rx_hdr_info *rx_hdr) +{ + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + if (rx_hdr->ol_flag.flag_bits.rx_rss_hash) + skb_set_hash(skb, rx_hdr->rss_hash, PKT_HASH_TYPE_NONE); +} + +static inline void ne6x_rx_checksum(struct ne6x_ring *rx_ring, struct sk_buff *skb, + union ne6x_rx_desc *rx_desc, + struct rx_hdr_info *rx_hdr) +{ + skb->ip_summed = CHECKSUM_NONE; + skb->csum_level = 0; + skb_checksum_none_assert(skb); + + if (!(rx_ring->netdev->features & NETIF_F_RXCSUM)) + return; + + if (rx_hdr->ol_flag.flag_bits.rx_ip_cksum_bad || + rx_hdr->ol_flag.flag_bits.rx_l4_cksum_bad || + rx_hdr->ol_flag.flag_bits.rx_inner_ip_cksum_bad || + rx_hdr->ol_flag.flag_bits.rx_inner_l4_cksum_bad) { + rx_ring->rx_stats.csum_err++; + } else if (rx_hdr->ol_flag.flag_bits.rx_ip_cksum_good || + rx_hdr->ol_flag.flag_bits.rx_l4_cksum_good || + rx_hdr->ol_flag.flag_bits.rx_inner_ip_cksum_good || + rx_hdr->ol_flag.flag_bits.rx_inner_l4_cksum_good) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 1; + } +} + +static inline void ne6x_process_skb_fields(struct ne6x_ring *rx_ring, + union 
ne6x_rx_desc *rx_desc, + struct sk_buff *skb, + struct rx_hdr_info *rx_hdr) +{ + netdev_features_t features = rx_ring->netdev->features; + bool non_zero_vlan = false; + + ne6x_rx_hash(rx_ring, rx_desc, skb, rx_hdr); + rx_hdr->vlan_tci = ntohs(rx_hdr->vlan_tci); + rx_hdr->vlan_tci_outer = ntohs(rx_hdr->vlan_tci_outer); + + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + if (rx_hdr->ol_flag.flag_bits.rx_vlan_striped) { + non_zero_vlan = !!(rx_hdr->vlan_tci_outer & VLAN_VID_MASK); + if (non_zero_vlan) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + (rx_hdr->vlan_tci_outer)); + } + } + } else if (features & NETIF_F_HW_VLAN_STAG_RX) { + if (rx_hdr->ol_flag.flag_bits.rx_qinq_striped) { + non_zero_vlan = !!(rx_hdr->vlan_tci_outer & VLAN_VID_MASK); + if (non_zero_vlan) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), + (rx_hdr->vlan_tci_outer)); + } + } + } + + ne6x_rx_checksum(rx_ring, skb, rx_desc, rx_hdr); + skb_record_rx_queue(skb, rx_ring->queue_index); + + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_ring->netdev); +} + +static void ne6x_receive_skb(struct ne6x_ring *rx_ring, struct sk_buff *skb) +{ + struct ne6x_q_vector *q_vector = rx_ring->q_vector; + + napi_gro_receive(&q_vector->napi, skb); +} + +static bool ne6x_alloc_mapped_page(struct ne6x_ring *rx_ring, struct ne6x_rx_buf *bi) +{ + struct page *page = bi->page; + dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) { + rx_ring->rx_stats.page_reuse_count++; + return true; + } + + /* alloc new page for storage */ + page = dev_alloc_pages(ne6x_rx_pg_order(rx_ring)); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + /* map page for use */ + dma = dma_map_page_attrs(rx_ring->dev, page, 0, ne6x_rx_pg_size(rx_ring), DMA_FROM_DEVICE, + NE6X_RX_DMA_ATTR); + + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use + 
*/ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, ne6x_rx_pg_order(rx_ring)); + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; + + page_ref_add(page, USHRT_MAX - 1); + bi->pagecnt_bias = USHRT_MAX; + + return true; +} + +void ne6x_tail_update(struct ne6x_ring *ring, int val) +{ + int i; + + for (i = 0; i < NE6X_TAIL_REG_NUM; i++) + writeq(val, ring->tail + i); +} + +static inline void ne6x_release_rx_desc(struct ne6x_ring *rx_ring, u32 val) +{ + rx_ring->next_to_use = val; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + ne6x_tail_update(rx_ring, val); +} + +bool ne6x_alloc_rx_buffers(struct ne6x_ring *rx_ring, u16 cleaned_count) +{ + u16 ntu = rx_ring->next_to_use; + union ne6x_rx_desc *rx_desc; + struct ne6x_rx_buf *bi; + + /* do nothing if no valid netdev defined */ + if (!rx_ring->netdev || !cleaned_count) + return false; + + rx_desc = NE6X_RX_DESC(rx_ring, ntu); + bi = &rx_ring->rx_buf[ntu]; + + do { + if (!ne6x_alloc_mapped_page(rx_ring, bi)) + goto no_buffers; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, bi->page_offset, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->wb.u.val = 0; + rx_desc->w.buffer_mop_addr = cpu_to_le64(bi->dma + bi->page_offset); + rx_desc->w.buffer_sop_addr = 0; + rx_desc->w.mop_mem_len = rx_ring->rx_buf_len; + rx_desc->wb.pkt_len = 0; + rx_desc->w.vp = rx_ring->reg_idx; + + rx_desc++; + bi++; + ntu++; + if (unlikely(ntu == rx_ring->count)) { + rx_desc = NE6X_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_buf; + ntu = 0; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.u.val = 0; + + cleaned_count--; + } while (cleaned_count); + + if (rx_ring->next_to_use != ntu) + ne6x_release_rx_desc(rx_ring, ntu); + + return false; + +no_buffers: + if (rx_ring->next_to_use != ntu) + ne6x_release_rx_desc(rx_ring, ntu); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; +} + +static void ne6x_get_rx_head_info(struct sk_buff *skb, struct rx_hdr_info *rx_hdr) +{ + skb_frag_t *frag; + void *page_addr; + u32 temp_len, i; + + if (skb->data_len == 0) { + memcpy(rx_hdr, &skb->data[skb->len - 16], sizeof(struct rx_hdr_info)); + } else { + if (skb_shinfo(skb)->nr_frags > 1) { + i = skb_shinfo(skb)->nr_frags - 1; + frag = &skb_shinfo(skb)->frags[i]; + if (skb_frag_size(frag) >= 16) { + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } else if (skb_frag_size(frag) > 4) { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + memcpy((char *)rx_hdr + 16 - temp_len, page_addr, temp_len - 4); + frag = &skb_shinfo(skb)->frags[i - 1]; + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16 + + temp_len; + memcpy(rx_hdr, page_addr, 16 - temp_len); + } else { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + frag = &skb_shinfo(skb)->frags[i - 1]; + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16 + + temp_len; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } + } else { + frag = 
&skb_shinfo(skb)->frags[0]; + if (skb_frag_size(frag) >= 16) { + page_addr = skb_frag_address(frag) + skb_frag_size(frag) - 16; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } else if (skb_frag_size(frag) > 4) { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + memcpy((char *)rx_hdr + 16 - temp_len, page_addr, temp_len - 4); + page_addr = &skb->data[skb->len - skb->data_len - 16 + temp_len]; + memcpy(rx_hdr, page_addr, 16 - temp_len); + } else { + page_addr = skb_frag_address(frag); + temp_len = skb_frag_size(frag); + page_addr = &skb->data[skb->len - skb->data_len - 16 + temp_len]; + memcpy(rx_hdr, page_addr, sizeof(struct rx_hdr_info)); + } + } + } +} + +static void ne6x_clean_tx_desc(struct ne6x_tx_desc *tx_desc, struct ne6x_ring *ring) +{ + if (tx_desc->u.flags.tx_drop_addr) + ring->tx_stats.tx_drop_addr++; + + if (tx_desc->u.flags.tx_ecc_err) + ring->tx_stats.tx_ecc_err++; + + if (tx_desc->u.flags.tx_pcie_read_err) { + ring->tx_stats.tx_pcie_read_err++; + dev_info(ring->dev, "**** tx_desc: flag[0x%x], vp[%d], et[%d], ch[%d], tt[%d], sopv[%d], eopv[%d], tso[%d], l3chk[%d], l3oft[%d], l4chk[%d], l4oft[%d], pld[%d], mop[%d], sop[%d], mss[%d],mopa[%lld],sopa[%lld]\n", + tx_desc->u.val, tx_desc->vp, tx_desc->event_trigger, tx_desc->chain, + tx_desc->transmit_type, tx_desc->sop_valid, tx_desc->eop_valid, + tx_desc->tso, tx_desc->l3_csum, tx_desc->l3_ofst, tx_desc->l4_csum, + tx_desc->l4_ofst, tx_desc->pld_ofst, tx_desc->mop_cnt, tx_desc->sop_cnt, + tx_desc->mss, tx_desc->buffer_mop_addr, tx_desc->buffer_sop_addr); + } + + tx_desc->u.val = 0; + tx_desc->vp = 0; + tx_desc->event_trigger = 0; + tx_desc->chain = 0; + tx_desc->transmit_type = 0; + tx_desc->sop_valid = 0; + tx_desc->eop_valid = 0; + tx_desc->tso = 0; + tx_desc->l3_csum = 0; + tx_desc->l3_ofst = 0; + tx_desc->l4_csum = 0; + tx_desc->l4_ofst = 0; + tx_desc->pld_ofst = 0; + tx_desc->mop_cnt = 0; + tx_desc->sop_cnt = 0; + tx_desc->mss = 0; + tx_desc->buffer_mop_addr = 0; + 
tx_desc->buffer_sop_addr = 0; +} + +int ne6x_clean_cq_irq(struct ne6x_q_vector *q_vector, struct ne6x_ring *cq_ring, int napi_budget) +{ + struct ne6x_cq_desc *cq_desc = NULL; + struct ne6x_tx_desc *tx_desc = NULL; + struct ne6x_ring *clean_ring = NULL; + union ne6x_rx_desc *rx_desc = NULL; + int i, cq_num, off_idx, ntc; + int budget = napi_budget; + int last_expect = 0; + int total = 0; + + do { + cq_desc = NE6X_CQ_DESC(cq_ring, cq_ring->next_to_use); + cq_num = cq_desc->num; + if (!cq_num) + break; + + dma_rmb(); + cq_ring->stats.packets += cq_num; + + if (cq_desc->ctype) { + clean_ring = q_vector->rx.ring; + last_expect = clean_ring->cq_last_expect; + for (i = 0; i < cq_num; i++) { + off_idx = cq_desc->payload.rx_cq[i].cq_rx_offset; + if (unlikely(off_idx != last_expect)) { + netdev_err(cq_ring->netdev, "ne6xpf: cqrx err, need debug! cq: %d, rx: %d\n", + off_idx, last_expect); + netdev_err(cq_ring->netdev, "ne6xpf: queue: %d, vp: %d, rxq: %d\n", + cq_ring->queue_index, cq_ring->reg_idx, + clean_ring->queue_index); + } + + rx_desc = NE6X_RX_DESC(clean_ring, off_idx); + rx_desc->wb.u.val = cq_desc->payload.rx_cq[i].cq_rx_stats; + rx_desc->wb.pkt_len = cq_desc->payload.rx_cq[i].cq_rx_len; + if (rx_desc->wb.pkt_len > clean_ring->rx_buf_len) { + if (!rx_desc->wb.u.flags.rx_eop) + rx_desc->wb.pkt_len = clean_ring->rx_buf_len; + else + rx_desc->wb.pkt_len = rx_desc->wb.pkt_len % + clean_ring->rx_buf_len ? + rx_desc->wb.pkt_len % + clean_ring->rx_buf_len : + clean_ring->rx_buf_len; + } + + last_expect++; + last_expect = (last_expect < clean_ring->count) ? last_expect : 0; + } + + cq_ring->cq_stats.rx_num += cq_num; + } else { + clean_ring = q_vector->tx.ring; + last_expect = clean_ring->cq_last_expect; + for (i = 0; i < cq_num; i++) { + off_idx = cq_desc->payload.tx_cq[i].cq_tx_offset; + if (unlikely(off_idx != last_expect)) { + netdev_info(cq_ring->netdev, "ne6xpf: cqtx err, need debug! 
cq: %d, tx: %d\n", + off_idx, last_expect); + netdev_info(cq_ring->netdev, "ne6xpf: queue: %d, vp: %d, txq: %d\n", + cq_ring->queue_index, cq_ring->reg_idx, + clean_ring->queue_index); + } + + tx_desc = NE6X_TX_DESC(clean_ring, off_idx); + tx_desc->u.val = cq_desc->payload.tx_cq[i].cq_tx_stats; + last_expect++; + last_expect = (last_expect < clean_ring->count) ? last_expect : 0; + } + + cq_ring->cq_stats.tx_num += cq_num; + } + + clean_ring->cq_last_expect = last_expect; + cq_ring->cq_stats.cq_num++; + + /* clean cq desc */ + cq_desc->num = 0; + ntc = cq_ring->next_to_use + 1; + ntc = (ntc < cq_ring->count) ? ntc : 0; + cq_ring->next_to_use = ntc; + prefetch(NE6X_CQ_DESC(cq_ring, ntc)); + + budget--; + total++; + } while (likely(budget)); + + if (NE6X_DESC_UNUSED(cq_ring) < 1024) { + cq_ring->next_to_clean = cq_ring->next_to_use; + /* memory barrier updating cq ring tail */ + wmb(); + writeq(cq_ring->next_to_clean, cq_ring->tail); + } + + return total; +} + +int ne6x_clean_rx_irq(struct ne6x_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = NE6X_DESC_UNUSED(rx_ring); + struct ne6x_rx_buf *rx_buffer = NULL; + struct sk_buff *skb = rx_ring->skb; + union ne6x_rx_desc *rx_desc = NULL; + struct rx_hdr_info rx_hdr; + bool failure = false; + unsigned int size; + u8 rx_status; + + while (likely(total_rx_packets < (unsigned int)budget)) { + if (cleaned_count >= NE6X_RX_BUFFER_WRITE) { + failure = failure || ne6x_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + rx_desc = NE6X_RX_DESC(rx_ring, rx_ring->next_to_clean); + + rx_status = rx_desc->wb.u.val; + if (!rx_status) + break; + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we have + * verified the descriptor has been written back. 
+ */ + dma_rmb(); + + if (unlikely(ne6x_rx_is_programming_status(rx_status))) { + rx_ring->rx_stats.rx_err++; + ne6x_clean_programming_status(rx_ring, rx_desc, rx_status); + cleaned_count++; + continue; + } + + size = rx_desc->wb.pkt_len; + rx_buffer = ne6x_get_rx_buffer(rx_ring, size); + + /* retrieve a buffer from the ring */ + if (skb) + ne6x_add_rx_frag(rx_ring, rx_buffer, skb, size); + else + skb = ne6x_construct_skb(rx_ring, rx_buffer, size); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { + rx_ring->rx_stats.alloc_buf_failed++; + rx_buffer->pagecnt_bias++; + break; + } + + ne6x_put_rx_buffer(rx_ring, rx_buffer); + cleaned_count++; + + if (ne6x_is_non_eop(rx_ring, rx_desc, skb)) + continue; + + if (ne6x_cleanup_headers(rx_ring, skb, rx_desc)) { + skb = NULL; + continue; + } + + ne6x_get_rx_head_info(skb, &rx_hdr); + pskb_trim(skb, skb->len - 16); + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + + /* populate checksum, VLAN, and protocol */ + ne6x_process_skb_fields(rx_ring, rx_desc, skb, &rx_hdr); + + ne6x_receive_skb(rx_ring, skb); + skb = NULL; + + rx_desc->wb.u.val = 0; + + /* update budget accounting */ + total_rx_packets++; + } + + rx_ring->skb = skb; + + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + + /* guarantee a trip back through this routine if there was a failure */ + return failure ? 
budget : (int)total_rx_packets; +} + +int ne6x_clean_tx_irq(struct ne6x_adapt_comm *comm, struct ne6x_ring *tx_ring, int napi_budget) +{ + unsigned int total_bytes = 0, total_packets = 0; + struct ne6x_tx_desc *eop_desc = NULL; + u16 i = tx_ring->next_to_clean; + struct ne6x_tx_desc *tx_desc; + struct ne6x_tx_buf *tx_buf; + unsigned int budget = 256; + + tx_buf = &tx_ring->tx_buf[i]; + tx_desc = NE6X_TX_DESC(tx_ring, i); + + if (unlikely(tx_buf->jumbo_frame)) { + tx_buf->napi_budget += napi_budget; + if (!tx_buf->jumbo_finsh) + return !!budget; + + napi_budget = tx_buf->napi_budget; + } + + do { + eop_desc = tx_buf->next_to_watch; + if (!eop_desc) + break; + + prefetchw(&tx_buf->skb->users); + + if (!eop_desc->u.val) + break; + + dma_rmb(); + + /* clear next_to_watch to prevent false hangs */ + tx_buf->next_to_watch = NULL; + tx_buf->jumbo_frame = 0; + tx_buf->jumbo_finsh = 0; + + /* update the statistics for this packet */ + total_bytes += tx_buf->bytecount; + total_packets += tx_buf->gso_segs; + + /* free the skb/XDP data */ + ne6x_clean_tx_desc(tx_desc, tx_ring); + + /* free the skb */ + napi_consume_skb(tx_buf->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buf->skb = NULL; + dma_unmap_len_set(tx_buf, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buf++; + tx_desc++; + i++; + if (i == tx_ring->count) { + i = 0; + tx_buf = tx_ring->tx_buf; + tx_desc = NE6X_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buf, len)) { + dma_unmap_page(tx_ring->dev, dma_unmap_addr(tx_buf, dma), + dma_unmap_len(tx_buf, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_buf, len, 0); + } + + /* free the skb/XDP data */ + ne6x_clean_tx_desc(tx_desc, tx_ring); + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buf++; + tx_desc++; + i++; + if (i == 
tx_ring->count) { + i = 0; + tx_buf = tx_ring->tx_buf; + tx_desc = NE6X_TX_DESC(tx_ring, 0); + } + + if (unlikely(tx_buf->jumbo_frame && !tx_buf->jumbo_finsh)) + break; + + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); + + if (total_packets) { + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + + /* notify netdev of completed buffers */ + netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); + +#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) + if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && + (NE6X_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && + !test_bit(NE6X_ADPT_DOWN, comm->state)) { + netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_q; + } + } + } + + return !!budget; +} + +static inline int ne6x_xmit_descriptor_count(struct sk_buff *skb) +{ + int count = 0; + + count = 1; + count += skb_shinfo(skb)->nr_frags; + + return count; +} + +int __ne6x_maybe_stop_tx(struct ne6x_ring *tx_ring, int size) +{ + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); + /* Memory barrier before checking head and tail */ + smp_mb(); + + /* Check again in a case another CPU has just made room available. */ + if (likely(NE6X_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! 
- use start_queue because it doesn't call schedule */ + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + + return 0; +} + +static inline u16 ne6x_gso_get_seg_hdrlen(struct sk_buff *skb) +{ + u16 gso_hdr_len; + + gso_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (unlikely(skb->encapsulation)) + gso_hdr_len = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); + + return gso_hdr_len; +} + +static int ne6x_tso(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_tx_tag *ptx_tag) +{ + struct sk_buff *skb = first->skb; + u8 hdrlen = 0; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL || !skb_is_gso(skb)) + return 0; + + hdrlen = ne6x_gso_get_seg_hdrlen(skb); + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + /* update gso_segs and bytecount */ + first->gso_segs = skb_shinfo(skb)->gso_segs; + first->bytecount += (first->gso_segs - 1) * hdrlen; + + ptx_tag->tag_mss = skb_shinfo(skb)->gso_size; + + return 1; +} + +static void ne6x_tx_prepare_vlan_flags(struct ne6x_ring *tx_ring, + struct ne6x_tx_buf *first, + struct ne6x_tx_tag *ptx_tag) +{ + struct sk_buff *skb = first->skb; + + /* nothing left to do, software offloaded VLAN */ + if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol)) + return; + + /* the VLAN ethertype/tpid is determined by adapter configuration and netdev + * feature flags, which the driver only allows either 802.1Q or 802.1ad + * VLAN offloads exclusively so we only care about the VLAN ID here + */ + if (skb_vlan_tag_present(skb)) { + if (tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) + ptx_tag->tag_vlan2 = cpu_to_be16(skb_vlan_tag_get(skb)); + else if (tx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_TX) + ptx_tag->tag_vlan1 = cpu_to_be16(skb_vlan_tag_get(skb)); + } +} + +static int ne6x_tx_csum(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_tx_tag *ptx_tag) +{ + tx_ring->tx_stats.csum_good++; + return 1; +} + +static inline void 
ne6x_tx_desc_push(struct ne6x_tx_desc *tx_desc, + dma_addr_t dma, u32 size) +{ + tx_desc->buffer_mop_addr = cpu_to_le64(dma); + tx_desc->mop_cnt = size; + tx_desc->event_trigger = 1; +} + +void ne6x_unmap_and_free_tx_resource(struct ne6x_ring *ring, + struct ne6x_tx_buf *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); +} + +static inline void ne6x_fill_gso_sg(void *p, u16 offset, u16 len, struct ne6x_sg_info *sg) +{ + sg->p = p; + sg->offset = offset; + sg->len = len; +} + +int ne6x_fill_jumbo_sgl(struct ne6x_ring *tx_ring, struct sk_buff *skb) +{ + u16 sg_max_dlen = 0, dlen = 0, len = 0, offset = 0, send_dlen = 0, total_dlen = 0; + u16 subframe = 0, send_subframe = 0, sg_avail = 0, i = 0, j = 0; + u16 gso_hdr_len = ne6x_gso_get_seg_hdrlen(skb); + struct ne6x_sg_list *sgl = tx_ring->sgl; + + WARN_ON(!sgl); + + memset(sgl, 0, sizeof(struct ne6x_sg_list)); + dlen = skb_headlen(skb) - gso_hdr_len; + sgl->mss = skb_shinfo(skb)->gso_size; + sg_max_dlen = NE6X_MAX_DATA_PER_TXD - gso_hdr_len; + sg_max_dlen = ((u16)(sg_max_dlen / sgl->mss)) * sgl->mss; + total_dlen = skb->data_len + dlen; + sgl->sgl_mss_cnt = sg_max_dlen / sgl->mss; + subframe = total_dlen / sg_max_dlen; + subframe += total_dlen % sg_max_dlen ? 1 : 0; + ne6x_fill_gso_sg(skb->data, 0, gso_hdr_len, &sgl->sg[i]); + sgl->sg[i].flag |= NE6X_SG_FST_SG_FLAG | NE6X_SG_SOP_FLAG | NE6X_SG_JUMBO_FLAG; + offset = gso_hdr_len; + sg_avail = sg_max_dlen; + ++send_subframe; + i++; + while (dlen) { + len = dlen > sg_avail ? 
sg_avail : dlen; + ne6x_fill_gso_sg(skb->data, offset, len, &sgl->sg[i]); + offset += len; + dlen -= len; + send_dlen += len; + sg_avail -= len; + if (send_dlen == total_dlen) + goto end; + + if (!(send_dlen % sg_max_dlen)) { + sgl->sg[i].flag |= NE6X_SG_EOP_FLAG; + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + + ne6x_fill_gso_sg(skb->data, 0, gso_hdr_len, &sgl->sg[i]); + + sgl->sg[i].flag |= NE6X_SG_SOP_FLAG | NE6X_SG_JUMBO_FLAG; + sgl->sg[i].base_mss_no = send_subframe * sgl->sgl_mss_cnt; + + if (++send_subframe == subframe) + sgl->sg[i].flag |= NE6X_SG_LST_SG_FLAG; + + sgl->sg[i].base_mss_no = send_subframe * sgl->sgl_mss_cnt; + + sg_avail = sg_max_dlen; + } + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + } + + for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[j]; + + dlen = skb_frag_size(f); + offset = 0; + while (dlen) { + len = dlen > sg_avail ? sg_avail : dlen; + ne6x_fill_gso_sg(f, offset, len, &sgl->sg[i]); + sgl->sg[i].flag |= NE6X_SG_FRAG_FLAG; + + offset += len; + dlen -= len; + send_dlen += len; + sg_avail -= len; + if (send_dlen == total_dlen) + goto end; + if (!(send_dlen % sg_max_dlen)) { + sgl->sg[i].flag |= NE6X_SG_EOP_FLAG; + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + ne6x_fill_gso_sg(skb->data, 0, gso_hdr_len, &sgl->sg[i]); + sgl->sg[i].flag |= NE6X_SG_SOP_FLAG | NE6X_SG_JUMBO_FLAG; + sgl->sg[i].base_mss_no = send_subframe * sgl->sgl_mss_cnt; + + if (++send_subframe == subframe) + sgl->sg[i].flag |= NE6X_SG_LST_SG_FLAG; + sg_avail = sg_max_dlen; + } + ++i; + if (unlikely(i > NE6X_MAX_DESC_NUM_PER_SKB)) + goto err; + } + offset = 0; + } +end: + sgl->sg[i].flag |= NE6X_SG_EOP_FLAG; + sgl->sg_num = ++i; + return 0; +err: + return -1; +} + +void ne6x_fill_tx_desc(struct ne6x_tx_desc *tx_desc, u8 vp, dma_addr_t tag_dma, + dma_addr_t dma, struct ne6x_sg_info *sg) +{ + memset(tx_desc, 0, NE6X_TX_DESC_SIZE); + tx_desc->buffer_mop_addr = 
cpu_to_le64(dma); + tx_desc->buffer_sop_addr = (sg->flag & NE6X_SG_SOP_FLAG) ? cpu_to_le64(tag_dma) : 0; + tx_desc->mop_cnt = sg->len; + tx_desc->event_trigger = 1; + tx_desc->vp = vp; + tx_desc->sop_valid = (sg->flag & NE6X_SG_SOP_FLAG) ? 1u : 0u; + tx_desc->eop_valid = (sg->flag & NE6X_SG_EOP_FLAG) ? 1u : 0u; + tx_desc->sop_cnt = (sg->flag & NE6X_SG_SOP_FLAG) ? 32 : 0; + if (tx_desc->eop_valid) { + tx_desc->sop_cnt = tx_desc->mop_cnt; + tx_desc->buffer_sop_addr = tx_desc->buffer_mop_addr; + tx_desc->mop_cnt = 4; + } +} + +void ne6x_fill_tx_priv_tag(struct ne6x_ring *tx_ring, struct ne6x_tx_tag *tx_tag, + int mss, struct ne6x_sg_info *sg) +{ + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)tx_ring->adpt; + + tx_tag->tag_pi1 = (comm->port_info & 0x2) ? 1 : 0; + tx_tag->tag_pi0 = (comm->port_info & 0x1) ? 1 : 0; + tx_tag->tag_vport = (comm->port_info >> 8) & 0xFF; + tx_tag->tag_mss = cpu_to_be16(mss); + tx_tag->tag_num = sg->base_mss_no | (sg->flag & NE6X_SG_JUMBO_FLAG) | + (sg->flag & NE6X_SG_LST_SG_FLAG) | + (sg->flag & NE6X_SG_FST_SG_FLAG); + tx_tag->tag_num = cpu_to_be16(tx_tag->tag_num); +} + +void ne6x_xmit_jumbo(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_ring *tag_ring, struct ne6x_tx_tag *tx_tag) +{ + int j = 0; + struct ne6x_sg_list *sgl = tx_ring->sgl; + struct ne6x_sg_info *sg; + dma_addr_t dma, tag_dma; + struct sk_buff *skb = first->skb; + struct ne6x_tx_buf *tx_bi; + struct ne6x_tx_tag *tag_desc = tx_tag; + u32 i = tx_ring->next_to_use; + struct ne6x_tx_desc *tx_desc = NE6X_TX_DESC(tx_ring, i); + + for (; j < sgl->sg_num; j++) { + sg = &sgl->sg[j]; + if (likely(sg->flag & NE6X_SG_FRAG_FLAG)) { + dma = skb_frag_dma_map(tx_ring->dev, sg->p, sg->offset, sg->len, + DMA_TO_DEVICE); + } else { + dma = dma_map_single(tx_ring->dev, sg->p + sg->offset, sg->len, + DMA_TO_DEVICE); + } + + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + tx_bi = &tx_ring->tx_buf[i]; + + dma_unmap_len_set(tx_bi, len, sg->len); + + 
dma_unmap_addr_set(tx_bi, dma, dma); + + if (sg->flag & NE6X_SG_SOP_FLAG) { + tag_dma = tag_ring->dma + tag_ring->next_to_use * NE6X_TX_PRIV_TAG_SIZE; + tag_desc = NE6X_TX_TAG(tag_ring, tag_ring->next_to_use); + ne6x_fill_tx_priv_tag(tx_ring, tag_desc, sgl->mss, sg); + if (++tag_ring->next_to_use == tag_ring->count) + tag_ring->next_to_use = 0; + } else { + tag_dma = 0; + } + + tx_desc = NE6X_TX_DESC(tx_ring, i); + ne6x_fill_tx_desc(tx_desc, tx_ring->reg_idx, tag_dma, dma, sg); + if (++i == tx_ring->count) + i = 0; + } + tx_ring->next_to_use = i; + ne6x_maybe_stop_tx(tx_ring, DESC_NEEDED); + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + * + * We also use this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + /* notify HW of packet */ + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + ne6x_tail_update(tx_ring, i); + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + first->jumbo_finsh = 1u; + + return; + +dma_error: + dev_info(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_bi map */ + for (;;) { + tx_bi = &tx_ring->tx_buf[i]; + ne6x_unmap_and_free_tx_resource(tx_ring, tx_bi); + if (tx_bi == first) + break; + + if (i == 0) + i = tx_ring->count; + + i--; + } + + tx_ring->next_to_use = i; +} + +void ne6x_xmit_simple(struct ne6x_ring *tx_ring, struct ne6x_tx_buf *first, + struct ne6x_ring *tag_ring, struct ne6x_tx_tag *tx_tag) +{ + struct sk_buff *skb = first->skb; + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)tx_ring->adpt; + struct ne6x_tx_desc *tx_desc, *first_desc; + unsigned int size = skb_headlen(skb); + u32 i = tx_ring->next_to_use; + struct ne6x_tx_tag *ttx_desc; + struct ne6x_tx_buf *tx_bi; + bool is_first = true; + int send_len = 
0; + skb_frag_t *frag; + dma_addr_t dma; + __le64 mss = 0; + + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + first_desc = NE6X_TX_DESC(tx_ring, i); + tx_desc = NE6X_TX_DESC(tx_ring, i); + mss = tx_desc->mss; + tx_desc->sop_valid = 1; + tx_desc->eop_valid = 0; + tx_bi = first; + + ttx_desc = (struct ne6x_tx_tag *)tx_tag; + ttx_desc->tag_pi1 = (comm->port_info & 0x2) ? 1 : 0; + ttx_desc->tag_pi0 = (comm->port_info & 0x1) ? 1 : 0; + ttx_desc->tag_vport = (comm->port_info >> 8) & 0xFF; + ttx_desc->tag_mss = tx_tag->tag_mss; + ttx_desc->tag_num = 0x0; + send_len += size; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_bi, len, size); + dma_unmap_addr_set(tx_bi, dma, dma); + + ne6x_tx_desc_push(tx_desc, dma, size); + tx_desc->vp = tx_ring->reg_idx; + tx_desc->tso = 0x0; + tx_desc->l3_csum = 0x00; + tx_desc->l3_ofst = 0x00; + tx_desc->l4_csum = 0x00; + tx_desc->l4_ofst = 0x00; + tx_desc->pld_ofst = 0x00; + tx_desc->u.val = 0x0; + tx_desc->rsv4 = 0; + if (is_first) { + tx_desc->sop_valid = 1u; + is_first = false; + tx_desc->sop_cnt = 32; + tx_desc->buffer_sop_addr = cpu_to_le64(first->tag_dma); + } + + if (send_len == skb->len) { + tx_desc->eop_valid = 1u; + break; + } + + if (++i == tx_ring->count) + i = 0; + + tx_desc = NE6X_TX_DESC(tx_ring, i); + + size = skb_frag_size(frag); + send_len += size; + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, DMA_TO_DEVICE); + + tx_bi = &tx_ring->tx_buf[i]; + } + + netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); + + if (++i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + if (++tag_ring->next_to_use == tag_ring->count) + tag_ring->next_to_use = 0; + + ne6x_maybe_stop_tx(tx_ring, DESC_NEEDED); + + /* timestamp the skb as late as possible, just prior to notifying + * the MAC that it should transmit this packet + */ + skb_tx_timestamp(skb); + + /* Force 
memory writes to complete before letting h/w know there + * are new descriptors to fetch. + * + * We also use this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. + */ + wmb(); + + /* set next_to_watch value indicating a packet is present */ + first->next_to_watch = tx_desc; + /* notify HW of packet */ + if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) + ne6x_tail_update(tx_ring, i); + + return; + +dma_error: + dev_info(tx_ring->dev, "TX DMA map failed\n"); + + /* clear dma mappings for failed tx_bi map */ + for (;;) { + tx_bi = &tx_ring->tx_buf[i]; + ne6x_unmap_and_free_tx_resource(tx_ring, tx_bi); + if (tx_bi == first) + break; + + if (i == 0) + i = tx_ring->count; + + i--; + } + + tx_ring->next_to_use = i; +} + +netdev_tx_t ne6x_xmit_frame_ring(struct sk_buff *skb, struct ne6x_ring *tx_ring, + struct ne6x_ring *tag_ring, bool jumbo_frame) +{ + struct ne6x_tx_tag *tx_tagx = NE6X_TX_TAG(tag_ring, tag_ring->next_to_use); + struct ne6x_tx_buf *first; + int tso, count; + + /* prefetch the data, we'll need it later */ + prefetch(tx_tagx); + prefetch(skb->data); + + if (!jumbo_frame) { + count = ne6x_xmit_descriptor_count(skb); + } else { + if (ne6x_fill_jumbo_sgl(tx_ring, skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + count = tx_ring->sgl->sg_num; + } + /* reserve 5 descriptors to avoid tail over-write */ + if (ne6x_maybe_stop_tx(tx_ring, count + 4 + 1)) { + /* this is a hard error */ + tx_ring->tx_stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + /* record the location of the first descriptor for this packet */ + first = &tx_ring->tx_buf[tx_ring->next_to_use]; + first->skb = skb; + first->bytecount = skb->len; + first->gso_segs = 1; + /* record initial flags and protocol */ + + first->jumbo_frame = 0; + first->jumbo_finsh = 0; + first->tag_dma = tag_ring->dma + tag_ring->next_to_use * sizeof(struct ne6x_tx_tag); + memset(tx_tagx, 0x00, sizeof(*tx_tagx)); + + 
ne6x_tx_prepare_vlan_flags(tx_ring, first, tx_tagx); + + tso = ne6x_tso(tx_ring, first, tx_tagx); + if (tso < 0) + goto out_drop; + + tso = ne6x_tx_csum(tx_ring, first, tx_tagx); + if (tso < 0) + goto out_drop; + + tx_tagx->tag_mss = cpu_to_be16(tx_tagx->tag_mss); + + if (!jumbo_frame) { + ne6x_xmit_simple(tx_ring, first, tag_ring, tx_tagx); + } else { + first->jumbo_frame = true; + ne6x_xmit_jumbo(tx_ring, first, tag_ring, tx_tagx); + } + + return NETDEV_TX_OK; + +out_drop: + ne6x_unmap_and_free_tx_resource(tx_ring, first); + + return NETDEV_TX_OK; +} diff --git a/drivers/net/ethernet/bzwx/nce/comm/txrx.h b/drivers/net/ethernet/bzwx/nce/comm/txrx.h new file mode 100644 index 000000000000..8b35bc385aa5 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/txrx.h @@ -0,0 +1,476 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2024, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _TXRX_H +#define _TXRX_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" + +#define NE6X_MAX_NUM_DESCRIPTORS (16 * 1024) +#define NE6X_DEFAULT_NUM_DESCRIPTORS (4 * 1024) +#define NE6X_MIN_NUM_DESCRIPTORS 64 +#define NE6X_REQ_DESCRIPTOR_MULTIPLE 32 + +#define NE6X_MAX_BUFFER_TXD 8 +#define NE6X_MIN_TX_LEN 60 + +#define NE6X_TAIL_REG_NUM 4 +#define NE6X_RX_BUFFER_WRITE 32 /* Must be power of 2 */ + +/* The size limit for a transmit buffer in a descriptor is 15K. + * In order to align with the read requests we will align the value to + * the nearest 4K which represents our maximum read request size. 
+ */ +#define NE6X_MAX_READ_REQ_SIZE 4096 +#define NE6X_MAX_DATA_PER_TXD (15500 - 32 - 4 - 1) +#define NE6X_MAX_DATA_PER_TXD_ALIGNED \ + (NE6X_MAX_DATA_PER_TXD & ~(NE6X_MAX_READ_REQ_SIZE - 1)) + +/* Supported Rx Buffer Sizes (a multiple of 128) */ +#define NE6X_PACKET_HDR_PAD ETH_HLEN +#define NE6X_RXBUFFER_256 256 +#define NE6X_RXBUFFER_2048 2048 +#define NE6X_RXBUFFER_4096 4096 /* Used for large frames w/ padding */ +/*CIU buffer max len is 15k*/ +#define NE6X_MAX_RXBUFFER 15360 /* largest size for single descriptor */ +#define NE6X_MIN_MTU_SIZE 128 +#define NE6X_RX_HDR_SIZE NE6X_RXBUFFER_256 + +#define NE6X_TX_PRIV_TAG_SIZE 32 +#define NE6X_TX_DESC_SIZE 32 +/* iterator for handling rings in ring container */ +#define ne6x_for_each_ring(pos, head) \ + for (pos = (head).ring; pos; pos = pos->next) + +#define NE6X_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) + +#define NE6X_RX_DESC(R, i) (&(((union ne6x_rx_desc *)((R)->desc))[i])) +#define NE6X_TX_DESC(R, i) (&(((struct ne6x_tx_desc *)((R)->desc))[i])) +#define NE6X_TX_TAG(R, i) (&(((struct ne6x_tx_tag *)((R)->desc))[i])) +#define NE6X_CQ_DESC(R, i) (&(((struct ne6x_cq_desc *)((R)->desc))[i])) + +#define NE6X_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 
\ + 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1) + +struct ne6x_tx_desc_status { + /* pkt drop */ + u8 tx_drop_addr : 1; + u8 rsv3 : 1; + u8 rsv2 : 1; + /* normal */ + u8 tx_done : 1; + /* ecc error */ + u8 tx_ecc_err : 1; + u8 rsv1 : 1; + u8 rsv0 : 1; + /* pcie error */ + u8 tx_pcie_read_err : 1; +}; + +struct ne6x_tx_desc { + union { + /* Hardware write back*/ + struct ne6x_tx_desc_status flags; + u8 val; + } u; + + u8 rsv0 : 1; + u8 vp : 7; + u8 event_trigger : 1; + u8 chain : 1; + u8 transmit_type : 2; + u8 sop_valid : 1; + u8 eop_valid : 1; + u8 tso : 1; + u8 rsv1 : 1; + u8 rsv2; + u8 rsv3; + + u8 l3_csum : 1; + u8 l3_ofst : 7; + u8 l4_csum : 1; + u8 l4_ofst : 7; + u8 pld_ofst; + + __le64 mop_cnt : 24; + __le64 sop_cnt : 16; + __le64 rsv4 : 8; + __le64 mss : 16; + __le64 buffer_mop_addr; + __le64 buffer_sop_addr; +}; + +struct ne6x_tx_tag { + u8 resv0; + u8 tag_pi1 : 1; + u8 resv1 : 7; + u8 l3_csum : 1; + u8 l4_csum : 1; + u8 vxl_l3_csum : 1; + u8 vxl_l4_csum : 1; + u8 tag_resv : 3; + u8 tag_pi0 : 1; + u8 tag_vport; + u16 tag_vlan1; /* 1q vlan */ + u16 tag_vlan2; /* 1ad vlan */ + + __le64 resv2 : 32; + __le64 tag_num : 16; + __le64 tag_mss : 16; /* mss */ + + u8 l3_ofst; + u8 l4_ofst; + u16 l4_len; /* l4hdr + pld_size */ + u8 vxl_l3_ofst; + u8 vxl_l4_ofst; + u16 vxl_l4_len; /* l4hdr + pld_size */ + + __le64 resv3; +}; + +struct ne6x_tx_buf { + struct ne6x_tx_desc *next_to_watch; + struct sk_buff *skb; + u32 bytecount; + u8 jumbo_frame; /* fragment when bytecount > 15.5KB*/ + u8 jumbo_finsh; /* when last frame of jumbo packet transmitted, set it 1 */ + u16 rsv; + int napi_budget; /* when bytecount > 15.5KB, accumulating NPAI trigger count + * in transmit irq handler + */ + u16 gso_segs; + dma_addr_t tag_dma; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); +}; + +struct ne6x_rx_desc_status { + u8 rx_mem_err : 1; /* MOP_MEM_ADDR/SOP_MEM_ADDR/MOP_MEM_LEN=0, pkt need drop */ + u8 rx_mem_ovflow : 1; /* SOP_MEM_OVFLOW ==1, mop have pkt */ + 
u8 rsv : 1; + u8 rx_eop : 1; /* EOP flag */ + u8 rx_csum_err : 1; /* checksum error */ + u8 rx_err : 1; /* Not enough descriptors */ + u8 rx_mem_used : 1; /* MEM_USED, Normal */ + u8 pd_type : 1; /* 0 ingress pd, 1 egress pd */ +}; + +#define NE6X_RX_DESC_STATUS_EOF_SHIFT 3 +#define NE6X_RX_DESC_STATUS_ERR_SHIFT 0 + +/* Receive Descriptor */ +union ne6x_rx_desc { + struct { + u8 rsv3; + u8 rsv2 : 1; + u8 vp : 7; + __le16 mop_mem_len; + __le16 sop_mem_len; + __le16 rsv1; + __le64 buffer_sop_addr; + __le64 buffer_mop_addr; + + __le64 rsv0; + } w; /* write */ + + struct { + union { + struct ne6x_rx_desc_status flags; + u8 val; + } u; + u8 rsv2 : 1; + u8 vp : 7; + u8 pd[24]; + __le16 rsv0; + __le16 rsv1; + __le16 pkt_len; + } wb; /* Writeback */ +}; + +struct ne6x_tx_cq_desc { + u8 cq_tx_stats; + u16 cq_tx_offset; +} __packed; + +struct ne6x_rx_cq_desc { + u8 cq_rx_stats; + u16 cq_rx_len; + u16 cq_rx_offset; +} __packed; + +struct ne6x_cq_desc { + u8 ctype : 1; + u8 rsv0 : 3; + u8 num : 4; + u8 rsv1; + + union { + struct ne6x_tx_cq_desc tx_cq[10]; + struct ne6x_rx_cq_desc rx_cq[6]; + u8 data[30]; + } payload; +}; + +struct ne6x_rx_buf { + dma_addr_t dma; + struct page *page; + u32 page_offset; + u16 pagecnt_bias; +}; + +struct ne6x_q_stats { + u64 packets; + u64 bytes; +}; + +struct ne6x_txq_stats { + u64 restart_q; + u64 tx_busy; + u64 tx_linearize; + u64 csum_err; + u64 csum_good; + u64 tx_pcie_read_err; + u64 tx_ecc_err; + u64 tx_drop_addr; +}; + +struct ne6x_rxq_stats { + u64 non_eop_descs; + u64 alloc_page_failed; + u64 alloc_buf_failed; + u64 page_reuse_count; + u64 csum_err; + u64 csum_good; + u64 rx_mem_error; + u64 rx_err; +}; + +struct ne6x_cq_stats { + u64 cq_num; + u64 tx_num; + u64 rx_num; +}; + +#define NE6X_SG_SOP_FLAG BIT(0) +#define NE6X_SG_EOP_FLAG BIT(1) +#define NE6X_SG_FST_SG_FLAG BIT(13) +#define NE6X_SG_LST_SG_FLAG BIT(14) +#define NE6X_SG_JUMBO_FLAG BIT(15) +#define NE6X_SG_FRAG_FLAG BIT(4) +#define NE6X_MAX_DESC_NUM_PER_SKB 16 + +struct 
ne6x_sg_info { + void *p; + u16 offset; + u16 len; + u16 flag; + u16 base_mss_no; +}; + +struct ne6x_sg_list { + u16 sg_num; + u16 mss; + u16 sgl_mss_cnt; + struct ne6x_sg_info sg[NE6X_MAX_DESC_NUM_PER_SKB]; +}; + +/* descriptor ring, associated with a adapter */ +struct ne6x_ring { + /* CL1 - 1st cacheline starts here */ + void *adpt; + struct ne6x_ring *next; /* pointer to next ring in q_vector */ + void *desc; /* Descriptor ring memory */ + struct device *dev; /* Used for DMA mapping */ + struct net_device *netdev; /* netdev ring maps to */ + struct ne6x_q_vector *q_vector; /* Backreference to associated vector */ + + u64 __iomem *tail; + + struct ne6x_sg_list *sgl; + + union { + struct ne6x_tx_buf *tx_buf; + struct ne6x_rx_buf *rx_buf; + }; + + u16 count; /* Number of descriptors */ + u16 reg_idx; /* HW register index of the ring */ + + /* used in interrupt processing */ + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + u16 cq_last_expect; + + u16 queue_index; /* Queue number of ring */ + u16 rx_buf_len; + + /* stats structs */ + struct ne6x_q_stats stats; + struct u64_stats_sync syncp; + + union { + struct ne6x_txq_stats tx_stats; + struct ne6x_rxq_stats rx_stats; + struct ne6x_cq_stats cq_stats; + }; + + struct rcu_head rcu; /* to avoid race on free */ + dma_addr_t dma; /* physical address of ring */ + unsigned int size; /* length of descriptor ring in bytes */ + struct sk_buff *skb; /* When ne6x_clean_rx_ring_irq() must + * return before it sees the EOP for + * the current packet, we save that skb + * here and resume receiving this + * packet the next time + * ne6x_clean_rx_ring_irq() is called + * for this ring. 
+ */ +} ____cacheline_internodealigned_in_smp; + +struct ne6x_ring_container { + /* head of linked-list of rings */ + struct ne6x_ring *ring; + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 count; +}; + +union rx_ol_flags { + u32 ol_flags; /* Offload Feature Bits. */ + struct { +#if defined(__BIG_ENDIAN_BITFIELD) + u32 ol_flag_rx_vlan :1; + u32 rx_ip_cksum_bad :1; + u32 rx_ip_cksum_good :1; + u32 rx_l4_cksum_bad :1; + u32 rx_l4_cksum_good :1; + u32 rx_rss_hash :1; + u32 rx_qinq :1; + u32 rx_lro :1; + u32 rx_vlan_striped :1; + u32 rx_qinq_striped :1; + u32 rx_dvlan :1; + u32 rx_vlan_bad :1; + u32 rx_inner_ip_cksum_bad :1; + u32 rx_inner_ip_cksum_good :1; + u32 rx_inner_l4_cksum_bad :1; + u32 rx_inner_l4_cksum_good :1; + u32 rx_tnl_csum :1; + u32 rsv0 :1; + u32 tag_num :8; + u32 rsv1 :6; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + u32 rsv1 :6; + u32 tag_num :8; + u32 rsv0 :1; + u32 rx_tnl_csum :1; + u32 rx_vlan_striped :1; + u32 rx_qinq_striped :1; + u32 rx_dvlan :1; + u32 rx_vlan_bad :1; + u32 rx_inner_ip_cksum_bad :1; + u32 rx_inner_ip_cksum_good :1; + u32 rx_inner_l4_cksum_bad :1; + u32 rx_inner_l4_cksum_good :1; + u32 ol_flag_rx_vlan :1; + u32 rx_ip_cksum_bad :1; + u32 rx_ip_cksum_good :1; + u32 rx_l4_cksum_bad :1; + u32 rx_l4_cksum_good :1; + u32 rx_rss_hash :1; + u32 rx_qinq :1; + u32 rx_lro :1; +#endif + } flag_bits; +}; + +struct rx_hdr_info { + union rx_ol_flags ol_flag; + u32 rss_hash; /* RSS Hash Value */ + u32 vlan_tci_outer:16; /* VLAN Outer Tag Control Identifier */ + u32 vlan_tci:16; /* VLAN Tag Control Identifier */ +}; + +#define NE6X_INT_NAME_STR_LEN (IFNAMSIZ + 16) + +/* struct that defines an interrupt vector */ +struct ne6x_q_vector { + void *adpt; + + u16 v_idx; /* index in the adpt->q_vector array. 
*/ + u16 reg_idx; + + struct napi_struct napi; + + struct ne6x_ring_container rx; + struct ne6x_ring_container tx; + struct ne6x_ring_container cq; + struct ne6x_ring_container tg; + + u8 num_ringpairs; /* total number of ring pairs in vector */ + + cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; + + char name[NE6X_INT_NAME_STR_LEN]; +} ____cacheline_internodealigned_in_smp; + +#define DESC_NEEDED (MAX_SKB_FRAGS + 6) + +static inline unsigned int ne6x_rx_pg_order(struct ne6x_ring *ring) +{ +#if (PAGE_SIZE < 8192) + if (ring->rx_buf_len > (PAGE_SIZE / 2)) + return 1; +#endif + return 0; +} + +#define ne6x_rx_pg_size(_ring) (PAGE_SIZE << ne6x_rx_pg_order(_ring)) + +static inline struct netdev_queue *txring_txq(const struct ne6x_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +int ne6x_clean_cq_irq(struct ne6x_q_vector *q_vector, struct ne6x_ring *cq_ring, int napi_budget); +int ne6x_clean_rx_irq(struct ne6x_ring *rx_ring, int budget); +int ne6x_clean_tx_irq(struct ne6x_adapt_comm *comm, struct ne6x_ring *tx_ring, int napi_budget); +netdev_tx_t ne6x_xmit_frame_ring(struct sk_buff *skb, struct ne6x_ring *tx_ring, + struct ne6x_ring *tag_ring, bool jumbo_frame); +void ne6x_tail_update(struct ne6x_ring *ring, int val); +int ne6x_setup_tx_descriptors(struct ne6x_ring *tx_ring); +int ne6x_setup_rx_descriptors(struct ne6x_ring *rx_ring); +int ne6x_setup_cq_descriptors(struct ne6x_ring *cq_ring); +int ne6x_setup_tg_descriptors(struct ne6x_ring *tg_ring); +int ne6x_setup_tx_sgl(struct ne6x_ring *tx_ring); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/comm/version.h b/drivers/net/ethernet/bzwx/nce/comm/version.h new file mode 100644 index 000000000000..9affdb9803b1 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/comm/version.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _VERSION_H +#define _VERSION_H + +#define VERSION "1.0.4" + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h new file mode 100644 index 000000000000..1206d8ab3cfd --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x.h @@ -0,0 +1,468 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_H +#define _NE6X_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "reg.h" +#include "feature.h" +#include "txrx.h" +#include "common.h" +#include "ne6x_txrx.h" +#include "ne6x_ethtool.h" +#include "ne6x_procfs.h" +#include "ne6x_virtchnl_pf.h" +#include "version.h" + +#define NE6X_MAX_VP_NUM 64 +#define NE6X_PF_VP0_NUM 64 +#define NE6X_PF_VP1_NUM 65 +#define NE6X_MAILBOX_VP_NUM NE6X_PF_VP0_NUM +#define NE6X_MAX_MSIX_NUM 72 +#define NE6X_MIN_MSIX 2 + +#define NE6X_NIC_INT_VP 71 +#define NE6X_NIC_INT_START_BIT 42 + +#define wr64(a, reg, value) \ + writeq((value), ((void __iomem *)((a)->hw_addr0) + (reg))) +#define rd64(a, reg) \ + readq((void __iomem *)((a)->hw_addr0) + (reg)) +#define wr64_bar4(a, reg, value) \ + writeq((value), ((void __iomem *)((a)->hw_addr4) + (reg))) +#define rd64_bar4(a, reg) \ + readq((void __iomem *)((a)->hw_addr4) + (reg)) + +#define ne6x_pf_to_dev(pf) (&((pf)->pdev->dev)) +#define ne6x_get_vf_by_id(pf, vf_id) (&((pf)->vf[vf_id])) + +#define ADPT_PPORT(adpt) ((adpt)->port_info->hw_port_id) +#define ADPT_LPORT(adpt) ((adpt)->port_info->lport) +#define ADPT_VPORT(adpt) ((adpt)->vport) +#define ADPT_VPORTCOS(adpt) ((adpt)->base_queue + 160) + +enum ne6x_adapter_type { + NE6X_ADPT_PF = 0, + NE6X_ADPT_VF, +}; + +enum ne6x_adapter_flags { + NE6X_ADPT_F_DISABLE_FW_LLDP, + 
NE6X_ADPT_F_LINKDOWN_ON_CLOSE, + NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, + NE6X_ADPT_F_DDOS_SWITCH, + NE6X_ADPT_F_ACL, + NE6X_ADPT_F_TRUST_VLAN, + NE6X_ADPT_F_NBITS /* must be last */ +}; + +enum ne6x_pf_state { + NE6X_TESTING, + NE6X_DOWN, + NE6X_SERVICE_SCHED, + NE6X_INT_INIT_DOWN, + NE6X_CLIENT_SERVICE_REQUESTED, + NE6X_LINK_POOLING, + NE6X_CONFIG_BUSY, + NE6X_TIMEOUT_RECOVERY_PENDING, + NE6X_PF_RESET_REQUESTED, + NE6X_CORE_RESET_REQUESTED, + NE6X_GLOBAL_RESET_REQUESTED, + NE6X_RESET_INTR_RECEIVED, + NE6X_DOWN_REQUESTED, + NE6X_VF_DIS, + NE6X_MAILBOXQ_EVENT_PENDING, + NE6X_PF_INTX, + NE6X_PF_MSI, + NE6X_PF_MSIX, + NE6X_FLAG_SRIOV_ENA, + NE6X_REMOVE, + NE6X_STATE_NBITS /* must be last */ +}; + +enum { + NE6X_ETHTOOL_FLASH_810_LOADER = 0, + NE6X_ETHTOOL_FLASH_810_APP = 1, + NE6X_ETHTOOL_FLASH_807_APP = 2, + NE6X_ETHTOOL_FLASH_NP = 3, + NE6X_ETHTOOL_FLASH_PXE = 4, + NE6X_ETHTOOL_FRU = 0xf2, +}; + +/* MAC addr list head node struct */ +struct mac_addr_head { + struct list_head list; + struct mutex mutex; /* mutex */ +}; + +/* MAC addr list node struct */ +struct mac_addr_node { + struct list_head list; + u8 addr[32]; +}; + +/* values for UPT1_RSSConf.hashFunc */ +enum { + NE6X_FW_VER_NORMAL = 0x0, + NE6X_FW_VER_WHITELIST = 0x100, +}; + +struct ne6x_lump_tracking { + u16 num_entries; + u16 list[]; +}; + +struct ne6x_hw_port_stats { + u64 mac_rx_eth_byte; + u64 mac_rx_eth; + u64 mac_rx_eth_undersize; + u64 mac_rx_eth_crc; + u64 mac_rx_eth_64b; + u64 mac_rx_eth_65_127b; + u64 mac_rx_eth_128_255b; + u64 mac_rx_eth_256_511b; + u64 mac_rx_eth_512_1023b; + u64 mac_rx_eth_1024_15360b; + u64 mac_tx_eth_byte; + u64 mac_tx_eth; + u64 mac_tx_eth_undersize; + u64 mac_tx_eth_64b; + u64 mac_tx_eth_65_127b; + u64 mac_tx_eth_128_255b; + u64 mac_tx_eth_256_511b; + u64 mac_tx_eth_512_1023b; + u64 mac_tx_eth_1024_15360b; +}; + +/* struct that defines a adapter, associated with a dev */ +struct ne6x_adapter { + struct ne6x_adapt_comm comm; + struct net_device *netdev; + struct ne6x_pf 
*back; /* back pointer to PF */ + struct ne6x_port_info *port_info; /* back pointer to port_info */ + struct ne6x_ring **rx_rings; /* Rx ring array */ + struct ne6x_ring **tx_rings; /* Tx ring array */ + struct ne6x_ring **cq_rings; /* Tx ring array */ + struct ne6x_ring **tg_rings; /* Tx tag ring array */ + struct ne6x_q_vector **q_vectors; /* q_vector array */ + + /* used for loopback test */ + char *send_buffer; + wait_queue_head_t recv_notify; + u8 recv_done; + + irqreturn_t (*irq_handler)(int irq, void *data); + + u32 tx_restart; + u32 tx_busy; + u32 rx_buf_failed; + u32 rx_page_failed; + u16 num_q_vectors; + u16 base_vector; /* IRQ base for OS reserved vectors */ + enum ne6x_adapter_type type; + struct ne6x_vf *vf; /* VF associated with this adapter */ + u16 idx; /* software index in pf->adpt[] */ + u16 max_frame; + u16 rx_buf_len; + struct rtnl_link_stats64 net_stats; + struct rtnl_link_stats64 net_stats_offsets; + struct ne6x_eth_stats eth_stats; + struct ne6x_eth_stats eth_stats_offsets; + struct ne6x_rss_info rss_info; + int rss_size; + + bool irqs_ready; + bool current_isup; /* Sync 'link up' logging */ + u16 current_speed; + u16 vport; + u16 num_queue; /* Used queues */ + u16 base_queue; /* adapter's first queue in hw array */ + u16 num_tx_desc; + u16 num_rx_desc; + u16 num_cq_desc; + u16 num_tg_desc; + + u32 hw_feature; + bool netdev_registered; + + /* unicast MAC head node */ + struct mac_addr_head uc_mac_addr; + /* multicast MAC head node */ + struct mac_addr_head mc_mac_addr; + + struct work_struct set_rx_mode_task; + + struct ne6x_hw_port_stats stats; + DECLARE_BITMAP(flags, NE6X_ADPT_F_NBITS); + + struct list_head vlan_filter_list; + struct list_head macvlan_list; + /* Lock to protect accesses to MAC and VLAN lists */ + spinlock_t mac_vlan_list_lock; + + /* aRFS members only allocated for the PF ADPT */ +#define NE6X_MAX_RFS_FILTERS 0xFFFF +#define NE6X_MAX_ARFS_LIST 1024 +#define NE6X_ARFS_LST_MASK (NE6X_MAX_ARFS_LIST - 1) + struct hlist_head 
*arfs_fltr_list; + struct ne6x_arfs_active_fltr_cntrs *arfs_fltr_cntrs; + spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ + atomic_t *arfs_last_fltr_id; +} ____cacheline_internodealigned_in_smp; + +struct ne6x_dev_eeprom_info { + u8 vendor_id[3]; + u8 ocp_record_version; + u8 max_power_s0; + u8 max_power_s5; + u8 hot_card_cooling_passive_tier; + u8 cold_card_cooling_passive_tier; + u8 cooling_mode; + u16 hot_standby_airflow_require; + u16 cold_standby_airflow_require; + u8 uart_configuration_1; + u8 uart_configuration_2; + u8 usb_present; + u8 manageability_type; + u8 fru_write_protection; + u8 prog_mode_power_state_supported; + u8 hot_card_cooling_active_tier; + u8 cold_card_cooling_active_tier; + u8 transceiver_ref_power_Level; + u8 transceiver_ref_temp_Level; + u8 card_thermal_tier_with_local_fan_fail; + u16 product_mode; + u8 is_pcie_exist; + u32 logic_port_to_phyical; + u8 resv[3]; + u8 number_of_physical_controllers; + u8 control_1_udid[16]; + u8 control_2_udid[16]; + u8 control_3_udid[16]; + u8 control_4_udid[16]; + u32 hw_feature; + u32 hw_flag; + u8 port_0_mac[6]; + u8 port_1_mac[6]; + u8 port_2_mac[6]; + u8 port_3_mac[6]; + u8 rsv[9]; + u32 spd_verify_value; +} __packed; + +struct ne6x_hw { + u64 __iomem *hw_addr0; + u64 __iomem *hw_addr2; + u64 __iomem *hw_addr4; + + struct ne6x_port_info *port_info; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 dvm_ena; /* double vlan enable */ + struct ne6x_pf *back; + struct ne6x_bus_info bus; + u16 pf_port; + + u32 expect_vp; + u32 max_queue; + + struct ne6x_mbx_snapshot mbx_snapshot; + u8 ne6x_mbx_ready_to_send[64]; +}; + +#define ne6x_hw_to_dev(ptr) (&(container_of((ptr), struct ne6x_pf, hw))->pdev->dev) + +struct ne6x_firmware_ver_info { + u32 firmware_soc_ver; + u32 firmware_np_ver; + u32 firmware_pxe_ver; +}; + +/* struct that defines the Ethernet device */ +struct ne6x_pf { + struct pci_dev *pdev; + + /* OS 
reserved IRQ details */ + struct msix_entry *msix_entries; + u16 ctrl_adpt_idx; /* control adapter index in pf->adpt array */ + + struct ne6x_adapter **adpt; /* adapters created by the driver */ + + struct mutex switch_mutex; /* switch_mutex */ + struct mutex mbus_comm_mutex; /* mbus_comm_mutex */ + struct timer_list serv_tmr; + struct timer_list linkscan_tmr; + unsigned long service_timer_period; + struct work_struct serv_task; + struct work_struct linkscan_work; + + /* Virtchnl/SR-IOV config info */ + struct ne6x_vf *vf; + u16 num_alloc_vfs; + u16 num_qps_per_vf; + + u16 next_adpt; /* Next free slot in pf->adpt[] - 0-based! */ + u16 num_alloc_adpt; + + DECLARE_BITMAP(state, NE6X_STATE_NBITS); + + u32 tx_timeout_count; + u32 tx_timeout_recovery_level; + unsigned long tx_timeout_last_recovery; + struct ne6x_firmware_ver_info verinfo; + struct ne6x_dev_eeprom_info sdk_spd_info; + + struct ne6x_hw hw; + struct ne6x_lump_tracking *irq_pile; +#ifdef CONFIG_DEBUG_FS + struct dentry *ne6x_dbg_pf; + struct dentry *ne6x_dbg_info_pf; +#endif /* CONFIG_DEBUG_FS */ + struct proc_dir_entry *ne6x_proc_pf; + struct list_head key_filter_list; + spinlock_t key_list_lock; /* Lock to protect accesses to key filter */ + + char link_intname[NE6X_INT_NAME_STR_LEN]; + char mailbox_intname[NE6X_INT_NAME_STR_LEN]; + bool link_int_irq_ready; + bool mailbox_int_irq_ready; + bool is_fastmode; + u32 hw_flag; + u32 dump_info; + u16 dev_type; +}; + +static inline void ne6x_adpt_setup_irqhandler(struct ne6x_adapter *adpt, + irqreturn_t (*irq_handler)(int, void *)) +{ + adpt->irq_handler = irq_handler; +} + +struct ne6x_netdev_priv { + struct ne6x_adapter *adpt; +}; + +static inline bool ne6x_is_supported_port_vlan_proto(struct ne6x_hw *hw, + u16 vlan_proto) +{ + bool is_supported = false; + + switch (vlan_proto) { + case ETH_P_8021Q: + is_supported = true; + break; + case ETH_P_8021AD: + if (hw->dvm_ena) + is_supported = true; + break; + } + + return is_supported; +} + +static inline struct 
ne6x_pf *ne6x_netdev_to_pf(struct net_device *netdev) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + + return np->adpt->back; +} + +static inline struct ne6x_adapter *ne6x_netdev_to_adpt(struct net_device *netdev) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + + return np->adpt; +} + +#define NE6X_VLAN(tpid, vid, prio) \ + ((struct ne6x_vlan){ tpid, vid, prio }) + +struct rtnl_link_stats64 *ne6x_get_adpt_stats_struct(struct ne6x_adapter *adpt); + +void ne6x_switch_pci_write(void *bar_base, u32 base_addr, u32 offset_addr, u64 reg_value); +u64 ne6x_switch_pci_read(void *bar_base, u32 base_addr, u32 offset_addr); +int ne6x_adpt_restart_vp(struct ne6x_adapter *adpt, bool enable); +void ne6x_update_pf_stats(struct ne6x_adapter *adpt); +void ne6x_service_event_schedule(struct ne6x_pf *pf); + +void ne6x_down(struct ne6x_adapter *adpt); +int ne6x_up(struct ne6x_adapter *adpt); +int ne6x_adpt_configure(struct ne6x_adapter *adpt); +void ne6x_adpt_close(struct ne6x_adapter *adpt); + +int ne6x_alloc_rings(struct ne6x_adapter *adpt); +int ne6x_adpt_configure_tx(struct ne6x_adapter *adpt); +int ne6x_adpt_configure_rx(struct ne6x_adapter *adpt); +int ne6x_adpt_configure_cq(struct ne6x_adapter *adpt); +void ne6x_adpt_clear_rings(struct ne6x_adapter *adpt); +int ne6x_adpt_setup_tx_resources(struct ne6x_adapter *adpt); +int ne6x_adpt_setup_rx_resources(struct ne6x_adapter *adpt); + +int ne6x_close(struct net_device *netdev); +int ne6x_open(struct net_device *netdev); +int ne6x_adpt_open(struct ne6x_adapter *adpt); +int ne6x_adpt_mem_alloc(struct ne6x_pf *pf, struct ne6x_adapter *adpt); +void ne6x_adpt_map_rings_to_vectors(struct ne6x_adapter *adpt); +void ne6x_adpt_reset_stats(struct ne6x_adapter *adpt); +void ne6x_adpt_free_arrays(struct ne6x_adapter *adpt, bool free_qvectors); +int ne6x_adpt_register_netdev(struct ne6x_adapter *adpt); +bool netif_is_ne6x(struct net_device *dev); + +int ne6x_validata_tx_rate(struct ne6x_adapter *adpt, int vf_id, int 
min_tx_rate, int max_tx_rate); + +int ne6x_del_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); +struct ne6x_vlan_filter *ne6x_add_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); + +struct ne6x_key_filter *ne6x_add_key_list(struct ne6x_pf *pf, struct ne6x_key key); +int ne6x_del_key_list(struct ne6x_pf *pf, struct ne6x_key key); +int ne6x_add_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size); +int ne6x_del_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size); + +int ne6x_adpt_add_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); +int ne6x_adpt_del_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan); + +void ne6x_sync_features(struct net_device *netdev); + +int ne6x_adpt_add_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast); +int ne6x_adpt_del_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast); + +int ne6x_adpt_clear_mac_vlan(struct ne6x_adapter *adpt); +void ne6x_adpt_clear_ddos(struct ne6x_pf *pf); +void ne6x_linkscan_schedule(struct ne6x_pf *pf); + +ssize_t ne6x_proc_tps_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c new file mode 100644 index 000000000000..e5793f89cd4a --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.c @@ -0,0 +1,628 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_portmap.h" +#include "ne6x_dev.h" +#include "ne6x_txrx.h" +#include "ne6x_arfs.h" + +static void +ne6x_arfs_update_active_fltr_cntrs(struct ne6x_adapter *adpt, + struct ne6x_arfs_entry *entry, bool add); + +int ne6x_dev_add_fster_rules(struct ne6x_adapter *adpt, struct ne6x_fster_fltr *input, bool is_tun) +{ + u32 table_id = 0xffffffff; + struct ne6x_fster_table fster; + struct ne6x_fster_search_result result; + u32 *fster_data = (u32 *)&fster; + int ret = 0, index; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + memset(&fster, 0x00, sizeof(struct ne6x_fster_table)); + /* hash key */ + memcpy(&fster.ip, &input->ip, sizeof(fster.ip)); + /* hash data */ + memcpy(&fster.data, &input->data, sizeof(fster.data)); + + /* flow steer info */ + for (index = 0; index < 24; index++) + fster_data[index] = cpu_to_be32(fster_data[index]); + + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_ARFS_TABLE, (u32 *)fster_data, + sizeof(fster.ip), (u32 *)&result, 32); + + if (ret == -ENOENT) { + ret = ne6x_reg_table_insert(adpt->back, NE6X_REG_ARFS_TABLE, (u32 *)fster_data, + sizeof(fster), &table_id); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), "insert flow steer table fail %02x\n", + ADPT_LPORT(adpt)); + } else { + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_ARFS_TABLE, result.key_index + 8, + (u32 *)&fster.data, sizeof(fster.data)); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), "update flow steer table fail ret:%d\n", + ret); + } + + return 0; +} + +int ne6x_dev_del_fster_rules(struct ne6x_adapter *adpt, struct ne6x_fster_fltr *input, bool is_tun) +{ + struct ne6x_fster_table fster; + struct ne6x_fster_search_result result; + u32 *fster_data = (u32 *)&fster; + int ret = 0, index; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + memset(&fster, 0x00, sizeof(struct ne6x_fster_table)); + /* hash key */ + memcpy(&fster.ip, &input->ip, sizeof(fster.ip)); + + /* flow steer 
info */ + for (index = 0; index < 16; index++) + fster_data[index] = cpu_to_be32(fster_data[index]); + + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_ARFS_TABLE, (u32 *)fster_data, + sizeof(fster.ip), (u32 *)&result, 32); + if (!ret) { + ret = ne6x_reg_table_delete(adpt->back, NE6X_REG_ARFS_TABLE, + (u32 *)&fster.ip, sizeof(fster.ip)); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), "delete flow steer table fail ret:%d\n", + ret); + } else { + dev_err(ne6x_pf_to_dev(adpt->back), "search flow steer table fail ret:%d\n", ret); + } + return 0; +} + +static bool ne6x_is_arfs_active(struct ne6x_adapter *adpt) +{ + return !!adpt->arfs_fltr_list; +} + +static bool +ne6x_arfs_is_flow_expired(struct ne6x_adapter *adpt, struct ne6x_arfs_entry *arfs_entry) +{ +#define NE6X_ARFS_TIME_DELTA_EXPIRATION msecs_to_jiffies(5000) + if (rps_may_expire_flow(adpt->netdev, arfs_entry->fltr_info.q_index, + arfs_entry->flow_id, + arfs_entry->fltr_info.fltr_id)) + return true; + + /* expiration timer only used for UDP filters */ + if (arfs_entry->fltr_info.flow_type != NE6X_FLTR_PTYPE_NONF_IPV4_UDP && + arfs_entry->fltr_info.flow_type != NE6X_FLTR_PTYPE_NONF_IPV6_UDP) + return false; + + return time_in_range64(arfs_entry->time_activated + + NE6X_ARFS_TIME_DELTA_EXPIRATION, + arfs_entry->time_activated, get_jiffies_64()); +} + +static void +ne6x_arfs_update_flow_rules(struct ne6x_adapter *adpt, u16 idx, + struct hlist_head *add_list, + struct hlist_head *del_list) +{ + struct ne6x_arfs_entry *e; + struct hlist_node *n; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + /* go through the aRFS hlist at this idx and check for needed updates */ + hlist_for_each_entry_safe(e, n, &adpt->arfs_fltr_list[idx], list_entry) { + /* check if filter needs to be added to HW */ + if (e->fltr_state == NE6X_ARFS_INACTIVE) { + enum ne6x_fltr_ptype flow_type = e->fltr_info.flow_type; + struct ne6x_arfs_entry_ptr *ep = + devm_kzalloc(dev, sizeof(*ep), GFP_ATOMIC); + + if (!ep) + continue; + 
INIT_HLIST_NODE(&ep->list_entry); + /* reference aRFS entry to add HW filter */ + ep->arfs_entry = e; + hlist_add_head(&ep->list_entry, add_list); + e->fltr_state = NE6X_ARFS_ACTIVE; + /* expiration timer only used for UDP flows */ + if (flow_type == NE6X_FLTR_PTYPE_NONF_IPV4_UDP || + flow_type == NE6X_FLTR_PTYPE_NONF_IPV6_UDP) + e->time_activated = get_jiffies_64(); + } else if (e->fltr_state == NE6X_ARFS_ACTIVE) { + /* check if filter needs to be removed from HW */ + if (ne6x_arfs_is_flow_expired(adpt, e)) { + /* remove aRFS entry from hash table for delete + * and to prevent referencing it the next time + * through this hlist index + */ + hlist_del(&e->list_entry); + e->fltr_state = NE6X_ARFS_TODEL; + /* save reference to aRFS entry for delete */ + hlist_add_head(&e->list_entry, del_list); + } + } + } +} + +int ne6x_arfs_add_flow_rules(struct ne6x_adapter *adpt, struct hlist_head *add_list_head) +{ + struct ne6x_arfs_entry_ptr *ep; + struct hlist_node *n; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + hlist_for_each_entry_safe(ep, n, add_list_head, list_entry) { + int result; + + result = ne6x_dev_add_fster_rules(adpt, &ep->arfs_entry->fltr_info, false); + if (!result) + ne6x_arfs_update_active_fltr_cntrs(adpt, ep->arfs_entry, true); + else + dev_dbg(dev, "Unable to add aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n", + result, ep->arfs_entry->fltr_state, + ep->arfs_entry->fltr_info.fltr_id, + ep->arfs_entry->flow_id, + ep->arfs_entry->fltr_info.q_index); + + hlist_del(&ep->list_entry); + devm_kfree(dev, ep); + } + + return 0; +} + +int ne6x_arfs_del_flow_rules(struct ne6x_adapter *adpt, struct hlist_head *del_list_head) +{ + struct ne6x_arfs_entry *e; + struct hlist_node *n; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + hlist_for_each_entry_safe(e, n, del_list_head, list_entry) { + int result; + + result = ne6x_dev_del_fster_rules(adpt, &e->fltr_info, false); + if (!result) + 
ne6x_arfs_update_active_fltr_cntrs(adpt, e, false); + else + dev_dbg(dev, "Unable to delete aRFS entry, err %d fltr_state %d fltr_id %d flow_id %d Q %d\n", + result, e->fltr_state, e->fltr_info.fltr_id, + e->flow_id, e->fltr_info.q_index); + + /* The aRFS hash table is no longer referencing this entry */ + hlist_del(&e->list_entry); + devm_kfree(dev, e); + } + + return 0; +} + +void ne6x_sync_arfs_fltrs(struct ne6x_pf *pf) +{ + struct ne6x_adapter *pf_adpt; + unsigned int i; + u8 idx = 0; + + ne6x_for_each_pf(pf, idx) { + HLIST_HEAD(tmp_del_list); + HLIST_HEAD(tmp_add_list); + + pf_adpt = pf->adpt[idx]; + + if (!pf_adpt) + continue; + + if (unlikely(!(pf_adpt->netdev->features & NETIF_F_NTUPLE))) + continue; + + if (!ne6x_is_arfs_active(pf_adpt)) + continue; + + spin_lock_bh(&pf_adpt->arfs_lock); + /* Once we process aRFS for the PF ADPT get out */ + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) + ne6x_arfs_update_flow_rules(pf_adpt, i, &tmp_add_list, + &tmp_del_list); + spin_unlock_bh(&pf_adpt->arfs_lock); + + /* use list of ne6x_arfs_entry(s) for delete */ + ne6x_arfs_del_flow_rules(pf_adpt, &tmp_del_list); + + /* use list of ne6x_arfs_entry(s) for add */ + ne6x_arfs_add_flow_rules(pf_adpt, &tmp_add_list); + } +} + +static void +ne6x_arfs_update_active_fltr_cntrs(struct ne6x_adapter *adpt, + struct ne6x_arfs_entry *entry, bool add) +{ + struct ne6x_arfs_active_fltr_cntrs *fltr_cntrs = adpt->arfs_fltr_cntrs; + + switch (entry->fltr_info.flow_type) { + case NE6X_FLTR_PTYPE_NONF_IPV4_TCP: + if (add) + atomic_inc(&fltr_cntrs->active_tcpv4_cnt); + else + atomic_dec(&fltr_cntrs->active_tcpv4_cnt); + break; + case NE6X_FLTR_PTYPE_NONF_IPV6_TCP: + if (add) + atomic_inc(&fltr_cntrs->active_tcpv6_cnt); + else + atomic_dec(&fltr_cntrs->active_tcpv6_cnt); + break; + case NE6X_FLTR_PTYPE_NONF_IPV4_UDP: + if (add) + atomic_inc(&fltr_cntrs->active_udpv4_cnt); + else + atomic_dec(&fltr_cntrs->active_udpv4_cnt); + break; + case NE6X_FLTR_PTYPE_NONF_IPV6_UDP: + if (add) + 
atomic_inc(&fltr_cntrs->active_udpv6_cnt); + else + atomic_dec(&fltr_cntrs->active_udpv6_cnt); + break; + default: + dev_err(ne6x_pf_to_dev(adpt->back), "aRFS: Failed to update filter counters, invalid filter type %d\n", + entry->fltr_info.flow_type); + } +} + +static bool +ne6x_arfs_cmp(struct ne6x_fster_fltr *fltr_info, const struct flow_keys *fk) +{ + bool is_v4; + + if (!fltr_info || !fk) + return false; + + is_v4 = (fltr_info->flow_type == NE6X_FLTR_PTYPE_NONF_IPV4_UDP || + fltr_info->flow_type == NE6X_FLTR_PTYPE_NONF_IPV4_TCP); + + if (fk->basic.n_proto == htons(ETH_P_IP) && is_v4) + return (fltr_info->ip.v4.proto == fk->basic.ip_proto && + fltr_info->ip.v4.src_port == fk->ports.src && + fltr_info->ip.v4.dst_port == fk->ports.dst && + fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src && + fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst); + + else if (fk->basic.n_proto == htons(ETH_P_IPV6) && !is_v4) + return (fltr_info->ip.v6.proto == fk->basic.ip_proto && + fltr_info->ip.v6.src_port == fk->ports.src && + fltr_info->ip.v6.dst_port == fk->ports.dst && + !memcmp(&fltr_info->ip.v6.src_ip, + &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)) && + !memcmp(&fltr_info->ip.v6.dst_ip, + &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr))); + + return false; +} + +static struct ne6x_arfs_entry * +ne6x_arfs_build_entry(struct ne6x_adapter *adpt, const struct flow_keys *fk, + u32 hash, u16 rxq_idx, u32 flow_id) +{ + struct ne6x_arfs_entry *arfs_entry; + struct ne6x_fster_fltr *fltr_info; + u8 ip_proto; + + arfs_entry = devm_kzalloc(ne6x_pf_to_dev(adpt->back), + sizeof(*arfs_entry), + GFP_ATOMIC | __GFP_NOWARN); + if (!arfs_entry) + return NULL; + + fltr_info = &arfs_entry->fltr_info; + fltr_info->q_index = rxq_idx; + fltr_info->dest_adpt = adpt->idx; + ip_proto = fk->basic.ip_proto; + + if (fk->basic.n_proto == htons(ETH_P_IP)) { + fltr_info->ip.v4.proto = ip_proto; + fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ? 
+ NE6X_FLTR_PTYPE_NONF_IPV4_TCP : + NE6X_FLTR_PTYPE_NONF_IPV4_UDP; + fltr_info->ip.v4.src_ip = fk->addrs.v4addrs.src; + fltr_info->ip.v4.dst_ip = fk->addrs.v4addrs.dst; + fltr_info->ip.v4.src_port = fk->ports.src; + fltr_info->ip.v4.dst_port = fk->ports.dst; + fltr_info->ip.v4.proto = fk->basic.ip_proto; + fltr_info->ip.v4.pi = ADPT_LPORT(adpt); + } else { /* ETH_P_IPV6 */ + fltr_info->ip.v6.proto = ip_proto; + fltr_info->flow_type = (ip_proto == IPPROTO_TCP) ? + NE6X_FLTR_PTYPE_NONF_IPV6_TCP : + NE6X_FLTR_PTYPE_NONF_IPV6_UDP; + memcpy(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src, + sizeof(struct in6_addr)); + memcpy(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst, + sizeof(struct in6_addr)); + fltr_info->ip.v6.src_port = fk->ports.src; + fltr_info->ip.v6.dst_port = fk->ports.dst; + fltr_info->ip.v6.proto = fk->basic.ip_proto; + fltr_info->ip.v6.pi = ADPT_LPORT(adpt); + } + fltr_info->data.tab_id = 5; + fltr_info->data.port = ADPT_VPORT(adpt); + fltr_info->data.cos = cpu_to_be16(rxq_idx); + fltr_info->data.hash = hash; + + arfs_entry->flow_id = flow_id; + fltr_info->fltr_id = + atomic_inc_return(adpt->arfs_last_fltr_id) % RPS_NO_FILTER; + + return arfs_entry; +} + +void ne6x_free_cpu_rx_rmap(struct ne6x_adapter *adpt) +{ + struct net_device *netdev; + + if (!adpt) + return; + + netdev = adpt->netdev; + if (!netdev || !netdev->rx_cpu_rmap) + return; + + free_irq_cpu_rmap(netdev->rx_cpu_rmap); + netdev->rx_cpu_rmap = NULL; +} + +int ne6x_get_irq_num(struct ne6x_pf *pf, int idx) +{ + if (!pf->msix_entries) + return -EINVAL; + + return pf->msix_entries[idx].vector; +} + +int ne6x_set_cpu_rx_rmap(struct ne6x_adapter *adpt) +{ + struct net_device *netdev; + struct ne6x_pf *pf; + int base_idx, i; + + pf = adpt->back; + + netdev = adpt->netdev; + if (!pf || !netdev || !adpt->num_q_vectors) + return -EINVAL; + + netdev_dbg(netdev, "Setup CPU RMAP: adpt type 0x%x, ifname %s, q_vectors %d\n", + adpt->type, netdev->name, adpt->num_q_vectors); + + netdev->rx_cpu_rmap = 
alloc_irq_cpu_rmap(adpt->num_q_vectors); + if (unlikely(!netdev->rx_cpu_rmap)) + return -EINVAL; + + base_idx = adpt->base_vector; + for (i = 0; i < adpt->num_q_vectors; i++) { + if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, ne6x_get_irq_num(pf, base_idx + i))) { + ne6x_free_cpu_rx_rmap(adpt); + return -EINVAL; + } + } + + return 0; +} + +int ne6x_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, + u16 rxq_idx, u32 flow_id) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_arfs_entry *arfs_entry; + struct ne6x_adapter *adpt = np->adpt; + struct flow_keys fk; + struct ne6x_pf *pf; + __be16 n_proto; + u8 ip_proto; + u16 idx; + u32 hash; + int ret; + + if (unlikely(!(netdev->features & NETIF_F_NTUPLE))) + return -ENODEV; + + /* failed to allocate memory for aRFS so don't crash */ + if (unlikely(!adpt->arfs_fltr_list)) + return -ENODEV; + + pf = adpt->back; + + if (unlikely(test_bit(NE6X_DOWN, pf->state))) + return -ENODEV; + + /* aRFS only supported on Rx queues belonging to PF ADPT */ + if (rxq_idx >= adpt->num_queue) + return -EOPNOTSUPP; + + if (skb->encapsulation) + return -EPROTONOSUPPORT; + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) + return -EPROTONOSUPPORT; + + n_proto = fk.basic.n_proto; + /* Support only IPV4 and IPV6 */ + if ((n_proto == htons(ETH_P_IP) && !ip_is_fragment(ip_hdr(skb))) || + n_proto == htons(ETH_P_IPV6)) + ip_proto = fk.basic.ip_proto; + else + return -EPROTONOSUPPORT; + + /* Support only TCP and UDP */ + if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) + return -EPROTONOSUPPORT; + + /* choose the aRFS list bucket based on skb hash */ + hash = skb_get_hash_raw(skb); + idx = skb_get_hash_raw(skb) & NE6X_ARFS_LST_MASK; + /* search for entry in the bucket */ + spin_lock_bh(&adpt->arfs_lock); + hlist_for_each_entry(arfs_entry, &adpt->arfs_fltr_list[idx], + list_entry) { + struct ne6x_fster_fltr *fltr_info = &arfs_entry->fltr_info; + + /* keep searching for the already existing arfs_entry flow */ + if 
(!ne6x_arfs_cmp(fltr_info, &fk)) + continue; + + ret = fltr_info->fltr_id; + + if (fltr_info->q_index == rxq_idx || + arfs_entry->fltr_state != NE6X_ARFS_ACTIVE) + goto out; + + /* update the queue to forward to on an already existing flow */ + fltr_info->q_index = rxq_idx; + fltr_info->data.cos = cpu_to_be16(rxq_idx); + arfs_entry->fltr_state = NE6X_ARFS_INACTIVE; + ne6x_arfs_update_active_fltr_cntrs(adpt, arfs_entry, false); + goto out_schedule_service_task; + } + + arfs_entry = ne6x_arfs_build_entry(adpt, &fk, hash, rxq_idx, flow_id); + if (!arfs_entry) { + ret = -ENOMEM; + goto out; + } + + ret = arfs_entry->fltr_info.fltr_id; + INIT_HLIST_NODE(&arfs_entry->list_entry); + hlist_add_head(&arfs_entry->list_entry, &adpt->arfs_fltr_list[idx]); +out_schedule_service_task: + ne6x_service_event_schedule(pf); +out: + spin_unlock_bh(&adpt->arfs_lock); + return ret; +} + +static int ne6x_init_arfs_cntrs(struct ne6x_adapter *adpt) +{ + if (!adpt) + return -EINVAL; + + adpt->arfs_fltr_cntrs = kzalloc(sizeof(*adpt->arfs_fltr_cntrs), + GFP_KERNEL); + if (!adpt->arfs_fltr_cntrs) + return -ENOMEM; + + adpt->arfs_last_fltr_id = kzalloc(sizeof(*adpt->arfs_last_fltr_id), + GFP_KERNEL); + if (!adpt->arfs_last_fltr_id) { + kfree(adpt->arfs_fltr_cntrs); + adpt->arfs_fltr_cntrs = NULL; + return -ENOMEM; + } + + return 0; +} + +void ne6x_init_arfs(struct ne6x_adapter *adpt) +{ + struct hlist_head *arfs_fltr_list; + unsigned int i; + + if (!adpt) + return; + + arfs_fltr_list = kcalloc(NE6X_MAX_ARFS_LIST, sizeof(*arfs_fltr_list), + GFP_KERNEL); + if (!arfs_fltr_list) + return; + + if (ne6x_init_arfs_cntrs(adpt)) + goto free_arfs_fltr_list; + + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) + INIT_HLIST_HEAD(&arfs_fltr_list[i]); + + spin_lock_init(&adpt->arfs_lock); + + adpt->arfs_fltr_list = arfs_fltr_list; + + return; + +free_arfs_fltr_list: + kfree(arfs_fltr_list); +} + +void ne6x_clear_arfs(struct ne6x_adapter *adpt) +{ + struct device *dev; + unsigned int i; + struct ne6x_arfs_entry *r; + 
struct hlist_node *n; + HLIST_HEAD(tmp_del_list); + + if (!adpt || !adpt->back || !adpt->arfs_fltr_list) + return; + + dev = ne6x_pf_to_dev(adpt->back); + + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) { + spin_lock_bh(&adpt->arfs_lock); + hlist_for_each_entry_safe(r, n, &adpt->arfs_fltr_list[i], + list_entry) { + if (r->fltr_state == NE6X_ARFS_ACTIVE || r->fltr_state == NE6X_ARFS_TODEL) { + hlist_del(&r->list_entry); + hlist_add_head(&r->list_entry, &tmp_del_list); + } + } + spin_unlock_bh(&adpt->arfs_lock); + } + + hlist_for_each_entry_safe(r, n, &tmp_del_list, list_entry) { + ne6x_dev_del_fster_rules(adpt, &r->fltr_info, false); + hlist_del(&r->list_entry); + devm_kfree(dev, r); + } + + for (i = 0; i < NE6X_MAX_ARFS_LIST; i++) { + struct ne6x_arfs_entry *r; + struct hlist_node *n; + + spin_lock_bh(&adpt->arfs_lock); + hlist_for_each_entry_safe(r, n, &adpt->arfs_fltr_list[i], + list_entry) { + hlist_del(&r->list_entry); + devm_kfree(dev, r); + } + spin_unlock_bh(&adpt->arfs_lock); + } + + kfree(adpt->arfs_fltr_list); + adpt->arfs_fltr_list = NULL; + kfree(adpt->arfs_last_fltr_id); + adpt->arfs_last_fltr_id = NULL; + kfree(adpt->arfs_fltr_cntrs); + adpt->arfs_fltr_cntrs = NULL; +} + +void ne6x_remove_arfs(struct ne6x_adapter *adpt) +{ + if (!adpt) + return; + + ne6x_clear_arfs(adpt); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h new file mode 100644 index 000000000000..a24d9f19d478 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_arfs.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_ARFS_H +#define _NE6X_ARFS_H + +/* protocol enumeration for filters */ +enum ne6x_fltr_ptype { + /* NONE - used for undef/error */ + NE6X_FLTR_PTYPE_NONF_NONE = 0, + NE6X_FLTR_PTYPE_NONF_IPV4_UDP, + NE6X_FLTR_PTYPE_NONF_IPV4_TCP, + NE6X_FLTR_PTYPE_NONF_IPV6_UDP, + NE6X_FLTR_PTYPE_NONF_IPV6_TCP, + NE6X_FLTR_PTYPE_MAX, +}; + +struct ne6x_fster_v4 { + __be32 rsv0[3]; + __be32 dst_ip; + __be32 rsv1[3]; + __be32 src_ip; + __be16 dst_port; + __be16 src_port; + __be16 rsv2; + u8 pi; + u8 proto; + u8 rsv3[24]; +}; + +#define NE6X_IPV6_ADDR_LEN_AS_U32 4 + +struct ne6x_fster_v6 { + __be32 dst_ip[NE6X_IPV6_ADDR_LEN_AS_U32]; + __be32 src_ip[NE6X_IPV6_ADDR_LEN_AS_U32]; + __be16 dst_port; + __be16 src_port; + __be16 rsv0; + u8 pi; + u8 proto; + u8 rsv1[24]; +}; + +struct ne6x_fster_data { + u8 tab_id; + u8 port; + __be16 cos; + __be32 hash; + u8 rsv0[24]; +}; + +struct ne6x_fster_table { + union { + struct ne6x_fster_v4 v4; + struct ne6x_fster_v6 v6; + } ip; + struct ne6x_fster_data data; +}; + +struct ne6x_fster_search_result { + u32 key_index; + struct ne6x_fster_data data; +}; + +struct ne6x_fster_fltr { + struct list_head fltr_node; + enum ne6x_fltr_ptype flow_type; + + union { + struct ne6x_fster_v4 v4; + struct ne6x_fster_v6 v6; + } ip; + struct ne6x_fster_data data; + + /* filter control */ + u16 q_index; + u16 dest_adpt; + u8 cnt_ena; + u16 cnt_index; + u32 fltr_id; +}; + +enum ne6x_arfs_fltr_state { + NE6X_ARFS_INACTIVE, + NE6X_ARFS_ACTIVE, + NE6X_ARFS_TODEL, +}; + +struct ne6x_arfs_entry { + struct ne6x_fster_fltr fltr_info; + struct ne6x_arfs_active_fltr_cntrs *arfs_fltr_cntrs; + struct hlist_node list_entry; + u64 time_activated; /* only valid for UDP flows */ + u32 flow_id; + /* fltr_state = 0 - NE6X_ARFS_INACTIVE: + * filter needs to be updated or programmed in HW. + * fltr_state = 1 - NE6X_ARFS_ACTIVE: + * filter is active and programmed in HW. 
+ * fltr_state = 2 - NE6X_ARFS_TODEL: + * filter has been deleted from HW and needs to be removed from + * the aRFS hash table. + */ + u8 fltr_state; +}; + +struct ne6x_arfs_entry_ptr { + struct ne6x_arfs_entry *arfs_entry; + struct hlist_node list_entry; +}; + +struct ne6x_arfs_active_fltr_cntrs { + atomic_t active_tcpv4_cnt; + atomic_t active_tcpv6_cnt; + atomic_t active_udpv4_cnt; + atomic_t active_udpv6_cnt; +}; + +#ifdef CONFIG_RFS_ACCEL +int +ne6x_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, + u16 rxq_idx, u32 flow_id); +void ne6x_clear_arfs(struct ne6x_adapter *adpt); +void ne6x_free_cpu_rx_rmap(struct ne6x_adapter *adpt); +void ne6x_init_arfs(struct ne6x_adapter *adpt); +void ne6x_sync_arfs_fltrs(struct ne6x_pf *pf); +int ne6x_set_cpu_rx_rmap(struct ne6x_adapter *adpt); +void ne6x_remove_arfs(struct ne6x_adapter *adpt); +#else +static inline void ne6x_clear_arfs(struct ne6x_adapter *adpt) { } +static inline void ne6x_free_cpu_rx_rmap(struct ne6x_adapter *adpt) { } +static inline void ne6x_init_arfs(struct ne6x_adapter *adpt) { } +static inline void ne6x_sync_arfs_fltrs(struct ne6x_pf *pf) { } +static inline void ne6x_remove_arfs(struct ne6x_adapter *adpt) { } + +static inline int ne6x_set_cpu_rx_rmap(struct ne6x_adapter __always_unused *adpt) +{ + return 0; +} + +static inline int +ne6x_rx_flow_steer(struct net_device __always_unused *netdev, + const struct sk_buff __always_unused *skb, + u16 __always_unused rxq_idx, u32 __always_unused flow_id) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_RFS_ACCEL */ + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c new file mode 100644 index 000000000000..b945381ee8e8 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.c @@ -0,0 +1,2397 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include +#include +#include +#include +#include +#include + +#include "ne6x.h" +#include "ne6x_debugfs.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include "ne6x_txrx.h" +#include "ne6x_arfs.h" + +#define NE6X_CQ_TO_OFF_TX(__desc, __idx) \ + (((__desc)->payload.data[3 * (__idx) + 1] << 0) | \ + ((__desc)->payload.data[3 * (__idx) + 2] << 8)) +#define NE6X_CQ_TO_STS_TX(__desc, __idx) ((__desc)->payload.data[3 * (__idx)]) + +#define NE6X_CQ_TO_LEN_RX(__desc, __idx) \ + (((__desc)->payload.data[5 * (__idx) + 1] << 0) | \ + ((__desc)->payload.data[5 * (__idx) + 2] << 8)) +#define NE6X_CQ_TO_STS_RX(__desc, __idx) ((__desc)->payload.data[5 * (__idx)]) +#define NE6X_CQ_TO_OFF_RX(__desc, __idx) \ + (((__desc)->payload.data[5 * (__idx) + 3] << 0) | \ + ((__desc)->payload.data[5 * (__idx) + 4] << 8)) + +#define PARA_KEY_STRING " " +#define ARRAY_P_MAX_COUNT 140 +#define HASH_KEY_SIZE 64 +#define HASH_DATA_SIZE 64 +#define TABLE_WIDHT_BIT_512 512 +#define TABLE_WIDHT_BIT_128 128 +#define TABLE_WIDHT_BIT_64 64 +#define TABLE_WIDHT_BIT_16 16 +#define TABLE_WIDHT_BIT_256 256 +#define TABLE_WIDHT_BIT_32 32 + +#define FRU_CHECK_6ASCII(x) (((x) >> 6) == 0x2) +#define ASCII628_BASE 32 +#define FRU_6BIT_8BITLENGTH(x) (((x) * 4) / 3) + +static int table_size[] = { + TABLE_WIDHT_BIT_512, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_16, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_256, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_64, + TABLE_WIDHT_BIT_32 +}; + +const struct ne6x_debug_info ne6x_device_info[] = { + {0xE220, "N5E025P2-PAUA", "25G"}, {0xE22C, "N5E025P2-NAUA", "25G"}, + {0xE221, "N5S025P2-PAUA", "25G"}, {0xE22D, "N5S025P2-NAUA", "25G"}, + {0xEA20, "N6E100P2-PAUA", "100G"}, {0xEA2C, "N6E100P2-NAUA", "100G"}, + {0xEA21, "N6S100P2-PAUA", "100G"}, {0xEA2D, "N6S100P2-NAUA", "100G"}, + {0xD221, "N6S025P2-PDUA", "25G"}, {0xDA21, "N6S100P2-PDUA", "100G"}, + {0x1220, "N5E025P2-PAGA", "25G"}, {0x122C, "N5E025P2-NAGA", "25G"}, + {0x1221, "N5S025P2-PAGA", 
"25G"}, {0x122D, "N5S025P2-NAGA", "25G"}, + {0x1A20, "N6E100P2-PAGA", "100G"}, {0x1A2C, "N6E100P2-NAGA", "100G"}, + {0x1A21, "N6S100P2-PAGA", "100G"}, {0x1A2D, "N6S100P2-NAGA", "100G"}, + {0x0221, "N6S100P2-NAGA", "100G"}, {0x0A21, "N6S100P2-PDGA", "100G"} }; + +char *my_strtok(char *p_in_string, char *p_in_delimit, char **pp_out_ret) +{ + static char *p_tmp; + char *p_strstr = NULL; + char *ret = NULL; + int for_index; + + if (!pp_out_ret) + return NULL; + + *pp_out_ret = NULL; + if (!p_in_delimit) + return p_in_string; + + if (p_in_string) + p_tmp = p_in_string; + + if (!p_tmp) + return NULL; + + ret = p_tmp; + p_strstr = strstr(p_tmp, p_in_delimit); + if (p_strstr) { + p_tmp = p_strstr + strlen(p_in_delimit); + for (for_index = 0; for_index < strlen(p_in_delimit); for_index++) + *(p_strstr + for_index) = '\0'; + } else { + p_tmp = NULL; + } + + *pp_out_ret = p_tmp; + + return ret; +} + +int my_isdigit(char in_char) +{ + if ((in_char >= '0') && (in_char <= '9')) + return 1; + else + return 0; +} + +int my_atoi(char *p_in_string) +{ + int flag = 1; + int ret = 0; + + while (my_isdigit(p_in_string[0]) == 0) + p_in_string++; + + if (*(p_in_string - 1) == '-') + flag = -1; + + while (my_isdigit(p_in_string[0]) != 0) { + ret *= 10; + ret += p_in_string[0] - '0'; + if (ret > INT_MAX || ret < INT_MIN) + return 0; + + p_in_string++; + } + + if (ret != 0) + return (flag * ret); + else + return 0; +} + +static struct dentry *ne6x_dbg_root; +u8 *ne6x_dbg_get_fru_product_part(u8 *buffer, enum fru_product_part part, u8 *len); + +void ne6x_dbg_show_queue(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + u64 head, tail, oft; + int queue_num = 0; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + + dev_info(&pf->pdev->dev, 
"--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->rx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_OFST)); + } + dev_info(&pf->pdev->dev, "----RX: Netdev[%d] Queue[%d]: H[0x%04llx], T[0x%04llx], RQ[0x%04llx], idle:%04d, alloc:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, oft, NE6X_DESC_UNUSED(ring), ring->next_to_alloc, + ring->next_to_use, ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_OFST)); + } + dev_info(&pf->pdev->dev, "----TX: Netdev[%d] Queue[%d]: H[0x%04llx], T[0x%04llx], SQ[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + 
i, j, head, tail, oft, NE6X_DESC_UNUSED(ring), ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->cq_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_TAIL_POINTER)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_CQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_CQ_TAIL_POINTER)); + } + dev_info(&pf->pdev->dev, "----CQ: Netdev[%d] Queue[%d]: H[0x%04llx], T[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, NE6X_DESC_UNUSED(ring), ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + } +} + +void ne6x_dbg_show_ring(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int i, j, k, l; + union ne6x_rx_desc *rx_desc; + struct ne6x_tx_desc *tx_desc; + struct ne6x_cq_desc *cq_desc; + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->rx_rings[j]; + for (k = 0; k < ring->count; k++) { + rx_desc = NE6X_RX_DESC(ring, k); + if (!rx_desc->wb.u.val) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** rx_desc[%d], vp[%d], mml[%d], sml[%d], bsa[0x%llx], bma[0x%llx], flag[0x%x], vp[%d], pkt_len[%d]\n", + k, rx_desc->w.vp, rx_desc->w.mop_mem_len, + rx_desc->w.sop_mem_len, rx_desc->w.buffer_sop_addr, + 
rx_desc->w.buffer_mop_addr, rx_desc->wb.u.val, + rx_desc->wb.vp, rx_desc->wb.pkt_len); + } + } + + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + for (k = 0; k < ring->count; k++) { + tx_desc = NE6X_TX_DESC(ring, k); + if (!tx_desc->buffer_mop_addr) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** tx_desc[%d], flag[0x%x], vp[%d], et[%d], ch[%d], tt[%d],sopv[%d],eopv[%d],tso[%d],l3chk[%d],l3oft[%d],l4chk[%d],l4oft[%d],pld[%d],mop[%d],sop[%d],mss[%d],mopa[%lld],sopa[%lld]\n", + k, tx_desc->u.val, tx_desc->vp, tx_desc->event_trigger, + tx_desc->chain, tx_desc->transmit_type, tx_desc->sop_valid, + tx_desc->eop_valid, tx_desc->tso, tx_desc->l3_csum, + tx_desc->l3_ofst, tx_desc->l4_csum, tx_desc->l4_ofst, + tx_desc->pld_ofst, tx_desc->mop_cnt, tx_desc->sop_cnt, + tx_desc->mss, tx_desc->buffer_mop_addr, + tx_desc->buffer_sop_addr); + } + } + + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->cq_rings[j]; + for (k = 0; k < ring->count; k++) { + cq_desc = NE6X_CQ_DESC(ring, k); + if (!cq_desc->num) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, + "**** cq_desc[%d], vp[%d], ctype[%d], num[%d]\n", k, + ring->reg_idx, cq_desc->ctype, cq_desc->num); + for (l = 0; l < cq_desc->num; l++) { + if (cq_desc->ctype == 0) + dev_info(&pf->pdev->dev, + "******[TX] %d:%d val:0x%x\n", l, + NE6X_CQ_TO_OFF_TX(cq_desc, l), + NE6X_CQ_TO_STS_TX(cq_desc, l)); + else + dev_info(&pf->pdev->dev, + "******[RX] %d:%d val:0x%x len:0x%x\n", l, + NE6X_CQ_TO_OFF_RX(cq_desc, l), + NE6X_CQ_TO_STS_RX(cq_desc, l), + NE6X_CQ_TO_LEN_RX(cq_desc, l)); + } + } + } + } +} + +void ne6x_dbg_show_txtail(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int i, j; + struct ne6x_adapter *adpt; + struct ne6x_ring *ring; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + 
+ dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] TX queue[%d] processed %llx packets\n", i, j, + readq(ring->tail + j)); + } + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + } +} + +void ne6x_dbg_show_txq(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->tx_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] TX queue[%d] processed %lld packets\n", i, j, + ring->stats.packets); + } + dev_info(&pf->pdev->dev, "+----------------------------------------------------------------+\n"); + } +} + +void ne6x_dbg_show_rxq(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->rx_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] RX queue[%d] processed %lld packets\n", i, j, + ring->stats.packets); + } + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + } +} + +void ne6x_dbg_show_cq(struct ne6x_pf *pf, char *cmd_buf, 
int count) +{ + struct ne6x_ring *ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (j = 0; j < adpt->num_queue; j++) { + ring = adpt->cq_rings[j]; + dev_info(&pf->pdev->dev, + "+ Netdev[%d] CQ queue[%d] processed %lld packets\n", i, j, + ring->stats.packets); + } + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + } +} + +void ne6x_dbg_clean_queue(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + struct ne6x_ring *cq_ring; + struct ne6x_adapter *adpt; + int i, j; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + tx_ring = adpt->tx_rings[j]; + rx_ring = adpt->rx_rings[j]; + cq_ring = adpt->cq_rings[j]; + + memset(&tx_ring->stats, 0, sizeof(struct ne6x_q_stats)); + memset(&tx_ring->tx_stats, 0, sizeof(struct ne6x_txq_stats)); + + memset(&rx_ring->stats, 0, sizeof(struct ne6x_q_stats)); + memset(&rx_ring->rx_stats, 0, sizeof(struct ne6x_rxq_stats)); + + memset(&cq_ring->stats, 0, sizeof(struct ne6x_q_stats)); + memset(&cq_ring->cq_stats, 0, sizeof(struct ne6x_cq_stats)); + } + dev_info(&pf->pdev->dev, "---------------------------adpt[%d] all ring cleaned---------------------------------------", + i); + } +} + +void ne6x_dbg_show_txring(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *tx_ring; + struct ne6x_adapter *adpt; + u64 head, tail, oft; + int queue_num = 0; + int i, j; + + 
dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+----------------------------tx begin------------------------------+\n"); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + tx_ring = adpt->tx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_SQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_SQ_OFST)); + } + dev_info(&pf->pdev->dev, "---- Netdev[%d] Queue[%02d]: H[0x%04llx], T[0x%04llx], SQ[0x%04llx], idle:%04d, use:%04d, clean:%04d, busy:%lld\n", + i, j, head, tail, oft, NE6X_DESC_UNUSED(tx_ring), + tx_ring->next_to_use, tx_ring->next_to_clean, + tx_ring->tx_stats.tx_busy); + } + } + dev_info(&pf->pdev->dev, "+----------------------------tx end--------------------------------+\n"); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_rxring(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *rx_ring; + struct ne6x_adapter *adpt; + u64 head, tail, oft; + int queue_num = 0; + int i, j; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+----------------------------rx begin------------------------------+\n"); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + 
for (j = 0; j < adpt->num_queue; j++) { + rx_ring = adpt->rx_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(queue_num, NE6X_RQ_OFST)); + } else { + head = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_TAIL_POINTER)); + oft = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_OFST)); + } + dev_info(&pf->pdev->dev, "---- Netdev[%d] Queue[%02d]: H[0x%04llx], T[0x%04llx], RQ[0x%04llx], alloc:%04d, use:%04d, clean:%04d, cq_expect:%04d\n", + i, j, head, tail, oft, rx_ring->next_to_alloc, + rx_ring->next_to_use, rx_ring->next_to_clean, + rx_ring->cq_last_expect); + } + } + dev_info(&pf->pdev->dev, "+----------------------------rx end--------------------------------+\n"); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_cqring(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_ring *cq_ring; + struct ne6x_adapter *adpt; + int queue_num = 0; + u64 head, tail; + int i, j; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+----------------------------cq begin------------------------------+\n"); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", i); + continue; + } + + for (j = 0; j < adpt->num_queue; j++) { + cq_ring = adpt->cq_rings[j]; + queue_num = adpt->base_queue + j; + if (queue_num < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_HD_POINTER)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(queue_num, NE6X_CQ_TAIL_POINTER)); + } else { + head = rd64_bar4(&pf->hw, + 
NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_HD_POINTER)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(queue_num - NE6X_PF_VP0_NUM, + NE6X_RQ_TAIL_POINTER)); + } + dev_info(&pf->pdev->dev, "---- Netdev[%d] Queue[%02d]: H[0x%04llx], T[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, j, head, tail, NE6X_DESC_UNUSED(cq_ring), cq_ring->next_to_use, + cq_ring->next_to_clean); + } + } + dev_info(&pf->pdev->dev, "+----------------------------cq end--------------------------------+\n"); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_txdesc_states(int adpt_num, int queue_num, struct ne6x_pf *pf) +{ + struct ne6x_tx_desc *tx_desc = NULL; + struct ne6x_ring *tx_ring = NULL; + struct ne6x_adapter *adpt = NULL; + int i; + + if (adpt_num > pf->num_alloc_adpt) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + adpt = pf->adpt[adpt_num]; + + if (queue_num > adpt->num_queue) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", adpt_num); + return; + } + + tx_ring = adpt->tx_rings[queue_num]; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+-----------------------------------Netdev[%d] - Queue[%d] - tx_desc begin-----------------------------------------+\n", + adpt_num, queue_num); + for (i = 0; i < tx_ring->count; i++) { + tx_desc = NE6X_TX_DESC(tx_ring, i); + if (!tx_desc->buffer_mop_addr && i != 0) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "tx_desc[%d]\n", i); + dev_info(&pf->pdev->dev, "struct ne6x_tx_desc\n" + "{\n" + " u8 flags : 8; [0x%x]\n" + " u8 vp : 7; [%d]\n" + " u8 event_trigger : 1; [%d]\n" + " u8 chain : 1; [%d]\n" + " u8 transmit_type : 2; [%d]\n" + " u8 sop_valid : 1; [%d]\n" + " u8 eop_valid : 1; [%d]\n" + " u8 tso : 1; [%d]\n" + " u8 l3_csum : 1; [%d]\n" + " u8 l3_ofst : 7; [%d]\n" + " u8 l4_csum : 1; [%d]\n" + " u8 l4_ofst : 7; [%d]\n" + " u8 
pld_ofst; [%d]\n" + " __le64 mop_cnt : 24; [%d]\n" + " __le64 sop_cnt : 16; [%d]\n" + " __le64 mss : 16; [%d]\n" + " __le64 buffer_mop_addr; [%lld]\n" + " __le64 buffer_sop_addr; [%lld]\n" + "};\n", + tx_desc->u.val, tx_desc->vp, tx_desc->event_trigger, tx_desc->chain, + tx_desc->transmit_type, tx_desc->sop_valid, tx_desc->eop_valid, tx_desc->tso, + tx_desc->l3_csum, tx_desc->l3_ofst, tx_desc->l4_csum, tx_desc->l4_ofst, + tx_desc->pld_ofst, tx_desc->mop_cnt, tx_desc->sop_cnt, tx_desc->mss, + tx_desc->buffer_mop_addr, tx_desc->buffer_sop_addr); + } + dev_info(&pf->pdev->dev, "+------------------------------------------------Netdev[%d] - Queue[%d] - tx_desc end--------------------------------------------------+\n", + adpt_num, queue_num); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_rxdesc_states(int adpt_num, int queue_num, struct ne6x_pf *pf) +{ + union ne6x_rx_desc *rx_desc = NULL; + struct ne6x_ring *rx_ring = NULL; + struct ne6x_adapter *adpt = NULL; + int i; + + if (adpt_num > pf->num_alloc_adpt) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + adpt = pf->adpt[adpt_num]; + + if (queue_num > adpt->num_queue) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", adpt_num); + return; + } + rx_ring = adpt->rx_rings[queue_num]; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+-------------------------------------------------Netdev[%d] - Queue[%2d] - rx_desc begin-------------------------------------------------+\n", + adpt_num, queue_num); + for (i = 0; i < rx_ring->count; i++) { + rx_desc = NE6X_RX_DESC(rx_ring, i); + + if (!rx_desc->wb.u.val) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** Netdev[%d], Queue[%02d], rx_desc[%d], vp[%d], mml[%d], sml[%d], bsa[0x%llx], bma[0x%llx], flag[0x%x], vp[%d], p[0x%02x%02x%02x%02x%02x%02x%02x%02x], pkt_len[%d]\n", + adpt_num, 
queue_num, i, rx_desc->w.vp, rx_desc->w.mop_mem_len, + rx_desc->w.sop_mem_len, rx_desc->w.buffer_sop_addr, + rx_desc->w.buffer_mop_addr, rx_desc->wb.u.val, rx_desc->wb.vp, + rx_desc->wb.pd[0], rx_desc->wb.pd[1], rx_desc->wb.pd[2], rx_desc->wb.pd[3], + rx_desc->wb.pd[4], rx_desc->wb.pd[5], rx_desc->wb.pd[6], rx_desc->wb.pd[7], + rx_desc->wb.pkt_len); + } + dev_info(&pf->pdev->dev, "+-------------------------------------------------Netdev[%d] - Queue[%d] - rx_desc end----------------------------------------------------+\n", + adpt_num, queue_num); + dev_info(&pf->pdev->dev, "\n"); +} + +void ne6x_dbg_show_cqdesc_states(int adpt_num, int queue_num, struct ne6x_pf *pf) +{ + struct ne6x_cq_desc *cq_desc = NULL; + struct ne6x_ring *cq_ring = NULL; + struct ne6x_adapter *adpt = NULL; + int i, j; + + if (adpt_num > pf->num_alloc_adpt) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + adpt = pf->adpt[adpt_num]; + + if (queue_num > adpt->num_queue) { + dev_warn(&pf->pdev->dev, " error\n"); + return; + } + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + dev_warn(&pf->pdev->dev, "**-- Netdev[%d] is link down --**\n", adpt_num); + return; + } + cq_ring = adpt->cq_rings[queue_num]; + + dev_info(&pf->pdev->dev, "\n"); + dev_info(&pf->pdev->dev, "+--------------------------------------------------Netdev[%d] - Queue[%d] - cq_desc begin------------------------------------------------+\n", + adpt_num, queue_num); + for (i = 0; i < cq_ring->count; i++) { + cq_desc = NE6X_CQ_DESC(cq_ring, i); + + if (!cq_desc->num) + /* this descriptor is empty,skip */ + continue; + + dev_info(&pf->pdev->dev, "**** Netdev[%d], Queue[%02d], cq_desc[%d], vp[%d], ctype[%s], num[%d]\n", + adpt_num, queue_num, i, cq_ring->reg_idx, + cq_desc->ctype == 0 ? 
"tx" : "rx", + cq_desc->num); + for (j = 0; j < cq_desc->num; j++) { + if (cq_desc->ctype == 0) + dev_info(&pf->pdev->dev, "******TX%d[%d]: val:0x%x\n", j, + NE6X_CQ_TO_OFF_TX(cq_desc, j), + NE6X_CQ_TO_STS_TX(cq_desc, j)); + else + dev_info(&pf->pdev->dev, "******RX%d[%d]: val:0x%x len:%d\n", j, + NE6X_CQ_TO_OFF_RX(cq_desc, j), + NE6X_CQ_TO_STS_RX(cq_desc, j), + NE6X_CQ_TO_LEN_RX(cq_desc, j)); + } + } + dev_info(&pf->pdev->dev, "+--------------------------------------------------Netdev[%d] - Queue[%d] - cq_desc end--------------------------------------------------+\n", + adpt_num, queue_num); + dev_info(&pf->pdev->dev, "\n"); +} + +#ifdef CONFIG_RFS_ACCEL +void ne6x_dbg_show_arfs_cnt(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u8 idx = 0; + struct ne6x_adapter *pf_adpt; + struct ne6x_arfs_active_fltr_cntrs *fltr_cntrs = NULL; + + ne6x_for_each_pf(pf, idx) { + pf_adpt = pf->adpt[idx]; + fltr_cntrs = pf_adpt->arfs_fltr_cntrs; + dev_info(&pf->pdev->dev, "+---------------------------+\n"); + dev_info(&pf->pdev->dev, "pf_num:%d totle_num:%d\n\t\t\t tcp_v4_num:%d\n\t\t\t udp_v4_num:%d\n\t\t\t tcp_v6_num:%d\n\t\t\t udp_v6_num:%d\n", + idx, (atomic_read(&fltr_cntrs->active_tcpv4_cnt) + + atomic_read(&fltr_cntrs->active_udpv4_cnt) + + atomic_read(&fltr_cntrs->active_tcpv6_cnt) + + atomic_read(&fltr_cntrs->active_udpv6_cnt)), + atomic_read(&fltr_cntrs->active_tcpv4_cnt), + atomic_read(&fltr_cntrs->active_udpv4_cnt), + atomic_read(&fltr_cntrs->active_tcpv6_cnt), + atomic_read(&fltr_cntrs->active_udpv6_cnt)); + dev_info(&pf->pdev->dev, "+---------------------------+\n"); + } +} +#endif + +extern u32 ne6x_dev_crc32(const u8 *buf, u32 size); + +void ne6x_dbg_apb_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u64 offset; + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i", &addr); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "apb_read \n"); + return; + } + + offset = addr; + value = ne6x_reg_apb_read(pf, offset); + dev_info(&pf->pdev->dev, 
"offset = 0x%08X 0x%08X\n", addr, value); +} + +void ne6x_dbg_apb_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u64 offset; + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &addr, &value); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "apb_write \n"); + return; + } + + offset = addr; + ne6x_reg_apb_write(pf, offset, value); + dev_info(&pf->pdev->dev, "apb_write: 0x%llx = 0x%x\n", offset, value); +} + +void ne6x_dbg_mem_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int index = 0, cnt; + u32 *reg_data; + u64 offset; + u32 addr; + u32 size; + + cnt = sscanf(&cmd_buf[0], "%i %i", &addr, &size); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "mem_read \n"); + return; + } + + reg_data = kzalloc((size + 4) * 4, GFP_KERNEL); + offset = addr; + for (index = 0x00; index < size; index++) + reg_data[index] = ne6x_reg_apb_read(pf, offset + index * 4); + + for (index = 0x00; index < size / 4; index++) + dev_info(&pf->pdev->dev, "%lx: %08X %08X %08X %08X\n", + (unsigned int long)(offset + index * 16), reg_data[4 * index], + reg_data[4 * index + 1], reg_data[4 * index + 2], reg_data[4 * index + 3]); + + if ((size % 4) == 1) + dev_info(&pf->pdev->dev, "%lx: %08X\n", (unsigned int long)(offset + index * 16), + reg_data[4 * index]); + else if ((size % 4) == 2) + dev_info(&pf->pdev->dev, "%lx: %08X %08X\n", + (unsigned int long)(offset + index * 16), reg_data[4 * index], + reg_data[4 * index + 1]); + else if ((size % 4) == 3) + dev_info(&pf->pdev->dev, "%lx: %08X %08X %08X\n", + (unsigned int long)(offset + index * 16), reg_data[4 * index], + reg_data[4 * index + 1], reg_data[4 * index + 2]); + + kfree((void *)reg_data); +} + +void ne6x_dbg_mem_write(struct ne6x_pf *pf, char *cmd_buf, int count) {} + +void ne6x_dbg_templ_help(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + dev_info(&pf->pdev->dev, "HW_FEATURES = 0\n"); + dev_info(&pf->pdev->dev, "HW_FLAGS = 1\n"); + dev_info(&pf->pdev->dev, "RSS_TABLE_SIZE = 2\n"); + 
dev_info(&pf->pdev->dev, "RSS_TABLE_ENTRY_WIDTH = 3\n"); + dev_info(&pf->pdev->dev, "RSS_HASH_KEY_BLOCK_SIZE = 4\n"); + dev_info(&pf->pdev->dev, "PORT2PI_0 = 5\n"); + dev_info(&pf->pdev->dev, "PI2PORT_0 = 25\n"); + dev_info(&pf->pdev->dev, "VLAN_TYPE = 33\n"); + dev_info(&pf->pdev->dev, "PI0_BROADCAST_LEAF = 37\n"); + dev_info(&pf->pdev->dev, "PORT_OLFLAGS_0 = 53\n"); + dev_info(&pf->pdev->dev, "PORT_2_COS_0 = 121\n"); + dev_info(&pf->pdev->dev, "VPORT0_LINK_STATUS = 155\n"); + dev_info(&pf->pdev->dev, "TSO_CKSUM_DISABLE = 156\n"); + dev_info(&pf->pdev->dev, "PORT0_MTU = 157\n"); + dev_info(&pf->pdev->dev, "PORT0_QINQ = 161\n"); + dev_info(&pf->pdev->dev, "CQ_SIZE = 229\n"); +} + +void ne6x_dbg_templ_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 vport; + u32 value; + u32 type; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &vport, &type); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "temp_read \n"); + return; + } + + ne6x_reg_get_user_data(pf, vport + type, &value); + dev_info(&pf->pdev->dev, "temp_read 0x%04X value 0x%08X\n", type, value); +} + +void ne6x_dbg_templ_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 vport; + u32 value; + u32 type; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i %i", &vport, &type, &value); + if (cnt != 3) { + dev_warn(&pf->pdev->dev, "temp_write \n"); + return; + } + + ne6x_reg_set_user_data(pf, vport + type, value); + dev_info(&pf->pdev->dev, "temp_write: 0x%04x = 0x%x\n", type, value); +} + +void ne6x_dbg_soc_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i", &addr); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "soc_read \n"); + return; + } + + ne6x_reg_indirect_read(pf, addr, &value); + dev_info(&pf->pdev->dev, "offset = 0x%08X 0x%08X\n", addr, value); +} + +void ne6x_dbg_soc_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 value; + u32 addr; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &addr, &value); + if (cnt 
!= 2) { + dev_warn(&pf->pdev->dev, "soc_write \n"); + return; + } + + ne6x_reg_indirect_write(pf, addr, value); + dev_info(&pf->pdev->dev, "soc_write: 0x%08X = 0x%08X\n", addr, value); +} + +void ne6x_dbg_tab_read(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int array_index = 0, ret, index; + struct ne6x_debug_table *table_info; + u8 *p_str_array[10] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 10) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 2) { + dev_warn(&pf->pdev->dev, "tab_read \n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* index */ + if (!strncmp(p_str_array[1], "0x", 2)) + table_info->index = simple_strtoul(p_str_array[1], NULL, 16); + else + table_info->index = my_atoi(p_str_array[1]); + + table_info->size = table_size[table_info->table]; + ret = ne6x_reg_table_read(pf, table_info->table, table_info->index, + (u32 *)&table_info->data[0], table_info->size); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? 
"success" : "timeout!"); + + for (index = 0x00; index < (table_info->size >> 2) / 4; index++) + dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1], + table_info->data[4 * index + 2], table_info->data[4 * index + 3]); + + if (((table_info->size >> 2) % 4) == 1) + dev_info(&pf->pdev->dev, "%08X: %08X\n", index * 16, table_info->data[4 * index]); + else if (((table_info->size >> 2) % 4) == 2) + dev_info(&pf->pdev->dev, "%08X: %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1]); + else if (((table_info->size >> 2) % 4) == 3) + dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1], + table_info->data[4 * index + 2]); + + kfree(table_info); +} + +void ne6x_dbg_set_mac_to_eeprom(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info; + u8 mac_addr[6]; + int port = 0; + int ret; + int cnt; + + if (strncmp(cmd_buf, "P0", 2) == 0) { + port = 0; + } else if (strncmp(cmd_buf, "P1", 2) == 0) { + port = 1; + } else { + dev_warn(&pf->pdev->dev, "set_port_mac P0/P1 macaddr\n"); + dev_warn(&pf->pdev->dev, "example-- set_port_mac P0 94:f5:21:00:00:01\n"); + return; + } + + cnt = sscanf(&cmd_buf[2], "%hhX:%hhX:%hhX:%hhX:%hhX:%hhX", &mac_addr[0], &mac_addr[1], + &mac_addr[2], &mac_addr[3], &mac_addr[4], &mac_addr[5]); + if (cnt != 6) { + dev_warn(&pf->pdev->dev, "set_port_mac P0/P1 macaddr\n"); + dev_warn(&pf->pdev->dev, "example-- set_port_mac P0 94:f5:24:00:00:01\n"); + return; + } + + if (port == 0) + memcpy(&psdk_spd_info->port_0_mac, &mac_addr, 6); + else if (port == 1) + memcpy(&psdk_spd_info->port_1_mac, &mac_addr, 6); + else if (port == 2) + memcpy(&psdk_spd_info->port_2_mac, &mac_addr, 6); + else if (port == 3) + memcpy(&psdk_spd_info->port_3_mac, &mac_addr, 6); + + psdk_spd_info->spd_verify_value = + 
cpu_to_be32(ne6x_dev_crc32((const u8 *)psdk_spd_info, + sizeof(*psdk_spd_info) - 4)); + ret = ne6x_dev_write_eeprom(pf->adpt[0], 0x0, (u8 *)psdk_spd_info, + sizeof(*psdk_spd_info)); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, + (ret == 0) ? "set mac success!" : "set mac fail!"); +} + +void ne6x_dbg_get_mac(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info; + u8 mac_addr[6]; + int port = 0; + + if (strncmp(cmd_buf, "P0", 2) == 0) { + port = 0; + } else if (strncmp(cmd_buf, "P1", 2) == 0) { + port = 1; + } else { + dev_warn(&pf->pdev->dev, "get_port_mac P0/P1\n"); + dev_warn(&pf->pdev->dev, "example-- get_port_mac P0\n"); + return; + } + + if (port == 0) + memcpy(&mac_addr, &psdk_spd_info->port_0_mac, 6); + else if (port == 1) + memcpy(&mac_addr, &psdk_spd_info->port_1_mac, 6); + else if (port == 2) + memcpy(&mac_addr, &psdk_spd_info->port_2_mac, 6); + else if (port == 3) + memcpy(&mac_addr, &psdk_spd_info->port_3_mac, 6); + else + return; + + dev_info(&pf->pdev->dev, "port %d: mac = %02x:%02x:%02x:%02x:%02x:%02x\n", port, + mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]); +} + +void ne6x_dbg_set_dev_type_to_eeprom(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info; + u8 *p_str_array[10] = {0}; + int array_index = 0, ret; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + u16 dev_type = 0; + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 10) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 1) { + dev_warn(&pf->pdev->dev, "set_dev_type (0xA003:2*25,0xA004:4*25)\n"); + return; + } + + if (!strncmp(p_str_array[0], "0x", 2)) { + dev_type = simple_strtoul(p_str_array[0], NULL, 16); + } else { + 
dev_warn(&pf->pdev->dev, "set_dev_type (0xA003:2*25,0xA004:4*25)\n"); + return; + } + + if (dev_type != NE6000AI_2S_X16H_25G_N5 && dev_type != NE6000AI_2S_X16H_25G_N6) { + dev_warn(&pf->pdev->dev, "set_dev_type (0xA003:2*25,0xA004:4*25)\n"); + return; + } + + psdk_spd_info->product_mode = cpu_to_be16(dev_type); + psdk_spd_info->is_pcie_exist = 0x1; + + if (dev_type == NE6000AI_2S_X16H_25G_N5) { + psdk_spd_info->number_of_physical_controllers = 2; + psdk_spd_info->logic_port_to_phyical = cpu_to_be32(0x00000800); + } else if (dev_type == NE6000AI_2S_X16H_25G_N6) { + psdk_spd_info->number_of_physical_controllers = 2; + psdk_spd_info->logic_port_to_phyical = cpu_to_be32(0x00000100); + } else { + return; + } + + psdk_spd_info->spd_verify_value = + cpu_to_be32(ne6x_dev_crc32((const u8 *)psdk_spd_info, + sizeof(struct ne6x_dev_eeprom_info) - 4)); + ret = ne6x_dev_write_eeprom(pf->adpt[0], 0x0, (u8 *)psdk_spd_info, + sizeof(struct ne6x_dev_eeprom_info)); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, + (ret == 0) ? "write eeprom mac success!" : "write eeprom mac fail!"); +} + +void ne6x_dbg_tab_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_debug_table *table_info; + int array_index = 0, ret, index; + u8 *p_str_array[100] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 100) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 8) { + dev_info(&pf->pdev->dev, "tab_write
...\n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* index */ + if (!strncmp(p_str_array[1], "0x", 2)) + table_info->index = simple_strtoul(p_str_array[1], NULL, 16); + else + table_info->index = my_atoi(p_str_array[1]); + + /* data */ + table_info->size = 0; + for (index = 0; index < (array_index - 2); index++) { + if (!strncmp(p_str_array[index + 2], "0x", 2)) + table_info->data[index] = simple_strtoul(p_str_array[index + 2], NULL, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 2]); + + table_info->size++; + } + + table_info->size = table_size[table_info->table]; + + ret = ne6x_reg_table_write(pf, table_info->table, table_info->index, + (u32 *)&table_info->data[0], table_info->size); + kfree(table_info); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success!" : "timeout!"); +} + +void ne6x_dbg_tab_insert(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u8 *p_str_array[ARRAY_P_MAX_COUNT] = {0}; + struct ne6x_debug_table *table_info; + int array_index = 0, ret, index; + u32 table_id = 0xffffffff; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= ARRAY_P_MAX_COUNT) + break; + + if (!p_tmp_ret) + break; + } + + /* 1 + 16 + 1+++ */ + if (array_index < 24) { + dev_warn(&pf->pdev->dev, "tab_insert
\n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* data */ + table_info->size = 0; + for (index = 0; index < (array_index - 1); index++) { + if (!strncmp(p_str_array[index + 1], "0x", 2)) + table_info->data[index] = simple_strtoul(p_str_array[index + 1], NULL, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 1]); + + table_info->size++; + } + + table_info->size = 64; + + ret = ne6x_reg_table_search(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size, NULL, + table_info->size); + if (ret == -ENOENT) { + table_info->size = 64 + table_size[table_info->table]; + ret = ne6x_reg_table_insert(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size, + &table_id); + } else { + dev_info(&pf->pdev->dev, "0x%x 0x%x 0x%x 0x%x table exist\n", table_info->data[0], + table_info->data[1], table_info->data[2], table_info->data[3]); + return; + } + if (ret == 0) + dev_info(&pf->pdev->dev, "insert rule_id = 0x%x\n", table_id); + + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success!" : + ((ret != -ETIMEDOUT) ? "fail!" 
: "timeout!")); +} + +void ne6x_dbg_tab_delete(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + int array_index = 0, ret, index; + struct ne6x_debug_table *table_info; + u8 *p_str_array[100] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 100) + break; + + if (!p_tmp_ret) + break; + } + + if (array_index < 9) { + dev_warn(&pf->pdev->dev, "tab_delete
\n"); + kfree(table_info); + return; + } + + /* table */ + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + /* data */ + table_info->size = 0; + for (index = 0; index < (array_index - 1); index++) { + if (!strncmp(p_str_array[index + 1], "0x", 2)) + table_info->data[index] = simple_strtoul(p_str_array[index + 1], NULL, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 1]); + + table_info->size++; + } + + table_info->size = 64; + + ret = ne6x_reg_table_delete(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size); + kfree(table_info); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "success!" : "timeout!"); +} + +void ne6x_dbg_tab_search(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_debug_table *table_info; + int array_index = 0, ret, index; + u8 *p_str_array[100] = {0}; + u8 *p_in_string = NULL; + char *p_tmp_ret = NULL; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + memset(table_info, 0, sizeof(*table_info)); + + p_in_string = &cmd_buf[0]; + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= 100) + break; + + if (!p_tmp_ret) + break; + } + + dev_info(&pf->pdev->dev, "array_index = %d\n", array_index); + if (array_index < 9) { + dev_warn(&pf->pdev->dev, "tab_delete
\n"); + kfree(table_info); + return; + } + + if (!strncmp(p_str_array[0], "0x", 2)) + table_info->table = simple_strtoul(p_str_array[0], NULL, 16); + else + table_info->table = my_atoi(p_str_array[0]); + + table_info->size = 0; + for (index = 0; index < (array_index - 1); index++) { + if (!strncmp(p_str_array[index + 1], "0x", 2)) + table_info->data[index] = simple_strtoul(p_str_array[index + 1], NULL, 16); + else + table_info->data[index] = my_atoi(p_str_array[index + 1]); + + table_info->size++; + } + + table_info->size = 64; + ret = ne6x_reg_table_search(pf, (enum ne6x_reg_table)table_info->table, + (u32 *)&table_info->data[0], table_info->size, + (u32 *)&table_info->data[0], table_info->size); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, + (ret == 0) ? "success!" : ((ret == -ENOENT) ? "not fount!" : "timeout!")); + if (ret) + return; + + for (index = 0x00; index < (table_info->size >> 2) / 4; index++) + dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1], + table_info->data[4 * index + 2], table_info->data[4 * index + 3]); + + if (((table_info->size >> 2) % 4) == 1) + dev_info(&pf->pdev->dev, "%08X: %08X\n", index * 16, table_info->data[4 * index]); + else if (((table_info->size >> 2) % 4) == 2) + dev_info(&pf->pdev->dev, "%08X: %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1]); + else if (((table_info->size >> 2) % 4) == 3) + dev_info(&pf->pdev->dev, "%08X: %08X %08X %08X\n", index * 16, + table_info->data[4 * index], table_info->data[4 * index + 1], + table_info->data[4 * index + 2]); + + kfree(table_info); +} + +void ne6x_dbg_get_fru_info(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct file *fp = NULL; + u8 *buffer; + int para_count; + u32 size; + + para_count = sscanf(&cmd_buf[0], "%i", &size); + if (para_count != 1) { + dev_warn(&pf->pdev->dev, "fru_read \n"); + return; + } + + if (size > 512) { + dev_warn(&pf->pdev->dev, 
"size must less than 512\n."); + return; + } + + buffer = kzalloc((size + 4), GFP_KERNEL); + ne6x_dev_get_fru(pf, (u32 *)buffer, size); + + fp = filp_open("/opt/share/fru.bin", O_RDWR | O_CREAT, 0644); + if (!fp) { + dev_err(&pf->pdev->dev, "can't open /opt/share/fru.bin.\n"); + return; + } + + kernel_write(fp, (char *)buffer, size, &fp->f_pos); + filp_close(fp, NULL); +} + +u32 getparam(char *cmd_buf, u32 *param, int max_cnt) +{ + int ret, i, j, tmp, tmp1, tmp2, flag = 0; + u32 count = 0, cnt = 0, cnt_t = 0; + char *p = &cmd_buf[0]; + char *char_offset; + u32 *offset; + + offset = kzalloc((max_cnt + 1) * sizeof(u32), GFP_ATOMIC); + char_offset = kzalloc((max_cnt + 1) * sizeof(char), GFP_ATOMIC); + /* count the number */ + for (i = 0; i < strlen(cmd_buf); i++) { + if (cmd_buf[i] == ',' || cmd_buf[i] == '-') { + count++; + if (cmd_buf[i] == ',') { + offset[count] = i + 1; + char_offset[count] = ','; + } else if (cmd_buf[i] == '-') { + offset[count] = i + 1; + char_offset[count] = '-'; + } + } + if (cmd_buf[i] == ' ') + break; + + if (count >= max_cnt) + break; + } + + for (i = 0; i <= count; i++) { + ret = sscanf(p, "%i", ¶m[i + cnt_t]); + if (ret == 1) { + cnt++; + if (char_offset[cnt] == '-') { + flag++; + p = &cmd_buf[offset[cnt]]; + ret = sscanf(p, "%i", ¶m[i + cnt_t + 1]); + tmp1 = param[i + cnt_t]; + tmp2 = param[i + cnt_t + 1]; + if (ret == 1) { + tmp = i + cnt_t; + for (j = 0; j <= tmp2 - tmp1; j++) + param[tmp + j] = tmp1 + j; + } + cnt_t += tmp2 - tmp1; + + cnt++; + } + p = &cmd_buf[offset[cnt]]; + } + } + + kfree(offset); + + return cnt + cnt_t - 2 * flag; +} + +void ne6x_dbg_show_pcie_drop_counter(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + union ne6x_eth_recv_cnt eth_recv_cnt; + u64 __iomem *reg; + + reg = (void __iomem *)pf->hw.hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ETH_RECV_CNT); + eth_recv_cnt.val = readq(reg); + dev_info(&pf->pdev->dev, "pcie drop cnt = %d\n", eth_recv_cnt.reg.csr_eth_pkt_drop_cnt + + eth_recv_cnt.reg.csr_eth_rdq_drop_cnt); 
+} + +void ne6x_dbg_clr_table(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 table_id = 0, cnt; + + cnt = sscanf(&cmd_buf[0], "%i", &table_id); + if (table_id == 6) + ne6x_reg_clear_table(pf, table_id); +} + +void ne6x_dbg_set_hw_flag_eeprom(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + struct ne6x_dev_eeprom_info *psdk_spd_info = &pf->sdk_spd_info; + int flag = 0; + int ret; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i", &flag); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "\n0:none;1,ram white list;2,ddr white list\n"); + return; + } + + psdk_spd_info->hw_flag = cpu_to_be32(flag); + psdk_spd_info->spd_verify_value = + cpu_to_be32(ne6x_dev_crc32((const u8 *)psdk_spd_info, + sizeof(struct ne6x_dev_eeprom_info) - 4)); + ret = ne6x_dev_write_eeprom(pf->adpt[0], 0x0, (u8 *)psdk_spd_info, + sizeof(struct ne6x_dev_eeprom_info)); + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, (ret == 0) ? "set hw_flag success!" + : "set hw_flag fail!"); +} + +void ne6x_dbg_erase_norflash(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 offset; + u32 length; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &offset, &length); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "norflash_erase \n"); + return; + } + + if (!ne6x_reg_erase_norflash(pf, offset, length)) + return; + + dev_err(&pf->pdev->dev, "norflash_erase fail.\n"); +} + +void ne6x_dbg_write_norflash(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u8 *ptemp_data = NULL; + u32 offset = 0; + u32 length = 0; + u32 temp_data = 0; + u8 *ptemp = NULL; + int i = 0; + + ptemp_data = kzalloc(1024, GFP_ATOMIC); + + while ((ptemp = strsep(&cmd_buf, " "))) { + if (!strncmp(ptemp, "0x", 2)) + temp_data = simple_strtoul(ptemp, NULL, 16); + else + temp_data = my_atoi(ptemp); + + if (i == 0) + offset = temp_data; + else if (i == 1) + length = temp_data; + else + ptemp_data[i - 2] = (u8)temp_data; + + i++; + if (i == 1026) + break; + } + + if (length > 1024 || i < 2) { + dev_warn(&pf->pdev->dev, "norflash_write (byte split by 
space max 256)\n"); + goto pdata_memfree; + } + + if (!ne6x_reg_write_norflash(pf, offset, length, (u32 *)ptemp_data)) + dev_info(&pf->pdev->dev, "write norflash success.\n"); + else + dev_err(&pf->pdev->dev, "write norflash fail.\n"); + +pdata_memfree: + kfree(ptemp_data); +} + +void ne6x_dbg_read_norflash(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u32 offset = 0; + u32 length = 0; + u32 buffer_len; + char *pdata = NULL; + int cnt; + + cnt = sscanf(&cmd_buf[0], "%i %i", &offset, &length); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "norflash_read \n"); + return; + } + + buffer_len = length; + if (length % 4) + buffer_len = (length / 4 + 1) * 4; + + pdata = kzalloc(buffer_len, GFP_ATOMIC); + if (!ne6x_reg_read_norflash(pf, offset, buffer_len, (u32 *)pdata)) + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, pdata, length); + else + dev_err(&pf->pdev->dev, "read_norflash fail.\n"); + + kfree(pdata); +} + +void ne6x_dbg_meter_write(struct ne6x_pf *pf, char *cmd_buf, int count) +{ + u8 *p_str_array[ARRAY_P_MAX_COUNT] = {0}; + u32 cir, type_num, type_flag = 0; + u32 cir_maxnum = 0xfffff; + u32 cbs_maxnum = 0xffffff; + struct meter_table vf_bw; + char *p_tmp_ret; + int index, ret = 0; + int array_index = 0; + u8 *p_in_string = NULL; + u32 data[3] = {0}; + u32 type = 0; + + p_in_string = &cmd_buf[0]; + p_tmp_ret = NULL; + + while ((p_str_array[array_index] = my_strtok(p_in_string, PARA_KEY_STRING, &p_tmp_ret)) != + NULL) { + p_in_string = p_str_array[array_index] + strlen(p_str_array[array_index]) + 1; + array_index++; + if (array_index >= ARRAY_P_MAX_COUNT) + break; + if (!p_tmp_ret) + break; + } + if (array_index != 3) { + dev_warn(&pf->pdev->dev, "Incorrect input, please re-enter\n"); + return; + } + + for (index = 0; index < array_index; index++) { + if (!strncmp(p_str_array[index], "0x", 2)) + data[index] = simple_strtoul(p_str_array[index], NULL, 16); + else + data[index] = my_atoi(p_str_array[index]); + } + + type_num = data[0]; + switch (type_num) { + case 
0: + type_flag |= NE6X_F_ACK_FLOOD; + break; + case 1: + type_flag |= NE6X_F_PUSH_ACK_FLOOD; + break; + case 2: + type_flag |= NE6X_F_SYN_ACK_FLOOD; + break; + case 3: + type_flag |= NE6X_F_FIN_FLOOD; + break; + case 4: + type_flag |= NE6X_F_RST_FLOOD; + break; + case 5: + type_flag |= NE6X_F_PUSH_SYN_ACK_FLOOD; + break; + case 6: + type_flag |= NE6X_F_UDP_FLOOD; + break; + case 7: + type_flag |= NE6X_F_ICMP_FLOOD; + break; + case 8: + type_flag |= NE6X_F_FRAGMENT_FLOOD; + break; + default: + dev_err(&pf->pdev->dev, "err_input,please enter one of'0-8'\n"); + return; + } + + if (data[1] == 1) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_DDOS_FLAG, &type); + type |= type_flag; + ne6x_reg_set_user_data(pf, NP_USER_DATA_DDOS_FLAG, type); + } else if (data[1] == 0) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_DDOS_FLAG, &type); + type &= ~type_flag; + ne6x_reg_set_user_data(pf, NP_USER_DATA_DDOS_FLAG, type); + } else { + dev_err(&pf->pdev->dev, "Input error, please enter '0' or '1'\n"); + return; + } + + cir = data[2] * 1000 + 1023; + cir = min((cir / 1024), cir_maxnum); + vf_bw.cir = cir; + vf_bw.pir = min((cir + cir / 10), cir_maxnum); + + vf_bw.cbs = min((vf_bw.cir * 10000), cbs_maxnum); + vf_bw.pbs = min((vf_bw.pir * 10000), cbs_maxnum); + ret = ne6x_reg_config_meter(pf, NE6X_METER1_TABLE | + NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | type_num, + (u32 *)&vf_bw, sizeof(vf_bw)); + + dev_info(&pf->pdev->dev, "%s: %s\n", __func__, + (ret == 0) ? "write meter success!" 
: "write meter fail!"); +} + +const struct ne6x_dbg_cmd_wr deg_cmd_wr[] = { + {"queue", ne6x_dbg_show_queue}, + {"ring", ne6x_dbg_show_ring}, + {"txq", ne6x_dbg_show_txq}, + {"rxq", ne6x_dbg_show_rxq}, + {"cq", ne6x_dbg_show_cq}, + {"clean", ne6x_dbg_clean_queue}, + {"txtail", ne6x_dbg_show_txtail}, + {"txr", ne6x_dbg_show_txring}, + {"rxr", ne6x_dbg_show_rxring}, + {"cqr", ne6x_dbg_show_cqring}, +#ifdef CONFIG_RFS_ACCEL + {"arfs", ne6x_dbg_show_arfs_cnt}, +#endif + {"apb_read", ne6x_dbg_apb_read}, + {"apb_write", ne6x_dbg_apb_write}, + {"mem_read", ne6x_dbg_mem_read}, + {"mem_write", ne6x_dbg_mem_write}, + {"soc_read", ne6x_dbg_soc_read}, + {"soc_write", ne6x_dbg_soc_write}, + {"templ_help", ne6x_dbg_templ_help}, + {"templ_read", ne6x_dbg_templ_read}, + {"templ_write", ne6x_dbg_templ_write}, + {"tab_read", ne6x_dbg_tab_read}, + {"tab_write", ne6x_dbg_tab_write}, + {"tab_insert", ne6x_dbg_tab_insert}, + {"tab_delete", ne6x_dbg_tab_delete}, + {"tab_search", ne6x_dbg_tab_search}, + {"set_port_mac", ne6x_dbg_set_mac_to_eeprom}, + {"get_port_mac", ne6x_dbg_get_mac}, + {"fru_read", ne6x_dbg_get_fru_info}, + {"pcie_dropcnt", ne6x_dbg_show_pcie_drop_counter}, + {"clear_table", ne6x_dbg_clr_table}, + {"set_hw_flag", ne6x_dbg_set_hw_flag_eeprom}, + {"norflash_erase", ne6x_dbg_erase_norflash}, + {"norflash_write", ne6x_dbg_write_norflash}, + {"norflash_read", ne6x_dbg_read_norflash}, + {"meter_write", ne6x_dbg_meter_write}, +}; + +/** + * ne6x_dbg_command_read - read for command datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ne6x_dbg_command_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + return 0; +} + +static ssize_t ne6x_dbg_info_pnsn_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + u8 *pru_name = NULL, *pru_pn = NULL, *pru_sn = NULL; + char name_pre[INFO_COL] = {0}; + 
char name_aft[INFO_COL] = {0}; + struct ne6x_pf *pf = NULL; + u32 buf_size = 500; + char *name = NULL; + ssize_t len = 0; + u8 *buffer_data; + u8 length = 0; + u16 device_id; + int erro = 0; + int dex = 0; + int i = 0; + + if (*ppos > 0 || count < PAGE_SIZE) + return 0; + + name = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!name) + return -ENOMEM; + + buffer_data = kzalloc(buf_size, GFP_KERNEL); + if (!buffer_data) { + kfree(name); + return -ENOMEM; + } + + pf = filp->private_data; + ne6x_dev_get_fru(pf, (u32 *)buffer_data, buf_size); + + pru_name = ne6x_dbg_get_fru_product_part(buffer_data, PRODUCT_NAME, &length); + if (!pru_name) { + dev_err(&pf->pdev->dev, "get pru_name info erro"); + device_id = pf->hw.subsystem_device_id; + if (!device_id) { + dev_err(&pf->pdev->dev, "subsystem_device_id is NULL!"); + erro = 1; + goto get_buffer_end; + } + + sprintf(name_pre, "Product Name: BeiZhongWangXin"); + sprintf(name_aft, "Ethernet Adapter"); + + for (i = 0; i < ARRAY_SIZE(ne6x_device_info); i++) { + if (device_id == ne6x_device_info[i].system_id) + dex = i; + } + + if (dex != -1) { + len = sprintf(name, "%s %s %s %s\n", name_pre, + ne6x_device_info[dex].system_name, + ne6x_device_info[dex].system_speed, name_aft); + } else { + dev_warn(&pf->pdev->dev, "subsystem_device_id not match"); + erro = 1; + goto get_buffer_end; + } + + } else { + len = sprintf(name, "Product Name: %s\n", pru_name); + } + + pru_pn = ne6x_dbg_get_fru_product_part(buffer_data, PRODUCT_PART_NUMBER, &length); + if (pru_pn) + len = sprintf(name, "%s[PN] Part number: %s\n", name, pru_pn); + + pru_sn = ne6x_dbg_get_fru_product_part(buffer_data, PRODUCT_SERIAL_NUMBER, &length); + if (pru_sn) + len = sprintf(name, "%s[SN] Serial number: %s\n", name, pru_sn); + + if (copy_to_user(buffer, name, len)) { + erro = 2; + goto get_buffer_end; + } + + if (!len) { + erro = 1; + goto get_buffer_end; + } + + *ppos = len; + goto get_buffer_end; + +get_buffer_end: + kfree(pru_pn); + kfree(pru_sn); + kfree(pru_name); + 
kfree(name); + kfree(buffer_data); + + if (erro == 1) + return 0; + else if (erro == 2) + return -EFAULT; + + return len; +} + +static bool ne6x_dbg_fru_checksum(const u8 *data, u32 len) +{ + u8 gl = 0; + u32 i; + + for (i = 0; i < len - 1; i++) + gl += data[i]; + + gl = ~gl + 1; + return gl == data[len - 1]; +} + +static int ne6x_dbg_fru_get_offset(u8 *buffer, enum fru_type type, u8 *offset) +{ + u8 hd[8] = {0}; + int i; + + for (i = 0; i < 8; i++) + hd[i] = buffer[i]; + + if (!(hd[0] & 0x1)) + return -2; + + if (!ne6x_dbg_fru_checksum(hd, 8)) + return -3; + + if (type < INTER_USE_AREA || type > MUILT_AREA) + return -4; + + *offset = hd[type + 1]; + + return 0; +} + +static u8 *ne6x_dbg_fru_6ascii28(const u8 *data, u8 *len) +{ + u8 len_bit_6, len_bit_8; + int i, i6, byte; + u8 *buf = NULL; + + len_bit_6 = data[0] & 0x3F; + len_bit_8 = FRU_6BIT_8BITLENGTH(len_bit_6); + buf = kzalloc(len_bit_8 + 1, GFP_ATOMIC); + + if (!buf) { + *len = 0; + return NULL; + } + + for (i = 0, i6 = 1; i6 <= len_bit_6 && i < len_bit_8 && data[i6]; i++) { + byte = (i - 1) % 4; + + switch (byte) { + case 0: + buf[i] = data[i6] & 0x3F; + break; + case 1: + buf[i] = (data[i6] >> 6) | (data[1 + i6] << 2); + i6++; + break; + case 2: + buf[i] = (data[i6] >> 4) | (data[1 + i6] << 4); + i6++; + break; + case 3: + buf[i] = data[i6++] >> 2; + break; + } + + buf[i] &= 0x3F; + buf[i] += ASCII628_BASE; + } + + *len = len_bit_8; + + return buf; +} + +u8 *ne6x_dbg_get_fru_product_part(u8 *buffer, enum fru_product_part part, u8 *len) +{ + u8 hd[2] = {0}; + u8 *pt = NULL; + u8 ofst = 0; + u32 i = 0; + + if (!buffer) + return NULL; + + if (ne6x_dbg_fru_get_offset(buffer, PRODUCT_AREA, &ofst) != 0 || ofst == 0) { + *len = 0; + return NULL; + } + + ofst *= 8; + hd[0] = buffer[ofst]; + hd[1] = buffer[ofst + 1]; + if (!(hd[0] & 0x1) || hd[1] == 0) + return NULL; + + if (!ne6x_dbg_fru_checksum(&buffer[ofst], hd[1] * 8)) + return NULL; + + ofst += 3; + + for (i = 0; i < part; i++) + ofst += 1 + (buffer[ofst] & 
0x3f); + + if (FRU_CHECK_6ASCII(buffer[ofst])) { + pt = ne6x_dbg_fru_6ascii28(&buffer[ofst], len); + } else { + *len = (buffer[ofst] & 0x3f); + pt = kzalloc(*len, GFP_ATOMIC); + if (!pt) + return NULL; + + memcpy(pt, &buffer[ofst + 1], *len); + } + + return pt; +} + +void ne6x_dbg_update_adpt_speed(struct ne6x_adapter *adpt, u32 speed, u32 lane_mode) {} + +/** + * ne6x_dbg_command_write - write into command datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ne6x_dbg_command_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ne6x_pf *pf = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + struct ne6x_ring *tx_ring; + int bytes_not_copied; + struct ne6x_adapter *adpt; + int i, cnt = 0; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + /* don't cross maximal possible value */ + if (count >= NE6X_DEBUG_CHAR_LEN) + return -ENOSPC; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return count; + + bytes_not_copied = copy_from_user(cmd_buf, buffer, count); + if (bytes_not_copied) { + kfree(cmd_buf); + return -EFAULT; + } + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + if (strncmp(cmd_buf, "updtail", 7) == 0) { + int idx, vp, tail; + + cnt = sscanf(&cmd_buf[7], "%d %d %d", &idx, &vp, &tail); + if (cnt != 3) { + dev_warn(&pf->pdev->dev, "updtail \n"); + goto command_write_done; + } + adpt = pf->adpt[idx ? 1 : 0]; + tx_ring = adpt->tx_rings[vp & 0xf]; + ne6x_tail_update(tx_ring, tail); + dev_info(&pf->pdev->dev, "write: adpt = %d vp = 0x%x tail_ptr = %d\n", idx ? 
1 : 0, + vp, tail); + } else if (strncmp(cmd_buf, "memrd", 5) == 0) { + u32 base_addr; + u32 offset_addr = 0; + u64 value; + int index, vp; + + cnt = sscanf(&cmd_buf[5], "%d", &vp); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "memrd \n"); + goto command_write_done; + } + + offset_addr = 0x0; + for (index = 0; index < 0x20; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + + if (base_addr == 0x13F) { + offset_addr = 0x21; + for (index = 0x21; index < 0x24; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + + offset_addr = 0x39; + for (index = 0x39; index < 0x4E; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + + offset_addr = 0x80; + for (index = 0x80; index < 0x95; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + + offset_addr = 0xA3; + for (index = 0xA3; index < 0xA5; index++) { + base_addr = 0x140 + vp; + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%02x = 0x%llx\n", base_addr, + offset_addr, value); + offset_addr++; + } + } + } else if (strncmp(cmd_buf, "read", 4) == 0) { + u32 base_addr; + u32 offset_addr; + u64 value; + + cnt = sscanf(&cmd_buf[4], "%i %i", &base_addr, &offset_addr); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "read \n"); + goto command_write_done; + } + + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "read: 0x%x 0x%x = 
0x%llx\n", base_addr, offset_addr, + value); + } else if (strncmp(cmd_buf, "write", 5) == 0) { + u32 base_addr; + u32 offset_addr; + u64 value; + + cnt = sscanf(&cmd_buf[5], "%i %i %lli ", &base_addr, &offset_addr, &value); + if (cnt != 3) { + dev_warn(&pf->pdev->dev, "write \n"); + goto command_write_done; + } + + ne6x_reg_pci_write(pf, base_addr, offset_addr, value); + value = ne6x_reg_pci_read(pf, base_addr, offset_addr); + dev_info(&pf->pdev->dev, "write: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr, + value); + } else if (strncmp(cmd_buf, "wr", 2) == 0) { + u32 offset; + u32 value; + + cnt = sscanf(&cmd_buf[2], "%i %i", &offset, &value); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "rr \n"); + goto command_write_done; + } + ne6x_reg_indirect_write(pf, offset, value); + dev_info(&pf->pdev->dev, "wr: 0x%x = 0x%x\n", offset, value); + } else if (strncmp(cmd_buf, "rr", 2) == 0) { + u32 offset; + u32 value; + + cnt = sscanf(&cmd_buf[2], "%i", &offset); + if (cnt != 1) { + dev_warn(&pf->pdev->dev, "read \n"); + goto command_write_done; + } + + value = ne6x_reg_indirect_read(pf, offset, &value); + dev_info(&pf->pdev->dev, "rr: 0x%x = 0x%x\n", offset, value); + } else if (strncmp(cmd_buf, "txd", 3) == 0) { + u32 adpt_num; + u32 quenue_num; + + cnt = sscanf(&cmd_buf[3], "%i %i", &adpt_num, &quenue_num); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "txd \n"); + goto command_write_done; + } + + ne6x_dbg_show_txdesc_states(adpt_num, quenue_num, pf); + } else if (strncmp(cmd_buf, "rxd", 3) == 0) { + u32 adpt_num; + u32 quenue_num; + + cnt = sscanf(&cmd_buf[3], "%i %i", &adpt_num, &quenue_num); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "rxd \n"); + goto command_write_done; + } + + ne6x_dbg_show_rxdesc_states(adpt_num, quenue_num, pf); + } else if (strncmp(cmd_buf, "cqd", 3) == 0) { + u32 adpt_num; + u32 quenue_num; + + cnt = sscanf(&cmd_buf[3], "%i %i", &adpt_num, &quenue_num); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "cqd \n"); + goto command_write_done; + } + + 
ne6x_dbg_show_cqdesc_states(adpt_num, quenue_num, pf); + } else { + for (i = 0; i < count; i++) { + if (cmd_buf[i] == ' ') { + cmd_buf[i] = '\0'; + cnt = i; + break; + } + if (cmd_buf[i] == '\0') { + cnt = i; + break; + } + } + + for (i = 0; i < ARRAY_SIZE(deg_cmd_wr); i++) { + if (strncmp(cmd_buf, deg_cmd_wr[i].command, cnt) == 0) { + deg_cmd_wr[i].command_proc(pf, &cmd_buf[cnt + 1], count - cnt - 1); + goto command_write_done; + } + } + + dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); + } + +command_write_done: + kfree(cmd_buf); + cmd_buf = NULL; + return count; +} + +static const struct file_operations ne6x_dbg_command_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6x_dbg_command_read, + .write = ne6x_dbg_command_write, +}; + +const struct ne6x_dbg_cmd_wr deg_netdev_ops_cmd_wr[] = {}; + +/** + * ne6x_dbg_netdev_ops_read - read for netdev_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static const struct file_operations ne6x_dbg_info_pnsn_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6x_dbg_info_pnsn_read, +}; + +static const struct file_operations ne6x_dbg_info_tps_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6x_proc_tps_read, +}; + +static ssize_t ne6x_dbg_netdev_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + return 0; +} + +/** + * ne6x_dbg_netdev_ops_write - write into netdev_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ne6x_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ne6x_pf *pf = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + int bytes_not_copied; + int i; + + /* don't allow partial writes */ + if (*ppos != 0) + 
return 0; + + /* don't cross maximal possible value */ + if (count >= NE6X_DEBUG_CHAR_LEN) + return -ENOSPC; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return count; + + bytes_not_copied = copy_from_user(cmd_buf, buffer, count); + if (bytes_not_copied) { + kfree(cmd_buf); + return -EFAULT; + } + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + for (i = 0; i < ARRAY_SIZE(deg_netdev_ops_cmd_wr); i++) { + if (strncmp(cmd_buf, deg_netdev_ops_cmd_wr[i].command, count) == 0) { + deg_netdev_ops_cmd_wr[i].command_proc(pf, + &cmd_buf[sizeof(deg_netdev_ops_cmd_wr[i].command) + 1], + count - 1 - sizeof(deg_netdev_ops_cmd_wr[i].command)); + goto command_write_done; + } + } + dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); + +command_write_done: + kfree(cmd_buf); + cmd_buf = NULL; + return count; +} + +static const struct file_operations ne6x_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6x_dbg_netdev_ops_read, + .write = ne6x_dbg_netdev_ops_write, +}; + +/** + * ne6x_dbg_pf_init - setup the debugfs directory for the PF + * @pf: the PF that is starting up + **/ +void ne6x_dbg_pf_init(struct ne6x_pf *pf) +{ + const struct device *dev = &pf->pdev->dev; + const char *name = pci_name(pf->pdev); + struct dentry *pfile; + + pf->ne6x_dbg_pf = debugfs_create_dir(name, ne6x_dbg_root); + if (!pf->ne6x_dbg_pf) + return; + + pf->ne6x_dbg_info_pf = debugfs_create_dir("info", pf->ne6x_dbg_pf); + if (!pf->ne6x_dbg_info_pf) + return; + + pfile = debugfs_create_file("command", 0600, pf->ne6x_dbg_pf, pf, &ne6x_dbg_command_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("netdev_ops", 0600, pf->ne6x_dbg_pf, pf, + &ne6x_dbg_netdev_ops_fops); + if (!pfile) + goto create_failed; + + pfile = debugfs_create_file("product_info", 0600, pf->ne6x_dbg_info_pf, pf, + &ne6x_dbg_info_pnsn_fops); + if (!pfile) + 
goto create_failed; + + pfile = debugfs_create_file("power_info", 0600, pf->ne6x_dbg_info_pf, pf, + &ne6x_dbg_info_tps_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_err(dev, "debugfs dir/file for %s failed\n", name); + debugfs_remove_recursive(pf->ne6x_dbg_info_pf); + debugfs_remove_recursive(pf->ne6x_dbg_pf); +} + +/** + * ne6x_dbg_pf_exit - clear out the PF's debugfs entries + * @pf: the PF that is stopping + **/ +void ne6x_dbg_pf_exit(struct ne6x_pf *pf) +{ + debugfs_remove_recursive(pf->ne6x_dbg_info_pf); + pf->ne6x_dbg_info_pf = NULL; + + debugfs_remove_recursive(pf->ne6x_dbg_pf); + pf->ne6x_dbg_pf = NULL; +} + +/** + * ne6x_dbg_init - start up debugfs for the driver + **/ +void ne6x_dbg_init(void) +{ + ne6x_dbg_root = debugfs_create_dir(ne6x_driver_name, NULL); + if (!ne6x_dbg_root) + pr_info("init of debugfs failed\n"); +} + +/** + * ne6x_dbg_exit - clean out the driver's debugfs entries + **/ +void ne6x_dbg_exit(void) +{ + debugfs_remove_recursive(ne6x_dbg_root); + ne6x_dbg_root = NULL; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h new file mode 100644 index 000000000000..2094e52f4b6d --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_debugfs.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_DEBUGFS_H +#define _NE6X_DEBUGFS_H + +struct ne6x_debug_table { + int table; + int index; + int size; + u32 data[128]; +}; + +#ifdef CONFIG_DEBUG_FS + +enum fru_product_part { + MANUFACTURER_NAME = 0, + PRODUCT_NAME, + PRODUCT_PART_NUMBER, /* pn */ + PRODUCT_VERSION, + PRODUCT_SERIAL_NUMBER, /* sn */ + PRODUCT_ASSET_TAG, + PRODUCT_FRU_FILE_ID, +}; + +enum fru_type { + INTER_USE_AREA = 0, + CHASSIS_AREA, + BOARD_AREA, + PRODUCT_AREA, + MUILT_AREA, +}; + +#define NE6X_DEBUG_CHAR_LEN 1024 + +#define INFO_ROW 20 +#define INFO_COL 50 + +extern char ne6x_driver_name[]; + +struct ne6x_dbg_cmd_wr { + char command[NE6X_DEBUG_CHAR_LEN]; + void (*command_proc)(struct ne6x_pf *pf, char *cmd_buf, int count); +}; + +struct ne6x_debug_info { + u16 system_id; + char system_name[INFO_COL]; + char system_speed[INFO_COL]; +}; + +void ne6x_dbg_init(void); +void ne6x_dbg_exit(void); + +void ne6x_dbg_pf_init(struct ne6x_pf *pf); +void ne6x_dbg_pf_exit(struct ne6x_pf *pf); +#else /* !CONFIG_DEBUG_FS */ + +static inline void ne6x_dbg_init(void) +{ } +static inline void ne6x_dbg_exit(void) +{ } +static inline void ne6x_dbg_pf_init(struct ne6x_pf *pf) +{ } +static inline void ne6x_dbg_pf_exit(struct ne6x_pf *pf) +{ } +#endif /* end CONFIG_DEBUG_FS */ + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c new file mode 100644 index 000000000000..70381bd6ebc9 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.c @@ -0,0 +1,1602 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include "ne6x.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include "reg.h" + +#define NE6X_SDK_CRC32_DATA_LEN 256 + +#define NE6X_PPORT_BY_HWINFO(HWINFO, index) (((HWINFO) >> (8 * (index))) & 0xff) + +#define to_be32_vector(s, e, p) \ +({ \ + int __n; \ + u32 *__data = (u32 *)(p);\ + for (__n = (s); __n < (e); __n++) \ + __data[__n] = cpu_to_be32(__data[__n]); \ +}) + +void ext_toeplitz_key(const unsigned char *key, unsigned char *ext_key) +{ + int i; + + for (i = 0; i < 39; i++) { + ext_key[i] = key[i]; + ext_key[44 + i] = (key[i] << 1) | (key[i + 1] >> 7); + ext_key[44 * 2 + i] = (key[i] << 2) | (key[i + 1] >> 6); + ext_key[44 * 3 + i] = (key[i] << 3) | (key[i + 1] >> 5); + ext_key[44 * 4 + i] = (key[i] << 4) | (key[i + 1] >> 4); + ext_key[44 * 5 + i] = (key[i] << 5) | (key[i + 1] >> 3); + ext_key[44 * 6 + i] = (key[i] << 6) | (key[i + 1] >> 2); + ext_key[44 * 7 + i] = (key[i] << 7) | (key[i + 1] >> 1); + } + + ext_key[39] = key[39]; + ext_key[44 + 39] = (key[39] << 1) | (key[1] >> 7); + ext_key[44 * 2 + 39] = (key[39] << 2) | (key[1] >> 6); + ext_key[44 * 3 + 39] = (key[39] << 3) | (key[1] >> 5); + ext_key[44 * 4 + 39] = (key[39] << 4) | (key[1] >> 4); + ext_key[44 * 5 + 39] = (key[39] << 5) | (key[1] >> 3); + ext_key[44 * 6 + 39] = (key[39] << 6) | (key[1] >> 2); + ext_key[44 * 7 + 39] = (key[39] << 7) | (key[1] >> 1); + + for (i = 0; i < 4; i++) { + ext_key[40 + i] = ext_key[i]; + ext_key[44 + 40 + i] = ext_key[44 + i]; + ext_key[44 * 2 + 40 + i] = ext_key[44 * 2 + i]; + ext_key[44 * 3 + 40 + i] = ext_key[44 * 3 + i]; + ext_key[44 * 4 + 40 + i] = ext_key[44 * 4 + i]; + ext_key[44 * 5 + 40 + i] = ext_key[44 * 5 + i]; + ext_key[44 * 6 + 40 + i] = ext_key[44 * 6 + i]; + ext_key[44 * 7 + 40 + i] = ext_key[44 * 7 + i]; + } +} + +static u32 ne6x_dev_bitrev(u32 input, int bw) +{ + u32 var = 0; + int i; + + for (i = 0; i < bw; i++) { + if (input & 0x01) + var |= 1 << (bw - 1 - i); + + input >>= 1; + } + + return var; +} + +void 
ne6x_dev_crc32_init(u32 poly, u32 *table) +{ + u32 c; + int i, j; + + poly = ne6x_dev_bitrev(poly, 32); + + for (i = 0; i < NE6X_SDK_CRC32_DATA_LEN; i++) { + c = i; + for (j = 0; j < 8; j++) { + if (c & 1) + c = poly ^ (c >> 1); + else + c = c >> 1; + } + table[i] = c; + } +} + +u32 ne6x_dev_crc32(const u8 *buf, u32 size) +{ + u32 ne6x_sdk_crc32tab[NE6X_SDK_CRC32_DATA_LEN]; + u32 i, crc; + + ne6x_dev_crc32_init(0x4C11DB7, ne6x_sdk_crc32tab); + crc = 0xFFFFFFFF; + + for (i = 0; i < size; i++) + crc = ne6x_sdk_crc32tab[(crc ^ buf[i]) & 0xff] ^ (crc >> 8); + + return crc ^ 0xFFFFFFFF; +} + +int ne6x_dev_spd_verify(struct ne6x_dev_eeprom_info *spd_info) +{ + if (be32_to_cpu(spd_info->spd_verify_value) == + ne6x_dev_crc32((const u8 *)spd_info, sizeof(*spd_info) - 4)) + return 0; + + return -EINVAL; +} + +int ne6x_dev_get_eeprom(struct ne6x_pf *pf) +{ + int retry = 3; + + while (retry-- > 0) { + ne6x_reg_e2prom_read(pf, 0x0, (u8 *)&pf->sdk_spd_info, sizeof(pf->sdk_spd_info)); + if (!ne6x_dev_spd_verify(&pf->sdk_spd_info)) + return 0; + } + + memset(&pf->sdk_spd_info, 0, sizeof(pf->sdk_spd_info)); + + return -EINVAL; +} + +static int ne6x_dev_get_dev_info(struct ne6x_pf *pf) +{ + int ret; + + ret = ne6x_dev_get_eeprom(pf); + if (!ret) { + pf->dev_type = be16_to_cpu(pf->sdk_spd_info.product_mode); + pf->hw_flag = be32_to_cpu(pf->sdk_spd_info.hw_flag); + if (!pf->hw_flag) + pf->hw_flag = 1; + } else { + dev_err(ne6x_pf_to_dev(pf), "get eeprom fail\n"); + } + + return ret; +} + +int ne6x_dev_set_white_list(struct ne6x_pf *pf, bool enable) +{ + u32 data; + + if (enable) { + if (pf->hw_flag == 1 || pf->hw_flag == 2) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data |= NE6X_F_WHITELIST_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } else { + dev_info(ne6x_pf_to_dev(pf), "hw not support white list func\n"); + return -EOPNOTSUPP; + } + } else { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= 
~NE6X_F_WHITELIST_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } + + return 0; +} + +void ne6x_dev_set_ddos(struct ne6x_pf *pf, bool enable) +{ + u32 data; + + if (enable) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data |= NE6X_F_DDOS_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } else { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= ~NE6X_F_DDOS_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } +} + +void ne6x_dev_set_trust_vlan(struct ne6x_pf *pf, bool enable) +{ + u32 data; + + if (enable) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data |= NE6X_F_TRUST_VLAN_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } else { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= ~NE6X_F_TRUST_VLAN_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } +} + +bool ne6x_dev_get_trust_vlan(struct ne6x_pf *pf) +{ + u32 data; + + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + if (data & NE6X_F_TRUST_VLAN_ENABLED) + return true; + return false; +} + +int ne6x_dev_get_pport(struct ne6x_adapter *adpt) +{ + u32 lport_to_phy; + + if (!adpt) + return 0; + + switch (adpt->back->dev_type) { + case NE6000AI_2S_X16H_25G_N5: + return adpt->idx; + default: + break; + } + + lport_to_phy = adpt->back->sdk_spd_info.logic_port_to_phyical; + + return NE6X_PPORT_BY_HWINFO(be32_to_cpu(lport_to_phy), adpt->idx); +} + +static void ne6x_dev_set_roce_icrc_offload(struct ne6x_pf *pf, bool enable) +{ + u32 data; + + if (enable) { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data |= NE6X_F_S_ROCE_ICRC_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } else { + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= ~NE6X_F_S_ROCE_ICRC_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); + } +} + +int ne6x_dev_init(struct ne6x_pf *pf) +{ + if 
(unlikely(ne6x_dev_get_dev_info(pf))) + return -EINVAL; + + ne6x_reg_get_ver(pf, &pf->verinfo); + ne6x_dev_clear_vport(pf); + ne6x_dev_set_fast_mode(pf, false, 0); + ne6x_dev_set_roce_icrc_offload(pf, true); + + return 0; +} + +int ne6x_dev_get_mac_addr(struct ne6x_adapter *adpt, u8 *mac) +{ + struct ne6x_dev_eeprom_info *info = &adpt->back->sdk_spd_info; + + memset(mac, 0, 6); + switch (adpt->idx) { + case 0: + ether_addr_copy(mac, &info->port_0_mac[0]); + break; + case 1: + ether_addr_copy(mac, &info->port_1_mac[0]); + break; + case 2: + ether_addr_copy(mac, &info->port_2_mac[0]); + break; + case 3: + ether_addr_copy(mac, &info->port_3_mac[0]); + break; + default: + return -1; + } + + return 0; +} + +int ne6x_dev_get_port_num(struct ne6x_pf *pf) +{ + return pf->sdk_spd_info.number_of_physical_controllers; +} + +int ne6x_dev_get_temperature_info(struct ne6x_pf *pf, struct ne6x_soc_temperature *temp) +{ + return ne6x_reg_get_soc_info(pf, NE6X_SOC_TEMPERATURE, (u32 *)temp, sizeof(*temp)); +} + +int ne6x_dev_get_power_consum(struct ne6x_pf *pf, struct ne6x_soc_power *power) +{ + return ne6x_reg_get_soc_info(pf, NE6X_SOC_POWER_CONSUM, (u32 *)power, sizeof(*power)); +} + +int ne6x_dev_i2c3_signal_test(struct ne6x_pf *pf, u32 *id) +{ + return ne6x_reg_get_soc_info(pf, NE6X_SOC_I2C3_TEST, (u32 *)id, sizeof(u32)); +} + +int ne6x_dev_get_fru(struct ne6x_pf *pf, u32 *buffer, u32 size) +{ + return ne6x_reg_get_soc_info(pf, NE6X_SOC_FRU, buffer, size); +} + +int ne6x_dev_start_ddr_test(struct ne6x_pf *pf) +{ + return ne6x_reg_get_soc_info(pf, NE6X_SOC_DDR_TEST, NULL, 0); +} + +int ne6x_dev_read_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size) +{ + return ne6x_reg_e2prom_read(adpt->back, offset, pbuf, size); +} + +int ne6x_dev_write_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size) +{ + return ne6x_reg_e2prom_write(adpt->back, offset, pbuf, size); +} + +int ne6x_dev_get_link_status(struct ne6x_adapter *adpt, struct ne6x_link_info *status) +{ + 
u32 link_speed = ne6x_reg_apb_read(adpt->back, 0x2087FB00 + 4 * ADPT_LPORT(adpt)); + + status->link = link_speed >> 16; + status->speed = link_speed & 0xffff; + + return 0; +} + +int ne6x_dev_get_sfp_status(struct ne6x_adapter *adpt, u8 *status) +{ + u32 sfp_state; + + sfp_state = ne6x_reg_apb_read(adpt->back, 0x2087FB40 + 4 * ADPT_LPORT(adpt)); + *status = sfp_state & 0x1; + + return 0; +} + +void ne6x_dev_update_status(struct ne6x_pf *pf, struct ne6x_port_info *port, bool is_up) +{ + u32 speed = NE6X_LINK_SPEED_25GB; + struct ne6x_phy_info *phy = &port->phy; + struct ne6x_link_status *link = &phy->link_info; + + if (!is_up) { + link->phy_type_low = NE6X_PHY_TYPE_UNKNOWN; + link->link_speed = speed; + link->link_info &= ~NE6X_AQ_LINK_UP; + phy->media_type = NE6X_MEDIA_UNKNOWN; + return; + } + + link->link_info |= NE6X_AQ_LINK_UP; + switch (speed) { + case NE6X_LINK_SPEED_10GB: + link->phy_type_low = NE6X_PHY_TYPE_10GBASE; + link->link_speed = NE6X_LINK_SPEED_10GB; + break; + case NE6X_LINK_SPEED_25GB: + link->phy_type_low = NE6X_PHY_TYPE_25GBASE; + link->link_speed = NE6X_LINK_SPEED_25GB; + break; + case NE6X_LINK_SPEED_40GB: + link->phy_type_low = NE6X_PHY_TYPE_40GBASE; + link->link_speed = NE6X_LINK_SPEED_40GB; + break; + case NE6X_LINK_SPEED_100GB: + link->phy_type_low = NE6X_PHY_TYPE_100GBASE; + link->link_speed = NE6X_LINK_SPEED_100GB; + break; + case NE6X_LINK_SPEED_200GB: + link->phy_type_low = NE6X_PHY_TYPE_200GBASE; + link->link_speed = NE6X_LINK_SPEED_200GB; + break; + default: + dev_warn(ne6x_pf_to_dev(pf), "Unrecognized link_speed (0x%x).\n", speed); + break; + } + + phy->media_type = NE6X_MEDIA_FIBER; +} + +int ne6x_dev_self_test_link(struct ne6x_adapter *adpt, int *verify) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_LINK_STATUS, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)verify, sizeof(int)); +} + +int ne6x_dev_reset_firmware(struct ne6x_adapter *adpt) +{ + return ne6x_reg_reset_firmware(adpt->back); +} + +int ne6x_dev_get_speed(struct 
ne6x_adapter *adpt, u32 *speed) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SPEED, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)speed, sizeof(u32)); +} + +int ne6x_dev_set_speed(struct ne6x_adapter *adpt, u32 speed) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SPEED, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&speed, sizeof(u32)); +} + +int ne6x_dev_get_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_PAUSE, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)fctrl, sizeof(fctrl)); +} + +int ne6x_dev_set_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_PAUSE, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)fctrl, sizeof(*fctrl)); +} + +int ne6x_dev_get_mac_stats(struct ne6x_adapter *adpt) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATS, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)&adpt->stats, sizeof(adpt->stats)); +} + +int ne6x_dev_set_mtu(struct ne6x_adapter *adpt, u32 mtu) +{ + u32 max_length = mtu + 18; + + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_MAX_FRAME, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&max_length, sizeof(max_length)); +} + +int ne6x_dev_get_mtu(struct ne6x_adapter *adpt, u32 *mtu) +{ + u32 max_length; + int ret; + + ret = ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_MAX_FRAME, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)&max_length, sizeof(max_length)); + *mtu = max_length - 18; + + return ret; +} + +static int fastlog2(int x) +{ + int idx; + + for (idx = 31; idx >= 0; idx--) { + if (x & (1 << idx)) + break; + } + + return idx; +} + +int ne6x_dev_set_rss(struct ne6x_adapter *adpt, struct ne6x_rss_info *cfg) +{ + struct rss_table rss; + u32 *rss_data = (u32 *)&rss; + int ret, i; + + memset(&rss, 0x00, sizeof(rss)); + rss.flag = cpu_to_be32(0x01); /* valid bit */ + rss.hash_fun = (cfg->hash_func << 24) & 0xFF000000; + rss.hash_fun |= (cfg->hash_type & 0xFFFFFF); + 
rss.hash_fun = cpu_to_be32(rss.hash_fun); + rss.queue_base = cpu_to_be32(ADPT_VPORTCOS(adpt)); + rss.queue_def = cpu_to_be16(0x0); + rss.queue_size = cpu_to_be16(adpt->num_queue); + rss.entry_num = fastlog2(cfg->ind_table_size); + rss.entry_num = cpu_to_be16(rss.entry_num); + rss.entry_size = cpu_to_be16(0x0); + + for (i = 0; i < cfg->ind_table_size; i++) + rss.entry_data[i] = cfg->ind_table[i]; + + ext_toeplitz_key(&cfg->hash_key[0], &rss.hash_key[0]); + + for (i = 0; i < 128; i++) + rss_data[i] = cpu_to_be32(rss_data[i]); + + ret = ne6x_reg_table_write(adpt->back, NE6X_REG_RSS_TABLE, ADPT_VPORT(adpt), + (void *)&rss, sizeof(rss)); + return ret; +} + +int ne6x_dev_upgrade_firmware(struct ne6x_adapter *adpt, u8 region, u8 *data, int size, int flags) +{ + int ret; + + clear_bit(NE6X_LINK_POOLING, adpt->back->state); + ret = ne6x_reg_upgrade_firmware(adpt->back, region, data, size); + set_bit(NE6X_LINK_POOLING, adpt->back->state); + + return ret; +} + +int ne6x_dev_get_sfp_type_len(struct ne6x_adapter *adpt, struct ne6x_sfp_mod_type_len *sfp_mode) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SFP_TYPE_LEN, NE6X_TALK_GET, + ADPT_LPORT(adpt), sfp_mode, sizeof(*sfp_mode)); +} + +int ne6x_dev_get_sfp_eeprom(struct ne6x_adapter *adpt, u8 *data, int offset, int size, int flags) +{ + return ne6x_reg_get_sfp_eeprom(adpt->back, ADPT_LPORT(adpt), data, offset, size); +} + +int ne6x_dev_clear_stats(struct ne6x_adapter *adpt) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATS, NE6X_TALK_SET, + ADPT_LPORT(adpt), NULL, 0); +} + +/* panel port mapped to logical port */ +void ne6x_dev_set_port2pi(struct ne6x_adapter *adpt) +{ + u32 val = (ADPT_LPORT(adpt) << 24) | (ADPT_VPORT(adpt) << 16) | + (adpt->port_info->hw_queue_base + 160); + + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PORT2PI_0 + ADPT_PPORT(adpt)), val); +} + +/* logical port mapped to panel port */ +void ne6x_dev_set_pi2port(struct ne6x_adapter *adpt) +{ + ne6x_reg_set_user_data(adpt->back, 
(NP_USER_DATA_PI2PORT_0 + ADPT_LPORT(adpt)), + ADPT_PPORT(adpt)); +} + +/* clear vport map */ +void ne6x_dev_clear_vport(struct ne6x_pf *pf) +{ + int idx; + + for (idx = 0; idx < 32; idx++) + ne6x_reg_set_user_data(pf, (NP_USER_DATA_PORT_2_COS_0 + idx), 0); + + for (idx = 0; idx < 64; idx++) + ne6x_reg_set_user_data(pf, (NP_USER_DATA_PORT_OLFLAGS_0 + idx), 0); +} + +/* automatically generating vp_base_cos */ +int ne6x_dev_set_vport(struct ne6x_adapter *adpt) +{ + u16 port = adpt->vport >> 1; + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, (NP_USER_DATA_PORT_2_COS_0 + port), &val); + + /* pf base cos */ + if (adpt->vport & 0x1) { + val &= 0xFFFF; + val |= ((adpt->port_info->hw_queue_base + 160) << 16); + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PORT_2_COS_0 + port), val); + } else { + val &= 0xFFFF0000; + val |= (adpt->port_info->hw_queue_base + 160); + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PORT_2_COS_0 + port), val); + } + + return 0; +} + +int ne6x_dev_get_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp) +{ + pbmp_t new_pbmp; + int ret; + + PBMP_CLEAR(new_pbmp); + ret = ne6x_reg_table_read(adpt->back, NE6X_REG_VLAN_TABLE, + ADPT_LPORT(adpt) * 4096 + (vlan_id & 0xFFF), + (void *)new_pbmp, + sizeof(pbmp_t)); + + PBMP_DWORD_GET(pbmp, 0) = PBMP_DWORD_GET(new_pbmp, 3); + PBMP_DWORD_GET(pbmp, 1) = PBMP_DWORD_GET(new_pbmp, 2); + PBMP_DWORD_GET(pbmp, 2) = PBMP_DWORD_GET(new_pbmp, 1); + PBMP_DWORD_GET(pbmp, 3) = PBMP_DWORD_GET(new_pbmp, 0); + + return ret; +} + +int ne6x_dev_set_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp) +{ + pbmp_t new_pbmp; + + PBMP_CLEAR(new_pbmp); + PBMP_DWORD_GET(new_pbmp, 0) = PBMP_DWORD_GET(pbmp, 3); + PBMP_DWORD_GET(new_pbmp, 1) = PBMP_DWORD_GET(pbmp, 2); + PBMP_DWORD_GET(new_pbmp, 2) = PBMP_DWORD_GET(pbmp, 1); + PBMP_DWORD_GET(new_pbmp, 3) = PBMP_DWORD_GET(pbmp, 0); + + return ne6x_reg_table_write(adpt->back, NE6X_REG_VLAN_TABLE, + ADPT_LPORT(adpt) * 4096 + (vlan_id & 0xFFF), + (void 
*)new_pbmp, sizeof(pbmp_t)); +} + +int ne6x_dev_vlan_add(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan) +{ + pbmp_t pbmp, new_pbmp; + u16 index = 0; + + if (vlan->tpid == ETH_P_8021Q) + index = ADPT_LPORT(adpt) * 4096; + else if (vlan->tpid == ETH_P_8021AD) + index = 4 * 4096 + ADPT_LPORT(adpt) * 4096; + + memset(pbmp, 0, sizeof(pbmp_t)); + memset(new_pbmp, 0, sizeof(pbmp_t)); + + ne6x_reg_table_read(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF), + (void *)&new_pbmp, sizeof(pbmp)); + PBMP_DWORD_GET(pbmp, 0) = PBMP_DWORD_GET(new_pbmp, 3); + PBMP_DWORD_GET(pbmp, 1) = PBMP_DWORD_GET(new_pbmp, 2); + PBMP_DWORD_GET(pbmp, 2) = PBMP_DWORD_GET(new_pbmp, 1); + PBMP_DWORD_GET(pbmp, 3) = PBMP_DWORD_GET(new_pbmp, 0); + + memset(new_pbmp, 0, sizeof(pbmp)); + + PBMP_PORT_ADD(pbmp, adpt->vport); + + PBMP_DWORD_GET(new_pbmp, 0) = PBMP_DWORD_GET(pbmp, 3); + PBMP_DWORD_GET(new_pbmp, 1) = PBMP_DWORD_GET(pbmp, 2); + PBMP_DWORD_GET(new_pbmp, 2) = PBMP_DWORD_GET(pbmp, 1); + PBMP_DWORD_GET(new_pbmp, 3) = PBMP_DWORD_GET(pbmp, 0); + + ne6x_reg_table_write(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF), + (void *)&new_pbmp, sizeof(pbmp)); + + return 0; +} + +int ne6x_dev_vlan_del(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan) +{ + pbmp_t pbmp, new_pbmp; + u16 index = 0; + + if (vlan->tpid == ETH_P_8021Q) + index = ADPT_LPORT(adpt) * 4096; + else if (vlan->tpid == ETH_P_8021AD) + index = 4 * 4096 + ADPT_LPORT(adpt) * 4096; + + memset(pbmp, 0, sizeof(pbmp)); + memset(new_pbmp, 0, sizeof(pbmp)); + + ne6x_reg_table_read(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF), + (void *)&new_pbmp, sizeof(pbmp)); + + PBMP_DWORD_GET(pbmp, 0) = PBMP_DWORD_GET(new_pbmp, 3); + PBMP_DWORD_GET(pbmp, 1) = PBMP_DWORD_GET(new_pbmp, 2); + PBMP_DWORD_GET(pbmp, 2) = PBMP_DWORD_GET(new_pbmp, 1); + PBMP_DWORD_GET(pbmp, 3) = PBMP_DWORD_GET(new_pbmp, 0); + + memset(new_pbmp, 0, sizeof(pbmp)); + + PBMP_PORT_REMOVE(pbmp, adpt->vport); + + PBMP_DWORD_GET(new_pbmp, 0) = 
PBMP_DWORD_GET(pbmp, 3); + PBMP_DWORD_GET(new_pbmp, 1) = PBMP_DWORD_GET(pbmp, 2); + PBMP_DWORD_GET(new_pbmp, 2) = PBMP_DWORD_GET(pbmp, 1); + PBMP_DWORD_GET(new_pbmp, 3) = PBMP_DWORD_GET(pbmp, 0); + + ne6x_reg_table_write(adpt->back, NE6X_REG_VLAN_TABLE, index + (vlan->vid & 0xFFF), + (void *)&new_pbmp, sizeof(pbmp)); + + return 0; +} + +/* clear vlan table */ +int ne6x_dev_clear_vlan_map(struct ne6x_pf *pf) +{ + pbmp_t pbmp; + int index; + + PBMP_CLEAR(pbmp); + for (index = 0; index < 8192; index++) + ne6x_reg_table_write(pf, NE6X_REG_VLAN_TABLE, index, (void *)pbmp, sizeof(pbmp)); + + return 0; +} + +/* port add qinq */ +int ne6x_dev_add_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid) +{ + struct ne6x_vf_vlan vlan; + u32 val = 0; + + memset(&vlan, 0, sizeof(vlan)); + + vlan.tpid = proto; + vlan.vid = vid; + + memcpy(&val, &vlan, sizeof(u32)); + ne6x_reg_set_user_data(vf->adpt->back, NP_USER_DATA_PORT0_QINQ + ADPT_VPORT(vf->adpt), val); + + return 0; +} + +/* port del qinq */ +int ne6x_dev_del_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid) +{ + ne6x_reg_set_user_data(vf->adpt->back, NP_USER_DATA_PORT0_QINQ + ADPT_VPORT(vf->adpt), 0); + + return 0; +} + +int ne6x_dev_set_uc_promiscuous_enable(struct ne6x_adapter *adpt, int enable) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + + if (enable) + val |= NE6X_F_PROMISC; + else + val &= ~NE6X_F_PROMISC; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +int ne6x_dev_set_mc_promiscuous_enable(struct ne6x_adapter *adpt, int enable) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + + if (enable) + val |= NE6X_F_RX_ALLMULTI; + else + val &= ~NE6X_F_RX_ALLMULTI; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +static void ne6x_dev_update_uc_leaf(struct 
l2fdb_dest_unicast *unicast, struct ne6x_adapter *adpt, + bool set_or_clear) +{ + u16 vport = ADPT_VPORT(adpt); + + set_or_clear ? SET_BIT(unicast->vp_bmp[vport / 32], vport % 32) : + CLR_BIT(unicast->vp_bmp[vport / 32], vport % 32); + + unicast->cnt = 0; +} + +int ne6x_dev_add_unicast_for_fastmode(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_fast_table db; + + memcpy(&db.mac[0], mac, 6); + db.start_cos = ADPT_VPORTCOS(adpt); + db.cos_num = adpt->num_queue; + + to_be32_vector(0, sizeof(db) / 4, &db); + + return ne6x_reg_set_unicast_for_fastmode(adpt->back, ADPT_VPORT(adpt), + (u32 *)&db, sizeof(db)); +} + +int ne6x_dev_add_unicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + u32 tid = 0xffffffff; + int ret; + + if (adpt->back->is_fastmode) + ne6x_dev_add_unicast_for_fastmode(adpt, mac); + + memset(&db, 0, sizeof(db)); + + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 16, &db); + + ret = ne6x_add_key(adpt, mac, 6); + if (!ret) { + memset(&db, 0, 128); + memcpy(&db.mac[0], mac, 6); + db.pport = ADPT_LPORT(adpt); + db.vlanid = 0; + + memset(&db.fw_info.unicast, 0, sizeof(db.fw_info.unicast)); + db.fw_info.unicast.flags = 0x1; + ne6x_dev_update_uc_leaf(&db.fw_info.unicast, adpt, true); + + to_be32_vector(0, 17, &db); + + ret = ne6x_reg_table_insert(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 128, &tid); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), + "insert unicast table %x %02x %02x %02x %02x %02x %02x fail\n", + ADPT_LPORT(adpt), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + } else { + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + db.fw_info.unicast.flags = 0x1; + db.fw_info.unicast.vp_bmp[0] = res.fw_info.unicast.vp_bmp[0]; + db.fw_info.unicast.vp_bmp[1] = res.fw_info.unicast.vp_bmp[1]; + db.fw_info.unicast.vp_bmp[2] = res.fw_info.unicast.vp_bmp[2]; + db.fw_info.unicast.cnt = 
res.fw_info.unicast.cnt; + ne6x_dev_update_uc_leaf(&db.fw_info.unicast, adpt, true); + + to_be32_vector(16, 17, &db); + + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + } + + return 0; +} + +static int ne6x_dev_del_unicast_for_fastmode(struct ne6x_adapter *adpt) +{ + struct l2fdb_fast_table db; + + memset(&db, 0, sizeof(db)); + + return ne6x_reg_set_unicast_for_fastmode(adpt->back, ADPT_VPORT(adpt), + (u32 *)&db, sizeof(db)); +} + +int ne6x_dev_del_unicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + int ret = 0; + + if (adpt->back->is_fastmode) + ne6x_dev_del_unicast_for_fastmode(adpt); + + ret = ne6x_del_key(adpt, mac, 6); + + memset(&db, 0, sizeof(db)); + + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 32, &db); + + ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + + memset(&db, 0, sizeof(db)); + memcpy(&db.mac[0], mac, 6); + db.vlanid = 0; + db.pport = ADPT_LPORT(adpt); + db.fw_info.unicast.flags = 0x1; + db.fw_info.unicast.vp_bmp[0] = res.fw_info.unicast.vp_bmp[0]; + db.fw_info.unicast.vp_bmp[1] = res.fw_info.unicast.vp_bmp[1]; + db.fw_info.unicast.vp_bmp[2] = res.fw_info.unicast.vp_bmp[2]; + db.fw_info.unicast.cnt = res.fw_info.unicast.cnt; + ne6x_dev_update_uc_leaf(&db.fw_info.unicast, adpt, false); + + to_be32_vector(0, 17, &db); + + if (!ret) + ret = ne6x_reg_table_delete(adpt->back, NE6X_REG_L2FDB_TABLE, (u32 *)&db, 64); + else + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + + return 0; +} + +static void ne6x_dev_update_mc_leaf(struct l2fdb_dest_multicast *multicast, + struct ne6x_adapter *adpt, bool set_or_clear) +{ + u16 vport = ADPT_VPORT(adpt); + + set_or_clear ? 
SET_BIT(multicast->vp_bmp[vport / 32], vport % 32) : + CLR_BIT(multicast->vp_bmp[vport / 32], vport % 32); +} + +int ne6x_dev_add_multicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + u32 tid = 0xffffffff; + int ret; + + memset(&db, 0, sizeof(db)); + + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 32, &db); + + ret = ne6x_add_key(adpt, mac, 6); + if (!ret) { + memset(&db, 0, sizeof(db)); + memcpy(&db.mac[0], mac, 6); + db.pport = ADPT_LPORT(adpt); + + memset(&db.fw_info.multicast, 0, sizeof(db.fw_info.multicast)); + db.fw_info.multicast.flags = 0x3; + ne6x_dev_update_mc_leaf(&db.fw_info.multicast, adpt, true); + + to_be32_vector(0, 17, &db); + + ret = ne6x_reg_table_insert(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 128, &tid); + if (ret) + dev_err(ne6x_pf_to_dev(adpt->back), + "insert multicast table %x %02x %02x %02x %02x %02x %02x fail\n", + ADPT_LPORT(adpt), mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + } else { + ret = ne6x_reg_table_search(adpt->back, NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + + db.fw_info.multicast.flags = 0x3; + db.fw_info.multicast.vp_bmp[0] = res.fw_info.multicast.vp_bmp[0]; + db.fw_info.multicast.vp_bmp[1] = res.fw_info.multicast.vp_bmp[1]; + db.fw_info.multicast.vp_bmp[2] = res.fw_info.multicast.vp_bmp[2]; + ne6x_dev_update_mc_leaf(&db.fw_info.multicast, adpt, true); + + to_be32_vector(16, 17, &db); + + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + } + + return 0; +} + +int ne6x_dev_del_multicast(struct ne6x_adapter *adpt, u8 *mac) +{ + struct l2fdb_search_result res; + struct l2fdb_table db; + int ret; + + ret = ne6x_del_key(adpt, mac, 6); + + memset(&db, 0, sizeof(db)); + + /* hash_key */ + db.pport = ADPT_LPORT(adpt); + memcpy(&db.mac[0], mac, 6); + + to_be32_vector(0, 32, &db); + + /* mac info */ + ne6x_reg_table_search(adpt->back, 
NE6X_REG_L2FDB_TABLE, + (u32 *)&db, 64, (u32 *)&res, sizeof(res)); + memset(&db, 0, 128); + memcpy(&db.mac[0], mac, 6); + db.vlanid = 0; + db.pport = ADPT_LPORT(adpt); + db.fw_info.multicast.flags = 0x3; + db.fw_info.multicast.vp_bmp[0] = res.fw_info.multicast.vp_bmp[0]; + db.fw_info.multicast.vp_bmp[1] = res.fw_info.multicast.vp_bmp[1]; + db.fw_info.multicast.vp_bmp[2] = res.fw_info.multicast.vp_bmp[2]; + + ne6x_dev_update_mc_leaf(&db.fw_info.multicast, adpt, false); + + to_be32_vector(0, 17, &db); + + if (!ret) + ret = ne6x_reg_table_delete(adpt->back, NE6X_REG_L2FDB_TABLE, (u32 *)&db, 64); + else + ret = ne6x_reg_table_update(adpt->back, NE6X_REG_L2FDB_TABLE, + res.key_index + 1, (u32 *)&db.fw_info, 64); + + return ret; +} + +inline void ne6x_dev_update_boradcast_leaf(u32 *leaf, struct ne6x_adapter *adpt, bool set_or_clear) +{ + u16 vport = ADPT_VPORT(adpt); + + set_or_clear ? SET_BIT(*leaf, vport % 32) : CLR_BIT(*leaf, vport % 32); +} + +int ne6x_dev_add_broadcast_leaf(struct ne6x_adapter *adpt) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), &val); + ne6x_dev_update_boradcast_leaf(&val, adpt, true); + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), val); + + return 0; +} + +int ne6x_dev_del_broadcast_leaf(struct ne6x_adapter *adpt) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), &val); + ne6x_dev_update_boradcast_leaf(&val, adpt, false); + ne6x_reg_set_user_data(adpt->back, (NP_USER_DATA_PI0_BROADCAST_LEAF + + ADPT_LPORT(adpt) * 4 + ADPT_VPORT(adpt) / 32), val); + + return 0; +} + +u32 ne6x_dev_get_features(struct ne6x_adapter *adpt) +{ + int val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + + return val; +} + +int ne6x_dev_set_features(struct 
ne6x_adapter *adpt, u32 val) +{ + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +int ne6x_dev_enable_rxhash(struct ne6x_adapter *adpt, int enable) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + if (enable) + val |= NE6X_F_RSS; + else + val &= ~NE6X_F_RSS; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +int ne6x_dev_set_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state fec) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_FEC, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&fec, sizeof(int)); +} + +int ne6x_dev_set_mac_inloop(struct ne6x_adapter *adpt, int enable) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_LOOPBACK, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&enable, sizeof(int)); +} + +int ne6x_dev_get_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state *fec) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_FEC, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)fec, sizeof(int)); +} + +int ne6x_dev_set_sfp_speed(struct ne6x_adapter *adpt, u32 speed) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SFP_SPEED, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&speed, sizeof(u32)); +} + +int ne6x_dev_get_sfp_speed(struct ne6x_adapter *adpt, u32 *speed) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_SFP_SPEED, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)speed, sizeof(u32)); +} + +int ne6x_dev_set_if_state(struct ne6x_adapter *adpt, u32 state) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATE, NE6X_TALK_SET, + ADPT_LPORT(adpt), (void *)&state, sizeof(u32)); +} + +int ne6x_dev_get_if_state(struct ne6x_adapter *adpt, u32 *state) +{ + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_STATE, NE6X_TALK_GET, + ADPT_LPORT(adpt), (void *)state, sizeof(u32)); +} + +int ne6x_dev_set_nic_stop(struct ne6x_pf *pf, u32 flag) +{ + return 
ne6x_reg_nic_stop(pf, flag); +} + +int ne6x_dev_set_nic_start(struct ne6x_pf *pf, u32 flag) +{ + return ne6x_reg_nic_start(pf, flag); +} + +int ne6x_dev_set_led(struct ne6x_adapter *adpt, bool state) +{ + return ne6x_reg_set_led(adpt->back, ADPT_LPORT(adpt), state); +} + +void ne6x_dev_transform_vf_stat_format(u32 *stat_arr, struct vf_stat *stat) +{ + u32 start_pos = 0; + + stat->rx_malform_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->rx_drop_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->rx_broadcast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->rx_multicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->rx_unicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->tx_broadcast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->tx_multicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 2; + stat->tx_unicast_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; + start_pos += 16; + stat->tx_malform_pkts = ((u64)stat_arr[start_pos] << 32) + stat_arr[start_pos + 1]; +} + +int ne6x_dev_get_vf_stat(struct ne6x_adapter *adpt, struct vf_stat *stat) +{ + u32 stat_arr[64]; + int ret; + + ret = ne6x_reg_table_read(adpt->back, NE6X_REG_VF_STAT_TABLE, ADPT_VPORT(adpt), + (u32 *)&stat_arr[0], sizeof(stat_arr)); + ne6x_dev_transform_vf_stat_format(stat_arr, stat); + + return ret; +} + +int ne6x_dev_reset_vf_stat(struct ne6x_adapter *adpt) +{ + u32 stat_arr[64] = {0}; + + return ne6x_reg_table_write(adpt->back, NE6X_REG_VF_STAT_TABLE, ADPT_VPORT(adpt), + (u32 *)&stat_arr[0], sizeof(stat_arr)); +} + +int ne6x_dev_check_speed(struct ne6x_adapter *adpt, u32 speed) +{ + switch (adpt->back->dev_type) { + case NE6000AI_2S_X16H_25G_N5: + case NE6000AI_2S_X16H_25G_N6: + if 
(speed == SPEED_25000 || speed == SPEED_10000) + return 0; + + return -EOPNOTSUPP; + case NE6000AI_2S_X16H_100G_N5: + if (speed == SPEED_40000 || speed == SPEED_100000) + return 0; + + return -EOPNOTSUPP; + default: + return -EOPNOTSUPP; + } +} + +int ne6x_dev_set_fw_lldp(struct ne6x_adapter *adpt, bool state) +{ + u32 val = 0; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + if (state) + val |= NE6X_F_RX_FW_LLDP; + else + val &= ~NE6X_F_RX_FW_LLDP; + + ne6x_reg_set_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), val); + + return 0; +} + +#define NE6X_METER_STEP 152 +#define NE6X_DF_METER_CBS_PBS (100 * 152) +int ne6x_dev_set_vf_bw(struct ne6x_adapter *adpt, int tx_rate) +{ + u32 val = 0, ret = 0; + u32 cir = 0, cbs = 0; + struct meter_table vf_bw; + + ne6x_reg_get_user_data(adpt->back, NP_USER_DATA_PORT_OLFLAGS_0 + ADPT_VPORT(adpt), &val); + memset(&vf_bw, 0, sizeof(struct meter_table)); + + if (tx_rate) + val |= NE6X_F_TX_QOSBANDWIDTH; + else + val &= ~NE6X_F_TX_QOSBANDWIDTH; + + if (tx_rate) { + cir = tx_rate; + cbs = 0xffffff; + vf_bw.pbs = cbs; + vf_bw.cir = cir; + vf_bw.cbs = cbs; + vf_bw.pir = cir; + ret = ne6x_reg_config_meter(adpt->back, + NE6X_METER0_TABLE | + NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | + ADPT_VPORT(adpt), + (u32 *)&vf_bw, sizeof(vf_bw)); + ne6x_reg_set_user_data(adpt->back, + NP_USER_DATA_PORT_OLFLAGS_0 + + ADPT_VPORT(adpt), + val); + } else { + ne6x_reg_set_user_data(adpt->back, + NP_USER_DATA_PORT_OLFLAGS_0 + + ADPT_VPORT(adpt), + val); + ret = ne6x_reg_config_meter(adpt->back, + NE6X_METER0_TABLE | + NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | + ADPT_VPORT(adpt), + (u32 *)&vf_bw, sizeof(vf_bw)); + } + + return ret; +} + +static int ne6x_dev_reg_pattern_test(struct ne6x_pf *pf, u32 reg, u32 val_arg) +{ + struct device *dev; + u32 val, orig_val; + + orig_val = ne6x_reg_apb_read(pf, reg); + dev = ne6x_pf_to_dev(pf); + + ne6x_reg_apb_write(pf, reg, val_arg); + val = 
ne6x_reg_apb_read(pf, reg); + if (val != val_arg) { + dev_err(dev, "%s: reg pattern test failed - reg 0x%08x val 0x%08x\n", + __func__, reg, val); + return -1; + } + + ne6x_reg_apb_write(pf, reg, orig_val); + val = ne6x_reg_apb_read(pf, reg); + if (val != orig_val) { + dev_err(dev, "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n", + __func__, reg, orig_val, val); + return -1; + } + + return 0; +} + +#define NE6X_TEST_INT_SET_VALUE 0x1000000000000000 /* bit 60 */ +int ne6x_dev_test_intr(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + int base = adpt->base_vector; + union ne6x_vp_int vp_int; + int ret = -1; + + if (base < NE6X_PF_VP0_NUM) { + vp_int.val = rd64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT)); + wr64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT_SET), + NE6X_TEST_INT_SET_VALUE); + vp_int.val = rd64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT)); + if (vp_int.val & NE6X_TEST_INT_SET_VALUE) { + ret = 0; + vp_int.val &= ~NE6X_TEST_INT_SET_VALUE; + wr64(hw, NE6X_VPINT_DYN_CTLN(base, NE6X_VP_INT), vp_int.val); + } + } else { + vp_int.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT_SET), + NE6X_TEST_INT_SET_VALUE); + vp_int.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + if (vp_int.val & NE6X_TEST_INT_SET_VALUE) { + ret = 0; + vp_int.val &= ~NE6X_TEST_INT_SET_VALUE; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(base - NE6X_PF_VP0_NUM, + NE6X_VP_INT), vp_int.val); + } + } + + return ret; +} + +int ne6x_dev_test_reg(struct ne6x_adapter *adpt) +{ + struct ne6x_diag_reg_info test_reg[4] = { + {0x20a00180, 0x5A5A5A5A}, + {0x20a00180, 0xA5A5A5A5}, + {0x20a00188, 0x00000000}, + {0x20a0018c, 0xFFFFFFFF} + }; + u32 value, reg; + int index; + + netdev_dbg(adpt->netdev, "Register test\n"); + for (index = 0; index < ARRAY_SIZE(test_reg); ++index) { + value = test_reg[index].value; + 
reg = test_reg[index].address; + + /* bail on failure (non-zero return) */ + if (ne6x_dev_reg_pattern_test(adpt->back, reg, value)) + return 1; + } + + return 0; +} + +#define NE6X_LOOP_TEST_TYPE 0x1234 +/* handle hook packet */ +int ne6x_dev_proto_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *ndev) +{ + struct ne6x_netdev_priv *np = netdev_priv(dev); + struct ne6x_adapter *adpt = np->adpt; + + netdev_info(dev, "recv loopback test packet success!\n"); + adpt->recv_done = true; + + kfree_skb(skb); + wake_up(&adpt->recv_notify); + + return 0; +} + +static u8 loop_dst_mac[8] = {0x00, 0x00, 0x00, 0x11, 0x11, 0x01}; +int ne6x_dev_proto_send(struct net_device *netdev, char *buf, int len) +{ + struct sk_buff *skb; + u8 *pdata = NULL; + u32 skb_len; + + skb_len = LL_RESERVED_SPACE(netdev) + len; + skb = dev_alloc_skb(skb_len); + if (!skb) + return -1; + + skb_reserve(skb, LL_RESERVED_SPACE(netdev)); + skb->dev = netdev; + skb->ip_summed = CHECKSUM_NONE; + skb->priority = 0; + pdata = skb_put(skb, len); + if (pdata) + memcpy(pdata, buf, len); + + /* send loop test packet */ + if (dev_queue_xmit(skb) < 0) { + dev_put(netdev); + kfree_skb(skb); + netdev_err(netdev, "send pkt fail.\n"); + return -1; + } + netdev_info(netdev, "send loopback test packet success!\n"); + + return 0; +} + +int ne6x_dev_test_loopback(struct ne6x_adapter *adpt) +{ + struct packet_type prot_hook; + struct ethhdr *ether_hdr; + u32 old_value; + int ret = 0; + + adpt->send_buffer = kzalloc(2048, GFP_KERNEL); + if (!adpt->send_buffer) + return -ENOMEM; + + /* config mac/pcs loopback */ + if (ne6x_dev_set_mac_inloop(adpt, true)) { + netdev_err(adpt->netdev, "loopback test set_mac_inloop fail !\n"); + return -1; + } + + old_value = ne6x_dev_get_features(adpt); + ne6x_dev_set_uc_promiscuous_enable(adpt, true); + memset(&prot_hook, 0, sizeof(struct packet_type)); + prot_hook.type = cpu_to_be16(NE6X_LOOP_TEST_TYPE); + prot_hook.dev = adpt->netdev; + 
prot_hook.func = ne6x_dev_proto_recv; + dev_add_pack(&prot_hook); + ether_hdr = (struct ethhdr *)adpt->send_buffer; + memcpy(ether_hdr->h_source, &adpt->port_info->mac.perm_addr[0], ETH_ALEN); + memcpy(ether_hdr->h_dest, loop_dst_mac, ETH_ALEN); + ether_hdr->h_proto = cpu_to_be16(NE6X_LOOP_TEST_TYPE); + adpt->send_buffer[14] = 0x45; + ne6x_dev_proto_send(adpt->netdev, adpt->send_buffer, 1024); + + if (wait_event_interruptible_timeout(adpt->recv_notify, !!adpt->recv_done, + msecs_to_jiffies(2000)) <= 0) { + netdev_info(adpt->netdev, "loopback test fail !\n"); + ret = -1; + } + + adpt->recv_done = false; + kfree(adpt->send_buffer); + adpt->send_buffer = NULL; + /* restore prosimc */ + ne6x_dev_set_features(adpt, old_value); + dev_remove_pack(&prot_hook); + if (ne6x_dev_set_mac_inloop(adpt, false)) { + netdev_err(adpt->netdev, "loopback test cancel_mac_inloop fail\n"); + return -1; + } + + return ret; +} + +int ne6x_dev_set_port_mac(struct ne6x_adapter *adpt, u8 *data) +{ + u8 mac_info[8]; + + memcpy(mac_info, data, 6); + + return ne6x_reg_talk_port(adpt->back, NE6X_MSG_PORT_INFO, NE6X_TALK_SET, ADPT_LPORT(adpt), + (void *)data, sizeof(mac_info)); +} + +static u32 crc_table[CRC32_TABLE_SIZE]; /* 1KB */ +static void ne6x_dev_crc32_for_fw_init(void) +{ + u32 remainder; + u32 dividend; + s32 bit; + + for (dividend = 0U; dividend < CRC32_TABLE_SIZE; ++dividend) { + remainder = dividend; + for (bit = 8; bit > 0; --bit) { + if ((remainder & 1U) != 0) + remainder = (remainder >> 1) ^ CRC32_REVERSED_POLYNOMIAL; + else + remainder >>= 1; + } + + crc_table[dividend] = remainder; + } +} + +static u32 ne6x_dev_crc32_for_fw(const void *message, u32 bytes) +{ + const u8 *buffer = (const u8 *)message; + u32 remainder = CRC32_INITIAL_REMAINDER; + u8 idx; + + ne6x_dev_crc32_for_fw_init(); + + while (bytes-- > 0) { + idx = (u8)(*buffer++ ^ remainder); + remainder = crc_table[idx] ^ (remainder >> 8); + } + + return remainder ^ CRC32_FINALIZE_REMAINDER; +} + +static int 
ne6x_dev_get_fw_region(const u8 *data, u32 size, int *region) +{ + if (size < NE6X_FW_SIG_LENGTH) + return NE6X_FW_NOT_SUPPORT; + + if (!memcmp(data, NE6X_FW_810_APP_SIG, NE6X_FW_SIG_LENGTH)) { + *region = NE6X_ETHTOOL_FLASH_810_APP; + return 0; + } else if (!memcmp(data, NE6X_FW_NP_APP_SIG, NE6X_FW_SIG_LENGTH)) { + *region = NE6X_ETHTOOL_FLASH_NP; + return 0; + } else if (!memcmp(data, NE6X_FW_PXE_SIG, NE6X_FW_SIG_LENGTH)) { + *region = NE6X_ETHTOOL_FLASH_PXE; + return 0; + } else if (!memcmp(data, NE6X_FW_810_LDR_SIG, NE6X_FW_SIG_LENGTH)) { + *region = NE6X_ETHTOOL_FLASH_810_LOADER; + return 0; + } else if (!memcmp(data, NE6X_FW_FRU_SIG, NE6X_FW_SIG_LENGTH)) { + *region = NE6X_ETHTOOL_FRU; + return 0; + } else if (!memcmp(data, NE6X_FW_807_APP_SIG, NE6X_FW_SIG_LENGTH)) { + *region = NE6X_ETHTOOL_FLASH_807_APP; + return 0; + } else { + return NE6X_FW_NOT_SUPPORT; + } +} + +static int ne6x_dev_check_fw(const u8 *data, const u32 size, const int region) +{ + struct ne6x_fw_common_header *comm_hdr; + struct ne6x_fw_np_header *np_hdr; + u32 hcrc, pcrc, crc; + + switch (region) { + case NE6X_ETHTOOL_FLASH_810_APP: + case NE6X_ETHTOOL_FLASH_PXE: + case NE6X_ETHTOOL_FLASH_810_LOADER: + case NE6X_ETHTOOL_FLASH_807_APP: + comm_hdr = (struct ne6x_fw_common_header *)&data[NE6X_FW_SIG_OFFSET]; + hcrc = comm_hdr->header_crc; + pcrc = comm_hdr->package_crc; + comm_hdr->header_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, sizeof(*comm_hdr)); + if (crc != hcrc) + return NE6X_FW_HEADER_CRC_ERR; + + if (comm_hdr->length != size) + return NE6X_FW_LENGTH_ERR; + + comm_hdr->package_crc = CRC32_INITIAL_REMAINDER; + comm_hdr->header_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, comm_hdr->length); + comm_hdr->package_crc = pcrc; + comm_hdr->header_crc = hcrc; + if (crc != pcrc) + return NE6X_FW_PKG_CRC_ERR; + + break; + case NE6X_ETHTOOL_FLASH_NP: + np_hdr = (struct ne6x_fw_np_header *)&data[NE6X_FW_SIG_OFFSET]; + hcrc = np_hdr->hdr_crc; + pcrc = 
np_hdr->pkg_crc; + np_hdr->hdr_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, sizeof(*np_hdr)); + if (crc != hcrc) + return NE6X_FW_HEADER_CRC_ERR; + + if (np_hdr->img_length != size) + return NE6X_FW_LENGTH_ERR; + + np_hdr->pkg_crc = CRC32_INITIAL_REMAINDER; + np_hdr->hdr_crc = CRC32_INITIAL_REMAINDER; + crc = ne6x_dev_crc32_for_fw(data, np_hdr->img_length); + np_hdr->pkg_crc = pcrc; + np_hdr->hdr_crc = hcrc; + if (crc != pcrc) + return NE6X_FW_PKG_CRC_ERR; + + break; + } + + return 0; +} + +int ne6x_dev_validate_fw(const u8 *data, const u32 size, int *region) +{ + if (ne6x_dev_get_fw_region(data, size, region)) + return NE6X_FW_NOT_SUPPORT; + + return ne6x_dev_check_fw(data, size, *region); +} + +int ne6x_dev_set_tx_rx_state(struct ne6x_adapter *adpt, int tx_state, int rx_state) +{ + u32 value = ne6x_dev_get_features(adpt); + + if (tx_state) + value &= ~NE6X_F_TX_DISABLE; + else + value |= NE6X_F_TX_DISABLE; + + if (rx_state) + value &= ~NE6X_F_RX_DISABLE; + else + value |= NE6X_F_RX_DISABLE; + + ne6x_dev_set_features(adpt, value); + + return 0; +} + +int ne6x_dev_set_fast_mode(struct ne6x_pf *pf, bool is_fast_mode, u8 number_queue) +{ + u32 mode; + + if (is_fast_mode) { + mode = pf->num_alloc_vfs; + mode |= 1 << 16; + pf->is_fastmode = true; + } else { + mode = 0; + pf->is_fastmode = false; + } + + return ne6x_reg_set_user_data(pf, NP_USER_DATA_FAST_MODE, mode); +} + +int ne6x_dev_get_dump_data_len(struct ne6x_pf *pf, u32 *size) +{ + return ne6x_reg_get_dump_data_len(pf, size); +} + +int ne6x_dev_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size) +{ + return ne6x_reg_get_dump_data(pf, data, size); +} + +int ne6x_dev_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect) +{ + return ne6x_reg_set_norflash_write_protect(pf, write_protect); +} + +int ne6x_dev_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect) +{ + return ne6x_reg_get_norflash_write_protect(pf, p_write_protect); +} diff --git 
a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h new file mode 100644 index 000000000000..02d896596236 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_dev.h @@ -0,0 +1,319 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_DEV_H +#define _NE6X_DEV_H + +#include "ne6x_portmap.h" + +#define NE6000AI_2S_X16H_100G_N5 0xA050 +#define NE6000AI_2S_X16H_25G_N5 0xA030 +#define NE6000AI_2S_X16H_25G_N6 0xA031 + +#define NE6000_IF_INTERFACE_UP 1 +#define NE6000_IF_INTERFACE_DOWN 0 + +struct ne6x_flowctrl { + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; + +struct ne6x_sfp_mod_type_len { + u32 type; + u32 len; +}; + +enum { + NE6X_SOC_TEMPERATURE = 0x0, + NE6X_SOC_POWER_CONSUM, + NE6X_SOC_DDR_TEST, + NE6X_SOC_FRU, + NE6X_SOC_SERDES_SEND_BIT, + NE6X_SOC_I2C3_TEST, +}; + +struct ne6x_soc_temperature { + u32 chip_temerature; + u32 board_temperature; +}; + +struct ne6x_soc_power { + u32 cur; + u32 vol; + u32 power; +}; + +#define NE6X_FW_SIG_OFFSET 0x0 + +#define NE6X_FW_SIG_LENGTH 8 + +#define NE6X_FW_810_LDR_SIG "NE6K810L" +#define NE6X_FW_810_APP_SIG "NE6K810A" +#define NE6X_FW_807_APP_SIG "NE6K807A" +#define NE6X_FW_803_APP_SIG "NE6K803A" +#define NE6X_FW_803_LDR_SIG "NE6K803L" +#define NE6X_FW_NP_APP_SIG "NE6KNPV1" +#define NE6X_FW_TBL_SIG "NE6KTBL*" +#define NE6X_FW_PXE_SIG "NE6KPXE*" +#define NE6X_FW_FRU_SIG "NE6KFRU*" + +struct ne6x_fw_common_header { + u8 signature[NE6X_FW_SIG_LENGTH]; + u32 version; + u32 length; + u32 sections; + u32 sect_start_addr; + u32 type; + u32 build_date; + u8 reserved[16]; + u8 fw_ver[8]; + u32 package_crc; + u32 header_crc; +}; /* 64B */ + +struct ne6x_fw_np_iwidth { + char sig[4]; + u16 width; + u16 ocp; +}; /* 8B */ + +struct ne6x_fw_np_isad { + char sig[4]; + u32 isa_id; + + struct ne6x_fw_np_iwidth fp; + struct ne6x_fw_np_iwidth dp; + struct ne6x_fw_np_iwidth rp; +}; /* 32B */ + +struct 
ne6x_fw_np_atd { + char sig[4]; + u32 at_id; + + struct ne6x_fw_np_iwidth te; +}; /* 16B */ + +struct ne6x_fw_np_header { + char signature[NE6X_FW_SIG_LENGTH]; + u32 hdr_version; + u32 hdr_length; + + u32 rsvd; + u32 build_date; + u32 img_version; + u32 img_length; + + u32 npc_cnt; + u32 npc_offset; + u32 isa_cnt; + u32 isa_offset; + + u32 at_cnt; + u32 at_offset; + u32 atd_cnt; + u32 atd_offset; + + struct ne6x_fw_np_isad ISA[1]; + + struct ne6x_fw_np_atd ATD[1]; + + u32 cipher; /* For future use */ + u32 comp; /* For future use */ + u32 pkg_crc; + u32 hdr_crc; +}; /* 128 B */ + +#define CRC32_REVERSED_POLYNOMIAL 0xEDB88320U +#define CRC32_INITIAL_REMAINDER 0xFFFFFFFFU +#define CRC32_FINALIZE_REMAINDER 0xFFFFFFFFU +#define CRC32_TABLE_SIZE 256U + +enum { + NE6X_FW_NOT_SUPPORT = -1, + NE6X_FW_HEADER_CRC_ERR = -2, + NE6X_FW_LENGTH_ERR = -3, + NE6X_FW_PKG_CRC_ERR = -4, +}; + +struct ne6x_key_filter { + struct list_head list; + struct ne6x_key key; + struct { + u8 is_new_key : 1; /* filter is new, wait for PF answer */ + u8 remove : 1; /* filter needs to be removed */ + u8 add : 1; /* filter needs to be added */ + u8 padding : 5; + u8 refcnt; + }; +}; + +struct ne6x_vlan_filter { + struct list_head list; + struct ne6x_vlan vlan; + struct { + u8 is_new_vlan : 1; /* filter is new, wait for PF answer */ + u8 remove : 1; /* filter needs to be removed */ + u8 add : 1; /* filter needs to be added */ + u8 padding : 5; + u8 refcnt; + }; +}; + +enum { + NE6X_METER_SUBSET0 = 0x0, + NE6X_METER_SUBSET1, + NE6X_METER_SUBSET2, + NE6X_METER_SUBSET3, + NE6X_METER_SUBSET4, + NE6X_METER_SUBSET5, + NE6X_METER_SUBSET6, + NE6X_METER_SUBSET7, + NE6X_METER_SUBSET8, + NE6X_METER_SUBSET9, + NE6X_METER_SUBSET10, + NE6X_METER_SUBSET11, + NE6X_METER_SUBSET12, + NE6X_METER_SUBSET13, + NE6X_METER_SUBSET14, + NE6X_METER_SUBSET15, +}; + +#define NE6X_METER0_TABLE 0x00000000U +#define NE6X_METER1_TABLE 0x80000000U +#define NE6X_METER_SUBSET(n) (((n) & 0xf) << 27) + +struct vf_stat { + u64 
rx_drop_pkts; + u64 rx_broadcast_pkts; + u64 rx_multicast_pkts; + u64 rx_unicast_pkts; + u64 tx_broadcast_pkts; + u64 tx_multicast_pkts; + u64 tx_unicast_pkts; + u64 rx_malform_pkts; + u64 tx_malform_pkts; +}; + +enum ne6x_fec_state { + NE6X_FEC_NONE, + NE6X_FEC_RS, + NE6X_FEC_BASER, + NE6X_FEC_AUTO, +}; + +int ne6x_dev_init(struct ne6x_pf *pf); +int ne6x_dev_get_port_num(struct ne6x_pf *pf); +int ne6x_dev_get_mac_addr(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_get_mac_stats(struct ne6x_adapter *adpt); +int ne6x_dev_get_link_status(struct ne6x_adapter *adpt, struct ne6x_link_info *status); +int ne6x_dev_set_speed(struct ne6x_adapter *adpt, u32 speed); +int ne6x_dev_set_sfp_speed(struct ne6x_adapter *adpt, u32 speed); +int ne6x_dev_get_sfp_speed(struct ne6x_adapter *adpt, u32 *speed); + +int ne6x_dev_reset_firmware(struct ne6x_adapter *adpt); + +int ne6x_dev_self_test_link(struct ne6x_adapter *adpt, int *verify); + +u32 ne6x_dev_get_features(struct ne6x_adapter *adpt); +int ne6x_dev_set_features(struct ne6x_adapter *adpt, u32 value); + +int ne6x_dev_set_mtu(struct ne6x_adapter *adpt, u32 mtu); +int ne6x_dev_get_mtu(struct ne6x_adapter *adpt, u32 *mtu); + +void ne6x_dev_clear_vport(struct ne6x_pf *pf); +void ne6x_dev_set_port2pi(struct ne6x_adapter *adpt); +void ne6x_dev_set_pi2port(struct ne6x_adapter *adpt); +int ne6x_dev_set_vport(struct ne6x_adapter *adpt); + +int ne6x_dev_set_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp); +int ne6x_dev_get_vlan_port(struct ne6x_adapter *adpt, u16 vlan_id, pbmp_t pbmp); +int ne6x_dev_vlan_add(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan); +int ne6x_dev_vlan_del(struct ne6x_adapter *adpt, struct ne6x_vlan *vlan); +int ne6x_dev_add_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid); +int ne6x_dev_del_vf_qinq(struct ne6x_vf *vf, __be16 proto, u16 vid); +int ne6x_dev_clear_vlan_map(struct ne6x_pf *pf); + +int ne6x_dev_set_rss(struct ne6x_adapter *adpt, struct ne6x_rss_info *info); + +int 
ne6x_dev_get_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl); +int ne6x_dev_set_flowctrl(struct ne6x_adapter *adpt, struct ne6x_flowctrl *fctrl); +int ne6x_dev_get_port_fec(struct ne6x_adapter *adpt, int *status); + +int ne6x_dev_write_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size); +int ne6x_dev_read_eeprom(struct ne6x_adapter *adpt, int offset, u8 *pbuf, int size); + +int ne6x_dev_clear_stats(struct ne6x_adapter *adpt); + +int ne6x_dev_get_port_fec(struct ne6x_adapter *adpt, int *status); + +int ne6x_dev_set_uc_promiscuous_enable(struct ne6x_adapter *adpt, int enable); +int ne6x_dev_set_mc_promiscuous_enable(struct ne6x_adapter *adpt, int enable); + +int ne6x_dev_set_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state fec); +int ne6x_dev_get_fec(struct ne6x_adapter *adpt, enum ne6x_fec_state *fec); + +int ne6x_dev_add_unicast(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_del_unicast(struct ne6x_adapter *adpt, u8 *mac); + +int ne6x_dev_add_multicast(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_del_multicast(struct ne6x_adapter *adpt, u8 *mac); +int ne6x_dev_enable_rxhash(struct ne6x_adapter *adpt, int enable); +int ne6x_dev_read_qsfp(struct ne6x_adapter *adpt, u8 regaddr, u8 *data, int len); + +int ne6x_dev_upgrade_firmware(struct ne6x_adapter *adpt, u8 region, u8 *data, int size, int flags); + +int ne6x_dev_get_sfp_type_len(struct ne6x_adapter *adpt, struct ne6x_sfp_mod_type_len *sfp_mode); + +int ne6x_dev_get_sfp_eeprom(struct ne6x_adapter *adpt, u8 *data, int offset, int size, int flags); + +int ne6x_dev_set_nic_stop(struct ne6x_pf *pf, u32 flag); +int ne6x_dev_set_nic_start(struct ne6x_pf *pf, u32 flag); +int ne6x_dev_get_temperature_info(struct ne6x_pf *pf, struct ne6x_soc_temperature *temp); +int ne6x_dev_get_power_consum(struct ne6x_pf *pf, struct ne6x_soc_power *power); +int ne6x_dev_get_fru(struct ne6x_pf *pf, u32 *buffer, u32 size); +int ne6x_dev_start_ddr_test(struct ne6x_pf *pf); +int 
ne6x_dev_i2c3_signal_test(struct ne6x_pf *pf, u32 *id); + +int ne6x_dev_set_if_state(struct ne6x_adapter *adpt, u32 state); +int ne6x_dev_get_if_state(struct ne6x_adapter *adpt, u32 *state); + +int ne6x_dev_get_sfp_status(struct ne6x_adapter *adpt, u8 *status); + +int ne6x_dev_set_led(struct ne6x_adapter *adpt, bool state); +int ne6x_dev_get_vf_stat(struct ne6x_adapter *adpt, struct vf_stat *stat); +int ne6x_dev_reset_vf_stat(struct ne6x_adapter *adpt); +int ne6x_dev_check_speed(struct ne6x_adapter *adpt, u32 speed); + +int ne6x_reg_table_update(struct ne6x_pf *pf, enum ne6x_reg_table table, u32 index, + u32 *data, int size); + +int ne6x_dev_set_fw_lldp(struct ne6x_adapter *adpt, bool state); + +int ne6x_dev_set_vf_bw(struct ne6x_adapter *adpt, int tx_rate); + +int ne6x_dev_test_loopback(struct ne6x_adapter *adpt); +int ne6x_dev_test_reg(struct ne6x_adapter *adpt); +int ne6x_dev_test_intr(struct ne6x_adapter *adpt); +int ne6x_dev_set_port_mac(struct ne6x_adapter *adpt, u8 *data); +int ne6x_dev_add_broadcast_leaf(struct ne6x_adapter *adpt); +int ne6x_dev_del_broadcast_leaf(struct ne6x_adapter *adpt); +int ne6x_dev_validate_fw(const u8 *data, const u32 size, int *region); + +int ne6x_dev_set_tx_rx_state(struct ne6x_adapter *adpt, int tx_state, int rx_state); +int ne6x_dev_set_fast_mode(struct ne6x_pf *pf, bool is_fast_mode, u8 num_queue); +int ne6x_dev_add_unicast_for_fastmode(struct ne6x_adapter *adpt, u8 *mac); + +int ne6x_dev_get_dump_data_len(struct ne6x_pf *pf, u32 *size); +int ne6x_dev_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size); +int ne6x_dev_set_white_list(struct ne6x_pf *pf, bool enable); +void ne6x_dev_set_ddos(struct ne6x_pf *pf, bool enable); +int ne6x_dev_get_pport(struct ne6x_adapter *adpt); +int ne6x_dev_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect); +int ne6x_dev_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect); + +u32 ne6x_dev_crc32(const u8 *buf, u32 size); +void ne6x_dev_set_trust_vlan(struct 
ne6x_pf *pf, bool enable); +bool ne6x_dev_get_trust_vlan(struct ne6x_pf *pf); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c new file mode 100644 index 000000000000..063b734f238f --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.c @@ -0,0 +1,1623 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6x.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include +#include "version.h" + +static const char ne6x_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test ", "Loopback test ", "Register test ", "Interrupt test" +}; + +#define NE6X_TEST_LEN (sizeof(ne6x_gstrings_test) / ETH_GSTRING_LEN) + +static int ne6x_q_stats_len(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int stats_size, total_slen = 0; + + /* Tx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_txq_stats); + total_slen += adpt->num_queue * (stats_size / sizeof(u64)); + + /* Rx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_rxq_stats); + total_slen += adpt->num_queue * (stats_size / sizeof(u64)); + + /* CQ stats */ + stats_size = sizeof(struct ne6x_cq_stats); + total_slen += adpt->num_queue * (stats_size / sizeof(u64)); + + return total_slen; +} + +struct ne6x_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* Helper macro for defining some statistics directly copied from the netdev + * stats structure. 
+ */ +#define NE6X_NETDEV_STAT(_net_stat) NE6X_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) + +static const struct ne6x_stats ne6x_gstrings_adpt_stats[] = { + NE6X_NETDEV_STAT(rx_packets), + NE6X_NETDEV_STAT(tx_packets), + NE6X_NETDEV_STAT(rx_bytes), + NE6X_NETDEV_STAT(tx_bytes), + NE6X_NETDEV_STAT(rx_errors), + NE6X_NETDEV_STAT(tx_errors), + NE6X_NETDEV_STAT(rx_dropped), + NE6X_NETDEV_STAT(tx_dropped), + NE6X_NETDEV_STAT(collisions), + NE6X_NETDEV_STAT(rx_length_errors), + NE6X_NETDEV_STAT(rx_crc_errors), +}; + +#define NE6X_DEVICE_ETH_STAT(_dev_eth_stat) NE6X_STAT(struct ne6x_eth_stats, \ + #_dev_eth_stat, _dev_eth_stat) + +static const struct ne6x_stats ne6x_gstrings_adpt_dev_eth_stats[] = { + NE6X_DEVICE_ETH_STAT(rx_unicast), + NE6X_DEVICE_ETH_STAT(rx_multicast), + NE6X_DEVICE_ETH_STAT(rx_broadcast), + NE6X_DEVICE_ETH_STAT(rx_discards), + NE6X_DEVICE_ETH_STAT(rx_miss), + NE6X_DEVICE_ETH_STAT(tx_unicast), + NE6X_DEVICE_ETH_STAT(tx_multicast), + NE6X_DEVICE_ETH_STAT(tx_broadcast), + NE6X_DEVICE_ETH_STAT(rx_malform), + NE6X_DEVICE_ETH_STAT(tx_malform), +}; + +#define NE6X_PF_STAT(_name, _stat) NE6X_STAT(struct ne6x_pf, _name, _stat) + +static const struct ne6x_stats ne6x_gstrings_pf_stats[] = { + NE6X_PF_STAT("tx_timeout", tx_timeout_count), +}; + +/* per-queue ring statistics */ +#define NE6X_QUEUE_STAT(_name, _stat) NE6X_STAT(struct ne6x_ring, _name, _stat) + +static const struct ne6x_stats ne6x_gstrings_tx_queue_stats[] = { + NE6X_QUEUE_STAT("tx_queue_%u_packets", stats.packets), + NE6X_QUEUE_STAT("tx_queue_%u_bytes", stats.bytes), + NE6X_QUEUE_STAT("tx_queue_%u_rst", tx_stats.restart_q), + NE6X_QUEUE_STAT("tx_queue_%u_busy", tx_stats.tx_busy), + NE6X_QUEUE_STAT("tx_queue_%u_line", tx_stats.tx_linearize), + NE6X_QUEUE_STAT("tx_queue_%u_csum_err", tx_stats.csum_err), + NE6X_QUEUE_STAT("tx_queue_%u_csum", tx_stats.csum_good), + NE6X_QUEUE_STAT("tx_queue_%u_pcie_read_err", tx_stats.tx_pcie_read_err), + NE6X_QUEUE_STAT("tx_queue_%u_ecc_err", 
tx_stats.tx_ecc_err), + NE6X_QUEUE_STAT("tx_queue_%u_drop_addr", tx_stats.tx_drop_addr), +}; + +static const struct ne6x_stats ne6x_gstrings_rx_queue_stats[] = { + NE6X_QUEUE_STAT("rx_queue_%u_packets", stats.packets), + NE6X_QUEUE_STAT("rx_queue_%u_bytes", stats.bytes), + NE6X_QUEUE_STAT("rx_queue_%u_no_eop", rx_stats.non_eop_descs), + NE6X_QUEUE_STAT("rx_queue_%u_alloc_pg_err", rx_stats.alloc_page_failed), + NE6X_QUEUE_STAT("rx_queue_%u_alloc_buf_err", rx_stats.alloc_buf_failed), + NE6X_QUEUE_STAT("rx_queue_%u_pg_reuse", rx_stats.page_reuse_count), + NE6X_QUEUE_STAT("rx_queue_%u_csum_err", rx_stats.csum_err), + NE6X_QUEUE_STAT("rx_queue_%u_csum", rx_stats.csum_good), + NE6X_QUEUE_STAT("rx_queue_%u_mem_err", rx_stats.rx_mem_error), + NE6X_QUEUE_STAT("rx_queue_%u_rx_err", rx_stats.rx_err), +}; + +static const struct ne6x_stats ne6x_gstrings_cq_queue_stats[] = { + NE6X_QUEUE_STAT("cx_queue_%u_nums", cq_stats.cq_num), + NE6X_QUEUE_STAT("cx_queue_%u_tx_nums", cq_stats.tx_num), + NE6X_QUEUE_STAT("cx_queue_%u_rx_nums", cq_stats.rx_num), +}; + +/* port mac statistics */ +#define NE6X_PORT_MAC_STAT(_name, _stat) NE6X_STAT(struct ne6x_adapter, _name, _stat) + +static const struct ne6x_stats ne6x_gstrings_port_mac_stats[] = { + NE6X_PORT_MAC_STAT("port.rx_eth_byte", stats.mac_rx_eth_byte), + NE6X_PORT_MAC_STAT("port.rx_eth", stats.mac_rx_eth), + NE6X_PORT_MAC_STAT("port.rx_eth_undersize", stats.mac_rx_eth_undersize), + NE6X_PORT_MAC_STAT("port.rx_eth_crc_err", stats.mac_rx_eth_crc), + NE6X_PORT_MAC_STAT("port.rx_eth_64b", stats.mac_rx_eth_64b), + NE6X_PORT_MAC_STAT("port.rx_eth_65_127b", stats.mac_rx_eth_65_127b), + NE6X_PORT_MAC_STAT("port.rx_eth_128_255b", stats.mac_rx_eth_128_255b), + NE6X_PORT_MAC_STAT("port.rx_eth_256_511b", stats.mac_rx_eth_256_511b), + NE6X_PORT_MAC_STAT("port.rx_eth_512_1023b", stats.mac_rx_eth_512_1023b), + NE6X_PORT_MAC_STAT("port.rx_eth_1024_15360b", stats.mac_rx_eth_1024_15360b), + NE6X_PORT_MAC_STAT("port.tx_eth_byte", stats.mac_tx_eth_byte), + 
NE6X_PORT_MAC_STAT("port.tx_eth", stats.mac_tx_eth), + NE6X_PORT_MAC_STAT("port.tx_eth_undersize", stats.mac_tx_eth_undersize), + NE6X_PORT_MAC_STAT("port.tx_eth_64b", stats.mac_tx_eth_64b), + NE6X_PORT_MAC_STAT("port.tx_eth_65_127b", stats.mac_tx_eth_65_127b), + NE6X_PORT_MAC_STAT("port.tx_eth_128_255b", stats.mac_tx_eth_128_255b), + NE6X_PORT_MAC_STAT("port.tx_eth_256_511b", stats.mac_tx_eth_256_511b), + NE6X_PORT_MAC_STAT("port.tx_eth_512_1023b", stats.mac_tx_eth_512_1023b), + NE6X_PORT_MAC_STAT("port.tx_eth_1024_15360b", stats.mac_tx_eth_1024_15360b), +}; + +#define NE6X_ADPT_STATS_LEN ARRAY_SIZE(ne6x_gstrings_adpt_stats) +#define NE6X_ADPT_DEV_ETH_STATS_LEN ARRAY_SIZE(ne6x_gstrings_adpt_dev_eth_stats) + +#define NE6X_PF_STATS_LEN ARRAY_SIZE(ne6x_gstrings_pf_stats) +#define NE6X_PORT_MAC_STATS_LEN ARRAY_SIZE(ne6x_gstrings_port_mac_stats) + +#define NE6X_ALL_STATS_LEN(n) \ + (NE6X_ADPT_STATS_LEN + NE6X_ADPT_DEV_ETH_STATS_LEN + \ + NE6X_PF_STATS_LEN + NE6X_PORT_MAC_STATS_LEN + ne6x_q_stats_len(n)) + +struct ne6x_priv_flag { + char name[ETH_GSTRING_LEN]; + u32 bitno; /* bit position in pf->flags */ +}; + +#define NE6X_PRIV_FLAG(_name, _bitno) { \ + .name = _name, \ + .bitno = _bitno, \ +} + +static const struct ne6x_priv_flag ne6x_gstrings_priv_flags[] = { + NE6X_PRIV_FLAG("disable-fw-lldp", NE6X_ADPT_F_DISABLE_FW_LLDP), + NE6X_PRIV_FLAG("link-down-on-close", NE6X_ADPT_F_LINKDOWN_ON_CLOSE), + NE6X_PRIV_FLAG("write-protect", NE6X_ADPT_F_NORFLASH_WRITE_PROTECT), + NE6X_PRIV_FLAG("ddos-switch", NE6X_ADPT_F_DDOS_SWITCH), + NE6X_PRIV_FLAG("white-list", NE6X_ADPT_F_ACL), + NE6X_PRIV_FLAG("trust-vlan", NE6X_ADPT_F_TRUST_VLAN), +}; + +#define NE6X_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ne6x_gstrings_priv_flags) + +static void ne6x_get_settings_link_up_fec(struct net_device *netdev, + u32 link_speed, + struct ethtool_link_ksettings *ks) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + enum ne6x_fec_state fec = NE6X_FEC_NONE; + + switch (link_speed) { + case 
NE6X_LINK_SPEED_25GB: + case NE6X_LINK_SPEED_100GB: + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER); + + ne6x_dev_get_fec(adpt, &fec); + if (fec == NE6X_FEC_RS) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS); + else if (fec == NE6X_FEC_BASER) + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_BASER); + else + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + + break; + default: + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE); + break; + } +} + +static void ne6x_get_settings_link_up(struct ethtool_link_ksettings *ks, struct net_device *netdev) +{ + struct ne6x_link_status *link_info; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + + link_info = &adpt->port_info->phy.link_info; + switch (link_info->link_speed) { + case NE6X_LINK_SPEED_100GB: + ks->base.speed = SPEED_100000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 100000baseCR4_Full); + break; + case NE6X_LINK_SPEED_40GB: + ks->base.speed = SPEED_40000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 40000baseCR4_Full); + break; + case NE6X_LINK_SPEED_25GB: + ks->base.speed = SPEED_25000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full); + break; + case NE6X_LINK_SPEED_10GB: + ks->base.speed = SPEED_10000; + ethtool_link_ksettings_add_link_mode(ks, advertising, 10000baseT_Full); + break; + case NE6X_LINK_SPEED_200GB: + ks->base.speed = SPEED_200000; + break; + default: + netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n", + link_info->link_speed); + break; + } + + ks->base.duplex = DUPLEX_FULL; + + if (link_info->an_info & NE6X_AQ_AN_COMPLETED) + 
ethtool_link_ksettings_add_link_mode(ks, lp_advertising, Autoneg); + + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); + + ne6x_get_settings_link_up_fec(netdev, link_info->link_speed, ks); +} + +static void ne6x_phy_type_to_ethtool(struct ne6x_adapter *adpt, + struct ethtool_link_ksettings *ks) +{ + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); +} + +static void ne6x_get_settings_link_down(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ne6x_phy_type_to_ethtool(adpt, ks); + /* With no link, speed and duplex are unknown */ + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; +} + +static int ne6x_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct ne6x_link_status *hw_link_info; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); + hw_link_info = &adpt->port_info->phy.link_info; + + /* set speed and duplex */ + if (hw_link_info->link_info & NE6X_AQ_LINK_UP) + ne6x_get_settings_link_up(ks, netdev); + else + ne6x_get_settings_link_down(ks, netdev); + + if (!ne6x_dev_check_speed(adpt, SPEED_10000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full); + + if (!ne6x_dev_check_speed(adpt, SPEED_25000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full); + + if (!ne6x_dev_check_speed(adpt, SPEED_100000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseCR4_Full); + + if (!ne6x_dev_check_speed(adpt, SPEED_40000)) + ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseCR4_Full); + + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); + 
ks->base.port = PORT_FIBRE; + + /* Set flow control settings */ + ethtool_link_ksettings_add_link_mode(ks, supported, Pause); + + return 0; +} + +static int ne6x_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + bool if_running = netif_running(netdev); + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + u32 master = (adpt->idx == 0); + char *speed = "Unknown "; + u32 link_speed; + u32 sfp_speed; + int ret; + + if (ne6x_dev_check_speed(adpt, ks->base.speed)) { + dev_info(&pf->pdev->dev, "speed not support\n"); + return -EOPNOTSUPP; + } + + if (!master && pf->dev_type == NE6000AI_2S_X16H_25G_N5) { + dev_info(&pf->pdev->dev, "only master port can change speed\n"); + return -EOPNOTSUPP; + } + + switch (ks->base.speed) { + case SPEED_100000: + link_speed = NE6X_LINK_SPEED_100GB; + break; + case SPEED_40000: + link_speed = NE6X_LINK_SPEED_40GB; + break; + case SPEED_25000: + link_speed = NE6X_LINK_SPEED_25GB; + break; + case SPEED_10000: + link_speed = NE6X_LINK_SPEED_10GB; + break; + default: + return -EOPNOTSUPP; + } + + ret = ne6x_dev_get_sfp_speed(adpt, &sfp_speed); + if (!ret) { + switch (sfp_speed) { + case NE6X_LINK_SPEED_40GB: + speed = "40 G"; + break; + case NE6X_LINK_SPEED_100GB: + speed = "100 G"; + break; + case NE6X_LINK_SPEED_10GB: + speed = "10 G"; + break; + case NE6X_LINK_SPEED_25GB: + speed = "25 G"; + break; + case NE6X_LINK_SPEED_200GB: + speed = "200 G"; + break; + default: + break; + } + + if (sfp_speed != link_speed) + netdev_info(adpt->netdev, "speed not match, sfp support%sbps Full Duplex\n", + speed); + } + + if (if_running) + ne6x_close(adpt->netdev); + + ret = ne6x_dev_set_speed(adpt, link_speed); + if (if_running) + ne6x_open(adpt->netdev); + + return ret; +} + +static void __ne6x_add_stat_strings(u8 **p, const struct ne6x_stats stats[], + const unsigned int size, + ...) 
+{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +#define ne6x_add_stat_strings(p, stats, ...) \ + __ne6x_add_stat_strings(p, stats, ARRAY_SIZE(stats), ##__VA_ARGS__) + +static void ne6x_get_stat_strings(struct net_device *netdev, u8 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + unsigned int i; + + ne6x_add_stat_strings(&data, ne6x_gstrings_adpt_stats); + ne6x_add_stat_strings(&data, ne6x_gstrings_adpt_dev_eth_stats); + ne6x_add_stat_strings(&data, ne6x_gstrings_pf_stats); + + for (i = 0; i < adpt->num_queue; i++) { + ne6x_add_stat_strings(&data, ne6x_gstrings_tx_queue_stats, i); + ne6x_add_stat_strings(&data, ne6x_gstrings_rx_queue_stats, i); + ne6x_add_stat_strings(&data, ne6x_gstrings_cq_queue_stats, i); + } + + ne6x_add_stat_strings(&data, ne6x_gstrings_port_mac_stats); +} + +static void ne6x_get_priv_flag_strings(struct net_device *netdev, u8 *data) +{ + unsigned int i; + u8 *p = data; + + for (i = 0; i < NE6X_PRIV_FLAG_ARRAY_SIZE; i++) { + snprintf(p, ETH_GSTRING_LEN, "%s", ne6x_gstrings_priv_flags[i].name); + p += ETH_GSTRING_LEN; + } +} + +static void ne6x_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + ne6x_get_stat_strings(netdev, data); + break; + case ETH_SS_TEST: + memcpy(data, ne6x_gstrings_test, NE6X_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_PRIV_FLAGS: + ne6x_get_priv_flag_strings(netdev, data); + break; + default: + break; + } +} + +static int ne6x_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return NE6X_ALL_STATS_LEN(netdev); + case ETH_SS_TEST: + return NE6X_TEST_LEN; + case ETH_SS_PRIV_FLAGS: + return NE6X_PRIV_FLAG_ARRAY_SIZE; + default: + return -EOPNOTSUPP; + } +} + +static void ne6x_get_mac_stats(struct ne6x_adapter *adpt) +{ + 
ne6x_dev_get_mac_stats(adpt); +} + +static void ne6x_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + struct ne6x_ring *cq_ring; + unsigned int j; + int i = 0; + char *p; + + ne6x_update_pf_stats(adpt); + + for (j = 0; j < NE6X_ADPT_STATS_LEN; j++) { + p = (char *)ne6x_get_adpt_stats_struct(adpt) + + ne6x_gstrings_adpt_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_adpt_stats[j].sizeof_stat == sizeof(u64)) ? + *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < NE6X_ADPT_DEV_ETH_STATS_LEN; j++) { + p = (char *)(&adpt->eth_stats) + + ne6x_gstrings_adpt_dev_eth_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_adpt_dev_eth_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < NE6X_PF_STATS_LEN; j++) { + p = (char *)pf + ne6x_gstrings_pf_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_pf_stats[j].sizeof_stat == sizeof(u64)) ? 
+ *(u64 *)p : *(u32 *)p; + } + + /* populate per queue stats */ + rcu_read_lock(); + for (j = 0; j < adpt->num_queue; j++) { + tx_ring = READ_ONCE(adpt->tx_rings[j]); + if (tx_ring) { + data[i++] = tx_ring->stats.packets; + data[i++] = tx_ring->stats.bytes; + data[i++] = tx_ring->tx_stats.restart_q; + data[i++] = tx_ring->tx_stats.tx_busy; + data[i++] = tx_ring->tx_stats.tx_linearize; + data[i++] = tx_ring->tx_stats.csum_err; + data[i++] = tx_ring->tx_stats.csum_good; + data[i++] = tx_ring->tx_stats.tx_pcie_read_err; + data[i++] = tx_ring->tx_stats.tx_ecc_err; + data[i++] = tx_ring->tx_stats.tx_drop_addr; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + rx_ring = READ_ONCE(adpt->rx_rings[j]); + if (rx_ring) { + data[i++] = rx_ring->stats.packets; + data[i++] = rx_ring->stats.bytes; + data[i++] = rx_ring->rx_stats.non_eop_descs; + data[i++] = rx_ring->rx_stats.alloc_page_failed; + data[i++] = rx_ring->rx_stats.alloc_buf_failed; + data[i++] = rx_ring->rx_stats.page_reuse_count; + data[i++] = rx_ring->rx_stats.csum_err; + data[i++] = rx_ring->rx_stats.csum_good; + data[i++] = rx_ring->rx_stats.rx_mem_error; + data[i++] = rx_ring->rx_stats.rx_err; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + cq_ring = READ_ONCE(adpt->cq_rings[j]); + if (cq_ring) { + data[i++] = cq_ring->cq_stats.cq_num; + data[i++] = cq_ring->cq_stats.tx_num; + data[i++] = cq_ring->cq_stats.rx_num; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + } + + rcu_read_unlock(); + + ne6x_get_mac_stats(adpt); + + for (j = 0; j < NE6X_PORT_MAC_STATS_LEN; j++) { + p = (char *)adpt + ne6x_gstrings_port_mac_stats[j].stat_offset; + data[i++] = (ne6x_gstrings_port_mac_stats[j].sizeof_stat == sizeof(u64)) ? 
+ *(u64 *)p : *(u32 *)p; + } +} + +extern char ne6x_driver_name[]; + +static void ne6x_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + u32 soc_ver = 0, np_ver = 0, erom_ver = 0; + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + char nvm_version_str[32]; + char driver_name[32]; + char temp_str[16] = {0}; + + snprintf(driver_name, 32, "%s", ne6x_driver_name); + strscpy(drvinfo->driver, driver_name, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, VERSION, sizeof(drvinfo->version)); + memset(nvm_version_str, 0, sizeof(nvm_version_str)); + soc_ver = pf->verinfo.firmware_soc_ver; + np_ver = pf->verinfo.firmware_np_ver & 0xFFFF; + erom_ver = pf->verinfo.firmware_pxe_ver & 0xFFFF; + snprintf(nvm_version_str, 20, "%d.%d.%d.%d ", (soc_ver & 0xff000000) >> 24, + ((erom_ver & 0xFFFF) / 100), ((soc_ver & 0xFFFF) / 100), + ((np_ver & 0xFFFF) / 100)); + if (erom_ver % 100) { + snprintf(temp_str, 4, "P%d", (erom_ver % 100)); + strncat(nvm_version_str, temp_str, 4); + } + if ((soc_ver & 0xffff) % 100) { + snprintf(temp_str, 4, "A%d", ((soc_ver & 0xffff) % 100)); + strncat(nvm_version_str, temp_str, 4); + } + if (np_ver % 100) { + snprintf(temp_str, 4, "N%d", (np_ver % 100)); + strncat(nvm_version_str, temp_str, 4); + } + strlcpy(drvinfo->fw_version, nvm_version_str, sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); +} + +static void ne6x_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + struct ne6x_hw *hw = &pf->hw; + unsigned int i, j, ri; + u32 *reg_buf = p; + u32 reg; + + regs->version = 1; + + /* loop through the diags reg table for what to print */ + ri = 0; + for (i = 0; ne6x_reg_list[i].offset != 0; i++) { + for (j = 0; j < ne6x_reg_list[i].elements; j++) { + reg = ne6x_reg_list[i].offset + (j * ne6x_reg_list[i].stride); + reg_buf[ri++] = rd64(hw, reg); + } + } +} + +static void ne6x_self_test(struct 
net_device *dev, struct ethtool_test *eth_test, u64 *data) +{ + memset(data, 0, sizeof(*data) * NE6X_TEST_LEN); +} + +static int ne6x_get_regs_len(struct net_device *netdev) +{ + int reg_count = 0; + int i; + + for (i = 0; ne6x_reg_list[i].offset != 0; i++) + reg_count += ne6x_reg_list[i].elements; + + return reg_count * sizeof(u32); +} + +static void ne6x_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + ring->rx_max_pending = NE6X_MAX_NUM_DESCRIPTORS; + ring->tx_max_pending = NE6X_MAX_NUM_DESCRIPTORS; + ring->rx_mini_max_pending = NE6X_MIN_NUM_DESCRIPTORS; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adpt->num_rx_desc; + ring->tx_pending = adpt->num_tx_desc; + ring->rx_mini_pending = NE6X_MIN_NUM_DESCRIPTORS; + ring->rx_jumbo_pending = 0; +} + +static int ne6x_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + u32 new_rx_count, new_tx_count, new_cq_count, new_tg_count; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + int timeout = 50; + int err = 0; + int i; + + if (ring->tx_pending > NE6X_MAX_NUM_DESCRIPTORS || + ring->tx_pending < NE6X_MIN_NUM_DESCRIPTORS || + ring->rx_pending > NE6X_MAX_NUM_DESCRIPTORS || + ring->rx_pending < NE6X_MIN_NUM_DESCRIPTORS) { + netdev_info(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, NE6X_MIN_NUM_DESCRIPTORS, + NE6X_MAX_NUM_DESCRIPTORS); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE); + new_cq_count = new_tx_count + new_rx_count; + new_tg_count = new_tx_count; + + if 
(new_tx_count == adpt->num_tx_desc && new_rx_count == adpt->num_rx_desc) + return 0; + + while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; + + usleep_range(1000, 2000); + } + + if (!netif_running(adpt->netdev)) { + adpt->num_tx_desc = new_tx_count; + adpt->num_rx_desc = new_rx_count; + adpt->num_cq_desc = new_cq_count; + adpt->num_tg_desc = new_tg_count; + netdev_info(netdev, "Link is down, queue count change happens when link is brought up\n"); + goto done; + } + + err = ne6x_close(adpt->netdev); + if (err) { + netdev_err(netdev, "fail to close adpt = %d\n", adpt->idx); + goto done; + } + + netdev_info(netdev, "Descriptors change from (Tx: %d / Rx: %d) to [%d-%d]\n", + adpt->tx_rings[0]->count, adpt->rx_rings[0]->count, new_tx_count, new_rx_count); + + /* simple case - set for the next time the netdev is started */ + for (i = 0; i < adpt->num_queue; i++) { + adpt->tx_rings[i]->count = new_tx_count; + adpt->rx_rings[i]->count = new_rx_count; + adpt->cq_rings[i]->count = new_cq_count; + adpt->tg_rings[i]->count = new_tg_count; + } + + adpt->num_tx_desc = new_tx_count; + adpt->num_rx_desc = new_rx_count; + adpt->num_cq_desc = new_cq_count; + adpt->num_tg_desc = new_tg_count; + + err = ne6x_open(adpt->netdev); + if (err) { + netdev_err(netdev, "fail to open adpt = %d\n", adpt->idx); + goto done; + } + +done: + clear_bit(NE6X_CONFIG_BUSY, pf->state); + + return err; +} + +static void ne6x_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_flowctrl flowctrl; + int ret; + + ret = ne6x_dev_get_flowctrl(adpt, &flowctrl); + if (ret) + return; + + pause->autoneg = 0; + pause->rx_pause = flowctrl.rx_pause; + pause->tx_pause = flowctrl.tx_pause; +} + +static int ne6x_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_flowctrl 
flowctrl; + int ret; + + if (pause->autoneg) + return -EOPNOTSUPP; + + flowctrl.autoneg = pause->autoneg; + flowctrl.rx_pause = pause->rx_pause; + flowctrl.tx_pause = pause->tx_pause; + + ret = ne6x_dev_set_flowctrl(adpt, &flowctrl); + if (ret) + return ret; + + return 0; +} + +static int ne6x_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + ec->tx_max_coalesced_frames_irq = 256; + ec->rx_max_coalesced_frames_irq = 256; + ec->use_adaptive_rx_coalesce = 0; + ec->use_adaptive_tx_coalesce = 0; + ec->rx_coalesce_usecs = 0; + ec->tx_coalesce_usecs = 0; + ec->rx_coalesce_usecs_high = 0; + ec->tx_coalesce_usecs_high = 0; + + return 0; +} + +static int ne6x_get_eeprom_len(struct net_device *netdev) { return 256; } + +static int ne6x_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + u8 *eeprom_buff; + int err = 0; + int ret_val; + u32 magic; + + if (eeprom->len == 0) + return -EINVAL; + + magic = hw->vendor_id | (hw->device_id << 16); + if (eeprom->magic && eeprom->magic != magic) { + /* make sure it is the right magic for NVMUpdate */ + if ((eeprom->magic >> 16) != hw->device_id) + err = -EINVAL; + else if (test_bit(NE6X_RESET_INTR_RECEIVED, pf->state)) + err = -EBUSY; + + return err; + } + + /* normal ethtool get_eeprom support */ + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ret_val = ne6x_dev_read_eeprom(adpt, 0x0, (u8 *)eeprom_buff, eeprom->len); + memcpy(bytes, eeprom_buff, eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +#define L3_RSS_FLAGS (RXH_IP_DST | RXH_IP_SRC) +#define L4_RSS_FLAGS (RXH_L4_B_0_1 | RXH_L4_B_2_3) + +static u64 ne6x_get_rss_hash_opts(struct ne6x_adapter *adpt, u64 
flow_type) +{ + u64 data = 0; + + switch (flow_type) { + case TCP_V4_FLOW: + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_TCP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_UDP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case TCP_V6_FLOW: + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_TCP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V6_FLOW: + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adpt->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_UDP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + /* Default is src/dest for IP, no matter the L4 hashing */ + data |= RXH_IP_SRC | RXH_IP_DST; + break; + } + + return data; +} + +static int ne6x_set_rss_hash_opts(struct ne6x_adapter *adpt, struct ethtool_rxnfc *cmd) +{ + u16 rss_flags = adpt->rss_info.hash_type; + int status; + + if (cmd->data != L3_RSS_FLAGS && cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS)) + return -EINVAL; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_TCP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_TCP; + break; + case TCP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_TCP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | 
NE6X_RSS_HASH_TYPE_IPV6_TCP; + break; + case UDP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_UDP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_UDP; + break; + case UDP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_UDP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_UDP; + break; + default: + return -EINVAL; + } + + if (rss_flags == adpt->rss_info.hash_type) + return 0; + + adpt->rss_info.hash_type = rss_flags; + + status = ne6x_dev_set_rss(adpt, &adpt->rss_info); + + return (status != 0) ? (-EIO) : 0; +} + +static int ne6x_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, u32 *rules) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + switch (info->cmd) { + case ETHTOOL_GRXFH: + info->data = ne6x_get_rss_hash_opts(adpt, info->flow_type); + break; + case ETHTOOL_GRXRINGS: + info->data = adpt->num_queue; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ne6x_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int status = 0; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = adpt->num_queue; + break; + case ETHTOOL_SRXFH: + status = ne6x_set_rss_hash_opts(adpt, info); + break; + default: + return -EINVAL; + } + + return status; +} + +static u32 ne6x_get_rxfh_key_size(struct net_device *netdev) +{ + return NE6X_RSS_MAX_KEY_SIZE; +} + +static u32 ne6x_get_rss_table_size(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_rss_info *rss_info = &adpt->rss_info; + + return rss_info->ind_table_size; +} + +static int ne6x_get_rxfh(struct net_device *netdev, u32 *p, u8 *key, u8 *hfunc) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct 
ne6x_rss_info *rss_info = &adpt->rss_info; + unsigned int n = rss_info->ind_table_size; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (p) { + while (n--) + p[n] = rss_info->ind_table[n]; + } + + if (key) + memcpy(key, rss_info->hash_key, ne6x_get_rxfh_key_size(netdev)); + + return 0; +} + +static int ne6x_set_rxfh(struct net_device *netdev, const u32 *p, const u8 *key, const u8 hfunc) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_rss_info *rss_info = &adpt->rss_info; + unsigned int i; + int status; + + /* We do not allow change in unsupported parameters */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + /* Fill out the redirection table */ + if (p) { + /* Allow at least 2 queues w/ SR-IOV. */ + for (i = 0; i < rss_info->ind_table_size; i++) + rss_info->ind_table[i] = p[i]; + } + + /* Fill out the rss hash key */ + if (key) + memcpy(&rss_info->hash_key[0], key, ne6x_get_rxfh_key_size(netdev)); + + status = ne6x_dev_set_rss(adpt, rss_info); + + return (status == 0) ? 
0 : (-EIO); +} + +static void ne6x_get_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + channels->max_rx = 0; + channels->max_tx = 0; + channels->max_other = 0; + channels->max_combined = adpt->port_info->hw_max_queue; + channels->rx_count = 0; + channels->tx_count = 0; + channels->other_count = 0; + channels->combined_count = adpt->num_queue; +} + +static int ne6x_set_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + int qp_remaining, q_vectors, i; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + int timeout = 50; + int err = 0; + + if (!channels->combined_count || channels->rx_count || channels->tx_count || + channels->combined_count > pf->hw.expect_vp) + return -EINVAL; + + if (channels->combined_count == adpt->num_queue) { + /* nothing to do */ + netdev_info(netdev, "channel not change, nothing to do!\n"); + return 0; + } + + while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) { + netdev_info(netdev, "ne6x config busy, timeout!!!\n"); + return -EBUSY; + } + usleep_range(1000, 2000); + } + + /* set for the next time the netdev is started */ + if (!netif_running(adpt->netdev)) { + adpt->port_info->queue = channels->combined_count; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + qp_remaining = adpt->num_queue; + q_vectors = adpt->num_q_vectors; + + for (i = 0; i < adpt->num_q_vectors; i++) { + adpt->q_vectors[i]->num_ringpairs = + DIV_ROUND_UP(qp_remaining, q_vectors - i); + adpt->q_vectors[i]->reg_idx = + adpt->q_vectors[i]->v_idx + adpt->base_vector; + qp_remaining--; + } + + for (i = 0; i < adpt->rss_info.ind_table_size; i++) + adpt->rss_info.ind_table[i] = + ethtool_rxfh_indir_default(i, adpt->num_queue); + + ne6x_dev_set_rss(adpt, &adpt->rss_info); + netdev_info(netdev, "Link is down, queue count change happens when link is 
brought up\n"); + goto done; + } + + err = ne6x_close(adpt->netdev); + if (err) { + netdev_err(netdev, "fail to close adpt = %d\n", adpt->idx); + goto done; + } + + adpt->port_info->queue = channels->combined_count; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + qp_remaining = adpt->num_queue; + q_vectors = adpt->num_q_vectors; + + for (i = 0; i < adpt->num_q_vectors; i++) { + adpt->q_vectors[i]->num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - i); + adpt->q_vectors[i]->reg_idx = adpt->q_vectors[i]->v_idx + adpt->base_vector; + qp_remaining--; + } + + for (i = 0; i < adpt->rss_info.ind_table_size; i++) + adpt->rss_info.ind_table[i] = ethtool_rxfh_indir_default(i, adpt->num_queue); + + ne6x_dev_set_rss(adpt, &adpt->rss_info); + err = ne6x_open(adpt->netdev); + if (err) { + netdev_err(netdev, "fail to open adpt = %d\n", adpt->idx); + goto done; + } + +done: + clear_bit(NE6X_CONFIG_BUSY, pf->state); + + return err; +} + +static int ne6x_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + ne6x_dev_set_led(adpt, true); + return 1; + case ETHTOOL_ID_ON: + return 0; + case ETHTOOL_ID_OFF: + return 0; + case ETHTOOL_ID_INACTIVE: + ne6x_dev_set_led(adpt, false); + } + + return 0; +} + +static int ne6x_nway_reset(struct net_device *netdev) { return 0; } + +static u64 ne6x_link_test(struct net_device *netdev, u64 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + bool link_up = false; + int verify; + + verify = 0; + link_up = adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP; + usleep_range(10, 20); + + link_up &= verify; + if (link_up) + *data = 1; + else + *data = 0; + + return *data; +} + +static void ne6x_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + /* Online tests 
*/ + if (ne6x_link_test(netdev, &data[NE6X_ETH_TEST_LINK])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + data[NE6X_ETH_TEST_LOOPBACK] = 0; + if (ne6x_dev_test_loopback(adpt)) { + data[NE6X_ETH_TEST_LOOPBACK] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + data[NE6X_ETH_TEST_REG] = 0; + if (ne6x_dev_test_reg(adpt)) { + data[NE6X_ETH_TEST_REG] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + } + + data[NE6X_ETH_TEST_INT] = 0; + if (ne6x_dev_test_intr(adpt)) { + data[NE6X_ETH_TEST_INT] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + } +} + +static int ne6x_get_fec_param(struct net_device *netdev, struct ethtool_fecparam *fecparam) +{ + struct ne6x_link_status *hw_link_info; + enum ne6x_fec_state fec = NE6X_FEC_NONE; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int err = 0; + + hw_link_info = &adpt->port_info->phy.link_info; + if (hw_link_info->link_info & NE6X_AQ_LINK_UP) { + switch (hw_link_info->link_speed) { + case NE6X_LINK_SPEED_25GB: + case NE6X_LINK_SPEED_100GB: + err = ne6x_dev_get_fec(adpt, &fec); + if (fec == NE6X_FEC_RS) { + fecparam->fec |= ETHTOOL_FEC_RS; + fecparam->active_fec = ETHTOOL_FEC_RS; + } else if (fec == NE6X_FEC_BASER) { + fecparam->fec |= ETHTOOL_FEC_BASER; + fecparam->active_fec = ETHTOOL_FEC_BASER; + } else { + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec = ETHTOOL_FEC_OFF; + } + break; + default: + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec = ETHTOOL_FEC_OFF; + break; + } + } else { + fecparam->fec |= ETHTOOL_FEC_OFF; + fecparam->active_fec = ETHTOOL_FEC_OFF; + } + + return err; +} + +static int ne6x_set_fec_param(struct net_device *netdev, struct ethtool_fecparam *fecparam) +{ + enum ne6x_fec_state fec = NE6X_FEC_NONE; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + int err = 0; + + switch (fecparam->fec) { + case ETHTOOL_FEC_AUTO: + dev_warn(&pf->pdev->dev, "Unsupported FEC mode: AUTO"); + err = -EINVAL; + goto done; + case ETHTOOL_FEC_RS: + 
fec = NE6X_FEC_RS;
+		break;
+	case ETHTOOL_FEC_BASER:
+		fec = NE6X_FEC_BASER;
+		break;
+	case ETHTOOL_FEC_OFF:
+	case ETHTOOL_FEC_NONE:
+		fec = NE6X_FEC_NONE;
+		break;
+	default:
+		dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d", fecparam->fec);
+		err = -EINVAL;
+		goto done;
+	}
+
+	/* fall through to done: with the device's status code */
+	err = ne6x_dev_set_fec(adpt, fec);
+
+done:
+	return err;
+}
+
+/* Human-readable names indexed by NE6X_ETHTOOL_FLASH_* region id.
+ * NOTE(review): the switch in ethtool_flash_region() accepts six region
+ * ids but this table has five entries — keep the two in sync.
+ */
+static const char * const flash_region_strings[] = {
+	"810 loader",
+	"810 app",
+	"807 app",
+	"NP Image",
+	"PXE Image",
+};
+
+/* Push a firmware image of @type to the device via the upgrade helper. */
+static int ethtool_flash_firmware(struct net_device *netdev, u32 type, const u8 *data,
+				  u32 size)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	struct ne6x_pf *pf = adpt->back;
+	int ret;
+
+	ret = ne6x_dev_upgrade_firmware(adpt, type, (u8 *)data, size, 1);
+	if (ret)
+		dev_err(&pf->pdev->dev, "Failed to flash firmware\n");
+
+	return ret;
+}
+
+/* Dispatch a firmware image to the flasher for a known region id.
+ * Returns -EOPNOTSUPP for unknown regions, otherwise the flasher's result.
+ */
+static int ethtool_flash_region(struct net_device *netdev, const u8 *data, u32 size, u32 region)
+{
+	struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev);
+	int ret;
+
+	netdev_info(netdev, "%s = 0x%x\n", __func__, region);
+
+	switch (region) {
+	case NE6X_ETHTOOL_FLASH_810_APP:
+	case NE6X_ETHTOOL_FLASH_NP:
+	case NE6X_ETHTOOL_FLASH_PXE:
+	case NE6X_ETHTOOL_FLASH_810_LOADER:
+	case NE6X_ETHTOOL_FRU:
+	case NE6X_ETHTOOL_FLASH_807_APP:
+		ret = ethtool_flash_firmware(netdev, region, data, size);
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	if (ret) {
+		/* Bug fix: the old code indexed flash_region_strings[region]
+		 * unconditionally, reading out of bounds for region ids not
+		 * covered by the (smaller) name table — including the
+		 * -EOPNOTSUPP path, where @region is caller-controlled.
+		 */
+		if (region < ARRAY_SIZE(flash_region_strings))
+			dev_info(&pf->pdev->dev, "loading %s fail, reload driver\n",
+				 flash_region_strings[region]);
+		else
+			dev_info(&pf->pdev->dev, "loading region 0x%x fail, reload driver\n",
+				 region);
+	}
+
+	return ret;
+}
+
+/* Validate the firmware blob and return its target region id, or -1 on error. */
+static int ne6x_ethtool_get_flash_region(struct net_device *netdev, const u8 *data, u32 *size)
+{
+	int region = -1;
+	int ret;
+
+	ret = ne6x_dev_validate_fw(data, *size, &region);
+	if (ret) {
+		netdev_err(netdev, "firmware error ret = %d\n", ret);
+		return -1;
+	}
+
+	return region;
+}
+
+/* ethtool -f entry point: only port 0 (master) may flash the device. */
+static int ne6x_set_flash(struct net_device *netdev, struct ethtool_flash *ef)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	struct 
ne6x_pf *pf = adpt->back; + const struct firmware *fw; + unsigned int master; + size_t fw_size; + u8 *fw_data; + int region; + int ret; + + master = (adpt->idx == 0); + if (!master) { + dev_info(&pf->pdev->dev, "only master port can upgrade\n"); + return -1; + } + + ret = request_firmware(&fw, ef->data, &pf->pdev->dev); + if (ret < 0) + return ret; + + fw_data = (u8 *)fw->data; + fw_size = fw->size; + if (fw_size > 0) { + region = ne6x_ethtool_get_flash_region(netdev, fw_data, (u32 *)&fw_size); + if (region < 0) { + ret = region; + goto out_free_fw; + } + + ret = ethtool_flash_region(netdev, fw_data, fw_size, region); + if (ret) + goto out_free_fw; + } + +out_free_fw: + release_firmware(fw); + return ret; +} + +#define NE6X_FIRMWARE_RESET_CHIP \ + ((ETH_RESET_MGMT | ETH_RESET_IRQ | \ + ETH_RESET_DMA | ETH_RESET_FILTER | \ + ETH_RESET_OFFLOAD | ETH_RESET_MAC | \ + ETH_RESET_PHY | ETH_RESET_RAM) << ETH_RESET_SHARED_SHIFT) + +static int ne6x_reset(struct net_device *netdev, u32 *flags) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + bool reload = false; + u32 req = *flags; + + if (!req) + return -EINVAL; + + if (adpt->idx != 0x0) { + netdev_err(netdev, "Reset is not supported from a eth0_nfp1\n"); + return -EOPNOTSUPP; + } + + if ((req & NE6X_FIRMWARE_RESET_CHIP) == NE6X_FIRMWARE_RESET_CHIP) { + /* This feature is not supported in older firmware versions */ + if (!ne6x_dev_reset_firmware(adpt)) { + netdev_info(netdev, "Firmware reset request successful.\n"); + reload = true; + *flags &= ~NE6X_FIRMWARE_RESET_CHIP; + } + } + + if (reload) + netdev_info(netdev, "Reload driver to complete reset\n"); + + return 0; +} + +static int ne6x_get_module_info(struct net_device *netdev, struct ethtool_modinfo *modinfo) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_sfp_mod_type_len sfp_mod; + int err; + + err = ne6x_dev_get_sfp_type_len(adpt, &sfp_mod); + if (err) + return err; + + modinfo->type = sfp_mod.type; + modinfo->eeprom_len = 
sfp_mod.len; + netdev_info(netdev, "type %d erprom_len %d.\n", sfp_mod.type, sfp_mod.len); + + return 0; +} + +#define STD_SFP_INFO_MAX_SIZE 640 + +static int ne6x_get_module_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; + int err; + + if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE)) + return -EINVAL; + + memset(data, 0, ee->len); + err = ne6x_dev_get_sfp_eeprom(adpt, sfp_data, ee->offset, ee->len, 0); + if (err) + return err; + + memcpy(data, sfp_data + ee->offset, ee->len); + + return 0; +} + +static u32 ne6x_get_priv_flags(struct net_device *netdev) +{ + const struct ne6x_priv_flag *priv_flag; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + u32 is_write_proterct = false; + u32 i, ret_flags = 0; + u32 value = 0; + + ne6x_dev_get_norflash_write_protect(adpt->back, &is_write_proterct); + if (is_write_proterct) + set_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags); + else + clear_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags); + + if (ne6x_dev_get_trust_vlan(adpt->back)) + set_bit(NE6X_ADPT_F_TRUST_VLAN, adpt->flags); + else + clear_bit(NE6X_ADPT_F_TRUST_VLAN, adpt->flags); + value = ne6x_dev_get_features(adpt); + if (value & NE6X_F_RX_FW_LLDP) + clear_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags); + else + set_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags); + + for (i = 0; i < NE6X_PRIV_FLAG_ARRAY_SIZE; i++) { + priv_flag = &ne6x_gstrings_priv_flags[i]; + if (test_bit(priv_flag->bitno, adpt->flags)) + ret_flags |= BIT(i); + } + + return ret_flags; +} + +static int ne6x_set_priv_flags(struct net_device *netdev, u32 flags) +{ + DECLARE_BITMAP(change_flags, NE6X_ADPT_F_NBITS); + DECLARE_BITMAP(orig_flags, NE6X_ADPT_F_NBITS); + const struct ne6x_priv_flag *priv_flag; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int ret = 0; + u32 i; + + if (flags > BIT(NE6X_PRIV_FLAG_ARRAY_SIZE)) + return 
-EINVAL; + + bitmap_copy(orig_flags, adpt->flags, NE6X_ADPT_F_NBITS); + + for (i = 0; i < NE6X_PRIV_FLAG_ARRAY_SIZE; i++) { + priv_flag = &ne6x_gstrings_priv_flags[i]; + + if (flags & BIT(i)) + set_bit(priv_flag->bitno, adpt->flags); + else + clear_bit(priv_flag->bitno, adpt->flags); + } + + bitmap_xor(change_flags, adpt->flags, orig_flags, NE6X_ADPT_F_NBITS); + + if (test_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, change_flags)) { + if (test_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags)) + ne6x_dev_set_fw_lldp(adpt, false); + else + ne6x_dev_set_fw_lldp(adpt, true); + } + + if (test_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, change_flags)) { + if (test_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags)) + ne6x_dev_set_norflash_write_protect(adpt->back, true); + else + ne6x_dev_set_norflash_write_protect(adpt->back, false); + } + + if (test_bit(NE6X_ADPT_F_DDOS_SWITCH, change_flags)) { + if (test_bit(NE6X_ADPT_F_DDOS_SWITCH, adpt->flags)) + ne6x_dev_set_ddos(adpt->back, true); + else + ne6x_dev_set_ddos(adpt->back, false); + } + + if (test_bit(NE6X_ADPT_F_ACL, change_flags)) { + if (adpt->idx != 0) { + netdev_err(netdev, "only adpt 0 support acl flag\n"); + return -EINVAL; + } + if (test_bit(NE6X_ADPT_F_ACL, adpt->flags)) { + if (ne6x_dev_set_white_list(adpt->back, true)) + return -EPERM; + } else { + ne6x_dev_set_white_list(adpt->back, false); + } + } + if (test_bit(NE6X_ADPT_F_TRUST_VLAN, change_flags)) { + if (test_bit(NE6X_ADPT_F_TRUST_VLAN, adpt->flags)) + ne6x_dev_set_trust_vlan(adpt->back, true); + else + ne6x_dev_set_trust_vlan(adpt->back, false); + } + return ret; +} + +static int ne6x_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(dev); + + dump->version = 1; + dump->flag = 0; + + /* Calculate the requested preset idx length */ + if (ne6x_dev_get_dump_data_len(pf, &dump->len)) { + dump->len = 0; + return -EAGAIN; + } + + return 0; +} + +static int ne6x_get_dump_data(struct net_device *dev, struct 
ethtool_dump *dump, void *buffer) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(dev); + u32 *p = buffer; + + if (ne6x_dev_get_dump_data(pf, p, dump->len)) + return -EAGAIN; + + return 0; +} + +static const struct ethtool_ops ne6x_ethtool_ops = { + .get_link_ksettings = ne6x_get_link_ksettings, + .set_link_ksettings = ne6x_set_link_ksettings, + .get_strings = ne6x_get_strings, + .get_sset_count = ne6x_get_sset_count, + .get_ethtool_stats = ne6x_get_ethtool_stats, + .get_drvinfo = ne6x_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_regs = ne6x_get_regs, + .get_regs_len = ne6x_get_regs_len, + .get_dump_flag = ne6x_get_dump_flag, + .get_dump_data = ne6x_get_dump_data, + .self_test = ne6x_self_test, + .get_ringparam = ne6x_get_ringparam, + .set_ringparam = ne6x_set_ringparam, + .get_pauseparam = ne6x_get_pauseparam, + .set_pauseparam = ne6x_set_pauseparam, + .get_coalesce = ne6x_get_coalesce, + .get_eeprom_len = ne6x_get_eeprom_len, + .get_eeprom = ne6x_get_eeprom, + .get_rxnfc = ne6x_get_rxnfc, + .set_rxnfc = ne6x_set_rxnfc, + .get_rxfh_key_size = ne6x_get_rxfh_key_size, + .get_rxfh_indir_size = ne6x_get_rss_table_size, + .get_rxfh = ne6x_get_rxfh, + .set_rxfh = ne6x_set_rxfh, + .get_channels = ne6x_get_channels, + .set_channels = ne6x_set_channels, + .flash_device = ne6x_set_flash, + .reset = ne6x_reset, + .get_module_info = ne6x_get_module_info, + .get_module_eeprom = ne6x_get_module_eeprom, + .get_priv_flags = ne6x_get_priv_flags, + .set_priv_flags = ne6x_set_priv_flags, + .set_phys_id = ne6x_set_phys_id, + .nway_reset = ne6x_nway_reset, + .self_test = ne6x_diag_test, + .get_fecparam = ne6x_get_fec_param, + .set_fecparam = ne6x_set_fec_param, +}; + +void ne6x_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &ne6x_ethtool_ops; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h new file mode 100644 index 000000000000..54d84d65900f --- /dev/null +++ 
b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_ethtool.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_ETHTOOL_H +#define _NE6X_ETHTOOL_H + +#define NE6X_STAT(_type, _name, _stat) \ +{ \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +enum ne6x_ethtool_test_id { + NE6X_ETH_TEST_LINK, + NE6X_ETH_TEST_LOOPBACK, + NE6X_ETH_TEST_REG, + NE6X_ETH_TEST_INT, + NE6X_ETH_TEST_CHIP_TEMPERATUR, + NE6X_ETH_TEST_BOARD_TEMPERATUR, + NE6X_ETH_TEST_CURRENT, + NE6X_ETH_TEST_VOLTAGE, + NE6X_ETH_TEST_POWER, + NE6X_ETH_TEST_I2C3, +}; + +void ne6x_set_ethtool_ops(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.c new file mode 100644 index 000000000000..91a03ef06a58 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.c @@ -0,0 +1,700 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include "ne6x.h" +#include "ne6x_interrupt.h" + +static int ne6x_init_msix(struct ne6x_pf *pf, int budget) +{ + int actual_vector; + ssize_t size; + + actual_vector = pci_enable_msix_range(pf->pdev, pf->msix_entries, NE6X_MIN_MSIX, budget); + dev_info(&pf->pdev->dev, "%s actual_vector = %d\n", __func__, actual_vector); + if (actual_vector <= 0) { + kfree(pf->msix_entries); + pf->msix_entries = NULL; + pci_disable_msix(pf->pdev); + dev_err(&pf->pdev->dev, "error msix enable failed\n"); + return -ENODEV; + } + + size = sizeof(struct ne6x_lump_tracking) + (sizeof(u16) * actual_vector); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); + kfree(pf->msix_entries); + pf->msix_entries = NULL; + pci_disable_msix(pf->pdev); + return -ENOMEM; + } + pf->irq_pile->num_entries = actual_vector; + + return 0; +} + +static int ne6x_init_intx(struct ne6x_pf *pf) +{ + int actual_vector; + ssize_t size; + + dev_info(&pf->pdev->dev, "try enable intx\n"); + actual_vector = 0x1; + + size = sizeof(struct ne6x_lump_tracking) + (sizeof(u16) * actual_vector); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(&pf->pdev->dev, "error intx allocating irq_pile memory\n"); + return -ENOMEM; + } + pf->irq_pile->num_entries = actual_vector; + + test_and_set_bit(NE6X_PF_INTX, pf->state); + + return 0; +} + +int ne6x_init_interrupt_scheme(struct ne6x_pf *pf) +{ + union ne6x_ciu_time_out_cfg ciu_time_out_cdg; + union ne6x_all_rq_cfg all_rq_cfg; + union ne6x_all_sq_cfg all_sq_cfg; + union ne6x_all_cq_cfg all_cq_cfg; + union ne6x_merge_cfg merge_cfg; + struct ne6x_hw *hw = &pf->hw; + u64 __iomem *reg; + int err; + int i; + + pf->msix_entries = kcalloc(NE6X_MAX_MSIX_NUM, sizeof(struct msix_entry), GFP_KERNEL); + if (!pf->msix_entries) + return -ENOMEM; + + for (i = 0; i < NE6X_MAX_MSIX_NUM; i++) + pf->msix_entries[i].entry = i; + + test_and_set_bit(NE6X_PF_MSIX, pf->state); + + if 
(ne6x_init_msix(pf, NE6X_MAX_MSIX_NUM)) { + clear_bit(NE6X_PF_MSIX, pf->state); + err = ne6x_init_intx(pf); + if (err) { + dev_err(&pf->pdev->dev, "error intx enable failed\n"); + return err; + } + } + + if (pf->irq_pile->num_entries >= NE6X_MAX_MSIX_NUM) { + err = ne6x_init_link_irq(pf); + if (err) { + dev_err(&pf->pdev->dev, "init int irq failed\n"); + return err; + } + } + + /* We only initialize int once, so as not to overwrite user settings */ + if (test_and_set_bit(NE6X_INT_INIT_DOWN, pf->state)) + return 0; + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_RQ_CFG); + all_rq_cfg.val = readq(reg); + all_rq_cfg.reg.csr_allrq_pull_merge_cfg = 0x10; + writeq(all_rq_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_SQ_CFG); + all_sq_cfg.val = readq(reg); + all_sq_cfg.reg.csr_allsq_pull_merge_cfg = 0x10; + writeq(all_sq_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_CQ_CFG); + all_cq_cfg.val = readq(reg); + all_cq_cfg.reg.csr_allcq_merge_size = 0x1; + all_cq_cfg.reg.csr_allcq_wt_rr_cnt = 0x7F; + all_cq_cfg.reg.csr_allcq_wt_rr_flag = 0x1; + writeq(all_cq_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_MERGE_CFG); + merge_cfg.val = readq(reg); + merge_cfg.reg.csr_merge_clk_cnt = 800; + writeq(merge_cfg.val, reg); + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_CIU_TIME_OUT_CFG); + ciu_time_out_cdg.val = readq(reg); + ciu_time_out_cdg.reg.csr_int_timer_out_cnt = 0xfff; + writeq(ciu_time_out_cdg.val, reg); + + return 0; +} + +static int ne6x_adpt_alloc_q_vector(struct ne6x_adapter *adpt, int v_idx) +{ + struct ne6x_q_vector *q_vector; + + /* allocate q_vector */ + q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + q_vector->adpt = adpt; + q_vector->v_idx = v_idx; + + cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); + + if (adpt->netdev) + 
netif_napi_add(adpt->netdev, &q_vector->napi, ne6x_napi_poll); + + /* tie q_vector and adpt together */ + adpt->q_vectors[v_idx] = q_vector; + return 0; +} + +static void ne6x_free_q_vector(struct ne6x_adapter *adpt, int v_idx) +{ + struct ne6x_q_vector *q_vector = adpt->q_vectors[v_idx]; + struct ne6x_ring *ring; + struct device *dev; + + dev = ne6x_pf_to_dev(adpt->back); + + if (!q_vector) { + dev_dbg(dev, "Queue vector at index %d not found\n", v_idx); + return; + } + + /* disassociate q_vector from rings */ + ne6x_for_each_ring(ring, q_vector->tx) ring->q_vector = NULL; + + ne6x_for_each_ring(ring, q_vector->rx) ring->q_vector = NULL; + + ne6x_for_each_ring(ring, q_vector->cq) ring->q_vector = NULL; + + /* only adapter w/ an associated netdev is set up w/ NAPI */ + if (adpt->netdev) + netif_napi_del(&q_vector->napi); + + adpt->q_vectors[v_idx] = NULL; + kfree(q_vector); +} + +static int ne6x_adpt_alloc_q_vectors(struct ne6x_adapter *adpt) +{ + int v_idx, num_q_vectors, err; + + /* if not MSIX, give the one vector only to the LAN adapter */ + num_q_vectors = adpt->num_q_vectors; + + for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { + err = ne6x_adpt_alloc_q_vector(adpt, v_idx); + if (err) + goto err_out; + } + + return 0; + +err_out: + while (v_idx--) + ne6x_free_q_vector(adpt, v_idx); + + return err; +} + +void ne6x_adpt_free_q_vectors(struct ne6x_adapter *adpt) +{ + int v_idx; + + for (v_idx = 0; v_idx < adpt->num_q_vectors; v_idx++) + ne6x_free_q_vector(adpt, v_idx); +} + +int ne6x_adpt_setup_vectors(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + int ret = -ENOENT; + + if (adpt->q_vectors[0]) { + dev_info(&pf->pdev->dev, "adapter %d has existing q_vectors\n", adpt->idx); + return -EEXIST; + } + + if (adpt->base_vector) { + dev_info(&pf->pdev->dev, "adapter %d has non-zero base vector %d\n", adpt->idx, + adpt->base_vector); + return -EEXIST; + } + + ret = ne6x_adpt_alloc_q_vectors(adpt); + if (ret) { + dev_info(&pf->pdev->dev, "failed to 
allocate %d q_vector for adapter %d, ret=%d\n", + adpt->num_q_vectors, adpt->idx, ret); + adpt->num_q_vectors = 0; + goto vector_setup_out; + } + + if (adpt->num_q_vectors) + adpt->base_vector = adpt->port_info->hw_queue_base; + + if (adpt->base_vector < 0) { + dev_info(&pf->pdev->dev, "failed to get tracking for %d vectors for adapter %d, err=%d\n", + adpt->num_q_vectors, adpt->idx, adpt->base_vector); + ne6x_adpt_free_q_vectors(adpt); + ret = -ENOENT; + goto vector_setup_out; + } + +vector_setup_out: + return ret; +} + +static void ne6x_irq_affinity_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) +{ + struct ne6x_q_vector *q_vector = + container_of(notify, struct ne6x_q_vector, affinity_notify); + + cpumask_copy(&q_vector->affinity_mask, mask); +} + +static void ne6x_irq_affinity_release(struct kref *ref) {} + +int ne6x_adpt_request_irq_msix(struct ne6x_adapter *adpt, char *basename) +{ + int q_vectors = adpt->num_q_vectors; + struct ne6x_pf *pf = adpt->back; + int base = adpt->base_vector; + int rx_int_idx = 0; + int tx_int_idx = 0; + int vector, err; + int irq_num; + int cpu; + + for (vector = 0; vector < q_vectors; vector++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[vector]; + + irq_num = pf->msix_entries[base + vector].vector; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, + "TxRx", rx_int_idx++); + tx_int_idx++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, + "rx", rx_int_idx++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-%d", basename, + "tx", tx_int_idx++); + } else { + /* skip this unused q_vector */ + continue; + } + + err = request_irq(irq_num, adpt->irq_handler, 0, q_vector->name, q_vector); + if (err) { + dev_info(&pf->pdev->dev, "MSIX request_irq failed, error: %d\n", err); + goto free_queue_irqs; + } + + /* register for 
affinity change notifications */
+		q_vector->affinity_notify.notify = ne6x_irq_affinity_notify;
+		q_vector->affinity_notify.release = ne6x_irq_affinity_release;
+		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+
+		/* Spread affinity hints out across online CPUs.
+		 *
+		 * get_cpu_mask returns a static constant mask with
+		 * a permanent lifetime so it's ok to pass to
+		 * irq_set_affinity_hint without making a copy.
+		 */
+		cpu = cpumask_local_spread(q_vector->v_idx, -1);
+		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+	}
+
+	adpt->irqs_ready = true;
+	return 0;
+
+free_queue_irqs:
+	while (vector) {
+		vector--;
+		irq_num = pf->msix_entries[base + vector].vector;
+		irq_set_affinity_notifier(irq_num, NULL);
+		irq_set_affinity_hint(irq_num, NULL);
+		/* dev_id must match what request_irq() was given above: the
+		 * q_vector pointer itself, not the address of the array slot,
+		 * otherwise free_irq() cannot find and release the handler.
+		 */
+		free_irq(irq_num, adpt->q_vectors[vector]);
+	}
+
+	return err;
+}
+
+static irqreturn_t ne6x_intr(int irq, void *data)
+{
+	struct ne6x_q_vector *q_vector = data;
+	struct ne6x_adapter *adpt = q_vector->adpt;
+	struct ne6x_hw *hw = &adpt->back->hw;
+	u64 reg_val;
+
+	reg_val = rd64(hw, NE6X_VPINT_DYN_CTLN(0, NE6X_VP_INT));
+	if (!(reg_val & 0x10000))
+		return IRQ_NONE;
+
+	napi_schedule(&q_vector->napi);
+	return IRQ_HANDLED;
+}
+
+int ne6x_adpt_request_irq_intx(struct ne6x_adapter *adpt, char *basename)
+{
+	struct ne6x_q_vector *q_vector = adpt->q_vectors[0];
+	struct net_device *netdev = adpt->netdev;
+	struct ne6x_pf *pf = adpt->back;
+	u32 irq = pf->pdev->irq;
+	int err;
+
+	snprintf(q_vector->name, sizeof(q_vector->name) - 1, "%s-%s-INTx", basename, "TxRx");
+
+	err = request_irq(irq, &ne6x_intr, IRQF_SHARED, netdev->name, q_vector);
+	if (err) {
+		dev_info(&pf->pdev->dev, "INTx request_irq failed, error: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+int ne6x_adpt_request_irq(struct ne6x_adapter *adpt, char *basename)
+{
+	struct ne6x_pf *pf = adpt->back;
+	int err;
+
+	if (test_bit(NE6X_PF_MSIX, pf->state))
+		err = ne6x_adpt_request_irq_msix(adpt, basename);
+	else
+		err = 
ne6x_adpt_request_irq_intx(adpt, basename); + + if (err) + dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); + + return err; +} + +void ne6x_adpt_configure_msix(struct ne6x_adapter *adpt) +{ + union ne6x_vp_int_mask int_mask; + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + union ne6x_int_cfg int_cfg; + u32 qp, nextqp; + int i, q; + + /* The interrupt indexing is offset by 1 in the PFINT_ITRn + * and PFINT_LNKLSTn registers, e.g.: + * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) + */ + qp = adpt->base_queue; + + /* SRIOV mode VF Config OR SRIOV disabled PF Config */ + if (qp < NE6X_PF_VP0_NUM) { + for (i = 0; i < adpt->num_q_vectors; i++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[i]; + + for (q = 0; q < q_vector->num_ringpairs; q++) { + nextqp = qp + i + q; + + int_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(nextqp, NE6X_INT_CFG)); + int_cfg.reg.csr_sq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_rq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_cq_hdle_half_int_cnt_vp = 0xffff; + wr64(hw, NE6X_VPINT_DYN_CTLN(nextqp, NE6X_INT_CFG), int_cfg.val); + + int_mask.val = rd64(hw, + NE6X_VPINT_DYN_CTLN(nextqp, NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp = NE6X_MAX_U64; + wr64(hw, NE6X_VPINT_DYN_CTLN(nextqp, NE6X_VP_INT_MASK), + int_mask.val); + } + } + } else { + /* SRIOV mode PF Config */ + for (i = 0; i < adpt->num_q_vectors; i++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[i]; + + for (q = 0; q < q_vector->num_ringpairs; q++) { + nextqp = qp - NE6X_PF_VP0_NUM + i + q; + + int_cfg.val = rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(nextqp, NE6X_INT_CFG)); + int_cfg.reg.csr_sq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_rq_hdle_half_int_cnt_vp = 0x0; + int_cfg.reg.csr_cq_hdle_half_int_cnt_vp = 0xffff; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(nextqp, NE6X_INT_CFG), + int_cfg.val); + + int_mask.val = + rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(nextqp, + NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp = NE6X_MAX_U64; + 
wr64_bar4(hw, + NE6X_PFINT_DYN_CTLN(nextqp, NE6X_VP_INT_MASK), + int_mask.val); + } + } + } +} + +static inline void ne6x_irq_dynamic_enable(struct ne6x_adapter *adpt, int vector) +{ + union ne6x_vp_int_mask int_mask; + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + + if (vector < NE6X_PF_VP0_NUM) { + int_mask.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vector, NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp &= ~(1ULL << NE6X_VP_CQ_INTSHIFT); + wr64(hw, NE6X_VPINT_DYN_CTLN(vector, NE6X_VP_INT_MASK), int_mask.val); + } else { + int_mask.val = rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(vector - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK)); + int_mask.reg.csr_ciu_mask_vp &= ~(1ULL << NE6X_VP_CQ_INTSHIFT); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(vector - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK), + int_mask.val); + } +} + +int ne6x_adpt_enable_irq(struct ne6x_adapter *adpt) +{ + int i; + + for (i = 0; i < adpt->num_q_vectors; i++) + ne6x_irq_dynamic_enable(adpt, adpt->base_vector + i); + + return 0; +} + +void ne6x_adpt_disable_irq(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + int base = adpt->base_vector; + int i; + + /* disable each interrupt */ + if (base < NE6X_PF_VP0_NUM) { + for (i = adpt->base_vector; i < (adpt->num_q_vectors + adpt->base_vector); i++) { + wr64(hw, NE6X_VPINT_DYN_CTLN(i, NE6X_VP_INT), NE6X_MAX_U64); + wr64(hw, NE6X_VPINT_DYN_CTLN(i, NE6X_VP_INT_MASK), NE6X_MAX_U64); + } + } else { + for (i = adpt->base_vector; i < (adpt->num_q_vectors + adpt->base_vector); i++) { + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(i - NE6X_PF_VP0_NUM, NE6X_VP_INT), + NE6X_MAX_U64); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(i - NE6X_PF_VP0_NUM, NE6X_VP_INT_MASK), + NE6X_MAX_U64); + } + } + + if (test_bit(NE6X_PF_MSIX, pf->state)) { + for (i = 0; i < adpt->num_q_vectors; i++) + synchronize_irq(pf->msix_entries[i + base].vector); + } else { + synchronize_irq(pf->pdev->irq); + } +} + +void ne6x_adpt_free_irq(struct ne6x_adapter *adpt) +{ + 
struct ne6x_pf *pf = adpt->back; + int base = adpt->base_vector; + int i; + + if (!adpt->q_vectors) + return; + + if (!adpt->irqs_ready) + return; + + adpt->irqs_ready = false; + for (i = 0; i < adpt->num_q_vectors; i++) { + int irq_num; + u16 vector; + + vector = i + base; + irq_num = pf->msix_entries[vector].vector; + + /* free only the irqs that were actually requested */ + if (!adpt->q_vectors[i] || !adpt->q_vectors[i]->num_ringpairs) + continue; + + /* clear the affinity notifier in the IRQ descriptor */ + irq_set_affinity_notifier(irq_num, NULL); + + /* remove our suggested affinity mask for this IRQ */ + irq_set_affinity_hint(irq_num, NULL); + + synchronize_irq(irq_num); + free_irq(irq_num, adpt->q_vectors[i]); + } +} + +static void ne6x_reset_interrupt_capability(struct ne6x_pf *pf) +{ + /* If we're in Legacy mode, the interrupt was cleaned in adpt_close */ + if (pf->msix_entries) { + pci_disable_msix(pf->pdev); + kfree(pf->msix_entries); + pf->msix_entries = NULL; + } + + kfree(pf->irq_pile); + pf->irq_pile = NULL; +} + +int ne6x_init_link_irq(struct ne6x_pf *pf) +{ + int irq_num; + int err; + + snprintf(pf->link_intname, sizeof(pf->link_intname) - 1, "%s-%s-%d", + dev_driver_string(&pf->pdev->dev), "link", pf->hw.bus.bus_num); + irq_num = pf->msix_entries[NE6X_NIC_INT_VP].vector; + err = request_irq(irq_num, ne6x_linkint_irq_handler, 0, pf->link_intname, pf); + if (!err) + pf->link_int_irq_ready = true; + + return 0; +} + +int ne6x_enable_link_irq(struct ne6x_pf *pf) +{ + u64 int_mask = 0xffffffffffffffff; + u64 temp = 1; + int i = 0; + + if (!pf->link_int_irq_ready) + return 0; + + for (i = 0; i < pf->hw.pf_port; i++) + int_mask &= ~(temp << (i + NE6X_NIC_INT_START_BIT)); + + wr64_bar4(&pf->hw, NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT_MASK), + int_mask); + + return 0; +} + +int ne6x_disable_link_irq(struct ne6x_pf *pf) +{ + u64 int_mask = 0xffffffffffffffff; + u64 int_val; + + wr64_bar4(&pf->hw, 
NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT_MASK), + int_mask); + int_val = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT)); + wr64_bar4(&pf->hw, NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, NE6X_VP_INT), + int_val); + + return 0; +} + +void ne6x_free_link_irq(struct ne6x_pf *pf) +{ + if (pf->link_int_irq_ready) { + synchronize_irq(pf->msix_entries[NE6X_NIC_INT_VP].vector); + free_irq(pf->msix_entries[NE6X_NIC_INT_VP].vector, pf); + } + + pf->link_int_irq_ready = false; +} + +irqreturn_t ne6x_msix_clean_vf_mbx(int irq, void *data) +{ + struct ne6x_pf *pf = data; + struct ne6x_hw *hw = &pf->hw; + bool have_cmd = false; + struct ne6x_vf *vf; + u64 int_val = 0; + u64 val; + int i; + + val = rd64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT)); + ne6x_for_each_vf(pf, i) { + vf = &pf->vf[i]; + if (val & (1ULL << vf->base_queue)) { + test_and_set_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state); + pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_DETECT; + pf->hw.mbx_snapshot.mbx_vf.vf_cntr[i] = true; + have_cmd = true; + int_val |= (1ULL << vf->base_queue); + } + } + + if (have_cmd) { + ne6x_service_event_schedule(pf); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), int_val); + } + + val = rd64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT)); + ne6x_for_each_vf(pf, i) { + vf = &pf->vf[i]; + if (val & (1ULL << vf->base_queue)) { + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), + (1ULL << vf->base_queue)); + pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + pf->hw.ne6x_mbx_ready_to_send[i] = true; + } + } + + return IRQ_HANDLED; +} + +int ne6x_init_mailbox_irq(struct ne6x_pf *pf) +{ + int irq_num; + int err; + + snprintf(pf->mailbox_intname, sizeof(pf->mailbox_intname) - 1, "%s-%s-%d", + dev_driver_string(&pf->pdev->dev), "mailbox", pf->hw.bus.bus_num); + irq_num = pf->msix_entries[NE6X_MAILBOX_VP_NUM].vector; + err = request_irq(irq_num, ne6x_msix_clean_vf_mbx, 0, 
pf->mailbox_intname, pf); + if (!err) + pf->mailbox_int_irq_ready = true; + + dev_info(&pf->pdev->dev, "reg mailbox irq id= %d,name = %s\n", irq_num, + pf->mailbox_intname); + + return err; +} + +int ne6x_disable_mailbox_irq(struct ne6x_pf *pf) +{ + struct ne6x_hw *hw = &pf->hw; + + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), 0xffffffffffffffff); + + return 0; +} + +void ne6x_free_mailbox_irq(struct ne6x_pf *pf) +{ + if (pf->mailbox_int_irq_ready) { + synchronize_irq(pf->msix_entries[NE6X_MAILBOX_VP_NUM].vector); + free_irq(pf->msix_entries[NE6X_MAILBOX_VP_NUM].vector, pf); + } + + pf->mailbox_int_irq_ready = false; +} + +void ne6x_clear_interrupt_scheme(struct ne6x_pf *pf) +{ + int i; + + for (i = 0; i < pf->num_alloc_adpt; i++) { + if (pf->adpt[i]) + ne6x_adpt_free_q_vectors(pf->adpt[i]); + } + + ne6x_disable_link_irq(pf); + ne6x_free_link_irq(pf); + ne6x_reset_interrupt_capability(pf); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.h new file mode 100644 index 000000000000..e8d512d965a1 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_interrupt.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_INTERRUPT_H +#define _NE6X_INTERRUPT_H + +#include "ne6x.h" + +int ne6x_init_interrupt_scheme(struct ne6x_pf *pf); +int ne6x_adpt_setup_vectors(struct ne6x_adapter *adpt); +void ne6x_adpt_free_q_vectors(struct ne6x_adapter *adpt); +int ne6x_adpt_request_irq(struct ne6x_adapter *adpt, char *basename); +void ne6x_adpt_configure_msix(struct ne6x_adapter *adpt); +int ne6x_adpt_enable_irq(struct ne6x_adapter *adpt); +void ne6x_adpt_free_irq(struct ne6x_adapter *adpt); +void ne6x_clear_interrupt_scheme(struct ne6x_pf *pf); +void ne6x_adpt_disable_irq(struct ne6x_adapter *adpt); +irqreturn_t ne6x_linkint_irq_handler(int irq, void *data); +int ne6x_enable_link_irq(struct ne6x_pf *pf); +int ne6x_disable_link_irq(struct ne6x_pf *pf); +int ne6x_init_link_irq(struct ne6x_pf *pf); +void ne6x_free_link_irq(struct ne6x_pf *pf); +int ne6x_init_mailbox_irq(struct ne6x_pf *pf); +void ne6x_free_mailbox_irq(struct ne6x_pf *pf); +int ne6x_disable_mailbox_irq(struct ne6x_pf *pf); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_main.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_main.c new file mode 100644 index 000000000000..24e71dd68998 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_main.c @@ -0,0 +1,3111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "ne6x.h" +#include "ne6x_portmap.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" +#include "ne6x_debugfs.h" +#include "ne6x_arfs.h" +#include "version.h" +#include "ne6x_netlink.h" +#include "ne6x_interrupt.h" + +#define CREATE_TRACE_POINTS + +#define SUMMARY "Chengdu BeiZhongWangXin Ethernet Connection N5/N6 Series Linux Driver" +#define COPYRIGHT "Copyright(c) 2020 - 2023 Chengdu BeiZhongWangXin Technology Co., Ltd." 
+ +char ne6x_driver_name[] = "ncepf"; + +static const char ne6x_driver_string[] = SUMMARY; + +const char ne6x_driver_version_str[] = VERSION; +static const char ne6x_copyright[] = COPYRIGHT; + +/* ne6x_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id ne6x_pci_tbl[] = { + {PCI_VDEVICE(BZWX, 0x5010), 0}, + {PCI_VDEVICE(BZWX, 0x5011), 0}, + {PCI_VDEVICE(BZWX, 0x6010), 0}, + {PCI_VDEVICE(BZWX, 0x6011), 0}, + /* required last entry */ + {0, 0}, +}; + +MODULE_DEVICE_TABLE(pci, ne6x_pci_tbl); +MODULE_AUTHOR("Chengdu BeiZhongWangXin Technology Co., Ltd., "); +MODULE_DESCRIPTION("Chengdu BeiZhongWangXin Ethernet Connection N5/N6 Series Linux Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(VERSION); + +static struct workqueue_struct *ne6x_wq; +static const struct net_device_ops ne6x_netdev_ops; + +bool netif_is_ne6x(struct net_device *dev) +{ + return dev && (dev->netdev_ops == &ne6x_netdev_ops); +} + +int ne6x_hw_init(struct ne6x_hw *hw) +{ + int cpu_num = num_online_cpus(); + + /* max phy_port */ + hw->pf_port = ne6x_dev_get_port_num(hw->back); + /* expect vp queue */ + hw->expect_vp = NE6X_MAX_VP_NUM / hw->pf_port; + /* actal max vp queue */ + hw->max_queue = min_t(int, cpu_num, hw->expect_vp); + + hw->port_info = devm_kzalloc(ne6x_hw_to_dev(hw), sizeof(*hw->port_info), GFP_KERNEL); + if (!hw->port_info) + return -EIO; + + /* set the back pointer to HW */ + hw->port_info->hw = hw; + + if (!is_valid_ether_addr(hw->port_info->mac.perm_addr)) + eth_random_addr(hw->port_info->mac.perm_addr); + + return 0; +} + +int ne6x_aq_get_phy_capabilities(struct ne6x_adapter *adpt, bool is_up, bool get_hw_stats) +{ + struct ne6x_port_info *port_info = adpt->port_info; + + /* read link states */ + if (get_hw_stats) + ne6x_dev_get_link_status(adpt, &port_info->link_status); + + if (is_up) { + if (port_info->link_status.link) { 
+ port_info->phy.link_info.link_info |= NE6X_AQ_LINK_UP; + + switch (port_info->link_status.speed) { + case NE6X_LINK_SPEED_10GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_10GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_10GB; + break; + case NE6X_LINK_SPEED_25GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_25GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_25GB; + break; + case NE6X_LINK_SPEED_40GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_40GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_40GB; + break; + case NE6X_LINK_SPEED_100GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_100GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_100GB; + break; + case NE6X_LINK_SPEED_200GB: + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_200GBASE; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_200GB; + break; + default: + dev_info(&adpt->back->pdev->dev, "WARNING: Unrecognized link_speed (0x%x).\n", + NE6X_LINK_SPEED_UNKNOWN); + break; + } + + port_info->phy.media_type = NE6X_MEDIA_FIBER; + return 0; + } + } + + port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_UNKNOWN; + port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_UNKNOWN; + port_info->phy.media_type = NE6X_MEDIA_UNKNOWN; + port_info->phy.link_info.link_info &= ~NE6X_AQ_LINK_UP; + + return 0; +} + +int ne6x_aq_get_vf_link_status(struct ne6x_adapter *adpt, bool is_up) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_adapter *pf_adpt = pf->adpt[(adpt->port_info->lport >= pf->hw.pf_port) ? 
+ (pf->hw.pf_port - 1) : adpt->port_info->lport]; + struct ne6x_link_info *pf_link_status = &pf_adpt->port_info->link_status; + struct ne6x_port_info *vf_port_info = adpt->port_info; + + if (is_up) { + if (pf_link_status->link) { + vf_port_info->phy.link_info.link_info |= NE6X_AQ_LINK_UP; + + switch (pf_link_status->speed) { + case NE6X_LINK_SPEED_10GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_10GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_10GB; + break; + case NE6X_LINK_SPEED_25GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_25GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_25GB; + break; + case NE6X_LINK_SPEED_40GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_40GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_40GB; + break; + case NE6X_LINK_SPEED_100GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_100GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_100GB; + break; + case NE6X_LINK_SPEED_200GB: + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_200GBASE; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_200GB; + break; + default: + dev_info(&adpt->back->pdev->dev, "WARNING: Unrecognized link_speed (0x%x).\n", + NE6X_LINK_SPEED_UNKNOWN); + break; + } + + vf_port_info->phy.media_type = NE6X_MEDIA_FIBER; + return 0; + } + } + + vf_port_info->phy.link_info.phy_type_low = NE6X_PHY_TYPE_UNKNOWN; + vf_port_info->phy.link_info.link_speed = NE6X_LINK_SPEED_UNKNOWN; + vf_port_info->phy.media_type = NE6X_MEDIA_UNKNOWN; + vf_port_info->phy.link_info.link_info &= ~NE6X_AQ_LINK_UP; + + return 0; +} + +static void ne6x_adpt_link_event(struct ne6x_adapter *adpt, bool link_up) +{ + if (!adpt) + return; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state) || !adpt->netdev) + return; + + if (link_up == netif_carrier_ok(adpt->netdev)) + return; + + if (link_up) { + netif_carrier_on(adpt->netdev); + 
netif_tx_wake_all_queues(adpt->netdev); + } else { + netif_carrier_off(adpt->netdev); + netif_tx_stop_all_queues(adpt->netdev); + } +} + +void ne6x_print_link_message(struct ne6x_adapter *adpt, bool isup) +{ + char *speed = "Unknown "; + char *an = "False"; + u16 new_speed; + + if (isup) + new_speed = adpt->port_info->phy.link_info.link_speed; + else + new_speed = NE6X_LINK_SPEED_UNKNOWN; + + if (adpt->current_isup == isup && adpt->current_speed == new_speed) + return; + + adpt->current_isup = isup; + adpt->current_speed = new_speed; + + if (!isup) { + netdev_info(adpt->netdev, "NIC Link is Down\n"); + return; + } + + switch (adpt->port_info->phy.link_info.link_speed) { + case NE6X_LINK_SPEED_40GB: + speed = "40 G"; + break; + case NE6X_LINK_SPEED_100GB: + speed = "100 G"; + break; + case NE6X_LINK_SPEED_10GB: + speed = "10 G"; + break; + case NE6X_LINK_SPEED_25GB: + speed = "25 G"; + break; + case NE6X_LINK_SPEED_200GB: + speed = "200 G"; + break; + default: + break; + } + + if (adpt->port_info->phy.link_info.an_info) + an = "True"; + + netdev_info(adpt->netdev, "NIC Link is Up, %sbps Full Duplex, Autoneg: %s\n", speed, an); +} + +static void ne6x_link_event(struct ne6x_pf *pf) +{ + struct ne6x_phy_info *phy_info; + struct ne6x_adapter *adpt = NULL; + u32 old_link_speed; + bool old_link; + bool link_up; + int i; +#ifdef CONFIG_PCI_IOV + struct ne6x_vf *vf; + int vf_id; +#endif + + for (i = 0; i < pf->num_alloc_adpt; i++) { + link_up = false; + adpt = pf->adpt[i]; + phy_info = &adpt->port_info->phy; + phy_info->link_info_old = phy_info->link_info; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + ne6x_aq_get_phy_capabilities(adpt, false, true); + else + ne6x_aq_get_phy_capabilities(adpt, true, true); + + /* add sfp online state begin */ + ne6x_dev_get_sfp_status(adpt, &phy_info->link_info.ext_info); + if (phy_info->link_info.ext_info != phy_info->link_info_old.ext_info) { + if (phy_info->link_info.ext_info == 0) + netdev_info(adpt->netdev, "adpt->id= %d,optical 
module unplugged", + adpt->idx); + else + netdev_info(adpt->netdev, "adpt->id= %d,optical module plugged", + adpt->idx); + } + + /* end sfp online state */ + old_link = !!(adpt->port_info->phy.link_info_old.link_info & NE6X_AQ_LINK_UP); + old_link_speed = adpt->port_info->phy.link_info_old.link_speed; + /* Check if the link state is up after updating link info, and treat + * this event as an UP event since the link is actually UP now. + */ + if (adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP) + link_up = true; + + /* if the old link up/down is the same as the new */ + if (link_up == old_link) { + if (link_up && old_link_speed != adpt->port_info->phy.link_info.link_speed) + ne6x_print_link_message(adpt, link_up); + + continue; + } + + ne6x_adpt_link_event(adpt, link_up); + ne6x_print_link_message(adpt, link_up); + } + +#ifdef CONFIG_PCI_IOV + ne6x_for_each_vf(pf, vf_id) { + vf = &pf->vf[vf_id]; + adpt = vf->adpt; + + if (test_bit(NE6X_VF_STATE_INIT, vf->vf_states)) { + if (!vf->rx_tx_state) { + adpt->port_info->phy.link_info.link_info = 0x0; + vf->rx_tx_state = true; + } + link_up = false; + phy_info = &adpt->port_info->phy; + phy_info->link_info_old = phy_info->link_info; + ne6x_aq_get_vf_link_status(adpt, true); + old_link = !!(adpt->port_info->phy.link_info_old.link_info + & NE6X_AQ_LINK_UP); + old_link_speed = adpt->port_info->phy.link_info_old.link_speed; + + if (adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP) + link_up = true; + + if (link_up == old_link && + old_link_speed == adpt->port_info->phy.link_info.link_speed) + continue; + + pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_DETECT; + ne6x_vc_notify_link_state(vf); + } + } +#endif +} + +static void ne6x_clean_link_status_subtask(struct ne6x_pf *pf) +{ + if (!test_bit(NE6X_LINK_POOLING, pf->state)) + return; + + ne6x_link_event(pf); +} + +void ne6x_service_event_schedule(struct ne6x_pf *pf) +{ + if (!test_bit(NE6X_DOWN, pf->state)) + queue_work(ne6x_wq, &pf->serv_task); +} + 
+static void ne6x_adpt_reinit_locked(struct ne6x_adapter *adpt); + +static void ne6x_do_reset(struct ne6x_pf *pf, u32 reset_flags, bool lock_acquired) +{ + struct ne6x_adapter *adpt = NULL; + int i; + + WARN_ON(in_interrupt()); + + if (reset_flags & BIT_ULL(NE6X_PF_RESET_REQUESTED)) { + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_RECOVER, adpt->comm.state)) { + ne6x_adpt_reinit_locked(adpt); + clear_bit(NE6X_ADPT_RECOVER, adpt->comm.state); + } + } + } else if (reset_flags & BIT_ULL(NE6X_CORE_RESET_REQUESTED)) { + /* hardware reset:include PCIE,CORE.etc. */ + dev_info(&pf->pdev->dev, "timeout info: CORE reset\n"); + } else { + dev_info(&pf->pdev->dev, "bad reset request 0x%08x\n", reset_flags); + } +} + +static void ne6x_recover_hang_subtask(struct ne6x_pf *pf) +{ + u32 reset_flags = 0; + + if (test_and_clear_bit(NE6X_PF_RESET_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_PF_RESET_REQUESTED); + + if (test_and_clear_bit(NE6X_CORE_RESET_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_CORE_RESET_REQUESTED); + + if (test_and_clear_bit(NE6X_GLOBAL_RESET_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_GLOBAL_RESET_REQUESTED); + + if (test_and_clear_bit(NE6X_DOWN_REQUESTED, pf->state)) + reset_flags |= BIT(NE6X_DOWN_REQUESTED); + + /* If there's a recovery already waiting, it takes + * precedence before starting a new reset sequence. 
+ */ + if (test_bit(NE6X_RESET_INTR_RECEIVED, pf->state)) { + clear_bit(NE6X_RESET_INTR_RECEIVED, pf->state); + test_and_clear_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + } + + /* If we're already down or resetting, just bail */ + if (reset_flags && !test_bit(NE6X_DOWN, pf->state) && + !test_bit(NE6X_CONFIG_BUSY, pf->state)) + ne6x_do_reset(pf, reset_flags, false); +} + +static void ne6x_service_timer(struct timer_list *t) +{ + struct ne6x_pf *pf = from_timer(pf, t, serv_tmr); + + if (pf->num_alloc_vfs) + mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->service_timer_period)); + + ne6x_service_event_schedule(pf); +} + +void ne6x_linkscan_schedule(struct ne6x_pf *pf) +{ + if (!test_bit(NE6X_DOWN, pf->state)) + queue_work(ne6x_wq, &pf->linkscan_work); +} + +static void ne6x_linkscan_timer(struct timer_list *t) +{ + struct ne6x_pf *pf = from_timer(pf, t, linkscan_tmr); + + if (pf->irq_pile->num_entries < NE6X_MAX_MSIX_NUM) + mod_timer(&pf->linkscan_tmr, round_jiffies(jiffies + HZ)); + else + mod_timer(&pf->linkscan_tmr, round_jiffies(jiffies + HZ * 30)); + + if (!test_bit(NE6X_DOWN, pf->state)) + queue_work(ne6x_wq, &pf->linkscan_work); +} + +static void ne6x_service_task(struct work_struct *work) +{ + struct ne6x_pf *pf = container_of(work, struct ne6x_pf, serv_task); + unsigned long start_time = jiffies; + +#ifdef CONFIG_PCI_IOV + /* vf command process */ + ne6x_vc_process_vf_msg(pf); +#endif + + ne6x_recover_hang_subtask(pf); + + ne6x_sync_arfs_fltrs(pf); + + /* If the tasks have taken longer than one timer cycle or there + * is more work to be done, reschedule the service task now + * rather than wait for the timer to tick again. 
+ */ + if (time_after(jiffies, (start_time + pf->service_timer_period)) || + test_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state) || + test_bit(NE6X_RESET_INTR_RECEIVED, pf->state)) + ne6x_service_event_schedule(pf); +} + +static void ne6x_linkscan_work(struct work_struct *work) +{ + struct ne6x_pf *pf = container_of(work, struct ne6x_pf, linkscan_work); + + ne6x_clean_link_status_subtask(pf); +} + +irqreturn_t ne6x_linkint_irq_handler(int irq, void *data) +{ + struct ne6x_pf *pf = data; + u64 intval = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + + wr64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(NE6X_NIC_INT_VP - NE6X_PF_VP0_NUM, + NE6X_VP_INT), + intval); + ne6x_linkscan_schedule(pf); + + return IRQ_HANDLED; +} + +int ne6x_pf_init(struct ne6x_pf *pf) +{ + pf->ctrl_adpt_idx = 0; + mutex_init(&pf->switch_mutex); + + /* set up periodic task facility */ + timer_setup(&pf->serv_tmr, ne6x_service_timer, 0); + pf->service_timer_period = HZ; + timer_setup(&pf->linkscan_tmr, ne6x_linkscan_timer, 0); + add_timer(&pf->serv_tmr); + + INIT_WORK(&pf->serv_task, ne6x_service_task); + INIT_WORK(&pf->linkscan_work, ne6x_linkscan_work); + + clear_bit(NE6X_SERVICE_SCHED, pf->state); + + pf->next_adpt = 0; + pf->num_alloc_adpt = pf->hw.pf_port; + pf->num_alloc_vfs = 0; + pf->mailbox_int_irq_ready = false; + pf->link_int_irq_ready = false; + + ne6x_dbg_pf_init(pf); + ne6x_proc_pf_init(pf); + + /* init key list head node */ + spin_lock_init(&pf->key_list_lock); + INIT_LIST_HEAD(&pf->key_filter_list); + + return 0; +} + +static void ne6x_set_num_rings_in_adpt(struct ne6x_adapter *adpt) +{ + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + adpt->num_tx_desc = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adpt->num_rx_desc = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adpt->num_cq_desc = adpt->num_tx_desc + 
adpt->num_rx_desc; + adpt->num_tg_desc = adpt->num_tx_desc; + adpt->irqs_ready = false; +} + +static irqreturn_t ne6x_msix_clean_rings(int irq, void *data) +{ + struct ne6x_q_vector *q_vector = data; + struct ne6x_adapter *adpt = (struct ne6x_adapter *)q_vector->adpt; + struct ne6x_hw *hw = &adpt->back->hw; + + if (!q_vector->tx.ring && !q_vector->rx.ring && !q_vector->cq.ring && !q_vector->tg.ring) + return IRQ_HANDLED; + + if (q_vector->reg_idx < NE6X_PF_VP0_NUM) + wr64(hw, NE6X_VPINT_DYN_CTLN(q_vector->reg_idx, NE6X_VP_INT_MASK), + 0xffffffffffffffff); + else + wr64_bar4(hw, + NE6X_PFINT_DYN_CTLN(q_vector->reg_idx - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK), + 0xffffffffffffffff); + + napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; +} + +int ne6x_adpt_mem_alloc(struct ne6x_pf *pf, struct ne6x_adapter *adpt) +{ + struct ne6x_ring **next_rings; + int ret = -ENODEV; + int size; + + /* Need to protect the allocation of the adapters at the PF level */ + mutex_lock(&pf->switch_mutex); + + adpt->netdev_registered = false; + size = sizeof(struct ne6x_ring *) * adpt->num_queue * 4; + adpt->tx_rings = kzalloc(size, GFP_KERNEL); + if (!adpt->tx_rings) + goto err_rings; + + next_rings = adpt->tx_rings + adpt->num_queue; + adpt->cq_rings = next_rings; + next_rings += adpt->num_queue; + adpt->rx_rings = next_rings; + adpt->tg_rings = adpt->rx_rings + adpt->num_queue; + + /* allocate memory for q_vector pointers */ + size = sizeof(struct ne6x_q_vector *) * adpt->num_q_vectors; + adpt->q_vectors = kzalloc(size, GFP_KERNEL); + if (!adpt->q_vectors) { + kfree(adpt->tx_rings); + ret = -ENOMEM; + goto err_rings; + } + + /* Setup default MSIX irq handler for adapter */ + ne6x_adpt_setup_irqhandler(adpt, ne6x_msix_clean_rings); + ret = 0; + +err_rings: + mutex_unlock(&pf->switch_mutex); + return ret; +} + +static int ne6x_force_link_state(struct ne6x_adapter *adpt, bool is_up) +{ + int err; + + err = ne6x_aq_get_phy_capabilities(adpt, is_up, true); + if (err) + return err; 
+ + if (is_up) + test_and_set_bit(NE6X_LINK_POOLING, adpt->back->state); + + return 0; +} + +int ne6x_adpt_restart_vp(struct ne6x_adapter *adpt, bool enable) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_hw *hw = &pf->hw; + int i, pf_q; + + pf_q = adpt->base_queue; + for (i = 0; i < adpt->num_queue; i++, pf_q++) { + if (pf_q < NE6X_PF_VP0_NUM) + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_VP_RELOAD), enable); + else + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_VP_RELOAD), + enable); + + usleep_range(1000, 2000); + if (!enable) { + ne6x_tail_update(adpt->rx_rings[i], 0); + ne6x_tail_update(adpt->tx_rings[i], 0); + } + } + + return 0; +} + +int ne6x_adpt_configure(struct ne6x_adapter *adpt) +{ + int err; + int i; + + err = ne6x_adpt_restart_vp(adpt, true); + if (!err) + err = ne6x_adpt_configure_tx(adpt); + + if (!err) + err = ne6x_adpt_configure_cq(adpt); + + if (!err) + err = ne6x_adpt_configure_rx(adpt); + + if (!err) + err = ne6x_adpt_restart_vp(adpt, false); + + if (!err) { + for (i = 0; i < adpt->num_queue && !err; i++) + ne6x_alloc_rx_buffers(adpt->rx_rings[i], + NE6X_DESC_UNUSED(adpt->rx_rings[i])); + } + + return err; +} + +static void ne6x_napi_enable_all(struct ne6x_adapter *adpt) +{ + int q_idx; + + if (!adpt->netdev) + return; + + for (q_idx = 0; q_idx < adpt->num_q_vectors; q_idx++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[q_idx]; + + if (q_vector->tx.ring || q_vector->rx.ring || q_vector->cq.ring) + napi_enable(&q_vector->napi); + } +} + +static int ne6x_up_complete(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + + ne6x_adpt_configure_msix(adpt); + + clear_bit(NE6X_ADPT_DOWN, adpt->comm.state); + ne6x_napi_enable_all(adpt); + ne6x_adpt_enable_irq(adpt); + + if ((adpt->port_info->phy.link_info.link_info & NE6X_AQ_LINK_UP) && adpt->netdev) { + ne6x_print_link_message(adpt, true); + netif_tx_start_all_queues(adpt->netdev); + netif_carrier_on(adpt->netdev); + } + + /* On the next run of the service_task, 
notify any clients of the new + * opened netdev + */ + set_bit(NE6X_CLIENT_SERVICE_REQUESTED, pf->state); + ne6x_linkscan_schedule(pf); + + return 0; +} + +static void ne6x_napi_disable_all(struct ne6x_adapter *adpt) +{ + int q_idx; + + if (!adpt->netdev) + return; + + for (q_idx = 0; q_idx < adpt->num_q_vectors; q_idx++) { + struct ne6x_q_vector *q_vector = adpt->q_vectors[q_idx]; + + if (q_vector->tx.ring || q_vector->rx.ring || q_vector->cq.ring) + napi_disable(&q_vector->napi); + } +} + +static void ne6x_unmap_and_free_tx_resource(struct ne6x_ring *ring, struct ne6x_tx_buf *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); +} + +void ne6x_clean_tx_ring(struct ne6x_ring *tx_ring) +{ + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buf) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) + ne6x_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_buf[i]); + + bi_size = sizeof(struct ne6x_tx_buf) * tx_ring->count; + memset(tx_ring->tx_buf, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->cq_last_expect = 0; + + if (!tx_ring->netdev) + return; + + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(txring_txq(tx_ring)); +} + +void ne6x_clean_rx_ring(struct ne6x_ring *rx_ring) +{ + unsigned long bi_size; + u16 i; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buf) + return; + + if (rx_ring->skb) { + 
dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + struct ne6x_rx_buf *rx_bi = &rx_ring->rx_buf[i]; + + if (!rx_bi->page) + continue; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. + */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_bi->dma, rx_bi->page_offset, + rx_ring->rx_buf_len, DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, ne6x_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, NE6X_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); + + rx_bi->page = NULL; + rx_bi->page_offset = 0; + } + + bi_size = sizeof(struct ne6x_rx_buf) * rx_ring->count; + memset(rx_ring->rx_buf, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->cq_last_expect = 0; +} + +static void ne6x_clean_cq_ring(struct ne6x_ring *cq_ring) +{ + /* Zero out the descriptor ring */ + memset(cq_ring->desc, 0, cq_ring->size); + + cq_ring->next_to_clean = 0; + cq_ring->next_to_use = 0; +} + +void ne6x_down(struct ne6x_adapter *adpt) +{ + int i; + + /* It is assumed that the caller of this function + * sets the adpt->comm.state NE6X_ADPT_DOWN bit. 
+ */ + if (adpt->netdev) { + netif_carrier_off(adpt->netdev); + netif_tx_disable(adpt->netdev); + } + + ne6x_adpt_disable_irq(adpt); + ne6x_adpt_restart_vp(adpt, true); + ne6x_force_link_state(adpt, false); + ne6x_napi_disable_all(adpt); + + for (i = 0; i < adpt->num_queue; i++) { + ne6x_clean_tx_ring(adpt->tx_rings[i]); + ne6x_clean_cq_ring(adpt->cq_rings[i]); + ne6x_clean_rx_ring(adpt->rx_rings[i]); + } +} + +void ne6x_free_rx_resources(struct ne6x_ring *rx_ring) +{ + ne6x_clean_rx_ring(rx_ring); + kfree(rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + + if (rx_ring->desc) { + dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, rx_ring->dma); + rx_ring->desc = NULL; + } +} + +static void ne6x_adpt_free_rx_resources(struct ne6x_adapter *adpt) +{ + int i; + + if (!adpt->rx_rings) + return; + + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->rx_rings[i] && adpt->rx_rings[i]->desc) + ne6x_free_rx_resources(adpt->rx_rings[i]); + } +} + +void ne6x_free_tx_resources(struct ne6x_ring *tx_ring) +{ + ne6x_clean_tx_ring(tx_ring); + kfree(tx_ring->tx_buf); + tx_ring->tx_buf = NULL; + + if (tx_ring->desc) { + dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + } +} + +void ne6x_free_cq_resources(struct ne6x_ring *cq_ring) +{ + ne6x_clean_cq_ring(cq_ring); + if (cq_ring->desc) { + dma_free_coherent(cq_ring->dev, cq_ring->size, cq_ring->desc, cq_ring->dma); + cq_ring->desc = NULL; + } +} + +static void ne6x_adpt_free_tx_resources(struct ne6x_adapter *adpt) +{ + int i; + + if (adpt->tx_rings) { + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->tx_rings[i] && adpt->tx_rings[i]->desc) + ne6x_free_tx_resources(adpt->tx_rings[i]); + kfree(adpt->tx_rings[i]->sgl); + } + } + + if (adpt->cq_rings) { + for (i = 0; i < adpt->num_queue; i++) { + if (adpt->cq_rings[i] && adpt->cq_rings[i]->desc) + ne6x_free_cq_resources(adpt->cq_rings[i]); + } + } + + if (adpt->tg_rings) { + for (i = 0; i < adpt->num_queue; i++) { + if 
(adpt->tg_rings[i] && adpt->tg_rings[i]->desc) + /* tg_ring == cq_ring */ + ne6x_free_cq_resources(adpt->tg_rings[i]); + } + } +} + +int ne6x_up(struct ne6x_adapter *adpt) +{ + int err; + + ne6x_force_link_state(adpt, true); + + err = ne6x_adpt_configure(adpt); + if (!err) + err = ne6x_up_complete(adpt); + + return err; +} + +int ne6x_adpt_open(struct ne6x_adapter *adpt) +{ + char int_name[NE6X_INT_NAME_STR_LEN]; + struct ne6x_pf *pf = adpt->back; + int err; + + /* allocate descriptors */ + err = ne6x_adpt_setup_tx_resources(adpt); + if (err) + goto err_setup_tx; + + err = ne6x_adpt_setup_rx_resources(adpt); + if (err) + goto err_setup_rx; + + err = ne6x_adpt_configure(adpt); + if (err) + goto err_setup_rx; + + if (adpt->netdev) { + snprintf(int_name, sizeof(int_name) - 1, "%s-%s", dev_driver_string(&pf->pdev->dev), + adpt->netdev->name); + err = ne6x_adpt_request_irq(adpt, int_name); + if (err) + goto err_setup_rx; + + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(adpt->netdev, adpt->num_queue); + if (err) + goto err_set_queues; + + /* When reducing the number of Tx queues, any pre-existing + * skbuffs might target a now removed queue. Older versions of + * the Linux kernel do not check for this, and it can result + * in a kernel panic. Avoid this by flushing all skbs now, so + * that we avoid attempting to transmit one that has an + * invalid queue mapping. 
+ */ + qdisc_reset_all_tx_gt(adpt->netdev, 0); + + err = netif_set_real_num_rx_queues(adpt->netdev, adpt->num_queue); + if (err) + goto err_set_queues; + } else { + err = -EINVAL; + goto err_setup_rx; + } + + err = ne6x_up_complete(adpt); + if (err) + goto err_up_complete; + + ne6x_dev_set_tx_rx_state(adpt, true, true); + return 0; + +err_up_complete: + ne6x_down(adpt); +err_set_queues: + ne6x_adpt_free_irq(adpt); +err_setup_rx: + ne6x_adpt_free_rx_resources(adpt); +err_setup_tx: + ne6x_adpt_free_tx_resources(adpt); + + return err; +} + +int ne6x_open(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int err; + + netdev_info(netdev, "open !!!\n"); + set_bit(NE6X_ADPT_OPEN, adpt->comm.state); + + netif_carrier_off(netdev); + + if (ne6x_force_link_state(adpt, true)) + return -EAGAIN; + + err = ne6x_adpt_open(adpt); + if (err) + return err; + + ne6x_sync_features(netdev); + + ne6x_dev_set_if_state(adpt, NE6000_IF_INTERFACE_UP); + + return 0; +} + +void ne6x_adpt_close(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + + ne6x_dev_set_tx_rx_state(adpt, false, false); + if (!test_and_set_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + ne6x_down(adpt); + + ne6x_adpt_free_irq(adpt); + ne6x_adpt_free_tx_resources(adpt); + ne6x_adpt_free_rx_resources(adpt); + set_bit(NE6X_CLIENT_SERVICE_REQUESTED, pf->state); +} + +int ne6x_close(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + clear_bit(NE6X_ADPT_OPEN, adpt->comm.state); + adpt->current_isup = false; + adpt->current_speed = NE6X_LINK_SPEED_UNKNOWN; + ne6x_adpt_close(adpt); + if (test_bit(NE6X_ADPT_F_LINKDOWN_ON_CLOSE, adpt->flags)) + ne6x_dev_set_if_state(adpt, NE6000_IF_INTERFACE_DOWN); + + netdev_info(netdev, "close !!!\n"); + + return 0; +} + +static void ne6x_adpt_reinit_locked(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + + WARN_ON(in_interrupt()); + while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) + 
usleep_range(1000, 2000); + + ne6x_down(adpt); + ne6x_up(adpt); + clear_bit(NE6X_CONFIG_BUSY, pf->state); +} + +static int ne6x_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu + NE6X_PACKET_HDR_PAD; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (new_mtu < NE6X_MIN_MTU_SIZE) { + netdev_err(netdev, "mtu < MIN MTU size"); + return -EINVAL; + } + + max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + if (max_frame > NE6X_MAX_RXBUFFER) { + netdev_err(netdev, "mtu > MAX MTU size"); + return -EINVAL; + } + + netdev_info(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) { + if (adpt->back->num_alloc_vfs == 0) + ne6x_adpt_reinit_locked(adpt); + } + + return 0; +} + +static void ne6x_tx_timeout(struct net_device *netdev, __always_unused unsigned int txqueue) +{ + struct ne6x_ring *tx_ring = NULL; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_pf *pf = adpt->back; + unsigned int hung_queue = 0; + u64 head, intr, tail; + + hung_queue = txqueue; + tx_ring = adpt->tx_rings[hung_queue]; + pf->tx_timeout_count++; + + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) + pf->tx_timeout_recovery_level = 1; /* reset after some time */ + else if (time_before(jiffies, (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) + return; /* don't do any new action before the next timeout */ + + /* don't kick off another recovery if one is already pending */ + if (test_and_set_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state)) + return; + + if (tx_ring) { + if (tx_ring->reg_idx < NE6X_PF_VP0_NUM) { + head = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(tx_ring->reg_idx, NE6X_SQ_HD_POINTER)); + /* Read interrupt register */ + intr = rd64(&pf->hw, NE6X_VPINT_DYN_CTLN(tx_ring->reg_idx, NE6X_VP_INT)); + tail = rd64(&pf->hw, + NE6X_VPINT_DYN_CTLN(tx_ring->reg_idx, + NE6X_SQ_TAIL_POINTER)); + } else { + head = rd64_bar4(&pf->hw, + 
NE6X_PFINT_DYN_CTLN(tx_ring->reg_idx - + NE6X_PF_VP0_NUM, + NE6X_SQ_HD_POINTER)); + intr = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(tx_ring->reg_idx - + NE6X_PF_VP0_NUM, + NE6X_VP_INT)); + tail = rd64_bar4(&pf->hw, + NE6X_PFINT_DYN_CTLN(tx_ring->reg_idx - + NE6X_PF_VP0_NUM, + NE6X_SQ_TAIL_POINTER)); + } + + netdev_info(netdev, "tx_timeout: adapter: %u, Q: %u, NTC: 0x%x, HEAD: 0x%llx, NTU: 0x%x, TAIL: 0x%llx, INTR: 0x%llx\n", + adpt->idx, hung_queue, tx_ring->next_to_clean, head, + tx_ring->next_to_use, tail, intr); + } + + pf->tx_timeout_last_recovery = jiffies; + netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", + pf->tx_timeout_recovery_level, hung_queue); + + switch (pf->tx_timeout_recovery_level) { + case 1: + set_bit(NE6X_ADPT_RECOVER, adpt->comm.state); + set_bit(NE6X_PF_RESET_REQUESTED, pf->state); + set_bit(NE6X_RESET_INTR_RECEIVED, pf->state); + break; + case 2: + set_bit(NE6X_CORE_RESET_REQUESTED, pf->state); + break; + default: + netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n"); + set_bit(NE6X_DOWN_REQUESTED, pf->state); + set_bit(NE6X_ADPT_DOWN_REQUESTED, adpt->comm.state); + break; + } + + ne6x_service_event_schedule(pf); + pf->tx_timeout_recovery_level++; +} + +static void ne6x_get_netdev_stats_struct_tx(struct ne6x_ring *ring, struct rtnl_link_stats64 *stats) +{ + u64 bytes, packets; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + + stats->tx_packets += packets; + stats->tx_bytes += bytes; +} + +struct rtnl_link_stats64 *ne6x_get_adpt_stats_struct(struct ne6x_adapter *adpt) +{ + return &adpt->net_stats; +} + +static void ne6x_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct rtnl_link_stats64 *adpt_stats = 
ne6x_get_adpt_stats_struct(adpt); + struct ne6x_ring *tx_ring, *rx_ring; + u64 bytes, packets; + unsigned int start; + int i; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + return; + + if (!adpt->tx_rings) + return; + + rcu_read_lock(); + for (i = 0; i < adpt->num_queue; i++) { + tx_ring = READ_ONCE(adpt->tx_rings[i]); + if (!tx_ring) + continue; + + ne6x_get_netdev_stats_struct_tx(tx_ring, stats); + rx_ring = &tx_ring[2]; + + do { + start = u64_stats_fetch_begin(&rx_ring->syncp); + packets = rx_ring->stats.packets; + bytes = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); + + stats->rx_packets += packets; + stats->rx_bytes += bytes; + } + + adpt_stats->rx_dropped = 0; + rcu_read_unlock(); + + /* following stats updated by ne6x_watchdog_subtask() */ + stats->multicast = adpt_stats->multicast; + stats->tx_errors = adpt_stats->tx_errors; + stats->tx_dropped = adpt_stats->tx_dropped; + stats->rx_errors = adpt_stats->rx_errors; + stats->rx_dropped = adpt_stats->rx_dropped; + stats->rx_crc_errors = adpt_stats->rx_crc_errors; + stats->rx_length_errors = adpt_stats->rx_length_errors; +} + +void ne6x_update_pf_stats(struct ne6x_adapter *adpt) +{ + struct rtnl_link_stats64 *ons; + struct rtnl_link_stats64 *ns; /* netdev stats */ + struct ne6x_eth_stats *oes; + struct ne6x_eth_stats *es; /* device's eth stats */ + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + u32 tx_restart, tx_busy; + u32 rx_page, rx_buf; + u64 bytes, packets; + unsigned int start; + struct vf_stat vf_stat; + u64 tx_linearize; + u64 tx_force_wb; + u64 rx_p, rx_b; + u64 tx_p, tx_b; + u64 tx_e, rx_e; + u64 rx_l, rx_c; + u16 i; + + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + return; + + ns = ne6x_get_adpt_stats_struct(adpt); + ons = &adpt->net_stats_offsets; + es = &adpt->eth_stats; + oes = &adpt->eth_stats_offsets; + + rx_p = 0; + rx_b = 0; + tx_p = 0; + tx_b = 0; + rx_e = 0; + tx_e = 0; + rx_c = 0; + rx_l = 0; + tx_force_wb = 0; + tx_linearize = 0; + 
tx_busy = 0; + tx_restart = 0; + rx_page = 0; + rx_buf = 0; + + rcu_read_lock(); + for (i = 0; i < adpt->num_queue; i++) { + /* locate Tx ring */ + tx_ring = READ_ONCE(adpt->tx_rings[i]); + + do { + start = u64_stats_fetch_begin(&tx_ring->syncp); + packets = tx_ring->stats.packets; + bytes = tx_ring->stats.bytes; + } while (u64_stats_fetch_retry(&tx_ring->syncp, start)); + + tx_b += bytes; + tx_p += packets; + tx_restart += tx_ring->tx_stats.restart_q; + tx_busy += tx_ring->tx_stats.tx_busy; + tx_linearize += tx_ring->tx_stats.tx_linearize; + tx_e += tx_ring->tx_stats.csum_err + tx_ring->tx_stats.tx_drop_addr + + tx_ring->tx_stats.tx_pcie_read_err; + + rx_ring = &tx_ring[2]; + + do { + start = u64_stats_fetch_begin(&rx_ring->syncp); + packets = rx_ring->stats.packets; + bytes = rx_ring->stats.bytes; + } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); + + rx_b += bytes; + rx_p += packets; + rx_buf += rx_ring->rx_stats.alloc_buf_failed; + rx_page += rx_ring->rx_stats.alloc_page_failed; + rx_e += rx_ring->rx_stats.csum_err + rx_ring->rx_stats.rx_err + + rx_ring->rx_stats.rx_mem_error; + rx_l += rx_ring->rx_stats.rx_mem_error; + } + + rcu_read_unlock(); + + adpt->tx_restart = tx_restart; + adpt->tx_busy = tx_busy; + adpt->rx_page_failed = rx_page; + adpt->rx_buf_failed = rx_buf; + + ns->rx_packets = rx_p; + ns->rx_bytes = rx_b; + ns->tx_packets = tx_p; + ns->tx_bytes = tx_b; + ns->tx_errors = tx_e; + ns->rx_errors = rx_e; + ns->rx_length_errors = rx_l; + ns->rx_crc_errors = rx_c; + + ns->rx_dropped = 0; + ne6x_dev_get_vf_stat(adpt, &vf_stat); + es->rx_broadcast = vf_stat.rx_broadcast_pkts; + es->rx_miss = vf_stat.rx_drop_pkts; + es->rx_multicast = vf_stat.rx_multicast_pkts; + es->rx_unicast = vf_stat.rx_unicast_pkts; + es->tx_broadcast = vf_stat.tx_broadcast_pkts; + es->tx_multicast = vf_stat.tx_multicast_pkts; + es->tx_unicast = vf_stat.tx_unicast_pkts; + es->rx_malform = vf_stat.rx_malform_pkts; + es->tx_malform = vf_stat.tx_malform_pkts; +} + +#ifdef 
CONFIG_NET_POLL_CONTROLLER +static void ne6x_netpoll(struct net_device *netdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + int i; + + /* if interface is down do nothing */ + if (test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + return; + + for (i = 0; i < adpt->num_q_vectors; i++) + ne6x_msix_clean_rings(0, adpt->q_vectors[i]); +} +#endif + +static int ne6x_set_mac(struct net_device *netdev, void *p) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_mac_info *mac = &adpt->port_info->mac; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { + netdev_info(netdev, "already using mac address %pM\n", addr->sa_data); + return 0; + } + + if (ether_addr_equal(mac->perm_addr, addr->sa_data)) + netdev_info(netdev, "returning to hw mac address %pM\n", mac->perm_addr); + else + netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); + + ne6x_adpt_del_mac(adpt, mac->perm_addr, true); + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(mac->perm_addr, addr->sa_data, netdev->addr_len); + ne6x_adpt_add_mac(adpt, mac->perm_addr, true); + ne6x_dev_set_port_mac(adpt, mac->perm_addr); + + return 0; +} + +static int ne6x_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_vlan vlan; + int ret; + + netdev_info(netdev, "vlan_rx_add_vid proto = 0x%04X vid = %d\n", proto, vid); + + if (!vid) + return 0; + + /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged + * packets aren't pruned by the device's internal switch on Rx + */ + vlan = NE6X_VLAN(be16_to_cpu(proto), vid, 0); + + if (vlan.vid > 0 && vlan.vid < (VLAN_N_VID - 1)) { + ret = ne6x_adpt_add_vlan(adpt, vlan); + if (!ret) + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + } else { + return -EINVAL; + } + + return ret; +} + +static int ne6x_vlan_rx_kill_vid(struct 
net_device *netdev, __be16 proto, u16 vid) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_vlan vlan; + int ret; + + netdev_info(netdev, "vlan_rx_add_vid proto = 0x%04X vid = %d\n", proto, vid); + + if (!vid) + return 0; + + /* Make sure VLAN delete is successful before updating VLAN + * information + */ + vlan = NE6X_VLAN(be16_to_cpu(proto), vid, 0); + ret = ne6x_adpt_del_vlan(adpt, vlan); + if (ret) + return ret; + + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + + return 0; +} + +static struct mac_addr_node *ne6x_find_addr(struct ne6x_adapter *adpt, + const u8 *macaddr, bool is_unicast) +{ + struct mac_addr_head *addr_head = NULL; + struct mac_addr_node *addr_node = NULL; + + if (!macaddr) + return NULL; + + if (is_unicast) + addr_head = &adpt->uc_mac_addr; + else + addr_head = &adpt->mc_mac_addr; + + list_for_each_entry(addr_node, &addr_head->list, list) { + if (ether_addr_equal(macaddr, addr_node->addr)) + return addr_node; + } + + return NULL; +} + +int ne6x_adpt_add_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast) +{ + int (*ne6x_vc_cfg_mac)(struct ne6x_adapter *adpt, u8 *mac); + struct mac_addr_head *addr_head = NULL; + struct mac_addr_node *addr_node = NULL; + int rc = 0; + + if (!addr) + return -EINVAL; + + if (is_unicast) { + addr_head = &adpt->uc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_add_unicast; + } else { + addr_head = &adpt->mc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_add_multicast; + } + + mutex_lock(&addr_head->mutex); + + if (ne6x_find_addr(adpt, addr, is_unicast)) + goto out_unlock; + + /* Update MAC list value */ + addr_node = kzalloc(sizeof(*addr_node), GFP_KERNEL); + if (!addr_node) { + rc = -ENOMEM; + goto out_unlock; + } + + ether_addr_copy(addr_node->addr, addr); + list_add_tail(&addr_node->list, &addr_head->list); + /* Send the value of the updated MAC linked list to the SDK */ + ne6x_vc_cfg_mac(adpt, addr_node->addr); + +out_unlock: + mutex_unlock(&addr_head->mutex); + + return rc; +} + 
+int ne6x_adpt_del_mac(struct ne6x_adapter *adpt, const u8 *addr, bool is_unicast) +{ + int (*ne6x_vc_cfg_mac)(struct ne6x_adapter *adpt, u8 *mac); + struct mac_addr_head *addr_head = NULL; + struct mac_addr_node *addr_node = NULL; + + if (is_unicast) { + addr_head = &adpt->uc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_del_unicast; + } else { + addr_head = &adpt->mc_mac_addr; + ne6x_vc_cfg_mac = ne6x_dev_del_multicast; + } + + mutex_lock(&addr_head->mutex); + addr_node = ne6x_find_addr(adpt, addr, is_unicast); + if (!addr_node) + goto out_unlock; + + list_del(&addr_node->list); + ne6x_vc_cfg_mac(adpt, addr_node->addr); + kfree(addr_node); + +out_unlock: + mutex_unlock(&addr_head->mutex); + + return 0; +} + +static int ne6x_mc_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_add_mac(adpt, addr, false); +} + +static int ne6x_mc_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_del_mac(adpt, addr, false); +} + +static int ne6x_uc_addr_sync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_add_mac(adpt, addr, true); +} + +static int ne6x_uc_addr_unsync(struct net_device *netdev, const u8 *addr) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + return ne6x_adpt_del_mac(adpt, addr, true); +} + +void ne6x_adpt_clear_ddos(struct ne6x_pf *pf) +{ + u32 data; + + ne6x_reg_get_user_data(pf, NP_USER_DATA_HW_FLAGS, &data); + data &= ~NE6X_F_DDOS_ENABLED; + ne6x_reg_set_user_data(pf, NP_USER_DATA_HW_FLAGS, data); +} + +int ne6x_adpt_clear_mac_vlan(struct ne6x_adapter *adpt) +{ + struct mac_addr_node *temp_node = NULL, *addr_node = NULL; + struct ne6x_vlan_filter *f = NULL, *temp_filter = NULL; + struct mac_addr_head *addr_head = NULL; + struct list_head temp_header; + int ret = 0; + + INIT_LIST_HEAD(&temp_header); + 
spin_lock_bh(&adpt->mac_vlan_list_lock);
+	list_for_each_entry(f, &adpt->vlan_filter_list, list) {
+		if (f->vlan.vid) {
+			temp_filter = kzalloc(sizeof(*temp_filter), GFP_ATOMIC);
+			/* best-effort snapshot: skip the entry rather than
+			 * dereferencing a NULL pointer on allocation failure
+			 */
+			if (!temp_filter)
+				continue;
+			memcpy(temp_filter, f, sizeof(struct ne6x_vlan_filter));
+			list_add_tail(&temp_filter->list, &temp_header);
+		}
+	}
+	spin_unlock_bh(&adpt->mac_vlan_list_lock);
+
+	list_for_each_entry_safe(f, temp_filter, &temp_header, list) {
+		if (f->vlan.vid)
+			ret |= ne6x_adpt_del_vlan(adpt, f->vlan);
+
+		list_del(&f->list);
+		kfree(f);
+	}
+
+	addr_head = &adpt->uc_mac_addr;
+	mutex_lock(&addr_head->mutex);
+	list_for_each_entry_safe(addr_node, temp_node, &addr_head->list, list) {
+		ret |= ne6x_dev_del_unicast(adpt, addr_node->addr);
+		list_del(&addr_node->list);
+		kfree(addr_node);
+	}
+	mutex_unlock(&addr_head->mutex);
+
+	addr_head = &adpt->mc_mac_addr;
+	mutex_lock(&addr_head->mutex);
+	list_for_each_entry_safe(addr_node, temp_node, &addr_head->list, list) {
+		ret |= ne6x_dev_del_multicast(adpt, addr_node->addr);
+		list_del(&addr_node->list);
+		kfree(addr_node);
+	}
+	mutex_unlock(&addr_head->mutex);
+
+	return ret;
+}
+
+static void ne6x_set_rx_mode_task(struct work_struct *work)
+{
+	struct ne6x_adapter *adpt = container_of(work, struct ne6x_adapter, set_rx_mode_task);
+	struct net_device *netdev = adpt->netdev;
+
+	/* Check for Promiscuous modes */
+	if (netdev->flags & IFF_PROMISC) {
+		ne6x_dev_set_uc_promiscuous_enable(adpt, true);
+		ne6x_dev_set_mc_promiscuous_enable(adpt, true);
+	} else {
+		ne6x_dev_set_uc_promiscuous_enable(adpt, false);
+		ne6x_dev_set_mc_promiscuous_enable(adpt, false);
+		/* Check for All Multicast modes */
+		if (netdev->flags & IFF_ALLMULTI)
+			ne6x_dev_set_mc_promiscuous_enable(adpt, true);
+		else
+			__dev_mc_sync(netdev, ne6x_mc_addr_sync, ne6x_mc_addr_unsync);
+	}
+
+	__dev_uc_sync(netdev, ne6x_uc_addr_sync, ne6x_uc_addr_unsync);
+}
+
+static void ne6x_set_rx_mode(struct net_device *netdev)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+
+	if (!adpt)
+ return; + + queue_work(ne6x_wq, &adpt->set_rx_mode_task); +} + +static int ne6x_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (!adpt) + return -1; + + return 0; +} + +#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_RX | \ + NETIF_F_HW_VLAN_STAG_TX) + +#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ + NETIF_F_HW_VLAN_STAG_FILTER) + +#define NETIF_UDP_TNL_FEATURES (NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + +static netdev_features_t ne6x_fix_features(struct net_device *netdev, netdev_features_t features) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features &= ~NETIF_F_HW_VLAN_STAG_RX; + + if (features & NETIF_F_HW_VLAN_STAG_RX) + features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + features &= ~NETIF_F_HW_VLAN_STAG_TX; + + if (features & NETIF_F_HW_VLAN_STAG_TX) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + if (features & NETIF_VLAN_FILTERING_FEATURES) + features |= NETIF_VLAN_FILTERING_FEATURES; + + return features; +} + +static int ne6x_set_features(struct net_device *netdev, netdev_features_t features) +{ + netdev_features_t changed = features ^ netdev->features; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + u32 value; + + value = ne6x_dev_get_features(adpt); + + if (changed & (NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM)) { + if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) + value |= NE6X_F_TX_UDP_TNL_SEG; + else + value &= ~NE6X_F_TX_UDP_TNL_SEG; + } + + if (changed & NETIF_VLAN_OFFLOAD_FEATURES || changed & NETIF_VLAN_FILTERING_FEATURES) { + /* keep cases separate because one ethertype for offloads can be + * disabled at the same time as another is disabled, so check for an + * enabled ethertype first, then check for disabled. 
Default to
+		 * ETH_P_8021Q so an ethertype is specified if disabling insertion and
+		 * stripping.
+		 */
+
+		if (features & NETIF_F_HW_VLAN_CTAG_RX)
+			value |= NE6X_F_RX_VLAN_STRIP;
+		else
+			value &= ~NE6X_F_RX_VLAN_STRIP;
+
+		if (features & NETIF_F_HW_VLAN_CTAG_TX)
+			value |= NE6X_F_TX_VLAN;
+		else
+			value &= ~NE6X_F_TX_VLAN;
+
+		if (features & NETIF_F_HW_VLAN_STAG_RX)
+			value |= NE6X_F_RX_QINQ_STRIP;
+		else
+			value &= ~NE6X_F_RX_QINQ_STRIP;
+
+		if (features & NETIF_F_HW_VLAN_STAG_TX)
+			value |= NE6X_F_TX_QINQ;
+		else
+			value &= ~NE6X_F_TX_QINQ;
+
+		if (features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+			value |= NE6X_F_RX_VLAN_FILTER;
+		else
+			value &= ~NE6X_F_RX_VLAN_FILTER;
+	}
+
+	if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO)) {
+		if (features & NETIF_F_RXCSUM)
+			value |= NE6X_OFFLOAD_RXCSUM;
+		else
+			value &= ~NE6X_OFFLOAD_RXCSUM;
+
+		/* update hardware LRO capability accordingly */
+		if (features & NETIF_F_LRO)
+			value |= NE6X_OFFLOAD_LRO;
+		else
+			value &= ~NE6X_OFFLOAD_LRO;
+	}
+
+	/* a single TSO/TSO6 check suffices (the verbatim duplicate was removed) */
+	if (changed & (NETIF_F_TSO6 | NETIF_F_TSO)) {
+		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
+			value |= NE6X_OFFLOAD_TSO;
+		else
+			value &= ~NE6X_OFFLOAD_TSO;
+	}
+
+	if (changed & NETIF_F_GSO_UDP) {
+		if (features & NETIF_F_GSO_UDP)
+			value |= NE6X_OFFLOAD_UFO;
+		else
+			value &= ~NE6X_OFFLOAD_UFO;
+	}
+
+	if (changed & NETIF_F_IP_CSUM) {
+		if (features & NETIF_F_IP_CSUM)
+			value |= NE6X_OFFLOAD_TXCSUM;
+		else
+			value &= ~NE6X_OFFLOAD_TXCSUM;
+	}
+
+	if (changed & NETIF_F_RXHASH) {
+		if (features & NETIF_F_RXHASH)
+			value |= NE6X_OFFLOAD_RSS;
+		else
+			value &= ~NE6X_OFFLOAD_RSS;
+	}
+
+	if (changed & NETIF_F_HW_L2FW_DOFFLOAD) {
+		if (features & NETIF_F_HW_L2FW_DOFFLOAD)
+			value |= NE6X_OFFLOAD_L2;
+		else
+			value &= ~NE6X_OFFLOAD_L2;
+	}
+
+	if (changed & NETIF_F_SCTP_CRC) {
+		if (features &
NETIF_F_SCTP_CRC) + value |= NE6X_OFFLOAD_SCTP_CSUM; + else + value &= ~NE6X_OFFLOAD_SCTP_CSUM; + } + + if (changed & NETIF_F_NTUPLE) { + if (features & NETIF_F_NTUPLE) + value |= NE6X_F_FLOW_STEERING; + else + value &= ~NE6X_F_FLOW_STEERING; + } + return ne6x_dev_set_features(adpt, value); +} + +static netdev_features_t ne6x_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + size_t len; + + /* No point in doing any of this if neither checksum nor GSO are + * being requested for this frame. We can rule out both by just + * checking for CHECKSUM_PARTIAL + */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + /* We cannot support GSO if the MSS is going to be less than + * 64 bytes. If it is then we need to drop support for GSO. + */ + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) + features &= ~NETIF_F_GSO_MASK; + + /* MACLEN can support at most 63 words */ + len = skb_network_header(skb) - skb->data; + if (len & ~(63 * 2)) + goto out_err; + + /* IPLEN and EIPLEN can support at most 127 dwords */ + len = skb_transport_header(skb) - skb_network_header(skb); + if (len & ~(127 * 4)) + goto out_err; + + /* No need to validate L4LEN as TCP is the only protocol with a + * a flexible value and we support all possible values supported + * by TCP, which is at most 15 dwords + */ + return features; + +out_err: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +int ne6x_link_speed_to_rate(int link_speed) +{ + switch (link_speed) { + case NE6X_LINK_SPEED_100GB: + return SPEED_100000; + case NE6X_LINK_SPEED_40GB: + return SPEED_40000; + case NE6X_LINK_SPEED_25GB: + return SPEED_25000; + case NE6X_LINK_SPEED_10GB: + return SPEED_10000; + default: + return SPEED_25000; + } +} + +int ne6x_validata_tx_rate(struct ne6x_adapter *adpt, int vf_id, int min_tx_rate, int max_tx_rate) +{ + if (!adpt) + return -EINVAL; + + if (min_tx_rate) { + dev_err(&adpt->back->pdev->dev, "Invalid min tx rate (%d) 
(greater than 0) specified for VF %d.\n", + min_tx_rate, vf_id); + return -EINVAL; + } + + if (max_tx_rate > ne6x_link_speed_to_rate(adpt->port_info->phy.link_info.link_speed)) { + dev_err(&adpt->back->pdev->dev, "Invalid max tx rate (%d) (greater than link_speed) specified for VF %d.\n", + max_tx_rate, vf_id); + return -EINVAL; + } + + return 0; +} + +static struct ne6x_key_filter *ne6x_find_key(struct ne6x_pf *pf, struct ne6x_key key) +{ + struct ne6x_key_filter *f; + + list_for_each_entry(f, &pf->key_filter_list, list) { + if (f->key.pi == key.pi && ether_addr_equal(f->key.mac_addr, key.mac_addr)) + return f; + } + + return NULL; +} + +struct ne6x_key_filter *ne6x_add_key_list(struct ne6x_pf *pf, struct ne6x_key key) +{ + struct ne6x_key_filter *f = NULL; + + spin_lock_bh(&pf->key_list_lock); + + f = ne6x_find_key(pf, key); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto clearout; + + f->key = key; + + list_add_tail(&f->list, &pf->key_filter_list); + f->add = true; + } else { + f->refcnt++; + } + +clearout: + spin_unlock_bh(&pf->key_list_lock); + + return f; +} + +int ne6x_del_key_list(struct ne6x_pf *pf, struct ne6x_key key) +{ + struct ne6x_key_filter *f; + + spin_lock_bh(&pf->key_list_lock); + + f = ne6x_find_key(pf, key); + if (f) { + if (f->refcnt) { + f->refcnt--; + spin_unlock_bh(&pf->key_list_lock); + return -1; + } + + list_del(&f->list); + kfree(f); + } + + spin_unlock_bh(&pf->key_list_lock); + + return 0; +} + +int ne6x_add_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size) +{ + struct ne6x_key_filter *f; + struct ne6x_key key; + + memset(&key, 0, sizeof(struct ne6x_key)); + key.pi = ADPT_LPORT(adpt); + memcpy(key.mac_addr, mac_addr, size); + + f = ne6x_add_key_list(adpt->back, key); + if (f->refcnt) + return -1; + + return 0; +} + +int ne6x_del_key(struct ne6x_adapter *adpt, u8 *mac_addr, u8 size) +{ + struct ne6x_key key; + int ret; + + memset(&key, 0, sizeof(struct ne6x_key)); + key.pi = ADPT_LPORT(adpt); + 
memcpy(key.mac_addr, mac_addr, size);
+
+	ret = ne6x_del_key_list(adpt->back, key);
+	if (ret)
+		return -1;
+
+	return 0;
+}
+
+/* Look up a VLAN filter by vid+tpid; caller holds adpt->mac_vlan_list_lock. */
+static struct ne6x_vlan_filter *ne6x_find_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan)
+{
+	struct ne6x_vlan_filter *f;
+
+	list_for_each_entry(f, &adpt->vlan_filter_list, list) {
+		if (f->vlan.vid == vlan.vid && f->vlan.tpid == vlan.tpid)
+			return f;
+	}
+
+	return NULL;
+}
+
+/* Add a VLAN filter (or bump its refcount); returns NULL on allocation failure. */
+struct ne6x_vlan_filter *ne6x_add_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan)
+{
+	struct ne6x_vlan_filter *f = NULL;
+
+	spin_lock_bh(&adpt->mac_vlan_list_lock);
+
+	f = ne6x_find_vlan(adpt, vlan);
+	if (!f) {
+		f = kzalloc(sizeof(*f), GFP_ATOMIC);
+		if (!f)
+			goto clearout;
+
+		f->vlan = vlan;
+
+		list_add_tail(&f->list, &adpt->vlan_filter_list);
+		f->add = true;
+	} else {
+		f->refcnt++;
+	}
+
+clearout:
+	spin_unlock_bh(&adpt->mac_vlan_list_lock);
+
+	return f;
+}
+
+int ne6x_del_vlan_list(struct ne6x_adapter *adpt, struct ne6x_vlan vlan)
+{
+	struct ne6x_vlan_filter *f;
+
+	spin_lock_bh(&adpt->mac_vlan_list_lock);
+
+	f = ne6x_find_vlan(adpt, vlan);
+	if (f) {
+		if (f->refcnt) {
+			f->refcnt--;
+			spin_unlock_bh(&adpt->mac_vlan_list_lock);
+			return -1;
+		}
+
+		list_del(&f->list);
+		kfree(f);
+	}
+
+	spin_unlock_bh(&adpt->mac_vlan_list_lock);
+
+	return 0;
+}
+
+int ne6x_adpt_add_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan)
+{
+	struct ne6x_vlan_filter *f = ne6x_add_vlan_list(adpt, vlan);
+
+	/* ne6x_add_vlan_list() returns NULL when the filter allocation fails */
+	if (!f)
+		return -ENOMEM;
+
+	if (f->refcnt == 0)
+		ne6x_dev_vlan_add(adpt, &vlan);
+
+	return 0;
+}
+
+int ne6x_adpt_del_vlan(struct ne6x_adapter *adpt, struct ne6x_vlan vlan)
+{
+	int ret;
+
+	ret = ne6x_del_vlan_list(adpt, vlan);
+	if (ret == 0)
+		ne6x_dev_vlan_del(adpt, &vlan);
+
+	return 0;
+}
+
+int ne6x_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id,
+			  u8 qos, __be16 vlan_proto)
+{
+	struct ne6x_netdev_priv *np = netdev_priv(netdev);
+	struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev);
+	u16 local_vlan_proto = ntohs(vlan_proto);
+	u16 vid_temp =
0, tpid_temp = 0; + struct ne6x_vlan vlan; + struct ne6x_adapter *adpt; + struct device *dev; + struct ne6x_vf *vf; + int lport; + + dev = ne6x_pf_to_dev(pf); + + if (vf_id < 0 || vf_id >= pf->num_alloc_vfs / 2 || vlan_id >= (VLAN_N_VID - 1) || qos > 7) { + dev_err(dev, "Invalid Port VLAN parameters for VF %d,vlan ID %d, QoS %d\n", + vf_id, vlan_id, qos); + return -EINVAL; + } + + if (!ne6x_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) { + dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n", + local_vlan_proto); + return -EPROTONOSUPPORT; + } + + lport = ADPT_LPORT(np->adpt); + vf_id += (pf->num_alloc_vfs / 2) * lport; + + vf = ne6x_get_vf_by_id(pf, vf_id); + if (!vf) + return -EINVAL; + + vf->port_vlan_info = NE6X_VLAN(local_vlan_proto, vlan_id, qos); + if (vf->port_vlan_info.prio || vf->port_vlan_info.vid) + dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n", + vlan_id, qos, local_vlan_proto, vf_id); + else + dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id); + + adpt = vf->adpt; + + dev_info(dev, "%s: net_name:%s TPID:%08x vlan_id:%d qos:%d lport:%d vport:%d vlan_id:%d tpid:%04x %d\n", + __func__, netdev->name, local_vlan_proto, vlan_id, qos, ADPT_LPORT(adpt), + ADPT_VPORT(adpt), vf->port_vlan_info.vid, vf->port_vlan_info.tpid, vf->vfp_vid); + + vlan = NE6X_VLAN(local_vlan_proto, vlan_id, qos); + + if (vlan.vid == 0) { + if (vf->vfp_tpid == vlan.tpid) { + vlan.vid = vf->vfp_vid; + vlan.tpid = vf->vfp_tpid; + vf->vfp_vid = 0; + vf->vfp_tpid = 0; + ne6x_dev_del_vf_qinq(vf, vlan.tpid, vlan.vid); + ne6x_adpt_del_vlan(vf->adpt, vlan); + } else { + vlan.vid = vf->vfp_vid; + vlan.tpid = vf->vfp_tpid; + vf->vfp_vid = 0; + vf->vfp_tpid = 0; + ne6x_dev_del_vf_qinq(vf, vlan.tpid, vlan.vid); + ne6x_adpt_del_vlan(vf->adpt, vlan); + } + + } else if (vlan.vid > 0 && vlan.vid < (VLAN_N_VID - 1)) { + vid_temp = vlan.vid; + tpid_temp = vlan.tpid; + vlan.vid = vf->vfp_vid; + vlan.tpid = vf->vfp_tpid; + + if (vf->vfp_vid == vid_temp) { + 
ne6x_dev_del_vf_qinq(vf, vlan.tpid, vlan.vid); + ne6x_adpt_del_vlan(vf->adpt, vlan); + } + + vlan.vid = vid_temp; + vlan.tpid = tpid_temp; + vid_temp = (qos << VLAN_PRIO_SHIFT) | (vlan.vid & VLAN_VID_MASK); + vf->vfp_vid = vf->port_vlan_info.vid; + vf->vfp_tpid = vf->port_vlan_info.tpid; + ne6x_dev_add_vf_qinq(vf, tpid_temp, vid_temp); + ne6x_adpt_add_vlan(vf->adpt, vlan); + } else { + return -EINVAL; + } + + return 0; +} + +static void *ne6x_fwd_add_macvlan(struct net_device *netdev, struct net_device *vdev) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + struct ne6x_macvlan *mv = NULL; + u8 mac[ETH_ALEN]; + + ether_addr_copy(mac, vdev->dev_addr); + mv = devm_kzalloc(ne6x_pf_to_dev(adpt->back), sizeof(*mv), GFP_KERNEL); + if (!mv) + return NULL; + + ne6x_adpt_add_mac(adpt, mac, true); + INIT_LIST_HEAD(&mv->list); + mv->vdev = vdev; + ether_addr_copy(mv->mac, mac); + list_add(&mv->list, &adpt->macvlan_list); + netdev_info(netdev, "MACVLAN offloads for %s are on\n", vdev->name); + + return mv; +} + +static void ne6x_fwd_del_macvlan(struct net_device *netdev, void *accel_priv) +{ + struct ne6x_macvlan *mv = (struct ne6x_macvlan *)accel_priv; + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev); + + if (!accel_priv) + return; + + ne6x_adpt_del_mac(adpt, mv->mac, true); + list_del(&mv->list); + devm_kfree(ne6x_pf_to_dev(adpt->back), mv); + + netdev_info(netdev, "MACVLAN offloads for %s are off\n", mv->vdev->name); +} + +static const struct net_device_ops ne6x_netdev_ops = { + .ndo_open = ne6x_open, + .ndo_stop = ne6x_close, + .ndo_start_xmit = ne6x_lan_xmit_frame, + .ndo_get_stats64 = ne6x_get_netdev_stats_struct, + .ndo_set_rx_mode = ne6x_set_rx_mode, + .ndo_set_mac_address = ne6x_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = ne6x_change_mtu, + .ndo_tx_timeout = ne6x_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ne6x_netpoll, +#endif + .ndo_set_vf_rate = ne6x_ndo_set_vf_bw, + .ndo_set_tx_maxrate = 
ne6x_set_tx_maxrate,
+	.ndo_set_vf_mac = ne6x_set_vf_mac,
+	.ndo_get_vf_config = ne6x_get_vf_config,
+	.ndo_set_vf_trust = ne6x_set_vf_trust,
+	.ndo_set_vf_vlan = ne6x_set_vf_port_vlan,
+	.ndo_set_vf_link_state = ne6x_set_vf_link_state,
+	.ndo_vlan_rx_add_vid = ne6x_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = ne6x_vlan_rx_kill_vid,
+	.ndo_set_features = ne6x_set_features,
+	.ndo_features_check = ne6x_features_check,
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer = ne6x_rx_flow_steer,
+#endif
+	.ndo_dfwd_add_station = ne6x_fwd_add_macvlan,
+	.ndo_dfwd_del_station = ne6x_fwd_del_macvlan,
+	.ndo_fix_features = ne6x_fix_features,
+};
+
+/* Push the netdev's current feature flags down to the device. */
+void ne6x_sync_features(struct net_device *netdev)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(netdev);
+	u32 value;
+
+	value = ne6x_dev_get_features(adpt);
+
+	if (netdev->features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
+		value |= NE6X_F_TX_UDP_TNL_SEG;
+	else
+		value &= ~NE6X_F_TX_UDP_TNL_SEG;
+
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		value |= NE6X_F_RX_VLAN_STRIP;
+	else
+		value &= ~NE6X_F_RX_VLAN_STRIP;
+
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
+		value |= NE6X_F_TX_VLAN;
+	else
+		value &= ~NE6X_F_TX_VLAN;
+
+	if (netdev->features & NETIF_F_HW_VLAN_STAG_RX)
+		value |= NE6X_F_RX_QINQ_STRIP;
+	else
+		value &= ~NE6X_F_RX_QINQ_STRIP;
+
+	if (netdev->features & NETIF_F_HW_VLAN_STAG_TX)
+		value |= NE6X_F_TX_QINQ;
+	else
+		value &= ~NE6X_F_TX_QINQ;
+
+	if (netdev->features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+		value |= NE6X_F_RX_VLAN_FILTER;
+	else
+		value &= ~NE6X_F_RX_VLAN_FILTER;
+
+	if (netdev->features & NETIF_F_RXCSUM)
+		value |= NE6X_OFFLOAD_RXCSUM;
+	else
+		value &= ~NE6X_OFFLOAD_RXCSUM;
+
+	/* update hardware LRO capability accordingly */
+	if (netdev->features & NETIF_F_LRO)
+		value |= NE6X_OFFLOAD_LRO;
+	else
+		value &= ~NE6X_OFFLOAD_LRO;
+
+	if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6))
+		value |=
NE6X_OFFLOAD_TSO; + else + value &= ~NE6X_OFFLOAD_TSO; + + if (netdev->features & NETIF_F_GSO_UDP) + value |= NE6X_OFFLOAD_UFO; + else + value &= ~NE6X_OFFLOAD_UFO; + + if (netdev->features & NETIF_F_IP_CSUM) + value |= NE6X_OFFLOAD_TXCSUM; + else + value &= ~NE6X_OFFLOAD_TXCSUM; + + if (netdev->features & NETIF_F_RXHASH) + value |= NE6X_OFFLOAD_RSS; + else + value &= ~NE6X_OFFLOAD_RSS; + + if (netdev->features & NETIF_F_HW_L2FW_DOFFLOAD) + value |= NE6X_OFFLOAD_L2; + else + value &= ~NE6X_OFFLOAD_L2; + + if (netdev->features & NETIF_F_SCTP_CRC) + value |= NE6X_OFFLOAD_SCTP_CSUM; + else + value &= ~NE6X_OFFLOAD_SCTP_CSUM; + + if (netdev->features & NETIF_F_NTUPLE) + value |= NE6X_F_FLOW_STEERING; + else + value &= ~NE6X_F_FLOW_STEERING; + + ne6x_dev_set_features(adpt, value); +} + +static void ne6x_set_netdev_features(struct net_device *netdev) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + netdev_features_t vlano_features = 0u; + netdev_features_t csumo_features; + netdev_features_t dflt_features; + netdev_features_t tso_features; + + dflt_features = NETIF_F_SG | + NETIF_F_HIGHDMA | + NETIF_F_NTUPLE | + NETIF_F_RXHASH; + + csumo_features = NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_IPV6_CSUM; + + vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + tso_features = NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_LRO | + NETIF_F_LOOPBACK | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_IPXIP6 | + NETIF_F_GSO_UDP_L4 | + NETIF_F_GSO_SCTP | + 0; + + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM; + + /* set features that user can change */ + netdev->hw_features = dflt_features | csumo_features | vlano_features | tso_features; + + /* add support for HW_CSUM on packets with MPLS header */ + netdev->mpls_features = 
NETIF_F_HW_CSUM; + + netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; + + /* enable features */ + netdev->features |= netdev->hw_features; + /* encap and VLAN devices inherit default, csumo and tso features */ + netdev->hw_enc_features |= dflt_features | csumo_features | tso_features; + netdev->vlan_features |= dflt_features | csumo_features | tso_features; + netdev->hw_features |= NETIF_F_HW_TC; + pf->hw.dvm_ena = 0x1; + + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_STAG_FILTER; +} + +static int ne6x_config_netdev(struct ne6x_adapter *adpt) +{ + struct ne6x_rss_info *rss_info = &adpt->rss_info; + struct ne6x_pf *pf = adpt->back; + struct ne6x_netdev_priv *np; + struct net_device *netdev; + char name[IFNAMSIZ] = {0}; + int etherdev_size, index; + u8 mac_addr[ETH_ALEN]; + + if (pf->hw.bus.domain_num) + sprintf(name, "enP%dp%ds0f%d", + pf->hw.bus.domain_num, pf->hw.bus.bus_num, adpt->idx); + else + sprintf(name, "enp%ds0f%d", pf->hw.bus.bus_num, adpt->idx); + + etherdev_size = sizeof(struct ne6x_netdev_priv); + + netdev = alloc_netdev_mq(etherdev_size, name, NET_NAME_USER, ether_setup, adpt->num_queue); + if (!netdev) + return -ENOMEM; + + adpt->netdev = netdev; + np = netdev_priv(netdev); + np->adpt = adpt; + + /* begin rss info */ + rss_info->hash_type = NE6X_RSS_HASH_TYPE_IPV4_TCP | + NE6X_RSS_HASH_TYPE_IPV4_UDP | + NE6X_RSS_HASH_TYPE_IPV4 | + NE6X_RSS_HASH_TYPE_IPV6_TCP | + NE6X_RSS_HASH_TYPE_IPV6_UDP | + NE6X_RSS_HASH_TYPE_IPV6; + rss_info->hash_func = NE6X_RSS_HASH_FUNC_TOEPLITZ; + rss_info->hash_key_size = NE6X_RSS_MAX_KEY_SIZE; + rss_info->ind_table_size = NE6X_RSS_MAX_IND_TABLE_SIZE; + netdev_rss_key_fill(rss_info->hash_key, sizeof(rss_info->hash_key)); + + for (index = 0; index < rss_info->ind_table_size; index++) + rss_info->ind_table[index] = ethtool_rxfh_indir_default(index, adpt->num_queue); + + ne6x_dev_set_rss(adpt, rss_info); /* end rss info */ + + ne6x_set_netdev_features(netdev); + + 
SET_NETDEV_DEV(netdev, &pf->pdev->dev); + ether_addr_copy(mac_addr, adpt->port_info->mac.perm_addr); + eth_hw_addr_set(netdev, mac_addr); + ether_addr_copy(netdev->perm_addr, mac_addr); + + netdev->netdev_ops = &ne6x_netdev_ops; + netdev->watchdog_timeo = 5 * HZ; + ne6x_set_ethtool_ops(netdev); + +/* MTU range: 128 - 15342 */ + netdev->min_mtu = NE6X_MIN_MTU_SIZE; + netdev->max_mtu = NE6X_MAX_RXBUFFER - NE6X_PACKET_HDR_PAD - ETH_FCS_LEN; + netdev->gso_max_size = 65535; + netdev->needed_headroom = 32; + netdev->needed_tailroom = 32; + ne6x_dev_set_mtu(adpt, netdev->mtu); + ne6x_sync_features(netdev); + + return 0; +} + +static void ne6x_map_vector_to_qp(struct ne6x_adapter *adpt, int v_idx, int qp_idx) +{ + struct ne6x_q_vector *q_vector = adpt->q_vectors[v_idx]; + struct ne6x_ring *tx_ring = adpt->tx_rings[qp_idx]; + struct ne6x_ring *rx_ring = adpt->rx_rings[qp_idx]; + struct ne6x_ring *cq_ring = adpt->cq_rings[qp_idx]; + struct ne6x_ring *tg_ring = adpt->tg_rings[qp_idx]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + q_vector->tx.count++; + + cq_ring->q_vector = q_vector; + cq_ring->next = q_vector->cq.ring; + q_vector->cq.ring = cq_ring; + q_vector->cq.count++; + tg_ring->q_vector = q_vector; + tg_ring->next = q_vector->cq.ring; + q_vector->tg.ring = tg_ring; + q_vector->tg.count++; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + q_vector->rx.count++; +} + +void ne6x_adpt_map_rings_to_vectors(struct ne6x_adapter *adpt) +{ + int q_vectors = adpt->num_q_vectors; + int qp_remaining = adpt->num_queue; + struct ne6x_q_vector *q_vector; + int num_ringpairs; + int v_start = 0; + int qp_idx = 0; + + /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to + * group them so there are multiple queues per vector. 
+ * It is also important to go through all the vectors available to be + * sure that if we don't use all the vectors, that the remaining vectors + * are cleared. This is especially important when decreasing the + * number of queues in use. + */ + for (; v_start < q_vectors; v_start++) { + q_vector = adpt->q_vectors[v_start]; + + num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); + + q_vector->num_ringpairs = num_ringpairs; + q_vector->reg_idx = q_vector->v_idx + adpt->base_vector; + + q_vector->rx.count = 0; + q_vector->tx.count = 0; + q_vector->cq.count = 0; + q_vector->tg.count = 0; + q_vector->rx.ring = NULL; + q_vector->tx.ring = NULL; + q_vector->cq.ring = NULL; + q_vector->tg.ring = NULL; + + while (num_ringpairs--) { + ne6x_map_vector_to_qp(adpt, v_start, qp_idx); + qp_idx++; + qp_remaining--; + } + } +} + +void ne6x_adpt_reset_stats(struct ne6x_adapter *adpt) +{ + struct rtnl_link_stats64 *ns; + int i; + + if (!adpt) + return; + + ns = ne6x_get_adpt_stats_struct(adpt); + memset(ns, 0, sizeof(*ns)); + memset(&adpt->net_stats_offsets, 0, sizeof(adpt->net_stats_offsets)); + memset(&adpt->eth_stats, 0, sizeof(adpt->eth_stats)); + memset(&adpt->eth_stats_offsets, 0, sizeof(adpt->eth_stats_offsets)); + + if (adpt->rx_rings && adpt->rx_rings[0]) { + for (i = 0; i < adpt->num_queue; i++) { + memset(&adpt->rx_rings[i]->stats, 0, + sizeof(adpt->rx_rings[i]->stats)); + memset(&adpt->rx_rings[i]->rx_stats, 0, + sizeof(adpt->rx_rings[i]->rx_stats)); + memset(&adpt->rx_rings[i]->cq_stats, 0, + sizeof(adpt->rx_rings[i]->cq_stats)); + memset(&adpt->tx_rings[i]->stats, 0, + sizeof(adpt->tx_rings[i]->stats)); + memset(&adpt->tx_rings[i]->tx_stats, 0, + sizeof(adpt->tx_rings[i]->tx_stats)); + } + } +} + +static int ne6x_adpt_setup(struct ne6x_pf *pf) +{ + struct ne6x_adapter *adpt = NULL; + u32 is_write_proterct = false; + struct ne6x_hw *hw = &pf->hw; + int i, ret = 0; + u32 value; + + /* PF + VP */ + pf->adpt = kcalloc(NE6X_MAX_VP_NUM + 4, sizeof(*pf->adpt), 
GFP_KERNEL); + if (!pf->adpt) + return -ENOMEM; + + ne6x_dev_get_norflash_write_protect(pf, &is_write_proterct); + + /* Need to protect the allocation of the adapters at the PF level */ + for (i = pf->num_alloc_adpt - 1; i >= 0; i--) { + struct ne6x_vlan vlan = {0}; + + adpt = kzalloc(sizeof(*adpt), GFP_KERNEL); + adpt->back = pf; + pf->adpt[i] = adpt; + adpt->idx = i; + adpt->vport = NE6X_PF_VP0_NUM + i; /*vport*/ + set_bit(NE6X_ADPT_DOWN, adpt->comm.state); + + value = ne6x_dev_get_features(adpt); + if (value & NE6X_F_RX_FW_LLDP) + clear_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags); + else + set_bit(NE6X_ADPT_F_DISABLE_FW_LLDP, adpt->flags); + + clear_bit(NE6X_ADPT_F_LINKDOWN_ON_CLOSE, adpt->flags); + clear_bit(NE6X_ADPT_F_DDOS_SWITCH, adpt->flags); + clear_bit(NE6X_ADPT_F_ACL, adpt->flags); + + if (is_write_proterct) + set_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags); + else + clear_bit(NE6X_ADPT_F_NORFLASH_WRITE_PROTECT, adpt->flags); + + INIT_WORK(&adpt->set_rx_mode_task, ne6x_set_rx_mode_task); + + /* init multicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->mc_mac_addr.list); + mutex_init(&adpt->mc_mac_addr.mutex); + + /* init unicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->uc_mac_addr.list); + mutex_init(&adpt->uc_mac_addr.mutex); + + /* init vlan list head node */ + spin_lock_init(&adpt->mac_vlan_list_lock); + INIT_LIST_HEAD(&adpt->vlan_filter_list); + + INIT_LIST_HEAD(&adpt->macvlan_list); + init_waitqueue_head(&adpt->recv_notify); + + adpt->port_info = kzalloc(sizeof(*adpt->port_info), GFP_KERNEL); + if (!adpt->port_info) { + ret = -ENOMEM; + goto err_portinfo; + } + + adpt->port_info->lport = i; /* logical port */ + adpt->port_info->hw_trunk_id = i; + adpt->port_info->hw_port_id = ne6x_dev_get_pport(adpt); + adpt->port_info->queue = pf->hw.max_queue; + adpt->port_info->hw_max_queue = adpt->port_info->queue; + adpt->port_info->hw_queue_base = pf->hw.expect_vp * i; + adpt->comm.port_info = adpt->port_info->lport | (adpt->vport 
<< 8); + adpt->port_info->hw = hw; + adpt->port_info->phy.curr_user_speed_req = 0x0; + + ne6x_dev_get_mac_addr(adpt, adpt->port_info->mac.perm_addr); + ne6x_set_num_rings_in_adpt(adpt); + + ret = ne6x_adpt_mem_alloc(pf, adpt); + if (ret) + goto err_netdev; + + ret = ne6x_config_netdev(adpt); + if (ret) + goto err_configdev; + + /* The unicast MAC address delivers the SDK */ + vlan = NE6X_VLAN(ETH_P_8021Q, 0xfff, 0); + ne6x_adpt_add_vlan(adpt, vlan); + ne6x_adpt_add_mac(adpt, adpt->port_info->mac.perm_addr, true); + ne6x_dev_add_broadcast_leaf(adpt); + + /* set up vectors and rings if needed */ + ret = ne6x_adpt_setup_vectors(adpt); + if (ret) + goto err_msix; + + ret = ne6x_alloc_rings(adpt); + if (ret) + goto err_rings; + + ne6x_init_arfs(adpt); + + ret = ne6x_set_cpu_rx_rmap(adpt); + if (ret) + netdev_info(adpt->netdev, "adpt rx rmap err: %d", ret); + + /* map all of the rings to the q_vectors */ + ne6x_adpt_map_rings_to_vectors(adpt); + ne6x_adpt_reset_stats(adpt); + ne6x_dev_set_port2pi(adpt); + ne6x_dev_set_pi2port(adpt); + ne6x_dev_set_vport(adpt); + ne6x_dev_set_rss(adpt, &adpt->rss_info); + } + + for (i = pf->num_alloc_adpt - 1; i >= 0; i--) { + adpt = pf->adpt[i]; + ret = ne6x_adpt_register_netdev(adpt); + if (ret) + goto err_configdev; + + adpt->netdev_registered = true; + netif_carrier_off(adpt->netdev); + /* make sure transmit queues start off as stopped */ + netif_tx_stop_all_queues(adpt->netdev); + } + + return ret; + +err_rings: + ne6x_adpt_free_q_vectors(adpt); +err_msix: + if (adpt->netdev_registered) { + adpt->netdev_registered = false; + unregister_netdev(adpt->netdev); + free_netdev(adpt->netdev); + adpt->netdev = NULL; + } +err_configdev: + kfree(adpt->tx_rings); + kfree(adpt->q_vectors); +err_netdev: + kfree(adpt->port_info); +err_portinfo: + kfree(adpt); + + return ret; +} + +int ne6x_adpt_register_netdev(struct ne6x_adapter *adpt) +{ + int ret; + + ret = register_netdev(adpt->netdev); + if (ret) { + struct net_device *device = adpt->netdev; 
+ struct ne6x_pf *pf = adpt->back; + char name[IFNAMSIZ] = {0}; + + sprintf(name, "enp%ds0f%%d", pf->hw.bus.bus_num); + strcpy(device->name, name); + return register_netdev(adpt->netdev); + } + + return ret; +} + +void ne6x_adjust_adpt_port_max_queue(struct ne6x_pf *pf) +{ + int cpu_num = num_online_cpus(); + + if (pf->irq_pile->num_entries < NE6X_MAX_MSIX_NUM) { + pf->hw.expect_vp = pf->irq_pile->num_entries / pf->hw.pf_port; + /* actal max vp queue */ + pf->hw.max_queue = min_t(int, cpu_num, pf->hw.expect_vp); + dev_info(&pf->pdev->dev, "%s:hw->expect_vp = %d hw->max_queue = %d cpu_num = %d\n", + __func__, pf->hw.expect_vp, pf->hw.max_queue, cpu_num); + } +} + +static int ne6x_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct ne6x_pf *pf; + struct ne6x_hw *hw; + u32 ioremap_len; + int err; + + if (PCI_FUNC(pdev->devfn) != 1) + return 0; + + /* initialize device for use with memory space */ + err = pci_enable_device_mem(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + /* set up pci connections */ + err = pci_request_mem_regions(pdev, ne6x_driver_name); + if (err) { + dev_info(&pdev->dev, "pci_request_mem_regions failed %d\n", err); + goto err_pci_reg; + } + pci_set_master(pdev); + /* Now that we have a PCI connection, we need to do the + * low level device setup. This is primarily setting up + * the Admin Queue structures and then querying for the + * device's current profile information. 
+ */ + pf = kzalloc(sizeof(*pf), GFP_KERNEL); + if (!pf) { + err = -ENOMEM; + goto err_pf_alloc; + } + pf->next_adpt = 0; + pf->pdev = pdev; + pci_set_drvdata(pdev, pf); + set_bit(NE6X_DOWN, pf->state); + + hw = &pf->hw; + hw->back = pf; + + ioremap_len = pci_resource_len(pdev, 0); + hw->hw_addr0 = ioremap(pci_resource_start(pdev, 0), ioremap_len); + if (!hw->hw_addr0) { + err = -EIO; + dev_info(&pdev->dev, "ioremap bar0 (0x%04x, 0x%04x) failed: 0x%x\n", + (unsigned int)pci_resource_start(pdev, 0), ioremap_len, err); + goto err_ioremap_hw_addr0; + } + + ioremap_len = pci_resource_len(pdev, 2); + hw->hw_addr2 = ioremap(pci_resource_start(pdev, 2), ioremap_len); + if (!hw->hw_addr2) { + err = -EIO; + dev_info(&pdev->dev, "ioremap bar2 (0x%04x, 0x%04x) failed: 0x%x\n", + (unsigned int)pci_resource_start(pdev, 2), ioremap_len, err); + goto err_ioremap_hw_addr2; + } + + ioremap_len = pci_resource_len(pdev, 4); + hw->hw_addr4 = ioremap(pci_resource_start(pdev, 4), ioremap_len); + if (!hw->hw_addr4) { + err = -EIO; + dev_info(&pdev->dev, "ioremap bar4 (0x%04x, 0x%04x) failed: 0x%x\n", + (unsigned int)pci_resource_start(pdev, 4), ioremap_len, err); + goto err_ioremap_hw_addr4; + } + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->revision_id = pdev->revision; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + hw->bus.domain_num = pci_domain_nr(pdev->bus); + hw->bus.bus_num = pdev->bus->number; + hw->bus.device = PCI_SLOT(pdev->devfn); + hw->bus.func = PCI_FUNC(pdev->devfn); + + usleep_range(10, 20); + + mutex_init(&pf->mbus_comm_mutex); + if (ne6x_dev_init(pf)) { + err = -EIO; + dev_info(&pdev->dev, "sdk init failed!\n"); + goto error_sdk_init_failed; + } + usleep_range(10, 20); + + pci_save_state(pdev); + + /* hardware resource initialization */ + err = ne6x_hw_init(hw); + if (err) + goto err_unroll_alloc; + + /* driver private resource initialization */ + err = ne6x_pf_init(pf); + if (err) + 
goto err_pf_reset; + + /* interrupt resource initialization */ + err = ne6x_init_interrupt_scheme(pf); + if (err) + goto err_interrupt_scheme; + + ne6x_adjust_adpt_port_max_queue(pf); + + err = ne6x_adpt_setup(pf); + if (err) + goto err_adpts; + + ne6x_dev_set_nic_start(pf, 0); + add_timer(&pf->linkscan_tmr); + ne6x_enable_link_irq(pf); + pcie_print_link_status(pdev); + /* ready to go, so clear down state bit */ + clear_bit(NE6X_DOWN, pf->state); + return 0; + +err_adpts: + set_bit(NE6X_DOWN, pf->state); + ne6x_clear_interrupt_scheme(pf); +err_interrupt_scheme: + del_timer_sync(&pf->serv_tmr); +err_pf_reset: + devm_kfree(ne6x_hw_to_dev(hw), hw->port_info); + hw->port_info = NULL; +err_unroll_alloc: +error_sdk_init_failed: + iounmap(hw->hw_addr4); +err_ioremap_hw_addr4: + iounmap(hw->hw_addr2); + hw->hw_addr2 = NULL; +err_ioremap_hw_addr2: + iounmap(hw->hw_addr0); +err_ioremap_hw_addr0: + kfree(pf); +err_pf_alloc: + pci_release_mem_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +void ne6x_adpt_free_arrays(struct ne6x_adapter *adpt, bool free_qvectors) +{ + /* free the ring and vector containers */ + if (free_qvectors) { + kfree(adpt->q_vectors); + adpt->q_vectors = NULL; + } + + kfree(adpt->tx_rings); + adpt->tx_rings = NULL; + adpt->rx_rings = NULL; + adpt->cq_rings = NULL; +} + +static int ne6x_adpt_clear(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf; + + if (!adpt) + return 0; + + if (!adpt->back) + goto free_adpt; + + pf = adpt->back; + + mutex_lock(&pf->switch_mutex); + if (!pf->adpt[adpt->idx]) { + dev_err(&pf->pdev->dev, "pf->adpt[%d] is NULL, just free adpt[%d](type %d)\n", + adpt->idx, adpt->idx, adpt->type); + goto unlock_adpt; + } + + if (pf->adpt[adpt->idx] != adpt) { + dev_err(&pf->pdev->dev, "pf->adpt[%d](type %d) != adpt[%d](type %d): no free!\n", + pf->adpt[adpt->idx]->idx, pf->adpt[adpt->idx]->type, adpt->idx, adpt->type); + goto unlock_adpt; + } + + /* updates the PF for this cleared adpt */ + 
ne6x_adpt_free_arrays(adpt, true); + + pf->adpt[adpt->idx] = NULL; + if (adpt->idx < pf->next_adpt) + pf->next_adpt = adpt->idx; + +unlock_adpt: + mutex_unlock(&pf->switch_mutex); +free_adpt: + kfree(adpt); + + return 0; +} + +int ne6x_adpt_release(struct ne6x_adapter *adpt) +{ + struct mac_addr_head *mc_head = &adpt->mc_mac_addr; + struct mac_addr_head *uc_head = &adpt->uc_mac_addr; + struct mac_addr_node *temp_node, *addr_node; + struct ne6x_vlan_filter *vlf, *vlftmp; + struct ne6x_key_filter *klf, *klftmp; + struct ne6x_macvlan *mv, *mv_tmp; + struct ne6x_pf *pf = adpt->back; + + if (!test_bit(NE6X_DOWN, pf->state)) { + dev_info(&pf->pdev->dev, "Can't remove PF adapter\n"); + return -ENODEV; + } + + set_bit(NE6X_ADPT_RELEASING, adpt->comm.state); + + ne6x_remove_arfs(adpt); + ne6x_adpt_clear_ddos(pf); + ne6x_adpt_clear_mac_vlan(adpt); + ne6x_dev_del_broadcast_leaf(adpt); + /* release adpt multicast addr list resource */ + mutex_lock(&mc_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &mc_head->list, list) { + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&mc_head->mutex); + + /* release adpt unicast addr list resource */ + mutex_lock(&uc_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &uc_head->list, list) { + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&uc_head->mutex); + + spin_lock_bh(&adpt->mac_vlan_list_lock); + /* release adpt vlan list resource */ + list_for_each_entry_safe(vlf, vlftmp, &adpt->vlan_filter_list, list) { + list_del(&vlf->list); + kfree(vlf); + } + spin_unlock_bh(&adpt->mac_vlan_list_lock); + + spin_lock_bh(&adpt->back->key_list_lock); + /* release adpt vlan list resource */ + list_for_each_entry_safe(klf, klftmp, &adpt->back->key_filter_list, list) { + list_del(&klf->list); + kfree(klf); + } + spin_unlock_bh(&adpt->back->key_list_lock); + + list_for_each_entry_safe(mv, mv_tmp, &adpt->macvlan_list, list) + ne6x_fwd_del_macvlan(adpt->netdev, mv); + + if 
(adpt->netdev_registered) { + adpt->netdev_registered = false; + if (adpt->netdev) + /* results in a call to i40e_close() */ + unregister_netdev(adpt->netdev); + } + + ne6x_free_cpu_rx_rmap(adpt); + ne6x_adpt_disable_irq(adpt); + + /* clear the sync flag on all filters */ + if (adpt->netdev) { + __dev_uc_unsync(adpt->netdev, NULL); + __dev_mc_unsync(adpt->netdev, NULL); + } + + ne6x_adpt_free_q_vectors(adpt); + if (adpt->netdev) { + free_netdev(adpt->netdev); + adpt->netdev = NULL; + } + + /*add for lldp*/ + ne6x_dev_set_fw_lldp(adpt, false); + ne6x_adpt_clear_rings(adpt); + ne6x_adpt_clear(adpt); + + return 0; +} + +static void ne6x_remove(struct pci_dev *pdev) +{ + struct ne6x_pf *pf = pci_get_drvdata(pdev); + struct ne6x_hw *hw = &pf->hw; + int i; + + if (PCI_FUNC(pdev->devfn) != 1) + return; + + ne6x_proc_pf_exit(pf); + ne6x_dbg_pf_exit(pf); + + ne6x_dev_set_nic_stop(pf, 0); + +#ifdef CONFIG_PCI_IOV + if (pf->num_alloc_vfs) { + set_bit(NE6X_REMOVE, pf->state); + ne6x_sriov_configure(pdev, 0); + } +#endif + + /* no more scheduling of any task */ + set_bit(NE6X_DOWN, pf->state); + if (pf->serv_tmr.function) + del_timer_sync(&pf->serv_tmr); + + if (pf->serv_task.func) + cancel_work_sync(&pf->serv_task); + + if (pf->linkscan_tmr.function) + del_timer_sync(&pf->linkscan_tmr); + + if (pf->linkscan_work.func) + cancel_work_sync(&pf->linkscan_work); + + /* Now we can shutdown the PF's adapter, just before we kill + * adminq and hmc. 
+ */ + for (i = 0; i < pf->num_alloc_adpt; i++) + ne6x_adpt_release(pf->adpt[i]); + + /* Clear all dynamic memory lists of rings, q_vectors, and adapters */ + rtnl_lock(); + ne6x_clear_interrupt_scheme(pf); + for (i = 0; i < pf->num_alloc_adpt; i++) { + if (pf->adpt[i]) { + ne6x_adpt_clear_rings(pf->adpt[i]); + ne6x_adpt_clear(pf->adpt[i]); + pf->adpt[i] = NULL; + } + } + rtnl_unlock(); + + kfree(pf->adpt); + + iounmap(hw->hw_addr4); + iounmap(hw->hw_addr2); + hw->hw_addr2 = NULL; + iounmap(hw->hw_addr0); + kfree(pf); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static struct pci_driver ne6x_driver = { + .name = ne6x_driver_name, + .id_table = ne6x_pci_tbl, + .probe = ne6x_probe, + .remove = ne6x_remove, + .sriov_configure = ne6x_sriov_configure, +}; + +int __init ne6x_init_module(void) +{ + pr_info("%s: %s - version %s\n", ne6x_driver_name, ne6x_driver_string, + ne6x_driver_version_str); + pr_info("%s: %s\n", ne6x_driver_name, ne6x_copyright); + + ne6x_wq = create_singlethread_workqueue(ne6x_driver_name); + if (!ne6x_wq) { + pr_err("%s: Failed to create workqueue\n", ne6x_driver_name); + return -ENOMEM; + } + + ne6x_dbg_init(); + ne6x_proc_init(); + ne6x_netlink_init(); + + return pci_register_driver(&ne6x_driver); +} + +module_init(ne6x_init_module); + +void __exit ne6x_exit_module(void) +{ + pci_unregister_driver(&ne6x_driver); + destroy_workqueue(ne6x_wq); + ne6x_netlink_exit(); + ne6x_proc_exit(); + ne6x_dbg_exit(); +} + +module_exit(ne6x_exit_module); diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c new file mode 100644 index 000000000000..1e6f21b53242 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include +#include + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_debugfs.h" +#include "ne6x_dev.h" +#include "ne6x_netlink.h" + +static struct sock *ne6x_nlsock; +static DEFINE_MUTEX(ne6x_msg_mutex); + +static int ne6x_netlink_tab_add(struct ne6x_pf *pf, struct ne6x_rule *rule) +{ + struct ne6x_debug_table *table_info; + struct device *dev; + u32 table_id = 0xFFFFFFFF; + int err; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + if (unlikely(!table_info)) + return -ENOMEM; + + dev = ne6x_pf_to_dev(pf); + table_info->table = NE6X_REG_ACL_TABLE; + table_info->size = NE6X_HASH_KEY_SIZE; + memcpy(table_info->data, rule, sizeof(*rule)); + + err = ne6x_reg_table_search(pf, table_info->table, &table_info->data[0], + table_info->size, NULL, table_info->size); + if (err == -ENOENT) { + table_info->size = NE6X_HASH_KEY_SIZE + NE6X_HASH_DATA_SIZE; + err = ne6x_reg_table_insert(pf, table_info->table, &table_info->data[0], + table_info->size, &table_id); + } else { + dev_info(dev, "table exist\n"); + kfree(table_info); + return -EEXIST; + } + + if (err == 0) { + dev_info(dev, "insert rule_id = 0x%x success!\n", table_id); + } else if (err != -ETIMEDOUT) { + dev_info(dev, "insert rule_id = 0x%x fail!\n", table_id); + err = -EIO; + } else { + dev_info(dev, "insert rule_id = 0x%x timeout!\n", table_id); + err = EAGAIN; + } + + kfree(table_info); + return err; +} + +static int ne6x_netlink_tab_del(struct ne6x_pf *pf, struct ne6x_rule *rule) +{ + struct ne6x_debug_table *table_info; + struct device *dev; + int err; + + table_info = kzalloc(sizeof(*table_info), GFP_KERNEL); + if (unlikely(!table_info)) + return -ENOMEM; + + dev = ne6x_pf_to_dev(pf); + table_info->table = NE6X_REG_ACL_TABLE; + table_info->size = NE6X_HASH_KEY_SIZE; + memcpy(table_info->data, rule, sizeof(*rule)); + + err = ne6x_reg_table_delete(pf, table_info->table, &table_info->data[0], table_info->size); + dev_info(dev, "%s: %s\n", __func__, (err == 0) ? "success!" 
: "timeout!"); + kfree(table_info); + + return err; +} + +static int ne6x_netlink_meter_write(struct ne6x_pf *pf, struct ne6x_meter *meter) +{ + struct meter_table vf_bw; + struct device *dev; + u32 cir_maxnum = 0xfffff; + u32 cbs_maxnum = 0xffffff; + u32 type_flag = 0; + u32 type_map = 0; + u32 cir; + int err; + + if (meter->type_num > NE6X_METER_TYPE_MAX || + meter->opcode > NE6X_METER_OPCODE_MAX) + return -EINVAL; + + dev = ne6x_pf_to_dev(pf); + type_flag |= BIT(meter->type_num); + + err = ne6x_reg_get_user_data(pf, NP_USER_DATA_DDOS_FLAG, &type_map); + if (err) + return err; + + if (meter->opcode) + type_map |= type_flag; + else + type_map &= ~type_flag; + + err = ne6x_reg_set_user_data(pf, NP_USER_DATA_DDOS_FLAG, type_map); + if (err) + return err; + + cir = meter->value * 1000 + 1023; + cir = min(cir / 1024, cir_maxnum); + + vf_bw.cir = cir; + vf_bw.pir = min(cir + cir / 10, cir_maxnum); + + vf_bw.cbs = min(vf_bw.cir * 10000, cbs_maxnum); + vf_bw.pbs = min(vf_bw.pir * 10000, cbs_maxnum); + + err = ne6x_reg_config_meter(pf, NE6X_METER1_TABLE | + NE6X_METER_SUBSET(NE6X_METER_SUBSET0) | + meter->type_num, (u32 *)&vf_bw, sizeof(vf_bw)); + + dev_info(dev, "%s\n", err ? "write meter fail!" 
: "write meter success!"); + + return err; +} + +static int ne6x_netlink_rcv_msg(struct nlmsghdr *nlh) +{ + char name[IFNAMSIZ] = {0}; + struct net_device *dev; + struct ne6x_pf *pf; + void *data; + int err; + + strncpy(name, nlmsg_data(nlh), IFNAMSIZ - 1); + dev = __dev_get_by_name(&init_net, name); + if (unlikely(!dev)) + return -ENODEV; + + if (unlikely(!netif_is_ne6x(dev))) + return -EOPNOTSUPP; + + pf = ne6x_netdev_to_pf(dev); + data = nlmsg_data(nlh) + IFNAMSIZ; + + switch (nlh->nlmsg_type) { + case NE6X_NLMSG_TAB_ADD: + /* if entry exists, treat it as insertion success */ + err = ne6x_netlink_tab_add(pf, data); + if (err == -EEXIST) + err = 0; + break; + case NE6X_NLMSG_TAB_DEL: + err = ne6x_netlink_tab_del(pf, data); + break; + case NE6X_NLMSG_METER_WRITE: + err = ne6x_netlink_meter_write(pf, data); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + +static void ne6x_netlink_ack(struct sk_buff *in_skb, unsigned long *status) +{ + struct sk_buff *skb_out; + struct nlmsghdr *nlh; + size_t payload; + + payload = BITS_TO_LONGS(NE6X_RULE_BATCH_MAX) * sizeof(unsigned long); + skb_out = nlmsg_new(payload, GFP_KERNEL); + if (unlikely(!skb_out)) { + NETLINK_CB(in_skb).sk->sk_err = ENOBUFS; + NETLINK_CB(in_skb).sk->sk_error_report(NETLINK_CB(in_skb).sk); + return; + } + + nlh = nlmsg_put(skb_out, NETLINK_CB(in_skb).portid, 0, NLMSG_DONE, payload, 0); + if (unlikely(!nlh)) { + nlmsg_free(skb_out); + return; + } + + NETLINK_CB(skb_out).dst_group = 0; + bitmap_copy(nlmsg_data(nlh), status, NE6X_RULE_BATCH_MAX); + + nlmsg_unicast(in_skb->sk, skb_out, NETLINK_CB(in_skb).portid); +} + +static void ne6x_netlink_rcv(struct sk_buff *skb) +{ + DECLARE_BITMAP(status, NE6X_RULE_BATCH_MAX); + u32 idx = 0; + + bitmap_zero(status, NE6X_RULE_BATCH_MAX); + mutex_lock(&ne6x_msg_mutex); + while (skb->len >= nlmsg_total_size(0) && idx < NE6X_RULE_BATCH_MAX) { + struct nlmsghdr *nlh; + int msglen, err; + + nlh = nlmsg_hdr(skb); + + if (unlikely(nlh->nlmsg_len < 
NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)) { + set_bit(idx, status); + goto skip; + } + + err = ne6x_netlink_rcv_msg(nlh); + if (err) + set_bit(idx, status); + +skip: + msglen = NLMSG_ALIGN(nlh->nlmsg_len); + if (unlikely(msglen > skb->len)) + msglen = skb->len; + + idx++; + skb_pull(skb, msglen); + } + + ne6x_netlink_ack(skb, status); + mutex_unlock(&ne6x_msg_mutex); +} + +/** + * ne6x_netlink_init - start up netlink resource for the driver + **/ +void ne6x_netlink_init(void) +{ + struct netlink_kernel_cfg ne6x_netlink_cfg = { + .input = ne6x_netlink_rcv, + }; + + ne6x_nlsock = netlink_kernel_create(&init_net, NE6X_NETLINK, &ne6x_netlink_cfg); + if (unlikely(!ne6x_nlsock)) + pr_warn("Init of netlink failed\n"); +} + +/** + * ne6x_netlink_exit - clean out the driver's netlink resource + **/ +void ne6x_netlink_exit(void) +{ + netlink_kernel_release(ne6x_nlsock); + ne6x_nlsock = NULL; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h new file mode 100644 index 000000000000..61a6cd1347bd --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_netlink.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_NETLINK_H +#define _NE6X_NETLINK_H + +#define NE6X_NETLINK 31 +#define NE6X_HASH_KEY_SIZE 64 +#define NE6X_HASH_DATA_SIZE 64 +#define NE6X_RULE_BATCH_MAX 64 +#define NE6X_METER_TYPE_MAX 8 +#define NE6X_METER_OPCODE_MAX 1 +#define NE6X_ADDR_LEN 16 + +/* netlink message opcodes */ +enum { + NE6X_NLMSG_BASE = 0x10, /* the type < 0x10 is reserved for control messages */ + NE6X_NLMSG_TAB_ADD = NE6X_NLMSG_BASE, + NE6X_NLMSG_TAB_DEL, + NE6X_NLMSG_METER_WRITE, + NE6X_NLMSG_MAX +}; + +struct ne6x_rule { + u8 dst[NE6X_ADDR_LEN]; + u8 src[NE6X_ADDR_LEN]; + u32 proto; +} __packed; + +struct ne6x_meter { + u8 type_num; + u8 opcode; + u32 value; +} __packed; + +void ne6x_netlink_init(void); +void ne6x_netlink_exit(void); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h new file mode 100644 index 000000000000..b60470095d99 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_portmap.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_PORTMAP_H +#define _NE6X_PORTMAP_H + +#include +#include + +#define PBMP_DWORD_NUM 4 +#define PBMP_WORD_WIDTH 32 + +typedef u32 pbmp_t[PBMP_DWORD_NUM]; + +#define SET_BIT(DAT, POS) ((DAT) |= ((u32)0x1 << (POS))) +#define CLR_BIT(DAT, POS) ((DAT) &= (~((u32)0x01 << (POS)))) + +#define PBMP_DWORD_GET(bm, word) ((bm)[(word)]) +#define PBMP_CLEAR(bm) \ + (PBMP_DWORD_GET(bm, 0) = PBMP_DWORD_GET(bm, 1) = \ + PBMP_DWORD_GET(bm, 2) = \ + PBMP_DWORD_GET(bm, 3) = 0) + +#define PBMP_WNET(port) ((port) / PBMP_WORD_WIDTH) +#define PBMP_WBIT(port) (1LU << ((port) % PBMP_WORD_WIDTH)) + +#define PBMP_ENTRY(bm, port) \ + (PBMP_DWORD_GET(bm, PBMP_WNET(port))) + +#define PBMP_PORT_REMOVE(bm, port) \ + (PBMP_ENTRY(bm, port) &= ~(PBMP_WBIT(port))) + +#define PBMP_PORT_ADD(bm, port) \ + (PBMP_ENTRY(bm, port) |= PBMP_WBIT(port)) + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c new file mode 100644 index 000000000000..6015d51465c4 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_dev.h" + +static struct proc_dir_entry *ne6x_proc_root; + +ssize_t ne6x_proc_tps_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) +{ + struct ne6x_soc_temperature temp = {0}; + struct ne6x_soc_power power = {0}; + struct device *dev = NULL; + struct ne6x_pf *pf = NULL; + char *info = NULL; + ssize_t len = 0; + int err; + + if (*ppos > 0 || count < PAGE_SIZE) + return 0; + + info = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!info) + return -ENOMEM; + + pf = filp->private_data; + dev = &pf->pdev->dev; + err = ne6x_dev_get_temperature_info(pf, &temp); + if (err) { + dev_err(dev, "get device temperature failed\n"); + } else { + len += sprintf(info, "Chip temperature (°C) %d\n", temp.chip_temerature); + len += sprintf(info + len, "Nic temerature (°C) %d\n", temp.board_temperature); + } + + err = ne6x_dev_get_power_consum(pf, &power); + if (err) { + dev_err(dev, "get device power failed\n"); + } else { + len += sprintf(info + len, "Current (A) %d.%03d\n", + power.cur / 1000, power.cur % 1000); + len += sprintf(info + len, "Voltage (V) %d.%03d\n", + power.vol / 1000, power.vol % 1000); + len += sprintf(info + len, "Power (W) %d.%03d\n", + power.power / 1000, power.power % 1000); + } + + if (!len) { + kfree(info); + return len; + } + + if (copy_to_user(buf, info, len)) { + kfree(info); + return -EFAULT; + } + + *ppos = len; + kfree(info); + return len; +} + +ssize_t ne6x_proc_i2c_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos) +{ + struct device *dev = NULL; + struct ne6x_pf *pf = NULL; + char info[512] = {0}; + ssize_t len = 0; + u32 id = 0; + int err; + + if (*ppos > 0 || count < 512) + return 0; + + pf = filp->private_data; + dev = &pf->pdev->dev; + err = ne6x_dev_i2c3_signal_test(pf, &id); + if (err) + dev_err(dev, "get device i2c external info failed\n"); + else + len += sprintf(info, "I2c external sig test %d\n", id & 0xff); + + if (!len) + return len; + + 
if (copy_to_user(buf, info, len)) + return -EFAULT; + + *ppos = len; + return len; +} + +static int ne6x_tps_open(struct inode *inode, struct file *file) +{ + file->private_data = pde_data(inode); + + return 0; +} + +static int ne6x_i2c_open(struct inode *inode, struct file *file) +{ + file->private_data = pde_data(inode); + + return 0; +} + +static const struct proc_ops ne6x_proc_tps_fops = { + .proc_open = ne6x_tps_open, + .proc_read = ne6x_proc_tps_read, +}; + +static const struct proc_ops ne6x_proc_i2c_fops = { + .proc_open = ne6x_i2c_open, + .proc_read = ne6x_proc_i2c_read, +}; + +void ne6x_proc_pf_init(struct ne6x_pf *pf) +{ + struct proc_dir_entry *pfile = NULL; + const struct device *dev = NULL; + const char *name = NULL; + + name = pci_name(pf->pdev); + dev = &pf->pdev->dev; + pf->ne6x_proc_pf = proc_mkdir(name, ne6x_proc_root); + if (!pf->ne6x_proc_pf) { + dev_err(dev, "proc dir %s create failed\n", name); + return; + } + + pfile = proc_create_data("temperature_power_state", 0600, pf->ne6x_proc_pf, + &ne6x_proc_tps_fops, pf); + if (!pfile) { + dev_err(dev, "proc file temperature_power_state create failed\n"); + goto create_failed; + } + + pfile = proc_create_data("i2c_test", 0600, pf->ne6x_proc_pf, &ne6x_proc_i2c_fops, pf); + if (!pfile) { + dev_err(dev, "proc file i2c_test create failed\n"); + goto create_failed; + } + + return; + +create_failed: + proc_remove(pf->ne6x_proc_pf); +} + +void ne6x_proc_pf_exit(struct ne6x_pf *pf) +{ + proc_remove(pf->ne6x_proc_pf); + pf->ne6x_proc_pf = NULL; +} + +extern char ne6x_driver_name[]; +void ne6x_proc_init(void) +{ + ne6x_proc_root = proc_mkdir(ne6x_driver_name, NULL); + if (!ne6x_proc_root) + pr_info("init of proc failed\n"); +} + +void ne6x_proc_exit(void) +{ + proc_remove(ne6x_proc_root); + ne6x_proc_root = NULL; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h new file mode 100644 index 000000000000..d4ce94cab66b --- /dev/null +++ 
b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_procfs.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_PROCFS_H +#define _NE6X_PROCFS_H + +struct ne6x_pf; + +void ne6x_proc_pf_init(struct ne6x_pf *pf); +void ne6x_proc_pf_exit(struct ne6x_pf *pf); +void ne6x_proc_init(void); +void ne6x_proc_exit(void); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c new file mode 100644 index 000000000000..2b7f6f24ca25 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.c @@ -0,0 +1,1620 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include +#include + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_portmap.h" + +#define AXIA_MBUS_READ_MEMORY_COMMAND 0x07 +#define AXIA_MBUS_READ_MEMORY_ACK 0x08 + +#define AXIA_MBUS_WRITE_MEMORY_COMMAND 0x09 +#define AXIA_MBUS_WRITE_MEMORY_ACK 0x0A + +#define AXIA_MBUS_READ_REGISTER_COMMAND 0x0B +#define AXIA_MBUS_READ_REGISTER_ACK 0x0C + +#define AXIA_MBUS_WRITE_REGISTER_COMMAND 0x0D +#define AXIA_MBUS_WRITE_REGISTER_ACK 0x0E + +#define AXIA_MBUS_RESET_FIRMWARE_COMMAND 0x0F +#define AXIA_MBUS_RESET_FIRMWARE_ACK 0x10 +#define AXIA_MBUS_READ_TABLE_COMMAND 0x11 +#define AXIA_MBUS_READ_TABLE_ACK 0x12 + +#define AXIA_MBUS_WRITE_TABLE_COMMAND 0x13 +#define AXIA_MBUS_WRITE_TABLE_ACK 0x14 + +#define AXIA_MBUS_CLEARUP_COMMAND 0x15 +#define AXIA_MBUS_CLEARUP_ACK 0x16 + +/* hash table operator */ +#define AXIA_MBUS_INSERT_COMMAND 0x17 +#define AXIA_MBUS_INSERT_ACK 0x18 + +#define AXIA_MBUS_UPDATE_COMMAND 0x19 +#define AXIA_MBUS_UPDATE_ACK 0x1A + +#define AXIA_MBUS_DELETE_COMMAND 0x1B +#define AXIA_MBUS_DELETE_ACK 0x1C + +#define AXIA_MBUS_LOOKUP_COMMAND 0x1D +#define AXIA_MBUS_LOOKUP_ACK 0x1E + +/* data download operator */ +#define AXIA_MBUS_DOWNLOAD_COMMAND 0x21 +#define AXIA_MBUS_DOWNLOAD_ACK 
0x22 + +#define AXIA_MBUS_OPERATOR_COMMAND 0x23 +#define AXIA_MBUS_OPERATOR_ACK 0x24 + +#define AXIA_MBUS_SETUP_PORT_COMMAND 0x25 +#define AXIA_MBUS_SETUP_PORT_ACK 0x26 + +#define AXIA_MBUS_SETUP_TABLE_COMMAND 0x27 +#define AXIA_MBUS_SETUP_TABLE_ACK 0x28 + +#define AXIA_MBUS_SETUP_TAPI_COMMAND 0x29 +#define AXIA_MBUS_SETUP_TAPI_ACK 0x2A + +#define AXIA_MBUS_SETUP_HASH_COMMAND 0x2B +#define AXIA_MBUS_SETUP_HASH_ACK 0x2C + +#define AXIA_MBUS_SETUP_DTAB_COMMAND 0x2D +#define AXIA_MBUS_SETUP_DTAB_ACK 0x2E + +#define AXIA_MBUS_E2PROM_READ_COMMAND 0x2F +#define AXIA_MBUS_E2PROM_READ_ACK 0x30 + +#define AXIA_MBUS_E2PROM_WRITE_COMMAND 0x31 +#define AXIA_MBUS_E2PROM_WRITE_ACK 0x32 + +#define AXIA_MBUS_SET_FAN_SPEED_COMMAND 0x33 +#define AXIA_MBUS_SET_FAN_SPEED_ACK 0x34 + +#define AXIA_MBUS_GET_FAN_SPEED_COMMAND 0x35 +#define AXIA_MBUS_GET_FAN_SPEED_ACK 0x36 + +#define AXIA_MBUS_GET_SYSTEM_INFO_COMMAND 0x37 +#define AXIA_MBUS_GET_SYSTEM_INFO_ACK 0x38 + +#define AXIA_MBUS_UPGRADE_PRE_COMMAND 0x39 +#define AXIA_MBUS_UPGRADE_PRE_COMMAND_ACK 0x3A +#define AXIA_MBUS_UPGRADE_COMMAND 0x3B +#define AXIA_MBUS_UPGRADE_COMMAND_ACK 0x3C + +#define AXIA_MBUS_GET_VER_COMMAND 0x3D +#define AXIA_MBUS_GET_VER_COMMAND_ACK 0x3E + +#define AXIA_MBUS_TALK_PORT_BASE 0x41 + +#define AXIA_MBUS_TALK_SET_PORT_ENABLE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 0) +#define AXIA_MBUS_TALK_SET_PORT_ENABLE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 1) + +#define AXIA_MBUS_TALK_GET_PORT_ENABLE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 2) +#define AXIA_MBUS_TALK_GET_PORT_ENABLE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_ENABLE + 3) + +#define AXIA_MBUS_TALK_SET_PORT_DUPLEX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 0) +#define AXIA_MBUS_TALK_SET_PORT_DUPLEX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 1) + +#define AXIA_MBUS_TALK_GET_PORT_DUPLEX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * 
NE6X_MSG_PORT_DUPLEX + 2) +#define AXIA_MBUS_TALK_GET_PORT_DUPLEX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DUPLEX + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 0) +#define AXIA_MBUS_TALK_SET_PORT_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 2) +#define AXIA_MBUS_TALK_GET_PORT_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED + 3) + +#define AXIA_MBUS_TALK_SET_PORT_STATS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 0) +#define AXIA_MBUS_TALK_SET_PORT_STATS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 1) + +#define AXIA_MBUS_TALK_GET_PORT_STATS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 2) +#define AXIA_MBUS_TALK_GET_PORT_STATS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATS + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SFP_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 0) +#define AXIA_MBUS_TALK_SET_PORT_SFP_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SFP_SPEED_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 2) +#define AXIA_MBUS_TALK_GET_PORT_SFP_SPEED_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_SPEED + 3) + +#define AXIA_MBUS_TALK_SET_PORT_FEC_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 0) +#define AXIA_MBUS_TALK_SET_PORT_FEC_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 1) + +#define AXIA_MBUS_TALK_GET_PORT_FEC_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 2) +#define AXIA_MBUS_TALK_GET_PORT_FEC_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_FEC + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SPEED_MAX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 0) +#define 
AXIA_MBUS_TALK_SET_PORT_SPEED_MAX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 1) + +#define AXIA_MBUS_TALK_GET_PORT_SPEED_MAX_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 2) +#define AXIA_MBUS_TALK_GET_PORT_SPEED_MAX_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SPEED_MAX + 3) + +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 0) +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 1) + +#define AXIA_MBUS_TALK_GET_PORT_PAUSE_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 2) +#define AXIA_MBUS_TALK_GET_PORT_PAUSE_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE + 3) + +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_ADDR_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 0) +#define AXIA_MBUS_TALK_SET_PORT_PAUSE_ADDR_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 1) + +#define AXIA_MBUS_TALK_GET_PORT_PAUSE_ADDR_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 2) +#define AXIA_MBUS_TALK_GET_PORT_PAUSE_ADDR_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_PAUSE_ADDR + 3) + +#define AXIA_MBUS_TALK_SET_PORT_LOOPBACK_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 0) +#define AXIA_MBUS_TALK_SET_PORT_LOOPBACK_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 1) + +#define AXIA_MBUS_TALK_GET_PORT_LOOPBACK_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 2) +#define AXIA_MBUS_TALK_GET_PORT_LOOPBACK_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LOOPBACK + 3) + +#define AXIA_MBUS_TALK_SET_PORT_MAX_FRAME_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 0) +#define AXIA_MBUS_TALK_SET_PORT_MAX_FRAME_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 1) + +#define AXIA_MBUS_TALK_GET_PORT_MAX_FRAME_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * 
NE6X_MSG_PORT_MAX_FRAME + 2) +#define AXIA_MBUS_TALK_GET_PORT_MAX_FRAME_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_MAX_FRAME + 3) + +#define AXIA_MBUS_TALK_SET_PORT_AUTO_NEG_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 0) +#define AXIA_MBUS_TALK_SET_PORT_AUTO_NEG_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 1) + +#define AXIA_MBUS_TALK_GET_PORT_AUTO_NEG_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 2) +#define AXIA_MBUS_TALK_GET_PORT_AUTO_NEG_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_AUTO_NEG + 3) + +#define AXIA_MBUS_TALK_SET_PORT_INFO_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 0) +#define AXIA_MBUS_TALK_SET_PORT_INFO_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 1) + +#define AXIA_MBUS_TALK_GET_PORT_INFO_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 2) +#define AXIA_MBUS_TALK_GET_PORT_INFO_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_INFO + 3) + +#define AXIA_MBUS_TALK_SET_PORT_LINK_STATUS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 0) +#define AXIA_MBUS_TALK_SET_PORT_LINK_STATUS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 1) + +#define AXIA_MBUS_TALK_GET_PORT_LINK_STATUS_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 2) +#define AXIA_MBUS_TALK_GET_PORT_LINK_STATUS_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_LINK_STATUS + 3) + +#define AXIA_MBUS_TALK_SET_PORT_DRV_I2C_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 0) +#define AXIA_MBUS_TALK_SET_PORT_DRV_I2C_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 1) + +#define AXIA_MBUS_TALK_GET_PORT_DRV_I2C_COMMAND \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 2) +#define AXIA_MBUS_TALK_GET_PORT_DRV_I2C_ACK \ + (AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_DRV_I2C + 3) + +#define AXIA_MBUS_TALK_SET_PORT_SELF_TEST_COMMAND \ + 
(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 0)
#define AXIA_MBUS_TALK_SET_PORT_SELF_TEST_ACK \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 1)

#define AXIA_MBUS_TALK_GET_PORT_SELF_TEST_COMMAND \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 2)
#define AXIA_MBUS_TALK_GET_PORT_SELF_TEST_ACK \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SELF_TEST + 3)

#define AXIA_MBUS_TALK_SET_PORT_SFP_TYPE_LEN_COMMAND \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 0)
#define AXIA_MBUS_TALK_SET_PORT_SFP_TYPE_LEN_ACK \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 1)

#define AXIA_MBUS_TALK_GET_PORT_SFP_TYPE_LEN_COMMAND \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 2)
#define AXIA_MBUS_TALK_GET_PORT_SFP_TYPE_LEN_ACK \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_TYPE_LEN + 3)

#define AXIA_MBUS_TALK_SET_PORT_SFP_EEPROM_COMMAND \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 0)
#define AXIA_MBUS_TALK_SET_PORT_SFP_EEPROM_ACK \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 1)

#define AXIA_MBUS_TALK_GET_PORT_SFP_EEPROM_COMMAND \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 2)
#define AXIA_MBUS_TALK_GET_PORT_SFP_EEPROM_ACK \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_SFP_EEPROM + 3)

#define AXIA_MBUS_TALK_SET_PORT_STATE_COMMAND \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 0)
#define AXIA_MBUS_TALK_SET_PORT_STATE_ACK \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 1)

#define AXIA_MBUS_TALK_GET_PORT_STATE_COMMAND \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 2)
#define AXIA_MBUS_TALK_GET_PORT_STATE_ACK \
	(AXIA_MBUS_TALK_PORT_BASE + 4 * NE6X_MSG_PORT_STATE + 3)

/* Stand-alone mailbox opcode pairs (not derived from a per-port selector).
 * Requests use odd opcodes, the firmware's acknowledge is the following even
 * value (see the parity check in _ne6x_reg_perform()).
 */
#define AXIA_MBUS_SET_NIC_START_COMMAND 0x9F
#define AXIA_MBUS_SET_NIC_START_ACK 0xA0
#define AXIA_MBUS_SET_NIC_STOP_COMMAND 0xA1
#define AXIA_MBUS_SET_NIC_STOP_ACK 0xA2
#define AXIA_MBUS_GET_NIC_STATE_COMMAND 0xA3
#define AXIA_MBUS_GET_NIC_STATE_ACK 0xA4
#define AXIA_MBUS_SET_NP_USERDATA_COMMAND 0xA5
#define AXIA_MBUS_SET_NP_USERDATA_ACK 0xA6
#define AXIA_MBUS_GET_NP_USERDATA_COMMAND 0xA7
#define AXIA_MBUS_GET_NP_USERDATA_ACK 0xA8

#define AXIA_MBUS_SET_LED_STATE_COMMAND 0xA9
#define AXIA_MBUS_SET_LED_STATE_ACK 0xAA

#define AXIA_MBUS_CONFIG_METER_COMMAND 0xAB
#define AXIA_MBUS_CONFIG_METER_ACK 0xAC

#define AXIA_MBUS_CLEAR_CREDIT_COMMAND 0xAD
#define AXIA_MBUS_CLEAR_CREDIT_ACK 0xAE

#define AXIA_MBUS_SET_FAST_L2FDB_COMMAND 0xD1
#define AXIA_MBUS_SET_FAST_L2FDB_ACK 0xD2

#define AXIA_MBUS_GET_DUMP_DATA_LEN_COMMAND 0xD3
#define AXIA_MBUS_GET_DUMP_DATA_LEN_ACK 0xD4

#define AXIA_MBUS_GET_DUMP_DATA_COMMAND 0xD5
#define AXIA_MBUS_GET_DUMP_DATA_ACK 0xD6

#define AXIA_MBUS_CLR_TABLE_COMMAND 0xD7
#define AXIA_MBUS_CLR_TABLE_ACK 0xD8

#define AXIA_MBUS_SET_NOFLASH_WRITE_PROTECT_COMMAND 0xD9
#define AXIA_MBUS_SET_NOFLASH_WRITE_PROTECT_ACK 0xDA

#define AXIA_MBUS_GET_NOFLASH_WRITE_PROTECT_COMMAND 0xDB
#define AXIA_MBUS_GET_NOFLASH_WRITE_PROTECT_ACK 0xDC

#define AXIA_MBUS_OPT_NOFLASH_COMMAND 0xDD
#define AXIA_MBUS_OPT_NOFLASH_ACK 0xDE

/* Shared-memory window used to exchange mailbox messages with the C810
 * management CPU: header word at _MBUS_BASE, payload words at _DATA_BASE.
 */
#define PCIE2C810_SHM_MBUS_BASE 0x20878000
#define PCIE2C810_SHM_DATA_BASE 0x20878004

/* mem_type field values encoded into ne6x_reg_table_info::size bits 24-26 */
#define MEM_ONCHIP_64BIT 0x00
#define MEM_ONCHIP_512BIT 0x01
#define MEM_ONXDDR_512BIT 0x04

/* Lookup-engine selector encoded into table descriptors (bits 28-31). */
enum engine_idx {
	ENGINE_DIRECT_TABLE0 = 0x1,
	ENGINE_DIRECT_TABLE1,
	ENGINE_HASHA_TABLE,
	ENGINE_HASHB_TABLE,
};

/* On-wire mailbox message: one 32-bit header followed by payload words.
 * data_len is the total message length in bytes including the 4-byte header
 * (e.g. a command with one payload word uses data_len = 8).  'e' is the
 * firmware status field; a response with e == 1 is treated as an error by
 * _ne6x_reg_perform().  dst_block/src_block routing semantics are defined by
 * the firmware side -- not visible here.
 */
struct axia_mbus_msg {
	union {
		u32 uint;
		struct {
#if defined(__BIG_ENDIAN_BITFIELD)
			u32 opcode : 8;
			u32 dst_block : 4;
			u32 src_block : 4;
			u32 data_len : 14;
			u32 e : 2;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
			u32 e : 2;
			u32 data_len : 14;
			u32 src_block : 4;
			u32 dst_block : 4;
			u32 opcode : 8;
#endif
		} bits;
	} hdr;
	u32 data[];
} __packed;

/* Register ranges exercised by the diagnostic self-test; {0}-terminated. */
struct ne6x_diag_reg_test_info ne6x_reg_list[] = {
	/* offset mask elements stride */
	{NE6X_VP_BASE_ADDR, 0xFFFFFFFFFFFFFFFF,
	 NE6X_VP_INT, 0},
	{0}
};

/* Descriptor of one firmware-managed table: where it lives (engine + memory
 * type packed into 'size'), the mailbox opcodes used to access it, and the
 * per-operation record sizes for the advanced (insert/delete/lookup/update)
 * command set.
 */
struct ne6x_reg_table_info {
	u32 addr; /* engine id as base address */
	u32 size; /* 00 - 15: length
		   * 16 - 20: entry count multiplier (see TABLE_XNUM)
		   * 21 - 23: entry_num
		   * 24 - 26: mem_type
		   * 27 - 27: mem_type_bucket
		   * 28 - 31: opcode
		   */
	u32 opcode_read;
	u32 opcode_write;
#define ADV_CMD_DISABLE 0x00
/* NOTE(review): "EBABLE" is a typo for "ENABLE"; kept because it is part of
 * the identifier namespace shared with ne6x_reg.h.
 */
#define ADV_CMD_EBABLE 0x01
	u32 advanced_cmd;
	u32 opcode_insert;
	u32 opcode_delete;
	u32 opcode_lookup;
	u32 opcode_update;
	u32 size_insert;
	u32 size_delete;
	u32 size_lookup;
	u32 size_update;
};

/* One entry per enum ne6x_reg_table value, in declaration order. */
static struct ne6x_reg_table_info table_info[] = {
	/* address size(tableidx + memtype + bucket + entry_num + size)
	 * read write adv_cmd insert delete lookup size_insert size_delete size_lookup
	 */
	{0x00000000,
	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (8 << 16) | 0x0200,
	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x00, 0x00, 0x00, 0x00,
	 0x00, 0x00, 0x00, 0x00},

	{0x10000000,
	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0040,
	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x01,
	 AXIA_MBUS_INSERT_COMMAND, AXIA_MBUS_DELETE_COMMAND, AXIA_MBUS_LOOKUP_COMMAND,
	 AXIA_MBUS_UPDATE_COMMAND, 128, 64, 64, 64},

	{0x20000000,
	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0010,
	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00,
	 0x00, 0x00, 0x00, 0x00},

	{0x30000000,
	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (8 << 16) | 0x0008,
	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00,
	 0x00, 0x00, 0x00, 0x00},

	{0x40000000,
	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (4 << 16) | 0x0100,
	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00,
	 0x00, 0x00, 0x00, 0x00},

	{0x50000000,
	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_512BIT << 24) | (1 << 21) | (1 << 16) | 0x0040,
	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x00, 0x31, 0x33, 0x35, 0x00,
	 0x00, 0x00, 0x00, 0x00},

	{0x60000000,
	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0040,
	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x01,
	 AXIA_MBUS_INSERT_COMMAND, AXIA_MBUS_DELETE_COMMAND, AXIA_MBUS_LOOKUP_COMMAND,
	 AXIA_MBUS_UPDATE_COMMAND, 128, 64, 64, 64},

	{0x70000000,
	 (ENGINE_DIRECT_TABLE0 << 28) | (MEM_ONCHIP_64BIT << 24) | (1 << 21) | (2 << 16) | 0x0040,
	 AXIA_MBUS_READ_TABLE_COMMAND, AXIA_MBUS_WRITE_TABLE_COMMAND, 0x01,
	 AXIA_MBUS_INSERT_COMMAND, AXIA_MBUS_DELETE_COMMAND, AXIA_MBUS_LOOKUP_COMMAND,
	 AXIA_MBUS_UPDATE_COMMAND, 96, 64, 64, 32},
};

/* Field accessors for table_info[]; opcode values are masked to 6 bits. */
#define TABLE_ADDR(table) (table_info[table].addr & 0xF0000000)
#define TABLE_SIZE(table) (table_info[table].size & 0x00000FFF)
#define TABLE_XMEM(table) (table_info[table].size & 0xFFE00000)
#define TABLE_XNUM(table) ((table_info[table].size >> 16) & 0xF)

#define TABLE_OPCODE_WRITE(table) (table_info[table].opcode_write & 0x3F)
#define TABLE_OPCODE_READ(table) (table_info[table].opcode_read & 0x3F)
#define TABLE_ADVCMD_VALID(table) (table_info[table].advanced_cmd == 0x01)
#define TABLE_OPCODE_INSERT(table) (table_info[table].opcode_insert & 0x3F)
#define TABLE_OPCODE_DELETE(table) (table_info[table].opcode_delete & 0x3F)
#define TABLE_OPCODE_LOOKUP(table) (table_info[table].opcode_lookup & 0x3F)

#define TABLE_OPCODE_UPDATE(table) (table_info[table].opcode_update & 0x3F)

#define TABLE_SIZE_INSERT(table) (table_info[table].size_insert)
#define TABLE_SIZE_DELETE(table) (table_info[table].size_delete)
#define TABLE_SIZE_LOOKUP(table) (table_info[table].size_lookup)
#define TABLE_SIZE_UPDATE(table) (table_info[table].size_update)
#define TABLE_SIZE_LOOKUP_RET(table) (table_info[table].size & 0xFFF)

/* NOTE(review): struct ne6x_reg_table_info has no member named 'table_num';
 * this macro will not compile if ever used.  Apparently dead code -- confirm
 * and remove.
 */
#define NUM_TABLE(table) (table_info[table].table_num)

static
u64 local_module_base;

/* Serialize all mailbox/APB traffic through the PF's mbus mutex. */
void ne6x_reg_lock(struct ne6x_pf *pf)
{
	mutex_lock(&pf->mbus_comm_mutex);
}

void ne6x_reg_unlock(struct ne6x_pf *pf)
{
	mutex_unlock(&pf->mbus_comm_mutex);
}

/* 64-bit MMIO write at (base_addr << 12) + (offset_addr << 4) into the BAR. */
void ne6x_switch_pci_write(void *bar_base, u32 base_addr, u32 offset_addr, u64 reg_value)
{
	unsigned int reg_offset = 0;
	void __iomem *addr = NULL;

	reg_offset = (base_addr << 12) + (offset_addr << 4);
	addr = bar_base + reg_offset;
	writeq(reg_value, addr);
}

/* 64-bit MMIO read; addressing mirrors ne6x_switch_pci_write(). */
u64 ne6x_switch_pci_read(void *bar_base, u32 base_addr, u32 offset_addr)
{
	unsigned int reg_offset = 0;
	void __iomem *addr = NULL;
	u64 val = 0;

	reg_offset = (base_addr << 12) + (offset_addr << 4);
	addr = bar_base + reg_offset;
	val = readq(addr);

	return val;
}

/* Convenience wrappers binding the switch accessors to the PF's BAR4. */
void ne6x_reg_pci_write(struct ne6x_pf *pf, u32 base_addr, u32 offset_addr, u64 reg_value)
{
	ne6x_switch_pci_write(pf->hw.hw_addr4, base_addr, offset_addr, reg_value);
}

u64 ne6x_reg_pci_read(struct ne6x_pf *pf, u32 base_addr, u32 offset_addr)
{
	return ne6x_switch_pci_read(pf->hw.hw_addr4, base_addr, offset_addr);
}

#define BAR4_CSR_OFFSET 0x3C0
/* Indirect AXI read through the BAR4 CSR window.
 * The request is written twice (with and without the 0x4000...0 command bit)
 * and the result register is read three times, only the last value being
 * used -- presumably to flush/settle the indirect-access pipeline.
 * NOTE(review): looks like deliberate hardware-mandated sequencing; do not
 * "simplify" without confirming against the datasheet.
 */
u32 ne6x_reg_axi_read(struct ne6x_pf *pf, u32 offset)
{
	u64 reg_offset = offset & 0xFFFFFFFC;
	u64 reg_value = 0x4000000000000000ULL + (reg_offset << 30);

	ne6x_reg_pci_write(pf, BAR4_CSR_OFFSET, 0x0, reg_value);
	reg_value = (reg_offset << 30);
	ne6x_reg_pci_write(pf, BAR4_CSR_OFFSET, 0x0, reg_value);
	reg_value = ne6x_reg_pci_read(pf, BAR4_CSR_OFFSET, 0x0);
	reg_value = ne6x_reg_pci_read(pf, BAR4_CSR_OFFSET, 0x0);

	return ne6x_reg_pci_read(pf, BAR4_CSR_OFFSET, 0x0) & 0xFFFFFFFFUL;
}

/* Indirect AXI write: command bit + (word-aligned offset << 30) + value. */
void ne6x_reg_axi_write(struct ne6x_pf *pf, u32 offset, u32 value)
{
	u64 reg_offset = offset & 0xFFFFFFFC;
	u64 reg_value = 0x4000000000000000ULL + (reg_offset << 30) + value;

	reg_offset = (reg_offset << 30);
	ne6x_reg_pci_write(pf, BAR4_CSR_OFFSET, 0x0, reg_value);
}

/* APB read: if the target 64KiB module window differs from the cached base,
 * reprogram the window (0x10000000 | high offset bits) first, then read via
 * the 0x27A00000 low-window command.
 * NOTE(review): local_module_base is never assigned after the window switch,
 * so the window register is rewritten on every access -- confirm whether a
 * cache update was intended.
 */
u32 _reg_apb_read(struct ne6x_pf *pf, u64 offset)
{
	u32 offset_l = 0x27A00000 | ((offset << 4) & 0xFFFF0);
	u32 offset_h;
	u32 data = 0;

	if ((offset & 0xFFFFF0000ULL) != local_module_base) {
		offset_h = 0x10000000 | ((offset >> 12) & 0xFFFFF0);
		ne6x_reg_axi_write(pf, offset_h, 0xA1B2C3D4);
	}

	data = ne6x_reg_axi_read(pf, offset_l);

	return data;
}

/* APB write counterpart of _reg_apb_read(); write command base 0x2FA00000. */
void _reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value)
{
	u32 offset_l;
	u32 offset_h;

	if ((offset & 0xFFFFF0000ULL) != local_module_base) {
		offset_h = 0x10000000 | ((offset >> 12) & 0xFFFFF0);
		ne6x_reg_axi_write(pf, offset_h, 0xA2B2C3D4);
	}

	offset_l = 0x2FA00000 | ((offset << 4) & 0xFFFF0);
	ne6x_reg_axi_write(pf, offset_l, value);
}

/* Poll budget for mailbox completion; temporarily raised by long-running
 * operations (firmware upgrade, table clear).  Not protected by any lock.
 */
u32 NE6X_ACCESS_TIMEOUT = 9999;
/* Core mailbox transaction (caller must hold the mbus lock):
 * write 'len' words of the request into the shared-memory mailbox, kick the
 * C810 doorbell (0x20680014 <- 0x3FEC), poll until the header opcode turns
 * even (response), then optionally read back 'retlen' words into pbuf.
 * Returns 0, -ETIMEDOUT on no response, or -EAGAIN when the firmware sets
 * the error bit.
 */
int _ne6x_reg_perform(struct ne6x_pf *pf, u32 *data, u32 *pbuf, u32 len, u32 retlen)
{
	struct axia_mbus_msg resp;
	int timeout = 0, index = 0;

	memset(&resp, 0, sizeof(resp));

	/* Write Command(s) */
	for (index = 0; index < len; index++)
		_reg_apb_write(pf, PCIE2C810_SHM_MBUS_BASE + 4 * index, data[index]);

	/* Start mbus mechanism, notice c810 */
	_reg_apb_write(pf, 0x20680014, 0x3FEC);

	usleep_range(200, 300);

	/* check if c810 handle completed */
	while (timeout < NE6X_ACCESS_TIMEOUT) {
		resp.hdr.uint = _reg_apb_read(pf, PCIE2C810_SHM_MBUS_BASE);

		/* resp opcode is even number, request opcode is odd number */
		if ((resp.hdr.bits.opcode & 0x01) == 0x0)
			break;

		timeout++;
		usleep_range(200, 220);
	}

	if (timeout >= NE6X_ACCESS_TIMEOUT) {
		dev_info(ne6x_pf_to_dev(pf), "%s: timeout! (%d)\n", __func__, timeout);
		return -ETIMEDOUT;
	}

	if (resp.hdr.bits.e == 1) {
		dev_info(ne6x_pf_to_dev(pf), "%s: response.bits.e = 1 !\n", __func__);
		return -EAGAIN;
	}

	if (!pbuf)
		return 0;

	for (index = 0; index < retlen; index++)
		pbuf[index] = _reg_apb_read(pf, PCIE2C810_SHM_DATA_BASE + 4 * index);

	return 0;
}

/* Locked wrapper around _ne6x_reg_perform(). */
int ne6x_reg_perform(struct ne6x_pf *pf, u32 *data, u32 *pbuf, u32 len, u32 retlen)
{
	int status;

	ne6x_reg_lock(pf);
	status = _ne6x_reg_perform(pf, data, pbuf, len, retlen);
	ne6x_reg_unlock(pf);

	return status;
}

/* Locked APB accessors for external callers. */
u32 ne6x_reg_apb_read(struct ne6x_pf *pf, u64 offset)
{
	u32 data;

	ne6x_reg_lock(pf);
	data = _reg_apb_read(pf, offset);
	ne6x_reg_unlock(pf);

	return data;
}

void ne6x_reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value)
{
	ne6x_reg_lock(pf);
	_reg_apb_write(pf, offset, value);
	ne6x_reg_unlock(pf);
}

/* Read one firmware register via the mailbox (opcode READ_REGISTER).
 * Note: retlen is 1 but the perform helper is asked for 2 words of request
 * (header + address).
 */
int ne6x_reg_indirect_read(struct ne6x_pf *pf, u32 addr, u32 *value)
{
	struct axia_mbus_msg *msg;
	int status;

	msg = kzalloc(16, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->hdr.uint = 0;
	msg->hdr.bits.opcode = AXIA_MBUS_READ_REGISTER_COMMAND;
	msg->hdr.bits.data_len = 8;
	msg->data[0] = addr;

	status = ne6x_reg_perform(pf, (u32 *)msg, value, 2, 1);
	kfree(msg);

	return status;
}

/* Write one firmware register via the mailbox (opcode WRITE_REGISTER). */
int ne6x_reg_indirect_write(struct ne6x_pf *pf, u32 addr, u32 value)
{
	struct axia_mbus_msg *msg;
	int status;

	msg = kzalloc(16, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->hdr.uint = 0;
	msg->hdr.bits.opcode = AXIA_MBUS_WRITE_REGISTER_COMMAND;
	msg->hdr.bits.data_len = 12;
	msg->data[0] = addr;
	msg->data[1] = value;

	status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3, 0);
	kfree(msg);

	return status;
}

/* Bound-check a table index against the feature set advertised for this HW
 * revision (hw_flag != 0 exposes the ACL/ARFS tables as well).
 */
static bool ne6x_reg_valid_table(struct ne6x_pf *pf, enum ne6x_reg_table table)
{
	if (pf->hw_flag != 0) {
		if (table > NE6X_REG_ARFS_TABLE)
			return false;
	} else {
		if (table > NE6X_REG_VF_BW_TABLE)
			return false;
	}

	return
true;
}

/* Read 'size' bytes (a whole number of entries) from a direct table starting
 * at 'index'.  Payload layout: data[0] = engine base + scaled index,
 * data[1] = memory-type bits + byte count.
 */
int ne6x_reg_table_read(struct ne6x_pf *pf, enum ne6x_reg_table table,
			int index, void *data, int size)
{
	struct axia_mbus_msg *msg;
	int status;

	if (size % TABLE_SIZE(table) != 0x00)
		return -EINVAL;

	if (!ne6x_reg_valid_table(pf, table))
		return -EINVAL;

	msg = kzalloc(1028, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->hdr.uint = 0;
	msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_READ(table));
	msg->hdr.bits.data_len = 12;
	msg->data[0] = TABLE_ADDR(table) + index * TABLE_XNUM(table);
	msg->data[1] = TABLE_XMEM(table) + size;

	status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)data, 3, size / 4);
	kfree(msg);

	return status;
}

/* Direct-write a table entry; only valid for tables WITHOUT the advanced
 * (insert/delete/lookup) command set.
 */
int ne6x_reg_table_write(struct ne6x_pf *pf, enum ne6x_reg_table table,
			 int index, void *data, int size)
{
	struct axia_mbus_msg *msg;
	int status;

	if (TABLE_ADVCMD_VALID(table))
		return -EINVAL;

	if (!ne6x_reg_valid_table(pf, table))
		return -EINVAL;

	msg = kzalloc(1028, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->hdr.uint = 0;
	msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_WRITE(table));
	msg->hdr.bits.data_len = 12 + size;
	msg->data[0] = TABLE_ADDR(table) + index * TABLE_XNUM(table);
	msg->data[1] = TABLE_XMEM(table) + size;
	memcpy(&msg->data[2], data, size);

	status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + size / 4, 0);
	kfree(msg);

	return status;
}

/* Insert one or more records (size must be a multiple of the table's insert
 * record size); the firmware optionally returns one id per inserted record
 * into 'table_id'.
 */
int ne6x_reg_table_insert(struct ne6x_pf *pf, enum ne6x_reg_table table,
			  u32 *data, int size, u32 *table_id)
{
	struct axia_mbus_msg *msg;
	int status, count;

	if (TABLE_ADVCMD_VALID(table) == 0x0)
		return -EINVAL;

	if (size % TABLE_SIZE_INSERT(table) != 0x00)
		return -EINVAL;

	if (!ne6x_reg_valid_table(pf, table))
		return -EINVAL;

	msg = kzalloc(1028, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	count = size / TABLE_SIZE_INSERT(table);

	msg->hdr.uint = 0;
	msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_INSERT(table));
	msg->hdr.bits.data_len = 12 + size;
	msg->data[0] = TABLE_ADDR(table);
	msg->data[1] = TABLE_XMEM(table) + TABLE_SIZE_INSERT(table);
	memcpy((void *)&msg->data[2], (void *)data, size);

	status = ne6x_reg_perform(pf, (u32 *)msg, table_id, 3 + (size >> 2),
				  (!table_id) ? 0 : count);
	kfree(msg);

	return status;
}

/* Delete exactly one record; 'size' must equal the table's delete key size. */
int ne6x_reg_table_delete(struct ne6x_pf *pf, enum ne6x_reg_table table, u32 *data, int size)
{
	struct axia_mbus_msg *msg;
	int status;

	if (TABLE_ADVCMD_VALID(table) == 0x0)
		return -EINVAL;

	if (TABLE_SIZE_DELETE(table) != size)
		return -EINVAL;

	if (!ne6x_reg_valid_table(pf, table))
		return -EINVAL;

	msg = kzalloc(1028, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->hdr.uint = 0;
	msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_DELETE(table));
	msg->hdr.bits.data_len = 12 + size;
	msg->data[0] = TABLE_ADDR(table);
	msg->data[1] = TABLE_XMEM(table) + size;
	memcpy(&msg->data[2], data, size);

	status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (size >> 2), 0);
	kfree(msg);

	return status;
}

/* Look up records by key; any perform failure is reported as -ENOENT. */
int ne6x_reg_table_search(struct ne6x_pf *pf, enum ne6x_reg_table table,
			  u32 *data, int size, u32 *ret_data, int ret_size)
{
	struct axia_mbus_msg *msg;
	int status;

	if (TABLE_ADVCMD_VALID(table) == 0x0)
		return -EINVAL;

	if (size % TABLE_SIZE_LOOKUP(table) != 0x00)
		return -EINVAL;

	if (!ne6x_reg_valid_table(pf, table))
		return -EINVAL;

	msg = kzalloc(1036, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->hdr.uint = 0;
	msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_LOOKUP(table));
	msg->hdr.bits.data_len = 12 + size;
	msg->data[0] = TABLE_ADDR(table);
	msg->data[1] = TABLE_XMEM(table) + TABLE_SIZE_LOOKUP_RET(table);
	memcpy((void *)&msg->data[2], (void *)data, size);

	status = ne6x_reg_perform(pf, (u32 *)msg, ret_data, 3 + (size >> 2), ret_size / 4);
	kfree(msg);

	return (status != 0) ? -ENOENT : status;
}

/* Update a record addressed by firmware-assigned 'index' (from insert). */
int ne6x_reg_table_update(struct ne6x_pf *pf, enum ne6x_reg_table table,
			  u32 index, u32 *data, int size)
{
	struct axia_mbus_msg *msg;
	int status;

	if (TABLE_ADVCMD_VALID(table) == 0x0)
		return -EINVAL;

	if (size % TABLE_SIZE_UPDATE(table) != 0x00)
		return -EINVAL;

	if (!ne6x_reg_valid_table(pf, table))
		return -EINVAL;

	msg = kzalloc(1036, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->hdr.uint = 0;
	msg->hdr.bits.opcode = (u32)(TABLE_OPCODE_UPDATE(table));
	msg->hdr.bits.data_len = 16 + size;
	msg->data[0] = TABLE_ADDR(table);
	msg->data[1] = index;
	msg->data[2] = TABLE_SIZE_UPDATE(table);
	memcpy((void *)&msg->data[3], (void *)data, size);

	status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4 + (size >> 2), 0);
	kfree(msg);

	return (status != 0) ? -ENOENT : status;
}

/* Per-port get/set ("talk") transaction.  The opcode is derived from the
 * selector: base + 4*talk + 2*opcode (SET=0, GET=1); GET reads 'size' bytes
 * back into pbuf, SET sends them.
 */
int ne6x_reg_talk_port(struct ne6x_pf *pf, enum ne6x_reg_talk_port talk,
		       enum ne6x_reg_talk_opcode opcode,
		       int port, void *pbuf, int size)
{
	struct axia_mbus_msg *msg;
	int status;

	if (((size % 4) != 0) || size > 512)
		return -EINVAL;

	msg = kzalloc(520, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->hdr.uint = 0;
	msg->hdr.bits.opcode = (AXIA_MBUS_TALK_PORT_BASE + 4 * talk + 2 * opcode);
	msg->hdr.bits.data_len = 8 + size;
	msg->data[0] = port;
	if (pbuf)
		memcpy(&msg->data[1], pbuf, size);

	status = ne6x_reg_perform(pf, (u32 *)msg, (opcode == NE6X_TALK_GET) ? pbuf : NULL,
				  2 + ((opcode == NE6X_TALK_GET) ? 0 : (size >> 2)),
				  (opcode == NE6X_TALK_GET) ?
(size >> 2) : 0); + kfree(msg); + + return status; +} + +int ne6x_reg_reset_firmware(struct ne6x_pf *pf) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_RESET_FIRMWARE_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 1, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_e2prom_read(struct ne6x_pf *pf, u32 offset, void *pbuf, int size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(1040, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (size > 2048) + size = 2048; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_E2PROM_READ_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = offset; + msg->data[1] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)pbuf, 3, size / 4); + kfree(msg); + + return status; +} + +int ne6x_reg_e2prom_write(struct ne6x_pf *pf, u32 offset, void *pbuf, int size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(1040, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (size > 1024) + size = 1024; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_E2PROM_WRITE_COMMAND; + msg->hdr.bits.data_len = 12 + (size / 4) * 4; + msg->data[0] = (offset); + msg->data[1] = (size); + memcpy((void *)&msg->data[1], (void *)pbuf, (ssize_t)size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (size / 4), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_fan_speed(struct ne6x_pf *pf, u32 *speed) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_FAN_SPEED_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)speed, 1, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_set_fan_speed(struct ne6x_pf *pf, u32 speed) +{ + struct axia_mbus_msg *msg; + int status; + 
+ msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_FAN_SPEED_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = speed; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_soc_info(struct ne6x_pf *pf, u32 class_type, u32 *ret, u32 size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_SYSTEM_INFO_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = class_type; + msg->data[1] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)ret, 3, size >> 2); + kfree(msg); + + return status; +} + +int ne6x_reg_send_bit(struct ne6x_pf *pf, u32 port, u32 mode) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_SYSTEM_INFO_COMMAND; + msg->hdr.bits.data_len = 16; + msg->data[0] = 4; + msg->data[1] = port; + msg->data[2] = mode; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_mem_read(struct ne6x_pf *pf, u32 addr, void *pbuf, u32 size) +{ + struct axia_mbus_msg *msg; + int status; + + if (size > 1024) + size = 1024; + + msg = kzalloc(520, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_READ_MEMORY_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = addr; + msg->data[1] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)pbuf, 3, size / 4); + kfree(msg); + + return status; +} + +int ne6x_reg_mem_write(struct ne6x_pf *pf, u32 addr, void *pbuf, u32 size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(520, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (size > 1024) + size = 1024; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = 
AXIA_MBUS_WRITE_MEMORY_COMMAND; + msg->hdr.bits.data_len = 12 + (size / 4) * 4; + msg->data[0] = addr; + msg->data[1] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (size / 4), 0); + kfree(msg); + + return status; +} + +#define NE6X_FW_MAX_FRG_SIZE (4 * 1024) +int ne6x_reg_upgrade_firmware(struct ne6x_pf *pf, u8 region, u8 *data, int size) +{ + struct axia_mbus_msg *msg; + int offset = 0, left_size = 0, frag_size = 0; + int status = 0; + + msg = kzalloc(NE6X_FW_MAX_FRG_SIZE + 16, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + ne6x_reg_lock(pf); + /* scile begin */ + NE6X_ACCESS_TIMEOUT = 100000; + left_size = size; + while (left_size) { + frag_size = (left_size >= NE6X_FW_MAX_FRG_SIZE) ? NE6X_FW_MAX_FRG_SIZE : left_size; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_UPGRADE_COMMAND; + msg->hdr.bits.data_len = 12 + frag_size; + msg->data[0] = region; /* region */ + msg->data[1] = frag_size; /* size */ + memcpy(&msg->data[2], data + offset, frag_size); + + status |= _ne6x_reg_perform(pf, (u32 *)msg, NULL, 3 + (frag_size >> 2), 0); + if (status) + goto err_upgrade; + + left_size -= frag_size; + offset += frag_size; + } + +err_upgrade: + /* scile end */ + NE6X_ACCESS_TIMEOUT = 999; + ne6x_reg_unlock(pf); + kfree(msg); + + return status; +} + +int ne6x_reg_get_ver(struct ne6x_pf *pf, struct ne6x_firmware_ver_info *version) +{ + struct axia_mbus_msg *msg; + u32 *out_buffer = (u32 *)version; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_VER_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, out_buffer, 1, + sizeof(struct ne6x_firmware_ver_info) / sizeof(u32)); + kfree(msg); + + return status; +} + +int ne6x_reg_get_sfp_eeprom(struct ne6x_pf *pf, int port, void *pbuf, u32 offset, int size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(1040, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + if (size > 
2048) + size = 2048; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_TALK_GET_PORT_SFP_EEPROM_COMMAND; + msg->hdr.bits.data_len = 16; + msg->data[0] = port; + msg->data[1] = offset; + msg->data[2] = size; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)pbuf, 4, size / 4); + kfree(msg); + + return status; +} + +int ne6x_reg_nic_start(struct ne6x_pf *pf, u32 flag) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_NIC_START_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = flag; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_nic_stop(struct ne6x_pf *pf, u32 flag) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_NIC_STOP_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = flag; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_nic_state(struct ne6x_pf *pf, u32 *state) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_NIC_STATE_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, (u32 *)state, 1, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_set_user_data_template(struct ne6x_pf *pf, enum np_user_data type, u32 data) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_NP_USERDATA_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = type; + msg->data[1] = data; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3, 0); + kfree(msg); + + return status; +} + +int 
ne6x_reg_get_user_data_template(struct ne6x_pf *pf, enum np_user_data type, u32 *data) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_NP_USERDATA_COMMAND; + msg->hdr.bits.data_len = 4; + msg->data[0] = type; + + status = ne6x_reg_perform(pf, (u32 *)msg, data, 2, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_set_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 data) +{ + return ne6x_reg_set_user_data_template(pf, type, data); +} + +int ne6x_reg_get_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 *data) +{ + int status = 0; + + status = ne6x_reg_get_user_data_template(pf, type, data); + + return status; +} + +int ne6x_reg_set_led(struct ne6x_pf *pf, int port, bool state) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(32, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_LED_STATE_COMMAND; + msg->hdr.bits.data_len = 12; + msg->data[0] = port; + msg->data[1] = state; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 3, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_config_meter(struct ne6x_pf *pf, u32 meter_id, u32 *data, int size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(520, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_CONFIG_METER_COMMAND; + msg->hdr.bits.data_len = size + 8; + msg->data[0] = meter_id; + memcpy((void *)&msg->data[1], (void *)data, size); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2 + (size / 4), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_set_unicast_for_fastmode(struct ne6x_pf *pf, u32 index, u32 *data, + u32 size) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_SET_FAST_L2FDB_COMMAND; + 
	msg->hdr.bits.data_len = size + 8;
	msg->data[0] = index;
	/* NOTE(review): 'size' is caller-controlled and copied into a
	 * fixed-size allocation made above -- verify callers never exceed it.
	 */
	memcpy((void *)&msg->data[1], (void *)data, size);

	status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2 + (size / 4), 0);
	kfree(msg);

	return status;
}

/* Ask the firmware how many bytes of dump data are available. */
int ne6x_reg_get_dump_data_len(struct ne6x_pf *pf, u32 *size)
{
	struct axia_mbus_msg *msg;
	int status;

	msg = kzalloc(40, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->hdr.uint = 0;
	msg->hdr.bits.opcode = AXIA_MBUS_GET_DUMP_DATA_LEN_COMMAND;
	msg->hdr.bits.data_len = 4;

	status = ne6x_reg_perform(pf, (u32 *)msg, size, 1, 1);
	kfree(msg);

	return status;
}

/* Fire-and-forget mailbox send (no completion wait beyond a fixed sleep).
 * Caller must hold the mbus lock; pair with ne6x_reg_polling() to collect
 * the response.
 */
void ne6x_reg_send(struct ne6x_pf *pf, u32 cmd, u32 *data, u32 size)
{
	struct axia_mbus_msg *msg;
	u32 *msg_data;
	int index;

	msg = kzalloc(size + 12, GFP_KERNEL);
	if (!msg)
		return;

	msg->hdr.uint = 0;
	msg->hdr.bits.opcode = cmd;
	msg->hdr.bits.data_len = 4 + size;
	memcpy((void *)&msg->data[0], (void *)data, size);

	msg_data = (u32 *)msg;
	/* Write Command(s) */
	for (index = 0; index < ((size / 4) + 1); index++)
		_reg_apb_write(pf, PCIE2C810_SHM_MBUS_BASE + 4 * index, msg_data[index]);

	/* Start mbus mechanism, notice c810 */
	_reg_apb_write(pf, 0x20680014, 0x3FEC);
	usleep_range(1000, 1200);
	kfree(msg);
}

/* Poll for a specific response opcode and copy its payload out.
 * On success *real_size is set to the usable payload length (clamped to
 * buf_size); for dump-data responses the first payload word is the firmware
 * cursor and is stored in pf->dump_info instead of the caller buffer.
 * NOTE(review): when the loop times out, resp holds the last header read
 * and the e==1 check can convert -ETIMEDOUT into -EAGAIN -- confirm this
 * precedence is intended.
 */
int ne6x_reg_polling(struct ne6x_pf *pf, u32 cmd, u32 *data, u32 buf_size,
		     u32 *real_size)
{
	int timeout = 0, offset = 0;
	struct axia_mbus_msg resp;
	int index, status;

	memset(&resp, 0, sizeof(resp));

	/* check if c810 handle completed */
	while (timeout < NE6X_ACCESS_TIMEOUT) {
		resp.hdr.uint = _reg_apb_read(pf, PCIE2C810_SHM_MBUS_BASE);
		if (resp.hdr.bits.opcode == cmd)
			break;

		timeout++;
		usleep_range(200, 220);
	}

	status = (timeout >= NE6X_ACCESS_TIMEOUT) ? -ETIMEDOUT : 0;
	status = (resp.hdr.bits.e == 1) ? -EAGAIN : status;
	if (status) {
		dev_info(ne6x_pf_to_dev(pf), "%s: cmd %d status (%d)\n", __func__, cmd, status);
		return status;
	}

	switch (cmd) {
	case AXIA_MBUS_GET_DUMP_DATA_ACK:
		*real_size = resp.hdr.bits.data_len - sizeof(resp) - sizeof(u32);
		offset = sizeof(u32);
		pf->dump_info = _reg_apb_read(pf, PCIE2C810_SHM_DATA_BASE);
		break;
	default:
		*real_size = resp.hdr.bits.data_len - sizeof(resp);
		offset = 0;
		break;
	}

	if (*real_size > buf_size)
		*real_size = buf_size;

	for (index = 0; index < (*real_size) / 4; index++)
		data[index] = _reg_apb_read(pf, PCIE2C810_SHM_DATA_BASE + 4 * index + offset);

	return 0;
}

/* Pull 'size' bytes of dump data in firmware-cursored chunks (pf->dump_info
 * carries the resume cursor between iterations).
 * NOTE(review): if the firmware ever reports real_size == 0 while
 * left_size > 0 this loop never terminates -- confirm the firmware contract.
 */
int ne6x_reg_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size)
{
	u32 *temp_buff = data;
	u32 left_size = size;
	u32 real_size = 0;

	memset(&pf->dump_info, 0, sizeof(u32));

	ne6x_reg_lock(pf);
	while (left_size > 0) {
		temp_buff += real_size / 4;
		ne6x_reg_send(pf, AXIA_MBUS_GET_DUMP_DATA_COMMAND, (u32 *)&pf->dump_info, 4);
		if (ne6x_reg_polling(pf, AXIA_MBUS_GET_DUMP_DATA_ACK,
				     temp_buff, left_size, &real_size)) {
			ne6x_reg_unlock(pf);
			return -EAGAIN;
		}

		left_size -= real_size;
	}
	ne6x_reg_unlock(pf);

	return 0;
}

/* Erase a whole firmware table.  The poll budget is raised for the slow
 * operation.
 * NOTE(review): NE6X_ACCESS_TIMEOUT is written here without holding the mbus
 * lock, racing any concurrent mailbox user (e.g. firmware upgrade).
 */
int ne6x_reg_clear_table(struct ne6x_pf *pf, u32 table_id)
{
	struct axia_mbus_msg *msg;
	int status;

	if (!ne6x_reg_valid_table(pf, table_id))
		return -EINVAL;

	msg = kzalloc(40, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	NE6X_ACCESS_TIMEOUT = 99999;

	msg->hdr.uint = 0;
	msg->hdr.bits.opcode = AXIA_MBUS_CLR_TABLE_COMMAND;
	msg->hdr.bits.data_len = 8;
	msg->data[0] = table_id;

	status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0);
	kfree(msg);

	NE6X_ACCESS_TIMEOUT = 9999;

	return status;
}

/* Enable/disable NOR-flash write protection. */
int ne6x_reg_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect)
{
	struct axia_mbus_msg *msg;
	int status;

	msg = kzalloc(40, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->hdr.uint = 0;
	msg->hdr.bits.opcode =
AXIA_MBUS_SET_NOFLASH_WRITE_PROTECT_COMMAND; + msg->hdr.bits.data_len = 8; + msg->data[0] = write_protect; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 2, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(512, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_GET_NOFLASH_WRITE_PROTECT_COMMAND; + msg->hdr.bits.data_len = 4; + + status = ne6x_reg_perform(pf, (u32 *)msg, p_write_protect, 1, 1); + kfree(msg); + + return status; +} + +int ne6x_reg_write_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *pdata) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(512, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_OPT_NOFLASH_COMMAND; + msg->hdr.bits.data_len = 16 + length; + msg->data[0] = NE6X_NORFLASH_OP_WRITE_E; + msg->data[1] = offset; + msg->data[2] = length; + memcpy((void *)&msg->data[3], (void *)pdata, length); + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4 + (length >> 2), 0); + kfree(msg); + + return status; +} + +int ne6x_reg_erase_norflash(struct ne6x_pf *pf, u32 offset, u32 length) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_OPT_NOFLASH_COMMAND; + msg->hdr.bits.data_len = 16; + msg->data[0] = NE6X_NORFLASH_OP_ERASE_E; + msg->data[1] = offset; + msg->data[2] = length; + + status = ne6x_reg_perform(pf, (u32 *)msg, NULL, 4, 0); + kfree(msg); + + return status; +} + +int ne6x_reg_read_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *p) +{ + struct axia_mbus_msg *msg; + int status; + + msg = kzalloc(40, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->hdr.uint = 0; + msg->hdr.bits.opcode = AXIA_MBUS_OPT_NOFLASH_COMMAND; + msg->hdr.bits.data_len = 16; + 
msg->data[0] = NE6X_NORFLASH_OP_READ_E; + msg->data[1] = offset; + msg->data[2] = length; + + status = ne6x_reg_perform(pf, (u32 *)msg, p, 4, length >> 2); + kfree(msg); + + return status; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h new file mode 100644 index 000000000000..cf8a7c5767a1 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_reg.h @@ -0,0 +1,249 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6X_REG_H +#define _NE6X_REG_H + +#include + +struct ne6x_diag_reg_test_info { + u32 offset; /* the base register */ + u64 mask; /* bits that can be tested */ + u32 elements; /* number of elements if array */ + u32 stride; /* bytes between each element */ +}; + +enum ne6x_reg_table { + NE6X_REG_RSS_TABLE = 0x0, + NE6X_REG_L2FDB_TABLE, + NE6X_REG_VLAN_TABLE, + NE6X_REG_MAC_LEARN_TABLE, + NE6X_REG_VF_STAT_TABLE, + NE6X_REG_VF_BW_TABLE, + NE6X_REG_ACL_TABLE, + NE6X_REG_ARFS_TABLE, + NE6X_REG_TABLE_LAST, +}; + +enum ne6x_reg_talk_port { + NE6X_MSG_PORT_ENABLE = 0, + NE6X_MSG_PORT_DUPLEX, + NE6X_MSG_PORT_SPEED, + NE6X_MSG_PORT_STATS, + NE6X_MSG_PORT_SFP_SPEED, + NE6X_MSG_PORT_FEC, + NE6X_MSG_PORT_SPEED_MAX, + NE6X_MSG_PORT_PAUSE, + NE6X_MSG_PORT_PAUSE_ADDR, + NE6X_MSG_PORT_LOOPBACK, + NE6X_MSG_PORT_MAX_FRAME, + NE6X_MSG_PORT_AUTO_NEG, + NE6X_MSG_PORT_INFO, + NE6X_MSG_PORT_LINK_STATUS, + NE6X_MSG_PORT_DRV_I2C, + NE6X_MSG_PORT_SELF_TEST, + NE6X_MSG_PORT_SFP_TYPE_LEN, + NE6X_MSG_PORT_SFP_EEPROM, + NE6X_MSG_PORT_STATE, +}; + +enum ne6x_reg_talk_opcode { + NE6X_TALK_SET = 0, + NE6X_TALK_GET +}; + +extern struct ne6x_diag_reg_test_info ne6x_reg_list[]; + +struct table_info { + u32 addr; /* 00 - 27: max_size + * 28 - 31: engine_idx + */ + u32 size; + /* 00 - 15: length + * 16 - 20: + * 21 - 23: entry_num + * 24 - 26: mem_type + * 27 - 27: mem_type_bucekt + * 28 - 31: opcode + */ + u16 opcode_read; + u16 opcode_write; 
+#define ADV_CMD_DISABLE 0x00 +#define ADV_CMD_EBABLE 0x01 + u32 advanced_cmd; + u16 opcode_insert; + u16 opcode_delete; + u16 opcode_update; + u16 opcode_search; + u16 size_insert; + u16 size_delete; + u16 size_search; + u16 size_update; +}; + +struct rss_table { + u32 resv; + u32 flag; + u32 hash_fun; /* 24-31, func, 23-1,type */ + u32 queue_base; + u16 queue_def; + u16 queue_size; + u16 entry_num; + u16 entry_size; + u8 entry_data[128]; + u8 hash_key[352]; + u8 resv1[8]; +}; + +struct l2fdb_dest_unicast { + u8 flags; /* bit0 -- static,bit1---multicast */ + u8 rsv[3]; + u32 vp_bmp[3]; + u32 cnt; /* leaf num */ + u8 resv3[44]; +}; + +struct l2fdb_dest_multicast { + u8 flags; /* bit0 -- static,bit1---multicast */ + u8 resv3[3]; + u32 vp_bmp[3]; + u8 resv4[48]; +}; + +struct l2fdb_search_result { + u32 key_index; + union { + struct l2fdb_dest_unicast unicast; + struct l2fdb_dest_multicast multicast; + } fw_info; +}; + +struct l2fdb_table { + u8 resv1; + u8 pport; + u8 mac[6]; + u32 vlanid; + u8 resv2[52]; + union { + struct l2fdb_dest_unicast unicast; + struct l2fdb_dest_multicast multicast; + } fw_info; /* forward info */ +}; + +struct l2fdb_fast_table { + u8 mac[6]; + u8 start_cos; + u8 cos_num; +}; + +struct meter_table { + u32 cir; + u32 cbs; + u32 pir; + u32 pbs; +}; + +enum np_user_data { + NP_USER_DATA_HW_FEATURES = 0, + NP_USER_DATA_HW_FLAGS = 1, + NP_USER_DATA_RSS_TABLE_SIZE = 2, + NP_USER_DATA_RSS_TABLE_ENTRY_WIDTH = 3, + NP_USER_DATA_RSS_HASH_KEY_BLOCK_SIZE = 4, + NP_USER_DATA_PORT2PI_0 = 5, + NP_USER_DATA_PI2PORT_0 = 25, + NP_USER_DATA_VLAN_TYPE = 33, + NP_USER_DATA_RSV_0 = 34, + NP_USER_DATA_RSV_1 = 35, + NP_USER_DATA_RSV_2 = 36, + NP_USER_DATA_PI0_BROADCAST_LEAF = 37, + NP_USER_DATA_PORT_OLFLAGS_0 = 53, + NP_USER_DATA_PORT_2_COS_0 = 121, + NP_USER_DATA_VPORT0_LINK_STATUS = 155, + NP_USER_DATA_TSO_CKSUM_DISABLE = 156, + NP_USER_DATA_PORT0_MTU = 157, + NP_USER_DATA_PORT0_QINQ = 161, + NP_USER_DATA_CQ_SIZE = 229, + NP_USER_DATA_FAST_MODE = 230, + 
NP_USER_DATA_SUB_FLAG = 231, + NP_USER_DATA_DDOS_FLAG = 242, + NP_USER_DATA_END = 255, +}; + +struct ne6x_diag_reg_info { + u32 address; + u32 value; +}; + +enum { + NE6X_NORFLASH_OP_WRITE_E = 0, + NE6X_NORFLASH_OP_READ_E = 1, + NE6X_NORFLASH_OP_ERASE_E = 2, + NE6X_NORFLASH_OP_E_END, +}; + +void ne6x_reg_pci_write(struct ne6x_pf *pf, u32 base_addr, + u32 offset_addr, u64 reg_value); +u64 ne6x_reg_pci_read(struct ne6x_pf *pf, u32 base_addr, u32 offset_addr); + +u32 ne6x_reg_apb_read(struct ne6x_pf *pf, u64 offset); +void ne6x_reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value); +int ne6x_reg_reset_firmware(struct ne6x_pf *pf); +u32 ne6x_reg_apb_read(struct ne6x_pf *pf, u64 offset); +void ne6x_reg_apb_write(struct ne6x_pf *pf, u64 offset, u32 value); + +int ne6x_reg_indirect_read(struct ne6x_pf *pf, u32 addr, u32 *value); +int ne6x_reg_indirect_write(struct ne6x_pf *pf, u32 addr, u32 value); +int ne6x_reg_table_read(struct ne6x_pf *pf, enum ne6x_reg_table table, + int index, void *data, int size); +int ne6x_reg_table_write(struct ne6x_pf *pf, enum ne6x_reg_table table, + int index, void *data, int size); +int ne6x_reg_table_insert(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size, u32 *table_id); +int ne6x_reg_table_delete(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size); +int ne6x_reg_table_search(struct ne6x_pf *pf, enum ne6x_reg_table table, + u32 *data, int size, u32 *ret_data, int ret_size); + +int ne6x_reg_e2prom_read(struct ne6x_pf *pf, u32 offset, void *pbuf, int size); +int ne6x_reg_e2prom_write(struct ne6x_pf *pf, u32 offset, void *pbuf, int size); +int ne6x_reg_set_fan_speed(struct ne6x_pf *pf, u32 speed); +int ne6x_reg_get_fan_speed(struct ne6x_pf *pf, u32 *speed); + +int ne6x_reg_get_soc_info(struct ne6x_pf *pf, u32 class_type, u32 *ret, u32 size); +int ne6x_reg_talk_port(struct ne6x_pf *pf, enum ne6x_reg_talk_port talk, + enum ne6x_reg_talk_opcode opcode, int port, + void *pbuf, int size); +int 
ne6x_reg_upgrade_firmware(struct ne6x_pf *pf, u8 region, u8 *data, int size); + +int ne6x_reg_get_ver(struct ne6x_pf *pf, struct ne6x_firmware_ver_info *version); + +int ne6x_reg_get_sfp_eeprom(struct ne6x_pf *pf, int port, void *pbuf, + u32 offset, int size); + +int ne6x_reg_nic_start(struct ne6x_pf *pf, u32 flag); +int ne6x_reg_nic_stop(struct ne6x_pf *pf, u32 flag); + +int ne6x_reg_get_nic_state(struct ne6x_pf *pf, u32 *state); + +int ne6x_reg_set_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 data); +int ne6x_reg_get_user_data(struct ne6x_pf *pf, enum np_user_data type, u32 *data); + +int ne6x_reg_set_led(struct ne6x_pf *pf, int port, bool state); +int ne6x_reg_config_meter(struct ne6x_pf *pf, u32 meter_id, u32 *data, int size); + +int ne6x_reg_send_bit(struct ne6x_pf *pf, u32 port, u32 mode); + +int ne6x_reg_set_unicast_for_fastmode(struct ne6x_pf *pf, u32 index, + u32 *data, u32 size); +int ne6x_reg_get_dump_data_len(struct ne6x_pf *pf, u32 *size); +int ne6x_reg_get_dump_data(struct ne6x_pf *pf, u32 *data, u32 size); +int ne6x_reg_clear_table(struct ne6x_pf *pf, u32 table_id); + +int ne6x_reg_set_norflash_write_protect(struct ne6x_pf *pf, u32 write_protect); +int ne6x_reg_get_norflash_write_protect(struct ne6x_pf *pf, u32 *p_write_protect); + +int ne6x_reg_write_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *pdata); +int ne6x_reg_erase_norflash(struct ne6x_pf *pf, u32 offset, u32 length); +int ne6x_reg_read_norflash(struct ne6x_pf *pf, u32 offset, u32 length, u32 *p); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c new file mode 100644 index 000000000000..bb70698eefec --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.c @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include "ne6x.h" +#include "ne6x_txrx.h" +#include "ne6x_reg.h" + +int ne6x_adpt_setup_tx_resources(struct ne6x_adapter *adpt) +{ + int i, err = 0; + + for (i = 0; i < adpt->num_queue && !err; i++) { + err = ne6x_setup_tx_descriptors(adpt->tx_rings[i]); + err = ne6x_setup_tg_descriptors(adpt->tg_rings[i]); + err = ne6x_setup_cq_descriptors(adpt->cq_rings[i]); + err = ne6x_setup_tx_sgl(adpt->tx_rings[i]); + } + + return err; +} + +int ne6x_adpt_setup_rx_resources(struct ne6x_adapter *adpt) +{ + int i, err = 0; + + for (i = 0; i < adpt->num_queue && !err; i++) + err = ne6x_setup_rx_descriptors(adpt->rx_rings[i]); + + return err; +} + +static inline void ne6x_update_enable_itr(struct ne6x_q_vector *q_vector) +{ + struct ne6x_adapter *adpt = (struct ne6x_adapter *)q_vector->adpt; + struct ne6x_hw *hw = &adpt->back->hw; + + u64 val = 1ULL << NE6X_VP_CQ_INTSHIFT; + + if (!test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + struct ne6x_ring *cq_ring = NULL; + + cq_ring = q_vector->cq.ring; + if (cq_ring->next_to_clean != cq_ring->next_to_use) { + cq_ring->next_to_clean = cq_ring->next_to_use; + /* memory barrier updating cq ring tail */ + wmb(); + writeq(cq_ring->next_to_clean, cq_ring->tail); + } + + if (q_vector->reg_idx < NE6X_PF_VP0_NUM) { + wr64(hw, NE6X_VPINT_DYN_CTLN(q_vector->reg_idx, NE6X_VP_INT), val); + wr64(hw, NE6X_VPINT_DYN_CTLN(q_vector->reg_idx, NE6X_VP_INT_MASK), ~(val)); + } else { + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(q_vector->reg_idx - NE6X_PF_VP0_NUM, + NE6X_VP_INT), val); + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(q_vector->reg_idx - NE6X_PF_VP0_NUM, + NE6X_VP_INT_MASK), ~(val)); + } + } +} + +int ne6x_napi_poll(struct napi_struct *napi, int budget) +{ + struct ne6x_q_vector *q_vector = container_of(napi, struct ne6x_q_vector, napi); + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)q_vector->adpt; + struct ne6x_ring *ring = NULL; + bool clean_complete = true; + int cq_budget = 16; + int work_done = 0; + int cleaned = 0; + + if 
(test_bit(NE6X_ADPT_DOWN, comm->state)) { + napi_complete(napi); + return 0; + } + + ring = q_vector->cq.ring; + cleaned = ne6x_clean_cq_irq(q_vector, ring, cq_budget); + if (cleaned >= cq_budget) + clean_complete = false; + + ring = q_vector->tx.ring; + if (!ne6x_clean_tx_irq(comm, ring, budget)) + clean_complete = false; + + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) + goto tx_only; + + ring = q_vector->rx.ring; + cleaned = ne6x_clean_rx_irq(ring, budget); + if (cleaned >= budget) + clean_complete = false; + + work_done += cleaned; + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) { + int cpu_id = smp_processor_id(); + + /* It is possible that the interrupt affinity has changed but, + * if the cpu is pegged at 100%, polling will never exit while + * traffic continues and the interrupt will be stuck on this + * cpu. We check to make sure affinity is correct before we + * continue to poll, otherwise we must stop polling so the + * interrupt can move to the correct cpu. 
+ */ + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + /* Tell napi that we are done polling */ + napi_complete_done(napi, work_done); + ne6x_update_enable_itr(q_vector); + /* Return budget-1 so that polling stops */ + return budget - 1; + } +tx_only: + return budget; + } + + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete_done(napi, work_done); + ne6x_update_enable_itr(q_vector); + + return min(work_done, budget - 1); +} + +void ne6x_adpt_clear_rings(struct ne6x_adapter *adpt) +{ + int i; + + if (adpt->tx_rings && adpt->tx_rings[0]) { + for (i = 0; i < adpt->num_queue; i++) { + kfree_rcu(adpt->tx_rings[i], rcu); + adpt->tx_rings[i] = NULL; + adpt->rx_rings[i] = NULL; + adpt->cq_rings[i] = NULL; + } + } +} + +int ne6x_alloc_rings(struct ne6x_adapter *adpt) +{ + struct ne6x_pf *pf = adpt->back; + struct ne6x_ring *ring; + int i, qpv = 4; + + /* Set basic values in the rings to be used later during open() */ + for (i = 0; i < adpt->num_queue; i++) { + /* allocate space for both Tx and Rx in one shot */ + ring = kcalloc(qpv, sizeof(*ring), GFP_KERNEL); + if (!ring) + goto err_out; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_tx_desc; + ring->size = 0; + adpt->tx_rings[i] = ring++; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_cq_desc; + ring->size = 0; + adpt->cq_rings[i] = ring++; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + ring->count = adpt->num_rx_desc; + ring->size = 0; + adpt->rx_rings[i] = ring++; + + ring->queue_index = i; + ring->reg_idx = adpt->base_queue + i; + ring->netdev = adpt->netdev; + ring->dev = &pf->pdev->dev; + ring->adpt = adpt; + 
ring->count = adpt->num_tg_desc; + ring->size = 0; + adpt->tg_rings[i] = ring; + } + + return 0; + +err_out: + ne6x_adpt_clear_rings(adpt); + return -ENOMEM; +} + +static int ne6x_configure_tx_ring(struct ne6x_ring *ring) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(ring->netdev); + u16 pf_q = adpt->base_queue + ring->queue_index; + union ne6x_sq_base_addr sq_base_addr; + struct ne6x_hw *hw = &adpt->back->hw; + union ne6x_sq_cfg sq_cfg; + + /* SRIOV mode VF Config OR SRIOV disabled PF Config */ + if (pf_q < NE6X_PF_VP0_NUM) { + sq_base_addr.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_BASE_ADDR)); + sq_base_addr.reg.csr_sq_base_addr_vp = ring->dma; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_BASE_ADDR), sq_base_addr.val); + + sq_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_CFG)); + sq_cfg.reg.csr_sq_len_vp = ring->count; + sq_cfg.reg.csr_tdq_pull_en = 0x1; + sq_cfg.reg.csr_sqevt_write_back_vp = 0x0; + sq_cfg.reg.csr_send_pd_revers_en = 0x0; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_CFG), sq_cfg.val); + + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_SQ_HD_POINTER), 0x0); + + /* cache tail off for easier writes later */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_TDQ(pf_q, 0x0) >> 3]; + } else { + /* SRIOV mode PF Config */ + sq_base_addr.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_SQ_BASE_ADDR)); + sq_base_addr.reg.csr_sq_base_addr_vp = ring->dma; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_SQ_BASE_ADDR), + sq_base_addr.val); + + sq_cfg.val = + rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_SQ_CFG)); + sq_cfg.reg.csr_sq_len_vp = ring->count; + sq_cfg.reg.csr_tdq_pull_en = 0x1; + sq_cfg.reg.csr_sqevt_write_back_vp = 0x0; + sq_cfg.reg.csr_send_pd_revers_en = 0x0; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_SQ_CFG), sq_cfg.val); + + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_SQ_HD_POINTER), 0x0); + + /* cache tail off for easier 
writes later */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_TDQ(pf_q, 0x0) >> 3]; + } + + return 0; +} + +int ne6x_adpt_configure_tx(struct ne6x_adapter *adpt) +{ + int err = 0; + u16 i; + + for (i = 0; (i < adpt->num_queue) && !err; i++) + err = ne6x_configure_tx_ring(adpt->tx_rings[i]); + + return err; +} + +static int ne6x_configure_cq_ring(struct ne6x_ring *ring) +{ + struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(ring->netdev); + u16 pf_q = adpt->base_queue + ring->queue_index; + union ne6x_cq_base_addr cq_base_addr; + struct ne6x_hw *hw = &adpt->back->hw; + union ne6x_cq_cfg cq_cfg; + + /* SRIOV enabled VF config OR SRIOV disabled PF config */ + if (pf_q < NE6X_PF_VP0_NUM) { + cq_base_addr.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_BASE_ADDR)); + cq_base_addr.reg.csr_cq_base_addr_vp = ring->dma; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_BASE_ADDR), cq_base_addr.val); + + cq_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_CFG)); + cq_cfg.reg.csr_cq_len_vp = ring->count; + cq_cfg.reg.csr_cq_merge_time_vp = 7; + cq_cfg.reg.csr_cq_merge_size_vp = 7; + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_CFG), cq_cfg.val); + + wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_TAIL_POINTER), 0x0); + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = (void __iomem *)hw->hw_addr0 + + (NE6X_VPINT_DYN_CTLN(pf_q, NE6X_CQ_HD_POINTER)); + writeq(0, ring->tail); + } else { + /* SRIOV enable PF config */ + cq_base_addr.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_CQ_BASE_ADDR)); + cq_base_addr.reg.csr_cq_base_addr_vp = ring->dma; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_BASE_ADDR), + cq_base_addr.val); + + cq_cfg.val = rd64_bar4(hw, + NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_CFG)); + cq_cfg.reg.csr_cq_len_vp = ring->count; + cq_cfg.reg.csr_cq_merge_time_vp = 7; + cq_cfg.reg.csr_cq_merge_size_vp = 7; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, 
NE6X_CQ_CFG),
+			  cq_cfg.val);
+
+		wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM,
+						  NE6X_CQ_TAIL_POINTER), 0x0);
+
+		/* cache tail for quicker writes, and clear the reg before use */
+		ring->tail = (void __iomem *)hw->hw_addr4 +
+			     (NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_CQ_HD_POINTER));
+		writeq(0, ring->tail);
+	}
+
+	return 0;
+}
+
+int ne6x_adpt_configure_cq(struct ne6x_adapter *adpt)
+{
+	int err = 0;
+	u16 i;
+	/* set up individual rings */
+	for (i = 0; i < adpt->num_queue && !err; i++)
+		err = ne6x_configure_cq_ring(adpt->cq_rings[i]);
+
+	return 0; /* NOTE(review): 'err' is computed above but discarded — sibling ne6x_adpt_configure_tx/_rx return err; confirm this should be 'return err;' */
+}
+
+static int ne6x_configure_rx_ring(struct ne6x_ring *ring)
+{
+	struct ne6x_adapter *adpt = ne6x_netdev_to_adpt(ring->netdev);
+	u16 pf_q = adpt->base_queue + ring->queue_index;
+	union ne6x_rq_block_cfg rq_block_cfg;
+	union ne6x_rq_base_addr rq_base_addr;
+	struct ne6x_hw *hw = &adpt->back->hw;
+	union ne6x_rq_cfg rc_cfg;
+	u16 rxmax = 0;
+
+	ring->rx_buf_len = adpt->rx_buf_len;
+
+	if (pf_q < NE6X_PF_VP0_NUM) {
+		rq_base_addr.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BASE_ADDR));
+		rq_base_addr.reg.csr_rq_base_addr_vp = ring->dma;
+		wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BASE_ADDR), rq_base_addr.val);
+
+		rxmax = min_t(u16, adpt->max_frame, ring->rx_buf_len);
+		rq_block_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BLOCK_CFG));
+		rq_block_cfg.reg.csr_rdq_mop_len = rxmax;
+		rq_block_cfg.reg.csr_rdq_sop_len = 0;
+		wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_BLOCK_CFG), rq_block_cfg.val);
+
+		rc_cfg.val = rd64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_CFG));
+		rc_cfg.reg.csr_rq_len_vp = ring->count;
+		rc_cfg.reg.csr_rdq_pull_en = 0x1;
+		rc_cfg.reg.csr_rqevt_write_back_vp = 0x0;
+		rc_cfg.reg.csr_recv_pd_type_vp = 0x0;
+		rc_cfg.reg.csr_recv_pd_revers_en = 0x0;
+		wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_CFG), rc_cfg.val);
+
+		wr64(hw, NE6X_VPINT_DYN_CTLN(pf_q, NE6X_RQ_HD_POINTER), 0x0);
+
+		/* cache tail for quicker writes, and clear the reg before use */
+		ring->tail = (u64 
*)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_RDQ(pf_q, 0x0) >> 3]; + } else { + /* SRIOV enabled PF Config */ + rq_base_addr.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_BASE_ADDR)); + rq_base_addr.reg.csr_rq_base_addr_vp = ring->dma; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_RQ_BASE_ADDR), + rq_base_addr.val); + + rxmax = min_t(u16, adpt->max_frame, ring->rx_buf_len); + rq_block_cfg.val = rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_BLOCK_CFG)); + rq_block_cfg.reg.csr_rdq_mop_len = rxmax; + rq_block_cfg.reg.csr_rdq_sop_len = 0; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_BLOCK_CFG), + rq_block_cfg.val); + + rc_cfg.val = + rd64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, + NE6X_RQ_CFG)); + rc_cfg.reg.csr_rq_len_vp = ring->count; + rc_cfg.reg.csr_rdq_pull_en = 0x1; + rc_cfg.reg.csr_rqevt_write_back_vp = 0x0; + rc_cfg.reg.csr_recv_pd_type_vp = 0x0; + rc_cfg.reg.csr_recv_pd_revers_en = 0x0; + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_RQ_CFG), rc_cfg.val); + + wr64_bar4(hw, NE6X_PFINT_DYN_CTLN(pf_q - NE6X_PF_VP0_NUM, NE6X_RQ_HD_POINTER), 0x0); + + /* cache tail for quicker writes, and clear the reg before use */ + ring->tail = (u64 *)&((u64 *)hw->hw_addr2)[NE6X_BAR2_VP_RDQ(pf_q, 0x0) >> 3]; + } + + return 0; +} + +int ne6x_adpt_configure_rx(struct ne6x_adapter *adpt) +{ + int err = 0; + u16 i; + + adpt->max_frame = NE6X_MAX_RXBUFFER; + adpt->rx_buf_len = (PAGE_SIZE < 8192) ? 
NE6X_RXBUFFER_4096 : NE6X_RXBUFFER_4096; /* NOTE(review): both ternary arms are NE6X_RXBUFFER_4096, so the (PAGE_SIZE < 8192) test is dead — confirm whether a smaller buffer (e.g. 2048) was intended for 4K pages */
+
+	/* set up individual rings */
+	for (i = 0; i < adpt->num_queue && !err; i++)
+		err = ne6x_configure_rx_ring(adpt->rx_rings[i]);
+
+	return err;
+}
+
+netdev_tx_t ne6x_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct ne6x_netdev_priv *np = netdev_priv(netdev);
+	struct ne6x_adapter *adpt = np->adpt;
+	struct ne6x_ring *tx_ring = adpt->tx_rings[skb->queue_mapping];
+	struct ne6x_ring *tag_ring = adpt->tg_rings[skb->queue_mapping];
+	struct sk_buff *trailer;
+	int tailen = 4;
+	int nsg;
+	bool jumbo_frame = true;
+
+	/* hardware can't handle really short frames, hardware padding works
+	 * beyond this point
+	 */
+	if (skb_put_padto(skb, NE6X_MIN_TX_LEN))
+		return NETDEV_TX_OK;
+
+	/* single packet add 4 byte to CRC */
+	if (skb->len < NE6X_MAX_DATA_PER_TXD) {
+		nsg = skb_cow_data(skb, tailen, &trailer);
+		if (unlikely(nsg < 0)) {
+			netdev_err(adpt->netdev, "TX: skb_cow_data() returned %d\n", nsg);
+			return nsg; /* NOTE(review): returns a negative errno as netdev_tx_t; ndo_start_xmit must return NETDEV_TX_OK/NETDEV_TX_BUSY — likely should free the skb and return NETDEV_TX_OK (skb leaks here) */
+		}
+
+		pskb_put(skb, trailer, tailen);
+		jumbo_frame = false;
+	}
+
+	if (netdev->gso_max_size < skb->len)
+		netdev_err(adpt->netdev, "%s: skb->len = %d > 15360\n", __func__, skb->len);
+
+	return ne6x_xmit_frame_ring(skb, tx_ring, tag_ring, jumbo_frame);
+}
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h
new file mode 100644
index 000000000000..b09563cfc4e3
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_txrx.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6X_TXRX_H +#define _NE6X_TXRX_H + +int ne6x_napi_poll(struct napi_struct *napi, int budget); +netdev_tx_t ne6x_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +bool ne6x_alloc_rx_buffers(struct ne6x_ring *rx_ring, u16 cleaned_count); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c new file mode 100644 index 000000000000..aca0ab3d3ee1 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.c @@ -0,0 +1,2388 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6x.h" +#include "ne6x_reg.h" +#include "ne6x_portmap.h" +#include "ne6x_dev.h" +#include "ne6x_txrx.h" +#include "ne6x_interrupt.h" + +void ne6x_clear_vf_status(struct ne6x_vf *vf) +{ + struct ne6x_flowctrl flowctrl; + + flowctrl.rx_pause = 0; + flowctrl.tx_pause = 0; + ne6x_dev_set_flowctrl(vf->adpt, &flowctrl); + ne6x_dev_set_vf_bw(vf->adpt, 0); +} + +void ne6x_mbx_deinit_snapshot(struct ne6x_hw *hw) +{ + struct ne6x_mbx_snapshot *snap = &hw->mbx_snapshot; + + /* Free VF counter array and reset vf counter length */ + kfree(snap->mbx_vf.vf_cntr); + snap->mbx_vf.vfcntr_len = 0; +} + +int ne6x_mbx_init_snapshot(struct ne6x_hw *hw, u16 vf_count) +{ + struct ne6x_mbx_snapshot *snap = &hw->mbx_snapshot; + + /* Ensure that the number of VFs allocated is non-zero and + * is not greater than the number of supported VFs defined in + * the functional capabilities of the PF. + */ + if (!vf_count || vf_count > NE6X_MAX_VP_NUM) + return 1; + + snap->mbx_vf.vf_cntr = kcalloc(vf_count, sizeof(*snap->mbx_vf.vf_cntr), GFP_KERNEL); + if (!snap->mbx_vf.vf_cntr) + return 1; + + /* Setting the VF counter length to the number of allocated + * VFs for given PF's functional capabilities. 
+ */ + snap->mbx_vf.vfcntr_len = vf_count; + snap->state = NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + memset(hw->ne6x_mbx_ready_to_send, true, 64); + + return 0; +} + +int ne6x_status_to_errno(int err) +{ + if (err) + return -EINVAL; + + return 0; +} + +void ne6x_set_vf_state_qs_dis(struct ne6x_vf *vf) +{ + /* Clear Rx/Tx enabled queues flag */ + if (test_bit(NE6X_VF_STATE_QS_ENA, vf->vf_states)) + clear_bit(NE6X_VF_STATE_QS_ENA, vf->vf_states); +} + +static void ne6x_dis_vf_qs(struct ne6x_vf *vf) +{ + ne6x_set_vf_state_qs_dis(vf); +} + +bool ne6x_is_reset_in_progress(unsigned long *state) +{ + return test_bit(NE6X_PF_RESET_REQUESTED, state) || + test_bit(NE6X_RESET_INTR_RECEIVED, state) || + test_bit(NE6X_CORE_RESET_REQUESTED, state) || + test_bit(NE6X_GLOBAL_RESET_REQUESTED, state); +} + +void ne6x_adpt_close_vf(struct ne6x_adapter *adpt, u16 vf_id) +{ + if (!test_and_set_bit(NE6X_ADPT_DOWN, adpt->comm.state)) + clear_bit(NE6X_ADPT_DOWN, adpt->comm.state); +} + +static int ne6x_adpt_clear_vf(struct ne6x_adapter *adpt) +{ + struct mac_addr_head *mc_head = &adpt->mc_mac_addr; + struct mac_addr_head *uc_head = &adpt->uc_mac_addr; + struct mac_addr_node *temp_node, *addr_node; + struct ne6x_vlan_filter *vlf, *vlftmp; + struct ne6x_pf *pf; + + if (!adpt) + return 0; + + if (!adpt->back) + goto free_adpt; + + pf = adpt->back; + + mutex_lock(&pf->switch_mutex); + if (!pf->adpt[adpt->idx]) { + dev_err(&pf->pdev->dev, "pf->adpt[%d] is NULL, just free adpt[%d](type %d)\n", + adpt->idx, adpt->idx, adpt->type); + goto unlock_adpt; + } + + if (pf->adpt[adpt->idx] != adpt) { + dev_err(&pf->pdev->dev, "pf->adpt[%d](type %d) != adpt[%d](type %d): no free!\n", + pf->adpt[adpt->idx]->idx, pf->adpt[adpt->idx]->type, adpt->idx, adpt->type); + goto unlock_adpt; + } + + pf->adpt[adpt->idx] = NULL; + if (adpt->idx < pf->next_adpt) + pf->next_adpt = adpt->idx; + + kfree(adpt->tx_rings); + adpt->tx_rings = NULL; + + kfree(adpt->q_vectors); + adpt->q_vectors = NULL; + + 
kfree(adpt->port_info); + adpt->port_info = NULL; + + /* release adpt multicast addr list resource */ + mutex_lock(&mc_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &mc_head->list, list) { + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&mc_head->mutex); + + /* release adpt unicast addr list resource */ + mutex_lock(&uc_head->mutex); + list_for_each_entry_safe(addr_node, temp_node, &uc_head->list, list) { + list_del(&addr_node->list); + kfree(addr_node); + } + mutex_unlock(&uc_head->mutex); + + spin_lock_bh(&adpt->mac_vlan_list_lock); + /* release adpt vlan list resource */ + list_for_each_entry_safe(vlf, vlftmp, &adpt->vlan_filter_list, list) { + list_del(&vlf->list); + kfree(vlf); + } + spin_unlock_bh(&adpt->mac_vlan_list_lock); + +unlock_adpt: + mutex_unlock(&pf->switch_mutex); +free_adpt: + kfree(adpt); + + return 0; +} + +int ne6x_adpt_release_vf(struct ne6x_adapter *adpt, u16 vf_id) +{ + struct ne6x_pf *pf; + + if (!adpt->back) + return -ENODEV; + + pf = adpt->back; + + if (adpt->netdev && !ne6x_is_reset_in_progress(pf->state) && + (test_bit(NE6X_ADPT_NETDEV_REGISTERED, adpt->comm.state))) { + unregister_netdev(adpt->netdev); + clear_bit(NE6X_ADPT_NETDEV_REGISTERED, adpt->comm.state); + } + + ne6x_adpt_close_vf(adpt, vf_id); + + if (!ne6x_is_reset_in_progress(pf->state)) + ne6x_adpt_clear_vf(adpt); + + return 0; +} + +struct ne6x_adapter *ne6x_get_vf_adpt(struct ne6x_vf *vf) +{ + return vf->pf->adpt[vf->lan_adpt_idx]; +} + +static void ne6x_vf_invalidate_adpt(struct ne6x_vf *vf) +{ + vf->lan_adpt_idx = NE6X_NO_ADPT; +} + +static void ne6x_vf_adpt_release(struct ne6x_vf *vf) +{ + ne6x_adpt_clear_mac_vlan(ne6x_get_vf_adpt(vf)); + ne6x_dev_del_broadcast_leaf(ne6x_get_vf_adpt(vf)); + ne6x_dev_set_features(vf->adpt, 0); + ne6x_dev_del_vf_qinq(vf, 0, 0); + ne6x_adpt_release_vf(ne6x_get_vf_adpt(vf), vf->vf_id); + ne6x_vf_invalidate_adpt(vf); +} + +static void ne6x_free_vf_res(struct ne6x_vf *vf) +{ + /* First, disable VF's 
configuration API to prevent OS from + * accessing the VF's adapter after it's freed or invalidated. + */ + clear_bit(NE6X_VF_STATE_INIT, vf->vf_states); + + /* free adapter and disconnect it from the parent uplink */ + if (vf->lan_adpt_idx != NE6X_NO_ADPT) { + if (vf->tx_rate) { + ne6x_dev_set_vf_bw(ne6x_get_vf_adpt(vf), 0); + vf->tx_rate = 0; + } + + ne6x_vf_adpt_release(vf); + } +} + +static int ne6x_sriov_free_msix_res(struct ne6x_pf *pf) +{ + struct ne6x_lump_tracking *res; + + if (!pf) + return -EINVAL; + + res = pf->irq_pile; + if (!res) + return -EINVAL; + + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), 0xffffffffffffffff); + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), 0xffffffffffffffff); + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), 0xffffffffffffffff); + wr64_bar4(&pf->hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), 0xffffffffffffffff); + + return 0; +} + +void ne6x_free_vfs(struct ne6x_pf *pf) +{ + struct device *dev = ne6x_pf_to_dev(pf); + unsigned int tmp, i; + u64 reg; + + if (!pf->vf) + return; + + while (test_and_set_bit(NE6X_VF_DIS, pf->state)) + usleep_range(1000, 2000); + + /* Disable IOV before freeing resources. This lets any VF drivers + * running in the host get themselves cleaned up before we yank + * the carpet out from underneath their feet. 
+ */ + if (!pci_vfs_assigned(pf->pdev)) + pci_disable_sriov(pf->pdev); + else + dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); + + /* Avoid wait time by stopping all VFs at the same time */ + ne6x_for_each_vf(pf, i) { + if (test_bit(NE6X_VF_STATE_QS_ENA, pf->vf[i].vf_states)) + ne6x_dis_vf_qs(&pf->vf[i]); + } + + tmp = pf->num_alloc_vfs; + pf->num_qps_per_vf = 0; + pf->num_alloc_vfs = 0; + + for (i = 0; i < tmp; i++) { + if (test_bit(NE6X_VF_STATE_INIT, pf->vf[i].vf_states)) { + set_bit(NE6X_VF_STATE_DIS, pf->vf[i].vf_states); + ne6x_free_vf_res(&pf->vf[i]); + } + } + + if (ne6x_sriov_free_msix_res(pf)) + dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); + + ne6x_dev_clear_vport(pf); + kfree(pf->vf); + pf->vf = NULL; + + reg = rd64_bar4(&pf->hw, 0x05300); + reg &= ~0xfc000; + reg |= 0x8000; + wr64_bar4(&pf->hw, 0x05300, reg); + + clear_bit(NE6X_VF_DIS, pf->state); +} + +static int ne6x_alloc_vfs(struct ne6x_pf *pf, int num_vfs) +{ + struct ne6x_vf *vfs; + + vfs = kcalloc(num_vfs, sizeof(*vfs), GFP_KERNEL); + if (!vfs) + return -ENOMEM; + + pf->vf = vfs; + pf->num_alloc_vfs = num_vfs; + + return 0; +} + +static int ne6x_sriov_set_msix_res(struct ne6x_pf *pf, u16 num_msix_needed) +{ + int sriov_base_vector; + + sriov_base_vector = NE6X_MAX_MSIX_NUM - num_msix_needed; + + /* make sure we only grab irq_tracker entries from the list end and + * that we have enough available MSIX vectors + */ + if (sriov_base_vector < 0) + return -EINVAL; + + return 0; +} + +static int ne6x_set_per_vf_res(struct ne6x_pf *pf) +{ + struct device *dev = ne6x_pf_to_dev(pf); + u16 queue; + + if (!pf->num_alloc_vfs) + return -EINVAL; + + queue = NE6X_MAX_VP_NUM / pf->num_alloc_vfs; + + if (ne6x_sriov_set_msix_res(pf, queue * pf->num_alloc_vfs)) { + dev_err(dev, "Unable to set MSI-X resources for %d VFs\n", pf->num_alloc_vfs); + return -EINVAL; + } + + /* only allow equal Tx/Rx queue count (i.e. 
queue pairs) */ + pf->num_qps_per_vf = queue; + dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n", pf->num_alloc_vfs, + pf->num_qps_per_vf, pf->num_qps_per_vf); + + return 0; +} + +static void ne6x_vc_clear_allowlist(struct ne6x_vf *vf) +{ + bitmap_zero(vf->opcodes_allowlist, VIRTCHNL_OP_MAX); +} + +/* default opcodes to communicate with VF */ +static const u32 default_allowlist_opcodes[] = { + VIRTCHNL_OP_GET_VF_RESOURCES, + VIRTCHNL_OP_VERSION, + VIRTCHNL_OP_RESET_VF, +}; + +static void ne6x_vc_allowlist_opcodes(struct ne6x_vf *vf, const u32 *opcodes, size_t size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + set_bit(opcodes[i], vf->opcodes_allowlist); +} + +void ne6x_vc_set_default_allowlist(struct ne6x_vf *vf) +{ + ne6x_vc_clear_allowlist(vf); + ne6x_vc_allowlist_opcodes(vf, default_allowlist_opcodes, + ARRAY_SIZE(default_allowlist_opcodes)); +} + +static void ne6x_set_dflt_settings_vfs(struct ne6x_pf *pf) +{ + int i; + + ne6x_for_each_vf(pf, i) { + struct ne6x_vf *vf = &pf->vf[i]; + + vf->pf = pf; + vf->vf_id = i; + vf->base_queue = (NE6X_MAX_VP_NUM / pf->num_alloc_vfs) * i; + vf->num_vf_qs = pf->num_qps_per_vf; + vf->tx_rate = 0; + test_and_clear_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + ne6x_vc_set_default_allowlist(vf); + } +} + +void ne6x_send_init_mbx_mesg(struct ne6x_pf *pf) +{ + struct ne6x_hw *hw = &pf->hw; + u64 reg_cfg; + int i; + + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT), 0xffffffffffffffff); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT), 0xffffffffffffffff); + + ne6x_for_each_vf(pf, i) { + struct ne6x_vf *vf = &pf->vf[i]; + + wr64_bar4(hw, NE6X_PF_MAILBOX_ADDR(vf->base_queue), 0x0); + reg_cfg = rd64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK)); + reg_cfg &= ~(1ULL << vf->base_queue); + wr64_bar4(hw, 
NE6X_PF_CON_ADDR(NE6X_PF_DB_DREQ_INT_MASK), reg_cfg); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_DACK_INT_MASK), reg_cfg); + } +} + +static struct ne6x_port_info *ne6x_vf_get_port_info(struct ne6x_vf *vf) +{ + struct ne6x_adapter *adpt = ne6x_get_vf_adpt(vf); + + return adpt->port_info; +} + +static struct ne6x_adapter *ne6x_adpt_alloc(struct ne6x_pf *pf, u16 vf_id, u16 num_vfs) +{ + struct device *dev = ne6x_pf_to_dev(pf); + struct ne6x_adapter *adpt = NULL; + int pf_adpt_idx; + + /* Need to protect the allocation of the adapters at the PF level */ + mutex_lock(&pf->switch_mutex); + + /* If we have already allocated our maximum number of adapters, + * pf->next_adpt will be NE6X_NO_ADPT. If not, pf->next_adpt index + * is available to be populated + */ + if (pf->next_adpt == NE6X_NO_ADPT) { + dev_dbg(dev, "out of adapter slots!\n"); + goto unlock_pf; + } + + adpt = kzalloc(sizeof(*adpt), GFP_KERNEL); + adpt->back = pf; + adpt->type = NE6X_ADPT_VF; + set_bit(NE6X_ADPT_DOWN, adpt->comm.state); + + adpt->num_queue = pf->vf[vf_id].num_vf_qs; + adpt->num_q_vectors = pf->vf[vf_id].num_vf_qs; + /* vf_id 0 -- 63: vport: 0 -- 64: pf: 64 -- 68 */ + adpt->idx = pf->vf[vf_id].vf_id + pf->num_alloc_adpt; + adpt->vport = pf->vf[vf_id].vf_id; + adpt->port_info = kzalloc(sizeof(*adpt->port_info), GFP_KERNEL); + if (!adpt->port_info) + goto err_rings; + + /* vf attach pf alloc */ + pf_adpt_idx = pf->vf[vf_id].base_queue / (NE6X_MAX_VP_NUM / pf->hw.pf_port); + adpt->port_info->lport = pf->adpt[pf_adpt_idx]->port_info->lport; + adpt->port_info->hw_port_id = pf->adpt[pf_adpt_idx]->port_info->hw_port_id; + adpt->port_info->hw = &pf->hw; + adpt->port_info->hw_trunk_id = pf->adpt[pf_adpt_idx]->port_info->hw_trunk_id; + adpt->port_info->hw_queue_base = pf->vf[vf_id].base_queue; + adpt->port_info->hw_max_queue = pf->vf[vf_id].num_vf_qs; + adpt->base_queue = pf->vf[vf_id].base_queue; + + /* init multicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->mc_mac_addr.list); + 
mutex_init(&adpt->mc_mac_addr.mutex); + + /* init unicast MAC addr list head node */ + INIT_LIST_HEAD(&adpt->uc_mac_addr.list); + mutex_init(&adpt->uc_mac_addr.mutex); + + /* init vlan list head node */ + spin_lock_init(&adpt->mac_vlan_list_lock); + INIT_LIST_HEAD(&adpt->vlan_filter_list); + + pf->adpt[adpt->idx] = adpt; + + goto unlock_pf; + +err_rings: + kfree(adpt); + adpt = NULL; +unlock_pf: + mutex_unlock(&pf->switch_mutex); + return adpt; +} + +struct ne6x_adapter *ne6x_adpt_setup_vf(struct ne6x_pf *pf, u16 vf_id, u16 num_vfs) +{ + struct device *dev = ne6x_pf_to_dev(pf); + struct ne6x_adapter *adpt; + + adpt = ne6x_adpt_alloc(pf, vf_id, num_vfs); + if (!adpt) { + dev_err(dev, "could not allocate adapter\n"); + return NULL; + } + + return adpt; +} + +static struct ne6x_adapter *ne6x_vf_adpt_setup(struct ne6x_vf *vf, u16 num_vfs) +{ + struct ne6x_pf *pf = vf->pf; + struct ne6x_adapter *adpt; + + adpt = ne6x_adpt_setup_vf(pf, vf->vf_id, num_vfs); + if (!adpt) { + dev_err(ne6x_pf_to_dev(pf), "Failed to create VF adapter\n"); + ne6x_vf_invalidate_adpt(vf); + return NULL; + } + + vf->lan_adpt_idx = adpt->idx; + vf->adpt = adpt; + + return adpt; +} + +static int ne6x_init_vf_adpt_res(struct ne6x_vf *vf, u16 num_vfs) +{ + struct ne6x_pf *pf = vf->pf; + u8 broadcast[ETH_ALEN]; + struct ne6x_adapter *adpt; + struct device *dev; + + dev = ne6x_pf_to_dev(pf); + adpt = ne6x_vf_adpt_setup(vf, num_vfs); + if (!adpt) + return -ENOMEM; + + vf->tx_rate = 0; + ne6x_dev_set_vf_bw(adpt, vf->tx_rate); + eth_broadcast_addr(broadcast); + + return 0; +} + +static int ne6x_start_vfs(struct ne6x_pf *pf, u16 num_vfs) +{ + int retval, i; + + ne6x_for_each_vf(pf, i) { + struct ne6x_vf *vf = &pf->vf[i]; + + retval = ne6x_init_vf_adpt_res(vf, num_vfs); + if (retval) { + dev_err(ne6x_pf_to_dev(pf), "Failed to initialize adapter resources for VF %d, error %d\n", + vf->vf_id, retval); + goto teardown; + } + + set_bit(NE6X_VF_STATE_INIT, vf->vf_states); + } + + ne6x_linkscan_schedule(pf); + + 
return 0; + +teardown: + for (i = i - 1; i >= 0; i--) { + struct ne6x_vf *vf = &pf->vf[i]; + + ne6x_vf_adpt_release(vf); + } + + return retval; +} + +static int ne6x_delete_pf_trunk(struct ne6x_pf *pf) +{ + return 0; +} + +static int ne6x_recycle_vp_resources(struct ne6x_pf *pf) +{ + struct ne6x_adapter *adpt; + int rst, i; + u64 reg; + + rst = ne6x_delete_pf_trunk(pf); + if (rst) + return rst; + + ne6x_disable_link_irq(pf); + ne6x_free_link_irq(pf); + for (i = 0; i < pf->num_alloc_adpt; i++) { + adpt = pf->adpt[i]; + if (test_bit(NE6X_ADPT_OPEN, adpt->comm.state)) + ne6x_adpt_close(adpt); + } + + reg = rd64_bar4(&pf->hw, 0x05300); + reg &= ~0xfc000; + reg |= 0x7c000; + wr64_bar4(&pf->hw, 0x05300, reg); + + return 0; +} + +static int ne6x_adpt_resetup(struct ne6x_pf *pf, bool recovery) +{ + int vid, pooling, i, actual_vector = 1, size; + struct device *dev = ne6x_pf_to_dev(pf); + union ne6x_ciu_time_out_cfg ciu_time_out_cdg; + union ne6x_all_rq_cfg all_rq_cfg; + union ne6x_all_sq_cfg all_sq_cfg; + union ne6x_all_cq_cfg all_cq_cfg; + union ne6x_merge_cfg merge_cfg; + struct ne6x_hw *hw = &pf->hw; + int qp_remaining, q_vectors; + struct ne6x_adapter *adpt = NULL; + u64 __iomem *reg; + + pooling = test_bit(NE6X_LINK_POOLING, pf->state); + if (pooling) + clear_bit(NE6X_LINK_POOLING, pf->state); + + if (test_bit(NE6X_PF_MSIX, pf->state)) { + pci_disable_msix(pf->pdev); + actual_vector = pci_enable_msix_range(pf->pdev, pf->msix_entries, NE6X_MIN_MSIX, + NE6X_MAX_MSIX_NUM); + if (actual_vector < NE6X_MAX_MSIX_NUM) { + clear_bit(NE6X_PF_MSIX, pf->state); + pci_disable_msix(pf->pdev); + dev_err(dev, "%s-%d: error msix enable failed\n", __func__, __LINE__); + } + + pf->irq_pile->num_entries = actual_vector; + } else { + if (!pf->irq_pile) { + size = sizeof(struct ne6x_lump_tracking) + (sizeof(u16) * actual_vector); + pf->irq_pile = kzalloc(size, GFP_KERNEL); + if (!pf->irq_pile) { + dev_err(dev, "error intx allocating irq_pile memory\n"); + return -ENOMEM; + } + + 
pf->irq_pile->num_entries = actual_vector; + } + + test_and_set_bit(NE6X_PF_INTX, pf->state); + } + + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_RQ_CFG); + all_rq_cfg.val = readq(reg); + all_rq_cfg.reg.csr_allrq_pull_merge_cfg = 0x10; + writeq(all_rq_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_SQ_CFG); + all_sq_cfg.val = readq(reg); + all_sq_cfg.reg.csr_allsq_pull_merge_cfg = 0x10; + writeq(all_sq_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_ALL_CQ_CFG); + all_cq_cfg.val = readq(reg); + all_cq_cfg.reg.csr_allcq_merge_size = 0x1; + all_cq_cfg.reg.csr_allcq_wt_rr_cnt = 0x7F; + all_cq_cfg.reg.csr_allcq_wt_rr_flag = 0x1; + writeq(all_cq_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_MERGE_CFG); + merge_cfg.val = readq(reg); + merge_cfg.reg.csr_merge_clk_cnt = 800; + writeq(merge_cfg.val, reg); + reg = (void __iomem *)hw->hw_addr4 + NE6X_PFINT_DYN_CTLN(7, NE6X_CIU_TIME_OUT_CFG); + ciu_time_out_cdg.val = readq(reg); + ciu_time_out_cdg.reg.csr_int_timer_out_cnt = 0xfff; + writeq(ciu_time_out_cdg.val, reg); + + ne6x_for_each_pf(pf, vid) { + adpt = pf->adpt[vid]; + if (recovery) { + adpt->port_info->hw_queue_base = adpt->port_info->hw_queue_base_old; + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->port_info->hw_queue_base = pf->hw.expect_vp * vid; + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->base_vector = adpt->base_queue; + adpt->port_info->hw_max_queue = pf->hw.max_queue; + adpt->port_info->queue = adpt->port_info->hw_max_queue; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + } else { + adpt->port_info->hw_queue_base_old = adpt->port_info->hw_queue_base; + adpt->port_info->hw_queue_base = NE6X_PF_VP1_NUM + vid; + adpt->base_queue = adpt->port_info->hw_queue_base; + adpt->base_vector = adpt->base_queue; + adpt->port_info->hw_max_queue = 1u; + adpt->port_info->queue 
= 1u; + adpt->num_q_vectors = adpt->port_info->queue; + adpt->num_queue = adpt->num_q_vectors; + } + + for (i = 0; i < adpt->num_queue; i++) { + adpt->rx_rings[i]->reg_idx = adpt->base_queue + i; + adpt->cq_rings[i]->reg_idx = adpt->rx_rings[i]->reg_idx; + adpt->tx_rings[i]->reg_idx = adpt->cq_rings[i]->reg_idx; + } + + qp_remaining = adpt->num_queue; + q_vectors = adpt->num_q_vectors; + for (i = 0; i < adpt->num_q_vectors; i++) { + adpt->q_vectors[i]->num_ringpairs = + DIV_ROUND_UP(qp_remaining, q_vectors - i); + adpt->q_vectors[i]->reg_idx = + adpt->q_vectors[i]->v_idx + adpt->base_vector; + qp_remaining--; + } + + ne6x_adpt_reset_stats(adpt); + ne6x_dev_set_vport(adpt); + for (i = 0; i < adpt->rss_info.ind_table_size; i++) + adpt->rss_info.ind_table[i] = + ethtool_rxfh_indir_default(i, adpt->num_queue); + + ne6x_dev_set_rss(adpt, &adpt->rss_info); + ne6x_dev_set_port2pi(adpt); + rtnl_lock(); + + if (test_bit(NE6X_ADPT_OPEN, adpt->comm.state)) + ne6x_adpt_open(adpt); + + rtnl_unlock(); + } + + ne6x_init_link_irq(pf); + ne6x_enable_link_irq(pf); + + if (pooling) { + set_bit(NE6X_LINK_POOLING, pf->state); + ne6x_linkscan_schedule(pf); + } + + return 0; +} + +static int ne6x_ena_vfs(struct ne6x_pf *pf, u16 num_vfs) +{ + struct device *dev = ne6x_pf_to_dev(pf); + int ret; + + ret = ne6x_recycle_vp_resources(pf); + if (ret) + goto err_pci_disable_sriov; + + ret = ne6x_adpt_resetup(pf, false); + if (ret) + goto err_pci_disable_sriov; + + ne6x_clr_vf_bw_for_max_vpnum(pf); + ret = ne6x_alloc_vfs(pf, num_vfs); + if (ret) + goto err_pci_disable_sriov; + + if (ne6x_set_per_vf_res(pf)) { + dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n", + num_vfs); + ret = -ENOSPC; + goto err_unroll_sriov; + } + + ne6x_set_dflt_settings_vfs(pf); + if (ne6x_start_vfs(pf, num_vfs)) { + dev_err(dev, "Failed to start VF(s)\n"); + ret = -EAGAIN; + goto err_unroll_sriov; + } + + ne6x_init_mailbox_irq(pf); + ne6x_send_init_mbx_mesg(pf); + clear_bit(NE6X_VF_DIS, 
pf->state); + + return 0; + +err_unroll_sriov: + kfree(pf->vf); + pf->vf = NULL; + pf->num_alloc_vfs = 0; +err_pci_disable_sriov: + pci_disable_sriov(pf->pdev); + + return ret; +} + +static int ne6x_pci_sriov_ena(struct ne6x_pf *pf, int num_vfs) +{ + int pre_existing_vfs = pci_num_vf(pf->pdev); + struct device *dev = ne6x_pf_to_dev(pf); + int err; + + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + ne6x_free_vfs(pf); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) + return 0; + + if (num_vfs > NE6X_MAX_VP_NUM) { + dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n", num_vfs, + NE6X_MAX_VP_NUM); + return -EOPNOTSUPP; + } + + err = ne6x_ena_vfs(pf, num_vfs); + if (err) { + dev_err(dev, "Failed to enable SR-IOV: %d\n", err); + return err; + } + + if (num_vfs) + test_and_set_bit(NE6X_FLAG_SRIOV_ENA, pf->state); + + return 0; +} + +int ne6x_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct ne6x_pf *pf = pci_get_drvdata(pdev); + struct ne6x_adapter *adpt = NULL; + struct ne6x_vf *vf = NULL; + pbmp_t port_bitmap; + int err = 0, vf_id; + int timeout = 50; + int status; + + if (!(num_vfs == 0 || num_vfs == 2 || num_vfs == 4 || num_vfs == 8 || + num_vfs == 16 || num_vfs == 32 || num_vfs == 64)) + return -EINVAL; + + if (pf->irq_pile->num_entries < NE6X_MAX_MSIX_NUM) { + dev_err(ne6x_pf_to_dev(pf), "ne6x irq number < %d!\n", NE6X_MAX_MSIX_NUM); + return -EPERM; + } + + while (test_and_set_bit(NE6X_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) { + dev_warn(ne6x_pf_to_dev(pf), "ne6x config busy, timeout!\n"); + return -EBUSY; + } + usleep_range(1000, 2000); + } + + if (!num_vfs) { + set_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + if (!pci_vfs_assigned(pdev)) { + ne6x_free_vfs(pf); + ne6x_disable_mailbox_irq(pf); + ne6x_free_mailbox_irq(pf); + ne6x_mbx_deinit_snapshot(&pf->hw); + if (test_bit(NE6X_FLAG_SRIOV_ENA, pf->state)) + clear_bit(NE6X_FLAG_SRIOV_ENA, pf->state); + + if (!test_bit(NE6X_REMOVE, pf->state)) { + 
ne6x_recycle_vp_resources(pf); + err = ne6x_adpt_resetup(pf, true); + } + + clear_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + clear_bit(NE6X_CONFIG_BUSY, pf->state); + if (err) + goto err_recovery; + + return 0; + } + + clear_bit(NE6X_TIMEOUT_RECOVERY_PENDING, pf->state); + clear_bit(NE6X_CONFIG_BUSY, pf->state); + return -EBUSY; + } + + status = ne6x_mbx_init_snapshot(&pf->hw, num_vfs); + if (status) + return ne6x_status_to_errno(status); + + err = ne6x_pci_sriov_ena(pf, num_vfs); + if (err) { + ne6x_mbx_deinit_snapshot(&pf->hw); + clear_bit(NE6X_CONFIG_BUSY, pf->state); + return err; + } + + PBMP_CLEAR(port_bitmap); + + /* config vport, default vlan */ + ne6x_for_each_vf(pf, vf_id) { + vf = &pf->vf[vf_id]; + adpt = vf->adpt; + + /* config default vlan */ + PBMP_PORT_ADD(port_bitmap, adpt->vport); + ne6x_dev_set_vport(adpt); + adpt->hw_feature = ne6x_dev_get_features(adpt); + } + + err = pci_enable_sriov(pf->pdev, num_vfs); + if (err) + goto err_hanler; + + clear_bit(NE6X_CONFIG_BUSY, pf->state); + + return num_vfs; + +err_hanler: + ne6x_dev_clear_vport(pf); + /* config vport, default vlan */ + ne6x_for_each_pf(pf, vf_id) { + adpt = pf->adpt[vf_id]; + adpt->port_info->hw_queue_base = adpt->port_info->hw_queue_base_old; + ne6x_dev_set_vport(adpt); + } + + if (!pci_vfs_assigned(pdev)) { + ne6x_mbx_deinit_snapshot(&pf->hw); + ne6x_free_vfs(pf); + pf->num_alloc_vfs = 0; + if (test_bit(NE6X_FLAG_SRIOV_ENA, pf->state)) + clear_bit(NE6X_FLAG_SRIOV_ENA, pf->state); + } + +err_recovery: + clear_bit(NE6X_CONFIG_BUSY, pf->state); + return err; +} + +static int ne6x_validate_vf_id(struct ne6x_pf *pf, u16 vf_id) +{ + /* vf_id range is only valid for 0-255, and should always be unsigned */ + if (vf_id >= pf->num_alloc_vfs) + return -EINVAL; + + return 0; +} + +static int ne6x_validate_outer_vf_id(struct ne6x_pf *pf, u16 out_vf_id) +{ + if (out_vf_id >= (pf->num_alloc_vfs / pf->num_alloc_adpt)) + return -EINVAL; + + return 0; +} + +int ne6x_sdk_send_msg_to_vf(struct ne6x_hw 
*hw, u16 vfid, u32 v_opcode, + u32 v_retval, u8 *msg, u16 msglen) +{ + union u_ne6x_mbx_snap_buffer_data usnap; + struct ne6x_pf *pf = hw->back; + struct ne6x_vf *vf = &pf->vf[vfid]; + int timeout = 2000; + int i; + + usnap.snap.state = v_retval; + usnap.snap.len = msglen; + usnap.snap.type = v_opcode; + + for (i = 0; i < msglen && i < 6; i++) + usnap.snap.data[i] = msg[i]; + + while (!(pf->hw.ne6x_mbx_ready_to_send[vfid])) { + usleep_range(100, 200); + timeout--; + if (!timeout) + break; + } + + wr64_bar4(hw, NE6X_PF_MAILBOX_ADDR(vf->base_queue), usnap.val); + wr64_bar4(hw, NE6X_PF_CON_ADDR(NE6X_PF_DB_INT_REQ), (1ULL << vf->base_queue)); + pf->hw.mbx_snapshot.state = NE6X_MAL_VF_DETECT_STATE_TRAVERSE; + pf->hw.ne6x_mbx_ready_to_send[vfid] = false; + + return 0; +} + +static int ne6x_vc_send_msg_to_vf(struct ne6x_vf *vf, u32 v_opcode, + enum virtchnl_status_code v_retval, + u8 *msg, u16 msglen) +{ + struct device *dev; + struct ne6x_pf *pf; + int aq_ret; + + if (!vf) + return -EINVAL; + + pf = vf->pf; + dev = ne6x_pf_to_dev(pf); + + if (ne6x_validate_vf_id(pf, vf->vf_id)) { + dev_err(dev, "vf id[%d] is invalid\n", vf->vf_id); + return -EINVAL; + } + + /* single place to detect unsuccessful return values */ + if (v_retval) + dev_info(dev, "VF %d failed opcode %s, retval: %s\n", vf->vf_id, + ne6x_opcode_str(v_opcode), ne6x_mbox_status_str(v_retval)); + + aq_ret = ne6x_sdk_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval, msg, msglen); + if (aq_ret) { + dev_info(dev, "Unable to send the message to VF %d aq_err %d\n", vf->vf_id, aq_ret); + return -EIO; + } + + return 0; +} + +static int ne6x_check_vf_init(struct ne6x_pf *pf, struct ne6x_vf *vf) +{ + if (!test_bit(NE6X_VF_STATE_INIT, vf->vf_states)) { + dev_err(ne6x_pf_to_dev(pf), "VF ID: %u in reset. 
Try again.\n", vf->vf_id); + return -EBUSY; + } + + return 0; +} + +static int ne6x_vc_add_def_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *vc_ether_addr) +{ + struct device *dev = ne6x_pf_to_dev(vf->pf); + u8 *mac_addr = vc_ether_addr->addr; + + if (!is_unicast_ether_addr(mac_addr)) { + dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); + return -EPERM; + } + + if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr)) { + dev_err(dev, "vf already use the same addr\n"); + return -EPERM; + } + + ether_addr_copy(vf->dev_lan_addr.addr, mac_addr); + ne6x_adpt_add_mac(adpt, mac_addr, true); + + return 0; +} + +static int ne6x_vc_del_def_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, u8 *mac) +{ + return ne6x_adpt_del_mac(adpt, mac, true); +} + +static int ne6x_vc_get_vf_res_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *vfres = NULL; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_ether_addr vc_ether_addr; + struct ne6x_pf *pf = vf->pf; + struct ne6x_adapter *pf_adpt; + int len, ret; + + if (ne6x_check_vf_init(pf, vf)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + vc_ether_addr.addr[0] = rsvsnap->snap.data[0]; + vc_ether_addr.addr[1] = rsvsnap->snap.data[1]; + vc_ether_addr.addr[2] = rsvsnap->snap.data[2]; + vc_ether_addr.addr[3] = rsvsnap->snap.data[3]; + vc_ether_addr.addr[4] = rsvsnap->snap.data[4]; + vc_ether_addr.addr[5] = rsvsnap->snap.data[5]; + + pf_adpt = vf->adpt; + + ne6x_vc_add_def_mac_addr(vf, pf_adpt, &vc_ether_addr); + + len = sizeof(union u_ne6x_mbx_snap_buffer_data); + vfres = kzalloc(len, GFP_KERNEL); + if (!vfres) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + len = 0; + goto err; + } + + vfres->snap.type = VIRTCHNL_OP_GET_VF_RESOURCES; + 
vfres->snap.data[0] = vf->vf_id; /* vport */ + vfres->snap.data[1] = pf_adpt->port_info->lport; /* lport */ + vfres->snap.data[2] = pf_adpt->port_info->hw_port_id; /* pport */ + vfres->snap.data[3] = pf_adpt->port_info->hw_queue_base; /* base_queue */ + vfres->snap.data[4] = pf->num_qps_per_vf; /* num_qps_per_vf */ + vfres->snap.data[5] = pf->num_alloc_vfs / pf->num_alloc_adpt; /* num vfs of per hw_port */ + vfres->snap.len = 6; + vf->ready = 0; + vf->adpt->port_info->phy.link_info.link_info = 0; + vf->ready_to_link_notify = 0; + set_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states); + +err: + /* send the response back to the VF */ + vfres->snap.state = v_ret; + ret = ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, + vfres->snap.state, + (u8 *)vfres->snap.data, + vfres->snap.len); + + return ret; +} + +static int ne6x_vc_add_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *vc_ether_addr) +{ + u8 *mac_addr = vc_ether_addr->addr; + int ret = 0; + + if (likely(is_multicast_ether_addr(mac_addr))) { + if (is_broadcast_ether_addr(mac_addr)) + return 0; + + ne6x_adpt_add_mac(adpt, mac_addr, false); + } else { + ne6x_adpt_add_mac(adpt, mac_addr, true); + } + + return ret; +} + +static int ne6x_vc_del_mac_addr(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *vc_ether_addr) +{ + u8 *mac_addr = vc_ether_addr->addr; + int ret = 0; + + if (likely(is_multicast_ether_addr(mac_addr))) { + if (is_broadcast_ether_addr(mac_addr)) + return 0; + + ne6x_adpt_del_mac(adpt, mac_addr, false); + } else { + ne6x_adpt_del_mac(adpt, mac_addr, true); + } + + return ret; +} + +static int ne6x_vc_handle_mac_addr_msg(struct ne6x_vf *vf, u8 *msg, bool set) +{ + int (*ne6x_vc_cfg_mac)(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + struct virtchnl_ether_addr *virtchnl_ether_addr); + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *usnap; + struct virtchnl_ether_addr eth_addr; + enum 
virtchnl_ops vc_op; + struct ne6x_adapter *adpt; + u8 *mac_addr; + int result; + + if (set) { + vc_op = VIRTCHNL_OP_ADD_ETH_ADDR; + ne6x_vc_cfg_mac = ne6x_vc_add_mac_addr; + } else { + vc_op = VIRTCHNL_OP_DEL_ETH_ADDR; + ne6x_vc_cfg_mac = ne6x_vc_del_mac_addr; + } + + adpt = ne6x_get_vf_adpt(vf); + if (!adpt) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto handle_mac_exit; + } + + usnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + mac_addr = usnap->snap.data; + + if (is_broadcast_ether_addr(mac_addr) || is_zero_ether_addr(mac_addr)) + goto handle_mac_exit; + + if (ether_addr_equal(vf->dev_lan_addr.addr, mac_addr)) + goto handle_mac_exit; + + ether_addr_copy(eth_addr.addr, mac_addr); + result = ne6x_vc_cfg_mac(vf, adpt, ð_addr); + if (result == -EEXIST || result == -ENOENT) { + goto handle_mac_exit; + } else if (result) { + v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR; + goto handle_mac_exit; + } + +handle_mac_exit: + /* send the response to the VF */ + return ne6x_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0); +} + +static int ne6x_vc_add_mac_addr_msg(struct ne6x_vf *vf, u8 *msg) +{ + return ne6x_vc_handle_mac_addr_msg(vf, msg, true); +} + +static int ne6x_vc_del_mac_addr_msg(struct ne6x_vf *vf, u8 *msg) +{ + return ne6x_vc_handle_mac_addr_msg(vf, msg, false); +} + +static int ne6x_vf_set_adpt_promisc(struct ne6x_vf *vf, struct ne6x_adapter *adpt, + u8 promisc_m) +{ + int status = 0; + + dev_info(ne6x_pf_to_dev(adpt->back), "%s: adpt->vport = %d enable promiscuous <%s>\n", + __func__, adpt->vport, + (promisc_m & NE6X_UCAST_PROMISC_BITS) ? 
"unicast" : "multicast"); + + if (promisc_m & NE6X_UCAST_PROMISC_BITS) + status = ne6x_dev_set_uc_promiscuous_enable(adpt, true); + + if (promisc_m & NE6X_MCAST_PROMISC_BITS) + status = ne6x_dev_set_mc_promiscuous_enable(adpt, true); + + if (status) { + dev_err(ne6x_pf_to_dev(adpt->back), "disable Tx/Rx filter promiscuous mode off VF-%u mac: %d, trunk: 0x%x, failed, error: %d\n", + vf->vf_id, 0, adpt->port_info->hw_trunk_id, status); + return status; + } + + return 0; +} + +static int ne6x_vf_clear_adpt_promisc(struct ne6x_vf *vf, struct ne6x_adapter *adpt, u8 promisc_m) +{ + int status = 0; + + dev_info(ne6x_pf_to_dev(adpt->back), "%s: adpt->vport = %d clear promiscuous <%s>\n", + __func__, adpt->vport, + (promisc_m & NE6X_UCAST_PROMISC_BITS) ? "unicast" : "multicast"); + + if (promisc_m & NE6X_UCAST_PROMISC_BITS) + status = ne6x_dev_set_uc_promiscuous_enable(adpt, false); + + if (promisc_m & NE6X_MCAST_PROMISC_BITS) + status = ne6x_dev_set_mc_promiscuous_enable(adpt, false); + + if (status) { + dev_err(ne6x_pf_to_dev(adpt->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n", + vf->vf_id, status); + return status; + } + + return 0; +} + +static int ne6x_vc_cfg_promiscuous_mode_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *usnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + struct virtchnl_promisc_info *info = (struct virtchnl_promisc_info *)usnap->snap.data; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + bool alluni = false, allmulti = false; + int ucast_err = 0, mcast_err = 0; + struct ne6x_pf *pf = vf->pf; + u8 mcast_m, ucast_m; + struct ne6x_adapter *adpt; + struct device *dev; + + if (!test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + adpt = ne6x_get_vf_adpt(vf); + if (!adpt) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + dev = ne6x_pf_to_dev(pf); + + if (info->flags & FLAG_VF_UNICAST_PROMISC) + alluni = true; + 
+ if (info->flags & FLAG_VF_MULTICAST_PROMISC) + allmulti = true; + + mcast_m = NE6X_MCAST_PROMISC_BITS; + ucast_m = NE6X_UCAST_PROMISC_BITS; + + if (alluni) + ucast_err = ne6x_vf_set_adpt_promisc(vf, adpt, ucast_m); + else + ucast_err = ne6x_vf_clear_adpt_promisc(vf, adpt, ucast_m); + + if (allmulti) + mcast_err = ne6x_vf_set_adpt_promisc(vf, adpt, mcast_m); + else + mcast_err = ne6x_vf_clear_adpt_promisc(vf, adpt, mcast_m); + + if (!mcast_err) { + if (allmulti && !test_and_set_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", + vf->vf_id); + else if (!allmulti && test_and_clear_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n", + vf->vf_id); + } + + if (!ucast_err) { + if (alluni && !test_and_set_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", + vf->vf_id); + else if (!alluni && test_and_clear_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states)) + dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", + vf->vf_id); + } + +error_param: + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, v_ret, NULL, 0); +} + +static bool ne6x_is_vf_link_up(struct ne6x_vf *vf) +{ + struct ne6x_port_info *pi = ne6x_vf_get_port_info(vf); + struct ne6x_pf *pf = vf->pf; + + if (ne6x_check_vf_init(pf, vf)) + return false; + + if (vf->link_forced) + return vf->link_up; + else + return pi->phy.link_info.link_info & NE6X_AQ_LINK_UP; +} + +u32 ne6x_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed) +{ + u32 speed; + + switch (link_speed) { + case NE6X_LINK_SPEED_10GB: + speed = NE6X_LINK_SPEED_10GB; + break; + case NE6X_LINK_SPEED_25GB: + speed = NE6X_LINK_SPEED_25GB; + break; + case NE6X_LINK_SPEED_40GB: + speed = NE6X_LINK_SPEED_40GB; + break; + case NE6X_LINK_SPEED_100GB: + speed = NE6X_LINK_SPEED_100GB; + break; + default: + speed = 
NE6X_LINK_SPEED_UNKNOWN; + break; + } + + return speed; +} + +static void ne6x_set_pfe_link(struct ne6x_vf *vf, struct virtchnl_pf_event *pfe, + int ne6x_link_speed, bool link_up) +{ + pfe->link_status = link_up; + /* Speed in Mbps */ + if (link_up && vf->link_forced) + ne6x_link_speed = NE6X_LINK_SPEED_25GB; + + pfe->link_speed = ne6x_conv_link_speed_to_virtchnl(true, ne6x_link_speed); +} + +void ne6x_vc_notify_vf_link_state(struct ne6x_vf *vf) +{ + struct virtchnl_pf_event pfe = {0}; + struct ne6x_hw *hw = &vf->pf->hw; + struct ne6x_port_info *pi; + u8 data[6] = {0}; + + pi = ne6x_vf_get_port_info(vf); + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + + if (ne6x_is_vf_link_up(vf)) + ne6x_set_pfe_link(vf, &pfe, pi->phy.link_info.link_speed, true); + else + ne6x_set_pfe_link(vf, &pfe, NE6X_LINK_SPEED_UNKNOWN, false); + + data[0] = pfe.event; + data[1] = (pfe.link_speed >> 24) & 0xff; + data[2] = (pfe.link_speed >> 16) & 0xff; + data[3] = (pfe.link_speed >> 8) & 0xff; + data[4] = (pfe.link_speed >> 0) & 0xff; + data[5] = pfe.link_status; + + ne6x_sdk_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, + (u8 *)data, 6); +} + +void ne6x_vc_notify_link_state(struct ne6x_vf *vf) +{ + if (vf->ready_to_link_notify) + ne6x_vc_notify_vf_link_state(vf); +} + +static void ne6x_vc_notify_vf_reset(struct ne6x_vf *vf) +{ + struct virtchnl_pf_event pfe; + struct ne6x_pf *pf; + u8 data[6] = {0}; + + if (!vf) + return; + + pf = vf->pf; + if (ne6x_validate_vf_id(pf, vf->vf_id)) + return; + + /* Bail out if VF is in disabled state, neither initialized, nor active + * state - otherwise proceed with notifications + */ + if ((!test_bit(NE6X_VF_STATE_INIT, vf->vf_states) && + !test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) || + test_bit(NE6X_VF_STATE_DIS, vf->vf_states)) + return; + + pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; + data[0] = pfe.event; + ne6x_sdk_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, + VIRTCHNL_STATUS_SUCCESS, + (u8 *)data, 1); +} + 
+static void ne6x_vc_notify_vf_trust_change(struct ne6x_vf *vf) +{ + struct virtchnl_vf_config vfconfig = {0}; + struct ne6x_hw *hw = &vf->pf->hw; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + u8 data[6] = {0}; + + dev = ne6x_pf_to_dev(pf); + vfconfig.type = VIRTCHNL_VF_CONFIG_TRUST; + if (test_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag)) + vfconfig.data[0] = 1; + else + vfconfig.data[0] = 0; + + data[0] = vfconfig.type; + data[1] = vfconfig.data[0]; + dev_info(dev, "vfconfig_type = %d,data = %d\n", data[0], data[1]); + ne6x_sdk_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_VF_CONFIG, + VIRTCHNL_STATUS_SUCCESS, + (u8 *)data, 2); +} + +bool ne6x_reset_vf(struct ne6x_vf *vf, bool is_vflr) +{ + struct ne6x_adapter *adpt; + + adpt = ne6x_get_vf_adpt(vf); + + if (test_bit(NE6X_VF_STATE_QS_ENA, vf->vf_states)) + ne6x_dis_vf_qs(vf); + + if (test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) { + clear_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states); + adpt->port_info->phy.link_info.link_info = 0x0; + if (is_vflr) + vf->rx_tx_state = false; + } + + if (test_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states)) + clear_bit(NE6X_VF_STATE_UC_PROMISC, vf->vf_states); + + if (test_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states)) + clear_bit(NE6X_VF_STATE_MC_PROMISC, vf->vf_states); + + return 0; +} + +static void ne6x_vc_reset_vf(struct ne6x_vf *vf, bool update_tx_rx) +{ + ne6x_vc_notify_vf_reset(vf); + ne6x_reset_vf(vf, update_tx_rx); +} + +static int ne6x_vc_request_qs_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *usnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + u16 req_queues = (usnap->snap.data[1] << 8) | usnap->snap.data[0]; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + u16 max_avail_vf_qps, max_allowed_vf_qps; + u8 req_reset = usnap->snap.data[2]; + bool need_update_rx_tx = false; + struct ne6x_pf *pf = vf->pf; + u16 tx_rx_queue_left; + u16 num_queue_pairs; + struct device *dev; + u16 cur_queues; + + ne6x_clear_vf_status(vf); + dev = 
ne6x_pf_to_dev(pf); + + if (!test_bit(NE6X_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + max_allowed_vf_qps = pf->num_qps_per_vf; + cur_queues = vf->num_vf_qs; + tx_rx_queue_left = cur_queues; + max_avail_vf_qps = tx_rx_queue_left + cur_queues; + + if (!req_queues) { + dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n", vf->vf_id); + } else if (req_queues > max_allowed_vf_qps) { + dev_err(dev, "VF %d tried to request more than %d queues.\n", vf->vf_id, + max_allowed_vf_qps); + num_queue_pairs = max_allowed_vf_qps; + } else if (req_queues > cur_queues && req_queues - cur_queues > tx_rx_queue_left) { + dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n", vf->vf_id, + req_queues - cur_queues, tx_rx_queue_left); + num_queue_pairs = min_t(u16, max_avail_vf_qps, max_allowed_vf_qps); + } else { + if (req_queues != vf->num_req_qs) { + vf->num_req_qs = req_queues; + need_update_rx_tx = true; + } + if (req_reset) { + ne6x_vc_reset_vf(vf, need_update_rx_tx); + } else { + vf->ready = false; + if (need_update_rx_tx) + vf->rx_tx_state = false; + + vf->adpt->port_info->phy.link_info.link_info = 0x0; + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, + VIRTCHNL_STATUS_SUCCESS, NULL, 0); + } + + return 0; + } + +error_param: + /* send the response to the VF */ + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, v_ret, (u8 *)&num_queue_pairs, + 2); +} + +static int ne6x_vc_config_mtu_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_adapter *adpt = vf->adpt; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + u16 *mtu; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + mtu = (u16 *)(rsvsnap->snap.data); + + dev = ne6x_pf_to_dev(pf); + dev_info(dev, "%s: mtu = %d\n", __func__, *mtu); + ne6x_dev_set_mtu(adpt, *mtu); + + return ne6x_vc_send_msg_to_vf(vf, 
VIRTCHNL_OP_CONFIG_MTU, v_ret, NULL, 0); +} + +struct virtchnl_vlan_info { + u16 vlan_id; + s16 flags; +}; + +static int ne6x_vc_config_vlan_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_vlan_info *dpdk_vlan; + struct ne6x_adapter *adpt = vf->adpt; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + struct ne6x_vlan vlan; + int ret; + + dev = ne6x_pf_to_dev(pf); + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + dpdk_vlan = (struct virtchnl_vlan_info *)rsvsnap->snap.data; + if (dpdk_vlan->flags) { + dev_info(dev, "%s: flags = %d vlan id = %d\n", __func__, dpdk_vlan->flags, + dpdk_vlan->vlan_id); + + vlan = NE6X_VLAN(ETH_P_8021Q, dpdk_vlan->vlan_id, 0); + ret = ne6x_adpt_add_vlan(adpt, vlan); + if (!ret) { + dev_info(dev, "%s: add vlan id success\n", __func__); + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + } else { + dev_info(dev, "%s: add vlan id failed\n", __func__); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + } + } else { + dev_info(dev, "%s: flags = %d vlan id = %d\n", __func__, dpdk_vlan->flags, + dpdk_vlan->vlan_id); + + vlan = NE6X_VLAN(ETH_P_8021Q, dpdk_vlan->vlan_id, 0); + ret = ne6x_adpt_del_vlan(adpt, vlan); + if (ret) { + dev_info(dev, "%s: del vlan id failed\n", __func__); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + } else { + dev_info(dev, "%s: del vlan id success\n", __func__); + set_bit(NE6X_ADPT_VLAN_FLTR_CHANGED, adpt->comm.state); + } + } + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VLAN, v_ret, NULL, 0); +} + +#define ETH_VLAN_STRIP_MASK 0x0001 +#define ETH_VLAN_FILTER_MASK 0x0002 +#define ETH_QINQ_STRIP_MASK 0x0008 +#define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001 +#define DEV_RX_OFFLOAD_QINQ_STRIP 0x00000020 +#define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200 + +struct virtchnl_vlan_offload_info { + u16 mask; + u16 feature; +}; + +static int ne6x_vc_config_vlan_offload_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum 
virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_vlan_offload_info *offload; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_adapter *adpt = vf->adpt; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + + dev = ne6x_pf_to_dev(pf); + adpt->hw_feature = ne6x_dev_get_features(adpt); + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + offload = (struct virtchnl_vlan_offload_info *)rsvsnap->snap.data; + + if (offload->mask & ETH_VLAN_FILTER_MASK) { + dev_info(dev, "%s: ETH_VLAN_FILTER_MASK\n", __func__); + if (offload->feature & DEV_RX_OFFLOAD_VLAN_FILTER) { + dev_info(dev, "%s: ETH_VLAN_FILTER ON\n", __func__); + adpt->hw_feature |= (NE6X_F_RX_VLAN_FILTER); + } else { + dev_info(dev, "%s: ETH_VLAN_FILTER OFF\n", __func__); + adpt->hw_feature &= ~(NE6X_F_RX_VLAN_FILTER); + } + } + + if (offload->mask & ETH_VLAN_STRIP_MASK) { + dev_info(dev, "%s: ETH_VLAN_STRIP_MASK\n", __func__); + if (offload->feature & DEV_RX_OFFLOAD_VLAN_STRIP) { + dev_info(dev, "%s: ETH_VLAN_STRIP ON\n", __func__); + adpt->hw_feature |= NE6X_F_RX_VLAN_STRIP; + } else { + dev_info(dev, "%s: ETH_VLAN_STRIP OFF\n", __func__); + adpt->hw_feature &= ~NE6X_F_RX_VLAN_STRIP; + } + } + + if (offload->mask & ETH_QINQ_STRIP_MASK) { + dev_info(dev, "%s: ETH_QINQ_STRIP_MASK\n", __func__); + if (offload->feature & DEV_RX_OFFLOAD_QINQ_STRIP) { + dev_info(dev, "%s: ETH_QINQ_STRIP ON\n", __func__); + adpt->hw_feature |= NE6X_F_RX_QINQ_STRIP; + } else { + dev_info(dev, "%s: ETH_QINQ_STRIP OFF\n", __func__); + adpt->hw_feature &= ~NE6X_F_RX_QINQ_STRIP; + } + } + + ne6x_dev_set_features(adpt, adpt->hw_feature); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD, v_ret, NULL, 0); +} + +struct virtchnl_flow_ctrl_info { + u16 mode; + u16 high_water; +}; + +enum rte_eth_fc_mode { + RTE_FC_NONE = 0, /**< Disable flow control. */ + RTE_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. 
*/ + RTE_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */ + RTE_FC_FULL /**< Enable flow control on both side. */ +}; + +static int ne6x_vc_config_flow_ctrl_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_flow_ctrl_info *flow; + struct ne6x_adapter *adpt = vf->adpt; + struct ne6x_flowctrl flowctrl; + struct ne6x_pf *pf = vf->pf; + struct device *dev; + int ret; + + dev = ne6x_pf_to_dev(pf); + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + flow = (struct virtchnl_flow_ctrl_info *)rsvsnap->snap.data; + if (flow->mode == RTE_FC_FULL) { + flowctrl.rx_pause = 1; + flowctrl.tx_pause = 1; + } else if (flow->mode == RTE_FC_RX_PAUSE) { + flowctrl.rx_pause = 1; + } else if (flow->mode == RTE_FC_TX_PAUSE) { + flowctrl.tx_pause = 1; + } else { + flowctrl.rx_pause = 0; + flowctrl.tx_pause = 0; + } + + dev_info(dev, "%s: mode = %d high water = %d\n", __func__, flow->mode, flow->high_water); + ret = ne6x_dev_set_flowctrl(adpt, &flowctrl); + if (ret) { + dev_info(dev, "%s: set flow ctrl failed\n", __func__); + v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + } + + ret = ne6x_dev_set_vf_bw(adpt, flow->high_water); + if (ret) + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_FLOW_CTRL, v_ret, NULL, 0); +} + +static int ne6x_vc_config_rss_msg(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_adapter *adpt = vf->adpt; + u8 *data = (u8 *)&adpt->rss_info; + int i; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + + for (i = 0; i < rsvsnap->snap.len; i++) { + data[adpt->rss_size] = rsvsnap->snap.data[i]; + adpt->rss_size++; + } + + if (adpt->rss_size >= sizeof(struct ne6x_rss_info)) { + adpt->rss_size = 0; + ne6x_dev_set_rss(adpt, &adpt->rss_info); + } + + return ne6x_vc_send_msg_to_vf(vf, 
VIRTCHNL_OP_CONFIG_RSS, v_ret, NULL, 0); +} + +static int ne6x_vc_changed_rss_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_adapter *adpt = vf->adpt; + int i, ret; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + memcpy(&adpt->num_queue, rsvsnap->snap.data, sizeof(adpt->num_queue)); + + if (adpt->rss_info.ind_table_size > NE6X_RSS_MAX_IND_TABLE_SIZE) + adpt->rss_info.ind_table_size = NE6X_RSS_MAX_IND_TABLE_SIZE; + + for (i = 0; i < adpt->rss_info.ind_table_size; i++) + adpt->rss_info.ind_table[i] = ethtool_rxfh_indir_default(i, adpt->num_queue); + + ret = ne6x_dev_set_rss(adpt, &adpt->rss_info); + ret |= ne6x_dev_add_unicast_for_fastmode(adpt, vf->dev_lan_addr.addr); + ret |= ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CHANGED_RSS, + VIRTCHNL_STATUS_SUCCESS, NULL, 0); + + return ret; +} + +static int ne6x_vc_add_vlan_msg(struct ne6x_vf *vf, u8 *msg) +{ + struct ne6x_vlan vlan; + u16 vlan_tpid = 0; + u16 vlan_id = 0; + + vlan_id = *((u16 *)msg); + vlan_tpid = *((u16 *)(msg + 2)); + dev_info(&vf->pf->pdev->dev, "%s:vlan tpid:%04x,vlan id:%04x\n", + __func__, vlan_tpid, vlan_id); + + vlan = NE6X_VLAN(vlan_tpid, vlan_id, 0); + + dev_info(&vf->pf->pdev->dev, "%s:vfp_vid %04x\n", __func__, vf->vfp_vid); + + ne6x_adpt_add_vlan(vf->adpt, vlan); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ne6x_vc_del_vlan_msg(struct ne6x_vf *vf, u8 *msg) +{ + struct ne6x_vlan vlan; + u16 vlan_tpid = 0; + u16 vlan_id = 0; + + vlan_id = *((u16 *)msg); + vlan_tpid = *((u16 *)(msg + 2)); + + dev_info(&vf->pf->pdev->dev, "%s:vlan tpid:%04x,vlan id:%04x\n", __func__, vlan_tpid, + vlan_id); + vlan = NE6X_VLAN(vlan_tpid, vlan_id, 0); + + ne6x_adpt_del_vlan(vf->adpt, vlan); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ne6x_vc_config_offload_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data 
*rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + struct ne6x_adapter *adpt = vf->adpt; + + adpt->hw_feature = rsvsnap->snap.data[3]; + adpt->hw_feature = adpt->hw_feature << 8; + adpt->hw_feature |= rsvsnap->snap.data[2]; + adpt->hw_feature = adpt->hw_feature << 8; + adpt->hw_feature |= rsvsnap->snap.data[1]; + adpt->hw_feature = adpt->hw_feature << 8; + adpt->hw_feature |= rsvsnap->snap.data[0]; + + if (vf->tx_rate) + adpt->hw_feature |= NE6X_F_TX_QOSBANDWIDTH; + else + adpt->hw_feature &= ~NE6X_F_TX_QOSBANDWIDTH; + + ne6x_dev_set_features(adpt, adpt->hw_feature); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_OFFLOAD, VIRTCHNL_STATUS_SUCCESS, NULL, + 0); +} + +static int ne6x_vc_request_feature_msg(struct ne6x_vf *vf, u8 *msg) +{ + struct ne6x_adapter *adpt = vf->adpt; + + adpt->hw_feature = ne6x_dev_get_features(adpt); + dev_info(&vf->pf->pdev->dev, "%s: vf->vf_id =%d vport = %d lport = %d pport = %d hw_queue_base = %d hw_feature = %08X\n", + __func__, vf->vf_id, adpt->vport, adpt->port_info->lport, + adpt->port_info->hw_port_id, adpt->port_info->hw_queue_base, adpt->hw_feature); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_FEATURE, VIRTCHNL_STATUS_SUCCESS, + (u8 *)&adpt->hw_feature, sizeof(u32)); +} + +static int ne6x_vc_reset_vf_msg(struct ne6x_vf *vf, u8 *msg) +{ + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_ether_addr vc_ether_addr; + + vf->ready = false; + vf->rx_tx_state = 0; + vf->adpt->port_info->phy.link_info.link_info = false; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + vc_ether_addr.addr[0] = rsvsnap->snap.data[0]; + vc_ether_addr.addr[1] = rsvsnap->snap.data[1]; + vc_ether_addr.addr[2] = rsvsnap->snap.data[2]; + vc_ether_addr.addr[3] = rsvsnap->snap.data[3]; + vc_ether_addr.addr[4] = rsvsnap->snap.data[4]; + vc_ether_addr.addr[5] = rsvsnap->snap.data[5]; + + ne6x_dev_set_features(vf->adpt, 0); + ne6x_dev_del_vf_qinq(vf, 0, 0); + + vf->port_vlan_info = NE6X_VLAN(0, 0, 0); + vf->link_forced = 
false; + vf->trusted = false; + vf->tx_rate = 0; + clear_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + ne6x_dev_del_broadcast_leaf(ne6x_get_vf_adpt(vf)); + ne6x_adpt_clear_mac_vlan(ne6x_get_vf_adpt(vf)); + + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_RESET_VF, VIRTCHNL_STATUS_SUCCESS, NULL, 0); +} + +static int ne6x_get_logic_vf_id(struct net_device *netdev, int vf_id) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_adapter *adpt = np->adpt; + struct ne6x_pf *pf = adpt->back; + + return (adpt->idx * (pf->num_alloc_vfs / pf->num_alloc_adpt) + vf_id); +} + +int ne6x_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + struct ne6x_vf *vf; + int logic_vf_id; + int ret = 0; + + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + return ret; + + logic_vf_id = ne6x_get_logic_vf_id(netdev, vf_id); + + if (logic_vf_id >= pf->num_alloc_vfs) + return -EINVAL; + + vf = ne6x_get_vf_by_id(pf, logic_vf_id); + + netdev_info(netdev, "set vf-%d trust %s\n", vf_id, trusted ? "on" : "off"); + + if (!vf) { + netdev_err(netdev, "vf is NULL\n"); + return -EINVAL; + } + + /* Check if already ready ?*/ + if (!vf->ready) { + netdev_err(netdev, "vf is not ready\n"); + return (-1); + } + + /* Check if already trusted */ + if (trusted == vf->trusted) + return 0; + + vf->trusted = trusted; + + if (vf->trusted) { + set_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + } else { + clear_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag); + ne6x_vf_clear_adpt_promisc(vf, ne6x_get_vf_adpt(vf), + NE6X_UCAST_PROMISC_BITS | + NE6X_MCAST_PROMISC_BITS); + } + + ne6x_vc_notify_vf_trust_change(vf); + dev_info(ne6x_pf_to_dev(pf), "VF %u is now %strusted\n", + logic_vf_id, trusted ? 
"" : "un"); + + return 0; +} + +int ne6x_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state) +{ + struct ne6x_pf *pf = ne6x_netdev_to_pf(netdev); + int ret = 0, logic_vf_id; + struct ne6x_vf *vf; + + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + return ret; + + logic_vf_id = ne6x_get_logic_vf_id(netdev, vf_id); + + vf = ne6x_get_vf_by_id(pf, logic_vf_id); + if (!vf) + return -EINVAL; + + netdev_info(netdev, "set vf-%d link state %s\n", vf_id, + link_state == IFLA_VF_LINK_STATE_ENABLE + ? "enable" + : (link_state == IFLA_VF_LINK_STATE_DISABLE ? "disable" : "auto")); + + /* Check if already ready ?*/ + if (!vf->ready) + return (-1); + + if (!vf->trusted) + return (-1); + + switch (link_state) { + case IFLA_VF_LINK_STATE_AUTO: + vf->link_forced = false; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->link_forced = true; + vf->link_up = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->link_forced = true; + vf->link_up = false; + break; + default: + ret = -EINVAL; + goto out_put_vf; + } + + ne6x_vc_notify_vf_link_state(vf); + +out_put_vf: + return ret; +} + +static int ne6x_vc_modify_vf_mac(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct virtchnl_ether_addr vc_ether_addr; + struct ne6x_pf *pf = vf->pf; + struct ne6x_adapter *pf_adpt; + + if (ne6x_check_vf_init(pf, vf)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + vc_ether_addr.addr[0] = rsvsnap->snap.data[0]; + vc_ether_addr.addr[1] = rsvsnap->snap.data[1]; + vc_ether_addr.addr[2] = rsvsnap->snap.data[2]; + vc_ether_addr.addr[3] = rsvsnap->snap.data[3]; + vc_ether_addr.addr[4] = rsvsnap->snap.data[4]; + vc_ether_addr.addr[5] = rsvsnap->snap.data[5]; + + pf_adpt = vf->adpt; + if (!pf->adpt) + dev_info(ne6x_pf_to_dev(pf), "adpt is null vf %d\n", vf->vf_id); + + /* set zero addr mean clear mac */ + if 
(is_zero_ether_addr(vc_ether_addr.addr)) + return ne6x_vc_del_def_mac_addr(vf, pf_adpt, vf->dev_lan_addr.addr); + + if (is_valid_ether_addr(vf->dev_lan_addr.addr)) { + ne6x_vc_del_def_mac_addr(vf, pf_adpt, vf->dev_lan_addr.addr); + memset(vf->dev_lan_addr.addr, 0, 6); + } + + ne6x_vc_add_def_mac_addr(vf, pf_adpt, &vc_ether_addr); + +err: + /* send the response back to the VF */ + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_VF_ADDR, v_ret, vc_ether_addr.addr, 6); +} + +static int ne6x_vc_set_fast_mode(struct ne6x_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + union u_ne6x_mbx_snap_buffer_data *rsvsnap; + struct ne6x_pf *pf = vf->pf; + + rsvsnap = (union u_ne6x_mbx_snap_buffer_data *)msg; + + if (rsvsnap->snap.data[0]) { + vf->adpt->num_queue = rsvsnap->snap.data[1]; + v_ret = ne6x_dev_set_fast_mode(pf, true, vf->adpt->num_queue); + } else { + v_ret = ne6x_dev_set_fast_mode(pf, false, 0); + } + + /* send the response back to the VF */ + return ne6x_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_FAST_MDOE, v_ret, NULL, 0); +} + +void ne6x_vc_process_vf_msg(struct ne6x_pf *pf) +{ + union u_ne6x_mbx_snap_buffer_data usnap; + struct ne6x_hw *hw = &pf->hw; + struct ne6x_vf *vf = NULL; + struct ne6x_vlan vlan; + struct device *dev; + int err = 0; + int i; + + dev = ne6x_pf_to_dev(pf); + ne6x_for_each_vf(pf, i) { + if (pf->hw.mbx_snapshot.mbx_vf.vf_cntr[i]) { + vf = &pf->vf[i]; + usnap.val = rd64_bar4(hw, NE6X_VF_MAILBOX_ADDR(vf->base_queue)); + WARN(usnap.snap.len > 6, ">>>>>>>>>>>>>>>>>>recv VF mailbox error!!!<<<<<<<<<<<<<<<<<<<"); + switch (usnap.snap.type) { + case VIRTCHNL_OP_GET_VF_RESOURCES: + err = ne6x_vc_get_vf_res_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_TX_QUEUE: + case VIRTCHNL_OP_CONFIG_RX_QUEUE: + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_SUCCESS, + NULL, 0); + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_SUCCESS, + 
NULL, 0); + vf->ready = 1; + break; + case VIRTCHNL_OP_ADD_ETH_ADDR: + err = ne6x_vc_add_mac_addr_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_DEL_ETH_ADDR: + err = ne6x_vc_del_mac_addr_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_ADD_VLAN: + err = ne6x_vc_add_vlan_msg(vf, (u8 *)&usnap.snap.data); + break; + case VIRTCHNL_OP_DEL_VLAN: + err = ne6x_vc_del_vlan_msg(vf, (u8 *)&usnap.snap.data); + break; + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + ne6x_vc_cfg_promiscuous_mode_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_EVENT: + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_SUCCESS, + NULL, 0); + break; + case VIRTCHNL_OP_REQUEST_QUEUES: + err = ne6x_vc_request_qs_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_RSS: + err = ne6x_vc_config_rss_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_VLAN: + err = ne6x_vc_config_vlan_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_VLAN_OFFLOAD: + err = ne6x_vc_config_vlan_offload_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_MTU: + err = ne6x_vc_config_mtu_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_FLOW_CTRL: + err = ne6x_vc_config_flow_ctrl_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CHANGED_RSS: + err = ne6x_vc_changed_rss_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_CONFIG_OFFLOAD: + err = ne6x_vc_config_offload_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_GET_VF_FEATURE: + err = ne6x_vc_request_feature_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_RESET_VF: + err = ne6x_vc_reset_vf_msg(vf, (u8 *)&usnap); + break; + case VIRTCHNL_OP_GET_PORT_STATUS: + ne6x_dev_add_broadcast_leaf(ne6x_get_vf_adpt(vf)); + vlan = NE6X_VLAN(ETH_P_8021Q, 0xfff, 0); + ne6x_adpt_add_vlan(ne6x_get_vf_adpt(vf), vlan); + ne6x_vc_notify_vf_link_state(vf); + + if (!vf->ready_to_link_notify) + vf->ready_to_link_notify = 1; + + ne6x_linkscan_schedule(pf); + break; + case VIRTCHNL_OP_SET_VF_ADDR: + err = ne6x_vc_modify_vf_mac(vf, (u8 
*)&usnap); + break; + case VIRTCHNL_OP_SET_FAST_MDOE: + err = ne6x_vc_set_fast_mode(vf, (u8 *)&usnap); + break; + /* VIRTCHNL_OP_VERSION not used */ + default: + dev_err(dev, "Unsupported opcode %s from VF %d\n", + ne6x_opcode_str(usnap.snap.type), i); + err = ne6x_vc_send_msg_to_vf(vf, usnap.snap.type, + VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, + NULL, 0); + break; + } + pf->hw.mbx_snapshot.mbx_vf.vf_cntr[i] = false; + } + if (err) + /* Helper function cares less about error return values here + * as it is busy with pending work. + */ + dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n", i, + usnap.snap.type, err); + } + + if (test_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state)) + clear_bit(NE6X_MAILBOXQ_EVENT_PENDING, pf->state); +} + +int ne6x_get_vf_config(struct net_device *netdev, int vf_id, + struct ifla_vf_info *ivi) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_adapter *adpt = np->adpt; + struct ne6x_pf *pf = adpt->back; + struct ne6x_vf *vf; + int logic_vfid = 0; + int ret = 0; + + /* validate the request */ + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + goto error_param; + + logic_vfid = ne6x_get_logic_vf_id(netdev, vf_id); + vf = &pf->vf[logic_vfid]; + /* first adpt is always the LAN adpt */ + adpt = pf->adpt[vf->lan_adpt_idx]; + if (!adpt) { + ret = -ENOENT; + goto error_param; + } + + ivi->vf = vf_id; + + ether_addr_copy(ivi->mac, vf->dev_lan_addr.addr); + + ivi->vlan = vf->port_vlan_info.vid; + ivi->qos = vf->port_vlan_info.prio; + if (vf->port_vlan_info.vid) + ivi->vlan_proto = cpu_to_be16(vf->port_vlan_info.tpid); + + if (!vf->link_forced) + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vf->link_up) + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; + + ivi->max_tx_rate = vf->tx_rate; + ivi->min_tx_rate = 0; + if (test_bit(NE6X_VF_CONFIG_FLAG_TRUSTED, vf->vf_config_flag)) + ivi->trusted = 1; + else + ivi->trusted = 0; + +error_param: + return ret; +} + 
+static void ne6x_calc_token_for_bw(int max_tx_rate, int *time_inv, int *tocken) +{ + if (max_tx_rate <= 100) { + *time_inv = 3910; + *tocken = max_tx_rate; + } else if (max_tx_rate <= 1000) { + *time_inv = 790; + *tocken = max_tx_rate / 5; + } else if (max_tx_rate < 5000) { + *time_inv = 395; + *tocken = max_tx_rate / 10; + } else if (max_tx_rate < 10000) { + *time_inv = 118; + *tocken = max_tx_rate / 33; + } else { + *time_inv = 39; + *tocken = max_tx_rate / 100; + } +} + +int ne6x_set_vf_bw_for_max_vpnum(struct ne6x_pf *pf, int vf_id, int max_tx_rate) +{ + union ne6x_sq_meter_cfg0 sq_meter_cfg0; + union ne6x_sq_meter_cfg1 sq_meter_cfg1; + union ne6x_sq_meter_cfg2 sq_meter_cfg2; + union ne6x_sq_meter_cfg3 sq_meter_cfg3; + struct ne6x_hw *hw = &pf->hw; + int time_inv = 0; + int tocken = 0; + + sq_meter_cfg3.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG3)); + sq_meter_cfg3.reg.csr_meter_pause_threshold_vp = 1; + wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG3), sq_meter_cfg3.val); + sq_meter_cfg2.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG2)); + sq_meter_cfg2.reg.csr_meter_resume_threshold_vp = 1; + wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG2), sq_meter_cfg2.val); + + sq_meter_cfg1.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG1)); + sq_meter_cfg1.reg.csr_meter_refresh_count_vp = max_tx_rate; + + if (max_tx_rate) { + ne6x_calc_token_for_bw(max_tx_rate, &time_inv, &tocken); + sq_meter_cfg1.reg.csr_meter_refresh_count_vp = tocken; + sq_meter_cfg1.reg.csr_meter_refresh_interval_vp = time_inv; + } else { + sq_meter_cfg1.reg.csr_meter_refresh_count_vp = 0x1; + sq_meter_cfg1.reg.csr_meter_refresh_interval_vp = 0x1; + } + + wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG1), sq_meter_cfg1.val); + sq_meter_cfg0.val = rd64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG0)); + sq_meter_cfg0.reg.csr_meter_pkt_token_num_vp = 0x1; + sq_meter_cfg0.reg.csr_meter_ipg_len_vp = 0x0; + 
sq_meter_cfg0.reg.csr_meter_refresh_en_vp = 0x1; + sq_meter_cfg0.reg.csr_meter_packet_mode_vp = 0x0; + + if (max_tx_rate) { + sq_meter_cfg0.reg.csr_meter_rate_limit_en_vp = 0x1; + sq_meter_cfg0.reg.csr_meter_refresh_en_vp = 0x1; + } else { + sq_meter_cfg0.reg.csr_meter_rate_limit_en_vp = 0x0; + sq_meter_cfg0.reg.csr_meter_refresh_en_vp = 0x0; + } + + wr64(hw, NE6X_VPINT_DYN_CTLN(vf_id, NE6X_SQ_METER_CFG0), sq_meter_cfg0.val); + + return 0; +} + +void ne6x_clr_vf_bw_for_max_vpnum(struct ne6x_pf *pf) +{ + int index; + + for (index = 0; index < NE6X_MAX_VP_NUM; index++) + ne6x_set_vf_bw_for_max_vpnum(pf, index, 0); +} + +int ne6x_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + struct ne6x_pf *pf = np->adpt->back; + struct ne6x_adapter *adpt; + struct ne6x_vf *vf; + int logic_vfid; + int ret; + + /* validate the request */ + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + goto error; + + logic_vfid = ne6x_get_logic_vf_id(netdev, vf_id); + vf = &pf->vf[logic_vfid]; + adpt = ne6x_get_vf_adpt(vf); + if (!adpt) { + ret = -EINVAL; + goto error; + } + + ret = ne6x_validata_tx_rate(adpt, logic_vfid, min_tx_rate, max_tx_rate); + if (ret) { + ret = -EINVAL; + goto error; + } + + if (!test_bit(NE6X_VF_STATE_INIT, vf->vf_states)) { + dev_err(&pf->pdev->dev, "VF %d still in reset. 
Try again.\n", logic_vfid); + ret = -EAGAIN; + goto error; + } + + if (pf->num_alloc_vfs == 64) + ret = ne6x_set_vf_bw_for_max_vpnum(pf, logic_vfid, max_tx_rate); + else + ret = ne6x_dev_set_vf_bw(adpt, max_tx_rate); + + if (ret) + goto error; + + vf->tx_rate = max_tx_rate; + + return 0; +error: + return ret; +} + +int ne6x_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct ne6x_netdev_priv *np = netdev_priv(netdev); + union u_ne6x_mbx_snap_buffer_data usnap; + struct ne6x_adapter *adpt = np->adpt; + struct ne6x_pf *pf = adpt->back; + struct ne6x_vf *vf; + int logic_vfid; + int ret; + + /* validate the request */ + ret = ne6x_validate_outer_vf_id(pf, vf_id); + if (ret) + goto error_param; + + logic_vfid = ne6x_get_logic_vf_id(netdev, vf_id); + vf = &pf->vf[logic_vfid]; + + adpt = ne6x_get_vf_adpt(vf); + if (!is_valid_ether_addr(mac)) { + dev_err(&pf->pdev->dev, "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); + ret = -EINVAL; + goto error_param; + } + + if (is_multicast_ether_addr(mac)) { + dev_err(&pf->pdev->dev, "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); + ret = -EINVAL; + goto error_param; + } + + if (ether_addr_equal(vf->dev_lan_addr.addr, mac)) { + dev_err(&pf->pdev->dev, "already use the same Ethernet address %pM for VF %d\n", + mac, vf_id); + goto error_param; + } + + /*simluate a msg from vf*/ + usnap.snap.type = VIRTCHNL_OP_SET_VF_ADDR; + usnap.snap.state = VIRTCHNL_STATUS_SUCCESS; + usnap.snap.len = 6; + memcpy(usnap.snap.data, mac, usnap.snap.len); + ret = ne6x_vc_modify_vf_mac(vf, (u8 *)&usnap); + +error_param: + return ret; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h new file mode 100644 index 000000000000..2f094d164fe3 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x/ne6x_virtchnl_pf.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/

#ifndef _NE6X_VIRTCHNL_PF_H
#define _NE6X_VIRTCHNL_PF_H

#include "mailbox.h"

/* sentinel for "no adapter index assigned" */
#define NE6X_NO_ADPT 0xffff

/* event codes carried in struct virtchnl_pf_event.event */
enum virtchnl_event_codes {
	VIRTCHNL_EVENT_UNKNOWN = 0,
	VIRTCHNL_EVENT_LINK_CHANGE,
	VIRTCHNL_EVENT_RESET_IMPENDING,
	VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
	VIRTCHNL_EVENT_DCF_ADPT_MAP_UPDATE,
};

/* PF -> VF asynchronous event notification payload */
struct virtchnl_pf_event {
	u8 event;
	u32 link_speed;
	u8 link_status;
};

/* overlay of the structured mailbox snapshot onto the raw 64-bit mailbox
 * register value (read/written as a single u64)
 */
union u_ne6x_mbx_snap_buffer_data {
	struct ne6x_mbx_snap_buffer_data snap;
	u64 val;
};

/* Specific VF states */
enum ne6x_vf_states {
	NE6X_VF_STATE_INIT = 0,	/* PF is initializing VF */
	NE6X_VF_STATE_ACTIVE,	/* VF resources are allocated for use */
	NE6X_VF_STATE_QS_ENA,	/* VF queue(s) enabled */
	NE6X_VF_STATE_DIS,
	NE6X_VF_STATE_MC_PROMISC,
	NE6X_VF_STATE_UC_PROMISC,
	NE6X_VF_STATES_NBITS
};

struct virtchnl_ether_addr {
	u8 addr[ETH_ALEN];
};

/* promiscuous-mode request payload: target adapter + FLAG_VF_* flags */
struct virtchnl_promisc_info {
	u16 adpt_id;
	u16 flags;
};

#define FLAG_VF_UNICAST_PROMISC 0x00000001
#define FLAG_VF_MULTICAST_PROMISC 0x00000002

/* per-direction promiscuous enables as programmed into the device */
enum ne6x_promisc_flags {
	NE6X_PROMISC_UCAST_RX = 0x1,
	NE6X_PROMISC_UCAST_TX = 0x2,
	NE6X_PROMISC_MCAST_RX = 0x4,
	NE6X_PROMISC_MCAST_TX = 0x8,
	NE6X_PROMISC_BCAST_RX = 0x10,
	NE6X_PROMISC_BCAST_TX = 0x20,
	NE6X_PROMISC_VLAN_RX = 0x40,
	NE6X_PROMISC_VLAN_TX = 0x80,
};

#define NE6X_UCAST_PROMISC_BITS (NE6X_PROMISC_UCAST_TX | NE6X_PROMISC_UCAST_RX)
#define NE6X_MCAST_PROMISC_BITS (NE6X_PROMISC_MCAST_TX | NE6X_PROMISC_MCAST_RX)

/* bit numbers for struct ne6x_vf.vf_config_flag */
enum ne6x_vf_config_flag {
	NE6X_VF_CONFIG_FLAG_TRUSTED = 0,
	NE6X_VF_CONFIG_FLAG_LINK_FORCED,
	NE6X_VF_CONFIG_FLAG_NBITS /* must be last */
};

/* 64-byte MAC table key; layout presumably matches a hardware record —
 * TODO confirm against the device programming guide
 */
struct ne6x_key {
	u8 rsv0;
	u8 pi;
	u8 mac_addr[6];
	u8 rsv1[56];
};

/* VF information structure */
struct ne6x_vf {
	struct ne6x_pf *pf;
	struct ne6x_adapter *adpt;

	u16 vf_id; /* VF ID in the PF space */
	u16 lan_adpt_idx; /* index into PF struct */
	/* first vector index of this VF in the PF space */
	u16 vfp_vid;
	u16 vfp_tpid;
	int
net_device *netdev, int vf_id,
				     int min_tx_rate, int max_tx_rate)
{
	return -EOPNOTSUPP;
}

static inline int ne6x_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
	return -EOPNOTSUPP;
}

#endif /* CONFIG_PCI_IOV */

#endif
diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h
new file mode 100644
index 000000000000..9ee06262f0fb
--- /dev/null
+++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf.h
@@ -0,0 +1,555 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */

#ifndef _NE6XVF_H
#define _NE6XVF_H

/* NOTE(review): the <...> targets of the following system includes were
 * stripped by the patch extraction — recover them from the original file
 * before building
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "reg.h"
#include "common.h"
#include "feature.h"
#include "txrx.h"
#include "mailbox.h"
#include "ne6xvf_virtchnl.h"

#define NE6XVF_MAX_AQ_BUF_SIZE 4096
#define NE6XVF_AQ_LEN 32
#define NE6XVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */

/* register address: 4 KiB per virtual port, 16 bytes per register offset */
#define NE6XVF_REG_ADDR(_VPID, _OFST) (((_VPID) << 12) + ((_OFST) << 4))

#define NE6XVF_DB_STATE 0x1a
#define NE6XVF_MAILBOX_DATA 0x19
#define NE6XVF_PF_MAILBOX_DATA 0x18

#define NE6XVF_QC_TAIL1(_Q) (((_Q) << 12) | (NE6X_CQ_HD_POINTER << 4)) /* _i=0...15 Reset: PFR */
#define NE6XVF_QTX_TAIL1(_Q) (((_Q) << 12) | (0 << 11) | 0) /* _i=0...15 Reset: PFR */
#define NE6XVF_QRX_TAIL1(_Q) (((_Q) << 12) | (1 << 11) | 0) /* _i=0...15 Reset: PFR */

/* mask-gated debug print, tagged with the device's PCI address */
#define ne6xvf_debug(h, m, s, ...) \
do { \
	if (((m) & (h)->debug_mask)) \
		pr_info("ncevf %02x:%02x.%x " s, \
			(h)->bus.bus_id, (h)->bus.device, \
			(h)->bus.func, ##__VA_ARGS__); \
} while (0)

#define hw_dbg(h, s, ...) \
	pr_debug("ncevf %02x:%02x.%x " s, \
		 (h)->bus.bus_id, (h)->bus.device, \
		 (h)->bus.func, ##__VA_ARGS__)

extern char ne6xvf_driver_name[];
extern const char ne6xvf_driver_version[];
extern struct workqueue_struct *ne6xvf_wq;

#define ne6xvf_init_spinlock(_sp) ne6xvf_init_spinlock_d(_sp)
#define ne6xvf_acquire_spinlock(_sp) ne6xvf_acquire_spinlock_d(_sp)
#define ne6xvf_release_spinlock(_sp) ne6xvf_release_spinlock_d(_sp)
#define ne6xvf_destroy_spinlock(_sp) ne6xvf_destroy_spinlock_d(_sp)

/* 64-bit MMIO accessors against BAR0 */
#define wr64(a, reg, value) writeq((value), ((a)->hw_addr0 + (reg)))
#define rd64(a, reg) readq((a)->hw_addr0 + (reg))

#define NE6XVF_READ_REG(hw, reg) rd64(hw, reg)
#define NE6XVF_WRITE_REG(hw, reg, value) wr64(hw, reg, value)

#define NE6XVF_MAX_REQ_QUEUES 32

#define NE6XVF_RESET_WAIT_MS 10
#define NE6XVF_RESET_WAIT_DETECTED_COUNT 50
#define NE6XVF_RESET_WAIT_COMPLETE_COUNT 2000

enum ne6xvf_critical_section_t {
	__NE6XVF_IN_CRITICAL_TASK, /* cannot be interrupted */
	__NE6XVF_IN_REMOVE_TASK, /* device being removed */
	__NE6XVF_TX_TSTAMP_IN_PROGRESS, /* PTP Tx timestamp request in progress */
};

/* VLAN filter list entry; the flag bits track pending PF synchronization */
struct ne6xvf_vlan_filter {
	struct list_head list;
	struct ne6x_vf_vlan vlan;
	struct {
		u8 is_new_vlan : 1; /* filter is new, wait for PF answer */
		u8 remove : 1; /* filter needs to be removed */
		u8 add : 1; /* filter needs to be added */
		u8 padding : 5;
	};
};

/* MAC filter list entry; same pending-sync flag scheme as the VLAN list */
struct ne6xvf_mac_filter {
	struct list_head list;
	u8 macaddr[ETH_ALEN];
	struct {
		u8 is_new_mac : 1; /* filter is new, wait for PF decision */
		u8 remove : 1; /* filter needs to be removed */
		u8 add : 1; /* filter needs to be added */
		u8 is_primary : 1; /* filter is a default VF MAC */
		u8 add_handled : 1; /* received response from PF for filter add */
		u8 padding : 3;
	};
};

/* Driver state. The order of these is important!
*/
enum ne6xvf_state_t {
	__NE6XVF_STARTUP, /* driver loaded, probe complete */
	__NE6XVF_REMOVE, /* driver is being unloaded */
	__NE6XVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
	__NE6XVF_INIT_EXTENDED_CAPS, /* process extended caps which require aq msg exchange */
	__NE6XVF_INIT_CONFIG_ADAPTER,
	__NE6XVF_INIT_SW, /* got resources, setting up structs */
	__NE6XVF_INIT_FAILED, /* init failed, restarting procedure */
	__NE6XVF_RESETTING, /* in reset */
	__NE6XVF_COMM_FAILED, /* communication with PF failed */
	/* Below here, watchdog is running */
	__NE6XVF_DOWN, /* ready, can be opened */
	__NE6XVF_DOWN_PENDING, /* descending, waiting for watchdog */
	__NE6XVF_TESTING, /* in ethtool self-test */
	__NE6XVF_RUNNING /* opened, working */
};

/* MAC addresses known for the function (LAN, permanent, SAN, port) */
struct ne6xvf_mac_info {
	u8 addr[ETH_ALEN];
	u8 perm_addr[ETH_ALEN];
	u8 san_addr[ETH_ALEN];
	u8 port_addr[ETH_ALEN];
	u16 max_fcoeq;
};

/* PCI bus speed in MHz / MT/s as reported by the device */
enum ne6xvf_bus_speed {
	ne6xvf_bus_speed_unknown = 0,
	ne6xvf_bus_speed_33 = 33,
	ne6xvf_bus_speed_66 = 66,
	ne6xvf_bus_speed_100 = 100,
	ne6xvf_bus_speed_120 = 120,
	ne6xvf_bus_speed_133 = 133,
	ne6xvf_bus_speed_2500 = 2500,
	ne6xvf_bus_speed_5000 = 5000,
	ne6xvf_bus_speed_8000 = 8000,
	ne6xvf_bus_speed_reserved
};

/* PCI bus/link width (lanes for PCIe, bits for legacy PCI) */
enum ne6xvf_bus_width {
	ne6xvf_bus_width_unknown = 0,
	ne6xvf_bus_width_pcie_x1 = 1,
	ne6xvf_bus_width_pcie_x2 = 2,
	ne6xvf_bus_width_pcie_x4 = 4,
	ne6xvf_bus_width_pcie_x8 = 8,
	ne6xvf_bus_width_32 = 32,
	ne6xvf_bus_width_64 = 64,
	ne6xvf_bus_width_reserved
};

enum ne6xvf_bus_type {
	ne6xvf_bus_type_unknown = 0,
	ne6xvf_bus_type_pci,
	ne6xvf_bus_type_pcix,
	ne6xvf_bus_type_pci_express,
	ne6xvf_bus_type_reserved
};

/* PCI topology/identity of this function */
struct ne6xvf_bus_info {
	enum ne6xvf_bus_speed speed;
	enum ne6xvf_bus_width width;
	enum ne6xvf_bus_type type;

	u16 func;
	u16 device;
	u16 lan_id;
	u16 bus_id;
};

/* resource limits the PF granted to this VF */
struct ne6xvf_hw_capabilities {
	u32 num_vsis;
	u32 num_rx_qp;
	u32 num_tx_qp;
	u32 base_queue;
	u32 num_msix_vectors_vf;
	u32 max_mtu;
	u32 chip_id;
	u32 mac_id;
	u32 lport;
	u32 vf_id;
	u32 num_vf_per_pf;
};

/* low-level hardware handle: BAR mappings, identity and mailbox state */
struct ne6xvf_hw {
	u8 __iomem *hw_addr0;
	u8 __iomem *hw_addr2;
	void *back;

	/* subsystem structs */
	struct ne6xvf_mac_info mac;
	struct ne6xvf_bus_info bus;

	/* pci info */
	u16 device_id;
	u16 vendor_id;
	u16 subsystem_device_id;
	u16 subsystem_vendor_id;
	u8 revision_id;

	/* capabilities for entire device and PCI func */
	struct ne6xvf_hw_capabilities dev_caps;

	struct ne6xvf_sdk_mbx_info mbx;

	/* debug mask */
	u32 debug_mask;
	char err_str[16];
};

struct ne6xvf_eth_stats {
	u64 rx_bytes; /* gorc */
	u64 rx_unicast; /* uprc */
	u64 rx_multicast; /* mprc */
	u64 rx_broadcast; /* bprc */
	u64 rx_discards; /* rdpc */
	u64 rx_unknown_protocol; /* rupp */
	u64 tx_bytes; /* gotc */
	u64 tx_unicast; /* uptc */
	u64 tx_multicast; /* mptc */
	u64 tx_broadcast; /* bptc */
	u64 tx_discards; /* tdpc */
	u64 tx_errors; /* tepc */
};

/* adapter->flags bits (driver runtime state) */
#define NE6XVF_FLAG_RX_CSUM_ENABLED BIT(0)
#define NE6XVF_FLAG_PF_COMMS_FAILED BIT(3)
#define NE6XVF_FLAG_RESET_PENDING BIT(4)
#define NE6XVF_FLAG_RESET_NEEDED BIT(5)
#define NE6XVF_FLAG_WB_ON_ITR_CAPABLE BIT(6)
#define NE6XVF_FLAG_PROMISC_ON BIT(13)
#define NE6XVF_FLAG_ALLMULTI_ON BIT(14)

#define NE6XVF_FLAG_LEGACY_RX BIT(15)
#define NE6XVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define NE6XVF_FLAG_QUEUES_ENABLED BIT(17)
#define NE6XVF_FLAG_QUEUES_DISABLED BIT(18)
#define NE6XVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
#define NE6XF_FLAG_REINIT_CHNL_NEEDED BIT(21)
#define NE6XF_FLAG_RESET_DETECTED BIT(22)
#define NE6XF_FLAG_INITIAL_MAC_SET BIT(23)

/* adapter->aq_required bits (pending admin-queue work) */
#define NE6XVF_FLAG_AQ_ENABLE_QUEUES BIT_ULL(0)
#define NE6XVF_FLAG_AQ_ADD_MAC_FILTER BIT_ULL(2)
#define NE6XVF_FLAG_AQ_ADD_VLAN_FILTER BIT_ULL(3)
#define NE6XVF_FLAG_AQ_DEL_MAC_FILTER BIT_ULL(4)
#define NE6XVF_FLAG_AQ_DEL_VLAN_FILTER BIT_ULL(5)
#define NE6XVF_FLAG_AQ_CONFIGURE_QUEUES BIT_ULL(6)
#define
NE6XVF_FLAG_AQ_MAP_VECTORS BIT_ULL(7) +#define NE6XVF_FLAG_AQ_HANDLE_RESET BIT_ULL(8) +#define NE6XVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */ +#define NE6XVF_FLAG_AQ_GET_CONFIG BIT_ULL(10) +/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ +#define NE6XVF_FLAG_AQ_GET_HENA BIT_ULL(11) +#define NE6XVF_FLAG_AQ_SET_HENA BIT_ULL(12) +#define NE6XVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13) +#define NE6XVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14) +#define NE6XVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15) +#define NE6XVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16) +#define NE6XVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17) +#define NE6XVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18) + +#define NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD BIT_ULL(38) +#define NE6XVF_FLAG_AQ_GET_FEATURE BIT_ULL(39) +#define NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS BIT_ULL(40) +#define NE6XVF_FLAG_AQ_SET_VF_MAC BIT_ULL(41) +#define NE6XVF_FLAG_AQ_CHANGED_RSS BIT_ULL(42) + +struct ne6xvf_adapter { + struct ne6x_adapt_comm comm; + struct work_struct sdk_task; + struct delayed_work watchdog_task; + wait_queue_head_t down_waitqueue; + wait_queue_head_t vc_waitqueue; + struct ne6x_q_vector *q_vectors; + struct list_head vlan_filter_list; + struct list_head mac_filter_list; + struct list_head macvlan_list; + /* Lock to protect accesses to MAC and VLAN lists */ + spinlock_t mac_vlan_list_lock; + char misc_vector_name[IFNAMSIZ + 9]; + u16 max_queues; + u16 num_active_queues; + u16 num_req_queues; + u32 hw_feature; + struct ne6x_ring *tg_rings; /* TG */ + struct ne6x_ring *cq_rings; /* CQ */ + u32 cq_desc_count; + + /* TX */ + struct ne6x_ring *tx_rings; + u32 tx_timeout_count; + u32 tx_desc_count; + + /* RX */ + struct ne6x_ring *rx_rings; + u64 hw_csum_rx_error; + u32 rx_desc_count; + int num_msix_vectors; + struct msix_entry *msix_entries; + + u32 flags; + + /* duplicates for common code */ +#define NE6XVF_FLAG_DCB_ENABLED 0 + + /* flags for admin queue service task */ + u64 aq_required; + + /* Lock to prevent possible 
clobbering of + * current_netdev_promisc_flags + */ + spinlock_t current_netdev_promisc_flags_lock; + + netdev_features_t current_netdev_promisc_flags; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + struct net_device_stats net_stats; + + struct ne6xvf_hw hw; /* defined in ne6xvf.h */ + + enum ne6xvf_state_t state; + enum ne6xvf_state_t last_state; + unsigned long crit_section; + + bool netdev_registered; + bool link_up; + enum ne6x_sdk_link_speed link_speed; + enum virtchnl_ops current_op; + struct virtchnl_vf_resource *vf_res; + struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ + + struct ne6xvf_eth_stats current_stats; + //struct ne6xvf_vsi vsi; + u16 msg_enable; + struct ne6x_rss_info rss_info; + u8 trusted; + +#ifdef CONFIG_DEBUG_FS + struct dentry *ne6xvf_dbg_pf; +#endif /* CONFIG_DEBUG_FS */ +}; + +#ifdef CONFIG_DEBUG_FS +#define NCE_DEBUG_CHAR_LEN 1024 + +struct ne6xvf_dbg_cmd_wr { + char command[NCE_DEBUG_CHAR_LEN]; + void (*command_proc)(struct ne6xvf_adapter *pf); +}; + +void ne6xvf_dbg_pf_init(struct ne6xvf_adapter *pf); +void ne6xvf_dbg_pf_exit(struct ne6xvf_adapter *pf); +void ne6xvf_dbg_init(void); +void ne6xvf_dbg_exit(void); +#else +static inline void ne6xvf_dbg_pf_init(struct ne6xvf_adapter *pf) { } +static inline void ne6xvf_dbg_pf_exit(struct ne6xvf_adapter *pf) { } +static inline void ne6xvf_dbg_init(void) { } +static inline void ne6xvf_dbg_exit(void) { } +#endif /* CONFIG_DEBUG_FS */ + +/* Error Codes */ +enum ne6xvf_status { + NE6XVF_SUCCESS = 0, + NE6XVF_ERR_NVM = -1, + NE6XVF_ERR_NVM_CHECKSUM = -2, + NE6XVF_ERR_PHY = -3, + NE6XVF_ERR_CONFIG = -4, + NE6XVF_ERR_PARAM = -5, + NE6XVF_ERR_MAC_TYPE = -6, + NE6XVF_ERR_UNKNOWN_PHY = -7, + NE6XVF_ERR_LINK_SETUP = -8, + NE6XVF_ERR_ADAPTER_STOPPED = -9, + NE6XVF_ERR_INVALID_MAC_ADDR = -10, + NE6XVF_ERR_DEVICE_NOT_SUPPORTED = -11, + NE6XVF_ERR_MASTER_REQUESTS_PENDING = -12, + NE6XVF_ERR_INVALID_LINK_SETTINGS = -13, + NE6XVF_ERR_AUTONEG_NOT_COMPLETE = -14, + 
NE6XVF_ERR_RESET_FAILED = -15, + NE6XVF_ERR_SWFW_SYNC = -16, + NE6XVF_ERR_NO_AVAILABLE_VSI = -17, + NE6XVF_ERR_NO_MEMORY = -18, + NE6XVF_ERR_BAD_PTR = -19, + NE6XVF_ERR_RING_FULL = -20, + NE6XVF_ERR_INVALID_PD_ID = -21, + NE6XVF_ERR_INVALID_QP_ID = -22, + NE6XVF_ERR_INVALID_CQ_ID = -23, + NE6XVF_ERR_INVALID_CEQ_ID = -24, + NE6XVF_ERR_INVALID_AEQ_ID = -25, + NE6XVF_ERR_INVALID_SIZE = -26, + NE6XVF_ERR_INVALID_ARP_INDEX = -27, + NE6XVF_ERR_INVALID_FPM_FUNC_ID = -28, + NE6XVF_ERR_QP_INVALID_MSG_SIZE = -29, + NE6XVF_ERR_QP_TOOMANY_WRS_POSTED = -30, + NE6XVF_ERR_INVALID_FRAG_COUNT = -31, + NE6XVF_ERR_QUEUE_EMPTY = -32, + NE6XVF_ERR_INVALID_ALIGNMENT = -33, + NE6XVF_ERR_FLUSHED_QUEUE = -34, + NE6XVF_ERR_INVALID_PUSH_PAGE_INDEX = -35, + NE6XVF_ERR_INVALID_IMM_DATA_SIZE = -36, + NE6XVF_ERR_TIMEOUT = -37, + NE6XVF_ERR_OPCODE_MISMATCH = -38, + NE6XVF_ERR_CQP_COMPL_ERROR = -39, + NE6XVF_ERR_INVALID_VF_ID = -40, + NE6XVF_ERR_INVALID_HMCFN_ID = -41, + NE6XVF_ERR_BACKING_PAGE_ERROR = -42, + NE6XVF_ERR_NO_PBLCHUNKS_AVAILABLE = -43, + NE6XVF_ERR_INVALID_PBLE_INDEX = -44, + NE6XVF_ERR_INVALID_SD_INDEX = -45, + NE6XVF_ERR_INVALID_PAGE_DESC_INDEX = -46, + NE6XVF_ERR_INVALID_SD_TYPE = -47, + NE6XVF_ERR_MEMCPY_FAILED = -48, + NE6XVF_ERR_INVALID_HMC_OBJ_INDEX = -49, + NE6XVF_ERR_INVALID_HMC_OBJ_COUNT = -50, + NE6XVF_ERR_INVALID_SRQ_ARM_LIMIT = -51, + NE6XVF_ERR_SRQ_ENABLED = -52, + NE6XVF_ERR_ADMIN_QUEUE_ERROR = -53, + NE6XVF_ERR_ADMIN_QUEUE_TIMEOUT = -54, + NE6XVF_ERR_BUF_TOO_SHORT = -55, + NE6XVF_ERR_ADMIN_QUEUE_FULL = -56, + NE6XVF_ERR_ADMIN_QUEUE_NO_WORK = -57, + NE6XVF_ERR_BAD_IWARP_CQE = -58, + NE6XVF_ERR_NVM_BLANK_MODE = -59, + NE6XVF_ERR_NOT_IMPLEMENTED = -60, + NE6XVF_ERR_PE_DOORBELL_NOT_ENABLED = -61, + NE6XVF_ERR_DIAG_TEST_FAILED = -62, + NE6XVF_ERR_NOT_READY = -63, + NE6XVF_NOT_SUPPORTED = -64, + NE6XVF_ERR_FIRMWARE_API_VERSION = -65, + NE6XVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, +}; + +static inline const char *ne6xvf_state_str(enum ne6xvf_state_t state) +{ + switch 
(state) { + case __NE6XVF_STARTUP: + return "__NE6XVF_STARTUP"; + case __NE6XVF_REMOVE: + return "__NE6XVF_REMOVE"; + case __NE6XVF_INIT_GET_RESOURCES: + return "__NE6XVF_INIT_GET_RESOURCES"; + case __NE6XVF_INIT_EXTENDED_CAPS: + return "__NE6XVF_INIT_EXTENDED_CAPS"; + case __NE6XVF_INIT_CONFIG_ADAPTER: + return "__NE6XVF_INIT_CONFIG_ADAPTER"; + case __NE6XVF_INIT_SW: + return "__NE6XVF_INIT_SW"; + case __NE6XVF_INIT_FAILED: + return "__NE6XVF_INIT_FAILED"; + case __NE6XVF_RESETTING: + return "__NE6XVF_RESETTING"; + case __NE6XVF_COMM_FAILED: + return "__NE6XVF_COMM_FAILED"; + case __NE6XVF_DOWN: + return "__NE6XVF_DOWN"; + case __NE6XVF_DOWN_PENDING: + return "__NE6XVF_DOWN_PENDING"; + case __NE6XVF_TESTING: + return "__NE6XVF_TESTING"; + case __NE6XVF_RUNNING: + return "__NE6XVF_RUNNING"; + default: + return "__NE6XVF_UNKNOWN_STATE"; + } +} + +static inline void ne6xvf_change_state(struct ne6xvf_adapter *adapter, enum ne6xvf_state_t state) +{ + if (adapter->state != state) { + adapter->last_state = adapter->state; + adapter->state = state; + } +} + +static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) +{ + return &pdev->dev; +} + +int ne6xvf_send_api_ver(struct ne6xvf_adapter *adapter); +int ne6xvf_send_vf_config_msg(struct ne6xvf_adapter *adapter, bool b_init); +int ne6xvf_send_vf_offload_msg(struct ne6xvf_adapter *adapter); +int ne6xvf_send_vf_feature_msg(struct ne6xvf_adapter *adapter); +int ne6xvf_get_vf_config(struct ne6xvf_adapter *adapter); +int ne6xvf_request_reset(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_tg_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_cq_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_tx_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_free_all_rx_resources(struct ne6xvf_adapter *adapter); +void ne6xvf_reset_interrupt_capability(struct ne6xvf_adapter *adapter); +bool ne6x_alloc_rx_buffers(struct ne6x_ring *rx_ring, u16 cleaned_count); +void 
ne6xvf_set_ethtool_ops(struct net_device *netdev); +void ne6xvf_request_stats(struct ne6xvf_adapter *adapter); +void ne6xvf_irq_enable(struct ne6xvf_adapter *adapter, bool flush); +int ne6xvf_get_vf_feature(struct ne6xvf_adapter *adapter); +enum ne6xvf_status ne6xvf_clean_arq_element(struct ne6xvf_hw *hw, struct ne6xvf_arq_event_info *e, + u16 *pending); +void ne6xvf_virtchnl_completion(struct ne6xvf_adapter *adapter, enum virtchnl_ops v_opcode, + enum ne6xvf_status v_retval, u8 *msg, u16 msglen); +int ne6xvf_get_vf_feature(struct ne6xvf_adapter *adapter); +int ne6xvf_request_feature(struct ne6xvf_adapter *adapter); +int ne6xvf_config_default_vlan(struct ne6xvf_adapter *adapter); +void ne6xvf_config_rss_info(struct ne6xvf_adapter *adapter); +void ne6xvf_changed_rss(struct ne6xvf_adapter *adapter); + +void ne6xvf_add_vlans(struct ne6xvf_adapter *adapter); +void ne6xvf_del_vlans(struct ne6xvf_adapter *adapter); +void ne6xvf_schedule_reset(struct ne6xvf_adapter *adapter); +int ne6xvf_parse_vf_resource_msg(struct ne6xvf_adapter *adapter); +int ne6xvf_request_queues(struct ne6xvf_adapter *adapter, int num); +void ne6xvf_add_ether_addrs(struct ne6xvf_adapter *adapter); +void ne6xvf_del_ether_addrs(struct ne6xvf_adapter *adapter); +void ne6xvf_set_promiscuous(struct ne6xvf_adapter *adapter); +int ne6xvf_poll_virtchnl_msg(struct ne6xvf_adapter *adapter, struct ne6xvf_arq_event_info *event, + enum virtchnl_ops op_to_poll); +int ne6xvf_enable_queues(struct ne6xvf_adapter *adapter); +void ne6xvf_update_pf_stats(struct ne6xvf_adapter *adapter); +int ne6xvf_send_pf_msg(struct ne6xvf_adapter *adapter, enum virtchnl_ops op, u8 *msg, u16 len); +void ne6xvf_vchanel_get_port_link_status(struct ne6xvf_adapter *adapter); +void ne6xvf_set_vf_addr(struct ne6xvf_adapter *adapter); +int ne6xvf_close(struct net_device *netdev); +int ne6xvf_open(struct net_device *netdev); +void ne6xvf_fill_rss_lut(struct ne6xvf_adapter *adapter); +void ne6xvf_tail_update(struct ne6x_ring *ring, int val); 
+int ne6xvf_register_netdev(struct ne6xvf_adapter *adapter); + +#endif /* _NE6XVF_H */ diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_debugfs.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_debugfs.c new file mode 100644 index 000000000000..1a5851788ff6 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_debugfs.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include +#include + +#include "ne6xvf.h" + +static struct dentry *ne6xvf_dbg_root; + +void ne6xvf_showqueue(struct ne6xvf_adapter *pf) +{ + struct ne6x_ring *ring; + u64 head, tail, oft; + int i; + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (i = 0; i < pf->num_active_queues; i++) { + ring = &pf->rx_rings[i]; + head = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_RQ_HD_POINTER)); + tail = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_RQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_RQ_OFST)); + dev_info(&pf->pdev->dev, "----RX: Queue[%d]: H[0x%04llx], T[0x%04llx], RQ[0x%04llx], idle:%04d, alloc:%04d, use:%04d, clean:%04d\n", + i, + head, + tail, + oft, + NE6X_DESC_UNUSED(ring), + ring->next_to_alloc, + ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); + for (i = 0; i < pf->num_active_queues; i++) { + ring = &pf->tx_rings[i]; + head = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_SQ_HD_POINTER)); + tail = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_SQ_TAIL_POINTER)); + oft = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_SQ_OFST)); + dev_info(&pf->pdev->dev, "----TX: Queue[%d]: H[0x%04llx], T[0x%04llx], SQ[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, + head, + tail, + oft, + NE6X_DESC_UNUSED(ring), + ring->next_to_use, + ring->next_to_clean); + } + + dev_info(&pf->pdev->dev, 
"--------------------------------------------------------------------------------------------"); + for (i = 0; i < pf->num_active_queues; i++) { + ring = &pf->cq_rings[i]; + head = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_CQ_HD_POINTER)); + tail = rd64(&pf->hw, NE6XVF_REG_ADDR(i, NE6X_CQ_TAIL_POINTER)); + dev_info(&pf->pdev->dev, "----CQ: Queue[%d]: H[0x%04llx], T[0x%04llx], idle:%04d, use:%04d, clean:%04d\n", + i, + head, + tail, + NE6X_DESC_UNUSED(ring), + ring->next_to_use, + ring->next_to_clean); + } + dev_info(&pf->pdev->dev, "--------------------------------------------------------------------------------------------"); +} + +void ne6xvf_showring(struct ne6xvf_adapter *pf) +{ + struct ne6x_tx_desc *tx_desc; + struct ne6x_cq_desc *cq_desc; + union ne6x_rx_desc *rx_desc; + struct ne6x_ring *ring; + int j, k; + + for (j = 0; j < pf->num_active_queues; j++) { + ring = &pf->rx_rings[j]; + + for (k = 0; k < ring->count; k++) { + rx_desc = NE6X_RX_DESC(ring, k); + if (!rx_desc->wb.u.val) + /* empty descriptor, skip */ + continue; + + dev_info(&pf->pdev->dev, "**** rx_desc[%d], vp[%d], m_len[%d], s_len[%d], s_addr[0x%llx], m_addr[0x%llx], flag[0x%x], vp[%d], pkt_len[%d]\n", + k, + rx_desc->w.vp, + rx_desc->w.mop_mem_len, + rx_desc->w.sop_mem_len, + rx_desc->w.buffer_sop_addr, + rx_desc->w.buffer_mop_addr, + rx_desc->wb.u.val, + rx_desc->wb.vp, + rx_desc->wb.pkt_len); + } + } + + for (j = 0; j < pf->num_active_queues; j++) { + ring = &pf->tx_rings[j]; + + for (k = 0; k < ring->count; k++) { + tx_desc = NE6X_TX_DESC(ring, k); + if (!tx_desc->buffer_sop_addr) + /* empty descriptor, skp */ + continue; + + dev_info(&pf->pdev->dev, "**** tx_desc[%d], flag[0x%x], vp[%d], et[%d], ch[%d], tt[%d],sopv[%d],eopv[%d],tso[%d],l3chk[%d],l3oft[%d],l4chk[%d],l4oft[%d],pld[%d],mop[%d],sop[%d],mss[%d],mopa[%lld],sopa[%lld]\n", + k, + tx_desc->u.val, + tx_desc->vp, + tx_desc->event_trigger, + tx_desc->chain, + tx_desc->transmit_type, + tx_desc->sop_valid, + tx_desc->eop_valid, + 
tx_desc->tso, + tx_desc->l3_csum, + tx_desc->l3_ofst, + tx_desc->l4_csum, + tx_desc->l4_ofst, + tx_desc->pld_ofst, + tx_desc->mop_cnt, + tx_desc->sop_cnt, + tx_desc->mss, + tx_desc->buffer_mop_addr, + tx_desc->buffer_sop_addr); + } + } + + for (j = 0; j < pf->num_active_queues; j++) { + ring = &pf->cq_rings[j]; + + for (k = 0; k < ring->count; k++) { + cq_desc = NE6X_CQ_DESC(ring, k); + if (!cq_desc->num) + /* empty descriptor, skip */ + continue; + + dev_info(&pf->pdev->dev, "**** cq_desc[%d], vp[%d], ctype[%d], num[%d]\n", + k, + ring->reg_idx, + cq_desc->ctype, + cq_desc->num); + } + } +} + +const struct ne6xvf_dbg_cmd_wr deg_vf_cmd_wr[] = { + {"queue", ne6xvf_showqueue}, + {"ring", ne6xvf_showring}, +}; + +/** + * nce_dbg_command_read - read for command datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ne6xvf_dbg_command_read(struct file *filp, char __user *buffer, size_t count, + loff_t *ppos) +{ + return 0; +} + +/** + * ne6xvf_dbg_command_write - write into command datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ne6xvf_dbg_command_write(struct file *filp, const char __user *buffer, size_t count, + loff_t *ppos) +{ + struct ne6xvf_adapter *pf = filp->private_data; + char *cmd_buf, *cmd_buf_tmp; + int bytes_not_copied; + int i, cnt; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + /* don't cross maximal possible value */ + if (count >= NCE_DEBUG_CHAR_LEN) + return -ENOSPC; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return count; + + bytes_not_copied = copy_from_user(cmd_buf, buffer, count); + if (bytes_not_copied) { + kfree(cmd_buf); + return -EFAULT; + } + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = 
'\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + if (strncmp(cmd_buf, "read", 4) == 0) { + u32 base_addr; + u32 offset_addr; + u64 value = 0; + + cnt = sscanf(&cmd_buf[4], "%i %i", &base_addr, &offset_addr); + if (cnt != 2) { + dev_warn(&pf->pdev->dev, "read \n"); + goto command_write_done; + } + dev_info(&pf->pdev->dev, "read: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr, + value); + } else if (strncmp(cmd_buf, "write", 5) == 0) { + u32 base_addr; + u32 offset_addr; + u64 value = 0; + + cnt = sscanf(&cmd_buf[5], "%i %i %lli ", &base_addr, &offset_addr, &value); + if (cnt != 3) { + dev_warn(&pf->pdev->dev, "write \n"); + goto command_write_done; + } + dev_info(&pf->pdev->dev, "write: 0x%x 0x%x = 0x%llx\n", base_addr, offset_addr, + value); + } else { + for (i = 0; i < ARRAY_SIZE(deg_vf_cmd_wr); i++) { + if (strncmp(cmd_buf, deg_vf_cmd_wr[i].command, count) == 0) { + deg_vf_cmd_wr[i].command_proc(pf); + goto command_write_done; + } + } + + dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); + } + +command_write_done: + kfree(cmd_buf); + cmd_buf = NULL; + return count; +} + +static const struct file_operations ne6xvf_dbg_command_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ne6xvf_dbg_command_read, + .write = ne6xvf_dbg_command_write, +}; + +/** + * nce_dbg_pf_init - setup the debugfs directory for the PF + * @pf: the PF that is starting up + **/ +void ne6xvf_dbg_pf_init(struct ne6xvf_adapter *pf) +{ + const struct device *dev = &pf->pdev->dev; + const char *name = pci_name(pf->pdev); + struct dentry *pfile; + + pf->ne6xvf_dbg_pf = debugfs_create_dir(name, ne6xvf_dbg_root); + if (!pf->ne6xvf_dbg_pf) + return; + + pfile = debugfs_create_file("command", 0600, pf->ne6xvf_dbg_pf, pf, + &ne6xvf_dbg_command_fops); + if (!pfile) + goto create_failed; + + return; + +create_failed: + dev_info(dev, "debugfs dir/file for %s failed\n", name); + debugfs_remove_recursive(pf->ne6xvf_dbg_pf); +} + +/** + * nce_dbg_pf_exit - clear out the PF's debugfs 
entries + * @pf: the PF that is stopping + **/ +void ne6xvf_dbg_pf_exit(struct ne6xvf_adapter *pf) +{ + debugfs_remove_recursive(pf->ne6xvf_dbg_pf); + pf->ne6xvf_dbg_pf = NULL; +} + +/** + * nce_dbg_init - start up debugfs for the driver + **/ +void ne6xvf_dbg_init(void) +{ + ne6xvf_dbg_root = debugfs_create_dir(ne6xvf_driver_name, NULL); + if (!ne6xvf_dbg_root) + pr_info("init of debugfs failed\n"); +} + +/** + * nce_dbg_exit - clean out the driver's debugfs entries + **/ +void ne6xvf_dbg_exit(void) +{ + debugfs_remove_recursive(ne6xvf_dbg_root); + ne6xvf_dbg_root = NULL; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool.c new file mode 100644 index 000000000000..3fbab2d87066 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool.c @@ -0,0 +1,846 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6xvf.h" +#include "ne6xvf_ethtool_stats.h" +#include "ne6xvf_txrx.h" + +static const char ne6xvf_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", + "Eeprom test (offline)", + "Interrupt test (offline)", + "Link test (on/offline)" +}; + +#define NE6XVF_TEST_LEN (sizeof(ne6xvf_gstrings_test) / ETH_GSTRING_LEN) + +static int ne6xvf_q_stats_len(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int stats_size, total_slen = 0; + + /* Tx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_txq_stats); + total_slen += adapter->num_active_queues * (stats_size / sizeof(u64)); + + /* Rx stats */ + stats_size = sizeof(struct ne6x_q_stats) + sizeof(struct ne6x_rxq_stats); + total_slen += adapter->num_active_queues * (stats_size / sizeof(u64)); + + /* CQ stats */ + stats_size = sizeof(struct ne6x_cq_stats); + total_slen += adapter->num_active_queues * (stats_size / sizeof(u64)); + + return total_slen; +} + +struct ne6xvf_stats { + char 
stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +/* Helper macro for defining some statistics directly copied from the netdev + * stats structure. + */ +#define NE6XVF_NETDEV_STAT(_net_stat) NE6XVF_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) + +/* per-queue ring statistics */ +#define NE6XVF_QUEUE_STAT(_name, _stat) NE6XVF_STAT(struct ne6x_ring, _name, _stat) + +static const struct ne6xvf_stats ne6xvf_gstrings_tx_queue_stats[] = { + NE6XVF_QUEUE_STAT("tx_queue_%u_packets", stats.packets), + NE6XVF_QUEUE_STAT("tx_queue_%u_bytes", stats.bytes), + NE6XVF_QUEUE_STAT("tx_queue_%u_rst", tx_stats.restart_q), + NE6XVF_QUEUE_STAT("tx_queue_%u_busy", tx_stats.tx_busy), + NE6XVF_QUEUE_STAT("tx_queue_%u_line", tx_stats.tx_linearize), + NE6XVF_QUEUE_STAT("tx_queue_%u_csum_err", tx_stats.csum_err), + NE6XVF_QUEUE_STAT("tx_queue_%u_csum", tx_stats.csum_good), + NE6XVF_QUEUE_STAT("tx_queue_%u_pcie_read_err", tx_stats.tx_pcie_read_err), + NE6XVF_QUEUE_STAT("tx_queue_%u_ecc_err", tx_stats.tx_ecc_err), + NE6XVF_QUEUE_STAT("tx_queue_%u_drop_addr", tx_stats.tx_drop_addr), +}; + +static const struct ne6xvf_stats ne6xvf_gstrings_rx_queue_stats[] = { + NE6XVF_QUEUE_STAT("rx_queue_%u_packets", stats.packets), + NE6XVF_QUEUE_STAT("rx_queue_%u_bytes", stats.bytes), + NE6XVF_QUEUE_STAT("rx_queue_%u_no_eop", rx_stats.non_eop_descs), + NE6XVF_QUEUE_STAT("rx_queue_%u_alloc_pg_err", rx_stats.alloc_page_failed), + NE6XVF_QUEUE_STAT("rx_queue_%u_alloc_buf_err", rx_stats.alloc_buf_failed), + NE6XVF_QUEUE_STAT("rx_queue_%u_pg_reuse", rx_stats.page_reuse_count), + NE6XVF_QUEUE_STAT("rx_queue_%u_csum_err", rx_stats.csum_err), + NE6XVF_QUEUE_STAT("rx_queue_%u_csum", rx_stats.csum_good), + NE6XVF_QUEUE_STAT("rx_queue_%u_mem_err", rx_stats.rx_mem_error), + NE6XVF_QUEUE_STAT("rx_queue_%u_rx_err", rx_stats.rx_err), +}; + +static const struct ne6xvf_stats ne6xvf_gstrings_cq_queue_stats[] = { + NE6XVF_QUEUE_STAT("cx_queue_%u_nums", cq_stats.cq_num), + 
NE6XVF_QUEUE_STAT("cx_queue_%u_tx_nums", cq_stats.tx_num), + NE6XVF_QUEUE_STAT("cx_queue_%u_rx_nums", cq_stats.rx_num), +}; + +/* port mac statistics */ +#define NE6XVF_PORT_MAC_STAT(_name, _stat) NE6XVF_STAT(struct ne6xvf_vsi, _name, _stat) + +#define NE6XVF_ALL_STATS_LEN(n) (ne6xvf_q_stats_len(n)) + +#define ne6xvf_ethtool_advertise_link_mode(aq_link_speed, ethtool_link_mode) \ + ethtool_link_ksettings_add_link_mode(ks, advertising, ethtool_link_mode) + +static void ne6xvf_get_settings_link_up(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + switch (adapter->link_speed) { + case NE6X_LINK_SPEED_100GB: + ks->base.speed = SPEED_100000; + break; + case NE6X_LINK_SPEED_40GB: + ks->base.speed = SPEED_40000; + break; + case NE6X_LINK_SPEED_25GB: + ks->base.speed = SPEED_25000; + break; + case NE6X_LINK_SPEED_10GB: + ks->base.speed = SPEED_10000; + break; + case NE6X_LINK_SPEED_200GB: + ks->base.speed = SPEED_200000; + break; + default: + netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n", + adapter->link_speed); + break; + } + ks->base.duplex = DUPLEX_FULL; +} + +/** + * ne6xvf_get_settings_link_down - Get the Link settings when link is down + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + * + * Reports link settings that can be determined when link is down + */ +static void ne6xvf_get_settings_link_down(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; +} + +/** + * ne6xvf_get_link_ksettings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ks: ethtool ksettings + * + * Reports speed/duplex settings based on media_type + */ +static int ne6xvf_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *ks) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + 
ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + ethtool_link_ksettings_zero_link_mode(ks, lp_advertising); + + ks->base.port = PORT_NONE; + if (adapter->link_up) { + /* Set flow control settings */ + ne6xvf_get_settings_link_up(ks, netdev); + } else { + ne6xvf_get_settings_link_down(ks, netdev); + } + + return 0; +} + +/** + * ne6xvf_set_link_ksettings - Set Speed and Duplex + * @netdev: network interface device structure + * @ks: ethtool ksettings + * + * Set speed/duplex per media_types advertised/forced + */ +static int ne6xvf_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + return -EOPNOTSUPP; +} + +static void __ne6xvf_add_stat_strings(u8 **p, const struct ne6xvf_stats stats[], + const unsigned int size, ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +#define ne6xvf_add_stat_strings(p, stats, ...) 
\ + __ne6xvf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ##__VA_ARGS__) + +static void ne6xvf_get_stat_strings(struct net_device *netdev, u8 *data) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + unsigned int i; + + for (i = 0; i < adapter->num_active_queues; i++) { + ne6xvf_add_stat_strings(&data, ne6xvf_gstrings_tx_queue_stats, i); + ne6xvf_add_stat_strings(&data, ne6xvf_gstrings_rx_queue_stats, i); + ne6xvf_add_stat_strings(&data, ne6xvf_gstrings_cq_queue_stats, i); + } +} + +static void ne6xvf_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + ne6xvf_get_stat_strings(netdev, data); + break; + case ETH_SS_TEST: + memcpy(data, ne6xvf_gstrings_test, NE6XVF_TEST_LEN * ETH_GSTRING_LEN); + default: + break; + } +} + +static int ne6xvf_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + /* The number (and order) of strings reported *must* remain + * constant for a given netdevice. This function must not + * report a different number based on run time parameters + * (such as the number of queues in use, or the setting of + * a private ethtool flag). This is due to the nature of the + * ethtool stats API. + * + * Userspace programs such as ethtool must make 3 separate + * ioctl requests, one for size, one for the strings, and + * finally one for the stats. Since these cross into + * userspace, changes to the number or size could result in + * undefined memory access or incorrect string<->value + * correlations for statistics. + * + * Even if it appears to be safe, changes to the size or + * order of strings will suffer from race conditions and are + * not safe. 
+ */ + return NE6XVF_ALL_STATS_LEN(netdev); + case ETH_SS_TEST: + return NE6XVF_TEST_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void ne6xvf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + struct ne6x_ring *cq_ring; + unsigned int j; + int i = 0; + + ne6xvf_update_pf_stats(adapter); + + /* populate per queue stats */ + rcu_read_lock(); + for (j = 0; j < adapter->num_active_queues; j++) { + tx_ring = &adapter->tx_rings[j]; + if (tx_ring) { + data[i++] = tx_ring->stats.packets; + data[i++] = tx_ring->stats.bytes; + data[i++] = tx_ring->tx_stats.restart_q; + data[i++] = tx_ring->tx_stats.tx_busy; + data[i++] = tx_ring->tx_stats.tx_linearize; + data[i++] = tx_ring->tx_stats.csum_err; + data[i++] = tx_ring->tx_stats.csum_good; + data[i++] = tx_ring->tx_stats.tx_pcie_read_err; + data[i++] = tx_ring->tx_stats.tx_ecc_err; + data[i++] = tx_ring->tx_stats.tx_drop_addr; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + rx_ring = &adapter->rx_rings[j]; + if (rx_ring) { + data[i++] = rx_ring->stats.packets; + data[i++] = rx_ring->stats.bytes; + data[i++] = rx_ring->rx_stats.non_eop_descs; + data[i++] = rx_ring->rx_stats.alloc_page_failed; + data[i++] = rx_ring->rx_stats.alloc_buf_failed; + data[i++] = rx_ring->rx_stats.page_reuse_count; + data[i++] = rx_ring->rx_stats.csum_err; + data[i++] = rx_ring->rx_stats.csum_good; + data[i++] = rx_ring->rx_stats.rx_mem_error; + data[i++] = rx_ring->rx_stats.rx_err; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + + cq_ring = &adapter->cq_rings[j]; + if (cq_ring) { + data[i++] = 
cq_ring->cq_stats.cq_num; + data[i++] = cq_ring->cq_stats.tx_num; + data[i++] = cq_ring->cq_stats.rx_num; + } else { + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; + } + } + rcu_read_unlock(); +} + +static void ne6xvf_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + strscpy(drvinfo->driver, ne6xvf_driver_name, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, ne6xvf_driver_version, sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "N/A", 4); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); +} + +static void ne6xvf_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) {} + +static void ne6xvf_self_test(struct net_device *dev, struct ethtool_test *eth_test, u64 *data) +{ + memset(data, 0, sizeof(*data) * NE6XVF_TEST_LEN); +} + +static int ne6xvf_get_regs_len(struct net_device *netdev) +{ + return 0; +} + +static void ne6xvf_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = NE6X_MAX_NUM_DESCRIPTORS; + ring->tx_max_pending = NE6X_MAX_NUM_DESCRIPTORS; + ring->rx_mini_max_pending = NE6X_MIN_NUM_DESCRIPTORS; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = adapter->rx_desc_count; + ring->tx_pending = adapter->tx_desc_count; + ring->rx_mini_pending = NE6X_MIN_NUM_DESCRIPTORS; + ring->rx_jumbo_pending = 0; +} + +static int ne6xvf_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam __always_unused *ker, + struct netlink_ext_ack __always_unused *extack) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + u32 new_rx_count, new_tx_count, new_cq_count; + int err; + + if (ring->tx_pending > NE6X_MAX_NUM_DESCRIPTORS || + ring->tx_pending < 
NE6X_MIN_NUM_DESCRIPTORS || + ring->rx_pending > NE6X_MAX_NUM_DESCRIPTORS || + ring->rx_pending < NE6X_MIN_NUM_DESCRIPTORS) { + netdev_info(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", + ring->tx_pending, ring->rx_pending, NE6X_MIN_NUM_DESCRIPTORS, + NE6X_MAX_NUM_DESCRIPTORS); + return -EINVAL; + } + + new_tx_count = ALIGN(ring->tx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE); + new_rx_count = ALIGN(ring->rx_pending, NE6X_REQ_DESCRIPTOR_MULTIPLE); + new_cq_count = new_rx_count + new_rx_count; + + if (new_tx_count == adapter->tx_desc_count && new_rx_count == adapter->rx_desc_count) + return 0; + + if (!netif_running(adapter->netdev)) { + adapter->tx_desc_count = new_tx_count; + adapter->rx_desc_count = new_rx_count; + adapter->cq_desc_count = new_cq_count; + netdev_info(netdev, "Link is down, queue count change happens when link is brought up\n"); + return 0; + } + + err = ne6xvf_close(adapter->netdev); + if (err) { + netdev_err(netdev, "fail to close vf\n"); + return err; + } + netdev_info(netdev, "Descriptors change from (Tx: %d / Rx: %d) to [%d-%d]\n", + adapter->tx_rings[0].count, adapter->rx_rings[0].count, new_tx_count, + new_rx_count); + adapter->tx_desc_count = new_tx_count; + adapter->rx_desc_count = new_rx_count; + adapter->cq_desc_count = new_cq_count; + + err = ne6xvf_open(adapter->netdev); + if (err) { + netdev_err(netdev, "fail to open vf\n"); + return err; + } + + return 0; +} + +/** + * ne6xvf_get_pauseparam - Get Flow Control status + * @netdev: netdevice structure + * @pause: buffer to return pause parameters + * + * Return tx/rx-pause status + **/ +static void ne6xvf_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + pause->autoneg = 0; + pause->rx_pause = 0; + pause->tx_pause = 0; +} + +/** + * ne6xvf_get_coalesce - get a netdev's coalesce settings + * @netdev: the netdev to check + * @ec: ethtool coalesce data structure + * + **/ +static int ne6xvf_get_coalesce(struct net_device *netdev, + 
struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + ec->tx_max_coalesced_frames_irq = 256; + ec->rx_max_coalesced_frames_irq = 256; + ec->use_adaptive_rx_coalesce = 0; + ec->use_adaptive_tx_coalesce = 0; + ec->rx_coalesce_usecs = 0; + ec->tx_coalesce_usecs = 0; + ec->rx_coalesce_usecs_high = 0; + ec->tx_coalesce_usecs_high = 0; + + return 0; +} + +static int ne6xvf_get_eeprom_len(struct net_device *netdev) +{ + return 0x64; +} + +static int ne6xvf_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) +{ + int blink_freq = 2; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return blink_freq; + case ETHTOOL_ID_ON: + break; + case ETHTOOL_ID_OFF: + break; + case ETHTOOL_ID_INACTIVE: + break; + default: + break; + } + + return 0; +} + +static int ne6xvf_nway_reset(struct net_device *netdev) +{ + return 0; +} + +static void ne6xvf_diag_test(struct net_device *netdev, struct ethtool_test *eth_test, u64 *data) +{ + data[NE6XVF_ETH_TEST_LINK] = 0; + + /* Offline only tests, not run in online; pass by default */ + data[NE6XVF_ETH_TEST_REG] = 0; + data[NE6XVF_ETH_TEST_EEPROM] = 0; + data[NE6XVF_ETH_TEST_INTR] = 0; +} + +#define L3_RSS_FLAGS (RXH_IP_DST | RXH_IP_SRC) +#define L4_RSS_FLAGS (RXH_L4_B_0_1 | RXH_L4_B_2_3) +static int ne6xvf_get_rss_hash_opts(struct ne6xvf_adapter *adapter, u64 flow_type) +{ + u64 data = 0; + + switch (flow_type) { + case TCP_V4_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_TCP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV4_UDP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case TCP_V6_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6) + data |= RXH_IP_DST | 
RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_TCP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V6_FLOW: + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6) + data |= RXH_IP_DST | RXH_IP_SRC; + if (adapter->rss_info.hash_type & NE6X_RSS_HASH_TYPE_IPV6_UDP) + data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + /* Default is src/dest for IP, no matter the L4 hashing */ + data |= RXH_IP_SRC | RXH_IP_DST; + break; + } + + return data; +} + +static int ne6xvf_set_rss_hash_opts(struct ne6xvf_adapter *adapter, struct ethtool_rxnfc *cmd) +{ + u16 rss_flags = adapter->rss_info.hash_type; + + if (cmd->data != L3_RSS_FLAGS && cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS)) + return -EINVAL; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_TCP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_TCP; + break; + case TCP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_TCP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_TCP; + break; + case UDP_V4_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV4_UDP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV4 | NE6X_RSS_HASH_TYPE_IPV4_UDP; + break; + case UDP_V6_FLOW: + if (cmd->data == L3_RSS_FLAGS) + rss_flags &= ~NE6X_RSS_HASH_TYPE_IPV6_UDP; + else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) + rss_flags |= NE6X_RSS_HASH_TYPE_IPV6 | NE6X_RSS_HASH_TYPE_IPV6_UDP; + break; + default: + return -EINVAL; + } + + if (rss_flags == adapter->rss_info.hash_type) + return 0; + + 
adapter->rss_info.hash_type = rss_flags; + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_RSS; + + return 0; +} + +/** + * ne6xvf_set_rxnfc - command to set Rx flow rules. + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * + * Returns 0 for success and negative values for errors + */ +static int ne6xvf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_SRXFH: + ret = ne6xvf_set_rss_hash_opts(adapter, info); + break; + default: + break; + } + + return ret; +} + +/** + * iavf_get_rxnfc - command to get RX flow classification rules + * @netdev: network interface device structure + * @cmd: ethtool rxnfc command + * @rule_locs: pointer to store rule locations + * + * Returns Success if the command is supported. + **/ +static int ne6xvf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = adapter->num_active_queues; + ret = 0; + break; + case ETHTOOL_GRXFH: + cmd->data = ne6xvf_get_rss_hash_opts(adapter, cmd->flow_type); + break; + default: + break; + } + + return 0; +} + +/** + * ne6xvf_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + * + * Returns the table size. + **/ +static u32 ne6xvf_get_rxfh_key_size(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_info.hash_key_size; +} + +/** + * iavf_get_rxfh_indir_size - get the rx flow hash indirection table size + * @netdev: network interface device structure + * + * Returns the table size. 
+ **/ +static u32 ne6xvf_get_rxfh_indir_size(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_info.ind_table_size; +} + +/** + * ne6xvf_get_rxfh - get the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function in use + * + * Reads the indirection table directly from the hardware. Always returns 0. + **/ +static int ne6xvf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + u16 i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (key) + memcpy(key, adapter->rss_info.hash_key, adapter->rss_info.hash_key_size); + + if (indir) { + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_info.ind_table_size; i++) + indir[i] = (u32)adapter->rss_info.ind_table[i]; + } + + return 0; +} + +/** + * ne6xvf_set_rxfh - set the Rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function + * + * Returns -EINVAL if the table specifies an invalid queue ID, otherwise + * returns 0 after programming the table. 
+ */ +static int ne6xvf_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int i; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!key && !indir) + return 0; + + if (key) + memcpy(&adapter->rss_info.hash_key[0], key, adapter->rss_info.hash_key_size); + + if (indir) { + /* Each 32 bits pointed by 'indir' is stored with a lut entry */ + for (i = 0; i < adapter->rss_info.ind_table_size; i++) + adapter->rss_info.ind_table[i] = (u8)(indir[i]); + } + + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_RSS; + + return 0; +} + +/** + * iavf_get_channels: get the number of channels supported by the device + * @netdev: network interface device structure + * @ch: channel information structure + * + * For the purposes of our device, we only use combined channels, i.e. a tx/rx + * queue pair. Report one extra channel to match our "other" MSI-X vector. + **/ +static void ne6xvf_get_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + channels->max_rx = 0; + channels->max_tx = 0; + channels->max_other = 0; + channels->max_combined = adapter->max_queues; + channels->rx_count = 0; + channels->tx_count = 0; + channels->other_count = 0; + channels->combined_count = adapter->num_active_queues; +} + +/** + * ne6xvf_set_channels: set the new channel count + * @netdev: network interface device structure + * @ch: channel information structure + * + * Negotiate a new number of channels with the PF then do a reset. During + * reset we'll realloc queues and fix the RSS table. Returns 0 on success, + * negative on failure. 
+ **/ +static int ne6xvf_set_channels(struct net_device *netdev, struct ethtool_channels *channels) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (!channels->combined_count || channels->rx_count || channels->tx_count || + channels->combined_count > adapter->vf_res->num_queue_pairs) + return -EINVAL; + + if (channels->rx_count == adapter->num_active_queues) { + /* nothing to do */ + netdev_info(netdev, "channel not change, nothing to do!\n"); + return 0; + } + + /* set for the next time the netdev is started */ + if (!netif_running(adapter->netdev)) { + adapter->num_active_queues = channels->combined_count; + + netif_set_real_num_rx_queues(adapter->netdev, adapter->num_active_queues); + netif_set_real_num_tx_queues(adapter->netdev, adapter->num_active_queues); + + ne6xvf_fill_rss_lut(adapter); + adapter->aq_required |= NE6XVF_FLAG_AQ_CHANGED_RSS; + + netdev_info(netdev, "Link is down, queue count change happens when link is brought up\n"); + + return 0; + } + + err = ne6xvf_close(adapter->netdev); + if (err) { + netdev_err(netdev, "fail to close vf\n"); + return err; + } + + adapter->num_active_queues = channels->combined_count; + + netif_set_real_num_rx_queues(adapter->netdev, adapter->num_active_queues); + netif_set_real_num_tx_queues(adapter->netdev, adapter->num_active_queues); + + ne6xvf_fill_rss_lut(adapter); + adapter->aq_required |= NE6XVF_FLAG_AQ_CHANGED_RSS; + + err = ne6xvf_open(adapter->netdev); + if (err) { + netdev_err(netdev, "fail to open vf\n"); + return err; + } + + return 0; +} + +static const struct ethtool_ops ne6xvf_ethtool_ops = { + .get_link_ksettings = ne6xvf_get_link_ksettings, + .set_link_ksettings = ne6xvf_set_link_ksettings, + .get_strings = ne6xvf_get_strings, + .get_sset_count = ne6xvf_get_sset_count, + .get_ethtool_stats = ne6xvf_get_ethtool_stats, + .get_drvinfo = ne6xvf_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_regs = ne6xvf_get_regs, + .get_regs_len = ne6xvf_get_regs_len, + .self_test 
= ne6xvf_self_test, + .get_ringparam = ne6xvf_get_ringparam, + .set_ringparam = ne6xvf_set_ringparam, + .get_pauseparam = ne6xvf_get_pauseparam, + .get_coalesce = ne6xvf_get_coalesce, + .get_eeprom_len = ne6xvf_get_eeprom_len, + .get_rxnfc = ne6xvf_get_rxnfc, + .set_rxnfc = ne6xvf_set_rxnfc, + .get_rxfh_key_size = ne6xvf_get_rxfh_key_size, + .get_rxfh_indir_size = ne6xvf_get_rxfh_indir_size, + .get_rxfh = ne6xvf_get_rxfh, + .set_rxfh = ne6xvf_set_rxfh, + .get_channels = ne6xvf_get_channels, + .set_channels = ne6xvf_set_channels, + .set_phys_id = ne6xvf_set_phys_id, + .nway_reset = ne6xvf_nway_reset, + .self_test = ne6xvf_diag_test, +}; + +void ne6xvf_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &ne6xvf_ethtool_ops; +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h new file mode 100644 index 000000000000..300a90b6af55 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_ethtool_stats.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6XVF_ETHTOOL_H +#define _NE6XVF_ETHTOOL_H + +#include "ne6xvf.h" + +#define NE6XVF_STAT(_type, _name, _stat) \ +{ \ + .stat_string = _name, \ + .sizeof_stat = sizeof_field(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +enum ne6xvf_ethtool_test_id { + NE6XVF_ETH_TEST_REG = 0, + NE6XVF_ETH_TEST_EEPROM, + NE6XVF_ETH_TEST_INTR, + NE6XVF_ETH_TEST_LINK, +}; + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c new file mode 100644 index 000000000000..d72af2d4e6bd --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_main.c @@ -0,0 +1,3310 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#include +#include +#include + +#include "ne6xvf.h" +#include "ne6xvf_osdep.h" +#include "ne6xvf_virtchnl.h" +#include "ne6xvf_txrx.h" +#include "version.h" + +#define CREATE_TRACE_POINTS + +#define SUMMARY \ + "Chengdu BeiZhongWangXin Ethernet Connection N5/N6 Series Virtual Function Linux Driver" +#define COPYRIGHT "Copyright (c) 2020 - 2023 Chengdu BeiZhongWangXin Technology Co., Ltd." + +char ne6xvf_driver_name[] = "ncevf"; +static const char ne6xvf_driver_string[] = SUMMARY; + +const char ne6xvf_driver_version[] = VERSION; +static const char ne6xvf_copyright[] = COPYRIGHT; + +static const struct pci_device_id ne6xvf_pci_tbl[] = { + {PCI_VDEVICE(BZWX, 0x501a), 0}, + {PCI_VDEVICE(BZWX, 0x601a), 0}, + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, ne6xvf_pci_tbl); + +MODULE_AUTHOR("Chengdu BeiZhongWangXin Technology Co., Ltd., "); +MODULE_DESCRIPTION(SUMMARY); +MODULE_LICENSE("GPL"); +MODULE_VERSION(VERSION); + +static const struct net_device_ops ne6xvf_netdev_ops; +struct workqueue_struct *ne6xvf_wq; +static void ne6xvf_sync_features(struct net_device *netdev); + +struct ne6xvf_adapter *ne6xvf_pdev_to_adapter(struct pci_dev *pdev) +{ + return netdev_priv(pci_get_drvdata(pdev)); +} + +void ne6xvf_schedule_reset(struct ne6xvf_adapter *adapter) +{ + adapter->flags |= NE6XVF_FLAG_RESET_NEEDED; + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); +} + +static void ne6xvf_tx_timeout(struct net_device *netdev, __always_unused unsigned int txqueue) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + adapter->tx_timeout_count++; + ne6xvf_schedule_reset(adapter); +} + +/** + * nce_get_vsi_stats_struct - Get System Network Statistics + * @vsi: the VSI we care about + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the service task. 
+ **/
+
+struct net_device_stats *nce_get_vsi_stats_struct(struct ne6xvf_adapter *adapter)
+{
+	if (adapter->netdev)
+		return &adapter->netdev->stats;
+	else
+		return &adapter->net_stats;
+}
+
+/**
+ * nce_update_pf_stats - Update PF port stats counters
+ * @pf: PF whose stats needs to be updated
+ */
+void ne6xvf_update_pf_stats(struct ne6xvf_adapter *adapter)
+{
+	struct net_device_stats *ns; /* netdev stats */
+	struct ne6x_ring *tx_ring;
+	struct ne6x_ring *rx_ring;
+	u64 bytes, packets;
+	u64 rx_p, rx_b;
+	u64 tx_p, tx_b;
+	u16 i;
+
+	if (test_bit(NE6X_ADPT_DOWN, adapter->comm.state))
+		return;
+
+	ns = nce_get_vsi_stats_struct(adapter);
+
+	rx_p = 0;
+	rx_b = 0;
+	tx_p = 0;
+	tx_b = 0;
+
+	rcu_read_lock();
+	for (i = 0; i < adapter->num_active_queues; i++) {
+		/* locate Tx ring */
+		tx_ring = &adapter->tx_rings[i];
+
+		packets = tx_ring->stats.packets;
+		bytes = tx_ring->stats.bytes;
+
+		tx_b += bytes;
+		tx_p += packets;
+
+		rx_ring = &adapter->rx_rings[i];
+
+		packets = rx_ring->stats.packets;
+		bytes = rx_ring->stats.bytes;
+		rx_b += bytes;
+		rx_p += packets;
+	}
+	rcu_read_unlock();
+
+	ns->rx_packets = rx_p;
+	ns->rx_bytes = rx_b;
+	ns->tx_packets = tx_p;
+	ns->tx_bytes = tx_b;
+
+	adapter->net_stats.rx_packets = rx_p;
+	adapter->net_stats.tx_packets = tx_p;
+	adapter->net_stats.rx_bytes = rx_b;
+	adapter->net_stats.tx_bytes = tx_b;
+}
+
+bool ne6xvf_is_remove_in_progress(struct ne6xvf_adapter *adapter)
+{
+	return test_bit(__NE6XVF_IN_REMOVE_TASK, &adapter->crit_section);
+}
+
+static void ne6xvf_sdk_task(struct work_struct *work)
+{
+	struct ne6xvf_adapter *adapter = container_of(work, struct ne6xvf_adapter, sdk_task);
+	struct ne6xvf_hw *hw = &adapter->hw;
+	struct ne6xvf_arq_event_info event;
+	enum ne6xvf_status ret, v_ret;
+	enum virtchnl_ops v_op;
+	u16 pending = 1u;
+
+	if (ne6xvf_is_remove_in_progress(adapter))
+		return;
+
+	if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED)
+		goto out;
+
+	event.buf_len = NE6XVF_MAX_AQ_BUF_SIZE;
+	event.msg_buf = 
kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + goto out; + + do { + ret = ne6xvf_clean_arq_element(hw, &event, &pending); + v_op = (enum virtchnl_ops)le32_to_cpu(event.snap.type); + v_ret = (enum ne6xvf_status)le32_to_cpu(event.snap.state); + + if (ret || !v_op) + break; /* No event to process or error cleaning ARQ */ + + while (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section)) + usleep_range(500, 1000); + + ne6xvf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, event.msg_len); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + if (pending != 0) + memset(event.msg_buf, 0, NE6XVF_MAX_AQ_BUF_SIZE); + } while (pending); + + if ((adapter->flags & (NE6XVF_FLAG_RESET_PENDING | NE6XVF_FLAG_RESET_NEEDED)) || + adapter->state == __NE6XVF_RESETTING) + goto freedom; + +freedom: + kfree(event.msg_buf); + +out: + return; +} + +static int ne6xvf_check_reset_complete(struct ne6xvf_hw *hw) +{ + u64 rstat; + int i; + + for (i = 0; i < NE6XVF_RESET_WAIT_COMPLETE_COUNT; i++) { + rstat = rd64(hw, NE6XVF_REG_ADDR(0, NE6X_VP_RELOAD)); + if (rstat) + return 0; + + usleep_range(10, 20); + } + + return 0; +} + +int ne6xvf_init_sdk_mbx(struct ne6xvf_hw *hw) +{ + union u_ne6x_mbx_snap_buffer_data mbx_buffer; + union u_ne6x_mbx_snap_buffer_data usnap; + u64 val; + + if (hw->mbx.init_flag) + return -1; + + hw->mbx.sq_data.state = NE6X_MAL_VF_DETECT_STATE_NEW_SNAPSHOT; + hw->mbx.sq_data.type = VIRTCHNL_OP_UNKNOWN; + hw->mbx.init_flag = 0x1; + + val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT)); + if (val & 0x2) { + usnap.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_PF_MAILBOX_DATA)); + mbx_buffer.snap.state = usnap.snap.state; + mbx_buffer.snap.type = usnap.snap.type; + + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_MAILBOX_DATA), mbx_buffer.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x2); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_DB_STATE), 0x1); + } + + usleep_range(10, 20); + val = 
NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT)); + + if (val & 0x1) + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x1); + + return 0; +} + +static void ne6xvf_startup(struct ne6xvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct ne6xvf_hw *hw = &adapter->hw; + int ret; + + WARN_ON(adapter->state != __NE6XVF_STARTUP); + + adapter->flags &= ~NE6XVF_FLAG_PF_COMMS_FAILED; + adapter->flags &= ~NE6XVF_FLAG_RESET_PENDING; + + ret = ne6xvf_check_reset_complete(hw); + if (ret) { + dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n", ret); + goto err; + } + + ret = ne6xvf_init_sdk_mbx(hw); + if (ret) { + dev_err(&pdev->dev, "Failed to init SDK (%d)\n", ret); + goto err; + } + + ne6xvf_change_state(adapter, __NE6XVF_INIT_GET_RESOURCES); + + return; + +err: + ne6xvf_change_state(adapter, __NE6XVF_INIT_FAILED); +} + +/** + * ne6xvf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES + * @adapter: board private structure + */ +int ne6xvf_parse_vf_resource_msg(struct ne6xvf_adapter *adapter) +{ + int i, num_req_queues = adapter->num_req_queues; + + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + if (adapter->vf_res->vsi_res[i].vsi_type == NE6XVF_VIRTCHNL_VSI_SRIOV) + adapter->vsi_res = &adapter->vf_res->vsi_res[i]; + } + + if (!adapter->vsi_res) { + dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); + return -ENODEV; + } + + if (num_req_queues && num_req_queues > adapter->vsi_res->num_queue_pairs) { + /* Problem. The PF gave us fewer queues than what we had + * negotiated in our request. Need a reset to see if we can't + * get back to a working state. 
+ */ + dev_err(&adapter->pdev->dev, "Requested %d queues, but PF only gave us %d.\n", + num_req_queues, adapter->vsi_res->num_queue_pairs); + adapter->flags |= NE6XVF_FLAG_REINIT_MSIX_NEEDED; + adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; + ne6xvf_schedule_reset(adapter); + + return -EAGAIN; + } + adapter->num_req_queues = 0; + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + return 0; +} + +/** + * ne6xvf_init_get_resources - third step of driver startup + * @adapter: board private structure + * + * Function process __NE6XVF_INIT_GET_RESOURCES driver state and + * finishes driver initialization procedure. + * When success the state is changed to __NE6XVF_DOWN + * when fails the state is changed to __NE6XVF_INIT_FAILED + **/ +static int ne6xvf_init_get_resources(struct ne6xvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + int ret = 0; + + WARN_ON(adapter->state != __NE6XVF_INIT_GET_RESOURCES); + + if (!adapter->vf_res) { + adapter->vf_res = kzalloc(sizeof(*adapter->vf_res) + + sizeof(struct virtchnl_vsi_resource), + GFP_KERNEL); + if (!adapter->vf_res) + goto err; + } + + adapter->hw_feature = 0x00; + ret = ne6xvf_send_vf_config_msg(adapter, true); + if (ret) { + dev_err(&pdev->dev, "Unable to send config request (%d)\n", ret); + goto err; + } + + ret = ne6xvf_get_vf_config(adapter); + if (ret == NE6XVF_ERR_ADMIN_QUEUE_NO_WORK) { + ret = ne6xvf_send_vf_config_msg(adapter, true); + goto err_alloc; + } else if (ret == NE6XVF_ERR_PARAM) { + /* We only get ERR_PARAM if the device is in a very bad + * state or if we've been disabled for previous bad + * behavior. Either way, we're done now. 
+ */ + dev_err(&pdev->dev, + "Unable to get VF config due to PF error condition, not retrying\n"); + return ret; + } + + if (ret) { + dev_err(&pdev->dev, "Unable to get VF config (%d)\n", ret); + goto err_alloc; + } + + ret = ne6xvf_parse_vf_resource_msg(adapter); + if (ret) { + dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n", ret); + goto err_alloc; + } + + ne6xvf_change_state(adapter, __NE6XVF_INIT_EXTENDED_CAPS); + return ret; + +err_alloc: + kfree(adapter->vf_res); + adapter->vf_res = NULL; +err: + ne6xvf_change_state(adapter, __NE6XVF_INIT_FAILED); + + return ret; +} + +/** + * ne6xvf_napi_disable_all - disable NAPI on all queue vectors + * @adapter: board private structure + **/ +static void ne6xvf_napi_disable_all(struct ne6xvf_adapter *adapter) +{ + int q_vectors = adapter->num_msix_vectors; + struct ne6x_q_vector *q_vector; + int q_idx; + + for (q_idx = 0; q_idx < q_vectors; q_idx++) { + q_vector = &adapter->q_vectors[q_idx]; + napi_disable(&q_vector->napi); + } +} + +static void ne6xvf_free_queues(struct ne6xvf_adapter *adapter) +{ + if (!adapter->vsi_res) + return; + + adapter->num_active_queues = 0; + kfree(adapter->tg_rings); + adapter->tg_rings = NULL; + kfree(adapter->cq_rings); + adapter->cq_rings = NULL; + kfree(adapter->tx_rings); + adapter->tx_rings = NULL; + kfree(adapter->rx_rings); + adapter->rx_rings = NULL; +} + +/** + * ne6xvf_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. + **/ +static int ne6xvf_alloc_queues(struct ne6xvf_adapter *adapter) +{ + int i, num_active_queues; + + /* If we're in reset reallocating queues we don't actually know yet for + * certain the PF gave us the number of queues we asked for but we'll + * assume it did. 
Once basic reset is finished we'll confirm once we + * start negotiating config with PF. + */ + if (adapter->num_req_queues) + num_active_queues = adapter->num_req_queues; + else + num_active_queues = min_t(int, adapter->vsi_res->num_queue_pairs, + (int)(num_online_cpus())); + + adapter->tg_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + adapter->cq_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + + adapter->tx_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + if (!adapter->tx_rings) + goto err_out; + + adapter->rx_rings = kcalloc(num_active_queues, sizeof(struct ne6x_ring), GFP_KERNEL); + if (!adapter->rx_rings) + goto err_out; + + for (i = 0; i < num_active_queues; i++) { + struct ne6x_ring *tg_ring; + struct ne6x_ring *cq_ring; + struct ne6x_ring *tx_ring; + struct ne6x_ring *rx_ring; + + tg_ring = &adapter->tg_rings[i]; + tg_ring->queue_index = i; + tg_ring->netdev = adapter->netdev; + tg_ring->dev = pci_dev_to_dev(adapter->pdev); + tg_ring->adpt = adapter; + tg_ring->count = adapter->tx_desc_count; + + cq_ring = &adapter->cq_rings[i]; + cq_ring->queue_index = i; + cq_ring->netdev = adapter->netdev; + cq_ring->dev = pci_dev_to_dev(adapter->pdev); + cq_ring->adpt = adapter; + cq_ring->count = adapter->cq_desc_count; + + tx_ring = &adapter->tx_rings[i]; + tx_ring->queue_index = i; + tx_ring->netdev = adapter->netdev; + tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->adpt = adapter; + tx_ring->count = adapter->tx_desc_count; + + rx_ring = &adapter->rx_rings[i]; + rx_ring->queue_index = i; + rx_ring->netdev = adapter->netdev; + rx_ring->dev = pci_dev_to_dev(adapter->pdev); + rx_ring->adpt = adapter; + rx_ring->count = adapter->rx_desc_count; + } + + adapter->max_queues = num_active_queues; + adapter->num_active_queues = adapter->max_queues; + + return 0; + +err_out: + ne6xvf_free_queues(adapter); + return -ENOMEM; +} + +static void ne6xvf_irq_disable(struct ne6xvf_adapter 
*adapter) +{ + struct ne6xvf_hw *hw = &adapter->hw; + int i; + + if (!adapter->msix_entries) + return; + + for (i = 0; i < adapter->num_msix_vectors; i++) { + wr64(hw, NE6XVF_REG_ADDR(i, NE6X_VP_INT_MASK), 0xffffffffffffffff); + synchronize_irq(adapter->msix_entries[i].vector); + } +} + +static void ne6xvf_free_traffic_irqs(struct ne6xvf_adapter *adapter) +{ + int vector, irq_num, q_vectors; + + if (!adapter->msix_entries) + return; + + q_vectors = adapter->num_active_queues; + + for (vector = 0; vector < q_vectors; vector++) { + irq_num = adapter->msix_entries[vector].vector; + irq_set_affinity_notifier(irq_num, NULL); + irq_set_affinity_hint(irq_num, NULL); + free_irq(irq_num, &adapter->q_vectors[vector]); + } +} + +static void ne6xvf_free_q_vectors(struct ne6xvf_adapter *adapter) +{ + int q_idx, num_q_vectors; + int napi_vectors; + + if (!adapter->q_vectors) + return; + + num_q_vectors = adapter->num_msix_vectors; + napi_vectors = adapter->num_active_queues; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + struct ne6x_q_vector *q_vector = &adapter->q_vectors[q_idx]; + + if (q_idx < napi_vectors) + netif_napi_del(&q_vector->napi); + } + + kfree(adapter->q_vectors); + adapter->q_vectors = NULL; +} + +/** + * ne6xvf_disable_vf - disable a VF that failed to reset + * @adapter: private adapter structure + * + * Helper function to shut down the VF when a reset never finishes. + **/ +static void ne6xvf_disable_vf(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ne6xvf_vlan_filter *fv, *fvtmp; + struct ne6xvf_mac_filter *f, *ftmp; + + /* reset never finished */ + adapter->flags |= NE6XVF_FLAG_PF_COMMS_FAILED; + + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. 
+ */ + if (!test_bit(NE6X_ADPT_DOWN, adapter->comm.state)) { + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + adapter->link_up = false; + ne6xvf_irq_disable(adapter); + ne6xvf_napi_disable_all(adapter); + ne6xvf_free_traffic_irqs(adapter); + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + /* Delete all of the filters */ + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { + list_del(&f->list); + kfree(f); + } + + list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { + list_del(&fv->list); + kfree(fv); + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + + ne6xvf_reset_interrupt_capability(adapter); + ne6xvf_free_q_vectors(adapter); + ne6xvf_free_queues(adapter); + memset(adapter->vf_res, 0, sizeof(struct virtchnl_vf_resource)); + adapter->netdev->flags &= ~IFF_UP; + adapter->flags &= ~NE6XVF_FLAG_RESET_PENDING; + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + + dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); +} + +/** + * ne6xvf_acquire_msix_vectors - Setup the MSIX capability + * @adapter: board private structure + * @vectors: number of vectors to request + * + * Work with the OS to set up the MSIX vectors needed. + * + * Returns 0 on success, negative on failure + **/ +static int ne6xvf_acquire_msix_vectors(struct ne6xvf_adapter *adapter, int vectors) +{ + int v_actual; + + /* We'll want at least 3 (vector_threshold): + * 0) Other (Admin Queue and link, mostly) + * 1) TxQ[0] Cleanup + * 2) RxQ[0] Cleanup + * + * The more we get, the more we will assign to Tx/Rx Cleanup + * for the separate queues...where Rx Cleanup >= Tx Cleanup. 
+ * Right now, we simply care about how many we'll get; we'll + * set them up later while requesting irq's. + */ + v_actual = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 1, vectors); + if (v_actual != vectors) { + dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts: %d\n", v_actual); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + pci_disable_msi(adapter->pdev); + return v_actual; + } + + adapter->num_msix_vectors = v_actual; + + return 0; +} + +/** + * ne6xvf_set_interrupt_capability - set MSI-X or FAIL if not supported + * @adapter: board private structure to initialize + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +static int ne6xvf_set_interrupt_capability(struct ne6xvf_adapter *adapter) +{ + int vector, v_budget; + int err = 0; + + if (!adapter->vsi_res) + return -EIO; + + v_budget = adapter->num_active_queues; + adapter->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), GFP_KERNEL); + if (!adapter->msix_entries) { + err = -ENOMEM; + goto out; + } + + for (vector = 0; vector < v_budget; vector++) + adapter->msix_entries[vector].entry = vector; + + dev_info(&adapter->pdev->dev, "v_budget:%d, adapter->vf_res->max_vectors: %d\n", v_budget, + adapter->vf_res->max_vectors); + err = ne6xvf_acquire_msix_vectors(adapter, v_budget); +out: + netif_set_real_num_rx_queues(adapter->netdev, v_budget); + netif_set_real_num_tx_queues(adapter->netdev, v_budget); + + return err; +} + +/** + * ne6xvf_fill_rss_lut - Fill the lut with default values + * @adapter: board private structure + **/ +void ne6xvf_fill_rss_lut(struct ne6xvf_adapter *adapter) +{ + u16 i; + + for (i = 0; i < adapter->rss_info.ind_table_size; i++) + adapter->rss_info.ind_table[i] = i % adapter->num_active_queues; +} + +/** + * ne6xvf_init_rss - Prepare for RSS + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +static int 
ne6xvf_init_rss(struct ne6xvf_adapter *adapter) +{ + struct ne6x_rss_info *rss_info = &adapter->rss_info; + + /* begin rss info */ + rss_info->hash_type = NE6X_RSS_HASH_TYPE_IPV4_TCP | + NE6X_RSS_HASH_TYPE_IPV4_UDP | + NE6X_RSS_HASH_TYPE_IPV4 | + NE6X_RSS_HASH_TYPE_IPV6_TCP | + NE6X_RSS_HASH_TYPE_IPV6_UDP | + NE6X_RSS_HASH_TYPE_IPV6; + rss_info->hash_func = NE6X_RSS_HASH_FUNC_TOEPLITZ; + rss_info->hash_key_size = NE6X_RSS_MAX_KEY_SIZE; + rss_info->ind_table_size = NE6X_RSS_MAX_IND_TABLE_SIZE; + ne6xvf_fill_rss_lut(adapter); + netdev_rss_key_fill((void *)&adapter->rss_info.hash_key[0], + adapter->rss_info.hash_key_size); + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_RSS; + adapter->aq_required |= NE6XVF_FLAG_AQ_CHANGED_RSS; + + return 0; +} + +/** + * ne6xvf_alloc_q_vectors - Allocate memory for interrupt vectors + * @adapter: board private structure to initialize + * + * We allocate one q_vector per queue interrupt. If allocation fails we + * return -ENOMEM. + **/ +static int ne6xvf_alloc_q_vectors(struct ne6xvf_adapter *adapter) +{ + struct ne6x_q_vector *q_vector; + int q_idx, num_q_vectors; + + num_q_vectors = adapter->num_active_queues; + adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), GFP_KERNEL); + if (!adapter->q_vectors) + return -ENOMEM; + + for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { + q_vector = &adapter->q_vectors[q_idx]; + q_vector->adpt = adapter; + q_vector->v_idx = q_idx; + q_vector->reg_idx = q_idx; + cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); + netif_napi_add(adapter->netdev, &q_vector->napi, ne6xvf_napi_poll); + } + + return 0; +} + +/** + * ne6xvf_init_interrupt_scheme - Determine if MSIX is supported and init + * @adapter: board private structure to initialize + * + **/ +int ne6xvf_init_interrupt_scheme(struct ne6xvf_adapter *adapter) +{ + int err; + + err = ne6xvf_alloc_queues(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Unable to allocate memory for queues\n"); + goto err_alloc_queues; + } + 
+ rtnl_lock(); + err = ne6xvf_set_interrupt_capability(adapter); + rtnl_unlock(); + if (err) { + dev_err(&adapter->pdev->dev, "Unable to setup interrupt capabilities\n"); + goto err_set_interrupt; + } + + err = ne6xvf_alloc_q_vectors(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Unable to allocate memory for queue vectors\n"); + goto err_alloc_q_vectors; + } + + dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u", + (adapter->num_active_queues > 1) ? "Enabled" : "Disabled", + adapter->num_active_queues); + + return 0; + +err_alloc_q_vectors: + ne6xvf_reset_interrupt_capability(adapter); +err_set_interrupt: + ne6xvf_free_queues(adapter); +err_alloc_queues: + return err; +} + +/** + * ne6xvf_map_vector_to_cq - associate irqs with complete queues + * @adapter: board private structure + * @v_idx: interrupt number + * @r_idx: queue number + **/ +static void ne6xvf_map_vector_to_cq(struct ne6xvf_adapter *adapter, int v_idx, int r_idx) +{ + struct ne6x_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct ne6x_ring *cq_ring = &adapter->cq_rings[r_idx]; + + cq_ring->q_vector = q_vector; + cq_ring->next = q_vector->cq.ring; + q_vector->cq.ring = cq_ring; + q_vector->cq.count++; +} + +/** + * ne6xvf_map_vector_to_rxq - associate irqs with rx queues + * @adapter: board private structure + * @v_idx: interrupt number + * @r_idx: queue number + **/ +static void ne6xvf_map_vector_to_rxq(struct ne6xvf_adapter *adapter, int v_idx, int r_idx) +{ + struct ne6x_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct ne6x_ring *rx_ring = &adapter->rx_rings[r_idx]; + + rx_ring->q_vector = q_vector; + rx_ring->next = q_vector->rx.ring; + q_vector->rx.ring = rx_ring; + q_vector->rx.count++; +} + +/** + * ne6xvf_map_vector_to_txq - associate irqs with tx queues + * @adapter: board private structure + * @v_idx: interrupt number + * @t_idx: queue number + **/ +static void ne6xvf_map_vector_to_txq(struct ne6xvf_adapter *adapter, int v_idx, int t_idx) +{ + 
struct ne6x_q_vector *q_vector = &adapter->q_vectors[v_idx]; + struct ne6x_ring *tx_ring = &adapter->tx_rings[t_idx]; + + tx_ring->q_vector = q_vector; + tx_ring->next = q_vector->tx.ring; + q_vector->tx.ring = tx_ring; + q_vector->tx.count++; + q_vector->num_ringpairs++; +} + +/** + * ne6xvf_map_rings_to_vectors - Maps descriptor rings to vectors + * @adapter: board private structure to initialize + * + * This function maps descriptor rings to the queue-specific vectors + * we were allotted through the MSI-X enabling code. Ideally, we'd have + * one vector per ring/queue, but on a constrained vector budget, we + * group the rings as "efficiently" as possible. You would add new + * mapping configurations in here. + **/ +static void ne6xvf_map_rings_to_vectors(struct ne6xvf_adapter *adapter) +{ + int rings_remaining = adapter->num_active_queues; + int q_vectors; + int ridx; + + q_vectors = adapter->num_msix_vectors; + + for (ridx = 0; ridx < rings_remaining; ridx++) { + ne6xvf_map_vector_to_cq(adapter, ridx, ridx); + ne6xvf_map_vector_to_rxq(adapter, ridx, ridx); + ne6xvf_map_vector_to_txq(adapter, ridx, ridx); + } +} + +/** + * ne6xvf_setup_all_tg_resources - allocate all queues Tg resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
+ * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_setup_all_tg_resources(struct ne6xvf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->tg_rings[i].count = adapter->tx_desc_count; + err = ne6x_setup_tg_descriptors(&adapter->tg_rings[i]); + if (!err) + continue; + + dev_err(&adapter->pdev->dev, "tg Allocation for complete Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ne6xvf_setup_all_cq_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_setup_all_cq_resources(struct ne6xvf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->cq_rings[i].count = adapter->tx_desc_count; + err = ne6x_setup_cq_descriptors(&adapter->cq_rings[i]); + if (!err) + continue; + + dev_err(&adapter->pdev->dev, "Allocation for complete Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ne6xvf_setup_all_tx_resources - allocate all queues Tx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. 
+ * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_setup_all_tx_resources(struct ne6xvf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->tx_rings[i].count = adapter->tx_desc_count; + err = ne6x_setup_tx_descriptors(&adapter->tx_rings[i]); + err |= ne6x_setup_tx_sgl(&adapter->tx_rings[i]); + if (!err) + continue; + + dev_err(&adapter->pdev->dev, "Allocation for Tx Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ne6xvf_setup_all_rx_resources - allocate all queues Rx resources + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ +static int ne6xvf_setup_all_rx_resources(struct ne6xvf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_active_queues; i++) { + adapter->rx_rings[i].count = adapter->rx_desc_count; + err = ne6x_setup_rx_descriptors(&adapter->rx_rings[i]); + if (!err) + continue; + + dev_err(&adapter->pdev->dev, "Allocation for Rx Queue %u failed\n", i); + break; + } + + return err; +} + +/** + * ne6xvf_msix_clean_rings - MSIX mode Interrupt Handler + * @irq: interrupt number + * @data: pointer to a q_vector + **/ +static irqreturn_t ne6xvf_msix_clean_rings(int irq, void *data) +{ + struct ne6x_q_vector *q_vector = data; + struct ne6xvf_adapter *adpt = (struct ne6xvf_adapter *)q_vector->adpt; + u64 val; + + if (!q_vector->tx.ring && !q_vector->rx.ring && !q_vector->cq.ring) + return IRQ_HANDLED; + + napi_schedule_irqoff(&q_vector->napi); + val = rd64(&adpt->hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT_MASK)); + val |= 1ULL << NE6X_VP_CQ_INTSHIFT; + wr64(&adpt->hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT_MASK), val); + + return IRQ_HANDLED; +} + +/** + * ne6xvf_irq_affinity_notify - Callback for 
affinity changes + * @notify: context as to what irq was changed + * @mask: the new affinity mask + * + * This is a callback function used by the irq_set_affinity_notifier function + * so that we may register to receive changes to the irq affinity masks. + **/ +static void ne6xvf_irq_affinity_notify(struct irq_affinity_notify *notify, const cpumask_t *mask) +{ + struct ne6x_q_vector *q_vector; + + q_vector = container_of(notify, struct ne6x_q_vector, affinity_notify); + cpumask_copy(&q_vector->affinity_mask, mask); +} + +/** + * ne6xvf_irq_affinity_release - Callback for affinity notifier release + * @ref: internal core kernel usage + * + * This is a callback function used by the irq_set_affinity_notifier function + * to inform the current notification subscriber that they will no longer + * receive notifications. + **/ +static void ne6xvf_irq_affinity_release(struct kref *ref) {} + +/** + * ne6xvf_request_traffic_irqs - Initialize MSI-X interrupts + * @adapter: board private structure + * @basename: device basename + * + * Allocates MSI-X vectors for tx and rx handling, and requests + * interrupts from the kernel. 
+ **/
+static int ne6xvf_request_traffic_irqs(struct ne6xvf_adapter *adapter, char *basename)
+{
+	unsigned int rx_int_idx = 0, tx_int_idx = 0;
+	unsigned int vector, q_vectors;
+	int irq_num, err;
+	int cpu;
+
+	ne6xvf_irq_disable(adapter);
+	/* one IRQ per active queue pair; no separate "other" vector here */
+	q_vectors = adapter->num_active_queues;
+
+	for (vector = 0; vector < q_vectors; vector++) {
+		struct ne6x_q_vector *q_vector = &adapter->q_vectors[vector];
+
+		irq_num = adapter->msix_entries[vector].vector;
+
+		/* name the IRQ after the ring types it services */
+		if (q_vector->tx.ring && q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name), "ne6xvf-%s-TxRx-%u",
+				 basename, rx_int_idx++);
+			tx_int_idx++;
+		} else if (q_vector->rx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name),
+				 "ne6xvf-%s-rx-%u", basename,
+				 rx_int_idx++);
+		} else if (q_vector->tx.ring) {
+			snprintf(q_vector->name, sizeof(q_vector->name),
+				 "ne6xvf-%s-tx-%u", basename,
+				 tx_int_idx++);
+		} else {
+			/* skip this unused q_vector */
+			continue;
+		}
+
+		err = request_irq(irq_num, ne6xvf_msix_clean_rings, 0, q_vector->name, q_vector);
+		if (err) {
+			dev_info(&adapter->pdev->dev, "Request_irq failed, error: %d\n", err);
+			goto free_queue_irqs;
+		}
+
+		/* register for affinity change notifications */
+		q_vector->affinity_notify.notify = ne6xvf_irq_affinity_notify;
+		q_vector->affinity_notify.release = ne6xvf_irq_affinity_release;
+		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
+
+		/* Spread the IRQ affinity hints across online CPUs. Note that
+		 * get_cpu_mask returns a mask with a permanent lifetime so
+		 * it's safe to use as a hint for irq_set_affinity_hint.
+		 */
+		cpu = cpumask_local_spread(q_vector->v_idx, -1);
+		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+	}
+
+	return 0;
+
+free_queue_irqs:
+	/* unwind only the vectors that were successfully requested */
+	while (vector) {
+		vector--;
+		irq_num = adapter->msix_entries[vector].vector;
+		irq_set_affinity_notifier(irq_num, NULL);
+		irq_set_affinity_hint(irq_num, NULL);
+		free_irq(irq_num, &adapter->q_vectors[vector]);
+	}
+
+	return err;
+}
+
+/**
+ * ne6xvf_configure_queues
+ * @adapter: adapter structure
+ *
+ * Request that the PF set up our (previously allocated) queues.
+ * Programs per-queue CQ/SQ/RQ base addresses and configuration registers
+ * directly; the register write order (reload, program, unreload) follows
+ * the hardware bring-up sequence and must not be reordered.
+ **/
+void ne6xvf_configure_queues(struct ne6xvf_adapter *adapter)
+{
+	unsigned int rx_buf_len = NE6X_RXBUFFER_2048;
+	struct ne6xvf_hw *hw = &adapter->hw;
+	union ne6x_sq_base_addr sq_base_addr;
+	union ne6x_rq_base_addr rq_base_addr;
+	union ne6x_rq_block_cfg rq_block_cfg;
+	union ne6x_cq_base_addr cq_base_addr;
+	union ne6x_cq_cfg cq_cfg;
+	union ne6x_sq_cfg sq_cfg;
+	union ne6x_rq_cfg rc_cfg;
+	int i;
+
+	/* Legacy Rx will always default to a 2048 buffer size. */
+#if (PAGE_SIZE < 8192)
+	if (!(adapter->flags & NE6XVF_FLAG_LEGACY_RX))
+		/* For jumbo frames on systems with 4K pages we have to use
+		 * an order 1 page, so we might as well increase the size
+		 * of our Rx buffer to make better use of the available space
+		 */
+		rx_buf_len = NE6X_RXBUFFER_4096;
+#endif
+
+	/* hold all VPs in reload while the rings are reprogrammed */
+	for (i = 0; i < adapter->num_active_queues; i++)
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1);
+
+	usleep_range(100, 120);
+
+	for (i = 0; i < adapter->num_active_queues; i++) {
+		/* cq */
+		/* cache tail for quicker writes, and clear the reg before use */
+		adapter->cq_rings[i].tail = (u64 __iomem *)(hw->hw_addr0 + NE6XVF_QC_TAIL1(i));
+		adapter->cq_rings[i].reg_idx = hw->dev_caps.base_queue + i;
+
+		cq_base_addr.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_BASE_ADDR));
+		cq_base_addr.reg.csr_cq_base_addr_vp = adapter->cq_rings[i].dma;
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_BASE_ADDR), cq_base_addr.val);
+
+		cq_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_CFG));
+		cq_cfg.reg.csr_cq_len_vp = adapter->cq_rings[i].count;
+		cq_cfg.reg.csr_cq_merge_time_vp = 7;
+		cq_cfg.reg.csr_cq_merge_size_vp = 7;
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_CFG), cq_cfg.val);
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_TAIL_POINTER), 0x0);
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_CQ_HD_POINTER), 0x0);
+
+		/* tx */
+		/* cache tail off for easier writes later */
+		adapter->tx_rings[i].tail = (u64 __iomem *)(hw->hw_addr2 + NE6XVF_QTX_TAIL1(i));
+		adapter->tx_rings[i].reg_idx = hw->dev_caps.base_queue + i;
+
+		sq_base_addr.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_BASE_ADDR));
+		sq_base_addr.reg.csr_sq_base_addr_vp = adapter->tx_rings[i].dma;
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_BASE_ADDR), sq_base_addr.val);
+
+		sq_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_CFG));
+		sq_cfg.reg.csr_sq_len_vp = adapter->tx_rings[i].count;
+		sq_cfg.reg.csr_tdq_pull_en = 0x1;
+		sq_cfg.reg.csr_sqevt_write_back_vp = 0x0;
+		sq_cfg.reg.csr_send_pd_revers_en = 0x0;
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_CFG), sq_cfg.val);
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_SQ_HD_POINTER), 0x0);
+
+		/* rx */
+		/* cache tail for quicker writes, and clear the reg before use */
+		adapter->rx_rings[i].tail = (u64 __iomem *)(hw->hw_addr2 + NE6XVF_QRX_TAIL1(i));
+		adapter->rx_rings[i].rx_buf_len = rx_buf_len;
+		adapter->rx_rings[i].reg_idx = hw->dev_caps.base_queue + i;
+
+		rq_base_addr.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BASE_ADDR));
+		rq_base_addr.reg.csr_rq_base_addr_vp = adapter->rx_rings[i].dma;
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BASE_ADDR), rq_base_addr.val);
+
+		rq_block_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BLOCK_CFG));
+		rq_block_cfg.reg.csr_rdq_mop_len = adapter->rx_rings[i].rx_buf_len;
+		rq_block_cfg.reg.csr_rdq_sop_len = 0;
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_BLOCK_CFG), rq_block_cfg.val);
+
+		rc_cfg.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_CFG));
+		rc_cfg.reg.csr_rq_len_vp = adapter->rx_rings[i].count;
+		rc_cfg.reg.csr_rdq_pull_en = 0x1;
+		rc_cfg.reg.csr_rqevt_write_back_vp = 0x0;
+		rc_cfg.reg.csr_recv_pd_type_vp = 0x0;
+		rc_cfg.reg.csr_recv_pd_revers_en = 0x0;
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_CFG), rc_cfg.val);
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_RQ_HD_POINTER), 0x0);
+	}
+
+	/* release the VPs from reload */
+	for (i = 0; i < adapter->num_active_queues; i++)
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x0);
+
+	usleep_range(100, 120);
+}
+
+/**
+ * ne6xvf_configure - set up transmit and receive data structures
+ * @adapter: board private structure
+ **/
+static void ne6xvf_configure(struct ne6xvf_adapter *adapter)
+{
+	int i;
+
+	ne6xvf_configure_queues(adapter);
+
+	adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_QUEUES;
+
+	/* pre-fill each Rx ring with buffers before traffic starts */
+	for (i = 0; i < adapter->num_active_queues; i++) {
+		struct ne6x_ring *ring = &adapter->rx_rings[i];
+
+		ne6x_alloc_rx_buffers(ring, NE6X_DESC_UNUSED(ring));
+		usleep_range(1000, 2000);
+	}
+}
+
+/**
+ * ne6xvf_napi_enable_all - enable NAPI on all queue vectors
+ * @adapter: board private structure
+ *
+ * NOTE(review): iterates num_msix_vectors, but q_vectors[] is allocated
+ * with num_active_queues entries in ne6xvf_alloc_q_vectors — confirm the
+ * two counts can never diverge, otherwise this walks past the array.
+ **/
+static void ne6xvf_napi_enable_all(struct ne6xvf_adapter *adapter)
+{
+	int q_vectors = adapter->num_msix_vectors;
+	struct ne6x_q_vector *q_vector;
+	int q_idx;
+
+	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+		struct napi_struct *napi;
+
+		q_vector = &adapter->q_vectors[q_idx];
+		napi = &q_vector->napi;
+		napi_enable(napi);
+	}
+}
+
+/**
+ * ne6xvf_up_complete - Finish the last steps of bringing up a connection
+ * @adapter: board private structure
+ *
+ * Expects to be called while holding the __NE6XVF_IN_CRITICAL_TASK bit lock.
+ **/ +static void ne6xvf_up_complete(struct ne6xvf_adapter *adapter) +{ + ne6xvf_change_state(adapter, __NE6XVF_RUNNING); + clear_bit(NE6X_ADPT_DOWN, adapter->comm.state); + + ne6xvf_napi_enable_all(adapter); + + adapter->aq_required |= NE6XVF_FLAG_AQ_ENABLE_QUEUES; + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); +} + +/** + * ne6xvf_reinit_interrupt_scheme - Reallocate queues and vectors + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ +static int ne6xvf_reinit_interrupt_scheme(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (!test_bit(NE6X_ADPT_DOWN, adapter->comm.state)) + ne6xvf_free_traffic_irqs(adapter); + + ne6xvf_reset_interrupt_capability(adapter); + ne6xvf_free_q_vectors(adapter); + ne6xvf_free_queues(adapter); + + err = ne6xvf_init_interrupt_scheme(adapter); + if (err) + goto err; + + netif_tx_stop_all_queues(netdev); + + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + + ne6xvf_map_rings_to_vectors(adapter); +err: + return err; +} + +static void ne6xvf_get_port_link_status(struct ne6xvf_adapter *adapter); + +/** + * ne6xvf_handle_reset - Handle hardware reset + * @adapter: pointer to ne6xvf_adapter + * + * During reset we need to shut down and reinitialize the admin queue + * before we can use it to communicate with the PF again. We also clear + * and reinit the rings because that context is lost as well. + * + * This function is called in the __NE6XVF_RESETTING driver state. If a reset + * is detected and completes, the driver state changed to __NE6XVF_RUNNING or + * __NE6XVF_DOWN, else driver state will remain in __NE6XVF_RESETTING. + * + * The function is called with the NE6XVF_FLAG_RESET_PENDING flag set and it is + * cleared when a reset is detected and completes. 
+ **/
+static void ne6xvf_handle_reset(struct ne6xvf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ne6xvf_hw *hw = &adapter->hw;
+	bool running;
+	int err, i;
+
+	/* We don't use netif_running() because it may be true prior to
+	 * ndo_open() returning, so we can't assume it means all our open
+	 * tasks have finished, since we're not holding the rtnl_lock here.
+	 */
+	running = (adapter->last_state == __NE6XVF_RUNNING);
+
+	if (running) {
+		/* quiesce the datapath before tearing resources down */
+		netdev->flags &= ~IFF_UP;
+		netif_carrier_off(netdev);
+		netif_tx_stop_all_queues(netdev);
+		adapter->link_up = false;
+		ne6xvf_napi_disable_all(adapter);
+	}
+
+	pci_set_master(adapter->pdev);
+	pci_restore_msi_state(adapter->pdev);
+
+	ne6xvf_irq_disable(adapter);
+
+	/* hold every VP in reload while rings are freed */
+	for (i = 0; i < adapter->num_msix_vectors; i++)
+		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1);
+
+	usleep_range(100, 120);
+
+	/* free the Tx/Rx rings and descriptors, might be better to just
+	 * re-use them sometime in the future
+	 */
+	ne6xvf_free_all_tg_resources(adapter);
+	ne6xvf_free_all_cq_resources(adapter);
+	ne6xvf_free_all_rx_resources(adapter);
+	ne6xvf_free_all_tx_resources(adapter);
+
+	/* Set the queues_disabled flag when VF is going through reset
+	 * to avoid a race condition especially for ADQ i.e. when a VF ADQ is
+	 * configured, PF resets the VF to allocate ADQ resources. When this
+	 * happens there's a possibility to hit a condition where VF is in
+	 * running state but the queues haven't been enabled yet. So wait for
+	 * virtchnl success message for enable queues and then unset this flag.
+	 * Don't allow the link to come back up until that happens.
+	 */
+	adapter->flags |= NE6XVF_FLAG_QUEUES_DISABLED;
+
+	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+	adapter->aq_required = 0;
+
+	err = ne6xvf_reinit_interrupt_scheme(adapter);
+	if (err)
+		goto reset_err;
+
+	adapter->aq_required |= NE6XVF_FLAG_AQ_GET_CONFIG;
+	adapter->aq_required |= NE6XVF_FLAG_AQ_MAP_VECTORS;
+
+	/* We were running when the reset started, so we need
+	 * to restore some state here.
+	 */
+	if (running) {
+		err = ne6xvf_setup_all_tg_resources(adapter);
+		if (err)
+			goto reset_err;
+
+		err = ne6xvf_setup_all_cq_resources(adapter);
+		if (err)
+			goto reset_err;
+
+		/* allocate transmit descriptors */
+		err = ne6xvf_setup_all_tx_resources(adapter);
+		if (err)
+			goto reset_err;
+
+		/* allocate receive descriptors */
+		err = ne6xvf_setup_all_rx_resources(adapter);
+		if (err)
+			goto reset_err;
+
+		if ((adapter->flags & NE6XVF_FLAG_REINIT_MSIX_NEEDED) ||
+		    (adapter->flags & NE6XVF_FLAG_REINIT_ITR_NEEDED)) {
+			err = ne6xvf_request_traffic_irqs(adapter, netdev->name);
+			if (err)
+				goto reset_err;
+
+			adapter->flags &= ~NE6XVF_FLAG_REINIT_MSIX_NEEDED;
+		}
+
+		ne6xvf_configure(adapter);
+
+		/* ne6xvf_up_complete() will switch device back
+		 * to __NE6XVF_RUNNING
+		 */
+		ne6xvf_up_complete(adapter);
+
+		ne6xvf_irq_enable(adapter, true);
+
+		ne6xvf_get_port_link_status(adapter);
+
+		netdev->flags |= IFF_UP;
+	} else {
+		ne6xvf_change_state(adapter, __NE6XVF_DOWN);
+	}
+
+	adapter->flags &= ~NE6XVF_FLAG_REINIT_ITR_NEEDED;
+
+	return;
+
+reset_err:
+	if (running) {
+		set_bit(NE6X_ADPT_DOWN, adapter->comm.state);
+		ne6xvf_free_traffic_irqs(adapter);
+		netdev->flags &= ~IFF_UP;
+	}
+
+	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
+	ne6xvf_disable_vf(adapter);
+}
+
+/**
+ * ne6xvf_init_process_extended_caps - Part of driver startup
+ * @adapter: board private structure
+ *
+ * Function processes __NE6XVF_INIT_EXTENDED_CAPS driver state.
This state
+ * handles negotiating capabilities for features which require an additional
+ * message.
+ *
+ * Once all extended capabilities exchanges are finished, the driver will
+ * transition into __NE6XVF_INIT_CONFIG_ADAPTER.
+ */
+static void ne6xvf_init_process_extended_caps(struct ne6xvf_adapter *adapter)
+{
+	WARN_ON(adapter->state != __NE6XVF_INIT_EXTENDED_CAPS);
+
+	/* When we reach here, no further extended capabilities exchanges are
+	 * necessary, so we finally transition into __NE6XVF_INIT_CONFIG_ADAPTER
+	 */
+	adapter->vsi_res->num_queue_pairs = adapter->vf_res->num_queue_pairs;
+	adapter->hw_feature = 0x00;
+	ne6xvf_change_state(adapter, __NE6XVF_INIT_CONFIG_ADAPTER);
+}
+
+/**
+ * ne6xvf_process_config - Process the config information we got from the PF
+ * @adapter: board private structure
+ *
+ * Verify that we have a valid config struct, and set up our netdev features
+ * and our VSI struct.
+ *
+ * Always returns 0; kept int-returning for the caller's error check.
+ **/
+int ne6xvf_process_config(struct ne6xvf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	netdev_features_t csumo_features;
+	netdev_features_t vlano_features;
+	netdev_features_t dflt_features;
+	netdev_features_t tso_features;
+
+	dflt_features = NETIF_F_SG |
+			NETIF_F_HIGHDMA |
+			NETIF_F_RXHASH;
+
+	csumo_features = NETIF_F_RXCSUM |
+			 NETIF_F_IP_CSUM |
+			 NETIF_F_SCTP_CRC |
+			 NETIF_F_IPV6_CSUM;
+
+	vlano_features = NETIF_F_HW_VLAN_CTAG_FILTER |
+			 NETIF_F_HW_VLAN_CTAG_TX |
+			 NETIF_F_HW_VLAN_CTAG_RX;
+
+	/* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
+	tso_features = NETIF_F_TSO |
+		       NETIF_F_TSO_ECN |
+		       NETIF_F_TSO6 |
+		       NETIF_F_GSO_GRE |
+		       NETIF_F_GSO_UDP_TUNNEL |
+		       NETIF_F_LRO |
+		       NETIF_F_LOOPBACK |
+		       NETIF_F_GSO_GRE_CSUM |
+		       NETIF_F_GSO_UDP_TUNNEL_CSUM |
+		       NETIF_F_GSO_PARTIAL |
+		       NETIF_F_GSO_IPXIP4 |
+		       NETIF_F_GSO_IPXIP6 |
+		       NETIF_F_GSO_UDP_L4 |
+		       NETIF_F_GSO_SCTP |
+		       0;
+
+	netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
+
+	/* set features that user can change */
+	netdev->hw_features = dflt_features | csumo_features | vlano_features | tso_features;
+
+	/* add support for HW_CSUM on packets with MPLS header */
+	netdev->mpls_features = NETIF_F_HW_CSUM;
+
+	netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
+
+	/* enable features */
+	netdev->features |= netdev->hw_features;
+	/* encap and VLAN devices inherit default, csumo and tso features */
+	netdev->hw_enc_features |= dflt_features | csumo_features | tso_features;
+	netdev->vlan_features |= dflt_features | csumo_features | tso_features;
+	netdev->hw_features |= NETIF_F_HW_TC;
+
+	/* advertise support but don't enable by default since only one type of
+	 * VLAN offload can be enabled at a time (i.e. CTAG or STAG). When one
+	 * type turns on the other has to be turned off. This is enforced by the
+	 * nce_fix_features() ndo callback.
+	 */
+	netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
+			       NETIF_F_HW_VLAN_STAG_TX |
+			       NETIF_F_HW_VLAN_STAG_FILTER;
+
+	netdev->gso_max_size = 65535;
+	netdev->features = netdev->hw_features;
+	ne6xvf_sync_features(netdev);
+
+	return 0;
+}
+
+/**
+ * ne6xvf_init_config_adapter - last part of driver startup
+ * @adapter: board private structure
+ *
+ * After all the supported capabilities are negotiated, then the
+ * __NE6XVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
+ */ +static void ne6xvf_init_config_adapter(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int ret; + + WARN_ON(adapter->state != __NE6XVF_INIT_CONFIG_ADAPTER); + + if (ne6xvf_process_config(adapter)) + goto err; + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + adapter->flags |= NE6XVF_FLAG_RX_CSUM_ENABLED; + + netdev->netdev_ops = &ne6xvf_netdev_ops; + ne6xvf_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + + netdev->min_mtu = NE6X_MIN_MTU_SIZE; + netdev->max_mtu = NE6X_MAX_RXBUFFER - ETH_HLEN - ETH_FCS_LEN; + + if (!is_valid_ether_addr(adapter->hw.mac.addr)) { + dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", + adapter->hw.mac.addr); + eth_hw_addr_random(netdev); + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + eth_hw_addr_set(netdev, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); + } + + adapter->tx_desc_count = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adapter->rx_desc_count = ALIGN(NE6X_DEFAULT_NUM_DESCRIPTORS, NE6X_REQ_DESCRIPTOR_MULTIPLE); + adapter->cq_desc_count = adapter->tx_desc_count + adapter->rx_desc_count; + ret = ne6xvf_init_interrupt_scheme(adapter); + if (ret) + goto err_sw_init; + + ne6xvf_map_rings_to_vectors(adapter); + + netif_carrier_off(netdev); + adapter->link_up = false; + if (!adapter->netdev_registered) { + ret = ne6xvf_register_netdev(adapter); + if (ret) + goto err_register; + } + adapter->netdev_registered = true; + + netif_tx_stop_all_queues(netdev); + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + set_bit(NE6X_ADPT_DOWN, adapter->comm.state); + + wake_up(&adapter->down_waitqueue); + ne6xvf_init_rss(adapter); + adapter->trusted = 0; + return; + +err_register: +err_sw_init: + ne6xvf_reset_interrupt_capability(adapter); +err: + ne6xvf_change_state(adapter, __NE6XVF_INIT_FAILED); +} + +/** + * ne6xvf_process_aq_command - process aq_required 
flags + * and sends aq command + * @adapter: pointer to ne6xvf adapter structure + * + * Returns 0 on success + * Returns error code if no command was sent + * or error code if the command failed. + **/ +static int ne6xvf_process_aq_command(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL}; + + if (adapter->aq_required & NE6XVF_FLAG_AQ_GET_CONFIG) + return ne6xvf_send_vf_config_msg(adapter, false); + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD) + return ne6xvf_send_vf_offload_msg(adapter); + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CONFIGURE_RSS) { + ne6xvf_config_rss_info(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CHANGED_RSS) { + ne6xvf_changed_rss(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_CONFIGURE_QUEUES) { + if (ne6xvf_request_queues(adapter, adapter->num_active_queues) == 0) { + usleep_range(50, 100); + if (ne6xvf_poll_virtchnl_msg(adapter, &event, + VIRTCHNL_OP_REQUEST_QUEUES) == 0) { + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_QUEUES; + } + } + return 0; + } + if (adapter->aq_required & NE6XVF_FLAG_AQ_ENABLE_QUEUES) { + ne6xvf_enable_queues(adapter); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ENABLE_QUEUES; + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS) { + ne6xvf_vchanel_get_port_link_status(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_SET_VF_MAC) { + ne6xvf_set_vf_addr(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_ADD_MAC_FILTER) { + ne6xvf_add_ether_addrs(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_DEL_MAC_FILTER) { + ne6xvf_del_ether_addrs(adapter); + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_ADD_VLAN_FILTER) { + ne6xvf_add_vlans(adapter); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + return 0; + } + + if 
(adapter->aq_required & NE6XVF_FLAG_AQ_DEL_VLAN_FILTER) { + ne6xvf_del_vlans(adapter); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + return 0; + } + + if (adapter->aq_required & NE6XVF_FLAG_AQ_REQUEST_PROMISC) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_REQUEST_PROMISC; + ne6xvf_set_promiscuous(adapter); + + return 0; + } + return -EAGAIN; +} + +/** + * ne6xvf_asq_done - check if FW has processed the Admin Send Queue + * @hw: pointer to the hw struct + * + * Returns true if the firmware has processed all descriptors on the + * admin send queue. Returns false if there are still requests pending. + **/ +bool ne6xvf_asq_done(struct ne6xvf_hw *hw) +{ + return 1; +} + +/** + * ne6xvf_register_netdev - register netdev + * @adapter: pointer to the ne6xvf_adapter struct + * + * Returns 0 if register netdev success + **/ +int ne6xvf_register_netdev(struct ne6xvf_adapter *adapter) +{ + char newname[IFNAMSIZ] = {0}; + int ret; + u16 domain_num; + + domain_num = pci_domain_nr(adapter->pdev->bus); + + /* There are some pcie device with the same bus number but with different + * pcie domain, the name of netdev should contain pcie domain number + */ + if (domain_num) + sprintf(newname, "enP%dp%ds0f%dv%d", domain_num, adapter->hw.bus.bus_id, + adapter->hw.dev_caps.lport, + adapter->hw.dev_caps.vf_id % adapter->hw.dev_caps.num_vf_per_pf); + else + sprintf(newname, "enp%ds0f%dv%d", adapter->hw.bus.bus_id, + adapter->hw.dev_caps.lport, + adapter->hw.dev_caps.vf_id % adapter->hw.dev_caps.num_vf_per_pf); + + strcpy(&adapter->netdev->name[0], newname); + dev_info(&adapter->pdev->dev, "name: %s\n", newname); + ret = register_netdev(adapter->netdev); + if (ret) { + sprintf(newname, "enp%ds0f%dv%%d", adapter->hw.bus.bus_id, + adapter->hw.dev_caps.lport); + strcpy(&adapter->netdev->name[0], newname); + ret = register_netdev(adapter->netdev); + } + return ret; +} + +static void ne6xvf_watchdog_task(struct work_struct *work) +{ + struct ne6xvf_adapter *adapter = 
container_of(work, struct ne6xvf_adapter, + watchdog_task.work); + struct ne6xvf_hw *hw = &adapter->hw; + + if (ne6xvf_is_remove_in_progress(adapter)) + return; + + if (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section)) + goto restart_watchdog; + + if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) + ne6xvf_change_state(adapter, __NE6XVF_COMM_FAILED); + + if (adapter->flags & NE6XVF_FLAG_RESET_NEEDED && adapter->state != __NE6XVF_RESETTING) { + adapter->flags &= ~NE6XVF_FLAG_RESET_NEEDED; + ne6xvf_change_state(adapter, __NE6XVF_RESETTING); + adapter->aq_required = 0; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + } + switch (adapter->state) { + case __NE6XVF_INIT_FAILED: + /* Try again from failed step */ + ne6xvf_change_state(adapter, adapter->last_state); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, HZ); + return; + case __NE6XVF_COMM_FAILED: + adapter->aq_required = 0; + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, msecs_to_jiffies(10)); + return; + case __NE6XVF_RESETTING: + ne6xvf_handle_reset(adapter); + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + queue_work(ne6xvf_wq, &adapter->watchdog_task.work); + return; + case __NE6XVF_DOWN: + case __NE6XVF_DOWN_PENDING: + case __NE6XVF_TESTING: + case __NE6XVF_RUNNING: + if (adapter->current_op) { + if (!ne6xvf_asq_done(hw)) { + dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n"); + ne6xvf_send_api_ver(adapter); + } + } else { + int ret = ne6xvf_process_aq_command(adapter); + + /* An error will be returned if no commands were + * processed; use this opportunity to update stats + * if the error isn't -EOPNOTSUPP + */ + if (ret && ret != -EOPNOTSUPP && adapter->state == __NE6XVF_RUNNING) + ne6xvf_request_stats(adapter); + } + break; + case __NE6XVF_REMOVE: + 
clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + return; + default: + break; + } + clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section); + +restart_watchdog: + queue_work(ne6xvf_wq, &adapter->sdk_task); + if (adapter->aq_required) + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, msecs_to_jiffies(20)); + else + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, msecs_to_jiffies(1000)); +} + +inline void ne6xvf_init_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_init((struct mutex *)sp); +} + +void ne6xvf_acquire_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_lock((struct mutex *)sp); +} + +void ne6xvf_release_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_unlock((struct mutex *)sp); +} + +void ne6xvf_destroy_spinlock_d(struct ne6xvf_spinlock *sp) +{ + mutex_destroy((struct mutex *)sp); +} + +/** + * ne6xvf_find_filter - Search filter list for specific mac filter + * @adapter: board private structure + * @macaddr: the MAC address + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * mac_vlan_list_lock. + **/ +static struct ne6xvf_mac_filter *ne6xvf_find_filter(struct ne6xvf_adapter *adapter, + const u8 *macaddr) +{ + struct ne6xvf_mac_filter *f; + + if (!macaddr) + return NULL; + + list_for_each_entry(f, &adapter->mac_filter_list, list) { + if (ether_addr_equal(macaddr, f->macaddr)) + return f; + } + + return NULL; +} + +/** + * ne6xvf_add_filter - Add a mac filter to the filter list + * @adapter: board private structure + * @macaddr: the MAC address + * + * Returns ptr to the filter object or NULL when no memory available. 
+ **/
+static struct ne6xvf_mac_filter *ne6xvf_add_filter(struct ne6xvf_adapter *adapter,
+						   const u8 *macaddr)
+{
+	struct ne6xvf_mac_filter *f;
+
+	if (!macaddr)
+		return NULL;
+
+	f = ne6xvf_find_filter(adapter, macaddr);
+	if (!f) {
+		/* GFP_ATOMIC: may be called under the mac_vlan_list spinlock */
+		f = kzalloc(sizeof(*f), GFP_ATOMIC);
+		if (!f)
+			return f;
+
+		ether_addr_copy(f->macaddr, macaddr);
+
+		list_add_tail(&f->list, &adapter->mac_filter_list);
+		f->add = true;
+		f->add_handled = false;
+		f->is_new_mac = true;
+		f->is_primary = false;
+		adapter->aq_required |= NE6XVF_FLAG_AQ_ADD_MAC_FILTER;
+	} else {
+		/* existing filter: make sure a pending removal is cancelled */
+		f->remove = false;
+	}
+
+	return f;
+}
+
+/**
+ * ne6xvf_down - Shutdown the connection processing
+ * @adapter: board private structure
+ *
+ * Expects to be called while holding the __NE6XVF_IN_CRITICAL_TASK bit lock.
+ **/
+void ne6xvf_down(struct ne6xvf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ne6xvf_vlan_filter *vlf;
+	struct ne6xvf_mac_filter *f;
+
+	if (adapter->state <= __NE6XVF_DOWN_PENDING)
+		return;
+
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+	adapter->link_up = false;
+	set_bit(NE6X_ADPT_DOWN, adapter->comm.state);
+	ne6xvf_irq_disable(adapter);
+	ne6xvf_napi_disable_all(adapter);
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+	/* clear the sync flag on all filters */
+	__dev_uc_unsync(adapter->netdev, NULL);
+	__dev_mc_unsync(adapter->netdev, NULL);
+
+	/* remove all MAC filters */
+	list_for_each_entry(f, &adapter->mac_filter_list, list)
+		f->remove = true;
+
+	/* remove all VLAN filters */
+	list_for_each_entry(vlf, &adapter->vlan_filter_list, list)
+		vlf->remove = true;
+
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+	if (!(adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) &&
+	    adapter->state != __NE6XVF_RESETTING) {
+		dev_info(&adapter->pdev->dev, "%s: state->%s\n", __func__,
+			 ne6xvf_state_str(adapter->state));
+		/* cancel any current operation */
+		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+
+		/* Schedule operations to close down the HW. Don't wait
+		 * here for this to complete. The watchdog is still running
+		 * and it will take care of this.
+		 */
+		adapter->aq_required |= NE6XVF_FLAG_AQ_DEL_MAC_FILTER;
+
+		/* In case the queue configure or enable operations are still
+		 * pending from when the interface was opened, make sure
+		 * they're canceled here.
+		 */
+		adapter->aq_required &= ~NE6XVF_FLAG_AQ_ENABLE_QUEUES;
+		adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_QUEUES;
+	}
+
+	mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0);
+}
+
+/* Ask the watchdog task to query the PF for the current link state. */
+static void ne6xvf_get_port_link_status(struct ne6xvf_adapter *adapter)
+{
+	adapter->aq_required |= NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS;
+	mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0);
+}
+
+/* Enable/disable the Tx and Rx datapath via the hw_feature offload word;
+ * the change is pushed to the PF by the watchdog task.
+ */
+static void ne6xvf_set_vport_state(struct ne6xvf_adapter *adapter, int tx_state, int rx_state)
+{
+	if (rx_state)
+		adapter->hw_feature &= ~NE6X_F_RX_DISABLE;
+	else
+		adapter->hw_feature |= NE6X_F_RX_DISABLE;
+
+	if (tx_state)
+		adapter->hw_feature &= ~NE6X_F_TX_DISABLE;
+	else
+		adapter->hw_feature |= NE6X_F_TX_DISABLE;
+
+	adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD;
+	mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0);
+}
+
+/**
+ * ne6xvf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog is started,
+ * and the stack is notified that the interface is ready.
 **/
int ne6xvf_open(struct net_device *netdev)
{
	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
	int err;

	netdev_info(netdev, "open !!!\n");

	/* Busy-wait (sleeping) for the critical-section bit lock */
	while (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section))
		usleep_range(500, 1000);

	if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) {
		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
		err = -EIO;
		goto unlock;
	}

	if (adapter->state == __NE6XVF_RUNNING && !test_bit(NE6X_ADPT_DOWN, adapter->comm.state)) {
		dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
		err = 0;
		goto unlock;
	}

	if (adapter->state != __NE6XVF_DOWN) {
		err = -EBUSY;
		goto unlock;
	}

	/* Allocate rings in dependency order: tag, completion, Tx, Rx */
	err = ne6xvf_setup_all_tg_resources(adapter);
	if (err)
		goto err_setup_tg;

	err = ne6xvf_setup_all_cq_resources(adapter);
	if (err)
		goto err_setup_cq;

	/* allocate transmit descriptors */
	err = ne6xvf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ne6xvf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* clear any pending interrupts, may auto mask */
	err = ne6xvf_request_traffic_irqs(adapter, netdev->name);
	if (err)
		goto err_req_irq;

	ne6xvf_configure(adapter);

	ne6xvf_up_complete(adapter);

	ne6xvf_irq_enable(adapter, true);

	ne6xvf_get_port_link_status(adapter);

	ne6xvf_set_vport_state(adapter, true, true);
	clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return 0;

err_req_irq:
	ne6xvf_down(adapter);
	ne6xvf_free_traffic_irqs(adapter);
err_setup_rx:
	ne6xvf_free_all_rx_resources(adapter);
err_setup_tx:
	ne6xvf_free_all_tx_resources(adapter);
err_setup_cq:
	ne6xvf_free_all_cq_resources(adapter);
err_setup_tg:
	ne6xvf_free_all_tg_resources(adapter);

unlock:
	clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return err;
}

/**
 * ne6xvf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
 * are freed, along with all transmit and receive resources.
 **/
int ne6xvf_close(struct net_device *netdev)
{
	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
	struct ne6xvf_hw *hw = &adapter->hw;
	int status;
	int i;

	netdev_info(netdev, "close !!!\n");

	while (test_and_set_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section))
		usleep_range(500, 1000);

	if (adapter->state <= __NE6XVF_DOWN_PENDING) {
		clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section);
		return 0;
	}

	ne6xvf_set_vport_state(adapter, false, false);
	ne6xvf_down(adapter);

	/* Force a per-vector reload on every MSI-X vector */
	for (i = 0; i < adapter->num_msix_vectors; i++)
		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1);

	usleep_range(100, 120);

	ne6xvf_change_state(adapter, __NE6XVF_DOWN_PENDING);
	ne6xvf_free_traffic_irqs(adapter);

	ne6xvf_free_all_tg_resources(adapter);
	ne6xvf_free_all_cq_resources(adapter);
	ne6xvf_free_all_tx_resources(adapter);
	ne6xvf_free_all_rx_resources(adapter);
	if (adapter->state == __NE6XVF_DOWN_PENDING)
		ne6xvf_change_state(adapter, __NE6XVF_DOWN);

	clear_bit(__NE6XVF_IN_CRITICAL_TASK, &adapter->crit_section);

	/* If we're closing the interface as part of driver removal then don't
	 * wait. The VF resources will be reinitialized when the hardware is
	 * reset.
	 */
	if (ne6xvf_is_remove_in_progress(adapter))
		return 0;

	/* We explicitly don't free resources here because the hardware is
	 * still active and can DMA into memory. Resources are cleared in
	 * ne6xvf_virtchnl_completion() after we get confirmation from the PF
	 * driver that the rings have been stopped.
	 *
	 * Also, we wait for state to transition to __NE6XVF_DOWN before
	 * returning. State change occurs in ne6xvf_virtchnl_completion() after
	 * VF resources are released (which occurs after PF driver processes and
	 * responds to admin queue commands).
	 */
	status = wait_event_timeout(adapter->down_waitqueue, adapter->state == __NE6XVF_DOWN,
				    msecs_to_jiffies(500));
	if (!status)
		netdev_dbg(netdev, "Device resources not yet released\n");

	return 0;
}

/**
 * ne6xvf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int ne6xvf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct ne6xvf_adapter *adapter = netdev_priv(netdev);

	/* The primary MAC is tracked separately; never duplicate it here */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	if (ne6xvf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}

/**
 * ne6xvf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int ne6xvf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct ne6xvf_adapter *adapter = netdev_priv(netdev);
	struct ne6xvf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = ne6xvf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= NE6XVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	return 0;
}

/**
 * ne6xvf_promiscuous_mode_changed - check if promiscuous mode bits changed
 * @adapter: device specific adapter
 */
bool ne6xvf_promiscuous_mode_changed(struct ne6xvf_adapter *adapter)
{
	return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
	       (IFF_PROMISC | IFF_ALLMULTI);
}

/**
 * ne6xvf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void ne6xvf_set_rx_mode(struct net_device *netdev)
{
	struct ne6xvf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	__dev_uc_sync(netdev, ne6xvf_addr_sync, ne6xvf_addr_unsync);
	__dev_mc_sync(netdev, ne6xvf_addr_sync, ne6xvf_addr_unsync);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* Untrusted VFs are never allowed promiscuous/allmulti.
	 * NOTE(review): this path clears the local flags but does not queue
	 * NE6XVF_FLAG_AQ_REQUEST_PROMISC, so the PF is presumably already in
	 * the non-promisc state here — confirm against the PF-side handler.
	 */
	if (!adapter->trusted) {
		adapter->hw_feature &= ~NE6X_F_PROMISC;
		adapter->hw_feature &= ~NE6X_F_RX_ALLMULTI;
		adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON;
		adapter->flags &= ~NE6XVF_FLAG_ALLMULTI_ON;
		return;
	}

	if (netdev->flags & IFF_PROMISC) {
		adapter->flags |= NE6XVF_FLAG_PROMISC_ON;
		adapter->flags |= NE6XVF_FLAG_ALLMULTI_ON;
	} else if (netdev->flags & IFF_ALLMULTI) {
		adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON;
		adapter->flags |= NE6XVF_FLAG_ALLMULTI_ON;
	} else {
		adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON;
		adapter->flags &= ~NE6XVF_FLAG_ALLMULTI_ON;
	}

	adapter->aq_required |= NE6XVF_FLAG_AQ_REQUEST_PROMISC;
}

/**
 * ne6xvf_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the watchdog task.
+ **/ +static struct net_device_stats *ne6xvf_get_stats(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + if (adapter->netdev) + return &adapter->netdev->stats; + else + return &adapter->net_stats; +} + +static void ne6xvf_sync_features(struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + if (netdev->features & NETIF_F_GSO_UDP_TUNNEL_CSUM) + adapter->hw_feature |= NE6X_F_TX_UDP_TNL_SEG; + else + adapter->hw_feature &= ~NE6X_F_TX_UDP_TNL_SEG; + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + adapter->hw_feature |= NE6X_F_RX_VLAN_STRIP; + else + adapter->hw_feature &= ~NE6X_F_RX_VLAN_STRIP; + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_TX) + adapter->hw_feature |= NE6X_F_TX_VLAN; + else + adapter->hw_feature &= ~NE6X_F_TX_VLAN; + + if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) + adapter->hw_feature |= NE6X_F_RX_QINQ_STRIP; + else + adapter->hw_feature &= ~NE6X_F_RX_QINQ_STRIP; + + if (netdev->features & NETIF_F_HW_VLAN_STAG_TX) + adapter->hw_feature |= NE6X_F_TX_QINQ; + else + adapter->hw_feature &= ~NE6X_F_TX_QINQ; + + if (netdev->features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) + adapter->hw_feature |= NE6X_F_RX_VLAN_FILTER; + else + adapter->hw_feature &= ~NE6X_F_RX_VLAN_FILTER; + + if (netdev->features & NETIF_F_RXCSUM) + adapter->hw_feature |= NE6X_OFFLOAD_RXCSUM; + + if (netdev->features & NETIF_F_LRO) + adapter->hw_feature |= NE6X_OFFLOAD_LRO; + + if (netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) + adapter->hw_feature |= NE6X_OFFLOAD_TSO; + + if (netdev->features & NETIF_F_IP_CSUM) + adapter->hw_feature |= NE6X_OFFLOAD_TXCSUM; + + if (netdev->features & NETIF_F_RXHASH) + adapter->hw_feature |= NE6X_OFFLOAD_RSS; + + if (netdev->features & NETIF_F_HW_L2FW_DOFFLOAD) + adapter->hw_feature |= NE6X_OFFLOAD_L2; + + if (netdev->features & NETIF_F_RXHASH) + adapter->hw_feature |= NE6X_OFFLOAD_RSS; + + if (netdev->features & NETIF_F_SCTP_CRC) + 
adapter->hw_feature |= NE6X_OFFLOAD_SCTP_CSUM; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_SCTP_CSUM; + + dev_info(&adapter->pdev->dev, "%s: adapter->hw_feature = 0x%08x\n", __func__, + adapter->hw_feature); + + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD; +} + +#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_STAG_RX | \ + NETIF_F_HW_VLAN_STAG_TX) + +#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) + +#define NETIF_UDP_TNL_FEATURES (NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM) + +/** + * nce_set_features - set the netdev feature flags + * @netdev: ptr to the netdev being adjusted + * @features: the feature set that the stack is suggesting + * Note: expects to be called while under rtnl_lock() + **/ +static int ne6xvf_set_features(struct net_device *netdev, netdev_features_t features) +{ + netdev_features_t changed = features ^ netdev->features; + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + if (changed & (NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM)) { + if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) + adapter->hw_feature |= NE6X_F_TX_UDP_TNL_SEG; + else + adapter->hw_feature &= ~NE6X_F_TX_UDP_TNL_SEG; + } + + if (changed & NETIF_VLAN_OFFLOAD_FEATURES || changed & NETIF_VLAN_FILTERING_FEATURES) { + /* keep cases separate because one ethertype for offloads can be + * disabled at the same time as another is disabled, so check for an + * enabled ethertype first, then check for disabled. Default to + * ETH_P_8021Q so an ethertype is specified if disabling insertion and + * stripping. 
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + adapter->hw_feature |= NE6X_F_RX_VLAN_STRIP; + else + adapter->hw_feature &= ~NE6X_F_RX_VLAN_STRIP; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + adapter->hw_feature |= NE6X_F_TX_VLAN; + else + adapter->hw_feature &= ~NE6X_F_TX_VLAN; + + if (features & NETIF_F_HW_VLAN_STAG_RX) + adapter->hw_feature |= NE6X_F_RX_QINQ_STRIP; + else + adapter->hw_feature &= ~NE6X_F_RX_QINQ_STRIP; + + if (features & NETIF_F_HW_VLAN_STAG_TX) + adapter->hw_feature |= NE6X_F_TX_QINQ; + else + adapter->hw_feature &= ~NE6X_F_TX_QINQ; + + if (features & (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) + adapter->hw_feature |= NE6X_F_RX_VLAN_FILTER; + else + adapter->hw_feature &= ~NE6X_F_RX_VLAN_FILTER; + } + + if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO)) { + if (features & NETIF_F_RXCSUM) + adapter->hw_feature |= NE6X_OFFLOAD_RXCSUM; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_RXCSUM; + + /* update hardware LRO capability accordingly */ + if (features & NETIF_F_LRO) + adapter->hw_feature |= NE6X_OFFLOAD_LRO; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_LRO; + } + + if (changed & (NETIF_F_TSO6 | NETIF_F_TSO)) { + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) + adapter->hw_feature |= NE6X_OFFLOAD_TSO; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_TSO; + } + + if (changed & NETIF_F_GSO_UDP) { + if (features & NETIF_F_GSO_UDP) + adapter->hw_feature |= NE6X_OFFLOAD_UFO; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_UFO; + } + + if (changed & NETIF_F_IP_CSUM) { + if (features & NETIF_F_IP_CSUM) + adapter->hw_feature |= NE6X_OFFLOAD_TXCSUM; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_TXCSUM; + } + + if (changed & NETIF_F_RXHASH) { + if (features & NETIF_F_RXHASH) + adapter->hw_feature |= NE6X_OFFLOAD_RSS; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_RSS; + } + + if (changed & NETIF_F_HW_L2FW_DOFFLOAD) { + if (features & NETIF_F_HW_L2FW_DOFFLOAD) + adapter->hw_feature |= NE6X_OFFLOAD_L2; + else + adapter->hw_feature &= 
~NE6X_OFFLOAD_L2; + } + + if (changed & NETIF_F_SCTP_CRC) { + if (features & NETIF_F_SCTP_CRC) + adapter->hw_feature |= NE6X_OFFLOAD_SCTP_CSUM; + else + adapter->hw_feature &= ~NE6X_OFFLOAD_SCTP_CSUM; + } + + dev_info(&adapter->pdev->dev, "%s: adapter->hw_feature = 0x%08x\n", __func__, + adapter->hw_feature); + + adapter->aq_required |= NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD; + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); + + return 0; +} + +/** + * nce_fix_features - fix the netdev feature flags + * @netdev: ptr to the netdev being adjusted + * @features: the feature set that the stack is suggesting + * Note: expects to be called while under rtnl_lock() + **/ +static netdev_features_t ne6xvf_fix_features(struct net_device *netdev, netdev_features_t features) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features &= ~NETIF_F_HW_VLAN_STAG_RX; + + if (features & NETIF_F_HW_VLAN_STAG_RX) + features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (features & NETIF_F_HW_VLAN_CTAG_TX) + features &= ~NETIF_F_HW_VLAN_STAG_TX; + + if (features & NETIF_F_HW_VLAN_STAG_TX) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + if (features & NETIF_VLAN_FILTERING_FEATURES) + features |= NETIF_VLAN_FILTERING_FEATURES; + + return features; +} + +/** + * ne6xvf_replace_primary_mac - Replace current primary address + * @adapter: board private structure + * @new_mac: new MAC address to be applied + * + * Replace current dev_addr and send request to PF for removal of previous + * primary MAC address filter and addition of new primary MAC filter. + * Return 0 for success, -ENOMEM for failure. + * + * Do not call this with mac_vlan_list_lock! 
+ **/ +int ne6xvf_replace_primary_mac(struct ne6xvf_adapter *adapter, const u8 *new_mac) +{ + memcpy(adapter->hw.mac.addr, new_mac, 6); + adapter->aq_required |= NE6XVF_FLAG_AQ_SET_VF_MAC; + + /* schedule the watchdog task to immediately process the request */ + queue_work(ne6xvf_wq, &adapter->watchdog_task.work); + return 0; +} + +/** + * ne6xvf_set_mac - NDO callback to set port mac address + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int ne6xvf_set_mac(struct net_device *netdev, void *p) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + int ret; + + netdev_info(netdev, "set mac address %pM\n", addr->sa_data); + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (is_multicast_ether_addr(addr->sa_data)) { + netdev_err(netdev, "Invalid Ethernet address %pM\n", addr->sa_data); + return -EINVAL; + } + + if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) { + netdev_info(netdev, "already using mac address %pM\n", addr->sa_data); + return 0; + } + + ret = ne6xvf_replace_primary_mac(adapter, addr->sa_data); + + if (ret) + return ret; + + ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, + ether_addr_equal(netdev->dev_addr, addr->sa_data), + msecs_to_jiffies(2500)); + + /* If ret < 0 then it means wait was interrupted. + * If ret == 0 then it means we got a timeout. 
+ * else it means we got response for set MAC from PF, + * check if netdev MAC was updated to requested MAC, + * if yes then set MAC succeeded otherwise it failed return -EACCES + */ + netdev_info(netdev, "%s,%pM %pM\n", __func__, addr->sa_data, netdev->dev_addr); + if (!ether_addr_equal(netdev->dev_addr, addr->sa_data)) + return -EACCES; + + return 0; +} + +/** + * ne6xvf_do_ioctl - Handle network device specific ioctls + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + * + * Callback to handle the networking device specific ioctls. Used to handle + * the SIOCGHWTSTAMP and SIOCSHWTSTAMP ioctl requests that configure Tx and Rx + * timstamping support. + */ +static int ne6xvf_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + return 0; +} + +/** + * ne6xvf_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int ne6xvf_change_mtu(struct net_device *netdev, int new_mtu) +{ + int max_frame = new_mtu; + + if (new_mtu < NE6X_MIN_MTU_SIZE) { + netdev_err(netdev, "mtu < MIN MTU size"); + return -EINVAL; + } + + max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + if (max_frame > NE6X_MAX_RXBUFFER) { + netdev_err(netdev, "mtu > MAX MTU size"); + return -EINVAL; + } + + netdev_info(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + return 0; +} + +/** + * ne6xvf_find_vlan - Search filter list for specific vlan filter + * @vsi: board private structure + * @vlan: vlan tag + * + * Returns ptr to the filter object or NULL. Must be called while holding the + * mac_vlan_list_lock. 
+ **/ +static struct ne6xvf_vlan_filter *ne6xvf_find_vlan(struct ne6xvf_adapter *adapter, + struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f; + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->vlan.vid == vlan.vid && f->vlan.tpid == vlan.tpid) + return f; + } + + return NULL; +} + +/** + * ne6xvf_add_vlan - Add a vlan filter to the list + * @adapter: board private structure + * @vlan: VLAN tag + * + * Returns ptr to the filter object or NULL when no memory available. + **/ +struct ne6xvf_vlan_filter *ne6xvf_add_vlan_list(struct ne6xvf_adapter *adapter, + struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f = NULL; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = ne6xvf_find_vlan(adapter, vlan); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto clearout; + + f->vlan = vlan; + + list_add_tail(&f->list, &adapter->vlan_filter_list); + f->add = true; + } + +clearout: + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return f; +} + +/** + * ne6xvf_del_vlan - Remove a vlan filter from the list + * @adapter: board private structure + * @vlan: VLAN tag + **/ +void ne6xvf_del_vlan_list(struct ne6xvf_adapter *adapter, struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = ne6xvf_find_vlan(adapter, vlan); + if (f) { + list_del(&f->list); + kfree(f); + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +/** + * ne6xvf_add_vlan - Add a vlan filter to the list + * @adapter: board private structure + * @vlan: VLAN tag + * + * Returns ptr to the filter object or NULL when no memory available. 
+ **/ +static struct ne6xvf_vlan_filter *ne6xvf_add_vlan(struct ne6xvf_adapter *adapter, + struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f = NULL; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = ne6xvf_find_vlan(adapter, vlan); + if (!f) { + f = kzalloc(sizeof(*f), GFP_ATOMIC); + if (!f) + goto clearout; + + f->vlan = vlan; + + list_add_tail(&f->list, &adapter->vlan_filter_list); + f->add = true; + adapter->aq_required |= NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + } + +clearout: + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return f; +} + +/** + * ne6xvf_del_vlan - Remove a vlan filter from the list + * @adapter: board private structure + * @vlan: VLAN tag + **/ +static void ne6xvf_del_vlan(struct ne6xvf_adapter *adapter, struct ne6x_vf_vlan vlan) +{ + struct ne6xvf_vlan_filter *f; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + f = ne6xvf_find_vlan(adapter, vlan); + if (f) { + f->remove = true; + adapter->aq_required |= NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + +static int ne6xvf_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_vf_vlan vlan; + + netdev_info(netdev, "%s:%d: proto:%04x vid:%d\n", __func__, __LINE__, + be16_to_cpu(proto), vid); + vlan = NE6X_VF_VLAN(vid, be16_to_cpu(proto)); + + if (!vid) + return 0; + + if (!ne6xvf_add_vlan(adapter, vlan)) + return -ENOMEM; + + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); + + return 0; +} + +static int ne6xvf_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_vf_vlan vlan; + + netdev_info(netdev, "%s:%d: proto:%04x vid:%d\n", __func__, __LINE__, + be16_to_cpu(proto), vid); + vlan = NE6X_VF_VLAN(vid, be16_to_cpu(proto)); + + ne6xvf_del_vlan(adapter, vlan); + mod_delayed_work(ne6xvf_wq, &adapter->watchdog_task, 0); + + return 0; +} 
+ +/** + *__ne6xvf_setup_tc - configure multiple traffic classes + * @netdev: network interface device structure + * @type_data: tc offload data + * + * This function processes the config information provided by the + * user to configure traffic classes/queue channels and packages the + * information to request the PF to setup traffic classes. + * + * Returns 0 on success. + **/ +static int __ne6xvf_setup_tc(struct net_device *netdev, void *type_data) +{ + return 0; +} + +/** + * ne6xvf_setup_tc - configure multiple traffic classes + * @dev: network interface device structure + * @type: type of offload + * @type_data: tc offload data + * + * This function is the callback to ndo_setup_tc in the + * netdev_ops. + * + * Returns 0 on success + **/ +static int ne6xvf_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) +{ + return __ne6xvf_setup_tc(dev, type_data); +} + +/** + * ne6xvf_features_check - Validate encapsulated packet conforms to limits + * @skb: skb buff + * @dev: This physical port's netdev + * @features: Offload features that the stack believes apply + **/ +static netdev_features_t ne6xvf_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + size_t len; + + /* No point in doing any of this if neither checksum nor GSO are + * being requested for this frame. We can rule out both by just + * checking for CHECKSUM_PARTIAL + */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return features; + + /* We cannot support GSO if the MSS is going to be less than + * 64 bytes. If it is then we need to drop support for GSO. 
+ */ + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) + features &= ~NETIF_F_GSO_MASK; + + /* MACLEN can support at most 63 words */ + len = skb_network_header(skb) - skb->data; + if (len & ~(63 * 2)) + goto out_err; + + /* IPLEN and EIPLEN can support at most 127 dwords */ + len = skb_transport_header(skb) - skb_network_header(skb); + if (len & ~(127 * 4)) + goto out_err; + + /* No need to validate L4LEN as TCP is the only protocol with a + * a flexible value and we support all possible values supported + * by TCP, which is at most 15 dwords + */ + + return features; + +out_err: + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +/** + * ne6xvf_fwd_add_macvlan - Configure MACVLAN interface + * @netdev: Main net device to configure + * @vdev: MACVLAN subordinate device + */ +static void *ne6xvf_fwd_add_macvlan(struct net_device *netdev, struct net_device *vdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_macvlan *mv = NULL; + u8 mac[ETH_ALEN]; + + ether_addr_copy(mac, vdev->dev_addr); + mv = devm_kzalloc(&adapter->pdev->dev, sizeof(*mv), GFP_KERNEL); + if (!mv) + return NULL; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + ne6xvf_addr_sync(netdev, mac); + spin_unlock_bh(&adapter->mac_vlan_list_lock); + INIT_LIST_HEAD(&mv->list); + mv->vdev = vdev; + ether_addr_copy(mv->mac, mac); + list_add(&mv->list, &adapter->macvlan_list); + netdev_info(netdev, "MACVLAN offloads for %s are on\n", vdev->name); + + return mv; +} + +/** + * ne6xvf_fwd_del_macvlan - Delete MACVLAN interface resources + * @netdev: Main net device + * @accel_priv: MACVLAN sub ordinate device + */ +static void ne6xvf_fwd_del_macvlan(struct net_device *netdev, void *accel_priv) +{ + struct ne6x_macvlan *mv = (struct ne6x_macvlan *)accel_priv; + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + + if (!accel_priv) + return; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + ne6xvf_addr_unsync(netdev, mv->mac); + 
spin_unlock_bh(&adapter->mac_vlan_list_lock); + list_del(&mv->list); + devm_kfree(&adapter->pdev->dev, mv); + + netdev_info(netdev, "MACVLAN offloads for %s are off\n", mv->vdev->name); +} + +static const struct net_device_ops ne6xvf_netdev_ops = { + .ndo_open = ne6xvf_open, + .ndo_stop = ne6xvf_close, + .ndo_start_xmit = ne6xvf_lan_xmit_frame, + .ndo_get_stats = ne6xvf_get_stats, + .ndo_set_rx_mode = ne6xvf_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ne6xvf_set_mac, + .ndo_do_ioctl = ne6xvf_do_ioctl, + .ndo_change_mtu = ne6xvf_change_mtu, + .ndo_tx_timeout = ne6xvf_tx_timeout, + + .ndo_vlan_rx_add_vid = ne6xvf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ne6xvf_vlan_rx_kill_vid, + + .ndo_vlan_rx_add_vid = ne6xvf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ne6xvf_vlan_rx_kill_vid, + + .ndo_setup_tc = ne6xvf_setup_tc, + .ndo_features_check = ne6xvf_features_check, + + .ndo_dfwd_add_station = ne6xvf_fwd_add_macvlan, + .ndo_dfwd_del_station = ne6xvf_fwd_del_macvlan, + + .ndo_fix_features = ne6xvf_fix_features, + .ndo_set_features = ne6xvf_set_features, +}; + +static int ne6xvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct ne6xvf_adapter *adapter = NULL; + struct ne6xvf_hw *hw = NULL; + struct net_device *netdev; + char name[IFNAMSIZ] = {0}; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + err = pci_request_regions(pdev, ne6xvf_driver_name); + if (err) { + dev_err(pci_dev_to_dev(pdev), "pci_request_regions failed 0x%x\n", err); + goto err_pci_reg; + } + + pci_set_master(pdev); + + sprintf(name, "enp%ds%df%d", pdev->bus->number, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + + netdev = alloc_netdev_mq(sizeof(struct ne6xvf_adapter), name, 
NET_NAME_USER, ether_setup, + NE6XVF_MAX_REQ_QUEUES); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + + adapter->netdev = netdev; + adapter->pdev = pdev; + + hw = &adapter->hw; + hw->back = adapter; + + ne6xvf_change_state(adapter, __NE6XVF_STARTUP); + + pci_save_state(pdev); + + hw->hw_addr0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + hw->hw_addr2 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2)); + + if (!hw->hw_addr0 || !hw->hw_addr2) { + err = -EIO; + goto err_ioremap; + } + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + hw->bus.device = PCI_SLOT(pdev->devfn); + hw->bus.func = PCI_FUNC(pdev->devfn); + hw->bus.bus_id = pdev->bus->number; + + ne6xvf_init_spinlock(&hw->mbx.mbx_spinlock); + spin_lock_init(&adapter->mac_vlan_list_lock); + + INIT_LIST_HEAD(&adapter->mac_filter_list); + INIT_LIST_HEAD(&adapter->vlan_filter_list); + INIT_LIST_HEAD(&adapter->macvlan_list); + + INIT_WORK(&adapter->sdk_task, ne6xvf_sdk_task); + INIT_DELAYED_WORK(&adapter->watchdog_task, ne6xvf_watchdog_task); + + init_waitqueue_head(&adapter->down_waitqueue); + init_waitqueue_head(&adapter->vc_waitqueue); + + ne6xvf_startup(adapter); + if (ne6xvf_init_get_resources(adapter)) { + err = -EIO; + goto err_ioremap; + } + + adapter->aq_required = 0; + ne6xvf_init_process_extended_caps(adapter); + ne6xvf_init_config_adapter(adapter); + + queue_delayed_work(ne6xvf_wq, &adapter->watchdog_task, + msecs_to_jiffies(5 * (pdev->devfn & 0x07))); + + ne6xvf_dbg_pf_init(adapter); + + hw->debug_mask = 0xffffffff; + return 0; +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + 
pci_disable_device(pdev); + return err; +} + +/** + * ne6xvf_irq_enable_queues - Enable interrupt for specified queues + * @adapter: board private structure + * @mask: bitmap of queues to enable + **/ +void ne6xvf_irq_enable_queues(struct ne6xvf_adapter *adapter, u32 mask) +{ + struct ne6xvf_hw *hw = &adapter->hw; + int i; + + for (i = 0; i < adapter->num_msix_vectors; i++) + wr64(hw, NE6XVF_REG_ADDR(i, NE6X_VP_INT_MASK), ~(1ULL << NE6X_VP_CQ_INTSHIFT)); +} + +/** + * ne6xvf_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + * @flush: boolean value whether to run rd32() + **/ +void ne6xvf_irq_enable(struct ne6xvf_adapter *adapter, bool flush) +{ + ne6xvf_irq_enable_queues(adapter, ~0); +} + +void ne6xvf_free_all_tg_resources(struct ne6xvf_adapter *adapter) +{ + int i; + + if (!adapter->tg_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->tg_rings[i].desc) { + struct ne6x_ring *tg_ring = &adapter->tg_rings[i]; + /* Zero out the descriptor ring */ + memset(tg_ring->desc, 0, tg_ring->size); + tg_ring->next_to_use = 0; + tg_ring->next_to_clean = 0; + + if (!tg_ring->netdev) + return; + + dma_free_coherent(tg_ring->dev, tg_ring->size, tg_ring->desc, tg_ring->dma); + tg_ring->desc = NULL; + } +} + +void ne6xvf_free_all_cq_resources(struct ne6xvf_adapter *adapter) +{ + int i; + + if (!adapter->cq_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->cq_rings[i].desc) { + struct ne6x_ring *cq_ring = &adapter->cq_rings[i]; + /* Zero out the descriptor ring */ + memset(cq_ring->desc, 0, cq_ring->size); + cq_ring->next_to_use = 0; + cq_ring->next_to_clean = 0; + + if (!cq_ring->netdev) + return; + + dma_free_coherent(cq_ring->dev, cq_ring->size, cq_ring->desc, cq_ring->dma); + cq_ring->desc = NULL; + } +} + +void ne6xvf_free_all_tx_resources(struct ne6xvf_adapter *adapter) +{ + unsigned long bi_size; + int i, idx; + + if (!adapter->tx_rings) + return; + + for (i = 
0; i < adapter->num_active_queues; i++) + if (adapter->tx_rings[i].desc) { + struct ne6x_ring *tx_ring = &adapter->tx_rings[i]; + + /* ring already cleared, nothing to do */ + if (tx_ring->tx_buf) { + /* Free all the Tx ring sk_buffs */ + for (idx = 0; idx < tx_ring->count; idx++) + ne6xvf_unmap_and_free_tx_resource(tx_ring, + &tx_ring->tx_buf[idx]); + + bi_size = sizeof(struct ne6x_tx_buf) * tx_ring->count; + memset(tx_ring->tx_buf, 0, bi_size); + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->cq_last_expect = 0; + + if (tx_ring->netdev) + /* cleanup Tx queue statistics */ + netdev_tx_reset_queue(txring_txq(tx_ring)); + } + + kfree(tx_ring->tx_buf); + tx_ring->tx_buf = NULL; + dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, tx_ring->dma); + tx_ring->desc = NULL; + kfree(tx_ring->sgl); + } +} + +void ne6xvf_free_all_rx_resources(struct ne6xvf_adapter *adapter) +{ + unsigned long bi_size; + int i, idx; + + if (!adapter->rx_rings) + return; + + for (i = 0; i < adapter->num_active_queues; i++) + if (adapter->rx_rings[i].desc) { + struct ne6x_ring *rx_ring = &adapter->rx_rings[i]; + /* ring already cleared, nothing to do */ + if (rx_ring->rx_buf) { + if (rx_ring->skb) { + dev_kfree_skb(rx_ring->skb); + rx_ring->skb = NULL; + } + + /* Free all the Rx ring sk_buffs */ + for (idx = 0; idx < rx_ring->count; idx++) { + struct ne6x_rx_buf *rx_bi = &rx_ring->rx_buf[idx]; + + if (!rx_bi->page) + continue; + + /* Invalidate cache lines that may have been written to by + * device so that we avoid corrupting memory. 
+ */ + dma_sync_single_range_for_cpu(rx_ring->dev, rx_bi->dma, + rx_bi->page_offset, + rx_ring->rx_buf_len, + DMA_FROM_DEVICE); + + /* free resources associated with mapping */ + dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, + ne6x_rx_pg_size(rx_ring), + DMA_FROM_DEVICE, NE6X_RX_DMA_ATTR); + + __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); + + rx_bi->page = NULL; + rx_bi->page_offset = 0; + } + + bi_size = sizeof(struct ne6x_rx_buf) * rx_ring->count; + memset(rx_ring->rx_buf, 0, bi_size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->cq_last_expect = 0; + } + + kfree(rx_ring->rx_buf); + rx_ring->rx_buf = NULL; + + if (rx_ring->desc) { + dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; + } + } +} + +void ne6xvf_reset_interrupt_capability(struct ne6xvf_adapter *adapter) +{ + if (!adapter->msix_entries) + return; + + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +} + +static void ne6xvf_remove(struct pci_dev *pdev) +{ + struct ne6xvf_adapter *adapter = ne6xvf_pdev_to_adapter(pdev); + struct net_device *netdev = adapter->netdev; + struct ne6xvf_vlan_filter *vlf, *vlftmp; + struct ne6xvf_hw *hw = &adapter->hw; + struct ne6xvf_mac_filter *f, *ftmp; + struct ne6x_macvlan *mv, *mv_tmp; + int i; + + ne6xvf_dbg_pf_exit(adapter); + + set_bit(__NE6XVF_IN_REMOVE_TASK, &adapter->crit_section); + cancel_work_sync(&adapter->sdk_task); + cancel_delayed_work_sync(&adapter->watchdog_task); + + if (adapter->netdev_registered) { + /* This will call ne6xvf_close if the device was open previously. 
	 * The Admin Queue and watchdog tasks have already been shut
	 * down at this point so the driver will rely on
	 * ne6xvf_request_reset below to disable the queues and handle
	 * any other Admin Queue-based cleanup normally done as part of
	 * ne6xvf_close.
	 */
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}

	dev_info(&adapter->pdev->dev, "Removing device\n");

	/* Shut down all the garbage mashers on the detention level */
	ne6xvf_change_state(adapter, __NE6XVF_REMOVE);
	adapter->flags &= ~NE6XVF_FLAG_REINIT_ITR_NEEDED;

	ne6xvf_request_reset(adapter);

	/* force each VP queue pair to reload before tearing down rings */
	for (i = 0; i < adapter->num_active_queues; i++)
		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(i, NE6X_VP_RELOAD), 0x1);

	/* release all descriptor ring / buffer memory */
	ne6xvf_free_all_tg_resources(adapter);
	ne6xvf_free_all_cq_resources(adapter);
	ne6xvf_free_all_tx_resources(adapter);
	ne6xvf_free_all_rx_resources(adapter);

	if (adapter->last_state == __NE6XVF_RESETTING ||
	    (adapter->last_state == __NE6XVF_RUNNING && !(netdev->flags & IFF_UP)))
		ne6xvf_free_traffic_irqs(adapter);

	ne6xvf_reset_interrupt_capability(adapter);
	ne6xvf_free_q_vectors(adapter);

	ne6xvf_destroy_spinlock(&hw->mbx.mbx_spinlock);

	/* drop all MAC and VLAN filters under the shared list lock */
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	/* release vsi vlan list resource */
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, list) {
		list_del(&vlf->list);
		kfree(vlf);
	}
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry_safe(mv, mv_tmp, &adapter->macvlan_list, list)
		ne6xvf_fwd_del_macvlan(netdev, mv);

	iounmap(hw->hw_addr0);
	iounmap(hw->hw_addr2);
	pci_release_regions(pdev);

	ne6xvf_free_queues(adapter);
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;

	free_netdev(netdev);

	pci_disable_device(pdev);
}

static struct pci_driver ne6xvf_driver = {
	.name     = ne6xvf_driver_name,
	.id_table = ne6xvf_pci_tbl,
	.probe    = ne6xvf_probe,
	.remove   = ne6xvf_remove,
};

/**
 * ne6xvf_init_module - Driver Registration Routine
 *
 * Creates the single-threaded driver workqueue, initializes debugfs and
 * registers the PCI driver. Returns 0 on success or a negative errno.
 **/
static int __init ne6xvf_init_module(void)
{
	int ret;

	pr_info("navf: %s - version %s\n", ne6xvf_driver_string, ne6xvf_driver_version);

	pr_info("%s\n", ne6xvf_copyright);

	ne6xvf_wq = create_singlethread_workqueue(ne6xvf_driver_name);
	if (!ne6xvf_wq) {
		pr_err("%s: Failed to create workqueue\n", ne6xvf_driver_name);
		return -ENOMEM;
	}

	ne6xvf_dbg_init();

	/* NOTE(review): if pci_register_driver() fails, the workqueue and
	 * debugfs entries created above are not torn down here — confirm
	 * whether that is intentional.
	 */
	ret = pci_register_driver(&ne6xvf_driver);

	return ret;
}

module_init(ne6xvf_init_module);

/**
 * ne6xvf_exit_module - Driver Exit Cleanup Routine
 *
 * ne6xvf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ne6xvf_exit_module(void)
{
	pci_unregister_driver(&ne6xvf_driver);
	destroy_workqueue(ne6xvf_wq);
	ne6xvf_dbg_exit();
}

module_exit(ne6xvf_exit_module);

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd.
*/ + +#ifndef _NE6XVF_OSDEP_H +#define _NE6XVF_OSDEP_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +inline void ne6xvf_init_spinlock_d(struct ne6xvf_spinlock *sp); +void ne6xvf_destroy_spinlock_d(struct ne6xvf_spinlock *sp); +void ne6xvf_acquire_spinlock_d(struct ne6xvf_spinlock *sp); +void ne6xvf_release_spinlock_d(struct ne6xvf_spinlock *sp); + +#endif /* _NE6XVF_OSDEP_H */ diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c new file mode 100644 index 000000000000..7ba4a802d5b7 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6xvf.h" +#include "ne6xvf_txrx.h" + +/** + * ne6xvf_update_enable_itr - Update itr and re-enable MSIX interrupt + * @vsi: the VSI we care about + * @q_vector: q_vector for which itr is being updated and interrupt enabled + * + **/ +static inline void ne6xvf_update_enable_itr(struct ne6x_q_vector *q_vector) +{ + struct ne6xvf_adapter *adpt = (struct ne6xvf_adapter *)q_vector->adpt; + struct ne6xvf_hw *hw = &adpt->hw; + + if (!test_bit(NE6X_ADPT_DOWN, adpt->comm.state)) { + struct ne6x_ring *cq_ring = NULL; + + cq_ring = q_vector->cq.ring; + if (cq_ring->next_to_clean != cq_ring->next_to_use) { + cq_ring->next_to_clean = cq_ring->next_to_use; + /* memory barrier updating cq ring tail */ + wmb(); + writeq(cq_ring->next_to_clean, cq_ring->tail); + } + + wr64(hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT), + (1ULL << NE6X_VP_CQ_INTSHIFT)); + wr64(hw, NE6XVF_REG_ADDR(q_vector->reg_idx, NE6X_VP_INT_MASK), + ~(1ULL << NE6X_VP_CQ_INTSHIFT)); + } +} + +/** + * ne6xvf_unmap_and_free_tx_resource - Release a Tx buffer + * @ring: the ring that owns the buffer + * @tx_buffer: the buffer to free + **/ +void ne6xvf_unmap_and_free_tx_resource(struct ne6x_ring *ring, 
struct ne6x_tx_buf *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); +} + +/** + * ne6xvf_napi_poll - NAPI polling Rx/Tx cleanup routine + * @napi: napi struct with our devices info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * This function will clean all queues associated with a q_vector. + * + * Returns the amount of work done + **/ +int ne6xvf_napi_poll(struct napi_struct *napi, int budget) +{ + struct ne6x_q_vector *q_vector = container_of(napi, struct ne6x_q_vector, napi); + struct ne6x_adapt_comm *comm = (struct ne6x_adapt_comm *)q_vector->adpt; + struct ne6x_ring *ring = NULL; + bool clean_complete = true; + int cq_budget = 16; + int work_done = 0; + int cleaned = 0; + + ring = q_vector->cq.ring; + + if (test_bit(NE6X_ADPT_DOWN, comm->state)) { + napi_complete(napi); + return 0; + } + + cleaned = ne6x_clean_cq_irq(q_vector, ring, cq_budget); + if (cleaned >= cq_budget) + clean_complete = false; + + ring = q_vector->tx.ring; + if (!ne6x_clean_tx_irq(comm, ring, budget)) + clean_complete = false; + + /* Handle case where we are called by netpoll with a budget of 0 */ + if (budget <= 0) + goto tx_only; + + ring = q_vector->rx.ring; + cleaned = ne6x_clean_rx_irq(ring, budget); + if (cleaned >= budget) + clean_complete = false; + + work_done += cleaned; + + /* If work not completed, return budget and polling will return */ + if (!clean_complete) { + int cpu_id = smp_processor_id(); + + /* It is possible that the interrupt affinity has changed but, + * if the cpu is pegged at 100%, polling will 
never exit while + * traffic continues and the interrupt will be stuck on this + * cpu. We check to make sure affinity is correct before we + * continue to poll, otherwise we must stop polling so the + * interrupt can move to the correct cpu. + */ + if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { + /* Tell napi that we are done polling */ + napi_complete_done(napi, work_done); + ne6xvf_update_enable_itr(q_vector); + /* Return budget-1 so that polling stops */ + return budget - 1; + } +tx_only: + return budget; + } + + /* Work is done so exit the polling mode and re-enable the interrupt */ + napi_complete_done(napi, work_done); + ne6xvf_update_enable_itr(q_vector); + + return min(work_done, budget - 1); +} + +netdev_tx_t ne6xvf_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct ne6xvf_adapter *adapter = netdev_priv(netdev); + struct ne6x_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping]; + struct ne6x_ring *tag_ring = &adapter->tg_rings[skb->queue_mapping]; + struct sk_buff *trailer; + int tailen, nsg; + bool jumbo_frame = true; + + tailen = 4; + + if (skb_put_padto(skb, NE6X_MIN_TX_LEN)) + return NETDEV_TX_OK; + + if (skb->len < NE6X_MAX_DATA_PER_TXD) { + nsg = skb_cow_data(skb, tailen, &trailer); + if (unlikely(nsg < 0)) { + netdev_err(netdev, "TX: skb_cow_data() returned %d\n", nsg); + return nsg; + } + + pskb_put(skb, trailer, tailen); + jumbo_frame = false; + } + + if (netdev->gso_max_size < skb->len) + netdev_err(netdev, "%s: skb->len = %d > 15360\n", __func__, skb->len); + + return ne6x_xmit_frame_ring(skb, tx_ring, tag_ring, jumbo_frame); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h new file mode 100644 index 000000000000..0a10c04862a2 --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_txrx.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. 
*/ + +#ifndef _NE6XVF_TXRX_H +#define _NE6XVF_TXRX_H + +void ne6xvf_unmap_and_free_tx_resource(struct ne6x_ring *ring, struct ne6x_tx_buf *tx_buffer); +int ne6xvf_napi_poll(struct napi_struct *napi, int budget); +netdev_tx_t ne6xvf_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c new file mode 100644 index 000000000000..e504254e212c --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.c @@ -0,0 +1,1125 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#include "ne6xvf.h" +#include "ne6xvf_osdep.h" + +int ne6xvf_sdk_send_msg_to_pf(struct ne6xvf_hw *hw, enum virtchnl_ops v_opcode, + enum virtchnl_status_code v_retval, u8 *msg, u16 msglen, + void *cmd_details) +{ + union u_ne6x_mbx_snap_buffer_data mbx_buffer; + + ne6xvf_acquire_spinlock(&hw->mbx.mbx_spinlock); + + mbx_buffer.snap.data[0] = 0; + mbx_buffer.snap.data[1] = 0; + mbx_buffer.snap.data[2] = 0; + mbx_buffer.snap.data[3] = 0; + mbx_buffer.snap.data[4] = 0; + mbx_buffer.snap.data[5] = 0; + + if (msglen) { + if (msglen > NE6XVF_SDK_LARGE_BUF) { + ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock); + return NE6XVF_ERR_INVALID_SIZE; + } + + memcpy(mbx_buffer.snap.data, msg, msglen); + } + + mbx_buffer.snap.len = msglen; + mbx_buffer.snap.type = v_opcode; + mbx_buffer.snap.state = v_retval; + + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_MAILBOX_DATA), mbx_buffer.val); + NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_DB_STATE), 0x2); + + ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock); + + return 0; +} + +int ne6xvf_send_pf_msg(struct ne6xvf_adapter *adapter, enum virtchnl_ops op, u8 *msg, u16 len) +{ + struct ne6xvf_hw *hw = &adapter->hw; + int err; + + if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED) + return 0; /* nothing to see here, move along */ + + err = 
/**
 * ne6xvf_send_pf_msg - send a virtchnl message to the PF via the mailbox
 * @adapter: adapter structure
 * @op: virtchnl opcode
 * @msg: payload (may be NULL)
 * @len: payload length in bytes
 *
 * Returns 0 when PF communications are flagged failed (message silently
 * dropped), otherwise the mailbox send status.
 */
int ne6xvf_send_pf_msg(struct ne6xvf_adapter *adapter, enum virtchnl_ops op, u8 *msg, u16 len)
{
	struct ne6xvf_hw *hw = &adapter->hw;
	int err;

	if (adapter->flags & NE6XVF_FLAG_PF_COMMS_FAILED)
		return 0; /* nothing to see here, move along */

	err = ne6xvf_sdk_send_msg_to_pf(hw, op, VIRTCHNL_STATUS_SUCCESS, msg, len, NULL);
	if (err)
		dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %d, sdk_err %s\n",
			op, err, hw->err_str);

	return err;
}

/**
 * ne6xvf_clean_arq_element
 * @hw: pointer to the hw struct
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 *
 * Register protocol (under the mailbox spinlock): bit0 of VP_INT is an
 * ack that is cleared if set; bit1 signals a pending PF message. After
 * copying the snapshot, bit1 is cleared and the DB_STATE doorbell is rung.
 * NOTE(review): *pending is always set to 0 when provided — confirm the
 * hardware really never queues more than one message.
 **/
enum ne6xvf_status ne6xvf_clean_arq_element(struct ne6xvf_hw *hw, struct ne6xvf_arq_event_info *e,
					    u16 *pending)
{
	union u_ne6x_mbx_snap_buffer_data usnap;
	enum ne6xvf_status ret_code = 0;
	u64 val;
	int i;

	ne6xvf_acquire_spinlock(&hw->mbx.mbx_spinlock);
	val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT));
	if (val & 0x1)
		NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x1);

	if (!(val & 0x2)) {
		/* no message pending from the PF */
		ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock);
		return NE6XVF_ERR_NOT_READY;
	}

	usnap.val = NE6XVF_READ_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_PF_MAILBOX_DATA));
	e->msg_len = min_t(u16, (u16)usnap.snap.len, e->buf_len);
	if (e->msg_buf && e->msg_len != 0) {
		for (i = 0; i < e->msg_len && i < NE6XVF_SDK_LARGE_BUF; i++) {
			e->msg_buf[i] = usnap.snap.data[i];
			e->snap.data[i] = usnap.snap.data[i];
		}
	}

	e->snap.type = usnap.snap.type;
	e->snap.state = usnap.snap.state;

	if (pending)
		*pending = 0;

	/* ack the message and ring the doorbell back to the PF */
	NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6X_VP_INT), 0x2);
	NE6XVF_WRITE_REG(hw, NE6XVF_REG_ADDR(0, NE6XVF_DB_STATE), 0x1);

	ne6xvf_release_spinlock(&hw->mbx.mbx_spinlock);
	return ret_code;
}

/**
 * ne6xvf_poll_virtchnl_msg - poll for virtchnl msg matching the requested_op
 * @adapter: adapter structure
 * @event: event to populate on success
 * @op_to_poll: requested virtchnl op to poll for
 *
 * Busy-polls the mailbox (up to ~500 ms in 10 us steps while empty).
 * Messages for other opcodes received meanwhile are dispatched to
 * ne6xvf_virtchnl_completion(). Returns the state carried by the matching
 * message, or a mailbox error code.
 * NOTE(review): once a message has been received the NOT_READY/timeout
 * path no longer decrements toward exit — a flood of non-matching
 * messages keeps this loop alive; confirm that is acceptable.
 */
int ne6xvf_poll_virtchnl_msg(struct ne6xvf_adapter *adapter, struct ne6xvf_arq_event_info *event,
			     enum virtchnl_ops op_to_poll)
{
	struct ne6xvf_arq_event_info rece_event;
	struct ne6xvf_hw *hw = &adapter->hw;
	enum ne6xvf_status status, v_ret;
	enum virtchnl_ops received_op;
	int timeout = 50000;
	int i;

	rece_event.buf_len = NE6XVF_MAX_AQ_BUF_SIZE;
	rece_event.msg_buf = kzalloc(rece_event.buf_len, GFP_KERNEL);
	if (!rece_event.msg_buf)
		return NE6XVF_ERR_NO_MEMORY;

	while (1) {
		/* When the SDK is empty, ne6xvf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		status = ne6xvf_clean_arq_element(hw, &rece_event, NULL);
		if (status) {
			if (status == NE6XVF_ERR_NOT_READY && timeout) {
				usleep_range(10, 12);
				timeout--;
				continue;
			}
			kfree(rece_event.msg_buf);
			return status;
		}

		received_op = (enum virtchnl_ops)le32_to_cpu(rece_event.snap.type);
		v_ret = (enum ne6xvf_status)le32_to_cpu(rece_event.snap.state);
		if (op_to_poll == received_op) {
			/* matching reply: copy snapshot and payload to caller */
			memcpy(&event->snap, &rece_event.snap,
			       sizeof(struct ne6x_mbx_snap_buffer_data));
			event->msg_len = min(rece_event.msg_len, event->buf_len);
			if (event->msg_buf) {
				for (i = 0; i < event->msg_len && i < NE6XVF_SDK_LARGE_BUF; i++)
					event->msg_buf[i] = rece_event.msg_buf[i];
			}
			break;
		}

		/* unrelated message: hand it to the async completion path */
		ne6xvf_virtchnl_completion(adapter, received_op, v_ret, rece_event.msg_buf,
					   rece_event.msg_len);
	}

	kfree(rece_event.msg_buf);
	status = (enum ne6xvf_status)le32_to_cpu(event->snap.state);

	return status;
}

/**
 * ne6xvf_request_reset - ask the PF to reset this VF
 * @adapter: adapter structure
 *
 * Sends the VF's default MAC with the reset request; the reply is not
 * awaited. Returns the send status, or 0 if resources are not yet set up.
 */
int ne6xvf_request_reset(struct ne6xvf_adapter *adapter)
{
	int status;

	if (!adapter->vf_res)
		return 0;
	/* Don't check CURRENT_OP - this is always higher priority */
	status = ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF,
				    &adapter->vf_res->vsi_res[0].default_mac_addr[0], 6);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;

	return status;
}
{.buf_len = 0, .msg_buf = NULL}; + struct ne6xvf_virtchnl_version_info vvi; + + vvi.major = NE6XVF_VIRTCHNL_VERSION_MAJOR; + vvi.minor = NE6XVF_VIRTCHNL_VERSION_MINOR; + + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_VERSION, (u8 *)&vvi, sizeof(vvi)); + usleep_range(10, 12); + return ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_VERSION); +} + +/** + * ne6xvf_vf_parse_hw_config + * @hw: pointer to the hardware structure + * @msg: pointer to the virtual channel VF resource structure + * + * Given a VF resource message from the PF, populate the hw struct + * with appropriate information. + **/ +void ne6xvf_vf_parse_hw_config(struct ne6xvf_hw *hw, struct virtchnl_vf_resource *msg) +{ + struct virtchnl_vsi_resource *vsi_res; + int i; + + vsi_res = &msg->vsi_res[0]; + + hw->dev_caps.num_vsis = msg->num_vsis; + hw->dev_caps.num_rx_qp = msg->num_queue_pairs; + hw->dev_caps.num_tx_qp = msg->num_queue_pairs; + hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; + + hw->dev_caps.max_mtu = msg->max_mtu; + for (i = 0; i < msg->num_vsis; i++) { + if (vsi_res->vsi_type == NE6XVF_VIRTCHNL_VSI_SRIOV) { + ether_addr_copy(hw->mac.perm_addr, vsi_res->default_mac_addr); + ether_addr_copy(hw->mac.addr, vsi_res->default_mac_addr); + } + vsi_res++; + } +} + +/** + * ne6xvf_get_vf_config + * @adapter: private adapter structure + * + * Get VF configuration from PF and populate hw structure. Must be called after + * admin queue is initialized. Busy waits until response is received from PF, + * with maximum timeout. Response from PF is returned in the buffer for further + * processing by the caller. 
+ **/ +int ne6xvf_get_vf_config(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_hw *hw = &adapter->hw; + struct ne6xvf_arq_event_info event; + int err; + + event.buf_len = sizeof(struct ne6x_mbx_snap_buffer_data); + event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); + if (!event.msg_buf) + return -ENOMEM; + + err = ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_GET_VF_RESOURCES); + + hw->dev_caps.vf_id = event.msg_buf[0]; + hw->dev_caps.chip_id = 0x0; + hw->dev_caps.lport = event.msg_buf[1]; + hw->dev_caps.mac_id = event.msg_buf[2]; + hw->dev_caps.base_queue = event.msg_buf[3]; + hw->dev_caps.num_vf_per_pf = event.msg_buf[5]; + adapter->vf_res->num_vsis = 0x1; + adapter->vf_res->num_queue_pairs = event.msg_buf[4]; + adapter->vf_res->max_vectors = event.msg_buf[4]; + adapter->vf_res->vsi_res[0].vsi_type = NE6XVF_VIRTCHNL_VSI_SRIOV; + + adapter->comm.port_info = hw->dev_caps.lport | (hw->dev_caps.vf_id << 8); + + dev_info(&adapter->pdev->dev, "vf %d Get Resource [ lport: %d, mac_id: %d, base: %d, queue: %d, err = %d]\n", + hw->dev_caps.vf_id, hw->dev_caps.lport, hw->dev_caps.mac_id, + hw->dev_caps.base_queue, adapter->vf_res->num_queue_pairs, err); + + ne6xvf_vf_parse_hw_config(hw, adapter->vf_res); + + return err; +} + +int ne6xvf_config_default_vlan(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event; + struct ne6x_vf_vlan vlan; + + adapter->current_op = VIRTCHNL_OP_ADD_VLAN; + + event.buf_len = 0; + event.msg_buf = NULL; + + vlan = NE6X_VF_VLAN(0xfff, ETH_P_8021Q); + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)&vlan, sizeof(struct ne6x_vf_vlan)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_ADD_VLAN); + + return 0; +} + +/** + * ne6xvf_send_vf_config_msg + * @adapter: adapter structure + * + * Send VF configuration request admin queue message to the PF. The reply + * is not checked in this function. Returns 0 if the message was + * successfully sent, or one of the NE6XVF_ADMIN_QUEUE_ERROR_ statuses if not. 
/**
 * ne6xvf_send_vf_config_msg
 * @adapter: adapter structure
 * @b_init: true on first init — generate a random locally-administered MAC
 *          (prefix 02:31:3a); false — reuse the stored default MAC
 *
 * Send VF configuration request admin queue message to the PF. The reply
 * is not checked in this function. Returns 0 if the message was
 * successfully sent, or one of the NE6XVF_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int ne6xvf_send_vf_config_msg(struct ne6xvf_adapter *adapter, bool b_init)
{
	u8 mac_addr[ETH_ALEN];

	adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
	adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_CONFIG;
	if (b_init) {
		eth_random_addr(mac_addr);
		mac_addr[0] = 0x02;
		mac_addr[1] = 0x31;
		mac_addr[2] = 0x3a;
	} else {
		memcpy(mac_addr, adapter->vf_res->vsi_res[0].default_mac_addr, 6);
	}

	ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_RESOURCES, mac_addr, 6);

	/* mac addr need get for PF */
	adapter->vf_res->vsi_res[0].default_mac_addr[0] = mac_addr[0];
	adapter->vf_res->vsi_res[0].default_mac_addr[1] = mac_addr[1];
	adapter->vf_res->vsi_res[0].default_mac_addr[2] = mac_addr[2];
	adapter->vf_res->vsi_res[0].default_mac_addr[3] = mac_addr[3];
	adapter->vf_res->vsi_res[0].default_mac_addr[4] = mac_addr[4];
	adapter->vf_res->vsi_res[0].default_mac_addr[5] = mac_addr[5];
	adapter->vf_res->vsi_res[0].vsi_type = NE6XVF_VIRTCHNL_VSI_SRIOV;

	return 0;
}

/**
 * ne6xvf_send_vf_offload_msg - push the current hw_feature offload bits to PF
 * @adapter: adapter structure
 *
 * Fire-and-forget: clears the pending-AQ flag and sends the 32-bit feature
 * word. Always returns 0.
 */
int ne6xvf_send_vf_offload_msg(struct ne6xvf_adapter *adapter)
{
	adapter->current_op = VIRTCHNL_OP_CONFIG_OFFLOAD;
	adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_HW_OFFLOAD;
	dev_info(&adapter->pdev->dev, "adapter->hw_feature = 0x%08X\n", adapter->hw_feature);
	ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_OFFLOAD, (u8 *)&adapter->hw_feature, 4);

	return 0;
}

/**
 * ne6xvf_config_rss_info - send the RSS table to the PF in mailbox-sized chunks
 * @adapter: adapter structure
 *
 * Splits struct ne6x_rss_info into NE6XVF_SDK_LARGE_BUF-byte fragments and
 * sends each synchronously, waiting for the per-fragment reply.
 * NOTE(review): the per-fragment send status is captured but never checked.
 */
void ne6xvf_config_rss_info(struct ne6xvf_adapter *adapter)
{
	int count, size = sizeof(struct ne6x_rss_info);
	int index, status;
	u8 *plut_info = (u8 *)&adapter->rss_info;
	struct ne6xvf_arq_event_info event;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot Configure RSS, command %d pending\n",
			adapter->current_op);
		return;
	}

	adapter->current_op = VIRTCHNL_OP_CONFIG_RSS;

	/* number of NE6XVF_SDK_LARGE_BUF-sized fragments, rounded up */
	count = (size + NE6XVF_SDK_LARGE_BUF - 1) / NE6XVF_SDK_LARGE_BUF;

	for (index = 0; index < count; index++) {
		event.buf_len = 0;
		event.msg_buf = NULL;
		status = ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_RSS,
					    &plut_info[index * NE6XVF_SDK_LARGE_BUF],
					    ((size - index * NE6XVF_SDK_LARGE_BUF) >
					     NE6XVF_SDK_LARGE_BUF)
					    ? NE6XVF_SDK_LARGE_BUF
					    : (size - index * NE6XVF_SDK_LARGE_BUF));
		ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_CONFIG_RSS);
	}

	adapter->aq_required &= ~NE6XVF_FLAG_AQ_CONFIGURE_RSS;
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}

/**
 * ne6xvf_changed_rss - notify the PF that the active queue count changed
 * @adapter: adapter structure
 *
 * Sends the new num_active_queues and waits for the acknowledgement.
 */
void ne6xvf_changed_rss(struct ne6xvf_adapter *adapter)
{
	struct ne6xvf_arq_event_info event;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot Configure RSS, command %d pending\n",
			adapter->current_op);
		return;
	}

	event.msg_buf = NULL;
	event.buf_len = 0;

	adapter->current_op = VIRTCHNL_OP_CHANGED_RSS;
	ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CHANGED_RSS, (u8 *)&adapter->num_active_queues,
			   sizeof(adapter->num_active_queues));
	ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_CHANGED_RSS);
	adapter->aq_required &= ~NE6XVF_FLAG_AQ_CHANGED_RSS;
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
}

/**
 * ne6xvf_request_feature - ask the PF for this VF's feature word
 * @adapter: adapter structure
 *
 * Asynchronous request; the reply is collected by ne6xvf_get_vf_feature().
 * Returns 0 on send, -EBUSY when another command is pending.
 */
int ne6xvf_request_feature(struct ne6xvf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot request feature, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	adapter->current_op = VIRTCHNL_OP_GET_VF_FEATURE;
	adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_FEATURE;
	ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_GET_VF_FEATURE, NULL, 0);

	return 0;
}

/**
 * ne6xvf_request_stats
 * @adapter: adapter structure
 *
 * Request VSI statistics from PF (delegates to the local stats refresh).
 **/
void ne6xvf_request_stats(struct ne6xvf_adapter *adapter)
{
	ne6xvf_update_pf_stats(adapter);
}

/**
 * ne6xvf_request_queues
 * @adapter: adapter structure
 * @num: number of requested queues
 *
 * We get a default number of queues from the PF. This enables us to request a
 * different number. Returns 0 on success, negative on failure.
 * NOTE(review): @num is ignored — the request is hard-coded to one queue
 * pair; confirm intent.
 **/
int ne6xvf_request_queues(struct ne6xvf_adapter *adapter, int num)
{
	struct ne6xvf_virtchnl_vf_res_request vfres;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	vfres.num_queue_pairs = 1;
	vfres.need_reset = 0x0;

	adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES;
	adapter->flags |= NE6XVF_FLAG_REINIT_ITR_NEEDED;

	return ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, (u8 *)&vfres, sizeof(vfres));
}

/**
 * ne6xvf_enable_queues
 * @adapter: adapter structure
 *
 * Ask the PF to enable this VF's queues. Fire-and-forget.
 * Returns 0 on success, -EBUSY when another command is pending.
 **/
int ne6xvf_enable_queues(struct ne6xvf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot enable queues, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	adapter->current_op = VIRTCHNL_OP_ENABLE_QUEUES;
	adapter->aq_required &= ~NE6XVF_FLAG_AQ_ENABLE_QUEUES;

	ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_QUEUES, NULL, 0);
	return 0;
}

/**
 * ne6xvf_get_vf_feature - collect the feature-word reply from the PF
 * @adapter: adapter structure
 *
 * Assembles adapter->hw_feature from the 4 little-endian payload bytes of
 * the reply. Returns the poll status (0 on success, -ENOMEM on alloc
 * failure).
 */
int ne6xvf_get_vf_feature(struct ne6xvf_adapter *adapter)
{
	struct ne6xvf_arq_event_info event;
	int status;

	event.buf_len = sizeof(struct ne6x_mbx_snap_buffer_data);
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return -ENOMEM;

	status = ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_GET_VF_FEATURE);
	if (status == 0) {
		/* little-endian byte assembly: data[3..0] -> 32-bit word */
		adapter->hw_feature = event.snap.data[3];
		adapter->hw_feature = (adapter->hw_feature << 8);
		adapter->hw_feature |= event.snap.data[2];
		adapter->hw_feature = (adapter->hw_feature << 8);
		adapter->hw_feature |= event.snap.data[1];
		adapter->hw_feature = (adapter->hw_feature << 8);
		adapter->hw_feature |= event.snap.data[0];
		dev_info(&adapter->pdev->dev, "vf %d get feature 0x%08X\n",
			 adapter->hw.dev_caps.vf_id, adapter->hw_feature);
	}

	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_FEATURE;
	kfree(event.msg_buf);

	return status;
}
/**
 * ne6xvf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 * Collects all filters flagged ->add under the list lock into a scratch
 * list, then sends one mailbox message per address and waits for each
 * reply. NOTE(review): only the 6 MAC bytes go on the wire — the
 * vsi_id/num_elements header of the scratch list is never transmitted.
 **/
void ne6xvf_add_ether_addrs(struct ne6xvf_adapter *adapter)
{
	struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL};
	struct virtchnl_ether_addr_list *veal;
	struct ne6xvf_mac_filter *f;
	int len, i = 0, count = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR;
	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}

	if (!count) {
		adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	len = sizeof(struct virtchnl_ether_addr_list) +
	      (count * sizeof(struct virtchnl_ether_addr));
	/* GFP_ATOMIC: allocated while holding a BH spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			f->add = false;
			if (i == count)
				break;
		}
	}
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* send each collected address individually, outside the lock */
	for (i = 0; i < count; i++) {
		event.buf_len = 0;
		event.msg_buf = NULL;
		ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal->list[i].addr, 6);
		ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_ADD_ETH_ADDR);
	}

	adapter->current_op = VIRTCHNL_OP_UNKNOWN;

	kfree(veal);
}

/**
 * ne6xvf_set_vf_addr - push the VF's primary MAC address to the PF
 * @adapter: adapter structure
 *
 * Fire-and-forget send of hw.mac.addr; clears the pending-AQ flag.
 * NOTE(review): current_op is left at SET_VF_ADDR here (no reply is
 * awaited) — confirm who resets it.
 */
void ne6xvf_set_vf_addr(struct ne6xvf_adapter *adapter)
{
	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	adapter->current_op = VIRTCHNL_OP_SET_VF_ADDR;
	ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_SET_VF_ADDR, adapter->hw.mac.addr, 6);
	adapter->aq_required &= ~NE6XVF_FLAG_AQ_SET_VF_MAC;
}

/**
 * ne6xvf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 * Filters flagged ->remove are unlinked and freed under the list lock,
 * their addresses copied to a scratch list, then one mailbox message is
 * sent per address outside the lock.
 **/
void ne6xvf_del_ether_addrs(struct ne6xvf_adapter *adapter)
{
	struct ne6xvf_arq_event_info event = {.buf_len = 0, .msg_buf = NULL};
	struct virtchnl_ether_addr_list *veal;
	struct ne6xvf_mac_filter *f, *temp;
	int len, i = 0, count = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot add filters, command %d pending\n",
			adapter->current_op);
		return;
	}

	adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_MAC_FILTER;
	spin_lock_bh(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}

	if (!count) {
		adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_MAC_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR;

	len = sizeof(struct virtchnl_ether_addr_list) +
	      (count * sizeof(struct virtchnl_ether_addr));
	/* GFP_ATOMIC: allocated while holding a BH spinlock */
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry_safe(f, temp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			ether_addr_copy(veal->list[i].addr, f->macaddr);
			i++;
			list_del(&f->list);
			kfree(f);
			if (i == count)
				break;
		}
	}
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	for (i = 0; i < count; i++) {
		event.buf_len = 0;
		event.msg_buf = NULL;
		ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal->list[i].addr, 6);
		ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_DEL_ETH_ADDR);
	}

	adapter->current_op = VIRTCHNL_OP_UNKNOWN;

	kfree(veal);
}
13 + +/** + * ne6xvf_print_link_message - print link up or down + * @adapter: adapter structure + * + * Log a message telling the world of our wonderous link status + */ +static void ne6xvf_print_link_message(struct ne6xvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int link_speed_mbps; + char *speed; + + if (!adapter->link_up) { + netdev_info(netdev, "NIC Link is Down\n"); + return; + } + + speed = kcalloc(1, NE6XVF_MAX_SPEED_STRLEN, GFP_KERNEL); + if (!speed) + return; + + switch (adapter->link_speed) { + case NE6X_LINK_SPEED_100GB: + link_speed_mbps = SPEED_100000; + break; + case NE6X_LINK_SPEED_40GB: + link_speed_mbps = SPEED_40000; + break; + case NE6X_LINK_SPEED_25GB: + link_speed_mbps = SPEED_25000; + break; + case NE6X_LINK_SPEED_10GB: + link_speed_mbps = SPEED_10000; + break; + default: + link_speed_mbps = SPEED_UNKNOWN; + break; + } + + snprintf(speed, NE6XVF_MAX_SPEED_STRLEN, "%d %s", link_speed_mbps / 1000, "Gbps"); + + netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed); + + kfree(speed); +} + +/** + * ne6xvf_set_promiscuous + * @adapter: adapter structure + * @flags: bitmask to control unicast/multicast promiscuous. + * + * Request that the PF enable promiscuous mode for our VSI. 
+ **/ +void ne6xvf_set_promiscuous(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_virtchnl_promisc_info vpi; + int flags = 0; + + dev_warn(&adapter->pdev->dev, "%s: ....\n", __func__); + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set promiscuous mode, command %d pending\n", + adapter->current_op); + return; + } + + adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; + + if (adapter->flags & NE6XVF_FLAG_PROMISC_ON) { + adapter->hw_feature |= NE6X_F_PROMISC; + flags |= FLAG_VF_UNICAST_PROMISC; + } else { + adapter->hw_feature &= ~NE6X_F_PROMISC; + } + + if (adapter->flags & NE6XVF_FLAG_ALLMULTI_ON) { + adapter->hw_feature |= NE6X_F_RX_ALLMULTI; + flags |= FLAG_VF_MULTICAST_PROMISC; + } else { + adapter->hw_feature &= ~NE6X_F_RX_ALLMULTI; + } + + vpi.vsi_id = adapter->vsi_res->vsi_id; + vpi.flags = flags; + + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, (u8 *)&vpi, sizeof(vpi)); +} + +void ne6xvf_vchanel_get_port_link_status(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_hw *hw = &adapter->hw; + u8 msg[8] = {0}; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot get_link_status, command %d pending\n", + adapter->current_op); + return; + } + + /* pass queue info to vf */ + msg[0] = hw->dev_caps.base_queue; + msg[1] = adapter->num_active_queues; + + adapter->current_op = VIRTCHNL_OP_GET_PORT_STATUS; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_GET_PORT_STATUS, msg, 2); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_GET_PORT_LINK_STATUS; +} + +/** + * ne6xvf_virtchnl_completion + * @adapter: adapter structure + * @v_opcode: opcode sent by PF + * @v_retval: retval sent by PF + * @msg: message sent by PF + * @msglen: message length + * + * Asynchronous completion function for admin queue messages. 
Rather than busy + * wait, we fire off our requests and assume that no errors will be returned. + * This function handles the reply messages. + **/ +void ne6xvf_virtchnl_completion(struct ne6xvf_adapter *adapter, enum virtchnl_ops v_opcode, + enum ne6xvf_status v_retval, u8 *msg, u16 msglen) +{ + struct net_device *netdev = adapter->netdev; + + if (v_opcode == VIRTCHNL_OP_EVENT) { + struct virtchnl_pf_event *vpe = (struct virtchnl_pf_event *)msg; + bool link_up = vpe->link_status; + enum ne6x_sdk_link_speed old_link_speed = adapter->link_speed; + + switch (vpe->event) { + case NE6XVF_VIRTCHNL_EVENT_LINK_CHANGE: + adapter->link_speed = (vpe->link_speed_0 << 24) | + (vpe->link_speed_1 << 16) | + (vpe->link_speed_2 << 8) | + vpe->link_speed_3; + if (adapter->current_op == VIRTCHNL_OP_GET_PORT_STATUS) + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + /* we've already got the right link status, bail */ + if (adapter->link_up == link_up) { + if (link_up && old_link_speed != adapter->link_speed) + ne6xvf_print_link_message(adapter); + + break; + } + + if (link_up) { + /* If we get link up message and start queues + * before our queues are configured it will + * trigger a TX hang. In that case, just ignore + * the link status message,we'll get another one + * after we enable queues and actually prepared + * to send traffic. + */ + if (adapter->state != __NE6XVF_RUNNING) + break; + + /* For ADQ enabled VF, we reconfigure VSIs and + * re-allocate queues. Hence wait till all + * queues are enabled. 
+ */ + if (adapter->flags & NE6XVF_FLAG_QUEUES_DISABLED) + break; + } + + adapter->link_up = link_up; + if (link_up) { + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + } else { + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + ne6xvf_print_link_message(adapter); + break; + case NE6XVF_VIRTCHNL_EVENT_RESET_IMPENDING: + dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n"); + break; + default: + dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", vpe->event); + break; + } + return; + } + + if (v_opcode == VIRTCHNL_OP_VF_CONFIG) { + struct virtchnl_vf_config *vfconfig = (struct virtchnl_vf_config *)msg; + + dev_info(&adapter->pdev->dev, "vf_vonfig_data from the PF,type= %d,value = %d\n", + vfconfig->type, vfconfig->data[0]); + switch (vfconfig->type) { + case VIRTCHNL_VF_CONFIG_TRUST: + adapter->trusted = vfconfig->data[0]; + if (!adapter->trusted) { + adapter->hw_feature &= ~NE6X_F_PROMISC; + adapter->hw_feature &= ~NE6X_F_RX_ALLMULTI; + adapter->flags &= ~NE6XVF_FLAG_PROMISC_ON; + adapter->flags &= ~NE6XVF_FLAG_ALLMULTI_ON; + } + break; + default: + break; + } + return; + } + + if (v_retval) { + switch (v_opcode) { + case VIRTCHNL_OP_SET_VF_ADDR: + dev_err(&adapter->pdev->dev, "Failed to change MAC address\n"); + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + wake_up(&adapter->vc_waitqueue); + if (adapter->current_op != VIRTCHNL_OP_SET_VF_ADDR) + return; + + break; + default: + dev_err(&adapter->pdev->dev, "PF returned error %d to our request %d\n", + v_retval, v_opcode); + + /* Assume that the ADQ configuration caused one of the + * v_opcodes in this if statement to fail. 
Set the + * flag so the reset path can return to the pre-ADQ + * configuration and traffic can resume + */ + if ((v_opcode == VIRTCHNL_OP_ENABLE_QUEUES || + v_opcode == VIRTCHNL_OP_CONFIG_IRQ_MAP || + v_opcode == VIRTCHNL_OP_CONFIG_ADPT_QUEUES)) { + dev_err(&adapter->pdev->dev, + "ADQ is enabled and opcode %d failed (%d)\n", v_opcode, + v_retval); + netdev_reset_tc(netdev); + adapter->flags |= NE6XVF_FLAG_REINIT_ITR_NEEDED; + ne6xvf_schedule_reset(adapter); + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + return; + } + } + } + + switch (v_opcode) { + case VIRTCHNL_OP_SET_VF_ADDR: + if (!v_retval) { + if (msglen != 0 && msg) { + netif_addr_lock_bh(netdev); + ether_addr_copy(adapter->hw.mac.addr, msg); + eth_hw_addr_set(netdev, msg); + netif_addr_unlock_bh(netdev); + } + } + wake_up(&adapter->vc_waitqueue); + if (adapter->current_op == VIRTCHNL_OP_SET_VF_ADDR) + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + + break; + case VIRTCHNL_OP_GET_VF_RESOURCES: + memcpy(adapter->vf_res, msg, msglen); + ne6xvf_vf_parse_hw_config(&adapter->hw, adapter->vf_res); + if (is_zero_ether_addr(adapter->hw.mac.addr)) { + /* restore current mac address */ + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + netif_addr_lock_bh(netdev); + /* refresh current mac address if changed */ + ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); + netif_addr_unlock_bh(netdev); + } + + ne6xvf_parse_vf_resource_msg(adapter); + break; + case VIRTCHNL_OP_GET_VF_FEATURE: + memcpy(&adapter->hw_feature, msg, 4); + dev_info(&adapter->pdev->dev, "%s: hw_featrue = 0x%08X\n", + ne6xvf_state_str(adapter->state), adapter->hw_feature); + break; + case VIRTCHNL_OP_ENABLE_QUEUES: + /* enable transmits */ + if (adapter->state == __NE6XVF_RUNNING) { + ne6xvf_irq_enable(adapter, true); + /* If queues not enabled when handling link event, + * then set carrier on now + */ + if (adapter->link_up && !netif_carrier_ok(netdev)) { + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + } + } 
+ adapter->flags |= NE6XVF_FLAG_QUEUES_ENABLED; + adapter->flags &= ~NE6XVF_FLAG_QUEUES_DISABLED; + break; + case VIRTCHNL_OP_DISABLE_QUEUES: + ne6xvf_free_all_tg_resources(adapter); + ne6xvf_free_all_cq_resources(adapter); + ne6xvf_free_all_tx_resources(adapter); + ne6xvf_free_all_rx_resources(adapter); + if (adapter->state == __NE6XVF_DOWN_PENDING) + ne6xvf_change_state(adapter, __NE6XVF_DOWN); + + adapter->flags &= ~NE6XVF_FLAG_QUEUES_ENABLED; + break; + case VIRTCHNL_OP_VERSION: + case VIRTCHNL_OP_CONFIG_IRQ_MAP: + /* Don't display an error if we get these out of sequence. + * If the firmware needed to get kicked, we'll get these and + * it's no problem. + */ + if (v_opcode != adapter->current_op) + return; + + break; + case VIRTCHNL_OP_REQUEST_QUEUES: { + struct ne6xvf_virtchnl_vf_res_request *vfres = + (struct ne6xvf_virtchnl_vf_res_request *)msg; + if (vfres->num_queue_pairs != adapter->num_req_queues) { + dev_info(&adapter->pdev->dev, "Requested %d queues, PF can support %d\n", + adapter->num_req_queues, vfres->num_queue_pairs); + adapter->num_req_queues = 0; + adapter->flags &= ~NE6XVF_FLAG_REINIT_ITR_NEEDED; + } + } break; + default: + if (adapter->current_op && v_opcode != adapter->current_op) + dev_dbg(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", + adapter->current_op, v_opcode); + + break; + } /* switch v_opcode */ + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; +} + +/** + * ne6xvf_add_vlans + * @adapter: adapter structure + * + * Request that the PF add one or more VLAN filters to our VSI. 
+ **/ +void ne6xvf_add_vlans(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {0}; + struct ne6xvf_vlan_filter *f = NULL; + struct ne6x_vf_vlan *vlan = NULL; + int len = 0, i = 0, count = 0; + + dev_info(&adapter->pdev->dev, "%s: adapter->current_op:%d\n", __func__, + adapter->current_op); + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot add VLANs, command %d pending\n", + adapter->current_op); + return; + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) + count++; + } + + if (!count) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + adapter->current_op = VIRTCHNL_OP_ADD_VLAN; + + len = sizeof(struct ne6x_vf_vlan) * count; + vlan = kzalloc(len, GFP_ATOMIC); + if (!vlan) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->add) { + vlan[i].tpid = f->vlan.tpid; + vlan[i].vid = f->vlan.vid; + i++; + f->add = false; + f->is_new_vlan = true; + if (i == count) + break; + } + } + adapter->aq_required &= ~NE6XVF_FLAG_AQ_ADD_VLAN_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + for (i = 0; i < count; i++) { + event.buf_len = 0; + event.msg_buf = NULL; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)&vlan[i], + sizeof(struct ne6x_vf_vlan)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_ADD_VLAN); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + kfree(vlan); +} + +/** + * ne6xvf_del_vlans + * @adapter: adapter structure + * + * Request that the PF remove one or more VLAN filters from our VSI. 
+ **/ +void ne6xvf_del_vlans(struct ne6xvf_adapter *adapter) +{ + struct ne6xvf_arq_event_info event = {0}; + struct ne6xvf_vlan_filter *f, *ftmp; + struct ne6x_vf_vlan *vlan = NULL; + int i = 0, count = 0; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot remove VLANs, command %d pending\n", + adapter->current_op); + return; + } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + /* since VLAN capabilities are not allowed, we dont want to send + * a VLAN delete request because it will most likely fail and + * create unnecessary errors/noise, so just free the VLAN + * filters marked for removal to enable bailing out before + * sending a virtchnl message + */ + if (f->remove) + count++; + } + + if (!count) { + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + adapter->current_op = VIRTCHNL_OP_DEL_VLAN; + vlan = kcalloc(count, sizeof(*vlan), GFP_ATOMIC); + if (!vlan) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); + return; + } + + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->remove) { + vlan[i].tpid = f->vlan.tpid; + vlan[i].vid = f->vlan.vid; + i++; + list_del(&f->list); + kfree(f); + if (i == count) + break; + } + } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + adapter->aq_required &= ~NE6XVF_FLAG_AQ_DEL_VLAN_FILTER; + for (i = 0; i < count; i++) { + event.buf_len = 0; + event.msg_buf = NULL; + ne6xvf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)&vlan[i], + sizeof(struct ne6x_vf_vlan)); + ne6xvf_poll_virtchnl_msg(adapter, &event, VIRTCHNL_OP_DEL_VLAN); + } + + adapter->current_op = VIRTCHNL_OP_UNKNOWN; + kfree(vlan); +} diff --git a/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h new file mode 100644 index 
000000000000..1fae0b1922dc --- /dev/null +++ b/drivers/net/ethernet/bzwx/nce/ne6x_vf/ne6xvf_virtchnl.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 - 2023, Chengdu BeiZhongWangXin Technology Co., Ltd. */ + +#ifndef _NE6XVF_VIRTCHNL_H +#define _NE6XVF_VIRTCHNL_H + +#define NE6XVF_SDK_LARGE_BUF 6 + +struct ne6xvf_spinlock { + /* mutext lock */ + struct mutex spinlock; +}; + +struct virtchnl_vsi_resource { + u16 vsi_id; + u16 num_queue_pairs; + + /* see enum virtchnl_vsi_type */ + s32 vsi_type; + u16 qset_handle; + u8 default_mac_addr[ETH_ALEN]; +}; + +struct virtchnl_ether_addr { + u8 addr[ETH_ALEN]; + u8 type; + u8 pad; +}; + +struct virtchnl_vf_resource { + u16 num_vsis; + u16 num_queue_pairs; + u16 max_vectors; + u16 max_mtu; + + u32 vf_cap_flags; + u32 rss_key_size; + u32 rss_lut_size; + + struct virtchnl_vsi_resource vsi_res[]; +}; + +enum nacf_virtchnl_vsi_type { + NE6XVF_VIRTCHNL_VSI_TYPE_INVALID = 0, + NE6XVF_VIRTCHNL_VSI_SRIOV = 6, +}; + +struct virtchnl_ether_addr_list { + u16 vsi_id; + u16 num_elements; + struct virtchnl_ether_addr list[]; +}; + +struct ne6xvf_arq_event_info { + struct ne6x_mbx_snap_buffer_data snap; + u16 msg_len; + u16 buf_len; + u8 *msg_buf; +}; + +/* VF resource request */ +struct ne6xvf_virtchnl_vf_res_request { + u16 num_queue_pairs; + u8 need_reset; + u8 rsv; +}; + +#define FLAG_VF_UNICAST_PROMISC 0x00000001 +#define FLAG_VF_MULTICAST_PROMISC 0x00000002 + +/* VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE + * VF sends VSI id and flags. + * PF returns status code in retval. + * Note: we assume that broadcast accept mode is always enabled. 
+ */ +struct ne6xvf_virtchnl_promisc_info { + u16 vsi_id; + u16 flags; +}; + +union u_ne6x_mbx_snap_buffer_data { + struct ne6x_mbx_snap_buffer_data snap; + u64 val; +}; + +struct ne6xvf_sdk_mbx_info { + struct ne6xvf_spinlock mbx_spinlock; + struct ne6x_mbx_snap_buffer_data sq_data; + struct ne6x_mbx_snap_buffer_data cq_data; + int init_flag; +}; + +#define NE6XVF_VIRTCHNL_VERSION_MAJOR 1 +#define NE6XVF_VIRTCHNL_VERSION_MINOR 1 + +struct ne6xvf_virtchnl_version_info { + u8 major; + u8 minor; +}; + +/* VIRTCHNL_OP_EVENT + * PF sends this message to inform the VF driver of events that may affect it. + * No direct response is expected from the VF, though it may generate other + * messages in response to this one. + */ +enum ne6xvf_virtchnl_event_codes { + NE6XVF_VIRTCHNL_EVENT_UNKNOWN = 0, + NE6XVF_VIRTCHNL_EVENT_LINK_CHANGE, + NE6XVF_VIRTCHNL_EVENT_RESET_IMPENDING, + NE6XVF_VIRTCHNL_EVENT_PF_DRIVER_CLOSE, + NE6XVF_VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE, +}; + +struct virtchnl_pf_event { + u8 event; + u8 link_speed_0; + u8 link_speed_1; + u8 link_speed_2; + u8 link_speed_3; + u8 link_status; +}; + +#endif -- Gitee From ac171cafa94dcd2e09a07b8fc51181cb7f7e2f13 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Fri, 14 Jun 2024 10:23:57 +0800 Subject: [PATCH 0990/2138] anolis: configs: Enable CONFIG_DEBUG_PREEMPT for debug kernel ANBZ: #9326 To prepare for preempt=full in future, enable CONFIG_DEBUG_PREEMPT to detect bugs in vanilla kernel and anolis own features. 
Signed-off-by: Tianchen Ding Reviewed-by: Qiao Ma Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3350 --- arch/arm64/configs/anolis-debug_defconfig | 2 +- arch/x86/configs/anolis-debug_defconfig | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig index aeb6bc8aa483..0d5d34dbb3ca 100644 --- a/arch/arm64/configs/anolis-debug_defconfig +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -7001,7 +7001,7 @@ CONFIG_SCHED_ACPU=y # end of Scheduler Debugging # CONFIG_DEBUG_TIMEKEEPING is not set -# CONFIG_DEBUG_PREEMPT is not set +CONFIG_DEBUG_PREEMPT=y # # Lock Debugging (spinlocks, mutexes, etc...) diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 58c6d3184e13..621d8c552839 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -7881,7 +7881,7 @@ CONFIG_SCHED_ACPU=y # end of Scheduler Debugging # CONFIG_DEBUG_TIMEKEEPING is not set -# CONFIG_DEBUG_PREEMPT is not set +CONFIG_DEBUG_PREEMPT=y # # Lock Debugging (spinlocks, mutexes, etc...) -- Gitee From 5f29cc1502f3221994bb7037643950dc5e392577 Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Thu, 28 Sep 2023 00:15:23 -0700 Subject: [PATCH 0991/2138] iommu: Add new iommu op to create domains owned by userspace ANBZ: #9185 commit 909f4abd1097769d024c3a9c2e59c2fbe5d2d0c0 upstream. Intel-SIG: commit 909f4abd1097 iommu: Add new iommu op to create domains owned by userspace Backport to support Intel QAT live migration for in-tree driver Introduce a new iommu_domain op to create domains owned by userspace, e.g. through IOMMUFD. These domains have a few different properties compares to kernel owned domains: - They may be PAGING domains, but created with special parameters. 
For instance aperture size changes/number of levels, different IOPTE formats, or other things necessary to make a vIOMMU work - We have to track all the memory allocations with GFP_KERNEL_ACCOUNT to make the cgroup sandbox stronger - Device-specialty domains, such as NESTED domains can be created by IOMMUFD. The new op clearly says the domain is being created by IOMMUFD, that the domain is intended for userspace use, and it provides a way to pass user flags or a driver specific uAPI structure to customize the created domain to exactly what the vIOMMU userspace driver requires. iommu drivers that cannot support VFIO/IOMMUFD should not support this op. This includes any driver that cannot provide a fully functional PAGING domain. This new op for now is only supposed to be used by IOMMUFD, hence no wrapper for it. IOMMUFD would call the callback directly. As for domain free, IOMMUFD would use iommu_domain_free(). Link: https://lore.kernel.org/r/20230928071528.26258-2-yi.l.liu@intel.com Suggested-by: Jason Gunthorpe Signed-off-by: Lu Baolu Co-developed-by: Nicolin Chen Signed-off-by: Nicolin Chen Signed-off-by: Yi Liu Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- include/linux/iommu.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index b6ef263e85c0..a37cf16afa26 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -234,7 +234,15 @@ struct iommu_iotlb_gather { * op is allocated in the iommu driver and freed by the caller after * use. The information type is one of enum iommu_hw_info_type defined * in include/uapi/linux/iommufd.h. - * @domain_alloc: allocate iommu domain + * @domain_alloc: allocate and return an iommu domain if success. Otherwise + * NULL is returned. 
The domain is not fully initialized until + * the caller iommu_domain_alloc() returns. + * @domain_alloc_user: Allocate an iommu domain corresponding to the input + * parameters as defined in include/uapi/linux/iommufd.h. + * Unlike @domain_alloc, it is called only by IOMMUFD and + * must fully initialize the new domain before return. + * Upon success, a domain is returned. Upon failure, + * ERR_PTR must be returned. * @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU @@ -267,6 +275,7 @@ struct iommu_ops { /* Domain allocation and freeing by the iommu driver */ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); + struct iommu_domain *(*domain_alloc_user)(struct device *dev, u32 flags); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); -- Gitee From 8d0fdf8dd633f11da0bf1ba32e6417cbfb266100 Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Thu, 28 Sep 2023 00:15:24 -0700 Subject: [PATCH 0992/2138] iommufd: Use the domain_alloc_user() op for domain allocation ANBZ: #9185 commit 7975b722087fa23ff3ad1ff4998b8572a7e17e84 upstream. Intel-SIG: commit 7975b722087f iommufd: Use the domain_alloc_user() op for domain allocation Backport to support Intel QAT live migration for in-tree driver Make IOMMUFD use iommu_domain_alloc_user() by default for iommu_domain creation. IOMMUFD needs to support iommu_domain allocation with parameters from userspace in nested support, and a driver is expected to implement everything under this op. If the iommu driver doesn't provide domain_alloc_user callback then IOMMUFD falls back to use iommu_domain_alloc() with an UNMANAGED type if possible. 
Link: https://lore.kernel.org/r/20230928071528.26258-3-yi.l.liu@intel.com Suggested-by: Jason Gunthorpe Reviewed-by: Lu Baolu Reviewed-by: Kevin Tian Co-developed-by: Nicolin Chen Signed-off-by: Nicolin Chen Signed-off-by: Yi Liu Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/hw_pagetable.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index cf2c1504e20d..48874f896521 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -5,6 +5,7 @@ #include #include +#include "../iommu-priv.h" #include "iommufd_private.h" void iommufd_hw_pagetable_destroy(struct iommufd_object *obj) @@ -74,6 +75,7 @@ struct iommufd_hw_pagetable * iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, struct iommufd_device *idev, bool immediate_attach) { + const struct iommu_ops *ops = dev_iommu_ops(idev->dev); struct iommufd_hw_pagetable *hwpt; int rc; @@ -88,10 +90,19 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, refcount_inc(&ioas->obj.users); hwpt->ioas = ioas; - hwpt->domain = iommu_domain_alloc(idev->dev->bus); - if (!hwpt->domain) { - rc = -ENOMEM; - goto out_abort; + if (ops->domain_alloc_user) { + hwpt->domain = ops->domain_alloc_user(idev->dev, 0); + if (IS_ERR(hwpt->domain)) { + rc = PTR_ERR(hwpt->domain); + hwpt->domain = NULL; + goto out_abort; + } + } else { + hwpt->domain = iommu_domain_alloc(idev->dev->bus); + if (!hwpt->domain) { + rc = -ENOMEM; + goto out_abort; + } } /* -- Gitee From 477e696caefede87acb89c61236a53c13b943e48 Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Thu, 28 Sep 2023 00:15:25 -0700 Subject: [PATCH 0993/2138] iommufd: Flow user flags for domain allocation to domain_alloc_user() ANBZ: #9185 commit 
89d63875d80ea127280c60dd4cd101af1d9b6557 upstream. Intel-SIG: commit 89d63875d80e iommufd: Flow user flags for domain allocation to domain_alloc_user() Backport to support Intel QAT live migration for in-tree driver Extends iommufd_hw_pagetable_alloc() to accept user flags, the uAPI will provide the flags. Link: https://lore.kernel.org/r/20230928071528.26258-4-yi.l.liu@intel.com Reviewed-by: Kevin Tian Signed-off-by: Yi Liu Reviewed-by: Lu Baolu Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/device.c | 2 +- drivers/iommu/iommufd/hw_pagetable.c | 9 ++++++--- drivers/iommu/iommufd/iommufd_private.h | 3 ++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c index ce78c3671539..e88fa73a45e6 100644 --- a/drivers/iommu/iommufd/device.c +++ b/drivers/iommu/iommufd/device.c @@ -540,7 +540,7 @@ iommufd_device_auto_get_domain(struct iommufd_device *idev, } hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev, - immediate_attach); + 0, immediate_attach); if (IS_ERR(hwpt)) { destroy_hwpt = ERR_CAST(hwpt); goto out_unlock; diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index 48874f896521..5be7a31cbd9c 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -61,6 +61,7 @@ int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt) * @ictx: iommufd context * @ioas: IOAS to associate the domain with * @idev: Device to get an iommu_domain for + * @flags: Flags from userspace * @immediate_attach: True if idev should be attached to the hwpt * * Allocate a new iommu_domain and return it as a hw_pagetable. 
The HWPT @@ -73,7 +74,8 @@ int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt) */ struct iommufd_hw_pagetable * iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, - struct iommufd_device *idev, bool immediate_attach) + struct iommufd_device *idev, u32 flags, + bool immediate_attach) { const struct iommu_ops *ops = dev_iommu_ops(idev->dev); struct iommufd_hw_pagetable *hwpt; @@ -91,7 +93,7 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, hwpt->ioas = ioas; if (ops->domain_alloc_user) { - hwpt->domain = ops->domain_alloc_user(idev->dev, 0); + hwpt->domain = ops->domain_alloc_user(idev->dev, flags); if (IS_ERR(hwpt->domain)) { rc = PTR_ERR(hwpt->domain); hwpt->domain = NULL; @@ -166,7 +168,8 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) } mutex_lock(&ioas->mutex); - hwpt = iommufd_hw_pagetable_alloc(ucmd->ictx, ioas, idev, false); + hwpt = iommufd_hw_pagetable_alloc(ucmd->ictx, ioas, + idev, cmd->flags, false); if (IS_ERR(hwpt)) { rc = PTR_ERR(hwpt); goto out_unlock; diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h index 2c58670011fe..3064997a0181 100644 --- a/drivers/iommu/iommufd/iommufd_private.h +++ b/drivers/iommu/iommufd/iommufd_private.h @@ -242,7 +242,8 @@ struct iommufd_hw_pagetable { struct iommufd_hw_pagetable * iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, - struct iommufd_device *idev, bool immediate_attach); + struct iommufd_device *idev, u32 flags, + bool immediate_attach); int iommufd_hw_pagetable_enforce_cc(struct iommufd_hw_pagetable *hwpt); int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, struct iommufd_device *idev); -- Gitee From a214fd8f83afd2e097e9116e468e50784b035bc3 Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Thu, 28 Sep 2023 00:15:26 -0700 Subject: [PATCH 0994/2138] iommufd: Support allocating nested parent domain ANBZ: #9185 commit 
4ff542163397073f86eda484318d61980ff1031d upstream. Intel-SIG: commit 4ff542163397 iommufd: Support allocating nested parent domain Backport to support Intel QAT live migration for in-tree driver Extend IOMMU_HWPT_ALLOC to allocate domains to be used as parent (stage-2) in nested translation. Add IOMMU_HWPT_ALLOC_NEST_PARENT to the uAPI. Link: https://lore.kernel.org/r/20230928071528.26258-5-yi.l.liu@intel.com Signed-off-by: Yi Liu Reviewed-by: Kevin Tian Reviewed-by: Lu Baolu Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/hw_pagetable.c | 5 ++++- include/uapi/linux/iommufd.h | 12 +++++++++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index 5be7a31cbd9c..8b3d2875d642 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -83,6 +83,9 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, lockdep_assert_held(&ioas->mutex); + if (flags && !ops->domain_alloc_user) + return ERR_PTR(-EOPNOTSUPP); + hwpt = iommufd_object_alloc(ictx, hwpt, IOMMUFD_OBJ_HW_PAGETABLE); if (IS_ERR(hwpt)) return hwpt; @@ -154,7 +157,7 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) struct iommufd_ioas *ioas; int rc; - if (cmd->flags || cmd->__reserved) + if ((cmd->flags & (~IOMMU_HWPT_ALLOC_NEST_PARENT)) || cmd->__reserved) return -EOPNOTSUPP; idev = iommufd_get_device(ucmd, cmd->dev_id); diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index b4ba0c0cbab6..4a7c5c8fdbb4 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -347,10 +347,20 @@ struct iommu_vfio_ioas { }; #define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS) +/** + * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation + * @IOMMU_HWPT_ALLOC_NEST_PARENT: 
If set, allocate a domain which can serve + * as the parent domain in the nesting + * configuration. + */ +enum iommufd_hwpt_alloc_flags { + IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0, +}; + /** * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC) * @size: sizeof(struct iommu_hwpt_alloc) - * @flags: Must be 0 + * @flags: Combination of enum iommufd_hwpt_alloc_flags * @dev_id: The device to allocate this HWPT for * @pt_id: The IOAS to connect this HWPT to * @out_hwpt_id: The ID of the new HWPT -- Gitee From 1fb0773f71bb13fe6b630a26856be807efa907d2 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Mon, 18 Sep 2023 18:16:37 -0700 Subject: [PATCH 0995/2138] iommufd/selftest: Iterate idev_ids in mock_domain's alloc_hwpt test ANBZ: #9185 commit bb812e0069ce5de9af2a7910951c8c95632cebe3 upstream. Intel-SIG: commit bb812e0069ce iommufd/selftest: Iterate idev_ids in mock_domain's alloc_hwpt test Backport to support Intel QAT live migration for in-tree driver The point in iterating variant->mock_domains is to test the idev_ids[0] and idev_ids[1]. So use it instead of keeping testing idev_ids[0] only. 
Link: https://lore.kernel.org/r/20230919011637.16483-1-nicolinc@nvidia.com Signed-off-by: Nicolin Chen Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- tools/testing/selftests/iommu/iommufd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index 890a81f4ff61..4c1f56ce6cbe 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -1407,7 +1407,7 @@ TEST_F(iommufd_mock_domain, alloc_hwpt) uint32_t stddev_id; uint32_t hwpt_id; - test_cmd_hwpt_alloc(self->idev_ids[0], self->ioas_id, &hwpt_id); + test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id, &hwpt_id); test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); test_ioctl_destroy(stddev_id); test_ioctl_destroy(hwpt_id); -- Gitee From 33bec5d41bbceebb43ab1bc9cd982b31d5709cc7 Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Thu, 28 Sep 2023 00:15:27 -0700 Subject: [PATCH 0996/2138] iommufd/selftest: Add domain_alloc_user() support in iommu mock ANBZ: #9185 commit 408663619fcfc89c087df65b362c91bf0a0be617 upstream. Intel-SIG: commit 408663619fcf iommufd/selftest: Add domain_alloc_user() support in iommu mock Backport to support Intel QAT live migration for in-tree driver Add mock_domain_alloc_user() and a new test case for IOMMU_HWPT_ALLOC_NEST_PARENT. 
Link: https://lore.kernel.org/r/20230928071528.26258-6-yi.l.liu@intel.com Co-developed-by: Nicolin Chen Signed-off-by: Nicolin Chen Signed-off-by: Yi Liu Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/selftest.c | 19 +++++++++++++++ tools/testing/selftests/iommu/iommufd.c | 24 +++++++++++++++---- .../selftests/iommu/iommufd_fail_nth.c | 2 +- tools/testing/selftests/iommu/iommufd_utils.h | 11 ++++++--- 4 files changed, 48 insertions(+), 8 deletions(-) diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 00b794d74e03..049174c8fa02 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -161,6 +161,8 @@ static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type) return info; } +static const struct iommu_ops mock_ops; + static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type) { struct mock_iommu_domain *mock; @@ -177,10 +179,26 @@ static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type) mock->domain.geometry.aperture_start = MOCK_APERTURE_START; mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST; mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE; + mock->domain.ops = mock_ops.default_domain_ops; + mock->domain.type = iommu_domain_type; xa_init(&mock->pfns); return &mock->domain; } +static struct iommu_domain * +mock_domain_alloc_user(struct device *dev, u32 flags) +{ + struct iommu_domain *domain; + + if (flags & (~IOMMU_HWPT_ALLOC_NEST_PARENT)) + return ERR_PTR(-EOPNOTSUPP); + + domain = mock_domain_alloc(IOMMU_DOMAIN_UNMANAGED); + if (!domain) + domain = ERR_PTR(-ENOMEM); + return domain; +} + static void mock_domain_free(struct iommu_domain *domain) { struct mock_iommu_domain *mock = @@ -322,6 +340,7 @@ static const struct iommu_ops mock_ops = { .pgsize_bitmap = 
MOCK_IO_PAGE_SIZE, .hw_info = mock_domain_hw_info, .domain_alloc = mock_domain_alloc, + .domain_alloc_user = mock_domain_alloc_user, .capable = mock_domain_capable, .set_platform_dma_ops = mock_domain_set_plaform_dma_ops, .device_group = generic_device_group, diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index 4c1f56ce6cbe..bdc56e32a4b2 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -114,6 +114,7 @@ TEST_F(iommufd, cmd_length) TEST_LENGTH(iommu_destroy, IOMMU_DESTROY); TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO); + TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC); TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC); TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES); TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS); @@ -1404,13 +1405,28 @@ TEST_F(iommufd_mock_domain, alloc_hwpt) int i; for (i = 0; i != variant->mock_domains; i++) { + uint32_t hwpt_id[2]; uint32_t stddev_id; - uint32_t hwpt_id; - test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id, &hwpt_id); - test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_err_hwpt_alloc(EOPNOTSUPP, + self->idev_ids[i], self->ioas_id, + ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]); + test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id, + 0, &hwpt_id[0]); + test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id, + IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]); + + /* Do a hw_pagetable rotation test */ + test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]); + EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0])); + test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]); + EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1])); + test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id); + test_ioctl_destroy(hwpt_id[1]); + + test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL); test_ioctl_destroy(stddev_id); - test_ioctl_destroy(hwpt_id); + 
test_ioctl_destroy(hwpt_id[0]); } } diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c index a220ca2a689d..3d7838506bfe 100644 --- a/tools/testing/selftests/iommu/iommufd_fail_nth.c +++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c @@ -615,7 +615,7 @@ TEST_FAIL_NTH(basic_fail_nth, device) if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info))) return -1; - if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, &hwpt_id)) + if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, &hwpt_id)) return -1; if (_test_cmd_mock_domain_replace(self->fd, stdev_id, ioas_id2, NULL)) diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h index e0753d03ecaa..be4970a84977 100644 --- a/tools/testing/selftests/iommu/iommufd_utils.h +++ b/tools/testing/selftests/iommu/iommufd_utils.h @@ -103,10 +103,11 @@ static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id, pt_id, NULL)) static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, - __u32 *hwpt_id) + __u32 flags, __u32 *hwpt_id) { struct iommu_hwpt_alloc cmd = { .size = sizeof(cmd), + .flags = flags, .dev_id = device_id, .pt_id = pt_id, }; @@ -120,8 +121,12 @@ static int _test_cmd_hwpt_alloc(int fd, __u32 device_id, __u32 pt_id, return 0; } -#define test_cmd_hwpt_alloc(device_id, pt_id, hwpt_id) \ - ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, pt_id, hwpt_id)) +#define test_cmd_hwpt_alloc(device_id, pt_id, flags, hwpt_id) \ + ASSERT_EQ(0, _test_cmd_hwpt_alloc(self->fd, device_id, \ + pt_id, flags, hwpt_id)) +#define test_err_hwpt_alloc(_errno, device_id, pt_id, flags, hwpt_id) \ + EXPECT_ERRNO(_errno, _test_cmd_hwpt_alloc(self->fd, device_id, \ + pt_id, flags, hwpt_id)) static int _test_cmd_access_replace_ioas(int fd, __u32 access_id, unsigned int ioas_id) -- Gitee From f4c60b51ff1d47cdab1eb3abeffa8155a6d40889 Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Thu, 28 Sep 
2023 00:15:28 -0700 Subject: [PATCH 0997/2138] iommu/vt-d: Add domain_alloc_user op ANBZ: #9185 commit c97d1b20d3835178bcd0e3a86c20ce4e36b6d80c upstream. Intel-SIG: commit c97d1b20d383 iommu/vt-d: Add domain_alloc_user op Backport to support Intel QAT live migration for in-tree driver Add the domain_alloc_user() op implementation. It supports allocating domains to be used as parent under nested translation. Unlike other drivers VT-D uses only a single page table format so it only needs to check if the HW can support nesting. Link: https://lore.kernel.org/r/20230928071528.26258-7-yi.l.liu@intel.com Signed-off-by: Yi Liu Reviewed-by: Lu Baolu Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/intel/iommu.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index ffc528ea7285..94be151ca2cf 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4075,6 +4075,33 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) return NULL; } +static struct iommu_domain * +intel_iommu_domain_alloc_user(struct device *dev, u32 flags) +{ + struct iommu_domain *domain; + struct intel_iommu *iommu; + + if (flags & (~IOMMU_HWPT_ALLOC_NEST_PARENT)) + return ERR_PTR(-EOPNOTSUPP); + + iommu = device_to_iommu(dev, NULL, NULL); + if (!iommu) + return ERR_PTR(-ENODEV); + + if ((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) && !ecap_nest(iommu->ecap)) + return ERR_PTR(-EOPNOTSUPP); + + /* + * domain_alloc_user op needs to fully initialize a domain + * before return, so uses iommu_domain_alloc() here for + * simple. 
+ */ + domain = iommu_domain_alloc(dev->bus); + if (!domain) + domain = ERR_PTR(-ENOMEM); + return domain; +} + static void intel_iommu_domain_free(struct iommu_domain *domain) { if (domain != &si_domain->domain && domain != &blocking_domain) @@ -4812,6 +4839,7 @@ const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .hw_info = intel_iommu_hw_info, .domain_alloc = intel_iommu_domain_alloc, + .domain_alloc_user = intel_iommu_domain_alloc_user, .probe_device = intel_iommu_probe_device, .probe_finalize = intel_iommu_probe_finalize, .release_device = intel_iommu_release_device, -- Gitee From 807e363d43e4c2a220fef4576e47072e858f052e Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Sun, 15 Oct 2023 00:46:48 -0700 Subject: [PATCH 0998/2138] iommufd/selftest: Rework TEST_LENGTH to test min_size explicitly ANBZ: #9185 commit 266dcae34d8f44c3bbab00e227f8b14517682bb7 upstream. Intel-SIG: commit 266dcae34d8f iommufd/selftest: Rework TEST_LENGTH to test min_size explicitly Backport to support Intel QAT live migration for in-tree driver TEST_LENGTH passing ".size = sizeof(struct _struct) - 1" expects -EINVAL from "if (ucmd.user_size < op->min_size)" check in iommufd_fops_ioctl(). This has been working when min_size is exactly the size of the structure. However, if the size of the structure becomes larger than min_size, i.e. the passing size above is larger than min_size, that min_size sanity no longer works. Since the first test in TEST_LENGTH() was to test that min_size sanity routine, rework it to support a min_size calculation, rather than using the full size of the structure. 
Link: https://lore.kernel.org/r/20231015074648.24185-1-nicolinc@nvidia.com Signed-off-by: Nicolin Chen Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- tools/testing/selftests/iommu/iommufd.c | 29 ++++++++++++++----------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index bdc56e32a4b2..9b6c4d177586 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -86,12 +86,13 @@ TEST_F(iommufd, cmd_fail) TEST_F(iommufd, cmd_length) { -#define TEST_LENGTH(_struct, _ioctl) \ +#define TEST_LENGTH(_struct, _ioctl, _last) \ { \ + size_t min_size = offsetofend(struct _struct, _last); \ struct { \ struct _struct cmd; \ uint8_t extra; \ - } cmd = { .cmd = { .size = sizeof(struct _struct) - 1 }, \ + } cmd = { .cmd = { .size = min_size - 1 }, \ .extra = UINT8_MAX }; \ int old_errno; \ int rc; \ @@ -112,17 +113,19 @@ TEST_F(iommufd, cmd_length) } \ } - TEST_LENGTH(iommu_destroy, IOMMU_DESTROY); - TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO); - TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC); - TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC); - TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES); - TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS); - TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP); - TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY); - TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP); - TEST_LENGTH(iommu_option, IOMMU_OPTION); - TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS); + TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id); + TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved); + TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved); + TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id); + TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES, + 
out_iova_alignment); + TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS, + allowed_iovas); + TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova); + TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova); + TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length); + TEST_LENGTH(iommu_option, IOMMU_OPTION, val64); + TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved); #undef TEST_LENGTH } -- Gitee From c8fae1305a3dd5ffc03970b7fdada9dd56d7cc4e Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:50:52 +0100 Subject: [PATCH 0999/2138] vfio/iova_bitmap: Export more API symbols ANBZ: #9185 commit 53f0b020218fcc0a56a11df39630dbd379e4d9a6 upstream. Intel-SIG: commit 53f0b020218f vfio/iova_bitmap: Export more API symbols Backport to support Intel QAT live migration for in-tree driver In preparation to move iova_bitmap into iommufd, export the rest of API symbols that will be used in what could be used by modules, namely: iova_bitmap_alloc iova_bitmap_free iova_bitmap_for_each Link: https://lore.kernel.org/r/20231024135109.73787-2-joao.m.martins@oracle.com Suggested-by: Alex Williamson Signed-off-by: Joao Martins Reviewed-by: Jason Gunthorpe Reviewed-by: Kevin Tian Reviewed-by: Alex Williamson Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/vfio/iova_bitmap.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/vfio/iova_bitmap.c b/drivers/vfio/iova_bitmap.c index 7af5b204990b..3ad1e520445c 100644 --- a/drivers/vfio/iova_bitmap.c +++ b/drivers/vfio/iova_bitmap.c @@ -269,6 +269,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, iova_bitmap_free(bitmap); return ERR_PTR(rc); } +EXPORT_SYMBOL_GPL(iova_bitmap_alloc); /** * iova_bitmap_free() - Frees an IOVA bitmap object @@ -290,6 +291,7 @@ void iova_bitmap_free(struct iova_bitmap *bitmap) kfree(bitmap); } 
+EXPORT_SYMBOL_GPL(iova_bitmap_free); /* * Returns the remaining bitmap indexes from mapped_total_index to process for @@ -388,6 +390,7 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, return ret; } +EXPORT_SYMBOL_GPL(iova_bitmap_for_each); /** * iova_bitmap_set() - Records an IOVA range in bitmap -- Gitee From 6c4c728dfe5da805755ae3770e0cc3031be2d960 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:50:53 +0100 Subject: [PATCH 1000/2138] vfio: Move iova_bitmap into iommufd ANBZ: #9185 commit 8c9c727b6142325ed5697240fceb99cbeb4ac2ec upstream. Intel-SIG: commit 8c9c727b6142 vfio: Move iova_bitmap into iommufd Backport to support Intel QAT live migration for in-tree driver Both VFIO and IOMMUFD will need iova bitmap for storing dirties and walking the user bitmaps, so move to the common dependency into IOMMUFD. In doing so, create the symbol IOMMUFD_DRIVER which designates the builtin code that will be used by drivers when selected. Today this means MLX5_VFIO_PCI and PDS_VFIO_PCI. IOMMU drivers will do the same (in future patches) when supporting dirty tracking and select IOMMUFD_DRIVER accordingly. 
Given that the symbol maybe be disabled, add header definitions in iova_bitmap.h for when IOMMUFD_DRIVER=n Link: https://lore.kernel.org/r/20231024135109.73787-3-joao.m.martins@oracle.com Signed-off-by: Joao Martins Reviewed-by: Jason Gunthorpe Reviewed-by: Brett Creeley Reviewed-by: Kevin Tian Reviewed-by: Alex Williamson Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/Kconfig | 4 +++ drivers/iommu/iommufd/Makefile | 1 + drivers/{vfio => iommu/iommufd}/iova_bitmap.c | 0 drivers/vfio/Makefile | 3 +-- drivers/vfio/pci/mlx5/Kconfig | 1 + drivers/vfio/pci/pds/Kconfig | 1 + include/linux/iova_bitmap.h | 26 +++++++++++++++++++ 7 files changed, 34 insertions(+), 2 deletions(-) rename drivers/{vfio => iommu/iommufd}/iova_bitmap.c (100%) diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index b1df0a09601b..f0ba61f8a49d 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -7,6 +7,10 @@ config IOMMU_IOVA config IOMMU_API bool +config IOMMUFD_DRIVER + bool + default n + menuconfig IOMMU_SUPPORT bool "IOMMU Hardware Support" depends on MMU diff --git a/drivers/iommu/iommufd/Makefile b/drivers/iommu/iommufd/Makefile index 8aeba81800c5..34b446146961 100644 --- a/drivers/iommu/iommufd/Makefile +++ b/drivers/iommu/iommufd/Makefile @@ -11,3 +11,4 @@ iommufd-y := \ iommufd-$(CONFIG_IOMMUFD_TEST) += selftest.o obj-$(CONFIG_IOMMUFD) += iommufd.o +obj-$(CONFIG_IOMMUFD_DRIVER) += iova_bitmap.o diff --git a/drivers/vfio/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c similarity index 100% rename from drivers/vfio/iova_bitmap.c rename to drivers/iommu/iommufd/iova_bitmap.c diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile index c82ea032d352..68c05705200f 100644 --- a/drivers/vfio/Makefile +++ b/drivers/vfio/Makefile @@ -1,8 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_VFIO) += vfio.o -vfio-y += vfio_main.o 
\ - iova_bitmap.o +vfio-y += vfio_main.o vfio-$(CONFIG_VFIO_DEVICE_CDEV) += device_cdev.o vfio-$(CONFIG_VFIO_GROUP) += group.o vfio-$(CONFIG_IOMMUFD) += iommufd.o diff --git a/drivers/vfio/pci/mlx5/Kconfig b/drivers/vfio/pci/mlx5/Kconfig index 7088edc4fb28..c3ced56b7787 100644 --- a/drivers/vfio/pci/mlx5/Kconfig +++ b/drivers/vfio/pci/mlx5/Kconfig @@ -3,6 +3,7 @@ config MLX5_VFIO_PCI tristate "VFIO support for MLX5 PCI devices" depends on MLX5_CORE select VFIO_PCI_CORE + select IOMMUFD_DRIVER help This provides migration support for MLX5 devices using the VFIO framework. diff --git a/drivers/vfio/pci/pds/Kconfig b/drivers/vfio/pci/pds/Kconfig index 6eceef7b028a..fec9b167c7b9 100644 --- a/drivers/vfio/pci/pds/Kconfig +++ b/drivers/vfio/pci/pds/Kconfig @@ -5,6 +5,7 @@ config PDS_VFIO_PCI tristate "VFIO support for PDS PCI devices" depends on PDS_CORE && PCI_IOV select VFIO_PCI_CORE + select IOMMUFD_DRIVER help This provides generic PCI support for PDS devices using the VFIO framework. diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h index c006cf0a25f3..1c338f5e5b7a 100644 --- a/include/linux/iova_bitmap.h +++ b/include/linux/iova_bitmap.h @@ -7,6 +7,7 @@ #define _IOVA_BITMAP_H_ #include +#include struct iova_bitmap; @@ -14,6 +15,7 @@ typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap, unsigned long iova, size_t length, void *opaque); +#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER) struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, unsigned long page_size, u64 __user *data); @@ -22,5 +24,29 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, iova_bitmap_fn_t fn); void iova_bitmap_set(struct iova_bitmap *bitmap, unsigned long iova, size_t length); +#else +static inline struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, + size_t length, + unsigned long page_size, + u64 __user *data) +{ + return NULL; +} + +static inline void iova_bitmap_free(struct iova_bitmap *bitmap) +{ +} + +static inline int 
iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, + iova_bitmap_fn_t fn) +{ + return -EOPNOTSUPP; +} + +static inline void iova_bitmap_set(struct iova_bitmap *bitmap, + unsigned long iova, size_t length) +{ +} +#endif #endif -- Gitee From f8cd2ab32f5f1bc6ecce1560061128a876e0fab9 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:50:54 +0100 Subject: [PATCH 1001/2138] iommufd/iova_bitmap: Move symbols to IOMMUFD namespace ANBZ: #9185 commit 13578d4ebe8be1c16146f37c0c91f2579611cff2 upstream. Intel-SIG: commit 13578d4ebe8b iommufd/iova_bitmap: Move symbols to IOMMUFD namespace Backport to support Intel QAT live migration for in-tree driver Have the IOVA bitmap exported symbols adhere to the IOMMUFD symbol export convention i.e. using the IOMMUFD namespace. In doing so, import the namespace in the current users. This means VFIO and the vfio-pci drivers that use iova_bitmap_set(). Link: https://lore.kernel.org/r/20231024135109.73787-4-joao.m.martins@oracle.com Suggested-by: Jason Gunthorpe Signed-off-by: Joao Martins Reviewed-by: Jason Gunthorpe Reviewed-by: Brett Creeley Reviewed-by: Kevin Tian Reviewed-by: Alex Williamson Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/iova_bitmap.c | 8 ++++---- drivers/vfio/pci/mlx5/main.c | 1 + drivers/vfio/pci/pds/pci_drv.c | 1 + drivers/vfio/vfio_main.c | 1 + 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c index 3ad1e520445c..a365e18128da 100644 --- a/drivers/iommu/iommufd/iova_bitmap.c +++ b/drivers/iommu/iommufd/iova_bitmap.c @@ -269,7 +269,7 @@ struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, iova_bitmap_free(bitmap); return ERR_PTR(rc); } -EXPORT_SYMBOL_GPL(iova_bitmap_alloc); +EXPORT_SYMBOL_NS_GPL(iova_bitmap_alloc, IOMMUFD); /** * 
iova_bitmap_free() - Frees an IOVA bitmap object @@ -291,7 +291,7 @@ void iova_bitmap_free(struct iova_bitmap *bitmap) kfree(bitmap); } -EXPORT_SYMBOL_GPL(iova_bitmap_free); +EXPORT_SYMBOL_NS_GPL(iova_bitmap_free, IOMMUFD); /* * Returns the remaining bitmap indexes from mapped_total_index to process for @@ -390,7 +390,7 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, return ret; } -EXPORT_SYMBOL_GPL(iova_bitmap_for_each); +EXPORT_SYMBOL_NS_GPL(iova_bitmap_for_each, IOMMUFD); /** * iova_bitmap_set() - Records an IOVA range in bitmap @@ -428,4 +428,4 @@ void iova_bitmap_set(struct iova_bitmap *bitmap, cur_bit += nbits; } while (cur_bit <= last_bit); } -EXPORT_SYMBOL_GPL(iova_bitmap_set); +EXPORT_SYMBOL_NS_GPL(iova_bitmap_set, IOMMUFD); diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c index 42ec574a8622..5cf2b491d15a 100644 --- a/drivers/vfio/pci/mlx5/main.c +++ b/drivers/vfio/pci/mlx5/main.c @@ -1376,6 +1376,7 @@ static struct pci_driver mlx5vf_pci_driver = { module_pci_driver(mlx5vf_pci_driver); +MODULE_IMPORT_NS(IOMMUFD); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Max Gurtovoy "); MODULE_AUTHOR("Yishai Hadas "); diff --git a/drivers/vfio/pci/pds/pci_drv.c b/drivers/vfio/pci/pds/pci_drv.c index caffa1a2cf59..a34dda516629 100644 --- a/drivers/vfio/pci/pds/pci_drv.c +++ b/drivers/vfio/pci/pds/pci_drv.c @@ -204,6 +204,7 @@ static struct pci_driver pds_vfio_pci_driver = { module_pci_driver(pds_vfio_pci_driver); +MODULE_IMPORT_NS(IOMMUFD); MODULE_DESCRIPTION(PDS_VFIO_DRV_DESCRIPTION); MODULE_AUTHOR("Brett Creeley "); MODULE_LICENSE("GPL"); diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c index 40732e8ed4c6..a96d97da367d 100644 --- a/drivers/vfio/vfio_main.c +++ b/drivers/vfio/vfio_main.c @@ -1693,6 +1693,7 @@ static void __exit vfio_cleanup(void) module_init(vfio_init); module_exit(vfio_cleanup); +MODULE_IMPORT_NS(IOMMUFD); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR(DRIVER_AUTHOR); -- 
Gitee From 5f3cd0ecb35410943e149247c2ee2eecf08f687d Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:50:55 +0100 Subject: [PATCH 1002/2138] iommu: Add iommu_domain ops for dirty tracking ANBZ: #9185 commit 750e2e902b7180cb82d2f9b1e372e32087bb8b1b upstream. Intel-SIG: commit 750e2e902b71 iommu: Add iommu_domain ops for dirty tracking Backport to support Intel QAT live migration for in-tree driver Add to iommu domain operations a set of callbacks to perform dirty tracking, particulary to start and stop tracking and to read and clear the dirty data. Drivers are generally expected to dynamically change its translation structures to toggle the tracking and flush some form of control state structure that stands in the IOVA translation path. Though it's not mandatory, as drivers can also enable dirty tracking at boot, and just clear the dirty bits before setting dirty tracking. For each of the newly added IOMMU core APIs: iommu_cap::IOMMU_CAP_DIRTY_TRACKING: new device iommu_capable value when probing for capabilities of the device. .set_dirty_tracking(): an iommu driver is expected to change its translation structures and enable dirty tracking for the devices in the iommu_domain. For drivers making dirty tracking always-enabled, it should just return 0. .read_and_clear_dirty(): an iommu driver is expected to walk the pagetables for the iova range passed in and use iommu_dirty_bitmap_record() to record dirty info per IOVA. When detecting that a given IOVA is dirty it should also clear its dirty state from the PTE, *unless* the flag IOMMU_DIRTY_NO_CLEAR is passed in -- flushing is steered from the caller of the domain_op via iotlb_gather. The iommu core APIs use the same data structure in use for dirty tracking for VFIO device dirty (struct iova_bitmap) abstracted by iommu_dirty_bitmap_record() helper function. domain::dirty_ops: IOMMU domains will store the dirty ops depending on whether the iommu device supports dirty tracking or not. 
iommu drivers can then use this field to figure if the dirty tracking is supported+enforced on attach. The enforcement is enable via domain_alloc_user() which is done via IOMMUFD hwpt flag introduced later. Link: https://lore.kernel.org/r/20231024135109.73787-5-joao.m.martins@oracle.com Signed-off-by: Joao Martins Reviewed-by: Jason Gunthorpe Reviewed-by: Lu Baolu Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- include/linux/io-pgtable.h | 4 +++ include/linux/iommu.h | 70 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+) diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 1b7a44b35616..25142a0e2fc2 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -166,6 +166,10 @@ struct io_pgtable_ops { struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); + int (*read_and_clear_dirty)(struct io_pgtable_ops *ops, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty); }; /** diff --git a/include/linux/iommu.h b/include/linux/iommu.h index a37cf16afa26..95c86501a7a2 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #define IOMMU_READ (1 << 0) @@ -37,6 +38,7 @@ struct bus_type; struct device; struct iommu_domain; struct iommu_domain_ops; +struct iommu_dirty_ops; struct notifier_block; struct iommu_sva; struct iommu_fault_event; @@ -95,6 +97,8 @@ struct iommu_domain_geometry { struct iommu_domain { unsigned type; const struct iommu_domain_ops *ops; + const struct iommu_dirty_ops *dirty_ops; + unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ struct iommu_domain_geometry geometry; struct iommu_dma_cookie *iova_cookie; @@ -133,6 +137,7 @@ enum iommu_cap { * usefully support the 
non-strict DMA flush queue. */ IOMMU_CAP_DEFERRED_FLUSH, + IOMMU_CAP_DIRTY_TRACKING, /* IOMMU supports dirty tracking */ }; /* These are the possible reserved region types */ @@ -227,6 +232,35 @@ struct iommu_iotlb_gather { bool queued; }; +/** + * struct iommu_dirty_bitmap - Dirty IOVA bitmap state + * @bitmap: IOVA bitmap + * @gather: Range information for a pending IOTLB flush + */ +struct iommu_dirty_bitmap { + struct iova_bitmap *bitmap; + struct iommu_iotlb_gather *gather; +}; + +/* Read but do not clear any dirty bits */ +#define IOMMU_DIRTY_NO_CLEAR (1 << 0) + +/** + * struct iommu_dirty_ops - domain specific dirty tracking operations + * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain + * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled + * into a bitmap, with a bit represented as a page. + * Reads the dirty PTE bits and clears it from IO + * pagetables. + */ +struct iommu_dirty_ops { + int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled); + int (*read_and_clear_dirty)(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty); +}; + /** * struct iommu_ops - iommu ops and capabilities * @capable: check capability @@ -641,6 +675,28 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) return gather && gather->queued; } +static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty, + struct iova_bitmap *bitmap, + struct iommu_iotlb_gather *gather) +{ + if (gather) + iommu_iotlb_gather_init(gather); + + dirty->bitmap = bitmap; + dirty->gather = gather; +} + +static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, + unsigned long iova, + unsigned long length) +{ + if (dirty->bitmap) + iova_bitmap_set(dirty->bitmap, iova, length); + + if (dirty->gather) + iommu_iotlb_gather_add_range(dirty->gather, iova, length); +} + /* PCI device grouping function */ extern struct 
iommu_group *pci_device_group(struct device *dev); /* Generic device grouping function */ @@ -747,6 +803,8 @@ struct iommu_fwspec {}; struct iommu_device {}; struct iommu_fault_param {}; struct iommu_iotlb_gather {}; +struct iommu_dirty_bitmap {}; +struct iommu_dirty_ops {}; static inline bool iommu_present(const struct bus_type *bus) { @@ -979,6 +1037,18 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) return false; } +static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty, + struct iova_bitmap *bitmap, + struct iommu_iotlb_gather *gather) +{ +} + +static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty, + unsigned long iova, + unsigned long length) +{ +} + static inline void iommu_device_unregister(struct iommu_device *iommu) { } -- Gitee From 829ce0b3aae51946e3df949bc3317950eb7055c1 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Tue, 17 Oct 2023 11:15:52 -0700 Subject: [PATCH 1003/2138] iommufd: Correct IOMMU_HWPT_ALLOC_NEST_PARENT description ANBZ: #9185 commit b5f9e63278d6f32789478acf1ed41d21d92b36cf upstream. Intel-SIG: commit b5f9e63278d6 iommufd: Correct IOMMU_HWPT_ALLOC_NEST_PARENT description Backport to support Intel QAT live migration for in-tree driver The IOMMU_HWPT_ALLOC_NEST_PARENT flag is used to allocate a HWPT. Though a HWPT holds a domain in the core structure, it is still quite confusing to describe it using "domain" in the uAPI kdoc. Correct it to "HWPT". 
Fixes: 4ff542163397 ("iommufd: Support allocating nested parent domain") Link: https://lore.kernel.org/r/20231017181552.12667-1-nicolinc@nvidia.com Signed-off-by: Nicolin Chen Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- include/uapi/linux/iommufd.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index 4a7c5c8fdbb4..be7a95042677 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -349,9 +349,8 @@ struct iommu_vfio_ioas { /** * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation - * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a domain which can serve - * as the parent domain in the nesting - * configuration. + * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as + * the parent HWPT in a nesting configuration. */ enum iommufd_hwpt_alloc_flags { IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0, -- Gitee From 66976e84eac59b126c9ccea56d0d1387ab0dd352 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:50:56 +0100 Subject: [PATCH 1004/2138] iommufd: Add a flag to enforce dirty tracking on attach ANBZ: #9185 commit 5f9bdbf4c65860cc8b9c544d92bfd76fbea8d9c5 upstream. Intel-SIG: commit 5f9bdbf4c658 iommufd: Add a flag to enforce dirty tracking on attach Backport to support Intel QAT live migration for in-tree driver Throughout IOMMU domain lifetime that wants to use dirty tracking, some guarantees are needed such that any device attached to the iommu_domain supports dirty tracking. The idea is to handle a case where IOMMU in the system are assymetric feature-wise and thus the capability may not be supported for all devices. The enforcement is done by adding a flag into HWPT_ALLOC namely: IOMMU_HWPT_ALLOC_DIRTY_TRACKING .. Passed in HWPT_ALLOC ioctl() flags. 
The enforcement is done by creating a iommu_domain via domain_alloc_user() and validating the requested flags with what the device IOMMU supports (and failing accordingly) advertised). Advertising the new IOMMU domain feature flag requires that the individual iommu driver capability is supported when a future device attachment happens. Link: https://lore.kernel.org/r/20231024135109.73787-6-joao.m.martins@oracle.com Signed-off-by: Joao Martins Reviewed-by: Jason Gunthorpe Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/hw_pagetable.c | 4 +++- include/uapi/linux/iommufd.h | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index 8b3d2875d642..dd50ca9e2c09 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -157,7 +157,9 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) struct iommufd_ioas *ioas; int rc; - if ((cmd->flags & (~IOMMU_HWPT_ALLOC_NEST_PARENT)) || cmd->__reserved) + if ((cmd->flags & ~(IOMMU_HWPT_ALLOC_NEST_PARENT | + IOMMU_HWPT_ALLOC_DIRTY_TRACKING)) || + cmd->__reserved) return -EOPNOTSUPP; idev = iommufd_get_device(ucmd, cmd->dev_id); diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index be7a95042677..c76248410120 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -351,9 +351,12 @@ struct iommu_vfio_ioas { * enum iommufd_hwpt_alloc_flags - Flags for HWPT allocation * @IOMMU_HWPT_ALLOC_NEST_PARENT: If set, allocate a HWPT that can serve as * the parent HWPT in a nesting configuration. 
+ * @IOMMU_HWPT_ALLOC_DIRTY_TRACKING: Dirty tracking support for device IOMMU is + * enforced on device attachment */ enum iommufd_hwpt_alloc_flags { IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1, }; /** -- Gitee From 54b6074366a4353433bfd3d16472e9c156c7dcaf Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:50:57 +0100 Subject: [PATCH 1005/2138] iommufd: Add IOMMU_HWPT_SET_DIRTY_TRACKING ANBZ: #9185 commit e2a4b294784957fc28ecb1fed8a7e69da18eb18d upstream. Intel-SIG: commit e2a4b2947849 iommufd: Add IOMMU_HWPT_SET_DIRTY_TRACKING Backport to support Intel QAT live migration for in-tree driver Every IOMMU driver should be able to implement the needed iommu domain ops to control dirty tracking. Connect a hw_pagetable to the IOMMU core dirty tracking ops, specifically the ability to enable/disable dirty tracking on an IOMMU domain (hw_pagetable id). To that end add an io_pagetable kernel API to toggle dirty tracking: * iopt_set_dirty_tracking(iopt, [domain], state) The intended caller of this is via the hw_pagetable object that is created. Internally it will ensure the leftover dirty state is cleared /right before/ dirty tracking starts. This is also useful for iommu drivers which may decide that dirty tracking is always-enabled at boot without wanting to toggle dynamically via corresponding iommu domain op. 
Link: https://lore.kernel.org/r/20231024135109.73787-7-joao.m.martins@oracle.com Signed-off-by: Joao Martins Reviewed-by: Jason Gunthorpe Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/hw_pagetable.c | 24 +++++++++++ drivers/iommu/iommufd/io_pagetable.c | 54 +++++++++++++++++++++++++ drivers/iommu/iommufd/iommufd_private.h | 12 ++++++ drivers/iommu/iommufd/main.c | 3 ++ include/uapi/linux/iommufd.h | 28 +++++++++++++ 5 files changed, 121 insertions(+) diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index dd50ca9e2c09..c3b7bd9bfcbb 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -196,3 +196,27 @@ int iommufd_hwpt_alloc(struct iommufd_ucmd *ucmd) iommufd_put_object(&idev->obj); return rc; } + +int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd) +{ + struct iommu_hwpt_set_dirty_tracking *cmd = ucmd->cmd; + struct iommufd_hw_pagetable *hwpt; + struct iommufd_ioas *ioas; + int rc = -EOPNOTSUPP; + bool enable; + + if (cmd->flags & ~IOMMU_HWPT_DIRTY_TRACKING_ENABLE) + return rc; + + hwpt = iommufd_get_hwpt(ucmd, cmd->hwpt_id); + if (IS_ERR(hwpt)) + return PTR_ERR(hwpt); + + ioas = hwpt->ioas; + enable = cmd->flags & IOMMU_HWPT_DIRTY_TRACKING_ENABLE; + + rc = iopt_set_dirty_tracking(&ioas->iopt, hwpt->domain, enable); + + iommufd_put_object(&hwpt->obj); + return rc; +} diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c index e76b22939994..8e9c51c5d30a 100644 --- a/drivers/iommu/iommufd/io_pagetable.c +++ b/drivers/iommu/iommufd/io_pagetable.c @@ -432,6 +432,60 @@ int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt, return 0; } +static int iopt_clear_dirty_data(struct io_pagetable *iopt, + struct iommu_domain *domain) +{ + const struct 
iommu_dirty_ops *ops = domain->dirty_ops; + struct iommu_iotlb_gather gather; + struct iommu_dirty_bitmap dirty; + struct iopt_area *area; + int ret = 0; + + lockdep_assert_held_read(&iopt->iova_rwsem); + + iommu_dirty_bitmap_init(&dirty, NULL, &gather); + + for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area; + area = iopt_area_iter_next(area, 0, ULONG_MAX)) { + if (!area->pages) + continue; + + ret = ops->read_and_clear_dirty(domain, iopt_area_iova(area), + iopt_area_length(area), 0, + &dirty); + if (ret) + break; + } + + iommu_iotlb_sync(domain, &gather); + return ret; +} + +int iopt_set_dirty_tracking(struct io_pagetable *iopt, + struct iommu_domain *domain, bool enable) +{ + const struct iommu_dirty_ops *ops = domain->dirty_ops; + int ret = 0; + + if (!ops) + return -EOPNOTSUPP; + + down_read(&iopt->iova_rwsem); + + /* Clear dirty bits from PTEs to ensure a clean snapshot */ + if (enable) { + ret = iopt_clear_dirty_data(iopt, domain); + if (ret) + goto out_unlock; + } + + ret = ops->set_dirty_tracking(domain, enable); + +out_unlock: + up_read(&iopt->iova_rwsem); + return ret; +} + int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova, unsigned long length, struct list_head *pages_list) { diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h index 3064997a0181..b09750848da6 100644 --- a/drivers/iommu/iommufd/iommufd_private.h +++ b/drivers/iommu/iommufd/iommufd_private.h @@ -8,6 +8,7 @@ #include #include #include +#include struct iommu_domain; struct iommu_group; @@ -70,6 +71,9 @@ int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova, unsigned long length, unsigned long *unmapped); int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped); +int iopt_set_dirty_tracking(struct io_pagetable *iopt, + struct iommu_domain *domain, bool enable); + void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova, unsigned long length); int iopt_table_add_domain(struct 
io_pagetable *iopt, @@ -240,6 +244,14 @@ struct iommufd_hw_pagetable { struct list_head hwpt_item; }; +static inline struct iommufd_hw_pagetable * +iommufd_get_hwpt(struct iommufd_ucmd *ucmd, u32 id) +{ + return container_of(iommufd_get_object(ucmd->ictx, id, + IOMMUFD_OBJ_HW_PAGETABLE), + struct iommufd_hw_pagetable, obj); +} +int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd); struct iommufd_hw_pagetable * iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, struct iommufd_device *idev, u32 flags, diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c index e71523cbd0de..46fedd779714 100644 --- a/drivers/iommu/iommufd/main.c +++ b/drivers/iommu/iommufd/main.c @@ -307,6 +307,7 @@ union ucmd_buffer { struct iommu_destroy destroy; struct iommu_hw_info info; struct iommu_hwpt_alloc hwpt; + struct iommu_hwpt_set_dirty_tracking set_dirty_tracking; struct iommu_ioas_alloc alloc; struct iommu_ioas_allow_iovas allow_iovas; struct iommu_ioas_copy ioas_copy; @@ -342,6 +343,8 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = { __reserved), IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc, __reserved), + IOCTL_OP(IOMMU_HWPT_SET_DIRTY_TRACKING, iommufd_hwpt_set_dirty_tracking, + struct iommu_hwpt_set_dirty_tracking, __reserved), IOCTL_OP(IOMMU_IOAS_ALLOC, iommufd_ioas_alloc_ioctl, struct iommu_ioas_alloc, out_ioas_id), IOCTL_OP(IOMMU_IOAS_ALLOW_IOVAS, iommufd_ioas_allow_iovas, diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index c76248410120..5c82b68c88f3 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -47,6 +47,7 @@ enum { IOMMUFD_CMD_VFIO_IOAS, IOMMUFD_CMD_HWPT_ALLOC, IOMMUFD_CMD_GET_HW_INFO, + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING, }; /** @@ -453,4 +454,31 @@ struct iommu_hw_info { __u32 __reserved; }; #define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO) + +/* + * enum iommufd_hwpt_set_dirty_tracking_flags - Flags 
for steering dirty + * tracking + * @IOMMU_HWPT_DIRTY_TRACKING_ENABLE: Enable dirty tracking + */ +enum iommufd_hwpt_set_dirty_tracking_flags { + IOMMU_HWPT_DIRTY_TRACKING_ENABLE = 1, +}; + +/** + * struct iommu_hwpt_set_dirty_tracking - ioctl(IOMMU_HWPT_SET_DIRTY_TRACKING) + * @size: sizeof(struct iommu_hwpt_set_dirty_tracking) + * @flags: Combination of enum iommufd_hwpt_set_dirty_tracking_flags + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @__reserved: Must be 0 + * + * Toggle dirty tracking on an HW pagetable. + */ +struct iommu_hwpt_set_dirty_tracking { + __u32 size; + __u32 flags; + __u32 hwpt_id; + __u32 __reserved; +}; +#define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING) #endif -- Gitee From e84007c1bd5bf795008630610dc9abfe361f867f Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:50:58 +0100 Subject: [PATCH 1006/2138] iommufd: Add IOMMU_HWPT_GET_DIRTY_BITMAP ANBZ: #9185 commit b9a60d6f850e4470017b60f731220a58cda199aa upstream. Intel-SIG: commit b9a60d6f850e iommufd: Add IOMMU_HWPT_GET_DIRTY_BITMAP Backport to support Intel QAT live migration for in-tree driver Connect a hw_pagetable to the IOMMU core dirty tracking read_and_clear_dirty iommu domain op. It exposes all of the functionality for the UAPI that read the dirtied IOVAs while clearing the Dirty bits from the PTEs. In doing so, add an IO pagetable API iopt_read_and_clear_dirty_data() that performs the reading of dirty IOPTEs for a given IOVA range and then copying back to userspace bitmap. Underneath it uses the IOMMU domain kernel API which will read the dirty bits, as well as atomically clearing the IOPTE dirty bit and flushing the IOTLB at the end. The IOVA bitmaps usage takes care of the iteration of the bitmaps user pages efficiently and without copies. Within the iterator function we iterate over io-pagetable contigous areas that have been mapped. 
Contrary to past incantation of a similar interface in VFIO the IOVA range to be scanned is tied in to the bitmap size, thus the application needs to pass a appropriately sized bitmap address taking into account the iova range being passed *and* page size ... as opposed to allowing bitmap-iova != iova. Link: https://lore.kernel.org/r/20231024135109.73787-8-joao.m.martins@oracle.com Signed-off-by: Joao Martins Reviewed-by: Jason Gunthorpe Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/hw_pagetable.c | 22 +++++ drivers/iommu/iommufd/io_pagetable.c | 113 ++++++++++++++++++++++++ drivers/iommu/iommufd/iommufd_private.h | 10 +++ drivers/iommu/iommufd/main.c | 4 + include/uapi/linux/iommufd.h | 35 ++++++++ 5 files changed, 184 insertions(+) diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index c3b7bd9bfcbb..7316f69110ef 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -220,3 +220,25 @@ int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd) iommufd_put_object(&hwpt->obj); return rc; } + +int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd) +{ + struct iommu_hwpt_get_dirty_bitmap *cmd = ucmd->cmd; + struct iommufd_hw_pagetable *hwpt; + struct iommufd_ioas *ioas; + int rc = -EOPNOTSUPP; + + if ((cmd->flags || cmd->__reserved)) + return -EOPNOTSUPP; + + hwpt = iommufd_get_hwpt(ucmd, cmd->hwpt_id); + if (IS_ERR(hwpt)) + return PTR_ERR(hwpt); + + ioas = hwpt->ioas; + rc = iopt_read_and_clear_dirty_data(&ioas->iopt, hwpt->domain, + cmd->flags, cmd); + + iommufd_put_object(&hwpt->obj); + return rc; +} diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c index 8e9c51c5d30a..ea6fd83202a2 100644 --- a/drivers/iommu/iommufd/io_pagetable.c +++ 
b/drivers/iommu/iommufd/io_pagetable.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "io_pagetable.h" #include "double_span.h" @@ -432,6 +433,118 @@ int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt, return 0; } +struct iova_bitmap_fn_arg { + struct io_pagetable *iopt; + struct iommu_domain *domain; + struct iommu_dirty_bitmap *dirty; +}; + +static int __iommu_read_and_clear_dirty(struct iova_bitmap *bitmap, + unsigned long iova, size_t length, + void *opaque) +{ + struct iopt_area *area; + struct iopt_area_contig_iter iter; + struct iova_bitmap_fn_arg *arg = opaque; + struct iommu_domain *domain = arg->domain; + struct iommu_dirty_bitmap *dirty = arg->dirty; + const struct iommu_dirty_ops *ops = domain->dirty_ops; + unsigned long last_iova = iova + length - 1; + int ret; + + iopt_for_each_contig_area(&iter, area, arg->iopt, iova, last_iova) { + unsigned long last = min(last_iova, iopt_area_last_iova(area)); + + ret = ops->read_and_clear_dirty(domain, iter.cur_iova, + last - iter.cur_iova + 1, 0, + dirty); + if (ret) + return ret; + } + + if (!iopt_area_contig_done(&iter)) + return -EINVAL; + return 0; +} + +static int +iommu_read_and_clear_dirty(struct iommu_domain *domain, + struct io_pagetable *iopt, unsigned long flags, + struct iommu_hwpt_get_dirty_bitmap *bitmap) +{ + const struct iommu_dirty_ops *ops = domain->dirty_ops; + struct iommu_iotlb_gather gather; + struct iommu_dirty_bitmap dirty; + struct iova_bitmap_fn_arg arg; + struct iova_bitmap *iter; + int ret = 0; + + if (!ops || !ops->read_and_clear_dirty) + return -EOPNOTSUPP; + + iter = iova_bitmap_alloc(bitmap->iova, bitmap->length, + bitmap->page_size, + u64_to_user_ptr(bitmap->data)); + if (IS_ERR(iter)) + return -ENOMEM; + + iommu_dirty_bitmap_init(&dirty, iter, &gather); + + arg.iopt = iopt; + arg.domain = domain; + arg.dirty = &dirty; + iova_bitmap_for_each(iter, &arg, __iommu_read_and_clear_dirty); + + iommu_iotlb_sync(domain, &gather); + 
iova_bitmap_free(iter); + + return ret; +} + +int iommufd_check_iova_range(struct io_pagetable *iopt, + struct iommu_hwpt_get_dirty_bitmap *bitmap) +{ + size_t iommu_pgsize = iopt->iova_alignment; + u64 last_iova; + + if (check_add_overflow(bitmap->iova, bitmap->length - 1, &last_iova)) + return -EOVERFLOW; + + if (bitmap->iova > ULONG_MAX || last_iova > ULONG_MAX) + return -EOVERFLOW; + + if ((bitmap->iova & (iommu_pgsize - 1)) || + ((last_iova + 1) & (iommu_pgsize - 1))) + return -EINVAL; + + if (!bitmap->page_size) + return -EINVAL; + + if ((bitmap->iova & (bitmap->page_size - 1)) || + ((last_iova + 1) & (bitmap->page_size - 1))) + return -EINVAL; + + return 0; +} + +int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt, + struct iommu_domain *domain, + unsigned long flags, + struct iommu_hwpt_get_dirty_bitmap *bitmap) +{ + int ret; + + ret = iommufd_check_iova_range(iopt, bitmap); + if (ret) + return ret; + + down_read(&iopt->iova_rwsem); + ret = iommu_read_and_clear_dirty(domain, iopt, flags, bitmap); + up_read(&iopt->iova_rwsem); + + return ret; +} + static int iopt_clear_dirty_data(struct io_pagetable *iopt, struct iommu_domain *domain) { diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h index b09750848da6..034129130db3 100644 --- a/drivers/iommu/iommufd/iommufd_private.h +++ b/drivers/iommu/iommufd/iommufd_private.h @@ -8,6 +8,8 @@ #include #include #include +#include +#include #include struct iommu_domain; @@ -71,6 +73,10 @@ int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova, unsigned long length, unsigned long *unmapped); int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped); +int iopt_read_and_clear_dirty_data(struct io_pagetable *iopt, + struct iommu_domain *domain, + unsigned long flags, + struct iommu_hwpt_get_dirty_bitmap *bitmap); int iopt_set_dirty_tracking(struct io_pagetable *iopt, struct iommu_domain *domain, bool enable); @@ -226,6 +232,8 @@ int 
iommufd_option_rlimit_mode(struct iommu_option *cmd, struct iommufd_ctx *ictx); int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd); +int iommufd_check_iova_range(struct io_pagetable *iopt, + struct iommu_hwpt_get_dirty_bitmap *bitmap); /* * A HW pagetable is called an iommu_domain inside the kernel. This user object @@ -252,6 +260,8 @@ iommufd_get_hwpt(struct iommufd_ucmd *ucmd, u32 id) struct iommufd_hw_pagetable, obj); } int iommufd_hwpt_set_dirty_tracking(struct iommufd_ucmd *ucmd); +int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd); + struct iommufd_hw_pagetable * iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, struct iommufd_device *idev, u32 flags, diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c index 46fedd779714..d50f42a730aa 100644 --- a/drivers/iommu/iommufd/main.c +++ b/drivers/iommu/iommufd/main.c @@ -307,6 +307,7 @@ union ucmd_buffer { struct iommu_destroy destroy; struct iommu_hw_info info; struct iommu_hwpt_alloc hwpt; + struct iommu_hwpt_get_dirty_bitmap get_dirty_bitmap; struct iommu_hwpt_set_dirty_tracking set_dirty_tracking; struct iommu_ioas_alloc alloc; struct iommu_ioas_allow_iovas allow_iovas; @@ -343,6 +344,8 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = { __reserved), IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc, __reserved), + IOCTL_OP(IOMMU_HWPT_GET_DIRTY_BITMAP, iommufd_hwpt_get_dirty_bitmap, + struct iommu_hwpt_get_dirty_bitmap, data), IOCTL_OP(IOMMU_HWPT_SET_DIRTY_TRACKING, iommufd_hwpt_set_dirty_tracking, struct iommu_hwpt_set_dirty_tracking, __reserved), IOCTL_OP(IOMMU_IOAS_ALLOC, iommufd_ioas_alloc_ioctl, @@ -555,5 +558,6 @@ MODULE_ALIAS_MISCDEV(VFIO_MINOR); MODULE_ALIAS("devname:vfio/vfio"); #endif MODULE_IMPORT_NS(IOMMUFD_INTERNAL); +MODULE_IMPORT_NS(IOMMUFD); MODULE_DESCRIPTION("I/O Address Space Management for passthrough devices"); MODULE_LICENSE("GPL"); diff --git a/include/uapi/linux/iommufd.h 
b/include/uapi/linux/iommufd.h index 5c82b68c88f3..dce38e32ca84 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -48,6 +48,7 @@ enum { IOMMUFD_CMD_HWPT_ALLOC, IOMMUFD_CMD_GET_HW_INFO, IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING, + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP, }; /** @@ -481,4 +482,38 @@ struct iommu_hwpt_set_dirty_tracking { }; #define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \ IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING) + +/** + * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP) + * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap) + * @hwpt_id: HW pagetable ID that represents the IOMMU domain + * @flags: Must be zero + * @__reserved: Must be 0 + * @iova: base IOVA of the bitmap first bit + * @length: IOVA range size + * @page_size: page size granularity of each bit in the bitmap + * @data: bitmap where to set the dirty bits. The bitmap bits each + * represent a page_size which you deviate from an arbitrary iova. + * + * Checking a given IOVA is dirty: + * + * data[(iova / page_size) / 64] & (1ULL << ((iova / page_size) % 64)) + * + * Walk the IOMMU pagetables for a given IOVA range to return a bitmap + * with the dirty IOVAs. In doing so it will also by default clear any + * dirty bit metadata set in the IOPTE. + */ +struct iommu_hwpt_get_dirty_bitmap { + __u32 size; + __u32 hwpt_id; + __u32 flags; + __u32 __reserved; + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 page_size; + __aligned_u64 data; +}; +#define IOMMU_HWPT_GET_DIRTY_BITMAP _IO(IOMMUFD_TYPE, \ + IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP) + #endif -- Gitee From 33cef7a9730cc43e49152017e7ba2c3dc63cb831 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:50:59 +0100 Subject: [PATCH 1007/2138] iommufd: Add capabilities to IOMMU_GET_HW_INFO ANBZ: #9185 commit 7623683857e52b75184d37862c70f1230aef2edd upstream. 
Intel-SIG: commit 7623683857e5 iommufd: Add capabilities to IOMMU_GET_HW_INFO Backport to support Intel QAT live migration for in-tree driver Extend IOMMUFD_CMD_GET_HW_INFO op to query generic iommu capabilities for a given device. Capabilities are IOMMU agnostic and use device_iommu_capable() API passing one of the IOMMU_CAP_*. Enumerate IOMMU_CAP_DIRTY_TRACKING for now in the out_capabilities field returned back to userspace. Link: https://lore.kernel.org/r/20231024135109.73787-9-joao.m.martins@oracle.com Signed-off-by: Joao Martins Reviewed-by: Jason Gunthorpe Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/device.c | 4 ++++ include/uapi/linux/iommufd.h | 17 +++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c index e88fa73a45e6..2a41fd2b6ef8 100644 --- a/drivers/iommu/iommufd/device.c +++ b/drivers/iommu/iommufd/device.c @@ -1185,6 +1185,10 @@ int iommufd_get_hw_info(struct iommufd_ucmd *ucmd) */ cmd->data_len = data_len; + cmd->out_capabilities = 0; + if (device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING)) + cmd->out_capabilities |= IOMMU_HW_CAP_DIRTY_TRACKING; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); out_free: kfree(data); diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index dce38e32ca84..036ebc6c19cf 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -418,6 +418,20 @@ enum iommu_hw_info_type { IOMMU_HW_INFO_TYPE_INTEL_VTD, }; +/** + * enum iommufd_hw_capabilities + * @IOMMU_HW_CAP_DIRTY_TRACKING: IOMMU hardware support for dirty tracking + * If available, it means the following APIs + * are supported: + * + * IOMMU_HWPT_GET_DIRTY_BITMAP + * IOMMU_HWPT_SET_DIRTY_TRACKING + * + */ +enum iommufd_hw_capabilities { + IOMMU_HW_CAP_DIRTY_TRACKING = 1 << 
0, +}; + /** * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO) * @size: sizeof(struct iommu_hw_info) @@ -429,6 +443,8 @@ enum iommu_hw_info_type { * the iommu type specific hardware information data * @out_data_type: Output the iommu hardware info type as defined in the enum * iommu_hw_info_type. + * @out_capabilities: Output the generic iommu capability info type as defined + * in the enum iommu_hw_capabilities. * @__reserved: Must be 0 * * Query an iommu type specific hardware information data from an iommu behind @@ -453,6 +469,7 @@ struct iommu_hw_info { __aligned_u64 data_uptr; __u32 out_data_type; __u32 __reserved; + __aligned_u64 out_capabilities; }; #define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO) -- Gitee From 7493198153bbbeb2118367f5356a04a267bc15a9 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:51:00 +0100 Subject: [PATCH 1008/2138] iommufd: Add a flag to skip clearing of IOPTE dirty ANBZ: #9185 commit 609848132c71316df3260d1ec066539c21bba585 upstream. Intel-SIG: commit 609848132c71 iommufd: Add a flag to skip clearing of IOPTE dirty Backport to support Intel QAT live migration for in-tree driver VFIO has an operation where it unmaps an IOVA while returning a bitmap with the dirty data. In reality the operation doesn't quite query the IO pagetables that the PTE was dirty or not. Instead it marks as dirty on anything that was mapped, and doing so in one syscall. In IOMMUFD the equivalent is done in two operations by querying with GET_DIRTY_IOVA followed by UNMAP_IOVA. However, this would incur two TLB flushes given that after clearing dirty bits IOMMU implementations require invalidating their IOTLB, plus another invalidation needed for the UNMAP. To allow dirty bits to be queried faster, add a flag (IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR) that requests to not clear the dirty bits from the PTE (but just reading them), under the expectation that the next operation is the unmap. 
An alternative is to unmap and just perpectually mark as dirty as that's the same behaviour as today. So here equivalent functionally can be provided with unmap alone, and if real dirty info is required it will amortize the cost while querying. There's still a race against DMA where in theory the unmap of the IOVA (when the guest invalidates the IOTLB via emulated iommu) would race against the VF performing DMA on the same IOVA. As discussed in [0], we are accepting to resolve this race as throwing away the DMA and it doesn't matter if it hit physical DRAM or not, the VM can't tell if we threw it away because the DMA was blocked or because we failed to copy the DRAM. [0] https://lore.kernel.org/linux-iommu/20220502185239.GR8364@nvidia.com/ Link: https://lore.kernel.org/r/20231024135109.73787-10-joao.m.martins@oracle.com Signed-off-by: Joao Martins Reviewed-by: Jason Gunthorpe Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/hw_pagetable.c | 3 ++- drivers/iommu/iommufd/io_pagetable.c | 9 +++++++-- include/uapi/linux/iommufd.h | 15 ++++++++++++++- 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index 7316f69110ef..72a5269984b0 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -228,7 +228,8 @@ int iommufd_hwpt_get_dirty_bitmap(struct iommufd_ucmd *ucmd) struct iommufd_ioas *ioas; int rc = -EOPNOTSUPP; - if ((cmd->flags || cmd->__reserved)) + if ((cmd->flags & ~(IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR)) || + cmd->__reserved) return -EOPNOTSUPP; hwpt = iommufd_get_hwpt(ucmd, cmd->hwpt_id); diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c index ea6fd83202a2..9f193c933de6 100644 --- a/drivers/iommu/iommufd/io_pagetable.c +++ 
b/drivers/iommu/iommufd/io_pagetable.c @@ -434,6 +434,7 @@ int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt, } struct iova_bitmap_fn_arg { + unsigned long flags; struct io_pagetable *iopt; struct iommu_domain *domain; struct iommu_dirty_bitmap *dirty; @@ -450,13 +451,14 @@ static int __iommu_read_and_clear_dirty(struct iova_bitmap *bitmap, struct iommu_dirty_bitmap *dirty = arg->dirty; const struct iommu_dirty_ops *ops = domain->dirty_ops; unsigned long last_iova = iova + length - 1; + unsigned long flags = arg->flags; int ret; iopt_for_each_contig_area(&iter, area, arg->iopt, iova, last_iova) { unsigned long last = min(last_iova, iopt_area_last_iova(area)); ret = ops->read_and_clear_dirty(domain, iter.cur_iova, - last - iter.cur_iova + 1, 0, + last - iter.cur_iova + 1, flags, dirty); if (ret) return ret; @@ -490,12 +492,15 @@ iommu_read_and_clear_dirty(struct iommu_domain *domain, iommu_dirty_bitmap_init(&dirty, iter, &gather); + arg.flags = flags; arg.iopt = iopt; arg.domain = domain; arg.dirty = &dirty; iova_bitmap_for_each(iter, &arg, __iommu_read_and_clear_dirty); - iommu_iotlb_sync(domain, &gather); + if (!(flags & IOMMU_DIRTY_NO_CLEAR)) + iommu_iotlb_sync(domain, &gather); + iova_bitmap_free(iter); return ret; diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index 036ebc6c19cf..c44eecf5d318 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -500,11 +500,24 @@ struct iommu_hwpt_set_dirty_tracking { #define IOMMU_HWPT_SET_DIRTY_TRACKING _IO(IOMMUFD_TYPE, \ IOMMUFD_CMD_HWPT_SET_DIRTY_TRACKING) +/** + * enum iommufd_hwpt_get_dirty_bitmap_flags - Flags for getting dirty bits + * @IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR: Just read the PTEs without clearing + * any dirty bits metadata. This flag + * can be passed in the expectation + * where the next operation is an unmap + * of the same IOVA range. 
+ * + */ +enum iommufd_hwpt_get_dirty_bitmap_flags { + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR = 1, +}; + /** * struct iommu_hwpt_get_dirty_bitmap - ioctl(IOMMU_HWPT_GET_DIRTY_BITMAP) * @size: sizeof(struct iommu_hwpt_get_dirty_bitmap) * @hwpt_id: HW pagetable ID that represents the IOMMU domain - * @flags: Must be zero + * @flags: Combination of enum iommufd_hwpt_get_dirty_bitmap_flags * @__reserved: Must be 0 * @iova: base IOVA of the bitmap first bit * @length: IOVA range size -- Gitee From 217512a27921d4649e3ac607ab8d91b6bd7493c7 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:51:01 +0100 Subject: [PATCH 1009/2138] iommu/amd: Add domain_alloc_user based domain allocation ANBZ: #9185 commit 134288158a415cd863b1c32c7dcddc0a1dc32aab upstream. Intel-SIG: commit 134288158a41 iommu/amd: Add domain_alloc_user based domain allocation Backport to support Intel QAT live migration for in-tree driver Add the domain_alloc_user op implementation. To that end, refactor amd_iommu_domain_alloc() to receive a dev pointer and flags, while renaming it too, such that it becomes a common function shared with domain_alloc_user() implementation. The sole difference with domain_alloc_user() is that we initialize also other fields that iommu_domain_alloc() does. It lets it return the iommu domain correctly initialized in one function. This is in preparation to add dirty enforcement on AMD implementation of domain_alloc_user. 
Link: https://lore.kernel.org/r/20231024135109.73787-11-joao.m.martins@oracle.com Signed-off-by: Joao Martins Reviewed-by: Suravee Suthikulpanit Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/amd/iommu.c | 44 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 95bd7c25ba6f..667e23b0ab0d 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "amd_iommu.h" #include "../dma-iommu.h" @@ -2155,28 +2156,64 @@ static inline u64 dma_max_address(void) return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1); } -static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) +static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, + struct device *dev, u32 flags) { struct protection_domain *domain; + struct amd_iommu *iommu = NULL; + + if (dev) { + iommu = rlookup_amd_iommu(dev); + if (!iommu) + return ERR_PTR(-ENODEV); + } /* * Since DTE[Mode]=0 is prohibited on SNP-enabled system, * default to use IOMMU_DOMAIN_DMA[_FQ]. 
*/ if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY)) - return NULL; + return ERR_PTR(-EINVAL); domain = protection_domain_alloc(type); if (!domain) - return NULL; + return ERR_PTR(-ENOMEM); domain->domain.geometry.aperture_start = 0; domain->domain.geometry.aperture_end = dma_max_address(); domain->domain.geometry.force_aperture = true; + if (iommu) { + domain->domain.type = type; + domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap; + domain->domain.ops = iommu->iommu.ops->default_domain_ops; + } + return &domain->domain; } +static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type) +{ + struct iommu_domain *domain; + + domain = do_iommu_domain_alloc(type, NULL, 0); + if (IS_ERR(domain)) + return NULL; + + return domain; +} + +static struct iommu_domain *amd_iommu_domain_alloc_user(struct device *dev, + u32 flags) +{ + unsigned int type = IOMMU_DOMAIN_UNMANAGED; + + if (flags) + return ERR_PTR(-EOPNOTSUPP); + + return do_iommu_domain_alloc(type, dev, flags); +} + static void amd_iommu_domain_free(struct iommu_domain *dom) { struct protection_domain *domain; @@ -2464,6 +2501,7 @@ static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain) const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .domain_alloc = amd_iommu_domain_alloc, + .domain_alloc_user = amd_iommu_domain_alloc_user, .probe_device = amd_iommu_probe_device, .release_device = amd_iommu_release_device, .probe_finalize = amd_iommu_probe_finalize, -- Gitee From f31e2d40fea7ec4759ccf4bfb06df2189b44def5 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:51:02 +0100 Subject: [PATCH 1010/2138] iommu/amd: Access/Dirty bit support in IOPTEs ANBZ: #9185 commit 421a511a293fe1c73b37f6147c6676c4ee6efa04 upstream. Intel-SIG: commit 421a511a293f iommu/amd: Access/Dirty bit support in IOPTEs Backport to support Intel QAT live migration for in-tree driver IOMMU advertises Access/Dirty bits if the extended feature register reports it. 
Relevant AMD IOMMU SDM ref[0] "1.3.8 Enhanced Support for Access and Dirty Bits" To enable it set the DTE flag in bits 7 and 8 to enable access, or access+dirty. With that, the IOMMU starts marking the D and A flags on every Memory Request or ATS translation request. It is on the VMM side to steer whether to enable dirty tracking or not, rather than wrongly doing in IOMMU. Relevant AMD IOMMU SDM ref [0], "Table 7. Device Table Entry (DTE) Field Definitions" particularly the entry "HAD". To actually toggle on and off it's relatively simple as it's setting 2 bits on DTE and flush the device DTE cache. To get what's dirtied use existing AMD io-pgtable support, by walking the pagetables over each IOVA, with fetch_pte(). The IOTLB flushing is left to the caller (much like unmap), and iommu_dirty_bitmap_record() is the one adding page-ranges to invalidate. This allows caller to batch the flush over a big span of IOVA space, without the iommu wondering about when to flush. Worthwhile sections from AMD IOMMU SDM: "2.2.3.1 Host Access Support" "2.2.3.2 Host Dirty Support" For details on how IOMMU hardware updates the dirty bit see, and expects from its consequent clearing by CPU: "2.2.7.4 Updating Accessed and Dirty Bits in the Guest Address Tables" "2.2.7.5 Clearing Accessed and Dirty Bits" Quoting the SDM: "The setting of accessed and dirty status bits in the page tables is visible to both the CPU and the peripheral when sharing guest page tables. The IOMMU interlocked operations to update A and D bits must be 64-bit operations and naturally aligned on a 64-bit boundary" .. and for the IOMMU update sequence to Dirty bit, essentially is states: 1. Decodes the read and write intent from the memory access. 2. If P=0 in the page descriptor, fail the access. 3. Compare the A & D bits in the descriptor with the read and write intent in the request. 4. If the A or D bits need to be updated in the descriptor: * Start atomic operation. * Read the descriptor as a 64-bit access. 
* If the descriptor no longer appears to require an update, release the atomic lock with no further action and continue to step 5. * Calculate the new A & D bits. * Write the descriptor as a 64-bit access. * End atomic operation. 5. Continue to the next stage of translation or to the memory access. Access/Dirty bits readout also need to consider the non-default page-sizes (aka replicated PTEs as mentined by manual), as AMD supports all powers of two (except 512G) page sizes. Select IOMMUFD_DRIVER only if IOMMUFD is enabled considering that IOMMU dirty tracking requires IOMMUFD. Link: https://lore.kernel.org/r/20231024135109.73787-12-joao.m.martins@oracle.com Signed-off-by: Joao Martins Reviewed-by: Suravee Suthikulpanit Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/amd/Kconfig | 1 + drivers/iommu/amd/amd_iommu_types.h | 12 ++++ drivers/iommu/amd/io_pgtable.c | 68 +++++++++++++++++++ drivers/iommu/amd/iommu.c | 102 +++++++++++++++++++++++++++- 4 files changed, 182 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig index 9b5fc3356bf2..8bd4c3b183ec 100644 --- a/drivers/iommu/amd/Kconfig +++ b/drivers/iommu/amd/Kconfig @@ -10,6 +10,7 @@ config AMD_IOMMU select IOMMU_API select IOMMU_IOVA select IOMMU_IO_PGTABLE + select IOMMUFD_DRIVER if IOMMUFD depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE help With this option you can enable support for AMD IOMMU hardware in diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 7dc30c2b56b3..dec4e5c2b66b 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -97,7 +97,9 @@ #define FEATURE_GATS_MASK (3ULL) #define FEATURE_GAM_VAPIC BIT_ULL(21) #define FEATURE_GIOSUP BIT_ULL(48) +#define FEATURE_HASUP BIT_ULL(49) #define FEATURE_EPHSUP BIT_ULL(50) +#define 
FEATURE_HDSUP BIT_ULL(52) #define FEATURE_SNP BIT_ULL(63) #define FEATURE_PASID_SHIFT 32 @@ -212,6 +214,7 @@ /* macros and definitions for device table entries */ #define DEV_ENTRY_VALID 0x00 #define DEV_ENTRY_TRANSLATION 0x01 +#define DEV_ENTRY_HAD 0x07 #define DEV_ENTRY_PPR 0x34 #define DEV_ENTRY_IR 0x3d #define DEV_ENTRY_IW 0x3e @@ -370,10 +373,16 @@ #define PTE_LEVEL_PAGE_SIZE(level) \ (1ULL << (12 + (9 * (level)))) +/* + * The IOPTE dirty bit + */ +#define IOMMU_PTE_HD_BIT (6) + /* * Bit value definition for I/O PTE fields */ #define IOMMU_PTE_PR BIT_ULL(0) +#define IOMMU_PTE_HD BIT_ULL(IOMMU_PTE_HD_BIT) #define IOMMU_PTE_U BIT_ULL(59) #define IOMMU_PTE_FC BIT_ULL(60) #define IOMMU_PTE_IR BIT_ULL(61) @@ -384,6 +393,7 @@ */ #define DTE_FLAG_V BIT_ULL(0) #define DTE_FLAG_TV BIT_ULL(1) +#define DTE_FLAG_HAD (3ULL << 7) #define DTE_FLAG_GIOV BIT_ULL(54) #define DTE_FLAG_GV BIT_ULL(55) #define DTE_GLX_SHIFT (56) @@ -413,6 +423,7 @@ #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR) +#define IOMMU_PTE_DIRTY(pte) ((pte) & IOMMU_PTE_HD) #define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK)) #define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07) @@ -563,6 +574,7 @@ struct protection_domain { int nid; /* Node ID */ u64 *gcr3_tbl; /* Guest CR3 table */ unsigned long flags; /* flags to find out type of domain */ + bool dirty_tracking; /* dirty tracking is enabled in the domain */ unsigned dev_cnt; /* devices assigned to this domain */ unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ }; diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c index 2892aa1b4dc1..6c0621f6f572 100644 --- a/drivers/iommu/amd/io_pgtable.c +++ b/drivers/iommu/amd/io_pgtable.c @@ -486,6 +486,73 @@ static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned lo return (__pte & ~offset_mask) | (iova & offset_mask); } +static bool pte_test_and_clear_dirty(u64 *ptep, unsigned 
long size, + unsigned long flags) +{ + bool test_only = flags & IOMMU_DIRTY_NO_CLEAR; + bool dirty = false; + int i, count; + + /* + * 2.2.3.2 Host Dirty Support + * When a non-default page size is used , software must OR the + * Dirty bits in all of the replicated host PTEs used to map + * the page. The IOMMU does not guarantee the Dirty bits are + * set in all of the replicated PTEs. Any portion of the page + * may have been written even if the Dirty bit is set in only + * one of the replicated PTEs. + */ + count = PAGE_SIZE_PTE_COUNT(size); + for (i = 0; i < count && test_only; i++) { + if (test_bit(IOMMU_PTE_HD_BIT, (unsigned long *)&ptep[i])) { + dirty = true; + break; + } + } + + for (i = 0; i < count && !test_only; i++) { + if (test_and_clear_bit(IOMMU_PTE_HD_BIT, + (unsigned long *)&ptep[i])) { + dirty = true; + } + } + + return dirty; +} + +static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops); + unsigned long end = iova + size - 1; + + do { + unsigned long pgsize = 0; + u64 *ptep, pte; + + ptep = fetch_pte(pgtable, iova, &pgsize); + if (ptep) + pte = READ_ONCE(*ptep); + if (!ptep || !IOMMU_PTE_PRESENT(pte)) { + pgsize = pgsize ?: PTE_LEVEL_PAGE_SIZE(0); + iova += pgsize; + continue; + } + + /* + * Mark the whole IOVA range as dirty even if only one of + * the replicated PTEs were marked dirty. 
+ */ + if (pte_test_and_clear_dirty(ptep, pgsize, flags)) + iommu_dirty_bitmap_record(dirty, iova, pgsize); + iova += pgsize; + } while (iova < end); + + return 0; +} + /* * ---------------------------------------------------- */ @@ -527,6 +594,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo pgtable->iop.ops.map_pages = iommu_v1_map_pages; pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages; pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys; + pgtable->iop.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty; return &pgtable->iop; } diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 667e23b0ab0d..caad10f9cee3 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -66,6 +66,7 @@ LIST_HEAD(hpet_map); LIST_HEAD(acpihid_map); const struct iommu_ops amd_iommu_ops; +const struct iommu_dirty_ops amd_dirty_ops; static ATOMIC_NOTIFIER_HEAD(ppr_notifier); int amd_iommu_max_glx_val = -1; @@ -1611,6 +1612,9 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid, pte_root |= 1ULL << DEV_ENTRY_PPR; } + if (domain->dirty_tracking) + pte_root |= DTE_FLAG_HAD; + if (domain->flags & PD_IOMMUV2_MASK) { u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl); u64 glx = domain->glx; @@ -2156,9 +2160,15 @@ static inline u64 dma_max_address(void) return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1); } +static bool amd_iommu_hd_support(struct amd_iommu *iommu) +{ + return iommu && (iommu->features & FEATURE_HDSUP); +} + static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, struct device *dev, u32 flags) { + bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; struct protection_domain *domain; struct amd_iommu *iommu = NULL; @@ -2175,6 +2185,9 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY)) return ERR_PTR(-EINVAL); + if (dirty_tracking && !amd_iommu_hd_support(iommu)) + return 
ERR_PTR(-EOPNOTSUPP); + domain = protection_domain_alloc(type); if (!domain) return ERR_PTR(-ENOMEM); @@ -2187,6 +2200,9 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type, domain->domain.type = type; domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap; domain->domain.ops = iommu->iommu.ops->default_domain_ops; + + if (dirty_tracking) + domain->domain.dirty_ops = &amd_dirty_ops; } return &domain->domain; @@ -2208,7 +2224,7 @@ static struct iommu_domain *amd_iommu_domain_alloc_user(struct device *dev, { unsigned int type = IOMMU_DOMAIN_UNMANAGED; - if (flags) + if (flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) return ERR_PTR(-EOPNOTSUPP); return do_iommu_domain_alloc(type, dev, flags); @@ -2251,6 +2267,13 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, dev_data->defer_attach = false; + /* + * Restrict to devices with compatible IOMMU hardware support + * when enforcement of dirty tracking is enabled. + */ + if (dom->dirty_ops && !amd_iommu_hd_support(iommu)) + return -EINVAL; + if (dev_data->domain) detach_device(dev); @@ -2369,6 +2392,11 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap) return true; case IOMMU_CAP_DEFERRED_FLUSH: return true; + case IOMMU_CAP_DIRTY_TRACKING: { + struct amd_iommu *iommu = rlookup_amd_iommu(dev); + + return amd_iommu_hd_support(iommu); + } default: break; } @@ -2376,6 +2404,73 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap) return false; } +static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain, + bool enable) +{ + struct protection_domain *pdomain = to_pdomain(domain); + struct dev_table_entry *dev_table; + struct iommu_dev_data *dev_data; + bool domain_flush = false; + struct amd_iommu *iommu; + unsigned long flags; + u64 pte_root; + + spin_lock_irqsave(&pdomain->lock, flags); + if (!(pdomain->dirty_tracking ^ enable)) { + spin_unlock_irqrestore(&pdomain->lock, flags); + return 0; + } + + list_for_each_entry(dev_data, 
&pdomain->dev_list, list) { + iommu = rlookup_amd_iommu(dev_data->dev); + if (!iommu) + continue; + + dev_table = get_dev_table(iommu); + pte_root = dev_table[dev_data->devid].data[0]; + + pte_root = (enable ? pte_root | DTE_FLAG_HAD : + pte_root & ~DTE_FLAG_HAD); + + /* Flush device DTE */ + dev_table[dev_data->devid].data[0] = pte_root; + device_flush_dte(dev_data); + domain_flush = true; + } + + /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */ + if (domain_flush) { + amd_iommu_domain_flush_tlb_pde(pdomain); + amd_iommu_domain_flush_complete(pdomain); + } + pdomain->dirty_tracking = enable; + spin_unlock_irqrestore(&pdomain->lock, flags); + + return 0; +} + +static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + struct protection_domain *pdomain = to_pdomain(domain); + struct io_pgtable_ops *ops = &pdomain->iop.iop.ops; + unsigned long lflags; + + if (!ops || !ops->read_and_clear_dirty) + return -EOPNOTSUPP; + + spin_lock_irqsave(&pdomain->lock, lflags); + if (!pdomain->dirty_tracking && dirty->bitmap) { + spin_unlock_irqrestore(&pdomain->lock, lflags); + return -EINVAL; + } + spin_unlock_irqrestore(&pdomain->lock, lflags); + + return ops->read_and_clear_dirty(ops, iova, size, flags, dirty); +} + static void amd_iommu_get_resv_regions(struct device *dev, struct list_head *head) { @@ -2498,6 +2593,11 @@ static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain) return true; } +const struct iommu_dirty_ops amd_dirty_ops = { + .set_dirty_tracking = amd_iommu_set_dirty_tracking, + .read_and_clear_dirty = amd_iommu_read_and_clear_dirty, +}; + const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .domain_alloc = amd_iommu_domain_alloc, -- Gitee From e9c49848e27843fcbcb3717ea51a9929b58284f4 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:51:03 +0100 Subject: [PATCH 1011/2138] iommu/vt-d: 
Access/Dirty bit support for SS domains ANBZ: #9185 commit f35f22cc760eb2c7034bf53251399685d611e03f upstream. Intel-SIG: commit f35f22cc760e iommu/vt-d: Access/Dirty bit support for SS domains Backport to support Intel QAT live migration for in-tree driver IOMMU advertises Access/Dirty bits for second-stage page table if the extended capability DMAR register reports it (ECAP, mnemonic ECAP.SSADS). The first stage table is compatible with CPU page table thus A/D bits are implicitly supported. Relevant Intel IOMMU SDM ref for first stage table "3.6.2 Accessed, Extended Accessed, and Dirty Flags" and second stage table "3.7.2 Accessed and Dirty Flags". First stage page table is enabled by default so it's allowed to set dirty tracking and no control bits needed, it just returns 0. To use SSADS, set bit 9 (SSADE) in the scalable-mode PASID table entry and flush the IOTLB via pasid_flush_caches() following the manual. Relevant SDM refs: "3.7.2 Accessed and Dirty Flags" "6.5.3.3 Guidance to Software for Invalidations, Table 23. Guidance to Software for Invalidations" PTE dirty bit is located in bit 9 and it's cached in the IOTLB so flush IOTLB to make sure IOMMU attempts to set the dirty bit again. Note that iommu_dirty_bitmap_record() will add the IOVA to iotlb_gather and thus the caller of the iommu op will flush the IOTLB. Relevant manuals over the hardware translation is chapter 6 with some special mention to: "6.2.3.1 Scalable-Mode PASID-Table Entry Programming Considerations" "6.2.4 IOTLB" Select IOMMUFD_DRIVER only if IOMMUFD is enabled, given that IOMMU dirty tracking requires IOMMUFD. 
Link: https://lore.kernel.org/r/20231024135109.73787-13-joao.m.martins@oracle.com Signed-off-by: Joao Martins Reviewed-by: Lu Baolu Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/intel/Kconfig | 1 + drivers/iommu/intel/iommu.c | 103 +++++++++++++++++++++++++++++++++- drivers/iommu/intel/iommu.h | 16 ++++++ drivers/iommu/intel/pasid.c | 109 ++++++++++++++++++++++++++++++++++++ drivers/iommu/intel/pasid.h | 4 ++ 5 files changed, 232 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig index 2e56bd79f589..f5348b80652b 100644 --- a/drivers/iommu/intel/Kconfig +++ b/drivers/iommu/intel/Kconfig @@ -15,6 +15,7 @@ config INTEL_IOMMU select DMA_OPS select IOMMU_API select IOMMU_IOVA + select IOMMUFD_DRIVER if IOMMUFD select NEED_DMA_MAP_STATE select DMAR_TABLE select SWIOTLB diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 94be151ca2cf..29d6a98e5892 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -300,6 +300,7 @@ static int iommu_skip_te_disable; #define IDENTMAP_AZALIA 4 const struct iommu_ops intel_iommu_ops; +const struct iommu_dirty_ops intel_dirty_ops; static bool translation_pre_enabled(struct intel_iommu *iommu) { @@ -4080,8 +4081,10 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags) { struct iommu_domain *domain; struct intel_iommu *iommu; + bool dirty_tracking; - if (flags & (~IOMMU_HWPT_ALLOC_NEST_PARENT)) + if (flags & + (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING))) return ERR_PTR(-EOPNOTSUPP); iommu = device_to_iommu(dev, NULL, NULL); @@ -4091,6 +4094,10 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags) if ((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) && !ecap_nest(iommu->ecap)) return ERR_PTR(-EOPNOTSUPP); + dirty_tracking = (flags & 
IOMMU_HWPT_ALLOC_DIRTY_TRACKING); + if (dirty_tracking && !ssads_supported(iommu)) + return ERR_PTR(-EOPNOTSUPP); + /* * domain_alloc_user op needs to fully initialize a domain * before return, so uses iommu_domain_alloc() here for @@ -4099,6 +4106,15 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags) domain = iommu_domain_alloc(dev->bus); if (!domain) domain = ERR_PTR(-ENOMEM); + + if (!IS_ERR(domain) && dirty_tracking) { + if (to_dmar_domain(domain)->use_first_level) { + iommu_domain_free(domain); + return ERR_PTR(-EOPNOTSUPP); + } + domain->dirty_ops = &intel_dirty_ops; + } + return domain; } @@ -4122,6 +4138,9 @@ static int prepare_domain_attach_device(struct iommu_domain *domain, if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) return -EINVAL; + if (domain->dirty_ops && !ssads_supported(iommu)) + return -EINVAL; + /* check if this iommu agaw is sufficient for max mapped address */ addr_width = agaw_to_width(iommu->agaw); if (addr_width > cap_mgaw(iommu->cap)) @@ -4377,6 +4396,8 @@ static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap) return dmar_platform_optin(); case IOMMU_CAP_ENFORCE_CACHE_COHERENCY: return ecap_sc_support(info->iommu->ecap); + case IOMMU_CAP_DIRTY_TRACKING: + return ssads_supported(info->iommu); default: return false; } @@ -4777,6 +4798,9 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain, if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev)) return -EOPNOTSUPP; + if (domain->dirty_ops) + return -EINVAL; + if (context_copied(iommu, info->bus, info->devfn)) return -EBUSY; @@ -4835,6 +4859,83 @@ static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type) return vtd; } +static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain, + bool enable) +{ + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + struct device_domain_info *info; + int ret; + + spin_lock(&dmar_domain->lock); + if (dmar_domain->dirty_tracking == enable) + goto 
out_unlock; + + list_for_each_entry(info, &dmar_domain->devices, link) { + ret = intel_pasid_setup_dirty_tracking(info->iommu, + info->domain, info->dev, + IOMMU_NO_PASID, enable); + if (ret) + goto err_unwind; + } + + dmar_domain->dirty_tracking = enable; +out_unlock: + spin_unlock(&dmar_domain->lock); + + return 0; + +err_unwind: + list_for_each_entry(info, &dmar_domain->devices, link) + intel_pasid_setup_dirty_tracking(info->iommu, dmar_domain, + info->dev, IOMMU_NO_PASID, + dmar_domain->dirty_tracking); + spin_unlock(&dmar_domain->lock); + return ret; +} + +static int intel_iommu_read_and_clear_dirty(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + unsigned long end = iova + size - 1; + unsigned long pgsize; + + /* + * IOMMUFD core calls into a dirty tracking disabled domain without an + * IOVA bitmap set in order to clean dirty bits in all PTEs that might + * have occurred when we stopped dirty tracking. This ensures that we + * never inherit dirtied bits from a previous cycle. 
+ */ + if (!dmar_domain->dirty_tracking && dirty->bitmap) + return -EINVAL; + + do { + struct dma_pte *pte; + int lvl = 0; + + pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &lvl, + GFP_ATOMIC); + pgsize = level_size(lvl) << VTD_PAGE_SHIFT; + if (!pte || !dma_pte_present(pte)) { + iova += pgsize; + continue; + } + + if (dma_sl_pte_test_and_clear_dirty(pte, flags)) + iommu_dirty_bitmap_record(dirty, iova, pgsize); + iova += pgsize; + } while (iova < end); + + return 0; +} + +const struct iommu_dirty_ops intel_dirty_ops = { + .set_dirty_tracking = intel_iommu_set_dirty_tracking, + .read_and_clear_dirty = intel_iommu_read_and_clear_dirty, +}; + const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .hw_info = intel_iommu_hw_info, diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index e6a3e7065616..6a7244a1b8f1 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -48,6 +48,9 @@ #define DMA_FL_PTE_DIRTY BIT_ULL(6) #define DMA_FL_PTE_XD BIT_ULL(63) +#define DMA_SL_PTE_DIRTY_BIT 9 +#define DMA_SL_PTE_DIRTY BIT_ULL(DMA_SL_PTE_DIRTY_BIT) + #define ADDR_WIDTH_5LEVEL (57) #define ADDR_WIDTH_4LEVEL (48) @@ -539,6 +542,8 @@ enum { #define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap)) #define pasid_supported(iommu) (sm_supported(iommu) && \ ecap_pasid((iommu)->ecap)) +#define ssads_supported(iommu) (sm_supported(iommu) && \ + ecap_slads((iommu)->ecap)) struct pasid_entry; struct pasid_state_entry; @@ -595,6 +600,7 @@ struct dmar_domain { u8 has_mappings:1; /* Has mappings configured through * iommu_map() interface. 
*/ + u8 dirty_tracking:1; /* Dirty tracking is enabled */ spinlock_t lock; /* Protect device tracking lists */ struct list_head devices; /* all devices' list */ @@ -784,6 +790,16 @@ static inline bool dma_pte_present(struct dma_pte *pte) return (pte->val & 3) != 0; } +static inline bool dma_sl_pte_test_and_clear_dirty(struct dma_pte *pte, + unsigned long flags) +{ + if (flags & IOMMU_DIRTY_NO_CLEAR) + return (pte->val & DMA_SL_PTE_DIRTY) != 0; + + return test_and_clear_bit(DMA_SL_PTE_DIRTY_BIT, + (unsigned long *)&pte->val); +} + static inline bool dma_pte_superpage(struct dma_pte *pte) { return (pte->val & DMA_PTE_LARGE_PAGE); diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 8faa93cffac4..06ea2dd53542 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -277,6 +277,11 @@ static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits) WRITE_ONCE(*ptr, (old & ~mask) | bits); } +static inline u64 pasid_get_bits(u64 *ptr) +{ + return READ_ONCE(*ptr); +} + /* * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode * PASID entry. @@ -335,6 +340,36 @@ static inline void pasid_set_fault_enable(struct pasid_entry *pe) pasid_set_bits(&pe->val[0], 1 << 1, 0); } +/* + * Enable second level A/D bits by setting the SLADE (Second Level + * Access Dirty Enable) field (Bit 9) of a scalable mode PASID + * entry. + */ +static inline void pasid_set_ssade(struct pasid_entry *pe) +{ + pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9); +} + +/* + * Disable second level A/D bits by clearing the SLADE (Second Level + * Access Dirty Enable) field (Bit 9) of a scalable mode PASID + * entry. + */ +static inline void pasid_clear_ssade(struct pasid_entry *pe) +{ + pasid_set_bits(&pe->val[0], 1 << 9, 0); +} + +/* + * Checks if second level A/D bits specifically the SLADE (Second Level + * Access Dirty Enable) field (Bit 9) of a scalable mode PASID + * entry is set. 
+ */ +static inline bool pasid_get_ssade(struct pasid_entry *pe) +{ + return pasid_get_bits(&pe->val[0]) & (1 << 9); +} + /* * Setup the WPE(Write Protect Enable) field (Bit 132) of a * scalable mode PASID entry. @@ -630,6 +665,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY); pasid_set_fault_enable(pte); pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); + if (domain->dirty_tracking) + pasid_set_ssade(pte); pasid_set_present(pte); spin_unlock(&iommu->lock); @@ -639,6 +676,78 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, return 0; } +/* + * Set up dirty tracking on a second only or nested translation type. + */ +int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, + struct dmar_domain *domain, + struct device *dev, u32 pasid, + bool enabled) +{ + struct pasid_entry *pte; + u16 did, pgtt; + + spin_lock(&iommu->lock); + + pte = intel_pasid_get_entry(dev, pasid); + if (!pte) { + spin_unlock(&iommu->lock); + dev_err_ratelimited( + dev, "Failed to get pasid entry of PASID %d\n", pasid); + return -ENODEV; + } + + did = domain_id_iommu(domain, iommu); + pgtt = pasid_pte_get_pgtt(pte); + if (pgtt != PASID_ENTRY_PGTT_SL_ONLY && + pgtt != PASID_ENTRY_PGTT_NESTED) { + spin_unlock(&iommu->lock); + dev_err_ratelimited( + dev, + "Dirty tracking not supported on translation type %d\n", + pgtt); + return -EOPNOTSUPP; + } + + if (pasid_get_ssade(pte) == enabled) { + spin_unlock(&iommu->lock); + return 0; + } + + if (enabled) + pasid_set_ssade(pte); + else + pasid_clear_ssade(pte); + spin_unlock(&iommu->lock); + + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(pte, sizeof(*pte)); + + /* + * From VT-d spec table 25 "Guidance to Software for Invalidations": + * + * - PASID-selective-within-Domain PASID-cache invalidation + * If (PGTT=SS or Nested) + * - Domain-selective IOTLB invalidation + * Else + * - PASID-selective PASID-based IOTLB invalidation + * - If (pasid is 
RID_PASID) + * - Global Device-TLB invalidation to affected functions + * Else + * - PASID-based Device-TLB invalidation (with S=1 and + * Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions + */ + pasid_cache_invalidation_with_pasid(iommu, did, pasid); + + iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); + + /* Device IOTLB doesn't need to be flushed in caching mode. */ + if (!cap_caching_mode(iommu->cap)) + devtlb_invalidation_with_pasid(iommu, dev, pasid); + + return 0; +} + /* * Set up the scalable mode pasid entry for passthrough translation type. */ diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index 4e9e68c3c388..958050b093aa 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -106,6 +106,10 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu, int intel_pasid_setup_second_level(struct intel_iommu *iommu, struct dmar_domain *domain, struct device *dev, u32 pasid); +int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, + struct dmar_domain *domain, + struct device *dev, u32 pasid, + bool enabled); int intel_pasid_setup_pass_through(struct intel_iommu *iommu, struct dmar_domain *domain, struct device *dev, u32 pasid); -- Gitee From d81e0247d7eff9dceec13d57ab59c349a4cc0018 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:51:04 +0100 Subject: [PATCH 1012/2138] iommufd/selftest: Expand mock_domain with dev_flags ANBZ: #9185 commit e04b23c8d4ed977dbab4a4159f9e4d9a878b5c65 upstream. Intel-SIG: commit e04b23c8d4ed iommufd/selftest: Expand mock_domain with dev_flags Backport to support Intel QAT live migration for in-tree driver Expand mock_domain test to be able to manipulate the device capabilities. This allows testing with mockdev without dirty tracking support advertised and thus make sure enforce_dirty test does the expected. To avoid breaking IOMMUFD_TEST UABI replicate the mock_domain struct and thus add an input dev_flags at the end. 
Link: https://lore.kernel.org/r/20231024135109.73787-14-joao.m.martins@oracle.com Signed-off-by: Joao Martins Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/iommufd_test.h | 12 ++++++++ drivers/iommu/iommufd/selftest.c | 11 +++++-- tools/testing/selftests/iommu/iommufd_utils.h | 29 +++++++++++++++++++ 3 files changed, 50 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h index 3f3644375bf1..9817edcd8968 100644 --- a/drivers/iommu/iommufd/iommufd_test.h +++ b/drivers/iommu/iommufd/iommufd_test.h @@ -19,6 +19,7 @@ enum { IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT, IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE, IOMMU_TEST_OP_ACCESS_REPLACE_IOAS, + IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS, }; enum { @@ -40,6 +41,10 @@ enum { MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES = 1 << 0, }; +enum { + MOCK_FLAGS_DEVICE_NO_DIRTY = 1 << 0, +}; + struct iommu_test_cmd { __u32 size; __u32 op; @@ -56,6 +61,13 @@ struct iommu_test_cmd { /* out_idev_id is the standard iommufd_bind object */ __u32 out_idev_id; } mock_domain; + struct { + __u32 out_stdev_id; + __u32 out_hwpt_id; + __u32 out_idev_id; + /* Expand mock_domain to set mock device flags */ + __u32 dev_flags; + } mock_domain_flags; struct { __u32 pt_id; } mock_domain_replace; diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 049174c8fa02..8299fe9cbf2b 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -111,6 +111,7 @@ enum selftest_obj_type { struct mock_dev { struct device dev; + unsigned long flags; }; struct selftest_obj { @@ -396,7 +397,7 @@ static void mock_dev_release(struct device *dev) kfree(mdev); } -static struct mock_dev *mock_dev_create(void) +static struct mock_dev *mock_dev_create(unsigned long dev_flags) { struct mock_dev *mdev; int rc; 
@@ -406,6 +407,7 @@ static struct mock_dev *mock_dev_create(void) return ERR_PTR(-ENOMEM); device_initialize(&mdev->dev); + mdev->flags = dev_flags; mdev->dev.release = mock_dev_release; mdev->dev.bus = &iommufd_mock_bus_type.bus; @@ -441,6 +443,7 @@ static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd, struct iommufd_device *idev; struct selftest_obj *sobj; u32 pt_id = cmd->id; + u32 dev_flags = 0; u32 idev_id; int rc; @@ -451,7 +454,10 @@ static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd, sobj->idev.ictx = ucmd->ictx; sobj->type = TYPE_IDEV; - sobj->idev.mock_dev = mock_dev_create(); + if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS) + dev_flags = cmd->mock_domain_flags.dev_flags; + + sobj->idev.mock_dev = mock_dev_create(dev_flags); if (IS_ERR(sobj->idev.mock_dev)) { rc = PTR_ERR(sobj->idev.mock_dev); goto out_sobj; @@ -1034,6 +1040,7 @@ int iommufd_test(struct iommufd_ucmd *ucmd) cmd->add_reserved.start, cmd->add_reserved.length); case IOMMU_TEST_OP_MOCK_DOMAIN: + case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS: return iommufd_test_mock_domain(ucmd, cmd); case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE: return iommufd_test_mock_domain_replace( diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h index be4970a84977..1e0736adc991 100644 --- a/tools/testing/selftests/iommu/iommufd_utils.h +++ b/tools/testing/selftests/iommu/iommufd_utils.h @@ -74,6 +74,35 @@ static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id, EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \ stdev_id, hwpt_id, NULL)) +static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id, + __u32 stdev_flags, __u32 *stdev_id, + __u32 *hwpt_id, __u32 *idev_id) +{ + struct iommu_test_cmd cmd = { + .size = sizeof(cmd), + .op = IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS, + .id = ioas_id, + .mock_domain_flags = { .dev_flags = stdev_flags }, + }; + int ret; + + ret = ioctl(fd, IOMMU_TEST_CMD, &cmd); + if (ret) + return 
ret; + if (stdev_id) + *stdev_id = cmd.mock_domain_flags.out_stdev_id; + assert(cmd.id != 0); + if (hwpt_id) + *hwpt_id = cmd.mock_domain_flags.out_hwpt_id; + if (idev_id) + *idev_id = cmd.mock_domain_flags.out_idev_id; + return 0; +} +#define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \ + EXPECT_ERRNO(_errno, \ + _test_cmd_mock_domain_flags(self->fd, ioas_id, flags, \ + stdev_id, hwpt_id, NULL)) + static int _test_cmd_mock_domain_replace(int fd, __u32 stdev_id, __u32 pt_id, __u32 *hwpt_id) { -- Gitee From 4d4817a4575a8eb6fcb8429ddd4ef7e02cbfbf8d Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:51:05 +0100 Subject: [PATCH 1013/2138] iommufd/selftest: Test IOMMU_HWPT_ALLOC_DIRTY_TRACKING ANBZ: #9185 commit 266ce58989ba05e2a24460fdbf402d766c2e3870 upstream. Intel-SIG: commit 266ce58989ba iommufd/selftest: Test IOMMU_HWPT_ALLOC_DIRTY_TRACKING Backport to support Intel QAT live migration for in-tree driver In order to selftest the iommu domain dirty enforcing implement the mock_domain necessary support and add a new dev_flags to test that the hwpt_alloc/attach_device fails as expected. Expand the existing mock_domain fixture with a enforce_dirty test that exercises the hwpt_alloc and device attachment. 
Link: https://lore.kernel.org/r/20231024135109.73787-15-joao.m.martins@oracle.com Signed-off-by: Joao Martins Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/selftest.c | 37 +++++++++++++- tools/testing/selftests/iommu/iommufd.c | 49 +++++++++++++++++++ tools/testing/selftests/iommu/iommufd_utils.h | 3 ++ 3 files changed, 88 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 8299fe9cbf2b..c5b6c21f6637 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -134,6 +134,11 @@ static void mock_domain_blocking_free(struct iommu_domain *domain) static int mock_domain_nop_attach(struct iommu_domain *domain, struct device *dev) { + struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); + + if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY)) + return -EINVAL; + return 0; } @@ -162,6 +167,25 @@ static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type) return info; } +static int mock_domain_set_dirty_tracking(struct iommu_domain *domain, + bool enable) +{ + return 0; +} + +static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain, + unsigned long iova, size_t size, + unsigned long flags, + struct iommu_dirty_bitmap *dirty) +{ + return 0; +} + +const struct iommu_dirty_ops dirty_ops = { + .set_dirty_tracking = mock_domain_set_dirty_tracking, + .read_and_clear_dirty = mock_domain_read_and_clear_dirty, +}; + static const struct iommu_ops mock_ops; static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type) @@ -189,12 +213,20 @@ static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type) static struct iommu_domain * mock_domain_alloc_user(struct device *dev, u32 flags) { + struct mock_dev *mdev = container_of(dev, struct 
mock_dev, dev); struct iommu_domain *domain; - if (flags & (~IOMMU_HWPT_ALLOC_NEST_PARENT)) + if (flags & + (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING))) + return ERR_PTR(-EOPNOTSUPP); + + if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) && + (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY)) return ERR_PTR(-EOPNOTSUPP); domain = mock_domain_alloc(IOMMU_DOMAIN_UNMANAGED); + if (domain && !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY)) + domain->dirty_ops = &dirty_ops; if (!domain) domain = ERR_PTR(-ENOMEM); return domain; @@ -402,6 +434,9 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags) struct mock_dev *mdev; int rc; + if (dev_flags & ~(MOCK_FLAGS_DEVICE_NO_DIRTY)) + return ERR_PTR(-EINVAL); + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) return ERR_PTR(-ENOMEM); diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index 9b6c4d177586..08aed07617ad 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -1433,6 +1433,55 @@ TEST_F(iommufd_mock_domain, alloc_hwpt) } } +FIXTURE(iommufd_dirty_tracking) +{ + int fd; + uint32_t ioas_id; + uint32_t hwpt_id; + uint32_t stdev_id; + uint32_t idev_id; +}; + +FIXTURE_SETUP(iommufd_dirty_tracking) +{ + self->fd = open("/dev/iommu", O_RDWR); + ASSERT_NE(-1, self->fd); + + test_ioctl_ioas_alloc(&self->ioas_id); + test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id, + &self->idev_id); +} + +FIXTURE_TEARDOWN(iommufd_dirty_tracking) +{ + teardown_iommufd(self->fd, _metadata); +} + +TEST_F(iommufd_dirty_tracking, enforce_dirty) +{ + uint32_t ioas_id, stddev_id, idev_id; + uint32_t hwpt_id, _hwpt_id; + uint32_t dev_flags; + + /* Regular case */ + dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY; + test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, 
&stddev_id, + NULL); + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); + + /* IOMMU device does not support dirty tracking */ + test_ioctl_ioas_alloc(&ioas_id); + test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id, + &idev_id); + test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_ioctl_destroy(stddev_id); +} + /* VFIO compatibility IOCTLs */ TEST_F(iommufd, simple_ioctls) diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h index 1e0736adc991..4ddafa29e638 100644 --- a/tools/testing/selftests/iommu/iommufd_utils.h +++ b/tools/testing/selftests/iommu/iommufd_utils.h @@ -98,6 +98,9 @@ static int _test_cmd_mock_domain_flags(int fd, unsigned int ioas_id, *idev_id = cmd.mock_domain_flags.out_idev_id; return 0; } +#define test_cmd_mock_domain_flags(ioas_id, flags, stdev_id, hwpt_id, idev_id) \ + ASSERT_EQ(0, _test_cmd_mock_domain_flags(self->fd, ioas_id, flags, \ + stdev_id, hwpt_id, idev_id)) #define test_err_mock_domain_flags(_errno, ioas_id, flags, stdev_id, hwpt_id) \ EXPECT_ERRNO(_errno, \ _test_cmd_mock_domain_flags(self->fd, ioas_id, flags, \ -- Gitee From f3102d3238f697d907ff06c3d4c87a2611502fcf Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:51:06 +0100 Subject: [PATCH 1014/2138] iommufd/selftest: Test IOMMU_HWPT_SET_DIRTY_TRACKING ANBZ: #9185 commit 7adf267d66d1d737ea8318976fd1ce93733fd3a4 upstream. Intel-SIG: commit 7adf267d66d1 iommufd/selftest: Test IOMMU_HWPT_SET_DIRTY_TRACKING Backport to support Intel QAT live migration for in-tree driver Change mock_domain to supporting dirty tracking and add tests to exercise the new SET_DIRTY_TRACKING API in the iommufd_dirty_tracking selftest fixture. 
Link: https://lore.kernel.org/r/20231024135109.73787-16-joao.m.martins@oracle.com Signed-off-by: Joao Martins Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/selftest.c | 16 ++++++++++++++++ tools/testing/selftests/iommu/iommufd.c | 15 +++++++++++++++ tools/testing/selftests/iommu/iommufd_utils.h | 17 +++++++++++++++++ 3 files changed, 48 insertions(+) diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index c5b6c21f6637..578b19afc864 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -24,6 +24,7 @@ static struct platform_device *selftest_iommu_dev; size_t iommufd_test_memory_limit = 65536; enum { + MOCK_DIRTY_TRACK = 1, MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2, /* @@ -101,6 +102,7 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd, } struct mock_iommu_domain { + unsigned long flags; struct iommu_domain domain; struct xarray pfns; }; @@ -170,6 +172,20 @@ static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type) static int mock_domain_set_dirty_tracking(struct iommu_domain *domain, bool enable) { + struct mock_iommu_domain *mock = + container_of(domain, struct mock_iommu_domain, domain); + unsigned long flags = mock->flags; + + if (enable && !domain->dirty_ops) + return -EINVAL; + + /* No change? */ + if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK))) + return 0; + + flags = (enable ? 
flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK); + + mock->flags = flags; return 0; } diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index 08aed07617ad..8a060961907a 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -1482,6 +1482,21 @@ TEST_F(iommufd_dirty_tracking, enforce_dirty) test_ioctl_destroy(stddev_id); } +TEST_F(iommufd_dirty_tracking, set_dirty_tracking) +{ + uint32_t stddev_id; + uint32_t hwpt_id; + + test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_cmd_set_dirty_tracking(hwpt_id, true); + test_cmd_set_dirty_tracking(hwpt_id, false); + + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); +} + /* VFIO compatibility IOCTLs */ TEST_F(iommufd, simple_ioctls) diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h index 4ddafa29e638..e37af6291b22 100644 --- a/tools/testing/selftests/iommu/iommufd_utils.h +++ b/tools/testing/selftests/iommu/iommufd_utils.h @@ -179,6 +179,23 @@ static int _test_cmd_access_replace_ioas(int fd, __u32 access_id, #define test_cmd_access_replace_ioas(access_id, ioas_id) \ ASSERT_EQ(0, _test_cmd_access_replace_ioas(self->fd, access_id, ioas_id)) +static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled) +{ + struct iommu_hwpt_set_dirty_tracking cmd = { + .size = sizeof(cmd), + .flags = enabled ? 
IOMMU_HWPT_DIRTY_TRACKING_ENABLE : 0, + .hwpt_id = hwpt_id, + }; + int ret; + + ret = ioctl(fd, IOMMU_HWPT_SET_DIRTY_TRACKING, &cmd); + if (ret) + return -errno; + return 0; +} +#define test_cmd_set_dirty_tracking(hwpt_id, enabled) \ + ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled)) + static int _test_cmd_create_access(int fd, unsigned int ioas_id, __u32 *access_id, unsigned int flags) { -- Gitee From e3400382a03c24b3456a24ef554130de74b272e6 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:51:07 +0100 Subject: [PATCH 1015/2138] iommufd/selftest: Test IOMMU_HWPT_GET_DIRTY_BITMAP ANBZ: #9185 commit a9af47e382a4d517685cb13c780272e7f300ebc5 upstream. Intel-SIG: commit a9af47e382a4 iommufd/selftest: Test IOMMU_HWPT_GET_DIRTY_BITMAP Backport to support Intel QAT live migration for in-tree driver Add a new test ioctl for simulating the dirty IOVAs in the mock domain, and implement the mock iommu domain ops that get the dirty tracking supported. The selftest exercises the usual main workflow of: 1) Setting dirty tracking from the iommu domain 2) Read and clear dirty IOPTEs Different fixtures will test different IOVA range sizes, that exercise corner cases of the bitmaps. 
Link: https://lore.kernel.org/r/20231024135109.73787-17-joao.m.martins@oracle.com Signed-off-by: Joao Martins Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/iommufd_test.h | 9 ++ drivers/iommu/iommufd/selftest.c | 107 ++++++++++++++- tools/testing/selftests/iommu/iommufd.c | 96 +++++++++++++ tools/testing/selftests/iommu/iommufd_utils.h | 127 ++++++++++++++++++ 4 files changed, 334 insertions(+), 5 deletions(-) diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h index 9817edcd8968..1f2e93d3d4e8 100644 --- a/drivers/iommu/iommufd/iommufd_test.h +++ b/drivers/iommu/iommufd/iommufd_test.h @@ -20,6 +20,7 @@ enum { IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE, IOMMU_TEST_OP_ACCESS_REPLACE_IOAS, IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS, + IOMMU_TEST_OP_DIRTY, }; enum { @@ -107,6 +108,14 @@ struct iommu_test_cmd { struct { __u32 ioas_id; } access_replace_ioas; + struct { + __u32 flags; + __aligned_u64 iova; + __aligned_u64 length; + __aligned_u64 page_size; + __aligned_u64 uptr; + __aligned_u64 out_nr_dirty; + } dirty; }; __u32 last; }; diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 578b19afc864..eaed432e59f4 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -37,6 +37,7 @@ enum { _MOCK_PFN_START = MOCK_PFN_MASK + 1, MOCK_PFN_START_IOVA = _MOCK_PFN_START, MOCK_PFN_LAST_IOVA = _MOCK_PFN_START, + MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1, }; /* @@ -194,6 +195,31 @@ static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain, unsigned long flags, struct iommu_dirty_bitmap *dirty) { + struct mock_iommu_domain *mock = + container_of(domain, struct mock_iommu_domain, domain); + unsigned long i, max = size / MOCK_IO_PAGE_SIZE; + void *ent, *old; + + if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap) + return -EINVAL; + + 
for (i = 0; i < max; i++) { + unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE; + + ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE); + if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) { + unsigned long val; + + /* Clear dirty */ + val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA; + old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE, + xa_mk_value(val), GFP_KERNEL); + WARN_ON_ONCE(ent != old); + iommu_dirty_bitmap_record(dirty, cur, + MOCK_IO_PAGE_SIZE); + } + } + return 0; } @@ -325,7 +351,7 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain, for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) { ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE); - WARN_ON(!ent); + /* * iommufd generates unmaps that must be a strict * superset of the map's performend So every starting @@ -335,13 +361,13 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain, * passed to map_pages */ if (first) { - WARN_ON(!(xa_to_value(ent) & - MOCK_PFN_START_IOVA)); + WARN_ON(ent && !(xa_to_value(ent) & + MOCK_PFN_START_IOVA)); first = false; } if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize) - WARN_ON(!(xa_to_value(ent) & - MOCK_PFN_LAST_IOVA)); + WARN_ON(ent && !(xa_to_value(ent) & + MOCK_PFN_LAST_IOVA)); iova += MOCK_IO_PAGE_SIZE; ret += MOCK_IO_PAGE_SIZE; @@ -1068,6 +1094,71 @@ static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE); static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH == __IOMMUFD_ACCESS_RW_SLOW_PATH); +static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, + unsigned long iova, size_t length, + unsigned long page_size, void __user *uptr, + u32 flags) +{ + unsigned long bitmap_size, i, max = length / page_size; + struct iommu_test_cmd *cmd = ucmd->cmd; + struct iommufd_hw_pagetable *hwpt; + struct mock_iommu_domain *mock; + int rc, count = 0; + void *tmp; + + if (iova % page_size || length % page_size || !uptr) + return -EINVAL; + + hwpt = get_md_pagetable(ucmd, mockpt_id, &mock); 
+ if (IS_ERR(hwpt)) + return PTR_ERR(hwpt); + + if (!(mock->flags & MOCK_DIRTY_TRACK)) { + rc = -EINVAL; + goto out_put; + } + + bitmap_size = max / BITS_PER_BYTE; + + tmp = kvzalloc(bitmap_size, GFP_KERNEL_ACCOUNT); + if (!tmp) { + rc = -ENOMEM; + goto out_put; + } + + if (copy_from_user(tmp, uptr, bitmap_size)) { + rc = -EFAULT; + goto out_free; + } + + for (i = 0; i < max; i++) { + unsigned long cur = iova + i * page_size; + void *ent, *old; + + if (!test_bit(i, (unsigned long *)tmp)) + continue; + + ent = xa_load(&mock->pfns, cur / page_size); + if (ent) { + unsigned long val; + + val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA; + old = xa_store(&mock->pfns, cur / page_size, + xa_mk_value(val), GFP_KERNEL); + WARN_ON_ONCE(ent != old); + count++; + } + } + + cmd->dirty.out_nr_dirty = count; + rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); +out_free: + kvfree(tmp); +out_put: + iommufd_put_object(&hwpt->obj); + return rc; +} + void iommufd_selftest_destroy(struct iommufd_object *obj) { struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj); @@ -1133,6 +1224,12 @@ int iommufd_test(struct iommufd_ucmd *ucmd) return -EINVAL; iommufd_test_memory_limit = cmd->memory_limit.limit; return 0; + case IOMMU_TEST_OP_DIRTY: + return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova, + cmd->dirty.length, + cmd->dirty.page_size, + u64_to_user_ptr(cmd->dirty.uptr), + cmd->dirty.flags); default: return -EOPNOTSUPP; } diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index 8a060961907a..274d8dbd65a6 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -1440,13 +1440,47 @@ FIXTURE(iommufd_dirty_tracking) uint32_t hwpt_id; uint32_t stdev_id; uint32_t idev_id; + unsigned long page_size; + unsigned long bitmap_size; + void *bitmap; + void *buffer; +}; + +FIXTURE_VARIANT(iommufd_dirty_tracking) +{ + unsigned long buffer_size; }; FIXTURE_SETUP(iommufd_dirty_tracking) { + void *vrc; 
+ int rc; + self->fd = open("/dev/iommu", O_RDWR); ASSERT_NE(-1, self->fd); + rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size); + if (rc || !self->buffer) { + SKIP(return, "Skipping buffer_size=%lu due to errno=%d", + variant->buffer_size, rc); + } + + assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0); + vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0); + assert(vrc == self->buffer); + + self->page_size = MOCK_PAGE_SIZE; + self->bitmap_size = + variant->buffer_size / self->page_size / BITS_PER_BYTE; + + /* Provision with an extra (MOCK_PAGE_SIZE) for the unaligned case */ + rc = posix_memalign(&self->bitmap, PAGE_SIZE, + self->bitmap_size + MOCK_PAGE_SIZE); + assert(!rc); + assert(self->bitmap); + assert((uintptr_t)self->bitmap % PAGE_SIZE == 0); + test_ioctl_ioas_alloc(&self->ioas_id); test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id, &self->idev_id); @@ -1454,9 +1488,41 @@ FIXTURE_SETUP(iommufd_dirty_tracking) FIXTURE_TEARDOWN(iommufd_dirty_tracking) { + munmap(self->buffer, variant->buffer_size); + munmap(self->bitmap, self->bitmap_size); teardown_iommufd(self->fd, _metadata); } +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k) +{ + /* one u32 index bitmap */ + .buffer_size = 128UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256k) +{ + /* one u64 index bitmap */ + .buffer_size = 256UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty640k) +{ + /* two u64 index and trailing end bitmap */ + .buffer_size = 640UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M) +{ + /* 4K bitmap (128M IOVA range) */ + .buffer_size = 128UL * 1024UL * 1024UL, +}; + +FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M) +{ + /* 8K bitmap (256M IOVA range) */ + .buffer_size = 256UL * 1024UL * 1024UL, +}; + TEST_F(iommufd_dirty_tracking, enforce_dirty) { uint32_t ioas_id, 
stddev_id, idev_id; @@ -1497,6 +1563,36 @@ TEST_F(iommufd_dirty_tracking, set_dirty_tracking) test_ioctl_destroy(hwpt_id); } +TEST_F(iommufd_dirty_tracking, get_dirty_bitmap) +{ + uint32_t stddev_id; + uint32_t hwpt_id; + uint32_t ioas_id; + + test_ioctl_ioas_alloc(&ioas_id); + test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer, + variant->buffer_size, MOCK_APERTURE_START); + + test_cmd_hwpt_alloc(self->idev_id, ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + + test_cmd_set_dirty_tracking(hwpt_id, true); + + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, + self->bitmap, self->bitmap_size, _metadata); + + /* PAGE_SIZE unaligned bitmap */ + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, + self->bitmap + MOCK_PAGE_SIZE, + self->bitmap_size, _metadata); + + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); +} + /* VFIO compatibility IOCTLs */ TEST_F(iommufd, simple_ioctls) diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h index e37af6291b22..b129cf23b824 100644 --- a/tools/testing/selftests/iommu/iommufd_utils.h +++ b/tools/testing/selftests/iommu/iommufd_utils.h @@ -16,6 +16,25 @@ /* Hack to make assertions more readable */ #define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD +/* Imported from include/asm-generic/bitops/generic-non-atomic.h */ +#define BITS_PER_BYTE 8 +#define BITS_PER_LONG __BITS_PER_LONG +#define BIT_MASK(nr) (1UL << ((nr) % __BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / __BITS_PER_LONG) + +static inline void set_bit(unsigned int nr, unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p |= mask; +} + +static inline bool test_bit(unsigned int nr, unsigned long *addr) +{ + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG - 1))); +} + static void 
*buffer; static unsigned long BUFFER_SIZE; @@ -196,6 +215,103 @@ static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled) #define test_cmd_set_dirty_tracking(hwpt_id, enabled) \ ASSERT_EQ(0, _test_cmd_set_dirty_tracking(self->fd, hwpt_id, enabled)) +static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length, + __u64 iova, size_t page_size, + __u64 *bitmap) +{ + struct iommu_hwpt_get_dirty_bitmap cmd = { + .size = sizeof(cmd), + .hwpt_id = hwpt_id, + .iova = iova, + .length = length, + .page_size = page_size, + .data = (uintptr_t)bitmap, + }; + int ret; + + ret = ioctl(fd, IOMMU_HWPT_GET_DIRTY_BITMAP, &cmd); + if (ret) + return ret; + return 0; +} + +#define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, \ + bitmap) \ + ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \ + page_size, bitmap)) + +static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length, + __u64 iova, size_t page_size, + __u64 *bitmap, __u64 *dirty) +{ + struct iommu_test_cmd cmd = { + .size = sizeof(cmd), + .op = IOMMU_TEST_OP_DIRTY, + .id = hwpt_id, + .dirty = { + .iova = iova, + .length = length, + .page_size = page_size, + .uptr = (uintptr_t)bitmap, + } + }; + int ret; + + ret = ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY), &cmd); + if (ret) + return -ret; + if (dirty) + *dirty = cmd.dirty.out_nr_dirty; + return 0; +} + +#define test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, \ + bitmap, nr) \ + ASSERT_EQ(0, \ + _test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, \ + page_size, bitmap, nr)) + +static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length, + __u64 iova, size_t page_size, __u64 *bitmap, + __u64 bitmap_size, + struct __test_metadata *_metadata) +{ + unsigned long i, count, nbits = bitmap_size * BITS_PER_BYTE; + unsigned long nr = nbits / 2; + __u64 out_dirty = 0; + + /* Mark all even bits as dirty in the mock domain */ + for (count = 0, i = 0; i < 
nbits; count += !(i % 2), i++) + if (!(i % 2)) + set_bit(i, (unsigned long *)bitmap); + ASSERT_EQ(nr, count); + + test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, + bitmap, &out_dirty); + ASSERT_EQ(nr, out_dirty); + + /* Expect all even bits as dirty in the user bitmap */ + memset(bitmap, 0, bitmap_size); + test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap); + for (count = 0, i = 0; i < nbits; count += !(i % 2), i++) + ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *)bitmap)); + ASSERT_EQ(count, out_dirty); + + memset(bitmap, 0, bitmap_size); + test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap); + + /* It as read already -- expect all zeroes */ + for (i = 0; i < nbits; i++) + ASSERT_EQ(0, test_bit(i, (unsigned long *)bitmap)); + + return 0; +} +#define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, bitmap, \ + bitmap_size, _metadata) \ + ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \ + page_size, bitmap, bitmap_size, \ + _metadata)) + static int _test_cmd_create_access(int fd, unsigned int ioas_id, __u32 *access_id, unsigned int flags) { @@ -320,6 +436,17 @@ static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer, IOMMU_IOAS_MAP_READABLE)); \ }) +#define test_ioctl_ioas_map_fixed_id(ioas_id, buffer, length, iova) \ + ({ \ + __u64 __iova = iova; \ + ASSERT_EQ(0, \ + _test_ioctl_ioas_map( \ + self->fd, ioas_id, buffer, length, &__iova, \ + IOMMU_IOAS_MAP_FIXED_IOVA | \ + IOMMU_IOAS_MAP_WRITEABLE | \ + IOMMU_IOAS_MAP_READABLE)); \ + }) + #define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova) \ ({ \ __u64 __iova = iova; \ -- Gitee From d93e0b497639908062a229d4bf42bc0c85669f74 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:51:08 +0100 Subject: [PATCH 1016/2138] iommufd/selftest: Test out_capabilities in IOMMU_GET_HW_INFO ANBZ: #9185 commit ae36fe70cea4d7c177452ab41e6734fa3cbd4ad8 upstream. 
Intel-SIG: commit ae36fe70cea4 iommufd/selftest: Test out_capabilities in IOMMU_GET_HW_INFO Backport to support Intel QAT live migration for in-tree driver Enumerate the capabilities from the mock device and test whether it advertises as expected. Include it as part of the iommufd_dirty_tracking fixture. Link: https://lore.kernel.org/r/20231024135109.73787-18-joao.m.martins@oracle.com Signed-off-by: Joao Martins Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/selftest.c | 13 +++++++++- tools/testing/selftests/iommu/iommufd.c | 17 +++++++++++++ .../selftests/iommu/iommufd_fail_nth.c | 2 +- tools/testing/selftests/iommu/iommufd_utils.h | 24 ++++++++++++------- 4 files changed, 45 insertions(+), 11 deletions(-) diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index eaed432e59f4..b8a39f6e1777 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -391,7 +391,18 @@ static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain, static bool mock_domain_capable(struct device *dev, enum iommu_cap cap) { - return cap == IOMMU_CAP_CACHE_COHERENCY; + struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); + + switch (cap) { + case IOMMU_CAP_CACHE_COHERENCY: + return true; + case IOMMU_CAP_DIRTY_TRACKING: + return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY); + default: + break; + } + + return false; } static void mock_domain_set_plaform_dma_ops(struct device *dev) diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index 274d8dbd65a6..36fcc333b03e 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -1563,6 +1563,23 @@ TEST_F(iommufd_dirty_tracking, set_dirty_tracking) test_ioctl_destroy(hwpt_id); } +TEST_F(iommufd_dirty_tracking, device_dirty_capability) +{ + 
uint32_t caps = 0; + uint32_t stddev_id; + uint32_t hwpt_id; + + test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + test_cmd_get_hw_capabilities(self->idev_id, caps, + IOMMU_HW_CAP_DIRTY_TRACKING); + ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING, + caps & IOMMU_HW_CAP_DIRTY_TRACKING); + + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); +} + TEST_F(iommufd_dirty_tracking, get_dirty_bitmap) { uint32_t stddev_id; diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c index 3d7838506bfe..1fcd69cb0e41 100644 --- a/tools/testing/selftests/iommu/iommufd_fail_nth.c +++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c @@ -612,7 +612,7 @@ TEST_FAIL_NTH(basic_fail_nth, device) &idev_id)) return -1; - if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info))) + if (_test_cmd_get_hw_info(self->fd, idev_id, &info, sizeof(info), NULL)) return -1; if (_test_cmd_hwpt_alloc(self->fd, idev_id, ioas_id, 0, &hwpt_id)) diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h index b129cf23b824..2410d06f5a34 100644 --- a/tools/testing/selftests/iommu/iommufd_utils.h +++ b/tools/testing/selftests/iommu/iommufd_utils.h @@ -535,8 +535,8 @@ static void teardown_iommufd(int fd, struct __test_metadata *_metadata) #endif /* @data can be NULL */ -static int _test_cmd_get_hw_info(int fd, __u32 device_id, - void *data, size_t data_len) +static int _test_cmd_get_hw_info(int fd, __u32 device_id, void *data, + size_t data_len, uint32_t *capabilities) { struct iommu_test_hw_info *info = (struct iommu_test_hw_info *)data; struct iommu_hw_info cmd = { @@ -544,6 +544,7 @@ static int _test_cmd_get_hw_info(int fd, __u32 device_id, .dev_id = device_id, .data_len = data_len, .data_uptr = (uint64_t)data, + .out_capabilities = 0, }; int ret; @@ -580,14 +581,19 @@ static int _test_cmd_get_hw_info(int fd, __u32 
device_id, assert(!info->flags); } + if (capabilities) + *capabilities = cmd.out_capabilities; + return 0; } -#define test_cmd_get_hw_info(device_id, data, data_len) \ - ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, \ - data, data_len)) +#define test_cmd_get_hw_info(device_id, data, data_len) \ + ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, data, \ + data_len, NULL)) + +#define test_err_get_hw_info(_errno, device_id, data, data_len) \ + EXPECT_ERRNO(_errno, _test_cmd_get_hw_info(self->fd, device_id, data, \ + data_len, NULL)) -#define test_err_get_hw_info(_errno, device_id, data, data_len) \ - EXPECT_ERRNO(_errno, \ - _test_cmd_get_hw_info(self->fd, device_id, \ - data, data_len)) +#define test_cmd_get_hw_capabilities(device_id, caps, mask) \ + ASSERT_EQ(0, _test_cmd_get_hw_info(self->fd, device_id, NULL, 0, &caps)) -- Gitee From 1a87c5218d8b6e2275ca0b6f875ec33c6add0232 Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Tue, 24 Oct 2023 14:51:09 +0100 Subject: [PATCH 1017/2138] iommufd/selftest: Test IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR flag ANBZ: #9185 commit 0795b305da8902e7d092f90bf9a1a2c98f34b1db upstream. Intel-SIG: commit 0795b305da89 iommufd/selftest: Test IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR flag Backport to support Intel QAT live migration for in-tree driver Change test_mock_dirty_bitmaps() to pass a flag where it specifies the flag under test. The test does the same thing as the GET_DIRTY_BITMAP regular test. Except that it tests whether the dirtied bits are fetched all the same a second time, as opposed to observing them cleared. 
Link: https://lore.kernel.org/r/20231024135109.73787-19-joao.m.martins@oracle.com Signed-off-by: Joao Martins Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/selftest.c | 15 +++++--- tools/testing/selftests/iommu/iommufd.c | 38 ++++++++++++++++++- tools/testing/selftests/iommu/iommufd_utils.h | 26 ++++++++----- 3 files changed, 61 insertions(+), 18 deletions(-) diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index b8a39f6e1777..2b707eeb7cb3 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -208,13 +208,16 @@ static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain, ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE); if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) { - unsigned long val; - /* Clear dirty */ - val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA; - old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE, - xa_mk_value(val), GFP_KERNEL); - WARN_ON_ONCE(ent != old); + if (!(flags & IOMMU_DIRTY_NO_CLEAR)) { + unsigned long val; + + val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA; + old = xa_store(&mock->pfns, + cur / MOCK_IO_PAGE_SIZE, + xa_mk_value(val), GFP_KERNEL); + WARN_ON_ONCE(ent != old); + } iommu_dirty_bitmap_record(dirty, cur, MOCK_IO_PAGE_SIZE); } diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c index 36fcc333b03e..ae2101c6d640 100644 --- a/tools/testing/selftests/iommu/iommufd.c +++ b/tools/testing/selftests/iommu/iommufd.c @@ -1598,13 +1598,47 @@ TEST_F(iommufd_dirty_tracking, get_dirty_bitmap) test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, MOCK_APERTURE_START, self->page_size, - self->bitmap, self->bitmap_size, _metadata); + self->bitmap, self->bitmap_size, 0, _metadata); /* PAGE_SIZE unaligned bitmap */ test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, 
MOCK_APERTURE_START, self->page_size, self->bitmap + MOCK_PAGE_SIZE, - self->bitmap_size, _metadata); + self->bitmap_size, 0, _metadata); + + test_ioctl_destroy(stddev_id); + test_ioctl_destroy(hwpt_id); +} + +TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear) +{ + uint32_t stddev_id; + uint32_t hwpt_id; + uint32_t ioas_id; + + test_ioctl_ioas_alloc(&ioas_id); + test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer, + variant->buffer_size, MOCK_APERTURE_START); + + test_cmd_hwpt_alloc(self->idev_id, ioas_id, + IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id); + test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL); + + test_cmd_set_dirty_tracking(hwpt_id, true); + + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, + self->bitmap, self->bitmap_size, + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR, + _metadata); + + /* Unaligned bitmap */ + test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size, + MOCK_APERTURE_START, self->page_size, + self->bitmap + MOCK_PAGE_SIZE, + self->bitmap_size, + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR, + _metadata); test_ioctl_destroy(stddev_id); test_ioctl_destroy(hwpt_id); diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h index 2410d06f5a34..e263bf80a977 100644 --- a/tools/testing/selftests/iommu/iommufd_utils.h +++ b/tools/testing/selftests/iommu/iommufd_utils.h @@ -217,11 +217,12 @@ static int _test_cmd_set_dirty_tracking(int fd, __u32 hwpt_id, bool enabled) static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length, __u64 iova, size_t page_size, - __u64 *bitmap) + __u64 *bitmap, __u32 flags) { struct iommu_hwpt_get_dirty_bitmap cmd = { .size = sizeof(cmd), .hwpt_id = hwpt_id, + .flags = flags, .iova = iova, .length = length, .page_size = page_size, @@ -236,9 +237,9 @@ static int _test_cmd_get_dirty_bitmap(int fd, __u32 hwpt_id, size_t length, } #define test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, \ - bitmap) \ + 
bitmap, flags) \ ASSERT_EQ(0, _test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, \ - page_size, bitmap)) + page_size, bitmap, flags)) static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length, __u64 iova, size_t page_size, @@ -273,7 +274,7 @@ static int _test_cmd_mock_domain_set_dirty(int fd, __u32 hwpt_id, size_t length, static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length, __u64 iova, size_t page_size, __u64 *bitmap, - __u64 bitmap_size, + __u64 bitmap_size, __u32 flags, struct __test_metadata *_metadata) { unsigned long i, count, nbits = bitmap_size * BITS_PER_BYTE; @@ -292,25 +293,30 @@ static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length, /* Expect all even bits as dirty in the user bitmap */ memset(bitmap, 0, bitmap_size); - test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap); + test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap, + flags); for (count = 0, i = 0; i < nbits; count += !(i % 2), i++) ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *)bitmap)); ASSERT_EQ(count, out_dirty); memset(bitmap, 0, bitmap_size); - test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap); + test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap, + flags); /* It as read already -- expect all zeroes */ - for (i = 0; i < nbits; i++) - ASSERT_EQ(0, test_bit(i, (unsigned long *)bitmap)); + for (i = 0; i < nbits; i++) { + ASSERT_EQ(!(i % 2) && (flags & + IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR), + test_bit(i, (unsigned long *)bitmap)); + } return 0; } #define test_mock_dirty_bitmaps(hwpt_id, length, iova, page_size, bitmap, \ - bitmap_size, _metadata) \ + bitmap_size, flags, _metadata) \ ASSERT_EQ(0, _test_mock_dirty_bitmaps(self->fd, hwpt_id, length, iova, \ page_size, bitmap, bitmap_size, \ - _metadata)) + flags, _metadata)) static int _test_cmd_create_access(int fd, unsigned int ioas_id, __u32 *access_id, unsigned int flags) -- Gitee From 
8625afef8c1d2834a17d74a77ac5b96fb82e7f37 Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Tue, 24 Oct 2023 08:00:11 -0700 Subject: [PATCH 1018/2138] iommu/vt-d: Enhance capability check for nested parent domain allocation ANBZ: #9185 commit a2cdecdf9d234455fdfc8f539bbf5818711bc29d upstream. Intel-SIG: commit a2cdecdf9d23 iommu/vt-d: Enhance capability check for nested parent domain allocation Backport to support Intel QAT live migration for in-tree driver This adds the scalable mode check before allocating the nested parent domain as checking nested capability is not enough. User may turn off scalable mode which also means no nested support even if the hardware supports it. Fixes: c97d1b20d383 ("iommu/vt-d: Add domain_alloc_user op") Link: https://lore.kernel.org/r/20231024150011.44642-1-yi.l.liu@intel.com Signed-off-by: Yi Liu Reviewed-by: Lu Baolu Reviewed-by: Kevin Tian Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/intel/iommu.c | 2 +- drivers/iommu/intel/iommu.h | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 29d6a98e5892..ecf9c8a2fa40 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4091,7 +4091,7 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags) if (!iommu) return ERR_PTR(-ENODEV); - if ((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) && !ecap_nest(iommu->ecap)) + if ((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) && !nested_supported(iommu)) return ERR_PTR(-EOPNOTSUPP); dirty_tracking = (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING); diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index 6a7244a1b8f1..49ea164cb006 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -544,6 +544,8 @@ enum { ecap_pasid((iommu)->ecap)) #define ssads_supported(iommu) (sm_supported(iommu) && \ 
ecap_slads((iommu)->ecap)) +#define nested_supported(iommu) (sm_supported(iommu) && \ + ecap_nest((iommu)->ecap)) struct pasid_entry; struct pasid_state_entry; -- Gitee From cb45627de93f4794ede9ba3e902807baf37c13ca Mon Sep 17 00:00:00 2001 From: Joao Martins Date: Mon, 30 Oct 2023 11:34:46 +0000 Subject: [PATCH 1019/2138] iommufd/selftest: Fix page-size check in iommufd_test_dirty() ANBZ: #9185 commit 2e22aac3ea9cfc0ec3209c96644f60c1806a8117 upstream. Intel-SIG: commit 2e22aac3ea9c iommufd/selftest: Fix page-size check in iommufd_test_dirty() Backport to support Intel QAT live migration for in-tree driver iommufd_test_dirty()/IOMMU_TEST_OP_DIRTY sets the dirty bits in the mock domain implementation that the userspace side validates against what it obtains via the UAPI. However in introducing iommufd_test_dirty() it forgot to validate page_size being 0 leading to two possible divide-by-zero problems: one at the beginning when calculating @max and while calculating the IOVA in the XArray PFN tracking list. While at it, validate the length to require non-zero value as well, as we can't be allocating a 0-sized bitmap. 
Link: https://lore.kernel.org/r/20231030113446.7056-1-joao.m.martins@oracle.com Reported-by: syzbot+25dc7383c30ecdc83c38@syzkaller.appspotmail.com Closes: https://lore.kernel.org/linux-iommu/00000000000005f6aa0608b9220f@google.com/ Fixes: a9af47e382a4 ("iommufd/selftest: Test IOMMU_HWPT_GET_DIRTY_BITMAP") Signed-off-by: Joao Martins Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/iommufd/selftest.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 2b707eeb7cb3..22c60651d60f 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -1113,14 +1113,15 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, unsigned long page_size, void __user *uptr, u32 flags) { - unsigned long bitmap_size, i, max = length / page_size; + unsigned long bitmap_size, i, max; struct iommu_test_cmd *cmd = ucmd->cmd; struct iommufd_hw_pagetable *hwpt; struct mock_iommu_domain *mock; int rc, count = 0; void *tmp; - if (iova % page_size || length % page_size || !uptr) + if (!page_size || !length || iova % page_size || length % page_size || + !uptr) return -EINVAL; hwpt = get_md_pagetable(ucmd, mockpt_id, &mock); @@ -1132,6 +1133,7 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, goto out_put; } + max = length / page_size; bitmap_size = max / BITS_PER_BYTE; tmp = kvzalloc(bitmap_size, GFP_KERNEL_ACCOUNT); -- Gitee From d38145e93af71e6f141d4c131f1ded2ed356dca6 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 16 Nov 2023 16:52:15 +0000 Subject: [PATCH 1020/2138] iommufd/selftest: Fix _test_mock_dirty_bitmaps() ANBZ: #9185 commit 98594181944daa201481ad63242806beb7c89ff4 upstream. 
Intel-SIG: commit 98594181944d iommufd/selftest: Fix _test_mock_dirty_bitmaps() Backport to support Intel QAT live migration for in-tree driver The ASSERT_EQ() macro sneakily expands to two statements, so the loop here needs braces to ensure it captures both and actually terminates the test upon failure. Where these tests are currently failing on my arm64 machine, this reduces the number of logged lines from a rather unreasonable ~197,000 down to 10. While we're at it, we can also clean up the tautologous "count" calculations whose assertions can never fail unless mathematics and/or the C language become fundamentally broken. Fixes: a9af47e382a4 ("iommufd/selftest: Test IOMMU_HWPT_GET_DIRTY_BITMAP") Link: https://lore.kernel.org/r/90e083045243ef407dd592bb1deec89cd1f4ddf2.1700153535.git.robin.murphy@arm.com Signed-off-by: Robin Murphy Reviewed-by: Kevin Tian Reviewed-by: Joao Martins Tested-by: Joao Martins Signed-off-by: Jason Gunthorpe [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- tools/testing/selftests/iommu/iommufd_utils.h | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h index e263bf80a977..70d558e0f0c7 100644 --- a/tools/testing/selftests/iommu/iommufd_utils.h +++ b/tools/testing/selftests/iommu/iommufd_utils.h @@ -277,15 +277,13 @@ static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length, __u64 bitmap_size, __u32 flags, struct __test_metadata *_metadata) { - unsigned long i, count, nbits = bitmap_size * BITS_PER_BYTE; + unsigned long i, nbits = bitmap_size * BITS_PER_BYTE; unsigned long nr = nbits / 2; __u64 out_dirty = 0; /* Mark all even bits as dirty in the mock domain */ - for (count = 0, i = 0; i < nbits; count += !(i % 2), i++) - if (!(i % 2)) - set_bit(i, (unsigned long *)bitmap); - ASSERT_EQ(nr, count); + for (i = 0; i 
< nbits; i += 2) + set_bit(i, (unsigned long *)bitmap); test_cmd_mock_domain_set_dirty(fd, hwpt_id, length, iova, page_size, bitmap, &out_dirty); @@ -295,9 +293,10 @@ static int _test_mock_dirty_bitmaps(int fd, __u32 hwpt_id, size_t length, memset(bitmap, 0, bitmap_size); test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap, flags); - for (count = 0, i = 0; i < nbits; count += !(i % 2), i++) + /* Beware ASSERT_EQ() is two statements -- braces are not redundant! */ + for (i = 0; i < nbits; i++) { ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *)bitmap)); - ASSERT_EQ(count, out_dirty); + } memset(bitmap, 0, bitmap_size); test_cmd_get_dirty_bitmap(fd, hwpt_id, length, iova, page_size, bitmap, -- Gitee From 2dcbe63d462df48c1d85b9c125be19a551e45c28 Mon Sep 17 00:00:00 2001 From: Kunwu Chan Date: Wed, 22 Nov 2023 11:26:08 +0800 Subject: [PATCH 1021/2138] iommu/vt-d: Set variable intel_dirty_ops to static ANBZ: #9185 commit e378c7de74620051c3be899a8c2506c25d23049d upstream. Intel-SIG: commit e378c7de7462 iommu/vt-d: Set variable intel_dirty_ops to static Backport to support Intel QAT live migration for in-tree driver Fix the following warning: drivers/iommu/intel/iommu.c:302:30: warning: symbol 'intel_dirty_ops' was not declared. Should it be static? This variable is only used in its defining file, so it should be static. 
Fixes: f35f22cc760e ("iommu/vt-d: Access/Dirty bit support for SS domains") Signed-off-by: Kunwu Chan Reviewed-by: Jason Gunthorpe Reviewed-by: Joao Martins Link: https://lore.kernel.org/r/20231120101025.1103404-1-chentao@kylinos.cn Signed-off-by: Lu Baolu Signed-off-by: Joerg Roedel [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/iommu/intel/iommu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index ecf9c8a2fa40..7f63a60337ce 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -300,7 +300,7 @@ static int iommu_skip_te_disable; #define IDENTMAP_AZALIA 4 const struct iommu_ops intel_iommu_ops; -const struct iommu_dirty_ops intel_dirty_ops; +static const struct iommu_dirty_ops intel_dirty_ops; static bool translation_pre_enabled(struct intel_iommu *iommu) { @@ -4931,7 +4931,7 @@ static int intel_iommu_read_and_clear_dirty(struct iommu_domain *domain, return 0; } -const struct iommu_dirty_ops intel_dirty_ops = { +static const struct iommu_dirty_ops intel_dirty_ops = { .set_dirty_tracking = intel_iommu_set_dirty_tracking, .read_and_clear_dirty = intel_iommu_read_and_clear_dirty, }; -- Gitee From 18dd35b54eff6ae7525a0c67168f4d7664165496 Mon Sep 17 00:00:00 2001 From: Aichun Shi Date: Wed, 29 May 2024 07:32:49 +0800 Subject: [PATCH 1022/2138] x86: configs: Add kernel config required for IOMMUFD Dirty Tracking ANBZ: #9185 Intel-SIG: no upstream x86: configs: Add kernel config required for IOMMUFD Dirty Tracking Backport to support Intel QAT live migration for in-tree driver Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- arch/x86/configs/anolis-debug_defconfig | 2 +- arch/x86/configs/anolis_defconfig | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index 621d8c552839..ef3c7817cadf 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig @@ -6010,7 +6010,7 @@ CONFIG_INTEL_IOMMU_SVM=y CONFIG_INTEL_IOMMU_FLOPPY_WA=y CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y CONFIG_INTEL_IOMMU_PERF_EVENTS=y -# CONFIG_IOMMUFD is not set +CONFIG_IOMMUFD=m CONFIG_IRQ_REMAP=y CONFIG_HYPERV_IOMMU=y # CONFIG_VIRTIO_IOMMU is not set diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index d150c33702a7..24234079ecce 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -6003,7 +6003,7 @@ CONFIG_INTEL_IOMMU_SVM=y CONFIG_INTEL_IOMMU_FLOPPY_WA=y CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y CONFIG_INTEL_IOMMU_PERF_EVENTS=y -# CONFIG_IOMMUFD is not set +CONFIG_IOMMUFD=m CONFIG_IRQ_REMAP=y CONFIG_HYPERV_IOMMU=y # CONFIG_VIRTIO_IOMMU is not set -- Gitee From 0af7a6abe6d5b8991656d33ae79bca4e4c241564 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Wed, 6 Mar 2024 21:58:46 +0800 Subject: [PATCH 1023/2138] crypto: qat - adf_get_etr_base() helper ANBZ: #9185 commit 1894cb1de656cfde345c3b2690e379be1eb9db96 upstream. Intel-SIG: commit 1894cb1de656 crypto: qat - adf_get_etr_base() helper Backport to support Intel QAT live migration for in-tree driver Add and use the new helper function adf_get_etr_base() which retrieves the virtual address of the ring bar. This will be used extensively when adding support for Live Migration. 
Signed-off-by: Giovanni Cabiddu Signed-off-by: Xin Zeng Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/crypto/intel/qat/qat_common/adf_common_drv.h | 10 ++++++++++ drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 4 +--- drivers/crypto/intel/qat/qat_common/adf_transport.c | 4 +--- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 57328249c89e..3bec9e20bad0 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -248,6 +248,16 @@ static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev) return pmisc->virt_addr; } +static inline void __iomem *adf_get_etr_base(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *etr; + + etr = &GET_BARS(accel_dev)[hw_data->get_etr_bar_id(hw_data)]; + + return etr->virt_addr; +} + static inline void __iomem *adf_get_aram_base(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index d28e1921940a..b8a6d24f791f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -321,8 +321,7 @@ static int reset_ring_pair(void __iomem *csr, u32 bank_number) int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; - u32 etr_bar_id = hw_data->get_etr_bar_id(hw_data); - void __iomem *csr; + void __iomem *csr = adf_get_etr_base(accel_dev); int ret; if (bank_number >= hw_data->num_banks) @@ -331,7 +330,6 @@ int 
adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) dev_dbg(&GET_DEV(accel_dev), "ring pair reset for bank:%d\n", bank_number); - csr = (&GET_BARS(accel_dev)[etr_bar_id])->virt_addr; ret = reset_ring_pair(csr, bank_number); if (ret) dev_err(&GET_DEV(accel_dev), diff --git a/drivers/crypto/intel/qat/qat_common/adf_transport.c b/drivers/crypto/intel/qat/qat_common/adf_transport.c index 630d0483c4e0..1efdf46490f1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_transport.c +++ b/drivers/crypto/intel/qat/qat_common/adf_transport.c @@ -474,7 +474,6 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev, int adf_init_etr_data(struct adf_accel_dev *accel_dev) { struct adf_etr_data *etr_data; - struct adf_hw_device_data *hw_data = accel_dev->hw_device; void __iomem *csr_addr; u32 size; u32 num_banks = 0; @@ -495,8 +494,7 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev) } accel_dev->transport = etr_data; - i = hw_data->get_etr_bar_id(hw_data); - csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr; + csr_addr = adf_get_etr_base(accel_dev); /* accel_dev->debugfs_dir should always be non-NULL here */ etr_data->debug = debugfs_create_dir("transport", -- Gitee From fcb9d2188054c6645cc4f7b5fe49af726f095c69 Mon Sep 17 00:00:00 2001 From: Xin Zeng Date: Wed, 6 Mar 2024 21:58:47 +0800 Subject: [PATCH 1024/2138] crypto: qat - relocate and rename 4xxx PF2VM definitions ANBZ: #9185 commit 1f8d6a163c20751629801c737a8cfd06f2002b4c upstream. Intel-SIG: commit 1f8d6a163c20 crypto: qat - relocate and rename 4xxx PF2VM definitions Backport to support Intel QAT live migration for in-tree driver Move and rename ADF_4XXX_PF2VM_OFFSET and ADF_4XXX_VM2PF_OFFSET to ADF_GEN4_PF2VM_OFFSET and ADF_GEN4_VM2PF_OFFSET respectively. These definitions are moved from adf_gen4_pfvf.c to adf_gen4_hw_data.h as they are specific to GEN4 and not just to qat_4xxx. This change is made in anticipation of their use in live migration. 
This does not introduce any functional change. Signed-off-by: Xin Zeng Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 4 ++++ drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c | 8 +++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index c6e80df5a85a..c153f41162ec 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -197,6 +197,10 @@ do { \ /* Arbiter threads mask with error value */ #define ADF_GEN4_ENA_THD_MASK_ERROR GENMASK(ADF_NUM_THREADS_PER_AE, 0) +/* PF2VM communication channel */ +#define ADF_GEN4_PF2VM_OFFSET(i) (0x40B010 + (i) * 0x20) +#define ADF_GEN4_VM2PF_OFFSET(i) (0x40B014 + (i) * 0x20) + void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); enum icp_qat_gen4_slice_mask { diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c index 8e8efe93f3ee..21474d402d09 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pfvf.c @@ -6,12 +6,10 @@ #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_gen4_pfvf.h" +#include "adf_gen4_hw_data.h" #include "adf_pfvf_pf_proto.h" #include "adf_pfvf_utils.h" -#define ADF_4XXX_PF2VM_OFFSET(i) (0x40B010 + ((i) * 0x20)) -#define ADF_4XXX_VM2PF_OFFSET(i) (0x40B014 + ((i) * 0x20)) - /* VF2PF interrupt source registers */ #define ADF_4XXX_VM2PF_SOU 0x41A180 #define ADF_4XXX_VM2PF_MSK 0x41A1C0 @@ -29,12 +27,12 @@ static const struct pfvf_csr_format csr_gen4_fmt = { static u32 adf_gen4_pf_get_pf2vf_offset(u32 i) { - return ADF_4XXX_PF2VM_OFFSET(i); + return 
ADF_GEN4_PF2VM_OFFSET(i); } static u32 adf_gen4_pf_get_vf2pf_offset(u32 i) { - return ADF_4XXX_VM2PF_OFFSET(i); + return ADF_GEN4_VM2PF_OFFSET(i); } static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask) -- Gitee From 1b86ef0cb1dd981700d2e01626dadfaa4b375985 Mon Sep 17 00:00:00 2001 From: Xin Zeng Date: Wed, 6 Mar 2024 21:58:48 +0800 Subject: [PATCH 1025/2138] crypto: qat - move PFVF compat checker to a function ANBZ: #9185 commit 867e801005e9e76f7ae2d143fed0da440150c64d upstream. Intel-SIG: commit 867e801005e9 crypto: qat - move PFVF compat checker to a function Backport to support Intel QAT live migration for in-tree driver Move the code that implements VF version compatibility on the PF side to a separate function so that it can be reused when doing VM live migration. This does not introduce any functional change. Signed-off-by: Xin Zeng Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- .../crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c | 8 +------- drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h | 11 +++++++++++ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c index 9ab93fbfefde..b9b5e744a3f1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c @@ -242,13 +242,7 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, "VersionRequest received from VF%d (vers %d) to PF (vers %d)\n", vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION); - if (vf_compat_ver == 0) - compat = ADF_PF2VF_VF_INCOMPATIBLE; - else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION) - compat = ADF_PF2VF_VF_COMPATIBLE; - else - compat = ADF_PF2VF_VF_COMPAT_UNKNOWN; - + compat = 
adf_vf_compat_checker(vf_compat_ver); vf_info->vf_compat_ver = vf_compat_ver; resp->type = ADF_PF2VF_MSGTYPE_VERSION_RESP; diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h index 2be048e2287b..1a044297d873 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_utils.h @@ -28,4 +28,15 @@ u32 adf_pfvf_csr_msg_of(struct adf_accel_dev *accel_dev, struct pfvf_message msg struct pfvf_message adf_pfvf_message_of(struct adf_accel_dev *accel_dev, u32 raw_msg, const struct pfvf_csr_format *fmt); +static inline u8 adf_vf_compat_checker(u8 vf_compat_ver) +{ + if (vf_compat_ver == 0) + return ADF_PF2VF_VF_INCOMPATIBLE; + + if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION) + return ADF_PF2VF_VF_COMPATIBLE; + + return ADF_PF2VF_VF_COMPAT_UNKNOWN; +} + #endif /* ADF_PFVF_UTILS_H */ -- Gitee From e73704b3ec5e5773b421b95e9b47504db35a40e9 Mon Sep 17 00:00:00 2001 From: Giovanni Cabiddu Date: Wed, 6 Mar 2024 21:58:49 +0800 Subject: [PATCH 1026/2138] crypto: qat - relocate CSR access code ANBZ: #9185 commit 680302d191b043cf3abe4076794de10171a4ca93 upstream. Intel-SIG: commit 680302d191b0 crypto: qat - relocate CSR access code Backport to support Intel QAT live migration for in-tree driver As the common hw_data files are growing and the adf_hw_csr_ops is going to be extended with new operations, move all logic related to ring CSRs to the newly created adf_gen[2|4]_hw_csr_data.[c|h] files. This does not introduce any functional change. 
Signed-off-by: Giovanni Cabiddu Signed-off-by: Xin Zeng Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 1 + .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 1 + .../intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 1 + .../qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c | 1 + .../intel/qat/qat_c62x/adf_c62x_hw_data.c | 1 + .../intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c | 1 + drivers/crypto/intel/qat/qat_common/Makefile | 2 + .../qat/qat_common/adf_gen2_hw_csr_data.c | 101 ++++++++++++++++++ .../qat/qat_common/adf_gen2_hw_csr_data.h | 86 +++++++++++++++ .../intel/qat/qat_common/adf_gen2_hw_data.c | 97 ----------------- .../intel/qat/qat_common/adf_gen2_hw_data.h | 76 ------------- .../qat/qat_common/adf_gen4_hw_csr_data.c | 101 ++++++++++++++++++ .../qat/qat_common/adf_gen4_hw_csr_data.h | 97 +++++++++++++++++ .../intel/qat/qat_common/adf_gen4_hw_data.c | 97 ----------------- .../intel/qat/qat_common/adf_gen4_hw_data.h | 94 +--------------- .../qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 1 + .../qat_dh895xccvf/adf_dh895xccvf_hw_data.c | 1 + 17 files changed, 397 insertions(+), 362 deletions(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 1102c47f8293..9ccbf5998d5c 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c 
b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index a9e389077db2..62595abb14ec 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index a882e0ea2279..201f9412c582 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include "adf_c3xxx_hw_data.h" diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c index 84d9486e04de..a512ca4efd3f 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index 48cf3eb7c734..6b5b0cf9c7c7 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include "adf_c62x_hw_data.h" diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c index 751d7aa57fc7..4aaaaf921734 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_c62xvf_hw_data.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 5915cde8a7aa..ceaa685352ed 100644 --- 
a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -14,9 +14,11 @@ intel_qat-objs := adf_cfg.o \ adf_hw_arbiter.o \ adf_sysfs.o \ adf_sysfs_ras_counters.o \ + adf_gen2_hw_csr_data.o \ adf_gen2_hw_data.o \ adf_gen2_config.o \ adf_gen4_config.o \ + adf_gen4_hw_csr_data.o \ adf_gen4_hw_data.o \ adf_gen4_pm.o \ adf_gen2_dc.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c new file mode 100644 index 000000000000..650c9edd8a66 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include +#include "adf_gen2_hw_csr_data.h" + +static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) +{ + return BUILD_RING_BASE_ADDR(addr, size); +} + +static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); +} + +static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); +} + +static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); +} + +static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); +} + +static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_E_STAT(csr_base_addr, bank); +} + +static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, + u32 ring, u32 value) +{ + WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); +} + +static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, + dma_addr_t addr) +{ + WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); +} + +static void 
write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); +} + +static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) +{ + WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); +} + +static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); +} + +static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); +} + +static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); +} + +static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); +} + +void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) +{ + csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; + csr_ops->read_csr_ring_head = read_csr_ring_head; + csr_ops->write_csr_ring_head = write_csr_ring_head; + csr_ops->read_csr_ring_tail = read_csr_ring_tail; + csr_ops->write_csr_ring_tail = write_csr_ring_tail; + csr_ops->read_csr_e_stat = read_csr_e_stat; + csr_ops->write_csr_ring_config = write_csr_ring_config; + csr_ops->write_csr_ring_base = write_csr_ring_base; + csr_ops->write_csr_int_flag = write_csr_int_flag; + csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; + csr_ops->write_csr_int_col_en = write_csr_int_col_en; + csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; + csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; + csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; +} +EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h new file mode 100644 index 000000000000..55058b0f9e52 --- /dev/null +++ 
b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_csr_data.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef ADF_GEN2_HW_CSR_DATA_H_ +#define ADF_GEN2_HW_CSR_DATA_H_ + +#include +#include "adf_accel_devices.h" + +#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL +#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL +#define ADF_RING_CSR_RING_CONFIG 0x000 +#define ADF_RING_CSR_RING_LBASE 0x040 +#define ADF_RING_CSR_RING_UBASE 0x080 +#define ADF_RING_CSR_RING_HEAD 0x0C0 +#define ADF_RING_CSR_RING_TAIL 0x100 +#define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_INT_FLAG 0x170 +#define ADF_RING_CSR_INT_SRCSEL 0x174 +#define ADF_RING_CSR_INT_SRCSEL_2 0x178 +#define ADF_RING_CSR_INT_COL_EN 0x17C +#define ADF_RING_CSR_INT_COL_CTL 0x180 +#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 +#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 +#define ADF_RING_BUNDLE_SIZE 0x1000 +#define ADF_ARB_REG_SLOT 0x1000 +#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C + +#define BUILD_RING_BASE_ADDR(addr, size) \ + (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) +#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2)) +#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2)) +#define READ_CSR_E_STAT(csr_base_addr, bank) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_E_STAT) +#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) +#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ +do { \ + u32 l_base = 0, u_base = 0; \ + l_base = (u32)((value) & 0xFFFFFFFF); \ + u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \ + ADF_CSR_WR(csr_base_addr, 
(ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \ +} while (0) + +#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) +#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) +#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_FLAG, value) +#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ +do { \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ +} while (0) +#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_COL_EN, value) +#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_COL_CTL, \ + ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) +#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ + ADF_RING_CSR_INT_FLAG_AND_COL, value) + +#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ + (ADF_ARB_REG_SLOT * (index)), value) + +void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c index d1884547b5a1..1f64bf49b221 100644 --- 
a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c @@ -111,103 +111,6 @@ void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_gen2_enable_ints); -static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) -{ - return BUILD_RING_BASE_ADDR(addr, size); -} - -static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); -} - -static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); -} - -static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); -} - -static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); -} - -static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank) -{ - return READ_CSR_E_STAT(csr_base_addr, bank); -} - -static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, - u32 ring, u32 value) -{ - WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); -} - -static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, - dma_addr_t addr) -{ - WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); -} - -static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value) -{ - WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); -} - -static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) -{ - WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); -} - -static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); -} - -static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); -} - -static void 
write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); -} - -static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); -} - -void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) -{ - csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; - csr_ops->read_csr_ring_head = read_csr_ring_head; - csr_ops->write_csr_ring_head = write_csr_ring_head; - csr_ops->read_csr_ring_tail = read_csr_ring_tail; - csr_ops->write_csr_ring_tail = write_csr_ring_tail; - csr_ops->read_csr_e_stat = read_csr_e_stat; - csr_ops->write_csr_ring_config = write_csr_ring_config; - csr_ops->write_csr_ring_base = write_csr_ring_base; - csr_ops->write_csr_int_flag = write_csr_int_flag; - csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; - csr_ops->write_csr_int_col_en = write_csr_int_col_en; - csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; - csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; - csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; -} -EXPORT_SYMBOL_GPL(adf_gen2_init_hw_csr_ops); - u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h index 6bd341061de4..708e9186127b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h @@ -6,78 +6,9 @@ #include "adf_accel_devices.h" #include "adf_cfg_common.h" -/* Transport access */ -#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL -#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL -#define ADF_RING_CSR_RING_CONFIG 0x000 -#define ADF_RING_CSR_RING_LBASE 0x040 -#define ADF_RING_CSR_RING_UBASE 0x080 -#define ADF_RING_CSR_RING_HEAD 0x0C0 -#define 
ADF_RING_CSR_RING_TAIL 0x100 -#define ADF_RING_CSR_E_STAT 0x14C -#define ADF_RING_CSR_INT_FLAG 0x170 -#define ADF_RING_CSR_INT_SRCSEL 0x174 -#define ADF_RING_CSR_INT_SRCSEL_2 0x178 -#define ADF_RING_CSR_INT_COL_EN 0x17C -#define ADF_RING_CSR_INT_COL_CTL 0x180 -#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 -#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 -#define ADF_RING_BUNDLE_SIZE 0x1000 #define ADF_GEN2_RX_RINGS_OFFSET 8 #define ADF_GEN2_TX_RINGS_MASK 0xFF -#define BUILD_RING_BASE_ADDR(addr, size) \ - (((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) -#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2)) -#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2)) -#define READ_CSR_E_STAT(csr_base_addr, bank) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_E_STAT) -#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) -#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ -do { \ - u32 l_base = 0, u_base = 0; \ - l_base = (u32)((value) & 0xFFFFFFFF); \ - u_base = (u32)(((value) & 0xFFFFFFFF00000000ULL) >> 32); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_LBASE + ((ring) << 2), l_base); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_UBASE + ((ring) << 2), u_base); \ -} while (0) - -#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) -#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_RING_TAIL + 
((ring) << 2), value) -#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_FLAG, value) -#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ -do { \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ -} while (0) -#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_COL_EN, value) -#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_COL_CTL, \ - ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) -#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * (bank)) + \ - ADF_RING_CSR_INT_FLAG_AND_COL, value) - /* AE to function map */ #define AE2FUNCTION_MAP_A_OFFSET (0x3A400 + 0x190) #define AE2FUNCTION_MAP_B_OFFSET (0x3A400 + 0x310) @@ -106,12 +37,6 @@ do { \ #define ADF_ARB_OFFSET 0x30000 #define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180 #define ADF_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) -#define ADF_ARB_REG_SLOT 0x1000 -#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C - -#define WRITE_CSR_RING_SRV_ARB_EN(csr_addr, index, value) \ - ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ - (ADF_ARB_REG_SLOT * (index)), value) /* Power gating */ #define ADF_POWERGATE_DC BIT(23) @@ -158,7 +83,6 @@ u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self); void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev); void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable, int num_a_regs, int num_b_regs); -void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info); void 
adf_gen2_get_arb_info(struct arb_info *arb_info); void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c new file mode 100644 index 000000000000..652ef4598930 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include +#include "adf_gen4_hw_csr_data.h" + +static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) +{ + return BUILD_RING_BASE_ADDR(addr, size); +} + +static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); +} + +static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); +} + +static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) +{ + return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); +} + +static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); +} + +static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_E_STAT(csr_base_addr, bank); +} + +static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring, + u32 value) +{ + WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); +} + +static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, + dma_addr_t addr) +{ + WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); +} + +static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); +} + +static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) +{ + WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); +} + +static void 
write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); +} + +static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); +} + +static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); +} + +static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); +} + +void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) +{ + csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; + csr_ops->read_csr_ring_head = read_csr_ring_head; + csr_ops->write_csr_ring_head = write_csr_ring_head; + csr_ops->read_csr_ring_tail = read_csr_ring_tail; + csr_ops->write_csr_ring_tail = write_csr_ring_tail; + csr_ops->read_csr_e_stat = read_csr_e_stat; + csr_ops->write_csr_ring_config = write_csr_ring_config; + csr_ops->write_csr_ring_base = write_csr_ring_base; + csr_ops->write_csr_int_flag = write_csr_int_flag; + csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; + csr_ops->write_csr_int_col_en = write_csr_int_col_en; + csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; + csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; + csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h new file mode 100644 index 000000000000..08d803432d9f --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef ADF_GEN4_HW_CSR_DATA_H_ +#define ADF_GEN4_HW_CSR_DATA_H_ + +#include +#include "adf_accel_devices.h" + +#define 
ADF_BANK_INT_SRC_SEL_MASK 0x44UL +#define ADF_RING_CSR_RING_CONFIG 0x1000 +#define ADF_RING_CSR_RING_LBASE 0x1040 +#define ADF_RING_CSR_RING_UBASE 0x1080 +#define ADF_RING_CSR_RING_HEAD 0x0C0 +#define ADF_RING_CSR_RING_TAIL 0x100 +#define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_INT_FLAG 0x170 +#define ADF_RING_CSR_INT_SRCSEL 0x174 +#define ADF_RING_CSR_INT_COL_CTL 0x180 +#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 +#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 +#define ADF_RING_CSR_INT_COL_EN 0x17C +#define ADF_RING_CSR_ADDR_OFFSET 0x100000 +#define ADF_RING_BUNDLE_SIZE 0x2000 +#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C + +#define BUILD_RING_BASE_ADDR(addr, size) \ + ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6) +#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2)) +#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2)) +#define READ_CSR_E_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT) +#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) +#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ +do { \ + void __iomem *_csr_base_addr = csr_base_addr; \ + u32 _bank = bank; \ + u32 _ring = ring; \ + dma_addr_t _value = value; \ + u32 l_base = 0, u_base = 0; \ + l_base = lower_32_bits(_value); \ + u_base = upper_32_bits(_value); \ + ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (_bank) + \ + ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \ + ADF_CSR_WR((_csr_base_addr) + 
ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (_bank) + \ + ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \ +} while (0) + +#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) +#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) +#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG, (value)) +#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK) +#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_COL_EN, (value)) +#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_COL_CTL, \ + ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) +#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG_AND_COL, (value)) + +#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_SRV_ARB_EN, (value)) + +void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 
b8a6d24f791f..12269e309fbf 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -8,103 +8,6 @@ #include "adf_gen4_hw_data.h" #include "adf_gen4_pm.h" -static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) -{ - return BUILD_RING_BASE_ADDR(addr, size); -} - -static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - return READ_CSR_RING_HEAD(csr_base_addr, bank, ring); -} - -static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value); -} - -static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) -{ - return READ_CSR_RING_TAIL(csr_base_addr, bank, ring); -} - -static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); -} - -static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank) -{ - return READ_CSR_E_STAT(csr_base_addr, bank); -} - -static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring, - u32 value) -{ - WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value); -} - -static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, - dma_addr_t addr) -{ - WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); -} - -static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); -} - -static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) -{ - WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); -} - -static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value) -{ - WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); -} - -static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); -} - -static void 
write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); -} - -static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, - u32 value) -{ - WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); -} - -void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) -{ - csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; - csr_ops->read_csr_ring_head = read_csr_ring_head; - csr_ops->write_csr_ring_head = write_csr_ring_head; - csr_ops->read_csr_ring_tail = read_csr_ring_tail; - csr_ops->write_csr_ring_tail = write_csr_ring_tail; - csr_ops->read_csr_e_stat = read_csr_e_stat; - csr_ops->write_csr_ring_config = write_csr_ring_config; - csr_ops->write_csr_ring_base = write_csr_ring_base; - csr_ops->write_csr_int_flag = write_csr_int_flag; - csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; - csr_ops->write_csr_int_col_en = write_csr_int_col_en; - csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; - csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; - csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; -} -EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops); - u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self) { return ADF_GEN4_ACCELERATORS_MASK; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index c153f41162ec..719f7757e587 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ /* Copyright(c) 2020 Intel Corporation */ -#ifndef ADF_GEN4_HW_CSR_DATA_H_ -#define ADF_GEN4_HW_CSR_DATA_H_ +#ifndef ADF_GEN4_HW_DATA_H_ +#define ADF_GEN4_HW_DATA_H_ #include @@ -54,95 +54,6 @@ #define ADF_GEN4_ADMINMSGLR_OFFSET 0x500578 #define ADF_GEN4_MAILBOX_BASE_OFFSET 0x600970 -/* Transport access */ -#define 
ADF_BANK_INT_SRC_SEL_MASK 0x44UL -#define ADF_RING_CSR_RING_CONFIG 0x1000 -#define ADF_RING_CSR_RING_LBASE 0x1040 -#define ADF_RING_CSR_RING_UBASE 0x1080 -#define ADF_RING_CSR_RING_HEAD 0x0C0 -#define ADF_RING_CSR_RING_TAIL 0x100 -#define ADF_RING_CSR_E_STAT 0x14C -#define ADF_RING_CSR_INT_FLAG 0x170 -#define ADF_RING_CSR_INT_SRCSEL 0x174 -#define ADF_RING_CSR_INT_COL_CTL 0x180 -#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 -#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 -#define ADF_RING_CSR_INT_COL_EN 0x17C -#define ADF_RING_CSR_ADDR_OFFSET 0x100000 -#define ADF_RING_BUNDLE_SIZE 0x2000 - -#define BUILD_RING_BASE_ADDR(addr, size) \ - ((((addr) >> 6) & (GENMASK_ULL(63, 0) << (size))) << 6) -#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ - ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2)) -#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ - ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2)) -#define READ_CSR_E_STAT(csr_base_addr, bank) \ - ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT) -#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_CONFIG + ((ring) << 2), value) -#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ -do { \ - void __iomem *_csr_base_addr = csr_base_addr; \ - u32 _bank = bank; \ - u32 _ring = ring; \ - dma_addr_t _value = value; \ - u32 l_base = 0, u_base = 0; \ - l_base = lower_32_bits(_value); \ - u_base = upper_32_bits(_value); \ - ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (_bank) + \ - ADF_RING_CSR_RING_LBASE + ((_ring) << 2), l_base); \ - ADF_CSR_WR((_csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - 
ADF_RING_BUNDLE_SIZE * (_bank) + \ - ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \ -} while (0) - -#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_HEAD + ((ring) << 2), value) -#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) -#define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_FLAG, (value)) -#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK) -#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_COL_EN, (value)) -#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_COL_CTL, \ - ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) -#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_INT_FLAG_AND_COL, (value)) - -/* Arbiter configuration */ -#define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C - -#define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \ - ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ - ADF_RING_BUNDLE_SIZE * (bank) + \ - ADF_RING_CSR_RING_SRV_ARB_EN, (value)) - /* Default ring mapping */ #define ADF_GEN4_DEFAULT_RING_TO_SRV_MAP \ (ASYM << ADF_CFG_SERV_RING_PAIR_0_SHIFT | \ @@ -234,7 +145,6 @@ u32 adf_gen4_get_num_aes(struct 
adf_hw_device_data *self); enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self); u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self); int adf_gen4_init_device(struct adf_accel_dev *accel_dev); -void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev); void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index ac04662ca806..c0661ff5e929 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include "adf_dh895xcc_hw_data.h" diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c index 70e56cc16ece..f4ee4c2e00da 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include -- Gitee From d768e92f0d397d3d1d74f0b94d4cf307a8c2bda2 Mon Sep 17 00:00:00 2001 From: Siming Wan Date: Wed, 6 Mar 2024 21:58:50 +0800 Subject: [PATCH 1027/2138] crypto: qat - rename get_sla_arr_of_type() ANBZ: #9185 commit 84058ffb919bf6a6aac24d2baf7fce442d24f390 upstream. Intel-SIG: commit 84058ffb919b crypto: qat - rename get_sla_arr_of_type() Backport to support Intel QAT live migration for in-tree driver The function get_sla_arr_of_type() returns a pointer to an SLA type specific array. Rename it and expose it as it will be used externally to this module. This does not introduce any functional change. 
Signed-off-by: Siming Wan Reviewed-by: Giovanni Cabiddu Reviewed-by: Damian Muszynski Signed-off-by: Xin Zeng Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/crypto/intel/qat/qat_common/adf_rl.c | 10 +++++----- drivers/crypto/intel/qat/qat_common/adf_rl.h | 2 ++ 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c index d4f2db3c53d8..65f752f4792a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -183,14 +183,14 @@ static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_s } /** - * get_sla_arr_of_type() - Returns a pointer to SLA type specific array + * adf_rl_get_sla_arr_of_type() - Returns a pointer to SLA type specific array * @rl_data: pointer to ratelimiting data * @type: SLA type * @sla_arr: pointer to variable where requested pointer will be stored * * Return: Max number of elements allowed for the returned array */ -static u32 get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, +u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, struct rl_sla ***sla_arr) { switch (type) { @@ -778,7 +778,7 @@ static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla) rp_in_use[sla->ring_pairs_ids[i]] = false; update_budget(sla, old_cir, true); - get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); + adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); assign_node_to_parent(rl_data->accel_dev, sla, true); adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type); mark_rps_usage(sla, rl_data->rp_in_use, false); @@ -875,7 +875,7 @@ static int add_update_sla(struct adf_accel_dev *accel_dev, if (!is_update) { mark_rps_usage(sla, rl_data->rp_in_use, true); - get_sla_arr_of_type(rl_data, sla->type, 
&sla_type_arr); + adf_rl_get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); sla_type_arr[sla->node_id] = sla; rl_data->sla[sla->sla_id] = sla; } @@ -1065,7 +1065,7 @@ void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default) /* Unregister and remove all SLAs */ for (j = RL_LEAF; j >= end_type; j--) { - max_id = get_sla_arr_of_type(rl_data, j, &sla_type_arr); + max_id = adf_rl_get_sla_arr_of_type(rl_data, j, &sla_type_arr); for (i = 0; i < max_id; i++) { if (!sla_type_arr[i]) diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h index 269c6656fb90..bfe750ea0e83 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.h +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h @@ -151,6 +151,8 @@ struct rl_sla { u16 ring_pairs_cnt; }; +u32 adf_rl_get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, + struct rl_sla ***sla_arr); int adf_rl_add_sla(struct adf_accel_dev *accel_dev, struct adf_rl_sla_input_data *sla_in); int adf_rl_update_sla(struct adf_accel_dev *accel_dev, -- Gitee From 00d929c0379b0bd6f9f22b5ebf5562d602ab2518 Mon Sep 17 00:00:00 2001 From: Siming Wan Date: Wed, 6 Mar 2024 21:58:51 +0800 Subject: [PATCH 1028/2138] crypto: qat - expand CSR operations for QAT GEN4 devices ANBZ: #9185 commit 3fa1057e35474c715608635a0bf7452397580bfd upstream. Intel-SIG: commit 3fa1057e3547 crypto: qat - expand CSR operations for QAT GEN4 devices Backport to support Intel QAT live migration for in-tree driver Extend the CSR operations for QAT GEN4 devices to allow saving and restoring the rings state. The new operations will be used as a building block for implementing the state save and restore of Virtual Functions necessary for VM live migration. 
This adds the following operations: - read ring status register - read ring underflow/overflow status register - read ring nearly empty status register - read ring nearly full status register - read ring full status register - read ring complete status register - read ring exception status register - read/write ring exception interrupt mask register - read ring configuration register - read ring base register - read/write ring interrupt enable register - read ring interrupt flag register - read/write ring interrupt source select register - read ring coalesced interrupt enable register - read ring coalesced interrupt control register - read ring flag and coalesced interrupt enable register - read ring service arbiter enable register - get ring coalesced interrupt control enable mask Signed-off-by: Siming Wan Reviewed-by: Giovanni Cabiddu Signed-off-by: Xin Zeng Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- .../intel/qat/qat_common/adf_accel_devices.h | 27 ++++ .../qat/qat_common/adf_gen4_hw_csr_data.c | 130 ++++++++++++++++++ .../qat/qat_common/adf_gen4_hw_csr_data.h | 93 ++++++++++++- 3 files changed, 249 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 08658c3a01e9..d1f3f5a822ff 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -150,22 +150,49 @@ struct adf_hw_csr_ops { u32 ring); void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank, u32 ring, u32 value); + u32 (*read_csr_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_uo_stat)(void __iomem *csr_base_addr, u32 bank); u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_ne_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_nf_stat)(void 
__iomem *csr_base_addr, u32 bank); + u32 (*read_csr_f_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_c_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_exp_stat)(void __iomem *csr_base_addr, u32 bank); + u32 (*read_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank); + void (*write_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank, + u32 value); + u32 (*read_csr_ring_config)(void __iomem *csr_base_addr, u32 bank, + u32 ring); void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank, u32 ring, u32 value); + dma_addr_t (*read_csr_ring_base)(void __iomem *csr_base_addr, u32 bank, + u32 ring); void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank, u32 ring, dma_addr_t addr); + u32 (*read_csr_int_en)(void __iomem *csr_base_addr, u32 bank); + void (*write_csr_int_en)(void __iomem *csr_base_addr, u32 bank, + u32 value); + u32 (*read_csr_int_flag)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank); + void (*write_csr_int_srcsel_w_val)(void __iomem *csr_base_addr, + u32 bank, u32 value); + u32 (*read_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_int_flag_and_col)(void __iomem *csr_base_addr, + u32 bank); void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*read_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank); void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank, u32 value); + u32 (*get_int_col_ctl_enable_mask)(void); }; struct adf_cfg_device_data; diff --git 
a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c index 652ef4598930..6609c248aaba 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.c @@ -30,57 +30,166 @@ static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value); } +static u32 read_csr_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_STAT(csr_base_addr, bank); +} + +static u32 read_csr_uo_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_UO_STAT(csr_base_addr, bank); +} + static u32 read_csr_e_stat(void __iomem *csr_base_addr, u32 bank) { return READ_CSR_E_STAT(csr_base_addr, bank); } +static u32 read_csr_ne_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_NE_STAT(csr_base_addr, bank); +} + +static u32 read_csr_nf_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_NF_STAT(csr_base_addr, bank); +} + +static u32 read_csr_f_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_F_STAT(csr_base_addr, bank); +} + +static u32 read_csr_c_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_C_STAT(csr_base_addr, bank); +} + +static u32 read_csr_exp_stat(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_EXP_STAT(csr_base_addr, bank); +} + +static u32 read_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_EXP_INT_EN(csr_base_addr, bank); +} + +static void write_csr_exp_int_en(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value); +} + +static u32 read_csr_ring_config(void __iomem *csr_base_addr, u32 bank, + u32 ring) +{ + return READ_CSR_RING_CONFIG(csr_base_addr, bank, ring); +} + static void write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring, u32 value) { WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, 
value); } +static dma_addr_t read_csr_ring_base(void __iomem *csr_base_addr, u32 bank, + u32 ring) +{ + return READ_CSR_RING_BASE(csr_base_addr, bank, ring); +} + static void write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, dma_addr_t addr) { WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, addr); } +static u32 read_csr_int_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_EN(csr_base_addr, bank); +} + +static void write_csr_int_en(void __iomem *csr_base_addr, u32 bank, u32 value) +{ + WRITE_CSR_INT_EN(csr_base_addr, bank, value); +} + +static u32 read_csr_int_flag(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_FLAG(csr_base_addr, bank); +} + static void write_csr_int_flag(void __iomem *csr_base_addr, u32 bank, u32 value) { WRITE_CSR_INT_FLAG(csr_base_addr, bank, value); } +static u32 read_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_SRCSEL(csr_base_addr, bank); +} + static void write_csr_int_srcsel(void __iomem *csr_base_addr, u32 bank) { WRITE_CSR_INT_SRCSEL(csr_base_addr, bank); } +static void write_csr_int_srcsel_w_val(void __iomem *csr_base_addr, u32 bank, + u32 value) +{ + WRITE_CSR_INT_SRCSEL_W_VAL(csr_base_addr, bank, value); +} + +static u32 read_csr_int_col_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_COL_EN(csr_base_addr, bank); +} + static void write_csr_int_col_en(void __iomem *csr_base_addr, u32 bank, u32 value) { WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value); } +static u32 read_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_COL_CTL(csr_base_addr, bank); +} + static void write_csr_int_col_ctl(void __iomem *csr_base_addr, u32 bank, u32 value) { WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value); } +static u32 read_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank); +} + static void write_csr_int_flag_and_col(void __iomem *csr_base_addr, u32 
bank, u32 value) { WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value); } +static u32 read_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank) +{ + return READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank); +} + static void write_csr_ring_srv_arb_en(void __iomem *csr_base_addr, u32 bank, u32 value) { WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value); } +static u32 get_int_col_ctl_enable_mask(void) +{ + return ADF_RING_CSR_INT_COL_CTL_ENABLE; +} + void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) { csr_ops->build_csr_ring_base_addr = build_csr_ring_base_addr; @@ -88,14 +197,35 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) csr_ops->write_csr_ring_head = write_csr_ring_head; csr_ops->read_csr_ring_tail = read_csr_ring_tail; csr_ops->write_csr_ring_tail = write_csr_ring_tail; + csr_ops->read_csr_stat = read_csr_stat; + csr_ops->read_csr_uo_stat = read_csr_uo_stat; csr_ops->read_csr_e_stat = read_csr_e_stat; + csr_ops->read_csr_ne_stat = read_csr_ne_stat; + csr_ops->read_csr_nf_stat = read_csr_nf_stat; + csr_ops->read_csr_f_stat = read_csr_f_stat; + csr_ops->read_csr_c_stat = read_csr_c_stat; + csr_ops->read_csr_exp_stat = read_csr_exp_stat; + csr_ops->read_csr_exp_int_en = read_csr_exp_int_en; + csr_ops->write_csr_exp_int_en = write_csr_exp_int_en; + csr_ops->read_csr_ring_config = read_csr_ring_config; csr_ops->write_csr_ring_config = write_csr_ring_config; + csr_ops->read_csr_ring_base = read_csr_ring_base; csr_ops->write_csr_ring_base = write_csr_ring_base; + csr_ops->read_csr_int_en = read_csr_int_en; + csr_ops->write_csr_int_en = write_csr_int_en; + csr_ops->read_csr_int_flag = read_csr_int_flag; csr_ops->write_csr_int_flag = write_csr_int_flag; + csr_ops->read_csr_int_srcsel = read_csr_int_srcsel; csr_ops->write_csr_int_srcsel = write_csr_int_srcsel; + csr_ops->write_csr_int_srcsel_w_val = write_csr_int_srcsel_w_val; + csr_ops->read_csr_int_col_en = read_csr_int_col_en; csr_ops->write_csr_int_col_en = write_csr_int_col_en; 
+ csr_ops->read_csr_int_col_ctl = read_csr_int_col_ctl; csr_ops->write_csr_int_col_ctl = write_csr_int_col_ctl; + csr_ops->read_csr_int_flag_and_col = read_csr_int_flag_and_col; csr_ops->write_csr_int_flag_and_col = write_csr_int_flag_and_col; + csr_ops->read_csr_ring_srv_arb_en = read_csr_ring_srv_arb_en; csr_ops->write_csr_ring_srv_arb_en = write_csr_ring_srv_arb_en; + csr_ops->get_int_col_ctl_enable_mask = get_int_col_ctl_enable_mask; } EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h index 08d803432d9f..6f33e7c87c2c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_csr_data.h @@ -12,13 +12,22 @@ #define ADF_RING_CSR_RING_UBASE 0x1080 #define ADF_RING_CSR_RING_HEAD 0x0C0 #define ADF_RING_CSR_RING_TAIL 0x100 +#define ADF_RING_CSR_STAT 0x140 +#define ADF_RING_CSR_UO_STAT 0x148 #define ADF_RING_CSR_E_STAT 0x14C +#define ADF_RING_CSR_NE_STAT 0x150 +#define ADF_RING_CSR_NF_STAT 0x154 +#define ADF_RING_CSR_F_STAT 0x158 +#define ADF_RING_CSR_C_STAT 0x15C +#define ADF_RING_CSR_INT_FLAG_EN 0x16C #define ADF_RING_CSR_INT_FLAG 0x170 #define ADF_RING_CSR_INT_SRCSEL 0x174 +#define ADF_RING_CSR_INT_COL_EN 0x17C #define ADF_RING_CSR_INT_COL_CTL 0x180 #define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 +#define ADF_RING_CSR_EXP_STAT 0x188 +#define ADF_RING_CSR_EXP_INT_EN 0x18C #define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 -#define ADF_RING_CSR_INT_COL_EN 0x17C #define ADF_RING_CSR_ADDR_OFFSET 0x100000 #define ADF_RING_BUNDLE_SIZE 0x2000 #define ADF_RING_CSR_RING_SRV_ARB_EN 0x19C @@ -33,9 +42,41 @@ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_RING_TAIL + ((ring) << 2)) +#define READ_CSR_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + 
ADF_RING_CSR_STAT) +#define READ_CSR_UO_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_UO_STAT) #define READ_CSR_E_STAT(csr_base_addr, bank) \ ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_E_STAT) +#define READ_CSR_NE_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_NE_STAT) +#define READ_CSR_NF_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_NF_STAT) +#define READ_CSR_F_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_F_STAT) +#define READ_CSR_C_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_C_STAT) +#define READ_CSR_EXP_STAT(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_EXP_STAT) +#define READ_CSR_EXP_INT_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_EXP_INT_EN) +#define WRITE_CSR_EXP_INT_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_EXP_INT_EN, value) +#define READ_CSR_RING_CONFIG(csr_base_addr, bank, ring) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_CONFIG + ((ring) << 2)) #define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ ADF_RING_BUNDLE_SIZE * (bank) + \ @@ -57,6 +98,25 @@ do { \ ADF_RING_CSR_RING_UBASE + ((_ring) << 2), u_base); \ } while (0) +static inline u64 read_base(void 
__iomem *csr_base_addr, u32 bank, u32 ring) +{ + u32 l_base, u_base; + + /* + * Use special IO wrapper for ring base as LBASE and UBASE are + * not physically contigious + */ + l_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + + ADF_RING_CSR_RING_LBASE + (ring << 2)); + u_base = ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + + ADF_RING_CSR_RING_UBASE + (ring << 2)); + + return (u64)u_base << 32 | (u64)l_base; +} + +#define READ_CSR_RING_BASE(csr_base_addr, bank, ring) \ + read_base((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, (bank), (ring)) + #define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ ADF_RING_BUNDLE_SIZE * (bank) + \ @@ -65,28 +125,59 @@ do { \ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_RING_TAIL + ((ring) << 2), value) +#define READ_CSR_INT_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_FLAG_EN) +#define WRITE_CSR_INT_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG_EN, (value)) +#define READ_CSR_INT_FLAG(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_FLAG) #define WRITE_CSR_INT_FLAG(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_INT_FLAG, (value)) +#define READ_CSR_INT_SRCSEL(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_SRCSEL) #define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK) +#define 
WRITE_CSR_INT_SRCSEL_W_VAL(csr_base_addr, bank, value) \ + ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_SRCSEL, (value)) +#define READ_CSR_INT_COL_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_COL_EN) #define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_INT_COL_EN, (value)) +#define READ_CSR_INT_COL_CTL(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + ADF_RING_CSR_INT_COL_CTL) #define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_INT_COL_CTL, \ ADF_RING_CSR_INT_COL_CTL_ENABLE | (value)) +#define READ_CSR_INT_FLAG_AND_COL(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_INT_FLAG_AND_COL) #define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ ADF_RING_BUNDLE_SIZE * (bank) + \ ADF_RING_CSR_INT_FLAG_AND_COL, (value)) +#define READ_CSR_RING_SRV_ARB_EN(csr_base_addr, bank) \ + ADF_CSR_RD((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ + ADF_RING_BUNDLE_SIZE * (bank) + \ + ADF_RING_CSR_RING_SRV_ARB_EN) #define WRITE_CSR_RING_SRV_ARB_EN(csr_base_addr, bank, value) \ ADF_CSR_WR((csr_base_addr) + ADF_RING_CSR_ADDR_OFFSET, \ ADF_RING_BUNDLE_SIZE * (bank) + \ -- Gitee From c6790583c43bfd8380e13211a39f8d7116f0352a Mon Sep 17 00:00:00 2001 From: Siming Wan Date: Wed, 6 Mar 2024 21:58:52 +0800 Subject: [PATCH 1029/2138] crypto: qat - add bank save and restore flows ANBZ: #9185 commit bbfdde7d195ffc9c10598055c449b24c50a0cd25 upstream. 
Intel-SIG: commit bbfdde7d195f crypto: qat - add bank save and restore flows Backport to support Intel QAT live migration for in-tree driver Add logic to save, restore, quiesce and drain a ring bank for QAT GEN4 devices. This allows to save and restore the state of a Virtual Function (VF) and will be used to implement VM live migration. Signed-off-by: Siming Wan Reviewed-by: Giovanni Cabiddu Signed-off-by: Xin Zeng Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 2 + .../intel/qat/qat_common/adf_accel_devices.h | 38 +++ .../intel/qat/qat_common/adf_gen4_hw_data.c | 279 ++++++++++++++++++ .../intel/qat/qat_common/adf_gen4_hw_data.h | 19 ++ 4 files changed, 338 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 62595abb14ec..fe70735acda5 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -455,6 +455,8 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map; hw_data->disable_iov = adf_disable_sriov; hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; + hw_data->bank_state_save = adf_gen4_bank_state_save; + hw_data->bank_state_restore = adf_gen4_bank_state_restore; hw_data->enable_pm = adf_gen4_enable_pm; hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; hw_data->dev_config = adf_gen4_dev_config; diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index d1f3f5a822ff..986e63ec702d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -140,6 +140,40 @@ struct admin_info { u32 mailbox_offset; }; +struct 
ring_config { + u64 base; + u32 config; + u32 head; + u32 tail; + u32 reserved0; +}; + +struct bank_state { + u32 ringstat0; + u32 ringstat1; + u32 ringuostat; + u32 ringestat; + u32 ringnestat; + u32 ringnfstat; + u32 ringfstat; + u32 ringcstat0; + u32 ringcstat1; + u32 ringcstat2; + u32 ringcstat3; + u32 iaintflagen; + u32 iaintflagreg; + u32 iaintflagsrcsel0; + u32 iaintflagsrcsel1; + u32 iaintcolen; + u32 iaintcolctl; + u32 iaintflagandcolen; + u32 ringexpstat; + u32 ringexpintenable; + u32 ringsrvarben; + u32 reserved0; + struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK]; +}; + struct adf_hw_csr_ops { u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size); u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank, @@ -271,6 +305,10 @@ struct adf_hw_device_data { void (*enable_ints)(struct adf_accel_dev *accel_dev); void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev); int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr); + int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number, + struct bank_state *state); + int (*bank_state_restore)(struct adf_accel_dev *accel_dev, + u32 bank_number, struct bank_state *state); void (*reset_device)(struct adf_accel_dev *accel_dev); void (*set_msix_rttable)(struct adf_accel_dev *accel_dev); const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 12269e309fbf..41a0979e68c1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2020 Intel Corporation */ #include +#include #include "adf_accel_devices.h" #include "adf_cfg_services.h" #include "adf_common_drv.h" @@ -390,3 +391,281 @@ u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev) return ring_to_svc_map; } 
EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map); + +/* + * adf_gen4_bank_quiesce_coal_timer() - quiesce bank coalesced interrupt timer + * @accel_dev: Pointer to the device structure + * @bank_idx: Offset to the bank within this device + * @timeout_ms: Timeout in milliseconds for the operation + * + * This function tries to quiesce the coalesced interrupt timer of a bank if + * it has been enabled and triggered. + * + * Returns 0 on success, error code otherwise + * + */ +int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev, + u32 bank_idx, int timeout_ms) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); + void __iomem *csr_misc = adf_get_pmisc_base(accel_dev); + void __iomem *csr_etr = adf_get_etr_base(accel_dev); + u32 int_col_ctl, int_col_mask, int_col_en; + u32 e_stat, intsrc; + u64 wait_us; + int ret; + + if (timeout_ms < 0) + return -EINVAL; + + int_col_ctl = csr_ops->read_csr_int_col_ctl(csr_etr, bank_idx); + int_col_mask = csr_ops->get_int_col_ctl_enable_mask(); + if (!(int_col_ctl & int_col_mask)) + return 0; + + int_col_en = csr_ops->read_csr_int_col_en(csr_etr, bank_idx); + int_col_en &= BIT(ADF_WQM_CSR_RP_IDX_RX); + + e_stat = csr_ops->read_csr_e_stat(csr_etr, bank_idx); + if (!(~e_stat & int_col_en)) + return 0; + + wait_us = 2 * ((int_col_ctl & ~int_col_mask) << 8) * USEC_PER_SEC; + do_div(wait_us, hw_data->clock_frequency); + wait_us = min(wait_us, (u64)timeout_ms * USEC_PER_MSEC); + dev_dbg(&GET_DEV(accel_dev), + "wait for bank %d - coalesced timer expires in %llu us (max=%u ms estat=0x%x intcolen=0x%x)\n", + bank_idx, wait_us, timeout_ms, e_stat, int_col_en); + + ret = read_poll_timeout(ADF_CSR_RD, intsrc, intsrc, + ADF_COALESCED_POLL_DELAY_US, wait_us, true, + csr_misc, ADF_WQM_CSR_RPINTSOU(bank_idx)); + if (ret) + dev_warn(&GET_DEV(accel_dev), + "coalesced timer for bank %d expired (%llu us)\n", + bank_idx, wait_us); + + return ret; +} 
+EXPORT_SYMBOL_GPL(adf_gen4_bank_quiesce_coal_timer); + +static int drain_bank(void __iomem *csr, u32 bank_number, int timeout_us) +{ + u32 status; + + ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number), + ADF_WQM_CSR_RPRESETCTL_DRAIN); + + return read_poll_timeout(ADF_CSR_RD, status, + status & ADF_WQM_CSR_RPRESETSTS_STATUS, + ADF_RPRESET_POLL_DELAY_US, timeout_us, true, + csr, ADF_WQM_CSR_RPRESETSTS(bank_number)); +} + +void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev, + u32 bank_number) +{ + void __iomem *csr = adf_get_etr_base(accel_dev); + + ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number), + ADF_WQM_CSR_RPRESETSTS_STATUS); +} + +int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev, + u32 bank_number, int timeout_us) +{ + void __iomem *csr = adf_get_etr_base(accel_dev); + int ret; + + dev_dbg(&GET_DEV(accel_dev), "Drain bank %d\n", bank_number); + + ret = drain_bank(csr, bank_number, timeout_us); + if (ret) + dev_err(&GET_DEV(accel_dev), "Bank drain failed (timeout)\n"); + else + dev_dbg(&GET_DEV(accel_dev), "Bank drain successful\n"); + + return ret; +} + +static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base, + u32 bank, struct bank_state *state, u32 num_rings) +{ + u32 i; + + state->ringstat0 = ops->read_csr_stat(base, bank); + state->ringuostat = ops->read_csr_uo_stat(base, bank); + state->ringestat = ops->read_csr_e_stat(base, bank); + state->ringnestat = ops->read_csr_ne_stat(base, bank); + state->ringnfstat = ops->read_csr_nf_stat(base, bank); + state->ringfstat = ops->read_csr_f_stat(base, bank); + state->ringcstat0 = ops->read_csr_c_stat(base, bank); + state->iaintflagen = ops->read_csr_int_en(base, bank); + state->iaintflagreg = ops->read_csr_int_flag(base, bank); + state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank); + state->iaintcolen = ops->read_csr_int_col_en(base, bank); + state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank); + state->iaintflagandcolen = 
ops->read_csr_int_flag_and_col(base, bank); + state->ringexpstat = ops->read_csr_exp_stat(base, bank); + state->ringexpintenable = ops->read_csr_exp_int_en(base, bank); + state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank); + + for (i = 0; i < num_rings; i++) { + state->rings[i].head = ops->read_csr_ring_head(base, bank, i); + state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i); + state->rings[i].config = ops->read_csr_ring_config(base, bank, i); + state->rings[i].base = ops->read_csr_ring_base(base, bank, i); + } +} + +#define CHECK_STAT(op, expect_val, name, args...) \ +({ \ + u32 __expect_val = (expect_val); \ + u32 actual_val = op(args); \ + (__expect_val == actual_val) ? 0 : \ + (pr_err("QAT: Fail to restore %s register. Expected 0x%x, actual 0x%x\n", \ + name, __expect_val, actual_val), -EINVAL); \ +}) + +static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base, + u32 bank, struct bank_state *state, u32 num_rings, + int tx_rx_gap) +{ + u32 val, tmp_val, i; + int ret; + + for (i = 0; i < num_rings; i++) + ops->write_csr_ring_base(base, bank, i, state->rings[i].base); + + for (i = 0; i < num_rings; i++) + ops->write_csr_ring_config(base, bank, i, state->rings[i].config); + + for (i = 0; i < num_rings / 2; i++) { + int tx = i * (tx_rx_gap + 1); + int rx = tx + tx_rx_gap; + + ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head); + ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail); + + /* + * The TX ring head needs to be updated again to make sure that + * the HW will not consider the ring as full when it is empty + * and the correct state flags are set to match the recovered state. 
+ */ + if (state->ringestat & BIT(tx)) { + val = ops->read_csr_int_srcsel(base, bank); + val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK; + ops->write_csr_int_srcsel_w_val(base, bank, val); + ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head); + } + + ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail); + val = ops->read_csr_int_srcsel(base, bank); + val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH; + ops->write_csr_int_srcsel_w_val(base, bank, val); + + ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head); + val = ops->read_csr_int_srcsel(base, bank); + val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH; + ops->write_csr_int_srcsel_w_val(base, bank, val); + + /* + * The RX ring tail needs to be updated again to make sure that + * the HW will not consider the ring as empty when it is full + * and the correct state flags are set to match the recovered state. + */ + if (state->ringfstat & BIT(rx)) + ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail); + } + + ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen); + ops->write_csr_int_en(base, bank, state->iaintflagen); + ops->write_csr_int_col_en(base, bank, state->iaintcolen); + ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0); + ops->write_csr_exp_int_en(base, bank, state->ringexpintenable); + ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl); + ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben); + + /* Check that all ring statuses match the saved state. 
*/ + ret = CHECK_STAT(ops->read_csr_stat, state->ringstat0, "ringstat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_e_stat, state->ringestat, "ringestat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_ne_stat, state->ringnestat, "ringnestat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_f_stat, state->ringfstat, "ringfstat", + base, bank); + if (ret) + return ret; + + ret = CHECK_STAT(ops->read_csr_c_stat, state->ringcstat0, "ringcstat", + base, bank); + if (ret) + return ret; + + tmp_val = ops->read_csr_exp_stat(base, bank); + val = state->ringexpstat; + if (tmp_val && !val) { + pr_err("QAT: Bank was restored with exception: 0x%x\n", val); + return -EINVAL; + } + + return 0; +} + +int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number, + struct bank_state *state) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); + void __iomem *csr_base = adf_get_etr_base(accel_dev); + + if (bank_number >= hw_data->num_banks || !state) + return -EINVAL; + + dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number); + + bank_state_save(csr_ops, csr_base, bank_number, state, + hw_data->num_rings_per_bank); + + return 0; +} +EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save); + +int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number, + struct bank_state *state) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev); + void __iomem *csr_base = adf_get_etr_base(accel_dev); + int ret; + + if (bank_number >= hw_data->num_banks || !state) + return -EINVAL; + + dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number); + + ret = bank_state_restore(csr_ops, csr_base, 
bank_number, state, + hw_data->num_rings_per_bank, hw_data->tx_rx_gap); + if (ret) + dev_err(&GET_DEV(accel_dev), + "Unable to restore state of bank %d\n", bank_number); + + return ret; +} +EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 719f7757e587..e8cb930e80c9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -77,10 +77,19 @@ #define ADF_RPRESET_POLL_TIMEOUT_US (5 * USEC_PER_SEC) #define ADF_RPRESET_POLL_DELAY_US 20 #define ADF_WQM_CSR_RPRESETCTL_RESET BIT(0) +#define ADF_WQM_CSR_RPRESETCTL_DRAIN BIT(2) #define ADF_WQM_CSR_RPRESETCTL(bank) (0x6000 + ((bank) << 3)) #define ADF_WQM_CSR_RPRESETSTS_STATUS BIT(0) #define ADF_WQM_CSR_RPRESETSTS(bank) (ADF_WQM_CSR_RPRESETCTL(bank) + 4) +/* Ring interrupt */ +#define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2) +#define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0) +#define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4 +#define ADF_COALESCED_POLL_DELAY_US 1000 +#define ADF_WQM_CSR_RPINTSOU(bank) (0x200000 + ((bank) << 12)) +#define ADF_WQM_CSR_RP_IDX_RX 1 + /* Error source registers */ #define ADF_GEN4_ERRSOU0 (0x41A200) #define ADF_GEN4_ERRSOU1 (0x41A204) @@ -150,5 +159,15 @@ void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev); void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev); u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev); +int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev, + u32 bank_idx, int timeout_ms); +int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev, + u32 bank_number, int timeout_us); +void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev, + u32 bank_number); +int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number, + struct bank_state *state); 
+int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, + u32 bank_number, struct bank_state *state); #endif -- Gitee From 8b323ea736702c2c1027ed096ddd22b17113c3bb Mon Sep 17 00:00:00 2001 From: Xin Zeng Date: Wed, 6 Mar 2024 21:58:53 +0800 Subject: [PATCH 1030/2138] crypto: qat - add interface for live migration ANBZ: #9185 commit 0fce55e5334d380d8a09f80ba9c9b68eeea6971d upstream. Intel-SIG: commit 0fce55e5334d crypto: qat - add interface for live migration Backport to support Intel QAT live migration for in-tree driver Extend the driver with a new interface to be used for VF live migration. This allows to create and destroy a qat_mig_dev object that contains a set of methods to allow to save and restore the state of QAT VF. This interface will be used by the qat-vfio-pci module. Signed-off-by: Xin Zeng Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/crypto/intel/qat/qat_common/Makefile | 2 +- .../intel/qat/qat_common/adf_accel_devices.h | 17 +++ .../intel/qat/qat_common/adf_gen4_vf_mig.h | 10 ++ .../crypto/intel/qat/qat_common/qat_mig_dev.c | 130 ++++++++++++++++++ include/linux/qat/qat_mig_dev.h | 31 +++++ 5 files changed, 189 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h create mode 100644 drivers/crypto/intel/qat/qat_common/qat_mig_dev.c create mode 100644 include/linux/qat/qat_mig_dev.h diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index ceaa685352ed..9fba31d4ac7f 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -54,6 +54,6 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \ adf_pfvf_vf_msg.o 
adf_pfvf_vf_proto.o \ - adf_gen2_pfvf.o adf_gen4_pfvf.o + adf_gen2_pfvf.o adf_gen4_pfvf.o qat_mig_dev.o intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 986e63ec702d..b08fea10121e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -9,6 +9,7 @@ #include #include #include +#include #include "adf_cfg_common.h" #include "adf_rl.h" #include "adf_telemetry.h" @@ -258,6 +259,20 @@ struct adf_dc_ops { void (*build_deflate_ctx)(void *ctx); }; +struct qat_migdev_ops { + int (*init)(struct qat_mig_dev *mdev); + void (*cleanup)(struct qat_mig_dev *mdev); + void (*reset)(struct qat_mig_dev *mdev); + int (*open)(struct qat_mig_dev *mdev); + void (*close)(struct qat_mig_dev *mdev); + int (*suspend)(struct qat_mig_dev *mdev); + int (*resume)(struct qat_mig_dev *mdev); + int (*save_state)(struct qat_mig_dev *mdev); + int (*save_setup)(struct qat_mig_dev *mdev); + int (*load_state)(struct qat_mig_dev *mdev); + int (*load_setup)(struct qat_mig_dev *mdev, int size); +}; + struct adf_dev_err_mask { u32 cppagentcmdpar_mask; u32 parerr_ath_cph_mask; @@ -325,6 +340,7 @@ struct adf_hw_device_data { struct adf_dev_err_mask dev_err_mask; struct adf_rl_hw_data rl_data; struct adf_tl_hw_data tl_data; + struct qat_migdev_ops vfmig_ops; const char *fw_name; const char *fw_mmp_name; u32 fuses; @@ -381,6 +397,7 @@ struct adf_hw_device_data { #define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops) #define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops) #define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops) +#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops) #define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data #define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev diff --git 
a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h new file mode 100644 index 000000000000..72216d078ee1 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef ADF_GEN4_VF_MIG_H_ +#define ADF_GEN4_VF_MIG_H_ + +#include "adf_accel_devices.h" + +void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c new file mode 100644 index 000000000000..892c2283a50e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include +#include +#include +#include +#include +#include "adf_accel_devices.h" +#include "adf_common_drv.h" + +struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id) +{ + struct adf_accel_dev *accel_dev; + struct qat_migdev_ops *ops; + struct qat_mig_dev *mdev; + + accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + if (!accel_dev) + return ERR_PTR(-ENODEV); + + ops = GET_VFMIG_OPS(accel_dev); + if (!ops || !ops->init || !ops->cleanup || !ops->reset || !ops->open || + !ops->close || !ops->suspend || !ops->resume || !ops->save_state || + !ops->load_state || !ops->save_setup || !ops->load_setup) + return ERR_PTR(-EINVAL); + + mdev = kmalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) + return ERR_PTR(-ENOMEM); + + mdev->vf_id = vf_id; + mdev->parent_accel_dev = accel_dev; + + return mdev; +} +EXPORT_SYMBOL_GPL(qat_vfmig_create); + +int qat_vfmig_init(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->init(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_init); + +void qat_vfmig_cleanup(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = 
mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->cleanup(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_cleanup); + +void qat_vfmig_reset(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->reset(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_reset); + +int qat_vfmig_open(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->open(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_open); + +void qat_vfmig_close(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + GET_VFMIG_OPS(accel_dev)->close(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_close); + +int qat_vfmig_suspend(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->suspend(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_suspend); + +int qat_vfmig_resume(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->resume(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_resume); + +int qat_vfmig_save_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->save_state(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_save_state); + +int qat_vfmig_save_setup(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->save_setup(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_save_setup); + +int qat_vfmig_load_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->load_state(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_load_state); + +int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->load_setup(mdev, size); +} 
+EXPORT_SYMBOL_GPL(qat_vfmig_load_setup); + +void qat_vfmig_destroy(struct qat_mig_dev *mdev) +{ + kfree(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_destroy); diff --git a/include/linux/qat/qat_mig_dev.h b/include/linux/qat/qat_mig_dev.h new file mode 100644 index 000000000000..dbbb6a063dd2 --- /dev/null +++ b/include/linux/qat/qat_mig_dev.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef QAT_MIG_DEV_H_ +#define QAT_MIG_DEV_H_ + +struct pci_dev; + +struct qat_mig_dev { + void *parent_accel_dev; + u8 *state; + u32 setup_size; + u32 remote_setup_size; + u32 state_size; + s32 vf_id; +}; + +struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id); +int qat_vfmig_init(struct qat_mig_dev *mdev); +void qat_vfmig_cleanup(struct qat_mig_dev *mdev); +void qat_vfmig_reset(struct qat_mig_dev *mdev); +int qat_vfmig_open(struct qat_mig_dev *mdev); +void qat_vfmig_close(struct qat_mig_dev *mdev); +int qat_vfmig_suspend(struct qat_mig_dev *mdev); +int qat_vfmig_resume(struct qat_mig_dev *mdev); +int qat_vfmig_save_state(struct qat_mig_dev *mdev); +int qat_vfmig_save_setup(struct qat_mig_dev *mdev); +int qat_vfmig_load_state(struct qat_mig_dev *mdev); +int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size); +void qat_vfmig_destroy(struct qat_mig_dev *mdev); + +#endif /*QAT_MIG_DEV_H_*/ -- Gitee From 7617c59e171a3d51862d4248954a93fe78630d8f Mon Sep 17 00:00:00 2001 From: Xin Zeng Date: Wed, 6 Mar 2024 21:58:54 +0800 Subject: [PATCH 1031/2138] crypto: qat - implement interface for live migration ANBZ: #9185 commit f0bbfc391aa7eaa796f09ee40dd1cd78c6c81960 upstream. Intel-SIG: commit f0bbfc391aa7 crypto: qat - implement interface for live migration Backport to support Intel QAT live migration for in-tree driver Add logic to implement the interface for live migration defined in qat/qat_mig_dev.h. This is specific for QAT GEN4 Virtual Functions (VFs). 
This introduces a migration data manager which is used to handle the device state during migration. The manager ensures that the device state is stored in a format that can be restored in the destination node. The VF state is organized into a hierarchical structure that includes a preamble, a general state section, a MISC bar section and an ETR bar section. The latter contains the state of the 4 ring pairs contained on a VF. Here is a graphical representation of the state: preamble | general state section | leaf state | MISC bar state section| leaf state | ETR bar state section | bank0 state section | leaf state | bank1 state section | leaf state | bank2 state section | leaf state | bank3 state section | leaf state In addition to the implementation of the qat_migdev_ops interface and the state manager framework, add a mutex in pfvf to avoid pf2vf messages during migration. Signed-off-by: Xin Zeng Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- .../intel/qat/qat_420xx/adf_420xx_hw_data.c | 2 + .../intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 2 + drivers/crypto/intel/qat/qat_common/Makefile | 2 + .../intel/qat/qat_common/adf_accel_devices.h | 6 + .../intel/qat/qat_common/adf_gen4_hw_data.h | 10 + .../intel/qat/qat_common/adf_gen4_vf_mig.c | 1010 +++++++++++++++++ .../intel/qat/qat_common/adf_mstate_mgr.c | 318 ++++++ .../intel/qat/qat_common/adf_mstate_mgr.h | 89 ++ .../crypto/intel/qat/qat_common/adf_sriov.c | 7 +- 9 files changed, 1445 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c create mode 100644 drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 
9ccbf5998d5c..d255cb3ebd9c 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "adf_420xx_hw_data.h" #include "icp_qat_hw.h" @@ -488,6 +489,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) adf_gen4_init_dc_ops(&hw_data->dc_ops); adf_gen4_init_ras_ops(&hw_data->ras_ops); adf_gen4_init_tl_data(&hw_data->tl_data); + adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops); adf_init_rl_data(&hw_data->rl_data); } diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index fe70735acda5..7d48985c99d9 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -17,6 +17,7 @@ #include "adf_gen4_ras.h" #include #include +#include #include "adf_4xxx_hw_data.h" #include "icp_qat_hw.h" @@ -472,6 +473,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) adf_gen4_init_dc_ops(&hw_data->dc_ops); adf_gen4_init_ras_ops(&hw_data->ras_ops); adf_gen4_init_tl_data(&hw_data->tl_data); + adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops); adf_init_rl_data(&hw_data->rl_data); } diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 9fba31d4ac7f..6f9266edc9f1 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -20,12 +20,14 @@ intel_qat-objs := adf_cfg.o \ adf_gen4_config.o \ adf_gen4_hw_csr_data.o \ adf_gen4_hw_data.o \ + adf_gen4_vf_mig.o \ adf_gen4_pm.o \ adf_gen2_dc.o \ adf_gen4_dc.o \ adf_gen4_ras.o \ adf_gen4_timer.o \ adf_clock.o \ + adf_mstate_mgr.o \ qat_crypto.o \ qat_compression.o \ qat_comp_algs.o \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 
b08fea10121e..7830ecb1a1f1 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -412,11 +412,17 @@ struct adf_fw_loader_data { struct adf_accel_vf_info { struct adf_accel_dev *accel_dev; struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */ + struct mutex pfvf_mig_lock; /* protects PFVF state for migration */ struct ratelimit_state vf2pf_ratelimit; u32 vf_nr; bool init; bool restarting; u8 vf_compat_ver; + /* + * Private area used for device migration. + * Memory allocation and free is managed by migration driver. + */ + void *mig_priv; }; struct adf_dc_data { diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index e8cb930e80c9..8b10926cedba 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -86,6 +86,7 @@ #define ADF_RP_INT_SRC_SEL_F_RISE_MASK BIT(2) #define ADF_RP_INT_SRC_SEL_F_FALL_MASK GENMASK(2, 0) #define ADF_RP_INT_SRC_SEL_RANGE_WIDTH 4 +#define ADF_COALESCED_POLL_TIMEOUT_US (1 * USEC_PER_SEC) #define ADF_COALESCED_POLL_DELAY_US 1000 #define ADF_WQM_CSR_RPINTSOU(bank) (0x200000 + ((bank) << 12)) #define ADF_WQM_CSR_RP_IDX_RX 1 @@ -120,6 +121,15 @@ /* PF2VM communication channel */ #define ADF_GEN4_PF2VM_OFFSET(i) (0x40B010 + (i) * 0x20) #define ADF_GEN4_VM2PF_OFFSET(i) (0x40B014 + (i) * 0x20) +#define ADF_GEN4_VINTMSKPF2VM_OFFSET(i) (0x40B00C + (i) * 0x20) +#define ADF_GEN4_VINTSOUPF2VM_OFFSET(i) (0x40B008 + (i) * 0x20) +#define ADF_GEN4_VINTMSK_OFFSET(i) (0x40B004 + (i) * 0x20) +#define ADF_GEN4_VINTSOU_OFFSET(i) (0x40B000 + (i) * 0x20) + +struct adf_gen4_vfmig { + struct adf_mstate_mgr *mstate_mgr; + bool bank_stopped[ADF_GEN4_NUM_BANKS_PER_VF]; +}; void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c 
b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c new file mode 100644 index 000000000000..78a39cfe196f --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c @@ -0,0 +1,1010 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include +#include +#include +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_gen4_hw_data.h" +#include "adf_gen4_pfvf.h" +#include "adf_pfvf_utils.h" +#include "adf_mstate_mgr.h" +#include "adf_gen4_vf_mig.h" + +#define ADF_GEN4_VF_MSTATE_SIZE 4096 +#define ADF_GEN4_PFVF_RSP_TIMEOUT_US 5000 + +static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev); +static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len); + +static int adf_gen4_vfmig_init_device(struct qat_mig_dev *mdev) +{ + u8 *state; + + state = kmalloc(ADF_GEN4_VF_MSTATE_SIZE, GFP_KERNEL); + if (!state) + return -ENOMEM; + + mdev->state = state; + mdev->state_size = ADF_GEN4_VF_MSTATE_SIZE; + mdev->setup_size = 0; + mdev->remote_setup_size = 0; + + return 0; +} + +static void adf_gen4_vfmig_cleanup_device(struct qat_mig_dev *mdev) +{ + kfree(mdev->state); + mdev->state = NULL; +} + +static void adf_gen4_vfmig_reset_device(struct qat_mig_dev *mdev) +{ + mdev->setup_size = 0; + mdev->remote_setup_size = 0; +} + +static int adf_gen4_vfmig_open_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + + vf_info = &accel_dev->pf.vf_info[mdev->vf_id]; + + vfmig = kzalloc(sizeof(*vfmig), GFP_KERNEL); + if (!vfmig) + return -ENOMEM; + + vfmig->mstate_mgr = adf_mstate_mgr_new(mdev->state, mdev->state_size); + if (!vfmig->mstate_mgr) { + kfree(vfmig); + return -ENOMEM; + } + vf_info->mig_priv = vfmig; + mdev->setup_size = 0; + mdev->remote_setup_size = 0; + + return 0; +} + +static void adf_gen4_vfmig_close_device(struct qat_mig_dev 
*mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + + vf_info = &accel_dev->pf.vf_info[mdev->vf_id]; + if (vf_info->mig_priv) { + vfmig = vf_info->mig_priv; + adf_mstate_mgr_destroy(vfmig->mstate_mgr); + kfree(vfmig); + vf_info->mig_priv = NULL; + } +} + +static int adf_gen4_vfmig_suspend_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vf_mig; + u32 vf_nr = mdev->vf_id; + int ret, i; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vf_mig = vf_info->mig_priv; + + /* Stop all inflight jobs */ + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf; + + ret = adf_gen4_bank_drain_start(accel_dev, pf_bank_nr, + ADF_RPRESET_POLL_TIMEOUT_US); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to drain bank %d for vf_nr %d\n", i, + vf_nr); + return ret; + } + vf_mig->bank_stopped[i] = true; + + adf_gen4_bank_quiesce_coal_timer(accel_dev, pf_bank_nr, + ADF_COALESCED_POLL_TIMEOUT_US); + } + + return 0; +} + +static int adf_gen4_vfmig_resume_device(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vf_mig; + u32 vf_nr = mdev->vf_id; + int i; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vf_mig = vf_info->mig_priv; + + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + u32 pf_bank_nr = i + vf_nr * hw_data->num_banks_per_vf; + + if (vf_mig->bank_stopped[i]) { + adf_gen4_bank_drain_finish(accel_dev, pf_bank_nr); + vf_mig->bank_stopped[i] = false; + } + } + + return 0; +} + +struct adf_vf_bank_info { + struct adf_accel_dev *accel_dev; + u32 vf_nr; + u32 bank_nr; +}; + +struct mig_user_sla { + enum 
adf_base_services srv; + u64 rp_mask; + u32 cir; + u32 pir; +}; + +static int adf_mstate_sla_check(struct adf_mstate_mgr *sub_mgr, u8 *src_buf, + u32 src_size, void *opaque) +{ + struct adf_mstate_vreginfo _sinfo = { src_buf, src_size }; + struct adf_mstate_vreginfo *sinfo = &_sinfo, *dinfo = opaque; + u32 src_sla_cnt = sinfo->size / sizeof(struct mig_user_sla); + u32 dst_sla_cnt = dinfo->size / sizeof(struct mig_user_sla); + struct mig_user_sla *src_slas = sinfo->addr; + struct mig_user_sla *dst_slas = dinfo->addr; + int i, j; + + for (i = 0; i < src_sla_cnt; i++) { + for (j = 0; j < dst_sla_cnt; j++) { + if (src_slas[i].srv != dst_slas[j].srv || + src_slas[i].rp_mask != dst_slas[j].rp_mask) + continue; + + if (src_slas[i].cir > dst_slas[j].cir || + src_slas[i].pir > dst_slas[j].pir) { + pr_err("QAT: DST VF rate limiting mismatch.\n"); + return -EINVAL; + } + break; + } + + if (j == dst_sla_cnt) { + pr_err("QAT: SRC VF rate limiting mismatch - SRC srv %d and rp_mask 0x%llx.\n", + src_slas[i].srv, src_slas[i].rp_mask); + return -EINVAL; + } + } + + return 0; +} + +static inline int adf_mstate_check_cap_size(u32 src_sz, u32 dst_sz, u32 max_sz) +{ + if (src_sz > max_sz || dst_sz > max_sz) + return -EINVAL; + else + return 0; +} + +static int adf_mstate_compatver_check(struct adf_mstate_mgr *sub_mgr, + u8 *src_buf, u32 src_sz, void *opaque) +{ + struct adf_mstate_vreginfo *info = opaque; + u8 compat = 0; + u8 *pcompat; + + if (src_sz != info->size) { + pr_debug("QAT: State mismatch (compat version size), current %u, expected %u\n", + src_sz, info->size); + return -EINVAL; + } + + memcpy(info->addr, src_buf, info->size); + pcompat = info->addr; + if (*pcompat == 0) { + pr_warn("QAT: Unable to determine the version of VF\n"); + return 0; + } + + compat = adf_vf_compat_checker(*pcompat); + if (compat == ADF_PF2VF_VF_INCOMPATIBLE) { + pr_debug("QAT: SRC VF driver (ver=%u) is incompatible with DST PF driver (ver=%u)\n", + *pcompat, ADF_PFVF_COMPAT_THIS_VERSION); + return 
-EINVAL; + } + + if (compat == ADF_PF2VF_VF_COMPAT_UNKNOWN) + pr_debug("QAT: SRC VF driver (ver=%u) is newer than DST PF driver (ver=%u)\n", + *pcompat, ADF_PFVF_COMPAT_THIS_VERSION); + + return 0; +} + +/* + * adf_mstate_capmask_compare() - compare QAT device capability mask + * @sinfo: Pointer to source capability info + * @dinfo: Pointer to target capability info + * + * This function compares the capability mask between source VF and target VF + * + * Returns: 0 if target capability mask is identical to source capability mask, + * 1 if target mask can represent all the capabilities represented by source mask, + * -1 if target mask can't represent all the capabilities represented by source + * mask. + */ +static int adf_mstate_capmask_compare(struct adf_mstate_vreginfo *sinfo, + struct adf_mstate_vreginfo *dinfo) +{ + u64 src = 0, dst = 0; + + if (adf_mstate_check_cap_size(sinfo->size, dinfo->size, sizeof(u64))) { + pr_debug("QAT: Unexpected capability size %u %u %zu\n", + sinfo->size, dinfo->size, sizeof(u64)); + return -1; + } + + memcpy(&src, sinfo->addr, sinfo->size); + memcpy(&dst, dinfo->addr, dinfo->size); + + pr_debug("QAT: Check cap compatibility of cap %llu %llu\n", src, dst); + + if (src == dst) + return 0; + + if ((src | dst) == dst) + return 1; + + return -1; +} + +static int adf_mstate_capmask_superset(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa) +{ + struct adf_mstate_vreginfo sinfo = { buf, size }; + + if (adf_mstate_capmask_compare(&sinfo, opa) >= 0) + return 0; + + return -EINVAL; +} + +static int adf_mstate_capmask_equal(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa) +{ + struct adf_mstate_vreginfo sinfo = { buf, size }; + + if (adf_mstate_capmask_compare(&sinfo, opa) == 0) + return 0; + + return -EINVAL; +} + +static int adf_mstate_set_vreg(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa) +{ + struct adf_mstate_vreginfo *info = opa; + + if (size != info->size) { + pr_debug("QAT: Unexpected 
cap size %u %u\n", size, info->size); + return -EINVAL; + } + memcpy(info->addr, buf, info->size); + + return 0; +} + +static u32 adf_gen4_vfmig_get_slas(struct adf_accel_dev *accel_dev, u32 vf_nr, + struct mig_user_sla *pmig_slas) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla **sla_type_arr = NULL; + u64 rp_mask, rp_index; + u32 max_num_sla; + u32 sla_cnt = 0; + int i, j; + + if (!accel_dev->rate_limiting) + return 0; + + rp_index = vf_nr * hw_data->num_banks_per_vf; + max_num_sla = adf_rl_get_sla_arr_of_type(rl_data, RL_LEAF, &sla_type_arr); + + for (i = 0; i < max_num_sla; i++) { + if (!sla_type_arr[i]) + continue; + + rp_mask = 0; + for (j = 0; j < sla_type_arr[i]->ring_pairs_cnt; j++) + rp_mask |= BIT(sla_type_arr[i]->ring_pairs_ids[j]); + + if (rp_mask & GENMASK_ULL(rp_index + 3, rp_index)) { + pmig_slas->rp_mask = rp_mask; + pmig_slas->cir = sla_type_arr[i]->cir; + pmig_slas->pir = sla_type_arr[i]->pir; + pmig_slas->srv = sla_type_arr[i]->srv; + pmig_slas++; + sla_cnt++; + } + } + + return sla_cnt; +} + +static int adf_gen4_vfmig_load_etr_regs(struct adf_mstate_mgr *sub_mgr, + u8 *state, u32 size, void *opa) +{ + struct adf_vf_bank_info *vf_bank_info = opa; + struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 pf_bank_nr; + int ret; + + pf_bank_nr = vf_bank_info->bank_nr + vf_bank_info->vf_nr * hw_data->num_banks_per_vf; + ret = hw_data->bank_state_restore(accel_dev, pf_bank_nr, + (struct bank_state *)state); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load regs for vf%d bank%d\n", + vf_bank_info->vf_nr, vf_bank_info->bank_nr); + return ret; + } + + return 0; +} + +static int adf_gen4_vfmig_load_etr_bank(struct adf_accel_dev *accel_dev, + u32 vf_nr, u32 bank_nr, + struct adf_mstate_mgr *mstate_mgr) +{ + struct adf_vf_bank_info vf_bank_info = {accel_dev, vf_nr, bank_nr}; + struct 
adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + char bank_ids[ADF_MSTATE_ID_LEN]; + + snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr); + subsec = adf_mstate_sect_lookup(mstate_mgr, bank_ids, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to lookup sec %s for vf%d bank%d\n", + ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS, + adf_gen4_vfmig_load_etr_regs, + &vf_bank_info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to add sec %s for vf%d bank%d\n", + ADF_MSTATE_ETR_REGS_IDS, vf_nr, bank_nr); + return -EINVAL; + } + + return 0; +} + +static int adf_gen4_vfmig_load_etr(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec; + int ret, i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL, + NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_ETRB_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + ret = adf_gen4_vfmig_load_etr_bank(accel_dev, vf_nr, i, + &sub_sects_mgr); + if (ret) + return ret; + } + + return 0; +} + +static int adf_gen4_vfmig_load_misc(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + void __iomem *csr = adf_get_pmisc_base(accel_dev); + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_sect_h *subsec, 
*l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + struct { + char *id; + u64 ofs; + } misc_states[] = { + {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)}, + }; + int i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, + NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_MISCB_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < ARRAY_SIZE(misc_states); i++) { + struct adf_mstate_vreginfo info; + u32 regv; + + info.addr = ®v; + info.size = sizeof(regv); + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, + misc_states[i].id, + adf_mstate_set_vreg, + &info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to load sec %s\n", misc_states[i].id); + return -EINVAL; + } + ADF_CSR_WR(csr, misc_states[i].ofs, regv); + } + + return 0; +} + +static int adf_gen4_vfmig_load_generic(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct mig_user_sla dst_slas[RL_RP_CNT_PER_LEAF_MAX] = { }; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + u32 dst_sla_cnt; + struct { + char *id; + int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa); + struct adf_mstate_vreginfo info; + } gen_states[] = { + {ADF_MSTATE_IOV_INIT_IDS, adf_mstate_set_vreg, + {&vf_info->init, sizeof(vf_info->init)}}, + {ADF_MSTATE_COMPAT_VER_IDS, adf_mstate_compatver_check, + {&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}}, + {ADF_MSTATE_SLA_IDS, adf_mstate_sla_check, {dst_slas, 0}}, + }; + int i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, 
ADF_MSTATE_GEN_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_GEN_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < ARRAY_SIZE(gen_states); i++) { + if (gen_states[i].info.addr == dst_slas) { + dst_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, dst_slas); + gen_states[i].info.size = dst_sla_cnt * sizeof(struct mig_user_sla); + } + + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, + gen_states[i].id, + gen_states[i].action, + &gen_states[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + gen_states[i].id); + return -EINVAL; + } + } + + return 0; +} + +static int adf_gen4_vfmig_load_config(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + struct { + char *id; + int (*action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, void *opa); + struct adf_mstate_vreginfo info; + } setups[] = { + {ADF_MSTATE_GEN_CAP_IDS, adf_mstate_capmask_superset, + {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}}, + {ADF_MSTATE_GEN_SVCMAP_IDS, adf_mstate_capmask_equal, + {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}}, + {ADF_MSTATE_GEN_EXTDC_IDS, adf_mstate_capmask_superset, + {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}}, + }; + int i; + + subsec = adf_mstate_sect_lookup(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + ADF_MSTATE_CONFIG_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, subsec); + for (i = 0; i < 
ARRAY_SIZE(setups); i++) { + l2_subsec = adf_mstate_sect_lookup(&sub_sects_mgr, setups[i].id, + setups[i].action, &setups[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to load sec %s\n", + setups[i].id); + return -EINVAL; + } + } + + return 0; +} + +static int adf_gen4_vfmig_save_etr_regs(struct adf_mstate_mgr *subs, u8 *state, + u32 size, void *opa) +{ + struct adf_vf_bank_info *vf_bank_info = opa; + struct adf_accel_dev *accel_dev = vf_bank_info->accel_dev; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + u32 pf_bank_nr; + int ret; + + pf_bank_nr = vf_bank_info->bank_nr; + pf_bank_nr += vf_bank_info->vf_nr * hw_data->num_banks_per_vf; + + ret = hw_data->bank_state_save(accel_dev, pf_bank_nr, + (struct bank_state *)state); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save regs for vf%d bank%d\n", + vf_bank_info->vf_nr, vf_bank_info->bank_nr); + return ret; + } + + return sizeof(struct bank_state); +} + +static int adf_gen4_vfmig_save_etr_bank(struct adf_accel_dev *accel_dev, + u32 vf_nr, u32 bank_nr, + struct adf_mstate_mgr *mstate_mgr) +{ + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_vf_bank_info vf_bank_info; + struct adf_mstate_mgr sub_sects_mgr; + char bank_ids[ADF_MSTATE_ID_LEN]; + + snprintf(bank_ids, sizeof(bank_ids), ADF_MSTATE_BANK_IDX_IDS "%x", bank_nr); + + subsec = adf_mstate_sect_add(mstate_mgr, bank_ids, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to add sec %s for vf%d bank%d\n", + ADF_MSTATE_BANK_IDX_IDS, vf_nr, bank_nr); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + vf_bank_info.accel_dev = accel_dev; + vf_bank_info.vf_nr = vf_nr; + vf_bank_info.bank_nr = bank_nr; + l2_subsec = adf_mstate_sect_add(&sub_sects_mgr, ADF_MSTATE_ETR_REGS_IDS, + adf_gen4_vfmig_save_etr_regs, + &vf_bank_info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), + "Failed to add sec %s for vf%d bank%d\n", + ADF_MSTATE_ETR_REGS_IDS, vf_nr, 
bank_nr); + return -EINVAL; + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_etr(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec; + int ret, i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_ETRB_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_ETRB_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < hw_data->num_banks_per_vf; i++) { + ret = adf_gen4_vfmig_save_etr_bank(accel_dev, vf_nr, i, + &sub_sects_mgr); + if (ret) + return ret; + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_misc(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + void __iomem *csr = adf_get_pmisc_base(accel_dev); + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct adf_mstate_mgr sub_sects_mgr; + struct { + char *id; + u64 offset; + } misc_states[] = { + {ADF_MSTATE_VINTSRC_IDS, ADF_GEN4_VINTSOU_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTMSK_IDS, ADF_GEN4_VINTMSK_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTSRC_PF2VM_IDS, ADF_GEN4_VINTSOUPF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_VINTMSK_PF2VM_IDS, ADF_GEN4_VINTMSKPF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_PF2VM_IDS, ADF_GEN4_PF2VM_OFFSET(vf_nr)}, + {ADF_MSTATE_VM2PF_IDS, ADF_GEN4_VM2PF_OFFSET(vf_nr)}, + }; + ktime_t time_exp; + int i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_MISCB_IDS, NULL, NULL); + if (!subsec) { + 
dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_MISCB_IDS); + return -EINVAL; + } + + time_exp = ktime_add_us(ktime_get(), ADF_GEN4_PFVF_RSP_TIMEOUT_US); + while (!mutex_trylock(&vf_info->pfvf_mig_lock)) { + if (ktime_after(ktime_get(), time_exp)) { + dev_err(&GET_DEV(accel_dev), "Failed to get pfvf mig lock\n"); + return -ETIMEDOUT; + } + usleep_range(500, 1000); + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < ARRAY_SIZE(misc_states); i++) { + struct adf_mstate_vreginfo info; + u32 regv; + + info.addr = ®v; + info.size = sizeof(regv); + regv = ADF_CSR_RD(csr, misc_states[i].offset); + + l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, + misc_states[i].id, + &info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + misc_states[i].id); + mutex_unlock(&vf_info->pfvf_mig_lock); + return -EINVAL; + } + } + + mutex_unlock(&vf_info->pfvf_mig_lock); + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_generic(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct mig_user_sla src_slas[RL_RP_CNT_PER_LEAF_MAX] = { }; + u32 src_sla_cnt; + struct { + char *id; + struct adf_mstate_vreginfo info; + } gen_states[] = { + {ADF_MSTATE_IOV_INIT_IDS, + {&vf_info->init, sizeof(vf_info->init)}}, + {ADF_MSTATE_COMPAT_VER_IDS, + {&vf_info->vf_compat_ver, sizeof(vf_info->vf_compat_ver)}}, + {ADF_MSTATE_SLA_IDS, {src_slas, 0}}, + }; + int i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_GEN_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_GEN_IDS); + return -EINVAL; + } + + 
adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < ARRAY_SIZE(gen_states); i++) { + if (gen_states[i].info.addr == src_slas) { + src_sla_cnt = adf_gen4_vfmig_get_slas(accel_dev, vf_nr, src_slas); + gen_states[i].info.size = src_sla_cnt * sizeof(struct mig_user_sla); + } + + l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, + gen_states[i].id, + &gen_states[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + gen_states[i].id); + return -EINVAL; + } + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_config(struct adf_accel_dev *accel_dev, u32 vf_nr) +{ + struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr]; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_gen4_vfmig *vfmig = vf_info->mig_priv; + struct adf_mstate_mgr *mstate_mgr = vfmig->mstate_mgr; + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *subsec, *l2_subsec; + struct { + char *id; + struct adf_mstate_vreginfo info; + } setups[] = { + {ADF_MSTATE_GEN_CAP_IDS, + {&hw_data->accel_capabilities_mask, sizeof(hw_data->accel_capabilities_mask)}}, + {ADF_MSTATE_GEN_SVCMAP_IDS, + {&hw_data->ring_to_svc_map, sizeof(hw_data->ring_to_svc_map)}}, + {ADF_MSTATE_GEN_EXTDC_IDS, + {&hw_data->extended_dc_capabilities, sizeof(hw_data->extended_dc_capabilities)}}, + }; + int i; + + subsec = adf_mstate_sect_add(mstate_mgr, ADF_MSTATE_CONFIG_IDS, NULL, NULL); + if (!subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + ADF_MSTATE_CONFIG_IDS); + return -EINVAL; + } + + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mstate_mgr); + for (i = 0; i < ARRAY_SIZE(setups); i++) { + l2_subsec = adf_mstate_sect_add_vreg(&sub_sects_mgr, setups[i].id, + &setups[i].info); + if (!l2_subsec) { + dev_err(&GET_DEV(accel_dev), "Failed to add sec %s\n", + setups[i].id); + return -EINVAL; + } + } + adf_mstate_sect_update(mstate_mgr, &sub_sects_mgr, 
subsec); + + return 0; +} + +static int adf_gen4_vfmig_save_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + ret = adf_gen4_vfmig_save_setup(mdev); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save setup for vf_nr %d\n", vf_nr); + return ret; + } + + adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state + mdev->setup_size, + mdev->state_size - mdev->setup_size); + if (!adf_mstate_preamble_add(vfmig->mstate_mgr)) + return -EINVAL; + + ret = adf_gen4_vfmig_save_generic(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save generic state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_save_misc(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save misc bar state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_save_etr(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to save etr bar state for vf_nr %d\n", vf_nr); + return ret; + } + + adf_mstate_preamble_update(vfmig->mstate_mgr); + + return 0; +} + +static int adf_gen4_vfmig_load_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + ret = adf_gen4_vfmig_load_setup(mdev, mdev->state_size); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Failed to load setup for vf_nr %d\n", + vf_nr); + return ret; + } + + ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr, + mdev->state + mdev->remote_setup_size, + mdev->state_size - mdev->remote_setup_size, + NULL, NULL); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Invalid state for vf_nr %d\n", + vf_nr); + 
return ret; + } + + ret = adf_gen4_vfmig_load_generic(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load general state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_load_misc(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load misc bar state for vf_nr %d\n", vf_nr); + return ret; + } + + ret = adf_gen4_vfmig_load_etr(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load etr bar state for vf_nr %d\n", vf_nr); + return ret; + } + + return 0; +} + +static int adf_gen4_vfmig_save_setup(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + if (mdev->setup_size) + return 0; + + adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size); + if (!adf_mstate_preamble_add(vfmig->mstate_mgr)) + return -EINVAL; + + ret = adf_gen4_vfmig_save_config(accel_dev, mdev->vf_id); + if (ret) + return ret; + + adf_mstate_preamble_update(vfmig->mstate_mgr); + mdev->setup_size = adf_mstate_state_size(vfmig->mstate_mgr); + + return 0; +} + +static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + struct adf_accel_vf_info *vf_info; + struct adf_gen4_vfmig *vfmig; + u32 vf_nr = mdev->vf_id; + u32 setup_size; + int ret; + + vf_info = &accel_dev->pf.vf_info[vf_nr]; + vfmig = vf_info->mig_priv; + + if (mdev->remote_setup_size) + return 0; + + if (len < sizeof(struct adf_mstate_preh)) + return -EAGAIN; + + adf_mstate_mgr_init(vfmig->mstate_mgr, mdev->state, mdev->state_size); + setup_size = adf_mstate_state_size_from_remote(vfmig->mstate_mgr); + if (setup_size > mdev->state_size) + return -EINVAL; + + if (len < setup_size) + return -EAGAIN; + + ret = 
adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr, mdev->state, + setup_size, NULL, NULL); + if (ret) { + dev_err(&GET_DEV(accel_dev), "Invalide setup for vf_nr %d\n", + vf_nr); + return ret; + } + + mdev->remote_setup_size = setup_size; + + ret = adf_gen4_vfmig_load_config(accel_dev, vf_nr); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Failed to load config for vf_nr %d\n", vf_nr); + return ret; + } + + return 0; +} + +void adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops) +{ + vfmig_ops->init = adf_gen4_vfmig_init_device; + vfmig_ops->cleanup = adf_gen4_vfmig_cleanup_device; + vfmig_ops->reset = adf_gen4_vfmig_reset_device; + vfmig_ops->open = adf_gen4_vfmig_open_device; + vfmig_ops->close = adf_gen4_vfmig_close_device; + vfmig_ops->suspend = adf_gen4_vfmig_suspend_device; + vfmig_ops->resume = adf_gen4_vfmig_resume_device; + vfmig_ops->save_state = adf_gen4_vfmig_save_state; + vfmig_ops->load_state = adf_gen4_vfmig_load_state; + vfmig_ops->load_setup = adf_gen4_vfmig_load_setup; + vfmig_ops->save_setup = adf_gen4_vfmig_save_setup; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_vf_mig_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c new file mode 100644 index 000000000000..41cc763a74aa --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ + +#include +#include +#include "adf_mstate_mgr.h" + +#define ADF_MSTATE_MAGIC 0xADF5CAEA +#define ADF_MSTATE_VERSION 0x1 + +struct adf_mstate_sect_h { + u8 id[ADF_MSTATE_ID_LEN]; + u32 size; + u32 sub_sects; + u8 state[]; +}; + +u32 adf_mstate_state_size(struct adf_mstate_mgr *mgr) +{ + return mgr->state - mgr->buf; +} + +static inline u32 adf_mstate_avail_room(struct adf_mstate_mgr *mgr) +{ + return mgr->buf + mgr->size - mgr->state; +} + +void adf_mstate_mgr_init(struct adf_mstate_mgr *mgr, u8 *buf, u32 size) +{ + mgr->buf = buf; 
+ mgr->state = buf; + mgr->size = size; + mgr->n_sects = 0; +}; + +struct adf_mstate_mgr *adf_mstate_mgr_new(u8 *buf, u32 size) +{ + struct adf_mstate_mgr *mgr; + + mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + if (!mgr) + return NULL; + + adf_mstate_mgr_init(mgr, buf, size); + + return mgr; +} + +void adf_mstate_mgr_destroy(struct adf_mstate_mgr *mgr) +{ + kfree(mgr); +} + +void adf_mstate_mgr_init_from_parent(struct adf_mstate_mgr *mgr, + struct adf_mstate_mgr *p_mgr) +{ + adf_mstate_mgr_init(mgr, p_mgr->state, + p_mgr->size - adf_mstate_state_size(p_mgr)); +} + +void adf_mstate_mgr_init_from_psect(struct adf_mstate_mgr *mgr, + struct adf_mstate_sect_h *p_sect) +{ + adf_mstate_mgr_init(mgr, p_sect->state, p_sect->size); + mgr->n_sects = p_sect->sub_sects; +} + +static void adf_mstate_preamble_init(struct adf_mstate_preh *preamble) +{ + preamble->magic = ADF_MSTATE_MAGIC; + preamble->version = ADF_MSTATE_VERSION; + preamble->preh_len = sizeof(*preamble); + preamble->size = 0; + preamble->n_sects = 0; +} + +/* default preambles checker */ +static int adf_mstate_preamble_def_checker(struct adf_mstate_preh *preamble, + void *opaque) +{ + struct adf_mstate_mgr *mgr = opaque; + + if (preamble->magic != ADF_MSTATE_MAGIC || + preamble->version > ADF_MSTATE_VERSION || + preamble->preh_len > mgr->size) { + pr_debug("QAT: LM - Invalid state (magic=%#x, version=%#x, hlen=%u), state_size=%u\n", + preamble->magic, preamble->version, preamble->preh_len, + mgr->size); + return -EINVAL; + } + + return 0; +} + +struct adf_mstate_preh *adf_mstate_preamble_add(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_preh *pre = (struct adf_mstate_preh *)mgr->buf; + + if (adf_mstate_avail_room(mgr) < sizeof(*pre)) { + pr_err("QAT: LM - Not enough space for preamble\n"); + return NULL; + } + + adf_mstate_preamble_init(pre); + mgr->state += pre->preh_len; + + return pre; +} + +int adf_mstate_preamble_update(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_preh *preamble = (struct 
adf_mstate_preh *)mgr->buf; + + preamble->size = adf_mstate_state_size(mgr) - preamble->preh_len; + preamble->n_sects = mgr->n_sects; + + return 0; +} + +static void adf_mstate_dump_sect(struct adf_mstate_sect_h *sect, + const char *prefix) +{ + pr_debug("QAT: LM - %s QAT state section %s\n", prefix, sect->id); + print_hex_dump_debug("h-", DUMP_PREFIX_OFFSET, 16, 2, sect, + sizeof(*sect), true); + print_hex_dump_debug("s-", DUMP_PREFIX_OFFSET, 16, 2, sect->state, + sect->size, true); +} + +static inline void __adf_mstate_sect_update(struct adf_mstate_mgr *mgr, + struct adf_mstate_sect_h *sect, + u32 size, + u32 n_subsects) +{ + sect->size += size; + sect->sub_sects += n_subsects; + mgr->n_sects++; + mgr->state += sect->size; + + adf_mstate_dump_sect(sect, "Add"); +} + +void adf_mstate_sect_update(struct adf_mstate_mgr *p_mgr, + struct adf_mstate_mgr *curr_mgr, + struct adf_mstate_sect_h *sect) +{ + __adf_mstate_sect_update(p_mgr, sect, adf_mstate_state_size(curr_mgr), + curr_mgr->n_sects); +} + +static struct adf_mstate_sect_h *adf_mstate_sect_add_header(struct adf_mstate_mgr *mgr, + const char *id) +{ + struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)(mgr->state); + + if (adf_mstate_avail_room(mgr) < sizeof(*sect)) { + pr_debug("QAT: LM - Not enough space for header of QAT state sect %s\n", id); + return NULL; + } + + strscpy(sect->id, id, sizeof(sect->id)); + sect->size = 0; + sect->sub_sects = 0; + mgr->state += sizeof(*sect); + + return sect; +} + +struct adf_mstate_sect_h *adf_mstate_sect_add_vreg(struct adf_mstate_mgr *mgr, + const char *id, + struct adf_mstate_vreginfo *info) +{ + struct adf_mstate_sect_h *sect; + + sect = adf_mstate_sect_add_header(mgr, id); + if (!sect) + return NULL; + + if (adf_mstate_avail_room(mgr) < info->size) { + pr_debug("QAT: LM - Not enough space for QAT state sect %s, requires %u\n", + id, info->size); + return NULL; + } + + memcpy(sect->state, info->addr, info->size); + __adf_mstate_sect_update(mgr, sect, 
info->size, 0); + + return sect; +} + +struct adf_mstate_sect_h *adf_mstate_sect_add(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_populate populate, + void *opaque) +{ + struct adf_mstate_mgr sub_sects_mgr; + struct adf_mstate_sect_h *sect; + int avail_room, size; + + sect = adf_mstate_sect_add_header(mgr, id); + if (!sect) + return NULL; + + if (!populate) + return sect; + + avail_room = adf_mstate_avail_room(mgr); + adf_mstate_mgr_init_from_parent(&sub_sects_mgr, mgr); + + size = (*populate)(&sub_sects_mgr, sect->state, avail_room, opaque); + if (size < 0) + return NULL; + + size += adf_mstate_state_size(&sub_sects_mgr); + if (avail_room < size) { + pr_debug("QAT: LM - Not enough space for QAT state sect %s, requires %u\n", + id, size); + return NULL; + } + __adf_mstate_sect_update(mgr, sect, size, sub_sects_mgr.n_sects); + + return sect; +} + +static int adf_mstate_sect_validate(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_sect_h *start = (struct adf_mstate_sect_h *)mgr->state; + struct adf_mstate_sect_h *sect = start; + u64 end; + int i; + + end = (uintptr_t)mgr->buf + mgr->size; + for (i = 0; i < mgr->n_sects; i++) { + uintptr_t s_start = (uintptr_t)sect->state; + uintptr_t s_end = s_start + sect->size; + + if (s_end < s_start || s_end > end) { + pr_debug("QAT: LM - Corrupted state section (index=%u, size=%u) in state_mgr (size=%u, secs=%u)\n", + i, sect->size, mgr->size, mgr->n_sects); + return -EINVAL; + } + sect = (struct adf_mstate_sect_h *)s_end; + } + + pr_debug("QAT: LM - Scanned section (last child=%s, size=%lu) in state_mgr (size=%u, secs=%u)\n", + start->id, sizeof(struct adf_mstate_sect_h) * (ulong)(sect - start), + mgr->size, mgr->n_sects); + + return 0; +} + +u32 adf_mstate_state_size_from_remote(struct adf_mstate_mgr *mgr) +{ + struct adf_mstate_preh *preh = (struct adf_mstate_preh *)mgr->buf; + + return preh->preh_len + preh->size; +} + +int adf_mstate_mgr_init_from_remote(struct adf_mstate_mgr *mgr, u8 *buf, u32 size, + 
adf_mstate_preamble_checker pre_checker, + void *opaque) +{ + struct adf_mstate_preh *pre; + int ret; + + adf_mstate_mgr_init(mgr, buf, size); + pre = (struct adf_mstate_preh *)(mgr->buf); + + pr_debug("QAT: LM - Dump state preambles\n"); + print_hex_dump_debug("", DUMP_PREFIX_OFFSET, 16, 2, pre, pre->preh_len, 0); + + if (pre_checker) + ret = (*pre_checker)(pre, opaque); + else + ret = adf_mstate_preamble_def_checker(pre, mgr); + if (ret) + return ret; + + mgr->state = mgr->buf + pre->preh_len; + mgr->n_sects = pre->n_sects; + + return adf_mstate_sect_validate(mgr); +} + +struct adf_mstate_sect_h *adf_mstate_sect_lookup(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_action action, + void *opaque) +{ + struct adf_mstate_sect_h *sect = (struct adf_mstate_sect_h *)mgr->state; + struct adf_mstate_mgr sub_sects_mgr; + int i, ret; + + for (i = 0; i < mgr->n_sects; i++) { + if (!strncmp(sect->id, id, sizeof(sect->id))) + goto found; + + sect = (struct adf_mstate_sect_h *)(sect->state + sect->size); + } + + return NULL; + +found: + adf_mstate_dump_sect(sect, "Found"); + + adf_mstate_mgr_init_from_psect(&sub_sects_mgr, sect); + if (sect->sub_sects && adf_mstate_sect_validate(&sub_sects_mgr)) + return NULL; + + if (!action) + return sect; + + ret = (*action)(&sub_sects_mgr, sect->state, sect->size, opaque); + if (ret) + return NULL; + + return sect; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h new file mode 100644 index 000000000000..81d263a596c5 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_mstate_mgr.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ + +#ifndef ADF_MSTATE_MGR_H +#define ADF_MSTATE_MGR_H + +#define ADF_MSTATE_ID_LEN 8 + +#define ADF_MSTATE_ETRB_IDS "ETRBAR" +#define ADF_MSTATE_MISCB_IDS "MISCBAR" +#define ADF_MSTATE_EXTB_IDS "EXTBAR" +#define ADF_MSTATE_GEN_IDS "GENER" +#define 
ADF_MSTATE_CONFIG_IDS "CONFIG" +#define ADF_MSTATE_SECTION_NUM 5 + +#define ADF_MSTATE_BANK_IDX_IDS "bnk" + +#define ADF_MSTATE_ETR_REGS_IDS "mregs" +#define ADF_MSTATE_VINTSRC_IDS "visrc" +#define ADF_MSTATE_VINTMSK_IDS "vimsk" +#define ADF_MSTATE_SLA_IDS "sla" +#define ADF_MSTATE_IOV_INIT_IDS "iovinit" +#define ADF_MSTATE_COMPAT_VER_IDS "compver" +#define ADF_MSTATE_GEN_CAP_IDS "gencap" +#define ADF_MSTATE_GEN_SVCMAP_IDS "svcmap" +#define ADF_MSTATE_GEN_EXTDC_IDS "extdc" +#define ADF_MSTATE_VINTSRC_PF2VM_IDS "vispv" +#define ADF_MSTATE_VINTMSK_PF2VM_IDS "vimpv" +#define ADF_MSTATE_VM2PF_IDS "vm2pf" +#define ADF_MSTATE_PF2VM_IDS "pf2vm" + +struct adf_mstate_mgr { + u8 *buf; + u8 *state; + u32 size; + u32 n_sects; +}; + +struct adf_mstate_preh { + u32 magic; + u32 version; + u16 preh_len; + u16 n_sects; + u32 size; +}; + +struct adf_mstate_vreginfo { + void *addr; + u32 size; +}; + +struct adf_mstate_sect_h; + +typedef int (*adf_mstate_preamble_checker)(struct adf_mstate_preh *preamble, void *opa); +typedef int (*adf_mstate_populate)(struct adf_mstate_mgr *sub_mgr, u8 *buf, + u32 size, void *opa); +typedef int (*adf_mstate_action)(struct adf_mstate_mgr *sub_mgr, u8 *buf, u32 size, + void *opa); + +struct adf_mstate_mgr *adf_mstate_mgr_new(u8 *buf, u32 size); +void adf_mstate_mgr_destroy(struct adf_mstate_mgr *mgr); +void adf_mstate_mgr_init(struct adf_mstate_mgr *mgr, u8 *buf, u32 size); +void adf_mstate_mgr_init_from_parent(struct adf_mstate_mgr *mgr, + struct adf_mstate_mgr *p_mgr); +void adf_mstate_mgr_init_from_psect(struct adf_mstate_mgr *mgr, + struct adf_mstate_sect_h *p_sect); +int adf_mstate_mgr_init_from_remote(struct adf_mstate_mgr *mgr, + u8 *buf, u32 size, + adf_mstate_preamble_checker checker, + void *opaque); +struct adf_mstate_preh *adf_mstate_preamble_add(struct adf_mstate_mgr *mgr); +int adf_mstate_preamble_update(struct adf_mstate_mgr *mgr); +u32 adf_mstate_state_size(struct adf_mstate_mgr *mgr); +u32 adf_mstate_state_size_from_remote(struct 
adf_mstate_mgr *mgr); +void adf_mstate_sect_update(struct adf_mstate_mgr *p_mgr, + struct adf_mstate_mgr *curr_mgr, + struct adf_mstate_sect_h *sect); +struct adf_mstate_sect_h *adf_mstate_sect_add_vreg(struct adf_mstate_mgr *mgr, + const char *id, + struct adf_mstate_vreginfo *info); +struct adf_mstate_sect_h *adf_mstate_sect_add(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_populate populate, + void *opaque); +struct adf_mstate_sect_h *adf_mstate_sect_lookup(struct adf_mstate_mgr *mgr, + const char *id, + adf_mstate_action action, + void *opaque); +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c index 87a70c00c41e..8d645e7e04aa 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c @@ -26,10 +26,12 @@ static void adf_iov_send_resp(struct work_struct *work) u32 vf_nr = vf_info->vf_nr; bool ret; + mutex_lock(&vf_info->pfvf_mig_lock); ret = adf_recv_and_handle_vf2pf_msg(accel_dev, vf_nr); if (ret) /* re-enable interrupt on PF from this VF */ adf_enable_vf2pf_interrupts(accel_dev, 1 << vf_nr); + mutex_unlock(&vf_info->pfvf_mig_lock); kfree(pf2vf_resp); } @@ -62,6 +64,7 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) vf_info->vf_nr = i; mutex_init(&vf_info->pf2vf_lock); + mutex_init(&vf_info->pfvf_mig_lock); ratelimit_state_init(&vf_info->vf2pf_ratelimit, ADF_VF2PF_RATELIMIT_INTERVAL, ADF_VF2PF_RATELIMIT_BURST); @@ -138,8 +141,10 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) if (hw_data->configure_iov_threads) hw_data->configure_iov_threads(accel_dev, false); - for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) + for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) { mutex_destroy(&vf->pf2vf_lock); + mutex_destroy(&vf->pfvf_mig_lock); + } if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) { kfree(accel_dev->pf.vf_info); -- Gitee From 
4a3f9d71607c10838f044f68f3caf91e25f26d93 Mon Sep 17 00:00:00 2001 From: Xin Zeng Date: Fri, 26 Apr 2024 14:40:51 +0800 Subject: [PATCH 1032/2138] vfio/qat: Add vfio_pci driver for Intel QAT SR-IOV VF devices ANBZ: #9185 commit bb208810b1abf1c84870cfbe1cc9cf1a1d35c607 upstream. Intel-SIG: commit bb208810b1ab vfio/qat: Add vfio_pci driver for Intel QAT SR-IOV VF devices Backport to support Intel QAT live migration for in-tree driver Add vfio pci variant driver for Intel QAT SR-IOV VF devices. This driver registers to the vfio subsystem through the interfaces exposed by the subsystem. It follows the live migration protocol v2 defined in uapi/linux/vfio.h and interacts with Intel QAT PF driver through a set of interfaces defined in qat/qat_mig_dev.h to support live migration of Intel QAT VF devices. This version only covers migration for Intel QAT GEN4 VF devices. Co-developed-by: Yahui Cao Signed-off-by: Yahui Cao Signed-off-by: Xin Zeng Reviewed-by: Giovanni Cabiddu Reviewed-by: Kevin Tian Link: https://lore.kernel.org/r/20240426064051.2859652-1-xin.zeng@intel.com Signed-off-by: Alex Williamson [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- MAINTAINERS | 8 + drivers/vfio/pci/Kconfig | 2 + drivers/vfio/pci/Makefile | 2 + drivers/vfio/pci/qat/Kconfig | 12 + drivers/vfio/pci/qat/Makefile | 3 + drivers/vfio/pci/qat/main.c | 702 ++++++++++++++++++++++++++++++++++ 6 files changed, 729 insertions(+) create mode 100644 drivers/vfio/pci/qat/Kconfig create mode 100644 drivers/vfio/pci/qat/Makefile create mode 100644 drivers/vfio/pci/qat/main.c diff --git a/MAINTAINERS b/MAINTAINERS index 8e90e3cfc5f6..02ec5c36d214 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -22675,6 +22675,14 @@ L: kvm@vger.kernel.org S: Maintained F: drivers/vfio/platform/ +VFIO QAT PCI DRIVER +M: Xin Zeng +M: Giovanni Cabiddu +L: kvm@vger.kernel.org +L: qat-linux@intel.com +S: Supported +F: drivers/vfio/pci/qat/ 
+ VGA_SWITCHEROO R: Lukas Wunner S: Maintained diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig index 8125e5f37832..04ac975432a3 100644 --- a/drivers/vfio/pci/Kconfig +++ b/drivers/vfio/pci/Kconfig @@ -65,4 +65,6 @@ source "drivers/vfio/pci/hisilicon/Kconfig" source "drivers/vfio/pci/pds/Kconfig" +source "drivers/vfio/pci/qat/Kconfig" + endmenu diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile index 45167be462d8..52aa7423e6df 100644 --- a/drivers/vfio/pci/Makefile +++ b/drivers/vfio/pci/Makefile @@ -13,3 +13,5 @@ obj-$(CONFIG_MLX5_VFIO_PCI) += mlx5/ obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisilicon/ obj-$(CONFIG_PDS_VFIO_PCI) += pds/ + +obj-$(CONFIG_QAT_VFIO_PCI) += qat/ diff --git a/drivers/vfio/pci/qat/Kconfig b/drivers/vfio/pci/qat/Kconfig new file mode 100644 index 000000000000..bf52cfa4b595 --- /dev/null +++ b/drivers/vfio/pci/qat/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +config QAT_VFIO_PCI + tristate "VFIO support for QAT VF PCI devices" + select VFIO_PCI_CORE + depends on CRYPTO_DEV_QAT_4XXX + help + This provides migration support for Intel(R) QAT Virtual Function + using the VFIO framework. + + To compile this as a module, choose M here: the module + will be called qat_vfio_pci. If you don't know what to do here, + say N. 
diff --git a/drivers/vfio/pci/qat/Makefile b/drivers/vfio/pci/qat/Makefile new file mode 100644 index 000000000000..5fe5c4ec19d3 --- /dev/null +++ b/drivers/vfio/pci/qat/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_QAT_VFIO_PCI) += qat_vfio_pci.o +qat_vfio_pci-y := main.o diff --git a/drivers/vfio/pci/qat/main.c b/drivers/vfio/pci/qat/main.c new file mode 100644 index 000000000000..e36740a282e7 --- /dev/null +++ b/drivers/vfio/pci/qat/main.c @@ -0,0 +1,702 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * The migration data of each Intel QAT VF device is encapsulated into a + * 4096 bytes block. The data consists of two parts. + * The first is a pre-configured set of attributes of the VF being migrated, + * which are only set when it is created. This can be migrated during pre-copy + * stage and used for a device compatibility check. + * The second is the VF state. This includes the required MMIO regions and + * the shadow states maintained by the QAT PF driver. This part can only be + * saved when the VF is fully quiesced and be migrated during stop-copy stage. + * Both these 2 parts of data are saved in hierarchical structures including + * a preamble section and several raw state sections. + * When the pre-configured part of the migration data is fully retrieved from + * user space, the preamble section are used to validate the correctness of + * the data blocks and check the version compatibility. The raw state sections + * are then used to do a device compatibility check. + * When the device transits from RESUMING state, the VF states are extracted + * from the raw state sections of the VF state part of the migration data and + * then loaded into the device. 
+ */ + +struct qat_vf_migration_file { + struct file *filp; + /* protects migration region context */ + struct mutex lock; + bool disabled; + struct qat_vf_core_device *qat_vdev; + ssize_t filled_size; +}; + +struct qat_vf_core_device { + struct vfio_pci_core_device core_device; + struct qat_mig_dev *mdev; + /* protects migration state */ + struct mutex state_mutex; + enum vfio_device_mig_state mig_state; + struct qat_vf_migration_file *resuming_migf; + struct qat_vf_migration_file *saving_migf; +}; + +static int qat_vf_pci_open_device(struct vfio_device *core_vdev) +{ + struct qat_vf_core_device *qat_vdev = + container_of(core_vdev, struct qat_vf_core_device, + core_device.vdev); + struct vfio_pci_core_device *vdev = &qat_vdev->core_device; + int ret; + + ret = vfio_pci_core_enable(vdev); + if (ret) + return ret; + + ret = qat_vfmig_open(qat_vdev->mdev); + if (ret) { + vfio_pci_core_disable(vdev); + return ret; + } + qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING; + + vfio_pci_core_finish_enable(vdev); + + return 0; +} + +static void qat_vf_disable_fd(struct qat_vf_migration_file *migf) +{ + mutex_lock(&migf->lock); + migf->disabled = true; + migf->filp->f_pos = 0; + migf->filled_size = 0; + mutex_unlock(&migf->lock); +} + +static void qat_vf_disable_fds(struct qat_vf_core_device *qat_vdev) +{ + if (qat_vdev->resuming_migf) { + qat_vf_disable_fd(qat_vdev->resuming_migf); + fput(qat_vdev->resuming_migf->filp); + qat_vdev->resuming_migf = NULL; + } + + if (qat_vdev->saving_migf) { + qat_vf_disable_fd(qat_vdev->saving_migf); + fput(qat_vdev->saving_migf->filp); + qat_vdev->saving_migf = NULL; + } +} + +static void qat_vf_pci_close_device(struct vfio_device *core_vdev) +{ + struct qat_vf_core_device *qat_vdev = container_of(core_vdev, + struct qat_vf_core_device, core_device.vdev); + + qat_vfmig_close(qat_vdev->mdev); + qat_vf_disable_fds(qat_vdev); + vfio_pci_core_close_device(core_vdev); +} + +static long qat_vf_precopy_ioctl(struct file *filp, unsigned int cmd, + 
unsigned long arg) +{ + struct qat_vf_migration_file *migf = filp->private_data; + struct qat_vf_core_device *qat_vdev = migf->qat_vdev; + struct qat_mig_dev *mig_dev = qat_vdev->mdev; + struct vfio_precopy_info info; + loff_t *pos = &filp->f_pos; + unsigned long minsz; + int ret = 0; + + if (cmd != VFIO_MIG_GET_PRECOPY_INFO) + return -ENOTTY; + + minsz = offsetofend(struct vfio_precopy_info, dirty_bytes); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + if (info.argsz < minsz) + return -EINVAL; + + mutex_lock(&qat_vdev->state_mutex); + if (qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY && + qat_vdev->mig_state != VFIO_DEVICE_STATE_PRE_COPY_P2P) { + mutex_unlock(&qat_vdev->state_mutex); + return -EINVAL; + } + + mutex_lock(&migf->lock); + if (migf->disabled) { + ret = -ENODEV; + goto out; + } + + if (*pos > mig_dev->setup_size) { + ret = -EINVAL; + goto out; + } + + info.dirty_bytes = 0; + info.initial_bytes = mig_dev->setup_size - *pos; + +out: + mutex_unlock(&migf->lock); + mutex_unlock(&qat_vdev->state_mutex); + if (ret) + return ret; + return copy_to_user((void __user *)arg, &info, minsz) ? 
-EFAULT : 0; +} + +static ssize_t qat_vf_save_read(struct file *filp, char __user *buf, + size_t len, loff_t *pos) +{ + struct qat_vf_migration_file *migf = filp->private_data; + struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev; + ssize_t done = 0; + loff_t *offs; + int ret; + + if (pos) + return -ESPIPE; + offs = &filp->f_pos; + + mutex_lock(&migf->lock); + if (*offs > migf->filled_size || *offs < 0) { + done = -EINVAL; + goto out_unlock; + } + + if (migf->disabled) { + done = -ENODEV; + goto out_unlock; + } + + len = min_t(size_t, migf->filled_size - *offs, len); + if (len) { + ret = copy_to_user(buf, mig_dev->state + *offs, len); + if (ret) { + done = -EFAULT; + goto out_unlock; + } + *offs += len; + done = len; + } + +out_unlock: + mutex_unlock(&migf->lock); + return done; +} + +static int qat_vf_release_file(struct inode *inode, struct file *filp) +{ + struct qat_vf_migration_file *migf = filp->private_data; + + qat_vf_disable_fd(migf); + mutex_destroy(&migf->lock); + kfree(migf); + + return 0; +} + +static const struct file_operations qat_vf_save_fops = { + .owner = THIS_MODULE, + .read = qat_vf_save_read, + .unlocked_ioctl = qat_vf_precopy_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .release = qat_vf_release_file, + .llseek = no_llseek, +}; + +static int qat_vf_save_state(struct qat_vf_core_device *qat_vdev, + struct qat_vf_migration_file *migf) +{ + int ret; + + ret = qat_vfmig_save_state(qat_vdev->mdev); + if (ret) + return ret; + migf->filled_size = qat_vdev->mdev->state_size; + + return 0; +} + +static int qat_vf_save_setup(struct qat_vf_core_device *qat_vdev, + struct qat_vf_migration_file *migf) +{ + int ret; + + ret = qat_vfmig_save_setup(qat_vdev->mdev); + if (ret) + return ret; + migf->filled_size = qat_vdev->mdev->setup_size; + + return 0; +} + +/* + * Allocate a file handler for user space and then save the migration data for + * the device being migrated. If this is called in the pre-copy stage, save the + * pre-configured device data. 
Otherwise, if this is called in the stop-copy + * stage, save the device state. In both cases, update the data size which can + * then be read from user space. + */ +static struct qat_vf_migration_file * +qat_vf_save_device_data(struct qat_vf_core_device *qat_vdev, bool pre_copy) +{ + struct qat_vf_migration_file *migf; + int ret; + + migf = kzalloc(sizeof(*migf), GFP_KERNEL); + if (!migf) + return ERR_PTR(-ENOMEM); + + migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_save_fops, + migf, O_RDONLY); + ret = PTR_ERR_OR_ZERO(migf->filp); + if (ret) { + kfree(migf); + return ERR_PTR(ret); + } + + stream_open(migf->filp->f_inode, migf->filp); + mutex_init(&migf->lock); + + if (pre_copy) + ret = qat_vf_save_setup(qat_vdev, migf); + else + ret = qat_vf_save_state(qat_vdev, migf); + if (ret) { + fput(migf->filp); + return ERR_PTR(ret); + } + + migf->qat_vdev = qat_vdev; + + return migf; +} + +static ssize_t qat_vf_resume_write(struct file *filp, const char __user *buf, + size_t len, loff_t *pos) +{ + struct qat_vf_migration_file *migf = filp->private_data; + struct qat_mig_dev *mig_dev = migf->qat_vdev->mdev; + loff_t end, *offs; + ssize_t done = 0; + int ret; + + if (pos) + return -ESPIPE; + offs = &filp->f_pos; + + if (*offs < 0 || + check_add_overflow((loff_t)len, *offs, &end)) + return -EOVERFLOW; + + if (end > mig_dev->state_size) + return -ENOMEM; + + mutex_lock(&migf->lock); + if (migf->disabled) { + done = -ENODEV; + goto out_unlock; + } + + ret = copy_from_user(mig_dev->state + *offs, buf, len); + if (ret) { + done = -EFAULT; + goto out_unlock; + } + *offs += len; + migf->filled_size += len; + + /* + * Load the pre-configured device data first to check if the target + * device is compatible with the source device. 
+ */ + ret = qat_vfmig_load_setup(mig_dev, migf->filled_size); + if (ret && ret != -EAGAIN) { + done = ret; + goto out_unlock; + } + done = len; + +out_unlock: + mutex_unlock(&migf->lock); + return done; +} + +static const struct file_operations qat_vf_resume_fops = { + .owner = THIS_MODULE, + .write = qat_vf_resume_write, + .release = qat_vf_release_file, + .llseek = no_llseek, +}; + +static struct qat_vf_migration_file * +qat_vf_resume_device_data(struct qat_vf_core_device *qat_vdev) +{ + struct qat_vf_migration_file *migf; + int ret; + + migf = kzalloc(sizeof(*migf), GFP_KERNEL); + if (!migf) + return ERR_PTR(-ENOMEM); + + migf->filp = anon_inode_getfile("qat_vf_mig", &qat_vf_resume_fops, migf, O_WRONLY); + ret = PTR_ERR_OR_ZERO(migf->filp); + if (ret) { + kfree(migf); + return ERR_PTR(ret); + } + + migf->qat_vdev = qat_vdev; + migf->filled_size = 0; + stream_open(migf->filp->f_inode, migf->filp); + mutex_init(&migf->lock); + + return migf; +} + +static int qat_vf_load_device_data(struct qat_vf_core_device *qat_vdev) +{ + return qat_vfmig_load_state(qat_vdev->mdev); +} + +static struct file *qat_vf_pci_step_device_state(struct qat_vf_core_device *qat_vdev, u32 new) +{ + u32 cur = qat_vdev->mig_state; + int ret; + + /* + * As the device is not capable of just stopping P2P DMAs, suspend the + * device completely once any of the P2P states are reached. + * When it is suspended, all its MMIO registers can still be operated + * correctly, jobs submitted through ring are queued while no jobs are + * processed by the device. The MMIO states can be safely migrated to + * the target VF during stop-copy stage and restored correctly in the + * target VF. All queued jobs can be resumed then. 
+ */ + if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) || + (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) { + ret = qat_vfmig_suspend(qat_vdev->mdev); + if (ret) + return ERR_PTR(ret); + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) || + (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_PRE_COPY)) { + qat_vfmig_resume(qat_vdev->mdev); + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) || + (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P)) + return NULL; + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_STOP_COPY) { + struct qat_vf_migration_file *migf; + + migf = qat_vf_save_device_data(qat_vdev, false); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + qat_vdev->saving_migf = migf; + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RESUMING) { + struct qat_vf_migration_file *migf; + + migf = qat_vf_resume_device_data(qat_vdev); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + qat_vdev->resuming_migf = migf; + return migf->filp; + } + + if ((cur == VFIO_DEVICE_STATE_STOP_COPY && new == VFIO_DEVICE_STATE_STOP) || + (cur == VFIO_DEVICE_STATE_PRE_COPY && new == VFIO_DEVICE_STATE_RUNNING) || + (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == VFIO_DEVICE_STATE_RUNNING_P2P)) { + qat_vf_disable_fds(qat_vdev); + return NULL; + } + + if ((cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_PRE_COPY) || + (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_PRE_COPY_P2P)) { + struct qat_vf_migration_file *migf; + + migf = qat_vf_save_device_data(qat_vdev, true); + if (IS_ERR(migf)) + return ERR_CAST(migf); + get_file(migf->filp); + qat_vdev->saving_migf = migf; + return migf->filp; + } + + if (cur == VFIO_DEVICE_STATE_PRE_COPY_P2P && new == 
VFIO_DEVICE_STATE_STOP_COPY) { + struct qat_vf_migration_file *migf = qat_vdev->saving_migf; + + if (!migf) + return ERR_PTR(-EINVAL); + ret = qat_vf_save_state(qat_vdev, migf); + if (ret) + return ERR_PTR(ret); + return NULL; + } + + if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) { + ret = qat_vf_load_device_data(qat_vdev); + if (ret) + return ERR_PTR(ret); + + qat_vf_disable_fds(qat_vdev); + return NULL; + } + + /* vfio_mig_get_next_state() does not use arcs other than the above */ + WARN_ON(true); + return ERR_PTR(-EINVAL); +} + +static void qat_vf_reset_done(struct qat_vf_core_device *qat_vdev) +{ + qat_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING; + qat_vfmig_reset(qat_vdev->mdev); + qat_vf_disable_fds(qat_vdev); +} + +static struct file *qat_vf_pci_set_device_state(struct vfio_device *vdev, + enum vfio_device_mig_state new_state) +{ + struct qat_vf_core_device *qat_vdev = container_of(vdev, + struct qat_vf_core_device, core_device.vdev); + enum vfio_device_mig_state next_state; + struct file *res = NULL; + int ret; + + mutex_lock(&qat_vdev->state_mutex); + while (new_state != qat_vdev->mig_state) { + ret = vfio_mig_get_next_state(vdev, qat_vdev->mig_state, + new_state, &next_state); + if (ret) { + res = ERR_PTR(ret); + break; + } + res = qat_vf_pci_step_device_state(qat_vdev, next_state); + if (IS_ERR(res)) + break; + qat_vdev->mig_state = next_state; + if (WARN_ON(res && new_state != qat_vdev->mig_state)) { + fput(res); + res = ERR_PTR(-EINVAL); + break; + } + } + mutex_unlock(&qat_vdev->state_mutex); + + return res; +} + +static int qat_vf_pci_get_device_state(struct vfio_device *vdev, + enum vfio_device_mig_state *curr_state) +{ + struct qat_vf_core_device *qat_vdev = container_of(vdev, + struct qat_vf_core_device, core_device.vdev); + + mutex_lock(&qat_vdev->state_mutex); + *curr_state = qat_vdev->mig_state; + mutex_unlock(&qat_vdev->state_mutex); + + return 0; +} + +static int qat_vf_pci_get_data_size(struct vfio_device *vdev, + 
unsigned long *stop_copy_length) +{ + struct qat_vf_core_device *qat_vdev = container_of(vdev, + struct qat_vf_core_device, core_device.vdev); + + mutex_lock(&qat_vdev->state_mutex); + *stop_copy_length = qat_vdev->mdev->state_size; + mutex_unlock(&qat_vdev->state_mutex); + + return 0; +} + +static const struct vfio_migration_ops qat_vf_pci_mig_ops = { + .migration_set_state = qat_vf_pci_set_device_state, + .migration_get_state = qat_vf_pci_get_device_state, + .migration_get_data_size = qat_vf_pci_get_data_size, +}; + +static void qat_vf_pci_release_dev(struct vfio_device *core_vdev) +{ + struct qat_vf_core_device *qat_vdev = container_of(core_vdev, + struct qat_vf_core_device, core_device.vdev); + + qat_vfmig_cleanup(qat_vdev->mdev); + qat_vfmig_destroy(qat_vdev->mdev); + mutex_destroy(&qat_vdev->state_mutex); + vfio_pci_core_release_dev(core_vdev); +} + +static int qat_vf_pci_init_dev(struct vfio_device *core_vdev) +{ + struct qat_vf_core_device *qat_vdev = container_of(core_vdev, + struct qat_vf_core_device, core_device.vdev); + struct qat_mig_dev *mdev; + struct pci_dev *parent; + int ret, vf_id; + + core_vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P | + VFIO_MIGRATION_PRE_COPY; + core_vdev->mig_ops = &qat_vf_pci_mig_ops; + + ret = vfio_pci_core_init_dev(core_vdev); + if (ret) + return ret; + + mutex_init(&qat_vdev->state_mutex); + + parent = pci_physfn(qat_vdev->core_device.pdev); + vf_id = pci_iov_vf_id(qat_vdev->core_device.pdev); + if (vf_id < 0) { + ret = -ENODEV; + goto err_rel; + } + + mdev = qat_vfmig_create(parent, vf_id); + if (IS_ERR(mdev)) { + ret = PTR_ERR(mdev); + goto err_rel; + } + + ret = qat_vfmig_init(mdev); + if (ret) + goto err_destroy; + + qat_vdev->mdev = mdev; + + return 0; + +err_destroy: + qat_vfmig_destroy(mdev); +err_rel: + vfio_pci_core_release_dev(core_vdev); + return ret; +} + +static const struct vfio_device_ops qat_vf_pci_ops = { + .name = "qat-vf-vfio-pci", + .init = qat_vf_pci_init_dev, + .release = 
qat_vf_pci_release_dev, + .open_device = qat_vf_pci_open_device, + .close_device = qat_vf_pci_close_device, + .ioctl = vfio_pci_core_ioctl, + .read = vfio_pci_core_read, + .write = vfio_pci_core_write, + .mmap = vfio_pci_core_mmap, + .request = vfio_pci_core_request, + .match = vfio_pci_core_match, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, + .detach_ioas = vfio_iommufd_physical_detach_ioas, +}; + +static struct qat_vf_core_device *qat_vf_drvdata(struct pci_dev *pdev) +{ + struct vfio_pci_core_device *core_device = pci_get_drvdata(pdev); + + return container_of(core_device, struct qat_vf_core_device, core_device); +} + +static void qat_vf_pci_aer_reset_done(struct pci_dev *pdev) +{ + struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev); + + if (!qat_vdev->mdev) + return; + + mutex_lock(&qat_vdev->state_mutex); + qat_vf_reset_done(qat_vdev); + mutex_unlock(&qat_vdev->state_mutex); +} + +static int +qat_vf_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct qat_vf_core_device *qat_vdev; + int ret; + + qat_vdev = vfio_alloc_device(qat_vf_core_device, core_device.vdev, dev, &qat_vf_pci_ops); + if (IS_ERR(qat_vdev)) + return PTR_ERR(qat_vdev); + + pci_set_drvdata(pdev, &qat_vdev->core_device); + ret = vfio_pci_core_register_device(&qat_vdev->core_device); + if (ret) + goto out_put_device; + + return 0; + +out_put_device: + vfio_put_device(&qat_vdev->core_device.vdev); + return ret; +} + +static void qat_vf_vfio_pci_remove(struct pci_dev *pdev) +{ + struct qat_vf_core_device *qat_vdev = qat_vf_drvdata(pdev); + + vfio_pci_core_unregister_device(&qat_vdev->core_device); + vfio_put_device(&qat_vdev->core_device.vdev); +} + +static const struct pci_device_id qat_vf_vfio_pci_table[] = { + /* Intel QAT GEN4 4xxx VF device */ + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4941) }, + { 
PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4943) }, + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_INTEL, 0x4945) }, + {} +}; +MODULE_DEVICE_TABLE(pci, qat_vf_vfio_pci_table); + +static const struct pci_error_handlers qat_vf_err_handlers = { + .reset_done = qat_vf_pci_aer_reset_done, + .error_detected = vfio_pci_core_aer_err_detected, +}; + +static struct pci_driver qat_vf_vfio_pci_driver = { + .name = "qat_vfio_pci", + .id_table = qat_vf_vfio_pci_table, + .probe = qat_vf_vfio_pci_probe, + .remove = qat_vf_vfio_pci_remove, + .err_handler = &qat_vf_err_handlers, + .driver_managed_dma = true, +}; +module_pci_driver(qat_vf_vfio_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Xin Zeng "); +MODULE_DESCRIPTION("QAT VFIO PCI - VFIO PCI driver with live migration support for Intel(R) QAT GEN4 device family"); +MODULE_IMPORT_NS(CRYPTO_QAT); -- Gitee From fc7fdd9bdef8b0415b219b032ad92b36dcb2adec Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 25 Mar 2024 14:44:55 -0600 Subject: [PATCH 1033/2138] crypto: qat - Avoid -Wflex-array-member-not-at-end warnings ANBZ: #9185 commit 140e4c85d54045ecd67f1d50fdad0fe2ecc088eb upstream. Intel-SIG: commit 140e4c85d540 crypto: qat - Avoid -Wflex-array-member-not-at-end warnings Backport to support Intel QAT live migration for in-tree driver -Wflex-array-member-not-at-end is coming in GCC-14, and we are getting ready to enable it globally. Use the `__struct_group()` helper to separate the flexible array from the rest of the members in flexible `struct qat_alg_buf_list`, through tagged `struct qat_alg_buf_list_hdr`, and avoid embedding the flexible-array member in the middle of `struct qat_alg_fixed_buf_list`. Also, use `container_of()` whenever we need to retrieve a pointer to the flexible structure. 
So, with these changes, fix the following warnings: drivers/crypto/intel/qat/qat_common/qat_bl.h:25:33: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] drivers/crypto/intel/qat/qat_common/qat_bl.h:25:33: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] drivers/crypto/intel/qat/qat_common/qat_bl.h:25:33: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] drivers/crypto/intel/qat/qat_common/qat_bl.h:25:33: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] drivers/crypto/intel/qat/qat_common/qat_bl.h:25:33: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] drivers/crypto/intel/qat/qat_common/qat_bl.h:25:33: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] drivers/crypto/intel/qat/qat_common/qat_bl.h:25:33: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] drivers/crypto/intel/qat/qat_common/qat_bl.h:25:33: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end] Link: https://github.com/KSPP/linux/issues/202 Signed-off-by: Gustavo A. R. 
Silva Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/crypto/intel/qat/qat_common/qat_bl.c | 6 ++++-- drivers/crypto/intel/qat/qat_common/qat_bl.h | 11 +++++++---- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c index 76baed0a76c0..338acf29c487 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.c +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c @@ -81,7 +81,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, if (unlikely(!bufl)) return -ENOMEM; } else { - bufl = &buf->sgl_src.sgl_hdr; + bufl = container_of(&buf->sgl_src.sgl_hdr, + struct qat_alg_buf_list, hdr); memset(bufl, 0, sizeof(struct qat_alg_buf_list)); buf->sgl_src_valid = true; } @@ -139,7 +140,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, if (unlikely(!buflout)) goto err_in; } else { - buflout = &buf->sgl_dst.sgl_hdr; + buflout = container_of(&buf->sgl_dst.sgl_hdr, + struct qat_alg_buf_list, hdr); memset(buflout, 0, sizeof(struct qat_alg_buf_list)); buf->sgl_dst_valid = true; } diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h index d87e4f35ac39..85bc32a9ec0e 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.h +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h @@ -15,14 +15,17 @@ struct qat_alg_buf { } __packed; struct qat_alg_buf_list { - u64 resrvd; - u32 num_bufs; - u32 num_mapped_bufs; + /* New members must be added within the __struct_group() macro below. 
*/ + __struct_group(qat_alg_buf_list_hdr, hdr, __packed, + u64 resrvd; + u32 num_bufs; + u32 num_mapped_bufs; + ); struct qat_alg_buf buffers[]; } __packed; struct qat_alg_fixed_buf_list { - struct qat_alg_buf_list sgl_hdr; + struct qat_alg_buf_list_hdr sgl_hdr; struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC]; } __packed __aligned(64); -- Gitee From 44aa583b70a5d9f4ae6e914deb480a43eb6af7a8 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 2 Apr 2024 09:13:55 +0100 Subject: [PATCH 1034/2138] crypto: qat - Fix spelling mistake "Invalide" -> "Invalid" ANBZ: #9185 commit f5c2cf9d14be283e5240c04d03ad96577d55f9f4 upstream. Intel-SIG: commit f5c2cf9d14be crypto: qat - Fix spelling mistake "Invalide" -> "Invalid" Backport to support Intel QAT live migration for in-tree driver There is a spelling mistake in a dev_err message. Fix it. Signed-off-by: Colin Ian King Acked-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c index 78a39cfe196f..a62eb5e8dbe6 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.c @@ -976,7 +976,7 @@ static int adf_gen4_vfmig_load_setup(struct qat_mig_dev *mdev, int len) ret = adf_mstate_mgr_init_from_remote(vfmig->mstate_mgr, mdev->state, setup_size, NULL, NULL); if (ret) { - dev_err(&GET_DEV(accel_dev), "Invalide setup for vf_nr %d\n", + dev_err(&GET_DEV(accel_dev), "Invalid setup for vf_nr %d\n", vf_nr); return ret; } -- Gitee From a6b1cc51501407b87f32e650cd517b7746341053 Mon Sep 17 00:00:00 2001 From: Damian Muszynski Date: Thu, 11 Apr 2024 11:24:58 +0200 Subject: [PATCH 1035/2138] crypto: qat - implement 
dh fallback for primes > 4K ANBZ: #9185 commit 5d5bd24f415516b212d56e8a66fffd40cdaeab30 upstream. Intel-SIG: commit 5d5bd24f4155 crypto: qat - implement dh fallback for primes > 4K Backport to support Intel QAT live migration for in-tree driver The Intel QAT driver provides support for the Diffie-Hellman (DH) algorithm, limited to prime numbers up to 4K. This driver is used by default on platforms with integrated QAT hardware for all DH requests. This has led to failures with algorithms requiring larger prime sizes, such as ffdhe6144. alg: ffdhe6144(dh): test failed on vector 1, err=-22 alg: self-tests for ffdhe6144(qat-dh) (ffdhe6144(dh)) failed (rc=-22) Implement a fallback mechanism when an unsupported request is received. Signed-off-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- .../intel/qat/qat_common/qat_asym_algs.c | 66 +++++++++++++++++-- 1 file changed, 60 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c index 4128200a9032..85c682e248fb 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_asym_algs.c @@ -110,6 +110,8 @@ struct qat_dh_ctx { unsigned int p_size; bool g2; struct qat_crypto_instance *inst; + struct crypto_kpp *ftfm; + bool fallback; } __packed __aligned(64); struct qat_asym_request { @@ -381,6 +383,36 @@ static int qat_dh_compute_value(struct kpp_request *req) return ret; } +static int qat_dh_generate_public_key(struct kpp_request *req) +{ + struct kpp_request *nreq = kpp_request_ctx(req); + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + + if (ctx->fallback) { + memcpy(nreq, req, sizeof(*req)); + kpp_request_set_tfm(nreq, ctx->ftfm); + return 
crypto_kpp_generate_public_key(nreq); + } + + return qat_dh_compute_value(req); +} + +static int qat_dh_compute_shared_secret(struct kpp_request *req) +{ + struct kpp_request *nreq = kpp_request_ctx(req); + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + + if (ctx->fallback) { + memcpy(nreq, req, sizeof(*req)); + kpp_request_set_tfm(nreq, ctx->ftfm); + return crypto_kpp_compute_shared_secret(nreq); + } + + return qat_dh_compute_value(req); +} + static int qat_dh_check_params_length(unsigned int p_len) { switch (p_len) { @@ -398,9 +430,6 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) struct qat_crypto_instance *inst = ctx->inst; struct device *dev = &GET_DEV(inst->accel_dev); - if (qat_dh_check_params_length(params->p_size << 3)) - return -EINVAL; - ctx->p_size = params->p_size; ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL); if (!ctx->p) @@ -454,6 +483,13 @@ static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf, if (crypto_dh_decode_key(buf, len, ¶ms) < 0) return -EINVAL; + if (qat_dh_check_params_length(params.p_size << 3)) { + ctx->fallback = true; + return crypto_kpp_set_secret(ctx->ftfm, buf, len); + } + + ctx->fallback = false; + /* Free old secret if any */ qat_dh_clear_ctx(dev, ctx); @@ -481,6 +517,9 @@ static unsigned int qat_dh_max_size(struct crypto_kpp *tfm) { struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); + if (ctx->fallback) + return crypto_kpp_maxsize(ctx->ftfm); + return ctx->p_size; } @@ -489,11 +528,22 @@ static int qat_dh_init_tfm(struct crypto_kpp *tfm) struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); struct qat_crypto_instance *inst = qat_crypto_get_instance_node(numa_node_id()); + const char *alg = kpp_alg_name(tfm); + unsigned int reqsize; if (!inst) return -EINVAL; - kpp_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64); + ctx->ftfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->ftfm)) + return 
PTR_ERR(ctx->ftfm); + + crypto_kpp_set_flags(ctx->ftfm, crypto_kpp_get_flags(tfm)); + + reqsize = max(sizeof(struct qat_asym_request) + 64, + sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->ftfm)); + + kpp_set_reqsize(tfm, reqsize); ctx->p_size = 0; ctx->g2 = false; @@ -506,6 +556,9 @@ static void qat_dh_exit_tfm(struct crypto_kpp *tfm) struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm); struct device *dev = &GET_DEV(ctx->inst->accel_dev); + if (ctx->ftfm) + crypto_free_kpp(ctx->ftfm); + qat_dh_clear_ctx(dev, ctx); qat_crypto_put_instance(ctx->inst); } @@ -1265,8 +1318,8 @@ static struct akcipher_alg rsa = { static struct kpp_alg dh = { .set_secret = qat_dh_set_secret, - .generate_public_key = qat_dh_compute_value, - .compute_shared_secret = qat_dh_compute_value, + .generate_public_key = qat_dh_generate_public_key, + .compute_shared_secret = qat_dh_compute_shared_secret, .max_size = qat_dh_max_size, .init = qat_dh_init_tfm, .exit = qat_dh_exit_tfm, @@ -1276,6 +1329,7 @@ static struct kpp_alg dh = { .cra_priority = 1000, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct qat_dh_ctx), + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; -- Gitee From 22d91f9b98135491d6f8bb89ba01b0b7a5def5c5 Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Fri, 12 Apr 2024 13:24:02 +0100 Subject: [PATCH 1036/2138] crypto: qat - improve error message in adf_get_arbiter_mapping() ANBZ: #9185 commit 4a4fc6c0c7fe29f2538013a57ebd7813ec6c12a8 upstream. Intel-SIG: commit 4a4fc6c0c7fe crypto: qat - improve error message in adf_get_arbiter_mapping() Backport to support Intel QAT live migration for in-tree driver Improve error message to be more readable. 
Fixes: 5da6a2d5353e ("crypto: qat - generate dynamically arbiter mappings") Signed-off-by: Adam Guerin Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 2 +- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index d255cb3ebd9c..78f0ea49254d 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -298,7 +298,7 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) { if (adf_gen4_init_thd2arb_map(accel_dev)) dev_warn(&GET_DEV(accel_dev), - "Generate of the thread to arbiter map failed"); + "Failed to generate thread to arbiter mapping"); return GET_HW_DATA(accel_dev)->thd_to_arb_map; } diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index 7d48985c99d9..bbd92c017c28 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -210,7 +210,7 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) { if (adf_gen4_init_thd2arb_map(accel_dev)) dev_warn(&GET_DEV(accel_dev), - "Generate of the thread to arbiter map failed"); + "Failed to generate thread to arbiter mapping"); return GET_HW_DATA(accel_dev)->thd_to_arb_map; } -- Gitee From 1809cb7b24ad00c2a47cda030d4410ad0545e99f Mon Sep 17 00:00:00 2001 From: Adam Guerin Date: Fri, 12 Apr 2024 13:24:03 +0100 Subject: [PATCH 1037/2138] crypto: qat - improve error logging to be consistent across features ANBZ: #9185 commit d281a28bd2a94d72c440457e05a2f04a52f15947 upstream. 
Intel-SIG: commit d281a28bd2a9 crypto: qat - improve error logging to be consistent across features Backport to support Intel QAT live migration for in-tree driver Improve error logging in rate limiting feature. Staying consistent with the error logging found in the telemetry feature. Fixes: d9fb8408376e ("crypto: qat - add rate limiting feature to qat_4xxx") Signed-off-by: Adam Guerin Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- drivers/crypto/intel/qat/qat_common/adf_rl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c index 65f752f4792a..346ef8bee99d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_rl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -1125,7 +1125,7 @@ int adf_rl_start(struct adf_accel_dev *accel_dev) } if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) { - dev_info(&GET_DEV(accel_dev), "not supported\n"); + dev_info(&GET_DEV(accel_dev), "feature not supported by FW\n"); ret = -EOPNOTSUPP; goto ret_free; } -- Gitee From 91bc6993dc4931ee2575be4e9bfb4e5594356031 Mon Sep 17 00:00:00 2001 From: Lucas Segarra Fernandez Date: Tue, 16 Apr 2024 12:33:37 +0200 Subject: [PATCH 1038/2138] crypto: qat - validate slices count returned by FW ANBZ: #9185 commit 483fd65ce29317044d1d00757e3fd23503b6b04c upstream. Intel-SIG: commit 483fd65ce293 crypto: qat - validate slices count returned by FW Backport to support Intel QAT live migration for in-tree driver The function adf_send_admin_tl_start() enables the telemetry (TL) feature on a QAT device by sending the ICP_QAT_FW_TL_START message to the firmware. This triggers the FW to start writing TL data to a DMA buffer in memory and returns an array containing the number of accelerators of each type (slices) supported by this HW. 
The pointer to this array is stored in the adf_tl_hw_data data structure called slice_cnt. The array slice_cnt is then used in the function tl_print_dev_data() to report in debugfs only statistics about the supported accelerators. An incorrect value of the elements in slice_cnt might lead to an out of bounds memory read. At the moment, there isn't an implementation of FW that returns a wrong value, but for robustness validate the slice count array returned by FW. Fixes: 69e7649f7cc2 ("crypto: qat - add support for device telemetry") Signed-off-by: Lucas Segarra Fernandez Reviewed-by: Damian Muszynski Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- .../crypto/intel/qat/qat_common/adf_gen4_tl.c | 1 + .../intel/qat/qat_common/adf_telemetry.c | 21 +++++++++++++++++++ .../intel/qat/qat_common/adf_telemetry.h | 1 + 3 files changed, 23 insertions(+) diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c index 7fc7a77f6aed..c7ad8cf07863 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c @@ -149,5 +149,6 @@ void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) tl_data->sl_exec_counters = sl_exec_counters; tl_data->rp_counters = rp_counters; tl_data->num_rp_counters = ARRAY_SIZE(rp_counters); + tl_data->max_sl_cnt = ADF_GEN4_TL_MAX_SLICES_PER_TYPE; } EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c index 2ff714d11bd2..74fb0c2ed241 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c @@ -41,6 +41,20 @@ static int validate_tl_data(struct adf_tl_hw_data *tl_data) return 0; } +static int validate_tl_slice_counters(struct 
icp_qat_fw_init_admin_slice_cnt *slice_count, + u8 max_slices_per_type) +{ + u8 *sl_counter = (u8 *)slice_count; + int i; + + for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) { + if (sl_counter[i] > max_slices_per_type) + return -EINVAL; + } + + return 0; +} + static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev) { struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); @@ -214,6 +228,13 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state) return ret; } + ret = validate_tl_slice_counters(&telemetry->slice_cnt, tl_data->max_sl_cnt); + if (ret) { + dev_err(dev, "invalid value returned by FW\n"); + adf_send_admin_tl_stop(accel_dev); + return ret; + } + telemetry->hbuffs = state; atomic_set(&telemetry->state, state); diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h index 9be81cd3b886..e54a406cc1b4 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h @@ -40,6 +40,7 @@ struct adf_tl_hw_data { u8 num_dev_counters; u8 num_rp_counters; u8 max_rp; + u8 max_sl_cnt; }; struct adf_telemetry { -- Gitee From 743c1c33c379546d7ffc15aec6802ff6c3a2dd4b Mon Sep 17 00:00:00 2001 From: Aichun Shi Date: Mon, 27 May 2024 21:28:21 +0800 Subject: [PATCH 1039/2138] x86: configs: Add kernel config to support Intel QAT live migration ANBZ: #9185 Intel-SIG: no upstream x86: configs: Add kernel config to support Intel QAT live migration Backport to support Intel QAT live migration for in-tree driver Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3290 --- arch/x86/configs/anolis-debug_defconfig | 1 + arch/x86/configs/anolis_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig index ef3c7817cadf..7d728ee1318a 100644 --- a/arch/x86/configs/anolis-debug_defconfig +++ b/arch/x86/configs/anolis-debug_defconfig 
@@ -5764,6 +5764,7 @@ CONFIG_VFIO_PCI=m # CONFIG_VFIO_PCI_VGA is not set # CONFIG_VFIO_PCI_IGD is not set # CONFIG_MLX5_VFIO_PCI is not set +CONFIG_QAT_VFIO_PCI=m # end of VFIO support for PCI devices CONFIG_VFIO_MDEV=m diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig index 24234079ecce..38730def197a 100644 --- a/arch/x86/configs/anolis_defconfig +++ b/arch/x86/configs/anolis_defconfig @@ -5761,6 +5761,7 @@ CONFIG_VFIO_PCI=m # CONFIG_VFIO_PCI_VGA is not set # CONFIG_VFIO_PCI_IGD is not set # CONFIG_MLX5_VFIO_PCI is not set +CONFIG_QAT_VFIO_PCI=m # end of VFIO support for PCI devices CONFIG_VFIO_MDEV=m -- Gitee From 568ce3570ab240eec4d3ccc6da1b6890e3e02574 Mon Sep 17 00:00:00 2001 From: Xiongfeng Wang Date: Mon, 8 Apr 2024 20:38:12 +0800 Subject: [PATCH 1040/2138] firmware: arm_sdei: Move sdei_cpuhp_up/down() before lockup_detector_online_cpu() ANBZ: #9355 commit 3f1e2f7315ac8f64a5c2d1e6904bb34ebfb140fa openeuler hulk inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I9EYSX -------------------------------- commit 58c81b6ed03f ("firmware: arm_sdei: Fix sleep from invalid context BUG") move sdei_cpuhp_up/down() after lockup_detector_online_cpu(). sdei_watchdog is enabled in lockup_detector_online_cpu(). It fails because it is enabled before sdei_cpuhp_up(). This commit move sdei_cpuhp_up() before lockup_detector_online_cpu(). 
Signed-off-by: Xiongfeng Wang Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/3386 --- drivers/firmware/arm_sdei.c | 21 +++++++-------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index 7f178a4194a5..fe638e40aebb 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -43,8 +43,6 @@ static asmlinkage void (*sdei_firmware_call)(unsigned long function_id, /* entry point from firmware to arch asm code */ static unsigned long sdei_entry_point; -static int sdei_hp_state; - struct sdei_event { /* These three are protected by the sdei_list_lock */ struct list_head list; @@ -785,7 +783,7 @@ static int sdei_device_freeze(struct device *dev) int err; /* unregister private events */ - cpuhp_remove_state(sdei_hp_state); + cpuhp_remove_state(CPUHP_AP_ARM_SDEI_ONLINE); err = sdei_unregister_shared(); if (err) @@ -806,15 +804,12 @@ static int sdei_device_thaw(struct device *dev) return err; } - err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI", + err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_ONLINE, "SDEI", &sdei_cpuhp_up, &sdei_cpuhp_down); - if (err < 0) { + if (err) pr_warn("Failed to re-register CPU hotplug notifier...\n"); - return err; - } - sdei_hp_state = err; - return 0; + return err; } static int sdei_device_restore(struct device *dev) @@ -846,7 +841,7 @@ static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action, * We are going to reset the interface, after this there is no point * doing work when we take CPUs offline. 
*/ - cpuhp_remove_state(sdei_hp_state); + cpuhp_remove_state(CPUHP_AP_ARM_SDEI_ONLINE); sdei_platform_reset(); @@ -1026,15 +1021,13 @@ static int sdei_probe(struct platform_device *pdev) goto remove_cpupm; } - err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI", + err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_ONLINE, "SDEI", &sdei_cpuhp_up, &sdei_cpuhp_down); - if (err < 0) { + if (err) { pr_warn("Failed to register CPU hotplug notifier...\n"); goto remove_reboot; } - sdei_hp_state = err; - return 0; remove_reboot: diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 624d4a38c358..44f1e762b1ec 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -245,6 +245,7 @@ enum cpuhp_state { CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE, CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE, CPUHP_AP_PERF_CSKY_ONLINE, + CPUHP_AP_ARM_SDEI_ONLINE, CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RANDOM_ONLINE, -- Gitee From 4d5d6a39b6655c04ed28541668c7d5e18916bce7 Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Thu, 28 Mar 2024 11:00:55 +0800 Subject: [PATCH 1041/2138] watchdog: Fix call trace when failed to initialize sdei ANBZ: #9355 commit 20c2368e0195aae332176096336105c470d48f15 openeuler kunpeng inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/I9KE9N CVE: NA ---------------------------------------------------------------------- 1509d06c9c41 ("init: only move down lockup_detector_init() when sdei_watchdog is enabled") In the above commit, sdei_watchdog needs to move down lockup_detector_init (), while nmi_watchdog does not. So when sdei_watchdog fails to be initialized, nmi_watchdog should not be initialized. 
[ 0.706631][ T1] SDEI NMI watchdog: Disable SDEI NMI Watchdog in VM [ 0.707405][ T1] ------------[ cut here ]------------ [ 0.708020][ T1] WARNING: CPU: 0 PID: 1 at kernel/watchdog_perf.c:117 hardlockup_detector_event_create+0x24/0x108 [ 0.709230][ T1] Modules linked in: [ 0.709665][ T1] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 6.6.0 #1 [ 0.710700][ T1] Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 [ 0.711625][ T1] pstate: 00400005 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) [ 0.712547][ T1] pc : hardlockup_detector_event_create+0x24/0x108 [ 0.713316][ T1] lr : watchdog_hardlockup_probe+0x28/0xa8 [ 0.714010][ T1] sp : ffff8000831cbdc0 [ 0.714501][ T1] pmr_save: 000000e0 [ 0.714957][ T1] x29: ffff8000831cbdc0 x28: 0000000000000000 x27: 0000000000000000 [ 0.715899][ T1] x26: 0000000000000000 x25: 0000000000000000 x24: 0000000000000000 [ 0.716839][ T1] x23: 0000000000000000 x22: 0000000000000000 x21: ffff80008218fab0 [ 0.717775][ T1] x20: ffff8000821af000 x19: ffff0000c0261900 x18: 0000000000000020 [ 0.718713][ T1] x17: 00000000cb551c45 x16: ffff800082625e48 x15: ffffffffffffffff [ 0.719663][ T1] x14: 0000000000000000 x13: 205d315420202020 x12: 5b5d313336363037 [ 0.720607][ T1] x11: 00000000ffff7fff x10: 00000000ffff7fff x9 : ffff800081b5f630 [ 0.721590][ T1] x8 : 00000000000bffe8 x7 : c0000000ffff7fff x6 : 000000000005fff4 [ 0.722528][ T1] x5 : 00000000002bffa8 x4 : 0000000000000000 x3 : 0000000000000000 [ 0.723482][ T1] x2 : 0000000000000000 x1 : 0000000000000140 x0 : ffff0000c02c0000 [ 0.724426][ T1] Call trace: [ 0.724808][ T1] hardlockup_detector_event_create+0x24/0x108 [ 0.725535][ T1] watchdog_hardlockup_probe+0x28/0xa8 [ 0.726174][ T1] lockup_detector_init+0x110/0x158 [ 0.726776][ T1] kernel_init_freeable+0x208/0x288 [ 0.727387][ T1] kernel_init+0x2c/0x200 [ 0.727902][ T1] ret_from_fork+0x10/0x20 [ 0.728420][ T1] ---[ end trace 0000000000000000 ]--- Fixes: f61b11535a0b ("watchdog: Support watchdog_sdei coexist with existing watchdogs") 
Signed-off-by: Yicong Yang Signed-off-by: Jie Liu Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/3386 --- kernel/watchdog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 11102420a2c7..68c0b1d8e467 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -1030,7 +1030,7 @@ void __init lockup_detector_init(void) housekeeping_cpumask(HK_TYPE_TIMER)); if ((!disable_sdei_nmi_watchdog && !sdei_watchdog_hardlockup_probe()) || - !watchdog_hardlockup_probe()) + (disable_sdei_nmi_watchdog && !watchdog_hardlockup_probe())) watchdog_hardlockup_available = true; else allow_lockup_detector_init_retry = true; -- Gitee From 0c6965e8af9fe61efcde70e628119f690bc902c7 Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Thu, 16 May 2024 03:06:42 -0400 Subject: [PATCH 1042/2138] anolis: x86/cpu: Get LLC ID for Hygon family 18h model 10h ANBZ: #9363 Get LLC ID from ApicId[3]. Signed-off-by: Liao Xuan Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3366 --- arch/x86/kernel/cpu/cacheinfo.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index 7c4ce361c728..66c3ba277507 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -708,7 +708,8 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu) if (!cpuid_edx(0x80000006)) return; - if (c->x86_model < 0x5) { + if (c->x86_model < 0x5 || + (c->x86_model >= 0x10 && c->x86_model <= 0x1f)) { /* * LLC is at the core complex level. * Core complex ID is ApicId[3] for these processors. 
-- Gitee From 61f0ee0c3526d0c8bb06012c64aaa2e0890fa736 Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Thu, 16 May 2024 03:34:27 -0400 Subject: [PATCH 1043/2138] anolis: x86/amd_nb: Add support for Hygon family 18h model 10h ANBZ: #9363 Add root and DF F1/F3/F4 device IDs for Hygon family 18h model 10h processors. Signed-off-by: Liao Xuan Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3366 --- arch/x86/kernel/amd_nb.c | 5 +++++ include/linux/pci_ids.h | 1 + 2 files changed, 6 insertions(+) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index e7dbd486ef16..34688619e2f0 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -46,9 +46,11 @@ #define PCI_DEVICE_ID_AMD_MI200_DF_F4 0x14d4 #define PCI_DEVICE_ID_HYGON_18H_M05H_ROOT 0x14a0 +#define PCI_DEVICE_ID_HYGON_18H_M10H_ROOT 0x14c0 #define PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1 0x1491 #define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1 0x14b1 #define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4 0x14b4 +#define PCI_DEVICE_ID_HYGON_18H_M10H_DF_F4 0x14d4 #define PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5 0x14b5 /* Protect the PCI config register pairs used for SMN. 
*/ @@ -136,6 +138,7 @@ static const struct pci_device_id hygon_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_ROOT) }, {} }; @@ -143,6 +146,7 @@ static const struct pci_device_id hygon_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_DF_F3) }, {} }; @@ -150,6 +154,7 @@ static const struct pci_device_id hygon_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_DF_F4) }, {} }; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 082cd30aba87..820fd378b1c2 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2602,6 +2602,7 @@ #define PCI_VENDOR_ID_HYGON 0x1d94 #define PCI_DEVICE_ID_HYGON_18H_M05H_HDA 0x14a9 #define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3 0x14b3 +#define PCI_DEVICE_ID_HYGON_18H_M10H_DF_F3 0x14d3 #define PCI_VENDOR_ID_FUNGIBLE 0x1dad -- Gitee From 165019864f6c7a4fa511266df932aa4f34d92d53 Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Wed, 19 Jun 2024 03:27:47 -0400 Subject: [PATCH 1044/2138] anolis: EDAC/amd64: Add support for Hygon family 18h model 10h ANBZ: #9363 Add Hygon family 18h model 10h processor support for amd64_edac. 
Signed-off-by: Liao Xuan Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3366 --- drivers/edac/amd64_edac.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 196a060e3928..7bdff971b9a9 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -4161,6 +4161,9 @@ static int per_family_init(struct amd64_pvt *pvt) } else if (pvt->model == 0x6) { pvt->ctl_name = "F18h_M06h"; break; + } else if (pvt->model == 0x10) { + pvt->ctl_name = "F18h_M10h"; + break; } pvt->ctl_name = "F18h"; break; -- Gitee From ead6214b48b96e39b1eef35198d140deddd2aefe Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Thu, 16 May 2024 03:55:41 -0400 Subject: [PATCH 1045/2138] anolis: hwmon/k10temp: Add support for Hygon family 18h model 10h ANBZ: #9363 Add 18H_M10H DF F3 device ID to get the temperature for Hygon family 18h model 10h processor. Signed-off-by: Liao Xuan Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3366 --- drivers/hwmon/k10temp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 6dbcb8cd0951..6bb096687f3d 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -627,6 +627,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_DF_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); -- Gitee From fbd07ffc1f366759e4cd51fe29ac8c9c45966f79 Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Thu, 16 May 2024 04:02:35 -0400 Subject: [PATCH 1046/2138] anolis: ALSA: hda: Add support for Hygon family 18h model 10h HD-Audio ANBZ: #9363 Add the new PCI ID 0x1d94 0x14c9 for Hygon family 18h model 10h HDA controller. 
Signed-off-by: Liao Xuan Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3366 --- include/linux/pci_ids.h | 1 + sound/pci/hda/hda_intel.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 820fd378b1c2..a9bb8aa1112a 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2601,6 +2601,7 @@ #define PCI_VENDOR_ID_HYGON 0x1d94 #define PCI_DEVICE_ID_HYGON_18H_M05H_HDA 0x14a9 +#define PCI_DEVICE_ID_HYGON_18H_M10H_HDA 0x14c9 #define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3 0x14b3 #define PCI_DEVICE_ID_HYGON_18H_M10H_DF_F3 0x14d3 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index f0f5c6ccc3e1..950288f20522 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2819,6 +2819,8 @@ static const struct pci_device_id azx_ids[] = { /* Hygon HDAudio */ { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_HDA), .driver_data = AZX_DRIVER_HYGON | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_NO_MSI }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M10H_HDA), + .driver_data = AZX_DRIVER_HYGON }, { 0, } }; MODULE_DEVICE_TABLE(pci, azx_ids); -- Gitee From 535abce0c1ea1f1a6d540658d16f7b53033a715d Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Sun, 12 May 2024 23:12:56 -0400 Subject: [PATCH 1047/2138] anolis: x86/amd_nb: Add support for Hygon family 18h model 7h ANBZ: #9362 Add Hygon family 18h model 7h processor support for amd_nb. 
Signed-off-by: Liao Xuan Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3362 --- arch/x86/kernel/amd_nb.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 34688619e2f0..3d6d25b64bb3 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -286,6 +286,7 @@ static int get_df_register(struct pci_dev *misc, u8 func, int offset, u32 *value device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; break; case 0x6: + case 0x7: device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; break; default: @@ -294,6 +295,7 @@ static int get_df_register(struct pci_dev *misc, u8 func, int offset, u32 *value } else if (func == 5) { switch (boot_cpu_data.x86_model) { case 0x6: + case 0x7: device = PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5; break; default: -- Gitee From ce3e33b09a53848a36948ba8f4eaeed807e0f255 Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Tue, 25 Jun 2024 23:23:46 -0400 Subject: [PATCH 1048/2138] anolis: EDAC/amd64: Add support for Hygon family 18h model 7h ANBZ: #9362 Add Hygon family 18h model 7h processor support for amd64_edac. 
Signed-off-by: Liao Xuan Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3362 --- drivers/edac/amd64_edac.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 7bdff971b9a9..d0ab59e80fb0 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -4161,6 +4161,9 @@ static int per_family_init(struct amd64_pvt *pvt) } else if (pvt->model == 0x6) { pvt->ctl_name = "F18h_M06h"; break; + } else if (pvt->model == 0x7) { + pvt->ctl_name = "F18h_M07h"; + break; } else if (pvt->model == 0x10) { pvt->ctl_name = "F18h_M10h"; break; -- Gitee From 2a8b446724749b3102ce0945d09d574bd50df6f1 Mon Sep 17 00:00:00 2001 From: Liao Xuan Date: Wed, 19 Jun 2024 03:13:06 -0400 Subject: [PATCH 1049/2138] anolis: perf/x86/uncore: Add L3 PMU support for Hygon family 18h model 7h ANBZ: #9362 From model 6h, Hygon processors can use the same L3 PMU slicemask and threadmask. Signed-off-by: Liao Xuan Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3362 --- arch/x86/events/amd/uncore.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 5100469fef32..a4c164e7f9c0 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -203,7 +203,7 @@ static u64 l3_thread_slice_mask(u64 config) if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && boot_cpu_data.x86 == 0x18) { - if (boot_cpu_data.x86_model == 0x6) + if (boot_cpu_data.x86_model >= 0x6 && boot_cpu_data.x86_model <= 0xf) return ((config & HYGON_L3_SLICE_MASK) ? : HYGON_L3_SLICE_MASK) | ((config & HYGON_L3_THREAD_MASK) ? 
: HYGON_L3_THREAD_MASK); else @@ -282,7 +282,8 @@ amd_f17h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) static umode_t hygon_f18h_m6h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) { - return boot_cpu_data.x86 == 0x18 && boot_cpu_data.x86_model == 0x6 ? + return boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x6 && boot_cpu_data.x86_model <= 0xf ? attr->mode : 0; } @@ -755,7 +756,7 @@ static int __init amd_uncore_init(void) boot_cpu_data.x86 == 0x18) { *l3_attr++ = &format_attr_event8.attr; *l3_attr++ = &format_attr_umask8.attr; - if (boot_cpu_data.x86_model == 0x6) { + if (boot_cpu_data.x86_model >= 0x6 && boot_cpu_data.x86_model <= 0xf) { *l3_attr++ = &format_attr_threadmask32.attr; amd_llc_pmu.attr_update = hygon_uncore_l3_attr_update; } else { -- Gitee From 37d34f3ae965813a88e49fef5ac588d6f88c5ef6 Mon Sep 17 00:00:00 2001 From: Qi Liu Date: Thu, 20 Jun 2024 20:15:33 +0800 Subject: [PATCH 1050/2138] anolis: perf/x86/uncore: Add DF PMU support for Hygon family 18h model 4h and model 6h ANBZ: #9536 Adjust the DF PMU event and umask for Hygon family 18h modle 4h and model 6h processor. 
Signed-off-by: Qi Liu Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3499 --- arch/x86/events/amd/uncore.c | 24 ++++++++++++++++++++++-- arch/x86/include/asm/perf_event.h | 16 ++++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index a4c164e7f9c0..6faf2f6ca4dc 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -235,8 +235,17 @@ static int amd_uncore_event_init(struct perf_event *event) if (event->attr.type != event->pmu->type) return -ENOENT; - if (pmu_version >= 2 && is_nb_event(event)) + if (pmu_version >= 2 && is_nb_event(event)) { event_mask = AMD64_PERFMON_V2_RAW_EVENT_MASK_NB; + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18 && + is_nb_event(event)) { + event_mask = HYGON_F18H_RAW_EVENT_MASK_NB; + if (boot_cpu_data.x86_model == 0x4) + event_mask = HYGON_F18H_M4H_RAW_EVENT_MASK_NB; + if (boot_cpu_data.x86_model == 0x6) + event_mask = HYGON_F18H_M6H_RAW_EVENT_MASK_NB; + } /* * NB and Last level cache counters (MSRs) are shared across all cores @@ -334,8 +343,11 @@ static struct device_attribute format_attr_##_var = \ DEFINE_UNCORE_FORMAT_ATTR(event12, event, "config:0-7,32-35"); DEFINE_UNCORE_FORMAT_ATTR(event14, event, "config:0-7,32-35,59-60"); /* F17h+ DF */ DEFINE_UNCORE_FORMAT_ATTR(event14v2, event, "config:0-7,32-37"); /* PerfMonV2 DF */ +DEFINE_UNCORE_FORMAT_ATTR(event14f18h, event, "config:0-7,32-35,61-62"); /* F18h DF */ DEFINE_UNCORE_FORMAT_ATTR(event8, event, "config:0-7"); /* F17h+ L3 */ DEFINE_UNCORE_FORMAT_ATTR(umask8, umask, "config:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(umask10f18h, umask, "config:8-17"); /* F18h M4h DF */ +DEFINE_UNCORE_FORMAT_ATTR(umask12f18h, umask, "config:8-19"); /* F18h M6h DF */ DEFINE_UNCORE_FORMAT_ATTR(umask12, umask, "config:8-15,24-27"); /* PerfMonV2 DF */ DEFINE_UNCORE_FORMAT_ATTR(coreid, coreid, "config:42-44"); /* 
F19h L3 */ DEFINE_UNCORE_FORMAT_ATTR(slicemask, slicemask, "config:48-51"); /* F17h L3 */ @@ -717,8 +729,16 @@ static int __init amd_uncore_init(void) if (pmu_version >= 2) { *df_attr++ = &format_attr_event14v2.attr; *df_attr++ = &format_attr_umask12.attr; - } else if (boot_cpu_data.x86 >= 0x17) { + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 >= 0x17) { *df_attr = &format_attr_event14.attr; + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + *df_attr++ = &format_attr_event14f18h.attr; + if (boot_cpu_data.x86_model == 0x4) + *df_attr++ = &format_attr_umask10f18h.attr; + else if (boot_cpu_data.x86_model == 0x6) + *df_attr++ = &format_attr_umask12f18h.attr; } amd_uncore_nb = alloc_percpu(struct amd_uncore *); diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 9450594f1709..103ae26479de 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -58,6 +58,9 @@ #define AMD64_EVENTSEL_EVENT \ (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32)) +#define HYGON_F18H_EVENTSEL_EVENT \ + (AMD64_EVENTSEL_EVENT | \ + GENMASK_ULL(62, 61)) #define INTEL_ARCH_EVENT_MASK \ (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT) @@ -109,6 +112,19 @@ (AMD64_EVENTSEL_EVENT | \ ARCH_PERFMON_EVENTSEL_UMASK) +#define HYGON_F18H_M4H_EVENTSEL_UMASK_NB 0x0003FF00ULL +#define HYGON_F18H_M6H_EVENTSEL_UMASK_NB 0x000FFF00ULL + +#define HYGON_F18H_RAW_EVENT_MASK_NB \ + (HYGON_F18H_EVENTSEL_EVENT | \ + ARCH_PERFMON_EVENTSEL_UMASK) +#define HYGON_F18H_M4H_RAW_EVENT_MASK_NB \ + (HYGON_F18H_EVENTSEL_EVENT | \ + HYGON_F18H_M4H_EVENTSEL_UMASK_NB) +#define HYGON_F18H_M6H_RAW_EVENT_MASK_NB \ + (HYGON_F18H_EVENTSEL_EVENT | \ + HYGON_F18H_M6H_EVENTSEL_UMASK_NB) + #define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB \ (AMD64_EVENTSEL_EVENT | \ GENMASK_ULL(37, 36)) -- Gitee From 231b7a024bef5dcd91478b8ad6575eb572e0b531 Mon Sep 17 00:00:00 2001 From: Song Gao Date: Thu, 13 Jun 
2024 20:05:39 +0800 Subject: [PATCH 1051/2138] anolis: LoongArch: KVM: Add PMU support ANBZ: #9537 On LoongArch, the host and guest have their own PMU CSRs registers and they share PMU hardware resources. A set of PMU CSRs consists of a CTRL register and a CNTR register. We can set which PMU CSRs are used by the guest by writing to the GCFG register [24: 26] bits. On KVM side: - Save the host PMU CSRs into structure kvm_context. - If the host supports the PMU feature. - When entering guest mode. save the host PMU CSRs and restore the guest PMU CSRs. - When exiting guest mode, save the guest PMU CSRs and restore the host PMU CSRs. Signed-off-by: Song Gao Reviewed-by: Bibo Mao Link: https://lore.kernel.org/all/20240613120539.41021-1-gaosong@loongson.cn/ Link: https://gitee.com/anolis/cloud-kernel/pulls/3517 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_csr.h | 4 +- arch/loongarch/include/asm/kvm_host.h | 15 ++- arch/loongarch/include/uapi/asm/kvm.h | 4 + arch/loongarch/kvm/exit.c | 5 +- arch/loongarch/kvm/vcpu.c | 153 +++++++++++++++++++++++++- arch/loongarch/kvm/vm.c | 35 +++++- 6 files changed, 206 insertions(+), 10 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h index 476c9f620dd5..0a52f115a87e 100644 --- a/arch/loongarch/include/asm/kvm_csr.h +++ b/arch/loongarch/include/asm/kvm_csr.h @@ -30,6 +30,7 @@ : [val] "+r" (__v) \ : [reg] "i" (csr) \ : "memory"); \ + __v; \ }) #define gcsr_xchg(v, m, csr) \ @@ -180,6 +181,7 @@ __BUILD_GCSR_OP(tlbidx) #define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid)) #define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid)) +#define kvm_read_clear_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_write(0, gid)) int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu); @@ -208,7 +210,7 @@ static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr, csr->csrs[gid] |= val & _mask; } -#define KVM_PMU_PLV_ENABLE 
(CSR_PERFCTRL_PLV0 | \ +#define KVM_PMU_EVENT_ENABLED (CSR_PERFCTRL_PLV0 | \ CSR_PERFCTRL_PLV1 | \ CSR_PERFCTRL_PLV2 | \ CSR_PERFCTRL_PLV3) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index c146d2ebdb90..16887d0c6e95 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -55,9 +55,14 @@ struct kvm_arch_memory_slot { unsigned long flags; }; +#define KVM_REQ_PMU KVM_ARCH_REQ(0) +#define HOST_MAX_PMNUM 16 struct kvm_context { unsigned long vpid_cache; struct kvm_vcpu *last_vcpu; + /* Save host pmu csr */ + u64 perf_ctrl[HOST_MAX_PMNUM]; + u64 perf_cntr[HOST_MAX_PMNUM]; }; struct kvm_world_switch { @@ -129,7 +134,8 @@ enum emulation_result { #define KVM_LARCH_LASX (0x1 << 2) #define KVM_LARCH_SWCSR_LATEST (0x1 << 3) #define KVM_LARCH_HWCSR_USABLE (0x1 << 4) -#define KVM_LARCH_PERF (0x1 << 5) +#define KVM_GUEST_PMU_ENABLE (0x1 << 5) +#define KVM_GUEST_PMU_ACTIVE (0x1 << 6) struct kvm_vcpu_arch { /* @@ -167,6 +173,9 @@ struct kvm_vcpu_arch { /* CSR state */ struct loongarch_csrs *csr; + /* Guest max PMU CSR id */ + int max_pmu_csrid; + /* GPR used as IO source/target */ u32 io_gpr; @@ -237,12 +246,12 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch) static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch) { - return arch->cpucfg[6] & CPUCFG6_PMP; + return arch->cpucfg[LOONGARCH_CPUCFG6] & CPUCFG6_PMP; } static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch) { - return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT; + return (arch->cpucfg[LOONGARCH_CPUCFG6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT; } /* Debug: dump vcpu state */ diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index 9891ed93816a..8a1408cbadb0 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -89,6 +89,10 @@ struct kvm_fpu { #define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1 #define 
KVM_LOONGARCH_VCPU_PVTIME_GPA 0 +/* Device Control API on vm fd */ +#define KVM_LOONGARCH_VM_FEAT_CTRL 0 +#define KVM_LOONGARCH_VM_FEAT_PMU 0 + struct kvm_debug_exit_arch { }; diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 8affc6d4a66e..9607de0b3673 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -83,9 +83,10 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) rj = inst.reg2csr_format.rj; csrid = inst.reg2csr_format.csr; - if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= LOONGARCH_CSR_PERFCNTR3) { - if (!kvm_own_pmu(vcpu)) { + if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) { + if (kvm_guest_has_pmu(&vcpu->arch)) { vcpu->arch.pc -= 4; + kvm_make_request(KVM_REQ_PMU, vcpu); return EMULATE_DONE; } } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 685f2826d022..58a8df488f30 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -140,6 +140,131 @@ static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu, return -ENXIO; } +static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu) +{ + struct kvm_context *context; + + context = this_cpu_ptr(vcpu->kvm->arch.vmcs); + context->perf_ctrl[0] = write_csr_perfctrl0(0); + context->perf_ctrl[1] = write_csr_perfctrl1(0); + context->perf_ctrl[2] = write_csr_perfctrl2(0); + context->perf_ctrl[3] = write_csr_perfctrl3(0); + context->perf_cntr[0] = read_csr_perfcntr0(); + context->perf_cntr[1] = read_csr_perfcntr1(); + context->perf_cntr[2] = read_csr_perfcntr2(); + context->perf_cntr[3] = read_csr_perfcntr3(); +} + +static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu) +{ + struct kvm_context *context; + + context = this_cpu_ptr(vcpu->kvm->arch.vmcs); + write_csr_perfcntr0(context->perf_cntr[0]); + write_csr_perfcntr1(context->perf_cntr[1]); + write_csr_perfcntr2(context->perf_cntr[2]); + write_csr_perfcntr3(context->perf_cntr[3]); + write_csr_perfctrl0(context->perf_ctrl[0]); + 
write_csr_perfctrl1(context->perf_ctrl[1]); + write_csr_perfctrl2(context->perf_ctrl[2]); + write_csr_perfctrl3(context->perf_ctrl[3]); +} + + +static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); +} + +static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); +} + +static void kvm_lose_pmu(struct kvm_vcpu *vcpu) +{ + unsigned long val; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (!(vcpu->arch.aux_inuse & KVM_GUEST_PMU_ENABLE)) + return; + if (!(vcpu->arch.aux_inuse & KVM_GUEST_PMU_ACTIVE)) + return; + + kvm_save_guest_pmu(vcpu); + /* Disable pmu access from guest */ + write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF); + + /* + * Clear KVM_GUEST_PMU_ENABLE if the guest is not using PMU CSRs + * when exiting the guest, so that the next time trap into the guest. + * we don't need to deal with PMU CSRs contexts. 
+ */ + val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + if (!(val & KVM_PMU_EVENT_ENABLED)) + vcpu->arch.aux_inuse &= ~KVM_GUEST_PMU_ENABLE; + kvm_restore_host_pmu(vcpu); + + /* KVM_GUEST_PMU_ACTIVE needs to be cleared when exiting the guest */ + vcpu->arch.aux_inuse &= ~KVM_GUEST_PMU_ACTIVE; +} + +static void kvm_own_pmu(struct kvm_vcpu *vcpu) +{ + unsigned long val; + + kvm_save_host_pmu(vcpu); + /* Set PM0-PM(num) to guest */ + val = read_csr_gcfg() & ~CSR_GCFG_GPERF; + val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; + write_csr_gcfg(val); + kvm_restore_guest_pmu(vcpu); +} + +static void kvm_restore_pmu(struct kvm_vcpu *vcpu) +{ + if (!(vcpu->arch.aux_inuse & KVM_GUEST_PMU_ENABLE)) + return; + + kvm_make_request(KVM_REQ_PMU, vcpu); +} + +static void kvm_check_pmu(struct kvm_vcpu *vcpu) +{ + if (!kvm_check_request(KVM_REQ_PMU, vcpu)) + return; + + kvm_own_pmu(vcpu); + + /* + * Set KVM_GUEST PMU_ENABLE and GUEST_PMU_ACTIVE + * when guest has KVM_REQ_PMU request. 
+ */ + vcpu->arch.aux_inuse |= KVM_GUEST_PMU_ENABLE; + vcpu->arch.aux_inuse |= KVM_GUEST_PMU_ACTIVE; +} + /* * kvm_check_requests - check and handle pending vCPU requests * @@ -213,6 +338,7 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) /* Make sure the vcpu mode has been written */ smp_store_mb(vcpu->mode, IN_GUEST_MODE); kvm_check_vpid(vcpu); + kvm_check_pmu(vcpu); vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); /* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */ vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; @@ -243,6 +369,8 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) /* Set a default exit reason */ run->exit_reason = KVM_EXIT_UNKNOWN; + kvm_lose_pmu(vcpu); + guest_timing_exit_irqoff(); guest_state_exit_irqoff(); local_irq_enable(); @@ -506,6 +634,21 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) kvm_write_sw_gcsr(csr, id, val); + /* + * After modifying the PMU CSR register value of the vcpu. + * If the PMU CSRs are used, we need to set KVM_REQ_PMU. 
+ */ + if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) { + unsigned long val; + + val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + if (val & KVM_PMU_EVENT_ENABLED) + kvm_make_request(KVM_REQ_PMU, vcpu); + } + return ret; } @@ -596,7 +739,7 @@ static int kvm_check_cpucfg(int id, u64 val) return 0; case LOONGARCH_CPUCFG6: if (val & CPUCFG6_PMP) { - host = read_cpucfg(6); + host = read_cpucfg(LOONGARCH_CPUCFG6); if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS)) /* Guest pmbits must be the same with host */ return -EINVAL; @@ -691,6 +834,10 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu, if (ret) break; vcpu->arch.cpucfg[id] = (u32)v; + if (id == LOONGARCH_CPUCFG6) { + vcpu->arch.max_pmu_csrid = LOONGARCH_CSR_PERFCTRL0 + + 2 * kvm_get_pmu_num(&vcpu->arch) + 1; + } break; case KVM_REG_LOONGARCH_KVM: switch (reg->id) { @@ -784,8 +931,8 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { switch (attr->attr) { - case 2: - case 6: + case LOONGARCH_CPUCFG2: + case LOONGARCH_CPUCFG6: return 0; default: return -ENXIO; diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 06fd746b03b6..b63db5ffad73 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -100,7 +100,40 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) return r; } +static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_LOONGARCH_VM_FEAT_PMU: + if (cpu_has_pmp) + return 0; + return -ENXIO; + default: + return -ENXIO; + } +} + +static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) +{ + switch (attr->group) { + case KVM_LOONGARCH_VM_FEAT_CTRL: + return kvm_vm_feature_has_attr(kvm, attr); + default: + return -ENXIO; + } +} + int 
kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { - return -ENOIOCTLCMD; + struct kvm *kvm = filp->private_data; + void __user *argp = (void __user *)arg; + struct kvm_device_attr attr; + + switch (ioctl) { + case KVM_HAS_DEVICE_ATTR: + if (copy_from_user(&attr, argp, sizeof(attr))) + return -EFAULT; + return kvm_vm_has_attr(kvm, &attr); + default: + return -EINVAL; + } } -- Gitee From b1338a2175bf9a80b34c6a383d1aba324d9fc7de Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Mon, 3 Jun 2024 11:49:50 +0800 Subject: [PATCH 1052/2138] anolis: LoongArch: KVM: Add iocsr and mmio bus simulation in kernel ANBZ: #9546 Add iocsr and mmio memory read and write simulation to the kernel. When the VM accesses the device address space through iocsr instructions or mmio, it does not need to return to the qemu user mode but directly completes the access in the kernel mode. Signed-off-by: Tianrui Zhao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/3527 Reviewed-by: Juxin Gao --- arch/loongarch/kvm/exit.c | 70 ++++++++++++++++++++++++++++----------- include/linux/kvm_host.h | 1 + 2 files changed, 52 insertions(+), 19 deletions(-) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 9607de0b3673..74028ad40b24 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -115,7 +115,7 @@ static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu) { int ret; - unsigned long val; + unsigned long *val; u32 addr, rd, rj, opcode; /* @@ -128,6 +128,7 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu) ret = EMULATE_DO_IOCSR; run->iocsr_io.phys_addr = addr; run->iocsr_io.is_write = 0; + val = &vcpu->arch.gprs[rd]; /* LoongArch is Little endian */ switch (opcode) { @@ -161,15 +162,21 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu) break; default: ret = 
EMULATE_FAIL; - break; + return ret; } - if (ret == EMULATE_DO_IOCSR) { - if (run->iocsr_io.is_write) { - val = vcpu->arch.gprs[rd]; - memcpy(run->iocsr_io.data, &val, run->iocsr_io.len); - } - vcpu->arch.io_gpr = rd; + if (run->iocsr_io.is_write) { + if (!kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val)) + ret = EMULATE_DONE; + else + /* Save data and let user space to write it */ + memcpy(run->iocsr_io.data, val, run->iocsr_io.len); + } else { + if (!kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val)) + ret = EMULATE_DONE; + else + /* Save register id for iocsr read completion */ + vcpu->arch.io_gpr = rd; } return ret; @@ -457,17 +464,31 @@ int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst) } if (ret == EMULATE_DO_MMIO) { + /* + * if mmio device such as pch pic is emulated in KVM, + * it need not return to user space to handle the mmio + * exception. + */ + ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, + run->mmio.len, &vcpu->arch.gprs[rd]); + if (!ret) { + update_pc(&vcpu->arch); + vcpu->mmio_needed = 0; + return EMULATE_DONE; + } + /* Set for kvm_complete_mmio_read() use */ vcpu->arch.io_gpr = rd; run->mmio.is_write = 0; vcpu->mmio_is_write = 0; - } else { - kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", - inst.word, vcpu->arch.pc, vcpu->arch.badv); - kvm_arch_vcpu_dump_regs(vcpu); - vcpu->mmio_needed = 0; + return EMULATE_DO_MMIO; } + kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", + inst.word, vcpu->arch.pc, vcpu->arch.badv); + kvm_arch_vcpu_dump_regs(vcpu); + vcpu->mmio_needed = 0; + return ret; } @@ -605,17 +626,28 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) } if (ret == EMULATE_DO_MMIO) { + /* + * if mmio device such as pch pic is emulated in KVM, + * it need not return to user space to handle the mmio + * exception. 
+ */ + ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, + run->mmio.len, data); + if (!ret) + return EMULATE_DONE; + run->mmio.is_write = 1; vcpu->mmio_needed = 1; vcpu->mmio_is_write = 1; - } else { - vcpu->arch.pc = curr_pc; - kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", - inst.word, vcpu->arch.pc, vcpu->arch.badv); - kvm_arch_vcpu_dump_regs(vcpu); - /* Rollback PC if emulation was unsuccessful */ + return EMULATE_DO_MMIO; } + vcpu->arch.pc = curr_pc; + kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", + inst.word, vcpu->arch.pc, vcpu->arch.badv); + kvm_arch_vcpu_dump_regs(vcpu); + /* Rollback PC if emulation was unsuccessful */ + return ret; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d027f8fd23bf..88ab1da9c255 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -216,6 +216,7 @@ enum kvm_bus { KVM_PIO_BUS, KVM_VIRTIO_CCW_NOTIFY_BUS, KVM_FAST_MMIO_BUS, + KVM_IOCSR_BUS, KVM_NR_BUSES }; -- Gitee From d1cd041e49d3f76f938999d607b5488861e0f03d Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Mon, 3 Jun 2024 14:12:26 +0800 Subject: [PATCH 1053/2138] anolis: LoongArch: KVM: Add IPI device support ANBZ: #9546 Added device model for IPI interrupt controller, implemented basic create destroy interface, and registered device model to kvm device table. 
Signed-off-by: Tianrui Zhao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/3527 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_host.h | 4 + arch/loongarch/include/asm/kvm_ipi.h | 36 ++++++ arch/loongarch/kvm/Makefile | 1 + arch/loongarch/kvm/intc/ipi.c | 155 ++++++++++++++++++++++++++ arch/loongarch/kvm/main.c | 7 +- arch/loongarch/kvm/vcpu.c | 3 + include/uapi/linux/kvm.h | 4 + 7 files changed, 209 insertions(+), 1 deletion(-) create mode 100644 arch/loongarch/include/asm/kvm_ipi.h create mode 100644 arch/loongarch/kvm/intc/ipi.c diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 16887d0c6e95..a32b5c12ec01 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -19,6 +19,7 @@ #include #include #include +#include /* Loongarch KVM register ids */ #define KVM_GET_IOC_CSR_IDX(id) ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT) @@ -109,6 +110,7 @@ struct kvm_arch { s64 time_offset; struct kvm_context __percpu *vmcs; + struct loongarch_ipi *ipi; }; #define CSR_MAX_NUMS 0x800 @@ -206,6 +208,8 @@ struct kvm_vcpu_arch { int last_sched_cpu; /* mp state */ struct kvm_mp_state mp_state; + /* ipi state */ + struct ipi_state ipi_state; /* cpucfg */ u32 cpucfg[KVM_MAX_CPUCFG_REGS]; /* paravirt steal time */ diff --git a/arch/loongarch/include/asm/kvm_ipi.h b/arch/loongarch/include/asm/kvm_ipi.h new file mode 100644 index 000000000000..875a93008802 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_ipi.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#ifndef __LS3A_KVM_IPI_H +#define __LS3A_KVM_IPI_H + +#include + +#define LARCH_INT_IPI 12 + +struct loongarch_ipi { + spinlock_t lock; + struct kvm *kvm; + struct kvm_io_device device; + struct kvm_io_device mail_dev; +}; + +struct ipi_state { + spinlock_t lock; + uint32_t status; + uint32_t en; + uint32_t set; + uint32_t 
clear; + uint64_t buf[4]; +}; + +#define SMP_MAILBOX 0x1000 +#define KVM_IOCSR_IPI_ADDR_SIZE 0x48 + +#define MAIL_SEND_ADDR (SMP_MAILBOX + IOCSR_MAIL_SEND) +#define KVM_IOCSR_MAIL_ADDR_SIZE 0x118 + +int kvm_loongarch_register_ipi_device(void); +#endif diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile index 244467d7792a..69a074ee0d0f 100644 --- a/arch/loongarch/kvm/Makefile +++ b/arch/loongarch/kvm/Makefile @@ -18,5 +18,6 @@ kvm-y += timer.o kvm-y += tlb.o kvm-y += vcpu.o kvm-y += vm.o +kvm-y += intc/ipi.o CFLAGS_exit.o += $(call cc-option,-Wno-override-init,) diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c new file mode 100644 index 000000000000..a9dc0aaec502 --- /dev/null +++ b/arch/loongarch/kvm/intc/ipi.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include +#include +#include + +static int kvm_loongarch_ipi_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + return 0; +} + +static int kvm_loongarch_ipi_read(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, void *val) +{ + return 0; +} + +static int kvm_loongarch_mail_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + return 0; +} + +static const struct kvm_io_device_ops kvm_loongarch_ipi_ops = { + .read = kvm_loongarch_ipi_read, + .write = kvm_loongarch_ipi_write, +}; + +static const struct kvm_io_device_ops kvm_loongarch_mail_ops = { + .write = kvm_loongarch_mail_write, +}; + +static int kvm_loongarch_ipi_get_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + return 0; +} + +static int kvm_loongarch_ipi_set_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + return 0; +} + +static void kvm_loongarch_ipi_destroy(struct kvm_device *dev) +{ + struct kvm *kvm; + struct loongarch_ipi *ipi; + struct kvm_io_device *device; + + if 
(!dev) + return; + + kvm = dev->kvm; + if (!kvm) + return; + + ipi = kvm->arch.ipi; + if (!ipi) + return; + + device = &ipi->device; + kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, device); + + device = &ipi->mail_dev; + kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, device); + + kfree(ipi); +} + +static int kvm_loongarch_ipi_create(struct kvm_device *dev, u32 type) +{ + struct kvm *kvm; + struct loongarch_ipi *s; + unsigned long addr; + struct kvm_io_device *device; + int ret; + + kvm_info("begin create loongarch ipi in kvm ...\n"); + if (!dev) { + kvm_err("%s: kvm_device ptr is invalid!\n", __func__); + return -EINVAL; + } + + kvm = dev->kvm; + if (kvm->arch.ipi) { + kvm_err("%s: loongarch ipi has been created!\n", __func__); + return -EINVAL; + } + + s = kzalloc(sizeof(struct loongarch_ipi), GFP_KERNEL); + if (!s) + return -ENOMEM; + spin_lock_init(&s->lock); + s->kvm = kvm; + + /* + * Initialize IOCSR device + */ + device = &s->device; + kvm_iodevice_init(device, &kvm_loongarch_ipi_ops); + addr = SMP_MAILBOX; + mutex_lock(&kvm->slots_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, addr, + KVM_IOCSR_IPI_ADDR_SIZE, device); + mutex_unlock(&kvm->slots_lock); + if (ret < 0) { + kvm_err("%s: initialize IOCSR dev failed, ret = %d\n", __func__, ret); + goto err; + } + + device = &s->mail_dev; + kvm_iodevice_init(device, &kvm_loongarch_mail_ops); + addr = MAIL_SEND_ADDR; + mutex_lock(&kvm->slots_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, addr, + KVM_IOCSR_MAIL_ADDR_SIZE, device); + mutex_unlock(&kvm->slots_lock); + if (ret < 0) { + device = &s->device; + kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, device); + kvm_err("%s: initialize mail box dev failed, ret = %d\n", __func__, ret); + goto err; + } + + kvm->arch.ipi = s; + kvm_info("create loongarch ipi in kvm done!\n"); + + return 0; + +err: + kfree(s); + return -EFAULT; +} + +static struct kvm_device_ops kvm_loongarch_ipi_dev_ops = { + .name = "kvm-loongarch-ipi", + .create = 
kvm_loongarch_ipi_create, + .destroy = kvm_loongarch_ipi_destroy, + .set_attr = kvm_loongarch_ipi_set_attr, + .get_attr = kvm_loongarch_ipi_get_attr, +}; + +int kvm_loongarch_register_ipi_device(void) +{ + return kvm_register_device_ops(&kvm_loongarch_ipi_dev_ops, + KVM_DEV_TYPE_LA_IPI); +} diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index 86a2f2d0cb27..36efc7b38f83 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -312,7 +312,7 @@ void kvm_arch_hardware_disable(void) static int kvm_loongarch_env_init(void) { - int cpu, order; + int cpu, order, ret; void *addr; struct kvm_context *context; @@ -367,6 +367,11 @@ static int kvm_loongarch_env_init(void) kvm_init_gcsr_flag(); + /* Register loongarch ipi interrupt controller interface. */ + ret = kvm_loongarch_register_ipi_device(); + if (ret) + return ret; + return 0; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 58a8df488f30..b91612200662 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -1372,6 +1372,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* Init */ vcpu->arch.last_sched_cpu = -1; + /* Init ipi_state lock */ + spin_lock_init(&vcpu->arch.ipi_state.lock); + /* * Initialize guest register state to valid architectural reset state. 
*/ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index def7a0ee9717..ad229b2add8e 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1464,7 +1464,11 @@ enum kvm_device_type { #define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_RISCV_AIA, #define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA + KVM_DEV_TYPE_LA_IPI, +#define KVM_DEV_TYPE_LA_IPI KVM_DEV_TYPE_LA_IPI + KVM_DEV_TYPE_MAX, + }; struct kvm_vfio_spapr_tce { -- Gitee From 3b2ba5a1899e83c6b8a1af770c10a4ed7ece5887 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 15 Jun 2024 18:19:35 +0800 Subject: [PATCH 1054/2138] anolis: LoongArch: KVM: Add IPI read and write function ANBZ: #9546 Implementation of IPI interrupt controller address space read and write function simulation. Signed-off-by: Min Zhou Signed-off-by: Tianrui Zhao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/3527 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_host.h | 2 + arch/loongarch/include/asm/kvm_ipi.h | 16 ++ arch/loongarch/kvm/intc/ipi.c | 287 +++++++++++++++++++++++++- 3 files changed, 303 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index a32b5c12ec01..52c81e4900eb 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -39,6 +39,8 @@ struct kvm_vm_stat { struct kvm_vm_stat_generic generic; u64 pages; u64 hugepages; + u64 ipi_read_exits; + u64 ipi_write_exits; }; struct kvm_vcpu_stat { diff --git a/arch/loongarch/include/asm/kvm_ipi.h b/arch/loongarch/include/asm/kvm_ipi.h index 875a93008802..729dfc1e3f40 100644 --- a/arch/loongarch/include/asm/kvm_ipi.h +++ b/arch/loongarch/include/asm/kvm_ipi.h @@ -29,8 +29,24 @@ struct ipi_state { #define SMP_MAILBOX 0x1000 #define KVM_IOCSR_IPI_ADDR_SIZE 0x48 +#define CORE_STATUS_OFF 0x000 +#define CORE_EN_OFF 0x004 +#define CORE_SET_OFF 0x008 +#define CORE_CLEAR_OFF 0x00c +#define 
CORE_BUF_20 0x020 +#define CORE_BUF_28 0x028 +#define CORE_BUF_30 0x030 +#define CORE_BUF_38 0x038 +#define IOCSR_IPI_SEND 0x040 + +#define IOCSR_MAIL_SEND 0x048 +#define IOCSR_ANY_SEND 0x158 + #define MAIL_SEND_ADDR (SMP_MAILBOX + IOCSR_MAIL_SEND) #define KVM_IOCSR_MAIL_ADDR_SIZE 0x118 +#define MAIL_SEND_OFFSET 0 +#define ANY_SEND_OFFSET (IOCSR_ANY_SEND - IOCSR_MAIL_SEND) + int kvm_loongarch_register_ipi_device(void); #endif diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c index a9dc0aaec502..815858671005 100644 --- a/arch/loongarch/kvm/intc/ipi.c +++ b/arch/loongarch/kvm/intc/ipi.c @@ -7,24 +7,307 @@ #include #include +static void ipi_send(struct kvm *kvm, uint64_t data) +{ + struct kvm_vcpu *vcpu; + struct kvm_interrupt irq; + int cpu, action, status; + + cpu = ((data & 0xffffffff) >> 16) & 0x3ff; + vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); + if (unlikely(vcpu == NULL)) { + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); + return; + } + + action = 1 << (data & 0x1f); + + spin_lock(&vcpu->arch.ipi_state.lock); + status = vcpu->arch.ipi_state.status; + vcpu->arch.ipi_state.status |= action; + if (status == 0) { + irq.irq = LARCH_INT_IPI; + kvm_vcpu_ioctl_interrupt(vcpu, &irq); + } + spin_unlock(&vcpu->arch.ipi_state.lock); +} + +static void ipi_clear(struct kvm_vcpu *vcpu, uint64_t data) +{ + struct kvm_interrupt irq; + + spin_lock(&vcpu->arch.ipi_state.lock); + vcpu->arch.ipi_state.status &= ~data; + if (!vcpu->arch.ipi_state.status) { + irq.irq = -LARCH_INT_IPI; + kvm_vcpu_ioctl_interrupt(vcpu, &irq); + } + spin_unlock(&vcpu->arch.ipi_state.lock); +} + +static uint64_t read_mailbox(struct kvm_vcpu *vcpu, int offset, int len) +{ + void *pbuf; + uint64_t ret = 0; + + spin_lock(&vcpu->arch.ipi_state.lock); + pbuf = (void *)vcpu->arch.ipi_state.buf + (offset - 0x20); + if (len == 1) + ret = *(unsigned char *)pbuf; + else if (len == 2) + ret = *(unsigned short *)pbuf; + else if (len == 4) + ret = *(unsigned int *)pbuf; + else if (len 
== 8) + ret = *(unsigned long *)pbuf; + else + kvm_err("%s: unknown data len: %d\n", __func__, len); + spin_unlock(&vcpu->arch.ipi_state.lock); + + return ret; +} + +static void write_mailbox(struct kvm_vcpu *vcpu, int offset, + uint64_t data, int len) +{ + void *pbuf; + + spin_lock(&vcpu->arch.ipi_state.lock); + pbuf = (void *)vcpu->arch.ipi_state.buf + (offset - 0x20); + if (len == 1) + *(unsigned char *)pbuf = (unsigned char)data; + else if (len == 2) + *(unsigned short *)pbuf = (unsigned short)data; + else if (len == 4) + *(unsigned int *)pbuf = (unsigned int)data; + else if (len == 8) + *(unsigned long *)pbuf = (unsigned long)data; + else + kvm_err("%s: unknown data len: %d\n", __func__, len); + spin_unlock(&vcpu->arch.ipi_state.lock); +} + +static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, + int len, const void *val) +{ + uint64_t data; + uint32_t offset; + int ret = 0; + + data = *(uint64_t *)val; + + offset = (uint32_t)(addr & 0xff); + WARN_ON_ONCE(offset & (len - 1)); + + switch (offset) { + case CORE_STATUS_OFF: + kvm_err("CORE_SET_OFF Can't be write\n"); + ret = -EINVAL; + break; + case CORE_EN_OFF: + spin_lock(&vcpu->arch.ipi_state.lock); + vcpu->arch.ipi_state.en = data; + spin_unlock(&vcpu->arch.ipi_state.lock); + break; + case IOCSR_IPI_SEND: + ipi_send(vcpu->kvm, data); + break; + case CORE_SET_OFF: + kvm_info("CORE_SET_OFF simulation is required\n"); + ret = -EINVAL; + break; + case CORE_CLEAR_OFF: + /* Just clear the status of the current vcpu */ + ipi_clear(vcpu, data); + break; + case CORE_BUF_20 ... 
CORE_BUF_38 + 7: + if (offset + len > CORE_BUF_38 + 8) { + kvm_err("%s: invalid offset or len: offset = %d, len = %d\n", + __func__, offset, len); + ret = -EINVAL; + break; + } + write_mailbox(vcpu, offset, data, len); + break; + default: + kvm_err("%s: unknown addr: %llx\n", __func__, addr); + ret = -EINVAL; + break; + } + + return ret; +} + +static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, + int len, void *val) +{ + uint32_t offset; + uint64_t res = 0; + int ret = 0; + + offset = (uint32_t)(addr & 0xff); + WARN_ON_ONCE(offset & (len - 1)); + + switch (offset) { + case CORE_STATUS_OFF: + spin_lock(&vcpu->arch.ipi_state.lock); + res = vcpu->arch.ipi_state.status; + spin_unlock(&vcpu->arch.ipi_state.lock); + break; + case CORE_EN_OFF: + spin_lock(&vcpu->arch.ipi_state.lock); + res = vcpu->arch.ipi_state.en; + spin_unlock(&vcpu->arch.ipi_state.lock); + break; + case CORE_SET_OFF: + res = 0; + break; + case CORE_CLEAR_OFF: + res = 0; + break; + case CORE_BUF_20 ... CORE_BUF_38 + 7: + if (offset + len > CORE_BUF_38 + 8) { + kvm_err("%s: invalid offset or len: offset = %d, len = %d\n", + __func__, offset, len); + ret = -EINVAL; + break; + } + res = read_mailbox(vcpu, offset, len); + break; + default: + kvm_err("%s: unknown addr: %llx\n", __func__, addr); + ret = -EINVAL; + break; + } + + *(uint64_t *)val = res; + + return ret; +} + static int kvm_loongarch_ipi_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, const void *val) { - return 0; + struct loongarch_ipi *ipi; + int ret; + + ipi = vcpu->kvm->arch.ipi; + if (!ipi) { + kvm_err("%s: ipi irqchip not valid!\n", __func__); + return -EINVAL; + } + + ipi->kvm->stat.ipi_write_exits++; + ret = loongarch_ipi_writel(vcpu, addr, len, val); + + return ret; } static int kvm_loongarch_ipi_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, void *val) { - return 0; + struct loongarch_ipi *ipi; + int ret; + + ipi = vcpu->kvm->arch.ipi; + if (!ipi) { + 
kvm_err("%s: ipi irqchip not valid!\n", __func__); + return -EINVAL; + } + + ipi->kvm->stat.ipi_read_exits++; + ret = loongarch_ipi_readl(vcpu, addr, len, val); + + return ret; +} + +static void send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) +{ + int i; + uint32_t val = 0, mask = 0; + /* + * Bit 27-30 is mask for byte writing. + * If the mask is 0, we need not to do anything. + */ + if ((data >> 27) & 0xf) { + /* Read the old val */ + kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); + + /* Construct the mask by scanning the bit 27-30 */ + for (i = 0; i < 4; i++) { + if (data & (0x1 << (27 + i))) + mask |= (0xff << (i * 8)); + } + /* Save the old part of val */ + val &= mask; + } + + val |= ((uint32_t)(data >> 32) & ~mask); + kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); +} + +static void mail_send(struct kvm *kvm, uint64_t data) +{ + struct kvm_vcpu *vcpu; + int cpu, mailbox; + int offset; + + cpu = ((data & 0xffffffff) >> 16) & 0x3ff; + vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); + if (unlikely(vcpu == NULL)) { + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); + return; + } + + mailbox = ((data & 0xffffffff) >> 2) & 0x7; + offset = SMP_MAILBOX + CORE_BUF_20 + mailbox * 4; + send_ipi_data(vcpu, offset, data); +} + +static void any_send(struct kvm *kvm, uint64_t data) +{ + struct kvm_vcpu *vcpu; + int cpu, offset; + + cpu = ((data & 0xffffffff) >> 16) & 0x3ff; + vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); + if (unlikely(vcpu == NULL)) { + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); + return; + } + + offset = data & 0xffff; + send_ipi_data(vcpu, offset, data); } static int kvm_loongarch_mail_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, const void *val) { + struct loongarch_ipi *ipi; + + ipi = vcpu->kvm->arch.ipi; + if (!ipi) { + kvm_err("%s: ipi irqchip not valid!\n", __func__); + return -EINVAL; + } + + addr &= 0xfff; + addr -= IOCSR_MAIL_SEND; + + switch (addr) { + case 
MAIL_SEND_OFFSET: + mail_send(vcpu->kvm, *(uint64_t *)val); + break; + case ANY_SEND_OFFSET: + any_send(vcpu->kvm, *(uint64_t *)val); + break; + default: + break; + } + return 0; } -- Gitee From 35fe5857a478a7fa1f12ffff74fee039a91d7fda Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 15 Jun 2024 18:22:57 +0800 Subject: [PATCH 1055/2138] anolis: LoongArch: KVM: Add IPI user mode read and write function ANBZ: #9546 Implements the communication interface between the user mode program and the kernel in IPI interrupt control simulation, which is used to obtain or send the simulation data of the interrupt controller in the user mode process, and is used in VM migration or VM saving and restoration. Signed-off-by: Min Zhou Signed-off-by: Tianrui Zhao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/3527 Reviewed-by: Juxin Gao --- arch/loongarch/include/uapi/asm/kvm.h | 2 + arch/loongarch/kvm/intc/ipi.c | 91 ++++++++++++++++++++++++++- 2 files changed, 91 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index 8a1408cbadb0..8f0e94bc701b 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -118,4 +118,6 @@ struct kvm_iocsr_entry { #define KVM_IRQCHIP_NUM_PINS 64 #define KVM_MAX_CORES 256 +#define KVM_DEV_LOONGARCH_IPI_GRP_REGS 0x40000002 + #endif /* __UAPI_ASM_LOONGARCH_KVM_H */ diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c index 815858671005..fbf6f7e462cf 100644 --- a/arch/loongarch/kvm/intc/ipi.c +++ b/arch/loongarch/kvm/intc/ipi.c @@ -320,16 +320,103 @@ static const struct kvm_io_device_ops kvm_loongarch_mail_ops = { .write = kvm_loongarch_mail_write, }; +static int kvm_loongarch_ipi_regs_access(struct kvm_device *dev, + struct kvm_device_attr *attr, + bool is_write) +{ + uint64_t val; + int cpu, addr; + void *p = NULL; + int len = 4; + struct kvm_vcpu *vcpu; + + cpu = (attr->attr >> 16) & 0x3ff; 
+ addr = attr->attr & 0xff; + + vcpu = kvm_get_vcpu(dev->kvm, cpu); + if (unlikely(vcpu == NULL)) { + kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); + return -EINVAL; + } + switch (addr) { + case CORE_STATUS_OFF: + p = &vcpu->arch.ipi_state.status; + break; + case CORE_EN_OFF: + p = &vcpu->arch.ipi_state.en; + break; + case CORE_SET_OFF: + p = &vcpu->arch.ipi_state.set; + break; + case CORE_CLEAR_OFF: + p = &vcpu->arch.ipi_state.clear; + break; + case CORE_BUF_20: + p = &vcpu->arch.ipi_state.buf[0]; + len = 8; + break; + case CORE_BUF_28: + p = &vcpu->arch.ipi_state.buf[1]; + len = 8; + break; + case CORE_BUF_30: + p = &vcpu->arch.ipi_state.buf[2]; + len = 8; + break; + case CORE_BUF_38: + p = &vcpu->arch.ipi_state.buf[3]; + len = 8; + break; + default: + kvm_err("%s: unknown ipi register, addr = %d\n", __func__, addr); + return -EINVAL; + } + + if (is_write) { + if (len == 4) { + if (get_user(val, (uint32_t __user *)attr->addr)) + return -EFAULT; + *(uint32_t *)p = (uint32_t)val; + } else if (len == 8) { + if (get_user(val, (uint64_t __user *)attr->addr)) + return -EFAULT; + *(uint64_t *)p = val; + } + } else { + if (len == 4) { + val = *(uint32_t *)p; + return put_user(val, (uint32_t __user *)attr->addr); + } else if (len == 8) { + val = *(uint64_t *)p; + return put_user(val, (uint64_t __user *)attr->addr); + } + } + + return 0; +} + static int kvm_loongarch_ipi_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { - return 0; + switch (attr->group) { + case KVM_DEV_LOONGARCH_IPI_GRP_REGS: + return kvm_loongarch_ipi_regs_access(dev, attr, false); + default: + kvm_err("%s: unknown group (%d)\n", __func__, attr->group); + return -EINVAL; + } } static int kvm_loongarch_ipi_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { - return 0; + switch (attr->group) { + case KVM_DEV_LOONGARCH_IPI_GRP_REGS: + return kvm_loongarch_ipi_regs_access(dev, attr, true); + default: + kvm_err("%s: unknown group (%d)\n", __func__, attr->group); + 
return -EINVAL; + } } static void kvm_loongarch_ipi_destroy(struct kvm_device *dev) -- Gitee From 5947ef4a6540004b612f35882c0325a66d605550 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Mon, 3 Jun 2024 14:30:58 +0800 Subject: [PATCH 1056/2138] anolis: LoongArch: KVM: Add EXTIOI device support ANBZ: #9546 Added device model for EXTIOI interrupt controller, implemented basic create destroy interface, and registered device model to kvm device table. Signed-off-by: Tianrui Zhao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/3527 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_extioi.h | 78 +++++++++++++++++ arch/loongarch/include/asm/kvm_host.h | 2 + arch/loongarch/kvm/Makefile | 1 + arch/loongarch/kvm/intc/extioi.c | 111 ++++++++++++++++++++++++ arch/loongarch/kvm/main.c | 6 +- include/uapi/linux/kvm.h | 2 + 6 files changed, 199 insertions(+), 1 deletion(-) create mode 100644 arch/loongarch/include/asm/kvm_extioi.h create mode 100644 arch/loongarch/kvm/intc/extioi.c diff --git a/arch/loongarch/include/asm/kvm_extioi.h b/arch/loongarch/include/asm/kvm_extioi.h new file mode 100644 index 000000000000..48a117b2be5d --- /dev/null +++ b/arch/loongarch/include/asm/kvm_extioi.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_EXTIOI_H +#define LOONGARCH_EXTIOI_H + +#include + +#define EXTIOI_IRQS 256 +#define EXTIOI_ROUTE_MAX_VCPUS 256 +#define EXTIOI_IRQS_U8_NUMS (EXTIOI_IRQS / 8) +#define EXTIOI_IRQS_U32_NUMS (EXTIOI_IRQS_U8_NUMS / 4) +#define EXTIOI_IRQS_U64_NUMS (EXTIOI_IRQS_U32_NUMS / 2) +/* map to ipnum per 32 irqs */ +#define EXTIOI_IRQS_NODETYPE_COUNT 16 + +#define EXTIOI_BASE 0x1400 +#define EXTIOI_SIZE 0x900 + +#define LS3A_INTC_IP 8 + +struct loongarch_extioi { + spinlock_t lock; + struct kvm *kvm; + struct kvm_io_device device; + /* hardware state */ + union nodetype { + u64 reg_u64[EXTIOI_IRQS_NODETYPE_COUNT / 4]; + u32 
reg_u32[EXTIOI_IRQS_NODETYPE_COUNT / 2]; + uint16_t reg_u16[EXTIOI_IRQS_NODETYPE_COUNT]; + u8 reg_u8[EXTIOI_IRQS_NODETYPE_COUNT * 2]; + } nodetype; + + /* one bit shows the state of one irq */ + union bounce { + u64 reg_u64[EXTIOI_IRQS_U64_NUMS]; + u32 reg_u32[EXTIOI_IRQS_U32_NUMS]; + u8 reg_u8[EXTIOI_IRQS_U8_NUMS]; + } bounce; + + union isr { + u64 reg_u64[EXTIOI_IRQS_U64_NUMS]; + u32 reg_u32[EXTIOI_IRQS_U32_NUMS]; + u8 reg_u8[EXTIOI_IRQS_U8_NUMS]; + } isr; + union coreisr { + u64 reg_u64[EXTIOI_ROUTE_MAX_VCPUS][EXTIOI_IRQS_U64_NUMS]; + u32 reg_u32[EXTIOI_ROUTE_MAX_VCPUS][EXTIOI_IRQS_U32_NUMS]; + u8 reg_u8[EXTIOI_ROUTE_MAX_VCPUS][EXTIOI_IRQS_U8_NUMS]; + } coreisr; + union enable { + u64 reg_u64[EXTIOI_IRQS_U64_NUMS]; + u32 reg_u32[EXTIOI_IRQS_U32_NUMS]; + u8 reg_u8[EXTIOI_IRQS_U8_NUMS]; + } enable; + + /* use one byte to config ipmap for 32 irqs at once */ + union ipmap { + u64 reg_u64; + u32 reg_u32[EXTIOI_IRQS_U32_NUMS / 4]; + u8 reg_u8[EXTIOI_IRQS_U8_NUMS / 4]; + } ipmap; + /* use one byte to config coremap for one irq */ + union coremap { + u64 reg_u64[EXTIOI_IRQS / 8]; + u32 reg_u32[EXTIOI_IRQS / 4]; + u8 reg_u8[EXTIOI_IRQS]; + } coremap; + + DECLARE_BITMAP(sw_coreisr[EXTIOI_ROUTE_MAX_VCPUS][LS3A_INTC_IP], EXTIOI_IRQS); + uint8_t sw_coremap[EXTIOI_IRQS]; +}; + +void extioi_set_irq(struct loongarch_extioi *s, int irq, int level); +int kvm_loongarch_register_extioi_device(void); +#endif /* LOONGARCH_EXTIOI_H */ diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 52c81e4900eb..03db9f0edeac 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -20,6 +20,7 @@ #include #include #include +#include /* Loongarch KVM register ids */ #define KVM_GET_IOC_CSR_IDX(id) ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT) @@ -113,6 +114,7 @@ struct kvm_arch { s64 time_offset; struct kvm_context __percpu *vmcs; struct loongarch_ipi *ipi; + struct loongarch_extioi *extioi; }; #define 
CSR_MAX_NUMS 0x800 diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile index 69a074ee0d0f..2ab3ab019bee 100644 --- a/arch/loongarch/kvm/Makefile +++ b/arch/loongarch/kvm/Makefile @@ -19,5 +19,6 @@ kvm-y += tlb.o kvm-y += vcpu.o kvm-y += vm.o kvm-y += intc/ipi.o +kvm-y += intc/extioi.o CFLAGS_exit.o += $(call cc-option,-Wno-override-init,) diff --git a/arch/loongarch/kvm/intc/extioi.c b/arch/loongarch/kvm/intc/extioi.c new file mode 100644 index 000000000000..2f1b93e95f97 --- /dev/null +++ b/arch/loongarch/kvm/intc/extioi.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include +#include +#include + +static int kvm_loongarch_extioi_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + return 0; +} + +static int kvm_loongarch_extioi_read(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, void *val) +{ + return 0; +} + +static const struct kvm_io_device_ops kvm_loongarch_extioi_ops = { + .read = kvm_loongarch_extioi_read, + .write = kvm_loongarch_extioi_write, +}; + +static int kvm_loongarch_extioi_get_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + return 0; +} + +static int kvm_loongarch_extioi_set_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + return 0; +} + +static void kvm_loongarch_extioi_destroy(struct kvm_device *dev) +{ + struct kvm *kvm; + struct loongarch_extioi *extioi; + struct kvm_io_device *device; + + if (!dev) + return; + + kvm = dev->kvm; + if (!kvm) + return; + + extioi = kvm->arch.extioi; + if (!extioi) + return; + + device = &extioi->device; + kvm_io_bus_unregister_dev(kvm, KVM_IOCSR_BUS, device); + kfree(extioi); +} + +static int kvm_loongarch_extioi_create(struct kvm_device *dev, u32 type) +{ + int ret; + struct loongarch_extioi *s; + struct kvm_io_device *device; + struct kvm *kvm = dev->kvm; + + /* extioi has been created */ + if 
(kvm->arch.extioi) + return -EINVAL; + + s = kzalloc(sizeof(struct loongarch_extioi), GFP_KERNEL); + if (!s) + return -ENOMEM; + spin_lock_init(&s->lock); + s->kvm = kvm; + + /* + * Initialize IOCSR device + */ + device = &s->device; + kvm_iodevice_init(device, &kvm_loongarch_extioi_ops); + mutex_lock(&kvm->slots_lock); + ret = kvm_io_bus_register_dev(kvm, KVM_IOCSR_BUS, EXTIOI_BASE, EXTIOI_SIZE, device); + mutex_unlock(&kvm->slots_lock); + if (ret < 0) { + kfree(s); + return -EFAULT; + } + + kvm->arch.extioi = s; + + kvm_info("create extioi device successfully\n"); + return 0; +} + +static struct kvm_device_ops kvm_loongarch_extioi_dev_ops = { + .name = "kvm-loongarch-extioi", + .create = kvm_loongarch_extioi_create, + .destroy = kvm_loongarch_extioi_destroy, + .set_attr = kvm_loongarch_extioi_set_attr, + .get_attr = kvm_loongarch_extioi_get_attr, +}; + +int kvm_loongarch_register_extioi_device(void) +{ + return kvm_register_device_ops(&kvm_loongarch_extioi_dev_ops, + KVM_DEV_TYPE_LA_EXTIOI); +} diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index 36efc7b38f83..b5da4341006a 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "trace.h" unsigned long vpid_mask; @@ -372,7 +373,10 @@ static int kvm_loongarch_env_init(void) if (ret) return ret; - return 0; + /* Register loongarch extioi interrupt controller interface. 
*/ + ret = kvm_loongarch_register_extioi_device(); + + return ret; } static void kvm_loongarch_env_exit(void) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index ad229b2add8e..86fe825c5a15 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1466,6 +1466,8 @@ enum kvm_device_type { #define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_LA_IPI, #define KVM_DEV_TYPE_LA_IPI KVM_DEV_TYPE_LA_IPI + KVM_DEV_TYPE_LA_EXTIOI, +#define KVM_DEV_TYPE_LA_EXTIOI KVM_DEV_TYPE_LA_EXTIOI KVM_DEV_TYPE_MAX, -- Gitee From c11aaba2e519bebfd5abacfe03af4f44b3426cea Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 15 Jun 2024 18:41:22 +0800 Subject: [PATCH 1057/2138] anolis: LoongArch: KVM: Add EXTIOI read and write functions ANBZ: #9546 Implementation of EXTIOI interrupt controller address space read and write function simulation. Signed-off-by: Tianrui Zhao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/3527 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_extioi.h | 17 + arch/loongarch/include/asm/kvm_host.h | 2 + arch/loongarch/kvm/intc/extioi.c | 577 +++++++++++++++++++++++- 3 files changed, 594 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_extioi.h b/arch/loongarch/include/asm/kvm_extioi.h index 48a117b2be5d..d2af039a7d6f 100644 --- a/arch/loongarch/include/asm/kvm_extioi.h +++ b/arch/loongarch/include/asm/kvm_extioi.h @@ -19,8 +19,25 @@ #define EXTIOI_BASE 0x1400 #define EXTIOI_SIZE 0x900 +#define EXTIOI_NODETYPE_START 0xa0 +#define EXTIOI_NODETYPE_END 0xbf +#define EXTIOI_IPMAP_START 0xc0 +#define EXTIOI_IPMAP_END 0xc7 +#define EXTIOI_ENABLE_START 0x200 +#define EXTIOI_ENABLE_END 0x21f +#define EXTIOI_BOUNCE_START 0x280 +#define EXTIOI_BOUNCE_END 0x29f +#define EXTIOI_ISR_START 0x300 +#define EXTIOI_ISR_END 0x31f +#define EXTIOI_COREISR_START 0x400 +#define EXTIOI_COREISR_END 0x71f +#define EXTIOI_COREMAP_START 0x800 +#define EXTIOI_COREMAP_END 0x8ff + #define 
LS3A_INTC_IP 8 +#define EXTIOI_SW_COREMAP_FLAG (1 << 0) + struct loongarch_extioi { spinlock_t lock; struct kvm *kvm; diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 03db9f0edeac..3293e2e52ebc 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -42,6 +42,8 @@ struct kvm_vm_stat { u64 hugepages; u64 ipi_read_exits; u64 ipi_write_exits; + u64 extioi_read_exits; + u64 extioi_write_exits; }; struct kvm_vcpu_stat { diff --git a/arch/loongarch/kvm/intc/extioi.c b/arch/loongarch/kvm/intc/extioi.c index 2f1b93e95f97..dd18b7a7599a 100644 --- a/arch/loongarch/kvm/intc/extioi.c +++ b/arch/loongarch/kvm/intc/extioi.c @@ -7,18 +7,591 @@ #include #include +#define loongarch_ext_irq_lock(s, flags) spin_lock_irqsave(&s->lock, flags) +#define loongarch_ext_irq_unlock(s, flags) spin_unlock_irqrestore(&s->lock, flags) + +static void extioi_update_irq(struct loongarch_extioi *s, int irq, int level) +{ + int ipnum, cpu, found, irq_index, irq_mask; + struct kvm_interrupt vcpu_irq; + struct kvm_vcpu *vcpu; + + ipnum = s->ipmap.reg_u8[irq / 32]; + ipnum = count_trailing_zeros(ipnum); + ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0; + + cpu = s->sw_coremap[irq]; + vcpu = kvm_get_vcpu(s->kvm, cpu); + irq_index = irq / 32; + /* length of accessing core isr is 4 bytes */ + irq_mask = 1 << (irq & 0x1f); + + if (level) { + /* if not enable return false */ + if (((s->enable.reg_u32[irq_index]) & irq_mask) == 0) + return; + s->coreisr.reg_u32[cpu][irq_index] |= irq_mask; + found = find_first_bit(s->sw_coreisr[cpu][ipnum], EXTIOI_IRQS); + set_bit(irq, s->sw_coreisr[cpu][ipnum]); + } else { + s->coreisr.reg_u32[cpu][irq_index] &= ~irq_mask; + clear_bit(irq, s->sw_coreisr[cpu][ipnum]); + found = find_first_bit(s->sw_coreisr[cpu][ipnum], EXTIOI_IRQS); + } + + if (found < EXTIOI_IRQS) + /* other irq is handling, need not update parent irq level */ + return; + + vcpu_irq.irq = level ? 
INT_HWI0 + ipnum : -(INT_HWI0 + ipnum); + kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq); +} + +void extioi_set_irq(struct loongarch_extioi *s, int irq, int level) +{ + unsigned long *isr = (unsigned long *)s->isr.reg_u8; + unsigned long flags; + + level ? set_bit(irq, isr) : clear_bit(irq, isr); + if (!level) + return; + loongarch_ext_irq_lock(s, flags); + extioi_update_irq(s, irq, level); + loongarch_ext_irq_unlock(s, flags); +} + +static inline void extioi_enable_irq(struct kvm_vcpu *vcpu, struct loongarch_extioi *s, + int index, u8 mask, int level) +{ + u8 val; + int irq; + + val = mask & s->isr.reg_u8[index]; + irq = ffs(val); + while (irq != 0) { + /* + * enable bit change from 0 to 1, + * need to update irq by pending bits + */ + extioi_update_irq(s, irq - 1 + index * 8, level); + val &= ~(1 << (irq - 1)); + irq = ffs(val); + } +} + +static int loongarch_extioi_writeb(struct kvm_vcpu *vcpu, + struct loongarch_extioi *s, + gpa_t addr, int len, const void *val) +{ + int index, irq, ret = 0; + u8 data, old_data, cpu; + u8 coreisr, old_coreisr; + gpa_t offset; + + data = *(u8 *)val; + offset = addr - EXTIOI_BASE; + + switch (offset) { + case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END: + index = (offset - EXTIOI_NODETYPE_START); + s->nodetype.reg_u8[index] = data; + break; + case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END: + /* + * ipmap cannot be set at runtime, can be set only at the beginning + * of intr driver, need not update upper irq level + */ + index = (offset - EXTIOI_IPMAP_START); + s->ipmap.reg_u8[index] = data; + break; + case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END: + index = (offset - EXTIOI_ENABLE_START); + old_data = s->enable.reg_u8[index]; + s->enable.reg_u8[index] = data; + /* + * 1: enable irq. + * update irq when isr is set. + */ + data = s->enable.reg_u8[index] & ~old_data & s->isr.reg_u8[index]; + extioi_enable_irq(vcpu, s, index, data, 1); + /* + * 0: disable irq. + * update irq when isr is set. 
+ */ + data = ~s->enable.reg_u8[index] & old_data & s->isr.reg_u8[index]; + extioi_enable_irq(vcpu, s, index, data, 0); + break; + case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END: + /* do not emulate hw bounced irq routing */ + index = offset - EXTIOI_BOUNCE_START; + s->bounce.reg_u8[index] = data; + break; + case EXTIOI_COREISR_START ... EXTIOI_COREISR_END: + /* length of accessing core isr is 8 bytes */ + index = (offset - EXTIOI_COREISR_START); + /* using attrs to get current cpu index */ + cpu = vcpu->vcpu_id; + coreisr = data; + old_coreisr = s->coreisr.reg_u8[cpu][index]; + /* write 1 to clear interrupt */ + s->coreisr.reg_u8[cpu][index] = old_coreisr & ~coreisr; + coreisr &= old_coreisr; + irq = ffs(coreisr); + while (irq != 0) { + extioi_update_irq(s, irq - 1 + index * 8, 0); + coreisr &= ~(1 << (irq - 1)); + irq = ffs(coreisr); + } + break; + case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END: + irq = offset - EXTIOI_COREMAP_START; + index = irq; + s->coremap.reg_u8[index] = data; + + cpu = data & 0xff; + cpu = ffs(cpu) - 1; + cpu = (cpu >= 4) ? 0 : cpu; + + if (s->sw_coremap[irq] == cpu) + break; + + if (test_bit(irq, (unsigned long *)s->isr.reg_u8)) { + /* + * lower irq at old cpu and raise irq at new cpu + */ + extioi_update_irq(s, irq, 0); + s->sw_coremap[irq] = cpu; + extioi_update_irq(s, irq, 1); + } else + s->sw_coremap[irq] = cpu; + + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int loongarch_extioi_writew(struct kvm_vcpu *vcpu, + struct loongarch_extioi *s, + gpa_t addr, int len, const void *val) +{ + int i, index, irq, ret = 0; + u8 cpu; + u32 data, old_data; + u32 coreisr, old_coreisr; + gpa_t offset; + + data = *(u32 *)val; + offset = addr - EXTIOI_BASE; + + switch (offset) { + case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END: + index = (offset - EXTIOI_NODETYPE_START) >> 2; + s->nodetype.reg_u32[index] = data; + break; + case EXTIOI_IPMAP_START ... 
EXTIOI_IPMAP_END: + /* + * ipmap cannot be set at runtime, can be set only at the beginning + * of intr driver, need not update upper irq level + */ + index = (offset - EXTIOI_IPMAP_START) >> 2; + s->ipmap.reg_u32[index] = data; + break; + case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END: + index = (offset - EXTIOI_ENABLE_START) >> 2; + old_data = s->enable.reg_u32[index]; + s->enable.reg_u32[index] = data; + /* + * 1: enable irq. + * update irq when isr is set. + */ + data = s->enable.reg_u32[index] & ~old_data & s->isr.reg_u32[index]; + index = index << 2; + for (i = 0; i < sizeof(data); i++) { + u8 mask = (data >> (i * 8)) & 0xff; + + extioi_enable_irq(vcpu, s, index + i, mask, 1); + } + /* + * 0: disable irq. + * update irq when isr is set. + */ + data = ~s->enable.reg_u32[index] & old_data & s->isr.reg_u32[index]; + for (i = 0; i < sizeof(data); i++) { + u8 mask = (data >> (i * 8)) & 0xff; + + extioi_enable_irq(vcpu, s, index, mask, 0); + } + break; + case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END: + /* do not emulate hw bounced irq routing */ + index = (offset - EXTIOI_BOUNCE_START) >> 2; + s->bounce.reg_u32[index] = data; + break; + case EXTIOI_COREISR_START ... EXTIOI_COREISR_END: + /* length of accessing core isr is 8 bytes */ + index = (offset - EXTIOI_COREISR_START) >> 2; + /* using attrs to get current cpu index */ + cpu = vcpu->vcpu_id; + coreisr = data; + old_coreisr = s->coreisr.reg_u32[cpu][index]; + /* write 1 to clear interrupt */ + s->coreisr.reg_u32[cpu][index] = old_coreisr & ~coreisr; + coreisr &= old_coreisr; + irq = ffs(coreisr); + while (irq != 0) { + extioi_update_irq(s, irq - 1 + index * 32, 0); + coreisr &= ~(1 << (irq - 1)); + irq = ffs(coreisr); + } + break; + case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END: + irq = offset - EXTIOI_COREMAP_START; + index = irq >> 2; + + s->coremap.reg_u32[index] = data; + + for (i = 0; i < sizeof(data); i++) { + cpu = data & 0xff; + cpu = ffs(cpu) - 1; + cpu = (cpu >= 4) ? 
0 : cpu; + data = data >> 8; + + if (s->sw_coremap[irq + i] == cpu) + continue; + + if (test_bit(irq, (unsigned long *)s->isr.reg_u8)) { + /* + * lower irq at old cpu and raise irq at new cpu + */ + extioi_update_irq(s, irq + i, 0); + s->sw_coremap[irq + i] = cpu; + extioi_update_irq(s, irq + i, 1); + } else + s->sw_coremap[irq + i] = cpu; + } + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int loongarch_extioi_writel(struct kvm_vcpu *vcpu, + struct loongarch_extioi *s, + gpa_t addr, int len, const void *val) +{ + int i, index, irq, bits, ret = 0; + u8 cpu; + u64 data, old_data; + u64 coreisr, old_coreisr; + gpa_t offset; + + data = *(u64 *)val; + offset = addr - EXTIOI_BASE; + + switch (offset) { + case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END: + index = (offset - EXTIOI_NODETYPE_START) >> 3; + s->nodetype.reg_u64[index] = data; + break; + case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END: + /* + * ipmap cannot be set at runtime, can be set only at the beginning + * of intr driver, need not update upper irq level + */ + index = (offset - EXTIOI_IPMAP_START) >> 3; + s->ipmap.reg_u64 = data; + break; + case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END: + index = (offset - EXTIOI_ENABLE_START) >> 3; + old_data = s->enable.reg_u64[index]; + s->enable.reg_u64[index] = data; + /* + * 1: enable irq. + * update irq when isr is set. + */ + data = s->enable.reg_u64[index] & ~old_data & s->isr.reg_u64[index]; + index = index << 3; + for (i = 0; i < sizeof(data); i++) { + u8 mask = (data >> (i * 8)) & 0xff; + + extioi_enable_irq(vcpu, s, index + i, mask, 1); + } + /* + * 0: disable irq. + * update irq when isr is set. + */ + data = ~s->enable.reg_u64[index] & old_data & s->isr.reg_u64[index]; + for (i = 0; i < sizeof(data); i++) { + u8 mask = (data >> (i * 8)) & 0xff; + + extioi_enable_irq(vcpu, s, index, mask, 0); + } + break; + case EXTIOI_BOUNCE_START ... 
EXTIOI_BOUNCE_END: + /* do not emulate hw bounced irq routing */ + index = (offset - EXTIOI_BOUNCE_START) >> 3; + s->bounce.reg_u64[index] = data; + break; + case EXTIOI_COREISR_START ... EXTIOI_COREISR_END: + /* length of accessing core isr is 8 bytes */ + index = (offset - EXTIOI_COREISR_START) >> 3; + /* using attrs to get current cpu index */ + cpu = vcpu->vcpu_id; + coreisr = data; + old_coreisr = s->coreisr.reg_u64[cpu][index]; + /* write 1 to clear interrupt */ + s->coreisr.reg_u64[cpu][index] = old_coreisr & ~coreisr; + coreisr &= old_coreisr; + + bits = sizeof(u64) * 8; + irq = find_first_bit((void *)&coreisr, bits); + while (irq < bits) { + extioi_update_irq(s, irq + index * bits, 0); + bitmap_clear((void *)&coreisr, irq, 1); + irq = find_first_bit((void *)&coreisr, bits); + } + break; + case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END: + irq = offset - EXTIOI_COREMAP_START; + index = irq >> 3; + + s->coremap.reg_u64[index] = data; + + for (i = 0; i < sizeof(data); i++) { + cpu = data & 0xff; + cpu = ffs(cpu) - 1; + cpu = (cpu >= 4) ? 
0 : cpu; + data = data >> 8; + + if (s->sw_coremap[irq + i] == cpu) + continue; + + if (test_bit(irq, (unsigned long *)s->isr.reg_u8)) { + /* + * lower irq at old cpu and raise irq at new cpu + */ + extioi_update_irq(s, irq + i, 0); + s->sw_coremap[irq + i] = cpu; + extioi_update_irq(s, irq + i, 1); + } else + s->sw_coremap[irq + i] = cpu; + } + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + static int kvm_loongarch_extioi_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, const void *val) { - return 0; + int ret; + struct loongarch_extioi *extioi = vcpu->kvm->arch.extioi; + unsigned long flags; + + if (!extioi) { + kvm_err("%s: extioi irqchip not valid!\n", __func__); + return -EINVAL; + } + + vcpu->kvm->stat.extioi_write_exits++; + loongarch_ext_irq_lock(extioi, flags); + + switch (len) { + case 1: + ret = loongarch_extioi_writeb(vcpu, extioi, addr, len, val); + break; + case 4: + ret = loongarch_extioi_writew(vcpu, extioi, addr, len, val); + break; + case 8: + ret = loongarch_extioi_writel(vcpu, extioi, addr, len, val); + break; + default: + WARN_ONCE(1, "%s: Abnormal address access:addr 0x%llx,size %d\n", + __func__, addr, len); + } + + loongarch_ext_irq_unlock(extioi, flags); + + + return ret; +} + +static int loongarch_extioi_readb(struct kvm_vcpu *vcpu, struct loongarch_extioi *s, + gpa_t addr, int len, void *val) +{ + int index, ret = 0; + gpa_t offset; + u64 data; + + offset = addr - EXTIOI_BASE; + switch (offset) { + case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END: + index = offset - EXTIOI_NODETYPE_START; + data = s->nodetype.reg_u8[index]; + break; + case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END: + index = offset - EXTIOI_IPMAP_START; + data = s->ipmap.reg_u8[index]; + break; + case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END: + index = offset - EXTIOI_ENABLE_START; + data = s->enable.reg_u8[index]; + break; + case EXTIOI_BOUNCE_START ... 
EXTIOI_BOUNCE_END: + index = offset - EXTIOI_BOUNCE_START; + data = s->bounce.reg_u8[index]; + break; + case EXTIOI_COREISR_START ... EXTIOI_COREISR_END: + /* length of accessing core isr is 8 bytes */ + index = offset - EXTIOI_COREISR_START; + data = s->coreisr.reg_u8[vcpu->vcpu_id][index]; + break; + case EXTIOI_COREMAP_START ... EXTIOI_COREMAP_END: + index = offset - EXTIOI_COREMAP_START; + data = s->coremap.reg_u8[index]; + break; + default: + ret = -EINVAL; + break; + } + + *(u8 *)val = data; + + return ret; +} + +static int loongarch_extioi_readw(struct kvm_vcpu *vcpu, struct loongarch_extioi *s, + gpa_t addr, int len, void *val) +{ + int index, ret = 0; + gpa_t offset; + u64 data; + + offset = addr - EXTIOI_BASE; + switch (offset) { + case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END: + index = (offset - EXTIOI_NODETYPE_START) >> 2; + data = s->nodetype.reg_u32[index]; + break; + case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END: + index = (offset - EXTIOI_IPMAP_START) >> 2; + data = s->ipmap.reg_u32[index]; + break; + case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END: + index = (offset - EXTIOI_ENABLE_START) >> 2; + data = s->enable.reg_u32[index]; + break; + case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END: + index = (offset - EXTIOI_BOUNCE_START) >> 2; + data = s->bounce.reg_u32[index]; + break; + case EXTIOI_COREISR_START ... EXTIOI_COREISR_END: + /* length of accessing core isr is 8 bytes */ + index = (offset - EXTIOI_COREISR_START) >> 2; + data = s->coreisr.reg_u32[vcpu->vcpu_id][index]; + break; + case EXTIOI_COREMAP_START ... 
EXTIOI_COREMAP_END: + index = (offset - EXTIOI_COREMAP_START) >> 2; + data = s->coremap.reg_u32[index]; + break; + default: + ret = -EINVAL; + break; + } + + *(u32 *)val = data; + + return ret; +} + +static int loongarch_extioi_readl(struct kvm_vcpu *vcpu, struct loongarch_extioi *s, + gpa_t addr, int len, void *val) +{ + int index, ret = 0; + gpa_t offset; + u64 data; + + offset = addr - EXTIOI_BASE; + switch (offset) { + case EXTIOI_NODETYPE_START ... EXTIOI_NODETYPE_END: + index = (offset - EXTIOI_NODETYPE_START) >> 3; + data = s->nodetype.reg_u64[index]; + break; + case EXTIOI_IPMAP_START ... EXTIOI_IPMAP_END: + index = (offset - EXTIOI_IPMAP_START) >> 3; + data = s->ipmap.reg_u64; + break; + case EXTIOI_ENABLE_START ... EXTIOI_ENABLE_END: + index = (offset - EXTIOI_ENABLE_START) >> 3; + data = s->enable.reg_u64[index]; + break; + case EXTIOI_BOUNCE_START ... EXTIOI_BOUNCE_END: + index = (offset - EXTIOI_BOUNCE_START) >> 3; + data = s->bounce.reg_u64[index]; + break; + case EXTIOI_COREISR_START ... EXTIOI_COREISR_END: + /* length of accessing core isr is 8 bytes */ + index = (offset - EXTIOI_COREISR_START) >> 3; + data = s->coreisr.reg_u64[vcpu->vcpu_id][index]; + break; + case EXTIOI_COREMAP_START ... 
EXTIOI_COREMAP_END: + index = (offset - EXTIOI_COREMAP_START) >> 3; + data = s->coremap.reg_u64[index]; + break; + default: + ret = -EINVAL; + break; + } + + *(u64 *)val = data; + + return ret; } static int kvm_loongarch_extioi_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, void *val) { - return 0; + int ret; + struct loongarch_extioi *extioi = vcpu->kvm->arch.extioi; + unsigned long flags; + + if (!extioi) { + kvm_err("%s: extioi irqchip not valid!\n", __func__); + return -EINVAL; + } + + vcpu->kvm->stat.extioi_read_exits++; + loongarch_ext_irq_lock(extioi, flags); + + switch (len) { + case 1: + ret = loongarch_extioi_readb(vcpu, extioi, addr, len, val); + break; + case 4: + ret = loongarch_extioi_readw(vcpu, extioi, addr, len, val); + break; + case 8: + ret = loongarch_extioi_readl(vcpu, extioi, addr, len, val); + break; + default: + WARN_ONCE(1, "%s: Abnormal address access:addr 0x%llx,size %d\n", + __func__, addr, len); + } + + loongarch_ext_irq_unlock(extioi, flags); + + return ret; } static const struct kvm_io_device_ops kvm_loongarch_extioi_ops = { -- Gitee From 6526c657ec3c0567e48314a503769923c51e79f0 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 15 Jun 2024 18:44:38 +0800 Subject: [PATCH 1058/2138] anolis: LoongArch: KVM: Add EXTIOI user mode read and write functions ANBZ: #9546 Implements the communication interface between the user mode program and the kernel in EXTIOI interrupt control simulation, which is used to obtain or send the simulation data of the interrupt controller in the user mode process, and is used in VM migration or VM saving and restoration. 
Signed-off-by: Tianrui Zhao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/3527 Reviewed-by: Juxin Gao --- arch/loongarch/include/uapi/asm/kvm.h | 2 + arch/loongarch/kvm/intc/extioi.c | 103 +++++++++++++++++++++++++- 2 files changed, 103 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index 8f0e94bc701b..1349f426058d 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -120,4 +120,6 @@ struct kvm_iocsr_entry { #define KVM_DEV_LOONGARCH_IPI_GRP_REGS 0x40000002 +#define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS 0x40000003 + #endif /* __UAPI_ASM_LOONGARCH_KVM_H */ diff --git a/arch/loongarch/kvm/intc/extioi.c b/arch/loongarch/kvm/intc/extioi.c index dd18b7a7599a..48141823aaa3 100644 --- a/arch/loongarch/kvm/intc/extioi.c +++ b/arch/loongarch/kvm/intc/extioi.c @@ -47,6 +47,26 @@ static void extioi_update_irq(struct loongarch_extioi *s, int irq, int level) kvm_vcpu_ioctl_interrupt(vcpu, &vcpu_irq); } +static void extioi_set_sw_coreisr(struct loongarch_extioi *s) +{ + int ipnum, cpu, irq_index, irq_mask, irq; + + for (irq = 0; irq < EXTIOI_IRQS; irq++) { + ipnum = s->ipmap.reg_u8[irq / 32]; + ipnum = count_trailing_zeros(ipnum); + ipnum = (ipnum >= 0 && ipnum < 4) ? 
ipnum : 0; + irq_index = irq / 32; + /* length of accessing core isr is 4 bytes */ + irq_mask = 1 << (irq & 0x1f); + + cpu = s->coremap.reg_u8[irq]; + if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask)) + set_bit(irq, s->sw_coreisr[cpu][ipnum]); + else + clear_bit(irq, s->sw_coreisr[cpu][ipnum]); + } +} + void extioi_set_irq(struct loongarch_extioi *s, int irq, int level) { unsigned long *isr = (unsigned long *)s->isr.reg_u8; @@ -599,16 +619,95 @@ static const struct kvm_io_device_ops kvm_loongarch_extioi_ops = { .write = kvm_loongarch_extioi_write, }; +static int kvm_loongarch_extioi_regs_access(struct kvm_device *dev, + struct kvm_device_attr *attr, + bool is_write) +{ + int len, addr; + void __user *data; + void *p = NULL; + struct loongarch_extioi *s; + unsigned long flags; + + s = dev->kvm->arch.extioi; + addr = attr->attr; + data = (void __user *)attr->addr; + + loongarch_ext_irq_lock(s, flags); + switch (addr) { + case EXTIOI_NODETYPE_START: + p = s->nodetype.reg_u8; + len = sizeof(s->nodetype); + break; + case EXTIOI_IPMAP_START: + p = s->ipmap.reg_u8; + len = sizeof(s->ipmap); + break; + case EXTIOI_ENABLE_START: + p = s->enable.reg_u8; + len = sizeof(s->enable); + break; + case EXTIOI_BOUNCE_START: + p = s->bounce.reg_u8; + len = sizeof(s->bounce); + break; + case EXTIOI_ISR_START: + p = s->isr.reg_u8; + len = sizeof(s->isr); + break; + case EXTIOI_COREISR_START: + p = s->coreisr.reg_u8; + len = sizeof(s->coreisr); + break; + case EXTIOI_COREMAP_START: + p = s->coremap.reg_u8; + len = sizeof(s->coremap); + break; + case EXTIOI_SW_COREMAP_FLAG: + p = s->sw_coremap; + len = sizeof(s->sw_coremap); + break; + default: + loongarch_ext_irq_unlock(s, flags); + kvm_err("%s: unknown extioi register, addr = %d\n", __func__, addr); + return -EINVAL; + } + + loongarch_ext_irq_unlock(s, flags); + + if (is_write) { + if (copy_from_user(p, data, len)) + return -EFAULT; + } else { + if (copy_to_user(data, p, len)) + return -EFAULT; + } + + if ((addr == 
EXTIOI_COREISR_START) && is_write) { + loongarch_ext_irq_lock(s, flags); + extioi_set_sw_coreisr(s); + loongarch_ext_irq_unlock(s, flags); + } + + return 0; +} + static int kvm_loongarch_extioi_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { - return 0; + if (attr->group == KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS) + return kvm_loongarch_extioi_regs_access(dev, attr, false); + + return -EINVAL; } static int kvm_loongarch_extioi_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { - return 0; + if (attr->group == KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS) + return kvm_loongarch_extioi_regs_access(dev, attr, true); + + return -EINVAL; } static void kvm_loongarch_extioi_destroy(struct kvm_device *dev) -- Gitee From 6a5775dad9ef11aeda3c144b47b8d687351f4460 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Mon, 3 Jun 2024 14:51:29 +0800 Subject: [PATCH 1059/2138] anolis: LoongArch: KVM: Add PCHPIC device support ANBZ: #9546 Added device model for PCHPIC interrupt controller, implemented basic create destroy interface, and registered device model to kvm device table. 
Signed-off-by: Tianrui Zhao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/3527 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_host.h | 2 + arch/loongarch/include/asm/kvm_pch_pic.h | 30 +++++++ arch/loongarch/kvm/Makefile | 1 + arch/loongarch/kvm/intc/pch_pic.c | 100 +++++++++++++++++++++++ arch/loongarch/kvm/main.c | 6 ++ include/uapi/linux/kvm.h | 2 + 6 files changed, 141 insertions(+) create mode 100644 arch/loongarch/include/asm/kvm_pch_pic.h create mode 100644 arch/loongarch/kvm/intc/pch_pic.c diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 3293e2e52ebc..19aef9a98dfa 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -21,6 +21,7 @@ #include #include #include +#include /* Loongarch KVM register ids */ #define KVM_GET_IOC_CSR_IDX(id) ((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT) @@ -117,6 +118,7 @@ struct kvm_arch { struct kvm_context __percpu *vmcs; struct loongarch_ipi *ipi; struct loongarch_extioi *extioi; + struct loongarch_pch_pic *pch_pic; }; #define CSR_MAX_NUMS 0x800 diff --git a/arch/loongarch/include/asm/kvm_pch_pic.h b/arch/loongarch/include/asm/kvm_pch_pic.h new file mode 100644 index 000000000000..5aef0e4e3863 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_pch_pic.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_PCH_PIC_H +#define LOONGARCH_PCH_PIC_H + +#include + +struct loongarch_pch_pic { + spinlock_t lock; + struct kvm *kvm; + struct kvm_io_device device; + uint64_t mask; /* 1:disable irq, 0:enable irq */ + uint64_t htmsi_en; /* 1:msi */ + uint64_t edge; /* 1:edge triggered, 0:level triggered */ + uint64_t auto_ctrl0; /* only use default value 00b */ + uint64_t auto_ctrl1; /* only use default value 00b */ + uint64_t last_intirr; /* edge detection */ + uint64_t irr; /* interrupt request register */ + 
uint64_t isr; /* interrupt service register */ + uint64_t polarity; /* 0: high level trigger, 1: low level trigger */ + uint8_t route_entry[64]; /* default value 0, route to int0: extioi */ + uint8_t htmsi_vector[64]; /* irq route table for routing to extioi */ + uint64_t pch_pic_base; +}; + +int kvm_loongarch_register_pch_pic_device(void); +#endif /* LOONGARCH_PCH_PIC_H */ diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile index 2ab3ab019bee..5661f2bc04f6 100644 --- a/arch/loongarch/kvm/Makefile +++ b/arch/loongarch/kvm/Makefile @@ -20,5 +20,6 @@ kvm-y += vcpu.o kvm-y += vm.o kvm-y += intc/ipi.o kvm-y += intc/extioi.o +kvm-y += intc/pch_pic.o CFLAGS_exit.o += $(call cc-option,-Wno-override-init,) diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c new file mode 100644 index 000000000000..4097c00e8294 --- /dev/null +++ b/arch/loongarch/kvm/intc/pch_pic.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include + +static int kvm_loongarch_pch_pic_write(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, const void *val) +{ + return 0; +} + +static int kvm_loongarch_pch_pic_read(struct kvm_vcpu *vcpu, + struct kvm_io_device *dev, + gpa_t addr, int len, void *val) +{ + return 0; +} + +static const struct kvm_io_device_ops kvm_loongarch_pch_pic_ops = { + .read = kvm_loongarch_pch_pic_read, + .write = kvm_loongarch_pch_pic_write, +}; + +static int kvm_loongarch_pch_pic_get_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + return 0; +} + +static int kvm_loongarch_pch_pic_set_attr(struct kvm_device *dev, + struct kvm_device_attr *attr) +{ + return 0; +} + +static void kvm_loongarch_pch_pic_destroy(struct kvm_device *dev) +{ + struct kvm *kvm; + struct loongarch_pch_pic *s; + struct kvm_io_device *device; + + if (!dev) + return; + + kvm = dev->kvm; + if (!kvm) + return; + + 
s = kvm->arch.pch_pic; + if (!s) + return; + + device = &s->device; + /* unregister pch pic device and free it's memory */ + kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, device); + kfree(s); +} + +static int kvm_loongarch_pch_pic_create(struct kvm_device *dev, u32 type) +{ + struct loongarch_pch_pic *s; + struct kvm *kvm = dev->kvm; + + /* pch pic should not has been created */ + if (kvm->arch.pch_pic) + return -EINVAL; + + s = kzalloc(sizeof(struct loongarch_pch_pic), GFP_KERNEL); + if (!s) + return -ENOMEM; + + spin_lock_init(&s->lock); + s->kvm = kvm; + + + kvm->arch.pch_pic = s; + + kvm_info("create pch pic device successfully\n"); + return 0; +} + +static struct kvm_device_ops kvm_loongarch_pch_pic_dev_ops = { + .name = "kvm-loongarch-pch-pic", + .create = kvm_loongarch_pch_pic_create, + .destroy = kvm_loongarch_pch_pic_destroy, + .set_attr = kvm_loongarch_pch_pic_set_attr, + .get_attr = kvm_loongarch_pch_pic_get_attr, +}; + +int kvm_loongarch_register_pch_pic_device(void) +{ + return kvm_register_device_ops(&kvm_loongarch_pch_pic_dev_ops, + KVM_DEV_TYPE_LA_IOAPIC); +} diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index b5da4341006a..285bd4126e54 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "trace.h" unsigned long vpid_mask; @@ -375,6 +376,11 @@ static int kvm_loongarch_env_init(void) /* Register loongarch extioi interrupt controller interface. */ ret = kvm_loongarch_register_extioi_device(); + if (ret) + return ret; + + /* Register loongarch pch pic interrupt controller interface. 
*/ + ret = kvm_loongarch_register_pch_pic_device(); return ret; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 86fe825c5a15..ec5d3be77663 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1464,6 +1464,8 @@ enum kvm_device_type { #define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_RISCV_AIA, #define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA + KVM_DEV_TYPE_LA_IOAPIC = 0x100, +#define KVM_DEV_TYPE_LA_IOAPIC KVM_DEV_TYPE_LA_IOAPIC KVM_DEV_TYPE_LA_IPI, #define KVM_DEV_TYPE_LA_IPI KVM_DEV_TYPE_LA_IPI KVM_DEV_TYPE_LA_EXTIOI, -- Gitee From 9d04a2fceb15dd4eedeedaf35f59c1c3a1451939 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 15 Jun 2024 18:56:27 +0800 Subject: [PATCH 1060/2138] anolis: LoongArch: KVM: Add PCHPIC read and write functions ANBZ: #9546 Implementation of IPI interrupt controller address space read and write function simulation. Implement interrupt injection interface under loongarch. Signed-off-by: Tianrui Zhao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/3527 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_host.h | 18 ++ arch/loongarch/include/asm/kvm_pch_pic.h | 31 +++ arch/loongarch/include/uapi/asm/kvm.h | 1 + arch/loongarch/kvm/intc/ipi.c | 45 ++-- arch/loongarch/kvm/intc/pch_pic.c | 290 ++++++++++++++++++++++- arch/loongarch/kvm/vm.c | 34 +++ 6 files changed, 401 insertions(+), 18 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 19aef9a98dfa..32a22532da76 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -35,6 +35,22 @@ #define KVM_HALT_POLL_NS_DEFAULT 500000 #define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(1) +/* KVM_IRQ_LINE irq field index values */ +#define KVM_LOONGARCH_IRQ_TYPE_SHIFT 24 +#define KVM_LOONGARCH_IRQ_TYPE_MASK 0xff +#define KVM_LOONGARCH_IRQ_VCPU_SHIFT 16 +#define KVM_LOONGARCH_IRQ_VCPU_MASK 0xff +#define 
KVM_LOONGARCH_IRQ_NUM_SHIFT 0 +#define KVM_LOONGARCH_IRQ_NUM_MASK 0xffff + +/* irq_type field */ +#define KVM_LOONGARCH_IRQ_TYPE_CPU_IP 0 +#define KVM_LOONGARCH_IRQ_TYPE_CPU_IO 1 +#define KVM_LOONGARCH_IRQ_TYPE_HT 2 +#define KVM_LOONGARCH_IRQ_TYPE_MSI 3 +#define KVM_LOONGARCH_IRQ_TYPE_IOAPIC 4 +#define KVM_LOONGARCH_IRQ_TYPE_ROUTE 5 + #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP) struct kvm_vm_stat { @@ -45,6 +61,8 @@ struct kvm_vm_stat { u64 ipi_write_exits; u64 extioi_read_exits; u64 extioi_write_exits; + u64 pch_pic_read_exits; + u64 pch_pic_write_exits; }; struct kvm_vcpu_stat { diff --git a/arch/loongarch/include/asm/kvm_pch_pic.h b/arch/loongarch/include/asm/kvm_pch_pic.h index 5aef0e4e3863..91bd5a5ec575 100644 --- a/arch/loongarch/include/asm/kvm_pch_pic.h +++ b/arch/loongarch/include/asm/kvm_pch_pic.h @@ -8,6 +8,35 @@ #include +#define PCH_PIC_SIZE 0x3e8 + +#define PCH_PIC_INT_ID_START 0x0 +#define PCH_PIC_INT_ID_END 0x7 +#define PCH_PIC_MASK_START 0x20 +#define PCH_PIC_MASK_END 0x27 +#define PCH_PIC_HTMSI_EN_START 0x40 +#define PCH_PIC_HTMSI_EN_END 0x47 +#define PCH_PIC_EDGE_START 0x60 +#define PCH_PIC_EDGE_END 0x67 +#define PCH_PIC_CLEAR_START 0x80 +#define PCH_PIC_CLEAR_END 0x87 +#define PCH_PIC_AUTO_CTRL0_START 0xc0 +#define PCH_PIC_AUTO_CTRL0_END 0xc7 +#define PCH_PIC_AUTO_CTRL1_START 0xe0 +#define PCH_PIC_AUTO_CTRL1_END 0xe7 +#define PCH_PIC_ROUTE_ENTRY_START 0x100 +#define PCH_PIC_ROUTE_ENTRY_END 0x13f +#define PCH_PIC_HTMSI_VEC_START 0x200 +#define PCH_PIC_HTMSI_VEC_END 0x23f +#define PCH_PIC_INT_IRR_START 0x380 +#define PCH_PIC_INT_IRR_END 0x38f +#define PCH_PIC_INT_ISR_START 0x3a0 +#define PCH_PIC_INT_ISR_END 0x3af +#define PCH_PIC_POLARITY_START 0x3e0 +#define PCH_PIC_POLARITY_END 0x3e7 +#define PCH_PIC_INT_ID_VAL 0x7000000UL +#define PCH_PIC_INT_ID_VER 0x1UL + struct loongarch_pch_pic { spinlock_t lock; struct kvm *kvm; @@ -26,5 +55,7 @@ struct loongarch_pch_pic { uint64_t 
pch_pic_base; }; +void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level); +void pch_msi_set_irq(struct kvm *kvm, int irq, int level); int kvm_loongarch_register_pch_pic_device(void); #endif /* LOONGARCH_PCH_PIC_H */ diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index 1349f426058d..82ad5487ef23 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -19,6 +19,7 @@ #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_DIRTY_LOG_PAGE_OFFSET 64 +#define __KVM_HAVE_IRQ_LINE #define KVM_GUESTDBG_USE_SW_BP 0x00010000 /* diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c index fbf6f7e462cf..12024d9fdd0b 100644 --- a/arch/loongarch/kvm/intc/ipi.c +++ b/arch/loongarch/kvm/intc/ipi.c @@ -223,9 +223,9 @@ static int kvm_loongarch_ipi_read(struct kvm_vcpu *vcpu, return ret; } -static void send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) +static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) { - int i; + int i, ret; uint32_t val = 0, mask = 0; /* * Bit 27-30 is mask for byte writing. 
@@ -233,8 +233,11 @@ static void send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) */ if ((data >> 27) & 0xf) { /* Read the old val */ - kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); - + ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); + if (unlikely(ret)) { + kvm_err("%s: : read date from addr %llx failed\n", __func__, addr); + return ret; + } /* Construct the mask by scanning the bit 27-30 */ for (i = 0; i < 4; i++) { if (data & (0x1 << (27 + i))) @@ -245,41 +248,48 @@ static void send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data) } val |= ((uint32_t)(data >> 32) & ~mask); - kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); + ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val); + if (unlikely(ret)) + kvm_err("%s: : write date to addr %llx failed\n", __func__, addr); + + return ret; } -static void mail_send(struct kvm *kvm, uint64_t data) +static int mail_send(struct kvm *kvm, uint64_t data) { struct kvm_vcpu *vcpu; int cpu, mailbox; - int offset; + int offset, ret; cpu = ((data & 0xffffffff) >> 16) & 0x3ff; vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); if (unlikely(vcpu == NULL)) { kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); - return; + return -EINVAL; } mailbox = ((data & 0xffffffff) >> 2) & 0x7; offset = SMP_MAILBOX + CORE_BUF_20 + mailbox * 4; - send_ipi_data(vcpu, offset, data); + ret = send_ipi_data(vcpu, offset, data); + + return ret; } -static void any_send(struct kvm *kvm, uint64_t data) +static int any_send(struct kvm *kvm, uint64_t data) { struct kvm_vcpu *vcpu; - int cpu, offset; + int cpu, offset, ret; cpu = ((data & 0xffffffff) >> 16) & 0x3ff; vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); if (unlikely(vcpu == NULL)) { kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); - return; + return -EINVAL; } offset = data & 0xffff; - send_ipi_data(vcpu, offset, data); + ret = send_ipi_data(vcpu, offset, data); + return ret; } static int 
kvm_loongarch_mail_write(struct kvm_vcpu *vcpu, @@ -287,6 +297,7 @@ static int kvm_loongarch_mail_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val) { struct loongarch_ipi *ipi; + int ret; ipi = vcpu->kvm->arch.ipi; if (!ipi) { @@ -299,16 +310,18 @@ static int kvm_loongarch_mail_write(struct kvm_vcpu *vcpu, switch (addr) { case MAIL_SEND_OFFSET: - mail_send(vcpu->kvm, *(uint64_t *)val); + ret = mail_send(vcpu->kvm, *(uint64_t *)val); break; case ANY_SEND_OFFSET: - any_send(vcpu->kvm, *(uint64_t *)val); + ret = any_send(vcpu->kvm, *(uint64_t *)val); break; default: + kvm_err("%s: invalid addr %llx!\n", __func__, addr); + ret = -EINVAL; break; } - return 0; + return ret; } static const struct kvm_io_device_ops kvm_loongarch_ipi_ops = { diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c index 4097c00e8294..4ad85277fced 100644 --- a/arch/loongarch/kvm/intc/pch_pic.c +++ b/arch/loongarch/kvm/intc/pch_pic.c @@ -8,18 +8,304 @@ #include #include +/* update the isr according to irq level and route irq to extioi */ +static void pch_pic_update_irq(struct loongarch_pch_pic *s, int irq, int level) +{ + u64 mask = (1 << irq); + + /* + * set isr and route irq to extioi and + * the route table is in htmsi_vector[] + */ + if (level) { + if (mask & s->irr & ~s->mask) { + s->isr |= mask; + irq = s->htmsi_vector[irq]; + extioi_set_irq(s->kvm->arch.extioi, irq, level); + } + } else { + if (mask & s->isr & ~s->irr) { + s->isr &= ~mask; + irq = s->htmsi_vector[irq]; + extioi_set_irq(s->kvm->arch.extioi, irq, level); + } + } +} + +/* msi irq handler */ +void pch_msi_set_irq(struct kvm *kvm, int irq, int level) +{ + extioi_set_irq(kvm->arch.extioi, irq, level); +} + +/* called when a irq is triggered in pch pic */ +void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level) +{ + u64 mask = (1 << irq); + + spin_lock(&s->lock); + if (level) + /* set irr */ + s->irr |= mask; + else { + /* 0 level signal in edge triggered irq does not 
mean to clear irq + * The irr register variable is cleared when the cpu writes to the + * PCH_PIC_CLEAR_START address area + */ + if (s->edge & mask) + return; + s->irr &= ~mask; + } + pch_pic_update_irq(s, irq, level); + spin_unlock(&s->lock); +} + +/* update batch irqs, the irq_mask is a bitmap of irqs */ +static void pch_pic_update_batch_irqs(struct loongarch_pch_pic *s, u64 irq_mask, int level) +{ + int irq, bits; + + /* find each irq by irqs bitmap and update each irq */ + bits = sizeof(irq_mask) * 8; + irq = find_first_bit((void *)&irq_mask, bits); + while (irq < bits) { + pch_pic_update_irq(s, irq, level); + bitmap_clear((void *)&irq_mask, irq, 1); + irq = find_first_bit((void *)&irq_mask, bits); + } +} + +/* + * pch pic register is 64-bit, but it is accessed by 32-bit, + * so we use high to get whether low or high 32 bits we want + * to read. + */ +static u32 pch_pic_read_reg(u64 *s, int high) +{ + u64 val = *s; + + /* read the high 32 bits when the high is 1 */ + return high ? (u32)(val >> 32) : (u32)val; +} + +/* + * pch pic register is 64-bit, but it is accessed by 32-bit, + * so we use high to get whether low or high 32 bits we want + * to write. + */ +static u32 pch_pic_write_reg(u64 *s, int high, u32 v) +{ + u64 val = *s, data = v; + + if (high) { + /* + * Clear val high 32 bits + * write the high 32 bits when the high is 1 + */ + *s = (val << 32 >> 32) | (data << 32); + val >>= 32; + } else + /* + * Clear val low 32 bits + * write the low 32 bits when the high is 0 + */ + *s = (val >> 32 << 32) | v; + + return (u32)val; +} + +static int loongarch_pch_pic_write(struct loongarch_pch_pic *s, gpa_t addr, + int len, const void *val) +{ + u32 old, data, offset, index; + u64 irq; + int ret; + + ret = 0; + data = *(u32 *)val; + offset = addr - s->pch_pic_base; + + spin_lock(&s->lock); + switch (offset) { + case PCH_PIC_MASK_START ... 
PCH_PIC_MASK_END: + offset -= PCH_PIC_MASK_START; + /* get whether high or low 32 bits we want to write */ + index = offset >> 2; + old = pch_pic_write_reg(&s->mask, index, data); + + /* enable irq when mask value change to 0 */ + irq = (old & ~data) << (32 * index); + pch_pic_update_batch_irqs(s, irq, 1); + + /* disable irq when mask value change to 1 */ + irq = (~old & data) << (32 * index); + pch_pic_update_batch_irqs(s, irq, 0); + break; + case PCH_PIC_HTMSI_EN_START ... PCH_PIC_HTMSI_EN_END: + offset -= PCH_PIC_HTMSI_EN_START; + index = offset >> 2; + pch_pic_write_reg(&s->htmsi_en, index, data); + break; + case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END: + offset -= PCH_PIC_EDGE_START; + index = offset >> 2; + /* 1: edge triggered, 0: level triggered */ + pch_pic_write_reg(&s->edge, index, data); + break; + case PCH_PIC_CLEAR_START ... PCH_PIC_CLEAR_END: + offset -= PCH_PIC_CLEAR_START; + index = offset >> 2; + /* write 1 to clear edge irq */ + old = pch_pic_read_reg(&s->irr, index); + /* + * get the irq bitmap which is edge triggered and + * already set and to be cleared + */ + irq = old & pch_pic_read_reg(&s->edge, index) & data; + /* write irr to the new state where irqs have been cleared */ + pch_pic_write_reg(&s->irr, index, old & ~irq); + /* update cleared irqs */ + pch_pic_update_batch_irqs(s, irq, 0); + break; + case PCH_PIC_AUTO_CTRL0_START ... PCH_PIC_AUTO_CTRL0_END: + offset -= PCH_PIC_AUTO_CTRL0_START; + index = offset >> 2; + /* we only use default mode: fixed interrupt distribution mode */ + pch_pic_write_reg(&s->auto_ctrl0, index, 0); + break; + case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END: + offset -= PCH_PIC_AUTO_CTRL1_START; + index = offset >> 2; + /* we only use default mode: fixed interrupt distribution mode */ + pch_pic_write_reg(&s->auto_ctrl1, index, 0); + break; + case PCH_PIC_ROUTE_ENTRY_START ... 
PCH_PIC_ROUTE_ENTRY_END: + offset -= PCH_PIC_ROUTE_ENTRY_START; + /* only route to int0: extioi */ + s->route_entry[offset] = 1; + break; + case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END: + /* route table to extioi */ + offset -= PCH_PIC_HTMSI_VEC_START; + s->htmsi_vector[offset] = (u8)data; + break; + case PCH_PIC_POLARITY_START ... PCH_PIC_POLARITY_END: + offset -= PCH_PIC_POLARITY_START; + index = offset >> 2; + + /* we only use defalut value 0: high level triggered */ + pch_pic_write_reg(&s->polarity, index, 0); + break; + default: + ret = -EINVAL; + break; + } + + spin_unlock(&s->lock); + return ret; +} + static int kvm_loongarch_pch_pic_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, const void *val) { - return 0; + int ret; + struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic; + + if (!s) { + kvm_err("%s: pch pic irqchip not valid!\n", __func__); + return -EINVAL; + } + + /* statistics of pch pic writing */ + vcpu->kvm->stat.pch_pic_write_exits++; + ret = loongarch_pch_pic_write(s, addr, len, val); + + return ret; +} + +static int loongarch_pch_pic_read(struct loongarch_pch_pic *s, gpa_t addr, int len, void *val) +{ + int offset, index, ret = 0; + u32 data = 0; + u64 int_id = 0; + + offset = addr - s->pch_pic_base; + + spin_lock(&s->lock); + switch (offset) { + case PCH_PIC_INT_ID_START ... PCH_PIC_INT_ID_END: + /* int id version */ + int_id |= (u64)PCH_PIC_INT_ID_VER << 32; + /* irq number */ + int_id |= (u64)31 << (32 + 16); + /* int id value */ + int_id |= PCH_PIC_INT_ID_VAL; + *(u64 *)val = int_id; + break; + case PCH_PIC_MASK_START ... PCH_PIC_MASK_END: + offset -= PCH_PIC_MASK_START; + index = offset >> 2; + /* read mask reg */ + data = pch_pic_read_reg(&s->mask, index); + *(u32 *)val = data; + break; + case PCH_PIC_HTMSI_EN_START ... 
PCH_PIC_HTMSI_EN_END: + offset -= PCH_PIC_HTMSI_EN_START; + index = offset >> 2; + /* read htmsi enable reg */ + data = pch_pic_read_reg(&s->htmsi_en, index); + *(u32 *)val = data; + break; + case PCH_PIC_EDGE_START ... PCH_PIC_EDGE_END: + offset -= PCH_PIC_EDGE_START; + index = offset >> 2; + /* read edge enable reg */ + data = pch_pic_read_reg(&s->edge, index); + *(u32 *)val = data; + break; + case PCH_PIC_AUTO_CTRL0_START ... PCH_PIC_AUTO_CTRL0_END: + case PCH_PIC_AUTO_CTRL1_START ... PCH_PIC_AUTO_CTRL1_END: + /* we only use default mode: fixed interrupt distribution mode */ + *(u32 *)val = 0; + break; + case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END: + /* only route to int0: extioi */ + *(u8 *)val = 1; + break; + case PCH_PIC_HTMSI_VEC_START ... PCH_PIC_HTMSI_VEC_END: + offset -= PCH_PIC_HTMSI_VEC_START; + /* read htmsi vector */ + data = s->htmsi_vector[offset]; + *(u8 *)val = data; + break; + case PCH_PIC_POLARITY_START ... PCH_PIC_POLARITY_END: + /* we only use defalut value 0: high level triggered */ + *(u32 *)val = 0; + break; + default: + ret = -EINVAL; + } + spin_unlock(&s->lock); + return ret; } static int kvm_loongarch_pch_pic_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, void *val) { - return 0; + int ret; + struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic; + + if (!s) { + kvm_err("%s: pch pic irqchip not valid!\n", __func__); + return -EINVAL; + } + + /* statistics of pch pic reading */ + vcpu->kvm->stat.pch_pic_read_exits++; + ret = loongarch_pch_pic_read(s, addr, len, val); + return ret; } static const struct kvm_io_device_ops kvm_loongarch_pch_pic_ops = { diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index b63db5ffad73..5aa5bb79a379 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -5,6 +5,8 @@ #include #include +#include +#include const struct _kvm_stats_desc kvm_vm_stats_desc[] = { KVM_GENERIC_VM_STATS(), @@ -137,3 +139,35 @@ int kvm_arch_vm_ioctl(struct file 
*filp, unsigned int ioctl, unsigned long arg) return -EINVAL; } } + +int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *data, + bool line_status) +{ + bool level; + struct loongarch_pch_pic *s; + int type, vcpu, irq, vcpus, val, ret = 0; + + level = data->level; + val = data->irq; + s = kvm->arch.pch_pic; + vcpus = atomic_read(&kvm->online_vcpus); + + type = (val >> KVM_LOONGARCH_IRQ_TYPE_SHIFT) & KVM_LOONGARCH_IRQ_TYPE_MASK; + vcpu = (val >> KVM_LOONGARCH_IRQ_VCPU_SHIFT) & KVM_LOONGARCH_IRQ_VCPU_MASK; + irq = (val >> KVM_LOONGARCH_IRQ_NUM_SHIFT) & KVM_LOONGARCH_IRQ_NUM_MASK; + + switch (type) { + case KVM_LOONGARCH_IRQ_TYPE_IOAPIC: + if (irq < KVM_IRQCHIP_NUM_PINS) + pch_pic_set_irq(s, irq, level); + else if (irq < 256) + pch_msi_set_irq(kvm, irq, level); + else + ret = -EINVAL; + break; + default: + ret = -EINVAL; + } + + return ret; +} -- Gitee From 5dfb1aab06e5b38aa8ed1e9a78d217830846ef67 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 15 Jun 2024 18:59:35 +0800 Subject: [PATCH 1061/2138] anolis: LoongArch: KVM: Add PCHPIC user mode read and write functions ANBZ: #9546 Implements the communication interface between the user mode program and the kernel in PCHPIC interrupt control simulation, which is used to obtain or send the simulation data of the interrupt controller in the user mode process, and is used in VM migration or VM saving and restoration. 
Signed-off-by: Tianrui Zhao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/3527 Reviewed-by: Juxin Gao --- arch/loongarch/include/uapi/asm/kvm.h | 5 + arch/loongarch/kvm/intc/pch_pic.c | 128 +++++++++++++++++++++++++- 2 files changed, 131 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index 82ad5487ef23..a4190baf7dba 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -123,4 +123,9 @@ struct kvm_iocsr_entry { #define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS 0x40000003 +#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL 0x40000004 +#define KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT 0 + +#define KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS 0x40000005 + #endif /* __UAPI_ASM_LOONGARCH_KVM_H */ diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c index 4ad85277fced..abb7bab84f2d 100644 --- a/arch/loongarch/kvm/intc/pch_pic.c +++ b/arch/loongarch/kvm/intc/pch_pic.c @@ -313,16 +313,140 @@ static const struct kvm_io_device_ops kvm_loongarch_pch_pic_ops = { .write = kvm_loongarch_pch_pic_write, }; +static int kvm_loongarch_pch_pic_init(struct kvm_device *dev, u64 addr) +{ + int ret; + struct loongarch_pch_pic *s = dev->kvm->arch.pch_pic; + struct kvm_io_device *device; + struct kvm *kvm = dev->kvm; + + s->pch_pic_base = addr; + device = &s->device; + /* init device by pch pic writing and reading ops */ + kvm_iodevice_init(device, &kvm_loongarch_pch_pic_ops); + mutex_lock(&kvm->slots_lock); + /* register pch pic device */ + ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, PCH_PIC_SIZE, device); + mutex_unlock(&kvm->slots_lock); + if (ret < 0) + return -EFAULT; + + return 0; +} + +/* used by user space to get or set pch pic registers */ +static int kvm_loongarch_pch_pic_regs_access(struct kvm_device *dev, + struct kvm_device_attr *attr, + bool is_write) +{ + int addr, len = 8, ret = 0; + void __user *data; + void *p = NULL; 
+ struct loongarch_pch_pic *s; + + s = dev->kvm->arch.pch_pic; + addr = attr->attr; + data = (void __user *)attr->addr; + + spin_lock(&s->lock); + /* get pointer to pch pic register by addr */ + switch (addr) { + case PCH_PIC_MASK_START: + p = &s->mask; + break; + case PCH_PIC_HTMSI_EN_START: + p = &s->htmsi_en; + break; + case PCH_PIC_EDGE_START: + p = &s->edge; + break; + case PCH_PIC_AUTO_CTRL0_START: + p = &s->auto_ctrl0; + break; + case PCH_PIC_AUTO_CTRL1_START: + p = &s->auto_ctrl1; + break; + case PCH_PIC_ROUTE_ENTRY_START: + p = s->route_entry; + len = 64; + break; + case PCH_PIC_HTMSI_VEC_START: + p = s->htmsi_vector; + len = 64; + break; + case PCH_PIC_INT_IRR_START: + p = &s->irr; + break; + case PCH_PIC_INT_ISR_START: + p = &s->isr; + break; + case PCH_PIC_POLARITY_START: + p = &s->polarity; + break; + default: + ret = -EINVAL; + } + + /* write or read value according to is_write */ + if (is_write) { + if (copy_from_user(p, data, len)) + ret = -EFAULT; + } else { + if (copy_to_user(data, p, len)) + ret = -EFAULT; + } + + spin_unlock(&s->lock); + return ret; +} + static int kvm_loongarch_pch_pic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { - return 0; + /* only support pch pic group registers */ + if (attr->group == KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS) + return kvm_loongarch_pch_pic_regs_access(dev, attr, false); + + return -EINVAL; } static int kvm_loongarch_pch_pic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { - return 0; + int ret = -EINVAL; + u64 addr; + void __user *uaddr = (void __user *)(long)attr->addr; + + switch (attr->group) { + case KVM_DEV_LOONGARCH_PCH_PIC_GRP_CTRL: + switch (attr->attr) { + case KVM_DEV_LOONGARCH_PCH_PIC_CTRL_INIT: + if (copy_from_user(&addr, uaddr, sizeof(addr))) + return -EFAULT; + + if (!dev->kvm->arch.pch_pic) { + kvm_err("%s: please create pch_pic irqchip first!\n", __func__); + ret = -EFAULT; + break; + } + + ret = kvm_loongarch_pch_pic_init(dev, addr); + break; + default: + 
kvm_err("%s: unknown group (%d) attr (%lld)\n", __func__, attr->group, + attr->attr); + ret = -EINVAL; + break; + } + break; + case KVM_DEV_LOONGARCH_PCH_PIC_GRP_REGS: + ret = kvm_loongarch_pch_pic_regs_access(dev, attr, true); + break; + default: + break; + } + + return ret; } static void kvm_loongarch_pch_pic_destroy(struct kvm_device *dev) -- Gitee From 1186a3aaf216fb9a55457e07d21c68c4c2bf7982 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Mon, 3 Jun 2024 15:09:26 +0800 Subject: [PATCH 1062/2138] anolis: LoongArch: KVM: Add irqfd support ANBZ: #9546 Enable the KVM_IRQ_ROUTING KVM_IRQCHIP KVM_MSI configuration item, increase the KVM_CAP_IRQCHIP capability, and implement the query interface of the kernel irqchip. Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/3527 Reviewed-by: Juxin Gao --- arch/loongarch/include/uapi/asm/kvm.h | 2 + arch/loongarch/kvm/Kconfig | 4 ++ arch/loongarch/kvm/Makefile | 1 + arch/loongarch/kvm/intc/pch_pic.c | 28 +++++++++ arch/loongarch/kvm/irqfd.c | 87 +++++++++++++++++++++++++++ arch/loongarch/kvm/vm.c | 21 ++++++- 6 files changed, 142 insertions(+), 1 deletion(-) create mode 100644 arch/loongarch/kvm/irqfd.c diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index a4190baf7dba..dc6ae66771c5 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -119,6 +119,8 @@ struct kvm_iocsr_entry { #define KVM_IRQCHIP_NUM_PINS 64 #define KVM_MAX_CORES 256 +#define KVM_LOONGARCH_VM_HAVE_IRQCHIP 0x40000001 + #define KVM_DEV_LOONGARCH_IPI_GRP_REGS 0x40000002 #define KVM_DEV_LOONGARCH_EXTIOI_GRP_REGS 0x40000003 diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig index fda425babfb2..2f44176a45b5 100644 --- a/arch/loongarch/kvm/Kconfig +++ b/arch/loongarch/kvm/Kconfig @@ -24,6 +24,10 @@ config KVM select HAVE_KVM_DIRTY_RING_ACQ_REL select HAVE_KVM_EVENTFD select HAVE_KVM_VCPU_ASYNC_IOCTL + select HAVE_KVM_IRQ_ROUTING + 
select HAVE_KVM_IRQCHIP + select HAVE_KVM_IRQFD + select HAVE_KVM_MSI select KVM_GENERIC_DIRTYLOG_READ_PROTECT select KVM_GENERIC_HARDWARE_ENABLING select KVM_MMIO diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile index 5661f2bc04f6..f363e4b6fcf3 100644 --- a/arch/loongarch/kvm/Makefile +++ b/arch/loongarch/kvm/Makefile @@ -21,5 +21,6 @@ kvm-y += vm.o kvm-y += intc/ipi.o kvm-y += intc/extioi.o kvm-y += intc/pch_pic.o +kvm-y += irqfd.o CFLAGS_exit.o += $(call cc-option,-Wno-override-init,) diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c index abb7bab84f2d..e18e27992978 100644 --- a/arch/loongarch/kvm/intc/pch_pic.c +++ b/arch/loongarch/kvm/intc/pch_pic.c @@ -449,6 +449,29 @@ static int kvm_loongarch_pch_pic_set_attr(struct kvm_device *dev, return ret; } +static int kvm_setup_default_irq_routing(struct kvm *kvm) +{ + struct kvm_irq_routing_entry *entries; + + u32 nr = KVM_IRQCHIP_NUM_PINS; + int i, ret; + + entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL); + if (!entries) + return -ENOMEM; + + for (i = 0; i < nr; i++) { + entries[i].gsi = i; + entries[i].type = KVM_IRQ_ROUTING_IRQCHIP; + entries[i].u.irqchip.irqchip = 0; + entries[i].u.irqchip.pin = i; + } + ret = kvm_set_irq_routing(kvm, entries, nr, 0); + kfree(entries); + + return 0; +} + static void kvm_loongarch_pch_pic_destroy(struct kvm_device *dev) { struct kvm *kvm; @@ -474,6 +497,7 @@ static void kvm_loongarch_pch_pic_destroy(struct kvm_device *dev) static int kvm_loongarch_pch_pic_create(struct kvm_device *dev, u32 type) { + int ret; struct loongarch_pch_pic *s; struct kvm *kvm = dev->kvm; @@ -481,6 +505,10 @@ static int kvm_loongarch_pch_pic_create(struct kvm_device *dev, u32 type) if (kvm->arch.pch_pic) return -EINVAL; + ret = kvm_setup_default_irq_routing(kvm); + if (ret) + return -ENOMEM; + s = kzalloc(sizeof(struct loongarch_pch_pic), GFP_KERNEL); if (!s) return -ENOMEM; diff --git a/arch/loongarch/kvm/irqfd.c b/arch/loongarch/kvm/irqfd.c new 
file mode 100644 index 000000000000..bf67f329ebc9 --- /dev/null +++ b/arch/loongarch/kvm/irqfd.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ + +#include +#include +#include + +static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, + int level, bool line_status) +{ + /* ioapic pin (0 ~ 64) <---> gsi(0 ~ 64) */ + pch_pic_set_irq(kvm->arch.pch_pic, e->irqchip.pin, level); + + return 0; +} + +/* + * kvm_set_routing_entry: populate a kvm routing entry + * from a user routing entry + * + * @kvm: the VM this entry is applied to + * @e: kvm kernel routing entry handle + * @ue: user api routing entry handle + * return 0 on success, -EINVAL on errors. + */ +int kvm_set_routing_entry(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *e, + const struct kvm_irq_routing_entry *ue) +{ + int r = -EINVAL; + + switch (ue->type) { + case KVM_IRQ_ROUTING_IRQCHIP: + e->set = kvm_set_ioapic_irq; + + e->irqchip.irqchip = ue->u.irqchip.irqchip; + e->irqchip.pin = ue->u.irqchip.pin; + + if (e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS) + goto out; + break; + case KVM_IRQ_ROUTING_MSI: + e->set = kvm_set_msi; + e->msi.address_lo = ue->u.msi.address_lo; + e->msi.address_hi = ue->u.msi.address_hi; + e->msi.data = ue->u.msi.data; + break; + default: + goto out; + } + r = 0; +out: + return r; +} + +int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, + int level, bool line_status) +{ + if (e->type == KVM_IRQ_ROUTING_MSI) { + pch_msi_set_irq(kvm, e->msi.data, 1); + return 0; + } + + return -EWOULDBLOCK; +} + +/** + * kvm_set_msi: inject the MSI corresponding to the + * MSI routing entry + * + * This is the entry point for irqfd MSI injection + * and userspace MSI injection. 
+ */ +int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, + int level, bool line_status) +{ + if (!level) + return -1; + + pch_msi_set_irq(kvm, e->msi.data, level); + return 0; +} diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 5aa5bb79a379..01e35a841027 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -72,6 +72,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) int r; switch (ext) { + case KVM_CAP_IRQCHIP: case KVM_CAP_ONE_REG: case KVM_CAP_ENABLE_CAP: case KVM_CAP_READONLY_MEM: @@ -80,6 +81,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_IOEVENTFD: case KVM_CAP_MP_STATE: case KVM_CAP_SET_GUEST_DEBUG: + case KVM_CAP_VM_ATTRIBUTES: r = 1; break; case KVM_CAP_NR_VCPUS: @@ -109,6 +111,7 @@ static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr if (cpu_has_pmp) return 0; return -ENXIO; + default: return -ENXIO; } @@ -119,6 +122,8 @@ static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) switch (attr->group) { case KVM_LOONGARCH_VM_FEAT_CTRL: return kvm_vm_feature_has_attr(kvm, attr); + case KVM_LOONGARCH_VM_HAVE_IRQCHIP: + return 0; default: return -ENXIO; } @@ -126,18 +131,27 @@ static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { + int r; struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; struct kvm_device_attr attr; switch (ioctl) { - case KVM_HAS_DEVICE_ATTR: + case KVM_CREATE_IRQCHIP: { + r = 1; + break; + } + case KVM_HAS_DEVICE_ATTR: { if (copy_from_user(&attr, argp, sizeof(attr))) return -EFAULT; + return kvm_vm_has_attr(kvm, &attr); + } default: return -EINVAL; } + + return r; } int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *data, @@ -171,3 +185,8 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *data, return ret; } + +bool 
kvm_arch_irqchip_in_kernel(struct kvm *kvm) +{ + return (bool)((!!kvm->arch.extioi) && (!!kvm->arch.pch_pic)); +} -- Gitee From 5a3a17f058d7de61133ff0b28062eb4d252f7b1a Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Thu, 6 Jun 2024 10:49:15 +0800 Subject: [PATCH 1063/2138] anolis: asm-generic: completely revert "Unify uapi bitsperlong.h for arm64, riscv and loongarch" ANBZ: #9290 Just like the description of the upstream commit, 6e8d96909a23c8078ee965bd48bb31cbef2de943 a similar problem was encountered under the loongarch architecture Unifying the asm-generic headers across 32-bit and 64-bit architectures based on the compiler provided macros was a good idea and appears to work with all user space, but it caused a regression when building old kernels on systems that have the new headers installed in /usr/include, as this combination trips an inconsistency in the kernel's own tools/include headers that are a mix of userspace and kernel-internal headers. Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3324 --- arch/loongarch/include/uapi/asm/bitsperlong.h | 9 +++++++++ tools/include/uapi/asm/bitsperlong.h | 2 ++ 2 files changed, 11 insertions(+) create mode 100644 arch/loongarch/include/uapi/asm/bitsperlong.h diff --git a/arch/loongarch/include/uapi/asm/bitsperlong.h b/arch/loongarch/include/uapi/asm/bitsperlong.h new file mode 100644 index 000000000000..00b4ba1e5cdf --- /dev/null +++ b/arch/loongarch/include/uapi/asm/bitsperlong.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef __ASM_LOONGARCH_BITSPERLONG_H +#define __ASM_LOONGARCH_BITSPERLONG_H + +#define __BITS_PER_LONG (__SIZEOF_LONG__ * 8) + +#include + +#endif /* __ASM_LOONGARCH_BITSPERLONG_H */ diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h index 036e2fc92d1a..786eae853257 100644 --- a/tools/include/uapi/asm/bitsperlong.h +++ b/tools/include/uapi/asm/bitsperlong.h @@ -13,6 +13,8 @@ #include 
"../../../arch/ia64/include/uapi/asm/bitsperlong.h" #elif defined(__alpha__) #include "../../../arch/alpha/include/uapi/asm/bitsperlong.h" +#elif defined(__loongarch__) +#include "../../../arch/loongarch/include/uapi/asm/bitsperlong.h" #elif defined(__sw_64__) #include "../../../arch/sw_64/include/uapi/asm/bitsperlong.h" #else -- Gitee From f5adba5efaafeea58eed4ef337a43f5e455ecf7f Mon Sep 17 00:00:00 2001 From: gaojuxin Date: Fri, 26 Jul 2024 10:10:31 +0800 Subject: [PATCH 1064/2138] LoongArch: Add writecombine support for DMW-based ioremap() ANBZ: #8623 commit 8e02c3b782ec64343f3cccc8dc5a8be2b379e80b upstream. Currently, only TLB-based ioremap() support writecombine, so add the counterpart for DMW-based ioremap() with help of DMW2. The base address (WRITECOMBINE_BASE) is configured as 0xa000000000000000. DMW3 is unused by kernel now, however firmware may leave garbage in them and interfere kernel's address mapping. So clear it as necessary. BTW, centralize the DMW configuration to macro SETUP_DMWINS. 
Signed-off-by: Jiaxun Yang Signed-off-by: Huacai Chen Signed-off-by: gaojuxin Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3546 --- arch/loongarch/include/asm/addrspace.h | 4 ++++ arch/loongarch/include/asm/io.h | 10 ++++++++-- arch/loongarch/include/asm/loongarch.h | 10 +++++++++- arch/loongarch/include/asm/stackframe.h | 11 +++++++++++ arch/loongarch/kernel/head.S | 11 ++--------- arch/loongarch/power/suspend_asm.S | 6 +----- drivers/firmware/efi/libstub/loongarch.c | 2 ++ 7 files changed, 37 insertions(+), 17 deletions(-) diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h index 60a2ce1a6531..d9be1df3b95f 100644 --- a/arch/loongarch/include/asm/addrspace.h +++ b/arch/loongarch/include/asm/addrspace.h @@ -36,6 +36,10 @@ extern unsigned long vm_map_base; #define UNCACHE_BASE CSR_DMW0_BASE #endif +#ifndef WRITECOMBINE_BASE +#define WRITECOMBINE_BASE CSR_DMW2_BASE +#endif + #define DMW_PABITS 48 #define TO_PHYS_MASK ((1ULL << DMW_PABITS) - 1) diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h index 4a8adcca329b..838db690b723 100644 --- a/arch/loongarch/include/asm/io.h +++ b/arch/loongarch/include/asm/io.h @@ -30,10 +30,16 @@ extern void __init early_iounmap(void __iomem *addr, unsigned long size); static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long prot_val) { - if (prot_val & _CACHE_CC) + switch (prot_val & _CACHE_MASK) { + case _CACHE_CC: return (void __iomem *)(unsigned long)(CACHE_BASE + offset); - else + case _CACHE_SUC: return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset); + case _CACHE_WUC: + return (void __iomem *)(unsigned long)(WRITECOMBINE_BASE + offset); + default: + return NULL; + } } #define ioremap(offset, size) \ diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index d0c50140bba9..8594c55ec171 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ 
b/arch/loongarch/include/asm/loongarch.h @@ -879,7 +879,7 @@ #define LOONGARCH_CSR_DMWIN2 0x182 /* 64 direct map win2: MEM */ #define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */ -/* Direct Map window 0/1 */ +/* Direct Map window 0/1/2/3 */ #define CSR_DMW0_PLV0 _CONST64_(1 << 0) #define CSR_DMW0_VSEG _CONST64_(0x8000) #define CSR_DMW0_BASE (CSR_DMW0_VSEG << DMW_PABITS) @@ -891,6 +891,14 @@ #define CSR_DMW1_BASE (CSR_DMW1_VSEG << DMW_PABITS) #define CSR_DMW1_INIT (CSR_DMW1_BASE | CSR_DMW1_MAT | CSR_DMW1_PLV0) +#define CSR_DMW2_PLV0 _CONST64_(1 << 0) +#define CSR_DMW2_MAT _CONST64_(2 << 4) +#define CSR_DMW2_VSEG _CONST64_(0xa000) +#define CSR_DMW2_BASE (CSR_DMW2_VSEG << DMW_PABITS) +#define CSR_DMW2_INIT (CSR_DMW2_BASE | CSR_DMW2_MAT | CSR_DMW2_PLV0) + +#define CSR_DMW3_INIT 0x0 + /* Performance Counter registers */ #define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */ #define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */ diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h index efc8c42290d0..35ba862f2025 100644 --- a/arch/loongarch/include/asm/stackframe.h +++ b/arch/loongarch/include/asm/stackframe.h @@ -37,6 +37,17 @@ cfi_restore \reg \offset \docfi .endm + .macro SETUP_DMWINS temp + li.d \temp, CSR_DMW0_INIT # WUC, PLV0, 0x8000 xxxx xxxx xxxx + csrwr \temp, LOONGARCH_CSR_DMWIN0 + li.d \temp, CSR_DMW1_INIT # CAC, PLV0, 0x9000 xxxx xxxx xxxx + csrwr \temp, LOONGARCH_CSR_DMWIN1 + li.d \temp, CSR_DMW2_INIT # WUC, PLV0, 0xa000 xxxx xxxx xxxx + csrwr \temp, LOONGARCH_CSR_DMWIN2 + li.d \temp, CSR_DMW3_INIT # 0x0, unused + csrwr \temp, LOONGARCH_CSR_DMWIN3 + .endm + /* Jump to the runtime virtual address. 
*/ .macro JUMP_VIRT_ADDR temp1 temp2 li.d \temp1, CACHE_BASE diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S index e336fbc4eb96..841e51144945 100644 --- a/arch/loongarch/kernel/head.S +++ b/arch/loongarch/kernel/head.S @@ -44,11 +44,7 @@ SYM_DATA(kernel_fsize, .long _kernel_fsize); SYM_CODE_START(kernel_entry) # kernel entry point /* Config direct window and set PG */ - li.d t0, CSR_DMW0_INIT # UC, PLV0, 0x8000 xxxx xxxx xxxx - csrwr t0, LOONGARCH_CSR_DMWIN0 - li.d t0, CSR_DMW1_INIT # CA, PLV0, 0x9000 xxxx xxxx xxxx - csrwr t0, LOONGARCH_CSR_DMWIN1 - + SETUP_DMWINS t0 JUMP_VIRT_ADDR t0, t1 /* Enable PG */ @@ -119,11 +115,8 @@ SYM_CODE_END(kernel_entry) * function after setting up the stack and tp registers. */ SYM_CODE_START(smpboot_entry) - li.d t0, CSR_DMW0_INIT # UC, PLV0 - csrwr t0, LOONGARCH_CSR_DMWIN0 - li.d t0, CSR_DMW1_INIT # CA, PLV0 - csrwr t0, LOONGARCH_CSR_DMWIN1 + SETUP_DMWINS t0 JUMP_VIRT_ADDR t0, t1 /* Enable PG */ diff --git a/arch/loongarch/power/suspend_asm.S b/arch/loongarch/power/suspend_asm.S index e2fc3b4e31f0..c28ad52b7baf 100644 --- a/arch/loongarch/power/suspend_asm.S +++ b/arch/loongarch/power/suspend_asm.S @@ -73,11 +73,7 @@ SYM_FUNC_START(loongarch_suspend_enter) * Reload all of the registers and return. 
*/ SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL) - li.d t0, CSR_DMW0_INIT # UC, PLV0 - csrwr t0, LOONGARCH_CSR_DMWIN0 - li.d t0, CSR_DMW1_INIT # CA, PLV0 - csrwr t0, LOONGARCH_CSR_DMWIN1 - + SETUP_DMWINS t0 JUMP_VIRT_ADDR t0, t1 /* Enable PG */ diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c index d0ef93551c44..3782d0a187d1 100644 --- a/drivers/firmware/efi/libstub/loongarch.c +++ b/drivers/firmware/efi/libstub/loongarch.c @@ -74,6 +74,8 @@ efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image, /* Config Direct Mapping */ csr_write64(CSR_DMW0_INIT, LOONGARCH_CSR_DMWIN0); csr_write64(CSR_DMW1_INIT, LOONGARCH_CSR_DMWIN1); + csr_write64(CSR_DMW2_INIT, LOONGARCH_CSR_DMWIN2); + csr_write64(CSR_DMW3_INIT, LOONGARCH_CSR_DMWIN3); real_kernel_entry = (void *)kernel_entry_address(kernel_addr, image); -- Gitee From 24ef3ec011e71abf1c562256abaf6a64059ffc9e Mon Sep 17 00:00:00 2001 From: Qi Liu Date: Mon, 29 Jul 2024 11:42:09 +0800 Subject: [PATCH 1065/2138] anolis: perf/x86/uncore: Add DF PMU support for Hygon family 18h model 5h,7h and 10h ANBZ: #9591 Adjust the DF PMU event and umask for Hygon family 18h modle 5h, 7h and 10h processor. 
Signed-off-by: Qi Liu Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3554 --- arch/x86/events/amd/uncore.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 6faf2f6ca4dc..5bc616a638f0 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -241,9 +241,12 @@ static int amd_uncore_event_init(struct perf_event *event) boot_cpu_data.x86 == 0x18 && is_nb_event(event)) { event_mask = HYGON_F18H_RAW_EVENT_MASK_NB; - if (boot_cpu_data.x86_model == 0x4) + if (boot_cpu_data.x86_model == 0x4 || + boot_cpu_data.x86_model == 0x5) event_mask = HYGON_F18H_M4H_RAW_EVENT_MASK_NB; - if (boot_cpu_data.x86_model == 0x6) + if (boot_cpu_data.x86_model == 0x6 || + boot_cpu_data.x86_model == 0x7 || + boot_cpu_data.x86_model == 0x10) event_mask = HYGON_F18H_M6H_RAW_EVENT_MASK_NB; } @@ -735,9 +738,12 @@ static int __init amd_uncore_init(void) } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && boot_cpu_data.x86 == 0x18) { *df_attr++ = &format_attr_event14f18h.attr; - if (boot_cpu_data.x86_model == 0x4) + if (boot_cpu_data.x86_model == 0x4 || + boot_cpu_data.x86_model == 0x5) *df_attr++ = &format_attr_umask10f18h.attr; - else if (boot_cpu_data.x86_model == 0x6) + else if (boot_cpu_data.x86_model == 0x6 || + boot_cpu_data.x86_model == 0x7 || + boot_cpu_data.x86_model == 0x10) *df_attr++ = &format_attr_umask12f18h.attr; } -- Gitee From e0a04fcd25b3c1d4e02a3847648486e90effe5de Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 29 Mar 2024 11:00:16 +0800 Subject: [PATCH 1066/2138] anolis: x86/mce: Set bios_cmci_threshold for CMCI threshold ANBZ: #9445 In the Linux kernel, the CMCI threshold is set to 1 by default. This patch prevents Linux from overwriting the CMCI threshold set by the bios. With this patch, the CMCI threshold can be set through the BIOS, which can also avoid CMCI storms, on Zhaoxin/Centaur CPUs. 
Signed-off-by: leoliu-oc Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3444 --- arch/x86/kernel/cpu/mce/core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 2cafc35f3b7b..c9c9ebbb3268 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -1946,6 +1946,7 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) if (cfg->monarch_timeout < 0) cfg->monarch_timeout = USEC_PER_SEC; } + mca_cfg.bios_cmci_threshold = 1; } if (cfg->monarch_timeout < 0) -- Gitee From 1825990048da154507a20ca4d82d91564484b8b4 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 14 Jun 2024 06:46:24 -0700 Subject: [PATCH 1067/2138] perf/x86/uncore: Save the unit control address of all units ANBZ: #9608 commit 0007f39325921351b7860a976a730acbb198b9ca upstream. The unit control address of some CXL units may be wrongly calculated under some configuration on a EMR machine. The current implementation only saves the unit control address of the units from the first die, and the first unit of the rest of dies. Perf assumed that the units from the other dies have the same offset as the first die. So the unit control address of the rest of the units can be calculated. However, the assumption is wrong, especially for the CXL units. Introduce an RB tree for each uncore type to save the unit control address and three kinds of ID information (unit ID, PMU ID, and die ID) for all units. The unit ID is a physical ID of a unit. The PMU ID is a logical ID assigned to a unit. The logical IDs start from 0 and must be contiguous. The physical ID and the logical ID are 1:1 mapping. The units with the same physical ID in different dies share the same PMU. The die ID indicates which die a unit belongs to. The RB tree can be searched by two different keys (unit ID or PMU ID + die ID). During the RB tree setup, the unit ID is used as a key to look up the RB tree. 
The perf can create/assign a proper PMU ID to the unit. Later, after the RB tree is setup, PMU ID + die ID is used as a key to look up the RB tree to fill the cpumask of a PMU. It's used more frequently, so PMU ID + die ID is compared in the unit_less(). The uncore_find_unit() has to be O(N). But the RB tree setup only occurs once during the driver load time. It should be acceptable. Compared with the current implementation, more space is required to save the information of all units. The extra size should be acceptable. For example, on EMR, there are 221 units at most. For a 2-socket machine, the extra space is ~6KB at most. Intel-SIG: commit 0007f3932592 perf/x86/uncore: Save the unit control address of all units Backport SPR/EMR HBM and CXL PMON support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240614134631.1092359-2-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3586 --- arch/x86/events/intel/uncore_discovery.c | 79 +++++++++++++++++++++++- arch/x86/events/intel/uncore_discovery.h | 10 +++ 2 files changed, 87 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c index 9a698a92962a..ce520e69a3c1 100644 --- a/arch/x86/events/intel/uncore_discovery.c +++ b/arch/x86/events/intel/uncore_discovery.c @@ -93,6 +93,8 @@ add_uncore_discovery_type(struct uncore_unit_discovery *unit) if (!type->box_ctrl_die) goto free_type; + type->units = RB_ROOT; + type->access_type = unit->access_type; num_discovered_types[type->access_type]++; type->type = unit->box_type; @@ -120,10 +122,59 @@ get_uncore_discovery_type(struct uncore_unit_discovery *unit) return add_uncore_discovery_type(unit); } +static inline bool unit_less(struct rb_node *a, const struct rb_node *b) +{ + struct intel_uncore_discovery_unit *a_node, *b_node; + + 
a_node = rb_entry(a, struct intel_uncore_discovery_unit, node); + b_node = rb_entry(b, struct intel_uncore_discovery_unit, node); + + if (a_node->pmu_idx < b_node->pmu_idx) + return true; + if (a_node->pmu_idx > b_node->pmu_idx) + return false; + + if (a_node->die < b_node->die) + return true; + if (a_node->die > b_node->die) + return false; + + return 0; +} + +static inline struct intel_uncore_discovery_unit * +uncore_find_unit(struct rb_root *root, unsigned int id) +{ + struct intel_uncore_discovery_unit *unit; + struct rb_node *node; + + for (node = rb_first(root); node; node = rb_next(node)) { + unit = rb_entry(node, struct intel_uncore_discovery_unit, node); + if (unit->id == id) + return unit; + } + + return NULL; +} + +static void uncore_find_add_unit(struct intel_uncore_discovery_unit *node, + struct rb_root *root, u16 *num_units) +{ + struct intel_uncore_discovery_unit *unit = uncore_find_unit(root, node->id); + + if (unit) + node->pmu_idx = unit->pmu_idx; + else if (num_units) + node->pmu_idx = (*num_units)++; + + rb_add(&node->node, root, unit_less); +} + static void uncore_insert_box_info(struct uncore_unit_discovery *unit, int die, bool parsed) { + struct intel_uncore_discovery_unit *node; struct intel_uncore_discovery_type *type; unsigned int *ids; u64 *box_offset; @@ -136,14 +187,26 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit, return; } + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return; + + node->die = die; + node->id = unit->box_id; + node->addr = unit->ctl; + if (parsed) { type = search_uncore_discovery_type(unit->box_type); if (!type) { pr_info("A spurious uncore type %d is detected, " "Disable the uncore type.\n", unit->box_type); + kfree(node); return; } + + uncore_find_add_unit(node, &type->units, &type->num_units); + /* Store the first box of each die */ if (!type->box_ctrl_die[die]) type->box_ctrl_die[die] = unit->ctl; @@ -152,16 +215,18 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit, type = 
get_uncore_discovery_type(unit); if (!type) - return; + goto free_node; box_offset = kcalloc(type->num_boxes + 1, sizeof(u64), GFP_KERNEL); if (!box_offset) - return; + goto free_node; ids = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL); if (!ids) goto free_box_offset; + uncore_find_add_unit(node, &type->units, &type->num_units); + /* Store generic information for the first box */ if (!type->num_boxes) { type->box_ctrl = unit->ctl; @@ -201,6 +266,8 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit, free_box_offset: kfree(box_offset); +free_node: + kfree(node); } static bool @@ -339,8 +406,16 @@ bool intel_uncore_has_discovery_tables(int *ignore) void intel_uncore_clear_discovery_tables(void) { struct intel_uncore_discovery_type *type, *next; + struct intel_uncore_discovery_unit *pos; + struct rb_node *node; rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) { + while (!RB_EMPTY_ROOT(&type->units)) { + node = rb_first(&type->units); + pos = rb_entry(node, struct intel_uncore_discovery_unit, node); + rb_erase(node, &type->units); + kfree(pos); + } kfree(type->box_ctrl_die); kfree(type); } diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h index 22e769a81103..5190017aba51 100644 --- a/arch/x86/events/intel/uncore_discovery.h +++ b/arch/x86/events/intel/uncore_discovery.h @@ -113,17 +113,27 @@ struct uncore_unit_discovery { }; }; +struct intel_uncore_discovery_unit { + struct rb_node node; + unsigned int pmu_idx; /* The idx of the corresponding PMU */ + unsigned int id; /* Unit ID */ + unsigned int die; /* Die ID */ + u64 addr; /* Unit Control Address */ +}; + struct intel_uncore_discovery_type { struct rb_node node; enum uncore_access_type access_type; u64 box_ctrl; /* Unit ctrl addr of the first box */ u64 *box_ctrl_die; /* Unit ctrl addr of the first box of each die */ + struct rb_root units; /* Unit ctrl addr for all units */ u16 type; /* Type ID of the uncore block */ 
u8 num_counters; u8 counter_width; u8 ctl_offset; /* Counter Control 0 offset */ u8 ctr_offset; /* Counter 0 offset */ u16 num_boxes; /* number of boxes for the uncore block */ + u16 num_units; /* number of units */ unsigned int *ids; /* Box IDs */ u64 *box_offset; /* Box offset */ }; -- Gitee From f43cbdfc3ffad48908a90530d97cbc0a138cee51 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 14 Jun 2024 06:46:25 -0700 Subject: [PATCH 1068/2138] perf/x86/uncore: Support per PMU cpumask ANBZ: #9608 commit c74443d92f68f07c03ae242ced554b749e6c6736 upstream. The cpumask of some uncore units, e.g., CXL uncore units, may be wrong under some configurations. Perf may access an uncore counter of a non-existent uncore unit. The uncore driver assumes that all uncore units are symmetric among dies. A global cpumask is shared among all uncore PMUs. However, some CXL uncore units may only be available on some dies. A per PMU cpumask is introduced to track the CPU mask of this PMU. The driver searches the unit control RB tree to check whether the PMU is available on a given die, and updates the per PMU cpumask accordingly. 
Intel-SIG: commit c74443d92f68 perf/x86/uncore: Support per PMU cpumask Backport SPR/EMR HBM and CXL PMON support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Yunying Sun Link: https://lore.kernel.org/r/20240614134631.1092359-3-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3586 --- arch/x86/events/intel/uncore.c | 31 +++++++++++-- arch/x86/events/intel/uncore.h | 2 + arch/x86/events/intel/uncore_discovery.c | 58 ++++++++++++++++++++++++ arch/x86/events/intel/uncore_discovery.h | 3 ++ 4 files changed, 89 insertions(+), 5 deletions(-) diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 4e26a28536de..ec937ce83c55 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -843,7 +843,9 @@ static void uncore_pmu_disable(struct pmu *pmu) static ssize_t uncore_get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf) { - return cpumap_print_to_pagebuf(true, buf, &uncore_cpu_mask); + struct intel_uncore_pmu *pmu = container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu); + + return cpumap_print_to_pagebuf(true, buf, &pmu->cpu_mask); } static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL); @@ -1453,6 +1455,18 @@ static void uncore_pci_exit(void) } } +static bool uncore_die_has_box(struct intel_uncore_type *type, + int die, unsigned int pmu_idx) +{ + if (!type->boxes) + return true; + + if (intel_uncore_find_discovery_unit_id(type->boxes, die, pmu_idx) < 0) + return false; + + return true; +} + static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, int new_cpu) { @@ -1468,18 +1482,25 @@ static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, if (old_cpu < 0) { WARN_ON_ONCE(box->cpu != -1); - box->cpu = new_cpu; + if (uncore_die_has_box(type, die, pmu->pmu_idx)) { + box->cpu 
= new_cpu; + cpumask_set_cpu(new_cpu, &pmu->cpu_mask); + } continue; } - WARN_ON_ONCE(box->cpu != old_cpu); + WARN_ON_ONCE(box->cpu != -1 && box->cpu != old_cpu); box->cpu = -1; + cpumask_clear_cpu(old_cpu, &pmu->cpu_mask); if (new_cpu < 0) continue; + if (!uncore_die_has_box(type, die, pmu->pmu_idx)) + continue; uncore_pmu_cancel_hrtimer(box); perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu); box->cpu = new_cpu; + cpumask_set_cpu(new_cpu, &pmu->cpu_mask); } } @@ -1502,7 +1523,7 @@ static void uncore_box_unref(struct intel_uncore_type **types, int id) pmu = type->pmus; for (i = 0; i < type->num_boxes; i++, pmu++) { box = pmu->boxes[id]; - if (box && atomic_dec_return(&box->refcnt) == 0) + if (box && box->cpu >= 0 && atomic_dec_return(&box->refcnt) == 0) uncore_box_exit(box); } } @@ -1592,7 +1613,7 @@ static int uncore_box_ref(struct intel_uncore_type **types, pmu = type->pmus; for (i = 0; i < type->num_boxes; i++, pmu++) { box = pmu->boxes[id]; - if (box && atomic_inc_return(&box->refcnt) == 1) + if (box && box->cpu >= 0 && atomic_inc_return(&box->refcnt) == 1) uncore_box_init(box); } } diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index 4838502d89ae..0a49e304fe40 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -86,6 +86,7 @@ struct intel_uncore_type { const struct attribute_group *attr_groups[4]; const struct attribute_group **attr_update; struct pmu *pmu; /* for custom pmu ops */ + struct rb_root *boxes; /* * Uncore PMU would store relevant platform topology configuration here * to identify which platform component each PMON block of that type is @@ -125,6 +126,7 @@ struct intel_uncore_pmu { int func_id; bool registered; atomic_t activeboxes; + cpumask_t cpu_mask; struct intel_uncore_type *type; struct intel_uncore_box **boxes; }; diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c index ce520e69a3c1..e61e460520a8 100644 --- 
a/arch/x86/events/intel/uncore_discovery.c +++ b/arch/x86/events/intel/uncore_discovery.c @@ -122,6 +122,64 @@ get_uncore_discovery_type(struct uncore_unit_discovery *unit) return add_uncore_discovery_type(unit); } +static inline int pmu_idx_cmp(const void *key, const struct rb_node *b) +{ + struct intel_uncore_discovery_unit *unit; + const unsigned int *id = key; + + unit = rb_entry(b, struct intel_uncore_discovery_unit, node); + + if (unit->pmu_idx > *id) + return -1; + else if (unit->pmu_idx < *id) + return 1; + + return 0; +} + +static struct intel_uncore_discovery_unit * +intel_uncore_find_discovery_unit(struct rb_root *units, int die, + unsigned int pmu_idx) +{ + struct intel_uncore_discovery_unit *unit; + struct rb_node *pos; + + if (!units) + return NULL; + + pos = rb_find_first(&pmu_idx, units, pmu_idx_cmp); + if (!pos) + return NULL; + unit = rb_entry(pos, struct intel_uncore_discovery_unit, node); + + if (die < 0) + return unit; + + for (; pos; pos = rb_next(pos)) { + unit = rb_entry(pos, struct intel_uncore_discovery_unit, node); + + if (unit->pmu_idx != pmu_idx) + break; + + if (unit->die == die) + return unit; + } + + return NULL; +} + +int intel_uncore_find_discovery_unit_id(struct rb_root *units, int die, + unsigned int pmu_idx) +{ + struct intel_uncore_discovery_unit *unit; + + unit = intel_uncore_find_discovery_unit(units, die, pmu_idx); + if (unit) + return unit->id; + + return -1; +} + static inline bool unit_less(struct rb_node *a, const struct rb_node *b) { struct intel_uncore_discovery_unit *a_node, *b_node; diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h index 5190017aba51..96265cf1fc86 100644 --- a/arch/x86/events/intel/uncore_discovery.h +++ b/arch/x86/events/intel/uncore_discovery.h @@ -166,3 +166,6 @@ u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box, struct intel_uncore_type ** intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra); + +int 
intel_uncore_find_discovery_unit_id(struct rb_root *units, int die, + unsigned int pmu_idx); -- Gitee From 04887e786a9d9c2f7e3be769e7511f021aa60b81 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 14 Jun 2024 06:46:26 -0700 Subject: [PATCH 1069/2138] perf/x86/uncore: Retrieve the unit ID from the unit control RB tree ANBZ: #9608 commit 585463fee64270d4b4d80b1e433d2105ef555bec upstream. The box_ids only save the unit ID for the first die. If a unit, e.g., a CXL unit, doesn't exist in the first die. The unit ID cannot be retrieved. The unit control RB tree also stores the unit ID information. Retrieve the unit ID from the unit control RB tree Intel-SIG: commit 585463fee642 perf/x86/uncore: Retrieve the unit ID from the unit control RB tree Backport SPR/EMR HBM and CXL PMON support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Yunying Sun Link: https://lore.kernel.org/r/20240614134631.1092359-4-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3586 --- arch/x86/events/intel/uncore.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index ec937ce83c55..dcf0721adefe 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -862,6 +862,9 @@ static const struct attribute_group uncore_pmu_attr_group = { static inline int uncore_get_box_id(struct intel_uncore_type *type, struct intel_uncore_pmu *pmu) { + if (type->boxes) + return intel_uncore_find_discovery_unit_id(type->boxes, -1, pmu->pmu_idx); + return type->box_ids ? 
type->box_ids[pmu->pmu_idx] : pmu->pmu_idx; } -- Gitee From 4a02fde332ebc4962aa61f1da5faaef82ac13772 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 14 Jun 2024 06:46:27 -0700 Subject: [PATCH 1070/2138] perf/x86/uncore: Apply the unit control RB tree to MMIO uncore units ANBZ: #9608 commit 80580dae65b941eb681bd79f31f64f91b58232b4 upstream. The unit control RB tree has the unit control and unit ID information for all the units. Use it to replace the box_ctls/mmio_offsets to get an accurate unit control address for MMIO uncore units. Intel-SIG: commit 80580dae65b9 perf/x86/uncore: Apply the unit control RB tree to MMIO uncore units Backport SPR/EMR HBM and CXL PMON support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Yunying Sun Link: https://lore.kernel.org/r/20240614134631.1092359-5-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3586 --- arch/x86/events/intel/uncore_discovery.c | 30 +++++++++++------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c index e61e460520a8..ece761c9f17a 100644 --- a/arch/x86/events/intel/uncore_discovery.c +++ b/arch/x86/events/intel/uncore_discovery.c @@ -606,34 +606,30 @@ static struct intel_uncore_ops generic_uncore_pci_ops = { #define UNCORE_GENERIC_MMIO_SIZE 0x4000 -static u64 generic_uncore_mmio_box_ctl(struct intel_uncore_box *box) -{ - struct intel_uncore_type *type = box->pmu->type; - - if (!type->box_ctls || !type->box_ctls[box->dieid] || !type->mmio_offsets) - return 0; - - return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx]; -} - void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box) { - u64 box_ctl = generic_uncore_mmio_box_ctl(box); + static struct intel_uncore_discovery_unit *unit; struct intel_uncore_type *type = 
box->pmu->type; resource_size_t addr; - if (!box_ctl) { + unit = intel_uncore_find_discovery_unit(type->boxes, box->dieid, box->pmu->pmu_idx); + if (!unit) { + pr_warn("Uncore type %d id %d: Cannot find box control address.\n", + type->type_id, box->pmu->pmu_idx); + return; + } + + if (!unit->addr) { pr_warn("Uncore type %d box %d: Invalid box control address.\n", - type->type_id, type->box_ids[box->pmu->pmu_idx]); + type->type_id, unit->id); return; } - addr = box_ctl; + addr = unit->addr; box->io_addr = ioremap(addr, UNCORE_GENERIC_MMIO_SIZE); if (!box->io_addr) { pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n", - type->type_id, type->box_ids[box->pmu->pmu_idx], - (unsigned long long)addr); + type->type_id, unit->id, (unsigned long long)addr); return; } @@ -722,6 +718,8 @@ static bool uncore_update_uncore_type(enum uncore_access_type type_id, uncore->box_ctls = type->box_ctrl_die; uncore->mmio_offsets = type->box_offset; uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE; + uncore->boxes = &type->units; + uncore->num_boxes = type->num_units; break; default: return false; -- Gitee From 1d736c2c172312fd6256e881537723139f8b61be Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 14 Jun 2024 06:46:28 -0700 Subject: [PATCH 1071/2138] perf/x86/uncore: Apply the unit control RB tree to MSR uncore units ANBZ: #9608 commit b1d9ea2e1ca44987c8409cc628dfb0c84e93dce9 upstream. The unit control RB tree has the unit control and unit ID information for all the MSR units. Use them to replace the box_ctl and uncore_msr_box_ctl() to get an accurate unit control address for MSR uncore units. Add intel_generic_uncore_assign_hw_event(), which utilizes the accurate unit control address from the unit control RB tree to calculate the config_base and event_base. The unit id related information should be retrieved from the unit control RB tree as well. 
Intel-SIG: commit b1d9ea2e1ca4 perf/x86/uncore: Apply the unit control RB tree to MSR uncore units Backport SPR/EMR HBM and CXL PMON support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Yunying Sun Link: https://lore.kernel.org/r/20240614134631.1092359-6-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3586 --- arch/x86/events/intel/uncore.c | 3 ++ arch/x86/events/intel/uncore_discovery.c | 49 +++++++++++++++++++++--- arch/x86/events/intel/uncore_discovery.h | 2 + arch/x86/events/intel/uncore_snbep.c | 16 +++++--- 4 files changed, 59 insertions(+), 11 deletions(-) diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index dcf0721adefe..6e9469325ad0 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -263,6 +263,9 @@ static void uncore_assign_hw_event(struct intel_uncore_box *box, return; } + if (intel_generic_uncore_assign_hw_event(event, box)) + return; + hwc->config_base = uncore_event_ctl(box, hwc->idx); hwc->event_base = uncore_perf_ctr(box, hwc->idx); } diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c index ece761c9f17a..076ec1efe9cc 100644 --- a/arch/x86/events/intel/uncore_discovery.c +++ b/arch/x86/events/intel/uncore_discovery.c @@ -499,19 +499,31 @@ static const struct attribute_group generic_uncore_format_group = { .attrs = generic_uncore_formats_attr, }; +static u64 intel_generic_uncore_box_ctl(struct intel_uncore_box *box) +{ + struct intel_uncore_discovery_unit *unit; + + unit = intel_uncore_find_discovery_unit(box->pmu->type->boxes, + -1, box->pmu->pmu_idx); + if (WARN_ON_ONCE(!unit)) + return 0; + + return unit->addr; +} + void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box) { - wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT); + 
wrmsrl(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_INT); } void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box) { - wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ); + wrmsrl(intel_generic_uncore_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ); } void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box) { - wrmsrl(uncore_msr_box_ctl(box), 0); + wrmsrl(intel_generic_uncore_box_ctl(box), 0); } static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box, @@ -539,6 +551,31 @@ static struct intel_uncore_ops generic_uncore_msr_ops = { .read_counter = uncore_msr_read_counter, }; +bool intel_generic_uncore_assign_hw_event(struct perf_event *event, + struct intel_uncore_box *box) +{ + struct hw_perf_event *hwc = &event->hw; + u64 box_ctl; + + if (!box->pmu->type->boxes) + return false; + + if (box->pci_dev || box->io_addr) { + hwc->config_base = uncore_pci_event_ctl(box, hwc->idx); + hwc->event_base = uncore_pci_perf_ctr(box, hwc->idx); + return true; + } + + box_ctl = intel_generic_uncore_box_ctl(box); + if (!box_ctl) + return false; + + hwc->config_base = box_ctl + box->pmu->type->event_ctl + hwc->idx; + hwc->event_base = box_ctl + box->pmu->type->perf_ctr + hwc->idx; + + return true; +} + void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box) { struct pci_dev *pdev = box->pci_dev; @@ -697,10 +734,12 @@ static bool uncore_update_uncore_type(enum uncore_access_type type_id, switch (type_id) { case UNCORE_ACCESS_MSR: uncore->ops = &generic_uncore_msr_ops; - uncore->perf_ctr = (unsigned int)type->box_ctrl + type->ctr_offset; - uncore->event_ctl = (unsigned int)type->box_ctrl + type->ctl_offset; + uncore->perf_ctr = (unsigned int)type->ctr_offset; + uncore->event_ctl = (unsigned int)type->ctl_offset; uncore->box_ctl = (unsigned int)type->box_ctrl; uncore->msr_offsets = type->box_offset; + uncore->boxes = &type->units; + uncore->num_boxes = type->num_units; break; case UNCORE_ACCESS_PCI: uncore->ops 
= &generic_uncore_pci_ops; diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h index 96265cf1fc86..4a7a7c819d6f 100644 --- a/arch/x86/events/intel/uncore_discovery.h +++ b/arch/x86/events/intel/uncore_discovery.h @@ -169,3 +169,5 @@ intel_uncore_generic_init_uncores(enum uncore_access_type type_id, int num_extra int intel_uncore_find_discovery_unit_id(struct rb_root *units, int die, unsigned int pmu_idx); +bool intel_generic_uncore_assign_hw_event(struct perf_event *event, + struct intel_uncore_box *box); diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 94c903b385ae..46ae4b2f04a0 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -5931,10 +5931,11 @@ static int spr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *ev struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; bool tie_en = !!(event->hw.config & SPR_CHA_PMON_CTL_TID_EN); struct intel_uncore_type *type = box->pmu->type; + int id = intel_uncore_find_discovery_unit_id(type->boxes, -1, box->pmu->pmu_idx); if (tie_en) { reg1->reg = SPR_C0_MSR_PMON_BOX_FILTER0 + - HSWEP_CBO_MSR_OFFSET * type->box_ids[box->pmu->pmu_idx]; + HSWEP_CBO_MSR_OFFSET * id; reg1->config = event->attr.config1 & SPR_CHA_PMON_BOX_FILTER_TID; reg1->idx = 0; } @@ -6458,18 +6459,21 @@ uncore_find_type_by_id(struct intel_uncore_type **types, int type_id) static int uncore_type_max_boxes(struct intel_uncore_type **types, int type_id) { + struct intel_uncore_discovery_unit *unit; struct intel_uncore_type *type; - int i, max = 0; + struct rb_node *node; + int max = 0; type = uncore_find_type_by_id(types, type_id); if (!type) return 0; - for (i = 0; i < type->num_boxes; i++) { - if (type->box_ids[i] > max) - max = type->box_ids[i]; - } + for (node = rb_first(type->boxes); node; node = rb_next(node)) { + unit = rb_entry(node, struct intel_uncore_discovery_unit, node); + if (unit->id > max) + max = 
unit->id; + } return max + 1; } -- Gitee From e4fa7a97d7b97bfda572bf899923ce28859ba368 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 14 Jun 2024 06:46:29 -0700 Subject: [PATCH 1072/2138] perf/x86/uncore: Apply the unit control RB tree to PCI uncore units ANBZ: #9608 commit f76a8420444beb1c3968504c8176a67d2d5fe18f upstream. The unit control RB tree has the unit control and unit ID information for all the PCI units. Use them to replace the box_ctls/pci_offsets to get an accurate unit control address for PCI uncore units. The UPI/M3UPI units in the discovery table are ignored. Please see the commit 65248a9a9ee1 ("perf/x86/uncore: Add a quirk for UPI on SPR"). Manually allocate a unit control RB tree for UPI/M3UPI. Add cleanup_extra_boxes to release such manual allocation. Intel-SIG: commit f76a8420444b perf/x86/uncore: Apply the unit control RB tree to PCI uncore units Backport SPR/EMR HBM and CXL PMON support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Yunying Sun Link: https://lore.kernel.org/r/20240614134631.1092359-7-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3586 --- arch/x86/events/intel/uncore.c | 53 +++++++++++----------- arch/x86/events/intel/uncore.h | 4 ++ arch/x86/events/intel/uncore_discovery.c | 26 ++++++++--- arch/x86/events/intel/uncore_discovery.h | 2 + arch/x86/events/intel/uncore_snbep.c | 57 ++++++++++++++++++------ 5 files changed, 94 insertions(+), 48 deletions(-) diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 6e9469325ad0..4935c761956a 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -969,6 +969,9 @@ static void uncore_type_exit(struct intel_uncore_type *type) if (type->cleanup_mapping) type->cleanup_mapping(type); + if (type->cleanup_extra_boxes) + type->cleanup_extra_boxes(type); + if (pmu) { for (i = 0; i < 
type->num_boxes; i++, pmu++) { uncore_pmu_unregister(pmu); @@ -1084,22 +1087,19 @@ static struct intel_uncore_pmu * uncore_pci_find_dev_pmu_from_types(struct pci_dev *pdev) { struct intel_uncore_type **types = uncore_pci_uncores; + struct intel_uncore_discovery_unit *unit; struct intel_uncore_type *type; - u64 box_ctl; - int i, die; + struct rb_node *node; for (; *types; types++) { type = *types; - for (die = 0; die < __uncore_max_dies; die++) { - for (i = 0; i < type->num_boxes; i++) { - if (!type->box_ctls[die]) - continue; - box_ctl = type->box_ctls[die] + type->pci_offsets[i]; - if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(box_ctl) && - pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(box_ctl) && - pci_domain_nr(pdev->bus) == UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl)) - return &type->pmus[i]; - } + + for (node = rb_first(type->boxes); node; node = rb_next(node)) { + unit = rb_entry(node, struct intel_uncore_discovery_unit, node); + if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(unit->addr) && + pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(unit->addr) && + pci_domain_nr(pdev->bus) == UNCORE_DISCOVERY_PCI_DOMAIN(unit->addr)) + return &type->pmus[unit->pmu_idx]; } } @@ -1375,28 +1375,25 @@ static struct notifier_block uncore_pci_notifier = { static void uncore_pci_pmus_register(void) { struct intel_uncore_type **types = uncore_pci_uncores; + struct intel_uncore_discovery_unit *unit; struct intel_uncore_type *type; struct intel_uncore_pmu *pmu; + struct rb_node *node; struct pci_dev *pdev; - u64 box_ctl; - int i, die; for (; *types; types++) { type = *types; - for (die = 0; die < __uncore_max_dies; die++) { - for (i = 0; i < type->num_boxes; i++) { - if (!type->box_ctls[die]) - continue; - box_ctl = type->box_ctls[die] + type->pci_offsets[i]; - pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl), - UNCORE_DISCOVERY_PCI_BUS(box_ctl), - UNCORE_DISCOVERY_PCI_DEVFN(box_ctl)); - if (!pdev) - continue; - pmu = &type->pmus[i]; - - 
uncore_pci_pmu_register(pdev, type, pmu, die); - } + + for (node = rb_first(type->boxes); node; node = rb_next(node)) { + unit = rb_entry(node, struct intel_uncore_discovery_unit, node); + pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(unit->addr), + UNCORE_DISCOVERY_PCI_BUS(unit->addr), + UNCORE_DISCOVERY_PCI_DEVFN(unit->addr)); + + if (!pdev) + continue; + pmu = &type->pmus[unit->pmu_idx]; + uncore_pci_pmu_register(pdev, type, pmu, unit->die); } } diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index 0a49e304fe40..05c429c8cb93 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -99,6 +99,10 @@ struct intel_uncore_type { int (*get_topology)(struct intel_uncore_type *type); void (*set_mapping)(struct intel_uncore_type *type); void (*cleanup_mapping)(struct intel_uncore_type *type); + /* + * Optional callbacks for extra uncore units cleanup + */ + void (*cleanup_extra_boxes)(struct intel_uncore_type *type); }; #define pmu_group attr_groups[0] diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c index 076ec1efe9cc..866493fda47c 100644 --- a/arch/x86/events/intel/uncore_discovery.c +++ b/arch/x86/events/intel/uncore_discovery.c @@ -215,8 +215,8 @@ uncore_find_unit(struct rb_root *root, unsigned int id) return NULL; } -static void uncore_find_add_unit(struct intel_uncore_discovery_unit *node, - struct rb_root *root, u16 *num_units) +void uncore_find_add_unit(struct intel_uncore_discovery_unit *node, + struct rb_root *root, u16 *num_units) { struct intel_uncore_discovery_unit *unit = uncore_find_unit(root, node->id); @@ -560,7 +560,7 @@ bool intel_generic_uncore_assign_hw_event(struct perf_event *event, if (!box->pmu->type->boxes) return false; - if (box->pci_dev || box->io_addr) { + if (box->io_addr) { hwc->config_base = uncore_pci_event_ctl(box, hwc->idx); hwc->event_base = uncore_pci_perf_ctr(box, hwc->idx); return true; @@ -570,16 +570,28 @@ bool 
intel_generic_uncore_assign_hw_event(struct perf_event *event, if (!box_ctl) return false; + if (box->pci_dev) { + box_ctl = UNCORE_DISCOVERY_PCI_BOX_CTRL(box_ctl); + hwc->config_base = box_ctl + uncore_pci_event_ctl(box, hwc->idx); + hwc->event_base = box_ctl + uncore_pci_perf_ctr(box, hwc->idx); + return true; + } + hwc->config_base = box_ctl + box->pmu->type->event_ctl + hwc->idx; hwc->event_base = box_ctl + box->pmu->type->perf_ctr + hwc->idx; return true; } +static inline int intel_pci_uncore_box_ctl(struct intel_uncore_box *box) +{ + return UNCORE_DISCOVERY_PCI_BOX_CTRL(intel_generic_uncore_box_ctl(box)); +} + void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box) { struct pci_dev *pdev = box->pci_dev; - int box_ctl = uncore_pci_box_ctl(box); + int box_ctl = intel_pci_uncore_box_ctl(box); __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT); @@ -588,7 +600,7 @@ void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box) void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box) { struct pci_dev *pdev = box->pci_dev; - int box_ctl = uncore_pci_box_ctl(box); + int box_ctl = intel_pci_uncore_box_ctl(box); pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ); } @@ -596,7 +608,7 @@ void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box) void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box) { struct pci_dev *pdev = box->pci_dev; - int box_ctl = uncore_pci_box_ctl(box); + int box_ctl = intel_pci_uncore_box_ctl(box); pci_write_config_dword(pdev, box_ctl, 0); } @@ -748,6 +760,8 @@ static bool uncore_update_uncore_type(enum uncore_access_type type_id, uncore->box_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl); uncore->box_ctls = type->box_ctrl_die; uncore->pci_offsets = type->box_offset; + uncore->boxes = &type->units; + uncore->num_boxes = type->num_units; break; case UNCORE_ACCESS_MMIO: uncore->ops = 
&generic_uncore_mmio_ops; diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h index 4a7a7c819d6f..0acf9b681f3b 100644 --- a/arch/x86/events/intel/uncore_discovery.h +++ b/arch/x86/events/intel/uncore_discovery.h @@ -171,3 +171,5 @@ int intel_uncore_find_discovery_unit_id(struct rb_root *units, int die, unsigned int pmu_idx); bool intel_generic_uncore_assign_hw_event(struct perf_event *event, struct intel_uncore_box *box); +void uncore_find_add_unit(struct intel_uncore_discovery_unit *node, + struct rb_root *root, u16 *num_units); diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 46ae4b2f04a0..05cfef7d69c7 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -6197,6 +6197,24 @@ static u64 spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = { 0, 0x8000, 0x10000, 0x18000 }; +static void spr_extra_boxes_cleanup(struct intel_uncore_type *type) +{ + struct intel_uncore_discovery_unit *pos; + struct rb_node *node; + + if (!type->boxes) + return; + + while (!RB_EMPTY_ROOT(type->boxes)) { + node = rb_first(type->boxes); + pos = rb_entry(node, struct intel_uncore_discovery_unit, node); + rb_erase(node, type->boxes); + kfree(pos); + } + kfree(type->boxes); + type->boxes = NULL; +} + static struct intel_uncore_type spr_uncore_upi = { .event_mask = SNBEP_PMON_RAW_EVENT_MASK, .event_mask_ext = SPR_RAW_EVENT_MASK_EXT, @@ -6211,10 +6229,11 @@ static struct intel_uncore_type spr_uncore_upi = { .num_counters = 4, .num_boxes = SPR_UNCORE_UPI_NUM_BOXES, .perf_ctr_bits = 48, - .perf_ctr = ICX_UPI_PCI_PMON_CTR0, - .event_ctl = ICX_UPI_PCI_PMON_CTL0, + .perf_ctr = ICX_UPI_PCI_PMON_CTR0 - ICX_UPI_PCI_PMON_BOX_CTL, + .event_ctl = ICX_UPI_PCI_PMON_CTL0 - ICX_UPI_PCI_PMON_BOX_CTL, .box_ctl = ICX_UPI_PCI_PMON_BOX_CTL, .pci_offsets = spr_upi_pci_offsets, + .cleanup_extra_boxes = spr_extra_boxes_cleanup, }; static struct intel_uncore_type spr_uncore_m3upi = { @@ 
-6224,11 +6243,12 @@ static struct intel_uncore_type spr_uncore_m3upi = { .num_counters = 4, .num_boxes = SPR_UNCORE_UPI_NUM_BOXES, .perf_ctr_bits = 48, - .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0, - .event_ctl = ICX_M3UPI_PCI_PMON_CTL0, + .perf_ctr = ICX_M3UPI_PCI_PMON_CTR0 - ICX_M3UPI_PCI_PMON_BOX_CTL, + .event_ctl = ICX_M3UPI_PCI_PMON_CTL0 - ICX_M3UPI_PCI_PMON_BOX_CTL, .box_ctl = ICX_M3UPI_PCI_PMON_BOX_CTL, .pci_offsets = spr_upi_pci_offsets, .constraints = icx_uncore_m3upi_constraints, + .cleanup_extra_boxes = spr_extra_boxes_cleanup, }; enum perf_uncore_spr_iio_freerunning_type_id { @@ -6515,10 +6535,11 @@ void spr_uncore_cpu_init(void) static void spr_update_device_location(int type_id) { + struct intel_uncore_discovery_unit *unit; struct intel_uncore_type *type; struct pci_dev *dev = NULL; + struct rb_root *root; u32 device, devfn; - u64 *ctls; int die; if (type_id == UNCORE_SPR_UPI) { @@ -6532,27 +6553,35 @@ static void spr_update_device_location(int type_id) } else return; - ctls = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL); - if (!ctls) { + root = kzalloc(sizeof(struct rb_root), GFP_KERNEL); + if (!root) { type->num_boxes = 0; return; } + *root = RB_ROOT; while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) { - if (devfn != dev->devfn) - continue; die = uncore_device_to_die(dev); if (die < 0) continue; - ctls[die] = pci_domain_nr(dev->bus) << UNCORE_DISCOVERY_PCI_DOMAIN_OFFSET | - dev->bus->number << UNCORE_DISCOVERY_PCI_BUS_OFFSET | - devfn << UNCORE_DISCOVERY_PCI_DEVFN_OFFSET | - type->box_ctl; + unit = kzalloc(sizeof(*unit), GFP_KERNEL); + if (!unit) + continue; + unit->die = die; + unit->id = PCI_SLOT(dev->devfn) - PCI_SLOT(devfn); + unit->addr = pci_domain_nr(dev->bus) << UNCORE_DISCOVERY_PCI_DOMAIN_OFFSET | + dev->bus->number << UNCORE_DISCOVERY_PCI_BUS_OFFSET | + devfn << UNCORE_DISCOVERY_PCI_DEVFN_OFFSET | + type->box_ctl; + + unit->pmu_idx = unit->id; + + uncore_find_add_unit(unit, root, NULL); } - type->box_ctls = ctls; + 
type->boxes = root; } int spr_uncore_pci_init(void) -- Gitee From adfbe86df14d62c7575b07f79bce73ae16fbde8b Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 14 Jun 2024 06:46:30 -0700 Subject: [PATCH 1073/2138] perf/x86/uncore: Cleanup unused unit structure ANBZ: #9608 commit 15a4bd51853b9c67f49bb03c20b6b6cb60fd204f upstream. The unit control and ID information are retrieved from the unit control RB tree. No one uses the old structure anymore. Remove them. Intel-SIG: commit 15a4bd51853b perf/x86/uncore: Cleanup unused unit structure Backport SPR/EMR HBM and CXL PMON support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Yunying Sun Link: https://lore.kernel.org/r/20240614134631.1092359-8-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3586 --- arch/x86/events/intel/uncore.c | 7 +- arch/x86/events/intel/uncore.h | 2 - arch/x86/events/intel/uncore_discovery.c | 110 +++-------------------- arch/x86/events/intel/uncore_discovery.h | 5 -- 4 files changed, 12 insertions(+), 112 deletions(-) diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 4935c761956a..4d856b51307f 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -868,7 +868,7 @@ static inline int uncore_get_box_id(struct intel_uncore_type *type, if (type->boxes) return intel_uncore_find_discovery_unit_id(type->boxes, -1, pmu->pmu_idx); - return type->box_ids ? 
type->box_ids[pmu->pmu_idx] : pmu->pmu_idx; + return pmu->pmu_idx; } void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu) @@ -980,10 +980,7 @@ static void uncore_type_exit(struct intel_uncore_type *type) kfree(type->pmus); type->pmus = NULL; } - if (type->box_ids) { - kfree(type->box_ids); - type->box_ids = NULL; - } + kfree(type->events_group); type->events_group = NULL; } diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index 05c429c8cb93..027ef292c602 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -62,7 +62,6 @@ struct intel_uncore_type { unsigned fixed_ctr; unsigned fixed_ctl; unsigned box_ctl; - u64 *box_ctls; /* Unit ctrl addr of the first box of each die */ union { unsigned msr_offset; unsigned mmio_offset; @@ -76,7 +75,6 @@ struct intel_uncore_type { u64 *pci_offsets; u64 *mmio_offsets; }; - unsigned *box_ids; struct event_constraint unconstrainted; struct event_constraint *constraints; struct intel_uncore_pmu *pmus; diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c index 866493fda47c..571e44b49691 100644 --- a/arch/x86/events/intel/uncore_discovery.c +++ b/arch/x86/events/intel/uncore_discovery.c @@ -89,10 +89,6 @@ add_uncore_discovery_type(struct uncore_unit_discovery *unit) if (!type) return NULL; - type->box_ctrl_die = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL); - if (!type->box_ctrl_die) - goto free_type; - type->units = RB_ROOT; type->access_type = unit->access_type; @@ -102,12 +98,6 @@ add_uncore_discovery_type(struct uncore_unit_discovery *unit) rb_add(&type->node, &discovery_tables, __type_less); return type; - -free_type: - kfree(type); - - return NULL; - } static struct intel_uncore_discovery_type * @@ -230,13 +220,10 @@ void uncore_find_add_unit(struct intel_uncore_discovery_unit *node, static void uncore_insert_box_info(struct uncore_unit_discovery *unit, - int die, bool parsed) + int die) { struct 
intel_uncore_discovery_unit *node; struct intel_uncore_discovery_type *type; - unsigned int *ids; - u64 *box_offset; - int i; if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) { pr_info("Invalid address is detected for uncore type %d box %d, " @@ -253,79 +240,21 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit, node->id = unit->box_id; node->addr = unit->ctl; - if (parsed) { - type = search_uncore_discovery_type(unit->box_type); - if (!type) { - pr_info("A spurious uncore type %d is detected, " - "Disable the uncore type.\n", - unit->box_type); - kfree(node); - return; - } - - uncore_find_add_unit(node, &type->units, &type->num_units); - - /* Store the first box of each die */ - if (!type->box_ctrl_die[die]) - type->box_ctrl_die[die] = unit->ctl; + type = get_uncore_discovery_type(unit); + if (!type) { + kfree(node); return; } - type = get_uncore_discovery_type(unit); - if (!type) - goto free_node; - - box_offset = kcalloc(type->num_boxes + 1, sizeof(u64), GFP_KERNEL); - if (!box_offset) - goto free_node; - - ids = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL); - if (!ids) - goto free_box_offset; - uncore_find_add_unit(node, &type->units, &type->num_units); /* Store generic information for the first box */ - if (!type->num_boxes) { - type->box_ctrl = unit->ctl; - type->box_ctrl_die[die] = unit->ctl; + if (type->num_units == 1) { type->num_counters = unit->num_regs; type->counter_width = unit->bit_width; type->ctl_offset = unit->ctl_offset; type->ctr_offset = unit->ctr_offset; - *ids = unit->box_id; - goto end; } - - for (i = 0; i < type->num_boxes; i++) { - ids[i] = type->ids[i]; - box_offset[i] = type->box_offset[i]; - - if (unit->box_id == ids[i]) { - pr_info("Duplicate uncore type %d box ID %d is detected, " - "Drop the duplicate uncore unit.\n", - unit->box_type, unit->box_id); - goto free_ids; - } - } - ids[i] = unit->box_id; - box_offset[i] = unit->ctl - type->box_ctrl; - kfree(type->ids); - kfree(type->box_offset); -end: 
- type->ids = ids; - type->box_offset = box_offset; - type->num_boxes++; - return; - -free_ids: - kfree(ids); - -free_box_offset: - kfree(box_offset); - -free_node: - kfree(node); } static bool @@ -404,7 +333,7 @@ static int parse_discovery_table(struct pci_dev *dev, int die, if (uncore_ignore_unit(&unit, ignore)) continue; - uncore_insert_box_info(&unit, die, *parsed); + uncore_insert_box_info(&unit, die); } *parsed = true; @@ -474,7 +403,6 @@ void intel_uncore_clear_discovery_tables(void) rb_erase(node, &type->units); kfree(pos); } - kfree(type->box_ctrl_die); kfree(type); } } @@ -738,41 +666,23 @@ static bool uncore_update_uncore_type(enum uncore_access_type type_id, struct intel_uncore_discovery_type *type) { uncore->type_id = type->type; - uncore->num_boxes = type->num_boxes; uncore->num_counters = type->num_counters; uncore->perf_ctr_bits = type->counter_width; - uncore->box_ids = type->ids; + uncore->perf_ctr = (unsigned int)type->ctr_offset; + uncore->event_ctl = (unsigned int)type->ctl_offset; + uncore->boxes = &type->units; + uncore->num_boxes = type->num_units; switch (type_id) { case UNCORE_ACCESS_MSR: uncore->ops = &generic_uncore_msr_ops; - uncore->perf_ctr = (unsigned int)type->ctr_offset; - uncore->event_ctl = (unsigned int)type->ctl_offset; - uncore->box_ctl = (unsigned int)type->box_ctrl; - uncore->msr_offsets = type->box_offset; - uncore->boxes = &type->units; - uncore->num_boxes = type->num_units; break; case UNCORE_ACCESS_PCI: uncore->ops = &generic_uncore_pci_ops; - uncore->perf_ctr = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctr_offset; - uncore->event_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctl_offset; - uncore->box_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl); - uncore->box_ctls = type->box_ctrl_die; - uncore->pci_offsets = type->box_offset; - uncore->boxes = &type->units; - uncore->num_boxes = type->num_units; break; case UNCORE_ACCESS_MMIO: uncore->ops = 
&generic_uncore_mmio_ops; - uncore->perf_ctr = (unsigned int)type->ctr_offset; - uncore->event_ctl = (unsigned int)type->ctl_offset; - uncore->box_ctl = (unsigned int)type->box_ctrl; - uncore->box_ctls = type->box_ctrl_die; - uncore->mmio_offsets = type->box_offset; uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE; - uncore->boxes = &type->units; - uncore->num_boxes = type->num_units; break; default: return false; diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h index 0acf9b681f3b..0e94aa7db8e7 100644 --- a/arch/x86/events/intel/uncore_discovery.h +++ b/arch/x86/events/intel/uncore_discovery.h @@ -124,18 +124,13 @@ struct intel_uncore_discovery_unit { struct intel_uncore_discovery_type { struct rb_node node; enum uncore_access_type access_type; - u64 box_ctrl; /* Unit ctrl addr of the first box */ - u64 *box_ctrl_die; /* Unit ctrl addr of the first box of each die */ struct rb_root units; /* Unit ctrl addr for all units */ u16 type; /* Type ID of the uncore block */ u8 num_counters; u8 counter_width; u8 ctl_offset; /* Counter Control 0 offset */ u8 ctr_offset; /* Counter 0 offset */ - u16 num_boxes; /* number of boxes for the uncore block */ u16 num_units; /* number of units */ - unsigned int *ids; /* Box IDs */ - u64 *box_offset; /* Box offset */ }; bool intel_uncore_has_discovery_tables(int *ignore); -- Gitee From e22a28b00afdc6a52dba8c2305647a34e660b805 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 14 Jun 2024 06:46:31 -0700 Subject: [PATCH 1074/2138] perf/x86/intel/uncore: Support HBM and CXL PMON counters ANBZ: #9608 commit f8a86a9bb5f7e65d8c4405052de062639a8783bb upstream. Unknown uncore PMON types can be found in both SPR and EMR with HBM or CXL. 
$ls /sys/devices/ | grep type uncore_type_12_16 uncore_type_12_18 uncore_type_12_2 uncore_type_12_4 uncore_type_12_6 uncore_type_12_8 uncore_type_13_17 uncore_type_13_19 uncore_type_13_3 uncore_type_13_5 uncore_type_13_7 uncore_type_13_9 The unknown PMON types are HBM and CXL PMON. Except for the name, the other information regarding the HBM and CXL PMON counters can be retrieved via the discovery table. Add them into the uncores tables for SPR and EMR. The event config registers for all CXL related units are 8-byte apart. Add SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT to specially handle it. Intel-SIG: commit f8a86a9bb5f7 perf/x86/intel/uncore: Support HBM and CXL PMON counters Backport SPR/EMR HBM and CXL PMON support to kernel v6.6 Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Yunying Sun Link: https://lore.kernel.org/r/20240614134631.1092359-9-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3586 --- arch/x86/events/intel/uncore_snbep.c | 55 +++++++++++++++++++++++++++- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 05cfef7d69c7..dcfabf678807 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -6161,7 +6161,55 @@ static struct intel_uncore_type spr_uncore_mdf = { .name = "mdf", }; -#define UNCORE_SPR_NUM_UNCORE_TYPES 12 +static void spr_uncore_mmio_offs8_init_box(struct intel_uncore_box *box) +{ + __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags); + intel_generic_uncore_mmio_init_box(box); +} + +static struct intel_uncore_ops spr_uncore_mmio_offs8_ops = { + .init_box = spr_uncore_mmio_offs8_init_box, + .exit_box = uncore_mmio_exit_box, + .disable_box = intel_generic_uncore_mmio_disable_box, + .enable_box = intel_generic_uncore_mmio_enable_box, + .disable_event = 
intel_generic_uncore_mmio_disable_event, + .enable_event = spr_uncore_mmio_enable_event, + .read_counter = uncore_mmio_read_counter, +}; + +#define SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT() \ + SPR_UNCORE_COMMON_FORMAT(), \ + .ops = &spr_uncore_mmio_offs8_ops + +static struct event_constraint spr_uncore_cxlcm_constraints[] = { + UNCORE_EVENT_CONSTRAINT(0x02, 0x0f), + UNCORE_EVENT_CONSTRAINT(0x05, 0x0f), + UNCORE_EVENT_CONSTRAINT(0x40, 0xf0), + UNCORE_EVENT_CONSTRAINT(0x41, 0xf0), + UNCORE_EVENT_CONSTRAINT(0x42, 0xf0), + UNCORE_EVENT_CONSTRAINT(0x43, 0xf0), + UNCORE_EVENT_CONSTRAINT(0x4b, 0xf0), + UNCORE_EVENT_CONSTRAINT(0x52, 0xf0), + EVENT_CONSTRAINT_END +}; + +static struct intel_uncore_type spr_uncore_cxlcm = { + SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(), + .name = "cxlcm", + .constraints = spr_uncore_cxlcm_constraints, +}; + +static struct intel_uncore_type spr_uncore_cxldp = { + SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(), + .name = "cxldp", +}; + +static struct intel_uncore_type spr_uncore_hbm = { + SPR_UNCORE_COMMON_FORMAT(), + .name = "hbm", +}; + +#define UNCORE_SPR_NUM_UNCORE_TYPES 15 #define UNCORE_SPR_CHA 0 #define UNCORE_SPR_IIO 1 #define UNCORE_SPR_IMC 6 @@ -6185,6 +6233,9 @@ static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = { NULL, NULL, &spr_uncore_mdf, + &spr_uncore_cxlcm, + &spr_uncore_cxldp, + &spr_uncore_hbm, }; /* @@ -6654,7 +6705,7 @@ static struct intel_uncore_type gnr_uncore_b2cmi = { }; static struct intel_uncore_type gnr_uncore_b2cxl = { - SPR_UNCORE_MMIO_COMMON_FORMAT(), + SPR_UNCORE_MMIO_OFFS8_COMMON_FORMAT(), .name = "b2cxl", }; -- Gitee From 41988d249900592a168307359a1f42c27b627140 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 29 Aug 2023 05:58:01 -0700 Subject: [PATCH 1075/2138] perf/x86/intel: Use the common uarch name for the shared functions ANBZ: #9622 commit d4b5694c75d4eba8238d541a55da0c67e876213e upstream. From PMU's perspective, the SPR/GNR server has a similar uarch to the ADL/MTL client p-core. 
Many functions are shared. However, the shared function name uses the abbreviation of the server product code name, rather than the common uarch code name. Rename these internal shared functions by the common uarch name. Intel-SIG: commit d4b5694c75d4 perf/x86/intel: Use the common uarch name for the shared functions Backport as a dependency needed by the GNR distinct pmu name fix Signed-off-by: Kan Liang Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20230829125806.3016082-2-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3587 --- arch/x86/events/intel/core.c | 64 ++++++++++++++++++------------------ arch/x86/events/intel/ds.c | 2 +- arch/x86/events/perf_event.h | 2 +- 3 files changed, 34 insertions(+), 34 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index d84979a01902..bfbe95bf5be9 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -299,7 +299,7 @@ static struct extra_reg intel_icl_extra_regs[] __read_mostly = { EVENT_EXTRA_END }; -static struct extra_reg intel_spr_extra_regs[] __read_mostly = { +static struct extra_reg intel_glc_extra_regs[] __read_mostly = { INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), @@ -309,7 +309,7 @@ static struct extra_reg intel_spr_extra_regs[] __read_mostly = { EVENT_EXTRA_END }; -static struct event_constraint intel_spr_event_constraints[] = { +static struct event_constraint intel_glc_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ @@ -349,7 +349,7 @@ static struct event_constraint intel_spr_event_constraints[] = { EVENT_CONSTRAINT_END }; 
-static struct extra_reg intel_gnr_extra_regs[] __read_mostly = { +static struct extra_reg intel_rwc_extra_regs[] __read_mostly = { INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0), INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), @@ -473,7 +473,7 @@ static u64 intel_pmu_event_map(int hw_event) return intel_perfmon_event_map[hw_event]; } -static __initconst const u64 spr_hw_cache_event_ids +static __initconst const u64 glc_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = @@ -552,7 +552,7 @@ static __initconst const u64 spr_hw_cache_event_ids }, }; -static __initconst const u64 spr_hw_cache_extra_regs +static __initconst const u64 glc_hw_cache_extra_regs [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = @@ -4348,7 +4348,7 @@ icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, } static struct event_constraint * -spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx, +glc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct perf_event *event) { struct event_constraint *c; @@ -4437,7 +4437,7 @@ adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); if (pmu->cpu_type == hybrid_big) - return spr_get_event_constraints(cpuc, idx, event); + return glc_get_event_constraints(cpuc, idx, event); else if (pmu->cpu_type == hybrid_small) return tnt_get_event_constraints(cpuc, idx, event); @@ -4489,7 +4489,7 @@ rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, { struct event_constraint *c; - c = spr_get_event_constraints(cpuc, idx, event); + c = glc_get_event_constraints(cpuc, idx, event); /* The Retire Latency is not supported by the fixed counter 0. 
*/ if (event->attr.precise_ip && @@ -4588,7 +4588,7 @@ static void nhm_limit_period(struct perf_event *event, s64 *left) *left = max(*left, 32LL); } -static void spr_limit_period(struct perf_event *event, s64 *left) +static void glc_limit_period(struct perf_event *event, s64 *left) { if (event->attr.precise_ip == 3) *left = max(*left, 128LL); @@ -5435,14 +5435,14 @@ static struct attribute *icl_tsx_events_attrs[] = { EVENT_ATTR_STR(mem-stores, mem_st_spr, "event=0xcd,umask=0x2"); EVENT_ATTR_STR(mem-loads-aux, mem_ld_aux, "event=0x03,umask=0x82"); -static struct attribute *spr_events_attrs[] = { +static struct attribute *glc_events_attrs[] = { EVENT_PTR(mem_ld_hsw), EVENT_PTR(mem_st_spr), EVENT_PTR(mem_ld_aux), NULL, }; -static struct attribute *spr_td_events_attrs[] = { +static struct attribute *glc_td_events_attrs[] = { EVENT_PTR(slots), EVENT_PTR(td_retiring), EVENT_PTR(td_bad_spec), @@ -5455,7 +5455,7 @@ static struct attribute *spr_td_events_attrs[] = { NULL, }; -static struct attribute *spr_tsx_events_attrs[] = { +static struct attribute *glc_tsx_events_attrs[] = { EVENT_PTR(tx_start), EVENT_PTR(tx_abort), EVENT_PTR(tx_commit), @@ -6369,7 +6369,7 @@ __init int intel_pmu_init(void) intel_pmu_pebs_data_source_grt(); x86_pmu.pebs_latency_data = adl_latency_data_small; x86_pmu.get_event_constraints = tnt_get_event_constraints; - x86_pmu.limit_period = spr_limit_period; + x86_pmu.limit_period = glc_limit_period; td_attr = tnt_events_attrs; mem_attr = grt_mem_attrs; extra_attr = nhm_format_attr; @@ -6400,7 +6400,7 @@ __init int intel_pmu_init(void) intel_pmu_pebs_data_source_cmt(); x86_pmu.pebs_latency_data = mtl_latency_data_small; x86_pmu.get_event_constraints = cmt_get_event_constraints; - x86_pmu.limit_period = spr_limit_period; + x86_pmu.limit_period = glc_limit_period; td_attr = cmt_events_attrs; mem_attr = grt_mem_attrs; extra_attr = cmt_format_attr; @@ -6718,20 +6718,20 @@ __init int intel_pmu_init(void) case INTEL_FAM6_SAPPHIRERAPIDS_X: case 
INTEL_FAM6_EMERALDRAPIDS_X: x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; - x86_pmu.extra_regs = intel_spr_extra_regs; + x86_pmu.extra_regs = intel_glc_extra_regs; fallthrough; case INTEL_FAM6_GRANITERAPIDS_X: case INTEL_FAM6_GRANITERAPIDS_D: pmem = true; x86_pmu.late_ack = true; - memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - memcpy(hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); + memcpy(hw_cache_event_ids, glc_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); - x86_pmu.event_constraints = intel_spr_event_constraints; - x86_pmu.pebs_constraints = intel_spr_pebs_event_constraints; + x86_pmu.event_constraints = intel_glc_event_constraints; + x86_pmu.pebs_constraints = intel_glc_pebs_event_constraints; if (!x86_pmu.extra_regs) - x86_pmu.extra_regs = intel_gnr_extra_regs; - x86_pmu.limit_period = spr_limit_period; + x86_pmu.extra_regs = intel_rwc_extra_regs; + x86_pmu.limit_period = glc_limit_period; x86_pmu.pebs_ept = 1; x86_pmu.pebs_aliases = NULL; x86_pmu.pebs_prec_dist = true; @@ -6741,13 +6741,13 @@ __init int intel_pmu_init(void) x86_pmu.flags |= PMU_FL_INSTR_LATENCY; x86_pmu.hw_config = hsw_hw_config; - x86_pmu.get_event_constraints = spr_get_event_constraints; + x86_pmu.get_event_constraints = glc_get_event_constraints; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
hsw_format_attr : nhm_format_attr; extra_skl_attr = skl_format_attr; - mem_attr = spr_events_attrs; - td_attr = spr_td_events_attrs; - tsx_attr = spr_tsx_events_attrs; + mem_attr = glc_events_attrs; + td_attr = glc_td_events_attrs; + tsx_attr = glc_tsx_events_attrs; x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); x86_pmu.lbr_pt_coexist = true; intel_pmu_pebs_data_source_skl(pmem); @@ -6797,7 +6797,7 @@ __init int intel_pmu_init(void) x86_pmu.filter = intel_pmu_filter; x86_pmu.get_event_constraints = adl_get_event_constraints; x86_pmu.hw_config = adl_hw_config; - x86_pmu.limit_period = spr_limit_period; + x86_pmu.limit_period = glc_limit_period; x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type; /* * The rtm_abort_event is used to check whether to enable GPRs @@ -6846,11 +6846,11 @@ __init int intel_pmu_init(void) pmu->intel_cap.perf_metrics = 1; pmu->intel_cap.pebs_output_pt_available = 0; - memcpy(pmu->hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids)); - memcpy(pmu->hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs)); - pmu->event_constraints = intel_spr_event_constraints; - pmu->pebs_constraints = intel_spr_pebs_event_constraints; - pmu->extra_regs = intel_spr_extra_regs; + memcpy(pmu->hw_cache_event_ids, glc_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids)); + memcpy(pmu->hw_cache_extra_regs, glc_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs)); + pmu->event_constraints = intel_glc_event_constraints; + pmu->pebs_constraints = intel_glc_pebs_event_constraints; + pmu->extra_regs = intel_glc_extra_regs; /* Initialize Atom core specific PerfMon capabilities.*/ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; @@ -6874,7 +6874,7 @@ __init int intel_pmu_init(void) pmu->pebs_constraints = intel_grt_pebs_event_constraints; pmu->extra_regs = intel_grt_extra_regs; if (is_mtl(boot_cpu_data.x86_model)) { - x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].extra_regs = intel_gnr_extra_regs; + 
x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].extra_regs = intel_rwc_extra_regs; x86_pmu.pebs_latency_data = mtl_latency_data_small; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 299ee85b253d..d9d5573e0cac 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1058,7 +1058,7 @@ struct event_constraint intel_icl_pebs_event_constraints[] = { EVENT_CONSTRAINT_END }; -struct event_constraint intel_spr_pebs_event_constraints[] = { +struct event_constraint intel_glc_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index b8a2d3ba4ccd..38342d1614f5 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -1529,7 +1529,7 @@ extern struct event_constraint intel_skl_pebs_event_constraints[]; extern struct event_constraint intel_icl_pebs_event_constraints[]; -extern struct event_constraint intel_spr_pebs_event_constraints[]; +extern struct event_constraint intel_glc_pebs_event_constraints[]; struct event_constraint *intel_pebs_constraints(struct perf_event *event); -- Gitee From 9551977a177d4a629bf1ffd9e6f4f1b52a7e49ef Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 29 Aug 2023 05:58:02 -0700 Subject: [PATCH 1076/2138] perf/x86/intel: Factor out the initialization code for SPR ANBZ: #9622 commit 0ba0c03528e918a8f6b5aa63d502fdc6a9d80fc7 upstream. The SPR and ADL p-core have a similar uarch. Most of the initialization code can be shared. Factor out intel_pmu_init_glc() for the common initialization code. The common part of the ADL p-core will be replaced by the later patch. 
Intel-SIG: commit 0ba0c03528e9 perf/x86/intel: Factor out the initialization code for SPR Backport as a dependency needed by the GNR distinct pmu name fix Signed-off-by: Kan Liang Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20230829125806.3016082-3-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3587 --- arch/x86/events/intel/core.c | 49 +++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index bfbe95bf5be9..9e940b18003c 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -6070,6 +6070,30 @@ static __always_inline bool is_mtl(u8 x86_model) (x86_model == INTEL_FAM6_METEORLAKE_L); } +static __always_inline void intel_pmu_init_glc(struct pmu *pmu) +{ + x86_pmu.late_ack = true; + x86_pmu.limit_period = glc_limit_period; + x86_pmu.pebs_aliases = NULL; + x86_pmu.pebs_prec_dist = true; + x86_pmu.pebs_block = true; + x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.flags |= PMU_FL_NO_HT_SHARING; + x86_pmu.flags |= PMU_FL_INSTR_LATENCY; + x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); + x86_pmu.lbr_pt_coexist = true; + x86_pmu.num_topdown_events = 8; + static_call_update(intel_pmu_update_topdown_event, + &icl_update_topdown_event); + static_call_update(intel_pmu_set_topdown_event_period, + &icl_set_topdown_event_period); + + memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); + hybrid(pmu, event_constraints) = intel_glc_event_constraints; + hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints; +} + __init int intel_pmu_init(void) { struct attribute **extra_skl_attr = &empty_attrs; @@ -6722,24 +6746,10 @@ __init int intel_pmu_init(void) 
fallthrough; case INTEL_FAM6_GRANITERAPIDS_X: case INTEL_FAM6_GRANITERAPIDS_D: - pmem = true; - x86_pmu.late_ack = true; - memcpy(hw_cache_event_ids, glc_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - memcpy(hw_cache_extra_regs, glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); - - x86_pmu.event_constraints = intel_glc_event_constraints; - x86_pmu.pebs_constraints = intel_glc_pebs_event_constraints; + intel_pmu_init_glc(NULL); if (!x86_pmu.extra_regs) x86_pmu.extra_regs = intel_rwc_extra_regs; - x86_pmu.limit_period = glc_limit_period; x86_pmu.pebs_ept = 1; - x86_pmu.pebs_aliases = NULL; - x86_pmu.pebs_prec_dist = true; - x86_pmu.pebs_block = true; - x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.flags |= PMU_FL_NO_HT_SHARING; - x86_pmu.flags |= PMU_FL_INSTR_LATENCY; - x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = glc_get_event_constraints; extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? @@ -6748,14 +6758,7 @@ __init int intel_pmu_init(void) mem_attr = glc_events_attrs; td_attr = glc_td_events_attrs; tsx_attr = glc_tsx_events_attrs; - x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); - x86_pmu.lbr_pt_coexist = true; - intel_pmu_pebs_data_source_skl(pmem); - x86_pmu.num_topdown_events = 8; - static_call_update(intel_pmu_update_topdown_event, - &icl_update_topdown_event); - static_call_update(intel_pmu_set_topdown_event_period, - &icl_set_topdown_event_period); + intel_pmu_pebs_data_source_skl(true); pr_cont("Sapphire Rapids events, "); name = "sapphire_rapids"; break; -- Gitee From f18be8f8dca1fcdffe021f26be13838d31570365 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 29 Aug 2023 05:58:03 -0700 Subject: [PATCH 1077/2138] perf/x86/intel: Factor out the initialization code for ADL e-core ANBZ: #9622 commit d87d221f854b62f5e8026505497d33404ef6050c upstream. From PMU's perspective, the ADL e-core and newer SRF/GRR have a similar uarch. Most of the initialization code can be shared. 
Factor out intel_pmu_init_grt() for the common initialization code. The common part of the ADL e-core will be replaced by the later patch. Intel-SIG: commit d87d221f854b perf/x86/intel: Factor out the initialization code for ADL e-core Backport as a dependency needed by the GNR distinct pmu name fix Signed-off-by: Kan Liang Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20230829125806.3016082-4-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3587 --- arch/x86/events/intel/core.c | 58 +++++++++++++----------------------- 1 file changed, 21 insertions(+), 37 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 9e940b18003c..7aeac9ee85bb 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -6094,6 +6094,25 @@ static __always_inline void intel_pmu_init_glc(struct pmu *pmu) hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints; } +static __always_inline void intel_pmu_init_grt(struct pmu *pmu) +{ + x86_pmu.mid_ack = true; + x86_pmu.limit_period = glc_limit_period; + x86_pmu.pebs_aliases = NULL; + x86_pmu.pebs_prec_dist = true; + x86_pmu.pebs_block = true; + x86_pmu.lbr_pt_coexist = true; + x86_pmu.flags |= PMU_FL_HAS_RSP_1; + x86_pmu.flags |= PMU_FL_INSTR_LATENCY; + + memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids)); + memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); + hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; + hybrid(pmu, event_constraints) = intel_slm_event_constraints; + hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints; + hybrid(pmu, extra_regs) = intel_grt_extra_regs; +} + __init int intel_pmu_init(void) { struct attribute **extra_skl_attr = &empty_attrs; @@ -6372,28 +6391,10 @@ __init int intel_pmu_init(void) break; 
case INTEL_FAM6_ATOM_GRACEMONT: - x86_pmu.mid_ack = true; - memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, - sizeof(hw_cache_event_ids)); - memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, - sizeof(hw_cache_extra_regs)); - hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; - - x86_pmu.event_constraints = intel_slm_event_constraints; - x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints; - x86_pmu.extra_regs = intel_grt_extra_regs; - - x86_pmu.pebs_aliases = NULL; - x86_pmu.pebs_prec_dist = true; - x86_pmu.pebs_block = true; - x86_pmu.lbr_pt_coexist = true; - x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.flags |= PMU_FL_INSTR_LATENCY; - + intel_pmu_init_grt(NULL); intel_pmu_pebs_data_source_grt(); x86_pmu.pebs_latency_data = adl_latency_data_small; x86_pmu.get_event_constraints = tnt_get_event_constraints; - x86_pmu.limit_period = glc_limit_period; td_attr = tnt_events_attrs; mem_attr = grt_mem_attrs; extra_attr = nhm_format_attr; @@ -6403,28 +6404,11 @@ __init int intel_pmu_init(void) case INTEL_FAM6_ATOM_CRESTMONT: case INTEL_FAM6_ATOM_CRESTMONT_X: - x86_pmu.mid_ack = true; - memcpy(hw_cache_event_ids, glp_hw_cache_event_ids, - sizeof(hw_cache_event_ids)); - memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs, - sizeof(hw_cache_extra_regs)); - hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; - - x86_pmu.event_constraints = intel_slm_event_constraints; - x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints; + intel_pmu_init_grt(NULL); x86_pmu.extra_regs = intel_cmt_extra_regs; - - x86_pmu.pebs_aliases = NULL; - x86_pmu.pebs_prec_dist = true; - x86_pmu.lbr_pt_coexist = true; - x86_pmu.pebs_block = true; - x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.flags |= PMU_FL_INSTR_LATENCY; - intel_pmu_pebs_data_source_cmt(); x86_pmu.pebs_latency_data = mtl_latency_data_small; x86_pmu.get_event_constraints = cmt_get_event_constraints; - x86_pmu.limit_period = glc_limit_period; td_attr = cmt_events_attrs; mem_attr = 
grt_mem_attrs; extra_attr = cmt_format_attr; -- Gitee From 759d15c1e241bd9af6facb186e297e5971cffd23 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 29 Aug 2023 05:58:04 -0700 Subject: [PATCH 1078/2138] perf/x86/intel: Apply the common initialization code for ADL ANBZ: #9622 commit 299a5fc8e783eed705015e83e381912dbbf3eabc upstream. Use the intel_pmu_init_glc() and intel_pmu_init_grt() to replace the duplicate code for ADL. The current code already checks the PERF_X86_EVENT_TOPDOWN flag before invoking the Topdown metrics functions. (The PERF_X86_EVENT_TOPDOWN flag is to indicate the Topdown metric feature, which is only available for the p-core.) Drop the unnecessary adl_set_topdown_event_period() and adl_update_topdown_event(). Intel-SIG: commit 299a5fc8e783 perf/x86/intel: Apply the common initialization code for ADL Backport as a dependency needed by the GNR distinct pmu name fix Signed-off-by: Kan Liang Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20230829125806.3016082-5-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3587 --- arch/x86/events/intel/core.c | 53 ++---------------------------------- 1 file changed, 2 insertions(+), 51 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 7aeac9ee85bb..137842ed8aef 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2561,16 +2561,6 @@ static int icl_set_topdown_event_period(struct perf_event *event) return 0; } -static int adl_set_topdown_event_period(struct perf_event *event) -{ - struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - - if (pmu->cpu_type != hybrid_big) - return 0; - - return icl_set_topdown_event_period(event); -} - DEFINE_STATIC_CALL(intel_pmu_set_topdown_event_period, x86_perf_event_set_period); static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx) @@ -2713,16 +2703,6 @@ static 
u64 icl_update_topdown_event(struct perf_event *event) x86_pmu.num_topdown_events - 1); } -static u64 adl_update_topdown_event(struct perf_event *event) -{ - struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - - if (pmu->cpu_type != hybrid_big) - return 0; - - return icl_update_topdown_event(event); -} - DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update); static void intel_pmu_read_topdown_event(struct perf_event *event) @@ -6767,32 +6747,11 @@ __init int intel_pmu_init(void) static_branch_enable(&perf_is_hybrid); x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS; - x86_pmu.pebs_aliases = NULL; - x86_pmu.pebs_prec_dist = true; - x86_pmu.pebs_block = true; - x86_pmu.flags |= PMU_FL_HAS_RSP_1; - x86_pmu.flags |= PMU_FL_NO_HT_SHARING; - x86_pmu.flags |= PMU_FL_INSTR_LATENCY; - x86_pmu.lbr_pt_coexist = true; x86_pmu.pebs_latency_data = adl_latency_data_small; - x86_pmu.num_topdown_events = 8; - static_call_update(intel_pmu_update_topdown_event, - &adl_update_topdown_event); - static_call_update(intel_pmu_set_topdown_event_period, - &adl_set_topdown_event_period); - x86_pmu.filter = intel_pmu_filter; x86_pmu.get_event_constraints = adl_get_event_constraints; x86_pmu.hw_config = adl_hw_config; - x86_pmu.limit_period = glc_limit_period; x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type; - /* - * The rtm_abort_event is used to check whether to enable GPRs - * for the RTM abort event. Atom doesn't have the RTM abort - * event. There is no harmful to set it in the common - * x86_pmu.rtm_abort_event. 
- */ - x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04); td_attr = adl_hybrid_events_attrs; mem_attr = adl_hybrid_mem_attrs; @@ -6804,6 +6763,7 @@ __init int intel_pmu_init(void) pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; pmu->name = "cpu_core"; pmu->cpu_type = hybrid_big; + intel_pmu_init_glc(&pmu->pmu); pmu->late_ack = true; if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) { pmu->num_counters = x86_pmu.num_counters + 2; @@ -6833,16 +6793,13 @@ __init int intel_pmu_init(void) pmu->intel_cap.perf_metrics = 1; pmu->intel_cap.pebs_output_pt_available = 0; - memcpy(pmu->hw_cache_event_ids, glc_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids)); - memcpy(pmu->hw_cache_extra_regs, glc_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs)); - pmu->event_constraints = intel_glc_event_constraints; - pmu->pebs_constraints = intel_glc_pebs_event_constraints; pmu->extra_regs = intel_glc_extra_regs; /* Initialize Atom core specific PerfMon capabilities.*/ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; pmu->name = "cpu_atom"; pmu->cpu_type = hybrid_small; + intel_pmu_init_grt(&pmu->pmu); pmu->mid_ack = true; pmu->num_counters = x86_pmu.num_counters; pmu->num_counters_fixed = x86_pmu.num_counters_fixed; @@ -6854,12 +6811,6 @@ __init int intel_pmu_init(void) pmu->intel_cap.perf_metrics = 0; pmu->intel_cap.pebs_output_pt_available = 1; - memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids)); - memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs)); - pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1; - pmu->event_constraints = intel_slm_event_constraints; - pmu->pebs_constraints = intel_grt_pebs_event_constraints; - pmu->extra_regs = intel_grt_extra_regs; if (is_mtl(boot_cpu_data.x86_model)) { x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].extra_regs = intel_rwc_extra_regs; x86_pmu.pebs_latency_data = mtl_latency_data_small; -- Gitee From 
9d62e88832a2f3eaa22098d81f3d9d4327b6e9a0 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 29 Aug 2023 05:58:05 -0700 Subject: [PATCH 1079/2138] perf/x86/intel: Clean up the hybrid CPU type handling code ANBZ: #9622 commit b0560bfd4b70277a4936c82e50e940aa253c95bf upstream. There is a fairly long list of grievances about the current code. The main beefs: 1. hybrid_big_small assumes that the *HARDWARE* (CPUID) provided core types are a bitmap. They are not. If Intel happened to make a core type of 0xff, hilarity would ensue. 2. adl_get_hybrid_cpu_type() utterly inscrutable. There are precisely zero comments and zero changelog about what it is attempting to do. According to Kan, the adl_get_hybrid_cpu_type() is there because some Alder Lake (ADL) CPUs can do some silly things. Some ADL models are *supposed* to be hybrid CPUs with big and little cores, but there are some SKUs that only have big cores. CPUID(0x1a) on those CPUs does not say that the CPUs are big cores. It apparently just returns 0x0. It confuses perf because it expects to see either 0x40 (Core) or 0x20 (Atom). The perf workaround for this is to watch for a CPU core saying it is type 0x0. If that happens on an Alder Lake, it calls x86_pmu.get_hybrid_cpu_type() and just assumes that the core is a Core (0x40) CPU. To fix up the mess, separate out the CPU types and the 'pmu' types. This allows 'hybrid_pmu_type' bitmaps without worrying that some future CPU type will set multiple bits. Since the types are now separate, add a function to glue them back together again. Actual comment on the situation in the glue function (find_hybrid_pmu_for_cpu()). Also, give ->get_hybrid_cpu_type() a real return type and make it clear that it is overriding the *CPU* type, not the PMU type. Rename cpu_type to pmu_type in the struct x86_hybrid_pmu to reflect the change. 
Intel-SIG: commit b0560bfd4b70 perf/x86/intel: Clean up the hybrid CPU type handling code Backport as a dependency needed by the GNR distinct pmu name fix Originally-by: Dave Hansen Signed-off-by: Kan Liang Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20230829125806.3016082-6-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3587 --- arch/x86/events/core.c | 6 +-- arch/x86/events/intel/core.c | 71 ++++++++++++++++++++++++------------ arch/x86/events/intel/ds.c | 2 +- arch/x86/events/perf_event.h | 35 +++++++++++------- 4 files changed, 73 insertions(+), 41 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 9c3ccd975858..a620ef3e790f 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1890,9 +1890,9 @@ ssize_t events_hybrid_sysfs_show(struct device *dev, str = pmu_attr->event_str; for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { - if (!(x86_pmu.hybrid_pmu[i].cpu_type & pmu_attr->pmu_type)) + if (!(x86_pmu.hybrid_pmu[i].pmu_type & pmu_attr->pmu_type)) continue; - if (x86_pmu.hybrid_pmu[i].cpu_type & pmu->cpu_type) { + if (x86_pmu.hybrid_pmu[i].pmu_type & pmu->pmu_type) { next_str = strchr(str, ';'); if (next_str) return snprintf(page, next_str - str + 1, "%s", str); @@ -2172,7 +2172,7 @@ static int __init init_hw_perf_events(void) hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_EXTENDED_HW_TYPE; err = perf_pmu_register(&hybrid_pmu->pmu, hybrid_pmu->name, - (hybrid_pmu->cpu_type == hybrid_big) ? PERF_TYPE_RAW : -1); + (hybrid_pmu->pmu_type == hybrid_big) ? 
PERF_TYPE_RAW : -1); if (err) break; } diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 137842ed8aef..a6afa84010c4 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3864,7 +3864,7 @@ static inline bool require_mem_loads_aux_event(struct perf_event *event) return false; if (is_hybrid()) - return hybrid_pmu(event->pmu)->cpu_type == hybrid_big; + return hybrid_pmu(event->pmu)->pmu_type == hybrid_big; return true; } @@ -4416,9 +4416,9 @@ adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, { struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - if (pmu->cpu_type == hybrid_big) + if (pmu->pmu_type == hybrid_big) return glc_get_event_constraints(cpuc, idx, event); - else if (pmu->cpu_type == hybrid_small) + else if (pmu->pmu_type == hybrid_small) return tnt_get_event_constraints(cpuc, idx, event); WARN_ON(1); @@ -4493,9 +4493,9 @@ mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, { struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - if (pmu->cpu_type == hybrid_big) + if (pmu->pmu_type == hybrid_big) return rwc_get_event_constraints(cpuc, idx, event); - if (pmu->cpu_type == hybrid_small) + if (pmu->pmu_type == hybrid_small) return cmt_get_event_constraints(cpuc, idx, event); WARN_ON(1); @@ -4506,18 +4506,18 @@ static int adl_hw_config(struct perf_event *event) { struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); - if (pmu->cpu_type == hybrid_big) + if (pmu->pmu_type == hybrid_big) return hsw_hw_config(event); - else if (pmu->cpu_type == hybrid_small) + else if (pmu->pmu_type == hybrid_small) return intel_pmu_hw_config(event); WARN_ON(1); return -EOPNOTSUPP; } -static u8 adl_get_hybrid_cpu_type(void) +static enum hybrid_cpu_type adl_get_hybrid_cpu_type(void) { - return hybrid_big; + return HYBRID_INTEL_CORE; } static inline bool erratum_hsw11(struct perf_event *event) @@ -4711,22 +4711,47 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu) } } -static bool init_hybrid_pmu(int 
cpu) +static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void) { - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); u8 cpu_type = get_this_hybrid_cpu_type(); - struct x86_hybrid_pmu *pmu = NULL; int i; - if (!cpu_type && x86_pmu.get_hybrid_cpu_type) - cpu_type = x86_pmu.get_hybrid_cpu_type(); + /* + * This is running on a CPU model that is known to have hybrid + * configurations. But the CPU told us it is not hybrid, shame + * on it. There should be a fixup function provided for these + * troublesome CPUs (->get_hybrid_cpu_type). + */ + if (cpu_type == HYBRID_INTEL_NONE) { + if (x86_pmu.get_hybrid_cpu_type) + cpu_type = x86_pmu.get_hybrid_cpu_type(); + else + return NULL; + } + /* + * This essentially just maps between the 'hybrid_cpu_type' + * and 'hybrid_pmu_type' enums: + */ for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { - if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) { - pmu = &x86_pmu.hybrid_pmu[i]; - break; - } + enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type; + + if (cpu_type == HYBRID_INTEL_CORE && + pmu_type == hybrid_big) + return &x86_pmu.hybrid_pmu[i]; + if (cpu_type == HYBRID_INTEL_ATOM && + pmu_type == hybrid_small) + return &x86_pmu.hybrid_pmu[i]; } + + return NULL; +} + +static bool init_hybrid_pmu(int cpu) +{ + struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + struct x86_hybrid_pmu *pmu = find_hybrid_pmu_for_cpu(); + if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) { cpuc->pmu = NULL; return false; @@ -5815,7 +5840,7 @@ static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr) struct perf_pmu_events_hybrid_attr *pmu_attr = container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr); - return pmu->cpu_type & pmu_attr->pmu_type; + return pmu->pmu_type & pmu_attr->pmu_type; } static umode_t hybrid_events_is_visible(struct kobject *kobj, @@ -5852,7 +5877,7 @@ static umode_t hybrid_format_is_visible(struct kobject *kobj, container_of(attr, struct perf_pmu_format_hybrid_attr, 
attr.attr); int cpu = hybrid_find_supported_cpu(pmu); - return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? attr->mode : 0; + return (cpu >= 0) && (pmu->pmu_type & pmu_attr->pmu_type) ? attr->mode : 0; } static umode_t hybrid_td_is_visible(struct kobject *kobj, @@ -5867,7 +5892,7 @@ static umode_t hybrid_td_is_visible(struct kobject *kobj, /* Only the big core supports perf metrics */ - if (pmu->cpu_type == hybrid_big) + if (pmu->pmu_type == hybrid_big) return pmu->intel_cap.perf_metrics ? attr->mode : 0; return attr->mode; @@ -6762,7 +6787,7 @@ __init int intel_pmu_init(void) /* Initialize big core specific PerfMon capabilities.*/ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; pmu->name = "cpu_core"; - pmu->cpu_type = hybrid_big; + pmu->pmu_type = hybrid_big; intel_pmu_init_glc(&pmu->pmu); pmu->late_ack = true; if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) { @@ -6798,7 +6823,7 @@ __init int intel_pmu_init(void) /* Initialize Atom core specific PerfMon capabilities.*/ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; pmu->name = "cpu_atom"; - pmu->cpu_type = hybrid_small; + pmu->pmu_type = hybrid_small; intel_pmu_init_grt(&pmu->pmu); pmu->mid_ack = true; pmu->num_counters = x86_pmu.num_counters; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index d9d5573e0cac..c165c8202ad0 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -261,7 +261,7 @@ static u64 __adl_latency_data_small(struct perf_event *event, u64 status, { u64 val; - WARN_ON_ONCE(hybrid_pmu(event->pmu)->cpu_type == hybrid_big); + WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big); dse &= PERF_PEBS_DATA_SOURCE_MASK; val = hybrid_var(event->pmu, pebs_data_source)[dse]; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 38342d1614f5..fb56518356ec 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -658,10 +658,29 @@ enum { #define PERF_PEBS_DATA_SOURCE_MAX 0x10 #define 
PERF_PEBS_DATA_SOURCE_MASK (PERF_PEBS_DATA_SOURCE_MAX - 1) +enum hybrid_cpu_type { + HYBRID_INTEL_NONE, + HYBRID_INTEL_ATOM = 0x20, + HYBRID_INTEL_CORE = 0x40, +}; + +enum hybrid_pmu_type { + not_hybrid, + hybrid_small = BIT(0), + hybrid_big = BIT(1), + + hybrid_big_small = hybrid_big | hybrid_small, /* only used for matching */ +}; + +#define X86_HYBRID_PMU_ATOM_IDX 0 +#define X86_HYBRID_PMU_CORE_IDX 1 + +#define X86_HYBRID_NUM_PMUS 2 + struct x86_hybrid_pmu { struct pmu pmu; const char *name; - u8 cpu_type; + enum hybrid_pmu_type pmu_type; cpumask_t supported_cpus; union perf_capabilities intel_cap; u64 intel_ctrl; @@ -727,18 +746,6 @@ extern struct static_key_false perf_is_hybrid; __Fp; \ }) -enum hybrid_pmu_type { - hybrid_big = 0x40, - hybrid_small = 0x20, - - hybrid_big_small = hybrid_big | hybrid_small, -}; - -#define X86_HYBRID_PMU_ATOM_IDX 0 -#define X86_HYBRID_PMU_CORE_IDX 1 - -#define X86_HYBRID_NUM_PMUS 2 - /* * struct x86_pmu - generic x86 pmu */ @@ -947,7 +954,7 @@ struct x86_pmu { */ int num_hybrid_pmus; struct x86_hybrid_pmu *hybrid_pmu; - u8 (*get_hybrid_cpu_type) (void); + enum hybrid_cpu_type (*get_hybrid_cpu_type) (void); }; struct x86_perf_task_context_opt { -- Gitee From 810fed066f1bbc0f3c9571ada4b1973344be6c76 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 29 Aug 2023 05:58:06 -0700 Subject: [PATCH 1080/2138] perf/x86/intel: Add common intel_pmu_init_hybrid() ANBZ: #9622 commit 97588df87b56e27fd2b5d928d61c7a53e38afbb0 upstream. The current hybrid initialization codes aren't well organized and are hard to read. Factor out intel_pmu_init_hybrid() to do a common setup for each hybrid PMU. The PMU-specific capability will be updated later via either hard code (ADL) or CPUID hybrid enumeration (MTL). Splitting the ADL and MTL initialization codes, since they have different uarches. The hard code PMU capabilities are not required for MTL either. They can be enumerated by the new leaf 0x23 and IA32_PERF_CAPABILITIES MSR. 
The hybrid enumeration of the IA32_PERF_CAPABILITIES MSR is broken on MTL. Using the default value. Intel-SIG: commit 97588df87b56 perf/x86/intel: Add common intel_pmu_init_hybrid() Backport as a dependency needed by the GNR distinct pmu name fix Signed-off-by: Kan Liang Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20230829125806.3016082-7-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3587 --- arch/x86/events/intel/core.c | 162 ++++++++++++++++++++++++----------- 1 file changed, 111 insertions(+), 51 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index a6afa84010c4..5680d7e00864 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4696,6 +4696,16 @@ static void intel_pmu_check_num_counters(int *num_counters, int *num_counters_fixed, u64 *intel_ctrl, u64 fixed_mask); +static inline bool intel_pmu_broken_perf_cap(void) +{ + /* The Perf Metric (Bit 15) is always cleared */ + if ((boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE) || + (boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE_L)) + return true; + + return false; +} + static void update_pmu_cap(struct x86_hybrid_pmu *pmu) { unsigned int sub_bitmaps = cpuid_eax(ARCH_PERFMON_EXT_LEAF); @@ -4708,7 +4718,27 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu) pmu->num_counters_fixed = fls(ebx); intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed, &pmu->intel_ctrl, ebx); + pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); + pmu->unconstrained = (struct event_constraint) + __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, + 0, pmu->num_counters, 0, 0); } + + + if (!intel_pmu_broken_perf_cap()) { + /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */ + rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities); + } + + if 
(pmu->intel_cap.perf_metrics) + pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; + else + pmu->intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS); + + if (pmu->intel_cap.pebs_output_pt_available) + pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT; + else + pmu->pmu.capabilities |= ~PERF_PMU_CAP_AUX_OUTPUT; } static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void) @@ -6069,10 +6099,52 @@ static void intel_pmu_check_hybrid_pmus(u64 fixed_mask) } } -static __always_inline bool is_mtl(u8 x86_model) +static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = { + { hybrid_small, "cpu_atom" }, + { hybrid_big, "cpu_core" }, +}; + +static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus) { - return (x86_model == INTEL_FAM6_METEORLAKE) || - (x86_model == INTEL_FAM6_METEORLAKE_L); + unsigned long pmus_mask = pmus; + struct x86_hybrid_pmu *pmu; + int idx = 0, bit; + + x86_pmu.num_hybrid_pmus = hweight_long(pmus_mask); + x86_pmu.hybrid_pmu = kcalloc(x86_pmu.num_hybrid_pmus, + sizeof(struct x86_hybrid_pmu), + GFP_KERNEL); + if (!x86_pmu.hybrid_pmu) + return -ENOMEM; + + static_branch_enable(&perf_is_hybrid); + x86_pmu.filter = intel_pmu_filter; + + for_each_set_bit(bit, &pmus_mask, ARRAY_SIZE(intel_hybrid_pmu_type_map)) { + pmu = &x86_pmu.hybrid_pmu[idx++]; + pmu->pmu_type = intel_hybrid_pmu_type_map[bit].id; + pmu->name = intel_hybrid_pmu_type_map[bit].name; + + pmu->num_counters = x86_pmu.num_counters; + pmu->num_counters_fixed = x86_pmu.num_counters_fixed; + pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); + pmu->unconstrained = (struct event_constraint) + __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, + 0, pmu->num_counters, 0, 0); + + pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; + if (pmu->pmu_type & hybrid_small) { + pmu->intel_cap.perf_metrics = 0; + pmu->intel_cap.pebs_output_pt_available = 1; + pmu->mid_ack = true; + } else if (pmu->pmu_type & 
hybrid_big) { + pmu->intel_cap.perf_metrics = 1; + pmu->intel_cap.pebs_output_pt_available = 0; + pmu->late_ack = true; + } + } + + return 0; } static __always_inline void intel_pmu_init_glc(struct pmu *pmu) @@ -6757,23 +6829,14 @@ __init int intel_pmu_init(void) case INTEL_FAM6_RAPTORLAKE: case INTEL_FAM6_RAPTORLAKE_P: case INTEL_FAM6_RAPTORLAKE_S: - case INTEL_FAM6_METEORLAKE: - case INTEL_FAM6_METEORLAKE_L: /* * Alder Lake has 2 types of CPU, core and atom. * * Initialize the common PerfMon capabilities here. */ - x86_pmu.hybrid_pmu = kcalloc(X86_HYBRID_NUM_PMUS, - sizeof(struct x86_hybrid_pmu), - GFP_KERNEL); - if (!x86_pmu.hybrid_pmu) - return -ENOMEM; - static_branch_enable(&perf_is_hybrid); - x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS; + intel_pmu_init_hybrid(hybrid_big_small); x86_pmu.pebs_latency_data = adl_latency_data_small; - x86_pmu.filter = intel_pmu_filter; x86_pmu.get_event_constraints = adl_get_event_constraints; x86_pmu.hw_config = adl_hw_config; x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type; @@ -6786,10 +6849,7 @@ __init int intel_pmu_init(void) /* Initialize big core specific PerfMon capabilities.*/ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; - pmu->name = "cpu_core"; - pmu->pmu_type = hybrid_big; intel_pmu_init_glc(&pmu->pmu); - pmu->late_ack = true; if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) { pmu->num_counters = x86_pmu.num_counters + 2; pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1; @@ -6814,45 +6874,45 @@ __init int intel_pmu_init(void) pmu->unconstrained = (struct event_constraint) __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, 0, pmu->num_counters, 0, 0); - pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; - pmu->intel_cap.perf_metrics = 1; - pmu->intel_cap.pebs_output_pt_available = 0; - pmu->extra_regs = intel_glc_extra_regs; /* Initialize Atom core specific PerfMon capabilities.*/ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; - pmu->name = "cpu_atom"; - pmu->pmu_type = 
hybrid_small; intel_pmu_init_grt(&pmu->pmu); - pmu->mid_ack = true; - pmu->num_counters = x86_pmu.num_counters; - pmu->num_counters_fixed = x86_pmu.num_counters_fixed; - pmu->max_pebs_events = x86_pmu.max_pebs_events; - pmu->unconstrained = (struct event_constraint) - __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, - 0, pmu->num_counters, 0, 0); - pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; - pmu->intel_cap.perf_metrics = 0; - pmu->intel_cap.pebs_output_pt_available = 1; - - if (is_mtl(boot_cpu_data.x86_model)) { - x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].extra_regs = intel_rwc_extra_regs; - x86_pmu.pebs_latency_data = mtl_latency_data_small; - extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? - mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; - mem_attr = mtl_hybrid_mem_attrs; - intel_pmu_pebs_data_source_mtl(); - x86_pmu.get_event_constraints = mtl_get_event_constraints; - pmu->extra_regs = intel_cmt_extra_regs; - pr_cont("Meteorlake Hybrid events, "); - name = "meteorlake_hybrid"; - } else { - x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; - intel_pmu_pebs_data_source_adl(); - pr_cont("Alderlake Hybrid events, "); - name = "alderlake_hybrid"; - } + + x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; + intel_pmu_pebs_data_source_adl(); + pr_cont("Alderlake Hybrid events, "); + name = "alderlake_hybrid"; + break; + + case INTEL_FAM6_METEORLAKE: + case INTEL_FAM6_METEORLAKE_L: + intel_pmu_init_hybrid(hybrid_big_small); + + x86_pmu.pebs_latency_data = mtl_latency_data_small; + x86_pmu.get_event_constraints = mtl_get_event_constraints; + x86_pmu.hw_config = adl_hw_config; + + td_attr = adl_hybrid_events_attrs; + mem_attr = mtl_hybrid_mem_attrs; + tsx_attr = adl_hybrid_tsx_attrs; + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
+ mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; + + /* Initialize big core specific PerfMon capabilities.*/ + pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; + intel_pmu_init_glc(&pmu->pmu); + pmu->extra_regs = intel_rwc_extra_regs; + + /* Initialize Atom core specific PerfMon capabilities.*/ + pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; + intel_pmu_init_grt(&pmu->pmu); + pmu->extra_regs = intel_cmt_extra_regs; + + intel_pmu_pebs_data_source_mtl(); + pr_cont("Meteorlake Hybrid events, "); + name = "meteorlake_hybrid"; break; default: @@ -6964,7 +7024,7 @@ __init int intel_pmu_init(void) if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; - if (is_hybrid()) + if (is_hybrid() && !boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) intel_pmu_check_hybrid_pmus((u64)fixed_mask); if (x86_pmu.intel_cap.pebs_timing_info) -- Gitee From 249a503cd1e9763028ad18dfa085ff8cac1ce9ba Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 8 Jul 2024 12:33:35 -0700 Subject: [PATCH 1081/2138] perf/x86/intel: Add a distinct name for Granite Rapids MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #9622 commit fa0c1c9d283b37fdb7fc1dcccbb88fc8f48a4aa4 upstream. Currently, the Sapphire Rapids and Granite Rapids share the same PMU name, sapphire_rapids. Because from the kernel’s perspective, GNR is similar to SPR. The only key difference is that they support different extra MSRs. The code path and the PMU name are shared. However, from end users' perspective, they are quite different. Besides the extra MSRs, GNR has a newer PEBS format, supports Retire Latency, supports new CPUID enumeration architecture, doesn't required the load-latency AUX event, has additional TMA Level 1 Architectural Events, etc. The differences can be enumerated by CPUID or the PERF_CAPABILITIES MSR. They weren't reflected in the model-specific kernel setup. 
But it is worth to have a distinct PMU name for GNR. Intel-SIG: commit fa0c1c9d283b perf/x86/intel: Add a distinct name for Granite Rapids Backport 3 core pmu bugfixes to kernel v6.6 Fixes: a6742cb90b56 ("perf/x86/intel: Fix the FRONTEND encoding on GNR and MTL") Suggested-by: Ahmad Yasin Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20240708193336.1192217-3-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3587 --- arch/x86/events/intel/core.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 5680d7e00864..6cd5f33e8ad4 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -6804,12 +6804,18 @@ __init int intel_pmu_init(void) case INTEL_FAM6_EMERALDRAPIDS_X: x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX; x86_pmu.extra_regs = intel_glc_extra_regs; - fallthrough; + pr_cont("Sapphire Rapids events, "); + name = "sapphire_rapids"; + goto glc_common; + case INTEL_FAM6_GRANITERAPIDS_X: case INTEL_FAM6_GRANITERAPIDS_D: + x86_pmu.extra_regs = intel_rwc_extra_regs; + pr_cont("Granite Rapids events, "); + name = "granite_rapids"; + + glc_common: intel_pmu_init_glc(NULL); - if (!x86_pmu.extra_regs) - x86_pmu.extra_regs = intel_rwc_extra_regs; x86_pmu.pebs_ept = 1; x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = glc_get_event_constraints; @@ -6820,8 +6826,6 @@ __init int intel_pmu_init(void) td_attr = glc_td_events_attrs; tsx_attr = glc_tsx_events_attrs; intel_pmu_pebs_data_source_skl(true); - pr_cont("Sapphire Rapids events, "); - name = "sapphire_rapids"; break; case INTEL_FAM6_ALDERLAKE: -- Gitee From 8406b5273b909996fd6b2a4b7b0b7067c6c2be4b Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 11 Sep 2023 06:51:28 -0700 Subject: [PATCH 1082/2138] 
perf/x86/intel: Fix broken fixed event constraints extension ANBZ: #9622 commit 950ecdc672aec9cd29036b2e2535b07c103af494 upstream. Unnecessary multiplexing is triggered when running an "instructions" event on an MTL. perf stat -e cpu_core/instructions/,cpu_core/instructions/ -a sleep 1 Performance counter stats for 'system wide': 115,489,000 cpu_core/instructions/ (50.02%) 127,433,777 cpu_core/instructions/ (49.98%) 1.002294504 seconds time elapsed Linux architectural perf events, e.g., cycles and instructions, usually have dedicated fixed counters. These events also have equivalent events which can be used in the general-purpose counters. The counters are precious. In the intel_pmu_check_event_constraints(), perf check/extend the event constraints of these events. So these events can utilize both fixed counters and general-purpose counters. The following cleanup commit: 97588df87b56 ("perf/x86/intel: Add common intel_pmu_init_hybrid()") forgot adding the intel_pmu_check_event_constraints() into update_pmu_cap(). The architectural perf events cannot utilize the general-purpose counters. The code to check and update the counters, event constraints and extra_regs is the same among hybrid systems. Move intel_pmu_check_hybrid_pmus() to init_hybrid_pmu(), and remove the duplicate check in update_pmu_cap(). 
Intel-SIG: commit 950ecdc672ae perf/x86/intel: Fix broken fixed event constraints extension Backport following hybrid pmu bugfixes for commit 97588df87b56 Fixes: 97588df87b56 ("perf/x86/intel: Add common intel_pmu_init_hybrid()") Signed-off-by: Kan Liang Signed-off-by: Ingo Molnar Link: https://lore.kernel.org/r/20230911135128.2322833-1-kan.liang@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3587 --- arch/x86/events/intel/core.c | 65 +++++++++++++++--------------------- 1 file changed, 26 insertions(+), 39 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 6cd5f33e8ad4..0d42d1363db5 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4696,6 +4696,13 @@ static void intel_pmu_check_num_counters(int *num_counters, int *num_counters_fixed, u64 *intel_ctrl, u64 fixed_mask); +static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints, + int num_counters, + int num_counters_fixed, + u64 intel_ctrl); + +static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs); + static inline bool intel_pmu_broken_perf_cap(void) { /* The Perf Metric (Bit 15) is always cleared */ @@ -4716,12 +4723,6 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu) &eax, &ebx, &ecx, &edx); pmu->num_counters = fls(eax); pmu->num_counters_fixed = fls(ebx); - intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed, - &pmu->intel_ctrl, ebx); - pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); - pmu->unconstrained = (struct event_constraint) - __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, - 0, pmu->num_counters, 0, 0); } @@ -4729,6 +4730,16 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu) /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */ rdmsrl(MSR_IA32_PERF_CAPABILITIES, pmu->intel_cap.capabilities); } +} 
+ +static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu) +{ + intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed, + &pmu->intel_ctrl, (1ULL << pmu->num_counters_fixed) - 1); + pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters); + pmu->unconstrained = (struct event_constraint) + __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1, + 0, pmu->num_counters, 0, 0); if (pmu->intel_cap.perf_metrics) pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; @@ -4739,6 +4750,13 @@ static void update_pmu_cap(struct x86_hybrid_pmu *pmu) pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT; else pmu->pmu.capabilities |= ~PERF_PMU_CAP_AUX_OUTPUT; + + intel_pmu_check_event_constraints(pmu->event_constraints, + pmu->num_counters, + pmu->num_counters_fixed, + pmu->intel_ctrl); + + intel_pmu_check_extra_regs(pmu->extra_regs); } static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void) @@ -4794,6 +4812,8 @@ static bool init_hybrid_pmu(int cpu) if (this_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) update_pmu_cap(pmu); + intel_pmu_check_hybrid_pmus(pmu); + if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed)) return false; @@ -6069,36 +6089,6 @@ static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs) } } -static void intel_pmu_check_hybrid_pmus(u64 fixed_mask) -{ - struct x86_hybrid_pmu *pmu; - int i; - - for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { - pmu = &x86_pmu.hybrid_pmu[i]; - - intel_pmu_check_num_counters(&pmu->num_counters, - &pmu->num_counters_fixed, - &pmu->intel_ctrl, - fixed_mask); - - if (pmu->intel_cap.perf_metrics) { - pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; - pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS; - } - - if (pmu->intel_cap.pebs_output_pt_available) - pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT; - - intel_pmu_check_event_constraints(pmu->event_constraints, - pmu->num_counters, - pmu->num_counters_fixed, - pmu->intel_ctrl); - - 
intel_pmu_check_extra_regs(pmu->extra_regs); - } -} - static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = { { hybrid_small, "cpu_atom" }, { hybrid_big, "cpu_core" }, @@ -7028,9 +7018,6 @@ __init int intel_pmu_init(void) if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS; - if (is_hybrid() && !boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT)) - intel_pmu_check_hybrid_pmus((u64)fixed_mask); - if (x86_pmu.intel_cap.pebs_timing_info) x86_pmu.flags |= PMU_FL_RETIRE_LATENCY; -- Gitee From 51a940b9431239289df7987024209c0349dd492e Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Tue, 21 Nov 2023 09:46:28 +0800 Subject: [PATCH 1083/2138] perf/x86/intel: Correct incorrect 'or' operation for PMU capabilities ANBZ: #9622 commit e8df9d9f4209c04161321d8c12640ae560f65939 upstream. When running perf-stat command on Intel hybrid platform, perf-stat reports the following errors: sudo taskset -c 7 ./perf stat -vvvv -e cpu_atom/instructions/ sleep 1 Opening: cpu/cycles/:HG ------------------------------------------------------------ perf_event_attr: type 0 (PERF_TYPE_HARDWARE) config 0xa00000000 disabled 1 ------------------------------------------------------------ sys_perf_event_open: pid 0 cpu -1 group_fd -1 flags 0x8 sys_perf_event_open failed, error -16 Performance counter stats for 'sleep 1': cpu_atom/instructions/ It looks the cpu_atom/instructions/ event can't be enabled on atom PMU even when the process is pinned on atom core. Investigation shows that exclusive_event_init() helper always returns -EBUSY error in the perf event creation. That's strange since the atom PMU should not be an exclusive PMU. 
Further investigation shows the issue was introduced by commit: 97588df87b56 ("perf/x86/intel: Add common intel_pmu_init_hybrid()") The commit originally intents to clear the bit PERF_PMU_CAP_AUX_OUTPUT from PMU capabilities if intel_cap.pebs_output_pt_available is not set, but it incorrectly uses 'or' operation and leads to all PMU capabilities bits are set to 1 except bit PERF_PMU_CAP_AUX_OUTPUT. Testing this fix on Intel hybrid platforms, the observed issues disappear. Intel-SIG: commit e8df9d9f4209 perf/x86/intel: Correct incorrect 'or' operation for PMU capabilities Backport following hybrid pmu bugfixes for commit 97588df87b56 Fixes: 97588df87b56 ("perf/x86/intel: Add common intel_pmu_init_hybrid()") Signed-off-by: Dapeng Mi Signed-off-by: Ingo Molnar Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20231121014628.729989-1-dapeng1.mi@linux.intel.com [ Yunying Sun: amend commit log ] Signed-off-by: Yunying Sun Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3587 --- arch/x86/events/intel/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 0d42d1363db5..e2c1c51d8a01 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4749,7 +4749,7 @@ static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu) if (pmu->intel_cap.pebs_output_pt_available) pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT; else - pmu->pmu.capabilities |= ~PERF_PMU_CAP_AUX_OUTPUT; + pmu->pmu.capabilities &= ~PERF_PMU_CAP_AUX_OUTPUT; intel_pmu_check_event_constraints(pmu->event_constraints, pmu->num_counters, -- Gitee From 80693ec0ce816d0e2d93539c8d66d30320958cfa Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 28 Jun 2024 16:09:36 +0800 Subject: [PATCH 1084/2138] anolis: Update Zhaoxin GMI SM3 Secure Hash algorithm driver ANBZ: #9447 Update Zhaoxin GMI SM3 Secure Hash algorithm driver and Improve functionality Signed-off-by: leoliu-oc 
Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3446 --- arch/x86/crypto/sm3-zhaoxin-gmi.c | 77 +++++++++---------------------- 1 file changed, 21 insertions(+), 56 deletions(-) diff --git a/arch/x86/crypto/sm3-zhaoxin-gmi.c b/arch/x86/crypto/sm3-zhaoxin-gmi.c index e393133d572d..c39b7de97ce1 100644 --- a/arch/x86/crypto/sm3-zhaoxin-gmi.c +++ b/arch/x86/crypto/sm3-zhaoxin-gmi.c @@ -22,14 +22,6 @@ #include #include -const u8 zx_sm3_zero_message_hash[SM3_DIGEST_SIZE] = { - 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, - 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F, - 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74, - 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B -}; -EXPORT_SYMBOL_GPL(zx_sm3_zero_message_hash); - /* * Load supported features of the CPU to see if the SM3/SM4 is available. */ @@ -39,10 +31,9 @@ static int gmi_available(void) u32 eax, edx; if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || - ((c->x86 == 6) && (c->x86_model == 0x09)) || - (c->x86 > 6)) { + ((c->x86 == 6) && (c->x86_model == 0x09)) || + (c->x86 > 6)) { if (!boot_cpu_has(X86_FEATURE_CCS) || !boot_cpu_has(X86_FEATURE_CCS_EN)) { - eax = 0xC0000001; __asm__ __volatile__ ("cpuid":"=d"(edx):"a"(eax) : ); @@ -58,9 +49,9 @@ static int gmi_available(void) return -ENODEV; } -void sm3_generic_block_fn(struct sm3_state *sst, const u8 *inp, int blockcnt) +static void sm3_generic_block_fn(struct sm3_state *sst, const u8 *inp, int blockcnt) { - unsigned long in, out, cnt; + unsigned long in, out, cnt, blksz, ctrl; if (!blockcnt) return; @@ -68,38 +59,14 @@ void sm3_generic_block_fn(struct sm3_state *sst, const u8 *inp, int blockcnt) in = (unsigned long)inp; out = (unsigned long)(sst->state); cnt = (unsigned long)blockcnt; + blksz = 0x20; + ctrl = -1; __asm__ __volatile__( - #ifdef __x86_64__ - "pushq %%rbp\n" - "pushq %%rbx\n" - "pushq %%rsi\n" - "pushq %%rdi\n" - "movq $-1, %%rax\n" - "movq $0x20, %%rbx\n" - #else - "pushl %%ebp\n" - "pushl %%ebx\n" - "pushl %%esi\n" - 
"pushl %%edi\n" - "movl $-1, %%eax\n" - "movl $0x20, %%ebx\n" - #endif ".byte 0xf3,0x0f,0xa6,0xe8\n" - #ifdef __x86_64__ - "popq %%rdi\n" - "popq %%rsi\n" - "popq %%rbx\n" - "popq %%rbp\n" - #else - "popl %%edi\n" - "popl %%esi\n" - "popl %%ebx\n" - "popl %%ebp\n" - #endif - : - : "S"(in), "D"(out), "c"(cnt) - : + : "+S"(in) + : "S"(in), "D"(out), "c"(cnt), "b"(blksz), "a"(ctrl) + : "memory" ); } @@ -137,11 +104,10 @@ static inline int zx_sm3_base_finish(struct shash_desc *desc, u8 *out) return 0; } -int zx_sm3_update(struct shash_desc *desc, const u8 *data, unsigned int len) +static int zx_sm3_update(struct shash_desc *desc, const u8 *data, unsigned int len) { return sm3_base_do_update(desc, data, len, sm3_generic_block_fn); } -EXPORT_SYMBOL(zx_sm3_update); static int zx_sm3_final(struct shash_desc *desc, u8 *out) { @@ -150,27 +116,26 @@ static int zx_sm3_final(struct shash_desc *desc, u8 *out) return zx_sm3_base_finish(desc, out); } -int zx_sm3_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash) +static int zx_sm3_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash) { sm3_base_do_update(desc, data, len, sm3_generic_block_fn); return zx_sm3_final(desc, hash); } -EXPORT_SYMBOL(zx_sm3_finup); static struct shash_alg zx_sm3_alg = { .digestsize = SM3_DIGEST_SIZE, - .init = zx_sm3_init, - .update = zx_sm3_update, - .final = zx_sm3_final, - .finup = zx_sm3_finup, + .init = zx_sm3_init, + .update = zx_sm3_update, + .final = zx_sm3_final, + .finup = zx_sm3_finup, .descsize = sizeof(struct sm3_state), - .base = { - .cra_name = "sm3", - .cra_driver_name = "sm3-zhaoxin-gmi", - .cra_priority = 300, - .cra_blocksize = SM3_BLOCK_SIZE, - .cra_module = THIS_MODULE, + .base = { + .cra_name = "sm3", + .cra_driver_name = "sm3-zhaoxin-gmi", + .cra_priority = 300, + .cra_blocksize = SM3_BLOCK_SIZE, + .cra_module = THIS_MODULE, } }; -- Gitee From 831bf04a3809b0005c9717fe548d3e8c5e14955d Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 28 
Jun 2024 16:25:05 +0800 Subject: [PATCH 1085/2138] anolis: Update Zhaoxin GMI SM4 Block Cipher algorithm driver ANBZ: #9447 Update Zhaoxin GMI SM4 Block Cipher algorithm driver and Improve functionality Signed-off-by: leoliu-oc Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3446 --- arch/x86/crypto/sm4-zhaoxin-gmi.c | 347 +++++++++++++----------------- 1 file changed, 148 insertions(+), 199 deletions(-) diff --git a/arch/x86/crypto/sm4-zhaoxin-gmi.c b/arch/x86/crypto/sm4-zhaoxin-gmi.c index ec57b4ca4644..5a3695fcf647 100644 --- a/arch/x86/crypto/sm4-zhaoxin-gmi.c +++ b/arch/x86/crypto/sm4-zhaoxin-gmi.c @@ -23,7 +23,6 @@ #include #include - #define SM4_ECB (1<<6) #define SM4_CBC (1<<7) #define SM4_CFB (1<<8) @@ -36,7 +35,7 @@ /* Control word. */ struct sm4_cipher_data { - u8 iv[SM4_BLOCK_SIZE]; /* Initialization vector */ + u8 iv[SM4_BLOCK_SIZE]; /* Initialization vector */ union { u32 pad; struct { @@ -45,80 +44,54 @@ struct sm4_cipher_data { u32 mode:5; u32 digest:1; } b; - } cword; /* Control word */ - struct sm4_ctx keys; /* Encryption key */ + } cword; /* Control word */ + struct sm4_ctx keys; /* Encryption key */ }; static u8 *rep_xcrypt(const u8 *input, u8 *output, void *key, u8 *iv, - struct sm4_cipher_data *sm4_data, u64 count) + struct sm4_cipher_data *sm4_data, u64 count) { unsigned long rax = sm4_data->cword.pad; - // Set the flag for encryption or decryption + /* Set the flag for encryption or decryption */ if (sm4_data->cword.b.encdec == 1) rax &= ~0x01; else rax |= 0x01; __asm__ __volatile__( - #ifdef __x86_64__ - "pushq %%rbp\n\n" - "pushq %%rbx\n\n" - "pushq %%rcx\n\n" - "pushq %%rsi\n\n" - "pushq %%rdi\n\n" - #else - "pushl %%ebp\n\n" - "pushl %%ebx\n\n" - "pushl %%ecx\n\n" - "pushl %%esi\n\n" - "pushl %%edi\n\n" - #endif - ".byte 0xf3,0x0f,0xa7,0xf0\n" - #ifdef __x86_64__ - "popq %%rdi\n\n" - "popq %%rsi\n\n" - "popq %%rcx\n\n" - "popq %%rbx\n\n" - "popq %%rbp\n\n" - #else - "popl %%edi\n\n" - "popl %%esi\n\n" - "popl 
%%ecx\n\n" - "popl %%ebx\n\n" - "popl %%ebp\n\n" - #endif + ".byte 0xf3, 0x0f, 0xa7, 0xf0\n" : : "S"(input), "D"(output), "a"(rax), "b"(key), "c"((unsigned long)count), "d"(iv)); + return iv; } static u8 *rep_xcrypt_ctr(const u8 *input, u8 *output, void *key, u8 *iv, - struct sm4_cipher_data *sm4_data, u64 count) + struct sm4_cipher_data *sm4_data, u64 count) { u8 oiv[SM4_BLOCK_SIZE] = {0}; u16 cnt_tmp; u32 i; u8 *in_tmp = (u8 *)input, *out_tmp = output; - //Backup the original IV if it is not NULL. + /* Backup the original IV if it is not NULL. */ if (iv) memcpy(oiv, iv, SM4_BLOCK_SIZE); - // Get the current counter. + /* Get the current counter. */ cnt_tmp = GETU16(&iv[14]); - // Get the available counter space before overflow. + /* Get the available counter space before overflow. */ cnt_tmp = 0x10000 - cnt_tmp; - // - // Check there is enough counter space for the required blocks. - // + /* + * Check there is enough counter space for the required blocks. + */ if (cnt_tmp < count) { - - // Process the first part of data blocks. + /* Process the first part of data blocks. */ rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, cnt_tmp); - // Only increase the counter by SW when overflow occurs. + /* Only increase the counter by SW when overflow occurs. */ memcpy(iv, oiv, SM4_BLOCK_SIZE); for (i = 0; i < cnt_tmp; i++) @@ -127,30 +100,30 @@ static u8 *rep_xcrypt_ctr(const u8 *input, u8 *output, void *key, u8 *iv, out_tmp = output + cnt_tmp * SM4_BLOCK_SIZE; in_tmp = (u8 *)(input + cnt_tmp * SM4_BLOCK_SIZE); - // Get the number of data blocks that have not been encrypted. + /* Get the number of data blocks that have not been encrypted. */ cnt_tmp = count - cnt_tmp; - // Process the remaining part of data blocks. + /* Process the remaining part of data blocks. */ rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, cnt_tmp); } else { - // Counter space is big enough, the counter will not overflow. + /* Counter space is big enough, the counter will not overflow. 
*/ rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, count); } - // Restore the iv if not null + /* Restore the iv if not null */ if (iv) memcpy(iv, oiv, SM4_BLOCK_SIZE); return iv; } -static u8 *rep_xcrypt_ecb_ONE(const u8 *input, u8 *output, void *key, - u8 *iv, struct sm4_cipher_data *sm4_data, u64 count) +static u8 *rep_xcrypt_ecb_ONE(const u8 *input, u8 *output, void *key, u8 *iv, + struct sm4_cipher_data *sm4_data, u64 count) { struct sm4_cipher_data cw; - cw.cword.pad = 0; + cw.cword.pad = 0; cw.cword.b.encdec = 1; - cw.cword.pad |= 0x20|SM4_ECB; + cw.cword.pad |= 0x20 | SM4_ECB; return rep_xcrypt(input, output, key, iv, &cw, 1); } @@ -161,8 +134,7 @@ static u8 *rep_xcrypt_ecb_ONE(const u8 *input, u8 *output, void *key, * @in_key: The input key. * @key_len:The size of the key. */ -int gmi_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key, - unsigned int key_len) +static int gmi_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); @@ -176,8 +148,6 @@ int gmi_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key, return 0; } -EXPORT_SYMBOL_GPL(gmi_sm4_set_key); - static int sm4_cipher_common(struct skcipher_request *req, struct sm4_cipher_data *cw) { @@ -186,13 +156,12 @@ static int sm4_cipher_common(struct skcipher_request *req, struct sm4_cipher_dat struct skcipher_walk walk; unsigned int blocks; int err; - u8 *iv; err = skcipher_walk_virt(&walk, req, true); while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { - iv = rep_xcrypt(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, - walk.iv, cw, blocks); + rep_xcrypt(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, walk.iv, cw, + blocks); err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); } @@ -200,15 +169,14 @@ static int sm4_cipher_common(struct skcipher_request *req, struct sm4_cipher_dat return err; } - static int ecb_encrypt(struct skcipher_request *req) { int err; struct sm4_cipher_data cw; - 
cw.cword.pad = 0; + cw.cword.pad = 0; cw.cword.b.encdec = 1; - cw.cword.pad |= 0x20|SM4_ECB; + cw.cword.pad |= 0x20 | SM4_ECB; err = sm4_cipher_common(req, &cw); @@ -220,8 +188,8 @@ static int ecb_decrypt(struct skcipher_request *req) int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; - cw.cword.pad |= 0x20|SM4_ECB; + cw.cword.pad = 0; + cw.cword.pad |= 0x20 | SM4_ECB; err = sm4_cipher_common(req, &cw); @@ -233,9 +201,9 @@ static int cbc_encrypt(struct skcipher_request *req) int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; + cw.cword.pad = 0; cw.cword.b.encdec = 1; - cw.cword.pad |= 0x20|SM4_CBC; + cw.cword.pad |= 0x20 | SM4_CBC; err = sm4_cipher_common(req, &cw); @@ -247,15 +215,14 @@ static int cbc_decrypt(struct skcipher_request *req) int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; - cw.cword.pad |= 0x20|SM4_CBC; + cw.cword.pad = 0; + cw.cword.pad |= 0x20 | SM4_CBC; err = sm4_cipher_common(req, &cw); return err; } - /* * sm4_cipher_ctr is used for ZX-E and newer */ @@ -266,21 +233,20 @@ static int sm4_cipher_ctr(struct skcipher_request *req, struct sm4_cipher_data * struct skcipher_walk walk; unsigned int blocks, nbytes; int err; - u8 *iv, *dst, *src; + u8 *dst, *src; u8 keystream[SM4_BLOCK_SIZE]; u32 i; err = skcipher_walk_virt(&walk, req, true); while ((nbytes = walk.nbytes) > 0) { - src = walk.src.virt.addr; dst = walk.dst.virt.addr; while (nbytes >= SM4_BLOCK_SIZE) { blocks = nbytes/SM4_BLOCK_SIZE; - iv = rep_xcrypt_ctr(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, - walk.iv, cw, blocks); + rep_xcrypt_ctr(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, + walk.iv, cw, blocks); for (i = 0; i < blocks; i++) crypto_inc(walk.iv, SM4_BLOCK_SIZE); @@ -312,9 +278,9 @@ static int ctr_encrypt(struct skcipher_request *req) int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; + cw.cword.pad = 0; cw.cword.b.encdec = 1; - cw.cword.pad |= 0x20|SM4_CTR; + cw.cword.pad |= 0x20 | SM4_CTR; err = sm4_cipher_ctr(req, &cw); @@ -329,8 +295,8 
@@ static int ctr_decrypt(struct skcipher_request *req) int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; - cw.cword.pad |= 0x20|SM4_CTR; + cw.cword.pad = 0; + cw.cword.pad |= 0x20 | SM4_CTR; err = sm4_cipher_ctr(req, &cw); @@ -347,19 +313,17 @@ static int sm4_ctr_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) struct skcipher_walk walk; unsigned int nbytes; int err; - u8 *iv = NULL, *dst, *src; + u8 *dst, *src; u8 en_iv[SM4_BLOCK_SIZE] = {0}; err = skcipher_walk_virt(&walk, req, true); while ((nbytes = walk.nbytes) > 0) { - src = walk.src.virt.addr; dst = walk.dst.virt.addr; while (nbytes >= SM4_BLOCK_SIZE) { - - iv = rep_xcrypt_ecb_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1); + rep_xcrypt_ecb_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1); crypto_inc(walk.iv, SM4_BLOCK_SIZE); crypto_xor_cpy(dst, en_iv, src, SM4_BLOCK_SIZE); @@ -371,7 +335,6 @@ static int sm4_ctr_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) // tail if (walk.nbytes == walk.total && nbytes > 0) { - rep_xcrypt_ecb_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1); crypto_xor_cpy(dst, en_iv, src, nbytes); @@ -394,9 +357,9 @@ static int ctr_encrypt_zxc(struct skcipher_request *req) int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; + cw.cword.pad = 0; cw.cword.b.encdec = 1; - cw.cword.pad |= 0x20|SM4_CTR; + cw.cword.pad |= 0x20 | SM4_CTR; err = sm4_ctr_zxc(req, &cw); @@ -411,9 +374,9 @@ static int ctr_decrypt_zxc(struct skcipher_request *req) int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; + cw.cword.pad = 0; cw.cword.b.encdec = 0; - cw.cword.pad |= 0x20|SM4_CTR; + cw.cword.pad |= 0x20 | SM4_CTR; err = sm4_ctr_zxc(req, &cw); @@ -421,16 +384,16 @@ static int ctr_decrypt_zxc(struct skcipher_request *req) } /* - * ofb_encrypt is used for ZX-E and newer + * ofb_encrypt is used for ZX-E and newer */ static int ofb_encrypt(struct skcipher_request *req) { int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; + cw.cword.pad = 0; cw.cword.b.encdec 
= 1; - cw.cword.pad |= 0x20|SM4_OFB; + cw.cword.pad |= 0x20 | SM4_OFB; err = sm4_cipher_common(req, &cw); @@ -438,15 +401,15 @@ static int ofb_encrypt(struct skcipher_request *req) } /* - * ofb_decrypt is used for ZX-E and newer + * ofb_decrypt is used for ZX-E and newer */ static int ofb_decrypt(struct skcipher_request *req) { int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; - cw.cword.pad |= 0x20|SM4_OFB; + cw.cword.pad = 0; + cw.cword.pad |= 0x20 | SM4_OFB; err = sm4_cipher_common(req, &cw); @@ -470,7 +433,6 @@ static int sm4_ofb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { while (blocks--) { - rep_xcrypt_ecb_ONE(walk.iv, walk.iv, ctx->rkey_enc, NULL, cw, 1); for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) @@ -482,7 +444,6 @@ static int sm4_ofb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) walk.dst.virt.addr += SM4_BLOCK_SIZE; } - err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); } @@ -497,9 +458,9 @@ static int ofb_encrypt_zxc(struct skcipher_request *req) int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; + cw.cword.pad = 0; cw.cword.b.encdec = 1; - cw.cword.pad |= 0x20|SM4_OFB; + cw.cword.pad |= 0x20 | SM4_OFB; err = sm4_ofb_zxc(req, &cw); @@ -514,9 +475,9 @@ static int ofb_decrypt_zxc(struct skcipher_request *req) int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; + cw.cword.pad = 0; cw.cword.b.encdec = 0; - cw.cword.pad |= 0x20|SM4_OFB; + cw.cword.pad |= 0x20 | SM4_OFB; err = sm4_ofb_zxc(req, &cw); @@ -532,9 +493,9 @@ static int cfb_encrypt(struct skcipher_request *req) int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; + cw.cword.pad = 0; cw.cword.b.encdec = 1; - cw.cword.pad |= 0x20|SM4_CFB; + cw.cword.pad |= 0x20 | SM4_CFB; err = sm4_cipher_common(req, &cw); @@ -550,13 +511,12 @@ static int cfb_decrypt(struct skcipher_request *req) int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; - cw.cword.pad |= 0x20|SM4_CFB; + 
cw.cword.pad = 0; + cw.cword.pad |= 0x20 | SM4_CFB; err = sm4_cipher_common(req, &cw); return err; - } /* @@ -577,7 +537,6 @@ static int sm4_cfb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { while (blocks--) { rep_xcrypt_ecb_ONE(walk.iv, walk.iv, ctx->rkey_enc, NULL, cw, 1); - if (cw->cword.b.encdec) for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) *(size_t *)(walk.dst.virt.addr + n) = @@ -610,9 +569,9 @@ static int cfb_encrypt_zxc(struct skcipher_request *req) int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; + cw.cword.pad = 0; cw.cword.b.encdec = 1; - cw.cword.pad |= 0x20|SM4_CFB; + cw.cword.pad |= 0x20 | SM4_CFB; err = sm4_cfb_zxc(req, &cw); @@ -627,113 +586,112 @@ static int cfb_decrypt_zxc(struct skcipher_request *req) int err; struct sm4_cipher_data cw; - cw.cword.pad = 0; + cw.cword.pad = 0; cw.cword.b.encdec = 0; - cw.cword.pad |= 0x20|SM4_CFB; + cw.cword.pad |= 0x20 | SM4_CFB; err = sm4_cfb_zxc(req, &cw); return err; } - static struct skcipher_alg sm4_algs[] = { { .base = { - .cra_name = "__ecb(sm4)", - .cra_driver_name = "__ecb-sm4-gmi", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_INTERNAL, - .cra_blocksize = SM4_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct sm4_ctx), - .cra_module = THIS_MODULE, + .cra_name = "__ecb(sm4)", + .cra_driver_name = "__ecb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, }, - .min_keysize = SM4_KEY_SIZE, - .max_keysize = SM4_KEY_SIZE, - .ivsize = SM4_BLOCK_SIZE, - .walksize = 8 * SM4_BLOCK_SIZE, - .setkey = gmi_sm4_set_key, - .encrypt = ecb_encrypt, - .decrypt = ecb_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ecb_encrypt, + .decrypt = ecb_decrypt, }, { .base = { - .cra_name = "__cbc(sm4)", - 
.cra_driver_name = "__cbc-sm4-gmi", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_INTERNAL, - .cra_blocksize = SM4_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct sm4_ctx), - .cra_module = THIS_MODULE, + .cra_name = "__cbc(sm4)", + .cra_driver_name = "__cbc-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, }, - .min_keysize = SM4_KEY_SIZE, - .max_keysize = SM4_KEY_SIZE, - .ivsize = SM4_BLOCK_SIZE, - .walksize = 8 * SM4_BLOCK_SIZE, - .setkey = gmi_sm4_set_key, - .encrypt = cbc_encrypt, - .decrypt = cbc_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = cbc_encrypt, + .decrypt = cbc_decrypt, }, { .base = { - .cra_name = "__ctr(sm4)", - .cra_driver_name = "__ctr-sm4-gmi", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_INTERNAL, - .cra_blocksize = 1, //SM4_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct sm4_ctx), - .cra_module = THIS_MODULE, + .cra_name = "__ctr(sm4)", + .cra_driver_name = "__ctr-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = 1, //SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, }, - .min_keysize = SM4_KEY_SIZE, - .max_keysize = SM4_KEY_SIZE, - .ivsize = SM4_BLOCK_SIZE, - .chunksize = SM4_BLOCK_SIZE, - .walksize = 8 * SM4_BLOCK_SIZE, - .setkey = gmi_sm4_set_key, - .encrypt = ctr_encrypt, - .decrypt = ctr_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ctr_encrypt, + .decrypt = ctr_decrypt, }, { .base = { - .cra_name = "__ofb(sm4)", - .cra_driver_name = "__ofb-sm4-gmi", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_INTERNAL, - .cra_blocksize = SM4_BLOCK_SIZE, - .cra_ctxsize = 
sizeof(struct sm4_ctx), - .cra_module = THIS_MODULE, + .cra_name = "__ofb(sm4)", + .cra_driver_name = "__ofb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, }, - .min_keysize = SM4_KEY_SIZE, - .max_keysize = SM4_KEY_SIZE, - .ivsize = SM4_BLOCK_SIZE, - .chunksize = SM4_BLOCK_SIZE, - .walksize = 8 * SM4_BLOCK_SIZE, - .setkey = gmi_sm4_set_key, - .encrypt = ofb_encrypt, - .decrypt = ofb_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ofb_encrypt, + .decrypt = ofb_decrypt, }, { .base = { - .cra_name = "__cfb(sm4)", - .cra_driver_name = "__cfb-sm4-gmi", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_INTERNAL, - .cra_blocksize = SM4_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct sm4_ctx), - .cra_module = THIS_MODULE, + .cra_name = "__cfb(sm4)", + .cra_driver_name = "__cfb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, }, - .min_keysize = SM4_KEY_SIZE, - .max_keysize = SM4_KEY_SIZE, - .ivsize = SM4_BLOCK_SIZE, - .chunksize = SM4_BLOCK_SIZE, - .walksize = 8 * SM4_BLOCK_SIZE, - .setkey = gmi_sm4_set_key, - .encrypt = cfb_encrypt, - .decrypt = cfb_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = cfb_encrypt, + .decrypt = cfb_decrypt, } }; @@ -745,13 +703,11 @@ static int gmi_zxc_check(void) struct cpuinfo_x86 *c = &cpu_data(0); - if ((c->x86 > 6)) { + if ((c->x86 > 6)) f_zxc = 0; - } else if (((c->x86 == 6) && (c->x86_model >= 0x0f)) - || ((c->x86 == 6) && (c->x86_model == 0x09)) - ) { + else if (((c->x86 == 6) && 
(c->x86_model >= 0x0f)) || + ((c->x86 == 6) && (c->x86_model == 0x09))) f_zxc = 1; - } return f_zxc; } @@ -764,28 +720,26 @@ static int gmi_ccs_available(void) struct cpuinfo_x86 *c = &cpu_data(0); u32 eax, edx; - if (((c->x86 == 6) && (c->x86_model >= 0x0f)) - || ((c->x86 == 6) && (c->x86_model == 0x09)) - || (c->x86 > 6)) { + if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || + ((c->x86 == 6) && (c->x86_model == 0x09)) || + (c->x86 > 6)) { if (!boot_cpu_has(X86_FEATURE_CCS) || !boot_cpu_has(X86_FEATURE_CCS_EN)) { - eax = 0xC0000001; - __asm__ __volatile__ ("cpuid":"=d"(edx):"a"(eax) : ); + __asm__ __volatile__ ("cpuid" : "=d"(edx) : "a"(eax) : ); if ((edx & 0x0030) != 0x0030) return -ENODEV; - pr_notice("GMI SM4 is detected by CPUID\n"); + pr_debug("GMI SM4 is detected by CPUID\n"); return 0; } - pr_notice("GMI SM4 is available\n"); + pr_debug("GMI SM4 is available\n"); return 0; } return -ENODEV; } - static void gmi_sm4_exit(void) { int i; @@ -808,19 +762,14 @@ static int __init gmi_sm4_init(void) return -ENODEV; if (gmi_zxc_check()) { - for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { if (!strcmp(sm4_algs[i].base.cra_name, "__ctr(sm4)")) { - sm4_algs[i].encrypt = ctr_encrypt_zxc; sm4_algs[i].decrypt = ctr_decrypt_zxc; } else if (!strcmp(sm4_algs[i].base.cra_name, "__cfb(sm4)")) { - sm4_algs[i].encrypt = cfb_encrypt_zxc; sm4_algs[i].decrypt = cfb_decrypt_zxc; - } else if (!strcmp(sm4_algs[i].base.cra_name, "__ofb(sm4)")) { - sm4_algs[i].encrypt = ofb_encrypt_zxc; sm4_algs[i].decrypt = ofb_decrypt_zxc; } -- Gitee From 268dbbac01f60f60dee0122382f2b099f8ff04b6 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 28 Jun 2024 11:42:54 +0800 Subject: [PATCH 1086/2138] x86/cpu: Remove pointless evaluation of x86_coreid_bits ANBZ: #9440 commit 594957d723a0674ca15bfefb755b3403624b8239 upstream. cpuinfo_x86::x86_coreid_bits is only used by the AMD numa topology code. No point in evaluating it on non AMD systems. No functional change. 
Signed-off-by: Thomas Gleixner Tested-by: Juergen Gross Tested-by: Sohil Mehta Tested-by: Michael Kelley Tested-by: Peter Zijlstra (Intel) Tested-by: Zhang Rui Reviewed-by: Arjan van de Ven Acked-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20230814085112.687588373@linutronix.de Signed-off-by: leoliu-oc Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3441 --- arch/x86/kernel/cpu/intel.c | 13 ------------- arch/x86/kernel/cpu/zhaoxin.c | 13 ------------- 2 files changed, 26 deletions(-) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 4752a9f17ef6..2d7637a4a157 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -391,19 +391,6 @@ static void early_init_intel(struct cpuinfo_x86 *c) setup_clear_cpu_cap(X86_FEATURE_PGE); } - if (c->cpuid_level >= 0x00000001) { - u32 eax, ebx, ecx, edx; - - cpuid(0x00000001, &eax, &ebx, &ecx, &edx); - /* - * If HTT (EDX[28]) is set EBX[16:23] contain the number of - * apicids which are reserved per package. Store the resulting - * shift value for the package management code. - */ - if (edx & (1U << 28)) - c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); - } - check_memory_type_self_snoop_errata(c); /* diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c index 8e4201ad1d23..80b3791240e4 100644 --- a/arch/x86/kernel/cpu/zhaoxin.c +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -66,19 +66,6 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); } - if (c->cpuid_level >= 0x00000001) { - u32 eax, ebx, ecx, edx; - - cpuid(0x00000001, &eax, &ebx, &ecx, &edx); - /* - * If HTT (EDX[28]) is set EBX[16:23] contain the number of - * apicids which are reserved per package. Store the resulting - * shift value for the package management code. 
- */ - if (edx & (1U << 28)) - c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); - } - /* * These CPUs declare support SSE4.2 instruction sets but * having low performance CRC32C instruction implementation. -- Gitee From 0042c5c0faf7a5049089894a9bf6bfbc619e10d8 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 28 Jun 2024 15:56:57 +0800 Subject: [PATCH 1087/2138] anolis: x86/mce/zhaoxin: Enable mcelog to decode PCIE, ZDI/ZPI and DRAM errors ANBZ: #9446 The mcelog cannot decode PCIE, ZDI/ZPI, and DRAM errors in the FFM (Firmware First Mode). The purpose of this patch is to enable mcelog to decode PCIE, ZDI/ZPI, and DRAM errors that occur on Zhaoxin processors, so that the cause of these errors can be quickly located. Signed-off-by: leoliu-oc Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3445 --- arch/x86/include/asm/mce.h | 6 ++ arch/x86/kernel/acpi/apei.c | 24 ++++- arch/x86/kernel/cpu/mce/apei.c | 165 +++++++++++++++++++++++++++++++++ drivers/acpi/apei/apei-base.c | 10 ++ drivers/acpi/apei/ghes.c | 28 +++++- include/acpi/apei.h | 2 + 6 files changed, 233 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 180b1cbfcc4e..eff1cf90895c 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -289,6 +289,12 @@ struct cper_sec_mem_err; extern void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err); +extern void zx_apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err); +struct cper_sec_pcie; +extern void zx_apei_mce_report_pcie_error(int corrected, struct cper_sec_pcie *pcie_err); +struct cper_sec_proc_generic; +extern void zx_apei_mce_report_zdi_error(int corrected, struct cper_sec_proc_generic *zdi_err); + /* * Enumerate new IP types and HWID values in AMD processors which support * Scalable MCA. 
diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c index 0916f00a992e..26d9963b66bd 100644 --- a/arch/x86/kernel/acpi/apei.c +++ b/arch/x86/kernel/acpi/apei.c @@ -40,7 +40,29 @@ int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data) void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) { #ifdef CONFIG_X86_MCE - apei_mce_report_mem_error(sev, mem_err); + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) + zx_apei_mce_report_mem_error(sev, mem_err); + else + apei_mce_report_mem_error(sev, mem_err); +#endif +} + +void arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err) +{ +#ifdef CONFIG_X86_MCE + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) + zx_apei_mce_report_pcie_error(sev, pcie_err); +#endif +} + +void arch_apei_report_zdi_error(int sev, struct cper_sec_proc_generic *zdi_err) +{ +#ifdef CONFIG_X86_MCE + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) + zx_apei_mce_report_zdi_error(sev, zdi_err); #endif } diff --git a/arch/x86/kernel/cpu/mce/apei.c b/arch/x86/kernel/cpu/mce/apei.c index 8ed341714686..7c23ae2e3006 100644 --- a/arch/x86/kernel/cpu/mce/apei.c +++ b/arch/x86/kernel/cpu/mce/apei.c @@ -63,6 +63,171 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) } EXPORT_SYMBOL_GPL(apei_mce_report_mem_error); +void zx_apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) +{ + struct mce m; + int apei_error = 0; + + if (boot_cpu_data.x86 != 7 || boot_cpu_data.x86_model != 91) + return; + + if (!(mem_err->validation_bits & CPER_MEM_VALID_PA)) + return; + + mce_setup(&m); + m.misc = 0; + m.misc = mem_err->module; + m.addr = mem_err->physical_addr; + if (mem_err->card == 0) + m.bank = 9; + else + m.bank = 10; + + switch (mem_err->error_type) { + case 2: + m.status = 
0x9c20004000010080; + break; + case 3: + m.status = 0xbe40000000020090; + apei_error = apei_write_mce(&m); + break; + case 8: + if (mem_err->requestor_id == 2) + m.status = 0x98200040000400b0; + else if (mem_err->requestor_id == 3) { + m.status = 0xba400000000600a0; + apei_error = apei_write_mce(&m); + } else if (mem_err->requestor_id == 4) + m.status = 0x98200100000300b0; + else if (mem_err->requestor_id == 5) { + m.status = 0xba000000000500b0; + apei_error = apei_write_mce(&m); + } else + pr_info("Undefined Parity error\n"); + break; + case 10: + if (mem_err->requestor_id == 6) { + m.status = 0xba400000000700a0; + apei_error = apei_write_mce(&m); + } else if (mem_err->requestor_id == 7) { + m.status = 0xba000000000800b0; + apei_error = apei_write_mce(&m); + } else + pr_info("Undefined dvad error\n"); + break; + case 13: + m.status = 0x9c200040000100c0; + break; + case 14: + m.status = 0xbd000000000200c0; + apei_error = apei_write_mce(&m); + break; + } + mce_log(&m); +} +EXPORT_SYMBOL_GPL(zx_apei_mce_report_mem_error); + +void zx_apei_mce_report_pcie_error(int severity, struct cper_sec_pcie *pcie_err) +{ + struct mce m; + int apei_error = 0; + + if (boot_cpu_data.x86 != 7 || boot_cpu_data.x86_model != 91) + return; + + mce_setup(&m); + m.addr = 0; + m.misc = 0; + m.misc |= (u64)pcie_err->device_id.segment << 32; + m.misc |= pcie_err->device_id.bus << 24; + m.misc |= pcie_err->device_id.device << 19; + m.misc |= pcie_err->device_id.function << 16; + m.bank = 6; + + switch (severity) { + case 1: + m.status = 0x9820004000020e0b; + break; + case 2: + m.status = 0xba20000000010e0b; + break; + case 3: + m.status = 0xbd20000000000e0b; + apei_error = apei_write_mce(&m); + break; + default: + pr_info("Undefine pcie error\n"); + break; + } + mce_log(&m); +} +EXPORT_SYMBOL_GPL(zx_apei_mce_report_pcie_error); + +void zx_apei_mce_report_zdi_error(int severity, struct cper_sec_proc_generic *zdi_err) +{ + struct mce m; + int apei_error = 0; + + if (boot_cpu_data.x86 != 7 || 
boot_cpu_data.x86_model != 91) + return; + + mce_setup(&m); + m.misc = 0; + m.misc |= (zdi_err->requestor_id & 0xff) << 19; + m.misc |= ((zdi_err->requestor_id & 0xff00) >> 8) >> 24; + m.bank = 5; + switch (zdi_err->responder_id) { + case 2: + m.status = 0xba00000000040e0f; + apei_error = apei_write_mce(&m); + break; + case 3: + m.status = 0xba00000000030e0f; + apei_error = apei_write_mce(&m); + break; + case 4: + m.status = 0xba00000000020e0f; + apei_error = apei_write_mce(&m); + break; + case 5: + m.status = 0xba00000000010e0f; + apei_error = apei_write_mce(&m); + break; + case 6: + m.status = 0x9820004000090e0f; + break; + case 7: + m.status = 0x9820004000080e0f; + break; + case 8: + m.status = 0x9820004000070e0f; + break; + case 9: + m.status = 0x9820004000060e0f; + break; + case 10: + m.status = 0x9820004000050e0f; + break; + case 11: + case 12: + case 13: + case 14: + case 15: + m.status = 0x98200040000b0e0f; + break; + case 16: + case 17: + case 18: + m.status = 0x98200040000c0e0f; + break; + default: + pr_info("Undefined ZDI Error\n"); + break; + } + mce_log(&m); +} +EXPORT_SYMBOL_GPL(zx_apei_mce_report_zdi_error); + int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id) { const u64 *i_mce = ((const u64 *) (ctx_info + 1)); diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index c7c26872f4ce..05ee09357bd7 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -773,6 +773,16 @@ void __weak arch_apei_report_mem_error(int sev, } EXPORT_SYMBOL_GPL(arch_apei_report_mem_error); +void __weak arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err) +{ +} +EXPORT_SYMBOL_GPL(arch_apei_report_pcie_error); + +void __weak arch_apei_report_zdi_error(int sev, struct cper_sec_proc_generic *zdi_err) +{ +} +EXPORT_SYMBOL_GPL(arch_apei_report_zdi_error); + int apei_osc_setup(void) { static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c"; diff --git a/drivers/acpi/apei/ghes.c 
b/drivers/acpi/apei/ghes.c index 64b6193e2475..9ccf2a51c64a 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -796,14 +796,21 @@ static bool ghes_do_proc(struct ghes *ghes, atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err); - arch_apei_report_mem_error(sev, mem_err); + arch_apei_report_mem_error(sec_sev, mem_err); queued = ghes_handle_memory_failure(gdata, sev, sync); } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { + struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata); + + arch_apei_report_pcie_error(sec_sev, pcie_err); ghes_handle_aer(gdata); } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) { queued = ghes_handle_arm_hw_error(gdata, sev, sync); + } else if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) { + struct cper_sec_proc_generic *zdi_err = acpi_hest_get_payload(gdata); + + arch_apei_report_zdi_error(sec_sev, zdi_err); } else { void *err = acpi_hest_get_payload(gdata); @@ -1188,6 +1195,8 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, u32 len, node_len; u64 buf_paddr; int sev, rc; + struct acpi_hest_generic_data *gdata; + guid_t *sec_type; if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG)) return -EOPNOTSUPP; @@ -1223,6 +1232,23 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, sev = ghes_severity(estatus->error_severity); if (sev >= GHES_SEV_PANIC) { + apei_estatus_for_each_section(estatus, gdata) { + sec_type = (guid_t *)gdata->section_type; + if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) { + struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata); + + arch_apei_report_mem_error(sev, mem_err); + } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { + struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata); + + arch_apei_report_pcie_error(sev, pcie_err); + } else if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) { + struct cper_sec_proc_generic *zdi_err = + acpi_hest_get_payload(gdata); + + arch_apei_report_zdi_error(sev, zdi_err); + } + } ghes_print_queued_estatus(); 
__ghes_panic(ghes, estatus, buf_paddr, fixmap_idx); } diff --git a/include/acpi/apei.h b/include/acpi/apei.h index dc60f7db5524..fcb5814a3f43 100644 --- a/include/acpi/apei.h +++ b/include/acpi/apei.h @@ -52,6 +52,8 @@ int erst_clear(u64 record_id); int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data); void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err); +void arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err); +void arch_apei_report_zdi_error(int sev, struct cper_sec_proc_generic *zdi_err); #endif #endif -- Gitee From fd1f85f5b441e8a12ee6cada3b153f7c5941242e Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 2 Jan 2024 19:24:03 +0800 Subject: [PATCH 1088/2138] anolis: cpufreq: ACPI: add ITMT support when CPPC enabled ANBZ: #7809 The _CPC method can get per-core highest frequency. The highest frequency may varies between cores which mean cores can running at different max frequency, so can use it as a core priority and give a hint to scheduler in order to put critical task to the higher priority core. 
Signed-off-by: leoliu-oc Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3438 --- arch/x86/kernel/itmt.c | 2 + drivers/cpufreq/acpi-cpufreq.c | 81 ++++++++++++++++++++++++++++------ 2 files changed, 70 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c index ee4fe8cdb857..b49ac8ecbbd6 100644 --- a/arch/x86/kernel/itmt.c +++ b/arch/x86/kernel/itmt.c @@ -122,6 +122,7 @@ int sched_set_itmt_support(void) return 0; } +EXPORT_SYMBOL_GPL(sched_set_itmt_support); /** * sched_clear_itmt_support() - Revoke platform's support of ITMT @@ -181,3 +182,4 @@ void sched_set_itmt_core_prio(int prio, int cpu) { per_cpu(sched_core_priority, cpu) = prio; } +EXPORT_SYMBOL_GPL(sched_set_itmt_core_prio); diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 4ac3a35dcd98..d34a8ca6187d 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -628,28 +628,35 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) #endif #ifdef CONFIG_ACPI_CPPC_LIB -static u64 get_max_boost_ratio(unsigned int cpu) +static bool cppc_highest_perf_diff; +static struct cpumask core_prior_mask; + +static void cppc_get_highest_nominal_perf(int cpu, u64 *highest_perf, u64 *nominal_perf) { struct cppc_perf_caps perf_caps; - u64 highest_perf, nominal_perf; int ret; - if (acpi_pstate_strict) - return 0; - ret = cppc_get_perf_caps(cpu, &perf_caps); if (ret) { - pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", - cpu, ret); - return 0; + pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", cpu, ret); + return; } - if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) - highest_perf = amd_get_highest_perf(); + *highest_perf = amd_get_highest_perf(); else - highest_perf = perf_caps.highest_perf; + *highest_perf = perf_caps.highest_perf; - nominal_perf = perf_caps.nominal_perf; + *nominal_perf = perf_caps.nominal_perf; +} + +static u64 get_max_boost_ratio(unsigned int cpu) +{ + 
u64 highest_perf, nominal_perf; + + if (acpi_pstate_strict) + return 0; + + cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf); if (!highest_perf || !nominal_perf) { pr_debug("CPU%d: highest or nominal performance missing\n", cpu); @@ -663,8 +670,51 @@ static u64 get_max_boost_ratio(unsigned int cpu) return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf); } + +/* The work item is needed to avoid CPU hotplug locking issues */ +static void cpufreq_sched_itmt_work_fn(struct work_struct *work) +{ + sched_set_itmt_support(); +} + +static DECLARE_WORK(sched_itmt_work, cpufreq_sched_itmt_work_fn); + +static void cpufreq_set_itmt_prio(int cpu) +{ + u64 highest_perf, nominal_perf; + static u64 max_highest_perf = 0, min_highest_perf = U64_MAX; + + cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf); + + sched_set_itmt_core_prio(highest_perf, cpu); + cpumask_set_cpu(cpu, &core_prior_mask); + + if (max_highest_perf <= min_highest_perf) { + if (highest_perf > max_highest_perf) + max_highest_perf = highest_perf; + + if (highest_perf < min_highest_perf) + min_highest_perf = highest_perf; + + if (max_highest_perf > min_highest_perf) { + /* + * This code can be run during CPU online under the + * CPU hotplug locks, so sched_set_itmt_support() + * cannot be called from here. Queue up a work item + * to invoke it. 
+ */ + cppc_highest_perf_diff = true; + } + } + + if (cppc_highest_perf_diff && cpumask_equal(&core_prior_mask, cpu_online_mask)) { + pr_debug("queue a work to set itmt enabled\n"); + schedule_work(&sched_itmt_work); + } +} #else static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; } +static inline void cpufreq_set_itmt_prio(int cpu) { } #endif static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) @@ -677,7 +727,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) unsigned int valid_states = 0; unsigned int result = 0; u64 max_boost_ratio; - unsigned int i; + unsigned int i, j; #ifdef CONFIG_SMP static int blacklisted; #endif @@ -741,6 +791,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) pr_info_once("overriding BIOS provided _PSD data\n"); } #endif + if (c->x86_vendor == X86_VENDOR_CENTAUR || c->x86_vendor == X86_VENDOR_ZHAOXIN) { + for_each_cpu(j, policy->cpus) { + cpufreq_set_itmt_prio(j); + } + } /* capability check */ if (perf->state_count <= 1) { -- Gitee From 253f2775714de9556c65997f0b0cb7329cd90b59 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Wed, 3 Jan 2024 10:38:35 +0800 Subject: [PATCH 1089/2138] anolis: Set ASYM_PACKING Flag on Zhaoxin KH-40000 platform ANBZ: #7809 Set ASYM_PACKING Flag on Zhaoxin KH-40000 platform Signed-off-by: leoliu-oc Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3438 --- kernel/sched/topology.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index bc4625de1136..cc53396fb0c7 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -2488,6 +2488,17 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att } } +#if IS_ENABLED(CONFIG_X86) + if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) && + (boot_cpu_data.x86 == 7 && boot_cpu_data.x86_model == 0x5b)) { + for_each_cpu(i, cpu_map) { + 
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) + sd->flags |= SD_ASYM_PACKING; + } + } +#endif + /* Calculate CPU capacity for physical packages and nodes */ for (i = nr_cpumask_bits-1; i >= 0; i--) { if (!cpumask_test_cpu(i, cpu_map)) -- Gitee From 4332160f5224259ba5011af3f15e3261e9e57ac3 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 10 Aug 2020 13:44:58 +0800 Subject: [PATCH 1090/2138] anolis: arm64: Expose address bits (physical/virtual) via cpuinfo ANBZ: #9696 Expose the physical address bits and virtual address bits supported by ARM CPU core, which can be used for Kangaroo to decide the address sizes. Signed-off-by: Baolin Wang Reviewed-by: zou cao Signed-off-by: Yinan Liu Reviewed-by: Baolin Wang [ shawn: refactor a new id_aa64mmfr2_varange_to_virt_shift helper ] Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3657 --- arch/arm64/include/asm/cpufeature.h | 30 +++++++++++++++++++++++++++++ arch/arm64/kernel/cpuinfo.c | 4 +++- 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 24c2564268e5..d0e5119a209d 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -862,6 +862,36 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange) } } +static inline u32 id_aa64mmfr0_pa_range_bits(void) +{ + u64 mmfr0; + u32 parange; + + mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); + parange = cpuid_feature_extract_unsigned_field(mmfr0, + ID_AA64MMFR0_EL1_PARANGE_SHIFT); + return id_aa64mmfr0_parange_to_phys_shift(parange); +} + +static inline u32 id_aa64mmfr2_varange_to_virt_shift(int varange) +{ + switch (varange) { + case ID_AA64MMFR2_EL1_VARange_48: return 48; + case ID_AA64MMFR2_EL1_VARange_52: return 52; + default: return CONFIG_ARM64_VA_BITS; + } +} + +static inline u32 id_aa64mmfr2_va_range_bits(void) +{ + u64 mmfr2; + u32 varange; + + mmfr2 = 
read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1); + varange = cpuid_feature_extract_unsigned_field(mmfr2, ID_AA64MMFR2_EL1_VARange_SHIFT); + return id_aa64mmfr2_varange_to_virt_shift(varange); +} + /* Check whether hardware update of the Access flag is supported */ static inline bool cpu_has_hw_af(void) { diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 07ee88abfa0d..e862a165d512 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -242,7 +242,9 @@ static int c_show(struct seq_file *m, void *v) seq_printf(m, "CPU architecture: 8\n"); seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr)); seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr)); - seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr)); + seq_printf(m, "CPU revision\t: %d\n", MIDR_REVISION(midr)); + seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n\n", + id_aa64mmfr0_pa_range_bits(), id_aa64mmfr2_va_range_bits()); } return 0; -- Gitee From 6cb7b89019a24125aa5ea05e0d30062fae9fb308 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 10 Sep 2020 10:51:48 +0800 Subject: [PATCH 1091/2138] anolis: arm64: Add CPU freqency information for /proc/cpuinfo ANBZ: #9696 Some userspace services want to get the CPU frequency to analyze the CPU performance, thus we can add the CPU freqency information by /proc/cpuinfo interface for ARM platform just like X86 architecture. 
With this patch, we can get below information on Kunpeng machine: $cat /proc/cpuinfo processor : 0 BogoMIPS : 200.00 Features : fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp asimdhp cpuid asimdrdm jscvt fcma dcpop asimddp asimdfhm CPU implementer : 0x48 CPU architecture: 8 CPU variant : 0x1 CPU part : 0xd01 CPU revision : 0 address sizes : 48 bits physical, 48 bits virtual CPU MHz : 2600.000 Signed-off-by: Baolin Wang Reviewed-by: luanshi Reviewed-by: zou cao Signed-off-by: Yinan Liu Reviewed-by: Baolin Wang Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3657 --- arch/arm64/kernel/cpuinfo.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index e862a165d512..8e7446408046 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -178,7 +179,7 @@ static int c_show(struct seq_file *m, void *v) { int i, j; bool compat = personality(current->personality) == PER_LINUX32; - unsigned int cpu, index, total; + unsigned int cpu, index, total, freq; bool rich_container = false; for_each_online_cpu(i) { @@ -243,8 +244,11 @@ static int c_show(struct seq_file *m, void *v) seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr)); seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr)); seq_printf(m, "CPU revision\t: %d\n", MIDR_REVISION(midr)); - seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n\n", + seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", id_aa64mmfr0_pa_range_bits(), id_aa64mmfr2_va_range_bits()); + + freq = cpufreq_get(cpu); + seq_printf(m, "CPU MHz\t\t: %u.%03u\n\n", freq / 1000, freq % 1000); } return 0; -- Gitee From 55b4477d61ea644d4c57b6362bd15d457e7fb2bf Mon Sep 17 00:00:00 2001 From: Guanghui Feng Date: Thu, 22 Sep 2022 18:22:48 +0800 Subject: [PATCH 1092/2138] anolis: arm64: add CPU freq sample on 
pcct non-support ANBZ: #9696 There is a need to show CPU freq when pcct non-support. (commit:f8e844adebde).In order to meet the request, add CPU freq sample with AMU counter at boot phase. The SYS_AMEVCNTR0_CORE_EL0 can count CPU cycle. The SYS_AMEVCNTR0_CONST_EL0 will count cycle constantly in speed at cntfrq. Consequently, we can estimate the CPU freq at boot phase. NOTE: SYS_AMEVCNTR0_CORE_EL0/SYS_AMEVCNTR0_CONST_EL0 will be paused out of sync by wfe/wfi. Signed-off-by: Guanghui Feng Link: https://gitee.com/anolis/cloud-kernel/pulls/712 Reviewed-by: Baolin Wang Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3657 --- arch/arm64/include/asm/cpufeature.h | 1 + arch/arm64/kernel/cpuinfo.c | 6 ++- arch/arm64/kernel/topology.c | 74 +++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index d0e5119a209d..0513c4ba9d9f 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -960,6 +960,7 @@ extern struct arm64_ftr_override arm64_sw_feature_override; u32 get_kvm_ipa_limit(void); void dump_cpu_features(void); +unsigned int arch_cpufreq_get_khz(int cpu); #endif /* __ASSEMBLY__ */ diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 8e7446408046..e3650d7e5fed 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -248,7 +248,11 @@ static int c_show(struct seq_file *m, void *v) id_aa64mmfr0_pa_range_bits(), id_aa64mmfr2_va_range_bits()); freq = cpufreq_get(cpu); - seq_printf(m, "CPU MHz\t\t: %u.%03u\n\n", freq / 1000, freq % 1000); + if (freq == 0) + freq = arch_cpufreq_get_khz(cpu); + if (freq) + seq_printf(m, "CPU MHz\t\t: %u.%03u\n", freq / 1000, freq % 1000); + seq_puts(m, "\n"); } return 0; diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 817d788cd866..8f0693cee808 100644 --- a/arch/arm64/kernel/topology.c +++ 
b/arch/arm64/kernel/topology.c @@ -21,6 +21,7 @@ #include #include #include +#include #ifdef CONFIG_ACPI static bool __init acpi_cpu_is_threaded(int cpu) @@ -71,6 +72,13 @@ int __init parse_acpi_topology(void) } #endif +static unsigned int cpufreq_khz; + +unsigned int arch_cpufreq_get_khz(int cpu) +{ + return cpufreq_khz; +} + #ifdef CONFIG_ARM64_AMU_EXTN #define read_corecnt() read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0) #define read_constcnt() read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0) @@ -81,12 +89,78 @@ int __init parse_acpi_topology(void) #undef pr_fmt #define pr_fmt(fmt) "AMU: " fmt +#define ARCH_FREQ_THRESHOLD_MS 10 static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, arch_max_freq_scale); static DEFINE_PER_CPU(u64, arch_const_cycles_prev); static DEFINE_PER_CPU(u64, arch_core_cycles_prev); static cpumask_var_t amu_fie_cpus; +/* + * Sample cpu freq. + * + * The register SYS_AMEVCNTR0_EL0(1) increases at the fixed + * rate of arch_timer_get_cntfrq() and can be used as timekeeper. + * While The register SYS_AMEVCNTR0_EL0(0) counte the cpu + * cycle elapsed. With the two registers, we can sample cpu + * freq: + * delta(cycle) / delta(timekeeper) + * + * But these registers are halted by wfe/wfi and can't + * in/out of the idle state synchronously, which is different + * from x86 MSR_IA32_APERF/MSR_IA32_MPERF. + * + * NOTE: + * ALL core use same freq by default(ignore big.LITTLE) + */ +static void __init __arch_cpufreq_init(void *dummy) +{ + unsigned long flags; + u64 stable_cnt; + u64 nonstable_cnt; + u32 freq = arch_timer_get_cntfrq(); + u64 delta = freq / 1000 * ARCH_FREQ_THRESHOLD_MS; + u64 counter; + + local_irq_save(flags); + counter = stable_cnt = read_sysreg_s(SYS_AMEVCNTR0_EL0(1)); + nonstable_cnt = read_sysreg_s(SYS_AMEVCNTR0_EL0(0)); + local_irq_restore(flags); + + /* + * Meaningless operations & keep cpu out of + * wfe/wfi idle state. + * + * While sampling core freq, detecting time taking + * may be more than 10 miliseconds by default. 
+ * REFER to: intel x86 APERFMPERF_CACHE_THRESHOLD_MS + */ + while (counter - stable_cnt < delta) + counter = read_sysreg_s(SYS_AMEVCNTR0_EL0(1)); + + local_irq_save(flags); + stable_cnt = read_sysreg_s(SYS_AMEVCNTR0_EL0(1)) - stable_cnt; + nonstable_cnt = read_sysreg_s(SYS_AMEVCNTR0_EL0(0)) - nonstable_cnt; + local_irq_restore(flags); + + cpufreq_khz = div64_u64(freq * nonstable_cnt, stable_cnt) / 1000; +} + +static int __init arch_cpufreq_init(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + if (cpu_has_amu_feat(cpu)) { + smp_call_function_single(cpu, __arch_cpufreq_init, NULL, 1); + return 0; + } + } + return 0; +} + +late_initcall(arch_cpufreq_init); + void update_freq_counters_refs(void) { this_cpu_write(arch_core_cycles_prev, read_corecnt()); -- Gitee From e0fb476153cfe3d4da799dec23654923e6b4eb3d Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Mon, 30 Jan 2023 14:42:58 +0800 Subject: [PATCH 1093/2138] anolis: arm64: Speed up address bits (physical/virtual) reading in /proc/cpuinfo ANBZ: #9696 commit 0f4ab4308b4e ("anolis: arm64: Expose address bits (physical/virtual) via cpuinfo") exposes the physical and virtual address bits for ARM CPUs in /proc/cpuinfo. Each acquisition of these values will read SYS_ID_AA64MMFR0_EL1 and SYS_ID_AA64MMFR2_EL1 registers by read_sanitised_ftr_reg(), which could be slow when /proc/cpuinfo is accessed frequently during startup. Since the values of these registers will not be changed, speed up reading of /proc/cpuinfo by using the stored register values in struct cpuinfo_arm64. 
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/1136 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3657 --- arch/arm64/include/asm/cpufeature.h | 8 ++------ arch/arm64/kernel/cpuinfo.c | 3 ++- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 0513c4ba9d9f..aebd82fa854f 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -862,12 +862,10 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange) } } -static inline u32 id_aa64mmfr0_pa_range_bits(void) +static inline u32 id_aa64mmfr0_pa_range_bits(u64 mmfr0) { - u64 mmfr0; u32 parange; - mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); parange = cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT); return id_aa64mmfr0_parange_to_phys_shift(parange); @@ -882,12 +880,10 @@ static inline u32 id_aa64mmfr2_varange_to_virt_shift(int varange) } } -static inline u32 id_aa64mmfr2_va_range_bits(void) +static inline u32 id_aa64mmfr2_va_range_bits(u64 mmfr2) { - u64 mmfr2; u32 varange; - mmfr2 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1); varange = cpuid_feature_extract_unsigned_field(mmfr2, ID_AA64MMFR2_EL1_VARange_SHIFT); return id_aa64mmfr2_varange_to_virt_shift(varange); } diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index e3650d7e5fed..fab6cde98b17 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -245,7 +245,8 @@ static int c_show(struct seq_file *m, void *v) seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr)); seq_printf(m, "CPU revision\t: %d\n", MIDR_REVISION(midr)); seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", - id_aa64mmfr0_pa_range_bits(), id_aa64mmfr2_va_range_bits()); + id_aa64mmfr0_pa_range_bits(cpuinfo->reg_id_aa64mmfr0), + id_aa64mmfr2_va_range_bits(cpuinfo->reg_id_aa64mmfr2)); 
freq = cpufreq_get(cpu); if (freq == 0) -- Gitee From 5605af993948fbb7274eba2276e4c9b82ede8371 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Wed, 8 Feb 2023 11:37:13 +0800 Subject: [PATCH 1094/2138] anolis: arm64: Speed up the CPU frequency reading in /proc/cpuinfo ANBZ: #9696 Current "CPU MHz" number in /proc/cpuinfo on arm64 is accessed by cpufreq_get() if the cpufreq driver is registered. Since cpufreq_get() needs to sample, it will be too slow when /proc/cpuinfo is accessed frequently. To avoid this problem, return a cached value in struct arch_cpufreq_sample when the time between the current visit and the last visit has not exceeded the threshold, which is set as 100 ms. Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/1136 Signed-off-by: Shawn Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3657 --- arch/arm64/kernel/cpuinfo.c | 4 +--- arch/arm64/kernel/topology.c | 37 +++++++++++++++++++++++++++++++++++- 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index fab6cde98b17..77cd06f2144f 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -248,9 +248,7 @@ static int c_show(struct seq_file *m, void *v) id_aa64mmfr0_pa_range_bits(cpuinfo->reg_id_aa64mmfr0), id_aa64mmfr2_va_range_bits(cpuinfo->reg_id_aa64mmfr2)); - freq = cpufreq_get(cpu); - if (freq == 0) - freq = arch_cpufreq_get_khz(cpu); + freq = arch_cpufreq_get_khz(cpu); if (freq) seq_printf(m, "CPU MHz\t\t: %u.%03u\n", freq / 1000, freq % 1000); seq_puts(m, "\n"); diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 8f0693cee808..3466387443a7 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -74,9 +74,44 @@ int __init parse_acpi_topology(void) static unsigned int cpufreq_khz; +struct arch_cpufreq_sample { + unsigned int khz; + ktime_t time; +}; + +static DEFINE_PER_CPU(struct arch_cpufreq_sample, 
samples); + +#define ARCH_CPUFREQ_CACHE_THRESHOLD_MS 100 + +static void arch_cpufreq_snapshot_cpu(int cpu, ktime_t now) +{ + s64 time_delta = ktime_ms_delta(now, per_cpu(samples.time, cpu)); + struct arch_cpufreq_sample *s; + + /* Don't bother re-computing within the cache threshold time. */ + if (time_delta < ARCH_CPUFREQ_CACHE_THRESHOLD_MS) + return; + + s = per_cpu_ptr(&samples, cpu); + + s->khz = cpufreq_get(cpu); + if (s->khz) + s->time = ktime_get(); +} + unsigned int arch_cpufreq_get_khz(int cpu) { - return cpufreq_khz; + unsigned int new_cpufreq; + + arch_cpufreq_snapshot_cpu(cpu, ktime_get()); + + new_cpufreq = per_cpu(samples.khz, cpu); + + /* + * If the cpufreq driver can provide a value, use it. + * Otherwise use the cpufreq_khz. + */ + return new_cpufreq ? new_cpufreq : cpufreq_khz; } #ifdef CONFIG_ARM64_AMU_EXTN -- Gitee From 3837951501525dabc1c3b1319af87928f1c4c894 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 13 Sep 2023 14:51:08 +0200 Subject: [PATCH 1095/2138] mm/rmap: drop stale comment in page_add_anon_rmap and hugepage_add_anon_rmap() ANBZ: #9728 commit fd63908706f79c963946a77b7f352db5431deed5 upstream Patch series "Anon rmap cleanups". Some cleanups around rmap for anon pages. I'm working on more cleanups also around file rmap -- also to handle the "compound" parameter internally only and to let hugetlb use page_add_file_rmap(), but these changes make sense separately. This patch (of 6): That comment was added in commit 5dbe0af47f8a ("mm: fix kernel BUG at mm/rmap.c:1017!") to document why we can see vma->vm_end getting adjusted concurrently due to a VMA split. However, the optimized locking code was changed again in bf181b9f9d8 ("mm anon rmap: replace same_anon_vma linked list with an interval tree."). ... and later, the comment was changed in commit 0503ea8f5ba7 ("mm/mmap: remove __vma_adjust()") to talk about "vma_merge" although the original issue was with VMA splitting. Let's just remove that comment. 
Nowadays, it's outdated, imprecise and confusing. Link: https://lkml.kernel.org/r/20230913125113.313322-1-david@redhat.com Link: https://lkml.kernel.org/r/20230913125113.313322-2-david@redhat.com Signed-off-by: David Hildenbrand Cc: Mike Kravetz Cc: Muchun Song Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3681 --- mm/rmap.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index 9f795b93cf40..61ad50b9ed9f 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1245,7 +1245,6 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); if (likely(!folio_test_ksm(folio))) { - /* address might be in next vma when migration races vma_merge */ if (first) __page_set_anon_rmap(folio, page, vma, address, !!(flags & RMAP_EXCLUSIVE)); @@ -2549,7 +2548,6 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, BUG_ON(!folio_test_locked(folio)); BUG_ON(!anon_vma); - /* address might be in next vma when migration races vma_merge */ first = atomic_inc_and_test(&folio->_entire_mapcount); VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); -- Gitee From 865bb3c5cf5355edfbf421a3335f0da1abedfc09 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 13 Sep 2023 14:51:09 +0200 Subject: [PATCH 1096/2138] mm/rmap: move SetPageAnonExclusive out of __page_set_anon_rmap() ANBZ: #9728 commit c66db8c0702c0ab741ecfd5e12b323ff49fe9089 upstream Let's handle it in the caller. No need to pass the page. While at it, rename the function to __folio_set_anon() and pass "bool exclusive" instead of "int exclusive". 
Link: https://lkml.kernel.org/r/20230913125113.313322-3-david@redhat.com Signed-off-by: David Hildenbrand Cc: Matthew Wilcox Cc: Mike Kravetz Cc: Muchun Song Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3681 --- mm/rmap.c | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index 61ad50b9ed9f..f76f0f23725c 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1122,27 +1122,25 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) } /** - * __page_set_anon_rmap - set up new anonymous rmap - * @folio: Folio which contains page. - * @page: Page to add to rmap. - * @vma: VM area to add page to. + * __folio_set_anon - set up a new anonymous rmap for a folio + * @folio: The folio to set up the new anonymous rmap for. + * @vma: VM area to add the folio to. * @address: User virtual address of the mapping - * @exclusive: the page is exclusively owned by the current process + * @exclusive: Whether the folio is exclusive to the process. */ -static void __page_set_anon_rmap(struct folio *folio, struct page *page, - struct vm_area_struct *vma, unsigned long address, int exclusive) +static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, + unsigned long address, bool exclusive) { struct anon_vma *anon_vma = vma->anon_vma; BUG_ON(!anon_vma); if (folio_test_anon(folio)) - goto out; + return; /* - * If the page isn't exclusively mapped into this vma, - * we must use the _oldest_ possible anon_vma for the - * page mapping! + * If the folio isn't exclusive to this vma, we must use the _oldest_ + * possible anon_vma for the folio mapping! 
*/ if (!exclusive) anon_vma = anon_vma->root; @@ -1156,9 +1154,6 @@ static void __page_set_anon_rmap(struct folio *folio, struct page *page, anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); folio->index = linear_page_index(vma, address); -out: - if (exclusive) - SetPageAnonExclusive(page); } /** @@ -1246,11 +1241,13 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, if (likely(!folio_test_ksm(folio))) { if (first) - __page_set_anon_rmap(folio, page, vma, address, - !!(flags & RMAP_EXCLUSIVE)); + __folio_set_anon(folio, vma, address, + !!(flags & RMAP_EXCLUSIVE)); else __page_check_anon_rmap(folio, page, vma, address); } + if (flags & RMAP_EXCLUSIVE) + SetPageAnonExclusive(page); mlock_vma_folio(folio, vma, compound); } @@ -1289,7 +1286,8 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, } __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); - __page_set_anon_rmap(folio, &folio->page, vma, address, 1); + __folio_set_anon(folio, vma, address, true); + SetPageAnonExclusive(&folio->page); } /** @@ -2552,8 +2550,10 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); if (first) - __page_set_anon_rmap(folio, page, vma, address, - !!(flags & RMAP_EXCLUSIVE)); + __folio_set_anon(folio, vma, address, + !!(flags & RMAP_EXCLUSIVE)); + if (flags & RMAP_EXCLUSIVE) + SetPageAnonExclusive(page); } void hugepage_add_new_anon_rmap(struct folio *folio, @@ -2563,6 +2563,7 @@ void hugepage_add_new_anon_rmap(struct folio *folio, /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); folio_clear_hugetlb_restore_reserve(folio); - __page_set_anon_rmap(folio, &folio->page, vma, address, 1); + __folio_set_anon(folio, vma, address, true); + SetPageAnonExclusive(&folio->page); } #endif /* CONFIG_HUGETLB_PAGE */ -- 
Gitee From 4a71279cb760ade9d8f54972bb57a5dc07866299 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 13 Sep 2023 14:51:10 +0200 Subject: [PATCH 1097/2138] mm/rmap: move folio_test_anon() check out of __folio_set_anon() ANBZ: #9728 commit c5c540034747dfe450f64d1151081a6080daa8f9 upstream Let's handle it in the caller; no need for the "first" check based on the mapcount. We really only end up with !anon pages in page_add_anon_rmap() via do_swap_page(), where we hold the folio lock. So races are not possible. Add a VM_WARN_ON_FOLIO() to make sure that we really hold the folio lock. In the future, we might want to let do_swap_page() use folio_add_new_anon_rmap() on new pages instead: however, we might have to pass then whether the folio is exclusive or not. So keep it in there for now. For hugetlb we never expect to have a non-anon page in hugepage_add_anon_rmap(). Remove that code, along with some other checks that are either not required or were checked in hugepage_add_new_anon_rmap() already. Link: https://lkml.kernel.org/r/20230913125113.313322-4-david@redhat.com Signed-off-by: David Hildenbrand Cc: Matthew Wilcox Cc: Mike Kravetz Cc: Muchun Song Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3681 --- mm/rmap.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index f76f0f23725c..65de3832123e 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1135,9 +1135,6 @@ static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, BUG_ON(!anon_vma); - if (folio_test_anon(folio)) - return; - /* * If the folio isn't exclusive to this vma, we must use the _oldest_ * possible anon_vma for the folio mapping! 
@@ -1239,12 +1236,12 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, if (nr) __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); - if (likely(!folio_test_ksm(folio))) { - if (first) - __folio_set_anon(folio, vma, address, - !!(flags & RMAP_EXCLUSIVE)); - else - __page_check_anon_rmap(folio, page, vma, address); + if (unlikely(!folio_test_anon(folio))) { + VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); + __folio_set_anon(folio, vma, address, + !!(flags & RMAP_EXCLUSIVE)); + } else if (likely(!folio_test_ksm(folio))) { + __page_check_anon_rmap(folio, page, vma, address); } if (flags & RMAP_EXCLUSIVE) SetPageAnonExclusive(page); @@ -2541,17 +2538,13 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { struct folio *folio = page_folio(page); - struct anon_vma *anon_vma = vma->anon_vma; int first; - BUG_ON(!folio_test_locked(folio)); - BUG_ON(!anon_vma); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + first = atomic_inc_and_test(&folio->_entire_mapcount); VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); - if (first) - __folio_set_anon(folio, vma, address, - !!(flags & RMAP_EXCLUSIVE)); if (flags & RMAP_EXCLUSIVE) SetPageAnonExclusive(page); } -- Gitee From ce4f714d9eb7894143ba47a00d9e8047d49f9ff3 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 13 Sep 2023 14:51:11 +0200 Subject: [PATCH 1098/2138] mm/rmap: warn on new PTE-mapped folios in page_add_anon_rmap() ANBZ: #9728 commit a1f34ee1de2c3a55bc2a6b9a38e1ecd2830dcc03 upstream If swapin code would ever decide to not use order-0 pages and supply a PTE-mapped large folio, we will have to change how we call __folio_set_anon() -- eventually with exclusive=false and an adjusted address. For now, let's add a VM_WARN_ON_FOLIO() with a comment about the situation. 
Link: https://lkml.kernel.org/r/20230913125113.313322-5-david@redhat.com Signed-off-by: David Hildenbrand Cc: Matthew Wilcox Cc: Mike Kravetz Cc: Muchun Song Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3681 --- mm/rmap.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/mm/rmap.c b/mm/rmap.c index 65de3832123e..9b40c3feba3e 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1238,6 +1238,13 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, if (unlikely(!folio_test_anon(folio))) { VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); + /* + * For a PTE-mapped large folio, we only know that the single + * PTE is exclusive. Further, __folio_set_anon() might not get + * folio->index right when not given the address of the head + * page. + */ + VM_WARN_ON_FOLIO(folio_test_large(folio) && !compound, folio); __folio_set_anon(folio, vma, address, !!(flags & RMAP_EXCLUSIVE)); } else if (likely(!folio_test_ksm(folio))) { -- Gitee From 634c88aec00fd451d406328bcd66b6dc5caaee72 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 13 Sep 2023 14:51:12 +0200 Subject: [PATCH 1099/2138] mm/rmap: simplify PageAnonExclusive sanity checks when adding anon rmap ANBZ: #9728 commit 132b180f06a74ddfc526709928036db3b7a1cf6d upstream Let's sanity-check PageAnonExclusive vs. mapcount in page_add_anon_rmap() and hugepage_add_anon_rmap() after setting PageAnonExclusive simply by re-reading the mapcounts. We can stop initializing the "first" variable in page_add_anon_rmap() and no longer need an atomic_inc_and_test() in hugepage_add_anon_rmap(). While at it, switch to VM_WARN_ON_FOLIO(). 
[david@redhat.com: update check for doubly-mapped page] Link: https://lkml.kernel.org/r/d8e5a093-2e22-c14b-7e64-6da280398d9f@redhat.com Link: https://lkml.kernel.org/r/20230913125113.313322-6-david@redhat.com Signed-off-by: David Hildenbrand Cc: Matthew Wilcox Cc: Mike Kravetz Cc: Muchun Song Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3681 --- mm/rmap.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index 9b40c3feba3e..ed4b602bcbd5 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1199,7 +1199,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, atomic_t *mapped = &folio->_nr_pages_mapped; int nr = 0, nr_pmdmapped = 0; bool compound = flags & RMAP_COMPOUND; - bool first = true; + bool first; /* Is page being mapped by PTE? Is this its first map to be added? */ if (likely(!compound)) { @@ -1228,9 +1228,6 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, } } - VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); - VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); - if (nr_pmdmapped) __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped); if (nr) @@ -1252,6 +1249,10 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, } if (flags & RMAP_EXCLUSIVE) SetPageAnonExclusive(page); + /* While PTE-mapping a THP we have a PMD and a PTE mapping. 
*/ + VM_WARN_ON_FOLIO((atomic_read(&page->_mapcount) > 0 || + (folio_test_large(folio) && folio_entire_mapcount(folio) > 1)) && + PageAnonExclusive(page), folio); mlock_vma_folio(folio, vma, compound); } @@ -2545,15 +2546,14 @@ void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { struct folio *folio = page_folio(page); - int first; VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); - first = atomic_inc_and_test(&folio->_entire_mapcount); - VM_BUG_ON_PAGE(!first && (flags & RMAP_EXCLUSIVE), page); - VM_BUG_ON_PAGE(!first && PageAnonExclusive(page), page); + atomic_inc(&folio->_entire_mapcount); if (flags & RMAP_EXCLUSIVE) SetPageAnonExclusive(page); + VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 && + PageAnonExclusive(page), folio); } void hugepage_add_new_anon_rmap(struct folio *folio, -- Gitee From 2177373877fe28fc77f6029143afedc5e575886e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 13 Sep 2023 14:51:13 +0200 Subject: [PATCH 1100/2138] mm/rmap: pass folio to hugepage_add_anon_rmap() ANBZ: #9728 commit 09c550508a4b8f7844b197cc16877dd0f7c42d8f upstream Let's pass a folio; we are always mapping the entire thing. 
Link: https://lkml.kernel.org/r/20230913125113.313322-7-david@redhat.com Signed-off-by: David Hildenbrand Cc: Matthew Wilcox Cc: Mike Kravetz Cc: Muchun Song Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3681 --- include/linux/rmap.h | 2 +- mm/migrate.c | 2 +- mm/rmap.c | 8 +++----- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b1fb58b435a9..1054c1a09065 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -203,7 +203,7 @@ void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr, void page_remove_rmap(struct page *, struct vm_area_struct *, bool compound); -void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, +void hugepage_add_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address, rmap_t flags); void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); diff --git a/mm/migrate.c b/mm/migrate.c index c5ed8caf6a40..6f485a175f75 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -249,7 +249,7 @@ static bool remove_migration_pte(struct folio *folio, pte = arch_make_huge_pte(pte, shift, vma->vm_flags); if (folio_test_anon(folio)) - hugepage_add_anon_rmap(new, vma, pvmw.address, + hugepage_add_anon_rmap(folio, vma, pvmw.address, rmap_flags); else page_dup_file_rmap(new, true); diff --git a/mm/rmap.c b/mm/rmap.c index ed4b602bcbd5..d24e2c36372e 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2542,18 +2542,16 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) * * RMAP_COMPOUND is ignored. 
*/ -void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, +void hugepage_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { - struct folio *folio = page_folio(page); - VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); atomic_inc(&folio->_entire_mapcount); if (flags & RMAP_EXCLUSIVE) - SetPageAnonExclusive(page); + SetPageAnonExclusive(&folio->page); VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 && - PageAnonExclusive(page), folio); + PageAnonExclusive(&folio->page), folio); } void hugepage_add_new_anon_rmap(struct folio *folio, -- Gitee From a90b60662ffb0c13d960567d4060c87660da3eaa Mon Sep 17 00:00:00 2001 From: Yin Fengwei Date: Mon, 18 Sep 2023 15:33:16 +0800 Subject: [PATCH 1101/2138] mm: add functions folio_in_range() and folio_within_vma() ANBZ: #9728 commit 28e566572aacdc551e24649e57cc9f04ba880cd2 upstream Patch series "support large folio for mlock", v3. Yu mentioned at [1] about the mlock() can't be applied to large folio. I leant the related code and here is my understanding: - For RLIMIT_MEMLOCK related, there is no problem. Because the RLIMIT_MEMLOCK statistics is not related underneath page. That means underneath page mlock or munlock doesn't impact the RLIMIT_MEMLOCK statistics collection which is always correct. - For keeping the page in RAM, there is no problem either. At least, during try_to_unmap_one(), once detect the VMA has VM_LOCKED bit set in vm_flags, the folio will be kept whatever the folio is mlocked or not. So the function of mlock for large folio works. But it's not optimized because the page reclaim needs scan these large folio and may split them. 
This series identified the large folio for mlock to four types: - The large folio is in VM_LOCKED range and fully mapped to the range - The large folio is in the VM_LOCKED range but not fully mapped to the range - The large folio cross VM_LOCKED VMA boundary - The large folio cross last level page table boundary For the first type, we mlock large folio so page reclaim will skip it. For the second/third type, we don't mlock large folio. As the pages not mapped to VM_LOACKED range are mapped to none VM_LOCKED range, if system is in memory pressure situation, the large folio can be picked by page reclaim and split. Then the pages not mapped to VM_LOCKED range can be reclaimed. For the fourth type, we don't mlock large folio because locking one page table lock can't prevent the part in another last level page table being unmapped. Thanks to Ryan for pointing this out. To check whether the folio is fully mapped to the range, PTEs needs be checked to see whether the page of folio is associated. Which needs take page table lock and is heavy operation. So far, the only place needs this check is madvise and page reclaim. These functions already have their own PTE iterator. patch1 introduce API to check whether large folio is in VMA range. patch2 make page reclaim/mlock_vma_folio/munlock_vma_folio support large folio mlock/munlock. patch3 make mlock/munlock syscall support large folio. Yu also mentioned a race which can make folio unevictable after munlock during RFC v2 discussion [3]: We decided that race issue didn't block this series based on: - That race issue was not introduced by this series - We had a looks-ok fix for that race issue. 
Need to wait for mlock_count fixing patch as Yosry Ahmed suggested [4] [1] https://lore.kernel.org/linux-mm/CAOUHufbtNPkdktjt_5qM45GegVO-rCFOMkSh0HQminQ12zsV8Q@mail.gmail.com/ [2] https://lore.kernel.org/linux-mm/20230809061105.3369958-1-fengwei.yin@intel.com/ [3] https://lore.kernel.org/linux-mm/CAOUHufZ6=9P_=CAOQyw0xw-3q707q-1FVV09dBNDC-hpcpj2Pg@mail.gmail.com/ This patch (of 3): folio_in_range() will be used to check whether the folio is mapped to specific VMA and whether the mapping address of folio is in the range. Also a helper function folio_within_vma() to check whether folio is in the range of vma based on folio_in_range(). Link: https://lkml.kernel.org/r/20230918073318.1181104-1-fengwei.yin@intel.com Link: https://lkml.kernel.org/r/20230918073318.1181104-2-fengwei.yin@intel.com Signed-off-by: Yin Fengwei Cc: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Ryan Roberts Cc: Yang Shi Cc: Yosry Ahmed Cc: Yu Zhao Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3681 --- mm/internal.h | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/mm/internal.h b/mm/internal.h index f773db493a99..b1a853b60a8c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -655,6 +655,56 @@ extern long faultin_page_range(struct mm_struct *mm, unsigned long start, unsigned long end, bool write, int *locked); extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, unsigned long bytes); + +/* + * NOTE: This function can't tell whether the folio is "fully mapped" in the + * range. + * "fully mapped" means all the pages of folio is associated with the page + * table of range while this function just check whether the folio range is + * within the range [start, end). Funcation caller nees to do page table + * check if it cares about the page table association. 
+ * + * Typical usage (like mlock or madvise) is: + * Caller knows at least 1 page of folio is associated with page table of VMA + * and the range [start, end) is intersect with the VMA range. Caller wants + * to know whether the folio is fully associated with the range. It calls + * this function to check whether the folio is in the range first. Then checks + * the page table to know whether the folio is fully mapped to the range. + */ +static inline bool +folio_within_range(struct folio *folio, struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + pgoff_t pgoff, addr; + unsigned long vma_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + + VM_WARN_ON_FOLIO(folio_test_ksm(folio), folio); + if (start > end) + return false; + + if (start < vma->vm_start) + start = vma->vm_start; + + if (end > vma->vm_end) + end = vma->vm_end; + + pgoff = folio_pgoff(folio); + + /* if folio start address is not in vma range */ + if (!in_range(pgoff, vma->vm_pgoff, vma_pglen)) + return false; + + addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); + + return !(addr < start || end - addr < folio_size(folio)); +} + +static inline bool +folio_within_vma(struct folio *folio, struct vm_area_struct *vma) +{ + return folio_within_range(folio, vma, vma->vm_start, vma->vm_end); +} + /* * mlock_vma_folio() and munlock_vma_folio(): * should be called with vma's mmap_lock held for read or write, -- Gitee From 41ea58829638fb6310c5d3173cbda25cfb9508d2 Mon Sep 17 00:00:00 2001 From: Yin Fengwei Date: Mon, 18 Sep 2023 15:33:17 +0800 Subject: [PATCH 1102/2138] mm: handle large folio when large folio in VM_LOCKED VMA range ANBZ: #9728 commit 1acbc3f936146d1b34987294803ac131bc298ce8 upstream If large folio is in the range of VM_LOCKED VMA, it should be mlocked to avoid being picked by page reclaim. Which may split the large folio and then mlock each pages again. Mlock this kind of large folio to prevent them being picked by page reclaim. 
For the large folio which cross the boundary of VM_LOCKED VMA or not fully mapped to VM_LOCKED VMA, we'd better not to mlock it. So if the system is under memory pressure, this kind of large folio will be split and the pages ouf of VM_LOCKED VMA can be reclaimed. Ideally, for large folio, we should mlock it when the large folio is fully mapped to VMA and munlock it if any page are unmampped from VMA. But it's not easy to detect whether the large folio is fully mapped to VMA in some cases (like add/remove rmap). So we update mlock_vma_folio() and munlock_vma_folio() to mlock/munlock the folio according to vma->vm_flags. Let caller to decide whether they should call these two functions. For add rmap, only mlock normal 4K folio and postpone large folio handling to page reclaim phase. It is possible to reuse page table iterator to detect whether folio is fully mapped or not during page reclaim phase. For remove rmap, invoke munlock_vma_folio() to munlock folio unconditionly because rmap makes folio not fully mapped to VMA. Link: https://lkml.kernel.org/r/20230918073318.1181104-3-fengwei.yin@intel.com Signed-off-by: Yin Fengwei Cc: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Ryan Roberts Cc: Yang Shi Cc: Yosry Ahmed Cc: Yu Zhao Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3681 --- mm/internal.h | 23 ++++++++++-------- mm/rmap.c | 66 ++++++++++++++++++++++++++++++++++++++++++--------- 2 files changed, 68 insertions(+), 21 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index b1a853b60a8c..d3df1271fe0e 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -713,14 +713,10 @@ folio_within_vma(struct folio *folio, struct vm_area_struct *vma) * mlock is usually called at the end of page_add_*_rmap(), munlock at * the end of page_remove_rmap(); but new anon folios are managed by * folio_add_lru_vma() calling mlock_new_folio(). 
- * - * @compound is used to include pmd mappings of THPs, but filter out - * pte mappings of THPs, which cannot be consistently counted: a pte - * mapping of the THP head cannot be distinguished by the page alone. */ void mlock_folio(struct folio *folio); static inline void mlock_vma_folio(struct folio *folio, - struct vm_area_struct *vma, bool compound) + struct vm_area_struct *vma) { /* * The VM_SPECIAL check here serves two purposes. @@ -730,17 +726,24 @@ static inline void mlock_vma_folio(struct folio *folio, * file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may * still be set while VM_SPECIAL bits are added: so ignore it then. */ - if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) && - (compound || !folio_test_large(folio))) + if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED)) mlock_folio(folio); } void munlock_folio(struct folio *folio); static inline void munlock_vma_folio(struct folio *folio, - struct vm_area_struct *vma, bool compound) + struct vm_area_struct *vma) { - if (unlikely(vma->vm_flags & VM_LOCKED) && - (compound || !folio_test_large(folio))) + /* + * munlock if the function is called. Ideally, we should only + * do munlock if any page of folio is unmapped from VMA and + * cause folio not fully mapped to VMA. + * + * But it's not easy to confirm that's the situation. So we + * always munlock the folio and page reclaim will correct it + * if it's wrong. 
+ */ + if (unlikely(vma->vm_flags & VM_LOCKED)) munlock_folio(folio); } diff --git a/mm/rmap.c b/mm/rmap.c index d24e2c36372e..c6bb2339e35b 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -798,6 +798,7 @@ struct folio_referenced_arg { unsigned long vm_flags; struct mem_cgroup *memcg; }; + /* * arg: folio_referenced_arg will be passed */ @@ -807,17 +808,33 @@ static bool folio_referenced_one(struct folio *folio, struct folio_referenced_arg *pra = arg; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); int referenced = 0; + unsigned long start = address, ptes = 0; while (page_vma_mapped_walk(&pvmw)) { address = pvmw.address; - if ((vma->vm_flags & VM_LOCKED) && - (!folio_test_large(folio) || !pvmw.pte)) { - /* Restore the mlock which got missed */ - mlock_vma_folio(folio, vma, !pvmw.pte); - page_vma_mapped_walk_done(&pvmw); - pra->vm_flags |= VM_LOCKED; - return false; /* To break the loop */ + if (vma->vm_flags & VM_LOCKED) { + if (!folio_test_large(folio) || !pvmw.pte) { + /* Restore the mlock which got missed */ + mlock_vma_folio(folio, vma); + page_vma_mapped_walk_done(&pvmw); + pra->vm_flags |= VM_LOCKED; + return false; /* To break the loop */ + } + /* + * For large folio fully mapped to VMA, will + * be handled after the pvmw loop. + * + * For large folio cross VMA boundaries, it's + * expected to be picked by page reclaim. But + * should skip reference of pages which are in + * the range of VM_LOCKED vma. As page reclaim + * should just count the reference of pages out + * the range of VM_LOCKED vma. 
+ */ + ptes++; + pra->mapcount--; + continue; } if (pvmw.pte) { @@ -842,6 +859,23 @@ static bool folio_referenced_one(struct folio *folio, pra->mapcount--; } + if ((vma->vm_flags & VM_LOCKED) && + folio_test_large(folio) && + folio_within_vma(folio, vma)) { + unsigned long s_align, e_align; + + s_align = ALIGN_DOWN(start, PMD_SIZE); + e_align = ALIGN_DOWN(start + folio_size(folio) - 1, PMD_SIZE); + + /* folio doesn't cross page table boundary and fully mapped */ + if ((s_align == e_align) && (ptes == folio_nr_pages(folio))) { + /* Restore the mlock which got missed */ + mlock_vma_folio(folio, vma); + pra->vm_flags |= VM_LOCKED; + return false; /* To break the loop */ + } + } + if (referenced) folio_clear_idle(folio); if (folio_test_clear_young(folio)) @@ -1254,7 +1288,14 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, (folio_test_large(folio) && folio_entire_mapcount(folio) > 1)) && PageAnonExclusive(page), folio); - mlock_vma_folio(folio, vma, compound); + /* + * For large folio, only mlock it if it's fully mapped to VMA. It's + * not easy to check whether the large folio is fully mapped to VMA + * here. Only mlock normal 4K folio and leave page reclaim to handle + * large folio. + */ + if (!folio_test_large(folio)) + mlock_vma_folio(folio, vma); } /** @@ -1354,7 +1395,9 @@ void folio_add_file_rmap_range(struct folio *folio, struct page *page, if (nr) __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr); - mlock_vma_folio(folio, vma, compound); + /* See comments in page_add_anon_rmap() */ + if (!folio_test_large(folio)) + mlock_vma_folio(folio, vma); } /** @@ -1465,7 +1508,7 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma, * it's only reliable while mapped. 
*/ - munlock_vma_folio(folio, vma, compound); + munlock_vma_folio(folio, vma); } /* @@ -1530,7 +1573,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, if (!(flags & TTU_IGNORE_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* Restore the mlock which got missed */ - mlock_vma_folio(folio, vma, false); + if (!folio_test_large(folio)) + mlock_vma_folio(folio, vma); page_vma_mapped_walk_done(&pvmw); ret = false; break; -- Gitee From a54639b276a9c9fd11e9c1fcf1f1e8c0b65fad71 Mon Sep 17 00:00:00 2001 From: Yin Fengwei Date: Mon, 18 Sep 2023 15:33:18 +0800 Subject: [PATCH 1103/2138] mm: mlock: update mlock_pte_range to handle large folio ANBZ: #9728 commit dc68badcede4ec3b4e5cdfcb8f678670220ac2ca upstream Current kernel only lock base size folio during mlock syscall. Add large folio support with following rules: - Only mlock large folio when it's in VM_LOCKED VMA range and fully mapped to page table. fully mapped folio is required as if folio is not fully mapped to a VM_LOCKED VMA, if system is in memory pressure, page reclaim is allowed to pick up this folio, split it and reclaim the pages which are not in VM_LOCKED VMA. - munlock will apply to the large folio which is in VMA range or cross the VMA boundary. This is required to handle the case that the large folio is mlocked, later the VMA is split in the middle of large folio. 
Link: https://lkml.kernel.org/r/20230918073318.1181104-4-fengwei.yin@intel.com Signed-off-by: Yin Fengwei Cc: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Ryan Roberts Cc: Yang Shi Cc: Yosry Ahmed Cc: Yu Zhao Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3681 --- mm/mlock.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 64 insertions(+), 2 deletions(-) diff --git a/mm/mlock.c b/mm/mlock.c index 06bdfab83b58..42b6865f8f82 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -305,6 +305,58 @@ void munlock_folio(struct folio *folio) local_unlock(&mlock_fbatch.lock); } +static inline unsigned int folio_mlock_step(struct folio *folio, + pte_t *pte, unsigned long addr, unsigned long end) +{ + unsigned int count, i, nr = folio_nr_pages(folio); + unsigned long pfn = folio_pfn(folio); + pte_t ptent = ptep_get(pte); + + if (!folio_test_large(folio)) + return 1; + + count = pfn + nr - pte_pfn(ptent); + count = min_t(unsigned int, count, (end - addr) >> PAGE_SHIFT); + + for (i = 0; i < count; i++, pte++) { + pte_t entry = ptep_get(pte); + + if (!pte_present(entry)) + break; + if (pte_pfn(entry) - pfn >= nr) + break; + } + + return i; +} + +static inline bool allow_mlock_munlock(struct folio *folio, + struct vm_area_struct *vma, unsigned long start, + unsigned long end, unsigned int step) +{ + /* + * For unlock, allow munlock large folio which is partially + * mapped to VMA. As it's possible that large folio is + * mlocked and VMA is split later. + * + * During memory pressure, such kind of large folio can + * be split. And the pages are not in VM_LOCKed VMA + * can be reclaimed. 
+ */ + if (!(vma->vm_flags & VM_LOCKED)) + return true; + + /* folio not in range [start, end), skip mlock */ + if (!folio_within_range(folio, vma, start, end)) + return false; + + /* folio is not fully mapped, skip mlock */ + if (step != folio_nr_pages(folio)) + return false; + + return true; +} + static int mlock_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) @@ -314,6 +366,8 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, pte_t *start_pte, *pte; pte_t ptent; struct folio *folio; + unsigned int step = 1; + unsigned long start = addr; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { @@ -334,6 +388,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, walk->action = ACTION_AGAIN; return 0; } + for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) { ptent = ptep_get(pte); if (!pte_present(ptent)) @@ -341,12 +396,19 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, folio = vm_normal_folio(vma, addr, ptent); if (!folio || folio_is_zone_device(folio)) continue; - if (folio_test_large(folio)) - continue; + + step = folio_mlock_step(folio, pte, addr, end); + if (!allow_mlock_munlock(folio, vma, start, end, step)) + goto next_entry; + if (vma->vm_flags & VM_LOCKED) mlock_folio(folio); else munlock_folio(folio); + +next_entry: + pte += step - 1; + addr += (step - 1) << PAGE_SHIFT; } pte_unmap(start_pte); out: -- Gitee From 7c20c65de4be35599435775b7f6d0e1212f621ba Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 23 Oct 2023 23:38:41 -0700 Subject: [PATCH 1104/2138] mm: mlock: avoid folio_within_range() on KSM pages ANBZ: #9728 commit b1454b463c217e5bc553acc44b2389d9257c9708 upstream Since commit dc68badcede4 ("mm: mlock: update mlock_pte_range to handle large folio") I've just occasionally seen VM_WARN_ON_FOLIO(folio_test_ksm) warnings from folio_within_range(), in a splurge after testing with KSM hyperactive. 
folio_referenced_one()'s use of folio_within_vma() is safe because it checks folio_test_large() first; but allow_mlock_munlock() needs to do the same to avoid those warnings (or check !folio_test_ksm() itself? Or move either check into folio_within_range()? Hard to tell without more examples of its use). Link: https://lkml.kernel.org/r/23852f6a-5bfa-1ffd-30db-30c5560ad426@google.com Fixes: dc68badcede4 ("mm: mlock: update mlock_pte_range to handle large folio") Signed-off-by: Hugh Dickins Reviewed-by: Yin Fengwei Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Stefan Roesch Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3681 --- mm/mlock.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mm/mlock.c b/mm/mlock.c index 42b6865f8f82..f79d8262c1a0 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -346,6 +346,10 @@ static inline bool allow_mlock_munlock(struct folio *folio, if (!(vma->vm_flags & VM_LOCKED)) return true; + /* folio_within_range() cannot take KSM, but any small folio is OK */ + if (!folio_test_large(folio)) + return true; + /* folio not in range [start, end), skip mlock */ if (!folio_within_range(folio, vma, start, end)) return false; -- Gitee From eeff4685c2506e9eb75a6feac319794cbec73029 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 2 Oct 2023 16:29:47 +0200 Subject: [PATCH 1105/2138] mm/rmap: move SetPageAnonExclusive() out of page_move_anon_rmap() ANBZ: #9728 commit 5ca432896a4ce6d69fffc3298b24c0dd9bdb871f upstream Patch series "mm/rmap: convert page_move_anon_rmap() to folio_move_anon_rmap()". Convert page_move_anon_rmap() to folio_move_anon_rmap(), letting the callers handle PageAnonExclusive. I'm including cleanup patch #3 because it fits into the picture and can be done cleaner by the conversion. 
This patch (of 3): Let's move it into the caller: there is a difference between whether an anon folio can only be mapped by one process (e.g., into one VMA), and whether it is truly exclusive (e.g., no references -- including GUP -- from other processes). Further, for large folios the page might not actually be pointing at the head page of the folio, so it better be handled in the caller. This is a preparation for converting page_move_anon_rmap() to consume a folio. Link: https://lkml.kernel.org/r/20231002142949.235104-1-david@redhat.com Link: https://lkml.kernel.org/r/20231002142949.235104-2-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Suren Baghdasaryan Reviewed-by: Vishal Moola (Oracle) Cc: Mike Kravetz Cc: Muchun Song Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3683 --- mm/huge_memory.c | 1 + mm/hugetlb.c | 4 +++- mm/memory.c | 1 + mm/rmap.c | 1 - 4 files changed, 5 insertions(+), 2 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 635f0f0f6860..41cd1d433f6f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1343,6 +1343,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) pmd_t entry; page_move_anon_rmap(page, vma); + SetPageAnonExclusive(page); folio_unlock(folio); reuse: if (unlikely(unshare)) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 92b955cc5a41..bf9b7ca04d61 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5607,8 +5607,10 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, * owner and can reuse this page. 
*/ if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) { - if (!PageAnonExclusive(&old_folio->page)) + if (!PageAnonExclusive(&old_folio->page)) { page_move_anon_rmap(&old_folio->page, vma); + SetPageAnonExclusive(&old_folio->page); + } if (likely(!unshare)) set_huge_ptep_writable(vma, haddr, ptep); diff --git a/mm/memory.c b/mm/memory.c index 2ac7d0a62c74..d01a3bdff4fb 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3610,6 +3610,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) * sunglasses. Hit it. */ page_move_anon_rmap(vmf->page, vma); + SetPageAnonExclusive(vmf->page); folio_unlock(folio); reuse: if (unlikely(unshare)) { diff --git a/mm/rmap.c b/mm/rmap.c index c6bb2339e35b..6f1ea1491118 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1152,7 +1152,6 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) * folio_test_anon()) will not see one without the other. */ WRITE_ONCE(folio->mapping, anon_vma); - SetPageAnonExclusive(page); } /** -- Gitee From ff7df38879e207fb95faa26a625fda875ddf10d9 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 2 Oct 2023 16:29:48 +0200 Subject: [PATCH 1106/2138] mm/rmap: convert page_move_anon_rmap() to folio_move_anon_rmap() ANBZ: #9728 commit 069686255c16a75b6a796e42df47f5af27b496a4 upstream Let's convert it to consume a folio. 
[akpm@linux-foundation.org: fix kerneldoc] Link: https://lkml.kernel.org/r/20231002142949.235104-3-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Suren Baghdasaryan Reviewed-by: Vishal Moola (Oracle) Cc: Mike Kravetz Cc: Muchun Song Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3683 --- include/linux/rmap.h | 2 +- mm/huge_memory.c | 2 +- mm/hugetlb.c | 2 +- mm/memory.c | 2 +- mm/rmap.c | 16 +++++++--------- 5 files changed, 11 insertions(+), 13 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 1054c1a09065..3c2fc291b071 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -189,7 +189,7 @@ typedef int __bitwise rmap_t; /* * rmap interfaces called when adding or removing pte of page */ -void page_move_anon_rmap(struct page *, struct vm_area_struct *); +void folio_move_anon_rmap(struct folio *, struct vm_area_struct *); void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long address, rmap_t flags); void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 41cd1d433f6f..cfc21c2603cc 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1342,7 +1342,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) if (folio_ref_count(folio) == 1) { pmd_t entry; - page_move_anon_rmap(page, vma); + folio_move_anon_rmap(folio, vma); SetPageAnonExclusive(page); folio_unlock(folio); reuse: diff --git a/mm/hugetlb.c b/mm/hugetlb.c index bf9b7ca04d61..15919eb05e01 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5608,7 +5608,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, */ if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) { if (!PageAnonExclusive(&old_folio->page)) { - page_move_anon_rmap(&old_folio->page, vma); + folio_move_anon_rmap(old_folio, vma); SetPageAnonExclusive(&old_folio->page); } if 
(likely(!unshare)) diff --git a/mm/memory.c b/mm/memory.c index d01a3bdff4fb..241df46d9780 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3609,7 +3609,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) * and the folio is locked, it's dark out, and we're wearing * sunglasses. Hit it. */ - page_move_anon_rmap(vmf->page, vma); + folio_move_anon_rmap(folio, vma); SetPageAnonExclusive(vmf->page); folio_unlock(folio); reuse: diff --git a/mm/rmap.c b/mm/rmap.c index 6f1ea1491118..7a27a2b41802 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1128,19 +1128,17 @@ int folio_total_mapcount(struct folio *folio) } /** - * page_move_anon_rmap - move a page to our anon_vma - * @page: the page to move to our anon_vma - * @vma: the vma the page belongs to + * folio_move_anon_rmap - move a folio to our anon_vma + * @folio: The folio to move to our anon_vma + * @vma: The vma the folio belongs to * - * When a page belongs exclusively to one process after a COW event, - * that page can be moved into the anon_vma that belongs to just that - * process, so the rmap code will not search the parent or sibling - * processes. + * When a folio belongs exclusively to one process after a COW event, + * that folio can be moved into the anon_vma that belongs to just that + * process, so the rmap code will not search the parent or sibling processes. 
*/ -void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) +void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) { void *anon_vma = vma->anon_vma; - struct folio *folio = page_folio(page); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_VMA(!anon_vma, vma); -- Gitee From 97f3423e47945875d01253dcb1e53f2f2a5cf217 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 2 Oct 2023 16:29:49 +0200 Subject: [PATCH 1107/2138] memory: move exclusivity detection in do_wp_page() into wp_can_reuse_anon_folio() ANBZ: #9728 commit dec078cc2181fccf8b134406b86aaacc19f7163f upstream Let's clean up do_wp_page() a bit, removing two labels and making it a easier to read. wp_can_reuse_anon_folio() now only operates on the whole folio. Move the SetPageAnonExclusive() out into do_wp_page(). No need to do this under page lock -- the page table lock is sufficient. Link: https://lkml.kernel.org/r/20231002142949.235104-4-david@redhat.com Signed-off-by: David Hildenbrand Cc: Mike Kravetz Cc: Muchun Song Cc: Suren Baghdasaryan Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3683 --- mm/memory.c | 89 +++++++++++++++++++++++++++-------------------------- 1 file changed, 46 insertions(+), 43 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 241df46d9780..92992fe828a6 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3501,6 +3501,44 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) return ret; } +static bool wp_can_reuse_anon_folio(struct folio *folio, + struct vm_area_struct *vma) +{ + /* + * We have to verify under folio lock: these early checks are + * just an optimization to avoid locking the folio and freeing + * the swapcache if there is little hope that we can reuse. + * + * KSM doesn't necessarily raise the folio refcount. 
+ */ + if (folio_test_ksm(folio) || folio_ref_count(folio) > 3) + return false; + if (!folio_test_lru(folio)) + /* + * We cannot easily detect+handle references from + * remote LRU caches or references to LRU folios. + */ + lru_add_drain(); + if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) + return false; + if (!folio_trylock(folio)) + return false; + if (folio_test_swapcache(folio)) + folio_free_swap(folio); + if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) { + folio_unlock(folio); + return false; + } + /* + * Ok, we've got the only folio reference from our mapping + * and the folio is locked, it's dark out, and we're wearing + * sunglasses. Hit it. + */ + folio_move_anon_rmap(folio, vma); + folio_unlock(folio); + return true; +} + /* * This routine handles present pages, when * * users try to write to a shared page (FAULT_FLAG_WRITE) @@ -3570,49 +3608,14 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) /* * Private mapping: create an exclusive anonymous page copy if reuse * is impossible. We might miss VM_WRITE for FOLL_FORCE handling. + * + * If we encounter a page that is marked exclusive, we must reuse + * the page without further checks. */ - if (folio && folio_test_anon(folio)) { - /* - * If the page is exclusive to this process we must reuse the - * page without further checks. - */ - if (PageAnonExclusive(vmf->page)) - goto reuse; - - /* - * We have to verify under folio lock: these early checks are - * just an optimization to avoid locking the folio and freeing - * the swapcache if there is little hope that we can reuse. - * - * KSM doesn't necessarily raise the folio refcount. - */ - if (folio_test_ksm(folio) || folio_ref_count(folio) > 3) - goto copy; - if (!folio_test_lru(folio)) - /* - * We cannot easily detect+handle references from - * remote LRU caches or references to LRU folios. 
- */ - lru_add_drain(); - if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) - goto copy; - if (!folio_trylock(folio)) - goto copy; - if (folio_test_swapcache(folio)) - folio_free_swap(folio); - if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) { - folio_unlock(folio); - goto copy; - } - /* - * Ok, we've got the only folio reference from our mapping - * and the folio is locked, it's dark out, and we're wearing - * sunglasses. Hit it. - */ - folio_move_anon_rmap(folio, vma); - SetPageAnonExclusive(vmf->page); - folio_unlock(folio); -reuse: + if (folio && folio_test_anon(folio) && + (PageAnonExclusive(vmf->page) || wp_can_reuse_anon_folio(folio, vma))) { + if (!PageAnonExclusive(vmf->page)) + SetPageAnonExclusive(vmf->page); if (unlikely(unshare)) { pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; @@ -3620,7 +3623,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) wp_page_reuse(vmf); return 0; } -copy: + if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma->anon_vma) { pte_unmap_unlock(vmf->pte, vmf->ptl); vma_end_read(vmf->vma); -- Gitee From 0752a10348d63d639190bbc0365bc09c641d92c3 Mon Sep 17 00:00:00 2001 From: Zach O'Keefe Date: Mon, 25 Sep 2023 13:01:10 -0700 Subject: [PATCH 1108/2138] mm/thp: fix "mm: thp: kill __transhuge_page_enabled()" ANBZ: #9728 commit 7a81751fcdeb833acc858e59082688e3020bfe12 upstream The 6.0 commits: commit 9fec51689ff6 ("mm: thp: kill transparent_hugepage_active()") commit 7da4e2cb8b1f ("mm: thp: kill __transhuge_page_enabled()") merged "can we have THPs in this VMA?" logic that was previously done separately by fault-path, khugepaged, and smaps "THPeligible" checks. During the process, the semantics of the fault path check changed in two ways: 1) A VM_NO_KHUGEPAGED check was introduced (also added to smaps path). 2) We no longer checked if non-anonymous memory had a vm_ops->huge_fault handler that could satisfy the fault. 
Previously, this check had been done in create_huge_pud() and create_huge_pmd() routines, but after the changes, we never reach those routines. During the review of the above commits, it was determined that in-tree users weren't affected by the change; most notably, since the only relevant user (in terms of THP) of VM_MIXEDMAP or ->huge_fault is DAX, which is explicitly approved early in approval logic. However, this was a bad assumption to make as it assumes the only reason to support ->huge_fault was for DAX (which is not true in general). Remove the VM_NO_KHUGEPAGED check when not in collapse path and give any ->huge_fault handler a chance to handle the fault. Note that we don't validate the file mode or mapping alignment, which is consistent with the behavior before the aforementioned commits. Link: https://lkml.kernel.org/r/20230925200110.1979606-1-zokeefe@google.com Fixes: 7da4e2cb8b1f ("mm: thp: kill __transhuge_page_enabled()") Reported-by: Saurabh Singh Sengar Signed-off-by: Zach O'Keefe Cc: Yang Shi Cc: Matthew Wilcox Cc: David Hildenbrand Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3683 --- mm/huge_memory.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index cfc21c2603cc..0830d0a177ad 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -86,11 +86,11 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, return in_pf; /* - * Special VMA and hugetlb VMA. + * khugepaged special VMA and hugetlb VMA. * Must be checked after dax since some dax mappings may have * VM_MIXEDMAP set. 
*/ - if (vm_flags & VM_NO_KHUGEPAGED) + if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED)) return false; /* @@ -118,12 +118,18 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, !hugepage_flags_always()))) return false; - /* Only regular file is valid */ - if (!in_pf && file_thp_enabled(vma)) - return true; - - if (!vma_is_anonymous(vma)) + if (!vma_is_anonymous(vma)) { + /* + * Trust that ->huge_fault() handlers know what they are doing + * in fault path. + */ + if (((in_pf || smaps)) && vma->vm_ops->huge_fault) + return true; + /* Only regular file is valid in collapse path */ + if (((!in_pf || smaps)) && file_thp_enabled(vma)) + return true; return false; + } if (vma_is_temporary_stack(vma)) return false; -- Gitee From cf3563ee09af5513555a11026eff3923600d964c Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 7 Dec 2023 16:12:02 +0000 Subject: [PATCH 1109/2138] mm: allow deferred splitting of arbitrary anon large folios ANBZ: #9728 commit 7dc7c5ef6463111991002f24c0aea08afe86f2cc upstream Patch series "Multi-size THP for anonymous memory", v9. A series to implement multi-size THP (mTHP) for anonymous memory (previously called "small-sized THP" and "large anonymous folios"). The objective of this is to improve performance by allocating larger chunks of memory during anonymous page faults: 1) Since SW (the kernel) is dealing with larger chunks of memory than base pages, there are efficiency savings to be had; fewer page faults, batched PTE and RMAP manipulation, reduced lru list, etc. In short, we reduce kernel overhead. This should benefit all architectures. 2) Since we are now mapping physically contiguous chunks of memory, we can take advantage of HW TLB compression techniques. A reduction in TLB pressure speeds up kernel and user space. arm64 systems have 2 mechanisms to coalesce TLB entries; "the contiguous bit" (architectural) and HPA (uarch). 
This version incorporates David's feedback on the core patches (#3, #4) and adds some RB and TB tags (see change log for details). By default, the existing behaviour (and performance) is maintained. The user must explicitly enable multi-size THP to see the performance benefit. This is done via a new sysfs interface (as recommended by David Hildenbrand - thanks to David for the suggestion)! This interface is inspired by the existing per-hugepage-size sysfs interface used by hugetlb, provides full backwards compatibility with the existing PMD-size THP interface, and provides a base for future extensibility. See [9] for detailed discussion of the interface. This series is based on mm-unstable (715b67adf4c8). Prerequisites ============= I'm removing this section on the basis that I don't believe what we were previously calling prerequisites are really prerequisites anymore. We originally defined them when mTHP was a compile-time feature. There is now a runtime control to opt-in to mTHP; when disabled, correctness and performance are as before. When enabled, the code is still correct/robust, but in the absence of the one remaining item (compaction) there may be a performance impact in some corners. See the old list in the v8 cover letter at [8]. And a longer explanation of my thinking here [10]. SUMMARY: I don't think we should hold this series up, waiting for the items on the prerequisites list. I believe this series should be ready now so hopefully can be added to mm-unstable for some testing, then fingers crossed for v6.8. Testing ======= The series includes patches for mm selftests to enlighten the cow and khugepaged tests to explicitly test with multi-size THP, in the same way that PMD-sized THP is tested. The new tests all pass, and no regressions are observed in the mm selftest suite. I've also run my usual kernel compilation and java script benchmarks without any issues. Refer to my performance numbers posted with v6 [6]. 
(These are for multi-size THP only - they do not include the arm64 contpte follow-on series). John Hubbard at Nvidia has indicated dramatic 10x performance improvements for some workloads at [11]. (Observed using v6 of this series as well as the arm64 contpte series). Kefeng Wang at Huawei has also indicated he sees improvements at [12] although there are some latency regressions also. I've also checked that there is no regression in the write fault path when mTHP is disabled using a microbenchmark. I ran it for a baseline kernel, as well as v8 and v9. I repeated on Ampere Altra (bare metal) and Apple M2 (VM): | | m2 vm | altra | |--------------|---------------------|---------------------| | kernel | mean | std_rel | mean | std_rel | |--------------|----------|----------|----------|----------| | baseline | 0.000% | 0.341% | 0.000% | 3.581% | | anonfolio-v8 | 0.005% | 0.272% | 5.068% | 1.128% | | anonfolio-v9 | -0.013% | 0.442% | 0.107% | 1.788% | There is no measurable difference on M2, but altra has a slow down in v8 which is fixed in v9 by moving the THP order check to be inline within thp_vma_allowable_orders(), as suggested by David. This patch (of 10): In preparation for the introduction of anonymous multi-size THP, we would like to be able to split them when they have unmapped subpages, in order to free those unused pages under memory pressure. So remove the artificial requirement that the large folio needed to be at least PMD-sized. Link: https://lkml.kernel.org/r/20231207161211.2374093-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20231207161211.2374093-2-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: Yu Zhao Reviewed-by: Yin Fengwei Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Reviewed-by: Barry Song Tested-by: Kefeng Wang Tested-by: John Hubbard Cc: Alistair Popple Cc: Anshuman Khandual Cc: Catalin Marinas Cc: David Rientjes Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Itaru Kitayama Cc: Kirill A. 
Shutemov Cc: Luis Chamberlain Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3683 --- mm/rmap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index 7a27a2b41802..49e4d86a4f70 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1488,11 +1488,11 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma, __lruvec_stat_mod_folio(folio, idx, -nr); /* - * Queue anon THP for deferred split if at least one + * Queue anon large folio for deferred split if at least one * page of the folio is unmapped and at least one page * is still mapped. */ - if (folio_test_pmd_mappable(folio) && folio_test_anon(folio)) + if (folio_test_large(folio) && folio_test_anon(folio)) if (!compound || nr < nr_pmdmapped) deferred_split_folio(folio); } -- Gitee From be292bbea7449419e32570e28690ddda41ed493e Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 7 Dec 2023 16:12:03 +0000 Subject: [PATCH 1110/2138] mm: non-pmd-mappable, large folios for folio_add_new_anon_rmap() ANBZ: #9728 commit 372cbd4d5a0665bf7e181c72f5e40e1bf59b0b08 upstream In preparation for supporting anonymous multi-size THP, improve folio_add_new_anon_rmap() to allow a non-pmd-mappable, large folio to be passed to it. In this case, all contained pages are accounted using the order-0 folio (or base page) scheme. Link: https://lkml.kernel.org/r/20231207161211.2374093-3-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: Yu Zhao Reviewed-by: Yin Fengwei Reviewed-by: David Hildenbrand Reviewed-by: Barry Song Tested-by: Kefeng Wang Tested-by: John Hubbard Cc: Alistair Popple Cc: Anshuman Khandual Cc: Catalin Marinas Cc: David Rientjes Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Itaru Kitayama Cc: Kirill A. 
Shutemov Cc: Luis Chamberlain Cc: Matthew Wilcox (Oracle) Cc: Vlastimil Babka Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3683 --- mm/rmap.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index 49e4d86a4f70..b086dc957b0c 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1305,32 +1305,44 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, * This means the inc-and-test can be bypassed. * The folio does not have to be locked. * - * If the folio is large, it is accounted as a THP. As the folio + * If the folio is pmd-mappable, it is accounted as a THP. As the folio * is new, it's assumed to be mapped exclusively by a single process. */ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, unsigned long address) { - int nr; + int nr = folio_nr_pages(folio); - VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); + VM_BUG_ON_VMA(address < vma->vm_start || + address + (nr << PAGE_SHIFT) > vma->vm_end, vma); __folio_set_swapbacked(folio); + __folio_set_anon(folio, vma, address, true); - if (likely(!folio_test_pmd_mappable(folio))) { + if (likely(!folio_test_large(folio))) { /* increment count (starts at -1) */ atomic_set(&folio->_mapcount, 0); - nr = 1; + SetPageAnonExclusive(&folio->page); + } else if (!folio_test_pmd_mappable(folio)) { + int i; + + for (i = 0; i < nr; i++) { + struct page *page = folio_page(folio, i); + + /* increment count (starts at -1) */ + atomic_set(&page->_mapcount, 0); + SetPageAnonExclusive(page); + } + + atomic_set(&folio->_nr_pages_mapped, nr); } else { /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED); - nr = folio_nr_pages(folio); + SetPageAnonExclusive(&folio->page); __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr); } 
__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); - __folio_set_anon(folio, vma, address, true); - SetPageAnonExclusive(&folio->page); } /** -- Gitee From 274568c0cc051fd92f761752a7c41585b11ea44a Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 7 Dec 2023 16:12:04 +0000 Subject: [PATCH 1111/2138] mm: thp: introduce multi-size THP sysfs interface ANBZ: #9728 commit 3485b88390b0af9e05dc2c3f57e9936f41e159a0 upstream In preparation for adding support for anonymous multi-size THP, introduce new sysfs structure that will be used to control the new behaviours. A new directory is added under transparent_hugepage for each supported THP size, and contains an `enabled` file, which can be set to "inherit" (to inherit the global setting), "always", "madvise" or "never". For now, the kernel still only supports PMD-sized anonymous THP, so only 1 directory is populated. The first half of the change converts transhuge_vma_suitable() and hugepage_vma_check() so that they take a bitfield of orders for which the user wants to determine support, and the functions filter out all the orders that can't be supported, given the current sysfs configuration and the VMA dimensions. The resulting functions are renamed to thp_vma_suitable_orders() and thp_vma_allowable_orders() respectively. Convenience functions that take a single, unencoded order and return a boolean are also defined as thp_vma_suitable_order() and thp_vma_allowable_order(). The second half of the change implements the new sysfs interface. It has been done so that each supported THP size has a `struct thpsize`, which describes the relevant metadata and is itself a kobject. This is pretty minimal for now, but should make it easy to add new per-thpsize files to the interface if needed in future (e.g. per-size defrag). 
Rather than keep the `enabled` state directly in the struct thpsize, I've elected to directly encode it into huge_anon_orders_[always|madvise|inherit] bitfields since this reduces the amount of work required in thp_vma_allowable_orders() which is called for every page fault. See Documentation/admin-guide/mm/transhuge.rst, as modified by this commit, for details of how the new sysfs interface works. [ryan.roberts@arm.com: fix build warning when CONFIG_SYSFS is disabled] Link: https://lkml.kernel.org/r/20231211125320.3997543-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20231207161211.2374093-4-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: Barry Song Tested-by: Kefeng Wang Tested-by: John Hubbard Acked-by: David Hildenbrand Cc: Alistair Popple Cc: Anshuman Khandual Cc: Catalin Marinas Cc: David Rientjes Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Itaru Kitayama Cc: Kirill A. Shutemov Cc: Luis Chamberlain Cc: Matthew Wilcox (Oracle) Cc: Vlastimil Babka Cc: Yang Shi Cc: Yin Fengwei Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3683 --- Documentation/admin-guide/mm/transhuge.rst | 97 +++++++-- Documentation/filesystems/proc.rst | 6 +- fs/proc/task_mmu.c | 3 +- include/linux/huge_mm.h | 181 +++++++++++++--- mm/huge_memory.c | 227 ++++++++++++++++++--- mm/khugepaged.c | 20 +- mm/memory.c | 6 +- mm/page_vma_mapped.c | 3 +- 8 files changed, 457 insertions(+), 86 deletions(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index b0cc8243e093..04eb45a2f940 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -45,10 +45,25 @@ components: the two is using hugepages just because of the fact the TLB miss is going to run faster. 
+Modern kernels support "multi-size THP" (mTHP), which introduces the +ability to allocate memory in blocks that are bigger than a base page +but smaller than traditional PMD-size (as described above), in +increments of a power-of-2 number of pages. mTHP can back anonymous +memory (for example 16K, 32K, 64K, etc). These THPs continue to be +PTE-mapped, but in many cases can still provide similar benefits to +those outlined above: Page faults are significantly reduced (by a +factor of e.g. 4, 8, 16, etc), but latency spikes are much less +prominent because the size of each page isn't as huge as the PMD-sized +variant and there is less memory to clear in each page fault. Some +architectures also employ TLB compression mechanisms to squeeze more +entries in when a set of PTEs are virtually and physically contiguous +and approporiately aligned. In this case, TLB misses will occur less +often. + THP can be enabled system wide or restricted to certain tasks or even memory ranges inside task's address space. Unless THP is completely disabled, there is ``khugepaged`` daemon that scans memory and -collapses sequences of basic pages into huge pages. +collapses sequences of basic pages into PMD-sized huge pages. The THP behaviour is controlled via :ref:`sysfs ` interface and using madvise(2) and prctl(2) system calls. @@ -95,12 +110,40 @@ Global THP controls Transparent Hugepage Support for anonymous memory can be entirely disabled (mostly for debugging purposes) or only enabled inside MADV_HUGEPAGE regions (to avoid the risk of consuming more memory resources) or enabled -system wide. This can be achieved with one of:: +system wide. 
This can be achieved per-supported-THP-size with one of:: + + echo always >/sys/kernel/mm/transparent_hugepage/hugepages-kB/enabled + echo madvise >/sys/kernel/mm/transparent_hugepage/hugepages-kB/enabled + echo never >/sys/kernel/mm/transparent_hugepage/hugepages-kB/enabled + +where is the hugepage size being addressed, the available sizes +for which vary by system. + +For example:: + + echo always >/sys/kernel/mm/transparent_hugepage/hugepages-2048kB/enabled + +Alternatively it is possible to specify that a given hugepage size +will inherit the top-level "enabled" value:: + + echo inherit >/sys/kernel/mm/transparent_hugepage/hugepages-kB/enabled + +For example:: + + echo inherit >/sys/kernel/mm/transparent_hugepage/hugepages-2048kB/enabled + +The top-level setting (for use with "inherit") can be set by issuing +one of the following commands:: echo always >/sys/kernel/mm/transparent_hugepage/enabled echo madvise >/sys/kernel/mm/transparent_hugepage/enabled echo never >/sys/kernel/mm/transparent_hugepage/enabled +By default, PMD-sized hugepages have enabled="inherit" and all other +hugepage sizes have enabled="never". If enabling multiple hugepage +sizes, the kernel will select the most appropriate enabled size for a +given allocation. + It's also possible to limit defrag efforts in the VM to generate anonymous hugepages in case they're not immediately free to madvise regions or to never try to defrag memory and simply fallback to regular @@ -146,25 +189,34 @@ madvise never should be self-explanatory. -By default kernel tries to use huge zero page on read page fault to -anonymous mapping. It's possible to disable huge zero page by writing 0 -or enable it back by writing 1:: +By default kernel tries to use huge, PMD-mappable zero page on read +page fault to anonymous mapping. 
It's possible to disable huge zero +page by writing 0 or enable it back by writing 1:: echo 0 >/sys/kernel/mm/transparent_hugepage/use_zero_page echo 1 >/sys/kernel/mm/transparent_hugepage/use_zero_page -Some userspace (such as a test program, or an optimized memory allocation -library) may want to know the size (in bytes) of a transparent hugepage:: +Some userspace (such as a test program, or an optimized memory +allocation library) may want to know the size (in bytes) of a +PMD-mappable transparent hugepage:: cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size -khugepaged will be automatically started when -transparent_hugepage/enabled is set to "always" or "madvise, and it'll -be automatically shutdown if it's set to "never". +khugepaged will be automatically started when one or more hugepage +sizes are enabled (either by directly setting "always" or "madvise", +or by setting "inherit" while the top-level enabled is set to "always" +or "madvise"), and it'll be automatically shutdown when the last +hugepage size is disabled (either by directly setting "never", or by +setting "inherit" while the top-level enabled is set to "never"). Khugepaged controls ------------------- +.. note:: + khugepaged currently only searches for opportunities to collapse to + PMD-sized THP and no attempt is made to collapse to other THP + sizes. + khugepaged runs usually at low frequency so while one may not want to invoke defrag algorithms synchronously during the page faults, it should be worth invoking defrag at least in khugepaged. However it's @@ -282,19 +334,26 @@ force Need of application restart =========================== -The transparent_hugepage/enabled values and tmpfs mount option only affect -future behavior. So to make them effective you need to restart any -application that could have been using hugepages. This also applies to the -regions registered in khugepaged. 
+The transparent_hugepage/enabled and +transparent_hugepage/hugepages-kB/enabled values and tmpfs mount +option only affect future behavior. So to make them effective you need +to restart any application that could have been using hugepages. This +also applies to the regions registered in khugepaged. Monitoring usage ================ -The number of anonymous transparent huge pages currently used by the +.. note:: + Currently the below counters only record events relating to + PMD-sized THP. Events relating to other THP sizes are not included. + +The number of PMD-sized anonymous transparent huge pages currently used by the system is available by reading the AnonHugePages field in ``/proc/meminfo``. -To identify what applications are using anonymous transparent huge pages, -it is necessary to read ``/proc/PID/smaps`` and count the AnonHugePages fields -for each mapping. +To identify what applications are using PMD-sized anonymous transparent huge +pages, it is necessary to read ``/proc/PID/smaps`` and count the AnonHugePages +fields for each mapping. (Note that AnonHugePages only applies to traditional +PMD-sized THP for historical reasons and should have been called +AnonHugePmdMapped). The number of file transparent huge pages mapped to userspace is available by reading ShmemPmdMapped and ShmemHugePages fields in ``/proc/meminfo``. @@ -413,7 +472,7 @@ for huge pages. Optimizing the applications =========================== -To be guaranteed that the kernel will map a 2M page immediately in any +To be guaranteed that the kernel will map a THP immediately in any memory region, the mmap region has to be hugepage naturally aligned. posix_memalign() can provide that guarantee. 
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index 2b59cff8be17..6652b658ee77 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -528,9 +528,9 @@ replaced by copy-on-write) part of the underlying shmem object out on swap. does not take into account swapped out page of underlying shmem objects. "Locked" indicates whether the mapping is locked in memory or not. -"THPeligible" indicates whether the mapping is eligible for allocating THP -pages as well as the THP is PMD mappable or not - 1 if true, 0 otherwise. -It just shows the current status. +"THPeligible" indicates whether the mapping is eligible for allocating +naturally aligned THP pages of any currently enabled size. 1 if true, 0 +otherwise. "VmFlags" field deserves a separate description. This member represents the kernel flags associated with the particular virtual memory area in two letter diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index b8640f36ebf8..5a081bad7782 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -865,7 +865,8 @@ static int show_smap(struct seq_file *m, void *v) __show_smap(m, &mss, false); seq_printf(m, "THPeligible: %8u\n", - hugepage_vma_check(vma, vma->vm_flags, true, false, true)); + !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false, + true, THP_ORDERS_ALL)); if (arch_pkeys_enabled()) seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index fc789c0ac85b..866ea42b40ba 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -67,6 +67,24 @@ extern struct kobj_attribute shmem_enabled_attr; #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) #define HPAGE_PMD_NR (1<vm_start >> PAGE_SHIFT) - vma->vm_pgoff, - HPAGE_PMD_NR)) + hpage_size >> PAGE_SHIFT)) return false; } - haddr = addr & HPAGE_PMD_MASK; + haddr = ALIGN_DOWN(addr, hpage_size); - if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > 
vma->vm_end) + if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end) return false; return true; } +/* + * Filter the bitfield of input orders to the ones suitable for use in the vma. + * See thp_vma_suitable_order(). + * All orders that pass the checks are returned as a bitfield. + */ +static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma, + unsigned long addr, unsigned long orders) +{ + int order; + + /* + * Iterate over orders, highest to lowest, removing orders that don't + * meet alignment requirements from the set. Exit loop at first order + * that meets requirements, since all lower orders must also meet + * requirements. + */ + + order = highest_order(orders); + + while (orders) { + if (thp_vma_suitable_order(vma, addr, order)) + break; + order = next_order(&orders, order); + } + + return orders; +} + static inline bool file_thp_enabled(struct vm_area_struct *vma) { struct inode *inode; @@ -130,8 +208,52 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma) !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode); } -bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, - bool smaps, bool in_pf, bool enforce_sysfs); +unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, + unsigned long vm_flags, bool smaps, + bool in_pf, bool enforce_sysfs, + unsigned long orders); + +/** + * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma + * @vma: the vm area to check + * @vm_flags: use these vm_flags instead of vma->vm_flags + * @smaps: whether answer will be used for smaps file + * @in_pf: whether answer will be used by page fault handler + * @enforce_sysfs: whether sysfs config should be taken into account + * @orders: bitfield of all orders to consider + * + * Calculates the intersection of the requested hugepage orders and the allowed + * hugepage orders for the provided vma. 
Permitted orders are encoded as a set + * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3 + * corresponds to order-3, etc). Order-0 is never considered a hugepage order. + * + * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage + * orders are allowed. + */ +static inline +unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, + unsigned long vm_flags, bool smaps, + bool in_pf, bool enforce_sysfs, + unsigned long orders) +{ + /* Optimization to check if required orders are enabled early. */ + if (enforce_sysfs && vma_is_anonymous(vma)) { + unsigned long mask = READ_ONCE(huge_anon_orders_always); + + if (vm_flags & VM_HUGEPAGE) + mask |= READ_ONCE(huge_anon_orders_madvise); + if (hugepage_global_always() || + ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled())) + mask |= READ_ONCE(huge_anon_orders_inherit); + + orders &= mask; + if (!orders) + return 0; + } + + return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, + enforce_sysfs, orders); +} #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ @@ -285,17 +407,24 @@ static inline bool folio_test_pmd_mappable(struct folio *folio) return false; } -static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, - unsigned long addr) +static inline bool thp_vma_suitable_order(struct vm_area_struct *vma, + unsigned long addr, int order) { return false; } -static inline bool hugepage_vma_check(struct vm_area_struct *vma, - unsigned long vm_flags, bool smaps, - bool in_pf, bool enforce_sysfs) +static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma, + unsigned long addr, unsigned long orders) { - return false; + return 0; +} + +static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, + unsigned long vm_flags, bool smaps, + bool in_pf, bool enforce_sysfs, + unsigned long orders) +{ + return 0; } static inline void folio_prep_large_rmappable(struct folio *folio) {} diff 
--git a/mm/huge_memory.c b/mm/huge_memory.c index 0830d0a177ad..af561f91bb7d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -71,19 +71,30 @@ static struct shrinker deferred_split_shrinker; static atomic_t huge_zero_refcount; struct page *huge_zero_page __read_mostly; unsigned long huge_zero_pfn __read_mostly = ~0UL; +unsigned long huge_anon_orders_always __read_mostly; +unsigned long huge_anon_orders_madvise __read_mostly; +unsigned long huge_anon_orders_inherit __read_mostly; + +unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, + unsigned long vm_flags, bool smaps, + bool in_pf, bool enforce_sysfs, + unsigned long orders) +{ + /* Check the intersection of requested and supported orders. */ + orders &= vma_is_anonymous(vma) ? + THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE; + if (!orders) + return 0; -bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, - bool smaps, bool in_pf, bool enforce_sysfs) -{ if (!vma->vm_mm) /* vdso */ - return false; + return 0; if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags)) return false; /* khugepaged doesn't collapse DAX vma, but page fault is fine. */ if (vma_is_dax(vma)) - return in_pf; + return in_pf ? orders : 0; /* * khugepaged special VMA and hugetlb VMA. @@ -91,17 +102,29 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, * VM_MIXEDMAP set. */ if (!in_pf && !smaps && (vm_flags & VM_NO_KHUGEPAGED)) - return false; + return 0; /* - * Check alignment for file vma and size for both file and anon vma. + * Check alignment for file vma and size for both file and anon vma by + * filtering out the unsuitable orders. * * Skip the check for page fault. Huge fault does the check in fault - * handlers. And this check is not suitable for huge PUD fault. + * handlers. 
*/ - if (!in_pf && - !transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE))) - return false; + if (!in_pf) { + int order = highest_order(orders); + unsigned long addr; + + while (orders) { + addr = vma->vm_end - (PAGE_SIZE << order); + if (thp_vma_suitable_order(vma, addr, order)) + break; + order = next_order(&orders, order); + } + + if (!orders) + return 0; + } /* * Enabled via shmem mount options or sysfs settings. @@ -110,29 +133,33 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, */ if (!in_pf && shmem_file(vma->vm_file)) return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff, - !enforce_sysfs, vma->vm_mm, vm_flags); - - /* Enforce sysfs THP requirements as necessary */ - if (enforce_sysfs && - (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) && - !hugepage_flags_always()))) - return false; + !enforce_sysfs, vma->vm_mm, vm_flags) + ? orders : 0; if (!vma_is_anonymous(vma)) { + /* + * Enforce sysfs THP requirements as necessary. Anonymous vmas + * were already handled in thp_vma_allowable_orders(). + */ + if (enforce_sysfs && + (!hugepage_global_enabled() || (!(vm_flags & VM_HUGEPAGE) && + !hugepage_global_always()))) + return 0; + /* * Trust that ->huge_fault() handlers know what they are doing * in fault path. */ if (((in_pf || smaps)) && vma->vm_ops->huge_fault) - return true; + return orders; /* Only regular file is valid in collapse path */ if (((!in_pf || smaps)) && file_thp_enabled(vma)) - return true; - return false; + return orders; + return 0; } if (vma_is_temporary_stack(vma)) - return false; + return 0; /* * THPeligible bit of smaps should show 1 for proper VMAs even @@ -142,9 +169,9 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags, * the first page fault. */ if (!vma->anon_vma) - return (smaps || in_pf); + return (smaps || in_pf) ? 
orders : 0; - return true; + return orders; } static bool get_huge_zero_page(void) @@ -402,9 +429,136 @@ static const struct attribute_group hugepage_attr_group = { .attrs = hugepage_attr, }; +static void hugepage_exit_sysfs(struct kobject *hugepage_kobj); +static void thpsize_release(struct kobject *kobj); +static DEFINE_SPINLOCK(huge_anon_orders_lock); +static LIST_HEAD(thpsize_list); + +struct thpsize { + struct kobject kobj; + struct list_head node; + int order; +}; + +#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj) + +static ssize_t thpsize_enabled_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int order = to_thpsize(kobj)->order; + const char *output; + + if (test_bit(order, &huge_anon_orders_always)) + output = "[always] inherit madvise never"; + else if (test_bit(order, &huge_anon_orders_inherit)) + output = "always [inherit] madvise never"; + else if (test_bit(order, &huge_anon_orders_madvise)) + output = "always inherit [madvise] never"; + else + output = "always inherit madvise [never]"; + + return sysfs_emit(buf, "%s\n", output); +} + +static ssize_t thpsize_enabled_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int order = to_thpsize(kobj)->order; + ssize_t ret = count; + + if (sysfs_streq(buf, "always")) { + spin_lock(&huge_anon_orders_lock); + clear_bit(order, &huge_anon_orders_inherit); + clear_bit(order, &huge_anon_orders_madvise); + set_bit(order, &huge_anon_orders_always); + spin_unlock(&huge_anon_orders_lock); + } else if (sysfs_streq(buf, "inherit")) { + spin_lock(&huge_anon_orders_lock); + clear_bit(order, &huge_anon_orders_always); + clear_bit(order, &huge_anon_orders_madvise); + set_bit(order, &huge_anon_orders_inherit); + spin_unlock(&huge_anon_orders_lock); + } else if (sysfs_streq(buf, "madvise")) { + spin_lock(&huge_anon_orders_lock); + clear_bit(order, &huge_anon_orders_always); + clear_bit(order, &huge_anon_orders_inherit); + set_bit(order, 
&huge_anon_orders_madvise); + spin_unlock(&huge_anon_orders_lock); + } else if (sysfs_streq(buf, "never")) { + spin_lock(&huge_anon_orders_lock); + clear_bit(order, &huge_anon_orders_always); + clear_bit(order, &huge_anon_orders_inherit); + clear_bit(order, &huge_anon_orders_madvise); + spin_unlock(&huge_anon_orders_lock); + } else + ret = -EINVAL; + + return ret; +} + +static struct kobj_attribute thpsize_enabled_attr = + __ATTR(enabled, 0644, thpsize_enabled_show, thpsize_enabled_store); + +static struct attribute *thpsize_attrs[] = { + &thpsize_enabled_attr.attr, + NULL, +}; + +static const struct attribute_group thpsize_attr_group = { + .attrs = thpsize_attrs, +}; + +static const struct kobj_type thpsize_ktype = { + .release = &thpsize_release, + .sysfs_ops = &kobj_sysfs_ops, +}; + +static struct thpsize *thpsize_create(int order, struct kobject *parent) +{ + unsigned long size = (PAGE_SIZE << order) / SZ_1K; + struct thpsize *thpsize; + int ret; + + thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL); + if (!thpsize) + return ERR_PTR(-ENOMEM); + + ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent, + "hugepages-%lukB", size); + if (ret) { + kfree(thpsize); + return ERR_PTR(ret); + } + + ret = sysfs_create_group(&thpsize->kobj, &thpsize_attr_group); + if (ret) { + kobject_put(&thpsize->kobj); + return ERR_PTR(ret); + } + + thpsize->order = order; + return thpsize; +} + +static void thpsize_release(struct kobject *kobj) +{ + kfree(to_thpsize(kobj)); +} + static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) { int err; + struct thpsize *thpsize; + unsigned long orders; + int order; + + /* + * Default to setting PMD-sized THP to inherit the global setting and + * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time + * constant so we have to do this here. 
+ */ + huge_anon_orders_inherit = BIT(PMD_ORDER); *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); if (unlikely(!*hugepage_kobj)) { @@ -424,8 +578,24 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) goto remove_hp_group; } + orders = THP_ORDERS_ALL_ANON; + order = highest_order(orders); + while (orders) { + thpsize = thpsize_create(order, *hugepage_kobj); + if (IS_ERR(thpsize)) { + pr_err("failed to create thpsize for order %d\n", order); + err = PTR_ERR(thpsize); + goto remove_all; + } + list_add(&thpsize->node, &thpsize_list); + order = next_order(&orders, order); + } + return 0; +remove_all: + hugepage_exit_sysfs(*hugepage_kobj); + return err; remove_hp_group: sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group); delete_obj: @@ -435,6 +605,13 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj) { + struct thpsize *thpsize, *tmp; + + list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) { + list_del(&thpsize->node); + kobject_put(&thpsize->kobj); + } + sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group); sysfs_remove_group(hugepage_kobj, &hugepage_attr_group); kobject_put(hugepage_kobj); @@ -777,7 +954,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) struct folio *folio; unsigned long haddr = vmf->address & HPAGE_PMD_MASK; - if (!transhuge_vma_suitable(vma, haddr)) + if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) return VM_FAULT_FALLBACK; if (unlikely(anon_vma_prepare(vma))) return VM_FAULT_OOM; diff --git a/mm/khugepaged.c b/mm/khugepaged.c index a87cfe1d4b7b..927c7295d4cb 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -446,7 +446,8 @@ void khugepaged_enter_vma(struct vm_area_struct *vma, { if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) && hugepage_flags_enabled()) { - if (hugepage_vma_check(vma, vm_flags, false, false, true)) + if (thp_vma_allowable_order(vma, vm_flags, false, 
false, true, + PMD_ORDER)) __khugepaged_enter(vma->vm_mm); } } @@ -907,16 +908,16 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, if (!vma) return SCAN_VMA_NULL; - if (!transhuge_vma_suitable(vma, address)) + if (!thp_vma_suitable_order(vma, address, PMD_ORDER)) return SCAN_ADDRESS_RANGE; - if (!hugepage_vma_check(vma, vma->vm_flags, false, false, - cc->is_khugepaged)) + if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, + cc->is_khugepaged, PMD_ORDER)) return SCAN_VMA_CHECK; /* * Anon VMA expected, the address may be unmapped then * remapped to file after khugepaged reaquired the mmap_lock. * - * hugepage_vma_check may return true for qualified file + * thp_vma_allowable_order may return true for qualified file * vmas. */ if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap))) @@ -1492,7 +1493,8 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, * and map it by a PMD, regardless of sysfs THP settings. As such, let's * analogously elide sysfs THP settings here. 
*/ - if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) + if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false, + PMD_ORDER)) return SCAN_VMA_CHECK; /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */ @@ -2364,7 +2366,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, progress++; break; } - if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) { + if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, + true, PMD_ORDER)) { skip: progress++; continue; @@ -2701,7 +2704,8 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev, *prev = vma; - if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false)) + if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false, + PMD_ORDER)) return -EINVAL; cc = kmalloc(sizeof(*cc), GFP_KERNEL); diff --git a/mm/memory.c b/mm/memory.c index 92992fe828a6..619caaa27531 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4477,7 +4477,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags)) return ret; - if (!transhuge_vma_suitable(vma, haddr)) + if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) return ret; page = compound_head(page); @@ -5275,7 +5275,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, return VM_FAULT_OOM; retry_pud: if (pud_none(*vmf.pud) && - hugepage_vma_check(vma, vm_flags, false, true, true)) { + thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) { ret = create_huge_pud(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; @@ -5309,7 +5309,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, goto retry_pud; if (pmd_none(*vmf.pmd) && - hugepage_vma_check(vma, vm_flags, false, true, true)) { + thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) { ret = create_huge_pmd(&vmf); if (!(ret & VM_FAULT_FALLBACK)) return ret; diff --git 
a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index e0b368e545ed..74d2de15fb5e 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -268,7 +268,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) * cleared *pmd but not decremented compound_mapcount(). */ if ((pvmw->flags & PVMW_SYNC) && - transhuge_vma_suitable(vma, pvmw->address) && + thp_vma_suitable_order(vma, pvmw->address, + PMD_ORDER) && (pvmw->nr_pages >= HPAGE_PMD_NR)) { spinlock_t *ptl = pmd_lock(mm, pvmw->pmd); -- Gitee From a8f9a3dec7350b73ad531063a6039c5963854c77 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 7 Dec 2023 16:12:05 +0000 Subject: [PATCH 1112/2138] mm: thp: support allocation of anonymous multi-size THP ANBZ: #9728 commit 19eaf44954df64f9bc8dec398219e15ad0811497 upstream Introduce the logic to allow THP to be configured (through the new sysfs interface we just added) to allocate large folios to back anonymous memory, which are larger than the base page size but smaller than PMD-size. We call this new THP extension "multi-size THP" (mTHP). mTHP continues to be PTE-mapped, but in many cases can still provide similar benefits to traditional PMD-sized THP: Page faults are significantly reduced (by a factor of e.g. 4, 8, 16, etc. depending on the configured order), but latency spikes are much less prominent because the size of each page isn't as huge as the PMD-sized variant and there is less memory to clear in each page fault. The number of per-page operations (e.g. ref counting, rmap management, lru list management) are also significantly reduced since those ops now become per-folio. Some architectures also employ TLB compression mechanisms to squeeze more entries in when a set of PTEs are virtually and physically contiguous and approporiately aligned. In this case, TLB misses will occur less often. 
The new behaviour is disabled by default, but can be enabled at runtime by writing to /sys/kernel/mm/transparent_hugepage/hugepage-XXkb/enabled (see documentation in previous commit). The long term aim is to change the default to include suitable lower orders, but there are some risks around internal fragmentation that need to be better understood first. [ryan.roberts@arm.com: resolve some multi-size THP review nits] Link: https://lkml.kernel.org/r/20231214160251.3574571-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20231207161211.2374093-5-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Tested-by: Kefeng Wang Tested-by: John Hubbard Acked-by: David Hildenbrand Cc: Alistair Popple Cc: Anshuman Khandual Cc: Barry Song Cc: Catalin Marinas Cc: David Rientjes Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Itaru Kitayama Cc: Kirill A. Shutemov Cc: Luis Chamberlain Cc: Matthew Wilcox (Oracle) Cc: Vlastimil Babka Cc: Yang Shi Cc: Yin Fengwei Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3683 --- include/linux/huge_mm.h | 6 ++- mm/memory.c | 109 ++++++++++++++++++++++++++++++++++++---- 2 files changed, 104 insertions(+), 11 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 866ea42b40ba..09179237dac4 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -68,9 +68,11 @@ extern struct kobj_attribute shmem_enabled_attr; #define HPAGE_PMD_NR (1<vma; + unsigned long orders; + struct folio *folio; + unsigned long addr; + pte_t *pte; + gfp_t gfp; + int order; + + /* + * If uffd is active for the vma we need per-page fault fidelity to + * maintain the uffd semantics. + */ + if (unlikely(userfaultfd_armed(vma))) + goto fallback; + + /* + * Get a list of all the (large) orders below PMD_ORDER that are enabled + * for this vma. 
Then filter out the orders that can't be allocated over + * the faulting address and still be fully contained in the vma. + */ + orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true, + BIT(PMD_ORDER) - 1); + orders = thp_vma_suitable_orders(vma, vmf->address, orders); + + if (!orders) + goto fallback; + + pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK); + if (!pte) + return ERR_PTR(-EAGAIN); + + /* + * Find the highest order where the aligned range is completely + * pte_none(). Note that all remaining orders will be completely + * pte_none(). + */ + order = highest_order(orders); + while (orders) { + addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); + if (pte_range_none(pte + pte_index(addr), 1 << order)) + break; + order = next_order(&orders, order); + } + + pte_unmap(pte); + + /* Try allocating the highest of the remaining orders. */ + gfp = vma_thp_gfp_mask(vma); + while (orders) { + addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); + folio = vma_alloc_folio(gfp, order, vma, addr, true); + if (folio) { + clear_huge_page(&folio->page, vmf->address, 1 << order); + return folio; + } + order = next_order(&orders, order); + } + +fallback: +#endif + return vma_alloc_zeroed_movable_folio(vmf->vma, vmf->address); +} + /* * We enter with non-exclusive mmap_lock (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. @@ -4280,9 +4358,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) { bool uffd_wp = vmf_orig_pte_uffd_wp(vmf); struct vm_area_struct *vma = vmf->vma; + unsigned long addr = vmf->address; struct folio *folio; vm_fault_t ret = 0; + int nr_pages = 1; pte_t entry; + int i; /* File mapping without ->vm_ops ? */ if (vma->vm_flags & VM_SHARED) @@ -4322,10 +4403,16 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) /* Allocate our own private page. 
*/ if (unlikely(anon_vma_prepare(vma))) goto oom; - folio = vma_alloc_zeroed_movable_folio(vma, vmf->address); + /* Returns NULL on OOM or ERR_PTR(-EAGAIN) if we must retry the fault */ + folio = alloc_anon_folio(vmf); + if (IS_ERR(folio)) + return 0; if (!folio) goto oom; + nr_pages = folio_nr_pages(folio); + addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); + if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) goto oom_free_page; folio_throttle_swaprate(folio, GFP_KERNEL); @@ -4342,12 +4429,15 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) if (vma->vm_flags & VM_WRITE) entry = pte_mkwrite(pte_mkdirty(entry), vma); - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, - &vmf->ptl); + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); if (!vmf->pte) goto release; - if (vmf_pte_changed(vmf)) { - update_mmu_tlb(vma, vmf->address, vmf->pte); + if (nr_pages == 1 && vmf_pte_changed(vmf)) { + update_mmu_tlb(vma, addr, vmf->pte); + goto release; + } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { + for (i = 0; i < nr_pages; i++) + update_mmu_tlb(vma, addr + PAGE_SIZE * i, vmf->pte + i); goto release; } @@ -4362,16 +4452,17 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) return handle_userfault(vmf, VM_UFFD_MISSING); } - inc_mm_counter(vma->vm_mm, MM_ANONPAGES); - folio_add_new_anon_rmap(folio, vma, vmf->address); + folio_ref_add(folio, nr_pages - 1); + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); + folio_add_new_anon_rmap(folio, vma, addr); folio_add_lru_vma(folio, vma); setpte: if (uffd_wp) entry = pte_mkuffd_wp(entry); - set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry); + set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr_pages); /* No need to invalidate - it was non-present before */ - update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); + update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr_pages); unlock: if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); -- Gitee 
From 3c6f4cb418b9cf26bef1be2028a6e50beaef788b Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 7 Dec 2023 16:12:06 +0000 Subject: [PATCH 1113/2138] selftests/mm/kugepaged: restore thp settings at exit ANBZ: #9728 commit b6aab3384cafba151c53d3b5f7e1f8d073aadf03 upstream Previously, the saved thp settings would be restored upon a signal or at the natural end of the test suite. But there are some tests that directly call exit() upon failure. In this case, the thp settings were not being restored, which could then influence other tests. Fix this by installing an atexit() handler to do the actual restore. The signal handler can now just call exit() and the atexit handler is invoked. Link: https://lkml.kernel.org/r/20231207161211.2374093-6-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: Alistair Popple Reviewed-by: David Hildenbrand Tested-by: Kefeng Wang Tested-by: John Hubbard Cc: Anshuman Khandual Cc: Barry Song Cc: Catalin Marinas Cc: David Rientjes Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Itaru Kitayama Cc: Kirill A. 
Shutemov Cc: Luis Chamberlain Cc: Matthew Wilcox (Oracle) Cc: Vlastimil Babka Cc: Yang Shi Cc: Yin Fengwei Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3683 --- tools/testing/selftests/mm/khugepaged.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/mm/khugepaged.c b/tools/testing/selftests/mm/khugepaged.c index 030667cb5533..fc47a1c4944c 100644 --- a/tools/testing/selftests/mm/khugepaged.c +++ b/tools/testing/selftests/mm/khugepaged.c @@ -374,18 +374,22 @@ static void pop_settings(void) write_settings(current_settings()); } -static void restore_settings(int sig) +static void restore_settings_atexit(void) { if (skip_settings_restore) - goto out; + return; printf("Restore THP and khugepaged settings..."); write_settings(&saved_settings); success("OK"); - if (sig) - exit(EXIT_FAILURE); -out: - exit(exit_status); + + skip_settings_restore = true; +} + +static void restore_settings(int sig) +{ + /* exit() will invoke the restore_settings_atexit handler. */ + exit(sig ? EXIT_FAILURE : exit_status); } static void save_settings(void) @@ -415,6 +419,7 @@ static void save_settings(void) success("OK"); + atexit(restore_settings_atexit); signal(SIGTERM, restore_settings); signal(SIGINT, restore_settings); signal(SIGHUP, restore_settings); -- Gitee From 446be0e68382e4900bd836767de42ffac8900070 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 7 Dec 2023 16:12:07 +0000 Subject: [PATCH 1114/2138] selftests/mm: factor out thp settings management ANBZ: #9728 commit 00679a183ac6d2584723cfc2a2c07c8285f802dc upstream The khugepaged test has a useful framework for save/restore/pop/push of all thp settings via the sysfs interface. This will be useful to explicitly control multi-size THP settings in other tests, so let's move it out of khugepaged and into its own thp_settings.[c|h] utility. 
Link: https://lkml.kernel.org/r/20231207161211.2374093-7-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Tested-by: Alistair Popple Acked-by: David Hildenbrand Tested-by: Kefeng Wang Tested-by: John Hubbard Cc: Anshuman Khandual Cc: Barry Song Cc: Catalin Marinas Cc: David Rientjes Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Itaru Kitayama Cc: Kirill A. Shutemov Cc: Luis Chamberlain Cc: Matthew Wilcox (Oracle) Cc: Vlastimil Babka Cc: Yang Shi Cc: Yin Fengwei Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3683 --- tools/testing/selftests/mm/Makefile | 4 +- tools/testing/selftests/mm/khugepaged.c | 346 ++-------------------- tools/testing/selftests/mm/thp_settings.c | 296 ++++++++++++++++++ tools/testing/selftests/mm/thp_settings.h | 71 +++++ 4 files changed, 391 insertions(+), 326 deletions(-) create mode 100644 tools/testing/selftests/mm/thp_settings.c create mode 100644 tools/testing/selftests/mm/thp_settings.h diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index c9fcbc6e5121..f64ec79d772e 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -117,8 +117,8 @@ TEST_FILES += va_high_addr_switch.sh include ../lib.mk -$(TEST_GEN_PROGS): vm_util.c -$(TEST_GEN_FILES): vm_util.c +$(TEST_GEN_PROGS): vm_util.c thp_settings.c +$(TEST_GEN_FILES): vm_util.c thp_settings.c $(OUTPUT)/uffd-stress: uffd-common.c $(OUTPUT)/uffd-unit-tests: uffd-common.c diff --git a/tools/testing/selftests/mm/khugepaged.c b/tools/testing/selftests/mm/khugepaged.c index fc47a1c4944c..b15e7fd70176 100644 --- a/tools/testing/selftests/mm/khugepaged.c +++ b/tools/testing/selftests/mm/khugepaged.c @@ -22,13 +22,13 @@ #include "linux/magic.h" #include "vm_util.h" +#include "thp_settings.h" #define BASE_ADDR ((void *)(1UL << 30)) static unsigned long hpage_pmd_size; static unsigned long page_size; static int hpage_pmd_nr; -#define 
THP_SYSFS "/sys/kernel/mm/transparent_hugepage/" #define PID_SMAPS "/proc/self/smaps" #define TEST_FILE "collapse_test_file" @@ -71,78 +71,7 @@ struct file_info { }; static struct file_info finfo; - -enum thp_enabled { - THP_ALWAYS, - THP_MADVISE, - THP_NEVER, -}; - -static const char *thp_enabled_strings[] = { - "always", - "madvise", - "never", - NULL -}; - -enum thp_defrag { - THP_DEFRAG_ALWAYS, - THP_DEFRAG_DEFER, - THP_DEFRAG_DEFER_MADVISE, - THP_DEFRAG_MADVISE, - THP_DEFRAG_NEVER, -}; - -static const char *thp_defrag_strings[] = { - "always", - "defer", - "defer+madvise", - "madvise", - "never", - NULL -}; - -enum shmem_enabled { - SHMEM_ALWAYS, - SHMEM_WITHIN_SIZE, - SHMEM_ADVISE, - SHMEM_NEVER, - SHMEM_DENY, - SHMEM_FORCE, -}; - -static const char *shmem_enabled_strings[] = { - "always", - "within_size", - "advise", - "never", - "deny", - "force", - NULL -}; - -struct khugepaged_settings { - bool defrag; - unsigned int alloc_sleep_millisecs; - unsigned int scan_sleep_millisecs; - unsigned int max_ptes_none; - unsigned int max_ptes_swap; - unsigned int max_ptes_shared; - unsigned long pages_to_scan; -}; - -struct settings { - enum thp_enabled thp_enabled; - enum thp_defrag thp_defrag; - enum shmem_enabled shmem_enabled; - bool use_zero_page; - struct khugepaged_settings khugepaged; - unsigned long read_ahead_kb; -}; - -static struct settings saved_settings; static bool skip_settings_restore; - static int exit_status; static void success(const char *msg) @@ -161,226 +90,13 @@ static void skip(const char *msg) printf(" \e[33m%s\e[0m\n", msg); } -static int read_file(const char *path, char *buf, size_t buflen) -{ - int fd; - ssize_t numread; - - fd = open(path, O_RDONLY); - if (fd == -1) - return 0; - - numread = read(fd, buf, buflen - 1); - if (numread < 1) { - close(fd); - return 0; - } - - buf[numread] = '\0'; - close(fd); - - return (unsigned int) numread; -} - -static int write_file(const char *path, const char *buf, size_t buflen) -{ - int fd; - ssize_t 
numwritten; - - fd = open(path, O_WRONLY); - if (fd == -1) { - printf("open(%s)\n", path); - exit(EXIT_FAILURE); - return 0; - } - - numwritten = write(fd, buf, buflen - 1); - close(fd); - if (numwritten < 1) { - printf("write(%s)\n", buf); - exit(EXIT_FAILURE); - return 0; - } - - return (unsigned int) numwritten; -} - -static int read_string(const char *name, const char *strings[]) -{ - char path[PATH_MAX]; - char buf[256]; - char *c; - int ret; - - ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); - if (ret >= PATH_MAX) { - printf("%s: Pathname is too long\n", __func__); - exit(EXIT_FAILURE); - } - - if (!read_file(path, buf, sizeof(buf))) { - perror(path); - exit(EXIT_FAILURE); - } - - c = strchr(buf, '['); - if (!c) { - printf("%s: Parse failure\n", __func__); - exit(EXIT_FAILURE); - } - - c++; - memmove(buf, c, sizeof(buf) - (c - buf)); - - c = strchr(buf, ']'); - if (!c) { - printf("%s: Parse failure\n", __func__); - exit(EXIT_FAILURE); - } - *c = '\0'; - - ret = 0; - while (strings[ret]) { - if (!strcmp(strings[ret], buf)) - return ret; - ret++; - } - - printf("Failed to parse %s\n", name); - exit(EXIT_FAILURE); -} - -static void write_string(const char *name, const char *val) -{ - char path[PATH_MAX]; - int ret; - - ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); - if (ret >= PATH_MAX) { - printf("%s: Pathname is too long\n", __func__); - exit(EXIT_FAILURE); - } - - if (!write_file(path, val, strlen(val) + 1)) { - perror(path); - exit(EXIT_FAILURE); - } -} - -static const unsigned long _read_num(const char *path) -{ - char buf[21]; - - if (read_file(path, buf, sizeof(buf)) < 0) { - perror("read_file(read_num)"); - exit(EXIT_FAILURE); - } - - return strtoul(buf, NULL, 10); -} - -static const unsigned long read_num(const char *name) -{ - char path[PATH_MAX]; - int ret; - - ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); - if (ret >= PATH_MAX) { - printf("%s: Pathname is too long\n", __func__); - exit(EXIT_FAILURE); - } - return 
_read_num(path); -} - -static void _write_num(const char *path, unsigned long num) -{ - char buf[21]; - - sprintf(buf, "%ld", num); - if (!write_file(path, buf, strlen(buf) + 1)) { - perror(path); - exit(EXIT_FAILURE); - } -} - -static void write_num(const char *name, unsigned long num) -{ - char path[PATH_MAX]; - int ret; - - ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); - if (ret >= PATH_MAX) { - printf("%s: Pathname is too long\n", __func__); - exit(EXIT_FAILURE); - } - _write_num(path, num); -} - -static void write_settings(struct settings *settings) -{ - struct khugepaged_settings *khugepaged = &settings->khugepaged; - - write_string("enabled", thp_enabled_strings[settings->thp_enabled]); - write_string("defrag", thp_defrag_strings[settings->thp_defrag]); - write_string("shmem_enabled", - shmem_enabled_strings[settings->shmem_enabled]); - write_num("use_zero_page", settings->use_zero_page); - - write_num("khugepaged/defrag", khugepaged->defrag); - write_num("khugepaged/alloc_sleep_millisecs", - khugepaged->alloc_sleep_millisecs); - write_num("khugepaged/scan_sleep_millisecs", - khugepaged->scan_sleep_millisecs); - write_num("khugepaged/max_ptes_none", khugepaged->max_ptes_none); - write_num("khugepaged/max_ptes_swap", khugepaged->max_ptes_swap); - write_num("khugepaged/max_ptes_shared", khugepaged->max_ptes_shared); - write_num("khugepaged/pages_to_scan", khugepaged->pages_to_scan); - - if (file_ops && finfo.type == VMA_FILE) - _write_num(finfo.dev_queue_read_ahead_path, - settings->read_ahead_kb); -} - -#define MAX_SETTINGS_DEPTH 4 -static struct settings settings_stack[MAX_SETTINGS_DEPTH]; -static int settings_index; - -static struct settings *current_settings(void) -{ - if (!settings_index) { - printf("Fail: No settings set"); - exit(EXIT_FAILURE); - } - return settings_stack + settings_index - 1; -} - -static void push_settings(struct settings *settings) -{ - if (settings_index >= MAX_SETTINGS_DEPTH) { - printf("Fail: Settings stack exceeded"); - 
exit(EXIT_FAILURE); - } - settings_stack[settings_index++] = *settings; - write_settings(current_settings()); -} - -static void pop_settings(void) -{ - if (settings_index <= 0) { - printf("Fail: Settings stack empty"); - exit(EXIT_FAILURE); - } - --settings_index; - write_settings(current_settings()); -} - static void restore_settings_atexit(void) { if (skip_settings_restore) return; printf("Restore THP and khugepaged settings..."); - write_settings(&saved_settings); + thp_restore_settings(); success("OK"); skip_settings_restore = true; @@ -395,27 +111,9 @@ static void restore_settings(int sig) static void save_settings(void) { printf("Save THP and khugepaged settings..."); - saved_settings = (struct settings) { - .thp_enabled = read_string("enabled", thp_enabled_strings), - .thp_defrag = read_string("defrag", thp_defrag_strings), - .shmem_enabled = - read_string("shmem_enabled", shmem_enabled_strings), - .use_zero_page = read_num("use_zero_page"), - }; - saved_settings.khugepaged = (struct khugepaged_settings) { - .defrag = read_num("khugepaged/defrag"), - .alloc_sleep_millisecs = - read_num("khugepaged/alloc_sleep_millisecs"), - .scan_sleep_millisecs = - read_num("khugepaged/scan_sleep_millisecs"), - .max_ptes_none = read_num("khugepaged/max_ptes_none"), - .max_ptes_swap = read_num("khugepaged/max_ptes_swap"), - .max_ptes_shared = read_num("khugepaged/max_ptes_shared"), - .pages_to_scan = read_num("khugepaged/pages_to_scan"), - }; if (file_ops && finfo.type == VMA_FILE) - saved_settings.read_ahead_kb = - _read_num(finfo.dev_queue_read_ahead_path); + thp_set_read_ahead_path(finfo.dev_queue_read_ahead_path); + thp_save_settings(); success("OK"); @@ -798,7 +496,7 @@ static void __madvise_collapse(const char *msg, char *p, int nr_hpages, struct mem_ops *ops, bool expect) { int ret; - struct settings settings = *current_settings(); + struct thp_settings settings = *thp_current_settings(); printf("%s...", msg); @@ -808,7 +506,7 @@ static void __madvise_collapse(const 
char *msg, char *p, int nr_hpages, */ settings.thp_enabled = THP_NEVER; settings.shmem_enabled = SHMEM_NEVER; - push_settings(&settings); + thp_push_settings(&settings); /* Clear VM_NOHUGEPAGE */ madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE); @@ -820,7 +518,7 @@ static void __madvise_collapse(const char *msg, char *p, int nr_hpages, else success("OK"); - pop_settings(); + thp_pop_settings(); } static void madvise_collapse(const char *msg, char *p, int nr_hpages, @@ -850,13 +548,13 @@ static bool wait_for_scan(const char *msg, char *p, int nr_hpages, madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE); /* Wait until the second full_scan completed */ - full_scans = read_num("khugepaged/full_scans") + 2; + full_scans = thp_read_num("khugepaged/full_scans") + 2; printf("%s...", msg); while (timeout--) { if (ops->check_huge(p, nr_hpages)) break; - if (read_num("khugepaged/full_scans") >= full_scans) + if (thp_read_num("khugepaged/full_scans") >= full_scans) break; printf("."); usleep(TICK); @@ -911,11 +609,11 @@ static bool is_tmpfs(struct mem_ops *ops) static void alloc_at_fault(void) { - struct settings settings = *current_settings(); + struct thp_settings settings = *thp_current_settings(); char *p; settings.thp_enabled = THP_ALWAYS; - push_settings(&settings); + thp_push_settings(&settings); p = alloc_mapping(1); *p = 1; @@ -925,7 +623,7 @@ static void alloc_at_fault(void) else fail("Fail"); - pop_settings(); + thp_pop_settings(); madvise(p, page_size, MADV_DONTNEED); printf("Split huge PMD on MADV_DONTNEED..."); @@ -973,11 +671,11 @@ static void collapse_single_pte_entry(struct collapse_context *c, struct mem_ops static void collapse_max_ptes_none(struct collapse_context *c, struct mem_ops *ops) { int max_ptes_none = hpage_pmd_nr / 2; - struct settings settings = *current_settings(); + struct thp_settings settings = *thp_current_settings(); void *p; settings.khugepaged.max_ptes_none = max_ptes_none; - push_settings(&settings); + 
thp_push_settings(&settings); p = ops->setup_area(1); @@ -1002,7 +700,7 @@ static void collapse_max_ptes_none(struct collapse_context *c, struct mem_ops *o } skip: ops->cleanup_area(p, hpage_pmd_size); - pop_settings(); + thp_pop_settings(); } static void collapse_swapin_single_pte(struct collapse_context *c, struct mem_ops *ops) @@ -1033,7 +731,7 @@ static void collapse_swapin_single_pte(struct collapse_context *c, struct mem_op static void collapse_max_ptes_swap(struct collapse_context *c, struct mem_ops *ops) { - int max_ptes_swap = read_num("khugepaged/max_ptes_swap"); + int max_ptes_swap = thp_read_num("khugepaged/max_ptes_swap"); void *p; p = ops->setup_area(1); @@ -1250,11 +948,11 @@ static void collapse_fork_compound(struct collapse_context *c, struct mem_ops *o fail("Fail"); ops->fault(p, 0, page_size); - write_num("khugepaged/max_ptes_shared", hpage_pmd_nr - 1); + thp_write_num("khugepaged/max_ptes_shared", hpage_pmd_nr - 1); c->collapse("Collapse PTE table full of compound pages in child", p, 1, ops, true); - write_num("khugepaged/max_ptes_shared", - current_settings()->khugepaged.max_ptes_shared); + thp_write_num("khugepaged/max_ptes_shared", + thp_current_settings()->khugepaged.max_ptes_shared); validate_memory(p, 0, hpage_pmd_size); ops->cleanup_area(p, hpage_pmd_size); @@ -1275,7 +973,7 @@ static void collapse_fork_compound(struct collapse_context *c, struct mem_ops *o static void collapse_max_ptes_shared(struct collapse_context *c, struct mem_ops *ops) { - int max_ptes_shared = read_num("khugepaged/max_ptes_shared"); + int max_ptes_shared = thp_read_num("khugepaged/max_ptes_shared"); int wstatus; void *p; @@ -1443,7 +1141,7 @@ static void parse_test_type(int argc, const char **argv) int main(int argc, const char **argv) { - struct settings default_settings = { + struct thp_settings default_settings = { .thp_enabled = THP_MADVISE, .thp_defrag = THP_DEFRAG_ALWAYS, .shmem_enabled = SHMEM_ADVISE, @@ -1484,7 +1182,7 @@ int main(int argc, const char 
**argv) default_settings.khugepaged.pages_to_scan = hpage_pmd_nr * 8; save_settings(); - push_settings(&default_settings); + thp_push_settings(&default_settings); alloc_at_fault(); diff --git a/tools/testing/selftests/mm/thp_settings.c b/tools/testing/selftests/mm/thp_settings.c new file mode 100644 index 000000000000..5e8ec792cac7 --- /dev/null +++ b/tools/testing/selftests/mm/thp_settings.c @@ -0,0 +1,296 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include + +#include "thp_settings.h" + +#define THP_SYSFS "/sys/kernel/mm/transparent_hugepage/" +#define MAX_SETTINGS_DEPTH 4 +static struct thp_settings settings_stack[MAX_SETTINGS_DEPTH]; +static int settings_index; +static struct thp_settings saved_settings; +static char dev_queue_read_ahead_path[PATH_MAX]; + +static const char * const thp_enabled_strings[] = { + "always", + "madvise", + "never", + NULL +}; + +static const char * const thp_defrag_strings[] = { + "always", + "defer", + "defer+madvise", + "madvise", + "never", + NULL +}; + +static const char * const shmem_enabled_strings[] = { + "always", + "within_size", + "advise", + "never", + "deny", + "force", + NULL +}; + +int read_file(const char *path, char *buf, size_t buflen) +{ + int fd; + ssize_t numread; + + fd = open(path, O_RDONLY); + if (fd == -1) + return 0; + + numread = read(fd, buf, buflen - 1); + if (numread < 1) { + close(fd); + return 0; + } + + buf[numread] = '\0'; + close(fd); + + return (unsigned int) numread; +} + +int write_file(const char *path, const char *buf, size_t buflen) +{ + int fd; + ssize_t numwritten; + + fd = open(path, O_WRONLY); + if (fd == -1) { + printf("open(%s)\n", path); + exit(EXIT_FAILURE); + return 0; + } + + numwritten = write(fd, buf, buflen - 1); + close(fd); + if (numwritten < 1) { + printf("write(%s)\n", buf); + exit(EXIT_FAILURE); + return 0; + } + + return (unsigned int) numwritten; +} + +const unsigned long read_num(const char *path) +{ + char buf[21]; + + if 
(read_file(path, buf, sizeof(buf)) < 0) { + perror("read_file()"); + exit(EXIT_FAILURE); + } + + return strtoul(buf, NULL, 10); +} + +void write_num(const char *path, unsigned long num) +{ + char buf[21]; + + sprintf(buf, "%ld", num); + if (!write_file(path, buf, strlen(buf) + 1)) { + perror(path); + exit(EXIT_FAILURE); + } +} + +int thp_read_string(const char *name, const char * const strings[]) +{ + char path[PATH_MAX]; + char buf[256]; + char *c; + int ret; + + ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); + if (ret >= PATH_MAX) { + printf("%s: Pathname is too long\n", __func__); + exit(EXIT_FAILURE); + } + + if (!read_file(path, buf, sizeof(buf))) { + perror(path); + exit(EXIT_FAILURE); + } + + c = strchr(buf, '['); + if (!c) { + printf("%s: Parse failure\n", __func__); + exit(EXIT_FAILURE); + } + + c++; + memmove(buf, c, sizeof(buf) - (c - buf)); + + c = strchr(buf, ']'); + if (!c) { + printf("%s: Parse failure\n", __func__); + exit(EXIT_FAILURE); + } + *c = '\0'; + + ret = 0; + while (strings[ret]) { + if (!strcmp(strings[ret], buf)) + return ret; + ret++; + } + + printf("Failed to parse %s\n", name); + exit(EXIT_FAILURE); +} + +void thp_write_string(const char *name, const char *val) +{ + char path[PATH_MAX]; + int ret; + + ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); + if (ret >= PATH_MAX) { + printf("%s: Pathname is too long\n", __func__); + exit(EXIT_FAILURE); + } + + if (!write_file(path, val, strlen(val) + 1)) { + perror(path); + exit(EXIT_FAILURE); + } +} + +const unsigned long thp_read_num(const char *name) +{ + char path[PATH_MAX]; + int ret; + + ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); + if (ret >= PATH_MAX) { + printf("%s: Pathname is too long\n", __func__); + exit(EXIT_FAILURE); + } + return read_num(path); +} + +void thp_write_num(const char *name, unsigned long num) +{ + char path[PATH_MAX]; + int ret; + + ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name); + if (ret >= PATH_MAX) { + printf("%s: Pathname is too 
long\n", __func__); + exit(EXIT_FAILURE); + } + write_num(path, num); +} + +void thp_read_settings(struct thp_settings *settings) +{ + *settings = (struct thp_settings) { + .thp_enabled = thp_read_string("enabled", thp_enabled_strings), + .thp_defrag = thp_read_string("defrag", thp_defrag_strings), + .shmem_enabled = + thp_read_string("shmem_enabled", shmem_enabled_strings), + .use_zero_page = thp_read_num("use_zero_page"), + }; + settings->khugepaged = (struct khugepaged_settings) { + .defrag = thp_read_num("khugepaged/defrag"), + .alloc_sleep_millisecs = + thp_read_num("khugepaged/alloc_sleep_millisecs"), + .scan_sleep_millisecs = + thp_read_num("khugepaged/scan_sleep_millisecs"), + .max_ptes_none = thp_read_num("khugepaged/max_ptes_none"), + .max_ptes_swap = thp_read_num("khugepaged/max_ptes_swap"), + .max_ptes_shared = thp_read_num("khugepaged/max_ptes_shared"), + .pages_to_scan = thp_read_num("khugepaged/pages_to_scan"), + }; + if (dev_queue_read_ahead_path[0]) + settings->read_ahead_kb = read_num(dev_queue_read_ahead_path); +} + +void thp_write_settings(struct thp_settings *settings) +{ + struct khugepaged_settings *khugepaged = &settings->khugepaged; + + thp_write_string("enabled", thp_enabled_strings[settings->thp_enabled]); + thp_write_string("defrag", thp_defrag_strings[settings->thp_defrag]); + thp_write_string("shmem_enabled", + shmem_enabled_strings[settings->shmem_enabled]); + thp_write_num("use_zero_page", settings->use_zero_page); + + thp_write_num("khugepaged/defrag", khugepaged->defrag); + thp_write_num("khugepaged/alloc_sleep_millisecs", + khugepaged->alloc_sleep_millisecs); + thp_write_num("khugepaged/scan_sleep_millisecs", + khugepaged->scan_sleep_millisecs); + thp_write_num("khugepaged/max_ptes_none", khugepaged->max_ptes_none); + thp_write_num("khugepaged/max_ptes_swap", khugepaged->max_ptes_swap); + thp_write_num("khugepaged/max_ptes_shared", khugepaged->max_ptes_shared); + thp_write_num("khugepaged/pages_to_scan", 
khugepaged->pages_to_scan); + + if (dev_queue_read_ahead_path[0]) + write_num(dev_queue_read_ahead_path, settings->read_ahead_kb); +} + +struct thp_settings *thp_current_settings(void) +{ + if (!settings_index) { + printf("Fail: No settings set"); + exit(EXIT_FAILURE); + } + return settings_stack + settings_index - 1; +} + +void thp_push_settings(struct thp_settings *settings) +{ + if (settings_index >= MAX_SETTINGS_DEPTH) { + printf("Fail: Settings stack exceeded"); + exit(EXIT_FAILURE); + } + settings_stack[settings_index++] = *settings; + thp_write_settings(thp_current_settings()); +} + +void thp_pop_settings(void) +{ + if (settings_index <= 0) { + printf("Fail: Settings stack empty"); + exit(EXIT_FAILURE); + } + --settings_index; + thp_write_settings(thp_current_settings()); +} + +void thp_restore_settings(void) +{ + thp_write_settings(&saved_settings); +} + +void thp_save_settings(void) +{ + thp_read_settings(&saved_settings); +} + +void thp_set_read_ahead_path(char *path) +{ + if (!path) { + dev_queue_read_ahead_path[0] = '\0'; + return; + } + + strncpy(dev_queue_read_ahead_path, path, + sizeof(dev_queue_read_ahead_path)); + dev_queue_read_ahead_path[sizeof(dev_queue_read_ahead_path) - 1] = '\0'; +} diff --git a/tools/testing/selftests/mm/thp_settings.h b/tools/testing/selftests/mm/thp_settings.h new file mode 100644 index 000000000000..ff3d98c30617 --- /dev/null +++ b/tools/testing/selftests/mm/thp_settings.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __THP_SETTINGS_H__ +#define __THP_SETTINGS_H__ + +#include +#include +#include + +enum thp_enabled { + THP_ALWAYS, + THP_MADVISE, + THP_NEVER, +}; + +enum thp_defrag { + THP_DEFRAG_ALWAYS, + THP_DEFRAG_DEFER, + THP_DEFRAG_DEFER_MADVISE, + THP_DEFRAG_MADVISE, + THP_DEFRAG_NEVER, +}; + +enum shmem_enabled { + SHMEM_ALWAYS, + SHMEM_WITHIN_SIZE, + SHMEM_ADVISE, + SHMEM_NEVER, + SHMEM_DENY, + SHMEM_FORCE, +}; + +struct khugepaged_settings { + bool defrag; + unsigned int alloc_sleep_millisecs; 
+ unsigned int scan_sleep_millisecs; + unsigned int max_ptes_none; + unsigned int max_ptes_swap; + unsigned int max_ptes_shared; + unsigned long pages_to_scan; +}; + +struct thp_settings { + enum thp_enabled thp_enabled; + enum thp_defrag thp_defrag; + enum shmem_enabled shmem_enabled; + bool use_zero_page; + struct khugepaged_settings khugepaged; + unsigned long read_ahead_kb; +}; + +int read_file(const char *path, char *buf, size_t buflen); +int write_file(const char *path, const char *buf, size_t buflen); +const unsigned long read_num(const char *path); +void write_num(const char *path, unsigned long num); + +int thp_read_string(const char *name, const char * const strings[]); +void thp_write_string(const char *name, const char *val); +const unsigned long thp_read_num(const char *name); +void thp_write_num(const char *name, unsigned long num); + +void thp_write_settings(struct thp_settings *settings); +void thp_read_settings(struct thp_settings *settings); +struct thp_settings *thp_current_settings(void); +void thp_push_settings(struct thp_settings *settings); +void thp_pop_settings(void); +void thp_restore_settings(void); +void thp_save_settings(void); + +void thp_set_read_ahead_path(char *path); + +#endif /* __THP_SETTINGS_H__ */ -- Gitee From f7e8d3e95d01d5beb549f621f5ef26bbeddbbbab Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 7 Dec 2023 16:12:08 +0000 Subject: [PATCH 1115/2138] selftests/mm: support multi-size THP interface in thp_settings ANBZ: #9728 commit 4f5070a5e40db2e9dbf5fff4ec678d6fbb338d5c upstream Save and restore the new per-size hugepage enabled setting, if available on the running kernel. Since the number of per-size directories is not fixed, solve this as simply as possible by catering for a maximum number in the thp_settings struct (20). Each array index is the order. The value of THP_NEVER is changed to 0 so that all of these new settings default to THP_NEVER and the user only needs to fill in the ones they want to enable. 
Link: https://lkml.kernel.org/r/20231207161211.2374093-8-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Tested-by: Kefeng Wang Tested-by: John Hubbard Cc: Alistair Popple Cc: Anshuman Khandual Cc: Barry Song Cc: Catalin Marinas Cc: David Hildenbrand Cc: David Rientjes Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Itaru Kitayama Cc: Kirill A. Shutemov Cc: Luis Chamberlain Cc: Matthew Wilcox (Oracle) Cc: Vlastimil Babka Cc: Yang Shi Cc: Yin Fengwei Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3683 --- tools/testing/selftests/mm/khugepaged.c | 3 ++ tools/testing/selftests/mm/thp_settings.c | 55 ++++++++++++++++++++++- tools/testing/selftests/mm/thp_settings.h | 11 ++++- 3 files changed, 67 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/mm/khugepaged.c b/tools/testing/selftests/mm/khugepaged.c index b15e7fd70176..7bd3baa9d34b 100644 --- a/tools/testing/selftests/mm/khugepaged.c +++ b/tools/testing/selftests/mm/khugepaged.c @@ -1141,6 +1141,7 @@ static void parse_test_type(int argc, const char **argv) int main(int argc, const char **argv) { + int hpage_pmd_order; struct thp_settings default_settings = { .thp_enabled = THP_MADVISE, .thp_defrag = THP_DEFRAG_ALWAYS, @@ -1175,11 +1176,13 @@ int main(int argc, const char **argv) exit(EXIT_FAILURE); } hpage_pmd_nr = hpage_pmd_size / page_size; + hpage_pmd_order = __builtin_ctz(hpage_pmd_nr); default_settings.khugepaged.max_ptes_none = hpage_pmd_nr - 1; default_settings.khugepaged.max_ptes_swap = hpage_pmd_nr / 8; default_settings.khugepaged.max_ptes_shared = hpage_pmd_nr / 2; default_settings.khugepaged.pages_to_scan = hpage_pmd_nr * 8; + default_settings.hugepages[hpage_pmd_order].enabled = THP_INHERIT; save_settings(); thp_push_settings(&default_settings); diff --git a/tools/testing/selftests/mm/thp_settings.c b/tools/testing/selftests/mm/thp_settings.c index 5e8ec792cac7..a4163438108e 100644 --- 
a/tools/testing/selftests/mm/thp_settings.c +++ b/tools/testing/selftests/mm/thp_settings.c @@ -16,9 +16,10 @@ static struct thp_settings saved_settings; static char dev_queue_read_ahead_path[PATH_MAX]; static const char * const thp_enabled_strings[] = { + "never", "always", + "inherit", "madvise", - "never", NULL }; @@ -198,6 +199,10 @@ void thp_write_num(const char *name, unsigned long num) void thp_read_settings(struct thp_settings *settings) { + unsigned long orders = thp_supported_orders(); + char path[PATH_MAX]; + int i; + *settings = (struct thp_settings) { .thp_enabled = thp_read_string("enabled", thp_enabled_strings), .thp_defrag = thp_read_string("defrag", thp_defrag_strings), @@ -218,11 +223,26 @@ void thp_read_settings(struct thp_settings *settings) }; if (dev_queue_read_ahead_path[0]) settings->read_ahead_kb = read_num(dev_queue_read_ahead_path); + + for (i = 0; i < NR_ORDERS; i++) { + if (!((1 << i) & orders)) { + settings->hugepages[i].enabled = THP_NEVER; + continue; + } + snprintf(path, PATH_MAX, "hugepages-%ukB/enabled", + (getpagesize() >> 10) << i); + settings->hugepages[i].enabled = + thp_read_string(path, thp_enabled_strings); + } } void thp_write_settings(struct thp_settings *settings) { struct khugepaged_settings *khugepaged = &settings->khugepaged; + unsigned long orders = thp_supported_orders(); + char path[PATH_MAX]; + int enabled; + int i; thp_write_string("enabled", thp_enabled_strings[settings->thp_enabled]); thp_write_string("defrag", thp_defrag_strings[settings->thp_defrag]); @@ -242,6 +262,15 @@ void thp_write_settings(struct thp_settings *settings) if (dev_queue_read_ahead_path[0]) write_num(dev_queue_read_ahead_path, settings->read_ahead_kb); + + for (i = 0; i < NR_ORDERS; i++) { + if (!((1 << i) & orders)) + continue; + snprintf(path, PATH_MAX, "hugepages-%ukB/enabled", + (getpagesize() >> 10) << i); + enabled = settings->hugepages[i].enabled; + thp_write_string(path, thp_enabled_strings[enabled]); + } } struct thp_settings 
*thp_current_settings(void) @@ -294,3 +323,27 @@ void thp_set_read_ahead_path(char *path) sizeof(dev_queue_read_ahead_path)); dev_queue_read_ahead_path[sizeof(dev_queue_read_ahead_path) - 1] = '\0'; } + +unsigned long thp_supported_orders(void) +{ + unsigned long orders = 0; + char path[PATH_MAX]; + char buf[256]; + int ret; + int i; + + for (i = 0; i < NR_ORDERS; i++) { + ret = snprintf(path, PATH_MAX, THP_SYSFS "hugepages-%ukB/enabled", + (getpagesize() >> 10) << i); + if (ret >= PATH_MAX) { + printf("%s: Pathname is too long\n", __func__); + exit(EXIT_FAILURE); + } + + ret = read_file(path, buf, sizeof(buf)); + if (ret) + orders |= 1UL << i; + } + + return orders; +} diff --git a/tools/testing/selftests/mm/thp_settings.h b/tools/testing/selftests/mm/thp_settings.h index ff3d98c30617..71cbff05f4c7 100644 --- a/tools/testing/selftests/mm/thp_settings.h +++ b/tools/testing/selftests/mm/thp_settings.h @@ -7,9 +7,10 @@ #include enum thp_enabled { + THP_NEVER, THP_ALWAYS, + THP_INHERIT, THP_MADVISE, - THP_NEVER, }; enum thp_defrag { @@ -29,6 +30,12 @@ enum shmem_enabled { SHMEM_FORCE, }; +#define NR_ORDERS 20 + +struct hugepages_settings { + enum thp_enabled enabled; +}; + struct khugepaged_settings { bool defrag; unsigned int alloc_sleep_millisecs; @@ -46,6 +53,7 @@ struct thp_settings { bool use_zero_page; struct khugepaged_settings khugepaged; unsigned long read_ahead_kb; + struct hugepages_settings hugepages[NR_ORDERS]; }; int read_file(const char *path, char *buf, size_t buflen); @@ -67,5 +75,6 @@ void thp_restore_settings(void); void thp_save_settings(void); void thp_set_read_ahead_path(char *path); +unsigned long thp_supported_orders(void); #endif /* __THP_SETTINGS_H__ */ -- Gitee From 95cf52ecf7c779c3bb14d42e1608defb5a9f5577 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 7 Dec 2023 16:12:09 +0000 Subject: [PATCH 1116/2138] selftests/mm/khugepaged: enlighten for multi-size THP ANBZ: #9728 commit 9f0704eae8a4edc8dca9c8a297f798d505a4103a upstream The 
`collapse_max_ptes_none` test was previously failing when a THP size less than PMD-size had enabled="always". The root cause is because the test faults in 1 page less than the threshold it set for collapsing. But when THP is enabled always, we "over allocate" and therefore the threshold is passed, and collapse unexpectedly succeeds. Solve this by enlightening khugepaged selftest. Add a command line option to pass in the desired THP size that should be used for all anonymous allocations. The harness will then explicitly configure a THP size as requested and modify the `collapse_max_ptes_none` test so that it faults in the threshold minus the number of pages in the configured THP size. If no command line option is provided, default to order 0, as per previous behaviour. I chose to use an order in the command line interface, since this makes the interface agnostic of base page size, making it easier to invoke from run_vmtests.sh. Link: https://lkml.kernel.org/r/20231207161211.2374093-9-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Tested-by: Kefeng Wang Tested-by: John Hubbard Cc: Alistair Popple Cc: Anshuman Khandual Cc: Barry Song Cc: Catalin Marinas Cc: David Hildenbrand Cc: David Rientjes Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Itaru Kitayama Cc: Kirill A. 
Shutemov Cc: Luis Chamberlain Cc: Matthew Wilcox (Oracle) Cc: Vlastimil Babka Cc: Yang Shi Cc: Yin Fengwei Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3683 --- tools/testing/selftests/mm/khugepaged.c | 48 +++++++++++++++++------ tools/testing/selftests/mm/run_vmtests.sh | 2 + 2 files changed, 39 insertions(+), 11 deletions(-) diff --git a/tools/testing/selftests/mm/khugepaged.c b/tools/testing/selftests/mm/khugepaged.c index 7bd3baa9d34b..829320a519e7 100644 --- a/tools/testing/selftests/mm/khugepaged.c +++ b/tools/testing/selftests/mm/khugepaged.c @@ -28,6 +28,7 @@ static unsigned long hpage_pmd_size; static unsigned long page_size; static int hpage_pmd_nr; +static int anon_order; #define PID_SMAPS "/proc/self/smaps" #define TEST_FILE "collapse_test_file" @@ -607,6 +608,11 @@ static bool is_tmpfs(struct mem_ops *ops) return ops == &__file_ops && finfo.type == VMA_SHMEM; } +static bool is_anon(struct mem_ops *ops) +{ + return ops == &__anon_ops; +} + static void alloc_at_fault(void) { struct thp_settings settings = *thp_current_settings(); @@ -673,6 +679,7 @@ static void collapse_max_ptes_none(struct collapse_context *c, struct mem_ops *o int max_ptes_none = hpage_pmd_nr / 2; struct thp_settings settings = *thp_current_settings(); void *p; + int fault_nr_pages = is_anon(ops) ? 
1 << anon_order : 1; settings.khugepaged.max_ptes_none = max_ptes_none; thp_push_settings(&settings); @@ -686,10 +693,10 @@ static void collapse_max_ptes_none(struct collapse_context *c, struct mem_ops *o goto skip; } - ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size); + ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none - fault_nr_pages) * page_size); c->collapse("Maybe collapse with max_ptes_none exceeded", p, 1, ops, !c->enforce_pte_scan_limits); - validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size); + validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - fault_nr_pages) * page_size); if (c->enforce_pte_scan_limits) { ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size); @@ -1076,7 +1083,7 @@ static void madvise_retracted_page_tables(struct collapse_context *c, static void usage(void) { - fprintf(stderr, "\nUsage: ./khugepaged [dir]\n\n"); + fprintf(stderr, "\nUsage: ./khugepaged [OPTIONS] [dir]\n\n"); fprintf(stderr, "\t\t: :\n"); fprintf(stderr, "\t\t: [all|khugepaged|madvise]\n"); fprintf(stderr, "\t\t: [all|anon|file|shmem]\n"); @@ -1085,15 +1092,34 @@ static void usage(void) fprintf(stderr, "\tCONFIG_READ_ONLY_THP_FOR_FS=y\n"); fprintf(stderr, "\n\tif [dir] is a (sub)directory of a tmpfs mount, tmpfs must be\n"); fprintf(stderr, "\tmounted with huge=madvise option for khugepaged tests to work\n"); + fprintf(stderr, "\n\tSupported Options:\n"); + fprintf(stderr, "\t\t-h: This help message.\n"); + fprintf(stderr, "\t\t-s: mTHP size, expressed as page order.\n"); + fprintf(stderr, "\t\t Defaults to 0. 
Use this size for anon allocations.\n"); exit(1); } -static void parse_test_type(int argc, const char **argv) +static void parse_test_type(int argc, char **argv) { + int opt; char *buf; const char *token; - if (argc == 1) { + while ((opt = getopt(argc, argv, "s:h")) != -1) { + switch (opt) { + case 's': + anon_order = atoi(optarg); + break; + case 'h': + default: + usage(); + } + } + + argv += optind; + argc -= optind; + + if (argc == 0) { /* Backwards compatibility */ khugepaged_context = &__khugepaged_context; madvise_context = &__madvise_context; @@ -1101,7 +1127,7 @@ static void parse_test_type(int argc, const char **argv) return; } - buf = strdup(argv[1]); + buf = strdup(argv[0]); token = strsep(&buf, ":"); if (!strcmp(token, "all")) { @@ -1135,11 +1161,13 @@ static void parse_test_type(int argc, const char **argv) if (!file_ops) return; - if (argc != 3) + if (argc != 2) usage(); + + get_finfo(argv[1]); } -int main(int argc, const char **argv) +int main(int argc, char **argv) { int hpage_pmd_order; struct thp_settings default_settings = { @@ -1164,9 +1192,6 @@ int main(int argc, const char **argv) parse_test_type(argc, argv); - if (file_ops) - get_finfo(argv[2]); - setbuf(stdout, NULL); page_size = getpagesize(); @@ -1183,6 +1208,7 @@ int main(int argc, const char **argv) default_settings.khugepaged.max_ptes_shared = hpage_pmd_nr / 2; default_settings.khugepaged.pages_to_scan = hpage_pmd_nr * 8; default_settings.hugepages[hpage_pmd_order].enabled = THP_INHERIT; + default_settings.hugepages[anon_order].enabled = THP_ALWAYS; save_settings(); thp_push_settings(&default_settings); diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index d7b2c9d07eec..8ec99d704d06 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -377,6 +377,8 @@ CATEGORY="cow" run_test ./cow CATEGORY="thp" run_test ./khugepaged +CATEGORY="thp" run_test ./khugepaged -s 2 + CATEGORY="thp" run_test 
./transhuge-stress -d 20 CATEGORY="thp" run_test ./split_huge_page_test -- Gitee From 0c11f2ea8e50f7211179fc14827dc4a767757687 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 7 Dec 2023 16:12:10 +0000 Subject: [PATCH 1117/2138] selftests/mm/cow: generalize do_run_with_thp() helper ANBZ: #9728 commit 12dc16b38463a671bc91dc2df10f3a014a27ff3b upstream do_run_with_thp() prepares (PMD-sized) THP memory into different states before running tests. With the introduction of multi-size THP, we would like to reuse this logic to also test those smaller THP sizes. So let's add a thpsize parameter which tells the function what size THP it should operate on. A separate commit will utilize this change to add new tests for multi-size THP, where available. Link: https://lkml.kernel.org/r/20231207161211.2374093-10-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: David Hildenbrand Tested-by: Kefeng Wang Tested-by: John Hubbard Cc: Alistair Popple Cc: Anshuman Khandual Cc: Barry Song Cc: Catalin Marinas Cc: David Rientjes Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Itaru Kitayama Cc: Kirill A. 
Shutemov Cc: Luis Chamberlain Cc: Matthew Wilcox (Oracle) Cc: Vlastimil Babka Cc: Yang Shi Cc: Yin Fengwei Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3683 --- tools/testing/selftests/mm/cow.c | 121 +++++++++++++++++-------------- 1 file changed, 67 insertions(+), 54 deletions(-) diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c index 6f2f83990441..a284918b1172 100644 --- a/tools/testing/selftests/mm/cow.c +++ b/tools/testing/selftests/mm/cow.c @@ -32,7 +32,7 @@ static size_t pagesize; static int pagemap_fd; -static size_t thpsize; +static size_t pmdsize; static int nr_hugetlbsizes; static size_t hugetlbsizes[10]; static int gup_fd; @@ -734,7 +734,7 @@ enum thp_run { THP_RUN_PARTIAL_SHARED, }; -static void do_run_with_thp(test_fn fn, enum thp_run thp_run) +static void do_run_with_thp(test_fn fn, enum thp_run thp_run, size_t thpsize) { char *mem, *mmap_mem, *tmp, *mremap_mem = MAP_FAILED; size_t size, mmap_size, mremap_size; @@ -759,11 +759,11 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run) } /* - * Try to populate a THP. Touch the first sub-page and test if we get - * another sub-page populated automatically. + * Try to populate a THP. Touch the first sub-page and test if + * we get the last sub-page populated automatically. */ mem[0] = 0; - if (!pagemap_is_populated(pagemap_fd, mem + pagesize)) { + if (!pagemap_is_populated(pagemap_fd, mem + thpsize - pagesize)) { ksft_test_result_skip("Did not get a THP populated\n"); goto munmap; } @@ -773,12 +773,14 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run) switch (thp_run) { case THP_RUN_PMD: case THP_RUN_PMD_SWAPOUT: + assert(thpsize == pmdsize); break; case THP_RUN_PTE: case THP_RUN_PTE_SWAPOUT: /* * Trigger PTE-mapping the THP by temporarily mapping a single - * subpage R/O. + * subpage R/O. 
This is a noop if the THP is not pmdsize (and + * therefore already PTE-mapped). */ ret = mprotect(mem + pagesize, pagesize, PROT_READ); if (ret) { @@ -875,52 +877,60 @@ static void do_run_with_thp(test_fn fn, enum thp_run thp_run) munmap(mremap_mem, mremap_size); } -static void run_with_thp(test_fn fn, const char *desc) +static void run_with_thp(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... with THP\n", desc); - do_run_with_thp(fn, THP_RUN_PMD); + ksft_print_msg("[RUN] %s ... with THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_PMD, size); } -static void run_with_thp_swap(test_fn fn, const char *desc) +static void run_with_thp_swap(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... with swapped-out THP\n", desc); - do_run_with_thp(fn, THP_RUN_PMD_SWAPOUT); + ksft_print_msg("[RUN] %s ... with swapped-out THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_PMD_SWAPOUT, size); } -static void run_with_pte_mapped_thp(test_fn fn, const char *desc) +static void run_with_pte_mapped_thp(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... with PTE-mapped THP\n", desc); - do_run_with_thp(fn, THP_RUN_PTE); + ksft_print_msg("[RUN] %s ... with PTE-mapped THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_PTE, size); } -static void run_with_pte_mapped_thp_swap(test_fn fn, const char *desc) +static void run_with_pte_mapped_thp_swap(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... with swapped-out, PTE-mapped THP\n", desc); - do_run_with_thp(fn, THP_RUN_PTE_SWAPOUT); + ksft_print_msg("[RUN] %s ... with swapped-out, PTE-mapped THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_PTE_SWAPOUT, size); } -static void run_with_single_pte_of_thp(test_fn fn, const char *desc) +static void run_with_single_pte_of_thp(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... 
with single PTE of THP\n", desc); - do_run_with_thp(fn, THP_RUN_SINGLE_PTE); + ksft_print_msg("[RUN] %s ... with single PTE of THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_SINGLE_PTE, size); } -static void run_with_single_pte_of_thp_swap(test_fn fn, const char *desc) +static void run_with_single_pte_of_thp_swap(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... with single PTE of swapped-out THP\n", desc); - do_run_with_thp(fn, THP_RUN_SINGLE_PTE_SWAPOUT); + ksft_print_msg("[RUN] %s ... with single PTE of swapped-out THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_SINGLE_PTE_SWAPOUT, size); } -static void run_with_partial_mremap_thp(test_fn fn, const char *desc) +static void run_with_partial_mremap_thp(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... with partially mremap()'ed THP\n", desc); - do_run_with_thp(fn, THP_RUN_PARTIAL_MREMAP); + ksft_print_msg("[RUN] %s ... with partially mremap()'ed THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_PARTIAL_MREMAP, size); } -static void run_with_partial_shared_thp(test_fn fn, const char *desc) +static void run_with_partial_shared_thp(test_fn fn, const char *desc, size_t size) { - ksft_print_msg("[RUN] %s ... with partially shared THP\n", desc); - do_run_with_thp(fn, THP_RUN_PARTIAL_SHARED); + ksft_print_msg("[RUN] %s ... 
with partially shared THP (%zu kB)\n", + desc, size / 1024); + do_run_with_thp(fn, THP_RUN_PARTIAL_SHARED, size); } static void run_with_hugetlb(test_fn fn, const char *desc, size_t hugetlbsize) @@ -1091,15 +1101,15 @@ static void run_anon_test_case(struct test_case const *test_case) run_with_base_page(test_case->fn, test_case->desc); run_with_base_page_swap(test_case->fn, test_case->desc); - if (thpsize) { - run_with_thp(test_case->fn, test_case->desc); - run_with_thp_swap(test_case->fn, test_case->desc); - run_with_pte_mapped_thp(test_case->fn, test_case->desc); - run_with_pte_mapped_thp_swap(test_case->fn, test_case->desc); - run_with_single_pte_of_thp(test_case->fn, test_case->desc); - run_with_single_pte_of_thp_swap(test_case->fn, test_case->desc); - run_with_partial_mremap_thp(test_case->fn, test_case->desc); - run_with_partial_shared_thp(test_case->fn, test_case->desc); + if (pmdsize) { + run_with_thp(test_case->fn, test_case->desc, pmdsize); + run_with_thp_swap(test_case->fn, test_case->desc, pmdsize); + run_with_pte_mapped_thp(test_case->fn, test_case->desc, pmdsize); + run_with_pte_mapped_thp_swap(test_case->fn, test_case->desc, pmdsize); + run_with_single_pte_of_thp(test_case->fn, test_case->desc, pmdsize); + run_with_single_pte_of_thp_swap(test_case->fn, test_case->desc, pmdsize); + run_with_partial_mremap_thp(test_case->fn, test_case->desc, pmdsize); + run_with_partial_shared_thp(test_case->fn, test_case->desc, pmdsize); } for (i = 0; i < nr_hugetlbsizes; i++) run_with_hugetlb(test_case->fn, test_case->desc, @@ -1120,7 +1130,7 @@ static int tests_per_anon_test_case(void) { int tests = 2 + nr_hugetlbsizes; - if (thpsize) + if (pmdsize) tests += 8; return tests; } @@ -1329,7 +1339,7 @@ static void run_anon_thp_test_cases(void) { int i; - if (!thpsize) + if (!pmdsize) return; ksft_print_msg("[INFO] Anonymous THP tests\n"); @@ -1338,13 +1348,13 @@ static void run_anon_thp_test_cases(void) struct test_case const *test_case = &anon_thp_test_cases[i]; 
ksft_print_msg("[RUN] %s\n", test_case->desc); - do_run_with_thp(test_case->fn, THP_RUN_PMD); + do_run_with_thp(test_case->fn, THP_RUN_PMD, pmdsize); } } static int tests_per_anon_thp_test_case(void) { - return thpsize ? 1 : 0; + return pmdsize ? 1 : 0; } typedef void (*non_anon_test_fn)(char *mem, const char *smem, size_t size); @@ -1419,7 +1429,7 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc) } /* For alignment purposes, we need twice the thp size. */ - mmap_size = 2 * thpsize; + mmap_size = 2 * pmdsize; mmap_mem = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); if (mmap_mem == MAP_FAILED) { @@ -1434,11 +1444,11 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc) } /* We need a THP-aligned memory area. */ - mem = (char *)(((uintptr_t)mmap_mem + thpsize) & ~(thpsize - 1)); - smem = (char *)(((uintptr_t)mmap_smem + thpsize) & ~(thpsize - 1)); + mem = (char *)(((uintptr_t)mmap_mem + pmdsize) & ~(pmdsize - 1)); + smem = (char *)(((uintptr_t)mmap_smem + pmdsize) & ~(pmdsize - 1)); - ret = madvise(mem, thpsize, MADV_HUGEPAGE); - ret |= madvise(smem, thpsize, MADV_HUGEPAGE); + ret = madvise(mem, pmdsize, MADV_HUGEPAGE); + ret |= madvise(smem, pmdsize, MADV_HUGEPAGE); if (ret) { ksft_test_result_fail("MADV_HUGEPAGE failed\n"); goto munmap; @@ -1457,7 +1467,7 @@ static void run_with_huge_zeropage(non_anon_test_fn fn, const char *desc) goto munmap; } - fn(mem, smem, thpsize); + fn(mem, smem, pmdsize); munmap: munmap(mmap_mem, mmap_size); if (mmap_smem != MAP_FAILED) @@ -1650,7 +1660,7 @@ static void run_non_anon_test_case(struct non_anon_test_case const *test_case) run_with_zeropage(test_case->fn, test_case->desc); run_with_memfd(test_case->fn, test_case->desc); run_with_tmpfile(test_case->fn, test_case->desc); - if (thpsize) + if (pmdsize) run_with_huge_zeropage(test_case->fn, test_case->desc); for (i = 0; i < nr_hugetlbsizes; i++) run_with_memfd_hugetlb(test_case->fn, 
test_case->desc, @@ -1671,7 +1681,7 @@ static int tests_per_non_anon_test_case(void) { int tests = 3 + nr_hugetlbsizes; - if (thpsize) + if (pmdsize) tests += 1; return tests; } @@ -1683,10 +1693,13 @@ int main(int argc, char **argv) ksft_print_header(); pagesize = getpagesize(); - thpsize = read_pmd_pagesize(); - if (thpsize) + pmdsize = read_pmd_pagesize(); + if (pmdsize) { + ksft_print_msg("[INFO] detected PMD size: %zu KiB\n", + pmdsize / 1024); ksft_print_msg("[INFO] detected THP size: %zu KiB\n", - thpsize / 1024); + pmdsize / 1024); + } nr_hugetlbsizes = detect_hugetlb_page_sizes(hugetlbsizes, ARRAY_SIZE(hugetlbsizes)); detect_huge_zeropage(); -- Gitee From da8af6bd40460f5a7e729426f64c32cb4f179252 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 7 Dec 2023 16:12:11 +0000 Subject: [PATCH 1118/2138] selftests/mm/cow: add tests for anonymous multi-size THP ANBZ: #9728 commit c0f79103322c322ea9342d52c2d81528b7b56232 upstream Add tests similar to the existing PMD-sized THP tests, but which operate on memory backed by (PTE-mapped) multi-size THP. This reuses all the existing infrastructure. If the test suite detects that multi-size THP is not supported by the kernel, the new tests are skipped. Link: https://lkml.kernel.org/r/20231207161211.2374093-11-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: David Hildenbrand Tested-by: Kefeng Wang Tested-by: John Hubbard Cc: Alistair Popple Cc: Anshuman Khandual Cc: Barry Song Cc: Catalin Marinas Cc: David Rientjes Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Itaru Kitayama Cc: Kirill A. 
Shutemov Cc: Luis Chamberlain Cc: Matthew Wilcox (Oracle) Cc: Vlastimil Babka Cc: Yang Shi Cc: Yin Fengwei Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3683 --- tools/testing/selftests/mm/cow.c | 82 +++++++++++++++++++++++++++----- 1 file changed, 70 insertions(+), 12 deletions(-) diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c index a284918b1172..363bf5f801be 100644 --- a/tools/testing/selftests/mm/cow.c +++ b/tools/testing/selftests/mm/cow.c @@ -29,15 +29,49 @@ #include "../../../../mm/gup_test.h" #include "../kselftest.h" #include "vm_util.h" +#include "thp_settings.h" static size_t pagesize; static int pagemap_fd; static size_t pmdsize; +static int nr_thpsizes; +static size_t thpsizes[20]; static int nr_hugetlbsizes; static size_t hugetlbsizes[10]; static int gup_fd; static bool has_huge_zeropage; +static int sz2ord(size_t size) +{ + return __builtin_ctzll(size / pagesize); +} + +static int detect_thp_sizes(size_t sizes[], int max) +{ + int count = 0; + unsigned long orders; + size_t kb; + int i; + + /* thp not supported at all. 
*/ + if (!pmdsize) + return 0; + + orders = 1UL << sz2ord(pmdsize); + orders |= thp_supported_orders(); + + for (i = 0; orders && count < max; i++) { + if (!(orders & (1UL << i))) + continue; + orders &= ~(1UL << i); + kb = (pagesize >> 10) << i; + sizes[count++] = kb * 1024; + ksft_print_msg("[INFO] detected THP size: %zu KiB\n", kb); + } + + return count; +} + static void detect_huge_zeropage(void) { int fd = open("/sys/kernel/mm/transparent_hugepage/use_zero_page", @@ -1101,15 +1135,27 @@ static void run_anon_test_case(struct test_case const *test_case) run_with_base_page(test_case->fn, test_case->desc); run_with_base_page_swap(test_case->fn, test_case->desc); - if (pmdsize) { - run_with_thp(test_case->fn, test_case->desc, pmdsize); - run_with_thp_swap(test_case->fn, test_case->desc, pmdsize); - run_with_pte_mapped_thp(test_case->fn, test_case->desc, pmdsize); - run_with_pte_mapped_thp_swap(test_case->fn, test_case->desc, pmdsize); - run_with_single_pte_of_thp(test_case->fn, test_case->desc, pmdsize); - run_with_single_pte_of_thp_swap(test_case->fn, test_case->desc, pmdsize); - run_with_partial_mremap_thp(test_case->fn, test_case->desc, pmdsize); - run_with_partial_shared_thp(test_case->fn, test_case->desc, pmdsize); + for (i = 0; i < nr_thpsizes; i++) { + size_t size = thpsizes[i]; + struct thp_settings settings = *thp_current_settings(); + + settings.hugepages[sz2ord(pmdsize)].enabled = THP_NEVER; + settings.hugepages[sz2ord(size)].enabled = THP_ALWAYS; + thp_push_settings(&settings); + + if (size == pmdsize) { + run_with_thp(test_case->fn, test_case->desc, size); + run_with_thp_swap(test_case->fn, test_case->desc, size); + } + + run_with_pte_mapped_thp(test_case->fn, test_case->desc, size); + run_with_pte_mapped_thp_swap(test_case->fn, test_case->desc, size); + run_with_single_pte_of_thp(test_case->fn, test_case->desc, size); + run_with_single_pte_of_thp_swap(test_case->fn, test_case->desc, size); + run_with_partial_mremap_thp(test_case->fn, test_case->desc, 
size); + run_with_partial_shared_thp(test_case->fn, test_case->desc, size); + + thp_pop_settings(); } for (i = 0; i < nr_hugetlbsizes; i++) run_with_hugetlb(test_case->fn, test_case->desc, @@ -1130,8 +1176,9 @@ static int tests_per_anon_test_case(void) { int tests = 2 + nr_hugetlbsizes; + tests += 6 * nr_thpsizes; if (pmdsize) - tests += 8; + tests += 2; return tests; } @@ -1689,16 +1736,22 @@ static int tests_per_non_anon_test_case(void) int main(int argc, char **argv) { int err; + struct thp_settings default_settings; ksft_print_header(); pagesize = getpagesize(); pmdsize = read_pmd_pagesize(); if (pmdsize) { + /* Only if THP is supported. */ + thp_read_settings(&default_settings); + default_settings.hugepages[sz2ord(pmdsize)].enabled = THP_INHERIT; + thp_save_settings(); + thp_push_settings(&default_settings); + ksft_print_msg("[INFO] detected PMD size: %zu KiB\n", pmdsize / 1024); - ksft_print_msg("[INFO] detected THP size: %zu KiB\n", - pmdsize / 1024); + nr_thpsizes = detect_thp_sizes(thpsizes, ARRAY_SIZE(thpsizes)); } nr_hugetlbsizes = detect_hugetlb_page_sizes(hugetlbsizes, ARRAY_SIZE(hugetlbsizes)); @@ -1717,6 +1770,11 @@ int main(int argc, char **argv) run_anon_thp_test_cases(); run_non_anon_test_cases(); + if (pmdsize) { + /* Only if THP is supported. */ + thp_restore_settings(); + } + err = ksft_get_fail_cnt(); if (err) ksft_exit_fail_msg("%d out of %d tests failed\n", -- Gitee From 0759e83c548308c8c337b6f8a006e5606c1a2792 Mon Sep 17 00:00:00 2001 From: "Vishal Moola (Oracle)" Date: Fri, 20 Oct 2023 11:33:27 -0700 Subject: [PATCH 1119/2138] mm/khugepaged: convert __collapse_huge_page_isolate() to use folios ANBZ: #9728 commit 8dd1e896735f6e5abf66525dfd39bbd7b8c0c6d6 upstream Patch series "Some khugepaged folio conversions", v3. This patchset converts a number of functions to use folios. This cleans up some khugepaged code and removes a large number of hidden compound_head() calls. 
This patch (of 5): Replaces 11 calls to compound_head() with 1, and removes 1348 bytes of kernel text. Link: https://lkml.kernel.org/r/20231020183331.10770-1-vishal.moola@gmail.com Link: https://lkml.kernel.org/r/20231020183331.10770-2-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Reviewed-by: Yang Shi Cc: Kefeng Wang Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- mm/khugepaged.c | 45 +++++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 927c7295d4cb..5277ee50195f 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -543,6 +543,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, struct list_head *compound_pagelist) { struct page *page = NULL; + struct folio *folio = NULL; pte_t *_pte; int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0; bool writable = false; @@ -577,7 +578,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, goto out; } - VM_BUG_ON_PAGE(!PageAnon(page), page); + folio = page_folio(page); + VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio); if (page_mapcount(page) > 1) { ++shared; @@ -589,16 +591,15 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, } } - if (PageCompound(page)) { - struct page *p; - page = compound_head(page); + if (folio_test_large(folio)) { + struct folio *f; /* * Check if we have dealt with the compound page * already */ - list_for_each_entry(p, compound_pagelist, lru) { - if (page == p) + list_for_each_entry(f, compound_pagelist, lru) { + if (folio == f) goto next; } } @@ -609,7 +610,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, * is needed to serialize against split_huge_page * when invoked from the VM. 
*/ - if (!trylock_page(page)) { + if (!folio_trylock(folio)) { result = SCAN_PAGE_LOCK; goto out; } @@ -625,8 +626,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, * but not from this process. The other process cannot write to * the page, only trigger CoW. */ - if (!is_refcount_suitable(page)) { - unlock_page(page); + if (!is_refcount_suitable(&folio->page)) { + folio_unlock(folio); result = SCAN_PAGE_COUNT; goto out; } @@ -635,27 +636,27 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, * Isolate the page to avoid collapsing an hugepage * currently in use by the VM. */ - if (!isolate_lru_page(page)) { - unlock_page(page); + if (!folio_isolate_lru(folio)) { + folio_unlock(folio); result = SCAN_DEL_PAGE_LRU; goto out; } - mod_node_page_state(page_pgdat(page), - NR_ISOLATED_ANON + page_is_file_lru(page), - compound_nr(page)); - VM_BUG_ON_PAGE(!PageLocked(page), page); - VM_BUG_ON_PAGE(PageLRU(page), page); + node_stat_mod_folio(folio, + NR_ISOLATED_ANON + folio_is_file_lru(folio), + folio_nr_pages(folio)); + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); + VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); - if (PageCompound(page)) - list_add_tail(&page->lru, compound_pagelist); + if (folio_test_large(folio)) + list_add_tail(&folio->lru, compound_pagelist); next: /* * If collapse was initiated by khugepaged, check that there is * enough young pte to justify collapsing the page */ if (cc->is_khugepaged && - (pte_young(pteval) || page_is_young(page) || - PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, + (pte_young(pteval) || folio_test_young(folio) || + folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm, address))) referenced++; @@ -669,13 +670,13 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, result = SCAN_LACK_REFERENCED_PAGE; } else { result = SCAN_SUCCEED; - trace_mm_collapse_huge_page_isolate(page, none_or_zero, + trace_mm_collapse_huge_page_isolate(&folio->page, 
none_or_zero, referenced, writable, result); return result; } out: release_pte_pages(pte, _pte, compound_pagelist); - trace_mm_collapse_huge_page_isolate(page, none_or_zero, + trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero, referenced, writable, result); return result; } -- Gitee From b4b4baaa0bbe696ad4ae1d2baa0b5b851c079a76 Mon Sep 17 00:00:00 2001 From: "Vishal Moola (Oracle)" Date: Fri, 20 Oct 2023 11:33:28 -0700 Subject: [PATCH 1120/2138] mm/khugepaged: convert hpage_collapse_scan_pmd() to use folios ANBZ: #9728 commit 5c07ebb372d66423e508ecfb8e00324f8797f072 upstream Replaces 5 calls to compound_head(), and removes 1385 bytes of kernel text. Link: https://lkml.kernel.org/r/20231020183331.10770-3-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) Reviewed-by: Rik van Riel Reviewed-by: Yang Shi Cc: Kefeng Wang Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- mm/khugepaged.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 5277ee50195f..fae6a64550d3 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1242,6 +1242,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, int result = SCAN_FAIL, referenced = 0; int none_or_zero = 0, shared = 0; struct page *page = NULL; + struct folio *folio = NULL; unsigned long _address; spinlock_t *ptl; int node = NUMA_NO_NODE, unmapped = 0; @@ -1328,29 +1329,28 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, } } - page = compound_head(page); - + folio = page_folio(page); /* * Record which node the original page is from and save this * information to cc->node_load[]. * Khugepaged will allocate hugepage from the node has the max * hit record. 
*/ - node = page_to_nid(page); + node = folio_nid(folio); if (hpage_collapse_scan_abort(node, cc)) { result = SCAN_SCAN_ABORT; goto out_unmap; } cc->node_load[node]++; - if (!PageLRU(page)) { + if (!folio_test_lru(folio)) { result = SCAN_PAGE_LRU; goto out_unmap; } - if (PageLocked(page)) { + if (folio_test_locked(folio)) { result = SCAN_PAGE_LOCK; goto out_unmap; } - if (!PageAnon(page)) { + if (!folio_test_anon(folio)) { result = SCAN_PAGE_ANON; goto out_unmap; } @@ -1365,7 +1365,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, * has excessive GUP pins (i.e. 512). Anyway the same check * will be done again later the risk seems low. */ - if (!is_refcount_suitable(page)) { + if (!is_refcount_suitable(&folio->page)) { result = SCAN_PAGE_COUNT; goto out_unmap; } @@ -1375,8 +1375,8 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, * enough young pte to justify collapsing the page */ if (cc->is_khugepaged && - (pte_young(pteval) || page_is_young(page) || - PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, + (pte_young(pteval) || folio_test_young(folio) || + folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm, address))) referenced++; } @@ -1398,7 +1398,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, *mmap_locked = false; } out: - trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, + trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced, none_or_zero, result, unmapped); return result; } -- Gitee From 6740930d792ccc3e8e17ad1a68b3de3031279028 Mon Sep 17 00:00:00 2001 From: "Vishal Moola (Oracle)" Date: Fri, 20 Oct 2023 11:33:29 -0700 Subject: [PATCH 1121/2138] mm/khugepaged: convert is_refcount_suitable() to use folios ANBZ: #9728 commit dbf85c21e4aff90912b5d7755d2b25611f9191e9 upstream Both callers of is_refcount_suitable() have been converted to use folios, so convert it to take in a folio. 
Both callers only operate on head pages of folios so mapcount/refcount conversions here are trivial. Removes 3 calls to compound head, and removes 315 bytes of kernel text. Link: https://lkml.kernel.org/r/20231020183331.10770-4-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) Reviewed-by: David Hildenbrand Reviewed-by: Yang Shi Cc: Kefeng Wang Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- mm/khugepaged.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index fae6a64550d3..2daa0e65a171 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -525,15 +525,15 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte, } } -static bool is_refcount_suitable(struct page *page) +static bool is_refcount_suitable(struct folio *folio) { int expected_refcount; - expected_refcount = total_mapcount(page); - if (PageSwapCache(page)) - expected_refcount += compound_nr(page); + expected_refcount = folio_mapcount(folio); + if (folio_test_swapcache(folio)) + expected_refcount += folio_nr_pages(folio); - return page_count(page) == expected_refcount; + return folio_ref_count(folio) == expected_refcount; } static int __collapse_huge_page_isolate(struct vm_area_struct *vma, @@ -626,7 +626,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, * but not from this process. The other process cannot write to * the page, only trigger CoW. */ - if (!is_refcount_suitable(&folio->page)) { + if (!is_refcount_suitable(folio)) { folio_unlock(folio); result = SCAN_PAGE_COUNT; goto out; @@ -1365,7 +1365,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, * has excessive GUP pins (i.e. 512). Anyway the same check * will be done again later the risk seems low. 
*/ - if (!is_refcount_suitable(&folio->page)) { + if (!is_refcount_suitable(folio)) { result = SCAN_PAGE_COUNT; goto out_unmap; } -- Gitee From c5bf9cf5506fa3ae39f470d99f6ae6920c948526 Mon Sep 17 00:00:00 2001 From: "Vishal Moola (Oracle)" Date: Fri, 20 Oct 2023 11:33:31 -0700 Subject: [PATCH 1122/2138] mm/khugepaged: convert collapse_pte_mapped_thp() to use folios ANBZ: #9728 commit 98b32d296d95d7aa0516c36b72406277412268cd upstream This removes 2 calls to compound_head() and helps convert khugepaged to use folios throughout. Previously, if the address passed to collapse_pte_mapped_thp() corresponded to a tail page, the scan would fail immediately. Using filemap_lock_folio() we get the corresponding folio back and try to operate on the folio instead. Link: https://lkml.kernel.org/r/20231020183331.10770-6-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) Reviewed-by: Rik van Riel Reviewed-by: Yang Shi Cc: Kefeng Wang Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- mm/khugepaged.c | 45 ++++++++++++++++++++------------------------- 1 file changed, 20 insertions(+), 25 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 2daa0e65a171..9ece91d20d2d 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1468,7 +1468,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, bool notified = false; unsigned long haddr = addr & HPAGE_PMD_MASK; struct vm_area_struct *vma = vma_lookup(mm, haddr); - struct page *hpage; + struct folio *folio; pte_t *start_pte, *pte; pmd_t *pmd, pgt_pmd; spinlock_t *pml = NULL, *ptl; @@ -1502,19 +1502,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, if (userfaultfd_wp(vma)) return SCAN_PTE_UFFD_WP; - hpage = find_lock_page(vma->vm_file->f_mapping, + folio = filemap_lock_folio(vma->vm_file->f_mapping, linear_page_index(vma, haddr)); - if (!hpage) + if (IS_ERR(folio)) 
return SCAN_PAGE_NULL; - if (!PageHead(hpage)) { - result = SCAN_FAIL; - goto drop_hpage; - } - - if (compound_order(hpage) != HPAGE_PMD_ORDER) { + if (folio_order(folio) != HPAGE_PMD_ORDER) { result = SCAN_PAGE_COMPOUND; - goto drop_hpage; + goto drop_folio; } result = find_pmd_or_thp_or_none(mm, haddr, &pmd); @@ -1528,13 +1523,13 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, */ goto maybe_install_pmd; default: - goto drop_hpage; + goto drop_folio; } result = SCAN_FAIL; start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); if (!start_pte) /* mmap_lock + page lock should prevent this */ - goto drop_hpage; + goto drop_folio; /* step 1: check all mapped PTEs are to the right huge page */ for (i = 0, addr = haddr, pte = start_pte; @@ -1559,7 +1554,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, * Note that uprobe, debugger, or MAP_PRIVATE may change the * page table, but the new page will not be a subpage of hpage. */ - if (hpage + i != page) + if (folio_page(folio, i) != page) goto abort; } @@ -1574,7 +1569,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, * page_table_lock) ptl nests inside pml. The less time we hold pml, * the better; but userfaultfd's mfill_atomic_pte() on a private VMA * inserts a valid as-if-COWed PTE without even looking up page cache. - * So page lock of hpage does not protect from it, so we must not drop + * So page lock of folio does not protect from it, so we must not drop * ptl before pgt_pmd is removed, so uffd private needs pml taken now. 
*/ if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED)) @@ -1598,7 +1593,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, continue; /* * We dropped ptl after the first scan, to do the mmu_notifier: - * page lock stops more PTEs of the hpage being faulted in, but + * page lock stops more PTEs of the folio being faulted in, but * does not stop write faults COWing anon copies from existing * PTEs; and does not stop those being swapped out or migrated. */ @@ -1607,7 +1602,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, goto abort; } page = vm_normal_page(vma, addr, ptent); - if (hpage + i != page) + if (folio_page(folio, i) != page) goto abort; /* @@ -1626,8 +1621,8 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, /* step 3: set proper refcount and mm_counters. */ if (nr_ptes) { - page_ref_sub(hpage, nr_ptes); - add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes); + folio_ref_sub(folio, nr_ptes); + add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes); } /* step 4: remove empty page table */ @@ -1651,14 +1646,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, maybe_install_pmd: /* step 5: install pmd entry */ result = install_pmd - ? set_huge_pmd(vma, haddr, pmd, hpage) + ? 
set_huge_pmd(vma, haddr, pmd, &folio->page) : SCAN_SUCCEED; - goto drop_hpage; + goto drop_folio; abort: if (nr_ptes) { flush_tlb_mm(mm); - page_ref_sub(hpage, nr_ptes); - add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes); + folio_ref_sub(folio, nr_ptes); + add_mm_counter(mm, mm_counter_file(&folio->page), -nr_ptes); } if (start_pte) pte_unmap_unlock(start_pte, ptl); @@ -1666,9 +1661,9 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, spin_unlock(pml); if (notified) mmu_notifier_invalidate_range_end(&range); -drop_hpage: - unlock_page(hpage); - put_page(hpage); +drop_folio: + folio_unlock(folio); + folio_put(folio); return result; } -- Gitee From df94ec97470ac931b6beb1bb918c34d9d0bfc4db Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Sat, 18 Nov 2023 10:32:28 +0800 Subject: [PATCH 1123/2138] mm: ksm: use more folio api in ksm_might_need_to_copy() ANBZ: #9728 commit 1486fb50136f4799946f5ecfe050094574647153 upstream Patch series "mm: cleanup and use more folio in page fault", v3. Rename page_copy_prealloc() to folio_prealloc(), which is used by more functions, also do more folio conversion in page fault. This patch (of 5): Since ksm only support normal page, no swapout/in for ksm large folio too, add large folio check in ksm_might_need_to_copy(), also convert page->index to folio->index as page->index is going away. Then convert ksm_might_need_to_copy() to use more folio api to save nine compound_head() calls, short 'address' to reduce max-line-length. 
Link: https://lkml.kernel.org/r/20231118023232.1409103-1-wangkefeng.wang@huawei.com Link: https://lkml.kernel.org/r/20231118023232.1409103-2-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Matthew Wilcox (Oracle) Cc: Sidhartha Kumar Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- include/linux/ksm.h | 4 ++-- mm/ksm.c | 39 +++++++++++++++++++++------------------ 2 files changed, 23 insertions(+), 20 deletions(-) diff --git a/include/linux/ksm.h b/include/linux/ksm.h index b9cdeba03668..32ecea266fe6 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -88,7 +88,7 @@ static inline void ksm_exit(struct mm_struct *mm) * but what if the vma was unmerged while the page was swapped out? */ struct page *ksm_might_need_to_copy(struct page *page, - struct vm_area_struct *vma, unsigned long address); + struct vm_area_struct *vma, unsigned long addr); void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc); void folio_migrate_ksm(struct folio *newfolio, struct folio *folio); @@ -141,7 +141,7 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, } static inline struct page *ksm_might_need_to_copy(struct page *page, - struct vm_area_struct *vma, unsigned long address) + struct vm_area_struct *vma, unsigned long addr) { return page; } diff --git a/mm/ksm.c b/mm/ksm.c index 2e4cd681622d..fe9296bd85cd 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2788,48 +2788,51 @@ void __ksm_exit(struct mm_struct *mm) } struct page *ksm_might_need_to_copy(struct page *page, - struct vm_area_struct *vma, unsigned long address) + struct vm_area_struct *vma, unsigned long addr) { struct folio *folio = page_folio(page); struct anon_vma *anon_vma = folio_anon_vma(folio); - struct page *new_page; + struct folio *new_folio; - if (PageKsm(page)) { - if (page_stable_node(page) && + if (folio_test_large(folio)) + return 
page; + + if (folio_test_ksm(folio)) { + if (folio_stable_node(folio) && !(ksm_run & KSM_RUN_UNMERGE)) return page; /* no need to copy it */ } else if (!anon_vma) { return page; /* no need to copy it */ - } else if (page->index == linear_page_index(vma, address) && + } else if (folio->index == linear_page_index(vma, addr) && anon_vma->root == vma->anon_vma->root) { return page; /* still no need to copy it */ } if (PageHWPoison(page)) return ERR_PTR(-EHWPOISON); - if (!PageUptodate(page)) + if (!folio_test_uptodate(folio)) return page; /* let do_swap_page report the error */ - new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); - if (new_page && - mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) { - put_page(new_page); - new_page = NULL; + new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false); + if (new_folio && + mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) { + folio_put(new_folio); + new_folio = NULL; } - if (new_page) { - if (copy_mc_user_highpage(new_page, page, address, vma)) { - put_page(new_page); + if (new_folio) { + if (copy_mc_user_highpage(&new_folio->page, page, addr, vma)) { + folio_put(new_folio); memory_failure_queue(page_to_pfn(page), 0); return ERR_PTR(-EHWPOISON); } - SetPageDirty(new_page); - __SetPageUptodate(new_page); - __SetPageLocked(new_page); + folio_set_dirty(new_folio); + __folio_mark_uptodate(new_folio); + __folio_set_locked(new_folio); #ifdef CONFIG_SWAP count_vm_event(KSM_SWPIN_COPY); #endif } - return new_page; + return new_folio ? 
&new_folio->page : NULL; } void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) -- Gitee From b3a6a3e368dc5bc011f5dac416c210c0d2eafa6f Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Sat, 18 Nov 2023 10:32:29 +0800 Subject: [PATCH 1124/2138] mm: memory: use a folio in validate_page_before_insert() ANBZ: #9728 commit f8b6187d8dd98fd32fe393071f362a7b6beaad0a upstream Use a folio in validate_page_before_insert() to save two compound_head() calls. Link: https://lkml.kernel.org/r/20231118023232.1409103-3-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Sidhartha Kumar Cc: David Hildenbrand Cc: Matthew Wilcox (Oracle) Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- mm/memory.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 31751ca29e06..4534a273ad6e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1828,9 +1828,12 @@ pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, static int validate_page_before_insert(struct page *page) { - if (PageAnon(page) || PageSlab(page) || page_has_type(page)) + struct folio *folio = page_folio(page); + + if (folio_test_anon(folio) || folio_test_slab(folio) || + page_has_type(page)) return -EINVAL; - flush_dcache_page(page); + flush_dcache_folio(folio); return 0; } -- Gitee From 20a487a0b0e89aea3f637b4408c6daaf8fd8efcd Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Sat, 18 Nov 2023 10:32:30 +0800 Subject: [PATCH 1125/2138] mm: memory: rename page_copy_prealloc() to folio_prealloc() ANBZ: #9728 commit 294de6d8f14a69f1251b94223ba9d90d64b28cec upstream Let's rename page_copy_prealloc() to folio_prealloc(), which could be reused in more functons, as it maybe zero the new page, pass a new need_zero to it, and call the vma_alloc_zeroed_movable_folio() if need_zero is true. 
Link: https://lkml.kernel.org/r/20231118023232.1409103-4-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Sidhartha Kumar Reviewed-by: Vishal Moola (Oracle) Cc: David Hildenbrand Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- mm/memory.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 4534a273ad6e..86f4fb06651c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -979,12 +979,17 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, return 0; } -static inline struct folio *page_copy_prealloc(struct mm_struct *src_mm, - struct vm_area_struct *vma, unsigned long addr) +static inline struct folio *folio_prealloc(struct mm_struct *src_mm, + struct vm_area_struct *vma, unsigned long addr, bool need_zero) { struct folio *new_folio; - new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false); + if (need_zero) + new_folio = vma_alloc_zeroed_movable_folio(vma, addr); + else + new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, + addr, false); + if (!new_folio) return NULL; @@ -1116,7 +1121,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, } else if (ret == -EBUSY) { goto out; } else if (ret == -EAGAIN) { - prealloc = page_copy_prealloc(src_mm, src_vma, addr); + prealloc = folio_prealloc(src_mm, src_vma, addr, false); if (!prealloc) return -ENOMEM; } else if (ret) { -- Gitee From c21021602cf009f6631e57eb77685760aafdb7a3 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Sat, 18 Nov 2023 10:32:31 +0800 Subject: [PATCH 1126/2138] mm: memory: use a folio in do_cow_fault() ANBZ: #9728 commit e4621e70469c3ac6e1b6914f1c42941a8a6e44d2 upstream Use folio_prealloc() helper and convert to use a folio in do_cow_fault(), which save five compound_head() calls. 
Link: https://lkml.kernel.org/r/20231118023232.1409103-5-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Vishal Moola (Oracle) Cc: David Hildenbrand Cc: Matthew Wilcox (Oracle) Cc: Sidhartha Kumar Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- mm/memory.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 86f4fb06651c..b751e79b1f2f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4903,6 +4903,7 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf) static vm_fault_t do_cow_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; + struct folio *folio; vm_fault_t ret; if (vmf->flags & FAULT_FLAG_VMA_LOCK) { @@ -4913,16 +4914,11 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf) if (unlikely(anon_vma_prepare(vma))) return VM_FAULT_OOM; - vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); - if (!vmf->cow_page) + folio = folio_prealloc(vma->vm_mm, vma, vmf->address, false); + if (!folio) return VM_FAULT_OOM; - if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm, - GFP_KERNEL)) { - put_page(vmf->cow_page); - return VM_FAULT_OOM; - } - folio_throttle_swaprate(page_folio(vmf->cow_page), GFP_KERNEL); + vmf->cow_page = &folio->page; ret = __do_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) @@ -4931,7 +4927,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf) return ret; copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma); - __SetPageUptodate(vmf->cow_page); + __folio_mark_uptodate(folio); ret |= finish_fault(vmf); unlock_page(vmf->page); @@ -4940,7 +4936,7 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf) goto uncharge_out; return ret; uncharge_out: - put_page(vmf->cow_page); + folio_put(folio); return ret; } -- Gitee From d95dd8396c120d82b62c2bc904bbd8ff30e5c7f4 Mon Sep 17 00:00:00 2001 From: Kefeng 
Wang Date: Sat, 18 Nov 2023 10:32:32 +0800 Subject: [PATCH 1127/2138] mm: memory: use folio_prealloc() in wp_page_copy() ANBZ: #9728 commit cf503cc665c442ce9893cb12561c57a328465e29 upstream Use folio_prealloc() helper to simplify code a bit. Link: https://lkml.kernel.org/r/20231118023232.1409103-6-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Matthew Wilcox (Oracle) Cc: Sidhartha Kumar Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- mm/memory.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index b751e79b1f2f..01b37600acea 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3257,6 +3257,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) int page_copied = 0; struct mmu_notifier_range range; int ret; + bool pfn_is_zero; delayacct_wpcopy_start(); @@ -3265,16 +3266,12 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) if (unlikely(anon_vma_prepare(vma))) goto oom; - if (is_zero_pfn(pte_pfn(vmf->orig_pte))) { - new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address); - if (!new_folio) - goto oom; - } else { - new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, - vmf->address, false); - if (!new_folio) - goto oom; + pfn_is_zero = is_zero_pfn(pte_pfn(vmf->orig_pte)); + new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero); + if (!new_folio) + goto oom; + if (!pfn_is_zero) { ret = __wp_page_copy_user(&new_folio->page, vmf->page, vmf); if (ret) { /* @@ -3294,10 +3291,6 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) kmsan_copy_page_meta(&new_folio->page, vmf->page); } - if (mem_cgroup_charge(new_folio, mm, GFP_KERNEL)) - goto oom_free_new; - folio_throttle_swaprate(new_folio, GFP_KERNEL); - __folio_mark_uptodate(new_folio); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, @@ -3396,8 +3389,6 @@ static vm_fault_t 
wp_page_copy(struct vm_fault *vmf) delayacct_wpcopy_end(); return 0; -oom_free_new: - folio_put(new_folio); oom: if (old_folio) folio_put(old_folio); -- Gitee From 5d9fa16836103aa5efbe30054b50515ce20036d7 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 11 Dec 2023 16:22:06 +0000 Subject: [PATCH 1128/2138] mm: convert ksm_might_need_to_copy() to work on folios ANBZ: #9728 commit 96db66d9c8f3c1547325af01b1f328b85d6ee1b9 upstream Patch series "Finish two folio conversions". Most callers of page_add_new_anon_rmap() and lru_cache_add_inactive_or_unevictable() have been converted to their folio equivalents, but there are still a few stragglers. There's a bit of preparatory work in ksm and unuse_pte(), but after that it's pretty mechanical. This patch (of 9): Accept a folio as an argument and return a folio result. Removes a call to compound_head() in do_swap_page(), and prevents folio & page from getting out of sync in unuse_pte(). Reviewed-by: David Hildenbrand [willy@infradead.org: fix smatch warning] Link: https://lkml.kernel.org/r/ZXnPtblC6A1IkyAB@casper.infradead.org [david@redhat.com: only adjust the page if the folio changed] Link: https://lkml.kernel.org/r/6a8f2110-fa91-4c10-9eae-88315309a6e3@redhat.com Link: https://lkml.kernel.org/r/20231211162214.2146080-1-willy@infradead.org Link: https://lkml.kernel.org/r/20231211162214.2146080-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: David Hildenbrand Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- include/linux/ksm.h | 6 +++--- mm/ksm.c | 21 +++++++++++---------- mm/memory.c | 11 +++++++---- mm/swapfile.c | 8 +++++--- 4 files changed, 26 insertions(+), 20 deletions(-) diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 32ecea266fe6..f701b57fc64b 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -87,7 +87,7 @@ static inline void ksm_exit(struct mm_struct *mm) * 
We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, * but what if the vma was unmerged while the page was swapped out? */ -struct page *ksm_might_need_to_copy(struct page *page, +struct folio *ksm_might_need_to_copy(struct folio *folio, struct vm_area_struct *vma, unsigned long addr); void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc); @@ -140,10 +140,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, return 0; } -static inline struct page *ksm_might_need_to_copy(struct page *page, +static inline struct folio *ksm_might_need_to_copy(struct folio *folio, struct vm_area_struct *vma, unsigned long addr) { - return page; + return folio; } static inline void rmap_walk_ksm(struct folio *folio, diff --git a/mm/ksm.c b/mm/ksm.c index fe9296bd85cd..5fbe58d863c7 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2787,30 +2787,30 @@ void __ksm_exit(struct mm_struct *mm) trace_ksm_exit(mm); } -struct page *ksm_might_need_to_copy(struct page *page, +struct folio *ksm_might_need_to_copy(struct folio *folio, struct vm_area_struct *vma, unsigned long addr) { - struct folio *folio = page_folio(page); + struct page *page = folio_page(folio, 0); struct anon_vma *anon_vma = folio_anon_vma(folio); struct folio *new_folio; if (folio_test_large(folio)) - return page; + return folio; if (folio_test_ksm(folio)) { if (folio_stable_node(folio) && !(ksm_run & KSM_RUN_UNMERGE)) - return page; /* no need to copy it */ + return folio; /* no need to copy it */ } else if (!anon_vma) { - return page; /* no need to copy it */ + return folio; /* no need to copy it */ } else if (folio->index == linear_page_index(vma, addr) && anon_vma->root == vma->anon_vma->root) { - return page; /* still no need to copy it */ + return folio; /* still no need to copy it */ } if (PageHWPoison(page)) return ERR_PTR(-EHWPOISON); if (!folio_test_uptodate(folio)) - return page; /* let do_swap_page report the error */ + return folio; /* let do_swap_page report the 
error */ new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false); if (new_folio && @@ -2819,9 +2819,10 @@ struct page *ksm_might_need_to_copy(struct page *page, new_folio = NULL; } if (new_folio) { - if (copy_mc_user_highpage(&new_folio->page, page, addr, vma)) { + if (copy_mc_user_highpage(folio_page(new_folio, 0), page, + addr, vma)) { folio_put(new_folio); - memory_failure_queue(page_to_pfn(page), 0); + memory_failure_queue(folio_pfn(folio), 0); return ERR_PTR(-EHWPOISON); } folio_set_dirty(new_folio); @@ -2832,7 +2833,7 @@ struct page *ksm_might_need_to_copy(struct page *page, #endif } - return new_folio ? &new_folio->page : NULL; + return new_folio; } void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) diff --git a/mm/memory.c b/mm/memory.c index 01b37600acea..cdef8d808dd2 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4082,15 +4082,18 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) * page->index of !PageKSM() pages would be nonlinear inside the * anon VMA -- PageKSM() is lost on actual swapout. 
*/ - page = ksm_might_need_to_copy(page, vma, vmf->address); - if (unlikely(!page)) { + folio = ksm_might_need_to_copy(folio, vma, vmf->address); + if (unlikely(!folio)) { ret = VM_FAULT_OOM; + folio = swapcache; goto out_page; - } else if (unlikely(PTR_ERR(page) == -EHWPOISON)) { + } else if (unlikely(folio == ERR_PTR(-EHWPOISON))) { ret = VM_FAULT_HWPOISON; + folio = swapcache; goto out_page; } - folio = page_folio(page); + if (folio != swapcache) + page = folio_page(folio, 0); /* * If we want to map a page that's in the swapcache writable, we diff --git a/mm/swapfile.c b/mm/swapfile.c index c856d6bb2daf..bd450abe309e 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1760,11 +1760,13 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, int ret = 1; swapcache = page; - page = ksm_might_need_to_copy(page, vma, addr); - if (unlikely(!page)) + folio = ksm_might_need_to_copy(folio, vma, addr); + if (unlikely(!folio)) return -ENOMEM; - else if (unlikely(PTR_ERR(page) == -EHWPOISON)) + else if (unlikely(folio == ERR_PTR(-EHWPOISON))) hwpoisoned = true; + else + page = folio_file_page(folio, swp_offset(entry)); pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte), -- Gitee From 7d2db5de0db2004bb821a02efebf2c630900b6bc Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 12 Dec 2023 16:48:13 +0000 Subject: [PATCH 1129/2138] mm: remove PageAnonExclusive assertions in unuse_pte() ANBZ: #9728 commit 8d294a8c6393afbde59cf14a0e8413df4b206698 upstream The page in question is either freshly allocated or known to be in the swap cache; these assertions are not particularly useful. 
Link: https://lkml.kernel.org/r/20231212164813.2540119-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- mm/swapfile.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index bd450abe309e..b7a9512a8eb4 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1799,10 +1799,6 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, */ arch_swap_restore(entry, page_folio(page)); - /* See do_swap_page() */ - BUG_ON(!PageAnon(page) && PageMappedToDisk(page)); - BUG_ON(PageAnon(page) && PageAnonExclusive(page)); - dec_mm_counter(vma->vm_mm, MM_SWAPENTS); inc_mm_counter(vma->vm_mm, MM_ANONPAGES); get_page(page); -- Gitee From 5bc3145da4b2a40298e466c67e0d9e98f013c91e Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 11 Dec 2023 16:22:08 +0000 Subject: [PATCH 1130/2138] mm: convert unuse_pte() to use a folio throughout ANBZ: #9728 commit f00f48436c789af659047d3c5d6f6d17e640634e upstream Saves about eight calls to compound_head(). 
Link: https://lkml.kernel.org/r/20231211162214.2146080-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- mm/swapfile.c | 47 +++++++++++++++++++++++++---------------------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index b7a9512a8eb4..d67c20144dc8 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1752,21 +1752,25 @@ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte) static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, swp_entry_t entry, struct folio *folio) { - struct page *page = folio_file_page(folio, swp_offset(entry)); - struct page *swapcache; + struct page *page; + struct folio *swapcache; spinlock_t *ptl; pte_t *pte, new_pte, old_pte; - bool hwpoisoned = PageHWPoison(page); + bool hwpoisoned = false; int ret = 1; - swapcache = page; + swapcache = folio; folio = ksm_might_need_to_copy(folio, vma, addr); if (unlikely(!folio)) return -ENOMEM; - else if (unlikely(folio == ERR_PTR(-EHWPOISON))) + else if (unlikely(folio == ERR_PTR(-EHWPOISON))) { + hwpoisoned = true; + folio = swapcache; + } + + page = folio_file_page(folio, swp_offset(entry)); + if (PageHWPoison(page)) hwpoisoned = true; - else - page = folio_file_page(folio, swp_offset(entry)); pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte), @@ -1777,13 +1781,12 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, old_pte = ptep_get(pte); - if (unlikely(hwpoisoned || !PageUptodate(page))) { + if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) { swp_entry_t swp_entry; dec_mm_counter(vma->vm_mm, MM_SWAPENTS); if (hwpoisoned) { - swp_entry = make_hwpoison_entry(swapcache); - page = swapcache; + swp_entry = make_hwpoison_entry(page); } else { swp_entry = make_poisoned_swp_entry(); } @@ -1797,27 +1800,27 @@ 
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, * when reading from swap. This metadata may be indexed by swap entry * so this must be called before swap_free(). */ - arch_swap_restore(entry, page_folio(page)); + arch_swap_restore(entry, folio); dec_mm_counter(vma->vm_mm, MM_SWAPENTS); inc_mm_counter(vma->vm_mm, MM_ANONPAGES); - get_page(page); - if (page == swapcache) { + folio_get(folio); + if (folio == swapcache) { rmap_t rmap_flags = RMAP_NONE; /* - * See do_swap_page(): PageWriteback() would be problematic. - * However, we do a wait_on_page_writeback() just before this - * call and have the page locked. + * See do_swap_page(): writeback would be problematic. + * However, we do a folio_wait_writeback() just before this + * call and have the folio locked. */ - VM_BUG_ON_PAGE(PageWriteback(page), page); + VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio); if (pte_swp_exclusive(old_pte)) rmap_flags |= RMAP_EXCLUSIVE; page_add_anon_rmap(page, vma, addr, rmap_flags); } else { /* ksm created a completely new copy */ - page_add_new_anon_rmap(page, vma, addr); - lru_cache_add_inactive_or_unevictable(page, vma); + folio_add_new_anon_rmap(folio, vma, addr); + folio_add_lru_vma(folio, vma); } new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot)); if (pte_swp_soft_dirty(old_pte)) @@ -1830,9 +1833,9 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, out: if (pte) pte_unmap_unlock(pte, ptl); - if (page != swapcache) { - unlock_page(page); - put_page(page); + if (folio != swapcache) { + folio_unlock(folio); + folio_put(folio); } return ret; } -- Gitee From 2d043d0ae145acffd382048fe5f3238a68ea3159 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 11 Dec 2023 16:22:09 +0000 Subject: [PATCH 1131/2138] mm: remove some calls to page_add_new_anon_rmap() ANBZ: #9728 commit 2853b66b601a265306be709b4d86aaff7d92a0fc upstream We already have the folio in these functions, we just need to use it. 
folio_add_new_anon_rmap() didn't exist at the time they were converted to folios. Link: https://lkml.kernel.org/r/20231211162214.2146080-5-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- kernel/events/uprobes.c | 2 +- mm/memory.c | 2 +- mm/userfaultfd.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 6dac0b579821..aa60ada8dfc3 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -181,7 +181,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, if (new_page) { folio_get(new_folio); - page_add_new_anon_rmap(new_page, vma, addr); + folio_add_new_anon_rmap(new_folio, vma, addr); folio_add_lru_vma(new_folio, vma); } else /* no new page, just dec_mm_counter for old_page */ diff --git a/mm/memory.c b/mm/memory.c index cdef8d808dd2..299467ca2f59 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4211,7 +4211,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) /* ksm created a completely new copy */ if (unlikely(folio != swapcache && swapcache)) { - page_add_new_anon_rmap(page, vma, vmf->address); + folio_add_new_anon_rmap(folio, vma, vmf->address); folio_add_lru_vma(folio, vma); } else { page_add_anon_rmap(page, vma, vmf->address, rmap_flags); diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 92fe2a76f4b5..ffef13f97edd 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -116,7 +116,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd, folio_add_lru(folio); page_add_file_rmap(page, dst_vma, false); } else { - page_add_new_anon_rmap(page, dst_vma, dst_addr); + folio_add_new_anon_rmap(folio, dst_vma, dst_addr); folio_add_lru_vma(folio, dst_vma); } -- Gitee From b7e63b25e1c947e74d6b400106e0e36cbb388a86 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 11 Dec 2023 
16:22:10 +0000 Subject: [PATCH 1132/2138] mm: remove stale example from comment ANBZ: #9728 commit b2926ac8178bf5c88ada4285f413f56c1cafc592 upstream folio_add_new_anon_rmap() no longer works this way, so just remove the entire example. Link: https://lkml.kernel.org/r/20231211162214.2146080-6-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Cc: Ralph Campbell Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- mm/memremap.c | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/mm/memremap.c b/mm/memremap.c index bee85560a243..19ed6855f96f 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -485,21 +485,11 @@ void free_zone_device_page(struct page *page) __ClearPageAnonExclusive(page); /* - * When a device managed page is freed, the page->mapping field + * When a device managed page is freed, the folio->mapping field * may still contain a (stale) mapping value. For example, the - * lower bits of page->mapping may still identify the page as an - * anonymous page. Ultimately, this entire field is just stale - * and wrong, and it will cause errors if not cleared. One - * example is: - * - * migrate_vma_pages() - * migrate_vma_insert_page() - * page_add_new_anon_rmap() - * __page_set_anon_rmap() - * ...checks page->mapping, via PageAnon(page) call, - * and incorrectly concludes that the page is an - * anonymous page. Therefore, it incorrectly, - * silently fails to set up the new anon rmap. + * lower bits of folio->mapping may still identify the folio as an + * anonymous folio. Ultimately, this entire field is just stale + * and wrong, and it will cause errors if not cleared. 
* * For other types of ZONE_DEVICE pages, migration is either * handled differently or not done at all, so there is no need -- Gitee From 6217df7bffb14f38060e0a4bd8562396448e9a97 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 11 Dec 2023 16:22:11 +0000 Subject: [PATCH 1133/2138] mm: remove references to page_add_new_anon_rmap in comments ANBZ: #9728 commit cb9089babc91f7ffc785d51a0fa567365b0e7751 upstream Refer to folio_add_new_anon_rmap() instead. Link: https://lkml.kernel.org/r/20231211162214.2146080-7-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- mm/rmap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index b086dc957b0c..6bea1722f26d 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1201,9 +1201,9 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page, * We have exclusion against page_add_anon_rmap because the caller * always holds the page locked. * - * We have exclusion against page_add_new_anon_rmap because those pages + * We have exclusion against folio_add_new_anon_rmap because those pages * are initially only visible via the pagetables, and the pte is locked - * over the call to page_add_new_anon_rmap. + * over the call to folio_add_new_anon_rmap. */ VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, folio); -- Gitee From b604b86ca47afcf9ec5adf99c0b4fce318914982 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 11 Dec 2023 16:22:12 +0000 Subject: [PATCH 1134/2138] mm: convert migrate_vma_insert_page() to use a folio ANBZ: #9728 commit d3b082736518562f4eed185e1a67f28d20635fef upstream Replaces five calls to compound_head() with one. 
Link: https://lkml.kernel.org/r/20231211162214.2146080-8-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Reviewed-by: Alistair Popple Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- mm/migrate_device.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/mm/migrate_device.c b/mm/migrate_device.c index 8ac1f79f754a..81193363f8cd 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -564,6 +564,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, struct page *page, unsigned long *src) { + struct folio *folio = page_folio(page); struct vm_area_struct *vma = migrate->vma; struct mm_struct *mm = vma->vm_mm; bool flush = false; @@ -596,17 +597,17 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, goto abort; if (unlikely(anon_vma_prepare(vma))) goto abort; - if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) + if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) goto abort; /* - * The memory barrier inside __SetPageUptodate makes sure that - * preceding stores to the page contents become visible before + * The memory barrier inside __folio_mark_uptodate makes sure that + * preceding stores to the folio contents become visible before * the set_pte_at() write. 
*/ - __SetPageUptodate(page); + __folio_mark_uptodate(folio); - if (is_device_private_page(page)) { + if (folio_is_device_private(folio)) { swp_entry_t swp_entry; if (vma->vm_flags & VM_WRITE) @@ -617,8 +618,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, page_to_pfn(page)); entry = swp_entry_to_pte(swp_entry); } else { - if (is_zone_device_page(page) && - !is_device_coherent_page(page)) { + if (folio_is_zone_device(folio) && + !folio_is_device_coherent(folio)) { pr_warn_once("Unsupported ZONE_DEVICE page type.\n"); goto abort; } @@ -652,10 +653,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, goto unlock_abort; inc_mm_counter(mm, MM_ANONPAGES); - page_add_new_anon_rmap(page, vma, addr); - if (!is_zone_device_page(page)) - lru_cache_add_inactive_or_unevictable(page, vma); - get_page(page); + folio_add_new_anon_rmap(folio, vma, addr); + if (!folio_is_zone_device(folio)) + folio_add_lru_vma(folio, vma); + folio_get(folio); if (flush) { flush_cache_page(vma, addr, pte_pfn(orig_pte)); -- Gitee From be9499403fe149099c53c8066ebc2851183be8ed Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 11 Dec 2023 16:22:14 +0000 Subject: [PATCH 1135/2138] mm: remove page_add_new_anon_rmap and lru_cache_add_inactive_or_unevictable ANBZ: #9728 commit cafa8e37a2ebd344ae0774324c21f46640bbaab3 upstream All callers have now been converted to folio_add_new_anon_rmap() and folio_add_lru_vma() so we can remove the wrapper. 
Link: https://lkml.kernel.org/r/20231211162214.2146080-10-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3693 --- include/linux/rmap.h | 2 -- include/linux/swap.h | 3 --- mm/folio-compat.c | 16 ---------------- 3 files changed, 21 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 3c2fc291b071..807d05a85d4b 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -192,8 +192,6 @@ typedef int __bitwise rmap_t; void folio_move_anon_rmap(struct folio *, struct vm_area_struct *); void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long address, rmap_t flags); -void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long address); void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); void page_add_file_rmap(struct page *, struct vm_area_struct *, diff --git a/include/linux/swap.h b/include/linux/swap.h index cb25db2a93dd..0201dd8c49e7 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -396,9 +396,6 @@ void folio_deactivate(struct folio *folio); void folio_mark_lazyfree(struct folio *folio); extern void swap_setup(void); -extern void lru_cache_add_inactive_or_unevictable(struct page *page, - struct vm_area_struct *vma); - /* linux/mm/vmscan.c */ extern unsigned long zone_reclaimable_pages(struct zone *zone); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, diff --git a/mm/folio-compat.c b/mm/folio-compat.c index 10c3247542cb..a546271db69b 100644 --- a/mm/folio-compat.c +++ b/mm/folio-compat.c @@ -77,12 +77,6 @@ bool redirty_page_for_writepage(struct writeback_control *wbc, } EXPORT_SYMBOL(redirty_page_for_writepage); -void lru_cache_add_inactive_or_unevictable(struct page *page, - struct vm_area_struct *vma) -{ - folio_add_lru_vma(page_folio(page), 
vma); -} - int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp) { @@ -122,13 +116,3 @@ void putback_lru_page(struct page *page) { folio_putback_lru(page_folio(page)); } - -#ifdef CONFIG_MMU -void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, - unsigned long address) -{ - VM_BUG_ON_PAGE(PageTail(page), page); - - return folio_add_new_anon_rmap((struct folio *)page, vma, address); -} -#endif -- Gitee From 03aab29d39bbc8623f6fa1a61d0faa9165f6a541 Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Thu, 6 Jun 2024 11:46:03 +0800 Subject: [PATCH 1136/2138] anolis: net: stmmac: Fix build error when config CONFIG_DWMAC_LOONGSON=m ANBZ: #9291 Signed-off-by: Juxin Gao Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/3325 --- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index f0eebed751f3..f03d78385ed9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -399,7 +399,6 @@ static struct mac_device_info *loongson_dwmac_setup(void *apriv) mac->dma = dma; } - mac->mac = &dwmac1000_ops; priv->dev->priv_flags |= IFF_UNICAST_FLT; /* Pre-initialize the respective "mac" fields as it's done in -- Gitee From bc5efb7477f0049a9a2e63f2312c250c5a59bdbf Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:25 +0100 Subject: [PATCH 1137/2138] mm/rmap: rename hugepage_add* to hugetlb_add* ANBZ: #9728 commit 9d5fafd5d882446999366f673ab06edba453f862 upstream Patch series "mm/rmap: interface overhaul", v2. This series overhauls the rmap interface, to get rid of the "bool compound" / RMAP_COMPOUND parameter with the goal of making the interface less error prone, more future proof, and more natural to extend to "batching". 
Also, this converts the interface to always consume folio+subpage, which speeds up operations on large folios. Further, this series adds PTE-batching variants for 4 rmap functions, whereby only folio_add_anon_rmap_ptes() is used for batching in this series when PTE-remapping a PMD-mapped THP. folio_remove_rmap_ptes(), folio_try_dup_anon_rmap_ptes() and folio_dup_file_rmap_ptes() will soon come in handy[1,2]. This series performs a lot of folio conversion along the way. Most of the added LOC in the diff are only due to documentation. As we're moving to a pte/pmd interface where we clearly express the mapping granularity we are dealing with, we first get the remainder of hugetlb out of the way, as it is special and expected to remain special: it treats everything as a "single logical PTE" and only currently allows entire mappings. Even if we'd ever support partial mappings, I strongly assume the interface and implementation will still differ heavily: hopefull we can avoid working on subpages/subpage mapcounts completely and only add a "count" parameter for them to enable batching. New (extended) hugetlb interface that operates on entire folio: * hugetlb_add_new_anon_rmap() -> Already existed * hugetlb_add_anon_rmap() -> Already existed * hugetlb_try_dup_anon_rmap() * hugetlb_try_share_anon_rmap() * hugetlb_add_file_rmap() * hugetlb_remove_rmap() New "ordinary" interface for small folios / THP:: * folio_add_new_anon_rmap() -> Already existed * folio_add_anon_rmap_[pte|ptes|pmd]() * folio_try_dup_anon_rmap_[pte|ptes|pmd]() * folio_try_share_anon_rmap_[pte|pmd]() * folio_add_file_rmap_[pte|ptes|pmd]() * folio_dup_file_rmap_[pte|ptes|pmd]() * folio_remove_rmap_[pte|ptes|pmd]() folio_add_new_anon_rmap() will always map at the largest granularity possible (currently, a single PMD to cover a PMD-sized THP). Could be extended if ever required. In the future, we might want "_pud" variants and eventually "_pmds" variants for batching. 
I ran some simple microbenchmarks on an Intel(R) Xeon(R) Silver 4210R: measuring munmap(), fork(), cow, MADV_DONTNEED on each PTE ... and PTE remapping PMD-mapped THPs on 1 GiB of memory. For small folios, there is barely a change (< 1% improvement for me). For PTE-mapped THP: * PTE-remapping a PMD-mapped THP is more than 10% faster. * fork() is more than 4% faster. * MADV_DONTNEED is 2% faster * COW when writing only a single byte on a COW-shared PTE is 1% faster * munmap() barely changes (< 1%). [1] https://lkml.kernel.org/r/20230810103332.3062143-1-ryan.roberts@arm.com [2] https://lkml.kernel.org/r/20231204105440.61448-1-ryan.roberts@arm.com This patch (of 40): Let's just call it "hugetlb_". Yes, it's all already inconsistent and confusing because we have a lot of "hugepage_" functions for legacy reasons. But "hugetlb" cannot possibly be confused with transparent huge pages, and it matches "hugetlb.c" and "folio_test_hugetlb()". So let's minimize confusion in rmap code. Link: https://lkml.kernel.org/r/20231220224504.646757-1-david@redhat.com Link: https://lkml.kernel.org/r/20231220224504.646757-2-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Muchun Song Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 4 ++-- mm/hugetlb.c | 8 ++++---- mm/migrate.c | 4 ++-- mm/rmap.c | 8 ++++---- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 807d05a85d4b..846308fbdd8f 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -201,9 +201,9 @@ void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr, void page_remove_rmap(struct page *, struct vm_area_struct *, bool compound); -void hugepage_add_anon_rmap(struct folio *, struct vm_area_struct *, +void 
hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address, rmap_t flags); -void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *, +void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); static inline void __page_dup_rmap(struct page *page, bool compound) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 15919eb05e01..943a92c4e314 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5019,7 +5019,7 @@ hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long add pte_t newpte = make_huge_pte(vma, &new_folio->page, 1); __folio_mark_uptodate(new_folio); - hugepage_add_new_anon_rmap(new_folio, vma, addr); + hugetlb_add_new_anon_rmap(new_folio, vma, addr); if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old)) newpte = huge_pte_mkuffd_wp(newpte); set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz); @@ -5722,7 +5722,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, /* Break COW or unshare */ huge_ptep_clear_flush(vma, haddr, ptep); page_remove_rmap(&old_folio->page, vma, true); - hugepage_add_new_anon_rmap(new_folio, vma, haddr); + hugetlb_add_new_anon_rmap(new_folio, vma, haddr); if (huge_pte_uffd_wp(pte)) newpte = huge_pte_mkuffd_wp(newpte); set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h)); @@ -6010,7 +6010,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, goto backout; if (anon_rmap) - hugepage_add_new_anon_rmap(folio, vma, haddr); + hugetlb_add_new_anon_rmap(folio, vma, haddr); else page_dup_file_rmap(&folio->page, true); new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE) @@ -6438,7 +6438,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, if (folio_in_pagecache) page_dup_file_rmap(&folio->page, true); else - hugepage_add_new_anon_rmap(folio, dst_vma, dst_addr); + hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr); /* * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY diff --git 
a/mm/migrate.c b/mm/migrate.c index 6f485a175f75..32e54790ecb4 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -249,8 +249,8 @@ static bool remove_migration_pte(struct folio *folio, pte = arch_make_huge_pte(pte, shift, vma->vm_flags); if (folio_test_anon(folio)) - hugepage_add_anon_rmap(folio, vma, pvmw.address, - rmap_flags); + hugetlb_add_anon_rmap(folio, vma, pvmw.address, + rmap_flags); else page_dup_file_rmap(new, true); set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte, diff --git a/mm/rmap.c b/mm/rmap.c index 6bea1722f26d..be2a648b0587 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2595,8 +2595,8 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) * * RMAP_COMPOUND is ignored. */ -void hugepage_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, - unsigned long address, rmap_t flags) +void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, + unsigned long address, rmap_t flags) { VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); @@ -2607,8 +2607,8 @@ void hugepage_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, PageAnonExclusive(&folio->page), folio); } -void hugepage_add_new_anon_rmap(struct folio *folio, - struct vm_area_struct *vma, unsigned long address) +void hugetlb_add_new_anon_rmap(struct folio *folio, + struct vm_area_struct *vma, unsigned long address) { BUG_ON(address < vma->vm_start || address >= vma->vm_end); /* increment count (starts at -1) */ -- Gitee From d381a670c0dcd075403f4077e6aad777d0a335cf Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:26 +0100 Subject: [PATCH 1138/2138] mm/rmap: introduce and use hugetlb_remove_rmap() ANBZ: #9728 commit e135826b2da0cf25305086dc9ac1e91718a148e1 upstream hugetlb rmap handling differs quite a lot from "ordinary" rmap code. For example, hugetlb currently only supports entire mappings, and treats any mapping as mapped using a single "logical PTE". 
Let's move it out of the way so we can overhaul our "ordinary" rmap. implementation/interface. Let's introduce and use hugetlb_remove_rmap() and remove the hugetlb code from page_remove_rmap(). This effectively removes one check on the small-folio path as well. Add sanity checks that we end up with the right folios in the right functions. Note: all possible candidates that need care are page_remove_rmap() that pass compound=true. Link: https://lkml.kernel.org/r/20231220224504.646757-3-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Yin Fengwei Reviewed-by: Ryan Roberts Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Muchun Song Cc: Hugh Dickins Cc: Muchun Song Cc: Peter Xu Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 7 +++++++ mm/hugetlb.c | 4 ++-- mm/rmap.c | 18 +++++++++--------- 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 846308fbdd8f..8d312b3e4908 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -206,6 +206,13 @@ void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *, void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); +static inline void hugetlb_remove_rmap(struct folio *folio) +{ + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + + atomic_dec(&folio->_entire_mapcount); +} + static inline void __page_dup_rmap(struct page *page, bool compound) { if (compound) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 943a92c4e314..2f020632cd6c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5410,7 +5410,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, make_pte_marker(PTE_MARKER_UFFD_WP), sz); hugetlb_count_sub(pages_per_huge_page(h), mm); - page_remove_rmap(page, vma, true); + hugetlb_remove_rmap(page_folio(page)); spin_unlock(ptl); tlb_remove_page_size(tlb, page, 
huge_page_size(h)); @@ -5721,7 +5721,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, /* Break COW or unshare */ huge_ptep_clear_flush(vma, haddr, ptep); - page_remove_rmap(&old_folio->page, vma, true); + hugetlb_remove_rmap(old_folio); hugetlb_add_new_anon_rmap(new_folio, vma, haddr); if (huge_pte_uffd_wp(pte)) newpte = huge_pte_mkuffd_wp(newpte); diff --git a/mm/rmap.c b/mm/rmap.c index be2a648b0587..7780b61acaee 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1450,15 +1450,9 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma, bool last; enum node_stat_item idx; + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); VM_BUG_ON_PAGE(compound && !PageHead(page), page); - /* Hugetlb pages are not counted in NR_*MAPPED */ - if (unlikely(folio_test_hugetlb(folio))) { - /* hugetlb pages are always mapped with pmds */ - atomic_dec(&folio->_entire_mapcount); - return; - } - /* Is page being unmapped by PTE? Is this its last map to be removed? */ if (likely(!compound)) { last = atomic_add_negative(-1, &page->_mapcount); @@ -1816,7 +1810,10 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, dec_mm_counter(mm, mm_counter_file(&folio->page)); } discard: - page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); + if (unlikely(folio_test_hugetlb(folio))) + hugetlb_remove_rmap(folio); + else + page_remove_rmap(subpage, vma, false); if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); @@ -2169,7 +2166,10 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, */ } - page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); + if (unlikely(folio_test_hugetlb(folio))) + hugetlb_remove_rmap(folio); + else + page_remove_rmap(subpage, vma, false); if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); -- Gitee From 2ba7f7d587d0ec4983aca9239a6d8ae560551b3c Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:27 +0100 
Subject: [PATCH 1139/2138] mm/rmap: introduce and use hugetlb_add_file_rmap() ANBZ: #9728 commit 44887f39945519fa8405133b1acd098fda9c9746 upstream hugetlb rmap handling differs quite a lot from "ordinary" rmap code. For example, hugetlb currently only supports entire mappings, and treats any mapping as mapped using a single "logical PTE". Let's move it out of the way so we can overhaul our "ordinary" rmap. implementation/interface. Right now we're using page_dup_file_rmap() in some cases where "ordinary" rmap code would have used page_add_file_rmap(). So let's introduce and use hugetlb_add_file_rmap() instead. We won't be adding a "hugetlb_dup_file_rmap()" functon for the fork() case, as it would be doing the same: "dup" is just an optimization for "add". What remains is a single page_dup_file_rmap() call in fork() code. Add sanity checks that we end up with the right folios in the right functions. Link: https://lkml.kernel.org/r/20231220224504.646757-4-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Yin Fengwei Reviewed-by: Ryan Roberts Reviewed-by: Muchun Song Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Peter Xu Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 8 ++++++++ mm/hugetlb.c | 6 +++--- mm/migrate.c | 2 +- mm/rmap.c | 1 + 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 8d312b3e4908..668d2e047136 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -206,6 +206,14 @@ void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *, void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); +static inline void hugetlb_add_file_rmap(struct folio *folio) +{ + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); + + atomic_inc(&folio->_entire_mapcount); +} 
+ static inline void hugetlb_remove_rmap(struct folio *folio) { VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 2f020632cd6c..12085ef3beb1 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5142,7 +5142,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, * sleep during the process. */ if (!folio_test_anon(pte_folio)) { - page_dup_file_rmap(&pte_folio->page, true); + hugetlb_add_file_rmap(pte_folio); } else if (page_try_dup_anon_rmap(&pte_folio->page, true, src_vma)) { pte_t src_pte_old = entry; @@ -6012,7 +6012,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, if (anon_rmap) hugetlb_add_new_anon_rmap(folio, vma, haddr); else - page_dup_file_rmap(&folio->page, true); + hugetlb_add_file_rmap(folio); new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_SHARED))); /* @@ -6436,7 +6436,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, goto out_release_unlock; if (folio_in_pagecache) - page_dup_file_rmap(&folio->page, true); + hugetlb_add_file_rmap(folio); else hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr); diff --git a/mm/migrate.c b/mm/migrate.c index 32e54790ecb4..40b4619f7318 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -252,7 +252,7 @@ static bool remove_migration_pte(struct folio *folio, hugetlb_add_anon_rmap(folio, vma, pvmw.address, rmap_flags); else - page_dup_file_rmap(new, true); + hugetlb_add_file_rmap(folio); set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte, psize); } else diff --git a/mm/rmap.c b/mm/rmap.c index 7780b61acaee..1015b7ba5700 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1365,6 +1365,7 @@ void folio_add_file_rmap_range(struct folio *folio, struct page *page, unsigned int nr_pmdmapped = 0, first; int nr = 0; + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio); /* Is page being mapped by PTE? Is this its first map to be added? 
*/ -- Gitee From 809bb42a4800ca0f1a1dd8c4a8fc8581d063e149 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:28 +0100 Subject: [PATCH 1140/2138] mm/rmap: introduce and use hugetlb_try_dup_anon_rmap() ANBZ: #9728 commit ebe2e35ec0f256372c158a18de459fb60070b313 upstream hugetlb rmap handling differs quite a lot from "ordinary" rmap code. For example, hugetlb currently only supports entire mappings, and treats any mapping as mapped using a single "logical PTE". Let's move it out of the way so we can overhaul our "ordinary" rmap. implementation/interface. So let's introduce and use hugetlb_try_dup_anon_rmap() to make all hugetlb handling use dedicated hugetlb_* rmap functions. Add sanity checks that we end up with the right folios in the right functions. Note that is_device_private_page() does not apply to hugetlb. Link: https://lkml.kernel.org/r/20231220224504.646757-5-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Yin Fengwei Reviewed-by: Ryan Roberts Reviewed-by: Muchun Song Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Peter Xu Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/mm.h | 12 +++++++++--- include/linux/rmap.h | 18 ++++++++++++++++++ mm/hugetlb.c | 3 +-- 3 files changed, 28 insertions(+), 5 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 45d605c57bad..2a33ec96251f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1950,15 +1950,21 @@ static inline bool page_maybe_dma_pinned(struct page *page) * * The caller has to hold the PT lock and the vma->vm_mm->->write_protect_seq. 
*/ -static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, - struct page *page) +static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma, + struct folio *folio) { VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1)); if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)) return false; - return page_maybe_dma_pinned(page); + return folio_maybe_dma_pinned(folio); +} + +static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, + struct page *page) +{ + return folio_needs_cow_for_dma(vma, page_folio(page)); } /** diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 668d2e047136..b5bebb78342f 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -206,6 +206,22 @@ void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *, void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); +/* See page_try_dup_anon_rmap() */ +static inline int hugetlb_try_dup_anon_rmap(struct folio *folio, + struct vm_area_struct *vma) +{ + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + + if (PageAnonExclusive(&folio->page)) { + if (unlikely(folio_needs_cow_for_dma(vma, folio))) + return -EBUSY; + ClearPageAnonExclusive(&folio->page); + } + atomic_inc(&folio->_entire_mapcount); + return 0; +} + static inline void hugetlb_add_file_rmap(struct folio *folio) { VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); @@ -223,6 +239,8 @@ static inline void hugetlb_remove_rmap(struct folio *folio) static inline void __page_dup_rmap(struct page *page, bool compound) { + VM_WARN_ON(folio_test_hugetlb(page_folio(page))); + if (compound) { struct folio *folio = (struct folio *)page; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 12085ef3beb1..c5a1db8db593 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5143,8 +5143,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, */ if (!folio_test_anon(pte_folio)) { 
hugetlb_add_file_rmap(pte_folio); - } else if (page_try_dup_anon_rmap(&pte_folio->page, - true, src_vma)) { + } else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) { pte_t src_pte_old = entry; struct folio *new_folio; -- Gitee From 7761792e88355c80949f306fe18cb4ffa8c349d2 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:29 +0100 Subject: [PATCH 1141/2138] mm/rmap: introduce and use hugetlb_try_share_anon_rmap() ANBZ: #9728 commit 0c2ec32bf0b2f0d7ccb98c53ee5d255d68e73595 upstream hugetlb rmap handling differs quite a lot from "ordinary" rmap code. For example, hugetlb currently only supports entire mappings, and treats any mapping as mapped using a single "logical PTE". Let's move it out of the way so we can overhaul our "ordinary" rmap. implementation/interface. So let's introduce and use hugetlb_try_dup_anon_rmap() to make all hugetlb handling use dedicated hugetlb_* rmap functions. Add sanity checks that we end up with the right folios in the right functions. Note that try_to_unmap_one() does not need care. Easy to spot because among all that nasty hugetlb special-casing in that function, we're not using set_huge_pte_at() on the anon path -- well, and that code assumes that we would want to swapout. 
Link: https://lkml.kernel.org/r/20231220224504.646757-6-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Yin Fengwei Reviewed-by: Ryan Roberts Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 25 +++++++++++++++++++++++++ mm/rmap.c | 15 ++++++++++----- 2 files changed, 35 insertions(+), 5 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b5bebb78342f..2e9b58079fe0 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -222,6 +222,30 @@ static inline int hugetlb_try_dup_anon_rmap(struct folio *folio, return 0; } +/* See page_try_share_anon_rmap() */ +static inline int hugetlb_try_share_anon_rmap(struct folio *folio) +{ + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio); + + /* Paired with the memory barrier in try_grab_folio(). */ + if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + smp_mb(); + + if (unlikely(folio_maybe_dma_pinned(folio))) + return -EBUSY; + ClearPageAnonExclusive(&folio->page); + + /* + * This is conceptually a smp_wmb() paired with the smp_rmb() in + * gup_must_unshare(). + */ + if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + smp_mb__after_atomic(); + return 0; +} + static inline void hugetlb_add_file_rmap(struct folio *folio) { VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); @@ -326,6 +350,7 @@ static inline int page_try_dup_anon_rmap(struct page *page, bool compound, */ static inline int page_try_share_anon_rmap(struct page *page) { + VM_WARN_ON(folio_test_hugetlb(page_folio(page))); VM_BUG_ON_PAGE(!PageAnon(page) || !PageAnonExclusive(page), page); /* device private pages cannot get pinned via GUP. 
*/ diff --git a/mm/rmap.c b/mm/rmap.c index 1015b7ba5700..c0f7c2c0da34 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2119,13 +2119,18 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, !anon_exclusive, subpage); /* See page_try_share_anon_rmap(): clear PTE first. */ - if (anon_exclusive && - page_try_share_anon_rmap(subpage)) { - if (folio_test_hugetlb(folio)) + if (folio_test_hugetlb(folio)) { + if (anon_exclusive && + hugetlb_try_share_anon_rmap(folio)) { set_huge_pte_at(mm, address, pvmw.pte, pteval, hsz); - else - set_pte_at(mm, address, pvmw.pte, pteval); + ret = false; + page_vma_mapped_walk_done(&pvmw); + break; + } + } else if (anon_exclusive && + page_try_share_anon_rmap(subpage)) { + set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); break; -- Gitee From 32b251a04389fd9fb53efb71e79d218aeab63f84 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:30 +0100 Subject: [PATCH 1142/2138] mm/rmap: add hugetlb sanity checks for anon rmap handling ANBZ: #9728 commit a4ea18641d8330a97d7d66f0ab017b690099ffce upstream Let's make sure we end up with the right folios in the right functions when adding an anon rmap, just like we already do in the other rmap functions. 
Link: https://lkml.kernel.org/r/20231220224504.646757-7-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/rmap.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mm/rmap.c b/mm/rmap.c index c0f7c2c0da34..94d717d5dae2 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1232,6 +1232,8 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, bool compound = flags & RMAP_COMPOUND; bool first; + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); + /* Is page being mapped by PTE? Is this its first map to be added? */ if (likely(!compound)) { first = atomic_inc_and_test(&page->_mapcount); @@ -1313,6 +1315,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, { int nr = folio_nr_pages(folio); + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); VM_BUG_ON_VMA(address < vma->vm_start || address + (nr << PAGE_SHIFT) > vma->vm_end, vma); __folio_set_swapbacked(folio); @@ -2604,6 +2607,7 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); atomic_inc(&folio->_entire_mapcount); @@ -2616,6 +2620,8 @@ void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, void hugetlb_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, unsigned long address) { + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + BUG_ON(address < vma->vm_start || address >= vma->vm_end); /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); -- Gitee From df91c74b650f560742c2aa0eb356d2549e446086 Mon Sep 
17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:31 +0100 Subject: [PATCH 1143/2138] mm/rmap: convert folio_add_file_rmap_range() into folio_add_file_rmap_[pte|ptes|pmd]() ANBZ: #9728 commit 68f0320824fa59c5429cbc811e6c46e7a30ea32c upstream Let's get rid of the compound parameter and instead define explicitly which mappings we're adding. That is more future proof, easier to read and harder to mess up. Use an enum to express the granularity internally. Make the compiler always special-case on the granularity by using __always_inline. Replace the "compound" check by a switch-case that will be removed by the compiler completely. Add plenty of sanity checks with CONFIG_DEBUG_VM. Replace the folio_test_pmd_mappable() check by a config check in the caller and sanity checks. Convert the single user of folio_add_file_rmap_range(). While at it, consistently use "int" instead of "unisgned int" in rmap code when dealing with mapcounts and the number of pages. This function design can later easily be extended to PUDs and to batch PMDs. Note that for now we don't support anything bigger than PMD-sized folios (as we cleanly separated hugetlb handling). Sanity checks will catch if that ever changes. Next up is removing page_remove_rmap() along with its "compound" parameter and smilarly converting all other rmap functions. 
Link: https://lkml.kernel.org/r/20231220224504.646757-8-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Yin Fengwei Reviewed-by: Ryan Roberts Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 46 ++++++++++++++++++++++++-- mm/memory.c | 2 +- mm/rmap.c | 79 ++++++++++++++++++++++++++++---------------- 3 files changed, 95 insertions(+), 32 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 2e9b58079fe0..c99f1523f8df 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -186,6 +186,44 @@ typedef int __bitwise rmap_t; */ #define RMAP_COMPOUND ((__force rmap_t)BIT(1)) +/* + * Internally, we're using an enum to specify the granularity. We make the + * compiler emit specialized code for each granularity. + */ +enum rmap_level { + RMAP_LEVEL_PTE = 0, + RMAP_LEVEL_PMD, +}; + +static inline void __folio_rmap_sanity_checks(struct folio *folio, + struct page *page, int nr_pages, enum rmap_level level) +{ + /* hugetlb folios are handled separately. */ + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(folio_test_large(folio) && + !folio_test_large_rmappable(folio), folio); + + VM_WARN_ON_ONCE(nr_pages <= 0); + VM_WARN_ON_FOLIO(page_folio(page) != folio, folio); + VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio); + + switch (level) { + case RMAP_LEVEL_PTE: + break; + case RMAP_LEVEL_PMD: + /* + * We don't support folios larger than a single PMD yet. So + * when RMAP_LEVEL_PMD is set, we assume that we are creating + * a single "entire" mapping of the folio. 
+ */ + VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio); + VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio); + break; + default: + VM_WARN_ON_ONCE(true); + } +} + /* * rmap interfaces called when adding or removing pte of page */ @@ -196,8 +234,12 @@ void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); void page_add_file_rmap(struct page *, struct vm_area_struct *, bool compound); -void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr, - struct vm_area_struct *, bool compound); +void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages, + struct vm_area_struct *); +#define folio_add_file_rmap_pte(folio, page, vma) \ + folio_add_file_rmap_ptes(folio, page, 1, vma) +void folio_add_file_rmap_pmd(struct folio *, struct page *, + struct vm_area_struct *); void page_remove_rmap(struct page *, struct vm_area_struct *, bool compound); diff --git a/mm/memory.c b/mm/memory.c index 299467ca2f59..f862095363bf 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4670,7 +4670,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio, folio_add_lru_vma(folio, vma); } else { add_mm_counter(vma->vm_mm, mm_counter_file(page), nr); - folio_add_file_rmap_range(folio, page, nr, vma, false); + folio_add_file_rmap_ptes(folio, page, nr, vma); } set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); diff --git a/mm/rmap.c b/mm/rmap.c index 94d717d5dae2..bd1c0a68b852 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1348,31 +1348,18 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); } -/** - * folio_add_file_rmap_range - add pte mapping to page range of a folio - * @folio: The folio to add the mapping to - * @page: The first page to add - * @nr_pages: The number of pages which will be mapped - * @vma: the vm area in which the mapping is added - * @compound: charge the page as compound or small page - * - * The page range of 
folio is defined by [first_page, first_page + nr_pages) - * - * The caller needs to hold the pte lock. - */ -void folio_add_file_rmap_range(struct folio *folio, struct page *page, - unsigned int nr_pages, struct vm_area_struct *vma, - bool compound) +static __always_inline void __folio_add_file_rmap(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *vma, + enum rmap_level level) { atomic_t *mapped = &folio->_nr_pages_mapped; - unsigned int nr_pmdmapped = 0, first; - int nr = 0; + int nr = 0, nr_pmdmapped = 0, first; - VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); - VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio); + VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); + __folio_rmap_sanity_checks(folio, page, nr_pages, level); - /* Is page being mapped by PTE? Is this its first map to be added? */ - if (likely(!compound)) { + switch (level) { + case RMAP_LEVEL_PTE: do { first = atomic_inc_and_test(&page->_mapcount); if (first && folio_test_large(folio)) { @@ -1383,9 +1370,8 @@ void folio_add_file_rmap_range(struct folio *folio, struct page *page, if (first) nr++; } while (page++, --nr_pages > 0); - } else if (folio_test_pmd_mappable(folio)) { - /* That test is redundant: it's for safety or to optimize out */ - + break; + case RMAP_LEVEL_PMD: first = atomic_inc_and_test(&folio->_entire_mapcount); if (first) { nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); @@ -1400,6 +1386,7 @@ void folio_add_file_rmap_range(struct folio *folio, struct page *page, nr = 0; } } + break; } if (nr_pmdmapped) @@ -1413,6 +1400,43 @@ void folio_add_file_rmap_range(struct folio *folio, struct page *page, mlock_vma_folio(folio, vma); } +/** + * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio + * @folio: The folio to add the mappings to + * @page: The first page to add + * @nr_pages: The number of pages that will be mapped using PTEs + * @vma: The vm area in which the mappings are added + * + * The page range of 
the folio is defined by [page, page + nr_pages) + * + * The caller needs to hold the page table lock. + */ +void folio_add_file_rmap_ptes(struct folio *folio, struct page *page, + int nr_pages, struct vm_area_struct *vma) +{ + __folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); +} + +/** + * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio + * @folio: The folio to add the mapping to + * @page: The first page to add + * @vma: The vm area in which the mapping is added + * + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock. + */ +void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, + struct vm_area_struct *vma) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); +#else + WARN_ON_ONCE(true); +#endif +} + /** * page_add_file_rmap - add pte mapping to a file page * @page: the page to add the mapping to @@ -1425,16 +1449,13 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma, bool compound) { struct folio *folio = page_folio(page); - unsigned int nr_pages; VM_WARN_ON_ONCE_PAGE(compound && !PageTransHuge(page), page); if (likely(!compound)) - nr_pages = 1; + folio_add_file_rmap_pte(folio, page, vma); else - nr_pages = folio_nr_pages(folio); - - folio_add_file_rmap_range(folio, page, nr_pages, vma, compound); + folio_add_file_rmap_pmd(folio, page, vma); } /** -- Gitee From e87e2cd8caf8d16a98d558d76a9a2034dd6cded9 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:32 +0100 Subject: [PATCH 1144/2138] mm/memory: page_add_file_rmap() -> folio_add_file_rmap_[pte|pmd]() ANBZ: #9728 commit ef37b2ea08ace7b5fbcd569d703be1903afd12f9 upstream Let's convert insert_page_into_pte_locked() and do_set_pmd(). While at it, perform some folio conversion. 
Link: https://lkml.kernel.org/r/20231220224504.646757-9-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Yin Fengwei Reviewed-by: Ryan Roberts Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/memory.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index f862095363bf..e9edceb08f69 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1845,12 +1845,14 @@ static int validate_page_before_insert(struct page *page) static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, unsigned long addr, struct page *page, pgprot_t prot) { + struct folio *folio = page_folio(page); + if (!pte_none(ptep_get(pte))) return -EBUSY; /* Ok, finally just insert the thing.. */ - get_page(page); + folio_get(folio); inc_mm_counter(vma->vm_mm, mm_counter_file(page)); - page_add_file_rmap(page, vma, false); + folio_add_file_rmap_pte(folio, page, vma); set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot)); return 0; } @@ -4555,6 +4557,7 @@ static void deposit_prealloc_pte(struct vm_fault *vmf) vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) { + struct folio *folio = page_folio(page); struct vm_area_struct *vma = vmf->vma; bool write = vmf->flags & FAULT_FLAG_WRITE; unsigned long haddr = vmf->address & HPAGE_PMD_MASK; @@ -4573,8 +4576,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) return ret; - page = compound_head(page); - if (compound_order(page) != HPAGE_PMD_ORDER) + if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER) return ret; /* @@ -4583,7 +4585,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) * check. This kind of THP just can be PTE mapped. Access to * the corrupted subpage should trigger SIGBUS as expected. 
*/ - if (unlikely(PageHasHWPoisoned(page))) + if (unlikely(folio_test_has_hwpoisoned(folio))) return ret; /* @@ -4607,7 +4609,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR); - page_add_file_rmap(page, vma, true); + folio_add_file_rmap_pmd(folio, page, vma); /* * deposit and withdraw with pmd lock held -- Gitee From c359c7e817aaf2fb7f0af5a4051e9daf1ba4923e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:33 +0100 Subject: [PATCH 1145/2138] mm/huge_memory: page_add_file_rmap() -> folio_add_file_rmap_pmd() ANBZ: #9728 commit 14d85a6e88a658e29d9c8d6c521e7f824f2f2c6c upstream Let's convert remove_migration_pmd() and while at it, perform some folio conversion. Link: https://lkml.kernel.org/r/20231220224504.646757-10-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Yin Fengwei Reviewed-by: Ryan Roberts Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/huge_memory.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index af561f91bb7d..1ee02f39dfd5 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3443,6 +3443,7 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) { + struct folio *folio = page_folio(new); struct vm_area_struct *vma = pvmw->vma; struct mm_struct *mm = vma->vm_mm; unsigned long address = pvmw->address; @@ -3454,7 +3455,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) return; entry = pmd_to_swp_entry(*pvmw->pmd); - get_page(new); + folio_get(folio); pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)); if 
(pmd_swp_soft_dirty(*pvmw->pmd)) pmde = pmd_mksoft_dirty(pmde); @@ -3465,10 +3466,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) if (!is_migration_entry_young(entry)) pmde = pmd_mkold(pmde); /* NOTE: this may contain setting soft-dirty on some archs */ - if (PageDirty(new) && is_migration_entry_dirty(entry)) + if (folio_test_dirty(folio) && is_migration_entry_dirty(entry)) pmde = pmd_mkdirty(pmde); - if (PageAnon(new)) { + if (folio_test_anon(folio)) { rmap_t rmap_flags = RMAP_COMPOUND; if (!is_readable_migration_entry(entry)) @@ -3476,9 +3477,9 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) page_add_anon_rmap(new, vma, haddr, rmap_flags); } else { - page_add_file_rmap(new, vma, true); + folio_add_file_rmap_pmd(folio, new, vma); } - VM_BUG_ON(pmd_write(pmde) && PageAnon(new) && !PageAnonExclusive(new)); + VM_BUG_ON(pmd_write(pmde) && folio_test_anon(folio) && !PageAnonExclusive(new)); set_pmd_at(mm, haddr, pvmw->pmd, pmde); /* No need to invalidate - it was non-present before */ -- Gitee From abcc7eff4dc84f532d3a9f4a44fd97ef4784d1cd Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:34 +0100 Subject: [PATCH 1146/2138] mm/migrate: page_add_file_rmap() -> folio_add_file_rmap_pte() ANBZ: #9728 commit c4dffb0bc237d5e3b51adf947062e65ed34ac3c3 upstream Let's convert remove_migration_pte(). 
Link: https://lkml.kernel.org/r/20231220224504.646757-11-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Yin Fengwei Reviewed-by: Ryan Roberts Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/migrate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/migrate.c b/mm/migrate.c index 40b4619f7318..db221fbfd29e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -262,7 +262,7 @@ static bool remove_migration_pte(struct folio *folio, page_add_anon_rmap(new, vma, pvmw.address, rmap_flags); else - page_add_file_rmap(new, vma, false); + folio_add_file_rmap_pte(folio, new, vma); set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); } if (vma->vm_flags & VM_LOCKED) -- Gitee From 8bd08595f7bad5e7cf3061da89ce83772e868825 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:35 +0100 Subject: [PATCH 1147/2138] mm/userfaultfd: page_add_file_rmap() -> folio_add_file_rmap_pte() ANBZ: #9728 commit 7123e19c3c9d1539c899ac8d919498e3393bb288 upstream Let's convert mfill_atomic_install_pte(). 
Link: https://lkml.kernel.org/r/20231220224504.646757-12-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Yin Fengwei Reviewed-by: Ryan Roberts Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/userfaultfd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index ffef13f97edd..2031e1d5b2d7 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -114,7 +114,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd, /* Usually, cache pages are already added to LRU */ if (newly_allocated) folio_add_lru(folio); - page_add_file_rmap(page, dst_vma, false); + folio_add_file_rmap_pte(folio, page, dst_vma); } else { folio_add_new_anon_rmap(folio, dst_vma, dst_addr); folio_add_lru_vma(folio, dst_vma); -- Gitee From d149941c099943eef3b3661160140c3d0abf3e81 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:36 +0100 Subject: [PATCH 1148/2138] mm/rmap: remove page_add_file_rmap() ANBZ: #9728 commit be6e57cfabe99a5d3b3869103c4ea0ed4a9692d4 upstream All users are gone, let's remove it. 
Link: https://lkml.kernel.org/r/20231220224504.646757-13-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Yin Fengwei Reviewed-by: Ryan Roberts Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 2 -- mm/rmap.c | 21 --------------------- 2 files changed, 23 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index c99f1523f8df..56309cf26901 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -232,8 +232,6 @@ void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long address, rmap_t flags); void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); -void page_add_file_rmap(struct page *, struct vm_area_struct *, - bool compound); void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages, struct vm_area_struct *); #define folio_add_file_rmap_pte(folio, page, vma) \ diff --git a/mm/rmap.c b/mm/rmap.c index bd1c0a68b852..cfb8a311d00d 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1437,27 +1437,6 @@ void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, #endif } -/** - * page_add_file_rmap - add pte mapping to a file page - * @page: the page to add the mapping to - * @vma: the vm area in which the mapping is added - * @compound: charge the page as compound or small page - * - * The caller needs to hold the pte lock. 
- */ -void page_add_file_rmap(struct page *page, struct vm_area_struct *vma, - bool compound) -{ - struct folio *folio = page_folio(page); - - VM_WARN_ON_ONCE_PAGE(compound && !PageTransHuge(page), page); - - if (likely(!compound)) - folio_add_file_rmap_pte(folio, page, vma); - else - folio_add_file_rmap_pmd(folio, page, vma); -} - /** * page_remove_rmap - take down pte mapping from a page * @page: page to remove mapping from -- Gitee From ab4f87b12f7500a21c0b2867ef81eb244f0e7cde Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:37 +0100 Subject: [PATCH 1149/2138] mm/rmap: factor out adding folio mappings into __folio_add_rmap() ANBZ: #9728 commit 96fd74958c558d6976bbc303dda0efa389182fab upstream Let's factor it out to prepare for reuse as we convert page_add_anon_rmap() to folio_add_anon_rmap_[pte|ptes|pmd](). Make the compiler always special-case on the granularity by using __always_inline. Link: https://lkml.kernel.org/r/20231220224504.646757-14-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Yin Fengwei Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/rmap.c | 78 +++++++++++++++++++++++++++++++------------------------ 1 file changed, 44 insertions(+), 34 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index cfb8a311d00d..5ab18a2ec0b0 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1127,6 +1127,48 @@ int folio_total_mapcount(struct folio *folio) return mapcount; } +static __always_inline unsigned int __folio_add_rmap(struct folio *folio, + struct page *page, int nr_pages, enum rmap_level level, + int *nr_pmdmapped) +{ + atomic_t *mapped = &folio->_nr_pages_mapped; + int first, nr = 0; + + __folio_rmap_sanity_checks(folio, page, nr_pages, level); + + switch (level) { + case RMAP_LEVEL_PTE: + do { + first = 
atomic_inc_and_test(&page->_mapcount); + if (first && folio_test_large(folio)) { + first = atomic_inc_return_relaxed(mapped); + first = (first < COMPOUND_MAPPED); + } + + if (first) + nr++; + } while (page++, --nr_pages > 0); + break; + case RMAP_LEVEL_PMD: + first = atomic_inc_and_test(&folio->_entire_mapcount); + if (first) { + nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); + if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { + *nr_pmdmapped = folio_nr_pages(folio); + nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); + /* Raced ahead of a remove and another add? */ + if (unlikely(nr < 0)) + nr = 0; + } else { + /* Raced ahead of a remove of COMPOUND_MAPPED */ + nr = 0; + } + } + break; + } + return nr; +} + /** * folio_move_anon_rmap - move a folio to our anon_vma * @folio: The folio to move to our anon_vma @@ -1352,43 +1394,11 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, enum rmap_level level) { - atomic_t *mapped = &folio->_nr_pages_mapped; - int nr = 0, nr_pmdmapped = 0, first; + int nr, nr_pmdmapped = 0; VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); - __folio_rmap_sanity_checks(folio, page, nr_pages, level); - - switch (level) { - case RMAP_LEVEL_PTE: - do { - first = atomic_inc_and_test(&page->_mapcount); - if (first && folio_test_large(folio)) { - first = atomic_inc_return_relaxed(mapped); - first = (first < COMPOUND_MAPPED); - } - - if (first) - nr++; - } while (page++, --nr_pages > 0); - break; - case RMAP_LEVEL_PMD: - first = atomic_inc_and_test(&folio->_entire_mapcount); - if (first) { - nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); - if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { - nr_pmdmapped = folio_nr_pages(folio); - nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); - /* Raced ahead of a remove and another add? 
*/ - if (unlikely(nr < 0)) - nr = 0; - } else { - /* Raced ahead of a remove of COMPOUND_MAPPED */ - nr = 0; - } - } - break; - } + nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); if (nr_pmdmapped) __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ? NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped); -- Gitee From 4b3db1ff16303435e91ff1652ca5ae96b7ad541e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:38 +0100 Subject: [PATCH 1150/2138] mm/rmap: introduce folio_add_anon_rmap_[pte|ptes|pmd]() ANBZ: #9728 commit 8bd5130070fbf2247a97c5361427a810522ac98a upstream Let's mimic what we did with folio_add_file_rmap_*() so we can similarly replace page_add_anon_rmap() next. Make the compiler always special-case on the granularity by using __always_inline. For the PageAnonExclusive sanity checks, when adding a PMD mapping, we're now also checking each individual subpage covered by that PMD, instead of only the head page. Note that the new functions ignore the RMAP_COMPOUND flag, which we will remove as soon as page_add_anon_rmap() is gone. 
Link: https://lkml.kernel.org/r/20231220224504.646757-15-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Yin Fengwei Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 6 +++ mm/rmap.c | 120 +++++++++++++++++++++++++++++-------------- 2 files changed, 88 insertions(+), 38 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 56309cf26901..877f10c635ca 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -228,6 +228,12 @@ static inline void __folio_rmap_sanity_checks(struct folio *folio, * rmap interfaces called when adding or removing pte of page */ void folio_move_anon_rmap(struct folio *, struct vm_area_struct *); +void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages, + struct vm_area_struct *, unsigned long address, rmap_t flags); +#define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \ + folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags) +void folio_add_anon_rmap_pmd(struct folio *, struct page *, + struct vm_area_struct *, unsigned long address, rmap_t flags); void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long address, rmap_t flags); void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *, diff --git a/mm/rmap.c b/mm/rmap.c index 5ab18a2ec0b0..5b9fc3e8dc6d 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1269,40 +1269,20 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, rmap_t flags) { struct folio *folio = page_folio(page); - atomic_t *mapped = &folio->_nr_pages_mapped; - int nr = 0, nr_pmdmapped = 0; - bool compound = flags & RMAP_COMPOUND; - bool first; - - VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); - /* Is page being mapped by PTE? Is this its first map to be added? 
*/ - if (likely(!compound)) { - first = atomic_inc_and_test(&page->_mapcount); - nr = first; - if (first && folio_test_large(folio)) { - nr = atomic_inc_return_relaxed(mapped); - nr = (nr < COMPOUND_MAPPED); - } - } else if (folio_test_pmd_mappable(folio)) { - /* That test is redundant: it's for safety or to optimize out */ + if (likely(!(flags & RMAP_COMPOUND))) + folio_add_anon_rmap_pte(folio, page, vma, address, flags); + else + folio_add_anon_rmap_pmd(folio, page, vma, address, flags); +} - first = atomic_inc_and_test(&folio->_entire_mapcount); - if (first) { - nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); - if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { - nr_pmdmapped = folio_nr_pages(folio); - nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); - /* Raced ahead of a remove and another add? */ - if (unlikely(nr < 0)) - nr = 0; - } else { - /* Raced ahead of a remove of COMPOUND_MAPPED */ - nr = 0; - } - } - } +static __always_inline void __folio_add_anon_rmap(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *vma, + unsigned long address, rmap_t flags, enum rmap_level level) +{ + int i, nr, nr_pmdmapped = 0; + nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); if (nr_pmdmapped) __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped); if (nr) @@ -1316,18 +1296,34 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, * folio->index right when not given the address of the head * page. */ - VM_WARN_ON_FOLIO(folio_test_large(folio) && !compound, folio); + VM_WARN_ON_FOLIO(folio_test_large(folio) && + level != RMAP_LEVEL_PMD, folio); __folio_set_anon(folio, vma, address, !!(flags & RMAP_EXCLUSIVE)); } else if (likely(!folio_test_ksm(folio))) { __page_check_anon_rmap(folio, page, vma, address); } - if (flags & RMAP_EXCLUSIVE) - SetPageAnonExclusive(page); - /* While PTE-mapping a THP we have a PMD and a PTE mapping. 
*/ - VM_WARN_ON_FOLIO((atomic_read(&page->_mapcount) > 0 || - (folio_test_large(folio) && folio_entire_mapcount(folio) > 1)) && - PageAnonExclusive(page), folio); + + if (flags & RMAP_EXCLUSIVE) { + switch (level) { + case RMAP_LEVEL_PTE: + for (i = 0; i < nr_pages; i++) + SetPageAnonExclusive(page + i); + break; + case RMAP_LEVEL_PMD: + SetPageAnonExclusive(page); + break; + } + } + for (i = 0; i < nr_pages; i++) { + struct page *cur_page = page + i; + + /* While PTE-mapping a THP we have a PMD and a PTE mapping. */ + VM_WARN_ON_FOLIO((atomic_read(&cur_page->_mapcount) > 0 || + (folio_test_large(folio) && + folio_entire_mapcount(folio) > 1)) && + PageAnonExclusive(cur_page), folio); + } /* * For large folio, only mlock it if it's fully mapped to VMA. It's @@ -1339,6 +1335,54 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, mlock_vma_folio(folio, vma); } +/** + * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio + * @folio: The folio to add the mappings to + * @page: The first page to add + * @nr_pages: The number of pages which will be mapped + * @vma: The vm area in which the mappings are added + * @address: The user virtual address of the first page to map + * @flags: The rmap flags + * + * The page range of folio is defined by [first_page, first_page + nr_pages) + * + * The caller needs to hold the page table lock, and the page must be locked in + * the anon_vma case: to serialize mapping,index checking after setting, + * and to ensure that an anon folio is not being upgraded racily to a KSM folio + * (but KSM folios are never downgraded). 
+ */ +void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page, + int nr_pages, struct vm_area_struct *vma, unsigned long address, + rmap_t flags) +{ + __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags, + RMAP_LEVEL_PTE); +} + +/** + * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio + * @folio: The folio to add the mapping to + * @page: The first page to add + * @vma: The vm area in which the mapping is added + * @address: The user virtual address of the first page to map + * @flags: The rmap flags + * + * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock, and the page must be locked in + * the anon_vma case: to serialize mapping,index checking after setting. + */ +void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, + struct vm_area_struct *vma, unsigned long address, rmap_t flags) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, + RMAP_LEVEL_PMD); +#else + WARN_ON_ONCE(true); +#endif +} + /** * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. * @folio: The folio to add the mapping to. -- Gitee From 1bdb9d9661530805911f48f24d66b12d88b1f793 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:39 +0100 Subject: [PATCH 1151/2138] mm/huge_memory: batch rmap operations in __split_huge_pmd_locked() ANBZ: #9728 commit 91b2978a348073db0e47b380fa66c865eb25f3d8 upstream Let's use folio_add_anon_rmap_ptes(), batching the rmap operations. While at it, use more folio operations (but only in the code branch we're touching), use VM_WARN_ON_FOLIO(), and pass RMAP_EXCLUSIVE instead of manually setting PageAnonExclusive. We should never see non-anon pages on that branch: otherwise, the existing page_add_anon_rmap() call would have been flawed already. 
Link: https://lkml.kernel.org/r/20231220224504.646757-16-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Yin Fengwei Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/huge_memory.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1ee02f39dfd5..52c0b61300b6 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2240,6 +2240,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, unsigned long haddr, bool freeze) { struct mm_struct *mm = vma->vm_mm; + struct folio *folio; struct page *page; pgtable_t pgtable; pmd_t old_pmd, _pmd; @@ -2338,16 +2339,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, */ old_pmd = pmdp_invalidate(vma, haddr, pmd); page = pmd_page(old_pmd); + folio = page_folio(page); if (pmd_dirty(old_pmd)) { dirty = true; - SetPageDirty(page); + folio_set_dirty(folio); } write = pmd_write(old_pmd); young = pmd_young(old_pmd); soft_dirty = pmd_soft_dirty(old_pmd); uffd_wp = pmd_uffd_wp(old_pmd); - VM_BUG_ON_PAGE(!page_count(page), page); + VM_WARN_ON_FOLIO(!folio_ref_count(folio), folio); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); /* * Without "freeze", we'll simply split the PMD, propagating the @@ -2364,11 +2367,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, * * See page_try_share_anon_rmap(): invalidate PMD first. 
*/ - anon_exclusive = PageAnon(page) && PageAnonExclusive(page); + anon_exclusive = PageAnonExclusive(page); if (freeze && anon_exclusive && page_try_share_anon_rmap(page)) freeze = false; - if (!freeze) - page_ref_add(page, HPAGE_PMD_NR - 1); + if (!freeze) { + rmap_t rmap_flags = RMAP_NONE; + + folio_ref_add(folio, HPAGE_PMD_NR - 1); + if (anon_exclusive) + rmap_flags |= RMAP_EXCLUSIVE; + folio_add_anon_rmap_ptes(folio, page, HPAGE_PMD_NR, + vma, haddr, rmap_flags); + } } /* @@ -2411,8 +2421,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); if (write) entry = pte_mkwrite(entry, vma); - if (anon_exclusive) - SetPageAnonExclusive(page + i); if (!young) entry = pte_mkold(entry); /* NOTE: this may set soft-dirty too on some archs */ @@ -2422,7 +2430,6 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, entry = pte_mksoft_dirty(entry); if (uffd_wp) entry = pte_mkuffd_wp(entry); - page_add_anon_rmap(page + i, vma, addr, RMAP_NONE); } VM_BUG_ON(!pte_none(ptep_get(pte))); set_pte_at(mm, addr, pte, entry); -- Gitee From 6b399e4cc6dde09d474ccc11ae75c9acd5de2589 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:40 +0100 Subject: [PATCH 1152/2138] mm/huge_memory: page_add_anon_rmap() -> folio_add_anon_rmap_pmd() ANBZ: #9728 commit 395db7b190892f1ca8d31e1fc83198e2531335f6 upstream Let's convert remove_migration_pmd(). No need to set RMAP_COMPOUND, that we will remove soon. 
Link: https://lkml.kernel.org/r/20231220224504.646757-17-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/huge_memory.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 52c0b61300b6..fde6384ba033 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3477,12 +3477,12 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) pmde = pmd_mkdirty(pmde); if (folio_test_anon(folio)) { - rmap_t rmap_flags = RMAP_COMPOUND; + rmap_t rmap_flags = RMAP_NONE; if (!is_readable_migration_entry(entry)) rmap_flags |= RMAP_EXCLUSIVE; - page_add_anon_rmap(new, vma, haddr, rmap_flags); + folio_add_anon_rmap_pmd(folio, new, vma, haddr, rmap_flags); } else { folio_add_file_rmap_pmd(folio, new, vma); } -- Gitee From 4a3e2dd9cf61980e7f992d87229264d4b3d1ded7 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:41 +0100 Subject: [PATCH 1153/2138] mm/migrate: page_add_anon_rmap() -> folio_add_anon_rmap_pte() ANBZ: #9728 commit a15dc4785c98f360bdca78483455e0aff30242cb upstream Let's convert remove_migration_pte(). 
Link: https://lkml.kernel.org/r/20231220224504.646757-18-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/migrate.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index db221fbfd29e..d9e464f96f0e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -259,8 +259,8 @@ static bool remove_migration_pte(struct folio *folio, #endif { if (folio_test_anon(folio)) - page_add_anon_rmap(new, vma, pvmw.address, - rmap_flags); + folio_add_anon_rmap_pte(folio, new, vma, + pvmw.address, rmap_flags); else folio_add_file_rmap_pte(folio, new, vma); set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); -- Gitee From 9f37ea047e86cc4726cd1011c1f3136f8e0e6f69 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:42 +0100 Subject: [PATCH 1154/2138] mm/ksm: page_add_anon_rmap() -> folio_add_anon_rmap_pte() ANBZ: #9728 commit 977295349eb7826c50e2841915de96eab3a502c2 upstream Let's convert replace_page(). While at it, perform some folio conversion. 
Link: https://lkml.kernel.org/r/20231220224504.646757-19-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/ksm.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index 5fbe58d863c7..37595360c8c8 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1186,6 +1186,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, static int replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage, pte_t orig_pte) { + struct folio *kfolio = page_folio(kpage); struct mm_struct *mm = vma->vm_mm; struct folio *folio; pmd_t *pmd; @@ -1225,15 +1226,16 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, goto out_mn; } VM_BUG_ON_PAGE(PageAnonExclusive(page), page); - VM_BUG_ON_PAGE(PageAnon(kpage) && PageAnonExclusive(kpage), kpage); + VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage), + kfolio); /* * No need to check ksm_use_zero_pages here: we can only have a * zero_page here if ksm_use_zero_pages was enabled already. */ if (!is_zero_pfn(page_to_pfn(kpage))) { - get_page(kpage); - page_add_anon_rmap(kpage, vma, addr, RMAP_NONE); + folio_get(kfolio); + folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE); newpte = mk_pte(kpage, vma->vm_page_prot); } else { /* -- Gitee From 84ac6db5814cf0f8791bc79a2b195b2a19d4f771 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:43 +0100 Subject: [PATCH 1155/2138] mm/swapfile: page_add_anon_rmap() -> folio_add_anon_rmap_pte() ANBZ: #9728 commit da7dc0afe243874b6ad25f5070aa728349e4e0fd upstream Let's convert unuse_pte(). 
Link: https://lkml.kernel.org/r/20231220224504.646757-20-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/swapfile.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index d67c20144dc8..d59faced1133 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1817,7 +1817,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, if (pte_swp_exclusive(old_pte)) rmap_flags |= RMAP_EXCLUSIVE; - page_add_anon_rmap(page, vma, addr, rmap_flags); + folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags); } else { /* ksm created a completely new copy */ folio_add_new_anon_rmap(folio, vma, addr); folio_add_lru_vma(folio, vma); -- Gitee From 9c4328287209dfecf9cbd24a9a95fd45289454c9 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:44 +0100 Subject: [PATCH 1156/2138] mm/memory: page_add_anon_rmap() -> folio_add_anon_rmap_pte() ANBZ: #9728 commit b832a354d787bfbdea5c226f0d77cc1a222d09f8 upstream Let's convert restore_exclusive_pte() and do_swap_page(). While at it, perform some folio conversion in restore_exclusive_pte(). 
Link: https://lkml.kernel.org/r/20231220224504.646757-21-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/memory.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index e9edceb08f69..8ab08da7343e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -697,6 +697,7 @@ static void restore_exclusive_pte(struct vm_area_struct *vma, struct page *page, unsigned long address, pte_t *ptep) { + struct folio *folio = page_folio(page); pte_t orig_pte; pte_t pte; swp_entry_t entry; @@ -712,14 +713,15 @@ static void restore_exclusive_pte(struct vm_area_struct *vma, else if (is_writable_device_exclusive_entry(entry)) pte = maybe_mkwrite(pte_mkdirty(pte), vma); - VM_BUG_ON(pte_write(pte) && !(PageAnon(page) && PageAnonExclusive(page))); + VM_BUG_ON_FOLIO(pte_write(pte) && (!folio_test_anon(folio) && + PageAnonExclusive(page)), folio); /* * No need to take a page reference as one was already * created when the swap entry was made. 
*/ - if (PageAnon(page)) - page_add_anon_rmap(page, vma, address, RMAP_NONE); + if (folio_test_anon(folio)) + folio_add_anon_rmap_pte(folio, page, vma, address, RMAP_NONE); else /* * Currently device exclusive access only supports anonymous @@ -4216,7 +4218,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) folio_add_new_anon_rmap(folio, vma, vmf->address); folio_add_lru_vma(folio, vma); } else { - page_add_anon_rmap(page, vma, vmf->address, rmap_flags); + folio_add_anon_rmap_pte(folio, page, vma, vmf->address, + rmap_flags); } VM_BUG_ON(!folio_test_anon(folio) || -- Gitee From bf3eca8dc5c60420dc0359967dfe41f2b3bda980 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:45 +0100 Subject: [PATCH 1157/2138] mm/rmap: remove page_add_anon_rmap() ANBZ: #9728 commit 84f0169e6c8a613012722e0d63302f9da4a72099 upstream All users are gone, remove it and all traces. Link: https://lkml.kernel.org/r/20231220224504.646757-22-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 2 -- mm/rmap.c | 31 ++++--------------------------- 2 files changed, 4 insertions(+), 29 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 877f10c635ca..2ff63bcf1e8c 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -234,8 +234,6 @@ void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages, folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags) void folio_add_anon_rmap_pmd(struct folio *, struct page *, struct vm_area_struct *, unsigned long address, rmap_t flags); -void page_add_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long address, rmap_t flags); void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned 
long address); void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages, diff --git a/mm/rmap.c b/mm/rmap.c index 5b9fc3e8dc6d..650b96496a28 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1240,7 +1240,7 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page, * The page's anon-rmap details (mapping and index) are guaranteed to * be set up correctly at this point. * - * We have exclusion against page_add_anon_rmap because the caller + * We have exclusion against folio_add_anon_rmap_*() because the caller * always holds the page locked. * * We have exclusion against folio_add_new_anon_rmap because those pages @@ -1253,29 +1253,6 @@ static void __page_check_anon_rmap(struct folio *folio, struct page *page, page); } -/** - * page_add_anon_rmap - add pte mapping to an anonymous page - * @page: the page to add the mapping to - * @vma: the vm area in which the mapping is added - * @address: the user virtual address mapped - * @flags: the rmap flags - * - * The caller needs to hold the pte lock, and the page must be locked in - * the anon_vma case: to serialize mapping,index checking after setting, - * and to ensure that PageAnon is not being upgraded racily to PageKsm - * (but PageKsm is never downgraded to PageAnon). 
- */ -void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, - unsigned long address, rmap_t flags) -{ - struct folio *folio = page_folio(page); - - if (likely(!(flags & RMAP_COMPOUND))) - folio_add_anon_rmap_pte(folio, page, vma, address, flags); - else - folio_add_anon_rmap_pmd(folio, page, vma, address, flags); -} - static __always_inline void __folio_add_anon_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, unsigned long address, rmap_t flags, enum rmap_level level) @@ -1389,7 +1366,7 @@ void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped * - * Like page_add_anon_rmap() but must only be called on *new* folios. + * Like folio_add_anon_rmap_*() but must only be called on *new* folios. * This means the inc-and-test can be bypassed. * The folio does not have to be locked. * @@ -1449,7 +1426,7 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio, if (nr) __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr); - /* See comments in page_add_anon_rmap() */ + /* See comments in folio_add_anon_rmap_*() */ if (!folio_test_large(folio)) mlock_vma_folio(folio, vma); } @@ -1563,7 +1540,7 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma, /* * It would be tidy to reset folio_test_anon mapping when fully - * unmapped, but that might overwrite a racing page_add_anon_rmap + * unmapped, but that might overwrite a racing folio_add_anon_rmap_*() * which increments mapcount after us but sets mapping before us: * so leave the reset to free_pages_prepare, and remember that * it's only reliable while mapped. 
-- Gitee From 79353e58c5bd11f87fb03d5b67115d5d9419fe2d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:46 +0100 Subject: [PATCH 1158/2138] mm/rmap: remove RMAP_COMPOUND ANBZ: #9728 commit 0cae959e3abf19ba62805f6e6a8b42b6cd9ed3e3 upstream No longer used, let's remove it and clarify RMAP_NONE/RMAP_EXCLUSIVE a bit. Link: https://lkml.kernel.org/r/20231220224504.646757-23-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 12 +++--------- mm/rmap.c | 2 -- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 2ff63bcf1e8c..426c90c4804f 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -172,20 +172,14 @@ struct anon_vma *folio_get_anon_vma(struct folio *folio); typedef int __bitwise rmap_t; /* - * No special request: if the page is a subpage of a compound page, it is - * mapped via a PTE. The mapped (sub)page is possibly shared between processes. + * No special request: A mapped anonymous (sub)page is possibly shared between + * processes. */ #define RMAP_NONE ((__force rmap_t)0) -/* The (sub)page is exclusive to a single process. */ +/* The anonymous (sub)page is exclusive to a single process. */ #define RMAP_EXCLUSIVE ((__force rmap_t)BIT(0)) -/* - * The compound page is not mapped via PTEs, but instead via a single PMD and - * should be accounted accordingly. - */ -#define RMAP_COMPOUND ((__force rmap_t)BIT(1)) - /* * Internally, we're using an enum to specify the granularity. We make the * compiler emit specialized code for each granularity. 
diff --git a/mm/rmap.c b/mm/rmap.c index 650b96496a28..7150c79f2f47 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2632,8 +2632,6 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) * The following two functions are for anonymous (private mapped) hugepages. * Unlike common anonymous pages, anonymous hugepages have no accounting code * and no lru code, because we handle hugepages differently from common pages. - * - * RMAP_COMPOUND is ignored. */ void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, unsigned long address, rmap_t flags) -- Gitee From 51ef6cedfc04ed7f8a78f403651c293414536115 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:47 +0100 Subject: [PATCH 1159/2138] mm/rmap: introduce folio_remove_rmap_[pte|ptes|pmd]() ANBZ: #9728 commit b06dc281aa9901076898d4d0a7bde588f11bc204 upstream Let's mimic what we did with folio_add_file_rmap_*() and folio_add_anon_rmap_*() so we can similarly replace page_remove_rmap() next. Make the compiler always special-case on the granularity by using __always_inline. We're adding folio_remove_rmap_ptes() handling right away, as we want to use that soon for batching rmap operations when unmapping PTE-mapped large folios. 
Link: https://lkml.kernel.org/r/20231220224504.646757-24-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 6 ++++ mm/rmap.c | 82 +++++++++++++++++++++++++++++++++++--------- 2 files changed, 72 insertions(+), 16 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 426c90c4804f..bae0e76f07f6 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -238,6 +238,12 @@ void folio_add_file_rmap_pmd(struct folio *, struct page *, struct vm_area_struct *); void page_remove_rmap(struct page *, struct vm_area_struct *, bool compound); +void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages, + struct vm_area_struct *); +#define folio_remove_rmap_pte(folio, page, vma) \ + folio_remove_rmap_ptes(folio, page, 1, vma) +void folio_remove_rmap_pmd(struct folio *, struct page *, + struct vm_area_struct *); void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address, rmap_t flags); diff --git a/mm/rmap.c b/mm/rmap.c index 7150c79f2f47..cd1071b1d277 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1480,25 +1480,37 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma, bool compound) { struct folio *folio = page_folio(page); + + if (likely(!compound)) + folio_remove_rmap_pte(folio, page, vma); + else + folio_remove_rmap_pmd(folio, page, vma); +} + +static __always_inline void __folio_remove_rmap(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *vma, + enum rmap_level level) +{ atomic_t *mapped = &folio->_nr_pages_mapped; - int nr = 0, nr_pmdmapped = 0; - bool last; + int last, nr = 0, nr_pmdmapped = 0; enum node_stat_item idx; - VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); - 
VM_BUG_ON_PAGE(compound && !PageHead(page), page); - - /* Is page being unmapped by PTE? Is this its last map to be removed? */ - if (likely(!compound)) { - last = atomic_add_negative(-1, &page->_mapcount); - nr = last; - if (last && folio_test_large(folio)) { - nr = atomic_dec_return_relaxed(mapped); - nr = (nr < COMPOUND_MAPPED); - } - } else if (folio_test_pmd_mappable(folio)) { - /* That test is redundant: it's for safety or to optimize out */ + __folio_rmap_sanity_checks(folio, page, nr_pages, level); + + switch (level) { + case RMAP_LEVEL_PTE: + do { + last = atomic_add_negative(-1, &page->_mapcount); + if (last && folio_test_large(folio)) { + last = atomic_dec_return_relaxed(mapped); + last = (last < COMPOUND_MAPPED); + } + if (last) + nr++; + } while (page++, --nr_pages > 0); + break; + case RMAP_LEVEL_PMD: last = atomic_add_negative(-1, &folio->_entire_mapcount); if (last) { nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped); @@ -1513,6 +1525,7 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma, nr = 0; } } + break; } if (nr_pmdmapped) { @@ -1534,7 +1547,7 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma, * is still mapped. */ if (folio_test_large(folio) && folio_test_anon(folio)) - if (!compound || nr < nr_pmdmapped) + if (level == RMAP_LEVEL_PTE || nr < nr_pmdmapped) deferred_split_folio(folio); } @@ -1549,6 +1562,43 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma, munlock_vma_folio(folio, vma); } +/** + * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio + * @folio: The folio to remove the mappings from + * @page: The first page to remove + * @nr_pages: The number of pages that will be removed from the mapping + * @vma: The vm area from which the mappings are removed + * + * The page range of the folio is defined by [page, page + nr_pages) + * + * The caller needs to hold the page table lock. 
+ */ +void folio_remove_rmap_ptes(struct folio *folio, struct page *page, + int nr_pages, struct vm_area_struct *vma) +{ + __folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE); +} + +/** + * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio + * @folio: The folio to remove the mapping from + * @page: The first page to remove + * @vma: The vm area from which the mapping is removed + * + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock. + */ +void folio_remove_rmap_pmd(struct folio *folio, struct page *page, + struct vm_area_struct *vma) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, RMAP_LEVEL_PMD); +#else + WARN_ON_ONCE(true); +#endif +} + /* * @arg: enum ttu_flags will be passed to this argument */ -- Gitee From 663a4fa14a1529f7d92e8f6c58db858a720e0d6a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:48 +0100 Subject: [PATCH 1160/2138] kernel/events/uprobes: page_remove_rmap() -> folio_remove_rmap_pte() ANBZ: #9728 commit 5cc9695f06b065168f5c893c8e006b6a8a2c9c91 upstream Let's convert __replace_page(). 
Link: https://lkml.kernel.org/r/20231220224504.646757-25-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- kernel/events/uprobes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index aa60ada8dfc3..7668f9219353 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -198,7 +198,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, set_pte_at_notify(mm, addr, pvmw.pte, mk_pte(new_page, vma->vm_page_prot)); - page_remove_rmap(old_page, vma, false); + folio_remove_rmap_pte(old_folio, old_page, vma); if (!folio_mapped(old_folio)) folio_free_swap(old_folio); page_vma_mapped_walk_done(&pvmw); -- Gitee From 8b85a21cc5ec726e3f4b360c52c86acf49b56873 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:49 +0100 Subject: [PATCH 1161/2138] mm/huge_memory: page_remove_rmap() -> folio_remove_rmap_pmd() ANBZ: #9728 commit a8e61d584eda0d5532b0bbfe3c2427d2688d3c83 upstream Let's convert zap_huge_pmd() and set_pmd_migration_entry(). While at it, perform some more folio conversion. 
Link: https://lkml.kernel.org/r/20231220224504.646757-26-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/huge_memory.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index fde6384ba033..8e21781faa1f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1863,7 +1863,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, if (pmd_present(orig_pmd)) { page = pmd_page(orig_pmd); - page_remove_rmap(page, vma, true); + folio_remove_rmap_pmd(page_folio(page), page, vma); VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); VM_BUG_ON_PAGE(!PageHead(page), page); } else if (thp_migration_supported()) { @@ -2275,12 +2275,13 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, page = pfn_swap_entry_to_page(entry); } else { page = pmd_page(old_pmd); - if (!PageDirty(page) && pmd_dirty(old_pmd)) - set_page_dirty(page); - if (!PageReferenced(page) && pmd_young(old_pmd)) - SetPageReferenced(page); - page_remove_rmap(page, vma, true); - put_page(page); + folio = page_folio(page); + if (!folio_test_dirty(folio) && pmd_dirty(old_pmd)) + folio_set_dirty(folio); + if (!folio_test_referenced(folio) && pmd_young(old_pmd)) + folio_set_referenced(folio); + folio_remove_rmap_pmd(folio, page, vma); + folio_put(folio); } add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); return; @@ -2438,7 +2439,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, pte_unmap(pte - 1); if (!pmd_migration) - page_remove_rmap(page, vma, true); + folio_remove_rmap_pmd(folio, page, vma); if (freeze) put_page(page); @@ -3402,6 +3403,7 @@ late_initcall(split_huge_pages_debugfs); int 
set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, struct page *page) { + struct folio *folio = page_folio(page); struct vm_area_struct *vma = pvmw->vma; struct mm_struct *mm = vma->vm_mm; unsigned long address = pvmw->address; @@ -3417,14 +3419,14 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, pmdval = pmdp_invalidate(vma, address, pvmw->pmd); /* See page_try_share_anon_rmap(): invalidate PMD first. */ - anon_exclusive = PageAnon(page) && PageAnonExclusive(page); + anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page); if (anon_exclusive && page_try_share_anon_rmap(page)) { set_pmd_at(mm, address, pvmw->pmd, pmdval); return -EBUSY; } if (pmd_dirty(pmdval)) - set_page_dirty(page); + folio_set_dirty(folio); if (pmd_write(pmdval)) entry = make_writable_migration_entry(page_to_pfn(page)); else if (anon_exclusive) @@ -3441,8 +3443,8 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, if (pmd_uffd_wp(pmdval)) pmdswp = pmd_swp_mkuffd_wp(pmdswp); set_pmd_at(mm, address, pvmw->pmd, pmdswp); - page_remove_rmap(page, vma, true); - put_page(page); + folio_remove_rmap_pmd(folio, page, vma); + folio_put(folio); trace_set_migration_pmd(address, pmd_val(pmdswp)); return 0; -- Gitee From acbaf28df651ad7d2b9feadc98660d1065709907 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:50 +0100 Subject: [PATCH 1162/2138] mm/khugepaged: page_remove_rmap() -> folio_remove_rmap_pte() ANBZ: #9728 commit 35668a4321461505dcc39b56a0d97b0ba2c99668 upstream Let's convert __collapse_huge_page_copy_succeeded() and collapse_pte_mapped_thp(). While at it, perform some more folio conversion in __collapse_huge_page_copy_succeeded(). We can get rid of release_pte_page(). 
Link: https://lkml.kernel.org/r/20231220224504.646757-27-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/khugepaged.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 9ece91d20d2d..60b445bbc6e9 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -494,11 +494,6 @@ static void release_pte_folio(struct folio *folio) folio_putback_lru(folio); } -static void release_pte_page(struct page *page) -{ - release_pte_folio(page_folio(page)); -} - static void release_pte_pages(pte_t *pte, pte_t *_pte, struct list_head *compound_pagelist) { @@ -687,6 +682,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte, spinlock_t *ptl, struct list_head *compound_pagelist) { + struct folio *src_folio; struct page *src_page; struct page *tmp; pte_t *_pte; @@ -708,16 +704,17 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte, } } else { src_page = pte_page(pteval); - if (!PageCompound(src_page)) - release_pte_page(src_page); + src_folio = page_folio(src_page); + if (!folio_test_large(src_folio)) + release_pte_folio(src_folio); /* * ptl mostly unnecessary, but preempt has to * be disabled to update the per-cpu stats - * inside page_remove_rmap(). + * inside folio_remove_rmap_pte(). */ spin_lock(ptl); ptep_clear(vma->vm_mm, address, _pte); - page_remove_rmap(src_page, vma, false); + folio_remove_rmap_pte(src_folio, src_page, vma); spin_unlock(ptl); free_page_and_swap_cache(src_page); } @@ -1611,7 +1608,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, * PTE dirty? Shmem page is already dirty; file is read-only. 
*/ ptep_clear(mm, addr, pte); - page_remove_rmap(page, vma, false); + folio_remove_rmap_pte(folio, page, vma); nr_ptes++; } -- Gitee From 96c9e119c6c38c45303835b89aa3cf91699c9c92 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:51 +0100 Subject: [PATCH 1163/2138] mm/ksm: page_remove_rmap() -> folio_remove_rmap_pte() ANBZ: #9728 commit 18e8612e56244c6db3254d435a22344856a9c55b upstream Let's convert replace_page(). Link: https://lkml.kernel.org/r/20231220224504.646757-28-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/ksm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/ksm.c b/mm/ksm.c index 37595360c8c8..558eb2153f10 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1265,7 +1265,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, set_pte_at_notify(mm, addr, ptep, newpte); folio = page_folio(page); - page_remove_rmap(page, vma, false); + folio_remove_rmap_pte(folio, page, vma); if (!folio_mapped(folio)) folio_free_swap(folio); folio_put(folio); -- Gitee From 0db63a3d6d452427576839d26781c7ca8dbde37e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:52 +0100 Subject: [PATCH 1164/2138] mm/memory: page_remove_rmap() -> folio_remove_rmap_pte() ANBZ: #9728 commit c46265030b0f400ef89833bb51da62676d2f855a upstream Let's convert zap_pte_range() and closely-related tlb_flush_rmap_batch(). While at it, perform some more folio conversion in zap_pte_range(). 
Link: https://lkml.kernel.org/r/20231220224504.646757-29-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/memory.c | 23 +++++++++++++---------- mm/mmu_gather.c | 2 +- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 8ab08da7343e..3143d5602fa0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1421,6 +1421,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, arch_enter_lazy_mmu_mode(); do { pte_t ptent = ptep_get(pte); + struct folio *folio; struct page *page; if (pte_none(ptent)) @@ -1446,21 +1447,22 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, continue; } + folio = page_folio(page); delay_rmap = 0; - if (!PageAnon(page)) { + if (!folio_test_anon(folio)) { if (pte_dirty(ptent)) { - set_page_dirty(page); + folio_set_dirty(folio); if (tlb_delay_rmap(tlb)) { delay_rmap = 1; force_flush = 1; } } if (pte_young(ptent) && likely(vma_has_recency(vma))) - mark_page_accessed(page); + folio_mark_accessed(folio); } rss[mm_counter(page)]--; if (!delay_rmap) { - page_remove_rmap(page, vma, false); + folio_remove_rmap_pte(folio, page, vma); if (unlikely(page_mapcount(page) < 0)) print_bad_pte(vma, addr, ptent, page); } @@ -1476,6 +1478,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, if (is_device_private_entry(entry) || is_device_exclusive_entry(entry)) { page = pfn_swap_entry_to_page(entry); + folio = page_folio(page); if (unlikely(!should_zap_page(details, page))) continue; /* @@ -1487,8 +1490,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, WARN_ON_ONCE(!vma_is_anonymous(vma)); rss[mm_counter(page)]--; if (is_device_private_entry(entry)) - page_remove_rmap(page, vma, false); - put_page(page); + 
folio_remove_rmap_pte(folio, page, vma); + folio_put(folio); } else if (!non_swap_entry(entry)) { /* Genuine swap entry, hence a private anon page */ if (!should_zap_cows(details)) @@ -3357,10 +3360,10 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * threads. * * The critical issue is to order this - * page_remove_rmap with the ptp_clear_flush above. - * Those stores are ordered by (if nothing else,) + * folio_remove_rmap_pte() with the ptp_clear_flush + * above. Those stores are ordered by (if nothing else,) * the barrier present in the atomic_add_negative - * in page_remove_rmap. + * in folio_remove_rmap_pte(); * * Then the TLB flush in ptep_clear_flush ensures that * no process can access the old page before the @@ -3369,7 +3372,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * mapcount is visible. So transitively, TLBs to * old page will be flushed before it can be reused. */ - page_remove_rmap(vmf->page, vma, false); + folio_remove_rmap_pte(old_folio, vmf->page, vma); } /* Free the old page.. */ diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index 4f559f4ddd21..604ddf08affe 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -55,7 +55,7 @@ static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_ if (encoded_page_flags(enc)) { struct page *page = encoded_page_ptr(enc); - page_remove_rmap(page, vma, false); + folio_remove_rmap_pte(page_folio(page), page, vma); } } } -- Gitee From f2f2cce471ce92d6e917fff86b1990e00474567c Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:53 +0100 Subject: [PATCH 1165/2138] mm/migrate_device: page_remove_rmap() -> folio_remove_rmap_pte() ANBZ: #9728 commit 5b205c7f2684764c8a9cc3442986623d4d6e87f1 upstream Let's convert migrate_vma_collect_pmd(). While at it, perform more folio conversion. 
Link: https://lkml.kernel.org/r/20231220224504.646757-30-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/migrate_device.c | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/mm/migrate_device.c b/mm/migrate_device.c index 81193363f8cd..39b7754480c6 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -107,6 +107,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, for (; addr < end; addr += PAGE_SIZE, ptep++) { unsigned long mpfn = 0, pfn; + struct folio *folio; struct page *page; swp_entry_t entry; pte_t pte; @@ -168,41 +169,43 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, } /* - * By getting a reference on the page we pin it and that blocks + * By getting a reference on the folio we pin it and that blocks * any kind of migration. Side effect is that it "freezes" the * pte. * - * We drop this reference after isolating the page from the lru - * for non device page (device page are not on the lru and thus + * We drop this reference after isolating the folio from the lru + * for non device folio (device folio are not on the lru and thus * can't be dropped from it). */ - get_page(page); + folio = page_folio(page); + folio_get(folio); /* - * We rely on trylock_page() to avoid deadlock between + * We rely on folio_trylock() to avoid deadlock between * concurrent migrations where each is waiting on the others - * page lock. If we can't immediately lock the page we fail this + * folio lock. If we can't immediately lock the folio we fail this * migration as it is only best effort anyway. * - * If we can lock the page it's safe to set up a migration entry - * now. 
In the common case where the page is mapped once in a + * If we can lock the folio it's safe to set up a migration entry + * now. In the common case where the folio is mapped once in a * single process setting up the migration entry now is an * optimisation to avoid walking the rmap later with * try_to_migrate(). */ - if (trylock_page(page)) { + if (folio_trylock(folio)) { bool anon_exclusive; pte_t swp_pte; flush_cache_page(vma, addr, pte_pfn(pte)); - anon_exclusive = PageAnon(page) && PageAnonExclusive(page); + anon_exclusive = folio_test_anon(folio) && + PageAnonExclusive(page); if (anon_exclusive) { pte = ptep_clear_flush(vma, addr, ptep); if (page_try_share_anon_rmap(page)) { set_pte_at(mm, addr, ptep, pte); - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); mpfn = 0; goto next; } @@ -214,7 +217,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, /* Set the dirty flag on the folio now the pte is gone. */ if (pte_dirty(pte)) - folio_mark_dirty(page_folio(page)); + folio_mark_dirty(folio); /* Setup special migration page table entry */ if (mpfn & MIGRATE_PFN_WRITE) @@ -248,16 +251,16 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, /* * This is like regular unmap: we remove the rmap and - * drop page refcount. Page won't be freed, as we took - * a reference just above. + * drop the folio refcount. The folio won't be freed, as + * we took a reference just above. */ - page_remove_rmap(page, vma, false); - put_page(page); + folio_remove_rmap_pte(folio, page, vma); + folio_put(folio); if (pte_present(pte)) unmapped++; } else { - put_page(page); + folio_put(folio); mpfn = 0; } -- Gitee From d515b5a62a944691fdf86f85b21e8651518d6d8e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:54 +0100 Subject: [PATCH 1166/2138] mm/rmap: page_remove_rmap() -> folio_remove_rmap_pte() ANBZ: #9728 commit ca1a0746182c3c059573d7e4554d335cae5306dc upstream Let's convert try_to_unmap_one() and try_to_migrate_one(). 
Link: https://lkml.kernel.org/r/20231220224504.646757-31-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/rmap.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index cd1071b1d277..d93c46204cbb 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1617,7 +1617,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, /* * When racing against e.g. zap_pte_range() on another cpu, - * in between its ptep_get_and_clear_full() and page_remove_rmap(), + * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), * try_to_unmap() may return before page_mapped() has become false, * if page table locking is skipped: use TTU_SYNC to wait for that. */ @@ -1898,7 +1898,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, if (unlikely(folio_test_hugetlb(folio))) hugetlb_remove_rmap(folio); else - page_remove_rmap(subpage, vma, false); + folio_remove_rmap_pte(folio, subpage, vma); if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); @@ -1966,7 +1966,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, /* * When racing against e.g. zap_pte_range() on another cpu, - * in between its ptep_get_and_clear_full() and page_remove_rmap(), + * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), * try_to_migrate() may return before page_mapped() has become false, * if page table locking is skipped: use TTU_SYNC to wait for that. 
*/ @@ -2259,7 +2259,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, if (unlikely(folio_test_hugetlb(folio))) hugetlb_remove_rmap(folio); else - page_remove_rmap(subpage, vma, false); + folio_remove_rmap_pte(folio, subpage, vma); if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); @@ -2398,7 +2398,7 @@ static bool page_make_device_exclusive_one(struct folio *folio, * There is a reference on the page for the swap entry which has * been removed, so shouldn't take another. */ - page_remove_rmap(subpage, vma, false); + folio_remove_rmap_pte(folio, subpage, vma); } mmu_notifier_invalidate_range_end(&range); -- Gitee From 3ae9870acc8a96d2dbe56dc67fa7740ccffe44ca Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:55 +0100 Subject: [PATCH 1167/2138] Documentation: stop referring to page_remove_rmap() ANBZ: #9728 commit 5a0033f0285e0bb29f6e4d1593d4519c91ed882a upstream Refer to folio_remove_rmap_*() instead. Link: https://lkml.kernel.org/r/20231220224504.646757-32-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- Documentation/mm/transhuge.rst | 2 +- Documentation/mm/unevictable-lru.rst | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst index 9a607059ea11..cf81272a6b8b 100644 --- a/Documentation/mm/transhuge.rst +++ b/Documentation/mm/transhuge.rst @@ -156,7 +156,7 @@ Partial unmap and deferred_split_folio() Unmapping part of THP (with munmap() or other way) is not going to free memory immediately. 
Instead, we detect that a subpage of THP is not in use -in page_remove_rmap() and queue the THP for splitting if memory pressure +in folio_remove_rmap_*() and queue the THP for splitting if memory pressure comes. Splitting will free up unused subpages. Splitting the page right away is not an option due to locking context in diff --git a/Documentation/mm/unevictable-lru.rst b/Documentation/mm/unevictable-lru.rst index 67f1338440a5..b6a07a26b10d 100644 --- a/Documentation/mm/unevictable-lru.rst +++ b/Documentation/mm/unevictable-lru.rst @@ -486,7 +486,7 @@ munlock the pages if we're removing the last VM_LOCKED VMA that maps the pages. Before the unevictable/mlock changes, mlocking did not mark the pages in any way, so unmapping them required no processing. -For each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls +For each PTE (or PMD) being unmapped from a VMA, folio_remove_rmap_*() calls munlock_vma_folio(), which calls munlock_folio() when the VMA is VM_LOCKED (unless it was a PTE mapping of a part of a transparent huge page). @@ -511,7 +511,7 @@ userspace; truncation even unmaps and deletes any private anonymous pages which had been Copied-On-Write from the file pages now being truncated. Mlocked pages can be munlocked and deleted in this way: like with munmap(), -for each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls +for each PTE (or PMD) being unmapped from a VMA, folio_remove_rmap_*() calls munlock_vma_folio(), which calls munlock_folio() when the VMA is VM_LOCKED (unless it was a PTE mapping of a part of a transparent huge page). -- Gitee From f204d5abb5e9bac32a331a3313f32c805d85867a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:56 +0100 Subject: [PATCH 1168/2138] mm/rmap: remove page_remove_rmap() ANBZ: #9728 commit 4d8f7418e8ba36036c8486d92d9591c368ab9b85 upstream All callers are gone, let's remove it and some leftover traces. 
Link: https://lkml.kernel.org/r/20231220224504.646757-33-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 4 +--- mm/filemap.c | 10 +++++----- mm/internal.h | 2 +- mm/memory-failure.c | 4 ++-- mm/rmap.c | 23 ++--------------------- 5 files changed, 11 insertions(+), 32 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index bae0e76f07f6..a7250c0a81cc 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -236,8 +236,6 @@ void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages, folio_add_file_rmap_ptes(folio, page, 1, vma) void folio_add_file_rmap_pmd(struct folio *, struct page *, struct vm_area_struct *); -void page_remove_rmap(struct page *, struct vm_area_struct *, - bool compound); void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages, struct vm_area_struct *); #define folio_remove_rmap_pte(folio, page, vma) \ @@ -384,7 +382,7 @@ static inline int page_try_dup_anon_rmap(struct page *page, bool compound, * * This is similar to page_try_dup_anon_rmap(), however, not used during fork() * to duplicate a mapping, but instead to prepare for KSM or temporarily - * unmapping a page (swap, migration) via page_remove_rmap(). + * unmapping a page (swap, migration) via folio_remove_rmap_*(). * * Marking the page shared can only fail if the page may be pinned; device * private pages cannot get pinned and consequently this function cannot fail. 
diff --git a/mm/filemap.c b/mm/filemap.c index 0440e04ecdee..7e6ca5ebba6e 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -113,11 +113,11 @@ * ->i_pages lock (try_to_unmap_one) * ->lruvec->lru_lock (follow_page->mark_page_accessed) * ->lruvec->lru_lock (check_pte_range->isolate_lru_page) - * ->private_lock (page_remove_rmap->set_page_dirty) - * ->i_pages lock (page_remove_rmap->set_page_dirty) - * bdi.wb->list_lock (page_remove_rmap->set_page_dirty) - * ->inode->i_lock (page_remove_rmap->set_page_dirty) - * ->memcg->move_lock (page_remove_rmap->folio_memcg_lock) + * ->private_lock (folio_remove_rmap_pte->set_page_dirty) + * ->i_pages lock (folio_remove_rmap_pte->set_page_dirty) + * bdi.wb->list_lock (folio_remove_rmap_pte->set_page_dirty) + * ->inode->i_lock (folio_remove_rmap_pte->set_page_dirty) + * ->memcg->move_lock (folio_remove_rmap_pte->folio_memcg_lock) * bdi.wb->list_lock (zap_pte_range->set_page_dirty) * ->inode->i_lock (zap_pte_range->set_page_dirty) * ->private_lock (zap_pte_range->block_dirty_folio) diff --git a/mm/internal.h b/mm/internal.h index d3df1271fe0e..a819129ae29c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -711,7 +711,7 @@ folio_within_vma(struct folio *folio, struct vm_area_struct *vma) * under page table lock for the pte/pmd being added or removed. * * mlock is usually called at the end of page_add_*_rmap(), munlock at - * the end of page_remove_rmap(); but new anon folios are managed by + * the end of folio_remove_rmap_*(); but new anon folios are managed by * folio_add_lru_vma() calling mlock_new_folio(). */ void mlock_folio(struct folio *folio); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index c400b15b4d41..75a94ca5fa5a 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -2329,8 +2329,8 @@ int memory_failure(unsigned long pfn, int flags) * We use page flags to determine what action should be taken, but * the flags can be modified by the error containment action. 
One * example is an mlocked page, where PG_mlocked is cleared by - * page_remove_rmap() in try_to_unmap_one(). So to determine page status - * correctly, we save a copy of the page flags at this time. + * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page + * status correctly, we save a copy of the page flags at this time. */ page_flags = p->flags; diff --git a/mm/rmap.c b/mm/rmap.c index d93c46204cbb..7ab943df34d2 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -470,7 +470,7 @@ void __init anon_vma_init(void) /* * Getting a lock on a stable anon_vma from a page off the LRU is tricky! * - * Since there is no serialization what so ever against page_remove_rmap() + * Since there is no serialization what so ever against folio_remove_rmap_*() * the best this function can do is return a refcount increased anon_vma * that might have been relevant to this page. * @@ -487,7 +487,7 @@ void __init anon_vma_init(void) * [ something equivalent to page_mapped_in_vma() ]. * * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from - * page_remove_rmap() that the anon_vma pointer from page->mapping is valid + * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid * if there is a mapcount, we can dereference the anon_vma after observing * those. */ @@ -1468,25 +1468,6 @@ void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, #endif } -/** - * page_remove_rmap - take down pte mapping from a page - * @page: page to remove mapping from - * @vma: the vm area from which the mapping is removed - * @compound: uncharge the page as compound or small page - * - * The caller needs to hold the pte lock. 
- */ -void page_remove_rmap(struct page *page, struct vm_area_struct *vma, - bool compound) -{ - struct folio *folio = page_folio(page); - - if (likely(!compound)) - folio_remove_rmap_pte(folio, page, vma); - else - folio_remove_rmap_pmd(folio, page, vma); -} - static __always_inline void __folio_remove_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, enum rmap_level level) -- Gitee From c15b3810725779965bfe3e1756054dbff3904a48 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:57 +0100 Subject: [PATCH 1169/2138] mm/rmap: convert page_dup_file_rmap() to folio_dup_file_rmap_[pte|ptes|pmd]() ANBZ: #9728 commit d8ef5e311d7bfde54b60ab45026f206eff31b2d2 upstream Let's convert page_dup_file_rmap() like the other rmap functions. As there is only a single caller, convert that single caller right away and remove page_dup_file_rmap(). Add folio_dup_file_rmap_ptes() right away, we want to perform rmap batching during fork() soon. Link: https://lkml.kernel.org/r/20231220224504.646757-34-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 59 ++++++++++++++++++++++++++++++++++++++++---- mm/memory.c | 2 +- 2 files changed, 55 insertions(+), 6 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index a7250c0a81cc..77b5e5d5e6ab 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -303,6 +303,60 @@ static inline void hugetlb_remove_rmap(struct folio *folio) atomic_dec(&folio->_entire_mapcount); } +static __always_inline void __folio_dup_file_rmap(struct folio *folio, + struct page *page, int nr_pages, enum rmap_level level) +{ + __folio_rmap_sanity_checks(folio, page, nr_pages, level); + + switch (level) { + case 
RMAP_LEVEL_PTE: + do { + atomic_inc(&page->_mapcount); + } while (page++, --nr_pages > 0); + break; + case RMAP_LEVEL_PMD: + atomic_inc(&folio->_entire_mapcount); + break; + } +} + +/** + * folio_dup_file_rmap_ptes - duplicate PTE mappings of a page range of a folio + * @folio: The folio to duplicate the mappings of + * @page: The first page to duplicate the mappings of + * @nr_pages: The number of pages of which the mapping will be duplicated + * + * The page range of the folio is defined by [page, page + nr_pages) + * + * The caller needs to hold the page table lock. + */ +static inline void folio_dup_file_rmap_ptes(struct folio *folio, + struct page *page, int nr_pages) +{ + __folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE); +} +#define folio_dup_file_rmap_pte(folio, page) \ + folio_dup_file_rmap_ptes(folio, page, 1) + +/** + * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio + * @folio: The folio to duplicate the mapping of + * @page: The first page to duplicate the mapping of + * + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock. 
+ */ +static inline void folio_dup_file_rmap_pmd(struct folio *folio, + struct page *page) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + __folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PTE); +#else + WARN_ON_ONCE(true); +#endif +} + static inline void __page_dup_rmap(struct page *page, bool compound) { VM_WARN_ON(folio_test_hugetlb(page_folio(page))); @@ -317,11 +371,6 @@ static inline void __page_dup_rmap(struct page *page, bool compound) } } -static inline void page_dup_file_rmap(struct page *page, bool compound) -{ - __page_dup_rmap(page, compound); -} - /** * page_try_dup_anon_rmap - try duplicating a mapping of an already mapped * anonymous page diff --git a/mm/memory.c b/mm/memory.c index 3143d5602fa0..547b9334ba03 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -952,7 +952,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, rss[MM_ANONPAGES]++; } else if (page) { folio_get(folio); - page_dup_file_rmap(page, false); + folio_dup_file_rmap_pte(folio, page); rss[mm_counter_file(page)]++; } -- Gitee From aa27fae767c532ac639faec80ca9153e191e4067 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:58 +0100 Subject: [PATCH 1170/2138] mm/rmap: introduce folio_try_dup_anon_rmap_[pte|ptes|pmd]() ANBZ: #9728 commit 61d90309b7156d54c5d358cb5d8bf55b33d233d2 upstream The last user of page_needs_cow_for_dma() and __page_dup_rmap() are gone, remove them. Add folio_try_dup_anon_rmap_ptes() right away, we want to perform rmap batching during fork() soon. 
Link: https://lkml.kernel.org/r/20231220224504.646757-35-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/mm.h | 6 -- include/linux/rmap.h | 150 ++++++++++++++++++++++++++++++------------- 2 files changed, 106 insertions(+), 50 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 2a33ec96251f..753ad4350795 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1961,12 +1961,6 @@ static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma, return folio_maybe_dma_pinned(folio); } -static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, - struct page *page) -{ - return folio_needs_cow_for_dma(vma, page_folio(page)); -} - /** * is_zero_page - Query if a page is a zero page * @page: The page to query diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 77b5e5d5e6ab..dfce532b55b7 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -357,68 +357,130 @@ static inline void folio_dup_file_rmap_pmd(struct folio *folio, #endif } -static inline void __page_dup_rmap(struct page *page, bool compound) +static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *src_vma, + enum rmap_level level) { - VM_WARN_ON(folio_test_hugetlb(page_folio(page))); + bool maybe_pinned; + int i; + + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + __folio_rmap_sanity_checks(folio, page, nr_pages, level); - if (compound) { - struct folio *folio = (struct folio *)page; + /* + * If this folio may have been pinned by the parent process, + * don't allow to duplicate the mappings but instead require to e.g., + * copy the subpage immediately for the child so that we'll always + * guarantee the 
pinned folio won't be randomly replaced in the + * future on write faults. + */ + maybe_pinned = likely(!folio_is_device_private(folio)) && + unlikely(folio_needs_cow_for_dma(src_vma, folio)); - VM_BUG_ON_PAGE(compound && !PageHead(page), page); + /* + * No need to check+clear for already shared PTEs/PMDs of the + * folio. But if any page is PageAnonExclusive, we must fallback to + * copying if the folio maybe pinned. + */ + switch (level) { + case RMAP_LEVEL_PTE: + if (unlikely(maybe_pinned)) { + for (i = 0; i < nr_pages; i++) + if (PageAnonExclusive(page + i)) + return -EBUSY; + } + do { + if (PageAnonExclusive(page)) + ClearPageAnonExclusive(page); + atomic_inc(&page->_mapcount); + } while (page++, --nr_pages > 0); + break; + case RMAP_LEVEL_PMD: + if (PageAnonExclusive(page)) { + if (unlikely(maybe_pinned)) + return -EBUSY; + ClearPageAnonExclusive(page); + } atomic_inc(&folio->_entire_mapcount); - } else { - atomic_inc(&page->_mapcount); + break; } + return 0; } /** - * page_try_dup_anon_rmap - try duplicating a mapping of an already mapped - * anonymous page - * @page: the page to duplicate the mapping for - * @compound: the page is mapped as compound or as a small page - * @vma: the source vma + * folio_try_dup_anon_rmap_ptes - try duplicating PTE mappings of a page range + * of a folio + * @folio: The folio to duplicate the mappings of + * @page: The first page to duplicate the mappings of + * @nr_pages: The number of pages of which the mapping will be duplicated + * @src_vma: The vm area from which the mappings are duplicated * - * The caller needs to hold the PT lock and the vma->vma_mm->write_protect_seq. + * The page range of the folio is defined by [page, page + nr_pages) * - * Duplicating the mapping can only fail if the page may be pinned; device - * private pages cannot get pinned and consequently this function cannot fail. + * The caller needs to hold the page table lock and the + * vma->vma_mm->write_protect_seq. 
+ * + * Duplicating the mappings can only fail if the folio may be pinned; device + * private folios cannot get pinned and consequently this function cannot fail + * for them. + * + * If duplicating the mappings succeeded, the duplicated PTEs have to be R/O in + * the parent and the child. They must *not* be writable after this call + * succeeded. + * + * Returns 0 if duplicating the mappings succeeded. Returns -EBUSY otherwise. + */ +static inline int folio_try_dup_anon_rmap_ptes(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *src_vma) +{ + return __folio_try_dup_anon_rmap(folio, page, nr_pages, src_vma, + RMAP_LEVEL_PTE); +} +#define folio_try_dup_anon_rmap_pte(folio, page, vma) \ + folio_try_dup_anon_rmap_ptes(folio, page, 1, vma) + +/** + * folio_try_dup_anon_rmap_pmd - try duplicating a PMD mapping of a page range + * of a folio + * @folio: The folio to duplicate the mapping of + * @page: The first page to duplicate the mapping of + * @src_vma: The vm area from which the mapping is duplicated + * + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) * - * If duplicating the mapping succeeds, the page has to be mapped R/O into - * the parent and the child. It must *not* get mapped writable after this call. + * The caller needs to hold the page table lock and the + * vma->vma_mm->write_protect_seq. + * + * Duplicating the mapping can only fail if the folio may be pinned; device + * private folios cannot get pinned and consequently this function cannot fail + * for them. + * + * If duplicating the mapping succeeds, the duplicated PMD has to be R/O in + * the parent and the child. They must *not* be writable after this call + * succeeded. * * Returns 0 if duplicating the mapping succeeded. Returns -EBUSY otherwise. 
*/ +static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio, + struct page *page, struct vm_area_struct *src_vma) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + return __folio_try_dup_anon_rmap(folio, page, HPAGE_PMD_NR, src_vma, + RMAP_LEVEL_PMD); +#else + WARN_ON_ONCE(true); + return -EBUSY; +#endif +} + static inline int page_try_dup_anon_rmap(struct page *page, bool compound, struct vm_area_struct *vma) { - VM_BUG_ON_PAGE(!PageAnon(page), page); - - /* - * No need to check+clear for already shared pages, including KSM - * pages. - */ - if (!PageAnonExclusive(page)) - goto dup; - - /* - * If this page may have been pinned by the parent process, - * don't allow to duplicate the mapping but instead require to e.g., - * copy the page immediately for the child so that we'll always - * guarantee the pinned page won't be randomly replaced in the - * future on write faults. - */ - if (likely(!is_device_private_page(page)) && - unlikely(page_needs_cow_for_dma(vma, page))) - return -EBUSY; + struct folio *folio = page_folio(page); - ClearPageAnonExclusive(page); - /* - * It's okay to share the anon page between both processes, mapping - * the page R/O into both processes. - */ -dup: - __page_dup_rmap(page, compound); - return 0; + if (likely(!compound)) + return folio_try_dup_anon_rmap_pte(folio, page, vma); + return folio_try_dup_anon_rmap_pmd(folio, page, vma); } /** -- Gitee From d7947afe3e62b651aa674bae26c4352fd45f31ec Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:44:59 +0100 Subject: [PATCH 1171/2138] mm/huge_memory: page_try_dup_anon_rmap() -> folio_try_dup_anon_rmap_pmd() ANBZ: #9728 commit 96c772c25c89f35091ce924117602d04de82a0fe upstream Let's convert copy_huge_pmd() and fixup the comment in copy_huge_pud(). While at it, perform more folio conversion in copy_huge_pmd(). 
Link: https://lkml.kernel.org/r/20231220224504.646757-36-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/huge_memory.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 8e21781faa1f..cf18dd979684 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1241,6 +1241,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, { spinlock_t *dst_ptl, *src_ptl; struct page *src_page; + struct folio *src_folio; pmd_t pmd; pgtable_t pgtable = NULL; int ret = -ENOMEM; @@ -1307,11 +1308,12 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, src_page = pmd_page(pmd); VM_BUG_ON_PAGE(!PageHead(src_page), src_page); + src_folio = page_folio(src_page); - get_page(src_page); - if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) { + folio_get(src_folio); + if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) { /* Page maybe pinned: split and retry the fault on PTEs. */ - put_page(src_page); + folio_put(src_folio); pte_free(dst_mm, pgtable); spin_unlock(src_ptl); spin_unlock(dst_ptl); @@ -1420,8 +1422,8 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, } /* - * TODO: once we support anonymous pages, use page_try_dup_anon_rmap() - * and split if duplicating fails. + * TODO: once we support anonymous pages, use + * folio_try_dup_anon_rmap_*() and split if duplicating fails. 
*/ pudp_set_wrprotect(src_mm, addr, src_pud); pud = pud_mkold(pud_wrprotect(pud)); -- Gitee From 214f864fff560515e37f28f121417ed1339eb12a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:45:00 +0100 Subject: [PATCH 1172/2138] mm/memory: page_try_dup_anon_rmap() -> folio_try_dup_anon_rmap_pte() ANBZ: #9728 commit 08e7795e2444c3df9292f4ac7092be6168166a46 upstream Let's convert copy_nonpresent_pte(). While at it, perform some more folio conversion. Link: https://lkml.kernel.org/r/20231220224504.646757-37-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/memory.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 547b9334ba03..1db774595415 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -772,6 +772,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, unsigned long vm_flags = dst_vma->vm_flags; pte_t orig_pte = ptep_get(src_pte); pte_t pte = orig_pte; + struct folio *folio; struct page *page; swp_entry_t entry = pte_to_swp_entry(orig_pte); @@ -816,6 +817,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, } } else if (is_device_private_entry(entry)) { page = pfn_swap_entry_to_page(entry); + folio = page_folio(page); /* * Update rss count even for unaddressable pages, as @@ -826,10 +828,10 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, * for unaddressable pages, at some point. But for now * keep things as they are. */ - get_page(page); + folio_get(folio); rss[mm_counter(page)]++; /* Cannot fail as these pages cannot get pinned. 
*/ - BUG_ON(page_try_dup_anon_rmap(page, false, src_vma)); + folio_try_dup_anon_rmap_pte(folio, page, src_vma); /* * We do not preserve soft-dirty information, because so @@ -943,7 +945,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, * future. */ folio_get(folio); - if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) { + if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) { /* Page may be pinned, we have to copy. */ folio_put(folio); return copy_present_page(dst_vma, src_vma, dst_pte, src_pte, -- Gitee From 4280d19796cdb2c916e0b2d524833c09df06c1e1 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:45:01 +0100 Subject: [PATCH 1173/2138] mm/rmap: remove page_try_dup_anon_rmap() ANBZ: #9728 commit a13d096471ec0ac5c6fc90fbcd57e8430024046a upstream All users are gone, remove page_try_dup_anon_rmap() and any remaining traces. Link: https://lkml.kernel.org/r/20231220224504.646757-38-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index dfce532b55b7..3eaa20b14176 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -248,7 +248,7 @@ void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *, void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); -/* See page_try_dup_anon_rmap() */ +/* See folio_try_dup_anon_rmap_*() */ static inline int hugetlb_try_dup_anon_rmap(struct folio *folio, struct vm_area_struct *vma) { @@ -473,16 +473,6 @@ static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio, #endif } -static inline int 
page_try_dup_anon_rmap(struct page *page, bool compound, - struct vm_area_struct *vma) -{ - struct folio *folio = page_folio(page); - - if (likely(!compound)) - return folio_try_dup_anon_rmap_pte(folio, page, vma); - return folio_try_dup_anon_rmap_pmd(folio, page, vma); -} - /** * page_try_share_anon_rmap - try marking an exclusive anonymous page possibly * shared to prepare for KSM or temporary unmapping @@ -491,8 +481,8 @@ static inline int page_try_dup_anon_rmap(struct page *page, bool compound, * The caller needs to hold the PT lock and has to have the page table entry * cleared/invalidated. * - * This is similar to page_try_dup_anon_rmap(), however, not used during fork() - * to duplicate a mapping, but instead to prepare for KSM or temporarily + * This is similar to folio_try_dup_anon_rmap_*(), however, not used during + * fork() to duplicate a mapping, but instead to prepare for KSM or temporarily * unmapping a page (swap, migration) via folio_remove_rmap_*(). * * Marking the page shared can only fail if the page may be pinned; device -- Gitee From e32410f6be1f494568b3608e120d55d6b59bd7cc Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:45:02 +0100 Subject: [PATCH 1174/2138] mm: convert page_try_share_anon_rmap() to folio_try_share_anon_rmap_[pte|pmd]() ANBZ: #9728 commit e3b4b1374f87c71e9309efc6149f113cdd17af72 upstream Let's convert it like we converted all the other rmap functions. Don't introduce folio_try_share_anon_rmap_ptes() for now, as we don't have a user that wants rmap batching in sight. Pretty easy to add later. All users are easy to convert -- only ksm.c doesn't use folios yet but that is left for future work -- so let's just do it in a single shot. While at it, turn the BUG_ON into a WARN_ON_ONCE. Note that page_try_share_anon_rmap() so far didn't care about pte/pmd mappings (no compound parameter). We're changing that so we can perform better sanity checks and make the code actually more readable/consistent. 
For example, __folio_rmap_sanity_checks() will make sure that a PMD range actually falls completely into the folio. Link: https://lkml.kernel.org/r/20231220224504.646757-39-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 96 ++++++++++++++++++++++++++++++++------------ mm/huge_memory.c | 9 +++-- mm/internal.h | 4 +- mm/ksm.c | 5 ++- mm/migrate_device.c | 2 +- mm/rmap.c | 11 ++--- 6 files changed, 88 insertions(+), 39 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 3eaa20b14176..f684f2bf9153 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -264,7 +264,7 @@ static inline int hugetlb_try_dup_anon_rmap(struct folio *folio, return 0; } -/* See page_try_share_anon_rmap() */ +/* See folio_try_share_anon_rmap_*() */ static inline int hugetlb_try_share_anon_rmap(struct folio *folio) { VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); @@ -473,31 +473,15 @@ static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio, #endif } -/** - * page_try_share_anon_rmap - try marking an exclusive anonymous page possibly - * shared to prepare for KSM or temporary unmapping - * @page: the exclusive anonymous page to try marking possibly shared - * - * The caller needs to hold the PT lock and has to have the page table entry - * cleared/invalidated. - * - * This is similar to folio_try_dup_anon_rmap_*(), however, not used during - * fork() to duplicate a mapping, but instead to prepare for KSM or temporarily - * unmapping a page (swap, migration) via folio_remove_rmap_*(). - * - * Marking the page shared can only fail if the page may be pinned; device - * private pages cannot get pinned and consequently this function cannot fail. 
- * - * Returns 0 if marking the page possibly shared succeeded. Returns -EBUSY - * otherwise. - */ -static inline int page_try_share_anon_rmap(struct page *page) +static __always_inline int __folio_try_share_anon_rmap(struct folio *folio, + struct page *page, int nr_pages, enum rmap_level level) { - VM_WARN_ON(folio_test_hugetlb(page_folio(page))); - VM_BUG_ON_PAGE(!PageAnon(page) || !PageAnonExclusive(page), page); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio); + __folio_rmap_sanity_checks(folio, page, nr_pages, level); - /* device private pages cannot get pinned via GUP. */ - if (unlikely(is_device_private_page(page))) { + /* device private folios cannot get pinned via GUP. */ + if (unlikely(folio_is_device_private(folio))) { ClearPageAnonExclusive(page); return 0; } @@ -548,7 +532,7 @@ static inline int page_try_share_anon_rmap(struct page *page) if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) smp_mb(); - if (unlikely(page_maybe_dma_pinned(page))) + if (unlikely(folio_maybe_dma_pinned(folio))) return -EBUSY; ClearPageAnonExclusive(page); @@ -561,6 +545,68 @@ static inline int page_try_share_anon_rmap(struct page *page) return 0; } +/** + * folio_try_share_anon_rmap_pte - try marking an exclusive anonymous page + * mapped by a PTE possibly shared to prepare + * for KSM or temporary unmapping + * @folio: The folio to share a mapping of + * @page: The mapped exclusive page + * + * The caller needs to hold the page table lock and has to have the page table + * entries cleared/invalidated. + * + * This is similar to folio_try_dup_anon_rmap_pte(), however, not used during + * fork() to duplicate mappings, but instead to prepare for KSM or temporarily + * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pte(). + * + * Marking the mapped page shared can only fail if the folio maybe pinned; + * device private folios cannot get pinned and consequently this function cannot + * fail. 
+ * + * Returns 0 if marking the mapped page possibly shared succeeded. Returns + * -EBUSY otherwise. + */ +static inline int folio_try_share_anon_rmap_pte(struct folio *folio, + struct page *page) +{ + return __folio_try_share_anon_rmap(folio, page, 1, RMAP_LEVEL_PTE); +} + +/** + * folio_try_share_anon_rmap_pmd - try marking an exclusive anonymous page + * range mapped by a PMD possibly shared to + * prepare for temporary unmapping + * @folio: The folio to share the mapping of + * @page: The first page to share the mapping of + * + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock and has to have the page table + * entries cleared/invalidated. + * + * This is similar to folio_try_dup_anon_rmap_pmd(), however, not used during + * fork() to duplicate a mapping, but instead to prepare for temporarily + * unmapping parts of a folio (swap, migration) via folio_remove_rmap_pmd(). + * + * Marking the mapped pages shared can only fail if the folio maybe pinned; + * device private folios cannot get pinned and consequently this function cannot + * fail. + * + * Returns 0 if marking the mapped pages possibly shared succeeded. Returns + * -EBUSY otherwise. + */ +static inline int folio_try_share_anon_rmap_pmd(struct folio *folio, + struct page *page) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + return __folio_try_share_anon_rmap(folio, page, HPAGE_PMD_NR, + RMAP_LEVEL_PMD); +#else + WARN_ON_ONCE(true); + return -EBUSY; +#endif +} + /* * Called from mm/vmscan.c to handle paging out */ diff --git a/mm/huge_memory.c b/mm/huge_memory.c index cf18dd979684..b0c4d3416819 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2368,10 +2368,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, * In case we cannot clear PageAnonExclusive(), split the PMD * only and let try_to_migrate_one() fail later. * - * See page_try_share_anon_rmap(): invalidate PMD first. 
+ * See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */ anon_exclusive = PageAnonExclusive(page); - if (freeze && anon_exclusive && page_try_share_anon_rmap(page)) + if (freeze && anon_exclusive && + folio_try_share_anon_rmap_pmd(folio, page)) freeze = false; if (!freeze) { rmap_t rmap_flags = RMAP_NONE; @@ -3420,9 +3421,9 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); pmdval = pmdp_invalidate(vma, address, pvmw->pmd); - /* See page_try_share_anon_rmap(): invalidate PMD first. */ + /* See folio_try_share_anon_rmap_pmd(): invalidate PMD first. */ anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page); - if (anon_exclusive && page_try_share_anon_rmap(page)) { + if (anon_exclusive && folio_try_share_anon_rmap_pmd(folio, page)) { set_pmd_at(mm, address, pvmw->pmd, pmdval); return -EBUSY; } diff --git a/mm/internal.h b/mm/internal.h index a819129ae29c..d96ae6f30840 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1103,7 +1103,7 @@ enum { * * Ordinary GUP: Using the PT lock * * GUP-fast and fork(): mm->write_protect_seq * * GUP-fast and KSM or temporary unmapping (swap, migration): see - * page_try_share_anon_rmap() + * folio_try_share_anon_rmap_*() * * Must be called with the (sub)page that's actually referenced via the * page table entry, which might not necessarily be the head page for a @@ -1146,7 +1146,7 @@ static inline bool gup_must_unshare(struct vm_area_struct *vma, return is_cow_mapping(vma->vm_flags); } - /* Paired with a memory barrier in page_try_share_anon_rmap(). */ + /* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) smp_rmb(); diff --git a/mm/ksm.c b/mm/ksm.c index 558eb2153f10..9aafdc73efa2 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1148,8 +1148,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, goto out_unlock; } - /* See page_try_share_anon_rmap(): clear PTE first. 
*/ - if (anon_exclusive && page_try_share_anon_rmap(page)) { + /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ + if (anon_exclusive && + folio_try_share_anon_rmap_pte(page_folio(page), page)) { set_pte_at(mm, pvmw.address, pvmw.pte, entry); goto out_unlock; } diff --git a/mm/migrate_device.c b/mm/migrate_device.c index 39b7754480c6..b6c27c76e1a0 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -202,7 +202,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, if (anon_exclusive) { pte = ptep_clear_flush(vma, addr, ptep); - if (page_try_share_anon_rmap(page)) { + if (folio_try_share_anon_rmap_pte(folio, page)) { set_pte_at(mm, addr, ptep, pte); folio_unlock(folio); folio_put(folio); diff --git a/mm/rmap.c b/mm/rmap.c index 7ab943df34d2..365ac18d8c93 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1836,9 +1836,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, break; } - /* See page_try_share_anon_rmap(): clear PTE first. */ + /* See folio_try_share_anon_rmap(): clear PTE first. */ if (anon_exclusive && - page_try_share_anon_rmap(subpage)) { + folio_try_share_anon_rmap_pte(folio, subpage)) { swap_free(entry); set_pte_at(mm, address, pvmw.pte, pteval); ret = false; @@ -2112,7 +2112,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, pte_t swp_pte; if (anon_exclusive) - BUG_ON(page_try_share_anon_rmap(subpage)); + WARN_ON_ONCE(folio_try_share_anon_rmap_pte(folio, + subpage)); /* * Store the pfn of the page in a special migration @@ -2183,7 +2184,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) && !anon_exclusive, subpage); - /* See page_try_share_anon_rmap(): clear PTE first. */ + /* See folio_try_share_anon_rmap_pte(): clear PTE first. 
*/ if (folio_test_hugetlb(folio)) { if (anon_exclusive && hugetlb_try_share_anon_rmap(folio)) { @@ -2194,7 +2195,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, break; } } else if (anon_exclusive && - page_try_share_anon_rmap(subpage)) { + folio_try_share_anon_rmap_pte(folio, subpage)) { set_pte_at(mm, address, pvmw.pte, pteval); ret = false; page_vma_mapped_walk_done(&pvmw); -- Gitee From dc50b79b3a06a0fb3e4da8eebdf41913256379d5 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:45:03 +0100 Subject: [PATCH 1175/2138] mm/rmap: rename COMPOUND_MAPPED to ENTIRELY_MAPPED ANBZ: #9728 commit e78a13fd16bb9d9712f61be2bd6612a092ce66ea upstream We removed all "bool compound" and RMAP_COMPOUND parameters. Let's remove the remaining "compound" terminology by making COMPOUND_MAPPED match the "folio->_entire_mapcount" terminology, renaming it to ENTIRELY_MAPPED. ENTIRELY_MAPPED is only used when the whole folio is mapped using a single page table entry (e.g., a single PMD mapping a PMD-sized THP). For now, we don't support mapping any THP bigger than that, so ENTIRELY_MAPPED only applies to PMD-mapped PMD-sized THP only. 
Link: https://lkml.kernel.org/r/20231220224504.646757-40-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- Documentation/mm/transhuge.rst | 2 +- mm/internal.h | 6 +++--- mm/rmap.c | 18 +++++++++--------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst index cf81272a6b8b..93c9239b9ebe 100644 --- a/Documentation/mm/transhuge.rst +++ b/Documentation/mm/transhuge.rst @@ -117,7 +117,7 @@ pages: - map/unmap of a PMD entry for the whole THP increment/decrement folio->_entire_mapcount and also increment/decrement - folio->_nr_pages_mapped by COMPOUND_MAPPED when _entire_mapcount + folio->_nr_pages_mapped by ENTIRELY_MAPPED when _entire_mapcount goes from -1 to 0 or 0 to -1. - map/unmap of individual pages with PTE entry increment/decrement diff --git a/mm/internal.h b/mm/internal.h index d96ae6f30840..149cdf9973e9 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -54,12 +54,12 @@ void page_writeback_init(void); /* * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages, - * its nr_pages_mapped would be 0x400000: choose the COMPOUND_MAPPED bit + * its nr_pages_mapped would be 0x400000: choose the ENTIRELY_MAPPED bit * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently * leaves nr_pages_mapped at 0, but avoid surprise if it participates later. 
*/ -#define COMPOUND_MAPPED 0x800000 -#define FOLIO_PAGES_MAPPED (COMPOUND_MAPPED - 1) +#define ENTIRELY_MAPPED 0x800000 +#define FOLIO_PAGES_MAPPED (ENTIRELY_MAPPED - 1) /* * Flags passed to __show_mem() and show_free_areas() to suppress output in diff --git a/mm/rmap.c b/mm/rmap.c index 365ac18d8c93..0066ae501a42 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1142,7 +1142,7 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio, first = atomic_inc_and_test(&page->_mapcount); if (first && folio_test_large(folio)) { first = atomic_inc_return_relaxed(mapped); - first = (first < COMPOUND_MAPPED); + first = (first < ENTIRELY_MAPPED); } if (first) @@ -1152,15 +1152,15 @@ static __always_inline unsigned int __folio_add_rmap(struct folio *folio, case RMAP_LEVEL_PMD: first = atomic_inc_and_test(&folio->_entire_mapcount); if (first) { - nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped); - if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) { + nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); + if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) { *nr_pmdmapped = folio_nr_pages(folio); nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of a remove and another add? 
*/ if (unlikely(nr < 0)) nr = 0; } else { - /* Raced ahead of a remove of COMPOUND_MAPPED */ + /* Raced ahead of a remove of ENTIRELY_MAPPED */ nr = 0; } } @@ -1403,7 +1403,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, } else { /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); - atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED); + atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); SetPageAnonExclusive(&folio->page); __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr); } @@ -1484,7 +1484,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, last = atomic_add_negative(-1, &page->_mapcount); if (last && folio_test_large(folio)) { last = atomic_dec_return_relaxed(mapped); - last = (last < COMPOUND_MAPPED); + last = (last < ENTIRELY_MAPPED); } if (last) @@ -1494,15 +1494,15 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, case RMAP_LEVEL_PMD: last = atomic_add_negative(-1, &folio->_entire_mapcount); if (last) { - nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped); - if (likely(nr < COMPOUND_MAPPED)) { + nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); + if (likely(nr < ENTIRELY_MAPPED)) { nr_pmdmapped = folio_nr_pages(folio); nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED); /* Raced ahead of another remove and an add? */ if (unlikely(nr < 0)) nr = 0; } else { - /* An add of COMPOUND_MAPPED raced ahead */ + /* An add of ENTIRELY_MAPPED raced ahead */ nr = 0; } } -- Gitee From fdc7476d7d80c900b5f7b481c6649278d2f0ab54 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 20 Dec 2023 23:45:04 +0100 Subject: [PATCH 1176/2138] mm: remove one last reference to page_add_*_rmap() ANBZ: #9728 commit 4a8ffab02db55c8a70063c57519cadf72d480ed4 upstream Let's fixup one remaining comment. Note that the only trace remaining of the old rmap interface is in an example in Documentation/trace/ftrace.rst, that we'll just leave alone. 
Link: https://lkml.kernel.org/r/20231220224504.646757-41-david@redhat.com Signed-off-by: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Cc: Muchun Song Cc: Peter Xu Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/internal.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/internal.h b/mm/internal.h index 149cdf9973e9..d971c8e67738 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -710,7 +710,7 @@ folio_within_vma(struct folio *folio, struct vm_area_struct *vma) * should be called with vma's mmap_lock held for read or write, * under page table lock for the pte/pmd being added or removed. * - * mlock is usually called at the end of page_add_*_rmap(), munlock at + * mlock is usually called at the end of folio_add_*_rmap_*(), munlock at * the end of folio_remove_rmap_*(); but new anon folios are managed by * folio_add_lru_vma() calling mlock_new_folio(). */ -- Gitee From 9003a6cc144bbf391c8c6de0b85832b5ae437dad Mon Sep 17 00:00:00 2001 From: Andrew Bresticker Date: Tue, 11 Jun 2024 08:32:16 -0700 Subject: [PATCH 1177/2138] mm/memory: don't require head page for do_set_pmd() ANBZ: #9728 commit ab1ffc86cb5bec1c92387b9811d9036512f8f4eb upstream The requirement that the head page be passed to do_set_pmd() was added in commit ef37b2ea08ac ("mm/memory: page_add_file_rmap() -> folio_add_file_rmap_[pte|pmd]()") and prevents pmd-mapping in the finish_fault() and filemap_map_pages() paths if the page to be inserted is anything but the head page for an otherwise suitable vma and pmd-sized page. Matthew said: : We're going to stop using PMDs to map large folios unless the fault is : within the first 4KiB of the PMD. No idea how many workloads that : affects, but it only needs to be backported as far as v6.8, so we may : as well backport it. 
Link: https://lkml.kernel.org/r/20240611153216.2794513-1-abrestic@rivosinc.com Fixes: ef37b2ea08ac ("mm/memory: page_add_file_rmap() -> folio_add_file_rmap_[pte|pmd]()") Signed-off-by: Andrew Bresticker Acked-by: David Hildenbrand Acked-by: Hugh Dickins Cc: Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/memory.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index 1db774595415..6fa4f68ae2b9 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4584,8 +4584,9 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page) if (!thp_vma_suitable_order(vma, haddr, PMD_ORDER)) return ret; - if (page != &folio->page || folio_order(folio) != HPAGE_PMD_ORDER) + if (folio_order(folio) != HPAGE_PMD_ORDER) return ret; + page = &folio->page; /* * Just backoff if any subpage of a THP is corrupted otherwise -- Gitee From e8c44ae38dd8b1f629bd901d61dbe49c3b59f41e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 5 Jan 2024 16:57:29 +0100 Subject: [PATCH 1178/2138] mm/rmap: silence VM_WARN_ON_FOLIO() in __folio_rmap_sanity_checks() ANBZ: #9728 commit 9c5938694cd0e9e00bdfb7e60900673263daf4d5 upstream Unfortunately, vm_insert_page() and friends and up passing driver-allocated folios into folio_add_file_rmap_pte() using insert_page_into_pte_locked(). While these driver-allocated folios can be compound pages (large folios), they are not proper "rmappable" folios. In these VM_MIXEDMAP VMAs, there isn't really the concept of a reverse mapping, so long-term, we should clean that up and not call into rmap code. For the time being, document how we can end up in rmap code with large folios that are not marked rmappable. 
Link: https://lkml.kernel.org/r/793c5cee-d5fc-4eb1-86a2-39e05686233d@redhat.com Fixes: 68f0320824fa ("mm/rmap: convert folio_add_file_rmap_range() into folio_add_file_rmap_[pte|ptes|pmd]()") Reported-by: syzbot+50ef73537bbc393a25bb@syzkaller.appspotmail.com Closes: https://lkml.kernel.org/r/000000000000014174060e09316e@google.com Signed-off-by: David Hildenbrand Cc: Matthew Wilcox (Oracle) Cc: Ryan Roberts Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- include/linux/rmap.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index f684f2bf9153..3e093c29021a 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -194,8 +194,15 @@ static inline void __folio_rmap_sanity_checks(struct folio *folio, { /* hugetlb folios are handled separately. */ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); - VM_WARN_ON_FOLIO(folio_test_large(folio) && - !folio_test_large_rmappable(folio), folio); + + /* + * TODO: we get driver-allocated folios that have nothing to do with + * the rmap using vm_insert_page(); therefore, we cannot assume that + * folio_test_large_rmappable() holds for large folios. We should + * handle any desired mapcount+stats accounting for these folios in + * VM_MIXEDMAP VMAs separately, and then sanity-check here that + * we really only get rmappable folios. + */ VM_WARN_ON_ONCE(nr_pages <= 0); VM_WARN_ON_FOLIO(page_folio(page) != folio, folio); -- Gitee From 94683dda3ac0d11c1a45f66a437f888907bcaaea Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 22 Jan 2024 18:54:07 +0100 Subject: [PATCH 1179/2138] mm/huge_memory: fix folio_set_dirty() vs. folio_mark_dirty() ANBZ: #9728 commit db44c658f798ad907219f15e033229b8d1aadb93 upstream The correct folio replacement for "set_page_dirty()" is "folio_mark_dirty()", not "folio_set_dirty()". 
Using the latter won't properly inform the FS using the dirty_folio() callback. This has been found by code inspection, but likely this can result in some real trouble. Link: https://lkml.kernel.org/r/20240122175407.307992-1-david@redhat.com Fixes: a8e61d584eda0 ("mm/huge_memory: page_remove_rmap() -> folio_remove_rmap_pmd()") Signed-off-by: David Hildenbrand Cc: Ryan Roberts Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/huge_memory.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index b0c4d3416819..d2246d61c035 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2279,7 +2279,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, page = pmd_page(old_pmd); folio = page_folio(page); if (!folio_test_dirty(folio) && pmd_dirty(old_pmd)) - folio_set_dirty(folio); + folio_mark_dirty(folio); if (!folio_test_referenced(folio) && pmd_young(old_pmd)) folio_set_referenced(folio); folio_remove_rmap_pmd(folio, page, vma); @@ -3429,7 +3429,7 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, } if (pmd_dirty(pmdval)) - folio_set_dirty(folio); + folio_mark_dirty(folio); if (pmd_write(pmdval)) entry = make_writable_migration_entry(page_to_pfn(page)); else if (anon_exclusive) -- Gitee From e112aa712c813beed5ac47a26301ebd9dc59d959 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 22 Jan 2024 18:17:51 +0100 Subject: [PATCH 1180/2138] mm/memory: fix folio_set_dirty() vs. folio_mark_dirty() in zap_pte_range() ANBZ: #9728 commit e4e3df290f65da6cb27dac1309389c916f27db1a upstream The correct folio replacement for "set_page_dirty()" is "folio_mark_dirty()", not "folio_set_dirty()". Using the latter won't properly inform the FS using the dirty_folio() callback. 
This has been found by code inspection, but likely this can result in some real trouble when zapping dirty PTEs that point at clean pagecache folios. Yuezhang Mo said: "Without this fix, testing the latest exfat with xfstests, test cases generic/029 and generic/030 will fail." Link: https://lkml.kernel.org/r/20240122171751.272074-1-david@redhat.com Fixes: c46265030b0f ("mm/memory: page_remove_rmap() -> folio_remove_rmap_pte()") Signed-off-by: David Hildenbrand Reported-by: Ryan Roberts Closes: https://lkml.kernel.org/r/2445cedb-61fb-422c-8bfb-caf0a2beed62@arm.com Reviewed-by: Ryan Roberts Cc: Matthew Wilcox (Oracle) Reviewed-by: Yuezhang Mo Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3749 --- mm/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index 6fa4f68ae2b9..3d5aeb9213e4 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1453,7 +1453,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, delay_rmap = 0; if (!folio_test_anon(folio)) { if (pte_dirty(ptent)) { - folio_set_dirty(folio); + folio_mark_dirty(folio); if (tlb_delay_rmap(tlb)) { delay_rmap = 1; force_flush = 1; -- Gitee From 3a0ee5d144d190df2a2ef2e40ccf6a7767a748a0 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Wed, 21 Aug 2024 14:09:08 +0800 Subject: [PATCH 1181/2138] anolis: configs: add kconfigs baseline framework ANBZ: #8678 This commit add kconfig baseline related documentations and scripts. These documentations and scripts are copied from devel-5.10, based on commit b44fa71e0b7e9 ("anolis: configs: adjust the collapse rules"). 
Signed-off-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3750 --- anolis/Makefile | 7 +- anolis/configs/How-To-Modify-Kconfig.zh.md | 131 ++++ anolis/configs/Makefile | 47 ++ anolis/configs/Makefile.ANCK | 3 + anolis/configs/Makefile.configs | 11 + anolis/configs/README.zh.md | 143 ++++ anolis/configs/scripts/anolis_kconfig.py | 851 +++++++++++++++++++++ anolis/configs/scripts/export_configs.sh | 29 + anolis/configs/scripts/generate_configs.sh | 38 + anolis/configs/scripts/kconfig_import | 15 + anolis/configs/scripts/kconfig_layout | 8 + anolis/configs/scripts/modify_config.sh | 133 ++++ anolis/configs/scripts/move_configs.sh | 48 ++ anolis/configs/scripts/update_configs.sh | 113 +++ 14 files changed, 1575 insertions(+), 2 deletions(-) create mode 100644 anolis/configs/How-To-Modify-Kconfig.zh.md create mode 100644 anolis/configs/Makefile create mode 100644 anolis/configs/Makefile.ANCK create mode 100644 anolis/configs/Makefile.configs create mode 100644 anolis/configs/README.zh.md create mode 100644 anolis/configs/scripts/anolis_kconfig.py create mode 100644 anolis/configs/scripts/export_configs.sh create mode 100644 anolis/configs/scripts/generate_configs.sh create mode 100644 anolis/configs/scripts/kconfig_import create mode 100644 anolis/configs/scripts/kconfig_layout create mode 100644 anolis/configs/scripts/modify_config.sh create mode 100644 anolis/configs/scripts/move_configs.sh create mode 100644 anolis/configs/scripts/update_configs.sh diff --git a/anolis/Makefile b/anolis/Makefile index 8c650c547ddd..bcd2c8039879 100644 --- a/anolis/Makefile +++ b/anolis/Makefile @@ -26,8 +26,10 @@ dist-genrpmtree: dist-check dist-rpms: dist-genrpmtree dist-check sh buildpkg.sh -dist-configs-check: - sh configs/examination/configs-check.sh +DIST_CONFIG_TARGETS := dist-defconfig dist-debug-defconfig dist-configs dist-configs-update dist-configs-move dist-configs-import dist-configs-export dist-configs-modify dist-configs-check dist-configs-help + 
+$(DIST_CONFIG_TARGETS): + make -C configs/ $@ clean: rm -rf $(DIST_OUTPUT) @@ -86,5 +88,6 @@ help: @echo ' DIST_BUILD_NUMBER - the build number for unofficial build, eg: 1/2' @echo ' DIST_DIY - the kernel version for diy build' @echo ' DIST_BUILD_VARIANT & DIST_BUILD_EXTRA - see comments in buildpkg.sh' + @make -C configs/ dist-configs-help export \ No newline at end of file diff --git a/anolis/configs/How-To-Modify-Kconfig.zh.md b/anolis/configs/How-To-Modify-Kconfig.zh.md new file mode 100644 index 000000000000..438bf9305443 --- /dev/null +++ b/anolis/configs/How-To-Modify-Kconfig.zh.md @@ -0,0 +1,131 @@ +本文示例如何修改和新增一个 kconfig 的配置。 + +# 一、 总体方法 +总的来说,您需要以下几步: + +1. 进入到 `anolis/` 目录: + +`cd anolis/` + +2. 修改/新增 kconfig + +假设要在所有架构中将 CONFIG_foo 都只为y,使用该命令: + +`make dist-configs-modify C=CONFIG_foo all=y L=L1` + +如果只在 x86 架构中将其置为 y,而在其他架构中保持关闭,使用该命令: + +`make dist-configs-modify C=CONFIG_foo x86=y others=n L=L1` + +这个命令执行以下动作: + +a. 查找是否已存在现有的 CONFIG_CAN 的配置项,若有,则删除。 + +b. 根据传入的参数,重新生成 CONFIG_CAN 的配置项。 + +c. 根据新的配置关系,重新计算和刷新 kconfig 的依赖关系 + +在使用时,请注意以下几点: + +a. 使用 `C=CONFIG_foo` 的方式来传递 kconfig 名称,而非使用这样的方式: + +`make dist-configs-modify CONFIG_CAN x86=y others=n L=L1` + +这是由 make 命令的语法所限制的。 + +b. 在传递 kconfig 的配置信息时,请注意必须传递层级信息,即 `L=xx` + +3. 确认结果 + +kconfig 的依赖关系是相当复杂的,因此在对单个 kconfig 调整后,可能会出现依赖条件不满足而导致该 kconfig 实际并未开启的情况。 +因此,`make dist-configs-modify`命令会重新计算依赖关系,这可能导致: + +a. 生成一系列新的 kconfig。 + +这些都是由 CONFIG_foo 通过 `select` 或者 `depends on` 关系自动使能的。 +这类新生成的 kconfig,需要人工调整它们到对应的层级(使用`make dist-configs-move`命令)。 + +b. 对特定 kconfig 的修改并未生效。 + +假设 CONFIG_foo 依赖于 CONFIG_bar,而 CONFIG_bar 之前并未打开,那么在重新计算依赖关系后,CONFIG_foo 依然会处于 `not set` 的状态,甚至是 `invisible` 状态(即在最终结果中看不到关于 CONFIG_foo 的任何配置项)。 + +这种情况,需要先定位依赖的 CONFIG_bar,并递归使用 `make dist-configs-modify` 修改 CONFIG_bar 及其依赖的 kconfig,最后再对 CONFIG_foo 进行修改。 + +具体定位的方法,推荐如下: + +1. 生成最终的 .config 文件。 +`cd /path/to/cloud-kernel/; make anolis_defconfig` + +2. 执行 `make menuconfig` 命令 + +3. 
在具体的 tui 界面中,搜索 CONFIG_foo,并通过搜索结果查看对应的依赖关系。 + +# 二、 示例 +我们以使能 `CONFIG_CAN` 为例。 +# 1. 修改 kconfig +``` +cd anolis/; +make dist-configs-modify C=CONFIG_CAN all=y L=L1 +``` +这里,我们将 `CONFIG_CAN` 在所有架构中都打开,且将其层级置为 L1。 + +# 2. 检查结果 + +在调整后,自动使能了一大堆kconfig,我们需要对这些新的 kconfig 调整层级。 +``` +$make dist-configs-modify C=CONFIG_CAN all=y L=L1 +make -C configs/ dist-configs-modify +make[1]: Entering directory '/cloud-kernel/anolis/configs' +remove old file: /cloud-kernel/anolis/configs/L2-OPTIONAL/default/CONFIG_CAN +created new file: /cloud-kernel/anolis/configs/L1-RECOMMEND/default/CONFIG_CAN +refresh configs +collect all old configs... +* generated file: /cloud-kernel/anolis/output/kernel-ANCK-generic-x86.config +* processed file: /cloud-kernel/anolis/output/kernel-ANCK-generic-x86.config +* generated file: /cloud-kernel/anolis/output/kernel-ANCK-debug-x86.config +* processed file: /cloud-kernel/anolis/output/kernel-ANCK-debug-x86.config +* generated file: /cloud-kernel/anolis/output/kernel-ANCK-generic-arm64.config +* processed file: /cloud-kernel/anolis/output/kernel-ANCK-generic-arm64.config +* generated file: /cloud-kernel/anolis/output/kernel-ANCK-debug-arm64.config +* processed file: /cloud-kernel/anolis/output/kernel-ANCK-debug-arm64.config +split new configs... +replace old configs with new configs.... + +****************************************************************************** +There are some UNKNOWN level's new configs. 
+ +CONFIG_CAN_8DEV_USB CONFIG_CAN_DEBUG_DEVICES CONFIG_CAN_GRCAN CONFIG_CAN_ISOTP CONFIG_CAN_MCBA_USB CONFIG_CAN_PHYTIUM CONFIG_CAN_UCAN +CONFIG_CAN_BCM CONFIG_CAN_DEV CONFIG_CAN_GS_USB CONFIG_CAN_J1939 CONFIG_CAN_MCP251X CONFIG_CAN_RAW CONFIG_CAN_VCAN +CONFIG_CAN_CALC_BITTIMING CONFIG_CAN_EMS_USB CONFIG_CAN_GW CONFIG_CAN_KVASER_PCIEFD CONFIG_CAN_MCP251XFD CONFIG_CAN_SJA1000 CONFIG_CAN_VXCAN +CONFIG_CAN_CC770 CONFIG_CAN_ESD_USB2 CONFIG_CAN_HI311X CONFIG_CAN_KVASER_USB CONFIG_CAN_PEAK_PCIEFD CONFIG_CAN_SLCAN CONFIG_CAN_XILINXCAN +CONFIG_CAN_C_CAN CONFIG_CAN_FLEXCAN CONFIG_CAN_IFI_CANFD CONFIG_CAN_M_CAN CONFIG_CAN_PEAK_USB CONFIG_CAN_SOFTING CONFIG_NET_EMATCH_CANID + +Need to classify above configs manually !!! +See: /cloud-kernel/anolis/configs/UNKNOWN +HINT: `make dist-configs-move` can help you. +eg: make dist-configs-move C=CONFIG_CAN* L=L2 + +****************************************************************************** + +The Final Configs After Refresh +default: CONFIG_CAN=y + +****************************************************************************** +make[1]: Leaving directory '/cloud-kernel/anolis/configs' +``` +可以看到,大量与 CONFIG_CAN 有关的 kconfig 被刷新出来了,但是 `CONFIG_CAN` 的结果是符合预期的。 + +这里,我们将这些自动生效的 kconfig,都放入 L2 层级中。 +``` +make dist-configs-move C=CONFIG_CAN* L=L2 +``` + +# 结束 +到这里为止,所以的步骤已完成,可以使用 `git add` 和 `git commit` 命令记录这些变更,并发起 PR 了。 + +# 附:`make dist-configs-move`参数说明 +`make dist-configs-move` 用于在不同的层级之间移动 kconfig。 +参数如下: +- OLD 可选,表示 kconfig 原来所在的层级。默认为 UNKNOWN +- C 必选,表示需要移动的 kconfig,可使用通配符,如 `C=CONFIG_CAN*` +- L 必选,表示新的层级。 \ No newline at end of file diff --git a/anolis/configs/Makefile b/anolis/configs/Makefile new file mode 100644 index 000000000000..797fdec6a21a --- /dev/null +++ b/anolis/configs/Makefile @@ -0,0 +1,47 @@ +include Makefile.configs + +ifeq ($(MAKELEVEL),0) + $(error The config related target cannot be executed directly from the shell.) 
+endif + +dist-defconfig: + @sh scripts/generate_configs.sh generic-${DIST_ARCH} + +dist-debug-defconfig: + @sh scripts/generate_configs.sh debug-${DIST_ARCH} + +dist-configs: + @sh scripts/generate_configs.sh + +dist-configs-update: + @sh scripts/update_configs.sh + +dist-configs-move: + @sh scripts/move_configs.sh + +dist-configs-import: + @DO_IMPORT_CONFIGS=Y sh scripts/update_configs.sh + +dist-configs-export: + @sh scripts/export_configs.sh + +dist-configs-check: + @sh examination/configs-check.sh + +dist-configs-modify: + @sh scripts/modify_config.sh + +dist-configs-help: + @echo '' + @echo '--------------------------------' + @echo 'Generate and update kernel configs' + @echo ' dist-defconfig: - generate anolis default config file of current ARCH, output to $${DIST_OUTPUT}' + @echo ' dist-debug-defconfig: - generate anolis debug default config file of current ARCH, output to $${DIST_OUTPUT}' + @echo ' dist-configs: - generate anolis config files to all arch, output to $${DIST_OUTPUT}' + @echo ' dist-configs-update: - refresh configs' + @echo ' dist-configs-modify: - modify the specified kconfig and refresh configs' + @echo ' dist-configs-move: - adjust the level of kconfig' + @echo ' dist-configs-import: - import legacy kconfig files, and split them into independent small files' + @echo ' dist-configs-export: - export all configs into a xlsx file' + +export diff --git a/anolis/configs/Makefile.ANCK b/anolis/configs/Makefile.ANCK new file mode 100644 index 000000000000..07a1ca0a88e3 --- /dev/null +++ b/anolis/configs/Makefile.ANCK @@ -0,0 +1,3 @@ +DIST_CONFIG_LAYOUTS = anolis/configs/scripts/kconfig_layout +DIST_CONFIG_ACTIONS_REFRESH = anolis/configs/scripts/kconfig_import +DIST_CONFIG_ACTIONS_IMPORTS = anolis/configs/scripts/kconfig_import \ No newline at end of file diff --git a/anolis/configs/Makefile.configs b/anolis/configs/Makefile.configs new file mode 100644 index 000000000000..38b25c1332e1 --- /dev/null +++ b/anolis/configs/Makefile.configs @@ -0,0 
+1,11 @@ +# This file is for kernel kconfig baseline + +# the config levels, do not touch it, please +DIST_LEVELS= L0-MANDATORY L1-RECOMMEND L2-OPTIONAL UNKNOWN + +# the default kernel name, the ANCK downstream distributions could override it +# eg: +# DIST_CONFIG_KERNEL_NAME ?= FOO +DIST_CONFIG_KERNEL_NAME ?= ANCK + +include Makefile.$(DIST_CONFIG_KERNEL_NAME) diff --git a/anolis/configs/README.zh.md b/anolis/configs/README.zh.md new file mode 100644 index 000000000000..265c05ce3baf --- /dev/null +++ b/anolis/configs/README.zh.md @@ -0,0 +1,143 @@ +# ANCK config说明 +# 为什么会有 ANCK kconfig 基线 +## 管理较乱 +原来的 ANCK kconfig 管理较乱,出现了许多问题。比如: + +1. 以为打开了,实际没打开的情况 +比如 [CONFIG_NUMA_AWARE_SPINLOCKS](https://gitee.com/anolis/cloud-kernel/pulls/535) 和 [CONFIG_CK_KABI_SIZE_ALIGN_CHECKS](https://gitee.com/anolis/cloud-kernel/pulls/1627),虽然修改了 anolis_defconfig 文件,但是由于依赖不满足,实际上未成功开启。 + +2. 在 Kconfig 中新增了 kconfig ,但是没有更新 anolis_defconfig 文件 +比如 [CONFIG_VTOA](https://gitee.com/anolis/cloud-kernel/pulls/1749) 和 [CONFIG_SCHED_ACPU](https://gitee.com/anolis/cloud-kernel/pulls/2260) + +3. kconfig依赖错误 +比如 [CONFIG_YITIAN_CPER_RAWDATA](https://gitee.com/anolis/cloud-kernel/pulls/2046),仅与 arm64 arch 相关,但出现在了 x86 的 anolis_defconfig 中。 + +4. 
重要config被错误修改,导致严重的性能问题 +比如 [CONFIG_ARM64_TLB_RANGE 和 CONFIG_ARM64_PTR_AUTH](https://gitee.com/anolis/cloud-kernel/pulls/1960) + +## 变更难以追溯 +之前许多 kconfig 的变更没有及时记录下来,导致在决定 kconfig 是否可以修改时需要仔细斟酌,没有可以参考的历史信息。 +通过 git 回溯信息也有困难,因为不断地刷新 anolis_defconfig 文件,导致很多 kconfig 的位置不断变化,导致需要 git blame 多次才能找到原始的 commit 。 + +## 兼容性 +ANCK 需要将重要 kconfig 高亮出来,作为给 ANCK 下游衍生版本的参考,以保证下游衍生版本与 ANCK 的兼容性。 + +## 逐渐复杂的 kconfig 文件 +随着龙蜥社区的发展,ANCK 的 kconfig 配置文件,在原来仅支持 x86 和 arm64 的 defconfig 和 debug-defconfig 共计 4 个 kconfig 文件的基础上,增加了对龙芯、申威架构的支持,对 核代码覆盖率 gcov 的支持,以及对 arm64 64k 的支持。 +当 kconfig 配置文件增多以后,很容易出现调整某个文件的配置项后,忘记调整其他文件的情况。 +比如该问题:更新config配置时,未同时更新 anolis_defconfig 和 anolis_debug-defconfig +比如:[CONFIG_KVM_INTEL_TDX](https://gitee.com/anolis/cloud-kernel/pulls/818) 和 [CONFIG_AMD_PTDMA](https://gitee.com/anolis/cloud-kernel/pulls/288) + +# kconfig 组织结构说明 +## 背景 +一个具体的 kconfig 配置项,由以下要素决定: +1. dist +产品。表示该 kconfig 是关于哪个产品的配置。比如说 CONFIG_ABC,可能关于 ANCK 的配置是 y,而关于 ANCK 的下游某个衍生版的配置为 m。 +2. level +层级。表示该 kconfig 对当前产品的重要程度,ANCK 划分了 3 个层级(L0/L1/L2),具体内容见后文。 +3. variant +场景。表示该 kconfig 是关于哪个场景的配置,比如是生产环境(generic)、测试环境(debug)、还是覆盖率测试(gcov)等。 +4. arch +架构。表示该 kconfig 是关于当前产品某个场景下,某个具体架构的配置。比如 x86、 arm64、loongarch 等。 +5. name +名称。该 kconfig 的名字,比如 CONFIG_EXT4_FS。 +6. value +值。该 kconfig 的值,比如 `CONFIG_EXT4_FS=m`。 + +举例: +假设当前有内核版本 ANCK,以及它的下游衍生版 FOO,以及配置项 CONFIG_EXT4_FS。 +在不同的产品、场景、架构下对该值的配置可能完全不同,重要程度也不同。 +比如 ANCK 需要在 x86 上要求 CONFIG_EXT4_FS 为y,而在 arm64 是需要它 m 即可,且该选项非常重要,不应该被随意变更。 +以及在衍生版 FOO 上,该文件系统并不重要,因此应该置为 `not set`。 +那么我们可以这么表示: +> Conf[(name="CONFIG_EXT4_FS", dist="ANCK", level="L0", variant="generic", arch="x86")] = "y" +> Conf[(name="CONFIG_EXT4_FS", dist="ANCK", level="L0", variant="generic", arch="arm64")] = "m" +> Conf[(name="CONFIG_EXT4_FS", dist="FOO", level="L2", variant="generic", arch="default")] = "n" + +## 产品说明 +1. ANCK (Anolis Cloud Kernel) +这是 Anolis 的内核,Anolis7、Anolis8、Anolis23 会搭载不同版本的 ANCK 内核。 +2. 
FOO +您可以在 ANCK 现有的代码和 kconfig 基础上进行修改和构建,从而形成一个 ANCK 的下游衍生版本,比如说新的版本名为 FOO。 + +## 分层说明 +ANCK 按照重要程度,将所有的 kconfig 划分为 3 个层级,以便标记重要的 config,为开发者修改 kconfig 时提供参考。 +### L0-MANDATORY +最核心的 kconfig,这类 kconfig 赋予内核最基础的产品化能力,保证内核能作为一个基本的服务器操作系统进行使用。 +这类 kconfig 的变更需要十分谨慎,建议 ANCK 下游衍生版不要去 override 此类配置。 + +入选条件: +1. 有国家标准/行业标准背书。 +2. 对兼容性有着重要影响的 kconfig。具体而言,可分为以下几类: +- 具有不证自明的基础能力支持,比如CONFIG_NET、CONFIG_PCI。 +- 具有广泛的通用使用场景的 kconfig。如CONFIG_NFS_FS,绝大多数服务器操作系统都支持了 nfs。 +- 被主流开源软件所广泛使用/依赖的kconfig。如CONFIG_USERFAULTFD,qemu 热迁移需要该特性。 +3. 有现实案例背书,或者有用户反馈错误配置会导致严重的功能/性能问题的 kconfig。 + +### L1-RECOMMEND +针对特定场景有着重要意义的 kconfig,此类 kconfig 的错误配置将导致该场景出现严重的产品化问题。 +Anolis 会站在云场景和服务器场景的角度配置 L1 层的 kconfig,下游衍生版按照可根据实际业务需求对其酌情 override。 + +入选条件: +1. 有重要的特定业务场景背书。注意是特定场景,如果是通用场景,请放L0。 +2. 在特定场景中因为错误配置,引发过一些事故的。 + +### L2-OPTIONAL +不被关注的 kconfig。 + +此层级 kconfig 其中分为两类: +1. 可以被手动修改的 kconfig。 +此类 kconfig 当前已配置,但是无法确定其对现有场景是否有重要意义,出于兼容性考虑,保持其不变,但将其放置到 L2。 +ANCK 认为它们的变更不会对现有使用场景造成严重影响,可以被任意打开或者关闭,比如 CONFIG_CAN、CONFIG_WIRELESS。 +下游衍生版如有需要,可随意覆盖。 +若后续发现该层级中某些 kconfig 对于某些场景非常重要,可提 PR 申请将其调整至 L1 或 L0。 + +2. 无法被手动修改的 kconfig。 +某些 kconfig 无法被手动调整,只能通过调整其他 kconfig 时通过依赖关系自动 select。关注此类 kconfig 的意义不大,因此将其放置到 L2 中。 +典型的 kconfig,如:CONFIG_ARCH_WANT_XXX,CONFIG_HAVE_XXX + +入选条件: +1. 当前已经配置了,但是说不清具体使用场景和使用价值的 kconfig +2. 不能被手动配置,只能被自动 select 的 kconfig + +### UNKNOWN +尚不明确具体层级的 kconfig。 +不建议将 kconfig 长期归类在此层级中。 + +## 场景说明 +ANCK 典型的场景包括: +1. generic。 +对应生产环境,属于正式上线使用的场景。 +2. debug。 +对应测试环境,在版本发布阶段的测试中使用,通常来说会打开 KASAN、KMEMLEAK、LOCKDEP 之类的检测项,以及时发现内核的相关问题。 +3. gcov。 +对应覆盖率测试的环境,在版本发布阶段的测试中使用。 +4. 64k。 +使用 arm64 64k 页表的内核。 + +## 架构说明 +ANCK 典型的架构包括: +1. x86 +2. arm64 +3. loongarch (龙芯) +4. 
sw_64 (申威) + +# kconfig 目录结构说明 +kconfig 的目录组织结构,是按照上文提到的几要素来的。具体来说, +kconfig 目录位于 $(srctree)/anolis/configs 中,共分为以下几类: +- scripts/ ,用于存放于 kconfig 有关的脚本文件,开发者通常无需关注 +- metadata/ ,用于存放 kconfig 的元数据信息。 + - metadata/description,关于 kconfig 的描述信息 + - metadata/changelog, 关于 kconfig 的变更记录 +- L*/ ,以分层方式存放的 kconfig 配置,用于生产环境 + - L*/{x86,arm64,...}, 按架构存放 kconfig 的配置 +- custom-overrides/,用于存放除生产环境以外的其他场景的差异化 kconfig + - custom-overrides/{debug, gcov},与 debug/gcov 有关的,与生产环境有差异的 kconfig + - custom-overrides/{debug, gcov}/{default, x86, arm64}.config, 与 debug/gcov 有关的,与生产环境有差异的,通用/x86 特有/arm64 特有的 kconfig +- OVERRIDE/ ,为 ANCK 衍生版提供的,用于存放覆盖 ANCK 的基础配置的目录 + - OVERRIDE/FOO,衍生版 FOO 相对于 ANCK 的差异化配置 + - OVERRIDE/FOO/L*/,衍生版 FOO 以分层方式存放 kconfig 的配置 + ... + +## 如何更新 kconfig +请参考 How-To-Modify-Kconfig.zh.md diff --git a/anolis/configs/scripts/anolis_kconfig.py b/anolis/configs/scripts/anolis_kconfig.py new file mode 100644 index 000000000000..a84ee594a4f3 --- /dev/null +++ b/anolis/configs/scripts/anolis_kconfig.py @@ -0,0 +1,851 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# +# The core script for ANCK kconfig baseline +# It is not recommended to call directly. 
+# +# Copyright (C) 2023 Qiao Ma + +import argparse, re, os, glob, shutil, copy +from typing import List, Dict, Type, Callable, Tuple +import json +from collections import Counter +import fnmatch +import functools + +def die(*args, **kwargs): + print(*args, **kwargs) + exit(1) + +class Rules(): + @staticmethod + def levels() -> List[str]: + return ["L0-MANDATORY", "L1-RECOMMEND", "L2-OPTIONAL", "UNKNOWN"] + + @staticmethod + def base_dist(): + return "ANCK" + + @staticmethod + def as_config_text(name: str, value: str) -> str: + if value is None or value == "n": + return f"# {name} is not set\n" + else: + return f"{name}={value}\n" + +class PathIterContext(): + dist: str + level: str + arch: str + subarch: str + name: str + path: str + data: any + + def __init__(self, data: any, dist: str, level: str, arch: str, subarch: str, name: str, path: str) -> None: + self.data = data + self.dist = dist + self.level = level + self.arch = arch + self.subarch = subarch + self.name = name + self.path = path + +class PathManager(): + @staticmethod + def dists(top_dir: str, dists: List[str] = None) -> List[str]: + dist_list = [Rules.base_dist()] + override_dir = os.path.join(top_dir, "OVERRIDE") + if os.path.exists(override_dir): + dist_list.extend(os.listdir(override_dir)) + + if dists is None: + return dist_list + return list(set(dist_list).intersection(set(dists))) + + @staticmethod + def dist_to_path(top_dir: str, dist: str) -> str: + if dist == Rules.base_dist(): + return top_dir + return os.path.join(top_dir, "OVERRIDE", dist) + + @staticmethod + def levels(dist_dir: str, levels: List[str] = None) -> List[str]: + all_levels = [] + for d in os.listdir(dist_dir): + if not os.path.isdir(os.path.join(dist_dir, d)): + continue + if not re.match('^L[0-9].*|UNKNOWN', d): + continue + all_levels.append(d) + + if levels is None: + return all_levels + return list(set(all_levels).intersection(set(levels))) + + @staticmethod + def archs(variant_dir: str, archs: List[str] = None) -> 
List[str]: + all_archs = os.listdir(variant_dir) + return all_archs if archs is None else list(set(all_archs).intersection(set(archs))) + + @staticmethod + def __for_each_arch(level_dir: str, data: any, func: Callable[[PathIterContext], None], dist: str, level: str, archs: List[str] = None, subarchs: List[str] = None): + for arch_dir in os.listdir(level_dir): + if "-" in arch_dir: + arch, subarch = arch_dir.split("-", maxsplit=1) + else: + arch = arch_dir + subarch = None + if archs is not None and arch not in archs: + continue + if subarchs is not None and subarch is not None and subarch not in subarchs: + continue + full_arch_dir = os.path.join(level_dir, arch_dir) + for conf in os.listdir(full_arch_dir): + path = os.path.join(full_arch_dir, conf) + context = PathIterContext(data, dist, level, arch, subarch, conf, path) + func(context) + + @staticmethod + def for_each(top_dir: str, data: any, func: Callable[[PathIterContext], None], dists: List[str] = None, levels: List[str] = None, archs: List[str] = None, subarchs: List[str] = None): + for dist in PathManager.dists(top_dir, dists): + dist_dir = PathManager.dist_to_path(top_dir, dist) + for level in PathManager.levels(dist_dir, levels): + level_dir = os.path.join(dist_dir, level) + if level != "UNKNOWN": + PathManager.__for_each_arch(level_dir, data, func, dist, level, archs, subarchs) + else: + for conf in os.listdir(level_dir): + PathManager.__for_each_arch(os.path.join(level_dir, conf), data, func, dist, level, archs, subarchs) + + @staticmethod + def as_level_dir(top_dir: str, dist: str, level: str): + path = PathManager.dist_to_path(top_dir, dist) + path = os.path.join(path, level) + return path + + @staticmethod + def as_path(top_dir: str, dist: str, level: str, arch: str, subarch: str, name: str): + path = PathManager.as_level_dir(top_dir, dist, level) + if level == "UNKNOWN": + path = os.path.join(path, name) + if subarch is None: + path = os.path.join(path, arch, name) + else: + path = 
os.path.join(path, f"{arch}-{subarch}", name) + return path + + +def default_args_func(args): + pass + +class LevelInfo(): + info: Dict[str, str] + + def __init__(self) -> None: + self.info = {} + + def get(self, conf: str) -> str: + return self.info.get(conf, "UNKNOWN") + + def merge_with_base(self, base: Type["LevelInfo"]): + if base is None: + return + for name, level in base.info.items(): + if name not in self.info: + self.info[name] = level + + @staticmethod + def __collect_info(ctx: PathIterContext): + level_info: Dict[str, str] = ctx.data + level_info[ctx.name] = ctx.level + + @staticmethod + def build(path: str, dist: str) -> Type["LevelInfo"]: + info = LevelInfo() + PathManager.for_each(path, info.info, LevelInfo.__collect_info, dists=[dist]) + return info + + @staticmethod + def load(file: str): + info = LevelInfo() + with open(file) as f: + info.info = json.loads(f.read()) + return info + +class Config(): + name: str + value: str + arch: str + subarch: str + level: str + dist: str + + def __init__(self, name: str, value: str, dist: str = None, level: str = "UNKNOWN", arch: str = None, subarch: str = None) -> None: + self.name = name + self.value = value + self.dist = dist + self.level = level + self.arch = arch + self.subarch = subarch + + @staticmethod + def from_text(line: str, dist: str, arch: str, subarch: str) -> Type["Config"] : + RE_CONFIG_SET = r'^(CONFIG_\w+)=(.*)$' + RE_CONFIG_NOT_SET = r'^# (CONFIG_\w+) is not set$' + + if re.match(RE_CONFIG_SET, line): + obj = re.match(RE_CONFIG_SET, line) + return Config(name=obj.group(1), value=obj.group(2), dist=dist, arch=arch, subarch=subarch) + elif re.match(RE_CONFIG_NOT_SET, line): + obj = re.match(RE_CONFIG_NOT_SET, line) + return Config(name=obj.group(1), value="n", dist=dist, arch=arch, subarch=subarch) + return None + + def as_text(self) -> str: + return Rules.as_config_text(self.name, self.value) + + def as_path(self, top_dir: str) -> str: + return PathManager.as_path(top_dir, self.dist, 
self.level, self.arch, self.subarch, self.name) + + def as_file(self, top_dir: str): + text = self.as_text() + path = self.as_path(top_dir) + os.makedirs(os.path.dirname(path), exist_ok=True) + with open(path, "w") as f: + f.write(text) + +class ConfigList(): + arch: str + dist: str + subarch: str + configs: Dict[str, Config] + + def __init__(self, dist: str, arch: str, subarch: str = None) -> None: + self.dist = dist + self.arch = arch + self.subarch = subarch + self.configs = {} + + def lists(self) -> List[Config]: + return list(self.configs.values()) + + def diff_to_base(self, base: Type["ConfigList"], level_info: LevelInfo): + same_configs = [] + for name, conf in self.configs.items(): + if name not in base.configs: + continue + if conf.value != base.configs[name].value: + continue + same_configs.append(name) + + for name in base.configs: + if name not in self.configs: + self.configs[name] = Config(name, value=None, dist=self.dist, arch=self.arch, level=level_info.get(name)) + + for name in same_configs: + del self.configs[name] + + def merge_with_base(self, base: Type["ConfigList"]): + if base is None: + return + for name, conf in base.configs.items(): + if name not in self.configs: + self.configs[name] = conf + + def dump_as_file(self, top_dir: str): + for conf in self.configs.values(): + conf.as_file(top_dir) + + def as_text(self): + text = "" + for conf in self.configs.values(): + text = text + conf.as_text() + return text + + @staticmethod + def from_path(path: str, dist: str, arch: str, subarch: str = None, level_info: LevelInfo = None, level: str = None) -> Type["ConfigList"]: + if level_info is not None and level is not None: + die("the argument level_info and level cannot be passed together") + if level_info is None and level is None: + level = "UNKNOWN" + + conflist = ConfigList(dist, arch, subarch) + with open(path) as f: + for line in f.readlines(): + conf = Config.from_text(line, dist, arch, subarch) + if conf is None: + continue + if level_info is 
not None: + conf.level = level_info.get(conf.name) + else: + conf.level = level + conflist.configs[conf.name] = conf + return conflist + +class LevelCollector(): + @staticmethod + def do_collect(args): + info = LevelInfo.build(args.top_dir, args.dist) + if args.base is not None: + base_info = None + for base in args.base: + cur_base = LevelInfo.build(args.top_dir, base) + cur_base.merge_with_base(base_info) + base_info = cur_base + info.merge_with_base(base_info) + print(json.dumps(info.info, ensure_ascii=False, indent=2)) + +class Importer(): + @staticmethod + def do_import(args): + level_info = LevelInfo.load(args.level_info) + conflist = ConfigList.from_path(path=args.config, dist=args.dist, arch=args.arch, subarch=args.subarch, level_info=level_info) + conflist.dump_as_file(args.top_dir) + +class Generator(): + @staticmethod + def collect_config(ctx: PathIterContext): + conflist : ConfigList = ctx.data + cur_conf = ConfigList.from_path(path=ctx.path, dist=ctx.dist, arch=ctx.arch, subarch=ctx.subarch) + conflist.merge_with_base(cur_conf) + + @staticmethod + def do_generate(args): + dist = args.dist + archdir = args.archdir + if "-" in archdir: + arch, subarch = archdir.split("-", maxsplit=1) + else: + arch, subarch = archdir, None + conflist = ConfigList(dist, arch, subarch) + subarchs = None if subarch is None else [subarch] + PathManager.for_each(args.top_dir, conflist, Generator.collect_config, dists=[dist], archs=[arch], subarchs=subarchs) + print(conflist.as_text()) + +class Merger(): + @staticmethod + def do_merge(args): + conflist = None + for file in args.file: + cur_conflist = ConfigList.from_path(file, dist="", arch="") + cur_conflist.merge_with_base(conflist) + conflist = cur_conflist + + print(conflist.as_text()) + +class Collapser(): + # for configs, the keys are: conf_name, arch + configs: Dict[str, Dict[str, Config]] + archs: set + + def __init__(self) -> None: + self.configs = {} + self.archs = set() + + @staticmethod + def __do_collect_info(ctx: 
PathIterContext): + c: Collapser = ctx.data + configs: Dict[str, Dict[str, Config]] = c.configs + archs = c.archs + + full_arch = ctx.arch + if ctx.subarch is not None: + full_arch = f"{ctx.arch}-{ctx.subarch}" + archs.add(full_arch) + + conflist = ConfigList.from_path(path=ctx.path, dist=ctx.dist, arch=ctx.arch, subarch=ctx.subarch, level=ctx.level) + for conf in conflist.lists(): + if conf.name not in configs: + configs[conf.name] = {} + configs[conf.name][full_arch] = conf + + @staticmethod + def __collapse_one_config(arch_confs: Dict[str, Config], archs: set, top_dir: str): + # the default value is only depends on arch x86 and arm64. + # For example: + # 1. the configs "x86 y, arm64 y, sw_64 m/n" will be collpased to "default y, sw_64 m/n" + # 2. the configs "x86 y, arm64 y, sw_64 y" will be collpased to "default y" + # 3. the configs "x86 y, arm64 m, sw_64 y" will not be collpased + if "x86" not in arch_confs or "arm64" not in arch_confs: + return + if arch_confs["x86"].value != arch_confs["arm64"].value: + return + common_conf = copy.deepcopy(arch_confs["x86"]) + common_conf.arch = "default" + common_conf.subarch = None + + for arch in archs: + if arch in arch_confs: + conf = arch_confs[arch] + if conf.value == common_conf.value: + os.remove(conf.as_path(top_dir)) + else: + miss_conf = copy.deepcopy(common_conf) + miss_conf.arch = arch + miss_conf.subarch = None + miss_conf.value = "n" + miss_conf.as_file(top_dir) + common_conf.as_file(top_dir) + + @staticmethod + def do_collapse(args): + c = Collapser() + PathManager.for_each(args.top_dir, c, Collapser.__do_collect_info, dists=[args.dist]) + + for arch_confs in c.configs.values(): + Collapser.__collapse_one_config(arch_confs, c.archs, args.top_dir) + +class Striper(): + configs: Dict[str, List[str]] + file_list: List[str] + + def __init__(self, file_list: List[str]) -> None: + self.configs = {} + self.file_list = file_list + + for i, path in enumerate(file_list): + conflist = ConfigList.from_path(path, 
dist="", arch="") + for conf in conflist.lists(): + name = conf.name + if name not in self.configs: + self.configs[name] = [None]*i + self.configs[name].append(conf.value) + for conf_values in self.configs.values(): + if len(conf_values) != i+1: + conf_values.append(None) + + def strip(self, base: Type["Striper"]): + disappear_confs = [] + same_confs = [] + for name, conf_values in base.configs.items(): + if name not in self.configs: + disappear_confs.append(name) + continue + if conf_values == self.configs[name]: + same_confs.append(name) + + for name in same_confs: + del self.configs[name] + + num_files = len(self.file_list) + for name in disappear_confs: + self.configs[name] = [None]*num_files + + def override_files(self): + for i, path in enumerate(self.file_list): + with open(path, "w") as f: + for name, values in self.configs.items(): + f.write(Rules.as_config_text(name, values[i])) + + @staticmethod + def do_strip(args): + if len(args.base) != len(args.target): + die("the target config files do not match base") + base = Striper(args.base) + target = Striper(args.target) + target.strip(base) + target.override_files() + +class ImportOpTranslater(): + files: Dict[str, str] + files_info: Dict[Tuple[str, str, str, str], str] + level_info_path: str + input_dir: str + output_dir: str + src_root: str + + def __init__(self, input_dir: str, output_dir: str, src_root: str) -> None: + self.files = {} + self.files_info = {} + self.input_dir = input_dir + self.output_dir = output_dir + self.src_root = src_root + self.level_info_path = "" + + def __cmd(self, cmd: str): + return f"python3 {__file__} {cmd} " + + def __op_file(self, args: str): + # FILE dist arch variant file_path REFRESH/NOREFRESH + dist, arch, subarch, path, refresh = args.split() + new_path = os.path.join(self.output_dir, os.path.basename(path)) + if subarch != "null": + self.files[f"{dist}-{arch}-{subarch}"] = new_path + self.files_info[(dist, arch, subarch)] = new_path + else: + 
self.files[f"{dist}-{arch}"] = new_path + self.files_info[(dist, arch, None)] = new_path + cmd = f"cp {path} {new_path}\n" + if refresh == "REFRESH": + cmd += f"KCONFIG_CONFIG={new_path} ARCH={arch} CROSS_COMPILE=scripts/dummy-tools/ " + cmd += f"make -C {self.src_root} olddefconfig > /dev/null\n" + cmd += f"rm -f {new_path}.old \n" + return cmd + + def __op_levelinfo(self, args: str): + #LEVELINFO target_dist [base_dist [base_dist ...]] + target_dist, base_dists = args.split(maxsplit=1) + cmd = self.__cmd("collect_level") + cmd += f"--dist {target_dist} --top_dir {self.input_dir} " + for base in base_dists.split(): + if base == "null": + continue + cmd += f"--base {base} " + self.level_info_path = os.path.join(self.output_dir, "level_info") + cmd += f"> {self.level_info_path}" + return cmd + + def __op_import(self, args: str): + # IMPORT file + file = args + subarch = None + dist, arch = file.split("-", maxsplit=1) + if "-" in arch: + arch, subarch = arch.split("-", maxsplit=1) + + cmd = self.__cmd("import") + cmd += f"--dist {dist} --arch {arch} " + if subarch is not None: + cmd += f"--subarch {subarch} " + cmd += f"--level_info {self.level_info_path} --top_dir {self.output_dir} " + cmd += f"{self.files[file]} " + return cmd + + def __op_collapse(self, args: str): + # COLLAPSE dist + dist = args + cmd = self.__cmd("collapse") + cmd += f"--dist {dist} --top_dir {self.output_dir}" + return cmd + + def __op_strip(self, args: str): + # STRIP target_dist base_dist + target_dist, base_dist = args.split() + copy_cmd = "" + cmd = self.__cmd("strip") + for (dist, arch, subarch), target_path in self.files_info.items(): + if dist != target_dist: + continue + try: + copy_cmd += f"cp {target_path} {target_path}.bak\n" + base_path = self.files_info[(base_dist, arch, subarch)] + except: + full_arch = arch + if subarch is not None: + full_arch = f"{arch}-{subarch}" + die(f"strip error. 
cannot find file {base_dist}-{full_arch} to match {target_dist}-{full_arch}") + cmd += f"--base {base_path} --target {target_path} " + return copy_cmd + cmd + + def __translate_one(self, op:str, args: str): + cmd = "" + if op == "FILE": + cmd = self.__op_file(args) + elif op == "LEVELINFO": + cmd = self.__op_levelinfo(args) + elif op == "IMPORT": + cmd = self.__op_import(args) + elif op == "COLLAPSE": + cmd = self.__op_collapse(args) + elif op == "STRIP": + cmd = self.__op_strip(args) + else: + die(f"unknown op {op}") + print(cmd) + + @staticmethod + def do_translate(args): + t = ImportOpTranslater(input_dir=args.input_dir, output_dir=args.output_dir, src_root=args.src_root) + with open(args.path) as f: + for i, line in enumerate(f.readlines()): + line = line.strip() + if line.startswith("#") or line == "": + continue + (op, action_args) = line.split(maxsplit=1) + try: + t.__translate_one(op, action_args) + except: + die(f"parse error in {args.path}:{i+1}\n> {line}") + +class KconfigLayoutEntry(): + name: str + dist: str + arch: str + subarch: str + base_dist: str + base_name: str + # (dist, variant, arch) + layout_list: List[Tuple[str, str, str]] + + def __init__(self, name: str, dist: str, arch: str, base_dist: str, base_name: str) -> None: + self.name = name + self.dist = dist + self.arch = arch + self.base_dist = base_dist + self.base_name = base_name + self.layout_list = [] + + @staticmethod + def from_text(line: str): + cur, arch, base, layouts = line.split() + dist, name = cur.split("/") + if base == "null": + base_dist = None + base_name = None + else: + base_dist, base_name = base.split("/") + entry = KconfigLayoutEntry(name, dist, arch,base_dist, base_name) + for l in layouts.split(";"): + variant, arch = l.split("/") + entry.layout_list.append((dist, variant, arch)) + return entry + +class KconfigLayout(): + # (dist, file_name) + layouts: Dict[Tuple[str, str], KconfigLayoutEntry] + + def __init__(self) -> None: + self.layouts = {} + + @staticmethod + def 
from_path(path: str) -> Type["KconfigLayout"]: + l = KconfigLayout() + with open(path) as f: + for line in f.readlines(): + line = line.strip() + if line.startswith("#") or line == "": + continue + e = KconfigLayoutEntry.from_text(line) + l.layouts[(e.dist, e.name)] = e + + if e.base_dist is None: + continue + if (e.base_dist, e.base_name) not in l.layouts: + die(f"cannot find {e.base_dist}/{e.base_name} while parsing {e.dist}/{e.name}") + e.layout_list = l.layouts[(e.base_dist, e.base_name)].layout_list + e.layout_list + return l + +class GenerateTranslater(): + input_dir: str + output_dir: str + src_root: str + + def __init__(self, args) -> None: + self.input_dir = args.input_dir + self.output_dir = args.output_dir + self.src_root = args.src_root + + def __cmd(self, cmd: str): + return f"python3 {__file__} {cmd} " + + def __translate_one(self, e: KconfigLayoutEntry, tmp_dir: str): + files = [] + cmd = "" + for dist, variant, arch in e.layout_list: + if variant == "generic": + # for geneic configs, generate them + file = os.path.join(tmp_dir, f"kernel-partial-{dist}-{variant}-{arch}.config") + cmd += self.__cmd("generate") + cmd += f"--top_dir {self.input_dir} --dist {dist} --archdir {arch}" + cmd += f"> {file} \n" + files.append(file) + else: + dist_path = PathManager.dist_to_path(self.input_dir, dist) + file = os.path.join(dist_path, "custom-overrides", variant, f"{arch}.config") + if os.path.exists(file): + files.append(file) + + # merge all partial configs + final_path = os.path.join(self.output_dir, f"kernel-{e.dist}-{e.name}.config") + cmd += self.__cmd("merge") + cmd += " ".join(files) + cmd += f" > {final_path} \n" + + # refresh configs + cmd += f"echo \"* generated file: {final_path}\"\n" + cmd += f"KCONFIG_CONFIG={final_path} ARCH={e.arch} CROSS_COMPILE=scripts/dummy-tools/ " + cmd += f"make -C {self.src_root} olddefconfig > /dev/null\n" + cmd += f"rm -f {final_path}.old \n" + cmd += f"echo \"* processed file: {final_path}\"\n" + + return cmd + + 
@staticmethod + def do_translate(args): + cmd = "" + t = GenerateTranslater(args) + l = KconfigLayout.from_path(args.layout) + + tmp_dir = os.path.join(args.output_dir, "tmp") + cmd += f"mkdir -p {tmp_dir}\n" + if args.target is not None: + dist, file_name = args.target.split("/", maxsplit=1) + if (dist, file_name) not in l.layouts: + die(f"cannot find config layout info for {dist}/{file_name}") + cmd += t.__translate_one(l.layouts[((dist, file_name))], tmp_dir) + else: + for e in l.layouts.values(): + cmd += t.__translate_one(e, tmp_dir) + cmd += f"rm -rf {tmp_dir}" + print(cmd) + +class Mover(): + """move configs from old level to new level""" + config_patterns: List[str] + new_level: str + top_dir: str + + def __init__(self, top_dir: str, new_level: str, config_patterns: List[str]) -> None: + self.top_dir = top_dir + self.new_level = new_level + self.config_patterns = config_patterns + + @staticmethod + def get_level(level: str) -> str: + target_level = "" + for l in Rules.levels(): + if l.startswith(level): + if target_level != "": + die(f"the level {level} is ambiguous") + target_level = l + + if target_level == "": + die(f"unkonw level {level}") + return target_level + + @staticmethod + def __move(ctx: PathIterContext): + m : Mover = ctx.data + for config_pattern in m.config_patterns: + if fnmatch.fnmatch(ctx.name, config_pattern): + new_path = PathManager.as_path(m.top_dir, ctx.dist, m.new_level, ctx.arch, ctx.subarch, ctx.name) + os.makedirs(os.path.dirname(new_path), exist_ok=True) + shutil.move(ctx.path, new_path) + print("* move: {} -> {}".format(ctx.path.replace(m.top_dir, "", 1), new_path.replace(m.top_dir, "", 1))) + return + + @staticmethod + def __remove_empty_dirs(dir_path: str): + for root, dirs, _ in os.walk(dir_path, topdown=False): + for name in dirs: + cur_dir_path = os.path.join(root, name) + if len(os.listdir(cur_dir_path)) == 0: + os.rmdir(cur_dir_path) + + @staticmethod + def do_move(args): + old_level = Mover.get_level(args.old) + 
new_level = Mover.get_level(args.new_level) + m = Mover(args.top_dir, new_level, args.config_name) + PathManager.for_each(args.top_dir, m, Mover.__move, dists=[args.dist], levels=[old_level]) + level_dir = PathManager.as_level_dir(args.top_dir, args.dist, args.old) + Mover.__remove_empty_dirs(level_dir) + +class Exporter(): + # conf_name, file_name, value + configs: Dict[str, Dict[str, str]] + + def __init__(self) -> None: + self.configs = {} + + def __save_as_xlsx(self, columns: List[str], output: str): + import pandas + if not output.endswith(".xlsx"): + output+=".xlsx" + + writer = pandas.ExcelWriter(output, engine="openpyxl") + data = pandas.DataFrame.from_dict(list(self.configs.values())) + data = data[columns] + data.to_excel(writer, index=False) + writer.save() + + @staticmethod + def do_export(args): + e = Exporter() + levelinfo = LevelInfo.load(args.level_info) + columns = ["name", "level"] + for file in args.files: + file_name = os.path.basename(file) + columns.append(file_name) + with open(file) as f: + conf_list = ConfigList.from_path(file, dist="", arch="", level_info=levelinfo) + for c in conf_list.lists(): + if c.name not in e.configs: + e.configs[c.name] = {} + e.configs[c.name][file_name] = c.value + e.configs[c.name]["level"] = c.level + e.configs[c.name]["name"] = c.name + e.__save_as_xlsx(columns, args.output) + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='process configs') + parser.set_defaults(func=default_args_func) + subparsers = parser.add_subparsers() + + level_collector = subparsers.add_parser('collect_level', description="collect level information") + level_collector.add_argument("--dist", required=True, help="the dist") + level_collector.add_argument("--top_dir", required=True, help="the dist") + level_collector.add_argument("--base", nargs="*", help="the base dist level info") + level_collector.set_defaults(func=LevelCollector.do_collect) + + importer = subparsers.add_parser('import', description="import 
new configs") + importer.add_argument("--dist", required=True, help="the dist") + importer.add_argument("--arch", required=True, help="the arch") + importer.add_argument("--subarch", help="the subarch") + importer.add_argument("--level_info", required=True, help="the level info ouputed by subcmd collect_level") + importer.add_argument("--top_dir", required=True, help="the output top dir") + importer.add_argument("config", help="the config file") + importer.set_defaults(func=Importer.do_import) + + generator = subparsers.add_parser("generate", description="generate configs") + generator.add_argument("--top_dir", required=True, help="the top dir to store configs") + generator.add_argument("--dist", help="the dist") + generator.add_argument("--archdir", help="the arch directory, be like \{arch\}-\{subarch\}") + generator.set_defaults(func=Generator.do_generate) + + merger = subparsers.add_parser("merge", description="merge with configs") + merger.add_argument("file", nargs="+", help="the config files") + merger.set_defaults(func=Merger.do_merge) + + collapser = subparsers.add_parser("collapse", description="collapse configs") + collapser.add_argument("--dist", required=True, help="the dist") + collapser.add_argument("--top_dir", required=True, help="the top dir to store configs") + collapser.set_defaults(func=Collapser.do_collapse) + + striper = subparsers.add_parser("strip", description="strip repeated configs") + striper.add_argument("--base", action='append', default=[], help="the base config files") + striper.add_argument("--target", action='append', default=[], help="the target config files") + striper.set_defaults(func=Striper.do_strip) + + import_translater = subparsers.add_parser("import_tanslate", description="import operations translater") + import_translater.add_argument("--input_dir", required=True, help="the dir to store old configs, used for collect level infos") + import_translater.add_argument("--output_dir", required=True, help="the dir to store new 
configs") + import_translater.add_argument("--src_root", required=True, help="the dir of kernel source") + import_translater.add_argument("path", help="the import scripts") + import_translater.set_defaults(func=ImportOpTranslater.do_translate) + + generate_translater = subparsers.add_parser("generate_translate", description="generate operations translater") + generate_translater.add_argument("--input_dir", required=True, help="the dir to store old configs, used for collect level infos") + generate_translater.add_argument("--output_dir", required=True, help="the dir to store new configs") + generate_translater.add_argument("--src_root", required=True, help="the dir of kernel source") + generate_translater.add_argument("--target", help="the target config file, like: /") + generate_translater.add_argument("layout", help="the kconfig layout file") + generate_translater.set_defaults(func=GenerateTranslater.do_translate) + + mover = subparsers.add_parser("move", description="move configs to new level") + mover.add_argument("--old", default="UNKNOWN", help="the config's old level dir, default is UNKNOWN") + mover.add_argument("--dist", required=True, help="the dist") + mover.add_argument("--top_dir", required=True, help="the top dir to store configs") + mover.add_argument("config_name", nargs="+", help="the config name") + mover.add_argument("new_level", help="the new level") + mover.set_defaults(func=Mover.do_move) + + exporter = subparsers.add_parser('export', description="export to excel format") + exporter.add_argument("files", nargs="+", help="the config files") + exporter.add_argument("--output", required=True, help="the output name") + exporter.add_argument("--level_info", required=True, help="the level info") + exporter.set_defaults(func=Exporter.do_export) + + args = parser.parse_args() + args.func(args) \ No newline at end of file diff --git a/anolis/configs/scripts/export_configs.sh b/anolis/configs/scripts/export_configs.sh new file mode 100644 index 
000000000000..67880c39fc61 --- /dev/null +++ b/anolis/configs/scripts/export_configs.sh @@ -0,0 +1,29 @@ +#! /bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# To export kconfigs as xlsx format. +# +# Copyright (C) 2023 Qiao Ma + +set -e + +SCRIPT_DIR=$(realpath $(dirname $0)) +BASE_CONFIG_DIR=$(realpath ${SCRIPT_DIR}/..) +FILE_LIST=${DIST_OUTPUT}/file_list +LEVEL_INFO=${DIST_OUTPUT}/level_info + +mkdir -p ${DIST_OUTPUT} + +sh ${SCRIPT_DIR}/generate_configs.sh | tee ${FILE_LIST} + +python3 ${SCRIPT_DIR}/anolis_kconfig.py collect_level --top_dir ${BASE_CONFIG_DIR} \ + --dist ${DIST_CONFIG_KERNEL_NAME} > ${LEVEL_INFO} + +files=$(cat ${FILE_LIST} | grep "generated" | awk '{print $4}' | xargs) + +python3 ${SCRIPT_DIR}/anolis_kconfig.py export \ + --level_info ${LEVEL_INFO} \ + --output ${DIST_OUTPUT}/configs.xlsx\ + ${files} + +echo "* file generated: ${DIST_OUTPUT}/configs.xlsx" diff --git a/anolis/configs/scripts/generate_configs.sh b/anolis/configs/scripts/generate_configs.sh new file mode 100644 index 000000000000..95f2190189d3 --- /dev/null +++ b/anolis/configs/scripts/generate_configs.sh @@ -0,0 +1,38 @@ +#! /bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Generate the whole kconfig files. 
+# +# Copyright (C) 2023 Qiao Ma + +set -e + +SCRIPT_DIR=$(realpath $(dirname $0)) +FILE_LIST=${DIST_OUTPUT}/file_list + +mkdir -p ${DIST_OUTPUT} + +if [ -z "$@" ]; then + python3 ${SCRIPT_DIR}/anolis_kconfig.py generate_translate \ + --input_dir ${SCRIPT_DIR}/../ \ + --output_dir ${DIST_OUTPUT} \ + --src_root ${DIST_SRCROOT} \ + ${DIST_SRCROOT}/${DIST_CONFIG_LAYOUTS} > ${DIST_OUTPUT}/generate.sh +else + for target in $@ + do + python3 ${SCRIPT_DIR}/anolis_kconfig.py generate_translate \ + --input_dir ${SCRIPT_DIR}/../ \ + --output_dir ${DIST_OUTPUT} \ + --src_root ${DIST_SRCROOT} \ + --target ${DIST_CONFIG_KERNEL_NAME}/${target} \ + ${DIST_SRCROOT}/${DIST_CONFIG_LAYOUTS} > ${DIST_OUTPUT}/generate.sh + done +fi + +sh ${DIST_OUTPUT}/generate.sh | tee ${FILE_LIST} + +if [ "${DIST_DO_GENERATE_DOT_CONFIG}" == "Y" ]; then + file=$(cat ${FILE_LIST} | grep "generated" | awk '{print $4}' | head -1) + cp -f ${file} ${DIST_SRCROOT}.config +fi diff --git a/anolis/configs/scripts/kconfig_import b/anolis/configs/scripts/kconfig_import new file mode 100644 index 000000000000..b55ee6847d7d --- /dev/null +++ b/anolis/configs/scripts/kconfig_import @@ -0,0 +1,15 @@ +# FILE dist arch subarch file_path REFRESH/NOREFRESH +# LEVELINFO target_dist base_dist [base_dist ...] 
+# IMPORT file +# COLLAPSE dist +# STRIP target_dist base_dist + +FILE ANCK x86 null %%DIST_OUTPUT%%/kernel-ANCK-generic-x86.config REFRESH +FILE ANCK arm64 null %%DIST_OUTPUT%%/kernel-ANCK-generic-arm64.config REFRESH + +# for ANCK +LEVELINFO ANCK null +IMPORT ANCK-x86 +IMPORT ANCK-arm64 + +COLLAPSE ANCK \ No newline at end of file diff --git a/anolis/configs/scripts/kconfig_layout b/anolis/configs/scripts/kconfig_layout new file mode 100644 index 000000000000..67772f224d11 --- /dev/null +++ b/anolis/configs/scripts/kconfig_layout @@ -0,0 +1,8 @@ +# dist/config_file_name arch base layout(variant/arch) +ANCK/generic-x86 x86 null generic/default;generic/x86 +ANCK/debug-x86 x86 null generic/default;generic/x86;debug/default;debug/x86 +ANCK/gcov-x86 x86 null generic/default;generic/x86;gcov/default +ANCK/generic-arm64 arm64 null generic/default;generic/arm64 +ANCK/debug-arm64 arm64 null generic/default;generic/arm64;debug/default;debug/arm64 +ANCK/gcov-arm64 arm64 null generic/default;generic/arm64;debug/default;debug/arm64;gcov/default +ANCK/arm64-64k arm64 null generic/default;generic/arm64;debug/default;debug/arm64;64k/arm64 diff --git a/anolis/configs/scripts/modify_config.sh b/anolis/configs/scripts/modify_config.sh new file mode 100644 index 000000000000..7f77b30e0966 --- /dev/null +++ b/anolis/configs/scripts/modify_config.sh @@ -0,0 +1,133 @@ +#! /bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# To modify kconfigs. +# +# Copyright (C) 2024 Qiao Ma + +set -e + +SCRIPT_DIR=$(realpath $(dirname $0)) +BASE_CONFIG_DIR=$(realpath ${SCRIPT_DIR}/..) 
+DIST_CONFIG_DIR=${BASE_CONFIG_DIR} + +if [ "$DIST_CONFIG_KERNEL_NAME" != "ANCK" ]; then + DIST_CONFIG_DIR=$(realpath ${BASE_CONFIG_DIR}/OVERRIDE/${DIST}/); +fi + +function die() { + echo "" + echo $@ + echo "usage:" + echo " make dist-configs-modify" \ + "C= L= [x86=] [arm64=] [others=] [all=]" + echo " C: the config name, must be specified" + echo " L: the level of config, must be specified" + echo " x86: the value of x86 architecture" + echo " arm64: the value of arm64 architecture" + echo " others: the default value for the architectures that not be specified" + echo " all: the value for all architectures" + echo "" + echo "example:" + echo " - only set x86 to y" + echo " make dist-configs-modify C=CONFIG_CRYPTO_ECDSA x86=y arm=n others=n L=L1" + echo " - set all archs to y" + echo " make dist-configs-modify C=CONFIG_CRYPTO_ECDSA all=y L=L1" + echo "" + exit 1 +} + +declare -A ARCH_VALUES + +function collect_ARCH_VALUES() { + if [ -n "${x86}" ]; then ARCH_VALUES["x86"]=${x86}; fi + if [ -n "${arm64}" ]; then ARCH_VALUES["arm64"]=${arm64}; fi + if [ -n "${others}" ]; then ARCH_VALUES["default"]=${others}; fi + if [ -n "${all}" ]; then ARCH_VALUES["default"]=${all}; fi + + if [ ${#ARCH_VALUES[@]} -eq 0 ]; then + die "need to specify at least one architecture's value"; + fi +} + +function set_correct_level() { + case $L in + "L0"|"L0-MANDATORY") + L="L0-MANDATORY" + ;; + "L1"|"L1-RECOMMEND") + L="L1-RECOMMEND" + ;; + "L2"|"L2-OPTIONAL") + L="L2-OPTIONAL" + ;; + *) + die "unsupported level: $L" + ;; + esac +} + +function check_args() { + if [ -z "$C" ]; then die "the config name must be specified"; fi + if [ -z "$L" ]; then die "the level must be specified"; fi + collect_ARCH_VALUES + set_correct_level +} + +function remove_old_configs() { + for f in $(find ${DIST_CONFIG_DIR}/L* -type f -name "$C") + do + echo "remove old file: $f" + rm -f $f + done +} + +function add_new_configs() { + for arch in ${!ARCH_VALUES[@]}; do + local value=${ARCH_VALUES[${arch}]} + local 
text="$C=$value" + if [ "$value" = "n" ]; then text="# $C is not set"; fi + + mkdir -p ${DIST_CONFIG_DIR}/${L}/${arch} + echo "$text" > ${DIST_CONFIG_DIR}/${L}/${arch}/$C; + echo "created new file: ${DIST_CONFIG_DIR}/${L}/${arch}/$C" + done +} + +function refresh_configs() { + echo "refresh configs" + sh ${SCRIPT_DIR}/update_configs.sh +} + +CHECK_FOUND_FILE=0 + +function check_config_for_one_arch() { + local arch=$1 + if [ -f ${DIST_CONFIG_DIR}/${L}/${arch}/$C ]; then + echo "$arch: $(cat ${DIST_CONFIG_DIR}/${L}/${arch}/$C)" + CHECK_FOUND_FILE=1 + fi +} + +function check_config() { + local appears=0 + echo "The Final Configs After Refresh" + check_config_for_one_arch "x86" + check_config_for_one_arch "arm64" + check_config_for_one_arch "default" + if [ "$CHECK_FOUND_FILE" == "0" ]; then + echo "Not Found Any Valid config files, maybe some dependency not satisfied" + fi + echo "" + echo "******************************************************************************" +} + +function main() { + check_args + remove_old_configs + add_new_configs + refresh_configs + check_config +} + +main diff --git a/anolis/configs/scripts/move_configs.sh b/anolis/configs/scripts/move_configs.sh new file mode 100644 index 000000000000..22a8f56e7d6e --- /dev/null +++ b/anolis/configs/scripts/move_configs.sh @@ -0,0 +1,48 @@ +#! /bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# To adjust the level of kconfig. +# +# Copyright (C) 2023 Qiao Ma + +set -e + +SCRIPT_DIR=$(realpath $(dirname $0)) +BASE_CONFIG_DIR=$(realpath ${SCRIPT_DIR}/..) 
+ +function die() { + echo "" + echo $@ + echo "usage:" + echo " make dist-config-move OLD= C= L=" + echo " OLD: the old level, default is UNKONWN" + echo " C: config name" + echo " L: the new level" + echo "example:" + echo " - to move CONFIG_CAN to L1" + echo " make dist-config-move OLD=L2 C=CONFIG_CAN L=L1" + echo "" + exit 1 +} + +function check_args() { + if [ -z "$OLD" ]; then + OLD="UNKNOWN" + fi + if [ -z "$C" ]; then + die "config name \$C is not specified" + fi + if [ -z "$L" ]; then + die "config level \$L is not specified" + fi +} + +function do_move() { + python3 ${SCRIPT_DIR}/anolis_kconfig.py move \ + --top_dir ${BASE_CONFIG_DIR} \ + --dist ${DIST_CONFIG_KERNEL_NAME} \ + --old "$OLD" "$C" "$L" +} + +check_args +do_move diff --git a/anolis/configs/scripts/update_configs.sh b/anolis/configs/scripts/update_configs.sh new file mode 100644 index 000000000000..b7ab61fa9e95 --- /dev/null +++ b/anolis/configs/scripts/update_configs.sh @@ -0,0 +1,113 @@ +#! /bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# To update kconfigs. +# +# Copyright (C) 2023 Qiao Ma + +set -e + +SCRIPT_DIR=$(realpath $(dirname $0)) +BASE_CONFIG_DIR=$(realpath ${SCRIPT_DIR}/..) 
+TMP_DIR=${DIST_OUTPUT}/configs +OLD_CONFIG_DIR=${TMP_DIR}/old +NEW_CONFIG_DIR=${TMP_DIR}/new +BACKUP_CONFIG_DIR=${BASE_CONFIG_DIR}/configs.${DIST_CONFIG_KERNEL_NAME}.old + +if [ "${DIST_CONFIG_KERNEL_NAME}" != "ANCK" ]; then + OLD_DIST_CONFIG_DIR=${BASE_CONFIG_DIR}/OVERRIDE/${DIST_CONFIG_KERNEL_NAME} + NEW_DIST_CONFIG_DIR=${NEW_CONFIG_DIR}/OVERRIDE/${DIST_CONFIG_KERNEL_NAME} +else + OLD_DIST_CONFIG_DIR=${BASE_CONFIG_DIR} + NEW_DIST_CONFIG_DIR=${NEW_CONFIG_DIR} +fi + +if [ -n "$DO_IMPORT_CONFIGS" ]; then + IMPORT_ACTION=${DIST_SRCROOT}/${DIST_CONFIG_ACTIONS_IMPORTS} +else + IMPORT_ACTION=${DIST_SRCROOT}/${DIST_CONFIG_ACTIONS_REFRESH} +fi + + +function log() { + echo $@ +} + +function prepare_env() { + rm -rf ${TMP_DIR} + mkdir -p ${OLD_CONFIG_DIR} + mkdir -p ${NEW_CONFIG_DIR} +} + +function generate_configs() { + log "collect all old configs..." + # generate old config files + sh ${SCRIPT_DIR}/generate_configs.sh +} + +function split_new_configs() { + # split new config files + echo "split new configs..." + cp ${IMPORT_ACTION} ${DIST_OUTPUT}/kconfig_import + sed -i "s#%%DIST_OUTPUT%%#\${DIST_OUTPUT}#" ${DIST_OUTPUT}/kconfig_import + sed -i "s#%%DIST_SRCROOT%%#\${DIST_SRCROOT}#" ${DIST_OUTPUT}/kconfig_import + python3 ${SCRIPT_DIR}/anolis_kconfig.py import_tanslate \ + --input_dir ${BASE_CONFIG_DIR} \ + --output_dir ${NEW_CONFIG_DIR} \ + --src_root ${DIST_SRCROOT} ${DIST_OUTPUT}/kconfig_import > ${DIST_OUTPUT}/import.sh + sh -e ${DIST_OUTPUT}/import.sh +} + +function replace_with_new_configs() { + log "replace old configs with new configs...." 
+ + rm -rf ${BACKUP_CONFIG_DIR} + mkdir -p ${BACKUP_CONFIG_DIR} + mkdir -p ${OLD_DIST_CONFIG_DIR} + for level in ${DIST_LEVELS}; + do + if [ -d ${OLD_DIST_CONFIG_DIR}/${level} ]; then + mv ${OLD_DIST_CONFIG_DIR}/${level} ${BACKUP_CONFIG_DIR} + fi + done + + for level in ${DIST_LEVELS} + do + if [ -d ${NEW_DIST_CONFIG_DIR}/${level} ]; then + mv ${NEW_DIST_CONFIG_DIR}/${level} ${OLD_DIST_CONFIG_DIR} + fi + done +} + +function check_configs() { + # check unknown config files + echo "" + echo "******************************************************************************" + local unknown_dir=${OLD_DIST_CONFIG_DIR}/UNKNOWN + if [ -d ${unknown_dir} ] && [ -n "$(ls ${unknown_dir})" ]; then + echo "There are some UNKNOWN level's new configs." + echo "" + ls ${unknown_dir} + echo "" + echo "Need to classify above configs manually !!!" + echo "See: ${unknown_dir}" + echo "HINT: \`make dist-configs-move\` can help you." + echo "eg: make dist-configs-move C=CONFIG_CAN* L=L2" + else + echo "" + echo "Congratulations, all configs has a determined level." + echo "**DO NOT FORGET** to add changelogs if any config is changed" + rm -rf ${BACKUP_CONFIG_DIR} + fi + echo "" + echo "******************************************************************************" + echo "" +} + +prepare_env +if [ -z "$DO_IMPORT_CONFIGS" ]; then + generate_configs +fi +split_new_configs +replace_with_new_configs +check_configs -- Gitee From 42a399530d759d810f7c9219c96ff1c346a187fb Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Wed, 21 Aug 2024 14:10:24 +0800 Subject: [PATCH 1182/2138] anolis: configs: break configs into kconfig baseline ANBZ: #8678 This commit do follow things: 1. breaks arch/{x86,arm64}/configs/anolis_defconfig into kconfig baseline. 2. copy custom-overrides/ and metadata/ from devel-5.10, based on commit b44fa71e0b7e9 ("anolis: configs: adjust the collapse rules") NOTICE: NO FUNCTIONAL CHANGE for anolis_defconfig. 
Signed-off-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3750 --- .../arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS | 1 + .../L0-MANDATORY/arm64/CONFIG_ARCH_PHYTIUM | 1 + .../configs/L0-MANDATORY/arm64/CONFIG_ARM64 | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM64_16K_PAGES | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM64_4K_PAGES | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM64_64K_PAGES | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM64_BTI | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM64_CNP | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM64_E0PD | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM64_HW_AFDBM | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM64_MPAM | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM64_MTE | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM64_PAN | 1 + .../arm64/CONFIG_ARM64_PSEUDO_NMI | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM64_PTR_AUTH | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM64_RAS_EXTN | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM64_SVE | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM64_TLB_RANGE | 1 + .../arm64/CONFIG_ARM64_USE_LSE_ATOMICS | 1 + .../arm64/CONFIG_ARM64_VA_BITS_39 | 1 + .../configs/L0-MANDATORY/arm64/CONFIG_ARM_CCN | 1 + .../configs/L0-MANDATORY/arm64/CONFIG_ARM_CMN | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM_CPU_RESCTRL | 1 + .../configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC | 1 + .../arm64/CONFIG_ARM_GIC_PHYTIUM_2500 | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM_GIC_V2M | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3 | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS | 1 + .../arm64/CONFIG_ARM_GIC_V3_ITS_PCI | 1 + .../configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM_PMU_ACPI | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM_SMMU | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3 | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3_PMU | 1 + .../L0-MANDATORY/arm64/CONFIG_ARM_SPE_PMU | 1 + .../L0-MANDATORY/arm64/CONFIG_CORESIGHT | 1 + .../arm64/CONFIG_CPU_LITTLE_ENDIAN | 1 + .../arm64/CONFIG_DEFERRED_STRUCT_PAGE_INIT | 1 + .../configs/L0-MANDATORY/arm64/CONFIG_EXT4_FS | 1 + 
.../configs/L0-MANDATORY/arm64/CONFIG_EXTCON | 1 + anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ | 1 + .../configs/L0-MANDATORY/arm64/CONFIG_HZ_1000 | 1 + .../configs/L0-MANDATORY/arm64/CONFIG_HZ_250 | 1 + .../arm64/CONFIG_IRQ_TIME_ACCOUNTING | 1 + anolis/configs/L0-MANDATORY/arm64/CONFIG_JBD2 | 1 + anolis/configs/L0-MANDATORY/arm64/CONFIG_KVM | 1 + .../CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY | 1 + .../arm64/CONFIG_PCI_HOST_GENERIC | 1 + .../L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB | 1 + .../L0-MANDATORY/arm64/CONFIG_PREEMPT_NONE | 1 + .../arm64/CONFIG_PREEMPT_VOLUNTARY | 1 + .../L0-MANDATORY/arm64/CONFIG_PWM_ATMEL_TCB | 1 + .../L0-MANDATORY/arm64/CONFIG_PWM_XILINX | 1 + .../L0-MANDATORY/arm64/CONFIG_QCOM_GPI_DMA | 1 + .../L0-MANDATORY/arm64/CONFIG_QCOM_ICC_BWMON | 1 + .../L0-MANDATORY/arm64/CONFIG_QCOM_LMH | 1 + .../L0-MANDATORY/arm64/CONFIG_QCOM_MPM | 1 + .../L0-MANDATORY/arm64/CONFIG_QCOM_RAMP_CTRL | 1 + .../arm64/CONFIG_QCOM_RPM_MASTER_STATS | 1 + .../L0-MANDATORY/arm64/CONFIG_QCOM_SCM | 1 + .../CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT | 1 + .../L0-MANDATORY/arm64/CONFIG_QCOM_SPM | 1 + .../arm64/CONFIG_QCOM_SSC_BLOCK_BUS | 1 + .../arm64/CONFIG_UNMAP_KERNEL_AT_EL0 | 1 + .../configs/L0-MANDATORY/arm64/CONFIG_VIRTIO | 1 + .../L0-MANDATORY/arm64/CONFIG_VIRTIO_BLK | 1 + .../L0-MANDATORY/arm64/CONFIG_VIRTIO_PCI | 1 + .../configs/L0-MANDATORY/default/CONFIG_64BIT | 1 + .../configs/L0-MANDATORY/default/CONFIG_ACPI | 1 + .../L0-MANDATORY/default/CONFIG_ACPI_APEI | 1 + .../L0-MANDATORY/default/CONFIG_ACPI_IPMI | 1 + .../L0-MANDATORY/default/CONFIG_ACPI_NUMA | 1 + .../default/CONFIG_ACPI_PROCESSOR | 1 + .../default/CONFIG_ADVISE_SYSCALLS | 1 + .../configs/L0-MANDATORY/default/CONFIG_AIO | 1 + .../default/CONFIG_ALLOW_DEV_COREDUMP | 1 + .../default/CONFIG_ASYMMETRIC_KEY_TYPE | 1 + .../CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE | 1 + .../configs/L0-MANDATORY/default/CONFIG_AUDIT | 1 + .../L0-MANDATORY/default/CONFIG_AUTOFS_FS | 1 + .../L0-MANDATORY/default/CONFIG_AUXILIARY_BUS | 1 + 
.../L0-MANDATORY/default/CONFIG_BASE_FULL | 1 + .../default/CONFIG_BFQ_GROUP_IOSCHED | 1 + .../L0-MANDATORY/default/CONFIG_BINFMT_ELF | 1 + .../L0-MANDATORY/default/CONFIG_BINFMT_SCRIPT | 1 + .../L0-MANDATORY/default/CONFIG_BLK_CGROUP | 1 + .../default/CONFIG_BLK_CGROUP_IOCOST | 1 + .../L0-MANDATORY/default/CONFIG_BLK_DEBUG_FS | 1 + .../L0-MANDATORY/default/CONFIG_BLK_DEV | 1 + .../default/CONFIG_BLK_DEV_INITRD | 1 + .../default/CONFIG_BLK_DEV_IO_TRACE | 1 + .../L0-MANDATORY/default/CONFIG_BLK_DEV_NVME | 1 + .../default/CONFIG_BLK_DEV_THROTTLING | 1 + .../L0-MANDATORY/default/CONFIG_BLK_MQ_PCI | 1 + .../L0-MANDATORY/default/CONFIG_BLK_MQ_VIRTIO | 1 + .../default/CONFIG_BLK_RQ_ALLOC_TIME | 1 + .../configs/L0-MANDATORY/default/CONFIG_BLOCK | 1 + .../L0-MANDATORY/default/CONFIG_BONDING | 1 + .../configs/L0-MANDATORY/default/CONFIG_BPF | 1 + .../L0-MANDATORY/default/CONFIG_BPF_EVENTS | 1 + .../L0-MANDATORY/default/CONFIG_BPF_JIT | 1 + .../L0-MANDATORY/default/CONFIG_BPF_LSM | 1 + .../L0-MANDATORY/default/CONFIG_BPF_SYSCALL | 1 + .../default/CONFIG_BPF_UNPRIV_DEFAULT_OFF | 1 + .../L0-MANDATORY/default/CONFIG_BRIDGE | 1 + .../configs/L0-MANDATORY/default/CONFIG_BUG | 1 + .../L0-MANDATORY/default/CONFIG_CACHEFILES | 1 + .../default/CONFIG_CACHEFILES_ONDEMAND | 1 + .../default/CONFIG_CACHESTAT_SYSCALL | 1 + .../CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE | 1 + .../default/CONFIG_CC_OPTIMIZE_FOR_SIZE | 1 + .../L0-MANDATORY/default/CONFIG_CFS_BANDWIDTH | 1 + .../L0-MANDATORY/default/CONFIG_CGROUPS | 1 + .../L0-MANDATORY/default/CONFIG_CGROUP_BPF | 1 + .../default/CONFIG_CGROUP_CPUACCT | 1 + .../L0-MANDATORY/default/CONFIG_CGROUP_DEVICE | 1 + .../default/CONFIG_CGROUP_HUGETLB | 1 + .../L0-MANDATORY/default/CONFIG_CGROUP_PERF | 1 + .../L0-MANDATORY/default/CONFIG_CGROUP_PIDS | 1 + .../L0-MANDATORY/default/CONFIG_CGROUP_RDMA | 1 + .../L0-MANDATORY/default/CONFIG_CGROUP_SCHED | 1 + .../default/CONFIG_CHECKPOINT_RESTORE | 1 + .../default/CONFIG_CK_KABI_RESERVE | 1 + 
.../default/CONFIG_CK_KABI_SIZE_ALIGN_CHECKS | 1 + .../L0-MANDATORY/default/CONFIG_COMMON_CLK | 1 + .../L0-MANDATORY/default/CONFIG_COMPACTION | 1 + .../L0-MANDATORY/default/CONFIG_CONFIGFS_FS | 1 + .../default/CONFIG_CONSOLE_TRANSLATIONS | 1 + .../L0-MANDATORY/default/CONFIG_COREDUMP | 1 + .../L0-MANDATORY/default/CONFIG_CPUSETS | 1 + .../L0-MANDATORY/default/CONFIG_CPU_FREQ | 1 + .../L0-MANDATORY/default/CONFIG_CPU_IDLE | 1 + .../L0-MANDATORY/default/CONFIG_CPU_ISOLATION | 1 + .../L0-MANDATORY/default/CONFIG_CRAMFS | 1 + .../L0-MANDATORY/default/CONFIG_CRASH_CORE | 1 + .../L0-MANDATORY/default/CONFIG_CRASH_DUMP | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_AEAD | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_AEAD2 | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_AES | 1 + .../default/CONFIG_CRYPTO_AKCIPHER | 1 + .../default/CONFIG_CRYPTO_AKCIPHER2 | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI | 1 + .../default/CONFIG_CRYPTO_ALGAPI2 | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_GCM | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_GHASH | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_HASH | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_HASH2 | 1 + .../default/CONFIG_CRYPTO_LIB_AES | 1 + .../default/CONFIG_CRYPTO_LIB_SHA256 | 1 + .../default/CONFIG_CRYPTO_MANAGER | 1 + .../default/CONFIG_CRYPTO_MANAGER2 | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_RNG | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_RNG2 | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_RSA | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_SHA256 | 1 + .../default/CONFIG_CRYPTO_SKCIPHER | 1 + .../default/CONFIG_CRYPTO_SKCIPHER2 | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_SM2 | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_SM3 | 1 + .../default/CONFIG_CRYPTO_SM3_GENERIC | 1 + .../L0-MANDATORY/default/CONFIG_CRYPTO_SM4 | 1 + .../default/CONFIG_CRYPTO_SM4_GENERIC | 1 + .../L0-MANDATORY/default/CONFIG_CXL_BUS | 1 + .../configs/L0-MANDATORY/default/CONFIG_DAX | 1 + 
.../default/CONFIG_DEBUG_BUGVERBOSE | 1 + .../L0-MANDATORY/default/CONFIG_DEBUG_FS | 1 + .../L0-MANDATORY/default/CONFIG_DEBUG_INFO | 1 + .../default/CONFIG_DEBUG_INFO_BTF | 1 + .../default/CONFIG_DEBUG_INFO_DWARF4 | 1 + .../L0-MANDATORY/default/CONFIG_DEBUG_KERNEL | 1 + .../L0-MANDATORY/default/CONFIG_DEBUG_MISC | 1 + .../default/CONFIG_DEBUG_SECTION_MISMATCH | 1 + .../L0-MANDATORY/default/CONFIG_DEFAULT_CUBIC | 1 + .../default/CONFIG_DEFAULT_SECURITY_DAC | 1 + .../default/CONFIG_DETECT_HUNG_TASK | 1 + .../L0-MANDATORY/default/CONFIG_DEVTMPFS | 1 + .../default/CONFIG_DEVTMPFS_MOUNT | 1 + .../L0-MANDATORY/default/CONFIG_DMADEVICES | 1 + .../configs/L0-MANDATORY/default/CONFIG_DMI | 1 + .../L0-MANDATORY/default/CONFIG_DNOTIFY | 1 + .../L0-MANDATORY/default/CONFIG_DNS_RESOLVER | 1 + .../configs/L0-MANDATORY/default/CONFIG_DRM | 1 + .../L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG | 1 + .../default/CONFIG_DYNAMIC_DEBUG_CORE | 1 + .../default/CONFIG_DYNAMIC_FTRACE | 1 + .../configs/L0-MANDATORY/default/CONFIG_EDAC | 1 + .../configs/L0-MANDATORY/default/CONFIG_EFI | 1 + .../L0-MANDATORY/default/CONFIG_ELF_CORE | 1 + .../configs/L0-MANDATORY/default/CONFIG_EPOLL | 1 + .../L0-MANDATORY/default/CONFIG_EROFS_FS | 1 + .../default/CONFIG_ETHTOOL_NETLINK | 1 + .../L0-MANDATORY/default/CONFIG_EVENTFD | 1 + .../configs/L0-MANDATORY/default/CONFIG_EVM | 1 + .../L0-MANDATORY/default/CONFIG_EXPERT | 1 + .../L0-MANDATORY/default/CONFIG_EXT3_FS | 1 + .../default/CONFIG_EXT4_FS_POSIX_ACL | 1 + .../default/CONFIG_EXT4_FS_SECURITY | 1 + .../default/CONFIG_FAIR_GROUP_SCHED | 1 + .../L0-MANDATORY/default/CONFIG_FANOTIFY | 1 + .../L0-MANDATORY/default/CONFIG_FAT_FS | 1 + anolis/configs/L0-MANDATORY/default/CONFIG_FB | 1 + .../L0-MANDATORY/default/CONFIG_FHANDLE | 1 + .../L0-MANDATORY/default/CONFIG_FILE_LOCKING | 1 + .../default/CONFIG_FRAMEBUFFER_CONSOLE | 1 + .../L0-MANDATORY/default/CONFIG_FSCACHE | 1 + .../L0-MANDATORY/default/CONFIG_FSNOTIFY | 1 + .../L0-MANDATORY/default/CONFIG_FS_DAX | 
1 + .../L0-MANDATORY/default/CONFIG_FTRACE | 1 + .../default/CONFIG_FTRACE_SYSCALLS | 1 + .../default/CONFIG_FUNCTION_GRAPH_TRACER | 1 + .../default/CONFIG_FUNCTION_TRACER | 1 + .../L0-MANDATORY/default/CONFIG_FUSE_FS | 1 + .../configs/L0-MANDATORY/default/CONFIG_FUTEX | 1 + .../L0-MANDATORY/default/CONFIG_FW_LOADER | 1 + .../default/CONFIG_GENERIC_GETTIMEOFDAY | 1 + .../default/CONFIG_GENERIC_VDSO_TIME_NS | 1 + .../default/CONFIG_HARDLOCKUP_DETECTOR | 1 + .../configs/L0-MANDATORY/default/CONFIG_HDMI | 1 + .../default/CONFIG_HIGH_RES_TIMERS | 1 + .../L0-MANDATORY/default/CONFIG_HOTPLUG_CPU | 1 + .../L0-MANDATORY/default/CONFIG_HOTPLUG_PCI | 1 + .../default/CONFIG_HOTPLUG_PCI_PCIE | 1 + .../L0-MANDATORY/default/CONFIG_HUGETLBFS | 1 + .../L0-MANDATORY/default/CONFIG_HUGETLB_PAGE | 1 + .../configs/L0-MANDATORY/default/CONFIG_HWMON | 1 + .../L0-MANDATORY/default/CONFIG_HW_RANDOM | 1 + .../L0-MANDATORY/default/CONFIG_HZ_100 | 1 + .../L0-MANDATORY/default/CONFIG_HZ_300 | 1 + .../L0-MANDATORY/default/CONFIG_HZ_PERIODIC | 1 + .../configs/L0-MANDATORY/default/CONFIG_I2C | 1 + .../configs/L0-MANDATORY/default/CONFIG_I40E | 1 + .../L0-MANDATORY/default/CONFIG_I40EVF | 1 + .../configs/L0-MANDATORY/default/CONFIG_ICE | 1 + .../default/CONFIG_IDLE_PAGE_TRACKING | 1 + .../configs/L0-MANDATORY/default/CONFIG_IGB | 1 + .../configs/L0-MANDATORY/default/CONFIG_IGBVF | 1 + .../configs/L0-MANDATORY/default/CONFIG_IMA | 1 + .../configs/L0-MANDATORY/default/CONFIG_INET | 1 + .../L0-MANDATORY/default/CONFIG_INET_DIAG | 1 + .../default/CONFIG_INET_MPTCP_DIAG | 1 + .../L0-MANDATORY/default/CONFIG_INET_TCP_DIAG | 1 + .../L0-MANDATORY/default/CONFIG_INET_UDP_DIAG | 1 + .../L0-MANDATORY/default/CONFIG_INFINIBAND | 1 + .../configs/L0-MANDATORY/default/CONFIG_INPUT | 1 + .../default/CONFIG_INPUT_KEYBOARD | 1 + .../L0-MANDATORY/default/CONFIG_INPUT_MOUSE | 1 + .../L0-MANDATORY/default/CONFIG_INTEGRITY | 1 + .../L0-MANDATORY/default/CONFIG_IOMMU_SUPPORT | 1 + 
.../L0-MANDATORY/default/CONFIG_IOSCHED_BFQ | 1 + .../L0-MANDATORY/default/CONFIG_IO_URING | 1 + .../L0-MANDATORY/default/CONFIG_IPC_NS | 1 + .../L0-MANDATORY/default/CONFIG_IPMI_HANDLER | 1 + .../configs/L0-MANDATORY/default/CONFIG_IPV6 | 1 + .../default/CONFIG_IP_NF_ARPTABLES | 1 + .../L0-MANDATORY/default/CONFIG_IP_NF_RAW | 1 + .../default/CONFIG_IP_NF_SECURITY | 1 + .../L0-MANDATORY/default/CONFIG_IP_SET | 1 + .../configs/L0-MANDATORY/default/CONFIG_IP_VS | 1 + .../L0-MANDATORY/default/CONFIG_IP_VS_IPV6 | 1 + .../L0-MANDATORY/default/CONFIG_ISO9660_FS | 1 + .../configs/L0-MANDATORY/default/CONFIG_IXGBE | 1 + .../L0-MANDATORY/default/CONFIG_IXGBEVF | 1 + .../L0-MANDATORY/default/CONFIG_JUMP_LABEL | 1 + .../L0-MANDATORY/default/CONFIG_KALLSYMS | 1 + .../L0-MANDATORY/default/CONFIG_KALLSYMS_ALL | 1 + .../configs/L0-MANDATORY/default/CONFIG_KCMP | 1 + .../L0-MANDATORY/default/CONFIG_KERNFS | 1 + .../configs/L0-MANDATORY/default/CONFIG_KEXEC | 1 + .../L0-MANDATORY/default/CONFIG_KEXEC_CORE | 1 + .../L0-MANDATORY/default/CONFIG_KEXEC_FILE | 1 + .../L0-MANDATORY/default/CONFIG_KFENCE | 1 + .../configs/L0-MANDATORY/default/CONFIG_KGDB | 1 + .../default/CONFIG_KGDB_SERIAL_CONSOLE | 1 + .../default/CONFIG_KGDB_TESTS_ON_BOOT | 1 + .../L0-MANDATORY/default/CONFIG_KPROBES | 1 + .../L0-MANDATORY/default/CONFIG_KPROBE_EVENTS | 1 + .../L0-MANDATORY/default/CONFIG_KRETPROBES | 1 + .../configs/L0-MANDATORY/default/CONFIG_LOCKD | 1 + .../L0-MANDATORY/default/CONFIG_LOCKD_V4 | 1 + .../default/CONFIG_LOCKUP_DETECTOR | 1 + .../L0-MANDATORY/default/CONFIG_LRU_GEN | 1 + .../L0-MANDATORY/default/CONFIG_MAGIC_SYSRQ | 1 + anolis/configs/L0-MANDATORY/default/CONFIG_MD | 1 + .../L0-MANDATORY/default/CONFIG_MEMBARRIER | 1 + .../configs/L0-MANDATORY/default/CONFIG_MEMCG | 1 + .../default/CONFIG_MEMORY_FAILURE | 1 + .../default/CONFIG_MEMORY_HOTPLUG | 1 + .../CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE | 1 + .../L0-MANDATORY/default/CONFIG_MIGRATION | 1 + .../default/CONFIG_MISC_FILESYSTEMS | 1 + 
.../L0-MANDATORY/default/CONFIG_MLX4_EN | 1 + .../L0-MANDATORY/default/CONFIG_MLX5_CORE | 1 + .../L0-MANDATORY/default/CONFIG_MLX5_CORE_EN | 1 + .../L0-MANDATORY/default/CONFIG_MLXSW_CORE | 1 + .../configs/L0-MANDATORY/default/CONFIG_MMU | 1 + .../L0-MANDATORY/default/CONFIG_MODPROBE_PATH | 1 + .../L0-MANDATORY/default/CONFIG_MODULES | 1 + .../L0-MANDATORY/default/CONFIG_MODULE_SIG | 1 + .../default/CONFIG_MODULE_SRCVERSION_ALL | 1 + .../L0-MANDATORY/default/CONFIG_MODULE_UNLOAD | 1 + .../L0-MANDATORY/default/CONFIG_MODVERSIONS | 1 + .../configs/L0-MANDATORY/default/CONFIG_MPTCP | 1 + .../default/CONFIG_MQ_IOSCHED_DEADLINE | 1 + .../default/CONFIG_MQ_IOSCHED_KYBER | 1 + .../L0-MANDATORY/default/CONFIG_MULTIUSER | 1 + .../L0-MANDATORY/default/CONFIG_NAMESPACES | 1 + .../configs/L0-MANDATORY/default/CONFIG_NET | 1 + .../L0-MANDATORY/default/CONFIG_NETDEVICES | 1 + .../L0-MANDATORY/default/CONFIG_NETFILTER | 1 + .../default/CONFIG_NETFILTER_ADVANCED | 1 + .../default/CONFIG_NETFILTER_INGRESS | 1 + .../L0-MANDATORY/default/CONFIG_NETLINK_DIAG | 1 + .../default/CONFIG_NETWORK_FILESYSTEMS | 1 + .../L0-MANDATORY/default/CONFIG_NET_ACT_GACT | 1 + .../default/CONFIG_NET_ACT_POLICE | 1 + .../L0-MANDATORY/default/CONFIG_NET_CLS | 1 + .../L0-MANDATORY/default/CONFIG_NET_CLS_ACT | 1 + .../L0-MANDATORY/default/CONFIG_NET_CORE | 1 + .../L0-MANDATORY/default/CONFIG_NET_FAILOVER | 1 + .../L0-MANDATORY/default/CONFIG_NET_KEY | 1 + .../L0-MANDATORY/default/CONFIG_NET_NS | 1 + .../L0-MANDATORY/default/CONFIG_NET_SCHED | 1 + .../default/CONFIG_NET_SCH_FQ_CODEL | 1 + .../default/CONFIG_NET_SCH_INGRESS | 1 + .../default/CONFIG_NET_VENDOR_BROADCOM | 1 + .../default/CONFIG_NET_VENDOR_INTEL | 1 + .../default/CONFIG_NET_VENDOR_MELLANOX | 1 + .../default/CONFIG_NET_VENDOR_WANGXUN | 1 + .../configs/L0-MANDATORY/default/CONFIG_NFSD | 1 + .../L0-MANDATORY/default/CONFIG_NFSD_V4 | 1 + .../L0-MANDATORY/default/CONFIG_NFS_COMMON | 1 + .../L0-MANDATORY/default/CONFIG_NFS_FS | 1 + 
.../L0-MANDATORY/default/CONFIG_NFS_FSCACHE | 1 + .../L0-MANDATORY/default/CONFIG_NFS_V3 | 1 + .../L0-MANDATORY/default/CONFIG_NFS_V4 | 1 + .../L0-MANDATORY/default/CONFIG_NFS_V4_1 | 1 + .../L0-MANDATORY/default/CONFIG_NFS_V4_2 | 1 + .../L0-MANDATORY/default/CONFIG_NF_CONNTRACK | 1 + .../L0-MANDATORY/default/CONFIG_NF_NAT | 1 + .../L0-MANDATORY/default/CONFIG_NF_TABLES | 1 + .../default/CONFIG_NF_TABLES_INET | 1 + .../default/CONFIG_NF_TABLES_IPV4 | 1 + .../default/CONFIG_NF_TABLES_IPV6 | 1 + .../configs/L0-MANDATORY/default/CONFIG_NGBE | 1 + .../configs/L0-MANDATORY/default/CONFIG_NLS | 1 + .../L0-MANDATORY/default/CONFIG_NLS_ASCII | 1 + .../default/CONFIG_NLS_CODEPAGE_936 | 1 + .../default/CONFIG_NLS_CODEPAGE_950 | 1 + .../L0-MANDATORY/default/CONFIG_NLS_DEFAULT | 1 + .../L0-MANDATORY/default/CONFIG_NLS_UTF8 | 1 + .../configs/L0-MANDATORY/default/CONFIG_NO_HZ | 1 + .../L0-MANDATORY/default/CONFIG_NO_HZ_FULL | 1 + .../L0-MANDATORY/default/CONFIG_NO_HZ_IDLE | 1 + .../L0-MANDATORY/default/CONFIG_NR_CPUS | 1 + .../L0-MANDATORY/default/CONFIG_NTFS_FS | 1 + .../configs/L0-MANDATORY/default/CONFIG_NUMA | 1 + .../L0-MANDATORY/default/CONFIG_NVME_CORE | 1 + .../L0-MANDATORY/default/CONFIG_NVME_FABRICS | 1 + .../L0-MANDATORY/default/CONFIG_NVME_RDMA | 1 + .../L0-MANDATORY/default/CONFIG_NVME_TCP | 1 + .../L0-MANDATORY/default/CONFIG_OVERLAY_FS | 1 + .../L0-MANDATORY/default/CONFIG_PACKET | 1 + .../default/CONFIG_PAGE_IDLE_FLAG | 1 + .../L0-MANDATORY/default/CONFIG_PANIC_ON_OOPS | 1 + .../L0-MANDATORY/default/CONFIG_PANIC_TIMEOUT | 1 + .../L0-MANDATORY/default/CONFIG_PARAVIRT | 1 + .../default/CONFIG_PARTITION_ADVANCED | 1 + .../configs/L0-MANDATORY/default/CONFIG_PCI | 1 + .../L0-MANDATORY/default/CONFIG_PCIEPORTBUS | 1 + .../L0-MANDATORY/default/CONFIG_PCI_IOV | 1 + .../L0-MANDATORY/default/CONFIG_PCI_MSI | 1 + .../L0-MANDATORY/default/CONFIG_PCI_STUB | 1 + .../L0-MANDATORY/default/CONFIG_PERF_EVENTS | 1 + .../default/CONFIG_PGTABLE_LEVELS | 1 + 
.../L0-MANDATORY/default/CONFIG_PID_NS | 1 + .../default/CONFIG_PKCS7_MESSAGE_PARSER | 1 + anolis/configs/L0-MANDATORY/default/CONFIG_PM | 1 + .../L0-MANDATORY/default/CONFIG_POSIX_TIMERS | 1 + .../L0-MANDATORY/default/CONFIG_PREEMPT | 1 + .../L0-MANDATORY/default/CONFIG_PREEMPTION | 1 + .../L0-MANDATORY/default/CONFIG_PREEMPT_BUILD | 1 + .../default/CONFIG_PREEMPT_DYNAMIC | 1 + .../L0-MANDATORY/default/CONFIG_PREEMPT_RCU | 1 + .../default/CONFIG_PREEMPT_TRACER | 1 + .../L0-MANDATORY/default/CONFIG_PRINTK | 1 + .../L0-MANDATORY/default/CONFIG_PRINTK_INDEX | 1 + .../L0-MANDATORY/default/CONFIG_PRINTK_TIME | 1 + .../default/CONFIG_PROBE_EVENTS_BTF_ARGS | 1 + .../L0-MANDATORY/default/CONFIG_PROC_FS | 1 + .../L0-MANDATORY/default/CONFIG_PROC_KCORE | 1 + .../default/CONFIG_PROC_PAGE_MONITOR | 1 + .../L0-MANDATORY/default/CONFIG_PROC_SYSCTL | 1 + .../L0-MANDATORY/default/CONFIG_PROC_VMCORE | 1 + .../default/CONFIG_PSE_CONTROLLER | 1 + .../L0-MANDATORY/default/CONFIG_PSTORE_BLK | 1 + .../default/CONFIG_PSTORE_DEFAULT_KMSG_BYTES | 1 + .../default/CONFIG_PTP_1588_CLOCK_MOCK | 1 + .../default/CONFIG_PTP_1588_CLOCK_OCP | 1 + .../default/CONFIG_PTP_1588_CLOCK_OPTIONAL | 1 + .../L0-MANDATORY/default/CONFIG_PVPANIC | 1 + .../L0-MANDATORY/default/CONFIG_PVPANIC_MMIO | 1 + .../L0-MANDATORY/default/CONFIG_PVPANIC_PCI | 1 + .../L0-MANDATORY/default/CONFIG_PWM_CLK | 1 + .../L0-MANDATORY/default/CONFIG_PWM_DWC | 1 + .../configs/L0-MANDATORY/default/CONFIG_QUOTA | 1 + .../default/CONFIG_RANDOMIZE_BASE | 1 + .../default/CONFIG_RANDSTRUCT_FULL | 1 + .../default/CONFIG_RANDSTRUCT_NONE | 1 + .../default/CONFIG_RANDSTRUCT_PERFORMANCE | 1 + .../configs/L0-MANDATORY/default/CONFIG_RAS | 1 + .../configs/L0-MANDATORY/default/CONFIG_RELAY | 1 + .../L0-MANDATORY/default/CONFIG_RELOCATABLE | 1 + .../L0-MANDATORY/default/CONFIG_RESCTRL_FS | 1 + .../default/CONFIG_RESET_CONTROLLER | 1 + .../configs/L0-MANDATORY/default/CONFIG_RPS | 1 + .../configs/L0-MANDATORY/default/CONFIG_RSEQ | 1 + 
.../L0-MANDATORY/default/CONFIG_RTC_CLASS | 1 + .../L0-MANDATORY/default/CONFIG_SCHEDSTATS | 1 + .../default/CONFIG_SCHED_AUTOGROUP | 1 + .../L0-MANDATORY/default/CONFIG_SCHED_CORE | 1 + .../L0-MANDATORY/default/CONFIG_SCHED_DEBUG | 1 + .../L0-MANDATORY/default/CONFIG_SCHED_MC | 1 + .../configs/L0-MANDATORY/default/CONFIG_SCSI | 1 + .../L0-MANDATORY/default/CONFIG_SECCOMP | 1 + .../default/CONFIG_SECCOMP_FILTER | 1 + .../default/CONFIG_SECONDARY_TRUSTED_KEYRING | 1 + .../L0-MANDATORY/default/CONFIG_SECURITY | 1 + .../L0-MANDATORY/default/CONFIG_SECURITYFS | 1 + .../default/CONFIG_SECURITY_INFINIBAND | 1 + .../default/CONFIG_SECURITY_NETWORK | 1 + .../default/CONFIG_SECURITY_NETWORK_XFRM | 1 + .../L0-MANDATORY/default/CONFIG_SECURITY_PATH | 1 + .../default/CONFIG_SECURITY_SELINUX | 1 + .../default/CONFIG_SECURITY_SELINUX_BOOTPARAM | 1 + .../configs/L0-MANDATORY/default/CONFIG_SERIO | 1 + .../configs/L0-MANDATORY/default/CONFIG_SHMEM | 1 + .../L0-MANDATORY/default/CONFIG_SIGNALFD | 1 + .../configs/L0-MANDATORY/default/CONFIG_SLUB | 1 + .../default/CONFIG_SLUB_CPU_PARTIAL | 1 + .../L0-MANDATORY/default/CONFIG_SLUB_DEBUG | 1 + .../L0-MANDATORY/default/CONFIG_SLUB_DEBUG_ON | 1 + .../L0-MANDATORY/default/CONFIG_SLUB_STATS | 1 + .../configs/L0-MANDATORY/default/CONFIG_SMC | 1 + .../configs/L0-MANDATORY/default/CONFIG_SMP | 1 + .../default/CONFIG_SOFTLOCKUP_DETECTOR | 1 + .../L0-MANDATORY/default/CONFIG_SPARSEMEM | 1 + .../default/CONFIG_SPARSEMEM_VMEMMAP | 1 + .../configs/L0-MANDATORY/default/CONFIG_SPI | 1 + .../L0-MANDATORY/default/CONFIG_SQUASHFS | 1 + .../L0-MANDATORY/default/CONFIG_STACKTRACE | 1 + .../configs/L0-MANDATORY/default/CONFIG_STM | 1 + .../L0-MANDATORY/default/CONFIG_STREAM_PARSER | 1 + .../default/CONFIG_STRICT_KERNEL_RWX | 1 + .../default/CONFIG_STRICT_MODULE_RWX | 1 + .../L0-MANDATORY/default/CONFIG_SUNRPC | 1 + .../configs/L0-MANDATORY/default/CONFIG_SWAP | 1 + .../L0-MANDATORY/default/CONFIG_SYN_COOKIES | 1 + .../L0-MANDATORY/default/CONFIG_SYSCTL | 
1 + .../configs/L0-MANDATORY/default/CONFIG_SYSFS | 1 + .../default/CONFIG_SYSTEM_TRUSTED_KEYRING | 1 + .../default/CONFIG_SYSTEM_TRUSTED_KEYS | 1 + .../L0-MANDATORY/default/CONFIG_SYSVIPC | 1 + .../configs/L0-MANDATORY/default/CONFIG_TAP | 1 + .../L0-MANDATORY/default/CONFIG_TARGET_CORE | 1 + .../L0-MANDATORY/default/CONFIG_TCG_TIS | 1 + .../L0-MANDATORY/default/CONFIG_TCG_TPM | 1 + .../default/CONFIG_TCP_CONG_ADVANCED | 1 + .../L0-MANDATORY/default/CONFIG_TCP_CONG_BBR | 1 + .../default/CONFIG_TCP_CONG_CUBIC | 1 + .../L0-MANDATORY/default/CONFIG_THERMAL | 1 + .../L0-MANDATORY/default/CONFIG_TIMERFD | 1 + .../configs/L0-MANDATORY/default/CONFIG_TLS | 1 + .../configs/L0-MANDATORY/default/CONFIG_TMPFS | 1 + .../L0-MANDATORY/default/CONFIG_TRACEPOINTS | 1 + .../default/CONFIG_TRANSPARENT_HUGEPAGE | 1 + .../configs/L0-MANDATORY/default/CONFIG_TTY | 1 + .../configs/L0-MANDATORY/default/CONFIG_TUN | 1 + .../configs/L0-MANDATORY/default/CONFIG_TXGBE | 1 + .../configs/L0-MANDATORY/default/CONFIG_UIO | 1 + .../configs/L0-MANDATORY/default/CONFIG_UNIX | 1 + .../L0-MANDATORY/default/CONFIG_UNIX98_PTYS | 1 + .../L0-MANDATORY/default/CONFIG_UPROBE_EVENTS | 1 + .../configs/L0-MANDATORY/default/CONFIG_USB | 1 + .../L0-MANDATORY/default/CONFIG_USB_SUPPORT | 1 + .../L0-MANDATORY/default/CONFIG_USERFAULTFD | 1 + .../L0-MANDATORY/default/CONFIG_UTS_NS | 1 + .../configs/L0-MANDATORY/default/CONFIG_VETH | 1 + .../L0-MANDATORY/default/CONFIG_VFAT_FS | 1 + .../configs/L0-MANDATORY/default/CONFIG_VFIO | 1 + .../L0-MANDATORY/default/CONFIG_VFIO_PCI | 1 + .../L0-MANDATORY/default/CONFIG_VGA_ARB | 1 + .../L0-MANDATORY/default/CONFIG_VHOST_NET | 1 + .../L0-MANDATORY/default/CONFIG_VHOST_VSOCK | 1 + .../default/CONFIG_VIRTIO_BALLOON | 1 + .../default/CONFIG_VIRTIO_CONSOLE | 1 + .../L0-MANDATORY/default/CONFIG_VIRTIO_FS | 1 + .../L0-MANDATORY/default/CONFIG_VIRTIO_MEM | 1 + .../L0-MANDATORY/default/CONFIG_VIRTIO_MENU | 1 + .../L0-MANDATORY/default/CONFIG_VIRTIO_MMIO | 1 + 
.../L0-MANDATORY/default/CONFIG_VIRTIO_NET | 1 + .../L0-MANDATORY/default/CONFIG_VIRTIO_PMEM | 1 + .../default/CONFIG_VIRTUALIZATION | 1 + .../default/CONFIG_VIRT_CPU_ACCOUNTING_GEN | 1 + .../L0-MANDATORY/default/CONFIG_VMAP_STACK | 1 + .../default/CONFIG_VM_EVENT_COUNTERS | 1 + .../L0-MANDATORY/default/CONFIG_VSOCKETS | 1 + anolis/configs/L0-MANDATORY/default/CONFIG_VT | 1 + .../L0-MANDATORY/default/CONFIG_VT_CONSOLE | 1 + .../L0-MANDATORY/default/CONFIG_WATCHDOG | 1 + .../default/CONFIG_X509_CERTIFICATE_PARSER | 1 + .../L0-MANDATORY/default/CONFIG_XDP_SOCKETS | 1 + .../L0-MANDATORY/default/CONFIG_XFRM_USER | 1 + .../L0-MANDATORY/default/CONFIG_XFS_FS | 1 + .../configs/L0-MANDATORY/default/CONFIG_XPS | 1 + .../L0-MANDATORY/default/CONFIG_XZ_DEC | 1 + .../L0-MANDATORY/default/CONFIG_ZONE_DEVICE | 1 + .../L0-MANDATORY/default/CONFIG_ZONE_DMA | 1 + .../L0-MANDATORY/default/CONFIG_ZONE_DMA32 | 1 + .../configs/L0-MANDATORY/default/CONFIG_ZRAM | 1 + .../L0-MANDATORY/default/CONFIG_ZSTD_COMPRESS | 1 + .../default/CONFIG_ZSTD_DECOMPRESS | 1 + .../configs/L0-MANDATORY/x86/CONFIG_AMD_NUMA | 1 + .../x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS | 1 + .../L0-MANDATORY/x86/CONFIG_COMPAT_VDSO | 1 + .../L0-MANDATORY/x86/CONFIG_CPU_SUP_AMD | 1 + .../L0-MANDATORY/x86/CONFIG_CPU_SUP_HYGON | 1 + .../L0-MANDATORY/x86/CONFIG_CPU_SUP_INTEL | 1 + .../L0-MANDATORY/x86/CONFIG_CPU_SUP_ZHAOXIN | 1 + .../L0-MANDATORY/x86/CONFIG_CRYPTO_SIMD | 1 + .../x86/CONFIG_CRYPTO_SM3_AVX_X86_64 | 1 + .../x86/CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64 | 1 + .../x86/CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64 | 1 + .../x86/CONFIG_DEFERRED_STRUCT_PAGE_INIT | 1 + .../configs/L0-MANDATORY/x86/CONFIG_EXT4_FS | 1 + anolis/configs/L0-MANDATORY/x86/CONFIG_EXTCON | 1 + .../L0-MANDATORY/x86/CONFIG_HPET_TIMER | 1 + anolis/configs/L0-MANDATORY/x86/CONFIG_HZ | 1 + .../configs/L0-MANDATORY/x86/CONFIG_HZ_1000 | 1 + anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_250 | 1 + .../L0-MANDATORY/x86/CONFIG_IA32_FEAT_CTL | 1 + 
.../x86/CONFIG_INSTRUCTION_DECODER | 1 + .../L0-MANDATORY/x86/CONFIG_INTEL_IDLE | 1 + .../L0-MANDATORY/x86/CONFIG_INTEL_IOMMU | 1 + .../L0-MANDATORY/x86/CONFIG_INTEL_IOMMU_SVM | 1 + .../L0-MANDATORY/x86/CONFIG_INTEL_TDX_GUEST | 1 + .../configs/L0-MANDATORY/x86/CONFIG_IRQ_REMAP | 1 + .../x86/CONFIG_IRQ_TIME_ACCOUNTING | 1 + anolis/configs/L0-MANDATORY/x86/CONFIG_JBD2 | 1 + .../L0-MANDATORY/x86/CONFIG_KPROBES_ON_FTRACE | 1 + anolis/configs/L0-MANDATORY/x86/CONFIG_KVM | 1 + .../configs/L0-MANDATORY/x86/CONFIG_KVM_AMD | 1 + .../configs/L0-MANDATORY/x86/CONFIG_KVM_GUEST | 1 + .../configs/L0-MANDATORY/x86/CONFIG_KVM_INTEL | 1 + .../configs/L0-MANDATORY/x86/CONFIG_LIVEPATCH | 1 + anolis/configs/L0-MANDATORY/x86/CONFIG_MTRR | 1 + .../configs/L0-MANDATORY/x86/CONFIG_OPTPROBES | 1 + .../x86/CONFIG_PAGE_TABLE_ISOLATION | 1 + .../L0-MANDATORY/x86/CONFIG_PCI_PF_STUB | 1 + .../L0-MANDATORY/x86/CONFIG_PHYSICAL_START | 1 + .../L0-MANDATORY/x86/CONFIG_PREEMPT_NONE | 1 + .../L0-MANDATORY/x86/CONFIG_PREEMPT_VOLUNTARY | 1 + .../L0-MANDATORY/x86/CONFIG_RANDOMIZE_MEMORY | 1 + .../configs/L0-MANDATORY/x86/CONFIG_RETHUNK | 1 + .../configs/L0-MANDATORY/x86/CONFIG_RETPOLINE | 1 + .../configs/L0-MANDATORY/x86/CONFIG_SEV_GUEST | 1 + .../x86/CONFIG_SPECULATION_MITIGATIONS | 1 + .../x86/CONFIG_UNWINDER_FRAME_POINTER | 1 + .../L0-MANDATORY/x86/CONFIG_UNWINDER_ORC | 1 + .../L0-MANDATORY/x86/CONFIG_VGA_CONSOLE | 1 + anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO | 1 + .../L0-MANDATORY/x86/CONFIG_VIRTIO_BLK | 1 + .../L0-MANDATORY/x86/CONFIG_VIRTIO_PCI | 1 + anolis/configs/L0-MANDATORY/x86/CONFIG_X86 | 1 + .../L0-MANDATORY/x86/CONFIG_X86_5LEVEL | 1 + anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64 | 1 + .../L0-MANDATORY/x86/CONFIG_X86_64_ACPI_NUMA | 1 + .../L0-MANDATORY/x86/CONFIG_X86_64_SMP | 1 + .../L0-MANDATORY/x86/CONFIG_X86_CMPXCHG64 | 1 + .../L0-MANDATORY/x86/CONFIG_X86_CPU_RESCTRL | 1 + .../L0-MANDATORY/x86/CONFIG_X86_IOPL_IOPERM | 1 + .../L0-MANDATORY/x86/CONFIG_X86_IO_APIC | 1 + 
.../L0-MANDATORY/x86/CONFIG_X86_LOCAL_APIC | 1 + .../configs/L0-MANDATORY/x86/CONFIG_X86_MCE | 1 + .../L0-MANDATORY/x86/CONFIG_X86_MCE_AMD | 1 + .../L0-MANDATORY/x86/CONFIG_X86_MCE_INTEL | 1 + .../configs/L0-MANDATORY/x86/CONFIG_X86_PAT | 1 + .../x86/CONFIG_X86_PLATFORM_DEVICES | 1 + .../configs/L0-MANDATORY/x86/CONFIG_X86_SGX | 1 + .../configs/L0-MANDATORY/x86/CONFIG_X86_TSC | 1 + .../configs/L0-MANDATORY/x86/CONFIG_X86_UMIP | 1 + .../x86/CONFIG_X86_VSYSCALL_EMULATION | 1 + .../L0-MANDATORY/x86/CONFIG_X86_X2APIC | 1 + .../L1-RECOMMEND/arm64/CONFIG_ACPI_AGDI | 1 + .../L1-RECOMMEND/arm64/CONFIG_ACPI_BGRT | 1 + .../arm64/CONFIG_ACPI_CPPC_CPUFREQ | 1 + .../L1-RECOMMEND/arm64/CONFIG_ACPI_DOCK | 1 + .../L1-RECOMMEND/arm64/CONFIG_ACPI_EC_DEBUGFS | 1 + .../arm64/CONFIG_ACPI_REDUCED_HARDWARE_ONLY | 1 + .../L1-RECOMMEND/arm64/CONFIG_ACPI_TAD | 1 + .../L1-RECOMMEND/arm64/CONFIG_AHCI_XGENE | 1 + .../arm64/CONFIG_ALIBABA_UNCORE_DRW_PMU | 1 + .../arm64/CONFIG_ARCH_FORCE_MAX_ORDER | 1 + .../arm64/CONFIG_ARM64_ACPI_PARKING_PROTOCOL | 1 + .../L1-RECOMMEND/arm64/CONFIG_ARM64_AMU_EXTN | 1 + .../arm64/CONFIG_ARM64_DEBUG_PRIORITY_MASKING | 1 + .../L1-RECOMMEND/arm64/CONFIG_ARM64_EPAN | 1 + .../arm64/CONFIG_ARM64_ERRATUM_1024718 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_1165522 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_1286807 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_1319367 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_1418040 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_1463225 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_1508412 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_1530923 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_1542419 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_1742098 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_2051678 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_2054223 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_2067961 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_2077057 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_2441007 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_2441009 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_2457168 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_2645198 | 1 + 
.../arm64/CONFIG_ARM64_ERRATUM_2658417 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_2966298 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_3117295 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_819472 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_824069 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_826319 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_827319 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_832075 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_834220 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_843419 | 1 + .../arm64/CONFIG_ARM64_ERRATUM_845719 | 1 + .../L1-RECOMMEND/arm64/CONFIG_ARM64_PMEM | 1 + .../L1-RECOMMEND/arm64/CONFIG_ARM64_SME | 1 + .../arm64/CONFIG_ARM64_SW_TTBR0_PAN | 1 + .../arm64/CONFIG_ARM64_TAGGED_ADDR_ABI | 1 + .../arm64/CONFIG_ARMV8_DEPRECATED | 1 + .../CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU | 1 + .../L1-RECOMMEND/arm64/CONFIG_ARM_DMC620_PMU | 1 + .../L1-RECOMMEND/arm64/CONFIG_ARM_DSU_PMU | 1 + .../L1-RECOMMEND/arm64/CONFIG_ARM_MHU_V2 | 1 + .../L1-RECOMMEND/arm64/CONFIG_ARM_PMUV3 | 1 + .../arm64/CONFIG_ARM_SCPI_CPUFREQ | 1 + .../arm64/CONFIG_ARM_SCPI_POWER_DOMAIN | 1 + .../arm64/CONFIG_ARM_SCPI_PROTOCOL | 1 + .../arm64/CONFIG_ARM_SDE_INTERFACE | 1 + .../arm64/CONFIG_ARM_SMC_WATCHDOG | 1 + .../CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT | 1 + .../arm64/CONFIG_ARM_SP805_WATCHDOG | 1 + .../arm64/CONFIG_CAVIUM_ERRATUM_22375 | 1 + .../arm64/CONFIG_CAVIUM_ERRATUM_23144 | 1 + .../arm64/CONFIG_CAVIUM_ERRATUM_23154 | 1 + .../arm64/CONFIG_CAVIUM_ERRATUM_27456 | 1 + .../arm64/CONFIG_CAVIUM_ERRATUM_30115 | 1 + .../arm64/CONFIG_CAVIUM_TX2_ERRATUM_219 | 1 + .../L1-RECOMMEND/arm64/CONFIG_CMA_SIZE_MBYTES | 1 + .../configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE | 1 + .../L1-RECOMMEND/arm64/CONFIG_CMDLINE_FORCE | 1 + .../arm64/CONFIG_CMDLINE_FROM_BOOTLOADER | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_CNIC | 1 + .../L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CATU | 1 + .../arm64/CONFIG_CORESIGHT_CPU_DEBUG | 1 + .../CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON | 1 + .../L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI | 1 + 
.../CONFIG_CORESIGHT_CTI_INTEGRATION_REGS | 1 + .../arm64/CONFIG_CORESIGHT_LINKS_AND_SINKS | 1 + .../arm64/CONFIG_CORESIGHT_LINK_AND_SINK_TMC | 1 + .../arm64/CONFIG_CORESIGHT_SINK_ETBV10 | 1 + .../arm64/CONFIG_CORESIGHT_SINK_TPIU | 1 + .../arm64/CONFIG_CORESIGHT_SOURCE_ETM4X | 1 + .../L1-RECOMMEND/arm64/CONFIG_CORESIGHT_STM | 1 + .../arm64/CONFIG_CPUMASK_OFFSTACK | 1 + .../L1-RECOMMEND/arm64/CONFIG_CPU_BIG_ENDIAN | 1 + .../CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE | 1 + .../CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND | 1 + .../arm64/CONFIG_CPU_FREQ_GOV_SCHEDUTIL | 1 + .../arm64/CONFIG_CPU_FREQ_THERMAL | 1 + .../L1-RECOMMEND/arm64/CONFIG_CPU_THERMAL | 1 + .../arm64/CONFIG_CRYPTO_AES_ARM64 | 1 + .../arm64/CONFIG_CRYPTO_AES_ARM64_BS | 1 + .../arm64/CONFIG_CRYPTO_AES_ARM64_CE | 1 + .../arm64/CONFIG_CRYPTO_AES_ARM64_CE_BLK | 1 + .../arm64/CONFIG_CRYPTO_AES_ARM64_CE_CCM | 1 + .../arm64/CONFIG_CRYPTO_AES_ARM64_NEON_BLK | 1 + .../arm64/CONFIG_CRYPTO_CHACHA20_NEON | 1 + .../arm64/CONFIG_CRYPTO_CRCT10DIF_ARM64_CE | 1 + .../arm64/CONFIG_CRYPTO_CURVE25519 | 1 + .../L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_CCP | 1 + .../arm64/CONFIG_CRYPTO_DEV_QAT_C3XXX | 1 + .../arm64/CONFIG_CRYPTO_DEV_QAT_C3XXXVF | 1 + .../arm64/CONFIG_CRYPTO_DEV_QAT_C62X | 1 + .../arm64/CONFIG_CRYPTO_DEV_QAT_C62XVF | 1 + .../arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCC | 1 + .../arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF | 1 + .../arm64/CONFIG_CRYPTO_GHASH_ARM64_CE | 1 + .../arm64/CONFIG_CRYPTO_POLY1305_NEON | 1 + .../arm64/CONFIG_CRYPTO_SHA1_ARM64_CE | 1 + .../arm64/CONFIG_CRYPTO_SHA256_ARM64 | 1 + .../arm64/CONFIG_CRYPTO_SHA2_ARM64_CE | 1 + .../arm64/CONFIG_CRYPTO_SM3_ARM64_CE | 1 + .../L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_NEON | 1 + .../arm64/CONFIG_CRYPTO_SM4_ARM64_CE | 1 + .../arm64/CONFIG_CRYPTO_SM4_ARM64_CE_BLK | 1 + .../arm64/CONFIG_CRYPTO_SM4_ARM64_CE_CCM | 1 + .../arm64/CONFIG_CRYPTO_SM4_ARM64_CE_GCM | 1 + .../arm64/CONFIG_CRYPTO_SM4_ARM64_NEON_BLK | 1 + .../arm64/CONFIG_DEBUG_PERF_USE_VMALLOC | 1 + 
.../L1-RECOMMEND/arm64/CONFIG_DEVICE_PRIVATE | 1 + .../configs/L1-RECOMMEND/arm64/CONFIG_DEVPORT | 1 + .../configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX | 1 + .../L1-RECOMMEND/arm64/CONFIG_DEV_DAX_HMEM | 1 + .../L1-RECOMMEND/arm64/CONFIG_DEV_DAX_KMEM | 1 + .../L1-RECOMMEND/arm64/CONFIG_DEV_DAX_PMEM | 1 + .../L1-RECOMMEND/arm64/CONFIG_DLM_DEBUG | 1 + .../L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_CIK | 1 + .../arm64/CONFIG_DRM_AMDGPU_USERPTR | 1 + .../L1-RECOMMEND/arm64/CONFIG_DRM_AMD_ACP | 1 + .../arm64/CONFIG_DRM_DP_AUX_CHARDEV | 1 + .../L1-RECOMMEND/arm64/CONFIG_DRM_PHYTIUM | 1 + .../L1-RECOMMEND/arm64/CONFIG_DRM_VMWGFX | 1 + .../L1-RECOMMEND/arm64/CONFIG_DWC_PCIE_PMU | 1 + .../L1-RECOMMEND/arm64/CONFIG_DW_DMAC_PCI | 1 + .../L1-RECOMMEND/arm64/CONFIG_EDAC_XGENE | 1 + .../arm64/CONFIG_EFI_ARMSTUB_DTB_LOADER | 1 + .../L1-RECOMMEND/arm64/CONFIG_EFI_COCO_SECRET | 1 + .../arm64/CONFIG_ETM4X_IMPDEF_FEATURE | 1 + .../L1-RECOMMEND/arm64/CONFIG_EXTCON_GPIO | 1 + .../L1-RECOMMEND/arm64/CONFIG_FB_SIMPLE | 1 + .../L1-RECOMMEND/arm64/CONFIG_FB_SSD1307 | 1 + .../L1-RECOMMEND/arm64/CONFIG_FIRMWARE_EDID | 1 + .../arm64/CONFIG_FUJITSU_ERRATUM_010001 | 1 + .../arm64/CONFIG_FUNCTION_PROFILER | 1 + .../arm64/CONFIG_FW_LOADER_USER_HELPER | 1 + .../arm64/CONFIG_GENERIC_ARCH_NUMA | 1 + .../L1-RECOMMEND/arm64/CONFIG_GENERIC_IOREMAP | 1 + .../CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED | 1 + .../L1-RECOMMEND/arm64/CONFIG_GENERIC_PHY | 1 + .../configs/L1-RECOMMEND/arm64/CONFIG_GFS2_FS | 1 + .../arm64/CONFIG_GPIO_GENERIC_PLATFORM | 1 + .../L1-RECOMMEND/arm64/CONFIG_GPIO_HISI | 1 + .../arm64/CONFIG_HISILICON_ERRATUM_161600802 | 1 + .../L1-RECOMMEND/arm64/CONFIG_HISI_DMA | 1 + .../L1-RECOMMEND/arm64/CONFIG_HISI_THERMAL | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3 | 1 + .../L1-RECOMMEND/arm64/CONFIG_HNS3_DCB | 1 + .../L1-RECOMMEND/arm64/CONFIG_HNS3_ENET | 1 + .../L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGE | 1 + .../L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGEVF | 1 + .../L1-RECOMMEND/arm64/CONFIG_HNS_DSAF | 1 + 
.../L1-RECOMMEND/arm64/CONFIG_HNS_ENET | 1 + .../L1-RECOMMEND/arm64/CONFIG_HNS_MDIO | 1 + .../configs/L1-RECOMMEND/arm64/CONFIG_HSA_AMD | 1 + .../configs/L1-RECOMMEND/arm64/CONFIG_HYPERV | 1 + .../L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE | 1 + .../arm64/CONFIG_I2C_SLAVE_EEPROM | 1 + .../L1-RECOMMEND/arm64/CONFIG_I40E_DCB | 1 + .../L1-RECOMMEND/arm64/CONFIG_INDIRECT_PIO | 1 + .../L1-RECOMMEND/arm64/CONFIG_INPUT_MOUSEDEV | 1 + .../L1-RECOMMEND/arm64/CONFIG_INTEL_IDMA64 | 1 + .../L1-RECOMMEND/arm64/CONFIG_INTEL_TH | 1 + .../arm64/CONFIG_IOMMU_DEFAULT_DMA_STRICT | 1 + .../arm64/CONFIG_IOMMU_DEFAULT_PASSTHROUGH | 1 + .../arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE | 1 + .../L1-RECOMMEND/arm64/CONFIG_ISCSI_IBFT | 1 + .../arm64/CONFIG_KDB_DEFAULT_ENABLE | 1 + .../arm64/CONFIG_KEXEC_IMAGE_VERIFY_SIG | 1 + .../L1-RECOMMEND/arm64/CONFIG_KEYBOARD_ATKBD | 1 + .../L1-RECOMMEND/arm64/CONFIG_KUNPENG_HCCS | 1 + .../L1-RECOMMEND/arm64/CONFIG_KUSER_HELPERS | 1 + .../L1-RECOMMEND/arm64/CONFIG_LOG_BUF_SHIFT | 1 + .../arm64/CONFIG_MELLANOX_PLATFORM | 1 + .../L1-RECOMMEND/arm64/CONFIG_MLX5_CORE_IPOIB | 1 + .../L1-RECOMMEND/arm64/CONFIG_MOUSE_PS2 | 1 + .../arm64/CONFIG_NET_VENDOR_HISILICON | 1 + .../arm64/CONFIG_PID_IN_CONTEXTIDR | 1 + .../L1-RECOMMEND/arm64/CONFIG_POWERCAP | 1 + .../arm64/CONFIG_QCOM_FALKOR_ERRATUM_1003 | 1 + .../arm64/CONFIG_QCOM_FALKOR_ERRATUM_1009 | 1 + .../arm64/CONFIG_QCOM_FALKOR_ERRATUM_E1041 | 1 + .../arm64/CONFIG_QCOM_QDF2400_ERRATUM_0065 | 1 + .../arm64/CONFIG_RANDOMIZE_MODULE_REGION_FULL | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_RELR | 1 + .../arm64/CONFIG_RODATA_FULL_DEFAULT_ENABLED | 1 + .../L1-RECOMMEND/arm64/CONFIG_RTC_DRV_EFI | 1 + .../arm64/CONFIG_SATA_AHCI_SEATTLE | 1 + .../L1-RECOMMEND/arm64/CONFIG_SDEI_WATCHDOG | 1 + .../L1-RECOMMEND/arm64/CONFIG_SERIO_AMBAKMI | 1 + .../arm64/CONFIG_SOCIONEXT_SYNQUACER_PREITS | 1 + .../L1-RECOMMEND/arm64/CONFIG_SPI_CADENCE | 1 + .../L1-RECOMMEND/arm64/CONFIG_SPI_DESIGNWARE | 1 + .../L1-RECOMMEND/arm64/CONFIG_SPI_DW_MMIO | 1 + 
.../arm64/CONFIG_SPI_HISI_KUNPENG | 1 + .../L1-RECOMMEND/arm64/CONFIG_SPI_PL022 | 1 + .../configs/L1-RECOMMEND/arm64/CONFIG_SPI_QUP | 1 + .../configs/L1-RECOMMEND/arm64/CONFIG_SPI_XLP | 1 + .../L1-RECOMMEND/arm64/CONFIG_SQUASHFS_LZ4 | 1 + .../configs/L1-RECOMMEND/arm64/CONFIG_STAGING | 1 + .../L1-RECOMMEND/arm64/CONFIG_STM_DUMMY | 1 + .../L1-RECOMMEND/arm64/CONFIG_STM_PROTO_BASIC | 1 + .../L1-RECOMMEND/arm64/CONFIG_STM_PROTO_SYS_T | 1 + .../arm64/CONFIG_STM_SOURCE_CONSOLE | 1 + .../arm64/CONFIG_STM_SOURCE_FTRACE | 1 + .../arm64/CONFIG_STM_SOURCE_HEARTBEAT | 1 + .../L1-RECOMMEND/arm64/CONFIG_TCG_INFINEON | 1 + .../L1-RECOMMEND/arm64/CONFIG_TCP_CONG_CDG | 1 + .../arm64/CONFIG_THERMAL_GOV_BANG_BANG | 1 + .../L1-RECOMMEND/arm64/CONFIG_THERMAL_MMIO | 1 + .../L1-RECOMMEND/arm64/CONFIG_THERMAL_OF | 1 + .../arm64/CONFIG_THERMAL_WRITABLE_TRIPS | 1 + .../L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM | 1 + .../arm64/CONFIG_VFIO_PLATFORM_BASE | 1 + .../arm64/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES | 1 + .../L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB | 1 + .../arm64/CONFIG_VIRTIO_PCI_LIB_LEGACY | 1 + .../L1-RECOMMEND/arm64/CONFIG_VIRT_DRIVERS | 1 + .../L1-RECOMMEND/arm64/CONFIG_VMWARE_VMCI | 1 + .../configs/L1-RECOMMEND/arm64/CONFIG_VMXNET3 | 1 + .../L1-RECOMMEND/arm64/CONFIG_WDAT_WDT | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_XEN | 1 + .../arm64/CONFIG_YITIAN_CPER_RAWDATA | 1 + .../L1-RECOMMEND/default/CONFIG_ACPI_AC | 1 + .../default/CONFIG_ACPI_APEI_EINJ | 1 + .../default/CONFIG_ACPI_APEI_ERST_DEBUG | 1 + .../default/CONFIG_ACPI_APEI_GHES | 1 + .../default/CONFIG_ACPI_APEI_MEMORY_FAILURE | 1 + .../default/CONFIG_ACPI_APEI_PCIEAER | 1 + .../L1-RECOMMEND/default/CONFIG_ACPI_BATTERY | 1 + .../L1-RECOMMEND/default/CONFIG_ACPI_BUTTON | 1 + .../L1-RECOMMEND/default/CONFIG_ACPI_CONFIGFS | 1 + .../default/CONFIG_ACPI_CONTAINER | 1 + .../default/CONFIG_ACPI_CUSTOM_METHOD | 1 + .../L1-RECOMMEND/default/CONFIG_ACPI_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_ACPI_DEBUGGER | 1 + 
.../L1-RECOMMEND/default/CONFIG_ACPI_FAN | 1 + .../L1-RECOMMEND/default/CONFIG_ACPI_HED | 1 + .../L1-RECOMMEND/default/CONFIG_ACPI_HMAT | 1 + .../default/CONFIG_ACPI_HOTPLUG_MEMORY | 1 + .../L1-RECOMMEND/default/CONFIG_ACPI_NFIT | 1 + .../L1-RECOMMEND/default/CONFIG_ACPI_PCC | 1 + .../L1-RECOMMEND/default/CONFIG_ACPI_PCI_SLOT | 1 + .../L1-RECOMMEND/default/CONFIG_ACPI_PRMT | 1 + .../default/CONFIG_ACPI_SPCR_TABLE | 1 + .../default/CONFIG_ACPI_TABLE_UPGRADE | 1 + .../L1-RECOMMEND/default/CONFIG_ACPI_THERMAL | 1 + .../L1-RECOMMEND/default/CONFIG_ACPI_VIDEO | 1 + .../L1-RECOMMEND/default/CONFIG_ASYNC_CORE | 1 + .../L1-RECOMMEND/default/CONFIG_ASYNC_MEMCPY | 1 + .../L1-RECOMMEND/default/CONFIG_ASYNC_PQ | 1 + .../default/CONFIG_ASYNC_RAID6_RECOV | 1 + .../default/CONFIG_ASYNC_RAID6_TEST | 1 + .../L1-RECOMMEND/default/CONFIG_ASYNC_TX_DMA | 1 + .../L1-RECOMMEND/default/CONFIG_ASYNC_XOR | 1 + .../configs/L1-RECOMMEND/default/CONFIG_ATA | 1 + .../L1-RECOMMEND/default/CONFIG_ATA_ACPI | 1 + .../L1-RECOMMEND/default/CONFIG_ATA_BMDMA | 1 + .../L1-RECOMMEND/default/CONFIG_ATA_FORCE | 1 + .../L1-RECOMMEND/default/CONFIG_ATA_GENERIC | 1 + .../L1-RECOMMEND/default/CONFIG_ATA_OVER_ETH | 1 + .../L1-RECOMMEND/default/CONFIG_ATA_PIIX | 1 + .../L1-RECOMMEND/default/CONFIG_ATA_SFF | 1 + .../default/CONFIG_ATA_VERBOSE_ERROR | 1 + .../L1-RECOMMEND/default/CONFIG_ATM_DRIVERS | 1 + .../default/CONFIG_ATOMIC64_SELFTEST | 1 + .../default/CONFIG_BALLOON_COMPACTION | 1 + .../L1-RECOMMEND/default/CONFIG_BCACHE | 1 + .../L1-RECOMMEND/default/CONFIG_BCMGENET | 1 + .../default/CONFIG_BFQ_CGROUP_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_BINFMT_MISC | 1 + .../default/CONFIG_BLKDEV_UBLK_LEGACY_OPCODES | 1 + .../default/CONFIG_BLK_DEBUG_FS_ZONED | 1 + .../L1-RECOMMEND/default/CONFIG_BLK_DEV_BSG | 1 + .../default/CONFIG_BLK_DEV_BSGLIB | 1 + .../L1-RECOMMEND/default/CONFIG_BLK_DEV_DM | 1 + .../L1-RECOMMEND/default/CONFIG_BLK_DEV_DRBD | 1 + .../default/CONFIG_BLK_DEV_INTEGRITY | 1 + 
.../default/CONFIG_BLK_DEV_INTEGRITY_T10 | 1 + .../L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP | 1 + .../default/CONFIG_BLK_DEV_LOOP_MIN_COUNT | 1 + .../L1-RECOMMEND/default/CONFIG_BLK_DEV_MD | 1 + .../L1-RECOMMEND/default/CONFIG_BLK_DEV_NBD | 1 + .../default/CONFIG_BLK_DEV_NULL_BLK | 1 + .../default/CONFIG_BLK_DEV_PCIESSD_MTIP32XX | 1 + .../L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM | 1 + .../default/CONFIG_BLK_DEV_RAM_COUNT | 1 + .../default/CONFIG_BLK_DEV_RAM_SIZE | 1 + .../L1-RECOMMEND/default/CONFIG_BLK_DEV_RBD | 1 + .../L1-RECOMMEND/default/CONFIG_BLK_DEV_SD | 1 + .../L1-RECOMMEND/default/CONFIG_BLK_DEV_SR | 1 + .../default/CONFIG_BLK_DEV_THROTTLING_LOW | 1 + .../L1-RECOMMEND/default/CONFIG_BLK_DEV_UBLK | 1 + .../L1-RECOMMEND/default/CONFIG_BLK_DEV_ZONED | 1 + .../L1-RECOMMEND/default/CONFIG_BLK_WBT | 1 + .../configs/L1-RECOMMEND/default/CONFIG_BNX2 | 1 + .../configs/L1-RECOMMEND/default/CONFIG_BNX2X | 1 + .../L1-RECOMMEND/default/CONFIG_BNX2X_SRIOV | 1 + .../configs/L1-RECOMMEND/default/CONFIG_BNXT | 1 + .../L1-RECOMMEND/default/CONFIG_BNXT_DCB | 1 + .../default/CONFIG_BNXT_FLOWER_OFFLOAD | 1 + .../L1-RECOMMEND/default/CONFIG_BNXT_HWMON | 1 + .../L1-RECOMMEND/default/CONFIG_BNXT_SRIOV | 1 + .../default/CONFIG_BOOTPARAM_HARDLOCKUP_PANIC | 1 + .../default/CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC | 1 + .../default/CONFIG_BOOTTIME_TRACING | 1 + .../L1-RECOMMEND/default/CONFIG_BOOT_CONFIG | 1 + .../default/CONFIG_BOOT_PRINTK_DELAY | 1 + .../L1-RECOMMEND/default/CONFIG_BPFILTER | 1 + .../default/CONFIG_BPF_JIT_ALWAYS_ON | 1 + .../default/CONFIG_BPF_KPROBE_OVERRIDE | 1 + .../default/CONFIG_BPF_STREAM_PARSER | 1 + .../default/CONFIG_BRANCH_PROFILE_NONE | 1 + .../default/CONFIG_BRIDGE_EBT_802_3 | 1 + .../default/CONFIG_BRIDGE_EBT_AMONG | 1 + .../default/CONFIG_BRIDGE_EBT_ARP | 1 + .../default/CONFIG_BRIDGE_EBT_ARPREPLY | 1 + .../default/CONFIG_BRIDGE_EBT_BROUTE | 1 + .../default/CONFIG_BRIDGE_EBT_DNAT | 1 + .../L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP | 1 + 
.../default/CONFIG_BRIDGE_EBT_IP6 | 1 + .../default/CONFIG_BRIDGE_EBT_LIMIT | 1 + .../default/CONFIG_BRIDGE_EBT_LOG | 1 + .../default/CONFIG_BRIDGE_EBT_MARK | 1 + .../default/CONFIG_BRIDGE_EBT_MARK_T | 1 + .../default/CONFIG_BRIDGE_EBT_NFLOG | 1 + .../default/CONFIG_BRIDGE_EBT_PKTTYPE | 1 + .../default/CONFIG_BRIDGE_EBT_REDIRECT | 1 + .../default/CONFIG_BRIDGE_EBT_SNAT | 1 + .../default/CONFIG_BRIDGE_EBT_STP | 1 + .../default/CONFIG_BRIDGE_EBT_T_FILTER | 1 + .../default/CONFIG_BRIDGE_EBT_T_NAT | 1 + .../default/CONFIG_BRIDGE_EBT_VLAN | 1 + .../default/CONFIG_BRIDGE_IGMP_SNOOPING | 1 + .../L1-RECOMMEND/default/CONFIG_BRIDGE_MRP | 1 + .../default/CONFIG_BRIDGE_NETFILTER | 1 + .../default/CONFIG_BRIDGE_NF_EBTABLES | 1 + .../default/CONFIG_BRIDGE_VLAN_FILTERING | 1 + .../default/CONFIG_BSD_PROCESS_ACCT | 1 + .../default/CONFIG_BSD_PROCESS_ACCT_V3 | 1 + .../L1-RECOMMEND/default/CONFIG_BTRFS_FS | 1 + .../default/CONFIG_BUG_ON_DATA_CORRUPTION | 1 + .../L1-RECOMMEND/default/CONFIG_BUILD_SALT | 1 + .../default/CONFIG_CACHEFILES_DEBUG | 1 + .../configs/L1-RECOMMEND/default/CONFIG_CDROM | 1 + .../L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD | 1 + .../default/CONFIG_CDROM_PKTCDVD_BUFFERS | 1 + .../default/CONFIG_CDROM_PKTCDVD_WCACHE | 1 + .../L1-RECOMMEND/default/CONFIG_CEPH_FS | 1 + .../L1-RECOMMEND/default/CONFIG_CEPH_FSCACHE | 1 + .../default/CONFIG_CEPH_FS_POSIX_ACL | 1 + .../default/CONFIG_CEPH_FS_SECURITY_LABEL | 1 + .../L1-RECOMMEND/default/CONFIG_CGROUP_DEBUG | 1 + .../default/CONFIG_CGROUP_FREEZER | 1 + .../default/CONFIG_CGROUP_NET_CLASSID | 1 + .../default/CONFIG_CGROUP_NET_PRIO | 1 + .../default/CONFIG_CGROUP_WRITEBACK | 1 + .../L1-RECOMMEND/default/CONFIG_CHR_DEV_SCH | 1 + .../L1-RECOMMEND/default/CONFIG_CHR_DEV_SG | 1 + .../L1-RECOMMEND/default/CONFIG_CHR_DEV_ST | 1 + .../L1-RECOMMEND/default/CONFIG_CLS_U32_MARK | 1 + .../L1-RECOMMEND/default/CONFIG_CLS_U32_PERF | 1 + .../configs/L1-RECOMMEND/default/CONFIG_CMA | 1 + .../L1-RECOMMEND/default/CONFIG_CMA_ALIGNMENT | 1 
+ .../L1-RECOMMEND/default/CONFIG_CMA_AREAS | 1 + .../L1-RECOMMEND/default/CONFIG_CMA_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_CMA_DEBUGFS | 1 + .../default/CONFIG_CMA_SIZE_SEL_MAX | 1 + .../default/CONFIG_CMA_SIZE_SEL_MBYTES | 1 + .../default/CONFIG_CMA_SIZE_SEL_MIN | 1 + .../default/CONFIG_CMA_SIZE_SEL_PERCENTAGE | 1 + .../L1-RECOMMEND/default/CONFIG_COMPAT | 1 + .../default/CONFIG_COMPAT_32BIT_TIME | 1 + .../L1-RECOMMEND/default/CONFIG_COMPAT_BRK | 1 + .../L1-RECOMMEND/default/CONFIG_COMPILE_TEST | 1 + .../L1-RECOMMEND/default/CONFIG_CONNECTOR | 1 + .../default/CONFIG_CONSOLE_LOGLEVEL_DEFAULT | 1 + .../default/CONFIG_CONSOLE_LOGLEVEL_QUIET | 1 + .../CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS | 1 + .../CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE | 1 + .../CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE | 1 + .../CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL | 1 + .../CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE | 1 + .../default/CONFIG_CPU_FREQ_GOV_CONSERVATIVE | 1 + .../default/CONFIG_CPU_FREQ_GOV_ONDEMAND | 1 + .../default/CONFIG_CPU_FREQ_GOV_PERFORMANCE | 1 + .../default/CONFIG_CPU_FREQ_GOV_POWERSAVE | 1 + .../default/CONFIG_CPU_FREQ_GOV_USERSPACE | 1 + .../L1-RECOMMEND/default/CONFIG_CPU_FREQ_STAT | 1 + .../default/CONFIG_CPU_IDLE_GOV_LADDER | 1 + .../default/CONFIG_CPU_IDLE_GOV_MENU | 1 + .../default/CONFIG_CPU_IDLE_GOV_TEO | 1 + .../default/CONFIG_CRAMFS_BLOCKDEV | 1 + .../L1-RECOMMEND/default/CONFIG_CRAMFS_MTD | 1 + .../default/CONFIG_CROSS_MEMORY_ATTACH | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_842 | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_ACOMP2 | 1 + .../default/CONFIG_CRYPTO_ADIANTUM | 1 + .../default/CONFIG_CRYPTO_AEGIS128 | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_AES_TI | 1 + .../default/CONFIG_CRYPTO_ANSI_CPRNG | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_ANUBIS | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_ARC4 | 1 + .../default/CONFIG_CRYPTO_AUTHENC | 1 + .../default/CONFIG_CRYPTO_BLAKE2B | 1 + .../default/CONFIG_CRYPTO_BLOWFISH | 1 + 
.../default/CONFIG_CRYPTO_BLOWFISH_COMMON | 1 + .../default/CONFIG_CRYPTO_CAMELLIA | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_CAST5 | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_CAST6 | 1 + .../default/CONFIG_CRYPTO_CAST_COMMON | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_CBC | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_CCM | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_CFB | 1 + .../default/CONFIG_CRYPTO_CHACHA20 | 1 + .../default/CONFIG_CRYPTO_CHACHA20POLY1305 | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_CMAC | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32 | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32C | 1 + .../default/CONFIG_CRYPTO_CRCT10DIF | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_CRYPTD | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_CTR | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_CTS | 1 + .../default/CONFIG_CRYPTO_DEFLATE | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_DES | 1 + .../default/CONFIG_CRYPTO_DEV_VIRTIO | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_DH | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG | 1 + .../default/CONFIG_CRYPTO_DRBG_CTR | 1 + .../default/CONFIG_CRYPTO_DRBG_HASH | 1 + .../default/CONFIG_CRYPTO_DRBG_HMAC | 1 + .../default/CONFIG_CRYPTO_DRBG_MENU | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_ECB | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_ECC | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_ECDH | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_ECDSA | 1 + .../default/CONFIG_CRYPTO_ECHAINIV | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_ESSIV | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_FCRYPT | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_FIPS | 1 + .../default/CONFIG_CRYPTO_HASH_INFO | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_HMAC | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_HW | 1 + .../default/CONFIG_CRYPTO_JITTERENTROPY | 1 + .../default/CONFIG_CRYPTO_KEYWRAP | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_KHAZAD | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_KPP | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_KPP2 | 1 + 
.../L1-RECOMMEND/default/CONFIG_CRYPTO_LRW | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4 | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4HC | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_LZO | 1 + .../CONFIG_CRYPTO_MANAGER_DISABLE_TESTS | 1 + .../default/CONFIG_CRYPTO_MANAGER_EXTRA_TESTS | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_MD4 | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_MD5 | 1 + .../default/CONFIG_CRYPTO_MICHAEL_MIC | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_NULL | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_NULL2 | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_OFB | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_PCBC | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_PCRYPT | 1 + .../default/CONFIG_CRYPTO_POLY1305 | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_RMD160 | 1 + .../default/CONFIG_CRYPTO_RNG_DEFAULT | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_SEED | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_SEQIV | 1 + .../default/CONFIG_CRYPTO_SERPENT | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_SHA1 | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_SHA3 | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_SHA512 | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_STATS | 1 + .../default/CONFIG_CRYPTO_STREEBOG | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_TEA | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_TEST | 1 + .../default/CONFIG_CRYPTO_TWOFISH | 1 + .../default/CONFIG_CRYPTO_TWOFISH_COMMON | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_USER | 1 + .../default/CONFIG_CRYPTO_USER_API | 1 + .../default/CONFIG_CRYPTO_USER_API_AEAD | 1 + .../CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE | 1 + .../default/CONFIG_CRYPTO_USER_API_HASH | 1 + .../default/CONFIG_CRYPTO_USER_API_RNG | 1 + .../default/CONFIG_CRYPTO_USER_API_RNG_CAVP | 1 + .../default/CONFIG_CRYPTO_USER_API_SKCIPHER | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_VMAC | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_WP512 | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_XCBC | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_XTS | 1 + 
.../L1-RECOMMEND/default/CONFIG_CRYPTO_XXHASH | 1 + .../L1-RECOMMEND/default/CONFIG_CRYPTO_ZSTD | 1 + .../configs/L1-RECOMMEND/default/CONFIG_CUSE | 1 + .../L1-RECOMMEND/default/CONFIG_CXL_ACPI | 1 + .../L1-RECOMMEND/default/CONFIG_CXL_MEM | 1 + .../default/CONFIG_CXL_MEM_RAW_COMMANDS | 1 + .../L1-RECOMMEND/default/CONFIG_CXL_PCI | 1 + .../L1-RECOMMEND/default/CONFIG_CXL_PMEM | 1 + .../L1-RECOMMEND/default/CONFIG_CXL_PMU | 1 + .../L1-RECOMMEND/default/CONFIG_CXL_PORT | 1 + .../L1-RECOMMEND/default/CONFIG_CXL_REGION | 1 + .../L1-RECOMMEND/default/CONFIG_CXL_SUSPEND | 1 + .../configs/L1-RECOMMEND/default/CONFIG_DAMON | 1 + .../L1-RECOMMEND/default/CONFIG_DAMON_DBGFS | 1 + .../L1-RECOMMEND/default/CONFIG_DAMON_PADDR | 1 + .../L1-RECOMMEND/default/CONFIG_DAMON_VADDR | 1 + .../configs/L1-RECOMMEND/default/CONFIG_DCB | 1 + .../default/CONFIG_DEBUG_FORCE_WEAK_PER_CPU | 1 + .../default/CONFIG_DEBUG_FS_ALLOW_ALL | 1 + .../default/CONFIG_DEBUG_FS_ALLOW_NONE | 1 + .../default/CONFIG_DEBUG_FS_DISALLOW_MOUNT | 1 + .../default/CONFIG_DEBUG_INFO_BTF_MODULES | 1 + .../CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT | 1 + .../default/CONFIG_DEBUG_INFO_REDUCED | 1 + .../default/CONFIG_DEBUG_INFO_SPLIT | 1 + .../default/CONFIG_DEBUG_KERNEL_DC | 1 + .../default/CONFIG_DEBUG_KMEMLEAK | 1 + .../L1-RECOMMEND/default/CONFIG_DEBUG_LIST | 1 + .../default/CONFIG_DEBUG_MEMORY_INIT | 1 + .../default/CONFIG_DEBUG_NOTIFIERS | 1 + .../default/CONFIG_DEBUG_PAGEALLOC | 1 + .../default/CONFIG_DEBUG_PAGE_REF | 1 + .../L1-RECOMMEND/default/CONFIG_DEBUG_PLIST | 1 + .../default/CONFIG_DEBUG_RODATA_TEST | 1 + .../L1-RECOMMEND/default/CONFIG_DEBUG_SG | 1 + .../L1-RECOMMEND/default/CONFIG_DEBUG_WX | 1 + .../L1-RECOMMEND/default/CONFIG_DEFAULT_CODEL | 1 + .../L1-RECOMMEND/default/CONFIG_DEFAULT_FQ | 1 + .../default/CONFIG_DEFAULT_FQ_CODEL | 1 + .../default/CONFIG_DEFAULT_HUNG_TASK_TIMEOUT | 1 + .../default/CONFIG_DEFAULT_MMAP_MIN_ADDR | 1 + .../default/CONFIG_DEFAULT_NET_SCH | 1 + 
.../default/CONFIG_DEFAULT_PFIFO_FAST | 1 + .../L1-RECOMMEND/default/CONFIG_DEFAULT_RENO | 1 + .../default/CONFIG_DEFAULT_SECURITY_SELINUX | 1 + .../L1-RECOMMEND/default/CONFIG_DEFAULT_SFQ | 1 + .../default/CONFIG_DEFAULT_TCP_CONG | 1 + .../L1-RECOMMEND/default/CONFIG_DEVMEM | 1 + .../L1-RECOMMEND/default/CONFIG_DEV_DAX_CXL | 1 + .../default/CONFIG_DEV_DAX_HMEM_DEVICES | 1 + .../configs/L1-RECOMMEND/default/CONFIG_DLM | 1 + .../default/CONFIG_DMADEVICES_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_DMATEST | 1 + .../L1-RECOMMEND/default/CONFIG_DMA_API_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_DMA_CMA | 1 + .../L1-RECOMMEND/default/CONFIG_DMA_ENGINE | 1 + .../configs/L1-RECOMMEND/default/CONFIG_DMIID | 1 + .../L1-RECOMMEND/default/CONFIG_DMI_SYSFS | 1 + .../L1-RECOMMEND/default/CONFIG_DM_CACHE | 1 + .../L1-RECOMMEND/default/CONFIG_DM_CACHE_SMQ | 1 + .../L1-RECOMMEND/default/CONFIG_DM_CRYPT | 1 + .../L1-RECOMMEND/default/CONFIG_DM_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_DM_DELAY | 1 + .../L1-RECOMMEND/default/CONFIG_DM_ERA | 1 + .../L1-RECOMMEND/default/CONFIG_DM_FLAKEY | 1 + .../L1-RECOMMEND/default/CONFIG_DM_INTEGRITY | 1 + .../default/CONFIG_DM_LOG_USERSPACE | 1 + .../L1-RECOMMEND/default/CONFIG_DM_LOG_WRITES | 1 + .../L1-RECOMMEND/default/CONFIG_DM_MIRROR | 1 + .../L1-RECOMMEND/default/CONFIG_DM_MULTIPATH | 1 + .../default/CONFIG_DM_MULTIPATH_QL | 1 + .../default/CONFIG_DM_MULTIPATH_ST | 1 + .../L1-RECOMMEND/default/CONFIG_DM_RAID | 1 + .../L1-RECOMMEND/default/CONFIG_DM_SNAPSHOT | 1 + .../L1-RECOMMEND/default/CONFIG_DM_SWITCH | 1 + .../default/CONFIG_DM_THIN_PROVISIONING | 1 + .../L1-RECOMMEND/default/CONFIG_DM_UEVENT | 1 + .../L1-RECOMMEND/default/CONFIG_DM_VERITY | 1 + .../L1-RECOMMEND/default/CONFIG_DM_WRITECACHE | 1 + .../L1-RECOMMEND/default/CONFIG_DM_ZERO | 1 + .../L1-RECOMMEND/default/CONFIG_DM_ZONED | 1 + .../L1-RECOMMEND/default/CONFIG_DRM_AMDGPU | 1 + .../L1-RECOMMEND/default/CONFIG_DRM_AMDGPU_SI | 1 + .../L1-RECOMMEND/default/CONFIG_DRM_AMD_DC | 1 
+ .../L1-RECOMMEND/default/CONFIG_DRM_AST | 1 + .../L1-RECOMMEND/default/CONFIG_DRM_BOCHS | 1 + .../default/CONFIG_DRM_CIRRUS_QEMU | 1 + .../default/CONFIG_DRM_FBDEV_EMULATION | 1 + .../default/CONFIG_DRM_FBDEV_OVERALLOC | 1 + .../default/CONFIG_DRM_I2C_CH7006 | 1 + .../L1-RECOMMEND/default/CONFIG_DRM_INSPUR | 1 + .../default/CONFIG_DRM_LOAD_EDID_FIRMWARE | 1 + .../L1-RECOMMEND/default/CONFIG_DRM_MGAG200 | 1 + .../L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU | 1 + .../default/CONFIG_DRM_NOUVEAU_BACKLIGHT | 1 + .../L1-RECOMMEND/default/CONFIG_DRM_QXL | 1 + .../L1-RECOMMEND/default/CONFIG_DRM_RADEON | 1 + .../default/CONFIG_DRM_RADEON_USERPTR | 1 + .../L1-RECOMMEND/default/CONFIG_DRM_UDL | 1 + .../default/CONFIG_DRM_VIRTIO_GPU | 1 + .../configs/L1-RECOMMEND/default/CONFIG_DUMMY | 1 + .../L1-RECOMMEND/default/CONFIG_DW_DMAC | 1 + .../configs/L1-RECOMMEND/default/CONFIG_E100 | 1 + .../configs/L1-RECOMMEND/default/CONFIG_E1000 | 1 + .../L1-RECOMMEND/default/CONFIG_E1000E | 1 + .../L1-RECOMMEND/default/CONFIG_EDAC_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_EDAC_GHES | 1 + .../default/CONFIG_EDAC_LEGACY_SYSFS | 1 + .../L1-RECOMMEND/default/CONFIG_EFIVAR_FS | 1 + .../default/CONFIG_EFI_CUSTOM_SSDT_OVERLAYS | 1 + .../default/CONFIG_EFI_SOFT_RESERVE | 1 + .../L1-RECOMMEND/default/CONFIG_EFI_STUB | 1 + .../default/CONFIG_EFI_VARS_PSTORE | 1 + .../CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE | 1 + .../default/CONFIG_ENCRYPTED_KEYS | 1 + .../default/CONFIG_EROFS_FS_DEBUG | 1 + .../default/CONFIG_EROFS_FS_ONDEMAND | 1 + .../default/CONFIG_EROFS_FS_POSIX_ACL | 1 + .../default/CONFIG_EROFS_FS_SECURITY | 1 + .../default/CONFIG_EROFS_FS_XATTR | 1 + .../L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP | 1 + .../default/CONFIG_EROFS_FS_ZIP_DEFLATE | 1 + .../default/CONFIG_EROFS_FS_ZIP_LZMA | 1 + .../default/CONFIG_EVM_ADD_XATTRS | 1 + .../default/CONFIG_EVM_ATTR_FSUUID | 1 + .../L1-RECOMMEND/default/CONFIG_EVM_LOAD_X509 | 1 + .../L1-RECOMMEND/default/CONFIG_EVM_X509_PATH | 1 + 
.../L1-RECOMMEND/default/CONFIG_EXFAT_FS | 1 + .../default/CONFIG_EXPORTFS_BLOCK_OPS | 1 + .../L1-RECOMMEND/default/CONFIG_EXT2_FS | 1 + .../L1-RECOMMEND/default/CONFIG_EXT4_DEBUG | 1 + .../default/CONFIG_EXT4_USE_FOR_EXT2 | 1 + .../default/CONFIG_EXTRA_FIRMWARE | 1 + .../L1-RECOMMEND/default/CONFIG_FAILOVER | 1 + .../CONFIG_FANOTIFY_ACCESS_PERMISSIONS | 1 + .../default/CONFIG_FAT_DEFAULT_CODEPAGE | 1 + .../default/CONFIG_FAT_DEFAULT_IOCHARSET | 1 + .../default/CONFIG_FAT_DEFAULT_UTF8 | 1 + .../default/CONFIG_FAULT_INJECTION | 1 + .../L1-RECOMMEND/default/CONFIG_FB_EFI | 1 + .../L1-RECOMMEND/default/CONFIG_FB_LS2K500 | 1 + .../default/CONFIG_FB_TILEBLITTING | 1 + .../configs/L1-RECOMMEND/default/CONFIG_FCOE | 1 + .../default/CONFIG_FIX_EARLYCON_MEM | 1 + .../configs/L1-RECOMMEND/default/CONFIG_FM10K | 1 + .../default/CONFIG_FORTIFY_SOURCE | 1 + .../CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY | 1 + .../CONFIG_FRAMEBUFFER_CONSOLE_ROTATION | 1 + .../L1-RECOMMEND/default/CONFIG_FRAME_WARN | 1 + .../L1-RECOMMEND/default/CONFIG_FSCACHE_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_FSCACHE_STATS | 1 + .../default/CONFIG_FTRACE_RECORD_RECURSION | 1 + .../L1-RECOMMEND/default/CONFIG_FUSE_DAX | 1 + .../L1-RECOMMEND/default/CONFIG_FUSION | 1 + .../L1-RECOMMEND/default/CONFIG_FUSION_CTL | 1 + .../L1-RECOMMEND/default/CONFIG_FUSION_FC | 1 + .../default/CONFIG_FUSION_LOGGING | 1 + .../default/CONFIG_FUSION_MAX_SGE | 1 + .../L1-RECOMMEND/default/CONFIG_FUSION_SAS | 1 + .../L1-RECOMMEND/default/CONFIG_FUSION_SPI | 1 + .../L1-RECOMMEND/default/CONFIG_FW_CACHE | 1 + .../L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS | 1 + .../default/CONFIG_FW_CFG_SYSFS_CMDLINE | 1 + .../L1-RECOMMEND/default/CONFIG_GACT_PROB | 1 + .../L1-RECOMMEND/default/CONFIG_GCOV_KERNEL | 1 + .../L1-RECOMMEND/default/CONFIG_GDB_SCRIPTS | 1 + .../default/CONFIG_GENERIC_IRQ_DEBUGFS | 1 + .../default/CONFIG_GENERIC_IRQ_INJECTION | 1 + .../default/CONFIG_GENERIC_IRQ_PROBE | 1 + .../default/CONFIG_GENERIC_IRQ_SHOW | 1 + 
.../L1-RECOMMEND/default/CONFIG_GENEVE | 1 + .../default/CONFIG_GET_FREE_REGION | 1 + .../L1-RECOMMEND/default/CONFIG_GPIO_DS4520 | 1 + .../L1-RECOMMEND/default/CONFIG_GPIO_FXL6408 | 1 + .../L1-RECOMMEND/default/CONFIG_GPIO_GENERIC | 1 + .../L1-RECOMMEND/default/CONFIG_GPIO_LATCH | 1 + .../L1-RECOMMEND/default/CONFIG_GPIO_SIM | 1 + .../L1-RECOMMEND/default/CONFIG_GPIO_VIRTIO | 1 + .../L1-RECOMMEND/default/CONFIG_GP_PCI1XXXX | 1 + .../default/CONFIG_GUEST_PERF_EVENTS | 1 + .../L1-RECOMMEND/default/CONFIG_GUP_TEST | 1 + .../default/CONFIG_HARDENED_USERCOPY | 1 + .../default/CONFIG_HEADERS_INSTALL | 1 + .../default/CONFIG_HIBERNATE_CALLBACKS | 1 + .../L1-RECOMMEND/default/CONFIG_HIBERNATION | 1 + .../default/CONFIG_HIBERNATION_SNAPSHOT_DEV | 1 + .../configs/L1-RECOMMEND/default/CONFIG_HID | 1 + .../L1-RECOMMEND/default/CONFIG_HID_SUPPORT | 1 + .../configs/L1-RECOMMEND/default/CONFIG_HINIC | 1 + .../L1-RECOMMEND/default/CONFIG_HIST_TRIGGERS | 1 + .../default/CONFIG_HOTPLUG_PCI_ACPI | 1 + .../L1-RECOMMEND/default/CONFIG_HWLAT_TRACER | 1 + .../default/CONFIG_HWPOISON_INJECT | 1 + .../L1-RECOMMEND/default/CONFIG_HW_RANDOM_TPM | 1 + .../default/CONFIG_HYDCU_FIXUP_HEADER | 1 + .../L1-RECOMMEND/default/CONFIG_I2C_CHARDEV | 1 + .../L1-RECOMMEND/default/CONFIG_I2C_MUX | 1 + .../L1-RECOMMEND/default/CONFIG_I2C_SMBUS | 1 + .../L1-RECOMMEND/default/CONFIG_I6300ESB_WDT | 1 + .../configs/L1-RECOMMEND/default/CONFIG_IFB | 1 + .../L1-RECOMMEND/default/CONFIG_IGB_HWMON | 1 + .../configs/L1-RECOMMEND/default/CONFIG_IGC | 1 + .../L1-RECOMMEND/default/CONFIG_IKCONFIG | 1 + .../L1-RECOMMEND/default/CONFIG_IKCONFIG_PROC | 1 + .../L1-RECOMMEND/default/CONFIG_IKHEADERS | 1 + .../default/CONFIG_ILLEGAL_POINTER_VALUE | 1 + .../L1-RECOMMEND/default/CONFIG_IMA_APPRAISE | 1 + .../default/CONFIG_IMA_APPRAISE_BOOTPARAM | 1 + .../default/CONFIG_IMA_APPRAISE_BUILD_POLICY | 1 + .../default/CONFIG_IMA_APPRAISE_MODSIG | 1 + .../CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS | 1 + 
.../CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS | 1 + .../CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS | 1 + .../CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS | 1 + .../default/CONFIG_IMA_APPRAISE_SIGNED_INIT | 1 + .../default/CONFIG_IMA_ARCH_POLICY | 1 + .../default/CONFIG_IMA_BLACKLIST_KEYRING | 1 + .../default/CONFIG_IMA_DEFAULT_HASH | 1 + .../default/CONFIG_IMA_DEFAULT_HASH_SHA1 | 1 + .../default/CONFIG_IMA_DEFAULT_HASH_SHA256 | 1 + .../default/CONFIG_IMA_DEFAULT_HASH_SHA512 | 1 + .../default/CONFIG_IMA_DEFAULT_HASH_SM3 | 1 + .../default/CONFIG_IMA_DEFAULT_TEMPLATE | 1 + ...INGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY | 1 + .../L1-RECOMMEND/default/CONFIG_IMA_LOAD_X509 | 1 + .../L1-RECOMMEND/default/CONFIG_IMA_LSM_RULES | 1 + .../CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS | 1 + .../default/CONFIG_IMA_MEASURE_PCR_IDX | 1 + .../default/CONFIG_IMA_NG_TEMPLATE | 1 + .../default/CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS | 1 + .../default/CONFIG_IMA_READ_POLICY | 1 + .../CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT | 1 + .../default/CONFIG_IMA_SIG_TEMPLATE | 1 + .../default/CONFIG_IMA_WRITE_POLICY | 1 + .../L1-RECOMMEND/default/CONFIG_IMA_X509_PATH | 1 + .../L1-RECOMMEND/default/CONFIG_INET6_AH | 1 + .../L1-RECOMMEND/default/CONFIG_INET6_ESP | 1 + .../default/CONFIG_INET6_ESPINTCP | 1 + .../default/CONFIG_INET6_ESP_OFFLOAD | 1 + .../L1-RECOMMEND/default/CONFIG_INET6_IPCOMP | 1 + .../L1-RECOMMEND/default/CONFIG_INET6_TUNNEL | 1 + .../default/CONFIG_INET6_XFRM_TUNNEL | 1 + .../L1-RECOMMEND/default/CONFIG_INET_AH | 1 + .../default/CONFIG_INET_DIAG_DESTROY | 1 + .../L1-RECOMMEND/default/CONFIG_INET_ESP | 1 + .../L1-RECOMMEND/default/CONFIG_INET_ESPINTCP | 1 + .../default/CONFIG_INET_ESP_OFFLOAD | 1 + .../L1-RECOMMEND/default/CONFIG_INET_IPCOMP | 1 + .../L1-RECOMMEND/default/CONFIG_INET_RAW_DIAG | 1 + .../default/CONFIG_INFINIBAND_ADDR_TRANS | 1 + .../default/CONFIG_INFINIBAND_ERDMA | 1 + .../default/CONFIG_INFINIBAND_IPOIB | 1 + .../default/CONFIG_INFINIBAND_IPOIB_CM | 1 + 
.../default/CONFIG_INFINIBAND_IPOIB_DEBUG | 1 + .../default/CONFIG_INFINIBAND_ISER | 1 + .../default/CONFIG_INFINIBAND_ISERT | 1 + .../default/CONFIG_INFINIBAND_MTHCA | 1 + .../CONFIG_INFINIBAND_ON_DEMAND_PAGING | 1 + .../default/CONFIG_INFINIBAND_RTRS_CLIENT | 1 + .../default/CONFIG_INFINIBAND_RTRS_SERVER | 1 + .../default/CONFIG_INFINIBAND_SRP | 1 + .../default/CONFIG_INFINIBAND_SRPT | 1 + .../default/CONFIG_INFINIBAND_USER_ACCESS | 1 + .../default/CONFIG_INFINIBAND_USER_MAD | 1 + .../default/CONFIG_INITRAMFS_SOURCE | 1 + .../default/CONFIG_INIT_ON_ALLOC_DEFAULT_ON | 1 + .../default/CONFIG_INIT_ON_FREE_DEFAULT_ON | 1 + .../L1-RECOMMEND/default/CONFIG_INOTIFY_USER | 1 + .../L1-RECOMMEND/default/CONFIG_INPUT_EVDEV | 1 + .../default/CONFIG_INTEGRITY_ASYMMETRIC_KEYS | 1 + .../default/CONFIG_INTEGRITY_AUDIT | 1 + .../default/CONFIG_INTEGRITY_PLATFORM_KEYRING | 1 + .../default/CONFIG_INTEGRITY_SIGNATURE | 1 + .../default/CONFIG_INTEGRITY_TRUSTED_KEYRING | 1 + .../L1-RECOMMEND/default/CONFIG_IOMMU_API | 1 + .../L1-RECOMMEND/default/CONFIG_IOMMU_DEBUGFS | 1 + .../default/CONFIG_IOMMU_DEFAULT_DMA_LAZY | 1 + .../L1-RECOMMEND/default/CONFIG_IOMMU_DMA | 1 + .../L1-RECOMMEND/default/CONFIG_IOMMU_IOVA | 1 + .../default/CONFIG_IOMMU_IO_PGTABLE | 1 + .../default/CONFIG_IO_STRICT_DEVMEM | 1 + .../L1-RECOMMEND/default/CONFIG_IP6_NF_FILTER | 1 + .../default/CONFIG_IP6_NF_IPTABLES | 1 + .../L1-RECOMMEND/default/CONFIG_IP6_NF_MANGLE | 1 + .../default/CONFIG_IP6_NF_MATCH_AH | 1 + .../default/CONFIG_IP6_NF_MATCH_EUI64 | 1 + .../default/CONFIG_IP6_NF_MATCH_FRAG | 1 + .../default/CONFIG_IP6_NF_MATCH_HL | 1 + .../default/CONFIG_IP6_NF_MATCH_IPV6HEADER | 1 + .../default/CONFIG_IP6_NF_MATCH_MH | 1 + .../default/CONFIG_IP6_NF_MATCH_OPTS | 1 + .../default/CONFIG_IP6_NF_MATCH_RPFILTER | 1 + .../default/CONFIG_IP6_NF_MATCH_RT | 1 + .../default/CONFIG_IP6_NF_MATCH_SRH | 1 + .../L1-RECOMMEND/default/CONFIG_IP6_NF_NAT | 1 + .../L1-RECOMMEND/default/CONFIG_IP6_NF_RAW | 1 + 
.../default/CONFIG_IP6_NF_SECURITY | 1 + .../default/CONFIG_IP6_NF_TARGET_HL | 1 + .../default/CONFIG_IP6_NF_TARGET_MASQUERADE | 1 + .../default/CONFIG_IP6_NF_TARGET_NPT | 1 + .../default/CONFIG_IP6_NF_TARGET_REJECT | 1 + .../default/CONFIG_IP6_NF_TARGET_SYNPROXY | 1 + .../default/CONFIG_IPMI_DEVICE_INTERFACE | 1 + .../default/CONFIG_IPMI_DMI_DECODE | 1 + .../default/CONFIG_IPMI_PANIC_EVENT | 1 + .../default/CONFIG_IPMI_PANIC_STRING | 1 + .../default/CONFIG_IPMI_PLAT_DATA | 1 + .../L1-RECOMMEND/default/CONFIG_IPMI_POWEROFF | 1 + .../L1-RECOMMEND/default/CONFIG_IPMI_SI | 1 + .../L1-RECOMMEND/default/CONFIG_IPMI_SSIF | 1 + .../L1-RECOMMEND/default/CONFIG_IPMI_WATCHDOG | 1 + .../L1-RECOMMEND/default/CONFIG_IPV6_GRE | 1 + .../L1-RECOMMEND/default/CONFIG_IPV6_ILA | 1 + .../L1-RECOMMEND/default/CONFIG_IPV6_MIP6 | 1 + .../L1-RECOMMEND/default/CONFIG_IPV6_MROUTE | 1 + .../CONFIG_IPV6_MROUTE_MULTIPLE_TABLES | 1 + .../default/CONFIG_IPV6_MULTIPLE_TABLES | 1 + .../default/CONFIG_IPV6_NDISC_NODETYPE | 1 + .../default/CONFIG_IPV6_OPTIMISTIC_DAD | 1 + .../L1-RECOMMEND/default/CONFIG_IPV6_PIMSM_V2 | 1 + .../default/CONFIG_IPV6_ROUTER_PREF | 1 + .../default/CONFIG_IPV6_ROUTE_INFO | 1 + .../default/CONFIG_IPV6_RPL_LWTUNNEL | 1 + .../default/CONFIG_IPV6_SEG6_HMAC | 1 + .../default/CONFIG_IPV6_SEG6_LWTUNNEL | 1 + .../L1-RECOMMEND/default/CONFIG_IPV6_SIT | 1 + .../L1-RECOMMEND/default/CONFIG_IPV6_SIT_6RD | 1 + .../L1-RECOMMEND/default/CONFIG_IPV6_SUBTREES | 1 + .../L1-RECOMMEND/default/CONFIG_IPV6_TUNNEL | 1 + .../L1-RECOMMEND/default/CONFIG_IPV6_VTI | 1 + .../L1-RECOMMEND/default/CONFIG_IPVLAN | 1 + .../L1-RECOMMEND/default/CONFIG_IPVTAP | 1 + .../default/CONFIG_IP_ADVANCED_ROUTER | 1 + .../L1-RECOMMEND/default/CONFIG_IP_DCCP | 1 + .../default/CONFIG_IP_FIB_TRIE_STATS | 1 + .../L1-RECOMMEND/default/CONFIG_IP_MROUTE | 1 + .../default/CONFIG_IP_MROUTE_MULTIPLE_TABLES | 1 + .../L1-RECOMMEND/default/CONFIG_IP_MULTICAST | 1 + .../default/CONFIG_IP_MULTIPLE_TABLES | 1 + 
.../default/CONFIG_IP_NF_ARPFILTER | 1 + .../default/CONFIG_IP_NF_ARP_MANGLE | 1 + .../L1-RECOMMEND/default/CONFIG_IP_NF_FILTER | 1 + .../default/CONFIG_IP_NF_IPTABLES | 1 + .../L1-RECOMMEND/default/CONFIG_IP_NF_MANGLE | 1 + .../default/CONFIG_IP_NF_MATCH_AH | 1 + .../default/CONFIG_IP_NF_MATCH_ECN | 1 + .../default/CONFIG_IP_NF_MATCH_RPFILTER | 1 + .../default/CONFIG_IP_NF_MATCH_TTL | 1 + .../L1-RECOMMEND/default/CONFIG_IP_NF_NAT | 1 + .../default/CONFIG_IP_NF_TARGET_ECN | 1 + .../default/CONFIG_IP_NF_TARGET_MASQUERADE | 1 + .../default/CONFIG_IP_NF_TARGET_NETMAP | 1 + .../default/CONFIG_IP_NF_TARGET_REDIRECT | 1 + .../default/CONFIG_IP_NF_TARGET_REJECT | 1 + .../default/CONFIG_IP_NF_TARGET_SYNPROXY | 1 + .../default/CONFIG_IP_NF_TARGET_TTL | 1 + .../L1-RECOMMEND/default/CONFIG_IP_PIMSM_V1 | 1 + .../L1-RECOMMEND/default/CONFIG_IP_PIMSM_V2 | 1 + .../default/CONFIG_IP_ROUTE_MULTIPATH | 1 + .../default/CONFIG_IP_ROUTE_VERBOSE | 1 + .../default/CONFIG_IP_SET_BITMAP_IP | 1 + .../default/CONFIG_IP_SET_BITMAP_IPMAC | 1 + .../default/CONFIG_IP_SET_BITMAP_PORT | 1 + .../default/CONFIG_IP_SET_HASH_IP | 1 + .../default/CONFIG_IP_SET_HASH_IPMAC | 1 + .../default/CONFIG_IP_SET_HASH_IPMARK | 1 + .../default/CONFIG_IP_SET_HASH_IPPORT | 1 + .../default/CONFIG_IP_SET_HASH_IPPORTIP | 1 + .../default/CONFIG_IP_SET_HASH_IPPORTNET | 1 + .../default/CONFIG_IP_SET_HASH_MAC | 1 + .../default/CONFIG_IP_SET_HASH_NET | 1 + .../default/CONFIG_IP_SET_HASH_NETIFACE | 1 + .../default/CONFIG_IP_SET_HASH_NETNET | 1 + .../default/CONFIG_IP_SET_HASH_NETPORT | 1 + .../default/CONFIG_IP_SET_HASH_NETPORTNET | 1 + .../default/CONFIG_IP_SET_LIST_SET | 1 + .../L1-RECOMMEND/default/CONFIG_IP_SET_MAX | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_DH | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_FO | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_FTP | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_LBLC | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_LBLCR | 1 + 
.../L1-RECOMMEND/default/CONFIG_IP_VS_LC | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_MH | 1 + .../default/CONFIG_IP_VS_MH_TAB_INDEX | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_NFCT | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_NQ | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_OVF | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_PE_SIP | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_RR | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_SED | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_SH | 1 + .../default/CONFIG_IP_VS_SH_TAB_BITS | 1 + .../default/CONFIG_IP_VS_TAB_BITS | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_WLC | 1 + .../L1-RECOMMEND/default/CONFIG_IP_VS_WRR | 1 + .../default/CONFIG_ISCSI_BOOT_SYSFS | 1 + .../L1-RECOMMEND/default/CONFIG_ISCSI_TARGET | 1 + .../L1-RECOMMEND/default/CONFIG_ISCSI_TCP | 1 + .../L1-RECOMMEND/default/CONFIG_IXGBEVF_IPSEC | 1 + .../L1-RECOMMEND/default/CONFIG_IXGBE_DCB | 1 + .../L1-RECOMMEND/default/CONFIG_IXGBE_HWMON | 1 + .../L1-RECOMMEND/default/CONFIG_IXGBE_IPSEC | 1 + .../L1-RECOMMEND/default/CONFIG_JBD2_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_JOLIET | 1 + .../configs/L1-RECOMMEND/default/CONFIG_KASAN | 1 + .../configs/L1-RECOMMEND/default/CONFIG_KCOV | 1 + .../default/CONFIG_KDB_CONTINUE_CATASTROPHIC | 1 + .../L1-RECOMMEND/default/CONFIG_KDB_KEYBOARD | 1 + .../L1-RECOMMEND/default/CONFIG_KEXEC_SIG | 1 + .../configs/L1-RECOMMEND/default/CONFIG_KEYS | 1 + .../default/CONFIG_KEYS_REQUEST_CACHE | 1 + .../default/CONFIG_KEY_DH_OPERATIONS | 1 + .../default/CONFIG_KFENCE_DEFERRABLE | 1 + .../default/CONFIG_KFENCE_NUM_OBJECTS | 1 + .../default/CONFIG_KFENCE_SAMPLE_INTERVAL | 1 + .../default/CONFIG_KFENCE_STRESS_TEST_FAULTS | 1 + .../default/CONFIG_KGDB_HONOUR_BLOCKLIST | 1 + .../L1-RECOMMEND/default/CONFIG_KGDB_KDB | 1 + .../L1-RECOMMEND/default/CONFIG_KGDB_TESTS | 1 + .../default/CONFIG_KPROBE_EVENTS_ON_NOTRACE | 1 + .../configs/L1-RECOMMEND/default/CONFIG_KSM | 1 + .../default/CONFIG_LDISC_AUTOLOAD | 1 + .../configs/L1-RECOMMEND/default/CONFIG_LIBFC | 1 
+ .../L1-RECOMMEND/default/CONFIG_LIBFCOE | 1 + .../L1-RECOMMEND/default/CONFIG_LIST_HARDENED | 1 + .../default/CONFIG_LOAD_UEFI_KEYS | 1 + .../L1-RECOMMEND/default/CONFIG_LOCALVERSION | 1 + .../default/CONFIG_LOCALVERSION_AUTO | 1 + .../default/CONFIG_LOCKDEP_SUPPORT | 1 + .../default/CONFIG_LOCK_EVENT_COUNTS | 1 + .../default/CONFIG_LOG_CPU_MAX_BUF_SHIFT | 1 + .../default/CONFIG_LOOPBACK_TARGET | 1 + .../default/CONFIG_LRU_GEN_ENABLED | 1 + .../L1-RECOMMEND/default/CONFIG_LRU_GEN_STATS | 1 + .../configs/L1-RECOMMEND/default/CONFIG_LSM | 1 + .../default/CONFIG_LSM_MMAP_MIN_ADDR | 1 + .../L1-RECOMMEND/default/CONFIG_LTO_NONE | 1 + .../L1-RECOMMEND/default/CONFIG_LWTUNNEL | 1 + .../L1-RECOMMEND/default/CONFIG_LWTUNNEL_BPF | 1 + .../L1-RECOMMEND/default/CONFIG_MACSEC | 1 + .../L1-RECOMMEND/default/CONFIG_MACVLAN | 1 + .../L1-RECOMMEND/default/CONFIG_MACVTAP | 1 + .../default/CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE | 1 + .../default/CONFIG_MAGIC_SYSRQ_SERIAL | 1 + .../CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE | 1 + .../L1-RECOMMEND/default/CONFIG_MAX_SKB_FRAGS | 1 + .../L1-RECOMMEND/default/CONFIG_MD_AUTODETECT | 1 + .../L1-RECOMMEND/default/CONFIG_MD_CLUSTER | 1 + .../L1-RECOMMEND/default/CONFIG_MD_FAULTY | 1 + .../L1-RECOMMEND/default/CONFIG_MD_LINEAR | 1 + .../L1-RECOMMEND/default/CONFIG_MD_MULTIPATH | 1 + .../L1-RECOMMEND/default/CONFIG_MD_RAID0 | 1 + .../L1-RECOMMEND/default/CONFIG_MD_RAID1 | 1 + .../L1-RECOMMEND/default/CONFIG_MD_RAID10 | 1 + .../L1-RECOMMEND/default/CONFIG_MD_RAID456 | 1 + .../default/CONFIG_MEGARAID_LEGACY | 1 + .../default/CONFIG_MEGARAID_NEWGEN | 1 + .../L1-RECOMMEND/default/CONFIG_MEGARAID_SAS | 1 + .../L1-RECOMMEND/default/CONFIG_MEMORY | 1 + .../default/CONFIG_MEMORY_BALLOON | 1 + .../default/CONFIG_MEMORY_HOTREMOVE | 1 + .../default/CONFIG_MESSAGE_LOGLEVEL_DEFAULT | 1 + .../L1-RECOMMEND/default/CONFIG_MLX4_CORE | 1 + .../default/CONFIG_MLX4_CORE_GEN2 | 1 + .../L1-RECOMMEND/default/CONFIG_MLX4_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_MLX4_EN_DCB | 
1 + .../L1-RECOMMEND/default/CONFIG_MLX5_BRIDGE | 1 + .../L1-RECOMMEND/default/CONFIG_MLX5_CLS_ACT | 1 + .../default/CONFIG_MLX5_CORE_EN_DCB | 1 + .../L1-RECOMMEND/default/CONFIG_MLX5_EN_ARFS | 1 + .../L1-RECOMMEND/default/CONFIG_MLX5_EN_RXNFC | 1 + .../L1-RECOMMEND/default/CONFIG_MLX5_ESWITCH | 1 + .../L1-RECOMMEND/default/CONFIG_MLX5_FPGA | 1 + .../default/CONFIG_MLX5_INFINIBAND | 1 + .../L1-RECOMMEND/default/CONFIG_MLX5_MPFS | 1 + .../L1-RECOMMEND/default/CONFIG_MLX5_SF | 1 + .../default/CONFIG_MLX5_SW_STEERING | 1 + .../L1-RECOMMEND/default/CONFIG_MLX5_TC_CT | 1 + .../default/CONFIG_MLX5_TC_SAMPLE | 1 + .../configs/L1-RECOMMEND/default/CONFIG_MLXFW | 1 + .../default/CONFIG_MLXSW_CORE_HWMON | 1 + .../default/CONFIG_MLXSW_CORE_THERMAL | 1 + .../L1-RECOMMEND/default/CONFIG_MLXSW_I2C | 1 + .../L1-RECOMMEND/default/CONFIG_MLXSW_MINIMAL | 1 + .../L1-RECOMMEND/default/CONFIG_MLXSW_PCI | 1 + .../default/CONFIG_MLXSW_SPECTRUM | 1 + .../default/CONFIG_MLXSW_SPECTRUM_DCB | 1 + ...FIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS | 1 + .../default/CONFIG_MODULE_FORCE_LOAD | 1 + .../default/CONFIG_MODULE_FORCE_UNLOAD | 1 + .../default/CONFIG_MODULE_SIG_ALL | 1 + .../default/CONFIG_MODULE_SIG_FORCE | 1 + .../default/CONFIG_MODULE_SIG_HASH | 1 + .../default/CONFIG_MODULE_SIG_KEY | 1 + .../default/CONFIG_MODULE_SIG_SHA1 | 1 + .../default/CONFIG_MODULE_SIG_SHA224 | 1 + .../default/CONFIG_MODULE_SIG_SHA256 | 1 + .../default/CONFIG_MODULE_SIG_SHA384 | 1 + .../default/CONFIG_MODULE_SIG_SHA512 | 1 + .../configs/L1-RECOMMEND/default/CONFIG_MPLS | 1 + .../L1-RECOMMEND/default/CONFIG_MPLS_IPTUNNEL | 1 + .../L1-RECOMMEND/default/CONFIG_MPLS_ROUTING | 1 + .../L1-RECOMMEND/default/CONFIG_MPTCP_IPV6 | 1 + .../L1-RECOMMEND/default/CONFIG_MSDOS_FS | 1 + .../L1-RECOMMEND/default/CONFIG_NETCONSOLE | 1 + .../default/CONFIG_NETCONSOLE_DYNAMIC | 1 + .../L1-RECOMMEND/default/CONFIG_NETDEVSIM | 1 + .../default/CONFIG_NETFILTER_CONNCOUNT | 1 + .../default/CONFIG_NETFILTER_EGRESS | 1 + 
.../default/CONFIG_NETFILTER_FAMILY_ARP | 1 + .../default/CONFIG_NETFILTER_FAMILY_BRIDGE | 1 + .../default/CONFIG_NETFILTER_NETLINK | 1 + .../default/CONFIG_NETFILTER_NETLINK_ACCT | 1 + .../default/CONFIG_NETFILTER_NETLINK_GLUE_CT | 1 + .../default/CONFIG_NETFILTER_NETLINK_LOG | 1 + .../default/CONFIG_NETFILTER_NETLINK_OSF | 1 + .../default/CONFIG_NETFILTER_NETLINK_QUEUE | 1 + .../default/CONFIG_NETFILTER_SYNPROXY | 1 + .../default/CONFIG_NETFILTER_XTABLES | 1 + .../default/CONFIG_NETFILTER_XT_CONNMARK | 1 + .../default/CONFIG_NETFILTER_XT_MARK | 1 + .../CONFIG_NETFILTER_XT_MATCH_ADDRTYPE | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_BPF | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_CGROUP | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_CLUSTER | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_COMMENT | 1 + .../CONFIG_NETFILTER_XT_MATCH_CONNBYTES | 1 + .../CONFIG_NETFILTER_XT_MATCH_CONNLABEL | 1 + .../CONFIG_NETFILTER_XT_MATCH_CONNLIMIT | 1 + .../CONFIG_NETFILTER_XT_MATCH_CONNMARK | 1 + .../CONFIG_NETFILTER_XT_MATCH_CONNTRACK | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_CPU | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_DCCP | 1 + .../CONFIG_NETFILTER_XT_MATCH_DEVGROUP | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_DSCP | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_ECN | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_ESP | 1 + .../CONFIG_NETFILTER_XT_MATCH_HASHLIMIT | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_HELPER | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_HL | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_IPCOMP | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_IPRANGE | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_IPVS | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_L2TP | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_LENGTH | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_LIMIT | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_MAC | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_MARK | 1 + .../CONFIG_NETFILTER_XT_MATCH_MULTIPORT | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_NFACCT | 1 + 
.../default/CONFIG_NETFILTER_XT_MATCH_OSF | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_OWNER | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_PHYSDEV | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_PKTTYPE | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_POLICY | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_QUOTA | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_RATEEST | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_REALM | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_RECENT | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_SCTP | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_SOCKET | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_STATE | 1 + .../CONFIG_NETFILTER_XT_MATCH_STATISTIC | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_STRING | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_TCPMSS | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_TIME | 1 + .../default/CONFIG_NETFILTER_XT_MATCH_U32 | 1 + .../default/CONFIG_NETFILTER_XT_NAT | 1 + .../default/CONFIG_NETFILTER_XT_SET | 1 + .../default/CONFIG_NETFILTER_XT_TARGET_AUDIT | 1 + .../CONFIG_NETFILTER_XT_TARGET_CHECKSUM | 1 + .../CONFIG_NETFILTER_XT_TARGET_CLASSIFY | 1 + .../CONFIG_NETFILTER_XT_TARGET_CONNMARK | 1 + .../CONFIG_NETFILTER_XT_TARGET_CONNSECMARK | 1 + .../default/CONFIG_NETFILTER_XT_TARGET_CT | 1 + .../default/CONFIG_NETFILTER_XT_TARGET_DSCP | 1 + .../default/CONFIG_NETFILTER_XT_TARGET_HL | 1 + .../default/CONFIG_NETFILTER_XT_TARGET_HMARK | 1 + .../CONFIG_NETFILTER_XT_TARGET_IDLETIMER | 1 + .../default/CONFIG_NETFILTER_XT_TARGET_LED | 1 + .../default/CONFIG_NETFILTER_XT_TARGET_LOG | 1 + .../default/CONFIG_NETFILTER_XT_TARGET_MARK | 1 + .../CONFIG_NETFILTER_XT_TARGET_MASQUERADE | 1 + .../default/CONFIG_NETFILTER_XT_TARGET_NETMAP | 1 + .../default/CONFIG_NETFILTER_XT_TARGET_NFLOG | 1 + .../CONFIG_NETFILTER_XT_TARGET_NFQUEUE | 1 + .../CONFIG_NETFILTER_XT_TARGET_NOTRACK | 1 + .../CONFIG_NETFILTER_XT_TARGET_RATEEST | 1 + .../CONFIG_NETFILTER_XT_TARGET_REDIRECT | 1 + .../CONFIG_NETFILTER_XT_TARGET_SECMARK | 1 + .../default/CONFIG_NETFILTER_XT_TARGET_TCPMSS | 1 + 
.../CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP | 1 + .../default/CONFIG_NETFILTER_XT_TARGET_TEE | 1 + .../default/CONFIG_NETFILTER_XT_TARGET_TPROXY | 1 + .../default/CONFIG_NETFILTER_XT_TARGET_TRACE | 1 + .../L1-RECOMMEND/default/CONFIG_NETFS_STATS | 1 + .../L1-RECOMMEND/default/CONFIG_NETFS_SUPPORT | 1 + .../L1-RECOMMEND/default/CONFIG_NETLABEL | 1 + .../default/CONFIG_NETWORK_PHY_TIMESTAMPING | 1 + .../default/CONFIG_NETWORK_SECMARK | 1 + .../L1-RECOMMEND/default/CONFIG_NET_ACT_BPF | 1 + .../default/CONFIG_NET_ACT_CONNMARK | 1 + .../L1-RECOMMEND/default/CONFIG_NET_ACT_CSUM | 1 + .../L1-RECOMMEND/default/CONFIG_NET_ACT_CT | 1 + .../default/CONFIG_NET_ACT_CTINFO | 1 + .../L1-RECOMMEND/default/CONFIG_NET_ACT_GATE | 1 + .../L1-RECOMMEND/default/CONFIG_NET_ACT_IFE | 1 + .../L1-RECOMMEND/default/CONFIG_NET_ACT_IPT | 1 + .../default/CONFIG_NET_ACT_MIRRED | 1 + .../L1-RECOMMEND/default/CONFIG_NET_ACT_MPLS | 1 + .../L1-RECOMMEND/default/CONFIG_NET_ACT_NAT | 1 + .../L1-RECOMMEND/default/CONFIG_NET_ACT_PEDIT | 1 + .../default/CONFIG_NET_ACT_SAMPLE | 1 + .../L1-RECOMMEND/default/CONFIG_NET_ACT_SIMP | 1 + .../default/CONFIG_NET_ACT_SKBEDIT | 1 + .../default/CONFIG_NET_ACT_SKBMOD | 1 + .../default/CONFIG_NET_ACT_TUNNEL_KEY | 1 + .../L1-RECOMMEND/default/CONFIG_NET_ACT_VLAN | 1 + .../L1-RECOMMEND/default/CONFIG_NET_CLS_BASIC | 1 + .../L1-RECOMMEND/default/CONFIG_NET_CLS_BPF | 1 + .../default/CONFIG_NET_CLS_CGROUP | 1 + .../L1-RECOMMEND/default/CONFIG_NET_CLS_FLOW | 1 + .../default/CONFIG_NET_CLS_FLOWER | 1 + .../L1-RECOMMEND/default/CONFIG_NET_CLS_FW | 1 + .../default/CONFIG_NET_CLS_MATCHALL | 1 + .../default/CONFIG_NET_CLS_ROUTE4 | 1 + .../L1-RECOMMEND/default/CONFIG_NET_CLS_U32 | 1 + .../default/CONFIG_NET_DROP_MONITOR | 1 + .../L1-RECOMMEND/default/CONFIG_NET_EMATCH | 1 + .../default/CONFIG_NET_EMATCH_CMP | 1 + .../default/CONFIG_NET_EMATCH_IPSET | 1 + .../default/CONFIG_NET_EMATCH_IPT | 1 + .../default/CONFIG_NET_EMATCH_META | 1 + .../default/CONFIG_NET_EMATCH_NBYTE | 1 + 
.../default/CONFIG_NET_EMATCH_STACK | 1 + .../default/CONFIG_NET_EMATCH_TEXT | 1 + .../default/CONFIG_NET_EMATCH_U32 | 1 + .../L1-RECOMMEND/default/CONFIG_NET_FC | 1 + .../L1-RECOMMEND/default/CONFIG_NET_IPGRE | 1 + .../default/CONFIG_NET_IPGRE_BROADCAST | 1 + .../default/CONFIG_NET_IPGRE_DEMUX | 1 + .../L1-RECOMMEND/default/CONFIG_NET_IPIP | 1 + .../default/CONFIG_NET_KEY_MIGRATE | 1 + .../default/CONFIG_NET_L3_MASTER_DEV | 1 + .../L1-RECOMMEND/default/CONFIG_NET_MPLS_GSO | 1 + .../L1-RECOMMEND/default/CONFIG_NET_NSH | 1 + .../L1-RECOMMEND/default/CONFIG_NET_PKTGEN | 1 + .../default/CONFIG_NET_PTP_CLASSIFY | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_CAKE | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_CBS | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_CHOKE | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_CODEL | 1 + .../default/CONFIG_NET_SCH_DEFAULT | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_DRR | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_ETF | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_ETS | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_FQ | 1 + .../default/CONFIG_NET_SCH_FQ_PIE | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_GRED | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_HFSC | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_HHF | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_HTB | 1 + .../default/CONFIG_NET_SCH_MQPRIO | 1 + .../default/CONFIG_NET_SCH_MULTIQ | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_NETEM | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_PIE | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_PLUG | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_PRIO | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_QFQ | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_RED | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_SFB | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_SFQ | 1 + .../default/CONFIG_NET_SCH_SKBPRIO | 1 + .../default/CONFIG_NET_SCH_TAPRIO | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_TBF | 1 + .../L1-RECOMMEND/default/CONFIG_NET_SCH_TEQL | 1 + 
.../default/CONFIG_NET_TC_SKB_EXT | 1 + .../L1-RECOMMEND/default/CONFIG_NET_TEAM | 1 + .../default/CONFIG_NET_TEAM_MODE_ACTIVEBACKUP | 1 + .../default/CONFIG_NET_TEAM_MODE_BROADCAST | 1 + .../default/CONFIG_NET_TEAM_MODE_LOADBALANCE | 1 + .../default/CONFIG_NET_TEAM_MODE_RANDOM | 1 + .../default/CONFIG_NET_TEAM_MODE_ROUNDROBIN | 1 + .../default/CONFIG_NET_VENDOR_HUAWEI | 1 + .../default/CONFIG_NET_VENDOR_SOLARFLARE | 1 + .../L1-RECOMMEND/default/CONFIG_NET_VRF | 1 + .../L1-RECOMMEND/default/CONFIG_NET_XGRESS | 1 + .../default/CONFIG_NFIT_SECURITY_DEBUG | 1 + .../default/CONFIG_NFSD_BLOCKLAYOUT | 1 + .../default/CONFIG_NFSD_FLEXFILELAYOUT | 1 + .../L1-RECOMMEND/default/CONFIG_NFSD_PNFS | 1 + .../default/CONFIG_NFSD_SCSILAYOUT | 1 + .../L1-RECOMMEND/default/CONFIG_NFSD_V3_ACL | 1 + .../default/CONFIG_NFSD_V4_2_INTER_SSC | 1 + .../default/CONFIG_NFSD_V4_SECURITY_LABEL | 1 + .../default/CONFIG_NFS_ACL_SUPPORT | 1 + .../default/CONFIG_NFS_DISABLE_UDP_SUPPORT | 1 + .../L1-RECOMMEND/default/CONFIG_NFS_SWAP | 1 + .../default/CONFIG_NFS_USE_LEGACY_DNS | 1 + .../L1-RECOMMEND/default/CONFIG_NFS_V2 | 1 + .../L1-RECOMMEND/default/CONFIG_NFS_V3_ACL | 1 + .../default/CONFIG_NFS_V4_1_MIGRATION | 1 + .../default/CONFIG_NFS_V4_2_READ_PLUS | 1 + .../default/CONFIG_NFT_BRIDGE_META | 1 + .../default/CONFIG_NFT_BRIDGE_REJECT | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_COMPAT | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_CONNLIMIT | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_CT | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV4 | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV6 | 1 + .../default/CONFIG_NFT_DUP_NETDEV | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_FIB | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_FIB_INET | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV4 | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV6 | 1 + .../default/CONFIG_NFT_FIB_NETDEV | 1 + .../default/CONFIG_NFT_FLOW_OFFLOAD | 1 + .../default/CONFIG_NFT_FWD_NETDEV | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_HASH | 1 + 
.../L1-RECOMMEND/default/CONFIG_NFT_LIMIT | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_LOG | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_MASQ | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_NAT | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_NUMGEN | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_OSF | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_QUEUE | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_QUOTA | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_REDIR | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_REJECT | 1 + .../default/CONFIG_NFT_REJECT_INET | 1 + .../default/CONFIG_NFT_REJECT_IPV6 | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_SOCKET | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_SYNPROXY | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_TPROXY | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_TUNNEL | 1 + .../L1-RECOMMEND/default/CONFIG_NFT_XFRM | 1 + .../default/CONFIG_NF_CONNTRACK_AMANDA | 1 + .../default/CONFIG_NF_CONNTRACK_BRIDGE | 1 + .../default/CONFIG_NF_CONNTRACK_BROADCAST | 1 + .../default/CONFIG_NF_CONNTRACK_EVENTS | 1 + .../default/CONFIG_NF_CONNTRACK_FTP | 1 + .../default/CONFIG_NF_CONNTRACK_H323 | 1 + .../default/CONFIG_NF_CONNTRACK_IRC | 1 + .../default/CONFIG_NF_CONNTRACK_LABELS | 1 + .../default/CONFIG_NF_CONNTRACK_MARK | 1 + .../default/CONFIG_NF_CONNTRACK_NETBIOS_NS | 1 + .../default/CONFIG_NF_CONNTRACK_PPTP | 1 + .../default/CONFIG_NF_CONNTRACK_PROCFS | 1 + .../default/CONFIG_NF_CONNTRACK_SANE | 1 + .../default/CONFIG_NF_CONNTRACK_SECMARK | 1 + .../default/CONFIG_NF_CONNTRACK_SIP | 1 + .../default/CONFIG_NF_CONNTRACK_SNMP | 1 + .../default/CONFIG_NF_CONNTRACK_TFTP | 1 + .../default/CONFIG_NF_CONNTRACK_TIMEOUT | 1 + .../default/CONFIG_NF_CONNTRACK_TIMESTAMP | 1 + .../default/CONFIG_NF_CONNTRACK_ZONES | 1 + .../L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK | 1 + .../default/CONFIG_NF_CT_NETLINK_HELPER | 1 + .../default/CONFIG_NF_CT_NETLINK_TIMEOUT | 1 + .../default/CONFIG_NF_CT_PROTO_DCCP | 1 + .../default/CONFIG_NF_CT_PROTO_GRE | 1 + .../default/CONFIG_NF_CT_PROTO_SCTP | 1 + 
.../default/CONFIG_NF_CT_PROTO_UDPLITE | 1 + .../default/CONFIG_NF_DEFRAG_IPV6 | 1 + .../L1-RECOMMEND/default/CONFIG_NF_DUP_IPV4 | 1 + .../L1-RECOMMEND/default/CONFIG_NF_DUP_IPV6 | 1 + .../L1-RECOMMEND/default/CONFIG_NF_DUP_NETDEV | 1 + .../L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE | 1 + .../default/CONFIG_NF_FLOW_TABLE_INET | 1 + .../L1-RECOMMEND/default/CONFIG_NF_LOG_ARP | 1 + .../L1-RECOMMEND/default/CONFIG_NF_LOG_IPV4 | 1 + .../L1-RECOMMEND/default/CONFIG_NF_LOG_IPV6 | 1 + .../L1-RECOMMEND/default/CONFIG_NF_NAT_AMANDA | 1 + .../L1-RECOMMEND/default/CONFIG_NF_NAT_FTP | 1 + .../L1-RECOMMEND/default/CONFIG_NF_NAT_H323 | 1 + .../L1-RECOMMEND/default/CONFIG_NF_NAT_IRC | 1 + .../default/CONFIG_NF_NAT_MASQUERADE | 1 + .../L1-RECOMMEND/default/CONFIG_NF_NAT_PPTP | 1 + .../default/CONFIG_NF_NAT_REDIRECT | 1 + .../L1-RECOMMEND/default/CONFIG_NF_NAT_SIP | 1 + .../default/CONFIG_NF_NAT_SNMP_BASIC | 1 + .../L1-RECOMMEND/default/CONFIG_NF_NAT_TFTP | 1 + .../default/CONFIG_NF_REJECT_IPV4 | 1 + .../default/CONFIG_NF_REJECT_IPV6 | 1 + .../default/CONFIG_NF_SOCKET_IPV4 | 1 + .../default/CONFIG_NF_SOCKET_IPV6 | 1 + .../L1-RECOMMEND/default/CONFIG_NF_TABLES_ARP | 1 + .../default/CONFIG_NF_TABLES_BRIDGE | 1 + .../default/CONFIG_NF_TABLES_NETDEV | 1 + .../default/CONFIG_NF_TPROXY_IPV6 | 1 + .../configs/L1-RECOMMEND/default/CONFIG_NLMON | 1 + .../L1-RECOMMEND/default/CONFIG_NODES_SHIFT | 1 + .../L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG | 1 + .../default/CONFIG_NOUVEAU_DEBUG_DEFAULT | 1 + .../configs/L1-RECOMMEND/default/CONFIG_NTB | 1 + .../default/CONFIG_NUMA_BALANCING | 1 + .../CONFIG_NUMA_BALANCING_DEFAULT_ENABLED | 1 + .../L1-RECOMMEND/default/CONFIG_NVDIMM_KEYS | 1 + .../configs/L1-RECOMMEND/default/CONFIG_NVMEM | 1 + .../L1-RECOMMEND/default/CONFIG_NVMEM_SYSFS | 1 + .../L1-RECOMMEND/default/CONFIG_NVME_FC | 1 + .../L1-RECOMMEND/default/CONFIG_OPENVSWITCH | 1 + .../default/CONFIG_OPENVSWITCH_GENEVE | 1 + .../default/CONFIG_OPENVSWITCH_GRE | 1 + 
.../default/CONFIG_OPENVSWITCH_VXLAN | 1 + .../default/CONFIG_OSNOISE_TRACER | 1 + .../default/CONFIG_OVERLAY_FS_INDEX | 1 + .../default/CONFIG_OVERLAY_FS_METACOPY | 1 + .../default/CONFIG_OVERLAY_FS_NFS_EXPORT | 1 + .../CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW | 1 + .../default/CONFIG_OVERLAY_FS_REDIRECT_DIR | 1 + .../default/CONFIG_OVERLAY_FS_XINO_AUTO | 1 + .../L1-RECOMMEND/default/CONFIG_PACKET_DIAG | 1 + .../default/CONFIG_PAGE_EXTENSION | 1 + .../L1-RECOMMEND/default/CONFIG_PAGE_OWNER | 1 + .../default/CONFIG_PAGE_POISONING | 1 + .../default/CONFIG_PAGE_REPORTING | 1 + .../default/CONFIG_PARAVIRT_TIME_ACCOUNTING | 1 + .../L1-RECOMMEND/default/CONFIG_PCIEAER | 1 + .../L1-RECOMMEND/default/CONFIG_PCIEASPM | 1 + .../default/CONFIG_PCIEASPM_DEFAULT | 1 + .../L1-RECOMMEND/default/CONFIG_PCIE_DPC | 1 + .../L1-RECOMMEND/default/CONFIG_PCIE_ECRC | 1 + .../L1-RECOMMEND/default/CONFIG_PCIE_EDR | 1 + .../L1-RECOMMEND/default/CONFIG_PCI_PASID | 1 + .../L1-RECOMMEND/default/CONFIG_PCI_PRI | 1 + .../L1-RECOMMEND/default/CONFIG_PCI_QUIRKS | 1 + .../L1-RECOMMEND/default/CONFIG_PERCPU_STATS | 1 + .../L1-RECOMMEND/default/CONFIG_PERCPU_TEST | 1 + .../default/CONFIG_PERSISTENT_KEYRINGS | 1 + .../L1-RECOMMEND/default/CONFIG_PHYLIB | 1 + .../default/CONFIG_PKCS7_TEST_KEY | 1 + .../default/CONFIG_PKCS8_PRIVATE_KEY_PARSER | 1 + .../configs/L1-RECOMMEND/default/CONFIG_PMBUS | 1 + .../default/CONFIG_PM_ADVANCED_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_PM_AUTOSLEEP | 1 + .../L1-RECOMMEND/default/CONFIG_PM_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_PM_SLEEP | 1 + .../default/CONFIG_PM_SLEEP_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_PM_SLEEP_SMP | 1 + .../default/CONFIG_PM_STD_PARTITION | 1 + .../default/CONFIG_PM_TEST_SUSPEND | 1 + .../L1-RECOMMEND/default/CONFIG_PM_WAKELOCKS | 1 + .../L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE | 1 + .../default/CONFIG_POSIX_MQUEUE_SYSCTL | 1 + .../L1-RECOMMEND/default/CONFIG_POWER_RESET | 1 + .../configs/L1-RECOMMEND/default/CONFIG_PPP | 1 + 
.../configs/L1-RECOMMEND/default/CONFIG_PPPOE | 1 + .../L1-RECOMMEND/default/CONFIG_PROC_CHILDREN | 1 + .../L1-RECOMMEND/default/CONFIG_PROC_EVENTS | 1 + .../default/CONFIG_PROC_VMCORE_DEVICE_DUMP | 1 + .../default/CONFIG_PROFILE_ANNOTATED_BRANCHES | 1 + .../L1-RECOMMEND/default/CONFIG_PROFILING | 1 + .../L1-RECOMMEND/default/CONFIG_PSAMPLE | 1 + .../configs/L1-RECOMMEND/default/CONFIG_PSI | 1 + .../default/CONFIG_PSI_DEFAULT_DISABLED | 1 + .../L1-RECOMMEND/default/CONFIG_PSTORE | 1 + .../default/CONFIG_PSTORE_COMPRESS | 1 + .../default/CONFIG_PSTORE_CONSOLE | 1 + .../L1-RECOMMEND/default/CONFIG_PSTORE_RAM | 1 + .../L1-RECOMMEND/default/CONFIG_QFMT_V1 | 1 + .../L1-RECOMMEND/default/CONFIG_QFMT_V2 | 1 + .../L1-RECOMMEND/default/CONFIG_QUOTA_DEBUG | 1 + .../default/CONFIG_QUOTA_NETLINK_INTERFACE | 1 + .../L1-RECOMMEND/default/CONFIG_RAID6_PQ | 1 + .../default/CONFIG_RAID6_PQ_BENCHMARK | 1 + .../L1-RECOMMEND/default/CONFIG_RAID_ATTRS | 1 + .../default/CONFIG_RANDOMIZE_KSTACK_OFFSET | 1 + .../CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT | 1 + .../default/CONFIG_RCU_CPU_STALL_TIMEOUT | 1 + .../L1-RECOMMEND/default/CONFIG_RCU_EQS_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_RCU_EXPERT | 1 + .../L1-RECOMMEND/default/CONFIG_RCU_NOCB_CPU | 1 + .../default/CONFIG_RCU_REF_SCALE_TEST | 1 + .../default/CONFIG_RCU_SCALE_TEST | 1 + .../default/CONFIG_RCU_TORTURE_TEST | 1 + .../L1-RECOMMEND/default/CONFIG_RCU_TRACE | 1 + .../L1-RECOMMEND/default/CONFIG_RDMA_RXE | 1 + .../L1-RECOMMEND/default/CONFIG_RDMA_SIW | 1 + .../L1-RECOMMEND/default/CONFIG_RD_BZIP2 | 1 + .../L1-RECOMMEND/default/CONFIG_RD_GZIP | 1 + .../L1-RECOMMEND/default/CONFIG_RD_LZ4 | 1 + .../L1-RECOMMEND/default/CONFIG_RD_LZMA | 1 + .../L1-RECOMMEND/default/CONFIG_RD_LZO | 1 + .../configs/L1-RECOMMEND/default/CONFIG_RD_XZ | 1 + .../L1-RECOMMEND/default/CONFIG_RD_ZSTD | 1 + .../L1-RECOMMEND/default/CONFIG_READABLE_ASM | 1 + .../default/CONFIG_READ_ONLY_THP_FOR_FS | 1 + .../default/CONFIG_RICH_CONTAINER | 1 + 
.../default/CONFIG_RPCSEC_GSS_KRB5 | 1 + .../L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS | 1 + .../default/CONFIG_RTC_HCTOSYS_DEVICE | 1 + .../L1-RECOMMEND/default/CONFIG_RTC_INTF_DEV | 1 + .../L1-RECOMMEND/default/CONFIG_RTC_INTF_PROC | 1 + .../default/CONFIG_RTC_INTF_SYSFS | 1 + .../L1-RECOMMEND/default/CONFIG_RTC_NVMEM | 1 + .../L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC | 1 + .../default/CONFIG_RTC_SYSTOHC_DEVICE | 1 + .../default/CONFIG_RT_GROUP_SCHED | 1 + .../default/CONFIG_RUNTIME_TESTING_MENU | 1 + .../L1-RECOMMEND/default/CONFIG_SATA_AHCI | 1 + .../default/CONFIG_SATA_AHCI_PLATFORM | 1 + .../default/CONFIG_SATA_MOBILE_LPM_POLICY | 1 + .../L1-RECOMMEND/default/CONFIG_SATA_PMP | 1 + .../L1-RECOMMEND/default/CONFIG_SCHED_ACPU | 1 + .../L1-RECOMMEND/default/CONFIG_SCHED_CLUSTER | 1 + .../L1-RECOMMEND/default/CONFIG_SCHED_INFO | 1 + .../L1-RECOMMEND/default/CONFIG_SCHED_SLI | 1 + .../L1-RECOMMEND/default/CONFIG_SCHED_SMT | 1 + .../L1-RECOMMEND/default/CONFIG_SCHED_TRACER | 1 + .../default/CONFIG_SCSI_CONSTANTS | 1 + .../L1-RECOMMEND/default/CONFIG_SCSI_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_SCSI_DH | 1 + .../L1-RECOMMEND/default/CONFIG_SCSI_DMA | 1 + .../default/CONFIG_SCSI_ENCLOSURE | 1 + .../L1-RECOMMEND/default/CONFIG_SCSI_FC_ATTRS | 1 + .../default/CONFIG_SCSI_ISCSI_ATTRS | 1 + .../L1-RECOMMEND/default/CONFIG_SCSI_LOGGING | 1 + .../L1-RECOMMEND/default/CONFIG_SCSI_LOWLEVEL | 1 + .../L1-RECOMMEND/default/CONFIG_SCSI_MPI3MR | 1 + .../L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS | 1 + .../default/CONFIG_SCSI_MPT2SAS_MAX_SGE | 1 + .../L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS | 1 + .../default/CONFIG_SCSI_MPT3SAS_MAX_SGE | 1 + .../L1-RECOMMEND/default/CONFIG_SCSI_PROC_FS | 1 + .../L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATA | 1 + .../default/CONFIG_SCSI_SAS_ATTRS | 1 + .../default/CONFIG_SCSI_SAS_HOST_SMP | 1 + .../default/CONFIG_SCSI_SAS_LIBSAS | 1 + .../default/CONFIG_SCSI_SCAN_ASYNC | 1 + .../L1-RECOMMEND/default/CONFIG_SCSI_SMARTPQI | 1 + 
.../default/CONFIG_SCSI_SPI_ATTRS | 1 + .../default/CONFIG_SCSI_SRP_ATTRS | 1 + .../L1-RECOMMEND/default/CONFIG_SCSI_UFSHCD | 1 + .../L1-RECOMMEND/default/CONFIG_SCSI_VIRTIO | 1 + .../L1-RECOMMEND/default/CONFIG_SECRETMEM | 1 + .../default/CONFIG_SECTION_MISMATCH_WARN_ONLY | 1 + .../default/CONFIG_SECURITY_DMESG_RESTRICT | 1 + .../default/CONFIG_SECURITY_SELINUX_AVC_STATS | 1 + .../default/CONFIG_SECURITY_SELINUX_DEVELOP | 1 + ...CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE | 1 + .../CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS | 1 + .../default/CONFIG_SECURITY_SMACK | 1 + .../default/CONFIG_SERIO_ALTERA_PS2 | 1 + .../L1-RECOMMEND/default/CONFIG_SERIO_ARC_PS2 | 1 + .../L1-RECOMMEND/default/CONFIG_SERIO_LIBPS2 | 1 + .../L1-RECOMMEND/default/CONFIG_SERIO_RAW | 1 + .../L1-RECOMMEND/default/CONFIG_SERIO_SERPORT | 1 + .../L1-RECOMMEND/default/CONFIG_SFC_FALCON | 1 + .../default/CONFIG_SHUFFLE_PAGE_ALLOCATOR | 1 + .../CONFIG_SIGNED_PE_FILE_VERIFICATION | 1 + .../default/CONFIG_SLAB_FREELIST_HARDENED | 1 + .../default/CONFIG_SLAB_FREELIST_RANDOM | 1 + .../default/CONFIG_SLAB_MERGE_DEFAULT | 1 + .../configs/L1-RECOMMEND/default/CONFIG_SLIP | 1 + .../L1-RECOMMEND/default/CONFIG_SMC_DIAG | 1 + .../configs/L1-RECOMMEND/default/CONFIG_SND | 1 + .../L1-RECOMMEND/default/CONFIG_SOFT_WATCHDOG | 1 + .../L1-RECOMMEND/default/CONFIG_SPI_DEBUG | 1 + .../default/CONFIG_SQUASHFS_4K_DEVBLK_SIZE | 1 + .../default/CONFIG_SQUASHFS_DECOMP_SINGLE | 1 + .../default/CONFIG_SQUASHFS_EMBEDDED | 1 + .../default/CONFIG_SQUASHFS_FILE_CACHE | 1 + .../default/CONFIG_SQUASHFS_FILE_DIRECT | 1 + .../CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE | 1 + .../L1-RECOMMEND/default/CONFIG_SQUASHFS_LZO | 1 + .../default/CONFIG_SQUASHFS_XATTR | 1 + .../L1-RECOMMEND/default/CONFIG_SQUASHFS_XZ | 1 + .../L1-RECOMMEND/default/CONFIG_SQUASHFS_ZLIB | 1 + .../L1-RECOMMEND/default/CONFIG_SQUASHFS_ZSTD | 1 + .../default/CONFIG_STACKPROTECTOR | 1 + .../default/CONFIG_STACKPROTECTOR_STRONG | 1 + .../default/CONFIG_STACKTRACE_SUPPORT | 1 + 
.../L1-RECOMMEND/default/CONFIG_STACK_TRACER | 1 + .../default/CONFIG_STATIC_KEYS_SELFTEST | 1 + .../default/CONFIG_STATIC_USERMODEHELPER | 1 + .../L1-RECOMMEND/default/CONFIG_STRICT_DEVMEM | 1 + .../default/CONFIG_STRIP_ASM_SYMS | 1 + .../L1-RECOMMEND/default/CONFIG_SUNRPC_DEBUG | 1 + .../default/CONFIG_SUNRPC_XPRT_RDMA | 1 + .../L1-RECOMMEND/default/CONFIG_SUSPEND | 1 + .../default/CONFIG_SUSPEND_FREEZER | 1 + .../default/CONFIG_SYMBOLIC_ERRNAME | 1 + .../L1-RECOMMEND/default/CONFIG_SYNC_FILE | 1 + .../L1-RECOMMEND/default/CONFIG_SYNTH_EVENTS | 1 + .../default/CONFIG_SYSFB_SIMPLEFB | 1 + .../L1-RECOMMEND/default/CONFIG_SYSFS_SYSCALL | 1 + .../L1-RECOMMEND/default/CONFIG_SYSTEMPORT | 1 + .../default/CONFIG_SYSTEM_BLACKLIST_HASH_LIST | 1 + .../default/CONFIG_SYSTEM_BLACKLIST_KEYRING | 1 + .../default/CONFIG_SYSTEM_DATA_VERIFICATION | 1 + .../default/CONFIG_SYSTEM_EXTRA_CERTIFICATE | 1 + .../CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE | 1 + .../default/CONFIG_SYSTEM_REVOCATION_LIST | 1 + .../default/CONFIG_SYSVIPC_SYSCTL | 1 + .../L1-RECOMMEND/default/CONFIG_TASKSTATS | 1 + .../default/CONFIG_TASK_DELAY_ACCT | 1 + .../default/CONFIG_TASK_IO_ACCOUNTING | 1 + .../L1-RECOMMEND/default/CONFIG_TASK_XACCT | 1 + .../L1-RECOMMEND/default/CONFIG_TCG_ATMEL | 1 + .../L1-RECOMMEND/default/CONFIG_TCG_CRB | 1 + .../L1-RECOMMEND/default/CONFIG_TCG_TIS_CORE | 1 + .../L1-RECOMMEND/default/CONFIG_TCM_FILEIO | 1 + .../L1-RECOMMEND/default/CONFIG_TCM_IBLOCK | 1 + .../L1-RECOMMEND/default/CONFIG_TCM_PSCSI | 1 + .../L1-RECOMMEND/default/CONFIG_TCM_USER2 | 1 + .../L1-RECOMMEND/default/CONFIG_TCP_CONG_BIC | 1 + .../default/CONFIG_TCP_CONG_DCTCP | 1 + .../default/CONFIG_TCP_CONG_HSTCP | 1 + .../L1-RECOMMEND/default/CONFIG_TCP_CONG_HTCP | 1 + .../default/CONFIG_TCP_CONG_HYBLA | 1 + .../default/CONFIG_TCP_CONG_ILLINOIS | 1 + .../L1-RECOMMEND/default/CONFIG_TCP_CONG_LP | 1 + .../L1-RECOMMEND/default/CONFIG_TCP_CONG_NV | 1 + .../default/CONFIG_TCP_CONG_SCALABLE | 1 + .../default/CONFIG_TCP_CONG_VEGAS 
| 1 + .../L1-RECOMMEND/default/CONFIG_TCP_CONG_VENO | 1 + .../default/CONFIG_TCP_CONG_WESTWOOD | 1 + .../L1-RECOMMEND/default/CONFIG_TCP_CONG_YEAH | 1 + .../L1-RECOMMEND/default/CONFIG_TCP_MD5SIG | 1 + .../configs/L1-RECOMMEND/default/CONFIG_TEE | 1 + .../L1-RECOMMEND/default/CONFIG_TEST_BPF | 1 + .../CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE | 1 + .../CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE | 1 + .../CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE | 1 + ...CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS | 1 + .../default/CONFIG_THERMAL_GOV_FAIR_SHARE | 1 + .../default/CONFIG_THERMAL_GOV_STEP_WISE | 1 + .../default/CONFIG_THERMAL_GOV_USER_SPACE | 1 + .../L1-RECOMMEND/default/CONFIG_THERMAL_HWMON | 1 + .../default/CONFIG_THERMAL_NETLINK | 1 + .../L1-RECOMMEND/default/CONFIG_TIGON3 | 1 + .../L1-RECOMMEND/default/CONFIG_TIGON3_HWMON | 1 + .../default/CONFIG_TIMERLAT_TRACER | 1 + .../L1-RECOMMEND/default/CONFIG_TIME_NS | 1 + .../L1-RECOMMEND/default/CONFIG_TLS_DEVICE | 1 + .../L1-RECOMMEND/default/CONFIG_TLS_TOE | 1 + .../L1-RECOMMEND/default/CONFIG_TMPFS_INODE64 | 1 + .../default/CONFIG_TMPFS_POSIX_ACL | 1 + .../L1-RECOMMEND/default/CONFIG_TMPFS_XATTR | 1 + .../default/CONFIG_TRACER_SNAPSHOT | 1 + .../CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP | 1 + .../L1-RECOMMEND/default/CONFIG_TRACING_MAP | 1 + .../CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS | 1 + .../CONFIG_TRANSPARENT_HUGEPAGE_MADVISE | 1 + .../L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS | 1 + .../default/CONFIG_TRUSTED_KEYS_TPM | 1 + .../configs/L1-RECOMMEND/default/CONFIG_UBSAN | 1 + .../L1-RECOMMEND/default/CONFIG_UDF_FS | 1 + .../L1-RECOMMEND/default/CONFIG_UEVENT_HELPER | 1 + .../configs/L1-RECOMMEND/default/CONFIG_UID16 | 1 + .../L1-RECOMMEND/default/CONFIG_UIO_AEC | 1 + .../L1-RECOMMEND/default/CONFIG_UIO_CIF | 1 + .../default/CONFIG_UIO_DMEM_GENIRQ | 1 + .../L1-RECOMMEND/default/CONFIG_UIO_MF624 | 1 + .../L1-RECOMMEND/default/CONFIG_UIO_NETX | 1 + .../default/CONFIG_UIO_PCI_GENERIC | 1 + .../default/CONFIG_UIO_PDRV_GENIRQ | 1 + 
.../L1-RECOMMEND/default/CONFIG_UIO_PRUSS | 1 + .../L1-RECOMMEND/default/CONFIG_UIO_SERCOS3 | 1 + .../L1-RECOMMEND/default/CONFIG_UNIX_DIAG | 1 + .../L1-RECOMMEND/default/CONFIG_UPROBES | 1 + .../default/CONFIG_USB_NET_DRIVERS | 1 + .../L1-RECOMMEND/default/CONFIG_USB_PCI | 1 + .../L1-RECOMMEND/default/CONFIG_USELIB | 1 + .../L1-RECOMMEND/default/CONFIG_USER_NS | 1 + .../configs/L1-RECOMMEND/default/CONFIG_VDPA | 1 + .../default/CONFIG_VFIO_CONTAINER | 1 + .../L1-RECOMMEND/default/CONFIG_VFIO_GROUP | 1 + .../L1-RECOMMEND/default/CONFIG_VFIO_NOIOMMU | 1 + .../L1-RECOMMEND/default/CONFIG_VFIO_PCI_CORE | 1 + .../default/CONFIG_VGA_ARB_MAX_GPUS | 1 + .../L1-RECOMMEND/default/CONFIG_VHOST_MENU | 1 + .../L1-RECOMMEND/default/CONFIG_VHOST_SCSI | 1 + .../default/CONFIG_VIRTIO_DMA_SHARED_BUFFER | 1 + .../L1-RECOMMEND/default/CONFIG_VIRTIO_INPUT | 1 + .../default/CONFIG_VIRTIO_PCI_LEGACY | 1 + .../default/CONFIG_VIRTIO_VSOCKETS | 1 + .../default/CONFIG_VIRTIO_VSOCKETS_COMMON | 1 + .../L1-RECOMMEND/default/CONFIG_VIRT_FUSE | 1 + .../L1-RECOMMEND/default/CONFIG_VSOCKETS_DIAG | 1 + .../default/CONFIG_VSOCKETS_LOOPBACK | 1 + .../L1-RECOMMEND/default/CONFIG_VSOCKMON | 1 + .../default/CONFIG_VT_HW_CONSOLE_BINDING | 1 + .../configs/L1-RECOMMEND/default/CONFIG_VXLAN | 1 + .../configs/L1-RECOMMEND/default/CONFIG_WAN | 1 + .../default/CONFIG_WARN_ALL_UNSEEDED_RANDOM | 1 + .../L1-RECOMMEND/default/CONFIG_WATCHDOG_CORE | 1 + .../CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED | 1 + .../default/CONFIG_WATCHDOG_NOWAYOUT | 1 + .../default/CONFIG_WATCHDOG_OPEN_TIMEOUT | 1 + .../default/CONFIG_WATCHDOG_PRETIMEOUT_GOV | 1 + .../default/CONFIG_WATCHDOG_SYSFS | 1 + .../L1-RECOMMEND/default/CONFIG_WIREGUARD | 1 + .../default/CONFIG_WIREGUARD_DEBUG | 1 + .../L1-RECOMMEND/default/CONFIG_WQ_WATCHDOG | 1 + .../default/CONFIG_XDP_SOCKETS_DIAG | 1 + .../default/CONFIG_XFRM_INTERFACE | 1 + .../L1-RECOMMEND/default/CONFIG_XFRM_MIGRATE | 1 + .../default/CONFIG_XFRM_STATISTICS | 1 + .../default/CONFIG_XFRM_SUB_POLICY 
| 1 + .../L1-RECOMMEND/default/CONFIG_XFS_DEBUG | 1 + .../default/CONFIG_XFS_ONLINE_SCRUB | 1 + .../L1-RECOMMEND/default/CONFIG_XFS_POSIX_ACL | 1 + .../L1-RECOMMEND/default/CONFIG_XFS_QUOTA | 1 + .../L1-RECOMMEND/default/CONFIG_XFS_RT | 1 + .../default/CONFIG_XFS_SUPPORT_V4 | 1 + .../L1-RECOMMEND/default/CONFIG_XFS_WARN | 1 + .../L1-RECOMMEND/default/CONFIG_XOR_BLOCKS | 1 + .../L1-RECOMMEND/default/CONFIG_XZ_DEC_ARM | 1 + .../default/CONFIG_XZ_DEC_ARMTHUMB | 1 + .../L1-RECOMMEND/default/CONFIG_XZ_DEC_BCJ | 1 + .../L1-RECOMMEND/default/CONFIG_XZ_DEC_IA64 | 1 + .../default/CONFIG_XZ_DEC_MICROLZMA | 1 + .../default/CONFIG_XZ_DEC_POWERPC | 1 + .../L1-RECOMMEND/default/CONFIG_XZ_DEC_SPARC | 1 + .../L1-RECOMMEND/default/CONFIG_XZ_DEC_TEST | 1 + .../L1-RECOMMEND/default/CONFIG_XZ_DEC_X86 | 1 + .../L1-RECOMMEND/default/CONFIG_Z3FOLD | 1 + .../configs/L1-RECOMMEND/default/CONFIG_ZBUD | 1 + .../default/CONFIG_ZERO_CALL_USED_REGS | 1 + .../L1-RECOMMEND/default/CONFIG_ZISOFS | 1 + .../configs/L1-RECOMMEND/default/CONFIG_ZPOOL | 1 + .../L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP | 1 + .../default/CONFIG_ZRAM_DEF_COMP_LZ4 | 1 + .../default/CONFIG_ZRAM_DEF_COMP_LZ4HC | 1 + .../default/CONFIG_ZRAM_DEF_COMP_LZO | 1 + .../default/CONFIG_ZRAM_DEF_COMP_LZORLE | 1 + .../default/CONFIG_ZRAM_DEF_COMP_ZSTD | 1 + .../default/CONFIG_ZRAM_MEMORY_TRACKING | 1 + .../default/CONFIG_ZRAM_MULTI_COMP | 1 + .../default/CONFIG_ZRAM_WRITEBACK | 1 + .../L1-RECOMMEND/default/CONFIG_ZSMALLOC | 1 + .../default/CONFIG_ZSMALLOC_CHAIN_SIZE | 1 + .../L1-RECOMMEND/default/CONFIG_ZSMALLOC_STAT | 1 + .../L1-RECOMMEND/default/CONFIG_ZSTD_COMMON | 1 + .../configs/L1-RECOMMEND/default/CONFIG_ZSWAP | 1 + .../default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT | 1 + .../CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 | 1 + .../CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE | 1 + .../CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 | 1 + .../CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC | 1 + .../CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO | 1 + 
.../CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD | 1 + .../default/CONFIG_ZSWAP_DEFAULT_ON | 1 + .../CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON | 1 + .../default/CONFIG_ZSWAP_ZPOOL_DEFAULT | 1 + .../default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD | 1 + .../default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD | 1 + .../CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_ACPI_BGRT | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_ACPI_DOCK | 1 + .../L1-RECOMMEND/x86/CONFIG_ACPI_EC_DEBUGFS | 1 + .../L1-RECOMMEND/x86/CONFIG_ACPI_EXTLOG | 1 + .../x86/CONFIG_ACPI_PROCESSOR_AGGREGATOR | 1 + .../x86/CONFIG_ACPI_REV_OVERRIDE_POSSIBLE | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_ACPI_SBS | 1 + .../L1-RECOMMEND/x86/CONFIG_ACPI_SLEEP | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_ACPI_TAD | 1 + .../L1-RECOMMEND/x86/CONFIG_ACPI_THERMAL_REL | 1 + .../L1-RECOMMEND/x86/CONFIG_ACPI_WATCHDOG | 1 + .../L1-RECOMMEND/x86/CONFIG_ACQUIRE_WDT | 1 + .../L1-RECOMMEND/x86/CONFIG_ADVANTECH_WDT | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_AMDTEE | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_AMD_HSMP | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU | 1 + .../L1-RECOMMEND/x86/CONFIG_AMD_IOMMU_V2 | 1 + .../L1-RECOMMEND/x86/CONFIG_AMD_MEM_ENCRYPT | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_AMD_PTDMA | 1 + .../x86/CONFIG_ARCH_CPUIDLE_HALTPOLL | 1 + .../L1-RECOMMEND/x86/CONFIG_ARCH_MEMORY_PROBE | 1 + .../x86/CONFIG_CALL_DEPTH_TRACKING | 1 + .../L1-RECOMMEND/x86/CONFIG_CMA_SIZE_MBYTES | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_CNIC | 1 + .../L1-RECOMMEND/x86/CONFIG_CPUMASK_OFFSTACK | 1 + .../x86/CONFIG_CPU_FREQ_GOV_SCHEDUTIL | 1 + .../L1-RECOMMEND/x86/CONFIG_CPU_IBPB_ENTRY | 1 + .../L1-RECOMMEND/x86/CONFIG_CPU_IBRS_ENTRY | 1 + .../x86/CONFIG_CPU_IDLE_GOV_HALTPOLL | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_CPU_SRSO | 1 + .../L1-RECOMMEND/x86/CONFIG_CPU_UNRET_ENTRY | 1 + .../L1-RECOMMEND/x86/CONFIG_CRASH_HOTPLUG | 1 + .../x86/CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 | 1 + .../x86/CONFIG_CRYPTO_AES_NI_INTEL | 1 + 
.../x86/CONFIG_CRYPTO_BLAKE2S_X86 | 1 + .../x86/CONFIG_CRYPTO_BLOWFISH_X86_64 | 1 + .../CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 | 1 + .../CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 | 1 + .../x86/CONFIG_CRYPTO_CAMELLIA_X86_64 | 1 + .../x86/CONFIG_CRYPTO_CAST5_AVX_X86_64 | 1 + .../x86/CONFIG_CRYPTO_CAST6_AVX_X86_64 | 1 + .../x86/CONFIG_CRYPTO_CHACHA20_X86_64 | 1 + .../x86/CONFIG_CRYPTO_CRC32C_INTEL | 1 + .../x86/CONFIG_CRYPTO_CRC32_PCLMUL | 1 + .../x86/CONFIG_CRYPTO_CRCT10DIF_PCLMUL | 1 + .../L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519 | 1 + .../x86/CONFIG_CRYPTO_CURVE25519_X86 | 1 + .../x86/CONFIG_CRYPTO_DES3_EDE_X86_64 | 1 + .../L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP | 1 + .../x86/CONFIG_CRYPTO_DEV_CCP_CRYPTO | 1 + .../L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DD | 1 + .../x86/CONFIG_CRYPTO_DEV_CCP_DEBUGFS | 1 + .../L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_HCT | 1 + .../x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO | 1 + .../x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS | 1 + .../L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT | 1 + .../x86/CONFIG_CRYPTO_DEV_QAT_C3XXX | 1 + .../x86/CONFIG_CRYPTO_DEV_QAT_C3XXXVF | 1 + .../x86/CONFIG_CRYPTO_DEV_QAT_C62X | 1 + .../x86/CONFIG_CRYPTO_DEV_QAT_C62XVF | 1 + .../x86/CONFIG_CRYPTO_DEV_QAT_DH895xCC | 1 + .../x86/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF | 1 + .../L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_CCP | 1 + .../L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_PSP | 1 + .../L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_TSSE | 1 + .../x86/CONFIG_CRYPTO_DEV_ZHAOXIN | 1 + .../x86/CONFIG_CRYPTO_DEV_ZHAOXIN_AES | 1 + .../x86/CONFIG_CRYPTO_DEV_ZHAOXIN_SHA | 1 + .../x86/CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL | 1 + .../x86/CONFIG_CRYPTO_NHPOLY1305_AVX2 | 1 + .../x86/CONFIG_CRYPTO_NHPOLY1305_SSE2 | 1 + .../x86/CONFIG_CRYPTO_POLY1305_X86_64 | 1 + .../x86/CONFIG_CRYPTO_SERPENT_AVX2_X86_64 | 1 + .../x86/CONFIG_CRYPTO_SERPENT_AVX_X86_64 | 1 + .../x86/CONFIG_CRYPTO_SERPENT_SSE2_X86_64 | 1 + .../L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA1_SSSE3 | 1 + .../x86/CONFIG_CRYPTO_SHA256_SSSE3 | 1 + 
.../x86/CONFIG_CRYPTO_SHA512_SSSE3 | 1 + .../x86/CONFIG_CRYPTO_SM2_ZHAOXIN_GMI | 1 + .../x86/CONFIG_CRYPTO_SM3_ZHAOXIN_GMI | 1 + .../x86/CONFIG_CRYPTO_SM4_ZHAOXIN_GMI | 1 + .../x86/CONFIG_CRYPTO_TWOFISH_AVX_X86_64 | 1 + .../x86/CONFIG_CRYPTO_TWOFISH_X86_64 | 1 + .../x86/CONFIG_CRYPTO_TWOFISH_X86_64_3WAY | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_CSV_GUEST | 1 + .../L1-RECOMMEND/x86/CONFIG_DEBUG_BOOT_PARAMS | 1 + .../x86/CONFIG_DEBUG_PERF_USE_VMALLOC | 1 + .../L1-RECOMMEND/x86/CONFIG_DEVICE_PRIVATE | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_DEVPORT | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX | 1 + .../L1-RECOMMEND/x86/CONFIG_DEV_DAX_HMEM | 1 + .../L1-RECOMMEND/x86/CONFIG_DEV_DAX_KMEM | 1 + .../L1-RECOMMEND/x86/CONFIG_DEV_DAX_PMEM | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_DLM_DEBUG | 1 + .../L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_CIK | 1 + .../x86/CONFIG_DRM_AMDGPU_USERPTR | 1 + .../L1-RECOMMEND/x86/CONFIG_DRM_AMD_ACP | 1 + .../x86/CONFIG_DRM_DP_AUX_CHARDEV | 1 + .../L1-RECOMMEND/x86/CONFIG_DRM_GMA500 | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_DRM_I915 | 1 + .../x86/CONFIG_DRM_I915_CAPTURE_ERROR | 1 + .../x86/CONFIG_DRM_I915_COMPRESS_ERROR | 1 + .../x86/CONFIG_DRM_I915_FENCE_TIMEOUT | 1 + .../x86/CONFIG_DRM_I915_FORCE_PROBE | 1 + .../L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT | 1 + .../x86/CONFIG_DRM_I915_GVT_KVMGT | 1 + .../x86/CONFIG_DRM_I915_HEARTBEAT_INTERVAL | 1 + .../x86/CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT | 1 + .../x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT | 1 + .../x86/CONFIG_DRM_I915_STOP_TIMEOUT | 1 + .../x86/CONFIG_DRM_I915_TIMESLICE_DURATION | 1 + .../x86/CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND | 1 + .../L1-RECOMMEND/x86/CONFIG_DRM_I915_USERPTR | 1 + .../L1-RECOMMEND/x86/CONFIG_DRM_VMWGFX | 1 + .../L1-RECOMMEND/x86/CONFIG_DWC_PCIE_PMU | 1 + .../L1-RECOMMEND/x86/CONFIG_DW_DMAC_PCI | 1 + .../x86/CONFIG_DYNAMIC_PHYSICAL_MASK | 1 + .../L1-RECOMMEND/x86/CONFIG_DYNAMIC_SIGFRAME | 1 + .../L1-RECOMMEND/x86/CONFIG_E1000E_HWTS | 1 + .../L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK | 1 + 
.../L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_DBGP | 1 + .../x86/CONFIG_EARLY_PRINTK_USB_XDBC | 1 + .../L1-RECOMMEND/x86/CONFIG_EDAC_AMD64 | 1 + .../L1-RECOMMEND/x86/CONFIG_EDAC_DECODE_MCE | 1 + .../L1-RECOMMEND/x86/CONFIG_EDAC_E752X | 1 + .../L1-RECOMMEND/x86/CONFIG_EDAC_I10NM | 1 + .../L1-RECOMMEND/x86/CONFIG_EDAC_I3000 | 1 + .../L1-RECOMMEND/x86/CONFIG_EDAC_I3200 | 1 + .../L1-RECOMMEND/x86/CONFIG_EDAC_I5100 | 1 + .../L1-RECOMMEND/x86/CONFIG_EDAC_I5400 | 1 + .../L1-RECOMMEND/x86/CONFIG_EDAC_I7300 | 1 + .../L1-RECOMMEND/x86/CONFIG_EDAC_I7CORE | 1 + .../L1-RECOMMEND/x86/CONFIG_EDAC_I82975X | 1 + .../L1-RECOMMEND/x86/CONFIG_EDAC_SBRIDGE | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_EDAC_SKX | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_EDD | 1 + .../L1-RECOMMEND/x86/CONFIG_EFI_COCO_SECRET | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_EFI_MIXED | 1 + .../L1-RECOMMEND/x86/CONFIG_EFI_RCI2_TABLE | 1 + .../L1-RECOMMEND/x86/CONFIG_EFI_RUNTIME_MAP | 1 + .../L1-RECOMMEND/x86/CONFIG_EFI_SECRET | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_FB_HYPERV | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_FB_SIMPLE | 1 + .../L1-RECOMMEND/x86/CONFIG_FB_SSD1307 | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_FB_VESA | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_FCOE_FNIC | 1 + .../L1-RECOMMEND/x86/CONFIG_FIRMWARE_EDID | 1 + .../L1-RECOMMEND/x86/CONFIG_FIRMWARE_MEMMAP | 1 + .../L1-RECOMMEND/x86/CONFIG_FUNCTION_PROFILER | 1 + .../x86/CONFIG_FW_LOADER_USER_HELPER | 1 + .../L1-RECOMMEND/x86/CONFIG_GART_IOMMU | 1 + .../x86/CONFIG_GENERIC_ADC_THERMAL | 1 + .../L1-RECOMMEND/x86/CONFIG_GENERIC_CPU | 1 + .../L1-RECOMMEND/x86/CONFIG_GENERIC_ISA_DMA | 1 + .../x86/CONFIG_GENERIC_PENDING_IRQ | 1 + .../L1-RECOMMEND/x86/CONFIG_GENERIC_PHY | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS | 1 + .../x86/CONFIG_GFS2_FS_LOCKING_DLM | 1 + .../x86/CONFIG_GPIO_GENERIC_PLATFORM | 1 + .../L1-RECOMMEND/x86/CONFIG_HALTPOLL_CPUIDLE | 1 + .../L1-RECOMMEND/x86/CONFIG_HANGCHECK_TIMER | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET | 1 + 
.../configs/L1-RECOMMEND/x86/CONFIG_HPET_MMAP | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_HSA_AMD | 1 + .../x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP | 1 + ...G_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON | 1 + .../L1-RECOMMEND/x86/CONFIG_HW_RANDOM_AMD | 1 + .../L1-RECOMMEND/x86/CONFIG_HW_RANDOM_INTEL | 1 + .../L1-RECOMMEND/x86/CONFIG_HW_RANDOM_ZHAOXIN | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_HYGON_CSV | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_HYGON_GM | 1 + .../L1-RECOMMEND/x86/CONFIG_HYGON_PSP2CPU_CMD | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV | 1 + .../L1-RECOMMEND/x86/CONFIG_HYPERVISOR_GUEST | 1 + .../L1-RECOMMEND/x86/CONFIG_HYPERV_BALLOON | 1 + .../L1-RECOMMEND/x86/CONFIG_HYPERV_IOMMU | 1 + .../L1-RECOMMEND/x86/CONFIG_HYPERV_KEYBOARD | 1 + .../L1-RECOMMEND/x86/CONFIG_HYPERV_STORAGE | 1 + .../L1-RECOMMEND/x86/CONFIG_HYPERV_UTILS | 1 + .../L1-RECOMMEND/x86/CONFIG_HYPERV_VSOCKETS | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_I2C_SLAVE | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_I40E_DCB | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_I8K | 1 + .../L1-RECOMMEND/x86/CONFIG_IA32_EMULATION | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_IGB_DCA | 1 + .../x86/CONFIG_INFINIBAND_OPA_VNIC | 1 + .../L1-RECOMMEND/x86/CONFIG_INFINIBAND_RDMAVT | 1 + .../L1-RECOMMEND/x86/CONFIG_INPUT_MOUSEDEV | 1 + .../L1-RECOMMEND/x86/CONFIG_INT3406_THERMAL | 1 + .../L1-RECOMMEND/x86/CONFIG_INT340X_THERMAL | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_HFI_THERMAL | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_IDMA64 | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_IDXD | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_BUS | 1 + .../x86/CONFIG_INTEL_IDXD_PERFMON | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_SVM | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_IOATDMA | 1 + .../x86/CONFIG_INTEL_IOMMU_PERF_EVENTS | 1 + ...ONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_MEI_ME | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_MEI_WDT | 1 + 
.../L1-RECOMMEND/x86/CONFIG_INTEL_PCH_THERMAL | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_PMC_CORE | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CLASS | 1 + .../x86/CONFIG_INTEL_PMT_CRASHLOG | 1 + .../x86/CONFIG_INTEL_PMT_TELEMETRY | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_POWERCLAMP | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_RAPL | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_CORE | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_TPMI | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_INTEL_RST | 1 + .../x86/CONFIG_INTEL_SPEED_SELECT_INTERFACE | 1 + .../x86/CONFIG_INTEL_SPEED_SELECT_TPMI | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_TH_ACPI | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_TH_DEBUG | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_TH_GTH | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_TH_MSU | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_TH_PCI | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_TH_PTI | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_TH_STH | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_TPMI | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_TURBO_MAX_3 | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_INTEL_TXT | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_VSEC | 1 + .../x86/CONFIG_IOMMU_DEFAULT_DMA_STRICT | 1 + .../x86/CONFIG_IOMMU_DEFAULT_PASSTHROUGH | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_IOSF_MBI | 1 + .../L1-RECOMMEND/x86/CONFIG_IO_DELAY_0X80 | 1 + .../x86/CONFIG_IR_SERIAL_TRANSMITTER | 1 + .../L1-RECOMMEND/x86/CONFIG_IR_SHARP_DECODER | 1 + .../L1-RECOMMEND/x86/CONFIG_IR_XMP_DECODER | 1 + .../L1-RECOMMEND/x86/CONFIG_ISA_DMA_API | 1 + .../L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT | 1 + .../L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT_FIND | 1 + .../x86/CONFIG_ITCO_VENDOR_SUPPORT | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_ITCO_WDT | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_IXGBE_DCA | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_KCSAN | 1 + .../x86/CONFIG_KDB_DEFAULT_ENABLE | 1 + .../L1-RECOMMEND/x86/CONFIG_KERNEL_BZIP2 | 1 + .../L1-RECOMMEND/x86/CONFIG_KERNEL_GZIP | 1 + .../L1-RECOMMEND/x86/CONFIG_KERNEL_LZ4 | 1 
+ .../L1-RECOMMEND/x86/CONFIG_KERNEL_LZMA | 1 + .../L1-RECOMMEND/x86/CONFIG_KERNEL_LZO | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_KERNEL_XZ | 1 + .../L1-RECOMMEND/x86/CONFIG_KERNEL_ZSTD | 1 + .../x86/CONFIG_KEXEC_BZIMAGE_VERIFY_SIG | 1 + .../L1-RECOMMEND/x86/CONFIG_KEXEC_JUMP | 1 + .../L1-RECOMMEND/x86/CONFIG_KEXEC_SIG_FORCE | 1 + .../L1-RECOMMEND/x86/CONFIG_KEYBOARD_ADC | 1 + .../L1-RECOMMEND/x86/CONFIG_KEYBOARD_ATKBD | 1 + .../x86/CONFIG_KGDB_LOW_LEVEL_TRAP | 1 + .../L1-RECOMMEND/x86/CONFIG_KVM_AMD_SEV | 1 + .../x86/CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID | 1 + .../x86/CONFIG_LEGACY_VSYSCALL_NONE | 1 + .../L1-RECOMMEND/x86/CONFIG_LOG_BUF_SHIFT | 1 + .../x86/CONFIG_MAPPING_DIRTY_HELPERS | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_MAXSMP | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_MCORE2 | 1 + .../L1-RECOMMEND/x86/CONFIG_MELLANOX_PLATFORM | 1 + .../L1-RECOMMEND/x86/CONFIG_MEM_SOFT_DIRTY | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_MICROCODE | 1 + .../L1-RECOMMEND/x86/CONFIG_MITIGATION_RFDS | 1 + .../L1-RECOMMEND/x86/CONFIG_MLX5_CORE_IPOIB | 1 + .../L1-RECOMMEND/x86/CONFIG_MLXREG_HOTPLUG | 1 + .../L1-RECOMMEND/x86/CONFIG_MLX_PLATFORM | 1 + .../x86/CONFIG_MODIFY_LDT_SYSCALL | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_MOUSE_PS2 | 1 + .../L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER | 1 + .../x86/CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT | 1 + ...CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_NET_TULIP | 1 + .../L1-RECOMMEND/x86/CONFIG_NR_CPUS_DEFAULT | 1 + .../x86/CONFIG_NR_CPUS_RANGE_BEGIN | 1 + .../L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_END | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_NUMA_EMU | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_NVRAM | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_NV_TCO | 1 + .../x86/CONFIG_PARAVIRT_SPINLOCKS | 1 + .../L1-RECOMMEND/x86/CONFIG_PCI_HYPERV | 1 + .../x86/CONFIG_PCI_HYPERV_INTERFACE | 1 + .../L1-RECOMMEND/x86/CONFIG_PCI_MMCONFIG | 1 + .../L1-RECOMMEND/x86/CONFIG_PCSPKR_PLATFORM | 1 + 
.../x86/CONFIG_PERF_EVENTS_AMD_BRS | 1 + .../x86/CONFIG_PERF_EVENTS_AMD_POWER | 1 + .../x86/CONFIG_PERF_EVENTS_AMD_UNCORE | 1 + .../x86/CONFIG_PERF_EVENTS_INTEL_CSTATE | 1 + .../x86/CONFIG_PERF_EVENTS_INTEL_RAPL | 1 + .../x86/CONFIG_PERF_EVENTS_INTEL_UNCORE | 1 + .../L1-RECOMMEND/x86/CONFIG_PHYSICAL_ALIGN | 1 + .../L1-RECOMMEND/x86/CONFIG_PINCTRL_KX7000 | 1 + .../L1-RECOMMEND/x86/CONFIG_PINCTRL_ZHAOXIN | 1 + .../L1-RECOMMEND/x86/CONFIG_PM_TRACE_RTC | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_POWERCAP | 1 + .../x86/CONFIG_PROC_THERMAL_MMIO_RAPL | 1 + .../x86/CONFIG_PTE_MARKER_UFFD_WP | 1 + .../L1-RECOMMEND/x86/CONFIG_QAT_VFIO_PCI | 1 + .../CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC | 1 + .../L1-RECOMMEND/x86/CONFIG_RAS_CEC_DEBUG | 1 + .../x86/CONFIG_RESCTRL_FS_PSEUDO_LOCK | 1 + .../L1-RECOMMEND/x86/CONFIG_RTC_DRV_CMOS | 1 + .../L1-RECOMMEND/x86/CONFIG_SCHED_MC_PRIO | 1 + .../x86/CONFIG_SCHED_OMIT_FRAME_POINTER | 1 + .../L1-RECOMMEND/x86/CONFIG_SDEI_WATCHDOG | 1 + .../x86/CONFIG_SENSORS_ZHAOXIN_CPUTEMP | 1 + .../L1-RECOMMEND/x86/CONFIG_SERIO_I8042 | 1 + .../L1-RECOMMEND/x86/CONFIG_SGETMASK_SYSCALL | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_SLS | 1 + .../L1-RECOMMEND/x86/CONFIG_SP5100_TCO | 1 + .../L1-RECOMMEND/x86/CONFIG_SPI_CADENCE | 1 + .../L1-RECOMMEND/x86/CONFIG_SPI_DESIGNWARE | 1 + .../L1-RECOMMEND/x86/CONFIG_SQUASHFS_LZ4 | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_STAGING | 1 + .../x86/CONFIG_STATIC_CALL_SELFTEST | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_STM_DUMMY | 1 + .../L1-RECOMMEND/x86/CONFIG_STM_PROTO_BASIC | 1 + .../L1-RECOMMEND/x86/CONFIG_STM_PROTO_SYS_T | 1 + .../x86/CONFIG_STM_SOURCE_CONSOLE | 1 + .../L1-RECOMMEND/x86/CONFIG_STM_SOURCE_FTRACE | 1 + .../x86/CONFIG_STM_SOURCE_HEARTBEAT | 1 + .../x86/CONFIG_STRICT_SIGALTSTACK_SIZE | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_TCG_HYGON | 1 + .../L1-RECOMMEND/x86/CONFIG_TCG_INFINEON | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_TCG_NSC | 1 + 
.../configs/L1-RECOMMEND/x86/CONFIG_TCM_HYGON | 1 + .../L1-RECOMMEND/x86/CONFIG_TCP_CONG_CDG | 1 + .../L1-RECOMMEND/x86/CONFIG_TDM_DEV_HYGON | 1 + .../L1-RECOMMEND/x86/CONFIG_TDM_KERNEL_GUARD | 1 + .../L1-RECOMMEND/x86/CONFIG_TDX_GUEST_DRIVER | 1 + .../L1-RECOMMEND/x86/CONFIG_TEST_LIVEPATCH | 1 + .../x86/CONFIG_THERMAL_GOV_BANG_BANG | 1 + .../x86/CONFIG_THERMAL_WRITABLE_TRIPS | 1 + .../L1-RECOMMEND/x86/CONFIG_TOUCHSCREEN_ADC | 1 + .../L1-RECOMMEND/x86/CONFIG_UCLAMP_TASK | 1 + .../L1-RECOMMEND/x86/CONFIG_UIO_HV_GENERIC | 1 + .../L1-RECOMMEND/x86/CONFIG_UNACCEPTED_MEMORY | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_VBOXGUEST | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_VFIO_MDEV | 1 + .../L1-RECOMMEND/x86/CONFIG_VFIO_PCI_IGD | 1 + .../L1-RECOMMEND/x86/CONFIG_VGA_SWITCHEROO | 1 + .../x86/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES | 1 + .../L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB | 1 + .../x86/CONFIG_VIRTIO_PCI_LIB_LEGACY | 1 + .../L1-RECOMMEND/x86/CONFIG_VIRT_DRIVERS | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_VMD | 1 + .../L1-RECOMMEND/x86/CONFIG_VMWARE_BALLOON | 1 + .../L1-RECOMMEND/x86/CONFIG_VMWARE_PVSCSI | 1 + .../L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI | 1 + .../x86/CONFIG_VMWARE_VMCI_VSOCKETS | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_VMXNET3 | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_WDAT_WDT | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_X86_16BIT | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_ACPI_CPUFREQ | 1 + .../x86/CONFIG_X86_AMD_FREQ_SENSITIVITY | 1 + .../x86/CONFIG_X86_AMD_PLATFORM_DEVICE | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE | 1 + .../x86/CONFIG_X86_AMD_PSTATE_DEFAULT_MODE | 1 + ...NFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK | 1 + .../x86/CONFIG_X86_CHECK_BIOS_CORRUPTION | 1 + .../x86/CONFIG_X86_CPA_STATISTICS | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_X86_CPUID | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_DEBUG_FPU | 1 + .../x86/CONFIG_X86_DECODER_SELFTEST | 1 + .../x86/CONFIG_X86_DIRECT_GBPAGES | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_ESPFIX64 | 1 + 
.../x86/CONFIG_X86_EXTENDED_PLATFORM | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_GOLDFISH | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_INTEL_LPSS | 1 + .../CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_INTEL_MID | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_INTEL_PSTATE | 1 + .../x86/CONFIG_X86_INTEL_TSX_MODE_AUTO | 1 + .../x86/CONFIG_X86_INTEL_TSX_MODE_OFF | 1 + .../x86/CONFIG_X86_INTEL_TSX_MODE_ON | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_KERNEL_IBT | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_MCELOG_LEGACY | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_MCE_INJECT | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_MPPARSE | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_X86_MSR | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_NUMACHIP | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_P4_CLOCKMOD | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_PCC_CPUFREQ | 1 + .../x86/CONFIG_X86_PKG_TEMP_THERMAL | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_PM_TIMER | 1 + .../CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS | 1 + .../L1-RECOMMEND/x86/CONFIG_X86_SGX_KVM | 1 + .../x86/CONFIG_X86_SUPPORTS_MEMORY_FAILURE | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_UV | 1 + .../x86/CONFIG_X86_VERBOSE_BOOTUP | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_X86_VSMP | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN | 1 + .../x86/CONFIG_XEN_NETDEV_FRONTEND | 1 + .../configs/L1-RECOMMEND/x86/CONFIG_XEN_PVHVM | 1 + .../L1-RECOMMEND/x86/CONFIG_XFRM_USER_COMPAT | 1 + .../L2-OPTIONAL/arm64/CONFIG_A64FX_DIAG | 1 + .../L2-OPTIONAL/arm64/CONFIG_ACPI_APEI_SEA | 1 + .../L2-OPTIONAL/arm64/CONFIG_ACPI_APMT | 1 + .../arm64/CONFIG_ACPI_CCA_REQUIRED | 1 + .../arm64/CONFIG_ACPI_CPPC_CPUFREQ_FIE | 1 + .../L2-OPTIONAL/arm64/CONFIG_ACPI_GENERIC_GSI | 1 + .../L2-OPTIONAL/arm64/CONFIG_ACPI_GTDT | 1 + .../L2-OPTIONAL/arm64/CONFIG_ACPI_IORT | 1 + .../L2-OPTIONAL/arm64/CONFIG_ACPI_MCFG | 1 + .../L2-OPTIONAL/arm64/CONFIG_ACPI_MPAM | 1 + .../L2-OPTIONAL/arm64/CONFIG_ACPI_PPTT | 1 + .../L2-OPTIONAL/arm64/CONFIG_AHCI_CEVA | 1 + .../L2-OPTIONAL/arm64/CONFIG_ALTERA_STAPL | 1 + 
.../configs/L2-OPTIONAL/arm64/CONFIG_AL_FIC | 1 + .../L2-OPTIONAL/arm64/CONFIG_AMBA_PL08X | 1 + .../L2-OPTIONAL/arm64/CONFIG_AMD8111_ETH | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE | 1 + .../L2-OPTIONAL/arm64/CONFIG_AMD_XGBE_DCB | 1 + .../L2-OPTIONAL/arm64/CONFIG_AMIGA_PARTITION | 1 + .../arm64/CONFIG_AMPERE_ERRATUM_AC03_CPU_38 | 1 + .../L2-OPTIONAL/arm64/CONFIG_APDS9802ALS | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_AQTION | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_ACTIONS | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_ALPINE | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_APPLE | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BCM | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_BERLIN | 1 + .../arm64/CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS | 1 + .../arm64/CONFIG_ARCH_BINFMT_ELF_STATE | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_BITMAIN | 1 + ...CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_EXYNOS | 1 + .../arm64/CONFIG_ARCH_HAS_DMA_PREP_COHERENT | 1 + .../arm64/CONFIG_ARCH_HAS_KEEPINITRD | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RELR | 1 + .../arm64/CONFIG_ARCH_HAS_SETUP_DMA_OPS | 1 + .../arm64/CONFIG_ARCH_HAS_SUBPAGE_FAULTS | 1 + .../arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU | 1 + .../arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE | 1 + .../arm64/CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS | 1 + .../arm64/CONFIG_ARCH_HAS_TICK_BROADCAST | 1 + .../arm64/CONFIG_ARCH_HAVE_ELF_PROT | 1 + .../arm64/CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_HISI | 1 + .../arm64/CONFIG_ARCH_INTEL_SOCFPGA | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_ARCH_K3 | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_KEEMBAY | 1 + .../arm64/CONFIG_ARCH_KEEP_MEMBLOCK | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_LG1K | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_MA35 | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_MEDIATEK | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_MESON | 1 + .../arm64/CONFIG_ARCH_MMAP_RND_BITS | 1 + .../arm64/CONFIG_ARCH_MMAP_RND_BITS_MAX | 1 + .../arm64/CONFIG_ARCH_MMAP_RND_BITS_MIN | 1 + 
.../CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_MVEBU | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_NPCM | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NXP | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_QCOM | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_REALTEK | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_RENESAS | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_ROCKCHIP | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_SEATTLE | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_SPARX5 | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_SPRD | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_STM32 | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_SUNXI | 1 + .../arm64/CONFIG_ARCH_SUPPORTS_HUGETLBFS | 1 + ...ONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG | 1 + .../CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_SYNQUACER | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER2 | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_UNIPHIER | 1 + .../arm64/CONFIG_ARCH_USES_PG_ARCH_X | 1 + .../arm64/CONFIG_ARCH_USE_GNU_PROPERTY | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_VEXPRESS | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_VISCONTI | 1 + ...NFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT | 1 + .../arm64/CONFIG_ARCH_WANT_FRAME_POINTERS | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_XGENE | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARCH_ZYNQMP | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM64_AS_HAS_MTE | 1 + .../arm64/CONFIG_ARM64_CONT_PMD_SHIFT | 1 + .../arm64/CONFIG_ARM64_CONT_PTE_SHIFT | 1 + .../arm64/CONFIG_ARM64_ERRATUM_858921 | 1 + .../CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419 | 1 + .../arm64/CONFIG_ARM64_LSE_ATOMICS | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM64_PAGE_SHIFT | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS_48 | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM64_RELOC_TEST | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS_48 | 1 + 
.../arm64/CONFIG_ARM64_WORKAROUND_CLEAN_CACHE | 1 + .../arm64/CONFIG_ARM64_WORKAROUND_REPEAT_TLBI | 1 + .../CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT | 1 + ...G_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD | 1 + .../CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_ARM_AMBA | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER | 1 + .../arm64/CONFIG_ARM_ARCH_TIMER_EVTSTREAM | 1 + .../CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM_CCI_PMU | 1 + .../arm64/CONFIG_ARM_FFA_TRANSPORT | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM_GIC_MAX_NR | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_ARM_MHU | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CHECKER | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CPUIDLE | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_FW | 1 + .../arm64/CONFIG_ARM_QCOM_CPUFREQ_HW | 1 + .../arm64/CONFIG_ARM_SBSA_WATCHDOG | 1 + .../arm64/CONFIG_ARM_SCMI_PROTOCOL | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM_SMCCC_SOC_ID | 1 + .../arm64/CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM | 1 + .../arm64/CONFIG_ARM_SMMU_QCOM_DEBUG | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_V3_SVA | 1 + .../L2-OPTIONAL/arm64/CONFIG_ARM_TIMER_SP804 | 1 + .../L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_2 | 1 + .../L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_3 | 1 + .../L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_4 | 1 + .../L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_5 | 1 + .../arm64/CONFIG_AS_HAS_CFI_NEGATE_RA_STATE | 1 + .../L2-OPTIONAL/arm64/CONFIG_AS_HAS_LDAPR | 1 + .../arm64/CONFIG_AS_HAS_LSE_ATOMICS | 1 + .../L2-OPTIONAL/arm64/CONFIG_AS_HAS_SHA3 | 1 + .../L2-OPTIONAL/arm64/CONFIG_AT803X_PHY | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_ATL2 | 1 + .../arm64/CONFIG_AUDIT_ARCH_COMPAT_GENERIC | 1 + .../arm64/CONFIG_AUDIT_COMPAT_GENERIC | 1 + .../L2-OPTIONAL/arm64/CONFIG_AUDIT_GENERIC | 1 + .../L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_GPIO | 1 + .../L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_LED | 1 + .../L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_PWM | 1 + 
.../L2-OPTIONAL/arm64/CONFIG_BCM_SBA_RAID | 1 + .../arm64/CONFIG_BLK_CGROUP_IOLATENCY | 1 + .../L2-OPTIONAL/arm64/CONFIG_BLK_DEV_PMEM | 1 + .../L2-OPTIONAL/arm64/CONFIG_BRCMSTB_GISB_ARB | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_BT | 1 + .../CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC | 1 + .../L2-OPTIONAL/arm64/CONFIG_CAVIUM_CPT | 1 + .../arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET | 1 + .../CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI | 1 + .../arm64/CONFIG_CC_HAS_SIGN_RETURN_ADDRESS | 1 + .../arm64/CONFIG_CC_HAVE_SHADOW_CALL_STACK | 1 + .../CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_CDX_BUS | 1 + .../L2-OPTIONAL/arm64/CONFIG_CHARGER_BQ24190 | 1 + .../arm64/CONFIG_CHARGER_DETECTOR_MAX14656 | 1 + .../L2-OPTIONAL/arm64/CONFIG_CHARGER_MANAGER | 1 + .../L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9467 | 1 + .../L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9471 | 1 + .../L2-OPTIONAL/arm64/CONFIG_CHARGER_SMB347 | 1 + .../L2-OPTIONAL/arm64/CONFIG_CHARGER_UCS1002 | 1 + .../L2-OPTIONAL/arm64/CONFIG_CHROMEOS_ACPI | 1 + .../arm64/CONFIG_CHROMEOS_PRIVACY_SCREEN | 1 + .../L2-OPTIONAL/arm64/CONFIG_CHROMEOS_TBMC | 1 + .../L2-OPTIONAL/arm64/CONFIG_CHROME_PLATFORMS | 1 + .../L2-OPTIONAL/arm64/CONFIG_CLKSRC_MMIO | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_CLK_ICST | 1 + .../L2-OPTIONAL/arm64/CONFIG_CLK_SP810 | 1 + .../L2-OPTIONAL/arm64/CONFIG_CLK_VEXPRESS_OSC | 1 + .../L2-OPTIONAL/arm64/CONFIG_CLONE_BACKWARDS | 1 + .../arm64/CONFIG_COMMON_CLK_AXI_CLKGEN | 1 + .../arm64/CONFIG_COMMON_CLK_CDCE925 | 1 + .../arm64/CONFIG_COMMON_CLK_FIXED_MMIO | 1 + .../arm64/CONFIG_COMMON_CLK_HI3516CV300 | 1 + .../arm64/CONFIG_COMMON_CLK_HI3519 | 1 + .../arm64/CONFIG_COMMON_CLK_HI3559A | 1 + .../arm64/CONFIG_COMMON_CLK_HI3660 | 1 + .../arm64/CONFIG_COMMON_CLK_HI3670 | 1 + .../arm64/CONFIG_COMMON_CLK_HI3798CV200 | 1 + .../arm64/CONFIG_COMMON_CLK_HI6220 | 1 + .../L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_QCOM | 1 + .../arm64/CONFIG_COMMON_CLK_RS9_PCIE | 1 + .../L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SCPI | 
1 + .../L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI514 | 1 + .../arm64/CONFIG_COMMON_CLK_SI521XX | 1 + .../L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI570 | 1 + .../L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC3 | 1 + .../L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC5 | 1 + .../L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC7 | 1 + .../L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XGENE | 1 + .../arm64/CONFIG_COMMON_CLK_XLNX_CLKWZRD | 1 + .../arm64/CONFIG_COMMON_RESET_HI3660 | 1 + .../arm64/CONFIG_COMMON_RESET_HI6220 | 1 + .../arm64/CONFIG_COMPAT_ALIGNMENT_FIXUPS | 1 + .../L2-OPTIONAL/arm64/CONFIG_CORESIGHT_DUMMY | 1 + .../L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDA | 1 + .../L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDM | 1 + .../L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TRBE | 1 + .../L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT | 1 + .../arm64/CONFIG_CPUFREQ_DT_PLATDEV | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_CPU_PM | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64 | 1 + .../L2-OPTIONAL/arm64/CONFIG_CRC64_ROCKSOFT | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_CROS_EC | 1 + .../L2-OPTIONAL/arm64/CONFIG_CROS_HPS_I2C | 1 + .../arm64/CONFIG_CROS_KBD_LED_BACKLIGHT | 1 + .../arm64/CONFIG_CRYPTO_CRC64_ROCKSOFT | 1 + .../arm64/CONFIG_CRYPTO_DEV_CAVIUM_ZIP | 1 + .../L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CCREE | 1 + .../L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CPT | 1 + .../arm64/CONFIG_CRYPTO_DEV_HISI_HPRE | 1 + .../arm64/CONFIG_CRYPTO_DEV_HISI_QM | 1 + .../arm64/CONFIG_CRYPTO_DEV_HISI_SEC | 1 + .../arm64/CONFIG_CRYPTO_DEV_HISI_SEC2 | 1 + .../arm64/CONFIG_CRYPTO_DEV_HISI_TRNG | 1 + .../arm64/CONFIG_CRYPTO_DEV_HISI_ZIP | 1 + .../arm64/CONFIG_CRYPTO_DEV_OCTEONTX_CPT | 1 + .../arm64/CONFIG_CRYPTO_DEV_QAT_4XXX | 1 + .../L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCE | 1 + .../arm64/CONFIG_CRYPTO_DEV_QCOM_RNG | 1 + .../arm64/CONFIG_CRYPTO_LIB_POLY1305_RSIZE | 1 + .../arm64/CONFIG_CRYPTO_NHPOLY1305_NEON | 1 + .../arm64/CONFIG_CRYPTO_POLYVAL_ARM64_CE | 1 + .../arm64/CONFIG_CRYPTO_SHA3_ARM64 | 1 + .../arm64/CONFIG_CRYPTO_SHA512_ARM64 | 1 + 
.../arm64/CONFIG_CRYPTO_SHA512_ARM64_CE | 1 + .../L2-OPTIONAL/arm64/CONFIG_DEBUG_EFI | 1 + .../arm64/CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC | 1 + .../arm64/CONFIG_DMA_DECLARE_COHERENT | 1 + .../L2-OPTIONAL/arm64/CONFIG_DMA_DIRECT_REMAP | 1 + .../arm64/CONFIG_DMA_NONCOHERENT_MMAP | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_DMA_OF | 1 + .../arm64/CONFIG_DMA_RESTRICTED_POOL | 1 + .../arm64/CONFIG_DRM_ANALOGIX_ANX6345 | 1 + .../arm64/CONFIG_DRM_ANALOGIX_ANX7625 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_ARCPGU | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_DSI | 1 + .../arm64/CONFIG_DRM_CDNS_MHDP8546 | 1 + .../arm64/CONFIG_DRM_CHIPONE_ICN6211 | 1 + .../arm64/CONFIG_DRM_CHRONTEL_CH7033 | 1 + .../arm64/CONFIG_DRM_DISPLAY_CONNECTOR | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_DP_CEC | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_HDLCD | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_HISI_HIBMC | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_HISI_KIRIN | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_I2C_ADV7511 | 1 + .../arm64/CONFIG_DRM_I2C_NXP_TDA998X | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_I2C_SIL164 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT6505 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT66121 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_KOMEDA | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_DRM_LIMA | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_LOGICVC | 1 + .../arm64/CONFIG_DRM_LONTIUM_LT8912B | 1 + .../arm64/CONFIG_DRM_LONTIUM_LT9211 | 1 + .../arm64/CONFIG_DRM_LONTIUM_LT9611 | 1 + .../arm64/CONFIG_DRM_LONTIUM_LT9611UXC | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_LVDS_CODEC | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_MALI_DISPLAY | 1 + ...CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_DRM_MSM | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_NWL_MIPI_DSI | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_NXP_PTN3460 | 1 + .../arm64/CONFIG_DRM_PANEL_ABT_Y030XX067A | 1 + .../arm64/CONFIG_DRM_PANEL_ARM_VERSATILE | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_EDP | 1 + .../arm64/CONFIG_DRM_PANEL_ILITEK_IL9322 | 1 + 
.../arm64/CONFIG_DRM_PANEL_ILITEK_ILI9341 | 1 + .../arm64/CONFIG_DRM_PANEL_INNOLUX_EJ030NA | 1 + .../arm64/CONFIG_DRM_PANEL_LG_LB035Q02 | 1 + .../arm64/CONFIG_DRM_PANEL_LG_LG4573 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LVDS | 1 + .../arm64/CONFIG_DRM_PANEL_NEC_NL8048HL11 | 1 + .../arm64/CONFIG_DRM_PANEL_NEWVISION_NV3052C | 1 + .../arm64/CONFIG_DRM_PANEL_NOVATEK_NT39016 | 1 + .../CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO | 1 + .../arm64/CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 | 1 + .../arm64/CONFIG_DRM_PANEL_SAMSUNG_DB7430 | 1 + .../arm64/CONFIG_DRM_PANEL_SAMSUNG_LD9040 | 1 + .../arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 | 1 + .../arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 | 1 + .../arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 | 1 + ...ONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 | 1 + .../arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 | 1 + .../arm64/CONFIG_DRM_PANEL_SEIKO_43WVF1G | 1 + .../arm64/CONFIG_DRM_PANEL_SHARP_LS037V7DW01 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SIMPLE | 1 + .../arm64/CONFIG_DRM_PANEL_SITRONIX_ST7789V | 1 + .../arm64/CONFIG_DRM_PANEL_SONY_ACX565AKM | 1 + .../arm64/CONFIG_DRM_PANEL_TPO_TD028TTEC1 | 1 + .../arm64/CONFIG_DRM_PANEL_TPO_TD043MTEA1 | 1 + .../arm64/CONFIG_DRM_PANEL_TPO_TPG110 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_PANFROST | 1 + .../arm64/CONFIG_DRM_PARADE_PS8622 | 1 + .../arm64/CONFIG_DRM_PARADE_PS8640 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_PL111 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_SAMSUNG_DSIM | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_SII902X | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_SII9234 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_SIL_SII8620 | 1 + .../arm64/CONFIG_DRM_SIMPLE_BRIDGE | 1 + .../arm64/CONFIG_DRM_THINE_THC63LVD1024 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_TIDSS | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_TI_DLPC3433 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI83 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI86 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_TI_TFP410 | 1 + .../L2-OPTIONAL/arm64/CONFIG_DRM_TI_TPD12S015 | 1 + 
.../arm64/CONFIG_DRM_TOSHIBA_TC358762 | 1 + .../arm64/CONFIG_DRM_TOSHIBA_TC358764 | 1 + .../arm64/CONFIG_DRM_TOSHIBA_TC358767 | 1 + .../arm64/CONFIG_DRM_TOSHIBA_TC358768 | 1 + .../arm64/CONFIG_DRM_TOSHIBA_TC358775 | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_DTC | 1 + .../L2-OPTIONAL/arm64/CONFIG_DW_AXI_DMAC | 1 + .../L2-OPTIONAL/arm64/CONFIG_DW_DMAC_CORE | 1 + .../arm64/CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS | 1 + .../L2-OPTIONAL/arm64/CONFIG_EDAC_DMC520 | 1 + .../L2-OPTIONAL/arm64/CONFIG_EDAC_THUNDERX | 1 + .../L2-OPTIONAL/arm64/CONFIG_EFI_GENERIC_STUB | 1 + .../arm64/CONFIG_EFI_PARAMS_FROM_FDT | 1 + .../L2-OPTIONAL/arm64/CONFIG_EFI_ZBOOT | 1 + .../L2-OPTIONAL/arm64/CONFIG_EXTCON_FSA9480 | 1 + .../L2-OPTIONAL/arm64/CONFIG_EXTCON_MAX3355 | 1 + .../L2-OPTIONAL/arm64/CONFIG_EXTCON_PTN5150 | 1 + .../arm64/CONFIG_EXTCON_QCOM_SPMI_MISC | 1 + .../L2-OPTIONAL/arm64/CONFIG_EXTCON_RT8973A | 1 + .../L2-OPTIONAL/arm64/CONFIG_EXTCON_SM5502 | 1 + .../arm64/CONFIG_EXTCON_USBC_TUSB320 | 1 + .../L2-OPTIONAL/arm64/CONFIG_EXTCON_USB_GPIO | 1 + .../L2-OPTIONAL/arm64/CONFIG_FB_ARMCLCD | 1 + .../L2-OPTIONAL/arm64/CONFIG_FB_BACKLIGHT | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_FB_SM750 | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_FB_TFT | 1 + .../L2-OPTIONAL/arm64/CONFIG_FIELDBUS_DEV | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_FIREWIRE | 1 + .../L2-OPTIONAL/arm64/CONFIG_FRAME_POINTER | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSI | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_FSL_EDMA | 1 + .../arm64/CONFIG_FSL_ERRATUM_A008585 | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_FSL_QDMA | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_FSL_RCPM | 1 + .../L2-OPTIONAL/arm64/CONFIG_FS_MBCACHE | 1 + ...FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY | 1 + .../L2-OPTIONAL/arm64/CONFIG_FUJITSU_ES | 1 + .../arm64/CONFIG_FUNCTION_ALIGNMENT | 1 + .../arm64/CONFIG_FUNCTION_ALIGNMENT_8B | 1 + ...NFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS | 1 + .../arm64/CONFIG_GENERIC_ARCH_TOPOLOGY | 1 + 
.../L2-OPTIONAL/arm64/CONFIG_GENERIC_CSUM | 1 + .../L2-OPTIONAL/arm64/CONFIG_GENERIC_HWEIGHT | 1 + .../arm64/CONFIG_GENERIC_IDLE_POLL_SETUP | 1 + .../L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_IPI | 1 + .../arm64/CONFIG_GENERIC_IRQ_SHOW_LEVEL | 1 + .../arm64/CONFIG_GENERIC_SCHED_CLOCK | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_74X164 | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_74XX_MMIO | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_ADNP | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_ALTERA | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_CADENCE | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_DWAPB | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_FTGPIO010 | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_GRGPIO | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_GW_PLD | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_HLWD | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_LOGICVC | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_PL061 | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_SIFIVE | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_SYSCON | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_THUNDERX | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_WATCHDOG | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE_SB | 1 + .../L2-OPTIONAL/arm64/CONFIG_GPIO_XILINX | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XLP | 1 + .../arm64/CONFIG_HAVE_ARCH_BITREVERSE | 1 + .../arm64/CONFIG_HAVE_ARCH_COMPILER_H | 1 + .../arm64/CONFIG_HAVE_ARCH_KASAN_HW_TAGS | 1 + .../arm64/CONFIG_HAVE_ARCH_KASAN_SW_TAGS | 1 + .../L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC | 1 + .../arm64/CONFIG_HAVE_ARM_SMCCC_DISCOVERY | 1 + .../CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS | 1 + .../arm64/CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE | 1 + .../arm64/CONFIG_HAVE_PREEMPT_DYNAMIC_KEY | 1 + .../L2-OPTIONAL/arm64/CONFIG_HI3660_MBOX | 1 + .../L2-OPTIONAL/arm64/CONFIG_HI6220_MBOX | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_HID_ALPS | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_HID_ASUS | 1 + .../L2-OPTIONAL/arm64/CONFIG_HID_CMEDIA | 1 + .../arm64/CONFIG_HID_SENSOR_CUSTOM_SENSOR | 1 + 
.../L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_HUB | 1 + .../L2-OPTIONAL/arm64/CONFIG_HIP04_ETH | 1 + .../arm64/CONFIG_HISILICON_ERRATUM_161010101 | 1 + .../arm64/CONFIG_HISILICON_IRQ_MBIGEN | 1 + .../L2-OPTIONAL/arm64/CONFIG_HISILICON_LPC | 1 + .../arm64/CONFIG_HISI_ACC_VFIO_PCI | 1 + .../L2-OPTIONAL/arm64/CONFIG_HISI_FEMAC | 1 + .../L2-OPTIONAL/arm64/CONFIG_HISI_HIKEY_USB | 1 + .../L2-OPTIONAL/arm64/CONFIG_HISI_PCIE_PMU | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_HISI_PMU | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_HISI_PTT | 1 + .../L2-OPTIONAL/arm64/CONFIG_HIX5HD2_GMAC | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_HNS3_PMU | 1 + .../L2-OPTIONAL/arm64/CONFIG_HOTPLUG_PCI_SHPC | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_HP_ILO | 1 + .../L2-OPTIONAL/arm64/CONFIG_HP_WATCHDOG | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_HVC_DCC | 1 + .../L2-OPTIONAL/arm64/CONFIG_HWSPINLOCK_QCOM | 1 + .../L2-OPTIONAL/arm64/CONFIG_HW_PERF_EVENTS | 1 + .../arm64/CONFIG_HW_RANDOM_ARM_SMCCC_TRNG | 1 + .../L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CAVIUM | 1 + .../L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CCTRNG | 1 + .../L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CN10K | 1 + .../L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISI | 1 + .../L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISTB | 1 + .../L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_VIRTIO | 1 + .../L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_XGENE | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_ALGOPCF | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_AMD756 | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_AMD8111 | 1 + .../arm64/CONFIG_I2C_ARB_GPIO_CHALLENGE | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_CADENCE | 1 + .../arm64/CONFIG_I2C_DEMUX_PINCTRL | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO | 1 + .../arm64/CONFIG_I2C_GPIO_FAULT_INJECTOR | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_HELPER_AUTO | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_ELAN | 1 + .../arm64/CONFIG_I2C_HID_OF_GOODIX | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_I2C_HISI | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_HIX5HD2 | 1 
+ .../configs/L2-OPTIONAL/arm64/CONFIG_I2C_I801 | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_I2C_ISCH | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_MLXCPLD | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPIO | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPMUX | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA9541 | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA954x | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PINCTRL | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_NOMADIK | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_PIIX4 | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_QCOM_CCI | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_I2C_QUP | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_I2C_RK3X | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_I2C_SCMI | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_SIS96X | 1 + .../arm64/CONFIG_I2C_SLAVE_TESTUNIT | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_THUNDERX | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_VERSATILE | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIA | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_VIAPRO | 1 + .../arm64/CONFIG_I2C_XGENE_SLIMPRO | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_XLP9XX | 1 + .../L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN | 1 + .../arm64/CONFIG_I2C_ZHAOXIN_SMBUS | 1 + .../arm64/CONFIG_IEEE802154_FAKELB | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_IIO | 1 + .../L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS | 1 + .../arm64/CONFIG_INFINIBAND_HNS_HIP08 | 1 + .../L2-OPTIONAL/arm64/CONFIG_INPUT_JOYDEV | 1 + .../L2-OPTIONAL/arm64/CONFIG_INPUT_MISC | 1 + .../L2-OPTIONAL/arm64/CONFIG_INPUT_TABLET | 1 + .../arm64/CONFIG_INPUT_TOUCHSCREEN | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_IOMMUFD | 1 + .../arm64/CONFIG_IOMMU_IO_PGTABLE_ARMV7S | 1 + .../arm64/CONFIG_IOMMU_IO_PGTABLE_DART | 1 + .../CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST | 1 + .../arm64/CONFIG_IPMB_DEVICE_INTERFACE | 1 + .../L2-OPTIONAL/arm64/CONFIG_IPMI_IPMB | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_IRQCHIP | 1 + .../arm64/CONFIG_IRQ_BYPASS_MANAGER | 1 + .../CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISDN | 1 + 
.../configs/L2-OPTIONAL/arm64/CONFIG_ISL29003 | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_ISL29020 | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_K3_DMA | 1 + .../L2-OPTIONAL/arm64/CONFIG_KARMA_PARTITION | 1 + .../L2-OPTIONAL/arm64/CONFIG_KERNEL_MODE_NEON | 1 + .../L2-OPTIONAL/arm64/CONFIG_KEYBOARD_BCM | 1 + .../L2-OPTIONAL/arm64/CONFIG_KEYBOARD_CAP11XX | 1 + .../L2-OPTIONAL/arm64/CONFIG_KEYBOARD_GPIO | 1 + .../L2-OPTIONAL/arm64/CONFIG_KEYBOARD_OMAP4 | 1 + .../arm64/CONFIG_KEYBOARD_PINEPHONE | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_KS7010 | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_AAT1290 | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_AN30259A | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_AS3645A | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_AW2013 | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6328 | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6358 | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_CLASS_FLASH | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_CR0014114 | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_EL15203000 | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_IS31FL32XX | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_KTD2692 | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_LM3601X | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_LM3692X | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_LM3697 | 1 + .../arm64/CONFIG_LEDS_LP55XX_COMMON | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_LP8860 | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_LT3593 | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_REGULATOR | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_RT4505 | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_RT8515 | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_SGM3140 | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_SPI_BYTE | 1 + .../L2-OPTIONAL/arm64/CONFIG_LEDS_SYSCON | 1 + .../arm64/CONFIG_LEDS_TRIGGER_AUDIO | 1 + .../arm64/CONFIG_LEDS_TRIGGER_DISK | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_LIBFDT | 1 + .../L2-OPTIONAL/arm64/CONFIG_LIBNVDIMM | 1 + .../L2-OPTIONAL/arm64/CONFIG_LINEAR_RANGES | 1 + .../L2-OPTIONAL/arm64/CONFIG_LITEX_LITEETH | 1 + .../arm64/CONFIG_LITEX_SOC_CONTROLLER | 1 + 
.../configs/L2-OPTIONAL/arm64/CONFIG_LPC_ICH | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_LPC_SCH | 1 + .../L2-OPTIONAL/arm64/CONFIG_LTE_GDM724X | 1 + .../L2-OPTIONAL/arm64/CONFIG_MAC_PARTITION | 1 + .../L2-OPTIONAL/arm64/CONFIG_MAILBOX_TEST | 1 + .../arm64/CONFIG_MARVELL_CN10K_DDR_PMU | 1 + .../arm64/CONFIG_MARVELL_CN10K_TAD_PMU | 1 + .../L2-OPTIONAL/arm64/CONFIG_MARVELL_GTI_WDT | 1 + .../arm64/CONFIG_MDIO_BUS_MUX_GPIO | 1 + .../arm64/CONFIG_MDIO_BUS_MUX_MMIOREG | 1 + .../arm64/CONFIG_MDIO_BUS_MUX_MULTIPLEXER | 1 + .../L2-OPTIONAL/arm64/CONFIG_MDIO_GPIO | 1 + .../L2-OPTIONAL/arm64/CONFIG_MDIO_HISI_FEMAC | 1 + .../L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ4019 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ8064 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MDIO_MSCC_MIIM | 1 + .../L2-OPTIONAL/arm64/CONFIG_MDIO_OCTEON | 1 + .../L2-OPTIONAL/arm64/CONFIG_MDIO_XGENE | 1 + .../arm64/CONFIG_MEDIA_CEC_SUPPORT | 1 + .../L2-OPTIONAL/arm64/CONFIG_MEDIA_SUPPORT | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_ACT8945A | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_AS3722 | 1 + .../arm64/CONFIG_MFD_ATMEL_FLEXCOM | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_HLCDC | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_MFD_CORE | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_CPCAP | 1 + .../arm64/CONFIG_MFD_GATEWORKS_GSC | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_HI6421_PMIC | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_HI655X_PMIC | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_LOCHNAGAR | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_MAX5970 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_MAX77620 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_MAX77650 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_MAX77686 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_MAX77714 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_NTXEC | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_PM8008 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_RPM | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_I2C | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_SPI | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_RN5T618 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD71828 | 1 + 
.../L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD718XX | 1 + .../arm64/CONFIG_MFD_ROHM_BD957XMUF | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_I2C | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_SPI | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_SEC_CORE | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_SM501 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_STMFX | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_STMPE | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_STPMIC1 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_SYSCON | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_TC3589X | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_TI_LP87565 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_TPS65217 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_TPS65218 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_TPS65219 | 1 + .../arm64/CONFIG_MFD_VEXPRESS_SYSREG | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_VIPERBOARD | 1 + .../L2-OPTIONAL/arm64/CONFIG_MFD_VX855 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MICREL_KS8995MA | 1 + .../arm64/CONFIG_MINIX_SUBPARTITION | 1 + .../L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_PCI | 1 + .../L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_USB | 1 + .../L2-OPTIONAL/arm64/CONFIG_MLXBF_GIGE | 1 + .../L2-OPTIONAL/arm64/CONFIG_MMC_ARMMMCI | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW | 1 + .../L2-OPTIONAL/arm64/CONFIG_MMC_DW_BLUEFIELD | 1 + .../L2-OPTIONAL/arm64/CONFIG_MMC_DW_EXYNOS | 1 + .../arm64/CONFIG_MMC_DW_HI3798CV200 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MMC_DW_K3 | 1 + .../L2-OPTIONAL/arm64/CONFIG_MMC_DW_PCI | 1 + .../L2-OPTIONAL/arm64/CONFIG_MMC_DW_PLTFM | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_MMC_MTK | 1 + .../arm64/CONFIG_MMC_SDHCI_CADENCE | 1 + .../arm64/CONFIG_MMC_SDHCI_MILBEAUT | 1 + .../L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MSM | 1 + .../arm64/CONFIG_MMC_SDHCI_OF_ARASAN | 1 + .../arm64/CONFIG_MMC_SDHCI_OF_AT91 | 1 + .../arm64/CONFIG_MMC_SDHCI_OF_DWCMSHC | 1 + .../L2-OPTIONAL/arm64/CONFIG_MMC_STM32_SDMMC | 1 + .../L2-OPTIONAL/arm64/CONFIG_MMC_TOSHIBA_PCI | 1 + .../L2-OPTIONAL/arm64/CONFIG_MOUSE_APPLETOUCH | 1 + .../L2-OPTIONAL/arm64/CONFIG_MOUSE_BCM5974 | 1 + 
.../L2-OPTIONAL/arm64/CONFIG_MOUSE_CYAPA | 1 + .../arm64/CONFIG_MOUSE_ELAN_I2C_SMBUS | 1 + .../L2-OPTIONAL/arm64/CONFIG_MOUSE_SERIAL | 1 + .../L2-OPTIONAL/arm64/CONFIG_MOUSE_VSXXXAA | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_MOXTET | 1 + .../L2-OPTIONAL/arm64/CONFIG_MTD_AFS_PARTS | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI | 1 + .../arm64/CONFIG_MTD_CFI_ADV_OPTIONS | 1 + .../L2-OPTIONAL/arm64/CONFIG_MTD_CFI_AMDSTD | 1 + .../L2-OPTIONAL/arm64/CONFIG_MTD_CFI_INTELEXT | 1 + .../L2-OPTIONAL/arm64/CONFIG_MTD_CFI_STAA | 1 + .../L2-OPTIONAL/arm64/CONFIG_MTD_CFI_UTIL | 1 + .../L2-OPTIONAL/arm64/CONFIG_MTD_GEN_PROBE | 1 + .../L2-OPTIONAL/arm64/CONFIG_MTD_OF_PARTS | 1 + .../L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP | 1 + .../arm64/CONFIG_MTD_PHYSMAP_COMPAT | 1 + .../L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_OF | 1 + .../L2-OPTIONAL/arm64/CONFIG_MV_XOR_V2 | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_MYRI10GE | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_ND_BTT | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_ND_PFN | 1 + .../CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP | 1 + .../L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_AMD | 1 + .../arm64/CONFIG_NET_VENDOR_BROCADE | 1 + .../L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_CISCO | 1 + .../L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_DEC | 1 + .../arm64/CONFIG_NET_VENDOR_EMULEX | 1 + .../arm64/CONFIG_NET_VENDOR_QUALCOMM | 1 + .../L2-OPTIONAL/arm64/CONFIG_NET_XGENE | 1 + .../L2-OPTIONAL/arm64/CONFIG_NET_XGENE_V2 | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_NOZOMI | 1 + .../L2-OPTIONAL/arm64/CONFIG_NVHE_EL2_DEBUG | 1 + .../arm64/CONFIG_NVIDIA_CARMEL_CNP_ERRATUM | 1 + .../arm64/CONFIG_NVMEM_QCOM_QFPROM | 1 + .../arm64/CONFIG_NVMEM_QCOM_SEC_QFPROM | 1 + .../arm64/CONFIG_NVMEM_REBOOT_MODE | 1 + .../L2-OPTIONAL/arm64/CONFIG_NVMEM_U_BOOT_ENV | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF | 1 + .../L2-OPTIONAL/arm64/CONFIG_OF_ADDRESS | 1 + .../L2-OPTIONAL/arm64/CONFIG_OF_DYNAMIC | 1 + .../arm64/CONFIG_OF_EARLY_FLATTREE | 1 + .../L2-OPTIONAL/arm64/CONFIG_OF_FLATTREE | 1 + 
.../configs/L2-OPTIONAL/arm64/CONFIG_OF_GPIO | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_OF_IOMMU | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_OF_IRQ | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_OF_KOBJ | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_OF_MDIO | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_OF_NUMA | 1 + .../L2-OPTIONAL/arm64/CONFIG_OF_OVERLAY | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_OF_PMEM | 1 + .../L2-OPTIONAL/arm64/CONFIG_OF_RESERVED_MEM | 1 + .../L2-OPTIONAL/arm64/CONFIG_OF_RESOLVE | 1 + .../L2-OPTIONAL/arm64/CONFIG_OF_UNITTEST | 1 + .../L2-OPTIONAL/arm64/CONFIG_OPEN_DICE | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPTEE | 1 + .../L2-OPTIONAL/arm64/CONFIG_OSF_PARTITION | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_PARPORT | 1 + .../L2-OPTIONAL/arm64/CONFIG_PARTITION_PERCPU | 1 + .../L2-OPTIONAL/arm64/CONFIG_PATA_OF_PLATFORM | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_PCIE_AL | 1 + .../L2-OPTIONAL/arm64/CONFIG_PCIE_ALTERA | 1 + .../arm64/CONFIG_PCIE_CADENCE_PLAT_HOST | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW | 1 + .../L2-OPTIONAL/arm64/CONFIG_PCIE_DW_HOST | 1 + .../L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_ERR | 1 + .../L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_STB | 1 + .../L2-OPTIONAL/arm64/CONFIG_PCIE_KIRIN | 1 + .../arm64/CONFIG_PCIE_MICROCHIP_HOST | 1 + .../L2-OPTIONAL/arm64/CONFIG_PCIE_QCOM | 1 + .../L2-OPTIONAL/arm64/CONFIG_PCIE_XILINX | 1 + .../arm64/CONFIG_PCI_DOMAINS_GENERIC | 1 + .../arm64/CONFIG_PCI_DYNAMIC_OF_NODES | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_PCI_ECAM | 1 + .../L2-OPTIONAL/arm64/CONFIG_PCI_FTPCI100 | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_PCI_HISI | 1 + .../L2-OPTIONAL/arm64/CONFIG_PCI_HOST_COMMON | 1 + .../arm64/CONFIG_PCI_HOST_THUNDER_ECAM | 1 + .../arm64/CONFIG_PCI_HOST_THUNDER_PEM | 1 + .../L2-OPTIONAL/arm64/CONFIG_PCI_J721E_HOST | 1 + .../L2-OPTIONAL/arm64/CONFIG_PCI_SYSCALL | 1 + .../L2-OPTIONAL/arm64/CONFIG_PCI_XGENE | 1 + .../L2-OPTIONAL/arm64/CONFIG_PCI_XGENE_MSI | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_PCNET32 | 1 + 
.../configs/L2-OPTIONAL/arm64/CONFIG_PDS_CORE | 1 + .../L2-OPTIONAL/arm64/CONFIG_PERF_USE_VMALLOC | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHYLIB_LEDS | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY | 1 + .../arm64/CONFIG_PHY_CADENCE_DPHY_RX | 1 + .../arm64/CONFIG_PHY_CADENCE_SALVO | 1 + .../arm64/CONFIG_PHY_CADENCE_SIERRA | 1 + .../arm64/CONFIG_PHY_CADENCE_TORRENT | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHY_HI3660_USB | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_PCIE | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_USB | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHY_HI6220_USB | 1 + .../arm64/CONFIG_PHY_HISI_INNO_USB2 | 1 + .../arm64/CONFIG_PHY_HISTB_COMBPHY | 1 + .../arm64/CONFIG_PHY_LAN966X_SERDES | 1 + .../arm64/CONFIG_PHY_MAPPHONE_MDM6600 | 1 + .../arm64/CONFIG_PHY_OCELOT_SERDES | 1 + .../arm64/CONFIG_PHY_QCOM_APQ8064_SATA | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EDP | 1 + .../arm64/CONFIG_PHY_QCOM_EUSB2_REPEATER | 1 + .../arm64/CONFIG_PHY_QCOM_IPQ4019_USB | 1 + .../arm64/CONFIG_PHY_QCOM_IPQ806X_SATA | 1 + .../arm64/CONFIG_PHY_QCOM_IPQ806X_USB | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_M31_USB | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_PCIE2 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QMP | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QUSB2 | 1 + .../arm64/CONFIG_PHY_QCOM_SGMII_ETH | 1 + .../arm64/CONFIG_PHY_QCOM_SNPS_EUSB2 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS | 1 + .../arm64/CONFIG_PHY_QCOM_USB_HSIC | 1 + .../arm64/CONFIG_PHY_QCOM_USB_HS_28NM | 1 + .../arm64/CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SS | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHY_TUSB1210 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PHY_XGENE | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_PI433 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5018 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5332 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ6018 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ8074 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ9574 | 1 + 
.../arm64/CONFIG_PINCTRL_LPASS_LPI | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_MDM9607 | 1 + .../arm64/CONFIG_PINCTRL_MICROCHIP_SGPIO | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8916 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8953 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8976 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8994 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8996 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8998 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_OCELOT | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCM2290 | 1 + .../arm64/CONFIG_PINCTRL_QCOM_SSBI_PMIC | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCS404 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDF2XXX | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDU1000 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SA8775P | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7180 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7280 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8180X | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8280XP | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM660 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM670 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM845 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDX75 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SINGLE | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6115 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6125 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6350 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6375 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM7150 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8150 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8250 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8350 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8450 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8550 | 1 + .../L2-OPTIONAL/arm64/CONFIG_PINCTRL_STMFX | 1 + .../L2-OPTIONAL/arm64/CONFIG_PL320_MBOX | 1 + .../L2-OPTIONAL/arm64/CONFIG_PL330_DMA | 1 + .../L2-OPTIONAL/arm64/CONFIG_PLATFORM_MHU | 1 + 
.../L2-OPTIONAL/arm64/CONFIG_PMIC_OPREGION | 1 + .../arm64/CONFIG_PM_GENERIC_DOMAINS | 1 + .../arm64/CONFIG_PM_GENERIC_DOMAINS_OF | 1 + .../arm64/CONFIG_PM_GENERIC_DOMAINS_SLEEP | 1 + .../arm64/CONFIG_PNP_DEBUG_MESSAGES | 1 + .../arm64/CONFIG_POWER_RESET_BRCMSTB | 1 + .../L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO | 1 + .../arm64/CONFIG_POWER_RESET_GPIO_RESTART | 1 + .../L2-OPTIONAL/arm64/CONFIG_POWER_RESET_HISI | 1 + .../arm64/CONFIG_POWER_RESET_LTC2952 | 1 + .../L2-OPTIONAL/arm64/CONFIG_POWER_RESET_MSM | 1 + .../arm64/CONFIG_POWER_RESET_REGULATOR | 1 + .../arm64/CONFIG_POWER_RESET_RESTART | 1 + .../arm64/CONFIG_POWER_RESET_SYSCON | 1 + .../arm64/CONFIG_POWER_RESET_SYSCON_POWEROFF | 1 + .../arm64/CONFIG_POWER_RESET_VEXPRESS | 1 + .../arm64/CONFIG_POWER_RESET_XGENE | 1 + .../arm64/CONFIG_PTP_1588_CLOCK_KVM | 1 + .../L2-OPTIONAL/arm64/CONFIG_PWM_FSL_FTM | 1 + .../L2-OPTIONAL/arm64/CONFIG_PWM_HIBVT | 1 + .../L2-OPTIONAL/arm64/CONFIG_PWRSEQ_EMMC | 1 + .../L2-OPTIONAL/arm64/CONFIG_PWRSEQ_SIMPLE | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCA7000_SPI | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_AOSS_QMP | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_APCS_IPC | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_BAM_DMA | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_COMMAND_DB | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_QCOM_CPR | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_EBI2 | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_EMAC | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_GENI_SE | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_GSBI | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA_MGMT | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_IOMMU | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_IPCC | 1 + .../arm64/CONFIG_QCOM_IRQ_COMBINER | 1 + .../arm64/CONFIG_QCOM_KRYO_L2_ACCESSORS | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_L2_PMU | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_L3_PMU | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_LLCC | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_OCMEM | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_QCOM_PDC | 1 
+ .../L2-OPTIONAL/arm64/CONFIG_QCOM_RMTFS_MEM | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_RPMH | 1 + .../L2-OPTIONAL/arm64/CONFIG_QCOM_SMEM | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_QCOM_WDT | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_QLGE | 1 + .../L2-OPTIONAL/arm64/CONFIG_QUICC_ENGINE | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_RC_CORE | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGMAP_MMIO | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR | 1 + .../arm64/CONFIG_REGULATOR_88PG86X | 1 + .../arm64/CONFIG_REGULATOR_ACT8865 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_AD5398 | 1 + .../arm64/CONFIG_REGULATOR_AW37503 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9121 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9210 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9211 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_DEBUG | 1 + .../arm64/CONFIG_REGULATOR_FAN53555 | 1 + .../arm64/CONFIG_REGULATOR_FAN53880 | 1 + .../arm64/CONFIG_REGULATOR_FIXED_VOLTAGE | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_GPIO | 1 + .../arm64/CONFIG_REGULATOR_ISL6271A | 1 + .../arm64/CONFIG_REGULATOR_ISL9305 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3971 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3972 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP872X | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP8755 | 1 + .../arm64/CONFIG_REGULATOR_LTC3589 | 1 + .../arm64/CONFIG_REGULATOR_LTC3676 | 1 + .../arm64/CONFIG_REGULATOR_MAX1586 | 1 + .../arm64/CONFIG_REGULATOR_MAX20086 | 1 + .../arm64/CONFIG_REGULATOR_MAX20411 | 1 + .../arm64/CONFIG_REGULATOR_MAX77826 | 1 + .../arm64/CONFIG_REGULATOR_MAX77857 | 1 + .../arm64/CONFIG_REGULATOR_MAX8649 | 1 + .../arm64/CONFIG_REGULATOR_MAX8660 | 1 + .../arm64/CONFIG_REGULATOR_MAX8893 | 1 + .../arm64/CONFIG_REGULATOR_MAX8952 | 1 + .../arm64/CONFIG_REGULATOR_MAX8973 | 1 + .../arm64/CONFIG_REGULATOR_MCP16502 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP5416 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP8859 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP886X | 1 + 
.../arm64/CONFIG_REGULATOR_MPQ7920 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_MT6311 | 1 + .../arm64/CONFIG_REGULATOR_PCA9450 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_PF8X00 | 1 + .../arm64/CONFIG_REGULATOR_PFUZE100 | 1 + .../arm64/CONFIG_REGULATOR_PV88060 | 1 + .../arm64/CONFIG_REGULATOR_PV88080 | 1 + .../arm64/CONFIG_REGULATOR_PV88090 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_PWM | 1 + .../arm64/CONFIG_REGULATOR_QCOM_REFGEN | 1 + .../arm64/CONFIG_REGULATOR_RAA215300 | 1 + ...G_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4801 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4803 | 1 + .../arm64/CONFIG_REGULATOR_RT5190A | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5739 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5759 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6160 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6190 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6245 | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTMV20 | 1 + .../arm64/CONFIG_REGULATOR_RTQ2134 | 1 + .../arm64/CONFIG_REGULATOR_RTQ2208 | 1 + .../arm64/CONFIG_REGULATOR_RTQ6752 | 1 + .../arm64/CONFIG_REGULATOR_SLG51000 | 1 + .../arm64/CONFIG_REGULATOR_SY8106A | 1 + .../arm64/CONFIG_REGULATOR_SY8824X | 1 + .../arm64/CONFIG_REGULATOR_SY8827N | 1 + .../arm64/CONFIG_REGULATOR_TPS51632 | 1 + .../arm64/CONFIG_REGULATOR_TPS62360 | 1 + .../arm64/CONFIG_REGULATOR_TPS6286X | 1 + .../arm64/CONFIG_REGULATOR_TPS6287X | 1 + .../arm64/CONFIG_REGULATOR_TPS65023 | 1 + .../arm64/CONFIG_REGULATOR_TPS6507X | 1 + .../arm64/CONFIG_REGULATOR_TPS65132 | 1 + .../arm64/CONFIG_REGULATOR_TPS6524X | 1 + .../arm64/CONFIG_REGULATOR_USERSPACE_CONSUMER | 1 + .../L2-OPTIONAL/arm64/CONFIG_REGULATOR_VCTRL | 1 + .../arm64/CONFIG_REGULATOR_VEXPRESS | 1 + .../arm64/CONFIG_REGULATOR_VIRTUAL_CONSUMER | 1 + .../arm64/CONFIG_REGULATOR_VQMMC_IPQ4019 | 1 + .../CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID | 1 + .../L2-OPTIONAL/arm64/CONFIG_RESET_HISI | 1 + 
.../L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_AOSS | 1 + .../L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_PDC | 1 + .../L2-OPTIONAL/arm64/CONFIG_RFKILL_GPIO | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_RMI4_F34 | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_RMI4_SPI | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMNET | 1 + .../arm64/CONFIG_ROCKCHIP_ERRATUM_3588001 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABB5ZES3 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABX80X | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_CADENCE | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1305 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1343 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1347 | 1 + .../arm64/CONFIG_RTC_DRV_DS1374_WDT | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1390 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685 | 1 + .../arm64/CONFIG_RTC_DRV_DS1685_FAMILY | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1689 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17285 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17485 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17885 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_HYM8563 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ISL12026 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T93 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T94 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MAX6902 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MCP795 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_NCT3018Y | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2123 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2127 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF85063 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL030 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL031 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R7301 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R9701 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RS5C348 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX4581 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX8010 | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_XGENE | 1 + .../L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ZYNQMP | 1 
+ .../configs/L2-OPTIONAL/arm64/CONFIG_RTS5208 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SATA_ZHAOXIN | 1 + .../arm64/CONFIG_SCHED_THERMAL_PRESSURE | 1 + .../L2-OPTIONAL/arm64/CONFIG_SCSI_AACRAID | 1 + .../L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2X_FCOE | 1 + .../L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2_ISCSI | 1 + .../L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS | 1 + ...ONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE | 1 + .../arm64/CONFIG_SCSI_HISI_SAS_PCI | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR | 1 + .../L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_DUMP | 1 + .../L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_TRACE | 1 + .../arm64/CONFIG_SENSORS_ACPI_POWER | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7314 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7414 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7418 | 1 + .../arm64/CONFIG_SENSORS_ADC128D818 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADCXX | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1021 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1025 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1026 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1029 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1031 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1275 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM9240 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7828 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7871 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7410 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7411 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7462 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7470 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7475 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_AMC6821 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_APDS990X | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ARM_SCPI | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ASC7621 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ATXP1 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_BH1770 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_DME1737 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_DS1621 | 1 + 
.../L2-OPTIONAL/arm64/CONFIG_SENSORS_DS620 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC1403 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC6W201 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_F71805F | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_F71882FG | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_F75375S | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_G760A | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_G762 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_GL518SM | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_GL520SM | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_GPIO_FAN | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_I5K_AMB | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMAEM | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMPEX | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_INA209 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_INA2XX | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_IT87 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_JC42 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LINEAGE | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LIS3_I2C | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM25066 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM63 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM70 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM73 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM75 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM77 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM78 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM80 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM83 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM85 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM87 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM90 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM92 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM93 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95234 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95241 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95245 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2945 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2978 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC3815 | 1 + 
.../L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4151 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4215 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4222 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4245 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4260 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4261 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1111 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16064 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16065 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1619 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1668 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX197 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX20751 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX31790 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX34440 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6639 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6642 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6650 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6697 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX8688 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_MCP3021 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6683 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6775 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7802 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7904 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87360 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87427 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_PCF8591 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_PMBUS | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_POWR1220 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_PWM_FAN | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5627 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5636 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT15 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT21 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_SHTC1 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_SIS5595 | 1 + .../arm64/CONFIG_SENSORS_SMSC47B397 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M1 | 1 + 
.../arm64/CONFIG_SENSORS_SMSC47M192 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_TC74 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_THMC50 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP102 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP103 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP401 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP421 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_TPS40422 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_TSL2550 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9000 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9200 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_VEXPRESS | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_VIA686A | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_VT1211 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_VT8231 | 1 + .../arm64/CONFIG_SENSORS_W83627EHF | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627HF | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_W83781D | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_W83791D | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_W83792D | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_W83793 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_W83795 | 1 + .../arm64/CONFIG_SENSORS_W83L785TS | 1 + .../arm64/CONFIG_SENSORS_W83L786NG | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_XGENE | 1 + .../L2-OPTIONAL/arm64/CONFIG_SENSORS_ZL6100 | 1 + .../arm64/CONFIG_SERIAL_8250_16550A_VARIANTS | 1 + .../L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_FSL | 1 + .../arm64/CONFIG_SERIAL_8250_RT288X | 1 + .../arm64/CONFIG_SERIAL_AMBA_PL010 | 1 + .../arm64/CONFIG_SERIAL_AMBA_PL011 | 1 + .../arm64/CONFIG_SERIAL_AMBA_PL011_CONSOLE | 1 + .../L2-OPTIONAL/arm64/CONFIG_SERIAL_ARC | 1 + .../arm64/CONFIG_SERIAL_CONEXANT_DIGICOLOR | 1 + .../arm64/CONFIG_SERIAL_EARLYCON_SEMIHOST | 1 + .../L2-OPTIONAL/arm64/CONFIG_SERIAL_JSM | 1 + .../L2-OPTIONAL/arm64/CONFIG_SERIAL_MSM | 1 + .../arm64/CONFIG_SERIAL_OF_PLATFORM | 1 + .../L2-OPTIONAL/arm64/CONFIG_SERIAL_SIFIVE | 1 + .../arm64/CONFIG_SERIAL_XILINX_PS_UART | 1 + .../L2-OPTIONAL/arm64/CONFIG_SERIO_APBPS2 | 1 + 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_SFC | 1 + .../L2-OPTIONAL/arm64/CONFIG_SGI_PARTITION | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_SG_SPLIT | 1 + .../arm64/CONFIG_SHADOW_CALL_STACK | 1 + .../L2-OPTIONAL/arm64/CONFIG_SOC_BRCMSTB | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_SOC_BUS | 1 + .../arm64/CONFIG_SOLARIS_X86_PARTITION | 1 + .../arm64/CONFIG_SPI_CADENCE_QUADSPI | 1 + .../L2-OPTIONAL/arm64/CONFIG_SPI_DW_DMA | 1 + .../L2-OPTIONAL/arm64/CONFIG_SPI_DW_PCI | 1 + .../L2-OPTIONAL/arm64/CONFIG_SPI_FSL_SPI | 1 + .../arm64/CONFIG_SPI_HISI_SFC_V3XX | 1 + .../L2-OPTIONAL/arm64/CONFIG_SPI_QCOM_QSPI | 1 + .../L2-OPTIONAL/arm64/CONFIG_SPI_THUNDERX | 1 + .../L2-OPTIONAL/arm64/CONFIG_SSIF_IPMI_BMC | 1 + .../arm64/CONFIG_STACKPROTECTOR_PER_TASK | 1 + .../L2-OPTIONAL/arm64/CONFIG_STAGING_BOARD | 1 + .../L2-OPTIONAL/arm64/CONFIG_STAGING_MEDIA | 1 + .../L2-OPTIONAL/arm64/CONFIG_STUB_CLK_HI3660 | 1 + .../L2-OPTIONAL/arm64/CONFIG_SUN_PARTITION | 1 + .../arm64/CONFIG_SYSCON_REBOOT_MODE | 1 + .../arm64/CONFIG_TCG_TIS_I2C_ATMEL | 1 + .../arm64/CONFIG_TCG_TIS_I2C_INFINEON | 1 + .../arm64/CONFIG_TCG_TIS_I2C_NUVOTON | 1 + .../L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI | 1 + .../L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI_CR50 | 1 + .../arm64/CONFIG_TCG_TIS_ST33ZP24_I2C | 1 + .../L2-OPTIONAL/arm64/CONFIG_THUNDERX2_PMU | 1 + .../L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_BGX | 1 + .../L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_PF | 1 + .../L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_RGX | 1 + .../L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_VF | 1 + .../L2-OPTIONAL/arm64/CONFIG_TIFM_7XX1 | 1 + .../L2-OPTIONAL/arm64/CONFIG_TIMER_ACPI | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_TIMER_OF | 1 + .../L2-OPTIONAL/arm64/CONFIG_TIMER_PROBE | 1 + .../arm64/CONFIG_TRACE_MMIO_ACCESS | 1 + .../L2-OPTIONAL/arm64/CONFIG_TRANS_TABLE | 1 + .../L2-OPTIONAL/arm64/CONFIG_TYPEC_FUSB302 | 1 + .../L2-OPTIONAL/arm64/CONFIG_TYPEC_QCOM_PMIC | 1 + .../L2-OPTIONAL/arm64/CONFIG_UEFI_CPER_ARM | 1 + .../L2-OPTIONAL/arm64/CONFIG_ULTRASOC_SMB | 1 + 
.../arm64/CONFIG_UNIXWARE_DISKLABEL | 1 + .../L2-OPTIONAL/arm64/CONFIG_USB_CHAOSKEY | 1 + .../arm64/CONFIG_USB_EHCI_HCD_PLATFORM | 1 + .../L2-OPTIONAL/arm64/CONFIG_USB_NET_SR9700 | 1 + .../L2-OPTIONAL/arm64/CONFIG_USB_ONBOARD_HUB | 1 + .../L2-OPTIONAL/arm64/CONFIG_USB_QCOM_EUD | 1 + .../arm64/CONFIG_USB_SERIAL_CONSOLE | 1 + .../arm64/CONFIG_USB_SERIAL_SIMPLE | 1 + .../L2-OPTIONAL/arm64/CONFIG_USB_SPEEDTOUCH | 1 + .../L2-OPTIONAL/arm64/CONFIG_USB_UHCI_HCD | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI | 1 + .../L2-OPTIONAL/arm64/CONFIG_USB_ULPI_BUS | 1 + .../L2-OPTIONAL/arm64/CONFIG_USB_XHCI_DBGCAP | 1 + .../L2-OPTIONAL/arm64/CONFIG_USB_XHCI_HISTB | 1 + .../arm64/CONFIG_USB_XHCI_PLATFORM | 1 + .../arm64/CONFIG_VCPU_STALL_DETECTOR | 1 + .../L2-OPTIONAL/arm64/CONFIG_VEXPRESS_CONFIG | 1 + .../L2-OPTIONAL/arm64/CONFIG_VFIO_AMBA | 1 + .../arm64/CONFIG_VFIO_PLATFORM_AMDXGBE_RESET | 1 + .../CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_VME_BUS | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_VT6655 | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_WLAN | 1 + .../L2-OPTIONAL/arm64/CONFIG_XGENE_DMA | 1 + .../L2-OPTIONAL/arm64/CONFIG_XGENE_PMU | 1 + .../arm64/CONFIG_XGENE_SLIMPRO_MBOX | 1 + .../L2-OPTIONAL/arm64/CONFIG_XILINX_INTC | 1 + .../arm64/CONFIG_XILINX_WINDOW_WATCHDOG | 1 + .../arm64/CONFIG_XILINX_ZYNQMP_DMA | 1 + .../arm64/CONFIG_XILINX_ZYNQMP_DPDMA | 1 + .../L2-OPTIONAL/arm64/CONFIG_XIL_AXIS_FIFO | 1 + .../L2-OPTIONAL/default/CONFIG_6LOWPAN | 1 + .../default/CONFIG_6LOWPAN_DEBUGFS | 1 + .../L2-OPTIONAL/default/CONFIG_6LOWPAN_NHC | 1 + .../configs/L2-OPTIONAL/default/CONFIG_8139CP | 1 + .../L2-OPTIONAL/default/CONFIG_8139TOO | 1 + .../L2-OPTIONAL/default/CONFIG_8139TOO_8129 | 1 + .../L2-OPTIONAL/default/CONFIG_8139TOO_PIO | 1 + .../default/CONFIG_8139TOO_TUNE_TWISTER | 1 + .../default/CONFIG_8139_OLD_RX_RESET | 1 + .../L2-OPTIONAL/default/CONFIG_ACCESSIBILITY | 1 + .../default/CONFIG_ACORN_PARTITION | 1 + 
.../L2-OPTIONAL/default/CONFIG_ACPI_CPPC_LIB | 1 + .../L2-OPTIONAL/default/CONFIG_ACPI_FFH | 1 + .../L2-OPTIONAL/default/CONFIG_ACPI_FPDT | 1 + .../default/CONFIG_ACPI_HOTPLUG_CPU | 1 + .../default/CONFIG_ACPI_I2C_OPREGION | 1 + .../L2-OPTIONAL/default/CONFIG_ACPI_MDIO | 1 + .../L2-OPTIONAL/default/CONFIG_ACPI_PFRUT | 1 + .../default/CONFIG_ACPI_PROCESSOR_IDLE | 1 + .../L2-OPTIONAL/default/CONFIG_ACPI_TABLE_LIB | 1 + .../L2-OPTIONAL/default/CONFIG_AD525X_DPOT | 1 + .../L2-OPTIONAL/default/CONFIG_ADFS_FS | 1 + .../L2-OPTIONAL/default/CONFIG_ADIN1100_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_ADIN1110 | 1 + .../L2-OPTIONAL/default/CONFIG_ADIN_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_AFFS_FS | 1 + .../configs/L2-OPTIONAL/default/CONFIG_AFS_FS | 1 + .../configs/L2-OPTIONAL/default/CONFIG_AF_KCM | 1 + .../L2-OPTIONAL/default/CONFIG_AF_RXRPC | 1 + .../L2-OPTIONAL/default/CONFIG_AF_UNIX_OOB | 1 + .../L2-OPTIONAL/default/CONFIG_AHCI_DWC | 1 + .../L2-OPTIONAL/default/CONFIG_AIX_PARTITION | 1 + .../L2-OPTIONAL/default/CONFIG_ALIM7101_WDT | 1 + .../L2-OPTIONAL/default/CONFIG_ALTERA_MBOX | 1 + .../L2-OPTIONAL/default/CONFIG_ALTERA_MSGDMA | 1 + .../L2-OPTIONAL/default/CONFIG_ALTERA_TSE | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_ALX | 1 + .../L2-OPTIONAL/default/CONFIG_AMD_PHY | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_AMT | 1 + .../default/CONFIG_ANDROID_BINDER_IPC | 1 + .../L2-OPTIONAL/default/CONFIG_ANON_VMA_NAME | 1 + .../default/CONFIG_APERTURE_HELPERS | 1 + .../default/CONFIG_APPLE_MFI_FASTCHARGE | 1 + .../L2-OPTIONAL/default/CONFIG_APPLICOM | 1 + .../L2-OPTIONAL/default/CONFIG_AQUANTIA_PHY | 1 + ...ONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE | 1 + .../default/CONFIG_ARCH_DMA_ADDR_T_64BIT | 1 + .../CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION | 1 + .../default/CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG | 1 + .../CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE | 1 + .../CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK | 1 + .../default/CONFIG_ARCH_ENABLE_THP_MIGRATION | 1 + 
.../CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE | 1 + .../default/CONFIG_ARCH_HAS_CACHE_LINE_SIZE | 1 + .../default/CONFIG_ARCH_HAS_COPY_MC | 1 + .../default/CONFIG_ARCH_HAS_CPU_RESCTRL | 1 + .../CONFIG_ARCH_HAS_CURRENT_STACK_POINTER | 1 + .../default/CONFIG_ARCH_HAS_DEBUG_VIRTUAL | 1 + .../default/CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE | 1 + .../default/CONFIG_ARCH_HAS_DEBUG_WX | 1 + .../default/CONFIG_ARCH_HAS_ELF_RANDOMIZE | 1 + .../default/CONFIG_ARCH_HAS_FAST_MULTIPLIER | 1 + .../default/CONFIG_ARCH_HAS_FORTIFY_SOURCE | 1 + .../default/CONFIG_ARCH_HAS_GCOV_PROFILE_ALL | 1 + .../default/CONFIG_ARCH_HAS_GIGANTIC_PAGE | 1 + .../L2-OPTIONAL/default/CONFIG_ARCH_HAS_KCOV | 1 + .../CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE | 1 + .../CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS | 1 + ...FIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE | 1 + .../default/CONFIG_ARCH_HAS_PMEM_API | 1 + .../default/CONFIG_ARCH_HAS_PTE_DEVMAP | 1 + .../default/CONFIG_ARCH_HAS_PTE_SPECIAL | 1 + .../default/CONFIG_ARCH_HAS_SET_DIRECT_MAP | 1 + .../default/CONFIG_ARCH_HAS_SET_MEMORY | 1 + .../default/CONFIG_ARCH_HAS_STRICT_KERNEL_RWX | 1 + .../default/CONFIG_ARCH_HAS_STRICT_MODULE_RWX | 1 + .../default/CONFIG_ARCH_HAS_SYSCALL_WRAPPER | 1 + .../CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE | 1 + .../CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL | 1 + .../default/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG | 1 + .../default/CONFIG_ARCH_HIBERNATION_HEADER | 1 + .../default/CONFIG_ARCH_HIBERNATION_POSSIBLE | 1 + .../CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE | 1 + .../CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX | 1 + .../default/CONFIG_ARCH_PROC_KCORE_TEXT | 1 + .../default/CONFIG_ARCH_SELECTS_KEXEC_FILE | 1 + .../default/CONFIG_ARCH_SPARSEMEM_ENABLE | 1 + .../L2-OPTIONAL/default/CONFIG_ARCH_STACKWALK | 1 + .../default/CONFIG_ARCH_SUPPORTS_ACPI | 1 + .../default/CONFIG_ARCH_SUPPORTS_ATOMIC_RMW | 1 + .../default/CONFIG_ARCH_SUPPORTS_CFI_CLANG | 1 + .../default/CONFIG_ARCH_SUPPORTS_CRASH_DUMP | 1 + .../CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC | 1 + 
.../default/CONFIG_ARCH_SUPPORTS_INT128 | 1 + .../default/CONFIG_ARCH_SUPPORTS_KEXEC | 1 + .../default/CONFIG_ARCH_SUPPORTS_KEXEC_FILE | 1 + .../default/CONFIG_ARCH_SUPPORTS_KEXEC_SIG | 1 + .../default/CONFIG_ARCH_SUPPORTS_LTO_CLANG | 1 + .../CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN | 1 + .../CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE | 1 + .../CONFIG_ARCH_SUPPORTS_NUMA_BALANCING | 1 + .../CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK | 1 + .../default/CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK | 1 + .../default/CONFIG_ARCH_SUPPORTS_UPROBES | 1 + .../default/CONFIG_ARCH_SUSPEND_POSSIBLE | 1 + .../default/CONFIG_ARCH_USES_HIGH_VMA_FLAGS | 1 + .../default/CONFIG_ARCH_USE_CMPXCHG_LOCKREF | 1 + .../default/CONFIG_ARCH_USE_MEMREMAP_PROT | 1 + .../default/CONFIG_ARCH_USE_MEMTEST | 1 + .../default/CONFIG_ARCH_USE_QUEUED_RWLOCKS | 1 + .../default/CONFIG_ARCH_USE_QUEUED_SPINLOCKS | 1 + .../default/CONFIG_ARCH_USE_SYM_ANNOTATIONS | 1 + .../default/CONFIG_ARCH_WANTS_NO_INSTR | 1 + .../default/CONFIG_ARCH_WANTS_THP_SWAP | 1 + .../CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH | 1 + .../CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION | 1 + .../default/CONFIG_ARCH_WANT_DEFAULT_BPF_JIT | 1 + .../default/CONFIG_ARCH_WANT_HUGE_PMD_SHARE | 1 + .../default/CONFIG_ARCH_WANT_LD_ORPHAN_WARN | 1 + .../default/CONFIG_ARCH_WANT_PMD_MKWRITE | 1 + .../configs/L2-OPTIONAL/default/CONFIG_ARCNET | 1 + .../default/CONFIG_ASM_MODVERSIONS | 1 + .../configs/L2-OPTIONAL/default/CONFIG_ASN1 | 1 + .../L2-OPTIONAL/default/CONFIG_ASN1_ENCODER | 1 + .../default/CONFIG_ASSOCIATIVE_ARRAY | 1 + .../default/CONFIG_AS_HAS_NON_CONST_LEB128 | 1 + .../L2-OPTIONAL/default/CONFIG_AS_IS_GNU | 1 + .../L2-OPTIONAL/default/CONFIG_AS_VERSION | 1 + .../configs/L2-OPTIONAL/default/CONFIG_ATALK | 1 + .../default/CONFIG_ATARI_PARTITION | 1 + .../configs/L2-OPTIONAL/default/CONFIG_ATL1 | 1 + .../configs/L2-OPTIONAL/default/CONFIG_ATL1C | 1 + .../configs/L2-OPTIONAL/default/CONFIG_ATL1E | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_ATM | 1 + 
.../L2-OPTIONAL/default/CONFIG_ATM_BR2684 | 1 + .../default/CONFIG_ATM_BR2684_IPFILTER | 1 + .../L2-OPTIONAL/default/CONFIG_ATM_CLIP | 1 + .../default/CONFIG_ATM_CLIP_NO_ICMP | 1 + .../L2-OPTIONAL/default/CONFIG_ATM_LANE | 1 + .../L2-OPTIONAL/default/CONFIG_ATM_MPOA | 1 + .../L2-OPTIONAL/default/CONFIG_AUDITSYSCALL | 1 + .../L2-OPTIONAL/default/CONFIG_AUXDISPLAY | 1 + .../L2-OPTIONAL/default/CONFIG_AX88796B_PHY | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_B44 | 1 + .../default/CONFIG_BACKLIGHT_ADP8860 | 1 + .../default/CONFIG_BACKLIGHT_ADP8870 | 1 + .../default/CONFIG_BACKLIGHT_ARCXCNN | 1 + .../default/CONFIG_BACKLIGHT_BD6107 | 1 + .../default/CONFIG_BACKLIGHT_CLASS_DEVICE | 1 + .../default/CONFIG_BACKLIGHT_KTD253 | 1 + .../default/CONFIG_BACKLIGHT_KTZ8866 | 1 + .../default/CONFIG_BACKLIGHT_LM3630A | 1 + .../default/CONFIG_BACKLIGHT_LM3639 | 1 + .../default/CONFIG_BACKLIGHT_LP855X | 1 + .../default/CONFIG_BACKLIGHT_LV5207LP | 1 + .../default/CONFIG_BACKLIGHT_QCOM_WLED | 1 + .../default/CONFIG_BACKTRACE_SELF_TEST | 1 + .../L2-OPTIONAL/default/CONFIG_BAREUDP | 1 + .../L2-OPTIONAL/default/CONFIG_BASE_SMALL | 1 + .../L2-OPTIONAL/default/CONFIG_BATMAN_ADV | 1 + .../default/CONFIG_BATTERY_BQ27XXX | 1 + .../L2-OPTIONAL/default/CONFIG_BATTERY_CW2015 | 1 + .../L2-OPTIONAL/default/CONFIG_BATTERY_DS2780 | 1 + .../L2-OPTIONAL/default/CONFIG_BATTERY_DS2781 | 1 + .../L2-OPTIONAL/default/CONFIG_BATTERY_DS2782 | 1 + .../default/CONFIG_BATTERY_GAUGE_LTC2941 | 1 + .../default/CONFIG_BATTERY_GOLDFISH | 1 + .../default/CONFIG_BATTERY_MAX17040 | 1 + .../default/CONFIG_BATTERY_MAX17042 | 1 + .../L2-OPTIONAL/default/CONFIG_BATTERY_RT5033 | 1 + .../default/CONFIG_BATTERY_SAMSUNG_SDI | 1 + .../L2-OPTIONAL/default/CONFIG_BATTERY_SBS | 1 + .../L2-OPTIONAL/default/CONFIG_BATTERY_UG3105 | 1 + .../L2-OPTIONAL/default/CONFIG_BCM54140_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_BCM7XXX_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_BCM84881_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_BCM87XX_PHY | 1 + 
.../configs/L2-OPTIONAL/default/CONFIG_BCMA | 1 + .../L2-OPTIONAL/default/CONFIG_BCMA_DEBUG | 1 + .../default/CONFIG_BCMA_DRIVER_GMAC_CMN | 1 + .../default/CONFIG_BCMA_DRIVER_GPIO | 1 + .../default/CONFIG_BCMA_DRIVER_PCI | 1 + .../L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI | 1 + .../default/CONFIG_BCMA_HOST_PCI_POSSIBLE | 1 + .../L2-OPTIONAL/default/CONFIG_BCMA_HOST_SOC | 1 + .../L2-OPTIONAL/default/CONFIG_BCMA_POSSIBLE | 1 + .../default/CONFIG_BCM_KONA_USB2_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_BCM_NET_PHYLIB | 1 + .../L2-OPTIONAL/default/CONFIG_BCM_NET_PHYPTP | 1 + .../configs/L2-OPTIONAL/default/CONFIG_BCM_VK | 1 + .../L2-OPTIONAL/default/CONFIG_BE2ISCSI | 1 + .../L2-OPTIONAL/default/CONFIG_BEFS_FS | 1 + .../configs/L2-OPTIONAL/default/CONFIG_BFS_FS | 1 + .../L2-OPTIONAL/default/CONFIG_BINARY_PRINTF | 1 + .../L2-OPTIONAL/default/CONFIG_BITREVERSE | 1 + .../default/CONFIG_BLK_CGROUP_FC_APPID | 1 + .../default/CONFIG_BLK_CGROUP_IOPRIO | 1 + .../default/CONFIG_BLK_CGROUP_PUNT_BIO | 1 + .../default/CONFIG_BLK_CGROUP_RWSTAT | 1 + .../default/CONFIG_BLK_DEV_3W_XXXX_RAID | 1 + .../default/CONFIG_BLK_DEV_BSG_COMMON | 1 + .../default/CONFIG_BLK_DEV_DM_BUILTIN | 1 + .../L2-OPTIONAL/default/CONFIG_BLK_ICQ | 1 + .../default/CONFIG_BLK_INLINE_ENCRYPTION | 1 + .../default/CONFIG_BLK_MQ_STACKING | 1 + .../configs/L2-OPTIONAL/default/CONFIG_BLK_PM | 1 + .../L2-OPTIONAL/default/CONFIG_BLK_SED_OPAL | 1 + .../default/CONFIG_BLOCK_HOLDER_DEPRECATED | 1 + .../default/CONFIG_BLOCK_LEGACY_AUTOLOAD | 1 + .../default/CONFIG_BOOTPARAM_HUNG_TASK_PANIC | 1 + .../default/CONFIG_BPF_JIT_DEFAULT_ON | 1 + .../L2-OPTIONAL/default/CONFIG_BPF_PRELOAD | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_BQL | 1 + .../L2-OPTIONAL/default/CONFIG_BRIDGE_CFM | 1 + .../L2-OPTIONAL/default/CONFIG_BROADCOM_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_BSD_DISKLABEL | 1 + .../configs/L2-OPTIONAL/default/CONFIG_BTREE | 1 + .../L2-OPTIONAL/default/CONFIG_BTRFS_ASSERT | 1 + .../L2-OPTIONAL/default/CONFIG_BTRFS_DEBUG 
| 1 + .../default/CONFIG_BTRFS_FS_CHECK_INTEGRITY | 1 + .../default/CONFIG_BTRFS_FS_POSIX_ACL | 1 + .../default/CONFIG_BTRFS_FS_REF_VERIFY | 1 + .../default/CONFIG_BTRFS_FS_RUN_SANITY_TESTS | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_BTT | 1 + .../L2-OPTIONAL/default/CONFIG_BUFFER_HEAD | 1 + .../default/CONFIG_BUILDTIME_TABLE_SORT | 1 + .../configs/L2-OPTIONAL/default/CONFIG_C2PORT | 1 + .../default/CONFIG_CACHEFILES_ERROR_INJECTION | 1 + .../default/CONFIG_CADENCE_WATCHDOG | 1 + .../configs/L2-OPTIONAL/default/CONFIG_CAIF | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_CAN | 1 + .../L2-OPTIONAL/default/CONFIG_CARDBUS | 1 + .../L2-OPTIONAL/default/CONFIG_CAVIUM_PTP | 1 + .../L2-OPTIONAL/default/CONFIG_CB710_CORE | 1 + .../L2-OPTIONAL/default/CONFIG_CB710_DEBUG | 1 + .../default/CONFIG_CB710_DEBUG_ASSUMPTIONS | 1 + .../L2-OPTIONAL/default/CONFIG_CC_CAN_LINK | 1 + .../default/CONFIG_CC_CAN_LINK_STATIC | 1 + .../default/CONFIG_CC_HAS_ASM_GOTO_OUTPUT | 1 + .../CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT | 1 + .../default/CONFIG_CC_HAS_ASM_INLINE | 1 + .../CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN | 1 + .../default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO | 1 + .../CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE | 1 + .../L2-OPTIONAL/default/CONFIG_CC_HAS_INT128 | 1 + .../default/CONFIG_CC_HAS_KASAN_GENERIC | 1 + .../default/CONFIG_CC_HAS_KASAN_SW_TAGS | 1 + .../default/CONFIG_CC_HAS_NO_PROFILE_FN_ATTR | 1 + .../default/CONFIG_CC_HAS_RANDSTRUCT | 1 + .../default/CONFIG_CC_HAS_SANCOV_TRACE_PC | 1 + .../CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS | 1 + .../default/CONFIG_CC_HAS_ZERO_CALL_USED_REGS | 1 + .../default/CONFIG_CC_IMPLICIT_FALLTHROUGH | 1 + .../L2-OPTIONAL/default/CONFIG_CC_IS_GCC | 1 + .../default/CONFIG_CC_NO_ARRAY_BOUNDS | 1 + .../default/CONFIG_CC_VERSION_TEXT | 1 + .../L2-OPTIONAL/default/CONFIG_CEC_CORE | 1 + .../L2-OPTIONAL/default/CONFIG_CEPH_LIB | 1 + .../default/CONFIG_CEPH_LIB_PRETTYDEBUG | 1 + .../default/CONFIG_CEPH_LIB_USE_DNS_RESOLVER | 1 + 
.../L2-OPTIONAL/default/CONFIG_CFG80211 | 1 + .../default/CONFIG_CFG80211_CRDA_SUPPORT | 1 + .../default/CONFIG_CFG80211_DEBUGFS | 1 + .../default/CONFIG_CFG80211_DEFAULT_PS | 1 + .../CONFIG_CFG80211_DEVELOPER_WARNINGS | 1 + .../CONFIG_CFG80211_REQUIRE_SIGNED_REGDB | 1 + .../CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS | 1 + .../L2-OPTIONAL/default/CONFIG_CFG80211_WEXT | 1 + .../L2-OPTIONAL/default/CONFIG_CFI_CLANG | 1 + .../default/CONFIG_CGROUP_FAVOR_DYNMODS | 1 + .../L2-OPTIONAL/default/CONFIG_CGROUP_MISC | 1 + .../default/CONFIG_CHARGER_ADP5061 | 1 + .../default/CONFIG_CHARGER_BD99954 | 1 + .../default/CONFIG_CHARGER_BQ2415X | 1 + .../default/CONFIG_CHARGER_BQ24257 | 1 + .../default/CONFIG_CHARGER_BQ24735 | 1 + .../default/CONFIG_CHARGER_BQ2515X | 1 + .../default/CONFIG_CHARGER_BQ256XX | 1 + .../default/CONFIG_CHARGER_BQ25890 | 1 + .../default/CONFIG_CHARGER_BQ25980 | 1 + .../L2-OPTIONAL/default/CONFIG_CHARGER_GPIO | 1 + .../L2-OPTIONAL/default/CONFIG_CHARGER_LP8727 | 1 + .../L2-OPTIONAL/default/CONFIG_CHARGER_LT3651 | 1 + .../default/CONFIG_CHARGER_LTC4162L | 1 + .../default/CONFIG_CHARGER_MAX77976 | 1 + .../default/CONFIG_CHARGER_MAX8903 | 1 + .../L2-OPTIONAL/default/CONFIG_CHARGER_RT9455 | 1 + .../L2-OPTIONAL/default/CONFIG_CHARGER_SBS | 1 + .../default/CONFIG_CHECK_SIGNATURE | 1 + .../default/CONFIG_CHELSIO_INLINE_CRYPTO | 1 + .../default/CONFIG_CHELSIO_IPSEC_INLINE | 1 + .../L2-OPTIONAL/default/CONFIG_CHELSIO_LIB | 1 + .../L2-OPTIONAL/default/CONFIG_CHELSIO_T1 | 1 + .../L2-OPTIONAL/default/CONFIG_CHELSIO_T3 | 1 + .../L2-OPTIONAL/default/CONFIG_CHELSIO_T4 | 1 + .../L2-OPTIONAL/default/CONFIG_CHELSIO_T4VF | 1 + .../L2-OPTIONAL/default/CONFIG_CHELSIO_T4_DCB | 1 + .../default/CONFIG_CHELSIO_TLS_DEVICE | 1 + .../L2-OPTIONAL/default/CONFIG_CICADA_PHY | 1 + .../configs/L2-OPTIONAL/default/CONFIG_CIFS | 1 + .../default/CONFIG_CIFS_ALLOW_INSECURE_LEGACY | 1 + .../L2-OPTIONAL/default/CONFIG_CIFS_DEBUG | 1 + .../L2-OPTIONAL/default/CONFIG_CIFS_DEBUG2 | 1 + 
.../default/CONFIG_CIFS_DEBUG_DUMP_KEYS | 1 + .../default/CONFIG_CIFS_DFS_UPCALL | 1 + .../L2-OPTIONAL/default/CONFIG_CIFS_FSCACHE | 1 + .../L2-OPTIONAL/default/CONFIG_CIFS_POSIX | 1 + .../default/CONFIG_CIFS_SMB_DIRECT | 1 + .../L2-OPTIONAL/default/CONFIG_CIFS_STATS2 | 1 + .../default/CONFIG_CIFS_SWN_UPCALL | 1 + .../L2-OPTIONAL/default/CONFIG_CIFS_UPCALL | 1 + .../L2-OPTIONAL/default/CONFIG_CIFS_XATTR | 1 + .../L2-OPTIONAL/default/CONFIG_CLANG_VERSION | 1 + .../L2-OPTIONAL/default/CONFIG_CLZ_TAB | 1 + .../L2-OPTIONAL/default/CONFIG_CMA_SYSFS | 1 + .../default/CONFIG_CMDLINE_PARTITION | 1 + .../L2-OPTIONAL/default/CONFIG_CODA_FS | 1 + .../configs/L2-OPTIONAL/default/CONFIG_COMEDI | 1 + .../default/CONFIG_COMMON_CLK_CDCE706 | 1 + .../default/CONFIG_COMMON_CLK_CS2000_CP | 1 + .../default/CONFIG_COMMON_CLK_MAX9485 | 1 + .../L2-OPTIONAL/default/CONFIG_COMMON_CLK_PWM | 1 + .../default/CONFIG_COMMON_CLK_SI5341 | 1 + .../default/CONFIG_COMMON_CLK_SI5351 | 1 + .../default/CONFIG_COMMON_CLK_SI544 | 1 + .../CONFIG_COMPACT_UNEVICTABLE_DEFAULT | 1 + .../default/CONFIG_COMPAT_BINFMT_ELF | 1 + .../default/CONFIG_COMPAT_OLD_SIGACTION | 1 + .../L2-OPTIONAL/default/CONFIG_CONSOLE_POLL | 1 + .../default/CONFIG_CONTEXT_SWITCH_TRACER | 1 + .../default/CONFIG_CONTEXT_TRACKING | 1 + .../default/CONFIG_CONTEXT_TRACKING_IDLE | 1 + .../default/CONFIG_CONTEXT_TRACKING_USER | 1 + .../CONFIG_CONTEXT_TRACKING_USER_FORCE | 1 + .../L2-OPTIONAL/default/CONFIG_CONTIG_ALLOC | 1 + .../configs/L2-OPTIONAL/default/CONFIG_CORDIC | 1 + .../L2-OPTIONAL/default/CONFIG_CORTINA_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_COUNTER | 1 + .../default/CONFIG_CPU_FREQ_GOV_ATTR_SET | 1 + .../default/CONFIG_CPU_FREQ_GOV_COMMON | 1 + .../default/CONFIG_CPU_HOTPLUG_STATE_CONTROL | 1 + .../L2-OPTIONAL/default/CONFIG_CPU_RMAP | 1 + .../configs/L2-OPTIONAL/default/CONFIG_CRC16 | 1 + .../configs/L2-OPTIONAL/default/CONFIG_CRC32 | 1 + .../L2-OPTIONAL/default/CONFIG_CRC32_BIT | 1 + 
.../L2-OPTIONAL/default/CONFIG_CRC32_SARWATE | 1 + .../L2-OPTIONAL/default/CONFIG_CRC32_SELFTEST | 1 + .../L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY4 | 1 + .../L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY8 | 1 + .../configs/L2-OPTIONAL/default/CONFIG_CRC4 | 1 + .../configs/L2-OPTIONAL/default/CONFIG_CRC7 | 1 + .../configs/L2-OPTIONAL/default/CONFIG_CRC8 | 1 + .../L2-OPTIONAL/default/CONFIG_CRC_CCITT | 1 + .../L2-OPTIONAL/default/CONFIG_CRC_ITU_T | 1 + .../L2-OPTIONAL/default/CONFIG_CRC_T10DIF | 1 + .../CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA | 1 + .../CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305 | 1 + .../L2-OPTIONAL/default/CONFIG_CRYPTO_ARIA | 1 + .../default/CONFIG_CRYPTO_DEV_AMLOGIC_GXL | 1 + .../default/CONFIG_CRYPTO_DEV_ATMEL_ECC | 1 + .../default/CONFIG_CRYPTO_DEV_ATMEL_SHA204A | 1 + .../default/CONFIG_CRYPTO_DEV_CHELSIO | 1 + .../default/CONFIG_CRYPTO_DEV_NITROX | 1 + .../default/CONFIG_CRYPTO_DEV_NITROX_CNN55XX | 1 + .../default/CONFIG_CRYPTO_DEV_QAT_420XX | 1 + .../default/CONFIG_CRYPTO_DEV_SAFEXCEL | 1 + .../default/CONFIG_CRYPTO_DH_RFC7919_GROUPS | 1 + .../L2-OPTIONAL/default/CONFIG_CRYPTO_ECRDSA | 1 + .../default/CONFIG_CRYPTO_FIPS_CUSTOM_VERSION | 1 + .../default/CONFIG_CRYPTO_FIPS_NAME | 1 + .../L2-OPTIONAL/default/CONFIG_CRYPTO_GENIV | 1 + .../L2-OPTIONAL/default/CONFIG_CRYPTO_HCTR2 | 1 + .../CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE | 1 + .../default/CONFIG_CRYPTO_LIB_ARC4 | 1 + .../default/CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC | 1 + .../default/CONFIG_CRYPTO_LIB_CHACHA | 1 + .../CONFIG_CRYPTO_LIB_CHACHA20POLY1305 | 1 + .../default/CONFIG_CRYPTO_LIB_CHACHA_GENERIC | 1 + .../default/CONFIG_CRYPTO_LIB_CURVE25519 | 1 + .../CONFIG_CRYPTO_LIB_CURVE25519_GENERIC | 1 + .../L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_DES | 1 + .../default/CONFIG_CRYPTO_LIB_GF128MUL | 1 + .../default/CONFIG_CRYPTO_LIB_POLY1305 | 1 + .../CONFIG_CRYPTO_LIB_POLY1305_GENERIC | 1 + .../default/CONFIG_CRYPTO_LIB_SHA1 | 1 + .../default/CONFIG_CRYPTO_LIB_UTILS | 1 + .../L2-OPTIONAL/default/CONFIG_CRYPTO_SIG2 
| 1 + .../default/CONFIG_CSD_LOCK_WAIT_DEBUG | 1 + .../CONFIG_CXL_REGION_INVALIDATION_TEST | 1 + .../L2-OPTIONAL/default/CONFIG_DAMON_LRU_SORT | 1 + .../L2-OPTIONAL/default/CONFIG_DAMON_RECLAIM | 1 + .../L2-OPTIONAL/default/CONFIG_DAMON_SYSFS | 1 + .../L2-OPTIONAL/default/CONFIG_DAVICOM_PHY | 1 + .../default/CONFIG_DCACHE_WORD_ACCESS | 1 + .../default/CONFIG_DEBUG_ATOMIC_SLEEP | 1 + .../default/CONFIG_DEBUG_CGROUP_REF | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_DEVRES | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_DRIVER | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_GPIO | 1 + .../default/CONFIG_DEBUG_INFO_COMPRESSED_NONE | 1 + .../default/CONFIG_DEBUG_INFO_COMPRESSED_ZLIB | 1 + .../default/CONFIG_DEBUG_INFO_COMPRESSED_ZSTD | 1 + .../default/CONFIG_DEBUG_INFO_DWARF5 | 1 + .../default/CONFIG_DEBUG_INFO_NONE | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_IRQFLAGS | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_KOBJECT | 1 + .../CONFIG_DEBUG_LOCKING_API_SELFTESTS | 1 + .../default/CONFIG_DEBUG_LOCK_ALLOC | 1 + .../default/CONFIG_DEBUG_MAPLE_TREE | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_MUTEXES | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_NET | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_OBJECTS | 1 + .../default/CONFIG_DEBUG_PER_CPU_MAPS | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_PINCTRL | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_PREEMPT | 1 + .../default/CONFIG_DEBUG_RT_MUTEXES | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_RWSEMS | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_SHIRQ | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_SPINLOCK | 1 + .../default/CONFIG_DEBUG_STACK_USAGE | 1 + .../default/CONFIG_DEBUG_TEST_DRIVER_REMOVE | 1 + .../default/CONFIG_DEBUG_TIMEKEEPING | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_VIRTUAL | 1 + .../L2-OPTIONAL/default/CONFIG_DEBUG_VM | 1 + .../default/CONFIG_DEBUG_VM_PGTABLE | 1 + .../default/CONFIG_DEBUG_WQ_FORCE_RR_CPU | 1 + .../default/CONFIG_DEBUG_WW_MUTEX_SLOWPATH | 1 + .../default/CONFIG_DECOMPRESS_BZIP2 | 1 + .../default/CONFIG_DECOMPRESS_GZIP | 1 + 
.../L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZ4 | 1 + .../default/CONFIG_DECOMPRESS_LZMA | 1 + .../L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZO | 1 + .../L2-OPTIONAL/default/CONFIG_DECOMPRESS_XZ | 1 + .../default/CONFIG_DECOMPRESS_ZSTD | 1 + .../default/CONFIG_DEFAULT_HOSTNAME | 1 + .../L2-OPTIONAL/default/CONFIG_DEFAULT_INIT | 1 + .../default/CONFIG_DEVICE_MIGRATION | 1 + .../L2-OPTIONAL/default/CONFIG_DEVTMPFS_SAFE | 1 + .../configs/L2-OPTIONAL/default/CONFIG_DIMLIB | 1 + .../configs/L2-OPTIONAL/default/CONFIG_DM9051 | 1 + .../L2-OPTIONAL/default/CONFIG_DMABUF_DEBUG | 1 + .../L2-OPTIONAL/default/CONFIG_DMABUF_HEAPS | 1 + .../default/CONFIG_DMABUF_MOVE_NOTIFY | 1 + .../default/CONFIG_DMABUF_SELFTESTS | 1 + .../default/CONFIG_DMABUF_SYSFS_STATS | 1 + .../L2-OPTIONAL/default/CONFIG_DMAPOOL_TEST | 1 + .../L2-OPTIONAL/default/CONFIG_DMA_ACPI | 1 + .../default/CONFIG_DMA_COHERENT_POOL | 1 + .../default/CONFIG_DMA_ENGINE_RAID | 1 + .../default/CONFIG_DMA_FENCE_TRACE | 1 + .../default/CONFIG_DMA_MAP_BENCHMARK | 1 + .../L2-OPTIONAL/default/CONFIG_DMA_NUMA_CMA | 1 + .../L2-OPTIONAL/default/CONFIG_DMA_OPS | 1 + .../default/CONFIG_DMA_SHARED_BUFFER | 1 + .../L2-OPTIONAL/default/CONFIG_DM_AUDIT | 1 + .../L2-OPTIONAL/default/CONFIG_DM_BIO_PRISON | 1 + .../L2-OPTIONAL/default/CONFIG_DM_BUFIO | 1 + .../L2-OPTIONAL/default/CONFIG_DM_CLONE | 1 + .../CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING | 1 + .../L2-OPTIONAL/default/CONFIG_DM_DUST | 1 + .../configs/L2-OPTIONAL/default/CONFIG_DM_EBS | 1 + .../default/CONFIG_DM_MULTIPATH_HST | 1 + .../default/CONFIG_DM_MULTIPATH_IOA | 1 + .../default/CONFIG_DM_PERSISTENT_DATA | 1 + .../L2-OPTIONAL/default/CONFIG_DM_UNSTRIPED | 1 + .../L2-OPTIONAL/default/CONFIG_DM_VERITY_FEC | 1 + .../CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG | 1 + .../configs/L2-OPTIONAL/default/CONFIG_DNET | 1 + .../L2-OPTIONAL/default/CONFIG_DP83640_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_DP83822_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_DP83848_PHY | 1 + 
.../L2-OPTIONAL/default/CONFIG_DP83867_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_DP83869_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_DP83TC811_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_DP83TD510_PHY | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_DQL | 1 + .../L2-OPTIONAL/default/CONFIG_DRAGONRISE_FF | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_ACCEL | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_AMD_DC_FP | 1 + .../default/CONFIG_DRM_AMD_SECURE_DISPLAY | 1 + .../default/CONFIG_DRM_ANALOGIX_ANX78XX | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_BRIDGE | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_BUDDY | 1 + .../default/CONFIG_DRM_DISPLAY_DP_HELPER | 1 + .../default/CONFIG_DRM_DISPLAY_HDCP_HELPER | 1 + .../default/CONFIG_DRM_DISPLAY_HDMI_HELPER | 1 + .../default/CONFIG_DRM_DISPLAY_HELPER | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_ETNAVIV | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_EXEC | 1 + .../default/CONFIG_DRM_GEM_SHMEM_HELPER | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_GM12U320 | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_GUD | 1 + .../default/CONFIG_DRM_I2C_NXP_TDA9950 | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_KMS_HELPER | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_LEGACY | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_LOONGSON | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_PANEL | 1 + .../default/CONFIG_DRM_PANEL_AUO_A030JTN01 | 1 + .../default/CONFIG_DRM_PANEL_BRIDGE | 1 + .../default/CONFIG_DRM_PANEL_MIPI_DBI | 1 + .../CONFIG_DRM_PANEL_ORIENTATION_QUIRKS | 1 + .../CONFIG_DRM_PANEL_ORISETECH_OTA5601A | 1 + .../default/CONFIG_DRM_PANEL_WIDECHIPS_WS2401 | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_SCHED | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_SIMPLEDRM | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_SSD130X | 1 + .../default/CONFIG_DRM_SUBALLOC_HELPER | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_TTM | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_TTM_HELPER | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_VGEM | 1 + .../default/CONFIG_DRM_VIRTIO_GPU_KMS | 1 + .../L2-OPTIONAL/default/CONFIG_DRM_VKMS | 1 + .../default/CONFIG_DRM_VRAM_HELPER | 1 
+ .../configs/L2-OPTIONAL/default/CONFIG_DS1682 | 1 + .../L2-OPTIONAL/default/CONFIG_DST_CACHE | 1 + .../L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE | 1 + .../default/CONFIG_DUMMY_CONSOLE_COLUMNS | 1 + .../default/CONFIG_DUMMY_CONSOLE_ROWS | 1 + .../L2-OPTIONAL/default/CONFIG_DUMMY_IRQ | 1 + .../L2-OPTIONAL/default/CONFIG_DW_EDMA | 1 + .../L2-OPTIONAL/default/CONFIG_DW_WATCHDOG | 1 + .../L2-OPTIONAL/default/CONFIG_DW_XDATA_PCIE | 1 + .../L2-OPTIONAL/default/CONFIG_DYNAMIC_EVENTS | 1 + .../default/CONFIG_DYNAMIC_FTRACE_WITH_ARGS | 1 + .../CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS | 1 + .../configs/L2-OPTIONAL/default/CONFIG_ECHO | 1 + .../L2-OPTIONAL/default/CONFIG_ECRYPT_FS | 1 + .../L2-OPTIONAL/default/CONFIG_EDAC_SUPPORT | 1 + .../L2-OPTIONAL/default/CONFIG_EEPROM_93CX6 | 1 + .../L2-OPTIONAL/default/CONFIG_EEPROM_93XX46 | 1 + .../L2-OPTIONAL/default/CONFIG_EEPROM_AT24 | 1 + .../L2-OPTIONAL/default/CONFIG_EEPROM_AT25 | 1 + .../L2-OPTIONAL/default/CONFIG_EEPROM_EE1004 | 1 + .../default/CONFIG_EEPROM_IDT_89HPESX | 1 + .../L2-OPTIONAL/default/CONFIG_EEPROM_LEGACY | 1 + .../L2-OPTIONAL/default/CONFIG_EEPROM_MAX6875 | 1 + .../default/CONFIG_EFI_BOOTLOADER_CONTROL | 1 + .../default/CONFIG_EFI_CAPSULE_LOADER | 1 + .../default/CONFIG_EFI_DISABLE_PCI_DMA | 1 + .../default/CONFIG_EFI_DISABLE_RUNTIME | 1 + .../L2-OPTIONAL/default/CONFIG_EFI_EARLYCON | 1 + .../L2-OPTIONAL/default/CONFIG_EFI_ESRT | 1 + .../L2-OPTIONAL/default/CONFIG_EFI_PARTITION | 1 + .../default/CONFIG_EFI_RUNTIME_WRAPPERS | 1 + .../L2-OPTIONAL/default/CONFIG_EFI_TEST | 1 + .../configs/L2-OPTIONAL/default/CONFIG_EFS_FS | 1 + .../L2-OPTIONAL/default/CONFIG_ELFCORE | 1 + .../L2-OPTIONAL/default/CONFIG_ENA_ETHERNET | 1 + .../default/CONFIG_ENCLOSURE_SERVICES | 1 + .../L2-OPTIONAL/default/CONFIG_ENERGY_MODEL | 1 + .../L2-OPTIONAL/default/CONFIG_EQUALIZER | 1 + .../default/CONFIG_EROFS_FS_PCPU_KTHREAD | 1 + .../L2-OPTIONAL/default/CONFIG_ETHERNET | 1 + .../configs/L2-OPTIONAL/default/CONFIG_ETHOC | 1 + 
.../L2-OPTIONAL/default/CONFIG_EVENT_TRACING | 1 + .../default/CONFIG_EXCLUSIVE_SYSTEM_RAM | 1 + .../L2-OPTIONAL/default/CONFIG_EXPORTFS | 1 + .../default/CONFIG_EXT3_FS_POSIX_ACL | 1 + .../default/CONFIG_EXT3_FS_SECURITY | 1 + .../L2-OPTIONAL/default/CONFIG_EZX_PCAP | 1 + .../L2-OPTIONAL/default/CONFIG_F2FS_FS | 1 + .../L2-OPTIONAL/default/CONFIG_FARSYNC | 1 + .../L2-OPTIONAL/default/CONFIG_FB_3DFX | 1 + .../configs/L2-OPTIONAL/default/CONFIG_FB_ARK | 1 + .../L2-OPTIONAL/default/CONFIG_FB_ASILIANT | 1 + .../configs/L2-OPTIONAL/default/CONFIG_FB_ATY | 1 + .../L2-OPTIONAL/default/CONFIG_FB_ATY128 | 1 + .../L2-OPTIONAL/default/CONFIG_FB_CARMINE | 1 + .../default/CONFIG_FB_CFB_COPYAREA | 1 + .../default/CONFIG_FB_CFB_FILLRECT | 1 + .../default/CONFIG_FB_CFB_IMAGEBLIT | 1 + .../L2-OPTIONAL/default/CONFIG_FB_CIRRUS | 1 + .../L2-OPTIONAL/default/CONFIG_FB_CORE | 1 + .../L2-OPTIONAL/default/CONFIG_FB_CYBER2000 | 1 + .../L2-OPTIONAL/default/CONFIG_FB_DEFERRED_IO | 1 + .../L2-OPTIONAL/default/CONFIG_FB_DEVICE | 1 + .../default/CONFIG_FB_FOREIGN_ENDIAN | 1 + .../L2-OPTIONAL/default/CONFIG_FB_I740 | 1 + .../L2-OPTIONAL/default/CONFIG_FB_IBM_GXT4500 | 1 + .../L2-OPTIONAL/default/CONFIG_FB_IMSTT | 1 + .../default/CONFIG_FB_IOMEM_HELPERS | 1 + .../L2-OPTIONAL/default/CONFIG_FB_KYRO | 1 + .../L2-OPTIONAL/default/CONFIG_FB_MATROX | 1 + .../L2-OPTIONAL/default/CONFIG_FB_MB862XX | 1 + .../L2-OPTIONAL/default/CONFIG_FB_METRONOME | 1 + .../default/CONFIG_FB_MODE_HELPERS | 1 + .../L2-OPTIONAL/default/CONFIG_FB_NEOMAGIC | 1 + .../L2-OPTIONAL/default/CONFIG_FB_NOTIFY | 1 + .../L2-OPTIONAL/default/CONFIG_FB_NVIDIA | 1 + .../L2-OPTIONAL/default/CONFIG_FB_OPENCORES | 1 + .../configs/L2-OPTIONAL/default/CONFIG_FB_PM2 | 1 + .../configs/L2-OPTIONAL/default/CONFIG_FB_PM3 | 1 + .../L2-OPTIONAL/default/CONFIG_FB_RADEON | 1 + .../L2-OPTIONAL/default/CONFIG_FB_RIVA | 1 + .../L2-OPTIONAL/default/CONFIG_FB_S1D13XXX | 1 + .../configs/L2-OPTIONAL/default/CONFIG_FB_S3 | 1 + 
.../L2-OPTIONAL/default/CONFIG_FB_SAVAGE | 1 + .../configs/L2-OPTIONAL/default/CONFIG_FB_SIS | 1 + .../L2-OPTIONAL/default/CONFIG_FB_SM712 | 1 + .../L2-OPTIONAL/default/CONFIG_FB_SMSCUFX | 1 + .../default/CONFIG_FB_SYSMEM_HELPERS | 1 + .../default/CONFIG_FB_SYSMEM_HELPERS_DEFERRED | 1 + .../default/CONFIG_FB_SYS_COPYAREA | 1 + .../default/CONFIG_FB_SYS_FILLRECT | 1 + .../L2-OPTIONAL/default/CONFIG_FB_SYS_FOPS | 1 + .../default/CONFIG_FB_SYS_IMAGEBLIT | 1 + .../L2-OPTIONAL/default/CONFIG_FB_TRIDENT | 1 + .../configs/L2-OPTIONAL/default/CONFIG_FB_UDL | 1 + .../L2-OPTIONAL/default/CONFIG_FB_UVESA | 1 + .../L2-OPTIONAL/default/CONFIG_FB_VIRTUAL | 1 + .../L2-OPTIONAL/default/CONFIG_FB_VOODOO1 | 1 + .../L2-OPTIONAL/default/CONFIG_FB_VT8623 | 1 + .../configs/L2-OPTIONAL/default/CONFIG_FDDI | 1 + .../configs/L2-OPTIONAL/default/CONFIG_FEALNX | 1 + .../L2-OPTIONAL/default/CONFIG_FIB_RULES | 1 + .../default/CONFIG_FIND_BIT_BENCHMARK | 1 + .../default/CONFIG_FIPS_SIGNATURE_SELFTEST | 1 + .../L2-OPTIONAL/default/CONFIG_FIREWIRE_NOSY | 1 + .../L2-OPTIONAL/default/CONFIG_FIXED_PHY | 1 + .../configs/L2-OPTIONAL/default/CONFIG_FONTS | 1 + .../L2-OPTIONAL/default/CONFIG_FONT_8x16 | 1 + .../L2-OPTIONAL/default/CONFIG_FONT_8x8 | 1 + .../L2-OPTIONAL/default/CONFIG_FONT_SUPPORT | 1 + .../configs/L2-OPTIONAL/default/CONFIG_FPGA | 1 + ...NFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER | 1 + ...IG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION | 1 + .../L2-OPTIONAL/default/CONFIG_FREEZER | 1 + .../L2-OPTIONAL/default/CONFIG_FS_DAX_PMD | 1 + .../L2-OPTIONAL/default/CONFIG_FS_ENCRYPTION | 1 + .../L2-OPTIONAL/default/CONFIG_FS_IOMAP | 1 + .../L2-OPTIONAL/default/CONFIG_FS_POSIX_ACL | 1 + .../L2-OPTIONAL/default/CONFIG_FS_VERITY | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_FTL | 1 + .../default/CONFIG_FTRACE_MCOUNT_RECORD | 1 + .../default/CONFIG_FTRACE_STARTUP_TEST | 1 + .../default/CONFIG_FUNCTION_ALIGNMENT_4B | 1 + .../default/CONFIG_FUNCTION_ERROR_INJECTION | 1 + 
.../default/CONFIG_FUNCTION_GRAPH_RETVAL | 1 + .../L2-OPTIONAL/default/CONFIG_FUN_ETH | 1 + .../L2-OPTIONAL/default/CONFIG_FUTEX_PI | 1 + .../L2-OPTIONAL/default/CONFIG_FWNODE_MDIO | 1 + .../CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT | 1 + .../default/CONFIG_FW_LOADER_COMPRESS | 1 + .../default/CONFIG_FW_LOADER_DEBUG | 1 + .../default/CONFIG_FW_LOADER_PAGED_BUF | 1 + .../default/CONFIG_FW_LOADER_SYSFS | 1 + .../L2-OPTIONAL/default/CONFIG_FW_UPLOAD | 1 + .../L2-OPTIONAL/default/CONFIG_GAMEPORT | 1 + .../configs/L2-OPTIONAL/default/CONFIG_GARP | 1 + .../default/CONFIG_GCC10_NO_ARRAY_BOUNDS | 1 + .../L2-OPTIONAL/default/CONFIG_GCC_PLUGINS | 1 + .../default/CONFIG_GCC_PLUGIN_LATENT_ENTROPY | 1 + .../default/CONFIG_GCC_PLUGIN_STACKLEAK | 1 + .../L2-OPTIONAL/default/CONFIG_GCC_VERSION | 1 + .../default/CONFIG_GENERIC_ALLOCATOR | 1 + .../L2-OPTIONAL/default/CONFIG_GENERIC_BUG | 1 + .../CONFIG_GENERIC_BUG_RELATIVE_POINTERS | 1 + .../default/CONFIG_GENERIC_CALIBRATE_DELAY | 1 + .../default/CONFIG_GENERIC_CLOCKEVENTS | 1 + .../CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | 1 + .../default/CONFIG_GENERIC_CPU_AUTOPROBE | 1 + .../CONFIG_GENERIC_CPU_VULNERABILITIES | 1 + .../default/CONFIG_GENERIC_EARLY_IOREMAP | 1 + .../CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK | 1 + .../default/CONFIG_GENERIC_IRQ_MIGRATION | 1 + .../default/CONFIG_GENERIC_MSI_IRQ | 1 + .../default/CONFIG_GENERIC_NET_UTILS | 1 + .../default/CONFIG_GENERIC_PCI_IOMAP | 1 + .../default/CONFIG_GENERIC_PINCONF | 1 + .../L2-OPTIONAL/default/CONFIG_GENERIC_PTDUMP | 1 + .../default/CONFIG_GENERIC_SMP_IDLE_THREAD | 1 + .../default/CONFIG_GENERIC_STRNCPY_FROM_USER | 1 + .../default/CONFIG_GENERIC_STRNLEN_USER | 1 + .../default/CONFIG_GENERIC_TIME_VSYSCALL | 1 + .../L2-OPTIONAL/default/CONFIG_GENERIC_TRACER | 1 + .../configs/L2-OPTIONAL/default/CONFIG_GENWQE | 1 + .../configs/L2-OPTIONAL/default/CONFIG_GLOB | 1 + .../L2-OPTIONAL/default/CONFIG_GLOB_SELFTEST | 1 + .../configs/L2-OPTIONAL/default/CONFIG_GNSS | 1 + 
.../L2-OPTIONAL/default/CONFIG_GOLDFISH | 1 + .../default/CONFIG_GOOGLE_FIRMWARE | 1 + .../L2-OPTIONAL/default/CONFIG_GPIOLIB | 1 + .../default/CONFIG_GPIOLIB_FASTPATH_LIMIT | 1 + .../default/CONFIG_GPIOLIB_IRQCHIP | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_ACPI | 1 + .../default/CONFIG_GPIO_AGGREGATOR | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_AMDPT | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_AMD_FCH | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_BT8XX | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_CDEV | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_CDEV_V1 | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_EXAR | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_MAX3191X | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_MAX7300 | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_MAX7301 | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_MAX732X | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_MB86S7X | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_MC33880 | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_MOCKUP | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_PCA953X | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_PCA9570 | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_PCF857X | 1 + .../default/CONFIG_GPIO_PCIE_IDIO_24 | 1 + .../default/CONFIG_GPIO_PCI_IDIO_16 | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_PISOSR | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_RDC321X | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_TPIC2810 | 1 + .../L2-OPTIONAL/default/CONFIG_GPIO_XRA1403 | 1 + .../L2-OPTIONAL/default/CONFIG_GRACE_PERIOD | 1 + .../L2-OPTIONAL/default/CONFIG_GREENASIA_FF | 1 + .../L2-OPTIONAL/default/CONFIG_GREYBUS | 1 + .../L2-OPTIONAL/default/CONFIG_GRO_CELLS | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_GTP | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_GVE | 1 + .../L2-OPTIONAL/default/CONFIG_HAMRADIO | 1 + .../default/CONFIG_HARDIRQS_SW_RESEND | 1 + .../default/CONFIG_HARDLOCKUP_DETECTOR_ARCH | 1 + .../default/CONFIG_HARDLOCKUP_DETECTOR_BUDDY | 1 + .../CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER | 1 + .../default/CONFIG_HARDLOCKUP_DETECTOR_PERF | 1 + 
.../CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY | 1 + .../L2-OPTIONAL/default/CONFIG_HAS_DMA | 1 + .../L2-OPTIONAL/default/CONFIG_HAS_IOMEM | 1 + .../L2-OPTIONAL/default/CONFIG_HAS_IOPORT | 1 + .../L2-OPTIONAL/default/CONFIG_HAS_IOPORT_MAP | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_ACPI_APEI | 1 + .../default/CONFIG_HAVE_ALIGNED_STRUCT_PAGE | 1 + .../default/CONFIG_HAVE_ARCH_AUDITSYSCALL | 1 + .../default/CONFIG_HAVE_ARCH_HUGE_VMALLOC | 1 + .../default/CONFIG_HAVE_ARCH_HUGE_VMAP | 1 + .../default/CONFIG_HAVE_ARCH_JUMP_LABEL | 1 + .../CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE | 1 + .../default/CONFIG_HAVE_ARCH_KASAN | 1 + .../default/CONFIG_HAVE_ARCH_KASAN_VMALLOC | 1 + .../default/CONFIG_HAVE_ARCH_KFENCE | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KGDB | 1 + .../default/CONFIG_HAVE_ARCH_MMAP_RND_BITS | 1 + .../CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS | 1 + .../CONFIG_HAVE_ARCH_PREL32_RELOCATIONS | 1 + .../CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET | 1 + .../default/CONFIG_HAVE_ARCH_SECCOMP | 1 + .../default/CONFIG_HAVE_ARCH_SECCOMP_FILTER | 1 + .../default/CONFIG_HAVE_ARCH_STACKLEAK | 1 + .../CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST | 1 + .../default/CONFIG_HAVE_ARCH_TRACEHOOK | 1 + .../CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE | 1 + .../CONFIG_HAVE_ARCH_USERFAULTFD_MINOR | 1 + .../default/CONFIG_HAVE_ARCH_VMAP_STACK | 1 + .../default/CONFIG_HAVE_ASM_MODVERSIONS | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_CLK | 1 + .../default/CONFIG_HAVE_CLK_PREPARE | 1 + .../default/CONFIG_HAVE_CMPXCHG_DOUBLE | 1 + .../default/CONFIG_HAVE_CMPXCHG_LOCAL | 1 + .../default/CONFIG_HAVE_CONTEXT_TRACKING_USER | 1 + .../default/CONFIG_HAVE_C_RECORDMCOUNT | 1 + .../default/CONFIG_HAVE_DEBUG_KMEMLEAK | 1 + .../default/CONFIG_HAVE_DMA_CONTIGUOUS | 1 + .../default/CONFIG_HAVE_DYNAMIC_FTRACE | 1 + .../CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS | 1 + ...NFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_EBPF_JIT | 1 + .../CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | 1 + 
.../L2-OPTIONAL/default/CONFIG_HAVE_FAST_GUP | 1 + .../default/CONFIG_HAVE_FTRACE_MCOUNT_RECORD | 1 + .../CONFIG_HAVE_FUNCTION_ARG_ACCESS_API | 1 + .../CONFIG_HAVE_FUNCTION_ERROR_INJECTION | 1 + .../default/CONFIG_HAVE_FUNCTION_GRAPH_RETVAL | 1 + .../default/CONFIG_HAVE_FUNCTION_GRAPH_TRACER | 1 + .../default/CONFIG_HAVE_FUNCTION_TRACER | 1 + .../default/CONFIG_HAVE_GCC_PLUGINS | 1 + .../default/CONFIG_HAVE_GENERIC_VDSO | 1 + .../CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY | 1 + .../CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF | 1 + .../default/CONFIG_HAVE_HW_BREAKPOINT | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_IMA_KEXEC | 1 + .../default/CONFIG_HAVE_IOREMAP_PROT | 1 + .../default/CONFIG_HAVE_IRQ_TIME_ACCOUNTING | 1 + .../default/CONFIG_HAVE_KCSAN_COMPILER | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_KPROBES | 1 + .../default/CONFIG_HAVE_KRETPROBES | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_KVM | 1 + .../CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT | 1 + .../default/CONFIG_HAVE_KVM_DIRTY_RING | 1 + .../CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL | 1 + .../default/CONFIG_HAVE_KVM_EVENTFD | 1 + .../default/CONFIG_HAVE_KVM_IRQCHIP | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQFD | 1 + .../default/CONFIG_HAVE_KVM_IRQ_BYPASS | 1 + .../default/CONFIG_HAVE_KVM_IRQ_ROUTING | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_KVM_MSI | 1 + .../default/CONFIG_HAVE_MOD_ARCH_SPECIFIC | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PMD | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PUD | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_NMI | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_PCI | 1 + .../default/CONFIG_HAVE_PERF_EVENTS | 1 + .../default/CONFIG_HAVE_PERF_EVENTS_NMI | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_PERF_REGS | 1 + .../default/CONFIG_HAVE_PERF_USER_STACK_DUMP | 1 + .../CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK | 1 + .../default/CONFIG_HAVE_PREEMPT_DYNAMIC | 1 + .../CONFIG_HAVE_REGS_AND_STACK_ACCESS_API | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_RSEQ | 1 + .../default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT | 1 + 
.../CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI | 1 + .../default/CONFIG_HAVE_SCHED_AVG_IRQ | 1 + .../default/CONFIG_HAVE_SETUP_PER_CPU_AREA | 1 + .../default/CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK | 1 + .../default/CONFIG_HAVE_STACKPROTECTOR | 1 + .../default/CONFIG_HAVE_SYSCALL_TRACEPOINTS | 1 + .../L2-OPTIONAL/default/CONFIG_HAVE_UID16 | 1 + .../CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN | 1 + .../configs/L2-OPTIONAL/default/CONFIG_HDLC | 1 + .../L2-OPTIONAL/default/CONFIG_HDLC_CISCO | 1 + .../L2-OPTIONAL/default/CONFIG_HDLC_FR | 1 + .../L2-OPTIONAL/default/CONFIG_HDLC_PPP | 1 + .../L2-OPTIONAL/default/CONFIG_HDLC_RAW | 1 + .../L2-OPTIONAL/default/CONFIG_HDLC_RAW_ETH | 1 + .../L2-OPTIONAL/default/CONFIG_HFSPLUS_FS | 1 + .../configs/L2-OPTIONAL/default/CONFIG_HFS_FS | 1 + .../configs/L2-OPTIONAL/default/CONFIG_HIDRAW | 1 + .../L2-OPTIONAL/default/CONFIG_HID_A4TECH | 1 + .../L2-OPTIONAL/default/CONFIG_HID_ACCUTOUCH | 1 + .../L2-OPTIONAL/default/CONFIG_HID_ACRUX | 1 + .../L2-OPTIONAL/default/CONFIG_HID_ACRUX_FF | 1 + .../L2-OPTIONAL/default/CONFIG_HID_APPLE | 1 + .../L2-OPTIONAL/default/CONFIG_HID_APPLEIR | 1 + .../L2-OPTIONAL/default/CONFIG_HID_AUREAL | 1 + .../default/CONFIG_HID_BATTERY_STRENGTH | 1 + .../L2-OPTIONAL/default/CONFIG_HID_BELKIN | 1 + .../L2-OPTIONAL/default/CONFIG_HID_BETOP_FF | 1 + .../L2-OPTIONAL/default/CONFIG_HID_BIGBEN_FF | 1 + .../L2-OPTIONAL/default/CONFIG_HID_BPF | 1 + .../L2-OPTIONAL/default/CONFIG_HID_CHERRY | 1 + .../L2-OPTIONAL/default/CONFIG_HID_CHICONY | 1 + .../L2-OPTIONAL/default/CONFIG_HID_CORSAIR | 1 + .../L2-OPTIONAL/default/CONFIG_HID_COUGAR | 1 + .../L2-OPTIONAL/default/CONFIG_HID_CP2112 | 1 + .../default/CONFIG_HID_CREATIVE_SB0540 | 1 + .../L2-OPTIONAL/default/CONFIG_HID_CYPRESS | 1 + .../L2-OPTIONAL/default/CONFIG_HID_DRAGONRISE | 1 + .../L2-OPTIONAL/default/CONFIG_HID_ELAN | 1 + .../L2-OPTIONAL/default/CONFIG_HID_ELECOM | 1 + .../L2-OPTIONAL/default/CONFIG_HID_ELO | 1 + .../L2-OPTIONAL/default/CONFIG_HID_EMS_FF | 1 + 
.../L2-OPTIONAL/default/CONFIG_HID_EVISION | 1 + .../L2-OPTIONAL/default/CONFIG_HID_EZKEY | 1 + .../L2-OPTIONAL/default/CONFIG_HID_FT260 | 1 + .../L2-OPTIONAL/default/CONFIG_HID_GEMBIRD | 1 + .../L2-OPTIONAL/default/CONFIG_HID_GENERIC | 1 + .../L2-OPTIONAL/default/CONFIG_HID_GFRM | 1 + .../L2-OPTIONAL/default/CONFIG_HID_GLORIOUS | 1 + .../default/CONFIG_HID_GOOGLE_STADIA_FF | 1 + .../L2-OPTIONAL/default/CONFIG_HID_GREENASIA | 1 + .../L2-OPTIONAL/default/CONFIG_HID_GT683R | 1 + .../L2-OPTIONAL/default/CONFIG_HID_GYRATION | 1 + .../L2-OPTIONAL/default/CONFIG_HID_HOLTEK | 1 + .../L2-OPTIONAL/default/CONFIG_HID_ICADE | 1 + .../L2-OPTIONAL/default/CONFIG_HID_ITE | 1 + .../L2-OPTIONAL/default/CONFIG_HID_JABRA | 1 + .../L2-OPTIONAL/default/CONFIG_HID_KENSINGTON | 1 + .../L2-OPTIONAL/default/CONFIG_HID_KEYTOUCH | 1 + .../L2-OPTIONAL/default/CONFIG_HID_KYE | 1 + .../L2-OPTIONAL/default/CONFIG_HID_LCPOWER | 1 + .../L2-OPTIONAL/default/CONFIG_HID_LED | 1 + .../L2-OPTIONAL/default/CONFIG_HID_LENOVO | 1 + .../L2-OPTIONAL/default/CONFIG_HID_LETSKETCH | 1 + .../L2-OPTIONAL/default/CONFIG_HID_LOGITECH | 1 + .../default/CONFIG_HID_LOGITECH_DJ | 1 + .../default/CONFIG_HID_LOGITECH_HIDPP | 1 + .../L2-OPTIONAL/default/CONFIG_HID_MACALLY | 1 + .../L2-OPTIONAL/default/CONFIG_HID_MAGICMOUSE | 1 + .../L2-OPTIONAL/default/CONFIG_HID_MALTRON | 1 + .../L2-OPTIONAL/default/CONFIG_HID_MAYFLASH | 1 + .../L2-OPTIONAL/default/CONFIG_HID_MCP2221 | 1 + .../default/CONFIG_HID_MEGAWORLD_FF | 1 + .../L2-OPTIONAL/default/CONFIG_HID_MICROSOFT | 1 + .../L2-OPTIONAL/default/CONFIG_HID_MONTEREY | 1 + .../L2-OPTIONAL/default/CONFIG_HID_MULTITOUCH | 1 + .../L2-OPTIONAL/default/CONFIG_HID_NINTENDO | 1 + .../L2-OPTIONAL/default/CONFIG_HID_NTI | 1 + .../L2-OPTIONAL/default/CONFIG_HID_NTRIG | 1 + .../L2-OPTIONAL/default/CONFIG_HID_ORTEK | 1 + .../default/CONFIG_HID_PANTHERLORD | 1 + .../L2-OPTIONAL/default/CONFIG_HID_PENMOUNT | 1 + .../L2-OPTIONAL/default/CONFIG_HID_PETALYNX | 1 + 
.../L2-OPTIONAL/default/CONFIG_HID_PICOLCD | 1 + .../default/CONFIG_HID_PICOLCD_BACKLIGHT | 1 + .../L2-OPTIONAL/default/CONFIG_HID_PICOLCD_FB | 1 + .../default/CONFIG_HID_PICOLCD_LCD | 1 + .../default/CONFIG_HID_PICOLCD_LEDS | 1 + .../L2-OPTIONAL/default/CONFIG_HID_PID | 1 + .../default/CONFIG_HID_PLANTRONICS | 1 + .../L2-OPTIONAL/default/CONFIG_HID_PRIMAX | 1 + .../L2-OPTIONAL/default/CONFIG_HID_PXRC | 1 + .../L2-OPTIONAL/default/CONFIG_HID_RAZER | 1 + .../L2-OPTIONAL/default/CONFIG_HID_REDRAGON | 1 + .../L2-OPTIONAL/default/CONFIG_HID_RETRODE | 1 + .../L2-OPTIONAL/default/CONFIG_HID_RMI | 1 + .../L2-OPTIONAL/default/CONFIG_HID_ROCCAT | 1 + .../L2-OPTIONAL/default/CONFIG_HID_SAITEK | 1 + .../L2-OPTIONAL/default/CONFIG_HID_SAMSUNG | 1 + .../L2-OPTIONAL/default/CONFIG_HID_SEMITEK | 1 + .../L2-OPTIONAL/default/CONFIG_HID_SIGMAMICRO | 1 + .../default/CONFIG_HID_SMARTJOYPLUS | 1 + .../L2-OPTIONAL/default/CONFIG_HID_SONY | 1 + .../L2-OPTIONAL/default/CONFIG_HID_SPEEDLINK | 1 + .../L2-OPTIONAL/default/CONFIG_HID_STEAM | 1 + .../default/CONFIG_HID_STEELSERIES | 1 + .../L2-OPTIONAL/default/CONFIG_HID_SUNPLUS | 1 + .../L2-OPTIONAL/default/CONFIG_HID_THINGM | 1 + .../default/CONFIG_HID_THRUSTMASTER | 1 + .../L2-OPTIONAL/default/CONFIG_HID_TIVO | 1 + .../L2-OPTIONAL/default/CONFIG_HID_TOPRE | 1 + .../L2-OPTIONAL/default/CONFIG_HID_TOPSEED | 1 + .../L2-OPTIONAL/default/CONFIG_HID_TWINHAN | 1 + .../L2-OPTIONAL/default/CONFIG_HID_U2FZERO | 1 + .../L2-OPTIONAL/default/CONFIG_HID_UCLOGIC | 1 + .../L2-OPTIONAL/default/CONFIG_HID_UDRAW_PS3 | 1 + .../L2-OPTIONAL/default/CONFIG_HID_VIEWSONIC | 1 + .../L2-OPTIONAL/default/CONFIG_HID_VIVALDI | 1 + .../L2-OPTIONAL/default/CONFIG_HID_VRC2 | 1 + .../L2-OPTIONAL/default/CONFIG_HID_WACOM | 1 + .../L2-OPTIONAL/default/CONFIG_HID_WALTOP | 1 + .../L2-OPTIONAL/default/CONFIG_HID_WIIMOTE | 1 + .../L2-OPTIONAL/default/CONFIG_HID_XIAOMI | 1 + .../L2-OPTIONAL/default/CONFIG_HID_XINMO | 1 + .../L2-OPTIONAL/default/CONFIG_HID_ZEROPLUS | 1 + 
.../L2-OPTIONAL/default/CONFIG_HID_ZYDACRON | 1 + .../configs/L2-OPTIONAL/default/CONFIG_HIPPI | 1 + .../default/CONFIG_HIST_TRIGGERS_DEBUG | 1 + .../L2-OPTIONAL/default/CONFIG_HMC6352 | 1 + .../L2-OPTIONAL/default/CONFIG_HMEM_REPORTING | 1 + .../L2-OPTIONAL/default/CONFIG_HMM_MIRROR | 1 + .../L2-OPTIONAL/default/CONFIG_HOLTEK_FF | 1 + .../default/CONFIG_HOTPLUG_CORE_SYNC | 1 + .../default/CONFIG_HOTPLUG_CORE_SYNC_DEAD | 1 + .../default/CONFIG_HOTPLUG_PCI_ACPI_IBM | 1 + .../default/CONFIG_HOTPLUG_PCI_CPCI | 1 + .../L2-OPTIONAL/default/CONFIG_HPFS_FS | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_HSI | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_HSR | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_HTE | 1 + .../L2-OPTIONAL/default/CONFIG_HVC_DRIVER | 1 + .../default/CONFIG_HWMON_DEBUG_CHIP | 1 + .../L2-OPTIONAL/default/CONFIG_HWSPINLOCK | 1 + .../L2-OPTIONAL/default/CONFIG_HW_CONSOLE | 1 + .../default/CONFIG_HW_RANDOM_BA431 | 1 + .../default/CONFIG_HW_RANDOM_TIMERIOMEM | 1 + .../default/CONFIG_HW_RANDOM_XIPHERA | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_ALGOBIT | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_ALGOPCA | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_ALI1535 | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_ALI1563 | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_ALI15X3 | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_AMD_MP2 | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_BOARDINFO | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_CBUS_GPIO | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_COMPAT | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_CP2615 | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_DEBUG_ALGO | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_DEBUG_BUS | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_DEBUG_CORE | 1 + .../default/CONFIG_I2C_DESIGNWARE_CORE | 1 + .../default/CONFIG_I2C_DESIGNWARE_PCI | 1 + .../default/CONFIG_I2C_DESIGNWARE_PLATFORM | 1 + .../default/CONFIG_I2C_DESIGNWARE_SLAVE | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_DIOLAN_U2C | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_EMEV2 | 1 + 
.../L2-OPTIONAL/default/CONFIG_I2C_HID | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_HID_ACPI | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_HID_OF | 1 + .../default/CONFIG_I2C_MUX_LTC4306 | 1 + .../default/CONFIG_I2C_MUX_MLXCPLD | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_MUX_REG | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_NFORCE2 | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_NVIDIA_GPU | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_OCORES | 1 + .../default/CONFIG_I2C_PCA_PLATFORM | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_PCI1XXXX | 1 + .../default/CONFIG_I2C_ROBOTFUZZ_OSIF | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_SIMTEC | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_SIS5595 | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_SIS630 | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_STUB | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_TAOS_EVM | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_TINY_USB | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_VIRTIO | 1 + .../L2-OPTIONAL/default/CONFIG_I2C_XILINX | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_I3C | 1 + .../configs/L2-OPTIONAL/default/CONFIG_IAVF | 1 + .../L2-OPTIONAL/default/CONFIG_ICE_SWITCHDEV | 1 + .../L2-OPTIONAL/default/CONFIG_ICPLUS_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_ICS932S401 | 1 + .../L2-OPTIONAL/default/CONFIG_IEEE802154 | 1 + .../default/CONFIG_IEEE802154_6LOWPAN | 1 + .../default/CONFIG_IEEE802154_ADF7242 | 1 + .../default/CONFIG_IEEE802154_AT86RF230 | 1 + .../default/CONFIG_IEEE802154_ATUSB | 1 + .../default/CONFIG_IEEE802154_CA8210 | 1 + .../default/CONFIG_IEEE802154_CC2520 | 1 + .../default/CONFIG_IEEE802154_DRIVERS | 1 + .../default/CONFIG_IEEE802154_HWSIM | 1 + .../default/CONFIG_IEEE802154_MCR20A | 1 + .../default/CONFIG_IEEE802154_MRF24J40 | 1 + .../CONFIG_IEEE802154_NL802154_EXPERIMENTAL | 1 + .../default/CONFIG_IEEE802154_SOCKET | 1 + .../default/CONFIG_IMA_DISABLE_HTABLE | 1 + .../L2-OPTIONAL/default/CONFIG_IMA_KEXEC | 1 + .../L2-OPTIONAL/default/CONFIG_INET_SCTP_DIAG | 1 + .../default/CONFIG_INET_TABLE_PERTURB_ORDER | 1 + 
.../L2-OPTIONAL/default/CONFIG_INET_TUNNEL | 1 + .../default/CONFIG_INET_XFRM_TUNNEL | 1 + .../CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS | 1 + .../default/CONFIG_INFINIBAND_BNXT_RE | 1 + .../default/CONFIG_INFINIBAND_CXGB4 | 1 + .../L2-OPTIONAL/default/CONFIG_INFINIBAND_EFA | 1 + .../CONFIG_INFINIBAND_IPOIB_DEBUG_DATA | 1 + .../default/CONFIG_INFINIBAND_IRDMA | 1 + .../default/CONFIG_INFINIBAND_OCRDMA | 1 + .../default/CONFIG_INFINIBAND_QEDR | 1 + .../default/CONFIG_INFINIBAND_USER_MEM | 1 + .../default/CONFIG_INFINIBAND_VIRT_DMA | 1 + .../configs/L2-OPTIONAL/default/CONFIG_INFTL | 1 + .../default/CONFIG_INITRAMFS_PRESERVE_MTIME | 1 + .../default/CONFIG_INIT_ENV_ARG_LIMIT | 1 + .../default/CONFIG_INIT_STACK_ALL_PATTERN | 1 + .../default/CONFIG_INIT_STACK_ALL_ZERO | 1 + .../default/CONFIG_INIT_STACK_NONE | 1 + .../L2-OPTIONAL/default/CONFIG_INPUT_EVBUG | 1 + .../default/CONFIG_INPUT_FF_MEMLESS | 1 + .../L2-OPTIONAL/default/CONFIG_INPUT_JOYSTICK | 1 + .../L2-OPTIONAL/default/CONFIG_INPUT_LEDS | 1 + .../default/CONFIG_INPUT_MATRIXKMAP | 1 + .../default/CONFIG_INPUT_MOUSEDEV_PSAUX | 1 + .../default/CONFIG_INPUT_MOUSEDEV_SCREEN_X | 1 + .../default/CONFIG_INPUT_MOUSEDEV_SCREEN_Y | 1 + .../default/CONFIG_INPUT_SPARSEKMAP | 1 + .../default/CONFIG_INTEGRITY_MACHINE_KEYRING | 1 + .../L2-OPTIONAL/default/CONFIG_INTEL_XWAY_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_INTERCONNECT | 1 + .../L2-OPTIONAL/default/CONFIG_INTERVAL_TREE | 1 + .../default/CONFIG_INTERVAL_TREE_TEST | 1 + .../L2-OPTIONAL/default/CONFIG_IOMMU_SVA | 1 + .../configs/L2-OPTIONAL/default/CONFIG_IONIC | 1 + .../configs/L2-OPTIONAL/default/CONFIG_IO_WQ | 1 + .../L2-OPTIONAL/default/CONFIG_IP5XXX_POWER | 1 + .../L2-OPTIONAL/default/CONFIG_IPACK_BUS | 1 + .../default/CONFIG_IPV6_IOAM6_LWTUNNEL | 1 + .../L2-OPTIONAL/default/CONFIG_IPVLAN_L3S | 1 + .../default/CONFIG_IP_MROUTE_COMMON | 1 + .../configs/L2-OPTIONAL/default/CONFIG_IP_PNP | 1 + .../default/CONFIG_IP_ROUTE_CLASSID | 1 + .../L2-OPTIONAL/default/CONFIG_IP_SCTP | 
1 + .../L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH | 1 + .../default/CONFIG_IP_VS_PROTO_AH_ESP | 1 + .../default/CONFIG_IP_VS_PROTO_ESP | 1 + .../default/CONFIG_IP_VS_PROTO_SCTP | 1 + .../default/CONFIG_IP_VS_PROTO_TCP | 1 + .../default/CONFIG_IP_VS_PROTO_UDP | 1 + .../L2-OPTIONAL/default/CONFIG_IP_VS_TWOS | 1 + .../L2-OPTIONAL/default/CONFIG_IRQSOFF_TRACER | 1 + .../L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN | 1 + .../default/CONFIG_IRQ_DOMAIN_HIERARCHY | 1 + .../default/CONFIG_IRQ_FORCED_THREADING | 1 + .../L2-OPTIONAL/default/CONFIG_IRQ_MSI_IOMMU | 1 + .../L2-OPTIONAL/default/CONFIG_IRQ_POLL | 1 + .../L2-OPTIONAL/default/CONFIG_IRQ_WORK | 1 + .../default/CONFIG_ISCSI_TARGET_CXGB4 | 1 + .../L2-OPTIONAL/default/CONFIG_JFFS2_FS | 1 + .../configs/L2-OPTIONAL/default/CONFIG_JFS_FS | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_JME | 1 + .../default/CONFIG_KALLSYMS_BASE_RELATIVE | 1 + .../default/CONFIG_KALLSYMS_SELFTEST | 1 + .../default/CONFIG_KEYBOARD_ADP5588 | 1 + .../default/CONFIG_KEYBOARD_ADP5589 | 1 + .../default/CONFIG_KEYBOARD_CYPRESS_SF | 1 + .../default/CONFIG_KEYBOARD_DLINK_DIR685 | 1 + .../default/CONFIG_KEYBOARD_GPIO_POLLED | 1 + .../L2-OPTIONAL/default/CONFIG_KEYBOARD_LKKBD | 1 + .../default/CONFIG_KEYBOARD_LM8323 | 1 + .../default/CONFIG_KEYBOARD_LM8333 | 1 + .../default/CONFIG_KEYBOARD_MATRIX | 1 + .../default/CONFIG_KEYBOARD_MAX7359 | 1 + .../L2-OPTIONAL/default/CONFIG_KEYBOARD_MCS | 1 + .../default/CONFIG_KEYBOARD_MPR121 | 1 + .../default/CONFIG_KEYBOARD_NEWTON | 1 + .../default/CONFIG_KEYBOARD_OPENCORES | 1 + .../default/CONFIG_KEYBOARD_QT1050 | 1 + .../default/CONFIG_KEYBOARD_QT1070 | 1 + .../default/CONFIG_KEYBOARD_QT2160 | 1 + .../default/CONFIG_KEYBOARD_SAMSUNG | 1 + .../default/CONFIG_KEYBOARD_STOWAWAY | 1 + .../default/CONFIG_KEYBOARD_SUNKBD | 1 + .../default/CONFIG_KEYBOARD_TCA6416 | 1 + .../default/CONFIG_KEYBOARD_TCA8418 | 1 + .../default/CONFIG_KEYBOARD_TM2_TOUCHKEY | 1 + .../L2-OPTIONAL/default/CONFIG_KEYBOARD_XTKBD | 1 + 
.../default/CONFIG_KPROBE_EVENT_GEN_TEST | 1 + .../configs/L2-OPTIONAL/default/CONFIG_KUNIT | 1 + .../CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT | 1 + .../CONFIG_KVM_GENERIC_HARDWARE_ENABLING | 1 + .../L2-OPTIONAL/default/CONFIG_KVM_MMIO | 1 + .../L2-OPTIONAL/default/CONFIG_KVM_VFIO | 1 + .../default/CONFIG_KVM_XFER_TO_GUEST_WORK | 1 + .../configs/L2-OPTIONAL/default/CONFIG_L2TP | 1 + .../L2-OPTIONAL/default/CONFIG_L2TP_DEBUGFS | 1 + .../L2-OPTIONAL/default/CONFIG_L2TP_ETH | 1 + .../L2-OPTIONAL/default/CONFIG_L2TP_IP | 1 + .../L2-OPTIONAL/default/CONFIG_L2TP_V3 | 1 + .../configs/L2-OPTIONAL/default/CONFIG_LAPB | 1 + .../L2-OPTIONAL/default/CONFIG_LATENCYTOP | 1 + .../default/CONFIG_LATTICE_ECP3_CONFIG | 1 + .../L2-OPTIONAL/default/CONFIG_LCD_AMS369FG06 | 1 + .../default/CONFIG_LCD_CLASS_DEVICE | 1 + .../L2-OPTIONAL/default/CONFIG_LCD_HX8357 | 1 + .../L2-OPTIONAL/default/CONFIG_LCD_ILI922X | 1 + .../L2-OPTIONAL/default/CONFIG_LCD_ILI9320 | 1 + .../default/CONFIG_LCD_L4F00242T03 | 1 + .../L2-OPTIONAL/default/CONFIG_LCD_LMS283GF05 | 1 + .../L2-OPTIONAL/default/CONFIG_LCD_LMS501KF03 | 1 + .../L2-OPTIONAL/default/CONFIG_LCD_LTV350QV | 1 + .../L2-OPTIONAL/default/CONFIG_LCD_OTM3225A | 1 + .../L2-OPTIONAL/default/CONFIG_LCD_PLATFORM | 1 + .../L2-OPTIONAL/default/CONFIG_LCD_TDO24M | 1 + .../L2-OPTIONAL/default/CONFIG_LCD_VGG2432A4 | 1 + .../L2-OPTIONAL/default/CONFIG_LDM_PARTITION | 1 + .../L2-OPTIONAL/default/CONFIG_LD_IS_BFD | 1 + .../L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN | 1 + .../default/CONFIG_LD_ORPHAN_WARN_LEVEL | 1 + .../L2-OPTIONAL/default/CONFIG_LD_VERSION | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_AW200XX | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_BD2606MVV | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_BD2802 | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_BLINKM | 1 + .../default/CONFIG_LEDS_BRIGHTNESS_HW_CHANGED | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_CLASS | 1 + .../default/CONFIG_LEDS_CLASS_MULTICOLOR | 1 + .../default/CONFIG_LEDS_DAC124S085 | 1 + 
.../L2-OPTIONAL/default/CONFIG_LEDS_GPIO | 1 + .../default/CONFIG_LEDS_IS31FL319X | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_LM3530 | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_LM3532 | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_LM355x | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_LM3642 | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_LP3944 | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_LP3952 | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_LP50XX | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_MLXREG | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_PCA9532 | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_PCA955X | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_PCA963X | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_PCA995X | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_PWM | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_TCA6507 | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_TLC591XX | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_TRIGGERS | 1 + .../default/CONFIG_LEDS_TRIGGER_ACTIVITY | 1 + .../default/CONFIG_LEDS_TRIGGER_BACKLIGHT | 1 + .../default/CONFIG_LEDS_TRIGGER_CAMERA | 1 + .../default/CONFIG_LEDS_TRIGGER_CPU | 1 + .../default/CONFIG_LEDS_TRIGGER_DEFAULT_ON | 1 + .../default/CONFIG_LEDS_TRIGGER_HEARTBEAT | 1 + .../default/CONFIG_LEDS_TRIGGER_MTD | 1 + .../default/CONFIG_LEDS_TRIGGER_NETDEV | 1 + .../default/CONFIG_LEDS_TRIGGER_ONESHOT | 1 + .../default/CONFIG_LEDS_TRIGGER_PANIC | 1 + .../default/CONFIG_LEDS_TRIGGER_PATTERN | 1 + .../default/CONFIG_LEDS_TRIGGER_TIMER | 1 + .../default/CONFIG_LEDS_TRIGGER_TRANSIENT | 1 + .../default/CONFIG_LEDS_TRIGGER_TTY | 1 + .../L2-OPTIONAL/default/CONFIG_LEDS_USER | 1 + .../default/CONFIG_LED_TRIGGER_PHY | 1 + .../default/CONFIG_LEGACY_DIRECT_IO | 1 + .../L2-OPTIONAL/default/CONFIG_LEGACY_PTYS | 1 + .../L2-OPTIONAL/default/CONFIG_LEGACY_TIOCSTI | 1 + .../L2-OPTIONAL/default/CONFIG_LIBCRC32C | 1 + .../configs/L2-OPTIONAL/default/CONFIG_LIBWX | 1 + .../L2-OPTIONAL/default/CONFIG_LIQUIDIO | 1 + .../L2-OPTIONAL/default/CONFIG_LIQUIDIO_CORE | 1 + .../L2-OPTIONAL/default/CONFIG_LIQUIDIO_VF | 1 + 
.../configs/L2-OPTIONAL/default/CONFIG_LKDTM | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_LLC | 1 + .../configs/L2-OPTIONAL/default/CONFIG_LLC2 | 1 + .../L2-OPTIONAL/default/CONFIG_LLD_VERSION | 1 + .../L2-OPTIONAL/default/CONFIG_LMK04832 | 1 + .../default/CONFIG_LOCK_DEBUGGING_SUPPORT | 1 + .../default/CONFIG_LOCK_MM_AND_FIND_VMA | 1 + .../default/CONFIG_LOCK_SPIN_ON_OWNER | 1 + .../L2-OPTIONAL/default/CONFIG_LOCK_STAT | 1 + .../default/CONFIG_LOCK_TORTURE_TEST | 1 + .../L2-OPTIONAL/default/CONFIG_LOGIG940_FF | 1 + .../default/CONFIG_LOGIRUMBLEPAD2_FF | 1 + .../L2-OPTIONAL/default/CONFIG_LOGITECH_FF | 1 + .../L2-OPTIONAL/default/CONFIG_LOGIWHEELS_FF | 1 + .../configs/L2-OPTIONAL/default/CONFIG_LOGO | 1 + .../default/CONFIG_LOGO_LINUX_CLUT224 | 1 + .../default/CONFIG_LOGO_LINUX_MONO | 1 + .../default/CONFIG_LOGO_LINUX_VGA16 | 1 + .../default/CONFIG_LSI_ET1011C_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_LXT_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_LZ4HC_COMPRESS | 1 + .../L2-OPTIONAL/default/CONFIG_LZ4_COMPRESS | 1 + .../L2-OPTIONAL/default/CONFIG_LZ4_DECOMPRESS | 1 + .../L2-OPTIONAL/default/CONFIG_LZO_COMPRESS | 1 + .../L2-OPTIONAL/default/CONFIG_LZO_DECOMPRESS | 1 + .../L2-OPTIONAL/default/CONFIG_MAC80211 | 1 + .../default/CONFIG_MAC80211_DEBUGFS | 1 + .../default/CONFIG_MAC80211_DEBUG_MENU | 1 + .../default/CONFIG_MAC80211_HAS_RC | 1 + .../L2-OPTIONAL/default/CONFIG_MAC80211_LEDS | 1 + .../L2-OPTIONAL/default/CONFIG_MAC80211_MESH | 1 + .../default/CONFIG_MAC80211_MESSAGE_TRACING | 1 + .../default/CONFIG_MAC80211_RC_DEFAULT | 1 + .../CONFIG_MAC80211_RC_DEFAULT_MINSTREL | 1 + .../default/CONFIG_MAC80211_RC_MINSTREL | 1 + .../default/CONFIG_MAC80211_STA_HASH_MAX_SIZE | 1 + .../L2-OPTIONAL/default/CONFIG_MAC802154 | 1 + .../L2-OPTIONAL/default/CONFIG_MAILBOX | 1 + .../L2-OPTIONAL/default/CONFIG_MANAGER_SBS | 1 + .../default/CONFIG_MARVELL_10G_PHY | 1 + .../default/CONFIG_MARVELL_88Q2XXX_PHY | 1 + .../default/CONFIG_MARVELL_88X2222_PHY | 1 + 
.../L2-OPTIONAL/default/CONFIG_MARVELL_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_MAX31827 | 1 + .../default/CONFIG_MAX63XX_WATCHDOG | 1 + .../L2-OPTIONAL/default/CONFIG_MAXLINEAR_GPHY | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_MCB | 1 + .../configs/L2-OPTIONAL/default/CONFIG_MCTP | 1 + .../configs/L2-OPTIONAL/default/CONFIG_MDIO | 1 + .../default/CONFIG_MDIO_BCM_UNIMAC | 1 + .../L2-OPTIONAL/default/CONFIG_MDIO_BITBANG | 1 + .../L2-OPTIONAL/default/CONFIG_MDIO_BUS | 1 + .../L2-OPTIONAL/default/CONFIG_MDIO_CAVIUM | 1 + .../L2-OPTIONAL/default/CONFIG_MDIO_DEVICE | 1 + .../L2-OPTIONAL/default/CONFIG_MDIO_DEVRES | 1 + .../L2-OPTIONAL/default/CONFIG_MDIO_I2C | 1 + .../L2-OPTIONAL/default/CONFIG_MDIO_MVUSB | 1 + .../L2-OPTIONAL/default/CONFIG_MDIO_THUNDER | 1 + .../L2-OPTIONAL/default/CONFIG_MD_BITMAP_FILE | 1 + .../default/CONFIG_MEDIATEK_GE_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_MEMCG_KMEM | 1 + .../L2-OPTIONAL/default/CONFIG_MEMFD_CREATE | 1 + .../default/CONFIG_MEMORY_ISOLATION | 1 + .../L2-OPTIONAL/default/CONFIG_MEMREGION | 1 + .../L2-OPTIONAL/default/CONFIG_MEMSTICK | 1 + .../L2-OPTIONAL/default/CONFIG_MEMSTICK_DEBUG | 1 + .../default/CONFIG_MEMSTICK_JMICRON_38X | 1 + .../L2-OPTIONAL/default/CONFIG_MEMSTICK_R592 | 1 + .../default/CONFIG_MEMSTICK_TIFM_MS | 1 + .../default/CONFIG_MEMSTICK_UNSAFE_RESUME | 1 + .../L2-OPTIONAL/default/CONFIG_MEMTEST | 1 + .../L2-OPTIONAL/default/CONFIG_MEN_A21_WDT | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_88PM800 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_88PM805 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_88PM860X | 1 + .../default/CONFIG_MFD_AAT2870_CORE | 1 + .../default/CONFIG_MFD_ARIZONA_I2C | 1 + .../default/CONFIG_MFD_ARIZONA_SPI | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_AS3711 | 1 + .../default/CONFIG_MFD_ATC260X_I2C | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_AXP20X_I2C | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_BCM590XX | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_BD9571MWV | 1 + .../default/CONFIG_MFD_CS42L43_I2C | 1 + 
.../L2-OPTIONAL/default/CONFIG_MFD_DA9052_I2C | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_DA9052_SPI | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_DA9055 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_DA9062 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_DA9063 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_DA9150 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_DLN2 | 1 + .../default/CONFIG_MFD_INTEL_M10_BMC_SPI | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_IQS62X | 1 + .../default/CONFIG_MFD_JANZ_CMODIO | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_KEMPLD | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_LM3533 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_LP3943 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_LP8788 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_MADERA | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_MAX14577 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_MAX77541 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_MAX77693 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_MAX77843 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_MAX8907 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_MAX8925 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_MAX8997 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_MAX8998 | 1 + .../default/CONFIG_MFD_MC13XXX_I2C | 1 + .../default/CONFIG_MFD_MC13XXX_SPI | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_MENF21BMC | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_MP2629 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_MT6360 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_MT6370 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_MT6397 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_OCELOT | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_PALMAS | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_PCF50633 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_RC5T583 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_RDC321X | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_RETU | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_RT4831 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_RT5033 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_RT5120 | 1 + .../default/CONFIG_MFD_SI476X_CORE | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_SKY81452 | 1 + 
.../L2-OPTIONAL/default/CONFIG_MFD_SMPRO | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_SY7636A | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_TI_LMU | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_TI_LP873X | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_TPS65086 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_TPS65090 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_TPS6586X | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_TPS65910 | 1 + .../default/CONFIG_MFD_TPS65912_I2C | 1 + .../default/CONFIG_MFD_TPS65912_SPI | 1 + .../default/CONFIG_MFD_TPS6594_I2C | 1 + .../default/CONFIG_MFD_TPS6594_SPI | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_TQMX86 | 1 + .../default/CONFIG_MFD_WL1273_CORE | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_WM831X_I2C | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_WM831X_SPI | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_WM8350_I2C | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_WM8400 | 1 + .../L2-OPTIONAL/default/CONFIG_MFD_WM8994 | 1 + .../L2-OPTIONAL/default/CONFIG_MHI_BUS | 1 + .../L2-OPTIONAL/default/CONFIG_MHI_BUS_EP | 1 + .../default/CONFIG_MHP_MEMMAP_ON_MEMORY | 1 + .../L2-OPTIONAL/default/CONFIG_MICREL_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_MICROCHIP_PHY | 1 + .../default/CONFIG_MICROCHIP_T1S_PHY | 1 + .../default/CONFIG_MICROCHIP_T1_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_MICROSEMI_PHY | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_MII | 1 + .../L2-OPTIONAL/default/CONFIG_MINIX_FS | 1 + .../L2-OPTIONAL/default/CONFIG_MISC_ALCOR_PCI | 1 + .../default/CONFIG_MLX4_INFINIBAND | 1 + .../L2-OPTIONAL/default/CONFIG_MLX5_EN_IPSEC | 1 + .../L2-OPTIONAL/default/CONFIG_MLX5_EN_TLS | 1 + .../L2-OPTIONAL/default/CONFIG_MLX5_MACSEC | 1 + .../L2-OPTIONAL/default/CONFIG_MLX5_VFIO_PCI | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_MMC | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_BLOCK | 1 + .../default/CONFIG_MMC_BLOCK_MINORS | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_CB710 | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_CQHCI | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_DEBUG | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_HSQ | 1 + 
.../L2-OPTIONAL/default/CONFIG_MMC_RICOH_MMC | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_SDHCI | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_SDHCI_ACPI | 1 + .../default/CONFIG_MMC_SDHCI_F_SDH30 | 1 + .../default/CONFIG_MMC_SDHCI_IO_ACCESSORS | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PCI | 1 + .../default/CONFIG_MMC_SDHCI_PLTFM | 1 + .../default/CONFIG_MMC_SDHCI_XENON | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_SPI | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_TEST | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_TIFM_SD | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_USDHI6ROL0 | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_USHC | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_VIA_SDMMC | 1 + .../L2-OPTIONAL/default/CONFIG_MMC_VUB300 | 1 + .../default/CONFIG_MMU_GATHER_RCU_TABLE_FREE | 1 + .../default/CONFIG_MMU_GATHER_TABLE_FREE | 1 + .../default/CONFIG_MMU_LAZY_TLB_REFCOUNT | 1 + .../L2-OPTIONAL/default/CONFIG_MMU_NOTIFIER | 1 + .../default/CONFIG_MODULES_TREE_LOOKUP | 1 + .../default/CONFIG_MODULES_USE_ELF_RELA | 1 + .../default/CONFIG_MODULE_ALLOW_BTF_MISMATCH | 1 + .../default/CONFIG_MODULE_COMPRESS_GZIP | 1 + .../default/CONFIG_MODULE_COMPRESS_NONE | 1 + .../default/CONFIG_MODULE_COMPRESS_XZ | 1 + .../default/CONFIG_MODULE_COMPRESS_ZSTD | 1 + .../L2-OPTIONAL/default/CONFIG_MODULE_DEBUG | 1 + .../default/CONFIG_MODULE_SIG_FORMAT | 1 + .../default/CONFIG_MODULE_SIG_KEY_TYPE_ECDSA | 1 + .../default/CONFIG_MODULE_SIG_KEY_TYPE_RSA | 1 + .../CONFIG_MODULE_UNLOAD_TAINT_TRACKING | 1 + .../configs/L2-OPTIONAL/default/CONFIG_MOST | 1 + .../L2-OPTIONAL/default/CONFIG_MOTORCOMM_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C | 1 + .../default/CONFIG_MOUSE_ELAN_I2C_I2C | 1 + .../L2-OPTIONAL/default/CONFIG_MOUSE_GPIO | 1 + .../default/CONFIG_MOUSE_SYNAPTICS_I2C | 1 + .../default/CONFIG_MOUSE_SYNAPTICS_USB | 1 + .../L2-OPTIONAL/default/CONFIG_MOXA_INTELLIO | 1 + .../L2-OPTIONAL/default/CONFIG_MOXA_SMARTIO | 1 + .../configs/L2-OPTIONAL/default/CONFIG_MPILIB | 1 + 
anolis/configs/L2-OPTIONAL/default/CONFIG_MRP | 1 + .../default/CONFIG_MSDOS_PARTITION | 1 + .../L2-OPTIONAL/default/CONFIG_MSE102X | 1 + .../L2-OPTIONAL/default/CONFIG_MSPRO_BLOCK | 1 + .../L2-OPTIONAL/default/CONFIG_MS_BLOCK | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_MTD | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_ABSENT | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_AR7_PARTS | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_BLKDEVS | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_BLOCK | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_BLOCK2MTD | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_BLOCK_RO | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_CFI_I1 | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_CFI_I2 | 1 + .../default/CONFIG_MTD_CMDLINE_PARTS | 1 + .../default/CONFIG_MTD_COMPLEX_MAPPINGS | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_DATAFLASH | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_DOCG3 | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_HYPERBUS | 1 + .../default/CONFIG_MTD_INTEL_VR_NOR | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_JEDECPROBE | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_LPDDR | 1 + .../default/CONFIG_MTD_MAP_BANK_WIDTH_1 | 1 + .../default/CONFIG_MTD_MAP_BANK_WIDTH_2 | 1 + .../default/CONFIG_MTD_MAP_BANK_WIDTH_4 | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_MCHP23K256 | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_MCHP48L640 | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_MTDRAM | 1 + .../default/CONFIG_MTD_NAND_ECC_MXIC | 1 + .../default/CONFIG_MTD_NAND_ECC_SW_BCH | 1 + .../default/CONFIG_MTD_NAND_ECC_SW_HAMMING | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_ONENAND | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_OOPS | 1 + .../default/CONFIG_MTD_PARTITIONED_MASTER | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_PHRAM | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_PLATRAM | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_PMC551 | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_RAM | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_RAW_NAND | 1 + .../default/CONFIG_MTD_REDBOOT_PARTS | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_ROM | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_SLRAM | 1 + 
.../L2-OPTIONAL/default/CONFIG_MTD_SPI_NAND | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_SPI_NOR | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_SST25L | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_SWAP | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_TESTS | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_UBI | 1 + .../default/CONFIG_MTD_UBI_BEB_LIMIT | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_UBI_BLOCK | 1 + .../default/CONFIG_MTD_UBI_FASTMAP | 1 + .../L2-OPTIONAL/default/CONFIG_MTD_UBI_GLUEBI | 1 + .../default/CONFIG_MTD_UBI_WL_THRESHOLD | 1 + .../default/CONFIG_MUTEX_SPIN_ON_OWNER | 1 + .../L2-OPTIONAL/default/CONFIG_NATIONAL_PHY | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_NCE | 1 + .../L2-OPTIONAL/default/CONFIG_NCN26000_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_ND_CLAIM | 1 + .../configs/L2-OPTIONAL/default/CONFIG_NE6X | 1 + .../configs/L2-OPTIONAL/default/CONFIG_NE6XVF | 1 + .../default/CONFIG_NEED_DMA_MAP_STATE | 1 + .../CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK | 1 + .../CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK | 1 + .../default/CONFIG_NEED_SG_DMA_FLAGS | 1 + .../default/CONFIG_NEED_SG_DMA_LENGTH | 1 + .../default/CONFIG_NETCONSOLE_EXTENDED_LOG | 1 + .../default/CONFIG_NETFILTER_BPF_LINK | 1 + .../default/CONFIG_NETFILTER_NETLINK_HOOK | 1 + .../default/CONFIG_NETFILTER_SKIP_EGRESS | 1 + .../default/CONFIG_NETFILTER_XTABLES_COMPAT | 1 + .../L2-OPTIONAL/default/CONFIG_NETPOLL | 1 + .../L2-OPTIONAL/default/CONFIG_NETXEN_NIC | 1 + .../configs/L2-OPTIONAL/default/CONFIG_NET_9P | 1 + .../L2-OPTIONAL/default/CONFIG_NET_DEVLINK | 1 + .../default/CONFIG_NET_DEV_REFCNT_TRACKER | 1 + .../L2-OPTIONAL/default/CONFIG_NET_DSA | 1 + .../L2-OPTIONAL/default/CONFIG_NET_EGRESS | 1 + .../L2-OPTIONAL/default/CONFIG_NET_FLOW_LIMIT | 1 + .../L2-OPTIONAL/default/CONFIG_NET_FOU | 1 + .../default/CONFIG_NET_FOU_IP_TUNNELS | 1 + .../L2-OPTIONAL/default/CONFIG_NET_HANDSHAKE | 1 + .../L2-OPTIONAL/default/CONFIG_NET_IFE | 1 + .../L2-OPTIONAL/default/CONFIG_NET_INGRESS | 1 + .../L2-OPTIONAL/default/CONFIG_NET_IPVTI | 1 + 
.../L2-OPTIONAL/default/CONFIG_NET_IP_TUNNEL | 1 + .../L2-OPTIONAL/default/CONFIG_NET_NCSI | 1 + .../default/CONFIG_NET_NS_REFCNT_TRACKER | 1 + .../default/CONFIG_NET_POLL_CONTROLLER | 1 + .../L2-OPTIONAL/default/CONFIG_NET_REDIRECT | 1 + .../default/CONFIG_NET_RX_BUSY_POLL | 1 + .../L2-OPTIONAL/default/CONFIG_NET_SB1000 | 1 + .../L2-OPTIONAL/default/CONFIG_NET_SCH_FIFO | 1 + .../default/CONFIG_NET_SCH_MQPRIO_LIB | 1 + .../L2-OPTIONAL/default/CONFIG_NET_SELFTESTS | 1 + .../L2-OPTIONAL/default/CONFIG_NET_SOCK_MSG | 1 + .../L2-OPTIONAL/default/CONFIG_NET_SWITCHDEV | 1 + .../L2-OPTIONAL/default/CONFIG_NET_UDP_TUNNEL | 1 + .../default/CONFIG_NET_VENDOR_3COM | 1 + .../default/CONFIG_NET_VENDOR_ADAPTEC | 1 + .../L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADI | 1 + .../default/CONFIG_NET_VENDOR_AGERE | 1 + .../default/CONFIG_NET_VENDOR_ALACRITECH | 1 + .../default/CONFIG_NET_VENDOR_ALTEON | 1 + .../default/CONFIG_NET_VENDOR_AMAZON | 1 + .../default/CONFIG_NET_VENDOR_AQUANTIA | 1 + .../L2-OPTIONAL/default/CONFIG_NET_VENDOR_ARC | 1 + .../default/CONFIG_NET_VENDOR_ASIX | 1 + .../default/CONFIG_NET_VENDOR_ATHEROS | 1 + .../default/CONFIG_NET_VENDOR_BZWX | 1 + .../default/CONFIG_NET_VENDOR_CADENCE | 1 + .../default/CONFIG_NET_VENDOR_CAVIUM | 1 + .../default/CONFIG_NET_VENDOR_CHELSIO | 1 + .../default/CONFIG_NET_VENDOR_CORTINA | 1 + .../default/CONFIG_NET_VENDOR_DAVICOM | 1 + .../default/CONFIG_NET_VENDOR_DLINK | 1 + .../default/CONFIG_NET_VENDOR_ENGLEDER | 1 + .../default/CONFIG_NET_VENDOR_EZCHIP | 1 + .../default/CONFIG_NET_VENDOR_FUNGIBLE | 1 + .../default/CONFIG_NET_VENDOR_GOOGLE | 1 + .../default/CONFIG_NET_VENDOR_I825XX | 1 + .../default/CONFIG_NET_VENDOR_LITEX | 1 + .../default/CONFIG_NET_VENDOR_MARVELL | 1 + .../default/CONFIG_NET_VENDOR_MICREL | 1 + .../default/CONFIG_NET_VENDOR_MICROCHIP | 1 + .../default/CONFIG_NET_VENDOR_MICROSEMI | 1 + .../default/CONFIG_NET_VENDOR_MICROSOFT | 1 + .../default/CONFIG_NET_VENDOR_MYRI | 1 + .../default/CONFIG_NET_VENDOR_NATSEMI | 1 + 
.../default/CONFIG_NET_VENDOR_NETERION | 1 + .../default/CONFIG_NET_VENDOR_NETRONOME | 1 + .../L2-OPTIONAL/default/CONFIG_NET_VENDOR_NI | 1 + .../default/CONFIG_NET_VENDOR_NVIDIA | 1 + .../L2-OPTIONAL/default/CONFIG_NET_VENDOR_OKI | 1 + .../default/CONFIG_NET_VENDOR_PACKET_ENGINES | 1 + .../default/CONFIG_NET_VENDOR_PENSANDO | 1 + .../default/CONFIG_NET_VENDOR_QLOGIC | 1 + .../L2-OPTIONAL/default/CONFIG_NET_VENDOR_RDC | 1 + .../default/CONFIG_NET_VENDOR_REALTEK | 1 + .../default/CONFIG_NET_VENDOR_RENESAS | 1 + .../default/CONFIG_NET_VENDOR_ROCKER | 1 + .../default/CONFIG_NET_VENDOR_SAMSUNG | 1 + .../default/CONFIG_NET_VENDOR_SEEQ | 1 + .../default/CONFIG_NET_VENDOR_SILAN | 1 + .../L2-OPTIONAL/default/CONFIG_NET_VENDOR_SIS | 1 + .../default/CONFIG_NET_VENDOR_SMSC | 1 + .../default/CONFIG_NET_VENDOR_SOCIONEXT | 1 + .../default/CONFIG_NET_VENDOR_STMICRO | 1 + .../L2-OPTIONAL/default/CONFIG_NET_VENDOR_SUN | 1 + .../default/CONFIG_NET_VENDOR_SYNOPSYS | 1 + .../default/CONFIG_NET_VENDOR_TEHUTI | 1 + .../L2-OPTIONAL/default/CONFIG_NET_VENDOR_TI | 1 + .../default/CONFIG_NET_VENDOR_VERTEXCOM | 1 + .../L2-OPTIONAL/default/CONFIG_NET_VENDOR_VIA | 1 + .../default/CONFIG_NET_VENDOR_WIZNET | 1 + .../default/CONFIG_NET_VENDOR_XILINX | 1 + .../L2-OPTIONAL/default/CONFIG_NEW_LEDS | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_NFC | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_NFP | 1 + .../default/CONFIG_NFP_APP_ABM_NIC | 1 + .../L2-OPTIONAL/default/CONFIG_NFP_APP_FLOWER | 1 + .../L2-OPTIONAL/default/CONFIG_NFP_DEBUG | 1 + .../L2-OPTIONAL/default/CONFIG_NFP_NET_IPSEC | 1 + .../L2-OPTIONAL/default/CONFIG_NFSD_V2 | 1 + .../L2-OPTIONAL/default/CONFIG_NFS_DEBUG | 1 + .../default/CONFIG_NFS_USE_KERNEL_DNS | 1 + .../CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN | 1 + .../default/CONFIG_NFS_V4_2_SSC_HELPER | 1 + .../default/CONFIG_NFS_V4_SECURITY_LABEL | 1 + .../configs/L2-OPTIONAL/default/CONFIG_NFTL | 1 + .../default/CONFIG_NFT_REJECT_IPV4 | 1 + .../default/CONFIG_NFT_REJECT_NETDEV | 1 + 
.../default/CONFIG_NF_CONNTRACK_OVS | 1 + .../L2-OPTIONAL/default/CONFIG_NF_DEFRAG_IPV4 | 1 + .../default/CONFIG_NF_FLOW_TABLE_PROCFS | 1 + .../L2-OPTIONAL/default/CONFIG_NF_LOG_SYSLOG | 1 + .../L2-OPTIONAL/default/CONFIG_NF_NAT_OVS | 1 + .../L2-OPTIONAL/default/CONFIG_NF_TPROXY_IPV4 | 1 + .../L2-OPTIONAL/default/CONFIG_NILFS2_FS | 1 + .../default/CONFIG_NL80211_TESTMODE | 1 + .../configs/L2-OPTIONAL/default/CONFIG_NLATTR | 1 + .../default/CONFIG_NLS_CODEPAGE_1250 | 1 + .../default/CONFIG_NLS_CODEPAGE_1251 | 1 + .../default/CONFIG_NLS_CODEPAGE_437 | 1 + .../default/CONFIG_NLS_CODEPAGE_737 | 1 + .../default/CONFIG_NLS_CODEPAGE_775 | 1 + .../default/CONFIG_NLS_CODEPAGE_850 | 1 + .../default/CONFIG_NLS_CODEPAGE_852 | 1 + .../default/CONFIG_NLS_CODEPAGE_855 | 1 + .../default/CONFIG_NLS_CODEPAGE_857 | 1 + .../default/CONFIG_NLS_CODEPAGE_860 | 1 + .../default/CONFIG_NLS_CODEPAGE_861 | 1 + .../default/CONFIG_NLS_CODEPAGE_862 | 1 + .../default/CONFIG_NLS_CODEPAGE_863 | 1 + .../default/CONFIG_NLS_CODEPAGE_864 | 1 + .../default/CONFIG_NLS_CODEPAGE_865 | 1 + .../default/CONFIG_NLS_CODEPAGE_866 | 1 + .../default/CONFIG_NLS_CODEPAGE_869 | 1 + .../default/CONFIG_NLS_CODEPAGE_874 | 1 + .../default/CONFIG_NLS_CODEPAGE_932 | 1 + .../default/CONFIG_NLS_CODEPAGE_949 | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_ISO8859_1 | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_ISO8859_13 | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_ISO8859_14 | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_ISO8859_15 | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_ISO8859_2 | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_ISO8859_3 | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_ISO8859_4 | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_ISO8859_5 | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_ISO8859_6 | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_ISO8859_7 | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_ISO8859_8 | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_ISO8859_9 | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_KOI8_R | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_KOI8_U | 1 + 
.../L2-OPTIONAL/default/CONFIG_NLS_MAC_CELTIC | 1 + .../default/CONFIG_NLS_MAC_CENTEURO | 1 + .../default/CONFIG_NLS_MAC_CROATIAN | 1 + .../default/CONFIG_NLS_MAC_CYRILLIC | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_MAC_GAELIC | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_MAC_GREEK | 1 + .../default/CONFIG_NLS_MAC_ICELAND | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_MAC_INUIT | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMAN | 1 + .../default/CONFIG_NLS_MAC_ROMANIAN | 1 + .../default/CONFIG_NLS_MAC_TURKISH | 1 + .../L2-OPTIONAL/default/CONFIG_NLS_UCS2_UTILS | 1 + .../L2-OPTIONAL/default/CONFIG_NOP_TRACER | 1 + .../L2-OPTIONAL/default/CONFIG_NOP_USB_XCEIV | 1 + .../default/CONFIG_NOTIFIER_ERROR_INJECTION | 1 + .../default/CONFIG_NOUVEAU_DEBUG_MMU | 1 + .../default/CONFIG_NOUVEAU_DEBUG_PUSH | 1 + .../L2-OPTIONAL/default/CONFIG_NO_HZ_COMMON | 1 + .../L2-OPTIONAL/default/CONFIG_NTB_EPF | 1 + .../L2-OPTIONAL/default/CONFIG_NTB_IDT | 1 + .../L2-OPTIONAL/default/CONFIG_NTB_MSI | 1 + .../L2-OPTIONAL/default/CONFIG_NTB_PERF | 1 + .../L2-OPTIONAL/default/CONFIG_NTB_PINGPONG | 1 + .../L2-OPTIONAL/default/CONFIG_NTB_SWITCHTEC | 1 + .../L2-OPTIONAL/default/CONFIG_NTB_TOOL | 1 + .../L2-OPTIONAL/default/CONFIG_NTB_TRANSPORT | 1 + .../default/CONFIG_NTFS3_64BIT_CLUSTER | 1 + .../L2-OPTIONAL/default/CONFIG_NTFS3_FS | 1 + .../default/CONFIG_NTFS3_FS_POSIX_ACL | 1 + .../default/CONFIG_NTFS3_LZX_XPRESS | 1 + .../L2-OPTIONAL/default/CONFIG_NULL_TTY | 1 + .../default/CONFIG_NUMA_KEEP_MEMINFO | 1 + .../L2-OPTIONAL/default/CONFIG_NVDIMM_DAX | 1 + .../L2-OPTIONAL/default/CONFIG_NVDIMM_PFN | 1 + .../default/CONFIG_NVDIMM_SECURITY_TEST | 1 + .../default/CONFIG_NVMEM_LAYOUT_ONIE_TLV | 1 + .../default/CONFIG_NVMEM_LAYOUT_SL28_VPD | 1 + .../L2-OPTIONAL/default/CONFIG_NVMEM_RMEM | 1 + .../L2-OPTIONAL/default/CONFIG_NVME_AUTH | 1 + .../L2-OPTIONAL/default/CONFIG_NVME_HWMON | 1 + .../L2-OPTIONAL/default/CONFIG_NVME_MULTIPATH | 1 + .../L2-OPTIONAL/default/CONFIG_NVME_TARGET | 1 + 
.../default/CONFIG_NVME_TARGET_AUTH | 1 + .../L2-OPTIONAL/default/CONFIG_NVME_TARGET_FC | 1 + .../default/CONFIG_NVME_TARGET_FCLOOP | 1 + .../default/CONFIG_NVME_TARGET_LOOP | 1 + .../default/CONFIG_NVME_TARGET_PASSTHRU | 1 + .../default/CONFIG_NVME_TARGET_RDMA | 1 + .../default/CONFIG_NVME_TARGET_TCP | 1 + .../default/CONFIG_NVME_VERBOSE_ERRORS | 1 + .../default/CONFIG_NXP_C45_TJA11XX_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_NXP_CBTX_PHY | 1 + .../default/CONFIG_NXP_TJA11XX_PHY | 1 + .../configs/L2-OPTIONAL/default/CONFIG_N_GSM | 1 + .../configs/L2-OPTIONAL/default/CONFIG_N_HDLC | 1 + .../configs/L2-OPTIONAL/default/CONFIG_OBJAGG | 1 + .../L2-OPTIONAL/default/CONFIG_OCFS2_FS | 1 + .../L2-OPTIONAL/default/CONFIG_OID_REGISTRY | 1 + .../default/CONFIG_OLD_SIGSUSPEND3 | 1 + .../L2-OPTIONAL/default/CONFIG_OMFS_FS | 1 + .../L2-OPTIONAL/default/CONFIG_ORANGEFS_FS | 1 + .../default/CONFIG_OVERLAY_FS_DEBUG | 1 + .../L2-OPTIONAL/default/CONFIG_PACKING | 1 + .../configs/L2-OPTIONAL/default/CONFIG_PADATA | 1 + .../L2-OPTIONAL/default/CONFIG_PAGE_COUNTER | 1 + .../L2-OPTIONAL/default/CONFIG_PAGE_POOL | 1 + .../default/CONFIG_PAGE_POOL_STATS | 1 + .../default/CONFIG_PAGE_SIZE_LESS_THAN_256KB | 1 + .../default/CONFIG_PAGE_SIZE_LESS_THAN_64KB | 1 + .../default/CONFIG_PAGE_TABLE_CHECK | 1 + .../default/CONFIG_PAHOLE_HAS_LANG_EXCLUDE | 1 + .../default/CONFIG_PAHOLE_HAS_SPLIT_BTF | 1 + .../L2-OPTIONAL/default/CONFIG_PAHOLE_VERSION | 1 + .../default/CONFIG_PANIC_ON_OOPS_VALUE | 1 + .../L2-OPTIONAL/default/CONFIG_PANTHERLORD_FF | 1 + .../configs/L2-OPTIONAL/default/CONFIG_PARMAN | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_ACPI | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_ALI | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_AMD | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_ARTOP | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_ATIIXP | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_ATP867X | 1 + .../default/CONFIG_PATA_CMD640_PCI | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_CMD64X | 1 + 
.../L2-OPTIONAL/default/CONFIG_PATA_CYPRESS | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_EFAR | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_HPT366 | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_HPT37X | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_HPT3X2N | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_HPT3X3 | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_IT8213 | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_IT821X | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_JMICRON | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_LEGACY | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_MARVELL | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_MPIIX | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_NETCELL | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_NINJA32 | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_NS87410 | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_NS87415 | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_OLDPIIX | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_OPTI | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_OPTIDMA | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_PDC2027X | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_PDC_OLD | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_RADISYS | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_RDC | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_RZ1000 | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_SCH | 1 + .../default/CONFIG_PATA_SERVERWORKS | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_SIL680 | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_SIS | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_TIMINGS | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_TOSHIBA | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_TRIFLEX | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_VIA | 1 + .../L2-OPTIONAL/default/CONFIG_PATA_WINBOND | 1 + .../L2-OPTIONAL/default/CONFIG_PC300TOO | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_PCC | 1 + .../configs/L2-OPTIONAL/default/CONFIG_PCCARD | 1 + .../L2-OPTIONAL/default/CONFIG_PCI200SYN | 1 + .../L2-OPTIONAL/default/CONFIG_PCIEAER_INJECT | 1 + .../default/CONFIG_PCIEASPM_PERFORMANCE | 1 + .../default/CONFIG_PCIEASPM_POWERSAVE | 1 + .../default/CONFIG_PCIEASPM_POWER_SUPERSAVE | 1 + 
.../default/CONFIG_PCIE_DW_PLAT_HOST | 1 + .../L2-OPTIONAL/default/CONFIG_PCIE_PME | 1 + .../L2-OPTIONAL/default/CONFIG_PCIE_PTM | 1 + .../L2-OPTIONAL/default/CONFIG_PCIPCWATCHDOG | 1 + .../L2-OPTIONAL/default/CONFIG_PCI_ATS | 1 + .../L2-OPTIONAL/default/CONFIG_PCI_DEBUG | 1 + .../L2-OPTIONAL/default/CONFIG_PCI_DOE | 1 + .../L2-OPTIONAL/default/CONFIG_PCI_DOMAINS | 1 + .../L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT | 1 + .../default/CONFIG_PCI_ENDPOINT_TEST | 1 + .../L2-OPTIONAL/default/CONFIG_PCI_LABEL | 1 + .../L2-OPTIONAL/default/CONFIG_PCI_MESON | 1 + .../L2-OPTIONAL/default/CONFIG_PCI_P2PDMA | 1 + .../default/CONFIG_PCI_REALLOC_ENABLE_AUTO | 1 + .../default/CONFIG_PCI_SW_SWITCHTEC | 1 + .../configs/L2-OPTIONAL/default/CONFIG_PCMCIA | 1 + .../default/CONFIG_PCPU_DEV_REFCNT | 1 + .../L2-OPTIONAL/default/CONFIG_PCS_XPCS | 1 + .../L2-OPTIONAL/default/CONFIG_PDC_ADMA | 1 + .../configs/L2-OPTIONAL/default/CONFIG_PECI | 1 + .../L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK | 1 + .../default/CONFIG_PER_VMA_LOCK_STATS | 1 + .../L2-OPTIONAL/default/CONFIG_PHANTOM | 1 + .../configs/L2-OPTIONAL/default/CONFIG_PHONET | 1 + .../L2-OPTIONAL/default/CONFIG_PHYLINK | 1 + .../default/CONFIG_PHYS_ADDR_T_64BIT | 1 + .../default/CONFIG_PHY_CAN_TRANSCEIVER | 1 + .../default/CONFIG_PHY_PXA_28NM_HSIC | 1 + .../default/CONFIG_PHY_PXA_28NM_USB2 | 1 + .../L2-OPTIONAL/default/CONFIG_PINCONF | 1 + .../L2-OPTIONAL/default/CONFIG_PINCTRL | 1 + .../L2-OPTIONAL/default/CONFIG_PINCTRL_AMD | 1 + .../default/CONFIG_PINCTRL_CY8C95X0 | 1 + .../default/CONFIG_PINCTRL_MCP23S08 | 1 + .../L2-OPTIONAL/default/CONFIG_PINCTRL_SX150X | 1 + .../configs/L2-OPTIONAL/default/CONFIG_PINMUX | 1 + .../configs/L2-OPTIONAL/default/CONFIG_PLDMFW | 1 + .../L2-OPTIONAL/default/CONFIG_PLX_DMA | 1 + .../L2-OPTIONAL/default/CONFIG_PMIC_ADP5520 | 1 + .../L2-OPTIONAL/default/CONFIG_PMIC_DA903X | 1 + .../configs/L2-OPTIONAL/default/CONFIG_PM_CLK | 1 + .../L2-OPTIONAL/default/CONFIG_PM_DEVFREQ | 1 + 
.../default/CONFIG_PM_USERSPACE_AUTOSLEEP | 1 + .../L2-OPTIONAL/default/CONFIG_PNFS_BLOCK | 1 + .../default/CONFIG_PNFS_FILE_LAYOUT | 1 + .../default/CONFIG_PNFS_FLEXFILE_LAYOUT | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_PNP | 1 + .../L2-OPTIONAL/default/CONFIG_PNPACPI | 1 + .../default/CONFIG_POSIX_CPU_TIMERS_TASK_WORK | 1 + .../L2-OPTIONAL/default/CONFIG_POWER_SUPPLY | 1 + .../default/CONFIG_POWER_SUPPLY_DEBUG | 1 + .../default/CONFIG_POWER_SUPPLY_HWMON | 1 + .../L2-OPTIONAL/default/CONFIG_PPPOATM | 1 + .../default/CONFIG_PPPOE_HASH_BITS | 1 + .../default/CONFIG_PPPOE_HASH_BITS_1 | 1 + .../default/CONFIG_PPPOE_HASH_BITS_2 | 1 + .../default/CONFIG_PPPOE_HASH_BITS_4 | 1 + .../default/CONFIG_PPPOE_HASH_BITS_8 | 1 + .../L2-OPTIONAL/default/CONFIG_PPPOL2TP | 1 + .../L2-OPTIONAL/default/CONFIG_PPP_ASYNC | 1 + .../L2-OPTIONAL/default/CONFIG_PPP_BSDCOMP | 1 + .../L2-OPTIONAL/default/CONFIG_PPP_DEFLATE | 1 + .../L2-OPTIONAL/default/CONFIG_PPP_FILTER | 1 + .../L2-OPTIONAL/default/CONFIG_PPP_MPPE | 1 + .../L2-OPTIONAL/default/CONFIG_PPP_MULTILINK | 1 + .../L2-OPTIONAL/default/CONFIG_PPP_SYNC_TTY | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_PPS | 1 + .../default/CONFIG_PPS_CLIENT_GPIO | 1 + .../default/CONFIG_PPS_CLIENT_KTIMER | 1 + .../default/CONFIG_PPS_CLIENT_LDISC | 1 + .../L2-OPTIONAL/default/CONFIG_PPS_DEBUG | 1 + .../configs/L2-OPTIONAL/default/CONFIG_PPTP | 1 + .../default/CONFIG_PREEMPTIRQ_DELAY_TEST | 1 + .../L2-OPTIONAL/default/CONFIG_PREEMPT_COUNT | 1 + .../default/CONFIG_PREEMPT_NOTIFIERS | 1 + .../default/CONFIG_PREVENT_FIRMWARE_BUILD | 1 + .../L2-OPTIONAL/default/CONFIG_PRIME_NUMBERS | 1 + .../L2-OPTIONAL/default/CONFIG_PRINTK_CALLER | 1 + .../L2-OPTIONAL/default/CONFIG_PROBE_EVENTS | 1 + .../default/CONFIG_PROC_CPU_RESCTRL | 1 + .../default/CONFIG_PROC_PID_CPUSET | 1 + .../L2-OPTIONAL/default/CONFIG_PROVE_LOCKING | 1 + .../L2-OPTIONAL/default/CONFIG_PSTORE_FTRACE | 1 + .../L2-OPTIONAL/default/CONFIG_PSTORE_PMSG | 1 + 
.../L2-OPTIONAL/default/CONFIG_PTDUMP_DEBUGFS | 1 + .../L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK | 1 + .../default/CONFIG_PTP_1588_CLOCK_IDT82P33 | 1 + .../default/CONFIG_PTP_1588_CLOCK_IDTCM | 1 + .../default/CONFIG_PTP_1588_CLOCK_INES | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_PWM | 1 + .../L2-OPTIONAL/default/CONFIG_PWM_DEBUG | 1 + .../L2-OPTIONAL/default/CONFIG_PWM_PCA9685 | 1 + .../L2-OPTIONAL/default/CONFIG_PWM_SYSFS | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_QED | 1 + .../configs/L2-OPTIONAL/default/CONFIG_QEDE | 1 + .../configs/L2-OPTIONAL/default/CONFIG_QEDF | 1 + .../configs/L2-OPTIONAL/default/CONFIG_QEDI | 1 + .../L2-OPTIONAL/default/CONFIG_QED_FCOE | 1 + .../L2-OPTIONAL/default/CONFIG_QED_ISCSI | 1 + .../L2-OPTIONAL/default/CONFIG_QED_LL2 | 1 + .../L2-OPTIONAL/default/CONFIG_QED_OOO | 1 + .../L2-OPTIONAL/default/CONFIG_QED_RDMA | 1 + .../L2-OPTIONAL/default/CONFIG_QED_SRIOV | 1 + .../L2-OPTIONAL/default/CONFIG_QLA3XXX | 1 + .../configs/L2-OPTIONAL/default/CONFIG_QLCNIC | 1 + .../L2-OPTIONAL/default/CONFIG_QNX4FS_FS | 1 + .../L2-OPTIONAL/default/CONFIG_QNX6FS_FS | 1 + .../configs/L2-OPTIONAL/default/CONFIG_QRTR | 1 + .../L2-OPTIONAL/default/CONFIG_QSEMI_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_QUEUED_RWLOCKS | 1 + .../default/CONFIG_QUEUED_SPINLOCKS | 1 + .../L2-OPTIONAL/default/CONFIG_QUOTACTL | 1 + .../L2-OPTIONAL/default/CONFIG_QUOTA_TREE | 1 + .../configs/L2-OPTIONAL/default/CONFIG_R8169 | 1 + .../default/CONFIG_RANDOM32_SELFTEST | 1 + .../default/CONFIG_RANDOM_KMALLOC_CACHES | 1 + .../L2-OPTIONAL/default/CONFIG_RAPIDIO | 1 + .../L2-OPTIONAL/default/CONFIG_RATIONAL | 1 + .../L2-OPTIONAL/default/CONFIG_RBTREE_TEST | 1 + .../default/CONFIG_RCU_CPU_STALL_CPUTIME | 1 + .../default/CONFIG_RCU_EXP_CPU_STALL_TIMEOUT | 1 + .../L2-OPTIONAL/default/CONFIG_RCU_LAZY | 1 + .../default/CONFIG_RCU_NEED_SEGCBLIST | 1 + .../default/CONFIG_RCU_NOCB_CPU_DEFAULT_ALL | 1 + .../default/CONFIG_RCU_STALL_COMMON | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_RDS 
| 1 + .../L2-OPTIONAL/default/CONFIG_REALTEK_AUTOPM | 1 + .../L2-OPTIONAL/default/CONFIG_REALTEK_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_REED_SOLOMON | 1 + .../default/CONFIG_REED_SOLOMON_DEC8 | 1 + .../default/CONFIG_REED_SOLOMON_ENC8 | 1 + .../default/CONFIG_REED_SOLOMON_TEST | 1 + .../configs/L2-OPTIONAL/default/CONFIG_REGMAP | 1 + .../L2-OPTIONAL/default/CONFIG_REGMAP_I2C | 1 + .../L2-OPTIONAL/default/CONFIG_REGMAP_SPI | 1 + .../L2-OPTIONAL/default/CONFIG_REISERFS_FS | 1 + .../L2-OPTIONAL/default/CONFIG_REMOTEPROC | 1 + .../L2-OPTIONAL/default/CONFIG_REMOTE_TARGET | 1 + .../L2-OPTIONAL/default/CONFIG_RENESAS_PHY | 1 + .../default/CONFIG_RESET_ATTACK_MITIGATION | 1 + .../default/CONFIG_RESET_TI_SYSCON | 1 + .../default/CONFIG_RESET_TI_TPS380X | 1 + .../L2-OPTIONAL/default/CONFIG_RFD_FTL | 1 + .../configs/L2-OPTIONAL/default/CONFIG_RFKILL | 1 + .../L2-OPTIONAL/default/CONFIG_RFKILL_INPUT | 1 + .../L2-OPTIONAL/default/CONFIG_RFKILL_LEDS | 1 + .../L2-OPTIONAL/default/CONFIG_RFS_ACCEL | 1 + .../L2-OPTIONAL/default/CONFIG_RING_BUFFER | 1 + .../default/CONFIG_RING_BUFFER_BENCHMARK | 1 + .../default/CONFIG_RING_BUFFER_STARTUP_TEST | 1 + .../CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS | 1 + .../L2-OPTIONAL/default/CONFIG_RMI4_2D_SENSOR | 1 + .../L2-OPTIONAL/default/CONFIG_RMI4_CORE | 1 + .../L2-OPTIONAL/default/CONFIG_RMI4_F03 | 1 + .../L2-OPTIONAL/default/CONFIG_RMI4_F03_SERIO | 1 + .../L2-OPTIONAL/default/CONFIG_RMI4_F11 | 1 + .../L2-OPTIONAL/default/CONFIG_RMI4_F12 | 1 + .../L2-OPTIONAL/default/CONFIG_RMI4_F30 | 1 + .../L2-OPTIONAL/default/CONFIG_RMI4_F3A | 1 + .../L2-OPTIONAL/default/CONFIG_RMI4_F55 | 1 + .../L2-OPTIONAL/default/CONFIG_RMI4_I2C | 1 + .../L2-OPTIONAL/default/CONFIG_RMI4_SMB | 1 + .../L2-OPTIONAL/default/CONFIG_ROCKCHIP_PHY | 1 + .../configs/L2-OPTIONAL/default/CONFIG_ROCKER | 1 + .../L2-OPTIONAL/default/CONFIG_ROMFS_FS | 1 + .../CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1 | 1 + .../CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 | 1 + 
.../CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA | 1 + .../default/CONFIG_RPMSG_QCOM_GLINK_RPM | 1 + .../L2-OPTIONAL/default/CONFIG_RPMSG_VIRTIO | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DEBUG | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_ABEOZ9 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_BQ32K | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1286 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1302 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307 | 1 + .../default/CONFIG_RTC_DRV_DS1307_CENTURY | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1374 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1511 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1553 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1672 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1742 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_DS2404 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232 | 1 + .../default/CONFIG_RTC_DRV_DS3232_HWMON | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_EM3027 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_FM3130 | 1 + .../default/CONFIG_RTC_DRV_FTRTC010 | 1 + .../default/CONFIG_RTC_DRV_GOLDFISH | 1 + .../default/CONFIG_RTC_DRV_ISL12022 | 1 + .../default/CONFIG_RTC_DRV_ISL1208 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80 | 1 + .../default/CONFIG_RTC_DRV_M41T80_WDT | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T35 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T59 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T86 | 1 + .../default/CONFIG_RTC_DRV_MAX6900 | 1 + .../default/CONFIG_RTC_DRV_MAX6916 | 1 + .../default/CONFIG_RTC_DRV_MSM6242 | 1 + .../default/CONFIG_RTC_DRV_PCF8523 | 1 + .../default/CONFIG_RTC_DRV_PCF85363 | 1 + .../default/CONFIG_RTC_DRV_PCF8563 | 1 + .../default/CONFIG_RTC_DRV_PCF8583 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_RP5C01 | 1 + .../default/CONFIG_RTC_DRV_RS5C372 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3028 | 1 + .../default/CONFIG_RTC_DRV_RV3029C2 | 1 + .../default/CONFIG_RTC_DRV_RV3029_HWMON | 1 + 
.../L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3032 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_RV8803 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_RX6110 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8025 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8581 | 1 + .../default/CONFIG_RTC_DRV_S35390A | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_SD3078 | 1 + .../default/CONFIG_RTC_DRV_STK17TA8 | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_TEST | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_DRV_X1205 | 1 + .../default/CONFIG_RTC_I2C_AND_SPI | 1 + .../default/CONFIG_RTC_INTF_DEV_UIE_EMUL | 1 + .../L2-OPTIONAL/default/CONFIG_RTC_LIB | 1 + .../L2-OPTIONAL/default/CONFIG_RT_MUTEXES | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_RV | 1 + .../default/CONFIG_RWSEM_SPIN_ON_OWNER | 1 + .../L2-OPTIONAL/default/CONFIG_SAMPLES | 1 + .../default/CONFIG_SATA_ACARD_AHCI | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_DWC | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_HOST | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_INIC162X | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_MV | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_NV | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_PROMISE | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_QSTOR | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_SIL | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_SIL24 | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_SIS | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_SVW | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_SX4 | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_ULI | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_VIA | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_VITESSE | 1 + .../L2-OPTIONAL/default/CONFIG_SATA_ZPODD | 1 + .../L2-OPTIONAL/default/CONFIG_SBITMAP | 1 + .../default/CONFIG_SCF_TORTURE_TEST | 1 + .../L2-OPTIONAL/default/CONFIG_SCHED_HRTICK | 1 + .../L2-OPTIONAL/default/CONFIG_SCHED_MM_CID | 1 + .../default/CONFIG_SCHED_STACK_END_CHECK | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_3W_9XXX | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_3W_SAS | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_ACARD | 1 + 
.../L2-OPTIONAL/default/CONFIG_SCSI_ADVANSYS | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_AIC79XX | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_AIC7XXX | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_AIC94XX | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_AM53C974 | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_ARCMSR | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_BFA_FC | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_BUSLOGIC | 1 + .../default/CONFIG_SCSI_CHELSIO_FCOE | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_COMMON | 1 + .../default/CONFIG_SCSI_CXGB3_ISCSI | 1 + .../default/CONFIG_SCSI_CXGB4_ISCSI | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_DC395x | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_DH_ALUA | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_DH_EMC | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_DH_HP_SW | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_DH_RDAC | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_DMX3191D | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_EFCT | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_ESAS2R | 1 + .../default/CONFIG_SCSI_FDOMAIN_PCI | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_HPSA | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_HPTIOP | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_INIA100 | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_INITIO | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_IPS | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_LPFC | 1 + .../default/CONFIG_SCSI_LPFC_DEBUG_FS | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_MOD | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_MVSAS | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_MVUMI | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_MYRB | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_MYRS | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_NETLINK | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_PM8001 | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_PMCRAID | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_QLA_FC | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_QLA_ISCSI | 1 + .../default/CONFIG_SCSI_QLOGIC_1280 | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_SNIC | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_STEX | 1 + 
.../default/CONFIG_SCSI_SYM53C8XX_2 | 1 + .../L2-OPTIONAL/default/CONFIG_SCSI_WD719X | 1 + .../default/CONFIG_SCTP_COOKIE_HMAC_MD5 | 1 + .../default/CONFIG_SCTP_COOKIE_HMAC_SHA1 | 1 + .../default/CONFIG_SCTP_DBG_OBJCNT | 1 + .../CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 | 1 + .../CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE | 1 + .../CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 | 1 + .../L2-OPTIONAL/default/CONFIG_SDIO_UART | 1 + .../default/CONFIG_SECCOMP_CACHE_DEBUG | 1 + .../default/CONFIG_SECURITY_APPARMOR | 1 + .../default/CONFIG_SECURITY_LANDLOCK | 1 + .../default/CONFIG_SECURITY_LOADPIN | 1 + .../default/CONFIG_SECURITY_LOCKDOWN_LSM | 1 + .../default/CONFIG_SECURITY_SAFESETID | 1 + .../default/CONFIG_SECURITY_SELINUX_DEBUG | 1 + .../default/CONFIG_SECURITY_TOMOYO | 1 + .../L2-OPTIONAL/default/CONFIG_SECURITY_YAMA | 1 + .../default/CONFIG_SENSORS_ACBEL_FSG032 | 1 + .../default/CONFIG_SENSORS_ADM1177 | 1 + .../default/CONFIG_SENSORS_ADM1266 | 1 + .../default/CONFIG_SENSORS_ADT7310 | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_AHT10 | 1 + .../CONFIG_SENSORS_AQUACOMPUTER_D5NEXT | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_AS370 | 1 + .../default/CONFIG_SENSORS_AXI_FAN_CONTROL | 1 + .../default/CONFIG_SENSORS_BEL_PFE | 1 + .../default/CONFIG_SENSORS_BPA_RS600 | 1 + .../default/CONFIG_SENSORS_CORSAIR_CPRO | 1 + .../default/CONFIG_SENSORS_CORSAIR_PSU | 1 + .../default/CONFIG_SENSORS_DELTA_AHE50DC_FAN | 1 + .../default/CONFIG_SENSORS_DPS920AB | 1 + .../default/CONFIG_SENSORS_DRIVETEMP | 1 + .../default/CONFIG_SENSORS_EMC2103 | 1 + .../default/CONFIG_SENSORS_EMC2305 | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_FSP_3Y | 1 + .../default/CONFIG_SENSORS_FTSTEUTATES | 1 + .../default/CONFIG_SENSORS_HIH6130 | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_HS3001 | 1 + .../default/CONFIG_SENSORS_IBM_CFFPS | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_INA238 | 1 + .../default/CONFIG_SENSORS_INA3221 | 1 + .../default/CONFIG_SENSORS_INSPUR_IPSPS | 1 + .../default/CONFIG_SENSORS_IR35221 | 1 + 
.../default/CONFIG_SENSORS_IR36021 | 1 + .../default/CONFIG_SENSORS_IR38064 | 1 + .../default/CONFIG_SENSORS_IRPS5401 | 1 + .../default/CONFIG_SENSORS_ISL68137 | 1 + .../default/CONFIG_SENSORS_LT7182S | 1 + .../default/CONFIG_SENSORS_LTC2947_I2C | 1 + .../default/CONFIG_SENSORS_LTC2947_SPI | 1 + .../default/CONFIG_SENSORS_LTC2990 | 1 + .../default/CONFIG_SENSORS_LTC2992 | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_MAX127 | 1 + .../default/CONFIG_SENSORS_MAX15301 | 1 + .../default/CONFIG_SENSORS_MAX16601 | 1 + .../default/CONFIG_SENSORS_MAX20730 | 1 + .../default/CONFIG_SENSORS_MAX31722 | 1 + .../default/CONFIG_SENSORS_MAX31730 | 1 + .../default/CONFIG_SENSORS_MAX31760 | 1 + .../default/CONFIG_SENSORS_MAX31785 | 1 + .../default/CONFIG_SENSORS_MAX6620 | 1 + .../default/CONFIG_SENSORS_MAX6621 | 1 + .../default/CONFIG_SENSORS_MC34VR500 | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_MP2888 | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_MP2975 | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_MP5023 | 1 + .../default/CONFIG_SENSORS_MPQ7932 | 1 + .../default/CONFIG_SENSORS_MR75203 | 1 + .../default/CONFIG_SENSORS_NCT6775_I2C | 1 + .../default/CONFIG_SENSORS_NPCM7XX | 1 + .../default/CONFIG_SENSORS_NZXT_KRAKEN2 | 1 + .../default/CONFIG_SENSORS_NZXT_SMART2 | 1 + .../default/CONFIG_SENSORS_OCC_P8_I2C | 1 + .../default/CONFIG_SENSORS_PIM4328 | 1 + .../default/CONFIG_SENSORS_PLI1209BC | 1 + .../default/CONFIG_SENSORS_PM6764TR | 1 + .../default/CONFIG_SENSORS_PXE1610 | 1 + .../default/CONFIG_SENSORS_Q54SJ108A2 | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_SBRMI | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_SBTSI | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_SHT3x | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_SHT4x | 1 + .../default/CONFIG_SENSORS_STPDDC60 | 1 + .../default/CONFIG_SENSORS_STTS751 | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_TC654 | 1 + .../default/CONFIG_SENSORS_TDA38640 | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_TMP108 | 1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_TMP464 | 
1 + .../L2-OPTIONAL/default/CONFIG_SENSORS_TMP513 | 1 + .../default/CONFIG_SENSORS_TPS23861 | 1 + .../default/CONFIG_SENSORS_TPS53679 | 1 + .../default/CONFIG_SENSORS_TPS546D24 | 1 + .../default/CONFIG_SENSORS_W83773G | 1 + .../default/CONFIG_SENSORS_XDPE122 | 1 + .../default/CONFIG_SENSORS_XDPE152 | 1 + .../L2-OPTIONAL/default/CONFIG_SERIAL_8250 | 1 + .../default/CONFIG_SERIAL_8250_CONSOLE | 1 + .../CONFIG_SERIAL_8250_DEPRECATED_OPTIONS | 1 + .../default/CONFIG_SERIAL_8250_DETECT_IRQ | 1 + .../default/CONFIG_SERIAL_8250_DMA | 1 + .../L2-OPTIONAL/default/CONFIG_SERIAL_8250_DW | 1 + .../default/CONFIG_SERIAL_8250_DWLIB | 1 + .../default/CONFIG_SERIAL_8250_EXAR | 1 + .../default/CONFIG_SERIAL_8250_EXTENDED | 1 + .../default/CONFIG_SERIAL_8250_FINTEK | 1 + .../default/CONFIG_SERIAL_8250_MANY_PORTS | 1 + .../default/CONFIG_SERIAL_8250_NR_UARTS | 1 + .../default/CONFIG_SERIAL_8250_PCI | 1 + .../default/CONFIG_SERIAL_8250_PCI1XXXX | 1 + .../default/CONFIG_SERIAL_8250_PCILIB | 1 + .../default/CONFIG_SERIAL_8250_PERICOM | 1 + .../default/CONFIG_SERIAL_8250_PNP | 1 + .../default/CONFIG_SERIAL_8250_RSA | 1 + .../default/CONFIG_SERIAL_8250_RUNTIME_UARTS | 1 + .../default/CONFIG_SERIAL_8250_SHARE_IRQ | 1 + .../default/CONFIG_SERIAL_ALTERA_JTAGUART | 1 + .../default/CONFIG_SERIAL_ALTERA_UART | 1 + .../L2-OPTIONAL/default/CONFIG_SERIAL_CORE | 1 + .../default/CONFIG_SERIAL_CORE_CONSOLE | 1 + .../L2-OPTIONAL/default/CONFIG_SERIAL_DEV_BUS | 1 + .../default/CONFIG_SERIAL_EARLYCON | 1 + .../default/CONFIG_SERIAL_FSL_LINFLEXUART | 1 + .../default/CONFIG_SERIAL_FSL_LPUART | 1 + .../default/CONFIG_SERIAL_KGDB_NMI | 1 + .../L2-OPTIONAL/default/CONFIG_SERIAL_MAX3100 | 1 + .../L2-OPTIONAL/default/CONFIG_SERIAL_MAX310X | 1 + .../default/CONFIG_SERIAL_MCTRL_GPIO | 1 + .../default/CONFIG_SERIAL_NONSTANDARD | 1 + .../L2-OPTIONAL/default/CONFIG_SERIAL_RP2 | 1 + .../default/CONFIG_SERIAL_SC16IS7XX | 1 + .../L2-OPTIONAL/default/CONFIG_SERIAL_SCCNXP | 1 + .../L2-OPTIONAL/default/CONFIG_SERIAL_SPRD 
| 1 + .../default/CONFIG_SERIAL_UARTLITE | 1 + .../L2-OPTIONAL/default/CONFIG_SERIO_GPIO_PS2 | 1 + .../L2-OPTIONAL/default/CONFIG_SERIO_PCIPS2 | 1 + .../L2-OPTIONAL/default/CONFIG_SERIO_PS2MULT | 1 + .../L2-OPTIONAL/default/CONFIG_SFC_SIENA | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_SFP | 1 + .../L2-OPTIONAL/default/CONFIG_SF_PDMA | 1 + .../L2-OPTIONAL/default/CONFIG_SGL_ALLOC | 1 + .../L2-OPTIONAL/default/CONFIG_SG_POOL | 1 + .../L2-OPTIONAL/default/CONFIG_SHRINKER_DEBUG | 1 + .../L2-OPTIONAL/default/CONFIG_SIGNATURE | 1 + .../configs/L2-OPTIONAL/default/CONFIG_SIOX | 1 + .../L2-OPTIONAL/default/CONFIG_SKB_EXTENSIONS | 1 + .../default/CONFIG_SLAB_DEPRECATED | 1 + .../configs/L2-OPTIONAL/default/CONFIG_SLHC | 1 + .../L2-OPTIONAL/default/CONFIG_SLIMBUS | 1 + .../default/CONFIG_SLIP_COMPRESSED | 1 + .../default/CONFIG_SLIP_MODE_SLIP6 | 1 + .../L2-OPTIONAL/default/CONFIG_SLIP_SMART | 1 + .../default/CONFIG_SMARTJOYPLUS_FF | 1 + .../configs/L2-OPTIONAL/default/CONFIG_SMBFS | 1 + .../L2-OPTIONAL/default/CONFIG_SMB_SERVER | 1 + .../L2-OPTIONAL/default/CONFIG_SMSC_PHY | 1 + .../configs/L2-OPTIONAL/default/CONFIG_SM_FTL | 1 + .../default/CONFIG_SOCK_CGROUP_DATA | 1 + .../default/CONFIG_SOCK_RX_QUEUE_MAPPING | 1 + .../default/CONFIG_SOCK_VALIDATE_XMIT | 1 + .../configs/L2-OPTIONAL/default/CONFIG_SOC_TI | 1 + .../default/CONFIG_SOFTIRQ_ON_OWN_STACK | 1 + .../L2-OPTIONAL/default/CONFIG_SONY_FF | 1 + .../configs/L2-OPTIONAL/default/CONFIG_SOUND | 1 + .../L2-OPTIONAL/default/CONFIG_SOUNDWIRE | 1 + .../default/CONFIG_SPARSEMEM_EXTREME | 1 + .../default/CONFIG_SPARSEMEM_VMEMMAP_ENABLE | 1 + .../L2-OPTIONAL/default/CONFIG_SPARSE_IRQ | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_ALTERA | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_AMD | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_AX88796C | 1 + .../default/CONFIG_SPI_AXI_SPI_ENGINE | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_BITBANG | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_DYNAMIC | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_GPIO | 1 + 
.../default/CONFIG_SPI_LOOPBACK_TEST | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_MASTER | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_MEM | 1 + .../default/CONFIG_SPI_MICROCHIP_CORE | 1 + .../default/CONFIG_SPI_MICROCHIP_CORE_QSPI | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_MUX | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_MXIC | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_OC_TINY | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_PCI1XXXX | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_PXA2XX | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_SC18IS602 | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_SIFIVE | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_SLAVE | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_SPIDEV | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_TLE62X0 | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_XCOMM | 1 + .../L2-OPTIONAL/default/CONFIG_SPI_XILINX | 1 + .../default/CONFIG_SPLIT_PTLOCK_CPUS | 1 + .../configs/L2-OPTIONAL/default/CONFIG_SPMI | 1 + .../CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT | 1 + .../CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI | 1 + ...ONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU | 1 + .../CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE | 1 + .../configs/L2-OPTIONAL/default/CONFIG_SRAM | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_SSB | 1 + .../L2-OPTIONAL/default/CONFIG_SSB_POSSIBLE | 1 + .../configs/L2-OPTIONAL/default/CONFIG_SSFDC | 1 + .../L2-OPTIONAL/default/CONFIG_STACKDEPOT | 1 + .../default/CONFIG_STACKTRACE_BUILD_ID | 1 + .../L2-OPTIONAL/default/CONFIG_STANDALONE | 1 + .../L2-OPTIONAL/default/CONFIG_STE10XP | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_STP | 1 + .../default/CONFIG_STRING_SELFTEST | 1 + .../default/CONFIG_SUNRPC_BACKCHANNEL | 1 + .../L2-OPTIONAL/default/CONFIG_SUNRPC_GSS | 1 + .../default/CONFIG_SURFACE_3_POWER_OPREGION | 1 + .../L2-OPTIONAL/default/CONFIG_SURFACE_GPE | 1 + .../default/CONFIG_SURFACE_HOTPLUG | 1 + .../default/CONFIG_SURFACE_PLATFORMS | 1 + .../default/CONFIG_SURFACE_PRO3_BUTTON | 1 + .../L2-OPTIONAL/default/CONFIG_SWIOTLB | 1 + .../default/CONFIG_SWIOTLB_DYNAMIC | 1 + 
.../configs/L2-OPTIONAL/default/CONFIG_SWPHY | 1 + .../L2-OPTIONAL/default/CONFIG_SW_SYNC | 1 + .../default/CONFIG_SYNTH_EVENT_GEN_TEST | 1 + .../default/CONFIG_SYSCTL_EXCEPTION_TRACE | 1 + .../configs/L2-OPTIONAL/default/CONFIG_SYSFB | 1 + .../CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE | 1 + .../default/CONFIG_SYSV68_PARTITION | 1 + .../L2-OPTIONAL/default/CONFIG_SYSVIPC_COMPAT | 1 + .../L2-OPTIONAL/default/CONFIG_SYSV_FS | 1 + .../L2-OPTIONAL/default/CONFIG_TASKS_RCU | 1 + .../default/CONFIG_TASKS_RCU_GENERIC | 1 + .../L2-OPTIONAL/default/CONFIG_TASKS_RUDE_RCU | 1 + .../default/CONFIG_TASKS_TRACE_RCU | 1 + .../L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C | 1 + .../default/CONFIG_TCG_TIS_I2C_CR50 | 1 + .../default/CONFIG_TCG_TIS_ST33ZP24_SPI | 1 + .../L2-OPTIONAL/default/CONFIG_TCG_VTPM_PROXY | 1 + .../configs/L2-OPTIONAL/default/CONFIG_TCM_FC | 1 + .../L2-OPTIONAL/default/CONFIG_TCM_QLA2XXX | 1 + .../L2-OPTIONAL/default/CONFIG_TERANETICS_PHY | 1 + .../default/CONFIG_TEST_ASYNC_DRIVER_PROBE | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_BITMAP | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_BITOPS | 1 + .../default/CONFIG_TEST_BLACKHOLE_DEV | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_DHRY | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_DIV64 | 1 + .../default/CONFIG_TEST_DYNAMIC_DEBUG | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_FIRMWARE | 1 + .../default/CONFIG_TEST_FREE_PAGES | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_HEXDUMP | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_IDA | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_KMOD | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_KSTRTOX | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_LKM | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_LOCKUP | 1 + .../default/CONFIG_TEST_MAPLE_TREE | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_MEMCAT_P | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_MEMINIT | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_MIN_HEAP | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_OBJAGG | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_PARMAN | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_POWER | 1 + 
.../L2-OPTIONAL/default/CONFIG_TEST_PRINTF | 1 + .../default/CONFIG_TEST_REF_TRACKER | 1 + .../default/CONFIG_TEST_RHASHTABLE | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_SCANF | 1 + .../default/CONFIG_TEST_STATIC_KEYS | 1 + .../default/CONFIG_TEST_STRING_HELPERS | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_SYSCTL | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_UDELAY | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_USER_COPY | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_UUID | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_VMALLOC | 1 + .../L2-OPTIONAL/default/CONFIG_TEST_XARRAY | 1 + .../L2-OPTIONAL/default/CONFIG_TEXTSEARCH | 1 + .../L2-OPTIONAL/default/CONFIG_TEXTSEARCH_BM | 1 + .../L2-OPTIONAL/default/CONFIG_TEXTSEARCH_FSM | 1 + .../L2-OPTIONAL/default/CONFIG_TEXTSEARCH_KMP | 1 + .../default/CONFIG_THERMAL_EMULATION | 1 + .../default/CONFIG_THERMAL_STATISTICS | 1 + .../L2-OPTIONAL/default/CONFIG_THP_SWAP | 1 + .../default/CONFIG_THREAD_INFO_IN_TASK | 1 + .../default/CONFIG_THRUSTMASTER_FF | 1 + .../L2-OPTIONAL/default/CONFIG_TICK_ONESHOT | 1 + .../L2-OPTIONAL/default/CONFIG_TIFM_CORE | 1 + .../default/CONFIG_TINYDRM_HX8357D | 1 + .../default/CONFIG_TINYDRM_ILI9163 | 1 + .../default/CONFIG_TINYDRM_ILI9225 | 1 + .../default/CONFIG_TINYDRM_ILI9341 | 1 + .../default/CONFIG_TINYDRM_ILI9486 | 1 + .../default/CONFIG_TINYDRM_MI0283QT | 1 + .../default/CONFIG_TINYDRM_REPAPER | 1 + .../L2-OPTIONAL/default/CONFIG_TINYDRM_ST7586 | 1 + .../default/CONFIG_TINYDRM_ST7735R | 1 + .../configs/L2-OPTIONAL/default/CONFIG_TIPC | 1 + .../L2-OPTIONAL/default/CONFIG_TIPC_CRYPTO | 1 + .../L2-OPTIONAL/default/CONFIG_TIPC_DIAG | 1 + .../L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_IB | 1 + .../L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_UDP | 1 + .../configs/L2-OPTIONAL/default/CONFIG_TI_ST | 1 + .../L2-OPTIONAL/default/CONFIG_TMPFS_QUOTA | 1 + .../default/CONFIG_TOOLS_SUPPORT_RELR | 1 + .../L2-OPTIONAL/default/CONFIG_TPS6105X | 1 + .../L2-OPTIONAL/default/CONFIG_TPS65010 | 1 + .../L2-OPTIONAL/default/CONFIG_TPS6507X | 1 + 
.../default/CONFIG_TRACEPOINT_BENCHMARK | 1 + .../default/CONFIG_TRACER_MAX_TRACE | 1 + .../L2-OPTIONAL/default/CONFIG_TRACE_CLOCK | 1 + .../default/CONFIG_TRACE_EVAL_MAP_FILE | 1 + .../default/CONFIG_TRACE_EVENT_INJECT | 1 + .../default/CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT | 1 + .../default/CONFIG_TRACE_IRQFLAGS_SUPPORT | 1 + .../L2-OPTIONAL/default/CONFIG_TRACING | 1 + .../default/CONFIG_TRACING_SUPPORT | 1 + .../L2-OPTIONAL/default/CONFIG_TREE_RCU | 1 + .../L2-OPTIONAL/default/CONFIG_TREE_SRCU | 1 + .../configs/L2-OPTIONAL/default/CONFIG_TSNEP | 1 + .../default/CONFIG_TUN_VNET_CROSS_LE | 1 + .../L2-OPTIONAL/default/CONFIG_TWL4030_CORE | 1 + .../L2-OPTIONAL/default/CONFIG_TWL6040_CORE | 1 + .../configs/L2-OPTIONAL/default/CONFIG_TYPEC | 1 + .../L2-OPTIONAL/default/CONFIG_TYPEC_ANX7411 | 1 + .../default/CONFIG_TYPEC_DP_ALTMODE | 1 + .../default/CONFIG_TYPEC_HD3SS3220 | 1 + .../default/CONFIG_TYPEC_MUX_FSA4480 | 1 + .../default/CONFIG_TYPEC_MUX_GPIO_SBU | 1 + .../default/CONFIG_TYPEC_MUX_NB7VPQ904M | 1 + .../default/CONFIG_TYPEC_MUX_PI3USB30532 | 1 + .../default/CONFIG_TYPEC_NVIDIA_ALTMODE | 1 + .../L2-OPTIONAL/default/CONFIG_TYPEC_RT1719 | 1 + .../default/CONFIG_TYPEC_STUSB160X | 1 + .../L2-OPTIONAL/default/CONFIG_TYPEC_TCPCI | 1 + .../L2-OPTIONAL/default/CONFIG_TYPEC_TCPM | 1 + .../L2-OPTIONAL/default/CONFIG_TYPEC_TPS6598X | 1 + .../L2-OPTIONAL/default/CONFIG_TYPEC_UCSI | 1 + .../L2-OPTIONAL/default/CONFIG_TYPEC_WUSB3801 | 1 + .../configs/L2-OPTIONAL/default/CONFIG_UACCE | 1 + .../L2-OPTIONAL/default/CONFIG_UBIFS_FS | 1 + .../L2-OPTIONAL/default/CONFIG_UCS2_STRING | 1 + .../L2-OPTIONAL/default/CONFIG_UCSI_ACPI | 1 + .../L2-OPTIONAL/default/CONFIG_UCSI_CCG | 1 + .../L2-OPTIONAL/default/CONFIG_UCSI_STM32G0 | 1 + .../L2-OPTIONAL/default/CONFIG_UDMABUF | 1 + .../L2-OPTIONAL/default/CONFIG_UEFI_CPER | 1 + .../configs/L2-OPTIONAL/default/CONFIG_UFS_FS | 1 + .../configs/L2-OPTIONAL/default/CONFIG_UHID | 1 + .../default/CONFIG_ULTRIX_PARTITION | 1 + 
.../L2-OPTIONAL/default/CONFIG_UNICODE | 1 + .../default/CONFIG_UNINLINE_SPIN_UNLOCK | 1 + .../L2-OPTIONAL/default/CONFIG_UNIX_SCM | 1 + .../configs/L2-OPTIONAL/default/CONFIG_USB4 | 1 + .../L2-OPTIONAL/default/CONFIG_USBIP_CORE | 1 + .../L2-OPTIONAL/default/CONFIG_USBPCWATCHDOG | 1 + .../L2-OPTIONAL/default/CONFIG_USB_ACM | 1 + .../L2-OPTIONAL/default/CONFIG_USB_ADUTUX | 1 + .../L2-OPTIONAL/default/CONFIG_USB_ALI_M5632 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_AN2720 | 1 + .../default/CONFIG_USB_ANNOUNCE_NEW_DEVICES | 1 + .../default/CONFIG_USB_APPLEDISPLAY | 1 + .../default/CONFIG_USB_ARCH_HAS_HCD | 1 + .../L2-OPTIONAL/default/CONFIG_USB_ARMLINUX | 1 + .../L2-OPTIONAL/default/CONFIG_USB_ATM | 1 + .../default/CONFIG_USB_AUTOSUSPEND_DELAY | 1 + .../L2-OPTIONAL/default/CONFIG_USB_BELKIN | 1 + .../L2-OPTIONAL/default/CONFIG_USB_C67X00_HCD | 1 + .../L2-OPTIONAL/default/CONFIG_USB_CATC | 1 + .../default/CONFIG_USB_CDNS_SUPPORT | 1 + .../L2-OPTIONAL/default/CONFIG_USB_CHIPIDEA | 1 + .../L2-OPTIONAL/default/CONFIG_USB_COMMON | 1 + .../L2-OPTIONAL/default/CONFIG_USB_CONN_GPIO | 1 + .../L2-OPTIONAL/default/CONFIG_USB_CXACRU | 1 + .../default/CONFIG_USB_CYPRESS_CY7C63 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_CYTHERM | 1 + .../default/CONFIG_USB_DEFAULT_PERSIST | 1 + .../L2-OPTIONAL/default/CONFIG_USB_DWC2 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_DWC3 | 1 + .../default/CONFIG_USB_DYNAMIC_MINORS | 1 + .../L2-OPTIONAL/default/CONFIG_USB_EHCI_FSL | 1 + .../L2-OPTIONAL/default/CONFIG_USB_EHCI_HCD | 1 + .../L2-OPTIONAL/default/CONFIG_USB_EHCI_PCI | 1 + .../default/CONFIG_USB_EHCI_ROOT_HUB_TT | 1 + .../default/CONFIG_USB_EHCI_TT_NEWSCHED | 1 + .../default/CONFIG_USB_EHSET_TEST_FIXTURE | 1 + .../L2-OPTIONAL/default/CONFIG_USB_EMI26 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_EMI62 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_EPSON2888 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_EZUSB_FX2 | 1 + .../default/CONFIG_USB_FEW_INIT_RETRIES | 1 + .../L2-OPTIONAL/default/CONFIG_USB_GADGET | 1 + 
.../L2-OPTIONAL/default/CONFIG_USB_GPIO_VBUS | 1 + .../L2-OPTIONAL/default/CONFIG_USB_HCD_BCMA | 1 + .../default/CONFIG_USB_HCD_TEST_MODE | 1 + .../L2-OPTIONAL/default/CONFIG_USB_HID | 1 + .../L2-OPTIONAL/default/CONFIG_USB_HIDDEV | 1 + .../default/CONFIG_USB_HSIC_USB3503 | 1 + .../default/CONFIG_USB_HSIC_USB4604 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_HSO | 1 + .../default/CONFIG_USB_HUB_USB251XB | 1 + .../L2-OPTIONAL/default/CONFIG_USB_IDMOUSE | 1 + .../L2-OPTIONAL/default/CONFIG_USB_IOWARRIOR | 1 + .../L2-OPTIONAL/default/CONFIG_USB_IPHETH | 1 + .../L2-OPTIONAL/default/CONFIG_USB_ISIGHTFW | 1 + .../default/CONFIG_USB_ISP116X_HCD | 1 + .../L2-OPTIONAL/default/CONFIG_USB_ISP1301 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_ISP1760 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_KAWETH | 1 + .../L2-OPTIONAL/default/CONFIG_USB_KC2190 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_LAN78XX | 1 + .../L2-OPTIONAL/default/CONFIG_USB_LCD | 1 + .../configs/L2-OPTIONAL/default/CONFIG_USB_LD | 1 + .../default/CONFIG_USB_LEDS_TRIGGER_USBPORT | 1 + .../L2-OPTIONAL/default/CONFIG_USB_LED_TRIG | 1 + .../L2-OPTIONAL/default/CONFIG_USB_LEGOTOWER | 1 + .../default/CONFIG_USB_LINK_LAYER_TEST | 1 + .../default/CONFIG_USB_MAX3421_HCD | 1 + .../L2-OPTIONAL/default/CONFIG_USB_MDC800 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_MICROTEK | 1 + .../L2-OPTIONAL/default/CONFIG_USB_MON | 1 + .../L2-OPTIONAL/default/CONFIG_USB_MUSB_HDRC | 1 + .../L2-OPTIONAL/default/CONFIG_USB_NET_AQC111 | 1 + .../default/CONFIG_USB_NET_AX88179_178A | 1 + .../default/CONFIG_USB_NET_AX8817X | 1 + .../default/CONFIG_USB_NET_CDCETHER | 1 + .../default/CONFIG_USB_NET_CDC_EEM | 1 + .../default/CONFIG_USB_NET_CDC_MBIM | 1 + .../default/CONFIG_USB_NET_CDC_NCM | 1 + .../default/CONFIG_USB_NET_CDC_SUBSET | 1 + .../default/CONFIG_USB_NET_CDC_SUBSET_ENABLE | 1 + .../L2-OPTIONAL/default/CONFIG_USB_NET_CH9200 | 1 + .../default/CONFIG_USB_NET_CX82310_ETH | 1 + .../L2-OPTIONAL/default/CONFIG_USB_NET_DM9601 | 1 + 
.../L2-OPTIONAL/default/CONFIG_USB_NET_GL620A | 1 + .../default/CONFIG_USB_NET_HUAWEI_CDC_NCM | 1 + .../default/CONFIG_USB_NET_INT51X1 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_NET_KALMIA | 1 + .../default/CONFIG_USB_NET_MCS7830 | 1 + .../default/CONFIG_USB_NET_NET1080 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_NET_PLUSB | 1 + .../default/CONFIG_USB_NET_QMI_WWAN | 1 + .../default/CONFIG_USB_NET_RNDIS_HOST | 1 + .../default/CONFIG_USB_NET_SMSC75XX | 1 + .../default/CONFIG_USB_NET_SMSC95XX | 1 + .../L2-OPTIONAL/default/CONFIG_USB_NET_SR9800 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_NET_ZAURUS | 1 + .../L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD | 1 + .../default/CONFIG_USB_OHCI_HCD_PCI | 1 + .../default/CONFIG_USB_OHCI_HCD_PLATFORM | 1 + .../default/CONFIG_USB_OHCI_LITTLE_ENDIAN | 1 + .../L2-OPTIONAL/default/CONFIG_USB_OTG | 1 + .../default/CONFIG_USB_OTG_PRODUCTLIST | 1 + .../default/CONFIG_USB_OXU210HP_HCD | 1 + .../L2-OPTIONAL/default/CONFIG_USB_PEGASUS | 1 + .../L2-OPTIONAL/default/CONFIG_USB_PRINTER | 1 + .../default/CONFIG_USB_R8A66597_HCD | 1 + .../default/CONFIG_USB_ROLE_SWITCH | 1 + .../L2-OPTIONAL/default/CONFIG_USB_RTL8150 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_RTL8152 | 1 + .../default/CONFIG_USB_RTL8153_ECM | 1 + .../L2-OPTIONAL/default/CONFIG_USB_SERIAL | 1 + .../default/CONFIG_USB_SERIAL_AIRCABLE | 1 + .../default/CONFIG_USB_SERIAL_ARK3116 | 1 + .../default/CONFIG_USB_SERIAL_BELKIN | 1 + .../default/CONFIG_USB_SERIAL_CH341 | 1 + .../default/CONFIG_USB_SERIAL_CP210X | 1 + .../default/CONFIG_USB_SERIAL_CYBERJACK | 1 + .../default/CONFIG_USB_SERIAL_CYPRESS_M8 | 1 + .../default/CONFIG_USB_SERIAL_DEBUG | 1 + .../default/CONFIG_USB_SERIAL_DIGI_ACCELEPORT | 1 + .../default/CONFIG_USB_SERIAL_EDGEPORT | 1 + .../default/CONFIG_USB_SERIAL_EDGEPORT_TI | 1 + .../default/CONFIG_USB_SERIAL_EMPEG | 1 + .../default/CONFIG_USB_SERIAL_F81232 | 1 + .../default/CONFIG_USB_SERIAL_F8153X | 1 + .../default/CONFIG_USB_SERIAL_FTDI_SIO | 1 + .../default/CONFIG_USB_SERIAL_GARMIN | 1 + 
.../default/CONFIG_USB_SERIAL_GENERIC | 1 + .../default/CONFIG_USB_SERIAL_IPAQ | 1 + .../L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPW | 1 + .../L2-OPTIONAL/default/CONFIG_USB_SERIAL_IR | 1 + .../L2-OPTIONAL/default/CONFIG_USB_SERIAL_IUU | 1 + .../default/CONFIG_USB_SERIAL_KEYSPAN | 1 + .../default/CONFIG_USB_SERIAL_KEYSPAN_PDA | 1 + .../default/CONFIG_USB_SERIAL_KLSI | 1 + .../default/CONFIG_USB_SERIAL_KOBIL_SCT | 1 + .../default/CONFIG_USB_SERIAL_MCT_U232 | 1 + .../default/CONFIG_USB_SERIAL_METRO | 1 + .../default/CONFIG_USB_SERIAL_MOS7720 | 1 + .../default/CONFIG_USB_SERIAL_MOS7840 | 1 + .../default/CONFIG_USB_SERIAL_MXUPORT | 1 + .../default/CONFIG_USB_SERIAL_NAVMAN | 1 + .../default/CONFIG_USB_SERIAL_OMNINET | 1 + .../default/CONFIG_USB_SERIAL_OPTICON | 1 + .../default/CONFIG_USB_SERIAL_OPTION | 1 + .../default/CONFIG_USB_SERIAL_OTI6858 | 1 + .../default/CONFIG_USB_SERIAL_PL2303 | 1 + .../default/CONFIG_USB_SERIAL_QCAUX | 1 + .../L2-OPTIONAL/default/CONFIG_USB_SERIAL_QT2 | 1 + .../default/CONFIG_USB_SERIAL_QUALCOMM | 1 + .../default/CONFIG_USB_SERIAL_SAFE | 1 + .../default/CONFIG_USB_SERIAL_SAFE_PADDED | 1 + .../default/CONFIG_USB_SERIAL_SIERRAWIRELESS | 1 + .../default/CONFIG_USB_SERIAL_SPCP8X5 | 1 + .../default/CONFIG_USB_SERIAL_SSU100 | 1 + .../default/CONFIG_USB_SERIAL_SYMBOL | 1 + .../L2-OPTIONAL/default/CONFIG_USB_SERIAL_TI | 1 + .../default/CONFIG_USB_SERIAL_UPD78F0730 | 1 + .../default/CONFIG_USB_SERIAL_VISOR | 1 + .../default/CONFIG_USB_SERIAL_WHITEHEAT | 1 + .../default/CONFIG_USB_SERIAL_WISHBONE | 1 + .../default/CONFIG_USB_SERIAL_WWAN | 1 + .../L2-OPTIONAL/default/CONFIG_USB_SERIAL_XR | 1 + .../default/CONFIG_USB_SERIAL_XSENS_MT | 1 + .../L2-OPTIONAL/default/CONFIG_USB_SEVSEG | 1 + .../L2-OPTIONAL/default/CONFIG_USB_SIERRA_NET | 1 + .../L2-OPTIONAL/default/CONFIG_USB_SISUSBVGA | 1 + .../L2-OPTIONAL/default/CONFIG_USB_SL811_HCD | 1 + .../L2-OPTIONAL/default/CONFIG_USB_STORAGE | 1 + .../default/CONFIG_USB_STORAGE_ALAUDA | 1 + 
.../default/CONFIG_USB_STORAGE_CYPRESS_ATACB | 1 + .../default/CONFIG_USB_STORAGE_DATAFAB | 1 + .../default/CONFIG_USB_STORAGE_DEBUG | 1 + .../default/CONFIG_USB_STORAGE_ENE_UB6250 | 1 + .../default/CONFIG_USB_STORAGE_FREECOM | 1 + .../default/CONFIG_USB_STORAGE_ISD200 | 1 + .../default/CONFIG_USB_STORAGE_JUMPSHOT | 1 + .../default/CONFIG_USB_STORAGE_KARMA | 1 + .../default/CONFIG_USB_STORAGE_ONETOUCH | 1 + .../default/CONFIG_USB_STORAGE_REALTEK | 1 + .../default/CONFIG_USB_STORAGE_SDDR09 | 1 + .../default/CONFIG_USB_STORAGE_SDDR55 | 1 + .../default/CONFIG_USB_STORAGE_USBAT | 1 + .../L2-OPTIONAL/default/CONFIG_USB_TEST | 1 + .../L2-OPTIONAL/default/CONFIG_USB_TMC | 1 + .../default/CONFIG_USB_TRANCEVIBRATOR | 1 + .../L2-OPTIONAL/default/CONFIG_USB_UAS | 1 + .../L2-OPTIONAL/default/CONFIG_USB_UEAGLEATM | 1 + .../L2-OPTIONAL/default/CONFIG_USB_USBNET | 1 + .../L2-OPTIONAL/default/CONFIG_USB_VL600 | 1 + .../L2-OPTIONAL/default/CONFIG_USB_WDM | 1 + .../L2-OPTIONAL/default/CONFIG_USB_XHCI_HCD | 1 + .../L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI | 1 + .../default/CONFIG_USB_XHCI_PCI_RENESAS | 1 + .../L2-OPTIONAL/default/CONFIG_USB_XUSBATM | 1 + .../L2-OPTIONAL/default/CONFIG_USB_YUREX | 1 + .../configs/L2-OPTIONAL/default/CONFIG_USERIO | 1 + .../default/CONFIG_USER_DECRYPTED_DATA | 1 + .../L2-OPTIONAL/default/CONFIG_USER_EVENTS | 1 + .../default/CONFIG_USE_PERCPU_NUMA_NODE_ID | 1 + .../default/CONFIG_VALIDATE_FS_PARSER | 1 + .../default/CONFIG_VFIO_IOMMU_TYPE1 | 1 + .../L2-OPTIONAL/default/CONFIG_VFIO_PCI_INTX | 1 + .../L2-OPTIONAL/default/CONFIG_VFIO_PCI_MMAP | 1 + .../L2-OPTIONAL/default/CONFIG_VFIO_VIRQFD | 1 + .../configs/L2-OPTIONAL/default/CONFIG_VHOST | 1 + .../default/CONFIG_VHOST_CROSS_ENDIAN_LEGACY | 1 + .../L2-OPTIONAL/default/CONFIG_VHOST_IOTLB | 1 + .../L2-OPTIONAL/default/CONFIG_VHOST_TASK | 1 + .../L2-OPTIONAL/default/CONFIG_VIDEO_CMDLINE | 1 + .../default/CONFIG_VIDEO_NOMODESET | 1 + .../L2-OPTIONAL/default/CONFIG_VIRTIO_ANCHOR | 1 + 
.../L2-OPTIONAL/default/CONFIG_VIRTIO_IOMMU | 1 + .../default/CONFIG_VIRT_CPU_ACCOUNTING | 1 + .../L2-OPTIONAL/default/CONFIG_VITESSE_PHY | 1 + .../L2-OPTIONAL/default/CONFIG_VLAN_8021Q | 1 + .../default/CONFIG_VLAN_8021Q_GVRP | 1 + .../default/CONFIG_VLAN_8021Q_MVRP | 1 + .../default/CONFIG_VT_CONSOLE_SLEEP | 1 + .../L2-OPTIONAL/default/CONFIG_VXFS_FS | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_W1 | 1 + .../configs/L2-OPTIONAL/default/CONFIG_WANXL | 1 + .../CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT | 1 + .../L2-OPTIONAL/default/CONFIG_WATCH_QUEUE | 1 + .../configs/L2-OPTIONAL/default/CONFIG_WDTPCI | 1 + .../configs/L2-OPTIONAL/default/CONFIG_WERROR | 1 + .../L2-OPTIONAL/default/CONFIG_WIRELESS | 1 + .../L2-OPTIONAL/default/CONFIG_WPCM450_SOC | 1 + .../default/CONFIG_WQ_CPU_INTENSIVE_REPORT | 1 + .../default/CONFIG_WQ_POWER_EFFICIENT_DEFAULT | 1 + .../configs/L2-OPTIONAL/default/CONFIG_WWAN | 1 + .../default/CONFIG_WW_MUTEX_SELFTEST | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_X25 | 1 + .../L2-OPTIONAL/default/CONFIG_XARRAY_MULTI | 1 + .../configs/L2-OPTIONAL/default/CONFIG_XFRM | 1 + .../L2-OPTIONAL/default/CONFIG_XFRM_AH | 1 + .../L2-OPTIONAL/default/CONFIG_XFRM_ALGO | 1 + .../L2-OPTIONAL/default/CONFIG_XFRM_ESP | 1 + .../L2-OPTIONAL/default/CONFIG_XFRM_IPCOMP | 1 + .../L2-OPTIONAL/default/CONFIG_XFRM_OFFLOAD | 1 + .../default/CONFIG_XFS_SUPPORT_ASCII_CI | 1 + .../default/CONFIG_XILINX_AXI_EMAC | 1 + .../L2-OPTIONAL/default/CONFIG_XILINX_DMA | 1 + .../default/CONFIG_XILINX_EMACLITE | 1 + .../default/CONFIG_XILINX_GMII2RGMII | 1 + .../default/CONFIG_XILINX_LL_TEMAC | 1 + .../L2-OPTIONAL/default/CONFIG_XILINX_SDFEC | 1 + .../L2-OPTIONAL/default/CONFIG_XILINX_VCU | 1 + .../default/CONFIG_XILINX_WATCHDOG | 1 + .../L2-OPTIONAL/default/CONFIG_XILINX_XDMA | 1 + .../L2-OPTIONAL/default/CONFIG_XILLYBUS | 1 + .../L2-OPTIONAL/default/CONFIG_XILLYUSB | 1 + .../configs/L2-OPTIONAL/default/CONFIG_XXHASH | 1 + .../configs/L2-OPTIONAL/default/CONFIG_YENTA | 1 + 
.../L2-OPTIONAL/default/CONFIG_YENTA_ENE_TUNE | 1 + .../L2-OPTIONAL/default/CONFIG_YENTA_O2 | 1 + .../L2-OPTIONAL/default/CONFIG_YENTA_RICOH | 1 + .../L2-OPTIONAL/default/CONFIG_YENTA_TI | 1 + .../L2-OPTIONAL/default/CONFIG_YENTA_TOSHIBA | 1 + .../L2-OPTIONAL/default/CONFIG_ZEROPLUS_FF | 1 + .../default/CONFIG_ZIIRAVE_WATCHDOG | 1 + .../L2-OPTIONAL/default/CONFIG_ZLIB_DEFLATE | 1 + .../L2-OPTIONAL/default/CONFIG_ZLIB_INFLATE | 1 + .../L2-OPTIONAL/default/CONFIG_ZONEFS_FS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_60XX_WDT | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ABP060MG | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ACERHDF | 1 + .../L2-OPTIONAL/x86/CONFIG_ACER_WIRELESS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ACER_WMI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ACPI_ADXL | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ACPI_ALS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ACPI_CMPC | 1 + .../L2-OPTIONAL/x86/CONFIG_ACPI_CPU_FREQ_PSS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ACPI_DPTF | 1 + .../x86/CONFIG_ACPI_HOTPLUG_IOAPIC | 1 + .../x86/CONFIG_ACPI_LEGACY_TABLES_LOOKUP | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ACPI_LPIT | 1 + .../x86/CONFIG_ACPI_PLATFORM_PROFILE | 1 + .../x86/CONFIG_ACPI_PROCESSOR_CSTATE | 1 + .../CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT | 1 + .../L2-OPTIONAL/x86/CONFIG_ACPI_TOSHIBA | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ACPI_WMI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ACRN_GUEST | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_AD2S1200 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S90 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD3552R | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD4130 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5064 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5110 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5272 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5360 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5380 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5421 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5446 | 1 + 
anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5449 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5504 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5592R | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5593R | 1 + .../L2-OPTIONAL/x86/CONFIG_AD5624R_SPI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_AD5686_SPI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_AD5696_I2C | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5755 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5758 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5761 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5764 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5766 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5770R | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5791 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_AD7091R5 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7124 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7150 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7192 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7266 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7280 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7291 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7292 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7293 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7298 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7303 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74115 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_AD74413R | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7476 | 1 + .../x86/CONFIG_AD7606_IFACE_PARALLEL | 1 + .../L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_SPI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7746 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7766 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_AD7768_1 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7780 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7791 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7793 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7887 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7923 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7949 | 1 + 
anolis/configs/L2-OPTIONAL/x86/CONFIG_AD799X | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8366 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8801 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9523 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ADA4250 | 1 + .../L2-OPTIONAL/x86/CONFIG_ADDRESS_MASKING | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4350 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4371 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4377 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADIS16080 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADIS16130 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADIS16136 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADIS16201 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADIS16209 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADIS16260 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADIS16400 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADIS16460 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADIS16475 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADIS16480 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADJD_S311 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADMV1013 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADMV1014 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADMV4420 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADMV8818 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADRF6780 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADUX1020 | 1 + .../L2-OPTIONAL/x86/CONFIG_ADVANTECH_EC_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_ADV_SWBUTTON | 1 + .../L2-OPTIONAL/x86/CONFIG_ADXL313_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_ADXL313_SPI | 1 + .../L2-OPTIONAL/x86/CONFIG_ADXL345_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_ADXL345_SPI | 1 + .../L2-OPTIONAL/x86/CONFIG_ADXL355_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_ADXL355_SPI | 1 + .../L2-OPTIONAL/x86/CONFIG_ADXL367_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_ADXL367_SPI | 1 + .../L2-OPTIONAL/x86/CONFIG_ADXL372_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_ADXL372_SPI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADXRS290 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ADXRS450 | 1 + 
anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4403 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4404 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AGP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AK09911 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8974 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8975 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3010 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3320A | 1 + .../L2-OPTIONAL/x86/CONFIG_ALIM1535_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_ALTERA_STAPL | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AM2315 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_NB | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMC | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMF | 1 + .../L2-OPTIONAL/x86/CONFIG_AMD_SFH_HID | 1 + .../L2-OPTIONAL/x86/CONFIG_AMIGA_PARTITION | 1 + .../L2-OPTIONAL/x86/CONFIG_AMILO_RFKILL | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_APDS9300 | 1 + .../L2-OPTIONAL/x86/CONFIG_APDS9802ALS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_APDS9960 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_APPLE_GMUX | 1 + .../L2-OPTIONAL/x86/CONFIG_APPLE_PROPERTIES | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AQTION | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AR5523 | 1 + .../x86/CONFIG_ARCH_CLOCKSOURCE_INIT | 1 + .../L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ADD_PAGES | 1 + .../x86/CONFIG_ARCH_HAS_CC_PLATFORM | 1 + ...IG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION | 1 + .../x86/CONFIG_ARCH_HAS_CPU_FINALIZE_INIT | 1 + .../L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_RELAX | 1 + .../x86/CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED | 1 + .../x86/CONFIG_ARCH_HAS_EARLY_DEBUG | 1 + .../x86/CONFIG_ARCH_HAS_ELFCORE_COMPAT | 1 + .../x86/CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED | 1 + .../x86/CONFIG_ARCH_HAS_MEM_ENCRYPT | 1 + .../x86/CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG | 1 + .../x86/CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH | 1 + .../L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PKEYS | 1 + .../CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE | 1 + .../x86/CONFIG_ARCH_MAY_HAVE_PC_FDC | 1 + .../x86/CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC | 1 + 
.../x86/CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT | 1 + .../x86/CONFIG_ARCH_MIGHT_HAVE_PC_SERIO | 1 + .../L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS | 1 + .../x86/CONFIG_ARCH_MMAP_RND_BITS_MAX | 1 + .../x86/CONFIG_ARCH_MMAP_RND_BITS_MIN | 1 + .../x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN | 1 + .../x86/CONFIG_ARCH_SPARSEMEM_DEFAULT | 1 + .../x86/CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG | 1 + ...FIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG | 1 + .../x86/CONFIG_ARCH_SUPPORTS_KEXEC_JUMP | 1 + .../x86/CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY | 1 + .../x86/CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE | 1 + .../CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP | 1 + .../x86/CONFIG_ARCH_USES_PG_UNCACHED | 1 + .../x86/CONFIG_ARCH_USE_BUILTIN_BSWAP | 1 + .../x86/CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT | 1 + .../x86/CONFIG_ARCH_WANT_GENERAL_HUGETLB | 1 + .../x86/CONFIG_ARCH_WANT_OLD_COMPAT_IPC | 1 + .../x86/CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP | 1 + .../CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AS3935 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AS73211 | 1 + .../L2-OPTIONAL/x86/CONFIG_ASUS_LAPTOP | 1 + .../L2-OPTIONAL/x86/CONFIG_ASUS_NB_WMI | 1 + .../L2-OPTIONAL/x86/CONFIG_ASUS_TF103C_DOCK | 1 + .../L2-OPTIONAL/x86/CONFIG_ASUS_WIRELESS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ASUS_WMI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_AS_AVX512 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_GFNI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_AS_SHA1_NI | 1 + .../L2-OPTIONAL/x86/CONFIG_AS_SHA256_NI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_AS_TPAUSE | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_AS_WRUSS | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ATH10K_CE | 1 + .../L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUG | 1 + .../L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUGFS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ATH10K_PCI | 1 + .../L2-OPTIONAL/x86/CONFIG_ATH10K_SDIO | 1 + .../L2-OPTIONAL/x86/CONFIG_ATH10K_TRACING | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ATH10K_USB | 1 
+ anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH11K | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH12K | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ATH5K_PCI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH6KL | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ATH9K_AHB | 1 + .../x86/CONFIG_ATH9K_BTCOEX_SUPPORT | 1 + .../x86/CONFIG_ATH9K_CHANNEL_CONTEXT | 1 + .../L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON | 1 + .../L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_DEBUG | 1 + .../x86/CONFIG_ATH9K_COMMON_SPECTRAL | 1 + .../L2-OPTIONAL/x86/CONFIG_ATH9K_DEBUGFS | 1 + .../L2-OPTIONAL/x86/CONFIG_ATH9K_DYNACK | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC | 1 + .../L2-OPTIONAL/x86/CONFIG_ATH9K_HTC_DEBUGFS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HW | 1 + .../L2-OPTIONAL/x86/CONFIG_ATH9K_HWRNG | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI | 1 + .../x86/CONFIG_ATH9K_PCI_NO_EEPROM | 1 + .../L2-OPTIONAL/x86/CONFIG_ATH9K_PCOEM | 1 + .../L2-OPTIONAL/x86/CONFIG_ATH9K_RFKILL | 1 + .../x86/CONFIG_ATH9K_STATION_STATISTICS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ATH9K_WOW | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ATH_COMMON | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ATH_DEBUG | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ATL2 | 1 + .../L2-OPTIONAL/x86/CONFIG_ATLAS_EZO_SENSOR | 1 + .../L2-OPTIONAL/x86/CONFIG_ATLAS_PH_SENSOR | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ATP | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_AUDIT_ARCH | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_B43 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_B43LEGACY | 1 + .../L2-OPTIONAL/x86/CONFIG_BACKLIGHT_APPLE | 1 + .../L2-OPTIONAL/x86/CONFIG_BACKLIGHT_GPIO | 1 + .../L2-OPTIONAL/x86/CONFIG_BACKLIGHT_PWM | 1 + .../L2-OPTIONAL/x86/CONFIG_BACKLIGHT_SAHARA | 1 + .../L2-OPTIONAL/x86/CONFIG_BARCO_P50_GPIO | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE2 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE3 | 1 + 
.../L2-OPTIONAL/x86/CONFIG_BE2NET_HWMON | 1 + .../L2-OPTIONAL/x86/CONFIG_BE2NET_LANCER | 1 + .../L2-OPTIONAL/x86/CONFIG_BE2NET_SKYHAWK | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1750 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1780 | 1 + .../x86/CONFIG_BLK_CGROUP_IOLATENCY | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_FD | 1 + .../L2-OPTIONAL/x86/CONFIG_BLK_DEV_PMEM | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA180 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA220 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA400 | 1 + .../L2-OPTIONAL/x86/CONFIG_BMC150_ACCEL | 1 + .../L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_SPI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BME680 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BMG160 | 1 + .../L2-OPTIONAL/x86/CONFIG_BMI088_ACCEL | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BMI160_I2C | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BMI160_SPI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BMP280 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BNA | 1 + .../L2-OPTIONAL/x86/CONFIG_BOOT_VESA_SUPPORT | 1 + .../L2-OPTIONAL/x86/CONFIG_BOSCH_BNO055_I2C | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMDBG | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC | 1 + .../L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PCIE | 1 + .../x86/CONFIG_BRCMFMAC_PROTO_BCDC | 1 + .../x86/CONFIG_BRCMFMAC_PROTO_MSGBUF | 1 + .../L2-OPTIONAL/x86/CONFIG_BRCMFMAC_SDIO | 1 + .../L2-OPTIONAL/x86/CONFIG_BRCMFMAC_USB | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC | 1 + .../L2-OPTIONAL/x86/CONFIG_BRCMSMAC_LEDS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BRCMUTIL | 1 + .../L2-OPTIONAL/x86/CONFIG_BRCM_TRACING | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BT | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BT_6LOWPAN | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BT_AOSPEXT | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BT_ATH3K | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BCM | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_BNEP_MC_FILTER 
| 1 + .../x86/CONFIG_BT_BNEP_PROTO_FILTER | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BT_BREDR | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_CMTP | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BT_DEBUGFS | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_HCIBCM203X | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_HCIBCM4377 | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_HCIBFUSB | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_HCIBPA10X | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_HCIBTSDIO | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB | 1 + .../x86/CONFIG_BT_HCIBTUSB_AUTOSUSPEND | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_BCM | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_MTK | 1 + .../x86/CONFIG_BT_HCIBTUSB_POLL_SYNC | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_RTL | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_HCIUART_AG6XX | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_HCIUART_ATH3K | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_HCIUART_BCSP | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_HCIUART_H4 | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_HCIUART_INTEL | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BT_HCIVHCI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HIDP | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BT_INTEL | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LEDS | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_LE_L2CAP_ECRED | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_MRVL_SDIO | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BT_MSFTEXT | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BT_MTKSDIO | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_RFCOMM_TTY | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RTL | 1 + .../L2-OPTIONAL/x86/CONFIG_BT_SELFTEST | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_BT_VIRTIO | 1 + .../x86/CONFIG_BUILDTIME_MCOUNT_SORT | 1 + .../L2-OPTIONAL/x86/CONFIG_CALL_PADDING | 1 + .../L2-OPTIONAL/x86/CONFIG_CALL_THUNKS | 1 + .../L2-OPTIONAL/x86/CONFIG_CALL_THUNKS_DEBUG | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_CAPI_TRACE | 1 
+ .../configs/L2-OPTIONAL/x86/CONFIG_CARL9170 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_CCS811 | 1 + .../x86/CONFIG_CC_HAS_ENTRY_PADDING | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_IBT | 1 + .../x86/CONFIG_CC_HAS_RETURN_THUNK | 1 + .../x86/CONFIG_CC_HAS_SANE_STACKPROTECTOR | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SLS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_CEC_CH7322 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_CEC_GPIO | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_CEC_SECO | 1 + .../L2-OPTIONAL/x86/CONFIG_CHROME_PLATFORMS | 1 + .../L2-OPTIONAL/x86/CONFIG_CLKBLD_I8253 | 1 + .../L2-OPTIONAL/x86/CONFIG_CLKEVT_I8253 | 1 + .../CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE | 1 + .../x86/CONFIG_CLOCKSOURCE_WATCHDOG | 1 + .../CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_CM32181 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3232 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3323 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3605 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_CM36651 | 1 + .../L2-OPTIONAL/x86/CONFIG_CMDLINE_BOOL | 1 + .../L2-OPTIONAL/x86/CONFIG_COMPAL_LAPTOP | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_COMPAT_32 | 1 + .../x86/CONFIG_COMPAT_FOR_U64_ALIGNMENT | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_CPA_DEBUG | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_CPU5_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_CPU_SUP_CENTAUR | 1 + .../x86/CONFIG_CRASH_MAX_MEMORY_RANGES | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64 | 1 + .../L2-OPTIONAL/x86/CONFIG_CRC64_ROCKSOFT | 1 + .../x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S | 1 + .../CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519 | 1 + .../x86/CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 | 1 + .../x86/CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 | 1 + .../x86/CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 | 1 + .../x86/CONFIG_CRYPTO_CRC64_ROCKSOFT | 1 + .../L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK | 1 + .../x86/CONFIG_CRYPTO_DEV_PADLOCK_AES | 1 + .../x86/CONFIG_CRYPTO_DEV_PADLOCK_SHA | 1 + .../x86/CONFIG_CRYPTO_DEV_QAT_4XXX | 1 + 
.../x86/CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION | 1 + .../x86/CONFIG_CRYPTO_LIB_POLY1305_RSIZE | 1 + .../x86/CONFIG_CRYPTO_POLYVAL_CLMUL_NI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_CX_ECAT | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DA280 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DA311 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DCA | 1 + .../L2-OPTIONAL/x86/CONFIG_DEBUG_ENTRY | 1 + .../x86/CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP | 1 + .../L2-OPTIONAL/x86/CONFIG_DEBUG_NMI_SELFTEST | 1 + .../L2-OPTIONAL/x86/CONFIG_DEBUG_TLBFLUSH | 1 + .../L2-OPTIONAL/x86/CONFIG_DEV_COREDUMP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DHT11 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DLHL60D | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD06 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD09 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD10 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_DMAR_TABLE | 1 + .../x86/CONFIG_DMA_VIRTUAL_CHANNELS | 1 + .../CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_DPOT_DAC | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DPS310 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_DRM_DP_CEC | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_DRM_HYPERV | 1 + .../x86/CONFIG_DRM_I2C_NXP_TDA998X | 1 + .../L2-OPTIONAL/x86/CONFIG_DRM_I2C_SIL164 | 1 + .../CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE | 1 + .../x86/CONFIG_DRM_I915_REQUEST_TIMEOUT | 1 + .../L2-OPTIONAL/x86/CONFIG_DRM_MIPI_DSI | 1 + .../CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN | 1 + .../L2-OPTIONAL/x86/CONFIG_DRM_PRIVACY_SCREEN | 1 + .../L2-OPTIONAL/x86/CONFIG_DRM_VBOXVIDEO | 1 + .../x86/CONFIG_DRM_VMWGFX_MKSSTATS | 1 + .../L2-OPTIONAL/x86/CONFIG_DRM_XEN_FRONTEND | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DS1803 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DS4424 | 1 + .../L2-OPTIONAL/x86/CONFIG_DW_DMAC_CORE | 1 + .../x86/CONFIG_DYNAMIC_FTRACE_WITH_REGS | 1 + .../x86/CONFIG_DYNAMIC_MEMORY_LAYOUT | 1 + .../L2-OPTIONAL/x86/CONFIG_EARLY_PRINTK_USB | 1 + .../L2-OPTIONAL/x86/CONFIG_EBC_C384_WDT | 1 + 
.../L2-OPTIONAL/x86/CONFIG_EDAC_ATOMIC_SCRUB | 1 + .../L2-OPTIONAL/x86/CONFIG_EDAC_IE31200 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_EDAC_IGEN6 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_EDAC_PND2 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_EDAC_X38 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_EDD_OFF | 1 + .../L2-OPTIONAL/x86/CONFIG_EEEPC_LAPTOP | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_EEEPC_WMI | 1 + .../x86/CONFIG_EFI_DXE_MEM_ATTRIBUTES | 1 + .../L2-OPTIONAL/x86/CONFIG_EFI_FAKE_MEMMAP | 1 + .../x86/CONFIG_EFI_HANDOVER_PROTOCOL | 1 + .../L2-OPTIONAL/x86/CONFIG_EFI_PGT_DUMP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_EISA | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ENIC | 1 + .../L2-OPTIONAL/x86/CONFIG_ENVELOPE_DETECTOR | 1 + .../L2-OPTIONAL/x86/CONFIG_EUROTECH_WDT | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_EXAR_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_F71808E_WDT | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_ARC | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_HGA | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_FB_LE80578 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_N411 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_FB_SM501 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_FB_VGA16 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VIA | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE | 1 + .../L2-OPTIONAL/x86/CONFIG_FIREWIRE_NET | 1 + .../L2-OPTIONAL/x86/CONFIG_FIREWIRE_OHCI | 1 + .../L2-OPTIONAL/x86/CONFIG_FIREWIRE_SBP2 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_FPROBE | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_FS_MBCACHE | 1 + .../x86/CONFIG_FTRACE_MCOUNT_USE_CC | 1 + .../x86/CONFIG_FTRACE_SORT_STARTUP_TEST | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_ES | 1 + .../L2-OPTIONAL/x86/CONFIG_FUJITSU_LAPTOP | 1 + .../L2-OPTIONAL/x86/CONFIG_FUJITSU_TABLET | 1 + .../L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT | 1 + .../x86/CONFIG_FUNCTION_ALIGNMENT_16B | 1 + .../x86/CONFIG_FUNCTION_PADDING_BYTES | 1 + .../x86/CONFIG_FUNCTION_PADDING_CFI | 1 + .../x86/CONFIG_FW_LOADER_USER_HELPER_FALLBACK | 1 + 
.../configs/L2-OPTIONAL/x86/CONFIG_FXAS21002C | 1 + .../L2-OPTIONAL/x86/CONFIG_FXLS8962AF_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_FXLS8962AF_SPI | 1 + .../L2-OPTIONAL/x86/CONFIG_FXOS8700_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_FXOS8700_SPI | 1 + .../x86/CONFIG_GDS_FORCE_MITIGATION | 1 + .../x86/CONFIG_GENERIC_ADC_BATTERY | 1 + .../x86/CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST | 1 + .../x86/CONFIG_GENERIC_CMOS_UPDATE | 1 + .../L2-OPTIONAL/x86/CONFIG_GENERIC_ENTRY | 1 + .../L2-OPTIONAL/x86/CONFIG_GENERIC_IOMAP | 1 + .../x86/CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR | 1 + .../x86/CONFIG_GENERIC_IRQ_RESERVATION_MODE | 1 + .../L2-OPTIONAL/x86/CONFIG_GIGABYTE_WMI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_GP2AP002 | 1 + .../L2-OPTIONAL/x86/CONFIG_GP2AP020A00F | 1 + .../L2-OPTIONAL/x86/CONFIG_GPD_POCKET_FAN | 1 + .../L2-OPTIONAL/x86/CONFIG_GPIO_AMD8111 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_GPIO_DWAPB | 1 + .../L2-OPTIONAL/x86/CONFIG_GPIO_ELKHARTLAKE | 1 + .../L2-OPTIONAL/x86/CONFIG_GPIO_F7188X | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_GPIO_ICH | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_GPIO_IT87 | 1 + .../L2-OPTIONAL/x86/CONFIG_GPIO_ML_IOH | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH | 1 + .../L2-OPTIONAL/x86/CONFIG_GPIO_SCH311X | 1 + .../L2-OPTIONAL/x86/CONFIG_GPIO_VIPERBOARD | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_GPIO_VX855 | 1 + .../L2-OPTIONAL/x86/CONFIG_GPIO_WINBOND | 1 + .../L2-OPTIONAL/x86/CONFIG_GPIO_WS16C48 | 1 + .../x86/CONFIG_HARDLOCKUP_CHECK_TIMESTAMP | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_ACPI_APEI_NMI | 1 + .../x86/CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KCSAN | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KMSAN | 1 + .../x86/CONFIG_HAVE_ARCH_NODE_DEV_GROUP | 1 + .../x86/CONFIG_HAVE_ARCH_SOFT_DIRTY | 1 + .../CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD | 1 + .../x86/CONFIG_HAVE_ARCH_USERFAULTFD_WP | 1 + .../x86/CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES | 1 + .../x86/CONFIG_HAVE_BOOTMEM_INFO_NODE | 1 + .../x86/CONFIG_HAVE_BUILDTIME_MCOUNT_SORT | 
1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_CALL_THUNKS | 1 + ...CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK | 1 + .../CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE | 1 + .../x86/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_HAVE_EISA | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_EXIT_THREAD | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_FENTRY | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_INTEL_TXT | 1 + .../x86/CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK | 1 + .../x86/CONFIG_HAVE_JUMP_LABEL_HACK | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_BZIP2 | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_GZIP | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZ4 | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZMA | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZO | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_XZ | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_ZSTD | 1 + .../x86/CONFIG_HAVE_KPROBES_ON_FTRACE | 1 + .../x86/CONFIG_HAVE_KVM_DIRTY_RING_TSO | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_KVM_NO_POLL | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PFNCACHE | 1 + .../x86/CONFIG_HAVE_KVM_PM_NOTIFIER | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH | 1 + .../x86/CONFIG_HAVE_MIXED_BREAKPOINTS_REGS | 1 + .../x86/CONFIG_HAVE_MMIOTRACE_SUPPORT | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_HACK | 1 + .../x86/CONFIG_HAVE_NOINSTR_VALIDATION | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL | 1 + .../x86/CONFIG_HAVE_OBJTOOL_MCOUNT | 1 + .../x86/CONFIG_HAVE_OBJTOOL_NOP_MCOUNT | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_OPTPROBES | 1 + .../x86/CONFIG_HAVE_PCSPKR_PLATFORM | 1 + .../x86/CONFIG_HAVE_PREEMPT_DYNAMIC_CALL | 1 + .../x86/CONFIG_HAVE_RELIABLE_STACKTRACE | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_RETHOOK | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_HAVE_RUST | 1 + .../x86/CONFIG_HAVE_STACK_VALIDATION | 1 + .../L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL | 1 + .../x86/CONFIG_HAVE_STATIC_CALL_INLINE | 1 + .../x86/CONFIG_HAVE_UACCESS_VALIDATION | 1 + .../x86/CONFIG_HAVE_UNSTABLE_SCHED_CLOCK | 1 + .../x86/CONFIG_HAVE_USER_RETURN_NOTIFIER | 1 + 
anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC100X | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC2010 | 1 + .../x86/CONFIG_HFI1_DEBUG_SDMA_ORDER | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HI8435 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_HID_ALPS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_HID_ASUS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_HID_CMEDIA | 1 + .../L2-OPTIONAL/x86/CONFIG_HID_HYPERV_MOUSE | 1 + .../L2-OPTIONAL/x86/CONFIG_HID_NVIDIA_SHIELD | 1 + .../L2-OPTIONAL/x86/CONFIG_HID_PICOLCD_CIR | 1 + .../x86/CONFIG_HID_SENSOR_ACCEL_3D | 1 + .../L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ALS | 1 + .../x86/CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE | 1 + .../x86/CONFIG_HID_SENSOR_CUSTOM_SENSOR | 1 + .../x86/CONFIG_HID_SENSOR_DEVICE_ROTATION | 1 + .../L2-OPTIONAL/x86/CONFIG_HID_SENSOR_GYRO_3D | 1 + .../L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUB | 1 + .../x86/CONFIG_HID_SENSOR_HUMIDITY | 1 + .../x86/CONFIG_HID_SENSOR_IIO_COMMON | 1 + .../x86/CONFIG_HID_SENSOR_IIO_TRIGGER | 1 + .../x86/CONFIG_HID_SENSOR_INCLINOMETER_3D | 1 + .../x86/CONFIG_HID_SENSOR_MAGNETOMETER_3D | 1 + .../L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PRESS | 1 + .../L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PROX | 1 + .../L2-OPTIONAL/x86/CONFIG_HID_SENSOR_TEMP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HMC425 | 1 + .../x86/CONFIG_HOTPLUG_CORE_SYNC_FULL | 1 + .../L2-OPTIONAL/x86/CONFIG_HOTPLUG_PARALLEL | 1 + .../L2-OPTIONAL/x86/CONFIG_HOTPLUG_PCI_SHPC | 1 + .../L2-OPTIONAL/x86/CONFIG_HOTPLUG_SMT | 1 + .../x86/CONFIG_HOTPLUG_SPLIT_STARTUP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HP03 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HP206C | 1 + .../L2-OPTIONAL/x86/CONFIG_HPET_EMULATE_RTC | 1 + .../L2-OPTIONAL/x86/CONFIG_HPET_MMAP_DEFAULT | 1 + .../L2-OPTIONAL/x86/CONFIG_HPWDT_NMI_DECODING | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_ILO | 1 + .../L2-OPTIONAL/x86/CONFIG_HP_WATCHDOG | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HSU_DMA | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HTS221 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HTU21 | 1 + 
.../configs/L2-OPTIONAL/x86/CONFIG_HUAWEI_WMI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_IRQ | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN | 1 + .../L2-OPTIONAL/x86/CONFIG_HVC_XEN_FRONTEND | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_HWMON_VID | 1 + .../L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIA | 1 + .../L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIRTIO | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HX711 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_HYPERV_NET | 1 + .../L2-OPTIONAL/x86/CONFIG_HYPERV_TESTING | 1 + .../L2-OPTIONAL/x86/CONFIG_HYPERV_TIMER | 1 + .../L2-OPTIONAL/x86/CONFIG_HYPERV_VTL_MODE | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756 | 1 + .../L2-OPTIONAL/x86/CONFIG_I2C_AMD756_S4882 | 1 + .../L2-OPTIONAL/x86/CONFIG_I2C_AMD8111 | 1 + .../x86/CONFIG_I2C_DESIGNWARE_AMDPSP | 1 + .../x86/CONFIG_I2C_DESIGNWARE_BAYTRAIL | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_I2C_GPIO | 1 + .../L2-OPTIONAL/x86/CONFIG_I2C_HELPER_AUTO | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_I2C_I801 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_I2C_ISCH | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_I2C_ISMT | 1 + .../L2-OPTIONAL/x86/CONFIG_I2C_MLXCPLD | 1 + .../L2-OPTIONAL/x86/CONFIG_I2C_MUX_GPIO | 1 + .../L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA9541 | 1 + .../L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA954x | 1 + .../L2-OPTIONAL/x86/CONFIG_I2C_NFORCE2_S4985 | 1 + .../L2-OPTIONAL/x86/CONFIG_I2C_PARPORT | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_I2C_PIIX4 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_I2C_SCMI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_I2C_SIS96X | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIA | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_I2C_VIAPRO | 1 + .../L2-OPTIONAL/x86/CONFIG_I2C_VIPERBOARD | 1 + .../L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN | 1 + .../L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN_SMBUS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_I8253_LOCK | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IAQCORE | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_IB700_WDT | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IBMASR | 1 + 
anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_ASM | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_RTL | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ICE_HWTS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ICP10100 | 1 + .../L2-OPTIONAL/x86/CONFIG_IDEAPAD_LAPTOP | 1 + .../L2-OPTIONAL/x86/CONFIG_IDLE_INJECT | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_IE6XX_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_IEEE802154_FAKELB | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_CB | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMA | 1 + .../x86/CONFIG_IIO_BUFFER_DMAENGINE | 1 + .../x86/CONFIG_IIO_BUFFER_HW_CONSUMER | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_CONFIGFS | 1 + .../x86/CONFIG_IIO_CONSUMERS_PER_TRIGGER | 1 + .../x86/CONFIG_IIO_INTERRUPT_TRIGGER | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_KFIFO_BUF | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_KX022A_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_KX022A_SPI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_MUX | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_RESCALE | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_SSP_SENSORHUB | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_ST_ACCEL_3AXIS | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_ST_GYRO_3AXIS | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM6DSX | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM9DS0 | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_ST_MAGN_3AXIS | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_ST_PRESS | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_SW_DEVICE | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_SW_TRIGGER | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_SYSFS_TRIGGER | 1 + .../L2-OPTIONAL/x86/CONFIG_IIO_TRIGGER | 1 + .../x86/CONFIG_IIO_TRIGGERED_BUFFER | 1 + .../x86/CONFIG_IIO_TRIGGERED_EVENT | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_INA2XX_ADC | 1 + .../L2-OPTIONAL/x86/CONFIG_INFINIBAND_HFI1 | 1 + .../L2-OPTIONAL/x86/CONFIG_INFINIBAND_QIB | 1 + .../L2-OPTIONAL/x86/CONFIG_INFINIBAND_USNIC | 1 + .../x86/CONFIG_INFINIBAND_VMWARE_PVRDMA | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_AD714X | 1 + 
.../L2-OPTIONAL/x86/CONFIG_INPUT_ADXL34X | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_APANEL | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_ATI_REMOTE2 | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_ATLAS_BTNS | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_BMA150 | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_CM109 | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_CMA3000 | 1 + .../x86/CONFIG_INPUT_DA7280_HAPTICS | 1 + .../x86/CONFIG_INPUT_DRV260X_HAPTICS | 1 + .../x86/CONFIG_INPUT_DRV2665_HAPTICS | 1 + .../x86/CONFIG_INPUT_DRV2667_HAPTICS | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_E3X0_BUTTON | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_BEEPER | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_DECODER | 1 + .../x86/CONFIG_INPUT_GPIO_ROTARY_ENCODER | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_VIBRA | 1 + .../x86/CONFIG_INPUT_IDEAPAD_SLIDEBAR | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_IMS_PCU | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_IQS269A | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_IQS626A | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_IQS7222 | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_JOYDEV | 1 + .../x86/CONFIG_INPUT_KEYSPAN_REMOTE | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_KXTJ9 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_INPUT_MISC | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_MMA8450 | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_PCF8574 | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_PCSPKR | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_POWERMATE | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_PWM_BEEPER | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_PWM_VIBRA | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_TABLET | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_TOUCHSCREEN | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_UINPUT | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_VIVALDIFMAP | 1 + .../x86/CONFIG_INPUT_XEN_KBDDEV_FRONTEND | 1 + .../L2-OPTIONAL/x86/CONFIG_INPUT_YEALINK | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_ATOMISP2_PM | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_INTEL_GTT | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_HID_EVENT | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_IDXD_COMPAT | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_INTEL_IFS | 1 + 
.../x86/CONFIG_INTEL_INT0002_VGPIO | 1 + .../x86/CONFIG_INTEL_IOMMU_DEFAULT_ON | 1 + .../x86/CONFIG_INTEL_IOMMU_FLOPPY_WA | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_INTEL_IPS | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_ISHTP_ECLITE | 1 + .../x86/CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_ISH_HID | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_INTEL_LDMA | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC | 1 + .../x86/CONFIG_INTEL_MEI_GSC_PROXY | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_MEI_HDCP | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_MEI_PXP | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_MEI_TXE | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_OAKTRAIL | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_PUNIT_IPC | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_SAR_INT1092 | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PCI | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PLATFORM | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_INTEL_SDSI | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_SMARTCONNECT | 1 + .../x86/CONFIG_INTEL_SOC_DTS_IOSF_CORE | 1 + .../x86/CONFIG_INTEL_SOC_DTS_THERMAL | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC | 1 + .../L2-OPTIONAL/x86/CONFIG_INTEL_TCC_COOLING | 1 + .../x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_INTEL_VBTN | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI | 1 + .../x86/CONFIG_INTEL_WMI_SBL_FW_UPDATE | 1 + .../x86/CONFIG_INTEL_WMI_THUNDERBOLT | 1 + .../x86/CONFIG_INTERVAL_TREE_SPAN_ITER | 1 + .../L2-OPTIONAL/x86/CONFIG_INV_ICM42600_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_INV_ICM42600_SPI | 1 + .../L2-OPTIONAL/x86/CONFIG_INV_MPU6050_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_INV_MPU6050_SPI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD | 1 + .../L2-OPTIONAL/x86/CONFIG_IOMMUFD_DRIVER | 1 + .../L2-OPTIONAL/x86/CONFIG_IOSF_MBI_DEBUG | 1 + .../L2-OPTIONAL/x86/CONFIG_IO_DELAY_0XED | 1 + .../L2-OPTIONAL/x86/CONFIG_IO_DELAY_NONE | 1 + .../L2-OPTIONAL/x86/CONFIG_IO_DELAY_UDELAY | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_IPU_BRIDGE | 1 + 
anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2100 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2200 | 1 + .../L2-OPTIONAL/x86/CONFIG_IRQ_BYPASS_MANAGER | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IRSD200 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ENE | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_IR_FINTEK | 1 + .../L2-OPTIONAL/x86/CONFIG_IR_IGORPLUGUSB | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_IR_IGUANA | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON | 1 + .../L2-OPTIONAL/x86/CONFIG_IR_IMON_DECODER | 1 + .../L2-OPTIONAL/x86/CONFIG_IR_IMON_RAW | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_IR_ITE_CIR | 1 + .../L2-OPTIONAL/x86/CONFIG_IR_JVC_DECODER | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_IR_MCEUSB | 1 + .../L2-OPTIONAL/x86/CONFIG_IR_MCE_KBD_DECODER | 1 + .../L2-OPTIONAL/x86/CONFIG_IR_NEC_DECODER | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_IR_NUVOTON | 1 + .../L2-OPTIONAL/x86/CONFIG_IR_RC5_DECODER | 1 + .../L2-OPTIONAL/x86/CONFIG_IR_RC6_DECODER | 1 + .../L2-OPTIONAL/x86/CONFIG_IR_RCMM_DECODER | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_IR_REDRAT3 | 1 + .../L2-OPTIONAL/x86/CONFIG_IR_SANYO_DECODER | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_IR_SERIAL | 1 + .../L2-OPTIONAL/x86/CONFIG_IR_SONY_DECODER | 1 + .../L2-OPTIONAL/x86/CONFIG_IR_STREAMZAP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TOY | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_IR_TTUSBIR | 1 + .../L2-OPTIONAL/x86/CONFIG_IR_WINBOND_CIR | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI | 1 + .../x86/CONFIG_ISDN_CAPI_MIDDLEWARE | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ISL29003 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ISL29020 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ISL29125 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ISL29501 | 1 + .../L2-OPTIONAL/x86/CONFIG_IT8712F_WDT | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_IT87_WDT | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ITG3200 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL3945 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL4965 | 1 + 
anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLDVM | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLMVM | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI | 1 + .../L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUG | 1 + .../L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUGFS | 1 + .../x86/CONFIG_IWLWIFI_DEVICE_TRACING | 1 + .../L2-OPTIONAL/x86/CONFIG_IWLWIFI_LEDS | 1 + .../x86/CONFIG_IWLWIFI_OPMODE_MODULAR | 1 + .../L2-OPTIONAL/x86/CONFIG_JAILHOUSE_GUEST | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_JSA1212 | 1 + .../x86/CONFIG_KALLSYMS_ABSOLUTE_PERCPU | 1 + .../L2-OPTIONAL/x86/CONFIG_KARMA_PARTITION | 1 + .../L2-OPTIONAL/x86/CONFIG_KEYBOARD_APPLESPI | 1 + .../L2-OPTIONAL/x86/CONFIG_KEYBOARD_GPIO | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_KMX61 | 1 + .../x86/CONFIG_KRETPROBE_ON_RETHOOK | 1 + .../L2-OPTIONAL/x86/CONFIG_KVM_ASYNC_PF | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_KVM_COMPAT | 1 + .../x86/CONFIG_KVM_EXTERNAL_WRITE_TRACKING | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_SMM | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_XEN | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_KXCJK1013 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_KXSD9 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_LEDS_APU | 1 + .../L2-OPTIONAL/x86/CONFIG_LEDS_CLASS_FLASH | 1 + .../L2-OPTIONAL/x86/CONFIG_LEDS_INTEL_SS4200 | 1 + .../L2-OPTIONAL/x86/CONFIG_LEDS_LT3593 | 1 + .../L2-OPTIONAL/x86/CONFIG_LEDS_MLXCPLD | 1 + .../L2-OPTIONAL/x86/CONFIG_LEDS_NIC78BX | 1 + .../L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_AUDIO | 1 + .../L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_DISK | 1 + .../x86/CONFIG_LEGACY_VSYSCALL_XONLY | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_LENOVO_YMC | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_LG_LAPTOP | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS | 1 + .../L2-OPTIONAL/x86/CONFIG_LIBERTAS_THINFIRM | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_LIBNVDIMM | 1 + .../L2-OPTIONAL/x86/CONFIG_LIDAR_LITE_V2 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_LIRC | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_LMP91000 | 1 + 
anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_ICH | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_SCH | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_LP_CONSOLE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC1660 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2471 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2485 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2496 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2497 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2632 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2688 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2983 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_LTR501 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_LTRF216A | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_LV0104CS | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_M62332 | 1 + .../L2-OPTIONAL/x86/CONFIG_MAC80211_HWSIM | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MACHZ_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_MACINTOSH_DRIVERS | 1 + .../L2-OPTIONAL/x86/CONFIG_MAC_EMUMOUSEBTN | 1 + .../L2-OPTIONAL/x86/CONFIG_MAC_PARTITION | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MAG3110 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MATOM | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1027 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MAX11100 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1118 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MAX11205 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MAX11410 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1241 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1363 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MAX30100 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MAX30102 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MAX30208 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MAX31856 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MAX31865 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MAX44000 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MAX44009 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX517 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5432 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5481 | 1 + 
anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5487 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5522 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5821 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX9611 | 1 + .../L2-OPTIONAL/x86/CONFIG_MAXIM_THERMOCOUPLE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MB1232 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MC3230 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP320X | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3422 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3911 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4018 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MCP41010 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4131 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4531 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4725 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4728 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4922 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MDIO_GPIO | 1 + .../x86/CONFIG_MEDIA_ANALOG_TV_SUPPORT | 1 + .../x86/CONFIG_MEDIA_CAMERA_SUPPORT | 1 + .../L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_RC | 1 + .../L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_SUPPORT | 1 + .../x86/CONFIG_MEDIA_DIGITAL_TV_SUPPORT | 1 + .../x86/CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV | 1 + .../L2-OPTIONAL/x86/CONFIG_MEDIA_PCI_SUPPORT | 1 + .../x86/CONFIG_MEDIA_PLATFORM_SUPPORT | 1 + .../x86/CONFIG_MEDIA_RADIO_SUPPORT | 1 + .../L2-OPTIONAL/x86/CONFIG_MEDIA_SDR_SUPPORT | 1 + .../x86/CONFIG_MEDIA_SUBDRV_AUTOSELECT | 1 + .../L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT | 1 + .../x86/CONFIG_MEDIA_SUPPORT_FILTER | 1 + .../L2-OPTIONAL/x86/CONFIG_MEDIA_TEST_SUPPORT | 1 + .../L2-OPTIONAL/x86/CONFIG_MEDIA_USB_SUPPORT | 1 + .../x86/CONFIG_MEMSTICK_REALTEK_PCI | 1 + .../x86/CONFIG_MEMSTICK_REALTEK_USB | 1 + .../L2-OPTIONAL/x86/CONFIG_MERAKI_MX100 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MFD_CORE | 1 + .../L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS | 1 + .../x86/CONFIG_MFD_INTEL_LPSS_ACPI | 1 + .../L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_PCI | 1 + .../L2-OPTIONAL/x86/CONFIG_MFD_INTEL_PMC_BXT 
| 1 + .../x86/CONFIG_MFD_INTEL_QUARK_I2C_GPIO | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501 | 1 + .../L2-OPTIONAL/x86/CONFIG_MFD_SM501_GPIO | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MFD_SYSCON | 1 + .../L2-OPTIONAL/x86/CONFIG_MFD_VIPERBOARD | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MFD_VX855 | 1 + .../L2-OPTIONAL/x86/CONFIG_MICREL_KS8995MA | 1 + .../x86/CONFIG_MICROCODE_LATE_LOADING | 1 + .../L2-OPTIONAL/x86/CONFIG_MICROSOFT_MANA | 1 + .../L2-OPTIONAL/x86/CONFIG_MINIX_SUBPARTITION | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX | 1 + .../L2-OPTIONAL/x86/CONFIG_MISC_RTSX_PCI | 1 + .../L2-OPTIONAL/x86/CONFIG_MISC_RTSX_USB | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN | 1 + .../L2-OPTIONAL/x86/CONFIG_MISDN_AVMFRITZ | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MISDN_DSP | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MISDN_HDLC | 1 + .../L2-OPTIONAL/x86/CONFIG_MISDN_HFCMULTI | 1 + .../L2-OPTIONAL/x86/CONFIG_MISDN_HFCPCI | 1 + .../L2-OPTIONAL/x86/CONFIG_MISDN_HFCUSB | 1 + .../L2-OPTIONAL/x86/CONFIG_MISDN_INFINEON | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MISDN_IPAC | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MISDN_ISAR | 1 + .../L2-OPTIONAL/x86/CONFIG_MISDN_L1OIP | 1 + .../L2-OPTIONAL/x86/CONFIG_MISDN_NETJET | 1 + .../L2-OPTIONAL/x86/CONFIG_MISDN_SPEEDFAX | 1 + .../L2-OPTIONAL/x86/CONFIG_MISDN_W6692 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MK8 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MLX90614 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MLX90632 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MLXREG_IO | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MLXREG_LC | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_MMA7455_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_MMA7455_SPI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7660 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA8452 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9551 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9553 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MMC35240 | 1 + 
.../L2-OPTIONAL/x86/CONFIG_MMCONF_FAM10H | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_MTK | 1 + .../L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_PCI | 1 + .../L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_USB | 1 + .../L2-OPTIONAL/x86/CONFIG_MMC_TOSHIBA_PCI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MMC_WBSD | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MMIOTRACE | 1 + .../x86/CONFIG_MMU_GATHER_MERGE_VMAS | 1 + .../L2-OPTIONAL/x86/CONFIG_MOUSE_APPLETOUCH | 1 + .../L2-OPTIONAL/x86/CONFIG_MOUSE_BCM5974 | 1 + .../L2-OPTIONAL/x86/CONFIG_MOUSE_CYAPA | 1 + .../x86/CONFIG_MOUSE_ELAN_I2C_SMBUS | 1 + .../L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ALPS | 1 + .../L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_BYD | 1 + .../L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_CYPRESS | 1 + .../L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH | 1 + .../x86/CONFIG_MOUSE_PS2_ELANTECH_SMBUS | 1 + .../x86/CONFIG_MOUSE_PS2_FOCALTECH | 1 + .../L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_LIFEBOOK | 1 + .../x86/CONFIG_MOUSE_PS2_LOGIPS2PP | 1 + .../L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SENTELIC | 1 + .../L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SMBUS | 1 + .../x86/CONFIG_MOUSE_PS2_SYNAPTICS | 1 + .../x86/CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS | 1 + .../L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_TOUCHKIT | 1 + .../x86/CONFIG_MOUSE_PS2_TRACKPOINT | 1 + .../L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_VMMOUSE | 1 + .../L2-OPTIONAL/x86/CONFIG_MOUSE_SERIAL | 1 + .../L2-OPTIONAL/x86/CONFIG_MOUSE_VSXXXAA | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MPL115_I2C | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MPL115_SPI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL3115 | 1 + .../L2-OPTIONAL/x86/CONFIG_MPRLS0025PA | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MPSC | 1 + .../L2-OPTIONAL/x86/CONFIG_MPU3050_I2C | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5611 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5637 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MSA311 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_EC | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MSI_LAPTOP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_WMI | 1 + 
anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7601U | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7603E | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7615E | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663S | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663U | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MT76_CORE | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MT76_LEDS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MT76_USB | 1 + .../L2-OPTIONAL/x86/CONFIG_MT76x02_LIB | 1 + .../L2-OPTIONAL/x86/CONFIG_MT76x02_USB | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0E | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0U | 1 + .../L2-OPTIONAL/x86/CONFIG_MT76x0_COMMON | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2E | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2U | 1 + .../L2-OPTIONAL/x86/CONFIG_MT76x2_COMMON | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7915E | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921E | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921S | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921U | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7996E | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MTD_CFI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MWAVE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX | 1 + .../L2-OPTIONAL/x86/CONFIG_MWIFIEX_PCIE | 1 + .../L2-OPTIONAL/x86/CONFIG_MWIFIEX_SDIO | 1 + .../L2-OPTIONAL/x86/CONFIG_MWIFIEX_USB | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MWL8K | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC4005 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC6255 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_MXM_WMI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE | 1 + .../L2-OPTIONAL/x86/CONFIG_MYRI10GE_DCA | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_NAU7802 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_BTT | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_PFN | 1 + .../L2-OPTIONAL/x86/CONFIG_NET_VENDOR_AMD | 1 + .../L2-OPTIONAL/x86/CONFIG_NET_VENDOR_BROCADE | 1 + .../L2-OPTIONAL/x86/CONFIG_NET_VENDOR_CISCO | 1 + 
.../L2-OPTIONAL/x86/CONFIG_NET_VENDOR_DEC | 1 + .../L2-OPTIONAL/x86/CONFIG_NET_VENDOR_EMULEX | 1 + .../x86/CONFIG_NET_VENDOR_QUALCOMM | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_NI903X_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_NIC7018_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_NITRO_ENCLAVES | 1 + .../L2-OPTIONAL/x86/CONFIG_NMI_CHECK_CPU | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_NOA1305 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_NOZOMI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_AMD | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_NTB_INTEL | 1 + .../x86/CONFIG_NVIDIA_WMI_EC_BACKLIGHT | 1 + .../L2-OPTIONAL/x86/CONFIG_NVSW_SN2201 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_OF | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT3001 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT4001 | 1 + .../L2-OPTIONAL/x86/CONFIG_OSF_PARTITION | 1 + .../L2-OPTIONAL/x86/CONFIG_OUTPUT_FORMAT | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_P2SB | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_PA12203001 | 1 + .../L2-OPTIONAL/x86/CONFIG_PANASONIC_LAPTOP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_PANEL | 1 + .../L2-OPTIONAL/x86/CONFIG_PARAVIRT_CLOCK | 1 + .../L2-OPTIONAL/x86/CONFIG_PARAVIRT_DEBUG | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT | 1 + .../L2-OPTIONAL/x86/CONFIG_PARPORT_1284 | 1 + .../L2-OPTIONAL/x86/CONFIG_PARPORT_NOT_PC | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC | 1 + .../L2-OPTIONAL/x86/CONFIG_PARPORT_PC_FIFO | 1 + .../L2-OPTIONAL/x86/CONFIG_PARPORT_PC_SUPERIO | 1 + .../L2-OPTIONAL/x86/CONFIG_PARPORT_SERIAL | 1 + .../L2-OPTIONAL/x86/CONFIG_PATA_PARPORT | 1 + .../L2-OPTIONAL/x86/CONFIG_PC87413_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_PCENGINES_APU2 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_PCI_DIRECT | 1 + .../x86/CONFIG_PCI_LOCKLESS_CONFIG | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_XEN | 1 + .../L2-OPTIONAL/x86/CONFIG_PHY_CPCAP_USB | 1 + .../L2-OPTIONAL/x86/CONFIG_PHY_INTEL_LGM_EMMC | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_ALDERLAKE | 
1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_BAYTRAIL | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_BROXTON | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_CANNONLAKE | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_CEDARFORK | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_CHERRYVIEW | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_DENVERTON | 1 + .../x86/CONFIG_PINCTRL_ELKHARTLAKE | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_EMMITSBURG | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_GEMINILAKE | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_ICELAKE | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_INTEL | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_JASPERLAKE | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_LAKEFIELD | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_LEWISBURG | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_LYNXPOINT | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_METEORLAKE | 1 + .../x86/CONFIG_PINCTRL_SUNRISEPOINT | 1 + .../L2-OPTIONAL/x86/CONFIG_PINCTRL_TIGERLAKE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_PING | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_PLFXLC | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_PLIP | 1 + .../L2-OPTIONAL/x86/CONFIG_PMIC_OPREGION | 1 + .../L2-OPTIONAL/x86/CONFIG_PNP_DEBUG_MESSAGES | 1 + .../x86/CONFIG_POWER_RESET_RESTART | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_PPDEV | 1 + .../L2-OPTIONAL/x86/CONFIG_PPS_CLIENT_PARPORT | 1 + .../L2-OPTIONAL/x86/CONFIG_PREFIX_SYMBOLS | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_PRINTER | 1 + .../x86/CONFIG_PROC_PID_ARCH_STATUS | 1 + .../x86/CONFIG_PROVIDE_OHCI1394_DMA_INIT | 1 + .../L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_KVM | 1 + .../L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_VMW | 1 + .../L2-OPTIONAL/x86/CONFIG_PUNIT_ATOM_DEBUG | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_PVH | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS | 1 + .../L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PCI | 1 + .../L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PLATFORM | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA | 1 + .../L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA_MGMT | 1 + .../L2-OPTIONAL/x86/CONFIG_QTNFMAC_PCIE | 1 + 
.../L2-OPTIONAL/x86/CONFIG_RC_ATI_REMOTE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_CORE | 1 + .../L2-OPTIONAL/x86/CONFIG_RC_DECODERS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RC_DEVICES | 1 + .../L2-OPTIONAL/x86/CONFIG_RC_LOOPBACK | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_MAP | 1 + .../L2-OPTIONAL/x86/CONFIG_RC_XBOX_DVD | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_REGULATOR | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_RETHOOK | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RFD77402 | 1 + .../L2-OPTIONAL/x86/CONFIG_RFKILL_GPIO | 1 + .../L2-OPTIONAL/x86/CONFIG_RICHTEK_RTQ6056 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RMI4_F34 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RMI4_SPI | 1 + .../L2-OPTIONAL/x86/CONFIG_ROHM_BU27008 | 1 + .../L2-OPTIONAL/x86/CONFIG_ROHM_BU27034 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_RPR0521 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RT2400PCI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RT2500PCI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RT2500USB | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT3290 | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT33XX | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT35XX | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT53XX | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RT2800USB | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2800USB_RT33XX | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2800USB_RT3573 | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2800USB_RT35XX | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2800USB_RT53XX | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2800USB_RT55XX | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2800USB_UNKNOWN | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2800_LIB_MMIO | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00 | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2X00_DEBUG | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_CRYPTO | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_DEBUGFS | 1 + .../x86/CONFIG_RT2X00_LIB_FIRMWARE | 1 + 
.../L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_LEDS | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_MMIO | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_PCI | 1 + .../L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_USB | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_RT61PCI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_RT73USB | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABB5ZES3 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABX80X | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1305 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1343 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1347 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1374_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1390 | 1 + .../x86/CONFIG_RTC_DRV_DS1685_FAMILY | 1 + .../x86/CONFIG_RTC_DRV_HID_SENSOR_TIME | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T93 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T94 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_MAX6902 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_MCP795 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2123 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2127 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF85063 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_R9701 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_RS5C348 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX4581 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX8010 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTC_MC146818_LIB | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8180 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8187 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RTL8188EE | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RTL8192CE | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RTL8192CU | 1 + .../L2-OPTIONAL/x86/CONFIG_RTL8192C_COMMON | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RTL8192DE | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RTL8192EE | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RTL8192SE | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RTL8723AE | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RTL8723BE | 1 + .../L2-OPTIONAL/x86/CONFIG_RTL8723_COMMON | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RTL8821AE | 1 + 
.../configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU | 1 + .../L2-OPTIONAL/x86/CONFIG_RTL8XXXU_UNTESTED | 1 + .../L2-OPTIONAL/x86/CONFIG_RTLBTCOEXIST | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI | 1 + .../L2-OPTIONAL/x86/CONFIG_RTLWIFI_DEBUG | 1 + .../L2-OPTIONAL/x86/CONFIG_RTLWIFI_PCI | 1 + .../L2-OPTIONAL/x86/CONFIG_RTLWIFI_USB | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RTL_CARDS | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88 | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_8723DE | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_8723DS | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_8723DU | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_8821CE | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_8821CS | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_8821CU | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_8822B | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_8822BE | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_8822BS | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_8822BU | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_8822C | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_8822CE | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_8822CS | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_8822CU | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RTW88_CORE | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_DEBUG | 1 + .../L2-OPTIONAL/x86/CONFIG_RTW88_DEBUGFS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_RTW88_PCI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW89 | 1 + .../L2-OPTIONAL/x86/CONFIG_SAMSUNG_LAPTOP | 1 + .../L2-OPTIONAL/x86/CONFIG_SAMSUNG_Q10 | 1 + .../L2-OPTIONAL/x86/CONFIG_SATA_ZHAOXIN | 1 + .../x86/CONFIG_SBC_EPX_C3_WATCHDOG | 1 + .../x86/CONFIG_SBC_FITPC2_WATCHDOG | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_SBP_TARGET | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_SC1200_WDT | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3000 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3300 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_SCD30_CORE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD4X | 1 + .../L2-OPTIONAL/x86/CONFIG_SCSI_AACRAID | 1 + .../L2-OPTIONAL/x86/CONFIG_SCSI_BNX2X_FCOE | 1 + .../L2-OPTIONAL/x86/CONFIG_SCSI_BNX2_ISCSI | 1 + 
.../configs/L2-OPTIONAL/x86/CONFIG_SCSI_IMM | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_SCSI_IPR | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_SCSI_ISCI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_SCSI_PPA | 1 + .../L2-OPTIONAL/x86/CONFIG_SDMA_VERBOSITY | 1 + .../L2-OPTIONAL/x86/CONFIG_SD_ADC_MODULATOR | 1 + .../L2-OPTIONAL/x86/CONFIG_SEL3350_PLATFORM | 1 + .../x86/CONFIG_SENSEAIR_SUNRISE_CO2 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP30 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP40 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU3 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ACPI_POWER | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_AD7314 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_AD7414 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_AD7418 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADC128D818 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADCXX | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1025 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1026 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1029 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1031 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1275 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADM9240 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7828 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7871 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7410 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7411 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7462 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7470 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7475 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7X10 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_AMC6821 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_APDS990X | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_APPLESMC | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ASB100 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ASC7621 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_EC | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_WMI | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ATK0110 | 1 + 
.../L2-OPTIONAL/x86/CONFIG_SENSORS_ATXP1 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_BH1770 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_CORETEMP | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_DELL_SMM | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_DME1737 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_DS1621 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_DS620 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_EMC1403 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_EMC6W201 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_F71805F | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_F71882FG | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_F75375S | 1 + .../x86/CONFIG_SENSORS_FAM15H_POWER | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_FSCHMD | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_G760A | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_G762 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_GL518SM | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_GL520SM | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_HDAPS | 1 + .../x86/CONFIG_SENSORS_HMC5843_I2C | 1 + .../x86/CONFIG_SENSORS_HMC5843_SPI | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_HP_WMI | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_I5500 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_I5K_AMB | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_IBMAEM | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_IBMPEX | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_IIO_HWMON | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_INA209 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_INA2XX | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29018 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29028 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_IT87 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_JC42 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_K10TEMP | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_K8TEMP | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LINEAGE | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3LV02D | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM25066 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM63 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM70 | 1 + 
.../L2-OPTIONAL/x86/CONFIG_SENSORS_LM73 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM75 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM77 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM78 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM80 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM83 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM85 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM87 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM90 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM92 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM93 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM95234 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM95241 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LM95245 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2945 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2978 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LTC3815 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4151 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4215 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4222 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4245 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4260 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4261 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1111 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16064 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16065 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1619 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1668 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MAX197 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MAX20751 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MAX31790 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MAX34440 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6639 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6650 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6697 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MAX8688 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MCP3021 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_MLXREG_FAN | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6683 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775 | 1 + .../x86/CONFIG_SENSORS_NCT6775_CORE | 1 + 
.../L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7802 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7904 | 1 + .../x86/CONFIG_SENSORS_NTC_THERMISTOR | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_OXP | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_PC87360 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_PC87427 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_PCF8591 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_PMBUS | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_POWR1220 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_SPI | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5627 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5636 | 1 + .../x86/CONFIG_SENSORS_SCH56XX_COMMON | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_SHT15 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_SHT21 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_SHTC1 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_SIS5595 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47B397 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M1 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M192 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_TC74 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_THMC50 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_TMP102 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_TMP103 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_TMP401 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_TMP421 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_TPS40422 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2550 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2563 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9000 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9200 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_VIA686A | 1 + .../x86/CONFIG_SENSORS_VIA_CPUTEMP | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_VT1211 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_VT8231 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_W83627EHF | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_W83627HF | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_W83781D | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_W83791D | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_W83792D | 1 + 
.../L2-OPTIONAL/x86/CONFIG_SENSORS_W83793 | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_W83795 | 1 + .../x86/CONFIG_SENSORS_W83795_FANCTRL | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_W83L785TS | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_W83L786NG | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_XGENE | 1 + .../L2-OPTIONAL/x86/CONFIG_SENSORS_ZL6100 | 1 + .../x86/CONFIG_SERIAL_8250_16550A_VARIANTS | 1 + .../L2-OPTIONAL/x86/CONFIG_SERIAL_8250_LPSS | 1 + .../L2-OPTIONAL/x86/CONFIG_SERIAL_8250_MID | 1 + .../L2-OPTIONAL/x86/CONFIG_SERIAL_8250_RT288X | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC | 1 + .../x86/CONFIG_SERIAL_ARC_NR_PORTS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_SERIAL_JSM | 1 + .../L2-OPTIONAL/x86/CONFIG_SERIAL_LANTIQ | 1 + .../x86/CONFIG_SERIAL_MULTI_INSTANTIATE | 1 + .../L2-OPTIONAL/x86/CONFIG_SERIO_CT82C710 | 1 + .../L2-OPTIONAL/x86/CONFIG_SERIO_PARKBD | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC | 1 + .../L2-OPTIONAL/x86/CONFIG_SFC_MCDI_LOGGING | 1 + .../L2-OPTIONAL/x86/CONFIG_SFC_MCDI_MON | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MTD | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_SFC_SRIOV | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU | 1 + .../L2-OPTIONAL/x86/CONFIG_SGI_GRU_DEBUG | 1 + .../L2-OPTIONAL/x86/CONFIG_SGI_PARTITION | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_XP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1133 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1145 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7005 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7020 | 1 + .../x86/CONFIG_SIEMENS_SIMATIC_IPC | 1 + .../L2-OPTIONAL/x86/CONFIG_SMSC37B787_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_SMSC_SCH311X_WDT | 1 + .../x86/CONFIG_SOLARIS_X86_PARTITION | 1 + .../L2-OPTIONAL/x86/CONFIG_SONYPI_COMPAT | 1 + .../L2-OPTIONAL/x86/CONFIG_SONY_LAPTOP | 1 + .../L2-OPTIONAL/x86/CONFIG_SPI_BUTTERFLY | 1 + .../L2-OPTIONAL/x86/CONFIG_SPI_LANTIQ_SSC | 1 + .../L2-OPTIONAL/x86/CONFIG_SPI_LM70_LLP | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_SPS30_I2C | 1 + 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF04 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF08 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_STK3310 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8312 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_STK8BA50 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ST_UVIS25 | 1 + .../L2-OPTIONAL/x86/CONFIG_SUN_PARTITION | 1 + .../L2-OPTIONAL/x86/CONFIG_SURFACE3_WMI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9310 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9324 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9360 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9500 | 1 + .../L2-OPTIONAL/x86/CONFIG_SYSTEM76_ACPI | 1 + .../L2-OPTIONAL/x86/CONFIG_SYS_HYPERVISOR | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_T5403 | 1 + .../x86/CONFIG_TABLET_SERIAL_WACOM4 | 1 + .../L2-OPTIONAL/x86/CONFIG_TABLET_USB_ACECAD | 1 + .../L2-OPTIONAL/x86/CONFIG_TABLET_USB_AIPTEK | 1 + .../L2-OPTIONAL/x86/CONFIG_TABLET_USB_HANWANG | 1 + .../L2-OPTIONAL/x86/CONFIG_TABLET_USB_KBTAB | 1 + .../L2-OPTIONAL/x86/CONFIG_TABLET_USB_PEGASUS | 1 + .../L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_ATMEL | 1 + .../x86/CONFIG_TCG_TIS_I2C_INFINEON | 1 + .../x86/CONFIG_TCG_TIS_I2C_NUVOTON | 1 + .../L2-OPTIONAL/x86/CONFIG_TCG_TIS_SPI | 1 + .../L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24 | 1 + .../x86/CONFIG_TCG_TIS_ST33ZP24_I2C | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_XEN | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3414 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3472 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TELCLOCK | 1 + .../x86/CONFIG_TEST_CLOCKSOURCE_WATCHDOG | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TEST_FPU | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TEST_HMM | 1 + .../L2-OPTIONAL/x86/CONFIG_THERMAL_ACPI | 1 + .../x86/CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG | 1 + .../L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI | 1 + .../x86/CONFIG_THINKPAD_ACPI_DEBUG | 1 + .../x86/CONFIG_THINKPAD_ACPI_DEBUGFACILITIES | 1 + .../x86/CONFIG_THINKPAD_ACPI_HOTKEY_POLL | 1 + .../x86/CONFIG_THINKPAD_ACPI_UNSAFE_LEDS | 1 + 
.../x86/CONFIG_THINKPAD_ACPI_VIDEO | 1 + .../L2-OPTIONAL/x86/CONFIG_THINKPAD_LMI | 1 + .../L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_BGX | 1 + .../L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_PF | 1 + .../L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_RGX | 1 + .../L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_VF | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TIFM_7XX1 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TI_ADC081C | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TI_ADC0832 | 1 + .../L2-OPTIONAL/x86/CONFIG_TI_ADC084S021 | 1 + .../L2-OPTIONAL/x86/CONFIG_TI_ADC108S102 | 1 + .../L2-OPTIONAL/x86/CONFIG_TI_ADC12138 | 1 + .../L2-OPTIONAL/x86/CONFIG_TI_ADC128S052 | 1 + .../L2-OPTIONAL/x86/CONFIG_TI_ADC161S626 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1015 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1100 | 1 + .../L2-OPTIONAL/x86/CONFIG_TI_ADS124S08 | 1 + .../L2-OPTIONAL/x86/CONFIG_TI_ADS131E08 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7924 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7950 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8344 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8688 | 1 + .../L2-OPTIONAL/x86/CONFIG_TI_DAC082S085 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TI_DAC5571 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7311 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7612 | 1 + .../L2-OPTIONAL/x86/CONFIG_TI_LMP92064 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TI_TLC4541 | 1 + .../L2-OPTIONAL/x86/CONFIG_TI_TMAG5273 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TI_TSC2046 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP006 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP007 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP117 | 1 + .../L2-OPTIONAL/x86/CONFIG_TOPSTAR_LAPTOP | 1 + .../L2-OPTIONAL/x86/CONFIG_TOSHIBA_BT_RFKILL | 1 + .../L2-OPTIONAL/x86/CONFIG_TOSHIBA_HAPS | 1 + .../L2-OPTIONAL/x86/CONFIG_TOSHIBA_WMI | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7877 | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7879 | 1 + .../x86/CONFIG_TOUCHSCREEN_ADS7846 | 1 + .../x86/CONFIG_TOUCHSCREEN_ATMEL_MXT | 1 
+ .../x86/CONFIG_TOUCHSCREEN_AUO_PIXCIR | 1 + .../x86/CONFIG_TOUCHSCREEN_BU21013 | 1 + .../x86/CONFIG_TOUCHSCREEN_BU21029 | 1 + .../x86/CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 | 1 + .../x86/CONFIG_TOUCHSCREEN_COLIBRI_VF50 | 1 + .../x86/CONFIG_TOUCHSCREEN_CY8CTMA140 | 1 + .../x86/CONFIG_TOUCHSCREEN_CY8CTMG110 | 1 + .../x86/CONFIG_TOUCHSCREEN_CYTTSP4_CORE | 1 + .../x86/CONFIG_TOUCHSCREEN_CYTTSP5 | 1 + .../x86/CONFIG_TOUCHSCREEN_CYTTSP_CORE | 1 + .../x86/CONFIG_TOUCHSCREEN_DYNAPRO | 1 + .../x86/CONFIG_TOUCHSCREEN_EDT_FT5X06 | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EETI | 1 + .../x86/CONFIG_TOUCHSCREEN_EGALAX_SERIAL | 1 + .../x86/CONFIG_TOUCHSCREEN_EKTF2127 | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELAN | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELO | 1 + .../x86/CONFIG_TOUCHSCREEN_EXC3000 | 1 + .../x86/CONFIG_TOUCHSCREEN_FUJITSU | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GOODIX | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GUNZE | 1 + .../x86/CONFIG_TOUCHSCREEN_HAMPSHIRE | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIDEEP | 1 + .../x86/CONFIG_TOUCHSCREEN_HIMAX_HX83112B | 1 + .../x86/CONFIG_TOUCHSCREEN_HYCON_HY46XX | 1 + .../x86/CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX | 1 + .../x86/CONFIG_TOUCHSCREEN_ILI210X | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILITEK | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IMAGIS | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_INEXIO | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS5XX | 1 + .../x86/CONFIG_TOUCHSCREEN_IQS7211 | 1 + .../x86/CONFIG_TOUCHSCREEN_MAX11801 | 1 + .../x86/CONFIG_TOUCHSCREEN_MCS5000 | 1 + .../x86/CONFIG_TOUCHSCREEN_MELFAS_MIP4 | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MMS114 | 1 + .../x86/CONFIG_TOUCHSCREEN_MSG2638 | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MTOUCH | 1 + .../x86/CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS | 1 + .../x86/CONFIG_TOUCHSCREEN_PENMOUNT | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PIXCIR | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_RM_TS | 1 + .../x86/CONFIG_TOUCHSCREEN_ROHM_BU21023 | 1 + 
.../x86/CONFIG_TOUCHSCREEN_S6SY761 | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SILEAD | 1 + .../x86/CONFIG_TOUCHSCREEN_SIS_I2C | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ST1232 | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_STMFTS | 1 + .../x86/CONFIG_TOUCHSCREEN_SURFACE3_SPI | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SX8654 | 1 + .../x86/CONFIG_TOUCHSCREEN_TOUCHIT213 | 1 + .../x86/CONFIG_TOUCHSCREEN_TOUCHRIGHT | 1 + .../x86/CONFIG_TOUCHSCREEN_TOUCHWIN | 1 + .../x86/CONFIG_TOUCHSCREEN_TPS6507X | 1 + .../x86/CONFIG_TOUCHSCREEN_TSC2004 | 1 + .../x86/CONFIG_TOUCHSCREEN_TSC2005 | 1 + .../x86/CONFIG_TOUCHSCREEN_TSC2007 | 1 + .../x86/CONFIG_TOUCHSCREEN_TSC_SERIO | 1 + .../x86/CONFIG_TOUCHSCREEN_USB_COMPOSITE | 1 + .../x86/CONFIG_TOUCHSCREEN_WACOM_I2C | 1 + .../x86/CONFIG_TOUCHSCREEN_WACOM_W8001 | 1 + .../x86/CONFIG_TOUCHSCREEN_WDT87XX_I2C | 1 + .../x86/CONFIG_TOUCHSCREEN_ZET6223 | 1 + .../L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZFORCE | 1 + .../x86/CONFIG_TOUCHSCREEN_ZINITIX | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_TPL0102 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_TQMX86_WDT | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2583 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2591 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2772 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL4531 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS01 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS02D | 1 + .../L2-OPTIONAL/x86/CONFIG_TYPEC_FUSB302 | 1 + .../L2-OPTIONAL/x86/CONFIG_UEFI_CPER_X86 | 1 + .../L2-OPTIONAL/x86/CONFIG_UNIXWARE_DISKLABEL | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_US5182D | 1 + .../L2-OPTIONAL/x86/CONFIG_USB_CHAOSKEY | 1 + .../x86/CONFIG_USB_EHCI_HCD_PLATFORM | 1 + .../L2-OPTIONAL/x86/CONFIG_USB_LGM_PHY | 1 + .../L2-OPTIONAL/x86/CONFIG_USB_NET_RNDIS_WLAN | 1 + .../L2-OPTIONAL/x86/CONFIG_USB_NET_SR9700 | 1 + .../L2-OPTIONAL/x86/CONFIG_USB_PULSE8_CEC | 1 + .../L2-OPTIONAL/x86/CONFIG_USB_RAINSHADOW_CEC | 1 + .../x86/CONFIG_USB_ROLES_INTEL_XHCI | 1 + 
.../L2-OPTIONAL/x86/CONFIG_USB_SERIAL_CONSOLE | 1 + .../x86/CONFIG_USB_SERIAL_MOS7715_PARPORT | 1 + .../L2-OPTIONAL/x86/CONFIG_USB_SERIAL_SIMPLE | 1 + .../L2-OPTIONAL/x86/CONFIG_USB_SPEEDTOUCH | 1 + .../L2-OPTIONAL/x86/CONFIG_USB_UHCI_HCD | 1 + .../L2-OPTIONAL/x86/CONFIG_USB_ULPI_BUS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_USB_USS720 | 1 + .../L2-OPTIONAL/x86/CONFIG_USB_XEN_HCD | 1 + .../L2-OPTIONAL/x86/CONFIG_USB_XHCI_DBGCAP | 1 + .../L2-OPTIONAL/x86/CONFIG_USB_XHCI_PLATFORM | 1 + .../x86/CONFIG_USER_RETURN_NOTIFIER | 1 + .../x86/CONFIG_USER_STACKTRACE_SUPPORT | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_UV_MMTIMER | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_UV_SYSFS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_VCNL3020 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_VCNL4000 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_VCNL4035 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_VEML6030 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_VEML6070 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_VF610_ADC | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_VF610_DAC | 1 + .../L2-OPTIONAL/x86/CONFIG_VFIO_DEVICE_CDEV | 1 + .../L2-OPTIONAL/x86/CONFIG_VFIO_PCI_VGA | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_VIA_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_VIPERBOARD_ADC | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_VIRT_WIFI | 1 + .../L2-OPTIONAL/x86/CONFIG_VL53L0X_I2C | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_VL6180 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_VMAP_PFN | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_VMGENID | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_VZ89X | 1 + .../L2-OPTIONAL/x86/CONFIG_W83627HF_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_W83877F_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_W83977F_WDT | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_WAFER_WDT | 1 + .../L2-OPTIONAL/x86/CONFIG_WANT_DEV_COREDUMP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_WCN36XX | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_WFX | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_WIL6210 | 1 + .../L2-OPTIONAL/x86/CONFIG_WILC1000_SDIO | 1 + 
.../L2-OPTIONAL/x86/CONFIG_WILC1000_SPI | 1 + .../L2-OPTIONAL/x86/CONFIG_WINMATE_FM07_KEYS | 1 + .../L2-OPTIONAL/x86/CONFIG_WIRELESS_HOTKEY | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN | 1 + .../L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ADMTEK | 1 + .../L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATH | 1 + .../L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATMEL | 1 + .../x86/CONFIG_WLAN_VENDOR_BROADCOM | 1 + .../L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_CISCO | 1 + .../L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTEL | 1 + .../x86/CONFIG_WLAN_VENDOR_INTERSIL | 1 + .../x86/CONFIG_WLAN_VENDOR_MARVELL | 1 + .../x86/CONFIG_WLAN_VENDOR_MEDIATEK | 1 + .../x86/CONFIG_WLAN_VENDOR_MICROCHIP | 1 + .../x86/CONFIG_WLAN_VENDOR_PURELIFI | 1 + .../x86/CONFIG_WLAN_VENDOR_QUANTENNA | 1 + .../L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RALINK | 1 + .../x86/CONFIG_WLAN_VENDOR_REALTEK | 1 + .../L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RSI | 1 + .../L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_SILABS | 1 + .../L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ST | 1 + .../L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_TI | 1 + .../L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ZYDAS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_WMI_BMOF | 1 + .../x86/CONFIG_X86_ACPI_CPUFREQ_CPB | 1 + .../L2-OPTIONAL/x86/CONFIG_X86_AMD_PSTATE_UT | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CET | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_X86_CMOV | 1 + .../L2-OPTIONAL/x86/CONFIG_X86_DEBUGCTLMSR | 1 + .../x86/CONFIG_X86_HV_CALLBACK_VECTOR | 1 + .../x86/CONFIG_X86_INTERNODE_CACHE_SHIFT | 1 + .../L2-OPTIONAL/x86/CONFIG_X86_L1_CACHE_SHIFT | 1 + .../L2-OPTIONAL/x86/CONFIG_X86_MCE_THRESHOLD | 1 + .../L2-OPTIONAL/x86/CONFIG_X86_MEM_ENCRYPT | 1 + .../x86/CONFIG_X86_MINIMUM_CPU_FAMILY | 1 + .../L2-OPTIONAL/x86/CONFIG_X86_NEED_RELOCS | 1 + .../x86/CONFIG_X86_PLATFORM_DRIVERS_DELL | 1 + .../x86/CONFIG_X86_PLATFORM_DRIVERS_HP | 1 + .../L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY | 1 + .../x86/CONFIG_X86_PMEM_LEGACY_DEVICE | 1 + .../L2-OPTIONAL/x86/CONFIG_X86_POWERNOW_K8 | 1 + .../x86/CONFIG_X86_SPEEDSTEP_CENTRINO | 1 + 
.../L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_LIB | 1 + .../L2-OPTIONAL/x86/CONFIG_X86_THERMAL_VECTOR | 1 + .../x86/CONFIG_X86_USER_SHADOW_STACK | 1 + .../x86/CONFIG_X86_VMX_FEATURE_NAMES | 1 + .../L2-OPTIONAL/x86/CONFIG_X86_X32_ABI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_X9250 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_XENFS | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_XEN_ACPI | 1 + .../L2-OPTIONAL/x86/CONFIG_XEN_AUTO_XLATE | 1 + .../L2-OPTIONAL/x86/CONFIG_XEN_BACKEND | 1 + .../L2-OPTIONAL/x86/CONFIG_XEN_BALLOON | 1 + .../x86/CONFIG_XEN_BLKDEV_FRONTEND | 1 + .../L2-OPTIONAL/x86/CONFIG_XEN_COMPAT_XENFS | 1 + .../L2-OPTIONAL/x86/CONFIG_XEN_DEBUG_FS | 1 + .../L2-OPTIONAL/x86/CONFIG_XEN_DEV_EVTCHN | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_EFI | 1 + .../L2-OPTIONAL/x86/CONFIG_XEN_FBDEV_FRONTEND | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_XEN_GNTDEV | 1 + .../x86/CONFIG_XEN_GRANT_DEV_ALLOC | 1 + .../x86/CONFIG_XEN_GRANT_DMA_ALLOC | 1 + .../L2-OPTIONAL/x86/CONFIG_XEN_PRIVCMD | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PV | 1 + .../x86/CONFIG_XEN_PVCALLS_FRONTEND | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVH | 1 + .../L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_GUEST | 1 + .../L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_SMP | 1 + .../L2-OPTIONAL/x86/CONFIG_XEN_SAVE_RESTORE | 1 + .../L2-OPTIONAL/x86/CONFIG_XEN_SCSI_FRONTEND | 1 + .../L2-OPTIONAL/x86/CONFIG_XEN_SYS_HYPERVISOR | 1 + .../x86/CONFIG_XEN_UNPOPULATED_ALLOC | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_XEN_VIRTIO | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_WDT | 1 + .../x86/CONFIG_XEN_XENBUS_FRONTEND | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_XIAOMI_WMI | 1 + .../L2-OPTIONAL/x86/CONFIG_XILINX_XADC | 1 + .../L2-OPTIONAL/x86/CONFIG_YAMAHA_YAS530 | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_YOGABOOK | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_ZOPT2201 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ZPA2326 | 1 + .../configs/custom-overrides/64k/arm64.config | 2 + .../custom-overrides/debug/arm64.config | 32 +++++ 
.../custom-overrides/debug/default.config | 111 ++++++++++++++++++ .../configs/custom-overrides/debug/x86.config | 46 ++++++++ .../custom-overrides/gcov/default.config | 2 + .../kvm_modulize/arm64.config | 1 + anolis/configs/metadata/changelog/CONFIG_LSM | 6 + .../changelog/CONFIG_PREEMPT_VOLUNTARY | 3 + .../metadata/changelog/CONFIG_VIRT_PLAT_DEV | 3 + 8138 files changed, 8335 insertions(+) create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARCH_PHYTIUM create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64 create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_16K_PAGES create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_4K_PAGES create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_64K_PAGES create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_BTI create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_CNP create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_E0PD create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_HW_AFDBM create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_MPAM create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_MTE create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PAN create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PSEUDO_NMI create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PTR_AUTH create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_RAS_EXTN create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_SVE create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_TLB_RANGE create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_USE_LSE_ATOMICS create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_VA_BITS_39 create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CCN create mode 100644 
anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CMN create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CPU_RESCTRL create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_PHYTIUM_2500 create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V2M create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3 create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS_PCI create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU_ACPI create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3 create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3_PMU create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SPE_PMU create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_CORESIGHT create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_CPU_LITTLE_ENDIAN create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_DEFERRED_STRUCT_PAGE_INIT create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_EXT4_FS create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_EXTCON create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ_1000 create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ_250 create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_IRQ_TIME_ACCOUNTING create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_JBD2 create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_KVM create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_HOST_GENERIC create mode 100644 
anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_NONE create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_VOLUNTARY create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_PWM_ATMEL_TCB create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_PWM_XILINX create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_GPI_DMA create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_ICC_BWMON create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_LMH create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_MPM create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_RAMP_CTRL create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_RPM_MASTER_STATS create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SCM create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SPM create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SSC_BLOCK_BUS create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_UNMAP_KERNEL_AT_EL0 create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_BLK create mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_PCI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_64BIT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ACPI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_APEI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_IPMI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_NUMA create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_PROCESSOR create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ADVISE_SYSCALLS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_AIO create mode 100644 
anolis/configs/L0-MANDATORY/default/CONFIG_ALLOW_DEV_COREDUMP create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ASYMMETRIC_KEY_TYPE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_AUDIT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_AUTOFS_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_AUXILIARY_BUS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BASE_FULL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BFQ_GROUP_IOSCHED create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BINFMT_ELF create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BINFMT_SCRIPT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BLK_CGROUP create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BLK_CGROUP_IOCOST create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEBUG_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_INITRD create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_IO_TRACE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_NVME create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_THROTTLING create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BLK_MQ_PCI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BLK_MQ_VIRTIO create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BLK_RQ_ALLOC_TIME create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BLOCK create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BONDING create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BPF create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BPF_EVENTS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BPF_JIT create mode 100644 
anolis/configs/L0-MANDATORY/default/CONFIG_BPF_LSM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BPF_SYSCALL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BPF_UNPRIV_DEFAULT_OFF create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BRIDGE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_BUG create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CACHEFILES create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CACHEFILES_ONDEMAND create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CACHESTAT_SYSCALL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CC_OPTIMIZE_FOR_SIZE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CFS_BANDWIDTH create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CGROUPS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_BPF create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_CPUACCT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_DEVICE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_HUGETLB create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_PERF create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_PIDS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_RDMA create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_SCHED create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CHECKPOINT_RESTORE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CK_KABI_RESERVE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CK_KABI_SIZE_ALIGN_CHECKS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_COMMON_CLK create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_COMPACTION create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CONFIGFS_FS create mode 100644 
anolis/configs/L0-MANDATORY/default/CONFIG_CONSOLE_TRANSLATIONS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_COREDUMP create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CPUSETS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CPU_FREQ create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CPU_IDLE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CPU_ISOLATION create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRAMFS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRASH_CORE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRASH_DUMP create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AEAD create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AEAD2 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AES create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AKCIPHER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AKCIPHER2 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI2 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_GCM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_GHASH create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_HASH create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_HASH2 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_LIB_AES create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_LIB_SHA256 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_MANAGER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_MANAGER2 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RNG create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RNG2 
create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RSA create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SHA256 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SKCIPHER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SKCIPHER2 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM2 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM3 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM3_GENERIC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM4 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM4_GENERIC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CXL_BUS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DAX create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_BUGVERBOSE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO_BTF create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO_DWARF4 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_KERNEL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_MISC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_SECTION_MISMATCH create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DEFAULT_CUBIC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DEFAULT_SECURITY_DAC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DETECT_HUNG_TASK create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DEVTMPFS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DEVTMPFS_MOUNT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DMADEVICES create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DMI create mode 100644 
anolis/configs/L0-MANDATORY/default/CONFIG_DNOTIFY create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DNS_RESOLVER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DRM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG_CORE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_FTRACE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_EDAC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_EFI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ELF_CORE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_EPOLL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_EROFS_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ETHTOOL_NETLINK create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_EVENTFD create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_EVM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_EXPERT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_EXT3_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS_POSIX_ACL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS_SECURITY create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FAIR_GROUP_SCHED create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FANOTIFY create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FAT_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FB create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FHANDLE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FILE_LOCKING create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FRAMEBUFFER_CONSOLE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FSCACHE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FSNOTIFY create mode 100644 
anolis/configs/L0-MANDATORY/default/CONFIG_FS_DAX create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FTRACE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FTRACE_SYSCALLS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FUNCTION_GRAPH_TRACER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FUNCTION_TRACER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FUSE_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FUTEX create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_FW_LOADER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_GENERIC_GETTIMEOFDAY create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_GENERIC_VDSO_TIME_NS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_HARDLOCKUP_DETECTOR create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_HDMI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_HIGH_RES_TIMERS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_CPU create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_PCI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_PCI_PCIE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_HUGETLBFS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_HUGETLB_PAGE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_HWMON create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_HW_RANDOM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_HZ_100 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_HZ_300 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_HZ_PERIODIC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_I2C create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_I40E create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_I40EVF create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ICE create mode 100644 
anolis/configs/L0-MANDATORY/default/CONFIG_IDLE_PAGE_TRACKING create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IGB create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IGBVF create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IMA create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_INET create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_INET_DIAG create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_INET_MPTCP_DIAG create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_INET_TCP_DIAG create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_INET_UDP_DIAG create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_INFINIBAND create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_INPUT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_INPUT_KEYBOARD create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_INPUT_MOUSE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_INTEGRITY create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IOMMU_SUPPORT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IOSCHED_BFQ create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IO_URING create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IPC_NS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IPMI_HANDLER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IPV6 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_ARPTABLES create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_RAW create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_SECURITY create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IP_SET create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IP_VS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IP_VS_IPV6 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ISO9660_FS create mode 100644 
anolis/configs/L0-MANDATORY/default/CONFIG_IXGBE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_IXGBEVF create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_JUMP_LABEL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_KALLSYMS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_KALLSYMS_ALL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_KCMP create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_KERNFS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC_CORE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC_FILE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_KFENCE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_KGDB create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_KGDB_SERIAL_CONSOLE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_KGDB_TESTS_ON_BOOT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_KPROBES create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_KPROBE_EVENTS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_KRETPROBES create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_LOCKD create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_LOCKD_V4 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_LOCKUP_DETECTOR create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_LRU_GEN create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MAGIC_SYSRQ create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MD create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MEMBARRIER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MEMCG create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_FAILURE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_HOTPLUG create mode 100644 
anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MIGRATION create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MISC_FILESYSTEMS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MLX4_EN create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MLX5_CORE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MLX5_CORE_EN create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MLXSW_CORE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MMU create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MODPROBE_PATH create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MODULES create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_SIG create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_SRCVERSION_ALL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_UNLOAD create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MODVERSIONS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MPTCP create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MQ_IOSCHED_DEADLINE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MQ_IOSCHED_KYBER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_MULTIUSER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NAMESPACES create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NETDEVICES create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER_ADVANCED create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER_INGRESS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NETLINK_DIAG create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NETWORK_FILESYSTEMS create mode 100644 
anolis/configs/L0-MANDATORY/default/CONFIG_NET_ACT_GACT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET_ACT_POLICE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET_CLS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET_CLS_ACT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET_CORE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET_FAILOVER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET_KEY create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET_NS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCHED create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCH_FQ_CODEL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCH_INGRESS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_BROADCOM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_INTEL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_MELLANOX create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_WANGXUN create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NFSD create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NFSD_V4 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NFS_COMMON create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NFS_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NFS_FSCACHE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V3 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4_1 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4_2 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NF_CONNTRACK create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NF_NAT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES create mode 100644 
anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_INET create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_IPV4 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_IPV6 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NGBE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NLS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NLS_ASCII create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NLS_CODEPAGE_936 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NLS_CODEPAGE_950 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NLS_DEFAULT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NLS_UTF8 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ_FULL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ_IDLE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NR_CPUS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NTFS_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NUMA create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NVME_CORE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NVME_FABRICS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NVME_RDMA create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_NVME_TCP create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_OVERLAY_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PACKET create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PAGE_IDLE_FLAG create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PANIC_ON_OOPS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PANIC_TIMEOUT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PARAVIRT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PARTITION_ADVANCED create mode 100644 
anolis/configs/L0-MANDATORY/default/CONFIG_PCI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PCIEPORTBUS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PCI_IOV create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PCI_MSI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PCI_STUB create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PERF_EVENTS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PGTABLE_LEVELS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PID_NS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PKCS7_MESSAGE_PARSER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_POSIX_TIMERS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPTION create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_BUILD create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_DYNAMIC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_RCU create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_TRACER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK_INDEX create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK_TIME create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PROBE_EVENTS_BTF_ARGS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PROC_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PROC_KCORE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PROC_PAGE_MONITOR create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PROC_SYSCTL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PROC_VMCORE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PSE_CONTROLLER create mode 100644 
anolis/configs/L0-MANDATORY/default/CONFIG_PSTORE_BLK create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PSTORE_DEFAULT_KMSG_BYTES create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_MOCK create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_OCP create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_OPTIONAL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC_MMIO create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC_PCI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PWM_CLK create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_PWM_DWC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_QUOTA create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_RANDOMIZE_BASE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_FULL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_NONE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_PERFORMANCE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_RAS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_RELAY create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_RELOCATABLE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_RESCTRL_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_RESET_CONTROLLER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_RPS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_RSEQ create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_RTC_CLASS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SCHEDSTATS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_AUTOGROUP create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_CORE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_DEBUG 
create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_MC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SCSI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SECCOMP create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SECCOMP_FILTER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SECONDARY_TRUSTED_KEYRING create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SECURITYFS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_INFINIBAND create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_NETWORK create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_NETWORK_XFRM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_PATH create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_SELINUX create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_SELINUX_BOOTPARAM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SERIO create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SHMEM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SIGNALFD create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SLUB create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_CPU_PARTIAL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_DEBUG create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_DEBUG_ON create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_STATS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SMC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SMP create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SOFTLOCKUP_DETECTOR create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SPARSEMEM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SPARSEMEM_VMEMMAP create mode 100644 
anolis/configs/L0-MANDATORY/default/CONFIG_SPI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SQUASHFS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_STACKTRACE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_STM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_STREAM_PARSER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_STRICT_KERNEL_RWX create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_STRICT_MODULE_RWX create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SUNRPC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SWAP create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SYN_COOKIES create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SYSCTL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SYSFS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SYSTEM_TRUSTED_KEYRING create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SYSTEM_TRUSTED_KEYS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_SYSVIPC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_TAP create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_TARGET_CORE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_TCG_TIS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_TCG_TPM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_ADVANCED create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_BBR create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_CUBIC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_THERMAL create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_TIMERFD create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_TLS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_TMPFS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_TRACEPOINTS create mode 100644 
anolis/configs/L0-MANDATORY/default/CONFIG_TRANSPARENT_HUGEPAGE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_TTY create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_TUN create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_TXGBE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_UIO create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_UNIX create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_UNIX98_PTYS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_UPROBE_EVENTS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_USB create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_USB_SUPPORT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_USERFAULTFD create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_UTS_NS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VETH create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VFAT_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VFIO create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VFIO_PCI create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VGA_ARB create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VHOST_NET create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VHOST_VSOCK create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_BALLOON create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_CONSOLE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MEM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MENU create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MMIO create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_NET create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_PMEM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VIRTUALIZATION create 
mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VIRT_CPU_ACCOUNTING_GEN create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VMAP_STACK create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VM_EVENT_COUNTERS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VSOCKETS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VT create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_VT_CONSOLE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_WATCHDOG create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_X509_CERTIFICATE_PARSER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_XDP_SOCKETS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_XFRM_USER create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_XFS_FS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_XPS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_XZ_DEC create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DEVICE create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DMA create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DMA32 create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ZRAM create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ZSTD_COMPRESS create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_ZSTD_DECOMPRESS create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_AMD_NUMA create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_COMPAT_VDSO create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_AMD create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_HYGON create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_INTEL create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_ZHAOXIN create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SIMD create mode 100644 
anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM3_AVX_X86_64 create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64 create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64 create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_DEFERRED_STRUCT_PAGE_INIT create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_EXT4_FS create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_EXTCON create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_HPET_TIMER create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_HZ create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_1000 create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_250 create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_IA32_FEAT_CTL create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_INSTRUCTION_DECODER create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IDLE create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IOMMU create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IOMMU_SVM create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_TDX_GUEST create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_IRQ_REMAP create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_IRQ_TIME_ACCOUNTING create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_JBD2 create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_KPROBES_ON_FTRACE create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_KVM create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_AMD create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_GUEST create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_INTEL create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_LIVEPATCH create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_MTRR create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_OPTPROBES create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_PAGE_TABLE_ISOLATION create mode 100644 
anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_PHYSICAL_START create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_PREEMPT_NONE create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_PREEMPT_VOLUNTARY create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_RANDOMIZE_MEMORY create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_RETHUNK create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_RETPOLINE create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_SEV_GUEST create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_FRAME_POINTER create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_ORC create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_VGA_CONSOLE create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_BLK create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_PCI create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86 create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_5LEVEL create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64 create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64_ACPI_NUMA create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64_SMP create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_CMPXCHG64 create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_CPU_RESCTRL create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_IOPL_IOPERM create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_IO_APIC create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_LOCAL_APIC create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE_AMD create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE_INTEL create mode 100644 
anolis/configs/L0-MANDATORY/x86/CONFIG_X86_PAT create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_PLATFORM_DEVICES create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_SGX create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_TSC create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_UMIP create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_VSYSCALL_EMULATION create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_X86_X2APIC create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_AGDI create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_BGRT create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_CPPC_CPUFREQ create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_DOCK create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_EC_DEBUGFS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_REDUCED_HARDWARE_ONLY create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_TAD create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_XGENE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ALIBABA_UNCORE_DRW_PMU create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_FORCE_MAX_ORDER create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ACPI_PARKING_PROTOCOL create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_AMU_EXTN create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_DEBUG_PRIORITY_MASKING create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_EPAN create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1024718 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1165522 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1286807 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1319367 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1418040 create mode 100644 
anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1463225 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1508412 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1530923 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1542419 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1742098 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2051678 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2054223 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2067961 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2077057 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2441007 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2441009 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2457168 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2645198 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2658417 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2966298 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3117295 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_819472 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_824069 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_826319 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_827319 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_832075 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_834220 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_843419 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_845719 create mode 100644 
anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_PMEM create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SME create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SW_TTBR0_PAN create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_TAGGED_ADDR_ABI create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARMV8_DEPRECATED create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_DMC620_PMU create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_DSU_PMU create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_MHU_V2 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_PMUV3 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_CPUFREQ create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_POWER_DOMAIN create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_PROTOCOL create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SDE_INTERFACE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SMC_WATCHDOG create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SP805_WATCHDOG create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_22375 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_23144 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_23154 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_27456 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_30115 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_TX2_ERRATUM_219 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMA_SIZE_MBYTES create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE create mode 100644 
anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE_FORCE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE_FROM_BOOTLOADER create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CNIC create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CATU create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CPU_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI_INTEGRATION_REGS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_LINKS_AND_SINKS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_LINK_AND_SINK_TMC create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SINK_ETBV10 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SINK_TPIU create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SOURCE_ETM4X create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_STM create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPUMASK_OFFSTACK create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_BIG_ENDIAN create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_GOV_SCHEDUTIL create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_THERMAL create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_THERMAL create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_BS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE_BLK 
create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE_CCM create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_NEON_BLK create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CHACHA20_NEON create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CRCT10DIF_ARM64_CE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CURVE25519 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_CCP create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C3XXX create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C3XXXVF create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C62X create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C62XVF create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCC create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_GHASH_ARM64_CE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_POLY1305_NEON create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA1_ARM64_CE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA256_ARM64 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA2_ARM64_CE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_ARM64_CE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_NEON create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_BLK create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_CCM create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_GCM create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_NEON_BLK create mode 100644 
anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEBUG_PERF_USE_VMALLOC create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVICE_PRIVATE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVPORT create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_HMEM create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_KMEM create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_PMEM create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DLM_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_CIK create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_USERPTR create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMD_ACP create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_DP_AUX_CHARDEV create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_PHYTIUM create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_VMWGFX create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DWC_PCIE_PMU create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DW_DMAC_PCI create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_EDAC_XGENE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_EFI_ARMSTUB_DTB_LOADER create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_EFI_COCO_SECRET create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ETM4X_IMPDEF_FEATURE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_EXTCON_GPIO create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_SIMPLE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_SSD1307 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_FIRMWARE_EDID create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUJITSU_ERRATUM_010001 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUNCTION_PROFILER create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_FW_LOADER_USER_HELPER 
create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_ARCH_NUMA create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_IOREMAP create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_PHY create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_GFS2_FS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_GENERIC_PLATFORM create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_HISI create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISILICON_ERRATUM_161600802 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_DMA create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_THERMAL create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_DCB create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_ENET create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGEVF create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_DSAF create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_ENET create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_MDIO create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HSA_AMD create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE_EEPROM create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_I40E_DCB create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_INDIRECT_PIO create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_INPUT_MOUSEDEV create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_INTEL_IDMA64 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_INTEL_TH create mode 100644 
anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_DEFAULT_DMA_STRICT create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_DEFAULT_PASSTHROUGH create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ISCSI_IBFT create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_KDB_DEFAULT_ENABLE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEXEC_IMAGE_VERIFY_SIG create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEYBOARD_ATKBD create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_KUNPENG_HCCS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_KUSER_HELPERS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_LOG_BUF_SHIFT create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_MELLANOX_PLATFORM create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_MLX5_CORE_IPOIB create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_MOUSE_PS2 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_NET_VENDOR_HISILICON create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_PID_IN_CONTEXTIDR create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_POWERCAP create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_1003 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_1009 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_E1041 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_QDF2400_ERRATUM_0065 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_MODULE_REGION_FULL create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_RELR create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_RODATA_FULL_DEFAULT_ENABLED create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_RTC_DRV_EFI create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_SATA_AHCI_SEATTLE create mode 100644 
anolis/configs/L1-RECOMMEND/arm64/CONFIG_SDEI_WATCHDOG create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_SERIO_AMBAKMI create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_SOCIONEXT_SYNQUACER_PREITS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_CADENCE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DESIGNWARE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DW_MMIO create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_HISI_KUNPENG create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_PL022 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_QUP create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_XLP create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_SQUASHFS_LZ4 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_STAGING create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_DUMMY create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_PROTO_BASIC create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_PROTO_SYS_T create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_CONSOLE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_FTRACE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_HEARTBEAT create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_INFINEON create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCP_CONG_CDG create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_GOV_BANG_BANG create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_MMIO create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_OF create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_WRITABLE_TRIPS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM_BASE create mode 100644 
anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB_LEGACY create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRT_DRIVERS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_VMWARE_VMCI create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_VMXNET3 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_WDAT_WDT create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_XEN create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_YITIAN_CPER_RAWDATA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_AC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_EINJ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_ERST_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_GHES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_MEMORY_FAILURE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_PCIEAER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_BATTERY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_BUTTON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CONFIGFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CONTAINER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CUSTOM_METHOD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_DEBUGGER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_FAN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HMAT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HOTPLUG_MEMORY create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_NFIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PCC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PCI_SLOT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PRMT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_SPCR_TABLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_TABLE_UPGRADE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_THERMAL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_VIDEO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_CORE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_MEMCPY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_PQ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_RAID6_RECOV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_RAID6_TEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_TX_DMA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_XOR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ATA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_ACPI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_BMDMA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_FORCE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_GENERIC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_OVER_ETH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_PIIX create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_SFF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_VERBOSE_ERROR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ATM_DRIVERS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ATOMIC64_SELFTEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BALLOON_COMPACTION create 
mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BCACHE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BCMGENET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BFQ_CGROUP_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BINFMT_MISC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLKDEV_UBLK_LEGACY_OPCODES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEBUG_FS_ZONED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_BSG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_BSGLIB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_DM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_DRBD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_INTEGRITY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_INTEGRITY_T10 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP_MIN_COUNT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_MD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_NBD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_NULL_BLK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_PCIESSD_MTIP32XX create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM_COUNT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM_SIZE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RBD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_SD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_SR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_THROTTLING_LOW create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_UBLK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_ZONED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_WBT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2X create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2X_SRIOV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_DCB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_FLOWER_OFFLOAD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_HWMON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_SRIOV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTPARAM_HARDLOCKUP_PANIC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTTIME_TRACING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BOOT_CONFIG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BOOT_PRINTK_DELAY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BPFILTER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_JIT_ALWAYS_ON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_KPROBE_OVERRIDE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_STREAM_PARSER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRANCH_PROFILE_NONE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_802_3 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_AMONG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_ARP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_ARPREPLY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_BROUTE create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_DNAT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP6 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_LIMIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_LOG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_MARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_MARK_T create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_NFLOG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_PKTTYPE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_REDIRECT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_SNAT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_STP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_T_FILTER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_T_NAT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_VLAN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_IGMP_SNOOPING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_MRP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_NETFILTER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_NF_EBTABLES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_VLAN_FILTERING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BSD_PROCESS_ACCT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BSD_PROCESS_ACCT_V3 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BTRFS_FS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BUG_ON_DATA_CORRUPTION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_BUILD_SALT create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_CACHEFILES_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD_BUFFERS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD_WCACHE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FSCACHE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS_POSIX_ACL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS_SECURITY_LABEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_FREEZER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_NET_CLASSID create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_NET_PRIO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_WRITEBACK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_SCH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_SG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_ST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CLS_U32_MARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CLS_U32_PERF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CMA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_ALIGNMENT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_AREAS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_DEBUGFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MAX create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MBYTES create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MIN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_PERCENTAGE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT_32BIT_TIME create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT_BRK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_COMPILE_TEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CONNECTOR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CONSOLE_LOGLEVEL_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CONSOLE_LOGLEVEL_QUIET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_CONSERVATIVE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_ONDEMAND create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_PERFORMANCE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_POWERSAVE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_USERSPACE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_STAT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_LADDER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_MENU create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_TEO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRAMFS_BLOCKDEV create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_CRAMFS_MTD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CROSS_MEMORY_ATTACH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_842 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ACOMP2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ADIANTUM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AEGIS128 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AES_TI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ANSI_CPRNG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ANUBIS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ARC4 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AUTHENC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLAKE2B create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLOWFISH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLOWFISH_COMMON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAMELLIA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST5 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST6 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST_COMMON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CBC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CCM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CFB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CHACHA20 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CHACHA20POLY1305 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CMAC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32C create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRCT10DIF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRYPTD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CTR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEFLATE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_VIRTIO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_CTR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_HASH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_HMAC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_MENU create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECDH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECDSA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECHAINIV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ESSIV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_FCRYPT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_FIPS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HASH_INFO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HMAC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HW create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_JITTERENTROPY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KEYWRAP create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KHAZAD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KPP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KPP2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LRW create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4HC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MANAGER_DISABLE_TESTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MANAGER_EXTRA_TESTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MD4 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MD5 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MICHAEL_MIC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_NULL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_NULL2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_OFB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_PCBC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_PCRYPT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_POLY1305 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RMD160 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RNG_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEQIV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SERPENT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA1 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA3 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA512 create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_STATS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_STREEBOG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TEA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TWOFISH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TWOFISH_COMMON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_AEAD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_HASH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_RNG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_RNG_CAVP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_SKCIPHER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_VMAC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_WP512 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XCBC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XXHASH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ZSTD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CUSE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_ACPI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_MEM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_MEM_RAW_COMMANDS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PCI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PMEM create mode 
100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PMU create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PORT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_REGION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_SUSPEND create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_DBGFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_PADDR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_VADDR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DCB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FORCE_WEAK_PER_CPU create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_ALLOW_ALL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_ALLOW_NONE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_DISALLOW_MOUNT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_BTF_MODULES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_REDUCED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_SPLIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_KERNEL_DC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_KMEMLEAK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_LIST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_MEMORY_INIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_NOTIFIERS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PAGEALLOC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PAGE_REF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PLIST create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_RODATA_TEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_SG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_WX create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_CODEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_FQ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_FQ_CODEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_HUNG_TASK_TIMEOUT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_MMAP_MIN_ADDR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_NET_SCH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_PFIFO_FAST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_RENO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_SECURITY_SELINUX create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_SFQ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_TCP_CONG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEVMEM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_CXL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_HMEM_DEVICES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DLM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DMADEVICES_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DMATEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_API_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_CMA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_ENGINE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DMIID create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DMI_SYSFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CACHE create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CACHE_SMQ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CRYPT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_DELAY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ERA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_FLAKEY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_INTEGRITY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_LOG_USERSPACE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_LOG_WRITES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MIRROR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH_QL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH_ST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_RAID create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_SNAPSHOT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_SWITCH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_THIN_PROVISIONING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_UEVENT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_VERITY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_WRITECACHE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ZERO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ZONED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMDGPU create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMDGPU_SI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMD_DC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_BOCHS create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_CIRRUS_QEMU create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_FBDEV_EMULATION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_FBDEV_OVERALLOC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_I2C_CH7006 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_INSPUR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_LOAD_EDID_FIRMWARE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_MGAG200 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU_BACKLIGHT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_QXL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_RADEON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_RADEON_USERPTR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_UDL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_VIRTIO_GPU create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DUMMY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_DW_DMAC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_E100 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_E1000 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_E1000E create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_GHES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_LEGACY_SYSFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EFIVAR_FS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_CUSTOM_SSDT_OVERLAYS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_SOFT_RESERVE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_STUB create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_VARS_PSTORE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ENCRYPTED_KEYS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ONDEMAND create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_POSIX_ACL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_SECURITY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_XATTR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP_DEFLATE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP_LZMA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_ADD_XATTRS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_ATTR_FSUUID create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_LOAD_X509 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_X509_PATH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EXFAT_FS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EXPORTFS_BLOCK_OPS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EXT2_FS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EXT4_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EXT4_USE_FOR_EXT2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EXTRA_FIRMWARE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FAILOVER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FANOTIFY_ACCESS_PERMISSIONS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_CODEPAGE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_IOCHARSET create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_UTF8 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FAULT_INJECTION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FB_EFI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FB_LS2K500 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FB_TILEBLITTING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FCOE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FIX_EARLYCON_MEM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FM10K create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FORTIFY_SOURCE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FRAMEBUFFER_CONSOLE_ROTATION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FRAME_WARN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FSCACHE_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FSCACHE_STATS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FTRACE_RECORD_RECURSION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FUSE_DAX create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_CTL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_FC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_LOGGING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_MAX_SGE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_SAS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_SPI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CACHE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS_CMDLINE create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_GACT_PROB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GCOV_KERNEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GDB_SCRIPTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_DEBUGFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_INJECTION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_PROBE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_SHOW create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GENEVE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GET_FREE_REGION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_DS4520 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_FXL6408 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_GENERIC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_LATCH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_SIM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_VIRTIO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GP_PCI1XXXX create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GUEST_PERF_EVENTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_GUP_TEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_HARDENED_USERCOPY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_HEADERS_INSTALL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATE_CALLBACKS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATION_SNAPSHOT_DEV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_HID create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_HID_SUPPORT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_HINIC create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_HIST_TRIGGERS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_HOTPLUG_PCI_ACPI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_HWLAT_TRACER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_HWPOISON_INJECT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_HW_RANDOM_TPM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_HYDCU_FIXUP_HEADER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_CHARDEV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_MUX create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_SMBUS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_I6300ESB_WDT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IFB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IGB_HWMON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IGC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IKCONFIG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IKCONFIG_PROC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IKHEADERS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ILLEGAL_POINTER_VALUE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_BOOTPARAM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_BUILD_POLICY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_MODSIG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_SIGNED_INIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_ARCH_POLICY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_BLACKLIST_KEYRING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA1 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA256 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA512 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SM3 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_TEMPLATE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_LOAD_X509 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_LSM_RULES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_MEASURE_PCR_IDX create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_NG_TEMPLATE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_READ_POLICY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_SIG_TEMPLATE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_WRITE_POLICY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_X509_PATH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_AH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESPINTCP create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESP_OFFLOAD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_IPCOMP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_TUNNEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_XFRM_TUNNEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INET_AH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INET_DIAG_DESTROY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESPINTCP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESP_OFFLOAD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INET_IPCOMP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INET_RAW_DIAG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ADDR_TRANS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ERDMA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB_CM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ISER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ISERT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_MTHCA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ON_DEMAND_PAGING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_RTRS_CLIENT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_RTRS_SERVER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_SRP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_SRPT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_USER_ACCESS create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_USER_MAD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INITRAMFS_SOURCE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INIT_ON_ALLOC_DEFAULT_ON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INIT_ON_FREE_DEFAULT_ON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INOTIFY_USER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INPUT_EVDEV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_ASYMMETRIC_KEYS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_AUDIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_PLATFORM_KEYRING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_SIGNATURE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_TRUSTED_KEYRING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_API create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DEBUGFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DEFAULT_DMA_LAZY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DMA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_IOVA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_IO_PGTABLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IO_STRICT_DEVMEM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_FILTER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_IPTABLES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MANGLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_AH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_EUI64 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_FRAG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_HL create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_IPV6HEADER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_MH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_OPTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_RPFILTER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_RT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_SRH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_NAT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_RAW create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_SECURITY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_HL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_MASQUERADE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_NPT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_REJECT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_SYNPROXY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_DEVICE_INTERFACE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_DMI_DECODE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PANIC_EVENT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PANIC_STRING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PLAT_DATA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_POWEROFF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_SI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_SSIF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_WATCHDOG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_GRE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ILA create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MIP6 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MROUTE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MROUTE_MULTIPLE_TABLES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MULTIPLE_TABLES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_NDISC_NODETYPE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_OPTIMISTIC_DAD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_PIMSM_V2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ROUTER_PREF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ROUTE_INFO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_RPL_LWTUNNEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SEG6_HMAC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SEG6_LWTUNNEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SIT_6RD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SUBTREES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_TUNNEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_VTI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPVLAN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IPVTAP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ADVANCED_ROUTER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_DCCP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_FIB_TRIE_STATS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MROUTE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MROUTE_MULTIPLE_TABLES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MULTICAST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MULTIPLE_TABLES create mode 
100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_ARPFILTER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_ARP_MANGLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_FILTER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_IPTABLES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MANGLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_AH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_ECN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_RPFILTER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_TTL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_NAT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_ECN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_MASQUERADE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_NETMAP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_REDIRECT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_REJECT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_SYNPROXY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_TTL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_PIMSM_V1 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_PIMSM_V2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ROUTE_MULTIPATH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ROUTE_VERBOSE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_IP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_IPMAC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_PORT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IP create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPMAC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPMARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORTIP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORTNET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_MAC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETIFACE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETNET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETPORT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETPORTNET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_LIST_SET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_MAX create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_DH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_FO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_FTP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LBLC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LBLCR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_MH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_MH_TAB_INDEX create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_NFCT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_NQ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_OVF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_PE_SIP create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_RR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SH_TAB_BITS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_TAB_BITS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_WLC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_WRR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_BOOT_SYSFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_TARGET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_TCP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBEVF_IPSEC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_DCB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_HWMON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_IPSEC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_JBD2_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_JOLIET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KASAN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KCOV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_CONTINUE_CATASTROPHIC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_KEYBOARD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KEXEC_SIG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KEYS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KEYS_REQUEST_CACHE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KEY_DH_OPERATIONS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_DEFERRABLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_NUM_OBJECTS create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_SAMPLE_INTERVAL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_STRESS_TEST_FAULTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_HONOUR_BLOCKLIST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_KDB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_TESTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KPROBE_EVENTS_ON_NOTRACE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_KSM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LDISC_AUTOLOAD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LIBFC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LIBFCOE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LIST_HARDENED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LOAD_UEFI_KEYS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LOCALVERSION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LOCALVERSION_AUTO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LOCKDEP_SUPPORT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LOCK_EVENT_COUNTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LOG_CPU_MAX_BUF_SHIFT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LOOPBACK_TARGET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LRU_GEN_ENABLED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LRU_GEN_STATS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LSM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LSM_MMAP_MIN_ADDR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LTO_NONE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LWTUNNEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_LWTUNNEL_BPF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MACSEC create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_MACVLAN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MACVTAP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_SERIAL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MAX_SKB_FRAGS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MD_AUTODETECT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MD_CLUSTER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MD_FAULTY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MD_LINEAR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MD_MULTIPATH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID0 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID1 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID10 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID456 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_LEGACY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_NEWGEN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_SAS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY_BALLOON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY_HOTREMOVE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MESSAGE_LOGLEVEL_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_CORE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_CORE_GEN2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_EN_DCB create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_BRIDGE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_CLS_ACT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_CORE_EN_DCB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_EN_ARFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_EN_RXNFC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_ESWITCH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_FPGA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_INFINIBAND create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_MPFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_SF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_SW_STEERING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_TC_CT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_TC_SAMPLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLXFW create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_CORE_HWMON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_CORE_THERMAL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_I2C create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_MINIMAL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_PCI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_SPECTRUM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_SPECTRUM_DCB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_FORCE_LOAD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_FORCE_UNLOAD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_ALL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_FORCE create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_HASH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_KEY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA1 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA224 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA256 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA384 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA512 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS_IPTUNNEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS_ROUTING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MPTCP_IPV6 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MSDOS_FS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETCONSOLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETCONSOLE_DYNAMIC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETDEVSIM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_CONNCOUNT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_EGRESS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_FAMILY_ARP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_FAMILY_BRIDGE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_ACCT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_GLUE_CT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_LOG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_OSF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_QUEUE create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_SYNPROXY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XTABLES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_CONNMARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ADDRTYPE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_BPF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CGROUP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CLUSTER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_COMMENT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNBYTES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNLABEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNLIMIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNMARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNTRACK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CPU create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DCCP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DEVGROUP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DSCP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ECN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ESP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HASHLIMIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HELPER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HL create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPCOMP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPRANGE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPVS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_L2TP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_LENGTH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_LIMIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MAC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MULTIPORT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_NFACCT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_OSF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_OWNER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_PHYSDEV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_PKTTYPE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_POLICY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_QUOTA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_RATEEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_REALM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_RECENT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_SCTP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_SOCKET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STATE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STATISTIC 
create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STRING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_TCPMSS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_TIME create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_U32 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_NAT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_SET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_AUDIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CHECKSUM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CLASSIFY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CONNMARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CONNSECMARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_DSCP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_HL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_HMARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_IDLETIMER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_LED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_LOG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_MARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_MASQUERADE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NETMAP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NFLOG create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NFQUEUE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NOTRACK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_RATEEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_REDIRECT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_SECMARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TCPMSS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TEE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TPROXY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TRACE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFS_STATS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETFS_SUPPORT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETLABEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETWORK_PHY_TIMESTAMPING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NETWORK_SECMARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_BPF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CONNMARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CSUM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CTINFO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_GATE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_IFE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_IPT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_MIRRED create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_MPLS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_NAT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_PEDIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SAMPLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SIMP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SKBEDIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SKBMOD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_TUNNEL_KEY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_VLAN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_BASIC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_BPF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_CGROUP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FLOW create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FLOWER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FW create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_MATCHALL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_ROUTE4 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_U32 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_DROP_MONITOR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_CMP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_IPSET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_IPT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_META create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_NBYTE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_STACK create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_TEXT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_U32 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_FC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE_BROADCAST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE_DEMUX create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPIP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_KEY_MIGRATE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_L3_MASTER_DEV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_MPLS_GSO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_NSH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_PKTGEN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_PTP_CLASSIFY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CAKE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CBS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CHOKE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CODEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_DRR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_ETF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_ETS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_FQ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_FQ_PIE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_GRED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HFSC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HHF create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HTB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_MQPRIO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_MULTIQ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_NETEM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PIE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PLUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PRIO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_QFQ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_RED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SFB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SFQ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SKBPRIO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TAPRIO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TBF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TEQL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TC_SKB_EXT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_ACTIVEBACKUP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_BROADCAST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_LOADBALANCE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_RANDOM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_ROUNDROBIN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_HUAWEI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_SOLARFLARE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VRF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_XGRESS 
create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFIT_SECURITY_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_BLOCKLAYOUT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_FLEXFILELAYOUT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_PNFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_SCSILAYOUT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V3_ACL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V4_2_INTER_SSC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V4_SECURITY_LABEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_ACL_SUPPORT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_DISABLE_UDP_SUPPORT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_SWAP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_USE_LEGACY_DNS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V3_ACL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V4_1_MIGRATION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V4_2_READ_PLUS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_BRIDGE_META create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_BRIDGE_REJECT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_COMPAT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_CONNLIMIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_CT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV4 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV6 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_NETDEV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_INET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV4 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV6 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_NETDEV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FLOW_OFFLOAD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FWD_NETDEV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_HASH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_LIMIT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_LOG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_MASQ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_NAT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_NUMGEN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_OSF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_QUEUE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_QUOTA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REDIR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT_INET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT_IPV6 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_SOCKET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_SYNPROXY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_TPROXY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_TUNNEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_XFRM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_AMANDA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_BRIDGE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_BROADCAST 
create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_EVENTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_FTP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_H323 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_IRC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_LABELS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_MARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_NETBIOS_NS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_PPTP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_PROCFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SANE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SECMARK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SIP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SNMP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TFTP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TIMEOUT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TIMESTAMP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_ZONES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK_HELPER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK_TIMEOUT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_DCCP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_GRE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_SCTP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_UDPLITE create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DEFRAG_IPV6 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_IPV4 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_IPV6 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_NETDEV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE_INET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_ARP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_IPV4 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_IPV6 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_AMANDA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_FTP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_H323 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_IRC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_MASQUERADE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_PPTP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_REDIRECT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_SIP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_SNMP_BASIC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_TFTP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_REJECT_IPV4 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_REJECT_IPV6 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_SOCKET_IPV4 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_SOCKET_IPV6 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_ARP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_BRIDGE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_NETDEV create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TPROXY_IPV6 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NLMON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NODES_SHIFT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NTB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NUMA_BALANCING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NUMA_BALANCING_DEFAULT_ENABLED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NVDIMM_KEYS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NVMEM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NVMEM_SYSFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NVME_FC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_GENEVE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_GRE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_VXLAN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_OSNOISE_TRACER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_INDEX create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_METACOPY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_NFS_EXPORT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_REDIRECT_DIR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_XINO_AUTO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PACKET_DIAG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_EXTENSION create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_OWNER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_POISONING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_REPORTING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PARAVIRT_TIME_ACCOUNTING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEAER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEASPM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEASPM_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_DPC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_ECRC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_EDR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_PASID create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_PRI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_QUIRKS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PERCPU_STATS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PERCPU_TEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PERSISTENT_KEYRINGS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PHYLIB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PKCS7_TEST_KEY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PKCS8_PRIVATE_KEY_PARSER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PMBUS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PM_ADVANCED_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PM_AUTOSLEEP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PM_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP_SMP create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_PM_STD_PARTITION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PM_TEST_SUSPEND create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PM_WAKELOCKS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE_SYSCTL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_POWER_RESET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PPP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PPPOE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_CHILDREN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_EVENTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_VMCORE_DEVICE_DUMP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PROFILE_ANNOTATED_BRANCHES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PROFILING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PSAMPLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PSI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PSI_DEFAULT_DISABLED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_COMPRESS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_CONSOLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_RAM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_QFMT_V1 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_QFMT_V2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_QUOTA_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_QUOTA_NETLINK_INTERFACE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RAID6_PQ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RAID6_PQ_BENCHMARK create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_RAID_ATTRS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_CPU_STALL_TIMEOUT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_EQS_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_EXPERT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_NOCB_CPU create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_REF_SCALE_TEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_SCALE_TEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_TORTURE_TEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_TRACE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RDMA_RXE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RDMA_SIW create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RD_BZIP2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RD_GZIP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZ4 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZMA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RD_XZ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RD_ZSTD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_READABLE_ASM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_READ_ONLY_THP_FOR_FS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RICH_CONTAINER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RPCSEC_GSS_KRB5 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS_DEVICE create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_DEV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_PROC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_SYSFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_NVMEM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC_DEVICE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RT_GROUP_SCHED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RUNTIME_TESTING_MENU create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_AHCI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_AHCI_PLATFORM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_MOBILE_LPM_POLICY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_PMP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_ACPU create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_CLUSTER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_INFO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_SLI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_SMT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_TRACER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_CONSTANTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DMA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_ENCLOSURE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_FC_ATTRS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_ISCSI_ATTRS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LOGGING create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LOWLEVEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPI3MR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS_MAX_SGE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS_MAX_SGE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_PROC_FS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATTRS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_HOST_SMP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_LIBSAS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SCAN_ASYNC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SMARTPQI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SPI_ATTRS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SRP_ATTRS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_UFSHCD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_VIRTIO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SECRETMEM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SECTION_MISMATCH_WARN_ONLY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_DMESG_RESTRICT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_AVC_STATS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_DEVELOP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SMACK create mode 
100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_ALTERA_PS2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_ARC_PS2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_LIBPS2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_RAW create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_SERPORT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SFC_FALCON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SHUFFLE_PAGE_ALLOCATOR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SIGNED_PE_FILE_VERIFICATION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_FREELIST_HARDENED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_FREELIST_RANDOM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_MERGE_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SLIP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SMC_DIAG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SND create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SOFT_WATCHDOG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SPI_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_4K_DEVBLK_SIZE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_SINGLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_EMBEDDED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FILE_CACHE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FILE_DIRECT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_LZO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_XATTR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_XZ create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_ZLIB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_ZSTD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_STACKPROTECTOR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_STACKPROTECTOR_STRONG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_STACKTRACE_SUPPORT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_STACK_TRACER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_STATIC_KEYS_SELFTEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_STATIC_USERMODEHELPER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_STRICT_DEVMEM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_STRIP_ASM_SYMS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SUNRPC_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SUNRPC_XPRT_RDMA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SUSPEND create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SUSPEND_FREEZER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SYMBOLIC_ERRNAME create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SYNC_FILE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SYNTH_EVENTS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SYSFB_SIMPLEFB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SYSFS_SYSCALL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEMPORT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_BLACKLIST_HASH_LIST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_BLACKLIST_KEYRING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_DATA_VERIFICATION create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_EXTRA_CERTIFICATE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_REVOCATION_LIST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SYSVIPC_SYSCTL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TASKSTATS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_DELAY_ACCT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_IO_ACCOUNTING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_XACCT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_ATMEL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_CRB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_TIS_CORE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_FILEIO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_IBLOCK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_PSCSI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_USER2 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_BIC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_DCTCP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HSTCP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HTCP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HYBLA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_ILLINOIS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_LP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_NV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_SCALABLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_VEGAS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_VENO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_WESTWOOD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_YEAH create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_MD5SIG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TEE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_BPF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_FAIR_SHARE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_STEP_WISE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_USER_SPACE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_HWMON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_NETLINK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TIGON3 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TIGON3_HWMON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TIMERLAT_TRACER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TIME_NS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TLS_DEVICE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TLS_TOE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_INODE64 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_POSIX_ACL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_XATTR create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TRACER_SNAPSHOT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TRACING_MAP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_TRANSPARENT_HUGEPAGE_MADVISE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS_TPM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UBSAN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UDF_FS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UEVENT_HELPER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UID16 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_AEC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_CIF create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_DMEM_GENIRQ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_MF624 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_NETX create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PCI_GENERIC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PDRV_GENIRQ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PRUSS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_SERCOS3 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UNIX_DIAG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_UPROBES create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_USB_NET_DRIVERS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_USB_PCI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_USELIB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_USER_NS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VDPA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_CONTAINER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_GROUP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_NOIOMMU create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_PCI_CORE create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_VGA_ARB_MAX_GPUS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VHOST_MENU create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VHOST_SCSI create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_DMA_SHARED_BUFFER create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_INPUT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_PCI_LEGACY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_VSOCKETS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_VSOCKETS_COMMON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VIRT_FUSE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKETS_DIAG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKETS_LOOPBACK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKMON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VT_HW_CONSOLE_BINDING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_VXLAN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_WAN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_WARN_ALL_UNSEEDED_RANDOM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_CORE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_NOWAYOUT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_OPEN_TIMEOUT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_PRETIMEOUT_GOV create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_SYSFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_WIREGUARD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_WIREGUARD_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_WQ_WATCHDOG create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_XDP_SOCKETS_DIAG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_INTERFACE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_MIGRATE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_STATISTICS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_SUB_POLICY create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_ONLINE_SCRUB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_POSIX_ACL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_QUOTA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_RT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_SUPPORT_V4 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_WARN create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XOR_BLOCKS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_ARM create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_ARMTHUMB create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_BCJ create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_IA64 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_MICROLZMA create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_POWERPC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_SPARC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_TEST create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_X86 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_Z3FOLD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZBUD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZERO_CALL_USED_REGS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZISOFS create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZPOOL create mode 
100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZ4 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZ4HC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZORLE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_ZSTD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_MEMORY_TRACKING create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_MULTI_COMP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_WRITEBACK create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC_CHAIN_SIZE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC_STAT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSTD_COMMON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_DEFAULT_ON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT create mode 100644 
anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_BGRT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_DOCK create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_EC_DEBUGFS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_EXTLOG create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_PROCESSOR_AGGREGATOR create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_REV_OVERRIDE_POSSIBLE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_SBS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_SLEEP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_TAD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_THERMAL_REL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_WATCHDOG create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ACQUIRE_WDT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ADVANTECH_WDT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_AMDTEE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_HSMP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU_V2 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_MEM_ENCRYPT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_PTDMA create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ARCH_CPUIDLE_HALTPOLL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ARCH_MEMORY_PROBE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CALL_DEPTH_TRACKING create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CMA_SIZE_MBYTES create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CNIC create mode 100644 
anolis/configs/L1-RECOMMEND/x86/CONFIG_CPUMASK_OFFSTACK create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_FREQ_GOV_SCHEDUTIL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IBPB_ENTRY create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IBRS_ENTRY create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IDLE_GOV_HALTPOLL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_SRSO create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_UNRET_ENTRY create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRASH_HOTPLUG create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_AES_NI_INTEL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_BLAKE2S_X86 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_BLOWFISH_X86_64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_X86_64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAST5_AVX_X86_64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAST6_AVX_X86_64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CHACHA20_X86_64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRC32C_INTEL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRC32_PCLMUL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRCT10DIF_PCLMUL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519_X86 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DES3_EDE_X86_64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP create mode 100644 
anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_CRYPTO create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DEBUGFS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_HCT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C3XXX create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C3XXXVF create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C62X create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C62XVF create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_DH895xCC create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_CCP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_PSP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_TSSE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN_AES create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN_SHA create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_NHPOLY1305_AVX2 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_NHPOLY1305_SSE2 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_POLY1305_X86_64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_AVX2_X86_64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_AVX_X86_64 create mode 100644 
anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_SSE2_X86_64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA1_SSSE3 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA256_SSSE3 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA512_SSSE3 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM2_ZHAOXIN_GMI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM3_ZHAOXIN_GMI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM4_ZHAOXIN_GMI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_AVX_X86_64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_X86_64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_X86_64_3WAY create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CSV_GUEST create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DEBUG_BOOT_PARAMS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DEBUG_PERF_USE_VMALLOC create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DEVICE_PRIVATE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DEVPORT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_HMEM create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_KMEM create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_PMEM create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DLM_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_CIK create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_USERPTR create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMD_ACP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_DP_AUX_CHARDEV create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_GMA500 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915 create mode 100644 
anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_CAPTURE_ERROR create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_COMPRESS_ERROR create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_FENCE_TIMEOUT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_FORCE_PROBE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT_KVMGT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_HEARTBEAT_INTERVAL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_STOP_TIMEOUT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_TIMESLICE_DURATION create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_USERPTR create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_VMWGFX create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DWC_PCIE_PMU create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DW_DMAC_PCI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DYNAMIC_PHYSICAL_MASK create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DYNAMIC_SIGFRAME create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_E1000E_HWTS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_DBGP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_USB_XDBC create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_AMD64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_DECODE_MCE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_E752X create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I10NM create mode 
100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I3000 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I3200 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I5100 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I5400 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I7300 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I7CORE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I82975X create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_SBRIDGE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_SKX create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EDD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_COCO_SECRET create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_MIXED create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_RCI2_TABLE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_RUNTIME_MAP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_SECRET create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_HYPERV create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_SIMPLE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_SSD1307 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_VESA create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_FCOE_FNIC create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_FIRMWARE_EDID create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_FIRMWARE_MEMMAP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_FUNCTION_PROFILER create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_USER_HELPER create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_GART_IOMMU create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_ADC_THERMAL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_CPU create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_ISA_DMA create mode 100644 
anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PENDING_IRQ create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS_LOCKING_DLM create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_GPIO_GENERIC_PLATFORM create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HALTPOLL_CPUIDLE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HANGCHECK_TIMER create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET_MMAP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HSA_AMD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_AMD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_INTEL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_ZHAOXIN create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_CSV create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_GM create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_PSP2CPU_CMD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERVISOR_GUEST create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_BALLOON create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_IOMMU create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_KEYBOARD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_STORAGE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_UTILS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_VSOCKETS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_I2C_SLAVE create mode 100644 
anolis/configs/L1-RECOMMEND/x86/CONFIG_I40E_DCB create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_I8K create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_IA32_EMULATION create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_IGB_DCA create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_OPA_VNIC create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_RDMAVT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INPUT_MOUSEDEV create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INT3406_THERMAL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INT340X_THERMAL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_HFI_THERMAL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDMA64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_BUS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_PERFMON create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_SVM create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOATDMA create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOMMU_PERF_EVENTS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI_ME create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI_WDT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PCH_THERMAL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMC_CORE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CLASS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CRASHLOG create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_TELEMETRY create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_POWERCLAMP create mode 100644 
anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_CORE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_TPMI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RST create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_SPEED_SELECT_INTERFACE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_SPEED_SELECT_TPMI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_ACPI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_GTH create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_MSU create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_PCI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_PTI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_STH create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TPMI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TURBO_MAX_3 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TXT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_VSEC create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_IOMMU_DEFAULT_DMA_STRICT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_IOMMU_DEFAULT_PASSTHROUGH create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_IOSF_MBI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_IO_DELAY_0X80 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SERIAL_TRANSMITTER create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SHARP_DECODER create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_XMP_DECODER create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ISA_DMA_API create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT create mode 100644 
anolis/configs/L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT_FIND create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ITCO_VENDOR_SUPPORT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ITCO_WDT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_IXGBE_DCA create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KCSAN create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KDB_DEFAULT_ENABLE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_BZIP2 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_GZIP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZ4 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZMA create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZO create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_XZ create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_ZSTD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_BZIMAGE_VERIFY_SIG create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_JUMP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_SIG_FORCE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KEYBOARD_ADC create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KEYBOARD_ATKBD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KGDB_LOW_LEVEL_TRAP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KVM_AMD_SEV create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_LEGACY_VSYSCALL_NONE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_LOG_BUF_SHIFT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MAPPING_DIRTY_HELPERS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MAXSMP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MCORE2 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MELLANOX_PLATFORM create mode 100644 
anolis/configs/L1-RECOMMEND/x86/CONFIG_MEM_SOFT_DIRTY create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MICROCODE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_RFDS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MLX5_CORE_IPOIB create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MLXREG_HOTPLUG create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MLX_PLATFORM create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MODIFY_LDT_SYSCALL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MOUSE_PS2 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_NET_TULIP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_BEGIN create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_END create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_EMU create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_NVRAM create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_NV_TCO create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PARAVIRT_SPINLOCKS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_HYPERV create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_HYPERV_INTERFACE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_MMCONFIG create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PCSPKR_PLATFORM create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_BRS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_POWER create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_UNCORE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_CSTATE 
create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_RAPL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_UNCORE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PHYSICAL_ALIGN create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PINCTRL_KX7000 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PINCTRL_ZHAOXIN create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PM_TRACE_RTC create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_POWERCAP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PROC_THERMAL_MMIO_RAPL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_PTE_MARKER_UFFD_WP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_QAT_VFIO_PCI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC_DEBUG create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_RESCTRL_FS_PSEUDO_LOCK create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_RTC_DRV_CMOS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_SCHED_MC_PRIO create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_SCHED_OMIT_FRAME_POINTER create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_SDEI_WATCHDOG create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_SENSORS_ZHAOXIN_CPUTEMP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_SERIO_I8042 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_SGETMASK_SYSCALL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_SLS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_SP5100_TCO create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_SPI_CADENCE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_SPI_DESIGNWARE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_SQUASHFS_LZ4 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_STAGING 
create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_STATIC_CALL_SELFTEST create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_DUMMY create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_PROTO_BASIC create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_PROTO_SYS_T create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_CONSOLE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_FTRACE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_HEARTBEAT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_STRICT_SIGALTSTACK_SIZE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_HYGON create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_INFINEON create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_NSC create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_TCM_HYGON create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_TCP_CONG_CDG create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_TDM_DEV_HYGON create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_TDM_KERNEL_GUARD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_TDX_GUEST_DRIVER create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_TEST_LIVEPATCH create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_GOV_BANG_BANG create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_WRITABLE_TRIPS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_TOUCHSCREEN_ADC create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_UCLAMP_TASK create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_UIO_HV_GENERIC create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_UNACCEPTED_MEMORY create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_VBOXGUEST create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_MDEV create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_PCI_IGD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_VGA_SWITCHEROO create mode 
100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB_LEGACY create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRT_DRIVERS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_VMD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_BALLOON create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_PVSCSI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI_VSOCKETS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_VMXNET3 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_WDAT_WDT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_16BIT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_ACPI_CPUFREQ create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_FREQ_SENSITIVITY create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PLATFORM_DEVICE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE_DEFAULT_MODE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CHECK_BIOS_CORRUPTION create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CPA_STATISTICS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CPUID create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DEBUG_FPU create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DECODER_SELFTEST create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DIRECT_GBPAGES create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_ESPFIX64 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_EXTENDED_PLATFORM create mode 100644 
anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_GOLDFISH create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_LPSS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_MID create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_PSTATE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_AUTO create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_OFF create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_ON create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_KERNEL_IBT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MCELOG_LEGACY create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MCE_INJECT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MPPARSE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MSR create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_NUMACHIP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_P4_CLOCKMOD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PCC_CPUFREQ create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PKG_TEMP_THERMAL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PM_TIMER create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_SGX_KVM create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_SUPPORTS_MEMORY_FAILURE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_UV create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_VERBOSE_BOOTUP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_VSMP create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN_NETDEV_FRONTEND create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN_PVHVM 
create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_XFRM_USER_COMPAT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_A64FX_DIAG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_APEI_SEA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_APMT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_CCA_REQUIRED create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_CPPC_CPUFREQ_FIE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_GENERIC_GSI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_GTDT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_IORT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_MCFG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_MPAM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_PPTT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AHCI_CEVA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ALTERA_STAPL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AL_FIC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMBA_PL08X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD8111_ETH create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE_DCB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMIGA_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMPERE_ERRATUM_AC03_CPU_38 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_APDS9802ALS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AQTION create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ACTIONS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ALPINE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_APPLE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BCM create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BERLIN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BINFMT_ELF_STATE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BITMAIN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_EXYNOS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_DMA_PREP_COHERENT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_KEEPINITRD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RELR create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SETUP_DMA_OPS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SUBPAGE_FAULTS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_TICK_BROADCAST create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAVE_ELF_PROT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HISI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_INTEL_SOCFPGA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_K3 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_KEEMBAY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_KEEP_MEMBLOCK create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_LG1K create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MA35 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MEDIATEK create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MESON create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS_MAX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS_MIN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MVEBU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NPCM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NXP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_QCOM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_REALTEK create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_RENESAS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ROCKCHIP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SEATTLE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SPARX5 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SPRD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_STM32 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUNXI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_HUGETLBFS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SYNQUACER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER2 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_UNIPHIER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_USES_PG_ARCH_X create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_USE_GNU_PROPERTY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_VEXPRESS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_VISCONTI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_WANT_FRAME_POINTERS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_XGENE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ZYNQMP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_AS_HAS_MTE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_CONT_PMD_SHIFT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_CONT_PTE_SHIFT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_ERRATUM_858921 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_LSE_ATOMICS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PAGE_SHIFT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS_48 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_RELOC_TEST create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS_48 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_CLEAN_CACHE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_REPEAT_TLBI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_AMBA create 
mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER_EVTSTREAM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_CCI_PMU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_FFA_TRANSPORT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_MAX_NR create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_MHU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CHECKER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CPUIDLE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_FW create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_QCOM_CPUFREQ_HW create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SBSA_WATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SCMI_PROTOCOL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMCCC_SOC_ID create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_V3_SVA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TIMER_SP804 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_2 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_3 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_4 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_5 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_CFI_NEGATE_RA_STATE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_LDAPR create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_LSE_ATOMICS create mode 
100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_SHA3 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AT803X_PHY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ATL2 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_ARCH_COMPAT_GENERIC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_COMPAT_GENERIC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_GENERIC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_LED create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_PWM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_BCM_SBA_RAID create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_BLK_CGROUP_IOLATENCY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_BLK_DEV_PMEM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_BRCMSTB_GISB_ARB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_BT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CAVIUM_CPT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_SIGN_RETURN_ADDRESS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAVE_SHADOW_CALL_STACK create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CDX_BUS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_BQ24190 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_DETECTOR_MAX14656 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_MANAGER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9467 create mode 
100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9471 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_SMB347 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_UCS1002 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_ACPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_PRIVACY_SCREEN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_TBMC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROME_PLATFORMS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLKSRC_MMIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_ICST create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_SP810 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_VEXPRESS_OSC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLONE_BACKWARDS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_AXI_CLKGEN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_CDCE925 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_FIXED_MMIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3516CV300 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3519 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3559A create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3660 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3670 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3798CV200 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI6220 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_QCOM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_RS9_PCIE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SCPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI514 create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI521XX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI570 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC3 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC5 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC7 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XGENE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XLNX_CLKWZRD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_RESET_HI3660 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_RESET_HI6220 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMPAT_ALIGNMENT_FIXUPS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_DUMMY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TRBE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT_PLATDEV create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPU_PM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64_ROCKSOFT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_EC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_HPS_I2C create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_KBD_LED_BACKLIGHT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_CRC64_ROCKSOFT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CAVIUM_ZIP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CCREE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CPT create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_HPRE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_QM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_SEC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_SEC2 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_TRNG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_ZIP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_OCTEONTX_CPT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QAT_4XXX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCOM_RNG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_LIB_POLY1305_RSIZE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_NHPOLY1305_NEON create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_POLYVAL_ARM64_CE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA3_ARM64 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA512_ARM64 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA512_ARM64_CE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DEBUG_EFI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_DECLARE_COHERENT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_DIRECT_REMAP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_NONCOHERENT_MMAP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_OF create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_RESTRICTED_POOL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ANALOGIX_ANX6345 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ANALOGIX_ANX7625 create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ARCPGU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_DSI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_MHDP8546 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CHIPONE_ICN6211 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CHRONTEL_CH7033 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_DISPLAY_CONNECTOR create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_DP_CEC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HDLCD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HISI_HIBMC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HISI_KIRIN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_ADV7511 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_NXP_TDA998X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_SIL164 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT6505 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT66121 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_KOMEDA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LIMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LOGICVC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT8912B create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9211 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9611 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9611UXC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LVDS_CODEC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MALI_DISPLAY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MSM create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NWL_MIPI_DSI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NXP_PTN3460 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ABT_Y030XX067A create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ARM_VERSATILE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_EDP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_IL9322 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_ILI9341 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_INNOLUX_EJ030NA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LG_LB035Q02 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LG_LG4573 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LVDS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NEC_NL8048HL11 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NEWVISION_NV3052C create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NOVATEK_NT39016 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_DB7430 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_LD9040 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SEIKO_43WVF1G create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SHARP_LS037V7DW01 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SIMPLE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SITRONIX_ST7789V create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SONY_ACX565AKM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TD028TTEC1 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TD043MTEA1 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TPG110 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANFROST create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PARADE_PS8622 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PARADE_PS8640 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PL111 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SAMSUNG_DSIM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SII902X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SII9234 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SIL_SII8620 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SIMPLE_BRIDGE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_THINE_THC63LVD1024 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TIDSS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_DLPC3433 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI83 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI86 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_TFP410 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_TPD12S015 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358762 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358764 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358767 
create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358768 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358775 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DTC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_AXI_DMAC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_DMAC_CORE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_EDAC_DMC520 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_EDAC_THUNDERX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_GENERIC_STUB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_PARAMS_FROM_FDT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_ZBOOT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_FSA9480 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_MAX3355 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_PTN5150 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_QCOM_SPMI_MISC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_RT8973A create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_SM5502 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_USBC_TUSB320 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_USB_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_ARMCLCD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_BACKLIGHT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_SM750 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_TFT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FIELDBUS_DEV create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FIREWIRE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FRAME_POINTER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSI create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_EDMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_ERRATUM_A008585 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_QDMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_RCPM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FS_MBCACHE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUJITSU_ES create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUNCTION_ALIGNMENT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUNCTION_ALIGNMENT_8B create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_ARCH_TOPOLOGY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_CSUM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_HWEIGHT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IDLE_POLL_SETUP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_IPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_SHOW_LEVEL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_SCHED_CLOCK create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_74X164 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_74XX_MMIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_ADNP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_ALTERA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_CADENCE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_DWAPB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_FTGPIO010 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_GRGPIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_GW_PLD create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_HLWD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_LOGICVC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_PL061 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_SIFIVE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_SYSCON create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_THUNDERX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_WATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE_SB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XILINX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XLP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_BITREVERSE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_COMPILER_H create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_KASAN_HW_TAGS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_KASAN_SW_TAGS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC_DISCOVERY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_PREEMPT_DYNAMIC_KEY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HI3660_MBOX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HI6220_MBOX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_ALPS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_ASUS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_CMEDIA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_CUSTOM_SENSOR create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_HUB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HIP04_ETH create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_ERRATUM_161010101 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_IRQ_MBIGEN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_LPC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_ACC_VFIO_PCI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_FEMAC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_HIKEY_USB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PCIE_PMU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PMU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PTT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HIX5HD2_GMAC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS3_PMU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HOTPLUG_PCI_SHPC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HP_ILO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HP_WATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HVC_DCC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HWSPINLOCK_QCOM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_PERF_EVENTS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_ARM_SMCCC_TRNG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CAVIUM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CCTRNG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CN10K create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISTB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_VIRTIO create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_XGENE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ALGOPCF create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_AMD756 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_AMD8111 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ARB_GPIO_CHALLENGE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_CADENCE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_DEMUX_PINCTRL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO_FAULT_INJECTOR create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HELPER_AUTO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_ELAN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_GOODIX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HISI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HIX5HD2 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_I801 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ISCH create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MLXCPLD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPMUX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA9541 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA954x create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PINCTRL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_NOMADIK create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_PIIX4 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_QCOM_CCI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_QUP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_RK3X create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SCMI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SIS96X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SLAVE_TESTUNIT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_THUNDERX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VERSATILE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIAPRO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_XGENE_SLIMPRO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_XLP9XX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN_SMBUS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_IEEE802154_FAKELB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_IIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS_HIP08 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_JOYDEV create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_MISC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_TABLET create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_TOUCHSCREEN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMUFD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_ARMV7S create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_DART create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_IPMB_DEVICE_INTERFACE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_IPMI_IPMB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQCHIP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQ_BYPASS_MANAGER create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISDN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISL29003 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISL29020 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_K3_DMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_KARMA_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_KERNEL_MODE_NEON create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_BCM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_CAP11XX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_OMAP4 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_PINEPHONE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_KS7010 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AAT1290 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AN30259A create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AS3645A create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AW2013 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6328 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6358 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_CLASS_FLASH create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_CR0014114 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_EL15203000 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_IS31FL32XX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_KTD2692 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3601X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3692X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3697 create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LP55XX_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LP8860 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LT3593 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_REGULATOR create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_RT4505 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_RT8515 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SGM3140 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SPI_BYTE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SYSCON create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_TRIGGER_AUDIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_TRIGGER_DISK create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LIBFDT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LIBNVDIMM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LINEAR_RANGES create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LITEX_LITEETH create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LITEX_SOC_CONTROLLER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LPC_ICH create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LPC_SCH create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_LTE_GDM724X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MAC_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MAILBOX_TEST create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_CN10K_DDR_PMU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_CN10K_TAD_PMU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_GTI_WDT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_MMIOREG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_MULTIPLEXER create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_HISI_FEMAC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ4019 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ8064 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_MSCC_MIIM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_OCTEON create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_XGENE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MEDIA_CEC_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MEDIA_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ACT8945A create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_AS3722 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_FLEXCOM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_HLCDC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_CORE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_CPCAP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_GATEWORKS_GSC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_HI6421_PMIC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_HI655X_PMIC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_LOCHNAGAR create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX5970 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77620 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77650 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77686 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77714 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NTXEC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_PM8008 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_RPM create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_I2C create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_SPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RN5T618 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD71828 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD718XX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD957XMUF create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_I2C create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_SPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SEC_CORE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SM501 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STMFX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STMPE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STPMIC1 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SYSCON create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TC3589X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TI_LP87565 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65217 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65218 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65219 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VEXPRESS_SYSREG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VIPERBOARD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VX855 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MICREL_KS8995MA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MINIX_SUBPARTITION create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_PCI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_USB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MLXBF_GIGE create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_ARMMMCI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_BLUEFIELD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_EXYNOS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_HI3798CV200 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_K3 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_PCI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_PLTFM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_MTK create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_CADENCE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MILBEAUT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MSM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_ARASAN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_AT91 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_DWCMSHC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_STM32_SDMMC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_TOSHIBA_PCI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_APPLETOUCH create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_BCM5974 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_CYAPA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_ELAN_I2C_SMBUS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_SERIAL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_VSXXXAA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOXTET create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_AFS_PARTS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_ADV_OPTIONS create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_AMDSTD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_INTELEXT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_STAA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_UTIL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_GEN_PROBE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_OF_PARTS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_COMPAT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_OF create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MV_XOR_V2 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MYRI10GE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ND_BTT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ND_PFN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_AMD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_BROCADE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_CISCO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_DEC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_EMULEX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_QUALCOMM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_XGENE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_XGENE_V2 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOZOMI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVHE_EL2_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVIDIA_CARMEL_CNP_ERRATUM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_QCOM_QFPROM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_QCOM_SEC_QFPROM create mode 
100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_REBOOT_MODE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_U_BOOT_ENV create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_ADDRESS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_DYNAMIC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_EARLY_FLATTREE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_FLATTREE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_IOMMU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_IRQ create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_KOBJ create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_MDIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_NUMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_OVERLAY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_PMEM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_RESERVED_MEM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_RESOLVE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_UNITTEST create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPEN_DICE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPTEE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_OSF_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PARPORT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PARTITION_PERCPU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PATA_OF_PLATFORM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_AL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_ALTERA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_CADENCE_PLAT_HOST create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW_HOST create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_ERR create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_STB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_KIRIN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_MICROCHIP_HOST create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_QCOM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_XILINX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_DOMAINS_GENERIC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_DYNAMIC_OF_NODES create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_ECAM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_FTPCI100 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HISI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_THUNDER_ECAM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_THUNDER_PEM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_J721E_HOST create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_SYSCALL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_XGENE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_XGENE_MSI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCNET32 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PDS_CORE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PERF_USE_VMALLOC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHYLIB_LEDS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY_RX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_SALVO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_SIERRA create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_TORRENT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3660_USB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_PCIE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_USB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI6220_USB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HISI_INNO_USB2 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HISTB_COMBPHY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_LAN966X_SERDES create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_MAPPHONE_MDM6600 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_OCELOT_SERDES create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_APQ8064_SATA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EDP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EUSB2_REPEATER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ4019_USB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ806X_SATA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ806X_USB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_M31_USB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_PCIE2 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QMP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QUSB2 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_SGMII_ETH create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_SNPS_EUSB2 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HSIC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS_28NM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 
create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TUSB1210 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_XGENE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PI433 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5018 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5332 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ6018 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ8074 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ9574 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_LPASS_LPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MDM9607 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MICROCHIP_SGPIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8916 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8953 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8976 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8994 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8996 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8998 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_OCELOT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCM2290 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCOM_SSBI_PMIC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCS404 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDF2XXX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDU1000 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SA8775P create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7180 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7280 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8180X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8280XP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM660 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM670 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM845 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDX75 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SINGLE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6115 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6125 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6350 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6375 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM7150 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8150 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8250 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8350 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8450 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8550 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_STMFX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PL320_MBOX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PL330_DMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PLATFORM_MHU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PMIC_OPREGION create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS_OF create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS_SLEEP create 
mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PNP_DEBUG_MESSAGES create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_BRCMSTB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO_RESTART create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_HISI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_LTC2952 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_MSM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_REGULATOR create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_RESTART create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_SYSCON create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_SYSCON_POWEROFF create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_VEXPRESS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_XGENE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PTP_1588_CLOCK_KVM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_FSL_FTM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_HIBVT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWRSEQ_EMMC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWRSEQ_SIMPLE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCA7000_SPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_AOSS_QMP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_APCS_IPC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_BAM_DMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_COMMAND_DB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_CPR create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_EBI2 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_EMAC create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_GENI_SE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_GSBI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA_MGMT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IOMMU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IPCC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IRQ_COMBINER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_KRYO_L2_ACCESSORS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_L2_PMU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_L3_PMU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_LLCC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_OCMEM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_PDC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_RMTFS_MEM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_RPMH create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_SMEM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_WDT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QLGE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_QUICC_ENGINE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RC_CORE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGMAP_MMIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_88PG86X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ACT8865 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_AD5398 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_AW37503 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9121 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9210 
create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9211 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FAN53555 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FAN53880 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FIXED_VOLTAGE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ISL6271A create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ISL9305 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3971 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3972 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP872X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP8755 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LTC3589 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LTC3676 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX1586 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX20086 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX20411 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX77826 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX77857 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8649 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8660 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8893 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8952 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8973 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MCP16502 create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP5416 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP8859 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP886X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MPQ7920 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MT6311 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PCA9450 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PF8X00 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PFUZE100 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88060 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88080 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88090 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PWM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_QCOM_REFGEN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RAA215300 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4801 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4803 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5190A create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5739 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5759 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6160 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6190 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6245 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTMV20 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ2134 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ2208 
create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ6752 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SLG51000 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8106A create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8824X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8827N create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS51632 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS62360 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6286X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6287X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS65023 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6507X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS65132 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6524X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_USERSPACE_CONSUMER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VCTRL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VEXPRESS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VIRTUAL_CONSUMER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VQMMC_IPQ4019 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_HISI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_AOSS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_PDC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RFKILL_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMI4_F34 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMI4_SPI create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMNET create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ROCKCHIP_ERRATUM_3588001 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABB5ZES3 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABX80X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_CADENCE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1305 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1343 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1347 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1374_WDT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1390 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685_FAMILY create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1689 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17285 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17485 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17885 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_HYM8563 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ISL12026 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T93 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T94 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MAX6902 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MCP795 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_NCT3018Y create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2123 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2127 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF85063 create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL030 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL031 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R7301 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R9701 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RS5C348 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX4581 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX8010 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_XGENE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ZYNQMP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTS5208 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SATA_ZHAOXIN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCHED_THERMAL_PRESSURE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_AACRAID create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2X_FCOE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2_ISCSI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS_PCI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_DUMP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_TRACE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ACPI_POWER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7314 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7414 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7418 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADC128D818 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADCXX 
create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1021 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1025 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1026 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1029 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1031 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1275 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM9240 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7828 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7871 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7410 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7411 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7462 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7470 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7475 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AMC6821 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_APDS990X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ARM_SCPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ASC7621 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ATXP1 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_BH1770 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DME1737 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DS1621 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DS620 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC1403 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC6W201 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F71805F create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F71882FG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F75375S create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_G760A create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_G762 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GL518SM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GL520SM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GPIO_FAN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_I5K_AMB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMAEM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMPEX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_INA209 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_INA2XX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IT87 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_JC42 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LINEAGE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LIS3_I2C create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM25066 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM63 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM70 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM73 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM75 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM77 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM78 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM80 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM83 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM85 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM87 create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM90 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM92 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM93 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95234 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95241 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95245 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2945 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2978 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC3815 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4151 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4215 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4222 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4245 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4260 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4261 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1111 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16064 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16065 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1619 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1668 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX197 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX20751 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX31790 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX34440 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6639 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6642 create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6650 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6697 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX8688 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MCP3021 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6683 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6775 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7802 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7904 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87360 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87427 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PCF8591 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PMBUS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_POWR1220 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PWM_FAN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5627 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5636 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT15 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT21 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHTC1 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SIS5595 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47B397 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M1 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M192 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TC74 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_THMC50 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP102 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP103 
create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP401 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP421 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TPS40422 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TSL2550 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9000 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9200 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VEXPRESS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VIA686A create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VT1211 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VT8231 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627EHF create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627HF create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83781D create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83791D create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83792D create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83793 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83795 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83L785TS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83L786NG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_XGENE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ZL6100 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_16550A_VARIANTS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_FSL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_RT288X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL010 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL011 create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL011_CONSOLE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_ARC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_CONEXANT_DIGICOLOR create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_EARLYCON_SEMIHOST create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_JSM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_MSM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_OF_PLATFORM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_SIFIVE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_XILINX_PS_UART create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIO_APBPS2 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SFC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SGI_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SG_SPLIT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SHADOW_CALL_STACK create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_BRCMSTB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_BUS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOLARIS_X86_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_QUADSPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_DW_DMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_DW_PCI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_FSL_SPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC_V3XX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_QCOM_QSPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_THUNDERX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SSIF_IPMI_BMC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_STACKPROTECTOR_PER_TASK create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_STAGING_BOARD create 
mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_STAGING_MEDIA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_STUB_CLK_HI3660 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SUN_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SYSCON_REBOOT_MODE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_ATMEL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_INFINEON create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_NUVOTON create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI_CR50 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_ST33ZP24_I2C create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDERX2_PMU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_BGX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_PF create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_RGX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_VF create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIFM_7XX1 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_ACPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_OF create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_PROBE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TRACE_MMIO_ACCESS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TRANS_TABLE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TYPEC_FUSB302 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TYPEC_QCOM_PMIC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_UEFI_CPER_ARM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ULTRASOC_SMB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_UNIXWARE_DISKLABEL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_CHAOSKEY 
create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_HCD_PLATFORM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_NET_SR9700 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ONBOARD_HUB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_QCOM_EUD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SERIAL_CONSOLE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SERIAL_SIMPLE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SPEEDTOUCH create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_UHCI_HCD create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI_BUS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_DBGCAP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_HISTB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_PLATFORM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_VCPU_STALL_DETECTOR create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_VEXPRESS_CONFIG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_AMBA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_PLATFORM_AMDXGBE_RESET create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_VME_BUS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_VT6655 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_WLAN create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_DMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_PMU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_SLIMPRO_MBOX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_INTC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_WINDOW_WATCHDOG create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_ZYNQMP_DMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_ZYNQMP_DPDMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_XIL_AXIS_FIFO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN_DEBUGFS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN_NHC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_8139CP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_8129 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_PIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_TUNE_TWISTER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_8139_OLD_RX_RESET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ACCESSIBILITY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ACORN_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_CPPC_LIB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_FFH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_FPDT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_HOTPLUG_CPU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_I2C_OPREGION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_MDIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_PFRUT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_PROCESSOR_IDLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_TABLE_LIB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AD525X_DPOT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ADFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN1100_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN1110 create mode 
100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AFFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AF_KCM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AF_RXRPC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AF_UNIX_OOB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AHCI_DWC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AIX_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ALIM7101_WDT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_MBOX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_MSGDMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_TSE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ALX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AMD_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AMT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ANDROID_BINDER_IPC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ANON_VMA_NAME create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_APERTURE_HELPERS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_APPLE_MFI_FASTCHARGE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_APPLICOM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AQUANTIA_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_DMA_ADDR_T_64BIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_THP_MIGRATION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CACHE_LINE_SIZE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_COPY_MC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CPU_RESCTRL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CURRENT_STACK_POINTER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_VIRTUAL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_WX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_ELF_RANDOMIZE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_FAST_MULTIPLIER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_FORTIFY_SOURCE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_GCOV_PROFILE_ALL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_GIGANTIC_PAGE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_KCOV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PMEM_API create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PTE_DEVMAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PTE_SPECIAL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SET_DIRECT_MAP create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SET_MEMORY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_STRICT_KERNEL_RWX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_STRICT_MODULE_RWX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SYSCALL_WRAPPER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HIBERNATION_HEADER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HIBERNATION_POSSIBLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_PROC_KCORE_TEXT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SELECTS_KEXEC_FILE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SPARSEMEM_ENABLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_STACKWALK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_ACPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_ATOMIC_RMW create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_CFI_CLANG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_CRASH_DUMP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_INT128 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC_FILE create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC_SIG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_LTO_CLANG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_NUMA_BALANCING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_UPROBES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUSPEND_POSSIBLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USES_HIGH_VMA_FLAGS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_CMPXCHG_LOCKREF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_MEMREMAP_PROT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_MEMTEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_QUEUED_RWLOCKS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_QUEUED_SPINLOCKS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_SYM_ANNOTATIONS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANTS_NO_INSTR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANTS_THP_SWAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_DEFAULT_BPF_JIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_HUGE_PMD_SHARE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_LD_ORPHAN_WARN create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_PMD_MKWRITE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ARCNET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ASM_MODVERSIONS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ASN1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ASN1_ENCODER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ASSOCIATIVE_ARRAY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AS_HAS_NON_CONST_LEB128 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AS_IS_GNU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AS_VERSION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ATALK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ATARI_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1E create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ATM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_BR2684 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_BR2684_IPFILTER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_CLIP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_CLIP_NO_ICMP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_LANE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_MPOA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AUDITSYSCALL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AUXDISPLAY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_AX88796B_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_B44 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ADP8860 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ADP8870 create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ARCXCNN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_BD6107 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_CLASS_DEVICE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_KTD253 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_KTZ8866 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LM3630A create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LM3639 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LP855X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LV5207LP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_QCOM_WLED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BACKTRACE_SELF_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BAREUDP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BASE_SMALL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BATMAN_ADV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_BQ27XXX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_CW2015 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2780 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2781 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2782 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_GAUGE_LTC2941 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_GOLDFISH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_MAX17040 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_MAX17042 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_RT5033 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_SAMSUNG_SDI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_SBS 
create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_UG3105 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCM54140_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCM7XXX_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCM84881_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCM87XX_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_GMAC_CMN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_PCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI_POSSIBLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_SOC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_POSSIBLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_KONA_USB2_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_NET_PHYLIB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_NET_PHYPTP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_VK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BE2ISCSI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BEFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BINARY_PRINTF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BITREVERSE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_FC_APPID create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_IOPRIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_PUNT_BIO create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_RWSTAT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_3W_XXXX_RAID create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_BSG_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_DM_BUILTIN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_ICQ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_INLINE_ENCRYPTION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_MQ_STACKING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_PM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_SED_OPAL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BLOCK_HOLDER_DEPRECATED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BLOCK_LEGACY_AUTOLOAD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BOOTPARAM_HUNG_TASK_PANIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BPF_JIT_DEFAULT_ON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BPF_PRELOAD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BQL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BRIDGE_CFM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BROADCOM_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BSD_DISKLABEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BTREE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_ASSERT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_CHECK_INTEGRITY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_POSIX_ACL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_REF_VERIFY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_RUN_SANITY_TESTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BTT create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_BUFFER_HEAD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BUILDTIME_TABLE_SORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_C2PORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CACHEFILES_ERROR_INJECTION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CADENCE_WATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CAIF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CARDBUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CAVIUM_PTP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_DEBUG_ASSUMPTIONS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_CAN_LINK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_CAN_LINK_STATIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_GOTO_OUTPUT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_INLINE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_INT128 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_KASAN_GENERIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_KASAN_SW_TAGS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_NO_PROFILE_FN_ATTR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_RANDSTRUCT create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_SANCOV_TRACE_PC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ZERO_CALL_USED_REGS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_IMPLICIT_FALLTHROUGH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_IS_GCC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_NO_ARRAY_BOUNDS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_VERSION_TEXT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CEC_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB_PRETTYDEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB_USE_DNS_RESOLVER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_CRDA_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEBUGFS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEFAULT_PS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEVELOPER_WARNINGS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_REQUIRE_SIGNED_REGDB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_WEXT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CFI_CLANG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CGROUP_FAVOR_DYNMODS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CGROUP_MISC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_ADP5061 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BD99954 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ2415X create mode 
100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ24257 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ24735 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ2515X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ256XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ25890 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ25980 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LP8727 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LT3651 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LTC4162L create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_MAX77976 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_MAX8903 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_RT9455 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_SBS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHECK_SIGNATURE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_INLINE_CRYPTO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_IPSEC_INLINE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_LIB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T3 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4VF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4_DCB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_TLS_DEVICE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CICADA_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_ALLOW_INSECURE_LEGACY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG_DUMP_KEYS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DFS_UPCALL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_FSCACHE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_POSIX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_SMB_DIRECT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_STATS2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_SWN_UPCALL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_UPCALL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_XATTR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CLANG_VERSION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CLZ_TAB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CMA_SYSFS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CMDLINE_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CODA_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_COMEDI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_CDCE706 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_CS2000_CP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_MAX9485 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_PWM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI5341 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI5351 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI544 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_COMPACT_UNEVICTABLE_DEFAULT create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_COMPAT_BINFMT_ELF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_COMPAT_OLD_SIGACTION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CONSOLE_POLL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_SWITCH_TRACER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_IDLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_USER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_USER_FORCE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CONTIG_ALLOC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CORDIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CORTINA_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_COUNTER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_FREQ_GOV_ATTR_SET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_FREQ_GOV_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_HOTPLUG_STATE_CONTROL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_RMAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRC16 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_BIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SARWATE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SELFTEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY4 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY8 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRC4 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRC7 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRC8 create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_CCITT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_ITU_T create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_T10DIF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARIA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_AMLOGIC_GXL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_ATMEL_ECC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_ATMEL_SHA204A create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_CHELSIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_NITROX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_NITROX_CNN55XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_QAT_420XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_SAFEXCEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DH_RFC7919_GROUPS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ECRDSA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_FIPS_CUSTOM_VERSION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_FIPS_NAME create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_GENIV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_HCTR2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_ARC4 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA20POLY1305 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA_GENERIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CURVE25519 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CURVE25519_GENERIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_DES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_GF128MUL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_POLY1305 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_POLY1305_GENERIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_SHA1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_UTILS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CSD_LOCK_WAIT_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CXL_REGION_INVALIDATION_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_LRU_SORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_RECLAIM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_SYSFS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DAVICOM_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DCACHE_WORD_ACCESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_ATOMIC_SLEEP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_CGROUP_REF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_DEVRES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_DRIVER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_NONE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_ZLIB create mode 
100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_ZSTD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_DWARF5 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_NONE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_IRQFLAGS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_KOBJECT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_LOCKING_API_SELFTESTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_LOCK_ALLOC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_MAPLE_TREE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_MUTEXES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_NET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_OBJECTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PER_CPU_MAPS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PINCTRL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PREEMPT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_RT_MUTEXES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_RWSEMS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_SHIRQ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_SPINLOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_STACK_USAGE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_TEST_DRIVER_REMOVE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_TIMEKEEPING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VIRTUAL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VM_PGTABLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_WQ_FORCE_RR_CPU create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_WW_MUTEX_SLOWPATH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_BZIP2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_GZIP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZ4 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_XZ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_ZSTD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEFAULT_HOSTNAME create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEFAULT_INIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEVICE_MIGRATION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DEVTMPFS_SAFE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DIMLIB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DM9051 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_HEAPS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_MOVE_NOTIFY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_SELFTESTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_SYSFS_STATS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DMAPOOL_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_ACPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_COHERENT_POOL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_ENGINE_RAID create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_FENCE_TRACE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_MAP_BENCHMARK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_NUMA_CMA create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_OPS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_SHARED_BUFFER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DM_AUDIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DM_BIO_PRISON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DM_BUFIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DM_CLONE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DM_DUST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DM_EBS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DM_MULTIPATH_HST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DM_MULTIPATH_IOA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DM_PERSISTENT_DATA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DM_UNSTRIPED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DM_VERITY_FEC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DNET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DP83640_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DP83822_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DP83848_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DP83867_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DP83869_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DP83TC811_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DP83TD510_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DQL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRAGONRISE_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ACCEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_AMD_DC_FP create mode 
100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_AMD_SECURE_DISPLAY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ANALOGIX_ANX78XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_BRIDGE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_BUDDY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_DP_HELPER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HDCP_HELPER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HDMI_HELPER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HELPER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ETNAVIV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_EXEC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GEM_SHMEM_HELPER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GM12U320 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GUD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_I2C_NXP_TDA9950 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_KMS_HELPER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_LEGACY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_LOONGSON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_AUO_A030JTN01 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_BRIDGE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_MIPI_DBI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ORIENTATION_QUIRKS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ORISETECH_OTA5601A create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_WIDECHIPS_WS2401 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SCHED create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SIMPLEDRM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SSD130X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SUBALLOC_HELPER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_TTM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_TTM_HELPER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VGEM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VIRTIO_GPU_KMS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VKMS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VRAM_HELPER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DS1682 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DST_CACHE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE_COLUMNS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE_ROWS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_IRQ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DW_EDMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DW_WATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DW_XDATA_PCIE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_EVENTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_FTRACE_WITH_ARGS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ECHO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ECRYPT_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EDAC_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_93CX6 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_93XX46 create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_AT24 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_AT25 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_EE1004 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_IDT_89HPESX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_LEGACY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_MAX6875 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_BOOTLOADER_CONTROL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_CAPSULE_LOADER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_DISABLE_PCI_DMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_DISABLE_RUNTIME create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_EARLYCON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_ESRT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_RUNTIME_WRAPPERS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ELFCORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ENA_ETHERNET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ENCLOSURE_SERVICES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ENERGY_MODEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EQUALIZER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_PCPU_KTHREAD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ETHERNET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ETHOC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EVENT_TRACING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EXCLUSIVE_SYSTEM_RAM create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_EXPORTFS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EXT3_FS_POSIX_ACL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EXT3_FS_SECURITY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EZX_PCAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_F2FS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FARSYNC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_3DFX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ARK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ASILIANT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ATY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ATY128 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CARMINE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_COPYAREA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_FILLRECT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_IMAGEBLIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CIRRUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CYBER2000 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_DEFERRED_IO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_DEVICE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_FOREIGN_ENDIAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_I740 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IBM_GXT4500 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IMSTT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_HELPERS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_KYRO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MATROX create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MB862XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_METRONOME create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MODE_HELPERS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NEOMAGIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NOTIFY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NVIDIA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_OPENCORES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_PM2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_PM3 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_RADEON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_RIVA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_S1D13XXX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_S3 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SAVAGE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SIS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SM712 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SMSCUFX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYSMEM_HELPERS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYSMEM_HELPERS_DEFERRED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_COPYAREA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_FILLRECT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_FOPS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_IMAGEBLIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_TRIDENT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_UDL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_UVESA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VIRTUAL create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VOODOO1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VT8623 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FDDI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FEALNX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FIB_RULES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FIND_BIT_BENCHMARK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FIPS_SIGNATURE_SELFTEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FIREWIRE_NOSY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FIXED_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FONTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_8x16 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_8x8 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FPGA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FREEZER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FS_DAX_PMD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FS_ENCRYPTION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FS_IOMAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FS_POSIX_ACL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FS_VERITY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FTL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FTRACE_MCOUNT_RECORD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FTRACE_STARTUP_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_ALIGNMENT_4B create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_ERROR_INJECTION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_GRAPH_RETVAL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FUN_ETH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FUTEX_PI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FWNODE_MDIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_COMPRESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_PAGED_BUF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_SYSFS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FW_UPLOAD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GAMEPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GARP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GCC10_NO_ARRAY_BOUNDS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGINS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGIN_LATENT_ENTROPY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGIN_STACKLEAK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_VERSION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_ALLOCATOR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_BUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_BUG_RELATIVE_POINTERS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CALIBRATE_DELAY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CLOCKEVENTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CLOCKEVENTS_BROADCAST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CPU_AUTOPROBE create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CPU_VULNERABILITIES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_EARLY_IOREMAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_IRQ_MIGRATION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_MSI_IRQ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_NET_UTILS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PCI_IOMAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PINCONF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PTDUMP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_SMP_IDLE_THREAD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_STRNCPY_FROM_USER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_STRNLEN_USER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_TIME_VSYSCALL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_TRACER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GENWQE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GLOB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GLOB_SELFTEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GNSS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GOLDFISH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GOOGLE_FIRMWARE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB_FASTPATH_LIMIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB_IRQCHIP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_ACPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AGGREGATOR create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AMDPT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AMD_FCH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_BT8XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_CDEV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_CDEV_V1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_EXAR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX3191X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX7300 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX7301 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX732X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MB86S7X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MC33880 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MOCKUP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCA953X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCA9570 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCF857X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCIE_IDIO_24 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCI_IDIO_16 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PISOSR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_RDC321X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_TPIC2810 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_XRA1403 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GRACE_PERIOD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GREENASIA_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GREYBUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GRO_CELLS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_GTP create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_GVE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAMRADIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HARDIRQS_SW_RESEND create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_ARCH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_BUDDY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_PERF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_DMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOMEM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOPORT_MAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ACPI_APEI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ALIGNED_STRUCT_PAGE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_AUDITSYSCALL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_HUGE_VMALLOC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_HUGE_VMAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_JUMP_LABEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KASAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KASAN_VMALLOC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KFENCE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KGDB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_MMAP_RND_BITS create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_PREL32_RELOCATIONS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_SECCOMP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_SECCOMP_FILTER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_STACKLEAK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_TRACEHOOK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_USERFAULTFD_MINOR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_VMAP_STACK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ASM_MODVERSIONS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CLK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CLK_PREPARE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CMPXCHG_DOUBLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CMPXCHG_LOCAL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CONTEXT_TRACKING_USER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_C_RECORDMCOUNT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DEBUG_KMEMLEAK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DMA_CONTIGUOUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_EBPF_JIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FAST_GUP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FTRACE_MCOUNT_RECORD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_ARG_ACCESS_API create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_ERROR_INJECTION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_GRAPH_RETVAL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_GRAPH_TRACER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_TRACER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_GCC_PLUGINS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_GENERIC_VDSO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HW_BREAKPOINT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IMA_KEXEC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IOREMAP_PROT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IRQ_TIME_ACCOUNTING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KCSAN_COMPILER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KPROBES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KRETPROBES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_DIRTY_RING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_EVENTFD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQCHIP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQFD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQ_BYPASS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQ_ROUTING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_MSI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOD_ARCH_SPECIFIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PMD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PUD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_NMI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_EVENTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_EVENTS_NMI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_REGS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_USER_STACK_DUMP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PREEMPT_DYNAMIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_REGS_AND_STACK_ACCESS_API create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_RSEQ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SCHED_AVG_IRQ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SETUP_PER_CPU_AREA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_STACKPROTECTOR 
create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SYSCALL_TRACEPOINTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_UID16 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_CISCO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_FR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_PPP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_RAW create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_RAW_ETH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HFSPLUS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HIDRAW create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_A4TECH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACCUTOUCH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACRUX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACRUX_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_APPLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_APPLEIR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_AUREAL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BATTERY_STRENGTH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BELKIN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BETOP_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BIGBEN_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BPF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CHERRY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CHICONY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CORSAIR create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_HID_COUGAR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CP2112 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CREATIVE_SB0540 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CYPRESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_DRAGONRISE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELECOM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EMS_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EVISION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EZKEY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_FT260 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GEMBIRD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GENERIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GFRM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GLORIOUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GOOGLE_STADIA_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GREENASIA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GT683R create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GYRATION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_HOLTEK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ICADE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ITE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_JABRA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KENSINGTON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KEYTOUCH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KYE create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LCPOWER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LENOVO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LETSKETCH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH_DJ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH_HIDPP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MACALLY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MAGICMOUSE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MALTRON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MAYFLASH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2221 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MEGAWORLD_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MICROSOFT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MONTEREY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MULTITOUCH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NINTENDO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NTI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NTRIG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ORTEK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PANTHERLORD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PENMOUNT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PETALYNX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_BACKLIGHT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_FB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_LCD create 
mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_LEDS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PID create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PLANTRONICS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PRIMAX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PXRC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RAZER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_REDRAGON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RETRODE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RMI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ROCCAT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SAITEK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SAMSUNG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SEMITEK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SIGMAMICRO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SMARTJOYPLUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SONY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SPEEDLINK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_STEAM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_STEELSERIES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SUNPLUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_THINGM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_THRUSTMASTER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TIVO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TOPRE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TOPSEED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TWINHAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_U2FZERO create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_HID_UCLOGIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_UDRAW_PS3 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VIEWSONIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VIVALDI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VRC2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WACOM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WALTOP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WIIMOTE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_XIAOMI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_XINMO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ZEROPLUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ZYDACRON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HIPPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HIST_TRIGGERS_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HMC6352 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HMEM_REPORTING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HMM_MIRROR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HOLTEK_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_CORE_SYNC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_CORE_SYNC_DEAD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_PCI_ACPI_IBM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_PCI_CPCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HPFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HSI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HSR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HTE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HVC_DRIVER create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_HWMON_DEBUG_CHIP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HWSPINLOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HW_CONSOLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_BA431 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_TIMERIOMEM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_XIPHERA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALGOBIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALGOPCA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI1535 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI1563 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI15X3 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_AMD_MP2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_BOARDINFO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_CBUS_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_COMPAT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_CP2615 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_ALGO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_BUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_PCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_PLATFORM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_SLAVE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DIOLAN_U2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_EMEV2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID_ACPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID_OF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_LTC4306 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_MLXCPLD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_REG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_NFORCE2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_NVIDIA_GPU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_OCORES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_PCA_PLATFORM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_PCI1XXXX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ROBOTFUZZ_OSIF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIMTEC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIS5595 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIS630 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_STUB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_TAOS_EVM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_TINY_USB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_VIRTIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_XILINX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_I3C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IAVF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ICE_SWITCHDEV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ICPLUS_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ICS932S401 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_6LOWPAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_ADF7242 create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_AT86RF230 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_ATUSB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_CA8210 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_CC2520 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_DRIVERS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_HWSIM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_MCR20A create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_MRF24J40 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_NL802154_EXPERIMENTAL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_SOCKET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IMA_DISABLE_HTABLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IMA_KEXEC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INET_SCTP_DIAG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INET_TABLE_PERTURB_ORDER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INET_TUNNEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INET_XFRM_TUNNEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_BNXT_RE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_CXGB4 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_EFA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IPOIB_DEBUG_DATA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IRDMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_OCRDMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_QEDR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_USER_MEM create mode 
100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_VIRT_DMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INFTL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INITRAMFS_PRESERVE_MTIME create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_ENV_ARG_LIMIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_ALL_PATTERN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_ALL_ZERO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_NONE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_EVBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_FF_MEMLESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_JOYSTICK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_LEDS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MATRIXKMAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_PSAUX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_SCREEN_X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_SCREEN_Y create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_SPARSEKMAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INTEGRITY_MACHINE_KEYRING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INTEL_XWAY_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INTERCONNECT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INTERVAL_TREE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_INTERVAL_TREE_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IOMMU_SVA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IONIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IO_WQ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IP5XXX_POWER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IPACK_BUS create 
mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IPV6_IOAM6_LWTUNNEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IPVLAN_L3S create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IP_MROUTE_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IP_PNP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IP_ROUTE_CLASSID create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IP_SCTP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH_ESP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_ESP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_SCTP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_TCP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_UDP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_TWOS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IRQSOFF_TRACER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN_HIERARCHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_FORCED_THREADING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_MSI_IOMMU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_POLL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_WORK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ISCSI_TARGET_CXGB4 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_JFFS2_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_JFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_JME create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KALLSYMS_BASE_RELATIVE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KALLSYMS_SELFTEST create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_ADP5588 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_ADP5589 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_CYPRESS_SF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_DLINK_DIR685 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_GPIO_POLLED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LKKBD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LM8323 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LM8333 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MATRIX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MAX7359 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MCS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MPR121 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_NEWTON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_OPENCORES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT1050 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT1070 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT2160 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_SAMSUNG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_STOWAWAY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_SUNKBD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TCA6416 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TCA8418 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TM2_TOUCHKEY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_XTKBD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KPROBE_EVENT_GEN_TEST create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_KUNIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_GENERIC_HARDWARE_ENABLING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_MMIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_VFIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_XFER_TO_GUEST_WORK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_DEBUGFS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_ETH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_IP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_V3 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LAPB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LATENCYTOP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LATTICE_ECP3_CONFIG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_AMS369FG06 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_CLASS_DEVICE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_HX8357 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_ILI922X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_ILI9320 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_L4F00242T03 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LMS283GF05 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LMS501KF03 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LTV350QV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_OTM3225A create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_PLATFORM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_TDO24M create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_VGG2432A4 
create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LDM_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LD_IS_BFD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN_LEVEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LD_VERSION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_AW200XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BD2606MVV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BD2802 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BLINKM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BRIGHTNESS_HW_CHANGED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_CLASS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_CLASS_MULTICOLOR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_DAC124S085 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_IS31FL319X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3530 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3532 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM355x create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3642 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP3944 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP3952 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP50XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_MLXREG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA9532 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA955X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA963X create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA995X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PWM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TCA6507 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TLC591XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGERS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_ACTIVITY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_BACKLIGHT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_CAMERA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_CPU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_DEFAULT_ON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_HEARTBEAT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_MTD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_NETDEV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_ONESHOT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_PANIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_PATTERN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TIMER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TRANSIENT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TTY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_USER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LED_TRIGGER_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_DIRECT_IO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_PTYS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_TIOCSTI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LIBCRC32C create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_LIBWX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO_VF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LKDTM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LLC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LLC2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LLD_VERSION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LMK04832 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_DEBUGGING_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_MM_AND_FIND_VMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_SPIN_ON_OWNER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_STAT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_TORTURE_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIG940_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIRUMBLEPAD2_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LOGITECH_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIWHEELS_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_CLUT224 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_MONO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_VGA16 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LSI_ET1011C_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LXT_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4HC_COMPRESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4_COMPRESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4_DECOMPRESS create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_LZO_COMPRESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_LZO_DECOMPRESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_DEBUGFS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_DEBUG_MENU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_HAS_RC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_LEDS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_MESH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_MESSAGE_TRACING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_DEFAULT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_DEFAULT_MINSTREL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_MINSTREL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_STA_HASH_MAX_SIZE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAC802154 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAILBOX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MANAGER_SBS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_10G_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_88Q2XXX_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_88X2222_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAX31827 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAX63XX_WATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MAXLINEAR_GPHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MCB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MCTP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BCM_UNIMAC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BITBANG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_CAVIUM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_DEVICE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_DEVRES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_MVUSB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_THUNDER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MD_BITMAP_FILE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MEDIATEK_GE_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MEMCG_KMEM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MEMFD_CREATE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MEMORY_ISOLATION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MEMREGION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_JMICRON_38X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_R592 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_TIFM_MS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_UNSAFE_RESUME create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MEMTEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MEN_A21_WDT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM800 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM805 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM860X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AAT2870_CORE create 
mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ARIZONA_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ARIZONA_SPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AS3711 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ATC260X_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AXP20X_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_BCM590XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_BD9571MWV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_CS42L43_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9052_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9052_SPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9055 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9062 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9063 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9150 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DLN2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_INTEL_M10_BMC_SPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_IQS62X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_JANZ_CMODIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_KEMPLD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LM3533 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LP3943 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LP8788 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MADERA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX14577 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77541 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77693 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77843 
create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8907 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8925 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8997 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8998 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MC13XXX_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MC13XXX_SPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MENF21BMC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MP2629 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6360 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6370 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6397 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_OCELOT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_PALMAS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_PCF50633 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RC5T583 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RDC321X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RETU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT4831 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT5033 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT5120 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SI476X_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SKY81452 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SMPRO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SY7636A create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TI_LMU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TI_LP873X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65086 create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65090 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6586X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65910 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65912_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65912_SPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6594_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6594_SPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TQMX86 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WL1273_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM831X_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM831X_SPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8350_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8400 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8994 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MHI_BUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MHI_BUS_EP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MHP_MEMMAP_ON_MEMORY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MICREL_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_T1S_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_T1_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MICROSEMI_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MII create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MINIX_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MISC_ALCOR_PCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MLX4_INFINIBAND create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_EN_IPSEC 
create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_EN_TLS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_MACSEC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_VFIO_PCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_BLOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_BLOCK_MINORS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_CB710 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_CQHCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_HSQ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_RICOH_MMC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_ACPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_F_SDH30 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_IO_ACCESSORS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PLTFM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_XENON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_TIFM_SD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_USDHI6ROL0 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_USHC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_VIA_SDMMC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_VUB300 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_GATHER_RCU_TABLE_FREE create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_GATHER_TABLE_FREE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_LAZY_TLB_REFCOUNT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_NOTIFIER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MODULES_TREE_LOOKUP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MODULES_USE_ELF_RELA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_ALLOW_BTF_MISMATCH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_GZIP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_NONE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_XZ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_ZSTD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_FORMAT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_KEY_TYPE_ECDSA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_KEY_TYPE_RSA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_UNLOAD_TAINT_TRACKING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MOST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MOTORCOMM_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_SYNAPTICS_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_SYNAPTICS_USB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MOXA_INTELLIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MOXA_SMARTIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MPILIB create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_MRP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MSDOS_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MSE102X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MSPRO_BLOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MS_BLOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ABSENT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_AR7_PARTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLKDEVS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK2MTD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK_RO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CFI_I1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CFI_I2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CMDLINE_PARTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_COMPLEX_MAPPINGS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_DATAFLASH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_DOCG3 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_HYPERBUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_INTEL_VR_NOR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_JEDECPROBE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_LPDDR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_4 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MCHP23K256 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MCHP48L640 create mode 
100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MTDRAM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_MXIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_SW_BCH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_SW_HAMMING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ONENAND create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_OOPS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PARTITIONED_MASTER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PHRAM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PLATRAM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PMC551 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_RAM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_RAW_NAND create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_REDBOOT_PARTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ROM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SLRAM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SPI_NAND create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SPI_NOR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SST25L create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SWAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_TESTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_BEB_LIMIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_BLOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_FASTMAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_GLUEBI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_WL_THRESHOLD create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_MUTEX_SPIN_ON_OWNER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NATIONAL_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NCE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NCN26000_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ND_CLAIM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NE6X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NE6XVF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_DMA_MAP_STATE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_SG_DMA_FLAGS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_SG_DMA_LENGTH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NETCONSOLE_EXTENDED_LOG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_BPF_LINK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_NETLINK_HOOK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_SKIP_EGRESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_XTABLES_COMPAT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NETPOLL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NETXEN_NIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_9P create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DEVLINK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DEV_REFCNT_TRACKER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DSA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_EGRESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FLOW_LIMIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FOU create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FOU_IP_TUNNELS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_HANDSHAKE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IFE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_INGRESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IPVTI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IP_TUNNEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_NCSI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_NS_REFCNT_TRACKER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_POLL_CONTROLLER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_REDIRECT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_RX_BUSY_POLL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SB1000 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SCH_FIFO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SCH_MQPRIO_LIB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SELFTESTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SOCK_MSG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SWITCHDEV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_UDP_TUNNEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_3COM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADAPTEC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AGERE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ALACRITECH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ALTEON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AMAZON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AQUANTIA create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ARC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ASIX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ATHEROS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_BZWX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CADENCE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CAVIUM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CHELSIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CORTINA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_DAVICOM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_DLINK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ENGLEDER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_EZCHIP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_FUNGIBLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_GOOGLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_I825XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_LITEX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MARVELL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICREL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROCHIP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROSEMI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROSOFT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MYRI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NATSEMI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NETERION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NETRONOME create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NVIDIA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_OKI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_PACKET_ENGINES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_PENSANDO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_QLOGIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_RDC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_REALTEK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_RENESAS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ROCKER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SAMSUNG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SEEQ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SILAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SIS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SMSC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SOCIONEXT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_STMICRO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SUN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SYNOPSYS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_TEHUTI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_TI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_VERTEXCOM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_VIA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_WIZNET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_XILINX create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_NEW_LEDS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_APP_ABM_NIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_APP_FLOWER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_NET_IPSEC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFSD_V2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_USE_KERNEL_DNS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_2_SSC_HELPER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_SECURITY_LABEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFTL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFT_REJECT_IPV4 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NFT_REJECT_NETDEV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NF_CONNTRACK_OVS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NF_DEFRAG_IPV4 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NF_FLOW_TABLE_PROCFS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NF_LOG_SYSLOG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NF_NAT_OVS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NF_TPROXY_IPV4 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NILFS2_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NL80211_TESTMODE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLATTR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_1250 create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_1251 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_437 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_737 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_775 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_850 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_852 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_855 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_857 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_860 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_861 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_862 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_863 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_864 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_865 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_866 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_869 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_874 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_932 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_949 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_13 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_14 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_15 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_3 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_4 create 
mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_5 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_6 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_7 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_8 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_9 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_KOI8_R create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_KOI8_U create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CELTIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CENTEURO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CROATIAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CYRILLIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_GAELIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_GREEK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ICELAND create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_INUIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMANIAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_TURKISH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_UCS2_UTILS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NOP_TRACER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NOP_USB_XCEIV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NOTIFIER_ERROR_INJECTION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NOUVEAU_DEBUG_MMU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NOUVEAU_DEBUG_PUSH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NO_HZ_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_EPF create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_IDT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_MSI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_PERF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_PINGPONG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_SWITCHTEC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_TOOL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_TRANSPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_64BIT_CLUSTER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_FS_POSIX_ACL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_LZX_XPRESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NULL_TTY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NUMA_KEEP_MEMINFO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_DAX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_PFN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_SECURITY_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_LAYOUT_ONIE_TLV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_LAYOUT_SL28_VPD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_RMEM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_AUTH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_HWMON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_MULTIPATH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_AUTH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_FC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_FCLOOP create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_LOOP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_PASSTHRU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_RDMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_TCP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_VERBOSE_ERRORS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_C45_TJA11XX_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_CBTX_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_TJA11XX_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_N_GSM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_N_HDLC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_OBJAGG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_OCFS2_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_OID_REGISTRY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_OLD_SIGSUSPEND3 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_OMFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ORANGEFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_OVERLAY_FS_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PACKING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PADATA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_COUNTER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_POOL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_POOL_STATS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_SIZE_LESS_THAN_256KB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_SIZE_LESS_THAN_64KB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_TABLE_CHECK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_HAS_LANG_EXCLUDE create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_HAS_SPLIT_BTF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_VERSION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PANIC_ON_OOPS_VALUE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PANTHERLORD_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PARMAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ACPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ALI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_AMD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ARTOP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATIIXP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATP867X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD640_PCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD64X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CYPRESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_EFAR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT366 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT37X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X2N create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT8213 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT821X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_JMICRON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_LEGACY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MARVELL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MPIIX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NETCELL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NINJA32 create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NS87410 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NS87415 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OLDPIIX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OPTI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OPTIDMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC2027X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC_OLD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RADISYS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RDC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RZ1000 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SCH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SERVERWORKS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIL680 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TIMINGS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TOSHIBA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TRIFLEX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_VIA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_WINBOND create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PC300TOO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCCARD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCI200SYN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEAER_INJECT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_PERFORMANCE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_POWERSAVE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_POWER_SUPERSAVE create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_DW_PLAT_HOST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_PME create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_PTM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCIPCWATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ATS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DOE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DOMAINS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_LABEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_MESON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_P2PDMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_REALLOC_ENABLE_AUTO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_SW_SWITCHTEC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCMCIA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCPU_DEV_REFCNT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PCS_XPCS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PDC_ADMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PECI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK_STATS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PHANTOM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PHONET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PHYLINK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PHYS_ADDR_T_64BIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_CAN_TRANSCEIVER create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_PXA_28NM_HSIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_PXA_28NM_USB2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PINCONF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_AMD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_CY8C95X0 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_MCP23S08 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_SX150X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PINMUX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PLDMFW create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PLX_DMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PMIC_ADP5520 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PMIC_DA903X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PM_CLK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PM_DEVFREQ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PM_USERSPACE_AUTOSLEEP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_BLOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_FILE_LAYOUT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_FLEXFILE_LAYOUT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PNP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PNPACPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_POSIX_CPU_TIMERS_TASK_WORK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY_HWMON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOATM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS create 
mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_4 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_8 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOL2TP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_ASYNC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_BSDCOMP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_DEFLATE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_FILTER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_MPPE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_MULTILINK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_SYNC_TTY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_KTIMER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_LDISC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PPTP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPTIRQ_DELAY_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPT_COUNT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPT_NOTIFIERS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PREVENT_FIRMWARE_BUILD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PRIME_NUMBERS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PRINTK_CALLER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PROBE_EVENTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_CPU_RESCTRL create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_PID_CPUSET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PROVE_LOCKING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PSTORE_FTRACE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PSTORE_PMSG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PTDUMP_DEBUGFS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_IDT82P33 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_IDTCM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_INES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PWM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_PCA9685 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_SYSFS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QEDE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QEDF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QEDI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QED_FCOE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QED_ISCSI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QED_LL2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QED_OOO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QED_RDMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QED_SRIOV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QLA3XXX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QLCNIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QNX4FS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QNX6FS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QRTR create 
mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QSEMI_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QUEUED_RWLOCKS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QUEUED_SPINLOCKS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QUOTACTL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_QUOTA_TREE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_R8169 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RANDOM32_SELFTEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RANDOM_KMALLOC_CACHES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RAPIDIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RATIONAL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RBTREE_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_CPU_STALL_CPUTIME create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_EXP_CPU_STALL_TIMEOUT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_LAZY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_NEED_SEGCBLIST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_NOCB_CPU_DEFAULT_ALL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_STALL_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RDS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_REALTEK_AUTOPM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_REALTEK_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_DEC8 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_ENC8 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP_I2C create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP_SPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_REISERFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_REMOTEPROC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_REMOTE_TARGET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RENESAS_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_ATTACK_MITIGATION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_TI_SYSCON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_TI_TPS380X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RFD_FTL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL_INPUT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL_LEDS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RFS_ACCEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_BENCHMARK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_STARTUP_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_2D_SENSOR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F03 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F03_SERIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F11 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F12 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F30 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F3A create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F55 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_I2C create 
mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_SMB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ROCKCHIP_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ROCKER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ROMFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RPMSG_QCOM_GLINK_RPM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RPMSG_VIRTIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ABEOZ9 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_BQ32K create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1286 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1302 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307_CENTURY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1374 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1511 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1553 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1672 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1742 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS2404 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232_HWMON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_EM3027 create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_FM3130 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_FTRTC010 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_GOLDFISH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ISL12022 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ISL1208 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80_WDT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T35 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T59 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T86 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MAX6900 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MAX6916 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MSM6242 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8523 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF85363 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8563 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8583 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RP5C01 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RS5C372 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3028 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3029C2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3029_HWMON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3032 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV8803 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX6110 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8025 create mode 
100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8581 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_S35390A create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_SD3078 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_STK17TA8 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_X1205 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_I2C_AND_SPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_INTF_DEV_UIE_EMUL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_LIB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RT_MUTEXES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_RWSEM_SPIN_ON_OWNER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SAMPLES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ACARD_AHCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_DWC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_HOST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_INIC162X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_MV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_NV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_PROMISE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_QSTOR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL24 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SVW create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SX4 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ULI create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VIA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VITESSE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ZPODD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SBITMAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCF_TORTURE_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_HRTICK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_MM_CID create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_STACK_END_CHECK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_3W_9XXX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_3W_SAS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ACARD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ADVANSYS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC79XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC7XXX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC94XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AM53C974 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ARCMSR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_BFA_FC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_BUSLOGIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CHELSIO_FCOE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CXGB3_ISCSI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CXGB4_ISCSI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DC395x create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_ALUA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_EMC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_HP_SW create 
mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_RDAC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DMX3191D create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_EFCT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ESAS2R create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_FDOMAIN_PCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_HPSA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_HPTIOP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_INIA100 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_INITIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_IPS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_LPFC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_LPFC_DEBUG_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MOD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVUMI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MYRB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MYRS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_NETLINK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_PM8001 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_PMCRAID create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLA_FC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLA_ISCSI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLOGIC_1280 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_SNIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_STEX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_SYM53C8XX_2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_WD719X create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_COOKIE_HMAC_MD5 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_COOKIE_HMAC_SHA1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DBG_OBJCNT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SDIO_UART create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SECCOMP_CACHE_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_APPARMOR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LANDLOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LOADPIN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LOCKDOWN_LSM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_SAFESETID create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_SELINUX_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_TOMOYO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_YAMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ACBEL_FSG032 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADM1177 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADM1266 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADT7310 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AHT10 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AQUACOMPUTER_D5NEXT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AS370 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AXI_FAN_CONTROL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_BEL_PFE 
create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_BPA_RS600 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_CORSAIR_CPRO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_CORSAIR_PSU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DELTA_AHE50DC_FAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DPS920AB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DRIVETEMP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_EMC2103 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_EMC2305 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_FSP_3Y create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_FTSTEUTATES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_HIH6130 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_HS3001 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IBM_CFFPS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INA238 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INA3221 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INSPUR_IPSPS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR35221 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR36021 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR38064 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IRPS5401 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ISL68137 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LT7182S create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2947_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2947_SPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2990 create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2992 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX127 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX15301 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX16601 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX20730 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31722 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31730 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31760 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31785 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX6620 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX6621 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MC34VR500 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP2888 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP2975 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP5023 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MPQ7932 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MR75203 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NCT6775_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NPCM7XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NZXT_KRAKEN2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NZXT_SMART2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_OCC_P8_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PIM4328 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PLI1209BC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PM6764TR create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PXE1610 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_Q54SJ108A2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SBRMI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SBTSI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SHT3x create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SHT4x create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_STPDDC60 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_STTS751 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TC654 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TDA38640 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP108 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP464 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP513 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS23861 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS53679 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS546D24 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_W83773G create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_XDPE122 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_XDPE152 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_CONSOLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DEPRECATED_OPTIONS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DETECT_IRQ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DW create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DWLIB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_EXAR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_EXTENDED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_FINTEK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_MANY_PORTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_NR_UARTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCI1XXXX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCILIB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PERICOM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PNP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_RSA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_RUNTIME_UARTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_SHARE_IRQ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_ALTERA_JTAGUART create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_ALTERA_UART create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_CORE_CONSOLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_DEV_BUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_EARLYCON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_FSL_LINFLEXUART create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_FSL_LPUART create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_KGDB_NMI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MAX3100 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MAX310X create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MCTRL_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_NONSTANDARD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_RP2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SC16IS7XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SCCNXP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SPRD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_UARTLITE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_GPIO_PS2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_PCIPS2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_PS2MULT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SFC_SIENA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SFP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SF_PDMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SGL_ALLOC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SG_POOL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SHRINKER_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SIGNATURE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SIOX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SKB_EXTENSIONS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SLAB_DEPRECATED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SLHC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SLIMBUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_COMPRESSED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_MODE_SLIP6 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_SMART create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SMARTJOYPLUS_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SMBFS create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_SMB_SERVER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SMSC_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SM_FTL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_CGROUP_DATA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_RX_QUEUE_MAPPING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_VALIDATE_XMIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SOC_TI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SOFTIRQ_ON_OWN_STACK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SONY_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SOUND create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SOUNDWIRE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSEMEM_EXTREME create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSEMEM_VMEMMAP_ENABLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSE_IRQ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_ALTERA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AMD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AX88796C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AXI_SPI_ENGINE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_BITBANG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_DYNAMIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_LOOPBACK_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MASTER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MEM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MICROCHIP_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MICROCHIP_CORE_QSPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MUX create 
mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MXIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_OC_TINY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_PCI1XXXX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_PXA2XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SC18IS602 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SIFIVE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SLAVE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SPIDEV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_TLE62X0 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_XCOMM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_XILINX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPLIT_PTLOCK_CPUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SPMI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SRAM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SSB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SSB_POSSIBLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SSFDC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_STACKDEPOT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_STACKTRACE_BUILD_ID create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_STANDALONE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_STE10XP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_STP create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_STRING_SELFTEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SUNRPC_BACKCHANNEL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SUNRPC_GSS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_3_POWER_OPREGION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_GPE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_HOTPLUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_PLATFORMS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_PRO3_BUTTON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SWIOTLB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SWIOTLB_DYNAMIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SWPHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SW_SYNC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SYNTH_EVENT_GEN_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SYSCTL_EXCEPTION_TRACE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SYSFB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SYSV68_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SYSVIPC_COMPAT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SYSV_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RCU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RCU_GENERIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RUDE_RCU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_TRACE_RCU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C_CR50 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_ST33ZP24_SPI create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_VTPM_PROXY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TCM_FC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TCM_QLA2XXX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TERANETICS_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_ASYNC_DRIVER_PROBE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BITMAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BITOPS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BLACKHOLE_DEV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DHRY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DIV64 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DYNAMIC_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_FIRMWARE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_FREE_PAGES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_HEXDUMP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_IDA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_KMOD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_KSTRTOX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_LKM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_LOCKUP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MAPLE_TREE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MEMCAT_P create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MEMINIT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MIN_HEAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_OBJAGG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_PARMAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_POWER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_PRINTF create mode 
100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_REF_TRACKER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_RHASHTABLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_SCANF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_STATIC_KEYS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_STRING_HELPERS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_SYSCTL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_UDELAY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_USER_COPY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_UUID create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_VMALLOC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_XARRAY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_BM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_FSM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_KMP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_THERMAL_EMULATION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_THERMAL_STATISTICS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_THP_SWAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_THREAD_INFO_IN_TASK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_THRUSTMASTER_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TICK_ONESHOT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TIFM_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_HX8357D create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9163 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9225 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9341 create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9486 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_MI0283QT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_REPAPER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ST7586 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ST7735R create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_CRYPTO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_DIAG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_IB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_UDP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TI_ST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TMPFS_QUOTA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TOOLS_SUPPORT_RELR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TPS6105X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TPS65010 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TPS6507X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TRACEPOINT_BENCHMARK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TRACER_MAX_TRACE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_CLOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_EVAL_MAP_FILE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_EVENT_INJECT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_IRQFLAGS_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TRACING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TRACING_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TREE_RCU create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_TREE_SRCU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TSNEP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TUN_VNET_CROSS_LE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TWL4030_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TWL6040_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_ANX7411 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_DP_ALTMODE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_HD3SS3220 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_FSA4480 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_GPIO_SBU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_NB7VPQ904M create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_PI3USB30532 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_NVIDIA_ALTMODE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_RT1719 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_STUSB160X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TCPCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TCPM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TPS6598X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_UCSI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_WUSB3801 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_UACCE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_UBIFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_UCS2_STRING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_ACPI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_CCG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_STM32G0 
create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_UDMABUF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_UEFI_CPER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_UFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_UHID create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ULTRIX_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_UNICODE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_UNINLINE_SPIN_UNLOCK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_UNIX_SCM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB4 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USBIP_CORE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USBPCWATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ACM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ADUTUX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ALI_M5632 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_AN2720 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ANNOUNCE_NEW_DEVICES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_APPLEDISPLAY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ARCH_HAS_HCD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ARMLINUX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ATM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_AUTOSUSPEND_DELAY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_BELKIN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_C67X00_HCD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CATC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CDNS_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CHIPIDEA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_COMMON create mode 
100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CONN_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CXACRU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CYPRESS_CY7C63 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CYTHERM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DEFAULT_PERSIST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DWC2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DWC3 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DYNAMIC_MINORS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_FSL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_HCD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_PCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_ROOT_HUB_TT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_TT_NEWSCHED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHSET_TEST_FIXTURE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EMI26 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EMI62 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EPSON2888 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EZUSB_FX2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_FEW_INIT_RETRIES create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_GADGET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_GPIO_VBUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HCD_BCMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HCD_TEST_MODE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HID create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HIDDEV create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSIC_USB3503 create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSIC_USB4604 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HUB_USB251XB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IDMOUSE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IOWARRIOR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IPHETH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISIGHTFW create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP116X_HCD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP1301 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP1760 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_KAWETH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_KC2190 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LAN78XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LCD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LEDS_TRIGGER_USBPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LED_TRIG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LEGOTOWER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LINK_LAYER_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MAX3421_HCD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MDC800 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MICROTEK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MUSB_HDRC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AQC111 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AX88179_178A create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AX8817X 
create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDCETHER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_EEM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_MBIM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_NCM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_SUBSET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_SUBSET_ENABLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CH9200 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CX82310_ETH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_DM9601 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_GL620A create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_HUAWEI_CDC_NCM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_INT51X1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_KALMIA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_MCS7830 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_NET1080 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_PLUSB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_QMI_WWAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_RNDIS_HOST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SMSC75XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SMSC95XX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SR9800 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_ZAURUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD_PCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD_PLATFORM create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_LITTLE_ENDIAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OTG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OTG_PRODUCTLIST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OXU210HP_HCD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_PEGASUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_PRINTER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_R8A66597_HCD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ROLE_SWITCH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8150 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8152 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8153_ECM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_AIRCABLE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_ARK3116 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_BELKIN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CH341 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CP210X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CYBERJACK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CYPRESS_M8 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_DIGI_ACCELEPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EDGEPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EDGEPORT_TI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EMPEG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_F81232 create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_F8153X create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_FTDI_SIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_GARMIN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_GENERIC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPAQ create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPW create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IUU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KEYSPAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KEYSPAN_PDA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KLSI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KOBIL_SCT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MCT_U232 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_METRO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MOS7720 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MOS7840 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MXUPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_NAVMAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OMNINET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OPTICON create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OPTION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OTI6858 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_PL2303 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QCAUX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QT2 create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QUALCOMM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SAFE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SAFE_PADDED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SIERRAWIRELESS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SPCP8X5 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SSU100 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SYMBOL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_TI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_UPD78F0730 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_VISOR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WHITEHEAT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WISHBONE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WWAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_XR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_XSENS_MT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SEVSEG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SIERRA_NET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SISUSBVGA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SL811_HCD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ALAUDA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_CYPRESS_ATACB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_DATAFAB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ENE_UB6250 create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_FREECOM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ISD200 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_JUMPSHOT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_KARMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ONETOUCH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_REALTEK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_SDDR09 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_SDDR55 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_USBAT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TMC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TRANCEVIBRATOR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_UAS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_UEAGLEATM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_USBNET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_VL600 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_WDM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_HCD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI_RENESAS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XUSBATM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USB_YUREX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USERIO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USER_DECRYPTED_DATA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USER_EVENTS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_USE_PERCPU_NUMA_NODE_ID create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_VALIDATE_FS_PARSER create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_IOMMU_TYPE1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_PCI_INTX create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_PCI_MMAP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_VIRQFD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_CROSS_ENDIAN_LEGACY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_IOTLB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_TASK create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VIDEO_CMDLINE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VIDEO_NOMODESET create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VIRTIO_ANCHOR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VIRTIO_IOMMU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VIRT_CPU_ACCOUNTING create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VITESSE_PHY create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q_GVRP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q_MVRP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VT_CONSOLE_SLEEP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_VXFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_W1 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_WANXL create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_WATCH_QUEUE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_WDTPCI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_WERROR create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_WIRELESS create mode 
100644 anolis/configs/L2-OPTIONAL/default/CONFIG_WPCM450_SOC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_WQ_CPU_INTENSIVE_REPORT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_WQ_POWER_EFFICIENT_DEFAULT create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_WWAN create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_WW_MUTEX_SELFTEST create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_X25 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XARRAY_MULTI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_AH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_ALGO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_ESP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_IPCOMP create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_OFFLOAD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XFS_SUPPORT_ASCII_CI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_AXI_EMAC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_DMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_EMACLITE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_GMII2RGMII create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_LL_TEMAC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_SDFEC create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_VCU create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_WATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_XDMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XILLYBUS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XILLYUSB create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_XXHASH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_ENE_TUNE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_O2 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_RICOH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_TI create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_TOSHIBA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ZEROPLUS_FF create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ZIIRAVE_WATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ZLIB_DEFLATE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ZLIB_INFLATE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ZONEFS_FS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_60XX_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ABP060MG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACERHDF create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACER_WIRELESS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACER_WMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_ADXL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_ALS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_CMPC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_CPU_FREQ_PSS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_DPTF create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_HOTPLUG_IOAPIC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_LEGACY_TABLES_LOOKUP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_LPIT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_PLATFORM_PROFILE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_PROCESSOR_CSTATE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_TOSHIBA create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_WMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ACRN_GUEST create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S1200 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S90 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD3552R create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD4130 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5064 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5110 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5272 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5360 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5380 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5421 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5446 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5449 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5504 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5592R create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5593R create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5624R_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5686_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5696_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5755 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5758 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5761 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5764 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5766 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5770R create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5791 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7091R5 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7124 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7150 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7192 create 
mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7266 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7280 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7291 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7292 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7293 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7298 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7303 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74115 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74413R create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7476 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_PARALLEL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7746 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7766 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7768_1 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7780 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7791 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7793 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7887 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7923 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7949 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD799X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8366 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8801 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9523 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADA4250 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADDRESS_MASKING create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4350 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4371 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4377 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16080 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16130 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16136 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16201 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16209 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16260 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16400 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16460 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16475 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16480 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADJD_S311 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV1013 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV1014 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV4420 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV8818 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADRF6780 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADUX1020 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADVANTECH_EC_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADV_SWBUTTON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL313_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL313_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL345_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL345_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL355_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL355_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL367_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL367_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL372_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL372_SPI create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXRS290 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXRS450 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4403 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4404 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AGP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AK09911 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8974 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8975 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3010 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3320A create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ALIM1535_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ALTERA_STAPL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AM2315 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_NB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMF create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_SFH_HID create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AMIGA_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AMILO_RFKILL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9300 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9802ALS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9960 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_APPLE_GMUX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_APPLE_PROPERTIES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AQTION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AR5523 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CLOCKSOURCE_INIT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ADD_PAGES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CC_PLATFORM create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_FINALIZE_INIT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_RELAX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_EARLY_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ELFCORE_COMPAT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_MEM_ENCRYPT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PKEYS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MAY_HAVE_PC_FDC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_PC_SERIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS_MAX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS_MIN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SPARSEMEM_DEFAULT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_JUMP create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_USES_PG_UNCACHED create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_USE_BUILTIN_BSWAP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_GENERAL_HUGETLB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OLD_COMPAT_IPC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AS3935 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AS73211 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_LAPTOP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_NB_WMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_TF103C_DOCK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_WIRELESS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_WMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_AVX512 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_GFNI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_SHA1_NI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_SHA256_NI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_TPAUSE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_WRUSS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_CE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUGFS create 
mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_SDIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_TRACING create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH11K create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH12K create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K_PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH6KL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_AHB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_BTCOEX_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_CHANNEL_CONTEXT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_SPECTRAL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_DEBUGFS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_DYNACK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC_DEBUGFS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HW create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HWRNG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI_NO_EEPROM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCOEM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_RFKILL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_STATION_STATISTICS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_WOW create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATL2 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATLAS_EZO_SENSOR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATLAS_PH_SENSOR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ATP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AUDIT_ARCH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_B43 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_B43LEGACY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_APPLE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_PWM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_SAHARA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BARCO_P50_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE2 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE3 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_HWMON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_LANCER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_SKYHAWK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1750 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1780 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_CGROUP_IOLATENCY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_FD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_PMEM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA180 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA220 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA400 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_ACCEL create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BME680 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BMG160 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI088_ACCEL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI160_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI160_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BMP280 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BNA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BOOT_VESA_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BOSCH_BNO055_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMDBG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PCIE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PROTO_BCDC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PROTO_MSGBUF create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_SDIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC_LEDS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMUTIL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCM_TRACING create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_6LOWPAN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_AOSPEXT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_ATH3K create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BCM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP_MC_FILTER create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP_PROTO_FILTER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BREDR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_CMTP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_DEBUGFS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBCM203X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBCM4377 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBFUSB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBPA10X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTSDIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_AUTOSUSPEND create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_BCM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_MTK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_POLL_SYNC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_RTL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_AG6XX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_ATH3K create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_BCSP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_H4 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_INTEL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIVHCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HIDP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_INTEL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LEDS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE_L2CAP_ECRED create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL_SDIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MSFTEXT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MTKSDIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM_TTY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RTL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_SELFTEST create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_VIRTIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_BUILDTIME_MCOUNT_SORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_PADDING create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_THUNKS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_THUNKS_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CAPI_TRACE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CARL9170 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CCS811 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_ENTRY_PADDING create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_IBT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_RETURN_THUNK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SANE_STACKPROTECTOR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SLS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_CH7322 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_SECO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CHROME_PLATFORMS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CLKBLD_I8253 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CLKEVT_I8253 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_WATCHDOG create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CM32181 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3232 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3323 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3605 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CM36651 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CMDLINE_BOOL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAL_LAPTOP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAT_32 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAT_FOR_U64_ALIGNMENT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CPA_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CPU5_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CPU_SUP_CENTAUR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRASH_MAX_MEMORY_RANGES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64_ROCKSOFT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_CRC64_ROCKSOFT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK_AES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK_SHA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_QAT_4XXX create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_LIB_POLY1305_RSIZE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_POLYVAL_CLMUL_NI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_CX_ECAT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DA280 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DA311 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DCA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_ENTRY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_NMI_SELFTEST create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_TLBFLUSH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DEV_COREDUMP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DHT11 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DLHL60D create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD06 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD09 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD10 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DMAR_TABLE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DMA_VIRTUAL_CHANNELS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DPOT_DAC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DPS310 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_DP_CEC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_HYPERV create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I2C_NXP_TDA998X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I2C_SIL164 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I915_REQUEST_TIMEOUT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_MIPI_DSI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_PRIVACY_SCREEN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_VBOXVIDEO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_VMWGFX_MKSSTATS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_XEN_FRONTEND create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DS1803 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DS4424 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DW_DMAC_CORE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DYNAMIC_FTRACE_WITH_REGS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DYNAMIC_MEMORY_LAYOUT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EARLY_PRINTK_USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EBC_C384_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_ATOMIC_SCRUB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_IE31200 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_IGEN6 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_PND2 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_X38 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EDD_OFF create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EEEPC_LAPTOP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EEEPC_WMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_DXE_MEM_ATTRIBUTES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_FAKE_MEMMAP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_HANDOVER_PROTOCOL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_PGT_DUMP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EISA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ENIC 
create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ENVELOPE_DETECTOR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EUROTECH_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_EXAR_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_F71808E_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_ARC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_HGA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_LE80578 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_N411 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_SM501 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VGA16 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VIA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_NET create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_OHCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_SBP2 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FPROBE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FS_MBCACHE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FTRACE_MCOUNT_USE_CC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FTRACE_SORT_STARTUP_TEST create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_ES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_LAPTOP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_TABLET create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT_16B create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_PADDING_BYTES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_PADDING_CFI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FW_LOADER_USER_HELPER_FALLBACK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FXAS21002C create 
mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FXLS8962AF_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FXLS8962AF_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FXOS8700_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_FXOS8700_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GDS_FORCE_MITIGATION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_ADC_BATTERY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_CMOS_UPDATE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_ENTRY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IOMAP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IRQ_RESERVATION_MODE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GIGABYTE_WMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GP2AP002 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GP2AP020A00F create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GPD_POCKET_FAN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_AMD8111 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_DWAPB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ELKHARTLAKE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_F7188X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ICH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_IT87 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ML_IOH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH311X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_VIPERBOARD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_VX855 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_WINBOND create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_WS16C48 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HARDLOCKUP_CHECK_TIMESTAMP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ACPI_APEI_NMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KCSAN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KMSAN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_NODE_DEV_GROUP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_SOFT_DIRTY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_USERFAULTFD_WP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_BOOTMEM_INFO_NODE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_BUILDTIME_MCOUNT_SORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_CALL_THUNKS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_EISA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_EXIT_THREAD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_FENTRY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_INTEL_TXT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_JUMP_LABEL_HACK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_BZIP2 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_GZIP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZ4 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZMA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_XZ create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_ZSTD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KPROBES_ON_FTRACE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_DIRTY_RING_TSO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_NO_POLL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PFNCACHE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PM_NOTIFIER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_MIXED_BREAKPOINTS_REGS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_MMIOTRACE_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_HACK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_VALIDATION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL_MCOUNT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL_NOP_MCOUNT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OPTPROBES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_PCSPKR_PLATFORM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_PREEMPT_DYNAMIC_CALL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RETHOOK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RUST create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL_INLINE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_UACCESS_VALIDATION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_UNSTABLE_SCHED_CLOCK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_USER_RETURN_NOTIFIER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC100X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC2010 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HFI1_DEBUG_SDMA_ORDER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HI8435 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_ALPS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_ASUS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_CMEDIA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_HYPERV_MOUSE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_NVIDIA_SHIELD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PICOLCD_CIR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ACCEL_3D create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ALS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_CUSTOM_SENSOR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_DEVICE_ROTATION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_GYRO_3D create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUMIDITY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_IIO_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_IIO_TRIGGER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_INCLINOMETER_3D create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_MAGNETOMETER_3D create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PRESS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PROX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_TEMP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HMC425 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_CORE_SYNC_FULL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_PARALLEL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_PCI_SHPC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_SMT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_SPLIT_STARTUP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HP03 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HP206C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HPET_EMULATE_RTC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HPET_MMAP_DEFAULT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HPWDT_NMI_DECODING create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_ILO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_WATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HSU_DMA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HTS221 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HTU21 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HUAWEI_WMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_IRQ create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN_FRONTEND create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HWMON_VID create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIRTIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HX711 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_NET create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_TESTING create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_TIMER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_VTL_MODE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756_S4882 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD8111 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_DESIGNWARE_AMDPSP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_DESIGNWARE_BAYTRAIL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_HELPER_AUTO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_I801 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ISCH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ISMT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MLXCPLD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA9541 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA954x create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_NFORCE2_S4985 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_PARPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_PIIX4 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_SCMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_SIS96X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIAPRO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIPERBOARD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN_SMBUS create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_I8253_LOCK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IAQCORE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IB700_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IBMASR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_ASM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_RTL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ICE_HWTS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ICP10100 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IDEAPAD_LAPTOP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IDLE_INJECT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IE6XX_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IEEE802154_FAKELB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_CB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMAENGINE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_HW_CONSUMER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_CONFIGFS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_CONSUMERS_PER_TRIGGER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_INTERRUPT_TRIGGER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KFIFO_BUF create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KX022A_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KX022A_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_MUX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_RESCALE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SSP_SENSORHUB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_ACCEL_3AXIS create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_GYRO_3AXIS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM6DSX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM9DS0 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_MAGN_3AXIS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_PRESS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SW_DEVICE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SW_TRIGGER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SYSFS_TRIGGER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGERED_BUFFER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGERED_EVENT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INA2XX_ADC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_HFI1 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_QIB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_USNIC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_VMWARE_PVRDMA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_AD714X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ADXL34X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_APANEL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ATI_REMOTE2 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ATLAS_BTNS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_BMA150 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_CM109 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_CMA3000 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DA7280_HAPTICS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV260X_HAPTICS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV2665_HAPTICS create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV2667_HAPTICS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_E3X0_BUTTON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_BEEPER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_DECODER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_ROTARY_ENCODER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_VIBRA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IDEAPAD_SLIDEBAR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IMS_PCU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS269A create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS626A create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS7222 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_JOYDEV create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_KEYSPAN_REMOTE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_KXTJ9 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_MISC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_MMA8450 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PCF8574 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PCSPKR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_POWERMATE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PWM_BEEPER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PWM_VIBRA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_TABLET create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_TOUCHSCREEN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_UINPUT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_VIVALDIFMAP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_XEN_KBDDEV_FRONTEND create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_YEALINK create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ATOMISP2_PM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_GTT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_HID_EVENT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IDXD_COMPAT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IFS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_INT0002_VGPIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IOMMU_DEFAULT_ON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IOMMU_FLOPPY_WA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IPS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISHTP_ECLITE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISH_HID create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_LDMA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC_PROXY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_HDCP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_PXP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_TXE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_OAKTRAIL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_PUNIT_IPC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SAR_INT1092 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PLATFORM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SDSI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SMARTCONNECT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SOC_DTS_IOSF_CORE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SOC_DTS_THERMAL create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC_COOLING create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_VBTN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI_SBL_FW_UPDATE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI_THUNDERBOLT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INTERVAL_TREE_SPAN_ITER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_ICM42600_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_ICM42600_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_MPU6050_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_MPU6050_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD_DRIVER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IOSF_MBI_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_0XED create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_NONE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_UDELAY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IPU_BRIDGE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2100 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2200 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IRQ_BYPASS_MANAGER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IRSD200 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ENE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_FINTEK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IGORPLUGUSB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IGUANA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON_DECODER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON_RAW create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ITE_CIR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_JVC_DECODER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_MCEUSB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_MCE_KBD_DECODER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_NEC_DECODER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_NUVOTON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RC5_DECODER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RC6_DECODER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RCMM_DECODER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_REDRAT3 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SANYO_DECODER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SERIAL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SONY_DECODER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_STREAMZAP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TOY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TTUSBIR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_WINBOND_CIR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI_MIDDLEWARE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29003 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29020 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29125 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29501 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IT8712F_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IT87_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ITG3200 create 
mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL3945 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL4965 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLDVM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLMVM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUGFS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEVICE_TRACING create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_LEDS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_OPMODE_MODULAR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_JAILHOUSE_GUEST create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_JSA1212 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_KALLSYMS_ABSOLUTE_PERCPU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_KARMA_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_KEYBOARD_APPLESPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_KEYBOARD_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_KMX61 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_KRETPROBE_ON_RETHOOK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_ASYNC_PF create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_COMPAT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_EXTERNAL_WRITE_TRACKING create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_SMM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_XEN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_KXCJK1013 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_KXSD9 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_APU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_CLASS_FLASH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_INTEL_SS4200 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_LT3593 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_MLXCPLD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_NIC78BX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_AUDIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_DISK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LEGACY_VSYSCALL_XONLY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LENOVO_YMC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LG_LAPTOP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS_THINFIRM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBNVDIMM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LIDAR_LITE_V2 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LIRC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LMP91000 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_ICH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_SCH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LP_CONSOLE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC1660 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2471 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2485 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2496 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2497 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2632 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2688 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2983 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LTR501 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LTRF216A create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_LV0104CS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_M62332 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC80211_HWSIM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MACHZ_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MACINTOSH_DRIVERS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC_EMUMOUSEBTN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAG3110 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MATOM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1027 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11100 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1118 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11205 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11410 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1241 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1363 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30100 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30102 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30208 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX31856 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX31865 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX44000 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX44009 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX517 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5432 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5481 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5487 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5522 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5821 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX9611 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MAXIM_THERMOCOUPLE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MB1232 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_MC3230 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP320X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3422 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3911 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4018 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP41010 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4131 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4531 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4725 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4728 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4922 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MDIO_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_ANALOG_TV_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CAMERA_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_RC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_DIGITAL_TV_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_PCI_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_PLATFORM_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_RADIO_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SDR_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUBDRV_AUTOSELECT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT_FILTER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_TEST_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_USB_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEMSTICK_REALTEK_PCI 
create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MEMSTICK_REALTEK_USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MERAKI_MX100 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_CORE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_ACPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_PMC_BXT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_QUARK_I2C_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SYSCON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_VIPERBOARD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_VX855 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MICREL_KS8995MA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MICROCODE_LATE_LOADING create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MICROSOFT_MANA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MINIX_SUBPARTITION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX_PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX_USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_AVMFRITZ create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_DSP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HDLC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCMULTI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCPCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCUSB create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_INFINEON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_IPAC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_ISAR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_L1OIP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_NETJET create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_SPEEDFAX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_W6692 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MK8 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX90614 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX90632 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MLXREG_IO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MLXREG_LC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7455_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7455_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7660 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA8452 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9551 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9553 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC35240 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMCONF_FAM10H create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_MTK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_TOSHIBA_PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_WBSD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMIOTRACE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MMU_GATHER_MERGE_VMAS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_APPLETOUCH create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_BCM5974 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_CYAPA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_ELAN_I2C_SMBUS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ALPS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_BYD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_CYPRESS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH_SMBUS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_FOCALTECH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_LIFEBOOK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_LOGIPS2PP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SENTELIC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SMBUS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SYNAPTICS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_TOUCHKIT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_TRACKPOINT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_VMMOUSE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_SERIAL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_VSXXXAA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL115_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL115_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL3115 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MPRLS0025PA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MPSC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MPU3050_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5611 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5637 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MSA311 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_EC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_LAPTOP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_WMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7601U create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7603E create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7615E create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663S create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663U create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_CORE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_LEDS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x02_LIB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x02_USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0E create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0U create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2E create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2U create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7915E create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921E create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921S create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921U create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7996E create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MTD_CFI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MWAVE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_PCIE create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_SDIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MWL8K create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC4005 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC6255 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MXM_WMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE_DCA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NAU7802 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_BTT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_PFN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_AMD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_BROCADE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_CISCO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_DEC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_EMULEX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_QUALCOMM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NI903X_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NIC7018_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NITRO_ENCLAVES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NMI_CHECK_CPU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NOA1305 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NOZOMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_AMD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_INTEL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NVIDIA_WMI_EC_BACKLIGHT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_NVSW_SN2201 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_OF create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT3001 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT4001 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_OSF_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_OUTPUT_FORMAT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_P2SB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PA12203001 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PANASONIC_LAPTOP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PANEL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PARAVIRT_CLOCK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PARAVIRT_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_1284 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_NOT_PC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC_FIFO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC_SUPERIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_SERIAL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PATA_PARPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PC87413_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PCENGINES_APU2 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_DIRECT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_LOCKLESS_CONFIG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_XEN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PHY_CPCAP_USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PHY_INTEL_LGM_EMMC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ALDERLAKE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_BAYTRAIL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_BROXTON create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CANNONLAKE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CEDARFORK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CHERRYVIEW create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_DENVERTON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ELKHARTLAKE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_EMMITSBURG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_GEMINILAKE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ICELAKE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_INTEL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_JASPERLAKE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LAKEFIELD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LEWISBURG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LYNXPOINT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_METEORLAKE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_SUNRISEPOINT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_TIGERLAKE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PING create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PLFXLC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PLIP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PMIC_OPREGION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PNP_DEBUG_MESSAGES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_POWER_RESET_RESTART create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PPDEV create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PPS_CLIENT_PARPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PREFIX_SYMBOLS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PRINTER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PROC_PID_ARCH_STATUS create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_PROVIDE_OHCI1394_DMA_INIT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_KVM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_VMW create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PUNIT_ATOM_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PVH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PLATFORM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA_MGMT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_QTNFMAC_PCIE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_ATI_REMOTE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_CORE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_DECODERS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_DEVICES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_LOOPBACK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_MAP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_XBOX_DVD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_REGULATOR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RETHOOK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RFD77402 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RFKILL_GPIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RICHTEK_RTQ6056 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RMI4_F34 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RMI4_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ROHM_BU27008 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ROHM_BU27034 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RPR0521 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2400PCI create 
mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2500PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2500USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT3290 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT33XX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT35XX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT53XX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT33XX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT3573 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT35XX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT53XX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT55XX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_UNKNOWN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB_MMIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_CRYPTO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_DEBUGFS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_FIRMWARE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_LEDS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_MMIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT61PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RT73USB create 
mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABB5ZES3 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABX80X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1305 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1343 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1347 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1374_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1390 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1685_FAMILY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_HID_SENSOR_TIME create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T93 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T94 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_MAX6902 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_MCP795 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2123 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2127 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF85063 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_R9701 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RS5C348 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX4581 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX8010 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_MC146818_LIB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8180 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8187 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8188EE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192CE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192CU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192C_COMMON create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192DE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192EE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192SE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723AE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723BE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8821AE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU_UNTESTED create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLBTCOEXIST create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL_CARDS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822B create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CS create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_CORE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_DEBUGFS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW89 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SAMSUNG_LAPTOP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SAMSUNG_Q10 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SATA_ZHAOXIN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SBC_EPX_C3_WATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SBC_FITPC2_WATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SBP_TARGET create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SC1200_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3000 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3300 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD30_CORE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD4X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_AACRAID create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_BNX2X_FCOE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_BNX2_ISCSI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_IMM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_IPR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_ISCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_PPA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SDMA_VERBOSITY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SD_ADC_MODULATOR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SEL3350_PLATFORM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSEAIR_SUNRISE_CO2 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP30 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP40 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU3 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ACPI_POWER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7314 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7414 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7418 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADC128D818 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADCXX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1025 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1026 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1029 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1031 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1275 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM9240 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7828 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7871 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7410 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7411 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7462 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7470 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7475 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7X10 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AMC6821 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_APDS990X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_APPLESMC create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASB100 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASC7621 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_EC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_WMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ATK0110 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ATXP1 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_BH1770 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_CORETEMP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DELL_SMM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DME1737 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DS1621 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DS620 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_EMC1403 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_EMC6W201 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F71805F create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F71882FG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F75375S create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_FAM15H_POWER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_FSCHMD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_G760A create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_G762 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_GL518SM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_GL520SM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HDAPS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HMC5843_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HMC5843_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HP_WMI create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_I5500 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_I5K_AMB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IBMAEM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IBMPEX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IIO_HWMON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_INA209 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_INA2XX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29018 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29028 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IT87 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_JC42 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_K10TEMP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_K8TEMP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LINEAGE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3LV02D create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM25066 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM63 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM70 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM73 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM75 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM77 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM78 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM80 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM83 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM85 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM87 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM90 create mode 
100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM92 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM93 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95234 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95241 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95245 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2945 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2978 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC3815 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4151 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4215 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4222 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4245 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4260 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4261 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1111 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16064 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16065 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1619 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1668 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX197 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX20751 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX31790 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX34440 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6639 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6650 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6697 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX8688 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MCP3021 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MLXREG_FAN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6683 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775_CORE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7802 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7904 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NTC_THERMISTOR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_OXP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PC87360 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PC87427 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PCF8591 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PMBUS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_POWR1220 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5627 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5636 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH56XX_COMMON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHT15 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHT21 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHTC1 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SIS5595 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47B397 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M1 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M192 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TC74 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_THMC50 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP102 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP103 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP401 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP421 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TPS40422 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2550 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2563 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9000 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9200 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VIA686A create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VIA_CPUTEMP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VT1211 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VT8231 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83627EHF create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83627HF create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83781D create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83791D create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83792D create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83793 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83795 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83795_FANCTRL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83L785TS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83L786NG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_XGENE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ZL6100 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_16550A_VARIANTS create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_LPSS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_MID create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_RT288X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC_NR_PORTS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_JSM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_LANTIQ create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_MULTI_INSTANTIATE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIO_CT82C710 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIO_PARKBD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MCDI_LOGGING create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MCDI_MON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MTD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_SRIOV create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_XP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1133 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1145 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7005 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7020 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SIEMENS_SIMATIC_IPC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SMSC37B787_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SMSC_SCH311X_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SOLARIS_X86_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SONYPI_COMPAT create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SONY_LAPTOP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_BUTTERFLY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_LANTIQ_SSC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_LM70_LLP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SPS30_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF04 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF08 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_STK3310 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8312 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8BA50 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ST_UVIS25 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SUN_PARTITION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SURFACE3_WMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9310 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9324 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9360 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9500 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SYSTEM76_ACPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SYS_HYPERVISOR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_T5403 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_SERIAL_WACOM4 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_ACECAD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_AIPTEK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_HANWANG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_KBTAB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_PEGASUS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_ATMEL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_INFINEON create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_NUVOTON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_XEN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3414 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3472 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TELCLOCK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_CLOCKSOURCE_WATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_FPU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_HMM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_THERMAL_ACPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_DEBUGFACILITIES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_HOTKEY_POLL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_UNSAFE_LEDS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_VIDEO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_LMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_BGX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_PF create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_RGX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_VF create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TIFM_7XX1 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC081C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC0832 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC084S021 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC108S102 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC12138 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC128S052 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC161S626 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1015 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1100 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS124S08 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS131E08 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7924 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7950 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8344 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8688 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC082S085 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC5571 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7311 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7612 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_LMP92064 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TLC4541 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TMAG5273 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TSC2046 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP006 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP007 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP117 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOPSTAR_LAPTOP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_BT_RFKILL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_HAPS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_WMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7877 create mode 
100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7879 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ADS7846 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ATMEL_MXT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AUO_PIXCIR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_BU21013 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_BU21029 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_COLIBRI_VF50 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CY8CTMA140 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CY8CTMG110 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP4_CORE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP5 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP_CORE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_DYNAPRO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EDT_FT5X06 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EETI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EGALAX_SERIAL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EKTF2127 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELAN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EXC3000 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_FUJITSU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GOODIX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GUNZE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HAMPSHIRE create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIDEEP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIMAX_HX83112B create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HYCON_HY46XX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILI210X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILITEK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IMAGIS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_INEXIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS5XX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS7211 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MAX11801 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MCS5000 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MELFAS_MIP4 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MMS114 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MSG2638 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MTOUCH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PENMOUNT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PIXCIR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_RM_TS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ROHM_BU21023 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_S6SY761 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SILEAD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SIS_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ST1232 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_STMFTS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SURFACE3_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SX8654 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHIT213 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHRIGHT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHWIN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TPS6507X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2004 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2005 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2007 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC_SERIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_USB_COMPOSITE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WACOM_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WACOM_W8001 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WDT87XX_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZET6223 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZFORCE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZINITIX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TPL0102 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TQMX86_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2583 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2591 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2772 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL4531 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS01 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS02D create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TYPEC_FUSB302 create mode 
100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_UEFI_CPER_X86 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_UNIXWARE_DISKLABEL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_US5182D create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_CHAOSKEY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_EHCI_HCD_PLATFORM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_LGM_PHY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_NET_RNDIS_WLAN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_NET_SR9700 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_PULSE8_CEC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_RAINSHADOW_CEC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_ROLES_INTEL_XHCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_CONSOLE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_MOS7715_PARPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_SIMPLE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SPEEDTOUCH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_UHCI_HCD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_ULPI_BUS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_USS720 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XEN_HCD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XHCI_DBGCAP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XHCI_PLATFORM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USER_RETURN_NOTIFIER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USER_STACKTRACE_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_UV_MMTIMER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_UV_SYSFS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL3020 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL4000 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL4035 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VEML6030 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VEML6070 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VF610_ADC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VF610_DAC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VFIO_DEVICE_CDEV create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VFIO_PCI_VGA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VIA_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VIPERBOARD_ADC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VIRT_WIFI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VL53L0X_I2C create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VL6180 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VMAP_PFN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VMGENID create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_VZ89X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_W83627HF_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_W83877F_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_W83977F_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WAFER_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WANT_DEV_COREDUMP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WCN36XX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WFX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WIL6210 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WILC1000_SDIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WILC1000_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WINMATE_FM07_KEYS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WIRELESS_HOTKEY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ADMTEK create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATMEL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_BROADCOM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_CISCO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTEL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTERSIL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MARVELL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MEDIATEK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MICROCHIP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_PURELIFI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_QUANTENNA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RALINK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_REALTEK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RSI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_SILABS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ST create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_TI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ZYDAS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WMI_BMOF create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_ACPI_CPUFREQ_CPB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_AMD_PSTATE_UT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CET create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CMOV create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_DEBUGCTLMSR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_HV_CALLBACK_VECTOR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_INTERNODE_CACHE_SHIFT create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_L1_CACHE_SHIFT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MCE_THRESHOLD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MEM_ENCRYPT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MINIMUM_CPU_FAMILY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_NEED_RELOCS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PLATFORM_DRIVERS_DELL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PLATFORM_DRIVERS_HP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY_DEVICE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_POWERNOW_K8 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_CENTRINO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_LIB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_THERMAL_VECTOR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_USER_SHADOW_STACK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_VMX_FEATURE_NAMES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_X32_ABI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_X9250 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XENFS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_ACPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_AUTO_XLATE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BACKEND create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BALLOON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BLKDEV_FRONTEND create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_COMPAT_XENFS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_DEBUG_FS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_DEV_EVTCHN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_EFI create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_FBDEV_FRONTEND create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GNTDEV create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GRANT_DEV_ALLOC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GRANT_DMA_ALLOC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PRIVCMD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PV create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVCALLS_FRONTEND create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_GUEST create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_SMP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SAVE_RESTORE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SCSI_FRONTEND create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SYS_HYPERVISOR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_UNPOPULATED_ALLOC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_VIRTIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_WDT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_XENBUS_FRONTEND create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XIAOMI_WMI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_XILINX_XADC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_YAMAHA_YAS530 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_YOGABOOK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ZOPT2201 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ZPA2326 create mode 100644 anolis/configs/custom-overrides/64k/arm64.config create mode 100644 anolis/configs/custom-overrides/debug/arm64.config create mode 100644 anolis/configs/custom-overrides/debug/default.config create mode 100644 anolis/configs/custom-overrides/debug/x86.config create mode 100644 anolis/configs/custom-overrides/gcov/default.config create mode 100644 
anolis/configs/custom-overrides/kvm_modulize/arm64.config create mode 100644 anolis/configs/metadata/changelog/CONFIG_LSM create mode 100644 anolis/configs/metadata/changelog/CONFIG_PREEMPT_VOLUNTARY create mode 100644 anolis/configs/metadata/changelog/CONFIG_VIRT_PLAT_DEV diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS new file mode 100644 index 000000000000..2925d2a06226 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARCH_PHYTIUM b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARCH_PHYTIUM new file mode 100644 index 000000000000..d49f05a3cba1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARCH_PHYTIUM @@ -0,0 +1 @@ +CONFIG_ARCH_PHYTIUM=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64 new file mode 100644 index 000000000000..e40393f9ae82 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64 @@ -0,0 +1 @@ +CONFIG_ARM64=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_16K_PAGES b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_16K_PAGES new file mode 100644 index 000000000000..517a9e44ba43 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_16K_PAGES @@ -0,0 +1 @@ +# CONFIG_ARM64_16K_PAGES is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_4K_PAGES b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_4K_PAGES new file mode 100644 index 000000000000..5df91df1aa06 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_4K_PAGES @@ -0,0 +1 @@ +CONFIG_ARM64_4K_PAGES=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_64K_PAGES b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_64K_PAGES new file mode 100644 index 000000000000..c63a3faadd97 --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_64K_PAGES @@ -0,0 +1 @@ +# CONFIG_ARM64_64K_PAGES is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_BTI b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_BTI new file mode 100644 index 000000000000..f2b5fedbf279 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_BTI @@ -0,0 +1 @@ +# CONFIG_ARM64_BTI is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_CNP b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_CNP new file mode 100644 index 000000000000..09a40aa6394a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_CNP @@ -0,0 +1 @@ +CONFIG_ARM64_CNP=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_E0PD b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_E0PD new file mode 100644 index 000000000000..cba9bf0b8cd0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_E0PD @@ -0,0 +1 @@ +CONFIG_ARM64_E0PD=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_HW_AFDBM b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_HW_AFDBM new file mode 100644 index 000000000000..4fc9f03d7411 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_HW_AFDBM @@ -0,0 +1 @@ +CONFIG_ARM64_HW_AFDBM=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_MPAM b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_MPAM new file mode 100644 index 000000000000..45957b7b4ea2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_MPAM @@ -0,0 +1 @@ +CONFIG_ARM64_MPAM=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_MTE b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_MTE new file mode 100644 index 000000000000..69b7778d449b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_MTE @@ -0,0 +1 @@ +CONFIG_ARM64_MTE=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PAN b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PAN new file mode 100644 index 000000000000..ac8c85ac7fc2 --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PAN @@ -0,0 +1 @@ +CONFIG_ARM64_PAN=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PSEUDO_NMI b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PSEUDO_NMI new file mode 100644 index 000000000000..9a822122078c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PSEUDO_NMI @@ -0,0 +1 @@ +CONFIG_ARM64_PSEUDO_NMI=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PTR_AUTH b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PTR_AUTH new file mode 100644 index 000000000000..15cf70dcbed3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_PTR_AUTH @@ -0,0 +1 @@ +# CONFIG_ARM64_PTR_AUTH is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_RAS_EXTN b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_RAS_EXTN new file mode 100644 index 000000000000..b664a0de1b4c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_RAS_EXTN @@ -0,0 +1 @@ +CONFIG_ARM64_RAS_EXTN=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_SVE b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_SVE new file mode 100644 index 000000000000..cbb647e2703a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_SVE @@ -0,0 +1 @@ +CONFIG_ARM64_SVE=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_TLB_RANGE b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_TLB_RANGE new file mode 100644 index 000000000000..b34bf805a72d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_TLB_RANGE @@ -0,0 +1 @@ +CONFIG_ARM64_TLB_RANGE=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_USE_LSE_ATOMICS b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_USE_LSE_ATOMICS new file mode 100644 index 000000000000..bb1ab4cb28e5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_USE_LSE_ATOMICS @@ -0,0 +1 @@ +CONFIG_ARM64_USE_LSE_ATOMICS=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_VA_BITS_39 
b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_VA_BITS_39 new file mode 100644 index 000000000000..085f98ecb058 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM64_VA_BITS_39 @@ -0,0 +1 @@ +# CONFIG_ARM64_VA_BITS_39 is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CCN b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CCN new file mode 100644 index 000000000000..af18c065af17 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CCN @@ -0,0 +1 @@ +CONFIG_ARM_CCN=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CMN b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CMN new file mode 100644 index 000000000000..50c015319fbc --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CMN @@ -0,0 +1 @@ +CONFIG_ARM_CMN=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CPU_RESCTRL b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CPU_RESCTRL new file mode 100644 index 000000000000..b1c35e9ba99b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_CPU_RESCTRL @@ -0,0 +1 @@ +CONFIG_ARM_CPU_RESCTRL=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC new file mode 100644 index 000000000000..2cb25cc89b7b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC @@ -0,0 +1 @@ +CONFIG_ARM_GIC=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_PHYTIUM_2500 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_PHYTIUM_2500 new file mode 100644 index 000000000000..ed757b34f23b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_PHYTIUM_2500 @@ -0,0 +1 @@ +CONFIG_ARM_GIC_PHYTIUM_2500=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V2M b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V2M new file mode 100644 index 000000000000..b3eb7dd653ac --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V2M @@ -0,0 +1 @@ +CONFIG_ARM_GIC_V2M=y diff --git 
a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3 new file mode 100644 index 000000000000..424dd88e7fdf --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3 @@ -0,0 +1 @@ +CONFIG_ARM_GIC_V3=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS new file mode 100644 index 000000000000..d50b79f8d9ef --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS @@ -0,0 +1 @@ +CONFIG_ARM_GIC_V3_ITS=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS_PCI b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS_PCI new file mode 100644 index 000000000000..250435957adf --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_GIC_V3_ITS_PCI @@ -0,0 +1 @@ +CONFIG_ARM_GIC_V3_ITS_PCI=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU new file mode 100644 index 000000000000..a9348c81752e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU @@ -0,0 +1 @@ +CONFIG_ARM_PMU=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU_ACPI b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU_ACPI new file mode 100644 index 000000000000..ec97cfb1617f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_PMU_ACPI @@ -0,0 +1 @@ +CONFIG_ARM_PMU_ACPI=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU new file mode 100644 index 000000000000..920fadc10584 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU @@ -0,0 +1 @@ +CONFIG_ARM_SMMU=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3 new file mode 100644 index 000000000000..83d144ae08d0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3 @@ -0,0 +1 @@ +CONFIG_ARM_SMMU_V3=y diff --git 
a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3_PMU b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3_PMU new file mode 100644 index 000000000000..827377f8cda7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SMMU_V3_PMU @@ -0,0 +1 @@ +CONFIG_ARM_SMMU_V3_PMU=m diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SPE_PMU b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SPE_PMU new file mode 100644 index 000000000000..c7f32cf49f37 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_ARM_SPE_PMU @@ -0,0 +1 @@ +CONFIG_ARM_SPE_PMU=m diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_CORESIGHT b/anolis/configs/L0-MANDATORY/arm64/CONFIG_CORESIGHT new file mode 100644 index 000000000000..4d70504d87d4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_CORESIGHT @@ -0,0 +1 @@ +CONFIG_CORESIGHT=m diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_CPU_LITTLE_ENDIAN b/anolis/configs/L0-MANDATORY/arm64/CONFIG_CPU_LITTLE_ENDIAN new file mode 100644 index 000000000000..ee43fdb3b8f4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_CPU_LITTLE_ENDIAN @@ -0,0 +1 @@ +CONFIG_CPU_LITTLE_ENDIAN=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_DEFERRED_STRUCT_PAGE_INIT b/anolis/configs/L0-MANDATORY/arm64/CONFIG_DEFERRED_STRUCT_PAGE_INIT new file mode 100644 index 000000000000..c23e98f1ee46 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_DEFERRED_STRUCT_PAGE_INIT @@ -0,0 +1 @@ +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_EXT4_FS b/anolis/configs/L0-MANDATORY/arm64/CONFIG_EXT4_FS new file mode 100644 index 000000000000..6ead740de57a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_EXT4_FS @@ -0,0 +1 @@ +CONFIG_EXT4_FS=m diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_EXTCON b/anolis/configs/L0-MANDATORY/arm64/CONFIG_EXTCON new file mode 100644 index 000000000000..bde29bcfc23f --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/arm64/CONFIG_EXTCON @@ -0,0 +1 @@ +CONFIG_EXTCON=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ new file mode 100644 index 000000000000..dfae244722fd --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ @@ -0,0 +1 @@ +CONFIG_HZ=250 diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ_1000 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ_1000 new file mode 100644 index 000000000000..c211724d6d0f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ_1000 @@ -0,0 +1 @@ +# CONFIG_HZ_1000 is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ_250 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ_250 new file mode 100644 index 000000000000..5bb56df22812 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_HZ_250 @@ -0,0 +1 @@ +CONFIG_HZ_250=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_IRQ_TIME_ACCOUNTING b/anolis/configs/L0-MANDATORY/arm64/CONFIG_IRQ_TIME_ACCOUNTING new file mode 100644 index 000000000000..50707d66e988 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_IRQ_TIME_ACCOUNTING @@ -0,0 +1 @@ +# CONFIG_IRQ_TIME_ACCOUNTING is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_JBD2 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_JBD2 new file mode 100644 index 000000000000..72298fc42df1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_JBD2 @@ -0,0 +1 @@ +CONFIG_JBD2=m diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_KVM b/anolis/configs/L0-MANDATORY/arm64/CONFIG_KVM new file mode 100644 index 000000000000..14f90d8d6801 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_KVM @@ -0,0 +1 @@ +CONFIG_KVM=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY b/anolis/configs/L0-MANDATORY/arm64/CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY new file mode 100644 index 000000000000..7e44440231db --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/arm64/CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY @@ -0,0 +1 @@ +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_HOST_GENERIC b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_HOST_GENERIC new file mode 100644 index 000000000000..c9ebcff72334 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_HOST_GENERIC @@ -0,0 +1 @@ +CONFIG_PCI_HOST_GENERIC=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB new file mode 100644 index 000000000000..35de7b23ab9d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB @@ -0,0 +1 @@ +# CONFIG_PCI_PF_STUB is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_NONE b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_NONE new file mode 100644 index 000000000000..45e3146818c9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_NONE @@ -0,0 +1 @@ +# CONFIG_PREEMPT_NONE is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_VOLUNTARY b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_VOLUNTARY new file mode 100644 index 000000000000..4762d5ecdb30 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_VOLUNTARY @@ -0,0 +1 @@ +CONFIG_PREEMPT_VOLUNTARY=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PWM_ATMEL_TCB b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PWM_ATMEL_TCB new file mode 100644 index 000000000000..0b75af7f9d88 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PWM_ATMEL_TCB @@ -0,0 +1 @@ +# CONFIG_PWM_ATMEL_TCB is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PWM_XILINX b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PWM_XILINX new file mode 100644 index 000000000000..7018716d153f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PWM_XILINX @@ -0,0 +1 @@ +# CONFIG_PWM_XILINX is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_GPI_DMA 
b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_GPI_DMA new file mode 100644 index 000000000000..8b26467a3104 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_GPI_DMA @@ -0,0 +1 @@ +# CONFIG_QCOM_GPI_DMA is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_ICC_BWMON b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_ICC_BWMON new file mode 100644 index 000000000000..eae24f1162d1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_ICC_BWMON @@ -0,0 +1 @@ +# CONFIG_QCOM_ICC_BWMON is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_LMH b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_LMH new file mode 100644 index 000000000000..b89caa4b6306 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_LMH @@ -0,0 +1 @@ +# CONFIG_QCOM_LMH is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_MPM b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_MPM new file mode 100644 index 000000000000..7daedf3de7b7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_MPM @@ -0,0 +1 @@ +# CONFIG_QCOM_MPM is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_RAMP_CTRL b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_RAMP_CTRL new file mode 100644 index 000000000000..61a3f52d3004 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_RAMP_CTRL @@ -0,0 +1 @@ +# CONFIG_QCOM_RAMP_CTRL is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_RPM_MASTER_STATS b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_RPM_MASTER_STATS new file mode 100644 index 000000000000..07a0ad76ba60 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_RPM_MASTER_STATS @@ -0,0 +1 @@ +# CONFIG_QCOM_RPM_MASTER_STATS is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SCM b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SCM new file mode 100644 index 000000000000..58e98180c4d5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SCM @@ -0,0 +1 @@ 
+CONFIG_QCOM_SCM=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT new file mode 100644 index 000000000000..8f9c32859fb6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT @@ -0,0 +1 @@ +# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SPM b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SPM new file mode 100644 index 000000000000..13face25cbfa --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SPM @@ -0,0 +1 @@ +# CONFIG_QCOM_SPM is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SSC_BLOCK_BUS b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SSC_BLOCK_BUS new file mode 100644 index 000000000000..31f85458d145 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_QCOM_SSC_BLOCK_BUS @@ -0,0 +1 @@ +# CONFIG_QCOM_SSC_BLOCK_BUS is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_UNMAP_KERNEL_AT_EL0 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_UNMAP_KERNEL_AT_EL0 new file mode 100644 index 000000000000..1a577664e2b4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_UNMAP_KERNEL_AT_EL0 @@ -0,0 +1 @@ +CONFIG_UNMAP_KERNEL_AT_EL0=y diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO new file mode 100644 index 000000000000..f738f50ac231 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO @@ -0,0 +1 @@ +CONFIG_VIRTIO=m diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_BLK b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_BLK new file mode 100644 index 000000000000..193a208422f2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_BLK @@ -0,0 +1 @@ +CONFIG_VIRTIO_BLK=m diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_PCI b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_PCI new file mode 100644 
index 000000000000..58505d3a58ff --- /dev/null +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_PCI @@ -0,0 +1 @@ +CONFIG_VIRTIO_PCI=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_64BIT b/anolis/configs/L0-MANDATORY/default/CONFIG_64BIT new file mode 100644 index 000000000000..06a94e48bf68 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_64BIT @@ -0,0 +1 @@ +CONFIG_64BIT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI new file mode 100644 index 000000000000..839566bae001 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI @@ -0,0 +1 @@ +CONFIG_ACPI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_APEI b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_APEI new file mode 100644 index 000000000000..9ab33facf55a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_APEI @@ -0,0 +1 @@ +CONFIG_ACPI_APEI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_IPMI b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_IPMI new file mode 100644 index 000000000000..e40fb9aeac03 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_IPMI @@ -0,0 +1 @@ +CONFIG_ACPI_IPMI=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_NUMA b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_NUMA new file mode 100644 index 000000000000..19881f440202 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_NUMA @@ -0,0 +1 @@ +CONFIG_ACPI_NUMA=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_PROCESSOR b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_PROCESSOR new file mode 100644 index 000000000000..a24416dcfc82 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ACPI_PROCESSOR @@ -0,0 +1 @@ +CONFIG_ACPI_PROCESSOR=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ADVISE_SYSCALLS b/anolis/configs/L0-MANDATORY/default/CONFIG_ADVISE_SYSCALLS new file mode 100644 index 
000000000000..0c60467178b8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ADVISE_SYSCALLS @@ -0,0 +1 @@ +CONFIG_ADVISE_SYSCALLS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_AIO b/anolis/configs/L0-MANDATORY/default/CONFIG_AIO new file mode 100644 index 000000000000..4272502fc834 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_AIO @@ -0,0 +1 @@ +CONFIG_AIO=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ALLOW_DEV_COREDUMP b/anolis/configs/L0-MANDATORY/default/CONFIG_ALLOW_DEV_COREDUMP new file mode 100644 index 000000000000..ff2c37d0b638 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ALLOW_DEV_COREDUMP @@ -0,0 +1 @@ +CONFIG_ALLOW_DEV_COREDUMP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ASYMMETRIC_KEY_TYPE b/anolis/configs/L0-MANDATORY/default/CONFIG_ASYMMETRIC_KEY_TYPE new file mode 100644 index 000000000000..c7d15a4b1f09 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ASYMMETRIC_KEY_TYPE @@ -0,0 +1 @@ +CONFIG_ASYMMETRIC_KEY_TYPE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE b/anolis/configs/L0-MANDATORY/default/CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE new file mode 100644 index 000000000000..f05823216bcf --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE @@ -0,0 +1 @@ +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_AUDIT b/anolis/configs/L0-MANDATORY/default/CONFIG_AUDIT new file mode 100644 index 000000000000..aa15dd05b4f1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_AUDIT @@ -0,0 +1 @@ +CONFIG_AUDIT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_AUTOFS_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_AUTOFS_FS new file mode 100644 index 000000000000..27fd9c1cf4b8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_AUTOFS_FS @@ -0,0 +1 @@ +CONFIG_AUTOFS_FS=y diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_AUXILIARY_BUS b/anolis/configs/L0-MANDATORY/default/CONFIG_AUXILIARY_BUS new file mode 100644 index 000000000000..2a0020db1d03 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_AUXILIARY_BUS @@ -0,0 +1 @@ +CONFIG_AUXILIARY_BUS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BASE_FULL b/anolis/configs/L0-MANDATORY/default/CONFIG_BASE_FULL new file mode 100644 index 000000000000..da5d20df17cd --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BASE_FULL @@ -0,0 +1 @@ +CONFIG_BASE_FULL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BFQ_GROUP_IOSCHED b/anolis/configs/L0-MANDATORY/default/CONFIG_BFQ_GROUP_IOSCHED new file mode 100644 index 000000000000..731981ca3083 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BFQ_GROUP_IOSCHED @@ -0,0 +1 @@ +CONFIG_BFQ_GROUP_IOSCHED=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BINFMT_ELF b/anolis/configs/L0-MANDATORY/default/CONFIG_BINFMT_ELF new file mode 100644 index 000000000000..f9a79c64b4aa --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BINFMT_ELF @@ -0,0 +1 @@ +CONFIG_BINFMT_ELF=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BINFMT_SCRIPT b/anolis/configs/L0-MANDATORY/default/CONFIG_BINFMT_SCRIPT new file mode 100644 index 000000000000..b9821f947288 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BINFMT_SCRIPT @@ -0,0 +1 @@ +CONFIG_BINFMT_SCRIPT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_CGROUP b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_CGROUP new file mode 100644 index 000000000000..b80f0100a923 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_CGROUP @@ -0,0 +1 @@ +CONFIG_BLK_CGROUP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_CGROUP_IOCOST b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_CGROUP_IOCOST new file mode 100644 index 000000000000..b5de1617390c --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_CGROUP_IOCOST @@ -0,0 +1 @@ +CONFIG_BLK_CGROUP_IOCOST=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEBUG_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEBUG_FS new file mode 100644 index 000000000000..71cc6e708b40 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEBUG_FS @@ -0,0 +1 @@ +CONFIG_BLK_DEBUG_FS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV new file mode 100644 index 000000000000..8b43214d0ea4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV @@ -0,0 +1 @@ +CONFIG_BLK_DEV=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_INITRD b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_INITRD new file mode 100644 index 000000000000..f97f7a0a90d3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_INITRD @@ -0,0 +1 @@ +CONFIG_BLK_DEV_INITRD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_IO_TRACE b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_IO_TRACE new file mode 100644 index 000000000000..3e61f2b38920 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_IO_TRACE @@ -0,0 +1 @@ +CONFIG_BLK_DEV_IO_TRACE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_NVME b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_NVME new file mode 100644 index 000000000000..b1e62d9adf97 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_NVME @@ -0,0 +1 @@ +CONFIG_BLK_DEV_NVME=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_THROTTLING b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_THROTTLING new file mode 100644 index 000000000000..54ba3b928404 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_DEV_THROTTLING @@ -0,0 +1 @@ +CONFIG_BLK_DEV_THROTTLING=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_MQ_PCI 
b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_MQ_PCI new file mode 100644 index 000000000000..e56957b4d291 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_MQ_PCI @@ -0,0 +1 @@ +CONFIG_BLK_MQ_PCI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_MQ_VIRTIO b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_MQ_VIRTIO new file mode 100644 index 000000000000..f9e2e18b64f9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_MQ_VIRTIO @@ -0,0 +1 @@ +CONFIG_BLK_MQ_VIRTIO=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_RQ_ALLOC_TIME b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_RQ_ALLOC_TIME new file mode 100644 index 000000000000..9cc328d40312 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLK_RQ_ALLOC_TIME @@ -0,0 +1 @@ +CONFIG_BLK_RQ_ALLOC_TIME=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BLOCK b/anolis/configs/L0-MANDATORY/default/CONFIG_BLOCK new file mode 100644 index 000000000000..4ef627738355 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BLOCK @@ -0,0 +1 @@ +CONFIG_BLOCK=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BONDING b/anolis/configs/L0-MANDATORY/default/CONFIG_BONDING new file mode 100644 index 000000000000..2fd2a3159f6e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BONDING @@ -0,0 +1 @@ +CONFIG_BONDING=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BPF b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF new file mode 100644 index 000000000000..63f09e9eccf5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF @@ -0,0 +1 @@ +CONFIG_BPF=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_EVENTS b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_EVENTS new file mode 100644 index 000000000000..28e92884696c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_EVENTS @@ -0,0 +1 @@ +CONFIG_BPF_EVENTS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_JIT 
b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_JIT new file mode 100644 index 000000000000..5f9bba75323e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_JIT @@ -0,0 +1 @@ +CONFIG_BPF_JIT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_LSM b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_LSM new file mode 100644 index 000000000000..bf5ae0ddc861 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_LSM @@ -0,0 +1 @@ +CONFIG_BPF_LSM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_SYSCALL b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_SYSCALL new file mode 100644 index 000000000000..4adb0f7e93cf --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_SYSCALL @@ -0,0 +1 @@ +CONFIG_BPF_SYSCALL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_UNPRIV_DEFAULT_OFF b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_UNPRIV_DEFAULT_OFF new file mode 100644 index 000000000000..dea83415f480 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BPF_UNPRIV_DEFAULT_OFF @@ -0,0 +1 @@ +CONFIG_BPF_UNPRIV_DEFAULT_OFF=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BRIDGE b/anolis/configs/L0-MANDATORY/default/CONFIG_BRIDGE new file mode 100644 index 000000000000..06ef54326e6e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BRIDGE @@ -0,0 +1 @@ +CONFIG_BRIDGE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_BUG b/anolis/configs/L0-MANDATORY/default/CONFIG_BUG new file mode 100644 index 000000000000..7a3a7bf96880 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_BUG @@ -0,0 +1 @@ +CONFIG_BUG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CACHEFILES b/anolis/configs/L0-MANDATORY/default/CONFIG_CACHEFILES new file mode 100644 index 000000000000..9c31a788b8a5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CACHEFILES @@ -0,0 +1 @@ +CONFIG_CACHEFILES=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CACHEFILES_ONDEMAND 
b/anolis/configs/L0-MANDATORY/default/CONFIG_CACHEFILES_ONDEMAND new file mode 100644 index 000000000000..b234b86bb2cc --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CACHEFILES_ONDEMAND @@ -0,0 +1 @@ +CONFIG_CACHEFILES_ONDEMAND=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CACHESTAT_SYSCALL b/anolis/configs/L0-MANDATORY/default/CONFIG_CACHESTAT_SYSCALL new file mode 100644 index 000000000000..a65ea33149e0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CACHESTAT_SYSCALL @@ -0,0 +1 @@ +CONFIG_CACHESTAT_SYSCALL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE b/anolis/configs/L0-MANDATORY/default/CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE new file mode 100644 index 000000000000..b4d7c4e3dc97 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE @@ -0,0 +1 @@ +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CC_OPTIMIZE_FOR_SIZE b/anolis/configs/L0-MANDATORY/default/CONFIG_CC_OPTIMIZE_FOR_SIZE new file mode 100644 index 000000000000..781657e578af --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CC_OPTIMIZE_FOR_SIZE @@ -0,0 +1 @@ +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CFS_BANDWIDTH b/anolis/configs/L0-MANDATORY/default/CONFIG_CFS_BANDWIDTH new file mode 100644 index 000000000000..0be30bfd50b9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CFS_BANDWIDTH @@ -0,0 +1 @@ +CONFIG_CFS_BANDWIDTH=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUPS b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUPS new file mode 100644 index 000000000000..de40ae788fd1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUPS @@ -0,0 +1 @@ +CONFIG_CGROUPS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_BPF b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_BPF new file mode 100644 index 000000000000..659477cdb698 
--- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_BPF @@ -0,0 +1 @@ +CONFIG_CGROUP_BPF=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_CPUACCT b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_CPUACCT new file mode 100644 index 000000000000..43f05000a125 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_CPUACCT @@ -0,0 +1 @@ +CONFIG_CGROUP_CPUACCT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_DEVICE b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_DEVICE new file mode 100644 index 000000000000..5a233a69b057 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_DEVICE @@ -0,0 +1 @@ +CONFIG_CGROUP_DEVICE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_HUGETLB b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_HUGETLB new file mode 100644 index 000000000000..0e9e34b4c5be --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_HUGETLB @@ -0,0 +1 @@ +CONFIG_CGROUP_HUGETLB=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_PERF b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_PERF new file mode 100644 index 000000000000..faa1d1cb71fc --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_PERF @@ -0,0 +1 @@ +CONFIG_CGROUP_PERF=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_PIDS b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_PIDS new file mode 100644 index 000000000000..399a03754d53 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_PIDS @@ -0,0 +1 @@ +CONFIG_CGROUP_PIDS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_RDMA b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_RDMA new file mode 100644 index 000000000000..6d9fbd1dd163 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_RDMA @@ -0,0 +1 @@ +CONFIG_CGROUP_RDMA=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_SCHED 
b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_SCHED new file mode 100644 index 000000000000..aa4be387efbb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CGROUP_SCHED @@ -0,0 +1 @@ +CONFIG_CGROUP_SCHED=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CHECKPOINT_RESTORE b/anolis/configs/L0-MANDATORY/default/CONFIG_CHECKPOINT_RESTORE new file mode 100644 index 000000000000..c554a09cece8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CHECKPOINT_RESTORE @@ -0,0 +1 @@ +CONFIG_CHECKPOINT_RESTORE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CK_KABI_RESERVE b/anolis/configs/L0-MANDATORY/default/CONFIG_CK_KABI_RESERVE new file mode 100644 index 000000000000..3c5b7a555a9f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CK_KABI_RESERVE @@ -0,0 +1 @@ +CONFIG_CK_KABI_RESERVE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CK_KABI_SIZE_ALIGN_CHECKS b/anolis/configs/L0-MANDATORY/default/CONFIG_CK_KABI_SIZE_ALIGN_CHECKS new file mode 100644 index 000000000000..6f78e7258a77 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CK_KABI_SIZE_ALIGN_CHECKS @@ -0,0 +1 @@ +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_COMMON_CLK b/anolis/configs/L0-MANDATORY/default/CONFIG_COMMON_CLK new file mode 100644 index 000000000000..3cbf93120f33 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_COMMON_CLK @@ -0,0 +1 @@ +CONFIG_COMMON_CLK=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_COMPACTION b/anolis/configs/L0-MANDATORY/default/CONFIG_COMPACTION new file mode 100644 index 000000000000..23ab91c48b26 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_COMPACTION @@ -0,0 +1 @@ +CONFIG_COMPACTION=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CONFIGFS_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_CONFIGFS_FS new file mode 100644 index 000000000000..1ef892062e48 --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_CONFIGFS_FS @@ -0,0 +1 @@ +CONFIG_CONFIGFS_FS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CONSOLE_TRANSLATIONS b/anolis/configs/L0-MANDATORY/default/CONFIG_CONSOLE_TRANSLATIONS new file mode 100644 index 000000000000..983fcc993d36 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CONSOLE_TRANSLATIONS @@ -0,0 +1 @@ +CONFIG_CONSOLE_TRANSLATIONS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_COREDUMP b/anolis/configs/L0-MANDATORY/default/CONFIG_COREDUMP new file mode 100644 index 000000000000..b2426d3acdc2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_COREDUMP @@ -0,0 +1 @@ +CONFIG_COREDUMP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CPUSETS b/anolis/configs/L0-MANDATORY/default/CONFIG_CPUSETS new file mode 100644 index 000000000000..9920b4659fca --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CPUSETS @@ -0,0 +1 @@ +CONFIG_CPUSETS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_FREQ b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_FREQ new file mode 100644 index 000000000000..04872f671d30 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_FREQ @@ -0,0 +1 @@ +CONFIG_CPU_FREQ=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_IDLE b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_IDLE new file mode 100644 index 000000000000..98dd7d370185 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_IDLE @@ -0,0 +1 @@ +CONFIG_CPU_IDLE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_ISOLATION b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_ISOLATION new file mode 100644 index 000000000000..da3a02c10eb9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_ISOLATION @@ -0,0 +1 @@ +CONFIG_CPU_ISOLATION=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRAMFS b/anolis/configs/L0-MANDATORY/default/CONFIG_CRAMFS new file mode 100644 index 000000000000..99803b5a1e62 --- 
/dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRAMFS @@ -0,0 +1 @@ +CONFIG_CRAMFS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRASH_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_CRASH_CORE new file mode 100644 index 000000000000..071d9918f8b8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRASH_CORE @@ -0,0 +1 @@ +CONFIG_CRASH_CORE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRASH_DUMP b/anolis/configs/L0-MANDATORY/default/CONFIG_CRASH_DUMP new file mode 100644 index 000000000000..84bb04c03f98 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRASH_DUMP @@ -0,0 +1 @@ +CONFIG_CRASH_DUMP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO new file mode 100644 index 000000000000..6fc752ec5216 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO @@ -0,0 +1 @@ +CONFIG_CRYPTO=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AEAD b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AEAD new file mode 100644 index 000000000000..d5733f86937c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AEAD @@ -0,0 +1 @@ +CONFIG_CRYPTO_AEAD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AEAD2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AEAD2 new file mode 100644 index 000000000000..b1fbe1f7edd3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AEAD2 @@ -0,0 +1 @@ +CONFIG_CRYPTO_AEAD2=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AES b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AES new file mode 100644 index 000000000000..dd56b423e13e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AES @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AKCIPHER b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AKCIPHER new file mode 100644 index 000000000000..528d61fcc1b9 --- 
/dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AKCIPHER @@ -0,0 +1 @@ +CONFIG_CRYPTO_AKCIPHER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AKCIPHER2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AKCIPHER2 new file mode 100644 index 000000000000..7adade1dafb0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_AKCIPHER2 @@ -0,0 +1 @@ +CONFIG_CRYPTO_AKCIPHER2=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI new file mode 100644 index 000000000000..cb9dc1dd4603 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI @@ -0,0 +1 @@ +CONFIG_CRYPTO_ALGAPI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI2 new file mode 100644 index 000000000000..3e7c7ffca953 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_ALGAPI2 @@ -0,0 +1 @@ +CONFIG_CRYPTO_ALGAPI2=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_GCM b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_GCM new file mode 100644 index 000000000000..8b509be56358 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_GCM @@ -0,0 +1 @@ +CONFIG_CRYPTO_GCM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_GHASH b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_GHASH new file mode 100644 index 000000000000..2104f2f02998 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_GHASH @@ -0,0 +1 @@ +CONFIG_CRYPTO_GHASH=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_HASH b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_HASH new file mode 100644 index 000000000000..a5e3b09910e7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_HASH @@ -0,0 +1 @@ +CONFIG_CRYPTO_HASH=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_HASH2 
b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_HASH2 new file mode 100644 index 000000000000..288112ec373b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_HASH2 @@ -0,0 +1 @@ +CONFIG_CRYPTO_HASH2=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_LIB_AES b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_LIB_AES new file mode 100644 index 000000000000..93b5da616948 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_LIB_AES @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_AES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_LIB_SHA256 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_LIB_SHA256 new file mode 100644 index 000000000000..d952a4334ef7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_LIB_SHA256 @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_SHA256=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_MANAGER b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_MANAGER new file mode 100644 index 000000000000..084eac591e65 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_MANAGER @@ -0,0 +1 @@ +CONFIG_CRYPTO_MANAGER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_MANAGER2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_MANAGER2 new file mode 100644 index 000000000000..7eb36f78b599 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_MANAGER2 @@ -0,0 +1 @@ +CONFIG_CRYPTO_MANAGER2=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RNG b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RNG new file mode 100644 index 000000000000..b7959aaec6d8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RNG @@ -0,0 +1 @@ +CONFIG_CRYPTO_RNG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RNG2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RNG2 new file mode 100644 index 000000000000..d69333622412 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RNG2 @@ -0,0 +1 
@@ +CONFIG_CRYPTO_RNG2=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RSA b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RSA new file mode 100644 index 000000000000..bd58f120558a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_RSA @@ -0,0 +1 @@ +CONFIG_CRYPTO_RSA=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SHA256 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SHA256 new file mode 100644 index 000000000000..dcaffa2ebcb7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SHA256 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SHA256=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SKCIPHER b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SKCIPHER new file mode 100644 index 000000000000..3120100c2339 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SKCIPHER @@ -0,0 +1 @@ +CONFIG_CRYPTO_SKCIPHER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SKCIPHER2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SKCIPHER2 new file mode 100644 index 000000000000..27565787b29c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SKCIPHER2 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SKCIPHER2=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM2 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM2 new file mode 100644 index 000000000000..e554f7498cee --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM2 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM2=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM3 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM3 new file mode 100644 index 000000000000..79d952c6847e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM3 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM3=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM3_GENERIC b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM3_GENERIC new file mode 100644 index 000000000000..a9c9296a6835 --- 
/dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM3_GENERIC @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM3_GENERIC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM4 b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM4 new file mode 100644 index 000000000000..8460ca1b63db --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM4 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM4=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM4_GENERIC b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM4_GENERIC new file mode 100644 index 000000000000..388c7fa8a4af --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CRYPTO_SM4_GENERIC @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM4_GENERIC=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CXL_BUS b/anolis/configs/L0-MANDATORY/default/CONFIG_CXL_BUS new file mode 100644 index 000000000000..1a82f4dba556 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CXL_BUS @@ -0,0 +1 @@ +CONFIG_CXL_BUS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DAX b/anolis/configs/L0-MANDATORY/default/CONFIG_DAX new file mode 100644 index 000000000000..b756b7915628 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DAX @@ -0,0 +1 @@ +CONFIG_DAX=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_BUGVERBOSE b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_BUGVERBOSE new file mode 100644 index 000000000000..95a87c614300 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_BUGVERBOSE @@ -0,0 +1 @@ +CONFIG_DEBUG_BUGVERBOSE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_FS new file mode 100644 index 000000000000..39c2d26805b6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_FS @@ -0,0 +1 @@ +CONFIG_DEBUG_FS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO new file mode 100644 index 
000000000000..4df8bd06fd8b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO @@ -0,0 +1 @@ +CONFIG_DEBUG_INFO=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO_BTF b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO_BTF new file mode 100644 index 000000000000..39227b4511af --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO_BTF @@ -0,0 +1 @@ +CONFIG_DEBUG_INFO_BTF=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO_DWARF4 b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO_DWARF4 new file mode 100644 index 000000000000..571ad34e0973 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_INFO_DWARF4 @@ -0,0 +1 @@ +# CONFIG_DEBUG_INFO_DWARF4 is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_KERNEL b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_KERNEL new file mode 100644 index 000000000000..cc34cddf40ad --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_KERNEL @@ -0,0 +1 @@ +CONFIG_DEBUG_KERNEL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_MISC b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_MISC new file mode 100644 index 000000000000..b1c6fde1b676 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_MISC @@ -0,0 +1 @@ +CONFIG_DEBUG_MISC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_SECTION_MISMATCH b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_SECTION_MISMATCH new file mode 100644 index 000000000000..441e3464c292 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEBUG_SECTION_MISMATCH @@ -0,0 +1 @@ +CONFIG_DEBUG_SECTION_MISMATCH=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEFAULT_CUBIC b/anolis/configs/L0-MANDATORY/default/CONFIG_DEFAULT_CUBIC new file mode 100644 index 000000000000..05dd37c064cf --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEFAULT_CUBIC @@ -0,0 +1 @@ +CONFIG_DEFAULT_CUBIC=y diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_DEFAULT_SECURITY_DAC b/anolis/configs/L0-MANDATORY/default/CONFIG_DEFAULT_SECURITY_DAC new file mode 100644 index 000000000000..a3af1ff70d08 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEFAULT_SECURITY_DAC @@ -0,0 +1 @@ +# CONFIG_DEFAULT_SECURITY_DAC is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DETECT_HUNG_TASK b/anolis/configs/L0-MANDATORY/default/CONFIG_DETECT_HUNG_TASK new file mode 100644 index 000000000000..28ac9ac1f98c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DETECT_HUNG_TASK @@ -0,0 +1 @@ +CONFIG_DETECT_HUNG_TASK=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEVTMPFS b/anolis/configs/L0-MANDATORY/default/CONFIG_DEVTMPFS new file mode 100644 index 000000000000..4b70528477a1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEVTMPFS @@ -0,0 +1 @@ +CONFIG_DEVTMPFS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DEVTMPFS_MOUNT b/anolis/configs/L0-MANDATORY/default/CONFIG_DEVTMPFS_MOUNT new file mode 100644 index 000000000000..e1cd3e81bb27 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DEVTMPFS_MOUNT @@ -0,0 +1 @@ +CONFIG_DEVTMPFS_MOUNT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DMADEVICES b/anolis/configs/L0-MANDATORY/default/CONFIG_DMADEVICES new file mode 100644 index 000000000000..169d6c3d0d4f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DMADEVICES @@ -0,0 +1 @@ +CONFIG_DMADEVICES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DMI b/anolis/configs/L0-MANDATORY/default/CONFIG_DMI new file mode 100644 index 000000000000..f961d1678db2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DMI @@ -0,0 +1 @@ +CONFIG_DMI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DNOTIFY b/anolis/configs/L0-MANDATORY/default/CONFIG_DNOTIFY new file mode 100644 index 000000000000..1871c683bcef --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_DNOTIFY @@ -0,0 +1 @@ +CONFIG_DNOTIFY=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DNS_RESOLVER b/anolis/configs/L0-MANDATORY/default/CONFIG_DNS_RESOLVER new file mode 100644 index 000000000000..bf44e0eea3ee --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DNS_RESOLVER @@ -0,0 +1 @@ +CONFIG_DNS_RESOLVER=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DRM b/anolis/configs/L0-MANDATORY/default/CONFIG_DRM new file mode 100644 index 000000000000..1ba603c1c7b8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DRM @@ -0,0 +1 @@ +CONFIG_DRM=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG b/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG new file mode 100644 index 000000000000..5698f5ebf655 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG @@ -0,0 +1 @@ +CONFIG_DYNAMIC_DEBUG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG_CORE new file mode 100644 index 000000000000..1375e6ef8c3b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_DEBUG_CORE @@ -0,0 +1 @@ +CONFIG_DYNAMIC_DEBUG_CORE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_FTRACE b/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_FTRACE new file mode 100644 index 000000000000..21f2bdd4849d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_DYNAMIC_FTRACE @@ -0,0 +1 @@ +CONFIG_DYNAMIC_FTRACE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EDAC b/anolis/configs/L0-MANDATORY/default/CONFIG_EDAC new file mode 100644 index 000000000000..dcb32adb912e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EDAC @@ -0,0 +1 @@ +CONFIG_EDAC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EFI b/anolis/configs/L0-MANDATORY/default/CONFIG_EFI new file mode 100644 index 000000000000..7dcf2966daba --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_EFI @@ -0,0 +1 @@ +CONFIG_EFI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ELF_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_ELF_CORE new file mode 100644 index 000000000000..441e14118ef6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ELF_CORE @@ -0,0 +1 @@ +CONFIG_ELF_CORE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EPOLL b/anolis/configs/L0-MANDATORY/default/CONFIG_EPOLL new file mode 100644 index 000000000000..eb0dd3c213f8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EPOLL @@ -0,0 +1 @@ +CONFIG_EPOLL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EROFS_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_EROFS_FS new file mode 100644 index 000000000000..0394e57c2a07 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EROFS_FS @@ -0,0 +1 @@ +CONFIG_EROFS_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ETHTOOL_NETLINK b/anolis/configs/L0-MANDATORY/default/CONFIG_ETHTOOL_NETLINK new file mode 100644 index 000000000000..7ede260aaeb6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ETHTOOL_NETLINK @@ -0,0 +1 @@ +CONFIG_ETHTOOL_NETLINK=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EVENTFD b/anolis/configs/L0-MANDATORY/default/CONFIG_EVENTFD new file mode 100644 index 000000000000..e8f9fb5d2886 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EVENTFD @@ -0,0 +1 @@ +CONFIG_EVENTFD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EVM b/anolis/configs/L0-MANDATORY/default/CONFIG_EVM new file mode 100644 index 000000000000..5e5b1549882a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EVM @@ -0,0 +1 @@ +CONFIG_EVM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EXPERT b/anolis/configs/L0-MANDATORY/default/CONFIG_EXPERT new file mode 100644 index 000000000000..6643b3280f26 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EXPERT @@ -0,0 +1 @@ +# 
CONFIG_EXPERT is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EXT3_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT3_FS new file mode 100644 index 000000000000..be0609953b7e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT3_FS @@ -0,0 +1 @@ +CONFIG_EXT3_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS_POSIX_ACL b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS_POSIX_ACL new file mode 100644 index 000000000000..5dd650fde834 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS_POSIX_ACL @@ -0,0 +1 @@ +CONFIG_EXT4_FS_POSIX_ACL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS_SECURITY b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS_SECURITY new file mode 100644 index 000000000000..6603fbbd62a6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS_SECURITY @@ -0,0 +1 @@ +CONFIG_EXT4_FS_SECURITY=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FAIR_GROUP_SCHED b/anolis/configs/L0-MANDATORY/default/CONFIG_FAIR_GROUP_SCHED new file mode 100644 index 000000000000..7c73cd02e246 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FAIR_GROUP_SCHED @@ -0,0 +1 @@ +CONFIG_FAIR_GROUP_SCHED=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FANOTIFY b/anolis/configs/L0-MANDATORY/default/CONFIG_FANOTIFY new file mode 100644 index 000000000000..03964624f8cb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FANOTIFY @@ -0,0 +1 @@ +CONFIG_FANOTIFY=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FAT_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_FAT_FS new file mode 100644 index 000000000000..bb11abcfa290 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FAT_FS @@ -0,0 +1 @@ +CONFIG_FAT_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FB b/anolis/configs/L0-MANDATORY/default/CONFIG_FB new file mode 100644 index 000000000000..7adf7d4970d9 --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_FB @@ -0,0 +1 @@ +CONFIG_FB=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FHANDLE b/anolis/configs/L0-MANDATORY/default/CONFIG_FHANDLE new file mode 100644 index 000000000000..edcdc053f729 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FHANDLE @@ -0,0 +1 @@ +CONFIG_FHANDLE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FILE_LOCKING b/anolis/configs/L0-MANDATORY/default/CONFIG_FILE_LOCKING new file mode 100644 index 000000000000..d77ce16e5e6e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FILE_LOCKING @@ -0,0 +1 @@ +CONFIG_FILE_LOCKING=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FRAMEBUFFER_CONSOLE b/anolis/configs/L0-MANDATORY/default/CONFIG_FRAMEBUFFER_CONSOLE new file mode 100644 index 000000000000..84ef39c6841f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FRAMEBUFFER_CONSOLE @@ -0,0 +1 @@ +CONFIG_FRAMEBUFFER_CONSOLE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FSCACHE b/anolis/configs/L0-MANDATORY/default/CONFIG_FSCACHE new file mode 100644 index 000000000000..80b71a3729c8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FSCACHE @@ -0,0 +1 @@ +CONFIG_FSCACHE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FSNOTIFY b/anolis/configs/L0-MANDATORY/default/CONFIG_FSNOTIFY new file mode 100644 index 000000000000..2e3862d3aa6b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FSNOTIFY @@ -0,0 +1 @@ +CONFIG_FSNOTIFY=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FS_DAX b/anolis/configs/L0-MANDATORY/default/CONFIG_FS_DAX new file mode 100644 index 000000000000..141ae8514ada --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FS_DAX @@ -0,0 +1 @@ +CONFIG_FS_DAX=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FTRACE b/anolis/configs/L0-MANDATORY/default/CONFIG_FTRACE new file mode 100644 index 000000000000..ef8214661612 --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_FTRACE @@ -0,0 +1 @@ +CONFIG_FTRACE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FTRACE_SYSCALLS b/anolis/configs/L0-MANDATORY/default/CONFIG_FTRACE_SYSCALLS new file mode 100644 index 000000000000..f2f6f04b04b8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FTRACE_SYSCALLS @@ -0,0 +1 @@ +CONFIG_FTRACE_SYSCALLS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FUNCTION_GRAPH_TRACER b/anolis/configs/L0-MANDATORY/default/CONFIG_FUNCTION_GRAPH_TRACER new file mode 100644 index 000000000000..b1c634d001a0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FUNCTION_GRAPH_TRACER @@ -0,0 +1 @@ +CONFIG_FUNCTION_GRAPH_TRACER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FUNCTION_TRACER b/anolis/configs/L0-MANDATORY/default/CONFIG_FUNCTION_TRACER new file mode 100644 index 000000000000..d1977efec424 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FUNCTION_TRACER @@ -0,0 +1 @@ +CONFIG_FUNCTION_TRACER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FUSE_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_FUSE_FS new file mode 100644 index 000000000000..835c7f4dadcd --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FUSE_FS @@ -0,0 +1 @@ +CONFIG_FUSE_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FUTEX b/anolis/configs/L0-MANDATORY/default/CONFIG_FUTEX new file mode 100644 index 000000000000..df59af0b6b71 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FUTEX @@ -0,0 +1 @@ +CONFIG_FUTEX=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_FW_LOADER b/anolis/configs/L0-MANDATORY/default/CONFIG_FW_LOADER new file mode 100644 index 000000000000..c8e64ebb4589 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_FW_LOADER @@ -0,0 +1 @@ +CONFIG_FW_LOADER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_GENERIC_GETTIMEOFDAY b/anolis/configs/L0-MANDATORY/default/CONFIG_GENERIC_GETTIMEOFDAY new file 
mode 100644 index 000000000000..492970855851 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_GENERIC_GETTIMEOFDAY @@ -0,0 +1 @@ +CONFIG_GENERIC_GETTIMEOFDAY=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_GENERIC_VDSO_TIME_NS b/anolis/configs/L0-MANDATORY/default/CONFIG_GENERIC_VDSO_TIME_NS new file mode 100644 index 000000000000..36652dfed18a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_GENERIC_VDSO_TIME_NS @@ -0,0 +1 @@ +CONFIG_GENERIC_VDSO_TIME_NS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HARDLOCKUP_DETECTOR b/anolis/configs/L0-MANDATORY/default/CONFIG_HARDLOCKUP_DETECTOR new file mode 100644 index 000000000000..dc5ae5ce314e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HARDLOCKUP_DETECTOR @@ -0,0 +1 @@ +CONFIG_HARDLOCKUP_DETECTOR=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HDMI b/anolis/configs/L0-MANDATORY/default/CONFIG_HDMI new file mode 100644 index 000000000000..c67208b21459 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HDMI @@ -0,0 +1 @@ +CONFIG_HDMI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HIGH_RES_TIMERS b/anolis/configs/L0-MANDATORY/default/CONFIG_HIGH_RES_TIMERS new file mode 100644 index 000000000000..bf244406f03e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HIGH_RES_TIMERS @@ -0,0 +1 @@ +CONFIG_HIGH_RES_TIMERS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_CPU b/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_CPU new file mode 100644 index 000000000000..3704a7a0f205 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_CPU @@ -0,0 +1 @@ +CONFIG_HOTPLUG_CPU=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_PCI b/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_PCI new file mode 100644 index 000000000000..278cde73c789 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_PCI @@ -0,0 +1 @@ +CONFIG_HOTPLUG_PCI=y diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_PCI_PCIE b/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_PCI_PCIE new file mode 100644 index 000000000000..31781d4a918e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HOTPLUG_PCI_PCIE @@ -0,0 +1 @@ +CONFIG_HOTPLUG_PCI_PCIE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HUGETLBFS b/anolis/configs/L0-MANDATORY/default/CONFIG_HUGETLBFS new file mode 100644 index 000000000000..3c2dea56a541 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HUGETLBFS @@ -0,0 +1 @@ +CONFIG_HUGETLBFS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HUGETLB_PAGE b/anolis/configs/L0-MANDATORY/default/CONFIG_HUGETLB_PAGE new file mode 100644 index 000000000000..17c929f62cc5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HUGETLB_PAGE @@ -0,0 +1 @@ +CONFIG_HUGETLB_PAGE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HWMON b/anolis/configs/L0-MANDATORY/default/CONFIG_HWMON new file mode 100644 index 000000000000..ff2b3294f655 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HWMON @@ -0,0 +1 @@ +CONFIG_HWMON=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HW_RANDOM b/anolis/configs/L0-MANDATORY/default/CONFIG_HW_RANDOM new file mode 100644 index 000000000000..971856ab7475 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HW_RANDOM @@ -0,0 +1 @@ +CONFIG_HW_RANDOM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_100 b/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_100 new file mode 100644 index 000000000000..920c10df708d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_100 @@ -0,0 +1 @@ +# CONFIG_HZ_100 is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_300 b/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_300 new file mode 100644 index 000000000000..082ba4207cfa --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_300 @@ -0,0 +1 @@ +# CONFIG_HZ_300 is not set diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_PERIODIC b/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_PERIODIC new file mode 100644 index 000000000000..55d8ff9baa54 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_HZ_PERIODIC @@ -0,0 +1 @@ +# CONFIG_HZ_PERIODIC is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_I2C b/anolis/configs/L0-MANDATORY/default/CONFIG_I2C new file mode 100644 index 000000000000..aafb657f5b9a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_I2C @@ -0,0 +1 @@ +CONFIG_I2C=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_I40E b/anolis/configs/L0-MANDATORY/default/CONFIG_I40E new file mode 100644 index 000000000000..c52af201eb2f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_I40E @@ -0,0 +1 @@ +CONFIG_I40E=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_I40EVF b/anolis/configs/L0-MANDATORY/default/CONFIG_I40EVF new file mode 100644 index 000000000000..21e0bf4cd205 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_I40EVF @@ -0,0 +1 @@ +CONFIG_I40EVF=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ICE b/anolis/configs/L0-MANDATORY/default/CONFIG_ICE new file mode 100644 index 000000000000..855d37ea1722 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ICE @@ -0,0 +1 @@ +CONFIG_ICE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IDLE_PAGE_TRACKING b/anolis/configs/L0-MANDATORY/default/CONFIG_IDLE_PAGE_TRACKING new file mode 100644 index 000000000000..e7af620e0635 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IDLE_PAGE_TRACKING @@ -0,0 +1 @@ +CONFIG_IDLE_PAGE_TRACKING=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IGB b/anolis/configs/L0-MANDATORY/default/CONFIG_IGB new file mode 100644 index 000000000000..1a8ee88776e1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IGB @@ -0,0 +1 @@ +CONFIG_IGB=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IGBVF 
b/anolis/configs/L0-MANDATORY/default/CONFIG_IGBVF new file mode 100644 index 000000000000..63bb3beea6d8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IGBVF @@ -0,0 +1 @@ +CONFIG_IGBVF=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IMA b/anolis/configs/L0-MANDATORY/default/CONFIG_IMA new file mode 100644 index 000000000000..752982bdd927 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IMA @@ -0,0 +1 @@ +CONFIG_IMA=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INET b/anolis/configs/L0-MANDATORY/default/CONFIG_INET new file mode 100644 index 000000000000..aac63495d8d1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INET @@ -0,0 +1 @@ +CONFIG_INET=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INET_DIAG b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_DIAG new file mode 100644 index 000000000000..8814c39c6eba --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_DIAG @@ -0,0 +1 @@ +CONFIG_INET_DIAG=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INET_MPTCP_DIAG b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_MPTCP_DIAG new file mode 100644 index 000000000000..9ff0e8977aa7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_MPTCP_DIAG @@ -0,0 +1 @@ +CONFIG_INET_MPTCP_DIAG=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INET_TCP_DIAG b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_TCP_DIAG new file mode 100644 index 000000000000..72fbd06a91ff --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_TCP_DIAG @@ -0,0 +1 @@ +CONFIG_INET_TCP_DIAG=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INET_UDP_DIAG b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_UDP_DIAG new file mode 100644 index 000000000000..6abf654838cb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INET_UDP_DIAG @@ -0,0 +1 @@ +CONFIG_INET_UDP_DIAG=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INFINIBAND 
b/anolis/configs/L0-MANDATORY/default/CONFIG_INFINIBAND new file mode 100644 index 000000000000..50f88a2829b7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INFINIBAND @@ -0,0 +1 @@ +CONFIG_INFINIBAND=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT b/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT new file mode 100644 index 000000000000..de103d88fedb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT @@ -0,0 +1 @@ +CONFIG_INPUT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT_KEYBOARD b/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT_KEYBOARD new file mode 100644 index 000000000000..42362ad6f1df --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT_KEYBOARD @@ -0,0 +1 @@ +CONFIG_INPUT_KEYBOARD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT_MOUSE b/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT_MOUSE new file mode 100644 index 000000000000..4f6de0eef834 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INPUT_MOUSE @@ -0,0 +1 @@ +CONFIG_INPUT_MOUSE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_INTEGRITY b/anolis/configs/L0-MANDATORY/default/CONFIG_INTEGRITY new file mode 100644 index 000000000000..a3524cb6b8f4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_INTEGRITY @@ -0,0 +1 @@ +CONFIG_INTEGRITY=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IOMMU_SUPPORT b/anolis/configs/L0-MANDATORY/default/CONFIG_IOMMU_SUPPORT new file mode 100644 index 000000000000..05bdc99f3d11 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IOMMU_SUPPORT @@ -0,0 +1 @@ +CONFIG_IOMMU_SUPPORT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IOSCHED_BFQ b/anolis/configs/L0-MANDATORY/default/CONFIG_IOSCHED_BFQ new file mode 100644 index 000000000000..784fa4506e4b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IOSCHED_BFQ @@ -0,0 +1 @@ +CONFIG_IOSCHED_BFQ=y diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_IO_URING b/anolis/configs/L0-MANDATORY/default/CONFIG_IO_URING new file mode 100644 index 000000000000..eff85c7a8f85 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IO_URING @@ -0,0 +1 @@ +CONFIG_IO_URING=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IPC_NS b/anolis/configs/L0-MANDATORY/default/CONFIG_IPC_NS new file mode 100644 index 000000000000..037635cb2881 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IPC_NS @@ -0,0 +1 @@ +CONFIG_IPC_NS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IPMI_HANDLER b/anolis/configs/L0-MANDATORY/default/CONFIG_IPMI_HANDLER new file mode 100644 index 000000000000..55e555c77722 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IPMI_HANDLER @@ -0,0 +1 @@ +CONFIG_IPMI_HANDLER=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IPV6 b/anolis/configs/L0-MANDATORY/default/CONFIG_IPV6 new file mode 100644 index 000000000000..6450f34933e6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IPV6 @@ -0,0 +1 @@ +CONFIG_IPV6=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_ARPTABLES b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_ARPTABLES new file mode 100644 index 000000000000..b4f356ef036d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_ARPTABLES @@ -0,0 +1 @@ +CONFIG_IP_NF_ARPTABLES=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_RAW b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_RAW new file mode 100644 index 000000000000..dbe34884bcbb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_RAW @@ -0,0 +1 @@ +CONFIG_IP_NF_RAW=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_SECURITY b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_SECURITY new file mode 100644 index 000000000000..e23a4ded8a85 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_NF_SECURITY @@ -0,0 +1 @@ +CONFIG_IP_NF_SECURITY=m diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_IP_SET b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_SET new file mode 100644 index 000000000000..e4213b7088c5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_SET @@ -0,0 +1 @@ +CONFIG_IP_SET=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IP_VS b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_VS new file mode 100644 index 000000000000..5595ef784c64 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_VS @@ -0,0 +1 @@ +CONFIG_IP_VS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IP_VS_IPV6 b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_VS_IPV6 new file mode 100644 index 000000000000..02fb992d6694 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IP_VS_IPV6 @@ -0,0 +1 @@ +CONFIG_IP_VS_IPV6=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ISO9660_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_ISO9660_FS new file mode 100644 index 000000000000..379622f04840 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ISO9660_FS @@ -0,0 +1 @@ +CONFIG_ISO9660_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IXGBE b/anolis/configs/L0-MANDATORY/default/CONFIG_IXGBE new file mode 100644 index 000000000000..a31939ee049b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IXGBE @@ -0,0 +1 @@ +CONFIG_IXGBE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_IXGBEVF b/anolis/configs/L0-MANDATORY/default/CONFIG_IXGBEVF new file mode 100644 index 000000000000..ee2e70d6eb91 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_IXGBEVF @@ -0,0 +1 @@ +CONFIG_IXGBEVF=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_JUMP_LABEL b/anolis/configs/L0-MANDATORY/default/CONFIG_JUMP_LABEL new file mode 100644 index 000000000000..8371ec0677fb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_JUMP_LABEL @@ -0,0 +1 @@ +CONFIG_JUMP_LABEL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KALLSYMS 
b/anolis/configs/L0-MANDATORY/default/CONFIG_KALLSYMS new file mode 100644 index 000000000000..4701e4fa66f0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KALLSYMS @@ -0,0 +1 @@ +CONFIG_KALLSYMS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KALLSYMS_ALL b/anolis/configs/L0-MANDATORY/default/CONFIG_KALLSYMS_ALL new file mode 100644 index 000000000000..e5f6b9c6910f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KALLSYMS_ALL @@ -0,0 +1 @@ +CONFIG_KALLSYMS_ALL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KCMP b/anolis/configs/L0-MANDATORY/default/CONFIG_KCMP new file mode 100644 index 000000000000..19e2db992267 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KCMP @@ -0,0 +1 @@ +CONFIG_KCMP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KERNFS b/anolis/configs/L0-MANDATORY/default/CONFIG_KERNFS new file mode 100644 index 000000000000..ddb8a5f451f0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KERNFS @@ -0,0 +1 @@ +CONFIG_KERNFS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC b/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC new file mode 100644 index 000000000000..b45488dbb610 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC @@ -0,0 +1 @@ +CONFIG_KEXEC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC_CORE new file mode 100644 index 000000000000..093bbdf2650a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC_CORE @@ -0,0 +1 @@ +CONFIG_KEXEC_CORE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC_FILE b/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC_FILE new file mode 100644 index 000000000000..25862bf98de8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KEXEC_FILE @@ -0,0 +1 @@ +CONFIG_KEXEC_FILE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KFENCE b/anolis/configs/L0-MANDATORY/default/CONFIG_KFENCE new 
file mode 100644 index 000000000000..79ca5de40924 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KFENCE @@ -0,0 +1 @@ +CONFIG_KFENCE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB b/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB new file mode 100644 index 000000000000..64b6ebfb4efa --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB @@ -0,0 +1 @@ +CONFIG_KGDB=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB_SERIAL_CONSOLE b/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB_SERIAL_CONSOLE new file mode 100644 index 000000000000..26a6aac707fa --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB_SERIAL_CONSOLE @@ -0,0 +1 @@ +CONFIG_KGDB_SERIAL_CONSOLE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB_TESTS_ON_BOOT b/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB_TESTS_ON_BOOT new file mode 100644 index 000000000000..731febb628fa --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KGDB_TESTS_ON_BOOT @@ -0,0 +1 @@ +# CONFIG_KGDB_TESTS_ON_BOOT is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KPROBES b/anolis/configs/L0-MANDATORY/default/CONFIG_KPROBES new file mode 100644 index 000000000000..e24be2770fd1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KPROBES @@ -0,0 +1 @@ +CONFIG_KPROBES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KPROBE_EVENTS b/anolis/configs/L0-MANDATORY/default/CONFIG_KPROBE_EVENTS new file mode 100644 index 000000000000..2111c5d1a27b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KPROBE_EVENTS @@ -0,0 +1 @@ +CONFIG_KPROBE_EVENTS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_KRETPROBES b/anolis/configs/L0-MANDATORY/default/CONFIG_KRETPROBES new file mode 100644 index 000000000000..78afe778fff8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_KRETPROBES @@ -0,0 +1 @@ +CONFIG_KRETPROBES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKD 
b/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKD new file mode 100644 index 000000000000..f4ae1670697c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKD @@ -0,0 +1 @@ +CONFIG_LOCKD=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKD_V4 b/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKD_V4 new file mode 100644 index 000000000000..0740609a259f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKD_V4 @@ -0,0 +1 @@ +CONFIG_LOCKD_V4=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKUP_DETECTOR b/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKUP_DETECTOR new file mode 100644 index 000000000000..3a80a4e1b5e9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_LOCKUP_DETECTOR @@ -0,0 +1 @@ +CONFIG_LOCKUP_DETECTOR=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_LRU_GEN b/anolis/configs/L0-MANDATORY/default/CONFIG_LRU_GEN new file mode 100644 index 000000000000..93292b3493ea --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_LRU_GEN @@ -0,0 +1 @@ +CONFIG_LRU_GEN=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MAGIC_SYSRQ b/anolis/configs/L0-MANDATORY/default/CONFIG_MAGIC_SYSRQ new file mode 100644 index 000000000000..3a3a7285a708 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MAGIC_SYSRQ @@ -0,0 +1 @@ +CONFIG_MAGIC_SYSRQ=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MD b/anolis/configs/L0-MANDATORY/default/CONFIG_MD new file mode 100644 index 000000000000..a11629d79772 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MD @@ -0,0 +1 @@ +CONFIG_MD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MEMBARRIER b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMBARRIER new file mode 100644 index 000000000000..7d1e33a67f7d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMBARRIER @@ -0,0 +1 @@ +CONFIG_MEMBARRIER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MEMCG 
b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMCG new file mode 100644 index 000000000000..100384ba8db2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMCG @@ -0,0 +1 @@ +CONFIG_MEMCG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_FAILURE b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_FAILURE new file mode 100644 index 000000000000..af716b05c6c8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_FAILURE @@ -0,0 +1 @@ +CONFIG_MEMORY_FAILURE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_HOTPLUG b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_HOTPLUG new file mode 100644 index 000000000000..ff75139e042e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_HOTPLUG @@ -0,0 +1 @@ +CONFIG_MEMORY_HOTPLUG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE new file mode 100644 index 000000000000..bbbf7d364ab6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE @@ -0,0 +1 @@ +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MIGRATION b/anolis/configs/L0-MANDATORY/default/CONFIG_MIGRATION new file mode 100644 index 000000000000..83a9d97c4f8f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MIGRATION @@ -0,0 +1 @@ +CONFIG_MIGRATION=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MISC_FILESYSTEMS b/anolis/configs/L0-MANDATORY/default/CONFIG_MISC_FILESYSTEMS new file mode 100644 index 000000000000..a82849e39df9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MISC_FILESYSTEMS @@ -0,0 +1 @@ +CONFIG_MISC_FILESYSTEMS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MLX4_EN b/anolis/configs/L0-MANDATORY/default/CONFIG_MLX4_EN new file mode 100644 index 000000000000..0a147ed38821 --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_MLX4_EN @@ -0,0 +1 @@ +CONFIG_MLX4_EN=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MLX5_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_MLX5_CORE new file mode 100644 index 000000000000..f81faa5f3cf9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MLX5_CORE @@ -0,0 +1 @@ +CONFIG_MLX5_CORE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MLX5_CORE_EN b/anolis/configs/L0-MANDATORY/default/CONFIG_MLX5_CORE_EN new file mode 100644 index 000000000000..e55da7520a46 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MLX5_CORE_EN @@ -0,0 +1 @@ +CONFIG_MLX5_CORE_EN=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MLXSW_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_MLXSW_CORE new file mode 100644 index 000000000000..2a06bb59c733 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MLXSW_CORE @@ -0,0 +1 @@ +CONFIG_MLXSW_CORE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MMU b/anolis/configs/L0-MANDATORY/default/CONFIG_MMU new file mode 100644 index 000000000000..3dec296304f2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MMU @@ -0,0 +1 @@ +CONFIG_MMU=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MODPROBE_PATH b/anolis/configs/L0-MANDATORY/default/CONFIG_MODPROBE_PATH new file mode 100644 index 000000000000..d235ab2677b2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MODPROBE_PATH @@ -0,0 +1 @@ +CONFIG_MODPROBE_PATH="/sbin/modprobe" diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MODULES b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULES new file mode 100644 index 000000000000..a83bb6e6b9a6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULES @@ -0,0 +1 @@ +CONFIG_MODULES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_SIG b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_SIG new file mode 100644 index 000000000000..53288b393fcc --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_SIG @@ -0,0 +1 @@ +CONFIG_MODULE_SIG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_SRCVERSION_ALL b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_SRCVERSION_ALL new file mode 100644 index 000000000000..f0872fa329cb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_SRCVERSION_ALL @@ -0,0 +1 @@ +CONFIG_MODULE_SRCVERSION_ALL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_UNLOAD b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_UNLOAD new file mode 100644 index 000000000000..4e6976174365 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MODULE_UNLOAD @@ -0,0 +1 @@ +CONFIG_MODULE_UNLOAD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MODVERSIONS b/anolis/configs/L0-MANDATORY/default/CONFIG_MODVERSIONS new file mode 100644 index 000000000000..6119c683c6a8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MODVERSIONS @@ -0,0 +1 @@ +CONFIG_MODVERSIONS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MPTCP b/anolis/configs/L0-MANDATORY/default/CONFIG_MPTCP new file mode 100644 index 000000000000..3bfe60494af8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MPTCP @@ -0,0 +1 @@ +CONFIG_MPTCP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MQ_IOSCHED_DEADLINE b/anolis/configs/L0-MANDATORY/default/CONFIG_MQ_IOSCHED_DEADLINE new file mode 100644 index 000000000000..ad5c7700b0f0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MQ_IOSCHED_DEADLINE @@ -0,0 +1 @@ +CONFIG_MQ_IOSCHED_DEADLINE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MQ_IOSCHED_KYBER b/anolis/configs/L0-MANDATORY/default/CONFIG_MQ_IOSCHED_KYBER new file mode 100644 index 000000000000..16623d2a0928 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MQ_IOSCHED_KYBER @@ -0,0 +1 @@ +CONFIG_MQ_IOSCHED_KYBER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_MULTIUSER 
b/anolis/configs/L0-MANDATORY/default/CONFIG_MULTIUSER new file mode 100644 index 000000000000..ffe93503bae4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_MULTIUSER @@ -0,0 +1 @@ +CONFIG_MULTIUSER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NAMESPACES b/anolis/configs/L0-MANDATORY/default/CONFIG_NAMESPACES new file mode 100644 index 000000000000..6c6db9d257b7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NAMESPACES @@ -0,0 +1 @@ +CONFIG_NAMESPACES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET b/anolis/configs/L0-MANDATORY/default/CONFIG_NET new file mode 100644 index 000000000000..bb0276068e27 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET @@ -0,0 +1 @@ +CONFIG_NET=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NETDEVICES b/anolis/configs/L0-MANDATORY/default/CONFIG_NETDEVICES new file mode 100644 index 000000000000..c12b1f36af84 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NETDEVICES @@ -0,0 +1 @@ +CONFIG_NETDEVICES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER b/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER new file mode 100644 index 000000000000..7e722d1a8920 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER @@ -0,0 +1 @@ +CONFIG_NETFILTER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER_ADVANCED b/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER_ADVANCED new file mode 100644 index 000000000000..c24a71207065 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER_ADVANCED @@ -0,0 +1 @@ +CONFIG_NETFILTER_ADVANCED=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER_INGRESS b/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER_INGRESS new file mode 100644 index 000000000000..d9ad7389f7e4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NETFILTER_INGRESS @@ -0,0 +1 @@ +CONFIG_NETFILTER_INGRESS=y diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_NETLINK_DIAG b/anolis/configs/L0-MANDATORY/default/CONFIG_NETLINK_DIAG new file mode 100644 index 000000000000..0c5dc18066a4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NETLINK_DIAG @@ -0,0 +1 @@ +CONFIG_NETLINK_DIAG=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NETWORK_FILESYSTEMS b/anolis/configs/L0-MANDATORY/default/CONFIG_NETWORK_FILESYSTEMS new file mode 100644 index 000000000000..55e644969f3f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NETWORK_FILESYSTEMS @@ -0,0 +1 @@ +CONFIG_NETWORK_FILESYSTEMS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_ACT_GACT b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_ACT_GACT new file mode 100644 index 000000000000..0ae15de6b360 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_ACT_GACT @@ -0,0 +1 @@ +CONFIG_NET_ACT_GACT=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_ACT_POLICE b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_ACT_POLICE new file mode 100644 index 000000000000..f0b4560c2890 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_ACT_POLICE @@ -0,0 +1 @@ +CONFIG_NET_ACT_POLICE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CLS b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CLS new file mode 100644 index 000000000000..f2125c41edbb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CLS @@ -0,0 +1 @@ +CONFIG_NET_CLS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CLS_ACT b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CLS_ACT new file mode 100644 index 000000000000..3f248b9138c0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CLS_ACT @@ -0,0 +1 @@ +CONFIG_NET_CLS_ACT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CORE new file mode 100644 index 000000000000..104f6aed80fd --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_CORE @@ -0,0 +1 @@ +CONFIG_NET_CORE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_FAILOVER b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_FAILOVER new file mode 100644 index 000000000000..755a0916e7f4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_FAILOVER @@ -0,0 +1 @@ +CONFIG_NET_FAILOVER=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_KEY b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_KEY new file mode 100644 index 000000000000..d823a2fa3c7c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_KEY @@ -0,0 +1 @@ +CONFIG_NET_KEY=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_NS b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_NS new file mode 100644 index 000000000000..6c95e6942dd3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_NS @@ -0,0 +1 @@ +CONFIG_NET_NS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCHED b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCHED new file mode 100644 index 000000000000..32758b597fb5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCHED @@ -0,0 +1 @@ +CONFIG_NET_SCHED=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCH_FQ_CODEL b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCH_FQ_CODEL new file mode 100644 index 000000000000..1a0f8aa77deb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCH_FQ_CODEL @@ -0,0 +1 @@ +CONFIG_NET_SCH_FQ_CODEL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCH_INGRESS b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCH_INGRESS new file mode 100644 index 000000000000..7cee4bc281ee --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_SCH_INGRESS @@ -0,0 +1 @@ +CONFIG_NET_SCH_INGRESS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_BROADCOM b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_BROADCOM new file mode 100644 index 
000000000000..32cb667659df --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_BROADCOM @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_BROADCOM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_INTEL b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_INTEL new file mode 100644 index 000000000000..00321eadb66a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_INTEL @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_INTEL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_MELLANOX b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_MELLANOX new file mode 100644 index 000000000000..e33e9ccf9512 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_MELLANOX @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_MELLANOX=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_WANGXUN b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_WANGXUN new file mode 100644 index 000000000000..e755f944fa3b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NET_VENDOR_WANGXUN @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_WANGXUN=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFSD b/anolis/configs/L0-MANDATORY/default/CONFIG_NFSD new file mode 100644 index 000000000000..4edd3f446db5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFSD @@ -0,0 +1 @@ +CONFIG_NFSD=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFSD_V4 b/anolis/configs/L0-MANDATORY/default/CONFIG_NFSD_V4 new file mode 100644 index 000000000000..a1df1fcb6f1a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFSD_V4 @@ -0,0 +1 @@ +CONFIG_NFSD_V4=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_COMMON b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_COMMON new file mode 100644 index 000000000000..19298484cd9c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_COMMON @@ -0,0 +1 @@ +CONFIG_NFS_COMMON=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_FS 
b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_FS new file mode 100644 index 000000000000..b181162e4b42 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_FS @@ -0,0 +1 @@ +CONFIG_NFS_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_FSCACHE b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_FSCACHE new file mode 100644 index 000000000000..dff728512a18 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_FSCACHE @@ -0,0 +1 @@ +CONFIG_NFS_FSCACHE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V3 b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V3 new file mode 100644 index 000000000000..22442d57ae84 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V3 @@ -0,0 +1 @@ +CONFIG_NFS_V3=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4 b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4 new file mode 100644 index 000000000000..f18a771f0d7e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4 @@ -0,0 +1 @@ +CONFIG_NFS_V4=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4_1 b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4_1 new file mode 100644 index 000000000000..f131fcb574a8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4_1 @@ -0,0 +1 @@ +CONFIG_NFS_V4_1=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4_2 b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4_2 new file mode 100644 index 000000000000..1c4ac47992d4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NFS_V4_2 @@ -0,0 +1 @@ +CONFIG_NFS_V4_2=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NF_CONNTRACK b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_CONNTRACK new file mode 100644 index 000000000000..6eb10bb34ad4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_CONNTRACK @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NF_NAT 
b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_NAT new file mode 100644 index 000000000000..25565b971480 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_NAT @@ -0,0 +1 @@ +CONFIG_NF_NAT=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES new file mode 100644 index 000000000000..b7803fc4026d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES @@ -0,0 +1 @@ +CONFIG_NF_TABLES=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_INET b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_INET new file mode 100644 index 000000000000..407ac412fc38 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_INET @@ -0,0 +1 @@ +CONFIG_NF_TABLES_INET=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_IPV4 b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_IPV4 new file mode 100644 index 000000000000..4393170ed9e7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_IPV4 @@ -0,0 +1 @@ +CONFIG_NF_TABLES_IPV4=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_IPV6 b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_IPV6 new file mode 100644 index 000000000000..271ac4a7a388 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NF_TABLES_IPV6 @@ -0,0 +1 @@ +CONFIG_NF_TABLES_IPV6=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NGBE b/anolis/configs/L0-MANDATORY/default/CONFIG_NGBE new file mode 100644 index 000000000000..bab5cad95e8c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NGBE @@ -0,0 +1 @@ +CONFIG_NGBE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NLS b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS new file mode 100644 index 000000000000..0ca649ce781c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS @@ -0,0 +1 @@ +CONFIG_NLS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_ASCII 
b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_ASCII new file mode 100644 index 000000000000..4c75b84a9fcc --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_ASCII @@ -0,0 +1 @@ +CONFIG_NLS_ASCII=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_CODEPAGE_936 b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_CODEPAGE_936 new file mode 100644 index 000000000000..49aec3120b05 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_CODEPAGE_936 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_936=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_CODEPAGE_950 b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_CODEPAGE_950 new file mode 100644 index 000000000000..4b6af6728a00 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_CODEPAGE_950 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_950=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_DEFAULT b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_DEFAULT new file mode 100644 index 000000000000..bf5fe34ea9cc --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_DEFAULT @@ -0,0 +1 @@ +CONFIG_NLS_DEFAULT="utf8" diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_UTF8 b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_UTF8 new file mode 100644 index 000000000000..c9692f4e2727 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NLS_UTF8 @@ -0,0 +1 @@ +CONFIG_NLS_UTF8=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ b/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ new file mode 100644 index 000000000000..8c68dcd51eff --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ @@ -0,0 +1 @@ +CONFIG_NO_HZ=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ_FULL b/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ_FULL new file mode 100644 index 000000000000..4cd3ab9ec63e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ_FULL @@ -0,0 +1 @@ +CONFIG_NO_HZ_FULL=y diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ_IDLE b/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ_IDLE new file mode 100644 index 000000000000..8641a52f7947 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NO_HZ_IDLE @@ -0,0 +1 @@ +# CONFIG_NO_HZ_IDLE is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NR_CPUS b/anolis/configs/L0-MANDATORY/default/CONFIG_NR_CPUS new file mode 100644 index 000000000000..27d187f4dbfc --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NR_CPUS @@ -0,0 +1 @@ +CONFIG_NR_CPUS=1024 diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NTFS_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_NTFS_FS new file mode 100644 index 000000000000..454b2b913846 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NTFS_FS @@ -0,0 +1 @@ +# CONFIG_NTFS_FS is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NUMA b/anolis/configs/L0-MANDATORY/default/CONFIG_NUMA new file mode 100644 index 000000000000..e480f0c04294 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NUMA @@ -0,0 +1 @@ +CONFIG_NUMA=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_CORE new file mode 100644 index 000000000000..63c9ce1730b0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_CORE @@ -0,0 +1 @@ +CONFIG_NVME_CORE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_FABRICS b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_FABRICS new file mode 100644 index 000000000000..c5c11f934022 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_FABRICS @@ -0,0 +1 @@ +CONFIG_NVME_FABRICS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_RDMA b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_RDMA new file mode 100644 index 000000000000..134190973d15 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_RDMA @@ -0,0 +1 @@ +CONFIG_NVME_RDMA=m diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_TCP b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_TCP new file mode 100644 index 000000000000..738cd8284b7b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_NVME_TCP @@ -0,0 +1 @@ +CONFIG_NVME_TCP=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_OVERLAY_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_OVERLAY_FS new file mode 100644 index 000000000000..b30428db0fd8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_OVERLAY_FS @@ -0,0 +1 @@ +CONFIG_OVERLAY_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PACKET b/anolis/configs/L0-MANDATORY/default/CONFIG_PACKET new file mode 100644 index 000000000000..15b6ef5b8123 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PACKET @@ -0,0 +1 @@ +CONFIG_PACKET=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PAGE_IDLE_FLAG b/anolis/configs/L0-MANDATORY/default/CONFIG_PAGE_IDLE_FLAG new file mode 100644 index 000000000000..5bf96f75047b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PAGE_IDLE_FLAG @@ -0,0 +1 @@ +CONFIG_PAGE_IDLE_FLAG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PANIC_ON_OOPS b/anolis/configs/L0-MANDATORY/default/CONFIG_PANIC_ON_OOPS new file mode 100644 index 000000000000..29dc6ff9860c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PANIC_ON_OOPS @@ -0,0 +1 @@ +CONFIG_PANIC_ON_OOPS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PANIC_TIMEOUT b/anolis/configs/L0-MANDATORY/default/CONFIG_PANIC_TIMEOUT new file mode 100644 index 000000000000..842da8779f98 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PANIC_TIMEOUT @@ -0,0 +1 @@ +CONFIG_PANIC_TIMEOUT=1 diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PARAVIRT b/anolis/configs/L0-MANDATORY/default/CONFIG_PARAVIRT new file mode 100644 index 000000000000..65d95d1e2819 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PARAVIRT @@ -0,0 +1 @@ +CONFIG_PARAVIRT=y 
diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PARTITION_ADVANCED b/anolis/configs/L0-MANDATORY/default/CONFIG_PARTITION_ADVANCED new file mode 100644 index 000000000000..313ee5586097 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PARTITION_ADVANCED @@ -0,0 +1 @@ +CONFIG_PARTITION_ADVANCED=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PCI b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI new file mode 100644 index 000000000000..c499609c3f62 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI @@ -0,0 +1 @@ +CONFIG_PCI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PCIEPORTBUS b/anolis/configs/L0-MANDATORY/default/CONFIG_PCIEPORTBUS new file mode 100644 index 000000000000..ead4f09ac07b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PCIEPORTBUS @@ -0,0 +1 @@ +CONFIG_PCIEPORTBUS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_IOV b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_IOV new file mode 100644 index 000000000000..c52af465a46b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_IOV @@ -0,0 +1 @@ +CONFIG_PCI_IOV=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_MSI b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_MSI new file mode 100644 index 000000000000..9fc3bddd0b5c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_MSI @@ -0,0 +1 @@ +CONFIG_PCI_MSI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_STUB b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_STUB new file mode 100644 index 000000000000..b88db42e9c84 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_STUB @@ -0,0 +1 @@ +CONFIG_PCI_STUB=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PERF_EVENTS b/anolis/configs/L0-MANDATORY/default/CONFIG_PERF_EVENTS new file mode 100644 index 000000000000..ba58ff2203e4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PERF_EVENTS @@ -0,0 +1 @@ +CONFIG_PERF_EVENTS=y diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_PGTABLE_LEVELS b/anolis/configs/L0-MANDATORY/default/CONFIG_PGTABLE_LEVELS new file mode 100644 index 000000000000..238bddf8d97c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PGTABLE_LEVELS @@ -0,0 +1 @@ +CONFIG_PGTABLE_LEVELS=4 diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PID_NS b/anolis/configs/L0-MANDATORY/default/CONFIG_PID_NS new file mode 100644 index 000000000000..eae7bdaa3790 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PID_NS @@ -0,0 +1 @@ +CONFIG_PID_NS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PKCS7_MESSAGE_PARSER b/anolis/configs/L0-MANDATORY/default/CONFIG_PKCS7_MESSAGE_PARSER new file mode 100644 index 000000000000..49e251bf11de --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PKCS7_MESSAGE_PARSER @@ -0,0 +1 @@ +CONFIG_PKCS7_MESSAGE_PARSER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PM b/anolis/configs/L0-MANDATORY/default/CONFIG_PM new file mode 100644 index 000000000000..2df782efc711 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PM @@ -0,0 +1 @@ +CONFIG_PM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_POSIX_TIMERS b/anolis/configs/L0-MANDATORY/default/CONFIG_POSIX_TIMERS new file mode 100644 index 000000000000..f240ffc379f8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_POSIX_TIMERS @@ -0,0 +1 @@ +CONFIG_POSIX_TIMERS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT new file mode 100644 index 000000000000..38362d94a92a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT @@ -0,0 +1 @@ +# CONFIG_PREEMPT is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPTION b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPTION new file mode 100644 index 000000000000..38913b0e31f8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPTION @@ -0,0 +1 @@ 
+CONFIG_PREEMPTION=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_BUILD b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_BUILD new file mode 100644 index 000000000000..ebc04961669e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_BUILD @@ -0,0 +1 @@ +CONFIG_PREEMPT_BUILD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_DYNAMIC b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_DYNAMIC new file mode 100644 index 000000000000..6d995d6c4628 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_DYNAMIC @@ -0,0 +1 @@ +CONFIG_PREEMPT_DYNAMIC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_RCU b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_RCU new file mode 100644 index 000000000000..719b5866fee8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_RCU @@ -0,0 +1 @@ +CONFIG_PREEMPT_RCU=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_TRACER b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_TRACER new file mode 100644 index 000000000000..42ab34971e78 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_TRACER @@ -0,0 +1 @@ +# CONFIG_PREEMPT_TRACER is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK b/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK new file mode 100644 index 000000000000..c6baee637082 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK @@ -0,0 +1 @@ +CONFIG_PRINTK=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK_INDEX b/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK_INDEX new file mode 100644 index 000000000000..bf385684d57d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK_INDEX @@ -0,0 +1 @@ +# CONFIG_PRINTK_INDEX is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK_TIME b/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK_TIME new file mode 100644 index 000000000000..5d5b73e9cc8e --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_PRINTK_TIME @@ -0,0 +1 @@ +CONFIG_PRINTK_TIME=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PROBE_EVENTS_BTF_ARGS b/anolis/configs/L0-MANDATORY/default/CONFIG_PROBE_EVENTS_BTF_ARGS new file mode 100644 index 000000000000..8f99f97d3740 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PROBE_EVENTS_BTF_ARGS @@ -0,0 +1 @@ +CONFIG_PROBE_EVENTS_BTF_ARGS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_FS new file mode 100644 index 000000000000..68fbd2b35884 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_FS @@ -0,0 +1 @@ +CONFIG_PROC_FS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_KCORE b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_KCORE new file mode 100644 index 000000000000..eb475c0a7d7c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_KCORE @@ -0,0 +1 @@ +CONFIG_PROC_KCORE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_PAGE_MONITOR b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_PAGE_MONITOR new file mode 100644 index 000000000000..e728c17ad208 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_PAGE_MONITOR @@ -0,0 +1 @@ +CONFIG_PROC_PAGE_MONITOR=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_SYSCTL b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_SYSCTL new file mode 100644 index 000000000000..eccf86024e96 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_SYSCTL @@ -0,0 +1 @@ +CONFIG_PROC_SYSCTL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_VMCORE b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_VMCORE new file mode 100644 index 000000000000..c864e6b0b392 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PROC_VMCORE @@ -0,0 +1 @@ +CONFIG_PROC_VMCORE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PSE_CONTROLLER 
b/anolis/configs/L0-MANDATORY/default/CONFIG_PSE_CONTROLLER new file mode 100644 index 000000000000..d29dad9c7ffb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PSE_CONTROLLER @@ -0,0 +1 @@ +# CONFIG_PSE_CONTROLLER is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PSTORE_BLK b/anolis/configs/L0-MANDATORY/default/CONFIG_PSTORE_BLK new file mode 100644 index 000000000000..c227a05adc7f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PSTORE_BLK @@ -0,0 +1 @@ +# CONFIG_PSTORE_BLK is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PSTORE_DEFAULT_KMSG_BYTES b/anolis/configs/L0-MANDATORY/default/CONFIG_PSTORE_DEFAULT_KMSG_BYTES new file mode 100644 index 000000000000..84340b2550e1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PSTORE_DEFAULT_KMSG_BYTES @@ -0,0 +1 @@ +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_MOCK b/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_MOCK new file mode 100644 index 000000000000..ecf8c4a4473f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_MOCK @@ -0,0 +1 @@ +# CONFIG_PTP_1588_CLOCK_MOCK is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_OCP b/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_OCP new file mode 100644 index 000000000000..466cead8f0a5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_OCP @@ -0,0 +1 @@ +# CONFIG_PTP_1588_CLOCK_OCP is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_OPTIONAL b/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_OPTIONAL new file mode 100644 index 000000000000..b494f9af4eb7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PTP_1588_CLOCK_OPTIONAL @@ -0,0 +1 @@ +CONFIG_PTP_1588_CLOCK_OPTIONAL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC 
b/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC new file mode 100644 index 000000000000..ae30596cbc3d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC @@ -0,0 +1 @@ +CONFIG_PVPANIC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC_MMIO b/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC_MMIO new file mode 100644 index 000000000000..4cc1414eb4b6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC_MMIO @@ -0,0 +1 @@ +# CONFIG_PVPANIC_MMIO is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC_PCI b/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC_PCI new file mode 100644 index 000000000000..99efce3fb5da --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PVPANIC_PCI @@ -0,0 +1 @@ +# CONFIG_PVPANIC_PCI is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PWM_CLK b/anolis/configs/L0-MANDATORY/default/CONFIG_PWM_CLK new file mode 100644 index 000000000000..640d5daf7b6d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PWM_CLK @@ -0,0 +1 @@ +# CONFIG_PWM_CLK is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PWM_DWC b/anolis/configs/L0-MANDATORY/default/CONFIG_PWM_DWC new file mode 100644 index 000000000000..8aea5b09615e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_PWM_DWC @@ -0,0 +1 @@ +# CONFIG_PWM_DWC is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_QUOTA b/anolis/configs/L0-MANDATORY/default/CONFIG_QUOTA new file mode 100644 index 000000000000..7ae6b6fbaa0d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_QUOTA @@ -0,0 +1 @@ +CONFIG_QUOTA=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RANDOMIZE_BASE b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDOMIZE_BASE new file mode 100644 index 000000000000..20610a95a187 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDOMIZE_BASE @@ -0,0 +1 @@ +CONFIG_RANDOMIZE_BASE=y diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_FULL b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_FULL new file mode 100644 index 000000000000..5d26c21e2535 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_FULL @@ -0,0 +1 @@ +# CONFIG_RANDSTRUCT_FULL is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_NONE b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_NONE new file mode 100644 index 000000000000..e2aaf1b5dacd --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_NONE @@ -0,0 +1 @@ +CONFIG_RANDSTRUCT_NONE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_PERFORMANCE b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_PERFORMANCE new file mode 100644 index 000000000000..790181ab9f5c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RANDSTRUCT_PERFORMANCE @@ -0,0 +1 @@ +# CONFIG_RANDSTRUCT_PERFORMANCE is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RAS b/anolis/configs/L0-MANDATORY/default/CONFIG_RAS new file mode 100644 index 000000000000..15fb268c3b80 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RAS @@ -0,0 +1 @@ +CONFIG_RAS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RELAY b/anolis/configs/L0-MANDATORY/default/CONFIG_RELAY new file mode 100644 index 000000000000..146eff5b478d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RELAY @@ -0,0 +1 @@ +CONFIG_RELAY=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RELOCATABLE b/anolis/configs/L0-MANDATORY/default/CONFIG_RELOCATABLE new file mode 100644 index 000000000000..36808edb3af7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RELOCATABLE @@ -0,0 +1 @@ +CONFIG_RELOCATABLE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RESCTRL_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_RESCTRL_FS new file mode 100644 index 000000000000..4d7ca33dcdc4 --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_RESCTRL_FS @@ -0,0 +1 @@ +CONFIG_RESCTRL_FS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RESET_CONTROLLER b/anolis/configs/L0-MANDATORY/default/CONFIG_RESET_CONTROLLER new file mode 100644 index 000000000000..a268c67a9b48 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RESET_CONTROLLER @@ -0,0 +1 @@ +CONFIG_RESET_CONTROLLER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RPS b/anolis/configs/L0-MANDATORY/default/CONFIG_RPS new file mode 100644 index 000000000000..0c947aea2415 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RPS @@ -0,0 +1 @@ +CONFIG_RPS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RSEQ b/anolis/configs/L0-MANDATORY/default/CONFIG_RSEQ new file mode 100644 index 000000000000..adc7767df654 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RSEQ @@ -0,0 +1 @@ +CONFIG_RSEQ=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_RTC_CLASS b/anolis/configs/L0-MANDATORY/default/CONFIG_RTC_CLASS new file mode 100644 index 000000000000..70c9c29c0397 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_RTC_CLASS @@ -0,0 +1 @@ +CONFIG_RTC_CLASS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SCHEDSTATS b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHEDSTATS new file mode 100644 index 000000000000..a9ef3bd78e30 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHEDSTATS @@ -0,0 +1 @@ +CONFIG_SCHEDSTATS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_AUTOGROUP b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_AUTOGROUP new file mode 100644 index 000000000000..6f615c29f044 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_AUTOGROUP @@ -0,0 +1 @@ +CONFIG_SCHED_AUTOGROUP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_CORE new file mode 100644 index 000000000000..cc3f25e8f01b --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_CORE @@ -0,0 +1 @@ +CONFIG_SCHED_CORE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_DEBUG b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_DEBUG new file mode 100644 index 000000000000..e8b09aa7c0c4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_DEBUG @@ -0,0 +1 @@ +CONFIG_SCHED_DEBUG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_MC b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_MC new file mode 100644 index 000000000000..348674403b1b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SCHED_MC @@ -0,0 +1 @@ +CONFIG_SCHED_MC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SCSI b/anolis/configs/L0-MANDATORY/default/CONFIG_SCSI new file mode 100644 index 000000000000..2e9142f10612 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SCSI @@ -0,0 +1 @@ +CONFIG_SCSI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECCOMP b/anolis/configs/L0-MANDATORY/default/CONFIG_SECCOMP new file mode 100644 index 000000000000..eb9e150920ef --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECCOMP @@ -0,0 +1 @@ +CONFIG_SECCOMP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECCOMP_FILTER b/anolis/configs/L0-MANDATORY/default/CONFIG_SECCOMP_FILTER new file mode 100644 index 000000000000..0814ba30a826 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECCOMP_FILTER @@ -0,0 +1 @@ +CONFIG_SECCOMP_FILTER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECONDARY_TRUSTED_KEYRING b/anolis/configs/L0-MANDATORY/default/CONFIG_SECONDARY_TRUSTED_KEYRING new file mode 100644 index 000000000000..6f8a21900f95 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECONDARY_TRUSTED_KEYRING @@ -0,0 +1 @@ +CONFIG_SECONDARY_TRUSTED_KEYRING=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY new file mode 100644 index 
000000000000..56c9d8401201 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY @@ -0,0 +1 @@ +CONFIG_SECURITY=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITYFS b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITYFS new file mode 100644 index 000000000000..a108272d735d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITYFS @@ -0,0 +1 @@ +CONFIG_SECURITYFS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_INFINIBAND b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_INFINIBAND new file mode 100644 index 000000000000..393c3f5f8e26 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_INFINIBAND @@ -0,0 +1 @@ +CONFIG_SECURITY_INFINIBAND=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_NETWORK b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_NETWORK new file mode 100644 index 000000000000..cfde232dc88c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_NETWORK @@ -0,0 +1 @@ +CONFIG_SECURITY_NETWORK=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_NETWORK_XFRM b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_NETWORK_XFRM new file mode 100644 index 000000000000..4920b268cdd7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_NETWORK_XFRM @@ -0,0 +1 @@ +CONFIG_SECURITY_NETWORK_XFRM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_PATH b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_PATH new file mode 100644 index 000000000000..dcabac67e585 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_PATH @@ -0,0 +1 @@ +CONFIG_SECURITY_PATH=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_SELINUX b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_SELINUX new file mode 100644 index 000000000000..377ec83aaaf2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_SELINUX @@ -0,0 +1 @@ +CONFIG_SECURITY_SELINUX=y diff --git 
a/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_SELINUX_BOOTPARAM b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_SELINUX_BOOTPARAM new file mode 100644 index 000000000000..e39bcd52e6f2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SECURITY_SELINUX_BOOTPARAM @@ -0,0 +1 @@ +CONFIG_SECURITY_SELINUX_BOOTPARAM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SERIO b/anolis/configs/L0-MANDATORY/default/CONFIG_SERIO new file mode 100644 index 000000000000..e0e5093355b5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SERIO @@ -0,0 +1 @@ +CONFIG_SERIO=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SHMEM b/anolis/configs/L0-MANDATORY/default/CONFIG_SHMEM new file mode 100644 index 000000000000..5867be65ea68 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SHMEM @@ -0,0 +1 @@ +CONFIG_SHMEM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SIGNALFD b/anolis/configs/L0-MANDATORY/default/CONFIG_SIGNALFD new file mode 100644 index 000000000000..2237664d4d8b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SIGNALFD @@ -0,0 +1 @@ +CONFIG_SIGNALFD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB new file mode 100644 index 000000000000..05f729d8b5a2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB @@ -0,0 +1 @@ +CONFIG_SLUB=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_CPU_PARTIAL b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_CPU_PARTIAL new file mode 100644 index 000000000000..bc41c67abcb0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_CPU_PARTIAL @@ -0,0 +1 @@ +CONFIG_SLUB_CPU_PARTIAL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_DEBUG b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_DEBUG new file mode 100644 index 000000000000..b19a5f05c484 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_DEBUG @@ -0,0 +1 @@ 
+CONFIG_SLUB_DEBUG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_DEBUG_ON b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_DEBUG_ON new file mode 100644 index 000000000000..ed8690f187c3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_DEBUG_ON @@ -0,0 +1 @@ +# CONFIG_SLUB_DEBUG_ON is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_STATS b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_STATS new file mode 100644 index 000000000000..349cf04499fb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SLUB_STATS @@ -0,0 +1 @@ +# CONFIG_SLUB_STATS is not set diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SMC b/anolis/configs/L0-MANDATORY/default/CONFIG_SMC new file mode 100644 index 000000000000..56a7452afaf0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SMC @@ -0,0 +1 @@ +CONFIG_SMC=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SMP b/anolis/configs/L0-MANDATORY/default/CONFIG_SMP new file mode 100644 index 000000000000..1cbf7ec071da --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SMP @@ -0,0 +1 @@ +CONFIG_SMP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SOFTLOCKUP_DETECTOR b/anolis/configs/L0-MANDATORY/default/CONFIG_SOFTLOCKUP_DETECTOR new file mode 100644 index 000000000000..1ff04a43b963 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SOFTLOCKUP_DETECTOR @@ -0,0 +1 @@ +CONFIG_SOFTLOCKUP_DETECTOR=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SPARSEMEM b/anolis/configs/L0-MANDATORY/default/CONFIG_SPARSEMEM new file mode 100644 index 000000000000..04c45b45045d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SPARSEMEM @@ -0,0 +1 @@ +CONFIG_SPARSEMEM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SPARSEMEM_VMEMMAP b/anolis/configs/L0-MANDATORY/default/CONFIG_SPARSEMEM_VMEMMAP new file mode 100644 index 000000000000..a2826dacd1f9 --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_SPARSEMEM_VMEMMAP @@ -0,0 +1 @@ +CONFIG_SPARSEMEM_VMEMMAP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SPI b/anolis/configs/L0-MANDATORY/default/CONFIG_SPI new file mode 100644 index 000000000000..5616bfc48d77 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SPI @@ -0,0 +1 @@ +CONFIG_SPI=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SQUASHFS b/anolis/configs/L0-MANDATORY/default/CONFIG_SQUASHFS new file mode 100644 index 000000000000..931ff1ddd5a1 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SQUASHFS @@ -0,0 +1 @@ +CONFIG_SQUASHFS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_STACKTRACE b/anolis/configs/L0-MANDATORY/default/CONFIG_STACKTRACE new file mode 100644 index 000000000000..3947d556f7f2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_STACKTRACE @@ -0,0 +1 @@ +CONFIG_STACKTRACE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_STM b/anolis/configs/L0-MANDATORY/default/CONFIG_STM new file mode 100644 index 000000000000..3542730b9e82 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_STM @@ -0,0 +1 @@ +CONFIG_STM=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_STREAM_PARSER b/anolis/configs/L0-MANDATORY/default/CONFIG_STREAM_PARSER new file mode 100644 index 000000000000..6086b5e508ec --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_STREAM_PARSER @@ -0,0 +1 @@ +CONFIG_STREAM_PARSER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_STRICT_KERNEL_RWX b/anolis/configs/L0-MANDATORY/default/CONFIG_STRICT_KERNEL_RWX new file mode 100644 index 000000000000..8c57b454ad26 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_STRICT_KERNEL_RWX @@ -0,0 +1 @@ +CONFIG_STRICT_KERNEL_RWX=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_STRICT_MODULE_RWX b/anolis/configs/L0-MANDATORY/default/CONFIG_STRICT_MODULE_RWX new file mode 100644 index 000000000000..2f1f100d7241 --- 
/dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_STRICT_MODULE_RWX @@ -0,0 +1 @@ +CONFIG_STRICT_MODULE_RWX=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SUNRPC b/anolis/configs/L0-MANDATORY/default/CONFIG_SUNRPC new file mode 100644 index 000000000000..1c5b2e240745 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SUNRPC @@ -0,0 +1 @@ +CONFIG_SUNRPC=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SWAP b/anolis/configs/L0-MANDATORY/default/CONFIG_SWAP new file mode 100644 index 000000000000..38565471d0e3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SWAP @@ -0,0 +1 @@ +CONFIG_SWAP=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SYN_COOKIES b/anolis/configs/L0-MANDATORY/default/CONFIG_SYN_COOKIES new file mode 100644 index 000000000000..5fec45b29307 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SYN_COOKIES @@ -0,0 +1 @@ +CONFIG_SYN_COOKIES=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SYSCTL b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSCTL new file mode 100644 index 000000000000..dd53c266bf52 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSCTL @@ -0,0 +1 @@ +CONFIG_SYSCTL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SYSFS b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSFS new file mode 100644 index 000000000000..54827af2b5c3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSFS @@ -0,0 +1 @@ +CONFIG_SYSFS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SYSTEM_TRUSTED_KEYRING b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSTEM_TRUSTED_KEYRING new file mode 100644 index 000000000000..9cb63f099a13 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSTEM_TRUSTED_KEYRING @@ -0,0 +1 @@ +CONFIG_SYSTEM_TRUSTED_KEYRING=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SYSTEM_TRUSTED_KEYS b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSTEM_TRUSTED_KEYS new file mode 100644 index 
000000000000..db0105d04900 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSTEM_TRUSTED_KEYS @@ -0,0 +1 @@ +CONFIG_SYSTEM_TRUSTED_KEYS="" diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_SYSVIPC b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSVIPC new file mode 100644 index 000000000000..3d67b9bacb9e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_SYSVIPC @@ -0,0 +1 @@ +CONFIG_SYSVIPC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TAP b/anolis/configs/L0-MANDATORY/default/CONFIG_TAP new file mode 100644 index 000000000000..7604449e6d91 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TAP @@ -0,0 +1 @@ +CONFIG_TAP=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TARGET_CORE b/anolis/configs/L0-MANDATORY/default/CONFIG_TARGET_CORE new file mode 100644 index 000000000000..437db1bb0d2f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TARGET_CORE @@ -0,0 +1 @@ +CONFIG_TARGET_CORE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TCG_TIS b/anolis/configs/L0-MANDATORY/default/CONFIG_TCG_TIS new file mode 100644 index 000000000000..eb9a4ccaca40 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TCG_TIS @@ -0,0 +1 @@ +CONFIG_TCG_TIS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TCG_TPM b/anolis/configs/L0-MANDATORY/default/CONFIG_TCG_TPM new file mode 100644 index 000000000000..07d9499c121f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TCG_TPM @@ -0,0 +1 @@ +CONFIG_TCG_TPM=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_ADVANCED b/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_ADVANCED new file mode 100644 index 000000000000..86f996b95d6d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_ADVANCED @@ -0,0 +1 @@ +CONFIG_TCP_CONG_ADVANCED=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_BBR b/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_BBR new file mode 100644 index 
000000000000..55ff89cb627f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_BBR @@ -0,0 +1 @@ +CONFIG_TCP_CONG_BBR=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_CUBIC b/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_CUBIC new file mode 100644 index 000000000000..7be0dc4241ef --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TCP_CONG_CUBIC @@ -0,0 +1 @@ +CONFIG_TCP_CONG_CUBIC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_THERMAL b/anolis/configs/L0-MANDATORY/default/CONFIG_THERMAL new file mode 100644 index 000000000000..42356be7d00a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_THERMAL @@ -0,0 +1 @@ +CONFIG_THERMAL=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TIMERFD b/anolis/configs/L0-MANDATORY/default/CONFIG_TIMERFD new file mode 100644 index 000000000000..e6f99e2dd67c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TIMERFD @@ -0,0 +1 @@ +CONFIG_TIMERFD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TLS b/anolis/configs/L0-MANDATORY/default/CONFIG_TLS new file mode 100644 index 000000000000..1d627c36a363 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TLS @@ -0,0 +1 @@ +CONFIG_TLS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TMPFS b/anolis/configs/L0-MANDATORY/default/CONFIG_TMPFS new file mode 100644 index 000000000000..417f7b76b34a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TMPFS @@ -0,0 +1 @@ +CONFIG_TMPFS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TRACEPOINTS b/anolis/configs/L0-MANDATORY/default/CONFIG_TRACEPOINTS new file mode 100644 index 000000000000..510725c37a8e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TRACEPOINTS @@ -0,0 +1 @@ +CONFIG_TRACEPOINTS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TRANSPARENT_HUGEPAGE b/anolis/configs/L0-MANDATORY/default/CONFIG_TRANSPARENT_HUGEPAGE new file mode 100644 index 
000000000000..75d999c665c9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TRANSPARENT_HUGEPAGE @@ -0,0 +1 @@ +CONFIG_TRANSPARENT_HUGEPAGE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TTY b/anolis/configs/L0-MANDATORY/default/CONFIG_TTY new file mode 100644 index 000000000000..f21b4a108f33 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TTY @@ -0,0 +1 @@ +CONFIG_TTY=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TUN b/anolis/configs/L0-MANDATORY/default/CONFIG_TUN new file mode 100644 index 000000000000..12009e34d34e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TUN @@ -0,0 +1 @@ +CONFIG_TUN=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_TXGBE b/anolis/configs/L0-MANDATORY/default/CONFIG_TXGBE new file mode 100644 index 000000000000..415e2083637a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_TXGBE @@ -0,0 +1 @@ +CONFIG_TXGBE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_UIO b/anolis/configs/L0-MANDATORY/default/CONFIG_UIO new file mode 100644 index 000000000000..109e55966663 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_UIO @@ -0,0 +1 @@ +CONFIG_UIO=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_UNIX b/anolis/configs/L0-MANDATORY/default/CONFIG_UNIX new file mode 100644 index 000000000000..07b000ef4ddb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_UNIX @@ -0,0 +1 @@ +CONFIG_UNIX=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_UNIX98_PTYS b/anolis/configs/L0-MANDATORY/default/CONFIG_UNIX98_PTYS new file mode 100644 index 000000000000..9e23599229b8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_UNIX98_PTYS @@ -0,0 +1 @@ +CONFIG_UNIX98_PTYS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_UPROBE_EVENTS b/anolis/configs/L0-MANDATORY/default/CONFIG_UPROBE_EVENTS new file mode 100644 index 000000000000..81f7be965605 --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_UPROBE_EVENTS @@ -0,0 +1 @@ +CONFIG_UPROBE_EVENTS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_USB b/anolis/configs/L0-MANDATORY/default/CONFIG_USB new file mode 100644 index 000000000000..45e19309d49c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_USB @@ -0,0 +1 @@ +CONFIG_USB=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_USB_SUPPORT b/anolis/configs/L0-MANDATORY/default/CONFIG_USB_SUPPORT new file mode 100644 index 000000000000..c4310e0b7237 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_USB_SUPPORT @@ -0,0 +1 @@ +CONFIG_USB_SUPPORT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_USERFAULTFD b/anolis/configs/L0-MANDATORY/default/CONFIG_USERFAULTFD new file mode 100644 index 000000000000..698c7ed28a26 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_USERFAULTFD @@ -0,0 +1 @@ +CONFIG_USERFAULTFD=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_UTS_NS b/anolis/configs/L0-MANDATORY/default/CONFIG_UTS_NS new file mode 100644 index 000000000000..d6c1f3443d8b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_UTS_NS @@ -0,0 +1 @@ +CONFIG_UTS_NS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VETH b/anolis/configs/L0-MANDATORY/default/CONFIG_VETH new file mode 100644 index 000000000000..80311f71266d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VETH @@ -0,0 +1 @@ +CONFIG_VETH=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VFAT_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_VFAT_FS new file mode 100644 index 000000000000..3204b85ef3fd --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VFAT_FS @@ -0,0 +1 @@ +CONFIG_VFAT_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VFIO b/anolis/configs/L0-MANDATORY/default/CONFIG_VFIO new file mode 100644 index 000000000000..3cdbde3d6df7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VFIO @@ -0,0 +1 @@ 
+CONFIG_VFIO=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VFIO_PCI b/anolis/configs/L0-MANDATORY/default/CONFIG_VFIO_PCI new file mode 100644 index 000000000000..ace58fff3e60 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VFIO_PCI @@ -0,0 +1 @@ +CONFIG_VFIO_PCI=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VGA_ARB b/anolis/configs/L0-MANDATORY/default/CONFIG_VGA_ARB new file mode 100644 index 000000000000..c6d1681b354c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VGA_ARB @@ -0,0 +1 @@ +CONFIG_VGA_ARB=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VHOST_NET b/anolis/configs/L0-MANDATORY/default/CONFIG_VHOST_NET new file mode 100644 index 000000000000..4dd9712dc3de --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VHOST_NET @@ -0,0 +1 @@ +CONFIG_VHOST_NET=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VHOST_VSOCK b/anolis/configs/L0-MANDATORY/default/CONFIG_VHOST_VSOCK new file mode 100644 index 000000000000..935594a5a07d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VHOST_VSOCK @@ -0,0 +1 @@ +CONFIG_VHOST_VSOCK=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_BALLOON b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_BALLOON new file mode 100644 index 000000000000..f3aec11c1598 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_BALLOON @@ -0,0 +1 @@ +CONFIG_VIRTIO_BALLOON=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_CONSOLE b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_CONSOLE new file mode 100644 index 000000000000..92643f4c4b6a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_CONSOLE @@ -0,0 +1 @@ +CONFIG_VIRTIO_CONSOLE=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_FS new file mode 100644 index 000000000000..9fe6466163ed --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_FS @@ -0,0 
+1 @@ +CONFIG_VIRTIO_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MEM b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MEM new file mode 100644 index 000000000000..3780f32af7be --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MEM @@ -0,0 +1 @@ +CONFIG_VIRTIO_MEM=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MENU b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MENU new file mode 100644 index 000000000000..ce9f283a857d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MENU @@ -0,0 +1 @@ +CONFIG_VIRTIO_MENU=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MMIO b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MMIO new file mode 100644 index 000000000000..be547dc13d52 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MMIO @@ -0,0 +1 @@ +CONFIG_VIRTIO_MMIO=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_NET b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_NET new file mode 100644 index 000000000000..170da19d65de --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_NET @@ -0,0 +1 @@ +CONFIG_VIRTIO_NET=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_PMEM b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_PMEM new file mode 100644 index 000000000000..b870f02c6196 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_PMEM @@ -0,0 +1 @@ +CONFIG_VIRTIO_PMEM=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTUALIZATION b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTUALIZATION new file mode 100644 index 000000000000..097e8b93583b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTUALIZATION @@ -0,0 +1 @@ +CONFIG_VIRTUALIZATION=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRT_CPU_ACCOUNTING_GEN b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRT_CPU_ACCOUNTING_GEN new file mode 100644 index 000000000000..16aaf1a83d58 --- /dev/null +++ 
b/anolis/configs/L0-MANDATORY/default/CONFIG_VIRT_CPU_ACCOUNTING_GEN @@ -0,0 +1 @@ +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VMAP_STACK b/anolis/configs/L0-MANDATORY/default/CONFIG_VMAP_STACK new file mode 100644 index 000000000000..8bd986875fc7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VMAP_STACK @@ -0,0 +1 @@ +CONFIG_VMAP_STACK=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VM_EVENT_COUNTERS b/anolis/configs/L0-MANDATORY/default/CONFIG_VM_EVENT_COUNTERS new file mode 100644 index 000000000000..de44b20ecc6b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VM_EVENT_COUNTERS @@ -0,0 +1 @@ +CONFIG_VM_EVENT_COUNTERS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VSOCKETS b/anolis/configs/L0-MANDATORY/default/CONFIG_VSOCKETS new file mode 100644 index 000000000000..da4a1880654d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VSOCKETS @@ -0,0 +1 @@ +CONFIG_VSOCKETS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VT b/anolis/configs/L0-MANDATORY/default/CONFIG_VT new file mode 100644 index 000000000000..4842a9980512 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VT @@ -0,0 +1 @@ +CONFIG_VT=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VT_CONSOLE b/anolis/configs/L0-MANDATORY/default/CONFIG_VT_CONSOLE new file mode 100644 index 000000000000..c3fcafd5f5d4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_VT_CONSOLE @@ -0,0 +1 @@ +CONFIG_VT_CONSOLE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_WATCHDOG b/anolis/configs/L0-MANDATORY/default/CONFIG_WATCHDOG new file mode 100644 index 000000000000..80e211dcedd2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_WATCHDOG @@ -0,0 +1 @@ +CONFIG_WATCHDOG=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_X509_CERTIFICATE_PARSER b/anolis/configs/L0-MANDATORY/default/CONFIG_X509_CERTIFICATE_PARSER new file mode 100644 index 
000000000000..4376b3a12b66 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_X509_CERTIFICATE_PARSER @@ -0,0 +1 @@ +CONFIG_X509_CERTIFICATE_PARSER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_XDP_SOCKETS b/anolis/configs/L0-MANDATORY/default/CONFIG_XDP_SOCKETS new file mode 100644 index 000000000000..061b65a41476 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_XDP_SOCKETS @@ -0,0 +1 @@ +CONFIG_XDP_SOCKETS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_XFRM_USER b/anolis/configs/L0-MANDATORY/default/CONFIG_XFRM_USER new file mode 100644 index 000000000000..bfb1935c1691 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_XFRM_USER @@ -0,0 +1 @@ +CONFIG_XFRM_USER=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_XFS_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_XFS_FS new file mode 100644 index 000000000000..52766aeca40d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_XFS_FS @@ -0,0 +1 @@ +CONFIG_XFS_FS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_XPS b/anolis/configs/L0-MANDATORY/default/CONFIG_XPS new file mode 100644 index 000000000000..357db44258fc --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_XPS @@ -0,0 +1 @@ +CONFIG_XPS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_XZ_DEC b/anolis/configs/L0-MANDATORY/default/CONFIG_XZ_DEC new file mode 100644 index 000000000000..42ed64a29056 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_XZ_DEC @@ -0,0 +1 @@ +CONFIG_XZ_DEC=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DEVICE b/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DEVICE new file mode 100644 index 000000000000..7d0942fcf12a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DEVICE @@ -0,0 +1 @@ +CONFIG_ZONE_DEVICE=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DMA b/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DMA new file mode 100644 index 000000000000..c1b5f84a5b40 --- 
/dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DMA @@ -0,0 +1 @@ +CONFIG_ZONE_DMA=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DMA32 b/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DMA32 new file mode 100644 index 000000000000..8e7948af4e8c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ZONE_DMA32 @@ -0,0 +1 @@ +CONFIG_ZONE_DMA32=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ZRAM b/anolis/configs/L0-MANDATORY/default/CONFIG_ZRAM new file mode 100644 index 000000000000..32adeab9e5dd --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ZRAM @@ -0,0 +1 @@ +CONFIG_ZRAM=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ZSTD_COMPRESS b/anolis/configs/L0-MANDATORY/default/CONFIG_ZSTD_COMPRESS new file mode 100644 index 000000000000..b50376457fb0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ZSTD_COMPRESS @@ -0,0 +1 @@ +CONFIG_ZSTD_COMPRESS=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_ZSTD_DECOMPRESS b/anolis/configs/L0-MANDATORY/default/CONFIG_ZSTD_DECOMPRESS new file mode 100644 index 000000000000..c9a4b9bc3639 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_ZSTD_DECOMPRESS @@ -0,0 +1 @@ +CONFIG_ZSTD_DECOMPRESS=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_AMD_NUMA b/anolis/configs/L0-MANDATORY/x86/CONFIG_AMD_NUMA new file mode 100644 index 000000000000..2ff0db092fda --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_AMD_NUMA @@ -0,0 +1 @@ +CONFIG_AMD_NUMA=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS b/anolis/configs/L0-MANDATORY/x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS new file mode 100644 index 000000000000..f572df8b6f04 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_COMPAT_VDSO b/anolis/configs/L0-MANDATORY/x86/CONFIG_COMPAT_VDSO new file mode 100644 index 
000000000000..a79844faa708 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_COMPAT_VDSO @@ -0,0 +1 @@ +# CONFIG_COMPAT_VDSO is not set diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_AMD b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_AMD new file mode 100644 index 000000000000..d5e1923eee95 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_AMD @@ -0,0 +1 @@ +CONFIG_CPU_SUP_AMD=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_HYGON b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_HYGON new file mode 100644 index 000000000000..21b46d0551af --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_HYGON @@ -0,0 +1 @@ +CONFIG_CPU_SUP_HYGON=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_INTEL b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_INTEL new file mode 100644 index 000000000000..7eadcb816655 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_INTEL @@ -0,0 +1 @@ +CONFIG_CPU_SUP_INTEL=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_ZHAOXIN b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_ZHAOXIN new file mode 100644 index 000000000000..04e96754b00b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CPU_SUP_ZHAOXIN @@ -0,0 +1 @@ +CONFIG_CPU_SUP_ZHAOXIN=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SIMD b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SIMD new file mode 100644 index 000000000000..91fac0ebe36a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SIMD @@ -0,0 +1 @@ +CONFIG_CRYPTO_SIMD=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM3_AVX_X86_64 b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM3_AVX_X86_64 new file mode 100644 index 000000000000..3ff5948232ff --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM3_AVX_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM3_AVX_X86_64=m diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64 
b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64 new file mode 100644 index 000000000000..8a091c4c4f01 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64 b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64 new file mode 100644 index 000000000000..857d6494dee5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_DEFERRED_STRUCT_PAGE_INIT b/anolis/configs/L0-MANDATORY/x86/CONFIG_DEFERRED_STRUCT_PAGE_INIT new file mode 100644 index 000000000000..ce9faa20fc18 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_DEFERRED_STRUCT_PAGE_INIT @@ -0,0 +1 @@ +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_EXT4_FS b/anolis/configs/L0-MANDATORY/x86/CONFIG_EXT4_FS new file mode 100644 index 000000000000..7f2f33f2758a --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_EXT4_FS @@ -0,0 +1 @@ +CONFIG_EXT4_FS=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_EXTCON b/anolis/configs/L0-MANDATORY/x86/CONFIG_EXTCON new file mode 100644 index 000000000000..efa6c7e6f1e8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_EXTCON @@ -0,0 +1 @@ +# CONFIG_EXTCON is not set diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_HPET_TIMER b/anolis/configs/L0-MANDATORY/x86/CONFIG_HPET_TIMER new file mode 100644 index 000000000000..381266e17a1d --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_HPET_TIMER @@ -0,0 +1 @@ +CONFIG_HPET_TIMER=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ b/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ new file mode 100644 index 000000000000..b2857157e15f --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ @@ -0,0 +1 @@ +CONFIG_HZ=1000 diff 
--git a/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_1000 b/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_1000 new file mode 100644 index 000000000000..81777c737e09 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_1000 @@ -0,0 +1 @@ +CONFIG_HZ_1000=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_250 b/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_250 new file mode 100644 index 000000000000..fde8748650f4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_HZ_250 @@ -0,0 +1 @@ +# CONFIG_HZ_250 is not set diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_IA32_FEAT_CTL b/anolis/configs/L0-MANDATORY/x86/CONFIG_IA32_FEAT_CTL new file mode 100644 index 000000000000..75afb0cb1de0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_IA32_FEAT_CTL @@ -0,0 +1 @@ +CONFIG_IA32_FEAT_CTL=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_INSTRUCTION_DECODER b/anolis/configs/L0-MANDATORY/x86/CONFIG_INSTRUCTION_DECODER new file mode 100644 index 000000000000..46610d00bcb3 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_INSTRUCTION_DECODER @@ -0,0 +1 @@ +CONFIG_INSTRUCTION_DECODER=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IDLE b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IDLE new file mode 100644 index 000000000000..11c398412921 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IDLE @@ -0,0 +1 @@ +CONFIG_INTEL_IDLE=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IOMMU b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IOMMU new file mode 100644 index 000000000000..b21af5c551f0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IOMMU @@ -0,0 +1 @@ +CONFIG_INTEL_IOMMU=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IOMMU_SVM b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IOMMU_SVM new file mode 100644 index 000000000000..eb03ccae7899 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_IOMMU_SVM @@ -0,0 +1 @@ +CONFIG_INTEL_IOMMU_SVM=y diff --git 
a/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_TDX_GUEST b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_TDX_GUEST new file mode 100644 index 000000000000..1340073a4abf --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_INTEL_TDX_GUEST @@ -0,0 +1 @@ +CONFIG_INTEL_TDX_GUEST=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_IRQ_REMAP b/anolis/configs/L0-MANDATORY/x86/CONFIG_IRQ_REMAP new file mode 100644 index 000000000000..dfe559c5e88c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_IRQ_REMAP @@ -0,0 +1 @@ +CONFIG_IRQ_REMAP=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_IRQ_TIME_ACCOUNTING b/anolis/configs/L0-MANDATORY/x86/CONFIG_IRQ_TIME_ACCOUNTING new file mode 100644 index 000000000000..e250776e77e2 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_IRQ_TIME_ACCOUNTING @@ -0,0 +1 @@ +CONFIG_IRQ_TIME_ACCOUNTING=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_JBD2 b/anolis/configs/L0-MANDATORY/x86/CONFIG_JBD2 new file mode 100644 index 000000000000..8cfd98f3b1e4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_JBD2 @@ -0,0 +1 @@ +CONFIG_JBD2=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_KPROBES_ON_FTRACE b/anolis/configs/L0-MANDATORY/x86/CONFIG_KPROBES_ON_FTRACE new file mode 100644 index 000000000000..de3ae48570c6 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_KPROBES_ON_FTRACE @@ -0,0 +1 @@ +CONFIG_KPROBES_ON_FTRACE=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM new file mode 100644 index 000000000000..cf9bf67a6a49 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM @@ -0,0 +1 @@ +CONFIG_KVM=m diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_AMD b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_AMD new file mode 100644 index 000000000000..b96224f70aff --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_AMD @@ -0,0 +1 @@ +CONFIG_KVM_AMD=m diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_GUEST 
b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_GUEST new file mode 100644 index 000000000000..133da04247ee --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_GUEST @@ -0,0 +1 @@ +CONFIG_KVM_GUEST=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_INTEL b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_INTEL new file mode 100644 index 000000000000..7f5fa4452999 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_KVM_INTEL @@ -0,0 +1 @@ +CONFIG_KVM_INTEL=m diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_LIVEPATCH b/anolis/configs/L0-MANDATORY/x86/CONFIG_LIVEPATCH new file mode 100644 index 000000000000..1b05d0d1a109 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_LIVEPATCH @@ -0,0 +1 @@ +CONFIG_LIVEPATCH=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_MTRR b/anolis/configs/L0-MANDATORY/x86/CONFIG_MTRR new file mode 100644 index 000000000000..744162bca1b8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_MTRR @@ -0,0 +1 @@ +CONFIG_MTRR=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_OPTPROBES b/anolis/configs/L0-MANDATORY/x86/CONFIG_OPTPROBES new file mode 100644 index 000000000000..a604dda96dbd --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_OPTPROBES @@ -0,0 +1 @@ +CONFIG_OPTPROBES=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_PAGE_TABLE_ISOLATION b/anolis/configs/L0-MANDATORY/x86/CONFIG_PAGE_TABLE_ISOLATION new file mode 100644 index 000000000000..6881a7757248 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_PAGE_TABLE_ISOLATION @@ -0,0 +1 @@ +CONFIG_PAGE_TABLE_ISOLATION=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB b/anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB new file mode 100644 index 000000000000..46eee76194b0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB @@ -0,0 +1 @@ +CONFIG_PCI_PF_STUB=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_PHYSICAL_START b/anolis/configs/L0-MANDATORY/x86/CONFIG_PHYSICAL_START new file mode 
100644 index 000000000000..197ff1f91272 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_PHYSICAL_START @@ -0,0 +1 @@ +CONFIG_PHYSICAL_START=0x1000000 diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_PREEMPT_NONE b/anolis/configs/L0-MANDATORY/x86/CONFIG_PREEMPT_NONE new file mode 100644 index 000000000000..0cbeb5a53cb9 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_PREEMPT_NONE @@ -0,0 +1 @@ +CONFIG_PREEMPT_NONE=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_PREEMPT_VOLUNTARY b/anolis/configs/L0-MANDATORY/x86/CONFIG_PREEMPT_VOLUNTARY new file mode 100644 index 000000000000..6ba012c36d21 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_PREEMPT_VOLUNTARY @@ -0,0 +1 @@ +# CONFIG_PREEMPT_VOLUNTARY is not set diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_RANDOMIZE_MEMORY b/anolis/configs/L0-MANDATORY/x86/CONFIG_RANDOMIZE_MEMORY new file mode 100644 index 000000000000..cb4fdaaaca38 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_RANDOMIZE_MEMORY @@ -0,0 +1 @@ +CONFIG_RANDOMIZE_MEMORY=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_RETHUNK b/anolis/configs/L0-MANDATORY/x86/CONFIG_RETHUNK new file mode 100644 index 000000000000..dd6882f9adb5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_RETHUNK @@ -0,0 +1 @@ +CONFIG_RETHUNK=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_RETPOLINE b/anolis/configs/L0-MANDATORY/x86/CONFIG_RETPOLINE new file mode 100644 index 000000000000..c46e12644718 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_RETPOLINE @@ -0,0 +1 @@ +CONFIG_RETPOLINE=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_SEV_GUEST b/anolis/configs/L0-MANDATORY/x86/CONFIG_SEV_GUEST new file mode 100644 index 000000000000..2917a4256f6b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_SEV_GUEST @@ -0,0 +1 @@ +CONFIG_SEV_GUEST=m diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS b/anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS 
new file mode 100644 index 000000000000..37f78a6f2368 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS @@ -0,0 +1 @@ +CONFIG_SPECULATION_MITIGATIONS=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_FRAME_POINTER b/anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_FRAME_POINTER new file mode 100644 index 000000000000..abdba63a5044 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_FRAME_POINTER @@ -0,0 +1 @@ +# CONFIG_UNWINDER_FRAME_POINTER is not set diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_ORC b/anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_ORC new file mode 100644 index 000000000000..6b6908419acb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_ORC @@ -0,0 +1 @@ +CONFIG_UNWINDER_ORC=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_VGA_CONSOLE b/anolis/configs/L0-MANDATORY/x86/CONFIG_VGA_CONSOLE new file mode 100644 index 000000000000..461d0b1d4e82 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_VGA_CONSOLE @@ -0,0 +1 @@ +CONFIG_VGA_CONSOLE=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO new file mode 100644 index 000000000000..30057292047c --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO @@ -0,0 +1 @@ +CONFIG_VIRTIO=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_BLK b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_BLK new file mode 100644 index 000000000000..a98570e79857 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_BLK @@ -0,0 +1 @@ +CONFIG_VIRTIO_BLK=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_PCI b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_PCI new file mode 100644 index 000000000000..902a03720ac5 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_PCI @@ -0,0 +1 @@ +CONFIG_VIRTIO_PCI=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86 b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86 new file mode 
100644 index 000000000000..083f4ef43580 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86 @@ -0,0 +1 @@ +CONFIG_X86=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_5LEVEL b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_5LEVEL new file mode 100644 index 000000000000..db301f396452 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_5LEVEL @@ -0,0 +1 @@ +# CONFIG_X86_5LEVEL is not set diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64 b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64 new file mode 100644 index 000000000000..0f9a5b591c63 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64 @@ -0,0 +1 @@ +CONFIG_X86_64=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64_ACPI_NUMA b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64_ACPI_NUMA new file mode 100644 index 000000000000..9ed4791dca11 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64_ACPI_NUMA @@ -0,0 +1 @@ +CONFIG_X86_64_ACPI_NUMA=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64_SMP b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64_SMP new file mode 100644 index 000000000000..40d5ce7ec16b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_64_SMP @@ -0,0 +1 @@ +CONFIG_X86_64_SMP=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_CMPXCHG64 b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_CMPXCHG64 new file mode 100644 index 000000000000..de479387b877 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_CMPXCHG64 @@ -0,0 +1 @@ +CONFIG_X86_CMPXCHG64=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_CPU_RESCTRL b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_CPU_RESCTRL new file mode 100644 index 000000000000..0388f23848bf --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_CPU_RESCTRL @@ -0,0 +1 @@ +CONFIG_X86_CPU_RESCTRL=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_IOPL_IOPERM b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_IOPL_IOPERM new file mode 100644 index 
000000000000..cb77e518a533 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_IOPL_IOPERM @@ -0,0 +1 @@ +CONFIG_X86_IOPL_IOPERM=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_IO_APIC b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_IO_APIC new file mode 100644 index 000000000000..4e82cbf6613b --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_IO_APIC @@ -0,0 +1 @@ +CONFIG_X86_IO_APIC=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_LOCAL_APIC b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_LOCAL_APIC new file mode 100644 index 000000000000..a4be2f8427cb --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_LOCAL_APIC @@ -0,0 +1 @@ +CONFIG_X86_LOCAL_APIC=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE new file mode 100644 index 000000000000..de8e48077fc4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE @@ -0,0 +1 @@ +CONFIG_X86_MCE=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE_AMD b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE_AMD new file mode 100644 index 000000000000..7c1f05da9c25 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE_AMD @@ -0,0 +1 @@ +CONFIG_X86_MCE_AMD=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE_INTEL b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE_INTEL new file mode 100644 index 000000000000..17f52def25f4 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_MCE_INTEL @@ -0,0 +1 @@ +CONFIG_X86_MCE_INTEL=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_PAT b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_PAT new file mode 100644 index 000000000000..09f062ec2b29 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_PAT @@ -0,0 +1 @@ +CONFIG_X86_PAT=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_PLATFORM_DEVICES b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_PLATFORM_DEVICES new file mode 100644 index 000000000000..9f3b3e436155 --- 
/dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_PLATFORM_DEVICES @@ -0,0 +1 @@ +CONFIG_X86_PLATFORM_DEVICES=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_SGX b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_SGX new file mode 100644 index 000000000000..afd56e8184ae --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_SGX @@ -0,0 +1 @@ +CONFIG_X86_SGX=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_TSC b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_TSC new file mode 100644 index 000000000000..0b00e7901536 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_TSC @@ -0,0 +1 @@ +CONFIG_X86_TSC=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_UMIP b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_UMIP new file mode 100644 index 000000000000..1fc309a65208 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_UMIP @@ -0,0 +1 @@ +CONFIG_X86_UMIP=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_VSYSCALL_EMULATION b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_VSYSCALL_EMULATION new file mode 100644 index 000000000000..6c8bee87eed7 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_VSYSCALL_EMULATION @@ -0,0 +1 @@ +CONFIG_X86_VSYSCALL_EMULATION=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_X2APIC b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_X2APIC new file mode 100644 index 000000000000..8bee94deca3e --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_X86_X2APIC @@ -0,0 +1 @@ +CONFIG_X86_X2APIC=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_AGDI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_AGDI new file mode 100644 index 000000000000..5f0eaeae8aeb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_AGDI @@ -0,0 +1 @@ +CONFIG_ACPI_AGDI=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_BGRT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_BGRT new file mode 100644 index 000000000000..0eb2d78651d3 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_BGRT @@ -0,0 +1 @@ +# CONFIG_ACPI_BGRT is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_CPPC_CPUFREQ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_CPPC_CPUFREQ new file mode 100644 index 000000000000..5cc88132a921 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_CPPC_CPUFREQ @@ -0,0 +1 @@ +CONFIG_ACPI_CPPC_CPUFREQ=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_DOCK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_DOCK new file mode 100644 index 000000000000..39ad17e2b7c1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_DOCK @@ -0,0 +1 @@ +# CONFIG_ACPI_DOCK is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_EC_DEBUGFS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_EC_DEBUGFS new file mode 100644 index 000000000000..ee9b2fca882f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_EC_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_ACPI_EC_DEBUGFS is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_REDUCED_HARDWARE_ONLY b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_REDUCED_HARDWARE_ONLY new file mode 100644 index 000000000000..5b4c7c472ba0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_REDUCED_HARDWARE_ONLY @@ -0,0 +1 @@ +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_TAD b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_TAD new file mode 100644 index 000000000000..fb9c9ae81ac5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_TAD @@ -0,0 +1 @@ +# CONFIG_ACPI_TAD is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_XGENE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_XGENE new file mode 100644 index 000000000000..d8d54113b6ef --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_XGENE @@ -0,0 +1 @@ +CONFIG_AHCI_XGENE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ALIBABA_UNCORE_DRW_PMU 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ALIBABA_UNCORE_DRW_PMU new file mode 100644 index 000000000000..abe6d16f12a3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ALIBABA_UNCORE_DRW_PMU @@ -0,0 +1 @@ +CONFIG_ALIBABA_UNCORE_DRW_PMU=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_FORCE_MAX_ORDER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_FORCE_MAX_ORDER new file mode 100644 index 000000000000..87511982f8e3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_FORCE_MAX_ORDER @@ -0,0 +1 @@ +CONFIG_ARCH_FORCE_MAX_ORDER=10 diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ACPI_PARKING_PROTOCOL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ACPI_PARKING_PROTOCOL new file mode 100644 index 000000000000..bdb51423da77 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ACPI_PARKING_PROTOCOL @@ -0,0 +1 @@ +CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_AMU_EXTN b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_AMU_EXTN new file mode 100644 index 000000000000..e16916ceca22 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_AMU_EXTN @@ -0,0 +1 @@ +CONFIG_ARM64_AMU_EXTN=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_DEBUG_PRIORITY_MASKING b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_DEBUG_PRIORITY_MASKING new file mode 100644 index 000000000000..3eb3dbd290a8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_DEBUG_PRIORITY_MASKING @@ -0,0 +1 @@ +# CONFIG_ARM64_DEBUG_PRIORITY_MASKING is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_EPAN b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_EPAN new file mode 100644 index 000000000000..625b39cb9381 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_EPAN @@ -0,0 +1 @@ +CONFIG_ARM64_EPAN=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1024718 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1024718 
new file mode 100644 index 000000000000..1c8f6ffc8379 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1024718 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1024718=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1165522 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1165522 new file mode 100644 index 000000000000..454fb1ecca77 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1165522 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1165522=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1286807 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1286807 new file mode 100644 index 000000000000..e35f8566910e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1286807 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1286807=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1319367 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1319367 new file mode 100644 index 000000000000..3c76daab59eb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1319367 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1319367=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1418040 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1418040 new file mode 100644 index 000000000000..06e9dc0ef821 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1418040 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1418040=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1463225 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1463225 new file mode 100644 index 000000000000..3e0b01e04788 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1463225 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1463225=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1508412 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1508412 new file mode 100644 index 000000000000..241640b0fee7 
--- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1508412 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1508412=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1530923 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1530923 new file mode 100644 index 000000000000..4cc427c4a91b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1530923 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1530923=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1542419 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1542419 new file mode 100644 index 000000000000..fc1543d3ad90 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1542419 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1542419=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1742098 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1742098 new file mode 100644 index 000000000000..906c998532df --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_1742098 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_1742098=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2051678 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2051678 new file mode 100644 index 000000000000..f511d7ca5d3a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2051678 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2051678=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2054223 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2054223 new file mode 100644 index 000000000000..7c24dc2ce9cc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2054223 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2054223=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2067961 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2067961 new file mode 100644 index 000000000000..abae3e10fe1d --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2067961 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2067961=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2077057 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2077057 new file mode 100644 index 000000000000..fdbc56319ebe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2077057 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2077057=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2441007 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2441007 new file mode 100644 index 000000000000..6525de1c2142 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2441007 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2441007=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2441009 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2441009 new file mode 100644 index 000000000000..206bfce41c51 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2441009 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2441009=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2457168 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2457168 new file mode 100644 index 000000000000..8fa26ac1ec03 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2457168 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2457168=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2645198 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2645198 new file mode 100644 index 000000000000..a908ff25e11d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2645198 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2645198=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2658417 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2658417 new file mode 100644 index 000000000000..6caad365482a --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2658417 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2658417=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2966298 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2966298 new file mode 100644 index 000000000000..830700ac32cb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_2966298 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_2966298=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3117295 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3117295 new file mode 100644 index 000000000000..953b476f9b30 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3117295 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_3117295=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_819472 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_819472 new file mode 100644 index 000000000000..e53fbbbe50a4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_819472 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_819472=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_824069 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_824069 new file mode 100644 index 000000000000..2b4897f441f3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_824069 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_824069=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_826319 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_826319 new file mode 100644 index 000000000000..8a3b9aaaf871 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_826319 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_826319=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_827319 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_827319 new file mode 100644 index 000000000000..d341b420c3fb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_827319 @@ 
-0,0 +1 @@ +CONFIG_ARM64_ERRATUM_827319=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_832075 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_832075 new file mode 100644 index 000000000000..9d1f8fcd7c01 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_832075 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_832075=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_834220 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_834220 new file mode 100644 index 000000000000..a4bf47d176aa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_834220 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_834220=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_843419 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_843419 new file mode 100644 index 000000000000..303cae7624da --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_843419 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_843419=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_845719 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_845719 new file mode 100644 index 000000000000..c223b74d76ac --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_845719 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_845719=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_PMEM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_PMEM new file mode 100644 index 000000000000..f2e1b130ff17 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_PMEM @@ -0,0 +1 @@ +CONFIG_ARM64_PMEM=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SME b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SME new file mode 100644 index 000000000000..701e39d85196 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SME @@ -0,0 +1 @@ +CONFIG_ARM64_SME=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SW_TTBR0_PAN 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SW_TTBR0_PAN new file mode 100644 index 000000000000..3b878e832c90 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SW_TTBR0_PAN @@ -0,0 +1 @@ +# CONFIG_ARM64_SW_TTBR0_PAN is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_TAGGED_ADDR_ABI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_TAGGED_ADDR_ABI new file mode 100644 index 000000000000..478c311de7aa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_TAGGED_ADDR_ABI @@ -0,0 +1 @@ +CONFIG_ARM64_TAGGED_ADDR_ABI=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARMV8_DEPRECATED b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARMV8_DEPRECATED new file mode 100644 index 000000000000..ed87b7c75bfa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARMV8_DEPRECATED @@ -0,0 +1 @@ +# CONFIG_ARMV8_DEPRECATED is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU new file mode 100644 index 000000000000..3d6294853651 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU @@ -0,0 +1 @@ +# CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_DMC620_PMU b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_DMC620_PMU new file mode 100644 index 000000000000..a7163f24fe2d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_DMC620_PMU @@ -0,0 +1 @@ +# CONFIG_ARM_DMC620_PMU is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_DSU_PMU b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_DSU_PMU new file mode 100644 index 000000000000..9c56fdf217b6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_DSU_PMU @@ -0,0 +1 @@ +CONFIG_ARM_DSU_PMU=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_MHU_V2 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_MHU_V2 new file mode 
100644 index 000000000000..3cf36797cdd4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_MHU_V2 @@ -0,0 +1 @@ +# CONFIG_ARM_MHU_V2 is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_PMUV3 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_PMUV3 new file mode 100644 index 000000000000..7183292c347e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_PMUV3 @@ -0,0 +1 @@ +CONFIG_ARM_PMUV3=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_CPUFREQ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_CPUFREQ new file mode 100644 index 000000000000..f2b7060d62ab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_CPUFREQ @@ -0,0 +1 @@ +CONFIG_ARM_SCPI_CPUFREQ=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_POWER_DOMAIN b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_POWER_DOMAIN new file mode 100644 index 000000000000..b1f92c0d44e7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_POWER_DOMAIN @@ -0,0 +1 @@ +CONFIG_ARM_SCPI_POWER_DOMAIN=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_PROTOCOL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_PROTOCOL new file mode 100644 index 000000000000..29ef036c5e67 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SCPI_PROTOCOL @@ -0,0 +1 @@ +CONFIG_ARM_SCPI_PROTOCOL=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SDE_INTERFACE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SDE_INTERFACE new file mode 100644 index 000000000000..b33609158a6a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SDE_INTERFACE @@ -0,0 +1 @@ +CONFIG_ARM_SDE_INTERFACE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SMC_WATCHDOG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SMC_WATCHDOG new file mode 100644 index 000000000000..5292578ce994 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SMC_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_ARM_SMC_WATCHDOG is not set diff 
--git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT new file mode 100644 index 000000000000..1f0c3b533ef9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT @@ -0,0 +1 @@ +CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SP805_WATCHDOG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SP805_WATCHDOG new file mode 100644 index 000000000000..24d9b92d0c9b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM_SP805_WATCHDOG @@ -0,0 +1 @@ +CONFIG_ARM_SP805_WATCHDOG=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_22375 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_22375 new file mode 100644 index 000000000000..e173734bd7ff --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_22375 @@ -0,0 +1 @@ +CONFIG_CAVIUM_ERRATUM_22375=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_23144 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_23144 new file mode 100644 index 000000000000..59e70bcee7fb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_23144 @@ -0,0 +1 @@ +CONFIG_CAVIUM_ERRATUM_23144=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_23154 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_23154 new file mode 100644 index 000000000000..04080e787ef6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_23154 @@ -0,0 +1 @@ +CONFIG_CAVIUM_ERRATUM_23154=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_27456 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_27456 new file mode 100644 index 000000000000..540c843070ab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_27456 @@ -0,0 +1 @@ +CONFIG_CAVIUM_ERRATUM_27456=y diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_30115 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_30115 new file mode 100644 index 000000000000..e3f4218af665 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_ERRATUM_30115 @@ -0,0 +1 @@ +CONFIG_CAVIUM_ERRATUM_30115=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_TX2_ERRATUM_219 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_TX2_ERRATUM_219 new file mode 100644 index 000000000000..b7f72514b832 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CAVIUM_TX2_ERRATUM_219 @@ -0,0 +1 @@ +CONFIG_CAVIUM_TX2_ERRATUM_219=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMA_SIZE_MBYTES b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMA_SIZE_MBYTES new file mode 100644 index 000000000000..cc0c4e7eefc5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMA_SIZE_MBYTES @@ -0,0 +1 @@ +CONFIG_CMA_SIZE_MBYTES=64 diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE new file mode 100644 index 000000000000..f218410dcca6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE @@ -0,0 +1 @@ +CONFIG_CMDLINE="console=ttyAMA0" diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE_FORCE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE_FORCE new file mode 100644 index 000000000000..db59f377052d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE_FORCE @@ -0,0 +1 @@ +# CONFIG_CMDLINE_FORCE is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE_FROM_BOOTLOADER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE_FROM_BOOTLOADER new file mode 100644 index 000000000000..251fe61ac21a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CMDLINE_FROM_BOOTLOADER @@ -0,0 +1 @@ +CONFIG_CMDLINE_FROM_BOOTLOADER=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CNIC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CNIC new file mode 100644 index 
000000000000..6f1f30e41a52 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CNIC @@ -0,0 +1 @@ +# CONFIG_CNIC is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CATU b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CATU new file mode 100644 index 000000000000..160c1a367bad --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CATU @@ -0,0 +1 @@ +CONFIG_CORESIGHT_CATU=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CPU_DEBUG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CPU_DEBUG new file mode 100644 index 000000000000..05ee4b1530f6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CPU_DEBUG @@ -0,0 +1 @@ +CONFIG_CORESIGHT_CPU_DEBUG=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON new file mode 100644 index 000000000000..9fda80dcc739 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON @@ -0,0 +1 @@ +# CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI new file mode 100644 index 000000000000..da3d7a8beae5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI @@ -0,0 +1 @@ +CONFIG_CORESIGHT_CTI=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI_INTEGRATION_REGS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI_INTEGRATION_REGS new file mode 100644 index 000000000000..d65a71a185ce --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_CTI_INTEGRATION_REGS @@ -0,0 +1 @@ +CONFIG_CORESIGHT_CTI_INTEGRATION_REGS=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_LINKS_AND_SINKS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_LINKS_AND_SINKS new file mode 100644 index 000000000000..c1885e26676d --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_LINKS_AND_SINKS @@ -0,0 +1 @@ +CONFIG_CORESIGHT_LINKS_AND_SINKS=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_LINK_AND_SINK_TMC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_LINK_AND_SINK_TMC new file mode 100644 index 000000000000..af3cefcef6b2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_LINK_AND_SINK_TMC @@ -0,0 +1 @@ +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SINK_ETBV10 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SINK_ETBV10 new file mode 100644 index 000000000000..e2179b9b003b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SINK_ETBV10 @@ -0,0 +1 @@ +CONFIG_CORESIGHT_SINK_ETBV10=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SINK_TPIU b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SINK_TPIU new file mode 100644 index 000000000000..3875bb704ffe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SINK_TPIU @@ -0,0 +1 @@ +CONFIG_CORESIGHT_SINK_TPIU=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SOURCE_ETM4X b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SOURCE_ETM4X new file mode 100644 index 000000000000..7989081534a0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_SOURCE_ETM4X @@ -0,0 +1 @@ +CONFIG_CORESIGHT_SOURCE_ETM4X=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_STM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_STM new file mode 100644 index 000000000000..742eeaa7521c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CORESIGHT_STM @@ -0,0 +1 @@ +CONFIG_CORESIGHT_STM=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPUMASK_OFFSTACK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPUMASK_OFFSTACK new file mode 100644 index 000000000000..6e4f61ead862 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPUMASK_OFFSTACK @@ -0,0 
+1 @@ +# CONFIG_CPUMASK_OFFSTACK is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_BIG_ENDIAN b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_BIG_ENDIAN new file mode 100644 index 000000000000..be479cf38250 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_BIG_ENDIAN @@ -0,0 +1 @@ +# CONFIG_CPU_BIG_ENDIAN is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE new file mode 100644 index 000000000000..87da942a3967 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE @@ -0,0 +1 @@ +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND new file mode 100644 index 000000000000..39bec58842dd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND @@ -0,0 +1 @@ +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_GOV_SCHEDUTIL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_GOV_SCHEDUTIL new file mode 100644 index 000000000000..f9ae389f5e8e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_GOV_SCHEDUTIL @@ -0,0 +1 @@ +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_THERMAL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_THERMAL new file mode 100644 index 000000000000..25d6ba24864b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_THERMAL @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_THERMAL=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_THERMAL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_THERMAL new file mode 100644 index 000000000000..fa1c22af3fa8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_THERMAL @@ -0,0 
+1 @@ +CONFIG_CPU_THERMAL=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64 new file mode 100644 index 000000000000..dd0ae2c1dc82 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES_ARM64=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_BS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_BS new file mode 100644 index 000000000000..6e04273531a2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_BS @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES_ARM64_BS=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE new file mode 100644 index 000000000000..720099599c6b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES_ARM64_CE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE_BLK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE_BLK new file mode 100644 index 000000000000..b07b273c0819 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE_BLK @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE_CCM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE_CCM new file mode 100644 index 000000000000..20ef5afff9d8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_CE_CCM @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_NEON_BLK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_NEON_BLK new file mode 100644 index 000000000000..2ac813860483 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_AES_ARM64_NEON_BLK @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CHACHA20_NEON b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CHACHA20_NEON new file mode 100644 index 000000000000..c680f1a05b3a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CHACHA20_NEON @@ -0,0 +1 @@ +CONFIG_CRYPTO_CHACHA20_NEON=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CRCT10DIF_ARM64_CE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CRCT10DIF_ARM64_CE new file mode 100644 index 000000000000..c59d53cfb35c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CRCT10DIF_ARM64_CE @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CURVE25519 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CURVE25519 new file mode 100644 index 000000000000..7ddf015cc516 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_CURVE25519 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CURVE25519=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_CCP b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_CCP new file mode 100644 index 000000000000..db6cdd873726 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_CCP @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_CCP is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C3XXX b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C3XXX new file mode 100644 index 000000000000..4af70dce5453 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C3XXX @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C3XXXVF b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C3XXXVF new file mode 100644 index 000000000000..be5e56223590 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C3XXXVF @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C62X b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C62X new file mode 100644 index 000000000000..358ca4222b3c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C62X @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_QAT_C62X is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C62XVF b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C62XVF new file mode 100644 index 000000000000..6b24e553f52f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_C62XVF @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCC new file mode 100644 index 000000000000..f7cb83cd2f37 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCC @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF new file mode 100644 index 000000000000..9e540c16040d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_GHASH_ARM64_CE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_GHASH_ARM64_CE new file mode 100644 index 000000000000..f2db2f21a866 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_GHASH_ARM64_CE @@ -0,0 +1 @@ +CONFIG_CRYPTO_GHASH_ARM64_CE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_POLY1305_NEON b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_POLY1305_NEON new file mode 100644 index 000000000000..9dba23aee2d7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_POLY1305_NEON @@ -0,0 +1 @@ +CONFIG_CRYPTO_POLY1305_NEON=m diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA1_ARM64_CE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA1_ARM64_CE new file mode 100644 index 000000000000..8923fcedfa98 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA1_ARM64_CE @@ -0,0 +1 @@ +CONFIG_CRYPTO_SHA1_ARM64_CE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA256_ARM64 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA256_ARM64 new file mode 100644 index 000000000000..ba32f8501a37 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA256_ARM64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SHA256_ARM64=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA2_ARM64_CE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA2_ARM64_CE new file mode 100644 index 000000000000..7c87d7f4cc4c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SHA2_ARM64_CE @@ -0,0 +1 @@ +CONFIG_CRYPTO_SHA2_ARM64_CE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_ARM64_CE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_ARM64_CE new file mode 100644 index 000000000000..93a6a00f40e6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_ARM64_CE @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM3_ARM64_CE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_NEON b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_NEON new file mode 100644 index 000000000000..d4345460d87f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM3_NEON @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM3_NEON=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE new file mode 100644 index 000000000000..5aa28cca8cec --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM4_ARM64_CE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_BLK 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_BLK new file mode 100644 index 000000000000..979299a7a833 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_BLK @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM4_ARM64_CE_BLK=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_CCM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_CCM new file mode 100644 index 000000000000..7ea8b7e6eb65 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_CCM @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM4_ARM64_CE_CCM=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_GCM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_GCM new file mode 100644 index 000000000000..4bde9d25faec --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_CE_GCM @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM4_ARM64_CE_GCM=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_NEON_BLK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_NEON_BLK new file mode 100644 index 000000000000..f1f668a9fb3c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_SM4_ARM64_NEON_BLK @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM4_ARM64_NEON_BLK=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEBUG_PERF_USE_VMALLOC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEBUG_PERF_USE_VMALLOC new file mode 100644 index 000000000000..3aeeb5cd4251 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEBUG_PERF_USE_VMALLOC @@ -0,0 +1 @@ +CONFIG_DEBUG_PERF_USE_VMALLOC=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVICE_PRIVATE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVICE_PRIVATE new file mode 100644 index 000000000000..838736fb6da1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVICE_PRIVATE @@ -0,0 +1 @@ +# CONFIG_DEVICE_PRIVATE is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVPORT 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVPORT new file mode 100644 index 000000000000..555cf4be4d77 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVPORT @@ -0,0 +1 @@ +# CONFIG_DEVPORT is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX new file mode 100644 index 000000000000..77478a213075 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX @@ -0,0 +1 @@ +CONFIG_DEV_DAX=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_HMEM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_HMEM new file mode 100644 index 000000000000..beb328c9bc42 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_HMEM @@ -0,0 +1 @@ +CONFIG_DEV_DAX_HMEM=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_KMEM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_KMEM new file mode 100644 index 000000000000..755061fbf2eb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_KMEM @@ -0,0 +1 @@ +# CONFIG_DEV_DAX_KMEM is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_PMEM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_PMEM new file mode 100644 index 000000000000..8c7fd6732ccd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_PMEM @@ -0,0 +1 @@ +CONFIG_DEV_DAX_PMEM=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DLM_DEBUG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DLM_DEBUG new file mode 100644 index 000000000000..6d3eed26fd28 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DLM_DEBUG @@ -0,0 +1 @@ +# CONFIG_DLM_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_CIK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_CIK new file mode 100644 index 000000000000..6f3da0bab768 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_CIK @@ -0,0 +1 @@ +CONFIG_DRM_AMDGPU_CIK=y diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_USERPTR b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_USERPTR new file mode 100644 index 000000000000..4134231af34a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMDGPU_USERPTR @@ -0,0 +1 @@ +CONFIG_DRM_AMDGPU_USERPTR=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMD_ACP b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMD_ACP new file mode 100644 index 000000000000..dfff6b592b18 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_AMD_ACP @@ -0,0 +1 @@ +CONFIG_DRM_AMD_ACP=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_DP_AUX_CHARDEV b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_DP_AUX_CHARDEV new file mode 100644 index 000000000000..d875280a04a1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_DP_AUX_CHARDEV @@ -0,0 +1 @@ +CONFIG_DRM_DP_AUX_CHARDEV=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_PHYTIUM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_PHYTIUM new file mode 100644 index 000000000000..a663e2800867 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_PHYTIUM @@ -0,0 +1 @@ +CONFIG_DRM_PHYTIUM=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_VMWGFX b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_VMWGFX new file mode 100644 index 000000000000..4e22be4d92e6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DRM_VMWGFX @@ -0,0 +1 @@ +# CONFIG_DRM_VMWGFX is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DWC_PCIE_PMU b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DWC_PCIE_PMU new file mode 100644 index 000000000000..041de175cfc0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DWC_PCIE_PMU @@ -0,0 +1 @@ +CONFIG_DWC_PCIE_PMU=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DW_DMAC_PCI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DW_DMAC_PCI new file mode 100644 index 000000000000..c8fcf1485498 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DW_DMAC_PCI @@ -0,0 +1 @@ +CONFIG_DW_DMAC_PCI=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EDAC_XGENE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EDAC_XGENE new file mode 100644 index 000000000000..dbadc3572072 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EDAC_XGENE @@ -0,0 +1 @@ +CONFIG_EDAC_XGENE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EFI_ARMSTUB_DTB_LOADER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EFI_ARMSTUB_DTB_LOADER new file mode 100644 index 000000000000..d729d29d3261 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EFI_ARMSTUB_DTB_LOADER @@ -0,0 +1 @@ +CONFIG_EFI_ARMSTUB_DTB_LOADER=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EFI_COCO_SECRET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EFI_COCO_SECRET new file mode 100644 index 000000000000..58c96a0ad9b7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EFI_COCO_SECRET @@ -0,0 +1 @@ +# CONFIG_EFI_COCO_SECRET is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ETM4X_IMPDEF_FEATURE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ETM4X_IMPDEF_FEATURE new file mode 100644 index 000000000000..bcdf7b1f5218 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ETM4X_IMPDEF_FEATURE @@ -0,0 +1 @@ +CONFIG_ETM4X_IMPDEF_FEATURE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EXTCON_GPIO b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EXTCON_GPIO new file mode 100644 index 000000000000..543d0e0d5880 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_EXTCON_GPIO @@ -0,0 +1 @@ +CONFIG_EXTCON_GPIO=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_SIMPLE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_SIMPLE new file mode 100644 index 000000000000..0eb3d04225f1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_SIMPLE @@ -0,0 +1 @@ +CONFIG_FB_SIMPLE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_SSD1307 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_SSD1307 new file mode 100644 index 000000000000..efd995bdfe8a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_SSD1307 @@ -0,0 +1 @@ +CONFIG_FB_SSD1307=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FIRMWARE_EDID b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FIRMWARE_EDID new file mode 100644 index 000000000000..7c3c659ce6bd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FIRMWARE_EDID @@ -0,0 +1 @@ +# CONFIG_FIRMWARE_EDID is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUJITSU_ERRATUM_010001 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUJITSU_ERRATUM_010001 new file mode 100644 index 000000000000..122d7ac6ee73 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUJITSU_ERRATUM_010001 @@ -0,0 +1 @@ +CONFIG_FUJITSU_ERRATUM_010001=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUNCTION_PROFILER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUNCTION_PROFILER new file mode 100644 index 000000000000..5c1b6a2a7394 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUNCTION_PROFILER @@ -0,0 +1 @@ +# CONFIG_FUNCTION_PROFILER is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FW_LOADER_USER_HELPER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FW_LOADER_USER_HELPER new file mode 100644 index 000000000000..686f93187907 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FW_LOADER_USER_HELPER @@ -0,0 +1 @@ +# CONFIG_FW_LOADER_USER_HELPER is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_ARCH_NUMA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_ARCH_NUMA new file mode 100644 index 000000000000..d4e2f44b0d31 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_ARCH_NUMA @@ -0,0 +1 @@ +CONFIG_GENERIC_ARCH_NUMA=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_IOREMAP b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_IOREMAP new file mode 100644 index 000000000000..3b77a1ac1f43 --- 
/dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_IOREMAP @@ -0,0 +1 @@ +CONFIG_GENERIC_IOREMAP=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED new file mode 100644 index 000000000000..523d35dd4f5e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED @@ -0,0 +1 @@ +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_PHY b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_PHY new file mode 100644 index 000000000000..40cd1a4f556e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_PHY @@ -0,0 +1 @@ +CONFIG_GENERIC_PHY=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GFS2_FS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GFS2_FS new file mode 100644 index 000000000000..5f660149cb44 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GFS2_FS @@ -0,0 +1 @@ +# CONFIG_GFS2_FS is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_GENERIC_PLATFORM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_GENERIC_PLATFORM new file mode 100644 index 000000000000..3ff4618e8f54 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_GENERIC_PLATFORM @@ -0,0 +1 @@ +CONFIG_GPIO_GENERIC_PLATFORM=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_HISI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_HISI new file mode 100644 index 000000000000..03090da19d95 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_HISI @@ -0,0 +1 @@ +CONFIG_GPIO_HISI=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISILICON_ERRATUM_161600802 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISILICON_ERRATUM_161600802 new file mode 100644 index 000000000000..86ef85f43608 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISILICON_ERRATUM_161600802 @@ -0,0 +1 @@ +CONFIG_HISILICON_ERRATUM_161600802=y diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_DMA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_DMA new file mode 100644 index 000000000000..57350600f333 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_DMA @@ -0,0 +1 @@ +# CONFIG_HISI_DMA is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_THERMAL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_THERMAL new file mode 100644 index 000000000000..05fc96b89e1c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_THERMAL @@ -0,0 +1 @@ +CONFIG_HISI_THERMAL=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3 new file mode 100644 index 000000000000..80d338e39f2d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3 @@ -0,0 +1 @@ +CONFIG_HNS3=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_DCB b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_DCB new file mode 100644 index 000000000000..fb3b91604256 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_DCB @@ -0,0 +1 @@ +CONFIG_HNS3_DCB=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_ENET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_ENET new file mode 100644 index 000000000000..cf9aa0ff62f1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_ENET @@ -0,0 +1 @@ +CONFIG_HNS3_ENET=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGE new file mode 100644 index 000000000000..dbbea3f5bc2d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGE @@ -0,0 +1 @@ +CONFIG_HNS3_HCLGE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGEVF b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGEVF new file mode 100644 index 000000000000..57ab09961c49 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS3_HCLGEVF @@ -0,0 +1 @@ +CONFIG_HNS3_HCLGEVF=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_DSAF 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_DSAF new file mode 100644 index 000000000000..754ba14348bd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_DSAF @@ -0,0 +1 @@ +CONFIG_HNS_DSAF=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_ENET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_ENET new file mode 100644 index 000000000000..304f8cab4603 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_ENET @@ -0,0 +1 @@ +CONFIG_HNS_ENET=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_MDIO b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_MDIO new file mode 100644 index 000000000000..e34372a357bb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HNS_MDIO @@ -0,0 +1 @@ +CONFIG_HNS_MDIO=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HSA_AMD b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HSA_AMD new file mode 100644 index 000000000000..7a5701ba9ca4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HSA_AMD @@ -0,0 +1 @@ +CONFIG_HSA_AMD=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV new file mode 100644 index 000000000000..94db0ce5eef1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV @@ -0,0 +1 @@ +# CONFIG_HYPERV is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE new file mode 100644 index 000000000000..e17d76517bd1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE @@ -0,0 +1 @@ +CONFIG_I2C_SLAVE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE_EEPROM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE_EEPROM new file mode 100644 index 000000000000..682e5476b4f4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I2C_SLAVE_EEPROM @@ -0,0 +1 @@ +CONFIG_I2C_SLAVE_EEPROM=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I40E_DCB b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I40E_DCB new file 
mode 100644 index 000000000000..b65fe813f69a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_I40E_DCB @@ -0,0 +1 @@ +# CONFIG_I40E_DCB is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INDIRECT_PIO b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INDIRECT_PIO new file mode 100644 index 000000000000..56e0675cefa5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INDIRECT_PIO @@ -0,0 +1 @@ +CONFIG_INDIRECT_PIO=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INPUT_MOUSEDEV b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INPUT_MOUSEDEV new file mode 100644 index 000000000000..53cd13a609a7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INPUT_MOUSEDEV @@ -0,0 +1 @@ +CONFIG_INPUT_MOUSEDEV=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INTEL_IDMA64 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INTEL_IDMA64 new file mode 100644 index 000000000000..52e4d562a66c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INTEL_IDMA64 @@ -0,0 +1 @@ +# CONFIG_INTEL_IDMA64 is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INTEL_TH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INTEL_TH new file mode 100644 index 000000000000..4b2285f565b0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INTEL_TH @@ -0,0 +1 @@ +# CONFIG_INTEL_TH is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_DEFAULT_DMA_STRICT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_DEFAULT_DMA_STRICT new file mode 100644 index 000000000000..173fa6d4dfa6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_DEFAULT_DMA_STRICT @@ -0,0 +1 @@ +CONFIG_IOMMU_DEFAULT_DMA_STRICT=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_DEFAULT_PASSTHROUGH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_DEFAULT_PASSTHROUGH new file mode 100644 index 000000000000..b71df81edd20 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_DEFAULT_PASSTHROUGH @@ -0,0 +1 @@ +# 
CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE new file mode 100644 index 000000000000..73494e9d2af0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE @@ -0,0 +1 @@ +CONFIG_IOMMU_IO_PGTABLE_LPAE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ISCSI_IBFT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ISCSI_IBFT new file mode 100644 index 000000000000..0ca6169eeea2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ISCSI_IBFT @@ -0,0 +1 @@ +# CONFIG_ISCSI_IBFT is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KDB_DEFAULT_ENABLE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KDB_DEFAULT_ENABLE new file mode 100644 index 000000000000..aeef61ee9433 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KDB_DEFAULT_ENABLE @@ -0,0 +1 @@ +CONFIG_KDB_DEFAULT_ENABLE=0x0 diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEXEC_IMAGE_VERIFY_SIG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEXEC_IMAGE_VERIFY_SIG new file mode 100644 index 000000000000..1eeee81d686d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEXEC_IMAGE_VERIFY_SIG @@ -0,0 +1 @@ +CONFIG_KEXEC_IMAGE_VERIFY_SIG=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEYBOARD_ATKBD b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEYBOARD_ATKBD new file mode 100644 index 000000000000..99703e4ab082 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEYBOARD_ATKBD @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_ATKBD is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KUNPENG_HCCS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KUNPENG_HCCS new file mode 100644 index 000000000000..c8644d0f3c1b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KUNPENG_HCCS @@ -0,0 +1 @@ +CONFIG_KUNPENG_HCCS=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KUSER_HELPERS 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KUSER_HELPERS new file mode 100644 index 000000000000..b61b9dd53a35 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KUSER_HELPERS @@ -0,0 +1 @@ +CONFIG_KUSER_HELPERS=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LOG_BUF_SHIFT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LOG_BUF_SHIFT new file mode 100644 index 000000000000..7ed0134a53fe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LOG_BUF_SHIFT @@ -0,0 +1 @@ +CONFIG_LOG_BUF_SHIFT=20 diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MELLANOX_PLATFORM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MELLANOX_PLATFORM new file mode 100644 index 000000000000..900d0304defd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MELLANOX_PLATFORM @@ -0,0 +1 @@ +# CONFIG_MELLANOX_PLATFORM is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MLX5_CORE_IPOIB b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MLX5_CORE_IPOIB new file mode 100644 index 000000000000..d78d82d65ae1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MLX5_CORE_IPOIB @@ -0,0 +1 @@ +CONFIG_MLX5_CORE_IPOIB=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MOUSE_PS2 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MOUSE_PS2 new file mode 100644 index 000000000000..bd4390f41ebb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MOUSE_PS2 @@ -0,0 +1 @@ +# CONFIG_MOUSE_PS2 is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NET_VENDOR_HISILICON b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NET_VENDOR_HISILICON new file mode 100644 index 000000000000..0a881cc22b52 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NET_VENDOR_HISILICON @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_HISILICON=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_PID_IN_CONTEXTIDR b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_PID_IN_CONTEXTIDR new file mode 100644 index 000000000000..29cefd2bda14 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_PID_IN_CONTEXTIDR @@ -0,0 +1 @@ +CONFIG_PID_IN_CONTEXTIDR=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_POWERCAP b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_POWERCAP new file mode 100644 index 000000000000..24baf5a1b45f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_POWERCAP @@ -0,0 +1 @@ +# CONFIG_POWERCAP is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_1003 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_1003 new file mode 100644 index 000000000000..41768200c145 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_1003 @@ -0,0 +1 @@ +CONFIG_QCOM_FALKOR_ERRATUM_1003=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_1009 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_1009 new file mode 100644 index 000000000000..1449efafbbd3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_1009 @@ -0,0 +1 @@ +CONFIG_QCOM_FALKOR_ERRATUM_1009=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_E1041 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_E1041 new file mode 100644 index 000000000000..f0d31a87b34d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_FALKOR_ERRATUM_E1041 @@ -0,0 +1 @@ +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_QDF2400_ERRATUM_0065 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_QDF2400_ERRATUM_0065 new file mode 100644 index 000000000000..dec9be97084b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_QCOM_QDF2400_ERRATUM_0065 @@ -0,0 +1 @@ +CONFIG_QCOM_QDF2400_ERRATUM_0065=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_MODULE_REGION_FULL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_MODULE_REGION_FULL new file mode 100644 index 000000000000..7645a371e7ef --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_MODULE_REGION_FULL @@ -0,0 +1 @@ +CONFIG_RANDOMIZE_MODULE_REGION_FULL=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RELR b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RELR new file mode 100644 index 000000000000..a30c007c1182 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RELR @@ -0,0 +1 @@ +CONFIG_RELR=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RODATA_FULL_DEFAULT_ENABLED b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RODATA_FULL_DEFAULT_ENABLED new file mode 100644 index 000000000000..4615f4e257b6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RODATA_FULL_DEFAULT_ENABLED @@ -0,0 +1 @@ +# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RTC_DRV_EFI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RTC_DRV_EFI new file mode 100644 index 000000000000..e2a2dcd64d43 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RTC_DRV_EFI @@ -0,0 +1 @@ +CONFIG_RTC_DRV_EFI=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SATA_AHCI_SEATTLE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SATA_AHCI_SEATTLE new file mode 100644 index 000000000000..dfcf03f079e0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SATA_AHCI_SEATTLE @@ -0,0 +1 @@ +CONFIG_SATA_AHCI_SEATTLE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SDEI_WATCHDOG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SDEI_WATCHDOG new file mode 100644 index 000000000000..7c8fc508c409 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SDEI_WATCHDOG @@ -0,0 +1 @@ +CONFIG_SDEI_WATCHDOG=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SERIO_AMBAKMI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SERIO_AMBAKMI new file mode 100644 index 000000000000..ff9bebcb321a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SERIO_AMBAKMI @@ -0,0 +1 @@ +CONFIG_SERIO_AMBAKMI=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SOCIONEXT_SYNQUACER_PREITS 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SOCIONEXT_SYNQUACER_PREITS new file mode 100644 index 000000000000..ded5c358ea61 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SOCIONEXT_SYNQUACER_PREITS @@ -0,0 +1 @@ +CONFIG_SOCIONEXT_SYNQUACER_PREITS=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_CADENCE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_CADENCE new file mode 100644 index 000000000000..3a8bb168c76c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_CADENCE @@ -0,0 +1 @@ +CONFIG_SPI_CADENCE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DESIGNWARE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DESIGNWARE new file mode 100644 index 000000000000..9f92cba69b5c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DESIGNWARE @@ -0,0 +1 @@ +CONFIG_SPI_DESIGNWARE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DW_MMIO b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DW_MMIO new file mode 100644 index 000000000000..12b554155697 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DW_MMIO @@ -0,0 +1 @@ +CONFIG_SPI_DW_MMIO=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_HISI_KUNPENG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_HISI_KUNPENG new file mode 100644 index 000000000000..6ad4401a4acc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_HISI_KUNPENG @@ -0,0 +1 @@ +CONFIG_SPI_HISI_KUNPENG=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_PL022 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_PL022 new file mode 100644 index 000000000000..67bd507db556 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_PL022 @@ -0,0 +1 @@ +CONFIG_SPI_PL022=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_QUP b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_QUP new file mode 100644 index 000000000000..55cd5d2b86a2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_QUP @@ -0,0 +1 @@ +CONFIG_SPI_QUP=y diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_XLP b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_XLP new file mode 100644 index 000000000000..6026d5f511d8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_XLP @@ -0,0 +1 @@ +CONFIG_SPI_XLP=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SQUASHFS_LZ4 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SQUASHFS_LZ4 new file mode 100644 index 000000000000..f45773f0c015 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SQUASHFS_LZ4 @@ -0,0 +1 @@ +CONFIG_SQUASHFS_LZ4=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STAGING b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STAGING new file mode 100644 index 000000000000..c53ae30fa9b4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STAGING @@ -0,0 +1 @@ +CONFIG_STAGING=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_DUMMY b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_DUMMY new file mode 100644 index 000000000000..704a19ecec34 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_DUMMY @@ -0,0 +1 @@ +# CONFIG_STM_DUMMY is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_PROTO_BASIC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_PROTO_BASIC new file mode 100644 index 000000000000..7aed3f091d14 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_PROTO_BASIC @@ -0,0 +1 @@ +# CONFIG_STM_PROTO_BASIC is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_PROTO_SYS_T b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_PROTO_SYS_T new file mode 100644 index 000000000000..2dfea9b0aebf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_PROTO_SYS_T @@ -0,0 +1 @@ +# CONFIG_STM_PROTO_SYS_T is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_CONSOLE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_CONSOLE new file mode 100644 index 000000000000..aa10fd4769d8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_CONSOLE @@ -0,0 
+1 @@ +# CONFIG_STM_SOURCE_CONSOLE is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_FTRACE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_FTRACE new file mode 100644 index 000000000000..db5d7c1dcb81 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_FTRACE @@ -0,0 +1 @@ +# CONFIG_STM_SOURCE_FTRACE is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_HEARTBEAT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_HEARTBEAT new file mode 100644 index 000000000000..70814f3b7bce --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STM_SOURCE_HEARTBEAT @@ -0,0 +1 @@ +# CONFIG_STM_SOURCE_HEARTBEAT is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_INFINEON b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_INFINEON new file mode 100644 index 000000000000..e2e45880fd09 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_INFINEON @@ -0,0 +1 @@ +# CONFIG_TCG_INFINEON is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCP_CONG_CDG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCP_CONG_CDG new file mode 100644 index 000000000000..30b467e16703 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCP_CONG_CDG @@ -0,0 +1 @@ +# CONFIG_TCP_CONG_CDG is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_GOV_BANG_BANG b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_GOV_BANG_BANG new file mode 100644 index 000000000000..63f90615aed6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_GOV_BANG_BANG @@ -0,0 +1 @@ +# CONFIG_THERMAL_GOV_BANG_BANG is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_MMIO b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_MMIO new file mode 100644 index 000000000000..004769c5f154 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_MMIO @@ -0,0 +1 @@ +# CONFIG_THERMAL_MMIO is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_OF 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_OF new file mode 100644 index 000000000000..e8ba034f7ce8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_OF @@ -0,0 +1 @@ +CONFIG_THERMAL_OF=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_WRITABLE_TRIPS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_WRITABLE_TRIPS new file mode 100644 index 000000000000..abc3c076e1d6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_WRITABLE_TRIPS @@ -0,0 +1 @@ +# CONFIG_THERMAL_WRITABLE_TRIPS is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM new file mode 100644 index 000000000000..c05b69e841a6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM @@ -0,0 +1 @@ +CONFIG_VFIO_PLATFORM=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM_BASE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM_BASE new file mode 100644 index 000000000000..993fade9832f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VFIO_PLATFORM_BASE @@ -0,0 +1 @@ +CONFIG_VFIO_PLATFORM_BASE=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES new file mode 100644 index 000000000000..1abf97c1a25a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES @@ -0,0 +1 @@ +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB new file mode 100644 index 000000000000..fc5d87b7666e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB @@ -0,0 +1 @@ +CONFIG_VIRTIO_PCI_LIB=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB_LEGACY b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB_LEGACY new file mode 100644 index 
000000000000..fce6f51c8b19 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_PCI_LIB_LEGACY @@ -0,0 +1 @@ +CONFIG_VIRTIO_PCI_LIB_LEGACY=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRT_DRIVERS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRT_DRIVERS new file mode 100644 index 000000000000..8cc1125d46ee --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRT_DRIVERS @@ -0,0 +1 @@ +# CONFIG_VIRT_DRIVERS is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VMWARE_VMCI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VMWARE_VMCI new file mode 100644 index 000000000000..db263c420e93 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VMWARE_VMCI @@ -0,0 +1 @@ +# CONFIG_VMWARE_VMCI is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VMXNET3 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VMXNET3 new file mode 100644 index 000000000000..5055f163e393 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VMXNET3 @@ -0,0 +1 @@ +# CONFIG_VMXNET3 is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_WDAT_WDT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_WDAT_WDT new file mode 100644 index 000000000000..cf7b00dd9878 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_WDAT_WDT @@ -0,0 +1 @@ +# CONFIG_WDAT_WDT is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_XEN b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_XEN new file mode 100644 index 000000000000..f154fee42a65 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_XEN @@ -0,0 +1 @@ +# CONFIG_XEN is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_YITIAN_CPER_RAWDATA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_YITIAN_CPER_RAWDATA new file mode 100644 index 000000000000..4b6ac4a5df60 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_YITIAN_CPER_RAWDATA @@ -0,0 +1 @@ +# CONFIG_YITIAN_CPER_RAWDATA is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_AC 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_AC new file mode 100644 index 000000000000..5dbaee1a85df --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_AC @@ -0,0 +1 @@ +CONFIG_ACPI_AC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_EINJ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_EINJ new file mode 100644 index 000000000000..66e425d5b770 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_EINJ @@ -0,0 +1 @@ +CONFIG_ACPI_APEI_EINJ=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_ERST_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_ERST_DEBUG new file mode 100644 index 000000000000..235760d408f4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_ERST_DEBUG @@ -0,0 +1 @@ +# CONFIG_ACPI_APEI_ERST_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_GHES b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_GHES new file mode 100644 index 000000000000..8fd037d8f12b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_GHES @@ -0,0 +1 @@ +CONFIG_ACPI_APEI_GHES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_MEMORY_FAILURE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_MEMORY_FAILURE new file mode 100644 index 000000000000..46aa1579fd2a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_MEMORY_FAILURE @@ -0,0 +1 @@ +CONFIG_ACPI_APEI_MEMORY_FAILURE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_PCIEAER b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_PCIEAER new file mode 100644 index 000000000000..26f3e912912f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_APEI_PCIEAER @@ -0,0 +1 @@ +CONFIG_ACPI_APEI_PCIEAER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_BATTERY b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_BATTERY new file mode 100644 index 000000000000..eb3286698d34 --- 
/dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_BATTERY @@ -0,0 +1 @@ +CONFIG_ACPI_BATTERY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_BUTTON b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_BUTTON new file mode 100644 index 000000000000..1f552016b1da --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_BUTTON @@ -0,0 +1 @@ +CONFIG_ACPI_BUTTON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CONFIGFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CONFIGFS new file mode 100644 index 000000000000..4214adc8cf3f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CONFIGFS @@ -0,0 +1 @@ +# CONFIG_ACPI_CONFIGFS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CONTAINER b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CONTAINER new file mode 100644 index 000000000000..24287daf8421 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CONTAINER @@ -0,0 +1 @@ +CONFIG_ACPI_CONTAINER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CUSTOM_METHOD b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CUSTOM_METHOD new file mode 100644 index 000000000000..e7b797d0235e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CUSTOM_METHOD @@ -0,0 +1 @@ +# CONFIG_ACPI_CUSTOM_METHOD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_DEBUG new file mode 100644 index 000000000000..aee5969aa48d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_DEBUG @@ -0,0 +1 @@ +# CONFIG_ACPI_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_DEBUGGER b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_DEBUGGER new file mode 100644 index 000000000000..3f5999200406 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_DEBUGGER @@ -0,0 +1 @@ +# CONFIG_ACPI_DEBUGGER is not set diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_FAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_FAN new file mode 100644 index 000000000000..0feefba7b3f0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_FAN @@ -0,0 +1 @@ +CONFIG_ACPI_FAN=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HED b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HED new file mode 100644 index 000000000000..06042df5ab2e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HED @@ -0,0 +1 @@ +CONFIG_ACPI_HED=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HMAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HMAT new file mode 100644 index 000000000000..34e2b9331ffe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HMAT @@ -0,0 +1 @@ +CONFIG_ACPI_HMAT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HOTPLUG_MEMORY b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HOTPLUG_MEMORY new file mode 100644 index 000000000000..d2933e65ff91 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_HOTPLUG_MEMORY @@ -0,0 +1 @@ +CONFIG_ACPI_HOTPLUG_MEMORY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_NFIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_NFIT new file mode 100644 index 000000000000..922b719bf78b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_NFIT @@ -0,0 +1 @@ +CONFIG_ACPI_NFIT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PCC b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PCC new file mode 100644 index 000000000000..741c7152a4c6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PCC @@ -0,0 +1 @@ +CONFIG_ACPI_PCC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PCI_SLOT b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PCI_SLOT new file mode 100644 index 000000000000..6ef14483b6d9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PCI_SLOT @@ -0,0 +1 @@ 
+CONFIG_ACPI_PCI_SLOT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PRMT b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PRMT new file mode 100644 index 000000000000..416b5bab5226 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_PRMT @@ -0,0 +1 @@ +CONFIG_ACPI_PRMT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_SPCR_TABLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_SPCR_TABLE new file mode 100644 index 000000000000..f8a19253c3b0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_SPCR_TABLE @@ -0,0 +1 @@ +CONFIG_ACPI_SPCR_TABLE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_TABLE_UPGRADE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_TABLE_UPGRADE new file mode 100644 index 000000000000..276233fba1dd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_TABLE_UPGRADE @@ -0,0 +1 @@ +CONFIG_ACPI_TABLE_UPGRADE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_THERMAL b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_THERMAL new file mode 100644 index 000000000000..1bd7964b474e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_THERMAL @@ -0,0 +1 @@ +CONFIG_ACPI_THERMAL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_VIDEO b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_VIDEO new file mode 100644 index 000000000000..b642d06e3ad4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_VIDEO @@ -0,0 +1 @@ +CONFIG_ACPI_VIDEO=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_CORE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_CORE new file mode 100644 index 000000000000..0d809fccc867 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_CORE @@ -0,0 +1 @@ +CONFIG_ASYNC_CORE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_MEMCPY b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_MEMCPY new file mode 100644 index 000000000000..80c35b93679e --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_MEMCPY @@ -0,0 +1 @@ +CONFIG_ASYNC_MEMCPY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_PQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_PQ new file mode 100644 index 000000000000..645846af2bc5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_PQ @@ -0,0 +1 @@ +CONFIG_ASYNC_PQ=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_RAID6_RECOV b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_RAID6_RECOV new file mode 100644 index 000000000000..72ab56e75f3f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_RAID6_RECOV @@ -0,0 +1 @@ +CONFIG_ASYNC_RAID6_RECOV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_RAID6_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_RAID6_TEST new file mode 100644 index 000000000000..8fb25dd27814 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_RAID6_TEST @@ -0,0 +1 @@ +CONFIG_ASYNC_RAID6_TEST=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_TX_DMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_TX_DMA new file mode 100644 index 000000000000..c62f8624b5d6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_TX_DMA @@ -0,0 +1 @@ +CONFIG_ASYNC_TX_DMA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_XOR b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_XOR new file mode 100644 index 000000000000..7c2396c38493 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_XOR @@ -0,0 +1 @@ +CONFIG_ASYNC_XOR=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA new file mode 100644 index 000000000000..14102c1ab11b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA @@ -0,0 +1 @@ +CONFIG_ATA=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_ACPI b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_ACPI new file mode 100644 index 
000000000000..a0be76e56668 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_ACPI @@ -0,0 +1 @@ +CONFIG_ATA_ACPI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_BMDMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_BMDMA new file mode 100644 index 000000000000..06163193f349 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_BMDMA @@ -0,0 +1 @@ +CONFIG_ATA_BMDMA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_FORCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_FORCE new file mode 100644 index 000000000000..aacf2b3c7eab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_FORCE @@ -0,0 +1 @@ +CONFIG_ATA_FORCE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_GENERIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_GENERIC new file mode 100644 index 000000000000..eb3093a11939 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_GENERIC @@ -0,0 +1 @@ +CONFIG_ATA_GENERIC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_OVER_ETH b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_OVER_ETH new file mode 100644 index 000000000000..4742a627864d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_OVER_ETH @@ -0,0 +1 @@ +# CONFIG_ATA_OVER_ETH is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_PIIX b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_PIIX new file mode 100644 index 000000000000..45bfefead8a5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_PIIX @@ -0,0 +1 @@ +CONFIG_ATA_PIIX=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_SFF b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_SFF new file mode 100644 index 000000000000..8631ad6d7d6a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_SFF @@ -0,0 +1 @@ +CONFIG_ATA_SFF=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_VERBOSE_ERROR b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_VERBOSE_ERROR new file mode 
100644 index 000000000000..cc5b03f8d342 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_VERBOSE_ERROR @@ -0,0 +1 @@ +CONFIG_ATA_VERBOSE_ERROR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATM_DRIVERS b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATM_DRIVERS new file mode 100644 index 000000000000..9ef2a621becb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATM_DRIVERS @@ -0,0 +1 @@ +# CONFIG_ATM_DRIVERS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATOMIC64_SELFTEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATOMIC64_SELFTEST new file mode 100644 index 000000000000..26a054e27160 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ATOMIC64_SELFTEST @@ -0,0 +1 @@ +CONFIG_ATOMIC64_SELFTEST=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BALLOON_COMPACTION b/anolis/configs/L1-RECOMMEND/default/CONFIG_BALLOON_COMPACTION new file mode 100644 index 000000000000..930afec9df3e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BALLOON_COMPACTION @@ -0,0 +1 @@ +CONFIG_BALLOON_COMPACTION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BCACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_BCACHE new file mode 100644 index 000000000000..7091077fc7d0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BCACHE @@ -0,0 +1 @@ +# CONFIG_BCACHE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BCMGENET b/anolis/configs/L1-RECOMMEND/default/CONFIG_BCMGENET new file mode 100644 index 000000000000..674bcb0ddd0c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BCMGENET @@ -0,0 +1 @@ +# CONFIG_BCMGENET is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BFQ_CGROUP_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_BFQ_CGROUP_DEBUG new file mode 100644 index 000000000000..90c958fa573a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BFQ_CGROUP_DEBUG @@ -0,0 +1 @@ +# CONFIG_BFQ_CGROUP_DEBUG is not set diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_BINFMT_MISC b/anolis/configs/L1-RECOMMEND/default/CONFIG_BINFMT_MISC new file mode 100644 index 000000000000..20754804bf50 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BINFMT_MISC @@ -0,0 +1 @@ +CONFIG_BINFMT_MISC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLKDEV_UBLK_LEGACY_OPCODES b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLKDEV_UBLK_LEGACY_OPCODES new file mode 100644 index 000000000000..5abaa4d2046e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLKDEV_UBLK_LEGACY_OPCODES @@ -0,0 +1 @@ +CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEBUG_FS_ZONED b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEBUG_FS_ZONED new file mode 100644 index 000000000000..15ef78eaa592 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEBUG_FS_ZONED @@ -0,0 +1 @@ +CONFIG_BLK_DEBUG_FS_ZONED=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_BSG b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_BSG new file mode 100644 index 000000000000..8acabf1ac697 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_BSG @@ -0,0 +1 @@ +CONFIG_BLK_DEV_BSG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_BSGLIB b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_BSGLIB new file mode 100644 index 000000000000..5e1a78b5e007 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_BSGLIB @@ -0,0 +1 @@ +CONFIG_BLK_DEV_BSGLIB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_DM b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_DM new file mode 100644 index 000000000000..e6cd5d106e17 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_DM @@ -0,0 +1 @@ +CONFIG_BLK_DEV_DM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_DRBD b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_DRBD new file mode 100644 index 
000000000000..5ce59aee1b7e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_DRBD @@ -0,0 +1 @@ +# CONFIG_BLK_DEV_DRBD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_INTEGRITY b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_INTEGRITY new file mode 100644 index 000000000000..1ef600f47b42 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_INTEGRITY @@ -0,0 +1 @@ +CONFIG_BLK_DEV_INTEGRITY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_INTEGRITY_T10 b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_INTEGRITY_T10 new file mode 100644 index 000000000000..e1ad0a7a8a16 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_INTEGRITY_T10 @@ -0,0 +1 @@ +CONFIG_BLK_DEV_INTEGRITY_T10=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP new file mode 100644 index 000000000000..72437e0c0fc1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP @@ -0,0 +1 @@ +CONFIG_BLK_DEV_LOOP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP_MIN_COUNT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP_MIN_COUNT new file mode 100644 index 000000000000..e81690649106 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_LOOP_MIN_COUNT @@ -0,0 +1 @@ +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_MD b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_MD new file mode 100644 index 000000000000..791d32f4588c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_MD @@ -0,0 +1 @@ +CONFIG_BLK_DEV_MD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_NBD b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_NBD new file mode 100644 index 000000000000..be2735d4538c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_NBD @@ -0,0 +1 @@ +CONFIG_BLK_DEV_NBD=m 
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_NULL_BLK b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_NULL_BLK new file mode 100644 index 000000000000..09340ef3dde8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_NULL_BLK @@ -0,0 +1 @@ +CONFIG_BLK_DEV_NULL_BLK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_PCIESSD_MTIP32XX b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_PCIESSD_MTIP32XX new file mode 100644 index 000000000000..82024346d771 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_PCIESSD_MTIP32XX @@ -0,0 +1 @@ +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM new file mode 100644 index 000000000000..834088f95a0e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM @@ -0,0 +1 @@ +CONFIG_BLK_DEV_RAM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM_COUNT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM_COUNT new file mode 100644 index 000000000000..cefe13e1483d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM_COUNT @@ -0,0 +1 @@ +CONFIG_BLK_DEV_RAM_COUNT=16 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM_SIZE b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM_SIZE new file mode 100644 index 000000000000..ac4916f29ab4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RAM_SIZE @@ -0,0 +1 @@ +CONFIG_BLK_DEV_RAM_SIZE=16384 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RBD b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RBD new file mode 100644 index 000000000000..156a099704bc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_RBD @@ -0,0 +1 @@ +CONFIG_BLK_DEV_RBD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_SD b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_SD new 
file mode 100644 index 000000000000..f283d1a826dc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_SD @@ -0,0 +1 @@ +CONFIG_BLK_DEV_SD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_SR b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_SR new file mode 100644 index 000000000000..fff43e8802d5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_SR @@ -0,0 +1 @@ +CONFIG_BLK_DEV_SR=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_THROTTLING_LOW b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_THROTTLING_LOW new file mode 100644 index 000000000000..802bc55b4c5e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_THROTTLING_LOW @@ -0,0 +1 @@ +# CONFIG_BLK_DEV_THROTTLING_LOW is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_UBLK b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_UBLK new file mode 100644 index 000000000000..592b0ba4d661 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_UBLK @@ -0,0 +1 @@ +CONFIG_BLK_DEV_UBLK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_ZONED b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_ZONED new file mode 100644 index 000000000000..529b0b105c2a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_ZONED @@ -0,0 +1 @@ +CONFIG_BLK_DEV_ZONED=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_WBT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_WBT new file mode 100644 index 000000000000..1e5381167dad --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_WBT @@ -0,0 +1 @@ +# CONFIG_BLK_WBT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2 new file mode 100644 index 000000000000..ff541ef7fb47 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2 @@ -0,0 +1 @@ +CONFIG_BNX2=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2X 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2X new file mode 100644 index 000000000000..d32b37750388 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2X @@ -0,0 +1 @@ +CONFIG_BNX2X=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2X_SRIOV b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2X_SRIOV new file mode 100644 index 000000000000..73521e4aad84 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNX2X_SRIOV @@ -0,0 +1 @@ +CONFIG_BNX2X_SRIOV=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT new file mode 100644 index 000000000000..3305b042d177 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT @@ -0,0 +1 @@ +CONFIG_BNXT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_DCB b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_DCB new file mode 100644 index 000000000000..fd4061d58f41 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_DCB @@ -0,0 +1 @@ +CONFIG_BNXT_DCB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_FLOWER_OFFLOAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_FLOWER_OFFLOAD new file mode 100644 index 000000000000..170bbf312ca2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_FLOWER_OFFLOAD @@ -0,0 +1 @@ +CONFIG_BNXT_FLOWER_OFFLOAD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_HWMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_HWMON new file mode 100644 index 000000000000..4de524b9613d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_HWMON @@ -0,0 +1 @@ +CONFIG_BNXT_HWMON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_SRIOV b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_SRIOV new file mode 100644 index 000000000000..92285090d098 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BNXT_SRIOV @@ -0,0 +1 @@ +CONFIG_BNXT_SRIOV=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTPARAM_HARDLOCKUP_PANIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTPARAM_HARDLOCKUP_PANIC new file mode 100644 index 000000000000..ea640e553409 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTPARAM_HARDLOCKUP_PANIC @@ -0,0 +1 @@ +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC new file mode 100644 index 000000000000..20270b1c6ee0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC @@ -0,0 +1 @@ +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTTIME_TRACING b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTTIME_TRACING new file mode 100644 index 000000000000..d2bc2c14e573 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOTTIME_TRACING @@ -0,0 +1 @@ +# CONFIG_BOOTTIME_TRACING is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOT_CONFIG b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOT_CONFIG new file mode 100644 index 000000000000..947b159c3805 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOT_CONFIG @@ -0,0 +1 @@ +# CONFIG_BOOT_CONFIG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOT_PRINTK_DELAY b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOT_PRINTK_DELAY new file mode 100644 index 000000000000..081352b0ce81 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BOOT_PRINTK_DELAY @@ -0,0 +1 @@ +CONFIG_BOOT_PRINTK_DELAY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BPFILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPFILTER new file mode 100644 index 000000000000..5da614fb247f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPFILTER @@ -0,0 +1 @@ +# CONFIG_BPFILTER is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_JIT_ALWAYS_ON 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_JIT_ALWAYS_ON new file mode 100644 index 000000000000..45aacb43476b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_JIT_ALWAYS_ON @@ -0,0 +1 @@ +CONFIG_BPF_JIT_ALWAYS_ON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_KPROBE_OVERRIDE b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_KPROBE_OVERRIDE new file mode 100644 index 000000000000..573604162bfb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_KPROBE_OVERRIDE @@ -0,0 +1 @@ +# CONFIG_BPF_KPROBE_OVERRIDE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_STREAM_PARSER b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_STREAM_PARSER new file mode 100644 index 000000000000..7cf783506033 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BPF_STREAM_PARSER @@ -0,0 +1 @@ +CONFIG_BPF_STREAM_PARSER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRANCH_PROFILE_NONE b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRANCH_PROFILE_NONE new file mode 100644 index 000000000000..a741dd6151f3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRANCH_PROFILE_NONE @@ -0,0 +1 @@ +CONFIG_BRANCH_PROFILE_NONE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_802_3 b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_802_3 new file mode 100644 index 000000000000..a7a385edfa31 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_802_3 @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_802_3=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_AMONG b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_AMONG new file mode 100644 index 000000000000..5b854f904018 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_AMONG @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_AMONG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_ARP b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_ARP new file mode 100644 index 
000000000000..a8781c379b0f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_ARP @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_ARP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_ARPREPLY b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_ARPREPLY new file mode 100644 index 000000000000..f1bd17c9d124 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_ARPREPLY @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_ARPREPLY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_BROUTE b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_BROUTE new file mode 100644 index 000000000000..d8c6c59adb86 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_BROUTE @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_BROUTE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_DNAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_DNAT new file mode 100644 index 000000000000..309f316d1256 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_DNAT @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_DNAT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP new file mode 100644 index 000000000000..5a60e537e6d1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_IP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP6 new file mode 100644 index 000000000000..df1578dc274b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_IP6 @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_IP6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_LIMIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_LIMIT new file mode 100644 index 000000000000..5c74954a0399 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_LIMIT @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_LIMIT=m 
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_LOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_LOG new file mode 100644 index 000000000000..33821290e398 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_LOG @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_LOG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_MARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_MARK new file mode 100644 index 000000000000..63b5e4b0abf5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_MARK @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_MARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_MARK_T b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_MARK_T new file mode 100644 index 000000000000..b89d06ee31cc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_MARK_T @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_MARK_T=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_NFLOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_NFLOG new file mode 100644 index 000000000000..0419263a0cd8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_NFLOG @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_NFLOG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_PKTTYPE b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_PKTTYPE new file mode 100644 index 000000000000..c29e3e03036d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_PKTTYPE @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_PKTTYPE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_REDIRECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_REDIRECT new file mode 100644 index 000000000000..c40b497d184a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_REDIRECT @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_REDIRECT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_SNAT 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_SNAT new file mode 100644 index 000000000000..1bd963216eec --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_SNAT @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_SNAT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_STP b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_STP new file mode 100644 index 000000000000..f1c9e454d219 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_STP @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_STP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_T_FILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_T_FILTER new file mode 100644 index 000000000000..e2c7f477221d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_T_FILTER @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_T_FILTER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_T_NAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_T_NAT new file mode 100644 index 000000000000..a64d08d8d8f3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_T_NAT @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_T_NAT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_VLAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_VLAN new file mode 100644 index 000000000000..bde07d6ecf29 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_EBT_VLAN @@ -0,0 +1 @@ +CONFIG_BRIDGE_EBT_VLAN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_IGMP_SNOOPING b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_IGMP_SNOOPING new file mode 100644 index 000000000000..a7abc6bb4ed7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_IGMP_SNOOPING @@ -0,0 +1 @@ +CONFIG_BRIDGE_IGMP_SNOOPING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_MRP b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_MRP new file mode 100644 index 000000000000..38f0a923d22d --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_MRP @@ -0,0 +1 @@ +# CONFIG_BRIDGE_MRP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_NETFILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_NETFILTER new file mode 100644 index 000000000000..d052fbcce3a1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_NETFILTER @@ -0,0 +1 @@ +CONFIG_BRIDGE_NETFILTER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_NF_EBTABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_NF_EBTABLES new file mode 100644 index 000000000000..f68518f59f3d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_NF_EBTABLES @@ -0,0 +1 @@ +CONFIG_BRIDGE_NF_EBTABLES=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_VLAN_FILTERING b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_VLAN_FILTERING new file mode 100644 index 000000000000..0792e0d510d2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BRIDGE_VLAN_FILTERING @@ -0,0 +1 @@ +CONFIG_BRIDGE_VLAN_FILTERING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BSD_PROCESS_ACCT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BSD_PROCESS_ACCT new file mode 100644 index 000000000000..b9a4966014fe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BSD_PROCESS_ACCT @@ -0,0 +1 @@ +CONFIG_BSD_PROCESS_ACCT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BSD_PROCESS_ACCT_V3 b/anolis/configs/L1-RECOMMEND/default/CONFIG_BSD_PROCESS_ACCT_V3 new file mode 100644 index 000000000000..bf334a252c19 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BSD_PROCESS_ACCT_V3 @@ -0,0 +1 @@ +CONFIG_BSD_PROCESS_ACCT_V3=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BTRFS_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_BTRFS_FS new file mode 100644 index 000000000000..3b4d4254c153 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BTRFS_FS @@ -0,0 +1 @@ +CONFIG_BTRFS_FS=m diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_BUG_ON_DATA_CORRUPTION b/anolis/configs/L1-RECOMMEND/default/CONFIG_BUG_ON_DATA_CORRUPTION new file mode 100644 index 000000000000..5ebeba7b200d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BUG_ON_DATA_CORRUPTION @@ -0,0 +1 @@ +CONFIG_BUG_ON_DATA_CORRUPTION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BUILD_SALT b/anolis/configs/L1-RECOMMEND/default/CONFIG_BUILD_SALT new file mode 100644 index 000000000000..6cf55b283df7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BUILD_SALT @@ -0,0 +1 @@ +CONFIG_BUILD_SALT="" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CACHEFILES_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CACHEFILES_DEBUG new file mode 100644 index 000000000000..81ad37f9789a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CACHEFILES_DEBUG @@ -0,0 +1 @@ +# CONFIG_CACHEFILES_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM new file mode 100644 index 000000000000..2bf814b0a776 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM @@ -0,0 +1 @@ +CONFIG_CDROM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD new file mode 100644 index 000000000000..509827e58aba --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD @@ -0,0 +1 @@ +CONFIG_CDROM_PKTCDVD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD_BUFFERS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD_BUFFERS new file mode 100644 index 000000000000..8dafda73512c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD_BUFFERS @@ -0,0 +1 @@ +CONFIG_CDROM_PKTCDVD_BUFFERS=8 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD_WCACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD_WCACHE new file mode 100644 index 
000000000000..ce8fec39d57f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CDROM_PKTCDVD_WCACHE @@ -0,0 +1 @@ +# CONFIG_CDROM_PKTCDVD_WCACHE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS new file mode 100644 index 000000000000..25623fa97399 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS @@ -0,0 +1 @@ +CONFIG_CEPH_FS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FSCACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FSCACHE new file mode 100644 index 000000000000..7c46162890d5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FSCACHE @@ -0,0 +1 @@ +# CONFIG_CEPH_FSCACHE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS_POSIX_ACL b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS_POSIX_ACL new file mode 100644 index 000000000000..680364368a16 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS_POSIX_ACL @@ -0,0 +1 @@ +CONFIG_CEPH_FS_POSIX_ACL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS_SECURITY_LABEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS_SECURITY_LABEL new file mode 100644 index 000000000000..3c5d08237214 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CEPH_FS_SECURITY_LABEL @@ -0,0 +1 @@ +# CONFIG_CEPH_FS_SECURITY_LABEL is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_DEBUG new file mode 100644 index 000000000000..3d57cb63b394 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_DEBUG @@ -0,0 +1 @@ +# CONFIG_CGROUP_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_FREEZER b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_FREEZER new file mode 100644 index 000000000000..d7e06d250f40 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_FREEZER @@ -0,0 +1 @@ 
+CONFIG_CGROUP_FREEZER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_NET_CLASSID b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_NET_CLASSID new file mode 100644 index 000000000000..73e4e4b3f024 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_NET_CLASSID @@ -0,0 +1 @@ +CONFIG_CGROUP_NET_CLASSID=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_NET_PRIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_NET_PRIO new file mode 100644 index 000000000000..b4e8e68d5140 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_NET_PRIO @@ -0,0 +1 @@ +CONFIG_CGROUP_NET_PRIO=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_WRITEBACK b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_WRITEBACK new file mode 100644 index 000000000000..baf2252de5ff --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CGROUP_WRITEBACK @@ -0,0 +1 @@ +CONFIG_CGROUP_WRITEBACK=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_SCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_SCH new file mode 100644 index 000000000000..06017f05f6ab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_SCH @@ -0,0 +1 @@ +CONFIG_CHR_DEV_SCH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_SG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_SG new file mode 100644 index 000000000000..7350041d12c6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_SG @@ -0,0 +1 @@ +CONFIG_CHR_DEV_SG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_ST b/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_ST new file mode 100644 index 000000000000..430ba8718a20 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CHR_DEV_ST @@ -0,0 +1 @@ +CONFIG_CHR_DEV_ST=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CLS_U32_MARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_CLS_U32_MARK new file mode 100644 index 
000000000000..5fb0654ba12f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CLS_U32_MARK @@ -0,0 +1 @@ +CONFIG_CLS_U32_MARK=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CLS_U32_PERF b/anolis/configs/L1-RECOMMEND/default/CONFIG_CLS_U32_PERF new file mode 100644 index 000000000000..44646ed873f0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CLS_U32_PERF @@ -0,0 +1 @@ +CONFIG_CLS_U32_PERF=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA new file mode 100644 index 000000000000..309c9e771d6b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA @@ -0,0 +1 @@ +CONFIG_CMA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_ALIGNMENT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_ALIGNMENT new file mode 100644 index 000000000000..7941445e73ae --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_ALIGNMENT @@ -0,0 +1 @@ +CONFIG_CMA_ALIGNMENT=8 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_AREAS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_AREAS new file mode 100644 index 000000000000..9aac2ce735e5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_AREAS @@ -0,0 +1 @@ +CONFIG_CMA_AREAS=19 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_DEBUG new file mode 100644 index 000000000000..64ff80c56681 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_DEBUG @@ -0,0 +1 @@ +# CONFIG_CMA_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_DEBUGFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_DEBUGFS new file mode 100644 index 000000000000..fba89903a06c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_CMA_DEBUGFS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MAX 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MAX new file mode 100644 index 000000000000..e8ad8cf05cdc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MAX @@ -0,0 +1 @@ +# CONFIG_CMA_SIZE_SEL_MAX is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MBYTES b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MBYTES new file mode 100644 index 000000000000..2a76a105cd03 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MBYTES @@ -0,0 +1 @@ +CONFIG_CMA_SIZE_SEL_MBYTES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MIN b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MIN new file mode 100644 index 000000000000..2748b1eb698a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_MIN @@ -0,0 +1 @@ +# CONFIG_CMA_SIZE_SEL_MIN is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_PERCENTAGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_PERCENTAGE new file mode 100644 index 000000000000..a23118a96938 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CMA_SIZE_SEL_PERCENTAGE @@ -0,0 +1 @@ +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT new file mode 100644 index 000000000000..9b072bae787e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT @@ -0,0 +1 @@ +CONFIG_COMPAT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT_32BIT_TIME b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT_32BIT_TIME new file mode 100644 index 000000000000..da143b00f8bc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT_32BIT_TIME @@ -0,0 +1 @@ +CONFIG_COMPAT_32BIT_TIME=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT_BRK b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT_BRK new file mode 100644 index 
000000000000..e05246612c55 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPAT_BRK @@ -0,0 +1 @@ +# CONFIG_COMPAT_BRK is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPILE_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPILE_TEST new file mode 100644 index 000000000000..bcee8efc30e2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_COMPILE_TEST @@ -0,0 +1 @@ +# CONFIG_COMPILE_TEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CONNECTOR b/anolis/configs/L1-RECOMMEND/default/CONFIG_CONNECTOR new file mode 100644 index 000000000000..ea191496dbda --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CONNECTOR @@ -0,0 +1 @@ +CONFIG_CONNECTOR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CONSOLE_LOGLEVEL_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CONSOLE_LOGLEVEL_DEFAULT new file mode 100644 index 000000000000..7f3f03e39daf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CONSOLE_LOGLEVEL_DEFAULT @@ -0,0 +1 @@ +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CONSOLE_LOGLEVEL_QUIET b/anolis/configs/L1-RECOMMEND/default/CONFIG_CONSOLE_LOGLEVEL_QUIET new file mode 100644 index 000000000000..0c5771bca6ff --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CONSOLE_LOGLEVEL_QUIET @@ -0,0 +1 @@ +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS new file mode 100644 index 000000000000..db11fb1a7fa8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS @@ -0,0 +1 @@ +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE new file mode 100644 index 000000000000..f99021fe2137 --- 
/dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE new file mode 100644 index 000000000000..e8723b1ef09e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE @@ -0,0 +1 @@ +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL new file mode 100644 index 000000000000..c08cd0d5dd72 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL @@ -0,0 +1 @@ +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE new file mode 100644 index 000000000000..896fd6dbff5e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE @@ -0,0 +1 @@ +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_CONSERVATIVE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_CONSERVATIVE new file mode 100644 index 000000000000..ff981a945db7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_CONSERVATIVE @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_ONDEMAND b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_ONDEMAND new file mode 100644 index 000000000000..fb152cb92043 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_ONDEMAND @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_ONDEMAND=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_PERFORMANCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_PERFORMANCE new file mode 100644 index 000000000000..2bf548fc409a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_PERFORMANCE @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_POWERSAVE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_POWERSAVE new file mode 100644 index 000000000000..a22c379626bb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_POWERSAVE @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_POWERSAVE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_USERSPACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_USERSPACE new file mode 100644 index 000000000000..8c1bc6848c91 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_USERSPACE @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_USERSPACE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_STAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_STAT new file mode 100644 index 000000000000..ea0bc7f5397f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_STAT @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_STAT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_LADDER b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_LADDER new file mode 100644 index 000000000000..776a3b20b233 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_LADDER @@ -0,0 +1 @@ +# CONFIG_CPU_IDLE_GOV_LADDER is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_MENU b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_MENU new file mode 100644 index 000000000000..38d24a2762fc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_MENU @@ -0,0 +1 @@ +CONFIG_CPU_IDLE_GOV_MENU=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_TEO b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_TEO new file mode 100644 index 000000000000..6bb1788aa034 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_IDLE_GOV_TEO @@ -0,0 +1 @@ +# CONFIG_CPU_IDLE_GOV_TEO is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRAMFS_BLOCKDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRAMFS_BLOCKDEV new file mode 100644 index 000000000000..c5433919b939 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRAMFS_BLOCKDEV @@ -0,0 +1 @@ +CONFIG_CRAMFS_BLOCKDEV=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRAMFS_MTD b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRAMFS_MTD new file mode 100644 index 000000000000..d5c14020ff22 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRAMFS_MTD @@ -0,0 +1 @@ +# CONFIG_CRAMFS_MTD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CROSS_MEMORY_ATTACH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CROSS_MEMORY_ATTACH new file mode 100644 index 000000000000..e960b10202c4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CROSS_MEMORY_ATTACH @@ -0,0 +1 @@ +CONFIG_CROSS_MEMORY_ATTACH=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_842 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_842 new file mode 100644 index 000000000000..a5e4ffef2dcf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_842 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_842 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ACOMP2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ACOMP2 new file mode 100644 index 000000000000..87f3fab93216 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ACOMP2 @@ -0,0 +1 @@ +CONFIG_CRYPTO_ACOMP2=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ADIANTUM b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ADIANTUM new file mode 100644 index 
000000000000..ef4db40e3287 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ADIANTUM @@ -0,0 +1 @@ +# CONFIG_CRYPTO_ADIANTUM is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AEGIS128 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AEGIS128 new file mode 100644 index 000000000000..d5748cf4c3e8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AEGIS128 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_AEGIS128 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AES_TI b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AES_TI new file mode 100644 index 000000000000..de13d3ebe487 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AES_TI @@ -0,0 +1 @@ +# CONFIG_CRYPTO_AES_TI is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ANSI_CPRNG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ANSI_CPRNG new file mode 100644 index 000000000000..eca113843b6c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ANSI_CPRNG @@ -0,0 +1 @@ +CONFIG_CRYPTO_ANSI_CPRNG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ANUBIS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ANUBIS new file mode 100644 index 000000000000..f7884852a984 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ANUBIS @@ -0,0 +1 @@ +CONFIG_CRYPTO_ANUBIS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ARC4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ARC4 new file mode 100644 index 000000000000..08d710ec5079 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ARC4 @@ -0,0 +1 @@ +CONFIG_CRYPTO_ARC4=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AUTHENC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AUTHENC new file mode 100644 index 000000000000..07d7a88a5564 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_AUTHENC @@ -0,0 +1 @@ +CONFIG_CRYPTO_AUTHENC=m diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLAKE2B b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLAKE2B new file mode 100644 index 000000000000..13f92b91f5b6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLAKE2B @@ -0,0 +1 @@ +CONFIG_CRYPTO_BLAKE2B=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLOWFISH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLOWFISH new file mode 100644 index 000000000000..5145b0c977ec --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLOWFISH @@ -0,0 +1 @@ +CONFIG_CRYPTO_BLOWFISH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLOWFISH_COMMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLOWFISH_COMMON new file mode 100644 index 000000000000..5a8ded61a18b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_BLOWFISH_COMMON @@ -0,0 +1 @@ +CONFIG_CRYPTO_BLOWFISH_COMMON=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAMELLIA b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAMELLIA new file mode 100644 index 000000000000..5781d7da1d33 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAMELLIA @@ -0,0 +1 @@ +CONFIG_CRYPTO_CAMELLIA=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST5 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST5 new file mode 100644 index 000000000000..3dd03560f85d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST5 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CAST5=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST6 new file mode 100644 index 000000000000..6c52c80f3e7c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST6 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CAST6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST_COMMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST_COMMON new file mode 100644 index 
000000000000..f89da04cc6fd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CAST_COMMON @@ -0,0 +1 @@ +CONFIG_CRYPTO_CAST_COMMON=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CBC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CBC new file mode 100644 index 000000000000..c501e8e03ffa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CBC @@ -0,0 +1 @@ +CONFIG_CRYPTO_CBC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CCM b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CCM new file mode 100644 index 000000000000..f552c9a96c5e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CCM @@ -0,0 +1 @@ +CONFIG_CRYPTO_CCM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CFB b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CFB new file mode 100644 index 000000000000..6f257b4545d4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CFB @@ -0,0 +1 @@ +CONFIG_CRYPTO_CFB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CHACHA20 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CHACHA20 new file mode 100644 index 000000000000..1eb6ab8d33e8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CHACHA20 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CHACHA20=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CHACHA20POLY1305 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CHACHA20POLY1305 new file mode 100644 index 000000000000..09126b7b2ac9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CHACHA20POLY1305 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CHACHA20POLY1305=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CMAC new file mode 100644 index 000000000000..587b31509430 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CMAC @@ -0,0 +1 @@ +CONFIG_CRYPTO_CMAC=m diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32 new file mode 100644 index 000000000000..9ab72d652248 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRC32=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32C b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32C new file mode 100644 index 000000000000..9323d8397fe8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRC32C @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRC32C=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRCT10DIF b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRCT10DIF new file mode 100644 index 000000000000..eb632d3f0de8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRCT10DIF @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRCT10DIF=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRYPTD b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRYPTD new file mode 100644 index 000000000000..36649cff393d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CRYPTD @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRYPTD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CTR b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CTR new file mode 100644 index 000000000000..63e15a0d8da7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CTR @@ -0,0 +1 @@ +CONFIG_CRYPTO_CTR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CTS new file mode 100644 index 000000000000..e7ff7297caed --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_CTS @@ -0,0 +1 @@ +CONFIG_CRYPTO_CTS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEFLATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEFLATE new file mode 100644 index 000000000000..071307ba639c --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEFLATE @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEFLATE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DES b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DES new file mode 100644 index 000000000000..e8145a59a884 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DES @@ -0,0 +1 @@ +CONFIG_CRYPTO_DES=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_VIRTIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_VIRTIO new file mode 100644 index 000000000000..8acad1aac397 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_VIRTIO @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_VIRTIO is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DH new file mode 100644 index 000000000000..ea06ab3c25c5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DH @@ -0,0 +1 @@ +CONFIG_CRYPTO_DH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG new file mode 100644 index 000000000000..c31236d2472b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG @@ -0,0 +1 @@ +CONFIG_CRYPTO_DRBG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_CTR b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_CTR new file mode 100644 index 000000000000..1c292fe5ca66 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_CTR @@ -0,0 +1 @@ +CONFIG_CRYPTO_DRBG_CTR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_HASH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_HASH new file mode 100644 index 000000000000..2ec6b20ee10f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_HASH @@ -0,0 +1 @@ +CONFIG_CRYPTO_DRBG_HASH=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_HMAC 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_HMAC new file mode 100644 index 000000000000..d61f0434fc03 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_HMAC @@ -0,0 +1 @@ +CONFIG_CRYPTO_DRBG_HMAC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_MENU b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_MENU new file mode 100644 index 000000000000..f11183adbf55 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DRBG_MENU @@ -0,0 +1 @@ +CONFIG_CRYPTO_DRBG_MENU=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECB b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECB new file mode 100644 index 000000000000..bc645abc9053 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECB @@ -0,0 +1 @@ +CONFIG_CRYPTO_ECB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECC new file mode 100644 index 000000000000..0e9e5705f32c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECC @@ -0,0 +1 @@ +CONFIG_CRYPTO_ECC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECDH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECDH new file mode 100644 index 000000000000..0244ab63e71a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECDH @@ -0,0 +1 @@ +CONFIG_CRYPTO_ECDH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECDSA b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECDSA new file mode 100644 index 000000000000..3fb9c1b9e1f4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECDSA @@ -0,0 +1 @@ +# CONFIG_CRYPTO_ECDSA is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECHAINIV b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECHAINIV new file mode 100644 index 000000000000..f6f8d76f37cb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECHAINIV @@ -0,0 +1 @@ 
+CONFIG_CRYPTO_ECHAINIV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ESSIV b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ESSIV new file mode 100644 index 000000000000..9c914d4467a0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ESSIV @@ -0,0 +1 @@ +CONFIG_CRYPTO_ESSIV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_FCRYPT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_FCRYPT new file mode 100644 index 000000000000..e3905ab84c57 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_FCRYPT @@ -0,0 +1 @@ +CONFIG_CRYPTO_FCRYPT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_FIPS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_FIPS new file mode 100644 index 000000000000..a948c8208307 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_FIPS @@ -0,0 +1 @@ +CONFIG_CRYPTO_FIPS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HASH_INFO b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HASH_INFO new file mode 100644 index 000000000000..d511eacd5eeb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HASH_INFO @@ -0,0 +1 @@ +CONFIG_CRYPTO_HASH_INFO=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HMAC new file mode 100644 index 000000000000..2aa4086a0a70 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HMAC @@ -0,0 +1 @@ +CONFIG_CRYPTO_HMAC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HW b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HW new file mode 100644 index 000000000000..245062d86f43 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_HW @@ -0,0 +1 @@ +CONFIG_CRYPTO_HW=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_JITTERENTROPY b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_JITTERENTROPY new file mode 100644 index 000000000000..bee3ca16f7dc --- 
/dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_JITTERENTROPY @@ -0,0 +1 @@ +CONFIG_CRYPTO_JITTERENTROPY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KEYWRAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KEYWRAP new file mode 100644 index 000000000000..75daba15358e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KEYWRAP @@ -0,0 +1 @@ +# CONFIG_CRYPTO_KEYWRAP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KHAZAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KHAZAD new file mode 100644 index 000000000000..23d94b45f5fd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KHAZAD @@ -0,0 +1 @@ +CONFIG_CRYPTO_KHAZAD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KPP b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KPP new file mode 100644 index 000000000000..47c6b01604c6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KPP @@ -0,0 +1 @@ +CONFIG_CRYPTO_KPP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KPP2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KPP2 new file mode 100644 index 000000000000..005f1b813d15 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_KPP2 @@ -0,0 +1 @@ +CONFIG_CRYPTO_KPP2=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LRW b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LRW new file mode 100644 index 000000000000..22ce862b83e9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LRW @@ -0,0 +1 @@ +CONFIG_CRYPTO_LRW=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4 new file mode 100644 index 000000000000..2f1d09a8a173 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4 @@ -0,0 +1 @@ +CONFIG_CRYPTO_LZ4=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4HC 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4HC new file mode 100644 index 000000000000..9f1a3d6a2d9b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZ4HC @@ -0,0 +1 @@ +CONFIG_CRYPTO_LZ4HC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZO b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZO new file mode 100644 index 000000000000..418ab2f794d3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_LZO @@ -0,0 +1 @@ +CONFIG_CRYPTO_LZO=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MANAGER_DISABLE_TESTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MANAGER_DISABLE_TESTS new file mode 100644 index 000000000000..2b7116fd210a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MANAGER_DISABLE_TESTS @@ -0,0 +1 @@ +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MANAGER_EXTRA_TESTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MANAGER_EXTRA_TESTS new file mode 100644 index 000000000000..b27d3dbb7566 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MANAGER_EXTRA_TESTS @@ -0,0 +1 @@ +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MD4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MD4 new file mode 100644 index 000000000000..52b52847546a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MD4 @@ -0,0 +1 @@ +CONFIG_CRYPTO_MD4=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MD5 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MD5 new file mode 100644 index 000000000000..4ae53bd159f4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MD5 @@ -0,0 +1 @@ +CONFIG_CRYPTO_MD5=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MICHAEL_MIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MICHAEL_MIC new file mode 100644 index 
000000000000..7075e63ecffa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_MICHAEL_MIC @@ -0,0 +1 @@ +CONFIG_CRYPTO_MICHAEL_MIC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_NULL b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_NULL new file mode 100644 index 000000000000..0747b1197264 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_NULL @@ -0,0 +1 @@ +CONFIG_CRYPTO_NULL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_NULL2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_NULL2 new file mode 100644 index 000000000000..471900dda245 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_NULL2 @@ -0,0 +1 @@ +CONFIG_CRYPTO_NULL2=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_OFB b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_OFB new file mode 100644 index 000000000000..2874ba2d83f3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_OFB @@ -0,0 +1 @@ +CONFIG_CRYPTO_OFB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_PCBC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_PCBC new file mode 100644 index 000000000000..b59ce6d78181 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_PCBC @@ -0,0 +1 @@ +CONFIG_CRYPTO_PCBC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_PCRYPT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_PCRYPT new file mode 100644 index 000000000000..59772cc021a8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_PCRYPT @@ -0,0 +1 @@ +CONFIG_CRYPTO_PCRYPT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_POLY1305 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_POLY1305 new file mode 100644 index 000000000000..71c5d4b3865a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_POLY1305 @@ -0,0 +1 @@ +CONFIG_CRYPTO_POLY1305=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RMD160 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RMD160 new file mode 100644 index 000000000000..7fd4b816b0cf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RMD160 @@ -0,0 +1 @@ +CONFIG_CRYPTO_RMD160=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RNG_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RNG_DEFAULT new file mode 100644 index 000000000000..e111e0254a92 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RNG_DEFAULT @@ -0,0 +1 @@ +CONFIG_CRYPTO_RNG_DEFAULT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEED b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEED new file mode 100644 index 000000000000..09ec68ec6ad0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEED @@ -0,0 +1 @@ +CONFIG_CRYPTO_SEED=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEQIV b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEQIV new file mode 100644 index 000000000000..01c950cb6f50 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEQIV @@ -0,0 +1 @@ +CONFIG_CRYPTO_SEQIV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SERPENT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SERPENT new file mode 100644 index 000000000000..f8a7bfbb5977 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SERPENT @@ -0,0 +1 @@ +CONFIG_CRYPTO_SERPENT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA1 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA1 new file mode 100644 index 000000000000..82e74b118961 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA1 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SHA1=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA3 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA3 new file mode 100644 index 000000000000..c98ae6eb65e1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA3 @@ -0,0 +1 @@ 
+CONFIG_CRYPTO_SHA3=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA512 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA512 new file mode 100644 index 000000000000..5c25197e538b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SHA512 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SHA512=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_STATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_STATS new file mode 100644 index 000000000000..058c5ef85f1d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_STATS @@ -0,0 +1 @@ +# CONFIG_CRYPTO_STATS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_STREEBOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_STREEBOG new file mode 100644 index 000000000000..67ae425b2da1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_STREEBOG @@ -0,0 +1 @@ +# CONFIG_CRYPTO_STREEBOG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TEA b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TEA new file mode 100644 index 000000000000..53982fa7c62d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TEA @@ -0,0 +1 @@ +CONFIG_CRYPTO_TEA=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TEST new file mode 100644 index 000000000000..a2e883781cae --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TEST @@ -0,0 +1 @@ +CONFIG_CRYPTO_TEST=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TWOFISH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TWOFISH new file mode 100644 index 000000000000..41e8b34b85d0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TWOFISH @@ -0,0 +1 @@ +CONFIG_CRYPTO_TWOFISH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TWOFISH_COMMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TWOFISH_COMMON new file mode 100644 index 
000000000000..92d3fd0bd3dd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_TWOFISH_COMMON @@ -0,0 +1 @@ +CONFIG_CRYPTO_TWOFISH_COMMON=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER new file mode 100644 index 000000000000..9bd523374490 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER @@ -0,0 +1 @@ +CONFIG_CRYPTO_USER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API new file mode 100644 index 000000000000..e99d1bf5c683 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API @@ -0,0 +1 @@ +CONFIG_CRYPTO_USER_API=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_AEAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_AEAD new file mode 100644 index 000000000000..01f9c504d242 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_AEAD @@ -0,0 +1 @@ +CONFIG_CRYPTO_USER_API_AEAD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE new file mode 100644 index 000000000000..21d316c28741 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE @@ -0,0 +1 @@ +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_HASH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_HASH new file mode 100644 index 000000000000..d5d61c6547f4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_HASH @@ -0,0 +1 @@ +CONFIG_CRYPTO_USER_API_HASH=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_RNG b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_RNG new file mode 100644 index 000000000000..3562fafe7bb6 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_RNG @@ -0,0 +1 @@ +CONFIG_CRYPTO_USER_API_RNG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_RNG_CAVP b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_RNG_CAVP new file mode 100644 index 000000000000..7826178972a9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_RNG_CAVP @@ -0,0 +1 @@ +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_SKCIPHER b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_SKCIPHER new file mode 100644 index 000000000000..4a7da08a5ec1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_USER_API_SKCIPHER @@ -0,0 +1 @@ +CONFIG_CRYPTO_USER_API_SKCIPHER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_VMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_VMAC new file mode 100644 index 000000000000..eb719b78e850 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_VMAC @@ -0,0 +1 @@ +CONFIG_CRYPTO_VMAC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_WP512 b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_WP512 new file mode 100644 index 000000000000..d907a1abecbb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_WP512 @@ -0,0 +1 @@ +CONFIG_CRYPTO_WP512=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XCBC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XCBC new file mode 100644 index 000000000000..8984fb2e7c5f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XCBC @@ -0,0 +1 @@ +CONFIG_CRYPTO_XCBC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XTS new file mode 100644 index 000000000000..b7e82261c0f3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XTS @@ -0,0 +1 @@ +CONFIG_CRYPTO_XTS=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XXHASH b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XXHASH new file mode 100644 index 000000000000..95c76019f410 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_XXHASH @@ -0,0 +1 @@ +CONFIG_CRYPTO_XXHASH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ZSTD b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ZSTD new file mode 100644 index 000000000000..3c80ecf2ad2f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ZSTD @@ -0,0 +1 @@ +CONFIG_CRYPTO_ZSTD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CUSE b/anolis/configs/L1-RECOMMEND/default/CONFIG_CUSE new file mode 100644 index 000000000000..9796e51dabc3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CUSE @@ -0,0 +1 @@ +CONFIG_CUSE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_ACPI b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_ACPI new file mode 100644 index 000000000000..8e96f120a919 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_ACPI @@ -0,0 +1 @@ +CONFIG_CXL_ACPI=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_MEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_MEM new file mode 100644 index 000000000000..25be7c22471a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_MEM @@ -0,0 +1 @@ +CONFIG_CXL_MEM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_MEM_RAW_COMMANDS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_MEM_RAW_COMMANDS new file mode 100644 index 000000000000..7349bf47042e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_MEM_RAW_COMMANDS @@ -0,0 +1 @@ +# CONFIG_CXL_MEM_RAW_COMMANDS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PCI b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PCI new file mode 100644 index 000000000000..6f46cf249a8e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PCI @@ -0,0 +1 @@ 
+CONFIG_CXL_PCI=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PMEM new file mode 100644 index 000000000000..8ada02548d15 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PMEM @@ -0,0 +1 @@ +CONFIG_CXL_PMEM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PMU b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PMU new file mode 100644 index 000000000000..0ad05f80c2ab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PMU @@ -0,0 +1 @@ +CONFIG_CXL_PMU=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PORT new file mode 100644 index 000000000000..20d91ef81d80 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_PORT @@ -0,0 +1 @@ +CONFIG_CXL_PORT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_REGION b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_REGION new file mode 100644 index 000000000000..3a4b4b45bb7d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_REGION @@ -0,0 +1 @@ +CONFIG_CXL_REGION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_SUSPEND b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_SUSPEND new file mode 100644 index 000000000000..b9f6e99529ec --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CXL_SUSPEND @@ -0,0 +1 @@ +CONFIG_CXL_SUSPEND=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON new file mode 100644 index 000000000000..05e0ca4a8df4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON @@ -0,0 +1 @@ +CONFIG_DAMON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_DBGFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_DBGFS new file mode 100644 index 000000000000..ab10113a5f98 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_DBGFS @@ -0,0 +1 @@ +CONFIG_DAMON_DBGFS=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_PADDR b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_PADDR new file mode 100644 index 000000000000..8941ac88a9da --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_PADDR @@ -0,0 +1 @@ +CONFIG_DAMON_PADDR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_VADDR b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_VADDR new file mode 100644 index 000000000000..ee67a052d7a9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DAMON_VADDR @@ -0,0 +1 @@ +CONFIG_DAMON_VADDR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DCB b/anolis/configs/L1-RECOMMEND/default/CONFIG_DCB new file mode 100644 index 000000000000..01340a052800 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DCB @@ -0,0 +1 @@ +CONFIG_DCB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FORCE_WEAK_PER_CPU b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FORCE_WEAK_PER_CPU new file mode 100644 index 000000000000..2d64b977959e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FORCE_WEAK_PER_CPU @@ -0,0 +1 @@ +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_ALLOW_ALL b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_ALLOW_ALL new file mode 100644 index 000000000000..69490c3a53d5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_ALLOW_ALL @@ -0,0 +1 @@ +CONFIG_DEBUG_FS_ALLOW_ALL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_ALLOW_NONE b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_ALLOW_NONE new file mode 100644 index 000000000000..2d2a249b14c9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_ALLOW_NONE @@ -0,0 +1 @@ +# CONFIG_DEBUG_FS_ALLOW_NONE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_DISALLOW_MOUNT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_DISALLOW_MOUNT new file mode 
100644 index 000000000000..d70cb272a100 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_FS_DISALLOW_MOUNT @@ -0,0 +1 @@ +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_BTF_MODULES b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_BTF_MODULES new file mode 100644 index 000000000000..bc7bbace27c7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_BTF_MODULES @@ -0,0 +1 @@ +CONFIG_DEBUG_INFO_BTF_MODULES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT new file mode 100644 index 000000000000..f0c49fae6b0c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT @@ -0,0 +1 @@ +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_REDUCED b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_REDUCED new file mode 100644 index 000000000000..e78eada40b6e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_REDUCED @@ -0,0 +1 @@ +# CONFIG_DEBUG_INFO_REDUCED is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_SPLIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_SPLIT new file mode 100644 index 000000000000..dbce5882ebaf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_INFO_SPLIT @@ -0,0 +1 @@ +# CONFIG_DEBUG_INFO_SPLIT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_KERNEL_DC b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_KERNEL_DC new file mode 100644 index 000000000000..8cd6a5085e79 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_KERNEL_DC @@ -0,0 +1 @@ +# CONFIG_DEBUG_KERNEL_DC is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_KMEMLEAK b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_KMEMLEAK 
new file mode 100644 index 000000000000..40fa8633d035 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_KMEMLEAK @@ -0,0 +1 @@ +# CONFIG_DEBUG_KMEMLEAK is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_LIST b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_LIST new file mode 100644 index 000000000000..b5386ce11f16 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_LIST @@ -0,0 +1 @@ +CONFIG_DEBUG_LIST=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_MEMORY_INIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_MEMORY_INIT new file mode 100644 index 000000000000..22b5d9419233 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_MEMORY_INIT @@ -0,0 +1 @@ +CONFIG_DEBUG_MEMORY_INIT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_NOTIFIERS b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_NOTIFIERS new file mode 100644 index 000000000000..4f6c1fe4c5b0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_NOTIFIERS @@ -0,0 +1 @@ +# CONFIG_DEBUG_NOTIFIERS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PAGEALLOC b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PAGEALLOC new file mode 100644 index 000000000000..0e1c7855b0dd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PAGEALLOC @@ -0,0 +1 @@ +# CONFIG_DEBUG_PAGEALLOC is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PAGE_REF b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PAGE_REF new file mode 100644 index 000000000000..c35abbd89164 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PAGE_REF @@ -0,0 +1 @@ +# CONFIG_DEBUG_PAGE_REF is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PLIST b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PLIST new file mode 100644 index 000000000000..602b2be89278 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_PLIST @@ -0,0 +1 @@ +# 
CONFIG_DEBUG_PLIST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_RODATA_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_RODATA_TEST new file mode 100644 index 000000000000..c56477430b90 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_RODATA_TEST @@ -0,0 +1 @@ +# CONFIG_DEBUG_RODATA_TEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_SG b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_SG new file mode 100644 index 000000000000..8c3ab98dd6fb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_SG @@ -0,0 +1 @@ +# CONFIG_DEBUG_SG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_WX b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_WX new file mode 100644 index 000000000000..932dfd61d8f2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEBUG_WX @@ -0,0 +1 @@ +# CONFIG_DEBUG_WX is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_CODEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_CODEL new file mode 100644 index 000000000000..4f8ee1d8124c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_CODEL @@ -0,0 +1 @@ +# CONFIG_DEFAULT_CODEL is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_FQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_FQ new file mode 100644 index 000000000000..d2b368f2b7e0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_FQ @@ -0,0 +1 @@ +# CONFIG_DEFAULT_FQ is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_FQ_CODEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_FQ_CODEL new file mode 100644 index 000000000000..e648072672ba --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_FQ_CODEL @@ -0,0 +1 @@ +CONFIG_DEFAULT_FQ_CODEL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_HUNG_TASK_TIMEOUT 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_HUNG_TASK_TIMEOUT new file mode 100644 index 000000000000..715091634ea0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_HUNG_TASK_TIMEOUT @@ -0,0 +1 @@ +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_MMAP_MIN_ADDR b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_MMAP_MIN_ADDR new file mode 100644 index 000000000000..68e78b7aa230 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_MMAP_MIN_ADDR @@ -0,0 +1 @@ +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_NET_SCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_NET_SCH new file mode 100644 index 000000000000..26ffbc18bf76 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_NET_SCH @@ -0,0 +1 @@ +CONFIG_DEFAULT_NET_SCH="fq_codel" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_PFIFO_FAST b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_PFIFO_FAST new file mode 100644 index 000000000000..4e4f4f3d61d9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_PFIFO_FAST @@ -0,0 +1 @@ +# CONFIG_DEFAULT_PFIFO_FAST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_RENO b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_RENO new file mode 100644 index 000000000000..b284b6c3ea84 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_RENO @@ -0,0 +1 @@ +# CONFIG_DEFAULT_RENO is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_SECURITY_SELINUX b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_SECURITY_SELINUX new file mode 100644 index 000000000000..ef2f9974a530 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_SECURITY_SELINUX @@ -0,0 +1 @@ +CONFIG_DEFAULT_SECURITY_SELINUX=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_SFQ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_SFQ new file mode 100644 index 000000000000..be80a9cf8ab7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_SFQ @@ -0,0 +1 @@ +# CONFIG_DEFAULT_SFQ is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_TCP_CONG b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_TCP_CONG new file mode 100644 index 000000000000..6e69eeae9d31 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEFAULT_TCP_CONG @@ -0,0 +1 @@ +CONFIG_DEFAULT_TCP_CONG="cubic" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEVMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEVMEM new file mode 100644 index 000000000000..174a3f203012 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEVMEM @@ -0,0 +1 @@ +CONFIG_DEVMEM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_CXL b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_CXL new file mode 100644 index 000000000000..377ea24aa0e6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_CXL @@ -0,0 +1 @@ +CONFIG_DEV_DAX_CXL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_HMEM_DEVICES b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_HMEM_DEVICES new file mode 100644 index 000000000000..817d302f0e7d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_HMEM_DEVICES @@ -0,0 +1 @@ +CONFIG_DEV_DAX_HMEM_DEVICES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DLM b/anolis/configs/L1-RECOMMEND/default/CONFIG_DLM new file mode 100644 index 000000000000..5a1c1ed7c1a3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DLM @@ -0,0 +1 @@ +CONFIG_DLM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DMADEVICES_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMADEVICES_DEBUG new file mode 100644 index 000000000000..7cd4fec866f7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMADEVICES_DEBUG @@ -0,0 +1 @@ +# 
CONFIG_DMADEVICES_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DMATEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMATEST new file mode 100644 index 000000000000..d11f7746f8db --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMATEST @@ -0,0 +1 @@ +CONFIG_DMATEST=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_API_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_API_DEBUG new file mode 100644 index 000000000000..cf9e6050ec29 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_API_DEBUG @@ -0,0 +1 @@ +# CONFIG_DMA_API_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_CMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_CMA new file mode 100644 index 000000000000..c7c1c2c43787 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_CMA @@ -0,0 +1 @@ +CONFIG_DMA_CMA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_ENGINE b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_ENGINE new file mode 100644 index 000000000000..167354c01574 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMA_ENGINE @@ -0,0 +1 @@ +CONFIG_DMA_ENGINE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DMIID b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMIID new file mode 100644 index 000000000000..d0ea3622a47b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMIID @@ -0,0 +1 @@ +CONFIG_DMIID=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DMI_SYSFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMI_SYSFS new file mode 100644 index 000000000000..76565caf88f7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DMI_SYSFS @@ -0,0 +1 @@ +CONFIG_DMI_SYSFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CACHE new file mode 100644 index 000000000000..24a42667d204 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CACHE @@ -0,0 +1 @@ 
+CONFIG_DM_CACHE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CACHE_SMQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CACHE_SMQ new file mode 100644 index 000000000000..41f26854fea0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CACHE_SMQ @@ -0,0 +1 @@ +CONFIG_DM_CACHE_SMQ=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CRYPT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CRYPT new file mode 100644 index 000000000000..2ca34ebc75c2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_CRYPT @@ -0,0 +1 @@ +CONFIG_DM_CRYPT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_DEBUG new file mode 100644 index 000000000000..1f45a5797411 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_DEBUG @@ -0,0 +1 @@ +CONFIG_DM_DEBUG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_DELAY b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_DELAY new file mode 100644 index 000000000000..95fbe6249d29 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_DELAY @@ -0,0 +1 @@ +CONFIG_DM_DELAY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ERA b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ERA new file mode 100644 index 000000000000..62543add45df --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ERA @@ -0,0 +1 @@ +CONFIG_DM_ERA=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_FLAKEY b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_FLAKEY new file mode 100644 index 000000000000..a68a41332e01 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_FLAKEY @@ -0,0 +1 @@ +CONFIG_DM_FLAKEY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_INTEGRITY b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_INTEGRITY new file mode 100644 index 000000000000..ee953fd2dfe3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_INTEGRITY @@ -0,0 +1 @@ +CONFIG_DM_INTEGRITY=m diff 
--git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_LOG_USERSPACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_LOG_USERSPACE new file mode 100644 index 000000000000..085b4e385e4f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_LOG_USERSPACE @@ -0,0 +1 @@ +CONFIG_DM_LOG_USERSPACE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_LOG_WRITES b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_LOG_WRITES new file mode 100644 index 000000000000..f9030b9f4c12 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_LOG_WRITES @@ -0,0 +1 @@ +CONFIG_DM_LOG_WRITES=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MIRROR b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MIRROR new file mode 100644 index 000000000000..65d6ea60287e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MIRROR @@ -0,0 +1 @@ +CONFIG_DM_MIRROR=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH new file mode 100644 index 000000000000..3613fcce0d16 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH @@ -0,0 +1 @@ +CONFIG_DM_MULTIPATH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH_QL b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH_QL new file mode 100644 index 000000000000..d220fcab620a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH_QL @@ -0,0 +1 @@ +CONFIG_DM_MULTIPATH_QL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH_ST b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH_ST new file mode 100644 index 000000000000..6bd64251b6eb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_MULTIPATH_ST @@ -0,0 +1 @@ +CONFIG_DM_MULTIPATH_ST=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_RAID b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_RAID new file mode 100644 index 000000000000..c2387b555c52 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_RAID @@ -0,0 +1 @@ +CONFIG_DM_RAID=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_SNAPSHOT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_SNAPSHOT new file mode 100644 index 000000000000..cd20f0919f53 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_SNAPSHOT @@ -0,0 +1 @@ +CONFIG_DM_SNAPSHOT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_SWITCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_SWITCH new file mode 100644 index 000000000000..4dc3ff401d8b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_SWITCH @@ -0,0 +1 @@ +CONFIG_DM_SWITCH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_THIN_PROVISIONING b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_THIN_PROVISIONING new file mode 100644 index 000000000000..b5e94cbd68aa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_THIN_PROVISIONING @@ -0,0 +1 @@ +CONFIG_DM_THIN_PROVISIONING=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_UEVENT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_UEVENT new file mode 100644 index 000000000000..6d88d4893efe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_UEVENT @@ -0,0 +1 @@ +CONFIG_DM_UEVENT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_VERITY b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_VERITY new file mode 100644 index 000000000000..3906845c1614 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_VERITY @@ -0,0 +1 @@ +CONFIG_DM_VERITY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_WRITECACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_WRITECACHE new file mode 100644 index 000000000000..6a1a639c882a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_WRITECACHE @@ -0,0 +1 @@ +CONFIG_DM_WRITECACHE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ZERO b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ZERO new file mode 100644 index 
000000000000..1751792de332 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ZERO @@ -0,0 +1 @@ +CONFIG_DM_ZERO=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ZONED b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ZONED new file mode 100644 index 000000000000..8924814fb9a7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DM_ZONED @@ -0,0 +1 @@ +# CONFIG_DM_ZONED is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMDGPU b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMDGPU new file mode 100644 index 000000000000..a7c6b65b63c0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMDGPU @@ -0,0 +1 @@ +CONFIG_DRM_AMDGPU=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMDGPU_SI b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMDGPU_SI new file mode 100644 index 000000000000..78dd0a3c6d27 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMDGPU_SI @@ -0,0 +1 @@ +# CONFIG_DRM_AMDGPU_SI is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMD_DC b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMD_DC new file mode 100644 index 000000000000..a642bb05ada2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AMD_DC @@ -0,0 +1 @@ +CONFIG_DRM_AMD_DC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AST b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AST new file mode 100644 index 000000000000..d427867f1336 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_AST @@ -0,0 +1 @@ +CONFIG_DRM_AST=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_BOCHS b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_BOCHS new file mode 100644 index 000000000000..8dff9db0645f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_BOCHS @@ -0,0 +1 @@ +CONFIG_DRM_BOCHS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_CIRRUS_QEMU b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_CIRRUS_QEMU new file 
mode 100644 index 000000000000..75df6271a716 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_CIRRUS_QEMU @@ -0,0 +1 @@ +CONFIG_DRM_CIRRUS_QEMU=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_FBDEV_EMULATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_FBDEV_EMULATION new file mode 100644 index 000000000000..16ef2c82ec88 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_FBDEV_EMULATION @@ -0,0 +1 @@ +CONFIG_DRM_FBDEV_EMULATION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_FBDEV_OVERALLOC b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_FBDEV_OVERALLOC new file mode 100644 index 000000000000..32e5c45207e6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_FBDEV_OVERALLOC @@ -0,0 +1 @@ +CONFIG_DRM_FBDEV_OVERALLOC=100 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_I2C_CH7006 b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_I2C_CH7006 new file mode 100644 index 000000000000..0e04442bf1b5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_I2C_CH7006 @@ -0,0 +1 @@ +CONFIG_DRM_I2C_CH7006=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_INSPUR b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_INSPUR new file mode 100644 index 000000000000..b6105f9482be --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_INSPUR @@ -0,0 +1 @@ +CONFIG_DRM_INSPUR=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_LOAD_EDID_FIRMWARE b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_LOAD_EDID_FIRMWARE new file mode 100644 index 000000000000..cbc53f636d0d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_LOAD_EDID_FIRMWARE @@ -0,0 +1 @@ +CONFIG_DRM_LOAD_EDID_FIRMWARE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_MGAG200 b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_MGAG200 new file mode 100644 index 000000000000..48b6c6106fe0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_MGAG200 @@ -0,0 +1 @@ 
+CONFIG_DRM_MGAG200=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU new file mode 100644 index 000000000000..9375fdfc7a1f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU @@ -0,0 +1 @@ +CONFIG_DRM_NOUVEAU=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU_BACKLIGHT b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU_BACKLIGHT new file mode 100644 index 000000000000..bb06e545858d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_NOUVEAU_BACKLIGHT @@ -0,0 +1 @@ +CONFIG_DRM_NOUVEAU_BACKLIGHT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_QXL b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_QXL new file mode 100644 index 000000000000..cff18896bc52 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_QXL @@ -0,0 +1 @@ +CONFIG_DRM_QXL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_RADEON b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_RADEON new file mode 100644 index 000000000000..12dfb1eb31df --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_RADEON @@ -0,0 +1 @@ +CONFIG_DRM_RADEON=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_RADEON_USERPTR b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_RADEON_USERPTR new file mode 100644 index 000000000000..feecc185370b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_RADEON_USERPTR @@ -0,0 +1 @@ +CONFIG_DRM_RADEON_USERPTR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_UDL b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_UDL new file mode 100644 index 000000000000..6b64ab832bf9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_UDL @@ -0,0 +1 @@ +CONFIG_DRM_UDL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_VIRTIO_GPU b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_VIRTIO_GPU new file mode 100644 index 000000000000..3f7fd91d7799 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_DRM_VIRTIO_GPU @@ -0,0 +1 @@ +CONFIG_DRM_VIRTIO_GPU=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DUMMY b/anolis/configs/L1-RECOMMEND/default/CONFIG_DUMMY new file mode 100644 index 000000000000..5c3261758a81 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DUMMY @@ -0,0 +1 @@ +CONFIG_DUMMY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_DW_DMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_DW_DMAC new file mode 100644 index 000000000000..5c8179948d36 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_DW_DMAC @@ -0,0 +1 @@ +CONFIG_DW_DMAC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_E100 b/anolis/configs/L1-RECOMMEND/default/CONFIG_E100 new file mode 100644 index 000000000000..ab8dce967f68 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_E100 @@ -0,0 +1 @@ +# CONFIG_E100 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_E1000 b/anolis/configs/L1-RECOMMEND/default/CONFIG_E1000 new file mode 100644 index 000000000000..7aebd6baf17d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_E1000 @@ -0,0 +1 @@ +CONFIG_E1000=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_E1000E b/anolis/configs/L1-RECOMMEND/default/CONFIG_E1000E new file mode 100644 index 000000000000..5c5726365578 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_E1000E @@ -0,0 +1 @@ +CONFIG_E1000E=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_DEBUG new file mode 100644 index 000000000000..78248bea8a2d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_DEBUG @@ -0,0 +1 @@ +# CONFIG_EDAC_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_GHES b/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_GHES new file mode 100644 index 000000000000..e68c7c4c2776 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_GHES @@ 
-0,0 +1 @@ +CONFIG_EDAC_GHES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_LEGACY_SYSFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_LEGACY_SYSFS new file mode 100644 index 000000000000..5d389a274003 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EDAC_LEGACY_SYSFS @@ -0,0 +1 @@ +CONFIG_EDAC_LEGACY_SYSFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EFIVAR_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFIVAR_FS new file mode 100644 index 000000000000..4e151f1005b2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFIVAR_FS @@ -0,0 +1 @@ +CONFIG_EFIVAR_FS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_CUSTOM_SSDT_OVERLAYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_CUSTOM_SSDT_OVERLAYS new file mode 100644 index 000000000000..e2c7e30e6b05 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_CUSTOM_SSDT_OVERLAYS @@ -0,0 +1 @@ +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_SOFT_RESERVE b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_SOFT_RESERVE new file mode 100644 index 000000000000..f6b5ec7c7269 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_SOFT_RESERVE @@ -0,0 +1 @@ +CONFIG_EFI_SOFT_RESERVE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_STUB b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_STUB new file mode 100644 index 000000000000..c8859686c10a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_STUB @@ -0,0 +1 @@ +CONFIG_EFI_STUB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_VARS_PSTORE b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_VARS_PSTORE new file mode 100644 index 000000000000..231576abfac4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_VARS_PSTORE @@ -0,0 +1 @@ +CONFIG_EFI_VARS_PSTORE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE new file mode 100644 index 000000000000..b26ce1fb9a85 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE @@ -0,0 +1 @@ +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ENCRYPTED_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_ENCRYPTED_KEYS new file mode 100644 index 000000000000..09d264daff2b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ENCRYPTED_KEYS @@ -0,0 +1 @@ +CONFIG_ENCRYPTED_KEYS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_DEBUG new file mode 100644 index 000000000000..5363c0f0a759 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_DEBUG @@ -0,0 +1 @@ +# CONFIG_EROFS_FS_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ONDEMAND b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ONDEMAND new file mode 100644 index 000000000000..c738efed2f78 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ONDEMAND @@ -0,0 +1 @@ +CONFIG_EROFS_FS_ONDEMAND=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_POSIX_ACL b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_POSIX_ACL new file mode 100644 index 000000000000..fe4f9a82613f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_POSIX_ACL @@ -0,0 +1 @@ +CONFIG_EROFS_FS_POSIX_ACL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_SECURITY b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_SECURITY new file mode 100644 index 000000000000..f24b07262b0e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_SECURITY @@ -0,0 +1 @@ +CONFIG_EROFS_FS_SECURITY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_XATTR b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_XATTR new file mode 100644 
index 000000000000..751034acd74d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_XATTR @@ -0,0 +1 @@ +CONFIG_EROFS_FS_XATTR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP new file mode 100644 index 000000000000..fe558502b301 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP @@ -0,0 +1 @@ +CONFIG_EROFS_FS_ZIP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP_DEFLATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP_DEFLATE new file mode 100644 index 000000000000..a5b66f23823f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP_DEFLATE @@ -0,0 +1 @@ +CONFIG_EROFS_FS_ZIP_DEFLATE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP_LZMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP_LZMA new file mode 100644 index 000000000000..843dc4e89d4e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_ZIP_LZMA @@ -0,0 +1 @@ +CONFIG_EROFS_FS_ZIP_LZMA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_ADD_XATTRS b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_ADD_XATTRS new file mode 100644 index 000000000000..687632a21f2b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_ADD_XATTRS @@ -0,0 +1 @@ +# CONFIG_EVM_ADD_XATTRS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_ATTR_FSUUID b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_ATTR_FSUUID new file mode 100644 index 000000000000..559a1dad3497 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_ATTR_FSUUID @@ -0,0 +1 @@ +CONFIG_EVM_ATTR_FSUUID=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_LOAD_X509 b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_LOAD_X509 new file mode 100644 index 000000000000..0dd95a176560 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_LOAD_X509 @@ -0,0 +1 @@ 
+CONFIG_EVM_LOAD_X509=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_X509_PATH b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_X509_PATH new file mode 100644 index 000000000000..11b63bed0287 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EVM_X509_PATH @@ -0,0 +1 @@ +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EXFAT_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXFAT_FS new file mode 100644 index 000000000000..2113d81064a9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXFAT_FS @@ -0,0 +1 @@ +# CONFIG_EXFAT_FS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EXPORTFS_BLOCK_OPS b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXPORTFS_BLOCK_OPS new file mode 100644 index 000000000000..a796344a723a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXPORTFS_BLOCK_OPS @@ -0,0 +1 @@ +CONFIG_EXPORTFS_BLOCK_OPS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT2_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT2_FS new file mode 100644 index 000000000000..95332e711c4c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT2_FS @@ -0,0 +1 @@ +# CONFIG_EXT2_FS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT4_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT4_DEBUG new file mode 100644 index 000000000000..cade17e50e65 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT4_DEBUG @@ -0,0 +1 @@ +# CONFIG_EXT4_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT4_USE_FOR_EXT2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT4_USE_FOR_EXT2 new file mode 100644 index 000000000000..05d6610ffde8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXT4_USE_FOR_EXT2 @@ -0,0 +1 @@ +CONFIG_EXT4_USE_FOR_EXT2=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EXTRA_FIRMWARE b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXTRA_FIRMWARE new file mode 100644 
index 000000000000..46a0a270c15d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EXTRA_FIRMWARE @@ -0,0 +1 @@ +CONFIG_EXTRA_FIRMWARE="" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FAILOVER b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAILOVER new file mode 100644 index 000000000000..2c85d6ab76e3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAILOVER @@ -0,0 +1 @@ +CONFIG_FAILOVER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FANOTIFY_ACCESS_PERMISSIONS b/anolis/configs/L1-RECOMMEND/default/CONFIG_FANOTIFY_ACCESS_PERMISSIONS new file mode 100644 index 000000000000..197a02ec791f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FANOTIFY_ACCESS_PERMISSIONS @@ -0,0 +1 @@ +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_CODEPAGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_CODEPAGE new file mode 100644 index 000000000000..280e26edb308 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_CODEPAGE @@ -0,0 +1 @@ +CONFIG_FAT_DEFAULT_CODEPAGE=437 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_IOCHARSET b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_IOCHARSET new file mode 100644 index 000000000000..f4e49e2a3dd4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_IOCHARSET @@ -0,0 +1 @@ +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_UTF8 b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_UTF8 new file mode 100644 index 000000000000..ee85a55f00dd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAT_DEFAULT_UTF8 @@ -0,0 +1 @@ +# CONFIG_FAT_DEFAULT_UTF8 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FAULT_INJECTION b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAULT_INJECTION new file mode 100644 index 000000000000..288866e72b30 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_FAULT_INJECTION @@ -0,0 +1 @@ +# CONFIG_FAULT_INJECTION is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_EFI b/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_EFI new file mode 100644 index 000000000000..62adf192c62a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_EFI @@ -0,0 +1 @@ +CONFIG_FB_EFI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_LS2K500 b/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_LS2K500 new file mode 100644 index 000000000000..d34e5b98ecaf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_LS2K500 @@ -0,0 +1 @@ +# CONFIG_FB_LS2K500 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_TILEBLITTING b/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_TILEBLITTING new file mode 100644 index 000000000000..f3b6635cff97 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FB_TILEBLITTING @@ -0,0 +1 @@ +CONFIG_FB_TILEBLITTING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FCOE b/anolis/configs/L1-RECOMMEND/default/CONFIG_FCOE new file mode 100644 index 000000000000..0516a090373a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FCOE @@ -0,0 +1 @@ +CONFIG_FCOE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FIX_EARLYCON_MEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_FIX_EARLYCON_MEM new file mode 100644 index 000000000000..10b715239bab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FIX_EARLYCON_MEM @@ -0,0 +1 @@ +CONFIG_FIX_EARLYCON_MEM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FM10K b/anolis/configs/L1-RECOMMEND/default/CONFIG_FM10K new file mode 100644 index 000000000000..c9b11d9bd884 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FM10K @@ -0,0 +1 @@ +CONFIG_FM10K=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FORTIFY_SOURCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_FORTIFY_SOURCE new file mode 100644 index 
000000000000..926b56799e78 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FORTIFY_SOURCE @@ -0,0 +1 @@ +CONFIG_FORTIFY_SOURCE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY b/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY new file mode 100644 index 000000000000..3153802cdb9e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY @@ -0,0 +1 @@ +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAMEBUFFER_CONSOLE_ROTATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAMEBUFFER_CONSOLE_ROTATION new file mode 100644 index 000000000000..3887f86bbd23 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAMEBUFFER_CONSOLE_ROTATION @@ -0,0 +1 @@ +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAME_WARN b/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAME_WARN new file mode 100644 index 000000000000..6826578df976 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FRAME_WARN @@ -0,0 +1 @@ +CONFIG_FRAME_WARN=2048 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FSCACHE_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_FSCACHE_DEBUG new file mode 100644 index 000000000000..4da10ce35549 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FSCACHE_DEBUG @@ -0,0 +1 @@ +# CONFIG_FSCACHE_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FSCACHE_STATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_FSCACHE_STATS new file mode 100644 index 000000000000..a65dcfa31c0a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FSCACHE_STATS @@ -0,0 +1 @@ +CONFIG_FSCACHE_STATS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FTRACE_RECORD_RECURSION b/anolis/configs/L1-RECOMMEND/default/CONFIG_FTRACE_RECORD_RECURSION new file mode 100644 index 000000000000..613dd9d01aef --- /dev/null 
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FTRACE_RECORD_RECURSION @@ -0,0 +1 @@ +# CONFIG_FTRACE_RECORD_RECURSION is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSE_DAX b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSE_DAX new file mode 100644 index 000000000000..cc453d1c5bc7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSE_DAX @@ -0,0 +1 @@ +CONFIG_FUSE_DAX=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION new file mode 100644 index 000000000000..6c920d6b1e7f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION @@ -0,0 +1 @@ +CONFIG_FUSION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_CTL b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_CTL new file mode 100644 index 000000000000..f72467bd7804 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_CTL @@ -0,0 +1 @@ +# CONFIG_FUSION_CTL is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_FC b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_FC new file mode 100644 index 000000000000..ce3b17f6cdcc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_FC @@ -0,0 +1 @@ +# CONFIG_FUSION_FC is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_LOGGING b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_LOGGING new file mode 100644 index 000000000000..0c2d45eff713 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_LOGGING @@ -0,0 +1 @@ +CONFIG_FUSION_LOGGING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_MAX_SGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_MAX_SGE new file mode 100644 index 000000000000..7e3440002f1c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_MAX_SGE @@ -0,0 +1 @@ +CONFIG_FUSION_MAX_SGE=128 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_SAS b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_SAS 
new file mode 100644 index 000000000000..fa9006f153d7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_SAS @@ -0,0 +1 @@ +CONFIG_FUSION_SAS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_SPI b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_SPI new file mode 100644 index 000000000000..938c1127599c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_SPI @@ -0,0 +1 @@ +CONFIG_FUSION_SPI=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CACHE new file mode 100644 index 000000000000..374610244e84 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CACHE @@ -0,0 +1 @@ +CONFIG_FW_CACHE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS new file mode 100644 index 000000000000..5c41f98a0b27 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS @@ -0,0 +1 @@ +CONFIG_FW_CFG_SYSFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS_CMDLINE b/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS_CMDLINE new file mode 100644 index 000000000000..ffca1920fe67 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_FW_CFG_SYSFS_CMDLINE @@ -0,0 +1 @@ +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GACT_PROB b/anolis/configs/L1-RECOMMEND/default/CONFIG_GACT_PROB new file mode 100644 index 000000000000..aaec40095e7f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GACT_PROB @@ -0,0 +1 @@ +CONFIG_GACT_PROB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GCOV_KERNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_GCOV_KERNEL new file mode 100644 index 000000000000..05f92777b7aa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GCOV_KERNEL @@ -0,0 +1 @@ +# CONFIG_GCOV_KERNEL is not set diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_GDB_SCRIPTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_GDB_SCRIPTS new file mode 100644 index 000000000000..72774e707930 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GDB_SCRIPTS @@ -0,0 +1 @@ +# CONFIG_GDB_SCRIPTS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_DEBUGFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_DEBUGFS new file mode 100644 index 000000000000..539bb6640b82 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_GENERIC_IRQ_DEBUGFS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_INJECTION b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_INJECTION new file mode 100644 index 000000000000..11c9853455ea --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_INJECTION @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_INJECTION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_PROBE b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_PROBE new file mode 100644 index 000000000000..99ffc502cc26 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_PROBE @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_PROBE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_SHOW b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_SHOW new file mode 100644 index 000000000000..6de8d0b4de3f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_IRQ_SHOW @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_SHOW=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GENEVE b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENEVE new file mode 100644 index 000000000000..99344bcd9e12 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENEVE @@ -0,0 +1 @@ +CONFIG_GENEVE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GET_FREE_REGION b/anolis/configs/L1-RECOMMEND/default/CONFIG_GET_FREE_REGION new file mode 
100644 index 000000000000..b64cd9e90b37 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GET_FREE_REGION @@ -0,0 +1 @@ +CONFIG_GET_FREE_REGION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_DS4520 b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_DS4520 new file mode 100644 index 000000000000..efc77941e54c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_DS4520 @@ -0,0 +1 @@ +# CONFIG_GPIO_DS4520 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_FXL6408 b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_FXL6408 new file mode 100644 index 000000000000..6a63066040c5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_FXL6408 @@ -0,0 +1 @@ +# CONFIG_GPIO_FXL6408 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_GENERIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_GENERIC new file mode 100644 index 000000000000..a1dfb641912f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_GENERIC @@ -0,0 +1 @@ +CONFIG_GPIO_GENERIC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_LATCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_LATCH new file mode 100644 index 000000000000..4fcea037be64 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_LATCH @@ -0,0 +1 @@ +# CONFIG_GPIO_LATCH is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_SIM b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_SIM new file mode 100644 index 000000000000..177dc4b4d1d4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_SIM @@ -0,0 +1 @@ +# CONFIG_GPIO_SIM is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_VIRTIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_VIRTIO new file mode 100644 index 000000000000..5524fe6a6c1f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GPIO_VIRTIO @@ -0,0 +1 @@ +# CONFIG_GPIO_VIRTIO is not set diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_GP_PCI1XXXX b/anolis/configs/L1-RECOMMEND/default/CONFIG_GP_PCI1XXXX new file mode 100644 index 000000000000..5cd838999bc4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GP_PCI1XXXX @@ -0,0 +1 @@ +# CONFIG_GP_PCI1XXXX is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GUEST_PERF_EVENTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_GUEST_PERF_EVENTS new file mode 100644 index 000000000000..11cfcba0b0d2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GUEST_PERF_EVENTS @@ -0,0 +1 @@ +CONFIG_GUEST_PERF_EVENTS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GUP_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_GUP_TEST new file mode 100644 index 000000000000..5df4896bb447 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_GUP_TEST @@ -0,0 +1 @@ +# CONFIG_GUP_TEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HARDENED_USERCOPY b/anolis/configs/L1-RECOMMEND/default/CONFIG_HARDENED_USERCOPY new file mode 100644 index 000000000000..d2dcc857f2dc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HARDENED_USERCOPY @@ -0,0 +1 @@ +CONFIG_HARDENED_USERCOPY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HEADERS_INSTALL b/anolis/configs/L1-RECOMMEND/default/CONFIG_HEADERS_INSTALL new file mode 100644 index 000000000000..5b30575118a3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HEADERS_INSTALL @@ -0,0 +1 @@ +# CONFIG_HEADERS_INSTALL is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATE_CALLBACKS b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATE_CALLBACKS new file mode 100644 index 000000000000..563709ddefb6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATE_CALLBACKS @@ -0,0 +1 @@ +CONFIG_HIBERNATE_CALLBACKS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATION new file mode 100644 index 
000000000000..8df6f5c694d2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATION @@ -0,0 +1 @@ +CONFIG_HIBERNATION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATION_SNAPSHOT_DEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATION_SNAPSHOT_DEV new file mode 100644 index 000000000000..c4627aa7e564 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIBERNATION_SNAPSHOT_DEV @@ -0,0 +1 @@ +CONFIG_HIBERNATION_SNAPSHOT_DEV=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HID b/anolis/configs/L1-RECOMMEND/default/CONFIG_HID new file mode 100644 index 000000000000..b82f510fc4be --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HID @@ -0,0 +1 @@ +CONFIG_HID=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HID_SUPPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_HID_SUPPORT new file mode 100644 index 000000000000..27e01b36f61b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HID_SUPPORT @@ -0,0 +1 @@ +CONFIG_HID_SUPPORT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HINIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_HINIC new file mode 100644 index 000000000000..5a9004f4a1c4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HINIC @@ -0,0 +1 @@ +CONFIG_HINIC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HIST_TRIGGERS b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIST_TRIGGERS new file mode 100644 index 000000000000..93ab853e5a46 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HIST_TRIGGERS @@ -0,0 +1 @@ +CONFIG_HIST_TRIGGERS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HOTPLUG_PCI_ACPI b/anolis/configs/L1-RECOMMEND/default/CONFIG_HOTPLUG_PCI_ACPI new file mode 100644 index 000000000000..8086e293ad0a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HOTPLUG_PCI_ACPI @@ -0,0 +1 @@ +CONFIG_HOTPLUG_PCI_ACPI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HWLAT_TRACER 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_HWLAT_TRACER new file mode 100644 index 000000000000..cfdda978444b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HWLAT_TRACER @@ -0,0 +1 @@ +CONFIG_HWLAT_TRACER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HWPOISON_INJECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_HWPOISON_INJECT new file mode 100644 index 000000000000..ae646aa80fd1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HWPOISON_INJECT @@ -0,0 +1 @@ +CONFIG_HWPOISON_INJECT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HW_RANDOM_TPM b/anolis/configs/L1-RECOMMEND/default/CONFIG_HW_RANDOM_TPM new file mode 100644 index 000000000000..d991b3c93ca4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HW_RANDOM_TPM @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_TPM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HYDCU_FIXUP_HEADER b/anolis/configs/L1-RECOMMEND/default/CONFIG_HYDCU_FIXUP_HEADER new file mode 100644 index 000000000000..5a7a369f9fdb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_HYDCU_FIXUP_HEADER @@ -0,0 +1 @@ +# CONFIG_HYDCU_FIXUP_HEADER is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_CHARDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_CHARDEV new file mode 100644 index 000000000000..d843de200b5a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_CHARDEV @@ -0,0 +1 @@ +CONFIG_I2C_CHARDEV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_MUX b/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_MUX new file mode 100644 index 000000000000..6982ed98a06f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_MUX @@ -0,0 +1 @@ +CONFIG_I2C_MUX=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_SMBUS b/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_SMBUS new file mode 100644 index 000000000000..1a756e77bd86 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_I2C_SMBUS @@ -0,0 +1 @@ 
+CONFIG_I2C_SMBUS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_I6300ESB_WDT b/anolis/configs/L1-RECOMMEND/default/CONFIG_I6300ESB_WDT new file mode 100644 index 000000000000..e65f0870cc21 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_I6300ESB_WDT @@ -0,0 +1 @@ +CONFIG_I6300ESB_WDT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IFB b/anolis/configs/L1-RECOMMEND/default/CONFIG_IFB new file mode 100644 index 000000000000..738210cd4c13 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IFB @@ -0,0 +1 @@ +CONFIG_IFB=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IGB_HWMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_IGB_HWMON new file mode 100644 index 000000000000..d47de32e6225 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IGB_HWMON @@ -0,0 +1 @@ +CONFIG_IGB_HWMON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IGC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IGC new file mode 100644 index 000000000000..f744b9900faa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IGC @@ -0,0 +1 @@ +CONFIG_IGC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IKCONFIG b/anolis/configs/L1-RECOMMEND/default/CONFIG_IKCONFIG new file mode 100644 index 000000000000..3d80bd86a196 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IKCONFIG @@ -0,0 +1 @@ +CONFIG_IKCONFIG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IKCONFIG_PROC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IKCONFIG_PROC new file mode 100644 index 000000000000..7d45f8bfb857 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IKCONFIG_PROC @@ -0,0 +1 @@ +CONFIG_IKCONFIG_PROC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IKHEADERS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IKHEADERS new file mode 100644 index 000000000000..e214495e0279 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IKHEADERS @@ -0,0 +1 @@ +# CONFIG_IKHEADERS is not set diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_ILLEGAL_POINTER_VALUE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ILLEGAL_POINTER_VALUE new file mode 100644 index 000000000000..5fa2f045f2be --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ILLEGAL_POINTER_VALUE @@ -0,0 +1 @@ +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE new file mode 100644 index 000000000000..da04fd67d6a6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE @@ -0,0 +1 @@ +CONFIG_IMA_APPRAISE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_BOOTPARAM b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_BOOTPARAM new file mode 100644 index 000000000000..000a58fb65a3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_BOOTPARAM @@ -0,0 +1 @@ +CONFIG_IMA_APPRAISE_BOOTPARAM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_BUILD_POLICY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_BUILD_POLICY new file mode 100644 index 000000000000..b89ec93a48ad --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_BUILD_POLICY @@ -0,0 +1 @@ +CONFIG_IMA_APPRAISE_BUILD_POLICY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_MODSIG b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_MODSIG new file mode 100644 index 000000000000..2718d45137c7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_MODSIG @@ -0,0 +1 @@ +# CONFIG_IMA_APPRAISE_MODSIG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS new file mode 100644 index 000000000000..64c5f2bf79fd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS @@ -0,0 +1 @@ +# 
CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS new file mode 100644 index 000000000000..bf15301d4ca9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS @@ -0,0 +1 @@ +# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS new file mode 100644 index 000000000000..6596de713c57 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS @@ -0,0 +1 @@ +# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS new file mode 100644 index 000000000000..3abafe60ec9f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS @@ -0,0 +1 @@ +# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_SIGNED_INIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_SIGNED_INIT new file mode 100644 index 000000000000..2c92177c72d6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_APPRAISE_SIGNED_INIT @@ -0,0 +1 @@ +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_ARCH_POLICY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_ARCH_POLICY new file mode 100644 index 000000000000..7187ae0dce9d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_ARCH_POLICY @@ -0,0 +1 @@ +# CONFIG_IMA_ARCH_POLICY is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_BLACKLIST_KEYRING 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_BLACKLIST_KEYRING new file mode 100644 index 000000000000..7457767336f4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_BLACKLIST_KEYRING @@ -0,0 +1 @@ +CONFIG_IMA_BLACKLIST_KEYRING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH new file mode 100644 index 000000000000..35a36af692ea --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH @@ -0,0 +1 @@ +CONFIG_IMA_DEFAULT_HASH="sha256" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA1 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA1 new file mode 100644 index 000000000000..b51889849965 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA1 @@ -0,0 +1 @@ +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA256 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA256 new file mode 100644 index 000000000000..e627fd9e9a2f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA256 @@ -0,0 +1 @@ +CONFIG_IMA_DEFAULT_HASH_SHA256=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA512 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA512 new file mode 100644 index 000000000000..63c78568591f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SHA512 @@ -0,0 +1 @@ +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SM3 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SM3 new file mode 100644 index 000000000000..d00f8cdc20aa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_HASH_SM3 @@ -0,0 +1 @@ +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_TEMPLATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_TEMPLATE new file mode 100644 index 000000000000..0d38cb6c5fe0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_DEFAULT_TEMPLATE @@ -0,0 +1 @@ +CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY new file mode 100644 index 000000000000..08056234d1d9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY @@ -0,0 +1 @@ +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_LOAD_X509 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_LOAD_X509 new file mode 100644 index 000000000000..37c785db29b3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_LOAD_X509 @@ -0,0 +1 @@ +CONFIG_IMA_LOAD_X509=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_LSM_RULES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_LSM_RULES new file mode 100644 index 000000000000..97d7dd6429a4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_LSM_RULES @@ -0,0 +1 @@ +CONFIG_IMA_LSM_RULES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS new file mode 100644 index 000000000000..3682c2753d20 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS @@ -0,0 +1 @@ +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_MEASURE_PCR_IDX b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_MEASURE_PCR_IDX new file mode 100644 index 000000000000..685377b3111d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_MEASURE_PCR_IDX @@ 
-0,0 +1 @@ +CONFIG_IMA_MEASURE_PCR_IDX=10 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_NG_TEMPLATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_NG_TEMPLATE new file mode 100644 index 000000000000..970afd0203f0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_NG_TEMPLATE @@ -0,0 +1 @@ +# CONFIG_IMA_NG_TEMPLATE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS new file mode 100644 index 000000000000..eb31e05a08b7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS @@ -0,0 +1 @@ +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_READ_POLICY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_READ_POLICY new file mode 100644 index 000000000000..8f280d80334d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_READ_POLICY @@ -0,0 +1 @@ +CONFIG_IMA_READ_POLICY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT new file mode 100644 index 000000000000..3dbf4221f830 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT @@ -0,0 +1 @@ +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_SIG_TEMPLATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_SIG_TEMPLATE new file mode 100644 index 000000000000..f1d95dcd9671 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_SIG_TEMPLATE @@ -0,0 +1 @@ +CONFIG_IMA_SIG_TEMPLATE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_WRITE_POLICY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_WRITE_POLICY new file mode 100644 index 000000000000..e54ce85d7ff0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_WRITE_POLICY @@ -0,0 +1 @@ 
+CONFIG_IMA_WRITE_POLICY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_X509_PATH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_X509_PATH new file mode 100644 index 000000000000..2b2332402234 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IMA_X509_PATH @@ -0,0 +1 @@ +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_AH b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_AH new file mode 100644 index 000000000000..9e4fbe002bb5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_AH @@ -0,0 +1 @@ +CONFIG_INET6_AH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESP b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESP new file mode 100644 index 000000000000..e3a4a08d6174 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESP @@ -0,0 +1 @@ +CONFIG_INET6_ESP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESPINTCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESPINTCP new file mode 100644 index 000000000000..1b4ebe140e27 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESPINTCP @@ -0,0 +1 @@ +# CONFIG_INET6_ESPINTCP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESP_OFFLOAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESP_OFFLOAD new file mode 100644 index 000000000000..24c35000494d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_ESP_OFFLOAD @@ -0,0 +1 @@ +CONFIG_INET6_ESP_OFFLOAD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_IPCOMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_IPCOMP new file mode 100644 index 000000000000..40e0dde512e6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_IPCOMP @@ -0,0 +1 @@ +CONFIG_INET6_IPCOMP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_TUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_TUNNEL new file mode 100644 index 
000000000000..78cd37a0a3b7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_TUNNEL @@ -0,0 +1 @@ +CONFIG_INET6_TUNNEL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_XFRM_TUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_XFRM_TUNNEL new file mode 100644 index 000000000000..0b2416447fc6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET6_XFRM_TUNNEL @@ -0,0 +1 @@ +CONFIG_INET6_XFRM_TUNNEL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_AH b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_AH new file mode 100644 index 000000000000..89b662d11236 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_AH @@ -0,0 +1 @@ +CONFIG_INET_AH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_DIAG_DESTROY b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_DIAG_DESTROY new file mode 100644 index 000000000000..6b50163279b2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_DIAG_DESTROY @@ -0,0 +1 @@ +# CONFIG_INET_DIAG_DESTROY is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESP b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESP new file mode 100644 index 000000000000..36f69aa2a72a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESP @@ -0,0 +1 @@ +CONFIG_INET_ESP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESPINTCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESPINTCP new file mode 100644 index 000000000000..04ac14c89384 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESPINTCP @@ -0,0 +1 @@ +# CONFIG_INET_ESPINTCP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESP_OFFLOAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESP_OFFLOAD new file mode 100644 index 000000000000..d5cca6d1daeb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_ESP_OFFLOAD @@ -0,0 +1 @@ +CONFIG_INET_ESP_OFFLOAD=m diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_IPCOMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_IPCOMP new file mode 100644 index 000000000000..27b6ba9d8fa9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_IPCOMP @@ -0,0 +1 @@ +CONFIG_INET_IPCOMP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_RAW_DIAG b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_RAW_DIAG new file mode 100644 index 000000000000..b1932e98a8e8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INET_RAW_DIAG @@ -0,0 +1 @@ +CONFIG_INET_RAW_DIAG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ADDR_TRANS b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ADDR_TRANS new file mode 100644 index 000000000000..b68f9bbd9837 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ADDR_TRANS @@ -0,0 +1 @@ +CONFIG_INFINIBAND_ADDR_TRANS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ERDMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ERDMA new file mode 100644 index 000000000000..a83695df816c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ERDMA @@ -0,0 +1 @@ +CONFIG_INFINIBAND_ERDMA=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB new file mode 100644 index 000000000000..8548857aa9bb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB @@ -0,0 +1 @@ +CONFIG_INFINIBAND_IPOIB=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB_CM b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB_CM new file mode 100644 index 000000000000..e4086829975d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB_CM @@ -0,0 +1 @@ +CONFIG_INFINIBAND_IPOIB_CM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB_DEBUG new 
file mode 100644 index 000000000000..d86370d98b24 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_IPOIB_DEBUG @@ -0,0 +1 @@ +CONFIG_INFINIBAND_IPOIB_DEBUG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ISER b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ISER new file mode 100644 index 000000000000..c963877f5e7c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ISER @@ -0,0 +1 @@ +CONFIG_INFINIBAND_ISER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ISERT b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ISERT new file mode 100644 index 000000000000..62b0f926c6ec --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ISERT @@ -0,0 +1 @@ +CONFIG_INFINIBAND_ISERT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_MTHCA b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_MTHCA new file mode 100644 index 000000000000..a134e36a320a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_MTHCA @@ -0,0 +1 @@ +# CONFIG_INFINIBAND_MTHCA is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ON_DEMAND_PAGING b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ON_DEMAND_PAGING new file mode 100644 index 000000000000..ea5a3d8d8b6f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_ON_DEMAND_PAGING @@ -0,0 +1 @@ +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_RTRS_CLIENT b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_RTRS_CLIENT new file mode 100644 index 000000000000..4d8db5cda5a8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_RTRS_CLIENT @@ -0,0 +1 @@ +# CONFIG_INFINIBAND_RTRS_CLIENT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_RTRS_SERVER b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_RTRS_SERVER new file mode 100644 index 
000000000000..dfdaaad5e995 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_RTRS_SERVER @@ -0,0 +1 @@ +# CONFIG_INFINIBAND_RTRS_SERVER is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_SRP b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_SRP new file mode 100644 index 000000000000..b382bf656659 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_SRP @@ -0,0 +1 @@ +CONFIG_INFINIBAND_SRP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_SRPT b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_SRPT new file mode 100644 index 000000000000..ff4989b54945 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_SRPT @@ -0,0 +1 @@ +CONFIG_INFINIBAND_SRPT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_USER_ACCESS b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_USER_ACCESS new file mode 100644 index 000000000000..1b11b6f4aefc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_USER_ACCESS @@ -0,0 +1 @@ +CONFIG_INFINIBAND_USER_ACCESS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_USER_MAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_USER_MAD new file mode 100644 index 000000000000..36e7eb29d29f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_USER_MAD @@ -0,0 +1 @@ +CONFIG_INFINIBAND_USER_MAD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INITRAMFS_SOURCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_INITRAMFS_SOURCE new file mode 100644 index 000000000000..becad4d7ed34 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INITRAMFS_SOURCE @@ -0,0 +1 @@ +CONFIG_INITRAMFS_SOURCE="" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INIT_ON_ALLOC_DEFAULT_ON b/anolis/configs/L1-RECOMMEND/default/CONFIG_INIT_ON_ALLOC_DEFAULT_ON new file mode 100644 index 000000000000..67917dc22f18 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_INIT_ON_ALLOC_DEFAULT_ON @@ -0,0 +1 @@ +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INIT_ON_FREE_DEFAULT_ON b/anolis/configs/L1-RECOMMEND/default/CONFIG_INIT_ON_FREE_DEFAULT_ON new file mode 100644 index 000000000000..5fd7392f7dbe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INIT_ON_FREE_DEFAULT_ON @@ -0,0 +1 @@ +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INOTIFY_USER b/anolis/configs/L1-RECOMMEND/default/CONFIG_INOTIFY_USER new file mode 100644 index 000000000000..eb2e0c9a8145 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INOTIFY_USER @@ -0,0 +1 @@ +CONFIG_INOTIFY_USER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INPUT_EVDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_INPUT_EVDEV new file mode 100644 index 000000000000..b738491e8046 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INPUT_EVDEV @@ -0,0 +1 @@ +CONFIG_INPUT_EVDEV=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_ASYMMETRIC_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_ASYMMETRIC_KEYS new file mode 100644 index 000000000000..a1485b903d08 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_ASYMMETRIC_KEYS @@ -0,0 +1 @@ +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_AUDIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_AUDIT new file mode 100644 index 000000000000..09d5db2b6a8c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_AUDIT @@ -0,0 +1 @@ +CONFIG_INTEGRITY_AUDIT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_PLATFORM_KEYRING b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_PLATFORM_KEYRING new file mode 100644 index 000000000000..a7b1b167b479 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_PLATFORM_KEYRING @@ -0,0 +1 @@ +CONFIG_INTEGRITY_PLATFORM_KEYRING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_SIGNATURE b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_SIGNATURE new file mode 100644 index 000000000000..2d104809dd91 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_SIGNATURE @@ -0,0 +1 @@ +CONFIG_INTEGRITY_SIGNATURE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_TRUSTED_KEYRING b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_TRUSTED_KEYRING new file mode 100644 index 000000000000..cfb23d479a99 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INTEGRITY_TRUSTED_KEYRING @@ -0,0 +1 @@ +CONFIG_INTEGRITY_TRUSTED_KEYRING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_API b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_API new file mode 100644 index 000000000000..0d7838e89dc5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_API @@ -0,0 +1 @@ +CONFIG_IOMMU_API=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DEBUGFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DEBUGFS new file mode 100644 index 000000000000..ac8aa1de6e72 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_IOMMU_DEBUGFS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DEFAULT_DMA_LAZY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DEFAULT_DMA_LAZY new file mode 100644 index 000000000000..8d9990cfabf4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DEFAULT_DMA_LAZY @@ -0,0 +1 @@ +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DMA new file mode 100644 index 000000000000..a9155fba14ea --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_DMA @@ -0,0 +1 @@ 
+CONFIG_IOMMU_DMA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_IOVA b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_IOVA new file mode 100644 index 000000000000..70bdc8af504f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_IOVA @@ -0,0 +1 @@ +CONFIG_IOMMU_IOVA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_IO_PGTABLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_IO_PGTABLE new file mode 100644 index 000000000000..bef737908c5b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IOMMU_IO_PGTABLE @@ -0,0 +1 @@ +CONFIG_IOMMU_IO_PGTABLE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IO_STRICT_DEVMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_IO_STRICT_DEVMEM new file mode 100644 index 000000000000..f95505f7cc9c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IO_STRICT_DEVMEM @@ -0,0 +1 @@ +# CONFIG_IO_STRICT_DEVMEM is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_FILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_FILTER new file mode 100644 index 000000000000..40c33ab1f627 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_FILTER @@ -0,0 +1 @@ +CONFIG_IP6_NF_FILTER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_IPTABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_IPTABLES new file mode 100644 index 000000000000..6505ac4fe894 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_IPTABLES @@ -0,0 +1 @@ +CONFIG_IP6_NF_IPTABLES=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MANGLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MANGLE new file mode 100644 index 000000000000..20221d52326e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MANGLE @@ -0,0 +1 @@ +CONFIG_IP6_NF_MANGLE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_AH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_AH new file mode 100644 index 
000000000000..c3a8d4bbfd13 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_AH @@ -0,0 +1 @@ +CONFIG_IP6_NF_MATCH_AH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_EUI64 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_EUI64 new file mode 100644 index 000000000000..caa47b591cbf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_EUI64 @@ -0,0 +1 @@ +CONFIG_IP6_NF_MATCH_EUI64=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_FRAG b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_FRAG new file mode 100644 index 000000000000..e288cb5386dc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_FRAG @@ -0,0 +1 @@ +CONFIG_IP6_NF_MATCH_FRAG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_HL b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_HL new file mode 100644 index 000000000000..179ce8d4763f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_HL @@ -0,0 +1 @@ +CONFIG_IP6_NF_MATCH_HL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_IPV6HEADER b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_IPV6HEADER new file mode 100644 index 000000000000..e5c78739634e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_IPV6HEADER @@ -0,0 +1 @@ +CONFIG_IP6_NF_MATCH_IPV6HEADER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_MH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_MH new file mode 100644 index 000000000000..81b6caa1f7ea --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_MH @@ -0,0 +1 @@ +CONFIG_IP6_NF_MATCH_MH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_OPTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_OPTS new file mode 100644 index 000000000000..ff6c9a495eff --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_OPTS 
@@ -0,0 +1 @@ +CONFIG_IP6_NF_MATCH_OPTS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_RPFILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_RPFILTER new file mode 100644 index 000000000000..7b0204783acd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_RPFILTER @@ -0,0 +1 @@ +CONFIG_IP6_NF_MATCH_RPFILTER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_RT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_RT new file mode 100644 index 000000000000..4f7509d4fcce --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_RT @@ -0,0 +1 @@ +CONFIG_IP6_NF_MATCH_RT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_SRH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_SRH new file mode 100644 index 000000000000..5f5f25c4d9c2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_MATCH_SRH @@ -0,0 +1 @@ +# CONFIG_IP6_NF_MATCH_SRH is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_NAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_NAT new file mode 100644 index 000000000000..9796b0b059a8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_NAT @@ -0,0 +1 @@ +CONFIG_IP6_NF_NAT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_RAW b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_RAW new file mode 100644 index 000000000000..d84b4a62d5d8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_RAW @@ -0,0 +1 @@ +CONFIG_IP6_NF_RAW=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_SECURITY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_SECURITY new file mode 100644 index 000000000000..01c6144f893c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_SECURITY @@ -0,0 +1 @@ +CONFIG_IP6_NF_SECURITY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_HL 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_HL new file mode 100644 index 000000000000..d094bc611c56 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_HL @@ -0,0 +1 @@ +# CONFIG_IP6_NF_TARGET_HL is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_MASQUERADE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_MASQUERADE new file mode 100644 index 000000000000..4b63a260a877 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_MASQUERADE @@ -0,0 +1 @@ +CONFIG_IP6_NF_TARGET_MASQUERADE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_NPT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_NPT new file mode 100644 index 000000000000..e9a67d4fe963 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_NPT @@ -0,0 +1 @@ +CONFIG_IP6_NF_TARGET_NPT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_REJECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_REJECT new file mode 100644 index 000000000000..2de07d601e9a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_REJECT @@ -0,0 +1 @@ +CONFIG_IP6_NF_TARGET_REJECT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_SYNPROXY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_SYNPROXY new file mode 100644 index 000000000000..6a3245b1f85c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_SYNPROXY @@ -0,0 +1 @@ +CONFIG_IP6_NF_TARGET_SYNPROXY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_DEVICE_INTERFACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_DEVICE_INTERFACE new file mode 100644 index 000000000000..3d0a94d76b3a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_DEVICE_INTERFACE @@ -0,0 +1 @@ +CONFIG_IPMI_DEVICE_INTERFACE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_DMI_DECODE 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_DMI_DECODE new file mode 100644 index 000000000000..7444b769c2fa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_DMI_DECODE @@ -0,0 +1 @@ +CONFIG_IPMI_DMI_DECODE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PANIC_EVENT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PANIC_EVENT new file mode 100644 index 000000000000..c95560a24c1f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PANIC_EVENT @@ -0,0 +1 @@ +CONFIG_IPMI_PANIC_EVENT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PANIC_STRING b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PANIC_STRING new file mode 100644 index 000000000000..c560a1299414 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PANIC_STRING @@ -0,0 +1 @@ +CONFIG_IPMI_PANIC_STRING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PLAT_DATA b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PLAT_DATA new file mode 100644 index 000000000000..ae2c67ead77d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_PLAT_DATA @@ -0,0 +1 @@ +CONFIG_IPMI_PLAT_DATA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_POWEROFF b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_POWEROFF new file mode 100644 index 000000000000..e37543efcac9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_POWEROFF @@ -0,0 +1 @@ +CONFIG_IPMI_POWEROFF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_SI b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_SI new file mode 100644 index 000000000000..ba6bb31db42c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_SI @@ -0,0 +1 @@ +CONFIG_IPMI_SI=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_SSIF b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_SSIF new file mode 100644 index 000000000000..d563156fbda6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_SSIF @@ -0,0 +1 @@ 
+CONFIG_IPMI_SSIF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_WATCHDOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_WATCHDOG new file mode 100644 index 000000000000..4feb657b8ec2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPMI_WATCHDOG @@ -0,0 +1 @@ +CONFIG_IPMI_WATCHDOG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_GRE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_GRE new file mode 100644 index 000000000000..d182bc16193e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_GRE @@ -0,0 +1 @@ +CONFIG_IPV6_GRE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ILA b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ILA new file mode 100644 index 000000000000..c9fa4cf9d9a7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ILA @@ -0,0 +1 @@ +# CONFIG_IPV6_ILA is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MIP6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MIP6 new file mode 100644 index 000000000000..c3c95b5570d9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MIP6 @@ -0,0 +1 @@ +CONFIG_IPV6_MIP6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MROUTE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MROUTE new file mode 100644 index 000000000000..5dcf23403f69 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MROUTE @@ -0,0 +1 @@ +CONFIG_IPV6_MROUTE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MROUTE_MULTIPLE_TABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MROUTE_MULTIPLE_TABLES new file mode 100644 index 000000000000..0e6ac689574c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MROUTE_MULTIPLE_TABLES @@ -0,0 +1 @@ +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MULTIPLE_TABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MULTIPLE_TABLES new file mode 100644 index 
000000000000..18c565e1b99e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_MULTIPLE_TABLES @@ -0,0 +1 @@ +CONFIG_IPV6_MULTIPLE_TABLES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_NDISC_NODETYPE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_NDISC_NODETYPE new file mode 100644 index 000000000000..46c0fcea2165 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_NDISC_NODETYPE @@ -0,0 +1 @@ +CONFIG_IPV6_NDISC_NODETYPE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_OPTIMISTIC_DAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_OPTIMISTIC_DAD new file mode 100644 index 000000000000..a60dd2fc2b92 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_OPTIMISTIC_DAD @@ -0,0 +1 @@ +CONFIG_IPV6_OPTIMISTIC_DAD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_PIMSM_V2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_PIMSM_V2 new file mode 100644 index 000000000000..f0b06614627d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_PIMSM_V2 @@ -0,0 +1 @@ +CONFIG_IPV6_PIMSM_V2=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ROUTER_PREF b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ROUTER_PREF new file mode 100644 index 000000000000..8f5958c8f1f2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ROUTER_PREF @@ -0,0 +1 @@ +CONFIG_IPV6_ROUTER_PREF=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ROUTE_INFO b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ROUTE_INFO new file mode 100644 index 000000000000..842b4d775f95 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_ROUTE_INFO @@ -0,0 +1 @@ +CONFIG_IPV6_ROUTE_INFO=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_RPL_LWTUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_RPL_LWTUNNEL new file mode 100644 index 000000000000..695e2a10a10d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_RPL_LWTUNNEL @@ -0,0 +1 @@ 
+# CONFIG_IPV6_RPL_LWTUNNEL is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SEG6_HMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SEG6_HMAC new file mode 100644 index 000000000000..b98fbf937d1d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SEG6_HMAC @@ -0,0 +1 @@ +# CONFIG_IPV6_SEG6_HMAC is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SEG6_LWTUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SEG6_LWTUNNEL new file mode 100644 index 000000000000..cedc498cb1b8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SEG6_LWTUNNEL @@ -0,0 +1 @@ +# CONFIG_IPV6_SEG6_LWTUNNEL is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SIT new file mode 100644 index 000000000000..af75f60dd325 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SIT @@ -0,0 +1 @@ +CONFIG_IPV6_SIT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SIT_6RD b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SIT_6RD new file mode 100644 index 000000000000..ab337b29b09e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SIT_6RD @@ -0,0 +1 @@ +CONFIG_IPV6_SIT_6RD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SUBTREES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SUBTREES new file mode 100644 index 000000000000..e79ed9830940 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_SUBTREES @@ -0,0 +1 @@ +CONFIG_IPV6_SUBTREES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_TUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_TUNNEL new file mode 100644 index 000000000000..2665c69ef3b3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_TUNNEL @@ -0,0 +1 @@ +CONFIG_IPV6_TUNNEL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_VTI b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_VTI new file mode 100644 index 
000000000000..cb7d072de381 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPV6_VTI @@ -0,0 +1 @@ +CONFIG_IPV6_VTI=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPVLAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPVLAN new file mode 100644 index 000000000000..7926522454e5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPVLAN @@ -0,0 +1 @@ +CONFIG_IPVLAN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IPVTAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPVTAP new file mode 100644 index 000000000000..4bb3856734cd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IPVTAP @@ -0,0 +1 @@ +CONFIG_IPVTAP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ADVANCED_ROUTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ADVANCED_ROUTER new file mode 100644 index 000000000000..d8f8f5b4ee78 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ADVANCED_ROUTER @@ -0,0 +1 @@ +CONFIG_IP_ADVANCED_ROUTER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_DCCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_DCCP new file mode 100644 index 000000000000..6ecb43a3e349 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_DCCP @@ -0,0 +1 @@ +# CONFIG_IP_DCCP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_FIB_TRIE_STATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_FIB_TRIE_STATS new file mode 100644 index 000000000000..c6bc9c42c1d6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_FIB_TRIE_STATS @@ -0,0 +1 @@ +CONFIG_IP_FIB_TRIE_STATS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MROUTE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MROUTE new file mode 100644 index 000000000000..56fa1578de21 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MROUTE @@ -0,0 +1 @@ +CONFIG_IP_MROUTE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MROUTE_MULTIPLE_TABLES 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MROUTE_MULTIPLE_TABLES new file mode 100644 index 000000000000..450a74bd6788 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MROUTE_MULTIPLE_TABLES @@ -0,0 +1 @@ +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MULTICAST b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MULTICAST new file mode 100644 index 000000000000..15dbdffe14f9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MULTICAST @@ -0,0 +1 @@ +CONFIG_IP_MULTICAST=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MULTIPLE_TABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MULTIPLE_TABLES new file mode 100644 index 000000000000..b68058a302d7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_MULTIPLE_TABLES @@ -0,0 +1 @@ +CONFIG_IP_MULTIPLE_TABLES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_ARPFILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_ARPFILTER new file mode 100644 index 000000000000..06b00645c72a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_ARPFILTER @@ -0,0 +1 @@ +CONFIG_IP_NF_ARPFILTER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_ARP_MANGLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_ARP_MANGLE new file mode 100644 index 000000000000..2af015359f7f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_ARP_MANGLE @@ -0,0 +1 @@ +CONFIG_IP_NF_ARP_MANGLE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_FILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_FILTER new file mode 100644 index 000000000000..24e5d7ee297a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_FILTER @@ -0,0 +1 @@ +CONFIG_IP_NF_FILTER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_IPTABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_IPTABLES new file mode 100644 index 000000000000..5238d9e0b563 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_IPTABLES @@ -0,0 +1 @@ +CONFIG_IP_NF_IPTABLES=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MANGLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MANGLE new file mode 100644 index 000000000000..f0082088892a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MANGLE @@ -0,0 +1 @@ +CONFIG_IP_NF_MANGLE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_AH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_AH new file mode 100644 index 000000000000..e9277f2bb7a3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_AH @@ -0,0 +1 @@ +CONFIG_IP_NF_MATCH_AH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_ECN b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_ECN new file mode 100644 index 000000000000..15cb5557877c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_ECN @@ -0,0 +1 @@ +CONFIG_IP_NF_MATCH_ECN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_RPFILTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_RPFILTER new file mode 100644 index 000000000000..cce5cf1e41de --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_RPFILTER @@ -0,0 +1 @@ +CONFIG_IP_NF_MATCH_RPFILTER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_TTL b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_TTL new file mode 100644 index 000000000000..82e731606e7d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_MATCH_TTL @@ -0,0 +1 @@ +CONFIG_IP_NF_MATCH_TTL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_NAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_NAT new file mode 100644 index 000000000000..d2aa272407dc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_NAT @@ -0,0 +1 @@ +CONFIG_IP_NF_NAT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_ECN 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_ECN new file mode 100644 index 000000000000..3f81954ca20b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_ECN @@ -0,0 +1 @@ +CONFIG_IP_NF_TARGET_ECN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_MASQUERADE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_MASQUERADE new file mode 100644 index 000000000000..a72c7fad593d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_MASQUERADE @@ -0,0 +1 @@ +CONFIG_IP_NF_TARGET_MASQUERADE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_NETMAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_NETMAP new file mode 100644 index 000000000000..7b44a7a7617b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_NETMAP @@ -0,0 +1 @@ +CONFIG_IP_NF_TARGET_NETMAP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_REDIRECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_REDIRECT new file mode 100644 index 000000000000..0f42816a9023 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_REDIRECT @@ -0,0 +1 @@ +CONFIG_IP_NF_TARGET_REDIRECT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_REJECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_REJECT new file mode 100644 index 000000000000..7db5e7783a78 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_REJECT @@ -0,0 +1 @@ +CONFIG_IP_NF_TARGET_REJECT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_SYNPROXY b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_SYNPROXY new file mode 100644 index 000000000000..8fc66aff47a4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_SYNPROXY @@ -0,0 +1 @@ +CONFIG_IP_NF_TARGET_SYNPROXY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_TTL 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_TTL new file mode 100644 index 000000000000..f45533e024c3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_NF_TARGET_TTL @@ -0,0 +1 @@ +CONFIG_IP_NF_TARGET_TTL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_PIMSM_V1 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_PIMSM_V1 new file mode 100644 index 000000000000..8d52cd2eb204 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_PIMSM_V1 @@ -0,0 +1 @@ +CONFIG_IP_PIMSM_V1=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_PIMSM_V2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_PIMSM_V2 new file mode 100644 index 000000000000..a475102ea8d6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_PIMSM_V2 @@ -0,0 +1 @@ +CONFIG_IP_PIMSM_V2=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ROUTE_MULTIPATH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ROUTE_MULTIPATH new file mode 100644 index 000000000000..9daaed3b6824 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ROUTE_MULTIPATH @@ -0,0 +1 @@ +CONFIG_IP_ROUTE_MULTIPATH=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ROUTE_VERBOSE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ROUTE_VERBOSE new file mode 100644 index 000000000000..f616d8ad1608 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_ROUTE_VERBOSE @@ -0,0 +1 @@ +CONFIG_IP_ROUTE_VERBOSE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_IP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_IP new file mode 100644 index 000000000000..194e715a0606 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_IP @@ -0,0 +1 @@ +CONFIG_IP_SET_BITMAP_IP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_IPMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_IPMAC new file mode 100644 index 000000000000..26361aab6480 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_IPMAC @@ -0,0 +1 @@ +CONFIG_IP_SET_BITMAP_IPMAC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_PORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_PORT new file mode 100644 index 000000000000..b251fecbf6ff --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_BITMAP_PORT @@ -0,0 +1 @@ +CONFIG_IP_SET_BITMAP_PORT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IP new file mode 100644 index 000000000000..ed55391a0248 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IP @@ -0,0 +1 @@ +CONFIG_IP_SET_HASH_IP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPMAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPMAC new file mode 100644 index 000000000000..5e50cb0d7724 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPMAC @@ -0,0 +1 @@ +CONFIG_IP_SET_HASH_IPMAC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPMARK new file mode 100644 index 000000000000..f93dde91427d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPMARK @@ -0,0 +1 @@ +CONFIG_IP_SET_HASH_IPMARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORT new file mode 100644 index 000000000000..8f2368eb11d0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORT @@ -0,0 +1 @@ +CONFIG_IP_SET_HASH_IPPORT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORTIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORTIP new file mode 100644 index 000000000000..ce51dedf7db2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORTIP @@ -0,0 +1 @@ 
+CONFIG_IP_SET_HASH_IPPORTIP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORTNET b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORTNET new file mode 100644 index 000000000000..755daa4ab439 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_IPPORTNET @@ -0,0 +1 @@ +CONFIG_IP_SET_HASH_IPPORTNET=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_MAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_MAC new file mode 100644 index 000000000000..f0f2f1eab422 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_MAC @@ -0,0 +1 @@ +CONFIG_IP_SET_HASH_MAC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NET b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NET new file mode 100644 index 000000000000..2f1258e6a1bf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NET @@ -0,0 +1 @@ +CONFIG_IP_SET_HASH_NET=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETIFACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETIFACE new file mode 100644 index 000000000000..3f37e0275537 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETIFACE @@ -0,0 +1 @@ +CONFIG_IP_SET_HASH_NETIFACE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETNET b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETNET new file mode 100644 index 000000000000..c644763e4ff5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETNET @@ -0,0 +1 @@ +CONFIG_IP_SET_HASH_NETNET=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETPORT new file mode 100644 index 000000000000..ad2168816a79 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETPORT @@ -0,0 +1 @@ +CONFIG_IP_SET_HASH_NETPORT=m diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETPORTNET b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETPORTNET new file mode 100644 index 000000000000..416de30b8d39 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_HASH_NETPORTNET @@ -0,0 +1 @@ +CONFIG_IP_SET_HASH_NETPORTNET=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_LIST_SET b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_LIST_SET new file mode 100644 index 000000000000..71cafb46d3b0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_LIST_SET @@ -0,0 +1 @@ +CONFIG_IP_SET_LIST_SET=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_MAX b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_MAX new file mode 100644 index 000000000000..e1ae2701a2a0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_SET_MAX @@ -0,0 +1 @@ +CONFIG_IP_SET_MAX=256 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_DEBUG new file mode 100644 index 000000000000..19dd826277df --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_DEBUG @@ -0,0 +1 @@ +# CONFIG_IP_VS_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_DH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_DH new file mode 100644 index 000000000000..0740f4f2f9fa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_DH @@ -0,0 +1 @@ +CONFIG_IP_VS_DH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_FO b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_FO new file mode 100644 index 000000000000..335f1003d574 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_FO @@ -0,0 +1 @@ +CONFIG_IP_VS_FO=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_FTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_FTP new file mode 100644 index 000000000000..9bea42ef4230 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_FTP @@ -0,0 +1 @@ +CONFIG_IP_VS_FTP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LBLC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LBLC new file mode 100644 index 000000000000..e2e9dda11108 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LBLC @@ -0,0 +1 @@ +CONFIG_IP_VS_LBLC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LBLCR b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LBLCR new file mode 100644 index 000000000000..4e8c22960246 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LBLCR @@ -0,0 +1 @@ +CONFIG_IP_VS_LBLCR=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LC new file mode 100644 index 000000000000..bde490d673e1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_LC @@ -0,0 +1 @@ +CONFIG_IP_VS_LC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_MH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_MH new file mode 100644 index 000000000000..1d07e9a6114b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_MH @@ -0,0 +1 @@ +CONFIG_IP_VS_MH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_MH_TAB_INDEX b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_MH_TAB_INDEX new file mode 100644 index 000000000000..5e8640114282 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_MH_TAB_INDEX @@ -0,0 +1 @@ +CONFIG_IP_VS_MH_TAB_INDEX=12 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_NFCT b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_NFCT new file mode 100644 index 000000000000..96260705f201 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_NFCT @@ -0,0 +1 @@ +CONFIG_IP_VS_NFCT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_NQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_NQ new file mode 100644 index 000000000000..9ac21c3a57d4 
--- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_NQ @@ -0,0 +1 @@ +CONFIG_IP_VS_NQ=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_OVF b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_OVF new file mode 100644 index 000000000000..1adeebe14dde --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_OVF @@ -0,0 +1 @@ +CONFIG_IP_VS_OVF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_PE_SIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_PE_SIP new file mode 100644 index 000000000000..29b770f0ffd8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_PE_SIP @@ -0,0 +1 @@ +CONFIG_IP_VS_PE_SIP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_RR b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_RR new file mode 100644 index 000000000000..0f146f62e782 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_RR @@ -0,0 +1 @@ +CONFIG_IP_VS_RR=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SED b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SED new file mode 100644 index 000000000000..9689e6f0c894 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SED @@ -0,0 +1 @@ +CONFIG_IP_VS_SED=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SH b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SH new file mode 100644 index 000000000000..17b4fd13ebbd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SH @@ -0,0 +1 @@ +CONFIG_IP_VS_SH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SH_TAB_BITS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SH_TAB_BITS new file mode 100644 index 000000000000..0a0f326b82c9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_SH_TAB_BITS @@ -0,0 +1 @@ +CONFIG_IP_VS_SH_TAB_BITS=8 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_TAB_BITS b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_TAB_BITS new file mode 100644 index 
000000000000..b41927ca0bc3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_TAB_BITS @@ -0,0 +1 @@ +CONFIG_IP_VS_TAB_BITS=12 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_WLC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_WLC new file mode 100644 index 000000000000..79fb718ad182 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_WLC @@ -0,0 +1 @@ +CONFIG_IP_VS_WLC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_WRR b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_WRR new file mode 100644 index 000000000000..3dabf2db5991 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_VS_WRR @@ -0,0 +1 @@ +CONFIG_IP_VS_WRR=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_BOOT_SYSFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_BOOT_SYSFS new file mode 100644 index 000000000000..1b9880a01972 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_BOOT_SYSFS @@ -0,0 +1 @@ +CONFIG_ISCSI_BOOT_SYSFS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_TARGET b/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_TARGET new file mode 100644 index 000000000000..78b66a50858e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_TARGET @@ -0,0 +1 @@ +CONFIG_ISCSI_TARGET=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_TCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_TCP new file mode 100644 index 000000000000..fefe32a01b12 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ISCSI_TCP @@ -0,0 +1 @@ +CONFIG_ISCSI_TCP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBEVF_IPSEC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBEVF_IPSEC new file mode 100644 index 000000000000..38828328ee61 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBEVF_IPSEC @@ -0,0 +1 @@ +CONFIG_IXGBEVF_IPSEC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_DCB 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_DCB new file mode 100644 index 000000000000..aacb4453ac5e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_DCB @@ -0,0 +1 @@ +CONFIG_IXGBE_DCB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_HWMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_HWMON new file mode 100644 index 000000000000..27194b4ae523 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_HWMON @@ -0,0 +1 @@ +CONFIG_IXGBE_HWMON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_IPSEC b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_IPSEC new file mode 100644 index 000000000000..17c87410fac0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_IXGBE_IPSEC @@ -0,0 +1 @@ +CONFIG_IXGBE_IPSEC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_JBD2_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_JBD2_DEBUG new file mode 100644 index 000000000000..bce5ddaf272d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_JBD2_DEBUG @@ -0,0 +1 @@ +# CONFIG_JBD2_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_JOLIET b/anolis/configs/L1-RECOMMEND/default/CONFIG_JOLIET new file mode 100644 index 000000000000..4a9f8fd47c6d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_JOLIET @@ -0,0 +1 @@ +CONFIG_JOLIET=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KASAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_KASAN new file mode 100644 index 000000000000..31767b1fe4e2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KASAN @@ -0,0 +1 @@ +# CONFIG_KASAN is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KCOV b/anolis/configs/L1-RECOMMEND/default/CONFIG_KCOV new file mode 100644 index 000000000000..736eb752a23f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KCOV @@ -0,0 +1 @@ +# CONFIG_KCOV is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_CONTINUE_CATASTROPHIC 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_CONTINUE_CATASTROPHIC new file mode 100644 index 000000000000..47466928f766 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_CONTINUE_CATASTROPHIC @@ -0,0 +1 @@ +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_KEYBOARD b/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_KEYBOARD new file mode 100644 index 000000000000..afe601d90f06 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_KEYBOARD @@ -0,0 +1 @@ +CONFIG_KDB_KEYBOARD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KEXEC_SIG b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEXEC_SIG new file mode 100644 index 000000000000..67b68865886c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEXEC_SIG @@ -0,0 +1 @@ +CONFIG_KEXEC_SIG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYS new file mode 100644 index 000000000000..957ee122d275 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYS @@ -0,0 +1 @@ +CONFIG_KEYS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYS_REQUEST_CACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYS_REQUEST_CACHE new file mode 100644 index 000000000000..7d5a6bb6127c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYS_REQUEST_CACHE @@ -0,0 +1 @@ +# CONFIG_KEYS_REQUEST_CACHE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KEY_DH_OPERATIONS b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEY_DH_OPERATIONS new file mode 100644 index 000000000000..87d8b646df85 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEY_DH_OPERATIONS @@ -0,0 +1 @@ +# CONFIG_KEY_DH_OPERATIONS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_DEFERRABLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_DEFERRABLE new file mode 100644 index 000000000000..6f6a6a279aea --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_DEFERRABLE @@ -0,0 +1 @@ +CONFIG_KFENCE_DEFERRABLE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_NUM_OBJECTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_NUM_OBJECTS new file mode 100644 index 000000000000..d3f46787a4dd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_NUM_OBJECTS @@ -0,0 +1 @@ +CONFIG_KFENCE_NUM_OBJECTS=255 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_SAMPLE_INTERVAL b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_SAMPLE_INTERVAL new file mode 100644 index 000000000000..d5ab530e803a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_SAMPLE_INTERVAL @@ -0,0 +1 @@ +CONFIG_KFENCE_SAMPLE_INTERVAL=0 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_STRESS_TEST_FAULTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_STRESS_TEST_FAULTS new file mode 100644 index 000000000000..6cdbcfa610e3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KFENCE_STRESS_TEST_FAULTS @@ -0,0 +1 @@ +CONFIG_KFENCE_STRESS_TEST_FAULTS=0 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_HONOUR_BLOCKLIST b/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_HONOUR_BLOCKLIST new file mode 100644 index 000000000000..7da79f661883 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_HONOUR_BLOCKLIST @@ -0,0 +1 @@ +CONFIG_KGDB_HONOUR_BLOCKLIST=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_KDB b/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_KDB new file mode 100644 index 000000000000..3bb4d48c4395 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_KDB @@ -0,0 +1 @@ +CONFIG_KGDB_KDB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_TESTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_TESTS new file mode 100644 index 000000000000..2defea72ef13 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KGDB_TESTS @@ -0,0 +1 @@ +CONFIG_KGDB_TESTS=y 
diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KPROBE_EVENTS_ON_NOTRACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_KPROBE_EVENTS_ON_NOTRACE new file mode 100644 index 000000000000..d36844fdb504 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KPROBE_EVENTS_ON_NOTRACE @@ -0,0 +1 @@ +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_KSM b/anolis/configs/L1-RECOMMEND/default/CONFIG_KSM new file mode 100644 index 000000000000..757efcb905c2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_KSM @@ -0,0 +1 @@ +CONFIG_KSM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LDISC_AUTOLOAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_LDISC_AUTOLOAD new file mode 100644 index 000000000000..4f92dceabf3a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LDISC_AUTOLOAD @@ -0,0 +1 @@ +CONFIG_LDISC_AUTOLOAD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LIBFC b/anolis/configs/L1-RECOMMEND/default/CONFIG_LIBFC new file mode 100644 index 000000000000..af692f8d7b56 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LIBFC @@ -0,0 +1 @@ +CONFIG_LIBFC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LIBFCOE b/anolis/configs/L1-RECOMMEND/default/CONFIG_LIBFCOE new file mode 100644 index 000000000000..8c693951bbad --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LIBFCOE @@ -0,0 +1 @@ +CONFIG_LIBFCOE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LIST_HARDENED b/anolis/configs/L1-RECOMMEND/default/CONFIG_LIST_HARDENED new file mode 100644 index 000000000000..99cee0814645 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LIST_HARDENED @@ -0,0 +1 @@ +CONFIG_LIST_HARDENED=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LOAD_UEFI_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOAD_UEFI_KEYS new file mode 100644 index 000000000000..22502e981748 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOAD_UEFI_KEYS @@ -0,0 +1 @@ +CONFIG_LOAD_UEFI_KEYS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCALVERSION b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCALVERSION new file mode 100644 index 000000000000..22833e328af2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCALVERSION @@ -0,0 +1 @@ +CONFIG_LOCALVERSION="" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCALVERSION_AUTO b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCALVERSION_AUTO new file mode 100644 index 000000000000..3addafb9eabe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCALVERSION_AUTO @@ -0,0 +1 @@ +# CONFIG_LOCALVERSION_AUTO is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCKDEP_SUPPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCKDEP_SUPPORT new file mode 100644 index 000000000000..b2f2705c18ca --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCKDEP_SUPPORT @@ -0,0 +1 @@ +CONFIG_LOCKDEP_SUPPORT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCK_EVENT_COUNTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCK_EVENT_COUNTS new file mode 100644 index 000000000000..8fb759417d49 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOCK_EVENT_COUNTS @@ -0,0 +1 @@ +# CONFIG_LOCK_EVENT_COUNTS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LOG_CPU_MAX_BUF_SHIFT b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOG_CPU_MAX_BUF_SHIFT new file mode 100644 index 000000000000..a3c44ae926ec --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOG_CPU_MAX_BUF_SHIFT @@ -0,0 +1 @@ +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LOOPBACK_TARGET b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOOPBACK_TARGET new file mode 100644 index 000000000000..8d0415cbf0db --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LOOPBACK_TARGET @@ -0,0 +1 @@ +CONFIG_LOOPBACK_TARGET=m diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_LRU_GEN_ENABLED b/anolis/configs/L1-RECOMMEND/default/CONFIG_LRU_GEN_ENABLED new file mode 100644 index 000000000000..d32c5afd6e39 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LRU_GEN_ENABLED @@ -0,0 +1 @@ +# CONFIG_LRU_GEN_ENABLED is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LRU_GEN_STATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_LRU_GEN_STATS new file mode 100644 index 000000000000..44b5a476d4cb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LRU_GEN_STATS @@ -0,0 +1 @@ +# CONFIG_LRU_GEN_STATS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LSM b/anolis/configs/L1-RECOMMEND/default/CONFIG_LSM new file mode 100644 index 000000000000..e9af105b6c24 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LSM @@ -0,0 +1 @@ +CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LSM_MMAP_MIN_ADDR b/anolis/configs/L1-RECOMMEND/default/CONFIG_LSM_MMAP_MIN_ADDR new file mode 100644 index 000000000000..8a24c1f03fe6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LSM_MMAP_MIN_ADDR @@ -0,0 +1 @@ +CONFIG_LSM_MMAP_MIN_ADDR=65535 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LTO_NONE b/anolis/configs/L1-RECOMMEND/default/CONFIG_LTO_NONE new file mode 100644 index 000000000000..8e9a8bbdac2c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LTO_NONE @@ -0,0 +1 @@ +CONFIG_LTO_NONE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LWTUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_LWTUNNEL new file mode 100644 index 000000000000..1bd2e1c28eab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_LWTUNNEL @@ -0,0 +1 @@ +CONFIG_LWTUNNEL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_LWTUNNEL_BPF b/anolis/configs/L1-RECOMMEND/default/CONFIG_LWTUNNEL_BPF new file mode 100644 index 000000000000..ecd9c79a2f9f --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_LWTUNNEL_BPF @@ -0,0 +1 @@ +CONFIG_LWTUNNEL_BPF=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MACSEC b/anolis/configs/L1-RECOMMEND/default/CONFIG_MACSEC new file mode 100644 index 000000000000..678c117d07fe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MACSEC @@ -0,0 +1 @@ +CONFIG_MACSEC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MACVLAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_MACVLAN new file mode 100644 index 000000000000..76ead66855f9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MACVLAN @@ -0,0 +1 @@ +CONFIG_MACVLAN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MACVTAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_MACVTAP new file mode 100644 index 000000000000..ee7d8996d470 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MACVTAP @@ -0,0 +1 @@ +CONFIG_MACVTAP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE new file mode 100644 index 000000000000..6d80e2cddcae --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE @@ -0,0 +1 @@ +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_SERIAL b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_SERIAL new file mode 100644 index 000000000000..649399011f5f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_SERIAL @@ -0,0 +1 @@ +CONFIG_MAGIC_SYSRQ_SERIAL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE new file mode 100644 index 000000000000..3b2b7e8deee7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE @@ -0,0 +1 @@ +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_MAX_SKB_FRAGS b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAX_SKB_FRAGS new file mode 100644 index 000000000000..10682c4542d5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MAX_SKB_FRAGS @@ -0,0 +1 @@ +CONFIG_MAX_SKB_FRAGS=17 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_AUTODETECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_AUTODETECT new file mode 100644 index 000000000000..29191f7e1ed2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_AUTODETECT @@ -0,0 +1 @@ +CONFIG_MD_AUTODETECT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_CLUSTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_CLUSTER new file mode 100644 index 000000000000..1750ff8ca837 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_CLUSTER @@ -0,0 +1 @@ +CONFIG_MD_CLUSTER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_FAULTY b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_FAULTY new file mode 100644 index 000000000000..d332a7499c80 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_FAULTY @@ -0,0 +1 @@ +CONFIG_MD_FAULTY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_LINEAR b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_LINEAR new file mode 100644 index 000000000000..7ea8e58fb564 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_LINEAR @@ -0,0 +1 @@ +CONFIG_MD_LINEAR=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_MULTIPATH b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_MULTIPATH new file mode 100644 index 000000000000..c95cb9cb3e6f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_MULTIPATH @@ -0,0 +1 @@ +# CONFIG_MD_MULTIPATH is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID0 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID0 new file mode 100644 index 000000000000..bc197d6db237 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID0 @@ -0,0 +1 @@ 
+CONFIG_MD_RAID0=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID1 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID1 new file mode 100644 index 000000000000..b54611532c51 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID1 @@ -0,0 +1 @@ +CONFIG_MD_RAID1=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID10 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID10 new file mode 100644 index 000000000000..fbfc466709ac --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID10 @@ -0,0 +1 @@ +CONFIG_MD_RAID10=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID456 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID456 new file mode 100644 index 000000000000..f5f9b93e1c86 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MD_RAID456 @@ -0,0 +1 @@ +CONFIG_MD_RAID456=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_LEGACY b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_LEGACY new file mode 100644 index 000000000000..3109de7c1fe0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_LEGACY @@ -0,0 +1 @@ +# CONFIG_MEGARAID_LEGACY is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_NEWGEN b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_NEWGEN new file mode 100644 index 000000000000..a132e5f39ca8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_NEWGEN @@ -0,0 +1 @@ +# CONFIG_MEGARAID_NEWGEN is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_SAS b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_SAS new file mode 100644 index 000000000000..c32b82a357a5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEGARAID_SAS @@ -0,0 +1 @@ +CONFIG_MEGARAID_SAS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY new file mode 100644 index 000000000000..0e3aceb9597a --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY @@ -0,0 +1 @@ +# CONFIG_MEMORY is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY_BALLOON b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY_BALLOON new file mode 100644 index 000000000000..d3b2adc6b971 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY_BALLOON @@ -0,0 +1 @@ +CONFIG_MEMORY_BALLOON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY_HOTREMOVE b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY_HOTREMOVE new file mode 100644 index 000000000000..362150e6923c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMORY_HOTREMOVE @@ -0,0 +1 @@ +CONFIG_MEMORY_HOTREMOVE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MESSAGE_LOGLEVEL_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_MESSAGE_LOGLEVEL_DEFAULT new file mode 100644 index 000000000000..ca39b7f7a35d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MESSAGE_LOGLEVEL_DEFAULT @@ -0,0 +1 @@ +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_CORE b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_CORE new file mode 100644 index 000000000000..1b4c55bdd30c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_CORE @@ -0,0 +1 @@ +CONFIG_MLX4_CORE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_CORE_GEN2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_CORE_GEN2 new file mode 100644 index 000000000000..e3653e88b67b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_CORE_GEN2 @@ -0,0 +1 @@ +# CONFIG_MLX4_CORE_GEN2 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_DEBUG new file mode 100644 index 000000000000..8c1ea389b39d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_DEBUG @@ -0,0 +1 @@ +CONFIG_MLX4_DEBUG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_EN_DCB 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_EN_DCB new file mode 100644 index 000000000000..33382c1ac42a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX4_EN_DCB @@ -0,0 +1 @@ +CONFIG_MLX4_EN_DCB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_BRIDGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_BRIDGE new file mode 100644 index 000000000000..4e7033e48f95 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_BRIDGE @@ -0,0 +1 @@ +CONFIG_MLX5_BRIDGE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_CLS_ACT b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_CLS_ACT new file mode 100644 index 000000000000..2ff38eef7747 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_CLS_ACT @@ -0,0 +1 @@ +CONFIG_MLX5_CLS_ACT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_CORE_EN_DCB b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_CORE_EN_DCB new file mode 100644 index 000000000000..6607070b8d09 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_CORE_EN_DCB @@ -0,0 +1 @@ +CONFIG_MLX5_CORE_EN_DCB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_EN_ARFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_EN_ARFS new file mode 100644 index 000000000000..0a220b0e7cc0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_EN_ARFS @@ -0,0 +1 @@ +CONFIG_MLX5_EN_ARFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_EN_RXNFC b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_EN_RXNFC new file mode 100644 index 000000000000..0a64be4dbcf0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_EN_RXNFC @@ -0,0 +1 @@ +CONFIG_MLX5_EN_RXNFC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_ESWITCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_ESWITCH new file mode 100644 index 000000000000..8a69e0671e37 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_ESWITCH @@ -0,0 +1 @@ 
+CONFIG_MLX5_ESWITCH=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_FPGA b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_FPGA new file mode 100644 index 000000000000..bac6b305da11 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_FPGA @@ -0,0 +1 @@ +CONFIG_MLX5_FPGA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_INFINIBAND b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_INFINIBAND new file mode 100644 index 000000000000..a81a552d1fb3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_INFINIBAND @@ -0,0 +1 @@ +CONFIG_MLX5_INFINIBAND=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_MPFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_MPFS new file mode 100644 index 000000000000..6799ed484a57 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_MPFS @@ -0,0 +1 @@ +CONFIG_MLX5_MPFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_SF b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_SF new file mode 100644 index 000000000000..3375a7223f73 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_SF @@ -0,0 +1 @@ +# CONFIG_MLX5_SF is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_SW_STEERING b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_SW_STEERING new file mode 100644 index 000000000000..28a9bc46aae5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_SW_STEERING @@ -0,0 +1 @@ +CONFIG_MLX5_SW_STEERING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_TC_CT b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_TC_CT new file mode 100644 index 000000000000..0651a412ecd7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_TC_CT @@ -0,0 +1 @@ +CONFIG_MLX5_TC_CT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_TC_SAMPLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_TC_SAMPLE new file mode 100644 index 000000000000..777a6403729c --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLX5_TC_SAMPLE @@ -0,0 +1 @@ +CONFIG_MLX5_TC_SAMPLE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXFW b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXFW new file mode 100644 index 000000000000..5b475180673b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXFW @@ -0,0 +1 @@ +CONFIG_MLXFW=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_CORE_HWMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_CORE_HWMON new file mode 100644 index 000000000000..a0a37fc5281f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_CORE_HWMON @@ -0,0 +1 @@ +CONFIG_MLXSW_CORE_HWMON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_CORE_THERMAL b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_CORE_THERMAL new file mode 100644 index 000000000000..b3b3b2fc5049 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_CORE_THERMAL @@ -0,0 +1 @@ +CONFIG_MLXSW_CORE_THERMAL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_I2C b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_I2C new file mode 100644 index 000000000000..57927c23e4f2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_I2C @@ -0,0 +1 @@ +CONFIG_MLXSW_I2C=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_MINIMAL b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_MINIMAL new file mode 100644 index 000000000000..5cc296afcafb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_MINIMAL @@ -0,0 +1 @@ +CONFIG_MLXSW_MINIMAL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_PCI b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_PCI new file mode 100644 index 000000000000..f1cbb9173b78 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_PCI @@ -0,0 +1 @@ +CONFIG_MLXSW_PCI=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_SPECTRUM b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_SPECTRUM new file mode 
100644 index 000000000000..0bdac0c19707 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_SPECTRUM @@ -0,0 +1 @@ +CONFIG_MLXSW_SPECTRUM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_SPECTRUM_DCB b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_SPECTRUM_DCB new file mode 100644 index 000000000000..3c8c38d0bc0e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MLXSW_SPECTRUM_DCB @@ -0,0 +1 @@ +CONFIG_MLXSW_SPECTRUM_DCB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS new file mode 100644 index 000000000000..35c9fe219b3a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS @@ -0,0 +1 @@ +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_FORCE_LOAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_FORCE_LOAD new file mode 100644 index 000000000000..8583b86bd57f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_FORCE_LOAD @@ -0,0 +1 @@ +CONFIG_MODULE_FORCE_LOAD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_FORCE_UNLOAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_FORCE_UNLOAD new file mode 100644 index 000000000000..757f81d34bcd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_FORCE_UNLOAD @@ -0,0 +1 @@ +# CONFIG_MODULE_FORCE_UNLOAD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_ALL b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_ALL new file mode 100644 index 000000000000..4a502b6f02df --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_ALL @@ -0,0 +1 @@ +# CONFIG_MODULE_SIG_ALL is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_FORCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_FORCE new file mode 100644 
index 000000000000..80b1d0c4b436 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_FORCE @@ -0,0 +1 @@ +# CONFIG_MODULE_SIG_FORCE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_HASH b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_HASH new file mode 100644 index 000000000000..04ae06b2dafb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_HASH @@ -0,0 +1 @@ +CONFIG_MODULE_SIG_HASH="sha256" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_KEY b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_KEY new file mode 100644 index 000000000000..80339e3427c3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_KEY @@ -0,0 +1 @@ +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA1 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA1 new file mode 100644 index 000000000000..d96584a87e50 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA1 @@ -0,0 +1 @@ +# CONFIG_MODULE_SIG_SHA1 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA224 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA224 new file mode 100644 index 000000000000..d49245d2291b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA224 @@ -0,0 +1 @@ +# CONFIG_MODULE_SIG_SHA224 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA256 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA256 new file mode 100644 index 000000000000..b350aa05ab58 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA256 @@ -0,0 +1 @@ +CONFIG_MODULE_SIG_SHA256=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA384 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA384 new file mode 100644 index 000000000000..ac52049ea819 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA384 @@ -0,0 +1 @@ +# CONFIG_MODULE_SIG_SHA384 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA512 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA512 new file mode 100644 index 000000000000..2910d833029c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MODULE_SIG_SHA512 @@ -0,0 +1 @@ +# CONFIG_MODULE_SIG_SHA512 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS new file mode 100644 index 000000000000..a709ae7d93f7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS @@ -0,0 +1 @@ +CONFIG_MPLS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS_IPTUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS_IPTUNNEL new file mode 100644 index 000000000000..229af61b854f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS_IPTUNNEL @@ -0,0 +1 @@ +CONFIG_MPLS_IPTUNNEL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS_ROUTING b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS_ROUTING new file mode 100644 index 000000000000..50248380eb7b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPLS_ROUTING @@ -0,0 +1 @@ +CONFIG_MPLS_ROUTING=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MPTCP_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPTCP_IPV6 new file mode 100644 index 000000000000..d0780145de59 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MPTCP_IPV6 @@ -0,0 +1 @@ +CONFIG_MPTCP_IPV6=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MSDOS_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_MSDOS_FS new file mode 100644 index 000000000000..841afeb52f73 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MSDOS_FS @@ -0,0 +1 @@ +CONFIG_MSDOS_FS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETCONSOLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETCONSOLE new file mode 
100644 index 000000000000..37ec75b06d39 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETCONSOLE @@ -0,0 +1 @@ +CONFIG_NETCONSOLE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETCONSOLE_DYNAMIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETCONSOLE_DYNAMIC new file mode 100644 index 000000000000..764af964b571 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETCONSOLE_DYNAMIC @@ -0,0 +1 @@ +CONFIG_NETCONSOLE_DYNAMIC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETDEVSIM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETDEVSIM new file mode 100644 index 000000000000..96004592ade4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETDEVSIM @@ -0,0 +1 @@ +CONFIG_NETDEVSIM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_CONNCOUNT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_CONNCOUNT new file mode 100644 index 000000000000..354fd1be45bb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_CONNCOUNT @@ -0,0 +1 @@ +CONFIG_NETFILTER_CONNCOUNT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_EGRESS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_EGRESS new file mode 100644 index 000000000000..df0daaae81dc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_EGRESS @@ -0,0 +1 @@ +CONFIG_NETFILTER_EGRESS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_FAMILY_ARP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_FAMILY_ARP new file mode 100644 index 000000000000..f778280ee529 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_FAMILY_ARP @@ -0,0 +1 @@ +CONFIG_NETFILTER_FAMILY_ARP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_FAMILY_BRIDGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_FAMILY_BRIDGE new file mode 100644 index 000000000000..3767186695e4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_FAMILY_BRIDGE @@ 
-0,0 +1 @@ +CONFIG_NETFILTER_FAMILY_BRIDGE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK new file mode 100644 index 000000000000..cb5690897c81 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK @@ -0,0 +1 @@ +CONFIG_NETFILTER_NETLINK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_ACCT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_ACCT new file mode 100644 index 000000000000..2ad7ea565da2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_ACCT @@ -0,0 +1 @@ +CONFIG_NETFILTER_NETLINK_ACCT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_GLUE_CT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_GLUE_CT new file mode 100644 index 000000000000..e413357f9eb8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_GLUE_CT @@ -0,0 +1 @@ +CONFIG_NETFILTER_NETLINK_GLUE_CT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_LOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_LOG new file mode 100644 index 000000000000..3f8d3d07ccad --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_LOG @@ -0,0 +1 @@ +CONFIG_NETFILTER_NETLINK_LOG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_OSF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_OSF new file mode 100644 index 000000000000..aa7e3042f0af --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_OSF @@ -0,0 +1 @@ +CONFIG_NETFILTER_NETLINK_OSF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_QUEUE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_QUEUE new file mode 100644 index 000000000000..c31c5b848f02 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_NETLINK_QUEUE @@ -0,0 +1 
@@ +CONFIG_NETFILTER_NETLINK_QUEUE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_SYNPROXY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_SYNPROXY new file mode 100644 index 000000000000..6bf87167fa6a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_SYNPROXY @@ -0,0 +1 @@ +CONFIG_NETFILTER_SYNPROXY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XTABLES b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XTABLES new file mode 100644 index 000000000000..a97d971dcb5d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XTABLES @@ -0,0 +1 @@ +CONFIG_NETFILTER_XTABLES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_CONNMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_CONNMARK new file mode 100644 index 000000000000..5a7391f6b92d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_CONNMARK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_CONNMARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MARK new file mode 100644 index 000000000000..a65aee77e712 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MARK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ADDRTYPE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ADDRTYPE new file mode 100644 index 000000000000..d0ebb666a723 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ADDRTYPE @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_BPF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_BPF new file mode 100644 index 000000000000..dd51452a876a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_BPF @@ -0,0 +1 @@ 
+CONFIG_NETFILTER_XT_MATCH_BPF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CGROUP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CGROUP new file mode 100644 index 000000000000..d8a9d8646820 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CGROUP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CGROUP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CLUSTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CLUSTER new file mode 100644 index 000000000000..020fa47f483d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CLUSTER @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_COMMENT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_COMMENT new file mode 100644 index 000000000000..a3c59620f18c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_COMMENT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_COMMENT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNBYTES b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNBYTES new file mode 100644 index 000000000000..9b3a056b7b1f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNBYTES @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNLABEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNLABEL new file mode 100644 index 000000000000..29f343313112 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNLABEL @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNLIMIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNLIMIT new file mode 100644 index 
000000000000..ee136fa468f0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNLIMIT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNMARK new file mode 100644 index 000000000000..8ad13363923b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNMARK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNTRACK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNTRACK new file mode 100644 index 000000000000..45db821ffa97 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CONNTRACK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CPU b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CPU new file mode 100644 index 000000000000..556ebfac9a43 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_CPU @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_CPU=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DCCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DCCP new file mode 100644 index 000000000000..8b4224d8ce43 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DCCP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_DCCP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DEVGROUP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DEVGROUP new file mode 100644 index 000000000000..2f50c144f73c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DEVGROUP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DSCP 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DSCP new file mode 100644 index 000000000000..d4f55cb4fe15 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_DSCP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_DSCP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ECN b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ECN new file mode 100644 index 000000000000..3ac23e5f8573 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ECN @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_ECN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ESP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ESP new file mode 100644 index 000000000000..16ce1ab2e5e3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_ESP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_ESP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HASHLIMIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HASHLIMIT new file mode 100644 index 000000000000..3112cfca2dc7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HASHLIMIT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HELPER b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HELPER new file mode 100644 index 000000000000..1c79d125ae2e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HELPER @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_HELPER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HL new file mode 100644 index 000000000000..fe60ffc566b4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_HL @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_HL=m diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPCOMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPCOMP new file mode 100644 index 000000000000..9e114c643133 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPCOMP @@ -0,0 +1 @@ +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPRANGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPRANGE new file mode 100644 index 000000000000..42bf14df38f4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPRANGE @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPVS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPVS new file mode 100644 index 000000000000..db117714a5ce --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPVS @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_IPVS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_L2TP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_L2TP new file mode 100644 index 000000000000..b1115825322e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_L2TP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_L2TP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_LENGTH b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_LENGTH new file mode 100644 index 000000000000..22f3920af106 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_LENGTH @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_LENGTH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_LIMIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_LIMIT new file mode 100644 index 000000000000..69d2d65dcb7c --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_LIMIT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_LIMIT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MAC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MAC new file mode 100644 index 000000000000..2175aa582900 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MAC @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_MAC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MARK new file mode 100644 index 000000000000..6c20543fba3b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MARK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_MARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MULTIPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MULTIPORT new file mode 100644 index 000000000000..c3d9c38a6e14 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_MULTIPORT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_NFACCT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_NFACCT new file mode 100644 index 000000000000..81ec63eb68a0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_NFACCT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_NFACCT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_OSF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_OSF new file mode 100644 index 000000000000..fc51bc9866ea --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_OSF @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_OSF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_OWNER b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_OWNER new file mode 
100644 index 000000000000..34be704c15fe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_OWNER @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_OWNER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_PHYSDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_PHYSDEV new file mode 100644 index 000000000000..756371f03a64 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_PHYSDEV @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_PKTTYPE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_PKTTYPE new file mode 100644 index 000000000000..d1412b2ac249 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_PKTTYPE @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_POLICY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_POLICY new file mode 100644 index 000000000000..05e378a49f70 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_POLICY @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_POLICY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_QUOTA b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_QUOTA new file mode 100644 index 000000000000..5977d4d970c6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_QUOTA @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_QUOTA=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_RATEEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_RATEEST new file mode 100644 index 000000000000..a7bec3652661 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_RATEEST @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_RATEEST=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_REALM 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_REALM new file mode 100644 index 000000000000..e7ce1f51ac37 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_REALM @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_REALM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_RECENT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_RECENT new file mode 100644 index 000000000000..0f092d603487 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_RECENT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_RECENT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_SCTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_SCTP new file mode 100644 index 000000000000..61a2485946b9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_SCTP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_SCTP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_SOCKET b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_SOCKET new file mode 100644 index 000000000000..ec4886c06a6e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_SOCKET @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_SOCKET=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STATE new file mode 100644 index 000000000000..87f0e3fe7499 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STATE @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_STATE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STATISTIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STATISTIC new file mode 100644 index 000000000000..79f06effbf8f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STATISTIC @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m diff 
--git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STRING b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STRING new file mode 100644 index 000000000000..b7bf31333ced --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_STRING @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_STRING=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_TCPMSS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_TCPMSS new file mode 100644 index 000000000000..abc5cdb59604 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_TCPMSS @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_TIME b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_TIME new file mode 100644 index 000000000000..d4eb9fcb212c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_TIME @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_TIME=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_U32 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_U32 new file mode 100644 index 000000000000..3033733e5a1c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_U32 @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_MATCH_U32=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_NAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_NAT new file mode 100644 index 000000000000..797733334413 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_NAT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_NAT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_SET b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_SET new file mode 100644 index 000000000000..422aad468050 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_SET @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_SET=m diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_AUDIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_AUDIT new file mode 100644 index 000000000000..9b8ef0d6d697 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_AUDIT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_AUDIT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CHECKSUM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CHECKSUM new file mode 100644 index 000000000000..58afbff6d9ca --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CHECKSUM @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CLASSIFY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CLASSIFY new file mode 100644 index 000000000000..595deedceaa9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CLASSIFY @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CONNMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CONNMARK new file mode 100644 index 000000000000..b64202c157f3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CONNMARK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CONNSECMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CONNSECMARK new file mode 100644 index 000000000000..6cd3b71b7abe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CONNSECMARK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CT new file mode 100644 index 000000000000..e777799b0677 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_CT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_CT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_DSCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_DSCP new file mode 100644 index 000000000000..2b7b2177260e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_DSCP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_DSCP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_HL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_HL new file mode 100644 index 000000000000..72cf5b3e5cd7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_HL @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_HL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_HMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_HMARK new file mode 100644 index 000000000000..e75e82b04a35 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_HMARK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_HMARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_IDLETIMER b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_IDLETIMER new file mode 100644 index 000000000000..ecb2856c0e31 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_IDLETIMER @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_LED b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_LED new file mode 100644 index 000000000000..819ec668cc90 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_LED @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_LED=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_LOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_LOG new file mode 
100644 index 000000000000..a4e073d6f30d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_LOG @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_LOG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_MARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_MARK new file mode 100644 index 000000000000..077ea5295481 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_MARK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_MARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_MASQUERADE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_MASQUERADE new file mode 100644 index 000000000000..37960b6b1f45 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_MASQUERADE @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NETMAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NETMAP new file mode 100644 index 000000000000..ca635aa2a1c2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NETMAP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_NETMAP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NFLOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NFLOG new file mode 100644 index 000000000000..c4c331a568fd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NFLOG @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_NFLOG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NFQUEUE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NFQUEUE new file mode 100644 index 000000000000..9885488c8cf0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NFQUEUE @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NOTRACK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NOTRACK new file mode 100644 index 000000000000..2c0abc17f2e3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_NOTRACK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_RATEEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_RATEEST new file mode 100644 index 000000000000..140be4a026f6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_RATEEST @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_RATEEST=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_REDIRECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_REDIRECT new file mode 100644 index 000000000000..e31cca97a784 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_REDIRECT @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_SECMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_SECMARK new file mode 100644 index 000000000000..4d6dd7ff4774 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_SECMARK @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_SECMARK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TCPMSS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TCPMSS new file mode 100644 index 000000000000..fa3ac174dfd3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TCPMSS @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP new file mode 100644 index 000000000000..72c208d325d1 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TEE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TEE new file mode 100644 index 000000000000..1ed57ae5143d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TEE @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_TEE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TPROXY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TPROXY new file mode 100644 index 000000000000..89f3b7d2b0da --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TPROXY @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_TPROXY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TRACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TRACE new file mode 100644 index 000000000000..dc5f43fb316b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_TARGET_TRACE @@ -0,0 +1 @@ +CONFIG_NETFILTER_XT_TARGET_TRACE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFS_STATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFS_STATS new file mode 100644 index 000000000000..253ef3a36d06 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFS_STATS @@ -0,0 +1 @@ +CONFIG_NETFS_STATS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFS_SUPPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFS_SUPPORT new file mode 100644 index 000000000000..0a7f76b994b0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFS_SUPPORT @@ -0,0 +1 @@ +CONFIG_NETFS_SUPPORT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETLABEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETLABEL new file mode 100644 index 000000000000..b6e2bb09024d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETLABEL 
@@ -0,0 +1 @@ +CONFIG_NETLABEL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETWORK_PHY_TIMESTAMPING b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETWORK_PHY_TIMESTAMPING new file mode 100644 index 000000000000..3ef3fad28bb6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETWORK_PHY_TIMESTAMPING @@ -0,0 +1 @@ +CONFIG_NETWORK_PHY_TIMESTAMPING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETWORK_SECMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETWORK_SECMARK new file mode 100644 index 000000000000..8218703c0bd7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETWORK_SECMARK @@ -0,0 +1 @@ +CONFIG_NETWORK_SECMARK=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_BPF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_BPF new file mode 100644 index 000000000000..b66cfa276d66 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_BPF @@ -0,0 +1 @@ +CONFIG_NET_ACT_BPF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CONNMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CONNMARK new file mode 100644 index 000000000000..eb651d1910aa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CONNMARK @@ -0,0 +1 @@ +# CONFIG_NET_ACT_CONNMARK is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CSUM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CSUM new file mode 100644 index 000000000000..4e9409406070 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CSUM @@ -0,0 +1 @@ +CONFIG_NET_ACT_CSUM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CT new file mode 100644 index 000000000000..93f4c486eb29 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CT @@ -0,0 +1 @@ +CONFIG_NET_ACT_CT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CTINFO 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CTINFO new file mode 100644 index 000000000000..0becc0cc7fe4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_CTINFO @@ -0,0 +1 @@ +# CONFIG_NET_ACT_CTINFO is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_GATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_GATE new file mode 100644 index 000000000000..39e73e5da996 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_GATE @@ -0,0 +1 @@ +# CONFIG_NET_ACT_GATE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_IFE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_IFE new file mode 100644 index 000000000000..a466bca7be09 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_IFE @@ -0,0 +1 @@ +# CONFIG_NET_ACT_IFE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_IPT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_IPT new file mode 100644 index 000000000000..3b03e8356ba4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_IPT @@ -0,0 +1 @@ +CONFIG_NET_ACT_IPT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_MIRRED b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_MIRRED new file mode 100644 index 000000000000..e16b33a223d7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_MIRRED @@ -0,0 +1 @@ +CONFIG_NET_ACT_MIRRED=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_MPLS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_MPLS new file mode 100644 index 000000000000..2f486dbf828f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_MPLS @@ -0,0 +1 @@ +# CONFIG_NET_ACT_MPLS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_NAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_NAT new file mode 100644 index 000000000000..e6812fbdc9ae --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_NAT 
@@ -0,0 +1 @@ +CONFIG_NET_ACT_NAT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_PEDIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_PEDIT new file mode 100644 index 000000000000..8ac1123aaa30 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_PEDIT @@ -0,0 +1 @@ +CONFIG_NET_ACT_PEDIT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SAMPLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SAMPLE new file mode 100644 index 000000000000..4582be19d0a8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SAMPLE @@ -0,0 +1 @@ +CONFIG_NET_ACT_SAMPLE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SIMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SIMP new file mode 100644 index 000000000000..7203e8d682d1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SIMP @@ -0,0 +1 @@ +CONFIG_NET_ACT_SIMP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SKBEDIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SKBEDIT new file mode 100644 index 000000000000..f3da96aa3dfa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SKBEDIT @@ -0,0 +1 @@ +CONFIG_NET_ACT_SKBEDIT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SKBMOD b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SKBMOD new file mode 100644 index 000000000000..90fa7304404b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_SKBMOD @@ -0,0 +1 @@ +CONFIG_NET_ACT_SKBMOD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_TUNNEL_KEY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_TUNNEL_KEY new file mode 100644 index 000000000000..a030419554a9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_TUNNEL_KEY @@ -0,0 +1 @@ +CONFIG_NET_ACT_TUNNEL_KEY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_VLAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_VLAN new file mode 
100644 index 000000000000..b47e5e427b0d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_ACT_VLAN @@ -0,0 +1 @@ +CONFIG_NET_ACT_VLAN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_BASIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_BASIC new file mode 100644 index 000000000000..9f9628713237 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_BASIC @@ -0,0 +1 @@ +CONFIG_NET_CLS_BASIC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_BPF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_BPF new file mode 100644 index 000000000000..5645a27b56e7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_BPF @@ -0,0 +1 @@ +CONFIG_NET_CLS_BPF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_CGROUP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_CGROUP new file mode 100644 index 000000000000..43802195c9df --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_CGROUP @@ -0,0 +1 @@ +CONFIG_NET_CLS_CGROUP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FLOW b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FLOW new file mode 100644 index 000000000000..0f3fee0898a6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FLOW @@ -0,0 +1 @@ +CONFIG_NET_CLS_FLOW=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FLOWER b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FLOWER new file mode 100644 index 000000000000..d26b4ef821e4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FLOWER @@ -0,0 +1 @@ +CONFIG_NET_CLS_FLOWER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FW b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FW new file mode 100644 index 000000000000..fb88679bbe3f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_FW @@ -0,0 +1 @@ +CONFIG_NET_CLS_FW=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_MATCHALL 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_MATCHALL new file mode 100644 index 000000000000..a365ce870302 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_MATCHALL @@ -0,0 +1 @@ +CONFIG_NET_CLS_MATCHALL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_ROUTE4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_ROUTE4 new file mode 100644 index 000000000000..1260f8c9a136 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_ROUTE4 @@ -0,0 +1 @@ +CONFIG_NET_CLS_ROUTE4=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_U32 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_U32 new file mode 100644 index 000000000000..ebbda24969ad --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_CLS_U32 @@ -0,0 +1 @@ +CONFIG_NET_CLS_U32=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_DROP_MONITOR b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_DROP_MONITOR new file mode 100644 index 000000000000..9c5a23a5f169 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_DROP_MONITOR @@ -0,0 +1 @@ +CONFIG_NET_DROP_MONITOR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH new file mode 100644 index 000000000000..f6171a72f06e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH @@ -0,0 +1 @@ +CONFIG_NET_EMATCH=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_CMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_CMP new file mode 100644 index 000000000000..086dbaf24fec --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_CMP @@ -0,0 +1 @@ +CONFIG_NET_EMATCH_CMP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_IPSET b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_IPSET new file mode 100644 index 000000000000..2b0a70570ace --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_IPSET @@ 
-0,0 +1 @@ +CONFIG_NET_EMATCH_IPSET=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_IPT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_IPT new file mode 100644 index 000000000000..194717561a66 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_IPT @@ -0,0 +1 @@ +# CONFIG_NET_EMATCH_IPT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_META b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_META new file mode 100644 index 000000000000..0b395db767cd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_META @@ -0,0 +1 @@ +CONFIG_NET_EMATCH_META=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_NBYTE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_NBYTE new file mode 100644 index 000000000000..4567128fc90e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_NBYTE @@ -0,0 +1 @@ +CONFIG_NET_EMATCH_NBYTE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_STACK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_STACK new file mode 100644 index 000000000000..e37337192580 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_STACK @@ -0,0 +1 @@ +CONFIG_NET_EMATCH_STACK=32 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_TEXT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_TEXT new file mode 100644 index 000000000000..6fd4727362f0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_TEXT @@ -0,0 +1 @@ +CONFIG_NET_EMATCH_TEXT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_U32 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_U32 new file mode 100644 index 000000000000..4bdebd3eea34 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_EMATCH_U32 @@ -0,0 +1 @@ +CONFIG_NET_EMATCH_U32=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_FC 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_FC new file mode 100644 index 000000000000..e232d912f7fe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_FC @@ -0,0 +1 @@ +CONFIG_NET_FC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE new file mode 100644 index 000000000000..ad527835c120 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE @@ -0,0 +1 @@ +CONFIG_NET_IPGRE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE_BROADCAST b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE_BROADCAST new file mode 100644 index 000000000000..dfb259c45d6b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE_BROADCAST @@ -0,0 +1 @@ +CONFIG_NET_IPGRE_BROADCAST=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE_DEMUX b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE_DEMUX new file mode 100644 index 000000000000..787db526330e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPGRE_DEMUX @@ -0,0 +1 @@ +CONFIG_NET_IPGRE_DEMUX=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPIP new file mode 100644 index 000000000000..3f1247bfc9d4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_IPIP @@ -0,0 +1 @@ +CONFIG_NET_IPIP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_KEY_MIGRATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_KEY_MIGRATE new file mode 100644 index 000000000000..2168c1a29a9d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_KEY_MIGRATE @@ -0,0 +1 @@ +CONFIG_NET_KEY_MIGRATE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_L3_MASTER_DEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_L3_MASTER_DEV new file mode 100644 index 000000000000..bdf39009ea65 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_L3_MASTER_DEV @@ -0,0 +1 @@ 
+CONFIG_NET_L3_MASTER_DEV=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_MPLS_GSO b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_MPLS_GSO new file mode 100644 index 000000000000..a1aedf04c348 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_MPLS_GSO @@ -0,0 +1 @@ +CONFIG_NET_MPLS_GSO=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_NSH b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_NSH new file mode 100644 index 000000000000..67f7ca8f68ee --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_NSH @@ -0,0 +1 @@ +CONFIG_NET_NSH=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_PKTGEN b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_PKTGEN new file mode 100644 index 000000000000..b12bf03e9333 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_PKTGEN @@ -0,0 +1 @@ +CONFIG_NET_PKTGEN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_PTP_CLASSIFY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_PTP_CLASSIFY new file mode 100644 index 000000000000..3b024145c3d2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_PTP_CLASSIFY @@ -0,0 +1 @@ +CONFIG_NET_PTP_CLASSIFY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CAKE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CAKE new file mode 100644 index 000000000000..1413c498e3a7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CAKE @@ -0,0 +1 @@ +# CONFIG_NET_SCH_CAKE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CBS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CBS new file mode 100644 index 000000000000..96a7f28e56d2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CBS @@ -0,0 +1 @@ +# CONFIG_NET_SCH_CBS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CHOKE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CHOKE new file mode 100644 index 000000000000..6ab0baf140f7 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CHOKE @@ -0,0 +1 @@ +CONFIG_NET_SCH_CHOKE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CODEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CODEL new file mode 100644 index 000000000000..817865e082b3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_CODEL @@ -0,0 +1 @@ +CONFIG_NET_SCH_CODEL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_DEFAULT new file mode 100644 index 000000000000..268f6d957936 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_DEFAULT @@ -0,0 +1 @@ +CONFIG_NET_SCH_DEFAULT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_DRR b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_DRR new file mode 100644 index 000000000000..4577d1602943 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_DRR @@ -0,0 +1 @@ +CONFIG_NET_SCH_DRR=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_ETF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_ETF new file mode 100644 index 000000000000..81be382999ac --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_ETF @@ -0,0 +1 @@ +# CONFIG_NET_SCH_ETF is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_ETS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_ETS new file mode 100644 index 000000000000..95ea61e437be --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_ETS @@ -0,0 +1 @@ +# CONFIG_NET_SCH_ETS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_FQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_FQ new file mode 100644 index 000000000000..a8feeea079b7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_FQ @@ -0,0 +1 @@ +CONFIG_NET_SCH_FQ=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_FQ_PIE 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_FQ_PIE new file mode 100644 index 000000000000..71241b274a8c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_FQ_PIE @@ -0,0 +1 @@ +# CONFIG_NET_SCH_FQ_PIE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_GRED b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_GRED new file mode 100644 index 000000000000..6c050d77049a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_GRED @@ -0,0 +1 @@ +CONFIG_NET_SCH_GRED=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HFSC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HFSC new file mode 100644 index 000000000000..6c5361abd154 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HFSC @@ -0,0 +1 @@ +CONFIG_NET_SCH_HFSC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HHF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HHF new file mode 100644 index 000000000000..784312a46be4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HHF @@ -0,0 +1 @@ +CONFIG_NET_SCH_HHF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HTB b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HTB new file mode 100644 index 000000000000..046d4d9fb05b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_HTB @@ -0,0 +1 @@ +CONFIG_NET_SCH_HTB=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_MQPRIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_MQPRIO new file mode 100644 index 000000000000..4acab89150fa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_MQPRIO @@ -0,0 +1 @@ +CONFIG_NET_SCH_MQPRIO=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_MULTIQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_MULTIQ new file mode 100644 index 000000000000..118b13e09ed8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_MULTIQ @@ -0,0 +1 @@ 
+CONFIG_NET_SCH_MULTIQ=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_NETEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_NETEM new file mode 100644 index 000000000000..37799e653ed1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_NETEM @@ -0,0 +1 @@ +CONFIG_NET_SCH_NETEM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PIE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PIE new file mode 100644 index 000000000000..dc9f48fe535c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PIE @@ -0,0 +1 @@ +CONFIG_NET_SCH_PIE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PLUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PLUG new file mode 100644 index 000000000000..b27bf6ac099f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PLUG @@ -0,0 +1 @@ +CONFIG_NET_SCH_PLUG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PRIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PRIO new file mode 100644 index 000000000000..d7c3ad33d97d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_PRIO @@ -0,0 +1 @@ +CONFIG_NET_SCH_PRIO=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_QFQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_QFQ new file mode 100644 index 000000000000..a4000beb94cf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_QFQ @@ -0,0 +1 @@ +CONFIG_NET_SCH_QFQ=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_RED b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_RED new file mode 100644 index 000000000000..c08d04d94040 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_RED @@ -0,0 +1 @@ +CONFIG_NET_SCH_RED=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SFB b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SFB new file mode 100644 index 000000000000..72985ce86c6b --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SFB @@ -0,0 +1 @@ +CONFIG_NET_SCH_SFB=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SFQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SFQ new file mode 100644 index 000000000000..66b3ebbae600 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SFQ @@ -0,0 +1 @@ +CONFIG_NET_SCH_SFQ=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SKBPRIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SKBPRIO new file mode 100644 index 000000000000..9805c0149c6a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_SKBPRIO @@ -0,0 +1 @@ +# CONFIG_NET_SCH_SKBPRIO is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TAPRIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TAPRIO new file mode 100644 index 000000000000..fd5e3d773e09 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TAPRIO @@ -0,0 +1 @@ +# CONFIG_NET_SCH_TAPRIO is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TBF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TBF new file mode 100644 index 000000000000..b45dd0213a2b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TBF @@ -0,0 +1 @@ +CONFIG_NET_SCH_TBF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TEQL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TEQL new file mode 100644 index 000000000000..2c7a73b04e79 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_SCH_TEQL @@ -0,0 +1 @@ +CONFIG_NET_SCH_TEQL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TC_SKB_EXT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TC_SKB_EXT new file mode 100644 index 000000000000..3290f992f5ac --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TC_SKB_EXT @@ -0,0 +1 @@ +CONFIG_NET_TC_SKB_EXT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM new file mode 100644 index 000000000000..1577c67e3e72 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM @@ -0,0 +1 @@ +CONFIG_NET_TEAM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_ACTIVEBACKUP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_ACTIVEBACKUP new file mode 100644 index 000000000000..bcfc4d09f970 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_ACTIVEBACKUP @@ -0,0 +1 @@ +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_BROADCAST b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_BROADCAST new file mode 100644 index 000000000000..ec786b43112d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_BROADCAST @@ -0,0 +1 @@ +CONFIG_NET_TEAM_MODE_BROADCAST=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_LOADBALANCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_LOADBALANCE new file mode 100644 index 000000000000..018ec31ed1e2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_LOADBALANCE @@ -0,0 +1 @@ +CONFIG_NET_TEAM_MODE_LOADBALANCE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_RANDOM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_RANDOM new file mode 100644 index 000000000000..f4ed7452ea72 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_RANDOM @@ -0,0 +1 @@ +CONFIG_NET_TEAM_MODE_RANDOM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_ROUNDROBIN b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_ROUNDROBIN new file mode 100644 index 000000000000..c7d4ad7aec2d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_TEAM_MODE_ROUNDROBIN @@ -0,0 +1 @@ +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_HUAWEI 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_HUAWEI new file mode 100644 index 000000000000..b3af680f0a0e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_HUAWEI @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_HUAWEI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_SOLARFLARE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_SOLARFLARE new file mode 100644 index 000000000000..8b1f0b6bfe61 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_SOLARFLARE @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_SOLARFLARE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VRF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VRF new file mode 100644 index 000000000000..8c9e84febcad --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VRF @@ -0,0 +1 @@ +CONFIG_NET_VRF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_XGRESS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_XGRESS new file mode 100644 index 000000000000..e0441ff5017a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_XGRESS @@ -0,0 +1 @@ +CONFIG_NET_XGRESS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFIT_SECURITY_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFIT_SECURITY_DEBUG new file mode 100644 index 000000000000..fff1bd54155e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFIT_SECURITY_DEBUG @@ -0,0 +1 @@ +# CONFIG_NFIT_SECURITY_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_BLOCKLAYOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_BLOCKLAYOUT new file mode 100644 index 000000000000..60150858f87a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_BLOCKLAYOUT @@ -0,0 +1 @@ +# CONFIG_NFSD_BLOCKLAYOUT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_FLEXFILELAYOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_FLEXFILELAYOUT new file mode 100644 index 000000000000..fd0c215381f2 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_FLEXFILELAYOUT @@ -0,0 +1 @@ +# CONFIG_NFSD_FLEXFILELAYOUT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_PNFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_PNFS new file mode 100644 index 000000000000..d934bd18ce57 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_PNFS @@ -0,0 +1 @@ +CONFIG_NFSD_PNFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_SCSILAYOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_SCSILAYOUT new file mode 100644 index 000000000000..1d94de716279 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_SCSILAYOUT @@ -0,0 +1 @@ +CONFIG_NFSD_SCSILAYOUT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V3_ACL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V3_ACL new file mode 100644 index 000000000000..451933884fb0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V3_ACL @@ -0,0 +1 @@ +CONFIG_NFSD_V3_ACL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V4_2_INTER_SSC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V4_2_INTER_SSC new file mode 100644 index 000000000000..6a5dff80979f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V4_2_INTER_SSC @@ -0,0 +1 @@ +# CONFIG_NFSD_V4_2_INTER_SSC is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V4_SECURITY_LABEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V4_SECURITY_LABEL new file mode 100644 index 000000000000..16f3ee380e66 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V4_SECURITY_LABEL @@ -0,0 +1 @@ +CONFIG_NFSD_V4_SECURITY_LABEL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_ACL_SUPPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_ACL_SUPPORT new file mode 100644 index 000000000000..62f13210429b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_ACL_SUPPORT @@ -0,0 +1 @@ +CONFIG_NFS_ACL_SUPPORT=m diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_DISABLE_UDP_SUPPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_DISABLE_UDP_SUPPORT new file mode 100644 index 000000000000..9537c47e325d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_DISABLE_UDP_SUPPORT @@ -0,0 +1 @@ +CONFIG_NFS_DISABLE_UDP_SUPPORT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_SWAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_SWAP new file mode 100644 index 000000000000..6618a3d86be9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_SWAP @@ -0,0 +1 @@ +# CONFIG_NFS_SWAP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_USE_LEGACY_DNS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_USE_LEGACY_DNS new file mode 100644 index 000000000000..6919929412c8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_USE_LEGACY_DNS @@ -0,0 +1 @@ +# CONFIG_NFS_USE_LEGACY_DNS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V2 new file mode 100644 index 000000000000..f0f45e1802b6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V2 @@ -0,0 +1 @@ +# CONFIG_NFS_V2 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V3_ACL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V3_ACL new file mode 100644 index 000000000000..d4c6a260e747 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V3_ACL @@ -0,0 +1 @@ +CONFIG_NFS_V3_ACL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V4_1_MIGRATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V4_1_MIGRATION new file mode 100644 index 000000000000..c64757144a86 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V4_1_MIGRATION @@ -0,0 +1 @@ +# CONFIG_NFS_V4_1_MIGRATION is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V4_2_READ_PLUS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V4_2_READ_PLUS new file mode 
100644 index 000000000000..4d62001f1f91 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V4_2_READ_PLUS @@ -0,0 +1 @@ +# CONFIG_NFS_V4_2_READ_PLUS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_BRIDGE_META b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_BRIDGE_META new file mode 100644 index 000000000000..3e5759b849e4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_BRIDGE_META @@ -0,0 +1 @@ +# CONFIG_NFT_BRIDGE_META is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_BRIDGE_REJECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_BRIDGE_REJECT new file mode 100644 index 000000000000..1ec341fc3e4b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_BRIDGE_REJECT @@ -0,0 +1 @@ +CONFIG_NFT_BRIDGE_REJECT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_COMPAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_COMPAT new file mode 100644 index 000000000000..3b5eab9dfda2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_COMPAT @@ -0,0 +1 @@ +CONFIG_NFT_COMPAT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_CONNLIMIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_CONNLIMIT new file mode 100644 index 000000000000..dfa4af5dbfec --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_CONNLIMIT @@ -0,0 +1 @@ +CONFIG_NFT_CONNLIMIT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_CT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_CT new file mode 100644 index 000000000000..33c2f017d44d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_CT @@ -0,0 +1 @@ +CONFIG_NFT_CT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV4 new file mode 100644 index 000000000000..fb9ab921d434 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV4 @@ -0,0 +1 @@ +CONFIG_NFT_DUP_IPV4=m diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV6 new file mode 100644 index 000000000000..391a7f256e50 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_IPV6 @@ -0,0 +1 @@ +CONFIG_NFT_DUP_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_NETDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_NETDEV new file mode 100644 index 000000000000..4cab8e030674 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_DUP_NETDEV @@ -0,0 +1 @@ +CONFIG_NFT_DUP_NETDEV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB new file mode 100644 index 000000000000..82fa162bbfb1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB @@ -0,0 +1 @@ +CONFIG_NFT_FIB=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_INET b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_INET new file mode 100644 index 000000000000..fe7a8f35b4b5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_INET @@ -0,0 +1 @@ +CONFIG_NFT_FIB_INET=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV4 new file mode 100644 index 000000000000..210fb695a4ca --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV4 @@ -0,0 +1 @@ +CONFIG_NFT_FIB_IPV4=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV6 new file mode 100644 index 000000000000..26b95dea82e7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_IPV6 @@ -0,0 +1 @@ +CONFIG_NFT_FIB_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_NETDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_NETDEV new file mode 100644 index 000000000000..273bfeb6a706 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FIB_NETDEV @@ -0,0 +1 @@ +CONFIG_NFT_FIB_NETDEV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FLOW_OFFLOAD b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FLOW_OFFLOAD new file mode 100644 index 000000000000..2960ce593de2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FLOW_OFFLOAD @@ -0,0 +1 @@ +CONFIG_NFT_FLOW_OFFLOAD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FWD_NETDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FWD_NETDEV new file mode 100644 index 000000000000..2f10f39e2b64 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_FWD_NETDEV @@ -0,0 +1 @@ +CONFIG_NFT_FWD_NETDEV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_HASH b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_HASH new file mode 100644 index 000000000000..581bd38e130f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_HASH @@ -0,0 +1 @@ +CONFIG_NFT_HASH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_LIMIT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_LIMIT new file mode 100644 index 000000000000..ab64be55f266 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_LIMIT @@ -0,0 +1 @@ +CONFIG_NFT_LIMIT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_LOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_LOG new file mode 100644 index 000000000000..beb1a00ccbf4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_LOG @@ -0,0 +1 @@ +CONFIG_NFT_LOG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_MASQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_MASQ new file mode 100644 index 000000000000..ab4234885e83 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_MASQ @@ -0,0 +1 @@ +CONFIG_NFT_MASQ=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_NAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_NAT new file mode 100644 index 000000000000..6fc5795d258f --- 
/dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_NAT @@ -0,0 +1 @@ +CONFIG_NFT_NAT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_NUMGEN b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_NUMGEN new file mode 100644 index 000000000000..243007744b52 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_NUMGEN @@ -0,0 +1 @@ +CONFIG_NFT_NUMGEN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_OSF b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_OSF new file mode 100644 index 000000000000..beadca8f51d0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_OSF @@ -0,0 +1 @@ +CONFIG_NFT_OSF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_QUEUE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_QUEUE new file mode 100644 index 000000000000..b8aea55e813c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_QUEUE @@ -0,0 +1 @@ +CONFIG_NFT_QUEUE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_QUOTA b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_QUOTA new file mode 100644 index 000000000000..1984d0ec795b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_QUOTA @@ -0,0 +1 @@ +CONFIG_NFT_QUOTA=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REDIR b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REDIR new file mode 100644 index 000000000000..d7f2d7c76a69 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REDIR @@ -0,0 +1 @@ +CONFIG_NFT_REDIR=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT new file mode 100644 index 000000000000..3f0167e83a62 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT @@ -0,0 +1 @@ +CONFIG_NFT_REJECT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT_INET b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT_INET new file mode 100644 index 000000000000..62ebfe6e850c --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT_INET @@ -0,0 +1 @@ +CONFIG_NFT_REJECT_INET=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT_IPV6 new file mode 100644 index 000000000000..2e8750b56c81 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_REJECT_IPV6 @@ -0,0 +1 @@ +CONFIG_NFT_REJECT_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_SOCKET b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_SOCKET new file mode 100644 index 000000000000..84aa8fd92b7d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_SOCKET @@ -0,0 +1 @@ +CONFIG_NFT_SOCKET=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_SYNPROXY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_SYNPROXY new file mode 100644 index 000000000000..a8e9cdca8e5f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_SYNPROXY @@ -0,0 +1 @@ +# CONFIG_NFT_SYNPROXY is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_TPROXY b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_TPROXY new file mode 100644 index 000000000000..d43e8c5f4bbb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_TPROXY @@ -0,0 +1 @@ +CONFIG_NFT_TPROXY=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_TUNNEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_TUNNEL new file mode 100644 index 000000000000..30f2b484f6fe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_TUNNEL @@ -0,0 +1 @@ +CONFIG_NFT_TUNNEL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_XFRM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_XFRM new file mode 100644 index 000000000000..9147adfb0fa9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFT_XFRM @@ -0,0 +1 @@ +CONFIG_NFT_XFRM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_AMANDA b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_AMANDA new file 
mode 100644 index 000000000000..07c3990d1f78 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_AMANDA @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_AMANDA=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_BRIDGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_BRIDGE new file mode 100644 index 000000000000..1a7916a68faf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_BRIDGE @@ -0,0 +1 @@ +# CONFIG_NF_CONNTRACK_BRIDGE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_BROADCAST b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_BROADCAST new file mode 100644 index 000000000000..04a20201b044 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_BROADCAST @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_BROADCAST=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_EVENTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_EVENTS new file mode 100644 index 000000000000..711ab9d5d354 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_EVENTS @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_EVENTS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_FTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_FTP new file mode 100644 index 000000000000..7a0409d05002 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_FTP @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_FTP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_H323 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_H323 new file mode 100644 index 000000000000..a8b54d4baa04 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_H323 @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_H323=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_IRC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_IRC new file mode 100644 index 000000000000..5034694f2754 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_IRC @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_IRC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_LABELS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_LABELS new file mode 100644 index 000000000000..d1b4da6338a2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_LABELS @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_LABELS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_MARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_MARK new file mode 100644 index 000000000000..e8915b4a5f99 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_MARK @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_MARK=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_NETBIOS_NS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_NETBIOS_NS new file mode 100644 index 000000000000..dc0343f638d5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_NETBIOS_NS @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_NETBIOS_NS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_PPTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_PPTP new file mode 100644 index 000000000000..a1a813aaf23b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_PPTP @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_PPTP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_PROCFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_PROCFS new file mode 100644 index 000000000000..11ac5f67b294 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_PROCFS @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_PROCFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SANE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SANE new file mode 100644 index 000000000000..a17c92be62f9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SANE @@ -0,0 +1 @@ 
+CONFIG_NF_CONNTRACK_SANE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SECMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SECMARK new file mode 100644 index 000000000000..9490d56ed23a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SECMARK @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_SECMARK=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SIP new file mode 100644 index 000000000000..82f089e80c81 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SIP @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_SIP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SNMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SNMP new file mode 100644 index 000000000000..44c5aec3708f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_SNMP @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_SNMP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TFTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TFTP new file mode 100644 index 000000000000..161b2f0077a8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TFTP @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_TFTP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TIMEOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TIMEOUT new file mode 100644 index 000000000000..24a71e9388a5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TIMEOUT @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_TIMEOUT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TIMESTAMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TIMESTAMP new file mode 100644 index 000000000000..09d9e909bb90 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_TIMESTAMP @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_TIMESTAMP=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_ZONES b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_ZONES new file mode 100644 index 000000000000..e4b2f40cdffb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CONNTRACK_ZONES @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_ZONES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK new file mode 100644 index 000000000000..805a8bc939a9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK @@ -0,0 +1 @@ +CONFIG_NF_CT_NETLINK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK_HELPER b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK_HELPER new file mode 100644 index 000000000000..6dc2ad6c7021 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK_HELPER @@ -0,0 +1 @@ +CONFIG_NF_CT_NETLINK_HELPER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK_TIMEOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK_TIMEOUT new file mode 100644 index 000000000000..57a6aac8509a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_NETLINK_TIMEOUT @@ -0,0 +1 @@ +CONFIG_NF_CT_NETLINK_TIMEOUT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_DCCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_DCCP new file mode 100644 index 000000000000..87cb5eaf980a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_DCCP @@ -0,0 +1 @@ +CONFIG_NF_CT_PROTO_DCCP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_GRE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_GRE new file mode 100644 index 000000000000..acbbf595f867 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_GRE @@ -0,0 +1 @@ +CONFIG_NF_CT_PROTO_GRE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_SCTP 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_SCTP new file mode 100644 index 000000000000..c882bb26292c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_SCTP @@ -0,0 +1 @@ +CONFIG_NF_CT_PROTO_SCTP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_UDPLITE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_UDPLITE new file mode 100644 index 000000000000..25c61d430641 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_CT_PROTO_UDPLITE @@ -0,0 +1 @@ +CONFIG_NF_CT_PROTO_UDPLITE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DEFRAG_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DEFRAG_IPV6 new file mode 100644 index 000000000000..6540eb96d8c3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DEFRAG_IPV6 @@ -0,0 +1 @@ +CONFIG_NF_DEFRAG_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_IPV4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_IPV4 new file mode 100644 index 000000000000..7c349df6c16c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_IPV4 @@ -0,0 +1 @@ +CONFIG_NF_DUP_IPV4=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_IPV6 new file mode 100644 index 000000000000..a130933ed994 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_IPV6 @@ -0,0 +1 @@ +CONFIG_NF_DUP_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_NETDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_NETDEV new file mode 100644 index 000000000000..d058a0ab802d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_DUP_NETDEV @@ -0,0 +1 @@ +CONFIG_NF_DUP_NETDEV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE new file mode 100644 index 000000000000..a2b5e03a7d8c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE 
@@ -0,0 +1 @@ +CONFIG_NF_FLOW_TABLE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE_INET b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE_INET new file mode 100644 index 000000000000..f41129cf14a6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_FLOW_TABLE_INET @@ -0,0 +1 @@ +CONFIG_NF_FLOW_TABLE_INET=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_ARP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_ARP new file mode 100644 index 000000000000..561b42dbb05d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_ARP @@ -0,0 +1 @@ +CONFIG_NF_LOG_ARP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_IPV4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_IPV4 new file mode 100644 index 000000000000..97d73f968877 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_IPV4 @@ -0,0 +1 @@ +CONFIG_NF_LOG_IPV4=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_IPV6 new file mode 100644 index 000000000000..dbfa7adfbb58 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_LOG_IPV6 @@ -0,0 +1 @@ +CONFIG_NF_LOG_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_AMANDA b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_AMANDA new file mode 100644 index 000000000000..4e670b072470 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_AMANDA @@ -0,0 +1 @@ +CONFIG_NF_NAT_AMANDA=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_FTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_FTP new file mode 100644 index 000000000000..f760b7fff4c9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_FTP @@ -0,0 +1 @@ +CONFIG_NF_NAT_FTP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_H323 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_H323 new file mode 100644 index 000000000000..0af2054a49b9 --- /dev/null 
+++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_H323 @@ -0,0 +1 @@ +CONFIG_NF_NAT_H323=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_IRC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_IRC new file mode 100644 index 000000000000..7db14c9ce128 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_IRC @@ -0,0 +1 @@ +CONFIG_NF_NAT_IRC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_MASQUERADE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_MASQUERADE new file mode 100644 index 000000000000..2039d997069a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_MASQUERADE @@ -0,0 +1 @@ +CONFIG_NF_NAT_MASQUERADE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_PPTP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_PPTP new file mode 100644 index 000000000000..6026fcc87534 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_PPTP @@ -0,0 +1 @@ +CONFIG_NF_NAT_PPTP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_REDIRECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_REDIRECT new file mode 100644 index 000000000000..4fa55b7848ca --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_REDIRECT @@ -0,0 +1 @@ +CONFIG_NF_NAT_REDIRECT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_SIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_SIP new file mode 100644 index 000000000000..390f1fce80da --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_SIP @@ -0,0 +1 @@ +CONFIG_NF_NAT_SIP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_SNMP_BASIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_SNMP_BASIC new file mode 100644 index 000000000000..b73738613728 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_SNMP_BASIC @@ -0,0 +1 @@ +CONFIG_NF_NAT_SNMP_BASIC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_TFTP 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_TFTP new file mode 100644 index 000000000000..099ed3e59cc8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_NAT_TFTP @@ -0,0 +1 @@ +CONFIG_NF_NAT_TFTP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_REJECT_IPV4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_REJECT_IPV4 new file mode 100644 index 000000000000..8223fd9009ed --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_REJECT_IPV4 @@ -0,0 +1 @@ +CONFIG_NF_REJECT_IPV4=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_REJECT_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_REJECT_IPV6 new file mode 100644 index 000000000000..efa588cd51a5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_REJECT_IPV6 @@ -0,0 +1 @@ +CONFIG_NF_REJECT_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_SOCKET_IPV4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_SOCKET_IPV4 new file mode 100644 index 000000000000..330b7cf66edb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_SOCKET_IPV4 @@ -0,0 +1 @@ +CONFIG_NF_SOCKET_IPV4=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_SOCKET_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_SOCKET_IPV6 new file mode 100644 index 000000000000..f3ec9e0fda48 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_SOCKET_IPV6 @@ -0,0 +1 @@ +CONFIG_NF_SOCKET_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_ARP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_ARP new file mode 100644 index 000000000000..5e3cf6e4b311 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_ARP @@ -0,0 +1 @@ +CONFIG_NF_TABLES_ARP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_BRIDGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_BRIDGE new file mode 100644 index 000000000000..bfdd62752840 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_BRIDGE @@ 
-0,0 +1 @@ +CONFIG_NF_TABLES_BRIDGE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_NETDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_NETDEV new file mode 100644 index 000000000000..2fd27f8e9ad7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TABLES_NETDEV @@ -0,0 +1 @@ +CONFIG_NF_TABLES_NETDEV=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TPROXY_IPV6 b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TPROXY_IPV6 new file mode 100644 index 000000000000..d8785321cd61 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NF_TPROXY_IPV6 @@ -0,0 +1 @@ +CONFIG_NF_TPROXY_IPV6=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NLMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_NLMON new file mode 100644 index 000000000000..53e8b7742627 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NLMON @@ -0,0 +1 @@ +CONFIG_NLMON=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NODES_SHIFT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NODES_SHIFT new file mode 100644 index 000000000000..59f7bcc00158 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NODES_SHIFT @@ -0,0 +1 @@ +CONFIG_NODES_SHIFT=6 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG new file mode 100644 index 000000000000..6a31e2d26c9d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG @@ -0,0 +1 @@ +CONFIG_NOUVEAU_DEBUG=5 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG_DEFAULT new file mode 100644 index 000000000000..521d58d7d5ce --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NOUVEAU_DEBUG_DEFAULT @@ -0,0 +1 @@ +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NTB b/anolis/configs/L1-RECOMMEND/default/CONFIG_NTB new file mode 100644 index 000000000000..f9d9791f4855 
--- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NTB @@ -0,0 +1 @@ +CONFIG_NTB=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NUMA_BALANCING b/anolis/configs/L1-RECOMMEND/default/CONFIG_NUMA_BALANCING new file mode 100644 index 000000000000..9de89d51d8df --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NUMA_BALANCING @@ -0,0 +1 @@ +CONFIG_NUMA_BALANCING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NUMA_BALANCING_DEFAULT_ENABLED b/anolis/configs/L1-RECOMMEND/default/CONFIG_NUMA_BALANCING_DEFAULT_ENABLED new file mode 100644 index 000000000000..5f7402180579 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NUMA_BALANCING_DEFAULT_ENABLED @@ -0,0 +1 @@ +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NVDIMM_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVDIMM_KEYS new file mode 100644 index 000000000000..a844847b2f38 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVDIMM_KEYS @@ -0,0 +1 @@ +CONFIG_NVDIMM_KEYS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NVMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVMEM new file mode 100644 index 000000000000..df779d8ecac5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVMEM @@ -0,0 +1 @@ +CONFIG_NVMEM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NVMEM_SYSFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVMEM_SYSFS new file mode 100644 index 000000000000..decd343e67fb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVMEM_SYSFS @@ -0,0 +1 @@ +CONFIG_NVMEM_SYSFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NVME_FC b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVME_FC new file mode 100644 index 000000000000..2152575d9dba --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NVME_FC @@ -0,0 +1 @@ +CONFIG_NVME_FC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH new file mode 100644 index 000000000000..82b61f8a1688 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH @@ -0,0 +1 @@ +CONFIG_OPENVSWITCH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_GENEVE b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_GENEVE new file mode 100644 index 000000000000..158138e6595b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_GENEVE @@ -0,0 +1 @@ +CONFIG_OPENVSWITCH_GENEVE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_GRE b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_GRE new file mode 100644 index 000000000000..462594567347 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_GRE @@ -0,0 +1 @@ +CONFIG_OPENVSWITCH_GRE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_VXLAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_VXLAN new file mode 100644 index 000000000000..c572c1296987 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OPENVSWITCH_VXLAN @@ -0,0 +1 @@ +CONFIG_OPENVSWITCH_VXLAN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OSNOISE_TRACER b/anolis/configs/L1-RECOMMEND/default/CONFIG_OSNOISE_TRACER new file mode 100644 index 000000000000..07e643608e53 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OSNOISE_TRACER @@ -0,0 +1 @@ +CONFIG_OSNOISE_TRACER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_INDEX b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_INDEX new file mode 100644 index 000000000000..c61cab2ace07 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_INDEX @@ -0,0 +1 @@ +CONFIG_OVERLAY_FS_INDEX=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_METACOPY b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_METACOPY new file mode 100644 index 000000000000..6b4564cf1774 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_METACOPY @@ -0,0 +1 @@ +# CONFIG_OVERLAY_FS_METACOPY is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_NFS_EXPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_NFS_EXPORT new file mode 100644 index 000000000000..6b3609a32894 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_NFS_EXPORT @@ -0,0 +1 @@ +# CONFIG_OVERLAY_FS_NFS_EXPORT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW new file mode 100644 index 000000000000..23e8ade165b6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW @@ -0,0 +1 @@ +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_REDIRECT_DIR b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_REDIRECT_DIR new file mode 100644 index 000000000000..80cb207a956f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_REDIRECT_DIR @@ -0,0 +1 @@ +CONFIG_OVERLAY_FS_REDIRECT_DIR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_XINO_AUTO b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_XINO_AUTO new file mode 100644 index 000000000000..3a73c51ee49e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_OVERLAY_FS_XINO_AUTO @@ -0,0 +1 @@ +# CONFIG_OVERLAY_FS_XINO_AUTO is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PACKET_DIAG b/anolis/configs/L1-RECOMMEND/default/CONFIG_PACKET_DIAG new file mode 100644 index 000000000000..f9e9bf425944 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PACKET_DIAG @@ -0,0 +1 @@ +CONFIG_PACKET_DIAG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_EXTENSION b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_EXTENSION new file mode 100644 index 000000000000..f55df4f4e264 --- 
/dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_EXTENSION @@ -0,0 +1 @@ +# CONFIG_PAGE_EXTENSION is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_OWNER b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_OWNER new file mode 100644 index 000000000000..441661dcfade --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_OWNER @@ -0,0 +1 @@ +# CONFIG_PAGE_OWNER is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_POISONING b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_POISONING new file mode 100644 index 000000000000..0602168f36e6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_POISONING @@ -0,0 +1 @@ +# CONFIG_PAGE_POISONING is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_REPORTING b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_REPORTING new file mode 100644 index 000000000000..454926ab42ef --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGE_REPORTING @@ -0,0 +1 @@ +CONFIG_PAGE_REPORTING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PARAVIRT_TIME_ACCOUNTING b/anolis/configs/L1-RECOMMEND/default/CONFIG_PARAVIRT_TIME_ACCOUNTING new file mode 100644 index 000000000000..176320ad5896 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PARAVIRT_TIME_ACCOUNTING @@ -0,0 +1 @@ +CONFIG_PARAVIRT_TIME_ACCOUNTING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEAER b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEAER new file mode 100644 index 000000000000..47b8ec3cad54 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEAER @@ -0,0 +1 @@ +CONFIG_PCIEAER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEASPM b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEASPM new file mode 100644 index 000000000000..5233234fe949 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEASPM @@ -0,0 +1 @@ +CONFIG_PCIEASPM=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEASPM_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEASPM_DEFAULT new file mode 100644 index 000000000000..cc46215a4b21 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIEASPM_DEFAULT @@ -0,0 +1 @@ +CONFIG_PCIEASPM_DEFAULT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_DPC b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_DPC new file mode 100644 index 000000000000..e45e816fc8b2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_DPC @@ -0,0 +1 @@ +CONFIG_PCIE_DPC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_ECRC b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_ECRC new file mode 100644 index 000000000000..e44624ce1b3d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_ECRC @@ -0,0 +1 @@ +CONFIG_PCIE_ECRC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_EDR b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_EDR new file mode 100644 index 000000000000..9c6ee7bc6bdb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCIE_EDR @@ -0,0 +1 @@ +CONFIG_PCIE_EDR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_PASID b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_PASID new file mode 100644 index 000000000000..33abee91ca0a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_PASID @@ -0,0 +1 @@ +CONFIG_PCI_PASID=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_PRI b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_PRI new file mode 100644 index 000000000000..4f25cd0ae505 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_PRI @@ -0,0 +1 @@ +CONFIG_PCI_PRI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_QUIRKS b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_QUIRKS new file mode 100644 index 000000000000..ddc2d4cb1149 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_QUIRKS @@ -0,0 +1 @@ +CONFIG_PCI_QUIRKS=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_PERCPU_STATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_PERCPU_STATS new file mode 100644 index 000000000000..873749756446 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PERCPU_STATS @@ -0,0 +1 @@ +# CONFIG_PERCPU_STATS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PERCPU_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_PERCPU_TEST new file mode 100644 index 000000000000..f3d441c00e2b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PERCPU_TEST @@ -0,0 +1 @@ +# CONFIG_PERCPU_TEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PERSISTENT_KEYRINGS b/anolis/configs/L1-RECOMMEND/default/CONFIG_PERSISTENT_KEYRINGS new file mode 100644 index 000000000000..6894726868c0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PERSISTENT_KEYRINGS @@ -0,0 +1 @@ +CONFIG_PERSISTENT_KEYRINGS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PHYLIB b/anolis/configs/L1-RECOMMEND/default/CONFIG_PHYLIB new file mode 100644 index 000000000000..64ef522339b4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PHYLIB @@ -0,0 +1 @@ +CONFIG_PHYLIB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PKCS7_TEST_KEY b/anolis/configs/L1-RECOMMEND/default/CONFIG_PKCS7_TEST_KEY new file mode 100644 index 000000000000..ba15fabfb2df --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PKCS7_TEST_KEY @@ -0,0 +1 @@ +# CONFIG_PKCS7_TEST_KEY is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PKCS8_PRIVATE_KEY_PARSER b/anolis/configs/L1-RECOMMEND/default/CONFIG_PKCS8_PRIVATE_KEY_PARSER new file mode 100644 index 000000000000..f7a7c8539892 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PKCS8_PRIVATE_KEY_PARSER @@ -0,0 +1 @@ +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PMBUS b/anolis/configs/L1-RECOMMEND/default/CONFIG_PMBUS new file mode 100644 index 
000000000000..8c904bea1883 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PMBUS @@ -0,0 +1 @@ +CONFIG_PMBUS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_ADVANCED_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_ADVANCED_DEBUG new file mode 100644 index 000000000000..5ebe2ad86343 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_ADVANCED_DEBUG @@ -0,0 +1 @@ +# CONFIG_PM_ADVANCED_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_AUTOSLEEP b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_AUTOSLEEP new file mode 100644 index 000000000000..f79c502c3619 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_AUTOSLEEP @@ -0,0 +1 @@ +# CONFIG_PM_AUTOSLEEP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_DEBUG new file mode 100644 index 000000000000..7bdf35967d06 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_DEBUG @@ -0,0 +1 @@ +CONFIG_PM_DEBUG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP new file mode 100644 index 000000000000..94359f9ce90c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP @@ -0,0 +1 @@ +CONFIG_PM_SLEEP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP_DEBUG new file mode 100644 index 000000000000..2af0abe869c7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP_DEBUG @@ -0,0 +1 @@ +CONFIG_PM_SLEEP_DEBUG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP_SMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP_SMP new file mode 100644 index 000000000000..2927603759bb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_SLEEP_SMP @@ -0,0 +1 @@ +CONFIG_PM_SLEEP_SMP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_STD_PARTITION 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_STD_PARTITION new file mode 100644 index 000000000000..f4866f0ea2e0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_STD_PARTITION @@ -0,0 +1 @@ +CONFIG_PM_STD_PARTITION="" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_TEST_SUSPEND b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_TEST_SUSPEND new file mode 100644 index 000000000000..f0311233a350 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_TEST_SUSPEND @@ -0,0 +1 @@ +# CONFIG_PM_TEST_SUSPEND is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_WAKELOCKS b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_WAKELOCKS new file mode 100644 index 000000000000..8c4739d32b88 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PM_WAKELOCKS @@ -0,0 +1 @@ +# CONFIG_PM_WAKELOCKS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE b/anolis/configs/L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE new file mode 100644 index 000000000000..6991d0bcbb21 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE @@ -0,0 +1 @@ +CONFIG_POSIX_MQUEUE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE_SYSCTL b/anolis/configs/L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE_SYSCTL new file mode 100644 index 000000000000..23783f9a63bc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_POSIX_MQUEUE_SYSCTL @@ -0,0 +1 @@ +CONFIG_POSIX_MQUEUE_SYSCTL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_POWER_RESET b/anolis/configs/L1-RECOMMEND/default/CONFIG_POWER_RESET new file mode 100644 index 000000000000..453890b18e26 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_POWER_RESET @@ -0,0 +1 @@ +CONFIG_POWER_RESET=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PPP b/anolis/configs/L1-RECOMMEND/default/CONFIG_PPP new file mode 100644 index 000000000000..f2e4de8c9216 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PPP @@ 
-0,0 +1 @@ +CONFIG_PPP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PPPOE b/anolis/configs/L1-RECOMMEND/default/CONFIG_PPPOE new file mode 100644 index 000000000000..3451811b54fa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PPPOE @@ -0,0 +1 @@ +CONFIG_PPPOE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_CHILDREN b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_CHILDREN new file mode 100644 index 000000000000..e0d900831e35 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_CHILDREN @@ -0,0 +1 @@ +CONFIG_PROC_CHILDREN=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_EVENTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_EVENTS new file mode 100644 index 000000000000..3755504b7dd2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_EVENTS @@ -0,0 +1 @@ +CONFIG_PROC_EVENTS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_VMCORE_DEVICE_DUMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_VMCORE_DEVICE_DUMP new file mode 100644 index 000000000000..1a63c6ae7a45 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_VMCORE_DEVICE_DUMP @@ -0,0 +1 @@ +CONFIG_PROC_VMCORE_DEVICE_DUMP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PROFILE_ANNOTATED_BRANCHES b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROFILE_ANNOTATED_BRANCHES new file mode 100644 index 000000000000..ea7229988789 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROFILE_ANNOTATED_BRANCHES @@ -0,0 +1 @@ +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PROFILING b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROFILING new file mode 100644 index 000000000000..5c7124d60119 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROFILING @@ -0,0 +1 @@ +CONFIG_PROFILING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PSAMPLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSAMPLE new file mode 100644 
index 000000000000..fca0860ea69f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSAMPLE @@ -0,0 +1 @@ +CONFIG_PSAMPLE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PSI b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSI new file mode 100644 index 000000000000..72452cf33af8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSI @@ -0,0 +1 @@ +CONFIG_PSI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PSI_DEFAULT_DISABLED b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSI_DEFAULT_DISABLED new file mode 100644 index 000000000000..35dd99eec061 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSI_DEFAULT_DISABLED @@ -0,0 +1 @@ +CONFIG_PSI_DEFAULT_DISABLED=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE new file mode 100644 index 000000000000..e7ffca20825c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE @@ -0,0 +1 @@ +CONFIG_PSTORE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_COMPRESS b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_COMPRESS new file mode 100644 index 000000000000..92d9fa1aad2e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_COMPRESS @@ -0,0 +1 @@ +CONFIG_PSTORE_COMPRESS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_CONSOLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_CONSOLE new file mode 100644 index 000000000000..c740961f5cc8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_CONSOLE @@ -0,0 +1 @@ +CONFIG_PSTORE_CONSOLE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_RAM b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_RAM new file mode 100644 index 000000000000..cf887d29571e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PSTORE_RAM @@ -0,0 +1 @@ +CONFIG_PSTORE_RAM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_QFMT_V1 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_QFMT_V1 new file mode 100644 index 000000000000..ca782388a577 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_QFMT_V1 @@ -0,0 +1 @@ +# CONFIG_QFMT_V1 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_QFMT_V2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_QFMT_V2 new file mode 100644 index 000000000000..961af550cac3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_QFMT_V2 @@ -0,0 +1 @@ +CONFIG_QFMT_V2=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_QUOTA_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_QUOTA_DEBUG new file mode 100644 index 000000000000..f7312a8411be --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_QUOTA_DEBUG @@ -0,0 +1 @@ +# CONFIG_QUOTA_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_QUOTA_NETLINK_INTERFACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_QUOTA_NETLINK_INTERFACE new file mode 100644 index 000000000000..83abecebb841 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_QUOTA_NETLINK_INTERFACE @@ -0,0 +1 @@ +CONFIG_QUOTA_NETLINK_INTERFACE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID6_PQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID6_PQ new file mode 100644 index 000000000000..3b150e43bdd0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID6_PQ @@ -0,0 +1 @@ +CONFIG_RAID6_PQ=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID6_PQ_BENCHMARK b/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID6_PQ_BENCHMARK new file mode 100644 index 000000000000..1f010f386359 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID6_PQ_BENCHMARK @@ -0,0 +1 @@ +CONFIG_RAID6_PQ_BENCHMARK=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID_ATTRS b/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID_ATTRS new file mode 100644 index 000000000000..6ba5f1cc4726 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RAID_ATTRS @@ -0,0 +1 @@ 
+CONFIG_RAID_ATTRS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET new file mode 100644 index 000000000000..c08960cff891 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET @@ -0,0 +1 @@ +CONFIG_RANDOMIZE_KSTACK_OFFSET=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT new file mode 100644 index 000000000000..d680659c1703 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT @@ -0,0 +1 @@ +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_CPU_STALL_TIMEOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_CPU_STALL_TIMEOUT new file mode 100644 index 000000000000..472dbad6eadf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_CPU_STALL_TIMEOUT @@ -0,0 +1 @@ +CONFIG_RCU_CPU_STALL_TIMEOUT=60 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_EQS_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_EQS_DEBUG new file mode 100644 index 000000000000..225c6bd3a6ee --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_EQS_DEBUG @@ -0,0 +1 @@ +# CONFIG_RCU_EQS_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_EXPERT b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_EXPERT new file mode 100644 index 000000000000..8c9922cf3d2e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_EXPERT @@ -0,0 +1 @@ +# CONFIG_RCU_EXPERT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_NOCB_CPU b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_NOCB_CPU new file mode 100644 index 000000000000..19a9621357b9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_NOCB_CPU @@ -0,0 +1 @@ +CONFIG_RCU_NOCB_CPU=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_REF_SCALE_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_REF_SCALE_TEST new file mode 100644 index 000000000000..50d7d874be7a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_REF_SCALE_TEST @@ -0,0 +1 @@ +# CONFIG_RCU_REF_SCALE_TEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_SCALE_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_SCALE_TEST new file mode 100644 index 000000000000..af6860852e89 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_SCALE_TEST @@ -0,0 +1 @@ +# CONFIG_RCU_SCALE_TEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_TORTURE_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_TORTURE_TEST new file mode 100644 index 000000000000..ee84473477c4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_TORTURE_TEST @@ -0,0 +1 @@ +# CONFIG_RCU_TORTURE_TEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_TRACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_TRACE new file mode 100644 index 000000000000..4acae1aab8c7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RCU_TRACE @@ -0,0 +1 @@ +# CONFIG_RCU_TRACE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RDMA_RXE b/anolis/configs/L1-RECOMMEND/default/CONFIG_RDMA_RXE new file mode 100644 index 000000000000..53cd17fc227e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RDMA_RXE @@ -0,0 +1 @@ +CONFIG_RDMA_RXE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RDMA_SIW b/anolis/configs/L1-RECOMMEND/default/CONFIG_RDMA_SIW new file mode 100644 index 000000000000..3bc1ec48a5a0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RDMA_SIW @@ -0,0 +1 @@ +CONFIG_RDMA_SIW=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_BZIP2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_BZIP2 new file mode 100644 index 000000000000..9d25ca811185 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_BZIP2 @@ -0,0 +1 @@ +CONFIG_RD_BZIP2=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_GZIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_GZIP new file mode 100644 index 000000000000..d38a59a2bdb2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_GZIP @@ -0,0 +1 @@ +CONFIG_RD_GZIP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZ4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZ4 new file mode 100644 index 000000000000..ecc2a70486a3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZ4 @@ -0,0 +1 @@ +CONFIG_RD_LZ4=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZMA new file mode 100644 index 000000000000..02827c69bec1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZMA @@ -0,0 +1 @@ +CONFIG_RD_LZMA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZO b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZO new file mode 100644 index 000000000000..4734aec1da08 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_LZO @@ -0,0 +1 @@ +CONFIG_RD_LZO=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_XZ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_XZ new file mode 100644 index 000000000000..bbd13fc54c3c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_XZ @@ -0,0 +1 @@ +CONFIG_RD_XZ=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_ZSTD b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_ZSTD new file mode 100644 index 000000000000..da1496ccb2e2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RD_ZSTD @@ -0,0 +1 @@ +CONFIG_RD_ZSTD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_READABLE_ASM b/anolis/configs/L1-RECOMMEND/default/CONFIG_READABLE_ASM new file mode 100644 index 000000000000..147b2c061836 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_READABLE_ASM @@ -0,0 +1 @@ +# 
CONFIG_READABLE_ASM is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_READ_ONLY_THP_FOR_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_READ_ONLY_THP_FOR_FS new file mode 100644 index 000000000000..9b7a59151f7d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_READ_ONLY_THP_FOR_FS @@ -0,0 +1 @@ +CONFIG_READ_ONLY_THP_FOR_FS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RICH_CONTAINER b/anolis/configs/L1-RECOMMEND/default/CONFIG_RICH_CONTAINER new file mode 100644 index 000000000000..18c58f3058f3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RICH_CONTAINER @@ -0,0 +1 @@ +CONFIG_RICH_CONTAINER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RPCSEC_GSS_KRB5 b/anolis/configs/L1-RECOMMEND/default/CONFIG_RPCSEC_GSS_KRB5 new file mode 100644 index 000000000000..81fb2fe892e0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RPCSEC_GSS_KRB5 @@ -0,0 +1 @@ +CONFIG_RPCSEC_GSS_KRB5=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS new file mode 100644 index 000000000000..bea3441816b6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS @@ -0,0 +1 @@ +CONFIG_RTC_HCTOSYS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS_DEVICE b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS_DEVICE new file mode 100644 index 000000000000..0e6f28849f89 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_HCTOSYS_DEVICE @@ -0,0 +1 @@ +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_DEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_DEV new file mode 100644 index 000000000000..9481b9dfa59f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_DEV @@ -0,0 +1 @@ +CONFIG_RTC_INTF_DEV=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_PROC 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_PROC new file mode 100644 index 000000000000..12f03421672d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_PROC @@ -0,0 +1 @@ +CONFIG_RTC_INTF_PROC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_SYSFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_SYSFS new file mode 100644 index 000000000000..7c60145874f2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_INTF_SYSFS @@ -0,0 +1 @@ +CONFIG_RTC_INTF_SYSFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_NVMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_NVMEM new file mode 100644 index 000000000000..bee8859105a4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_NVMEM @@ -0,0 +1 @@ +CONFIG_RTC_NVMEM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC new file mode 100644 index 000000000000..f3581c76b8c6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC @@ -0,0 +1 @@ +CONFIG_RTC_SYSTOHC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC_DEVICE b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC_DEVICE new file mode 100644 index 000000000000..031950602e96 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RTC_SYSTOHC_DEVICE @@ -0,0 +1 @@ +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RT_GROUP_SCHED b/anolis/configs/L1-RECOMMEND/default/CONFIG_RT_GROUP_SCHED new file mode 100644 index 000000000000..15dbb458364a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RT_GROUP_SCHED @@ -0,0 +1 @@ +CONFIG_RT_GROUP_SCHED=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RUNTIME_TESTING_MENU b/anolis/configs/L1-RECOMMEND/default/CONFIG_RUNTIME_TESTING_MENU new file mode 100644 index 000000000000..7d54fe1195e4 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_RUNTIME_TESTING_MENU @@ -0,0 +1 @@ +CONFIG_RUNTIME_TESTING_MENU=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_AHCI b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_AHCI new file mode 100644 index 000000000000..18bfae1bc284 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_AHCI @@ -0,0 +1 @@ +CONFIG_SATA_AHCI=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_AHCI_PLATFORM b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_AHCI_PLATFORM new file mode 100644 index 000000000000..ff2a626abffb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_AHCI_PLATFORM @@ -0,0 +1 @@ +CONFIG_SATA_AHCI_PLATFORM=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_MOBILE_LPM_POLICY b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_MOBILE_LPM_POLICY new file mode 100644 index 000000000000..a510f6d0b50b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_MOBILE_LPM_POLICY @@ -0,0 +1 @@ +CONFIG_SATA_MOBILE_LPM_POLICY=0 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_PMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_PMP new file mode 100644 index 000000000000..11f39dd5035e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SATA_PMP @@ -0,0 +1 @@ +CONFIG_SATA_PMP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_ACPU b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_ACPU new file mode 100644 index 000000000000..770ca7798e77 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_ACPU @@ -0,0 +1 @@ +CONFIG_SCHED_ACPU=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_CLUSTER b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_CLUSTER new file mode 100644 index 000000000000..046feb27d436 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_CLUSTER @@ -0,0 +1 @@ +CONFIG_SCHED_CLUSTER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_INFO 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_INFO new file mode 100644 index 000000000000..aea94368094a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_INFO @@ -0,0 +1 @@ +CONFIG_SCHED_INFO=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_SLI b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_SLI new file mode 100644 index 000000000000..31147745cfef --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_SLI @@ -0,0 +1 @@ +CONFIG_SCHED_SLI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_SMT b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_SMT new file mode 100644 index 000000000000..ed3f04a207ad --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_SMT @@ -0,0 +1 @@ +CONFIG_SCHED_SMT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_TRACER b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_TRACER new file mode 100644 index 000000000000..413447ad6ec1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCHED_TRACER @@ -0,0 +1 @@ +CONFIG_SCHED_TRACER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_CONSTANTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_CONSTANTS new file mode 100644 index 000000000000..7d1c6cb2226c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_CONSTANTS @@ -0,0 +1 @@ +CONFIG_SCSI_CONSTANTS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DEBUG new file mode 100644 index 000000000000..86b328498d36 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DEBUG @@ -0,0 +1 @@ +CONFIG_SCSI_DEBUG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DH b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DH new file mode 100644 index 000000000000..b73df00a21cf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DH @@ -0,0 +1 @@ +CONFIG_SCSI_DH=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DMA new file mode 100644 index 000000000000..55138cc94c49 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_DMA @@ -0,0 +1 @@ +CONFIG_SCSI_DMA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_ENCLOSURE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_ENCLOSURE new file mode 100644 index 000000000000..adc36eab068c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_ENCLOSURE @@ -0,0 +1 @@ +CONFIG_SCSI_ENCLOSURE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_FC_ATTRS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_FC_ATTRS new file mode 100644 index 000000000000..71f848d2d5c8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_FC_ATTRS @@ -0,0 +1 @@ +CONFIG_SCSI_FC_ATTRS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_ISCSI_ATTRS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_ISCSI_ATTRS new file mode 100644 index 000000000000..e75a2fb48df8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_ISCSI_ATTRS @@ -0,0 +1 @@ +CONFIG_SCSI_ISCSI_ATTRS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LOGGING b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LOGGING new file mode 100644 index 000000000000..5739436c0d89 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LOGGING @@ -0,0 +1 @@ +CONFIG_SCSI_LOGGING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LOWLEVEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LOWLEVEL new file mode 100644 index 000000000000..0aa35b3047d4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LOWLEVEL @@ -0,0 +1 @@ +CONFIG_SCSI_LOWLEVEL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPI3MR b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPI3MR new file mode 100644 index 000000000000..ba2223b78c90 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPI3MR @@ -0,0 +1 @@ +CONFIG_SCSI_MPI3MR=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS new file mode 100644 index 000000000000..b9b08c3fb1dc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS @@ -0,0 +1 @@ +CONFIG_SCSI_MPT2SAS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS_MAX_SGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS_MAX_SGE new file mode 100644 index 000000000000..b0633b38ddd9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT2SAS_MAX_SGE @@ -0,0 +1 @@ +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS new file mode 100644 index 000000000000..dc5f4d8ffb77 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS @@ -0,0 +1 @@ +CONFIG_SCSI_MPT3SAS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS_MAX_SGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS_MAX_SGE new file mode 100644 index 000000000000..b5f29773f06c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_MPT3SAS_MAX_SGE @@ -0,0 +1 @@ +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_PROC_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_PROC_FS new file mode 100644 index 000000000000..565a44b8b920 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_PROC_FS @@ -0,0 +1 @@ +CONFIG_SCSI_PROC_FS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATA b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATA new file mode 100644 index 000000000000..5bd5a74d7dd3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATA @@ -0,0 +1 @@ +CONFIG_SCSI_SAS_ATA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATTRS 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATTRS new file mode 100644 index 000000000000..11e4323abaae --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_ATTRS @@ -0,0 +1 @@ +CONFIG_SCSI_SAS_ATTRS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_HOST_SMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_HOST_SMP new file mode 100644 index 000000000000..8227d7063f12 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_HOST_SMP @@ -0,0 +1 @@ +CONFIG_SCSI_SAS_HOST_SMP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_LIBSAS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_LIBSAS new file mode 100644 index 000000000000..31d5483879fa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SAS_LIBSAS @@ -0,0 +1 @@ +CONFIG_SCSI_SAS_LIBSAS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SCAN_ASYNC b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SCAN_ASYNC new file mode 100644 index 000000000000..7de665f4b831 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SCAN_ASYNC @@ -0,0 +1 @@ +CONFIG_SCSI_SCAN_ASYNC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SMARTPQI b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SMARTPQI new file mode 100644 index 000000000000..da9632f9abd4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SMARTPQI @@ -0,0 +1 @@ +CONFIG_SCSI_SMARTPQI=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SPI_ATTRS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SPI_ATTRS new file mode 100644 index 000000000000..fab2e0b91b57 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SPI_ATTRS @@ -0,0 +1 @@ +CONFIG_SCSI_SPI_ATTRS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SRP_ATTRS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SRP_ATTRS new file mode 100644 index 000000000000..b1f7492d328f --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_SRP_ATTRS @@ -0,0 +1 @@ +CONFIG_SCSI_SRP_ATTRS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_UFSHCD b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_UFSHCD new file mode 100644 index 000000000000..542c89fc7a88 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_UFSHCD @@ -0,0 +1 @@ +# CONFIG_SCSI_UFSHCD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_VIRTIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_VIRTIO new file mode 100644 index 000000000000..6fadaf0f2d8e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_VIRTIO @@ -0,0 +1 @@ +CONFIG_SCSI_VIRTIO=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECRETMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECRETMEM new file mode 100644 index 000000000000..440f8bc23289 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECRETMEM @@ -0,0 +1 @@ +CONFIG_SECRETMEM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECTION_MISMATCH_WARN_ONLY b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECTION_MISMATCH_WARN_ONLY new file mode 100644 index 000000000000..0a6814b814cb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECTION_MISMATCH_WARN_ONLY @@ -0,0 +1 @@ +CONFIG_SECTION_MISMATCH_WARN_ONLY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_DMESG_RESTRICT b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_DMESG_RESTRICT new file mode 100644 index 000000000000..353ad62886fa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_DMESG_RESTRICT @@ -0,0 +1 @@ +# CONFIG_SECURITY_DMESG_RESTRICT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_AVC_STATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_AVC_STATS new file mode 100644 index 000000000000..8596c96eb924 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_AVC_STATS @@ -0,0 +1 @@ 
+CONFIG_SECURITY_SELINUX_AVC_STATS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_DEVELOP b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_DEVELOP new file mode 100644 index 000000000000..b9559b49fa20 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_DEVELOP @@ -0,0 +1 @@ +CONFIG_SECURITY_SELINUX_DEVELOP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE new file mode 100644 index 000000000000..e8e9e3ea8c1e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE @@ -0,0 +1 @@ +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS new file mode 100644 index 000000000000..5e8c57ec512e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS @@ -0,0 +1 @@ +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SMACK b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SMACK new file mode 100644 index 000000000000..32bdd7cd5549 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SECURITY_SMACK @@ -0,0 +1 @@ +# CONFIG_SECURITY_SMACK is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_ALTERA_PS2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_ALTERA_PS2 new file mode 100644 index 000000000000..9e366c11a8ac --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_ALTERA_PS2 @@ -0,0 +1 @@ +CONFIG_SERIO_ALTERA_PS2=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_ARC_PS2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_ARC_PS2 new file mode 100644 index 000000000000..7535bbef9cf2 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_ARC_PS2 @@ -0,0 +1 @@ +CONFIG_SERIO_ARC_PS2=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_LIBPS2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_LIBPS2 new file mode 100644 index 000000000000..0db209ef285e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_LIBPS2 @@ -0,0 +1 @@ +CONFIG_SERIO_LIBPS2=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_RAW b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_RAW new file mode 100644 index 000000000000..29fd33a295fd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_RAW @@ -0,0 +1 @@ +CONFIG_SERIO_RAW=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_SERPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_SERPORT new file mode 100644 index 000000000000..341b61290d26 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SERIO_SERPORT @@ -0,0 +1 @@ +CONFIG_SERIO_SERPORT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SFC_FALCON b/anolis/configs/L1-RECOMMEND/default/CONFIG_SFC_FALCON new file mode 100644 index 000000000000..95bfee80fb8b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SFC_FALCON @@ -0,0 +1 @@ +# CONFIG_SFC_FALCON is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SHUFFLE_PAGE_ALLOCATOR b/anolis/configs/L1-RECOMMEND/default/CONFIG_SHUFFLE_PAGE_ALLOCATOR new file mode 100644 index 000000000000..cff3a0b960e9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SHUFFLE_PAGE_ALLOCATOR @@ -0,0 +1 @@ +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SIGNED_PE_FILE_VERIFICATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_SIGNED_PE_FILE_VERIFICATION new file mode 100644 index 000000000000..d7551a589127 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SIGNED_PE_FILE_VERIFICATION @@ -0,0 +1 @@ +CONFIG_SIGNED_PE_FILE_VERIFICATION=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_FREELIST_HARDENED b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_FREELIST_HARDENED new file mode 100644 index 000000000000..630a3ed8150e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_FREELIST_HARDENED @@ -0,0 +1 @@ +# CONFIG_SLAB_FREELIST_HARDENED is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_FREELIST_RANDOM b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_FREELIST_RANDOM new file mode 100644 index 000000000000..2ec53d02bc4f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_FREELIST_RANDOM @@ -0,0 +1 @@ +CONFIG_SLAB_FREELIST_RANDOM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_MERGE_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_MERGE_DEFAULT new file mode 100644 index 000000000000..2baeb5938e8f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLAB_MERGE_DEFAULT @@ -0,0 +1 @@ +# CONFIG_SLAB_MERGE_DEFAULT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SLIP b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLIP new file mode 100644 index 000000000000..86e448e492ee --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SLIP @@ -0,0 +1 @@ +CONFIG_SLIP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SMC_DIAG b/anolis/configs/L1-RECOMMEND/default/CONFIG_SMC_DIAG new file mode 100644 index 000000000000..728cb1e4f36e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SMC_DIAG @@ -0,0 +1 @@ +CONFIG_SMC_DIAG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SND b/anolis/configs/L1-RECOMMEND/default/CONFIG_SND new file mode 100644 index 000000000000..5dfa0106a952 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SND @@ -0,0 +1 @@ +# CONFIG_SND is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SOFT_WATCHDOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_SOFT_WATCHDOG new file mode 100644 index 000000000000..0e0d36a4806d --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_SOFT_WATCHDOG @@ -0,0 +1 @@ +CONFIG_SOFT_WATCHDOG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SPI_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_SPI_DEBUG new file mode 100644 index 000000000000..eb5fcb37be3d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SPI_DEBUG @@ -0,0 +1 @@ +# CONFIG_SPI_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_4K_DEVBLK_SIZE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_4K_DEVBLK_SIZE new file mode 100644 index 000000000000..575ec8501047 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_4K_DEVBLK_SIZE @@ -0,0 +1 @@ +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_SINGLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_SINGLE new file mode 100644 index 000000000000..5978a504eaec --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_SINGLE @@ -0,0 +1 @@ +CONFIG_SQUASHFS_DECOMP_SINGLE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_EMBEDDED b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_EMBEDDED new file mode 100644 index 000000000000..a28449ed1fc2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_EMBEDDED @@ -0,0 +1 @@ +# CONFIG_SQUASHFS_EMBEDDED is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FILE_CACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FILE_CACHE new file mode 100644 index 000000000000..fc22b2a90f80 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FILE_CACHE @@ -0,0 +1 @@ +# CONFIG_SQUASHFS_FILE_CACHE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FILE_DIRECT b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FILE_DIRECT new file mode 100644 index 000000000000..dee95e704627 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FILE_DIRECT @@ -0,0 +1 @@ +CONFIG_SQUASHFS_FILE_DIRECT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE new file mode 100644 index 000000000000..16cdab759a61 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE @@ -0,0 +1 @@ +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_LZO b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_LZO new file mode 100644 index 000000000000..8b3e6f933305 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_LZO @@ -0,0 +1 @@ +CONFIG_SQUASHFS_LZO=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_XATTR b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_XATTR new file mode 100644 index 000000000000..81b21b94c54f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_XATTR @@ -0,0 +1 @@ +CONFIG_SQUASHFS_XATTR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_XZ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_XZ new file mode 100644 index 000000000000..2a9b9bd461e3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_XZ @@ -0,0 +1 @@ +CONFIG_SQUASHFS_XZ=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_ZLIB b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_ZLIB new file mode 100644 index 000000000000..dcb246a8ef26 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_ZLIB @@ -0,0 +1 @@ +CONFIG_SQUASHFS_ZLIB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_ZSTD b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_ZSTD new file mode 100644 index 000000000000..e4707f831f67 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_ZSTD @@ -0,0 +1 @@ +# CONFIG_SQUASHFS_ZSTD is not set diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKPROTECTOR b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKPROTECTOR new file mode 100644 index 000000000000..b5942a551508 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKPROTECTOR @@ -0,0 +1 @@ +CONFIG_STACKPROTECTOR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKPROTECTOR_STRONG b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKPROTECTOR_STRONG new file mode 100644 index 000000000000..6c885445ee68 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKPROTECTOR_STRONG @@ -0,0 +1 @@ +CONFIG_STACKPROTECTOR_STRONG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKTRACE_SUPPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKTRACE_SUPPORT new file mode 100644 index 000000000000..d7f613555a90 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACKTRACE_SUPPORT @@ -0,0 +1 @@ +CONFIG_STACKTRACE_SUPPORT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STACK_TRACER b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACK_TRACER new file mode 100644 index 000000000000..643bacdcd269 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STACK_TRACER @@ -0,0 +1 @@ +CONFIG_STACK_TRACER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STATIC_KEYS_SELFTEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_STATIC_KEYS_SELFTEST new file mode 100644 index 000000000000..09446dc77d77 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STATIC_KEYS_SELFTEST @@ -0,0 +1 @@ +# CONFIG_STATIC_KEYS_SELFTEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STATIC_USERMODEHELPER b/anolis/configs/L1-RECOMMEND/default/CONFIG_STATIC_USERMODEHELPER new file mode 100644 index 000000000000..9cfc00a5ffb8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STATIC_USERMODEHELPER @@ -0,0 +1 @@ +# CONFIG_STATIC_USERMODEHELPER is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STRICT_DEVMEM 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_STRICT_DEVMEM new file mode 100644 index 000000000000..f2c0c3a6122c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STRICT_DEVMEM @@ -0,0 +1 @@ +CONFIG_STRICT_DEVMEM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_STRIP_ASM_SYMS b/anolis/configs/L1-RECOMMEND/default/CONFIG_STRIP_ASM_SYMS new file mode 100644 index 000000000000..274455389126 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_STRIP_ASM_SYMS @@ -0,0 +1 @@ +CONFIG_STRIP_ASM_SYMS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SUNRPC_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUNRPC_DEBUG new file mode 100644 index 000000000000..448eb69eaa31 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUNRPC_DEBUG @@ -0,0 +1 @@ +CONFIG_SUNRPC_DEBUG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SUNRPC_XPRT_RDMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUNRPC_XPRT_RDMA new file mode 100644 index 000000000000..b85162d8f7f0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUNRPC_XPRT_RDMA @@ -0,0 +1 @@ +CONFIG_SUNRPC_XPRT_RDMA=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SUSPEND b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUSPEND new file mode 100644 index 000000000000..68d22d8bdce5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUSPEND @@ -0,0 +1 @@ +CONFIG_SUSPEND=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SUSPEND_FREEZER b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUSPEND_FREEZER new file mode 100644 index 000000000000..f816da7cd0ba --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SUSPEND_FREEZER @@ -0,0 +1 @@ +CONFIG_SUSPEND_FREEZER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYMBOLIC_ERRNAME b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYMBOLIC_ERRNAME new file mode 100644 index 000000000000..237264c75aa9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYMBOLIC_ERRNAME @@ -0,0 +1 @@ 
+CONFIG_SYMBOLIC_ERRNAME=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYNC_FILE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYNC_FILE new file mode 100644 index 000000000000..391ab547b458 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYNC_FILE @@ -0,0 +1 @@ +CONFIG_SYNC_FILE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYNTH_EVENTS b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYNTH_EVENTS new file mode 100644 index 000000000000..6d49aea46556 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYNTH_EVENTS @@ -0,0 +1 @@ +CONFIG_SYNTH_EVENTS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSFB_SIMPLEFB b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSFB_SIMPLEFB new file mode 100644 index 000000000000..6ae3e10a489e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSFB_SIMPLEFB @@ -0,0 +1 @@ +# CONFIG_SYSFB_SIMPLEFB is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSFS_SYSCALL b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSFS_SYSCALL new file mode 100644 index 000000000000..0876b7697680 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSFS_SYSCALL @@ -0,0 +1 @@ +CONFIG_SYSFS_SYSCALL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEMPORT b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEMPORT new file mode 100644 index 000000000000..edb77a634b92 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEMPORT @@ -0,0 +1 @@ +# CONFIG_SYSTEMPORT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_BLACKLIST_HASH_LIST b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_BLACKLIST_HASH_LIST new file mode 100644 index 000000000000..858e87e78a9c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_BLACKLIST_HASH_LIST @@ -0,0 +1 @@ +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_BLACKLIST_KEYRING 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_BLACKLIST_KEYRING new file mode 100644 index 000000000000..4fcc4b31e966 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_BLACKLIST_KEYRING @@ -0,0 +1 @@ +CONFIG_SYSTEM_BLACKLIST_KEYRING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_DATA_VERIFICATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_DATA_VERIFICATION new file mode 100644 index 000000000000..0c264f7d40ee --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_DATA_VERIFICATION @@ -0,0 +1 @@ +CONFIG_SYSTEM_DATA_VERIFICATION=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_EXTRA_CERTIFICATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_EXTRA_CERTIFICATE new file mode 100644 index 000000000000..a831f7ab1820 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_EXTRA_CERTIFICATE @@ -0,0 +1 @@ +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE new file mode 100644 index 000000000000..a270cb857dfe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE @@ -0,0 +1 @@ +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_REVOCATION_LIST b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_REVOCATION_LIST new file mode 100644 index 000000000000..8e8438fd1a83 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSTEM_REVOCATION_LIST @@ -0,0 +1 @@ +# CONFIG_SYSTEM_REVOCATION_LIST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSVIPC_SYSCTL b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSVIPC_SYSCTL new file mode 100644 index 000000000000..ac42a5fcc8b4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SYSVIPC_SYSCTL @@ -0,0 +1 @@ +CONFIG_SYSVIPC_SYSCTL=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_TASKSTATS b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASKSTATS new file mode 100644 index 000000000000..2e4f141d5789 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASKSTATS @@ -0,0 +1 @@ +CONFIG_TASKSTATS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_DELAY_ACCT b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_DELAY_ACCT new file mode 100644 index 000000000000..11205fe20ac2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_DELAY_ACCT @@ -0,0 +1 @@ +CONFIG_TASK_DELAY_ACCT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_IO_ACCOUNTING b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_IO_ACCOUNTING new file mode 100644 index 000000000000..7c36f88980aa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_IO_ACCOUNTING @@ -0,0 +1 @@ +CONFIG_TASK_IO_ACCOUNTING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_XACCT b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_XACCT new file mode 100644 index 000000000000..8b61b9cb0b12 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TASK_XACCT @@ -0,0 +1 @@ +CONFIG_TASK_XACCT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_ATMEL b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_ATMEL new file mode 100644 index 000000000000..05d7d17d4c7d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_ATMEL @@ -0,0 +1 @@ +CONFIG_TCG_ATMEL=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_CRB b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_CRB new file mode 100644 index 000000000000..a36cd309dc2e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_CRB @@ -0,0 +1 @@ +CONFIG_TCG_CRB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_TIS_CORE b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_TIS_CORE new file mode 100644 index 000000000000..fc7a623eee39 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCG_TIS_CORE @@ 
-0,0 +1 @@ +CONFIG_TCG_TIS_CORE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_FILEIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_FILEIO new file mode 100644 index 000000000000..cab5895c1dcc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_FILEIO @@ -0,0 +1 @@ +CONFIG_TCM_FILEIO=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_IBLOCK b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_IBLOCK new file mode 100644 index 000000000000..0eccf352f957 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_IBLOCK @@ -0,0 +1 @@ +CONFIG_TCM_IBLOCK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_PSCSI b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_PSCSI new file mode 100644 index 000000000000..5fa5e2b3a720 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_PSCSI @@ -0,0 +1 @@ +CONFIG_TCM_PSCSI=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_USER2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_USER2 new file mode 100644 index 000000000000..df07ef1d435a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCM_USER2 @@ -0,0 +1 @@ +CONFIG_TCM_USER2=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_BIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_BIC new file mode 100644 index 000000000000..82e23b7f3f14 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_BIC @@ -0,0 +1 @@ +CONFIG_TCP_CONG_BIC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_DCTCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_DCTCP new file mode 100644 index 000000000000..f9aa892d883f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_DCTCP @@ -0,0 +1 @@ +CONFIG_TCP_CONG_DCTCP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HSTCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HSTCP new file mode 100644 index 000000000000..6f546faad473 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HSTCP @@ -0,0 +1 @@ +CONFIG_TCP_CONG_HSTCP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HTCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HTCP new file mode 100644 index 000000000000..c68bb11a6f3e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HTCP @@ -0,0 +1 @@ +CONFIG_TCP_CONG_HTCP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HYBLA b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HYBLA new file mode 100644 index 000000000000..82892dbfdc19 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_HYBLA @@ -0,0 +1 @@ +CONFIG_TCP_CONG_HYBLA=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_ILLINOIS b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_ILLINOIS new file mode 100644 index 000000000000..7dbbe73774e9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_ILLINOIS @@ -0,0 +1 @@ +CONFIG_TCP_CONG_ILLINOIS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_LP b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_LP new file mode 100644 index 000000000000..1136111b9aca --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_LP @@ -0,0 +1 @@ +CONFIG_TCP_CONG_LP=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_NV b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_NV new file mode 100644 index 000000000000..2994a8025009 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_NV @@ -0,0 +1 @@ +CONFIG_TCP_CONG_NV=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_SCALABLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_SCALABLE new file mode 100644 index 000000000000..d5b5bf020c0a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_SCALABLE @@ -0,0 +1 @@ +CONFIG_TCP_CONG_SCALABLE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_VEGAS 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_VEGAS new file mode 100644 index 000000000000..a584ed4f125d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_VEGAS @@ -0,0 +1 @@ +CONFIG_TCP_CONG_VEGAS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_VENO b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_VENO new file mode 100644 index 000000000000..e450df2f939a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_VENO @@ -0,0 +1 @@ +CONFIG_TCP_CONG_VENO=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_WESTWOOD b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_WESTWOOD new file mode 100644 index 000000000000..ca233b9df5b7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_WESTWOOD @@ -0,0 +1 @@ +CONFIG_TCP_CONG_WESTWOOD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_YEAH b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_YEAH new file mode 100644 index 000000000000..c2db3694198b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_CONG_YEAH @@ -0,0 +1 @@ +CONFIG_TCP_CONG_YEAH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_MD5SIG b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_MD5SIG new file mode 100644 index 000000000000..b92ad2fb56b6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TCP_MD5SIG @@ -0,0 +1 @@ +CONFIG_TCP_MD5SIG=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TEE b/anolis/configs/L1-RECOMMEND/default/CONFIG_TEE new file mode 100644 index 000000000000..85be673c532e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TEE @@ -0,0 +1 @@ +CONFIG_TEE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_BPF b/anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_BPF new file mode 100644 index 000000000000..c5ddc0080fba --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_BPF @@ -0,0 +1 @@ +CONFIG_TEST_BPF=m diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE new file mode 100644 index 000000000000..83801238f6ad --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE @@ -0,0 +1 @@ +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE new file mode 100644 index 000000000000..4acf93b3675d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE @@ -0,0 +1 @@ +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE new file mode 100644 index 000000000000..e70c564b44dc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE @@ -0,0 +1 @@ +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS new file mode 100644 index 000000000000..9288765d6192 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS @@ -0,0 +1 @@ +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_FAIR_SHARE b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_FAIR_SHARE new file mode 100644 index 000000000000..b32c15590063 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_FAIR_SHARE @@ -0,0 +1 @@ +CONFIG_THERMAL_GOV_FAIR_SHARE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_STEP_WISE b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_STEP_WISE new file mode 100644 index 
000000000000..614bc305eed9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_STEP_WISE @@ -0,0 +1 @@ +CONFIG_THERMAL_GOV_STEP_WISE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_USER_SPACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_USER_SPACE new file mode 100644 index 000000000000..040cf3947318 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_GOV_USER_SPACE @@ -0,0 +1 @@ +CONFIG_THERMAL_GOV_USER_SPACE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_HWMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_HWMON new file mode 100644 index 000000000000..3404084e0475 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_HWMON @@ -0,0 +1 @@ +CONFIG_THERMAL_HWMON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_NETLINK b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_NETLINK new file mode 100644 index 000000000000..8abdc692a524 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_NETLINK @@ -0,0 +1 @@ +# CONFIG_THERMAL_NETLINK is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TIGON3 b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIGON3 new file mode 100644 index 000000000000..673c01b6a2bb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIGON3 @@ -0,0 +1 @@ +CONFIG_TIGON3=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TIGON3_HWMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIGON3_HWMON new file mode 100644 index 000000000000..31215b5556b1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIGON3_HWMON @@ -0,0 +1 @@ +CONFIG_TIGON3_HWMON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TIMERLAT_TRACER b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIMERLAT_TRACER new file mode 100644 index 000000000000..560c834542ec --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIMERLAT_TRACER @@ -0,0 +1 @@ +CONFIG_TIMERLAT_TRACER=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_TIME_NS b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIME_NS new file mode 100644 index 000000000000..4480620f6f49 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TIME_NS @@ -0,0 +1 @@ +CONFIG_TIME_NS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TLS_DEVICE b/anolis/configs/L1-RECOMMEND/default/CONFIG_TLS_DEVICE new file mode 100644 index 000000000000..7a19aef6ae49 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TLS_DEVICE @@ -0,0 +1 @@ +CONFIG_TLS_DEVICE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TLS_TOE b/anolis/configs/L1-RECOMMEND/default/CONFIG_TLS_TOE new file mode 100644 index 000000000000..a6c7df399b4a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TLS_TOE @@ -0,0 +1 @@ +# CONFIG_TLS_TOE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_INODE64 b/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_INODE64 new file mode 100644 index 000000000000..cce8a2274ff4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_INODE64 @@ -0,0 +1 @@ +# CONFIG_TMPFS_INODE64 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_POSIX_ACL b/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_POSIX_ACL new file mode 100644 index 000000000000..2c4c8f4e8efc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_POSIX_ACL @@ -0,0 +1 @@ +CONFIG_TMPFS_POSIX_ACL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_XATTR b/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_XATTR new file mode 100644 index 000000000000..c83e77775eab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TMPFS_XATTR @@ -0,0 +1 @@ +CONFIG_TMPFS_XATTR=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACER_SNAPSHOT b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACER_SNAPSHOT new file mode 100644 index 000000000000..31429b63de46 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACER_SNAPSHOT @@ -0,0 +1 @@ +CONFIG_TRACER_SNAPSHOT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP new file mode 100644 index 000000000000..83708605ff25 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP @@ -0,0 +1 @@ +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACING_MAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACING_MAP new file mode 100644 index 000000000000..e463a5ad9ef8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRACING_MAP @@ -0,0 +1 @@ +CONFIG_TRACING_MAP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS new file mode 100644 index 000000000000..65f6432d9c15 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS @@ -0,0 +1 @@ +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TRANSPARENT_HUGEPAGE_MADVISE b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRANSPARENT_HUGEPAGE_MADVISE new file mode 100644 index 000000000000..018db0a0f4e2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRANSPARENT_HUGEPAGE_MADVISE @@ -0,0 +1 @@ +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS new file mode 100644 index 000000000000..921580124211 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS @@ -0,0 +1 @@ +CONFIG_TRUSTED_KEYS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS_TPM b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS_TPM new file mode 100644 index 000000000000..66895051f213 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_TRUSTED_KEYS_TPM @@ -0,0 +1 @@ +CONFIG_TRUSTED_KEYS_TPM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UBSAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_UBSAN new file mode 100644 index 000000000000..ef973c71d610 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UBSAN @@ -0,0 +1 @@ +# CONFIG_UBSAN is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UDF_FS b/anolis/configs/L1-RECOMMEND/default/CONFIG_UDF_FS new file mode 100644 index 000000000000..4c8e1e829b2f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UDF_FS @@ -0,0 +1 @@ +CONFIG_UDF_FS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UEVENT_HELPER b/anolis/configs/L1-RECOMMEND/default/CONFIG_UEVENT_HELPER new file mode 100644 index 000000000000..262a0f054872 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UEVENT_HELPER @@ -0,0 +1 @@ +# CONFIG_UEVENT_HELPER is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UID16 b/anolis/configs/L1-RECOMMEND/default/CONFIG_UID16 new file mode 100644 index 000000000000..c766b9089ef7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UID16 @@ -0,0 +1 @@ +CONFIG_UID16=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_AEC b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_AEC new file mode 100644 index 000000000000..2ee61ddb0a6f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_AEC @@ -0,0 +1 @@ +CONFIG_UIO_AEC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_CIF b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_CIF new file mode 100644 index 000000000000..6ed58ca58c93 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_CIF @@ -0,0 +1 @@ +CONFIG_UIO_CIF=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_DMEM_GENIRQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_DMEM_GENIRQ new file mode 100644 index 000000000000..2ad111cbe32a --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_DMEM_GENIRQ @@ -0,0 +1 @@ +# CONFIG_UIO_DMEM_GENIRQ is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_MF624 b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_MF624 new file mode 100644 index 000000000000..4a912c486468 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_MF624 @@ -0,0 +1 @@ +# CONFIG_UIO_MF624 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_NETX b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_NETX new file mode 100644 index 000000000000..faca8ad87222 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_NETX @@ -0,0 +1 @@ +# CONFIG_UIO_NETX is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PCI_GENERIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PCI_GENERIC new file mode 100644 index 000000000000..46f7ab71c13b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PCI_GENERIC @@ -0,0 +1 @@ +CONFIG_UIO_PCI_GENERIC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PDRV_GENIRQ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PDRV_GENIRQ new file mode 100644 index 000000000000..393d57f1d8a7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PDRV_GENIRQ @@ -0,0 +1 @@ +CONFIG_UIO_PDRV_GENIRQ=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PRUSS b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PRUSS new file mode 100644 index 000000000000..e1549ea24f0f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_PRUSS @@ -0,0 +1 @@ +# CONFIG_UIO_PRUSS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_SERCOS3 b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_SERCOS3 new file mode 100644 index 000000000000..287c23a643c0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UIO_SERCOS3 @@ -0,0 +1 @@ +CONFIG_UIO_SERCOS3=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UNIX_DIAG 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_UNIX_DIAG new file mode 100644 index 000000000000..51b222763d78 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UNIX_DIAG @@ -0,0 +1 @@ +CONFIG_UNIX_DIAG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UPROBES b/anolis/configs/L1-RECOMMEND/default/CONFIG_UPROBES new file mode 100644 index 000000000000..4822a082f41b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_UPROBES @@ -0,0 +1 @@ +CONFIG_UPROBES=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_USB_NET_DRIVERS b/anolis/configs/L1-RECOMMEND/default/CONFIG_USB_NET_DRIVERS new file mode 100644 index 000000000000..954a18989f3b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_USB_NET_DRIVERS @@ -0,0 +1 @@ +CONFIG_USB_NET_DRIVERS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_USB_PCI b/anolis/configs/L1-RECOMMEND/default/CONFIG_USB_PCI new file mode 100644 index 000000000000..26c372a3a8fb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_USB_PCI @@ -0,0 +1 @@ +CONFIG_USB_PCI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_USELIB b/anolis/configs/L1-RECOMMEND/default/CONFIG_USELIB new file mode 100644 index 000000000000..a7491a18a142 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_USELIB @@ -0,0 +1 @@ +# CONFIG_USELIB is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_USER_NS b/anolis/configs/L1-RECOMMEND/default/CONFIG_USER_NS new file mode 100644 index 000000000000..416bd53ce982 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_USER_NS @@ -0,0 +1 @@ +CONFIG_USER_NS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VDPA b/anolis/configs/L1-RECOMMEND/default/CONFIG_VDPA new file mode 100644 index 000000000000..1cf31b087898 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VDPA @@ -0,0 +1 @@ +# CONFIG_VDPA is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_CONTAINER 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_CONTAINER new file mode 100644 index 000000000000..72cb59edff82 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_CONTAINER @@ -0,0 +1 @@ +CONFIG_VFIO_CONTAINER=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_GROUP b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_GROUP new file mode 100644 index 000000000000..eaa917e807e3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_GROUP @@ -0,0 +1 @@ +CONFIG_VFIO_GROUP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_NOIOMMU b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_NOIOMMU new file mode 100644 index 000000000000..09ba4d1cd5b6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_NOIOMMU @@ -0,0 +1 @@ +CONFIG_VFIO_NOIOMMU=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_PCI_CORE b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_PCI_CORE new file mode 100644 index 000000000000..ebc0606bd5c0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VFIO_PCI_CORE @@ -0,0 +1 @@ +CONFIG_VFIO_PCI_CORE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VGA_ARB_MAX_GPUS b/anolis/configs/L1-RECOMMEND/default/CONFIG_VGA_ARB_MAX_GPUS new file mode 100644 index 000000000000..e66aea99da8d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VGA_ARB_MAX_GPUS @@ -0,0 +1 @@ +CONFIG_VGA_ARB_MAX_GPUS=64 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VHOST_MENU b/anolis/configs/L1-RECOMMEND/default/CONFIG_VHOST_MENU new file mode 100644 index 000000000000..00536a2b000f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VHOST_MENU @@ -0,0 +1 @@ +CONFIG_VHOST_MENU=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VHOST_SCSI b/anolis/configs/L1-RECOMMEND/default/CONFIG_VHOST_SCSI new file mode 100644 index 000000000000..3f733834e424 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VHOST_SCSI @@ -0,0 +1 @@ +CONFIG_VHOST_SCSI=m diff 
--git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_DMA_SHARED_BUFFER b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_DMA_SHARED_BUFFER new file mode 100644 index 000000000000..fe206fc5b167 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_DMA_SHARED_BUFFER @@ -0,0 +1 @@ +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_INPUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_INPUT new file mode 100644 index 000000000000..87130e4039d6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_INPUT @@ -0,0 +1 @@ +CONFIG_VIRTIO_INPUT=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_PCI_LEGACY b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_PCI_LEGACY new file mode 100644 index 000000000000..dc31c9947a65 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_PCI_LEGACY @@ -0,0 +1 @@ +CONFIG_VIRTIO_PCI_LEGACY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_VSOCKETS b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_VSOCKETS new file mode 100644 index 000000000000..a14419e0fdab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_VSOCKETS @@ -0,0 +1 @@ +CONFIG_VIRTIO_VSOCKETS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_VSOCKETS_COMMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_VSOCKETS_COMMON new file mode 100644 index 000000000000..3ae644bb1178 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_VSOCKETS_COMMON @@ -0,0 +1 @@ +CONFIG_VIRTIO_VSOCKETS_COMMON=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRT_FUSE b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRT_FUSE new file mode 100644 index 000000000000..ebd7105d198e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRT_FUSE @@ -0,0 +1 @@ +CONFIG_VIRT_FUSE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKETS_DIAG b/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKETS_DIAG new 
file mode 100644 index 000000000000..89432b02b65f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKETS_DIAG @@ -0,0 +1 @@ +CONFIG_VSOCKETS_DIAG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKETS_LOOPBACK b/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKETS_LOOPBACK new file mode 100644 index 000000000000..e07891f5bb9f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKETS_LOOPBACK @@ -0,0 +1 @@ +CONFIG_VSOCKETS_LOOPBACK=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKMON new file mode 100644 index 000000000000..82594c488369 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VSOCKMON @@ -0,0 +1 @@ +CONFIG_VSOCKMON=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VT_HW_CONSOLE_BINDING b/anolis/configs/L1-RECOMMEND/default/CONFIG_VT_HW_CONSOLE_BINDING new file mode 100644 index 000000000000..04f0917d8871 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VT_HW_CONSOLE_BINDING @@ -0,0 +1 @@ +CONFIG_VT_HW_CONSOLE_BINDING=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_VXLAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_VXLAN new file mode 100644 index 000000000000..2aa404d8daba --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_VXLAN @@ -0,0 +1 @@ +CONFIG_VXLAN=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WAN b/anolis/configs/L1-RECOMMEND/default/CONFIG_WAN new file mode 100644 index 000000000000..215b93ea87c6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WAN @@ -0,0 +1 @@ +CONFIG_WAN=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WARN_ALL_UNSEEDED_RANDOM b/anolis/configs/L1-RECOMMEND/default/CONFIG_WARN_ALL_UNSEEDED_RANDOM new file mode 100644 index 000000000000..5244e5664f1a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WARN_ALL_UNSEEDED_RANDOM @@ -0,0 +1 @@ +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_CORE b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_CORE new file mode 100644 index 000000000000..e70e50b70562 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_CORE @@ -0,0 +1 @@ +CONFIG_WATCHDOG_CORE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED new file mode 100644 index 000000000000..2cdeb93aa965 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED @@ -0,0 +1 @@ +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_NOWAYOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_NOWAYOUT new file mode 100644 index 000000000000..0eb79411b060 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_NOWAYOUT @@ -0,0 +1 @@ +# CONFIG_WATCHDOG_NOWAYOUT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_OPEN_TIMEOUT b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_OPEN_TIMEOUT new file mode 100644 index 000000000000..3f1d15f6e293 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_OPEN_TIMEOUT @@ -0,0 +1 @@ +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_PRETIMEOUT_GOV b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_PRETIMEOUT_GOV new file mode 100644 index 000000000000..9002114dfe15 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_PRETIMEOUT_GOV @@ -0,0 +1 @@ +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_SYSFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_SYSFS new file mode 100644 index 000000000000..7eb5ab9fc39f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WATCHDOG_SYSFS @@ -0,0 +1 @@ +CONFIG_WATCHDOG_SYSFS=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_WIREGUARD b/anolis/configs/L1-RECOMMEND/default/CONFIG_WIREGUARD new file mode 100644 index 000000000000..f4bb670b4a41 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WIREGUARD @@ -0,0 +1 @@ +CONFIG_WIREGUARD=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WIREGUARD_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_WIREGUARD_DEBUG new file mode 100644 index 000000000000..bcd81132829b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WIREGUARD_DEBUG @@ -0,0 +1 @@ +# CONFIG_WIREGUARD_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_WQ_WATCHDOG b/anolis/configs/L1-RECOMMEND/default/CONFIG_WQ_WATCHDOG new file mode 100644 index 000000000000..459eb0f7989b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_WQ_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_WQ_WATCHDOG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XDP_SOCKETS_DIAG b/anolis/configs/L1-RECOMMEND/default/CONFIG_XDP_SOCKETS_DIAG new file mode 100644 index 000000000000..99fbcd0958fb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XDP_SOCKETS_DIAG @@ -0,0 +1 @@ +CONFIG_XDP_SOCKETS_DIAG=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_INTERFACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_INTERFACE new file mode 100644 index 000000000000..d808c272dad0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_INTERFACE @@ -0,0 +1 @@ +CONFIG_XFRM_INTERFACE=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_MIGRATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_MIGRATE new file mode 100644 index 000000000000..a0e21902312b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_MIGRATE @@ -0,0 +1 @@ +CONFIG_XFRM_MIGRATE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_STATISTICS b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_STATISTICS new file mode 100644 index 000000000000..27c7ef7a527f --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_STATISTICS @@ -0,0 +1 @@ +CONFIG_XFRM_STATISTICS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_SUB_POLICY b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_SUB_POLICY new file mode 100644 index 000000000000..75c36f7a8f92 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFRM_SUB_POLICY @@ -0,0 +1 @@ +CONFIG_XFRM_SUB_POLICY=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_DEBUG b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_DEBUG new file mode 100644 index 000000000000..63b3c69b3008 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_DEBUG @@ -0,0 +1 @@ +# CONFIG_XFS_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_ONLINE_SCRUB b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_ONLINE_SCRUB new file mode 100644 index 000000000000..dd4ff1f484a6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_ONLINE_SCRUB @@ -0,0 +1 @@ +# CONFIG_XFS_ONLINE_SCRUB is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_POSIX_ACL b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_POSIX_ACL new file mode 100644 index 000000000000..3ec55ecb1320 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_POSIX_ACL @@ -0,0 +1 @@ +CONFIG_XFS_POSIX_ACL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_QUOTA b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_QUOTA new file mode 100644 index 000000000000..34757192adde --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_QUOTA @@ -0,0 +1 @@ +CONFIG_XFS_QUOTA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_RT b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_RT new file mode 100644 index 000000000000..c6b8fc0c559f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_RT @@ -0,0 +1 @@ +# CONFIG_XFS_RT is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_SUPPORT_V4 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_SUPPORT_V4 new file mode 100644 index 000000000000..12315e1fff2b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_SUPPORT_V4 @@ -0,0 +1 @@ +CONFIG_XFS_SUPPORT_V4=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_WARN b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_WARN new file mode 100644 index 000000000000..abdb2fd86122 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XFS_WARN @@ -0,0 +1 @@ +# CONFIG_XFS_WARN is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XOR_BLOCKS b/anolis/configs/L1-RECOMMEND/default/CONFIG_XOR_BLOCKS new file mode 100644 index 000000000000..584c49c1818d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XOR_BLOCKS @@ -0,0 +1 @@ +CONFIG_XOR_BLOCKS=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_ARM b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_ARM new file mode 100644 index 000000000000..52cbc2d1097d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_ARM @@ -0,0 +1 @@ +CONFIG_XZ_DEC_ARM=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_ARMTHUMB b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_ARMTHUMB new file mode 100644 index 000000000000..50b05d1159cd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_ARMTHUMB @@ -0,0 +1 @@ +CONFIG_XZ_DEC_ARMTHUMB=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_BCJ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_BCJ new file mode 100644 index 000000000000..c7d5e04283b1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_BCJ @@ -0,0 +1 @@ +CONFIG_XZ_DEC_BCJ=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_IA64 b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_IA64 new file mode 100644 index 000000000000..34a4cb72e9a9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_IA64 @@ -0,0 +1 @@ +CONFIG_XZ_DEC_IA64=y diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_MICROLZMA b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_MICROLZMA new file mode 100644 index 000000000000..514ce6c253d7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_MICROLZMA @@ -0,0 +1 @@ +CONFIG_XZ_DEC_MICROLZMA=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_POWERPC b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_POWERPC new file mode 100644 index 000000000000..118f59ba1638 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_POWERPC @@ -0,0 +1 @@ +CONFIG_XZ_DEC_POWERPC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_SPARC b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_SPARC new file mode 100644 index 000000000000..328ae24e23b6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_SPARC @@ -0,0 +1 @@ +CONFIG_XZ_DEC_SPARC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_TEST b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_TEST new file mode 100644 index 000000000000..bc04be3e6cfc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_TEST @@ -0,0 +1 @@ +# CONFIG_XZ_DEC_TEST is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_X86 b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_X86 new file mode 100644 index 000000000000..1be802334c8b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_XZ_DEC_X86 @@ -0,0 +1 @@ +CONFIG_XZ_DEC_X86=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_Z3FOLD b/anolis/configs/L1-RECOMMEND/default/CONFIG_Z3FOLD new file mode 100644 index 000000000000..6a3fff219f3a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_Z3FOLD @@ -0,0 +1 @@ +# CONFIG_Z3FOLD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZBUD b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZBUD new file mode 100644 index 000000000000..87b4f7fecc0f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZBUD @@ 
-0,0 +1 @@ +CONFIG_ZBUD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZERO_CALL_USED_REGS b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZERO_CALL_USED_REGS new file mode 100644 index 000000000000..edba7cc04736 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZERO_CALL_USED_REGS @@ -0,0 +1 @@ +# CONFIG_ZERO_CALL_USED_REGS is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZISOFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZISOFS new file mode 100644 index 000000000000..ff0b58565dc2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZISOFS @@ -0,0 +1 @@ +CONFIG_ZISOFS=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZPOOL b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZPOOL new file mode 100644 index 000000000000..a4b814c14924 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZPOOL @@ -0,0 +1 @@ +CONFIG_ZPOOL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP new file mode 100644 index 000000000000..b611bad3a790 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP @@ -0,0 +1 @@ +CONFIG_ZRAM_DEF_COMP="lzo-rle" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZ4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZ4 new file mode 100644 index 000000000000..92b2902592cd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZ4 @@ -0,0 +1 @@ +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZ4HC b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZ4HC new file mode 100644 index 000000000000..24855f5243ea --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZ4HC @@ -0,0 +1 @@ +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZO b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZO new 
file mode 100644 index 000000000000..d07adc06e609 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZO @@ -0,0 +1 @@ +# CONFIG_ZRAM_DEF_COMP_LZO is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZORLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZORLE new file mode 100644 index 000000000000..2da229a01938 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_LZORLE @@ -0,0 +1 @@ +CONFIG_ZRAM_DEF_COMP_LZORLE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_ZSTD b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_ZSTD new file mode 100644 index 000000000000..16073be8e082 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_DEF_COMP_ZSTD @@ -0,0 +1 @@ +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_MEMORY_TRACKING b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_MEMORY_TRACKING new file mode 100644 index 000000000000..a45c396a428c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_MEMORY_TRACKING @@ -0,0 +1 @@ +# CONFIG_ZRAM_MEMORY_TRACKING is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_MULTI_COMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_MULTI_COMP new file mode 100644 index 000000000000..121ae124dbbf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_MULTI_COMP @@ -0,0 +1 @@ +# CONFIG_ZRAM_MULTI_COMP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_WRITEBACK b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_WRITEBACK new file mode 100644 index 000000000000..b1fcb086bf73 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_WRITEBACK @@ -0,0 +1 @@ +CONFIG_ZRAM_WRITEBACK=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC new file mode 100644 index 000000000000..c716bc70c4f2 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC @@ -0,0 +1 @@ +CONFIG_ZSMALLOC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC_CHAIN_SIZE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC_CHAIN_SIZE new file mode 100644 index 000000000000..8924c75fa1b3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC_CHAIN_SIZE @@ -0,0 +1 @@ +CONFIG_ZSMALLOC_CHAIN_SIZE=8 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC_STAT b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC_STAT new file mode 100644 index 000000000000..3e024e092627 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSMALLOC_STAT @@ -0,0 +1 @@ +CONFIG_ZSMALLOC_STAT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSTD_COMMON b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSTD_COMMON new file mode 100644 index 000000000000..8dd27a340d81 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSTD_COMMON @@ -0,0 +1 @@ +CONFIG_ZSTD_COMMON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP new file mode 100644 index 000000000000..64b92172dfd0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP @@ -0,0 +1 @@ +CONFIG_ZSWAP=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT new file mode 100644 index 000000000000..7a61b6fcde04 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT @@ -0,0 +1 @@ +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 new file mode 100644 index 000000000000..3f2f042dc7b4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 @@ -0,0 +1 @@ +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE new file mode 100644 index 000000000000..73b9be5b0599 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE @@ -0,0 +1 @@ +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 new file mode 100644 index 000000000000..d15eadf94d12 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 @@ -0,0 +1 @@ +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC new file mode 100644 index 000000000000..96763a4c4ecf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC @@ -0,0 +1 @@ +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO new file mode 100644 index 000000000000..c6af1aeb90fa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO @@ -0,0 +1 @@ +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD new file mode 100644 index 000000000000..adcc3b94723e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD @@ -0,0 +1 @@ +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_DEFAULT_ON b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_DEFAULT_ON new file mode 
100644 index 000000000000..93a95edd6d53 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_DEFAULT_ON @@ -0,0 +1 @@ +# CONFIG_ZSWAP_DEFAULT_ON is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON new file mode 100644 index 000000000000..1792514c9b45 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON @@ -0,0 +1 @@ +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT new file mode 100644 index 000000000000..753d56a0c343 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT @@ -0,0 +1 @@ +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD new file mode 100644 index 000000000000..c963eaebfbd7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD @@ -0,0 +1 @@ +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD new file mode 100644 index 000000000000..15a8e22be2a0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD @@ -0,0 +1 @@ +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC new file mode 100644 index 000000000000..cb1392399f58 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC @@ -0,0 +1 @@ +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set diff --git 
a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_BGRT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_BGRT new file mode 100644 index 000000000000..13035dd8275b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_BGRT @@ -0,0 +1 @@ +CONFIG_ACPI_BGRT=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_DOCK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_DOCK new file mode 100644 index 000000000000..e4d916353f68 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_DOCK @@ -0,0 +1 @@ +CONFIG_ACPI_DOCK=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_EC_DEBUGFS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_EC_DEBUGFS new file mode 100644 index 000000000000..dfc6b278f421 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_EC_DEBUGFS @@ -0,0 +1 @@ +CONFIG_ACPI_EC_DEBUGFS=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_EXTLOG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_EXTLOG new file mode 100644 index 000000000000..f1c760c4566c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_EXTLOG @@ -0,0 +1 @@ +CONFIG_ACPI_EXTLOG=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_PROCESSOR_AGGREGATOR b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_PROCESSOR_AGGREGATOR new file mode 100644 index 000000000000..67779d4893c1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_PROCESSOR_AGGREGATOR @@ -0,0 +1 @@ +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_REV_OVERRIDE_POSSIBLE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_REV_OVERRIDE_POSSIBLE new file mode 100644 index 000000000000..021ea2f25134 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_REV_OVERRIDE_POSSIBLE @@ -0,0 +1 @@ +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_SBS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_SBS new file mode 100644 index 000000000000..82848bd17fc2 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_SBS @@ -0,0 +1 @@ +CONFIG_ACPI_SBS=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_SLEEP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_SLEEP new file mode 100644 index 000000000000..68612464dbe0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_SLEEP @@ -0,0 +1 @@ +CONFIG_ACPI_SLEEP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_TAD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_TAD new file mode 100644 index 000000000000..9588df987872 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_TAD @@ -0,0 +1 @@ +CONFIG_ACPI_TAD=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_THERMAL_REL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_THERMAL_REL new file mode 100644 index 000000000000..e4794b0ebb4f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_THERMAL_REL @@ -0,0 +1 @@ +CONFIG_ACPI_THERMAL_REL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_WATCHDOG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_WATCHDOG new file mode 100644 index 000000000000..d82966e07f87 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACPI_WATCHDOG @@ -0,0 +1 @@ +CONFIG_ACPI_WATCHDOG=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACQUIRE_WDT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACQUIRE_WDT new file mode 100644 index 000000000000..400222bb07ee --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ACQUIRE_WDT @@ -0,0 +1 @@ +# CONFIG_ACQUIRE_WDT is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ADVANTECH_WDT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ADVANTECH_WDT new file mode 100644 index 000000000000..07d7f5d17cf9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ADVANTECH_WDT @@ -0,0 +1 @@ +# CONFIG_ADVANTECH_WDT is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMDTEE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMDTEE new file mode 100644 index 000000000000..9f091c9f5fa1 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMDTEE @@ -0,0 +1 @@ +CONFIG_AMDTEE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_HSMP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_HSMP new file mode 100644 index 000000000000..bf5761a2d851 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_HSMP @@ -0,0 +1 @@ +# CONFIG_AMD_HSMP is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU new file mode 100644 index 000000000000..ede0caaa0e3f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU @@ -0,0 +1 @@ +CONFIG_AMD_IOMMU=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU_V2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU_V2 new file mode 100644 index 000000000000..bc103a7b9c35 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_IOMMU_V2 @@ -0,0 +1 @@ +CONFIG_AMD_IOMMU_V2=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_MEM_ENCRYPT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_MEM_ENCRYPT new file mode 100644 index 000000000000..f9eacfabc806 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_MEM_ENCRYPT @@ -0,0 +1 @@ +CONFIG_AMD_MEM_ENCRYPT=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_PTDMA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_PTDMA new file mode 100644 index 000000000000..227504d21348 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_AMD_PTDMA @@ -0,0 +1 @@ +CONFIG_AMD_PTDMA=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ARCH_CPUIDLE_HALTPOLL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ARCH_CPUIDLE_HALTPOLL new file mode 100644 index 000000000000..87ff0c771e50 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ARCH_CPUIDLE_HALTPOLL @@ -0,0 +1 @@ +CONFIG_ARCH_CPUIDLE_HALTPOLL=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ARCH_MEMORY_PROBE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ARCH_MEMORY_PROBE new file mode 100644 index 000000000000..9b51745063dc --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ARCH_MEMORY_PROBE @@ -0,0 +1 @@ +# CONFIG_ARCH_MEMORY_PROBE is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CALL_DEPTH_TRACKING b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CALL_DEPTH_TRACKING new file mode 100644 index 000000000000..2e0554f3cc89 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CALL_DEPTH_TRACKING @@ -0,0 +1 @@ +CONFIG_CALL_DEPTH_TRACKING=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CMA_SIZE_MBYTES b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CMA_SIZE_MBYTES new file mode 100644 index 000000000000..2fb4d4a552cb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CMA_SIZE_MBYTES @@ -0,0 +1 @@ +CONFIG_CMA_SIZE_MBYTES=0 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CNIC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CNIC new file mode 100644 index 000000000000..b32c2cc2af81 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CNIC @@ -0,0 +1 @@ +CONFIG_CNIC=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPUMASK_OFFSTACK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPUMASK_OFFSTACK new file mode 100644 index 000000000000..aa9b7f32309d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPUMASK_OFFSTACK @@ -0,0 +1 @@ +CONFIG_CPUMASK_OFFSTACK=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_FREQ_GOV_SCHEDUTIL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_FREQ_GOV_SCHEDUTIL new file mode 100644 index 000000000000..0aec996431ac --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_FREQ_GOV_SCHEDUTIL @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IBPB_ENTRY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IBPB_ENTRY new file mode 100644 index 000000000000..64efeec23f9c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IBPB_ENTRY @@ -0,0 +1 @@ +CONFIG_CPU_IBPB_ENTRY=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IBRS_ENTRY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IBRS_ENTRY 
new file mode 100644 index 000000000000..9ab5b99170aa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IBRS_ENTRY @@ -0,0 +1 @@ +CONFIG_CPU_IBRS_ENTRY=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IDLE_GOV_HALTPOLL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IDLE_GOV_HALTPOLL new file mode 100644 index 000000000000..4e01ab97cd89 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_IDLE_GOV_HALTPOLL @@ -0,0 +1 @@ +CONFIG_CPU_IDLE_GOV_HALTPOLL=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_SRSO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_SRSO new file mode 100644 index 000000000000..1512ec8e9766 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_SRSO @@ -0,0 +1 @@ +CONFIG_CPU_SRSO=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_UNRET_ENTRY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_UNRET_ENTRY new file mode 100644 index 000000000000..aa4a0eb5b6a4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_UNRET_ENTRY @@ -0,0 +1 @@ +CONFIG_CPU_UNRET_ENTRY=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRASH_HOTPLUG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRASH_HOTPLUG new file mode 100644 index 000000000000..a723b9e5b45c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRASH_HOTPLUG @@ -0,0 +1 @@ +CONFIG_CRASH_HOTPLUG=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 new file mode 100644 index 000000000000..7eb1a4c0299a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_AES_NI_INTEL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_AES_NI_INTEL new file mode 100644 index 000000000000..7f29bd9bcf86 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_AES_NI_INTEL @@ -0,0 +1 @@ +CONFIG_CRYPTO_AES_NI_INTEL=y diff 
--git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_BLAKE2S_X86 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_BLAKE2S_X86 new file mode 100644 index 000000000000..a9c552040aa0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_BLAKE2S_X86 @@ -0,0 +1 @@ +CONFIG_CRYPTO_BLAKE2S_X86=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_BLOWFISH_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_BLOWFISH_X86_64 new file mode 100644 index 000000000000..52bdc5fdde9e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_BLOWFISH_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_BLOWFISH_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 new file mode 100644 index 000000000000..67edf8ddeb73 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 new file mode 100644 index 000000000000..6b7b873157d9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_X86_64 new file mode 100644 index 000000000000..b60b238f480c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAMELLIA_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CAMELLIA_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAST5_AVX_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAST5_AVX_X86_64 new file mode 100644 index 000000000000..a8afa374dad3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAST5_AVX_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CAST5_AVX_X86_64=m 
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAST6_AVX_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAST6_AVX_X86_64 new file mode 100644 index 000000000000..5dbd020a95d8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CAST6_AVX_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CAST6_AVX_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CHACHA20_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CHACHA20_X86_64 new file mode 100644 index 000000000000..6131a7b56e90 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CHACHA20_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CHACHA20_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRC32C_INTEL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRC32C_INTEL new file mode 100644 index 000000000000..c500b4f55bf2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRC32C_INTEL @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRC32C_INTEL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRC32_PCLMUL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRC32_PCLMUL new file mode 100644 index 000000000000..c503f02e3944 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRC32_PCLMUL @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRC32_PCLMUL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRCT10DIF_PCLMUL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRCT10DIF_PCLMUL new file mode 100644 index 000000000000..14e5d708b70a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CRCT10DIF_PCLMUL @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519 new file mode 100644 index 000000000000..fc4c61ee3fc5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_CURVE25519 is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519_X86 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519_X86 new file mode 100644 index 000000000000..19b41bb62262 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_CURVE25519_X86 @@ -0,0 +1 @@ +CONFIG_CRYPTO_CURVE25519_X86=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DES3_EDE_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DES3_EDE_X86_64 new file mode 100644 index 000000000000..fac68c6a1d23 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DES3_EDE_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_DES3_EDE_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP new file mode 100644 index 000000000000..d2f5497f2d95 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_CCP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_CRYPTO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_CRYPTO new file mode 100644 index 000000000000..4887c6d76436 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_CRYPTO @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DD new file mode 100644 index 000000000000..18c7b900eb3c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DD @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_CCP_DD=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DEBUGFS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DEBUGFS new file mode 100644 index 000000000000..fe46585daea7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_HCT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_HCT new file mode 100644 index 000000000000..e135e9e50ec7 --- /dev/null 
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_HCT @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_HCT=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO new file mode 100644 index 000000000000..3204078bac0b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_IAA_CRYPTO=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS new file mode 100644 index 000000000000..34817183f2da --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT new file mode 100644 index 000000000000..06ec9df18672 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C3XXX b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C3XXX new file mode 100644 index 000000000000..9fcd620a9899 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C3XXX @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT_C3XXX=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C3XXXVF b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C3XXXVF new file mode 100644 index 000000000000..9e5c620530a0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C3XXXVF @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C62X b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C62X new file mode 100644 index 000000000000..2583c47f29ea --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C62X @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT_C62X=m diff 
--git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C62XVF b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C62XVF new file mode 100644 index 000000000000..589fd67c5ab1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_C62XVF @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT_C62XVF=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_DH895xCC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_DH895xCC new file mode 100644 index 000000000000..3d37c7af5658 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_DH895xCC @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF new file mode 100644 index 000000000000..8035712989f1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_QAT_DH895xCCVF @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_CCP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_CCP new file mode 100644 index 000000000000..413ad9c2adce --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_CCP @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_SP_CCP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_PSP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_PSP new file mode 100644 index 000000000000..7b0c6490a36c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_PSP @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_SP_PSP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_TSSE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_TSSE new file mode 100644 index 000000000000..110860ed4b4a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_TSSE @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_TSSE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN new file mode 100644 index 000000000000..b17515fdcbce --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_ZHAOXIN=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN_AES b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN_AES new file mode 100644 index 000000000000..3619496e7f70 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN_AES @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_ZHAOXIN_AES=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN_SHA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN_SHA new file mode 100644 index 000000000000..1d4629abb049 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_ZHAOXIN_SHA @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_ZHAOXIN_SHA=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL new file mode 100644 index 000000000000..3c9561aee00b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL @@ -0,0 +1 @@ +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_NHPOLY1305_AVX2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_NHPOLY1305_AVX2 new file mode 100644 index 000000000000..c15f094b1d1f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_NHPOLY1305_AVX2 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_NHPOLY1305_SSE2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_NHPOLY1305_SSE2 new file mode 100644 index 000000000000..69f3444b40dd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_NHPOLY1305_SSE2 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_POLY1305_X86_64 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_POLY1305_X86_64 new file mode 100644 index 000000000000..b444d23171f7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_POLY1305_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_POLY1305_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_AVX2_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_AVX2_X86_64 new file mode 100644 index 000000000000..604ebff651db --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_AVX2_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_AVX_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_AVX_X86_64 new file mode 100644 index 000000000000..e3ab517c6dd6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_AVX_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_SSE2_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_SSE2_X86_64 new file mode 100644 index 000000000000..6706b3873b46 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SERPENT_SSE2_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA1_SSSE3 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA1_SSSE3 new file mode 100644 index 000000000000..dc0e1b2a6ac5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA1_SSSE3 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SHA1_SSSE3=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA256_SSSE3 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA256_SSSE3 new file mode 100644 index 000000000000..4969f8f458d4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA256_SSSE3 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SHA256_SSSE3=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA512_SSSE3 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA512_SSSE3 new file mode 100644 index 000000000000..227c91b5bfd9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SHA512_SSSE3 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SHA512_SSSE3=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM2_ZHAOXIN_GMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM2_ZHAOXIN_GMI new file mode 100644 index 000000000000..6242432eb58b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM2_ZHAOXIN_GMI @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM2_ZHAOXIN_GMI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM3_ZHAOXIN_GMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM3_ZHAOXIN_GMI new file mode 100644 index 000000000000..98554908a1fe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM3_ZHAOXIN_GMI @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM4_ZHAOXIN_GMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM4_ZHAOXIN_GMI new file mode 100644 index 000000000000..327d40661bd4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_SM4_ZHAOXIN_GMI @@ -0,0 +1 @@ +CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_AVX_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_AVX_X86_64 new file mode 100644 index 000000000000..abdc67677392 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_AVX_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_X86_64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_X86_64 new file mode 100644 index 000000000000..cf51269aa370 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_X86_64 @@ -0,0 +1 @@ +CONFIG_CRYPTO_TWOFISH_X86_64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_X86_64_3WAY 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_X86_64_3WAY new file mode 100644 index 000000000000..3dbb7102f737 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_TWOFISH_X86_64_3WAY @@ -0,0 +1 @@ +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CSV_GUEST b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CSV_GUEST new file mode 100644 index 000000000000..df53c0727d75 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CSV_GUEST @@ -0,0 +1 @@ +CONFIG_CSV_GUEST=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEBUG_BOOT_PARAMS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEBUG_BOOT_PARAMS new file mode 100644 index 000000000000..fa10c0ea793e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEBUG_BOOT_PARAMS @@ -0,0 +1 @@ +CONFIG_DEBUG_BOOT_PARAMS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEBUG_PERF_USE_VMALLOC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEBUG_PERF_USE_VMALLOC new file mode 100644 index 000000000000..a1d865d212ae --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEBUG_PERF_USE_VMALLOC @@ -0,0 +1 @@ +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEVICE_PRIVATE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEVICE_PRIVATE new file mode 100644 index 000000000000..ef0a4ad5b9f8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEVICE_PRIVATE @@ -0,0 +1 @@ +CONFIG_DEVICE_PRIVATE=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEVPORT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEVPORT new file mode 100644 index 000000000000..ff170aad12a6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEVPORT @@ -0,0 +1 @@ +CONFIG_DEVPORT=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX new file mode 100644 index 000000000000..e7bd7d00db51 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX @@ -0,0 +1 @@ +CONFIG_DEV_DAX=y diff --git 
a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_HMEM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_HMEM new file mode 100644 index 000000000000..ebdd7cb1a555 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_HMEM @@ -0,0 +1 @@ +CONFIG_DEV_DAX_HMEM=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_KMEM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_KMEM new file mode 100644 index 000000000000..ab22bc2e6d92 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_KMEM @@ -0,0 +1 @@ +CONFIG_DEV_DAX_KMEM=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_PMEM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_PMEM new file mode 100644 index 000000000000..603159c2b2ef --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_PMEM @@ -0,0 +1 @@ +CONFIG_DEV_DAX_PMEM=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DLM_DEBUG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DLM_DEBUG new file mode 100644 index 000000000000..002a38454b3b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DLM_DEBUG @@ -0,0 +1 @@ +CONFIG_DLM_DEBUG=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_CIK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_CIK new file mode 100644 index 000000000000..e184e53af3c0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_CIK @@ -0,0 +1 @@ +# CONFIG_DRM_AMDGPU_CIK is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_USERPTR b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_USERPTR new file mode 100644 index 000000000000..06dba06e3d32 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMDGPU_USERPTR @@ -0,0 +1 @@ +# CONFIG_DRM_AMDGPU_USERPTR is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMD_ACP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMD_ACP new file mode 100644 index 000000000000..6d5ecf147b34 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_AMD_ACP @@ -0,0 +1 @@ +# 
CONFIG_DRM_AMD_ACP is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_DP_AUX_CHARDEV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_DP_AUX_CHARDEV new file mode 100644 index 000000000000..9f59149e02bc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_DP_AUX_CHARDEV @@ -0,0 +1 @@ +# CONFIG_DRM_DP_AUX_CHARDEV is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_GMA500 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_GMA500 new file mode 100644 index 000000000000..38ba652000c6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_GMA500 @@ -0,0 +1 @@ +CONFIG_DRM_GMA500=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915 new file mode 100644 index 000000000000..1034adf42222 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915 @@ -0,0 +1 @@ +CONFIG_DRM_I915=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_CAPTURE_ERROR b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_CAPTURE_ERROR new file mode 100644 index 000000000000..d85c7203563a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_CAPTURE_ERROR @@ -0,0 +1 @@ +CONFIG_DRM_I915_CAPTURE_ERROR=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_COMPRESS_ERROR b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_COMPRESS_ERROR new file mode 100644 index 000000000000..6d6c129f5d27 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_COMPRESS_ERROR @@ -0,0 +1 @@ +CONFIG_DRM_I915_COMPRESS_ERROR=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_FENCE_TIMEOUT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_FENCE_TIMEOUT new file mode 100644 index 000000000000..04c3d575ecdb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_FENCE_TIMEOUT @@ -0,0 +1 @@ +CONFIG_DRM_I915_FENCE_TIMEOUT=10000 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_FORCE_PROBE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_FORCE_PROBE new 
file mode 100644 index 000000000000..660b7a1eabdc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_FORCE_PROBE @@ -0,0 +1 @@ +CONFIG_DRM_I915_FORCE_PROBE="" diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT new file mode 100644 index 000000000000..c6af3c3ccb96 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT @@ -0,0 +1 @@ +CONFIG_DRM_I915_GVT=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT_KVMGT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT_KVMGT new file mode 100644 index 000000000000..016a41e8a679 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_GVT_KVMGT @@ -0,0 +1 @@ +CONFIG_DRM_I915_GVT_KVMGT=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_HEARTBEAT_INTERVAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_HEARTBEAT_INTERVAL new file mode 100644 index 000000000000..6db87334d09d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_HEARTBEAT_INTERVAL @@ -0,0 +1 @@ +CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT new file mode 100644 index 000000000000..150c2e288609 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT @@ -0,0 +1 @@ +CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT new file mode 100644 index 000000000000..455aa7c8427f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT @@ -0,0 +1 @@ +CONFIG_DRM_I915_PREEMPT_TIMEOUT=640 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_STOP_TIMEOUT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_STOP_TIMEOUT new file mode 100644 index 000000000000..f987ddca213d --- /dev/null 
+++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_STOP_TIMEOUT @@ -0,0 +1 @@ +CONFIG_DRM_I915_STOP_TIMEOUT=100 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_TIMESLICE_DURATION b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_TIMESLICE_DURATION new file mode 100644 index 000000000000..67b628a5b97e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_TIMESLICE_DURATION @@ -0,0 +1 @@ +CONFIG_DRM_I915_TIMESLICE_DURATION=1 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND new file mode 100644 index 000000000000..90b96974302b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND @@ -0,0 +1 @@ +CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_USERPTR b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_USERPTR new file mode 100644 index 000000000000..4f253abc1402 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_I915_USERPTR @@ -0,0 +1 @@ +CONFIG_DRM_I915_USERPTR=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_VMWGFX b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_VMWGFX new file mode 100644 index 000000000000..12fe6b15f13e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DRM_VMWGFX @@ -0,0 +1 @@ +CONFIG_DRM_VMWGFX=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DWC_PCIE_PMU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DWC_PCIE_PMU new file mode 100644 index 000000000000..66e35fcc546e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DWC_PCIE_PMU @@ -0,0 +1 @@ +# CONFIG_DWC_PCIE_PMU is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DW_DMAC_PCI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DW_DMAC_PCI new file mode 100644 index 000000000000..2a2239b2440f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DW_DMAC_PCI @@ -0,0 +1 @@ +CONFIG_DW_DMAC_PCI=y diff --git 
a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DYNAMIC_PHYSICAL_MASK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DYNAMIC_PHYSICAL_MASK new file mode 100644 index 000000000000..1d9f0653942f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DYNAMIC_PHYSICAL_MASK @@ -0,0 +1 @@ +CONFIG_DYNAMIC_PHYSICAL_MASK=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DYNAMIC_SIGFRAME b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DYNAMIC_SIGFRAME new file mode 100644 index 000000000000..0f0491574115 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DYNAMIC_SIGFRAME @@ -0,0 +1 @@ +CONFIG_DYNAMIC_SIGFRAME=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_E1000E_HWTS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_E1000E_HWTS new file mode 100644 index 000000000000..9b00c96b49a9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_E1000E_HWTS @@ -0,0 +1 @@ +CONFIG_E1000E_HWTS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK new file mode 100644 index 000000000000..d8c2487be3e2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK @@ -0,0 +1 @@ +CONFIG_EARLY_PRINTK=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_DBGP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_DBGP new file mode 100644 index 000000000000..b8016d0cb890 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_DBGP @@ -0,0 +1 @@ +CONFIG_EARLY_PRINTK_DBGP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_USB_XDBC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_USB_XDBC new file mode 100644 index 000000000000..47e8f4090684 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EARLY_PRINTK_USB_XDBC @@ -0,0 +1 @@ +CONFIG_EARLY_PRINTK_USB_XDBC=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_AMD64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_AMD64 new file mode 100644 index 000000000000..667e1c0abd32 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_AMD64 @@ -0,0 +1 @@ +CONFIG_EDAC_AMD64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_DECODE_MCE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_DECODE_MCE new file mode 100644 index 000000000000..e408064da251 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_DECODE_MCE @@ -0,0 +1 @@ +CONFIG_EDAC_DECODE_MCE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_E752X b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_E752X new file mode 100644 index 000000000000..4340af994e25 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_E752X @@ -0,0 +1 @@ +CONFIG_EDAC_E752X=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I10NM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I10NM new file mode 100644 index 000000000000..8214abca5a09 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I10NM @@ -0,0 +1 @@ +CONFIG_EDAC_I10NM=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I3000 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I3000 new file mode 100644 index 000000000000..4f30c3dbb607 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I3000 @@ -0,0 +1 @@ +CONFIG_EDAC_I3000=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I3200 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I3200 new file mode 100644 index 000000000000..eaf5b3300719 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I3200 @@ -0,0 +1 @@ +CONFIG_EDAC_I3200=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I5100 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I5100 new file mode 100644 index 000000000000..255b23ed32df --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I5100 @@ -0,0 +1 @@ +CONFIG_EDAC_I5100=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I5400 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I5400 new file mode 100644 index 000000000000..5cd55fc12660 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I5400 @@ -0,0 
+1 @@ +CONFIG_EDAC_I5400=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I7300 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I7300 new file mode 100644 index 000000000000..d1c3314d74b0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I7300 @@ -0,0 +1 @@ +CONFIG_EDAC_I7300=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I7CORE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I7CORE new file mode 100644 index 000000000000..b49e12979725 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I7CORE @@ -0,0 +1 @@ +CONFIG_EDAC_I7CORE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I82975X b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I82975X new file mode 100644 index 000000000000..0fff85c4a34c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_I82975X @@ -0,0 +1 @@ +CONFIG_EDAC_I82975X=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_SBRIDGE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_SBRIDGE new file mode 100644 index 000000000000..18dc9b53a86a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_SBRIDGE @@ -0,0 +1 @@ +CONFIG_EDAC_SBRIDGE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_SKX b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_SKX new file mode 100644 index 000000000000..33f417f1e3a9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDAC_SKX @@ -0,0 +1 @@ +CONFIG_EDAC_SKX=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDD new file mode 100644 index 000000000000..9b8a635dea5b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EDD @@ -0,0 +1 @@ +CONFIG_EDD=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_COCO_SECRET b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_COCO_SECRET new file mode 100644 index 000000000000..c2da2e0bfdb2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_COCO_SECRET @@ -0,0 +1 @@ +CONFIG_EFI_COCO_SECRET=y diff --git 
a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_MIXED b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_MIXED new file mode 100644 index 000000000000..3eb4a43bf55b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_MIXED @@ -0,0 +1 @@ +CONFIG_EFI_MIXED=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_RCI2_TABLE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_RCI2_TABLE new file mode 100644 index 000000000000..083461929710 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_RCI2_TABLE @@ -0,0 +1 @@ +CONFIG_EFI_RCI2_TABLE=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_RUNTIME_MAP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_RUNTIME_MAP new file mode 100644 index 000000000000..3a4462f3949b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_RUNTIME_MAP @@ -0,0 +1 @@ +CONFIG_EFI_RUNTIME_MAP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_SECRET b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_SECRET new file mode 100644 index 000000000000..7c4a3fbc212f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_EFI_SECRET @@ -0,0 +1 @@ +CONFIG_EFI_SECRET=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_HYPERV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_HYPERV new file mode 100644 index 000000000000..06c30d0e72cb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_HYPERV @@ -0,0 +1 @@ +CONFIG_FB_HYPERV=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_SIMPLE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_SIMPLE new file mode 100644 index 000000000000..3ae9be22cdf9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_SIMPLE @@ -0,0 +1 @@ +# CONFIG_FB_SIMPLE is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_SSD1307 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_SSD1307 new file mode 100644 index 000000000000..cddd16bb7398 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_SSD1307 @@ -0,0 +1 @@ +# CONFIG_FB_SSD1307 is not set diff --git 
a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_VESA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_VESA new file mode 100644 index 000000000000..3ef695ad37e9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FB_VESA @@ -0,0 +1 @@ +CONFIG_FB_VESA=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FCOE_FNIC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FCOE_FNIC new file mode 100644 index 000000000000..197c5e7045e9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FCOE_FNIC @@ -0,0 +1 @@ +CONFIG_FCOE_FNIC=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FIRMWARE_EDID b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FIRMWARE_EDID new file mode 100644 index 000000000000..ad5ccc9592e9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FIRMWARE_EDID @@ -0,0 +1 @@ +CONFIG_FIRMWARE_EDID=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FIRMWARE_MEMMAP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FIRMWARE_MEMMAP new file mode 100644 index 000000000000..6d06dab85813 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FIRMWARE_MEMMAP @@ -0,0 +1 @@ +CONFIG_FIRMWARE_MEMMAP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FUNCTION_PROFILER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FUNCTION_PROFILER new file mode 100644 index 000000000000..de1cfdb543b9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FUNCTION_PROFILER @@ -0,0 +1 @@ +CONFIG_FUNCTION_PROFILER=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_USER_HELPER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_USER_HELPER new file mode 100644 index 000000000000..8ce27a439315 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_USER_HELPER @@ -0,0 +1 @@ +CONFIG_FW_LOADER_USER_HELPER=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GART_IOMMU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GART_IOMMU new file mode 100644 index 000000000000..c735af30c5df --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GART_IOMMU @@ -0,0 +1 @@ +# CONFIG_GART_IOMMU is 
not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_ADC_THERMAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_ADC_THERMAL new file mode 100644 index 000000000000..7719b8960eef --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_ADC_THERMAL @@ -0,0 +1 @@ +# CONFIG_GENERIC_ADC_THERMAL is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_CPU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_CPU new file mode 100644 index 000000000000..9cd8d3177e2d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_CPU @@ -0,0 +1 @@ +CONFIG_GENERIC_CPU=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_ISA_DMA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_ISA_DMA new file mode 100644 index 000000000000..01c1798573ce --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_ISA_DMA @@ -0,0 +1 @@ +CONFIG_GENERIC_ISA_DMA=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PENDING_IRQ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PENDING_IRQ new file mode 100644 index 000000000000..b534c7167ab5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PENDING_IRQ @@ -0,0 +1 @@ +CONFIG_GENERIC_PENDING_IRQ=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY new file mode 100644 index 000000000000..582e87c3b9f5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY @@ -0,0 +1 @@ +# CONFIG_GENERIC_PHY is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS new file mode 100644 index 000000000000..0ddd7115391a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS @@ -0,0 +1 @@ +CONFIG_GFS2_FS=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS_LOCKING_DLM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS_LOCKING_DLM new file mode 100644 index 000000000000..424a3046b157 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GFS2_FS_LOCKING_DLM @@ -0,0 +1 @@ +CONFIG_GFS2_FS_LOCKING_DLM=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GPIO_GENERIC_PLATFORM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GPIO_GENERIC_PLATFORM new file mode 100644 index 000000000000..d45d5bae2a11 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GPIO_GENERIC_PLATFORM @@ -0,0 +1 @@ +# CONFIG_GPIO_GENERIC_PLATFORM is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HALTPOLL_CPUIDLE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HALTPOLL_CPUIDLE new file mode 100644 index 000000000000..2a48c8bdc78f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HALTPOLL_CPUIDLE @@ -0,0 +1 @@ +CONFIG_HALTPOLL_CPUIDLE=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HANGCHECK_TIMER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HANGCHECK_TIMER new file mode 100644 index 000000000000..74d6f3e7a5a4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HANGCHECK_TIMER @@ -0,0 +1 @@ +CONFIG_HANGCHECK_TIMER=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET new file mode 100644 index 000000000000..9ac1b11db4c1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET @@ -0,0 +1 @@ +CONFIG_HPET=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET_MMAP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET_MMAP new file mode 100644 index 000000000000..d6eb1d36e01f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HPET_MMAP @@ -0,0 +1 @@ +CONFIG_HPET_MMAP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HSA_AMD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HSA_AMD new file mode 100644 index 000000000000..2515e018011e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HSA_AMD @@ -0,0 +1 @@ +# CONFIG_HSA_AMD is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP new file mode 100644 index 
000000000000..c4938b4fb501 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP @@ -0,0 +1 @@ +CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON new file mode 100644 index 000000000000..641af94e1b1b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON @@ -0,0 +1 @@ +CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_AMD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_AMD new file mode 100644 index 000000000000..dd7a51f3d484 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_AMD @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_AMD=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_INTEL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_INTEL new file mode 100644 index 000000000000..8929685e6a44 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_INTEL @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_INTEL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_ZHAOXIN b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_ZHAOXIN new file mode 100644 index 000000000000..61ccb3bff141 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HW_RANDOM_ZHAOXIN @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_ZHAOXIN=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_CSV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_CSV new file mode 100644 index 000000000000..631fc6896012 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_CSV @@ -0,0 +1 @@ +CONFIG_HYGON_CSV=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_GM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_GM new file mode 100644 index 000000000000..0266c4f60b32 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_GM @@ -0,0 +1 @@ +CONFIG_HYGON_GM=y diff --git 
a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_PSP2CPU_CMD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_PSP2CPU_CMD new file mode 100644 index 000000000000..1c6ee94d9041 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYGON_PSP2CPU_CMD @@ -0,0 +1 @@ +CONFIG_HYGON_PSP2CPU_CMD=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV new file mode 100644 index 000000000000..586091822f18 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV @@ -0,0 +1 @@ +CONFIG_HYPERV=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERVISOR_GUEST b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERVISOR_GUEST new file mode 100644 index 000000000000..2770560d56a0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERVISOR_GUEST @@ -0,0 +1 @@ +CONFIG_HYPERVISOR_GUEST=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_BALLOON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_BALLOON new file mode 100644 index 000000000000..b1f01cbe4591 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_BALLOON @@ -0,0 +1 @@ +CONFIG_HYPERV_BALLOON=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_IOMMU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_IOMMU new file mode 100644 index 000000000000..2f259f2f3f42 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_IOMMU @@ -0,0 +1 @@ +CONFIG_HYPERV_IOMMU=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_KEYBOARD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_KEYBOARD new file mode 100644 index 000000000000..25dccab6b173 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_KEYBOARD @@ -0,0 +1 @@ +CONFIG_HYPERV_KEYBOARD=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_STORAGE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_STORAGE new file mode 100644 index 000000000000..6782fa59b524 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_STORAGE @@ -0,0 +1 @@ 
+CONFIG_HYPERV_STORAGE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_UTILS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_UTILS new file mode 100644 index 000000000000..9b8c0d2a473c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_UTILS @@ -0,0 +1 @@ +CONFIG_HYPERV_UTILS=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_VSOCKETS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_VSOCKETS new file mode 100644 index 000000000000..bd21cd6753b4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_VSOCKETS @@ -0,0 +1 @@ +CONFIG_HYPERV_VSOCKETS=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_I2C_SLAVE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_I2C_SLAVE new file mode 100644 index 000000000000..364b36792f70 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_I2C_SLAVE @@ -0,0 +1 @@ +# CONFIG_I2C_SLAVE is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_I40E_DCB b/anolis/configs/L1-RECOMMEND/x86/CONFIG_I40E_DCB new file mode 100644 index 000000000000..55eb7892030b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_I40E_DCB @@ -0,0 +1 @@ +CONFIG_I40E_DCB=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_I8K b/anolis/configs/L1-RECOMMEND/x86/CONFIG_I8K new file mode 100644 index 000000000000..fe2362aceea1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_I8K @@ -0,0 +1 @@ +# CONFIG_I8K is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IA32_EMULATION b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IA32_EMULATION new file mode 100644 index 000000000000..66c73dad00d7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IA32_EMULATION @@ -0,0 +1 @@ +CONFIG_IA32_EMULATION=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IGB_DCA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IGB_DCA new file mode 100644 index 000000000000..c2fe0c3f6a50 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IGB_DCA @@ -0,0 +1 @@ +CONFIG_IGB_DCA=y diff --git 
a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_OPA_VNIC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_OPA_VNIC new file mode 100644 index 000000000000..d79565e48151 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_OPA_VNIC @@ -0,0 +1 @@ +CONFIG_INFINIBAND_OPA_VNIC=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_RDMAVT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_RDMAVT new file mode 100644 index 000000000000..ce4854611a33 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_RDMAVT @@ -0,0 +1 @@ +CONFIG_INFINIBAND_RDMAVT=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INPUT_MOUSEDEV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INPUT_MOUSEDEV new file mode 100644 index 000000000000..cc573920bb78 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INPUT_MOUSEDEV @@ -0,0 +1 @@ +CONFIG_INPUT_MOUSEDEV=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INT3406_THERMAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INT3406_THERMAL new file mode 100644 index 000000000000..ba7fc04e6983 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INT3406_THERMAL @@ -0,0 +1 @@ +# CONFIG_INT3406_THERMAL is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INT340X_THERMAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INT340X_THERMAL new file mode 100644 index 000000000000..1df71f851b4f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INT340X_THERMAL @@ -0,0 +1 @@ +CONFIG_INT340X_THERMAL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_HFI_THERMAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_HFI_THERMAL new file mode 100644 index 000000000000..d918b09fe21c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_HFI_THERMAL @@ -0,0 +1 @@ +# CONFIG_INTEL_HFI_THERMAL is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDMA64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDMA64 new file mode 100644 index 000000000000..599b2317ee4f --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDMA64 @@ -0,0 +1 @@ +CONFIG_INTEL_IDMA64=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD new file mode 100644 index 000000000000..5ca68a398eb8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD @@ -0,0 +1 @@ +CONFIG_INTEL_IDXD=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_BUS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_BUS new file mode 100644 index 000000000000..66f94d13c622 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_BUS @@ -0,0 +1 @@ +CONFIG_INTEL_IDXD_BUS=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_PERFMON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_PERFMON new file mode 100644 index 000000000000..238078a4b727 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_PERFMON @@ -0,0 +1 @@ +# CONFIG_INTEL_IDXD_PERFMON is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_SVM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_SVM new file mode 100644 index 000000000000..930a6b9ca482 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_SVM @@ -0,0 +1 @@ +CONFIG_INTEL_IDXD_SVM=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOATDMA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOATDMA new file mode 100644 index 000000000000..916ea17276b1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOATDMA @@ -0,0 +1 @@ +CONFIG_INTEL_IOATDMA=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOMMU_PERF_EVENTS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOMMU_PERF_EVENTS new file mode 100644 index 000000000000..f2574d5938c4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOMMU_PERF_EVENTS @@ -0,0 +1 @@ +CONFIG_INTEL_IOMMU_PERF_EVENTS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON new file mode 100644 index 000000000000..e0046c4d9212 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON @@ -0,0 +1 @@ +CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI new file mode 100644 index 000000000000..15212b9f87ed --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI @@ -0,0 +1 @@ +CONFIG_INTEL_MEI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI_ME b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI_ME new file mode 100644 index 000000000000..1592e963b34f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI_ME @@ -0,0 +1 @@ +CONFIG_INTEL_MEI_ME=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI_WDT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI_WDT new file mode 100644 index 000000000000..a1d5fc1bff9e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_MEI_WDT @@ -0,0 +1 @@ +CONFIG_INTEL_MEI_WDT=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PCH_THERMAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PCH_THERMAL new file mode 100644 index 000000000000..626785458c1d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PCH_THERMAL @@ -0,0 +1 @@ +CONFIG_INTEL_PCH_THERMAL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMC_CORE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMC_CORE new file mode 100644 index 000000000000..55f71325a996 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMC_CORE @@ -0,0 +1 @@ +CONFIG_INTEL_PMC_CORE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CLASS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CLASS new file mode 100644 index 000000000000..166f5cbbe49d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CLASS @@ -0,0 +1 @@ 
+CONFIG_INTEL_PMT_CLASS=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CRASHLOG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CRASHLOG new file mode 100644 index 000000000000..4b31113c6a4a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_CRASHLOG @@ -0,0 +1 @@ +CONFIG_INTEL_PMT_CRASHLOG=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_TELEMETRY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_TELEMETRY new file mode 100644 index 000000000000..25a382862e09 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_PMT_TELEMETRY @@ -0,0 +1 @@ +CONFIG_INTEL_PMT_TELEMETRY=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_POWERCLAMP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_POWERCLAMP new file mode 100644 index 000000000000..84f4db21f22d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_POWERCLAMP @@ -0,0 +1 @@ +CONFIG_INTEL_POWERCLAMP=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL new file mode 100644 index 000000000000..c894934f73dc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL @@ -0,0 +1 @@ +CONFIG_INTEL_RAPL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_CORE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_CORE new file mode 100644 index 000000000000..0b06ec1dd119 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_CORE @@ -0,0 +1 @@ +CONFIG_INTEL_RAPL_CORE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_TPMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_TPMI new file mode 100644 index 000000000000..e809fe80e84e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_TPMI @@ -0,0 +1 @@ +# CONFIG_INTEL_RAPL_TPMI is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RST b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RST new file mode 100644 index 000000000000..3a471adf04ee --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RST @@ -0,0 +1 @@ +CONFIG_INTEL_RST=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_SPEED_SELECT_INTERFACE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_SPEED_SELECT_INTERFACE new file mode 100644 index 000000000000..293d4d2eb3e6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_SPEED_SELECT_INTERFACE @@ -0,0 +1 @@ +CONFIG_INTEL_SPEED_SELECT_INTERFACE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_SPEED_SELECT_TPMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_SPEED_SELECT_TPMI new file mode 100644 index 000000000000..27dc766e7858 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_SPEED_SELECT_TPMI @@ -0,0 +1 @@ +CONFIG_INTEL_SPEED_SELECT_TPMI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH new file mode 100644 index 000000000000..27419eb298aa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH @@ -0,0 +1 @@ +CONFIG_INTEL_TH=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_ACPI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_ACPI new file mode 100644 index 000000000000..06eaf9127685 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_ACPI @@ -0,0 +1 @@ +CONFIG_INTEL_TH_ACPI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_DEBUG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_DEBUG new file mode 100644 index 000000000000..dc2f37af726a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_DEBUG @@ -0,0 +1 @@ +# CONFIG_INTEL_TH_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_GTH b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_GTH new file mode 100644 index 000000000000..f3574b9e7ce8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_GTH @@ -0,0 +1 @@ +CONFIG_INTEL_TH_GTH=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_MSU 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_MSU new file mode 100644 index 000000000000..e3a95a9b805f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_MSU @@ -0,0 +1 @@ +CONFIG_INTEL_TH_MSU=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_PCI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_PCI new file mode 100644 index 000000000000..dcb9cb074eff --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_PCI @@ -0,0 +1 @@ +CONFIG_INTEL_TH_PCI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_PTI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_PTI new file mode 100644 index 000000000000..1db8f6b35012 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_PTI @@ -0,0 +1 @@ +CONFIG_INTEL_TH_PTI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_STH b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_STH new file mode 100644 index 000000000000..68bdb4399f28 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TH_STH @@ -0,0 +1 @@ +CONFIG_INTEL_TH_STH=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TPMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TPMI new file mode 100644 index 000000000000..0e4c0ec3308e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TPMI @@ -0,0 +1 @@ +CONFIG_INTEL_TPMI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TURBO_MAX_3 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TURBO_MAX_3 new file mode 100644 index 000000000000..9516cd8f6999 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TURBO_MAX_3 @@ -0,0 +1 @@ +CONFIG_INTEL_TURBO_MAX_3=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TXT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TXT new file mode 100644 index 000000000000..f5428a4ebce2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_TXT @@ -0,0 +1 @@ +CONFIG_INTEL_TXT=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_VSEC 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_VSEC new file mode 100644 index 000000000000..4c2846bdb979 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_VSEC @@ -0,0 +1 @@ +CONFIG_INTEL_VSEC=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOMMU_DEFAULT_DMA_STRICT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOMMU_DEFAULT_DMA_STRICT new file mode 100644 index 000000000000..33c82672246d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOMMU_DEFAULT_DMA_STRICT @@ -0,0 +1 @@ +# CONFIG_IOMMU_DEFAULT_DMA_STRICT is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOMMU_DEFAULT_PASSTHROUGH b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOMMU_DEFAULT_PASSTHROUGH new file mode 100644 index 000000000000..8c9db2c8ff5a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOMMU_DEFAULT_PASSTHROUGH @@ -0,0 +1 @@ +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOSF_MBI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOSF_MBI new file mode 100644 index 000000000000..27b224a5631e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IOSF_MBI @@ -0,0 +1 @@ +CONFIG_IOSF_MBI=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IO_DELAY_0X80 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IO_DELAY_0X80 new file mode 100644 index 000000000000..4acbe19706c1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IO_DELAY_0X80 @@ -0,0 +1 @@ +CONFIG_IO_DELAY_0X80=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SERIAL_TRANSMITTER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SERIAL_TRANSMITTER new file mode 100644 index 000000000000..aec1b1f27120 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SERIAL_TRANSMITTER @@ -0,0 +1 @@ +# CONFIG_IR_SERIAL_TRANSMITTER is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SHARP_DECODER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SHARP_DECODER new file mode 100644 index 000000000000..9ea076845465 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SHARP_DECODER @@ -0,0 +1 @@ +# CONFIG_IR_SHARP_DECODER is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_XMP_DECODER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_XMP_DECODER new file mode 100644 index 000000000000..6c9e03537430 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_XMP_DECODER @@ -0,0 +1 @@ +# CONFIG_IR_XMP_DECODER is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISA_DMA_API b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISA_DMA_API new file mode 100644 index 000000000000..22d7b84ab12d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISA_DMA_API @@ -0,0 +1 @@ +CONFIG_ISA_DMA_API=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT new file mode 100644 index 000000000000..de0808095c46 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT @@ -0,0 +1 @@ +CONFIG_ISCSI_IBFT=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT_FIND b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT_FIND new file mode 100644 index 000000000000..4737a1e892f2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ISCSI_IBFT_FIND @@ -0,0 +1 @@ +CONFIG_ISCSI_IBFT_FIND=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ITCO_VENDOR_SUPPORT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ITCO_VENDOR_SUPPORT new file mode 100644 index 000000000000..f563e52c7b1b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ITCO_VENDOR_SUPPORT @@ -0,0 +1 @@ +CONFIG_ITCO_VENDOR_SUPPORT=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ITCO_WDT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ITCO_WDT new file mode 100644 index 000000000000..c18e387e35e4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ITCO_WDT @@ -0,0 +1 @@ +CONFIG_ITCO_WDT=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IXGBE_DCA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IXGBE_DCA new file mode 100644 index 000000000000..36c6076d317b 
--- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IXGBE_DCA @@ -0,0 +1 @@ +CONFIG_IXGBE_DCA=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KCSAN b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KCSAN new file mode 100644 index 000000000000..f9c0456c344c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KCSAN @@ -0,0 +1 @@ +# CONFIG_KCSAN is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KDB_DEFAULT_ENABLE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KDB_DEFAULT_ENABLE new file mode 100644 index 000000000000..25fa262830ea --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KDB_DEFAULT_ENABLE @@ -0,0 +1 @@ +CONFIG_KDB_DEFAULT_ENABLE=0x1 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_BZIP2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_BZIP2 new file mode 100644 index 000000000000..8beb2ec34487 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_BZIP2 @@ -0,0 +1 @@ +# CONFIG_KERNEL_BZIP2 is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_GZIP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_GZIP new file mode 100644 index 000000000000..e6689c725c9a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_GZIP @@ -0,0 +1 @@ +# CONFIG_KERNEL_GZIP is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZ4 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZ4 new file mode 100644 index 000000000000..9a61eec25d85 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZ4 @@ -0,0 +1 @@ +# CONFIG_KERNEL_LZ4 is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZMA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZMA new file mode 100644 index 000000000000..149abd495777 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZMA @@ -0,0 +1 @@ +# CONFIG_KERNEL_LZMA is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZO new file mode 100644 index 
000000000000..5810f4138dc0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_LZO @@ -0,0 +1 @@ +# CONFIG_KERNEL_LZO is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_XZ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_XZ new file mode 100644 index 000000000000..4129549c260a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_XZ @@ -0,0 +1 @@ +# CONFIG_KERNEL_XZ is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_ZSTD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_ZSTD new file mode 100644 index 000000000000..dfaf8e6a9ae8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KERNEL_ZSTD @@ -0,0 +1 @@ +CONFIG_KERNEL_ZSTD=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_BZIMAGE_VERIFY_SIG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_BZIMAGE_VERIFY_SIG new file mode 100644 index 000000000000..e740740657e7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_BZIMAGE_VERIFY_SIG @@ -0,0 +1 @@ +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_JUMP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_JUMP new file mode 100644 index 000000000000..e87b72ed17ab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_JUMP @@ -0,0 +1 @@ +CONFIG_KEXEC_JUMP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_SIG_FORCE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_SIG_FORCE new file mode 100644 index 000000000000..21d707af1ae3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEXEC_SIG_FORCE @@ -0,0 +1 @@ +# CONFIG_KEXEC_SIG_FORCE is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEYBOARD_ADC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEYBOARD_ADC new file mode 100644 index 000000000000..be5cd27d6857 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEYBOARD_ADC @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_ADC is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEYBOARD_ATKBD 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEYBOARD_ATKBD new file mode 100644 index 000000000000..54a1bd12bfd8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEYBOARD_ATKBD @@ -0,0 +1 @@ +CONFIG_KEYBOARD_ATKBD=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KGDB_LOW_LEVEL_TRAP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KGDB_LOW_LEVEL_TRAP new file mode 100644 index 000000000000..18fdda1977f6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KGDB_LOW_LEVEL_TRAP @@ -0,0 +1 @@ +CONFIG_KGDB_LOW_LEVEL_TRAP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KVM_AMD_SEV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KVM_AMD_SEV new file mode 100644 index 000000000000..de33426a5a81 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KVM_AMD_SEV @@ -0,0 +1 @@ +CONFIG_KVM_AMD_SEV=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID new file mode 100644 index 000000000000..e09de32dc399 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID @@ -0,0 +1 @@ +CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_LEGACY_VSYSCALL_NONE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_LEGACY_VSYSCALL_NONE new file mode 100644 index 000000000000..d3697026578d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_LEGACY_VSYSCALL_NONE @@ -0,0 +1 @@ +# CONFIG_LEGACY_VSYSCALL_NONE is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_LOG_BUF_SHIFT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_LOG_BUF_SHIFT new file mode 100644 index 000000000000..d57cc3d2d84a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_LOG_BUF_SHIFT @@ -0,0 +1 @@ +CONFIG_LOG_BUF_SHIFT=21 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MAPPING_DIRTY_HELPERS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MAPPING_DIRTY_HELPERS new file mode 100644 index 000000000000..22e6d62645c5 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MAPPING_DIRTY_HELPERS @@ -0,0 +1 @@ +CONFIG_MAPPING_DIRTY_HELPERS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MAXSMP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MAXSMP new file mode 100644 index 000000000000..d0d71de5336d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MAXSMP @@ -0,0 +1 @@ +# CONFIG_MAXSMP is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MCORE2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MCORE2 new file mode 100644 index 000000000000..5d6819c2c762 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MCORE2 @@ -0,0 +1 @@ +# CONFIG_MCORE2 is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MELLANOX_PLATFORM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MELLANOX_PLATFORM new file mode 100644 index 000000000000..8bd7b2e548c8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MELLANOX_PLATFORM @@ -0,0 +1 @@ +CONFIG_MELLANOX_PLATFORM=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MEM_SOFT_DIRTY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MEM_SOFT_DIRTY new file mode 100644 index 000000000000..356f2edd8522 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MEM_SOFT_DIRTY @@ -0,0 +1 @@ +CONFIG_MEM_SOFT_DIRTY=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MICROCODE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MICROCODE new file mode 100644 index 000000000000..bbf6abac40e2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MICROCODE @@ -0,0 +1 @@ +CONFIG_MICROCODE=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_RFDS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_RFDS new file mode 100644 index 000000000000..01a4bd8d8955 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_RFDS @@ -0,0 +1 @@ +CONFIG_MITIGATION_RFDS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLX5_CORE_IPOIB b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLX5_CORE_IPOIB new file mode 100644 index 000000000000..0b04de7574c6 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLX5_CORE_IPOIB @@ -0,0 +1 @@ +# CONFIG_MLX5_CORE_IPOIB is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLXREG_HOTPLUG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLXREG_HOTPLUG new file mode 100644 index 000000000000..44d9d37713a4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLXREG_HOTPLUG @@ -0,0 +1 @@ +CONFIG_MLXREG_HOTPLUG=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLX_PLATFORM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLX_PLATFORM new file mode 100644 index 000000000000..54e7906ec22b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MLX_PLATFORM @@ -0,0 +1 @@ +CONFIG_MLX_PLATFORM=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MODIFY_LDT_SYSCALL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MODIFY_LDT_SYSCALL new file mode 100644 index 000000000000..769222e7e834 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MODIFY_LDT_SYSCALL @@ -0,0 +1 @@ +CONFIG_MODIFY_LDT_SYSCALL=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MOUSE_PS2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MOUSE_PS2 new file mode 100644 index 000000000000..5902f25a65e4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MOUSE_PS2 @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER new file mode 100644 index 000000000000..2a32885dd6de --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER @@ -0,0 +1 @@ +CONFIG_MTRR_SANITIZER=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT new file mode 100644 index 000000000000..80cb642a332f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT @@ -0,0 +1 @@ +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT new file mode 100644 index 000000000000..f400e9c4437e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT @@ -0,0 +1 @@ +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NET_TULIP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NET_TULIP new file mode 100644 index 000000000000..3a54ce6fffc6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NET_TULIP @@ -0,0 +1 @@ +# CONFIG_NET_TULIP is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_DEFAULT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_DEFAULT new file mode 100644 index 000000000000..b6b78b619e42 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_DEFAULT @@ -0,0 +1 @@ +CONFIG_NR_CPUS_DEFAULT=64 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_BEGIN b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_BEGIN new file mode 100644 index 000000000000..5ba0de00121d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_BEGIN @@ -0,0 +1 @@ +CONFIG_NR_CPUS_RANGE_BEGIN=2 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_END b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_END new file mode 100644 index 000000000000..61a564799684 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NR_CPUS_RANGE_END @@ -0,0 +1 @@ +CONFIG_NR_CPUS_RANGE_END=8192 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_EMU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_EMU new file mode 100644 index 000000000000..a444d47bb6da --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_EMU @@ -0,0 +1 @@ +CONFIG_NUMA_EMU=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NVRAM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NVRAM new file mode 100644 index 000000000000..a296f91340f7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NVRAM @@ -0,0 +1 @@ +CONFIG_NVRAM=y 
diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NV_TCO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NV_TCO new file mode 100644 index 000000000000..984c1ff65944 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NV_TCO @@ -0,0 +1 @@ +CONFIG_NV_TCO=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PARAVIRT_SPINLOCKS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PARAVIRT_SPINLOCKS new file mode 100644 index 000000000000..14b4c8d8d785 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PARAVIRT_SPINLOCKS @@ -0,0 +1 @@ +CONFIG_PARAVIRT_SPINLOCKS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_HYPERV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_HYPERV new file mode 100644 index 000000000000..baf87f9b9c53 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_HYPERV @@ -0,0 +1 @@ +CONFIG_PCI_HYPERV=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_HYPERV_INTERFACE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_HYPERV_INTERFACE new file mode 100644 index 000000000000..20446aad4a67 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_HYPERV_INTERFACE @@ -0,0 +1 @@ +CONFIG_PCI_HYPERV_INTERFACE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_MMCONFIG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_MMCONFIG new file mode 100644 index 000000000000..cd749582cb16 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_MMCONFIG @@ -0,0 +1 @@ +CONFIG_PCI_MMCONFIG=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCSPKR_PLATFORM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCSPKR_PLATFORM new file mode 100644 index 000000000000..3da062fee8cc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCSPKR_PLATFORM @@ -0,0 +1 @@ +CONFIG_PCSPKR_PLATFORM=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_BRS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_BRS new file mode 100644 index 000000000000..aed94fdcd2ce --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_BRS @@ 
-0,0 +1 @@ +CONFIG_PERF_EVENTS_AMD_BRS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_POWER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_POWER new file mode 100644 index 000000000000..481bdca2953b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_POWER @@ -0,0 +1 @@ +CONFIG_PERF_EVENTS_AMD_POWER=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_UNCORE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_UNCORE new file mode 100644 index 000000000000..934014659916 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_AMD_UNCORE @@ -0,0 +1 @@ +CONFIG_PERF_EVENTS_AMD_UNCORE=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_CSTATE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_CSTATE new file mode 100644 index 000000000000..aca4e0481c58 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_CSTATE @@ -0,0 +1 @@ +CONFIG_PERF_EVENTS_INTEL_CSTATE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_RAPL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_RAPL new file mode 100644 index 000000000000..b4c5123f1063 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_RAPL @@ -0,0 +1 @@ +CONFIG_PERF_EVENTS_INTEL_RAPL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_UNCORE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_UNCORE new file mode 100644 index 000000000000..c96e92f02580 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PERF_EVENTS_INTEL_UNCORE @@ -0,0 +1 @@ +CONFIG_PERF_EVENTS_INTEL_UNCORE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PHYSICAL_ALIGN b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PHYSICAL_ALIGN new file mode 100644 index 000000000000..6a12c860e655 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PHYSICAL_ALIGN @@ -0,0 +1 @@ +CONFIG_PHYSICAL_ALIGN=0x1000000 diff --git 
a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PINCTRL_KX7000 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PINCTRL_KX7000 new file mode 100644 index 000000000000..5aa54c945709 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PINCTRL_KX7000 @@ -0,0 +1 @@ +CONFIG_PINCTRL_KX7000=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PINCTRL_ZHAOXIN b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PINCTRL_ZHAOXIN new file mode 100644 index 000000000000..82e13600e546 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PINCTRL_ZHAOXIN @@ -0,0 +1 @@ +CONFIG_PINCTRL_ZHAOXIN=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PM_TRACE_RTC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PM_TRACE_RTC new file mode 100644 index 000000000000..56d77e177ee9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PM_TRACE_RTC @@ -0,0 +1 @@ +# CONFIG_PM_TRACE_RTC is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_POWERCAP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_POWERCAP new file mode 100644 index 000000000000..279fe368fcea --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_POWERCAP @@ -0,0 +1 @@ +CONFIG_POWERCAP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PROC_THERMAL_MMIO_RAPL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PROC_THERMAL_MMIO_RAPL new file mode 100644 index 000000000000..7460c13275f1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PROC_THERMAL_MMIO_RAPL @@ -0,0 +1 @@ +CONFIG_PROC_THERMAL_MMIO_RAPL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PTE_MARKER_UFFD_WP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PTE_MARKER_UFFD_WP new file mode 100644 index 000000000000..644df34574dd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_PTE_MARKER_UFFD_WP @@ -0,0 +1 @@ +CONFIG_PTE_MARKER_UFFD_WP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_QAT_VFIO_PCI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_QAT_VFIO_PCI new file mode 100644 index 000000000000..6a4f45288b3d --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_QAT_VFIO_PCI @@ -0,0 +1 @@ +CONFIG_QAT_VFIO_PCI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING new file mode 100644 index 000000000000..2063d2ecfdcb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING @@ -0,0 +1 @@ +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC new file mode 100644 index 000000000000..7b0901ca1fb1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC @@ -0,0 +1 @@ +CONFIG_RAS_CEC=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC_DEBUG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC_DEBUG new file mode 100644 index 000000000000..116af5def7d5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC_DEBUG @@ -0,0 +1 @@ +# CONFIG_RAS_CEC_DEBUG is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RESCTRL_FS_PSEUDO_LOCK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RESCTRL_FS_PSEUDO_LOCK new file mode 100644 index 000000000000..731e801f5294 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RESCTRL_FS_PSEUDO_LOCK @@ -0,0 +1 @@ +CONFIG_RESCTRL_FS_PSEUDO_LOCK=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RTC_DRV_CMOS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RTC_DRV_CMOS new file mode 100644 index 000000000000..2051e4afebd5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RTC_DRV_CMOS @@ -0,0 +1 @@ +CONFIG_RTC_DRV_CMOS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCHED_MC_PRIO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCHED_MC_PRIO new file mode 100644 index 000000000000..893581e346bc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCHED_MC_PRIO @@ -0,0 +1 @@ +CONFIG_SCHED_MC_PRIO=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCHED_OMIT_FRAME_POINTER 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCHED_OMIT_FRAME_POINTER new file mode 100644 index 000000000000..c567a751378e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCHED_OMIT_FRAME_POINTER @@ -0,0 +1 @@ +CONFIG_SCHED_OMIT_FRAME_POINTER=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SDEI_WATCHDOG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SDEI_WATCHDOG new file mode 100644 index 000000000000..01cbe8e321af --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SDEI_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_SDEI_WATCHDOG is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SENSORS_ZHAOXIN_CPUTEMP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SENSORS_ZHAOXIN_CPUTEMP new file mode 100644 index 000000000000..c8c7e99082e4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SENSORS_ZHAOXIN_CPUTEMP @@ -0,0 +1 @@ +CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SERIO_I8042 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SERIO_I8042 new file mode 100644 index 000000000000..8e5a28dd35c8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SERIO_I8042 @@ -0,0 +1 @@ +CONFIG_SERIO_I8042=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SGETMASK_SYSCALL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SGETMASK_SYSCALL new file mode 100644 index 000000000000..61bf135dd3f9 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SGETMASK_SYSCALL @@ -0,0 +1 @@ +CONFIG_SGETMASK_SYSCALL=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SLS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SLS new file mode 100644 index 000000000000..96eccd587468 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SLS @@ -0,0 +1 @@ +# CONFIG_SLS is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SP5100_TCO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SP5100_TCO new file mode 100644 index 000000000000..db59115bfab4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SP5100_TCO @@ -0,0 +1 @@ +CONFIG_SP5100_TCO=m diff --git 
a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SPI_CADENCE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SPI_CADENCE new file mode 100644 index 000000000000..78e16ec974b8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SPI_CADENCE @@ -0,0 +1 @@ +# CONFIG_SPI_CADENCE is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SPI_DESIGNWARE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SPI_DESIGNWARE new file mode 100644 index 000000000000..de58a1341d27 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SPI_DESIGNWARE @@ -0,0 +1 @@ +# CONFIG_SPI_DESIGNWARE is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SQUASHFS_LZ4 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SQUASHFS_LZ4 new file mode 100644 index 000000000000..27d171277c77 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SQUASHFS_LZ4 @@ -0,0 +1 @@ +# CONFIG_SQUASHFS_LZ4 is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STAGING b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STAGING new file mode 100644 index 000000000000..9f033d229aac --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STAGING @@ -0,0 +1 @@ +# CONFIG_STAGING is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STATIC_CALL_SELFTEST b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STATIC_CALL_SELFTEST new file mode 100644 index 000000000000..ac1a3526ff7a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STATIC_CALL_SELFTEST @@ -0,0 +1 @@ +# CONFIG_STATIC_CALL_SELFTEST is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_DUMMY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_DUMMY new file mode 100644 index 000000000000..309ca390e91e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_DUMMY @@ -0,0 +1 @@ +CONFIG_STM_DUMMY=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_PROTO_BASIC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_PROTO_BASIC new file mode 100644 index 000000000000..76be4de38a82 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_PROTO_BASIC @@ 
-0,0 +1 @@ +CONFIG_STM_PROTO_BASIC=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_PROTO_SYS_T b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_PROTO_SYS_T new file mode 100644 index 000000000000..c0bbf7e78067 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_PROTO_SYS_T @@ -0,0 +1 @@ +CONFIG_STM_PROTO_SYS_T=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_CONSOLE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_CONSOLE new file mode 100644 index 000000000000..6a4b15b5e598 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_CONSOLE @@ -0,0 +1 @@ +CONFIG_STM_SOURCE_CONSOLE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_FTRACE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_FTRACE new file mode 100644 index 000000000000..d832097801ca --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_FTRACE @@ -0,0 +1 @@ +CONFIG_STM_SOURCE_FTRACE=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_HEARTBEAT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_HEARTBEAT new file mode 100644 index 000000000000..0df073d48dcd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STM_SOURCE_HEARTBEAT @@ -0,0 +1 @@ +CONFIG_STM_SOURCE_HEARTBEAT=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_STRICT_SIGALTSTACK_SIZE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STRICT_SIGALTSTACK_SIZE new file mode 100644 index 000000000000..042170f3b2a7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_STRICT_SIGALTSTACK_SIZE @@ -0,0 +1 @@ +# CONFIG_STRICT_SIGALTSTACK_SIZE is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_HYGON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_HYGON new file mode 100644 index 000000000000..aaa0cb94d759 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_HYGON @@ -0,0 +1 @@ +CONFIG_TCG_HYGON=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_INFINEON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_INFINEON new file mode 100644 
index 000000000000..d73a2c668a55 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_INFINEON @@ -0,0 +1 @@ +CONFIG_TCG_INFINEON=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_NSC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_NSC new file mode 100644 index 000000000000..21b0d6865582 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_NSC @@ -0,0 +1 @@ +CONFIG_TCG_NSC=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCM_HYGON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCM_HYGON new file mode 100644 index 000000000000..432adad10f93 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCM_HYGON @@ -0,0 +1 @@ +CONFIG_TCM_HYGON=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCP_CONG_CDG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCP_CONG_CDG new file mode 100644 index 000000000000..fb074cdd5257 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCP_CONG_CDG @@ -0,0 +1 @@ +CONFIG_TCP_CONG_CDG=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDM_DEV_HYGON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDM_DEV_HYGON new file mode 100644 index 000000000000..ba303419c7cb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDM_DEV_HYGON @@ -0,0 +1 @@ +CONFIG_TDM_DEV_HYGON=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDM_KERNEL_GUARD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDM_KERNEL_GUARD new file mode 100644 index 000000000000..4498c082785f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDM_KERNEL_GUARD @@ -0,0 +1 @@ +CONFIG_TDM_KERNEL_GUARD=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDX_GUEST_DRIVER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDX_GUEST_DRIVER new file mode 100644 index 000000000000..eb5121298e70 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TDX_GUEST_DRIVER @@ -0,0 +1 @@ +CONFIG_TDX_GUEST_DRIVER=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TEST_LIVEPATCH b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TEST_LIVEPATCH new file mode 100644 index 
000000000000..0dd7700464a8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TEST_LIVEPATCH @@ -0,0 +1 @@ +CONFIG_TEST_LIVEPATCH=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_GOV_BANG_BANG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_GOV_BANG_BANG new file mode 100644 index 000000000000..7f6f73a466ab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_GOV_BANG_BANG @@ -0,0 +1 @@ +CONFIG_THERMAL_GOV_BANG_BANG=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_WRITABLE_TRIPS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_WRITABLE_TRIPS new file mode 100644 index 000000000000..a0a8924e042b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_WRITABLE_TRIPS @@ -0,0 +1 @@ +CONFIG_THERMAL_WRITABLE_TRIPS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TOUCHSCREEN_ADC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TOUCHSCREEN_ADC new file mode 100644 index 000000000000..e20c7d85a490 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TOUCHSCREEN_ADC @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ADC is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_UCLAMP_TASK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_UCLAMP_TASK new file mode 100644 index 000000000000..aea06191db20 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_UCLAMP_TASK @@ -0,0 +1 @@ +# CONFIG_UCLAMP_TASK is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_UIO_HV_GENERIC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_UIO_HV_GENERIC new file mode 100644 index 000000000000..12f0b2b2a403 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_UIO_HV_GENERIC @@ -0,0 +1 @@ +CONFIG_UIO_HV_GENERIC=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_UNACCEPTED_MEMORY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_UNACCEPTED_MEMORY new file mode 100644 index 000000000000..2c1ae834f15f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_UNACCEPTED_MEMORY @@ -0,0 +1 @@ +CONFIG_UNACCEPTED_MEMORY=y diff --git 
a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VBOXGUEST b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VBOXGUEST new file mode 100644 index 000000000000..790186610b10 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VBOXGUEST @@ -0,0 +1 @@ +# CONFIG_VBOXGUEST is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_MDEV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_MDEV new file mode 100644 index 000000000000..6657966d948d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_MDEV @@ -0,0 +1 @@ +CONFIG_VFIO_MDEV=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_PCI_IGD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_PCI_IGD new file mode 100644 index 000000000000..566c032ec0f4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_PCI_IGD @@ -0,0 +1 @@ +# CONFIG_VFIO_PCI_IGD is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VGA_SWITCHEROO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VGA_SWITCHEROO new file mode 100644 index 000000000000..1bfcb26d6e76 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VGA_SWITCHEROO @@ -0,0 +1 @@ +CONFIG_VGA_SWITCHEROO=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES new file mode 100644 index 000000000000..4066b9c11b29 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES @@ -0,0 +1 @@ +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB new file mode 100644 index 000000000000..ec44dcaec92a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB @@ -0,0 +1 @@ +CONFIG_VIRTIO_PCI_LIB=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB_LEGACY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB_LEGACY new file mode 100644 index 000000000000..03baf7192cc7 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_PCI_LIB_LEGACY @@ -0,0 +1 @@ +CONFIG_VIRTIO_PCI_LIB_LEGACY=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRT_DRIVERS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRT_DRIVERS new file mode 100644 index 000000000000..7173b9c64eeb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRT_DRIVERS @@ -0,0 +1 @@ +CONFIG_VIRT_DRIVERS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMD new file mode 100644 index 000000000000..7434a552c9e4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMD @@ -0,0 +1 @@ +CONFIG_VMD=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_BALLOON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_BALLOON new file mode 100644 index 000000000000..324a0cebc0e3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_BALLOON @@ -0,0 +1 @@ +CONFIG_VMWARE_BALLOON=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_PVSCSI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_PVSCSI new file mode 100644 index 000000000000..3568e9188595 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_PVSCSI @@ -0,0 +1 @@ +CONFIG_VMWARE_PVSCSI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI new file mode 100644 index 000000000000..e54667c66b59 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI @@ -0,0 +1 @@ +CONFIG_VMWARE_VMCI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI_VSOCKETS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI_VSOCKETS new file mode 100644 index 000000000000..e49bd5cf214c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMWARE_VMCI_VSOCKETS @@ -0,0 +1 @@ +CONFIG_VMWARE_VMCI_VSOCKETS=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMXNET3 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMXNET3 new file mode 100644 index 000000000000..a5a8f9fa7eeb --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VMXNET3 @@ -0,0 +1 @@ +CONFIG_VMXNET3=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_WDAT_WDT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_WDAT_WDT new file mode 100644 index 000000000000..0aeb123ab6fb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_WDAT_WDT @@ -0,0 +1 @@ +CONFIG_WDAT_WDT=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_16BIT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_16BIT new file mode 100644 index 000000000000..471298885d65 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_16BIT @@ -0,0 +1 @@ +CONFIG_X86_16BIT=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_ACPI_CPUFREQ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_ACPI_CPUFREQ new file mode 100644 index 000000000000..95aca65354c3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_ACPI_CPUFREQ @@ -0,0 +1 @@ +CONFIG_X86_ACPI_CPUFREQ=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_FREQ_SENSITIVITY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_FREQ_SENSITIVITY new file mode 100644 index 000000000000..30b790026842 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_FREQ_SENSITIVITY @@ -0,0 +1 @@ +CONFIG_X86_AMD_FREQ_SENSITIVITY=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PLATFORM_DEVICE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PLATFORM_DEVICE new file mode 100644 index 000000000000..4da780aa721a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PLATFORM_DEVICE @@ -0,0 +1 @@ +CONFIG_X86_AMD_PLATFORM_DEVICE=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE new file mode 100644 index 000000000000..377cfefb7f94 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE @@ -0,0 +1 @@ +CONFIG_X86_AMD_PSTATE=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE_DEFAULT_MODE 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE_DEFAULT_MODE new file mode 100644 index 000000000000..c72fef3fc669 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_AMD_PSTATE_DEFAULT_MODE @@ -0,0 +1 @@ +CONFIG_X86_AMD_PSTATE_DEFAULT_MODE=3 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK new file mode 100644 index 000000000000..9f86fd4f5ee8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK @@ -0,0 +1 @@ +# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CHECK_BIOS_CORRUPTION b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CHECK_BIOS_CORRUPTION new file mode 100644 index 000000000000..be693cd811ff --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CHECK_BIOS_CORRUPTION @@ -0,0 +1 @@ +CONFIG_X86_CHECK_BIOS_CORRUPTION=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CPA_STATISTICS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CPA_STATISTICS new file mode 100644 index 000000000000..7aa847ecbd82 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CPA_STATISTICS @@ -0,0 +1 @@ +CONFIG_X86_CPA_STATISTICS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CPUID b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CPUID new file mode 100644 index 000000000000..165a101605bd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_CPUID @@ -0,0 +1 @@ +CONFIG_X86_CPUID=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DEBUG_FPU b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DEBUG_FPU new file mode 100644 index 000000000000..26258a114764 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DEBUG_FPU @@ -0,0 +1 @@ +# CONFIG_X86_DEBUG_FPU is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DECODER_SELFTEST b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DECODER_SELFTEST 
new file mode 100644 index 000000000000..4452e43ddc1a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DECODER_SELFTEST @@ -0,0 +1 @@ +CONFIG_X86_DECODER_SELFTEST=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DIRECT_GBPAGES b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DIRECT_GBPAGES new file mode 100644 index 000000000000..4cf6e4f59733 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_DIRECT_GBPAGES @@ -0,0 +1 @@ +CONFIG_X86_DIRECT_GBPAGES=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_ESPFIX64 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_ESPFIX64 new file mode 100644 index 000000000000..30aedc033a67 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_ESPFIX64 @@ -0,0 +1 @@ +CONFIG_X86_ESPFIX64=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_EXTENDED_PLATFORM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_EXTENDED_PLATFORM new file mode 100644 index 000000000000..8000cbb78a00 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_EXTENDED_PLATFORM @@ -0,0 +1 @@ +CONFIG_X86_EXTENDED_PLATFORM=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_GOLDFISH b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_GOLDFISH new file mode 100644 index 000000000000..e509890ff039 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_GOLDFISH @@ -0,0 +1 @@ +# CONFIG_X86_GOLDFISH is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_LPSS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_LPSS new file mode 100644 index 000000000000..2d20612f80db --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_LPSS @@ -0,0 +1 @@ +CONFIG_X86_INTEL_LPSS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS new file mode 100644 index 000000000000..9d135af1dee3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS @@ -0,0 +1 @@ 
+CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_MID b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_MID new file mode 100644 index 000000000000..9c338605e6df --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_MID @@ -0,0 +1 @@ +# CONFIG_X86_INTEL_MID is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_PSTATE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_PSTATE new file mode 100644 index 000000000000..35365aa70954 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_PSTATE @@ -0,0 +1 @@ +CONFIG_X86_INTEL_PSTATE=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_AUTO b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_AUTO new file mode 100644 index 000000000000..60d980ced4bf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_AUTO @@ -0,0 +1 @@ +CONFIG_X86_INTEL_TSX_MODE_AUTO=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_OFF b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_OFF new file mode 100644 index 000000000000..7a9d3e959359 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_OFF @@ -0,0 +1 @@ +# CONFIG_X86_INTEL_TSX_MODE_OFF is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_ON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_ON new file mode 100644 index 000000000000..b6471915c150 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_INTEL_TSX_MODE_ON @@ -0,0 +1 @@ +# CONFIG_X86_INTEL_TSX_MODE_ON is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_KERNEL_IBT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_KERNEL_IBT new file mode 100644 index 000000000000..a2a71465fadc --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_KERNEL_IBT @@ -0,0 +1 @@ +CONFIG_X86_KERNEL_IBT=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MCELOG_LEGACY 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MCELOG_LEGACY new file mode 100644 index 000000000000..2a4755640b4a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MCELOG_LEGACY @@ -0,0 +1 @@ +CONFIG_X86_MCELOG_LEGACY=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MCE_INJECT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MCE_INJECT new file mode 100644 index 000000000000..1e3d328432bf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MCE_INJECT @@ -0,0 +1 @@ +CONFIG_X86_MCE_INJECT=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MPPARSE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MPPARSE new file mode 100644 index 000000000000..1e4f55144dbb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MPPARSE @@ -0,0 +1 @@ +CONFIG_X86_MPPARSE=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MSR b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MSR new file mode 100644 index 000000000000..18d5b64ddd08 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_MSR @@ -0,0 +1 @@ +CONFIG_X86_MSR=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_NUMACHIP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_NUMACHIP new file mode 100644 index 000000000000..13b9221418fa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_NUMACHIP @@ -0,0 +1 @@ +# CONFIG_X86_NUMACHIP is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_P4_CLOCKMOD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_P4_CLOCKMOD new file mode 100644 index 000000000000..714e722dd085 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_P4_CLOCKMOD @@ -0,0 +1 @@ +CONFIG_X86_P4_CLOCKMOD=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PCC_CPUFREQ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PCC_CPUFREQ new file mode 100644 index 000000000000..533a2352e2cd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PCC_CPUFREQ @@ -0,0 +1 @@ +# CONFIG_X86_PCC_CPUFREQ is not set diff --git 
a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PKG_TEMP_THERMAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PKG_TEMP_THERMAL new file mode 100644 index 000000000000..8fb87c3a6028 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PKG_TEMP_THERMAL @@ -0,0 +1 @@ +CONFIG_X86_PKG_TEMP_THERMAL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PM_TIMER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PM_TIMER new file mode 100644 index 000000000000..75c2da2a426e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PM_TIMER @@ -0,0 +1 @@ +CONFIG_X86_PM_TIMER=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS new file mode 100644 index 000000000000..d7dc1147e9ab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS @@ -0,0 +1 @@ +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_SGX_KVM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_SGX_KVM new file mode 100644 index 000000000000..3737e6294a8c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_SGX_KVM @@ -0,0 +1 @@ +CONFIG_X86_SGX_KVM=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_SUPPORTS_MEMORY_FAILURE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_SUPPORTS_MEMORY_FAILURE new file mode 100644 index 000000000000..ec1cc6f38455 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_SUPPORTS_MEMORY_FAILURE @@ -0,0 +1 @@ +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_UV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_UV new file mode 100644 index 000000000000..48e2726e3fbe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_UV @@ -0,0 +1 @@ +CONFIG_X86_UV=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_VERBOSE_BOOTUP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_VERBOSE_BOOTUP new file mode 100644 index 
000000000000..e209c212bc46 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_VERBOSE_BOOTUP @@ -0,0 +1 @@ +# CONFIG_X86_VERBOSE_BOOTUP is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_VSMP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_VSMP new file mode 100644 index 000000000000..808ee39bfccb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_VSMP @@ -0,0 +1 @@ +# CONFIG_X86_VSMP is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN new file mode 100644 index 000000000000..a4985b44e2d6 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN @@ -0,0 +1 @@ +CONFIG_XEN=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN_NETDEV_FRONTEND b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN_NETDEV_FRONTEND new file mode 100644 index 000000000000..eb86ad0f82d8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN_NETDEV_FRONTEND @@ -0,0 +1 @@ +CONFIG_XEN_NETDEV_FRONTEND=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN_PVHVM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN_PVHVM new file mode 100644 index 000000000000..be722d2200ef --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XEN_PVHVM @@ -0,0 +1 @@ +CONFIG_XEN_PVHVM=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_XFRM_USER_COMPAT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XFRM_USER_COMPAT new file mode 100644 index 000000000000..798d10e8e93e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_XFRM_USER_COMPAT @@ -0,0 +1 @@ +# CONFIG_XFRM_USER_COMPAT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_A64FX_DIAG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_A64FX_DIAG new file mode 100644 index 000000000000..feaee255ff05 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_A64FX_DIAG @@ -0,0 +1 @@ +# CONFIG_A64FX_DIAG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_APEI_SEA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_APEI_SEA new file mode 
100644 index 000000000000..db573ffb867b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_APEI_SEA @@ -0,0 +1 @@ +CONFIG_ACPI_APEI_SEA=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_APMT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_APMT new file mode 100644 index 000000000000..844ccb4d36d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_APMT @@ -0,0 +1 @@ +CONFIG_ACPI_APMT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_CCA_REQUIRED b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_CCA_REQUIRED new file mode 100644 index 000000000000..341bde47989e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_CCA_REQUIRED @@ -0,0 +1 @@ +CONFIG_ACPI_CCA_REQUIRED=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_CPPC_CPUFREQ_FIE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_CPPC_CPUFREQ_FIE new file mode 100644 index 000000000000..da007c591e66 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_CPPC_CPUFREQ_FIE @@ -0,0 +1 @@ +CONFIG_ACPI_CPPC_CPUFREQ_FIE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_GENERIC_GSI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_GENERIC_GSI new file mode 100644 index 000000000000..b65d19be0a03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_GENERIC_GSI @@ -0,0 +1 @@ +CONFIG_ACPI_GENERIC_GSI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_GTDT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_GTDT new file mode 100644 index 000000000000..c8fd21b4d5e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_GTDT @@ -0,0 +1 @@ +CONFIG_ACPI_GTDT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_IORT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_IORT new file mode 100644 index 000000000000..447dcd823407 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_IORT @@ -0,0 +1 @@ +CONFIG_ACPI_IORT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_MCFG 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_MCFG new file mode 100644 index 000000000000..26b4dba417bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_MCFG @@ -0,0 +1 @@ +CONFIG_ACPI_MCFG=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_MPAM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_MPAM new file mode 100644 index 000000000000..e93cbd36cedc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_MPAM @@ -0,0 +1 @@ +CONFIG_ACPI_MPAM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_PPTT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_PPTT new file mode 100644 index 000000000000..bfd01f155d3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ACPI_PPTT @@ -0,0 +1 @@ +CONFIG_ACPI_PPTT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AHCI_CEVA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AHCI_CEVA new file mode 100644 index 000000000000..d9279dda2974 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AHCI_CEVA @@ -0,0 +1 @@ +# CONFIG_AHCI_CEVA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ALTERA_STAPL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ALTERA_STAPL new file mode 100644 index 000000000000..f454734fa68e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ALTERA_STAPL @@ -0,0 +1 @@ +# CONFIG_ALTERA_STAPL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AL_FIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AL_FIC new file mode 100644 index 000000000000..9e85b9bdae31 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AL_FIC @@ -0,0 +1 @@ +# CONFIG_AL_FIC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMBA_PL08X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMBA_PL08X new file mode 100644 index 000000000000..a13d91b90d19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMBA_PL08X @@ -0,0 +1 @@ +# CONFIG_AMBA_PL08X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD8111_ETH 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD8111_ETH new file mode 100644 index 000000000000..a8d56c027174 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD8111_ETH @@ -0,0 +1 @@ +# CONFIG_AMD8111_ETH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE new file mode 100644 index 000000000000..27be1a7ee2c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE @@ -0,0 +1 @@ +CONFIG_AMD_XGBE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE_DCB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE_DCB new file mode 100644 index 000000000000..f76ed0831cd6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMD_XGBE_DCB @@ -0,0 +1 @@ +# CONFIG_AMD_XGBE_DCB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMIGA_PARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMIGA_PARTITION new file mode 100644 index 000000000000..300a97b3270f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMIGA_PARTITION @@ -0,0 +1 @@ +# CONFIG_AMIGA_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMPERE_ERRATUM_AC03_CPU_38 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMPERE_ERRATUM_AC03_CPU_38 new file mode 100644 index 000000000000..fa29e9c838d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AMPERE_ERRATUM_AC03_CPU_38 @@ -0,0 +1 @@ +CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_APDS9802ALS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_APDS9802ALS new file mode 100644 index 000000000000..c40795bfa26f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_APDS9802ALS @@ -0,0 +1 @@ +# CONFIG_APDS9802ALS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AQTION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AQTION new file mode 100644 index 000000000000..42dea55ccf3b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AQTION @@ -0,0 +1 @@ +# CONFIG_AQTION is 
not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ACTIONS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ACTIONS new file mode 100644 index 000000000000..760663b50548 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ACTIONS @@ -0,0 +1 @@ +# CONFIG_ARCH_ACTIONS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ALPINE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ALPINE new file mode 100644 index 000000000000..a347a04fc899 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ALPINE @@ -0,0 +1 @@ +# CONFIG_ARCH_ALPINE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_APPLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_APPLE new file mode 100644 index 000000000000..793ee5ebd4f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_APPLE @@ -0,0 +1 @@ +# CONFIG_ARCH_APPLE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BCM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BCM new file mode 100644 index 000000000000..3d8a993e6fd7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BCM @@ -0,0 +1 @@ +# CONFIG_ARCH_BCM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BERLIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BERLIN new file mode 100644 index 000000000000..9a647a4a160c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BERLIN @@ -0,0 +1 @@ +# CONFIG_ARCH_BERLIN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS new file mode 100644 index 000000000000..df9c31a7d853 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS @@ -0,0 +1 @@ +CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BINFMT_ELF_STATE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BINFMT_ELF_STATE new file mode 100644 index 000000000000..5f20719ea1d6 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BINFMT_ELF_STATE @@ -0,0 +1 @@ +CONFIG_ARCH_BINFMT_ELF_STATE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BITMAIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BITMAIN new file mode 100644 index 000000000000..ac0bc6480373 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_BITMAIN @@ -0,0 +1 @@ +# CONFIG_ARCH_BITMAIN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG new file mode 100644 index 000000000000..cf025a741a90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG @@ -0,0 +1 @@ +CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_EXYNOS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_EXYNOS new file mode 100644 index 000000000000..1ee894dd8194 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_EXYNOS @@ -0,0 +1 @@ +# CONFIG_ARCH_EXYNOS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_DMA_PREP_COHERENT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_DMA_PREP_COHERENT new file mode 100644 index 000000000000..2c6e5013bfe2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_DMA_PREP_COHERENT @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_KEEPINITRD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_KEEPINITRD new file mode 100644 index 000000000000..84f3bdc50740 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_KEEPINITRD @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_KEEPINITRD=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RELR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RELR new file mode 100644 index 000000000000..701f9cc08991 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RELR @@ 
-0,0 +1 @@ +CONFIG_ARCH_HAS_RELR=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SETUP_DMA_OPS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SETUP_DMA_OPS new file mode 100644 index 000000000000..e371dd95954d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SETUP_DMA_OPS @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_SETUP_DMA_OPS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SUBPAGE_FAULTS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SUBPAGE_FAULTS new file mode 100644 index 000000000000..907b4c6e5f4e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SUBPAGE_FAULTS @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU new file mode 100644 index 000000000000..d3f3f32bdbca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE new file mode 100644 index 000000000000..131ace1f9533 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS new file mode 100644 index 000000000000..31ff9e21830a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_TICK_BROADCAST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_TICK_BROADCAST new file mode 100644 index 000000000000..869e3ebc6380 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_TICK_BROADCAST @@ 
-0,0 +1 @@ +CONFIG_ARCH_HAS_TICK_BROADCAST=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAVE_ELF_PROT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAVE_ELF_PROT new file mode 100644 index 000000000000..54b8c8c2f457 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAVE_ELF_PROT @@ -0,0 +1 @@ +CONFIG_ARCH_HAVE_ELF_PROT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS new file mode 100644 index 000000000000..a321411bd03e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS @@ -0,0 +1 @@ +CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HISI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HISI new file mode 100644 index 000000000000..9afa5dcee233 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HISI @@ -0,0 +1 @@ +CONFIG_ARCH_HISI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_INTEL_SOCFPGA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_INTEL_SOCFPGA new file mode 100644 index 000000000000..2a600d0dc26b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_INTEL_SOCFPGA @@ -0,0 +1 @@ +# CONFIG_ARCH_INTEL_SOCFPGA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_K3 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_K3 new file mode 100644 index 000000000000..6929420f8d0f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_K3 @@ -0,0 +1 @@ +# CONFIG_ARCH_K3 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_KEEMBAY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_KEEMBAY new file mode 100644 index 000000000000..08875182cdd8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_KEEMBAY @@ -0,0 +1 @@ +# CONFIG_ARCH_KEEMBAY is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_KEEP_MEMBLOCK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_KEEP_MEMBLOCK 
new file mode 100644 index 000000000000..20aa4070bcf1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_KEEP_MEMBLOCK @@ -0,0 +1 @@ +CONFIG_ARCH_KEEP_MEMBLOCK=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_LG1K b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_LG1K new file mode 100644 index 000000000000..0d73af4e2246 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_LG1K @@ -0,0 +1 @@ +# CONFIG_ARCH_LG1K is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MA35 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MA35 new file mode 100644 index 000000000000..2f00524de4ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MA35 @@ -0,0 +1 @@ +# CONFIG_ARCH_MA35 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MEDIATEK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MEDIATEK new file mode 100644 index 000000000000..62b0a1f19a0f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MEDIATEK @@ -0,0 +1 @@ +# CONFIG_ARCH_MEDIATEK is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MESON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MESON new file mode 100644 index 000000000000..849319f60afb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MESON @@ -0,0 +1 @@ +# CONFIG_ARCH_MESON is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS new file mode 100644 index 000000000000..5d8b9fd79619 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_BITS=18 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS_MAX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS_MAX new file mode 100644 index 000000000000..9b8aa49b0b7c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS_MAX @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS_MIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS_MIN new file mode 100644 index 000000000000..d409ee8284f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_BITS_MIN @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN new file mode 100644 index 000000000000..d4c9b09089a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MVEBU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MVEBU new file mode 100644 index 000000000000..d7a6da1f2e2b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_MVEBU @@ -0,0 +1 @@ +# CONFIG_ARCH_MVEBU is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NPCM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NPCM new file mode 100644 index 000000000000..4117554db7f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NPCM @@ -0,0 +1 @@ +# CONFIG_ARCH_NPCM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NXP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NXP new file mode 100644 index 000000000000..4e89cddb9cd2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_NXP @@ -0,0 +1 @@ +# CONFIG_ARCH_NXP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_QCOM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_QCOM new file mode 100644 index 000000000000..e51a38aa67b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_QCOM @@ -0,0 +1 @@ +CONFIG_ARCH_QCOM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_REALTEK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_REALTEK new file mode 100644 index 000000000000..49536f6d5780 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_REALTEK @@ -0,0 +1 @@ +# CONFIG_ARCH_REALTEK is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_RENESAS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_RENESAS new file mode 100644 index 000000000000..0fa3a2ec3d4c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_RENESAS @@ -0,0 +1 @@ +# CONFIG_ARCH_RENESAS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ROCKCHIP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ROCKCHIP new file mode 100644 index 000000000000..f0df52228bd8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ROCKCHIP @@ -0,0 +1 @@ +# CONFIG_ARCH_ROCKCHIP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SEATTLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SEATTLE new file mode 100644 index 000000000000..83c1e8bc4b6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SEATTLE @@ -0,0 +1 @@ +CONFIG_ARCH_SEATTLE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SPARX5 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SPARX5 new file mode 100644 index 000000000000..cb416e9915a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SPARX5 @@ -0,0 +1 @@ +# CONFIG_ARCH_SPARX5 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SPRD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SPRD new file mode 100644 index 000000000000..a090576dec9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SPRD @@ -0,0 +1 @@ +# CONFIG_ARCH_SPRD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_STM32 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_STM32 new file mode 100644 index 000000000000..cb609deb9c10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_STM32 @@ -0,0 +1 @@ +# CONFIG_ARCH_STM32 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUNXI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUNXI new file mode 100644 index 
000000000000..e802c1d4f827 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUNXI @@ -0,0 +1 @@ +# CONFIG_ARCH_SUNXI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_HUGETLBFS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_HUGETLBFS new file mode 100644 index 000000000000..d17b5b55577f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_HUGETLBFS @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_HUGETLBFS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG new file mode 100644 index 000000000000..b202bfb6ca3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK new file mode 100644 index 000000000000..289fd1ea24df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SYNQUACER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SYNQUACER new file mode 100644 index 000000000000..a73cda5a18e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_SYNQUACER @@ -0,0 +1 @@ +# CONFIG_ARCH_SYNQUACER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA new file mode 100644 index 000000000000..85b1b424972d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA @@ -0,0 +1 @@ +# CONFIG_ARCH_TEGRA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER new file mode 100644 index 000000000000..fc1527e5ece9 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER @@ -0,0 +1 @@ +CONFIG_ARCH_THUNDER=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER2 new file mode 100644 index 000000000000..aa0f1f3199dc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_THUNDER2 @@ -0,0 +1 @@ +CONFIG_ARCH_THUNDER2=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_UNIPHIER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_UNIPHIER new file mode 100644 index 000000000000..2c110333793a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_UNIPHIER @@ -0,0 +1 @@ +# CONFIG_ARCH_UNIPHIER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_USES_PG_ARCH_X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_USES_PG_ARCH_X new file mode 100644 index 000000000000..b762db4030ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_USES_PG_ARCH_X @@ -0,0 +1 @@ +CONFIG_ARCH_USES_PG_ARCH_X=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_USE_GNU_PROPERTY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_USE_GNU_PROPERTY new file mode 100644 index 000000000000..f773a78edbb5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_USE_GNU_PROPERTY @@ -0,0 +1 @@ +CONFIG_ARCH_USE_GNU_PROPERTY=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_VEXPRESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_VEXPRESS new file mode 100644 index 000000000000..aa238e6be0a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_VEXPRESS @@ -0,0 +1 @@ +CONFIG_ARCH_VEXPRESS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_VISCONTI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_VISCONTI new file mode 100644 index 000000000000..099b60922ebf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_VISCONTI @@ -0,0 +1 @@ +# CONFIG_ARCH_VISCONTI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT new file mode 100644 index 000000000000..b4d0a5cbcd79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT @@ -0,0 +1 @@ +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_WANT_FRAME_POINTERS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_WANT_FRAME_POINTERS new file mode 100644 index 000000000000..614f6979533c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_WANT_FRAME_POINTERS @@ -0,0 +1 @@ +CONFIG_ARCH_WANT_FRAME_POINTERS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_XGENE new file mode 100644 index 000000000000..c1bd4d11639c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_XGENE @@ -0,0 +1 @@ +CONFIG_ARCH_XGENE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ZYNQMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ZYNQMP new file mode 100644 index 000000000000..f92a386c63a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_ZYNQMP @@ -0,0 +1 @@ +# CONFIG_ARCH_ZYNQMP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_AS_HAS_MTE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_AS_HAS_MTE new file mode 100644 index 000000000000..5bb9a4675eb4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_AS_HAS_MTE @@ -0,0 +1 @@ +CONFIG_ARM64_AS_HAS_MTE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_CONT_PMD_SHIFT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_CONT_PMD_SHIFT new file mode 100644 index 000000000000..5216b165ec13 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_CONT_PMD_SHIFT @@ -0,0 +1 @@ +CONFIG_ARM64_CONT_PMD_SHIFT=4 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_CONT_PTE_SHIFT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_CONT_PTE_SHIFT new file mode 100644 index 
000000000000..d5e455df2a2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_CONT_PTE_SHIFT @@ -0,0 +1 @@ +CONFIG_ARM64_CONT_PTE_SHIFT=4 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_ERRATUM_858921 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_ERRATUM_858921 new file mode 100644 index 000000000000..055a6880cdc4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_ERRATUM_858921 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_858921=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419 new file mode 100644 index 000000000000..28e186e92a27 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419 @@ -0,0 +1 @@ +CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_LSE_ATOMICS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_LSE_ATOMICS new file mode 100644 index 000000000000..a54d60033875 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_LSE_ATOMICS @@ -0,0 +1 @@ +CONFIG_ARM64_LSE_ATOMICS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PAGE_SHIFT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PAGE_SHIFT new file mode 100644 index 000000000000..71ee2d57a079 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PAGE_SHIFT @@ -0,0 +1 @@ +CONFIG_ARM64_PAGE_SHIFT=12 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS new file mode 100644 index 000000000000..8d200cc60779 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS @@ -0,0 +1 @@ +CONFIG_ARM64_PA_BITS=48 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS_48 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS_48 new file mode 100644 index 000000000000..742d9411da74 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_PA_BITS_48 @@ -0,0 +1 
@@ +CONFIG_ARM64_PA_BITS_48=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_RELOC_TEST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_RELOC_TEST new file mode 100644 index 000000000000..864fc6a6bb73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_RELOC_TEST @@ -0,0 +1 @@ +# CONFIG_ARM64_RELOC_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS new file mode 100644 index 000000000000..3a2764df2ed1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS @@ -0,0 +1 @@ +CONFIG_ARM64_VA_BITS=48 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS_48 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS_48 new file mode 100644 index 000000000000..1bafe6e581ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_VA_BITS_48 @@ -0,0 +1 @@ +CONFIG_ARM64_VA_BITS_48=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_CLEAN_CACHE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_CLEAN_CACHE new file mode 100644 index 000000000000..66d54a8595c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_CLEAN_CACHE @@ -0,0 +1 @@ +CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_REPEAT_TLBI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_REPEAT_TLBI new file mode 100644 index 000000000000..404622b8cd01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_REPEAT_TLBI @@ -0,0 +1 @@ +CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT new file mode 100644 index 000000000000..e30fdea51796 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT @@ -0,0 +1 @@ +CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y diff 
--git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD new file mode 100644 index 000000000000..df3d7de5eef4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD @@ -0,0 +1 @@ +CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE new file mode 100644 index 000000000000..5900d7737840 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE @@ -0,0 +1 @@ +CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_AMBA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_AMBA new file mode 100644 index 000000000000..ed20b0276309 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_AMBA @@ -0,0 +1 @@ +CONFIG_ARM_AMBA=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER new file mode 100644 index 000000000000..f30148a82030 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER @@ -0,0 +1 @@ +CONFIG_ARM_ARCH_TIMER=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER_EVTSTREAM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER_EVTSTREAM new file mode 100644 index 000000000000..1073ce86bfc6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER_EVTSTREAM @@ -0,0 +1 @@ +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND new file mode 100644 index 000000000000..a76e926050fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND @@ -0,0 +1 @@ 
+CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_CCI_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_CCI_PMU new file mode 100644 index 000000000000..1fea9928288b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_CCI_PMU @@ -0,0 +1 @@ +# CONFIG_ARM_CCI_PMU is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_FFA_TRANSPORT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_FFA_TRANSPORT new file mode 100644 index 000000000000..8914e84510f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_FFA_TRANSPORT @@ -0,0 +1 @@ +# CONFIG_ARM_FFA_TRANSPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_MAX_NR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_MAX_NR new file mode 100644 index 000000000000..ed911917bf16 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_MAX_NR @@ -0,0 +1 @@ +CONFIG_ARM_GIC_MAX_NR=1 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_MHU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_MHU new file mode 100644 index 000000000000..fd6e8cc15f2c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_MHU @@ -0,0 +1 @@ +CONFIG_ARM_MHU=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CHECKER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CHECKER new file mode 100644 index 000000000000..8d66ce36a339 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CHECKER @@ -0,0 +1 @@ +# CONFIG_ARM_PSCI_CHECKER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CPUIDLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CPUIDLE new file mode 100644 index 000000000000..5d304af486e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_CPUIDLE @@ -0,0 +1 @@ +# CONFIG_ARM_PSCI_CPUIDLE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_FW b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_FW new file mode 100644 index 
000000000000..8f3a935754bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_PSCI_FW @@ -0,0 +1 @@ +CONFIG_ARM_PSCI_FW=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_QCOM_CPUFREQ_HW b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_QCOM_CPUFREQ_HW new file mode 100644 index 000000000000..cc99ba49a528 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_QCOM_CPUFREQ_HW @@ -0,0 +1 @@ +# CONFIG_ARM_QCOM_CPUFREQ_HW is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SBSA_WATCHDOG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SBSA_WATCHDOG new file mode 100644 index 000000000000..01aa8f1525d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SBSA_WATCHDOG @@ -0,0 +1 @@ +CONFIG_ARM_SBSA_WATCHDOG=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SCMI_PROTOCOL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SCMI_PROTOCOL new file mode 100644 index 000000000000..8e99d7695567 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SCMI_PROTOCOL @@ -0,0 +1 @@ +# CONFIG_ARM_SCMI_PROTOCOL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMCCC_SOC_ID b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMCCC_SOC_ID new file mode 100644 index 000000000000..c53e28f0b599 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMCCC_SOC_ID @@ -0,0 +1 @@ +CONFIG_ARM_SMCCC_SOC_ID=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS new file mode 100644 index 000000000000..a8cf80f896c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS @@ -0,0 +1 @@ +# CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM new file mode 100644 index 000000000000..dbd9c71b9033 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM @@ -0,0 +1 
@@ +CONFIG_ARM_SMMU_QCOM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM_DEBUG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM_DEBUG new file mode 100644 index 000000000000..898fa4177cd7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_QCOM_DEBUG @@ -0,0 +1 @@ +# CONFIG_ARM_SMMU_QCOM_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_V3_SVA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_V3_SVA new file mode 100644 index 000000000000..73b0c35772c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_SMMU_V3_SVA @@ -0,0 +1 @@ +CONFIG_ARM_SMMU_V3_SVA=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TIMER_SP804 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TIMER_SP804 new file mode 100644 index 000000000000..94c9ede0a244 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TIMER_SP804 @@ -0,0 +1 @@ +CONFIG_ARM_TIMER_SP804=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_2 new file mode 100644 index 000000000000..abbc5ca641a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_2 @@ -0,0 +1 @@ +CONFIG_AS_HAS_ARMV8_2=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_3 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_3 new file mode 100644 index 000000000000..33b9b5ba161f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_3 @@ -0,0 +1 @@ +CONFIG_AS_HAS_ARMV8_3=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_4 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_4 new file mode 100644 index 000000000000..7f650f221e5a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_4 @@ -0,0 +1 @@ +CONFIG_AS_HAS_ARMV8_4=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_5 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_5 new file mode 100644 index 
000000000000..70144b1c117e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_ARMV8_5 @@ -0,0 +1 @@ +CONFIG_AS_HAS_ARMV8_5=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_CFI_NEGATE_RA_STATE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_CFI_NEGATE_RA_STATE new file mode 100644 index 000000000000..12b3f6ebe3de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_CFI_NEGATE_RA_STATE @@ -0,0 +1 @@ +CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_LDAPR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_LDAPR new file mode 100644 index 000000000000..b5a93d0b7c3e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_LDAPR @@ -0,0 +1 @@ +CONFIG_AS_HAS_LDAPR=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_LSE_ATOMICS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_LSE_ATOMICS new file mode 100644 index 000000000000..dbd7bfb45770 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_LSE_ATOMICS @@ -0,0 +1 @@ +CONFIG_AS_HAS_LSE_ATOMICS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_SHA3 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_SHA3 new file mode 100644 index 000000000000..4c8d5b3d9ae2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AS_HAS_SHA3 @@ -0,0 +1 @@ +CONFIG_AS_HAS_SHA3=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AT803X_PHY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AT803X_PHY new file mode 100644 index 000000000000..93e86302bb00 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AT803X_PHY @@ -0,0 +1 @@ +CONFIG_AT803X_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ATL2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ATL2 new file mode 100644 index 000000000000..f5b426720e08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ATL2 @@ -0,0 +1 @@ +# CONFIG_ATL2 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_ARCH_COMPAT_GENERIC 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_ARCH_COMPAT_GENERIC new file mode 100644 index 000000000000..ec86bf5ac29a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_ARCH_COMPAT_GENERIC @@ -0,0 +1 @@ +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_COMPAT_GENERIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_COMPAT_GENERIC new file mode 100644 index 000000000000..60219cf94e57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_COMPAT_GENERIC @@ -0,0 +1 @@ +CONFIG_AUDIT_COMPAT_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_GENERIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_GENERIC new file mode 100644 index 000000000000..b20e404a08aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_AUDIT_GENERIC @@ -0,0 +1 @@ +CONFIG_AUDIT_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_GPIO new file mode 100644 index 000000000000..e4776be5a756 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_GPIO @@ -0,0 +1 @@ +CONFIG_BACKLIGHT_GPIO=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_LED b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_LED new file mode 100644 index 000000000000..70fef3d5d549 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_LED @@ -0,0 +1 @@ +# CONFIG_BACKLIGHT_LED is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_PWM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_PWM new file mode 100644 index 000000000000..44737e2148ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BACKLIGHT_PWM @@ -0,0 +1 @@ +CONFIG_BACKLIGHT_PWM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BCM_SBA_RAID b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BCM_SBA_RAID new file mode 100644 index 000000000000..ea5dba56e282 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BCM_SBA_RAID @@ 
-0,0 +1 @@ +# CONFIG_BCM_SBA_RAID is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BLK_CGROUP_IOLATENCY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BLK_CGROUP_IOLATENCY new file mode 100644 index 000000000000..59e1ad44df4e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BLK_CGROUP_IOLATENCY @@ -0,0 +1 @@ +# CONFIG_BLK_CGROUP_IOLATENCY is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BLK_DEV_PMEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BLK_DEV_PMEM new file mode 100644 index 000000000000..04cb6f83afed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BLK_DEV_PMEM @@ -0,0 +1 @@ +CONFIG_BLK_DEV_PMEM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BRCMSTB_GISB_ARB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BRCMSTB_GISB_ARB new file mode 100644 index 000000000000..36e31edd24fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BRCMSTB_GISB_ARB @@ -0,0 +1 @@ +# CONFIG_BRCMSTB_GISB_ARB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BT new file mode 100644 index 000000000000..ce6ddb435863 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BT @@ -0,0 +1 @@ +# CONFIG_BT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC new file mode 100644 index 000000000000..2a6b95e058c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC @@ -0,0 +1 @@ +CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CAVIUM_CPT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CAVIUM_CPT new file mode 100644 index 000000000000..18380568726d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CAVIUM_CPT @@ -0,0 +1 @@ +CONFIG_CAVIUM_CPT=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET new file mode 100644 index 000000000000..df222f914e4e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET @@ -0,0 +1 @@ +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI new file mode 100644 index 000000000000..4270ef8ea782 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI @@ -0,0 +1 @@ +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_SIGN_RETURN_ADDRESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_SIGN_RETURN_ADDRESS new file mode 100644 index 000000000000..4a4787f56440 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAS_SIGN_RETURN_ADDRESS @@ -0,0 +1 @@ +CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAVE_SHADOW_CALL_STACK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAVE_SHADOW_CALL_STACK new file mode 100644 index 000000000000..a8b721605f61 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAVE_SHADOW_CALL_STACK @@ -0,0 +1 @@ +CONFIG_CC_HAVE_SHADOW_CALL_STACK=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG new file mode 100644 index 000000000000..39c7dc935e43 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG @@ -0,0 +1 @@ +CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CDX_BUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CDX_BUS new file mode 100644 index 000000000000..788991243832 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CDX_BUS @@ -0,0 +1 @@ +# CONFIG_CDX_BUS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_BQ24190 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_BQ24190 new file mode 100644 index 000000000000..676283f06119 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_BQ24190 @@ -0,0 +1 @@ +# CONFIG_CHARGER_BQ24190 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_DETECTOR_MAX14656 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_DETECTOR_MAX14656 new file mode 100644 index 000000000000..434d9466ffc3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_DETECTOR_MAX14656 @@ -0,0 +1 @@ +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_MANAGER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_MANAGER new file mode 100644 index 000000000000..51fe252eb85e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_MANAGER @@ -0,0 +1 @@ +# CONFIG_CHARGER_MANAGER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9467 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9467 new file mode 100644 index 000000000000..46b07d722b21 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9467 @@ -0,0 +1 @@ +# CONFIG_CHARGER_RT9467 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9471 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9471 new file mode 100644 index 000000000000..cd3acdd16199 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_RT9471 @@ -0,0 +1 @@ +# CONFIG_CHARGER_RT9471 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_SMB347 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_SMB347 new file mode 100644 index 000000000000..b388a2f6e9cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_SMB347 @@ -0,0 +1 @@ +CONFIG_CHARGER_SMB347=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_UCS1002 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_UCS1002 new file mode 100644 index 000000000000..22ed3b9c32ad --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHARGER_UCS1002 @@ -0,0 +1 @@ +# CONFIG_CHARGER_UCS1002 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_ACPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_ACPI new file mode 100644 index 000000000000..b8b2ba79c7e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_ACPI @@ -0,0 +1 @@ +# CONFIG_CHROMEOS_ACPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_PRIVACY_SCREEN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_PRIVACY_SCREEN new file mode 100644 index 000000000000..2e8ede780120 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_PRIVACY_SCREEN @@ -0,0 +1 @@ +# CONFIG_CHROMEOS_PRIVACY_SCREEN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_TBMC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_TBMC new file mode 100644 index 000000000000..9252aafa4d95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROMEOS_TBMC @@ -0,0 +1 @@ +# CONFIG_CHROMEOS_TBMC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROME_PLATFORMS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROME_PLATFORMS new file mode 100644 index 000000000000..c9336a3aaa0a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CHROME_PLATFORMS @@ -0,0 +1 @@ +CONFIG_CHROME_PLATFORMS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLKSRC_MMIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLKSRC_MMIO new file mode 100644 index 000000000000..9f20297c5420 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLKSRC_MMIO @@ -0,0 +1 @@ +CONFIG_CLKSRC_MMIO=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_ICST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_ICST new file mode 100644 index 000000000000..0816de981815 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_ICST @@ -0,0 +1 @@ +# CONFIG_CLK_ICST is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_SP810 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_SP810 new file mode 100644 index 000000000000..ff341b3ab47c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_SP810 @@ -0,0 +1 @@ +CONFIG_CLK_SP810=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_VEXPRESS_OSC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_VEXPRESS_OSC new file mode 100644 index 000000000000..867fff294d10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_VEXPRESS_OSC @@ -0,0 +1 @@ +CONFIG_CLK_VEXPRESS_OSC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLONE_BACKWARDS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLONE_BACKWARDS new file mode 100644 index 000000000000..ef87d0e3bb74 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLONE_BACKWARDS @@ -0,0 +1 @@ +CONFIG_CLONE_BACKWARDS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_AXI_CLKGEN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_AXI_CLKGEN new file mode 100644 index 000000000000..3db5c0c087ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_AXI_CLKGEN @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_AXI_CLKGEN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_CDCE925 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_CDCE925 new file mode 100644 index 000000000000..ea85e587618f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_CDCE925 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_CDCE925 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_FIXED_MMIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_FIXED_MMIO new file mode 100644 index 000000000000..85e803318d76 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_FIXED_MMIO @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_FIXED_MMIO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3516CV300 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3516CV300 new file mode 100644 index 000000000000..efc11711dfa4 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3516CV300 @@ -0,0 +1 @@ +CONFIG_COMMON_CLK_HI3516CV300=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3519 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3519 new file mode 100644 index 000000000000..7e847bb7ad2c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3519 @@ -0,0 +1 @@ +CONFIG_COMMON_CLK_HI3519=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3559A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3559A new file mode 100644 index 000000000000..d2a7dff8958a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3559A @@ -0,0 +1 @@ +CONFIG_COMMON_CLK_HI3559A=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3660 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3660 new file mode 100644 index 000000000000..44693544d35b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3660 @@ -0,0 +1 @@ +CONFIG_COMMON_CLK_HI3660=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3670 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3670 new file mode 100644 index 000000000000..a2d57c82d98b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3670 @@ -0,0 +1 @@ +CONFIG_COMMON_CLK_HI3670=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3798CV200 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3798CV200 new file mode 100644 index 000000000000..b5e428f1536c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI3798CV200 @@ -0,0 +1 @@ +CONFIG_COMMON_CLK_HI3798CV200=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI6220 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI6220 new file mode 100644 index 000000000000..7704eee260ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_HI6220 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_HI6220 is not set diff 
--git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_QCOM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_QCOM new file mode 100644 index 000000000000..e2adf60aa609 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_QCOM @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_QCOM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_RS9_PCIE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_RS9_PCIE new file mode 100644 index 000000000000..aaf8755ddc81 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_RS9_PCIE @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_RS9_PCIE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SCPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SCPI new file mode 100644 index 000000000000..b6694a09cbf0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SCPI @@ -0,0 +1 @@ +CONFIG_COMMON_CLK_SCPI=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI514 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI514 new file mode 100644 index 000000000000..04b8fe1dcfed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI514 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_SI514 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI521XX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI521XX new file mode 100644 index 000000000000..0d95f4d48300 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI521XX @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_SI521XX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI570 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI570 new file mode 100644 index 000000000000..aa746413aeeb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_SI570 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_SI570 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC3 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC3 new 
file mode 100644 index 000000000000..fbf1e1b65afe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC3 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_VC3 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC5 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC5 new file mode 100644 index 000000000000..9aaf6ae19368 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC5 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_VC5 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC7 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC7 new file mode 100644 index 000000000000..7d244219685d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_VC7 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_VC7 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XGENE new file mode 100644 index 000000000000..44d0a9d9bdf8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XGENE @@ -0,0 +1 @@ +CONFIG_COMMON_CLK_XGENE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XLNX_CLKWZRD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XLNX_CLKWZRD new file mode 100644 index 000000000000..de088b678b1c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_CLK_XLNX_CLKWZRD @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_RESET_HI3660 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_RESET_HI3660 new file mode 100644 index 000000000000..0d13136e0265 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_RESET_HI3660 @@ -0,0 +1 @@ +# CONFIG_COMMON_RESET_HI3660 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_RESET_HI6220 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_RESET_HI6220 new file mode 100644 index 000000000000..a603d7319ea2 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMMON_RESET_HI6220 @@ -0,0 +1 @@ +CONFIG_COMMON_RESET_HI6220=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMPAT_ALIGNMENT_FIXUPS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMPAT_ALIGNMENT_FIXUPS new file mode 100644 index 000000000000..8107deaf0449 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_COMPAT_ALIGNMENT_FIXUPS @@ -0,0 +1 @@ +# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_DUMMY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_DUMMY new file mode 100644 index 000000000000..349cd1676424 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_DUMMY @@ -0,0 +1 @@ +# CONFIG_CORESIGHT_DUMMY is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDA new file mode 100644 index 000000000000..4d36d06d8e95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDA @@ -0,0 +1 @@ +# CONFIG_CORESIGHT_TPDA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDM new file mode 100644 index 000000000000..92de2396b7a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TPDM @@ -0,0 +1 @@ +# CONFIG_CORESIGHT_TPDM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TRBE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TRBE new file mode 100644 index 000000000000..4f6158a7e826 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CORESIGHT_TRBE @@ -0,0 +1 @@ +# CONFIG_CORESIGHT_TRBE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT new file mode 100644 index 000000000000..5a49a05308e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT @@ -0,0 +1 @@ +# CONFIG_CPUFREQ_DT is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT_PLATDEV b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT_PLATDEV new file mode 100644 index 000000000000..62fd7906ad40 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPUFREQ_DT_PLATDEV @@ -0,0 +1 @@ +# CONFIG_CPUFREQ_DT_PLATDEV is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPU_PM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPU_PM new file mode 100644 index 000000000000..aa877af41c75 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CPU_PM @@ -0,0 +1 @@ +CONFIG_CPU_PM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64 new file mode 100644 index 000000000000..5bc3a058626a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64 @@ -0,0 +1 @@ +CONFIG_CRC64=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64_ROCKSOFT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64_ROCKSOFT new file mode 100644 index 000000000000..223c14ad37fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRC64_ROCKSOFT @@ -0,0 +1 @@ +CONFIG_CRC64_ROCKSOFT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_EC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_EC new file mode 100644 index 000000000000..bd6dd449ba8e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_EC @@ -0,0 +1 @@ +# CONFIG_CROS_EC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_HPS_I2C b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_HPS_I2C new file mode 100644 index 000000000000..19dd510cacb7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_HPS_I2C @@ -0,0 +1 @@ +# CONFIG_CROS_HPS_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_KBD_LED_BACKLIGHT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_KBD_LED_BACKLIGHT new file mode 100644 index 000000000000..95f043d6889e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CROS_KBD_LED_BACKLIGHT @@ -0,0 +1 @@ +# 
CONFIG_CROS_KBD_LED_BACKLIGHT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_CRC64_ROCKSOFT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_CRC64_ROCKSOFT new file mode 100644 index 000000000000..1f6b24e092c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_CRC64_ROCKSOFT @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRC64_ROCKSOFT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CAVIUM_ZIP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CAVIUM_ZIP new file mode 100644 index 000000000000..d5226e157049 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CAVIUM_ZIP @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CCREE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CCREE new file mode 100644 index 000000000000..fe4fcee59846 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CCREE @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_CCREE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CPT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CPT new file mode 100644 index 000000000000..364ba089fbf5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_CPT @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_CPT=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_HPRE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_HPRE new file mode 100644 index 000000000000..5c97a6853454 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_HPRE @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_HISI_HPRE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_QM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_QM new file mode 100644 index 000000000000..6c75485a207a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_QM @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_HISI_QM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_SEC 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_SEC new file mode 100644 index 000000000000..517b45df771e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_SEC @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_HISI_SEC=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_SEC2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_SEC2 new file mode 100644 index 000000000000..59b3e238e201 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_SEC2 @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_HISI_SEC2=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_TRNG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_TRNG new file mode 100644 index 000000000000..f203ee55d4b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_TRNG @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_HISI_TRNG=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_ZIP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_ZIP new file mode 100644 index 000000000000..53d7f44de0a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_HISI_ZIP @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_HISI_ZIP=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_OCTEONTX_CPT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_OCTEONTX_CPT new file mode 100644 index 000000000000..19e3dc42c46c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_OCTEONTX_CPT @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_OCTEONTX_CPT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QAT_4XXX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QAT_4XXX new file mode 100644 index 000000000000..54fd0faec1c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QAT_4XXX @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCE new file mode 100644 
index 000000000000..206220e544d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCE @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_QCE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCOM_RNG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCOM_RNG new file mode 100644 index 000000000000..cde300803e4c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_DEV_QCOM_RNG @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_QCOM_RNG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_LIB_POLY1305_RSIZE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_LIB_POLY1305_RSIZE new file mode 100644 index 000000000000..7eab5761438f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_LIB_POLY1305_RSIZE @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_NHPOLY1305_NEON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_NHPOLY1305_NEON new file mode 100644 index 000000000000..4dc5989cc3ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_NHPOLY1305_NEON @@ -0,0 +1 @@ +# CONFIG_CRYPTO_NHPOLY1305_NEON is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_POLYVAL_ARM64_CE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_POLYVAL_ARM64_CE new file mode 100644 index 000000000000..611ebb23a069 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_POLYVAL_ARM64_CE @@ -0,0 +1 @@ +# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA3_ARM64 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA3_ARM64 new file mode 100644 index 000000000000..dfbbf4fa5c22 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA3_ARM64 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_SHA3_ARM64 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA512_ARM64 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA512_ARM64 new file mode 100644 index 000000000000..ed3f38d42132 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA512_ARM64 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_SHA512_ARM64 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA512_ARM64_CE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA512_ARM64_CE new file mode 100644 index 000000000000..26a1ababe45c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CRYPTO_SHA512_ARM64_CE @@ -0,0 +1 @@ +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DEBUG_EFI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DEBUG_EFI new file mode 100644 index 000000000000..68b7b8a1e5a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DEBUG_EFI @@ -0,0 +1 @@ +# CONFIG_DEBUG_EFI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC new file mode 100644 index 000000000000..f8a01763fe23 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC @@ -0,0 +1 @@ +CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_DECLARE_COHERENT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_DECLARE_COHERENT new file mode 100644 index 000000000000..77abed5ca11c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_DECLARE_COHERENT @@ -0,0 +1 @@ +CONFIG_DMA_DECLARE_COHERENT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_DIRECT_REMAP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_DIRECT_REMAP new file mode 100644 index 000000000000..d918392b9464 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_DIRECT_REMAP @@ -0,0 +1 @@ +CONFIG_DMA_DIRECT_REMAP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_NONCOHERENT_MMAP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_NONCOHERENT_MMAP new file mode 100644 index 000000000000..b0dd4928f157 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_NONCOHERENT_MMAP @@ 
-0,0 +1 @@ +CONFIG_DMA_NONCOHERENT_MMAP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_OF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_OF new file mode 100644 index 000000000000..ffc03612089f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_OF @@ -0,0 +1 @@ +CONFIG_DMA_OF=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_RESTRICTED_POOL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_RESTRICTED_POOL new file mode 100644 index 000000000000..2f680768ebef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMA_RESTRICTED_POOL @@ -0,0 +1 @@ +# CONFIG_DMA_RESTRICTED_POOL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ANALOGIX_ANX6345 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ANALOGIX_ANX6345 new file mode 100644 index 000000000000..4597ef1bb5ad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ANALOGIX_ANX6345 @@ -0,0 +1 @@ +# CONFIG_DRM_ANALOGIX_ANX6345 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ANALOGIX_ANX7625 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ANALOGIX_ANX7625 new file mode 100644 index 000000000000..8399c28a8c18 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ANALOGIX_ANX7625 @@ -0,0 +1 @@ +# CONFIG_DRM_ANALOGIX_ANX7625 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ARCPGU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ARCPGU new file mode 100644 index 000000000000..d7d5d576abf6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ARCPGU @@ -0,0 +1 @@ +# CONFIG_DRM_ARCPGU is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_DSI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_DSI new file mode 100644 index 000000000000..c20551264c47 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_DSI @@ -0,0 +1 @@ +# CONFIG_DRM_CDNS_DSI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_MHDP8546 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_MHDP8546 
new file mode 100644 index 000000000000..2f08c639144c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CDNS_MHDP8546 @@ -0,0 +1 @@ +# CONFIG_DRM_CDNS_MHDP8546 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CHIPONE_ICN6211 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CHIPONE_ICN6211 new file mode 100644 index 000000000000..1c971f5c7a81 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CHIPONE_ICN6211 @@ -0,0 +1 @@ +# CONFIG_DRM_CHIPONE_ICN6211 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CHRONTEL_CH7033 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CHRONTEL_CH7033 new file mode 100644 index 000000000000..5cfc88342d4f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_CHRONTEL_CH7033 @@ -0,0 +1 @@ +# CONFIG_DRM_CHRONTEL_CH7033 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_DISPLAY_CONNECTOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_DISPLAY_CONNECTOR new file mode 100644 index 000000000000..e39ee39f4403 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_DISPLAY_CONNECTOR @@ -0,0 +1 @@ +# CONFIG_DRM_DISPLAY_CONNECTOR is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_DP_CEC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_DP_CEC new file mode 100644 index 000000000000..5f9b385554c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_DP_CEC @@ -0,0 +1 @@ +CONFIG_DRM_DP_CEC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HDLCD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HDLCD new file mode 100644 index 000000000000..36ae461a61cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HDLCD @@ -0,0 +1 @@ +# CONFIG_DRM_HDLCD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HISI_HIBMC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HISI_HIBMC new file mode 100644 index 000000000000..3138ee3c9ad7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HISI_HIBMC @@ -0,0 +1 @@ 
+CONFIG_DRM_HISI_HIBMC=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HISI_KIRIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HISI_KIRIN new file mode 100644 index 000000000000..23c7279b82b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_HISI_KIRIN @@ -0,0 +1 @@ +# CONFIG_DRM_HISI_KIRIN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_ADV7511 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_ADV7511 new file mode 100644 index 000000000000..18208c93f500 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_ADV7511 @@ -0,0 +1 @@ +# CONFIG_DRM_I2C_ADV7511 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_NXP_TDA998X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_NXP_TDA998X new file mode 100644 index 000000000000..a816d583e510 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_NXP_TDA998X @@ -0,0 +1 @@ +CONFIG_DRM_I2C_NXP_TDA998X=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_SIL164 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_SIL164 new file mode 100644 index 000000000000..44078d9b63cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_I2C_SIL164 @@ -0,0 +1 @@ +# CONFIG_DRM_I2C_SIL164 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT6505 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT6505 new file mode 100644 index 000000000000..9575170f0c4f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT6505 @@ -0,0 +1 @@ +# CONFIG_DRM_ITE_IT6505 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT66121 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT66121 new file mode 100644 index 000000000000..4e7581a9507c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_ITE_IT66121 @@ -0,0 +1 @@ +# CONFIG_DRM_ITE_IT66121 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_KOMEDA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_KOMEDA new file mode 
100644 index 000000000000..14369562c667 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_KOMEDA @@ -0,0 +1 @@ +# CONFIG_DRM_KOMEDA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LIMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LIMA new file mode 100644 index 000000000000..eb1331ceaeee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LIMA @@ -0,0 +1 @@ +# CONFIG_DRM_LIMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LOGICVC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LOGICVC new file mode 100644 index 000000000000..30b6af31130e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LOGICVC @@ -0,0 +1 @@ +# CONFIG_DRM_LOGICVC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT8912B b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT8912B new file mode 100644 index 000000000000..3ea4cd170925 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT8912B @@ -0,0 +1 @@ +# CONFIG_DRM_LONTIUM_LT8912B is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9211 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9211 new file mode 100644 index 000000000000..8ab179f6394e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9211 @@ -0,0 +1 @@ +# CONFIG_DRM_LONTIUM_LT9211 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9611 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9611 new file mode 100644 index 000000000000..0ee3b259ff15 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9611 @@ -0,0 +1 @@ +# CONFIG_DRM_LONTIUM_LT9611 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9611UXC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9611UXC new file mode 100644 index 000000000000..d9827b457ccc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LONTIUM_LT9611UXC @@ -0,0 +1 @@ +# 
CONFIG_DRM_LONTIUM_LT9611UXC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LVDS_CODEC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LVDS_CODEC new file mode 100644 index 000000000000..9b5f29038f39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_LVDS_CODEC @@ -0,0 +1 @@ +# CONFIG_DRM_LVDS_CODEC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MALI_DISPLAY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MALI_DISPLAY new file mode 100644 index 000000000000..af3d09fbe2eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MALI_DISPLAY @@ -0,0 +1 @@ +# CONFIG_DRM_MALI_DISPLAY is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW new file mode 100644 index 000000000000..7aa3826f4d4c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW @@ -0,0 +1 @@ +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MSM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MSM new file mode 100644 index 000000000000..fc038c98bf41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_MSM @@ -0,0 +1 @@ +# CONFIG_DRM_MSM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NWL_MIPI_DSI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NWL_MIPI_DSI new file mode 100644 index 000000000000..6f4e4e857904 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NWL_MIPI_DSI @@ -0,0 +1 @@ +# CONFIG_DRM_NWL_MIPI_DSI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NXP_PTN3460 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NXP_PTN3460 new file mode 100644 index 000000000000..be8b96f7918c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NXP_PTN3460 @@ -0,0 +1 @@ +# CONFIG_DRM_NXP_PTN3460 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ABT_Y030XX067A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ABT_Y030XX067A new file mode 100644 index 000000000000..9aea79539571 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ABT_Y030XX067A @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ARM_VERSATILE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ARM_VERSATILE new file mode 100644 index 000000000000..f5ffd8d31741 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ARM_VERSATILE @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_EDP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_EDP new file mode 100644 index 000000000000..92a647ca83f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_EDP @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_EDP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_IL9322 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_IL9322 new file mode 100644 index 000000000000..4a9fd454c2b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_IL9322 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_ILI9341 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_ILI9341 new file mode 100644 index 000000000000..8b5a455d4f26 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_ILI9341 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_INNOLUX_EJ030NA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_INNOLUX_EJ030NA new file mode 100644 index 000000000000..8713ccc27e1e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_INNOLUX_EJ030NA @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is 
not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LG_LB035Q02 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LG_LB035Q02 new file mode 100644 index 000000000000..fe60a1992da4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LG_LB035Q02 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_LG_LB035Q02 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LG_LG4573 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LG_LG4573 new file mode 100644 index 000000000000..35ecd6eba8e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LG_LG4573 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_LG_LG4573 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LVDS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LVDS new file mode 100644 index 000000000000..af4bf6e016a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_LVDS @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_LVDS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NEC_NL8048HL11 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NEC_NL8048HL11 new file mode 100644 index 000000000000..339ff848fea4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NEC_NL8048HL11 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NEWVISION_NV3052C b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NEWVISION_NV3052C new file mode 100644 index 000000000000..16c72b0ef3ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NEWVISION_NV3052C @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NOVATEK_NT39016 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NOVATEK_NT39016 new file mode 100644 index 000000000000..edce2272337d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_NOVATEK_NT39016 @@ -0,0 +1 @@ +# 
CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO new file mode 100644 index 000000000000..14e8c8bb288d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 new file mode 100644 index 000000000000..5d2b9f2ef7ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_DB7430 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_DB7430 new file mode 100644 index 000000000000..f1c456d76050 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_DB7430 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_LD9040 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_LD9040 new file mode 100644 index 000000000000..8aae098881b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_LD9040 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 new file mode 100644 index 000000000000..b9c43be811c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 new file mode 100644 index 
000000000000..44ebe3ed9504 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 new file mode 100644 index 000000000000..1dd8218ba968 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 new file mode 100644 index 000000000000..8572cb504b43 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 new file mode 100644 index 000000000000..253f569ccd12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SEIKO_43WVF1G b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SEIKO_43WVF1G new file mode 100644 index 000000000000..b32cceac2f1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SEIKO_43WVF1G @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SHARP_LS037V7DW01 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SHARP_LS037V7DW01 new file mode 100644 index 000000000000..26cceaa221e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SHARP_LS037V7DW01 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SIMPLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SIMPLE new file mode 100644 index 000000000000..a1ed63eca094 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SIMPLE @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_SIMPLE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SITRONIX_ST7789V b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SITRONIX_ST7789V new file mode 100644 index 000000000000..712cb79a7b32 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SITRONIX_ST7789V @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SONY_ACX565AKM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SONY_ACX565AKM new file mode 100644 index 000000000000..de1d34f6bd01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_SONY_ACX565AKM @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TD028TTEC1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TD028TTEC1 new file mode 100644 index 000000000000..8c0af5fb484e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TD028TTEC1 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TD043MTEA1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TD043MTEA1 new file mode 100644 index 000000000000..4494ef96f0cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TD043MTEA1 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TPG110 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TPG110 new file mode 100644 index 000000000000..df2c43c266f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_TPO_TPG110 @@ -0,0 +1 @@ +# 
CONFIG_DRM_PANEL_TPO_TPG110 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANFROST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANFROST new file mode 100644 index 000000000000..2f4bbc3243e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANFROST @@ -0,0 +1 @@ +# CONFIG_DRM_PANFROST is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PARADE_PS8622 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PARADE_PS8622 new file mode 100644 index 000000000000..e541cb69375f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PARADE_PS8622 @@ -0,0 +1 @@ +# CONFIG_DRM_PARADE_PS8622 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PARADE_PS8640 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PARADE_PS8640 new file mode 100644 index 000000000000..fe2aea0dcf31 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PARADE_PS8640 @@ -0,0 +1 @@ +# CONFIG_DRM_PARADE_PS8640 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PL111 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PL111 new file mode 100644 index 000000000000..770564391dc0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PL111 @@ -0,0 +1 @@ +# CONFIG_DRM_PL111 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SAMSUNG_DSIM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SAMSUNG_DSIM new file mode 100644 index 000000000000..ab2a01510d37 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SAMSUNG_DSIM @@ -0,0 +1 @@ +# CONFIG_DRM_SAMSUNG_DSIM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SII902X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SII902X new file mode 100644 index 000000000000..18a102e07d1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SII902X @@ -0,0 +1 @@ +# CONFIG_DRM_SII902X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SII9234 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SII9234 new file mode 100644 
index 000000000000..b0bfbfb7afd9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SII9234 @@ -0,0 +1 @@ +# CONFIG_DRM_SII9234 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SIL_SII8620 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SIL_SII8620 new file mode 100644 index 000000000000..651ddf007090 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SIL_SII8620 @@ -0,0 +1 @@ +# CONFIG_DRM_SIL_SII8620 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SIMPLE_BRIDGE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SIMPLE_BRIDGE new file mode 100644 index 000000000000..263c76996e19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_SIMPLE_BRIDGE @@ -0,0 +1 @@ +# CONFIG_DRM_SIMPLE_BRIDGE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_THINE_THC63LVD1024 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_THINE_THC63LVD1024 new file mode 100644 index 000000000000..d62fd2aa222c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_THINE_THC63LVD1024 @@ -0,0 +1 @@ +# CONFIG_DRM_THINE_THC63LVD1024 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TIDSS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TIDSS new file mode 100644 index 000000000000..8afce19a3e0c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TIDSS @@ -0,0 +1 @@ +# CONFIG_DRM_TIDSS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_DLPC3433 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_DLPC3433 new file mode 100644 index 000000000000..2ae917aa6c2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_DLPC3433 @@ -0,0 +1 @@ +# CONFIG_DRM_TI_DLPC3433 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI83 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI83 new file mode 100644 index 000000000000..7e47d0baf490 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI83 @@ -0,0 +1 @@ +# 
CONFIG_DRM_TI_SN65DSI83 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI86 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI86 new file mode 100644 index 000000000000..d3088d3a9d5f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_SN65DSI86 @@ -0,0 +1 @@ +# CONFIG_DRM_TI_SN65DSI86 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_TFP410 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_TFP410 new file mode 100644 index 000000000000..b4f765e30253 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_TFP410 @@ -0,0 +1 @@ +# CONFIG_DRM_TI_TFP410 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_TPD12S015 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_TPD12S015 new file mode 100644 index 000000000000..038aad3b72ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TI_TPD12S015 @@ -0,0 +1 @@ +# CONFIG_DRM_TI_TPD12S015 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358762 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358762 new file mode 100644 index 000000000000..a99c1adf0ba5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358762 @@ -0,0 +1 @@ +# CONFIG_DRM_TOSHIBA_TC358762 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358764 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358764 new file mode 100644 index 000000000000..67c629a9d438 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358764 @@ -0,0 +1 @@ +# CONFIG_DRM_TOSHIBA_TC358764 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358767 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358767 new file mode 100644 index 000000000000..cdb5218c62f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358767 @@ -0,0 +1 @@ +# CONFIG_DRM_TOSHIBA_TC358767 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358768 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358768 new file mode 100644 index 000000000000..5aef40e4d497 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358768 @@ -0,0 +1 @@ +# CONFIG_DRM_TOSHIBA_TC358768 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358775 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358775 new file mode 100644 index 000000000000..5090a0707875 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TOSHIBA_TC358775 @@ -0,0 +1 @@ +# CONFIG_DRM_TOSHIBA_TC358775 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DTC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DTC new file mode 100644 index 000000000000..64dbfdd901d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DTC @@ -0,0 +1 @@ +CONFIG_DTC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_AXI_DMAC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_AXI_DMAC new file mode 100644 index 000000000000..0d88fd60aa04 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_AXI_DMAC @@ -0,0 +1 @@ +# CONFIG_DW_AXI_DMAC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_DMAC_CORE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_DMAC_CORE new file mode 100644 index 000000000000..bac83695b10f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DW_DMAC_CORE @@ -0,0 +1 @@ +CONFIG_DW_DMAC_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS new file mode 100644 index 000000000000..080a3fc455b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS @@ -0,0 +1 @@ +CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EDAC_DMC520 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EDAC_DMC520 new file mode 100644 index 000000000000..c93ee4727e98 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EDAC_DMC520 @@ -0,0 +1 @@ +# CONFIG_EDAC_DMC520 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EDAC_THUNDERX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EDAC_THUNDERX new file mode 100644 index 000000000000..dae44bb02125 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EDAC_THUNDERX @@ -0,0 +1 @@ +CONFIG_EDAC_THUNDERX=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_GENERIC_STUB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_GENERIC_STUB new file mode 100644 index 000000000000..7e8f6b07561c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_GENERIC_STUB @@ -0,0 +1 @@ +CONFIG_EFI_GENERIC_STUB=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_PARAMS_FROM_FDT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_PARAMS_FROM_FDT new file mode 100644 index 000000000000..2324b275770e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_PARAMS_FROM_FDT @@ -0,0 +1 @@ +CONFIG_EFI_PARAMS_FROM_FDT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_ZBOOT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_ZBOOT new file mode 100644 index 000000000000..dd98202456bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EFI_ZBOOT @@ -0,0 +1 @@ +# CONFIG_EFI_ZBOOT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_FSA9480 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_FSA9480 new file mode 100644 index 000000000000..d1cee17ac1b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_FSA9480 @@ -0,0 +1 @@ +# CONFIG_EXTCON_FSA9480 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_MAX3355 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_MAX3355 new file mode 100644 index 000000000000..680b5a774265 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_MAX3355 @@ -0,0 +1 @@ +# CONFIG_EXTCON_MAX3355 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_PTN5150 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_PTN5150 new file mode 100644 index 000000000000..092a8419c9ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_PTN5150 @@ -0,0 +1 @@ +# CONFIG_EXTCON_PTN5150 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_QCOM_SPMI_MISC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_QCOM_SPMI_MISC new file mode 100644 index 000000000000..5f2508da3ffc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_QCOM_SPMI_MISC @@ -0,0 +1 @@ +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_RT8973A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_RT8973A new file mode 100644 index 000000000000..e5f7236c9c69 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_RT8973A @@ -0,0 +1 @@ +# CONFIG_EXTCON_RT8973A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_SM5502 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_SM5502 new file mode 100644 index 000000000000..916994aa9714 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_SM5502 @@ -0,0 +1 @@ +# CONFIG_EXTCON_SM5502 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_USBC_TUSB320 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_USBC_TUSB320 new file mode 100644 index 000000000000..4eedae83091b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_USBC_TUSB320 @@ -0,0 +1 @@ +# CONFIG_EXTCON_USBC_TUSB320 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_USB_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_USB_GPIO new file mode 100644 index 000000000000..7a0c9af305f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_EXTCON_USB_GPIO @@ -0,0 +1 @@ +# CONFIG_EXTCON_USB_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_ARMCLCD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_ARMCLCD new file mode 100644 index 000000000000..7d58051b7869 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_ARMCLCD @@ -0,0 +1 @@ +# CONFIG_FB_ARMCLCD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_BACKLIGHT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_BACKLIGHT new file mode 100644 index 000000000000..cbd70c3ee0c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_BACKLIGHT @@ -0,0 +1 @@ +CONFIG_FB_BACKLIGHT=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_SM750 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_SM750 new file mode 100644 index 000000000000..2275940beb47 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_SM750 @@ -0,0 +1 @@ +# CONFIG_FB_SM750 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_TFT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_TFT new file mode 100644 index 000000000000..ec64dd1894d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FB_TFT @@ -0,0 +1 @@ +# CONFIG_FB_TFT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FIELDBUS_DEV b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FIELDBUS_DEV new file mode 100644 index 000000000000..7c61724580d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FIELDBUS_DEV @@ -0,0 +1 @@ +# CONFIG_FIELDBUS_DEV is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FIREWIRE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FIREWIRE new file mode 100644 index 000000000000..d75b839c4d30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FIREWIRE @@ -0,0 +1 @@ +# CONFIG_FIREWIRE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FRAME_POINTER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FRAME_POINTER new file mode 100644 index 000000000000..90a6184b155a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FRAME_POINTER @@ -0,0 +1 @@ +CONFIG_FRAME_POINTER=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSI new file mode 100644 index 000000000000..da7d31329415 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSI @@ -0,0 +1 @@ +# CONFIG_FSI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_EDMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_EDMA new file mode 100644 index 000000000000..d9673ee72c68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_EDMA @@ -0,0 +1 @@ +# CONFIG_FSL_EDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_ERRATUM_A008585 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_ERRATUM_A008585 new file mode 100644 index 000000000000..8dce0afd8c31 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_ERRATUM_A008585 @@ -0,0 +1 @@ +CONFIG_FSL_ERRATUM_A008585=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_QDMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_QDMA new file mode 100644 index 000000000000..f26803f1e455 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_QDMA @@ -0,0 +1 @@ +# CONFIG_FSL_QDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_RCPM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_RCPM new file mode 100644 index 000000000000..aab0d456e6a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FSL_RCPM @@ -0,0 +1 @@ +# CONFIG_FSL_RCPM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FS_MBCACHE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FS_MBCACHE new file mode 100644 index 000000000000..daee2e23d02f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FS_MBCACHE @@ -0,0 +1 @@ +CONFIG_FS_MBCACHE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY new file mode 100644 index 000000000000..d1f89cd7802f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY @@ -0,0 +1 @@ +CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUJITSU_ES 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUJITSU_ES new file mode 100644 index 000000000000..39b50c904155 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUJITSU_ES @@ -0,0 +1 @@ +# CONFIG_FUJITSU_ES is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUNCTION_ALIGNMENT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUNCTION_ALIGNMENT new file mode 100644 index 000000000000..711e4b76e221 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUNCTION_ALIGNMENT @@ -0,0 +1 @@ +CONFIG_FUNCTION_ALIGNMENT=8 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUNCTION_ALIGNMENT_8B b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUNCTION_ALIGNMENT_8B new file mode 100644 index 000000000000..0fe9f0d17b10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FUNCTION_ALIGNMENT_8B @@ -0,0 +1 @@ +CONFIG_FUNCTION_ALIGNMENT_8B=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS new file mode 100644 index 000000000000..9f843f7e3aa4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS @@ -0,0 +1 @@ +CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_ARCH_TOPOLOGY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_ARCH_TOPOLOGY new file mode 100644 index 000000000000..e40dd4f28e2d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_ARCH_TOPOLOGY @@ -0,0 +1 @@ +CONFIG_GENERIC_ARCH_TOPOLOGY=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_CSUM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_CSUM new file mode 100644 index 000000000000..b93b55b27d64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_CSUM @@ -0,0 +1 @@ +CONFIG_GENERIC_CSUM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_HWEIGHT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_HWEIGHT new file mode 100644 index 
000000000000..6b9eb552a3de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_HWEIGHT @@ -0,0 +1 @@ +CONFIG_GENERIC_HWEIGHT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IDLE_POLL_SETUP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IDLE_POLL_SETUP new file mode 100644 index 000000000000..e509206c959f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IDLE_POLL_SETUP @@ -0,0 +1 @@ +CONFIG_GENERIC_IDLE_POLL_SETUP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_IPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_IPI new file mode 100644 index 000000000000..64496f311493 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_IPI @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_IPI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_SHOW_LEVEL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_SHOW_LEVEL new file mode 100644 index 000000000000..b5f8b58f38a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_IRQ_SHOW_LEVEL @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_SCHED_CLOCK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_SCHED_CLOCK new file mode 100644 index 000000000000..3f66970f6957 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GENERIC_SCHED_CLOCK @@ -0,0 +1 @@ +CONFIG_GENERIC_SCHED_CLOCK=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_74X164 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_74X164 new file mode 100644 index 000000000000..543197b0bab1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_74X164 @@ -0,0 +1 @@ +# CONFIG_GPIO_74X164 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_74XX_MMIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_74XX_MMIO new file mode 100644 index 000000000000..b761ec18af4a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_74XX_MMIO @@ -0,0 +1 @@ +# CONFIG_GPIO_74XX_MMIO is not 
set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_ADNP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_ADNP new file mode 100644 index 000000000000..5f2bab733e67 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_ADNP @@ -0,0 +1 @@ +# CONFIG_GPIO_ADNP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_ALTERA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_ALTERA new file mode 100644 index 000000000000..621d4475bd0a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_ALTERA @@ -0,0 +1 @@ +# CONFIG_GPIO_ALTERA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_CADENCE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_CADENCE new file mode 100644 index 000000000000..fd915b0e90d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_CADENCE @@ -0,0 +1 @@ +# CONFIG_GPIO_CADENCE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_DWAPB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_DWAPB new file mode 100644 index 000000000000..eabf56effa8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_DWAPB @@ -0,0 +1 @@ +CONFIG_GPIO_DWAPB=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_FTGPIO010 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_FTGPIO010 new file mode 100644 index 000000000000..3a1fb41ea869 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_FTGPIO010 @@ -0,0 +1 @@ +# CONFIG_GPIO_FTGPIO010 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_GRGPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_GRGPIO new file mode 100644 index 000000000000..826d836bf5af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_GRGPIO @@ -0,0 +1 @@ +# CONFIG_GPIO_GRGPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_GW_PLD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_GW_PLD new file mode 100644 index 000000000000..a521799fe070 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_GW_PLD @@ 
-0,0 +1 @@ +# CONFIG_GPIO_GW_PLD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_HLWD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_HLWD new file mode 100644 index 000000000000..85864554caa3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_HLWD @@ -0,0 +1 @@ +# CONFIG_GPIO_HLWD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_LOGICVC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_LOGICVC new file mode 100644 index 000000000000..1ee4eb11a799 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_LOGICVC @@ -0,0 +1 @@ +# CONFIG_GPIO_LOGICVC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_PL061 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_PL061 new file mode 100644 index 000000000000..36d6b513b176 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_PL061 @@ -0,0 +1 @@ +CONFIG_GPIO_PL061=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_SIFIVE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_SIFIVE new file mode 100644 index 000000000000..461cb49e425c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_SIFIVE @@ -0,0 +1 @@ +# CONFIG_GPIO_SIFIVE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_SYSCON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_SYSCON new file mode 100644 index 000000000000..714d52d62085 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_SYSCON @@ -0,0 +1 @@ +# CONFIG_GPIO_SYSCON is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_THUNDERX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_THUNDERX new file mode 100644 index 000000000000..7a45d6a75215 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_THUNDERX @@ -0,0 +1 @@ +# CONFIG_GPIO_THUNDERX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_WATCHDOG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_WATCHDOG new file mode 100644 index 000000000000..37cf2150c1fe --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_WATCHDOG @@ -0,0 +1 @@ +CONFIG_GPIO_WATCHDOG=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE new file mode 100644 index 000000000000..f210ebcce6a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE @@ -0,0 +1 @@ +CONFIG_GPIO_XGENE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE_SB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE_SB new file mode 100644 index 000000000000..01e2940751f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XGENE_SB @@ -0,0 +1 @@ +CONFIG_GPIO_XGENE_SB=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XILINX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XILINX new file mode 100644 index 000000000000..d7769012beb7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XILINX @@ -0,0 +1 @@ +# CONFIG_GPIO_XILINX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XLP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XLP new file mode 100644 index 000000000000..f99cd41f3bc5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_XLP @@ -0,0 +1 @@ +CONFIG_GPIO_XLP=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_BITREVERSE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_BITREVERSE new file mode 100644 index 000000000000..e13003cb82ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_BITREVERSE @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_BITREVERSE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_COMPILER_H b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_COMPILER_H new file mode 100644 index 000000000000..f6287de8ae21 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_COMPILER_H @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_COMPILER_H=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_KASAN_HW_TAGS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_KASAN_HW_TAGS 
new file mode 100644 index 000000000000..c6dd2f170ad4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_KASAN_HW_TAGS @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_KASAN_HW_TAGS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_KASAN_SW_TAGS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_KASAN_SW_TAGS new file mode 100644 index 000000000000..18b5a02c7b7c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARCH_KASAN_SW_TAGS @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC new file mode 100644 index 000000000000..34c80e3eacfb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC @@ -0,0 +1 @@ +CONFIG_HAVE_ARM_SMCCC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC_DISCOVERY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC_DISCOVERY new file mode 100644 index 000000000000..a156962aec95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_ARM_SMCCC_DISCOVERY @@ -0,0 +1 @@ +CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS new file mode 100644 index 000000000000..a4bf5edc0535 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS @@ -0,0 +1 @@ +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE new file mode 100644 index 000000000000..fd2a1e088d72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_PREEMPT_DYNAMIC_KEY 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_PREEMPT_DYNAMIC_KEY new file mode 100644 index 000000000000..c0dea18ed0a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HAVE_PREEMPT_DYNAMIC_KEY @@ -0,0 +1 @@ +CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HI3660_MBOX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HI3660_MBOX new file mode 100644 index 000000000000..f41b9a6d575b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HI3660_MBOX @@ -0,0 +1 @@ +CONFIG_HI3660_MBOX=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HI6220_MBOX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HI6220_MBOX new file mode 100644 index 000000000000..e8313e8a8fa3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HI6220_MBOX @@ -0,0 +1 @@ +CONFIG_HI6220_MBOX=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_ALPS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_ALPS new file mode 100644 index 000000000000..a0a848d703f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_ALPS @@ -0,0 +1 @@ +# CONFIG_HID_ALPS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_ASUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_ASUS new file mode 100644 index 000000000000..b5df524a7a25 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_ASUS @@ -0,0 +1 @@ +# CONFIG_HID_ASUS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_CMEDIA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_CMEDIA new file mode 100644 index 000000000000..63e6eb1cdf1c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_CMEDIA @@ -0,0 +1 @@ +# CONFIG_HID_CMEDIA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_CUSTOM_SENSOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_CUSTOM_SENSOR new file mode 100644 index 000000000000..ff4111852e8c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_CUSTOM_SENSOR @@ -0,0 +1 @@ +# CONFIG_HID_SENSOR_CUSTOM_SENSOR 
is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_HUB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_HUB new file mode 100644 index 000000000000..bfb17e4031e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_SENSOR_HUB @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_HUB=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HIP04_ETH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HIP04_ETH new file mode 100644 index 000000000000..0c4541ec0626 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HIP04_ETH @@ -0,0 +1 @@ +# CONFIG_HIP04_ETH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_ERRATUM_161010101 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_ERRATUM_161010101 new file mode 100644 index 000000000000..b5914254a5aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_ERRATUM_161010101 @@ -0,0 +1 @@ +CONFIG_HISILICON_ERRATUM_161010101=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_IRQ_MBIGEN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_IRQ_MBIGEN new file mode 100644 index 000000000000..5ca6d30d145a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_IRQ_MBIGEN @@ -0,0 +1 @@ +CONFIG_HISILICON_IRQ_MBIGEN=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_LPC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_LPC new file mode 100644 index 000000000000..fff86879cd66 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISILICON_LPC @@ -0,0 +1 @@ +CONFIG_HISILICON_LPC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_ACC_VFIO_PCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_ACC_VFIO_PCI new file mode 100644 index 000000000000..ac59d0074877 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_ACC_VFIO_PCI @@ -0,0 +1 @@ +# CONFIG_HISI_ACC_VFIO_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_FEMAC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_FEMAC new file mode 100644 index 
000000000000..42a71fa39fcb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_FEMAC @@ -0,0 +1 @@ +# CONFIG_HISI_FEMAC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_HIKEY_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_HIKEY_USB new file mode 100644 index 000000000000..9e6126e97046 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_HIKEY_USB @@ -0,0 +1 @@ +# CONFIG_HISI_HIKEY_USB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PCIE_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PCIE_PMU new file mode 100644 index 000000000000..6f10dd0dbed9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PCIE_PMU @@ -0,0 +1 @@ +CONFIG_HISI_PCIE_PMU=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PMU new file mode 100644 index 000000000000..f9809111ad96 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PMU @@ -0,0 +1 @@ +CONFIG_HISI_PMU=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PTT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PTT new file mode 100644 index 000000000000..790d8f354778 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HISI_PTT @@ -0,0 +1 @@ +# CONFIG_HISI_PTT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HIX5HD2_GMAC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HIX5HD2_GMAC new file mode 100644 index 000000000000..94b80b5d701a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HIX5HD2_GMAC @@ -0,0 +1 @@ +# CONFIG_HIX5HD2_GMAC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS new file mode 100644 index 000000000000..b4839e93f57f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS @@ -0,0 +1 @@ +CONFIG_HNS=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS3_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS3_PMU new file mode 100644 index 000000000000..513062961f77 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HNS3_PMU @@ -0,0 +1 @@ +# CONFIG_HNS3_PMU is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HOTPLUG_PCI_SHPC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HOTPLUG_PCI_SHPC new file mode 100644 index 000000000000..f03be8a8f294 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HOTPLUG_PCI_SHPC @@ -0,0 +1 @@ +# CONFIG_HOTPLUG_PCI_SHPC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HP_ILO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HP_ILO new file mode 100644 index 000000000000..74de63a68884 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HP_ILO @@ -0,0 +1 @@ +# CONFIG_HP_ILO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HP_WATCHDOG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HP_WATCHDOG new file mode 100644 index 000000000000..d2bf24e6b5cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HP_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_HP_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HVC_DCC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HVC_DCC new file mode 100644 index 000000000000..b2e800764a49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HVC_DCC @@ -0,0 +1 @@ +# CONFIG_HVC_DCC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HWSPINLOCK_QCOM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HWSPINLOCK_QCOM new file mode 100644 index 000000000000..ccfa8e92649e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HWSPINLOCK_QCOM @@ -0,0 +1 @@ +# CONFIG_HWSPINLOCK_QCOM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_PERF_EVENTS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_PERF_EVENTS new file mode 100644 index 000000000000..10c0008c175e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_PERF_EVENTS @@ -0,0 +1 @@ +CONFIG_HW_PERF_EVENTS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_ARM_SMCCC_TRNG 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_ARM_SMCCC_TRNG new file mode 100644 index 000000000000..bf0e4ee3a9dc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_ARM_SMCCC_TRNG @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_ARM_SMCCC_TRNG=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CAVIUM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CAVIUM new file mode 100644 index 000000000000..e3bec9744b53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CAVIUM @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_CAVIUM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CCTRNG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CCTRNG new file mode 100644 index 000000000000..97f5363c7233 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CCTRNG @@ -0,0 +1 @@ +# CONFIG_HW_RANDOM_CCTRNG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CN10K b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CN10K new file mode 100644 index 000000000000..84a1cd4682d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_CN10K @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_CN10K=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISI new file mode 100644 index 000000000000..727609690c50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISI @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_HISI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISTB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISTB new file mode 100644 index 000000000000..621d496a1aba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_HISTB @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_HISTB=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_VIRTIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_VIRTIO new file mode 100644 index 000000000000..1b31e553522e --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_VIRTIO @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_VIRTIO=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_XGENE new file mode 100644 index 000000000000..85af60d35a41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HW_RANDOM_XGENE @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_XGENE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ALGOPCF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ALGOPCF new file mode 100644 index 000000000000..98e1f0c7a1ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ALGOPCF @@ -0,0 +1 @@ +CONFIG_I2C_ALGOPCF=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_AMD756 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_AMD756 new file mode 100644 index 000000000000..d30b6d5d4827 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_AMD756 @@ -0,0 +1 @@ +# CONFIG_I2C_AMD756 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_AMD8111 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_AMD8111 new file mode 100644 index 000000000000..f200ec046b48 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_AMD8111 @@ -0,0 +1 @@ +# CONFIG_I2C_AMD8111 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ARB_GPIO_CHALLENGE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ARB_GPIO_CHALLENGE new file mode 100644 index 000000000000..b828d65082c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ARB_GPIO_CHALLENGE @@ -0,0 +1 @@ +CONFIG_I2C_ARB_GPIO_CHALLENGE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_CADENCE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_CADENCE new file mode 100644 index 000000000000..f18079afe06f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_CADENCE @@ -0,0 +1 @@ +# CONFIG_I2C_CADENCE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_DEMUX_PINCTRL 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_DEMUX_PINCTRL new file mode 100644 index 000000000000..92132d0e44d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_DEMUX_PINCTRL @@ -0,0 +1 @@ +# CONFIG_I2C_DEMUX_PINCTRL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO new file mode 100644 index 000000000000..99bcd1824491 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO @@ -0,0 +1 @@ +CONFIG_I2C_GPIO=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO_FAULT_INJECTOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO_FAULT_INJECTOR new file mode 100644 index 000000000000..711cee4c93fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_GPIO_FAULT_INJECTOR @@ -0,0 +1 @@ +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HELPER_AUTO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HELPER_AUTO new file mode 100644 index 000000000000..335f5749903c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HELPER_AUTO @@ -0,0 +1 @@ +# CONFIG_I2C_HELPER_AUTO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_ELAN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_ELAN new file mode 100644 index 000000000000..26857f6f7ebe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_ELAN @@ -0,0 +1 @@ +# CONFIG_I2C_HID_OF_ELAN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_GOODIX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_GOODIX new file mode 100644 index 000000000000..ad8ec4bc1fb7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HID_OF_GOODIX @@ -0,0 +1 @@ +# CONFIG_I2C_HID_OF_GOODIX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HISI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HISI new file mode 100644 index 000000000000..47a85722ea5f --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HISI @@ -0,0 +1 @@ +CONFIG_I2C_HISI=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HIX5HD2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HIX5HD2 new file mode 100644 index 000000000000..7b16fe2e5038 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_HIX5HD2 @@ -0,0 +1 @@ +# CONFIG_I2C_HIX5HD2 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_I801 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_I801 new file mode 100644 index 000000000000..2663365cdb83 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_I801 @@ -0,0 +1 @@ +# CONFIG_I2C_I801 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ISCH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ISCH new file mode 100644 index 000000000000..083275fd9594 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ISCH @@ -0,0 +1 @@ +# CONFIG_I2C_ISCH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MLXCPLD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MLXCPLD new file mode 100644 index 000000000000..5f09745e3fc5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MLXCPLD @@ -0,0 +1 @@ +# CONFIG_I2C_MLXCPLD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPIO new file mode 100644 index 000000000000..f9fcb2b2fe97 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPIO @@ -0,0 +1 @@ +CONFIG_I2C_MUX_GPIO=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPMUX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPMUX new file mode 100644 index 000000000000..62f68a335129 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_GPMUX @@ -0,0 +1 @@ +# CONFIG_I2C_MUX_GPMUX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA9541 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA9541 new file mode 100644 index 000000000000..5b5fe6052075 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA9541 @@ -0,0 +1 @@ +CONFIG_I2C_MUX_PCA9541=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA954x b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA954x new file mode 100644 index 000000000000..24c8bfd0fcb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PCA954x @@ -0,0 +1 @@ +CONFIG_I2C_MUX_PCA954x=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PINCTRL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PINCTRL new file mode 100644 index 000000000000..eadba88bd986 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_MUX_PINCTRL @@ -0,0 +1 @@ +CONFIG_I2C_MUX_PINCTRL=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_NOMADIK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_NOMADIK new file mode 100644 index 000000000000..244fb5321433 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_NOMADIK @@ -0,0 +1 @@ +# CONFIG_I2C_NOMADIK is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_PIIX4 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_PIIX4 new file mode 100644 index 000000000000..ed534b4739a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_PIIX4 @@ -0,0 +1 @@ +# CONFIG_I2C_PIIX4 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_QCOM_CCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_QCOM_CCI new file mode 100644 index 000000000000..b66c46744f33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_QCOM_CCI @@ -0,0 +1 @@ +# CONFIG_I2C_QCOM_CCI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_QUP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_QUP new file mode 100644 index 000000000000..7fb7cd350d2c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_QUP @@ -0,0 +1 @@ +CONFIG_I2C_QUP=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_RK3X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_RK3X new file mode 100644 index 
000000000000..b11d3db83351 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_RK3X @@ -0,0 +1 @@ +# CONFIG_I2C_RK3X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SCMI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SCMI new file mode 100644 index 000000000000..fcaad2a3a490 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SCMI @@ -0,0 +1 @@ +# CONFIG_I2C_SCMI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SIS96X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SIS96X new file mode 100644 index 000000000000..d896b4d2fbb5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SIS96X @@ -0,0 +1 @@ +# CONFIG_I2C_SIS96X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SLAVE_TESTUNIT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SLAVE_TESTUNIT new file mode 100644 index 000000000000..14e66f89c8a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_SLAVE_TESTUNIT @@ -0,0 +1 @@ +# CONFIG_I2C_SLAVE_TESTUNIT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_THUNDERX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_THUNDERX new file mode 100644 index 000000000000..c544a8c6863a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_THUNDERX @@ -0,0 +1 @@ +CONFIG_I2C_THUNDERX=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VERSATILE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VERSATILE new file mode 100644 index 000000000000..3591505da871 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VERSATILE @@ -0,0 +1 @@ +CONFIG_I2C_VERSATILE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIA new file mode 100644 index 000000000000..7fa98de9de28 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIA @@ -0,0 +1 @@ +# CONFIG_I2C_VIA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIAPRO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIAPRO new 
file mode 100644 index 000000000000..a1e152bbe6d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_VIAPRO @@ -0,0 +1 @@ +# CONFIG_I2C_VIAPRO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_XGENE_SLIMPRO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_XGENE_SLIMPRO new file mode 100644 index 000000000000..44af582d4b80 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_XGENE_SLIMPRO @@ -0,0 +1 @@ +CONFIG_I2C_XGENE_SLIMPRO=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_XLP9XX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_XLP9XX new file mode 100644 index 000000000000..bcc41c37608f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_XLP9XX @@ -0,0 +1 @@ +CONFIG_I2C_XLP9XX=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN new file mode 100644 index 000000000000..b8667abedd33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN @@ -0,0 +1 @@ +# CONFIG_I2C_ZHAOXIN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN_SMBUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN_SMBUS new file mode 100644 index 000000000000..65414ab9fc9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_ZHAOXIN_SMBUS @@ -0,0 +1 @@ +# CONFIG_I2C_ZHAOXIN_SMBUS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IEEE802154_FAKELB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IEEE802154_FAKELB new file mode 100644 index 000000000000..a6386c72c6c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IEEE802154_FAKELB @@ -0,0 +1 @@ +# CONFIG_IEEE802154_FAKELB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IIO new file mode 100644 index 000000000000..80d289815272 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IIO @@ -0,0 +1 @@ +# CONFIG_IIO is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS new file mode 100644 index 000000000000..7b67654797c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS @@ -0,0 +1 @@ +CONFIG_INFINIBAND_HNS=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS_HIP08 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS_HIP08 new file mode 100644 index 000000000000..bc6e71eae8b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INFINIBAND_HNS_HIP08 @@ -0,0 +1 @@ +CONFIG_INFINIBAND_HNS_HIP08=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_JOYDEV b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_JOYDEV new file mode 100644 index 000000000000..f0730fa0823f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_JOYDEV @@ -0,0 +1 @@ +# CONFIG_INPUT_JOYDEV is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_MISC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_MISC new file mode 100644 index 000000000000..f9ad3d4b752d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_MISC @@ -0,0 +1 @@ +# CONFIG_INPUT_MISC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_TABLET b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_TABLET new file mode 100644 index 000000000000..ea8f01dfa4c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_TABLET @@ -0,0 +1 @@ +# CONFIG_INPUT_TABLET is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_TOUCHSCREEN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_TOUCHSCREEN new file mode 100644 index 000000000000..e74a7dfb04e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_INPUT_TOUCHSCREEN @@ -0,0 +1 @@ +# CONFIG_INPUT_TOUCHSCREEN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMUFD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMUFD new file mode 100644 index 000000000000..e8af97f06094 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMUFD @@ -0,0 +1 @@ +# CONFIG_IOMMUFD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_ARMV7S b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_ARMV7S new file mode 100644 index 000000000000..9f8a9a2ca806 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_ARMV7S @@ -0,0 +1 @@ +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_DART b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_DART new file mode 100644 index 000000000000..2a03ed16f53b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_DART @@ -0,0 +1 @@ +# CONFIG_IOMMU_IO_PGTABLE_DART is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST new file mode 100644 index 000000000000..b6f514a3f1ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST @@ -0,0 +1 @@ +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IPMB_DEVICE_INTERFACE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IPMB_DEVICE_INTERFACE new file mode 100644 index 000000000000..ec241f3218b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IPMB_DEVICE_INTERFACE @@ -0,0 +1 @@ +# CONFIG_IPMB_DEVICE_INTERFACE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IPMI_IPMB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IPMI_IPMB new file mode 100644 index 000000000000..a76add6debef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IPMI_IPMB @@ -0,0 +1 @@ +# CONFIG_IPMI_IPMB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQCHIP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQCHIP new file mode 100644 index 000000000000..fd0d005fc854 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQCHIP @@ -0,0 +1 @@ 
+CONFIG_IRQCHIP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQ_BYPASS_MANAGER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQ_BYPASS_MANAGER new file mode 100644 index 000000000000..b10c64ccdb3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQ_BYPASS_MANAGER @@ -0,0 +1 @@ +CONFIG_IRQ_BYPASS_MANAGER=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS new file mode 100644 index 000000000000..76712fca3972 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS @@ -0,0 +1 @@ +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISDN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISDN new file mode 100644 index 000000000000..7cf059484bb3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISDN @@ -0,0 +1 @@ +# CONFIG_ISDN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISL29003 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISL29003 new file mode 100644 index 000000000000..266725319048 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISL29003 @@ -0,0 +1 @@ +# CONFIG_ISL29003 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISL29020 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISL29020 new file mode 100644 index 000000000000..abf4d72ad771 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ISL29020 @@ -0,0 +1 @@ +# CONFIG_ISL29020 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_K3_DMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_K3_DMA new file mode 100644 index 000000000000..b698e7e5e1e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_K3_DMA @@ -0,0 +1 @@ +# CONFIG_K3_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KARMA_PARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KARMA_PARTITION new file mode 100644 index 000000000000..ea284b410a90 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KARMA_PARTITION @@ -0,0 +1 @@ +# CONFIG_KARMA_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KERNEL_MODE_NEON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KERNEL_MODE_NEON new file mode 100644 index 000000000000..9f6b7232f00f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KERNEL_MODE_NEON @@ -0,0 +1 @@ +CONFIG_KERNEL_MODE_NEON=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_BCM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_BCM new file mode 100644 index 000000000000..d904364b517f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_BCM @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_BCM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_CAP11XX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_CAP11XX new file mode 100644 index 000000000000..add2537e2901 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_CAP11XX @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_CAP11XX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_GPIO new file mode 100644 index 000000000000..46a9f62efb08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_GPIO @@ -0,0 +1 @@ +CONFIG_KEYBOARD_GPIO=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_OMAP4 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_OMAP4 new file mode 100644 index 000000000000..e3ce3f315af2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_OMAP4 @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_OMAP4 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_PINEPHONE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_PINEPHONE new file mode 100644 index 000000000000..714e0bec40bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_PINEPHONE @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_PINEPHONE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KS7010 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KS7010 new file mode 100644 index 000000000000..169ffed69d68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KS7010 @@ -0,0 +1 @@ +# CONFIG_KS7010 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AAT1290 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AAT1290 new file mode 100644 index 000000000000..ab2a5b89a146 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AAT1290 @@ -0,0 +1 @@ +# CONFIG_LEDS_AAT1290 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AN30259A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AN30259A new file mode 100644 index 000000000000..7732da37590d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AN30259A @@ -0,0 +1 @@ +# CONFIG_LEDS_AN30259A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AS3645A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AS3645A new file mode 100644 index 000000000000..b5ebf4ccf341 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AS3645A @@ -0,0 +1 @@ +# CONFIG_LEDS_AS3645A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AW2013 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AW2013 new file mode 100644 index 000000000000..9a84d9f6bb86 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_AW2013 @@ -0,0 +1 @@ +# CONFIG_LEDS_AW2013 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6328 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6328 new file mode 100644 index 000000000000..b4c1a1e368a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6328 @@ -0,0 +1 @@ +# CONFIG_LEDS_BCM6328 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6358 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6358 new file mode 100644 index 000000000000..98e396e48929 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_BCM6358 @@ -0,0 +1 @@ +# CONFIG_LEDS_BCM6358 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_CLASS_FLASH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_CLASS_FLASH new file mode 100644 index 000000000000..fef225ff2d53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_CLASS_FLASH @@ -0,0 +1 @@ +CONFIG_LEDS_CLASS_FLASH=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_CR0014114 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_CR0014114 new file mode 100644 index 000000000000..41b6f0596914 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_CR0014114 @@ -0,0 +1 @@ +# CONFIG_LEDS_CR0014114 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_EL15203000 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_EL15203000 new file mode 100644 index 000000000000..034500718170 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_EL15203000 @@ -0,0 +1 @@ +# CONFIG_LEDS_EL15203000 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_IS31FL32XX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_IS31FL32XX new file mode 100644 index 000000000000..bc726f797e2c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_IS31FL32XX @@ -0,0 +1 @@ +# CONFIG_LEDS_IS31FL32XX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_KTD2692 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_KTD2692 new file mode 100644 index 000000000000..69f58992613b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_KTD2692 @@ -0,0 +1 @@ +# CONFIG_LEDS_KTD2692 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3601X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3601X new file mode 100644 index 000000000000..86560369f43d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3601X @@ -0,0 +1 @@ +# CONFIG_LEDS_LM3601X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3692X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3692X new file mode 100644 index 000000000000..00fbe48a7070 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3692X @@ -0,0 +1 @@ +# CONFIG_LEDS_LM3692X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3697 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3697 new file mode 100644 index 000000000000..fe1bb37742b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LM3697 @@ -0,0 +1 @@ +# CONFIG_LEDS_LM3697 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LP55XX_COMMON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LP55XX_COMMON new file mode 100644 index 000000000000..a90d7885b2a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LP55XX_COMMON @@ -0,0 +1 @@ +# CONFIG_LEDS_LP55XX_COMMON is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LP8860 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LP8860 new file mode 100644 index 000000000000..cc362679de0d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LP8860 @@ -0,0 +1 @@ +# CONFIG_LEDS_LP8860 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LT3593 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LT3593 new file mode 100644 index 000000000000..50bf0613fe3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_LT3593 @@ -0,0 +1 @@ +CONFIG_LEDS_LT3593=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_REGULATOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_REGULATOR new file mode 100644 index 000000000000..b190ec3a8404 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_REGULATOR @@ -0,0 +1 @@ +# CONFIG_LEDS_REGULATOR is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_RT4505 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_RT4505 new file mode 100644 index 000000000000..04841c7d3df0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_RT4505 @@ -0,0 +1 @@ +# CONFIG_LEDS_RT4505 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_RT8515 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_RT8515 new file mode 100644 index 000000000000..e8f8a1a5aec4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_RT8515 @@ -0,0 +1 @@ +# CONFIG_LEDS_RT8515 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SGM3140 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SGM3140 new file mode 100644 index 000000000000..38c3598c8552 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SGM3140 @@ -0,0 +1 @@ +# CONFIG_LEDS_SGM3140 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SPI_BYTE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SPI_BYTE new file mode 100644 index 000000000000..60b8283165f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SPI_BYTE @@ -0,0 +1 @@ +# CONFIG_LEDS_SPI_BYTE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SYSCON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SYSCON new file mode 100644 index 000000000000..cc80700c5399 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_SYSCON @@ -0,0 +1 @@ +# CONFIG_LEDS_SYSCON is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_TRIGGER_AUDIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_TRIGGER_AUDIO new file mode 100644 index 000000000000..6004ddbe5727 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_TRIGGER_AUDIO @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_AUDIO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_TRIGGER_DISK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_TRIGGER_DISK new file mode 100644 index 000000000000..5c1e5bef601f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LEDS_TRIGGER_DISK @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_DISK is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LIBFDT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LIBFDT new file mode 100644 index 000000000000..ff6b4da46e59 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LIBFDT @@ -0,0 +1 @@ 
+CONFIG_LIBFDT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LIBNVDIMM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LIBNVDIMM new file mode 100644 index 000000000000..aeaaefec7061 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LIBNVDIMM @@ -0,0 +1 @@ +CONFIG_LIBNVDIMM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LINEAR_RANGES b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LINEAR_RANGES new file mode 100644 index 000000000000..7b41203e91d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LINEAR_RANGES @@ -0,0 +1 @@ +CONFIG_LINEAR_RANGES=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LITEX_LITEETH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LITEX_LITEETH new file mode 100644 index 000000000000..39dafe4de685 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LITEX_LITEETH @@ -0,0 +1 @@ +# CONFIG_LITEX_LITEETH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LITEX_SOC_CONTROLLER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LITEX_SOC_CONTROLLER new file mode 100644 index 000000000000..0070e1494bae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LITEX_SOC_CONTROLLER @@ -0,0 +1 @@ +# CONFIG_LITEX_SOC_CONTROLLER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LPC_ICH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LPC_ICH new file mode 100644 index 000000000000..117d89d785c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LPC_ICH @@ -0,0 +1 @@ +# CONFIG_LPC_ICH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LPC_SCH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LPC_SCH new file mode 100644 index 000000000000..13ca80491f48 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LPC_SCH @@ -0,0 +1 @@ +# CONFIG_LPC_SCH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LTE_GDM724X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LTE_GDM724X new file mode 100644 index 000000000000..ae10a4a61f7c --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_LTE_GDM724X @@ -0,0 +1 @@ +# CONFIG_LTE_GDM724X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MAC_PARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MAC_PARTITION new file mode 100644 index 000000000000..b6cee505757c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MAC_PARTITION @@ -0,0 +1 @@ +# CONFIG_MAC_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MAILBOX_TEST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MAILBOX_TEST new file mode 100644 index 000000000000..6b2b9888d55c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MAILBOX_TEST @@ -0,0 +1 @@ +# CONFIG_MAILBOX_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_CN10K_DDR_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_CN10K_DDR_PMU new file mode 100644 index 000000000000..e9beb55d69ad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_CN10K_DDR_PMU @@ -0,0 +1 @@ +# CONFIG_MARVELL_CN10K_DDR_PMU is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_CN10K_TAD_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_CN10K_TAD_PMU new file mode 100644 index 000000000000..2341fc1eb2b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_CN10K_TAD_PMU @@ -0,0 +1 @@ +# CONFIG_MARVELL_CN10K_TAD_PMU is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_GTI_WDT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_GTI_WDT new file mode 100644 index 000000000000..deac0785918c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MARVELL_GTI_WDT @@ -0,0 +1 @@ +CONFIG_MARVELL_GTI_WDT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_GPIO new file mode 100644 index 000000000000..27b93466f9fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_GPIO @@ -0,0 +1 @@ +# CONFIG_MDIO_BUS_MUX_GPIO is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_MMIOREG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_MMIOREG new file mode 100644 index 000000000000..4d738fd985df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_MMIOREG @@ -0,0 +1 @@ +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_MULTIPLEXER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_MULTIPLEXER new file mode 100644 index 000000000000..c2786fc089f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_BUS_MUX_MULTIPLEXER @@ -0,0 +1 @@ +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_GPIO new file mode 100644 index 000000000000..a317fa125fc7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_GPIO @@ -0,0 +1 @@ +CONFIG_MDIO_GPIO=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_HISI_FEMAC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_HISI_FEMAC new file mode 100644 index 000000000000..6e4c16cf9d0d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_HISI_FEMAC @@ -0,0 +1 @@ +CONFIG_MDIO_HISI_FEMAC=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ4019 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ4019 new file mode 100644 index 000000000000..7bdf4704b651 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ4019 @@ -0,0 +1 @@ +# CONFIG_MDIO_IPQ4019 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ8064 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ8064 new file mode 100644 index 000000000000..3104618fc456 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_IPQ8064 @@ -0,0 +1 @@ +# CONFIG_MDIO_IPQ8064 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_MSCC_MIIM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_MSCC_MIIM new file mode 100644 index 
000000000000..7ed03900e08f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_MSCC_MIIM @@ -0,0 +1 @@ +CONFIG_MDIO_MSCC_MIIM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_OCTEON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_OCTEON new file mode 100644 index 000000000000..28cd01876971 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_OCTEON @@ -0,0 +1 @@ +CONFIG_MDIO_OCTEON=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_XGENE new file mode 100644 index 000000000000..8a6b53a5b709 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MDIO_XGENE @@ -0,0 +1 @@ +CONFIG_MDIO_XGENE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MEDIA_CEC_SUPPORT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MEDIA_CEC_SUPPORT new file mode 100644 index 000000000000..99e81383fc8f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MEDIA_CEC_SUPPORT @@ -0,0 +1 @@ +# CONFIG_MEDIA_CEC_SUPPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MEDIA_SUPPORT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MEDIA_SUPPORT new file mode 100644 index 000000000000..0348bacd103f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MEDIA_SUPPORT @@ -0,0 +1 @@ +# CONFIG_MEDIA_SUPPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ACT8945A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ACT8945A new file mode 100644 index 000000000000..d13164db622c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ACT8945A @@ -0,0 +1 @@ +# CONFIG_MFD_ACT8945A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_AS3722 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_AS3722 new file mode 100644 index 000000000000..eeaa8ff75763 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_AS3722 @@ -0,0 +1 @@ +# CONFIG_MFD_AS3722 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_FLEXCOM 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_FLEXCOM new file mode 100644 index 000000000000..e41ced06262a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_FLEXCOM @@ -0,0 +1 @@ +# CONFIG_MFD_ATMEL_FLEXCOM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_HLCDC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_HLCDC new file mode 100644 index 000000000000..cfd9b42ae1bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ATMEL_HLCDC @@ -0,0 +1 @@ +# CONFIG_MFD_ATMEL_HLCDC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_CORE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_CORE new file mode 100644 index 000000000000..c8855e8a0a4b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_CORE @@ -0,0 +1 @@ +CONFIG_MFD_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_CPCAP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_CPCAP new file mode 100644 index 000000000000..0f04081818e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_CPCAP @@ -0,0 +1 @@ +# CONFIG_MFD_CPCAP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_GATEWORKS_GSC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_GATEWORKS_GSC new file mode 100644 index 000000000000..19ea1a015197 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_GATEWORKS_GSC @@ -0,0 +1 @@ +# CONFIG_MFD_GATEWORKS_GSC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_HI6421_PMIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_HI6421_PMIC new file mode 100644 index 000000000000..5b380187f337 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_HI6421_PMIC @@ -0,0 +1 @@ +# CONFIG_MFD_HI6421_PMIC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_HI655X_PMIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_HI655X_PMIC new file mode 100644 index 000000000000..b45244e7282e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_HI655X_PMIC @@ -0,0 +1 
@@ +# CONFIG_MFD_HI655X_PMIC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_LOCHNAGAR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_LOCHNAGAR new file mode 100644 index 000000000000..400a681aba05 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_LOCHNAGAR @@ -0,0 +1 @@ +# CONFIG_MFD_LOCHNAGAR is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX5970 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX5970 new file mode 100644 index 000000000000..0c7e2967be08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX5970 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX5970 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77620 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77620 new file mode 100644 index 000000000000..a4fa3073f7a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77620 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX77620 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77650 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77650 new file mode 100644 index 000000000000..3ebe2fe0fd31 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77650 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX77650 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77686 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77686 new file mode 100644 index 000000000000..9cf9bc1ee4c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77686 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX77686 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77714 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77714 new file mode 100644 index 000000000000..3702f1749e66 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_MAX77714 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX77714 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NTXEC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NTXEC new file mode 100644 index 000000000000..5f7ec3cee225 --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NTXEC @@ -0,0 +1 @@ +# CONFIG_MFD_NTXEC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_PM8008 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_PM8008 new file mode 100644 index 000000000000..090e632199b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_PM8008 @@ -0,0 +1 @@ +# CONFIG_MFD_QCOM_PM8008 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_RPM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_RPM new file mode 100644 index 000000000000..a5c3c02d4dd8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_QCOM_RPM @@ -0,0 +1 @@ +# CONFIG_MFD_QCOM_RPM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_I2C b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_I2C new file mode 100644 index 000000000000..ae6e69800ca5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_RK8XX_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_SPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_SPI new file mode 100644 index 000000000000..4f39f1a9cd6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RK8XX_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_RK8XX_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RN5T618 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RN5T618 new file mode 100644 index 000000000000..752630240df1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RN5T618 @@ -0,0 +1 @@ +# CONFIG_MFD_RN5T618 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD71828 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD71828 new file mode 100644 index 000000000000..e60c1237e5ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD71828 @@ -0,0 +1 @@ +# CONFIG_MFD_ROHM_BD71828 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD718XX 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD718XX new file mode 100644 index 000000000000..3132a5664b89 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD718XX @@ -0,0 +1 @@ +# CONFIG_MFD_ROHM_BD718XX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD957XMUF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD957XMUF new file mode 100644 index 000000000000..f24e7b524dd9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_ROHM_BD957XMUF @@ -0,0 +1 @@ +# CONFIG_MFD_ROHM_BD957XMUF is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_I2C b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_I2C new file mode 100644 index 000000000000..ad3004be1be5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_RSMU_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_SPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_SPI new file mode 100644 index 000000000000..4721dc83707b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_RSMU_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_RSMU_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SEC_CORE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SEC_CORE new file mode 100644 index 000000000000..d6bc4de4d02b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SEC_CORE @@ -0,0 +1 @@ +# CONFIG_MFD_SEC_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SM501 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SM501 new file mode 100644 index 000000000000..1a0ec3f74af9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SM501 @@ -0,0 +1 @@ +# CONFIG_MFD_SM501 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STMFX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STMFX new file mode 100644 index 000000000000..480542e98283 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STMFX @@ -0,0 +1 @@ +# 
CONFIG_MFD_STMFX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STMPE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STMPE new file mode 100644 index 000000000000..a0f17335366b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STMPE @@ -0,0 +1 @@ +# CONFIG_MFD_STMPE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STPMIC1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STPMIC1 new file mode 100644 index 000000000000..d146574b6fdb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_STPMIC1 @@ -0,0 +1 @@ +# CONFIG_MFD_STPMIC1 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SYSCON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SYSCON new file mode 100644 index 000000000000..9890ebdb9fa0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_SYSCON @@ -0,0 +1 @@ +CONFIG_MFD_SYSCON=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TC3589X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TC3589X new file mode 100644 index 000000000000..a11f656fc99b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TC3589X @@ -0,0 +1 @@ +# CONFIG_MFD_TC3589X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TI_LP87565 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TI_LP87565 new file mode 100644 index 000000000000..112b4154ba37 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TI_LP87565 @@ -0,0 +1 @@ +# CONFIG_MFD_TI_LP87565 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65217 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65217 new file mode 100644 index 000000000000..2b2ee453ffc2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65217 @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65217 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65218 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65218 new file mode 100644 index 000000000000..ebc4bf3c0052 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65218 @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65218 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65219 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65219 new file mode 100644 index 000000000000..dbd6423b3d6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_TPS65219 @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65219 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VEXPRESS_SYSREG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VEXPRESS_SYSREG new file mode 100644 index 000000000000..0b40c91a58ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VEXPRESS_SYSREG @@ -0,0 +1 @@ +# CONFIG_MFD_VEXPRESS_SYSREG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VIPERBOARD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VIPERBOARD new file mode 100644 index 000000000000..1d4d00579b90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VIPERBOARD @@ -0,0 +1 @@ +# CONFIG_MFD_VIPERBOARD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VX855 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VX855 new file mode 100644 index 000000000000..8c01f8ec191d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_VX855 @@ -0,0 +1 @@ +# CONFIG_MFD_VX855 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MICREL_KS8995MA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MICREL_KS8995MA new file mode 100644 index 000000000000..cba7a233f9e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MICREL_KS8995MA @@ -0,0 +1 @@ +CONFIG_MICREL_KS8995MA=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MINIX_SUBPARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MINIX_SUBPARTITION new file mode 100644 index 000000000000..341becea35ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MINIX_SUBPARTITION @@ -0,0 +1 @@ +# CONFIG_MINIX_SUBPARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_PCI 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_PCI new file mode 100644 index 000000000000..eeec3f096a3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_PCI @@ -0,0 +1 @@ +# CONFIG_MISC_RTSX_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_USB new file mode 100644 index 000000000000..f41dad5dd03c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MISC_RTSX_USB @@ -0,0 +1 @@ +# CONFIG_MISC_RTSX_USB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MLXBF_GIGE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MLXBF_GIGE new file mode 100644 index 000000000000..3b96f758d9f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MLXBF_GIGE @@ -0,0 +1 @@ +# CONFIG_MLXBF_GIGE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_ARMMMCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_ARMMMCI new file mode 100644 index 000000000000..0b49cccef133 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_ARMMMCI @@ -0,0 +1 @@ +CONFIG_MMC_ARMMMCI=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW new file mode 100644 index 000000000000..163b7bed55b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW @@ -0,0 +1 @@ +CONFIG_MMC_DW=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_BLUEFIELD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_BLUEFIELD new file mode 100644 index 000000000000..c18a0e09be3e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_BLUEFIELD @@ -0,0 +1 @@ +CONFIG_MMC_DW_BLUEFIELD=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_EXYNOS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_EXYNOS new file mode 100644 index 000000000000..36913703dae6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_EXYNOS @@ -0,0 +1 @@ +# CONFIG_MMC_DW_EXYNOS is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_HI3798CV200 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_HI3798CV200 new file mode 100644 index 000000000000..27bb58f91d19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_HI3798CV200 @@ -0,0 +1 @@ +# CONFIG_MMC_DW_HI3798CV200 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_K3 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_K3 new file mode 100644 index 000000000000..9ab75ac118e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_K3 @@ -0,0 +1 @@ +# CONFIG_MMC_DW_K3 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_PCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_PCI new file mode 100644 index 000000000000..29336885e28a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_PCI @@ -0,0 +1 @@ +# CONFIG_MMC_DW_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_PLTFM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_PLTFM new file mode 100644 index 000000000000..c9318bc4fe47 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_DW_PLTFM @@ -0,0 +1 @@ +CONFIG_MMC_DW_PLTFM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_MTK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_MTK new file mode 100644 index 000000000000..8d1b25c489e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_MTK @@ -0,0 +1 @@ +CONFIG_MMC_MTK=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_CADENCE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_CADENCE new file mode 100644 index 000000000000..db3e24055f68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_CADENCE @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_CADENCE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MILBEAUT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MILBEAUT new file mode 100644 index 000000000000..c67ef179b9be --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MILBEAUT @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_MILBEAUT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MSM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MSM new file mode 100644 index 000000000000..e9997a15fb0f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_MSM @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_MSM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_ARASAN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_ARASAN new file mode 100644 index 000000000000..c8021b0083e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_ARASAN @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_OF_ARASAN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_AT91 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_AT91 new file mode 100644 index 000000000000..933195ce7f1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_AT91 @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_OF_AT91 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_DWCMSHC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_DWCMSHC new file mode 100644 index 000000000000..a0ac36500753 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_OF_DWCMSHC @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_STM32_SDMMC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_STM32_SDMMC new file mode 100644 index 000000000000..a4209fe9ce12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_STM32_SDMMC @@ -0,0 +1 @@ +CONFIG_MMC_STM32_SDMMC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_TOSHIBA_PCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_TOSHIBA_PCI new file mode 100644 index 000000000000..9372cd4fe432 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_TOSHIBA_PCI @@ -0,0 +1 @@ +CONFIG_MMC_TOSHIBA_PCI=m diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_APPLETOUCH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_APPLETOUCH new file mode 100644 index 000000000000..57f14f837286 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_APPLETOUCH @@ -0,0 +1 @@ +# CONFIG_MOUSE_APPLETOUCH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_BCM5974 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_BCM5974 new file mode 100644 index 000000000000..6b4472cc7be7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_BCM5974 @@ -0,0 +1 @@ +# CONFIG_MOUSE_BCM5974 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_CYAPA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_CYAPA new file mode 100644 index 000000000000..e577ccf187ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_CYAPA @@ -0,0 +1 @@ +# CONFIG_MOUSE_CYAPA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_ELAN_I2C_SMBUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_ELAN_I2C_SMBUS new file mode 100644 index 000000000000..14ce73f7565b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_ELAN_I2C_SMBUS @@ -0,0 +1 @@ +CONFIG_MOUSE_ELAN_I2C_SMBUS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_SERIAL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_SERIAL new file mode 100644 index 000000000000..9e443d06f91c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_SERIAL @@ -0,0 +1 @@ +# CONFIG_MOUSE_SERIAL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_VSXXXAA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_VSXXXAA new file mode 100644 index 000000000000..8e411d15c47e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_VSXXXAA @@ -0,0 +1 @@ +# CONFIG_MOUSE_VSXXXAA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOXTET b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOXTET new file mode 100644 index 000000000000..d141565b64b1 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOXTET @@ -0,0 +1 @@ +# CONFIG_MOXTET is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_AFS_PARTS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_AFS_PARTS new file mode 100644 index 000000000000..0abf6cc8eaad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_AFS_PARTS @@ -0,0 +1 @@ +# CONFIG_MTD_AFS_PARTS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI new file mode 100644 index 000000000000..bc8c1b815712 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI @@ -0,0 +1 @@ +CONFIG_MTD_CFI=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_ADV_OPTIONS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_ADV_OPTIONS new file mode 100644 index 000000000000..29d7bb0b3251 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_ADV_OPTIONS @@ -0,0 +1 @@ +# CONFIG_MTD_CFI_ADV_OPTIONS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_AMDSTD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_AMDSTD new file mode 100644 index 000000000000..cbc688f2f218 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_AMDSTD @@ -0,0 +1 @@ +CONFIG_MTD_CFI_AMDSTD=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_INTELEXT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_INTELEXT new file mode 100644 index 000000000000..01c8ff426c97 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_INTELEXT @@ -0,0 +1 @@ +CONFIG_MTD_CFI_INTELEXT=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_STAA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_STAA new file mode 100644 index 000000000000..67ee4aed7b14 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_STAA @@ -0,0 +1 @@ +CONFIG_MTD_CFI_STAA=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_UTIL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_UTIL new file mode 
100644 index 000000000000..7610710766f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_CFI_UTIL @@ -0,0 +1 @@ +CONFIG_MTD_CFI_UTIL=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_GEN_PROBE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_GEN_PROBE new file mode 100644 index 000000000000..17a50bd6ddf9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_GEN_PROBE @@ -0,0 +1 @@ +CONFIG_MTD_GEN_PROBE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_OF_PARTS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_OF_PARTS new file mode 100644 index 000000000000..bddcc3b5e83e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_OF_PARTS @@ -0,0 +1 @@ +CONFIG_MTD_OF_PARTS=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP new file mode 100644 index 000000000000..8b99e3061f88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP @@ -0,0 +1 @@ +CONFIG_MTD_PHYSMAP=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_COMPAT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_COMPAT new file mode 100644 index 000000000000..bbb6119b5cd6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_COMPAT @@ -0,0 +1 @@ +# CONFIG_MTD_PHYSMAP_COMPAT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_OF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_OF new file mode 100644 index 000000000000..20f59ebc7e4d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_PHYSMAP_OF @@ -0,0 +1 @@ +# CONFIG_MTD_PHYSMAP_OF is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MV_XOR_V2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MV_XOR_V2 new file mode 100644 index 000000000000..a6e590eb8884 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MV_XOR_V2 @@ -0,0 +1 @@ +# CONFIG_MV_XOR_V2 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MYRI10GE 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MYRI10GE new file mode 100644 index 000000000000..231d963dbb19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MYRI10GE @@ -0,0 +1 @@ +# CONFIG_MYRI10GE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ND_BTT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ND_BTT new file mode 100644 index 000000000000..d4e20e237e30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ND_BTT @@ -0,0 +1 @@ +CONFIG_ND_BTT=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ND_PFN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ND_PFN new file mode 100644 index 000000000000..8dd69e19b404 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ND_PFN @@ -0,0 +1 @@ +CONFIG_ND_PFN=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP new file mode 100644 index 000000000000..ad5ff3969571 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP @@ -0,0 +1 @@ +CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_AMD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_AMD new file mode 100644 index 000000000000..f67eb8fad709 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_AMD @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_AMD=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_BROCADE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_BROCADE new file mode 100644 index 000000000000..fed5e48a410a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_BROCADE @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_BROCADE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_CISCO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_CISCO new file mode 100644 index 000000000000..7b4bdaa25a95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_CISCO @@ -0,0 +1 @@ +# 
CONFIG_NET_VENDOR_CISCO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_DEC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_DEC new file mode 100644 index 000000000000..7c3a697fb78d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_DEC @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_DEC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_EMULEX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_EMULEX new file mode 100644 index 000000000000..e48620d8d36d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_EMULEX @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_EMULEX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_QUALCOMM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_QUALCOMM new file mode 100644 index 000000000000..46be71a9047e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_VENDOR_QUALCOMM @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_QUALCOMM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_XGENE new file mode 100644 index 000000000000..57527a6168ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_XGENE @@ -0,0 +1 @@ +CONFIG_NET_XGENE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_XGENE_V2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_XGENE_V2 new file mode 100644 index 000000000000..7d5cbcdbf8b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NET_XGENE_V2 @@ -0,0 +1 @@ +CONFIG_NET_XGENE_V2=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOZOMI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOZOMI new file mode 100644 index 000000000000..35ef1afd52b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOZOMI @@ -0,0 +1 @@ +# CONFIG_NOZOMI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVHE_EL2_DEBUG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVHE_EL2_DEBUG new file mode 100644 index 000000000000..4b6b822e4808 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVHE_EL2_DEBUG @@ -0,0 +1 @@ +# CONFIG_NVHE_EL2_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVIDIA_CARMEL_CNP_ERRATUM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVIDIA_CARMEL_CNP_ERRATUM new file mode 100644 index 000000000000..36cdd53cf483 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVIDIA_CARMEL_CNP_ERRATUM @@ -0,0 +1 @@ +CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_QCOM_QFPROM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_QCOM_QFPROM new file mode 100644 index 000000000000..fbed6bf6b9a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_QCOM_QFPROM @@ -0,0 +1 @@ +# CONFIG_NVMEM_QCOM_QFPROM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_QCOM_SEC_QFPROM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_QCOM_SEC_QFPROM new file mode 100644 index 000000000000..2844f81892cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_QCOM_SEC_QFPROM @@ -0,0 +1 @@ +# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_REBOOT_MODE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_REBOOT_MODE new file mode 100644 index 000000000000..44f3649815ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_REBOOT_MODE @@ -0,0 +1 @@ +# CONFIG_NVMEM_REBOOT_MODE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_U_BOOT_ENV b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_U_BOOT_ENV new file mode 100644 index 000000000000..8db0f4878b02 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NVMEM_U_BOOT_ENV @@ -0,0 +1 @@ +# CONFIG_NVMEM_U_BOOT_ENV is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF new file mode 100644 index 000000000000..b7345dd59430 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF @@ -0,0 +1 @@ +CONFIG_OF=y diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_ADDRESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_ADDRESS new file mode 100644 index 000000000000..1c5bd9918b59 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_ADDRESS @@ -0,0 +1 @@ +CONFIG_OF_ADDRESS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_DYNAMIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_DYNAMIC new file mode 100644 index 000000000000..b5c03535f542 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_DYNAMIC @@ -0,0 +1 @@ +CONFIG_OF_DYNAMIC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_EARLY_FLATTREE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_EARLY_FLATTREE new file mode 100644 index 000000000000..f71bca86c30d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_EARLY_FLATTREE @@ -0,0 +1 @@ +CONFIG_OF_EARLY_FLATTREE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_FLATTREE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_FLATTREE new file mode 100644 index 000000000000..5738a15c0cac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_FLATTREE @@ -0,0 +1 @@ +CONFIG_OF_FLATTREE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_GPIO new file mode 100644 index 000000000000..13fbd11f9ab4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_GPIO @@ -0,0 +1 @@ +CONFIG_OF_GPIO=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_IOMMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_IOMMU new file mode 100644 index 000000000000..994e558eded0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_IOMMU @@ -0,0 +1 @@ +CONFIG_OF_IOMMU=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_IRQ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_IRQ new file mode 100644 index 000000000000..aa09892b46e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_IRQ @@ -0,0 +1 @@ +CONFIG_OF_IRQ=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_KOBJ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_KOBJ new file mode 100644 index 000000000000..e0923313484e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_KOBJ @@ -0,0 +1 @@ +CONFIG_OF_KOBJ=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_MDIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_MDIO new file mode 100644 index 000000000000..cbdf2ee2f6c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_MDIO @@ -0,0 +1 @@ +CONFIG_OF_MDIO=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_NUMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_NUMA new file mode 100644 index 000000000000..288c3c8f1a9e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_NUMA @@ -0,0 +1 @@ +CONFIG_OF_NUMA=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_OVERLAY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_OVERLAY new file mode 100644 index 000000000000..30ae0afc56dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_OVERLAY @@ -0,0 +1 @@ +CONFIG_OF_OVERLAY=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_PMEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_PMEM new file mode 100644 index 000000000000..71309bbab176 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_PMEM @@ -0,0 +1 @@ +CONFIG_OF_PMEM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_RESERVED_MEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_RESERVED_MEM new file mode 100644 index 000000000000..b826a1f71630 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_RESERVED_MEM @@ -0,0 +1 @@ +CONFIG_OF_RESERVED_MEM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_RESOLVE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_RESOLVE new file mode 100644 index 000000000000..2b68a809d7b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_RESOLVE @@ -0,0 +1 @@ +CONFIG_OF_RESOLVE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_UNITTEST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_UNITTEST new file mode 100644 index 
000000000000..f9773f73dec6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OF_UNITTEST @@ -0,0 +1 @@ +# CONFIG_OF_UNITTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPEN_DICE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPEN_DICE new file mode 100644 index 000000000000..65a55add3c12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPEN_DICE @@ -0,0 +1 @@ +# CONFIG_OPEN_DICE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPTEE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPTEE new file mode 100644 index 000000000000..1f66abf9737b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OPTEE @@ -0,0 +1 @@ +# CONFIG_OPTEE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OSF_PARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OSF_PARTITION new file mode 100644 index 000000000000..50b3cb64eff6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_OSF_PARTITION @@ -0,0 +1 @@ +# CONFIG_OSF_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PARPORT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PARPORT new file mode 100644 index 000000000000..9dd8f33af36e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PARPORT @@ -0,0 +1 @@ +# CONFIG_PARPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PARTITION_PERCPU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PARTITION_PERCPU new file mode 100644 index 000000000000..7cc3f6141697 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PARTITION_PERCPU @@ -0,0 +1 @@ +CONFIG_PARTITION_PERCPU=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PATA_OF_PLATFORM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PATA_OF_PLATFORM new file mode 100644 index 000000000000..4df88fc88969 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PATA_OF_PLATFORM @@ -0,0 +1 @@ +# CONFIG_PATA_OF_PLATFORM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_AL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_AL 
new file mode 100644 index 000000000000..4cfae7f6a2e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_AL @@ -0,0 +1 @@ +# CONFIG_PCIE_AL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_ALTERA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_ALTERA new file mode 100644 index 000000000000..98cb6dd64d36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_ALTERA @@ -0,0 +1 @@ +# CONFIG_PCIE_ALTERA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_CADENCE_PLAT_HOST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_CADENCE_PLAT_HOST new file mode 100644 index 000000000000..23a74e2eb0f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_CADENCE_PLAT_HOST @@ -0,0 +1 @@ +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW new file mode 100644 index 000000000000..8c266b96d02f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW @@ -0,0 +1 @@ +CONFIG_PCIE_DW=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW_HOST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW_HOST new file mode 100644 index 000000000000..6aecdd9c1e44 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_DW_HOST @@ -0,0 +1 @@ +CONFIG_PCIE_DW_HOST=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_ERR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_ERR new file mode 100644 index 000000000000..be5d9b1ba76a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_ERR @@ -0,0 +1 @@ +# CONFIG_PCIE_HISI_ERR is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_STB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_STB new file mode 100644 index 000000000000..6504604cc4b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_HISI_STB @@ -0,0 +1 @@ +# CONFIG_PCIE_HISI_STB is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_KIRIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_KIRIN new file mode 100644 index 000000000000..18352d4b3f7e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_KIRIN @@ -0,0 +1 @@ +# CONFIG_PCIE_KIRIN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_MICROCHIP_HOST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_MICROCHIP_HOST new file mode 100644 index 000000000000..e19d7651202a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_MICROCHIP_HOST @@ -0,0 +1 @@ +# CONFIG_PCIE_MICROCHIP_HOST is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_QCOM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_QCOM new file mode 100644 index 000000000000..363ef92c1fa4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_QCOM @@ -0,0 +1 @@ +# CONFIG_PCIE_QCOM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_XILINX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_XILINX new file mode 100644 index 000000000000..510085b1fb2f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCIE_XILINX @@ -0,0 +1 @@ +# CONFIG_PCIE_XILINX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_DOMAINS_GENERIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_DOMAINS_GENERIC new file mode 100644 index 000000000000..eee89e99220a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_DOMAINS_GENERIC @@ -0,0 +1 @@ +CONFIG_PCI_DOMAINS_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_DYNAMIC_OF_NODES b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_DYNAMIC_OF_NODES new file mode 100644 index 000000000000..1b8c0f8adcc2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_DYNAMIC_OF_NODES @@ -0,0 +1 @@ +# CONFIG_PCI_DYNAMIC_OF_NODES is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_ECAM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_ECAM new file mode 100644 index 000000000000..cf0869e9a1d6 --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_ECAM @@ -0,0 +1 @@ +CONFIG_PCI_ECAM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_FTPCI100 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_FTPCI100 new file mode 100644 index 000000000000..f9fe5b6ea0fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_FTPCI100 @@ -0,0 +1 @@ +# CONFIG_PCI_FTPCI100 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HISI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HISI new file mode 100644 index 000000000000..468f3e431142 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HISI @@ -0,0 +1 @@ +CONFIG_PCI_HISI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_COMMON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_COMMON new file mode 100644 index 000000000000..7269a6fb8a33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_COMMON @@ -0,0 +1 @@ +CONFIG_PCI_HOST_COMMON=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_THUNDER_ECAM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_THUNDER_ECAM new file mode 100644 index 000000000000..a812c3179353 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_THUNDER_ECAM @@ -0,0 +1 @@ +CONFIG_PCI_HOST_THUNDER_ECAM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_THUNDER_PEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_THUNDER_PEM new file mode 100644 index 000000000000..6c45e0930606 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_HOST_THUNDER_PEM @@ -0,0 +1 @@ +CONFIG_PCI_HOST_THUNDER_PEM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_J721E_HOST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_J721E_HOST new file mode 100644 index 000000000000..11ea915c1b27 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_J721E_HOST @@ -0,0 +1 @@ +# CONFIG_PCI_J721E_HOST is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_SYSCALL 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_SYSCALL new file mode 100644 index 000000000000..fc3b9475ac10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_SYSCALL @@ -0,0 +1 @@ +CONFIG_PCI_SYSCALL=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_XGENE new file mode 100644 index 000000000000..85317304c8ca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_XGENE @@ -0,0 +1 @@ +CONFIG_PCI_XGENE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_XGENE_MSI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_XGENE_MSI new file mode 100644 index 000000000000..ce07c62c8c42 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_XGENE_MSI @@ -0,0 +1 @@ +CONFIG_PCI_XGENE_MSI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCNET32 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCNET32 new file mode 100644 index 000000000000..fc9f806ab79a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCNET32 @@ -0,0 +1 @@ +# CONFIG_PCNET32 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PDS_CORE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PDS_CORE new file mode 100644 index 000000000000..3ed21ba14b1f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PDS_CORE @@ -0,0 +1 @@ +# CONFIG_PDS_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PERF_USE_VMALLOC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PERF_USE_VMALLOC new file mode 100644 index 000000000000..d166723c2229 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PERF_USE_VMALLOC @@ -0,0 +1 @@ +CONFIG_PERF_USE_VMALLOC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHYLIB_LEDS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHYLIB_LEDS new file mode 100644 index 000000000000..24479deda0da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHYLIB_LEDS @@ -0,0 +1 @@ +CONFIG_PHYLIB_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY new file mode 100644 index 000000000000..7b45af693a12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY @@ -0,0 +1 @@ +# CONFIG_PHY_CADENCE_DPHY is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY_RX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY_RX new file mode 100644 index 000000000000..3db4592f5bda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_DPHY_RX @@ -0,0 +1 @@ +# CONFIG_PHY_CADENCE_DPHY_RX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_SALVO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_SALVO new file mode 100644 index 000000000000..bb0551527d7e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_SALVO @@ -0,0 +1 @@ +# CONFIG_PHY_CADENCE_SALVO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_SIERRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_SIERRA new file mode 100644 index 000000000000..672fe76013ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_SIERRA @@ -0,0 +1 @@ +# CONFIG_PHY_CADENCE_SIERRA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_TORRENT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_TORRENT new file mode 100644 index 000000000000..070906386a71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_CADENCE_TORRENT @@ -0,0 +1 @@ +# CONFIG_PHY_CADENCE_TORRENT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3660_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3660_USB new file mode 100644 index 000000000000..bdb7df17f7b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3660_USB @@ -0,0 +1 @@ +# CONFIG_PHY_HI3660_USB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_PCIE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_PCIE new file mode 100644 index 
000000000000..f17343f12ff9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_PCIE @@ -0,0 +1 @@ +# CONFIG_PHY_HI3670_PCIE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_USB new file mode 100644 index 000000000000..9d80fd694224 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI3670_USB @@ -0,0 +1 @@ +# CONFIG_PHY_HI3670_USB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI6220_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI6220_USB new file mode 100644 index 000000000000..462bb7c31671 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HI6220_USB @@ -0,0 +1 @@ +CONFIG_PHY_HI6220_USB=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HISI_INNO_USB2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HISI_INNO_USB2 new file mode 100644 index 000000000000..fb2367b44f5b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HISI_INNO_USB2 @@ -0,0 +1 @@ +# CONFIG_PHY_HISI_INNO_USB2 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HISTB_COMBPHY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HISTB_COMBPHY new file mode 100644 index 000000000000..2d83cdd2368f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_HISTB_COMBPHY @@ -0,0 +1 @@ +# CONFIG_PHY_HISTB_COMBPHY is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_LAN966X_SERDES b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_LAN966X_SERDES new file mode 100644 index 000000000000..ae926852ada2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_LAN966X_SERDES @@ -0,0 +1 @@ +# CONFIG_PHY_LAN966X_SERDES is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_MAPPHONE_MDM6600 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_MAPPHONE_MDM6600 new file mode 100644 index 000000000000..e6ad9bd4c45a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_MAPPHONE_MDM6600 @@ -0,0 +1 @@ +# 
CONFIG_PHY_MAPPHONE_MDM6600 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_OCELOT_SERDES b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_OCELOT_SERDES new file mode 100644 index 000000000000..58eceea1dec1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_OCELOT_SERDES @@ -0,0 +1 @@ +# CONFIG_PHY_OCELOT_SERDES is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_APQ8064_SATA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_APQ8064_SATA new file mode 100644 index 000000000000..bb25b4634d97 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_APQ8064_SATA @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_APQ8064_SATA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EDP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EDP new file mode 100644 index 000000000000..51417886dd82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EDP @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_EDP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EUSB2_REPEATER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EUSB2_REPEATER new file mode 100644 index 000000000000..beac208b79c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_EUSB2_REPEATER @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ4019_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ4019_USB new file mode 100644 index 000000000000..feefe01f6694 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ4019_USB @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_IPQ4019_USB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ806X_SATA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ806X_SATA new file mode 100644 index 000000000000..360f739273e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ806X_SATA @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ806X_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ806X_USB new file mode 100644 index 000000000000..de880c0fb3a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_IPQ806X_USB @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_IPQ806X_USB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_M31_USB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_M31_USB new file mode 100644 index 000000000000..fb97792a3dc8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_M31_USB @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_M31_USB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_PCIE2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_PCIE2 new file mode 100644 index 000000000000..69c59e99a827 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_PCIE2 @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_PCIE2 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QMP new file mode 100644 index 000000000000..a0f4ab70489c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QMP @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_QMP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QUSB2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QUSB2 new file mode 100644 index 000000000000..9c957ac9a79c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_QUSB2 @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_QUSB2 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_SGMII_ETH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_SGMII_ETH new file mode 100644 index 000000000000..310805c0707c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_SGMII_ETH @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_SGMII_ETH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_SNPS_EUSB2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_SNPS_EUSB2 new file 
mode 100644 index 000000000000..ac6ac96c6479 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_SNPS_EUSB2 @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS new file mode 100644 index 000000000000..ce9808509ffb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_USB_HS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HSIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HSIC new file mode 100644 index 000000000000..2a37d673c0b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HSIC @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_USB_HSIC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS_28NM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS_28NM new file mode 100644 index 000000000000..1f67fe251dd0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_HS_28NM @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_USB_HS_28NM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 new file mode 100644 index 000000000000..7f13bc31554a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SS new file mode 100644 index 000000000000..0213a42d7488 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_QCOM_USB_SS @@ -0,0 +1 @@ +# CONFIG_PHY_QCOM_USB_SS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TUSB1210 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TUSB1210 new file mode 100644 index 000000000000..39d68df37d77 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TUSB1210 @@ -0,0 +1 @@ +# CONFIG_PHY_TUSB1210 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_XGENE new file mode 100644 index 000000000000..8d9f368f3d65 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_XGENE @@ -0,0 +1 @@ +CONFIG_PHY_XGENE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PI433 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PI433 new file mode 100644 index 000000000000..b275e1e6ae91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PI433 @@ -0,0 +1 @@ +# CONFIG_PI433 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5018 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5018 new file mode 100644 index 000000000000..2cceeac3b451 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5018 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_IPQ5018 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5332 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5332 new file mode 100644 index 000000000000..055666b1f0aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ5332 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_IPQ5332 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ6018 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ6018 new file mode 100644 index 000000000000..2a4b31643e77 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ6018 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_IPQ6018 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ8074 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ8074 new file mode 100644 index 000000000000..3cb74ba4b2be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ8074 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_IPQ8074 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ9574 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ9574 new file mode 100644 index 000000000000..2d6a81f4fe09 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_IPQ9574 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_IPQ9574 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_LPASS_LPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_LPASS_LPI new file mode 100644 index 000000000000..e76dab69c9e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_LPASS_LPI @@ -0,0 +1 @@ +# CONFIG_PINCTRL_LPASS_LPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MDM9607 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MDM9607 new file mode 100644 index 000000000000..e2d4a4343360 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MDM9607 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_MDM9607 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MICROCHIP_SGPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MICROCHIP_SGPIO new file mode 100644 index 000000000000..e8878348968c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MICROCHIP_SGPIO @@ -0,0 +1 @@ +# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM new file mode 100644 index 000000000000..b042dc93b7b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM @@ -0,0 +1 @@ +CONFIG_PINCTRL_MSM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8916 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8916 new file mode 100644 index 000000000000..faea54e94d41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8916 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_MSM8916 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8953 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8953 new file mode 100644 index 000000000000..fa88033ad5be --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8953 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_MSM8953 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8976 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8976 new file mode 100644 index 000000000000..05e860247d20 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8976 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_MSM8976 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8994 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8994 new file mode 100644 index 000000000000..977b1c3c4304 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8994 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_MSM8994 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8996 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8996 new file mode 100644 index 000000000000..dc49ba08c0f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8996 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_MSM8996 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8998 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8998 new file mode 100644 index 000000000000..29cb6660e724 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_MSM8998 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_MSM8998 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_OCELOT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_OCELOT new file mode 100644 index 000000000000..60a20dba75d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_OCELOT @@ -0,0 +1 @@ +# CONFIG_PINCTRL_OCELOT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCM2290 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCM2290 new file mode 100644 index 000000000000..9e5ce28f36d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCM2290 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_QCM2290 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCOM_SSBI_PMIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCOM_SSBI_PMIC new file mode 100644 index 000000000000..bc00fae73a5a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCOM_SSBI_PMIC @@ -0,0 +1 @@ +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCS404 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCS404 new file mode 100644 index 000000000000..5ac31f57a446 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QCS404 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_QCS404 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDF2XXX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDF2XXX new file mode 100644 index 000000000000..e8dca820de4d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDF2XXX @@ -0,0 +1 @@ +CONFIG_PINCTRL_QDF2XXX=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDU1000 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDU1000 new file mode 100644 index 000000000000..dccaf90c6dc6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_QDU1000 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_QDU1000 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SA8775P b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SA8775P new file mode 100644 index 000000000000..76ee372169ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SA8775P @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SA8775P is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7180 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7180 new file mode 100644 index 000000000000..797cd04fb863 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7180 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SC7180 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7280 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7280 new file mode 100644 index 
000000000000..13cf0a83eca4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC7280 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SC7280 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8180X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8180X new file mode 100644 index 000000000000..764aa990ee8d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8180X @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SC8180X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8280XP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8280XP new file mode 100644 index 000000000000..323becbcf94c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SC8280XP @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SC8280XP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM660 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM660 new file mode 100644 index 000000000000..e646188fa0a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM660 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SDM660 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM670 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM670 new file mode 100644 index 000000000000..f7825f006558 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM670 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SDM670 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM845 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM845 new file mode 100644 index 000000000000..425ac0b4d6e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDM845 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SDM845 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDX75 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDX75 new file mode 100644 index 000000000000..a147a5432090 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SDX75 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SDX75 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SINGLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SINGLE new file mode 100644 index 000000000000..4142920af3e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SINGLE @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SINGLE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6115 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6115 new file mode 100644 index 000000000000..d82fbc402285 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6115 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SM6115 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6125 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6125 new file mode 100644 index 000000000000..c6e13a9ae9be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6125 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SM6125 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6350 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6350 new file mode 100644 index 000000000000..6d097b1a825b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6350 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SM6350 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6375 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6375 new file mode 100644 index 000000000000..b0269d0465e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM6375 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SM6375 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM7150 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM7150 new file mode 100644 index 000000000000..1780f3f3cebd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM7150 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SM7150 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8150 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8150 new file mode 100644 index 000000000000..b4bb081d9385 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8150 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SM8150 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8250 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8250 new file mode 100644 index 000000000000..490872420c0e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8250 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SM8250 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8350 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8350 new file mode 100644 index 000000000000..002dd3fbd3c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8350 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SM8350 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8450 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8450 new file mode 100644 index 000000000000..601239bd2d1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8450 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SM8450 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8550 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8550 new file mode 100644 index 000000000000..48641b755f6b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_SM8550 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SM8550 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_STMFX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_STMFX new file mode 100644 index 000000000000..dd3a3a31f2ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_STMFX @@ -0,0 +1 @@ +# CONFIG_PINCTRL_STMFX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PL320_MBOX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PL320_MBOX new file mode 100644 index 000000000000..1cdbb24bbdee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PL320_MBOX @@ -0,0 +1 @@ +# CONFIG_PL320_MBOX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PL330_DMA 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PL330_DMA new file mode 100644 index 000000000000..0e0863a5c0d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PL330_DMA @@ -0,0 +1 @@ +# CONFIG_PL330_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PLATFORM_MHU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PLATFORM_MHU new file mode 100644 index 000000000000..b30b5761332b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PLATFORM_MHU @@ -0,0 +1 @@ +# CONFIG_PLATFORM_MHU is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PMIC_OPREGION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PMIC_OPREGION new file mode 100644 index 000000000000..15102fe450ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PMIC_OPREGION @@ -0,0 +1 @@ +# CONFIG_PMIC_OPREGION is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS new file mode 100644 index 000000000000..1b1ea25d197a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS @@ -0,0 +1 @@ +CONFIG_PM_GENERIC_DOMAINS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS_OF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS_OF new file mode 100644 index 000000000000..e878dafb6029 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS_OF @@ -0,0 +1 @@ +CONFIG_PM_GENERIC_DOMAINS_OF=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS_SLEEP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS_SLEEP new file mode 100644 index 000000000000..279fc3496158 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_GENERIC_DOMAINS_SLEEP @@ -0,0 +1 @@ +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PNP_DEBUG_MESSAGES b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PNP_DEBUG_MESSAGES new file mode 100644 index 000000000000..227307038cff --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PNP_DEBUG_MESSAGES @@ -0,0 +1 @@ +CONFIG_PNP_DEBUG_MESSAGES=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_BRCMSTB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_BRCMSTB new file mode 100644 index 000000000000..35f35e595ebb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_BRCMSTB @@ -0,0 +1 @@ +# CONFIG_POWER_RESET_BRCMSTB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO new file mode 100644 index 000000000000..dac784d6c530 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO @@ -0,0 +1 @@ +CONFIG_POWER_RESET_GPIO=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO_RESTART b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO_RESTART new file mode 100644 index 000000000000..cb0a3228ba04 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_GPIO_RESTART @@ -0,0 +1 @@ +CONFIG_POWER_RESET_GPIO_RESTART=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_HISI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_HISI new file mode 100644 index 000000000000..c5516db40ad1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_HISI @@ -0,0 +1 @@ +CONFIG_POWER_RESET_HISI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_LTC2952 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_LTC2952 new file mode 100644 index 000000000000..33d2e69bd0e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_LTC2952 @@ -0,0 +1 @@ +# CONFIG_POWER_RESET_LTC2952 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_MSM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_MSM new file mode 100644 index 000000000000..b03b124a53ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_MSM @@ -0,0 +1 @@ +# CONFIG_POWER_RESET_MSM is not set diff 
--git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_REGULATOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_REGULATOR new file mode 100644 index 000000000000..712536cfc88f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_REGULATOR @@ -0,0 +1 @@ +# CONFIG_POWER_RESET_REGULATOR is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_RESTART b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_RESTART new file mode 100644 index 000000000000..5b8c6398dff6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_RESTART @@ -0,0 +1 @@ +CONFIG_POWER_RESET_RESTART=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_SYSCON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_SYSCON new file mode 100644 index 000000000000..d0db157033ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_SYSCON @@ -0,0 +1 @@ +CONFIG_POWER_RESET_SYSCON=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_SYSCON_POWEROFF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_SYSCON_POWEROFF new file mode 100644 index 000000000000..72673e06d02f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_SYSCON_POWEROFF @@ -0,0 +1 @@ +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_VEXPRESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_VEXPRESS new file mode 100644 index 000000000000..6b285d5c913a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_VEXPRESS @@ -0,0 +1 @@ +# CONFIG_POWER_RESET_VEXPRESS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_XGENE new file mode 100644 index 000000000000..70d1b925462b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_POWER_RESET_XGENE @@ -0,0 +1 @@ +# CONFIG_POWER_RESET_XGENE is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PTP_1588_CLOCK_KVM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PTP_1588_CLOCK_KVM new file mode 100644 index 000000000000..647dbe3b5529 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PTP_1588_CLOCK_KVM @@ -0,0 +1 @@ +CONFIG_PTP_1588_CLOCK_KVM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_FSL_FTM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_FSL_FTM new file mode 100644 index 000000000000..8bd1025eeae0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_FSL_FTM @@ -0,0 +1 @@ +# CONFIG_PWM_FSL_FTM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_HIBVT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_HIBVT new file mode 100644 index 000000000000..7527f89ecc04 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_HIBVT @@ -0,0 +1 @@ +# CONFIG_PWM_HIBVT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWRSEQ_EMMC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWRSEQ_EMMC new file mode 100644 index 000000000000..4f3ddc111d4e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWRSEQ_EMMC @@ -0,0 +1 @@ +# CONFIG_PWRSEQ_EMMC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWRSEQ_SIMPLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWRSEQ_SIMPLE new file mode 100644 index 000000000000..b15e391d3d0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWRSEQ_SIMPLE @@ -0,0 +1 @@ +# CONFIG_PWRSEQ_SIMPLE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCA7000_SPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCA7000_SPI new file mode 100644 index 000000000000..9e9088849d90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCA7000_SPI @@ -0,0 +1 @@ +# CONFIG_QCA7000_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_AOSS_QMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_AOSS_QMP new file mode 100644 index 000000000000..a51484e8f453 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_AOSS_QMP @@ -0,0 +1 @@ +# CONFIG_QCOM_AOSS_QMP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_APCS_IPC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_APCS_IPC new file mode 100644 index 000000000000..2d27dfbee0a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_APCS_IPC @@ -0,0 +1 @@ +# CONFIG_QCOM_APCS_IPC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_BAM_DMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_BAM_DMA new file mode 100644 index 000000000000..a36fb42914ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_BAM_DMA @@ -0,0 +1 @@ +# CONFIG_QCOM_BAM_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_COMMAND_DB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_COMMAND_DB new file mode 100644 index 000000000000..50311c7b5a77 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_COMMAND_DB @@ -0,0 +1 @@ +# CONFIG_QCOM_COMMAND_DB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_CPR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_CPR new file mode 100644 index 000000000000..96eea8d43019 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_CPR @@ -0,0 +1 @@ +# CONFIG_QCOM_CPR is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_EBI2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_EBI2 new file mode 100644 index 000000000000..e96b3544e964 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_EBI2 @@ -0,0 +1 @@ +# CONFIG_QCOM_EBI2 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_EMAC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_EMAC new file mode 100644 index 000000000000..4e3fa019bd8f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_EMAC @@ -0,0 +1 @@ +CONFIG_QCOM_EMAC=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_GENI_SE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_GENI_SE new file mode 100644 index 
000000000000..ac6e626794eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_GENI_SE @@ -0,0 +1 @@ +# CONFIG_QCOM_GENI_SE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_GSBI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_GSBI new file mode 100644 index 000000000000..43946f18a1b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_GSBI @@ -0,0 +1 @@ +# CONFIG_QCOM_GSBI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA new file mode 100644 index 000000000000..a5442952ffbb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA @@ -0,0 +1 @@ +CONFIG_QCOM_HIDMA=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA_MGMT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA_MGMT new file mode 100644 index 000000000000..8085b2ae4b3f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_HIDMA_MGMT @@ -0,0 +1 @@ +CONFIG_QCOM_HIDMA_MGMT=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IOMMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IOMMU new file mode 100644 index 000000000000..04124422783a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IOMMU @@ -0,0 +1 @@ +# CONFIG_QCOM_IOMMU is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IPCC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IPCC new file mode 100644 index 000000000000..a799e3d9b6e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IPCC @@ -0,0 +1 @@ +# CONFIG_QCOM_IPCC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IRQ_COMBINER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IRQ_COMBINER new file mode 100644 index 000000000000..6c23d15f753b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_IRQ_COMBINER @@ -0,0 +1 @@ +CONFIG_QCOM_IRQ_COMBINER=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_KRYO_L2_ACCESSORS 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_KRYO_L2_ACCESSORS new file mode 100644 index 000000000000..ce5f413638be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_KRYO_L2_ACCESSORS @@ -0,0 +1 @@ +CONFIG_QCOM_KRYO_L2_ACCESSORS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_L2_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_L2_PMU new file mode 100644 index 000000000000..2a553c8b17fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_L2_PMU @@ -0,0 +1 @@ +CONFIG_QCOM_L2_PMU=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_L3_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_L3_PMU new file mode 100644 index 000000000000..ed899d66bc21 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_L3_PMU @@ -0,0 +1 @@ +CONFIG_QCOM_L3_PMU=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_LLCC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_LLCC new file mode 100644 index 000000000000..5372311adc73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_LLCC @@ -0,0 +1 @@ +# CONFIG_QCOM_LLCC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_OCMEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_OCMEM new file mode 100644 index 000000000000..21e1c3dcdfe2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_OCMEM @@ -0,0 +1 @@ +# CONFIG_QCOM_OCMEM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_PDC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_PDC new file mode 100644 index 000000000000..1e7c24135e20 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_PDC @@ -0,0 +1 @@ +# CONFIG_QCOM_PDC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_RMTFS_MEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_RMTFS_MEM new file mode 100644 index 000000000000..87425e103788 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_RMTFS_MEM @@ -0,0 +1 @@ +# CONFIG_QCOM_RMTFS_MEM is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_RPMH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_RPMH new file mode 100644 index 000000000000..f4736e823258 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_RPMH @@ -0,0 +1 @@ +# CONFIG_QCOM_RPMH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_SMEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_SMEM new file mode 100644 index 000000000000..59069977efe0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_SMEM @@ -0,0 +1 @@ +# CONFIG_QCOM_SMEM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_WDT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_WDT new file mode 100644 index 000000000000..ca08cee25d93 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QCOM_WDT @@ -0,0 +1 @@ +# CONFIG_QCOM_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QLGE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QLGE new file mode 100644 index 000000000000..7cf2571c81df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QLGE @@ -0,0 +1 @@ +# CONFIG_QLGE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QUICC_ENGINE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QUICC_ENGINE new file mode 100644 index 000000000000..b340a0279cfd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_QUICC_ENGINE @@ -0,0 +1 @@ +# CONFIG_QUICC_ENGINE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RC_CORE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RC_CORE new file mode 100644 index 000000000000..d44890bc9dfb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RC_CORE @@ -0,0 +1 @@ +# CONFIG_RC_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGMAP_MMIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGMAP_MMIO new file mode 100644 index 000000000000..2e7e1299d838 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGMAP_MMIO @@ -0,0 +1 @@ +CONFIG_REGMAP_MMIO=y diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR new file mode 100644 index 000000000000..5b7c35c8f7bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR @@ -0,0 +1 @@ +CONFIG_REGULATOR=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_88PG86X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_88PG86X new file mode 100644 index 000000000000..ba92dea72ead --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_88PG86X @@ -0,0 +1 @@ +# CONFIG_REGULATOR_88PG86X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ACT8865 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ACT8865 new file mode 100644 index 000000000000..f1e82abd5b0e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ACT8865 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_ACT8865 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_AD5398 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_AD5398 new file mode 100644 index 000000000000..83b5968236a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_AD5398 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_AD5398 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_AW37503 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_AW37503 new file mode 100644 index 000000000000..f5500cf71c87 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_AW37503 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_AW37503 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9121 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9121 new file mode 100644 index 000000000000..3ebf366febfa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9121 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_DA9121 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9210 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9210 new file mode 100644 index 
000000000000..ed858d92d50b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9210 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_DA9210 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9211 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9211 new file mode 100644 index 000000000000..5f4b883da55a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DA9211 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_DA9211 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DEBUG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DEBUG new file mode 100644 index 000000000000..2894d490943d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_DEBUG @@ -0,0 +1 @@ +# CONFIG_REGULATOR_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FAN53555 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FAN53555 new file mode 100644 index 000000000000..d62314c7dda4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FAN53555 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_FAN53555 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FAN53880 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FAN53880 new file mode 100644 index 000000000000..9c63fafa097f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FAN53880 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_FAN53880 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FIXED_VOLTAGE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FIXED_VOLTAGE new file mode 100644 index 000000000000..63c1bd929762 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_FIXED_VOLTAGE @@ -0,0 +1 @@ +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_GPIO new file mode 100644 index 000000000000..91c099fda2f7 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_GPIO @@ -0,0 +1 @@ +# CONFIG_REGULATOR_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ISL6271A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ISL6271A new file mode 100644 index 000000000000..f2507c882743 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ISL6271A @@ -0,0 +1 @@ +# CONFIG_REGULATOR_ISL6271A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ISL9305 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ISL9305 new file mode 100644 index 000000000000..70ade2e4dab8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_ISL9305 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_ISL9305 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3971 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3971 new file mode 100644 index 000000000000..e5bd8a9e8330 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3971 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_LP3971 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3972 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3972 new file mode 100644 index 000000000000..3820f4be8e38 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP3972 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_LP3972 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP872X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP872X new file mode 100644 index 000000000000..a41e5d369a04 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP872X @@ -0,0 +1 @@ +# CONFIG_REGULATOR_LP872X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP8755 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP8755 new file mode 100644 index 000000000000..3d3d38b77b39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LP8755 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_LP8755 is not set diff 
--git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LTC3589 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LTC3589 new file mode 100644 index 000000000000..d14c63b54e1f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LTC3589 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_LTC3589 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LTC3676 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LTC3676 new file mode 100644 index 000000000000..a8f50af1c912 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_LTC3676 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_LTC3676 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX1586 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX1586 new file mode 100644 index 000000000000..a97539613581 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX1586 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MAX1586 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX20086 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX20086 new file mode 100644 index 000000000000..cfa7f164bf7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX20086 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MAX20086 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX20411 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX20411 new file mode 100644 index 000000000000..177d58b58040 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX20411 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MAX20411 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX77826 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX77826 new file mode 100644 index 000000000000..64d512ed5765 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX77826 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MAX77826 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX77857 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX77857 new file mode 100644 index 000000000000..cc3902f60bd2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX77857 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MAX77857 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8649 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8649 new file mode 100644 index 000000000000..79620946aaa8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8649 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MAX8649 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8660 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8660 new file mode 100644 index 000000000000..6b033e3b6a38 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8660 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MAX8660 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8893 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8893 new file mode 100644 index 000000000000..38ec09a45308 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8893 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MAX8893 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8952 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8952 new file mode 100644 index 000000000000..f8346c0fcc73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8952 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MAX8952 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8973 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8973 new file mode 100644 index 000000000000..27d82cb7815a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MAX8973 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MAX8973 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MCP16502 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MCP16502 new file mode 100644 index 
000000000000..778d3559d662 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MCP16502 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MCP16502 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP5416 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP5416 new file mode 100644 index 000000000000..1e382f58472a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP5416 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MP5416 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP8859 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP8859 new file mode 100644 index 000000000000..754940abfaf3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP8859 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MP8859 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP886X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP886X new file mode 100644 index 000000000000..1f7822e56962 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MP886X @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MP886X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MPQ7920 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MPQ7920 new file mode 100644 index 000000000000..c1a23686cdbf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MPQ7920 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MPQ7920 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MT6311 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MT6311 new file mode 100644 index 000000000000..884c0d452dd8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_MT6311 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_MT6311 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PCA9450 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PCA9450 new file mode 100644 index 000000000000..8545b10b30c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PCA9450 @@ -0,0 +1 
@@ +# CONFIG_REGULATOR_PCA9450 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PF8X00 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PF8X00 new file mode 100644 index 000000000000..f75129b3d561 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PF8X00 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_PF8X00 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PFUZE100 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PFUZE100 new file mode 100644 index 000000000000..7265415981b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PFUZE100 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_PFUZE100 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88060 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88060 new file mode 100644 index 000000000000..6c69caa24320 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88060 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_PV88060 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88080 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88080 new file mode 100644 index 000000000000..4b024f4ba59f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88080 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_PV88080 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88090 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88090 new file mode 100644 index 000000000000..009707021ef5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PV88090 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_PV88090 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PWM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PWM new file mode 100644 index 000000000000..81698143a022 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_PWM @@ -0,0 +1 @@ +# CONFIG_REGULATOR_PWM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_QCOM_REFGEN 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_QCOM_REFGEN new file mode 100644 index 000000000000..53876721e076 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_QCOM_REFGEN @@ -0,0 +1 @@ +# CONFIG_REGULATOR_QCOM_REFGEN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RAA215300 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RAA215300 new file mode 100644 index 000000000000..98ec4ce12504 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RAA215300 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RAA215300 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY new file mode 100644 index 000000000000..7dbbfb6b4125 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4801 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4801 new file mode 100644 index 000000000000..f60258af4bee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4801 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT4801 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4803 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4803 new file mode 100644 index 000000000000..f1388d814937 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT4803 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT4803 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5190A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5190A new file mode 100644 index 000000000000..35e23fba5f04 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5190A @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT5190A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5739 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5739 new file mode 100644 index 000000000000..b8c0b5e1575a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5739 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT5739 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5759 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5759 new file mode 100644 index 000000000000..9928ef545232 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT5759 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT5759 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6160 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6160 new file mode 100644 index 000000000000..7107fd311c81 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6160 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT6160 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6190 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6190 new file mode 100644 index 000000000000..5fe4661c58bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6190 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT6190 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6245 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6245 new file mode 100644 index 000000000000..d240a99fcb5f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RT6245 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RT6245 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTMV20 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTMV20 new file mode 100644 index 000000000000..680603f48616 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTMV20 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RTMV20 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ2134 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ2134 new file mode 100644 index 000000000000..13a439c360b3 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ2134 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RTQ2134 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ2208 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ2208 new file mode 100644 index 000000000000..d879bc88ae0e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ2208 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RTQ2208 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ6752 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ6752 new file mode 100644 index 000000000000..5e9c27e6faca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_RTQ6752 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_RTQ6752 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SLG51000 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SLG51000 new file mode 100644 index 000000000000..b65742cc97a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SLG51000 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_SLG51000 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8106A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8106A new file mode 100644 index 000000000000..619b46329883 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8106A @@ -0,0 +1 @@ +# CONFIG_REGULATOR_SY8106A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8824X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8824X new file mode 100644 index 000000000000..42053ce4cc2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8824X @@ -0,0 +1 @@ +# CONFIG_REGULATOR_SY8824X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8827N b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8827N new file mode 100644 index 000000000000..fafd2495eb5e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_SY8827N @@ -0,0 +1 @@ +# 
CONFIG_REGULATOR_SY8827N is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS51632 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS51632 new file mode 100644 index 000000000000..b586678e320d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS51632 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS51632 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS62360 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS62360 new file mode 100644 index 000000000000..b6904c247850 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS62360 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS62360 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6286X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6286X new file mode 100644 index 000000000000..ddd62b4bd224 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6286X @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS6286X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6287X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6287X new file mode 100644 index 000000000000..07d63aa3547d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6287X @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS6287X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS65023 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS65023 new file mode 100644 index 000000000000..7e5697b53d1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS65023 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS65023 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6507X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6507X new file mode 100644 index 000000000000..bcb7b9d409f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6507X @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS6507X is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS65132 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS65132 new file mode 100644 index 000000000000..b82a99f6c238 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS65132 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS65132 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6524X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6524X new file mode 100644 index 000000000000..a7363878b1e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_TPS6524X @@ -0,0 +1 @@ +# CONFIG_REGULATOR_TPS6524X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_USERSPACE_CONSUMER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_USERSPACE_CONSUMER new file mode 100644 index 000000000000..f6a6e11df167 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_USERSPACE_CONSUMER @@ -0,0 +1 @@ +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VCTRL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VCTRL new file mode 100644 index 000000000000..e27e9024520b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VCTRL @@ -0,0 +1 @@ +# CONFIG_REGULATOR_VCTRL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VEXPRESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VEXPRESS new file mode 100644 index 000000000000..d73432b3362e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VEXPRESS @@ -0,0 +1 @@ +# CONFIG_REGULATOR_VEXPRESS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VIRTUAL_CONSUMER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VIRTUAL_CONSUMER new file mode 100644 index 000000000000..cfdfe491c4df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VIRTUAL_CONSUMER @@ -0,0 +1 @@ +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VQMMC_IPQ4019 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VQMMC_IPQ4019 new file mode 100644 index 000000000000..2fe47853da49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_REGULATOR_VQMMC_IPQ4019 @@ -0,0 +1 @@ +# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID new file mode 100644 index 000000000000..8cddb03cb135 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID @@ -0,0 +1 @@ +CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_HISI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_HISI new file mode 100644 index 000000000000..af17d8a85fb1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_HISI @@ -0,0 +1 @@ +CONFIG_RESET_HISI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_AOSS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_AOSS new file mode 100644 index 000000000000..7213d3d92ce0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_AOSS @@ -0,0 +1 @@ +# CONFIG_RESET_QCOM_AOSS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_PDC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_PDC new file mode 100644 index 000000000000..e7a1af4466e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_QCOM_PDC @@ -0,0 +1 @@ +# CONFIG_RESET_QCOM_PDC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RFKILL_GPIO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RFKILL_GPIO new file mode 100644 index 000000000000..1665e0ed4f92 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RFKILL_GPIO @@ -0,0 +1 @@ +CONFIG_RFKILL_GPIO=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMI4_F34 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMI4_F34 new file mode 100644 index 
000000000000..9cf1d1384961 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMI4_F34 @@ -0,0 +1 @@ +CONFIG_RMI4_F34=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMI4_SPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMI4_SPI new file mode 100644 index 000000000000..805037dfdcfb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMI4_SPI @@ -0,0 +1 @@ +CONFIG_RMI4_SPI=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMNET b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMNET new file mode 100644 index 000000000000..5e8c115b5e5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RMNET @@ -0,0 +1 @@ +# CONFIG_RMNET is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ROCKCHIP_ERRATUM_3588001 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ROCKCHIP_ERRATUM_3588001 new file mode 100644 index 000000000000..289875384303 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ROCKCHIP_ERRATUM_3588001 @@ -0,0 +1 @@ +CONFIG_ROCKCHIP_ERRATUM_3588001=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABB5ZES3 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABB5ZES3 new file mode 100644 index 000000000000..4342c2d802e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABB5ZES3 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_ABB5ZES3=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABX80X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABX80X new file mode 100644 index 000000000000..8a32953e6017 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ABX80X @@ -0,0 +1 @@ +CONFIG_RTC_DRV_ABX80X=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_CADENCE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_CADENCE new file mode 100644 index 000000000000..e5d78054ebaf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_CADENCE @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_CADENCE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1305 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1305 new file mode 100644 index 000000000000..37b8971b4084 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1305 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1305=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1343 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1343 new file mode 100644 index 000000000000..452567d49b4b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1343 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1343=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1347 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1347 new file mode 100644 index 000000000000..c0dec754cb7c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1347 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1347=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1374_WDT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1374_WDT new file mode 100644 index 000000000000..025d8e33a337 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1374_WDT @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1374_WDT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1390 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1390 new file mode 100644 index 000000000000..9c4133771bb3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1390 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1390=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685 new file mode 100644 index 000000000000..2784655cb237 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1685=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685_FAMILY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685_FAMILY new file mode 100644 index 000000000000..c780040dbc23 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1685_FAMILY @@ -0,0 +1 @@ 
+CONFIG_RTC_DRV_DS1685_FAMILY=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1689 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1689 new file mode 100644 index 000000000000..d6b20ab72748 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS1689 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1689 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17285 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17285 new file mode 100644 index 000000000000..76bf35382247 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17285 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS17285 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17485 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17485 new file mode 100644 index 000000000000..67bda4bbed53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17485 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS17485 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17885 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17885 new file mode 100644 index 000000000000..a38b8f58c278 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_DS17885 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS17885 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_HYM8563 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_HYM8563 new file mode 100644 index 000000000000..2c04b57d9561 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_HYM8563 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_HYM8563 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ISL12026 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ISL12026 new file mode 100644 index 000000000000..81aad46d322b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ISL12026 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_ISL12026 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T93 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T93 new file mode 100644 index 000000000000..c2255ff15792 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T93 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_M41T93=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T94 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T94 new file mode 100644 index 000000000000..85abd7c6a65d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_M41T94 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_M41T94=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MAX6902 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MAX6902 new file mode 100644 index 000000000000..4bfa40bf096a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MAX6902 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_MAX6902=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MCP795 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MCP795 new file mode 100644 index 000000000000..498c0614beb3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_MCP795 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_MCP795=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_NCT3018Y b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_NCT3018Y new file mode 100644 index 000000000000..6601f1c36a98 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_NCT3018Y @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_NCT3018Y is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2123 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2123 new file mode 100644 index 000000000000..3a3a2e87b571 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2123 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_PCF2123=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2127 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2127 new file mode 100644 index 000000000000..20c191fb7422 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF2127 @@ -0,0 +1 @@ 
+CONFIG_RTC_DRV_PCF2127=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF85063 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF85063 new file mode 100644 index 000000000000..acad89a939fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PCF85063 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_PCF85063=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL030 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL030 new file mode 100644 index 000000000000..6f6f4ec88b18 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL030 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_PL030 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL031 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL031 new file mode 100644 index 000000000000..42990f0defdf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_PL031 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_PL031=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R7301 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R7301 new file mode 100644 index 000000000000..9b16e0a154a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R7301 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_R7301 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R9701 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R9701 new file mode 100644 index 000000000000..3a1cb4f3a06c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_R9701 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_R9701=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RS5C348 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RS5C348 new file mode 100644 index 000000000000..56e73e950ff9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RS5C348 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_RS5C348=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX4581 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX4581 new file mode 100644 index 000000000000..9d14898946b7 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX4581 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_RX4581=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX8010 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX8010 new file mode 100644 index 000000000000..334d51df6d90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_RX8010 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_RX8010=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_XGENE new file mode 100644 index 000000000000..35c7bf140847 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_XGENE @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_XGENE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ZYNQMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ZYNQMP new file mode 100644 index 000000000000..0125f992ec48 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_ZYNQMP @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_ZYNQMP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTS5208 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTS5208 new file mode 100644 index 000000000000..7d01cbf8fc13 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTS5208 @@ -0,0 +1 @@ +# CONFIG_RTS5208 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SATA_ZHAOXIN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SATA_ZHAOXIN new file mode 100644 index 000000000000..68dba87c9a88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SATA_ZHAOXIN @@ -0,0 +1 @@ +# CONFIG_SATA_ZHAOXIN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCHED_THERMAL_PRESSURE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCHED_THERMAL_PRESSURE new file mode 100644 index 000000000000..cf16318c1d29 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCHED_THERMAL_PRESSURE @@ -0,0 +1 @@ +CONFIG_SCHED_THERMAL_PRESSURE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_AACRAID 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_AACRAID new file mode 100644 index 000000000000..5686e689d701 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_AACRAID @@ -0,0 +1 @@ +# CONFIG_SCSI_AACRAID is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2X_FCOE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2X_FCOE new file mode 100644 index 000000000000..5d36d085f2f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2X_FCOE @@ -0,0 +1 @@ +# CONFIG_SCSI_BNX2X_FCOE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2_ISCSI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2_ISCSI new file mode 100644 index 000000000000..d400977dfcb3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_BNX2_ISCSI @@ -0,0 +1 @@ +# CONFIG_SCSI_BNX2_ISCSI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS new file mode 100644 index 000000000000..778ee9b80f58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS @@ -0,0 +1 @@ +CONFIG_SCSI_HISI_SAS=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE new file mode 100644 index 000000000000..9b9d7981e19d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE @@ -0,0 +1 @@ +# CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS_PCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS_PCI new file mode 100644 index 000000000000..601e09bf1380 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_HISI_SAS_PCI @@ -0,0 +1 @@ +CONFIG_SCSI_HISI_SAS_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR new file mode 100644 index 000000000000..ce2ee561899b 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR @@ -0,0 +1 @@ +CONFIG_SCSI_IPR=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_DUMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_DUMP new file mode 100644 index 000000000000..e9892cabfb12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_DUMP @@ -0,0 +1 @@ +CONFIG_SCSI_IPR_DUMP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_TRACE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_TRACE new file mode 100644 index 000000000000..126c699b69c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SCSI_IPR_TRACE @@ -0,0 +1 @@ +CONFIG_SCSI_IPR_TRACE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ACPI_POWER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ACPI_POWER new file mode 100644 index 000000000000..6d637e9f8873 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ACPI_POWER @@ -0,0 +1 @@ +CONFIG_SENSORS_ACPI_POWER=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7314 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7314 new file mode 100644 index 000000000000..b5581b7983d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7314 @@ -0,0 +1 @@ +CONFIG_SENSORS_AD7314=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7414 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7414 new file mode 100644 index 000000000000..e80d53b99027 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7414 @@ -0,0 +1 @@ +# CONFIG_SENSORS_AD7414 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7418 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7418 new file mode 100644 index 000000000000..d1c3ba2ddea1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AD7418 @@ -0,0 +1 @@ +# CONFIG_SENSORS_AD7418 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADC128D818 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADC128D818 new file mode 100644 index 000000000000..9f24634fc1f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADC128D818 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADC128D818=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADCXX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADCXX new file mode 100644 index 000000000000..78229786bc91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADCXX @@ -0,0 +1 @@ +CONFIG_SENSORS_ADCXX=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1021 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1021 new file mode 100644 index 000000000000..7c7cbf394b5e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1021 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADM1021 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1025 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1025 new file mode 100644 index 000000000000..3ecefab88ec3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1025 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADM1025 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1026 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1026 new file mode 100644 index 000000000000..e81f9ea44a01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1026 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADM1026 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1029 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1029 new file mode 100644 index 000000000000..0f6b6b3b2f69 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1029 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADM1029 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1031 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1031 new file mode 100644 index 000000000000..d4b3ab743b9f --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1031 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADM1031 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1275 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1275 new file mode 100644 index 000000000000..c0a4008cec90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM1275 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADM1275 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM9240 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM9240 new file mode 100644 index 000000000000..919188fe84f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADM9240 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADM9240 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7828 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7828 new file mode 100644 index 000000000000..4d1bb6f1483c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7828 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADS7828 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7871 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7871 new file mode 100644 index 000000000000..9a4091fb891b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADS7871 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADS7871=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7410 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7410 new file mode 100644 index 000000000000..c30e7ff6cf22 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7410 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADT7410 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7411 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7411 new file mode 100644 index 000000000000..619c6e260e19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7411 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADT7411 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7462 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7462 new file mode 100644 index 000000000000..a3ad4ef509c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7462 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADT7462 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7470 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7470 new file mode 100644 index 000000000000..1983d0385144 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7470 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADT7470 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7475 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7475 new file mode 100644 index 000000000000..6fb9e97ebcac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ADT7475 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADT7475 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AMC6821 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AMC6821 new file mode 100644 index 000000000000..54cb7710554a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_AMC6821 @@ -0,0 +1 @@ +# CONFIG_SENSORS_AMC6821 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_APDS990X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_APDS990X new file mode 100644 index 000000000000..3aa738be2dbf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_APDS990X @@ -0,0 +1 @@ +# CONFIG_SENSORS_APDS990X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ARM_SCPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ARM_SCPI new file mode 100644 index 000000000000..37da65383507 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ARM_SCPI @@ -0,0 +1 @@ +CONFIG_SENSORS_ARM_SCPI=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ASC7621 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ASC7621 new file mode 100644 index 
000000000000..a9d200ccee5e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ASC7621 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ASC7621 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ATXP1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ATXP1 new file mode 100644 index 000000000000..3928000a463d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ATXP1 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ATXP1 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_BH1770 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_BH1770 new file mode 100644 index 000000000000..f6d8bfafabda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_BH1770 @@ -0,0 +1 @@ +# CONFIG_SENSORS_BH1770 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DME1737 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DME1737 new file mode 100644 index 000000000000..8a86374249ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DME1737 @@ -0,0 +1 @@ +# CONFIG_SENSORS_DME1737 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DS1621 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DS1621 new file mode 100644 index 000000000000..9cd0e4ac1442 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DS1621 @@ -0,0 +1 @@ +# CONFIG_SENSORS_DS1621 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DS620 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DS620 new file mode 100644 index 000000000000..45ee72f7b399 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_DS620 @@ -0,0 +1 @@ +# CONFIG_SENSORS_DS620 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC1403 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC1403 new file mode 100644 index 000000000000..e49ca1d978f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC1403 @@ -0,0 +1 @@ +# CONFIG_SENSORS_EMC1403 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC6W201 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC6W201 new file mode 100644 index 000000000000..ccf86b7b428f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_EMC6W201 @@ -0,0 +1 @@ +# CONFIG_SENSORS_EMC6W201 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F71805F b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F71805F new file mode 100644 index 000000000000..f7176cb54a18 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F71805F @@ -0,0 +1 @@ +# CONFIG_SENSORS_F71805F is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F71882FG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F71882FG new file mode 100644 index 000000000000..69aef1e8f5ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F71882FG @@ -0,0 +1 @@ +# CONFIG_SENSORS_F71882FG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F75375S b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F75375S new file mode 100644 index 000000000000..1db7725af385 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_F75375S @@ -0,0 +1 @@ +# CONFIG_SENSORS_F75375S is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_G760A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_G760A new file mode 100644 index 000000000000..2ee6a3d0a05c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_G760A @@ -0,0 +1 @@ +# CONFIG_SENSORS_G760A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_G762 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_G762 new file mode 100644 index 000000000000..c036656b3e41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_G762 @@ -0,0 +1 @@ +CONFIG_SENSORS_G762=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GL518SM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GL518SM new file mode 100644 index 000000000000..86045f87f425 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GL518SM @@ -0,0 +1 @@ +# CONFIG_SENSORS_GL518SM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GL520SM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GL520SM new file mode 100644 index 000000000000..45c088381d8c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GL520SM @@ -0,0 +1 @@ +# CONFIG_SENSORS_GL520SM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GPIO_FAN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GPIO_FAN new file mode 100644 index 000000000000..7211f3f57711 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_GPIO_FAN @@ -0,0 +1 @@ +# CONFIG_SENSORS_GPIO_FAN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_I5K_AMB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_I5K_AMB new file mode 100644 index 000000000000..e68ba7b46e79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_I5K_AMB @@ -0,0 +1 @@ +# CONFIG_SENSORS_I5K_AMB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMAEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMAEM new file mode 100644 index 000000000000..ecbf82750806 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMAEM @@ -0,0 +1 @@ +# CONFIG_SENSORS_IBMAEM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMPEX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMPEX new file mode 100644 index 000000000000..dcd70ee1e076 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IBMPEX @@ -0,0 +1 @@ +# CONFIG_SENSORS_IBMPEX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_INA209 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_INA209 new file mode 100644 index 000000000000..32f735340f04 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_INA209 @@ -0,0 +1 @@ +# CONFIG_SENSORS_INA209 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_INA2XX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_INA2XX new file mode 100644 index 000000000000..cd7741d38ee6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_INA2XX @@ -0,0 +1 @@ +# CONFIG_SENSORS_INA2XX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IT87 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IT87 new file mode 100644 index 000000000000..c5d87c024bb0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_IT87 @@ -0,0 +1 @@ +# CONFIG_SENSORS_IT87 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_JC42 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_JC42 new file mode 100644 index 000000000000..02894fb8f1c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_JC42 @@ -0,0 +1 @@ +# CONFIG_SENSORS_JC42 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LINEAGE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LINEAGE new file mode 100644 index 000000000000..0e9e3f66257b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LINEAGE @@ -0,0 +1 @@ +# CONFIG_SENSORS_LINEAGE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LIS3_I2C b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LIS3_I2C new file mode 100644 index 000000000000..3086c9ed52c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LIS3_I2C @@ -0,0 +1 @@ +# CONFIG_SENSORS_LIS3_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM25066 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM25066 new file mode 100644 index 000000000000..f5dcf76364b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM25066 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM25066 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM63 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM63 new file mode 100644 index 000000000000..251b62540224 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM63 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM63 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM70 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM70 new file mode 100644 index 000000000000..d8626cc07adb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM70 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM70=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM73 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM73 new file mode 100644 index 000000000000..9d0010922e22 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM73 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM73 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM75 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM75 new file mode 100644 index 000000000000..07e8eb7f7a41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM75 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM75 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM77 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM77 new file mode 100644 index 000000000000..601df6402927 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM77 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM77 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM78 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM78 new file mode 100644 index 000000000000..2bc678f9f0b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM78 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM78 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM80 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM80 new file mode 100644 index 000000000000..9625f3de7f8e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM80 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM80 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM83 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM83 new file mode 100644 index 000000000000..d9836cd3fa54 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM83 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM83 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM85 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM85 new file mode 100644 index 000000000000..b2811fbafe87 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM85 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM85 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM87 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM87 new file mode 100644 index 000000000000..6deeb8c26266 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM87 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM87 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM90 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM90 new file mode 100644 index 000000000000..9400a5b9ff45 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM90 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM90 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM92 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM92 new file mode 100644 index 000000000000..09f46ba0f37f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM92 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM92 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM93 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM93 new file mode 100644 index 000000000000..b5a1f74bda1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM93 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM93 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95234 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95234 new file mode 100644 index 000000000000..eab1dd4ed602 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95234 @@ -0,0 +1 @@ +# 
CONFIG_SENSORS_LM95234 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95241 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95241 new file mode 100644 index 000000000000..098b2e5e36cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95241 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM95241 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95245 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95245 new file mode 100644 index 000000000000..7e37a1c41573 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LM95245 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM95245 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2945 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2945 new file mode 100644 index 000000000000..4df4be93d647 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2945 @@ -0,0 +1 @@ +CONFIG_SENSORS_LTC2945=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2978 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2978 new file mode 100644 index 000000000000..b9f38acfd973 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC2978 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC2978 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC3815 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC3815 new file mode 100644 index 000000000000..af91d3915fc4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC3815 @@ -0,0 +1 @@ +CONFIG_SENSORS_LTC3815=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4151 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4151 new file mode 100644 index 000000000000..c3fe846a242d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4151 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC4151 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4215 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4215 new file 
mode 100644 index 000000000000..752ec8bd9af0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4215 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC4215 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4222 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4222 new file mode 100644 index 000000000000..e7e172810cb7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4222 @@ -0,0 +1 @@ +CONFIG_SENSORS_LTC4222=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4245 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4245 new file mode 100644 index 000000000000..301686b547ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4245 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC4245 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4260 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4260 new file mode 100644 index 000000000000..fa5a33de1f69 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4260 @@ -0,0 +1 @@ +CONFIG_SENSORS_LTC4260=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4261 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4261 new file mode 100644 index 000000000000..77fd1e970952 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_LTC4261 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC4261 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1111 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1111 new file mode 100644 index 000000000000..fdefcc26b636 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1111 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX1111=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16064 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16064 new file mode 100644 index 000000000000..fbcb65964b70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16064 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX16064 is not set diff 
--git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16065 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16065 new file mode 100644 index 000000000000..5ccb15648f8c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX16065 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX16065 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1619 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1619 new file mode 100644 index 000000000000..a55124be98b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1619 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX1619 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1668 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1668 new file mode 100644 index 000000000000..46c334a6f6b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX1668 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX1668 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX197 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX197 new file mode 100644 index 000000000000..7199862491f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX197 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX197 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX20751 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX20751 new file mode 100644 index 000000000000..c1b89ff071b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX20751 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX20751=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX31790 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX31790 new file mode 100644 index 000000000000..fb5b83c53482 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX31790 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX31790=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX34440 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX34440 new file mode 100644 index 
000000000000..ec48b85dbec2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX34440 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX34440 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6639 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6639 new file mode 100644 index 000000000000..ca9883a4296c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6639 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX6639 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6642 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6642 new file mode 100644 index 000000000000..8bacba0f745e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6642 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX6642 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6650 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6650 new file mode 100644 index 000000000000..01aaa1a3e7b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6650 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX6650 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6697 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6697 new file mode 100644 index 000000000000..27dc23f0a43b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX6697 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX6697 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX8688 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX8688 new file mode 100644 index 000000000000..ad2279f08f89 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MAX8688 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX8688 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MCP3021 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MCP3021 new file mode 100644 index 000000000000..0baeaead8278 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_MCP3021 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MCP3021 is not 
set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6683 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6683 new file mode 100644 index 000000000000..3dfe7f818252 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6683 @@ -0,0 +1 @@ +CONFIG_SENSORS_NCT6683=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6775 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6775 new file mode 100644 index 000000000000..63986c5b2cd2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT6775 @@ -0,0 +1 @@ +# CONFIG_SENSORS_NCT6775 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7802 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7802 new file mode 100644 index 000000000000..63cdd409c758 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7802 @@ -0,0 +1 @@ +CONFIG_SENSORS_NCT7802=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7904 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7904 new file mode 100644 index 000000000000..adf813657826 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_NCT7904 @@ -0,0 +1 @@ +CONFIG_SENSORS_NCT7904=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87360 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87360 new file mode 100644 index 000000000000..741effe11296 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87360 @@ -0,0 +1 @@ +# CONFIG_SENSORS_PC87360 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87427 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87427 new file mode 100644 index 000000000000..ff6fc05635af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PC87427 @@ -0,0 +1 @@ +# CONFIG_SENSORS_PC87427 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PCF8591 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PCF8591 new file mode 100644 index 000000000000..6f628a342c6c 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PCF8591 @@ -0,0 +1 @@ +# CONFIG_SENSORS_PCF8591 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PMBUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PMBUS new file mode 100644 index 000000000000..7e4dc7661235 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PMBUS @@ -0,0 +1 @@ +# CONFIG_SENSORS_PMBUS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_POWR1220 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_POWR1220 new file mode 100644 index 000000000000..5524b22aae4f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_POWR1220 @@ -0,0 +1 @@ +CONFIG_SENSORS_POWR1220=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PWM_FAN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PWM_FAN new file mode 100644 index 000000000000..4d8a381c52e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_PWM_FAN @@ -0,0 +1 @@ +CONFIG_SENSORS_PWM_FAN=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5627 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5627 new file mode 100644 index 000000000000..c024c82e7c49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5627 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SCH5627 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5636 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5636 new file mode 100644 index 000000000000..4752ebfcd8d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SCH5636 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SCH5636 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT15 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT15 new file mode 100644 index 000000000000..be3633e224da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT15 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SHT15 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT21 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT21 new file mode 100644 index 000000000000..c07100a989b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHT21 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SHT21 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHTC1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHTC1 new file mode 100644 index 000000000000..7eeb26cc71dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SHTC1 @@ -0,0 +1 @@ +CONFIG_SENSORS_SHTC1=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SIS5595 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SIS5595 new file mode 100644 index 000000000000..d6dab578fd89 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SIS5595 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SIS5595 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47B397 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47B397 new file mode 100644 index 000000000000..745ae49869bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47B397 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SMSC47B397 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M1 new file mode 100644 index 000000000000..150f310153c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M1 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SMSC47M1 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M192 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M192 new file mode 100644 index 000000000000..2a306f5f0223 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_SMSC47M192 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SMSC47M192 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TC74 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TC74 new file mode 100644 index 
000000000000..32f0b8ea495e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TC74 @@ -0,0 +1 @@ +CONFIG_SENSORS_TC74=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_THMC50 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_THMC50 new file mode 100644 index 000000000000..7fc6f1ee39b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_THMC50 @@ -0,0 +1 @@ +# CONFIG_SENSORS_THMC50 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP102 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP102 new file mode 100644 index 000000000000..48f4e8447cfe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP102 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TMP102 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP103 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP103 new file mode 100644 index 000000000000..89c6eca5e525 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP103 @@ -0,0 +1 @@ +CONFIG_SENSORS_TMP103=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP401 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP401 new file mode 100644 index 000000000000..565627932698 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP401 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TMP401 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP421 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP421 new file mode 100644 index 000000000000..865e47e2a848 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TMP421 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TMP421 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TPS40422 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TPS40422 new file mode 100644 index 000000000000..1abcf8970403 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TPS40422 @@ -0,0 +1 @@ +CONFIG_SENSORS_TPS40422=m diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TSL2550 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TSL2550 new file mode 100644 index 000000000000..c2702cc2c8e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_TSL2550 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TSL2550 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9000 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9000 new file mode 100644 index 000000000000..1b3c69391580 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9000 @@ -0,0 +1 @@ +# CONFIG_SENSORS_UCD9000 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9200 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9200 new file mode 100644 index 000000000000..2e9678aa2609 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_UCD9200 @@ -0,0 +1 @@ +# CONFIG_SENSORS_UCD9200 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VEXPRESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VEXPRESS new file mode 100644 index 000000000000..a24ba02a891c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VEXPRESS @@ -0,0 +1 @@ +CONFIG_SENSORS_VEXPRESS=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VIA686A b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VIA686A new file mode 100644 index 000000000000..70903ff3806b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VIA686A @@ -0,0 +1 @@ +# CONFIG_SENSORS_VIA686A is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VT1211 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VT1211 new file mode 100644 index 000000000000..e6495b5ae585 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VT1211 @@ -0,0 +1 @@ +# CONFIG_SENSORS_VT1211 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VT8231 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VT8231 new file mode 100644 index 
000000000000..9c3442298f85 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_VT8231 @@ -0,0 +1 @@ +# CONFIG_SENSORS_VT8231 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627EHF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627EHF new file mode 100644 index 000000000000..cf28c4254305 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627EHF @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83627EHF is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627HF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627HF new file mode 100644 index 000000000000..3fe5afab024a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83627HF @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83627HF is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83781D b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83781D new file mode 100644 index 000000000000..1b25f4725b72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83781D @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83781D is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83791D b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83791D new file mode 100644 index 000000000000..9b7e1cf9b64c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83791D @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83791D is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83792D b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83792D new file mode 100644 index 000000000000..79866eaf88e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83792D @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83792D is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83793 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83793 new file mode 100644 index 000000000000..55d6678c1a55 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83793 @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83793 is 
not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83795 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83795 new file mode 100644 index 000000000000..7396aa9ec3f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83795 @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83795 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83L785TS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83L785TS new file mode 100644 index 000000000000..9c053cfdb1c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83L785TS @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83L785TS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83L786NG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83L786NG new file mode 100644 index 000000000000..c2209730bf93 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_W83L786NG @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83L786NG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_XGENE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_XGENE new file mode 100644 index 000000000000..84ffdf1e91bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_XGENE @@ -0,0 +1 @@ +CONFIG_SENSORS_XGENE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ZL6100 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ZL6100 new file mode 100644 index 000000000000..00681ebdc0e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SENSORS_ZL6100 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ZL6100 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_16550A_VARIANTS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_16550A_VARIANTS new file mode 100644 index 000000000000..689bc1f75a12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_16550A_VARIANTS @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_16550A_VARIANTS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_FSL 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_FSL new file mode 100644 index 000000000000..2aa5263f2832 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_FSL @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_FSL=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_RT288X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_RT288X new file mode 100644 index 000000000000..8d92a9a3fc83 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_RT288X @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_RT288X=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL010 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL010 new file mode 100644 index 000000000000..ee4b9d52a1d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL010 @@ -0,0 +1 @@ +# CONFIG_SERIAL_AMBA_PL010 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL011 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL011 new file mode 100644 index 000000000000..1e7631992213 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL011 @@ -0,0 +1 @@ +CONFIG_SERIAL_AMBA_PL011=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL011_CONSOLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL011_CONSOLE new file mode 100644 index 000000000000..498816601edb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_AMBA_PL011_CONSOLE @@ -0,0 +1 @@ +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_ARC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_ARC new file mode 100644 index 000000000000..48427664c8dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_ARC @@ -0,0 +1 @@ +# CONFIG_SERIAL_ARC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_CONEXANT_DIGICOLOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_CONEXANT_DIGICOLOR new file mode 100644 index 000000000000..b7ae4d4a234b --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_CONEXANT_DIGICOLOR @@ -0,0 +1 @@ +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_EARLYCON_SEMIHOST b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_EARLYCON_SEMIHOST new file mode 100644 index 000000000000..c7042c27e68a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_EARLYCON_SEMIHOST @@ -0,0 +1 @@ +# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_JSM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_JSM new file mode 100644 index 000000000000..d9de0605a54f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_JSM @@ -0,0 +1 @@ +# CONFIG_SERIAL_JSM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_MSM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_MSM new file mode 100644 index 000000000000..9835c5271da1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_MSM @@ -0,0 +1 @@ +# CONFIG_SERIAL_MSM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_OF_PLATFORM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_OF_PLATFORM new file mode 100644 index 000000000000..837a43b2e894 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_OF_PLATFORM @@ -0,0 +1 @@ +CONFIG_SERIAL_OF_PLATFORM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_SIFIVE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_SIFIVE new file mode 100644 index 000000000000..95657f51a586 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_SIFIVE @@ -0,0 +1 @@ +# CONFIG_SERIAL_SIFIVE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_XILINX_PS_UART b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_XILINX_PS_UART new file mode 100644 index 000000000000..3d6ecfc5e05c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_XILINX_PS_UART @@ -0,0 +1 @@ +# CONFIG_SERIAL_XILINX_PS_UART is not set diff 
--git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIO_APBPS2 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIO_APBPS2 new file mode 100644 index 000000000000..9e86f27a9954 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIO_APBPS2 @@ -0,0 +1 @@ +# CONFIG_SERIO_APBPS2 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SFC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SFC new file mode 100644 index 000000000000..fdc60b90f768 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SFC @@ -0,0 +1 @@ +# CONFIG_SFC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SGI_PARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SGI_PARTITION new file mode 100644 index 000000000000..49a58fc9bc96 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SGI_PARTITION @@ -0,0 +1 @@ +# CONFIG_SGI_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SG_SPLIT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SG_SPLIT new file mode 100644 index 000000000000..b6f8e64dcf48 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SG_SPLIT @@ -0,0 +1 @@ +CONFIG_SG_SPLIT=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SHADOW_CALL_STACK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SHADOW_CALL_STACK new file mode 100644 index 000000000000..08f4cf8044ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SHADOW_CALL_STACK @@ -0,0 +1 @@ +# CONFIG_SHADOW_CALL_STACK is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_BRCMSTB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_BRCMSTB new file mode 100644 index 000000000000..7b8f8dcbb66f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_BRCMSTB @@ -0,0 +1 @@ +# CONFIG_SOC_BRCMSTB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_BUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_BUS new file mode 100644 index 000000000000..de561f5b4ec6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_BUS @@ -0,0 +1 @@ +CONFIG_SOC_BUS=y diff 
--git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOLARIS_X86_PARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOLARIS_X86_PARTITION new file mode 100644 index 000000000000..7b428330f665 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOLARIS_X86_PARTITION @@ -0,0 +1 @@ +# CONFIG_SOLARIS_X86_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_QUADSPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_QUADSPI new file mode 100644 index 000000000000..7c242fd0db53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_QUADSPI @@ -0,0 +1 @@ +# CONFIG_SPI_CADENCE_QUADSPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_DW_DMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_DW_DMA new file mode 100644 index 000000000000..cf996d71fdd4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_DW_DMA @@ -0,0 +1 @@ +# CONFIG_SPI_DW_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_DW_PCI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_DW_PCI new file mode 100644 index 000000000000..ef5d048b4cf4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_DW_PCI @@ -0,0 +1 @@ +# CONFIG_SPI_DW_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_FSL_SPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_FSL_SPI new file mode 100644 index 000000000000..aa09f75414c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_FSL_SPI @@ -0,0 +1 @@ +# CONFIG_SPI_FSL_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC_V3XX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC_V3XX new file mode 100644 index 000000000000..5705c51bd057 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC_V3XX @@ -0,0 +1 @@ +CONFIG_SPI_HISI_SFC_V3XX=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_QCOM_QSPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_QCOM_QSPI new file mode 100644 index 
000000000000..aaed52ccea75 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_QCOM_QSPI @@ -0,0 +1 @@ +# CONFIG_SPI_QCOM_QSPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_THUNDERX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_THUNDERX new file mode 100644 index 000000000000..2f95cc2861f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_THUNDERX @@ -0,0 +1 @@ +# CONFIG_SPI_THUNDERX is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SSIF_IPMI_BMC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SSIF_IPMI_BMC new file mode 100644 index 000000000000..ce7b4d44146a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SSIF_IPMI_BMC @@ -0,0 +1 @@ +# CONFIG_SSIF_IPMI_BMC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STACKPROTECTOR_PER_TASK b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STACKPROTECTOR_PER_TASK new file mode 100644 index 000000000000..2d53b16b601a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STACKPROTECTOR_PER_TASK @@ -0,0 +1 @@ +CONFIG_STACKPROTECTOR_PER_TASK=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STAGING_BOARD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STAGING_BOARD new file mode 100644 index 000000000000..16496bf2bf3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STAGING_BOARD @@ -0,0 +1 @@ +# CONFIG_STAGING_BOARD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STAGING_MEDIA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STAGING_MEDIA new file mode 100644 index 000000000000..59c987cafcfe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STAGING_MEDIA @@ -0,0 +1 @@ +# CONFIG_STAGING_MEDIA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STUB_CLK_HI3660 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STUB_CLK_HI3660 new file mode 100644 index 000000000000..99ffc024c0de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_STUB_CLK_HI3660 @@ -0,0 +1 @@ +CONFIG_STUB_CLK_HI3660=y diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SUN_PARTITION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SUN_PARTITION new file mode 100644 index 000000000000..ba52703282bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SUN_PARTITION @@ -0,0 +1 @@ +# CONFIG_SUN_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SYSCON_REBOOT_MODE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SYSCON_REBOOT_MODE new file mode 100644 index 000000000000..0f871191b0f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SYSCON_REBOOT_MODE @@ -0,0 +1 @@ +# CONFIG_SYSCON_REBOOT_MODE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_ATMEL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_ATMEL new file mode 100644 index 000000000000..2d7f3fd83e2d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_ATMEL @@ -0,0 +1 @@ +# CONFIG_TCG_TIS_I2C_ATMEL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_INFINEON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_INFINEON new file mode 100644 index 000000000000..0514455b1877 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_INFINEON @@ -0,0 +1 @@ +# CONFIG_TCG_TIS_I2C_INFINEON is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_NUVOTON b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_NUVOTON new file mode 100644 index 000000000000..84d30bc2c0f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_I2C_NUVOTON @@ -0,0 +1 @@ +# CONFIG_TCG_TIS_I2C_NUVOTON is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI new file mode 100644 index 000000000000..bfd1ff673b66 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI @@ -0,0 +1 @@ +CONFIG_TCG_TIS_SPI=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI_CR50 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI_CR50 new file 
mode 100644 index 000000000000..734199d2e8e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI_CR50 @@ -0,0 +1 @@ +# CONFIG_TCG_TIS_SPI_CR50 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_ST33ZP24_I2C b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_ST33ZP24_I2C new file mode 100644 index 000000000000..1ff9e8cfa9f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_ST33ZP24_I2C @@ -0,0 +1 @@ +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDERX2_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDERX2_PMU new file mode 100644 index 000000000000..e42dde2ddf84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDERX2_PMU @@ -0,0 +1 @@ +CONFIG_THUNDERX2_PMU=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_BGX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_BGX new file mode 100644 index 000000000000..123fe6f48ad3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_BGX @@ -0,0 +1 @@ +CONFIG_THUNDER_NIC_BGX=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_PF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_PF new file mode 100644 index 000000000000..6b047a13cf72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_PF @@ -0,0 +1 @@ +CONFIG_THUNDER_NIC_PF=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_RGX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_RGX new file mode 100644 index 000000000000..c50ae013ff8b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_RGX @@ -0,0 +1 @@ +CONFIG_THUNDER_NIC_RGX=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_VF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_VF new file mode 100644 index 000000000000..5fdfca5dbde7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_THUNDER_NIC_VF @@ -0,0 +1 @@ +CONFIG_THUNDER_NIC_VF=m diff --git 
a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIFM_7XX1 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIFM_7XX1 new file mode 100644 index 000000000000..7cbf18710090 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIFM_7XX1 @@ -0,0 +1 @@ +# CONFIG_TIFM_7XX1 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_ACPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_ACPI new file mode 100644 index 000000000000..4eadf73929f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_ACPI @@ -0,0 +1 @@ +CONFIG_TIMER_ACPI=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_OF b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_OF new file mode 100644 index 000000000000..99fc54e6a614 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_OF @@ -0,0 +1 @@ +CONFIG_TIMER_OF=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_PROBE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_PROBE new file mode 100644 index 000000000000..b2aa1db30036 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TIMER_PROBE @@ -0,0 +1 @@ +CONFIG_TIMER_PROBE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TRACE_MMIO_ACCESS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TRACE_MMIO_ACCESS new file mode 100644 index 000000000000..ff22d56979aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TRACE_MMIO_ACCESS @@ -0,0 +1 @@ +# CONFIG_TRACE_MMIO_ACCESS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TRANS_TABLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TRANS_TABLE new file mode 100644 index 000000000000..4fa677f2e6b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TRANS_TABLE @@ -0,0 +1 @@ +CONFIG_TRANS_TABLE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TYPEC_FUSB302 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TYPEC_FUSB302 new file mode 100644 index 000000000000..c3f23ea16bf7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TYPEC_FUSB302 @@ -0,0 +1 @@ +# CONFIG_TYPEC_FUSB302 is not set 
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TYPEC_QCOM_PMIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TYPEC_QCOM_PMIC new file mode 100644 index 000000000000..b04c909a37fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TYPEC_QCOM_PMIC @@ -0,0 +1 @@ +# CONFIG_TYPEC_QCOM_PMIC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_UEFI_CPER_ARM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_UEFI_CPER_ARM new file mode 100644 index 000000000000..b387eada34cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_UEFI_CPER_ARM @@ -0,0 +1 @@ +CONFIG_UEFI_CPER_ARM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ULTRASOC_SMB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ULTRASOC_SMB new file mode 100644 index 000000000000..820ee82fd9db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ULTRASOC_SMB @@ -0,0 +1 @@ +# CONFIG_ULTRASOC_SMB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_UNIXWARE_DISKLABEL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_UNIXWARE_DISKLABEL new file mode 100644 index 000000000000..2af4dbd9b63c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_UNIXWARE_DISKLABEL @@ -0,0 +1 @@ +# CONFIG_UNIXWARE_DISKLABEL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_CHAOSKEY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_CHAOSKEY new file mode 100644 index 000000000000..e92d4cfb0a5e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_CHAOSKEY @@ -0,0 +1 @@ +CONFIG_USB_CHAOSKEY=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_HCD_PLATFORM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_HCD_PLATFORM new file mode 100644 index 000000000000..b8939d990a5e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_HCD_PLATFORM @@ -0,0 +1 @@ +CONFIG_USB_EHCI_HCD_PLATFORM=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_NET_SR9700 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_NET_SR9700 new file mode 100644 index 000000000000..f2b8724f9564 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_NET_SR9700 @@ -0,0 +1 @@ +CONFIG_USB_NET_SR9700=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ONBOARD_HUB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ONBOARD_HUB new file mode 100644 index 000000000000..79ccaa2177ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ONBOARD_HUB @@ -0,0 +1 @@ +# CONFIG_USB_ONBOARD_HUB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_QCOM_EUD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_QCOM_EUD new file mode 100644 index 000000000000..38c147a01339 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_QCOM_EUD @@ -0,0 +1 @@ +# CONFIG_USB_QCOM_EUD is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SERIAL_CONSOLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SERIAL_CONSOLE new file mode 100644 index 000000000000..753c9f223264 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SERIAL_CONSOLE @@ -0,0 +1 @@ +# CONFIG_USB_SERIAL_CONSOLE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SERIAL_SIMPLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SERIAL_SIMPLE new file mode 100644 index 000000000000..6ffd8f60b27b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SERIAL_SIMPLE @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_SIMPLE=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SPEEDTOUCH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SPEEDTOUCH new file mode 100644 index 000000000000..e34bebc6254d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_SPEEDTOUCH @@ -0,0 +1 @@ +# CONFIG_USB_SPEEDTOUCH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_UHCI_HCD b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_UHCI_HCD new file mode 100644 index 000000000000..9c9d8c2f84c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_UHCI_HCD @@ -0,0 +1 @@ +CONFIG_USB_UHCI_HCD=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI new file mode 100644 index 000000000000..63568d53655a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI @@ -0,0 +1 @@ +# CONFIG_USB_ULPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI_BUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI_BUS new file mode 100644 index 000000000000..2e81d95ff4f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_ULPI_BUS @@ -0,0 +1 @@ +CONFIG_USB_ULPI_BUS=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_DBGCAP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_DBGCAP new file mode 100644 index 000000000000..195a33d45d0e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_DBGCAP @@ -0,0 +1 @@ +# CONFIG_USB_XHCI_DBGCAP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_HISTB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_HISTB new file mode 100644 index 000000000000..c1785f199642 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_HISTB @@ -0,0 +1 @@ +# CONFIG_USB_XHCI_HISTB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_PLATFORM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_PLATFORM new file mode 100644 index 000000000000..f0dce4b31845 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_XHCI_PLATFORM @@ -0,0 +1 @@ +CONFIG_USB_XHCI_PLATFORM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VCPU_STALL_DETECTOR b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VCPU_STALL_DETECTOR new file mode 100644 index 000000000000..06a28199bd34 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VCPU_STALL_DETECTOR @@ -0,0 +1 @@ +# CONFIG_VCPU_STALL_DETECTOR is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VEXPRESS_CONFIG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VEXPRESS_CONFIG new file mode 100644 index 000000000000..389660309f56 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VEXPRESS_CONFIG @@ -0,0 +1 @@ +CONFIG_VEXPRESS_CONFIG=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_AMBA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_AMBA new file mode 100644 index 000000000000..3b202f4c64ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_AMBA @@ -0,0 +1 @@ +# CONFIG_VFIO_AMBA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_PLATFORM_AMDXGBE_RESET b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_PLATFORM_AMDXGBE_RESET new file mode 100644 index 000000000000..1662b48c3be6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_PLATFORM_AMDXGBE_RESET @@ -0,0 +1 @@ +# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET new file mode 100644 index 000000000000..7c71659cf7d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET @@ -0,0 +1 @@ +# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VME_BUS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VME_BUS new file mode 100644 index 000000000000..4523e4e1dde9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VME_BUS @@ -0,0 +1 @@ +# CONFIG_VME_BUS is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VT6655 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VT6655 new file mode 100644 index 000000000000..d13d9caef68e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_VT6655 @@ -0,0 +1 @@ +# CONFIG_VT6655 is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_WLAN b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_WLAN new file mode 100644 index 000000000000..35667b211fae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_WLAN @@ -0,0 +1 @@ +# CONFIG_WLAN is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_DMA 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_DMA new file mode 100644 index 000000000000..7c67d5b57e94 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_DMA @@ -0,0 +1 @@ +# CONFIG_XGENE_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_PMU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_PMU new file mode 100644 index 000000000000..df5afee9fef3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_PMU @@ -0,0 +1 @@ +CONFIG_XGENE_PMU=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_SLIMPRO_MBOX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_SLIMPRO_MBOX new file mode 100644 index 000000000000..cbc486395bb3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XGENE_SLIMPRO_MBOX @@ -0,0 +1 @@ +CONFIG_XGENE_SLIMPRO_MBOX=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_INTC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_INTC new file mode 100644 index 000000000000..f5d8ef7f1b7a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_INTC @@ -0,0 +1 @@ +# CONFIG_XILINX_INTC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_WINDOW_WATCHDOG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_WINDOW_WATCHDOG new file mode 100644 index 000000000000..818d251e7f08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_WINDOW_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_XILINX_WINDOW_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_ZYNQMP_DMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_ZYNQMP_DMA new file mode 100644 index 000000000000..462b390f4043 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_ZYNQMP_DMA @@ -0,0 +1 @@ +# CONFIG_XILINX_ZYNQMP_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_ZYNQMP_DPDMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_ZYNQMP_DPDMA new file mode 100644 index 000000000000..70851f72d1e7 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XILINX_ZYNQMP_DPDMA @@ -0,0 +1 @@ +# CONFIG_XILINX_ZYNQMP_DPDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XIL_AXIS_FIFO b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XIL_AXIS_FIFO new file mode 100644 index 000000000000..0e5adab18aa7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_XIL_AXIS_FIFO @@ -0,0 +1 @@ +# CONFIG_XIL_AXIS_FIFO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN new file mode 100644 index 000000000000..4b6ead8298f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN @@ -0,0 +1 @@ +CONFIG_6LOWPAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN_DEBUGFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN_DEBUGFS new file mode 100644 index 000000000000..16bf71605076 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_6LOWPAN_DEBUGFS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN_NHC b/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN_NHC new file mode 100644 index 000000000000..002da8af2aa7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_6LOWPAN_NHC @@ -0,0 +1 @@ +# CONFIG_6LOWPAN_NHC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_8139CP b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139CP new file mode 100644 index 000000000000..7688cd893471 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139CP @@ -0,0 +1 @@ +CONFIG_8139CP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO new file mode 100644 index 000000000000..fe01332954f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO @@ -0,0 +1 @@ +CONFIG_8139TOO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_8129 b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_8129 new file mode 100644 index 
000000000000..b8b8ef3d78f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_8129 @@ -0,0 +1 @@ +CONFIG_8139TOO_8129=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_PIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_PIO new file mode 100644 index 000000000000..28b3b851a570 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_PIO @@ -0,0 +1 @@ +# CONFIG_8139TOO_PIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_TUNE_TWISTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_TUNE_TWISTER new file mode 100644 index 000000000000..ba1c1bd0dd8d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139TOO_TUNE_TWISTER @@ -0,0 +1 @@ +# CONFIG_8139TOO_TUNE_TWISTER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_8139_OLD_RX_RESET b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139_OLD_RX_RESET new file mode 100644 index 000000000000..d30a504dae55 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_8139_OLD_RX_RESET @@ -0,0 +1 @@ +# CONFIG_8139_OLD_RX_RESET is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACCESSIBILITY b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACCESSIBILITY new file mode 100644 index 000000000000..9047179a277e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACCESSIBILITY @@ -0,0 +1 @@ +# CONFIG_ACCESSIBILITY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACORN_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACORN_PARTITION new file mode 100644 index 000000000000..91e1dd62196f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACORN_PARTITION @@ -0,0 +1 @@ +# CONFIG_ACORN_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_CPPC_LIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_CPPC_LIB new file mode 100644 index 000000000000..0b8ca34adade --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_CPPC_LIB @@ -0,0 +1 @@ 
+CONFIG_ACPI_CPPC_LIB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_FFH b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_FFH new file mode 100644 index 000000000000..7961cdc19406 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_FFH @@ -0,0 +1 @@ +# CONFIG_ACPI_FFH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_FPDT b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_FPDT new file mode 100644 index 000000000000..385ec923a2cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_FPDT @@ -0,0 +1 @@ +# CONFIG_ACPI_FPDT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_HOTPLUG_CPU b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_HOTPLUG_CPU new file mode 100644 index 000000000000..ef79411654e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_HOTPLUG_CPU @@ -0,0 +1 @@ +CONFIG_ACPI_HOTPLUG_CPU=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_I2C_OPREGION b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_I2C_OPREGION new file mode 100644 index 000000000000..92036b9757c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_I2C_OPREGION @@ -0,0 +1 @@ +CONFIG_ACPI_I2C_OPREGION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_MDIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_MDIO new file mode 100644 index 000000000000..b24aedf50125 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_MDIO @@ -0,0 +1 @@ +CONFIG_ACPI_MDIO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_PFRUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_PFRUT new file mode 100644 index 000000000000..870f63f35233 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_PFRUT @@ -0,0 +1 @@ +# CONFIG_ACPI_PFRUT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_PROCESSOR_IDLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_PROCESSOR_IDLE new file mode 100644 index 000000000000..e6e1026db37b --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_PROCESSOR_IDLE @@ -0,0 +1 @@ +CONFIG_ACPI_PROCESSOR_IDLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_TABLE_LIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_TABLE_LIB new file mode 100644 index 000000000000..0744bee41167 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ACPI_TABLE_LIB @@ -0,0 +1 @@ +CONFIG_ACPI_TABLE_LIB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AD525X_DPOT b/anolis/configs/L2-OPTIONAL/default/CONFIG_AD525X_DPOT new file mode 100644 index 000000000000..789ddbbca427 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AD525X_DPOT @@ -0,0 +1 @@ +# CONFIG_AD525X_DPOT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ADFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADFS_FS new file mode 100644 index 000000000000..5d5a3e9c8db6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADFS_FS @@ -0,0 +1 @@ +# CONFIG_ADFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN1100_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN1100_PHY new file mode 100644 index 000000000000..128f41d6a2b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN1100_PHY @@ -0,0 +1 @@ +# CONFIG_ADIN1100_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN1110 b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN1110 new file mode 100644 index 000000000000..38c918ed04af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN1110 @@ -0,0 +1 @@ +# CONFIG_ADIN1110 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN_PHY new file mode 100644 index 000000000000..7523e9d1f9c0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ADIN_PHY @@ -0,0 +1 @@ +# CONFIG_ADIN_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AFFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_AFFS_FS new file mode 
100644 index 000000000000..20d90af1cd46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AFFS_FS @@ -0,0 +1 @@ +# CONFIG_AFFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_AFS_FS new file mode 100644 index 000000000000..b622cdb8ef06 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AFS_FS @@ -0,0 +1 @@ +# CONFIG_AFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_KCM b/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_KCM new file mode 100644 index 000000000000..b26e52616152 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_KCM @@ -0,0 +1 @@ +# CONFIG_AF_KCM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_RXRPC b/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_RXRPC new file mode 100644 index 000000000000..b703c0366e67 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_RXRPC @@ -0,0 +1 @@ +# CONFIG_AF_RXRPC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_UNIX_OOB b/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_UNIX_OOB new file mode 100644 index 000000000000..be4aa77487b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AF_UNIX_OOB @@ -0,0 +1 @@ +CONFIG_AF_UNIX_OOB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AHCI_DWC b/anolis/configs/L2-OPTIONAL/default/CONFIG_AHCI_DWC new file mode 100644 index 000000000000..d5d0496905ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AHCI_DWC @@ -0,0 +1 @@ +# CONFIG_AHCI_DWC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AIX_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_AIX_PARTITION new file mode 100644 index 000000000000..6e03c59c70e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AIX_PARTITION @@ -0,0 +1 @@ +# CONFIG_AIX_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ALIM7101_WDT b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALIM7101_WDT 
new file mode 100644 index 000000000000..6ded51391838 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALIM7101_WDT @@ -0,0 +1 @@ +CONFIG_ALIM7101_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_MBOX b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_MBOX new file mode 100644 index 000000000000..62cd998d230f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_MBOX @@ -0,0 +1 @@ +# CONFIG_ALTERA_MBOX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_MSGDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_MSGDMA new file mode 100644 index 000000000000..88d345ac66b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_MSGDMA @@ -0,0 +1 @@ +# CONFIG_ALTERA_MSGDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_TSE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_TSE new file mode 100644 index 000000000000..f803036e892d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALTERA_TSE @@ -0,0 +1 @@ +# CONFIG_ALTERA_TSE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ALX b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALX new file mode 100644 index 000000000000..00298a2d451d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ALX @@ -0,0 +1 @@ +CONFIG_ALX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AMD_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_AMD_PHY new file mode 100644 index 000000000000..6675832f2c5b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AMD_PHY @@ -0,0 +1 @@ +CONFIG_AMD_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AMT b/anolis/configs/L2-OPTIONAL/default/CONFIG_AMT new file mode 100644 index 000000000000..55399527e3ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AMT @@ -0,0 +1 @@ +# CONFIG_AMT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ANDROID_BINDER_IPC b/anolis/configs/L2-OPTIONAL/default/CONFIG_ANDROID_BINDER_IPC new 
file mode 100644 index 000000000000..538c5f8c1c6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ANDROID_BINDER_IPC @@ -0,0 +1 @@ +# CONFIG_ANDROID_BINDER_IPC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ANON_VMA_NAME b/anolis/configs/L2-OPTIONAL/default/CONFIG_ANON_VMA_NAME new file mode 100644 index 000000000000..73bfbfec5b30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ANON_VMA_NAME @@ -0,0 +1 @@ +# CONFIG_ANON_VMA_NAME is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_APERTURE_HELPERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_APERTURE_HELPERS new file mode 100644 index 000000000000..87b1eca12171 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_APERTURE_HELPERS @@ -0,0 +1 @@ +CONFIG_APERTURE_HELPERS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_APPLE_MFI_FASTCHARGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_APPLE_MFI_FASTCHARGE new file mode 100644 index 000000000000..978db90d7ec1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_APPLE_MFI_FASTCHARGE @@ -0,0 +1 @@ +# CONFIG_APPLE_MFI_FASTCHARGE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_APPLICOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_APPLICOM new file mode 100644 index 000000000000..0b3abc60a2b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_APPLICOM @@ -0,0 +1 @@ +# CONFIG_APPLICOM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AQUANTIA_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_AQUANTIA_PHY new file mode 100644 index 000000000000..81c48619ef1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AQUANTIA_PHY @@ -0,0 +1 @@ +CONFIG_AQUANTIA_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE new file mode 100644 index 000000000000..f26f165028cb --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE @@ -0,0 +1 @@ +CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_DMA_ADDR_T_64BIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_DMA_ADDR_T_64BIT new file mode 100644 index 000000000000..595f734a0ec0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_DMA_ADDR_T_64BIT @@ -0,0 +1 @@ +CONFIG_ARCH_DMA_ADDR_T_64BIT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION new file mode 100644 index 000000000000..fae543d1a0c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION @@ -0,0 +1 @@ +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG new file mode 100644 index 000000000000..10b01946ac51 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG @@ -0,0 +1 @@ +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE new file mode 100644 index 000000000000..9be2daab643a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE @@ -0,0 +1 @@ +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK new file mode 100644 index 000000000000..418099763e47 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK @@ -0,0 +1 @@ +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_THP_MIGRATION 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_THP_MIGRATION new file mode 100644 index 000000000000..400a50b711c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_ENABLE_THP_MIGRATION @@ -0,0 +1 @@ +CONFIG_ARCH_ENABLE_THP_MIGRATION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE new file mode 100644 index 000000000000..921cab65c036 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CACHE_LINE_SIZE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CACHE_LINE_SIZE new file mode 100644 index 000000000000..196fda086baa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CACHE_LINE_SIZE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_COPY_MC b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_COPY_MC new file mode 100644 index 000000000000..83b47f0f9520 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_COPY_MC @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_COPY_MC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CPU_RESCTRL b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CPU_RESCTRL new file mode 100644 index 000000000000..6cd1474b8d32 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CPU_RESCTRL @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_CPU_RESCTRL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CURRENT_STACK_POINTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CURRENT_STACK_POINTER new file mode 100644 index 000000000000..83d84d344041 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_CURRENT_STACK_POINTER @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_VIRTUAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_VIRTUAL new file mode 100644 index 000000000000..77cf47530ad3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_VIRTUAL @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE new file mode 100644 index 000000000000..b2191b6b813e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_WX b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_WX new file mode 100644 index 000000000000..f6c9583f6153 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_DEBUG_WX @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_DEBUG_WX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_ELF_RANDOMIZE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_ELF_RANDOMIZE new file mode 100644 index 000000000000..7a97d06d8c7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_ELF_RANDOMIZE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_FAST_MULTIPLIER b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_FAST_MULTIPLIER new file mode 100644 index 000000000000..25a951619ac2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_FAST_MULTIPLIER @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_FORTIFY_SOURCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_FORTIFY_SOURCE new file mode 100644 index 000000000000..90349d67d492 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_FORTIFY_SOURCE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_GCOV_PROFILE_ALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_GCOV_PROFILE_ALL new file mode 100644 index 000000000000..ad8dc5ba8b13 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_GCOV_PROFILE_ALL @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_GIGANTIC_PAGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_GIGANTIC_PAGE new file mode 100644 index 000000000000..a99d8fc5735f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_GIGANTIC_PAGE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_KCOV b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_KCOV new file mode 100644 index 000000000000..ccdeb82355b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_KCOV @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_KCOV=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE new file mode 100644 index 000000000000..5dfa4eb1546f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS new file mode 100644 index 000000000000..297716642373 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE new file mode 100644 index 000000000000..8efdf990ab68 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PMEM_API b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PMEM_API new file mode 100644 index 000000000000..fa1d33faebf0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PMEM_API @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_PMEM_API=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PTE_DEVMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PTE_DEVMAP new file mode 100644 index 000000000000..4b170a7c5dbd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PTE_DEVMAP @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_PTE_DEVMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PTE_SPECIAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PTE_SPECIAL new file mode 100644 index 000000000000..571587567b8e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_PTE_SPECIAL @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_PTE_SPECIAL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SET_DIRECT_MAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SET_DIRECT_MAP new file mode 100644 index 000000000000..9530949b5df1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SET_DIRECT_MAP @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SET_MEMORY b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SET_MEMORY new file mode 100644 index 000000000000..18f175619de4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SET_MEMORY @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_SET_MEMORY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_STRICT_KERNEL_RWX b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_STRICT_KERNEL_RWX new file mode 100644 index 000000000000..20f381cf9c0a --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_STRICT_KERNEL_RWX @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_STRICT_MODULE_RWX b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_STRICT_MODULE_RWX new file mode 100644 index 000000000000..569e82edce5a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_STRICT_MODULE_RWX @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SYSCALL_WRAPPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SYSCALL_WRAPPER new file mode 100644 index 000000000000..de5d0e76570f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_SYSCALL_WRAPPER @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE new file mode 100644 index 000000000000..e0e96531f64f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL new file mode 100644 index 000000000000..c2e0af431350 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG new file mode 100644 index 000000000000..4f2494e61441 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG @@ -0,0 +1 @@ +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HIBERNATION_HEADER 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HIBERNATION_HEADER new file mode 100644 index 000000000000..14a5d8705579 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HIBERNATION_HEADER @@ -0,0 +1 @@ +CONFIG_ARCH_HIBERNATION_HEADER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HIBERNATION_POSSIBLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HIBERNATION_POSSIBLE new file mode 100644 index 000000000000..db0d6e6eb38d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_HIBERNATION_POSSIBLE @@ -0,0 +1 @@ +CONFIG_ARCH_HIBERNATION_POSSIBLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE new file mode 100644 index 000000000000..5d65abe41431 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE @@ -0,0 +1 @@ +CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX new file mode 100644 index 000000000000..f8be112fd91f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_PROC_KCORE_TEXT b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_PROC_KCORE_TEXT new file mode 100644 index 000000000000..67d0d787bf08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_PROC_KCORE_TEXT @@ -0,0 +1 @@ +CONFIG_ARCH_PROC_KCORE_TEXT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SELECTS_KEXEC_FILE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SELECTS_KEXEC_FILE new file mode 100644 index 000000000000..29027c6066ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SELECTS_KEXEC_FILE @@ -0,0 +1 @@ +CONFIG_ARCH_SELECTS_KEXEC_FILE=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SPARSEMEM_ENABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SPARSEMEM_ENABLE new file mode 100644 index 000000000000..0ff8e294ce87 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SPARSEMEM_ENABLE @@ -0,0 +1 @@ +CONFIG_ARCH_SPARSEMEM_ENABLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_STACKWALK b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_STACKWALK new file mode 100644 index 000000000000..a78d75b4de0a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_STACKWALK @@ -0,0 +1 @@ +CONFIG_ARCH_STACKWALK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_ACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_ACPI new file mode 100644 index 000000000000..e9edfd8a8836 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_ACPI @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_ACPI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_ATOMIC_RMW b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_ATOMIC_RMW new file mode 100644 index 000000000000..285625cdcc9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_ATOMIC_RMW @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_CFI_CLANG b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_CFI_CLANG new file mode 100644 index 000000000000..f6bc5a7b3e66 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_CFI_CLANG @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_CRASH_DUMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_CRASH_DUMP new file mode 100644 index 000000000000..88b6c6a18b85 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_CRASH_DUMP @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC new file mode 100644 index 000000000000..6f142bb2a4b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_INT128 b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_INT128 new file mode 100644 index 000000000000..c57089673001 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_INT128 @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_INT128=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC new file mode 100644 index 000000000000..599138ae4cd0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_KEXEC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC_FILE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC_FILE new file mode 100644 index 000000000000..e3b5a0f4d627 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC_FILE @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC_SIG b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC_SIG new file mode 100644 index 000000000000..0f90d8751280 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_KEXEC_SIG @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_LTO_CLANG b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_LTO_CLANG new file mode 100644 index 000000000000..29dd600988c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_LTO_CLANG @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y 
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN new file mode 100644 index 000000000000..86a79b3a911f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE new file mode 100644 index 000000000000..08c47ee88379 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_NUMA_BALANCING b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_NUMA_BALANCING new file mode 100644 index 000000000000..5880157a7407 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_NUMA_BALANCING @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK new file mode 100644 index 000000000000..766a7a058d0f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK new file mode 100644 index 000000000000..8f104442a665 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_UPROBES b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_UPROBES new file mode 100644 index 000000000000..197bae34d98b --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUPPORTS_UPROBES @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_UPROBES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUSPEND_POSSIBLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUSPEND_POSSIBLE new file mode 100644 index 000000000000..d90dc4432ce3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_SUSPEND_POSSIBLE @@ -0,0 +1 @@ +CONFIG_ARCH_SUSPEND_POSSIBLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USES_HIGH_VMA_FLAGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USES_HIGH_VMA_FLAGS new file mode 100644 index 000000000000..32a337c700ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USES_HIGH_VMA_FLAGS @@ -0,0 +1 @@ +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_CMPXCHG_LOCKREF b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_CMPXCHG_LOCKREF new file mode 100644 index 000000000000..9af3231c3587 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_CMPXCHG_LOCKREF @@ -0,0 +1 @@ +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_MEMREMAP_PROT b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_MEMREMAP_PROT new file mode 100644 index 000000000000..b52fb384d068 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_MEMREMAP_PROT @@ -0,0 +1 @@ +CONFIG_ARCH_USE_MEMREMAP_PROT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_MEMTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_MEMTEST new file mode 100644 index 000000000000..d6699602bd95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_MEMTEST @@ -0,0 +1 @@ +CONFIG_ARCH_USE_MEMTEST=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_QUEUED_RWLOCKS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_QUEUED_RWLOCKS new file mode 100644 index 000000000000..5c3a81a683fe --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_QUEUED_RWLOCKS @@ -0,0 +1 @@ +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_QUEUED_SPINLOCKS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_QUEUED_SPINLOCKS new file mode 100644 index 000000000000..3c2e0bd4dd52 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_QUEUED_SPINLOCKS @@ -0,0 +1 @@ +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_SYM_ANNOTATIONS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_SYM_ANNOTATIONS new file mode 100644 index 000000000000..20828e090abd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_USE_SYM_ANNOTATIONS @@ -0,0 +1 @@ +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANTS_NO_INSTR b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANTS_NO_INSTR new file mode 100644 index 000000000000..c43bbe810ca4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANTS_NO_INSTR @@ -0,0 +1 @@ +CONFIG_ARCH_WANTS_NO_INSTR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANTS_THP_SWAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANTS_THP_SWAP new file mode 100644 index 000000000000..5bc9663c1ebe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANTS_THP_SWAP @@ -0,0 +1 @@ +CONFIG_ARCH_WANTS_THP_SWAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH new file mode 100644 index 000000000000..22d97ed5e0fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH @@ -0,0 +1 @@ +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION new file mode 
100644 index 000000000000..ad11043d0787 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION @@ -0,0 +1 @@ +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_DEFAULT_BPF_JIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_DEFAULT_BPF_JIT new file mode 100644 index 000000000000..22d98b1329fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_DEFAULT_BPF_JIT @@ -0,0 +1 @@ +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_HUGE_PMD_SHARE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_HUGE_PMD_SHARE new file mode 100644 index 000000000000..a7682c3bd51f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_HUGE_PMD_SHARE @@ -0,0 +1 @@ +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_LD_ORPHAN_WARN b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_LD_ORPHAN_WARN new file mode 100644 index 000000000000..83e0c287372d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_LD_ORPHAN_WARN @@ -0,0 +1 @@ +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_PMD_MKWRITE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_PMD_MKWRITE new file mode 100644 index 000000000000..43207356f5a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCH_WANT_PMD_MKWRITE @@ -0,0 +1 @@ +CONFIG_ARCH_WANT_PMD_MKWRITE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCNET b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCNET new file mode 100644 index 000000000000..8c988b7ca7d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ARCNET @@ -0,0 +1 @@ +# CONFIG_ARCNET is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ASM_MODVERSIONS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASM_MODVERSIONS new file mode 100644 index 
000000000000..a731c3aa8a26 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASM_MODVERSIONS @@ -0,0 +1 @@ +CONFIG_ASM_MODVERSIONS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ASN1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASN1 new file mode 100644 index 000000000000..f414b61f85ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASN1 @@ -0,0 +1 @@ +CONFIG_ASN1=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ASN1_ENCODER b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASN1_ENCODER new file mode 100644 index 000000000000..b388e0b7c9b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASN1_ENCODER @@ -0,0 +1 @@ +CONFIG_ASN1_ENCODER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ASSOCIATIVE_ARRAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASSOCIATIVE_ARRAY new file mode 100644 index 000000000000..fa19bf447739 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ASSOCIATIVE_ARRAY @@ -0,0 +1 @@ +CONFIG_ASSOCIATIVE_ARRAY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_HAS_NON_CONST_LEB128 b/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_HAS_NON_CONST_LEB128 new file mode 100644 index 000000000000..7aad62d92b3b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_HAS_NON_CONST_LEB128 @@ -0,0 +1 @@ +CONFIG_AS_HAS_NON_CONST_LEB128=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_IS_GNU b/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_IS_GNU new file mode 100644 index 000000000000..17f9e6ef9e1f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_IS_GNU @@ -0,0 +1 @@ +CONFIG_AS_IS_GNU=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_VERSION new file mode 100644 index 000000000000..c228e3f926da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AS_VERSION @@ -0,0 +1 @@ +CONFIG_AS_VERSION=25000 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATALK 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATALK new file mode 100644 index 000000000000..577083a7c02d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATALK @@ -0,0 +1 @@ +# CONFIG_ATALK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATARI_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATARI_PARTITION new file mode 100644 index 000000000000..c4f07279cad8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATARI_PARTITION @@ -0,0 +1 @@ +# CONFIG_ATARI_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1 new file mode 100644 index 000000000000..ed8b8ff63db8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1 @@ -0,0 +1 @@ +CONFIG_ATL1=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1C b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1C new file mode 100644 index 000000000000..391eb51ee43c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1C @@ -0,0 +1 @@ +CONFIG_ATL1C=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1E b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1E new file mode 100644 index 000000000000..e2ce74691bfe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATL1E @@ -0,0 +1 @@ +CONFIG_ATL1E=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM new file mode 100644 index 000000000000..70dd368fa146 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM @@ -0,0 +1 @@ +CONFIG_ATM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_BR2684 b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_BR2684 new file mode 100644 index 000000000000..65ae8b6ac70c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_BR2684 @@ -0,0 +1 @@ +CONFIG_ATM_BR2684=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_BR2684_IPFILTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_BR2684_IPFILTER new file 
mode 100644 index 000000000000..655294fd4cd1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_BR2684_IPFILTER @@ -0,0 +1 @@ +# CONFIG_ATM_BR2684_IPFILTER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_CLIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_CLIP new file mode 100644 index 000000000000..1474a19781ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_CLIP @@ -0,0 +1 @@ +CONFIG_ATM_CLIP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_CLIP_NO_ICMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_CLIP_NO_ICMP new file mode 100644 index 000000000000..011e40340c41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_CLIP_NO_ICMP @@ -0,0 +1 @@ +# CONFIG_ATM_CLIP_NO_ICMP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_LANE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_LANE new file mode 100644 index 000000000000..b7578abbd535 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_LANE @@ -0,0 +1 @@ +CONFIG_ATM_LANE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_MPOA b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_MPOA new file mode 100644 index 000000000000..75378a2c5660 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ATM_MPOA @@ -0,0 +1 @@ +# CONFIG_ATM_MPOA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AUDITSYSCALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_AUDITSYSCALL new file mode 100644 index 000000000000..7ae4f9cd89af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AUDITSYSCALL @@ -0,0 +1 @@ +CONFIG_AUDITSYSCALL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AUXDISPLAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_AUXDISPLAY new file mode 100644 index 000000000000..36da27ae2bbd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AUXDISPLAY @@ -0,0 +1 @@ +# CONFIG_AUXDISPLAY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_AX88796B_PHY 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_AX88796B_PHY new file mode 100644 index 000000000000..ee2e42ff289b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_AX88796B_PHY @@ -0,0 +1 @@ +CONFIG_AX88796B_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_B44 b/anolis/configs/L2-OPTIONAL/default/CONFIG_B44 new file mode 100644 index 000000000000..f1e41b3072bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_B44 @@ -0,0 +1 @@ +# CONFIG_B44 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ADP8860 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ADP8860 new file mode 100644 index 000000000000..401ceb236ffe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ADP8860 @@ -0,0 +1 @@ +# CONFIG_BACKLIGHT_ADP8860 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ADP8870 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ADP8870 new file mode 100644 index 000000000000..6d0c88caf802 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ADP8870 @@ -0,0 +1 @@ +# CONFIG_BACKLIGHT_ADP8870 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ARCXCNN b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ARCXCNN new file mode 100644 index 000000000000..9371c26efa3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_ARCXCNN @@ -0,0 +1 @@ +# CONFIG_BACKLIGHT_ARCXCNN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_BD6107 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_BD6107 new file mode 100644 index 000000000000..07df4bf1541c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_BD6107 @@ -0,0 +1 @@ +# CONFIG_BACKLIGHT_BD6107 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_CLASS_DEVICE b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_CLASS_DEVICE new file mode 100644 index 000000000000..c37961b4c8f9 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_CLASS_DEVICE @@ -0,0 +1 @@ +CONFIG_BACKLIGHT_CLASS_DEVICE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_KTD253 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_KTD253 new file mode 100644 index 000000000000..5f66e1f9d617 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_KTD253 @@ -0,0 +1 @@ +# CONFIG_BACKLIGHT_KTD253 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_KTZ8866 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_KTZ8866 new file mode 100644 index 000000000000..f9355b9ee024 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_KTZ8866 @@ -0,0 +1 @@ +# CONFIG_BACKLIGHT_KTZ8866 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LM3630A b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LM3630A new file mode 100644 index 000000000000..1cc4fc66b60a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LM3630A @@ -0,0 +1 @@ +# CONFIG_BACKLIGHT_LM3630A is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LM3639 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LM3639 new file mode 100644 index 000000000000..cf301c4f996a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LM3639 @@ -0,0 +1 @@ +# CONFIG_BACKLIGHT_LM3639 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LP855X b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LP855X new file mode 100644 index 000000000000..bf846f83255a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LP855X @@ -0,0 +1 @@ +CONFIG_BACKLIGHT_LP855X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LV5207LP b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LV5207LP new file mode 100644 index 000000000000..ed80d7e6601e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_LV5207LP @@ -0,0 +1 @@ +# 
CONFIG_BACKLIGHT_LV5207LP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_QCOM_WLED b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_QCOM_WLED new file mode 100644 index 000000000000..d1e1fdbc7d6b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKLIGHT_QCOM_WLED @@ -0,0 +1 @@ +# CONFIG_BACKLIGHT_QCOM_WLED is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKTRACE_SELF_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKTRACE_SELF_TEST new file mode 100644 index 000000000000..d8da786506b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BACKTRACE_SELF_TEST @@ -0,0 +1 @@ +# CONFIG_BACKTRACE_SELF_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BAREUDP b/anolis/configs/L2-OPTIONAL/default/CONFIG_BAREUDP new file mode 100644 index 000000000000..ee554a97a7b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BAREUDP @@ -0,0 +1 @@ +# CONFIG_BAREUDP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BASE_SMALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_BASE_SMALL new file mode 100644 index 000000000000..ae5e778e0a49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BASE_SMALL @@ -0,0 +1 @@ +CONFIG_BASE_SMALL=0 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATMAN_ADV b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATMAN_ADV new file mode 100644 index 000000000000..8d45d826f6cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATMAN_ADV @@ -0,0 +1 @@ +# CONFIG_BATMAN_ADV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_BQ27XXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_BQ27XXX new file mode 100644 index 000000000000..4ddb243bae97 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_BQ27XXX @@ -0,0 +1 @@ +# CONFIG_BATTERY_BQ27XXX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_CW2015 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_CW2015 
new file mode 100644 index 000000000000..2c8831e4d2c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_CW2015 @@ -0,0 +1 @@ +# CONFIG_BATTERY_CW2015 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2780 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2780 new file mode 100644 index 000000000000..508ab19d9c93 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2780 @@ -0,0 +1 @@ +# CONFIG_BATTERY_DS2780 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2781 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2781 new file mode 100644 index 000000000000..f35c00807c9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2781 @@ -0,0 +1 @@ +# CONFIG_BATTERY_DS2781 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2782 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2782 new file mode 100644 index 000000000000..126e3105af73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_DS2782 @@ -0,0 +1 @@ +# CONFIG_BATTERY_DS2782 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_GAUGE_LTC2941 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_GAUGE_LTC2941 new file mode 100644 index 000000000000..a4a35e3374ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_GAUGE_LTC2941 @@ -0,0 +1 @@ +# CONFIG_BATTERY_GAUGE_LTC2941 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_GOLDFISH b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_GOLDFISH new file mode 100644 index 000000000000..75b9c00e9bfc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_GOLDFISH @@ -0,0 +1 @@ +# CONFIG_BATTERY_GOLDFISH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_MAX17040 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_MAX17040 new file mode 100644 index 000000000000..074c2386b1cf --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_MAX17040 @@ -0,0 +1 @@ +# CONFIG_BATTERY_MAX17040 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_MAX17042 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_MAX17042 new file mode 100644 index 000000000000..19ff6af7a079 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_MAX17042 @@ -0,0 +1 @@ +# CONFIG_BATTERY_MAX17042 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_RT5033 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_RT5033 new file mode 100644 index 000000000000..a86c2de54c5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_RT5033 @@ -0,0 +1 @@ +# CONFIG_BATTERY_RT5033 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_SAMSUNG_SDI b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_SAMSUNG_SDI new file mode 100644 index 000000000000..b68d54359323 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_SAMSUNG_SDI @@ -0,0 +1 @@ +# CONFIG_BATTERY_SAMSUNG_SDI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_SBS b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_SBS new file mode 100644 index 000000000000..c624a2f7fa60 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_SBS @@ -0,0 +1 @@ +# CONFIG_BATTERY_SBS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_UG3105 b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_UG3105 new file mode 100644 index 000000000000..2ef40f78c7df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BATTERY_UG3105 @@ -0,0 +1 @@ +# CONFIG_BATTERY_UG3105 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM54140_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM54140_PHY new file mode 100644 index 000000000000..ebd2ac9225ca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM54140_PHY @@ -0,0 +1 @@ +# CONFIG_BCM54140_PHY is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM7XXX_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM7XXX_PHY new file mode 100644 index 000000000000..09c4987ca6c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM7XXX_PHY @@ -0,0 +1 @@ +CONFIG_BCM7XXX_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM84881_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM84881_PHY new file mode 100644 index 000000000000..6f472549d5a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM84881_PHY @@ -0,0 +1 @@ +# CONFIG_BCM84881_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM87XX_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM87XX_PHY new file mode 100644 index 000000000000..b9dd7faba7dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM87XX_PHY @@ -0,0 +1 @@ +CONFIG_BCM87XX_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA new file mode 100644 index 000000000000..d9d0a9bbc4aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA @@ -0,0 +1 @@ +CONFIG_BCMA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DEBUG new file mode 100644 index 000000000000..c5cc6a5cfd19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DEBUG @@ -0,0 +1 @@ +# CONFIG_BCMA_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_GMAC_CMN b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_GMAC_CMN new file mode 100644 index 000000000000..501bc2aecb6e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_GMAC_CMN @@ -0,0 +1 @@ +CONFIG_BCMA_DRIVER_GMAC_CMN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_GPIO new file mode 100644 index 000000000000..ae73e2287533 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_GPIO @@ -0,0 +1 @@ +CONFIG_BCMA_DRIVER_GPIO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_PCI new file mode 100644 index 000000000000..92d35164df86 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_DRIVER_PCI @@ -0,0 +1 @@ +CONFIG_BCMA_DRIVER_PCI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI new file mode 100644 index 000000000000..a37624a12da8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI @@ -0,0 +1 @@ +CONFIG_BCMA_HOST_PCI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI_POSSIBLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI_POSSIBLE new file mode 100644 index 000000000000..204a2cca7a6e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_PCI_POSSIBLE @@ -0,0 +1 @@ +CONFIG_BCMA_HOST_PCI_POSSIBLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_SOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_SOC new file mode 100644 index 000000000000..0c3f2477f3cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_HOST_SOC @@ -0,0 +1 @@ +# CONFIG_BCMA_HOST_SOC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_POSSIBLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_POSSIBLE new file mode 100644 index 000000000000..aa4a9269007a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCMA_POSSIBLE @@ -0,0 +1 @@ +CONFIG_BCMA_POSSIBLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_KONA_USB2_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_KONA_USB2_PHY new file mode 100644 index 000000000000..787233648ced --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_KONA_USB2_PHY @@ -0,0 +1 @@ +# CONFIG_BCM_KONA_USB2_PHY is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_NET_PHYLIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_NET_PHYLIB new file mode 100644 index 000000000000..a31aeb4ac26b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_NET_PHYLIB @@ -0,0 +1 @@ +CONFIG_BCM_NET_PHYLIB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_NET_PHYPTP b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_NET_PHYPTP new file mode 100644 index 000000000000..2946b54059ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_NET_PHYPTP @@ -0,0 +1 @@ +CONFIG_BCM_NET_PHYPTP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_VK b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_VK new file mode 100644 index 000000000000..17a577df57bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCM_VK @@ -0,0 +1 @@ +# CONFIG_BCM_VK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BE2ISCSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_BE2ISCSI new file mode 100644 index 000000000000..d27a7a0582f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BE2ISCSI @@ -0,0 +1 @@ +CONFIG_BE2ISCSI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BEFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_BEFS_FS new file mode 100644 index 000000000000..cdabc728df98 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BEFS_FS @@ -0,0 +1 @@ +# CONFIG_BEFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_BFS_FS new file mode 100644 index 000000000000..e667a937b15e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BFS_FS @@ -0,0 +1 @@ +# CONFIG_BFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BINARY_PRINTF b/anolis/configs/L2-OPTIONAL/default/CONFIG_BINARY_PRINTF new file mode 100644 index 000000000000..3dd24e0191b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BINARY_PRINTF @@ -0,0 +1 @@ +CONFIG_BINARY_PRINTF=y diff 
--git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BITREVERSE b/anolis/configs/L2-OPTIONAL/default/CONFIG_BITREVERSE new file mode 100644 index 000000000000..22507f9b83bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BITREVERSE @@ -0,0 +1 @@ +CONFIG_BITREVERSE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_FC_APPID b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_FC_APPID new file mode 100644 index 000000000000..a3c81418b4e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_FC_APPID @@ -0,0 +1 @@ +# CONFIG_BLK_CGROUP_FC_APPID is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_IOPRIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_IOPRIO new file mode 100644 index 000000000000..39c0d13ff4b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_IOPRIO @@ -0,0 +1 @@ +# CONFIG_BLK_CGROUP_IOPRIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_PUNT_BIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_PUNT_BIO new file mode 100644 index 000000000000..5e25b9cba096 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_PUNT_BIO @@ -0,0 +1 @@ +CONFIG_BLK_CGROUP_PUNT_BIO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_RWSTAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_RWSTAT new file mode 100644 index 000000000000..5e0f1c36ddeb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_CGROUP_RWSTAT @@ -0,0 +1 @@ +CONFIG_BLK_CGROUP_RWSTAT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_3W_XXXX_RAID b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_3W_XXXX_RAID new file mode 100644 index 000000000000..0fbe95ebc376 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_3W_XXXX_RAID @@ -0,0 +1 @@ +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_BSG_COMMON 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_BSG_COMMON new file mode 100644 index 000000000000..b645b9f74708 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_BSG_COMMON @@ -0,0 +1 @@ +CONFIG_BLK_DEV_BSG_COMMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_DM_BUILTIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_DM_BUILTIN new file mode 100644 index 000000000000..2cd73581f8ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_DEV_DM_BUILTIN @@ -0,0 +1 @@ +CONFIG_BLK_DEV_DM_BUILTIN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_ICQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_ICQ new file mode 100644 index 000000000000..e76683c61f0f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_ICQ @@ -0,0 +1 @@ +CONFIG_BLK_ICQ=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_INLINE_ENCRYPTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_INLINE_ENCRYPTION new file mode 100644 index 000000000000..3f642705f0e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_INLINE_ENCRYPTION @@ -0,0 +1 @@ +# CONFIG_BLK_INLINE_ENCRYPTION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_MQ_STACKING b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_MQ_STACKING new file mode 100644 index 000000000000..6caf8dfba303 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_MQ_STACKING @@ -0,0 +1 @@ +CONFIG_BLK_MQ_STACKING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_PM b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_PM new file mode 100644 index 000000000000..279d2c44a337 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_PM @@ -0,0 +1 @@ +CONFIG_BLK_PM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_SED_OPAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_SED_OPAL new file mode 100644 index 000000000000..e0c2a4ef6657 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLK_SED_OPAL @@ -0,0 +1 
@@ +# CONFIG_BLK_SED_OPAL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLOCK_HOLDER_DEPRECATED b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLOCK_HOLDER_DEPRECATED new file mode 100644 index 000000000000..7dde89264318 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLOCK_HOLDER_DEPRECATED @@ -0,0 +1 @@ +CONFIG_BLOCK_HOLDER_DEPRECATED=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BLOCK_LEGACY_AUTOLOAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLOCK_LEGACY_AUTOLOAD new file mode 100644 index 000000000000..3e7433ac6ee1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BLOCK_LEGACY_AUTOLOAD @@ -0,0 +1 @@ +CONFIG_BLOCK_LEGACY_AUTOLOAD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BOOTPARAM_HUNG_TASK_PANIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_BOOTPARAM_HUNG_TASK_PANIC new file mode 100644 index 000000000000..93452648f2f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BOOTPARAM_HUNG_TASK_PANIC @@ -0,0 +1 @@ +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BPF_JIT_DEFAULT_ON b/anolis/configs/L2-OPTIONAL/default/CONFIG_BPF_JIT_DEFAULT_ON new file mode 100644 index 000000000000..cb9ab4f3b6ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BPF_JIT_DEFAULT_ON @@ -0,0 +1 @@ +CONFIG_BPF_JIT_DEFAULT_ON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BPF_PRELOAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_BPF_PRELOAD new file mode 100644 index 000000000000..71cb0ca98e19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BPF_PRELOAD @@ -0,0 +1 @@ +# CONFIG_BPF_PRELOAD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BQL b/anolis/configs/L2-OPTIONAL/default/CONFIG_BQL new file mode 100644 index 000000000000..726b6e8e2bd2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BQL @@ -0,0 +1 @@ +CONFIG_BQL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BRIDGE_CFM 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_BRIDGE_CFM new file mode 100644 index 000000000000..d3175e47af57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BRIDGE_CFM @@ -0,0 +1 @@ +# CONFIG_BRIDGE_CFM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BROADCOM_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BROADCOM_PHY new file mode 100644 index 000000000000..a4c3dcde035c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BROADCOM_PHY @@ -0,0 +1 @@ +CONFIG_BROADCOM_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BSD_DISKLABEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_BSD_DISKLABEL new file mode 100644 index 000000000000..980fa2b75f3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BSD_DISKLABEL @@ -0,0 +1 @@ +CONFIG_BSD_DISKLABEL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTREE b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTREE new file mode 100644 index 000000000000..592dbd5ee8df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTREE @@ -0,0 +1 @@ +CONFIG_BTREE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_ASSERT b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_ASSERT new file mode 100644 index 000000000000..a4f8b08173a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_ASSERT @@ -0,0 +1 @@ +# CONFIG_BTRFS_ASSERT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_DEBUG new file mode 100644 index 000000000000..3e69c0f7dfef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_DEBUG @@ -0,0 +1 @@ +# CONFIG_BTRFS_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_CHECK_INTEGRITY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_CHECK_INTEGRITY new file mode 100644 index 000000000000..018480b8bc38 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_CHECK_INTEGRITY @@ -0,0 +1 @@ +# 
CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_POSIX_ACL b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_POSIX_ACL new file mode 100644 index 000000000000..c416c3fff147 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_POSIX_ACL @@ -0,0 +1 @@ +# CONFIG_BTRFS_FS_POSIX_ACL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_REF_VERIFY b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_REF_VERIFY new file mode 100644 index 000000000000..2fb0f884e4b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_REF_VERIFY @@ -0,0 +1 @@ +# CONFIG_BTRFS_FS_REF_VERIFY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_RUN_SANITY_TESTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_RUN_SANITY_TESTS new file mode 100644 index 000000000000..b3ae62702352 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTRFS_FS_RUN_SANITY_TESTS @@ -0,0 +1 @@ +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BTT b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTT new file mode 100644 index 000000000000..9cfddf1b90f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BTT @@ -0,0 +1 @@ +CONFIG_BTT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BUFFER_HEAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_BUFFER_HEAD new file mode 100644 index 000000000000..dc047db31705 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BUFFER_HEAD @@ -0,0 +1 @@ +CONFIG_BUFFER_HEAD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BUILDTIME_TABLE_SORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_BUILDTIME_TABLE_SORT new file mode 100644 index 000000000000..9c2116a57e92 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BUILDTIME_TABLE_SORT @@ -0,0 +1 @@ +CONFIG_BUILDTIME_TABLE_SORT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_C2PORT 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_C2PORT new file mode 100644 index 000000000000..98a99aa3e28f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_C2PORT @@ -0,0 +1 @@ +# CONFIG_C2PORT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CACHEFILES_ERROR_INJECTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_CACHEFILES_ERROR_INJECTION new file mode 100644 index 000000000000..d1256b3e941a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CACHEFILES_ERROR_INJECTION @@ -0,0 +1 @@ +# CONFIG_CACHEFILES_ERROR_INJECTION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CADENCE_WATCHDOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CADENCE_WATCHDOG new file mode 100644 index 000000000000..3892db3284e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CADENCE_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_CADENCE_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CAIF b/anolis/configs/L2-OPTIONAL/default/CONFIG_CAIF new file mode 100644 index 000000000000..e484697633a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CAIF @@ -0,0 +1 @@ +# CONFIG_CAIF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_CAN new file mode 100644 index 000000000000..37ca11c95227 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CAN @@ -0,0 +1 @@ +# CONFIG_CAN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CARDBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CARDBUS new file mode 100644 index 000000000000..398c236738e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CARDBUS @@ -0,0 +1 @@ +CONFIG_CARDBUS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CAVIUM_PTP b/anolis/configs/L2-OPTIONAL/default/CONFIG_CAVIUM_PTP new file mode 100644 index 000000000000..61fb6b41a947 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CAVIUM_PTP @@ -0,0 +1 @@ +CONFIG_CAVIUM_PTP=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_CORE new file mode 100644 index 000000000000..88c0c9f9c81e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_CORE @@ -0,0 +1 @@ +CONFIG_CB710_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_DEBUG new file mode 100644 index 000000000000..fc70a51d0ac8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_DEBUG @@ -0,0 +1 @@ +# CONFIG_CB710_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_DEBUG_ASSUMPTIONS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_DEBUG_ASSUMPTIONS new file mode 100644 index 000000000000..8bf7e308822a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CB710_DEBUG_ASSUMPTIONS @@ -0,0 +1 @@ +CONFIG_CB710_DEBUG_ASSUMPTIONS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_CAN_LINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_CAN_LINK new file mode 100644 index 000000000000..e962c4f2a47c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_CAN_LINK @@ -0,0 +1 @@ +CONFIG_CC_CAN_LINK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_CAN_LINK_STATIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_CAN_LINK_STATIC new file mode 100644 index 000000000000..a359402c7cba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_CAN_LINK_STATIC @@ -0,0 +1 @@ +CONFIG_CC_CAN_LINK_STATIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_GOTO_OUTPUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_GOTO_OUTPUT new file mode 100644 index 000000000000..9a33905ada1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_GOTO_OUTPUT @@ -0,0 +1 @@ +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT new 
file mode 100644 index 000000000000..77f479fa9bbb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT @@ -0,0 +1 @@ +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_INLINE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_INLINE new file mode 100644 index 000000000000..4b72546c10f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ASM_INLINE @@ -0,0 +1 @@ +CONFIG_CC_HAS_ASM_INLINE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN new file mode 100644 index 000000000000..4bc8c8dcdf97 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN @@ -0,0 +1 @@ +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO new file mode 100644 index 000000000000..21bfd6db4efc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO @@ -0,0 +1 @@ +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE new file mode 100644 index 000000000000..f5ec022f0d15 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE @@ -0,0 +1 @@ +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_INT128 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_INT128 new file mode 100644 index 000000000000..1b98764bfea3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_INT128 @@ -0,0 +1 @@ +CONFIG_CC_HAS_INT128=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_KASAN_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_KASAN_GENERIC 
new file mode 100644 index 000000000000..e0e19735edd6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_KASAN_GENERIC @@ -0,0 +1 @@ +CONFIG_CC_HAS_KASAN_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_KASAN_SW_TAGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_KASAN_SW_TAGS new file mode 100644 index 000000000000..530edb74379a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_KASAN_SW_TAGS @@ -0,0 +1 @@ +CONFIG_CC_HAS_KASAN_SW_TAGS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_NO_PROFILE_FN_ATTR b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_NO_PROFILE_FN_ATTR new file mode 100644 index 000000000000..c3356979cd40 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_NO_PROFILE_FN_ATTR @@ -0,0 +1 @@ +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_RANDSTRUCT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_RANDSTRUCT new file mode 100644 index 000000000000..f28e37e10019 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_RANDSTRUCT @@ -0,0 +1 @@ +CONFIG_CC_HAS_RANDSTRUCT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_SANCOV_TRACE_PC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_SANCOV_TRACE_PC new file mode 100644 index 000000000000..a0988149d815 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_SANCOV_TRACE_PC @@ -0,0 +1 @@ +CONFIG_CC_HAS_SANCOV_TRACE_PC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS new file mode 100644 index 000000000000..1f4a4f60cfd9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS @@ -0,0 +1 @@ +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ZERO_CALL_USED_REGS 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ZERO_CALL_USED_REGS new file mode 100644 index 000000000000..d1d0c3a9658f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_ZERO_CALL_USED_REGS @@ -0,0 +1 @@ +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_IMPLICIT_FALLTHROUGH b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_IMPLICIT_FALLTHROUGH new file mode 100644 index 000000000000..aa8a121fae8d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_IMPLICIT_FALLTHROUGH @@ -0,0 +1 @@ +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_IS_GCC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_IS_GCC new file mode 100644 index 000000000000..7698d68c208a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_IS_GCC @@ -0,0 +1 @@ +CONFIG_CC_IS_GCC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_NO_ARRAY_BOUNDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_NO_ARRAY_BOUNDS new file mode 100644 index 000000000000..aba54617fc73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_NO_ARRAY_BOUNDS @@ -0,0 +1 @@ +CONFIG_CC_NO_ARRAY_BOUNDS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_VERSION_TEXT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_VERSION_TEXT new file mode 100644 index 000000000000..e9f8043b92e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_VERSION_TEXT @@ -0,0 +1 @@ +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CEC_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEC_CORE new file mode 100644 index 000000000000..3b38aad569c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEC_CORE @@ -0,0 +1 @@ +CONFIG_CEC_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB new file mode 100644 index 000000000000..5f539b4b6985 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB @@ -0,0 +1 @@ +CONFIG_CEPH_LIB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB_PRETTYDEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB_PRETTYDEBUG new file mode 100644 index 000000000000..57958e0f5ae5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB_PRETTYDEBUG @@ -0,0 +1 @@ +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB_USE_DNS_RESOLVER b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB_USE_DNS_RESOLVER new file mode 100644 index 000000000000..876075b46f77 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CEPH_LIB_USE_DNS_RESOLVER @@ -0,0 +1 @@ +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211 new file mode 100644 index 000000000000..c61c420dda9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211 @@ -0,0 +1 @@ +CONFIG_CFG80211=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_CRDA_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_CRDA_SUPPORT new file mode 100644 index 000000000000..51f113cca60f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_CRDA_SUPPORT @@ -0,0 +1 @@ +CONFIG_CFG80211_CRDA_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEBUGFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEBUGFS new file mode 100644 index 000000000000..fbc90925d667 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_CFG80211_DEBUGFS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEFAULT_PS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEFAULT_PS new file mode 100644 index 000000000000..89fd54c1bed8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEFAULT_PS @@ -0,0 +1 @@ 
+CONFIG_CFG80211_DEFAULT_PS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEVELOPER_WARNINGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEVELOPER_WARNINGS new file mode 100644 index 000000000000..92c3d1dcd9dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_DEVELOPER_WARNINGS @@ -0,0 +1 @@ +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_REQUIRE_SIGNED_REGDB b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_REQUIRE_SIGNED_REGDB new file mode 100644 index 000000000000..03b978a85f56 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_REQUIRE_SIGNED_REGDB @@ -0,0 +1 @@ +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS new file mode 100644 index 000000000000..6849c7d54c31 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS @@ -0,0 +1 @@ +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_WEXT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_WEXT new file mode 100644 index 000000000000..5cbaa6e679ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFG80211_WEXT @@ -0,0 +1 @@ +# CONFIG_CFG80211_WEXT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CFI_CLANG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFI_CLANG new file mode 100644 index 000000000000..f1b68b1b18ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CFI_CLANG @@ -0,0 +1 @@ +# CONFIG_CFI_CLANG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CGROUP_FAVOR_DYNMODS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CGROUP_FAVOR_DYNMODS new file mode 100644 index 000000000000..138558a8e10e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CGROUP_FAVOR_DYNMODS @@ -0,0 +1 
@@ +# CONFIG_CGROUP_FAVOR_DYNMODS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CGROUP_MISC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CGROUP_MISC new file mode 100644 index 000000000000..39276f477c64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CGROUP_MISC @@ -0,0 +1 @@ +# CONFIG_CGROUP_MISC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_ADP5061 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_ADP5061 new file mode 100644 index 000000000000..f4086ccd21d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_ADP5061 @@ -0,0 +1 @@ +# CONFIG_CHARGER_ADP5061 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BD99954 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BD99954 new file mode 100644 index 000000000000..50cfb7bcf116 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BD99954 @@ -0,0 +1 @@ +# CONFIG_CHARGER_BD99954 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ2415X b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ2415X new file mode 100644 index 000000000000..bd6cf1aefb6b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ2415X @@ -0,0 +1 @@ +# CONFIG_CHARGER_BQ2415X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ24257 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ24257 new file mode 100644 index 000000000000..b4a8aea45632 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ24257 @@ -0,0 +1 @@ +# CONFIG_CHARGER_BQ24257 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ24735 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ24735 new file mode 100644 index 000000000000..bc9915c842bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ24735 @@ -0,0 +1 @@ +# CONFIG_CHARGER_BQ24735 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ2515X 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ2515X new file mode 100644 index 000000000000..abd8044a92e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ2515X @@ -0,0 +1 @@ +# CONFIG_CHARGER_BQ2515X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ256XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ256XX new file mode 100644 index 000000000000..6e6842a5d6ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ256XX @@ -0,0 +1 @@ +# CONFIG_CHARGER_BQ256XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ25890 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ25890 new file mode 100644 index 000000000000..a5d1bc4b359b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ25890 @@ -0,0 +1 @@ +# CONFIG_CHARGER_BQ25890 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ25980 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ25980 new file mode 100644 index 000000000000..65e06f37c92d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_BQ25980 @@ -0,0 +1 @@ +# CONFIG_CHARGER_BQ25980 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_GPIO new file mode 100644 index 000000000000..436dad60a92e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_GPIO @@ -0,0 +1 @@ +# CONFIG_CHARGER_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LP8727 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LP8727 new file mode 100644 index 000000000000..cf915198f094 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LP8727 @@ -0,0 +1 @@ +# CONFIG_CHARGER_LP8727 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LT3651 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LT3651 new file mode 100644 index 000000000000..16a7e801ae9d --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LT3651 @@ -0,0 +1 @@ +# CONFIG_CHARGER_LT3651 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LTC4162L b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LTC4162L new file mode 100644 index 000000000000..911950ca80a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_LTC4162L @@ -0,0 +1 @@ +# CONFIG_CHARGER_LTC4162L is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_MAX77976 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_MAX77976 new file mode 100644 index 000000000000..143efa3a8654 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_MAX77976 @@ -0,0 +1 @@ +# CONFIG_CHARGER_MAX77976 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_MAX8903 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_MAX8903 new file mode 100644 index 000000000000..6e62d27d753e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_MAX8903 @@ -0,0 +1 @@ +# CONFIG_CHARGER_MAX8903 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_RT9455 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_RT9455 new file mode 100644 index 000000000000..e9ffbe4d172a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_RT9455 @@ -0,0 +1 @@ +# CONFIG_CHARGER_RT9455 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_SBS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_SBS new file mode 100644 index 000000000000..afb3f2f1dd8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHARGER_SBS @@ -0,0 +1 @@ +# CONFIG_CHARGER_SBS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHECK_SIGNATURE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHECK_SIGNATURE new file mode 100644 index 000000000000..7d8287608344 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHECK_SIGNATURE @@ -0,0 +1 @@ +CONFIG_CHECK_SIGNATURE=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_INLINE_CRYPTO b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_INLINE_CRYPTO new file mode 100644 index 000000000000..bd32df3059f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_INLINE_CRYPTO @@ -0,0 +1 @@ +CONFIG_CHELSIO_INLINE_CRYPTO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_IPSEC_INLINE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_IPSEC_INLINE new file mode 100644 index 000000000000..ee972e70f2ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_IPSEC_INLINE @@ -0,0 +1 @@ +CONFIG_CHELSIO_IPSEC_INLINE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_LIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_LIB new file mode 100644 index 000000000000..73c420b67291 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_LIB @@ -0,0 +1 @@ +CONFIG_CHELSIO_LIB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T1 new file mode 100644 index 000000000000..78f33d433ce0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T1 @@ -0,0 +1 @@ +# CONFIG_CHELSIO_T1 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T3 new file mode 100644 index 000000000000..bfa32e65389b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T3 @@ -0,0 +1 @@ +# CONFIG_CHELSIO_T3 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4 new file mode 100644 index 000000000000..a9f70238b466 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4 @@ -0,0 +1 @@ +CONFIG_CHELSIO_T4=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4VF b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4VF new file mode 100644 index 000000000000..f1805956847e --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4VF @@ -0,0 +1 @@ +CONFIG_CHELSIO_T4VF=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4_DCB b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4_DCB new file mode 100644 index 000000000000..90621bdd912e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_T4_DCB @@ -0,0 +1 @@ +# CONFIG_CHELSIO_T4_DCB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_TLS_DEVICE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_TLS_DEVICE new file mode 100644 index 000000000000..a9b5fef19981 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CHELSIO_TLS_DEVICE @@ -0,0 +1 @@ +# CONFIG_CHELSIO_TLS_DEVICE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CICADA_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_CICADA_PHY new file mode 100644 index 000000000000..4e90d6243a16 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CICADA_PHY @@ -0,0 +1 @@ +CONFIG_CICADA_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS new file mode 100644 index 000000000000..bc37a8fc6a8c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS @@ -0,0 +1 @@ +CONFIG_CIFS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_ALLOW_INSECURE_LEGACY b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_ALLOW_INSECURE_LEGACY new file mode 100644 index 000000000000..b6531cac300e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_ALLOW_INSECURE_LEGACY @@ -0,0 +1 @@ +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG new file mode 100644 index 000000000000..4fc95add98e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG @@ -0,0 +1 @@ +CONFIG_CIFS_DEBUG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG2 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG2 new file mode 100644 index 000000000000..5f930487324d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG2 @@ -0,0 +1 @@ +# CONFIG_CIFS_DEBUG2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG_DUMP_KEYS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG_DUMP_KEYS new file mode 100644 index 000000000000..03f554dbac07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DEBUG_DUMP_KEYS @@ -0,0 +1 @@ +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DFS_UPCALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DFS_UPCALL new file mode 100644 index 000000000000..5f96d08c0e76 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_DFS_UPCALL @@ -0,0 +1 @@ +CONFIG_CIFS_DFS_UPCALL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_FSCACHE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_FSCACHE new file mode 100644 index 000000000000..48901f8ee434 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_FSCACHE @@ -0,0 +1 @@ +# CONFIG_CIFS_FSCACHE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_POSIX b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_POSIX new file mode 100644 index 000000000000..1737fa4cd1ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_POSIX @@ -0,0 +1 @@ +CONFIG_CIFS_POSIX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_SMB_DIRECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_SMB_DIRECT new file mode 100644 index 000000000000..849bffb38ecd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_SMB_DIRECT @@ -0,0 +1 @@ +# CONFIG_CIFS_SMB_DIRECT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_STATS2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_STATS2 new file mode 100644 index 000000000000..16763446cdeb --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_STATS2 @@ -0,0 +1 @@ +# CONFIG_CIFS_STATS2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_SWN_UPCALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_SWN_UPCALL new file mode 100644 index 000000000000..895af9460510 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_SWN_UPCALL @@ -0,0 +1 @@ +# CONFIG_CIFS_SWN_UPCALL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_UPCALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_UPCALL new file mode 100644 index 000000000000..d41540f2313c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_UPCALL @@ -0,0 +1 @@ +CONFIG_CIFS_UPCALL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_XATTR b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_XATTR new file mode 100644 index 000000000000..7e35cc630fcc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CIFS_XATTR @@ -0,0 +1 @@ +CONFIG_CIFS_XATTR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CLANG_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_CLANG_VERSION new file mode 100644 index 000000000000..9e328f41dd0c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CLANG_VERSION @@ -0,0 +1 @@ +CONFIG_CLANG_VERSION=0 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CLZ_TAB b/anolis/configs/L2-OPTIONAL/default/CONFIG_CLZ_TAB new file mode 100644 index 000000000000..dc08dce1f4b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CLZ_TAB @@ -0,0 +1 @@ +CONFIG_CLZ_TAB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CMA_SYSFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CMA_SYSFS new file mode 100644 index 000000000000..20d58f7e8117 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CMA_SYSFS @@ -0,0 +1 @@ +# CONFIG_CMA_SYSFS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CMDLINE_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_CMDLINE_PARTITION new file mode 100644 
index 000000000000..2de3d9b7e824 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CMDLINE_PARTITION @@ -0,0 +1 @@ +# CONFIG_CMDLINE_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CODA_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CODA_FS new file mode 100644 index 000000000000..f8fb39996766 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CODA_FS @@ -0,0 +1 @@ +# CONFIG_CODA_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMEDI b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMEDI new file mode 100644 index 000000000000..2d36fa996f91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMEDI @@ -0,0 +1 @@ +# CONFIG_COMEDI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_CDCE706 b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_CDCE706 new file mode 100644 index 000000000000..518e96d457bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_CDCE706 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_CDCE706 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_CS2000_CP b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_CS2000_CP new file mode 100644 index 000000000000..25982987f7a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_CS2000_CP @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_CS2000_CP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_MAX9485 b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_MAX9485 new file mode 100644 index 000000000000..162d666b2891 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_MAX9485 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_MAX9485 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_PWM b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_PWM new file mode 100644 index 000000000000..8e67308395e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_PWM @@ -0,0 +1 @@ +# 
CONFIG_COMMON_CLK_PWM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI5341 b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI5341 new file mode 100644 index 000000000000..1496845d78f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI5341 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_SI5341 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI5351 b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI5351 new file mode 100644 index 000000000000..3951baf0494c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI5351 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_SI5351 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI544 b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI544 new file mode 100644 index 000000000000..718d5db93170 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMMON_CLK_SI544 @@ -0,0 +1 @@ +# CONFIG_COMMON_CLK_SI544 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPACT_UNEVICTABLE_DEFAULT b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPACT_UNEVICTABLE_DEFAULT new file mode 100644 index 000000000000..4f32a29f7eea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPACT_UNEVICTABLE_DEFAULT @@ -0,0 +1 @@ +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPAT_BINFMT_ELF b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPAT_BINFMT_ELF new file mode 100644 index 000000000000..de53e58267e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPAT_BINFMT_ELF @@ -0,0 +1 @@ +CONFIG_COMPAT_BINFMT_ELF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPAT_OLD_SIGACTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPAT_OLD_SIGACTION new file mode 100644 index 000000000000..ef0c1b3718a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COMPAT_OLD_SIGACTION @@ -0,0 +1 @@ +CONFIG_COMPAT_OLD_SIGACTION=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_CONSOLE_POLL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONSOLE_POLL new file mode 100644 index 000000000000..3b674a925539 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONSOLE_POLL @@ -0,0 +1 @@ +CONFIG_CONSOLE_POLL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_SWITCH_TRACER b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_SWITCH_TRACER new file mode 100644 index 000000000000..72ba91c11a7e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_SWITCH_TRACER @@ -0,0 +1 @@ +CONFIG_CONTEXT_SWITCH_TRACER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING new file mode 100644 index 000000000000..b295430906e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING @@ -0,0 +1 @@ +CONFIG_CONTEXT_TRACKING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_IDLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_IDLE new file mode 100644 index 000000000000..4d0d66aa892b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_IDLE @@ -0,0 +1 @@ +CONFIG_CONTEXT_TRACKING_IDLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_USER b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_USER new file mode 100644 index 000000000000..db4d6c58fd59 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_USER @@ -0,0 +1 @@ +CONFIG_CONTEXT_TRACKING_USER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_USER_FORCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_USER_FORCE new file mode 100644 index 000000000000..5f1671cdd130 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTEXT_TRACKING_USER_FORCE @@ -0,0 +1 @@ +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTIG_ALLOC 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTIG_ALLOC new file mode 100644 index 000000000000..6e91a33ce90b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CONTIG_ALLOC @@ -0,0 +1 @@ +CONFIG_CONTIG_ALLOC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CORDIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CORDIC new file mode 100644 index 000000000000..1e5e51d4fdfc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CORDIC @@ -0,0 +1 @@ +CONFIG_CORDIC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CORTINA_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_CORTINA_PHY new file mode 100644 index 000000000000..87341d40e91b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CORTINA_PHY @@ -0,0 +1 @@ +CONFIG_CORTINA_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_COUNTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_COUNTER new file mode 100644 index 000000000000..7321d72e3f90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_COUNTER @@ -0,0 +1 @@ +# CONFIG_COUNTER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_FREQ_GOV_ATTR_SET b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_FREQ_GOV_ATTR_SET new file mode 100644 index 000000000000..2bdd9c9b59c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_FREQ_GOV_ATTR_SET @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_ATTR_SET=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_FREQ_GOV_COMMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_FREQ_GOV_COMMON new file mode 100644 index 000000000000..2fbd9f776c8f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_FREQ_GOV_COMMON @@ -0,0 +1 @@ +CONFIG_CPU_FREQ_GOV_COMMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_HOTPLUG_STATE_CONTROL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_HOTPLUG_STATE_CONTROL new file mode 100644 index 000000000000..4b0b6ab02b16 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_HOTPLUG_STATE_CONTROL 
@@ -0,0 +1 @@ +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_RMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_RMAP new file mode 100644 index 000000000000..8ab51fcba81b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CPU_RMAP @@ -0,0 +1 @@ +CONFIG_CPU_RMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC16 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC16 new file mode 100644 index 000000000000..3c20a511ae16 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC16 @@ -0,0 +1 @@ +CONFIG_CRC16=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32 new file mode 100644 index 000000000000..1333300126d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32 @@ -0,0 +1 @@ +CONFIG_CRC32=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_BIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_BIT new file mode 100644 index 000000000000..efaa0ffc1472 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_BIT @@ -0,0 +1 @@ +# CONFIG_CRC32_BIT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SARWATE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SARWATE new file mode 100644 index 000000000000..6f9aa0b27162 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SARWATE @@ -0,0 +1 @@ +# CONFIG_CRC32_SARWATE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SELFTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SELFTEST new file mode 100644 index 000000000000..ddea70b11a87 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SELFTEST @@ -0,0 +1 @@ +# CONFIG_CRC32_SELFTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY4 new file mode 100644 index 000000000000..3741ea9300d9 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY4 @@ -0,0 +1 @@ +# CONFIG_CRC32_SLICEBY4 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY8 new file mode 100644 index 000000000000..9af267ff7e91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC32_SLICEBY8 @@ -0,0 +1 @@ +CONFIG_CRC32_SLICEBY8=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC4 new file mode 100644 index 000000000000..7cd25b4af4c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC4 @@ -0,0 +1 @@ +# CONFIG_CRC4 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC7 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC7 new file mode 100644 index 000000000000..0e8b98dc4f3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC7 @@ -0,0 +1 @@ +CONFIG_CRC7=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC8 new file mode 100644 index 000000000000..2fd408e1bbb8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC8 @@ -0,0 +1 @@ +CONFIG_CRC8=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_CCITT b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_CCITT new file mode 100644 index 000000000000..e5498da09ee7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_CCITT @@ -0,0 +1 @@ +CONFIG_CRC_CCITT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_ITU_T b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_ITU_T new file mode 100644 index 000000000000..b69e01140c8b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_ITU_T @@ -0,0 +1 @@ +CONFIG_CRC_ITU_T=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_T10DIF b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_T10DIF new file mode 100644 index 000000000000..0f16713a6855 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRC_T10DIF @@ -0,0 +1 @@ +CONFIG_CRC_T10DIF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA new file mode 100644 index 000000000000..d9d7604f7695 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA @@ -0,0 +1 @@ +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305 new file mode 100644 index 000000000000..537df62b40fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305 @@ -0,0 +1 @@ +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARIA new file mode 100644 index 000000000000..2ff8e14fcfa9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ARIA @@ -0,0 +1 @@ +# CONFIG_CRYPTO_ARIA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_AMLOGIC_GXL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_AMLOGIC_GXL new file mode 100644 index 000000000000..bd91573298a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_AMLOGIC_GXL @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_ATMEL_ECC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_ATMEL_ECC new file mode 100644 index 000000000000..cc0194c3ca8e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_ATMEL_ECC @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_ATMEL_SHA204A b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_ATMEL_SHA204A new file mode 100644 index 000000000000..c2d37b3dabb9 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_ATMEL_SHA204A @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_CHELSIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_CHELSIO new file mode 100644 index 000000000000..42606535d648 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_CHELSIO @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_CHELSIO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_NITROX b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_NITROX new file mode 100644 index 000000000000..4f48a626fa21 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_NITROX @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_NITROX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_NITROX_CNN55XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_NITROX_CNN55XX new file mode 100644 index 000000000000..47ee7d9bc6d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_NITROX_CNN55XX @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_QAT_420XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_QAT_420XX new file mode 100644 index 000000000000..077d8661509b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_QAT_420XX @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_QAT_420XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_SAFEXCEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_SAFEXCEL new file mode 100644 index 000000000000..6bd1d5baa694 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DEV_SAFEXCEL @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DH_RFC7919_GROUPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DH_RFC7919_GROUPS new file mode 100644 index 000000000000..ca071066c02b --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_DH_RFC7919_GROUPS @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ECRDSA b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ECRDSA new file mode 100644 index 000000000000..63f9e139a11b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ECRDSA @@ -0,0 +1 @@ +# CONFIG_CRYPTO_ECRDSA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_FIPS_CUSTOM_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_FIPS_CUSTOM_VERSION new file mode 100644 index 000000000000..ea35dbd137cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_FIPS_CUSTOM_VERSION @@ -0,0 +1 @@ +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_FIPS_NAME b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_FIPS_NAME new file mode 100644 index 000000000000..c5396d0eb7d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_FIPS_NAME @@ -0,0 +1 @@ +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_GENIV b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_GENIV new file mode 100644 index 000000000000..f805fe3c8a30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_GENIV @@ -0,0 +1 @@ +CONFIG_CRYPTO_GENIV=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_HCTR2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_HCTR2 new file mode 100644 index 000000000000..d6d6e5acef0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_HCTR2 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_HCTR2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE new file mode 100644 index 000000000000..6c0d8f6fef33 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE @@ -0,0 +1 @@ +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_ARC4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_ARC4 new file mode 100644 index 000000000000..a7ab5dc3b8f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_ARC4 @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_ARC4=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC new file mode 100644 index 000000000000..4669430bdf36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA new file mode 100644 index 000000000000..7259b16414a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_CHACHA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA20POLY1305 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA20POLY1305 new file mode 100644 index 000000000000..cbd06835522e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA20POLY1305 @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA_GENERIC new file mode 100644 index 000000000000..55269579a77b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CHACHA_GENERIC @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CURVE25519 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CURVE25519 new file mode 100644 index 
000000000000..3b1931064346 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CURVE25519 @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_CURVE25519=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CURVE25519_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CURVE25519_GENERIC new file mode 100644 index 000000000000..d2afd62f53e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_CURVE25519_GENERIC @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_DES b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_DES new file mode 100644 index 000000000000..fa076d710943 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_DES @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_DES=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_GF128MUL b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_GF128MUL new file mode 100644 index 000000000000..38fce81f8e9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_GF128MUL @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_GF128MUL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_POLY1305 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_POLY1305 new file mode 100644 index 000000000000..3d2420f667a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_POLY1305 @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_POLY1305=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_POLY1305_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_POLY1305_GENERIC new file mode 100644 index 000000000000..d0e733171567 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_POLY1305_GENERIC @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_SHA1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_SHA1 new file mode 100644 index 000000000000..33d50c299373 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_SHA1 @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_SHA1=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_UTILS b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_UTILS new file mode 100644 index 000000000000..4f2879497171 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_LIB_UTILS @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_UTILS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG2 new file mode 100644 index 000000000000..e67083ef0815 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG2 @@ -0,0 +1 @@ +CONFIG_CRYPTO_SIG2=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CSD_LOCK_WAIT_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CSD_LOCK_WAIT_DEBUG new file mode 100644 index 000000000000..086e41bbfcf8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CSD_LOCK_WAIT_DEBUG @@ -0,0 +1 @@ +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CXL_REGION_INVALIDATION_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_CXL_REGION_INVALIDATION_TEST new file mode 100644 index 000000000000..a07a1943aa56 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CXL_REGION_INVALIDATION_TEST @@ -0,0 +1 @@ +# CONFIG_CXL_REGION_INVALIDATION_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_LRU_SORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_LRU_SORT new file mode 100644 index 000000000000..773ad5d68b1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_LRU_SORT @@ -0,0 +1 @@ +# CONFIG_DAMON_LRU_SORT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_RECLAIM b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_RECLAIM new file mode 100644 index 000000000000..e3e14ea37a22 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_RECLAIM @@ -0,0 +1 @@ +# 
CONFIG_DAMON_RECLAIM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_SYSFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_SYSFS new file mode 100644 index 000000000000..9cd6a1d32eb5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAMON_SYSFS @@ -0,0 +1 @@ +# CONFIG_DAMON_SYSFS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DAVICOM_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAVICOM_PHY new file mode 100644 index 000000000000..064b2bebafc1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DAVICOM_PHY @@ -0,0 +1 @@ +CONFIG_DAVICOM_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DCACHE_WORD_ACCESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DCACHE_WORD_ACCESS new file mode 100644 index 000000000000..7e6e1f9f1725 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DCACHE_WORD_ACCESS @@ -0,0 +1 @@ +CONFIG_DCACHE_WORD_ACCESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_ATOMIC_SLEEP b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_ATOMIC_SLEEP new file mode 100644 index 000000000000..cfd25ccf58f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_ATOMIC_SLEEP @@ -0,0 +1 @@ +# CONFIG_DEBUG_ATOMIC_SLEEP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_CGROUP_REF b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_CGROUP_REF new file mode 100644 index 000000000000..0019796d6694 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_CGROUP_REF @@ -0,0 +1 @@ +# CONFIG_DEBUG_CGROUP_REF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_DEVRES b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_DEVRES new file mode 100644 index 000000000000..f54e7fd39acd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_DEVRES @@ -0,0 +1 @@ +# CONFIG_DEBUG_DEVRES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_DRIVER 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_DRIVER new file mode 100644 index 000000000000..84f5416f9116 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_DRIVER @@ -0,0 +1 @@ +# CONFIG_DEBUG_DRIVER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_GPIO new file mode 100644 index 000000000000..c278d8cce41e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_GPIO @@ -0,0 +1 @@ +# CONFIG_DEBUG_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_NONE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_NONE new file mode 100644 index 000000000000..d6a7951bf8be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_NONE @@ -0,0 +1 @@ +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_ZLIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_ZLIB new file mode 100644 index 000000000000..426fd1faeb00 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_ZLIB @@ -0,0 +1 @@ +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_ZSTD b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_ZSTD new file mode 100644 index 000000000000..815ade5f15a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_COMPRESSED_ZSTD @@ -0,0 +1 @@ +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_DWARF5 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_DWARF5 new file mode 100644 index 000000000000..e31a11334fce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_DWARF5 @@ -0,0 +1 @@ +# CONFIG_DEBUG_INFO_DWARF5 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_NONE 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_NONE new file mode 100644 index 000000000000..3e8d195ebbfa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_INFO_NONE @@ -0,0 +1 @@ +# CONFIG_DEBUG_INFO_NONE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_IRQFLAGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_IRQFLAGS new file mode 100644 index 000000000000..87c713339cef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_IRQFLAGS @@ -0,0 +1 @@ +# CONFIG_DEBUG_IRQFLAGS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_KOBJECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_KOBJECT new file mode 100644 index 000000000000..c62a3581c010 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_KOBJECT @@ -0,0 +1 @@ +# CONFIG_DEBUG_KOBJECT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_LOCKING_API_SELFTESTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_LOCKING_API_SELFTESTS new file mode 100644 index 000000000000..bc7067b6bd58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_LOCKING_API_SELFTESTS @@ -0,0 +1 @@ +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_LOCK_ALLOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_LOCK_ALLOC new file mode 100644 index 000000000000..ea3c2dc126d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_LOCK_ALLOC @@ -0,0 +1 @@ +# CONFIG_DEBUG_LOCK_ALLOC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_MAPLE_TREE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_MAPLE_TREE new file mode 100644 index 000000000000..a41698b8ec24 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_MAPLE_TREE @@ -0,0 +1 @@ +# CONFIG_DEBUG_MAPLE_TREE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_MUTEXES b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_MUTEXES new file mode 
100644 index 000000000000..92a6a5feabe5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_MUTEXES @@ -0,0 +1 @@ +# CONFIG_DEBUG_MUTEXES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_NET b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_NET new file mode 100644 index 000000000000..03304d546a5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_NET @@ -0,0 +1 @@ +# CONFIG_DEBUG_NET is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_OBJECTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_OBJECTS new file mode 100644 index 000000000000..0846705c9739 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_OBJECTS @@ -0,0 +1 @@ +# CONFIG_DEBUG_OBJECTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PER_CPU_MAPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PER_CPU_MAPS new file mode 100644 index 000000000000..01ead72817e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PER_CPU_MAPS @@ -0,0 +1 @@ +# CONFIG_DEBUG_PER_CPU_MAPS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PINCTRL b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PINCTRL new file mode 100644 index 000000000000..82a04e826bfa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PINCTRL @@ -0,0 +1 @@ +# CONFIG_DEBUG_PINCTRL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PREEMPT b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PREEMPT new file mode 100644 index 000000000000..a1f73ed5c177 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_PREEMPT @@ -0,0 +1 @@ +# CONFIG_DEBUG_PREEMPT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_RT_MUTEXES b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_RT_MUTEXES new file mode 100644 index 000000000000..742d62fb459f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_RT_MUTEXES @@ -0,0 +1 @@ +# 
CONFIG_DEBUG_RT_MUTEXES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_RWSEMS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_RWSEMS new file mode 100644 index 000000000000..7572af4fc068 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_RWSEMS @@ -0,0 +1 @@ +# CONFIG_DEBUG_RWSEMS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_SHIRQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_SHIRQ new file mode 100644 index 000000000000..ab64f3007d1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_SHIRQ @@ -0,0 +1 @@ +CONFIG_DEBUG_SHIRQ=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_SPINLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_SPINLOCK new file mode 100644 index 000000000000..1440abeb3194 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_SPINLOCK @@ -0,0 +1 @@ +# CONFIG_DEBUG_SPINLOCK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_STACK_USAGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_STACK_USAGE new file mode 100644 index 000000000000..50688e28a552 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_STACK_USAGE @@ -0,0 +1 @@ +# CONFIG_DEBUG_STACK_USAGE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_TEST_DRIVER_REMOVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_TEST_DRIVER_REMOVE new file mode 100644 index 000000000000..a7886e4ac04b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_TEST_DRIVER_REMOVE @@ -0,0 +1 @@ +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_TIMEKEEPING b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_TIMEKEEPING new file mode 100644 index 000000000000..2c0dd678c6c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_TIMEKEEPING @@ -0,0 +1 @@ +# CONFIG_DEBUG_TIMEKEEPING is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VIRTUAL 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VIRTUAL new file mode 100644 index 000000000000..c06eb53bede5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VIRTUAL @@ -0,0 +1 @@ +# CONFIG_DEBUG_VIRTUAL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VM b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VM new file mode 100644 index 000000000000..5dc25e115459 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VM @@ -0,0 +1 @@ +# CONFIG_DEBUG_VM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VM_PGTABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VM_PGTABLE new file mode 100644 index 000000000000..2dabf3429c81 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_VM_PGTABLE @@ -0,0 +1 @@ +# CONFIG_DEBUG_VM_PGTABLE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_WQ_FORCE_RR_CPU b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_WQ_FORCE_RR_CPU new file mode 100644 index 000000000000..f5952cf0b490 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_WQ_FORCE_RR_CPU @@ -0,0 +1 @@ +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_WW_MUTEX_SLOWPATH b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_WW_MUTEX_SLOWPATH new file mode 100644 index 000000000000..f4151e3d5e76 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEBUG_WW_MUTEX_SLOWPATH @@ -0,0 +1 @@ +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_BZIP2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_BZIP2 new file mode 100644 index 000000000000..73f457e49f5e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_BZIP2 @@ -0,0 +1 @@ +CONFIG_DECOMPRESS_BZIP2=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_GZIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_GZIP new file mode 100644 index 
000000000000..7bb30c7d9935 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_GZIP @@ -0,0 +1 @@ +CONFIG_DECOMPRESS_GZIP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZ4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZ4 new file mode 100644 index 000000000000..1dc3df164bfa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZ4 @@ -0,0 +1 @@ +CONFIG_DECOMPRESS_LZ4=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZMA new file mode 100644 index 000000000000..dbec9f244d63 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZMA @@ -0,0 +1 @@ +CONFIG_DECOMPRESS_LZMA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZO b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZO new file mode 100644 index 000000000000..9b21e6b9987d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_LZO @@ -0,0 +1 @@ +CONFIG_DECOMPRESS_LZO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_XZ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_XZ new file mode 100644 index 000000000000..ee04f1dca51d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_XZ @@ -0,0 +1 @@ +CONFIG_DECOMPRESS_XZ=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_ZSTD b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_ZSTD new file mode 100644 index 000000000000..4cad492ad6c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DECOMPRESS_ZSTD @@ -0,0 +1 @@ +CONFIG_DECOMPRESS_ZSTD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEFAULT_HOSTNAME b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEFAULT_HOSTNAME new file mode 100644 index 000000000000..2991d2f1191a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEFAULT_HOSTNAME @@ -0,0 +1 @@ +CONFIG_DEFAULT_HOSTNAME="(none)" diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEFAULT_INIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEFAULT_INIT new file mode 100644 index 000000000000..f89ac0249ea0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEFAULT_INIT @@ -0,0 +1 @@ +CONFIG_DEFAULT_INIT="" diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEVICE_MIGRATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEVICE_MIGRATION new file mode 100644 index 000000000000..6b188826054a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEVICE_MIGRATION @@ -0,0 +1 @@ +CONFIG_DEVICE_MIGRATION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DEVTMPFS_SAFE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEVTMPFS_SAFE new file mode 100644 index 000000000000..29951ab12ab3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DEVTMPFS_SAFE @@ -0,0 +1 @@ +# CONFIG_DEVTMPFS_SAFE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DIMLIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_DIMLIB new file mode 100644 index 000000000000..a082ff1adc01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DIMLIB @@ -0,0 +1 @@ +CONFIG_DIMLIB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM9051 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM9051 new file mode 100644 index 000000000000..b0a87854d9bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM9051 @@ -0,0 +1 @@ +# CONFIG_DM9051 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_DEBUG new file mode 100644 index 000000000000..15e748ca8beb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_DEBUG @@ -0,0 +1 @@ +# CONFIG_DMABUF_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_HEAPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_HEAPS new file mode 100644 index 000000000000..06c5f4cf1acc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_HEAPS @@ 
-0,0 +1 @@ +# CONFIG_DMABUF_HEAPS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_MOVE_NOTIFY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_MOVE_NOTIFY new file mode 100644 index 000000000000..33c2fe87688e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_MOVE_NOTIFY @@ -0,0 +1 @@ +# CONFIG_DMABUF_MOVE_NOTIFY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_SELFTESTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_SELFTESTS new file mode 100644 index 000000000000..6943ce41c58f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_SELFTESTS @@ -0,0 +1 @@ +# CONFIG_DMABUF_SELFTESTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_SYSFS_STATS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_SYSFS_STATS new file mode 100644 index 000000000000..2a616d744d11 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_SYSFS_STATS @@ -0,0 +1 @@ +# CONFIG_DMABUF_SYSFS_STATS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMAPOOL_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMAPOOL_TEST new file mode 100644 index 000000000000..8049f96f0322 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMAPOOL_TEST @@ -0,0 +1 @@ +# CONFIG_DMAPOOL_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_ACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_ACPI new file mode 100644 index 000000000000..cfc6819153b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_ACPI @@ -0,0 +1 @@ +CONFIG_DMA_ACPI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_COHERENT_POOL b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_COHERENT_POOL new file mode 100644 index 000000000000..4350357375fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_COHERENT_POOL @@ -0,0 +1 @@ +CONFIG_DMA_COHERENT_POOL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_ENGINE_RAID 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_ENGINE_RAID new file mode 100644 index 000000000000..074c691a9347 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_ENGINE_RAID @@ -0,0 +1 @@ +CONFIG_DMA_ENGINE_RAID=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_FENCE_TRACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_FENCE_TRACE new file mode 100644 index 000000000000..bb21d9c521b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_FENCE_TRACE @@ -0,0 +1 @@ +# CONFIG_DMA_FENCE_TRACE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_MAP_BENCHMARK b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_MAP_BENCHMARK new file mode 100644 index 000000000000..c168b4b6cd69 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_MAP_BENCHMARK @@ -0,0 +1 @@ +# CONFIG_DMA_MAP_BENCHMARK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_NUMA_CMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_NUMA_CMA new file mode 100644 index 000000000000..e4146244134d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_NUMA_CMA @@ -0,0 +1 @@ +# CONFIG_DMA_NUMA_CMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_OPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_OPS new file mode 100644 index 000000000000..c18773fcb27f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_OPS @@ -0,0 +1 @@ +CONFIG_DMA_OPS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_SHARED_BUFFER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_SHARED_BUFFER new file mode 100644 index 000000000000..1d2691cb1de4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DMA_SHARED_BUFFER @@ -0,0 +1 @@ +CONFIG_DMA_SHARED_BUFFER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_AUDIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_AUDIT new file mode 100644 index 000000000000..318a255494f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_AUDIT @@ 
-0,0 +1 @@ +CONFIG_DM_AUDIT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_BIO_PRISON b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_BIO_PRISON new file mode 100644 index 000000000000..17aa67d71984 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_BIO_PRISON @@ -0,0 +1 @@ +CONFIG_DM_BIO_PRISON=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_BUFIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_BUFIO new file mode 100644 index 000000000000..93af8fc46ce5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_BUFIO @@ -0,0 +1 @@ +CONFIG_DM_BUFIO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_CLONE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_CLONE new file mode 100644 index 000000000000..03f992664487 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_CLONE @@ -0,0 +1 @@ +# CONFIG_DM_CLONE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING new file mode 100644 index 000000000000..52dd34960278 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING @@ -0,0 +1 @@ +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_DUST b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_DUST new file mode 100644 index 000000000000..ffac78bb5d30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_DUST @@ -0,0 +1 @@ +# CONFIG_DM_DUST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_EBS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_EBS new file mode 100644 index 000000000000..fef2f5dccd12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_EBS @@ -0,0 +1 @@ +# CONFIG_DM_EBS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_MULTIPATH_HST b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_MULTIPATH_HST new file mode 100644 index 
000000000000..a9939aa97bde --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_MULTIPATH_HST @@ -0,0 +1 @@ +# CONFIG_DM_MULTIPATH_HST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_MULTIPATH_IOA b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_MULTIPATH_IOA new file mode 100644 index 000000000000..9dbcf92fa81e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_MULTIPATH_IOA @@ -0,0 +1 @@ +# CONFIG_DM_MULTIPATH_IOA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_PERSISTENT_DATA b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_PERSISTENT_DATA new file mode 100644 index 000000000000..529f0c35b924 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_PERSISTENT_DATA @@ -0,0 +1 @@ +CONFIG_DM_PERSISTENT_DATA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_UNSTRIPED b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_UNSTRIPED new file mode 100644 index 000000000000..f3d4f533b1b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_UNSTRIPED @@ -0,0 +1 @@ +# CONFIG_DM_UNSTRIPED is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_VERITY_FEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_VERITY_FEC new file mode 100644 index 000000000000..955cb360f27b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_VERITY_FEC @@ -0,0 +1 @@ +# CONFIG_DM_VERITY_FEC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG new file mode 100644 index 000000000000..4d2da6ce4dda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG @@ -0,0 +1 @@ +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DNET b/anolis/configs/L2-OPTIONAL/default/CONFIG_DNET new file mode 100644 index 000000000000..55807a9e0450 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DNET 
@@ -0,0 +1 @@ +CONFIG_DNET=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83640_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83640_PHY new file mode 100644 index 000000000000..15f62f80290b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83640_PHY @@ -0,0 +1 @@ +CONFIG_DP83640_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83822_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83822_PHY new file mode 100644 index 000000000000..e97e5ab0da29 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83822_PHY @@ -0,0 +1 @@ +CONFIG_DP83822_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83848_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83848_PHY new file mode 100644 index 000000000000..86d916a4aafc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83848_PHY @@ -0,0 +1 @@ +CONFIG_DP83848_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83867_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83867_PHY new file mode 100644 index 000000000000..5ba3d57bd52c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83867_PHY @@ -0,0 +1 @@ +CONFIG_DP83867_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83869_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83869_PHY new file mode 100644 index 000000000000..100b19b53f56 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83869_PHY @@ -0,0 +1 @@ +# CONFIG_DP83869_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83TC811_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83TC811_PHY new file mode 100644 index 000000000000..5c750e473f53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83TC811_PHY @@ -0,0 +1 @@ +CONFIG_DP83TC811_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83TD510_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83TD510_PHY new file mode 100644 index 000000000000..e12a8fbdfb7a --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_DP83TD510_PHY @@ -0,0 +1 @@ +# CONFIG_DP83TD510_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DQL b/anolis/configs/L2-OPTIONAL/default/CONFIG_DQL new file mode 100644 index 000000000000..a7120cf432d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DQL @@ -0,0 +1 @@ +CONFIG_DQL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRAGONRISE_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRAGONRISE_FF new file mode 100644 index 000000000000..c1951e201a13 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRAGONRISE_FF @@ -0,0 +1 @@ +# CONFIG_DRAGONRISE_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ACCEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ACCEL new file mode 100644 index 000000000000..2cf0b91ca5da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ACCEL @@ -0,0 +1 @@ +# CONFIG_DRM_ACCEL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_AMD_DC_FP b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_AMD_DC_FP new file mode 100644 index 000000000000..eb6a593afa3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_AMD_DC_FP @@ -0,0 +1 @@ +CONFIG_DRM_AMD_DC_FP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_AMD_SECURE_DISPLAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_AMD_SECURE_DISPLAY new file mode 100644 index 000000000000..1159df7e69cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_AMD_SECURE_DISPLAY @@ -0,0 +1 @@ +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ANALOGIX_ANX78XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ANALOGIX_ANX78XX new file mode 100644 index 000000000000..5593305c3178 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ANALOGIX_ANX78XX @@ -0,0 +1 @@ +# CONFIG_DRM_ANALOGIX_ANX78XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_BRIDGE 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_BRIDGE new file mode 100644 index 000000000000..243fa7dfa527 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_BRIDGE @@ -0,0 +1 @@ +CONFIG_DRM_BRIDGE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_BUDDY b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_BUDDY new file mode 100644 index 000000000000..3f817c6c3525 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_BUDDY @@ -0,0 +1 @@ +CONFIG_DRM_BUDDY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_DP_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_DP_HELPER new file mode 100644 index 000000000000..119df919c13d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_DP_HELPER @@ -0,0 +1 @@ +CONFIG_DRM_DISPLAY_DP_HELPER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HDCP_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HDCP_HELPER new file mode 100644 index 000000000000..bc9c3a681dec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HDCP_HELPER @@ -0,0 +1 @@ +CONFIG_DRM_DISPLAY_HDCP_HELPER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HDMI_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HDMI_HELPER new file mode 100644 index 000000000000..84ff83cac275 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HDMI_HELPER @@ -0,0 +1 @@ +CONFIG_DRM_DISPLAY_HDMI_HELPER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HELPER new file mode 100644 index 000000000000..e19cd5d255e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_DISPLAY_HELPER @@ -0,0 +1 @@ +CONFIG_DRM_DISPLAY_HELPER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ETNAVIV b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ETNAVIV new file mode 100644 index 000000000000..e9f67ce86810 --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_ETNAVIV @@ -0,0 +1 @@ +# CONFIG_DRM_ETNAVIV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_EXEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_EXEC new file mode 100644 index 000000000000..1d9dffe557e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_EXEC @@ -0,0 +1 @@ +CONFIG_DRM_EXEC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GEM_SHMEM_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GEM_SHMEM_HELPER new file mode 100644 index 000000000000..256f244966f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GEM_SHMEM_HELPER @@ -0,0 +1 @@ +CONFIG_DRM_GEM_SHMEM_HELPER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GM12U320 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GM12U320 new file mode 100644 index 000000000000..68cc5925e030 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GM12U320 @@ -0,0 +1 @@ +# CONFIG_DRM_GM12U320 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GUD b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GUD new file mode 100644 index 000000000000..824b3830a0c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_GUD @@ -0,0 +1 @@ +# CONFIG_DRM_GUD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_I2C_NXP_TDA9950 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_I2C_NXP_TDA9950 new file mode 100644 index 000000000000..e077c7537236 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_I2C_NXP_TDA9950 @@ -0,0 +1 @@ +# CONFIG_DRM_I2C_NXP_TDA9950 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_KMS_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_KMS_HELPER new file mode 100644 index 000000000000..b35bf4c66a10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_KMS_HELPER @@ -0,0 +1 @@ +CONFIG_DRM_KMS_HELPER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_LEGACY 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_LEGACY new file mode 100644 index 000000000000..2c74b876fb48 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_LEGACY @@ -0,0 +1 @@ +# CONFIG_DRM_LEGACY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_LOONGSON b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_LOONGSON new file mode 100644 index 000000000000..2c481701cea8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_LOONGSON @@ -0,0 +1 @@ +# CONFIG_DRM_LOONGSON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL new file mode 100644 index 000000000000..de8a9c247d1c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL @@ -0,0 +1 @@ +CONFIG_DRM_PANEL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_AUO_A030JTN01 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_AUO_A030JTN01 new file mode 100644 index 000000000000..80eb7d860091 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_AUO_A030JTN01 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_BRIDGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_BRIDGE new file mode 100644 index 000000000000..38e0dfae76bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_BRIDGE @@ -0,0 +1 @@ +CONFIG_DRM_PANEL_BRIDGE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_MIPI_DBI b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_MIPI_DBI new file mode 100644 index 000000000000..036e60ec6549 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_MIPI_DBI @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_MIPI_DBI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ORIENTATION_QUIRKS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ORIENTATION_QUIRKS new file mode 100644 index 000000000000..8e68b27635a4 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ORIENTATION_QUIRKS @@ -0,0 +1 @@ +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ORISETECH_OTA5601A b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ORISETECH_OTA5601A new file mode 100644 index 000000000000..e07f56f0bf7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ORISETECH_OTA5601A @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_WIDECHIPS_WS2401 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_WIDECHIPS_WS2401 new file mode 100644 index 000000000000..3886d75b0cde --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_WIDECHIPS_WS2401 @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SCHED b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SCHED new file mode 100644 index 000000000000..e13621fecffa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SCHED @@ -0,0 +1 @@ +CONFIG_DRM_SCHED=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SIMPLEDRM b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SIMPLEDRM new file mode 100644 index 000000000000..2bd12280c074 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SIMPLEDRM @@ -0,0 +1 @@ +# CONFIG_DRM_SIMPLEDRM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SSD130X b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SSD130X new file mode 100644 index 000000000000..8ebb4ae752ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SSD130X @@ -0,0 +1 @@ +# CONFIG_DRM_SSD130X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SUBALLOC_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SUBALLOC_HELPER new file mode 100644 index 000000000000..9edd082ef9c2 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_SUBALLOC_HELPER @@ -0,0 +1 @@ +CONFIG_DRM_SUBALLOC_HELPER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_TTM b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_TTM new file mode 100644 index 000000000000..0b3c61dd1f61 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_TTM @@ -0,0 +1 @@ +CONFIG_DRM_TTM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_TTM_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_TTM_HELPER new file mode 100644 index 000000000000..2b1b32c6c831 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_TTM_HELPER @@ -0,0 +1 @@ +CONFIG_DRM_TTM_HELPER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VGEM b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VGEM new file mode 100644 index 000000000000..66aeb12990be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VGEM @@ -0,0 +1 @@ +# CONFIG_DRM_VGEM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VIRTIO_GPU_KMS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VIRTIO_GPU_KMS new file mode 100644 index 000000000000..4e81ebc2f73c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VIRTIO_GPU_KMS @@ -0,0 +1 @@ +CONFIG_DRM_VIRTIO_GPU_KMS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VKMS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VKMS new file mode 100644 index 000000000000..5e10197e7c26 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VKMS @@ -0,0 +1 @@ +CONFIG_DRM_VKMS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VRAM_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VRAM_HELPER new file mode 100644 index 000000000000..c9ca0b26147f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_VRAM_HELPER @@ -0,0 +1 @@ +CONFIG_DRM_VRAM_HELPER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DS1682 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DS1682 new file mode 100644 index 
000000000000..7266bceb4017 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DS1682 @@ -0,0 +1 @@ +# CONFIG_DS1682 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DST_CACHE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DST_CACHE new file mode 100644 index 000000000000..989343b204af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DST_CACHE @@ -0,0 +1 @@ +CONFIG_DST_CACHE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE new file mode 100644 index 000000000000..4a7a56908765 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE @@ -0,0 +1 @@ +CONFIG_DUMMY_CONSOLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE_COLUMNS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE_COLUMNS new file mode 100644 index 000000000000..e05b288c4322 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE_COLUMNS @@ -0,0 +1 @@ +CONFIG_DUMMY_CONSOLE_COLUMNS=80 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE_ROWS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE_ROWS new file mode 100644 index 000000000000..4b42476d88af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_CONSOLE_ROWS @@ -0,0 +1 @@ +CONFIG_DUMMY_CONSOLE_ROWS=25 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_IRQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_IRQ new file mode 100644 index 000000000000..d24642867df8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DUMMY_IRQ @@ -0,0 +1 @@ +# CONFIG_DUMMY_IRQ is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_EDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_EDMA new file mode 100644 index 000000000000..dc2c1589af2d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_EDMA @@ -0,0 +1 @@ +# CONFIG_DW_EDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_WATCHDOG 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_WATCHDOG new file mode 100644 index 000000000000..e4db55403e04 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_DW_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_XDATA_PCIE b/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_XDATA_PCIE new file mode 100644 index 000000000000..d5aecfc82d79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DW_XDATA_PCIE @@ -0,0 +1 @@ +# CONFIG_DW_XDATA_PCIE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_EVENTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_EVENTS new file mode 100644 index 000000000000..08f1910943e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_EVENTS @@ -0,0 +1 @@ +CONFIG_DYNAMIC_EVENTS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_FTRACE_WITH_ARGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_FTRACE_WITH_ARGS new file mode 100644 index 000000000000..28e0ae95aa58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_FTRACE_WITH_ARGS @@ -0,0 +1 @@ +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS b/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS new file mode 100644 index 000000000000..bdcba5638e8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS @@ -0,0 +1 @@ +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ECHO b/anolis/configs/L2-OPTIONAL/default/CONFIG_ECHO new file mode 100644 index 000000000000..b84a07b07a76 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ECHO @@ -0,0 +1 @@ +# CONFIG_ECHO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ECRYPT_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ECRYPT_FS new file mode 100644 index 000000000000..5d2468d89d64 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ECRYPT_FS @@ -0,0 +1 @@ +# CONFIG_ECRYPT_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EDAC_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_EDAC_SUPPORT new file mode 100644 index 000000000000..ff1c03ae44e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EDAC_SUPPORT @@ -0,0 +1 @@ +CONFIG_EDAC_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_93CX6 b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_93CX6 new file mode 100644 index 000000000000..9e93b9eb029e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_93CX6 @@ -0,0 +1 @@ +CONFIG_EEPROM_93CX6=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_93XX46 b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_93XX46 new file mode 100644 index 000000000000..483f0eb88ebb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_93XX46 @@ -0,0 +1 @@ +# CONFIG_EEPROM_93XX46 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_AT24 b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_AT24 new file mode 100644 index 000000000000..c19b44bc9639 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_AT24 @@ -0,0 +1 @@ +# CONFIG_EEPROM_AT24 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_AT25 b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_AT25 new file mode 100644 index 000000000000..5fe1f07dee3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_AT25 @@ -0,0 +1 @@ +# CONFIG_EEPROM_AT25 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_EE1004 b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_EE1004 new file mode 100644 index 000000000000..7bb8b65f09ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_EE1004 @@ -0,0 +1 @@ +# CONFIG_EEPROM_EE1004 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_IDT_89HPESX 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_IDT_89HPESX new file mode 100644 index 000000000000..7f11cc3038f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_IDT_89HPESX @@ -0,0 +1 @@ +# CONFIG_EEPROM_IDT_89HPESX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_LEGACY b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_LEGACY new file mode 100644 index 000000000000..c53217d4d472 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_LEGACY @@ -0,0 +1 @@ +CONFIG_EEPROM_LEGACY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_MAX6875 b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_MAX6875 new file mode 100644 index 000000000000..9fc04b971031 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EEPROM_MAX6875 @@ -0,0 +1 @@ +CONFIG_EEPROM_MAX6875=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_BOOTLOADER_CONTROL b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_BOOTLOADER_CONTROL new file mode 100644 index 000000000000..ca42dfb428c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_BOOTLOADER_CONTROL @@ -0,0 +1 @@ +# CONFIG_EFI_BOOTLOADER_CONTROL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_CAPSULE_LOADER b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_CAPSULE_LOADER new file mode 100644 index 000000000000..2cc06321635b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_CAPSULE_LOADER @@ -0,0 +1 @@ +# CONFIG_EFI_CAPSULE_LOADER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_DISABLE_PCI_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_DISABLE_PCI_DMA new file mode 100644 index 000000000000..db43b223469e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_DISABLE_PCI_DMA @@ -0,0 +1 @@ +# CONFIG_EFI_DISABLE_PCI_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_DISABLE_RUNTIME b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_DISABLE_RUNTIME new file mode 
100644 index 000000000000..a406d6669210 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_DISABLE_RUNTIME @@ -0,0 +1 @@ +# CONFIG_EFI_DISABLE_RUNTIME is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_EARLYCON b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_EARLYCON new file mode 100644 index 000000000000..dcc91b9ff6a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_EARLYCON @@ -0,0 +1 @@ +CONFIG_EFI_EARLYCON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_ESRT b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_ESRT new file mode 100644 index 000000000000..b0c1d889512d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_ESRT @@ -0,0 +1 @@ +CONFIG_EFI_ESRT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_PARTITION new file mode 100644 index 000000000000..a7a899356a31 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_PARTITION @@ -0,0 +1 @@ +CONFIG_EFI_PARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_RUNTIME_WRAPPERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_RUNTIME_WRAPPERS new file mode 100644 index 000000000000..417cfeda4afd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_RUNTIME_WRAPPERS @@ -0,0 +1 @@ +CONFIG_EFI_RUNTIME_WRAPPERS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_TEST new file mode 100644 index 000000000000..455eb306151d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFI_TEST @@ -0,0 +1 @@ +# CONFIG_EFI_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFS_FS new file mode 100644 index 000000000000..8b1f1cbecb2a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EFS_FS @@ -0,0 +1 @@ +# CONFIG_EFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ELFCORE 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_ELFCORE new file mode 100644 index 000000000000..55854ef005f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ELFCORE @@ -0,0 +1 @@ +CONFIG_ELFCORE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ENA_ETHERNET b/anolis/configs/L2-OPTIONAL/default/CONFIG_ENA_ETHERNET new file mode 100644 index 000000000000..64c0a73c1959 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ENA_ETHERNET @@ -0,0 +1 @@ +CONFIG_ENA_ETHERNET=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ENCLOSURE_SERVICES b/anolis/configs/L2-OPTIONAL/default/CONFIG_ENCLOSURE_SERVICES new file mode 100644 index 000000000000..040c8ef0085a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ENCLOSURE_SERVICES @@ -0,0 +1 @@ +CONFIG_ENCLOSURE_SERVICES=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ENERGY_MODEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_ENERGY_MODEL new file mode 100644 index 000000000000..ae61c90f2ecc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ENERGY_MODEL @@ -0,0 +1 @@ +# CONFIG_ENERGY_MODEL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EQUALIZER b/anolis/configs/L2-OPTIONAL/default/CONFIG_EQUALIZER new file mode 100644 index 000000000000..5c3282d3cdd2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EQUALIZER @@ -0,0 +1 @@ +# CONFIG_EQUALIZER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_PCPU_KTHREAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_PCPU_KTHREAD new file mode 100644 index 000000000000..76ef583e60b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_PCPU_KTHREAD @@ -0,0 +1 @@ +# CONFIG_EROFS_FS_PCPU_KTHREAD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ETHERNET b/anolis/configs/L2-OPTIONAL/default/CONFIG_ETHERNET new file mode 100644 index 000000000000..62a7778f150b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ETHERNET @@ -0,0 +1 @@ 
+CONFIG_ETHERNET=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ETHOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_ETHOC new file mode 100644 index 000000000000..e78d27772937 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ETHOC @@ -0,0 +1 @@ +CONFIG_ETHOC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EVENT_TRACING b/anolis/configs/L2-OPTIONAL/default/CONFIG_EVENT_TRACING new file mode 100644 index 000000000000..c411df81986a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EVENT_TRACING @@ -0,0 +1 @@ +CONFIG_EVENT_TRACING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EXCLUSIVE_SYSTEM_RAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXCLUSIVE_SYSTEM_RAM new file mode 100644 index 000000000000..a06b7c7a995a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXCLUSIVE_SYSTEM_RAM @@ -0,0 +1 @@ +CONFIG_EXCLUSIVE_SYSTEM_RAM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EXPORTFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXPORTFS new file mode 100644 index 000000000000..21b4cdebaf32 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXPORTFS @@ -0,0 +1 @@ +CONFIG_EXPORTFS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EXT3_FS_POSIX_ACL b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXT3_FS_POSIX_ACL new file mode 100644 index 000000000000..ab615c293cc0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXT3_FS_POSIX_ACL @@ -0,0 +1 @@ +# CONFIG_EXT3_FS_POSIX_ACL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EXT3_FS_SECURITY b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXT3_FS_SECURITY new file mode 100644 index 000000000000..e8c96ad0b639 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EXT3_FS_SECURITY @@ -0,0 +1 @@ +# CONFIG_EXT3_FS_SECURITY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EZX_PCAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_EZX_PCAP new file mode 100644 index 000000000000..f4ac470bdaea --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EZX_PCAP @@ -0,0 +1 @@ +# CONFIG_EZX_PCAP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_F2FS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_F2FS_FS new file mode 100644 index 000000000000..e71bcee61e39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_F2FS_FS @@ -0,0 +1 @@ +# CONFIG_F2FS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FARSYNC b/anolis/configs/L2-OPTIONAL/default/CONFIG_FARSYNC new file mode 100644 index 000000000000..1dd0929a4e2f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FARSYNC @@ -0,0 +1 @@ +# CONFIG_FARSYNC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_3DFX b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_3DFX new file mode 100644 index 000000000000..63214cf1b92a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_3DFX @@ -0,0 +1 @@ +# CONFIG_FB_3DFX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ARK b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ARK new file mode 100644 index 000000000000..3ed9dfc30b50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ARK @@ -0,0 +1 @@ +# CONFIG_FB_ARK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ASILIANT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ASILIANT new file mode 100644 index 000000000000..34148fdc4bef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ASILIANT @@ -0,0 +1 @@ +# CONFIG_FB_ASILIANT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ATY b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ATY new file mode 100644 index 000000000000..cae8eee36d90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ATY @@ -0,0 +1 @@ +# CONFIG_FB_ATY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ATY128 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ATY128 new file mode 100644 index 000000000000..da69465555ae --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_ATY128 @@ -0,0 +1 @@ +# CONFIG_FB_ATY128 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CARMINE b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CARMINE new file mode 100644 index 000000000000..4710f2f333c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CARMINE @@ -0,0 +1 @@ +# CONFIG_FB_CARMINE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_COPYAREA b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_COPYAREA new file mode 100644 index 000000000000..c7e361de6511 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_COPYAREA @@ -0,0 +1 @@ +CONFIG_FB_CFB_COPYAREA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_FILLRECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_FILLRECT new file mode 100644 index 000000000000..704d65343e07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_FILLRECT @@ -0,0 +1 @@ +CONFIG_FB_CFB_FILLRECT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_IMAGEBLIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_IMAGEBLIT new file mode 100644 index 000000000000..86440cdbf6e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CFB_IMAGEBLIT @@ -0,0 +1 @@ +CONFIG_FB_CFB_IMAGEBLIT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CIRRUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CIRRUS new file mode 100644 index 000000000000..e4a9f519acf3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CIRRUS @@ -0,0 +1 @@ +# CONFIG_FB_CIRRUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CORE new file mode 100644 index 000000000000..22cafa667b23 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CORE @@ -0,0 +1 @@ +CONFIG_FB_CORE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CYBER2000 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CYBER2000 new 
file mode 100644 index 000000000000..27a73fa27136 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_CYBER2000 @@ -0,0 +1 @@ +# CONFIG_FB_CYBER2000 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_DEFERRED_IO b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_DEFERRED_IO new file mode 100644 index 000000000000..0b29411b8b17 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_DEFERRED_IO @@ -0,0 +1 @@ +CONFIG_FB_DEFERRED_IO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_DEVICE b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_DEVICE new file mode 100644 index 000000000000..a01b5849f85f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_DEVICE @@ -0,0 +1 @@ +CONFIG_FB_DEVICE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_FOREIGN_ENDIAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_FOREIGN_ENDIAN new file mode 100644 index 000000000000..583ddc4f9bf2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_FOREIGN_ENDIAN @@ -0,0 +1 @@ +# CONFIG_FB_FOREIGN_ENDIAN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_I740 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_I740 new file mode 100644 index 000000000000..4a3cb7cebd50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_I740 @@ -0,0 +1 @@ +# CONFIG_FB_I740 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IBM_GXT4500 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IBM_GXT4500 new file mode 100644 index 000000000000..628c9a85e012 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IBM_GXT4500 @@ -0,0 +1 @@ +# CONFIG_FB_IBM_GXT4500 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IMSTT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IMSTT new file mode 100644 index 000000000000..4ca1a915d60a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IMSTT @@ -0,0 +1 @@ +# CONFIG_FB_IMSTT is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_HELPERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_HELPERS new file mode 100644 index 000000000000..762245fb865d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_HELPERS @@ -0,0 +1 @@ +CONFIG_FB_IOMEM_HELPERS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_KYRO b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_KYRO new file mode 100644 index 000000000000..812aad0b8f36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_KYRO @@ -0,0 +1 @@ +# CONFIG_FB_KYRO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MATROX b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MATROX new file mode 100644 index 000000000000..ff6a83e395f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MATROX @@ -0,0 +1 @@ +# CONFIG_FB_MATROX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MB862XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MB862XX new file mode 100644 index 000000000000..b2ea119f36a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MB862XX @@ -0,0 +1 @@ +# CONFIG_FB_MB862XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_METRONOME b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_METRONOME new file mode 100644 index 000000000000..7eb7b58caeb0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_METRONOME @@ -0,0 +1 @@ +# CONFIG_FB_METRONOME is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MODE_HELPERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MODE_HELPERS new file mode 100644 index 000000000000..e9eb6ec4ffe9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_MODE_HELPERS @@ -0,0 +1 @@ +# CONFIG_FB_MODE_HELPERS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NEOMAGIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NEOMAGIC new file mode 100644 index 000000000000..c40b63c27f98 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NEOMAGIC @@ -0,0 +1 @@ +# CONFIG_FB_NEOMAGIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NOTIFY b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NOTIFY new file mode 100644 index 000000000000..cf08f4c4db57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NOTIFY @@ -0,0 +1 @@ +CONFIG_FB_NOTIFY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NVIDIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NVIDIA new file mode 100644 index 000000000000..00e8d12d5055 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_NVIDIA @@ -0,0 +1 @@ +# CONFIG_FB_NVIDIA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_OPENCORES b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_OPENCORES new file mode 100644 index 000000000000..af7bd5b48986 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_OPENCORES @@ -0,0 +1 @@ +# CONFIG_FB_OPENCORES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_PM2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_PM2 new file mode 100644 index 000000000000..402b162af8e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_PM2 @@ -0,0 +1 @@ +# CONFIG_FB_PM2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_PM3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_PM3 new file mode 100644 index 000000000000..c0e9092e3362 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_PM3 @@ -0,0 +1 @@ +# CONFIG_FB_PM3 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_RADEON b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_RADEON new file mode 100644 index 000000000000..844570e81d78 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_RADEON @@ -0,0 +1 @@ +# CONFIG_FB_RADEON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_RIVA b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_RIVA new file mode 100644 index 000000000000..51d3df8ebfa2 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_RIVA @@ -0,0 +1 @@ +# CONFIG_FB_RIVA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_S1D13XXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_S1D13XXX new file mode 100644 index 000000000000..723326189585 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_S1D13XXX @@ -0,0 +1 @@ +# CONFIG_FB_S1D13XXX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_S3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_S3 new file mode 100644 index 000000000000..5e8d701d41f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_S3 @@ -0,0 +1 @@ +# CONFIG_FB_S3 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SAVAGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SAVAGE new file mode 100644 index 000000000000..7512c54c647d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SAVAGE @@ -0,0 +1 @@ +# CONFIG_FB_SAVAGE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SIS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SIS new file mode 100644 index 000000000000..3ad07d50ad15 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SIS @@ -0,0 +1 @@ +# CONFIG_FB_SIS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SM712 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SM712 new file mode 100644 index 000000000000..78188e33d5c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SM712 @@ -0,0 +1 @@ +# CONFIG_FB_SM712 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SMSCUFX b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SMSCUFX new file mode 100644 index 000000000000..f80de74b87de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SMSCUFX @@ -0,0 +1 @@ +# CONFIG_FB_SMSCUFX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYSMEM_HELPERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYSMEM_HELPERS new file mode 100644 index 
000000000000..4296fa8b9b92 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYSMEM_HELPERS @@ -0,0 +1 @@ +CONFIG_FB_SYSMEM_HELPERS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYSMEM_HELPERS_DEFERRED b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYSMEM_HELPERS_DEFERRED new file mode 100644 index 000000000000..d2ea0b42899d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYSMEM_HELPERS_DEFERRED @@ -0,0 +1 @@ +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_COPYAREA b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_COPYAREA new file mode 100644 index 000000000000..d165742d50fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_COPYAREA @@ -0,0 +1 @@ +CONFIG_FB_SYS_COPYAREA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_FILLRECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_FILLRECT new file mode 100644 index 000000000000..ae245540bcce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_FILLRECT @@ -0,0 +1 @@ +CONFIG_FB_SYS_FILLRECT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_FOPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_FOPS new file mode 100644 index 000000000000..5d67337536f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_FOPS @@ -0,0 +1 @@ +CONFIG_FB_SYS_FOPS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_IMAGEBLIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_IMAGEBLIT new file mode 100644 index 000000000000..910e47f5e45d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_SYS_IMAGEBLIT @@ -0,0 +1 @@ +CONFIG_FB_SYS_IMAGEBLIT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_TRIDENT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_TRIDENT new file mode 100644 index 000000000000..d5a885fb9fb9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_TRIDENT @@ -0,0 +1 @@ +# CONFIG_FB_TRIDENT is not set diff 
--git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_UDL b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_UDL new file mode 100644 index 000000000000..b61aece886b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_UDL @@ -0,0 +1 @@ +# CONFIG_FB_UDL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_UVESA b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_UVESA new file mode 100644 index 000000000000..b677212dcfe2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_UVESA @@ -0,0 +1 @@ +# CONFIG_FB_UVESA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VIRTUAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VIRTUAL new file mode 100644 index 000000000000..79dd529e9a79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VIRTUAL @@ -0,0 +1 @@ +# CONFIG_FB_VIRTUAL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VOODOO1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VOODOO1 new file mode 100644 index 000000000000..230c5eca2861 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VOODOO1 @@ -0,0 +1 @@ +# CONFIG_FB_VOODOO1 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VT8623 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VT8623 new file mode 100644 index 000000000000..e8ebebc5fe67 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_VT8623 @@ -0,0 +1 @@ +# CONFIG_FB_VT8623 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FDDI b/anolis/configs/L2-OPTIONAL/default/CONFIG_FDDI new file mode 100644 index 000000000000..e13f968a5a79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FDDI @@ -0,0 +1 @@ +# CONFIG_FDDI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FEALNX b/anolis/configs/L2-OPTIONAL/default/CONFIG_FEALNX new file mode 100644 index 000000000000..4cca6a26d04f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FEALNX @@ -0,0 +1 @@ +# CONFIG_FEALNX is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_FIB_RULES b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIB_RULES new file mode 100644 index 000000000000..0c0fd787522f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIB_RULES @@ -0,0 +1 @@ +CONFIG_FIB_RULES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FIND_BIT_BENCHMARK b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIND_BIT_BENCHMARK new file mode 100644 index 000000000000..93fe7e039048 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIND_BIT_BENCHMARK @@ -0,0 +1 @@ +# CONFIG_FIND_BIT_BENCHMARK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FIPS_SIGNATURE_SELFTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIPS_SIGNATURE_SELFTEST new file mode 100644 index 000000000000..bd5282b3ceb1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIPS_SIGNATURE_SELFTEST @@ -0,0 +1 @@ +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FIREWIRE_NOSY b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIREWIRE_NOSY new file mode 100644 index 000000000000..0b48c48d1796 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIREWIRE_NOSY @@ -0,0 +1 @@ +# CONFIG_FIREWIRE_NOSY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FIXED_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIXED_PHY new file mode 100644 index 000000000000..80379efd3cf9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FIXED_PHY @@ -0,0 +1 @@ +CONFIG_FIXED_PHY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FONTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONTS new file mode 100644 index 000000000000..abe0213f932f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONTS @@ -0,0 +1 @@ +# CONFIG_FONTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_8x16 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_8x16 new file mode 100644 index 000000000000..aecbfb2b1ed7 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_8x16 @@ -0,0 +1 @@ +CONFIG_FONT_8x16=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_8x8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_8x8 new file mode 100644 index 000000000000..6efb90c1daa7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_8x8 @@ -0,0 +1 @@ +CONFIG_FONT_8x8=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_SUPPORT new file mode 100644 index 000000000000..89f62253375f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FONT_SUPPORT @@ -0,0 +1 @@ +CONFIG_FONT_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FPGA b/anolis/configs/L2-OPTIONAL/default/CONFIG_FPGA new file mode 100644 index 000000000000..8bb6ca99e3bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FPGA @@ -0,0 +1 @@ +# CONFIG_FPGA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER b/anolis/configs/L2-OPTIONAL/default/CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER new file mode 100644 index 000000000000..91eda6cd3a62 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER @@ -0,0 +1 @@ +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION new file mode 100644 index 000000000000..725b06c7d8e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION @@ -0,0 +1 @@ +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FREEZER b/anolis/configs/L2-OPTIONAL/default/CONFIG_FREEZER new file mode 100644 index 000000000000..db6aab2a5722 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FREEZER @@ -0,0 +1 @@ 
+CONFIG_FREEZER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_DAX_PMD b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_DAX_PMD new file mode 100644 index 000000000000..2345dbe2f7eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_DAX_PMD @@ -0,0 +1 @@ +CONFIG_FS_DAX_PMD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_ENCRYPTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_ENCRYPTION new file mode 100644 index 000000000000..b36ec94f0760 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_ENCRYPTION @@ -0,0 +1 @@ +# CONFIG_FS_ENCRYPTION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_IOMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_IOMAP new file mode 100644 index 000000000000..d21093b7bcc5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_IOMAP @@ -0,0 +1 @@ +CONFIG_FS_IOMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_POSIX_ACL b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_POSIX_ACL new file mode 100644 index 000000000000..ac587b8e48d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_POSIX_ACL @@ -0,0 +1 @@ +CONFIG_FS_POSIX_ACL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_VERITY b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_VERITY new file mode 100644 index 000000000000..1c1298830047 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_VERITY @@ -0,0 +1 @@ +# CONFIG_FS_VERITY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FTL b/anolis/configs/L2-OPTIONAL/default/CONFIG_FTL new file mode 100644 index 000000000000..cc37e0b2bfb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FTL @@ -0,0 +1 @@ +# CONFIG_FTL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FTRACE_MCOUNT_RECORD b/anolis/configs/L2-OPTIONAL/default/CONFIG_FTRACE_MCOUNT_RECORD new file mode 100644 index 000000000000..5849b3c45b70 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_FTRACE_MCOUNT_RECORD @@ -0,0 +1 @@ +CONFIG_FTRACE_MCOUNT_RECORD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FTRACE_STARTUP_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_FTRACE_STARTUP_TEST new file mode 100644 index 000000000000..8e7db27579a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FTRACE_STARTUP_TEST @@ -0,0 +1 @@ +# CONFIG_FTRACE_STARTUP_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_ALIGNMENT_4B b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_ALIGNMENT_4B new file mode 100644 index 000000000000..4e48d80c8f64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_ALIGNMENT_4B @@ -0,0 +1 @@ +CONFIG_FUNCTION_ALIGNMENT_4B=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_ERROR_INJECTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_ERROR_INJECTION new file mode 100644 index 000000000000..f6b81a84aff3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_ERROR_INJECTION @@ -0,0 +1 @@ +CONFIG_FUNCTION_ERROR_INJECTION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_GRAPH_RETVAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_GRAPH_RETVAL new file mode 100644 index 000000000000..6161ea515619 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUNCTION_GRAPH_RETVAL @@ -0,0 +1 @@ +# CONFIG_FUNCTION_GRAPH_RETVAL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FUN_ETH b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUN_ETH new file mode 100644 index 000000000000..ff715bf7dbe8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUN_ETH @@ -0,0 +1 @@ +# CONFIG_FUN_ETH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FUTEX_PI b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUTEX_PI new file mode 100644 index 000000000000..80d8966e7a9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FUTEX_PI @@ -0,0 +1 @@ 
+CONFIG_FUTEX_PI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FWNODE_MDIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_FWNODE_MDIO new file mode 100644 index 000000000000..c9e5bf427515 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FWNODE_MDIO @@ -0,0 +1 @@ +CONFIG_FWNODE_MDIO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT new file mode 100644 index 000000000000..0f3f509c640c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT @@ -0,0 +1 @@ +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_COMPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_COMPRESS new file mode 100644 index 000000000000..f95f93620f70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_COMPRESS @@ -0,0 +1 @@ +# CONFIG_FW_LOADER_COMPRESS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_DEBUG new file mode 100644 index 000000000000..87cc900a5152 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_DEBUG @@ -0,0 +1 @@ +CONFIG_FW_LOADER_DEBUG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_PAGED_BUF b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_PAGED_BUF new file mode 100644 index 000000000000..f515939aa3f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_PAGED_BUF @@ -0,0 +1 @@ +CONFIG_FW_LOADER_PAGED_BUF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_SYSFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_SYSFS new file mode 100644 index 000000000000..b6548de023d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_SYSFS @@ -0,0 +1 @@ +CONFIG_FW_LOADER_SYSFS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_UPLOAD 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_UPLOAD new file mode 100644 index 000000000000..733915f1756a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_UPLOAD @@ -0,0 +1 @@ +CONFIG_FW_UPLOAD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GAMEPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_GAMEPORT new file mode 100644 index 000000000000..03c782bf39e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GAMEPORT @@ -0,0 +1 @@ +# CONFIG_GAMEPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GARP b/anolis/configs/L2-OPTIONAL/default/CONFIG_GARP new file mode 100644 index 000000000000..37a1a1b697ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GARP @@ -0,0 +1 @@ +CONFIG_GARP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC10_NO_ARRAY_BOUNDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC10_NO_ARRAY_BOUNDS new file mode 100644 index 000000000000..c4f1555e19d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC10_NO_ARRAY_BOUNDS @@ -0,0 +1 @@ +CONFIG_GCC10_NO_ARRAY_BOUNDS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGINS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGINS new file mode 100644 index 000000000000..178242a97f0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGINS @@ -0,0 +1 @@ +CONFIG_GCC_PLUGINS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGIN_LATENT_ENTROPY b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGIN_LATENT_ENTROPY new file mode 100644 index 000000000000..4775521e9792 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGIN_LATENT_ENTROPY @@ -0,0 +1 @@ +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGIN_STACKLEAK b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGIN_STACKLEAK new file mode 100644 index 000000000000..701c49771691 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_PLUGIN_STACKLEAK @@ -0,0 +1 @@ +# CONFIG_GCC_PLUGIN_STACKLEAK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_VERSION new file mode 100644 index 000000000000..0b34ca5bf12c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GCC_VERSION @@ -0,0 +1 @@ +CONFIG_GCC_VERSION=200000 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_ALLOCATOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_ALLOCATOR new file mode 100644 index 000000000000..532e98c5134b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_ALLOCATOR @@ -0,0 +1 @@ +CONFIG_GENERIC_ALLOCATOR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_BUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_BUG new file mode 100644 index 000000000000..2c5f5f1da54e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_BUG @@ -0,0 +1 @@ +CONFIG_GENERIC_BUG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_BUG_RELATIVE_POINTERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_BUG_RELATIVE_POINTERS new file mode 100644 index 000000000000..13714e8647a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_BUG_RELATIVE_POINTERS @@ -0,0 +1 @@ +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CALIBRATE_DELAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CALIBRATE_DELAY new file mode 100644 index 000000000000..7670ace05c83 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CALIBRATE_DELAY @@ -0,0 +1 @@ +CONFIG_GENERIC_CALIBRATE_DELAY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CLOCKEVENTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CLOCKEVENTS new file mode 100644 index 000000000000..156cf712f0b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CLOCKEVENTS @@ -0,0 +1 @@ 
+CONFIG_GENERIC_CLOCKEVENTS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CLOCKEVENTS_BROADCAST b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CLOCKEVENTS_BROADCAST new file mode 100644 index 000000000000..ffe0d1894806 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CLOCKEVENTS_BROADCAST @@ -0,0 +1 @@ +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CPU_AUTOPROBE b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CPU_AUTOPROBE new file mode 100644 index 000000000000..30af7a839fe1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CPU_AUTOPROBE @@ -0,0 +1 @@ +CONFIG_GENERIC_CPU_AUTOPROBE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CPU_VULNERABILITIES b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CPU_VULNERABILITIES new file mode 100644 index 000000000000..66a48cca7107 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_CPU_VULNERABILITIES @@ -0,0 +1 @@ +CONFIG_GENERIC_CPU_VULNERABILITIES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_EARLY_IOREMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_EARLY_IOREMAP new file mode 100644 index 000000000000..56c8df711663 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_EARLY_IOREMAP @@ -0,0 +1 @@ +CONFIG_GENERIC_EARLY_IOREMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK new file mode 100644 index 000000000000..69743b49cb15 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_IRQ_MIGRATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_IRQ_MIGRATION new file mode 100644 index 000000000000..79cc3d3473f9 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_IRQ_MIGRATION @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_MIGRATION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_MSI_IRQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_MSI_IRQ new file mode 100644 index 000000000000..3c8d8e767893 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_MSI_IRQ @@ -0,0 +1 @@ +CONFIG_GENERIC_MSI_IRQ=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_NET_UTILS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_NET_UTILS new file mode 100644 index 000000000000..e4d79181a5b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_NET_UTILS @@ -0,0 +1 @@ +CONFIG_GENERIC_NET_UTILS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PCI_IOMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PCI_IOMAP new file mode 100644 index 000000000000..31aaf92b387b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PCI_IOMAP @@ -0,0 +1 @@ +CONFIG_GENERIC_PCI_IOMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PINCONF b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PINCONF new file mode 100644 index 000000000000..dededed2ddc5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PINCONF @@ -0,0 +1 @@ +CONFIG_GENERIC_PINCONF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PTDUMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PTDUMP new file mode 100644 index 000000000000..8924484ad9c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_PTDUMP @@ -0,0 +1 @@ +CONFIG_GENERIC_PTDUMP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_SMP_IDLE_THREAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_SMP_IDLE_THREAD new file mode 100644 index 000000000000..3af93e91c286 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_SMP_IDLE_THREAD @@ -0,0 +1 @@ +CONFIG_GENERIC_SMP_IDLE_THREAD=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_STRNCPY_FROM_USER b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_STRNCPY_FROM_USER new file mode 100644 index 000000000000..da6133f23c71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_STRNCPY_FROM_USER @@ -0,0 +1 @@ +CONFIG_GENERIC_STRNCPY_FROM_USER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_STRNLEN_USER b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_STRNLEN_USER new file mode 100644 index 000000000000..3479709f71f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_STRNLEN_USER @@ -0,0 +1 @@ +CONFIG_GENERIC_STRNLEN_USER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_TIME_VSYSCALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_TIME_VSYSCALL new file mode 100644 index 000000000000..e28ded4ea4c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_TIME_VSYSCALL @@ -0,0 +1 @@ +CONFIG_GENERIC_TIME_VSYSCALL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_TRACER b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_TRACER new file mode 100644 index 000000000000..3597bf1a8577 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENERIC_TRACER @@ -0,0 +1 @@ +CONFIG_GENERIC_TRACER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GENWQE b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENWQE new file mode 100644 index 000000000000..0d6952464189 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GENWQE @@ -0,0 +1 @@ +# CONFIG_GENWQE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GLOB b/anolis/configs/L2-OPTIONAL/default/CONFIG_GLOB new file mode 100644 index 000000000000..7ad953de48eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GLOB @@ -0,0 +1 @@ +CONFIG_GLOB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GLOB_SELFTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_GLOB_SELFTEST new file mode 100644 index 000000000000..8ee334320818 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GLOB_SELFTEST @@ -0,0 +1 @@ +# CONFIG_GLOB_SELFTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GNSS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GNSS new file mode 100644 index 000000000000..07c7233bf96c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GNSS @@ -0,0 +1 @@ +# CONFIG_GNSS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GOLDFISH b/anolis/configs/L2-OPTIONAL/default/CONFIG_GOLDFISH new file mode 100644 index 000000000000..570eec3f0716 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GOLDFISH @@ -0,0 +1 @@ +# CONFIG_GOLDFISH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GOOGLE_FIRMWARE b/anolis/configs/L2-OPTIONAL/default/CONFIG_GOOGLE_FIRMWARE new file mode 100644 index 000000000000..a9a15cf41398 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GOOGLE_FIRMWARE @@ -0,0 +1 @@ +# CONFIG_GOOGLE_FIRMWARE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB new file mode 100644 index 000000000000..7c7603d68e3e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB @@ -0,0 +1 @@ +CONFIG_GPIOLIB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB_FASTPATH_LIMIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB_FASTPATH_LIMIT new file mode 100644 index 000000000000..09425777796a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB_FASTPATH_LIMIT @@ -0,0 +1 @@ +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB_IRQCHIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB_IRQCHIP new file mode 100644 index 000000000000..48c78b42cd61 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIOLIB_IRQCHIP @@ -0,0 +1 @@ +CONFIG_GPIOLIB_IRQCHIP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_ACPI 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_ACPI new file mode 100644 index 000000000000..4c39fc0dde33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_ACPI @@ -0,0 +1 @@ +CONFIG_GPIO_ACPI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AGGREGATOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AGGREGATOR new file mode 100644 index 000000000000..71bc3505a35f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AGGREGATOR @@ -0,0 +1 @@ +# CONFIG_GPIO_AGGREGATOR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AMDPT b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AMDPT new file mode 100644 index 000000000000..04ac1ad2c10b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AMDPT @@ -0,0 +1 @@ +CONFIG_GPIO_AMDPT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AMD_FCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AMD_FCH new file mode 100644 index 000000000000..6a7a2f22328c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_AMD_FCH @@ -0,0 +1 @@ +# CONFIG_GPIO_AMD_FCH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_BT8XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_BT8XX new file mode 100644 index 000000000000..dbc524d7ced0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_BT8XX @@ -0,0 +1 @@ +# CONFIG_GPIO_BT8XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_CDEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_CDEV new file mode 100644 index 000000000000..eb3c0e436697 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_CDEV @@ -0,0 +1 @@ +CONFIG_GPIO_CDEV=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_CDEV_V1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_CDEV_V1 new file mode 100644 index 000000000000..2a4e60a576ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_CDEV_V1 @@ -0,0 +1 @@ +CONFIG_GPIO_CDEV_V1=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_EXAR b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_EXAR new file mode 100644 index 000000000000..895a0888b304 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_EXAR @@ -0,0 +1 @@ +# CONFIG_GPIO_EXAR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX3191X b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX3191X new file mode 100644 index 000000000000..b0cd128e84c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX3191X @@ -0,0 +1 @@ +# CONFIG_GPIO_MAX3191X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX7300 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX7300 new file mode 100644 index 000000000000..27e781e577f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX7300 @@ -0,0 +1 @@ +# CONFIG_GPIO_MAX7300 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX7301 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX7301 new file mode 100644 index 000000000000..cd127dd6beb3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX7301 @@ -0,0 +1 @@ +# CONFIG_GPIO_MAX7301 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX732X b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX732X new file mode 100644 index 000000000000..690f870476ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MAX732X @@ -0,0 +1 @@ +# CONFIG_GPIO_MAX732X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MB86S7X b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MB86S7X new file mode 100644 index 000000000000..1b3c1acc10e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MB86S7X @@ -0,0 +1 @@ +# CONFIG_GPIO_MB86S7X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MC33880 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MC33880 new file mode 100644 index 000000000000..17f039cfc3b7 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MC33880 @@ -0,0 +1 @@ +# CONFIG_GPIO_MC33880 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MOCKUP b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MOCKUP new file mode 100644 index 000000000000..7f135a6ef04e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_MOCKUP @@ -0,0 +1 @@ +# CONFIG_GPIO_MOCKUP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCA953X b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCA953X new file mode 100644 index 000000000000..ca0543fbb784 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCA953X @@ -0,0 +1 @@ +# CONFIG_GPIO_PCA953X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCA9570 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCA9570 new file mode 100644 index 000000000000..ab334325aed6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCA9570 @@ -0,0 +1 @@ +# CONFIG_GPIO_PCA9570 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCF857X b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCF857X new file mode 100644 index 000000000000..d58fab0742d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCF857X @@ -0,0 +1 @@ +# CONFIG_GPIO_PCF857X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCIE_IDIO_24 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCIE_IDIO_24 new file mode 100644 index 000000000000..6313cc61d8e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCIE_IDIO_24 @@ -0,0 +1 @@ +# CONFIG_GPIO_PCIE_IDIO_24 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCI_IDIO_16 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCI_IDIO_16 new file mode 100644 index 000000000000..2228f07877c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PCI_IDIO_16 @@ -0,0 +1 @@ +# CONFIG_GPIO_PCI_IDIO_16 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PISOSR b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PISOSR new file mode 100644 index 000000000000..0026e4baf776 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_PISOSR @@ -0,0 +1 @@ +# CONFIG_GPIO_PISOSR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_RDC321X b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_RDC321X new file mode 100644 index 000000000000..241293f19d33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_RDC321X @@ -0,0 +1 @@ +# CONFIG_GPIO_RDC321X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_TPIC2810 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_TPIC2810 new file mode 100644 index 000000000000..a522f61016ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_TPIC2810 @@ -0,0 +1 @@ +# CONFIG_GPIO_TPIC2810 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_XRA1403 b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_XRA1403 new file mode 100644 index 000000000000..c9567433fedb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GPIO_XRA1403 @@ -0,0 +1 @@ +# CONFIG_GPIO_XRA1403 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GRACE_PERIOD b/anolis/configs/L2-OPTIONAL/default/CONFIG_GRACE_PERIOD new file mode 100644 index 000000000000..eeb72ba89583 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GRACE_PERIOD @@ -0,0 +1 @@ +CONFIG_GRACE_PERIOD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GREENASIA_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_GREENASIA_FF new file mode 100644 index 000000000000..def920e42e88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GREENASIA_FF @@ -0,0 +1 @@ +# CONFIG_GREENASIA_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GREYBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GREYBUS new file mode 100644 index 000000000000..7fa945c3364f --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_GREYBUS @@ -0,0 +1 @@ +# CONFIG_GREYBUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GRO_CELLS b/anolis/configs/L2-OPTIONAL/default/CONFIG_GRO_CELLS new file mode 100644 index 000000000000..1ffae25c20de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GRO_CELLS @@ -0,0 +1 @@ +CONFIG_GRO_CELLS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GTP b/anolis/configs/L2-OPTIONAL/default/CONFIG_GTP new file mode 100644 index 000000000000..ec01f6d28b7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GTP @@ -0,0 +1 @@ +# CONFIG_GTP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_GVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_GVE new file mode 100644 index 000000000000..26aadc46d3c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_GVE @@ -0,0 +1 @@ +CONFIG_GVE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAMRADIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAMRADIO new file mode 100644 index 000000000000..477ae40dbb27 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAMRADIO @@ -0,0 +1 @@ +# CONFIG_HAMRADIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDIRQS_SW_RESEND b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDIRQS_SW_RESEND new file mode 100644 index 000000000000..4e6810c6ee17 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDIRQS_SW_RESEND @@ -0,0 +1 @@ +CONFIG_HARDIRQS_SW_RESEND=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_ARCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_ARCH new file mode 100644 index 000000000000..39ea0c40bd9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_ARCH @@ -0,0 +1 @@ +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_BUDDY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_BUDDY 
new file mode 100644 index 000000000000..6cab31885336 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_BUDDY @@ -0,0 +1 @@ +# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER new file mode 100644 index 000000000000..5737b3d6e445 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER @@ -0,0 +1 @@ +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_PERF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_PERF new file mode 100644 index 000000000000..e2ce4db33460 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_PERF @@ -0,0 +1 @@ +CONFIG_HARDLOCKUP_DETECTOR_PERF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY new file mode 100644 index 000000000000..e0fcf6e9163c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY @@ -0,0 +1 @@ +# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_DMA new file mode 100644 index 000000000000..074fcade6e38 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_DMA @@ -0,0 +1 @@ +CONFIG_HAS_DMA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOMEM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOMEM new file mode 100644 index 000000000000..2e73c44ae51d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOMEM @@ -0,0 +1 @@ +CONFIG_HAS_IOMEM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOPORT new file mode 100644 
index 000000000000..3fb32343e428 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOPORT @@ -0,0 +1 @@ +CONFIG_HAS_IOPORT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOPORT_MAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOPORT_MAP new file mode 100644 index 000000000000..26e978eb9599 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAS_IOPORT_MAP @@ -0,0 +1 @@ +CONFIG_HAS_IOPORT_MAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ACPI_APEI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ACPI_APEI new file mode 100644 index 000000000000..f6e2adf17547 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ACPI_APEI @@ -0,0 +1 @@ +CONFIG_HAVE_ACPI_APEI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ALIGNED_STRUCT_PAGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ALIGNED_STRUCT_PAGE new file mode 100644 index 000000000000..c7b4aa501206 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ALIGNED_STRUCT_PAGE @@ -0,0 +1 @@ +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_AUDITSYSCALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_AUDITSYSCALL new file mode 100644 index 000000000000..a7f74c1e403f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_AUDITSYSCALL @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_AUDITSYSCALL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_HUGE_VMALLOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_HUGE_VMALLOC new file mode 100644 index 000000000000..99b8b5b62fa0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_HUGE_VMALLOC @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_HUGE_VMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_HUGE_VMAP new file mode 100644 index 000000000000..424d3e46c3bd --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_HUGE_VMAP @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_HUGE_VMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_JUMP_LABEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_JUMP_LABEL new file mode 100644 index 000000000000..f8e9b05d5647 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_JUMP_LABEL @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_JUMP_LABEL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE new file mode 100644 index 000000000000..cf9ecf703edb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KASAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KASAN new file mode 100644 index 000000000000..c91df1bf0c70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KASAN @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_KASAN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KASAN_VMALLOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KASAN_VMALLOC new file mode 100644 index 000000000000..74fe2bd0b7f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KASAN_VMALLOC @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KFENCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KFENCE new file mode 100644 index 000000000000..30e3e70d573a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KFENCE @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_KFENCE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KGDB b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KGDB new file mode 100644 index 000000000000..bab9449945db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_KGDB @@ 
-0,0 +1 @@ +CONFIG_HAVE_ARCH_KGDB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_MMAP_RND_BITS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_MMAP_RND_BITS new file mode 100644 index 000000000000..7ad15d3c444e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_MMAP_RND_BITS @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS new file mode 100644 index 000000000000..c603b80ac339 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_PREL32_RELOCATIONS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_PREL32_RELOCATIONS new file mode 100644 index 000000000000..c7b2979290b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_PREL32_RELOCATIONS @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET new file mode 100644 index 000000000000..0c8134ea4cdc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_SECCOMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_SECCOMP new file mode 100644 index 000000000000..3e2f16ecb858 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_SECCOMP @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_SECCOMP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_SECCOMP_FILTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_SECCOMP_FILTER new file mode 100644 index 000000000000..c3a4f1bce664 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_SECCOMP_FILTER @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_STACKLEAK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_STACKLEAK new file mode 100644 index 000000000000..cf87c8026f6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_STACKLEAK @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_STACKLEAK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST new file mode 100644 index 000000000000..328ddf968b73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_TRACEHOOK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_TRACEHOOK new file mode 100644 index 000000000000..d834130d89f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_TRACEHOOK @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_TRACEHOOK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE new file mode 100644 index 000000000000..6e840a347b8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_USERFAULTFD_MINOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_USERFAULTFD_MINOR new file mode 100644 index 000000000000..250507e2d4e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_USERFAULTFD_MINOR @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_VMAP_STACK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_VMAP_STACK new file mode 
100644 index 000000000000..6fdf79325aa4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_VMAP_STACK @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_VMAP_STACK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ASM_MODVERSIONS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ASM_MODVERSIONS new file mode 100644 index 000000000000..396524114502 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ASM_MODVERSIONS @@ -0,0 +1 @@ +CONFIG_HAVE_ASM_MODVERSIONS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CLK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CLK new file mode 100644 index 000000000000..d1e4deb60dac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CLK @@ -0,0 +1 @@ +CONFIG_HAVE_CLK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CLK_PREPARE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CLK_PREPARE new file mode 100644 index 000000000000..1ffe5a9b6a73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CLK_PREPARE @@ -0,0 +1 @@ +CONFIG_HAVE_CLK_PREPARE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CMPXCHG_DOUBLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CMPXCHG_DOUBLE new file mode 100644 index 000000000000..bbb12794a56b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CMPXCHG_DOUBLE @@ -0,0 +1 @@ +CONFIG_HAVE_CMPXCHG_DOUBLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CMPXCHG_LOCAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CMPXCHG_LOCAL new file mode 100644 index 000000000000..924e3b1cecb5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CMPXCHG_LOCAL @@ -0,0 +1 @@ +CONFIG_HAVE_CMPXCHG_LOCAL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CONTEXT_TRACKING_USER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CONTEXT_TRACKING_USER new file mode 100644 index 000000000000..50c186f9d417 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_CONTEXT_TRACKING_USER @@ -0,0 +1 @@ +CONFIG_HAVE_CONTEXT_TRACKING_USER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_C_RECORDMCOUNT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_C_RECORDMCOUNT new file mode 100644 index 000000000000..e5e5b7aea3b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_C_RECORDMCOUNT @@ -0,0 +1 @@ +CONFIG_HAVE_C_RECORDMCOUNT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DEBUG_KMEMLEAK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DEBUG_KMEMLEAK new file mode 100644 index 000000000000..fc41260656e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DEBUG_KMEMLEAK @@ -0,0 +1 @@ +CONFIG_HAVE_DEBUG_KMEMLEAK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DMA_CONTIGUOUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DMA_CONTIGUOUS new file mode 100644 index 000000000000..2d0cab411cc2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DMA_CONTIGUOUS @@ -0,0 +1 @@ +CONFIG_HAVE_DMA_CONTIGUOUS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE new file mode 100644 index 000000000000..392a9fcedbf5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE @@ -0,0 +1 @@ +CONFIG_HAVE_DYNAMIC_FTRACE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS new file mode 100644 index 000000000000..97a77a3b1f27 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS @@ -0,0 +1 @@ +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS new file mode 100644 index 000000000000..79483bdeefca --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS @@ -0,0 +1 @@ +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_EBPF_JIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_EBPF_JIT new file mode 100644 index 000000000000..29ba64074d88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_EBPF_JIT @@ -0,0 +1 @@ +CONFIG_HAVE_EBPF_JIT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS new file mode 100644 index 000000000000..eb414b6fa153 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS @@ -0,0 +1 @@ +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FAST_GUP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FAST_GUP new file mode 100644 index 000000000000..042e83df6c47 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FAST_GUP @@ -0,0 +1 @@ +CONFIG_HAVE_FAST_GUP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FTRACE_MCOUNT_RECORD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FTRACE_MCOUNT_RECORD new file mode 100644 index 000000000000..f3fbc8cd56a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FTRACE_MCOUNT_RECORD @@ -0,0 +1 @@ +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_ARG_ACCESS_API b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_ARG_ACCESS_API new file mode 100644 index 000000000000..f6cc5601c93b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_ARG_ACCESS_API @@ -0,0 +1 @@ +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_ERROR_INJECTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_ERROR_INJECTION new file mode 100644 index 
000000000000..148288801d65 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_ERROR_INJECTION @@ -0,0 +1 @@ +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_GRAPH_RETVAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_GRAPH_RETVAL new file mode 100644 index 000000000000..b4ff36b365d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_GRAPH_RETVAL @@ -0,0 +1 @@ +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_GRAPH_TRACER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_GRAPH_TRACER new file mode 100644 index 000000000000..2c1cd9480b32 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_GRAPH_TRACER @@ -0,0 +1 @@ +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_TRACER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_TRACER new file mode 100644 index 000000000000..856fbeef1499 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_FUNCTION_TRACER @@ -0,0 +1 @@ +CONFIG_HAVE_FUNCTION_TRACER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_GCC_PLUGINS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_GCC_PLUGINS new file mode 100644 index 000000000000..e6906ae85050 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_GCC_PLUGINS @@ -0,0 +1 @@ +CONFIG_HAVE_GCC_PLUGINS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_GENERIC_VDSO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_GENERIC_VDSO new file mode 100644 index 000000000000..6e6d74a3d352 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_GENERIC_VDSO @@ -0,0 +1 @@ +CONFIG_HAVE_GENERIC_VDSO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY new file mode 100644 index 
000000000000..7b43eb3981d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY @@ -0,0 +1 @@ +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF new file mode 100644 index 000000000000..55bb31912950 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF @@ -0,0 +1 @@ +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HW_BREAKPOINT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HW_BREAKPOINT new file mode 100644 index 000000000000..def43335c4fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_HW_BREAKPOINT @@ -0,0 +1 @@ +CONFIG_HAVE_HW_BREAKPOINT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IMA_KEXEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IMA_KEXEC new file mode 100644 index 000000000000..04187ebe8f32 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IMA_KEXEC @@ -0,0 +1 @@ +CONFIG_HAVE_IMA_KEXEC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IOREMAP_PROT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IOREMAP_PROT new file mode 100644 index 000000000000..77192b88488a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IOREMAP_PROT @@ -0,0 +1 @@ +CONFIG_HAVE_IOREMAP_PROT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IRQ_TIME_ACCOUNTING b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IRQ_TIME_ACCOUNTING new file mode 100644 index 000000000000..b8327b498ddc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_IRQ_TIME_ACCOUNTING @@ -0,0 +1 @@ +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KCSAN_COMPILER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KCSAN_COMPILER new file mode 100644 index 000000000000..875afb1d43d5 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KCSAN_COMPILER @@ -0,0 +1 @@ +CONFIG_HAVE_KCSAN_COMPILER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KPROBES b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KPROBES new file mode 100644 index 000000000000..92c5cb62f5df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KPROBES @@ -0,0 +1 @@ +CONFIG_HAVE_KPROBES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KRETPROBES b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KRETPROBES new file mode 100644 index 000000000000..c15a04c9f32b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KRETPROBES @@ -0,0 +1 @@ +CONFIG_HAVE_KRETPROBES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM new file mode 100644 index 000000000000..0b2f8920a3a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM @@ -0,0 +1 @@ +CONFIG_HAVE_KVM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT new file mode 100644 index 000000000000..552732f475e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_DIRTY_RING b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_DIRTY_RING new file mode 100644 index 000000000000..d0cfa603f096 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_DIRTY_RING @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_DIRTY_RING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL new file mode 100644 index 000000000000..fd0862c5da33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL @@ -0,0 +1 @@ 
+CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_EVENTFD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_EVENTFD new file mode 100644 index 000000000000..c64e0b862e8c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_EVENTFD @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_EVENTFD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQCHIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQCHIP new file mode 100644 index 000000000000..520c556af7ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQCHIP @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_IRQCHIP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQFD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQFD new file mode 100644 index 000000000000..7a18c8d5c636 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQFD @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_IRQFD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQ_BYPASS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQ_BYPASS new file mode 100644 index 000000000000..937b9171e084 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQ_BYPASS @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_IRQ_BYPASS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQ_ROUTING b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQ_ROUTING new file mode 100644 index 000000000000..5beab811773d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_IRQ_ROUTING @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_IRQ_ROUTING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_MSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_MSI new file mode 100644 index 000000000000..57f155750e49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_KVM_MSI @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_MSI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOD_ARCH_SPECIFIC 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOD_ARCH_SPECIFIC new file mode 100644 index 000000000000..d110694044a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOD_ARCH_SPECIFIC @@ -0,0 +1 @@ +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PMD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PMD new file mode 100644 index 000000000000..004d4f599f6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PMD @@ -0,0 +1 @@ +CONFIG_HAVE_MOVE_PMD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PUD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PUD new file mode 100644 index 000000000000..12381366d520 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_MOVE_PUD @@ -0,0 +1 @@ +CONFIG_HAVE_MOVE_PUD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_NMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_NMI new file mode 100644 index 000000000000..e5c6c28a242d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_NMI @@ -0,0 +1 @@ +CONFIG_HAVE_NMI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PCI new file mode 100644 index 000000000000..963b18208caa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PCI @@ -0,0 +1 @@ +CONFIG_HAVE_PCI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_EVENTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_EVENTS new file mode 100644 index 000000000000..1ac8ca88d415 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_EVENTS @@ -0,0 +1 @@ +CONFIG_HAVE_PERF_EVENTS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_EVENTS_NMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_EVENTS_NMI new file mode 100644 index 000000000000..90edb6ad0715 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_EVENTS_NMI @@ -0,0 +1 @@ 
+CONFIG_HAVE_PERF_EVENTS_NMI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_REGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_REGS new file mode 100644 index 000000000000..300ba387b892 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_REGS @@ -0,0 +1 @@ +CONFIG_HAVE_PERF_REGS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_USER_STACK_DUMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_USER_STACK_DUMP new file mode 100644 index 000000000000..64793851e749 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PERF_USER_STACK_DUMP @@ -0,0 +1 @@ +CONFIG_HAVE_PERF_USER_STACK_DUMP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK new file mode 100644 index 000000000000..2fc3d0500fed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK @@ -0,0 +1 @@ +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PREEMPT_DYNAMIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PREEMPT_DYNAMIC new file mode 100644 index 000000000000..61099041ff02 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_PREEMPT_DYNAMIC @@ -0,0 +1 @@ +CONFIG_HAVE_PREEMPT_DYNAMIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_REGS_AND_STACK_ACCESS_API b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_REGS_AND_STACK_ACCESS_API new file mode 100644 index 000000000000..0e0c363403e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_REGS_AND_STACK_ACCESS_API @@ -0,0 +1 @@ +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_RSEQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_RSEQ new file mode 100644 index 000000000000..8a24f5d4f59d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_RSEQ @@ -0,0 +1 @@ 
+CONFIG_HAVE_RSEQ=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT new file mode 100644 index 000000000000..dc29cb2e93bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT @@ -0,0 +1 @@ +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI new file mode 100644 index 000000000000..e162169a9cc0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI @@ -0,0 +1 @@ +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SCHED_AVG_IRQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SCHED_AVG_IRQ new file mode 100644 index 000000000000..b6cf7a74d5fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SCHED_AVG_IRQ @@ -0,0 +1 @@ +CONFIG_HAVE_SCHED_AVG_IRQ=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SETUP_PER_CPU_AREA b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SETUP_PER_CPU_AREA new file mode 100644 index 000000000000..355eade795d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SETUP_PER_CPU_AREA @@ -0,0 +1 @@ +CONFIG_HAVE_SETUP_PER_CPU_AREA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK new file mode 100644 index 000000000000..7cd0e74f6347 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK @@ -0,0 +1 @@ +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_STACKPROTECTOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_STACKPROTECTOR new file mode 100644 index 000000000000..f79989b20543 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_STACKPROTECTOR 
@@ -0,0 +1 @@ +CONFIG_HAVE_STACKPROTECTOR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SYSCALL_TRACEPOINTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SYSCALL_TRACEPOINTS new file mode 100644 index 000000000000..85483c6bea7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_SYSCALL_TRACEPOINTS @@ -0,0 +1 @@ +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_UID16 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_UID16 new file mode 100644 index 000000000000..48d2bdb947dc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_UID16 @@ -0,0 +1 @@ +CONFIG_HAVE_UID16=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN new file mode 100644 index 000000000000..1fdb561060ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN @@ -0,0 +1 @@ +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC new file mode 100644 index 000000000000..c9224b642b17 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC @@ -0,0 +1 @@ +CONFIG_HDLC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_CISCO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_CISCO new file mode 100644 index 000000000000..4319985ec7b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_CISCO @@ -0,0 +1 @@ +CONFIG_HDLC_CISCO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_FR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_FR new file mode 100644 index 000000000000..8eba85d222d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_FR @@ -0,0 +1 @@ +CONFIG_HDLC_FR=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_PPP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_PPP new file mode 100644 index 
000000000000..509257a9b1a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_PPP @@ -0,0 +1 @@ +CONFIG_HDLC_PPP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_RAW b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_RAW new file mode 100644 index 000000000000..890580afb83e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_RAW @@ -0,0 +1 @@ +CONFIG_HDLC_RAW=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_RAW_ETH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_RAW_ETH new file mode 100644 index 000000000000..0ff29fb976a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HDLC_RAW_ETH @@ -0,0 +1 @@ +# CONFIG_HDLC_RAW_ETH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HFSPLUS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HFSPLUS_FS new file mode 100644 index 000000000000..18720556cc71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HFSPLUS_FS @@ -0,0 +1 @@ +# CONFIG_HFSPLUS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HFS_FS new file mode 100644 index 000000000000..0e53a5809041 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HFS_FS @@ -0,0 +1 @@ +# CONFIG_HFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HIDRAW b/anolis/configs/L2-OPTIONAL/default/CONFIG_HIDRAW new file mode 100644 index 000000000000..bbd271ab1129 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HIDRAW @@ -0,0 +1 @@ +CONFIG_HIDRAW=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_A4TECH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_A4TECH new file mode 100644 index 000000000000..bc5b3baabf1f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_A4TECH @@ -0,0 +1 @@ +CONFIG_HID_A4TECH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACCUTOUCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACCUTOUCH new file mode 100644 index 
000000000000..8fa02a6c7142 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACCUTOUCH @@ -0,0 +1 @@ +# CONFIG_HID_ACCUTOUCH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACRUX b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACRUX new file mode 100644 index 000000000000..4964a93c2a03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACRUX @@ -0,0 +1 @@ +CONFIG_HID_ACRUX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACRUX_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACRUX_FF new file mode 100644 index 000000000000..60ec856234ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ACRUX_FF @@ -0,0 +1 @@ +# CONFIG_HID_ACRUX_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_APPLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_APPLE new file mode 100644 index 000000000000..cf025b274cc1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_APPLE @@ -0,0 +1 @@ +CONFIG_HID_APPLE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_APPLEIR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_APPLEIR new file mode 100644 index 000000000000..6e8cc4a50ce2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_APPLEIR @@ -0,0 +1 @@ +CONFIG_HID_APPLEIR=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_AUREAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_AUREAL new file mode 100644 index 000000000000..c639c7db12da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_AUREAL @@ -0,0 +1 @@ +CONFIG_HID_AUREAL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BATTERY_STRENGTH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BATTERY_STRENGTH new file mode 100644 index 000000000000..46efe6a265f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BATTERY_STRENGTH @@ -0,0 +1 @@ +CONFIG_HID_BATTERY_STRENGTH=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BELKIN 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BELKIN new file mode 100644 index 000000000000..62b7e7ed7c57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BELKIN @@ -0,0 +1 @@ +CONFIG_HID_BELKIN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BETOP_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BETOP_FF new file mode 100644 index 000000000000..61c9a90cfeca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BETOP_FF @@ -0,0 +1 @@ +CONFIG_HID_BETOP_FF=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BIGBEN_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BIGBEN_FF new file mode 100644 index 000000000000..e210fd5e837d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BIGBEN_FF @@ -0,0 +1 @@ +# CONFIG_HID_BIGBEN_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BPF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BPF new file mode 100644 index 000000000000..c2590759eeba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_BPF @@ -0,0 +1 @@ +# CONFIG_HID_BPF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CHERRY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CHERRY new file mode 100644 index 000000000000..160f0d2006dc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CHERRY @@ -0,0 +1 @@ +CONFIG_HID_CHERRY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CHICONY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CHICONY new file mode 100644 index 000000000000..c1d156579b82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CHICONY @@ -0,0 +1 @@ +CONFIG_HID_CHICONY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CORSAIR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CORSAIR new file mode 100644 index 000000000000..a169a44cecaa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CORSAIR @@ -0,0 +1 @@ +CONFIG_HID_CORSAIR=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_COUGAR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_COUGAR new file mode 100644 index 000000000000..20f10861eb93 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_COUGAR @@ -0,0 +1 @@ +# CONFIG_HID_COUGAR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CP2112 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CP2112 new file mode 100644 index 000000000000..3f9425d1d955 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CP2112 @@ -0,0 +1 @@ +# CONFIG_HID_CP2112 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CREATIVE_SB0540 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CREATIVE_SB0540 new file mode 100644 index 000000000000..ce52dd6a4540 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CREATIVE_SB0540 @@ -0,0 +1 @@ +# CONFIG_HID_CREATIVE_SB0540 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CYPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CYPRESS new file mode 100644 index 000000000000..8e9d3427e513 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_CYPRESS @@ -0,0 +1 @@ +CONFIG_HID_CYPRESS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_DRAGONRISE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_DRAGONRISE new file mode 100644 index 000000000000..284f173cb235 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_DRAGONRISE @@ -0,0 +1 @@ +CONFIG_HID_DRAGONRISE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELAN new file mode 100644 index 000000000000..78d5680c8175 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELAN @@ -0,0 +1 @@ +CONFIG_HID_ELAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELECOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELECOM new file mode 100644 index 000000000000..6257669d8fee --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELECOM @@ -0,0 +1 @@ +CONFIG_HID_ELECOM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELO new file mode 100644 index 000000000000..96a4baae71b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ELO @@ -0,0 +1 @@ +CONFIG_HID_ELO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EMS_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EMS_FF new file mode 100644 index 000000000000..6c3035d84aeb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EMS_FF @@ -0,0 +1 @@ +# CONFIG_HID_EMS_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EVISION b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EVISION new file mode 100644 index 000000000000..47e6dace37ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EVISION @@ -0,0 +1 @@ +# CONFIG_HID_EVISION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EZKEY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EZKEY new file mode 100644 index 000000000000..ddb013273c18 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_EZKEY @@ -0,0 +1 @@ +CONFIG_HID_EZKEY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_FT260 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_FT260 new file mode 100644 index 000000000000..23db7a254803 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_FT260 @@ -0,0 +1 @@ +# CONFIG_HID_FT260 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GEMBIRD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GEMBIRD new file mode 100644 index 000000000000..30a71336e2fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GEMBIRD @@ -0,0 +1 @@ +CONFIG_HID_GEMBIRD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GENERIC new file mode 100644 index 000000000000..a2b8a6c4a44f --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GENERIC @@ -0,0 +1 @@ +CONFIG_HID_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GFRM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GFRM new file mode 100644 index 000000000000..7d79dc791837 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GFRM @@ -0,0 +1 @@ +CONFIG_HID_GFRM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GLORIOUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GLORIOUS new file mode 100644 index 000000000000..2619055d095e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GLORIOUS @@ -0,0 +1 @@ +# CONFIG_HID_GLORIOUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GOOGLE_STADIA_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GOOGLE_STADIA_FF new file mode 100644 index 000000000000..52519b024e8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GOOGLE_STADIA_FF @@ -0,0 +1 @@ +# CONFIG_HID_GOOGLE_STADIA_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GREENASIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GREENASIA new file mode 100644 index 000000000000..faed7bf9d961 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GREENASIA @@ -0,0 +1 @@ +CONFIG_HID_GREENASIA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GT683R b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GT683R new file mode 100644 index 000000000000..c65fcc22fcb3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GT683R @@ -0,0 +1 @@ +CONFIG_HID_GT683R=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GYRATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GYRATION new file mode 100644 index 000000000000..6d4a9391f67f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_GYRATION @@ -0,0 +1 @@ +CONFIG_HID_GYRATION=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_HOLTEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_HOLTEK 
new file mode 100644 index 000000000000..2118385dd6ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_HOLTEK @@ -0,0 +1 @@ +CONFIG_HID_HOLTEK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ICADE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ICADE new file mode 100644 index 000000000000..c9d3d1edd3e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ICADE @@ -0,0 +1 @@ +CONFIG_HID_ICADE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ITE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ITE new file mode 100644 index 000000000000..b4af4b45e2eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ITE @@ -0,0 +1 @@ +CONFIG_HID_ITE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_JABRA b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_JABRA new file mode 100644 index 000000000000..c93fae8a6079 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_JABRA @@ -0,0 +1 @@ +CONFIG_HID_JABRA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KENSINGTON b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KENSINGTON new file mode 100644 index 000000000000..4fbf2d237c30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KENSINGTON @@ -0,0 +1 @@ +CONFIG_HID_KENSINGTON=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KEYTOUCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KEYTOUCH new file mode 100644 index 000000000000..39d8ced6043d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KEYTOUCH @@ -0,0 +1 @@ +CONFIG_HID_KEYTOUCH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KYE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KYE new file mode 100644 index 000000000000..0987d5b063f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_KYE @@ -0,0 +1 @@ +CONFIG_HID_KYE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LCPOWER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LCPOWER new file mode 100644 
index 000000000000..4ccf9204d3ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LCPOWER @@ -0,0 +1 @@ +CONFIG_HID_LCPOWER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LED b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LED new file mode 100644 index 000000000000..dfe3a9a1eb26 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LED @@ -0,0 +1 @@ +CONFIG_HID_LED=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LENOVO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LENOVO new file mode 100644 index 000000000000..32888ff64e29 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LENOVO @@ -0,0 +1 @@ +CONFIG_HID_LENOVO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LETSKETCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LETSKETCH new file mode 100644 index 000000000000..e987810bd4f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LETSKETCH @@ -0,0 +1 @@ +# CONFIG_HID_LETSKETCH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH new file mode 100644 index 000000000000..dc04d70e82d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH @@ -0,0 +1 @@ +CONFIG_HID_LOGITECH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH_DJ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH_DJ new file mode 100644 index 000000000000..5973b1e78f84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH_DJ @@ -0,0 +1 @@ +CONFIG_HID_LOGITECH_DJ=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH_HIDPP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH_HIDPP new file mode 100644 index 000000000000..965f6aa0f83d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_LOGITECH_HIDPP @@ -0,0 +1 @@ +CONFIG_HID_LOGITECH_HIDPP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MACALLY 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MACALLY new file mode 100644 index 000000000000..6b38c1f2101a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MACALLY @@ -0,0 +1 @@ +# CONFIG_HID_MACALLY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MAGICMOUSE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MAGICMOUSE new file mode 100644 index 000000000000..524303e24070 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MAGICMOUSE @@ -0,0 +1 @@ +CONFIG_HID_MAGICMOUSE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MALTRON b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MALTRON new file mode 100644 index 000000000000..2b17c227352a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MALTRON @@ -0,0 +1 @@ +# CONFIG_HID_MALTRON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MAYFLASH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MAYFLASH new file mode 100644 index 000000000000..eaee56db96e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MAYFLASH @@ -0,0 +1 @@ +# CONFIG_HID_MAYFLASH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2221 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2221 new file mode 100644 index 000000000000..556c46a013d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2221 @@ -0,0 +1 @@ +# CONFIG_HID_MCP2221 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MEGAWORLD_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MEGAWORLD_FF new file mode 100644 index 000000000000..3f697747afa7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MEGAWORLD_FF @@ -0,0 +1 @@ +# CONFIG_HID_MEGAWORLD_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MICROSOFT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MICROSOFT new file mode 100644 index 000000000000..48983ea3b175 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MICROSOFT @@ -0,0 +1 @@ +CONFIG_HID_MICROSOFT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MONTEREY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MONTEREY new file mode 100644 index 000000000000..087b000d3867 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MONTEREY @@ -0,0 +1 @@ +CONFIG_HID_MONTEREY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MULTITOUCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MULTITOUCH new file mode 100644 index 000000000000..7902798886b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MULTITOUCH @@ -0,0 +1 @@ +CONFIG_HID_MULTITOUCH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NINTENDO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NINTENDO new file mode 100644 index 000000000000..c2269e5bfbfa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NINTENDO @@ -0,0 +1 @@ +# CONFIG_HID_NINTENDO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NTI new file mode 100644 index 000000000000..c239c7052d12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NTI @@ -0,0 +1 @@ +CONFIG_HID_NTI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NTRIG b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NTRIG new file mode 100644 index 000000000000..9b0ac9189e22 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_NTRIG @@ -0,0 +1 @@ +CONFIG_HID_NTRIG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ORTEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ORTEK new file mode 100644 index 000000000000..a0030810c265 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ORTEK @@ -0,0 +1 @@ +CONFIG_HID_ORTEK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PANTHERLORD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PANTHERLORD new file mode 100644 index 
000000000000..e12ea738a673 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PANTHERLORD @@ -0,0 +1 @@ +CONFIG_HID_PANTHERLORD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PENMOUNT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PENMOUNT new file mode 100644 index 000000000000..e8ce0dab6ba9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PENMOUNT @@ -0,0 +1 @@ +CONFIG_HID_PENMOUNT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PETALYNX b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PETALYNX new file mode 100644 index 000000000000..260ea69bd684 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PETALYNX @@ -0,0 +1 @@ +CONFIG_HID_PETALYNX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD new file mode 100644 index 000000000000..fb20d5ce3a32 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD @@ -0,0 +1 @@ +CONFIG_HID_PICOLCD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_BACKLIGHT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_BACKLIGHT new file mode 100644 index 000000000000..9accd76906a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_BACKLIGHT @@ -0,0 +1 @@ +CONFIG_HID_PICOLCD_BACKLIGHT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_FB b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_FB new file mode 100644 index 000000000000..38631826cdaf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_FB @@ -0,0 +1 @@ +CONFIG_HID_PICOLCD_FB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_LCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_LCD new file mode 100644 index 000000000000..84d52ea84e2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_LCD @@ -0,0 +1 @@ +CONFIG_HID_PICOLCD_LCD=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_LEDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_LEDS new file mode 100644 index 000000000000..bf6539eaaba7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PICOLCD_LEDS @@ -0,0 +1 @@ +CONFIG_HID_PICOLCD_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PID b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PID new file mode 100644 index 000000000000..d4c80820d169 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PID @@ -0,0 +1 @@ +CONFIG_HID_PID=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PLANTRONICS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PLANTRONICS new file mode 100644 index 000000000000..f99242de7c46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PLANTRONICS @@ -0,0 +1 @@ +CONFIG_HID_PLANTRONICS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PRIMAX b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PRIMAX new file mode 100644 index 000000000000..1c60e2cdc9c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PRIMAX @@ -0,0 +1 @@ +CONFIG_HID_PRIMAX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PXRC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PXRC new file mode 100644 index 000000000000..c1be11caef00 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_PXRC @@ -0,0 +1 @@ +# CONFIG_HID_PXRC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RAZER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RAZER new file mode 100644 index 000000000000..55369bcd061c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RAZER @@ -0,0 +1 @@ +# CONFIG_HID_RAZER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_REDRAGON b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_REDRAGON new file mode 100644 index 000000000000..b3aa83efaa3e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_REDRAGON @@ -0,0 +1 @@ +# 
CONFIG_HID_REDRAGON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RETRODE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RETRODE new file mode 100644 index 000000000000..18cf5fa2b069 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RETRODE @@ -0,0 +1 @@ +# CONFIG_HID_RETRODE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RMI new file mode 100644 index 000000000000..287d4bc0f354 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_RMI @@ -0,0 +1 @@ +CONFIG_HID_RMI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ROCCAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ROCCAT new file mode 100644 index 000000000000..9b2f14d42e92 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ROCCAT @@ -0,0 +1 @@ +CONFIG_HID_ROCCAT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SAITEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SAITEK new file mode 100644 index 000000000000..fecfba018cd8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SAITEK @@ -0,0 +1 @@ +CONFIG_HID_SAITEK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SAMSUNG b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SAMSUNG new file mode 100644 index 000000000000..2540e9c9a58c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SAMSUNG @@ -0,0 +1 @@ +CONFIG_HID_SAMSUNG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SEMITEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SEMITEK new file mode 100644 index 000000000000..4bc807d8b642 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SEMITEK @@ -0,0 +1 @@ +# CONFIG_HID_SEMITEK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SIGMAMICRO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SIGMAMICRO new file mode 100644 index 000000000000..255a34689086 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SIGMAMICRO @@ -0,0 +1 @@ +# CONFIG_HID_SIGMAMICRO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SMARTJOYPLUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SMARTJOYPLUS new file mode 100644 index 000000000000..9bbdeb0e6b19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SMARTJOYPLUS @@ -0,0 +1 @@ +CONFIG_HID_SMARTJOYPLUS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SONY b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SONY new file mode 100644 index 000000000000..6ee0f6474bd0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SONY @@ -0,0 +1 @@ +CONFIG_HID_SONY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SPEEDLINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SPEEDLINK new file mode 100644 index 000000000000..6019addf9283 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SPEEDLINK @@ -0,0 +1 @@ +CONFIG_HID_SPEEDLINK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_STEAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_STEAM new file mode 100644 index 000000000000..dcc1ee8519e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_STEAM @@ -0,0 +1 @@ +# CONFIG_HID_STEAM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_STEELSERIES b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_STEELSERIES new file mode 100644 index 000000000000..77f65e9e779d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_STEELSERIES @@ -0,0 +1 @@ +CONFIG_HID_STEELSERIES=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SUNPLUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SUNPLUS new file mode 100644 index 000000000000..724c2c737416 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_SUNPLUS @@ -0,0 +1 @@ +CONFIG_HID_SUNPLUS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_THINGM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_THINGM new file 
mode 100644 index 000000000000..a41c6e458cc3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_THINGM @@ -0,0 +1 @@ +CONFIG_HID_THINGM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_THRUSTMASTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_THRUSTMASTER new file mode 100644 index 000000000000..4dde89479a33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_THRUSTMASTER @@ -0,0 +1 @@ +CONFIG_HID_THRUSTMASTER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TIVO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TIVO new file mode 100644 index 000000000000..a33d735dd3b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TIVO @@ -0,0 +1 @@ +CONFIG_HID_TIVO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TOPRE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TOPRE new file mode 100644 index 000000000000..926ac41cd2ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TOPRE @@ -0,0 +1 @@ +# CONFIG_HID_TOPRE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TOPSEED b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TOPSEED new file mode 100644 index 000000000000..8e1879f7cdfd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TOPSEED @@ -0,0 +1 @@ +CONFIG_HID_TOPSEED=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TWINHAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TWINHAN new file mode 100644 index 000000000000..0f930a425df0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_TWINHAN @@ -0,0 +1 @@ +CONFIG_HID_TWINHAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_U2FZERO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_U2FZERO new file mode 100644 index 000000000000..2b3371700939 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_U2FZERO @@ -0,0 +1 @@ +# CONFIG_HID_U2FZERO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_UCLOGIC 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_UCLOGIC new file mode 100644 index 000000000000..9fa1b889bf9b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_UCLOGIC @@ -0,0 +1 @@ +CONFIG_HID_UCLOGIC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_UDRAW_PS3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_UDRAW_PS3 new file mode 100644 index 000000000000..ae8a9040a8f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_UDRAW_PS3 @@ -0,0 +1 @@ +# CONFIG_HID_UDRAW_PS3 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VIEWSONIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VIEWSONIC new file mode 100644 index 000000000000..dd2c624e2cb9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VIEWSONIC @@ -0,0 +1 @@ +# CONFIG_HID_VIEWSONIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VIVALDI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VIVALDI new file mode 100644 index 000000000000..289b0f0090f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VIVALDI @@ -0,0 +1 @@ +# CONFIG_HID_VIVALDI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VRC2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VRC2 new file mode 100644 index 000000000000..d76c4dc2e477 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_VRC2 @@ -0,0 +1 @@ +# CONFIG_HID_VRC2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WACOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WACOM new file mode 100644 index 000000000000..9da42d4d3ed5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WACOM @@ -0,0 +1 @@ +CONFIG_HID_WACOM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WALTOP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WALTOP new file mode 100644 index 000000000000..be9f1ac7df00 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WALTOP @@ -0,0 +1 @@ +CONFIG_HID_WALTOP=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WIIMOTE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WIIMOTE new file mode 100644 index 000000000000..1f69eb18fa0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_WIIMOTE @@ -0,0 +1 @@ +CONFIG_HID_WIIMOTE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_XIAOMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_XIAOMI new file mode 100644 index 000000000000..31d23ec9ab64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_XIAOMI @@ -0,0 +1 @@ +# CONFIG_HID_XIAOMI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_XINMO b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_XINMO new file mode 100644 index 000000000000..05c54c7a9cf6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_XINMO @@ -0,0 +1 @@ +CONFIG_HID_XINMO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ZEROPLUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ZEROPLUS new file mode 100644 index 000000000000..204a0cdf5298 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ZEROPLUS @@ -0,0 +1 @@ +CONFIG_HID_ZEROPLUS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ZYDACRON b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ZYDACRON new file mode 100644 index 000000000000..c145906db5b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_ZYDACRON @@ -0,0 +1 @@ +CONFIG_HID_ZYDACRON=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HIPPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HIPPI new file mode 100644 index 000000000000..8f4d0983a853 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HIPPI @@ -0,0 +1 @@ +# CONFIG_HIPPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HIST_TRIGGERS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_HIST_TRIGGERS_DEBUG new file mode 100644 index 000000000000..a6a06ed369a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HIST_TRIGGERS_DEBUG @@ -0,0 +1 @@ +# 
CONFIG_HIST_TRIGGERS_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HMC6352 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HMC6352 new file mode 100644 index 000000000000..c283a00bb465 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HMC6352 @@ -0,0 +1 @@ +# CONFIG_HMC6352 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HMEM_REPORTING b/anolis/configs/L2-OPTIONAL/default/CONFIG_HMEM_REPORTING new file mode 100644 index 000000000000..e4a0b396ae6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HMEM_REPORTING @@ -0,0 +1 @@ +CONFIG_HMEM_REPORTING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HMM_MIRROR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HMM_MIRROR new file mode 100644 index 000000000000..11dfee6c1576 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HMM_MIRROR @@ -0,0 +1 @@ +CONFIG_HMM_MIRROR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HOLTEK_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOLTEK_FF new file mode 100644 index 000000000000..f19776110e25 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOLTEK_FF @@ -0,0 +1 @@ +# CONFIG_HOLTEK_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_CORE_SYNC b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_CORE_SYNC new file mode 100644 index 000000000000..bfbc290642bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_CORE_SYNC @@ -0,0 +1 @@ +CONFIG_HOTPLUG_CORE_SYNC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_CORE_SYNC_DEAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_CORE_SYNC_DEAD new file mode 100644 index 000000000000..545a0b721c2c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_CORE_SYNC_DEAD @@ -0,0 +1 @@ +CONFIG_HOTPLUG_CORE_SYNC_DEAD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_PCI_ACPI_IBM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_PCI_ACPI_IBM new file mode 100644 
index 000000000000..c2e8de4e5d26 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_PCI_ACPI_IBM @@ -0,0 +1 @@ +CONFIG_HOTPLUG_PCI_ACPI_IBM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_PCI_CPCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_PCI_CPCI new file mode 100644 index 000000000000..dcd07008eb2c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HOTPLUG_PCI_CPCI @@ -0,0 +1 @@ +# CONFIG_HOTPLUG_PCI_CPCI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HPFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_HPFS_FS new file mode 100644 index 000000000000..e5ad7579ffb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HPFS_FS @@ -0,0 +1 @@ +# CONFIG_HPFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_HSI new file mode 100644 index 000000000000..1581f53f92e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HSI @@ -0,0 +1 @@ +# CONFIG_HSI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HSR b/anolis/configs/L2-OPTIONAL/default/CONFIG_HSR new file mode 100644 index 000000000000..9a440220af28 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HSR @@ -0,0 +1 @@ +# CONFIG_HSR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HTE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HTE new file mode 100644 index 000000000000..9603fbdc3b29 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HTE @@ -0,0 +1 @@ +# CONFIG_HTE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HVC_DRIVER b/anolis/configs/L2-OPTIONAL/default/CONFIG_HVC_DRIVER new file mode 100644 index 000000000000..2b13a0cc9b59 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HVC_DRIVER @@ -0,0 +1 @@ +CONFIG_HVC_DRIVER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HWMON_DEBUG_CHIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_HWMON_DEBUG_CHIP new file mode 100644 
index 000000000000..700df44029ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HWMON_DEBUG_CHIP @@ -0,0 +1 @@ +# CONFIG_HWMON_DEBUG_CHIP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HWSPINLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_HWSPINLOCK new file mode 100644 index 000000000000..6b531e93f5be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HWSPINLOCK @@ -0,0 +1 @@ +CONFIG_HWSPINLOCK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_CONSOLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_CONSOLE new file mode 100644 index 000000000000..1a03cd404e58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_CONSOLE @@ -0,0 +1 @@ +CONFIG_HW_CONSOLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_BA431 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_BA431 new file mode 100644 index 000000000000..ccafb50a2a93 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_BA431 @@ -0,0 +1 @@ +# CONFIG_HW_RANDOM_BA431 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_TIMERIOMEM b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_TIMERIOMEM new file mode 100644 index 000000000000..b1963489bf7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_TIMERIOMEM @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_TIMERIOMEM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_XIPHERA b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_XIPHERA new file mode 100644 index 000000000000..779befaec438 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HW_RANDOM_XIPHERA @@ -0,0 +1 @@ +# CONFIG_HW_RANDOM_XIPHERA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALGOBIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALGOBIT new file mode 100644 index 000000000000..75e39f8b6df1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALGOBIT @@ -0,0 +1 @@ +CONFIG_I2C_ALGOBIT=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALGOPCA b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALGOPCA new file mode 100644 index 000000000000..2c5a407690f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALGOPCA @@ -0,0 +1 @@ +CONFIG_I2C_ALGOPCA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI1535 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI1535 new file mode 100644 index 000000000000..3f9e2fef11fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI1535 @@ -0,0 +1 @@ +# CONFIG_I2C_ALI1535 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI1563 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI1563 new file mode 100644 index 000000000000..035045f8b56d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI1563 @@ -0,0 +1 @@ +# CONFIG_I2C_ALI1563 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI15X3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI15X3 new file mode 100644 index 000000000000..1621ea3814e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ALI15X3 @@ -0,0 +1 @@ +# CONFIG_I2C_ALI15X3 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_AMD_MP2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_AMD_MP2 new file mode 100644 index 000000000000..e128a6c99bdb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_AMD_MP2 @@ -0,0 +1 @@ +# CONFIG_I2C_AMD_MP2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_BOARDINFO b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_BOARDINFO new file mode 100644 index 000000000000..8bb991a444da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_BOARDINFO @@ -0,0 +1 @@ +CONFIG_I2C_BOARDINFO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_CBUS_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_CBUS_GPIO new file mode 100644 index 000000000000..d626de063b6b --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_CBUS_GPIO @@ -0,0 +1 @@ +# CONFIG_I2C_CBUS_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_COMPAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_COMPAT new file mode 100644 index 000000000000..e24cc28d31ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_COMPAT @@ -0,0 +1 @@ +CONFIG_I2C_COMPAT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_CP2615 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_CP2615 new file mode 100644 index 000000000000..099aa14b27a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_CP2615 @@ -0,0 +1 @@ +# CONFIG_I2C_CP2615 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_ALGO b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_ALGO new file mode 100644 index 000000000000..6c95613697a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_ALGO @@ -0,0 +1 @@ +# CONFIG_I2C_DEBUG_ALGO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_BUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_BUS new file mode 100644 index 000000000000..944e060939ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_BUS @@ -0,0 +1 @@ +# CONFIG_I2C_DEBUG_BUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_CORE new file mode 100644 index 000000000000..6b0f751b1caf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DEBUG_CORE @@ -0,0 +1 @@ +# CONFIG_I2C_DEBUG_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_CORE new file mode 100644 index 000000000000..661ffb01a393 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_CORE @@ -0,0 +1 @@ +CONFIG_I2C_DESIGNWARE_CORE=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_PCI new file mode 100644 index 000000000000..7f371b4dc4dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_PCI @@ -0,0 +1 @@ +# CONFIG_I2C_DESIGNWARE_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_PLATFORM b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_PLATFORM new file mode 100644 index 000000000000..cec2f8633590 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_PLATFORM @@ -0,0 +1 @@ +CONFIG_I2C_DESIGNWARE_PLATFORM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_SLAVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_SLAVE new file mode 100644 index 000000000000..b4ae47a7eaec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DESIGNWARE_SLAVE @@ -0,0 +1 @@ +# CONFIG_I2C_DESIGNWARE_SLAVE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DIOLAN_U2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DIOLAN_U2C new file mode 100644 index 000000000000..7cd85b9d7ced --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_DIOLAN_U2C @@ -0,0 +1 @@ +CONFIG_I2C_DIOLAN_U2C=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_EMEV2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_EMEV2 new file mode 100644 index 000000000000..9121ff3a70b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_EMEV2 @@ -0,0 +1 @@ +# CONFIG_I2C_EMEV2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID new file mode 100644 index 000000000000..d4b9febdd17d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID @@ -0,0 +1 @@ +CONFIG_I2C_HID=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID_ACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID_ACPI new file mode 100644 index 
000000000000..540ca3a21518 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID_ACPI @@ -0,0 +1 @@ +# CONFIG_I2C_HID_ACPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID_OF b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID_OF new file mode 100644 index 000000000000..847537d7b616 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_HID_OF @@ -0,0 +1 @@ +# CONFIG_I2C_HID_OF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_LTC4306 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_LTC4306 new file mode 100644 index 000000000000..84a6a3221c56 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_LTC4306 @@ -0,0 +1 @@ +# CONFIG_I2C_MUX_LTC4306 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_MLXCPLD b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_MLXCPLD new file mode 100644 index 000000000000..d4e6b7b4ff8e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_MLXCPLD @@ -0,0 +1 @@ +CONFIG_I2C_MUX_MLXCPLD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_REG b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_REG new file mode 100644 index 000000000000..f16a809e1281 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_MUX_REG @@ -0,0 +1 @@ +# CONFIG_I2C_MUX_REG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_NFORCE2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_NFORCE2 new file mode 100644 index 000000000000..50bbe34f292c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_NFORCE2 @@ -0,0 +1 @@ +CONFIG_I2C_NFORCE2=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_NVIDIA_GPU b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_NVIDIA_GPU new file mode 100644 index 000000000000..fde08e31378b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_NVIDIA_GPU @@ -0,0 +1 @@ +# CONFIG_I2C_NVIDIA_GPU is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_OCORES b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_OCORES new file mode 100644 index 000000000000..302dfede4bee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_OCORES @@ -0,0 +1 @@ +# CONFIG_I2C_OCORES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_PCA_PLATFORM b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_PCA_PLATFORM new file mode 100644 index 000000000000..f01485df4591 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_PCA_PLATFORM @@ -0,0 +1 @@ +CONFIG_I2C_PCA_PLATFORM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_PCI1XXXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_PCI1XXXX new file mode 100644 index 000000000000..7a3160209795 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_PCI1XXXX @@ -0,0 +1 @@ +# CONFIG_I2C_PCI1XXXX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ROBOTFUZZ_OSIF b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ROBOTFUZZ_OSIF new file mode 100644 index 000000000000..f9b66870a9a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_ROBOTFUZZ_OSIF @@ -0,0 +1 @@ +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIMTEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIMTEC new file mode 100644 index 000000000000..9cfdf4eec6aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIMTEC @@ -0,0 +1 @@ +CONFIG_I2C_SIMTEC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIS5595 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIS5595 new file mode 100644 index 000000000000..881262cd6223 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIS5595 @@ -0,0 +1 @@ +# CONFIG_I2C_SIS5595 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIS630 b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIS630 new file mode 100644 index 000000000000..daa589151410 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_SIS630 @@ -0,0 +1 @@ +# CONFIG_I2C_SIS630 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_STUB b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_STUB new file mode 100644 index 000000000000..4966820791d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_STUB @@ -0,0 +1 @@ +CONFIG_I2C_STUB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_TAOS_EVM b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_TAOS_EVM new file mode 100644 index 000000000000..3db7fabfb248 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_TAOS_EVM @@ -0,0 +1 @@ +# CONFIG_I2C_TAOS_EVM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_TINY_USB b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_TINY_USB new file mode 100644 index 000000000000..ae818d4e9b08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_TINY_USB @@ -0,0 +1 @@ +CONFIG_I2C_TINY_USB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_VIRTIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_VIRTIO new file mode 100644 index 000000000000..e6a1fcb2844d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_VIRTIO @@ -0,0 +1 @@ +# CONFIG_I2C_VIRTIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_XILINX b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_XILINX new file mode 100644 index 000000000000..9526c6adf237 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I2C_XILINX @@ -0,0 +1 @@ +# CONFIG_I2C_XILINX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_I3C b/anolis/configs/L2-OPTIONAL/default/CONFIG_I3C new file mode 100644 index 000000000000..387a138c15d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_I3C @@ -0,0 +1 @@ +# CONFIG_I3C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IAVF b/anolis/configs/L2-OPTIONAL/default/CONFIG_IAVF new file mode 100644 index 000000000000..8c6e509e9e43 --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IAVF @@ -0,0 +1 @@ +CONFIG_IAVF=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ICE_SWITCHDEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_ICE_SWITCHDEV new file mode 100644 index 000000000000..d0e4f7610f0c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ICE_SWITCHDEV @@ -0,0 +1 @@ +CONFIG_ICE_SWITCHDEV=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ICPLUS_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_ICPLUS_PHY new file mode 100644 index 000000000000..f58838d2fb90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ICPLUS_PHY @@ -0,0 +1 @@ +CONFIG_ICPLUS_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ICS932S401 b/anolis/configs/L2-OPTIONAL/default/CONFIG_ICS932S401 new file mode 100644 index 000000000000..e8020befdd28 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ICS932S401 @@ -0,0 +1 @@ +# CONFIG_ICS932S401 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154 b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154 new file mode 100644 index 000000000000..bff5041fdd69 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154 @@ -0,0 +1 @@ +CONFIG_IEEE802154=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_6LOWPAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_6LOWPAN new file mode 100644 index 000000000000..4c23edbf740b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_6LOWPAN @@ -0,0 +1 @@ +CONFIG_IEEE802154_6LOWPAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_ADF7242 b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_ADF7242 new file mode 100644 index 000000000000..57df1ab7f9c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_ADF7242 @@ -0,0 +1 @@ +# CONFIG_IEEE802154_ADF7242 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_AT86RF230 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_AT86RF230 new file mode 100644 index 000000000000..d46e81158508 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_AT86RF230 @@ -0,0 +1 @@ +# CONFIG_IEEE802154_AT86RF230 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_ATUSB b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_ATUSB new file mode 100644 index 000000000000..ab561ab9e264 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_ATUSB @@ -0,0 +1 @@ +# CONFIG_IEEE802154_ATUSB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_CA8210 b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_CA8210 new file mode 100644 index 000000000000..462211e30813 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_CA8210 @@ -0,0 +1 @@ +# CONFIG_IEEE802154_CA8210 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_CC2520 b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_CC2520 new file mode 100644 index 000000000000..037cabb8459a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_CC2520 @@ -0,0 +1 @@ +# CONFIG_IEEE802154_CC2520 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_DRIVERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_DRIVERS new file mode 100644 index 000000000000..caa6c2f8765f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_DRIVERS @@ -0,0 +1 @@ +CONFIG_IEEE802154_DRIVERS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_HWSIM b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_HWSIM new file mode 100644 index 000000000000..02d982747504 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_HWSIM @@ -0,0 +1 @@ +# CONFIG_IEEE802154_HWSIM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_MCR20A b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_MCR20A new file 
mode 100644 index 000000000000..a574a9660520 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_MCR20A @@ -0,0 +1 @@ +# CONFIG_IEEE802154_MCR20A is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_MRF24J40 b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_MRF24J40 new file mode 100644 index 000000000000..48d1236eb326 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_MRF24J40 @@ -0,0 +1 @@ +# CONFIG_IEEE802154_MRF24J40 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_NL802154_EXPERIMENTAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_NL802154_EXPERIMENTAL new file mode 100644 index 000000000000..9a637d779be0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_NL802154_EXPERIMENTAL @@ -0,0 +1 @@ +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_SOCKET b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_SOCKET new file mode 100644 index 000000000000..e7bfdca2d383 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IEEE802154_SOCKET @@ -0,0 +1 @@ +CONFIG_IEEE802154_SOCKET=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IMA_DISABLE_HTABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_IMA_DISABLE_HTABLE new file mode 100644 index 000000000000..c749fd4869ad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IMA_DISABLE_HTABLE @@ -0,0 +1 @@ +# CONFIG_IMA_DISABLE_HTABLE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IMA_KEXEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_IMA_KEXEC new file mode 100644 index 000000000000..20a119416adb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IMA_KEXEC @@ -0,0 +1 @@ +# CONFIG_IMA_KEXEC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_SCTP_DIAG b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_SCTP_DIAG new file mode 100644 index 000000000000..7ab0f4e2b8fe 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_SCTP_DIAG @@ -0,0 +1 @@ +CONFIG_INET_SCTP_DIAG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_TABLE_PERTURB_ORDER b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_TABLE_PERTURB_ORDER new file mode 100644 index 000000000000..61b701a25a3f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_TABLE_PERTURB_ORDER @@ -0,0 +1 @@ +CONFIG_INET_TABLE_PERTURB_ORDER=16 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_TUNNEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_TUNNEL new file mode 100644 index 000000000000..ab07f30096df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_TUNNEL @@ -0,0 +1 @@ +CONFIG_INET_TUNNEL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_XFRM_TUNNEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_XFRM_TUNNEL new file mode 100644 index 000000000000..795cbac7a8d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INET_XFRM_TUNNEL @@ -0,0 +1 @@ +CONFIG_INET_XFRM_TUNNEL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS new file mode 100644 index 000000000000..cc04097ebc12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS @@ -0,0 +1 @@ +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_BNXT_RE b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_BNXT_RE new file mode 100644 index 000000000000..23a29a3f1ff3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_BNXT_RE @@ -0,0 +1 @@ +CONFIG_INFINIBAND_BNXT_RE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_CXGB4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_CXGB4 new file mode 100644 index 000000000000..5968a3ee27d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_CXGB4 @@ 
-0,0 +1 @@ +CONFIG_INFINIBAND_CXGB4=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_EFA b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_EFA new file mode 100644 index 000000000000..b6635f12bc38 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_EFA @@ -0,0 +1 @@ +# CONFIG_INFINIBAND_EFA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IPOIB_DEBUG_DATA b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IPOIB_DEBUG_DATA new file mode 100644 index 000000000000..00e419c17777 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IPOIB_DEBUG_DATA @@ -0,0 +1 @@ +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IRDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IRDMA new file mode 100644 index 000000000000..fbb0af9a95b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IRDMA @@ -0,0 +1 @@ +# CONFIG_INFINIBAND_IRDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_OCRDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_OCRDMA new file mode 100644 index 000000000000..12ff35161153 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_OCRDMA @@ -0,0 +1 @@ +# CONFIG_INFINIBAND_OCRDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_QEDR b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_QEDR new file mode 100644 index 000000000000..657c87e5c03f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_QEDR @@ -0,0 +1 @@ +CONFIG_INFINIBAND_QEDR=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_USER_MEM b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_USER_MEM new file mode 100644 index 000000000000..f26ba36a5f4e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_USER_MEM @@ -0,0 +1 @@ +CONFIG_INFINIBAND_USER_MEM=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_VIRT_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_VIRT_DMA new file mode 100644 index 000000000000..139371233296 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_VIRT_DMA @@ -0,0 +1 @@ +CONFIG_INFINIBAND_VIRT_DMA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFTL b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFTL new file mode 100644 index 000000000000..bb3c8b3c5a9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFTL @@ -0,0 +1 @@ +# CONFIG_INFTL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INITRAMFS_PRESERVE_MTIME b/anolis/configs/L2-OPTIONAL/default/CONFIG_INITRAMFS_PRESERVE_MTIME new file mode 100644 index 000000000000..3095ee146ae8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INITRAMFS_PRESERVE_MTIME @@ -0,0 +1 @@ +CONFIG_INITRAMFS_PRESERVE_MTIME=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_ENV_ARG_LIMIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_ENV_ARG_LIMIT new file mode 100644 index 000000000000..11eb3976e766 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_ENV_ARG_LIMIT @@ -0,0 +1 @@ +CONFIG_INIT_ENV_ARG_LIMIT=32 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_ALL_PATTERN b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_ALL_PATTERN new file mode 100644 index 000000000000..5a246dc0fc42 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_ALL_PATTERN @@ -0,0 +1 @@ +# CONFIG_INIT_STACK_ALL_PATTERN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_ALL_ZERO b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_ALL_ZERO new file mode 100644 index 000000000000..06b00f4c7b1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_ALL_ZERO @@ -0,0 +1 @@ +# CONFIG_INIT_STACK_ALL_ZERO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_NONE 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_NONE new file mode 100644 index 000000000000..16e74023a918 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INIT_STACK_NONE @@ -0,0 +1 @@ +CONFIG_INIT_STACK_NONE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_EVBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_EVBUG new file mode 100644 index 000000000000..93144f90acec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_EVBUG @@ -0,0 +1 @@ +# CONFIG_INPUT_EVBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_FF_MEMLESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_FF_MEMLESS new file mode 100644 index 000000000000..817e3e1ed346 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_FF_MEMLESS @@ -0,0 +1 @@ +CONFIG_INPUT_FF_MEMLESS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_JOYSTICK b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_JOYSTICK new file mode 100644 index 000000000000..6f75cf8151ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_JOYSTICK @@ -0,0 +1 @@ +# CONFIG_INPUT_JOYSTICK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_LEDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_LEDS new file mode 100644 index 000000000000..4fd79ff1b455 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_LEDS @@ -0,0 +1 @@ +CONFIG_INPUT_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MATRIXKMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MATRIXKMAP new file mode 100644 index 000000000000..2662c28f0b4a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MATRIXKMAP @@ -0,0 +1 @@ +# CONFIG_INPUT_MATRIXKMAP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_PSAUX b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_PSAUX new file mode 100644 index 000000000000..9f3efab31946 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_PSAUX @@ -0,0 +1 @@ +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_SCREEN_X b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_SCREEN_X new file mode 100644 index 000000000000..2d2125c5e2d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_SCREEN_X @@ -0,0 +1 @@ +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_SCREEN_Y b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_SCREEN_Y new file mode 100644 index 000000000000..661c803b9298 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_MOUSEDEV_SCREEN_Y @@ -0,0 +1 @@ +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_SPARSEKMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_SPARSEKMAP new file mode 100644 index 000000000000..5b212431a67f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_SPARSEKMAP @@ -0,0 +1 @@ +CONFIG_INPUT_SPARSEKMAP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INTEGRITY_MACHINE_KEYRING b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTEGRITY_MACHINE_KEYRING new file mode 100644 index 000000000000..513e1f806df5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTEGRITY_MACHINE_KEYRING @@ -0,0 +1 @@ +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INTEL_XWAY_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTEL_XWAY_PHY new file mode 100644 index 000000000000..a21e3fff7af3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTEL_XWAY_PHY @@ -0,0 +1 @@ +CONFIG_INTEL_XWAY_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERCONNECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERCONNECT new file mode 100644 index 000000000000..44680093ffc2 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERCONNECT @@ -0,0 +1 @@ +# CONFIG_INTERCONNECT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERVAL_TREE b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERVAL_TREE new file mode 100644 index 000000000000..d41cedd8495f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERVAL_TREE @@ -0,0 +1 @@ +CONFIG_INTERVAL_TREE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERVAL_TREE_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERVAL_TREE_TEST new file mode 100644 index 000000000000..16bcd6d9dd4c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INTERVAL_TREE_TEST @@ -0,0 +1 @@ +# CONFIG_INTERVAL_TREE_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IOMMU_SVA b/anolis/configs/L2-OPTIONAL/default/CONFIG_IOMMU_SVA new file mode 100644 index 000000000000..406b9486746d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IOMMU_SVA @@ -0,0 +1 @@ +CONFIG_IOMMU_SVA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IONIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_IONIC new file mode 100644 index 000000000000..d363a092732f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IONIC @@ -0,0 +1 @@ +# CONFIG_IONIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IO_WQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IO_WQ new file mode 100644 index 000000000000..b08ae18ca6c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IO_WQ @@ -0,0 +1 @@ +CONFIG_IO_WQ=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP5XXX_POWER b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP5XXX_POWER new file mode 100644 index 000000000000..6198a67bfbb2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP5XXX_POWER @@ -0,0 +1 @@ +# CONFIG_IP5XXX_POWER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IPACK_BUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_IPACK_BUS new file mode 100644 index 
000000000000..6c582c83a872 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IPACK_BUS @@ -0,0 +1 @@ +# CONFIG_IPACK_BUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IPV6_IOAM6_LWTUNNEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_IPV6_IOAM6_LWTUNNEL new file mode 100644 index 000000000000..c1839dbecffd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IPV6_IOAM6_LWTUNNEL @@ -0,0 +1 @@ +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IPVLAN_L3S b/anolis/configs/L2-OPTIONAL/default/CONFIG_IPVLAN_L3S new file mode 100644 index 000000000000..1beb2a49fd64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IPVLAN_L3S @@ -0,0 +1 @@ +CONFIG_IPVLAN_L3S=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_MROUTE_COMMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_MROUTE_COMMON new file mode 100644 index 000000000000..48d772ed5a33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_MROUTE_COMMON @@ -0,0 +1 @@ +CONFIG_IP_MROUTE_COMMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_PNP b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_PNP new file mode 100644 index 000000000000..43f68080b72f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_PNP @@ -0,0 +1 @@ +# CONFIG_IP_PNP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_ROUTE_CLASSID b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_ROUTE_CLASSID new file mode 100644 index 000000000000..5e45007dacf5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_ROUTE_CLASSID @@ -0,0 +1 @@ +CONFIG_IP_ROUTE_CLASSID=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_SCTP b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_SCTP new file mode 100644 index 000000000000..cf97ee4f7028 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_SCTP @@ -0,0 +1 @@ +CONFIG_IP_SCTP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH new file mode 100644 index 000000000000..7fd4c93204af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH @@ -0,0 +1 @@ +CONFIG_IP_VS_PROTO_AH=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH_ESP b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH_ESP new file mode 100644 index 000000000000..1264051d41c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_AH_ESP @@ -0,0 +1 @@ +CONFIG_IP_VS_PROTO_AH_ESP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_ESP b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_ESP new file mode 100644 index 000000000000..7f109769c0bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_ESP @@ -0,0 +1 @@ +CONFIG_IP_VS_PROTO_ESP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_SCTP b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_SCTP new file mode 100644 index 000000000000..2fae9319d6a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_SCTP @@ -0,0 +1 @@ +CONFIG_IP_VS_PROTO_SCTP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_TCP b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_TCP new file mode 100644 index 000000000000..cd43245e2343 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_TCP @@ -0,0 +1 @@ +CONFIG_IP_VS_PROTO_TCP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_UDP b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_UDP new file mode 100644 index 000000000000..bf6d4c1b1d7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_PROTO_UDP @@ -0,0 +1 @@ +CONFIG_IP_VS_PROTO_UDP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_TWOS b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_TWOS new file mode 100644 index 000000000000..fa92cefd835e --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_IP_VS_TWOS @@ -0,0 +1 @@ +# CONFIG_IP_VS_TWOS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQSOFF_TRACER b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQSOFF_TRACER new file mode 100644 index 000000000000..e04443c539ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQSOFF_TRACER @@ -0,0 +1 @@ +# CONFIG_IRQSOFF_TRACER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN new file mode 100644 index 000000000000..88e18f18c92c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN @@ -0,0 +1 @@ +CONFIG_IRQ_DOMAIN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN_HIERARCHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN_HIERARCHY new file mode 100644 index 000000000000..a7c5b6f50438 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_DOMAIN_HIERARCHY @@ -0,0 +1 @@ +CONFIG_IRQ_DOMAIN_HIERARCHY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_FORCED_THREADING b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_FORCED_THREADING new file mode 100644 index 000000000000..c69c8b694f3f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_FORCED_THREADING @@ -0,0 +1 @@ +CONFIG_IRQ_FORCED_THREADING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_MSI_IOMMU b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_MSI_IOMMU new file mode 100644 index 000000000000..aafc7eeb7bd6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_MSI_IOMMU @@ -0,0 +1 @@ +CONFIG_IRQ_MSI_IOMMU=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_POLL b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_POLL new file mode 100644 index 000000000000..57deb13287cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_POLL @@ -0,0 +1 @@ +CONFIG_IRQ_POLL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_WORK 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_WORK new file mode 100644 index 000000000000..375211307ad2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_IRQ_WORK @@ -0,0 +1 @@ +CONFIG_IRQ_WORK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ISCSI_TARGET_CXGB4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_ISCSI_TARGET_CXGB4 new file mode 100644 index 000000000000..6b68bab41483 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ISCSI_TARGET_CXGB4 @@ -0,0 +1 @@ +CONFIG_ISCSI_TARGET_CXGB4=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_JFFS2_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_JFFS2_FS new file mode 100644 index 000000000000..1093b2329444 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_JFFS2_FS @@ -0,0 +1 @@ +# CONFIG_JFFS2_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_JFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_JFS_FS new file mode 100644 index 000000000000..b92ea893cb8e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_JFS_FS @@ -0,0 +1 @@ +# CONFIG_JFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_JME b/anolis/configs/L2-OPTIONAL/default/CONFIG_JME new file mode 100644 index 000000000000..63c5aeb653fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_JME @@ -0,0 +1 @@ +# CONFIG_JME is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KALLSYMS_BASE_RELATIVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_KALLSYMS_BASE_RELATIVE new file mode 100644 index 000000000000..a90301e39dc2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KALLSYMS_BASE_RELATIVE @@ -0,0 +1 @@ +CONFIG_KALLSYMS_BASE_RELATIVE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KALLSYMS_SELFTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_KALLSYMS_SELFTEST new file mode 100644 index 000000000000..6782a881cb6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KALLSYMS_SELFTEST @@ -0,0 +1 @@ +# CONFIG_KALLSYMS_SELFTEST 
is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_ADP5588 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_ADP5588 new file mode 100644 index 000000000000..8249429fa112 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_ADP5588 @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_ADP5588 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_ADP5589 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_ADP5589 new file mode 100644 index 000000000000..d4c770f47d20 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_ADP5589 @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_ADP5589 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_CYPRESS_SF b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_CYPRESS_SF new file mode 100644 index 000000000000..368ac620faaf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_CYPRESS_SF @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_CYPRESS_SF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_DLINK_DIR685 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_DLINK_DIR685 new file mode 100644 index 000000000000..9f273ca53db4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_DLINK_DIR685 @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_DLINK_DIR685 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_GPIO_POLLED b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_GPIO_POLLED new file mode 100644 index 000000000000..33c4b140dfe2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_GPIO_POLLED @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_GPIO_POLLED is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LKKBD b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LKKBD new file mode 100644 index 000000000000..2f71c1a03f91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LKKBD @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_LKKBD is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LM8323 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LM8323 new file mode 100644 index 000000000000..be40c9e2c957 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LM8323 @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_LM8323 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LM8333 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LM8333 new file mode 100644 index 000000000000..0c8d88d48bb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_LM8333 @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_LM8333 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MATRIX b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MATRIX new file mode 100644 index 000000000000..47a15a463863 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MATRIX @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_MATRIX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MAX7359 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MAX7359 new file mode 100644 index 000000000000..c2ac1e60a405 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MAX7359 @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_MAX7359 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MCS b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MCS new file mode 100644 index 000000000000..ea7fe67f8715 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MCS @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_MCS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MPR121 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MPR121 new file mode 100644 index 000000000000..7f335d05fd30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_MPR121 @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_MPR121 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_NEWTON b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_NEWTON new 
file mode 100644 index 000000000000..3f4e6520893c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_NEWTON @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_NEWTON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_OPENCORES b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_OPENCORES new file mode 100644 index 000000000000..36e0f18f2407 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_OPENCORES @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_OPENCORES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT1050 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT1050 new file mode 100644 index 000000000000..517dc46ebd7c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT1050 @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_QT1050 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT1070 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT1070 new file mode 100644 index 000000000000..7deb75f9af86 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT1070 @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_QT1070 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT2160 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT2160 new file mode 100644 index 000000000000..4a7aaa4934c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_QT2160 @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_QT2160 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_SAMSUNG b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_SAMSUNG new file mode 100644 index 000000000000..591faaee0821 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_SAMSUNG @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_SAMSUNG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_STOWAWAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_STOWAWAY new file mode 100644 index 000000000000..1b8d883b9d2a --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_STOWAWAY @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_STOWAWAY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_SUNKBD b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_SUNKBD new file mode 100644 index 000000000000..7ace2dc5334b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_SUNKBD @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_SUNKBD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TCA6416 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TCA6416 new file mode 100644 index 000000000000..f2db3acca158 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TCA6416 @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_TCA6416 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TCA8418 b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TCA8418 new file mode 100644 index 000000000000..b853fc9da25c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TCA8418 @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_TCA8418 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TM2_TOUCHKEY b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TM2_TOUCHKEY new file mode 100644 index 000000000000..92da2d284893 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_TM2_TOUCHKEY @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_XTKBD b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_XTKBD new file mode 100644 index 000000000000..5e70361bcbfe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KEYBOARD_XTKBD @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_XTKBD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KPROBE_EVENT_GEN_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_KPROBE_EVENT_GEN_TEST new file mode 100644 index 000000000000..01e6cc50de61 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KPROBE_EVENT_GEN_TEST @@ 
-0,0 +1 @@ +# CONFIG_KPROBE_EVENT_GEN_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KUNIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_KUNIT new file mode 100644 index 000000000000..ce06a7ad37a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KUNIT @@ -0,0 +1 @@ +# CONFIG_KUNIT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT new file mode 100644 index 000000000000..61cfae823b72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT @@ -0,0 +1 @@ +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_GENERIC_HARDWARE_ENABLING b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_GENERIC_HARDWARE_ENABLING new file mode 100644 index 000000000000..43e7a9aaad72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_GENERIC_HARDWARE_ENABLING @@ -0,0 +1 @@ +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_MMIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_MMIO new file mode 100644 index 000000000000..930fda45aa75 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_MMIO @@ -0,0 +1 @@ +CONFIG_KVM_MMIO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_VFIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_VFIO new file mode 100644 index 000000000000..80cc4b9d23d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_VFIO @@ -0,0 +1 @@ +CONFIG_KVM_VFIO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_XFER_TO_GUEST_WORK b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_XFER_TO_GUEST_WORK new file mode 100644 index 000000000000..f17cdfae9935 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_KVM_XFER_TO_GUEST_WORK @@ -0,0 +1 @@ +CONFIG_KVM_XFER_TO_GUEST_WORK=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP new file mode 100644 index 000000000000..04ed675319d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP @@ -0,0 +1 @@ +CONFIG_L2TP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_DEBUGFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_DEBUGFS new file mode 100644 index 000000000000..776213a3b5fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_DEBUGFS @@ -0,0 +1 @@ +CONFIG_L2TP_DEBUGFS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_ETH b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_ETH new file mode 100644 index 000000000000..ecca78c0d1d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_ETH @@ -0,0 +1 @@ +CONFIG_L2TP_ETH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_IP b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_IP new file mode 100644 index 000000000000..00bcedf83cdf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_IP @@ -0,0 +1 @@ +CONFIG_L2TP_IP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_V3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_V3 new file mode 100644 index 000000000000..aaa7e44e9c1c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_L2TP_V3 @@ -0,0 +1 @@ +CONFIG_L2TP_V3=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LAPB b/anolis/configs/L2-OPTIONAL/default/CONFIG_LAPB new file mode 100644 index 000000000000..4279431a3aaf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LAPB @@ -0,0 +1 @@ +# CONFIG_LAPB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LATENCYTOP b/anolis/configs/L2-OPTIONAL/default/CONFIG_LATENCYTOP new file mode 100644 index 000000000000..9d1f9b49d3a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LATENCYTOP @@ -0,0 +1 @@ +# CONFIG_LATENCYTOP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LATTICE_ECP3_CONFIG 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_LATTICE_ECP3_CONFIG new file mode 100644 index 000000000000..7212e8899b78 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LATTICE_ECP3_CONFIG @@ -0,0 +1 @@ +# CONFIG_LATTICE_ECP3_CONFIG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_AMS369FG06 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_AMS369FG06 new file mode 100644 index 000000000000..9fdcb8e361d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_AMS369FG06 @@ -0,0 +1 @@ +# CONFIG_LCD_AMS369FG06 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_CLASS_DEVICE b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_CLASS_DEVICE new file mode 100644 index 000000000000..b558d099950f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_CLASS_DEVICE @@ -0,0 +1 @@ +CONFIG_LCD_CLASS_DEVICE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_HX8357 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_HX8357 new file mode 100644 index 000000000000..26e06206449e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_HX8357 @@ -0,0 +1 @@ +# CONFIG_LCD_HX8357 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_ILI922X b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_ILI922X new file mode 100644 index 000000000000..540e27d99c4d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_ILI922X @@ -0,0 +1 @@ +# CONFIG_LCD_ILI922X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_ILI9320 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_ILI9320 new file mode 100644 index 000000000000..a3beda73c5be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_ILI9320 @@ -0,0 +1 @@ +# CONFIG_LCD_ILI9320 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_L4F00242T03 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_L4F00242T03 new file mode 100644 index 000000000000..393941bfaf10 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_L4F00242T03 @@ -0,0 +1 @@ +# CONFIG_LCD_L4F00242T03 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LMS283GF05 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LMS283GF05 new file mode 100644 index 000000000000..dd444aac3305 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LMS283GF05 @@ -0,0 +1 @@ +# CONFIG_LCD_LMS283GF05 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LMS501KF03 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LMS501KF03 new file mode 100644 index 000000000000..1daabfaefba6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LMS501KF03 @@ -0,0 +1 @@ +# CONFIG_LCD_LMS501KF03 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LTV350QV b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LTV350QV new file mode 100644 index 000000000000..d6b328870ad1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_LTV350QV @@ -0,0 +1 @@ +# CONFIG_LCD_LTV350QV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_OTM3225A b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_OTM3225A new file mode 100644 index 000000000000..8ace97ec132d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_OTM3225A @@ -0,0 +1 @@ +# CONFIG_LCD_OTM3225A is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_PLATFORM b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_PLATFORM new file mode 100644 index 000000000000..b607ce508b5f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_PLATFORM @@ -0,0 +1 @@ +CONFIG_LCD_PLATFORM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_TDO24M b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_TDO24M new file mode 100644 index 000000000000..d75934134bed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_TDO24M @@ -0,0 +1 @@ +# CONFIG_LCD_TDO24M is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_VGG2432A4 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_VGG2432A4 new file mode 100644 index 000000000000..812eb1cf3bdc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LCD_VGG2432A4 @@ -0,0 +1 @@ +# CONFIG_LCD_VGG2432A4 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LDM_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_LDM_PARTITION new file mode 100644 index 000000000000..39c9fed31f2d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LDM_PARTITION @@ -0,0 +1 @@ +# CONFIG_LDM_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_IS_BFD b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_IS_BFD new file mode 100644 index 000000000000..4805d9b35f03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_IS_BFD @@ -0,0 +1 @@ +CONFIG_LD_IS_BFD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN new file mode 100644 index 000000000000..797671dc0ea1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN @@ -0,0 +1 @@ +CONFIG_LD_ORPHAN_WARN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN_LEVEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN_LEVEL new file mode 100644 index 000000000000..111581a8888d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_ORPHAN_WARN_LEVEL @@ -0,0 +1 @@ +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_VERSION new file mode 100644 index 000000000000..06cd1617130c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LD_VERSION @@ -0,0 +1 @@ +CONFIG_LD_VERSION=25000 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_AW200XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_AW200XX new file mode 100644 index 000000000000..7acfb44a9aa5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_AW200XX @@ -0,0 
+1 @@ +# CONFIG_LEDS_AW200XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BD2606MVV b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BD2606MVV new file mode 100644 index 000000000000..4bad376c6aaf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BD2606MVV @@ -0,0 +1 @@ +# CONFIG_LEDS_BD2606MVV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BD2802 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BD2802 new file mode 100644 index 000000000000..e8bc68ba9ed7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BD2802 @@ -0,0 +1 @@ +# CONFIG_LEDS_BD2802 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BLINKM b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BLINKM new file mode 100644 index 000000000000..31e5d7658fa5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BLINKM @@ -0,0 +1 @@ +CONFIG_LEDS_BLINKM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BRIGHTNESS_HW_CHANGED b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BRIGHTNESS_HW_CHANGED new file mode 100644 index 000000000000..d2f9256b468f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_BRIGHTNESS_HW_CHANGED @@ -0,0 +1 @@ +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_CLASS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_CLASS new file mode 100644 index 000000000000..6ab3be8d5dc7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_CLASS @@ -0,0 +1 @@ +CONFIG_LEDS_CLASS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_CLASS_MULTICOLOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_CLASS_MULTICOLOR new file mode 100644 index 000000000000..159fd0d5c0c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_CLASS_MULTICOLOR @@ -0,0 +1 @@ +# CONFIG_LEDS_CLASS_MULTICOLOR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_DAC124S085 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_DAC124S085 new file mode 100644 index 000000000000..a26e77d7fd41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_DAC124S085 @@ -0,0 +1 @@ +# CONFIG_LEDS_DAC124S085 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_GPIO new file mode 100644 index 000000000000..f7c11aa94956 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_GPIO @@ -0,0 +1 @@ +# CONFIG_LEDS_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_IS31FL319X b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_IS31FL319X new file mode 100644 index 000000000000..1fa853d1c4c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_IS31FL319X @@ -0,0 +1 @@ +# CONFIG_LEDS_IS31FL319X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3530 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3530 new file mode 100644 index 000000000000..4ecc73b3498e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3530 @@ -0,0 +1 @@ +CONFIG_LEDS_LM3530=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3532 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3532 new file mode 100644 index 000000000000..eb788f96e5a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3532 @@ -0,0 +1 @@ +# CONFIG_LEDS_LM3532 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM355x b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM355x new file mode 100644 index 000000000000..4824c7640496 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM355x @@ -0,0 +1 @@ +# CONFIG_LEDS_LM355x is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3642 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3642 new file mode 100644 index 000000000000..0d6f2ea9a730 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LM3642 @@ -0,0 +1 @@ +# 
CONFIG_LEDS_LM3642 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP3944 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP3944 new file mode 100644 index 000000000000..25af7786239a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP3944 @@ -0,0 +1 @@ +CONFIG_LEDS_LP3944=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP3952 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP3952 new file mode 100644 index 000000000000..be5fbf7a9804 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP3952 @@ -0,0 +1 @@ +# CONFIG_LEDS_LP3952 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP50XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP50XX new file mode 100644 index 000000000000..99ee0f5990b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_LP50XX @@ -0,0 +1 @@ +# CONFIG_LEDS_LP50XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_MLXREG b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_MLXREG new file mode 100644 index 000000000000..30420c7833ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_MLXREG @@ -0,0 +1 @@ +# CONFIG_LEDS_MLXREG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA9532 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA9532 new file mode 100644 index 000000000000..1c43dff3333e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA9532 @@ -0,0 +1 @@ +# CONFIG_LEDS_PCA9532 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA955X b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA955X new file mode 100644 index 000000000000..7660d64c170a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA955X @@ -0,0 +1 @@ +# CONFIG_LEDS_PCA955X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA963X b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA963X new file mode 100644 index 000000000000..4e351d6541af --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA963X @@ -0,0 +1 @@ +# CONFIG_LEDS_PCA963X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA995X b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA995X new file mode 100644 index 000000000000..d5bca711d915 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PCA995X @@ -0,0 +1 @@ +# CONFIG_LEDS_PCA995X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PWM b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PWM new file mode 100644 index 000000000000..da45a39e0af6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_PWM @@ -0,0 +1 @@ +# CONFIG_LEDS_PWM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TCA6507 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TCA6507 new file mode 100644 index 000000000000..074d795d8fd9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TCA6507 @@ -0,0 +1 @@ +# CONFIG_LEDS_TCA6507 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TLC591XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TLC591XX new file mode 100644 index 000000000000..31a66a3fc8a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TLC591XX @@ -0,0 +1 @@ +# CONFIG_LEDS_TLC591XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGERS new file mode 100644 index 000000000000..5793ba354381 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGERS @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGERS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_ACTIVITY b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_ACTIVITY new file mode 100644 index 000000000000..2a67a187f208 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_ACTIVITY @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_BACKLIGHT b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_BACKLIGHT new file mode 100644 index 000000000000..714cf78df871 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_BACKLIGHT @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_BACKLIGHT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_CAMERA b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_CAMERA new file mode 100644 index 000000000000..09cfe4e9dd70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_CAMERA @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_CAMERA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_CPU b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_CPU new file mode 100644 index 000000000000..de9ab8230a9e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_CPU @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_CPU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_DEFAULT_ON b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_DEFAULT_ON new file mode 100644 index 000000000000..28cf72daa467 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_DEFAULT_ON @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_HEARTBEAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_HEARTBEAT new file mode 100644 index 000000000000..f1aa15726333 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_HEARTBEAT @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_HEARTBEAT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_MTD b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_MTD new file mode 100644 index 000000000000..12e4f7f857f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_MTD @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_MTD is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_NETDEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_NETDEV new file mode 100644 index 000000000000..cbd7cccc7bdf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_NETDEV @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_NETDEV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_ONESHOT b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_ONESHOT new file mode 100644 index 000000000000..5f108ca76140 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_ONESHOT @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_ONESHOT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_PANIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_PANIC new file mode 100644 index 000000000000..f9e36c96e267 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_PANIC @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_PANIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_PATTERN b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_PATTERN new file mode 100644 index 000000000000..21b9a789c388 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_PATTERN @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_PATTERN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TIMER b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TIMER new file mode 100644 index 000000000000..b274bc418c72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TIMER @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_TIMER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TRANSIENT b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TRANSIENT new file mode 100644 index 000000000000..0fb49a106797 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TRANSIENT @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_TRANSIENT=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TTY b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TTY new file mode 100644 index 000000000000..0f9979241165 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_TRIGGER_TTY @@ -0,0 +1 @@ +# CONFIG_LEDS_TRIGGER_TTY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_USER b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_USER new file mode 100644 index 000000000000..15eaa9cbba96 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEDS_USER @@ -0,0 +1 @@ +# CONFIG_LEDS_USER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LED_TRIGGER_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_LED_TRIGGER_PHY new file mode 100644 index 000000000000..fb63bf4f8150 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LED_TRIGGER_PHY @@ -0,0 +1 @@ +CONFIG_LED_TRIGGER_PHY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_DIRECT_IO b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_DIRECT_IO new file mode 100644 index 000000000000..4ee74e5ffa6b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_DIRECT_IO @@ -0,0 +1 @@ +CONFIG_LEGACY_DIRECT_IO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_PTYS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_PTYS new file mode 100644 index 000000000000..ddc9171d6189 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_PTYS @@ -0,0 +1 @@ +# CONFIG_LEGACY_PTYS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_TIOCSTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_TIOCSTI new file mode 100644 index 000000000000..c6373aba6636 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LEGACY_TIOCSTI @@ -0,0 +1 @@ +CONFIG_LEGACY_TIOCSTI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LIBCRC32C b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIBCRC32C new file mode 100644 index 000000000000..ed0c7dfc7191 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIBCRC32C @@ -0,0 +1 @@ +CONFIG_LIBCRC32C=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LIBWX b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIBWX new file mode 100644 index 000000000000..6d784c86d22f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIBWX @@ -0,0 +1 @@ +CONFIG_LIBWX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO new file mode 100644 index 000000000000..9a815fc0627e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO @@ -0,0 +1 @@ +CONFIG_LIQUIDIO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO_CORE new file mode 100644 index 000000000000..2f9d0a1c6429 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO_CORE @@ -0,0 +1 @@ +CONFIG_LIQUIDIO_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO_VF b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO_VF new file mode 100644 index 000000000000..c32f5e60c81e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LIQUIDIO_VF @@ -0,0 +1 @@ +CONFIG_LIQUIDIO_VF=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LKDTM b/anolis/configs/L2-OPTIONAL/default/CONFIG_LKDTM new file mode 100644 index 000000000000..5ac6135a9226 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LKDTM @@ -0,0 +1 @@ +# CONFIG_LKDTM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LLC b/anolis/configs/L2-OPTIONAL/default/CONFIG_LLC new file mode 100644 index 000000000000..1e58a6325dd6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LLC @@ -0,0 +1 @@ +CONFIG_LLC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LLC2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LLC2 new file mode 100644 index 000000000000..7b1143ffc782 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LLC2 @@ -0,0 +1 @@ +# CONFIG_LLC2 is 
not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LLD_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_LLD_VERSION new file mode 100644 index 000000000000..339de241324e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LLD_VERSION @@ -0,0 +1 @@ +CONFIG_LLD_VERSION=0 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LMK04832 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LMK04832 new file mode 100644 index 000000000000..563084c7c873 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LMK04832 @@ -0,0 +1 @@ +# CONFIG_LMK04832 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_DEBUGGING_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_DEBUGGING_SUPPORT new file mode 100644 index 000000000000..b7ff683eb3a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_DEBUGGING_SUPPORT @@ -0,0 +1 @@ +CONFIG_LOCK_DEBUGGING_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_MM_AND_FIND_VMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_MM_AND_FIND_VMA new file mode 100644 index 000000000000..2b43310193c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_MM_AND_FIND_VMA @@ -0,0 +1 @@ +CONFIG_LOCK_MM_AND_FIND_VMA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_SPIN_ON_OWNER b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_SPIN_ON_OWNER new file mode 100644 index 000000000000..c00775144ca5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_SPIN_ON_OWNER @@ -0,0 +1 @@ +CONFIG_LOCK_SPIN_ON_OWNER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_STAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_STAT new file mode 100644 index 000000000000..ab733ddc26ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_STAT @@ -0,0 +1 @@ +# CONFIG_LOCK_STAT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_TORTURE_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_TORTURE_TEST new file mode 100644 
index 000000000000..8d2ede5fbd19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOCK_TORTURE_TEST @@ -0,0 +1 @@ +# CONFIG_LOCK_TORTURE_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIG940_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIG940_FF new file mode 100644 index 000000000000..c4201b052bb4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIG940_FF @@ -0,0 +1 @@ +# CONFIG_LOGIG940_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIRUMBLEPAD2_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIRUMBLEPAD2_FF new file mode 100644 index 000000000000..07bc15222d58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIRUMBLEPAD2_FF @@ -0,0 +1 @@ +# CONFIG_LOGIRUMBLEPAD2_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGITECH_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGITECH_FF new file mode 100644 index 000000000000..0f258d20be17 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGITECH_FF @@ -0,0 +1 @@ +# CONFIG_LOGITECH_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIWHEELS_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIWHEELS_FF new file mode 100644 index 000000000000..5730edeb0ffb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGIWHEELS_FF @@ -0,0 +1 @@ +# CONFIG_LOGIWHEELS_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO new file mode 100644 index 000000000000..9772c12e8197 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO @@ -0,0 +1 @@ +CONFIG_LOGO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_CLUT224 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_CLUT224 new file mode 100644 index 000000000000..53fbc2986a7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_CLUT224 @@ -0,0 +1 @@ +CONFIG_LOGO_LINUX_CLUT224=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_MONO b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_MONO new file mode 100644 index 000000000000..98033fe35bf7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_MONO @@ -0,0 +1 @@ +# CONFIG_LOGO_LINUX_MONO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_VGA16 b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_VGA16 new file mode 100644 index 000000000000..28fc94e9b8cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LOGO_LINUX_VGA16 @@ -0,0 +1 @@ +# CONFIG_LOGO_LINUX_VGA16 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LSI_ET1011C_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_LSI_ET1011C_PHY new file mode 100644 index 000000000000..d1c03f3a1fc7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LSI_ET1011C_PHY @@ -0,0 +1 @@ +CONFIG_LSI_ET1011C_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LXT_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_LXT_PHY new file mode 100644 index 000000000000..11a8eb8d2e58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LXT_PHY @@ -0,0 +1 @@ +CONFIG_LXT_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4HC_COMPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4HC_COMPRESS new file mode 100644 index 000000000000..5e792ce7701c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4HC_COMPRESS @@ -0,0 +1 @@ +CONFIG_LZ4HC_COMPRESS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4_COMPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4_COMPRESS new file mode 100644 index 000000000000..d16694035c1e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4_COMPRESS @@ -0,0 +1 @@ +CONFIG_LZ4_COMPRESS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4_DECOMPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4_DECOMPRESS new file mode 100644 index 000000000000..f7f4d65e2b03 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZ4_DECOMPRESS @@ -0,0 +1 @@ +CONFIG_LZ4_DECOMPRESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LZO_COMPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZO_COMPRESS new file mode 100644 index 000000000000..3da61c520b50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZO_COMPRESS @@ -0,0 +1 @@ +CONFIG_LZO_COMPRESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_LZO_DECOMPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZO_DECOMPRESS new file mode 100644 index 000000000000..322d2185e406 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_LZO_DECOMPRESS @@ -0,0 +1 @@ +CONFIG_LZO_DECOMPRESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211 new file mode 100644 index 000000000000..eaf3124108b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211 @@ -0,0 +1 @@ +CONFIG_MAC80211=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_DEBUGFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_DEBUGFS new file mode 100644 index 000000000000..704e88c8b0f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_DEBUGFS @@ -0,0 +1 @@ +CONFIG_MAC80211_DEBUGFS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_DEBUG_MENU b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_DEBUG_MENU new file mode 100644 index 000000000000..3d7fd5e3c956 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_DEBUG_MENU @@ -0,0 +1 @@ +# CONFIG_MAC80211_DEBUG_MENU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_HAS_RC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_HAS_RC new file mode 100644 index 000000000000..8491b8f3093e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_HAS_RC @@ -0,0 +1 @@ +CONFIG_MAC80211_HAS_RC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_LEDS 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_LEDS new file mode 100644 index 000000000000..d4e90cabcb60 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_LEDS @@ -0,0 +1 @@ +CONFIG_MAC80211_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_MESH b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_MESH new file mode 100644 index 000000000000..b322ebd5c158 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_MESH @@ -0,0 +1 @@ +# CONFIG_MAC80211_MESH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_MESSAGE_TRACING b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_MESSAGE_TRACING new file mode 100644 index 000000000000..5e52e182b791 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_MESSAGE_TRACING @@ -0,0 +1 @@ +# CONFIG_MAC80211_MESSAGE_TRACING is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_DEFAULT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_DEFAULT new file mode 100644 index 000000000000..e0c92ab2c8db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_DEFAULT @@ -0,0 +1 @@ +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_DEFAULT_MINSTREL b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_DEFAULT_MINSTREL new file mode 100644 index 000000000000..62bbca7615b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_DEFAULT_MINSTREL @@ -0,0 +1 @@ +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_MINSTREL b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_MINSTREL new file mode 100644 index 000000000000..99bb0c835960 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_RC_MINSTREL @@ -0,0 +1 @@ +CONFIG_MAC80211_RC_MINSTREL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_STA_HASH_MAX_SIZE 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_STA_HASH_MAX_SIZE new file mode 100644 index 000000000000..c8e7c16b850b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC80211_STA_HASH_MAX_SIZE @@ -0,0 +1 @@ +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC802154 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC802154 new file mode 100644 index 000000000000..d63984cd58be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAC802154 @@ -0,0 +1 @@ +CONFIG_MAC802154=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAILBOX b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAILBOX new file mode 100644 index 000000000000..f1a6342b456a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAILBOX @@ -0,0 +1 @@ +CONFIG_MAILBOX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MANAGER_SBS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MANAGER_SBS new file mode 100644 index 000000000000..aae8258e3e36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MANAGER_SBS @@ -0,0 +1 @@ +# CONFIG_MANAGER_SBS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_10G_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_10G_PHY new file mode 100644 index 000000000000..6dadd98ffcdb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_10G_PHY @@ -0,0 +1 @@ +CONFIG_MARVELL_10G_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_88Q2XXX_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_88Q2XXX_PHY new file mode 100644 index 000000000000..f5aeed40f247 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_88Q2XXX_PHY @@ -0,0 +1 @@ +# CONFIG_MARVELL_88Q2XXX_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_88X2222_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_88X2222_PHY new file mode 100644 index 000000000000..dbcfd730e375 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_88X2222_PHY @@ -0,0 +1 @@ +# CONFIG_MARVELL_88X2222_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_PHY new file mode 100644 index 000000000000..e6aeb4de19f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MARVELL_PHY @@ -0,0 +1 @@ +CONFIG_MARVELL_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAX31827 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAX31827 new file mode 100644 index 000000000000..6a6d25b02030 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAX31827 @@ -0,0 +1 @@ +# CONFIG_MAX31827 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAX63XX_WATCHDOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAX63XX_WATCHDOG new file mode 100644 index 000000000000..cb71a58bd18c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAX63XX_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_MAX63XX_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MAXLINEAR_GPHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAXLINEAR_GPHY new file mode 100644 index 000000000000..c69d4261e8d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MAXLINEAR_GPHY @@ -0,0 +1 @@ +# CONFIG_MAXLINEAR_GPHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MCB b/anolis/configs/L2-OPTIONAL/default/CONFIG_MCB new file mode 100644 index 000000000000..775421e9629c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MCB @@ -0,0 +1 @@ +# CONFIG_MCB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MCTP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MCTP new file mode 100644 index 000000000000..c0ad73c2d113 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MCTP @@ -0,0 +1 @@ +# CONFIG_MCTP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO new file mode 100644 index 
000000000000..c2915e9c3a46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO @@ -0,0 +1 @@ +CONFIG_MDIO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BCM_UNIMAC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BCM_UNIMAC new file mode 100644 index 000000000000..a0c92ceeb2e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BCM_UNIMAC @@ -0,0 +1 @@ +CONFIG_MDIO_BCM_UNIMAC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BITBANG b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BITBANG new file mode 100644 index 000000000000..06f4ddebc84d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BITBANG @@ -0,0 +1 @@ +CONFIG_MDIO_BITBANG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BUS new file mode 100644 index 000000000000..00e812243dcc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_BUS @@ -0,0 +1 @@ +CONFIG_MDIO_BUS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_CAVIUM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_CAVIUM new file mode 100644 index 000000000000..6ebe615162ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_CAVIUM @@ -0,0 +1 @@ +CONFIG_MDIO_CAVIUM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_DEVICE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_DEVICE new file mode 100644 index 000000000000..07444b0be692 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_DEVICE @@ -0,0 +1 @@ +CONFIG_MDIO_DEVICE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_DEVRES b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_DEVRES new file mode 100644 index 000000000000..6f2079183fb7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_DEVRES @@ -0,0 +1 @@ +CONFIG_MDIO_DEVRES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_I2C new file mode 100644 index 
000000000000..df7d9e02511a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_I2C @@ -0,0 +1 @@ +CONFIG_MDIO_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_MVUSB b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_MVUSB new file mode 100644 index 000000000000..dd417ba959f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_MVUSB @@ -0,0 +1 @@ +# CONFIG_MDIO_MVUSB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_THUNDER b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_THUNDER new file mode 100644 index 000000000000..00eb9a6baf80 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MDIO_THUNDER @@ -0,0 +1 @@ +CONFIG_MDIO_THUNDER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MD_BITMAP_FILE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MD_BITMAP_FILE new file mode 100644 index 000000000000..08867772ddd5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MD_BITMAP_FILE @@ -0,0 +1 @@ +CONFIG_MD_BITMAP_FILE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEDIATEK_GE_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEDIATEK_GE_PHY new file mode 100644 index 000000000000..a28e8edef0d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEDIATEK_GE_PHY @@ -0,0 +1 @@ +# CONFIG_MEDIATEK_GE_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMCG_KMEM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMCG_KMEM new file mode 100644 index 000000000000..6304e9d807b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMCG_KMEM @@ -0,0 +1 @@ +CONFIG_MEMCG_KMEM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMFD_CREATE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMFD_CREATE new file mode 100644 index 000000000000..31a75bff9601 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMFD_CREATE @@ -0,0 +1 @@ +CONFIG_MEMFD_CREATE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMORY_ISOLATION 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMORY_ISOLATION new file mode 100644 index 000000000000..9a288f900bff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMORY_ISOLATION @@ -0,0 +1 @@ +CONFIG_MEMORY_ISOLATION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMREGION b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMREGION new file mode 100644 index 000000000000..741d86477cd2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMREGION @@ -0,0 +1 @@ +CONFIG_MEMREGION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK new file mode 100644 index 000000000000..9ed5ed5f5c65 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK @@ -0,0 +1 @@ +CONFIG_MEMSTICK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_DEBUG new file mode 100644 index 000000000000..31f989fb9339 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_DEBUG @@ -0,0 +1 @@ +# CONFIG_MEMSTICK_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_JMICRON_38X b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_JMICRON_38X new file mode 100644 index 000000000000..15743ca31997 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_JMICRON_38X @@ -0,0 +1 @@ +CONFIG_MEMSTICK_JMICRON_38X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_R592 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_R592 new file mode 100644 index 000000000000..8aec8b70b2d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_R592 @@ -0,0 +1 @@ +CONFIG_MEMSTICK_R592=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_TIFM_MS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_TIFM_MS new file mode 100644 index 000000000000..d6757770ac1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_TIFM_MS @@ -0,0 +1 @@ 
+CONFIG_MEMSTICK_TIFM_MS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_UNSAFE_RESUME b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_UNSAFE_RESUME new file mode 100644 index 000000000000..940b4c5a698a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMSTICK_UNSAFE_RESUME @@ -0,0 +1 @@ +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMTEST new file mode 100644 index 000000000000..27d7daee6734 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEMTEST @@ -0,0 +1 @@ +# CONFIG_MEMTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MEN_A21_WDT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEN_A21_WDT new file mode 100644 index 000000000000..bc360bfab56c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MEN_A21_WDT @@ -0,0 +1 @@ +# CONFIG_MEN_A21_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM800 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM800 new file mode 100644 index 000000000000..858dc2db1f28 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM800 @@ -0,0 +1 @@ +# CONFIG_MFD_88PM800 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM805 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM805 new file mode 100644 index 000000000000..f3cd46c908e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM805 @@ -0,0 +1 @@ +# CONFIG_MFD_88PM805 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM860X b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM860X new file mode 100644 index 000000000000..8591283715af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_88PM860X @@ -0,0 +1 @@ +# CONFIG_MFD_88PM860X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AAT2870_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AAT2870_CORE new file mode 
100644 index 000000000000..9d4cf9056ea2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AAT2870_CORE @@ -0,0 +1 @@ +# CONFIG_MFD_AAT2870_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ARIZONA_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ARIZONA_I2C new file mode 100644 index 000000000000..63be91cb3947 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ARIZONA_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_ARIZONA_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ARIZONA_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ARIZONA_SPI new file mode 100644 index 000000000000..e3e9c675871e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ARIZONA_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_ARIZONA_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AS3711 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AS3711 new file mode 100644 index 000000000000..53b3b04f1e30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AS3711 @@ -0,0 +1 @@ +# CONFIG_MFD_AS3711 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ATC260X_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ATC260X_I2C new file mode 100644 index 000000000000..f24eab65148b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_ATC260X_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_ATC260X_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AXP20X_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AXP20X_I2C new file mode 100644 index 000000000000..e9bcb19b142b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_AXP20X_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_AXP20X_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_BCM590XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_BCM590XX new file mode 100644 index 000000000000..104736d3861a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_BCM590XX @@ -0,0 +1 @@ +# 
CONFIG_MFD_BCM590XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_BD9571MWV b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_BD9571MWV new file mode 100644 index 000000000000..d321ad3c62d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_BD9571MWV @@ -0,0 +1 @@ +# CONFIG_MFD_BD9571MWV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_CS42L43_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_CS42L43_I2C new file mode 100644 index 000000000000..20a2316bfad7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_CS42L43_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_CS42L43_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9052_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9052_I2C new file mode 100644 index 000000000000..c294bc24d5cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9052_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_DA9052_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9052_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9052_SPI new file mode 100644 index 000000000000..095062683cd7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9052_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_DA9052_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9055 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9055 new file mode 100644 index 000000000000..3f7ae6423011 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9055 @@ -0,0 +1 @@ +# CONFIG_MFD_DA9055 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9062 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9062 new file mode 100644 index 000000000000..d605877c79af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9062 @@ -0,0 +1 @@ +# CONFIG_MFD_DA9062 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9063 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9063 new file mode 
100644 index 000000000000..ba3cfa7cbfff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9063 @@ -0,0 +1 @@ +# CONFIG_MFD_DA9063 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9150 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9150 new file mode 100644 index 000000000000..33d4eb529453 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DA9150 @@ -0,0 +1 @@ +# CONFIG_MFD_DA9150 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DLN2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DLN2 new file mode 100644 index 000000000000..b0c8ca366c64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_DLN2 @@ -0,0 +1 @@ +# CONFIG_MFD_DLN2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_INTEL_M10_BMC_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_INTEL_M10_BMC_SPI new file mode 100644 index 000000000000..fa028d4fc507 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_INTEL_M10_BMC_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_IQS62X b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_IQS62X new file mode 100644 index 000000000000..c1f3356ac1e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_IQS62X @@ -0,0 +1 @@ +# CONFIG_MFD_IQS62X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_JANZ_CMODIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_JANZ_CMODIO new file mode 100644 index 000000000000..c6926b8416da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_JANZ_CMODIO @@ -0,0 +1 @@ +# CONFIG_MFD_JANZ_CMODIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_KEMPLD b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_KEMPLD new file mode 100644 index 000000000000..a94ded357c4e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_KEMPLD @@ -0,0 +1 @@ +# CONFIG_MFD_KEMPLD is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LM3533 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LM3533 new file mode 100644 index 000000000000..0ab890140d9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LM3533 @@ -0,0 +1 @@ +# CONFIG_MFD_LM3533 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LP3943 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LP3943 new file mode 100644 index 000000000000..de711a681b2f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LP3943 @@ -0,0 +1 @@ +# CONFIG_MFD_LP3943 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LP8788 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LP8788 new file mode 100644 index 000000000000..4600a154f884 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_LP8788 @@ -0,0 +1 @@ +# CONFIG_MFD_LP8788 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MADERA b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MADERA new file mode 100644 index 000000000000..71aa85920f50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MADERA @@ -0,0 +1 @@ +# CONFIG_MFD_MADERA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX14577 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX14577 new file mode 100644 index 000000000000..ebbae1997462 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX14577 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX14577 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77541 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77541 new file mode 100644 index 000000000000..9d07de075613 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77541 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX77541 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77693 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77693 new file mode 100644 index 000000000000..35769bd83e79 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77693 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX77693 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77843 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77843 new file mode 100644 index 000000000000..487219b46158 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX77843 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX77843 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8907 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8907 new file mode 100644 index 000000000000..7921e7b735a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8907 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX8907 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8925 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8925 new file mode 100644 index 000000000000..302fe8314b70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8925 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX8925 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8997 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8997 new file mode 100644 index 000000000000..bb749db3274f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8997 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX8997 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8998 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8998 new file mode 100644 index 000000000000..9478aeba04a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MAX8998 @@ -0,0 +1 @@ +# CONFIG_MFD_MAX8998 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MC13XXX_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MC13XXX_I2C new file mode 100644 index 000000000000..cf8c1df5cf19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MC13XXX_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_MC13XXX_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MC13XXX_SPI 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MC13XXX_SPI new file mode 100644 index 000000000000..a368bc8e68a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MC13XXX_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_MC13XXX_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MENF21BMC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MENF21BMC new file mode 100644 index 000000000000..378b77170c3f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MENF21BMC @@ -0,0 +1 @@ +# CONFIG_MFD_MENF21BMC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MP2629 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MP2629 new file mode 100644 index 000000000000..2bbfa6798c28 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MP2629 @@ -0,0 +1 @@ +# CONFIG_MFD_MP2629 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6360 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6360 new file mode 100644 index 000000000000..34ddea555002 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6360 @@ -0,0 +1 @@ +# CONFIG_MFD_MT6360 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6370 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6370 new file mode 100644 index 000000000000..590822965b68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6370 @@ -0,0 +1 @@ +# CONFIG_MFD_MT6370 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6397 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6397 new file mode 100644 index 000000000000..312fa668fe3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_MT6397 @@ -0,0 +1 @@ +# CONFIG_MFD_MT6397 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_OCELOT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_OCELOT new file mode 100644 index 000000000000..e3179a571497 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_OCELOT @@ -0,0 +1 @@ +# 
CONFIG_MFD_OCELOT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_PALMAS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_PALMAS new file mode 100644 index 000000000000..e09bcbf18b90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_PALMAS @@ -0,0 +1 @@ +# CONFIG_MFD_PALMAS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_PCF50633 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_PCF50633 new file mode 100644 index 000000000000..d79315e31a59 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_PCF50633 @@ -0,0 +1 @@ +# CONFIG_MFD_PCF50633 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RC5T583 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RC5T583 new file mode 100644 index 000000000000..f3976be092cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RC5T583 @@ -0,0 +1 @@ +# CONFIG_MFD_RC5T583 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RDC321X b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RDC321X new file mode 100644 index 000000000000..000bbf18d6e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RDC321X @@ -0,0 +1 @@ +# CONFIG_MFD_RDC321X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RETU b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RETU new file mode 100644 index 000000000000..bc6621f33017 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RETU @@ -0,0 +1 @@ +# CONFIG_MFD_RETU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT4831 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT4831 new file mode 100644 index 000000000000..ac3a6f05e883 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT4831 @@ -0,0 +1 @@ +# CONFIG_MFD_RT4831 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT5033 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT5033 new file mode 100644 index 000000000000..6e156c85b1e0 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT5033 @@ -0,0 +1 @@ +# CONFIG_MFD_RT5033 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT5120 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT5120 new file mode 100644 index 000000000000..51ad42893eb0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_RT5120 @@ -0,0 +1 @@ +# CONFIG_MFD_RT5120 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SI476X_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SI476X_CORE new file mode 100644 index 000000000000..677f708982e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SI476X_CORE @@ -0,0 +1 @@ +# CONFIG_MFD_SI476X_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SKY81452 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SKY81452 new file mode 100644 index 000000000000..37cbb32d4ca4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SKY81452 @@ -0,0 +1 @@ +# CONFIG_MFD_SKY81452 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SMPRO b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SMPRO new file mode 100644 index 000000000000..42478cf33f2b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SMPRO @@ -0,0 +1 @@ +# CONFIG_MFD_SMPRO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SY7636A b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SY7636A new file mode 100644 index 000000000000..ad3a242f2e9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_SY7636A @@ -0,0 +1 @@ +# CONFIG_MFD_SY7636A is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TI_LMU b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TI_LMU new file mode 100644 index 000000000000..4a84e3fd01f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TI_LMU @@ -0,0 +1 @@ +# CONFIG_MFD_TI_LMU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TI_LP873X 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TI_LP873X new file mode 100644 index 000000000000..55cab8a6a0b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TI_LP873X @@ -0,0 +1 @@ +# CONFIG_MFD_TI_LP873X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65086 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65086 new file mode 100644 index 000000000000..8257e007649c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65086 @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65086 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65090 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65090 new file mode 100644 index 000000000000..50e4f86dac28 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65090 @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65090 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6586X b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6586X new file mode 100644 index 000000000000..142ccf9dd12c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6586X @@ -0,0 +1 @@ +# CONFIG_MFD_TPS6586X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65910 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65910 new file mode 100644 index 000000000000..d4a3d009110e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65910 @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65910 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65912_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65912_I2C new file mode 100644 index 000000000000..a5cb98430f9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65912_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65912_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65912_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65912_SPI new file mode 100644 index 000000000000..99856458b5b6 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS65912_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_TPS65912_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6594_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6594_I2C new file mode 100644 index 000000000000..e86265f9648e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6594_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_TPS6594_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6594_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6594_SPI new file mode 100644 index 000000000000..0386f325e0a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TPS6594_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_TPS6594_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TQMX86 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TQMX86 new file mode 100644 index 000000000000..af8cb255f196 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_TQMX86 @@ -0,0 +1 @@ +# CONFIG_MFD_TQMX86 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WL1273_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WL1273_CORE new file mode 100644 index 000000000000..cfdc6c17b12e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WL1273_CORE @@ -0,0 +1 @@ +# CONFIG_MFD_WL1273_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM831X_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM831X_I2C new file mode 100644 index 000000000000..5134deeb3d16 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM831X_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_WM831X_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM831X_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM831X_SPI new file mode 100644 index 000000000000..443939c2d4a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM831X_SPI @@ -0,0 +1 @@ +# CONFIG_MFD_WM831X_SPI is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8350_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8350_I2C new file mode 100644 index 000000000000..9da9a94d8e9f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8350_I2C @@ -0,0 +1 @@ +# CONFIG_MFD_WM8350_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8400 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8400 new file mode 100644 index 000000000000..555799f97c84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8400 @@ -0,0 +1 @@ +# CONFIG_MFD_WM8400 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8994 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8994 new file mode 100644 index 000000000000..678e15d01d5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MFD_WM8994 @@ -0,0 +1 @@ +# CONFIG_MFD_WM8994 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MHI_BUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MHI_BUS new file mode 100644 index 000000000000..bdccdbabcb06 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MHI_BUS @@ -0,0 +1 @@ +# CONFIG_MHI_BUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MHI_BUS_EP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MHI_BUS_EP new file mode 100644 index 000000000000..5aba2828c381 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MHI_BUS_EP @@ -0,0 +1 @@ +# CONFIG_MHI_BUS_EP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MHP_MEMMAP_ON_MEMORY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MHP_MEMMAP_ON_MEMORY new file mode 100644 index 000000000000..5674a0084838 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MHP_MEMMAP_ON_MEMORY @@ -0,0 +1 @@ +CONFIG_MHP_MEMMAP_ON_MEMORY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MICREL_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICREL_PHY new file mode 100644 index 000000000000..f82ae918420f --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICREL_PHY @@ -0,0 +1 @@ +CONFIG_MICREL_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_PHY new file mode 100644 index 000000000000..6b800d4d4b14 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_PHY @@ -0,0 +1 @@ +CONFIG_MICROCHIP_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_T1S_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_T1S_PHY new file mode 100644 index 000000000000..601562cd991b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_T1S_PHY @@ -0,0 +1 @@ +# CONFIG_MICROCHIP_T1S_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_T1_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_T1_PHY new file mode 100644 index 000000000000..39cad732d644 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROCHIP_T1_PHY @@ -0,0 +1 @@ +CONFIG_MICROCHIP_T1_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROSEMI_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROSEMI_PHY new file mode 100644 index 000000000000..0e171f614fef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROSEMI_PHY @@ -0,0 +1 @@ +CONFIG_MICROSEMI_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MII b/anolis/configs/L2-OPTIONAL/default/CONFIG_MII new file mode 100644 index 000000000000..b7d4f657c118 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MII @@ -0,0 +1 @@ +CONFIG_MII=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MINIX_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MINIX_FS new file mode 100644 index 000000000000..d407e6e09318 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MINIX_FS @@ -0,0 +1 @@ +# CONFIG_MINIX_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MISC_ALCOR_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MISC_ALCOR_PCI new file mode 100644 
index 000000000000..6d2cb18a77f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MISC_ALCOR_PCI @@ -0,0 +1 @@ +# CONFIG_MISC_ALCOR_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX4_INFINIBAND b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX4_INFINIBAND new file mode 100644 index 000000000000..4f7e86412b01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX4_INFINIBAND @@ -0,0 +1 @@ +CONFIG_MLX4_INFINIBAND=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_EN_IPSEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_EN_IPSEC new file mode 100644 index 000000000000..ba2658e5ae52 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_EN_IPSEC @@ -0,0 +1 @@ +# CONFIG_MLX5_EN_IPSEC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_EN_TLS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_EN_TLS new file mode 100644 index 000000000000..75b19d32aa42 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_EN_TLS @@ -0,0 +1 @@ +# CONFIG_MLX5_EN_TLS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_MACSEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_MACSEC new file mode 100644 index 000000000000..c1e8375c10fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_MACSEC @@ -0,0 +1 @@ +# CONFIG_MLX5_MACSEC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_VFIO_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_VFIO_PCI new file mode 100644 index 000000000000..2ee229cc030b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MLX5_VFIO_PCI @@ -0,0 +1 @@ +# CONFIG_MLX5_VFIO_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC new file mode 100644 index 000000000000..af952ae6546e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC @@ -0,0 +1 @@ +CONFIG_MMC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_BLOCK 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_BLOCK new file mode 100644 index 000000000000..c89e86066e72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_BLOCK @@ -0,0 +1 @@ +CONFIG_MMC_BLOCK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_BLOCK_MINORS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_BLOCK_MINORS new file mode 100644 index 000000000000..9aad9741a701 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_BLOCK_MINORS @@ -0,0 +1 @@ +CONFIG_MMC_BLOCK_MINORS=8 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_CB710 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_CB710 new file mode 100644 index 000000000000..b315628e437a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_CB710 @@ -0,0 +1 @@ +CONFIG_MMC_CB710=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_CQHCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_CQHCI new file mode 100644 index 000000000000..3ecbe65d0099 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_CQHCI @@ -0,0 +1 @@ +CONFIG_MMC_CQHCI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_DEBUG new file mode 100644 index 000000000000..678bd75ed43d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_DEBUG @@ -0,0 +1 @@ +# CONFIG_MMC_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_HSQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_HSQ new file mode 100644 index 000000000000..d3a1fc7067fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_HSQ @@ -0,0 +1 @@ +# CONFIG_MMC_HSQ is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_RICOH_MMC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_RICOH_MMC new file mode 100644 index 000000000000..1899ff9a0b78 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_RICOH_MMC @@ -0,0 +1 @@ +CONFIG_MMC_RICOH_MMC=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI new file mode 100644 index 000000000000..dd2edd0c647b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI @@ -0,0 +1 @@ +CONFIG_MMC_SDHCI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_ACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_ACPI new file mode 100644 index 000000000000..065143892306 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_ACPI @@ -0,0 +1 @@ +CONFIG_MMC_SDHCI_ACPI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_F_SDH30 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_F_SDH30 new file mode 100644 index 000000000000..00c6ab2f58ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_F_SDH30 @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_F_SDH30 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_IO_ACCESSORS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_IO_ACCESSORS new file mode 100644 index 000000000000..2395a1e3e41c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_IO_ACCESSORS @@ -0,0 +1 @@ +CONFIG_MMC_SDHCI_IO_ACCESSORS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PCI new file mode 100644 index 000000000000..8800540d3593 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PCI @@ -0,0 +1 @@ +CONFIG_MMC_SDHCI_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PLTFM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PLTFM new file mode 100644 index 000000000000..59c122bf5e5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_PLTFM @@ -0,0 +1 @@ +CONFIG_MMC_SDHCI_PLTFM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_XENON b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_XENON new file mode 100644 index 
000000000000..19a13aadeee0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SDHCI_XENON @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_XENON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SPI new file mode 100644 index 000000000000..30837d93e98f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_SPI @@ -0,0 +1 @@ +# CONFIG_MMC_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_TEST new file mode 100644 index 000000000000..6032d28f91b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_TEST @@ -0,0 +1 @@ +# CONFIG_MMC_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_TIFM_SD b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_TIFM_SD new file mode 100644 index 000000000000..6e0f4ae1eab6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_TIFM_SD @@ -0,0 +1 @@ +CONFIG_MMC_TIFM_SD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_USDHI6ROL0 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_USDHI6ROL0 new file mode 100644 index 000000000000..94d6a41394f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_USDHI6ROL0 @@ -0,0 +1 @@ +# CONFIG_MMC_USDHI6ROL0 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_USHC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_USHC new file mode 100644 index 000000000000..1baab6557946 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_USHC @@ -0,0 +1 @@ +CONFIG_MMC_USHC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_VIA_SDMMC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_VIA_SDMMC new file mode 100644 index 000000000000..88b7c20e7dea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_VIA_SDMMC @@ -0,0 +1 @@ +CONFIG_MMC_VIA_SDMMC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_VUB300 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_VUB300 new file mode 100644 index 000000000000..72839ca5f13e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMC_VUB300 @@ -0,0 +1 @@ +CONFIG_MMC_VUB300=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_GATHER_RCU_TABLE_FREE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_GATHER_RCU_TABLE_FREE new file mode 100644 index 000000000000..c8fc27c15961 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_GATHER_RCU_TABLE_FREE @@ -0,0 +1 @@ +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_GATHER_TABLE_FREE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_GATHER_TABLE_FREE new file mode 100644 index 000000000000..eacf2dc4696f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_GATHER_TABLE_FREE @@ -0,0 +1 @@ +CONFIG_MMU_GATHER_TABLE_FREE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_LAZY_TLB_REFCOUNT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_LAZY_TLB_REFCOUNT new file mode 100644 index 000000000000..c8fbb01fffb4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_LAZY_TLB_REFCOUNT @@ -0,0 +1 @@ +CONFIG_MMU_LAZY_TLB_REFCOUNT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_NOTIFIER b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_NOTIFIER new file mode 100644 index 000000000000..2c93a2aaa111 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MMU_NOTIFIER @@ -0,0 +1 @@ +CONFIG_MMU_NOTIFIER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULES_TREE_LOOKUP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULES_TREE_LOOKUP new file mode 100644 index 000000000000..d24f8dbaf270 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULES_TREE_LOOKUP @@ -0,0 +1 @@ +CONFIG_MODULES_TREE_LOOKUP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULES_USE_ELF_RELA b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULES_USE_ELF_RELA new file mode 100644 index 
000000000000..8f1882ca43e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULES_USE_ELF_RELA @@ -0,0 +1 @@ +CONFIG_MODULES_USE_ELF_RELA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_ALLOW_BTF_MISMATCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_ALLOW_BTF_MISMATCH new file mode 100644 index 000000000000..18722eb4707e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_ALLOW_BTF_MISMATCH @@ -0,0 +1 @@ +# CONFIG_MODULE_ALLOW_BTF_MISMATCH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_GZIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_GZIP new file mode 100644 index 000000000000..01c9c5435ecb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_GZIP @@ -0,0 +1 @@ +# CONFIG_MODULE_COMPRESS_GZIP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_NONE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_NONE new file mode 100644 index 000000000000..2168cd5666da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_NONE @@ -0,0 +1 @@ +CONFIG_MODULE_COMPRESS_NONE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_XZ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_XZ new file mode 100644 index 000000000000..d554be9bc551 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_XZ @@ -0,0 +1 @@ +# CONFIG_MODULE_COMPRESS_XZ is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_ZSTD b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_ZSTD new file mode 100644 index 000000000000..4f5f059b8d8d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_COMPRESS_ZSTD @@ -0,0 +1 @@ +# CONFIG_MODULE_COMPRESS_ZSTD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_DEBUG new file mode 100644 index 
000000000000..2cdde2aa1df3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_DEBUG @@ -0,0 +1 @@ +# CONFIG_MODULE_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_FORMAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_FORMAT new file mode 100644 index 000000000000..96c6d879efa0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_FORMAT @@ -0,0 +1 @@ +CONFIG_MODULE_SIG_FORMAT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_KEY_TYPE_ECDSA b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_KEY_TYPE_ECDSA new file mode 100644 index 000000000000..a436b46985a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_KEY_TYPE_ECDSA @@ -0,0 +1 @@ +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_KEY_TYPE_RSA b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_KEY_TYPE_RSA new file mode 100644 index 000000000000..ba723e51c9d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_SIG_KEY_TYPE_RSA @@ -0,0 +1 @@ +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_UNLOAD_TAINT_TRACKING b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_UNLOAD_TAINT_TRACKING new file mode 100644 index 000000000000..63575ab294f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MODULE_UNLOAD_TAINT_TRACKING @@ -0,0 +1 @@ +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOST b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOST new file mode 100644 index 000000000000..db207dfc5d9f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOST @@ -0,0 +1 @@ +# CONFIG_MOST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOTORCOMM_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOTORCOMM_PHY new file mode 100644 index 000000000000..6bbd457ada24 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOTORCOMM_PHY @@ -0,0 +1 @@ +# CONFIG_MOTORCOMM_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C new file mode 100644 index 000000000000..cab50daec803 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C @@ -0,0 +1 @@ +CONFIG_MOUSE_ELAN_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C_I2C new file mode 100644 index 000000000000..3bfdd5743c64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_ELAN_I2C_I2C @@ -0,0 +1 @@ +CONFIG_MOUSE_ELAN_I2C_I2C=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_GPIO new file mode 100644 index 000000000000..9aefee3c8caf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_GPIO @@ -0,0 +1 @@ +# CONFIG_MOUSE_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_SYNAPTICS_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_SYNAPTICS_I2C new file mode 100644 index 000000000000..7368088e6b3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_SYNAPTICS_I2C @@ -0,0 +1 @@ +CONFIG_MOUSE_SYNAPTICS_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_SYNAPTICS_USB b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_SYNAPTICS_USB new file mode 100644 index 000000000000..175daf73793e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_SYNAPTICS_USB @@ -0,0 +1 @@ +CONFIG_MOUSE_SYNAPTICS_USB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOXA_INTELLIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOXA_INTELLIO new file mode 100644 index 000000000000..cad28e8d19a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOXA_INTELLIO @@ -0,0 +1 @@ +# CONFIG_MOXA_INTELLIO is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_MOXA_SMARTIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOXA_SMARTIO new file mode 100644 index 000000000000..cd82be48cbb1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOXA_SMARTIO @@ -0,0 +1 @@ +# CONFIG_MOXA_SMARTIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MPILIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_MPILIB new file mode 100644 index 000000000000..a41ce5d0eaf7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MPILIB @@ -0,0 +1 @@ +CONFIG_MPILIB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MRP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MRP new file mode 100644 index 000000000000..515a828b329d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MRP @@ -0,0 +1 @@ +CONFIG_MRP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MSDOS_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_MSDOS_PARTITION new file mode 100644 index 000000000000..dda33929172c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MSDOS_PARTITION @@ -0,0 +1 @@ +CONFIG_MSDOS_PARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MSE102X b/anolis/configs/L2-OPTIONAL/default/CONFIG_MSE102X new file mode 100644 index 000000000000..198777735ec2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MSE102X @@ -0,0 +1 @@ +# CONFIG_MSE102X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MSPRO_BLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_MSPRO_BLOCK new file mode 100644 index 000000000000..94c242c790c0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MSPRO_BLOCK @@ -0,0 +1 @@ +CONFIG_MSPRO_BLOCK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MS_BLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_MS_BLOCK new file mode 100644 index 000000000000..5628a82fc6f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MS_BLOCK @@ -0,0 +1 @@ +# CONFIG_MS_BLOCK is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD new file mode 100644 index 000000000000..5a861e745c1f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD @@ -0,0 +1 @@ +CONFIG_MTD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ABSENT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ABSENT new file mode 100644 index 000000000000..243168a3440f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ABSENT @@ -0,0 +1 @@ +# CONFIG_MTD_ABSENT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_AR7_PARTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_AR7_PARTS new file mode 100644 index 000000000000..a9369c2a1abc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_AR7_PARTS @@ -0,0 +1 @@ +# CONFIG_MTD_AR7_PARTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLKDEVS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLKDEVS new file mode 100644 index 000000000000..b59074744f6e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLKDEVS @@ -0,0 +1 @@ +CONFIG_MTD_BLKDEVS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK new file mode 100644 index 000000000000..74f992fdaf8d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK @@ -0,0 +1 @@ +CONFIG_MTD_BLOCK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK2MTD b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK2MTD new file mode 100644 index 000000000000..4a942ccb5fb2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK2MTD @@ -0,0 +1 @@ +# CONFIG_MTD_BLOCK2MTD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK_RO b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK_RO new file mode 100644 index 000000000000..d49910a2738f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_BLOCK_RO @@ -0,0 +1 @@ +# 
CONFIG_MTD_BLOCK_RO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CFI_I1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CFI_I1 new file mode 100644 index 000000000000..8976f930f33f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CFI_I1 @@ -0,0 +1 @@ +CONFIG_MTD_CFI_I1=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CFI_I2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CFI_I2 new file mode 100644 index 000000000000..24fdbdfe22fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CFI_I2 @@ -0,0 +1 @@ +CONFIG_MTD_CFI_I2=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CMDLINE_PARTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CMDLINE_PARTS new file mode 100644 index 000000000000..63ac9245bb53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_CMDLINE_PARTS @@ -0,0 +1 @@ +# CONFIG_MTD_CMDLINE_PARTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_COMPLEX_MAPPINGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_COMPLEX_MAPPINGS new file mode 100644 index 000000000000..650cf2c1e99a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_COMPLEX_MAPPINGS @@ -0,0 +1 @@ +# CONFIG_MTD_COMPLEX_MAPPINGS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_DATAFLASH b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_DATAFLASH new file mode 100644 index 000000000000..c31b6910b459 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_DATAFLASH @@ -0,0 +1 @@ +# CONFIG_MTD_DATAFLASH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_DOCG3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_DOCG3 new file mode 100644 index 000000000000..4e4b41f4ab46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_DOCG3 @@ -0,0 +1 @@ +# CONFIG_MTD_DOCG3 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_HYPERBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_HYPERBUS new file mode 100644 
index 000000000000..e9e0a97331d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_HYPERBUS @@ -0,0 +1 @@ +# CONFIG_MTD_HYPERBUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_INTEL_VR_NOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_INTEL_VR_NOR new file mode 100644 index 000000000000..7099c7895f8f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_INTEL_VR_NOR @@ -0,0 +1 @@ +# CONFIG_MTD_INTEL_VR_NOR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_JEDECPROBE b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_JEDECPROBE new file mode 100644 index 000000000000..1af3e1262579 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_JEDECPROBE @@ -0,0 +1 @@ +# CONFIG_MTD_JEDECPROBE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_LPDDR b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_LPDDR new file mode 100644 index 000000000000..eebb90ae3858 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_LPDDR @@ -0,0 +1 @@ +# CONFIG_MTD_LPDDR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_1 new file mode 100644 index 000000000000..bcfdd7b7a451 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_1 @@ -0,0 +1 @@ +CONFIG_MTD_MAP_BANK_WIDTH_1=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_2 new file mode 100644 index 000000000000..21d0f95b273c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_2 @@ -0,0 +1 @@ +CONFIG_MTD_MAP_BANK_WIDTH_2=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_4 new file mode 100644 index 000000000000..e98e1687e1fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MAP_BANK_WIDTH_4 
@@ -0,0 +1 @@ +CONFIG_MTD_MAP_BANK_WIDTH_4=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MCHP23K256 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MCHP23K256 new file mode 100644 index 000000000000..ed6627e35c87 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MCHP23K256 @@ -0,0 +1 @@ +# CONFIG_MTD_MCHP23K256 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MCHP48L640 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MCHP48L640 new file mode 100644 index 000000000000..e496f9040955 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MCHP48L640 @@ -0,0 +1 @@ +# CONFIG_MTD_MCHP48L640 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MTDRAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MTDRAM new file mode 100644 index 000000000000..70da5d08cd45 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_MTDRAM @@ -0,0 +1 @@ +# CONFIG_MTD_MTDRAM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_MXIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_MXIC new file mode 100644 index 000000000000..4e6b42035feb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_MXIC @@ -0,0 +1 @@ +# CONFIG_MTD_NAND_ECC_MXIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_SW_BCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_SW_BCH new file mode 100644 index 000000000000..68691d93530a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_SW_BCH @@ -0,0 +1 @@ +# CONFIG_MTD_NAND_ECC_SW_BCH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_SW_HAMMING b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_SW_HAMMING new file mode 100644 index 000000000000..81d0f3e93ba8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_NAND_ECC_SW_HAMMING @@ -0,0 +1 @@ +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ONENAND b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ONENAND new file mode 100644 index 000000000000..7f0bdb85a5f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ONENAND @@ -0,0 +1 @@ +# CONFIG_MTD_ONENAND is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_OOPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_OOPS new file mode 100644 index 000000000000..b6c42a73aeae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_OOPS @@ -0,0 +1 @@ +# CONFIG_MTD_OOPS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PARTITIONED_MASTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PARTITIONED_MASTER new file mode 100644 index 000000000000..118d5ecd72b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PARTITIONED_MASTER @@ -0,0 +1 @@ +# CONFIG_MTD_PARTITIONED_MASTER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PHRAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PHRAM new file mode 100644 index 000000000000..5ce19c74da92 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PHRAM @@ -0,0 +1 @@ +# CONFIG_MTD_PHRAM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PLATRAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PLATRAM new file mode 100644 index 000000000000..dd004c169a49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PLATRAM @@ -0,0 +1 @@ +# CONFIG_MTD_PLATRAM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PMC551 b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PMC551 new file mode 100644 index 000000000000..6bf4ad0b570d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_PMC551 @@ -0,0 +1 @@ +# CONFIG_MTD_PMC551 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_RAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_RAM new file mode 100644 index 000000000000..151de3c5249c --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_RAM @@ -0,0 +1 @@ +# CONFIG_MTD_RAM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_RAW_NAND b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_RAW_NAND new file mode 100644 index 000000000000..cbd84d732dec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_RAW_NAND @@ -0,0 +1 @@ +# CONFIG_MTD_RAW_NAND is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_REDBOOT_PARTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_REDBOOT_PARTS new file mode 100644 index 000000000000..8e87e0d6fd9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_REDBOOT_PARTS @@ -0,0 +1 @@ +# CONFIG_MTD_REDBOOT_PARTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ROM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ROM new file mode 100644 index 000000000000..1efb84d75cbf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_ROM @@ -0,0 +1 @@ +# CONFIG_MTD_ROM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SLRAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SLRAM new file mode 100644 index 000000000000..09db4e8e749c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SLRAM @@ -0,0 +1 @@ +# CONFIG_MTD_SLRAM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SPI_NAND b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SPI_NAND new file mode 100644 index 000000000000..6df5264428b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SPI_NAND @@ -0,0 +1 @@ +# CONFIG_MTD_SPI_NAND is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SPI_NOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SPI_NOR new file mode 100644 index 000000000000..7d0f3cae3b91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SPI_NOR @@ -0,0 +1 @@ +# CONFIG_MTD_SPI_NOR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SST25L 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SST25L new file mode 100644 index 000000000000..0a42bba26ffc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SST25L @@ -0,0 +1 @@ +# CONFIG_MTD_SST25L is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SWAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SWAP new file mode 100644 index 000000000000..d5892dbb7fb9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SWAP @@ -0,0 +1 @@ +# CONFIG_MTD_SWAP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_TESTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_TESTS new file mode 100644 index 000000000000..bca68acd69fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_TESTS @@ -0,0 +1 @@ +# CONFIG_MTD_TESTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI new file mode 100644 index 000000000000..d610c466a3fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI @@ -0,0 +1 @@ +CONFIG_MTD_UBI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_BEB_LIMIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_BEB_LIMIT new file mode 100644 index 000000000000..69372005c4da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_BEB_LIMIT @@ -0,0 +1 @@ +CONFIG_MTD_UBI_BEB_LIMIT=20 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_BLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_BLOCK new file mode 100644 index 000000000000..3b7526c293f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_BLOCK @@ -0,0 +1 @@ +# CONFIG_MTD_UBI_BLOCK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_FASTMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_FASTMAP new file mode 100644 index 000000000000..f7a8769e73f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_FASTMAP @@ -0,0 +1 @@ +# CONFIG_MTD_UBI_FASTMAP is 
not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_GLUEBI b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_GLUEBI new file mode 100644 index 000000000000..8b3dd1030f6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_GLUEBI @@ -0,0 +1 @@ +# CONFIG_MTD_UBI_GLUEBI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_WL_THRESHOLD b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_WL_THRESHOLD new file mode 100644 index 000000000000..184d7026df85 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_UBI_WL_THRESHOLD @@ -0,0 +1 @@ +CONFIG_MTD_UBI_WL_THRESHOLD=4096 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MUTEX_SPIN_ON_OWNER b/anolis/configs/L2-OPTIONAL/default/CONFIG_MUTEX_SPIN_ON_OWNER new file mode 100644 index 000000000000..46806d5badde --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_MUTEX_SPIN_ON_OWNER @@ -0,0 +1 @@ +CONFIG_MUTEX_SPIN_ON_OWNER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NATIONAL_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_NATIONAL_PHY new file mode 100644 index 000000000000..7bd6621d80aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NATIONAL_PHY @@ -0,0 +1 @@ +CONFIG_NATIONAL_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NCE new file mode 100644 index 000000000000..2ea655b13d63 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NCE @@ -0,0 +1 @@ +CONFIG_NCE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NCN26000_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_NCN26000_PHY new file mode 100644 index 000000000000..d16d92953dc0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NCN26000_PHY @@ -0,0 +1 @@ +# CONFIG_NCN26000_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ND_CLAIM b/anolis/configs/L2-OPTIONAL/default/CONFIG_ND_CLAIM new file mode 100644 index 000000000000..79464b155706 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_ND_CLAIM @@ -0,0 +1 @@ +CONFIG_ND_CLAIM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NE6X b/anolis/configs/L2-OPTIONAL/default/CONFIG_NE6X new file mode 100644 index 000000000000..46fca559be2f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NE6X @@ -0,0 +1 @@ +CONFIG_NE6X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NE6XVF b/anolis/configs/L2-OPTIONAL/default/CONFIG_NE6XVF new file mode 100644 index 000000000000..e2f1ffded646 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NE6XVF @@ -0,0 +1 @@ +CONFIG_NE6XVF=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_DMA_MAP_STATE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_DMA_MAP_STATE new file mode 100644 index 000000000000..086b740b0c0e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_DMA_MAP_STATE @@ -0,0 +1 @@ +CONFIG_NEED_DMA_MAP_STATE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK new file mode 100644 index 000000000000..b795ff30a196 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK @@ -0,0 +1 @@ +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK new file mode 100644 index 000000000000..aff172e8e109 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK @@ -0,0 +1 @@ +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_SG_DMA_FLAGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_SG_DMA_FLAGS new file mode 100644 index 000000000000..46c8245b45ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_SG_DMA_FLAGS @@ -0,0 +1 @@ +CONFIG_NEED_SG_DMA_FLAGS=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_SG_DMA_LENGTH b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_SG_DMA_LENGTH new file mode 100644 index 000000000000..63f7327a1fbc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEED_SG_DMA_LENGTH @@ -0,0 +1 @@ +CONFIG_NEED_SG_DMA_LENGTH=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NETCONSOLE_EXTENDED_LOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETCONSOLE_EXTENDED_LOG new file mode 100644 index 000000000000..0791a32f491c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETCONSOLE_EXTENDED_LOG @@ -0,0 +1 @@ +# CONFIG_NETCONSOLE_EXTENDED_LOG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_BPF_LINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_BPF_LINK new file mode 100644 index 000000000000..551bef0dacc4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_BPF_LINK @@ -0,0 +1 @@ +CONFIG_NETFILTER_BPF_LINK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_NETLINK_HOOK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_NETLINK_HOOK new file mode 100644 index 000000000000..9f6748bb605d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_NETLINK_HOOK @@ -0,0 +1 @@ +# CONFIG_NETFILTER_NETLINK_HOOK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_SKIP_EGRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_SKIP_EGRESS new file mode 100644 index 000000000000..e1be1fcebc63 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_SKIP_EGRESS @@ -0,0 +1 @@ +CONFIG_NETFILTER_SKIP_EGRESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_XTABLES_COMPAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_XTABLES_COMPAT new file mode 100644 index 000000000000..95501c067313 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETFILTER_XTABLES_COMPAT @@ -0,0 +1 @@ +# CONFIG_NETFILTER_XTABLES_COMPAT is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_NETPOLL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETPOLL new file mode 100644 index 000000000000..1740767a7b96 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETPOLL @@ -0,0 +1 @@ +CONFIG_NETPOLL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NETXEN_NIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETXEN_NIC new file mode 100644 index 000000000000..0ecefb663727 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NETXEN_NIC @@ -0,0 +1 @@ +CONFIG_NETXEN_NIC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_9P b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_9P new file mode 100644 index 000000000000..2fe70dcce82d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_9P @@ -0,0 +1 @@ +# CONFIG_NET_9P is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DEVLINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DEVLINK new file mode 100644 index 000000000000..a27d13004ba0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DEVLINK @@ -0,0 +1 @@ +CONFIG_NET_DEVLINK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DEV_REFCNT_TRACKER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DEV_REFCNT_TRACKER new file mode 100644 index 000000000000..e13c9133b3d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DEV_REFCNT_TRACKER @@ -0,0 +1 @@ +# CONFIG_NET_DEV_REFCNT_TRACKER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DSA b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DSA new file mode 100644 index 000000000000..afa1d60e979d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_DSA @@ -0,0 +1 @@ +# CONFIG_NET_DSA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_EGRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_EGRESS new file mode 100644 index 000000000000..67aba9ef8f9e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_EGRESS @@ -0,0 +1 @@ 
+CONFIG_NET_EGRESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FLOW_LIMIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FLOW_LIMIT new file mode 100644 index 000000000000..466100548f7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FLOW_LIMIT @@ -0,0 +1 @@ +CONFIG_NET_FLOW_LIMIT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FOU b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FOU new file mode 100644 index 000000000000..12f5c083633a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FOU @@ -0,0 +1 @@ +# CONFIG_NET_FOU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FOU_IP_TUNNELS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FOU_IP_TUNNELS new file mode 100644 index 000000000000..b2cb0a55c763 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_FOU_IP_TUNNELS @@ -0,0 +1 @@ +# CONFIG_NET_FOU_IP_TUNNELS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_HANDSHAKE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_HANDSHAKE new file mode 100644 index 000000000000..1b0aced97004 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_HANDSHAKE @@ -0,0 +1 @@ +CONFIG_NET_HANDSHAKE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IFE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IFE new file mode 100644 index 000000000000..3925520244b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IFE @@ -0,0 +1 @@ +# CONFIG_NET_IFE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_INGRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_INGRESS new file mode 100644 index 000000000000..541df22a0079 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_INGRESS @@ -0,0 +1 @@ +CONFIG_NET_INGRESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IPVTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IPVTI new file mode 100644 index 000000000000..e6268e22d85e --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IPVTI @@ -0,0 +1 @@ +CONFIG_NET_IPVTI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IP_TUNNEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IP_TUNNEL new file mode 100644 index 000000000000..e8d57916ba1e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_IP_TUNNEL @@ -0,0 +1 @@ +CONFIG_NET_IP_TUNNEL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_NCSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_NCSI new file mode 100644 index 000000000000..53eaf483628a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_NCSI @@ -0,0 +1 @@ +# CONFIG_NET_NCSI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_NS_REFCNT_TRACKER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_NS_REFCNT_TRACKER new file mode 100644 index 000000000000..152a143df529 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_NS_REFCNT_TRACKER @@ -0,0 +1 @@ +# CONFIG_NET_NS_REFCNT_TRACKER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_POLL_CONTROLLER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_POLL_CONTROLLER new file mode 100644 index 000000000000..681844676a79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_POLL_CONTROLLER @@ -0,0 +1 @@ +CONFIG_NET_POLL_CONTROLLER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_REDIRECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_REDIRECT new file mode 100644 index 000000000000..089417665917 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_REDIRECT @@ -0,0 +1 @@ +CONFIG_NET_REDIRECT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_RX_BUSY_POLL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_RX_BUSY_POLL new file mode 100644 index 000000000000..7c8ec0f83b8c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_RX_BUSY_POLL @@ -0,0 +1 @@ +CONFIG_NET_RX_BUSY_POLL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SB1000 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SB1000 new file mode 100644 index 000000000000..c776a1581c04 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SB1000 @@ -0,0 +1 @@ +# CONFIG_NET_SB1000 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SCH_FIFO b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SCH_FIFO new file mode 100644 index 000000000000..315c5d560e63 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SCH_FIFO @@ -0,0 +1 @@ +CONFIG_NET_SCH_FIFO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SCH_MQPRIO_LIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SCH_MQPRIO_LIB new file mode 100644 index 000000000000..78e5df30d76b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SCH_MQPRIO_LIB @@ -0,0 +1 @@ +CONFIG_NET_SCH_MQPRIO_LIB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SELFTESTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SELFTESTS new file mode 100644 index 000000000000..0018559cc430 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SELFTESTS @@ -0,0 +1 @@ +CONFIG_NET_SELFTESTS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SOCK_MSG b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SOCK_MSG new file mode 100644 index 000000000000..188ef3975f3f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SOCK_MSG @@ -0,0 +1 @@ +CONFIG_NET_SOCK_MSG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SWITCHDEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SWITCHDEV new file mode 100644 index 000000000000..f4f578e755fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_SWITCHDEV @@ -0,0 +1 @@ +CONFIG_NET_SWITCHDEV=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_UDP_TUNNEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_UDP_TUNNEL new file mode 100644 index 000000000000..1a84ce40f09d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_UDP_TUNNEL @@ -0,0 +1 @@ 
+CONFIG_NET_UDP_TUNNEL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_3COM b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_3COM new file mode 100644 index 000000000000..ed6108c188cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_3COM @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_3COM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADAPTEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADAPTEC new file mode 100644 index 000000000000..c96958732776 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADAPTEC @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_ADAPTEC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADI new file mode 100644 index 000000000000..1316ed732394 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ADI @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_ADI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AGERE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AGERE new file mode 100644 index 000000000000..3647271ce911 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AGERE @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_AGERE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ALACRITECH b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ALACRITECH new file mode 100644 index 000000000000..f1a98f8bb816 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ALACRITECH @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_ALACRITECH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ALTEON b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ALTEON new file mode 100644 index 000000000000..88beca48ee98 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ALTEON @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_ALTEON is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AMAZON b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AMAZON new file mode 100644 index 000000000000..71e03d0794ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AMAZON @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_AMAZON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AQUANTIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AQUANTIA new file mode 100644 index 000000000000..f8ae0ca05dd8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_AQUANTIA @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_AQUANTIA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ARC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ARC new file mode 100644 index 000000000000..98de351367a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ARC @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_ARC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ASIX b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ASIX new file mode 100644 index 000000000000..ce6db20d2921 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ASIX @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_ASIX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ATHEROS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ATHEROS new file mode 100644 index 000000000000..96c98bc6a5fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ATHEROS @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_ATHEROS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_BZWX b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_BZWX new file mode 100644 index 000000000000..cec1d47ff35d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_BZWX @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_BZWX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CADENCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CADENCE new file 
mode 100644 index 000000000000..e5f7e015d474 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CADENCE @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_CADENCE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CAVIUM b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CAVIUM new file mode 100644 index 000000000000..5ae580523a42 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CAVIUM @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_CAVIUM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CHELSIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CHELSIO new file mode 100644 index 000000000000..b039e40c2ece --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CHELSIO @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_CHELSIO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CORTINA b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CORTINA new file mode 100644 index 000000000000..3e188cd812b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_CORTINA @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_CORTINA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_DAVICOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_DAVICOM new file mode 100644 index 000000000000..acb536eaa595 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_DAVICOM @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_DAVICOM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_DLINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_DLINK new file mode 100644 index 000000000000..181e6a008c54 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_DLINK @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_DLINK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ENGLEDER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ENGLEDER new file mode 100644 index 000000000000..e25daf9651f9 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ENGLEDER @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_ENGLEDER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_EZCHIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_EZCHIP new file mode 100644 index 000000000000..6b7cb989253d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_EZCHIP @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_EZCHIP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_FUNGIBLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_FUNGIBLE new file mode 100644 index 000000000000..42e0b6a45b82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_FUNGIBLE @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_FUNGIBLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_GOOGLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_GOOGLE new file mode 100644 index 000000000000..e9bdb7cf1188 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_GOOGLE @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_GOOGLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_I825XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_I825XX new file mode 100644 index 000000000000..0647bca02a1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_I825XX @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_I825XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_LITEX b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_LITEX new file mode 100644 index 000000000000..b1a296f33ea0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_LITEX @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_LITEX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MARVELL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MARVELL new file mode 100644 index 000000000000..ebe703be2253 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MARVELL @@ -0,0 +1 @@ +# 
CONFIG_NET_VENDOR_MARVELL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICREL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICREL new file mode 100644 index 000000000000..d359479e318f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICREL @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_MICREL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROCHIP b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROCHIP new file mode 100644 index 000000000000..8a7b69b77c49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROCHIP @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_MICROCHIP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROSEMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROSEMI new file mode 100644 index 000000000000..e39610d5165e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROSEMI @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_MICROSEMI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROSOFT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROSOFT new file mode 100644 index 000000000000..f873fb19897f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MICROSOFT @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_MICROSOFT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MYRI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MYRI new file mode 100644 index 000000000000..9a7d24c542ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MYRI @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_MYRI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NATSEMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NATSEMI new file mode 100644 index 000000000000..f2bf134ab6c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NATSEMI @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_NATSEMI is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NETERION b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NETERION new file mode 100644 index 000000000000..12910e830330 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NETERION @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_NETERION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NETRONOME b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NETRONOME new file mode 100644 index 000000000000..fc775de95e41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NETRONOME @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_NETRONOME=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NI new file mode 100644 index 000000000000..8504bfca5d82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NI @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_NI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NVIDIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NVIDIA new file mode 100644 index 000000000000..27ee4d58cc11 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_NVIDIA @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_NVIDIA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_OKI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_OKI new file mode 100644 index 000000000000..e5b745dc609a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_OKI @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_OKI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_PACKET_ENGINES b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_PACKET_ENGINES new file mode 100644 index 000000000000..7a9eec2cfd29 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_PACKET_ENGINES @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_PENSANDO 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_PENSANDO new file mode 100644 index 000000000000..aba7e5839300 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_PENSANDO @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_PENSANDO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_QLOGIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_QLOGIC new file mode 100644 index 000000000000..868b452abfbc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_QLOGIC @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_QLOGIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_RDC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_RDC new file mode 100644 index 000000000000..4d84f77dc971 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_RDC @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_RDC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_REALTEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_REALTEK new file mode 100644 index 000000000000..1e65bbda6bfb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_REALTEK @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_REALTEK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_RENESAS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_RENESAS new file mode 100644 index 000000000000..c190891099eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_RENESAS @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_RENESAS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ROCKER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ROCKER new file mode 100644 index 000000000000..c946a8575638 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_ROCKER @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_ROCKER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SAMSUNG b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SAMSUNG new file mode 100644 index 
000000000000..2542b3e88280 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SAMSUNG @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_SAMSUNG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SEEQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SEEQ new file mode 100644 index 000000000000..6fe8245eb130 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SEEQ @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_SEEQ is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SILAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SILAN new file mode 100644 index 000000000000..07a129493b77 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SILAN @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_SILAN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SIS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SIS new file mode 100644 index 000000000000..5ff2e309b3f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SIS @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_SIS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SMSC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SMSC new file mode 100644 index 000000000000..4e3e7aec851b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SMSC @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_SMSC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SOCIONEXT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SOCIONEXT new file mode 100644 index 000000000000..7e4d43ba55e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SOCIONEXT @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_SOCIONEXT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_STMICRO b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_STMICRO new file mode 100644 index 000000000000..040f835c6095 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_STMICRO @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_STMICRO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SUN b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SUN new file mode 100644 index 000000000000..a4ce7787579a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SUN @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_SUN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SYNOPSYS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SYNOPSYS new file mode 100644 index 000000000000..979e832d6943 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_SYNOPSYS @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_SYNOPSYS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_TEHUTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_TEHUTI new file mode 100644 index 000000000000..7c0c6ee39d1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_TEHUTI @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_TEHUTI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_TI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_TI new file mode 100644 index 000000000000..a9395e8cca7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_TI @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_TI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_VERTEXCOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_VERTEXCOM new file mode 100644 index 000000000000..1005473aa017 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_VERTEXCOM @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_VERTEXCOM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_VIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_VIA new file mode 100644 index 000000000000..ddca33bb41ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_VIA @@ -0,0 +1 @@ +# 
CONFIG_NET_VENDOR_VIA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_WIZNET b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_WIZNET new file mode 100644 index 000000000000..08efd75ae50a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_WIZNET @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_WIZNET is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_XILINX b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_XILINX new file mode 100644 index 000000000000..46ccaba73f70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_XILINX @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_XILINX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NEW_LEDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEW_LEDS new file mode 100644 index 000000000000..a5eb3c0a2d62 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NEW_LEDS @@ -0,0 +1 @@ +CONFIG_NEW_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFC new file mode 100644 index 000000000000..73cd9395ef11 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFC @@ -0,0 +1 @@ +# CONFIG_NFC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP new file mode 100644 index 000000000000..14f22b12c949 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP @@ -0,0 +1 @@ +CONFIG_NFP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_APP_ABM_NIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_APP_ABM_NIC new file mode 100644 index 000000000000..cbb99c4e940f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_APP_ABM_NIC @@ -0,0 +1 @@ +CONFIG_NFP_APP_ABM_NIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_APP_FLOWER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_APP_FLOWER new file mode 100644 index 000000000000..d9ff8a178141 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_APP_FLOWER @@ -0,0 +1 @@ +CONFIG_NFP_APP_FLOWER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_DEBUG new file mode 100644 index 000000000000..b7f75b3f3e46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_DEBUG @@ -0,0 +1 @@ +# CONFIG_NFP_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_NET_IPSEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_NET_IPSEC new file mode 100644 index 000000000000..680092ad5e62 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFP_NET_IPSEC @@ -0,0 +1 @@ +CONFIG_NFP_NET_IPSEC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFSD_V2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFSD_V2 new file mode 100644 index 000000000000..bcba20a2d853 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFSD_V2 @@ -0,0 +1 @@ +# CONFIG_NFSD_V2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_DEBUG new file mode 100644 index 000000000000..fd9df13faca8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_DEBUG @@ -0,0 +1 @@ +CONFIG_NFS_DEBUG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_USE_KERNEL_DNS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_USE_KERNEL_DNS new file mode 100644 index 000000000000..159758ff0791 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_USE_KERNEL_DNS @@ -0,0 +1 @@ +CONFIG_NFS_USE_KERNEL_DNS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN new file mode 100644 index 000000000000..1a02c09043b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN @@ -0,0 +1 @@ +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_2_SSC_HELPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_2_SSC_HELPER new file mode 100644 index 000000000000..1e4a2c7471a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_2_SSC_HELPER @@ -0,0 +1 @@ +CONFIG_NFS_V4_2_SSC_HELPER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_SECURITY_LABEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_SECURITY_LABEL new file mode 100644 index 000000000000..aa664ac8be56 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFS_V4_SECURITY_LABEL @@ -0,0 +1 @@ +CONFIG_NFS_V4_SECURITY_LABEL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFTL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFTL new file mode 100644 index 000000000000..c834b8376648 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFTL @@ -0,0 +1 @@ +# CONFIG_NFTL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFT_REJECT_IPV4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFT_REJECT_IPV4 new file mode 100644 index 000000000000..dd70ddc432d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFT_REJECT_IPV4 @@ -0,0 +1 @@ +CONFIG_NFT_REJECT_IPV4=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFT_REJECT_NETDEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFT_REJECT_NETDEV new file mode 100644 index 000000000000..2b94f900d859 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFT_REJECT_NETDEV @@ -0,0 +1 @@ +# CONFIG_NFT_REJECT_NETDEV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_CONNTRACK_OVS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_CONNTRACK_OVS new file mode 100644 index 000000000000..9ef5988c4b82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_CONNTRACK_OVS @@ -0,0 +1 @@ +CONFIG_NF_CONNTRACK_OVS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_DEFRAG_IPV4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_DEFRAG_IPV4 new file mode 100644 index 
000000000000..cfe1cb42cac6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_DEFRAG_IPV4 @@ -0,0 +1 @@ +CONFIG_NF_DEFRAG_IPV4=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_FLOW_TABLE_PROCFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_FLOW_TABLE_PROCFS new file mode 100644 index 000000000000..16da187039d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_FLOW_TABLE_PROCFS @@ -0,0 +1 @@ +# CONFIG_NF_FLOW_TABLE_PROCFS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_LOG_SYSLOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_LOG_SYSLOG new file mode 100644 index 000000000000..920e389bf1a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_LOG_SYSLOG @@ -0,0 +1 @@ +CONFIG_NF_LOG_SYSLOG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_NAT_OVS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_NAT_OVS new file mode 100644 index 000000000000..dc07edb9e1a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_NAT_OVS @@ -0,0 +1 @@ +CONFIG_NF_NAT_OVS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_TPROXY_IPV4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_TPROXY_IPV4 new file mode 100644 index 000000000000..995fadff97fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NF_TPROXY_IPV4 @@ -0,0 +1 @@ +CONFIG_NF_TPROXY_IPV4=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NILFS2_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NILFS2_FS new file mode 100644 index 000000000000..23c53bef0372 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NILFS2_FS @@ -0,0 +1 @@ +# CONFIG_NILFS2_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NL80211_TESTMODE b/anolis/configs/L2-OPTIONAL/default/CONFIG_NL80211_TESTMODE new file mode 100644 index 000000000000..fc4435e08a57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NL80211_TESTMODE @@ -0,0 +1 @@ +# CONFIG_NL80211_TESTMODE is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLATTR b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLATTR new file mode 100644 index 000000000000..8aee114c268c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLATTR @@ -0,0 +1 @@ +CONFIG_NLATTR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_1250 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_1250 new file mode 100644 index 000000000000..2ab2aa02648d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_1250 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_1250=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_1251 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_1251 new file mode 100644 index 000000000000..2c23320ef53d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_1251 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_1251=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_437 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_437 new file mode 100644 index 000000000000..28e6c82d3ef5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_437 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_437=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_737 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_737 new file mode 100644 index 000000000000..7fbb36904302 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_737 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_737=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_775 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_775 new file mode 100644 index 000000000000..a6dbd8ecff53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_775 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_775=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_850 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_850 new file mode 100644 index 000000000000..37a7de91b803 --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_850 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_850=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_852 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_852 new file mode 100644 index 000000000000..6154f8d3d76c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_852 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_852=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_855 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_855 new file mode 100644 index 000000000000..347b0df13fc3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_855 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_855=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_857 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_857 new file mode 100644 index 000000000000..f44591925e08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_857 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_857=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_860 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_860 new file mode 100644 index 000000000000..5e8d61ab1b30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_860 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_860=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_861 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_861 new file mode 100644 index 000000000000..39d141b6b1d5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_861 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_861=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_862 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_862 new file mode 100644 index 000000000000..cbdc58462beb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_862 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_862=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_863 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_863 new file mode 100644 index 000000000000..f0274505ce06 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_863 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_863=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_864 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_864 new file mode 100644 index 000000000000..de63e9d614a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_864 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_864=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_865 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_865 new file mode 100644 index 000000000000..ebbe1cdbf095 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_865 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_865=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_866 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_866 new file mode 100644 index 000000000000..694549836f6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_866 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_866=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_869 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_869 new file mode 100644 index 000000000000..3b4b8bf73e26 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_869 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_869=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_874 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_874 new file mode 100644 index 000000000000..eead1363babb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_874 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_874=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_932 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_932 new file mode 100644 index 
000000000000..f423190f8232 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_932 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_932=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_949 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_949 new file mode 100644 index 000000000000..f67e8c400ed6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_CODEPAGE_949 @@ -0,0 +1 @@ +CONFIG_NLS_CODEPAGE_949=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_1 new file mode 100644 index 000000000000..883240974b12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_1 @@ -0,0 +1 @@ +CONFIG_NLS_ISO8859_1=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_13 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_13 new file mode 100644 index 000000000000..78ad020a6fa8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_13 @@ -0,0 +1 @@ +CONFIG_NLS_ISO8859_13=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_14 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_14 new file mode 100644 index 000000000000..94002d530925 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_14 @@ -0,0 +1 @@ +CONFIG_NLS_ISO8859_14=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_15 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_15 new file mode 100644 index 000000000000..19eb61c9c4ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_15 @@ -0,0 +1 @@ +CONFIG_NLS_ISO8859_15=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_2 new file mode 100644 index 000000000000..13f70956ddd1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_2 @@ -0,0 +1 @@ +CONFIG_NLS_ISO8859_2=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_3 new file mode 100644 index 000000000000..73b4a2e7d5d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_3 @@ -0,0 +1 @@ +CONFIG_NLS_ISO8859_3=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_4 new file mode 100644 index 000000000000..bd726a9a473a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_4 @@ -0,0 +1 @@ +CONFIG_NLS_ISO8859_4=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_5 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_5 new file mode 100644 index 000000000000..5f1dc8d93321 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_5 @@ -0,0 +1 @@ +CONFIG_NLS_ISO8859_5=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_6 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_6 new file mode 100644 index 000000000000..f2a298248242 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_6 @@ -0,0 +1 @@ +CONFIG_NLS_ISO8859_6=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_7 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_7 new file mode 100644 index 000000000000..27e788c91019 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_7 @@ -0,0 +1 @@ +CONFIG_NLS_ISO8859_7=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_8 new file mode 100644 index 000000000000..23288bc773fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_8 @@ -0,0 +1 @@ +CONFIG_NLS_ISO8859_8=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_9 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_9 new file mode 100644 index 000000000000..155b6f80c6c4 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_ISO8859_9 @@ -0,0 +1 @@ +CONFIG_NLS_ISO8859_9=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_KOI8_R b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_KOI8_R new file mode 100644 index 000000000000..55956c2df327 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_KOI8_R @@ -0,0 +1 @@ +CONFIG_NLS_KOI8_R=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_KOI8_U b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_KOI8_U new file mode 100644 index 000000000000..81ce86b0f724 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_KOI8_U @@ -0,0 +1 @@ +CONFIG_NLS_KOI8_U=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CELTIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CELTIC new file mode 100644 index 000000000000..2eeb972185af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CELTIC @@ -0,0 +1 @@ +CONFIG_NLS_MAC_CELTIC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CENTEURO b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CENTEURO new file mode 100644 index 000000000000..8f378fa0b1f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CENTEURO @@ -0,0 +1 @@ +CONFIG_NLS_MAC_CENTEURO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CROATIAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CROATIAN new file mode 100644 index 000000000000..450569f5a628 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CROATIAN @@ -0,0 +1 @@ +CONFIG_NLS_MAC_CROATIAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CYRILLIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CYRILLIC new file mode 100644 index 000000000000..027531c44904 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_CYRILLIC @@ -0,0 +1 @@ +CONFIG_NLS_MAC_CYRILLIC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_GAELIC 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_GAELIC new file mode 100644 index 000000000000..d3617cd61298 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_GAELIC @@ -0,0 +1 @@ +CONFIG_NLS_MAC_GAELIC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_GREEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_GREEK new file mode 100644 index 000000000000..3d52cb40d1b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_GREEK @@ -0,0 +1 @@ +CONFIG_NLS_MAC_GREEK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ICELAND b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ICELAND new file mode 100644 index 000000000000..edb7121c9e65 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ICELAND @@ -0,0 +1 @@ +CONFIG_NLS_MAC_ICELAND=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_INUIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_INUIT new file mode 100644 index 000000000000..877602ad2dba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_INUIT @@ -0,0 +1 @@ +CONFIG_NLS_MAC_INUIT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMAN new file mode 100644 index 000000000000..256f7e8a83da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMAN @@ -0,0 +1 @@ +CONFIG_NLS_MAC_ROMAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMANIAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMANIAN new file mode 100644 index 000000000000..1b9b8506afd6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_ROMANIAN @@ -0,0 +1 @@ +CONFIG_NLS_MAC_ROMANIAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_TURKISH b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_TURKISH new file mode 100644 index 000000000000..f2b486a5b53d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_MAC_TURKISH @@ -0,0 +1 
@@ +CONFIG_NLS_MAC_TURKISH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_UCS2_UTILS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_UCS2_UTILS new file mode 100644 index 000000000000..632113ac8592 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NLS_UCS2_UTILS @@ -0,0 +1 @@ +CONFIG_NLS_UCS2_UTILS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NOP_TRACER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOP_TRACER new file mode 100644 index 000000000000..bdae48953dc3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOP_TRACER @@ -0,0 +1 @@ +CONFIG_NOP_TRACER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NOP_USB_XCEIV b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOP_USB_XCEIV new file mode 100644 index 000000000000..647c72934953 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOP_USB_XCEIV @@ -0,0 +1 @@ +# CONFIG_NOP_USB_XCEIV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NOTIFIER_ERROR_INJECTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOTIFIER_ERROR_INJECTION new file mode 100644 index 000000000000..44e141c457ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOTIFIER_ERROR_INJECTION @@ -0,0 +1 @@ +# CONFIG_NOTIFIER_ERROR_INJECTION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NOUVEAU_DEBUG_MMU b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOUVEAU_DEBUG_MMU new file mode 100644 index 000000000000..5d30f458862f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOUVEAU_DEBUG_MMU @@ -0,0 +1 @@ +# CONFIG_NOUVEAU_DEBUG_MMU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NOUVEAU_DEBUG_PUSH b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOUVEAU_DEBUG_PUSH new file mode 100644 index 000000000000..5614c2c49809 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NOUVEAU_DEBUG_PUSH @@ -0,0 +1 @@ +# CONFIG_NOUVEAU_DEBUG_PUSH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NO_HZ_COMMON 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_NO_HZ_COMMON new file mode 100644 index 000000000000..8ad203b24298 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NO_HZ_COMMON @@ -0,0 +1 @@ +CONFIG_NO_HZ_COMMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_EPF b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_EPF new file mode 100644 index 000000000000..b9df2cd6a6a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_EPF @@ -0,0 +1 @@ +# CONFIG_NTB_EPF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_IDT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_IDT new file mode 100644 index 000000000000..1a39fe8de83d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_IDT @@ -0,0 +1 @@ +# CONFIG_NTB_IDT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_MSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_MSI new file mode 100644 index 000000000000..62a7410ff3bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_MSI @@ -0,0 +1 @@ +# CONFIG_NTB_MSI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_PERF b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_PERF new file mode 100644 index 000000000000..8d7ecb6852eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_PERF @@ -0,0 +1 @@ +# CONFIG_NTB_PERF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_PINGPONG b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_PINGPONG new file mode 100644 index 000000000000..c6b36e57d423 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_PINGPONG @@ -0,0 +1 @@ +# CONFIG_NTB_PINGPONG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_SWITCHTEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_SWITCHTEC new file mode 100644 index 000000000000..b5760cb7a158 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_SWITCHTEC @@ -0,0 +1 @@ +# CONFIG_NTB_SWITCHTEC is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_TOOL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_TOOL new file mode 100644 index 000000000000..93ba98f5d0c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_TOOL @@ -0,0 +1 @@ +# CONFIG_NTB_TOOL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_TRANSPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_TRANSPORT new file mode 100644 index 000000000000..167e3e650217 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTB_TRANSPORT @@ -0,0 +1 @@ +# CONFIG_NTB_TRANSPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_64BIT_CLUSTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_64BIT_CLUSTER new file mode 100644 index 000000000000..65a74b806f0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_64BIT_CLUSTER @@ -0,0 +1 @@ +# CONFIG_NTFS3_64BIT_CLUSTER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_FS new file mode 100644 index 000000000000..280b2a549ce8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_FS @@ -0,0 +1 @@ +CONFIG_NTFS3_FS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_FS_POSIX_ACL b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_FS_POSIX_ACL new file mode 100644 index 000000000000..08340880dd06 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_FS_POSIX_ACL @@ -0,0 +1 @@ +# CONFIG_NTFS3_FS_POSIX_ACL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_LZX_XPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_LZX_XPRESS new file mode 100644 index 000000000000..6379df2d3578 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NTFS3_LZX_XPRESS @@ -0,0 +1 @@ +# CONFIG_NTFS3_LZX_XPRESS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NULL_TTY b/anolis/configs/L2-OPTIONAL/default/CONFIG_NULL_TTY new file mode 100644 index 
000000000000..d09e7fbb8214 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NULL_TTY @@ -0,0 +1 @@ +# CONFIG_NULL_TTY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NUMA_KEEP_MEMINFO b/anolis/configs/L2-OPTIONAL/default/CONFIG_NUMA_KEEP_MEMINFO new file mode 100644 index 000000000000..0b9ad21db132 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NUMA_KEEP_MEMINFO @@ -0,0 +1 @@ +CONFIG_NUMA_KEEP_MEMINFO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_DAX b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_DAX new file mode 100644 index 000000000000..947636ca8c1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_DAX @@ -0,0 +1 @@ +CONFIG_NVDIMM_DAX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_PFN b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_PFN new file mode 100644 index 000000000000..9db164ae565a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_PFN @@ -0,0 +1 @@ +CONFIG_NVDIMM_PFN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_SECURITY_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_SECURITY_TEST new file mode 100644 index 000000000000..903b6332e9b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVDIMM_SECURITY_TEST @@ -0,0 +1 @@ +# CONFIG_NVDIMM_SECURITY_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_LAYOUT_ONIE_TLV b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_LAYOUT_ONIE_TLV new file mode 100644 index 000000000000..bc4dfbc18f08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_LAYOUT_ONIE_TLV @@ -0,0 +1 @@ +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_LAYOUT_SL28_VPD b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_LAYOUT_SL28_VPD new file mode 100644 index 000000000000..f4248d286bf3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_LAYOUT_SL28_VPD @@ -0,0 +1 @@ +# 
CONFIG_NVMEM_LAYOUT_SL28_VPD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_RMEM b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_RMEM new file mode 100644 index 000000000000..d0ea9507bad8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVMEM_RMEM @@ -0,0 +1 @@ +# CONFIG_NVMEM_RMEM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_AUTH b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_AUTH new file mode 100644 index 000000000000..a0a7178528c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_AUTH @@ -0,0 +1 @@ +# CONFIG_NVME_AUTH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_HWMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_HWMON new file mode 100644 index 000000000000..6e5917e9a4b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_HWMON @@ -0,0 +1 @@ +# CONFIG_NVME_HWMON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_MULTIPATH b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_MULTIPATH new file mode 100644 index 000000000000..7eb14dd07a01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_MULTIPATH @@ -0,0 +1 @@ +CONFIG_NVME_MULTIPATH=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET new file mode 100644 index 000000000000..03d6079ba231 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET @@ -0,0 +1 @@ +CONFIG_NVME_TARGET=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_AUTH b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_AUTH new file mode 100644 index 000000000000..d2d0c7cd7c68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_AUTH @@ -0,0 +1 @@ +# CONFIG_NVME_TARGET_AUTH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_FC b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_FC new file mode 100644 index 
000000000000..5d264040c7c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_FC @@ -0,0 +1 @@ +CONFIG_NVME_TARGET_FC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_FCLOOP b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_FCLOOP new file mode 100644 index 000000000000..7334e95ad60b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_FCLOOP @@ -0,0 +1 @@ +CONFIG_NVME_TARGET_FCLOOP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_LOOP b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_LOOP new file mode 100644 index 000000000000..31f358bca104 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_LOOP @@ -0,0 +1 @@ +CONFIG_NVME_TARGET_LOOP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_PASSTHRU b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_PASSTHRU new file mode 100644 index 000000000000..12bf02e23a41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_PASSTHRU @@ -0,0 +1 @@ +# CONFIG_NVME_TARGET_PASSTHRU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_RDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_RDMA new file mode 100644 index 000000000000..0f1470c53a9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_RDMA @@ -0,0 +1 @@ +CONFIG_NVME_TARGET_RDMA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_TCP b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_TCP new file mode 100644 index 000000000000..46078d0a7f49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_TARGET_TCP @@ -0,0 +1 @@ +CONFIG_NVME_TARGET_TCP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_VERBOSE_ERRORS b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_VERBOSE_ERRORS new file mode 100644 index 000000000000..76bd67d16c39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NVME_VERBOSE_ERRORS @@ -0,0 +1 @@ 
+# CONFIG_NVME_VERBOSE_ERRORS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_C45_TJA11XX_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_C45_TJA11XX_PHY new file mode 100644 index 000000000000..58d46266f892 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_C45_TJA11XX_PHY @@ -0,0 +1 @@ +# CONFIG_NXP_C45_TJA11XX_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_CBTX_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_CBTX_PHY new file mode 100644 index 000000000000..7f675ff13a34 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_CBTX_PHY @@ -0,0 +1 @@ +# CONFIG_NXP_CBTX_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_TJA11XX_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_TJA11XX_PHY new file mode 100644 index 000000000000..37d64a89189c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NXP_TJA11XX_PHY @@ -0,0 +1 @@ +# CONFIG_NXP_TJA11XX_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_N_GSM b/anolis/configs/L2-OPTIONAL/default/CONFIG_N_GSM new file mode 100644 index 000000000000..333f7243e3de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_N_GSM @@ -0,0 +1 @@ +CONFIG_N_GSM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_N_HDLC b/anolis/configs/L2-OPTIONAL/default/CONFIG_N_HDLC new file mode 100644 index 000000000000..cac70b3e4b1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_N_HDLC @@ -0,0 +1 @@ +CONFIG_N_HDLC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_OBJAGG b/anolis/configs/L2-OPTIONAL/default/CONFIG_OBJAGG new file mode 100644 index 000000000000..d44f2ff1c653 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_OBJAGG @@ -0,0 +1 @@ +CONFIG_OBJAGG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_OCFS2_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_OCFS2_FS new file mode 100644 index 000000000000..99fe2608d724 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_OCFS2_FS @@ -0,0 +1 @@ +# CONFIG_OCFS2_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_OID_REGISTRY b/anolis/configs/L2-OPTIONAL/default/CONFIG_OID_REGISTRY new file mode 100644 index 000000000000..4a755a3f3394 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_OID_REGISTRY @@ -0,0 +1 @@ +CONFIG_OID_REGISTRY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_OLD_SIGSUSPEND3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_OLD_SIGSUSPEND3 new file mode 100644 index 000000000000..7432702d2858 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_OLD_SIGSUSPEND3 @@ -0,0 +1 @@ +CONFIG_OLD_SIGSUSPEND3=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_OMFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_OMFS_FS new file mode 100644 index 000000000000..0aecb6226b9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_OMFS_FS @@ -0,0 +1 @@ +# CONFIG_OMFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ORANGEFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ORANGEFS_FS new file mode 100644 index 000000000000..f7edc3b2af3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ORANGEFS_FS @@ -0,0 +1 @@ +# CONFIG_ORANGEFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_OVERLAY_FS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_OVERLAY_FS_DEBUG new file mode 100644 index 000000000000..c08ef0299332 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_OVERLAY_FS_DEBUG @@ -0,0 +1 @@ +# CONFIG_OVERLAY_FS_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PACKING b/anolis/configs/L2-OPTIONAL/default/CONFIG_PACKING new file mode 100644 index 000000000000..6af3d64ddbaa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PACKING @@ -0,0 +1 @@ +# CONFIG_PACKING is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PADATA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PADATA new file mode 
100644 index 000000000000..d044574ca11a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PADATA @@ -0,0 +1 @@ +CONFIG_PADATA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_COUNTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_COUNTER new file mode 100644 index 000000000000..45bb7b51460e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_COUNTER @@ -0,0 +1 @@ +CONFIG_PAGE_COUNTER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_POOL b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_POOL new file mode 100644 index 000000000000..0eb23c7cba9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_POOL @@ -0,0 +1 @@ +CONFIG_PAGE_POOL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_POOL_STATS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_POOL_STATS new file mode 100644 index 000000000000..8b2d7f8c1355 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_POOL_STATS @@ -0,0 +1 @@ +# CONFIG_PAGE_POOL_STATS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_SIZE_LESS_THAN_256KB b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_SIZE_LESS_THAN_256KB new file mode 100644 index 000000000000..12c87bd99979 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_SIZE_LESS_THAN_256KB @@ -0,0 +1 @@ +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_SIZE_LESS_THAN_64KB b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_SIZE_LESS_THAN_64KB new file mode 100644 index 000000000000..22ea98060768 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_SIZE_LESS_THAN_64KB @@ -0,0 +1 @@ +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_TABLE_CHECK b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_TABLE_CHECK new file mode 100644 index 000000000000..2949da69ef21 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAGE_TABLE_CHECK @@ -0,0 +1 @@ +# 
CONFIG_PAGE_TABLE_CHECK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_HAS_LANG_EXCLUDE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_HAS_LANG_EXCLUDE new file mode 100644 index 000000000000..2cb7741fd132 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_HAS_LANG_EXCLUDE @@ -0,0 +1 @@ +CONFIG_PAHOLE_HAS_LANG_EXCLUDE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_HAS_SPLIT_BTF b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_HAS_SPLIT_BTF new file mode 100644 index 000000000000..d2ef15e398d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_HAS_SPLIT_BTF @@ -0,0 +1 @@ +CONFIG_PAHOLE_HAS_SPLIT_BTF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_VERSION new file mode 100644 index 000000000000..d86b86b274da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_VERSION @@ -0,0 +1 @@ +CONFIG_PAHOLE_VERSION=124 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PANIC_ON_OOPS_VALUE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PANIC_ON_OOPS_VALUE new file mode 100644 index 000000000000..165233f74875 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PANIC_ON_OOPS_VALUE @@ -0,0 +1 @@ +CONFIG_PANIC_ON_OOPS_VALUE=1 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PANTHERLORD_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_PANTHERLORD_FF new file mode 100644 index 000000000000..7b66d03b9ffd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PANTHERLORD_FF @@ -0,0 +1 @@ +# CONFIG_PANTHERLORD_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PARMAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_PARMAN new file mode 100644 index 000000000000..1e5d4dd9125e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PARMAN @@ -0,0 +1 @@ +CONFIG_PARMAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ACPI 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ACPI new file mode 100644 index 000000000000..e40bacb6f515 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ACPI @@ -0,0 +1 @@ +# CONFIG_PATA_ACPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ALI b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ALI new file mode 100644 index 000000000000..15c8144544cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ALI @@ -0,0 +1 @@ +# CONFIG_PATA_ALI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_AMD b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_AMD new file mode 100644 index 000000000000..33a58facc902 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_AMD @@ -0,0 +1 @@ +# CONFIG_PATA_AMD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ARTOP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ARTOP new file mode 100644 index 000000000000..a4e7eb3b0085 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ARTOP @@ -0,0 +1 @@ +# CONFIG_PATA_ARTOP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATIIXP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATIIXP new file mode 100644 index 000000000000..2730e0e76380 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATIIXP @@ -0,0 +1 @@ +# CONFIG_PATA_ATIIXP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATP867X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATP867X new file mode 100644 index 000000000000..5e66402b82d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATP867X @@ -0,0 +1 @@ +# CONFIG_PATA_ATP867X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD640_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD640_PCI new file mode 100644 index 000000000000..9244705d255c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD640_PCI @@ -0,0 +1 @@ +# CONFIG_PATA_CMD640_PCI is 
not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD64X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD64X new file mode 100644 index 000000000000..eb4773083247 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD64X @@ -0,0 +1 @@ +# CONFIG_PATA_CMD64X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CYPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CYPRESS new file mode 100644 index 000000000000..901fb21a9d58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CYPRESS @@ -0,0 +1 @@ +# CONFIG_PATA_CYPRESS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_EFAR b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_EFAR new file mode 100644 index 000000000000..d1957bf57c19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_EFAR @@ -0,0 +1 @@ +# CONFIG_PATA_EFAR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT366 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT366 new file mode 100644 index 000000000000..b67ac339dfd8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT366 @@ -0,0 +1 @@ +# CONFIG_PATA_HPT366 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT37X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT37X new file mode 100644 index 000000000000..990af0509a62 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT37X @@ -0,0 +1 @@ +# CONFIG_PATA_HPT37X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X2N b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X2N new file mode 100644 index 000000000000..471fcff45292 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X2N @@ -0,0 +1 @@ +# CONFIG_PATA_HPT3X2N is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3 new file mode 100644 index 000000000000..9fc36a00b62b --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3 @@ -0,0 +1 @@ +# CONFIG_PATA_HPT3X3 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT8213 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT8213 new file mode 100644 index 000000000000..47fbeb7f5a46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT8213 @@ -0,0 +1 @@ +# CONFIG_PATA_IT8213 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT821X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT821X new file mode 100644 index 000000000000..c29d7f7cb42c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT821X @@ -0,0 +1 @@ +# CONFIG_PATA_IT821X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_JMICRON b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_JMICRON new file mode 100644 index 000000000000..55695c65f458 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_JMICRON @@ -0,0 +1 @@ +# CONFIG_PATA_JMICRON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_LEGACY b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_LEGACY new file mode 100644 index 000000000000..c8c3b44f2671 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_LEGACY @@ -0,0 +1 @@ +# CONFIG_PATA_LEGACY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MARVELL b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MARVELL new file mode 100644 index 000000000000..83041a0030e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MARVELL @@ -0,0 +1 @@ +# CONFIG_PATA_MARVELL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MPIIX b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MPIIX new file mode 100644 index 000000000000..241149b47c43 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MPIIX @@ -0,0 +1 @@ +# CONFIG_PATA_MPIIX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NETCELL 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NETCELL new file mode 100644 index 000000000000..e598ae30341e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NETCELL @@ -0,0 +1 @@ +# CONFIG_PATA_NETCELL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NINJA32 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NINJA32 new file mode 100644 index 000000000000..9deb5dff7190 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NINJA32 @@ -0,0 +1 @@ +# CONFIG_PATA_NINJA32 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NS87410 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NS87410 new file mode 100644 index 000000000000..32f9c72e036e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NS87410 @@ -0,0 +1 @@ +# CONFIG_PATA_NS87410 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NS87415 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NS87415 new file mode 100644 index 000000000000..d12f9010de64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NS87415 @@ -0,0 +1 @@ +# CONFIG_PATA_NS87415 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OLDPIIX b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OLDPIIX new file mode 100644 index 000000000000..b35bb81d0ab3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OLDPIIX @@ -0,0 +1 @@ +# CONFIG_PATA_OLDPIIX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OPTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OPTI new file mode 100644 index 000000000000..f95b6c966e0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OPTI @@ -0,0 +1 @@ +# CONFIG_PATA_OPTI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OPTIDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OPTIDMA new file mode 100644 index 000000000000..6bf1d2e8aea6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OPTIDMA @@ -0,0 +1 @@ 
+# CONFIG_PATA_OPTIDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC2027X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC2027X new file mode 100644 index 000000000000..c8ad0b82611b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC2027X @@ -0,0 +1 @@ +# CONFIG_PATA_PDC2027X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC_OLD b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC_OLD new file mode 100644 index 000000000000..d77240fcf1b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC_OLD @@ -0,0 +1 @@ +# CONFIG_PATA_PDC_OLD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RADISYS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RADISYS new file mode 100644 index 000000000000..30316e774e95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RADISYS @@ -0,0 +1 @@ +# CONFIG_PATA_RADISYS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RDC b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RDC new file mode 100644 index 000000000000..48afa962bf61 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RDC @@ -0,0 +1 @@ +# CONFIG_PATA_RDC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RZ1000 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RZ1000 new file mode 100644 index 000000000000..7da7e0970466 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RZ1000 @@ -0,0 +1 @@ +# CONFIG_PATA_RZ1000 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SCH new file mode 100644 index 000000000000..52035bf53a84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SCH @@ -0,0 +1 @@ +# CONFIG_PATA_SCH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SERVERWORKS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SERVERWORKS new file mode 100644 index 
000000000000..5cacac7a670b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SERVERWORKS @@ -0,0 +1 @@ +# CONFIG_PATA_SERVERWORKS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIL680 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIL680 new file mode 100644 index 000000000000..75224a38b3bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIL680 @@ -0,0 +1 @@ +# CONFIG_PATA_SIL680 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIS new file mode 100644 index 000000000000..8c547495a6a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIS @@ -0,0 +1 @@ +# CONFIG_PATA_SIS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TIMINGS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TIMINGS new file mode 100644 index 000000000000..61e1a8d684c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TIMINGS @@ -0,0 +1 @@ +CONFIG_PATA_TIMINGS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TOSHIBA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TOSHIBA new file mode 100644 index 000000000000..ffe9957f10dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TOSHIBA @@ -0,0 +1 @@ +# CONFIG_PATA_TOSHIBA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TRIFLEX b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TRIFLEX new file mode 100644 index 000000000000..5da0e57c4b29 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TRIFLEX @@ -0,0 +1 @@ +# CONFIG_PATA_TRIFLEX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_VIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_VIA new file mode 100644 index 000000000000..9deaa6c3c904 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_VIA @@ -0,0 +1 @@ +# CONFIG_PATA_VIA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_WINBOND 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_WINBOND new file mode 100644 index 000000000000..9b3eb62898ca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_WINBOND @@ -0,0 +1 @@ +# CONFIG_PATA_WINBOND is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PC300TOO b/anolis/configs/L2-OPTIONAL/default/CONFIG_PC300TOO new file mode 100644 index 000000000000..00bad4065b40 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PC300TOO @@ -0,0 +1 @@ +# CONFIG_PC300TOO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCC b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCC new file mode 100644 index 000000000000..36eb8c2abf40 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCC @@ -0,0 +1 @@ +CONFIG_PCC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCCARD b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCCARD new file mode 100644 index 000000000000..44b97544dc6e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCCARD @@ -0,0 +1 @@ +CONFIG_PCCARD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI200SYN b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI200SYN new file mode 100644 index 000000000000..95dcd9b5401b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI200SYN @@ -0,0 +1 @@ +# CONFIG_PCI200SYN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEAER_INJECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEAER_INJECT new file mode 100644 index 000000000000..3e2f994f4140 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEAER_INJECT @@ -0,0 +1 @@ +CONFIG_PCIEAER_INJECT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_PERFORMANCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_PERFORMANCE new file mode 100644 index 000000000000..22e8072b9f9e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_PERFORMANCE @@ -0,0 +1 @@ +# CONFIG_PCIEASPM_PERFORMANCE is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_POWERSAVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_POWERSAVE new file mode 100644 index 000000000000..ac6efe4b078b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_POWERSAVE @@ -0,0 +1 @@ +# CONFIG_PCIEASPM_POWERSAVE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_POWER_SUPERSAVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_POWER_SUPERSAVE new file mode 100644 index 000000000000..5f5d7b13575f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIEASPM_POWER_SUPERSAVE @@ -0,0 +1 @@ +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_DW_PLAT_HOST b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_DW_PLAT_HOST new file mode 100644 index 000000000000..02bf1662b42b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_DW_PLAT_HOST @@ -0,0 +1 @@ +# CONFIG_PCIE_DW_PLAT_HOST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_PME b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_PME new file mode 100644 index 000000000000..4b9611bfb3fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_PME @@ -0,0 +1 @@ +CONFIG_PCIE_PME=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_PTM b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_PTM new file mode 100644 index 000000000000..aad8440d6799 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIE_PTM @@ -0,0 +1 @@ +# CONFIG_PCIE_PTM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIPCWATCHDOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIPCWATCHDOG new file mode 100644 index 000000000000..fedb6b37ba28 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCIPCWATCHDOG @@ -0,0 +1 @@ +CONFIG_PCIPCWATCHDOG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ATS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ATS new file mode 100644 index 
000000000000..3d08025de233 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ATS @@ -0,0 +1 @@ +CONFIG_PCI_ATS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DEBUG new file mode 100644 index 000000000000..6162d029fd61 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DEBUG @@ -0,0 +1 @@ +# CONFIG_PCI_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DOE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DOE new file mode 100644 index 000000000000..94c5a31f45dc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DOE @@ -0,0 +1 @@ +CONFIG_PCI_DOE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DOMAINS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DOMAINS new file mode 100644 index 000000000000..115ead1d588a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_DOMAINS @@ -0,0 +1 @@ +CONFIG_PCI_DOMAINS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT new file mode 100644 index 000000000000..d90e2a4f79b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT @@ -0,0 +1 @@ +# CONFIG_PCI_ENDPOINT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT_TEST new file mode 100644 index 000000000000..ac8854da9d65 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_ENDPOINT_TEST @@ -0,0 +1 @@ +# CONFIG_PCI_ENDPOINT_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_LABEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_LABEL new file mode 100644 index 000000000000..6d8608b80870 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_LABEL @@ -0,0 +1 @@ +CONFIG_PCI_LABEL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_MESON b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_MESON 
new file mode 100644 index 000000000000..22158fded463 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_MESON @@ -0,0 +1 @@ +# CONFIG_PCI_MESON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_P2PDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_P2PDMA new file mode 100644 index 000000000000..8898dbd7961d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_P2PDMA @@ -0,0 +1 @@ +# CONFIG_PCI_P2PDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_REALLOC_ENABLE_AUTO b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_REALLOC_ENABLE_AUTO new file mode 100644 index 000000000000..55501f103c93 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_REALLOC_ENABLE_AUTO @@ -0,0 +1 @@ +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_SW_SWITCHTEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_SW_SWITCHTEC new file mode 100644 index 000000000000..cc2e5e8ba5d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCI_SW_SWITCHTEC @@ -0,0 +1 @@ +# CONFIG_PCI_SW_SWITCHTEC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCMCIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCMCIA new file mode 100644 index 000000000000..3cf9bfbdce64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCMCIA @@ -0,0 +1 @@ +# CONFIG_PCMCIA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCPU_DEV_REFCNT b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCPU_DEV_REFCNT new file mode 100644 index 000000000000..235a1ec3a68c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCPU_DEV_REFCNT @@ -0,0 +1 @@ +CONFIG_PCPU_DEV_REFCNT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PCS_XPCS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCS_XPCS new file mode 100644 index 000000000000..a50391efc8d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PCS_XPCS @@ -0,0 +1 @@ +CONFIG_PCS_XPCS=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_PDC_ADMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PDC_ADMA new file mode 100644 index 000000000000..108646b90ec5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PDC_ADMA @@ -0,0 +1 @@ +# CONFIG_PDC_ADMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PECI b/anolis/configs/L2-OPTIONAL/default/CONFIG_PECI new file mode 100644 index 000000000000..44ed21553769 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PECI @@ -0,0 +1 @@ +# CONFIG_PECI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK new file mode 100644 index 000000000000..fad3e9927d5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK @@ -0,0 +1 @@ +CONFIG_PER_VMA_LOCK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK_STATS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK_STATS new file mode 100644 index 000000000000..d552e777c904 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PER_VMA_LOCK_STATS @@ -0,0 +1 @@ +# CONFIG_PER_VMA_LOCK_STATS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PHANTOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHANTOM new file mode 100644 index 000000000000..dc10468e6b79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHANTOM @@ -0,0 +1 @@ +# CONFIG_PHANTOM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PHONET b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHONET new file mode 100644 index 000000000000..093c18313d17 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHONET @@ -0,0 +1 @@ +# CONFIG_PHONET is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PHYLINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHYLINK new file mode 100644 index 000000000000..cc1e23e0b2aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHYLINK @@ -0,0 +1 @@ +CONFIG_PHYLINK=m diff 
--git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PHYS_ADDR_T_64BIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHYS_ADDR_T_64BIT new file mode 100644 index 000000000000..ec6ffd47fc56 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHYS_ADDR_T_64BIT @@ -0,0 +1 @@ +CONFIG_PHYS_ADDR_T_64BIT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_CAN_TRANSCEIVER b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_CAN_TRANSCEIVER new file mode 100644 index 000000000000..dc960daa8011 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_CAN_TRANSCEIVER @@ -0,0 +1 @@ +# CONFIG_PHY_CAN_TRANSCEIVER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_PXA_28NM_HSIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_PXA_28NM_HSIC new file mode 100644 index 000000000000..450c0fddf2c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_PXA_28NM_HSIC @@ -0,0 +1 @@ +# CONFIG_PHY_PXA_28NM_HSIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_PXA_28NM_USB2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_PXA_28NM_USB2 new file mode 100644 index 000000000000..02e05a7915d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PHY_PXA_28NM_USB2 @@ -0,0 +1 @@ +# CONFIG_PHY_PXA_28NM_USB2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCONF b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCONF new file mode 100644 index 000000000000..2f04b24a4959 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCONF @@ -0,0 +1 @@ +CONFIG_PINCONF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL new file mode 100644 index 000000000000..d8d83d561de7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL @@ -0,0 +1 @@ +CONFIG_PINCTRL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_AMD b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_AMD new file mode 100644 index 000000000000..02626b83511b 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_AMD @@ -0,0 +1 @@ +# CONFIG_PINCTRL_AMD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_CY8C95X0 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_CY8C95X0 new file mode 100644 index 000000000000..179d4861c81e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_CY8C95X0 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_CY8C95X0 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_MCP23S08 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_MCP23S08 new file mode 100644 index 000000000000..948eb60576b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_MCP23S08 @@ -0,0 +1 @@ +# CONFIG_PINCTRL_MCP23S08 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_SX150X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_SX150X new file mode 100644 index 000000000000..4416b9f36a3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINCTRL_SX150X @@ -0,0 +1 @@ +# CONFIG_PINCTRL_SX150X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PINMUX b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINMUX new file mode 100644 index 000000000000..2e704c683cc7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PINMUX @@ -0,0 +1 @@ +CONFIG_PINMUX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PLDMFW b/anolis/configs/L2-OPTIONAL/default/CONFIG_PLDMFW new file mode 100644 index 000000000000..8f49f56d5651 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PLDMFW @@ -0,0 +1 @@ +CONFIG_PLDMFW=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PLX_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PLX_DMA new file mode 100644 index 000000000000..61c623f9c8fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PLX_DMA @@ -0,0 +1 @@ +# CONFIG_PLX_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PMIC_ADP5520 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_PMIC_ADP5520 new file mode 100644 index 000000000000..8664194c8f70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PMIC_ADP5520 @@ -0,0 +1 @@ +# CONFIG_PMIC_ADP5520 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PMIC_DA903X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PMIC_DA903X new file mode 100644 index 000000000000..875cd25808a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PMIC_DA903X @@ -0,0 +1 @@ +# CONFIG_PMIC_DA903X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_CLK b/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_CLK new file mode 100644 index 000000000000..eba55a7afd61 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_CLK @@ -0,0 +1 @@ +CONFIG_PM_CLK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_DEVFREQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_DEVFREQ new file mode 100644 index 000000000000..ada3814f1c27 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_DEVFREQ @@ -0,0 +1 @@ +# CONFIG_PM_DEVFREQ is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_USERSPACE_AUTOSLEEP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_USERSPACE_AUTOSLEEP new file mode 100644 index 000000000000..5ef0a4ef953b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_USERSPACE_AUTOSLEEP @@ -0,0 +1 @@ +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_BLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_BLOCK new file mode 100644 index 000000000000..e3d84638baf1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_BLOCK @@ -0,0 +1 @@ +CONFIG_PNFS_BLOCK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_FILE_LAYOUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_FILE_LAYOUT new file mode 100644 index 000000000000..3fe33fa512f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_FILE_LAYOUT @@ -0,0 +1 @@ 
+CONFIG_PNFS_FILE_LAYOUT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_FLEXFILE_LAYOUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_FLEXFILE_LAYOUT new file mode 100644 index 000000000000..94dbe98593c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNFS_FLEXFILE_LAYOUT @@ -0,0 +1 @@ +CONFIG_PNFS_FLEXFILE_LAYOUT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PNP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNP new file mode 100644 index 000000000000..5c70e41cdb6b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNP @@ -0,0 +1 @@ +CONFIG_PNP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PNPACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNPACPI new file mode 100644 index 000000000000..02d6f696ebc4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PNPACPI @@ -0,0 +1 @@ +CONFIG_PNPACPI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_POSIX_CPU_TIMERS_TASK_WORK b/anolis/configs/L2-OPTIONAL/default/CONFIG_POSIX_CPU_TIMERS_TASK_WORK new file mode 100644 index 000000000000..656863ecc120 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_POSIX_CPU_TIMERS_TASK_WORK @@ -0,0 +1 @@ +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY b/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY new file mode 100644 index 000000000000..5f28cbaf0dbd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY @@ -0,0 +1 @@ +CONFIG_POWER_SUPPLY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY_DEBUG new file mode 100644 index 000000000000..23ed22a2e0ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY_DEBUG @@ -0,0 +1 @@ +# CONFIG_POWER_SUPPLY_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY_HWMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY_HWMON new file mode 100644 
index 000000000000..b9bd3a83dc0d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_POWER_SUPPLY_HWMON @@ -0,0 +1 @@ +CONFIG_POWER_SUPPLY_HWMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOATM b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOATM new file mode 100644 index 000000000000..d2b11b3b6046 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOATM @@ -0,0 +1 @@ +CONFIG_PPPOATM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS new file mode 100644 index 000000000000..19200f9dbcc2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS @@ -0,0 +1 @@ +CONFIG_PPPOE_HASH_BITS=4 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_1 new file mode 100644 index 000000000000..075eaaab96cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_1 @@ -0,0 +1 @@ +# CONFIG_PPPOE_HASH_BITS_1 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_2 new file mode 100644 index 000000000000..ce4762a2e499 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_2 @@ -0,0 +1 @@ +# CONFIG_PPPOE_HASH_BITS_2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_4 new file mode 100644 index 000000000000..7db351db4313 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_4 @@ -0,0 +1 @@ +CONFIG_PPPOE_HASH_BITS_4=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_8 new file mode 100644 index 000000000000..8e1d3a4148be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOE_HASH_BITS_8 @@ -0,0 +1 @@ +# CONFIG_PPPOE_HASH_BITS_8 is 
not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOL2TP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOL2TP new file mode 100644 index 000000000000..cb8272d91d79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPPOL2TP @@ -0,0 +1 @@ +CONFIG_PPPOL2TP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_ASYNC b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_ASYNC new file mode 100644 index 000000000000..822b86ae94f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_ASYNC @@ -0,0 +1 @@ +CONFIG_PPP_ASYNC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_BSDCOMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_BSDCOMP new file mode 100644 index 000000000000..da4db48b45d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_BSDCOMP @@ -0,0 +1 @@ +CONFIG_PPP_BSDCOMP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_DEFLATE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_DEFLATE new file mode 100644 index 000000000000..a292110b3eb0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_DEFLATE @@ -0,0 +1 @@ +CONFIG_PPP_DEFLATE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_FILTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_FILTER new file mode 100644 index 000000000000..0a7a25c08119 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_FILTER @@ -0,0 +1 @@ +CONFIG_PPP_FILTER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_MPPE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_MPPE new file mode 100644 index 000000000000..534e6d2b1976 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_MPPE @@ -0,0 +1 @@ +CONFIG_PPP_MPPE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_MULTILINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_MULTILINK new file mode 100644 index 000000000000..815360d7a946 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_MULTILINK @@ -0,0 +1 @@ +CONFIG_PPP_MULTILINK=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_SYNC_TTY b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_SYNC_TTY new file mode 100644 index 000000000000..e50b04a03096 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPP_SYNC_TTY @@ -0,0 +1 @@ +CONFIG_PPP_SYNC_TTY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS new file mode 100644 index 000000000000..192e8c5cf15f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS @@ -0,0 +1 @@ +CONFIG_PPS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_GPIO new file mode 100644 index 000000000000..70ddbfa66e25 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_GPIO @@ -0,0 +1 @@ +CONFIG_PPS_CLIENT_GPIO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_KTIMER b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_KTIMER new file mode 100644 index 000000000000..58cc6a5c15b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_KTIMER @@ -0,0 +1 @@ +# CONFIG_PPS_CLIENT_KTIMER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_LDISC b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_LDISC new file mode 100644 index 000000000000..856b2b85ba9e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_CLIENT_LDISC @@ -0,0 +1 @@ +CONFIG_PPS_CLIENT_LDISC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_DEBUG new file mode 100644 index 000000000000..68357ea56eee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPS_DEBUG @@ -0,0 +1 @@ +# CONFIG_PPS_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PPTP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPTP new file mode 100644 index 000000000000..cbe2708765cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PPTP @@ -0,0 +1 
@@ +CONFIG_PPTP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPTIRQ_DELAY_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPTIRQ_DELAY_TEST new file mode 100644 index 000000000000..6c650bba671a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPTIRQ_DELAY_TEST @@ -0,0 +1 @@ +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPT_COUNT b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPT_COUNT new file mode 100644 index 000000000000..607b3560847f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPT_COUNT @@ -0,0 +1 @@ +CONFIG_PREEMPT_COUNT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPT_NOTIFIERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPT_NOTIFIERS new file mode 100644 index 000000000000..d9c869d3f513 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREEMPT_NOTIFIERS @@ -0,0 +1 @@ +CONFIG_PREEMPT_NOTIFIERS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PREVENT_FIRMWARE_BUILD b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREVENT_FIRMWARE_BUILD new file mode 100644 index 000000000000..3793f37f1d89 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PREVENT_FIRMWARE_BUILD @@ -0,0 +1 @@ +CONFIG_PREVENT_FIRMWARE_BUILD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PRIME_NUMBERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PRIME_NUMBERS new file mode 100644 index 000000000000..86f0c393140b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PRIME_NUMBERS @@ -0,0 +1 @@ +# CONFIG_PRIME_NUMBERS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PRINTK_CALLER b/anolis/configs/L2-OPTIONAL/default/CONFIG_PRINTK_CALLER new file mode 100644 index 000000000000..470a071e3b58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PRINTK_CALLER @@ -0,0 +1 @@ +# CONFIG_PRINTK_CALLER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PROBE_EVENTS 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROBE_EVENTS new file mode 100644 index 000000000000..5bc5f69f7176 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROBE_EVENTS @@ -0,0 +1 @@ +CONFIG_PROBE_EVENTS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_CPU_RESCTRL b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_CPU_RESCTRL new file mode 100644 index 000000000000..8f76128c1853 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_CPU_RESCTRL @@ -0,0 +1 @@ +CONFIG_PROC_CPU_RESCTRL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_PID_CPUSET b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_PID_CPUSET new file mode 100644 index 000000000000..1d12cb14d270 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_PID_CPUSET @@ -0,0 +1 @@ +CONFIG_PROC_PID_CPUSET=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PROVE_LOCKING b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROVE_LOCKING new file mode 100644 index 000000000000..adea6cc66ded --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROVE_LOCKING @@ -0,0 +1 @@ +# CONFIG_PROVE_LOCKING is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PSTORE_FTRACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PSTORE_FTRACE new file mode 100644 index 000000000000..e8e51dd1ea14 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PSTORE_FTRACE @@ -0,0 +1 @@ +# CONFIG_PSTORE_FTRACE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PSTORE_PMSG b/anolis/configs/L2-OPTIONAL/default/CONFIG_PSTORE_PMSG new file mode 100644 index 000000000000..06350590dff1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PSTORE_PMSG @@ -0,0 +1 @@ +# CONFIG_PSTORE_PMSG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PTDUMP_DEBUGFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTDUMP_DEBUGFS new file mode 100644 index 000000000000..57785aa591d2 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTDUMP_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_PTDUMP_DEBUGFS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK new file mode 100644 index 000000000000..120e79f0f924 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK @@ -0,0 +1 @@ +CONFIG_PTP_1588_CLOCK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_IDT82P33 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_IDT82P33 new file mode 100644 index 000000000000..f0dda628f592 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_IDT82P33 @@ -0,0 +1 @@ +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_IDTCM b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_IDTCM new file mode 100644 index 000000000000..49444182dab8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_IDTCM @@ -0,0 +1 @@ +# CONFIG_PTP_1588_CLOCK_IDTCM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_INES b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_INES new file mode 100644 index 000000000000..ccff0ec88e76 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PTP_1588_CLOCK_INES @@ -0,0 +1 @@ +# CONFIG_PTP_1588_CLOCK_INES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM new file mode 100644 index 000000000000..346c909b60ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM @@ -0,0 +1 @@ +CONFIG_PWM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_DEBUG new file mode 100644 index 000000000000..2c6acd1d0099 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_DEBUG @@ -0,0 +1 @@ +# CONFIG_PWM_DEBUG is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_PCA9685 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_PCA9685 new file mode 100644 index 000000000000..87e2b86aecc6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_PCA9685 @@ -0,0 +1 @@ +# CONFIG_PWM_PCA9685 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_SYSFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_SYSFS new file mode 100644 index 000000000000..27e302076290 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PWM_SYSFS @@ -0,0 +1 @@ +CONFIG_PWM_SYSFS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QED b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED new file mode 100644 index 000000000000..c39c087a9604 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED @@ -0,0 +1 @@ +CONFIG_QED=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDE b/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDE new file mode 100644 index 000000000000..3e672b166871 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDE @@ -0,0 +1 @@ +CONFIG_QEDE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDF b/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDF new file mode 100644 index 000000000000..33c24865a514 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDF @@ -0,0 +1 @@ +CONFIG_QEDF=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDI b/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDI new file mode 100644 index 000000000000..c751aba20d3b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QEDI @@ -0,0 +1 @@ +CONFIG_QEDI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_FCOE b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_FCOE new file mode 100644 index 000000000000..5681a40e94db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_FCOE @@ -0,0 +1 @@ +CONFIG_QED_FCOE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_ISCSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_ISCSI new 
file mode 100644 index 000000000000..46c1c743e83b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_ISCSI @@ -0,0 +1 @@ +CONFIG_QED_ISCSI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_LL2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_LL2 new file mode 100644 index 000000000000..f574d32c6d19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_LL2 @@ -0,0 +1 @@ +CONFIG_QED_LL2=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_OOO b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_OOO new file mode 100644 index 000000000000..b17caa3fb20c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_OOO @@ -0,0 +1 @@ +CONFIG_QED_OOO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_RDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_RDMA new file mode 100644 index 000000000000..98cb65f0108d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_RDMA @@ -0,0 +1 @@ +CONFIG_QED_RDMA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_SRIOV b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_SRIOV new file mode 100644 index 000000000000..35fa4ac2ff91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QED_SRIOV @@ -0,0 +1 @@ +CONFIG_QED_SRIOV=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QLA3XXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_QLA3XXX new file mode 100644 index 000000000000..718c130d4f84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QLA3XXX @@ -0,0 +1 @@ +CONFIG_QLA3XXX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QLCNIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_QLCNIC new file mode 100644 index 000000000000..1ba4defc930e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QLCNIC @@ -0,0 +1 @@ +# CONFIG_QLCNIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QNX4FS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_QNX4FS_FS new file mode 100644 index 000000000000..43a604f47a82 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_QNX4FS_FS @@ -0,0 +1 @@ +# CONFIG_QNX4FS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QNX6FS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_QNX6FS_FS new file mode 100644 index 000000000000..9fba608a836f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QNX6FS_FS @@ -0,0 +1 @@ +# CONFIG_QNX6FS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QRTR b/anolis/configs/L2-OPTIONAL/default/CONFIG_QRTR new file mode 100644 index 000000000000..19f911ca5043 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QRTR @@ -0,0 +1 @@ +# CONFIG_QRTR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QSEMI_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_QSEMI_PHY new file mode 100644 index 000000000000..460334246d95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QSEMI_PHY @@ -0,0 +1 @@ +CONFIG_QSEMI_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QUEUED_RWLOCKS b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUEUED_RWLOCKS new file mode 100644 index 000000000000..e671b310df7c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUEUED_RWLOCKS @@ -0,0 +1 @@ +CONFIG_QUEUED_RWLOCKS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QUEUED_SPINLOCKS b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUEUED_SPINLOCKS new file mode 100644 index 000000000000..b0cb4b31e179 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUEUED_SPINLOCKS @@ -0,0 +1 @@ +CONFIG_QUEUED_SPINLOCKS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QUOTACTL b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUOTACTL new file mode 100644 index 000000000000..d473401f10a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUOTACTL @@ -0,0 +1 @@ +CONFIG_QUOTACTL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_QUOTA_TREE b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUOTA_TREE new file mode 100644 index 000000000000..afac1fbe9468 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_QUOTA_TREE @@ -0,0 +1 @@ +CONFIG_QUOTA_TREE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_R8169 b/anolis/configs/L2-OPTIONAL/default/CONFIG_R8169 new file mode 100644 index 000000000000..3f47885c13bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_R8169 @@ -0,0 +1 @@ +CONFIG_R8169=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RANDOM32_SELFTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_RANDOM32_SELFTEST new file mode 100644 index 000000000000..5c9c3b98675d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RANDOM32_SELFTEST @@ -0,0 +1 @@ +# CONFIG_RANDOM32_SELFTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RANDOM_KMALLOC_CACHES b/anolis/configs/L2-OPTIONAL/default/CONFIG_RANDOM_KMALLOC_CACHES new file mode 100644 index 000000000000..04abd3467224 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RANDOM_KMALLOC_CACHES @@ -0,0 +1 @@ +# CONFIG_RANDOM_KMALLOC_CACHES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RAPIDIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_RAPIDIO new file mode 100644 index 000000000000..79892b190b05 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RAPIDIO @@ -0,0 +1 @@ +# CONFIG_RAPIDIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RATIONAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_RATIONAL new file mode 100644 index 000000000000..5be225b5331e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RATIONAL @@ -0,0 +1 @@ +CONFIG_RATIONAL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RBTREE_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_RBTREE_TEST new file mode 100644 index 000000000000..51b8db7b7813 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RBTREE_TEST @@ -0,0 +1 @@ +# CONFIG_RBTREE_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_CPU_STALL_CPUTIME 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_CPU_STALL_CPUTIME new file mode 100644 index 000000000000..caef7fbece11 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_CPU_STALL_CPUTIME @@ -0,0 +1 @@ +# CONFIG_RCU_CPU_STALL_CPUTIME is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_EXP_CPU_STALL_TIMEOUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_EXP_CPU_STALL_TIMEOUT new file mode 100644 index 000000000000..3012f3c3e849 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_EXP_CPU_STALL_TIMEOUT @@ -0,0 +1 @@ +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_LAZY b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_LAZY new file mode 100644 index 000000000000..545896a7b18c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_LAZY @@ -0,0 +1 @@ +# CONFIG_RCU_LAZY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_NEED_SEGCBLIST b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_NEED_SEGCBLIST new file mode 100644 index 000000000000..fa4469bf8543 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_NEED_SEGCBLIST @@ -0,0 +1 @@ +CONFIG_RCU_NEED_SEGCBLIST=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_NOCB_CPU_DEFAULT_ALL b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_NOCB_CPU_DEFAULT_ALL new file mode 100644 index 000000000000..e53819b90f6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_NOCB_CPU_DEFAULT_ALL @@ -0,0 +1 @@ +# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_STALL_COMMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_STALL_COMMON new file mode 100644 index 000000000000..181afd39fa71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RCU_STALL_COMMON @@ -0,0 +1 @@ +CONFIG_RCU_STALL_COMMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_RDS new file mode 100644 index 
000000000000..ba0653e704de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RDS @@ -0,0 +1 @@ +# CONFIG_RDS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REALTEK_AUTOPM b/anolis/configs/L2-OPTIONAL/default/CONFIG_REALTEK_AUTOPM new file mode 100644 index 000000000000..ea952943f854 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REALTEK_AUTOPM @@ -0,0 +1 @@ +CONFIG_REALTEK_AUTOPM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REALTEK_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_REALTEK_PHY new file mode 100644 index 000000000000..051e7cc45b00 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REALTEK_PHY @@ -0,0 +1 @@ +CONFIG_REALTEK_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON new file mode 100644 index 000000000000..d9b33b6cfd4e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON @@ -0,0 +1 @@ +CONFIG_REED_SOLOMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_DEC8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_DEC8 new file mode 100644 index 000000000000..e153f1356a73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_DEC8 @@ -0,0 +1 @@ +CONFIG_REED_SOLOMON_DEC8=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_ENC8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_ENC8 new file mode 100644 index 000000000000..7624f7bb2759 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_ENC8 @@ -0,0 +1 @@ +CONFIG_REED_SOLOMON_ENC8=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_TEST new file mode 100644 index 000000000000..6ca2a5270b78 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REED_SOLOMON_TEST @@ -0,0 +1 @@ +# CONFIG_REED_SOLOMON_TEST is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP new file mode 100644 index 000000000000..5e40b3c4b122 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP @@ -0,0 +1 @@ +CONFIG_REGMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP_I2C new file mode 100644 index 000000000000..8440b2cafcb2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP_I2C @@ -0,0 +1 @@ +CONFIG_REGMAP_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP_SPI new file mode 100644 index 000000000000..adb8804fabf5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REGMAP_SPI @@ -0,0 +1 @@ +CONFIG_REGMAP_SPI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REISERFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_REISERFS_FS new file mode 100644 index 000000000000..d11f952ac942 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REISERFS_FS @@ -0,0 +1 @@ +# CONFIG_REISERFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REMOTEPROC b/anolis/configs/L2-OPTIONAL/default/CONFIG_REMOTEPROC new file mode 100644 index 000000000000..5a54c2936599 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REMOTEPROC @@ -0,0 +1 @@ +# CONFIG_REMOTEPROC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_REMOTE_TARGET b/anolis/configs/L2-OPTIONAL/default/CONFIG_REMOTE_TARGET new file mode 100644 index 000000000000..91c6f1c28c2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_REMOTE_TARGET @@ -0,0 +1 @@ +# CONFIG_REMOTE_TARGET is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RENESAS_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_RENESAS_PHY new file mode 100644 index 000000000000..2314e0cf2d9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RENESAS_PHY @@ -0,0 +1 @@ 
+CONFIG_RENESAS_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_ATTACK_MITIGATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_ATTACK_MITIGATION new file mode 100644 index 000000000000..eea15dd529c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_ATTACK_MITIGATION @@ -0,0 +1 @@ +# CONFIG_RESET_ATTACK_MITIGATION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_TI_SYSCON b/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_TI_SYSCON new file mode 100644 index 000000000000..1e76bd1354ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_TI_SYSCON @@ -0,0 +1 @@ +# CONFIG_RESET_TI_SYSCON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_TI_TPS380X b/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_TI_TPS380X new file mode 100644 index 000000000000..4f44ddf912b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RESET_TI_TPS380X @@ -0,0 +1 @@ +# CONFIG_RESET_TI_TPS380X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RFD_FTL b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFD_FTL new file mode 100644 index 000000000000..3d4b2f0de2c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFD_FTL @@ -0,0 +1 @@ +# CONFIG_RFD_FTL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL new file mode 100644 index 000000000000..7ec901c5331d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL @@ -0,0 +1 @@ +CONFIG_RFKILL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL_INPUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL_INPUT new file mode 100644 index 000000000000..15fc0f8ad8c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL_INPUT @@ -0,0 +1 @@ +CONFIG_RFKILL_INPUT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL_LEDS b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL_LEDS new file mode 100644 index 
000000000000..35a45201d109 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFKILL_LEDS @@ -0,0 +1 @@ +CONFIG_RFKILL_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RFS_ACCEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFS_ACCEL new file mode 100644 index 000000000000..3cdf9be6ba1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RFS_ACCEL @@ -0,0 +1 @@ +CONFIG_RFS_ACCEL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER new file mode 100644 index 000000000000..624958564c91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER @@ -0,0 +1 @@ +CONFIG_RING_BUFFER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_BENCHMARK b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_BENCHMARK new file mode 100644 index 000000000000..4116ef243b9e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_BENCHMARK @@ -0,0 +1 @@ +CONFIG_RING_BUFFER_BENCHMARK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_STARTUP_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_STARTUP_TEST new file mode 100644 index 000000000000..999c92369404 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_STARTUP_TEST @@ -0,0 +1 @@ +# CONFIG_RING_BUFFER_STARTUP_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS new file mode 100644 index 000000000000..062dee43468f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS @@ -0,0 +1 @@ +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_2D_SENSOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_2D_SENSOR new file mode 100644 index 000000000000..1e7576254ae5 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_2D_SENSOR @@ -0,0 +1 @@ +CONFIG_RMI4_2D_SENSOR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_CORE new file mode 100644 index 000000000000..2e5716d66f12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_CORE @@ -0,0 +1 @@ +CONFIG_RMI4_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F03 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F03 new file mode 100644 index 000000000000..08ae820b188d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F03 @@ -0,0 +1 @@ +CONFIG_RMI4_F03=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F03_SERIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F03_SERIO new file mode 100644 index 000000000000..a1bdeb7d83fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F03_SERIO @@ -0,0 +1 @@ +CONFIG_RMI4_F03_SERIO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F11 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F11 new file mode 100644 index 000000000000..94542f838bb0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F11 @@ -0,0 +1 @@ +CONFIG_RMI4_F11=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F12 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F12 new file mode 100644 index 000000000000..c1bf5103b420 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F12 @@ -0,0 +1 @@ +CONFIG_RMI4_F12=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F30 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F30 new file mode 100644 index 000000000000..2efcd29cff5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F30 @@ -0,0 +1 @@ +CONFIG_RMI4_F30=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F3A b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F3A new file mode 100644 index 000000000000..7daf3f63cac2 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F3A @@ -0,0 +1 @@ +# CONFIG_RMI4_F3A is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F55 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F55 new file mode 100644 index 000000000000..893bb030d1a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_F55 @@ -0,0 +1 @@ +CONFIG_RMI4_F55=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_I2C new file mode 100644 index 000000000000..731005809051 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_I2C @@ -0,0 +1 @@ +CONFIG_RMI4_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_SMB b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_SMB new file mode 100644 index 000000000000..9c2ab3f29e74 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RMI4_SMB @@ -0,0 +1 @@ +CONFIG_RMI4_SMB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ROCKCHIP_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_ROCKCHIP_PHY new file mode 100644 index 000000000000..e49faf8f93ad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ROCKCHIP_PHY @@ -0,0 +1 @@ +CONFIG_ROCKCHIP_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ROCKER b/anolis/configs/L2-OPTIONAL/default/CONFIG_ROCKER new file mode 100644 index 000000000000..4e61c5a5ef0a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ROCKER @@ -0,0 +1 @@ +CONFIG_ROCKER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ROMFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ROMFS_FS new file mode 100644 index 000000000000..3591977a24c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ROMFS_FS @@ -0,0 +1 @@ +# CONFIG_ROMFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1 new file mode 100644 index 000000000000..3050e4496023 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1 @@ -0,0 +1 @@ +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 new file mode 100644 index 000000000000..a25e20873924 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 @@ -0,0 +1 @@ +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA new file mode 100644 index 000000000000..8253c2cb6fe8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA @@ -0,0 +1 @@ +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RPMSG_QCOM_GLINK_RPM b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPMSG_QCOM_GLINK_RPM new file mode 100644 index 000000000000..df2fa18d0f73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPMSG_QCOM_GLINK_RPM @@ -0,0 +1 @@ +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RPMSG_VIRTIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPMSG_VIRTIO new file mode 100644 index 000000000000..04b624a7ccc3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RPMSG_VIRTIO @@ -0,0 +1 @@ +# CONFIG_RPMSG_VIRTIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DEBUG new file mode 100644 index 000000000000..13664a344820 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DEBUG @@ -0,0 +1 @@ +# CONFIG_RTC_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ABEOZ9 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ABEOZ9 new file mode 100644 
index 000000000000..6e67aa0eec5f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ABEOZ9 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_ABEOZ9 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_BQ32K b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_BQ32K new file mode 100644 index 000000000000..a5b9fe1c6a53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_BQ32K @@ -0,0 +1 @@ +CONFIG_RTC_DRV_BQ32K=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1286 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1286 new file mode 100644 index 000000000000..26ed4fb2144c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1286 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1286=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1302 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1302 new file mode 100644 index 000000000000..053d2126a955 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1302 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1302 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307 new file mode 100644 index 000000000000..2d3d02090bc8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1307=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307_CENTURY b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307_CENTURY new file mode 100644 index 000000000000..dd3202f455d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1307_CENTURY @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1307_CENTURY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1374 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1374 new file mode 100644 index 000000000000..39c837c5e598 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1374 @@ -0,0 +1 @@ 
+CONFIG_RTC_DRV_DS1374=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1511 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1511 new file mode 100644 index 000000000000..a36656c78ef1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1511 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1511=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1553 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1553 new file mode 100644 index 000000000000..a069d4ba2dad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1553 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1553=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1672 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1672 new file mode 100644 index 000000000000..2b12e3905cd5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1672 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1672=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1742 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1742 new file mode 100644 index 000000000000..ab8422c1b0a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS1742 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS1742=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS2404 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS2404 new file mode 100644 index 000000000000..88909128bd78 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS2404 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS2404=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232 new file mode 100644 index 000000000000..f891d41b863a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS3232=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232_HWMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232_HWMON new file mode 100644 index 
000000000000..616fe7d3bf0e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_DS3232_HWMON @@ -0,0 +1 @@ +CONFIG_RTC_DRV_DS3232_HWMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_EM3027 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_EM3027 new file mode 100644 index 000000000000..5045a2f143ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_EM3027 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_EM3027=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_FM3130 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_FM3130 new file mode 100644 index 000000000000..32065e365a4b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_FM3130 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_FM3130=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_FTRTC010 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_FTRTC010 new file mode 100644 index 000000000000..bbb608e4f562 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_FTRTC010 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_FTRTC010 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_GOLDFISH b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_GOLDFISH new file mode 100644 index 000000000000..4b7e561b61de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_GOLDFISH @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_GOLDFISH is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ISL12022 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ISL12022 new file mode 100644 index 000000000000..e12f2be210b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ISL12022 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_ISL12022=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ISL1208 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ISL1208 new file mode 100644 index 000000000000..b3f7280e14fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_ISL1208 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_ISL1208=m 
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80 new file mode 100644 index 000000000000..0654b656ca28 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_M41T80=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80_WDT b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80_WDT new file mode 100644 index 000000000000..7b134a9a8741 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M41T80_WDT @@ -0,0 +1 @@ +CONFIG_RTC_DRV_M41T80_WDT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T35 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T35 new file mode 100644 index 000000000000..6bbe79a050e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T35 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_M48T35=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T59 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T59 new file mode 100644 index 000000000000..62a856ff3a64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T59 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_M48T59=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T86 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T86 new file mode 100644 index 000000000000..d7a81799326b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_M48T86 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_M48T86 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MAX6900 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MAX6900 new file mode 100644 index 000000000000..bed716d84938 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MAX6900 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_MAX6900=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MAX6916 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MAX6916 new file mode 100644 index 
000000000000..23c78cae6849 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MAX6916 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_MAX6916 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MSM6242 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MSM6242 new file mode 100644 index 000000000000..3f567e9991a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_MSM6242 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_MSM6242=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8523 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8523 new file mode 100644 index 000000000000..a4f00644851c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8523 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_PCF8523=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF85363 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF85363 new file mode 100644 index 000000000000..115d491a470f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF85363 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_PCF85363 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8563 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8563 new file mode 100644 index 000000000000..f3654f9d7c19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8563 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_PCF8563=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8583 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8583 new file mode 100644 index 000000000000..06e61bf3b136 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_PCF8583 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_PCF8583=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RP5C01 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RP5C01 new file mode 100644 index 000000000000..e13e06b65575 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RP5C01 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_RP5C01=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RS5C372 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RS5C372 new file mode 100644 index 000000000000..52e9cb50f794 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RS5C372 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_RS5C372=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3028 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3028 new file mode 100644 index 000000000000..909476a51041 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3028 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RV3028 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3029C2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3029C2 new file mode 100644 index 000000000000..4c0b36c33265 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3029C2 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_RV3029C2=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3029_HWMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3029_HWMON new file mode 100644 index 000000000000..5b6c908f74b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3029_HWMON @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RV3029_HWMON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3032 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3032 new file mode 100644 index 000000000000..e34dbb84b1f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV3032 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RV3032 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV8803 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV8803 new file mode 100644 index 000000000000..413de6727d1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RV8803 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RV8803 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX6110 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX6110 new 
file mode 100644 index 000000000000..a7fc05a8e0cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX6110 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RX6110 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8025 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8025 new file mode 100644 index 000000000000..62d78291898c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8025 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_RX8025=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8581 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8581 new file mode 100644 index 000000000000..051a55f2a27e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_RX8581 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_RX8581=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_S35390A b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_S35390A new file mode 100644 index 000000000000..5c810d5654f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_S35390A @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_S35390A is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_SD3078 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_SD3078 new file mode 100644 index 000000000000..0d3e4c43e429 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_SD3078 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_SD3078 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_STK17TA8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_STK17TA8 new file mode 100644 index 000000000000..ace13585227c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_STK17TA8 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_STK17TA8=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_TEST new file mode 100644 index 000000000000..70982b01205b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_TEST @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_TEST is 
not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_X1205 b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_X1205 new file mode 100644 index 000000000000..444d4d32c80e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_DRV_X1205 @@ -0,0 +1 @@ +CONFIG_RTC_DRV_X1205=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_I2C_AND_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_I2C_AND_SPI new file mode 100644 index 000000000000..b16b2084499d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_I2C_AND_SPI @@ -0,0 +1 @@ +CONFIG_RTC_I2C_AND_SPI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_INTF_DEV_UIE_EMUL b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_INTF_DEV_UIE_EMUL new file mode 100644 index 000000000000..80e6b2a9b8e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_INTF_DEV_UIE_EMUL @@ -0,0 +1 @@ +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_LIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_LIB new file mode 100644 index 000000000000..cfdd517579cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RTC_LIB @@ -0,0 +1 @@ +CONFIG_RTC_LIB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RT_MUTEXES b/anolis/configs/L2-OPTIONAL/default/CONFIG_RT_MUTEXES new file mode 100644 index 000000000000..4402573d0a55 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RT_MUTEXES @@ -0,0 +1 @@ +CONFIG_RT_MUTEXES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RV b/anolis/configs/L2-OPTIONAL/default/CONFIG_RV new file mode 100644 index 000000000000..5514d15cb949 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_RV @@ -0,0 +1 @@ +# CONFIG_RV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_RWSEM_SPIN_ON_OWNER b/anolis/configs/L2-OPTIONAL/default/CONFIG_RWSEM_SPIN_ON_OWNER new file mode 100644 index 000000000000..6585a2376fa8 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_RWSEM_SPIN_ON_OWNER @@ -0,0 +1 @@ +CONFIG_RWSEM_SPIN_ON_OWNER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SAMPLES b/anolis/configs/L2-OPTIONAL/default/CONFIG_SAMPLES new file mode 100644 index 000000000000..6e026deba112 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SAMPLES @@ -0,0 +1 @@ +# CONFIG_SAMPLES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ACARD_AHCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ACARD_AHCI new file mode 100644 index 000000000000..7ee7391a2fbb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ACARD_AHCI @@ -0,0 +1 @@ +# CONFIG_SATA_ACARD_AHCI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_DWC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_DWC new file mode 100644 index 000000000000..57c2836483da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_DWC @@ -0,0 +1 @@ +# CONFIG_SATA_DWC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_HOST b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_HOST new file mode 100644 index 000000000000..4216567c05dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_HOST @@ -0,0 +1 @@ +CONFIG_SATA_HOST=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_INIC162X b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_INIC162X new file mode 100644 index 000000000000..6abf495a0521 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_INIC162X @@ -0,0 +1 @@ +# CONFIG_SATA_INIC162X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_MV b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_MV new file mode 100644 index 000000000000..82f4fb00dca7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_MV @@ -0,0 +1 @@ +# CONFIG_SATA_MV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_NV b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_NV new file mode 100644 index 
000000000000..c8419327af60 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_NV @@ -0,0 +1 @@ +# CONFIG_SATA_NV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_PROMISE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_PROMISE new file mode 100644 index 000000000000..2af59fe8ef91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_PROMISE @@ -0,0 +1 @@ +# CONFIG_SATA_PROMISE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_QSTOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_QSTOR new file mode 100644 index 000000000000..8b3cce9abcf6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_QSTOR @@ -0,0 +1 @@ +# CONFIG_SATA_QSTOR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL new file mode 100644 index 000000000000..5d551ee6a8c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL @@ -0,0 +1 @@ +# CONFIG_SATA_SIL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL24 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL24 new file mode 100644 index 000000000000..7526a06a0f31 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL24 @@ -0,0 +1 @@ +# CONFIG_SATA_SIL24 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIS new file mode 100644 index 000000000000..1cd4ae75f556 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIS @@ -0,0 +1 @@ +# CONFIG_SATA_SIS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SVW b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SVW new file mode 100644 index 000000000000..625275c8d6cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SVW @@ -0,0 +1 @@ +# CONFIG_SATA_SVW is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SX4 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SX4 new file mode 100644 index 000000000000..abbd89112264 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SX4 @@ -0,0 +1 @@ +# CONFIG_SATA_SX4 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ULI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ULI new file mode 100644 index 000000000000..b50271fc3780 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ULI @@ -0,0 +1 @@ +# CONFIG_SATA_ULI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VIA new file mode 100644 index 000000000000..f5754463a7d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VIA @@ -0,0 +1 @@ +# CONFIG_SATA_VIA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VITESSE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VITESSE new file mode 100644 index 000000000000..59856ec7bcee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VITESSE @@ -0,0 +1 @@ +# CONFIG_SATA_VITESSE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ZPODD b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ZPODD new file mode 100644 index 000000000000..61d1a70b3a7a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ZPODD @@ -0,0 +1 @@ +# CONFIG_SATA_ZPODD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SBITMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_SBITMAP new file mode 100644 index 000000000000..da987bc7011c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SBITMAP @@ -0,0 +1 @@ +CONFIG_SBITMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCF_TORTURE_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCF_TORTURE_TEST new file mode 100644 index 000000000000..53ebe3b18974 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCF_TORTURE_TEST @@ -0,0 +1 @@ +# CONFIG_SCF_TORTURE_TEST is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_HRTICK b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_HRTICK new file mode 100644 index 000000000000..3c856f61af10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_HRTICK @@ -0,0 +1 @@ +CONFIG_SCHED_HRTICK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_MM_CID b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_MM_CID new file mode 100644 index 000000000000..1ea865fce380 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_MM_CID @@ -0,0 +1 @@ +CONFIG_SCHED_MM_CID=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_STACK_END_CHECK b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_STACK_END_CHECK new file mode 100644 index 000000000000..976ab2b50791 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCHED_STACK_END_CHECK @@ -0,0 +1 @@ +# CONFIG_SCHED_STACK_END_CHECK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_3W_9XXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_3W_9XXX new file mode 100644 index 000000000000..00de6f158f63 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_3W_9XXX @@ -0,0 +1 @@ +# CONFIG_SCSI_3W_9XXX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_3W_SAS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_3W_SAS new file mode 100644 index 000000000000..06a89f9183a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_3W_SAS @@ -0,0 +1 @@ +# CONFIG_SCSI_3W_SAS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ACARD b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ACARD new file mode 100644 index 000000000000..d4002ddd0634 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ACARD @@ -0,0 +1 @@ +# CONFIG_SCSI_ACARD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ADVANSYS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ADVANSYS new file mode 100644 index 000000000000..9727b0b930e6 --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ADVANSYS @@ -0,0 +1 @@ +# CONFIG_SCSI_ADVANSYS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC79XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC79XX new file mode 100644 index 000000000000..801fade38899 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC79XX @@ -0,0 +1 @@ +# CONFIG_SCSI_AIC79XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC7XXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC7XXX new file mode 100644 index 000000000000..f305031d08f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC7XXX @@ -0,0 +1 @@ +# CONFIG_SCSI_AIC7XXX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC94XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC94XX new file mode 100644 index 000000000000..ec2405137293 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AIC94XX @@ -0,0 +1 @@ +# CONFIG_SCSI_AIC94XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AM53C974 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AM53C974 new file mode 100644 index 000000000000..ce64864a7087 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_AM53C974 @@ -0,0 +1 @@ +# CONFIG_SCSI_AM53C974 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ARCMSR b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ARCMSR new file mode 100644 index 000000000000..425f89f74fbe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ARCMSR @@ -0,0 +1 @@ +# CONFIG_SCSI_ARCMSR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_BFA_FC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_BFA_FC new file mode 100644 index 000000000000..8568e1ec84c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_BFA_FC @@ -0,0 +1 @@ +# CONFIG_SCSI_BFA_FC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_BUSLOGIC 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_BUSLOGIC new file mode 100644 index 000000000000..c1a96e3d8505 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_BUSLOGIC @@ -0,0 +1 @@ +# CONFIG_SCSI_BUSLOGIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CHELSIO_FCOE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CHELSIO_FCOE new file mode 100644 index 000000000000..8acf63ea341f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CHELSIO_FCOE @@ -0,0 +1 @@ +CONFIG_SCSI_CHELSIO_FCOE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_COMMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_COMMON new file mode 100644 index 000000000000..c26141e329f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_COMMON @@ -0,0 +1 @@ +CONFIG_SCSI_COMMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CXGB3_ISCSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CXGB3_ISCSI new file mode 100644 index 000000000000..d1e803258f4a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CXGB3_ISCSI @@ -0,0 +1 @@ +# CONFIG_SCSI_CXGB3_ISCSI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CXGB4_ISCSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CXGB4_ISCSI new file mode 100644 index 000000000000..3cbb0b57d96a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_CXGB4_ISCSI @@ -0,0 +1 @@ +CONFIG_SCSI_CXGB4_ISCSI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DC395x b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DC395x new file mode 100644 index 000000000000..04438189f3c0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DC395x @@ -0,0 +1 @@ +# CONFIG_SCSI_DC395x is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_ALUA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_ALUA new file mode 100644 index 000000000000..85e9ad83c76e --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_ALUA @@ -0,0 +1 @@ +CONFIG_SCSI_DH_ALUA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_EMC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_EMC new file mode 100644 index 000000000000..23170c892144 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_EMC @@ -0,0 +1 @@ +CONFIG_SCSI_DH_EMC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_HP_SW b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_HP_SW new file mode 100644 index 000000000000..a5feace2a94d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_HP_SW @@ -0,0 +1 @@ +CONFIG_SCSI_DH_HP_SW=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_RDAC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_RDAC new file mode 100644 index 000000000000..ef1739ea0103 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DH_RDAC @@ -0,0 +1 @@ +CONFIG_SCSI_DH_RDAC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DMX3191D b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DMX3191D new file mode 100644 index 000000000000..291b9196e10c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_DMX3191D @@ -0,0 +1 @@ +# CONFIG_SCSI_DMX3191D is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_EFCT b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_EFCT new file mode 100644 index 000000000000..e78712e8f025 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_EFCT @@ -0,0 +1 @@ +# CONFIG_SCSI_EFCT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ESAS2R b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ESAS2R new file mode 100644 index 000000000000..190cb39db74e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_ESAS2R @@ -0,0 +1 @@ +# CONFIG_SCSI_ESAS2R is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_FDOMAIN_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_FDOMAIN_PCI new 
file mode 100644 index 000000000000..706b0549dbf2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_FDOMAIN_PCI @@ -0,0 +1 @@ +# CONFIG_SCSI_FDOMAIN_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_HPSA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_HPSA new file mode 100644 index 000000000000..11cf299e0857 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_HPSA @@ -0,0 +1 @@ +CONFIG_SCSI_HPSA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_HPTIOP b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_HPTIOP new file mode 100644 index 000000000000..e92d4a91f8c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_HPTIOP @@ -0,0 +1 @@ +# CONFIG_SCSI_HPTIOP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_INIA100 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_INIA100 new file mode 100644 index 000000000000..50f5dd42b71d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_INIA100 @@ -0,0 +1 @@ +# CONFIG_SCSI_INIA100 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_INITIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_INITIO new file mode 100644 index 000000000000..7abe6c2d9278 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_INITIO @@ -0,0 +1 @@ +# CONFIG_SCSI_INITIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_IPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_IPS new file mode 100644 index 000000000000..32e4abe67df7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_IPS @@ -0,0 +1 @@ +# CONFIG_SCSI_IPS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_LPFC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_LPFC new file mode 100644 index 000000000000..c7cbc90fe66e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_LPFC @@ -0,0 +1 @@ +CONFIG_SCSI_LPFC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_LPFC_DEBUG_FS 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_LPFC_DEBUG_FS new file mode 100644 index 000000000000..52fa94e2793e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_LPFC_DEBUG_FS @@ -0,0 +1 @@ +# CONFIG_SCSI_LPFC_DEBUG_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MOD b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MOD new file mode 100644 index 000000000000..7049073c114c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MOD @@ -0,0 +1 @@ +CONFIG_SCSI_MOD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS new file mode 100644 index 000000000000..dd17532f6815 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS @@ -0,0 +1 @@ +# CONFIG_SCSI_MVSAS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVUMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVUMI new file mode 100644 index 000000000000..a9b158a8553e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVUMI @@ -0,0 +1 @@ +# CONFIG_SCSI_MVUMI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MYRB b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MYRB new file mode 100644 index 000000000000..efe4d49c4568 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MYRB @@ -0,0 +1 @@ +# CONFIG_SCSI_MYRB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MYRS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MYRS new file mode 100644 index 000000000000..b7fc26a5995f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MYRS @@ -0,0 +1 @@ +# CONFIG_SCSI_MYRS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_NETLINK b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_NETLINK new file mode 100644 index 000000000000..7e3736192530 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_NETLINK @@ -0,0 +1 @@ +CONFIG_SCSI_NETLINK=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_PM8001 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_PM8001 new file mode 100644 index 000000000000..ddafafd46e6e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_PM8001 @@ -0,0 +1 @@ +# CONFIG_SCSI_PM8001 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_PMCRAID b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_PMCRAID new file mode 100644 index 000000000000..91c798c43fee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_PMCRAID @@ -0,0 +1 @@ +# CONFIG_SCSI_PMCRAID is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLA_FC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLA_FC new file mode 100644 index 000000000000..68581bbb0e64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLA_FC @@ -0,0 +1 @@ +CONFIG_SCSI_QLA_FC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLA_ISCSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLA_ISCSI new file mode 100644 index 000000000000..4c654831b757 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLA_ISCSI @@ -0,0 +1 @@ +CONFIG_SCSI_QLA_ISCSI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLOGIC_1280 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLOGIC_1280 new file mode 100644 index 000000000000..98bf0179a1dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_QLOGIC_1280 @@ -0,0 +1 @@ +# CONFIG_SCSI_QLOGIC_1280 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_SNIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_SNIC new file mode 100644 index 000000000000..5fa55adc91b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_SNIC @@ -0,0 +1 @@ +# CONFIG_SCSI_SNIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_STEX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_STEX new file mode 100644 index 000000000000..51be1d1ebd47 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_STEX @@ -0,0 +1 @@ +# CONFIG_SCSI_STEX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_SYM53C8XX_2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_SYM53C8XX_2 new file mode 100644 index 000000000000..1cb5921057ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_SYM53C8XX_2 @@ -0,0 +1 @@ +# CONFIG_SCSI_SYM53C8XX_2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_WD719X b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_WD719X new file mode 100644 index 000000000000..96273e30305f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_WD719X @@ -0,0 +1 @@ +# CONFIG_SCSI_WD719X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_COOKIE_HMAC_MD5 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_COOKIE_HMAC_MD5 new file mode 100644 index 000000000000..fabd74383373 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_COOKIE_HMAC_MD5 @@ -0,0 +1 @@ +CONFIG_SCTP_COOKIE_HMAC_MD5=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_COOKIE_HMAC_SHA1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_COOKIE_HMAC_SHA1 new file mode 100644 index 000000000000..7b2efb49bca1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_COOKIE_HMAC_SHA1 @@ -0,0 +1 @@ +CONFIG_SCTP_COOKIE_HMAC_SHA1=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DBG_OBJCNT b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DBG_OBJCNT new file mode 100644 index 000000000000..c9b5228397ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DBG_OBJCNT @@ -0,0 +1 @@ +# CONFIG_SCTP_DBG_OBJCNT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 new file mode 100644 index 000000000000..39efc2075d1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 @@ -0,0 +1 @@ +# 
CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE new file mode 100644 index 000000000000..e5bb9ecfe318 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE @@ -0,0 +1 @@ +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 new file mode 100644 index 000000000000..ecf09aef8ab9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 @@ -0,0 +1 @@ +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SDIO_UART b/anolis/configs/L2-OPTIONAL/default/CONFIG_SDIO_UART new file mode 100644 index 000000000000..11f20bf33fa4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SDIO_UART @@ -0,0 +1 @@ +CONFIG_SDIO_UART=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECCOMP_CACHE_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECCOMP_CACHE_DEBUG new file mode 100644 index 000000000000..af600e9beaec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECCOMP_CACHE_DEBUG @@ -0,0 +1 @@ +# CONFIG_SECCOMP_CACHE_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_APPARMOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_APPARMOR new file mode 100644 index 000000000000..4e207e1acf0a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_APPARMOR @@ -0,0 +1 @@ +# CONFIG_SECURITY_APPARMOR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LANDLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LANDLOCK new file mode 100644 index 000000000000..6cd907f1bc3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LANDLOCK @@ -0,0 +1 @@ +# 
CONFIG_SECURITY_LANDLOCK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LOADPIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LOADPIN new file mode 100644 index 000000000000..3276f3e87fdf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LOADPIN @@ -0,0 +1 @@ +# CONFIG_SECURITY_LOADPIN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LOCKDOWN_LSM b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LOCKDOWN_LSM new file mode 100644 index 000000000000..cacea5434060 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_LOCKDOWN_LSM @@ -0,0 +1 @@ +# CONFIG_SECURITY_LOCKDOWN_LSM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_SAFESETID b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_SAFESETID new file mode 100644 index 000000000000..ad72247ef42f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_SAFESETID @@ -0,0 +1 @@ +# CONFIG_SECURITY_SAFESETID is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_SELINUX_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_SELINUX_DEBUG new file mode 100644 index 000000000000..1affa2db54dc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_SELINUX_DEBUG @@ -0,0 +1 @@ +# CONFIG_SECURITY_SELINUX_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_TOMOYO b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_TOMOYO new file mode 100644 index 000000000000..e0785195f169 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_TOMOYO @@ -0,0 +1 @@ +# CONFIG_SECURITY_TOMOYO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_YAMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_YAMA new file mode 100644 index 000000000000..3b55731fa516 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SECURITY_YAMA @@ -0,0 +1 @@ +CONFIG_SECURITY_YAMA=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ACBEL_FSG032 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ACBEL_FSG032 new file mode 100644 index 000000000000..96a3ee0837fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ACBEL_FSG032 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ACBEL_FSG032 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADM1177 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADM1177 new file mode 100644 index 000000000000..73f1d35f4955 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADM1177 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADM1177 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADM1266 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADM1266 new file mode 100644 index 000000000000..fac63d365677 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADM1266 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADM1266 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADT7310 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADT7310 new file mode 100644 index 000000000000..61225fbce3f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ADT7310 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADT7310 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AHT10 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AHT10 new file mode 100644 index 000000000000..b955f391ead1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AHT10 @@ -0,0 +1 @@ +# CONFIG_SENSORS_AHT10 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AQUACOMPUTER_D5NEXT b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AQUACOMPUTER_D5NEXT new file mode 100644 index 000000000000..2040fdd8c647 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AQUACOMPUTER_D5NEXT @@ -0,0 +1 @@ +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AS370 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AS370 new file mode 100644 index 000000000000..f76a8def3d94 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AS370 @@ -0,0 +1 @@ +# CONFIG_SENSORS_AS370 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AXI_FAN_CONTROL b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AXI_FAN_CONTROL new file mode 100644 index 000000000000..8f02a9e62750 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_AXI_FAN_CONTROL @@ -0,0 +1 @@ +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_BEL_PFE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_BEL_PFE new file mode 100644 index 000000000000..20f8ac71d4c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_BEL_PFE @@ -0,0 +1 @@ +# CONFIG_SENSORS_BEL_PFE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_BPA_RS600 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_BPA_RS600 new file mode 100644 index 000000000000..633b62404578 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_BPA_RS600 @@ -0,0 +1 @@ +# CONFIG_SENSORS_BPA_RS600 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_CORSAIR_CPRO b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_CORSAIR_CPRO new file mode 100644 index 000000000000..ffe4c7ee219b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_CORSAIR_CPRO @@ -0,0 +1 @@ +# CONFIG_SENSORS_CORSAIR_CPRO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_CORSAIR_PSU b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_CORSAIR_PSU new file mode 100644 index 000000000000..4bc9dc6f6bfd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_CORSAIR_PSU @@ -0,0 +1 @@ +# CONFIG_SENSORS_CORSAIR_PSU is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DELTA_AHE50DC_FAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DELTA_AHE50DC_FAN new file mode 100644 index 000000000000..cd332db67935 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DELTA_AHE50DC_FAN @@ -0,0 +1 @@ +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DPS920AB b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DPS920AB new file mode 100644 index 000000000000..4bae136656fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DPS920AB @@ -0,0 +1 @@ +# CONFIG_SENSORS_DPS920AB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DRIVETEMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DRIVETEMP new file mode 100644 index 000000000000..34bc561d5c6c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_DRIVETEMP @@ -0,0 +1 @@ +# CONFIG_SENSORS_DRIVETEMP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_EMC2103 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_EMC2103 new file mode 100644 index 000000000000..17ff741c58d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_EMC2103 @@ -0,0 +1 @@ +# CONFIG_SENSORS_EMC2103 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_EMC2305 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_EMC2305 new file mode 100644 index 000000000000..8e1d208a7e42 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_EMC2305 @@ -0,0 +1 @@ +# CONFIG_SENSORS_EMC2305 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_FSP_3Y b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_FSP_3Y new file mode 100644 index 000000000000..3b73f0d67240 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_FSP_3Y @@ -0,0 +1 @@ +# CONFIG_SENSORS_FSP_3Y is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_FTSTEUTATES 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_FTSTEUTATES new file mode 100644 index 000000000000..b3d0d4c87a2d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_FTSTEUTATES @@ -0,0 +1 @@ +# CONFIG_SENSORS_FTSTEUTATES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_HIH6130 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_HIH6130 new file mode 100644 index 000000000000..ee050b4acca3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_HIH6130 @@ -0,0 +1 @@ +# CONFIG_SENSORS_HIH6130 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_HS3001 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_HS3001 new file mode 100644 index 000000000000..035b60da4cd3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_HS3001 @@ -0,0 +1 @@ +# CONFIG_SENSORS_HS3001 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IBM_CFFPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IBM_CFFPS new file mode 100644 index 000000000000..a217d973e740 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IBM_CFFPS @@ -0,0 +1 @@ +# CONFIG_SENSORS_IBM_CFFPS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INA238 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INA238 new file mode 100644 index 000000000000..19a35ab442f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INA238 @@ -0,0 +1 @@ +# CONFIG_SENSORS_INA238 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INA3221 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INA3221 new file mode 100644 index 000000000000..21b6a947e3fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INA3221 @@ -0,0 +1 @@ +# CONFIG_SENSORS_INA3221 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INSPUR_IPSPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INSPUR_IPSPS new file mode 100644 index 
000000000000..ac589f45a46c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_INSPUR_IPSPS @@ -0,0 +1 @@ +# CONFIG_SENSORS_INSPUR_IPSPS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR35221 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR35221 new file mode 100644 index 000000000000..788fcfa05cdc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR35221 @@ -0,0 +1 @@ +# CONFIG_SENSORS_IR35221 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR36021 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR36021 new file mode 100644 index 000000000000..09c870fa43bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR36021 @@ -0,0 +1 @@ +# CONFIG_SENSORS_IR36021 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR38064 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR38064 new file mode 100644 index 000000000000..3ccb6f2bbfbf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IR38064 @@ -0,0 +1 @@ +# CONFIG_SENSORS_IR38064 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IRPS5401 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IRPS5401 new file mode 100644 index 000000000000..c16243880121 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_IRPS5401 @@ -0,0 +1 @@ +# CONFIG_SENSORS_IRPS5401 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ISL68137 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ISL68137 new file mode 100644 index 000000000000..b7586e0dc135 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_ISL68137 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ISL68137 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LT7182S b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LT7182S new file mode 100644 index 000000000000..8bb204de2d7e --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LT7182S @@ -0,0 +1 @@ +# CONFIG_SENSORS_LT7182S is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2947_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2947_I2C new file mode 100644 index 000000000000..c1a98db590e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2947_I2C @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC2947_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2947_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2947_SPI new file mode 100644 index 000000000000..9bfd69b12089 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2947_SPI @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC2947_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2990 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2990 new file mode 100644 index 000000000000..8e84521a9c73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2990 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC2990 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2992 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2992 new file mode 100644 index 000000000000..ea1d267ab8e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_LTC2992 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC2992 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX127 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX127 new file mode 100644 index 000000000000..2886282bde7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX127 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX127 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX15301 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX15301 new file mode 100644 index 000000000000..b76611b07e9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX15301 @@ -0,0 +1 @@ +# 
CONFIG_SENSORS_MAX15301 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX16601 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX16601 new file mode 100644 index 000000000000..4fa6afb577fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX16601 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX16601 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX20730 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX20730 new file mode 100644 index 000000000000..38f8a8dd9dbe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX20730 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX20730 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31722 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31722 new file mode 100644 index 000000000000..362fa9a5ad26 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31722 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX31722 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31730 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31730 new file mode 100644 index 000000000000..463fd89b12dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31730 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX31730 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31760 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31760 new file mode 100644 index 000000000000..f374857bd4f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31760 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX31760 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31785 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31785 new file mode 100644 index 000000000000..071991fe56f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX31785 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX31785 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX6620 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX6620 new file mode 100644 index 000000000000..9196a2cb2e70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX6620 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX6620 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX6621 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX6621 new file mode 100644 index 000000000000..cc3cdefc6e31 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MAX6621 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX6621 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MC34VR500 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MC34VR500 new file mode 100644 index 000000000000..0f1d0e6061e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MC34VR500 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MC34VR500 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP2888 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP2888 new file mode 100644 index 000000000000..b16ee9089de6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP2888 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MP2888 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP2975 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP2975 new file mode 100644 index 000000000000..018a1d2ea217 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP2975 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MP2975 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP5023 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP5023 new file mode 100644 index 000000000000..0cbd87152f9f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MP5023 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MP5023 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MPQ7932 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MPQ7932 
new file mode 100644 index 000000000000..4c84a460a900 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MPQ7932 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MPQ7932 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MR75203 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MR75203 new file mode 100644 index 000000000000..6edfde475b0c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_MR75203 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MR75203 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NCT6775_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NCT6775_I2C new file mode 100644 index 000000000000..663f31a268a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NCT6775_I2C @@ -0,0 +1 @@ +# CONFIG_SENSORS_NCT6775_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NPCM7XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NPCM7XX new file mode 100644 index 000000000000..5f3e62e2486d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NPCM7XX @@ -0,0 +1 @@ +# CONFIG_SENSORS_NPCM7XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NZXT_KRAKEN2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NZXT_KRAKEN2 new file mode 100644 index 000000000000..fac10c23da14 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NZXT_KRAKEN2 @@ -0,0 +1 @@ +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NZXT_SMART2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NZXT_SMART2 new file mode 100644 index 000000000000..f942ec4e0d3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_NZXT_SMART2 @@ -0,0 +1 @@ +# CONFIG_SENSORS_NZXT_SMART2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_OCC_P8_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_OCC_P8_I2C new file mode 100644 index 000000000000..2b2e556867f3 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_OCC_P8_I2C @@ -0,0 +1 @@ +# CONFIG_SENSORS_OCC_P8_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PIM4328 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PIM4328 new file mode 100644 index 000000000000..71f7cda75cc4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PIM4328 @@ -0,0 +1 @@ +# CONFIG_SENSORS_PIM4328 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PLI1209BC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PLI1209BC new file mode 100644 index 000000000000..672c2ae853ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PLI1209BC @@ -0,0 +1 @@ +# CONFIG_SENSORS_PLI1209BC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PM6764TR b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PM6764TR new file mode 100644 index 000000000000..0af0b3e7870e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PM6764TR @@ -0,0 +1 @@ +# CONFIG_SENSORS_PM6764TR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PXE1610 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PXE1610 new file mode 100644 index 000000000000..7d12a457c950 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_PXE1610 @@ -0,0 +1 @@ +# CONFIG_SENSORS_PXE1610 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_Q54SJ108A2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_Q54SJ108A2 new file mode 100644 index 000000000000..0033b87cbac9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_Q54SJ108A2 @@ -0,0 +1 @@ +# CONFIG_SENSORS_Q54SJ108A2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SBRMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SBRMI new file mode 100644 index 000000000000..ee2be310e7f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SBRMI @@ -0,0 +1 @@ +# 
CONFIG_SENSORS_SBRMI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SBTSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SBTSI new file mode 100644 index 000000000000..434ec0ad9b6e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SBTSI @@ -0,0 +1 @@ +# CONFIG_SENSORS_SBTSI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SHT3x b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SHT3x new file mode 100644 index 000000000000..487a3c998bda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SHT3x @@ -0,0 +1 @@ +# CONFIG_SENSORS_SHT3x is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SHT4x b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SHT4x new file mode 100644 index 000000000000..6ba610dcfce5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_SHT4x @@ -0,0 +1 @@ +# CONFIG_SENSORS_SHT4x is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_STPDDC60 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_STPDDC60 new file mode 100644 index 000000000000..d2c14b35d5d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_STPDDC60 @@ -0,0 +1 @@ +# CONFIG_SENSORS_STPDDC60 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_STTS751 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_STTS751 new file mode 100644 index 000000000000..f2f8006af281 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_STTS751 @@ -0,0 +1 @@ +# CONFIG_SENSORS_STTS751 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TC654 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TC654 new file mode 100644 index 000000000000..cd9dfa1518e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TC654 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TC654 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TDA38640 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TDA38640 new file mode 100644 index 000000000000..423510d94081 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TDA38640 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TDA38640 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP108 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP108 new file mode 100644 index 000000000000..5fc9166021b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP108 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TMP108 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP464 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP464 new file mode 100644 index 000000000000..ff2a270bcc67 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP464 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TMP464 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP513 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP513 new file mode 100644 index 000000000000..906f493fca45 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TMP513 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TMP513 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS23861 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS23861 new file mode 100644 index 000000000000..70f166c164e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS23861 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TPS23861 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS53679 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS53679 new file mode 100644 index 000000000000..861aa87d38c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS53679 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TPS53679 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS546D24 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS546D24 new file mode 100644 index 
000000000000..eac2bb021f04 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_TPS546D24 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TPS546D24 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_W83773G b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_W83773G new file mode 100644 index 000000000000..665b8bfb1e60 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_W83773G @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83773G is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_XDPE122 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_XDPE122 new file mode 100644 index 000000000000..4de67bfd0768 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_XDPE122 @@ -0,0 +1 @@ +# CONFIG_SENSORS_XDPE122 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_XDPE152 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_XDPE152 new file mode 100644 index 000000000000..b84e16fd152b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SENSORS_XDPE152 @@ -0,0 +1 @@ +# CONFIG_SENSORS_XDPE152 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250 new file mode 100644 index 000000000000..6896f6a62d0f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250 @@ -0,0 +1 @@ +CONFIG_SERIAL_8250=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_CONSOLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_CONSOLE new file mode 100644 index 000000000000..49dda6ea764d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_CONSOLE @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_CONSOLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DEPRECATED_OPTIONS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DEPRECATED_OPTIONS new file mode 100644 index 000000000000..bec6e2822713 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DEPRECATED_OPTIONS @@ -0,0 +1 @@ +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DETECT_IRQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DETECT_IRQ new file mode 100644 index 000000000000..3fc7f91eaad1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DETECT_IRQ @@ -0,0 +1 @@ +# CONFIG_SERIAL_8250_DETECT_IRQ is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DMA new file mode 100644 index 000000000000..333f0636a9db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DMA @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_DMA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DW b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DW new file mode 100644 index 000000000000..43894bea0c67 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DW @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_DW=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DWLIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DWLIB new file mode 100644 index 000000000000..3dcf20ed9b3e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_DWLIB @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_DWLIB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_EXAR b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_EXAR new file mode 100644 index 000000000000..3df024acdc5b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_EXAR @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_EXAR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_EXTENDED b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_EXTENDED new file mode 100644 index 000000000000..91881b19569e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_EXTENDED @@ -0,0 +1 @@ 
+CONFIG_SERIAL_8250_EXTENDED=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_FINTEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_FINTEK new file mode 100644 index 000000000000..b6cf08b714de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_FINTEK @@ -0,0 +1 @@ +# CONFIG_SERIAL_8250_FINTEK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_MANY_PORTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_MANY_PORTS new file mode 100644 index 000000000000..ded898381acc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_MANY_PORTS @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_MANY_PORTS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_NR_UARTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_NR_UARTS new file mode 100644 index 000000000000..c30ccb26bb81 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_NR_UARTS @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_NR_UARTS=32 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCI new file mode 100644 index 000000000000..c0ac5637fef6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCI @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_PCI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCI1XXXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCI1XXXX new file mode 100644 index 000000000000..134ab6721ff3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCI1XXXX @@ -0,0 +1 @@ +# CONFIG_SERIAL_8250_PCI1XXXX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCILIB b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCILIB new file mode 100644 index 000000000000..25b02aa103a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PCILIB @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_PCILIB=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PERICOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PERICOM new file mode 100644 index 000000000000..7bd924eace19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PERICOM @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_PERICOM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PNP b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PNP new file mode 100644 index 000000000000..09e4c12bd6cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_PNP @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_PNP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_RSA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_RSA new file mode 100644 index 000000000000..2e1143ad48d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_RSA @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_RSA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_RUNTIME_UARTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_RUNTIME_UARTS new file mode 100644 index 000000000000..2bde688add54 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_RUNTIME_UARTS @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_SHARE_IRQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_SHARE_IRQ new file mode 100644 index 000000000000..d346f0d47003 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_8250_SHARE_IRQ @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_SHARE_IRQ=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_ALTERA_JTAGUART b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_ALTERA_JTAGUART new file mode 100644 index 000000000000..b21939fa7602 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_ALTERA_JTAGUART @@ -0,0 +1 @@ +# CONFIG_SERIAL_ALTERA_JTAGUART is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_ALTERA_UART 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_ALTERA_UART new file mode 100644 index 000000000000..a4c4bd74281a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_ALTERA_UART @@ -0,0 +1 @@ +# CONFIG_SERIAL_ALTERA_UART is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_CORE new file mode 100644 index 000000000000..a9e727ef6fc1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_CORE @@ -0,0 +1 @@ +CONFIG_SERIAL_CORE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_CORE_CONSOLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_CORE_CONSOLE new file mode 100644 index 000000000000..7a5650c97f48 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_CORE_CONSOLE @@ -0,0 +1 @@ +CONFIG_SERIAL_CORE_CONSOLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_DEV_BUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_DEV_BUS new file mode 100644 index 000000000000..4390febac668 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_DEV_BUS @@ -0,0 +1 @@ +# CONFIG_SERIAL_DEV_BUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_EARLYCON b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_EARLYCON new file mode 100644 index 000000000000..387f53de3e97 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_EARLYCON @@ -0,0 +1 @@ +CONFIG_SERIAL_EARLYCON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_FSL_LINFLEXUART b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_FSL_LINFLEXUART new file mode 100644 index 000000000000..68d716e9dea2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_FSL_LINFLEXUART @@ -0,0 +1 @@ +# CONFIG_SERIAL_FSL_LINFLEXUART is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_FSL_LPUART b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_FSL_LPUART new file mode 100644 index 
000000000000..345c6ea2aedb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_FSL_LPUART @@ -0,0 +1 @@ +# CONFIG_SERIAL_FSL_LPUART is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_KGDB_NMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_KGDB_NMI new file mode 100644 index 000000000000..617006684052 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_KGDB_NMI @@ -0,0 +1 @@ +# CONFIG_SERIAL_KGDB_NMI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MAX3100 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MAX3100 new file mode 100644 index 000000000000..5fb197ebc3c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MAX3100 @@ -0,0 +1 @@ +# CONFIG_SERIAL_MAX3100 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MAX310X b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MAX310X new file mode 100644 index 000000000000..10f39f7fa3bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MAX310X @@ -0,0 +1 @@ +# CONFIG_SERIAL_MAX310X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MCTRL_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MCTRL_GPIO new file mode 100644 index 000000000000..32c2ac585277 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_MCTRL_GPIO @@ -0,0 +1 @@ +CONFIG_SERIAL_MCTRL_GPIO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_NONSTANDARD b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_NONSTANDARD new file mode 100644 index 000000000000..f371b2fdcf04 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_NONSTANDARD @@ -0,0 +1 @@ +CONFIG_SERIAL_NONSTANDARD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_RP2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_RP2 new file mode 100644 index 000000000000..2645fe9c765c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_RP2 @@ -0,0 +1 @@ +# 
CONFIG_SERIAL_RP2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SC16IS7XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SC16IS7XX new file mode 100644 index 000000000000..29deacf2a23f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SC16IS7XX @@ -0,0 +1 @@ +# CONFIG_SERIAL_SC16IS7XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SCCNXP b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SCCNXP new file mode 100644 index 000000000000..1ef04c2082dc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SCCNXP @@ -0,0 +1 @@ +# CONFIG_SERIAL_SCCNXP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SPRD b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SPRD new file mode 100644 index 000000000000..c1831502fe74 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_SPRD @@ -0,0 +1 @@ +# CONFIG_SERIAL_SPRD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_UARTLITE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_UARTLITE new file mode 100644 index 000000000000..5c730f9cf93c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIAL_UARTLITE @@ -0,0 +1 @@ +# CONFIG_SERIAL_UARTLITE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_GPIO_PS2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_GPIO_PS2 new file mode 100644 index 000000000000..22c1adbfbf03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_GPIO_PS2 @@ -0,0 +1 @@ +# CONFIG_SERIO_GPIO_PS2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_PCIPS2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_PCIPS2 new file mode 100644 index 000000000000..81c68994feb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_PCIPS2 @@ -0,0 +1 @@ +# CONFIG_SERIO_PCIPS2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_PS2MULT 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_PS2MULT new file mode 100644 index 000000000000..ecaa8cce173b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SERIO_PS2MULT @@ -0,0 +1 @@ +# CONFIG_SERIO_PS2MULT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SFC_SIENA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SFC_SIENA new file mode 100644 index 000000000000..345587e7256c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SFC_SIENA @@ -0,0 +1 @@ +# CONFIG_SFC_SIENA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SFP b/anolis/configs/L2-OPTIONAL/default/CONFIG_SFP new file mode 100644 index 000000000000..db57db12e9df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SFP @@ -0,0 +1 @@ +CONFIG_SFP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SF_PDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SF_PDMA new file mode 100644 index 000000000000..87ab2c1f07c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SF_PDMA @@ -0,0 +1 @@ +# CONFIG_SF_PDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SGL_ALLOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SGL_ALLOC new file mode 100644 index 000000000000..299ad6f71368 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SGL_ALLOC @@ -0,0 +1 @@ +CONFIG_SGL_ALLOC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SG_POOL b/anolis/configs/L2-OPTIONAL/default/CONFIG_SG_POOL new file mode 100644 index 000000000000..c2cfa29ce56a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SG_POOL @@ -0,0 +1 @@ +CONFIG_SG_POOL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SHRINKER_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_SHRINKER_DEBUG new file mode 100644 index 000000000000..1f64821da2a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SHRINKER_DEBUG @@ -0,0 +1 @@ +# CONFIG_SHRINKER_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SIGNATURE 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SIGNATURE new file mode 100644 index 000000000000..cfefd18705cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SIGNATURE @@ -0,0 +1 @@ +CONFIG_SIGNATURE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SIOX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SIOX new file mode 100644 index 000000000000..89578e7ae70c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SIOX @@ -0,0 +1 @@ +# CONFIG_SIOX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SKB_EXTENSIONS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SKB_EXTENSIONS new file mode 100644 index 000000000000..7dd2c37be080 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SKB_EXTENSIONS @@ -0,0 +1 @@ +CONFIG_SKB_EXTENSIONS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SLAB_DEPRECATED b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLAB_DEPRECATED new file mode 100644 index 000000000000..5b0f444ee136 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLAB_DEPRECATED @@ -0,0 +1 @@ +# CONFIG_SLAB_DEPRECATED is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SLHC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLHC new file mode 100644 index 000000000000..cb09fa7d2f09 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLHC @@ -0,0 +1 @@ +CONFIG_SLHC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIMBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIMBUS new file mode 100644 index 000000000000..63141c29d4c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIMBUS @@ -0,0 +1 @@ +# CONFIG_SLIMBUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_COMPRESSED b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_COMPRESSED new file mode 100644 index 000000000000..a67a13f7403a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_COMPRESSED @@ -0,0 +1 @@ +CONFIG_SLIP_COMPRESSED=y diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_MODE_SLIP6 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_MODE_SLIP6 new file mode 100644 index 000000000000..8ddc7389f0df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_MODE_SLIP6 @@ -0,0 +1 @@ +# CONFIG_SLIP_MODE_SLIP6 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_SMART b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_SMART new file mode 100644 index 000000000000..b6e3b7cbadda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SLIP_SMART @@ -0,0 +1 @@ +CONFIG_SLIP_SMART=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SMARTJOYPLUS_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMARTJOYPLUS_FF new file mode 100644 index 000000000000..8644453aab07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMARTJOYPLUS_FF @@ -0,0 +1 @@ +# CONFIG_SMARTJOYPLUS_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SMBFS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMBFS new file mode 100644 index 000000000000..6211f79279b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMBFS @@ -0,0 +1 @@ +CONFIG_SMBFS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SMB_SERVER b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMB_SERVER new file mode 100644 index 000000000000..0cb54ecd7dca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMB_SERVER @@ -0,0 +1 @@ +# CONFIG_SMB_SERVER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SMSC_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMSC_PHY new file mode 100644 index 000000000000..12a6f39b18b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SMSC_PHY @@ -0,0 +1 @@ +CONFIG_SMSC_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SM_FTL b/anolis/configs/L2-OPTIONAL/default/CONFIG_SM_FTL new file mode 100644 index 000000000000..da9ab33b453b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SM_FTL @@ -0,0 +1 @@ +# CONFIG_SM_FTL 
is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_CGROUP_DATA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_CGROUP_DATA new file mode 100644 index 000000000000..4c3788456e67 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_CGROUP_DATA @@ -0,0 +1 @@ +CONFIG_SOCK_CGROUP_DATA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_RX_QUEUE_MAPPING b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_RX_QUEUE_MAPPING new file mode 100644 index 000000000000..7c6f5f44fcbf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_RX_QUEUE_MAPPING @@ -0,0 +1 @@ +CONFIG_SOCK_RX_QUEUE_MAPPING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_VALIDATE_XMIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_VALIDATE_XMIT new file mode 100644 index 000000000000..5645b5adfbe0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOCK_VALIDATE_XMIT @@ -0,0 +1 @@ +CONFIG_SOCK_VALIDATE_XMIT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SOC_TI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOC_TI new file mode 100644 index 000000000000..88248080d408 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOC_TI @@ -0,0 +1 @@ +# CONFIG_SOC_TI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SOFTIRQ_ON_OWN_STACK b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOFTIRQ_ON_OWN_STACK new file mode 100644 index 000000000000..18f68f67c552 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOFTIRQ_ON_OWN_STACK @@ -0,0 +1 @@ +CONFIG_SOFTIRQ_ON_OWN_STACK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SONY_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_SONY_FF new file mode 100644 index 000000000000..f3a327bba2ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SONY_FF @@ -0,0 +1 @@ +CONFIG_SONY_FF=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SOUND b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOUND new file mode 100644 index 000000000000..b62f24440c36 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOUND @@ -0,0 +1 @@ +CONFIG_SOUND=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SOUNDWIRE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOUNDWIRE new file mode 100644 index 000000000000..7ddeacfd9956 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SOUNDWIRE @@ -0,0 +1 @@ +# CONFIG_SOUNDWIRE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSEMEM_EXTREME b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSEMEM_EXTREME new file mode 100644 index 000000000000..0e87090ab442 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSEMEM_EXTREME @@ -0,0 +1 @@ +CONFIG_SPARSEMEM_EXTREME=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSEMEM_VMEMMAP_ENABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSEMEM_VMEMMAP_ENABLE new file mode 100644 index 000000000000..f7d1d81f20c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSEMEM_VMEMMAP_ENABLE @@ -0,0 +1 @@ +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSE_IRQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSE_IRQ new file mode 100644 index 000000000000..943900f6318e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPARSE_IRQ @@ -0,0 +1 @@ +CONFIG_SPARSE_IRQ=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_ALTERA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_ALTERA new file mode 100644 index 000000000000..2f6b419d6c74 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_ALTERA @@ -0,0 +1 @@ +# CONFIG_SPI_ALTERA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AMD b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AMD new file mode 100644 index 000000000000..0dc059dce496 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AMD @@ -0,0 +1 @@ +# CONFIG_SPI_AMD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AX88796C 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AX88796C new file mode 100644 index 000000000000..c9ba73ea33c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AX88796C @@ -0,0 +1 @@ +# CONFIG_SPI_AX88796C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AXI_SPI_ENGINE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AXI_SPI_ENGINE new file mode 100644 index 000000000000..2f54b92cdc1f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_AXI_SPI_ENGINE @@ -0,0 +1 @@ +# CONFIG_SPI_AXI_SPI_ENGINE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_BITBANG b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_BITBANG new file mode 100644 index 000000000000..07bad2e562cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_BITBANG @@ -0,0 +1 @@ +# CONFIG_SPI_BITBANG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_DYNAMIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_DYNAMIC new file mode 100644 index 000000000000..516a1b41fa98 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_DYNAMIC @@ -0,0 +1 @@ +CONFIG_SPI_DYNAMIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_GPIO new file mode 100644 index 000000000000..ebdaac3fe2f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_GPIO @@ -0,0 +1 @@ +# CONFIG_SPI_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_LOOPBACK_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_LOOPBACK_TEST new file mode 100644 index 000000000000..c3e9782b13cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_LOOPBACK_TEST @@ -0,0 +1 @@ +# CONFIG_SPI_LOOPBACK_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MASTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MASTER new file mode 100644 index 000000000000..63f789d5b226 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MASTER @@ -0,0 +1 @@ +CONFIG_SPI_MASTER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MEM b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MEM new file mode 100644 index 000000000000..896082297805 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MEM @@ -0,0 +1 @@ +# CONFIG_SPI_MEM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MICROCHIP_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MICROCHIP_CORE new file mode 100644 index 000000000000..38b69b2031de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MICROCHIP_CORE @@ -0,0 +1 @@ +# CONFIG_SPI_MICROCHIP_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MICROCHIP_CORE_QSPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MICROCHIP_CORE_QSPI new file mode 100644 index 000000000000..23bd027bb0f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MICROCHIP_CORE_QSPI @@ -0,0 +1 @@ +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MUX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MUX new file mode 100644 index 000000000000..d1caa5aa793f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MUX @@ -0,0 +1 @@ +# CONFIG_SPI_MUX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MXIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MXIC new file mode 100644 index 000000000000..2564a03db4c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MXIC @@ -0,0 +1 @@ +# CONFIG_SPI_MXIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_OC_TINY b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_OC_TINY new file mode 100644 index 000000000000..742e9abe78b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_OC_TINY @@ -0,0 +1 @@ +# CONFIG_SPI_OC_TINY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_PCI1XXXX 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_PCI1XXXX new file mode 100644 index 000000000000..356a3951461f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_PCI1XXXX @@ -0,0 +1 @@ +# CONFIG_SPI_PCI1XXXX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_PXA2XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_PXA2XX new file mode 100644 index 000000000000..266e686b8459 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_PXA2XX @@ -0,0 +1 @@ +# CONFIG_SPI_PXA2XX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SC18IS602 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SC18IS602 new file mode 100644 index 000000000000..5d8f72c23123 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SC18IS602 @@ -0,0 +1 @@ +# CONFIG_SPI_SC18IS602 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SIFIVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SIFIVE new file mode 100644 index 000000000000..f24362cbf756 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SIFIVE @@ -0,0 +1 @@ +# CONFIG_SPI_SIFIVE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SLAVE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SLAVE new file mode 100644 index 000000000000..663aaaaf305b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SLAVE @@ -0,0 +1 @@ +# CONFIG_SPI_SLAVE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SPIDEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SPIDEV new file mode 100644 index 000000000000..c785b6d7ed01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_SPIDEV @@ -0,0 +1 @@ +# CONFIG_SPI_SPIDEV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_TLE62X0 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_TLE62X0 new file mode 100644 index 000000000000..f9c7a26af8a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_TLE62X0 @@ -0,0 +1 @@ +# CONFIG_SPI_TLE62X0 
is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_XCOMM b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_XCOMM new file mode 100644 index 000000000000..c3da8e9c3e8b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_XCOMM @@ -0,0 +1 @@ +# CONFIG_SPI_XCOMM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_XILINX b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_XILINX new file mode 100644 index 000000000000..47ea00e62a9f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_XILINX @@ -0,0 +1 @@ +# CONFIG_SPI_XILINX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPLIT_PTLOCK_CPUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPLIT_PTLOCK_CPUS new file mode 100644 index 000000000000..412fc198ef2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPLIT_PTLOCK_CPUS @@ -0,0 +1 @@ +CONFIG_SPLIT_PTLOCK_CPUS=4 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPMI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPMI new file mode 100644 index 000000000000..35e1ee706a11 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SPMI @@ -0,0 +1 @@ +# CONFIG_SPMI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT new file mode 100644 index 000000000000..2b95cc6b9b39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT @@ -0,0 +1 @@ +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI new file mode 100644 index 000000000000..8a049d3d94a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI @@ -0,0 +1 @@ +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU new file mode 100644 index 000000000000..2963ff2cb1b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU @@ -0,0 +1 @@ +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE new file mode 100644 index 000000000000..638c3630aecf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE @@ -0,0 +1 @@ +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SRAM b/anolis/configs/L2-OPTIONAL/default/CONFIG_SRAM new file mode 100644 index 000000000000..29a3ef2ac378 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SRAM @@ -0,0 +1 @@ +# CONFIG_SRAM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SSB b/anolis/configs/L2-OPTIONAL/default/CONFIG_SSB new file mode 100644 index 000000000000..7b1e39107257 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SSB @@ -0,0 +1 @@ +# CONFIG_SSB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SSB_POSSIBLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SSB_POSSIBLE new file mode 100644 index 000000000000..90707d7854fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SSB_POSSIBLE @@ -0,0 +1 @@ +CONFIG_SSB_POSSIBLE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SSFDC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SSFDC new file mode 100644 index 000000000000..29dfb19eddaf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SSFDC @@ -0,0 +1 @@ +# CONFIG_SSFDC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_STACKDEPOT b/anolis/configs/L2-OPTIONAL/default/CONFIG_STACKDEPOT new file mode 100644 index 
000000000000..49872bcb2fe6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_STACKDEPOT @@ -0,0 +1 @@ +CONFIG_STACKDEPOT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_STACKTRACE_BUILD_ID b/anolis/configs/L2-OPTIONAL/default/CONFIG_STACKTRACE_BUILD_ID new file mode 100644 index 000000000000..832ef97a0634 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_STACKTRACE_BUILD_ID @@ -0,0 +1 @@ +# CONFIG_STACKTRACE_BUILD_ID is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_STANDALONE b/anolis/configs/L2-OPTIONAL/default/CONFIG_STANDALONE new file mode 100644 index 000000000000..c3d25b3d6731 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_STANDALONE @@ -0,0 +1 @@ +CONFIG_STANDALONE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_STE10XP b/anolis/configs/L2-OPTIONAL/default/CONFIG_STE10XP new file mode 100644 index 000000000000..e6a1c2bcacb0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_STE10XP @@ -0,0 +1 @@ +CONFIG_STE10XP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_STP b/anolis/configs/L2-OPTIONAL/default/CONFIG_STP new file mode 100644 index 000000000000..7c878c8f9c0f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_STP @@ -0,0 +1 @@ +CONFIG_STP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_STRING_SELFTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_STRING_SELFTEST new file mode 100644 index 000000000000..dbff6d7faa3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_STRING_SELFTEST @@ -0,0 +1 @@ +# CONFIG_STRING_SELFTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SUNRPC_BACKCHANNEL b/anolis/configs/L2-OPTIONAL/default/CONFIG_SUNRPC_BACKCHANNEL new file mode 100644 index 000000000000..9dc73284e4f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SUNRPC_BACKCHANNEL @@ -0,0 +1 @@ +CONFIG_SUNRPC_BACKCHANNEL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SUNRPC_GSS 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SUNRPC_GSS new file mode 100644 index 000000000000..4a4e93eed9c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SUNRPC_GSS @@ -0,0 +1 @@ +CONFIG_SUNRPC_GSS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_3_POWER_OPREGION b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_3_POWER_OPREGION new file mode 100644 index 000000000000..aaa37ad19c5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_3_POWER_OPREGION @@ -0,0 +1 @@ +# CONFIG_SURFACE_3_POWER_OPREGION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_GPE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_GPE new file mode 100644 index 000000000000..160153290de5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_GPE @@ -0,0 +1 @@ +# CONFIG_SURFACE_GPE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_HOTPLUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_HOTPLUG new file mode 100644 index 000000000000..f303a3f6e90b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_HOTPLUG @@ -0,0 +1 @@ +# CONFIG_SURFACE_HOTPLUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_PLATFORMS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_PLATFORMS new file mode 100644 index 000000000000..4cc37929c7e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_PLATFORMS @@ -0,0 +1 @@ +CONFIG_SURFACE_PLATFORMS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_PRO3_BUTTON b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_PRO3_BUTTON new file mode 100644 index 000000000000..b649d5976e66 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SURFACE_PRO3_BUTTON @@ -0,0 +1 @@ +# CONFIG_SURFACE_PRO3_BUTTON is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SWIOTLB b/anolis/configs/L2-OPTIONAL/default/CONFIG_SWIOTLB new file mode 100644 index 000000000000..5405b65b4d68 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SWIOTLB @@ -0,0 +1 @@ +CONFIG_SWIOTLB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SWIOTLB_DYNAMIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SWIOTLB_DYNAMIC new file mode 100644 index 000000000000..2e7793f60007 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SWIOTLB_DYNAMIC @@ -0,0 +1 @@ +# CONFIG_SWIOTLB_DYNAMIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SWPHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_SWPHY new file mode 100644 index 000000000000..32139c70bf01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SWPHY @@ -0,0 +1 @@ +CONFIG_SWPHY=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SW_SYNC b/anolis/configs/L2-OPTIONAL/default/CONFIG_SW_SYNC new file mode 100644 index 000000000000..6afd35b6ba1c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SW_SYNC @@ -0,0 +1 @@ +# CONFIG_SW_SYNC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SYNTH_EVENT_GEN_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYNTH_EVENT_GEN_TEST new file mode 100644 index 000000000000..23019ea4cf6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYNTH_EVENT_GEN_TEST @@ -0,0 +1 @@ +# CONFIG_SYNTH_EVENT_GEN_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSCTL_EXCEPTION_TRACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSCTL_EXCEPTION_TRACE new file mode 100644 index 000000000000..a8beb0f1bc77 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSCTL_EXCEPTION_TRACE @@ -0,0 +1 @@ +CONFIG_SYSCTL_EXCEPTION_TRACE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSFB b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSFB new file mode 100644 index 000000000000..880e56bea101 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSFB @@ -0,0 +1 @@ +CONFIG_SYSFB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE new file mode 100644 index 000000000000..f56884877ee3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE @@ -0,0 +1 @@ +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSV68_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSV68_PARTITION new file mode 100644 index 000000000000..90936c779bda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSV68_PARTITION @@ -0,0 +1 @@ +# CONFIG_SYSV68_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSVIPC_COMPAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSVIPC_COMPAT new file mode 100644 index 000000000000..6adabb910295 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSVIPC_COMPAT @@ -0,0 +1 @@ +CONFIG_SYSVIPC_COMPAT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSV_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSV_FS new file mode 100644 index 000000000000..1304fb157f36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SYSV_FS @@ -0,0 +1 @@ +# CONFIG_SYSV_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RCU b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RCU new file mode 100644 index 000000000000..fb944bc3b88e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RCU @@ -0,0 +1 @@ +CONFIG_TASKS_RCU=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RCU_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RCU_GENERIC new file mode 100644 index 000000000000..0cdcfef08436 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RCU_GENERIC @@ -0,0 +1 @@ +CONFIG_TASKS_RCU_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RUDE_RCU b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RUDE_RCU new file mode 100644 index 000000000000..e3ac552ae7a9 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_RUDE_RCU @@ -0,0 +1 @@ +CONFIG_TASKS_RUDE_RCU=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_TRACE_RCU b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_TRACE_RCU new file mode 100644 index 000000000000..72d905396b7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TASKS_TRACE_RCU @@ -0,0 +1 @@ +CONFIG_TASKS_TRACE_RCU=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C new file mode 100644 index 000000000000..7b8f0996b6d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C @@ -0,0 +1 @@ +# CONFIG_TCG_TIS_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C_CR50 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C_CR50 new file mode 100644 index 000000000000..78e78b8e701b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_I2C_CR50 @@ -0,0 +1 @@ +# CONFIG_TCG_TIS_I2C_CR50 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_ST33ZP24_SPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_ST33ZP24_SPI new file mode 100644 index 000000000000..5d54489c9fe9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_TIS_ST33ZP24_SPI @@ -0,0 +1 @@ +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_VTPM_PROXY b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_VTPM_PROXY new file mode 100644 index 000000000000..c5a7ebd133f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCG_VTPM_PROXY @@ -0,0 +1 @@ +# CONFIG_TCG_VTPM_PROXY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TCM_FC b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCM_FC new file mode 100644 index 000000000000..ecef5ff8b170 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCM_FC @@ -0,0 +1 @@ +# CONFIG_TCM_FC is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_TCM_QLA2XXX b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCM_QLA2XXX new file mode 100644 index 000000000000..585dff63159d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TCM_QLA2XXX @@ -0,0 +1 @@ +# CONFIG_TCM_QLA2XXX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TERANETICS_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_TERANETICS_PHY new file mode 100644 index 000000000000..3f0ce3f88c35 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TERANETICS_PHY @@ -0,0 +1 @@ +CONFIG_TERANETICS_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_ASYNC_DRIVER_PROBE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_ASYNC_DRIVER_PROBE new file mode 100644 index 000000000000..2e3d64285755 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_ASYNC_DRIVER_PROBE @@ -0,0 +1 @@ +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BITMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BITMAP new file mode 100644 index 000000000000..444f2cff03a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BITMAP @@ -0,0 +1 @@ +# CONFIG_TEST_BITMAP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BITOPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BITOPS new file mode 100644 index 000000000000..245f24428550 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BITOPS @@ -0,0 +1 @@ +# CONFIG_TEST_BITOPS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BLACKHOLE_DEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BLACKHOLE_DEV new file mode 100644 index 000000000000..6243b331d396 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_BLACKHOLE_DEV @@ -0,0 +1 @@ +# CONFIG_TEST_BLACKHOLE_DEV is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DHRY b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DHRY new file mode 100644 index 
000000000000..39a3905198c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DHRY @@ -0,0 +1 @@ +# CONFIG_TEST_DHRY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DIV64 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DIV64 new file mode 100644 index 000000000000..fa139eb8d387 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DIV64 @@ -0,0 +1 @@ +# CONFIG_TEST_DIV64 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DYNAMIC_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DYNAMIC_DEBUG new file mode 100644 index 000000000000..6577d7d78d11 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_DYNAMIC_DEBUG @@ -0,0 +1 @@ +# CONFIG_TEST_DYNAMIC_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_FIRMWARE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_FIRMWARE new file mode 100644 index 000000000000..e59302889503 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_FIRMWARE @@ -0,0 +1 @@ +# CONFIG_TEST_FIRMWARE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_FREE_PAGES b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_FREE_PAGES new file mode 100644 index 000000000000..246fdf83a463 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_FREE_PAGES @@ -0,0 +1 @@ +# CONFIG_TEST_FREE_PAGES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_HEXDUMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_HEXDUMP new file mode 100644 index 000000000000..7a62932d91ad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_HEXDUMP @@ -0,0 +1 @@ +# CONFIG_TEST_HEXDUMP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_IDA b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_IDA new file mode 100644 index 000000000000..4d54b4522c82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_IDA @@ -0,0 +1 @@ +# CONFIG_TEST_IDA is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_KMOD b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_KMOD new file mode 100644 index 000000000000..59165b345a76 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_KMOD @@ -0,0 +1 @@ +# CONFIG_TEST_KMOD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_KSTRTOX b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_KSTRTOX new file mode 100644 index 000000000000..e85f83bc9d88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_KSTRTOX @@ -0,0 +1 @@ +CONFIG_TEST_KSTRTOX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_LKM b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_LKM new file mode 100644 index 000000000000..b52f756d8d39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_LKM @@ -0,0 +1 @@ +# CONFIG_TEST_LKM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_LOCKUP b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_LOCKUP new file mode 100644 index 000000000000..bdd84b4fbbcd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_LOCKUP @@ -0,0 +1 @@ +# CONFIG_TEST_LOCKUP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MAPLE_TREE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MAPLE_TREE new file mode 100644 index 000000000000..c0d4771b529d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MAPLE_TREE @@ -0,0 +1 @@ +# CONFIG_TEST_MAPLE_TREE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MEMCAT_P b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MEMCAT_P new file mode 100644 index 000000000000..41b9190fae37 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MEMCAT_P @@ -0,0 +1 @@ +# CONFIG_TEST_MEMCAT_P is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MEMINIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MEMINIT new file mode 100644 index 000000000000..649e9c857c4f --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MEMINIT @@ -0,0 +1 @@ +# CONFIG_TEST_MEMINIT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MIN_HEAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MIN_HEAP new file mode 100644 index 000000000000..3bafbc63608a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_MIN_HEAP @@ -0,0 +1 @@ +# CONFIG_TEST_MIN_HEAP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_OBJAGG b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_OBJAGG new file mode 100644 index 000000000000..37ec47bee303 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_OBJAGG @@ -0,0 +1 @@ +# CONFIG_TEST_OBJAGG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_PARMAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_PARMAN new file mode 100644 index 000000000000..2aade4293141 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_PARMAN @@ -0,0 +1 @@ +# CONFIG_TEST_PARMAN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_POWER b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_POWER new file mode 100644 index 000000000000..64e229a69694 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_POWER @@ -0,0 +1 @@ +# CONFIG_TEST_POWER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_PRINTF b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_PRINTF new file mode 100644 index 000000000000..5b3c2da98845 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_PRINTF @@ -0,0 +1 @@ +# CONFIG_TEST_PRINTF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_REF_TRACKER b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_REF_TRACKER new file mode 100644 index 000000000000..e2b4aa20417d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_REF_TRACKER @@ -0,0 +1 @@ +# CONFIG_TEST_REF_TRACKER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_RHASHTABLE 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_RHASHTABLE new file mode 100644 index 000000000000..5d9f7d2900f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_RHASHTABLE @@ -0,0 +1 @@ +# CONFIG_TEST_RHASHTABLE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_SCANF b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_SCANF new file mode 100644 index 000000000000..d160b6ef4dd5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_SCANF @@ -0,0 +1 @@ +# CONFIG_TEST_SCANF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_STATIC_KEYS b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_STATIC_KEYS new file mode 100644 index 000000000000..1bf36ae82fb8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_STATIC_KEYS @@ -0,0 +1 @@ +# CONFIG_TEST_STATIC_KEYS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_STRING_HELPERS b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_STRING_HELPERS new file mode 100644 index 000000000000..b125615670a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_STRING_HELPERS @@ -0,0 +1 @@ +# CONFIG_TEST_STRING_HELPERS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_SYSCTL b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_SYSCTL new file mode 100644 index 000000000000..f81589ea4698 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_SYSCTL @@ -0,0 +1 @@ +# CONFIG_TEST_SYSCTL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_UDELAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_UDELAY new file mode 100644 index 000000000000..95fd4596f711 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_UDELAY @@ -0,0 +1 @@ +# CONFIG_TEST_UDELAY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_USER_COPY b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_USER_COPY new file mode 100644 index 000000000000..ba777e177fd2 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_USER_COPY @@ -0,0 +1 @@ +# CONFIG_TEST_USER_COPY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_UUID b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_UUID new file mode 100644 index 000000000000..434dbe1004a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_UUID @@ -0,0 +1 @@ +# CONFIG_TEST_UUID is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_VMALLOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_VMALLOC new file mode 100644 index 000000000000..35c691714c5f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_VMALLOC @@ -0,0 +1 @@ +# CONFIG_TEST_VMALLOC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_XARRAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_XARRAY new file mode 100644 index 000000000000..327e557319aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_XARRAY @@ -0,0 +1 @@ +# CONFIG_TEST_XARRAY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH new file mode 100644 index 000000000000..035fa28850a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH @@ -0,0 +1 @@ +CONFIG_TEXTSEARCH=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_BM b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_BM new file mode 100644 index 000000000000..6be770b24b87 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_BM @@ -0,0 +1 @@ +CONFIG_TEXTSEARCH_BM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_FSM b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_FSM new file mode 100644 index 000000000000..b08348a22d9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_FSM @@ -0,0 +1 @@ +CONFIG_TEXTSEARCH_FSM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_KMP 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_KMP new file mode 100644 index 000000000000..2961a652465e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEXTSEARCH_KMP @@ -0,0 +1 @@ +CONFIG_TEXTSEARCH_KMP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_THERMAL_EMULATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_THERMAL_EMULATION new file mode 100644 index 000000000000..230517ec2f76 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_THERMAL_EMULATION @@ -0,0 +1 @@ +# CONFIG_THERMAL_EMULATION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_THERMAL_STATISTICS b/anolis/configs/L2-OPTIONAL/default/CONFIG_THERMAL_STATISTICS new file mode 100644 index 000000000000..d375534fe253 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_THERMAL_STATISTICS @@ -0,0 +1 @@ +# CONFIG_THERMAL_STATISTICS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_THP_SWAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_THP_SWAP new file mode 100644 index 000000000000..b3e1926b376a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_THP_SWAP @@ -0,0 +1 @@ +CONFIG_THP_SWAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_THREAD_INFO_IN_TASK b/anolis/configs/L2-OPTIONAL/default/CONFIG_THREAD_INFO_IN_TASK new file mode 100644 index 000000000000..e73b49f63a76 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_THREAD_INFO_IN_TASK @@ -0,0 +1 @@ +CONFIG_THREAD_INFO_IN_TASK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_THRUSTMASTER_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_THRUSTMASTER_FF new file mode 100644 index 000000000000..c2c1106a61db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_THRUSTMASTER_FF @@ -0,0 +1 @@ +# CONFIG_THRUSTMASTER_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TICK_ONESHOT b/anolis/configs/L2-OPTIONAL/default/CONFIG_TICK_ONESHOT new file mode 100644 index 000000000000..96a94b70c496 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_TICK_ONESHOT @@ -0,0 +1 @@ +CONFIG_TICK_ONESHOT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TIFM_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIFM_CORE new file mode 100644 index 000000000000..21d8210ff2cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIFM_CORE @@ -0,0 +1 @@ +CONFIG_TIFM_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_HX8357D b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_HX8357D new file mode 100644 index 000000000000..b6034f2f3698 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_HX8357D @@ -0,0 +1 @@ +# CONFIG_TINYDRM_HX8357D is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9163 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9163 new file mode 100644 index 000000000000..2a0b903c4dd3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9163 @@ -0,0 +1 @@ +# CONFIG_TINYDRM_ILI9163 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9225 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9225 new file mode 100644 index 000000000000..1371c3f6e421 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9225 @@ -0,0 +1 @@ +# CONFIG_TINYDRM_ILI9225 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9341 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9341 new file mode 100644 index 000000000000..d0fdddaeb2fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9341 @@ -0,0 +1 @@ +# CONFIG_TINYDRM_ILI9341 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9486 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9486 new file mode 100644 index 000000000000..0728f544a3e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ILI9486 @@ -0,0 +1 @@ +# CONFIG_TINYDRM_ILI9486 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_MI0283QT b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_MI0283QT new file mode 100644 index 000000000000..fb0b5f045e3e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_MI0283QT @@ -0,0 +1 @@ +# CONFIG_TINYDRM_MI0283QT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_REPAPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_REPAPER new file mode 100644 index 000000000000..c4d2874faaa5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_REPAPER @@ -0,0 +1 @@ +# CONFIG_TINYDRM_REPAPER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ST7586 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ST7586 new file mode 100644 index 000000000000..2b9e29f63216 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ST7586 @@ -0,0 +1 @@ +# CONFIG_TINYDRM_ST7586 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ST7735R b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ST7735R new file mode 100644 index 000000000000..365910ba1339 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TINYDRM_ST7735R @@ -0,0 +1 @@ +# CONFIG_TINYDRM_ST7735R is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC new file mode 100644 index 000000000000..764e2085e379 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC @@ -0,0 +1 @@ +CONFIG_TIPC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_CRYPTO b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_CRYPTO new file mode 100644 index 000000000000..73824dd3483b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_CRYPTO @@ -0,0 +1 @@ +CONFIG_TIPC_CRYPTO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_DIAG b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_DIAG new file mode 100644 index 000000000000..8defa28ca8ec --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_DIAG @@ -0,0 +1 @@ +CONFIG_TIPC_DIAG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_IB b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_IB new file mode 100644 index 000000000000..f60ea000af52 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_IB @@ -0,0 +1 @@ +CONFIG_TIPC_MEDIA_IB=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_UDP b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_UDP new file mode 100644 index 000000000000..d25fc0f034eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TIPC_MEDIA_UDP @@ -0,0 +1 @@ +CONFIG_TIPC_MEDIA_UDP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TI_ST b/anolis/configs/L2-OPTIONAL/default/CONFIG_TI_ST new file mode 100644 index 000000000000..6d11e4e2c8b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TI_ST @@ -0,0 +1 @@ +# CONFIG_TI_ST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TMPFS_QUOTA b/anolis/configs/L2-OPTIONAL/default/CONFIG_TMPFS_QUOTA new file mode 100644 index 000000000000..360fa99ee3c0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TMPFS_QUOTA @@ -0,0 +1 @@ +# CONFIG_TMPFS_QUOTA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TOOLS_SUPPORT_RELR b/anolis/configs/L2-OPTIONAL/default/CONFIG_TOOLS_SUPPORT_RELR new file mode 100644 index 000000000000..583e5c152a3b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TOOLS_SUPPORT_RELR @@ -0,0 +1 @@ +CONFIG_TOOLS_SUPPORT_RELR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS6105X b/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS6105X new file mode 100644 index 000000000000..8b8423e575f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS6105X @@ -0,0 +1 @@ +# CONFIG_TPS6105X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS65010 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS65010 new file mode 100644 index 
000000000000..3be4f7b82f4f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS65010 @@ -0,0 +1 @@ +# CONFIG_TPS65010 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS6507X b/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS6507X new file mode 100644 index 000000000000..bf40879fd1f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TPS6507X @@ -0,0 +1 @@ +# CONFIG_TPS6507X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACEPOINT_BENCHMARK b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACEPOINT_BENCHMARK new file mode 100644 index 000000000000..c5ad1dcc2aed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACEPOINT_BENCHMARK @@ -0,0 +1 @@ +# CONFIG_TRACEPOINT_BENCHMARK is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACER_MAX_TRACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACER_MAX_TRACE new file mode 100644 index 000000000000..e01d140566a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACER_MAX_TRACE @@ -0,0 +1 @@ +CONFIG_TRACER_MAX_TRACE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_CLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_CLOCK new file mode 100644 index 000000000000..155e7b6983ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_CLOCK @@ -0,0 +1 @@ +CONFIG_TRACE_CLOCK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_EVAL_MAP_FILE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_EVAL_MAP_FILE new file mode 100644 index 000000000000..8c710a18f317 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_EVAL_MAP_FILE @@ -0,0 +1 @@ +# CONFIG_TRACE_EVAL_MAP_FILE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_EVENT_INJECT b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_EVENT_INJECT new file mode 100644 index 000000000000..1c47143cc38c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_EVENT_INJECT @@ -0,0 +1 @@ +# 
CONFIG_TRACE_EVENT_INJECT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT new file mode 100644 index 000000000000..7a3d1a02972c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT @@ -0,0 +1 @@ +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_IRQFLAGS_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_IRQFLAGS_SUPPORT new file mode 100644 index 000000000000..de7fd6ff09e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACE_IRQFLAGS_SUPPORT @@ -0,0 +1 @@ +CONFIG_TRACE_IRQFLAGS_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACING b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACING new file mode 100644 index 000000000000..c2100521d9d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACING @@ -0,0 +1 @@ +CONFIG_TRACING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACING_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACING_SUPPORT new file mode 100644 index 000000000000..0260c7f5dedf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TRACING_SUPPORT @@ -0,0 +1 @@ +CONFIG_TRACING_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TREE_RCU b/anolis/configs/L2-OPTIONAL/default/CONFIG_TREE_RCU new file mode 100644 index 000000000000..64ed0eab34ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TREE_RCU @@ -0,0 +1 @@ +CONFIG_TREE_RCU=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TREE_SRCU b/anolis/configs/L2-OPTIONAL/default/CONFIG_TREE_SRCU new file mode 100644 index 000000000000..d432877a2fac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TREE_SRCU @@ -0,0 +1 @@ +CONFIG_TREE_SRCU=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TSNEP b/anolis/configs/L2-OPTIONAL/default/CONFIG_TSNEP new file mode 100644 index 
000000000000..8361307c4ba8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TSNEP @@ -0,0 +1 @@ +# CONFIG_TSNEP is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TUN_VNET_CROSS_LE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TUN_VNET_CROSS_LE new file mode 100644 index 000000000000..80f43181563d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TUN_VNET_CROSS_LE @@ -0,0 +1 @@ +# CONFIG_TUN_VNET_CROSS_LE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TWL4030_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TWL4030_CORE new file mode 100644 index 000000000000..1f5b92782554 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TWL4030_CORE @@ -0,0 +1 @@ +# CONFIG_TWL4030_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TWL6040_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TWL6040_CORE new file mode 100644 index 000000000000..d44fdc15f94f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TWL6040_CORE @@ -0,0 +1 @@ +# CONFIG_TWL6040_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC new file mode 100644 index 000000000000..455b230d19d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC @@ -0,0 +1 @@ +CONFIG_TYPEC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_ANX7411 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_ANX7411 new file mode 100644 index 000000000000..44b5f6b101d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_ANX7411 @@ -0,0 +1 @@ +# CONFIG_TYPEC_ANX7411 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_DP_ALTMODE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_DP_ALTMODE new file mode 100644 index 000000000000..53f064aa4543 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_DP_ALTMODE @@ -0,0 +1 @@ +CONFIG_TYPEC_DP_ALTMODE=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_HD3SS3220 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_HD3SS3220 new file mode 100644 index 000000000000..0616d7d3d35a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_HD3SS3220 @@ -0,0 +1 @@ +# CONFIG_TYPEC_HD3SS3220 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_FSA4480 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_FSA4480 new file mode 100644 index 000000000000..dbb620396ce8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_FSA4480 @@ -0,0 +1 @@ +# CONFIG_TYPEC_MUX_FSA4480 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_GPIO_SBU b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_GPIO_SBU new file mode 100644 index 000000000000..bd5c3d8f0be6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_GPIO_SBU @@ -0,0 +1 @@ +# CONFIG_TYPEC_MUX_GPIO_SBU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_NB7VPQ904M b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_NB7VPQ904M new file mode 100644 index 000000000000..460d42daf2a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_NB7VPQ904M @@ -0,0 +1 @@ +# CONFIG_TYPEC_MUX_NB7VPQ904M is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_PI3USB30532 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_PI3USB30532 new file mode 100644 index 000000000000..350ebc23d14f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_MUX_PI3USB30532 @@ -0,0 +1 @@ +CONFIG_TYPEC_MUX_PI3USB30532=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_NVIDIA_ALTMODE b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_NVIDIA_ALTMODE new file mode 100644 index 000000000000..4ebb1eaeca7b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_NVIDIA_ALTMODE @@ -0,0 +1 @@ +# CONFIG_TYPEC_NVIDIA_ALTMODE is not set diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_RT1719 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_RT1719 new file mode 100644 index 000000000000..c1291042a958 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_RT1719 @@ -0,0 +1 @@ +# CONFIG_TYPEC_RT1719 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_STUSB160X b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_STUSB160X new file mode 100644 index 000000000000..9c9373827da5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_STUSB160X @@ -0,0 +1 @@ +# CONFIG_TYPEC_STUSB160X is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TCPCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TCPCI new file mode 100644 index 000000000000..4da3884e266b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TCPCI @@ -0,0 +1 @@ +# CONFIG_TYPEC_TCPCI is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TCPM b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TCPM new file mode 100644 index 000000000000..e38aa8b2747a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TCPM @@ -0,0 +1 @@ +CONFIG_TYPEC_TCPM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TPS6598X b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TPS6598X new file mode 100644 index 000000000000..b6ef0d699f79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_TPS6598X @@ -0,0 +1 @@ +CONFIG_TYPEC_TPS6598X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_UCSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_UCSI new file mode 100644 index 000000000000..744f2f6214e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_UCSI @@ -0,0 +1 @@ +CONFIG_TYPEC_UCSI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_WUSB3801 b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_WUSB3801 new file mode 100644 index 000000000000..717e2b34acb8 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_TYPEC_WUSB3801 @@ -0,0 +1 @@ +# CONFIG_TYPEC_WUSB3801 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UACCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_UACCE new file mode 100644 index 000000000000..ec75233c9b3e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UACCE @@ -0,0 +1 @@ +CONFIG_UACCE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UBIFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_UBIFS_FS new file mode 100644 index 000000000000..72c1f7a49d95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UBIFS_FS @@ -0,0 +1 @@ +# CONFIG_UBIFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UCS2_STRING b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCS2_STRING new file mode 100644 index 000000000000..d5156e3e8df9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCS2_STRING @@ -0,0 +1 @@ +CONFIG_UCS2_STRING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_ACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_ACPI new file mode 100644 index 000000000000..6adca85bd208 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_ACPI @@ -0,0 +1 @@ +CONFIG_UCSI_ACPI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_CCG b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_CCG new file mode 100644 index 000000000000..00d6d3344e30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_CCG @@ -0,0 +1 @@ +# CONFIG_UCSI_CCG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_STM32G0 b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_STM32G0 new file mode 100644 index 000000000000..4f097b689de7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UCSI_STM32G0 @@ -0,0 +1 @@ +# CONFIG_UCSI_STM32G0 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UDMABUF b/anolis/configs/L2-OPTIONAL/default/CONFIG_UDMABUF new file mode 100644 index 000000000000..1d2a9a2f5b31 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_UDMABUF @@ -0,0 +1 @@ +# CONFIG_UDMABUF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UEFI_CPER b/anolis/configs/L2-OPTIONAL/default/CONFIG_UEFI_CPER new file mode 100644 index 000000000000..6205dc54cfdc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UEFI_CPER @@ -0,0 +1 @@ +CONFIG_UEFI_CPER=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_UFS_FS new file mode 100644 index 000000000000..768c9ac9e227 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UFS_FS @@ -0,0 +1 @@ +# CONFIG_UFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UHID b/anolis/configs/L2-OPTIONAL/default/CONFIG_UHID new file mode 100644 index 000000000000..dc29857794f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UHID @@ -0,0 +1 @@ +CONFIG_UHID=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ULTRIX_PARTITION b/anolis/configs/L2-OPTIONAL/default/CONFIG_ULTRIX_PARTITION new file mode 100644 index 000000000000..918b1ac2b734 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ULTRIX_PARTITION @@ -0,0 +1 @@ +# CONFIG_ULTRIX_PARTITION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UNICODE b/anolis/configs/L2-OPTIONAL/default/CONFIG_UNICODE new file mode 100644 index 000000000000..a9c4799a491c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UNICODE @@ -0,0 +1 @@ +# CONFIG_UNICODE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UNINLINE_SPIN_UNLOCK b/anolis/configs/L2-OPTIONAL/default/CONFIG_UNINLINE_SPIN_UNLOCK new file mode 100644 index 000000000000..e4969f9444c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UNINLINE_SPIN_UNLOCK @@ -0,0 +1 @@ +CONFIG_UNINLINE_SPIN_UNLOCK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_UNIX_SCM b/anolis/configs/L2-OPTIONAL/default/CONFIG_UNIX_SCM new file mode 100644 index 
000000000000..2488ff87253e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_UNIX_SCM @@ -0,0 +1 @@ +CONFIG_UNIX_SCM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB4 new file mode 100644 index 000000000000..00afcdf8f05a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB4 @@ -0,0 +1 @@ +# CONFIG_USB4 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USBIP_CORE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USBIP_CORE new file mode 100644 index 000000000000..f23acaa70d93 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USBIP_CORE @@ -0,0 +1 @@ +# CONFIG_USBIP_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USBPCWATCHDOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_USBPCWATCHDOG new file mode 100644 index 000000000000..e92c7cbba012 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USBPCWATCHDOG @@ -0,0 +1 @@ +CONFIG_USBPCWATCHDOG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ACM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ACM new file mode 100644 index 000000000000..62189f613d57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ACM @@ -0,0 +1 @@ +CONFIG_USB_ACM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ADUTUX b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ADUTUX new file mode 100644 index 000000000000..e082deeb5e7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ADUTUX @@ -0,0 +1 @@ +CONFIG_USB_ADUTUX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ALI_M5632 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ALI_M5632 new file mode 100644 index 000000000000..e578e0be42fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ALI_M5632 @@ -0,0 +1 @@ +CONFIG_USB_ALI_M5632=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_AN2720 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_AN2720 new file mode 100644 index 
000000000000..696ce5c88317 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_AN2720 @@ -0,0 +1 @@ +CONFIG_USB_AN2720=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ANNOUNCE_NEW_DEVICES b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ANNOUNCE_NEW_DEVICES new file mode 100644 index 000000000000..f6c074bb0ece --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ANNOUNCE_NEW_DEVICES @@ -0,0 +1 @@ +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_APPLEDISPLAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_APPLEDISPLAY new file mode 100644 index 000000000000..ec4e8a64a7a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_APPLEDISPLAY @@ -0,0 +1 @@ +CONFIG_USB_APPLEDISPLAY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ARCH_HAS_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ARCH_HAS_HCD new file mode 100644 index 000000000000..4d952e2fa961 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ARCH_HAS_HCD @@ -0,0 +1 @@ +CONFIG_USB_ARCH_HAS_HCD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ARMLINUX b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ARMLINUX new file mode 100644 index 000000000000..597b27f40b12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ARMLINUX @@ -0,0 +1 @@ +CONFIG_USB_ARMLINUX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ATM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ATM new file mode 100644 index 000000000000..0bdc7ed90ce0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ATM @@ -0,0 +1 @@ +CONFIG_USB_ATM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_AUTOSUSPEND_DELAY b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_AUTOSUSPEND_DELAY new file mode 100644 index 000000000000..0886e4b55a21 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_AUTOSUSPEND_DELAY @@ -0,0 +1 @@ +CONFIG_USB_AUTOSUSPEND_DELAY=2 diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_BELKIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_BELKIN new file mode 100644 index 000000000000..2f7e9876d9de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_BELKIN @@ -0,0 +1 @@ +CONFIG_USB_BELKIN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_C67X00_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_C67X00_HCD new file mode 100644 index 000000000000..a0e2f42dac40 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_C67X00_HCD @@ -0,0 +1 @@ +# CONFIG_USB_C67X00_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CATC b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CATC new file mode 100644 index 000000000000..c89d8f72c9e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CATC @@ -0,0 +1 @@ +CONFIG_USB_CATC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CDNS_SUPPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CDNS_SUPPORT new file mode 100644 index 000000000000..c38a12002597 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CDNS_SUPPORT @@ -0,0 +1 @@ +# CONFIG_USB_CDNS_SUPPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CHIPIDEA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CHIPIDEA new file mode 100644 index 000000000000..b006254ad58e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CHIPIDEA @@ -0,0 +1 @@ +# CONFIG_USB_CHIPIDEA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_COMMON b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_COMMON new file mode 100644 index 000000000000..14bd22787685 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_COMMON @@ -0,0 +1 @@ +CONFIG_USB_COMMON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CONN_GPIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CONN_GPIO new file mode 100644 index 000000000000..e91343881fcc --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CONN_GPIO @@ -0,0 +1 @@ +# CONFIG_USB_CONN_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CXACRU b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CXACRU new file mode 100644 index 000000000000..7650d329ea27 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CXACRU @@ -0,0 +1 @@ +CONFIG_USB_CXACRU=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CYPRESS_CY7C63 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CYPRESS_CY7C63 new file mode 100644 index 000000000000..5547144d9b0e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CYPRESS_CY7C63 @@ -0,0 +1 @@ +# CONFIG_USB_CYPRESS_CY7C63 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CYTHERM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CYTHERM new file mode 100644 index 000000000000..e8dc4c60b744 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_CYTHERM @@ -0,0 +1 @@ +# CONFIG_USB_CYTHERM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DEFAULT_PERSIST b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DEFAULT_PERSIST new file mode 100644 index 000000000000..db89fa3d4281 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DEFAULT_PERSIST @@ -0,0 +1 @@ +CONFIG_USB_DEFAULT_PERSIST=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DWC2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DWC2 new file mode 100644 index 000000000000..94b56af6d056 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DWC2 @@ -0,0 +1 @@ +# CONFIG_USB_DWC2 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DWC3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DWC3 new file mode 100644 index 000000000000..fc9f5c8fe104 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DWC3 @@ -0,0 +1 @@ +# CONFIG_USB_DWC3 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DYNAMIC_MINORS 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DYNAMIC_MINORS new file mode 100644 index 000000000000..88b6f1f71e27 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_DYNAMIC_MINORS @@ -0,0 +1 @@ +# CONFIG_USB_DYNAMIC_MINORS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_FSL b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_FSL new file mode 100644 index 000000000000..c2f936a3444c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_FSL @@ -0,0 +1 @@ +# CONFIG_USB_EHCI_FSL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_HCD new file mode 100644 index 000000000000..7322dc2d519c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_HCD @@ -0,0 +1 @@ +CONFIG_USB_EHCI_HCD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_PCI new file mode 100644 index 000000000000..e482ceabd85a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_PCI @@ -0,0 +1 @@ +CONFIG_USB_EHCI_PCI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_ROOT_HUB_TT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_ROOT_HUB_TT new file mode 100644 index 000000000000..a5c3cc477270 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_ROOT_HUB_TT @@ -0,0 +1 @@ +CONFIG_USB_EHCI_ROOT_HUB_TT=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_TT_NEWSCHED b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_TT_NEWSCHED new file mode 100644 index 000000000000..1eebea30348c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHCI_TT_NEWSCHED @@ -0,0 +1 @@ +CONFIG_USB_EHCI_TT_NEWSCHED=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHSET_TEST_FIXTURE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHSET_TEST_FIXTURE new file mode 100644 index 000000000000..4cef71e49e33 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EHSET_TEST_FIXTURE @@ -0,0 +1 @@ +# CONFIG_USB_EHSET_TEST_FIXTURE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EMI26 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EMI26 new file mode 100644 index 000000000000..35081c0de252 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EMI26 @@ -0,0 +1 @@ +CONFIG_USB_EMI26=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EMI62 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EMI62 new file mode 100644 index 000000000000..f2e7d662899a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EMI62 @@ -0,0 +1 @@ +CONFIG_USB_EMI62=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EPSON2888 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EPSON2888 new file mode 100644 index 000000000000..944ff495a26b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EPSON2888 @@ -0,0 +1 @@ +CONFIG_USB_EPSON2888=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EZUSB_FX2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EZUSB_FX2 new file mode 100644 index 000000000000..cbcda7399e8d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_EZUSB_FX2 @@ -0,0 +1 @@ +CONFIG_USB_EZUSB_FX2=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_FEW_INIT_RETRIES b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_FEW_INIT_RETRIES new file mode 100644 index 000000000000..9d905e9cef4f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_FEW_INIT_RETRIES @@ -0,0 +1 @@ +# CONFIG_USB_FEW_INIT_RETRIES is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_GADGET b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_GADGET new file mode 100644 index 000000000000..86fb660fe6d5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_GADGET @@ -0,0 +1 @@ +# CONFIG_USB_GADGET is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_GPIO_VBUS 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_GPIO_VBUS new file mode 100644 index 000000000000..3fb8053360f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_GPIO_VBUS @@ -0,0 +1 @@ +# CONFIG_USB_GPIO_VBUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HCD_BCMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HCD_BCMA new file mode 100644 index 000000000000..645b791664a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HCD_BCMA @@ -0,0 +1 @@ +# CONFIG_USB_HCD_BCMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HCD_TEST_MODE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HCD_TEST_MODE new file mode 100644 index 000000000000..3d8668f5e9ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HCD_TEST_MODE @@ -0,0 +1 @@ +# CONFIG_USB_HCD_TEST_MODE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HID b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HID new file mode 100644 index 000000000000..9f35c0af7324 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HID @@ -0,0 +1 @@ +CONFIG_USB_HID=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HIDDEV b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HIDDEV new file mode 100644 index 000000000000..4d37a25b7d86 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HIDDEV @@ -0,0 +1 @@ +CONFIG_USB_HIDDEV=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSIC_USB3503 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSIC_USB3503 new file mode 100644 index 000000000000..8f6b0dd20bc4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSIC_USB3503 @@ -0,0 +1 @@ +CONFIG_USB_HSIC_USB3503=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSIC_USB4604 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSIC_USB4604 new file mode 100644 index 000000000000..2e27cbba8b62 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSIC_USB4604 @@ -0,0 +1 
@@ +# CONFIG_USB_HSIC_USB4604 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSO b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSO new file mode 100644 index 000000000000..0a009eeca2a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HSO @@ -0,0 +1 @@ +CONFIG_USB_HSO=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HUB_USB251XB b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HUB_USB251XB new file mode 100644 index 000000000000..5507c061fd81 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_HUB_USB251XB @@ -0,0 +1 @@ +# CONFIG_USB_HUB_USB251XB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IDMOUSE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IDMOUSE new file mode 100644 index 000000000000..8b54088757d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IDMOUSE @@ -0,0 +1 @@ +CONFIG_USB_IDMOUSE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IOWARRIOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IOWARRIOR new file mode 100644 index 000000000000..7bf2b0c653f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IOWARRIOR @@ -0,0 +1 @@ +CONFIG_USB_IOWARRIOR=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IPHETH b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IPHETH new file mode 100644 index 000000000000..9aa5dc3442d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_IPHETH @@ -0,0 +1 @@ +CONFIG_USB_IPHETH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISIGHTFW b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISIGHTFW new file mode 100644 index 000000000000..80ead4f41ccb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISIGHTFW @@ -0,0 +1 @@ +CONFIG_USB_ISIGHTFW=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP116X_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP116X_HCD new file mode 100644 index 000000000000..bd16b51652b0 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP116X_HCD @@ -0,0 +1 @@ +# CONFIG_USB_ISP116X_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP1301 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP1301 new file mode 100644 index 000000000000..8a6cb284e0b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP1301 @@ -0,0 +1 @@ +# CONFIG_USB_ISP1301 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP1760 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP1760 new file mode 100644 index 000000000000..aed47c8aa1bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ISP1760 @@ -0,0 +1 @@ +# CONFIG_USB_ISP1760 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_KAWETH b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_KAWETH new file mode 100644 index 000000000000..daed8dbd4135 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_KAWETH @@ -0,0 +1 @@ +CONFIG_USB_KAWETH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_KC2190 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_KC2190 new file mode 100644 index 000000000000..bd099751cb76 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_KC2190 @@ -0,0 +1 @@ +CONFIG_USB_KC2190=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LAN78XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LAN78XX new file mode 100644 index 000000000000..6d409c13e26b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LAN78XX @@ -0,0 +1 @@ +CONFIG_USB_LAN78XX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LCD new file mode 100644 index 000000000000..b87994bf232e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LCD @@ -0,0 +1 @@ +CONFIG_USB_LCD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LD new file mode 100644 index 000000000000..ba993ee02d8d 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LD @@ -0,0 +1 @@ +CONFIG_USB_LD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LEDS_TRIGGER_USBPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LEDS_TRIGGER_USBPORT new file mode 100644 index 000000000000..9d83c48a4a21 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LEDS_TRIGGER_USBPORT @@ -0,0 +1 @@ +CONFIG_USB_LEDS_TRIGGER_USBPORT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LED_TRIG b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LED_TRIG new file mode 100644 index 000000000000..28127f785c68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LED_TRIG @@ -0,0 +1 @@ +CONFIG_USB_LED_TRIG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LEGOTOWER b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LEGOTOWER new file mode 100644 index 000000000000..4f0188a9ab08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LEGOTOWER @@ -0,0 +1 @@ +CONFIG_USB_LEGOTOWER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LINK_LAYER_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LINK_LAYER_TEST new file mode 100644 index 000000000000..0e4389a3db5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_LINK_LAYER_TEST @@ -0,0 +1 @@ +# CONFIG_USB_LINK_LAYER_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MAX3421_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MAX3421_HCD new file mode 100644 index 000000000000..51b1205bf53a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MAX3421_HCD @@ -0,0 +1 @@ +# CONFIG_USB_MAX3421_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MDC800 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MDC800 new file mode 100644 index 000000000000..bc7b0bfd8dc6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MDC800 @@ -0,0 +1 @@ +CONFIG_USB_MDC800=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MICROTEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MICROTEK new file mode 100644 index 000000000000..94f288329da2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MICROTEK @@ -0,0 +1 @@ +CONFIG_USB_MICROTEK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MON b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MON new file mode 100644 index 000000000000..330d7225cbf6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MON @@ -0,0 +1 @@ +CONFIG_USB_MON=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MUSB_HDRC b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MUSB_HDRC new file mode 100644 index 000000000000..ae1dea512660 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_MUSB_HDRC @@ -0,0 +1 @@ +# CONFIG_USB_MUSB_HDRC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AQC111 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AQC111 new file mode 100644 index 000000000000..dd5f4e15149e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AQC111 @@ -0,0 +1 @@ +# CONFIG_USB_NET_AQC111 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AX88179_178A b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AX88179_178A new file mode 100644 index 000000000000..da5aa15b2e67 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AX88179_178A @@ -0,0 +1 @@ +CONFIG_USB_NET_AX88179_178A=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AX8817X b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AX8817X new file mode 100644 index 000000000000..3ac11e4ec081 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_AX8817X @@ -0,0 +1 @@ +CONFIG_USB_NET_AX8817X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDCETHER b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDCETHER new file mode 100644 index 000000000000..87452e109744 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDCETHER @@ -0,0 +1 @@ +CONFIG_USB_NET_CDCETHER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_EEM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_EEM new file mode 100644 index 000000000000..73bbe6213088 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_EEM @@ -0,0 +1 @@ +CONFIG_USB_NET_CDC_EEM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_MBIM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_MBIM new file mode 100644 index 000000000000..a728babfc784 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_MBIM @@ -0,0 +1 @@ +CONFIG_USB_NET_CDC_MBIM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_NCM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_NCM new file mode 100644 index 000000000000..4c6e461cbc53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_NCM @@ -0,0 +1 @@ +CONFIG_USB_NET_CDC_NCM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_SUBSET b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_SUBSET new file mode 100644 index 000000000000..f54b326ead73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_SUBSET @@ -0,0 +1 @@ +CONFIG_USB_NET_CDC_SUBSET=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_SUBSET_ENABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_SUBSET_ENABLE new file mode 100644 index 000000000000..5d3aa7ce08af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CDC_SUBSET_ENABLE @@ -0,0 +1 @@ +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CH9200 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CH9200 new file mode 100644 index 000000000000..ce29732451a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CH9200 @@ -0,0 +1 @@ +CONFIG_USB_NET_CH9200=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CX82310_ETH b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CX82310_ETH new file mode 100644 index 000000000000..13aaf6f75c30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_CX82310_ETH @@ -0,0 +1 @@ +CONFIG_USB_NET_CX82310_ETH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_DM9601 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_DM9601 new file mode 100644 index 000000000000..7227f76861b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_DM9601 @@ -0,0 +1 @@ +CONFIG_USB_NET_DM9601=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_GL620A b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_GL620A new file mode 100644 index 000000000000..8a950c494aca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_GL620A @@ -0,0 +1 @@ +CONFIG_USB_NET_GL620A=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_HUAWEI_CDC_NCM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_HUAWEI_CDC_NCM new file mode 100644 index 000000000000..16acb23012bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_HUAWEI_CDC_NCM @@ -0,0 +1 @@ +CONFIG_USB_NET_HUAWEI_CDC_NCM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_INT51X1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_INT51X1 new file mode 100644 index 000000000000..a2526e648818 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_INT51X1 @@ -0,0 +1 @@ +CONFIG_USB_NET_INT51X1=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_KALMIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_KALMIA new file mode 100644 index 000000000000..a92ab21e5a19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_KALMIA @@ -0,0 +1 @@ +CONFIG_USB_NET_KALMIA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_MCS7830 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_MCS7830 new file mode 100644 index 
000000000000..0c6fd98a5090 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_MCS7830 @@ -0,0 +1 @@ +CONFIG_USB_NET_MCS7830=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_NET1080 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_NET1080 new file mode 100644 index 000000000000..0ad1bc9be042 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_NET1080 @@ -0,0 +1 @@ +CONFIG_USB_NET_NET1080=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_PLUSB b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_PLUSB new file mode 100644 index 000000000000..f87e9896eafb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_PLUSB @@ -0,0 +1 @@ +CONFIG_USB_NET_PLUSB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_QMI_WWAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_QMI_WWAN new file mode 100644 index 000000000000..6be46c7d2034 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_QMI_WWAN @@ -0,0 +1 @@ +CONFIG_USB_NET_QMI_WWAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_RNDIS_HOST b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_RNDIS_HOST new file mode 100644 index 000000000000..58c44e88a6a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_RNDIS_HOST @@ -0,0 +1 @@ +CONFIG_USB_NET_RNDIS_HOST=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SMSC75XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SMSC75XX new file mode 100644 index 000000000000..3011223ad230 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SMSC75XX @@ -0,0 +1 @@ +CONFIG_USB_NET_SMSC75XX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SMSC95XX b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SMSC95XX new file mode 100644 index 000000000000..479b43b1d4a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SMSC95XX @@ -0,0 +1 @@ +CONFIG_USB_NET_SMSC95XX=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SR9800 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SR9800 new file mode 100644 index 000000000000..3caca281c289 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_SR9800 @@ -0,0 +1 @@ +# CONFIG_USB_NET_SR9800 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_ZAURUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_ZAURUS new file mode 100644 index 000000000000..00dfbaf3b103 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_NET_ZAURUS @@ -0,0 +1 @@ +CONFIG_USB_NET_ZAURUS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD new file mode 100644 index 000000000000..59f6d5fd5b34 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD @@ -0,0 +1 @@ +CONFIG_USB_OHCI_HCD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD_PCI new file mode 100644 index 000000000000..a78b62cb547e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD_PCI @@ -0,0 +1 @@ +CONFIG_USB_OHCI_HCD_PCI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD_PLATFORM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD_PLATFORM new file mode 100644 index 000000000000..c35b2cdd6080 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_HCD_PLATFORM @@ -0,0 +1 @@ +# CONFIG_USB_OHCI_HCD_PLATFORM is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_LITTLE_ENDIAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_LITTLE_ENDIAN new file mode 100644 index 000000000000..ae53877ea536 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OHCI_LITTLE_ENDIAN @@ -0,0 +1 @@ +CONFIG_USB_OHCI_LITTLE_ENDIAN=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OTG b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OTG new file mode 
100644 index 000000000000..b4ad21e4876e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OTG @@ -0,0 +1 @@ +# CONFIG_USB_OTG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OTG_PRODUCTLIST b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OTG_PRODUCTLIST new file mode 100644 index 000000000000..a9ccf6a0ef8f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OTG_PRODUCTLIST @@ -0,0 +1 @@ +# CONFIG_USB_OTG_PRODUCTLIST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OXU210HP_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OXU210HP_HCD new file mode 100644 index 000000000000..4f1efda37716 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_OXU210HP_HCD @@ -0,0 +1 @@ +# CONFIG_USB_OXU210HP_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_PEGASUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_PEGASUS new file mode 100644 index 000000000000..3218ed61fad0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_PEGASUS @@ -0,0 +1 @@ +CONFIG_USB_PEGASUS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_PRINTER b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_PRINTER new file mode 100644 index 000000000000..108d00bea8fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_PRINTER @@ -0,0 +1 @@ +CONFIG_USB_PRINTER=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_R8A66597_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_R8A66597_HCD new file mode 100644 index 000000000000..70dcc7701792 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_R8A66597_HCD @@ -0,0 +1 @@ +# CONFIG_USB_R8A66597_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ROLE_SWITCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ROLE_SWITCH new file mode 100644 index 000000000000..6a168b92696b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_ROLE_SWITCH @@ -0,0 +1 @@ +CONFIG_USB_ROLE_SWITCH=y 
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8150 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8150 new file mode 100644 index 000000000000..29f740fcfdbc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8150 @@ -0,0 +1 @@ +CONFIG_USB_RTL8150=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8152 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8152 new file mode 100644 index 000000000000..66869c0244eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8152 @@ -0,0 +1 @@ +CONFIG_USB_RTL8152=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8153_ECM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8153_ECM new file mode 100644 index 000000000000..3ec37682be5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_RTL8153_ECM @@ -0,0 +1 @@ +CONFIG_USB_RTL8153_ECM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL new file mode 100644 index 000000000000..e5f3a6d27d3f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL @@ -0,0 +1 @@ +CONFIG_USB_SERIAL=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_AIRCABLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_AIRCABLE new file mode 100644 index 000000000000..bbe37bf7578f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_AIRCABLE @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_AIRCABLE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_ARK3116 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_ARK3116 new file mode 100644 index 000000000000..285aeacc9d40 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_ARK3116 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_ARK3116=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_BELKIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_BELKIN new file mode 100644 index 000000000000..313844222fdc --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_BELKIN @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_BELKIN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CH341 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CH341 new file mode 100644 index 000000000000..7b09e0d904f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CH341 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_CH341=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CP210X b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CP210X new file mode 100644 index 000000000000..18e8641d54da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CP210X @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_CP210X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CYBERJACK b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CYBERJACK new file mode 100644 index 000000000000..3681a379785d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CYBERJACK @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_CYBERJACK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CYPRESS_M8 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CYPRESS_M8 new file mode 100644 index 000000000000..b9f46a74260f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_CYPRESS_M8 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_CYPRESS_M8=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_DEBUG new file mode 100644 index 000000000000..896ed4ca6f7a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_DEBUG @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_DEBUG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_DIGI_ACCELEPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_DIGI_ACCELEPORT new file mode 100644 index 000000000000..5617d88f807a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_DIGI_ACCELEPORT @@ -0,0 +1 @@ 
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EDGEPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EDGEPORT new file mode 100644 index 000000000000..bba8a43e8566 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EDGEPORT @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_EDGEPORT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EDGEPORT_TI b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EDGEPORT_TI new file mode 100644 index 000000000000..06184eb4d4e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EDGEPORT_TI @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_EDGEPORT_TI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EMPEG b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EMPEG new file mode 100644 index 000000000000..139e9f9aaa87 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_EMPEG @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_EMPEG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_F81232 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_F81232 new file mode 100644 index 000000000000..c02ccac8388a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_F81232 @@ -0,0 +1 @@ +# CONFIG_USB_SERIAL_F81232 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_F8153X b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_F8153X new file mode 100644 index 000000000000..94dd9f277e36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_F8153X @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_F8153X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_FTDI_SIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_FTDI_SIO new file mode 100644 index 000000000000..0d2907aaa19a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_FTDI_SIO @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_FTDI_SIO=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_GARMIN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_GARMIN new file mode 100644 index 000000000000..81637b4917f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_GARMIN @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_GARMIN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_GENERIC b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_GENERIC new file mode 100644 index 000000000000..ad338db81378 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_GENERIC @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_GENERIC=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPAQ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPAQ new file mode 100644 index 000000000000..3a435bc53363 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPAQ @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_IPAQ=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPW b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPW new file mode 100644 index 000000000000..43c1be4f98d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IPW @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_IPW=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IR b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IR new file mode 100644 index 000000000000..5e8e417681d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IR @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_IR=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IUU b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IUU new file mode 100644 index 000000000000..e2b601afd731 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_IUU @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_IUU=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KEYSPAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KEYSPAN new file mode 100644 index 
000000000000..a09ff3f77a6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KEYSPAN @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_KEYSPAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KEYSPAN_PDA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KEYSPAN_PDA new file mode 100644 index 000000000000..fe57af002db0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KEYSPAN_PDA @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_KEYSPAN_PDA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KLSI b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KLSI new file mode 100644 index 000000000000..6b920b85920b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KLSI @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_KLSI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KOBIL_SCT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KOBIL_SCT new file mode 100644 index 000000000000..12f7217e23fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_KOBIL_SCT @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_KOBIL_SCT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MCT_U232 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MCT_U232 new file mode 100644 index 000000000000..ee2c0eebbe8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MCT_U232 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_MCT_U232=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_METRO b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_METRO new file mode 100644 index 000000000000..e18da6c48212 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_METRO @@ -0,0 +1 @@ +# CONFIG_USB_SERIAL_METRO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MOS7720 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MOS7720 new file mode 100644 index 000000000000..221f04db688c --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MOS7720 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_MOS7720=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MOS7840 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MOS7840 new file mode 100644 index 000000000000..d47711d40285 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MOS7840 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_MOS7840=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MXUPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MXUPORT new file mode 100644 index 000000000000..22d9816009f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_MXUPORT @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_MXUPORT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_NAVMAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_NAVMAN new file mode 100644 index 000000000000..76073f7f8bc8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_NAVMAN @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_NAVMAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OMNINET b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OMNINET new file mode 100644 index 000000000000..3185812058b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OMNINET @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_OMNINET=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OPTICON b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OPTICON new file mode 100644 index 000000000000..697554b8bffd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OPTICON @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_OPTICON=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OPTION b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OPTION new file mode 100644 index 000000000000..6c05eb345fef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OPTION @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_OPTION=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OTI6858 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OTI6858 new file mode 100644 index 000000000000..051bd0fa41c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_OTI6858 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_OTI6858=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_PL2303 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_PL2303 new file mode 100644 index 000000000000..5b99f3ed0f86 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_PL2303 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_PL2303=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QCAUX b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QCAUX new file mode 100644 index 000000000000..1c898ff4ad83 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QCAUX @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_QCAUX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QT2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QT2 new file mode 100644 index 000000000000..16c67f4eaafb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QT2 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_QT2=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QUALCOMM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QUALCOMM new file mode 100644 index 000000000000..26e5667b2ab5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_QUALCOMM @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_QUALCOMM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SAFE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SAFE new file mode 100644 index 000000000000..687dfe214671 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SAFE @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_SAFE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SAFE_PADDED b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SAFE_PADDED new file 
mode 100644 index 000000000000..7af538c757af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SAFE_PADDED @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_SAFE_PADDED=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SIERRAWIRELESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SIERRAWIRELESS new file mode 100644 index 000000000000..4f35e2616f08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SIERRAWIRELESS @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_SIERRAWIRELESS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SPCP8X5 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SPCP8X5 new file mode 100644 index 000000000000..97176567f273 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SPCP8X5 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_SPCP8X5=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SSU100 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SSU100 new file mode 100644 index 000000000000..5d35a493b08a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SSU100 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_SSU100=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SYMBOL b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SYMBOL new file mode 100644 index 000000000000..2f3118f1233b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_SYMBOL @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_SYMBOL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_TI b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_TI new file mode 100644 index 000000000000..eb82f9ed7992 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_TI @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_TI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_UPD78F0730 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_UPD78F0730 new file mode 100644 index 000000000000..8258d573c7f7 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_UPD78F0730 @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_UPD78F0730=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_VISOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_VISOR new file mode 100644 index 000000000000..51a00636b74a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_VISOR @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_VISOR=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WHITEHEAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WHITEHEAT new file mode 100644 index 000000000000..38ca17549e29 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WHITEHEAT @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_WHITEHEAT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WISHBONE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WISHBONE new file mode 100644 index 000000000000..975ebe400d0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WISHBONE @@ -0,0 +1 @@ +# CONFIG_USB_SERIAL_WISHBONE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WWAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WWAN new file mode 100644 index 000000000000..7ff3107ac975 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_WWAN @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_WWAN=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_XR b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_XR new file mode 100644 index 000000000000..bd445c4e83c0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_XR @@ -0,0 +1 @@ +# CONFIG_USB_SERIAL_XR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_XSENS_MT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_XSENS_MT new file mode 100644 index 000000000000..bf74a07342a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SERIAL_XSENS_MT @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_XSENS_MT=m 
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SEVSEG b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SEVSEG new file mode 100644 index 000000000000..161fcb9b919b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SEVSEG @@ -0,0 +1 @@ +CONFIG_USB_SEVSEG=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SIERRA_NET b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SIERRA_NET new file mode 100644 index 000000000000..310a9f31aae4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SIERRA_NET @@ -0,0 +1 @@ +CONFIG_USB_SIERRA_NET=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SISUSBVGA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SISUSBVGA new file mode 100644 index 000000000000..7efda84c1bbd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SISUSBVGA @@ -0,0 +1 @@ +CONFIG_USB_SISUSBVGA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SL811_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SL811_HCD new file mode 100644 index 000000000000..73078fbbce44 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_SL811_HCD @@ -0,0 +1 @@ +# CONFIG_USB_SL811_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE new file mode 100644 index 000000000000..c99c095dfa35 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE @@ -0,0 +1 @@ +CONFIG_USB_STORAGE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ALAUDA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ALAUDA new file mode 100644 index 000000000000..9f4bfdeac483 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ALAUDA @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_ALAUDA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_CYPRESS_ATACB b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_CYPRESS_ATACB new file mode 100644 index 000000000000..3e095761b351 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_CYPRESS_ATACB @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_CYPRESS_ATACB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_DATAFAB b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_DATAFAB new file mode 100644 index 000000000000..92eb4beb3cbe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_DATAFAB @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_DATAFAB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_DEBUG new file mode 100644 index 000000000000..3a8be6ac1a06 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_DEBUG @@ -0,0 +1 @@ +# CONFIG_USB_STORAGE_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ENE_UB6250 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ENE_UB6250 new file mode 100644 index 000000000000..64edcc2aca9f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ENE_UB6250 @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_ENE_UB6250=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_FREECOM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_FREECOM new file mode 100644 index 000000000000..44ffc7edc1e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_FREECOM @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_FREECOM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ISD200 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ISD200 new file mode 100644 index 000000000000..c9604f3463cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ISD200 @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_ISD200=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_JUMPSHOT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_JUMPSHOT new file mode 100644 index 000000000000..3753489e015d --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_JUMPSHOT @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_JUMPSHOT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_KARMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_KARMA new file mode 100644 index 000000000000..a3fcafa6348f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_KARMA @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_KARMA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ONETOUCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ONETOUCH new file mode 100644 index 000000000000..de105d95fd19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_ONETOUCH @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_ONETOUCH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_REALTEK b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_REALTEK new file mode 100644 index 000000000000..d726f0eff6f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_REALTEK @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_REALTEK=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_SDDR09 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_SDDR09 new file mode 100644 index 000000000000..4336ce368a49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_SDDR09 @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_SDDR09=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_SDDR55 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_SDDR55 new file mode 100644 index 000000000000..18e63e8218d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_SDDR55 @@ -0,0 +1 @@ +CONFIG_USB_STORAGE_SDDR55=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_USBAT b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_USBAT new file mode 100644 index 000000000000..f5309cd3d21e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_STORAGE_USBAT @@ -0,0 +1 @@ 
+CONFIG_USB_STORAGE_USBAT=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TEST new file mode 100644 index 000000000000..d11d0fdaadbf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TEST @@ -0,0 +1 @@ +# CONFIG_USB_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TMC b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TMC new file mode 100644 index 000000000000..1215a85c275e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TMC @@ -0,0 +1 @@ +CONFIG_USB_TMC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TRANCEVIBRATOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TRANCEVIBRATOR new file mode 100644 index 000000000000..f30a7b06687a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_TRANCEVIBRATOR @@ -0,0 +1 @@ +# CONFIG_USB_TRANCEVIBRATOR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_UAS b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_UAS new file mode 100644 index 000000000000..8d98eeae1181 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_UAS @@ -0,0 +1 @@ +CONFIG_USB_UAS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_UEAGLEATM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_UEAGLEATM new file mode 100644 index 000000000000..42aac423b2bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_UEAGLEATM @@ -0,0 +1 @@ +CONFIG_USB_UEAGLEATM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_USBNET b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_USBNET new file mode 100644 index 000000000000..587a08dc5124 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_USBNET @@ -0,0 +1 @@ +CONFIG_USB_USBNET=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_VL600 b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_VL600 new file mode 100644 index 000000000000..7f1be44f559d --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_VL600 @@ -0,0 +1 @@ +CONFIG_USB_VL600=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_WDM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_WDM new file mode 100644 index 000000000000..5f264ab428c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_WDM @@ -0,0 +1 @@ +CONFIG_USB_WDM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_HCD b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_HCD new file mode 100644 index 000000000000..ae4294baf611 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_HCD @@ -0,0 +1 @@ +CONFIG_USB_XHCI_HCD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI new file mode 100644 index 000000000000..69eea98dae52 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI @@ -0,0 +1 @@ +CONFIG_USB_XHCI_PCI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI_RENESAS b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI_RENESAS new file mode 100644 index 000000000000..0d81c3d4d186 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XHCI_PCI_RENESAS @@ -0,0 +1 @@ +# CONFIG_USB_XHCI_PCI_RENESAS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XUSBATM b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XUSBATM new file mode 100644 index 000000000000..6c567c836e80 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_XUSBATM @@ -0,0 +1 @@ +CONFIG_USB_XUSBATM=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_YUREX b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_YUREX new file mode 100644 index 000000000000..71cffb1a41f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USB_YUREX @@ -0,0 +1 @@ +# CONFIG_USB_YUREX is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USERIO b/anolis/configs/L2-OPTIONAL/default/CONFIG_USERIO new file mode 100644 index 
000000000000..994335413f23 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USERIO @@ -0,0 +1 @@ +# CONFIG_USERIO is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USER_DECRYPTED_DATA b/anolis/configs/L2-OPTIONAL/default/CONFIG_USER_DECRYPTED_DATA new file mode 100644 index 000000000000..b2e46dd0575e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USER_DECRYPTED_DATA @@ -0,0 +1 @@ +# CONFIG_USER_DECRYPTED_DATA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USER_EVENTS b/anolis/configs/L2-OPTIONAL/default/CONFIG_USER_EVENTS new file mode 100644 index 000000000000..3dab01b676c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USER_EVENTS @@ -0,0 +1 @@ +# CONFIG_USER_EVENTS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_USE_PERCPU_NUMA_NODE_ID b/anolis/configs/L2-OPTIONAL/default/CONFIG_USE_PERCPU_NUMA_NODE_ID new file mode 100644 index 000000000000..90c9b33d52c0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_USE_PERCPU_NUMA_NODE_ID @@ -0,0 +1 @@ +CONFIG_USE_PERCPU_NUMA_NODE_ID=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VALIDATE_FS_PARSER b/anolis/configs/L2-OPTIONAL/default/CONFIG_VALIDATE_FS_PARSER new file mode 100644 index 000000000000..e1915bb4a4e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VALIDATE_FS_PARSER @@ -0,0 +1 @@ +# CONFIG_VALIDATE_FS_PARSER is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_IOMMU_TYPE1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_IOMMU_TYPE1 new file mode 100644 index 000000000000..0c5602392c32 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_IOMMU_TYPE1 @@ -0,0 +1 @@ +CONFIG_VFIO_IOMMU_TYPE1=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_PCI_INTX b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_PCI_INTX new file mode 100644 index 000000000000..d58708279a2b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_PCI_INTX @@ -0,0 
+1 @@ +CONFIG_VFIO_PCI_INTX=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_PCI_MMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_PCI_MMAP new file mode 100644 index 000000000000..4a77d0d3219e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_PCI_MMAP @@ -0,0 +1 @@ +CONFIG_VFIO_PCI_MMAP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_VIRQFD b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_VIRQFD new file mode 100644 index 000000000000..af7e2cab380c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VFIO_VIRQFD @@ -0,0 +1 @@ +CONFIG_VFIO_VIRQFD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST new file mode 100644 index 000000000000..391e6cb1a17b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST @@ -0,0 +1 @@ +CONFIG_VHOST=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_CROSS_ENDIAN_LEGACY b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_CROSS_ENDIAN_LEGACY new file mode 100644 index 000000000000..e7cbfa39a4f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_CROSS_ENDIAN_LEGACY @@ -0,0 +1 @@ +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_IOTLB b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_IOTLB new file mode 100644 index 000000000000..cf2867757f6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_IOTLB @@ -0,0 +1 @@ +CONFIG_VHOST_IOTLB=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_TASK b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_TASK new file mode 100644 index 000000000000..662156d8439a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VHOST_TASK @@ -0,0 +1 @@ +CONFIG_VHOST_TASK=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VIDEO_CMDLINE b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIDEO_CMDLINE new file mode 100644 index 000000000000..b9b499925593 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIDEO_CMDLINE @@ -0,0 +1 @@ +CONFIG_VIDEO_CMDLINE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VIDEO_NOMODESET b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIDEO_NOMODESET new file mode 100644 index 000000000000..6fa3400c7288 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIDEO_NOMODESET @@ -0,0 +1 @@ +CONFIG_VIDEO_NOMODESET=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRTIO_ANCHOR b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRTIO_ANCHOR new file mode 100644 index 000000000000..55b32c7aef56 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRTIO_ANCHOR @@ -0,0 +1 @@ +CONFIG_VIRTIO_ANCHOR=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRTIO_IOMMU b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRTIO_IOMMU new file mode 100644 index 000000000000..ecd7366a12be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRTIO_IOMMU @@ -0,0 +1 @@ +# CONFIG_VIRTIO_IOMMU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRT_CPU_ACCOUNTING b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRT_CPU_ACCOUNTING new file mode 100644 index 000000000000..e176b462a608 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VIRT_CPU_ACCOUNTING @@ -0,0 +1 @@ +CONFIG_VIRT_CPU_ACCOUNTING=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VITESSE_PHY b/anolis/configs/L2-OPTIONAL/default/CONFIG_VITESSE_PHY new file mode 100644 index 000000000000..98b7defb93ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VITESSE_PHY @@ -0,0 +1 @@ +CONFIG_VITESSE_PHY=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q b/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q new file mode 100644 index 000000000000..1cb92b9cd0c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q @@ -0,0 +1 @@ +CONFIG_VLAN_8021Q=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q_GVRP 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q_GVRP new file mode 100644 index 000000000000..dd976efa2c97 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q_GVRP @@ -0,0 +1 @@ +CONFIG_VLAN_8021Q_GVRP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q_MVRP b/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q_MVRP new file mode 100644 index 000000000000..c0184a3f6183 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VLAN_8021Q_MVRP @@ -0,0 +1 @@ +CONFIG_VLAN_8021Q_MVRP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VT_CONSOLE_SLEEP b/anolis/configs/L2-OPTIONAL/default/CONFIG_VT_CONSOLE_SLEEP new file mode 100644 index 000000000000..dc6d564ef6e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VT_CONSOLE_SLEEP @@ -0,0 +1 @@ +CONFIG_VT_CONSOLE_SLEEP=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_VXFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_VXFS_FS new file mode 100644 index 000000000000..07d6c238c947 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_VXFS_FS @@ -0,0 +1 @@ +# CONFIG_VXFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_W1 b/anolis/configs/L2-OPTIONAL/default/CONFIG_W1 new file mode 100644 index 000000000000..c224e25f0cb4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_W1 @@ -0,0 +1 @@ +# CONFIG_W1 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WANXL b/anolis/configs/L2-OPTIONAL/default/CONFIG_WANXL new file mode 100644 index 000000000000..8fb832a9d89a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WANXL @@ -0,0 +1 @@ +# CONFIG_WANXL is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT b/anolis/configs/L2-OPTIONAL/default/CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT new file mode 100644 index 000000000000..804787a3c403 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT @@ -0,0 +1 @@ +# 
CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WATCH_QUEUE b/anolis/configs/L2-OPTIONAL/default/CONFIG_WATCH_QUEUE new file mode 100644 index 000000000000..4e2060b85ff3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WATCH_QUEUE @@ -0,0 +1 @@ +# CONFIG_WATCH_QUEUE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WDTPCI b/anolis/configs/L2-OPTIONAL/default/CONFIG_WDTPCI new file mode 100644 index 000000000000..9cc0d4bd549e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WDTPCI @@ -0,0 +1 @@ +CONFIG_WDTPCI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WERROR b/anolis/configs/L2-OPTIONAL/default/CONFIG_WERROR new file mode 100644 index 000000000000..2bfe4c686d74 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WERROR @@ -0,0 +1 @@ +# CONFIG_WERROR is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WIRELESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_WIRELESS new file mode 100644 index 000000000000..150710089702 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WIRELESS @@ -0,0 +1 @@ +CONFIG_WIRELESS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WPCM450_SOC b/anolis/configs/L2-OPTIONAL/default/CONFIG_WPCM450_SOC new file mode 100644 index 000000000000..5598b82c7fbf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WPCM450_SOC @@ -0,0 +1 @@ +# CONFIG_WPCM450_SOC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WQ_CPU_INTENSIVE_REPORT b/anolis/configs/L2-OPTIONAL/default/CONFIG_WQ_CPU_INTENSIVE_REPORT new file mode 100644 index 000000000000..67231fa15e30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WQ_CPU_INTENSIVE_REPORT @@ -0,0 +1 @@ +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WQ_POWER_EFFICIENT_DEFAULT b/anolis/configs/L2-OPTIONAL/default/CONFIG_WQ_POWER_EFFICIENT_DEFAULT new file mode 100644 index 
000000000000..6cf98a6238b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WQ_POWER_EFFICIENT_DEFAULT @@ -0,0 +1 @@ +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WWAN b/anolis/configs/L2-OPTIONAL/default/CONFIG_WWAN new file mode 100644 index 000000000000..55b7942e0a8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WWAN @@ -0,0 +1 @@ +# CONFIG_WWAN is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_WW_MUTEX_SELFTEST b/anolis/configs/L2-OPTIONAL/default/CONFIG_WW_MUTEX_SELFTEST new file mode 100644 index 000000000000..014b2354ca78 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_WW_MUTEX_SELFTEST @@ -0,0 +1 @@ +# CONFIG_WW_MUTEX_SELFTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_X25 b/anolis/configs/L2-OPTIONAL/default/CONFIG_X25 new file mode 100644 index 000000000000..92856948214a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_X25 @@ -0,0 +1 @@ +# CONFIG_X25 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XARRAY_MULTI b/anolis/configs/L2-OPTIONAL/default/CONFIG_XARRAY_MULTI new file mode 100644 index 000000000000..a4f4eb42fdb3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XARRAY_MULTI @@ -0,0 +1 @@ +CONFIG_XARRAY_MULTI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM new file mode 100644 index 000000000000..63f3578d2421 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM @@ -0,0 +1 @@ +CONFIG_XFRM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_AH b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_AH new file mode 100644 index 000000000000..9ad8fc4e61b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_AH @@ -0,0 +1 @@ +CONFIG_XFRM_AH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_ALGO b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_ALGO new file mode 100644 
index 000000000000..ad44d0de5f30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_ALGO @@ -0,0 +1 @@ +CONFIG_XFRM_ALGO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_ESP b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_ESP new file mode 100644 index 000000000000..c836db65133e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_ESP @@ -0,0 +1 @@ +CONFIG_XFRM_ESP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_IPCOMP b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_IPCOMP new file mode 100644 index 000000000000..ebcbe78807bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_IPCOMP @@ -0,0 +1 @@ +CONFIG_XFRM_IPCOMP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_OFFLOAD b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_OFFLOAD new file mode 100644 index 000000000000..37cb46a2f160 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFRM_OFFLOAD @@ -0,0 +1 @@ +CONFIG_XFRM_OFFLOAD=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XFS_SUPPORT_ASCII_CI b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFS_SUPPORT_ASCII_CI new file mode 100644 index 000000000000..0116c0e57265 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XFS_SUPPORT_ASCII_CI @@ -0,0 +1 @@ +CONFIG_XFS_SUPPORT_ASCII_CI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_AXI_EMAC b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_AXI_EMAC new file mode 100644 index 000000000000..9b5486d39f4d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_AXI_EMAC @@ -0,0 +1 @@ +# CONFIG_XILINX_AXI_EMAC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_DMA new file mode 100644 index 000000000000..67b50b087234 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_DMA @@ -0,0 +1 @@ +# CONFIG_XILINX_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_EMACLITE 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_EMACLITE new file mode 100644 index 000000000000..ce6f11720757 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_EMACLITE @@ -0,0 +1 @@ +# CONFIG_XILINX_EMACLITE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_GMII2RGMII b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_GMII2RGMII new file mode 100644 index 000000000000..c2901c6ea781 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_GMII2RGMII @@ -0,0 +1 @@ +CONFIG_XILINX_GMII2RGMII=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_LL_TEMAC b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_LL_TEMAC new file mode 100644 index 000000000000..01a4f19cca09 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_LL_TEMAC @@ -0,0 +1 @@ +# CONFIG_XILINX_LL_TEMAC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_SDFEC b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_SDFEC new file mode 100644 index 000000000000..5de693e9871e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_SDFEC @@ -0,0 +1 @@ +# CONFIG_XILINX_SDFEC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_VCU b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_VCU new file mode 100644 index 000000000000..7ed663374be9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_VCU @@ -0,0 +1 @@ +# CONFIG_XILINX_VCU is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_WATCHDOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_WATCHDOG new file mode 100644 index 000000000000..b0f810054392 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_XILINX_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_XDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_XDMA new file mode 100644 index 000000000000..7818eb3188dc --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILINX_XDMA @@ -0,0 +1 @@ +# CONFIG_XILINX_XDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILLYBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILLYBUS new file mode 100644 index 000000000000..ce2448ee6c70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILLYBUS @@ -0,0 +1 @@ +# CONFIG_XILLYBUS is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XILLYUSB b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILLYUSB new file mode 100644 index 000000000000..1b74c675f4e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XILLYUSB @@ -0,0 +1 @@ +# CONFIG_XILLYUSB is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_XXHASH b/anolis/configs/L2-OPTIONAL/default/CONFIG_XXHASH new file mode 100644 index 000000000000..25ccb2e89b88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_XXHASH @@ -0,0 +1 @@ +CONFIG_XXHASH=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA new file mode 100644 index 000000000000..fa07f12c4899 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA @@ -0,0 +1 @@ +CONFIG_YENTA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_ENE_TUNE b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_ENE_TUNE new file mode 100644 index 000000000000..8753860d35c0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_ENE_TUNE @@ -0,0 +1 @@ +CONFIG_YENTA_ENE_TUNE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_O2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_O2 new file mode 100644 index 000000000000..d77df1f4688b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_O2 @@ -0,0 +1 @@ +CONFIG_YENTA_O2=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_RICOH b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_RICOH new file mode 100644 index 000000000000..47b537c5dbf7 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_RICOH @@ -0,0 +1 @@ +CONFIG_YENTA_RICOH=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_TI b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_TI new file mode 100644 index 000000000000..003f693c2f51 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_TI @@ -0,0 +1 @@ +CONFIG_YENTA_TI=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_TOSHIBA b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_TOSHIBA new file mode 100644 index 000000000000..ce7555e6874c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_YENTA_TOSHIBA @@ -0,0 +1 @@ +CONFIG_YENTA_TOSHIBA=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ZEROPLUS_FF b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZEROPLUS_FF new file mode 100644 index 000000000000..a3656c69a7a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZEROPLUS_FF @@ -0,0 +1 @@ +# CONFIG_ZEROPLUS_FF is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ZIIRAVE_WATCHDOG b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZIIRAVE_WATCHDOG new file mode 100644 index 000000000000..db53f11b7e65 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZIIRAVE_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_ZIIRAVE_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ZLIB_DEFLATE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZLIB_DEFLATE new file mode 100644 index 000000000000..078cde4af4c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZLIB_DEFLATE @@ -0,0 +1 @@ +CONFIG_ZLIB_DEFLATE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ZLIB_INFLATE b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZLIB_INFLATE new file mode 100644 index 000000000000..e23856bdc9fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZLIB_INFLATE @@ -0,0 +1 @@ +CONFIG_ZLIB_INFLATE=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ZONEFS_FS b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZONEFS_FS new file mode 100644 
index 000000000000..557a55ca1cf8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZONEFS_FS @@ -0,0 +1 @@ +# CONFIG_ZONEFS_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_60XX_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_60XX_WDT new file mode 100644 index 000000000000..4506c4b5b014 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_60XX_WDT @@ -0,0 +1 @@ +# CONFIG_60XX_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ABP060MG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ABP060MG new file mode 100644 index 000000000000..56b9bd17fcec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ABP060MG @@ -0,0 +1 @@ +# CONFIG_ABP060MG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACERHDF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACERHDF new file mode 100644 index 000000000000..53c61f3c7150 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACERHDF @@ -0,0 +1 @@ +CONFIG_ACERHDF=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACER_WIRELESS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACER_WIRELESS new file mode 100644 index 000000000000..648d05da41ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACER_WIRELESS @@ -0,0 +1 @@ +# CONFIG_ACER_WIRELESS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACER_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACER_WMI new file mode 100644 index 000000000000..06ff5c76de7b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACER_WMI @@ -0,0 +1 @@ +CONFIG_ACER_WMI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_ADXL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_ADXL new file mode 100644 index 000000000000..e76a18724d29 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_ADXL @@ -0,0 +1 @@ +CONFIG_ACPI_ADXL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_ALS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_ALS new file mode 100644 index 000000000000..e1f9ffcf0df7 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_ALS @@ -0,0 +1 @@ +# CONFIG_ACPI_ALS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_CMPC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_CMPC new file mode 100644 index 000000000000..72ad5cede8ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_CMPC @@ -0,0 +1 @@ +CONFIG_ACPI_CMPC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_CPU_FREQ_PSS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_CPU_FREQ_PSS new file mode 100644 index 000000000000..6cb5293d9869 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_CPU_FREQ_PSS @@ -0,0 +1 @@ +CONFIG_ACPI_CPU_FREQ_PSS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_DPTF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_DPTF new file mode 100644 index 000000000000..e1ef119ef2e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_DPTF @@ -0,0 +1 @@ +# CONFIG_ACPI_DPTF is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_HOTPLUG_IOAPIC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_HOTPLUG_IOAPIC new file mode 100644 index 000000000000..4e82e7d4e35c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_HOTPLUG_IOAPIC @@ -0,0 +1 @@ +CONFIG_ACPI_HOTPLUG_IOAPIC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_LEGACY_TABLES_LOOKUP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_LEGACY_TABLES_LOOKUP new file mode 100644 index 000000000000..dd59ebd67c12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_LEGACY_TABLES_LOOKUP @@ -0,0 +1 @@ +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_LPIT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_LPIT new file mode 100644 index 000000000000..7d63b33df491 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_LPIT @@ -0,0 +1 @@ +CONFIG_ACPI_LPIT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_PLATFORM_PROFILE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_PLATFORM_PROFILE new file mode 100644 
index 000000000000..33f4540f3d11 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_PLATFORM_PROFILE @@ -0,0 +1 @@ +CONFIG_ACPI_PLATFORM_PROFILE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_PROCESSOR_CSTATE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_PROCESSOR_CSTATE new file mode 100644 index 000000000000..32905041afe9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_PROCESSOR_CSTATE @@ -0,0 +1 @@ +CONFIG_ACPI_PROCESSOR_CSTATE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT new file mode 100644 index 000000000000..4b19514a9a9b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT @@ -0,0 +1 @@ +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_TOSHIBA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_TOSHIBA new file mode 100644 index 000000000000..2504b4dc8d4f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_TOSHIBA @@ -0,0 +1 @@ +# CONFIG_ACPI_TOSHIBA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_WMI new file mode 100644 index 000000000000..9cef4743c8cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACPI_WMI @@ -0,0 +1 @@ +CONFIG_ACPI_WMI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACRN_GUEST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACRN_GUEST new file mode 100644 index 000000000000..700a0209a856 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ACRN_GUEST @@ -0,0 +1 @@ +# CONFIG_ACRN_GUEST is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S1200 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S1200 new file mode 100644 index 000000000000..4c835768054e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S1200 @@ -0,0 +1 @@ +# CONFIG_AD2S1200 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S90 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S90 new file mode 100644 index 000000000000..8652c581466d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD2S90 @@ -0,0 +1 @@ +# CONFIG_AD2S90 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD3552R b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD3552R new file mode 100644 index 000000000000..3e21ea9bcf28 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD3552R @@ -0,0 +1 @@ +# CONFIG_AD3552R is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD4130 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD4130 new file mode 100644 index 000000000000..34fe968a7451 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD4130 @@ -0,0 +1 @@ +# CONFIG_AD4130 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5064 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5064 new file mode 100644 index 000000000000..d26142d73fd7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5064 @@ -0,0 +1 @@ +# CONFIG_AD5064 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5110 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5110 new file mode 100644 index 000000000000..3beaff0bb3e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5110 @@ -0,0 +1 @@ +# CONFIG_AD5110 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5272 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5272 new file mode 100644 index 000000000000..66aa4460977e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5272 @@ -0,0 +1 @@ +# CONFIG_AD5272 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5360 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5360 new file mode 100644 index 000000000000..294533b24b14 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5360 @@ -0,0 +1 @@ +# CONFIG_AD5360 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5380 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5380 new file mode 100644 index 000000000000..cc21a83aa48e --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5380 @@ -0,0 +1 @@ +# CONFIG_AD5380 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5421 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5421 new file mode 100644 index 000000000000..9ec8d94a8d95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5421 @@ -0,0 +1 @@ +# CONFIG_AD5421 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5446 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5446 new file mode 100644 index 000000000000..1a8d66614b5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5446 @@ -0,0 +1 @@ +# CONFIG_AD5446 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5449 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5449 new file mode 100644 index 000000000000..a370e945dce5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5449 @@ -0,0 +1 @@ +# CONFIG_AD5449 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5504 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5504 new file mode 100644 index 000000000000..cad3ffe128e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5504 @@ -0,0 +1 @@ +# CONFIG_AD5504 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5592R b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5592R new file mode 100644 index 000000000000..019d6225df59 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5592R @@ -0,0 +1 @@ +# CONFIG_AD5592R is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5593R b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5593R new file mode 100644 index 000000000000..a0a89f52b4fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5593R @@ -0,0 +1 @@ +# CONFIG_AD5593R is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5624R_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5624R_SPI new file mode 100644 index 000000000000..fb8e93bb2b6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5624R_SPI @@ -0,0 +1 @@ +# CONFIG_AD5624R_SPI is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5686_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5686_SPI new file mode 100644 index 000000000000..ab255270b2b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5686_SPI @@ -0,0 +1 @@ +# CONFIG_AD5686_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5696_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5696_I2C new file mode 100644 index 000000000000..5fd9e716619b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5696_I2C @@ -0,0 +1 @@ +# CONFIG_AD5696_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5755 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5755 new file mode 100644 index 000000000000..986e3bdc9249 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5755 @@ -0,0 +1 @@ +# CONFIG_AD5755 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5758 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5758 new file mode 100644 index 000000000000..244fcdfa62f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5758 @@ -0,0 +1 @@ +# CONFIG_AD5758 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5761 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5761 new file mode 100644 index 000000000000..93e281ced3b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5761 @@ -0,0 +1 @@ +# CONFIG_AD5761 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5764 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5764 new file mode 100644 index 000000000000..c6885466f205 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5764 @@ -0,0 +1 @@ +# CONFIG_AD5764 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5766 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5766 new file mode 100644 index 000000000000..5df88b41d4b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5766 @@ -0,0 +1 @@ +# CONFIG_AD5766 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5770R b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5770R new file 
mode 100644 index 000000000000..3a875e6c1735 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5770R @@ -0,0 +1 @@ +# CONFIG_AD5770R is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5791 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5791 new file mode 100644 index 000000000000..4d143c659219 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD5791 @@ -0,0 +1 @@ +# CONFIG_AD5791 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7091R5 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7091R5 new file mode 100644 index 000000000000..a2cd8825e209 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7091R5 @@ -0,0 +1 @@ +# CONFIG_AD7091R5 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7124 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7124 new file mode 100644 index 000000000000..b25a1166b690 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7124 @@ -0,0 +1 @@ +# CONFIG_AD7124 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7150 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7150 new file mode 100644 index 000000000000..cd5a3edb0850 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7150 @@ -0,0 +1 @@ +# CONFIG_AD7150 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7192 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7192 new file mode 100644 index 000000000000..231728de4734 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7192 @@ -0,0 +1 @@ +# CONFIG_AD7192 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7266 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7266 new file mode 100644 index 000000000000..62b03da046d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7266 @@ -0,0 +1 @@ +# CONFIG_AD7266 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7280 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7280 new file mode 100644 index 000000000000..6ac49feefaef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7280 @@ -0,0 +1 @@ +# 
CONFIG_AD7280 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7291 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7291 new file mode 100644 index 000000000000..9257aeb05d7a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7291 @@ -0,0 +1 @@ +# CONFIG_AD7291 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7292 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7292 new file mode 100644 index 000000000000..a9c0380b3c4b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7292 @@ -0,0 +1 @@ +# CONFIG_AD7292 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7293 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7293 new file mode 100644 index 000000000000..f804e7bf7209 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7293 @@ -0,0 +1 @@ +# CONFIG_AD7293 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7298 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7298 new file mode 100644 index 000000000000..40ce2af3a07b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7298 @@ -0,0 +1 @@ +# CONFIG_AD7298 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7303 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7303 new file mode 100644 index 000000000000..f4909c7c0045 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7303 @@ -0,0 +1 @@ +# CONFIG_AD7303 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74115 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74115 new file mode 100644 index 000000000000..0fbf7ef087a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74115 @@ -0,0 +1 @@ +# CONFIG_AD74115 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74413R b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74413R new file mode 100644 index 000000000000..c22b3660b36b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD74413R @@ -0,0 +1 @@ +# CONFIG_AD74413R is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7476 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7476 new file mode 100644 index 000000000000..a8a9e9361a02 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7476 @@ -0,0 +1 @@ +# CONFIG_AD7476 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_PARALLEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_PARALLEL new file mode 100644 index 000000000000..c04c8bcd2eb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_PARALLEL @@ -0,0 +1 @@ +# CONFIG_AD7606_IFACE_PARALLEL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_SPI new file mode 100644 index 000000000000..6f532a5afc09 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7606_IFACE_SPI @@ -0,0 +1 @@ +# CONFIG_AD7606_IFACE_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7746 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7746 new file mode 100644 index 000000000000..49d1a7d26ae1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7746 @@ -0,0 +1 @@ +# CONFIG_AD7746 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7766 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7766 new file mode 100644 index 000000000000..c1a1ea27718b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7766 @@ -0,0 +1 @@ +# CONFIG_AD7766 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7768_1 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7768_1 new file mode 100644 index 000000000000..eee87cb8238e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7768_1 @@ -0,0 +1 @@ +# CONFIG_AD7768_1 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7780 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7780 new file mode 100644 index 000000000000..ad5e89ca58a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7780 @@ -0,0 +1 @@ +# CONFIG_AD7780 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7791 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7791 new file mode 100644 index 000000000000..3b0b7f0f1bd7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7791 @@ -0,0 +1 @@ +# CONFIG_AD7791 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7793 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7793 new file mode 100644 index 000000000000..f2cf5fc2f76e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7793 @@ -0,0 +1 @@ +# CONFIG_AD7793 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7887 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7887 new file mode 100644 index 000000000000..4746e6e69beb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7887 @@ -0,0 +1 @@ +# CONFIG_AD7887 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7923 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7923 new file mode 100644 index 000000000000..cc3b9c69497a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7923 @@ -0,0 +1 @@ +# CONFIG_AD7923 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7949 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7949 new file mode 100644 index 000000000000..a11e18aea30b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD7949 @@ -0,0 +1 @@ +# CONFIG_AD7949 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD799X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD799X new file mode 100644 index 000000000000..66e971f76cea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD799X @@ -0,0 +1 @@ +# CONFIG_AD799X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8366 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8366 new file mode 100644 index 000000000000..0d9754eeb9bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8366 @@ -0,0 +1 @@ +# CONFIG_AD8366 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8801 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8801 new file mode 100644 index 000000000000..e634e592558b --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD8801 @@ -0,0 +1 @@ +# CONFIG_AD8801 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9523 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9523 new file mode 100644 index 000000000000..abfd7e162c82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9523 @@ -0,0 +1 @@ +# CONFIG_AD9523 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADA4250 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADA4250 new file mode 100644 index 000000000000..7261f710891e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADA4250 @@ -0,0 +1 @@ +# CONFIG_ADA4250 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADDRESS_MASKING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADDRESS_MASKING new file mode 100644 index 000000000000..c918428df741 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADDRESS_MASKING @@ -0,0 +1 @@ +# CONFIG_ADDRESS_MASKING is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4350 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4350 new file mode 100644 index 000000000000..77a7e76604b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4350 @@ -0,0 +1 @@ +# CONFIG_ADF4350 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4371 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4371 new file mode 100644 index 000000000000..0d7d09dd13dc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4371 @@ -0,0 +1 @@ +# CONFIG_ADF4371 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4377 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4377 new file mode 100644 index 000000000000..045780028d52 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADF4377 @@ -0,0 +1 @@ +# CONFIG_ADF4377 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16080 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16080 new file mode 100644 index 000000000000..5718d4066600 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16080 @@ -0,0 +1 @@ +# 
CONFIG_ADIS16080 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16130 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16130 new file mode 100644 index 000000000000..2a9469708eb8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16130 @@ -0,0 +1 @@ +# CONFIG_ADIS16130 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16136 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16136 new file mode 100644 index 000000000000..efe24e8bc1fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16136 @@ -0,0 +1 @@ +# CONFIG_ADIS16136 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16201 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16201 new file mode 100644 index 000000000000..87928c55476f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16201 @@ -0,0 +1 @@ +# CONFIG_ADIS16201 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16209 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16209 new file mode 100644 index 000000000000..8f686138b83b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16209 @@ -0,0 +1 @@ +# CONFIG_ADIS16209 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16260 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16260 new file mode 100644 index 000000000000..a96770bc9833 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16260 @@ -0,0 +1 @@ +# CONFIG_ADIS16260 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16400 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16400 new file mode 100644 index 000000000000..f7717e8d82c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16400 @@ -0,0 +1 @@ +# CONFIG_ADIS16400 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16460 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16460 new file mode 100644 index 000000000000..e3f206072caf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16460 @@ -0,0 +1 @@ +# CONFIG_ADIS16460 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16475 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16475 new file mode 100644 index 000000000000..18dd3612a10f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16475 @@ -0,0 +1 @@ +# CONFIG_ADIS16475 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16480 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16480 new file mode 100644 index 000000000000..023204875e7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADIS16480 @@ -0,0 +1 @@ +# CONFIG_ADIS16480 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADJD_S311 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADJD_S311 new file mode 100644 index 000000000000..624a7bc07ad6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADJD_S311 @@ -0,0 +1 @@ +# CONFIG_ADJD_S311 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV1013 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV1013 new file mode 100644 index 000000000000..06f76f7c1226 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV1013 @@ -0,0 +1 @@ +# CONFIG_ADMV1013 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV1014 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV1014 new file mode 100644 index 000000000000..983402c3c0db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV1014 @@ -0,0 +1 @@ +# CONFIG_ADMV1014 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV4420 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV4420 new file mode 100644 index 000000000000..d5ae672e12b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV4420 @@ -0,0 +1 @@ +# CONFIG_ADMV4420 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV8818 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV8818 new file mode 100644 index 000000000000..0130531ed952 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADMV8818 @@ -0,0 +1 @@ +# CONFIG_ADMV8818 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADRF6780 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADRF6780 new file mode 100644 index 000000000000..4830752d31d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADRF6780 @@ -0,0 +1 @@ +# CONFIG_ADRF6780 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADUX1020 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADUX1020 new file mode 100644 index 000000000000..3e18d3e0a4d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADUX1020 @@ -0,0 +1 @@ +# CONFIG_ADUX1020 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADVANTECH_EC_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADVANTECH_EC_WDT new file mode 100644 index 000000000000..48d437ed84f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADVANTECH_EC_WDT @@ -0,0 +1 @@ +# CONFIG_ADVANTECH_EC_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADV_SWBUTTON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADV_SWBUTTON new file mode 100644 index 000000000000..c359f60c6f84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADV_SWBUTTON @@ -0,0 +1 @@ +# CONFIG_ADV_SWBUTTON is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL313_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL313_I2C new file mode 100644 index 000000000000..5746dee1e8c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL313_I2C @@ -0,0 +1 @@ +# CONFIG_ADXL313_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL313_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL313_SPI new file mode 100644 index 000000000000..2e5c52a6e312 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL313_SPI @@ -0,0 +1 @@ +# CONFIG_ADXL313_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL345_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL345_I2C new file mode 100644 index 000000000000..f6976b836c60 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL345_I2C @@ -0,0 +1 @@ +# CONFIG_ADXL345_I2C is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL345_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL345_SPI new file mode 100644 index 000000000000..186ab0d3ad9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL345_SPI @@ -0,0 +1 @@ +# CONFIG_ADXL345_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL355_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL355_I2C new file mode 100644 index 000000000000..5d6dbfc75181 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL355_I2C @@ -0,0 +1 @@ +# CONFIG_ADXL355_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL355_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL355_SPI new file mode 100644 index 000000000000..d546ccabbe02 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL355_SPI @@ -0,0 +1 @@ +# CONFIG_ADXL355_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL367_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL367_I2C new file mode 100644 index 000000000000..d876f8d2be8b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL367_I2C @@ -0,0 +1 @@ +# CONFIG_ADXL367_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL367_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL367_SPI new file mode 100644 index 000000000000..980e9a2e1a7a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL367_SPI @@ -0,0 +1 @@ +# CONFIG_ADXL367_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL372_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL372_I2C new file mode 100644 index 000000000000..dad8d9bf6fb4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL372_I2C @@ -0,0 +1 @@ +# CONFIG_ADXL372_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL372_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL372_SPI new file mode 100644 index 000000000000..e9916f854444 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXL372_SPI @@ -0,0 +1 @@ +# CONFIG_ADXL372_SPI is not set diff 
--git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXRS290 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXRS290 new file mode 100644 index 000000000000..8b7d877f13bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXRS290 @@ -0,0 +1 @@ +# CONFIG_ADXRS290 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXRS450 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXRS450 new file mode 100644 index 000000000000..7abb6c9c598d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADXRS450 @@ -0,0 +1 @@ +# CONFIG_ADXRS450 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4403 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4403 new file mode 100644 index 000000000000..26df5ff60250 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4403 @@ -0,0 +1 @@ +# CONFIG_AFE4403 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4404 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4404 new file mode 100644 index 000000000000..0244492b53ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AFE4404 @@ -0,0 +1 @@ +# CONFIG_AFE4404 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AGP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AGP new file mode 100644 index 000000000000..f7332ef53c00 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AGP @@ -0,0 +1 @@ +# CONFIG_AGP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK09911 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK09911 new file mode 100644 index 000000000000..e4eae5d43484 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK09911 @@ -0,0 +1 @@ +# CONFIG_AK09911 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8974 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8974 new file mode 100644 index 000000000000..c74de8fec3a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8974 @@ -0,0 +1 @@ +# CONFIG_AK8974 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8975 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8975 new file mode 100644 
index 000000000000..958f0bd067c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AK8975 @@ -0,0 +1 @@ +# CONFIG_AK8975 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3010 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3010 new file mode 100644 index 000000000000..c0ce27f46490 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3010 @@ -0,0 +1 @@ +# CONFIG_AL3010 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3320A b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3320A new file mode 100644 index 000000000000..0b60a6dfd3c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AL3320A @@ -0,0 +1 @@ +# CONFIG_AL3320A is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ALIM1535_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ALIM1535_WDT new file mode 100644 index 000000000000..752901cddd1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ALIM1535_WDT @@ -0,0 +1 @@ +CONFIG_ALIM1535_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ALTERA_STAPL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ALTERA_STAPL new file mode 100644 index 000000000000..7931c1e5ab18 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ALTERA_STAPL @@ -0,0 +1 @@ +CONFIG_ALTERA_STAPL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AM2315 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AM2315 new file mode 100644 index 000000000000..9acd703102e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AM2315 @@ -0,0 +1 @@ +# CONFIG_AM2315 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_NB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_NB new file mode 100644 index 000000000000..31b1f5e5f6fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_NB @@ -0,0 +1 @@ +CONFIG_AMD_NB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMC new file mode 100644 index 000000000000..1ad2137c611f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMC @@ -0,0 +1 @@ +# 
CONFIG_AMD_PMC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMF new file mode 100644 index 000000000000..0b4aaae83f32 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_PMF @@ -0,0 +1 @@ +# CONFIG_AMD_PMF is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_SFH_HID b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_SFH_HID new file mode 100644 index 000000000000..166fa39b1cc2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMD_SFH_HID @@ -0,0 +1 @@ +# CONFIG_AMD_SFH_HID is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMIGA_PARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMIGA_PARTITION new file mode 100644 index 000000000000..5f03ffaf5337 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMIGA_PARTITION @@ -0,0 +1 @@ +CONFIG_AMIGA_PARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMILO_RFKILL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMILO_RFKILL new file mode 100644 index 000000000000..ab49a1190354 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AMILO_RFKILL @@ -0,0 +1 @@ +CONFIG_AMILO_RFKILL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9300 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9300 new file mode 100644 index 000000000000..21d37456539d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9300 @@ -0,0 +1 @@ +# CONFIG_APDS9300 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9802ALS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9802ALS new file mode 100644 index 000000000000..086fb1becf62 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9802ALS @@ -0,0 +1 @@ +CONFIG_APDS9802ALS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9960 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9960 new file mode 100644 index 000000000000..ac56682f409c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APDS9960 @@ -0,0 +1 @@ +# CONFIG_APDS9960 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_APPLE_GMUX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APPLE_GMUX new file mode 100644 index 000000000000..9ecb754be9de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APPLE_GMUX @@ -0,0 +1 @@ +CONFIG_APPLE_GMUX=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_APPLE_PROPERTIES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APPLE_PROPERTIES new file mode 100644 index 000000000000..5f2035b0e917 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_APPLE_PROPERTIES @@ -0,0 +1 @@ +# CONFIG_APPLE_PROPERTIES is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AQTION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AQTION new file mode 100644 index 000000000000..7812ca016377 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AQTION @@ -0,0 +1 @@ +CONFIG_AQTION=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AR5523 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AR5523 new file mode 100644 index 000000000000..731e605b7172 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AR5523 @@ -0,0 +1 @@ +# CONFIG_AR5523 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CLOCKSOURCE_INIT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CLOCKSOURCE_INIT new file mode 100644 index 000000000000..0e1d8fd3bb7b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CLOCKSOURCE_INIT @@ -0,0 +1 @@ +CONFIG_ARCH_CLOCKSOURCE_INIT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ADD_PAGES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ADD_PAGES new file mode 100644 index 000000000000..8b8300af27aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ADD_PAGES @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_ADD_PAGES=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CC_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CC_PLATFORM new file mode 100644 index 000000000000..6ee641e4cf79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CC_PLATFORM @@ -0,0 +1 @@ 
+CONFIG_ARCH_HAS_CC_PLATFORM=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION new file mode 100644 index 000000000000..c4c0018dd1ad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_FINALIZE_INIT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_FINALIZE_INIT new file mode 100644 index 000000000000..a826e586c4c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_FINALIZE_INIT @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_RELAX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_RELAX new file mode 100644 index 000000000000..1d046c958ef6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_CPU_RELAX @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_CPU_RELAX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED new file mode 100644 index 000000000000..e94589313f21 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_EARLY_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_EARLY_DEBUG new file mode 100644 index 000000000000..330e0806b2ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_EARLY_DEBUG @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_EARLY_DEBUG=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ELFCORE_COMPAT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ELFCORE_COMPAT new file mode 100644 index 000000000000..1d94e34b5a85 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_ELFCORE_COMPAT @@ -0,0 +1 @@ 
+CONFIG_ARCH_HAS_ELFCORE_COMPAT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED new file mode 100644 index 000000000000..bf71c649fb8b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_MEM_ENCRYPT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_MEM_ENCRYPT new file mode 100644 index 000000000000..a87d5d6bb2bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_MEM_ENCRYPT @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_MEM_ENCRYPT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG new file mode 100644 index 000000000000..817a506f6881 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH new file mode 100644 index 000000000000..01e8ba194e3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PKEYS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PKEYS new file mode 100644 index 000000000000..ee751e45fe39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_PKEYS @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_PKEYS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE new file mode 100644 index 000000000000..402b0726b7df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE @@ -0,0 +1 @@ 
+CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MAY_HAVE_PC_FDC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MAY_HAVE_PC_FDC new file mode 100644 index 000000000000..7884f40dbed5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MAY_HAVE_PC_FDC @@ -0,0 +1 @@ +CONFIG_ARCH_MAY_HAVE_PC_FDC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC new file mode 100644 index 000000000000..2789c27e17a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC @@ -0,0 +1 @@ +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT new file mode 100644 index 000000000000..2d87e628fcce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT @@ -0,0 +1 @@ +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_PC_SERIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_PC_SERIO new file mode 100644 index 000000000000..8d49b424e2a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MIGHT_HAVE_PC_SERIO @@ -0,0 +1 @@ +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS new file mode 100644 index 000000000000..e4ce24568402 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_BITS=28 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS_MAX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS_MAX new file mode 100644 index 000000000000..9f3e6d136c58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS_MAX @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS_MIN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS_MIN new file mode 100644 index 000000000000..8518e1303f78 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_BITS_MIN @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN new file mode 100644 index 000000000000..9cd428feb208 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN @@ -0,0 +1 @@ +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SPARSEMEM_DEFAULT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SPARSEMEM_DEFAULT new file mode 100644 index 000000000000..799122cf34aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SPARSEMEM_DEFAULT @@ -0,0 +1 @@ +CONFIG_ARCH_SPARSEMEM_DEFAULT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG new file mode 100644 index 000000000000..d981b8e0e036 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG new file mode 100644 index 000000000000..40f0233e6b32 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_JUMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_JUMP new file mode 100644 index 000000000000..4f63af6947cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_JUMP @@ -0,0 +1 @@ 
+CONFIG_ARCH_SUPPORTS_KEXEC_JUMP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY new file mode 100644 index 000000000000..bb22c1173c4d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE new file mode 100644 index 000000000000..b5c30c362399 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP new file mode 100644 index 000000000000..b2816f365d92 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP @@ -0,0 +1 @@ +CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_USES_PG_UNCACHED b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_USES_PG_UNCACHED new file mode 100644 index 000000000000..a07ba5e0b33a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_USES_PG_UNCACHED @@ -0,0 +1 @@ +CONFIG_ARCH_USES_PG_UNCACHED=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_USE_BUILTIN_BSWAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_USE_BUILTIN_BSWAP new file mode 100644 index 000000000000..0665eb9f3990 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_USE_BUILTIN_BSWAP @@ -0,0 +1 @@ +CONFIG_ARCH_USE_BUILTIN_BSWAP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT new file mode 100644 index 000000000000..e1a56bfcc7ae --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT @@ -0,0 +1 @@ +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_GENERAL_HUGETLB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_GENERAL_HUGETLB new file mode 100644 index 000000000000..e1afff6d9573 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_GENERAL_HUGETLB @@ -0,0 +1 @@ +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OLD_COMPAT_IPC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OLD_COMPAT_IPC new file mode 100644 index 000000000000..9d328ef054ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OLD_COMPAT_IPC @@ -0,0 +1 @@ +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP new file mode 100644 index 000000000000..edd57b80994b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP @@ -0,0 +1 @@ +CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP new file mode 100644 index 000000000000..9308f5db98cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP @@ -0,0 +1 @@ +CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS3935 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS3935 new file mode 100644 index 000000000000..3d08d5594364 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS3935 @@ -0,0 +1 @@ +# CONFIG_AS3935 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS73211 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS73211 new file mode 100644 index 000000000000..dd3aa86a497a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS73211 @@ 
-0,0 +1 @@ +# CONFIG_AS73211 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_LAPTOP new file mode 100644 index 000000000000..40e261455a0a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_LAPTOP @@ -0,0 +1 @@ +CONFIG_ASUS_LAPTOP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_NB_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_NB_WMI new file mode 100644 index 000000000000..16d2c7fa9b0d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_NB_WMI @@ -0,0 +1 @@ +CONFIG_ASUS_NB_WMI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_TF103C_DOCK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_TF103C_DOCK new file mode 100644 index 000000000000..a6e76ad34038 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_TF103C_DOCK @@ -0,0 +1 @@ +# CONFIG_ASUS_TF103C_DOCK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_WIRELESS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_WIRELESS new file mode 100644 index 000000000000..6e8ebd8da47c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_WIRELESS @@ -0,0 +1 @@ +# CONFIG_ASUS_WIRELESS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_WMI new file mode 100644 index 000000000000..e881859cc8c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ASUS_WMI @@ -0,0 +1 @@ +CONFIG_ASUS_WMI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_AVX512 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_AVX512 new file mode 100644 index 000000000000..5b1d1bd8c343 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_AVX512 @@ -0,0 +1 @@ +CONFIG_AS_AVX512=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_GFNI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_GFNI new file mode 100644 index 000000000000..96ae77780964 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_GFNI @@ -0,0 +1 @@ +CONFIG_AS_GFNI=y diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_SHA1_NI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_SHA1_NI new file mode 100644 index 000000000000..e0ad2b172c47 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_SHA1_NI @@ -0,0 +1 @@ +CONFIG_AS_SHA1_NI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_SHA256_NI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_SHA256_NI new file mode 100644 index 000000000000..df77e5b9d949 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_SHA256_NI @@ -0,0 +1 @@ +CONFIG_AS_SHA256_NI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_TPAUSE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_TPAUSE new file mode 100644 index 000000000000..e1d3f4ac127b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_TPAUSE @@ -0,0 +1 @@ +CONFIG_AS_TPAUSE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_WRUSS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_WRUSS new file mode 100644 index 000000000000..51b5d9ab8b70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AS_WRUSS @@ -0,0 +1 @@ +CONFIG_AS_WRUSS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K new file mode 100644 index 000000000000..7e304c3b6173 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K @@ -0,0 +1 @@ +CONFIG_ATH10K=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_CE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_CE new file mode 100644 index 000000000000..d07e3d10a7bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_CE @@ -0,0 +1 @@ +CONFIG_ATH10K_CE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUG new file mode 100644 index 000000000000..6390cff87f17 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUG @@ -0,0 +1 @@ +# CONFIG_ATH10K_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUGFS 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUGFS new file mode 100644 index 000000000000..a7a5d6286c3f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_ATH10K_DEBUGFS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_PCI new file mode 100644 index 000000000000..888f54594ce9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_PCI @@ -0,0 +1 @@ +CONFIG_ATH10K_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_SDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_SDIO new file mode 100644 index 000000000000..efdf37f81fe2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_SDIO @@ -0,0 +1 @@ +# CONFIG_ATH10K_SDIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_TRACING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_TRACING new file mode 100644 index 000000000000..e6827cecbd33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_TRACING @@ -0,0 +1 @@ +# CONFIG_ATH10K_TRACING is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_USB new file mode 100644 index 000000000000..3db2059ec227 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH10K_USB @@ -0,0 +1 @@ +# CONFIG_ATH10K_USB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH11K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH11K new file mode 100644 index 000000000000..584c5e3f1ebc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH11K @@ -0,0 +1 @@ +# CONFIG_ATH11K is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH12K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH12K new file mode 100644 index 000000000000..5d4074281435 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH12K @@ -0,0 +1 @@ +# CONFIG_ATH12K is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K new file mode 100644 index 000000000000..add610882e5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K @@ -0,0 +1 @@ +# CONFIG_ATH5K is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K_PCI new file mode 100644 index 000000000000..6447cbab9f1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH5K_PCI @@ -0,0 +1 @@ +# CONFIG_ATH5K_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH6KL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH6KL new file mode 100644 index 000000000000..34de675fb8e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH6KL @@ -0,0 +1 @@ +# CONFIG_ATH6KL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K new file mode 100644 index 000000000000..3346478981ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K @@ -0,0 +1 @@ +CONFIG_ATH9K=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_AHB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_AHB new file mode 100644 index 000000000000..b9a31aab7584 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_AHB @@ -0,0 +1 @@ +CONFIG_ATH9K_AHB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_BTCOEX_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_BTCOEX_SUPPORT new file mode 100644 index 000000000000..64cf5324c025 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_BTCOEX_SUPPORT @@ -0,0 +1 @@ +CONFIG_ATH9K_BTCOEX_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_CHANNEL_CONTEXT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_CHANNEL_CONTEXT new file mode 100644 index 000000000000..e63bc2c75fdd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_CHANNEL_CONTEXT @@ -0,0 +1 @@ +# CONFIG_ATH9K_CHANNEL_CONTEXT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON new file mode 100644 index 000000000000..cccaa9d0c22d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON @@ -0,0 +1 @@ +CONFIG_ATH9K_COMMON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_DEBUG new file mode 100644 index 000000000000..22e139de6c52 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_DEBUG @@ -0,0 +1 @@ +CONFIG_ATH9K_COMMON_DEBUG=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_SPECTRAL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_SPECTRAL new file mode 100644 index 000000000000..48a79591472b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_COMMON_SPECTRAL @@ -0,0 +1 @@ +# CONFIG_ATH9K_COMMON_SPECTRAL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_DEBUGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_DEBUGFS new file mode 100644 index 000000000000..87a6218f9887 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_DEBUGFS @@ -0,0 +1 @@ +CONFIG_ATH9K_DEBUGFS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_DYNACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_DYNACK new file mode 100644 index 000000000000..fd6896cbc6b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_DYNACK @@ -0,0 +1 @@ +# CONFIG_ATH9K_DYNACK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC new file mode 100644 index 000000000000..604c945745e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC @@ -0,0 +1 @@ +CONFIG_ATH9K_HTC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC_DEBUGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC_DEBUGFS new file mode 100644 index 000000000000..811b2d5bd49e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HTC_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_ATH9K_HTC_DEBUGFS is not set diff 
--git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HW b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HW new file mode 100644 index 000000000000..916a1f10e25e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HW @@ -0,0 +1 @@ +CONFIG_ATH9K_HW=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HWRNG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HWRNG new file mode 100644 index 000000000000..7b35feaa0371 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_HWRNG @@ -0,0 +1 @@ +# CONFIG_ATH9K_HWRNG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI new file mode 100644 index 000000000000..adbd3924862c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI @@ -0,0 +1 @@ +CONFIG_ATH9K_PCI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI_NO_EEPROM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI_NO_EEPROM new file mode 100644 index 000000000000..aa3549208394 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCI_NO_EEPROM @@ -0,0 +1 @@ +# CONFIG_ATH9K_PCI_NO_EEPROM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCOEM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCOEM new file mode 100644 index 000000000000..e72b499c8f8d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_PCOEM @@ -0,0 +1 @@ +CONFIG_ATH9K_PCOEM=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_RFKILL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_RFKILL new file mode 100644 index 000000000000..2e75824b7c89 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_RFKILL @@ -0,0 +1 @@ +CONFIG_ATH9K_RFKILL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_STATION_STATISTICS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_STATION_STATISTICS new file mode 100644 index 000000000000..a8ee2a27c623 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_STATION_STATISTICS @@ -0,0 +1 @@ +# 
CONFIG_ATH9K_STATION_STATISTICS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_WOW b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_WOW new file mode 100644 index 000000000000..169bfaa9d78b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH9K_WOW @@ -0,0 +1 @@ +CONFIG_ATH9K_WOW=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH_COMMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH_COMMON new file mode 100644 index 000000000000..98ba398b96e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH_COMMON @@ -0,0 +1 @@ +CONFIG_ATH_COMMON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH_DEBUG new file mode 100644 index 000000000000..a93cd5ce2518 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATH_DEBUG @@ -0,0 +1 @@ +# CONFIG_ATH_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATL2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATL2 new file mode 100644 index 000000000000..789b4a0d9abe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATL2 @@ -0,0 +1 @@ +CONFIG_ATL2=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATLAS_EZO_SENSOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATLAS_EZO_SENSOR new file mode 100644 index 000000000000..be8768690df2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATLAS_EZO_SENSOR @@ -0,0 +1 @@ +# CONFIG_ATLAS_EZO_SENSOR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATLAS_PH_SENSOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATLAS_PH_SENSOR new file mode 100644 index 000000000000..1a854eb57bb4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATLAS_PH_SENSOR @@ -0,0 +1 @@ +# CONFIG_ATLAS_PH_SENSOR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATP new file mode 100644 index 000000000000..3ccfa591d429 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ATP @@ -0,0 +1 @@ +# CONFIG_ATP is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AUDIT_ARCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AUDIT_ARCH new file mode 100644 index 000000000000..df22866032ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AUDIT_ARCH @@ -0,0 +1 @@ +CONFIG_AUDIT_ARCH=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_B43 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_B43 new file mode 100644 index 000000000000..9a5037afbefa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_B43 @@ -0,0 +1 @@ +# CONFIG_B43 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_B43LEGACY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_B43LEGACY new file mode 100644 index 000000000000..be4bdc84a585 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_B43LEGACY @@ -0,0 +1 @@ +# CONFIG_B43LEGACY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_APPLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_APPLE new file mode 100644 index 000000000000..5150ad41e5ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_APPLE @@ -0,0 +1 @@ +CONFIG_BACKLIGHT_APPLE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_GPIO new file mode 100644 index 000000000000..4ddf33e9a31d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_GPIO @@ -0,0 +1 @@ +# CONFIG_BACKLIGHT_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_PWM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_PWM new file mode 100644 index 000000000000..e867170c2999 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_PWM @@ -0,0 +1 @@ +# CONFIG_BACKLIGHT_PWM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_SAHARA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_SAHARA new file mode 100644 index 000000000000..bec58af815eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BACKLIGHT_SAHARA @@ -0,0 +1 @@ +# CONFIG_BACKLIGHT_SAHARA is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BARCO_P50_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BARCO_P50_GPIO new file mode 100644 index 000000000000..c5b5c80c7dbf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BARCO_P50_GPIO @@ -0,0 +1 @@ +# CONFIG_BARCO_P50_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET new file mode 100644 index 000000000000..b5f51dbbd686 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET @@ -0,0 +1 @@ +CONFIG_BE2NET=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE2 new file mode 100644 index 000000000000..feddf969fb3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE2 @@ -0,0 +1 @@ +# CONFIG_BE2NET_BE2 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE3 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE3 new file mode 100644 index 000000000000..a4dd47a94309 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_BE3 @@ -0,0 +1 @@ +# CONFIG_BE2NET_BE3 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_HWMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_HWMON new file mode 100644 index 000000000000..7e5e7cfe30c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_HWMON @@ -0,0 +1 @@ +CONFIG_BE2NET_HWMON=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_LANCER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_LANCER new file mode 100644 index 000000000000..d63e3ec0a598 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_LANCER @@ -0,0 +1 @@ +CONFIG_BE2NET_LANCER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_SKYHAWK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_SKYHAWK new file mode 100644 index 000000000000..d10755283ad5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BE2NET_SKYHAWK @@ -0,0 +1 @@ +CONFIG_BE2NET_SKYHAWK=y diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1750 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1750 new file mode 100644 index 000000000000..f69e2b6b1c8d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1750 @@ -0,0 +1 @@ +# CONFIG_BH1750 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1780 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1780 new file mode 100644 index 000000000000..07bbc5b68102 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BH1780 @@ -0,0 +1 @@ +# CONFIG_BH1780 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_CGROUP_IOLATENCY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_CGROUP_IOLATENCY new file mode 100644 index 000000000000..1ea3b679b4b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_CGROUP_IOLATENCY @@ -0,0 +1 @@ +CONFIG_BLK_CGROUP_IOLATENCY=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_FD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_FD new file mode 100644 index 000000000000..4f8c857f952e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_FD @@ -0,0 +1 @@ +# CONFIG_BLK_DEV_FD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_PMEM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_PMEM new file mode 100644 index 000000000000..ea2a00056c89 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BLK_DEV_PMEM @@ -0,0 +1 @@ +CONFIG_BLK_DEV_PMEM=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA180 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA180 new file mode 100644 index 000000000000..f1c41d2909f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA180 @@ -0,0 +1 @@ +# CONFIG_BMA180 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA220 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA220 new file mode 100644 index 000000000000..21f418545909 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA220 @@ -0,0 +1 @@ +# CONFIG_BMA220 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA400 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA400 new file mode 100644 index 000000000000..597efc0837f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMA400 @@ -0,0 +1 @@ +# CONFIG_BMA400 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_ACCEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_ACCEL new file mode 100644 index 000000000000..05b1b0025818 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_ACCEL @@ -0,0 +1 @@ +# CONFIG_BMC150_ACCEL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_I2C new file mode 100644 index 000000000000..9ee4a72054bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_I2C @@ -0,0 +1 @@ +# CONFIG_BMC150_MAGN_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_SPI new file mode 100644 index 000000000000..500adb8335e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMC150_MAGN_SPI @@ -0,0 +1 @@ +# CONFIG_BMC150_MAGN_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BME680 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BME680 new file mode 100644 index 000000000000..b14efe57a9a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BME680 @@ -0,0 +1 @@ +# CONFIG_BME680 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMG160 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMG160 new file mode 100644 index 000000000000..6406a84d5711 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMG160 @@ -0,0 +1 @@ +# CONFIG_BMG160 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI088_ACCEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI088_ACCEL new file mode 100644 index 000000000000..20952f3f5575 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI088_ACCEL @@ -0,0 +1 @@ +# CONFIG_BMI088_ACCEL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI160_I2C 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI160_I2C new file mode 100644 index 000000000000..f58224424a93 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI160_I2C @@ -0,0 +1 @@ +# CONFIG_BMI160_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI160_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI160_SPI new file mode 100644 index 000000000000..9ca7127aed0c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMI160_SPI @@ -0,0 +1 @@ +# CONFIG_BMI160_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMP280 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMP280 new file mode 100644 index 000000000000..e882be513619 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BMP280 @@ -0,0 +1 @@ +# CONFIG_BMP280 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BNA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BNA new file mode 100644 index 000000000000..3899b6f1bef1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BNA @@ -0,0 +1 @@ +# CONFIG_BNA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BOOT_VESA_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BOOT_VESA_SUPPORT new file mode 100644 index 000000000000..50aadaed14dc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BOOT_VESA_SUPPORT @@ -0,0 +1 @@ +CONFIG_BOOT_VESA_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BOSCH_BNO055_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BOSCH_BNO055_I2C new file mode 100644 index 000000000000..498977ee39cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BOSCH_BNO055_I2C @@ -0,0 +1 @@ +# CONFIG_BOSCH_BNO055_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMDBG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMDBG new file mode 100644 index 000000000000..6c88b45b4dd6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMDBG @@ -0,0 +1 @@ +# CONFIG_BRCMDBG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC new file mode 100644 index 000000000000..bd1d084d3477 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC @@ -0,0 +1 @@ +CONFIG_BRCMFMAC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PCIE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PCIE new file mode 100644 index 000000000000..7f83fdfbd65b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PCIE @@ -0,0 +1 @@ +CONFIG_BRCMFMAC_PCIE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PROTO_BCDC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PROTO_BCDC new file mode 100644 index 000000000000..4e5140188d05 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PROTO_BCDC @@ -0,0 +1 @@ +CONFIG_BRCMFMAC_PROTO_BCDC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PROTO_MSGBUF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PROTO_MSGBUF new file mode 100644 index 000000000000..d66f94533045 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_PROTO_MSGBUF @@ -0,0 +1 @@ +CONFIG_BRCMFMAC_PROTO_MSGBUF=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_SDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_SDIO new file mode 100644 index 000000000000..be5c23d2e80c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_SDIO @@ -0,0 +1 @@ +CONFIG_BRCMFMAC_SDIO=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_USB new file mode 100644 index 000000000000..0bcd32d69539 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMFMAC_USB @@ -0,0 +1 @@ +CONFIG_BRCMFMAC_USB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC new file mode 100644 index 000000000000..92915921ffa1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC @@ -0,0 +1 @@ +CONFIG_BRCMSMAC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC_LEDS 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC_LEDS new file mode 100644 index 000000000000..b789e04f05ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMSMAC_LEDS @@ -0,0 +1 @@ +CONFIG_BRCMSMAC_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMUTIL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMUTIL new file mode 100644 index 000000000000..66c5eac06358 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCMUTIL @@ -0,0 +1 @@ +CONFIG_BRCMUTIL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCM_TRACING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCM_TRACING new file mode 100644 index 000000000000..cda166d43237 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BRCM_TRACING @@ -0,0 +1 @@ +# CONFIG_BRCM_TRACING is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT new file mode 100644 index 000000000000..ec1159297406 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT @@ -0,0 +1 @@ +CONFIG_BT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_6LOWPAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_6LOWPAN new file mode 100644 index 000000000000..dde565107dad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_6LOWPAN @@ -0,0 +1 @@ +# CONFIG_BT_6LOWPAN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_AOSPEXT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_AOSPEXT new file mode 100644 index 000000000000..e7c3f2c7d5e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_AOSPEXT @@ -0,0 +1 @@ +# CONFIG_BT_AOSPEXT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_ATH3K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_ATH3K new file mode 100644 index 000000000000..7dc74ada07ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_ATH3K @@ -0,0 +1 @@ +CONFIG_BT_ATH3K=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BCM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BCM new file mode 100644 index 000000000000..56bb7cf8c1f7 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BCM @@ -0,0 +1 @@ +CONFIG_BT_BCM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP new file mode 100644 index 000000000000..36b8e51be349 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP @@ -0,0 +1 @@ +CONFIG_BT_BNEP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP_MC_FILTER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP_MC_FILTER new file mode 100644 index 000000000000..fc4bf7c14ece --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP_MC_FILTER @@ -0,0 +1 @@ +CONFIG_BT_BNEP_MC_FILTER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP_PROTO_FILTER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP_PROTO_FILTER new file mode 100644 index 000000000000..0cfac59b0417 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BNEP_PROTO_FILTER @@ -0,0 +1 @@ +CONFIG_BT_BNEP_PROTO_FILTER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BREDR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BREDR new file mode 100644 index 000000000000..c7b6f3ddb64c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_BREDR @@ -0,0 +1 @@ +CONFIG_BT_BREDR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_CMTP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_CMTP new file mode 100644 index 000000000000..6fe276815d80 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_CMTP @@ -0,0 +1 @@ +CONFIG_BT_CMTP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_DEBUGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_DEBUGFS new file mode 100644 index 000000000000..a13341283777 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_DEBUGFS @@ -0,0 +1 @@ +CONFIG_BT_DEBUGFS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBCM203X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBCM203X new file mode 100644 index 000000000000..08b55441f4e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBCM203X 
@@ -0,0 +1 @@ +CONFIG_BT_HCIBCM203X=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBCM4377 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBCM4377 new file mode 100644 index 000000000000..c1e35e11b91d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBCM4377 @@ -0,0 +1 @@ +# CONFIG_BT_HCIBCM4377 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBFUSB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBFUSB new file mode 100644 index 000000000000..14a07fa0e9f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBFUSB @@ -0,0 +1 @@ +CONFIG_BT_HCIBFUSB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBPA10X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBPA10X new file mode 100644 index 000000000000..fec109e44069 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBPA10X @@ -0,0 +1 @@ +CONFIG_BT_HCIBPA10X=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTSDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTSDIO new file mode 100644 index 000000000000..82ea86e4902a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTSDIO @@ -0,0 +1 @@ +CONFIG_BT_HCIBTSDIO=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB new file mode 100644 index 000000000000..87df53377e71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB @@ -0,0 +1 @@ +CONFIG_BT_HCIBTUSB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_AUTOSUSPEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_AUTOSUSPEND new file mode 100644 index 000000000000..76c0b541b3fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_AUTOSUSPEND @@ -0,0 +1 @@ +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_BCM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_BCM new file mode 100644 index 000000000000..61e55a376fda --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_BCM @@ -0,0 +1 @@ +CONFIG_BT_HCIBTUSB_BCM=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_MTK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_MTK new file mode 100644 index 000000000000..a01298616a8c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_MTK @@ -0,0 +1 @@ +# CONFIG_BT_HCIBTUSB_MTK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_POLL_SYNC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_POLL_SYNC new file mode 100644 index 000000000000..d4d28bc87933 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_POLL_SYNC @@ -0,0 +1 @@ +CONFIG_BT_HCIBTUSB_POLL_SYNC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_RTL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_RTL new file mode 100644 index 000000000000..0ea9fa578df0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIBTUSB_RTL @@ -0,0 +1 @@ +CONFIG_BT_HCIBTUSB_RTL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART new file mode 100644 index 000000000000..a22748bb0f2c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART @@ -0,0 +1 @@ +CONFIG_BT_HCIUART=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_AG6XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_AG6XX new file mode 100644 index 000000000000..c711a76c537f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_AG6XX @@ -0,0 +1 @@ +# CONFIG_BT_HCIUART_AG6XX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_ATH3K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_ATH3K new file mode 100644 index 000000000000..818b4f4f48db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_ATH3K @@ -0,0 +1 @@ +CONFIG_BT_HCIUART_ATH3K=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_BCSP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_BCSP 
new file mode 100644 index 000000000000..c90c17852a16 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_BCSP @@ -0,0 +1 @@ +CONFIG_BT_HCIUART_BCSP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_H4 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_H4 new file mode 100644 index 000000000000..813998955f94 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_H4 @@ -0,0 +1 @@ +CONFIG_BT_HCIUART_H4=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_INTEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_INTEL new file mode 100644 index 000000000000..18f6e54ada58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIUART_INTEL @@ -0,0 +1 @@ +# CONFIG_BT_HCIUART_INTEL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIVHCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIVHCI new file mode 100644 index 000000000000..caba0cdc8c57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HCIVHCI @@ -0,0 +1 @@ +CONFIG_BT_HCIVHCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HIDP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HIDP new file mode 100644 index 000000000000..b555d07c30fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_HIDP @@ -0,0 +1 @@ +CONFIG_BT_HIDP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_INTEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_INTEL new file mode 100644 index 000000000000..876bc4f979bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_INTEL @@ -0,0 +1 @@ +CONFIG_BT_INTEL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE new file mode 100644 index 000000000000..e4bd7f9acfb7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE @@ -0,0 +1 @@ +CONFIG_BT_LE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LEDS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LEDS new file mode 100644 index 000000000000..dabfa7971f3f --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LEDS @@ -0,0 +1 @@ +# CONFIG_BT_LEDS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE_L2CAP_ECRED b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE_L2CAP_ECRED new file mode 100644 index 000000000000..290fa0aa335e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_LE_L2CAP_ECRED @@ -0,0 +1 @@ +CONFIG_BT_LE_L2CAP_ECRED=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL new file mode 100644 index 000000000000..a22a81e3ca32 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL @@ -0,0 +1 @@ +CONFIG_BT_MRVL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL_SDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL_SDIO new file mode 100644 index 000000000000..9b96ff020db2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MRVL_SDIO @@ -0,0 +1 @@ +CONFIG_BT_MRVL_SDIO=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MSFTEXT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MSFTEXT new file mode 100644 index 000000000000..7fae871b2426 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MSFTEXT @@ -0,0 +1 @@ +# CONFIG_BT_MSFTEXT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MTKSDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MTKSDIO new file mode 100644 index 000000000000..528c870c2a94 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_MTKSDIO @@ -0,0 +1 @@ +# CONFIG_BT_MTKSDIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM new file mode 100644 index 000000000000..a203466fc7cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM @@ -0,0 +1 @@ +CONFIG_BT_RFCOMM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM_TTY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM_TTY new file mode 100644 index 000000000000..b3bbd06342ae --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RFCOMM_TTY @@ -0,0 +1 @@ +CONFIG_BT_RFCOMM_TTY=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RTL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RTL new file mode 100644 index 000000000000..7ff5bef0400b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_RTL @@ -0,0 +1 @@ +CONFIG_BT_RTL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_SELFTEST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_SELFTEST new file mode 100644 index 000000000000..e7b42ec08708 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_SELFTEST @@ -0,0 +1 @@ +# CONFIG_BT_SELFTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_VIRTIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_VIRTIO new file mode 100644 index 000000000000..a31bdc88d5da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BT_VIRTIO @@ -0,0 +1 @@ +# CONFIG_BT_VIRTIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_BUILDTIME_MCOUNT_SORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BUILDTIME_MCOUNT_SORT new file mode 100644 index 000000000000..2990b20d629f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BUILDTIME_MCOUNT_SORT @@ -0,0 +1 @@ +CONFIG_BUILDTIME_MCOUNT_SORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_PADDING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_PADDING new file mode 100644 index 000000000000..bc0c12b17fe2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_PADDING @@ -0,0 +1 @@ +CONFIG_CALL_PADDING=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_THUNKS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_THUNKS new file mode 100644 index 000000000000..48aa2967be00 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_THUNKS @@ -0,0 +1 @@ +CONFIG_CALL_THUNKS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_THUNKS_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_THUNKS_DEBUG new file mode 100644 index 000000000000..d0cb45d63457 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CALL_THUNKS_DEBUG @@ -0,0 +1 @@ +# CONFIG_CALL_THUNKS_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CAPI_TRACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CAPI_TRACE new file mode 100644 index 000000000000..a2de337f37e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CAPI_TRACE @@ -0,0 +1 @@ +CONFIG_CAPI_TRACE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CARL9170 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CARL9170 new file mode 100644 index 000000000000..cff12d58adb8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CARL9170 @@ -0,0 +1 @@ +# CONFIG_CARL9170 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CCS811 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CCS811 new file mode 100644 index 000000000000..931f14e2ac6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CCS811 @@ -0,0 +1 @@ +# CONFIG_CCS811 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_ENTRY_PADDING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_ENTRY_PADDING new file mode 100644 index 000000000000..e86c4e63aadf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_ENTRY_PADDING @@ -0,0 +1 @@ +CONFIG_CC_HAS_ENTRY_PADDING=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_IBT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_IBT new file mode 100644 index 000000000000..eeba04d6d758 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_IBT @@ -0,0 +1 @@ +CONFIG_CC_HAS_IBT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_RETURN_THUNK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_RETURN_THUNK new file mode 100644 index 000000000000..26513cd1e173 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_RETURN_THUNK @@ -0,0 +1 @@ +CONFIG_CC_HAS_RETURN_THUNK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SANE_STACKPROTECTOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SANE_STACKPROTECTOR new file mode 100644 index 
000000000000..1fa5f625dec7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SANE_STACKPROTECTOR @@ -0,0 +1 @@ +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SLS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SLS new file mode 100644 index 000000000000..e5ea0a517696 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CC_HAS_SLS @@ -0,0 +1 @@ +CONFIG_CC_HAS_SLS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_CH7322 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_CH7322 new file mode 100644 index 000000000000..7433b698fa24 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_CH7322 @@ -0,0 +1 @@ +# CONFIG_CEC_CH7322 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_GPIO new file mode 100644 index 000000000000..96304804583c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_GPIO @@ -0,0 +1 @@ +# CONFIG_CEC_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_SECO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_SECO new file mode 100644 index 000000000000..6b713c371e94 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CEC_SECO @@ -0,0 +1 @@ +# CONFIG_CEC_SECO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CHROME_PLATFORMS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CHROME_PLATFORMS new file mode 100644 index 000000000000..e7b940cfc3b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CHROME_PLATFORMS @@ -0,0 +1 @@ +# CONFIG_CHROME_PLATFORMS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLKBLD_I8253 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLKBLD_I8253 new file mode 100644 index 000000000000..7c405b82f660 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLKBLD_I8253 @@ -0,0 +1 @@ +CONFIG_CLKBLD_I8253=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLKEVT_I8253 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLKEVT_I8253 new file mode 100644 index 
000000000000..3ed355dfeb40 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLKEVT_I8253 @@ -0,0 +1 @@ +CONFIG_CLKEVT_I8253=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE new file mode 100644 index 000000000000..9cefd31ed4f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE @@ -0,0 +1 @@ +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_WATCHDOG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_WATCHDOG new file mode 100644 index 000000000000..7ae919a190fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_WATCHDOG @@ -0,0 +1 @@ +CONFIG_CLOCKSOURCE_WATCHDOG=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US new file mode 100644 index 000000000000..22d8ef24cbd7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US @@ -0,0 +1 @@ +CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US=125 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM32181 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM32181 new file mode 100644 index 000000000000..706f9fc70667 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM32181 @@ -0,0 +1 @@ +# CONFIG_CM32181 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3232 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3232 new file mode 100644 index 000000000000..12580fd2fa60 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3232 @@ -0,0 +1 @@ +# CONFIG_CM3232 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3323 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3323 new file mode 100644 index 000000000000..1337712578a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3323 @@ -0,0 +1 @@ +# CONFIG_CM3323 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3605 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3605 new file mode 100644 index 000000000000..be5dca2eab02 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM3605 @@ -0,0 +1 @@ +# CONFIG_CM3605 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM36651 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM36651 new file mode 100644 index 000000000000..5ad3dd89d41e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CM36651 @@ -0,0 +1 @@ +# CONFIG_CM36651 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CMDLINE_BOOL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CMDLINE_BOOL new file mode 100644 index 000000000000..ca23a06ea98d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CMDLINE_BOOL @@ -0,0 +1 @@ +# CONFIG_CMDLINE_BOOL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAL_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAL_LAPTOP new file mode 100644 index 000000000000..84994e1774e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAL_LAPTOP @@ -0,0 +1 @@ +CONFIG_COMPAL_LAPTOP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAT_32 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAT_32 new file mode 100644 index 000000000000..15ce6df157fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAT_32 @@ -0,0 +1 @@ +CONFIG_COMPAT_32=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAT_FOR_U64_ALIGNMENT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAT_FOR_U64_ALIGNMENT new file mode 100644 index 000000000000..9af0f279bc4b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_COMPAT_FOR_U64_ALIGNMENT @@ -0,0 +1 @@ +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPA_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPA_DEBUG new file mode 100644 index 000000000000..d87a4d23583f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPA_DEBUG @@ -0,0 +1 @@ +# CONFIG_CPA_DEBUG is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPU5_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPU5_WDT new file mode 100644 index 000000000000..146167517774 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPU5_WDT @@ -0,0 +1 @@ +# CONFIG_CPU5_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPU_SUP_CENTAUR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPU_SUP_CENTAUR new file mode 100644 index 000000000000..d320ee4cd6ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CPU_SUP_CENTAUR @@ -0,0 +1 @@ +CONFIG_CPU_SUP_CENTAUR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRASH_MAX_MEMORY_RANGES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRASH_MAX_MEMORY_RANGES new file mode 100644 index 000000000000..2c57448707cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRASH_MAX_MEMORY_RANGES @@ -0,0 +1 @@ +CONFIG_CRASH_MAX_MEMORY_RANGES=8192 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64 new file mode 100644 index 000000000000..0da0f2707223 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64 @@ -0,0 +1 @@ +CONFIG_CRC64=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64_ROCKSOFT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64_ROCKSOFT new file mode 100644 index 000000000000..4b85a46ebe6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRC64_ROCKSOFT @@ -0,0 +1 @@ +CONFIG_CRC64_ROCKSOFT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S new file mode 100644 index 000000000000..dcfccc3ddc0a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S @@ -0,0 +1 @@ +CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519 new file mode 100644 index 000000000000..b8d4160933c6 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519 @@ -0,0 +1 @@ +CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 new file mode 100644 index 000000000000..1185030737e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 new file mode 100644 index 000000000000..5c153ec358f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 new file mode 100644 index 000000000000..a39408b1cd41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 @@ -0,0 +1 @@ +# CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_CRC64_ROCKSOFT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_CRC64_ROCKSOFT new file mode 100644 index 000000000000..ee01701c7776 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_CRC64_ROCKSOFT @@ -0,0 +1 @@ +CONFIG_CRYPTO_CRC64_ROCKSOFT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK new file mode 100644 index 000000000000..8a6e8e6cb79e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_PADLOCK=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK_AES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK_AES new file mode 100644 index 
000000000000..fed2a47ff27a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK_AES @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_PADLOCK_AES=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK_SHA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK_SHA new file mode 100644 index 000000000000..7f043e865eba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_PADLOCK_SHA @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_QAT_4XXX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_QAT_4XXX new file mode 100644 index 000000000000..64a85cdcb962 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_QAT_4XXX @@ -0,0 +1 @@ +CONFIG_CRYPTO_DEV_QAT_4XXX=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION new file mode 100644 index 000000000000..eb5cbd1e34b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION @@ -0,0 +1 @@ +# CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_LIB_POLY1305_RSIZE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_LIB_POLY1305_RSIZE new file mode 100644 index 000000000000..ca84f9442991 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_LIB_POLY1305_RSIZE @@ -0,0 +1 @@ +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_POLYVAL_CLMUL_NI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_POLYVAL_CLMUL_NI new file mode 100644 index 000000000000..cb55de791152 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CRYPTO_POLYVAL_CLMUL_NI @@ -0,0 +1 @@ +# CONFIG_CRYPTO_POLYVAL_CLMUL_NI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_CX_ECAT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CX_ECAT new file mode 100644 index 000000000000..f1dda12b0127 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_CX_ECAT @@ -0,0 +1 @@ +# CONFIG_CX_ECAT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DA280 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DA280 new file mode 100644 index 000000000000..deab20f930f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DA280 @@ -0,0 +1 @@ +# CONFIG_DA280 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DA311 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DA311 new file mode 100644 index 000000000000..f83efb28804e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DA311 @@ -0,0 +1 @@ +# CONFIG_DA311 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DCA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DCA new file mode 100644 index 000000000000..6ebff19925dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DCA @@ -0,0 +1 @@ +CONFIG_DCA=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_ENTRY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_ENTRY new file mode 100644 index 000000000000..88cb6da0863d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_ENTRY @@ -0,0 +1 @@ +# CONFIG_DEBUG_ENTRY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP new file mode 100644 index 000000000000..3bb67a6665b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP @@ -0,0 +1 @@ +# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_NMI_SELFTEST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_NMI_SELFTEST new file mode 100644 index 000000000000..4ea92fec1e8b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_NMI_SELFTEST @@ -0,0 +1 @@ +# CONFIG_DEBUG_NMI_SELFTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_TLBFLUSH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_TLBFLUSH new file mode 100644 index 000000000000..6544b79a85a4 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEBUG_TLBFLUSH @@ -0,0 +1 @@ +# CONFIG_DEBUG_TLBFLUSH is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEV_COREDUMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEV_COREDUMP new file mode 100644 index 000000000000..eb4e4395cb18 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEV_COREDUMP @@ -0,0 +1 @@ +CONFIG_DEV_COREDUMP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DHT11 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DHT11 new file mode 100644 index 000000000000..877b95de0ab5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DHT11 @@ -0,0 +1 @@ +# CONFIG_DHT11 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DLHL60D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DLHL60D new file mode 100644 index 000000000000..031f3d2e3a49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DLHL60D @@ -0,0 +1 @@ +# CONFIG_DLHL60D is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD06 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD06 new file mode 100644 index 000000000000..0d22f625b6fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD06 @@ -0,0 +1 @@ +# CONFIG_DMARD06 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD09 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD09 new file mode 100644 index 000000000000..cdf8d6bad89a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD09 @@ -0,0 +1 @@ +# CONFIG_DMARD09 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD10 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD10 new file mode 100644 index 000000000000..fdffba901ab8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMARD10 @@ -0,0 +1 @@ +# CONFIG_DMARD10 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMAR_TABLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMAR_TABLE new file mode 100644 index 000000000000..97672685d864 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMAR_TABLE @@ -0,0 +1 @@ +CONFIG_DMAR_TABLE=y 
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMA_VIRTUAL_CHANNELS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMA_VIRTUAL_CHANNELS new file mode 100644 index 000000000000..2471fccb8c68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMA_VIRTUAL_CHANNELS @@ -0,0 +1 @@ +CONFIG_DMA_VIRTUAL_CHANNELS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK new file mode 100644 index 000000000000..f72f8c5e2567 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK @@ -0,0 +1 @@ +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DPOT_DAC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DPOT_DAC new file mode 100644 index 000000000000..78bc08ab5160 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DPOT_DAC @@ -0,0 +1 @@ +# CONFIG_DPOT_DAC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DPS310 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DPS310 new file mode 100644 index 000000000000..1a46ce6c0415 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DPS310 @@ -0,0 +1 @@ +# CONFIG_DPS310 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_DP_CEC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_DP_CEC new file mode 100644 index 000000000000..f0dbdcca2ed4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_DP_CEC @@ -0,0 +1 @@ +# CONFIG_DRM_DP_CEC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_HYPERV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_HYPERV new file mode 100644 index 000000000000..b263ea50239c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_HYPERV @@ -0,0 +1 @@ +# CONFIG_DRM_HYPERV is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I2C_NXP_TDA998X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I2C_NXP_TDA998X new file mode 100644 index 000000000000..b04038c91499 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I2C_NXP_TDA998X @@ -0,0 +1 @@ +# CONFIG_DRM_I2C_NXP_TDA998X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I2C_SIL164 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I2C_SIL164 new file mode 100644 index 000000000000..fb5fc4d67153 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I2C_SIL164 @@ -0,0 +1 @@ +CONFIG_DRM_I2C_SIL164=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE new file mode 100644 index 000000000000..d545eec6d5d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE @@ -0,0 +1 @@ +CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE=7500 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I915_REQUEST_TIMEOUT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I915_REQUEST_TIMEOUT new file mode 100644 index 000000000000..f67cc1be19b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_I915_REQUEST_TIMEOUT @@ -0,0 +1 @@ +CONFIG_DRM_I915_REQUEST_TIMEOUT=20000 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_MIPI_DSI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_MIPI_DSI new file mode 100644 index 000000000000..3b3739e51aee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_MIPI_DSI @@ -0,0 +1 @@ +CONFIG_DRM_MIPI_DSI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN new file mode 100644 index 000000000000..48ef8d46b856 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN @@ -0,0 +1 @@ +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_PRIVACY_SCREEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_PRIVACY_SCREEN new file mode 100644 index 000000000000..7823f3418575 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_PRIVACY_SCREEN @@ -0,0 +1 @@ +CONFIG_DRM_PRIVACY_SCREEN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_VBOXVIDEO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_VBOXVIDEO new file mode 100644 index 000000000000..8cd637a0c076 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_VBOXVIDEO @@ -0,0 +1 @@ +# CONFIG_DRM_VBOXVIDEO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_VMWGFX_MKSSTATS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_VMWGFX_MKSSTATS new file mode 100644 index 000000000000..429edecedefa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_VMWGFX_MKSSTATS @@ -0,0 +1 @@ +# CONFIG_DRM_VMWGFX_MKSSTATS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_XEN_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_XEN_FRONTEND new file mode 100644 index 000000000000..47be407b1134 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_XEN_FRONTEND @@ -0,0 +1 @@ +# CONFIG_DRM_XEN_FRONTEND is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DS1803 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DS1803 new file mode 100644 index 000000000000..04e81cfe1e82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DS1803 @@ -0,0 +1 @@ +# CONFIG_DS1803 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DS4424 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DS4424 new file mode 100644 index 000000000000..199b37762c73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DS4424 @@ -0,0 +1 @@ +# CONFIG_DS4424 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DW_DMAC_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DW_DMAC_CORE new file mode 100644 index 000000000000..110558a22025 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DW_DMAC_CORE @@ -0,0 +1 @@ +CONFIG_DW_DMAC_CORE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DYNAMIC_FTRACE_WITH_REGS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DYNAMIC_FTRACE_WITH_REGS new file mode 100644 index 
000000000000..49bcef1aca7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DYNAMIC_FTRACE_WITH_REGS @@ -0,0 +1 @@ +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DYNAMIC_MEMORY_LAYOUT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DYNAMIC_MEMORY_LAYOUT new file mode 100644 index 000000000000..d05a49d8b297 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DYNAMIC_MEMORY_LAYOUT @@ -0,0 +1 @@ +CONFIG_DYNAMIC_MEMORY_LAYOUT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EARLY_PRINTK_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EARLY_PRINTK_USB new file mode 100644 index 000000000000..47eaad0217b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EARLY_PRINTK_USB @@ -0,0 +1 @@ +CONFIG_EARLY_PRINTK_USB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EBC_C384_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EBC_C384_WDT new file mode 100644 index 000000000000..262928b23a0d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EBC_C384_WDT @@ -0,0 +1 @@ +# CONFIG_EBC_C384_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_ATOMIC_SCRUB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_ATOMIC_SCRUB new file mode 100644 index 000000000000..0ca60f8d6371 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_ATOMIC_SCRUB @@ -0,0 +1 @@ +CONFIG_EDAC_ATOMIC_SCRUB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_IE31200 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_IE31200 new file mode 100644 index 000000000000..1d4b071dea2b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_IE31200 @@ -0,0 +1 @@ +CONFIG_EDAC_IE31200=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_IGEN6 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_IGEN6 new file mode 100644 index 000000000000..88b22f502c82 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_IGEN6 @@ -0,0 +1 @@ +# CONFIG_EDAC_IGEN6 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_PND2 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_PND2 new file mode 100644 index 000000000000..d0b59df60e9e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_PND2 @@ -0,0 +1 @@ +CONFIG_EDAC_PND2=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_X38 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_X38 new file mode 100644 index 000000000000..710ecf317b57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDAC_X38 @@ -0,0 +1 @@ +CONFIG_EDAC_X38=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDD_OFF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDD_OFF new file mode 100644 index 000000000000..8e6d55871362 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EDD_OFF @@ -0,0 +1 @@ +# CONFIG_EDD_OFF is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EEEPC_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EEEPC_LAPTOP new file mode 100644 index 000000000000..b0fe48b006fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EEEPC_LAPTOP @@ -0,0 +1 @@ +CONFIG_EEEPC_LAPTOP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EEEPC_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EEEPC_WMI new file mode 100644 index 000000000000..bd7f00ece0d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EEEPC_WMI @@ -0,0 +1 @@ +CONFIG_EEEPC_WMI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_DXE_MEM_ATTRIBUTES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_DXE_MEM_ATTRIBUTES new file mode 100644 index 000000000000..17ce2878b0be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_DXE_MEM_ATTRIBUTES @@ -0,0 +1 @@ +CONFIG_EFI_DXE_MEM_ATTRIBUTES=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_FAKE_MEMMAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_FAKE_MEMMAP new file mode 100644 index 000000000000..316a93b97aee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_FAKE_MEMMAP @@ -0,0 +1 @@ +# CONFIG_EFI_FAKE_MEMMAP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_HANDOVER_PROTOCOL 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_HANDOVER_PROTOCOL new file mode 100644 index 000000000000..1beee118e043 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_HANDOVER_PROTOCOL @@ -0,0 +1 @@ +CONFIG_EFI_HANDOVER_PROTOCOL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_PGT_DUMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_PGT_DUMP new file mode 100644 index 000000000000..c7e9f9f088b9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EFI_PGT_DUMP @@ -0,0 +1 @@ +# CONFIG_EFI_PGT_DUMP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EISA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EISA new file mode 100644 index 000000000000..01c95e39f470 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EISA @@ -0,0 +1 @@ +# CONFIG_EISA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ENIC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ENIC new file mode 100644 index 000000000000..5c206ee50139 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ENIC @@ -0,0 +1 @@ +CONFIG_ENIC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ENVELOPE_DETECTOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ENVELOPE_DETECTOR new file mode 100644 index 000000000000..ac79f531ac6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ENVELOPE_DETECTOR @@ -0,0 +1 @@ +# CONFIG_ENVELOPE_DETECTOR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EUROTECH_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EUROTECH_WDT new file mode 100644 index 000000000000..08f35862f63f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EUROTECH_WDT @@ -0,0 +1 @@ +# CONFIG_EUROTECH_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_EXAR_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EXAR_WDT new file mode 100644 index 000000000000..3b782c1737cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_EXAR_WDT @@ -0,0 +1 @@ +# CONFIG_EXAR_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_F71808E_WDT 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_F71808E_WDT new file mode 100644 index 000000000000..9fc51eb65135 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_F71808E_WDT @@ -0,0 +1 @@ +CONFIG_F71808E_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_ARC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_ARC new file mode 100644 index 000000000000..1d66b92f45c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_ARC @@ -0,0 +1 @@ +# CONFIG_FB_ARC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_HGA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_HGA new file mode 100644 index 000000000000..3c3142a1e7e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_HGA @@ -0,0 +1 @@ +# CONFIG_FB_HGA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_LE80578 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_LE80578 new file mode 100644 index 000000000000..327044a84a90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_LE80578 @@ -0,0 +1 @@ +# CONFIG_FB_LE80578 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_N411 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_N411 new file mode 100644 index 000000000000..50f6f3584e84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_N411 @@ -0,0 +1 @@ +# CONFIG_FB_N411 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_SM501 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_SM501 new file mode 100644 index 000000000000..8ee07d09d2a1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_SM501 @@ -0,0 +1 @@ +# CONFIG_FB_SM501 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VGA16 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VGA16 new file mode 100644 index 000000000000..d933f13d7a84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VGA16 @@ -0,0 +1 @@ +# CONFIG_FB_VGA16 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VIA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VIA new file mode 100644 index 
000000000000..d34242c33dd9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FB_VIA @@ -0,0 +1 @@ +# CONFIG_FB_VIA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE new file mode 100644 index 000000000000..7e1163b282bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE @@ -0,0 +1 @@ +CONFIG_FIREWIRE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_NET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_NET new file mode 100644 index 000000000000..5a9e50eb3b69 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_NET @@ -0,0 +1 @@ +CONFIG_FIREWIRE_NET=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_OHCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_OHCI new file mode 100644 index 000000000000..de045fe5aa6e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_OHCI @@ -0,0 +1 @@ +CONFIG_FIREWIRE_OHCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_SBP2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_SBP2 new file mode 100644 index 000000000000..1255474ee936 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FIREWIRE_SBP2 @@ -0,0 +1 @@ +CONFIG_FIREWIRE_SBP2=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FPROBE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FPROBE new file mode 100644 index 000000000000..56010245c46f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FPROBE @@ -0,0 +1 @@ +# CONFIG_FPROBE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FS_MBCACHE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FS_MBCACHE new file mode 100644 index 000000000000..6bfe922de885 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FS_MBCACHE @@ -0,0 +1 @@ +CONFIG_FS_MBCACHE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FTRACE_MCOUNT_USE_CC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FTRACE_MCOUNT_USE_CC new file mode 100644 index 000000000000..f4727a5a4438 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FTRACE_MCOUNT_USE_CC @@ -0,0 +1 @@ +CONFIG_FTRACE_MCOUNT_USE_CC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FTRACE_SORT_STARTUP_TEST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FTRACE_SORT_STARTUP_TEST new file mode 100644 index 000000000000..aea4adb9eb14 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FTRACE_SORT_STARTUP_TEST @@ -0,0 +1 @@ +# CONFIG_FTRACE_SORT_STARTUP_TEST is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_ES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_ES new file mode 100644 index 000000000000..2c1643a52b0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_ES @@ -0,0 +1 @@ +CONFIG_FUJITSU_ES=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_LAPTOP new file mode 100644 index 000000000000..e47131e837c0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_LAPTOP @@ -0,0 +1 @@ +CONFIG_FUJITSU_LAPTOP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_TABLET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_TABLET new file mode 100644 index 000000000000..4e9d1a0a1ea8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUJITSU_TABLET @@ -0,0 +1 @@ +CONFIG_FUJITSU_TABLET=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT new file mode 100644 index 000000000000..54b8841aa24c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT @@ -0,0 +1 @@ +CONFIG_FUNCTION_ALIGNMENT=16 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT_16B b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT_16B new file mode 100644 index 000000000000..bfa63ab172be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_ALIGNMENT_16B @@ -0,0 +1 @@ +CONFIG_FUNCTION_ALIGNMENT_16B=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_PADDING_BYTES 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_PADDING_BYTES new file mode 100644 index 000000000000..2e4315d4111e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_PADDING_BYTES @@ -0,0 +1 @@ +CONFIG_FUNCTION_PADDING_BYTES=16 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_PADDING_CFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_PADDING_CFI new file mode 100644 index 000000000000..28a5f3daec77 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FUNCTION_PADDING_CFI @@ -0,0 +1 @@ +CONFIG_FUNCTION_PADDING_CFI=11 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FW_LOADER_USER_HELPER_FALLBACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FW_LOADER_USER_HELPER_FALLBACK new file mode 100644 index 000000000000..29daf8fb65c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FW_LOADER_USER_HELPER_FALLBACK @@ -0,0 +1 @@ +# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXAS21002C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXAS21002C new file mode 100644 index 000000000000..80e57d9425db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXAS21002C @@ -0,0 +1 @@ +# CONFIG_FXAS21002C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXLS8962AF_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXLS8962AF_I2C new file mode 100644 index 000000000000..312ae1763d74 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXLS8962AF_I2C @@ -0,0 +1 @@ +# CONFIG_FXLS8962AF_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXLS8962AF_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXLS8962AF_SPI new file mode 100644 index 000000000000..5721a78eb210 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXLS8962AF_SPI @@ -0,0 +1 @@ +# CONFIG_FXLS8962AF_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXOS8700_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXOS8700_I2C new file mode 100644 index 000000000000..8ad59615f73d --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXOS8700_I2C @@ -0,0 +1 @@ +# CONFIG_FXOS8700_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXOS8700_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXOS8700_SPI new file mode 100644 index 000000000000..b291b78e2c88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_FXOS8700_SPI @@ -0,0 +1 @@ +# CONFIG_FXOS8700_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GDS_FORCE_MITIGATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GDS_FORCE_MITIGATION new file mode 100644 index 000000000000..89396f95d3e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GDS_FORCE_MITIGATION @@ -0,0 +1 @@ +# CONFIG_GDS_FORCE_MITIGATION is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_ADC_BATTERY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_ADC_BATTERY new file mode 100644 index 000000000000..440938a36dba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_ADC_BATTERY @@ -0,0 +1 @@ +# CONFIG_GENERIC_ADC_BATTERY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST new file mode 100644 index 000000000000..cbe34665557e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST @@ -0,0 +1 @@ +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_CMOS_UPDATE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_CMOS_UPDATE new file mode 100644 index 000000000000..ba31fc229ab5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_CMOS_UPDATE @@ -0,0 +1 @@ +CONFIG_GENERIC_CMOS_UPDATE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_ENTRY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_ENTRY new file mode 100644 index 000000000000..039e303e59d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_ENTRY @@ -0,0 +1 @@ +CONFIG_GENERIC_ENTRY=y diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IOMAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IOMAP new file mode 100644 index 000000000000..3332e15f4e30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IOMAP @@ -0,0 +1 @@ +CONFIG_GENERIC_IOMAP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR new file mode 100644 index 000000000000..9fbd28afe9b2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IRQ_RESERVATION_MODE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IRQ_RESERVATION_MODE new file mode 100644 index 000000000000..c0bbab3e55b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GENERIC_IRQ_RESERVATION_MODE @@ -0,0 +1 @@ +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GIGABYTE_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GIGABYTE_WMI new file mode 100644 index 000000000000..3cd2ec4144e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GIGABYTE_WMI @@ -0,0 +1 @@ +# CONFIG_GIGABYTE_WMI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GP2AP002 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GP2AP002 new file mode 100644 index 000000000000..4be624a417b4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GP2AP002 @@ -0,0 +1 @@ +# CONFIG_GP2AP002 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GP2AP020A00F b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GP2AP020A00F new file mode 100644 index 000000000000..15d94281d250 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GP2AP020A00F @@ -0,0 +1 @@ +# CONFIG_GP2AP020A00F is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPD_POCKET_FAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPD_POCKET_FAN new file mode 100644 index 000000000000..a4541a51d7b9 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPD_POCKET_FAN @@ -0,0 +1 @@ +# CONFIG_GPD_POCKET_FAN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_AMD8111 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_AMD8111 new file mode 100644 index 000000000000..097d52ce198e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_AMD8111 @@ -0,0 +1 @@ +# CONFIG_GPIO_AMD8111 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_DWAPB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_DWAPB new file mode 100644 index 000000000000..364383328711 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_DWAPB @@ -0,0 +1 @@ +# CONFIG_GPIO_DWAPB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ELKHARTLAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ELKHARTLAKE new file mode 100644 index 000000000000..d39ce6563347 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ELKHARTLAKE @@ -0,0 +1 @@ +# CONFIG_GPIO_ELKHARTLAKE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_F7188X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_F7188X new file mode 100644 index 000000000000..4ab26719d33b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_F7188X @@ -0,0 +1 @@ +# CONFIG_GPIO_F7188X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ICH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ICH new file mode 100644 index 000000000000..7712d05165d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ICH @@ -0,0 +1 @@ +CONFIG_GPIO_ICH=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_IT87 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_IT87 new file mode 100644 index 000000000000..aaeb504acabf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_IT87 @@ -0,0 +1 @@ +# CONFIG_GPIO_IT87 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ML_IOH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ML_IOH new file mode 100644 index 000000000000..9155cc76a8de --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_ML_IOH @@ -0,0 +1 @@ +# CONFIG_GPIO_ML_IOH is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH new file mode 100644 index 000000000000..91a3ab850cba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH @@ -0,0 +1 @@ +# CONFIG_GPIO_SCH is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH311X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH311X new file mode 100644 index 000000000000..62c932c31b6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_SCH311X @@ -0,0 +1 @@ +# CONFIG_GPIO_SCH311X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_VIPERBOARD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_VIPERBOARD new file mode 100644 index 000000000000..e92c6f0d3740 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_VIPERBOARD @@ -0,0 +1 @@ +CONFIG_GPIO_VIPERBOARD=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_VX855 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_VX855 new file mode 100644 index 000000000000..07b0ce3abc05 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_VX855 @@ -0,0 +1 @@ +# CONFIG_GPIO_VX855 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_WINBOND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_WINBOND new file mode 100644 index 000000000000..0e6058047c89 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_WINBOND @@ -0,0 +1 @@ +# CONFIG_GPIO_WINBOND is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_WS16C48 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_WS16C48 new file mode 100644 index 000000000000..75427dc0f517 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_GPIO_WS16C48 @@ -0,0 +1 @@ +# CONFIG_GPIO_WS16C48 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HARDLOCKUP_CHECK_TIMESTAMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HARDLOCKUP_CHECK_TIMESTAMP new file mode 100644 
index 000000000000..bf5847da79f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HARDLOCKUP_CHECK_TIMESTAMP @@ -0,0 +1 @@ +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ACPI_APEI_NMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ACPI_APEI_NMI new file mode 100644 index 000000000000..bda00028a2bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ACPI_APEI_NMI @@ -0,0 +1 @@ +CONFIG_HAVE_ACPI_APEI_NMI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES new file mode 100644 index 000000000000..f6490ad53e3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KCSAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KCSAN new file mode 100644 index 000000000000..72fd84bfab21 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KCSAN @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_KCSAN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KMSAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KMSAN new file mode 100644 index 000000000000..c4b12d1aaf44 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_KMSAN @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_KMSAN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_NODE_DEV_GROUP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_NODE_DEV_GROUP new file mode 100644 index 000000000000..a39a0b90ae2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_NODE_DEV_GROUP @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_NODE_DEV_GROUP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_SOFT_DIRTY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_SOFT_DIRTY new file mode 100644 index 000000000000..77f3b4e3b64a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_SOFT_DIRTY @@ -0,0 +1 @@ 
+CONFIG_HAVE_ARCH_SOFT_DIRTY=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD new file mode 100644 index 000000000000..e97355f3bf36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_USERFAULTFD_WP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_USERFAULTFD_WP new file mode 100644 index 000000000000..4b72ebe2ba58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_USERFAULTFD_WP @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_USERFAULTFD_WP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES new file mode 100644 index 000000000000..83d9554e9be2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES @@ -0,0 +1 @@ +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_BOOTMEM_INFO_NODE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_BOOTMEM_INFO_NODE new file mode 100644 index 000000000000..ee51a6582c46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_BOOTMEM_INFO_NODE @@ -0,0 +1 @@ +CONFIG_HAVE_BOOTMEM_INFO_NODE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_BUILDTIME_MCOUNT_SORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_BUILDTIME_MCOUNT_SORT new file mode 100644 index 000000000000..657e0932379b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_BUILDTIME_MCOUNT_SORT @@ -0,0 +1 @@ +CONFIG_HAVE_BUILDTIME_MCOUNT_SORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_CALL_THUNKS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_CALL_THUNKS new file mode 100644 index 000000000000..66d15550a4e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_CALL_THUNKS @@ -0,0 +1 @@ 
+CONFIG_HAVE_CALL_THUNKS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK new file mode 100644 index 000000000000..d8e9df335d63 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK @@ -0,0 +1 @@ +CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE new file mode 100644 index 000000000000..8300b2acaf7a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE @@ -0,0 +1 @@ +CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS new file mode 100644 index 000000000000..0898f1463c5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS @@ -0,0 +1 @@ +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_EISA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_EISA new file mode 100644 index 000000000000..f5707a64cd4b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_EISA @@ -0,0 +1 @@ +CONFIG_HAVE_EISA=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_EXIT_THREAD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_EXIT_THREAD new file mode 100644 index 000000000000..67b28cbe3516 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_EXIT_THREAD @@ -0,0 +1 @@ +CONFIG_HAVE_EXIT_THREAD=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_FENTRY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_FENTRY new file mode 100644 index 000000000000..9b6997a06e59 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_FENTRY @@ -0,0 +1 @@ +CONFIG_HAVE_FENTRY=y diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_INTEL_TXT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_INTEL_TXT new file mode 100644 index 000000000000..0e05464bc731 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_INTEL_TXT @@ -0,0 +1 @@ +CONFIG_HAVE_INTEL_TXT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK new file mode 100644 index 000000000000..4c8e7558df72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK @@ -0,0 +1 @@ +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_JUMP_LABEL_HACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_JUMP_LABEL_HACK new file mode 100644 index 000000000000..4588049c08a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_JUMP_LABEL_HACK @@ -0,0 +1 @@ +CONFIG_HAVE_JUMP_LABEL_HACK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_BZIP2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_BZIP2 new file mode 100644 index 000000000000..1b1d867b334c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_BZIP2 @@ -0,0 +1 @@ +CONFIG_HAVE_KERNEL_BZIP2=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_GZIP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_GZIP new file mode 100644 index 000000000000..feb2641d88dc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_GZIP @@ -0,0 +1 @@ +CONFIG_HAVE_KERNEL_GZIP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZ4 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZ4 new file mode 100644 index 000000000000..ec1e81c3c306 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZ4 @@ -0,0 +1 @@ +CONFIG_HAVE_KERNEL_LZ4=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZMA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZMA new file mode 100644 index 000000000000..c50e6ad86436 --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZMA @@ -0,0 +1 @@ +CONFIG_HAVE_KERNEL_LZMA=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZO new file mode 100644 index 000000000000..81bb9ec7ed5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_LZO @@ -0,0 +1 @@ +CONFIG_HAVE_KERNEL_LZO=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_XZ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_XZ new file mode 100644 index 000000000000..329229234b26 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_XZ @@ -0,0 +1 @@ +CONFIG_HAVE_KERNEL_XZ=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_ZSTD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_ZSTD new file mode 100644 index 000000000000..1a47f3ca73e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KERNEL_ZSTD @@ -0,0 +1 @@ +CONFIG_HAVE_KERNEL_ZSTD=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KPROBES_ON_FTRACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KPROBES_ON_FTRACE new file mode 100644 index 000000000000..805752949837 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KPROBES_ON_FTRACE @@ -0,0 +1 @@ +CONFIG_HAVE_KPROBES_ON_FTRACE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_DIRTY_RING_TSO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_DIRTY_RING_TSO new file mode 100644 index 000000000000..46fbdd90ba20 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_DIRTY_RING_TSO @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_DIRTY_RING_TSO=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_NO_POLL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_NO_POLL new file mode 100644 index 000000000000..c620814f57db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_NO_POLL @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_NO_POLL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PFNCACHE 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PFNCACHE new file mode 100644 index 000000000000..db784852153f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PFNCACHE @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_PFNCACHE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PM_NOTIFIER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PM_NOTIFIER new file mode 100644 index 000000000000..fea671aea6b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_KVM_PM_NOTIFIER @@ -0,0 +1 @@ +CONFIG_HAVE_KVM_PM_NOTIFIER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH new file mode 100644 index 000000000000..7ebdb924703e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH @@ -0,0 +1 @@ +CONFIG_HAVE_LIVEPATCH=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_MIXED_BREAKPOINTS_REGS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_MIXED_BREAKPOINTS_REGS new file mode 100644 index 000000000000..a54d950e2d5e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_MIXED_BREAKPOINTS_REGS @@ -0,0 +1 @@ +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_MMIOTRACE_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_MMIOTRACE_SUPPORT new file mode 100644 index 000000000000..91d2dc7c91c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_MMIOTRACE_SUPPORT @@ -0,0 +1 @@ +CONFIG_HAVE_MMIOTRACE_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_HACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_HACK new file mode 100644 index 000000000000..7413f93aa6f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_HACK @@ -0,0 +1 @@ +CONFIG_HAVE_NOINSTR_HACK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_VALIDATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_VALIDATION new file mode 100644 index 000000000000..1367d6e73a4c --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_NOINSTR_VALIDATION @@ -0,0 +1 @@ +CONFIG_HAVE_NOINSTR_VALIDATION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL new file mode 100644 index 000000000000..469c7ff06eb5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL @@ -0,0 +1 @@ +CONFIG_HAVE_OBJTOOL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL_MCOUNT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL_MCOUNT new file mode 100644 index 000000000000..4634d2d6365f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL_MCOUNT @@ -0,0 +1 @@ +CONFIG_HAVE_OBJTOOL_MCOUNT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL_NOP_MCOUNT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL_NOP_MCOUNT new file mode 100644 index 000000000000..94191899a055 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OBJTOOL_NOP_MCOUNT @@ -0,0 +1 @@ +CONFIG_HAVE_OBJTOOL_NOP_MCOUNT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OPTPROBES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OPTPROBES new file mode 100644 index 000000000000..06751c73a8ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_OPTPROBES @@ -0,0 +1 @@ +CONFIG_HAVE_OPTPROBES=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_PCSPKR_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_PCSPKR_PLATFORM new file mode 100644 index 000000000000..eab8b6f8a3ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_PCSPKR_PLATFORM @@ -0,0 +1 @@ +CONFIG_HAVE_PCSPKR_PLATFORM=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_PREEMPT_DYNAMIC_CALL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_PREEMPT_DYNAMIC_CALL new file mode 100644 index 000000000000..af4fcec50888 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_PREEMPT_DYNAMIC_CALL @@ -0,0 +1 @@ +CONFIG_HAVE_PREEMPT_DYNAMIC_CALL=y diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE new file mode 100644 index 000000000000..2ce8faabc4cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE @@ -0,0 +1 @@ +CONFIG_HAVE_RELIABLE_STACKTRACE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RETHOOK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RETHOOK new file mode 100644 index 000000000000..b25116106132 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RETHOOK @@ -0,0 +1 @@ +CONFIG_HAVE_RETHOOK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RUST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RUST new file mode 100644 index 000000000000..517440189cca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RUST @@ -0,0 +1 @@ +CONFIG_HAVE_RUST=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION new file mode 100644 index 000000000000..6f36a32d84ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION @@ -0,0 +1 @@ +CONFIG_HAVE_STACK_VALIDATION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL new file mode 100644 index 000000000000..f5d02db4c03b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL @@ -0,0 +1 @@ +CONFIG_HAVE_STATIC_CALL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL_INLINE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL_INLINE new file mode 100644 index 000000000000..5b16b117c7ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STATIC_CALL_INLINE @@ -0,0 +1 @@ +CONFIG_HAVE_STATIC_CALL_INLINE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_UACCESS_VALIDATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_UACCESS_VALIDATION new file mode 100644 index 000000000000..d337e94712f1 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_UACCESS_VALIDATION @@ -0,0 +1 @@ +CONFIG_HAVE_UACCESS_VALIDATION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_UNSTABLE_SCHED_CLOCK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_UNSTABLE_SCHED_CLOCK new file mode 100644 index 000000000000..747a0312fa84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_UNSTABLE_SCHED_CLOCK @@ -0,0 +1 @@ +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_USER_RETURN_NOTIFIER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_USER_RETURN_NOTIFIER new file mode 100644 index 000000000000..448c08460956 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_USER_RETURN_NOTIFIER @@ -0,0 +1 @@ +CONFIG_HAVE_USER_RETURN_NOTIFIER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC100X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC100X new file mode 100644 index 000000000000..e8e7ce278b27 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC100X @@ -0,0 +1 @@ +# CONFIG_HDC100X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC2010 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC2010 new file mode 100644 index 000000000000..fad2fecfd916 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDC2010 @@ -0,0 +1 @@ +# CONFIG_HDC2010 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HFI1_DEBUG_SDMA_ORDER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HFI1_DEBUG_SDMA_ORDER new file mode 100644 index 000000000000..a5c5e96e64a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HFI1_DEBUG_SDMA_ORDER @@ -0,0 +1 @@ +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HI8435 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HI8435 new file mode 100644 index 000000000000..21ee368cedd5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HI8435 @@ -0,0 +1 @@ +# CONFIG_HI8435 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_ALPS 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_ALPS new file mode 100644 index 000000000000..81f929d6b853 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_ALPS @@ -0,0 +1 @@ +CONFIG_HID_ALPS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_ASUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_ASUS new file mode 100644 index 000000000000..efc1ef4f8da9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_ASUS @@ -0,0 +1 @@ +CONFIG_HID_ASUS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_CMEDIA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_CMEDIA new file mode 100644 index 000000000000..c168e7ea03b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_CMEDIA @@ -0,0 +1 @@ +CONFIG_HID_CMEDIA=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_HYPERV_MOUSE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_HYPERV_MOUSE new file mode 100644 index 000000000000..2089444e567c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_HYPERV_MOUSE @@ -0,0 +1 @@ +CONFIG_HID_HYPERV_MOUSE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_NVIDIA_SHIELD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_NVIDIA_SHIELD new file mode 100644 index 000000000000..dc7f0d9989d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_NVIDIA_SHIELD @@ -0,0 +1 @@ +# CONFIG_HID_NVIDIA_SHIELD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PICOLCD_CIR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PICOLCD_CIR new file mode 100644 index 000000000000..312f304cf23d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PICOLCD_CIR @@ -0,0 +1 @@ +CONFIG_HID_PICOLCD_CIR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ACCEL_3D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ACCEL_3D new file mode 100644 index 000000000000..58460b8bfc47 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ACCEL_3D @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_ACCEL_3D=m diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ALS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ALS new file mode 100644 index 000000000000..a8a99f6b3b38 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_ALS @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_ALS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE new file mode 100644 index 000000000000..705f7e447dc6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE @@ -0,0 +1 @@ +# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_CUSTOM_SENSOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_CUSTOM_SENSOR new file mode 100644 index 000000000000..e8a6e4971a51 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_CUSTOM_SENSOR @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_DEVICE_ROTATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_DEVICE_ROTATION new file mode 100644 index 000000000000..66449de3b74c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_DEVICE_ROTATION @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_DEVICE_ROTATION=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_GYRO_3D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_GYRO_3D new file mode 100644 index 000000000000..b2aa13a9f4ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_GYRO_3D @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_GYRO_3D=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUB new file mode 100644 index 000000000000..090ecbd4043f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUB @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_HUB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUMIDITY 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUMIDITY new file mode 100644 index 000000000000..d50f5014af5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_HUMIDITY @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_HUMIDITY=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_IIO_COMMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_IIO_COMMON new file mode 100644 index 000000000000..fc396a26cbd2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_IIO_COMMON @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_IIO_COMMON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_IIO_TRIGGER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_IIO_TRIGGER new file mode 100644 index 000000000000..b9cfc649f65c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_IIO_TRIGGER @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_IIO_TRIGGER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_INCLINOMETER_3D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_INCLINOMETER_3D new file mode 100644 index 000000000000..7a9399cb1aef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_INCLINOMETER_3D @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_INCLINOMETER_3D=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_MAGNETOMETER_3D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_MAGNETOMETER_3D new file mode 100644 index 000000000000..61036565fa4b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_MAGNETOMETER_3D @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PRESS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PRESS new file mode 100644 index 000000000000..735e740b0d70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PRESS @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_PRESS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PROX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PROX new file mode 100644 index 
000000000000..46e212b880c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_PROX @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_PROX=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_TEMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_TEMP new file mode 100644 index 000000000000..6f1a98bc0df1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_SENSOR_TEMP @@ -0,0 +1 @@ +CONFIG_HID_SENSOR_TEMP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HMC425 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HMC425 new file mode 100644 index 000000000000..79128bfd7bbc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HMC425 @@ -0,0 +1 @@ +# CONFIG_HMC425 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_CORE_SYNC_FULL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_CORE_SYNC_FULL new file mode 100644 index 000000000000..74e300e56012 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_CORE_SYNC_FULL @@ -0,0 +1 @@ +CONFIG_HOTPLUG_CORE_SYNC_FULL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_PARALLEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_PARALLEL new file mode 100644 index 000000000000..5eac1fd8ef52 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_PARALLEL @@ -0,0 +1 @@ +CONFIG_HOTPLUG_PARALLEL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_PCI_SHPC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_PCI_SHPC new file mode 100644 index 000000000000..4380a971729d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_PCI_SHPC @@ -0,0 +1 @@ +CONFIG_HOTPLUG_PCI_SHPC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_SMT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_SMT new file mode 100644 index 000000000000..ddb25fc38d3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_SMT @@ -0,0 +1 @@ +CONFIG_HOTPLUG_SMT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_SPLIT_STARTUP 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_SPLIT_STARTUP new file mode 100644 index 000000000000..9ba8dd70d43d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HOTPLUG_SPLIT_STARTUP @@ -0,0 +1 @@ +CONFIG_HOTPLUG_SPLIT_STARTUP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP03 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP03 new file mode 100644 index 000000000000..bd8714af3cda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP03 @@ -0,0 +1 @@ +# CONFIG_HP03 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP206C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP206C new file mode 100644 index 000000000000..6d5f0703259e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP206C @@ -0,0 +1 @@ +# CONFIG_HP206C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPET_EMULATE_RTC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPET_EMULATE_RTC new file mode 100644 index 000000000000..fa706c711c31 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPET_EMULATE_RTC @@ -0,0 +1 @@ +CONFIG_HPET_EMULATE_RTC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPET_MMAP_DEFAULT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPET_MMAP_DEFAULT new file mode 100644 index 000000000000..05bd11cb7de6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPET_MMAP_DEFAULT @@ -0,0 +1 @@ +# CONFIG_HPET_MMAP_DEFAULT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPWDT_NMI_DECODING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPWDT_NMI_DECODING new file mode 100644 index 000000000000..2f21b282dfce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HPWDT_NMI_DECODING @@ -0,0 +1 @@ +CONFIG_HPWDT_NMI_DECODING=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_ILO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_ILO new file mode 100644 index 000000000000..1d3256354a5b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_ILO @@ -0,0 +1 @@ +CONFIG_HP_ILO=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_WATCHDOG 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_WATCHDOG new file mode 100644 index 000000000000..9a829b4e98c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HP_WATCHDOG @@ -0,0 +1 @@ +CONFIG_HP_WATCHDOG=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HSU_DMA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HSU_DMA new file mode 100644 index 000000000000..0dcb85dec3f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HSU_DMA @@ -0,0 +1 @@ +CONFIG_HSU_DMA=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HTS221 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HTS221 new file mode 100644 index 000000000000..e38bd6677d07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HTS221 @@ -0,0 +1 @@ +# CONFIG_HTS221 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HTU21 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HTU21 new file mode 100644 index 000000000000..5ba8bbff7d76 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HTU21 @@ -0,0 +1 @@ +# CONFIG_HTU21 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HUAWEI_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HUAWEI_WMI new file mode 100644 index 000000000000..0ad2cb4e3c5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HUAWEI_WMI @@ -0,0 +1 @@ +# CONFIG_HUAWEI_WMI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_IRQ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_IRQ new file mode 100644 index 000000000000..2064342295b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_IRQ @@ -0,0 +1 @@ +CONFIG_HVC_IRQ=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN new file mode 100644 index 000000000000..df97a05c75ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN @@ -0,0 +1 @@ +CONFIG_HVC_XEN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN_FRONTEND new file mode 100644 index 000000000000..02ccaac1310c --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HVC_XEN_FRONTEND @@ -0,0 +1 @@ +CONFIG_HVC_XEN_FRONTEND=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HWMON_VID b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HWMON_VID new file mode 100644 index 000000000000..5cb16aa7c2fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HWMON_VID @@ -0,0 +1 @@ +CONFIG_HWMON_VID=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIA new file mode 100644 index 000000000000..9b9c512317b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIA @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_VIA=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIRTIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIRTIO new file mode 100644 index 000000000000..ec44b8453241 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HW_RANDOM_VIRTIO @@ -0,0 +1 @@ +CONFIG_HW_RANDOM_VIRTIO=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HX711 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HX711 new file mode 100644 index 000000000000..5378c733942a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HX711 @@ -0,0 +1 @@ +# CONFIG_HX711 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_NET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_NET new file mode 100644 index 000000000000..ecacee7154d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_NET @@ -0,0 +1 @@ +CONFIG_HYPERV_NET=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_TESTING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_TESTING new file mode 100644 index 000000000000..d763bef97e27 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_TESTING @@ -0,0 +1 @@ +# CONFIG_HYPERV_TESTING is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_TIMER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_TIMER new file mode 100644 index 000000000000..983959256fe3 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_TIMER @@ -0,0 +1 @@ +CONFIG_HYPERV_TIMER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_VTL_MODE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_VTL_MODE new file mode 100644 index 000000000000..bc928703dc8b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_VTL_MODE @@ -0,0 +1 @@ +# CONFIG_HYPERV_VTL_MODE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756 new file mode 100644 index 000000000000..760d8bd4289e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756 @@ -0,0 +1 @@ +CONFIG_I2C_AMD756=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756_S4882 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756_S4882 new file mode 100644 index 000000000000..32da34a3ec9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD756_S4882 @@ -0,0 +1 @@ +CONFIG_I2C_AMD756_S4882=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD8111 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD8111 new file mode 100644 index 000000000000..3359257cda3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_AMD8111 @@ -0,0 +1 @@ +CONFIG_I2C_AMD8111=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_DESIGNWARE_AMDPSP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_DESIGNWARE_AMDPSP new file mode 100644 index 000000000000..3ae24381d8af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_DESIGNWARE_AMDPSP @@ -0,0 +1 @@ +# CONFIG_I2C_DESIGNWARE_AMDPSP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_DESIGNWARE_BAYTRAIL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_DESIGNWARE_BAYTRAIL new file mode 100644 index 000000000000..88e9c546ec06 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_DESIGNWARE_BAYTRAIL @@ -0,0 +1 @@ +CONFIG_I2C_DESIGNWARE_BAYTRAIL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_GPIO new file 
mode 100644 index 000000000000..1dd3eb65409f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_GPIO @@ -0,0 +1 @@ +# CONFIG_I2C_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_HELPER_AUTO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_HELPER_AUTO new file mode 100644 index 000000000000..59be08e04c9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_HELPER_AUTO @@ -0,0 +1 @@ +CONFIG_I2C_HELPER_AUTO=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_I801 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_I801 new file mode 100644 index 000000000000..5d1883a34d28 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_I801 @@ -0,0 +1 @@ +CONFIG_I2C_I801=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ISCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ISCH new file mode 100644 index 000000000000..ca084a709949 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ISCH @@ -0,0 +1 @@ +CONFIG_I2C_ISCH=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ISMT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ISMT new file mode 100644 index 000000000000..f720d1bb1a2b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ISMT @@ -0,0 +1 @@ +CONFIG_I2C_ISMT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MLXCPLD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MLXCPLD new file mode 100644 index 000000000000..92afd7ee9654 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MLXCPLD @@ -0,0 +1 @@ +CONFIG_I2C_MLXCPLD=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_GPIO new file mode 100644 index 000000000000..7d3813d3461b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_GPIO @@ -0,0 +1 @@ +# CONFIG_I2C_MUX_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA9541 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA9541 new file mode 100644 index 000000000000..77a2d793949a --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA9541 @@ -0,0 +1 @@ +# CONFIG_I2C_MUX_PCA9541 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA954x b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA954x new file mode 100644 index 000000000000..51e48b468b58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_MUX_PCA954x @@ -0,0 +1 @@ +# CONFIG_I2C_MUX_PCA954x is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_NFORCE2_S4985 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_NFORCE2_S4985 new file mode 100644 index 000000000000..758a790d2558 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_NFORCE2_S4985 @@ -0,0 +1 @@ +CONFIG_I2C_NFORCE2_S4985=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_PARPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_PARPORT new file mode 100644 index 000000000000..58827a258983 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_PARPORT @@ -0,0 +1 @@ +CONFIG_I2C_PARPORT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_PIIX4 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_PIIX4 new file mode 100644 index 000000000000..ff0307146e25 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_PIIX4 @@ -0,0 +1 @@ +CONFIG_I2C_PIIX4=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_SCMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_SCMI new file mode 100644 index 000000000000..6e60bd4e1174 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_SCMI @@ -0,0 +1 @@ +CONFIG_I2C_SCMI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_SIS96X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_SIS96X new file mode 100644 index 000000000000..e716d349283e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_SIS96X @@ -0,0 +1 @@ +CONFIG_I2C_SIS96X=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIA new file mode 100644 index 000000000000..4b9475a7bf95 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIA @@ -0,0 +1 @@ +CONFIG_I2C_VIA=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIAPRO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIAPRO new file mode 100644 index 000000000000..93d8303dbf8f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIAPRO @@ -0,0 +1 @@ +CONFIG_I2C_VIAPRO=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIPERBOARD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIPERBOARD new file mode 100644 index 000000000000..a041d4d66b8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_VIPERBOARD @@ -0,0 +1 @@ +CONFIG_I2C_VIPERBOARD=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN new file mode 100644 index 000000000000..d9333ca2495c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN @@ -0,0 +1 @@ +CONFIG_I2C_ZHAOXIN=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN_SMBUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN_SMBUS new file mode 100644 index 000000000000..12afe80402f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I2C_ZHAOXIN_SMBUS @@ -0,0 +1 @@ +CONFIG_I2C_ZHAOXIN_SMBUS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_I8253_LOCK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I8253_LOCK new file mode 100644 index 000000000000..f5026f2f767f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_I8253_LOCK @@ -0,0 +1 @@ +CONFIG_I8253_LOCK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IAQCORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IAQCORE new file mode 100644 index 000000000000..7d167fb8a92d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IAQCORE @@ -0,0 +1 @@ +# CONFIG_IAQCORE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IB700_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IB700_WDT new file mode 100644 index 000000000000..3cf068abd52e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IB700_WDT @@ -0,0 +1 @@ 
+CONFIG_IB700_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBMASR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBMASR new file mode 100644 index 000000000000..c5d4eb2539f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBMASR @@ -0,0 +1 @@ +CONFIG_IBMASR=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_ASM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_ASM new file mode 100644 index 000000000000..dd7f292eb8ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_ASM @@ -0,0 +1 @@ +# CONFIG_IBM_ASM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_RTL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_RTL new file mode 100644 index 000000000000..3a0fee720c72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IBM_RTL @@ -0,0 +1 @@ +# CONFIG_IBM_RTL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ICE_HWTS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ICE_HWTS new file mode 100644 index 000000000000..ef6713236940 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ICE_HWTS @@ -0,0 +1 @@ +CONFIG_ICE_HWTS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ICP10100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ICP10100 new file mode 100644 index 000000000000..4a8ee10d0335 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ICP10100 @@ -0,0 +1 @@ +# CONFIG_ICP10100 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IDEAPAD_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IDEAPAD_LAPTOP new file mode 100644 index 000000000000..5937dca3e801 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IDEAPAD_LAPTOP @@ -0,0 +1 @@ +CONFIG_IDEAPAD_LAPTOP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IDLE_INJECT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IDLE_INJECT new file mode 100644 index 000000000000..9e7af864ae54 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IDLE_INJECT @@ -0,0 +1 @@ +CONFIG_IDLE_INJECT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IE6XX_WDT 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IE6XX_WDT new file mode 100644 index 000000000000..96368c0afc44 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IE6XX_WDT @@ -0,0 +1 @@ +CONFIG_IE6XX_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IEEE802154_FAKELB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IEEE802154_FAKELB new file mode 100644 index 000000000000..63f336d574da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IEEE802154_FAKELB @@ -0,0 +1 @@ +CONFIG_IEEE802154_FAKELB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO new file mode 100644 index 000000000000..72953e4fa961 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO @@ -0,0 +1 @@ +CONFIG_IIO=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER new file mode 100644 index 000000000000..e10ee5ac8c8f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER @@ -0,0 +1 @@ +CONFIG_IIO_BUFFER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_CB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_CB new file mode 100644 index 000000000000..657cf48d33c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_CB @@ -0,0 +1 @@ +# CONFIG_IIO_BUFFER_CB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMA new file mode 100644 index 000000000000..fdc2817a1429 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMA @@ -0,0 +1 @@ +# CONFIG_IIO_BUFFER_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMAENGINE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMAENGINE new file mode 100644 index 000000000000..7f7ca607ab89 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_DMAENGINE @@ -0,0 +1 @@ +# CONFIG_IIO_BUFFER_DMAENGINE is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_HW_CONSUMER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_HW_CONSUMER new file mode 100644 index 000000000000..0baa9e884b24 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_BUFFER_HW_CONSUMER @@ -0,0 +1 @@ +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_CONFIGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_CONFIGFS new file mode 100644 index 000000000000..bbf2a829d728 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_CONFIGFS @@ -0,0 +1 @@ +# CONFIG_IIO_CONFIGFS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_CONSUMERS_PER_TRIGGER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_CONSUMERS_PER_TRIGGER new file mode 100644 index 000000000000..9198f59332fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_CONSUMERS_PER_TRIGGER @@ -0,0 +1 @@ +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_INTERRUPT_TRIGGER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_INTERRUPT_TRIGGER new file mode 100644 index 000000000000..fd56749b1ee1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_INTERRUPT_TRIGGER @@ -0,0 +1 @@ +# CONFIG_IIO_INTERRUPT_TRIGGER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KFIFO_BUF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KFIFO_BUF new file mode 100644 index 000000000000..5791c27c1db6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KFIFO_BUF @@ -0,0 +1 @@ +CONFIG_IIO_KFIFO_BUF=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KX022A_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KX022A_I2C new file mode 100644 index 000000000000..b2454203d357 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KX022A_I2C @@ -0,0 +1 @@ +# CONFIG_IIO_KX022A_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KX022A_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KX022A_SPI new file mode 100644 index 
000000000000..4630db8b4736 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_KX022A_SPI @@ -0,0 +1 @@ +# CONFIG_IIO_KX022A_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_MUX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_MUX new file mode 100644 index 000000000000..def041a2b196 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_MUX @@ -0,0 +1 @@ +# CONFIG_IIO_MUX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_RESCALE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_RESCALE new file mode 100644 index 000000000000..4c25d3102621 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_RESCALE @@ -0,0 +1 @@ +# CONFIG_IIO_RESCALE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SSP_SENSORHUB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SSP_SENSORHUB new file mode 100644 index 000000000000..65fe1e730198 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SSP_SENSORHUB @@ -0,0 +1 @@ +# CONFIG_IIO_SSP_SENSORHUB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_ACCEL_3AXIS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_ACCEL_3AXIS new file mode 100644 index 000000000000..8f88bbdf50af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_ACCEL_3AXIS @@ -0,0 +1 @@ +# CONFIG_IIO_ST_ACCEL_3AXIS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_GYRO_3AXIS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_GYRO_3AXIS new file mode 100644 index 000000000000..b76d81866ab4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_GYRO_3AXIS @@ -0,0 +1 @@ +# CONFIG_IIO_ST_GYRO_3AXIS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM6DSX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM6DSX new file mode 100644 index 000000000000..b6ec5783698f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM6DSX @@ -0,0 +1 @@ +# CONFIG_IIO_ST_LSM6DSX is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM9DS0 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM9DS0 new file mode 100644 index 000000000000..6c71996a504e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_LSM9DS0 @@ -0,0 +1 @@ +# CONFIG_IIO_ST_LSM9DS0 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_MAGN_3AXIS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_MAGN_3AXIS new file mode 100644 index 000000000000..3019839a3550 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_MAGN_3AXIS @@ -0,0 +1 @@ +# CONFIG_IIO_ST_MAGN_3AXIS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_PRESS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_PRESS new file mode 100644 index 000000000000..6932cca0bc68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_ST_PRESS @@ -0,0 +1 @@ +# CONFIG_IIO_ST_PRESS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SW_DEVICE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SW_DEVICE new file mode 100644 index 000000000000..92c6b2e2885c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SW_DEVICE @@ -0,0 +1 @@ +# CONFIG_IIO_SW_DEVICE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SW_TRIGGER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SW_TRIGGER new file mode 100644 index 000000000000..6373a07ab06a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SW_TRIGGER @@ -0,0 +1 @@ +# CONFIG_IIO_SW_TRIGGER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SYSFS_TRIGGER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SYSFS_TRIGGER new file mode 100644 index 000000000000..3ea306080ed8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_SYSFS_TRIGGER @@ -0,0 +1 @@ +# CONFIG_IIO_SYSFS_TRIGGER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGER new file mode 100644 index 000000000000..f32839d5fc98 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGER @@ -0,0 +1 @@ +CONFIG_IIO_TRIGGER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGERED_BUFFER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGERED_BUFFER new file mode 100644 index 000000000000..1707c5f48696 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGERED_BUFFER @@ -0,0 +1 @@ +CONFIG_IIO_TRIGGERED_BUFFER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGERED_EVENT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGERED_EVENT new file mode 100644 index 000000000000..d9e9ba26a23b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IIO_TRIGGERED_EVENT @@ -0,0 +1 @@ +# CONFIG_IIO_TRIGGERED_EVENT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INA2XX_ADC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INA2XX_ADC new file mode 100644 index 000000000000..ee86c5485107 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INA2XX_ADC @@ -0,0 +1 @@ +# CONFIG_INA2XX_ADC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_HFI1 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_HFI1 new file mode 100644 index 000000000000..eb333c845ba1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_HFI1 @@ -0,0 +1 @@ +CONFIG_INFINIBAND_HFI1=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_QIB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_QIB new file mode 100644 index 000000000000..591f4e962814 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_QIB @@ -0,0 +1 @@ +# CONFIG_INFINIBAND_QIB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_USNIC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_USNIC new file mode 100644 index 000000000000..3d8c33da50e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_USNIC @@ -0,0 +1 @@ +CONFIG_INFINIBAND_USNIC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_VMWARE_PVRDMA 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_VMWARE_PVRDMA new file mode 100644 index 000000000000..164f3b26c121 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_VMWARE_PVRDMA @@ -0,0 +1 @@ +CONFIG_INFINIBAND_VMWARE_PVRDMA=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_AD714X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_AD714X new file mode 100644 index 000000000000..ce04c7c5e73f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_AD714X @@ -0,0 +1 @@ +# CONFIG_INPUT_AD714X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ADXL34X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ADXL34X new file mode 100644 index 000000000000..98fa0144307d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ADXL34X @@ -0,0 +1 @@ +# CONFIG_INPUT_ADXL34X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_APANEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_APANEL new file mode 100644 index 000000000000..16deb3fd37e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_APANEL @@ -0,0 +1 @@ +CONFIG_INPUT_APANEL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ATI_REMOTE2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ATI_REMOTE2 new file mode 100644 index 000000000000..c60eb9e65dcc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ATI_REMOTE2 @@ -0,0 +1 @@ +CONFIG_INPUT_ATI_REMOTE2=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ATLAS_BTNS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ATLAS_BTNS new file mode 100644 index 000000000000..730f49614137 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_ATLAS_BTNS @@ -0,0 +1 @@ +CONFIG_INPUT_ATLAS_BTNS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_BMA150 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_BMA150 new file mode 100644 index 000000000000..4dd4cb0fe158 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_BMA150 @@ -0,0 +1 @@ +# CONFIG_INPUT_BMA150 is not set 
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_CM109 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_CM109 new file mode 100644 index 000000000000..ede5a1df4c01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_CM109 @@ -0,0 +1 @@ +CONFIG_INPUT_CM109=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_CMA3000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_CMA3000 new file mode 100644 index 000000000000..9743d2028571 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_CMA3000 @@ -0,0 +1 @@ +# CONFIG_INPUT_CMA3000 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DA7280_HAPTICS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DA7280_HAPTICS new file mode 100644 index 000000000000..27256e8ec087 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DA7280_HAPTICS @@ -0,0 +1 @@ +# CONFIG_INPUT_DA7280_HAPTICS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV260X_HAPTICS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV260X_HAPTICS new file mode 100644 index 000000000000..482b80c549f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV260X_HAPTICS @@ -0,0 +1 @@ +# CONFIG_INPUT_DRV260X_HAPTICS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV2665_HAPTICS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV2665_HAPTICS new file mode 100644 index 000000000000..0ce8ccd60473 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV2665_HAPTICS @@ -0,0 +1 @@ +# CONFIG_INPUT_DRV2665_HAPTICS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV2667_HAPTICS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV2667_HAPTICS new file mode 100644 index 000000000000..29d7ce7536be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_DRV2667_HAPTICS @@ -0,0 +1 @@ +# CONFIG_INPUT_DRV2667_HAPTICS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_E3X0_BUTTON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_E3X0_BUTTON new 
file mode 100644 index 000000000000..70002037e5e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_E3X0_BUTTON @@ -0,0 +1 @@ +# CONFIG_INPUT_E3X0_BUTTON is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_BEEPER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_BEEPER new file mode 100644 index 000000000000..411fd135edf6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_BEEPER @@ -0,0 +1 @@ +# CONFIG_INPUT_GPIO_BEEPER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_DECODER new file mode 100644 index 000000000000..47e04fe3a9e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_DECODER @@ -0,0 +1 @@ +# CONFIG_INPUT_GPIO_DECODER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_ROTARY_ENCODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_ROTARY_ENCODER new file mode 100644 index 000000000000..fd61dc3ff5cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_ROTARY_ENCODER @@ -0,0 +1 @@ +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_VIBRA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_VIBRA new file mode 100644 index 000000000000..ef2cfbcab11a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_GPIO_VIBRA @@ -0,0 +1 @@ +# CONFIG_INPUT_GPIO_VIBRA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IDEAPAD_SLIDEBAR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IDEAPAD_SLIDEBAR new file mode 100644 index 000000000000..16feab58f09c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IDEAPAD_SLIDEBAR @@ -0,0 +1 @@ +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IMS_PCU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IMS_PCU new file mode 100644 index 000000000000..476ca55ac98a --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IMS_PCU @@ -0,0 +1 @@ +# CONFIG_INPUT_IMS_PCU is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS269A b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS269A new file mode 100644 index 000000000000..8b78e198494a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS269A @@ -0,0 +1 @@ +# CONFIG_INPUT_IQS269A is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS626A b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS626A new file mode 100644 index 000000000000..0091ceedf94f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS626A @@ -0,0 +1 @@ +# CONFIG_INPUT_IQS626A is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS7222 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS7222 new file mode 100644 index 000000000000..6ac434ac975f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_IQS7222 @@ -0,0 +1 @@ +# CONFIG_INPUT_IQS7222 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_JOYDEV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_JOYDEV new file mode 100644 index 000000000000..d4bf799d0518 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_JOYDEV @@ -0,0 +1 @@ +CONFIG_INPUT_JOYDEV=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_KEYSPAN_REMOTE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_KEYSPAN_REMOTE new file mode 100644 index 000000000000..5cadd6973685 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_KEYSPAN_REMOTE @@ -0,0 +1 @@ +CONFIG_INPUT_KEYSPAN_REMOTE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_KXTJ9 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_KXTJ9 new file mode 100644 index 000000000000..f2d3d0cb698e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_KXTJ9 @@ -0,0 +1 @@ +# CONFIG_INPUT_KXTJ9 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_MISC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_MISC new file mode 100644 
index 000000000000..3708f5641c1e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_MISC @@ -0,0 +1 @@ +CONFIG_INPUT_MISC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_MMA8450 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_MMA8450 new file mode 100644 index 000000000000..1051809178e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_MMA8450 @@ -0,0 +1 @@ +# CONFIG_INPUT_MMA8450 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PCF8574 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PCF8574 new file mode 100644 index 000000000000..caaa9ddb2066 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PCF8574 @@ -0,0 +1 @@ +# CONFIG_INPUT_PCF8574 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PCSPKR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PCSPKR new file mode 100644 index 000000000000..87fa5268dfa0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PCSPKR @@ -0,0 +1 @@ +CONFIG_INPUT_PCSPKR=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_POWERMATE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_POWERMATE new file mode 100644 index 000000000000..e7251e1ce29f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_POWERMATE @@ -0,0 +1 @@ +CONFIG_INPUT_POWERMATE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PWM_BEEPER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PWM_BEEPER new file mode 100644 index 000000000000..9ac1c7b31613 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PWM_BEEPER @@ -0,0 +1 @@ +# CONFIG_INPUT_PWM_BEEPER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PWM_VIBRA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PWM_VIBRA new file mode 100644 index 000000000000..39a51b4903f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_PWM_VIBRA @@ -0,0 +1 @@ +# CONFIG_INPUT_PWM_VIBRA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_TABLET 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_TABLET new file mode 100644 index 000000000000..a435e97d0004 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_TABLET @@ -0,0 +1 @@ +CONFIG_INPUT_TABLET=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_TOUCHSCREEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_TOUCHSCREEN new file mode 100644 index 000000000000..3ee240e2b492 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_TOUCHSCREEN @@ -0,0 +1 @@ +CONFIG_INPUT_TOUCHSCREEN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_UINPUT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_UINPUT new file mode 100644 index 000000000000..ae1ea9a9c0ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_UINPUT @@ -0,0 +1 @@ +CONFIG_INPUT_UINPUT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_VIVALDIFMAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_VIVALDIFMAP new file mode 100644 index 000000000000..3cec2d822f91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_VIVALDIFMAP @@ -0,0 +1 @@ +CONFIG_INPUT_VIVALDIFMAP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_XEN_KBDDEV_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_XEN_KBDDEV_FRONTEND new file mode 100644 index 000000000000..9703adaf45ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_XEN_KBDDEV_FRONTEND @@ -0,0 +1 @@ +CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_YEALINK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_YEALINK new file mode 100644 index 000000000000..328da30ac4e8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_YEALINK @@ -0,0 +1 @@ +CONFIG_INPUT_YEALINK=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ATOMISP2_PM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ATOMISP2_PM new file mode 100644 index 000000000000..cf052120e411 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ATOMISP2_PM @@ -0,0 +1 @@ +# 
CONFIG_INTEL_ATOMISP2_PM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_GTT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_GTT new file mode 100644 index 000000000000..a4a57e0b71ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_GTT @@ -0,0 +1 @@ +CONFIG_INTEL_GTT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_HID_EVENT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_HID_EVENT new file mode 100644 index 000000000000..db284f9da343 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_HID_EVENT @@ -0,0 +1 @@ +CONFIG_INTEL_HID_EVENT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IDXD_COMPAT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IDXD_COMPAT new file mode 100644 index 000000000000..c7105ed60dee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IDXD_COMPAT @@ -0,0 +1 @@ +# CONFIG_INTEL_IDXD_COMPAT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IFS new file mode 100644 index 000000000000..6299ba67249b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IFS @@ -0,0 +1 @@ +CONFIG_INTEL_IFS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_INT0002_VGPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_INT0002_VGPIO new file mode 100644 index 000000000000..f416f2ddcdf1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_INT0002_VGPIO @@ -0,0 +1 @@ +# CONFIG_INTEL_INT0002_VGPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IOMMU_DEFAULT_ON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IOMMU_DEFAULT_ON new file mode 100644 index 000000000000..65c519e1d8cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IOMMU_DEFAULT_ON @@ -0,0 +1 @@ +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IOMMU_FLOPPY_WA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IOMMU_FLOPPY_WA new file mode 100644 index 
000000000000..24e7e07d86bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IOMMU_FLOPPY_WA @@ -0,0 +1 @@ +CONFIG_INTEL_IOMMU_FLOPPY_WA=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IPS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IPS new file mode 100644 index 000000000000..3d5e32159cb9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_IPS @@ -0,0 +1 @@ +CONFIG_INTEL_IPS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISHTP_ECLITE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISHTP_ECLITE new file mode 100644 index 000000000000..83930e0edd4a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISHTP_ECLITE @@ -0,0 +1 @@ +# CONFIG_INTEL_ISHTP_ECLITE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER new file mode 100644 index 000000000000..af8d58e2b224 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER @@ -0,0 +1 @@ +# CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISH_HID b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISH_HID new file mode 100644 index 000000000000..7f7cbea0afd5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_ISH_HID @@ -0,0 +1 @@ +CONFIG_INTEL_ISH_HID=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_LDMA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_LDMA new file mode 100644 index 000000000000..7ff87eb77759 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_LDMA @@ -0,0 +1 @@ +# CONFIG_INTEL_LDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC new file mode 100644 index 000000000000..b2bd8b907376 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC @@ -0,0 +1 @@ +# CONFIG_INTEL_MEI_GSC is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC_PROXY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC_PROXY new file mode 100644 index 000000000000..2282406918d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_GSC_PROXY @@ -0,0 +1 @@ +# CONFIG_INTEL_MEI_GSC_PROXY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_HDCP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_HDCP new file mode 100644 index 000000000000..3566e3d207c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_HDCP @@ -0,0 +1 @@ +# CONFIG_INTEL_MEI_HDCP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_PXP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_PXP new file mode 100644 index 000000000000..b9589b52478a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_PXP @@ -0,0 +1 @@ +# CONFIG_INTEL_MEI_PXP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_TXE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_TXE new file mode 100644 index 000000000000..97e40f644525 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_MEI_TXE @@ -0,0 +1 @@ +# CONFIG_INTEL_MEI_TXE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_OAKTRAIL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_OAKTRAIL new file mode 100644 index 000000000000..c2cbe39cd930 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_OAKTRAIL @@ -0,0 +1 @@ +CONFIG_INTEL_OAKTRAIL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_PUNIT_IPC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_PUNIT_IPC new file mode 100644 index 000000000000..132df88276e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_PUNIT_IPC @@ -0,0 +1 @@ +# CONFIG_INTEL_PUNIT_IPC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SAR_INT1092 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SAR_INT1092 new file mode 100644 index 000000000000..6e0e28bee79a --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SAR_INT1092 @@ -0,0 +1 @@ +# CONFIG_INTEL_SAR_INT1092 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PCI new file mode 100644 index 000000000000..0418b146247f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PCI @@ -0,0 +1 @@ +# CONFIG_INTEL_SCU_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PLATFORM new file mode 100644 index 000000000000..18641bd2d7d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SCU_PLATFORM @@ -0,0 +1 @@ +# CONFIG_INTEL_SCU_PLATFORM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SDSI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SDSI new file mode 100644 index 000000000000..2e3194ab0130 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SDSI @@ -0,0 +1 @@ +# CONFIG_INTEL_SDSI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SMARTCONNECT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SMARTCONNECT new file mode 100644 index 000000000000..f9966a58b5dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SMARTCONNECT @@ -0,0 +1 @@ +# CONFIG_INTEL_SMARTCONNECT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SOC_DTS_IOSF_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SOC_DTS_IOSF_CORE new file mode 100644 index 000000000000..783efbc28d12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SOC_DTS_IOSF_CORE @@ -0,0 +1 @@ +CONFIG_INTEL_SOC_DTS_IOSF_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SOC_DTS_THERMAL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SOC_DTS_THERMAL new file mode 100644 index 000000000000..a9e9470522cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_SOC_DTS_THERMAL @@ -0,0 +1 @@ +# CONFIG_INTEL_SOC_DTS_THERMAL is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC new file mode 100644 index 000000000000..c994c6ea735c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC @@ -0,0 +1 @@ +CONFIG_INTEL_TCC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC_COOLING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC_COOLING new file mode 100644 index 000000000000..c64c52e1bde4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_TCC_COOLING @@ -0,0 +1 @@ +# CONFIG_INTEL_TCC_COOLING is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL new file mode 100644 index 000000000000..671fe025d6f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL @@ -0,0 +1 @@ +# CONFIG_INTEL_UNCORE_FREQ_CONTROL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_VBTN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_VBTN new file mode 100644 index 000000000000..62234540ab5a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_VBTN @@ -0,0 +1 @@ +CONFIG_INTEL_VBTN=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI new file mode 100644 index 000000000000..d9b411a9ced3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI @@ -0,0 +1 @@ +CONFIG_INTEL_WMI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI_SBL_FW_UPDATE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI_SBL_FW_UPDATE new file mode 100644 index 000000000000..f3b82a785560 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI_SBL_FW_UPDATE @@ -0,0 +1 @@ +# CONFIG_INTEL_WMI_SBL_FW_UPDATE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI_THUNDERBOLT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI_THUNDERBOLT new file mode 100644 index 000000000000..05356f742f40 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_WMI_THUNDERBOLT @@ -0,0 +1 @@ +CONFIG_INTEL_WMI_THUNDERBOLT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTERVAL_TREE_SPAN_ITER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTERVAL_TREE_SPAN_ITER new file mode 100644 index 000000000000..674eb3d2aa06 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTERVAL_TREE_SPAN_ITER @@ -0,0 +1 @@ +CONFIG_INTERVAL_TREE_SPAN_ITER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_ICM42600_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_ICM42600_I2C new file mode 100644 index 000000000000..19a326e2067b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_ICM42600_I2C @@ -0,0 +1 @@ +# CONFIG_INV_ICM42600_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_ICM42600_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_ICM42600_SPI new file mode 100644 index 000000000000..610c7e059403 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_ICM42600_SPI @@ -0,0 +1 @@ +# CONFIG_INV_ICM42600_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_MPU6050_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_MPU6050_I2C new file mode 100644 index 000000000000..75930df434a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_MPU6050_I2C @@ -0,0 +1 @@ +# CONFIG_INV_MPU6050_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_MPU6050_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_MPU6050_SPI new file mode 100644 index 000000000000..84886298913f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INV_MPU6050_SPI @@ -0,0 +1 @@ +# CONFIG_INV_MPU6050_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD new file mode 100644 index 000000000000..9e5b98c5119c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD @@ -0,0 +1 @@ +CONFIG_IOMMUFD=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD_DRIVER 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD_DRIVER new file mode 100644 index 000000000000..14ac2301d0ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOMMUFD_DRIVER @@ -0,0 +1 @@ +CONFIG_IOMMUFD_DRIVER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOSF_MBI_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOSF_MBI_DEBUG new file mode 100644 index 000000000000..ecc6275ab1d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IOSF_MBI_DEBUG @@ -0,0 +1 @@ +# CONFIG_IOSF_MBI_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_0XED b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_0XED new file mode 100644 index 000000000000..07499527fa63 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_0XED @@ -0,0 +1 @@ +# CONFIG_IO_DELAY_0XED is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_NONE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_NONE new file mode 100644 index 000000000000..36604953911f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_NONE @@ -0,0 +1 @@ +# CONFIG_IO_DELAY_NONE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_UDELAY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_UDELAY new file mode 100644 index 000000000000..968788e0ea0a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IO_DELAY_UDELAY @@ -0,0 +1 @@ +# CONFIG_IO_DELAY_UDELAY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPU_BRIDGE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPU_BRIDGE new file mode 100644 index 000000000000..90e15a7d1760 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPU_BRIDGE @@ -0,0 +1 @@ +# CONFIG_IPU_BRIDGE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2100 new file mode 100644 index 000000000000..b9f94f283577 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2100 @@ -0,0 +1 @@ +# CONFIG_IPW2100 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2200 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2200 new file mode 100644 index 000000000000..d1ad73474931 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IPW2200 @@ -0,0 +1 @@ +# CONFIG_IPW2200 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IRQ_BYPASS_MANAGER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IRQ_BYPASS_MANAGER new file mode 100644 index 000000000000..c7d0aceb3588 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IRQ_BYPASS_MANAGER @@ -0,0 +1 @@ +CONFIG_IRQ_BYPASS_MANAGER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IRSD200 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IRSD200 new file mode 100644 index 000000000000..834da272499c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IRSD200 @@ -0,0 +1 @@ +# CONFIG_IRSD200 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ENE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ENE new file mode 100644 index 000000000000..eff4710c000e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ENE @@ -0,0 +1 @@ +CONFIG_IR_ENE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_FINTEK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_FINTEK new file mode 100644 index 000000000000..d0eedcce1e15 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_FINTEK @@ -0,0 +1 @@ +CONFIG_IR_FINTEK=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IGORPLUGUSB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IGORPLUGUSB new file mode 100644 index 000000000000..ce19e31eff5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IGORPLUGUSB @@ -0,0 +1 @@ +# CONFIG_IR_IGORPLUGUSB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IGUANA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IGUANA new file mode 100644 index 000000000000..f07db57bda63 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IGUANA @@ -0,0 +1 @@ +CONFIG_IR_IGUANA=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON new file mode 100644 index 000000000000..6cf331742ab5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON @@ -0,0 +1 @@ +CONFIG_IR_IMON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON_DECODER new file mode 100644 index 000000000000..0a129a166896 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON_DECODER @@ -0,0 +1 @@ +CONFIG_IR_IMON_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON_RAW b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON_RAW new file mode 100644 index 000000000000..d7a8ae566ff0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_IMON_RAW @@ -0,0 +1 @@ +CONFIG_IR_IMON_RAW=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ITE_CIR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ITE_CIR new file mode 100644 index 000000000000..8bf06f3c9296 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_ITE_CIR @@ -0,0 +1 @@ +CONFIG_IR_ITE_CIR=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_JVC_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_JVC_DECODER new file mode 100644 index 000000000000..0dad4b65e023 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_JVC_DECODER @@ -0,0 +1 @@ +CONFIG_IR_JVC_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_MCEUSB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_MCEUSB new file mode 100644 index 000000000000..d569f0985635 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_MCEUSB @@ -0,0 +1 @@ +CONFIG_IR_MCEUSB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_MCE_KBD_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_MCE_KBD_DECODER new file mode 100644 index 000000000000..60bd7c962966 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_MCE_KBD_DECODER @@ -0,0 +1 @@ +CONFIG_IR_MCE_KBD_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_NEC_DECODER 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_NEC_DECODER new file mode 100644 index 000000000000..0d4e6b3466dc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_NEC_DECODER @@ -0,0 +1 @@ +CONFIG_IR_NEC_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_NUVOTON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_NUVOTON new file mode 100644 index 000000000000..667c8cea6c22 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_NUVOTON @@ -0,0 +1 @@ +CONFIG_IR_NUVOTON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RC5_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RC5_DECODER new file mode 100644 index 000000000000..ef01aaedab36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RC5_DECODER @@ -0,0 +1 @@ +CONFIG_IR_RC5_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RC6_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RC6_DECODER new file mode 100644 index 000000000000..7b02e694ae07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RC6_DECODER @@ -0,0 +1 @@ +CONFIG_IR_RC6_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RCMM_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RCMM_DECODER new file mode 100644 index 000000000000..8393c01f6484 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_RCMM_DECODER @@ -0,0 +1 @@ +# CONFIG_IR_RCMM_DECODER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_REDRAT3 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_REDRAT3 new file mode 100644 index 000000000000..a09fb6b6d1d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_REDRAT3 @@ -0,0 +1 @@ +CONFIG_IR_REDRAT3=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SANYO_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SANYO_DECODER new file mode 100644 index 000000000000..26864c7d8384 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SANYO_DECODER @@ -0,0 +1 @@ +CONFIG_IR_SANYO_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SERIAL 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SERIAL new file mode 100644 index 000000000000..53c7f1d81e8e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SERIAL @@ -0,0 +1 @@ +CONFIG_IR_SERIAL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SONY_DECODER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SONY_DECODER new file mode 100644 index 000000000000..0ee42a3dbce0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_SONY_DECODER @@ -0,0 +1 @@ +CONFIG_IR_SONY_DECODER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_STREAMZAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_STREAMZAP new file mode 100644 index 000000000000..8a21614508cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_STREAMZAP @@ -0,0 +1 @@ +CONFIG_IR_STREAMZAP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TOY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TOY new file mode 100644 index 000000000000..3993b5b17f2f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TOY @@ -0,0 +1 @@ +# CONFIG_IR_TOY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TTUSBIR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TTUSBIR new file mode 100644 index 000000000000..d18284cc8c8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_TTUSBIR @@ -0,0 +1 @@ +CONFIG_IR_TTUSBIR=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_WINBOND_CIR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_WINBOND_CIR new file mode 100644 index 000000000000..a8e5cd5e7c7e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IR_WINBOND_CIR @@ -0,0 +1 @@ +CONFIG_IR_WINBOND_CIR=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN new file mode 100644 index 000000000000..faddbf9dd49d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN @@ -0,0 +1 @@ +CONFIG_ISDN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI new file mode 100644 index 
000000000000..b36fbdb13e9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI @@ -0,0 +1 @@ +CONFIG_ISDN_CAPI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI_MIDDLEWARE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI_MIDDLEWARE new file mode 100644 index 000000000000..759a175f1f71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISDN_CAPI_MIDDLEWARE @@ -0,0 +1 @@ +CONFIG_ISDN_CAPI_MIDDLEWARE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29003 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29003 new file mode 100644 index 000000000000..106e426e44d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29003 @@ -0,0 +1 @@ +CONFIG_ISL29003=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29020 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29020 new file mode 100644 index 000000000000..a441bf00f776 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29020 @@ -0,0 +1 @@ +CONFIG_ISL29020=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29125 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29125 new file mode 100644 index 000000000000..53ed90a59b39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29125 @@ -0,0 +1 @@ +# CONFIG_ISL29125 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29501 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29501 new file mode 100644 index 000000000000..65b1e2073232 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ISL29501 @@ -0,0 +1 @@ +# CONFIG_ISL29501 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IT8712F_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IT8712F_WDT new file mode 100644 index 000000000000..0b3b587bd3ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IT8712F_WDT @@ -0,0 +1 @@ +CONFIG_IT8712F_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IT87_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IT87_WDT new file mode 100644 index 000000000000..7c21af517021 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IT87_WDT @@ -0,0 +1 @@ +CONFIG_IT87_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ITG3200 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ITG3200 new file mode 100644 index 000000000000..3ce7a97af61f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ITG3200 @@ -0,0 +1 @@ +# CONFIG_ITG3200 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL3945 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL3945 new file mode 100644 index 000000000000..488a21b4b9ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL3945 @@ -0,0 +1 @@ +# CONFIG_IWL3945 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL4965 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL4965 new file mode 100644 index 000000000000..602216386ff1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWL4965 @@ -0,0 +1 @@ +# CONFIG_IWL4965 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLDVM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLDVM new file mode 100644 index 000000000000..3e6af879094e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLDVM @@ -0,0 +1 @@ +CONFIG_IWLDVM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLMVM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLMVM new file mode 100644 index 000000000000..c22d966fa17b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLMVM @@ -0,0 +1 @@ +CONFIG_IWLMVM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI new file mode 100644 index 000000000000..485244aec328 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI @@ -0,0 +1 @@ +CONFIG_IWLWIFI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUG new file mode 100644 index 000000000000..c646f91ad868 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUG @@ -0,0 +1 @@ +# CONFIG_IWLWIFI_DEBUG is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUGFS new file mode 100644 index 000000000000..2429b837bef5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEBUGFS @@ -0,0 +1 @@ +CONFIG_IWLWIFI_DEBUGFS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEVICE_TRACING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEVICE_TRACING new file mode 100644 index 000000000000..31442f8985c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_DEVICE_TRACING @@ -0,0 +1 @@ +# CONFIG_IWLWIFI_DEVICE_TRACING is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_LEDS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_LEDS new file mode 100644 index 000000000000..0135ed037e7c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_LEDS @@ -0,0 +1 @@ +CONFIG_IWLWIFI_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_OPMODE_MODULAR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_OPMODE_MODULAR new file mode 100644 index 000000000000..7af1ca93eb45 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IWLWIFI_OPMODE_MODULAR @@ -0,0 +1 @@ +CONFIG_IWLWIFI_OPMODE_MODULAR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_JAILHOUSE_GUEST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_JAILHOUSE_GUEST new file mode 100644 index 000000000000..aa93b53af4e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_JAILHOUSE_GUEST @@ -0,0 +1 @@ +# CONFIG_JAILHOUSE_GUEST is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_JSA1212 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_JSA1212 new file mode 100644 index 000000000000..2b65eeb385cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_JSA1212 @@ -0,0 +1 @@ +# CONFIG_JSA1212 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KALLSYMS_ABSOLUTE_PERCPU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KALLSYMS_ABSOLUTE_PERCPU new file mode 100644 index 000000000000..decb2f62f288 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KALLSYMS_ABSOLUTE_PERCPU @@ -0,0 +1 @@ +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KARMA_PARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KARMA_PARTITION new file mode 100644 index 000000000000..32e78cf3929c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KARMA_PARTITION @@ -0,0 +1 @@ +CONFIG_KARMA_PARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KEYBOARD_APPLESPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KEYBOARD_APPLESPI new file mode 100644 index 000000000000..88d59d0cd42a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KEYBOARD_APPLESPI @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_APPLESPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KEYBOARD_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KEYBOARD_GPIO new file mode 100644 index 000000000000..d31079cd6b1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KEYBOARD_GPIO @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KMX61 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KMX61 new file mode 100644 index 000000000000..5e4362ad9b3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KMX61 @@ -0,0 +1 @@ +# CONFIG_KMX61 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KRETPROBE_ON_RETHOOK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KRETPROBE_ON_RETHOOK new file mode 100644 index 000000000000..b29d2d70a332 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KRETPROBE_ON_RETHOOK @@ -0,0 +1 @@ +CONFIG_KRETPROBE_ON_RETHOOK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_ASYNC_PF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_ASYNC_PF new file mode 100644 index 000000000000..fc0160b92ab6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_ASYNC_PF @@ -0,0 +1 @@ +CONFIG_KVM_ASYNC_PF=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_COMPAT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_COMPAT new file mode 100644 
index 000000000000..0a783f2dfd69 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_COMPAT @@ -0,0 +1 @@ +CONFIG_KVM_COMPAT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_EXTERNAL_WRITE_TRACKING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_EXTERNAL_WRITE_TRACKING new file mode 100644 index 000000000000..483053d53556 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_EXTERNAL_WRITE_TRACKING @@ -0,0 +1 @@ +CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_SMM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_SMM new file mode 100644 index 000000000000..db0ae8a99e90 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_SMM @@ -0,0 +1 @@ +CONFIG_KVM_SMM=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_XEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_XEN new file mode 100644 index 000000000000..de03d1010a4b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KVM_XEN @@ -0,0 +1 @@ +# CONFIG_KVM_XEN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KXCJK1013 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KXCJK1013 new file mode 100644 index 000000000000..0aa734f18e46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KXCJK1013 @@ -0,0 +1 @@ +# CONFIG_KXCJK1013 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_KXSD9 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KXSD9 new file mode 100644 index 000000000000..ea0cfd36c587 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_KXSD9 @@ -0,0 +1 @@ +# CONFIG_KXSD9 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_APU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_APU new file mode 100644 index 000000000000..571c04c1ce2f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_APU @@ -0,0 +1 @@ +# CONFIG_LEDS_APU is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_CLASS_FLASH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_CLASS_FLASH new file mode 100644 index 000000000000..feacc7f5659d 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_CLASS_FLASH @@ -0,0 +1 @@ +# CONFIG_LEDS_CLASS_FLASH is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_INTEL_SS4200 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_INTEL_SS4200 new file mode 100644 index 000000000000..9937903af02d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_INTEL_SS4200 @@ -0,0 +1 @@ +CONFIG_LEDS_INTEL_SS4200=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_LT3593 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_LT3593 new file mode 100644 index 000000000000..f0784c9024d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_LT3593 @@ -0,0 +1 @@ +# CONFIG_LEDS_LT3593 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_MLXCPLD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_MLXCPLD new file mode 100644 index 000000000000..0bef0da2f826 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_MLXCPLD @@ -0,0 +1 @@ +CONFIG_LEDS_MLXCPLD=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_NIC78BX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_NIC78BX new file mode 100644 index 000000000000..2ac0c1c812a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_NIC78BX @@ -0,0 +1 @@ +# CONFIG_LEDS_NIC78BX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_AUDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_AUDIO new file mode 100644 index 000000000000..bd45e0d885c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_AUDIO @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_AUDIO=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_DISK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_DISK new file mode 100644 index 000000000000..65230ad6b1b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEDS_TRIGGER_DISK @@ -0,0 +1 @@ +CONFIG_LEDS_TRIGGER_DISK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEGACY_VSYSCALL_XONLY 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEGACY_VSYSCALL_XONLY new file mode 100644 index 000000000000..65d6a69e8c1d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LEGACY_VSYSCALL_XONLY @@ -0,0 +1 @@ +CONFIG_LEGACY_VSYSCALL_XONLY=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LENOVO_YMC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LENOVO_YMC new file mode 100644 index 000000000000..807ab67f1558 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LENOVO_YMC @@ -0,0 +1 @@ +# CONFIG_LENOVO_YMC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LG_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LG_LAPTOP new file mode 100644 index 000000000000..b08108d5adfb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LG_LAPTOP @@ -0,0 +1 @@ +# CONFIG_LG_LAPTOP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS new file mode 100644 index 000000000000..9f51a6c121ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS @@ -0,0 +1 @@ +# CONFIG_LIBERTAS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS_THINFIRM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS_THINFIRM new file mode 100644 index 000000000000..7e611e155629 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBERTAS_THINFIRM @@ -0,0 +1 @@ +# CONFIG_LIBERTAS_THINFIRM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBNVDIMM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBNVDIMM new file mode 100644 index 000000000000..f773300dc43f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIBNVDIMM @@ -0,0 +1 @@ +CONFIG_LIBNVDIMM=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIDAR_LITE_V2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIDAR_LITE_V2 new file mode 100644 index 000000000000..731a434426c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIDAR_LITE_V2 @@ -0,0 +1 @@ +# CONFIG_LIDAR_LITE_V2 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIRC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIRC new file mode 100644 index 000000000000..bc883983d070 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LIRC @@ -0,0 +1 @@ +# CONFIG_LIRC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LMP91000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LMP91000 new file mode 100644 index 000000000000..aa44e35dfb35 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LMP91000 @@ -0,0 +1 @@ +# CONFIG_LMP91000 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_ICH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_ICH new file mode 100644 index 000000000000..0348d015ec04 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_ICH @@ -0,0 +1 @@ +CONFIG_LPC_ICH=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_SCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_SCH new file mode 100644 index 000000000000..5d8664a8a738 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LPC_SCH @@ -0,0 +1 @@ +CONFIG_LPC_SCH=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LP_CONSOLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LP_CONSOLE new file mode 100644 index 000000000000..1d8b49586404 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LP_CONSOLE @@ -0,0 +1 @@ +# CONFIG_LP_CONSOLE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC1660 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC1660 new file mode 100644 index 000000000000..89f64c1aa48f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC1660 @@ -0,0 +1 @@ +# CONFIG_LTC1660 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2471 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2471 new file mode 100644 index 000000000000..5d272ac29bdb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2471 @@ -0,0 +1 @@ +# CONFIG_LTC2471 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2485 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2485 new file mode 100644 index 
000000000000..09a531e8b6d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2485 @@ -0,0 +1 @@ +# CONFIG_LTC2485 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2496 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2496 new file mode 100644 index 000000000000..b63c5163ce7a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2496 @@ -0,0 +1 @@ +# CONFIG_LTC2496 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2497 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2497 new file mode 100644 index 000000000000..312f3db17f39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2497 @@ -0,0 +1 @@ +# CONFIG_LTC2497 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2632 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2632 new file mode 100644 index 000000000000..8bc2b8bc435c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2632 @@ -0,0 +1 @@ +# CONFIG_LTC2632 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2688 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2688 new file mode 100644 index 000000000000..115e34b20604 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2688 @@ -0,0 +1 @@ +# CONFIG_LTC2688 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2983 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2983 new file mode 100644 index 000000000000..170afd86b744 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTC2983 @@ -0,0 +1 @@ +# CONFIG_LTC2983 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTR501 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTR501 new file mode 100644 index 000000000000..04512139d893 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTR501 @@ -0,0 +1 @@ +# CONFIG_LTR501 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTRF216A b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTRF216A new file mode 100644 index 000000000000..e57ef9dc388b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LTRF216A @@ -0,0 +1 @@ +# 
CONFIG_LTRF216A is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_LV0104CS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LV0104CS new file mode 100644 index 000000000000..5e71853613f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_LV0104CS @@ -0,0 +1 @@ +# CONFIG_LV0104CS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_M62332 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_M62332 new file mode 100644 index 000000000000..5b8b668eca85 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_M62332 @@ -0,0 +1 @@ +# CONFIG_M62332 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC80211_HWSIM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC80211_HWSIM new file mode 100644 index 000000000000..274bf1d929ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC80211_HWSIM @@ -0,0 +1 @@ +CONFIG_MAC80211_HWSIM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MACHZ_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MACHZ_WDT new file mode 100644 index 000000000000..554b4e91aaa2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MACHZ_WDT @@ -0,0 +1 @@ +CONFIG_MACHZ_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MACINTOSH_DRIVERS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MACINTOSH_DRIVERS new file mode 100644 index 000000000000..105442f402c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MACINTOSH_DRIVERS @@ -0,0 +1 @@ +CONFIG_MACINTOSH_DRIVERS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC_EMUMOUSEBTN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC_EMUMOUSEBTN new file mode 100644 index 000000000000..76b30caa5fb9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC_EMUMOUSEBTN @@ -0,0 +1 @@ +CONFIG_MAC_EMUMOUSEBTN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC_PARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC_PARTITION new file mode 100644 index 000000000000..66fca5aefbe2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAC_PARTITION @@ -0,0 +1 @@ +CONFIG_MAC_PARTITION=y diff 
--git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAG3110 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAG3110 new file mode 100644 index 000000000000..2db4f0a85137 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAG3110 @@ -0,0 +1 @@ +# CONFIG_MAG3110 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MATOM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MATOM new file mode 100644 index 000000000000..d80376d42691 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MATOM @@ -0,0 +1 @@ +# CONFIG_MATOM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1027 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1027 new file mode 100644 index 000000000000..579a537aa6f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1027 @@ -0,0 +1 @@ +# CONFIG_MAX1027 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11100 new file mode 100644 index 000000000000..90819abc810f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11100 @@ -0,0 +1 @@ +# CONFIG_MAX11100 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1118 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1118 new file mode 100644 index 000000000000..615bda2e2325 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1118 @@ -0,0 +1 @@ +# CONFIG_MAX1118 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11205 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11205 new file mode 100644 index 000000000000..c9d46afb3f93 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11205 @@ -0,0 +1 @@ +# CONFIG_MAX11205 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11410 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11410 new file mode 100644 index 000000000000..91912eb0263c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX11410 @@ -0,0 +1 @@ +# CONFIG_MAX11410 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1241 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1241 new 
file mode 100644 index 000000000000..813c3f2cd883 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1241 @@ -0,0 +1 @@ +# CONFIG_MAX1241 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1363 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1363 new file mode 100644 index 000000000000..d0090112ccab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX1363 @@ -0,0 +1 @@ +# CONFIG_MAX1363 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30100 new file mode 100644 index 000000000000..04886e68f320 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30100 @@ -0,0 +1 @@ +# CONFIG_MAX30100 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30102 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30102 new file mode 100644 index 000000000000..5b4aacf3d603 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30102 @@ -0,0 +1 @@ +# CONFIG_MAX30102 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30208 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30208 new file mode 100644 index 000000000000..e324af9d60ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX30208 @@ -0,0 +1 @@ +# CONFIG_MAX30208 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX31856 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX31856 new file mode 100644 index 000000000000..9152f83b6a41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX31856 @@ -0,0 +1 @@ +# CONFIG_MAX31856 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX31865 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX31865 new file mode 100644 index 000000000000..ae3c7ebbb7ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX31865 @@ -0,0 +1 @@ +# CONFIG_MAX31865 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX44000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX44000 new file mode 100644 index 000000000000..d41f59c30a90 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX44000 @@ -0,0 +1 @@ +# CONFIG_MAX44000 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX44009 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX44009 new file mode 100644 index 000000000000..8a008ed653d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX44009 @@ -0,0 +1 @@ +# CONFIG_MAX44009 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX517 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX517 new file mode 100644 index 000000000000..5b3bda9c0a6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX517 @@ -0,0 +1 @@ +# CONFIG_MAX517 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5432 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5432 new file mode 100644 index 000000000000..b6ea28de1bac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5432 @@ -0,0 +1 @@ +# CONFIG_MAX5432 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5481 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5481 new file mode 100644 index 000000000000..36b3781d602c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5481 @@ -0,0 +1 @@ +# CONFIG_MAX5481 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5487 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5487 new file mode 100644 index 000000000000..4ac669fe238a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5487 @@ -0,0 +1 @@ +# CONFIG_MAX5487 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5522 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5522 new file mode 100644 index 000000000000..235413f25805 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5522 @@ -0,0 +1 @@ +# CONFIG_MAX5522 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5821 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5821 new file mode 100644 index 000000000000..a308172ed055 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX5821 @@ -0,0 +1 @@ +# CONFIG_MAX5821 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX9611 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX9611 new file mode 100644 index 000000000000..1cbc674e0c6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAX9611 @@ -0,0 +1 @@ +# CONFIG_MAX9611 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAXIM_THERMOCOUPLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAXIM_THERMOCOUPLE new file mode 100644 index 000000000000..442d4efa6f3e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MAXIM_THERMOCOUPLE @@ -0,0 +1 @@ +# CONFIG_MAXIM_THERMOCOUPLE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MB1232 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MB1232 new file mode 100644 index 000000000000..522f4becb46b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MB1232 @@ -0,0 +1 @@ +# CONFIG_MB1232 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MC3230 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MC3230 new file mode 100644 index 000000000000..e06bd44e29d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MC3230 @@ -0,0 +1 @@ +# CONFIG_MC3230 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP320X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP320X new file mode 100644 index 000000000000..da2f99aaf256 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP320X @@ -0,0 +1 @@ +# CONFIG_MCP320X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3422 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3422 new file mode 100644 index 000000000000..0f00863b4f3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3422 @@ -0,0 +1 @@ +# CONFIG_MCP3422 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3911 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3911 new file mode 100644 index 000000000000..2f846d694559 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP3911 @@ -0,0 +1 @@ +# CONFIG_MCP3911 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4018 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4018 new file mode 100644 index 000000000000..5eb63131176f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4018 @@ -0,0 +1 @@ +# CONFIG_MCP4018 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP41010 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP41010 new file mode 100644 index 000000000000..a1a1bfc15678 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP41010 @@ -0,0 +1 @@ +# CONFIG_MCP41010 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4131 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4131 new file mode 100644 index 000000000000..62d46e619224 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4131 @@ -0,0 +1 @@ +# CONFIG_MCP4131 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4531 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4531 new file mode 100644 index 000000000000..cd27ab52132c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4531 @@ -0,0 +1 @@ +# CONFIG_MCP4531 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4725 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4725 new file mode 100644 index 000000000000..c88886b21911 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4725 @@ -0,0 +1 @@ +# CONFIG_MCP4725 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4728 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4728 new file mode 100644 index 000000000000..59a53b7e1bdc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4728 @@ -0,0 +1 @@ +# CONFIG_MCP4728 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4922 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4922 new file mode 100644 index 000000000000..a78c7a7d1246 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MCP4922 @@ -0,0 +1 @@ +# CONFIG_MCP4922 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MDIO_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MDIO_GPIO new file mode 100644 index 000000000000..9ae9783563da 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MDIO_GPIO @@ -0,0 +1 @@ +# CONFIG_MDIO_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_ANALOG_TV_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_ANALOG_TV_SUPPORT new file mode 100644 index 000000000000..48afe48b8f63 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_ANALOG_TV_SUPPORT @@ -0,0 +1 @@ +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CAMERA_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CAMERA_SUPPORT new file mode 100644 index 000000000000..7b16f1e62723 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CAMERA_SUPPORT @@ -0,0 +1 @@ +# CONFIG_MEDIA_CAMERA_SUPPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_RC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_RC new file mode 100644 index 000000000000..93bb282f05df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_RC @@ -0,0 +1 @@ +# CONFIG_MEDIA_CEC_RC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_SUPPORT new file mode 100644 index 000000000000..c23046c74f96 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_CEC_SUPPORT @@ -0,0 +1 @@ +CONFIG_MEDIA_CEC_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_DIGITAL_TV_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_DIGITAL_TV_SUPPORT new file mode 100644 index 000000000000..fbaf5a88de56 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_DIGITAL_TV_SUPPORT @@ -0,0 +1 @@ +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV new file mode 100644 index 000000000000..ba3e5ca61c3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV @@ -0,0 +1 @@ 
+CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_PCI_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_PCI_SUPPORT new file mode 100644 index 000000000000..74a0751574ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_PCI_SUPPORT @@ -0,0 +1 @@ +CONFIG_MEDIA_PCI_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_PLATFORM_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_PLATFORM_SUPPORT new file mode 100644 index 000000000000..bae8cf596e81 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_PLATFORM_SUPPORT @@ -0,0 +1 @@ +# CONFIG_MEDIA_PLATFORM_SUPPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_RADIO_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_RADIO_SUPPORT new file mode 100644 index 000000000000..25cbb953d8d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_RADIO_SUPPORT @@ -0,0 +1 @@ +# CONFIG_MEDIA_RADIO_SUPPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SDR_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SDR_SUPPORT new file mode 100644 index 000000000000..e75136efaeef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SDR_SUPPORT @@ -0,0 +1 @@ +# CONFIG_MEDIA_SDR_SUPPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUBDRV_AUTOSELECT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUBDRV_AUTOSELECT new file mode 100644 index 000000000000..096cc99ca22f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUBDRV_AUTOSELECT @@ -0,0 +1 @@ +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT new file mode 100644 index 000000000000..518972fe85d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT @@ -0,0 +1 @@ +CONFIG_MEDIA_SUPPORT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT_FILTER 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT_FILTER new file mode 100644 index 000000000000..2e98c59914ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_SUPPORT_FILTER @@ -0,0 +1 @@ +CONFIG_MEDIA_SUPPORT_FILTER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_TEST_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_TEST_SUPPORT new file mode 100644 index 000000000000..da6e02be45a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_TEST_SUPPORT @@ -0,0 +1 @@ +# CONFIG_MEDIA_TEST_SUPPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_USB_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_USB_SUPPORT new file mode 100644 index 000000000000..49ea6a0ae7e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEDIA_USB_SUPPORT @@ -0,0 +1 @@ +CONFIG_MEDIA_USB_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEMSTICK_REALTEK_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEMSTICK_REALTEK_PCI new file mode 100644 index 000000000000..ad0265810c10 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEMSTICK_REALTEK_PCI @@ -0,0 +1 @@ +CONFIG_MEMSTICK_REALTEK_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEMSTICK_REALTEK_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEMSTICK_REALTEK_USB new file mode 100644 index 000000000000..8792e797c800 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MEMSTICK_REALTEK_USB @@ -0,0 +1 @@ +CONFIG_MEMSTICK_REALTEK_USB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MERAKI_MX100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MERAKI_MX100 new file mode 100644 index 000000000000..a6a10756af37 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MERAKI_MX100 @@ -0,0 +1 @@ +# CONFIG_MERAKI_MX100 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_CORE new file mode 100644 index 000000000000..0282448d0276 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_CORE @@ 
-0,0 +1 @@ +CONFIG_MFD_CORE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS new file mode 100644 index 000000000000..f2e6de9b681d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS @@ -0,0 +1 @@ +CONFIG_MFD_INTEL_LPSS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_ACPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_ACPI new file mode 100644 index 000000000000..7be3065fcac9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_ACPI @@ -0,0 +1 @@ +CONFIG_MFD_INTEL_LPSS_ACPI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_PCI new file mode 100644 index 000000000000..93a430846349 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_LPSS_PCI @@ -0,0 +1 @@ +CONFIG_MFD_INTEL_LPSS_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_PMC_BXT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_PMC_BXT new file mode 100644 index 000000000000..d6d6d1a92d8e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_PMC_BXT @@ -0,0 +1 @@ +# CONFIG_MFD_INTEL_PMC_BXT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_QUARK_I2C_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_QUARK_I2C_GPIO new file mode 100644 index 000000000000..e0e51d57709f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_INTEL_QUARK_I2C_GPIO @@ -0,0 +1 @@ +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501 new file mode 100644 index 000000000000..d056f1aad7d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501 @@ -0,0 +1 @@ +CONFIG_MFD_SM501=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501_GPIO new file mode 100644 index 
000000000000..2fffb094aa5a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SM501_GPIO @@ -0,0 +1 @@ +CONFIG_MFD_SM501_GPIO=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SYSCON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SYSCON new file mode 100644 index 000000000000..cab0ef0b79ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_SYSCON @@ -0,0 +1 @@ +# CONFIG_MFD_SYSCON is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_VIPERBOARD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_VIPERBOARD new file mode 100644 index 000000000000..1b26e164db46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_VIPERBOARD @@ -0,0 +1 @@ +CONFIG_MFD_VIPERBOARD=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_VX855 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_VX855 new file mode 100644 index 000000000000..560a3284fd08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MFD_VX855 @@ -0,0 +1 @@ +CONFIG_MFD_VX855=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICREL_KS8995MA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICREL_KS8995MA new file mode 100644 index 000000000000..70b35891446f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICREL_KS8995MA @@ -0,0 +1 @@ +# CONFIG_MICREL_KS8995MA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICROCODE_LATE_LOADING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICROCODE_LATE_LOADING new file mode 100644 index 000000000000..a064646b3431 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICROCODE_LATE_LOADING @@ -0,0 +1 @@ +# CONFIG_MICROCODE_LATE_LOADING is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICROSOFT_MANA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICROSOFT_MANA new file mode 100644 index 000000000000..ad0b10509f92 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICROSOFT_MANA @@ -0,0 +1 @@ +# CONFIG_MICROSOFT_MANA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MINIX_SUBPARTITION 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MINIX_SUBPARTITION new file mode 100644 index 000000000000..65cb50d6b7da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MINIX_SUBPARTITION @@ -0,0 +1 @@ +CONFIG_MINIX_SUBPARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX new file mode 100644 index 000000000000..246e4901febe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX @@ -0,0 +1 @@ +CONFIG_MISC_RTSX=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX_PCI new file mode 100644 index 000000000000..c05d0dda2c79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX_PCI @@ -0,0 +1 @@ +CONFIG_MISC_RTSX_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX_USB new file mode 100644 index 000000000000..c22296e513de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISC_RTSX_USB @@ -0,0 +1 @@ +CONFIG_MISC_RTSX_USB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN new file mode 100644 index 000000000000..b1a6775949da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN @@ -0,0 +1 @@ +CONFIG_MISDN=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_AVMFRITZ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_AVMFRITZ new file mode 100644 index 000000000000..c461708cd5ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_AVMFRITZ @@ -0,0 +1 @@ +CONFIG_MISDN_AVMFRITZ=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_DSP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_DSP new file mode 100644 index 000000000000..897e3c7ea838 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_DSP @@ -0,0 +1 @@ +CONFIG_MISDN_DSP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HDLC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HDLC new file mode 
100644 index 000000000000..01eddd0cb515 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HDLC @@ -0,0 +1 @@ +CONFIG_MISDN_HDLC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCMULTI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCMULTI new file mode 100644 index 000000000000..61c2917c9e4b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCMULTI @@ -0,0 +1 @@ +CONFIG_MISDN_HFCMULTI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCPCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCPCI new file mode 100644 index 000000000000..f9d759bf63fe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCPCI @@ -0,0 +1 @@ +CONFIG_MISDN_HFCPCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCUSB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCUSB new file mode 100644 index 000000000000..5bf22217bbc0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_HFCUSB @@ -0,0 +1 @@ +CONFIG_MISDN_HFCUSB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_INFINEON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_INFINEON new file mode 100644 index 000000000000..ccb4360c41b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_INFINEON @@ -0,0 +1 @@ +CONFIG_MISDN_INFINEON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_IPAC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_IPAC new file mode 100644 index 000000000000..6a53662ae67c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_IPAC @@ -0,0 +1 @@ +CONFIG_MISDN_IPAC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_ISAR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_ISAR new file mode 100644 index 000000000000..566a8e8a4efb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_ISAR @@ -0,0 +1 @@ +CONFIG_MISDN_ISAR=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_L1OIP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_L1OIP new file mode 100644 index 000000000000..3cde6368dfe0 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_L1OIP @@ -0,0 +1 @@ +CONFIG_MISDN_L1OIP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_NETJET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_NETJET new file mode 100644 index 000000000000..379062a8b991 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_NETJET @@ -0,0 +1 @@ +CONFIG_MISDN_NETJET=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_SPEEDFAX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_SPEEDFAX new file mode 100644 index 000000000000..bd52af29d3b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_SPEEDFAX @@ -0,0 +1 @@ +CONFIG_MISDN_SPEEDFAX=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_W6692 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_W6692 new file mode 100644 index 000000000000..9b0b1d892d39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MISDN_W6692 @@ -0,0 +1 @@ +CONFIG_MISDN_W6692=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MK8 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MK8 new file mode 100644 index 000000000000..2f448241b46d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MK8 @@ -0,0 +1 @@ +# CONFIG_MK8 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX90614 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX90614 new file mode 100644 index 000000000000..886455c5d30d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX90614 @@ -0,0 +1 @@ +# CONFIG_MLX90614 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX90632 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX90632 new file mode 100644 index 000000000000..f82858631790 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX90632 @@ -0,0 +1 @@ +# CONFIG_MLX90632 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLXREG_IO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLXREG_IO new file mode 100644 index 000000000000..ce97d907dc59 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLXREG_IO @@ -0,0 +1 @@ +# 
CONFIG_MLXREG_IO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLXREG_LC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLXREG_LC new file mode 100644 index 000000000000..456329a2c287 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLXREG_LC @@ -0,0 +1 @@ +# CONFIG_MLXREG_LC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX_WDT new file mode 100644 index 000000000000..68597aed6498 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MLX_WDT @@ -0,0 +1 @@ +# CONFIG_MLX_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7455_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7455_I2C new file mode 100644 index 000000000000..ba1a64705aa4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7455_I2C @@ -0,0 +1 @@ +# CONFIG_MMA7455_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7455_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7455_SPI new file mode 100644 index 000000000000..5cb36e546b43 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7455_SPI @@ -0,0 +1 @@ +# CONFIG_MMA7455_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7660 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7660 new file mode 100644 index 000000000000..f729209ea417 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA7660 @@ -0,0 +1 @@ +# CONFIG_MMA7660 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA8452 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA8452 new file mode 100644 index 000000000000..ef5caa02aee2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA8452 @@ -0,0 +1 @@ +# CONFIG_MMA8452 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9551 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9551 new file mode 100644 index 000000000000..116e8e246b24 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9551 @@ -0,0 +1 @@ +# CONFIG_MMA9551 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9553 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9553 new file mode 100644 index 000000000000..a1a7373f27ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMA9553 @@ -0,0 +1 @@ +# CONFIG_MMA9553 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC35240 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC35240 new file mode 100644 index 000000000000..9e1cf1c4bcac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC35240 @@ -0,0 +1 @@ +# CONFIG_MMC35240 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMCONF_FAM10H b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMCONF_FAM10H new file mode 100644 index 000000000000..8cf0a943fc05 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMCONF_FAM10H @@ -0,0 +1 @@ +CONFIG_MMCONF_FAM10H=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_MTK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_MTK new file mode 100644 index 000000000000..d92a9072c875 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_MTK @@ -0,0 +1 @@ +# CONFIG_MMC_MTK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_PCI new file mode 100644 index 000000000000..90687a235689 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_PCI @@ -0,0 +1 @@ +CONFIG_MMC_REALTEK_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_USB new file mode 100644 index 000000000000..7268a312edce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_REALTEK_USB @@ -0,0 +1 @@ +CONFIG_MMC_REALTEK_USB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_TOSHIBA_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_TOSHIBA_PCI new file mode 100644 index 000000000000..2ae27a7ef1b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_TOSHIBA_PCI @@ -0,0 +1 @@ +# CONFIG_MMC_TOSHIBA_PCI is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_WBSD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_WBSD new file mode 100644 index 000000000000..886f684b3b18 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMC_WBSD @@ -0,0 +1 @@ +# CONFIG_MMC_WBSD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMIOTRACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMIOTRACE new file mode 100644 index 000000000000..958d609b33c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMIOTRACE @@ -0,0 +1 @@ +# CONFIG_MMIOTRACE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMU_GATHER_MERGE_VMAS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMU_GATHER_MERGE_VMAS new file mode 100644 index 000000000000..2219eddcd362 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MMU_GATHER_MERGE_VMAS @@ -0,0 +1 @@ +CONFIG_MMU_GATHER_MERGE_VMAS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_APPLETOUCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_APPLETOUCH new file mode 100644 index 000000000000..2aa5a2df7651 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_APPLETOUCH @@ -0,0 +1 @@ +CONFIG_MOUSE_APPLETOUCH=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_BCM5974 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_BCM5974 new file mode 100644 index 000000000000..797189442ff1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_BCM5974 @@ -0,0 +1 @@ +CONFIG_MOUSE_BCM5974=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_CYAPA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_CYAPA new file mode 100644 index 000000000000..b0a9d858116b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_CYAPA @@ -0,0 +1 @@ +CONFIG_MOUSE_CYAPA=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_ELAN_I2C_SMBUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_ELAN_I2C_SMBUS new file mode 100644 index 000000000000..f1268e0a84a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_ELAN_I2C_SMBUS @@ -0,0 +1 @@ +# 
CONFIG_MOUSE_ELAN_I2C_SMBUS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ALPS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ALPS new file mode 100644 index 000000000000..a05fe8474326 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ALPS @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_ALPS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_BYD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_BYD new file mode 100644 index 000000000000..11d592e3cde6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_BYD @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_BYD=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_CYPRESS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_CYPRESS new file mode 100644 index 000000000000..794b2cd427b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_CYPRESS @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_CYPRESS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH new file mode 100644 index 000000000000..15c77ec5c901 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_ELANTECH=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH_SMBUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH_SMBUS new file mode 100644 index 000000000000..1aaf34652a40 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ELANTECH_SMBUS @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_FOCALTECH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_FOCALTECH new file mode 100644 index 000000000000..69ec66931721 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_FOCALTECH @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_FOCALTECH=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_LIFEBOOK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_LIFEBOOK new file mode 100644 
index 000000000000..4b0746ac16f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_LIFEBOOK @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_LIFEBOOK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_LOGIPS2PP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_LOGIPS2PP new file mode 100644 index 000000000000..9edffa280c71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_LOGIPS2PP @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_LOGIPS2PP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SENTELIC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SENTELIC new file mode 100644 index 000000000000..14079ba62504 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SENTELIC @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_SENTELIC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SMBUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SMBUS new file mode 100644 index 000000000000..5aa556188ac7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SMBUS @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_SMBUS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SYNAPTICS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SYNAPTICS new file mode 100644 index 000000000000..2ca9a3519561 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SYNAPTICS @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_SYNAPTICS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS new file mode 100644 index 000000000000..cf7fad209597 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_TOUCHKIT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_TOUCHKIT new file mode 100644 index 000000000000..10b1f05ac290 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_TOUCHKIT @@ -0,0 +1 @@ +# CONFIG_MOUSE_PS2_TOUCHKIT is 
not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_TRACKPOINT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_TRACKPOINT new file mode 100644 index 000000000000..9316b0c6a3d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_TRACKPOINT @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_TRACKPOINT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_VMMOUSE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_VMMOUSE new file mode 100644 index 000000000000..d3f895d3b629 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_VMMOUSE @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2_VMMOUSE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_SERIAL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_SERIAL new file mode 100644 index 000000000000..b98a70fa60c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_SERIAL @@ -0,0 +1 @@ +CONFIG_MOUSE_SERIAL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_VSXXXAA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_VSXXXAA new file mode 100644 index 000000000000..1c962b695cd5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_VSXXXAA @@ -0,0 +1 @@ +CONFIG_MOUSE_VSXXXAA=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL115_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL115_I2C new file mode 100644 index 000000000000..627567714a6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL115_I2C @@ -0,0 +1 @@ +# CONFIG_MPL115_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL115_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL115_SPI new file mode 100644 index 000000000000..a4be2fc22f37 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL115_SPI @@ -0,0 +1 @@ +# CONFIG_MPL115_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL3115 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL3115 new file mode 100644 index 000000000000..36eb331ab38f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPL3115 @@ -0,0 +1 @@ +# 
CONFIG_MPL3115 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPRLS0025PA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPRLS0025PA new file mode 100644 index 000000000000..449bd8b9fd23 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPRLS0025PA @@ -0,0 +1 @@ +# CONFIG_MPRLS0025PA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPSC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPSC new file mode 100644 index 000000000000..8993bc8237a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPSC @@ -0,0 +1 @@ +# CONFIG_MPSC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPU3050_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPU3050_I2C new file mode 100644 index 000000000000..92e6cbf51ba2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MPU3050_I2C @@ -0,0 +1 @@ +# CONFIG_MPU3050_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5611 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5611 new file mode 100644 index 000000000000..0bf3597059d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5611 @@ -0,0 +1 @@ +# CONFIG_MS5611 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5637 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5637 new file mode 100644 index 000000000000..867af2e87ebd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MS5637 @@ -0,0 +1 @@ +# CONFIG_MS5637 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSA311 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSA311 new file mode 100644 index 000000000000..e74986a6b968 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSA311 @@ -0,0 +1 @@ +# CONFIG_MSA311 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_EC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_EC new file mode 100644 index 000000000000..bc0e60f4d337 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_EC @@ -0,0 +1 @@ +# CONFIG_MSI_EC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_LAPTOP 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_LAPTOP new file mode 100644 index 000000000000..7d45b2e5e567 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_LAPTOP @@ -0,0 +1 @@ +CONFIG_MSI_LAPTOP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_WMI new file mode 100644 index 000000000000..7949ac91b539 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MSI_WMI @@ -0,0 +1 @@ +CONFIG_MSI_WMI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7601U b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7601U new file mode 100644 index 000000000000..cd5368ee4259 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7601U @@ -0,0 +1 @@ +CONFIG_MT7601U=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7603E b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7603E new file mode 100644 index 000000000000..747b5499c7aa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7603E @@ -0,0 +1 @@ +# CONFIG_MT7603E is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7615E b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7615E new file mode 100644 index 000000000000..f3893bd9457b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7615E @@ -0,0 +1 @@ +# CONFIG_MT7615E is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663S b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663S new file mode 100644 index 000000000000..0597f0e96baa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663S @@ -0,0 +1 @@ +# CONFIG_MT7663S is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663U b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663U new file mode 100644 index 000000000000..c4efff0c4a21 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7663U @@ -0,0 +1 @@ +# CONFIG_MT7663U is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_CORE new file mode 100644 index 000000000000..3c4b786bce82 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_CORE @@ -0,0 +1 @@ +CONFIG_MT76_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_LEDS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_LEDS new file mode 100644 index 000000000000..38082977adbd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_LEDS @@ -0,0 +1 @@ +CONFIG_MT76_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_USB new file mode 100644 index 000000000000..ed4e177e4de8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76_USB @@ -0,0 +1 @@ +CONFIG_MT76_USB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x02_LIB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x02_LIB new file mode 100644 index 000000000000..95bc13d243c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x02_LIB @@ -0,0 +1 @@ +CONFIG_MT76x02_LIB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x02_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x02_USB new file mode 100644 index 000000000000..5c92059065ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x02_USB @@ -0,0 +1 @@ +CONFIG_MT76x02_USB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0E b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0E new file mode 100644 index 000000000000..21e07c8c4ea8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0E @@ -0,0 +1 @@ +# CONFIG_MT76x0E is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0U b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0U new file mode 100644 index 000000000000..b7b54019634b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0U @@ -0,0 +1 @@ +CONFIG_MT76x0U=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0_COMMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0_COMMON new file mode 100644 index 000000000000..c8b6b3776e46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x0_COMMON @@ -0,0 +1 @@ +CONFIG_MT76x0_COMMON=m diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2E b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2E new file mode 100644 index 000000000000..adc8b6205a83 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2E @@ -0,0 +1 @@ +# CONFIG_MT76x2E is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2U b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2U new file mode 100644 index 000000000000..1749e54495ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2U @@ -0,0 +1 @@ +CONFIG_MT76x2U=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2_COMMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2_COMMON new file mode 100644 index 000000000000..6ce98efe7a35 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT76x2_COMMON @@ -0,0 +1 @@ +CONFIG_MT76x2_COMMON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7915E b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7915E new file mode 100644 index 000000000000..3ad870e9f43b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7915E @@ -0,0 +1 @@ +# CONFIG_MT7915E is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921E b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921E new file mode 100644 index 000000000000..b530241f0632 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921E @@ -0,0 +1 @@ +# CONFIG_MT7921E is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921S b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921S new file mode 100644 index 000000000000..b092cbf7bf52 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921S @@ -0,0 +1 @@ +# CONFIG_MT7921S is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921U b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921U new file mode 100644 index 000000000000..85c2cfd81a08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7921U @@ -0,0 +1 @@ +# CONFIG_MT7921U is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7996E b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7996E new file 
mode 100644 index 000000000000..f2e9da094b9d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MT7996E @@ -0,0 +1 @@ +# CONFIG_MT7996E is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MTD_CFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MTD_CFI new file mode 100644 index 000000000000..67915c663d63 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MTD_CFI @@ -0,0 +1 @@ +# CONFIG_MTD_CFI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWAVE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWAVE new file mode 100644 index 000000000000..95a67924d3fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWAVE @@ -0,0 +1 @@ +# CONFIG_MWAVE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX new file mode 100644 index 000000000000..d7d6416710d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX @@ -0,0 +1 @@ +CONFIG_MWIFIEX=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_PCIE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_PCIE new file mode 100644 index 000000000000..80dd05958b8b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_PCIE @@ -0,0 +1 @@ +CONFIG_MWIFIEX_PCIE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_SDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_SDIO new file mode 100644 index 000000000000..fa88327f350a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_SDIO @@ -0,0 +1 @@ +CONFIG_MWIFIEX_SDIO=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_USB new file mode 100644 index 000000000000..fac2cb3d282c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWIFIEX_USB @@ -0,0 +1 @@ +CONFIG_MWIFIEX_USB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWL8K b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWL8K new file mode 100644 index 000000000000..0d993e9f1250 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MWL8K 
@@ -0,0 +1 @@ +# CONFIG_MWL8K is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC4005 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC4005 new file mode 100644 index 000000000000..6094387320ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC4005 @@ -0,0 +1 @@ +# CONFIG_MXC4005 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC6255 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC6255 new file mode 100644 index 000000000000..e834351d7c88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXC6255 @@ -0,0 +1 @@ +# CONFIG_MXC6255 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXM_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXM_WMI new file mode 100644 index 000000000000..7e2ecf29cd72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MXM_WMI @@ -0,0 +1 @@ +CONFIG_MXM_WMI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE new file mode 100644 index 000000000000..a3b2d57f516d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE @@ -0,0 +1 @@ +CONFIG_MYRI10GE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE_DCA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE_DCA new file mode 100644 index 000000000000..7b5904a76f5e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_MYRI10GE_DCA @@ -0,0 +1 @@ +CONFIG_MYRI10GE_DCA=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NAU7802 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NAU7802 new file mode 100644 index 000000000000..2bff4c70af36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NAU7802 @@ -0,0 +1 @@ +# CONFIG_NAU7802 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_BTT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_BTT new file mode 100644 index 000000000000..2a3ebca583a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_BTT @@ -0,0 +1 @@ +CONFIG_ND_BTT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_PFN 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_PFN new file mode 100644 index 000000000000..6c96748c6c2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ND_PFN @@ -0,0 +1 @@ +CONFIG_ND_PFN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_AMD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_AMD new file mode 100644 index 000000000000..606381824077 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_AMD @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_AMD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_BROCADE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_BROCADE new file mode 100644 index 000000000000..5ca46d872d02 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_BROCADE @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_BROCADE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_CISCO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_CISCO new file mode 100644 index 000000000000..0c803ecf23fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_CISCO @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_CISCO=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_DEC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_DEC new file mode 100644 index 000000000000..5d2f9d54c775 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_DEC @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_DEC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_EMULEX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_EMULEX new file mode 100644 index 000000000000..8751238f9255 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_EMULEX @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_EMULEX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_QUALCOMM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_QUALCOMM new file mode 100644 index 000000000000..585724f65948 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NET_VENDOR_QUALCOMM @@ -0,0 +1 @@ +# CONFIG_NET_VENDOR_QUALCOMM is not set 
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NI903X_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NI903X_WDT new file mode 100644 index 000000000000..117073a49351 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NI903X_WDT @@ -0,0 +1 @@ +# CONFIG_NI903X_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NIC7018_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NIC7018_WDT new file mode 100644 index 000000000000..8bc086748983 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NIC7018_WDT @@ -0,0 +1 @@ +# CONFIG_NIC7018_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NITRO_ENCLAVES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NITRO_ENCLAVES new file mode 100644 index 000000000000..517f0308e514 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NITRO_ENCLAVES @@ -0,0 +1 @@ +# CONFIG_NITRO_ENCLAVES is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NMI_CHECK_CPU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NMI_CHECK_CPU new file mode 100644 index 000000000000..91d403d72e60 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NMI_CHECK_CPU @@ -0,0 +1 @@ +# CONFIG_NMI_CHECK_CPU is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NOA1305 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NOA1305 new file mode 100644 index 000000000000..2d1dd8731570 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NOA1305 @@ -0,0 +1 @@ +# CONFIG_NOA1305 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NOZOMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NOZOMI new file mode 100644 index 000000000000..e94a0abf8b3f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NOZOMI @@ -0,0 +1 @@ +CONFIG_NOZOMI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_AMD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_AMD new file mode 100644 index 000000000000..b458f9403636 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_AMD @@ -0,0 +1 @@ +# CONFIG_NTB_AMD is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_INTEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_INTEL new file mode 100644 index 000000000000..cca7a6dd7d22 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NTB_INTEL @@ -0,0 +1 @@ +# CONFIG_NTB_INTEL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NVIDIA_WMI_EC_BACKLIGHT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NVIDIA_WMI_EC_BACKLIGHT new file mode 100644 index 000000000000..dee8c6d51740 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NVIDIA_WMI_EC_BACKLIGHT @@ -0,0 +1 @@ +# CONFIG_NVIDIA_WMI_EC_BACKLIGHT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_NVSW_SN2201 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NVSW_SN2201 new file mode 100644 index 000000000000..e1f1d8e831e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_NVSW_SN2201 @@ -0,0 +1 @@ +# CONFIG_NVSW_SN2201 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL new file mode 100644 index 000000000000..cf3a9f20f93d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL @@ -0,0 +1 @@ +CONFIG_OBJTOOL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OF new file mode 100644 index 000000000000..d15176e9c762 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OF @@ -0,0 +1 @@ +# CONFIG_OF is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT3001 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT3001 new file mode 100644 index 000000000000..359f9a14edb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT3001 @@ -0,0 +1 @@ +# CONFIG_OPT3001 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT4001 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT4001 new file mode 100644 index 000000000000..f438ae10f521 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OPT4001 @@ -0,0 +1 @@ +# CONFIG_OPT4001 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OSF_PARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OSF_PARTITION new file mode 100644 index 000000000000..8ff387ae9171 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OSF_PARTITION @@ -0,0 +1 @@ +CONFIG_OSF_PARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OUTPUT_FORMAT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OUTPUT_FORMAT new file mode 100644 index 000000000000..30e0011c9310 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OUTPUT_FORMAT @@ -0,0 +1 @@ +CONFIG_OUTPUT_FORMAT="elf64-x86-64" diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_P2SB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_P2SB new file mode 100644 index 000000000000..0cef1061a317 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_P2SB @@ -0,0 +1 @@ +CONFIG_P2SB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PA12203001 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PA12203001 new file mode 100644 index 000000000000..99737de75f87 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PA12203001 @@ -0,0 +1 @@ +# CONFIG_PA12203001 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PANASONIC_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PANASONIC_LAPTOP new file mode 100644 index 000000000000..f1ac44b84b8c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PANASONIC_LAPTOP @@ -0,0 +1 @@ +CONFIG_PANASONIC_LAPTOP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PANEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PANEL new file mode 100644 index 000000000000..de8051393a12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PANEL @@ -0,0 +1 @@ +# CONFIG_PANEL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARAVIRT_CLOCK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARAVIRT_CLOCK new file mode 100644 index 000000000000..a0977b3b88fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARAVIRT_CLOCK @@ -0,0 +1 @@ +CONFIG_PARAVIRT_CLOCK=y diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARAVIRT_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARAVIRT_DEBUG new file mode 100644 index 000000000000..7b0071250f80 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARAVIRT_DEBUG @@ -0,0 +1 @@ +# CONFIG_PARAVIRT_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT new file mode 100644 index 000000000000..589156958a51 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT @@ -0,0 +1 @@ +CONFIG_PARPORT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_1284 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_1284 new file mode 100644 index 000000000000..585684fb06a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_1284 @@ -0,0 +1 @@ +CONFIG_PARPORT_1284=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_NOT_PC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_NOT_PC new file mode 100644 index 000000000000..2f34bccd2cf5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_NOT_PC @@ -0,0 +1 @@ +CONFIG_PARPORT_NOT_PC=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC new file mode 100644 index 000000000000..b9aa6e8cad78 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC @@ -0,0 +1 @@ +CONFIG_PARPORT_PC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC_FIFO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC_FIFO new file mode 100644 index 000000000000..62562af4c548 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC_FIFO @@ -0,0 +1 @@ +# CONFIG_PARPORT_PC_FIFO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC_SUPERIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC_SUPERIO new file mode 100644 index 000000000000..b6858ce795a2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_PC_SUPERIO @@ -0,0 +1 @@ +# CONFIG_PARPORT_PC_SUPERIO is not set 
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_SERIAL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_SERIAL new file mode 100644 index 000000000000..8e90201908b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PARPORT_SERIAL @@ -0,0 +1 @@ +CONFIG_PARPORT_SERIAL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PATA_PARPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PATA_PARPORT new file mode 100644 index 000000000000..2c3271235ae1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PATA_PARPORT @@ -0,0 +1 @@ +# CONFIG_PATA_PARPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PC87413_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PC87413_WDT new file mode 100644 index 000000000000..5cd896accfac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PC87413_WDT @@ -0,0 +1 @@ +# CONFIG_PC87413_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCENGINES_APU2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCENGINES_APU2 new file mode 100644 index 000000000000..ee09db0f494e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCENGINES_APU2 @@ -0,0 +1 @@ +# CONFIG_PCENGINES_APU2 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_DIRECT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_DIRECT new file mode 100644 index 000000000000..1d4923bf1df4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_DIRECT @@ -0,0 +1 @@ +CONFIG_PCI_DIRECT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_LOCKLESS_CONFIG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_LOCKLESS_CONFIG new file mode 100644 index 000000000000..486e469e47ca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_LOCKLESS_CONFIG @@ -0,0 +1 @@ +CONFIG_PCI_LOCKLESS_CONFIG=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_XEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_XEN new file mode 100644 index 000000000000..729d7020fade --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCI_XEN @@ -0,0 +1 @@ +CONFIG_PCI_XEN=y 
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PHY_CPCAP_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PHY_CPCAP_USB new file mode 100644 index 000000000000..c68874d74317 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PHY_CPCAP_USB @@ -0,0 +1 @@ +# CONFIG_PHY_CPCAP_USB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PHY_INTEL_LGM_EMMC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PHY_INTEL_LGM_EMMC new file mode 100644 index 000000000000..e268046158e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PHY_INTEL_LGM_EMMC @@ -0,0 +1 @@ +# CONFIG_PHY_INTEL_LGM_EMMC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ALDERLAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ALDERLAKE new file mode 100644 index 000000000000..fc34a4ac0016 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ALDERLAKE @@ -0,0 +1 @@ +# CONFIG_PINCTRL_ALDERLAKE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_BAYTRAIL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_BAYTRAIL new file mode 100644 index 000000000000..706680265234 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_BAYTRAIL @@ -0,0 +1 @@ +CONFIG_PINCTRL_BAYTRAIL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_BROXTON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_BROXTON new file mode 100644 index 000000000000..563eb973f8ad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_BROXTON @@ -0,0 +1 @@ +CONFIG_PINCTRL_BROXTON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CANNONLAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CANNONLAKE new file mode 100644 index 000000000000..db4143366c2a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CANNONLAKE @@ -0,0 +1 @@ +CONFIG_PINCTRL_CANNONLAKE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CEDARFORK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CEDARFORK new file mode 100644 index 000000000000..b723950b2601 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CEDARFORK @@ -0,0 +1 @@ +CONFIG_PINCTRL_CEDARFORK=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CHERRYVIEW b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CHERRYVIEW new file mode 100644 index 000000000000..6a7cb75e56c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_CHERRYVIEW @@ -0,0 +1 @@ +# CONFIG_PINCTRL_CHERRYVIEW is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_DENVERTON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_DENVERTON new file mode 100644 index 000000000000..ec6b4e8e1eb9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_DENVERTON @@ -0,0 +1 @@ +CONFIG_PINCTRL_DENVERTON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ELKHARTLAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ELKHARTLAKE new file mode 100644 index 000000000000..224fd47cc178 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ELKHARTLAKE @@ -0,0 +1 @@ +# CONFIG_PINCTRL_ELKHARTLAKE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_EMMITSBURG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_EMMITSBURG new file mode 100644 index 000000000000..c347d0b35ef0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_EMMITSBURG @@ -0,0 +1 @@ +# CONFIG_PINCTRL_EMMITSBURG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_GEMINILAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_GEMINILAKE new file mode 100644 index 000000000000..fca1ee23ae24 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_GEMINILAKE @@ -0,0 +1 @@ +CONFIG_PINCTRL_GEMINILAKE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ICELAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ICELAKE new file mode 100644 index 000000000000..f208784ed29d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_ICELAKE @@ -0,0 +1 @@ +CONFIG_PINCTRL_ICELAKE=m diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_INTEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_INTEL new file mode 100644 index 000000000000..bd4371168eb2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_INTEL @@ -0,0 +1 @@ +CONFIG_PINCTRL_INTEL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_JASPERLAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_JASPERLAKE new file mode 100644 index 000000000000..09f84c415cfe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_JASPERLAKE @@ -0,0 +1 @@ +# CONFIG_PINCTRL_JASPERLAKE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LAKEFIELD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LAKEFIELD new file mode 100644 index 000000000000..7333be215f28 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LAKEFIELD @@ -0,0 +1 @@ +# CONFIG_PINCTRL_LAKEFIELD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LEWISBURG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LEWISBURG new file mode 100644 index 000000000000..c2c51192e2ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LEWISBURG @@ -0,0 +1 @@ +CONFIG_PINCTRL_LEWISBURG=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LYNXPOINT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LYNXPOINT new file mode 100644 index 000000000000..252078559e79 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_LYNXPOINT @@ -0,0 +1 @@ +# CONFIG_PINCTRL_LYNXPOINT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_METEORLAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_METEORLAKE new file mode 100644 index 000000000000..82f12b6db0df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_METEORLAKE @@ -0,0 +1 @@ +# CONFIG_PINCTRL_METEORLAKE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_SUNRISEPOINT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_SUNRISEPOINT new file mode 100644 index 
000000000000..f47b9a76dab6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_SUNRISEPOINT @@ -0,0 +1 @@ +CONFIG_PINCTRL_SUNRISEPOINT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_TIGERLAKE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_TIGERLAKE new file mode 100644 index 000000000000..4837dfaf5624 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PINCTRL_TIGERLAKE @@ -0,0 +1 @@ +# CONFIG_PINCTRL_TIGERLAKE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PING new file mode 100644 index 000000000000..d0dcdc8edcaf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PING @@ -0,0 +1 @@ +# CONFIG_PING is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PLFXLC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PLFXLC new file mode 100644 index 000000000000..eb04922b3722 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PLFXLC @@ -0,0 +1 @@ +# CONFIG_PLFXLC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PLIP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PLIP new file mode 100644 index 000000000000..b8c35e075a95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PLIP @@ -0,0 +1 @@ +# CONFIG_PLIP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PMIC_OPREGION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PMIC_OPREGION new file mode 100644 index 000000000000..4dbdfac30b9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PMIC_OPREGION @@ -0,0 +1 @@ +CONFIG_PMIC_OPREGION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PNP_DEBUG_MESSAGES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PNP_DEBUG_MESSAGES new file mode 100644 index 000000000000..cb591885f895 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PNP_DEBUG_MESSAGES @@ -0,0 +1 @@ +# CONFIG_PNP_DEBUG_MESSAGES is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_POWER_RESET_RESTART b/anolis/configs/L2-OPTIONAL/x86/CONFIG_POWER_RESET_RESTART new file mode 100644 index 
000000000000..62520aa1d41d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_POWER_RESET_RESTART @@ -0,0 +1 @@ +# CONFIG_POWER_RESET_RESTART is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PPDEV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PPDEV new file mode 100644 index 000000000000..34c75b4ab86e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PPDEV @@ -0,0 +1 @@ +CONFIG_PPDEV=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PPS_CLIENT_PARPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PPS_CLIENT_PARPORT new file mode 100644 index 000000000000..d11894deff17 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PPS_CLIENT_PARPORT @@ -0,0 +1 @@ +CONFIG_PPS_CLIENT_PARPORT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PREFIX_SYMBOLS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PREFIX_SYMBOLS new file mode 100644 index 000000000000..8b56494e9fcf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PREFIX_SYMBOLS @@ -0,0 +1 @@ +CONFIG_PREFIX_SYMBOLS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PRINTER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PRINTER new file mode 100644 index 000000000000..9b82c068b790 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PRINTER @@ -0,0 +1 @@ +CONFIG_PRINTER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PROC_PID_ARCH_STATUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PROC_PID_ARCH_STATUS new file mode 100644 index 000000000000..5498b06e9ae2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PROC_PID_ARCH_STATUS @@ -0,0 +1 @@ +CONFIG_PROC_PID_ARCH_STATUS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PROVIDE_OHCI1394_DMA_INIT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PROVIDE_OHCI1394_DMA_INIT new file mode 100644 index 000000000000..eee4011e1dee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PROVIDE_OHCI1394_DMA_INIT @@ -0,0 +1 @@ +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_KVM 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_KVM new file mode 100644 index 000000000000..203f6b757d8c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_KVM @@ -0,0 +1 @@ +CONFIG_PTP_1588_CLOCK_KVM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_VMW b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_VMW new file mode 100644 index 000000000000..4b0c1ae34689 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PTP_1588_CLOCK_VMW @@ -0,0 +1 @@ +# CONFIG_PTP_1588_CLOCK_VMW is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PUNIT_ATOM_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PUNIT_ATOM_DEBUG new file mode 100644 index 000000000000..0507cd497376 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PUNIT_ATOM_DEBUG @@ -0,0 +1 @@ +# CONFIG_PUNIT_ATOM_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PVH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PVH new file mode 100644 index 000000000000..2311dfd77b95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PVH @@ -0,0 +1 @@ +# CONFIG_PVH is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS new file mode 100644 index 000000000000..7e6a48990a9a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS @@ -0,0 +1 @@ +CONFIG_PWM_LPSS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PCI new file mode 100644 index 000000000000..d7ed9139ee8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PCI @@ -0,0 +1 @@ +CONFIG_PWM_LPSS_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PLATFORM new file mode 100644 index 000000000000..d9042d8b82a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PWM_LPSS_PLATFORM @@ -0,0 +1 @@ +CONFIG_PWM_LPSS_PLATFORM=m diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA new file mode 100644 index 000000000000..b58063523974 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA @@ -0,0 +1 @@ +# CONFIG_QCOM_HIDMA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA_MGMT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA_MGMT new file mode 100644 index 000000000000..4b37c0348451 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_QCOM_HIDMA_MGMT @@ -0,0 +1 @@ +# CONFIG_QCOM_HIDMA_MGMT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_QTNFMAC_PCIE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_QTNFMAC_PCIE new file mode 100644 index 000000000000..da040aaf738b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_QTNFMAC_PCIE @@ -0,0 +1 @@ +# CONFIG_QTNFMAC_PCIE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_ATI_REMOTE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_ATI_REMOTE new file mode 100644 index 000000000000..9912b0af8637 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_ATI_REMOTE @@ -0,0 +1 @@ +CONFIG_RC_ATI_REMOTE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_CORE new file mode 100644 index 000000000000..73d773223623 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_CORE @@ -0,0 +1 @@ +CONFIG_RC_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_DECODERS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_DECODERS new file mode 100644 index 000000000000..ae93cb8deb27 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_DECODERS @@ -0,0 +1 @@ +CONFIG_RC_DECODERS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_DEVICES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_DEVICES new file mode 100644 index 000000000000..fb9a9e567616 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_DEVICES @@ -0,0 +1 @@ +CONFIG_RC_DEVICES=y diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_LOOPBACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_LOOPBACK new file mode 100644 index 000000000000..a0ee48052143 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_LOOPBACK @@ -0,0 +1 @@ +# CONFIG_RC_LOOPBACK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_MAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_MAP new file mode 100644 index 000000000000..910c883d4fff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_MAP @@ -0,0 +1 @@ +CONFIG_RC_MAP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_XBOX_DVD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_XBOX_DVD new file mode 100644 index 000000000000..2f36a11126db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RC_XBOX_DVD @@ -0,0 +1 @@ +# CONFIG_RC_XBOX_DVD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_REGULATOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_REGULATOR new file mode 100644 index 000000000000..3987d79a1038 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_REGULATOR @@ -0,0 +1 @@ +# CONFIG_REGULATOR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RETHOOK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RETHOOK new file mode 100644 index 000000000000..34673e12450f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RETHOOK @@ -0,0 +1 @@ +CONFIG_RETHOOK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RFD77402 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RFD77402 new file mode 100644 index 000000000000..00383ffd289b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RFD77402 @@ -0,0 +1 @@ +# CONFIG_RFD77402 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RFKILL_GPIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RFKILL_GPIO new file mode 100644 index 000000000000..400303dcf315 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RFKILL_GPIO @@ -0,0 +1 @@ +# CONFIG_RFKILL_GPIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RICHTEK_RTQ6056 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RICHTEK_RTQ6056 new file mode 100644 index 000000000000..69a728e657c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RICHTEK_RTQ6056 @@ -0,0 +1 @@ +# CONFIG_RICHTEK_RTQ6056 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RMI4_F34 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RMI4_F34 new file mode 100644 index 000000000000..d9f34d253d7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RMI4_F34 @@ -0,0 +1 @@ +# CONFIG_RMI4_F34 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RMI4_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RMI4_SPI new file mode 100644 index 000000000000..70a3ee1732f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RMI4_SPI @@ -0,0 +1 @@ +# CONFIG_RMI4_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ROHM_BU27008 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ROHM_BU27008 new file mode 100644 index 000000000000..e3ef46a987c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ROHM_BU27008 @@ -0,0 +1 @@ +# CONFIG_ROHM_BU27008 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ROHM_BU27034 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ROHM_BU27034 new file mode 100644 index 000000000000..8596aaa4a8b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ROHM_BU27034 @@ -0,0 +1 @@ +# CONFIG_ROHM_BU27034 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RPR0521 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RPR0521 new file mode 100644 index 000000000000..31f257edd382 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RPR0521 @@ -0,0 +1 @@ +# CONFIG_RPR0521 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2400PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2400PCI new file mode 100644 index 000000000000..1e1b2bf28509 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2400PCI @@ -0,0 +1 @@ +# CONFIG_RT2400PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2500PCI 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2500PCI new file mode 100644 index 000000000000..ac28e43e46d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2500PCI @@ -0,0 +1 @@ +# CONFIG_RT2500PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2500USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2500USB new file mode 100644 index 000000000000..f2fd3effd6b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2500USB @@ -0,0 +1 @@ +# CONFIG_RT2500USB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI new file mode 100644 index 000000000000..6a092667a1bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI @@ -0,0 +1 @@ +CONFIG_RT2800PCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT3290 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT3290 new file mode 100644 index 000000000000..b407d7ee54e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT3290 @@ -0,0 +1 @@ +CONFIG_RT2800PCI_RT3290=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT33XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT33XX new file mode 100644 index 000000000000..fc48a116e1e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT33XX @@ -0,0 +1 @@ +CONFIG_RT2800PCI_RT33XX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT35XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT35XX new file mode 100644 index 000000000000..f9d3762c396c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT35XX @@ -0,0 +1 @@ +CONFIG_RT2800PCI_RT35XX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT53XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT53XX new file mode 100644 index 000000000000..6527124788c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800PCI_RT53XX @@ -0,0 +1 @@ +CONFIG_RT2800PCI_RT53XX=y diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB new file mode 100644 index 000000000000..d6eaffd7b958 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB @@ -0,0 +1 @@ +CONFIG_RT2800USB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT33XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT33XX new file mode 100644 index 000000000000..f86c4e42f757 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT33XX @@ -0,0 +1 @@ +CONFIG_RT2800USB_RT33XX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT3573 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT3573 new file mode 100644 index 000000000000..df9ae1acc02d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT3573 @@ -0,0 +1 @@ +CONFIG_RT2800USB_RT3573=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT35XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT35XX new file mode 100644 index 000000000000..52c191500246 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT35XX @@ -0,0 +1 @@ +CONFIG_RT2800USB_RT35XX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT53XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT53XX new file mode 100644 index 000000000000..b896dc2ee99d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT53XX @@ -0,0 +1 @@ +CONFIG_RT2800USB_RT53XX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT55XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT55XX new file mode 100644 index 000000000000..f585f4f02f87 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_RT55XX @@ -0,0 +1 @@ +CONFIG_RT2800USB_RT55XX=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_UNKNOWN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_UNKNOWN new file mode 100644 index 000000000000..95d862008c96 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800USB_UNKNOWN @@ -0,0 +1 
@@ +CONFIG_RT2800USB_UNKNOWN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB new file mode 100644 index 000000000000..7cfb7cfaf18e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB @@ -0,0 +1 @@ +CONFIG_RT2800_LIB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB_MMIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB_MMIO new file mode 100644 index 000000000000..f2b1f406312a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2800_LIB_MMIO @@ -0,0 +1 @@ +CONFIG_RT2800_LIB_MMIO=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00 new file mode 100644 index 000000000000..b9390bdc8207 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00 @@ -0,0 +1 @@ +CONFIG_RT2X00=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_DEBUG new file mode 100644 index 000000000000..65f957c23762 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_DEBUG @@ -0,0 +1 @@ +# CONFIG_RT2X00_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB new file mode 100644 index 000000000000..254fa39478f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_CRYPTO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_CRYPTO new file mode 100644 index 000000000000..cb52824844db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_CRYPTO @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB_CRYPTO=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_DEBUGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_DEBUGFS new file mode 100644 index 000000000000..bec40f8c8538 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_DEBUGFS @@ -0,0 +1 @@ 
+CONFIG_RT2X00_LIB_DEBUGFS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_FIRMWARE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_FIRMWARE new file mode 100644 index 000000000000..9905d3c4bfde --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_FIRMWARE @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB_FIRMWARE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_LEDS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_LEDS new file mode 100644 index 000000000000..25f61afcc5ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_LEDS @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB_LEDS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_MMIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_MMIO new file mode 100644 index 000000000000..cde752d572bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_MMIO @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB_MMIO=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_PCI new file mode 100644 index 000000000000..a11f3c79264e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_PCI @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_USB new file mode 100644 index 000000000000..0a6599a5c408 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT2X00_LIB_USB @@ -0,0 +1 @@ +CONFIG_RT2X00_LIB_USB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT61PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT61PCI new file mode 100644 index 000000000000..903ef5f2bcf6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT61PCI @@ -0,0 +1 @@ +# CONFIG_RT61PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT73USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT73USB new file mode 100644 index 000000000000..bc5c783f7852 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RT73USB @@ -0,0 +1 
@@ +# CONFIG_RT73USB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABB5ZES3 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABB5ZES3 new file mode 100644 index 000000000000..e7552fd8f373 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABB5ZES3 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_ABB5ZES3 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABX80X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABX80X new file mode 100644 index 000000000000..5a4765f016c3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_ABX80X @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_ABX80X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1305 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1305 new file mode 100644 index 000000000000..5d85197dc971 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1305 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1305 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1343 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1343 new file mode 100644 index 000000000000..ace147d2e97f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1343 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1343 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1347 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1347 new file mode 100644 index 000000000000..b0b49f400be9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1347 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1347 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1374_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1374_WDT new file mode 100644 index 000000000000..a908c2f97590 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1374_WDT @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1374_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1390 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1390 new file mode 100644 index 
000000000000..710b4edafced --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1390 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1390 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1685_FAMILY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1685_FAMILY new file mode 100644 index 000000000000..8389612ed98a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_DS1685_FAMILY @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_DS1685_FAMILY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_HID_SENSOR_TIME b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_HID_SENSOR_TIME new file mode 100644 index 000000000000..0c15783d95fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_HID_SENSOR_TIME @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T93 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T93 new file mode 100644 index 000000000000..1339d6d907eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T93 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_M41T93 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T94 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T94 new file mode 100644 index 000000000000..d39df1187ae8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_M41T94 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_M41T94 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_MAX6902 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_MAX6902 new file mode 100644 index 000000000000..08bfbe6afff9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_MAX6902 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_MAX6902 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_MCP795 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_MCP795 new file mode 100644 index 000000000000..ccef499632d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_MCP795 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_MCP795 is 
not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2123 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2123 new file mode 100644 index 000000000000..ae2d421c1b08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2123 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_PCF2123 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2127 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2127 new file mode 100644 index 000000000000..8f90e9341ad0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF2127 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_PCF2127 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF85063 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF85063 new file mode 100644 index 000000000000..b64437deff48 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_PCF85063 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_PCF85063 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_R9701 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_R9701 new file mode 100644 index 000000000000..ff1d81f983c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_R9701 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_R9701 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RS5C348 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RS5C348 new file mode 100644 index 000000000000..b24f6e702c74 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RS5C348 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RS5C348 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX4581 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX4581 new file mode 100644 index 000000000000..6131953dc63a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX4581 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RX4581 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX8010 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX8010 new file mode 100644 index 000000000000..8cf6aa26c288 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_DRV_RX8010 @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_RX8010 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_MC146818_LIB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_MC146818_LIB new file mode 100644 index 000000000000..0e5b5fd26245 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTC_MC146818_LIB @@ -0,0 +1 @@ +CONFIG_RTC_MC146818_LIB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8180 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8180 new file mode 100644 index 000000000000..4fc37a64cb2a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8180 @@ -0,0 +1 @@ +# CONFIG_RTL8180 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8187 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8187 new file mode 100644 index 000000000000..fc72ad4a47ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8187 @@ -0,0 +1 @@ +# CONFIG_RTL8187 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8188EE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8188EE new file mode 100644 index 000000000000..d5e7eaf4342a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8188EE @@ -0,0 +1 @@ +CONFIG_RTL8188EE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192CE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192CE new file mode 100644 index 000000000000..d9088f1a4b53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192CE @@ -0,0 +1 @@ +CONFIG_RTL8192CE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192CU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192CU new file mode 100644 index 000000000000..eaf668f4ad57 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192CU @@ -0,0 +1 @@ +CONFIG_RTL8192CU=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192C_COMMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192C_COMMON new file mode 100644 index 000000000000..4642bcc37a0e --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192C_COMMON @@ -0,0 +1 @@ +CONFIG_RTL8192C_COMMON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192DE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192DE new file mode 100644 index 000000000000..c8c6d22412e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192DE @@ -0,0 +1 @@ +CONFIG_RTL8192DE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192EE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192EE new file mode 100644 index 000000000000..2a52d41d83dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192EE @@ -0,0 +1 @@ +CONFIG_RTL8192EE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192SE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192SE new file mode 100644 index 000000000000..afaa055bb699 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8192SE @@ -0,0 +1 @@ +CONFIG_RTL8192SE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723AE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723AE new file mode 100644 index 000000000000..2c83e20a2863 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723AE @@ -0,0 +1 @@ +CONFIG_RTL8723AE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723BE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723BE new file mode 100644 index 000000000000..cf446b816346 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723BE @@ -0,0 +1 @@ +CONFIG_RTL8723BE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723_COMMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723_COMMON new file mode 100644 index 000000000000..a3c839013adf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8723_COMMON @@ -0,0 +1 @@ +CONFIG_RTL8723_COMMON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8821AE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8821AE new file mode 100644 index 000000000000..3498bf5db32d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8821AE @@ -0,0 +1 @@ +CONFIG_RTL8821AE=m diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU new file mode 100644 index 000000000000..13b2ed870b58 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU @@ -0,0 +1 @@ +CONFIG_RTL8XXXU=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU_UNTESTED b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU_UNTESTED new file mode 100644 index 000000000000..44fccdd4f30d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL8XXXU_UNTESTED @@ -0,0 +1 @@ +# CONFIG_RTL8XXXU_UNTESTED is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLBTCOEXIST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLBTCOEXIST new file mode 100644 index 000000000000..065d8886cd56 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLBTCOEXIST @@ -0,0 +1 @@ +CONFIG_RTLBTCOEXIST=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI new file mode 100644 index 000000000000..61ed2c5ea859 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI @@ -0,0 +1 @@ +CONFIG_RTLWIFI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_DEBUG new file mode 100644 index 000000000000..e61d486f935e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_DEBUG @@ -0,0 +1 @@ +# CONFIG_RTLWIFI_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_PCI new file mode 100644 index 000000000000..620478ab7281 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_PCI @@ -0,0 +1 @@ +CONFIG_RTLWIFI_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_USB new file mode 100644 index 000000000000..b41beeb8ae33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTLWIFI_USB @@ -0,0 +1 @@ +CONFIG_RTLWIFI_USB=m diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL_CARDS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL_CARDS new file mode 100644 index 000000000000..61b7c276b95d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTL_CARDS @@ -0,0 +1 @@ +CONFIG_RTL_CARDS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88 new file mode 100644 index 000000000000..df8a6f6037b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88 @@ -0,0 +1 @@ +CONFIG_RTW88=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DE new file mode 100644 index 000000000000..78ccc3dc8d89 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DE @@ -0,0 +1 @@ +# CONFIG_RTW88_8723DE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DS new file mode 100644 index 000000000000..1a5233a57937 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DS @@ -0,0 +1 @@ +# CONFIG_RTW88_8723DS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DU new file mode 100644 index 000000000000..a23da9457d54 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8723DU @@ -0,0 +1 @@ +# CONFIG_RTW88_8723DU is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CE new file mode 100644 index 000000000000..1f652b03a1e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CE @@ -0,0 +1 @@ +# CONFIG_RTW88_8821CE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CS new file mode 100644 index 000000000000..1d6799b61dda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CS @@ -0,0 +1 @@ +# CONFIG_RTW88_8821CS is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CU new file mode 100644 index 000000000000..26c987469ce9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8821CU @@ -0,0 +1 @@ +# CONFIG_RTW88_8821CU is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822B b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822B new file mode 100644 index 000000000000..21a486b59f5a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822B @@ -0,0 +1 @@ +CONFIG_RTW88_8822B=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BE new file mode 100644 index 000000000000..d7170a75de5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BE @@ -0,0 +1 @@ +CONFIG_RTW88_8822BE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BS new file mode 100644 index 000000000000..50070985683e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BS @@ -0,0 +1 @@ +# CONFIG_RTW88_8822BS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BU new file mode 100644 index 000000000000..e975fb1c76ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822BU @@ -0,0 +1 @@ +# CONFIG_RTW88_8822BU is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822C new file mode 100644 index 000000000000..8cea7041df6d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822C @@ -0,0 +1 @@ +CONFIG_RTW88_8822C=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CE new file mode 100644 index 000000000000..2cc838e52b08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CE @@ -0,0 +1 @@ +CONFIG_RTW88_8822CE=m diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CS new file mode 100644 index 000000000000..be9f5c890868 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CS @@ -0,0 +1 @@ +# CONFIG_RTW88_8822CS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CU new file mode 100644 index 000000000000..90982727f855 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_8822CU @@ -0,0 +1 @@ +# CONFIG_RTW88_8822CU is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_CORE new file mode 100644 index 000000000000..df829f192633 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_CORE @@ -0,0 +1 @@ +CONFIG_RTW88_CORE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_DEBUG new file mode 100644 index 000000000000..da89c1e0a6dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_DEBUG @@ -0,0 +1 @@ +# CONFIG_RTW88_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_DEBUGFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_DEBUGFS new file mode 100644 index 000000000000..d810b056cdb6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_DEBUGFS @@ -0,0 +1 @@ +# CONFIG_RTW88_DEBUGFS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_PCI new file mode 100644 index 000000000000..bf6c93d5378c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW88_PCI @@ -0,0 +1 @@ +CONFIG_RTW88_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW89 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW89 new file mode 100644 index 000000000000..9f3b4271492b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_RTW89 @@ -0,0 +1 @@ +# CONFIG_RTW89 is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SAMSUNG_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SAMSUNG_LAPTOP new file mode 100644 index 000000000000..aaa107689471 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SAMSUNG_LAPTOP @@ -0,0 +1 @@ +CONFIG_SAMSUNG_LAPTOP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SAMSUNG_Q10 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SAMSUNG_Q10 new file mode 100644 index 000000000000..01423b93dba7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SAMSUNG_Q10 @@ -0,0 +1 @@ +CONFIG_SAMSUNG_Q10=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SATA_ZHAOXIN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SATA_ZHAOXIN new file mode 100644 index 000000000000..08c1f428db36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SATA_ZHAOXIN @@ -0,0 +1 @@ +CONFIG_SATA_ZHAOXIN=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBC_EPX_C3_WATCHDOG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBC_EPX_C3_WATCHDOG new file mode 100644 index 000000000000..2f9695c650f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBC_EPX_C3_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_SBC_EPX_C3_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBC_FITPC2_WATCHDOG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBC_FITPC2_WATCHDOG new file mode 100644 index 000000000000..7f3cf373323f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBC_FITPC2_WATCHDOG @@ -0,0 +1 @@ +CONFIG_SBC_FITPC2_WATCHDOG=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBP_TARGET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBP_TARGET new file mode 100644 index 000000000000..bda77b56017b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SBP_TARGET @@ -0,0 +1 @@ +# CONFIG_SBP_TARGET is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SC1200_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SC1200_WDT new file mode 100644 index 000000000000..86f8c743e12f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SC1200_WDT @@ -0,0 +1 @@ +# 
CONFIG_SC1200_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3000 new file mode 100644 index 000000000000..48f4cf26be6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3000 @@ -0,0 +1 @@ +# CONFIG_SCA3000 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3300 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3300 new file mode 100644 index 000000000000..3d71a75abbad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCA3300 @@ -0,0 +1 @@ +# CONFIG_SCA3300 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD30_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD30_CORE new file mode 100644 index 000000000000..6d3d6cbcb62a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD30_CORE @@ -0,0 +1 @@ +# CONFIG_SCD30_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD4X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD4X new file mode 100644 index 000000000000..7e88cabcb490 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCD4X @@ -0,0 +1 @@ +# CONFIG_SCD4X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_AACRAID b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_AACRAID new file mode 100644 index 000000000000..c164284d69d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_AACRAID @@ -0,0 +1 @@ +CONFIG_SCSI_AACRAID=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_BNX2X_FCOE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_BNX2X_FCOE new file mode 100644 index 000000000000..b74e58b83404 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_BNX2X_FCOE @@ -0,0 +1 @@ +CONFIG_SCSI_BNX2X_FCOE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_BNX2_ISCSI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_BNX2_ISCSI new file mode 100644 index 000000000000..7c597f4bc798 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_BNX2_ISCSI @@ -0,0 +1 @@ +CONFIG_SCSI_BNX2_ISCSI=m diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_IMM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_IMM new file mode 100644 index 000000000000..46434497d4c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_IMM @@ -0,0 +1 @@ +# CONFIG_SCSI_IMM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_IPR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_IPR new file mode 100644 index 000000000000..8c7626c6ee37 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_IPR @@ -0,0 +1 @@ +# CONFIG_SCSI_IPR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_ISCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_ISCI new file mode 100644 index 000000000000..9b9e4f77a937 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_ISCI @@ -0,0 +1 @@ +CONFIG_SCSI_ISCI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_PPA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_PPA new file mode 100644 index 000000000000..aa0ba6c1238b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_PPA @@ -0,0 +1 @@ +# CONFIG_SCSI_PPA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SDMA_VERBOSITY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SDMA_VERBOSITY new file mode 100644 index 000000000000..1144ca928e6a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SDMA_VERBOSITY @@ -0,0 +1 @@ +# CONFIG_SDMA_VERBOSITY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SD_ADC_MODULATOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SD_ADC_MODULATOR new file mode 100644 index 000000000000..ec43a0ff1d01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SD_ADC_MODULATOR @@ -0,0 +1 @@ +# CONFIG_SD_ADC_MODULATOR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SEL3350_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SEL3350_PLATFORM new file mode 100644 index 000000000000..b45fc3d318d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SEL3350_PLATFORM @@ -0,0 +1 @@ +# CONFIG_SEL3350_PLATFORM is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSEAIR_SUNRISE_CO2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSEAIR_SUNRISE_CO2 new file mode 100644 index 000000000000..2f3809636c9e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSEAIR_SUNRISE_CO2 @@ -0,0 +1 @@ +# CONFIG_SENSEAIR_SUNRISE_CO2 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP30 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP30 new file mode 100644 index 000000000000..9d25e3dbba7a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP30 @@ -0,0 +1 @@ +# CONFIG_SENSIRION_SGP30 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP40 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP40 new file mode 100644 index 000000000000..4cfb57036775 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSIRION_SGP40 @@ -0,0 +1 @@ +# CONFIG_SENSIRION_SGP40 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU new file mode 100644 index 000000000000..adab26252588 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU @@ -0,0 +1 @@ +CONFIG_SENSORS_ABITUGURU=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU3 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU3 new file mode 100644 index 000000000000..22666cbee7f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ABITUGURU3 @@ -0,0 +1 @@ +CONFIG_SENSORS_ABITUGURU3=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ACPI_POWER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ACPI_POWER new file mode 100644 index 000000000000..dbb45c01bf2c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ACPI_POWER @@ -0,0 +1 @@ +CONFIG_SENSORS_ACPI_POWER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7314 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7314 new file mode 100644 index 000000000000..6e7f02e40048 --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7314 @@ -0,0 +1 @@ +# CONFIG_SENSORS_AD7314 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7414 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7414 new file mode 100644 index 000000000000..10f22ae168dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7414 @@ -0,0 +1 @@ +CONFIG_SENSORS_AD7414=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7418 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7418 new file mode 100644 index 000000000000..3df9e9e120cc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AD7418 @@ -0,0 +1 @@ +CONFIG_SENSORS_AD7418=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADC128D818 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADC128D818 new file mode 100644 index 000000000000..d82687c2f212 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADC128D818 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADC128D818 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADCXX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADCXX new file mode 100644 index 000000000000..581f6ccb69e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADCXX @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADCXX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1025 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1025 new file mode 100644 index 000000000000..a5617d87e3e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1025 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADM1025=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1026 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1026 new file mode 100644 index 000000000000..d6cb0376df27 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1026 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADM1026=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1029 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1029 new file mode 100644 index 000000000000..0d3c99845fa6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1029 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADM1029=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1031 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1031 new file mode 100644 index 000000000000..6109dab5efc8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1031 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADM1031=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1275 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1275 new file mode 100644 index 000000000000..76144de6f986 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM1275 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADM1275=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM9240 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM9240 new file mode 100644 index 000000000000..f79944a972b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADM9240 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADM9240=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7828 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7828 new file mode 100644 index 000000000000..494099a32793 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7828 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADS7828=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7871 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7871 new file mode 100644 index 000000000000..f5de6f551fcf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADS7871 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ADS7871 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7410 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7410 new file mode 100644 index 000000000000..7255e3abd8dd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7410 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADT7410=m diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7411 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7411 new file mode 100644 index 000000000000..a4c670ee7f81 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7411 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADT7411=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7462 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7462 new file mode 100644 index 000000000000..0eaa2e22ff45 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7462 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADT7462=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7470 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7470 new file mode 100644 index 000000000000..7fbf07427b1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7470 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADT7470=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7475 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7475 new file mode 100644 index 000000000000..38e288230f70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7475 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADT7475=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7X10 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7X10 new file mode 100644 index 000000000000..74edafb84773 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ADT7X10 @@ -0,0 +1 @@ +CONFIG_SENSORS_ADT7X10=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AMC6821 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AMC6821 new file mode 100644 index 000000000000..c0389d18989c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_AMC6821 @@ -0,0 +1 @@ +CONFIG_SENSORS_AMC6821=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_APDS990X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_APDS990X new file mode 100644 index 000000000000..1521713d81cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_APDS990X @@ -0,0 +1 
@@ +CONFIG_SENSORS_APDS990X=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_APPLESMC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_APPLESMC new file mode 100644 index 000000000000..48519f04f841 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_APPLESMC @@ -0,0 +1 @@ +CONFIG_SENSORS_APPLESMC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASB100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASB100 new file mode 100644 index 000000000000..e568b0eb67a9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASB100 @@ -0,0 +1 @@ +CONFIG_SENSORS_ASB100=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASC7621 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASC7621 new file mode 100644 index 000000000000..5ba8f0815922 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASC7621 @@ -0,0 +1 @@ +CONFIG_SENSORS_ASC7621=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_EC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_EC new file mode 100644 index 000000000000..9c16493b6109 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_EC @@ -0,0 +1 @@ +# CONFIG_SENSORS_ASUS_EC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_WMI new file mode 100644 index 000000000000..9578d79edf13 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ASUS_WMI @@ -0,0 +1 @@ +# CONFIG_SENSORS_ASUS_WMI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ATK0110 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ATK0110 new file mode 100644 index 000000000000..a323de026e96 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ATK0110 @@ -0,0 +1 @@ +CONFIG_SENSORS_ATK0110=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ATXP1 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ATXP1 new file mode 100644 index 000000000000..78edcd84dfd2 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ATXP1 @@ -0,0 +1 @@ +CONFIG_SENSORS_ATXP1=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_BH1770 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_BH1770 new file mode 100644 index 000000000000..78371b1b5ad2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_BH1770 @@ -0,0 +1 @@ +CONFIG_SENSORS_BH1770=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_CORETEMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_CORETEMP new file mode 100644 index 000000000000..8fb4868a2372 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_CORETEMP @@ -0,0 +1 @@ +CONFIG_SENSORS_CORETEMP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DELL_SMM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DELL_SMM new file mode 100644 index 000000000000..d9fa9f5a2fae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DELL_SMM @@ -0,0 +1 @@ +CONFIG_SENSORS_DELL_SMM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DME1737 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DME1737 new file mode 100644 index 000000000000..981a96d2f650 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DME1737 @@ -0,0 +1 @@ +CONFIG_SENSORS_DME1737=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DS1621 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DS1621 new file mode 100644 index 000000000000..79f96d6ea8d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DS1621 @@ -0,0 +1 @@ +CONFIG_SENSORS_DS1621=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DS620 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DS620 new file mode 100644 index 000000000000..4a06b361ae98 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_DS620 @@ -0,0 +1 @@ +CONFIG_SENSORS_DS620=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_EMC1403 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_EMC1403 new file mode 100644 index 
000000000000..efe0b0e41cee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_EMC1403 @@ -0,0 +1 @@ +CONFIG_SENSORS_EMC1403=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_EMC6W201 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_EMC6W201 new file mode 100644 index 000000000000..4e7513c59793 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_EMC6W201 @@ -0,0 +1 @@ +CONFIG_SENSORS_EMC6W201=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F71805F b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F71805F new file mode 100644 index 000000000000..57db5db3cd8e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F71805F @@ -0,0 +1 @@ +CONFIG_SENSORS_F71805F=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F71882FG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F71882FG new file mode 100644 index 000000000000..67c502c7155e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F71882FG @@ -0,0 +1 @@ +CONFIG_SENSORS_F71882FG=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F75375S b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F75375S new file mode 100644 index 000000000000..04d774dca671 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_F75375S @@ -0,0 +1 @@ +CONFIG_SENSORS_F75375S=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_FAM15H_POWER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_FAM15H_POWER new file mode 100644 index 000000000000..9632b4c8df9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_FAM15H_POWER @@ -0,0 +1 @@ +CONFIG_SENSORS_FAM15H_POWER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_FSCHMD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_FSCHMD new file mode 100644 index 000000000000..0407bdcd142c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_FSCHMD @@ -0,0 +1 @@ +CONFIG_SENSORS_FSCHMD=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_G760A 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_G760A new file mode 100644 index 000000000000..17b630baad83 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_G760A @@ -0,0 +1 @@ +CONFIG_SENSORS_G760A=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_G762 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_G762 new file mode 100644 index 000000000000..4eea34146db8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_G762 @@ -0,0 +1 @@ +# CONFIG_SENSORS_G762 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_GL518SM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_GL518SM new file mode 100644 index 000000000000..3942992572d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_GL518SM @@ -0,0 +1 @@ +CONFIG_SENSORS_GL518SM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_GL520SM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_GL520SM new file mode 100644 index 000000000000..d03316fec699 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_GL520SM @@ -0,0 +1 @@ +CONFIG_SENSORS_GL520SM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HDAPS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HDAPS new file mode 100644 index 000000000000..d2dba2ab1be6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HDAPS @@ -0,0 +1 @@ +CONFIG_SENSORS_HDAPS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HMC5843_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HMC5843_I2C new file mode 100644 index 000000000000..8ecf97fd1e4f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HMC5843_I2C @@ -0,0 +1 @@ +# CONFIG_SENSORS_HMC5843_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HMC5843_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HMC5843_SPI new file mode 100644 index 000000000000..51f52f1bf8e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HMC5843_SPI @@ -0,0 +1 @@ +# CONFIG_SENSORS_HMC5843_SPI 
is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HP_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HP_WMI new file mode 100644 index 000000000000..ee7ab1228c5b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_HP_WMI @@ -0,0 +1 @@ +# CONFIG_SENSORS_HP_WMI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_I5500 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_I5500 new file mode 100644 index 000000000000..d956e09b5f91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_I5500 @@ -0,0 +1 @@ +CONFIG_SENSORS_I5500=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_I5K_AMB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_I5K_AMB new file mode 100644 index 000000000000..ad6832efb1d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_I5K_AMB @@ -0,0 +1 @@ +CONFIG_SENSORS_I5K_AMB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IBMAEM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IBMAEM new file mode 100644 index 000000000000..b291c66370c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IBMAEM @@ -0,0 +1 @@ +CONFIG_SENSORS_IBMAEM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IBMPEX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IBMPEX new file mode 100644 index 000000000000..a82ec4e6d9c2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IBMPEX @@ -0,0 +1 @@ +CONFIG_SENSORS_IBMPEX=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IIO_HWMON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IIO_HWMON new file mode 100644 index 000000000000..d7dbcc83ceff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IIO_HWMON @@ -0,0 +1 @@ +# CONFIG_SENSORS_IIO_HWMON is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_INA209 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_INA209 new file mode 100644 index 000000000000..4b0197e0a423 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_INA209 @@ -0,0 +1 @@ +CONFIG_SENSORS_INA209=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_INA2XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_INA2XX new file mode 100644 index 000000000000..4c6a7ebe7b26 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_INA2XX @@ -0,0 +1 @@ +CONFIG_SENSORS_INA2XX=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29018 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29018 new file mode 100644 index 000000000000..131aebf71240 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29018 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ISL29018 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29028 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29028 new file mode 100644 index 000000000000..53f7bb3ff531 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ISL29028 @@ -0,0 +1 @@ +# CONFIG_SENSORS_ISL29028 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IT87 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IT87 new file mode 100644 index 000000000000..f6a8baf0d7d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_IT87 @@ -0,0 +1 @@ +CONFIG_SENSORS_IT87=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_JC42 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_JC42 new file mode 100644 index 000000000000..8a07c3eef08a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_JC42 @@ -0,0 +1 @@ +CONFIG_SENSORS_JC42=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_K10TEMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_K10TEMP new file mode 100644 index 000000000000..eec8b328a6ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_K10TEMP @@ -0,0 +1 @@ +CONFIG_SENSORS_K10TEMP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_K8TEMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_K8TEMP new file mode 100644 index 
000000000000..f690795aadb9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_K8TEMP @@ -0,0 +1 @@ +CONFIG_SENSORS_K8TEMP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LINEAGE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LINEAGE new file mode 100644 index 000000000000..43bc1aa07638 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LINEAGE @@ -0,0 +1 @@ +CONFIG_SENSORS_LINEAGE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3LV02D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3LV02D new file mode 100644 index 000000000000..980124650a42 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3LV02D @@ -0,0 +1 @@ +CONFIG_SENSORS_LIS3LV02D=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3_I2C new file mode 100644 index 000000000000..3f3a59ae65b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LIS3_I2C @@ -0,0 +1 @@ +CONFIG_SENSORS_LIS3_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM25066 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM25066 new file mode 100644 index 000000000000..5c202b02f167 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM25066 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM25066=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM63 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM63 new file mode 100644 index 000000000000..7d893171b59f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM63 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM63=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM70 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM70 new file mode 100644 index 000000000000..c927bf635971 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM70 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LM70 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM73 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM73 new file mode 100644 index 000000000000..b78a7f18d844 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM73 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM73=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM75 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM75 new file mode 100644 index 000000000000..205675fc6192 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM75 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM75=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM77 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM77 new file mode 100644 index 000000000000..3fd7f3f3b6b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM77 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM77=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM78 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM78 new file mode 100644 index 000000000000..6dc289cbcfc1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM78 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM78=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM80 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM80 new file mode 100644 index 000000000000..a7cfa2123d59 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM80 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM80=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM83 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM83 new file mode 100644 index 000000000000..2e15080a2a01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM83 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM83=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM85 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM85 new file mode 100644 index 000000000000..1ce412b2953b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM85 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM85=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM87 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM87 new file mode 100644 index 000000000000..8930963579e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM87 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM87=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM90 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM90 new file mode 100644 index 000000000000..2633ff934466 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM90 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM90=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM92 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM92 new file mode 100644 index 000000000000..58b30a4e375a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM92 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM92=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM93 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM93 new file mode 100644 index 000000000000..46514707a402 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM93 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM93=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95234 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95234 new file mode 100644 index 000000000000..f8a7d302e038 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95234 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM95234=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95241 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95241 new file mode 100644 index 000000000000..10e50c1c2e94 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95241 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM95241=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95245 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95245 new file mode 100644 index 000000000000..c66b17705547 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LM95245 @@ -0,0 +1 @@ +CONFIG_SENSORS_LM95245=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2945 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2945 new file mode 100644 index 000000000000..79d68fd35109 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2945 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC2945 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2978 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2978 new file mode 100644 index 000000000000..5b47e4c4440e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC2978 @@ -0,0 +1 @@ +CONFIG_SENSORS_LTC2978=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC3815 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC3815 new file mode 100644 index 000000000000..5ea0210058bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC3815 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC3815 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4151 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4151 new file mode 100644 index 000000000000..9a8d133d299b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4151 @@ -0,0 +1 @@ +CONFIG_SENSORS_LTC4151=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4215 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4215 new file mode 100644 index 000000000000..3734c6ff236f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4215 @@ -0,0 +1 @@ +CONFIG_SENSORS_LTC4215=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4222 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4222 new file mode 100644 index 000000000000..82af61e643ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4222 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC4222 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4245 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4245 new file mode 100644 index 000000000000..9ef758048d95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4245 @@ -0,0 +1 @@ 
+CONFIG_SENSORS_LTC4245=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4260 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4260 new file mode 100644 index 000000000000..a42c153cfa41 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4260 @@ -0,0 +1 @@ +# CONFIG_SENSORS_LTC4260 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4261 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4261 new file mode 100644 index 000000000000..a6cd8f7f12b0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_LTC4261 @@ -0,0 +1 @@ +CONFIG_SENSORS_LTC4261=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1111 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1111 new file mode 100644 index 000000000000..082ff9e60ad3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1111 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX1111 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16064 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16064 new file mode 100644 index 000000000000..579d1c59d4bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16064 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX16064=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16065 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16065 new file mode 100644 index 000000000000..1054c96225ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX16065 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX16065=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1619 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1619 new file mode 100644 index 000000000000..0b3146610bcb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1619 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX1619=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1668 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1668 new file mode 100644 index 000000000000..129708ae55dd --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX1668 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX1668=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX197 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX197 new file mode 100644 index 000000000000..10089e38bc00 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX197 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX197=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX20751 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX20751 new file mode 100644 index 000000000000..5555d692860f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX20751 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX20751 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX31790 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX31790 new file mode 100644 index 000000000000..6f8e031631e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX31790 @@ -0,0 +1 @@ +# CONFIG_SENSORS_MAX31790 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX34440 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX34440 new file mode 100644 index 000000000000..d6c798cafcf2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX34440 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX34440=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6639 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6639 new file mode 100644 index 000000000000..a56c9c522335 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6639 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX6639=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6650 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6650 new file mode 100644 index 000000000000..45f922494962 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6650 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX6650=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6697 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6697 new 
file mode 100644 index 000000000000..346dae3dfd50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX6697 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX6697=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX8688 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX8688 new file mode 100644 index 000000000000..d983ad1d5cf3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MAX8688 @@ -0,0 +1 @@ +CONFIG_SENSORS_MAX8688=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MCP3021 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MCP3021 new file mode 100644 index 000000000000..508286fe15d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MCP3021 @@ -0,0 +1 @@ +CONFIG_SENSORS_MCP3021=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MLXREG_FAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MLXREG_FAN new file mode 100644 index 000000000000..5d431cda6a42 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_MLXREG_FAN @@ -0,0 +1 @@ +# CONFIG_SENSORS_MLXREG_FAN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6683 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6683 new file mode 100644 index 000000000000..677df6183600 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6683 @@ -0,0 +1 @@ +# CONFIG_SENSORS_NCT6683 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775 new file mode 100644 index 000000000000..8bde1424c5c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775 @@ -0,0 +1 @@ +CONFIG_SENSORS_NCT6775=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775_CORE new file mode 100644 index 000000000000..df64ea6de2d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT6775_CORE @@ -0,0 +1 @@ +CONFIG_SENSORS_NCT6775_CORE=m diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7802 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7802 new file mode 100644 index 000000000000..e699237af139 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7802 @@ -0,0 +1 @@ +# CONFIG_SENSORS_NCT7802 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7904 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7904 new file mode 100644 index 000000000000..aabc731da231 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NCT7904 @@ -0,0 +1 @@ +# CONFIG_SENSORS_NCT7904 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NTC_THERMISTOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NTC_THERMISTOR new file mode 100644 index 000000000000..269cb4eb7522 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_NTC_THERMISTOR @@ -0,0 +1 @@ +CONFIG_SENSORS_NTC_THERMISTOR=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_OXP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_OXP new file mode 100644 index 000000000000..b490423ca30c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_OXP @@ -0,0 +1 @@ +# CONFIG_SENSORS_OXP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PC87360 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PC87360 new file mode 100644 index 000000000000..bbd351dbf7f8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PC87360 @@ -0,0 +1 @@ +CONFIG_SENSORS_PC87360=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PC87427 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PC87427 new file mode 100644 index 000000000000..113695150277 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PC87427 @@ -0,0 +1 @@ +CONFIG_SENSORS_PC87427=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PCF8591 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PCF8591 new file mode 100644 index 000000000000..685b3b3c21c7 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PCF8591 @@ -0,0 +1 @@ +CONFIG_SENSORS_PCF8591=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PMBUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PMBUS new file mode 100644 index 000000000000..7b11c9b8f7ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_PMBUS @@ -0,0 +1 @@ +CONFIG_SENSORS_PMBUS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_POWR1220 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_POWR1220 new file mode 100644 index 000000000000..7f6616feb295 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_POWR1220 @@ -0,0 +1 @@ +# CONFIG_SENSORS_POWR1220 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_I2C new file mode 100644 index 000000000000..68d33c0e7cb7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_I2C @@ -0,0 +1 @@ +# CONFIG_SENSORS_RM3100_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_SPI new file mode 100644 index 000000000000..7c4a1c0621d7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_RM3100_SPI @@ -0,0 +1 @@ +# CONFIG_SENSORS_RM3100_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5627 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5627 new file mode 100644 index 000000000000..2a3b5924d8a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5627 @@ -0,0 +1 @@ +CONFIG_SENSORS_SCH5627=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5636 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5636 new file mode 100644 index 000000000000..038ef2e17524 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH5636 @@ -0,0 +1 @@ +CONFIG_SENSORS_SCH5636=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH56XX_COMMON 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH56XX_COMMON new file mode 100644 index 000000000000..b2d6ee39ddad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SCH56XX_COMMON @@ -0,0 +1 @@ +CONFIG_SENSORS_SCH56XX_COMMON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHT15 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHT15 new file mode 100644 index 000000000000..997046201a12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHT15 @@ -0,0 +1 @@ +CONFIG_SENSORS_SHT15=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHT21 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHT21 new file mode 100644 index 000000000000..dbc5dd9f3dff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHT21 @@ -0,0 +1 @@ +CONFIG_SENSORS_SHT21=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHTC1 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHTC1 new file mode 100644 index 000000000000..f91426bc7c01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SHTC1 @@ -0,0 +1 @@ +# CONFIG_SENSORS_SHTC1 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SIS5595 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SIS5595 new file mode 100644 index 000000000000..70c05287c6ca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SIS5595 @@ -0,0 +1 @@ +CONFIG_SENSORS_SIS5595=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47B397 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47B397 new file mode 100644 index 000000000000..eba9f9c8055c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47B397 @@ -0,0 +1 @@ +CONFIG_SENSORS_SMSC47B397=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M1 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M1 new file mode 100644 index 000000000000..5c35e76134ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M1 @@ -0,0 +1 @@ +CONFIG_SENSORS_SMSC47M1=m diff 
--git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M192 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M192 new file mode 100644 index 000000000000..e239c8299bad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_SMSC47M192 @@ -0,0 +1 @@ +CONFIG_SENSORS_SMSC47M192=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TC74 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TC74 new file mode 100644 index 000000000000..56843784b051 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TC74 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TC74 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_THMC50 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_THMC50 new file mode 100644 index 000000000000..f3b849b5c2b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_THMC50 @@ -0,0 +1 @@ +CONFIG_SENSORS_THMC50=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP102 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP102 new file mode 100644 index 000000000000..eec18af38213 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP102 @@ -0,0 +1 @@ +CONFIG_SENSORS_TMP102=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP103 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP103 new file mode 100644 index 000000000000..b3a789182d5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP103 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TMP103 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP401 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP401 new file mode 100644 index 000000000000..de3f7de90c4a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP401 @@ -0,0 +1 @@ +CONFIG_SENSORS_TMP401=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP421 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP421 new file mode 100644 index 000000000000..390515bf3c5f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TMP421 @@ 
-0,0 +1 @@ +CONFIG_SENSORS_TMP421=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TPS40422 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TPS40422 new file mode 100644 index 000000000000..111dc8c2a45f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TPS40422 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TPS40422 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2550 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2550 new file mode 100644 index 000000000000..93ceff8107d8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2550 @@ -0,0 +1 @@ +CONFIG_SENSORS_TSL2550=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2563 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2563 new file mode 100644 index 000000000000..b94d33641dd6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_TSL2563 @@ -0,0 +1 @@ +# CONFIG_SENSORS_TSL2563 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9000 new file mode 100644 index 000000000000..1bce80fa9db5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9000 @@ -0,0 +1 @@ +CONFIG_SENSORS_UCD9000=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9200 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9200 new file mode 100644 index 000000000000..da028a1dfc7b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_UCD9200 @@ -0,0 +1 @@ +CONFIG_SENSORS_UCD9200=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VIA686A b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VIA686A new file mode 100644 index 000000000000..d3b212f2708b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VIA686A @@ -0,0 +1 @@ +CONFIG_SENSORS_VIA686A=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VIA_CPUTEMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VIA_CPUTEMP new file mode 100644 index 000000000000..f4ca4454a4fe --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VIA_CPUTEMP @@ -0,0 +1 @@ +CONFIG_SENSORS_VIA_CPUTEMP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VT1211 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VT1211 new file mode 100644 index 000000000000..bfcec4313f5e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VT1211 @@ -0,0 +1 @@ +CONFIG_SENSORS_VT1211=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VT8231 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VT8231 new file mode 100644 index 000000000000..7e76791749d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_VT8231 @@ -0,0 +1 @@ +CONFIG_SENSORS_VT8231=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83627EHF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83627EHF new file mode 100644 index 000000000000..6a06ef0fac32 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83627EHF @@ -0,0 +1 @@ +CONFIG_SENSORS_W83627EHF=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83627HF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83627HF new file mode 100644 index 000000000000..8c578db80a0d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83627HF @@ -0,0 +1 @@ +CONFIG_SENSORS_W83627HF=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83781D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83781D new file mode 100644 index 000000000000..e0a34bf27f8f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83781D @@ -0,0 +1 @@ +CONFIG_SENSORS_W83781D=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83791D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83791D new file mode 100644 index 000000000000..8bbb2b5b8003 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83791D @@ -0,0 +1 @@ +CONFIG_SENSORS_W83791D=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83792D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83792D new file 
mode 100644 index 000000000000..9dc3fda8bb7f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83792D @@ -0,0 +1 @@ +CONFIG_SENSORS_W83792D=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83793 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83793 new file mode 100644 index 000000000000..6c2fefb4566f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83793 @@ -0,0 +1 @@ +CONFIG_SENSORS_W83793=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83795 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83795 new file mode 100644 index 000000000000..e85aa8e5e828 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83795 @@ -0,0 +1 @@ +CONFIG_SENSORS_W83795=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83795_FANCTRL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83795_FANCTRL new file mode 100644 index 000000000000..896ede33498c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83795_FANCTRL @@ -0,0 +1 @@ +# CONFIG_SENSORS_W83795_FANCTRL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83L785TS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83L785TS new file mode 100644 index 000000000000..0c220bdf89a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83L785TS @@ -0,0 +1 @@ +CONFIG_SENSORS_W83L785TS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83L786NG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83L786NG new file mode 100644 index 000000000000..0dfd97664709 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_W83L786NG @@ -0,0 +1 @@ +CONFIG_SENSORS_W83L786NG=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_XGENE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_XGENE new file mode 100644 index 000000000000..ae549128e540 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_XGENE @@ -0,0 +1 @@ +# CONFIG_SENSORS_XGENE is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ZL6100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ZL6100 new file mode 100644 index 000000000000..8a28e5080d53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SENSORS_ZL6100 @@ -0,0 +1 @@ +CONFIG_SENSORS_ZL6100=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_16550A_VARIANTS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_16550A_VARIANTS new file mode 100644 index 000000000000..056bf6f166c8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_16550A_VARIANTS @@ -0,0 +1 @@ +# CONFIG_SERIAL_8250_16550A_VARIANTS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_LPSS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_LPSS new file mode 100644 index 000000000000..e033c4db19cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_LPSS @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_LPSS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_MID b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_MID new file mode 100644 index 000000000000..58ee08f11f2e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_MID @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_MID=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_RT288X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_RT288X new file mode 100644 index 000000000000..1f10e7574fb4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_8250_RT288X @@ -0,0 +1 @@ +# CONFIG_SERIAL_8250_RT288X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC new file mode 100644 index 000000000000..998cbcfd789a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC @@ -0,0 +1 @@ +CONFIG_SERIAL_ARC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC_NR_PORTS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC_NR_PORTS new file mode 100644 index 000000000000..02dfacf686c2 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_ARC_NR_PORTS @@ -0,0 +1 @@ +CONFIG_SERIAL_ARC_NR_PORTS=1 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_JSM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_JSM new file mode 100644 index 000000000000..7f35395cfc28 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_JSM @@ -0,0 +1 @@ +CONFIG_SERIAL_JSM=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_LANTIQ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_LANTIQ new file mode 100644 index 000000000000..7bd602b2fd91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_LANTIQ @@ -0,0 +1 @@ +# CONFIG_SERIAL_LANTIQ is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_MULTI_INSTANTIATE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_MULTI_INSTANTIATE new file mode 100644 index 000000000000..25a0b4bdf115 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIAL_MULTI_INSTANTIATE @@ -0,0 +1 @@ +# CONFIG_SERIAL_MULTI_INSTANTIATE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIO_CT82C710 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIO_CT82C710 new file mode 100644 index 000000000000..6ba82bb7c567 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIO_CT82C710 @@ -0,0 +1 @@ +# CONFIG_SERIO_CT82C710 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIO_PARKBD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIO_PARKBD new file mode 100644 index 000000000000..8f681961795d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SERIO_PARKBD @@ -0,0 +1 @@ +# CONFIG_SERIO_PARKBD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC new file mode 100644 index 000000000000..dd76039a1842 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC @@ -0,0 +1 @@ +CONFIG_SFC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MCDI_LOGGING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MCDI_LOGGING new file mode 100644 index 
000000000000..25cb3fd7c858 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MCDI_LOGGING @@ -0,0 +1 @@ +CONFIG_SFC_MCDI_LOGGING=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MCDI_MON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MCDI_MON new file mode 100644 index 000000000000..5bb40eaeafc4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MCDI_MON @@ -0,0 +1 @@ +CONFIG_SFC_MCDI_MON=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MTD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MTD new file mode 100644 index 000000000000..35ab623c5d71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_MTD @@ -0,0 +1 @@ +CONFIG_SFC_MTD=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_SRIOV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_SRIOV new file mode 100644 index 000000000000..62e41fa84744 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SFC_SRIOV @@ -0,0 +1 @@ +CONFIG_SFC_SRIOV=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU new file mode 100644 index 000000000000..5b133b268699 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU @@ -0,0 +1 @@ +CONFIG_SGI_GRU=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU_DEBUG new file mode 100644 index 000000000000..5e65cd5dd8a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_GRU_DEBUG @@ -0,0 +1 @@ +# CONFIG_SGI_GRU_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_PARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_PARTITION new file mode 100644 index 000000000000..2c96dcdff373 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_PARTITION @@ -0,0 +1 @@ +CONFIG_SGI_PARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_XP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_XP new file mode 100644 index 000000000000..c27ee782f3af --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SGI_XP @@ -0,0 +1 @@ +CONFIG_SGI_XP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1133 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1133 new file mode 100644 index 000000000000..8336ff08b8b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1133 @@ -0,0 +1 @@ +# CONFIG_SI1133 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1145 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1145 new file mode 100644 index 000000000000..744fc2b7fa0c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI1145 @@ -0,0 +1 @@ +# CONFIG_SI1145 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7005 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7005 new file mode 100644 index 000000000000..f04c186e3f01 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7005 @@ -0,0 +1 @@ +# CONFIG_SI7005 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7020 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7020 new file mode 100644 index 000000000000..25a811632e71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SI7020 @@ -0,0 +1 @@ +# CONFIG_SI7020 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SIEMENS_SIMATIC_IPC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SIEMENS_SIMATIC_IPC new file mode 100644 index 000000000000..91b7eb527e36 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SIEMENS_SIMATIC_IPC @@ -0,0 +1 @@ +# CONFIG_SIEMENS_SIMATIC_IPC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SMSC37B787_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SMSC37B787_WDT new file mode 100644 index 000000000000..5448253d6650 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SMSC37B787_WDT @@ -0,0 +1 @@ +# CONFIG_SMSC37B787_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SMSC_SCH311X_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SMSC_SCH311X_WDT new file mode 100644 index 000000000000..86a9a5ae1fba --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SMSC_SCH311X_WDT @@ -0,0 +1 @@ +CONFIG_SMSC_SCH311X_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SOLARIS_X86_PARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SOLARIS_X86_PARTITION new file mode 100644 index 000000000000..deb79e27e78f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SOLARIS_X86_PARTITION @@ -0,0 +1 @@ +CONFIG_SOLARIS_X86_PARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SONYPI_COMPAT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SONYPI_COMPAT new file mode 100644 index 000000000000..6655c779b0ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SONYPI_COMPAT @@ -0,0 +1 @@ +CONFIG_SONYPI_COMPAT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SONY_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SONY_LAPTOP new file mode 100644 index 000000000000..a31919301348 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SONY_LAPTOP @@ -0,0 +1 @@ +CONFIG_SONY_LAPTOP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_BUTTERFLY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_BUTTERFLY new file mode 100644 index 000000000000..958604c8fe60 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_BUTTERFLY @@ -0,0 +1 @@ +# CONFIG_SPI_BUTTERFLY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_LANTIQ_SSC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_LANTIQ_SSC new file mode 100644 index 000000000000..f93334dd32f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_LANTIQ_SSC @@ -0,0 +1 @@ +# CONFIG_SPI_LANTIQ_SSC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_LM70_LLP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_LM70_LLP new file mode 100644 index 000000000000..a3b56af5a4f9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_LM70_LLP @@ -0,0 +1 @@ +# CONFIG_SPI_LM70_LLP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPS30_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPS30_I2C new file mode 100644 index 
000000000000..65e6b608ecac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPS30_I2C @@ -0,0 +1 @@ +# CONFIG_SPS30_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF04 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF04 new file mode 100644 index 000000000000..7dcc9136e884 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF04 @@ -0,0 +1 @@ +# CONFIG_SRF04 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF08 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF08 new file mode 100644 index 000000000000..11f5dbcf5a3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SRF08 @@ -0,0 +1 @@ +# CONFIG_SRF08 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK3310 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK3310 new file mode 100644 index 000000000000..085e44f4d1a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK3310 @@ -0,0 +1 @@ +# CONFIG_STK3310 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8312 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8312 new file mode 100644 index 000000000000..a6776d90eef6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8312 @@ -0,0 +1 @@ +# CONFIG_STK8312 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8BA50 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8BA50 new file mode 100644 index 000000000000..7608b4ef79e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_STK8BA50 @@ -0,0 +1 @@ +# CONFIG_STK8BA50 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ST_UVIS25 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ST_UVIS25 new file mode 100644 index 000000000000..c4b590934fb8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ST_UVIS25 @@ -0,0 +1 @@ +# CONFIG_ST_UVIS25 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SUN_PARTITION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SUN_PARTITION new file mode 100644 index 000000000000..67e2deb517cd --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SUN_PARTITION @@ -0,0 +1 @@ +CONFIG_SUN_PARTITION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SURFACE3_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SURFACE3_WMI new file mode 100644 index 000000000000..8e45af1e32a0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SURFACE3_WMI @@ -0,0 +1 @@ +# CONFIG_SURFACE3_WMI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9310 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9310 new file mode 100644 index 000000000000..14c168021e5f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9310 @@ -0,0 +1 @@ +# CONFIG_SX9310 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9324 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9324 new file mode 100644 index 000000000000..1864bc0dc627 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9324 @@ -0,0 +1 @@ +# CONFIG_SX9324 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9360 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9360 new file mode 100644 index 000000000000..1b9af0ea4056 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9360 @@ -0,0 +1 @@ +# CONFIG_SX9360 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9500 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9500 new file mode 100644 index 000000000000..f7899805e8a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SX9500 @@ -0,0 +1 @@ +# CONFIG_SX9500 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SYSTEM76_ACPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SYSTEM76_ACPI new file mode 100644 index 000000000000..40471194d63c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SYSTEM76_ACPI @@ -0,0 +1 @@ +# CONFIG_SYSTEM76_ACPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SYS_HYPERVISOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SYS_HYPERVISOR new file mode 100644 index 000000000000..a237fda7aa29 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SYS_HYPERVISOR @@ -0,0 +1 
@@ +CONFIG_SYS_HYPERVISOR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_T5403 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_T5403 new file mode 100644 index 000000000000..ef5a8130a77a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_T5403 @@ -0,0 +1 @@ +# CONFIG_T5403 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_SERIAL_WACOM4 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_SERIAL_WACOM4 new file mode 100644 index 000000000000..dc8670f60fe3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_SERIAL_WACOM4 @@ -0,0 +1 @@ +CONFIG_TABLET_SERIAL_WACOM4=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_ACECAD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_ACECAD new file mode 100644 index 000000000000..cc981e4af3de --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_ACECAD @@ -0,0 +1 @@ +CONFIG_TABLET_USB_ACECAD=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_AIPTEK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_AIPTEK new file mode 100644 index 000000000000..250cea738cc6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_AIPTEK @@ -0,0 +1 @@ +CONFIG_TABLET_USB_AIPTEK=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_HANWANG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_HANWANG new file mode 100644 index 000000000000..0e71e48a49cb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_HANWANG @@ -0,0 +1 @@ +# CONFIG_TABLET_USB_HANWANG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_KBTAB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_KBTAB new file mode 100644 index 000000000000..2dcbea933486 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_KBTAB @@ -0,0 +1 @@ +CONFIG_TABLET_USB_KBTAB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_PEGASUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_PEGASUS new file mode 100644 index 000000000000..78f51e018cfc --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TABLET_USB_PEGASUS @@ -0,0 +1 @@ +# CONFIG_TABLET_USB_PEGASUS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_ATMEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_ATMEL new file mode 100644 index 000000000000..d7bd6a50d126 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_ATMEL @@ -0,0 +1 @@ +CONFIG_TCG_TIS_I2C_ATMEL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_INFINEON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_INFINEON new file mode 100644 index 000000000000..c4409f95bf96 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_INFINEON @@ -0,0 +1 @@ +CONFIG_TCG_TIS_I2C_INFINEON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_NUVOTON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_NUVOTON new file mode 100644 index 000000000000..bba929925219 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_I2C_NUVOTON @@ -0,0 +1 @@ +CONFIG_TCG_TIS_I2C_NUVOTON=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_SPI new file mode 100644 index 000000000000..3b66237980be --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_SPI @@ -0,0 +1 @@ +# CONFIG_TCG_TIS_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24 new file mode 100644 index 000000000000..7dbdd343ad16 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24 @@ -0,0 +1 @@ +CONFIG_TCG_TIS_ST33ZP24=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24_I2C new file mode 100644 index 000000000000..284cac4f07e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_ST33ZP24_I2C @@ -0,0 +1 @@ +CONFIG_TCG_TIS_ST33ZP24_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_XEN 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_XEN new file mode 100644 index 000000000000..778bc2d900f0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_XEN @@ -0,0 +1 @@ +# CONFIG_TCG_XEN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3414 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3414 new file mode 100644 index 000000000000..5b04d4ee1298 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3414 @@ -0,0 +1 @@ +# CONFIG_TCS3414 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3472 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3472 new file mode 100644 index 000000000000..0d00b4785abd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCS3472 @@ -0,0 +1 @@ +# CONFIG_TCS3472 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TELCLOCK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TELCLOCK new file mode 100644 index 000000000000..0cfe9ee4aedd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TELCLOCK @@ -0,0 +1 @@ +CONFIG_TELCLOCK=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_CLOCKSOURCE_WATCHDOG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_CLOCKSOURCE_WATCHDOG new file mode 100644 index 000000000000..2f86ea497b70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_CLOCKSOURCE_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_TEST_CLOCKSOURCE_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_FPU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_FPU new file mode 100644 index 000000000000..8091d4abf379 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_FPU @@ -0,0 +1 @@ +# CONFIG_TEST_FPU is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_HMM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_HMM new file mode 100644 index 000000000000..5210be0d7788 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_HMM @@ -0,0 +1 @@ +# CONFIG_TEST_HMM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THERMAL_ACPI 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THERMAL_ACPI new file mode 100644 index 000000000000..f0a399285ee8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THERMAL_ACPI @@ -0,0 +1 @@ +CONFIG_THERMAL_ACPI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG new file mode 100644 index 000000000000..eeb8ec40aa12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG @@ -0,0 +1 @@ +# CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI new file mode 100644 index 000000000000..b56a7701d5d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI @@ -0,0 +1 @@ +CONFIG_THINKPAD_ACPI=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_DEBUG new file mode 100644 index 000000000000..10e6eb6a70fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_DEBUG @@ -0,0 +1 @@ +# CONFIG_THINKPAD_ACPI_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_DEBUGFACILITIES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_DEBUGFACILITIES new file mode 100644 index 000000000000..0673591f5ac3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_DEBUGFACILITIES @@ -0,0 +1 @@ +# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_HOTKEY_POLL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_HOTKEY_POLL new file mode 100644 index 000000000000..cf6a85819368 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_HOTKEY_POLL @@ -0,0 +1 @@ +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_UNSAFE_LEDS 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_UNSAFE_LEDS new file mode 100644 index 000000000000..535380b27802 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_UNSAFE_LEDS @@ -0,0 +1 @@ +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_VIDEO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_VIDEO new file mode 100644 index 000000000000..a13ebc06be53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_VIDEO @@ -0,0 +1 @@ +CONFIG_THINKPAD_ACPI_VIDEO=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_LMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_LMI new file mode 100644 index 000000000000..71ff74ef7e7b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_LMI @@ -0,0 +1 @@ +# CONFIG_THINKPAD_LMI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_BGX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_BGX new file mode 100644 index 000000000000..99c2371d5292 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_BGX @@ -0,0 +1 @@ +# CONFIG_THUNDER_NIC_BGX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_PF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_PF new file mode 100644 index 000000000000..7b9f749d884d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_PF @@ -0,0 +1 @@ +# CONFIG_THUNDER_NIC_PF is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_RGX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_RGX new file mode 100644 index 000000000000..ee56094121d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_RGX @@ -0,0 +1 @@ +# CONFIG_THUNDER_NIC_RGX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_VF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_VF new file mode 100644 index 000000000000..44295fa0bc37 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THUNDER_NIC_VF @@ -0,0 +1 @@ +# CONFIG_THUNDER_NIC_VF is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TIFM_7XX1 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TIFM_7XX1 new file mode 100644 index 000000000000..86734bb34b95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TIFM_7XX1 @@ -0,0 +1 @@ +CONFIG_TIFM_7XX1=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC081C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC081C new file mode 100644 index 000000000000..58ab3469536d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC081C @@ -0,0 +1 @@ +# CONFIG_TI_ADC081C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC0832 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC0832 new file mode 100644 index 000000000000..a1e55e6c029a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC0832 @@ -0,0 +1 @@ +# CONFIG_TI_ADC0832 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC084S021 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC084S021 new file mode 100644 index 000000000000..4845420948d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC084S021 @@ -0,0 +1 @@ +# CONFIG_TI_ADC084S021 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC108S102 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC108S102 new file mode 100644 index 000000000000..b70880f5bfa7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC108S102 @@ -0,0 +1 @@ +# CONFIG_TI_ADC108S102 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC12138 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC12138 new file mode 100644 index 000000000000..097a76b1d61f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC12138 @@ -0,0 +1 @@ +# CONFIG_TI_ADC12138 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC128S052 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC128S052 new file mode 100644 index 000000000000..d42dd79ca6f1 --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC128S052 @@ -0,0 +1 @@ +# CONFIG_TI_ADC128S052 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC161S626 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC161S626 new file mode 100644 index 000000000000..016e8c552677 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADC161S626 @@ -0,0 +1 @@ +# CONFIG_TI_ADC161S626 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1015 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1015 new file mode 100644 index 000000000000..8a8d511c60a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1015 @@ -0,0 +1 @@ +# CONFIG_TI_ADS1015 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1100 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1100 new file mode 100644 index 000000000000..abc5533f8cde --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS1100 @@ -0,0 +1 @@ +# CONFIG_TI_ADS1100 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS124S08 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS124S08 new file mode 100644 index 000000000000..8d6d673b505b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS124S08 @@ -0,0 +1 @@ +# CONFIG_TI_ADS124S08 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS131E08 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS131E08 new file mode 100644 index 000000000000..c60c731eb2d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS131E08 @@ -0,0 +1 @@ +# CONFIG_TI_ADS131E08 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7924 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7924 new file mode 100644 index 000000000000..0c6bdb1eea12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7924 @@ -0,0 +1 @@ +# CONFIG_TI_ADS7924 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7950 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7950 new file mode 100644 index 000000000000..f2a68954b22c --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS7950 @@ -0,0 +1 @@ +# CONFIG_TI_ADS7950 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8344 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8344 new file mode 100644 index 000000000000..5c0ba0608a38 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8344 @@ -0,0 +1 @@ +# CONFIG_TI_ADS8344 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8688 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8688 new file mode 100644 index 000000000000..c1e7d30101b5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_ADS8688 @@ -0,0 +1 @@ +# CONFIG_TI_ADS8688 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC082S085 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC082S085 new file mode 100644 index 000000000000..90397f5e302a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC082S085 @@ -0,0 +1 @@ +# CONFIG_TI_DAC082S085 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC5571 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC5571 new file mode 100644 index 000000000000..4c8e5798c547 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC5571 @@ -0,0 +1 @@ +# CONFIG_TI_DAC5571 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7311 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7311 new file mode 100644 index 000000000000..26dfffbc34e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7311 @@ -0,0 +1 @@ +# CONFIG_TI_DAC7311 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7612 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7612 new file mode 100644 index 000000000000..38e697dd813f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_DAC7612 @@ -0,0 +1 @@ +# CONFIG_TI_DAC7612 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_LMP92064 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_LMP92064 new file mode 100644 index 000000000000..61cc56124502 --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_LMP92064 @@ -0,0 +1 @@ +# CONFIG_TI_LMP92064 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TLC4541 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TLC4541 new file mode 100644 index 000000000000..c8c8183fd385 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TLC4541 @@ -0,0 +1 @@ +# CONFIG_TI_TLC4541 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TMAG5273 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TMAG5273 new file mode 100644 index 000000000000..31297ff62f03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TMAG5273 @@ -0,0 +1 @@ +# CONFIG_TI_TMAG5273 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TSC2046 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TSC2046 new file mode 100644 index 000000000000..32d63cd87446 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TI_TSC2046 @@ -0,0 +1 @@ +# CONFIG_TI_TSC2046 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP006 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP006 new file mode 100644 index 000000000000..d76f2b090ee5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP006 @@ -0,0 +1 @@ +# CONFIG_TMP006 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP007 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP007 new file mode 100644 index 000000000000..a3a96d4a8b3c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP007 @@ -0,0 +1 @@ +# CONFIG_TMP007 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP117 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP117 new file mode 100644 index 000000000000..68663fdbed8c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TMP117 @@ -0,0 +1 @@ +# CONFIG_TMP117 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOPSTAR_LAPTOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOPSTAR_LAPTOP new file mode 100644 index 000000000000..d38a75930dea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOPSTAR_LAPTOP 
@@ -0,0 +1 @@ +CONFIG_TOPSTAR_LAPTOP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_BT_RFKILL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_BT_RFKILL new file mode 100644 index 000000000000..07f3cf8b1f7c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_BT_RFKILL @@ -0,0 +1 @@ +CONFIG_TOSHIBA_BT_RFKILL=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_HAPS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_HAPS new file mode 100644 index 000000000000..09c625d33293 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_HAPS @@ -0,0 +1 @@ +# CONFIG_TOSHIBA_HAPS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_WMI new file mode 100644 index 000000000000..37af276daa73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOSHIBA_WMI @@ -0,0 +1 @@ +# CONFIG_TOSHIBA_WMI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7877 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7877 new file mode 100644 index 000000000000..822e85f151a5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7877 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_AD7877 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7879 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7879 new file mode 100644 index 000000000000..c3836e69d50a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AD7879 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_AD7879 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ADS7846 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ADS7846 new file mode 100644 index 000000000000..dd12ad15c85b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ADS7846 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ADS7846 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ATMEL_MXT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ATMEL_MXT new 
file mode 100644 index 000000000000..2fd6137904cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ATMEL_MXT @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AUO_PIXCIR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AUO_PIXCIR new file mode 100644 index 000000000000..b33f61f376bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_AUO_PIXCIR @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_BU21013 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_BU21013 new file mode 100644 index 000000000000..547232cb2683 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_BU21013 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_BU21013 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_BU21029 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_BU21029 new file mode 100644 index 000000000000..231bdaf59ee5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_BU21029 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_BU21029 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 new file mode 100644 index 000000000000..20fb6c5e9f0e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_COLIBRI_VF50 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_COLIBRI_VF50 new file mode 100644 index 000000000000..2629724240b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_COLIBRI_VF50 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CY8CTMA140 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CY8CTMA140 new file mode 100644 
index 000000000000..fa03c444e54b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CY8CTMA140 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CY8CTMG110 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CY8CTMG110 new file mode 100644 index 000000000000..0e43d53b5930 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CY8CTMG110 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP4_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP4_CORE new file mode 100644 index 000000000000..9a38f4fd8d07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP4_CORE @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP5 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP5 new file mode 100644 index 000000000000..58375d2ed6e4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP5 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_CYTTSP5 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP_CORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP_CORE new file mode 100644 index 000000000000..aa92f1028348 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_CYTTSP_CORE @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_DYNAPRO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_DYNAPRO new file mode 100644 index 000000000000..a392363c08e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_DYNAPRO @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_DYNAPRO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EDT_FT5X06 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EDT_FT5X06 new file mode 100644 index 
000000000000..c84fdf491d7e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EDT_FT5X06 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EETI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EETI new file mode 100644 index 000000000000..6fdac1278637 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EETI @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_EETI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EGALAX_SERIAL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EGALAX_SERIAL new file mode 100644 index 000000000000..f3e1b7a98af7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EGALAX_SERIAL @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EKTF2127 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EKTF2127 new file mode 100644 index 000000000000..2b8f3d594434 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EKTF2127 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_EKTF2127 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELAN new file mode 100644 index 000000000000..9b42854babab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELAN @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ELAN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELO new file mode 100644 index 000000000000..889c6b213f9b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ELO @@ -0,0 +1 @@ +CONFIG_TOUCHSCREEN_ELO=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EXC3000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EXC3000 new file mode 100644 index 000000000000..99f4f0f0f8f1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_EXC3000 @@ 
-0,0 +1 @@ +# CONFIG_TOUCHSCREEN_EXC3000 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_FUJITSU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_FUJITSU new file mode 100644 index 000000000000..f9c8e6cc39f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_FUJITSU @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_FUJITSU is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GOODIX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GOODIX new file mode 100644 index 000000000000..2cd772276bed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GOODIX @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_GOODIX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GUNZE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GUNZE new file mode 100644 index 000000000000..138d24dcb40c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_GUNZE @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_GUNZE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HAMPSHIRE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HAMPSHIRE new file mode 100644 index 000000000000..6030c1a6c834 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HAMPSHIRE @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIDEEP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIDEEP new file mode 100644 index 000000000000..95f913e6fac9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIDEEP @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_HIDEEP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIMAX_HX83112B b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIMAX_HX83112B new file mode 100644 index 000000000000..6b38ce12978c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HIMAX_HX83112B @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HYCON_HY46XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HYCON_HY46XX new file mode 100644 index 000000000000..0514f62a88e9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HYCON_HY46XX @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX new file mode 100644 index 000000000000..cfc752b45586 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILI210X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILI210X new file mode 100644 index 000000000000..b8045cd897bd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILI210X @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ILI210X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILITEK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILITEK new file mode 100644 index 000000000000..77f5ae298ede --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ILITEK @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ILITEK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IMAGIS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IMAGIS new file mode 100644 index 000000000000..454a356efa2a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IMAGIS @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_IMAGIS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_INEXIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_INEXIO new file mode 100644 index 000000000000..5b2d7d9fa692 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_INEXIO @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_INEXIO is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS5XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS5XX new file mode 100644 index 000000000000..cf0340b28c52 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS5XX @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_IQS5XX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS7211 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS7211 new file mode 100644 index 000000000000..184ee14a0bdb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_IQS7211 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_IQS7211 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MAX11801 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MAX11801 new file mode 100644 index 000000000000..2273504dac44 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MAX11801 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_MAX11801 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MCS5000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MCS5000 new file mode 100644 index 000000000000..5e3c79663101 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MCS5000 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_MCS5000 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MELFAS_MIP4 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MELFAS_MIP4 new file mode 100644 index 000000000000..0adb4f84a0f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MELFAS_MIP4 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MMS114 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MMS114 new file mode 100644 index 000000000000..5220b17a780c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MMS114 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_MMS114 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MSG2638 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MSG2638 new file mode 100644 index 000000000000..5fead293a159 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MSG2638 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_MSG2638 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MTOUCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MTOUCH new file mode 100644 index 000000000000..1e80aceb8e0a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_MTOUCH @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_MTOUCH is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS new file mode 100644 index 000000000000..1c325921ea7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PENMOUNT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PENMOUNT new file mode 100644 index 000000000000..c86237b5a10a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PENMOUNT @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_PENMOUNT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PIXCIR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PIXCIR new file mode 100644 index 000000000000..30d025e2849c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_PIXCIR @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_PIXCIR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_RM_TS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_RM_TS new file mode 100644 index 000000000000..ddaacfcb2c6e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_RM_TS @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_RM_TS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ROHM_BU21023 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ROHM_BU21023 new 
file mode 100644 index 000000000000..94349fdcb4fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ROHM_BU21023 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_S6SY761 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_S6SY761 new file mode 100644 index 000000000000..3b9b34f031ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_S6SY761 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_S6SY761 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SILEAD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SILEAD new file mode 100644 index 000000000000..0c094b8525d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SILEAD @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_SILEAD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SIS_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SIS_I2C new file mode 100644 index 000000000000..1522b1e2fa03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SIS_I2C @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_SIS_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ST1232 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ST1232 new file mode 100644 index 000000000000..a665e9edac13 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ST1232 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ST1232 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_STMFTS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_STMFTS new file mode 100644 index 000000000000..0b1cd21e702c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_STMFTS @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_STMFTS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SURFACE3_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SURFACE3_SPI new file mode 100644 index 000000000000..2254ef05f9b9 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SURFACE3_SPI @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SX8654 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SX8654 new file mode 100644 index 000000000000..d63f10cfbe49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_SX8654 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_SX8654 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHIT213 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHIT213 new file mode 100644 index 000000000000..f00370927717 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHIT213 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHRIGHT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHRIGHT new file mode 100644 index 000000000000..a7c81b4512cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHRIGHT @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHWIN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHWIN new file mode 100644 index 000000000000..8ed1bea31290 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TOUCHWIN @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TPS6507X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TPS6507X new file mode 100644 index 000000000000..6f56a322e032 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TPS6507X @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TPS6507X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2004 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2004 new file mode 100644 index 000000000000..6ecfb739ddb7 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2004 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TSC2004 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2005 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2005 new file mode 100644 index 000000000000..6c1893766c8c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2005 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TSC2005 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2007 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2007 new file mode 100644 index 000000000000..5232f1df83a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC2007 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TSC2007 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC_SERIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC_SERIO new file mode 100644 index 000000000000..05b67b58de97 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_TSC_SERIO @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_USB_COMPOSITE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_USB_COMPOSITE new file mode 100644 index 000000000000..b376949578a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_USB_COMPOSITE @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WACOM_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WACOM_I2C new file mode 100644 index 000000000000..efff558f0b19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WACOM_I2C @@ -0,0 +1 @@ +CONFIG_TOUCHSCREEN_WACOM_I2C=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WACOM_W8001 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WACOM_W8001 new file mode 100644 index 000000000000..27610e7df87a --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WACOM_W8001 @@ -0,0 +1 @@ +CONFIG_TOUCHSCREEN_WACOM_W8001=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WDT87XX_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WDT87XX_I2C new file mode 100644 index 000000000000..66be75a9dfd8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_WDT87XX_I2C @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZET6223 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZET6223 new file mode 100644 index 000000000000..ed3563855dda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZET6223 @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ZET6223 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZFORCE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZFORCE new file mode 100644 index 000000000000..51a44c719cf5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZFORCE @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ZFORCE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZINITIX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZINITIX new file mode 100644 index 000000000000..72fb1fc318f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TOUCHSCREEN_ZINITIX @@ -0,0 +1 @@ +# CONFIG_TOUCHSCREEN_ZINITIX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TPL0102 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TPL0102 new file mode 100644 index 000000000000..a74d18f7572f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TPL0102 @@ -0,0 +1 @@ +# CONFIG_TPL0102 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TQMX86_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TQMX86_WDT new file mode 100644 index 000000000000..38671f9df968 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TQMX86_WDT @@ -0,0 +1 @@ +# CONFIG_TQMX86_WDT is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2583 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2583 new file mode 100644 index 000000000000..04de26240196 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2583 @@ -0,0 +1 @@ +# CONFIG_TSL2583 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2591 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2591 new file mode 100644 index 000000000000..bccc3cfc94d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2591 @@ -0,0 +1 @@ +# CONFIG_TSL2591 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2772 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2772 new file mode 100644 index 000000000000..5a6ef0c7ae7a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL2772 @@ -0,0 +1 @@ +# CONFIG_TSL2772 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL4531 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL4531 new file mode 100644 index 000000000000..f83080df9daa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSL4531 @@ -0,0 +1 @@ +# CONFIG_TSL4531 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS01 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS01 new file mode 100644 index 000000000000..278ed9b09181 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS01 @@ -0,0 +1 @@ +# CONFIG_TSYS01 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS02D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS02D new file mode 100644 index 000000000000..a81a8031d549 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TSYS02D @@ -0,0 +1 @@ +# CONFIG_TSYS02D is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TYPEC_FUSB302 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TYPEC_FUSB302 new file mode 100644 index 000000000000..9633e1956bc3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TYPEC_FUSB302 @@ -0,0 +1 @@ +CONFIG_TYPEC_FUSB302=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_UEFI_CPER_X86 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UEFI_CPER_X86 new file mode 100644 index 000000000000..aa26eae2bb14 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UEFI_CPER_X86 @@ -0,0 +1 @@ +CONFIG_UEFI_CPER_X86=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_UNIXWARE_DISKLABEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UNIXWARE_DISKLABEL new file mode 100644 index 000000000000..94a5d7bd1c33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UNIXWARE_DISKLABEL @@ -0,0 +1 @@ +CONFIG_UNIXWARE_DISKLABEL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_US5182D b/anolis/configs/L2-OPTIONAL/x86/CONFIG_US5182D new file mode 100644 index 000000000000..176817910fb9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_US5182D @@ -0,0 +1 @@ +# CONFIG_US5182D is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_CHAOSKEY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_CHAOSKEY new file mode 100644 index 000000000000..f8d3f2f8c80f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_CHAOSKEY @@ -0,0 +1 @@ +# CONFIG_USB_CHAOSKEY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_EHCI_HCD_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_EHCI_HCD_PLATFORM new file mode 100644 index 000000000000..28e68839c042 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_EHCI_HCD_PLATFORM @@ -0,0 +1 @@ +# CONFIG_USB_EHCI_HCD_PLATFORM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_LGM_PHY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_LGM_PHY new file mode 100644 index 000000000000..793c45154118 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_LGM_PHY @@ -0,0 +1 @@ +# CONFIG_USB_LGM_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_NET_RNDIS_WLAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_NET_RNDIS_WLAN new file mode 100644 index 000000000000..d576cda2cf53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_NET_RNDIS_WLAN @@ -0,0 +1 @@ +# CONFIG_USB_NET_RNDIS_WLAN is 
not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_NET_SR9700 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_NET_SR9700 new file mode 100644 index 000000000000..8002721e31b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_NET_SR9700 @@ -0,0 +1 @@ +# CONFIG_USB_NET_SR9700 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_PULSE8_CEC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_PULSE8_CEC new file mode 100644 index 000000000000..bd9c5b9bc8ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_PULSE8_CEC @@ -0,0 +1 @@ +CONFIG_USB_PULSE8_CEC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_RAINSHADOW_CEC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_RAINSHADOW_CEC new file mode 100644 index 000000000000..c6605282b6d4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_RAINSHADOW_CEC @@ -0,0 +1 @@ +CONFIG_USB_RAINSHADOW_CEC=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_ROLES_INTEL_XHCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_ROLES_INTEL_XHCI new file mode 100644 index 000000000000..4613084180a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_ROLES_INTEL_XHCI @@ -0,0 +1 @@ +CONFIG_USB_ROLES_INTEL_XHCI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_CONSOLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_CONSOLE new file mode 100644 index 000000000000..d0dc474f93df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_CONSOLE @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_CONSOLE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_MOS7715_PARPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_MOS7715_PARPORT new file mode 100644 index 000000000000..87be7826c004 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_MOS7715_PARPORT @@ -0,0 +1 @@ +CONFIG_USB_SERIAL_MOS7715_PARPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_SIMPLE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_SIMPLE new file mode 100644 index 
000000000000..d4a7817eecf4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SERIAL_SIMPLE @@ -0,0 +1 @@ +# CONFIG_USB_SERIAL_SIMPLE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SPEEDTOUCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SPEEDTOUCH new file mode 100644 index 000000000000..4d2b84780e5c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_SPEEDTOUCH @@ -0,0 +1 @@ +CONFIG_USB_SPEEDTOUCH=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_UHCI_HCD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_UHCI_HCD new file mode 100644 index 000000000000..4722af6171e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_UHCI_HCD @@ -0,0 +1 @@ +# CONFIG_USB_UHCI_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_ULPI_BUS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_ULPI_BUS new file mode 100644 index 000000000000..81571d175b5d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_ULPI_BUS @@ -0,0 +1 @@ +# CONFIG_USB_ULPI_BUS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_USS720 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_USS720 new file mode 100644 index 000000000000..ca903371e4ad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_USS720 @@ -0,0 +1 @@ +CONFIG_USB_USS720=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XEN_HCD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XEN_HCD new file mode 100644 index 000000000000..cca3e16d1ab1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XEN_HCD @@ -0,0 +1 @@ +# CONFIG_USB_XEN_HCD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XHCI_DBGCAP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XHCI_DBGCAP new file mode 100644 index 000000000000..3cf92a71ad6f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XHCI_DBGCAP @@ -0,0 +1 @@ +CONFIG_USB_XHCI_DBGCAP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XHCI_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XHCI_PLATFORM 
new file mode 100644 index 000000000000..c824b0c45bec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USB_XHCI_PLATFORM @@ -0,0 +1 @@ +# CONFIG_USB_XHCI_PLATFORM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USER_RETURN_NOTIFIER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USER_RETURN_NOTIFIER new file mode 100644 index 000000000000..02295270b59a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USER_RETURN_NOTIFIER @@ -0,0 +1 @@ +CONFIG_USER_RETURN_NOTIFIER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USER_STACKTRACE_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USER_STACKTRACE_SUPPORT new file mode 100644 index 000000000000..079153a892dc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USER_STACKTRACE_SUPPORT @@ -0,0 +1 @@ +CONFIG_USER_STACKTRACE_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_UV_MMTIMER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UV_MMTIMER new file mode 100644 index 000000000000..93fe2251b4ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UV_MMTIMER @@ -0,0 +1 @@ +CONFIG_UV_MMTIMER=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_UV_SYSFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UV_SYSFS new file mode 100644 index 000000000000..d5ea1fa7ee09 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UV_SYSFS @@ -0,0 +1 @@ +# CONFIG_UV_SYSFS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL3020 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL3020 new file mode 100644 index 000000000000..256fb80de2f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL3020 @@ -0,0 +1 @@ +# CONFIG_VCNL3020 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL4000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL4000 new file mode 100644 index 000000000000..ae803eceddf0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL4000 @@ -0,0 +1 @@ +# CONFIG_VCNL4000 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL4035 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL4035 new file mode 100644 index 000000000000..2fbd58f6d8ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VCNL4035 @@ -0,0 +1 @@ +# CONFIG_VCNL4035 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VEML6030 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VEML6030 new file mode 100644 index 000000000000..a78163437edf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VEML6030 @@ -0,0 +1 @@ +# CONFIG_VEML6030 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VEML6070 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VEML6070 new file mode 100644 index 000000000000..ad005705fa54 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VEML6070 @@ -0,0 +1 @@ +# CONFIG_VEML6070 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VF610_ADC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VF610_ADC new file mode 100644 index 000000000000..33803bea02ee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VF610_ADC @@ -0,0 +1 @@ +# CONFIG_VF610_ADC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VF610_DAC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VF610_DAC new file mode 100644 index 000000000000..556c9934d3eb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VF610_DAC @@ -0,0 +1 @@ +# CONFIG_VF610_DAC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VFIO_DEVICE_CDEV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VFIO_DEVICE_CDEV new file mode 100644 index 000000000000..f6c5ea150d6b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VFIO_DEVICE_CDEV @@ -0,0 +1 @@ +# CONFIG_VFIO_DEVICE_CDEV is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VFIO_PCI_VGA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VFIO_PCI_VGA new file mode 100644 index 000000000000..047bcb8d655c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VFIO_PCI_VGA @@ -0,0 +1 @@ +# CONFIG_VFIO_PCI_VGA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIA_WDT 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIA_WDT new file mode 100644 index 000000000000..cf0dc5e89921 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIA_WDT @@ -0,0 +1 @@ +CONFIG_VIA_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIPERBOARD_ADC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIPERBOARD_ADC new file mode 100644 index 000000000000..6427af502ff2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIPERBOARD_ADC @@ -0,0 +1 @@ +# CONFIG_VIPERBOARD_ADC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIRT_WIFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIRT_WIFI new file mode 100644 index 000000000000..54a643a6503f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VIRT_WIFI @@ -0,0 +1 @@ +# CONFIG_VIRT_WIFI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VL53L0X_I2C b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VL53L0X_I2C new file mode 100644 index 000000000000..2ee7f58bb912 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VL53L0X_I2C @@ -0,0 +1 @@ +# CONFIG_VL53L0X_I2C is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VL6180 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VL6180 new file mode 100644 index 000000000000..47fbc80f53c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VL6180 @@ -0,0 +1 @@ +# CONFIG_VL6180 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VMAP_PFN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VMAP_PFN new file mode 100644 index 000000000000..f522eff5bcda --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VMAP_PFN @@ -0,0 +1 @@ +CONFIG_VMAP_PFN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VMGENID b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VMGENID new file mode 100644 index 000000000000..4811b5ce5b72 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VMGENID @@ -0,0 +1 @@ +CONFIG_VMGENID=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_VZ89X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VZ89X new file mode 100644 index 
000000000000..3c56d573f037 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_VZ89X @@ -0,0 +1 @@ +# CONFIG_VZ89X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83627HF_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83627HF_WDT new file mode 100644 index 000000000000..f2cd258ebe03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83627HF_WDT @@ -0,0 +1 @@ +CONFIG_W83627HF_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83877F_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83877F_WDT new file mode 100644 index 000000000000..f72b7e392a34 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83877F_WDT @@ -0,0 +1 @@ +CONFIG_W83877F_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83977F_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83977F_WDT new file mode 100644 index 000000000000..77f20d78f4d9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_W83977F_WDT @@ -0,0 +1 @@ +CONFIG_W83977F_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WAFER_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WAFER_WDT new file mode 100644 index 000000000000..40857148cc46 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WAFER_WDT @@ -0,0 +1 @@ +# CONFIG_WAFER_WDT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WANT_DEV_COREDUMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WANT_DEV_COREDUMP new file mode 100644 index 000000000000..1151440d7d75 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WANT_DEV_COREDUMP @@ -0,0 +1 @@ +CONFIG_WANT_DEV_COREDUMP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WCN36XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WCN36XX new file mode 100644 index 000000000000..f6863da14089 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WCN36XX @@ -0,0 +1 @@ +# CONFIG_WCN36XX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WFX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WFX new file mode 100644 index 000000000000..aa136a90c6a2 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WFX @@ -0,0 +1 @@ +# CONFIG_WFX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WIL6210 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WIL6210 new file mode 100644 index 000000000000..e15bf8c17488 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WIL6210 @@ -0,0 +1 @@ +# CONFIG_WIL6210 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WILC1000_SDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WILC1000_SDIO new file mode 100644 index 000000000000..603383d16f39 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WILC1000_SDIO @@ -0,0 +1 @@ +# CONFIG_WILC1000_SDIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WILC1000_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WILC1000_SPI new file mode 100644 index 000000000000..a12849b374c4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WILC1000_SPI @@ -0,0 +1 @@ +# CONFIG_WILC1000_SPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WINMATE_FM07_KEYS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WINMATE_FM07_KEYS new file mode 100644 index 000000000000..2ad330a0001e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WINMATE_FM07_KEYS @@ -0,0 +1 @@ +# CONFIG_WINMATE_FM07_KEYS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WIRELESS_HOTKEY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WIRELESS_HOTKEY new file mode 100644 index 000000000000..8b379c4a93e6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WIRELESS_HOTKEY @@ -0,0 +1 @@ +# CONFIG_WIRELESS_HOTKEY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN new file mode 100644 index 000000000000..acb274930693 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN @@ -0,0 +1 @@ +CONFIG_WLAN=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ADMTEK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ADMTEK new file mode 100644 index 000000000000..b38ec2938d71 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ADMTEK @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_ADMTEK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATH new file mode 100644 index 000000000000..bbd0da798dc6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATH @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_ATH=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATMEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATMEL new file mode 100644 index 000000000000..0cf5b4233396 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ATMEL @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_ATMEL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_BROADCOM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_BROADCOM new file mode 100644 index 000000000000..24736aa895fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_BROADCOM @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_BROADCOM=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_CISCO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_CISCO new file mode 100644 index 000000000000..19bbb0848582 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_CISCO @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_CISCO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTEL new file mode 100644 index 000000000000..80de8dade29e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTEL @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_INTEL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTERSIL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTERSIL new file mode 100644 index 000000000000..842e3e9cc744 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_INTERSIL @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_INTERSIL is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MARVELL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MARVELL new file mode 100644 index 000000000000..46fdde2171df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MARVELL @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_MARVELL=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MEDIATEK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MEDIATEK new file mode 100644 index 000000000000..47b49d2cbca9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MEDIATEK @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_MEDIATEK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MICROCHIP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MICROCHIP new file mode 100644 index 000000000000..48f5fabaecfb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_MICROCHIP @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_MICROCHIP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_PURELIFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_PURELIFI new file mode 100644 index 000000000000..2261b86f0058 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_PURELIFI @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_PURELIFI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_QUANTENNA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_QUANTENNA new file mode 100644 index 000000000000..94a9969d70e7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_QUANTENNA @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_QUANTENNA=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RALINK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RALINK new file mode 100644 index 000000000000..604b617c52ce --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RALINK @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_RALINK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_REALTEK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_REALTEK new file mode 
100644 index 000000000000..2f41028dd087 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_REALTEK @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_REALTEK=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RSI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RSI new file mode 100644 index 000000000000..255ed59d805c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_RSI @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_RSI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_SILABS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_SILABS new file mode 100644 index 000000000000..d381a7c18b03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_SILABS @@ -0,0 +1 @@ +CONFIG_WLAN_VENDOR_SILABS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ST new file mode 100644 index 000000000000..39df8e212faa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ST @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_ST is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_TI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_TI new file mode 100644 index 000000000000..68c9bcef3452 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_TI @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_TI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ZYDAS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ZYDAS new file mode 100644 index 000000000000..416b6599496f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WLAN_VENDOR_ZYDAS @@ -0,0 +1 @@ +# CONFIG_WLAN_VENDOR_ZYDAS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WMI_BMOF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WMI_BMOF new file mode 100644 index 000000000000..61dcf543be3a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WMI_BMOF @@ -0,0 +1 @@ +CONFIG_WMI_BMOF=m diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_ACPI_CPUFREQ_CPB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_ACPI_CPUFREQ_CPB new file mode 100644 index 000000000000..db5a9b22be53 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_ACPI_CPUFREQ_CPB @@ -0,0 +1 @@ +CONFIG_X86_ACPI_CPUFREQ_CPB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_AMD_PSTATE_UT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_AMD_PSTATE_UT new file mode 100644 index 000000000000..27a13b9c5180 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_AMD_PSTATE_UT @@ -0,0 +1 @@ +# CONFIG_X86_AMD_PSTATE_UT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CET b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CET new file mode 100644 index 000000000000..6b7f2db8e7af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CET @@ -0,0 +1 @@ +CONFIG_X86_CET=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CMOV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CMOV new file mode 100644 index 000000000000..ee7029780b0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_CMOV @@ -0,0 +1 @@ +CONFIG_X86_CMOV=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_DEBUGCTLMSR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_DEBUGCTLMSR new file mode 100644 index 000000000000..15c1dd3e760b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_DEBUGCTLMSR @@ -0,0 +1 @@ +CONFIG_X86_DEBUGCTLMSR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_HV_CALLBACK_VECTOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_HV_CALLBACK_VECTOR new file mode 100644 index 000000000000..cf8d705e4d50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_HV_CALLBACK_VECTOR @@ -0,0 +1 @@ +CONFIG_X86_HV_CALLBACK_VECTOR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_INTERNODE_CACHE_SHIFT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_INTERNODE_CACHE_SHIFT new file mode 100644 index 000000000000..76cfa7a5ea7f --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_INTERNODE_CACHE_SHIFT @@ -0,0 +1 @@ +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_L1_CACHE_SHIFT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_L1_CACHE_SHIFT new file mode 100644 index 000000000000..27ee8728baea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_L1_CACHE_SHIFT @@ -0,0 +1 @@ +CONFIG_X86_L1_CACHE_SHIFT=6 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MCE_THRESHOLD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MCE_THRESHOLD new file mode 100644 index 000000000000..4df8f99e8826 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MCE_THRESHOLD @@ -0,0 +1 @@ +CONFIG_X86_MCE_THRESHOLD=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MEM_ENCRYPT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MEM_ENCRYPT new file mode 100644 index 000000000000..40d268ca36c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MEM_ENCRYPT @@ -0,0 +1 @@ +CONFIG_X86_MEM_ENCRYPT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MINIMUM_CPU_FAMILY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MINIMUM_CPU_FAMILY new file mode 100644 index 000000000000..b8b342994073 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_MINIMUM_CPU_FAMILY @@ -0,0 +1 @@ +CONFIG_X86_MINIMUM_CPU_FAMILY=64 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_NEED_RELOCS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_NEED_RELOCS new file mode 100644 index 000000000000..8d53b5ff155c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_NEED_RELOCS @@ -0,0 +1 @@ +CONFIG_X86_NEED_RELOCS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PLATFORM_DRIVERS_DELL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PLATFORM_DRIVERS_DELL new file mode 100644 index 000000000000..8b178c89ebad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PLATFORM_DRIVERS_DELL @@ -0,0 +1 @@ +# CONFIG_X86_PLATFORM_DRIVERS_DELL is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PLATFORM_DRIVERS_HP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PLATFORM_DRIVERS_HP new file mode 100644 index 000000000000..5230804c7fec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PLATFORM_DRIVERS_HP @@ -0,0 +1 @@ +# CONFIG_X86_PLATFORM_DRIVERS_HP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY new file mode 100644 index 000000000000..b93c1350fb30 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY @@ -0,0 +1 @@ +CONFIG_X86_PMEM_LEGACY=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY_DEVICE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY_DEVICE new file mode 100644 index 000000000000..f55010b529b1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_PMEM_LEGACY_DEVICE @@ -0,0 +1 @@ +CONFIG_X86_PMEM_LEGACY_DEVICE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_POWERNOW_K8 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_POWERNOW_K8 new file mode 100644 index 000000000000..07b56a824833 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_POWERNOW_K8 @@ -0,0 +1 @@ +CONFIG_X86_POWERNOW_K8=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_CENTRINO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_CENTRINO new file mode 100644 index 000000000000..ea1e56da2297 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_CENTRINO @@ -0,0 +1 @@ +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_LIB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_LIB new file mode 100644 index 000000000000..15c7b319a566 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_SPEEDSTEP_LIB @@ -0,0 +1 @@ +CONFIG_X86_SPEEDSTEP_LIB=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_THERMAL_VECTOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_THERMAL_VECTOR new file mode 
100644 index 000000000000..df08747dda71 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_THERMAL_VECTOR @@ -0,0 +1 @@ +CONFIG_X86_THERMAL_VECTOR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_USER_SHADOW_STACK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_USER_SHADOW_STACK new file mode 100644 index 000000000000..5f911c56286b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_USER_SHADOW_STACK @@ -0,0 +1 @@ +# CONFIG_X86_USER_SHADOW_STACK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_VMX_FEATURE_NAMES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_VMX_FEATURE_NAMES new file mode 100644 index 000000000000..124b55ac9188 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_VMX_FEATURE_NAMES @@ -0,0 +1 @@ +CONFIG_X86_VMX_FEATURE_NAMES=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_X32_ABI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_X32_ABI new file mode 100644 index 000000000000..d06854274864 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X86_X32_ABI @@ -0,0 +1 @@ +# CONFIG_X86_X32_ABI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_X9250 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X9250 new file mode 100644 index 000000000000..76a420beefe9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_X9250 @@ -0,0 +1 @@ +# CONFIG_X9250 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XENFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XENFS new file mode 100644 index 000000000000..2a9db2ee750b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XENFS @@ -0,0 +1 @@ +CONFIG_XENFS=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_ACPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_ACPI new file mode 100644 index 000000000000..0912f9c17184 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_ACPI @@ -0,0 +1 @@ +CONFIG_XEN_ACPI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_AUTO_XLATE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_AUTO_XLATE new file mode 
100644 index 000000000000..1401d69f15f3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_AUTO_XLATE @@ -0,0 +1 @@ +CONFIG_XEN_AUTO_XLATE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BACKEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BACKEND new file mode 100644 index 000000000000..838367289aad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BACKEND @@ -0,0 +1 @@ +# CONFIG_XEN_BACKEND is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BALLOON b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BALLOON new file mode 100644 index 000000000000..1210e9672ad7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BALLOON @@ -0,0 +1 @@ +# CONFIG_XEN_BALLOON is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BLKDEV_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BLKDEV_FRONTEND new file mode 100644 index 000000000000..5b1ec5f1480f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_BLKDEV_FRONTEND @@ -0,0 +1 @@ +CONFIG_XEN_BLKDEV_FRONTEND=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_COMPAT_XENFS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_COMPAT_XENFS new file mode 100644 index 000000000000..280bc95c8f95 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_COMPAT_XENFS @@ -0,0 +1 @@ +CONFIG_XEN_COMPAT_XENFS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_DEBUG_FS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_DEBUG_FS new file mode 100644 index 000000000000..4346991da02f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_DEBUG_FS @@ -0,0 +1 @@ +# CONFIG_XEN_DEBUG_FS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_DEV_EVTCHN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_DEV_EVTCHN new file mode 100644 index 000000000000..a198d3b62d56 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_DEV_EVTCHN @@ -0,0 +1 @@ +CONFIG_XEN_DEV_EVTCHN=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_EFI 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_EFI new file mode 100644 index 000000000000..21644fd8e20e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_EFI @@ -0,0 +1 @@ +CONFIG_XEN_EFI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_FBDEV_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_FBDEV_FRONTEND new file mode 100644 index 000000000000..dfa002e49bee --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_FBDEV_FRONTEND @@ -0,0 +1 @@ +# CONFIG_XEN_FBDEV_FRONTEND is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GNTDEV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GNTDEV new file mode 100644 index 000000000000..e0e2127a67f6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GNTDEV @@ -0,0 +1 @@ +# CONFIG_XEN_GNTDEV is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GRANT_DEV_ALLOC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GRANT_DEV_ALLOC new file mode 100644 index 000000000000..b3d34b07e12e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GRANT_DEV_ALLOC @@ -0,0 +1 @@ +# CONFIG_XEN_GRANT_DEV_ALLOC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GRANT_DMA_ALLOC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GRANT_DMA_ALLOC new file mode 100644 index 000000000000..1fd236c73558 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_GRANT_DMA_ALLOC @@ -0,0 +1 @@ +# CONFIG_XEN_GRANT_DMA_ALLOC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PRIVCMD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PRIVCMD new file mode 100644 index 000000000000..a471c328b311 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PRIVCMD @@ -0,0 +1 @@ +CONFIG_XEN_PRIVCMD=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PV new file mode 100644 index 000000000000..4743cb300f35 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PV @@ -0,0 +1 @@ +# CONFIG_XEN_PV is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVCALLS_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVCALLS_FRONTEND new file mode 100644 index 000000000000..057a41318133 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVCALLS_FRONTEND @@ -0,0 +1 @@ +# CONFIG_XEN_PVCALLS_FRONTEND is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVH new file mode 100644 index 000000000000..2435b87927fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVH @@ -0,0 +1 @@ +# CONFIG_XEN_PVH is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_GUEST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_GUEST new file mode 100644 index 000000000000..d971c335c658 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_GUEST @@ -0,0 +1 @@ +CONFIG_XEN_PVHVM_GUEST=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_SMP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_SMP new file mode 100644 index 000000000000..25314166b736 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_PVHVM_SMP @@ -0,0 +1 @@ +CONFIG_XEN_PVHVM_SMP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SAVE_RESTORE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SAVE_RESTORE new file mode 100644 index 000000000000..b42ea51375f7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SAVE_RESTORE @@ -0,0 +1 @@ +CONFIG_XEN_SAVE_RESTORE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SCSI_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SCSI_FRONTEND new file mode 100644 index 000000000000..5cc303352c12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SCSI_FRONTEND @@ -0,0 +1 @@ +# CONFIG_XEN_SCSI_FRONTEND is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SYS_HYPERVISOR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SYS_HYPERVISOR new file mode 100644 index 000000000000..a8ff9c8e6bc6 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_SYS_HYPERVISOR @@ -0,0 +1 @@ +CONFIG_XEN_SYS_HYPERVISOR=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_UNPOPULATED_ALLOC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_UNPOPULATED_ALLOC new file mode 100644 index 000000000000..8ab61b832add --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_UNPOPULATED_ALLOC @@ -0,0 +1 @@ +# CONFIG_XEN_UNPOPULATED_ALLOC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_VIRTIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_VIRTIO new file mode 100644 index 000000000000..87b3667c853c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_VIRTIO @@ -0,0 +1 @@ +# CONFIG_XEN_VIRTIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_WDT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_WDT new file mode 100644 index 000000000000..333aa5528e7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_WDT @@ -0,0 +1 @@ +CONFIG_XEN_WDT=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_XENBUS_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_XENBUS_FRONTEND new file mode 100644 index 000000000000..b25d45529bf1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XEN_XENBUS_FRONTEND @@ -0,0 +1 @@ +CONFIG_XEN_XENBUS_FRONTEND=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XIAOMI_WMI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XIAOMI_WMI new file mode 100644 index 000000000000..6ecb6ffa9522 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XIAOMI_WMI @@ -0,0 +1 @@ +# CONFIG_XIAOMI_WMI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_XILINX_XADC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XILINX_XADC new file mode 100644 index 000000000000..c9b967ad14c6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_XILINX_XADC @@ -0,0 +1 @@ +# CONFIG_XILINX_XADC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_YAMAHA_YAS530 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_YAMAHA_YAS530 new file mode 100644 index 
000000000000..d68189626fc6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_YAMAHA_YAS530 @@ -0,0 +1 @@ +# CONFIG_YAMAHA_YAS530 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_YOGABOOK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_YOGABOOK new file mode 100644 index 000000000000..010ddb94e845 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_YOGABOOK @@ -0,0 +1 @@ +# CONFIG_YOGABOOK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZOPT2201 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZOPT2201 new file mode 100644 index 000000000000..be622878758d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZOPT2201 @@ -0,0 +1 @@ +# CONFIG_ZOPT2201 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZPA2326 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZPA2326 new file mode 100644 index 000000000000..a2a86f07a8ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZPA2326 @@ -0,0 +1 @@ +# CONFIG_ZPA2326 is not set diff --git a/anolis/configs/custom-overrides/64k/arm64.config b/anolis/configs/custom-overrides/64k/arm64.config new file mode 100644 index 000000000000..e38bc0ebca18 --- /dev/null +++ b/anolis/configs/custom-overrides/64k/arm64.config @@ -0,0 +1,2 @@ +# CONFIG_ARM64_4K_PAGES is not set +CONFIG_ARM64_64K_PAGES=y \ No newline at end of file diff --git a/anolis/configs/custom-overrides/debug/arm64.config b/anolis/configs/custom-overrides/debug/arm64.config new file mode 100644 index 000000000000..e006806c71cf --- /dev/null +++ b/anolis/configs/custom-overrides/debug/arm64.config @@ -0,0 +1,32 @@ +# CONFIG_VMAP_STACK is not set +CONFIG_EROFS_FS_DEBUG=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_PERCPU_TEST=m +CONFIG_PM_TEST_SUSPEND=y +CONFIG_SPI_DEBUG=y +CONFIG_WQ_WATCHDOG=y +CONFIG_I2C_GPIO_FAULT_INJECTOR=y +# CONFIG_INLINE_READ_LOCK is not set +# CONFIG_INLINE_READ_LOCK_BH is not set +# CONFIG_INLINE_READ_LOCK_IRQ is not set +# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set +# CONFIG_INLINE_READ_UNLOCK_BH is not 
set +# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_SPIN_LOCK is not set +# CONFIG_INLINE_SPIN_LOCK_BH is not set +# CONFIG_INLINE_SPIN_LOCK_IRQ is not set +# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set +# CONFIG_INLINE_SPIN_TRYLOCK is not set +# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_BH is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set +# CONFIG_INLINE_WRITE_LOCK is not set +# CONFIG_INLINE_WRITE_LOCK_BH is not set +# CONFIG_INLINE_WRITE_LOCK_IRQ is not set +# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set +# CONFIG_INLINE_WRITE_UNLOCK_BH is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set +CONFIG_KASAN_SHADOW_OFFSET=0xdfffa00000000000 +# CONFIG_KASAN_SW_TAGS is not set +CONFIG_MAILBOX_TEST=m +# CONFIG_UBSAN_UNREACHABLE is not set diff --git a/anolis/configs/custom-overrides/debug/default.config b/anolis/configs/custom-overrides/debug/default.config new file mode 100644 index 000000000000..f0c9f36c8058 --- /dev/null +++ b/anolis/configs/custom-overrides/debug/default.config @@ -0,0 +1,111 @@ +CONFIG_ACPI_APEI_ERST_DEBUG=m +CONFIG_ACPI_CONFIGFS=m +CONFIG_ACPI_CUSTOM_METHOD=m +CONFIG_ACPI_DEBUG=y +CONFIG_ACPI_DEBUGGER=y +CONFIG_ACPI_DEBUGGER_USER=m +CONFIG_CGROUP_DEBUG=y +CONFIG_DEBUG_CREDENTIALS=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_NOTIFIERS=y +CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_PAGEALLOC=y +# CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT is not set +CONFIG_DEBUG_PAGE_REF=y +CONFIG_DEBUG_SG=y +CONFIG_DMA_API_DEBUG=y +CONFIG_DMA_API_DEBUG_SG=y +CONFIG_DMADEVICES_DEBUG=y +CONFIG_DMADEVICES_VDEBUG=y +CONFIG_DRM_AMDGPU_GART_DEBUGFS=y +CONFIG_EDAC_DEBUG=y +CONFIG_EXT4_DEBUG=y +CONFIG_FAIL_IO_TIMEOUT=y +CONFIG_FAIL_MAKE_REQUEST=y +CONFIG_FAIL_MMC_REQUEST=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAILSLAB=y +CONFIG_FAULT_INJECTION=y 
+CONFIG_FAULT_INJECTION_DEBUG_FS=y +# CONFIG_FAULT_INJECTION_USERCOPY is not set +CONFIG_FSCACHE_OBJECT_LIST=y +CONFIG_FTRACE_RECORD_RECURSION=y +CONFIG_FTRACE_RECORD_RECURSION_SIZE=128 +CONFIG_GENERIC_IRQ_DEBUGFS=y +CONFIG_KASAN=y +CONFIG_KASAN_GENERIC=y +CONFIG_KASAN_INLINE=y +# CONFIG_KASAN_OUTLINE is not set +CONFIG_LOCKDEP=y +CONFIG_LOCK_EVENT_COUNTS=y +CONFIG_PAGE_EXTENSION=y +CONFIG_PM_ADVANCED_DEBUG=y +# CONFIG_PROVE_RAW_LOCK_NESTING is not set +CONFIG_QUOTA_DEBUG=y +CONFIG_RCU_TORTURE_TEST=m +CONFIG_RING_BUFFER_RECORD_RECURSION=y +CONFIG_UBSAN=y +CONFIG_UBSAN_BOOL=y +CONFIG_UBSAN_BOUNDS=y +CONFIG_UBSAN_ENUM=y +CONFIG_UBSAN_SANITIZE_ALL=y +CONFIG_UBSAN_SHIFT=y +CONFIG_XFS_WARN=y +# CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION is not set +CONFIG_CC_HAS_UBSAN_ARRAY_BOUNDS=y +CONFIG_CC_HAS_UBSAN_BOUNDS=y +CONFIG_CEPH_LIB_PRETTYDEBUG=y +CONFIG_CONSTRUCTORS=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000 +# CONFIG_DEBUG_KMEMLEAK_TEST is not set +# CONFIG_DEBUG_KOBJECT_RELEASE is not set +CONFIG_DEBUG_LOCK_ALLOC=y +# CONFIG_DEBUG_LOCKDEP is not set +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_OBJECTS=y +# CONFIG_DEBUG_OBJECTS_SELFTEST is not set +CONFIG_DEBUG_PER_CPU_MAPS=y +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_RWSEMS=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y +# CONFIG_FAIL_FUNCTION is not set +# CONFIG_FAIL_FUTEX is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF is not set +# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL is not set +# CONFIG_INLINE_READ_UNLOCK is not set +# CONFIG_INLINE_READ_UNLOCK_IRQ is not set +# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set +# CONFIG_INLINE_WRITE_UNLOCK is not set +# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set +CONFIG_KASAN_STACK=y +CONFIG_LATENCYTOP=y +CONFIG_LOCK_TORTURE_TEST=m +CONFIG_NFP_DEBUG=y +CONFIG_NOUVEAU_DEBUG_MMU=y +CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTIRQ_TRACEPOINTS=y 
+CONFIG_PROVE_LOCKING=y +CONFIG_PROVE_RCU=y +CONFIG_STACKDEPOT=y +CONFIG_TASKS_RCU=y +# CONFIG_TEST_KASAN_MODULE is not set +CONFIG_TEST_LIST_SORT=y +CONFIG_TEST_STRING_HELPERS=m +# CONFIG_TEST_UBSAN is not set +CONFIG_TORTURE_TEST=m +CONFIG_TRACE_IRQFLAGS=y +# CONFIG_UBSAN_ALIGNMENT is not set +CONFIG_UBSAN_ARRAY_BOUNDS=y +# CONFIG_UBSAN_DIV_ZERO is not set +# CONFIG_UBSAN_TRAP is not set +CONFIG_UNINLINE_SPIN_UNLOCK=y diff --git a/anolis/configs/custom-overrides/debug/x86.config b/anolis/configs/custom-overrides/debug/x86.config new file mode 100644 index 000000000000..be7f712a99e4 --- /dev/null +++ b/anolis/configs/custom-overrides/debug/x86.config @@ -0,0 +1,46 @@ +CONFIG_DNS_RESOLVER=y +CONFIG_AMD_PTDMA=y +CONFIG_BFQ_CGROUP_DEBUG=y +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_MD4=y +CONFIG_INTEL_IOMMU_DEBUGFS=y +CONFIG_IOMMU_DEBUGFS=y +CONFIG_IP_VS_DEBUG=y +CONFIG_KASAN_VMALLOC=y +# CONFIG_KCSAN is not set +CONFIG_KFENCE_SAMPLE_INTERVAL=100 +CONFIG_PM_TRACE=y +CONFIG_PM_TRACE_RTC=y +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y +CONFIG_X86_DEBUG_FPU=y +# CONFIG_AMD_IOMMU_DEBUGFS is not set +CONFIG_ATH10K_DEBUG=y +CONFIG_ATH10K_TRACING=y +CONFIG_ATH_DEBUG=y +# CONFIG_ATH_TRACEPOINTS is not set +CONFIG_CFG80211_DEBUGFS=y +CONFIG_CIFS=y +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_CIFS_SMB_DIRECT is not set +CONFIG_CRYPTO_LIB_ARC4=y +CONFIG_CRYPTO_LIB_DES=y +CONFIG_DEBUG_VM=y +# CONFIG_DEBUG_VM_PGFLAGS is not set +CONFIG_DEBUG_VM_PGTABLE=y +# CONFIG_DEBUG_VM_RB is not set +# CONFIG_DEBUG_VM_VMACACHE is not set +CONFIG_DMAR_PERF=y +CONFIG_IWLWIFI_DEBUG=y +CONFIG_IWLWIFI_DEVICE_TRACING=y +CONFIG_KASAN_SHADOW_OFFSET=0xdffffc0000000000 +CONFIG_LOCK_STAT=y +CONFIG_MAC80211_MESSAGE_TRACING=y +CONFIG_MMIOTRACE=y +# CONFIG_MMIOTRACE_TEST is not set +CONFIG_RANDOM32_SELFTEST=y +CONFIG_RC_LOOPBACK=m +CONFIG_RTLWIFI_DEBUG=y +CONFIG_RTW88_DEBUG=y +CONFIG_RTW88_DEBUGFS=y +CONFIG_TRACE_IRQFLAGS_NMI=y diff --git a/anolis/configs/custom-overrides/gcov/default.config 
b/anolis/configs/custom-overrides/gcov/default.config new file mode 100644 index 000000000000..6481357789fe --- /dev/null +++ b/anolis/configs/custom-overrides/gcov/default.config @@ -0,0 +1,2 @@ +CONFIG_GCOV_KERNEL=y +CONFIG_GCOV_PROFILE_ALL=y \ No newline at end of file diff --git a/anolis/configs/custom-overrides/kvm_modulize/arm64.config b/anolis/configs/custom-overrides/kvm_modulize/arm64.config new file mode 100644 index 000000000000..c221222ab1c9 --- /dev/null +++ b/anolis/configs/custom-overrides/kvm_modulize/arm64.config @@ -0,0 +1 @@ +CONFIG_KVM=m \ No newline at end of file diff --git a/anolis/configs/metadata/changelog/CONFIG_LSM b/anolis/configs/metadata/changelog/CONFIG_LSM new file mode 100644 index 000000000000..a7feeea4ae7d --- /dev/null +++ b/anolis/configs/metadata/changelog/CONFIG_LSM @@ -0,0 +1,6 @@ +Enabling lsm=bpf by default would cause a performance regression +for lmbench/tcp and lmbench/syscall by 5%~11%. + +Actually not all or most users want lsm=bpf and those who want +could enable it by cmdline. Thus here we will not enable lsm=bpf +by default. diff --git a/anolis/configs/metadata/changelog/CONFIG_PREEMPT_VOLUNTARY b/anolis/configs/metadata/changelog/CONFIG_PREEMPT_VOLUNTARY new file mode 100644 index 000000000000..680ba446eb33 --- /dev/null +++ b/anolis/configs/metadata/changelog/CONFIG_PREEMPT_VOLUNTARY @@ -0,0 +1,3 @@ +Considering compatibility issues, we have introduced dynamic preempt and set the Kconfig of preempt +in ANCK to Voluntary, and added 'preempt=none' in bootcmdline to ensure that the preemption mode +remains consistent with previous settings. diff --git a/anolis/configs/metadata/changelog/CONFIG_VIRT_PLAT_DEV b/anolis/configs/metadata/changelog/CONFIG_VIRT_PLAT_DEV new file mode 100644 index 000000000000..71f781a9b647 --- /dev/null +++ b/anolis/configs/metadata/changelog/CONFIG_VIRT_PLAT_DEV @@ -0,0 +1,3 @@ +Backport patches from openEuler to support virt platform device direct inject lpi irq(ANBZ: #9398). 
+ +It's required by internal users(Aone: 56141911). -- Gitee From 31b79cada15841fe43e41ab2df071ddf5d7a99d0 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Tue, 20 Aug 2024 19:03:07 +0800 Subject: [PATCH 1183/2138] anolis: configs: adjust the script logic of import operation ANBZ: #8678 This commit fixes a bug of import operation. The file anolis/configs/scripts/kconfig_import defines the operations to import & refresh kconfigs, it has 5 primitives: 1. FILE, to declare the basic information of the whole config files 2. LEVELINFO, to tell kconfig baseline how to collect level information 3. IMPORT, to break the whole config files into many tiny config files 4. COLLAPSE, to collapse same configs from x86 & arm64 dir into default/ 5. STRIP, to remove the same configs between downstream distributions and ANCK. Usually, we use `FILE` to declare config files for arch x86 and arm, and then use `IMPORT` to split them. However, if `FILE` declares two config files with the same name, the later one will override the previous one, and then the subsequent operations will only work for the later one, which is not expectable. This patch fixes such bug by adding a unique number for the file name. 
Fixes: 2585f668ef7c ("anolis: configs: refactor ANCK configs baseline") Signed-off-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3750 --- anolis/configs/scripts/anolis_kconfig.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/anolis/configs/scripts/anolis_kconfig.py b/anolis/configs/scripts/anolis_kconfig.py index a84ee594a4f3..3db9215f0eb5 100644 --- a/anolis/configs/scripts/anolis_kconfig.py +++ b/anolis/configs/scripts/anolis_kconfig.py @@ -455,6 +455,8 @@ class ImportOpTranslater(): output_dir: str src_root: str + file_counter = 0 + def __init__(self, input_dir: str, output_dir: str, src_root: str) -> None: self.files = {} self.files_info = {} @@ -468,8 +470,11 @@ class ImportOpTranslater(): def __op_file(self, args: str): # FILE dist arch variant file_path REFRESH/NOREFRESH + + # use file_counter to make file name unique + ImportOpTranslater.file_counter += 1 dist, arch, subarch, path, refresh = args.split() - new_path = os.path.join(self.output_dir, os.path.basename(path)) + new_path = os.path.join(self.output_dir, f"{ImportOpTranslater.file_counter}-{os.path.basename(path)}") if subarch != "null": self.files[f"{dist}-{arch}-{subarch}"] = new_path self.files_info[(dist, arch, subarch)] = new_path -- Gitee From 91e6780e52c3531f28be0e9dee899c347d57893c Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Wed, 21 Aug 2024 15:20:44 +0800 Subject: [PATCH 1184/2138] anolis: spec: generate configs from kconfig baseline ANBZ: #8678 The kconfig baseline has been imported, use the generated config files from baseline rather than just copy from arch/{x86,arm64}/configs when building kernel rpm packages. 
Signed-off-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3750 --- anolis/genrpmtree.sh | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/anolis/genrpmtree.sh b/anolis/genrpmtree.sh index 7f1f4036f797..65fc8d016212 100644 --- a/anolis/genrpmtree.sh +++ b/anolis/genrpmtree.sh @@ -24,10 +24,20 @@ function do_prep() { popd > /dev/null DIST_OUTPUT=${DIST_RPMBUILDDIR_OUTPUT}/SPECS/ sh genspec.sh - cp ${DIST_SRCROOT}/arch/x86/configs/anolis_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-x86_64.config - cp ${DIST_SRCROOT}/arch/x86/configs/anolis-debug_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-x86_64-debug.config - cp ${DIST_SRCROOT}/arch/arm64/configs/anolis_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64.config - cp ${DIST_SRCROOT}/arch/arm64/configs/anolis-debug_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64-debug.config + # the kconfigs of x86 and arm64 has been moved to kconfig baseline, + # so use `make dist-configs` to generate them + make -C ${DIST_SRCROOT}/anolis dist-configs + cp ${DIST_OUTPUT}/kernel-ANCK-generic-x86.config \ + ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-x86_64.config + cp ${DIST_OUTPUT}/kernel-ANCK-debug-x86.config \ + ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-x86_64-debug.config + cp ${DIST_OUTPUT}/kernel-ANCK-generic-arm64.config \ + ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64.config + cp ${DIST_OUTPUT}/kernel-ANCK-debug-arm64.config \ + ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64-debug.config + + # the kconfigs of sw_64 and loongarch keep the legacy way, + # so still copy them from arch/${arch}/configs/ directory. 
cp ${DIST_SRCROOT}/arch/loongarch/configs/anolis_defconfig \ ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-loongarch64.config cp ${DIST_SRCROOT}/arch/loongarch/configs/anolis-debug_defconfig \ -- Gitee From a81f15c49d680a4cb6c9b025f8b0b3c91d19290b Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Mon, 4 Dec 2023 17:16:42 +0800 Subject: [PATCH 1185/2138] anolis: configs: support `make anolis_defconfig` command expicitly ANBZ: #8678 Command `make anolis_defconfig` is valid if anolis_defconfig file exists in arch/{arm64,x86}/configs, and the config baseline will remove it in the future, so add this target expicitly to keep developer's habits unchanged. This commit is backported from devel-5.10, and it is based on 3 commits: - commit dc90182eb7c90 ("anolis: configs: add `make anolis_defconfig` description in `make help`") - commit 76e25c2114ebf ("anolis: configs: support generate configs cross arch") - commit f161d7d3050e8 ("anolis: configs: support `make anolis_defconfig` command expicitly") Signed-off-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3750 --- anolis/Makefile.variables | 11 +++++++++++ scripts/kconfig/Makefile | 10 ++++++++++ 2 files changed, 21 insertions(+) diff --git a/anolis/Makefile.variables b/anolis/Makefile.variables index d806e7988230..f2598d1b0ce8 100644 --- a/anolis/Makefile.variables +++ b/anolis/Makefile.variables @@ -78,3 +78,14 @@ DIST_ANOLIS_VERSION = $(DIST_KERNELVERSION)-$(DIST_PKGRELEASEVERION) # for unofficial build, we compress tarball from git HEAD DIST_PKG_COMMIT_ID = $(if $(DIST_OFFICIAL_BUILD),$(DIST_ANOLIS_VERSION),$(DIST_GIT_HEAD_FULL_COMMIT_ID)) +ifneq ("${ARCH}","") +DIST_ARCH ?= $(ARCH) +else +DIST_ARCH ?= $(shell uname -m) +endif +ifeq ($(DIST_ARCH),x86_64) + DIST_ARCH := x86 +endif +ifeq ($(DIST_ARCH),aarch64) + DIST_ARCH := arm64 +endif diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index 4eee155121a8..6e97230421d4 100644 --- a/scripts/kconfig/Makefile +++ 
b/scripts/kconfig/Makefile @@ -90,6 +90,14 @@ else $(Q)$(MAKE) -f $(srctree)/Makefile $(KBUILD_DEFCONFIG) endif +anolis_defconfig: $(obj)/conf + $(Q)DIST_DO_GENERATE_DOT_CONFIG=Y DIST_CONFIG_KERNEL_NAME=ANCK $(MAKE) -C $(srctree)/anolis/ dist-defconfig + $(Q)$(MAKE) -C $(srctree) olddefconfig + +anolis-debug_defconfig: $(obj)/conf + $(Q)DIST_DO_GENERATE_DOT_CONFIG=Y DIST_CONFIG_KERNEL_NAME=ANCK $(MAKE) -C $(srctree)/anolis/ dist-debug-defconfig + $(Q)$(MAKE) -C $(srctree) olddefconfig + %_defconfig: $(obj)/conf $(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig) @@ -118,6 +126,8 @@ clean-files += tests/.cache # Help text used by make help help: @echo 'Configuration targets:' + @echo ' anolis_defconfig - Generate anolis config for production environment' + @echo ' anolis-debug_defconfig - Generate anolis config for testing environment' @echo ' config - Update current config utilising a line-oriented program' @echo ' nconfig - Update current config utilising a ncurses menu based program' @echo ' menuconfig - Update current config utilising a menu based program' -- Gitee From ec13c39e98fccd60eb76625c43750b442f6085d4 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Wed, 21 Aug 2024 15:08:27 +0800 Subject: [PATCH 1186/2138] anolis: configs: remove anolis_defconfig and anolis-debug_defconfig ANBZ: #8678 The kconfig baseline has been imported, remove legacy anolis_defconfig and anolis-debug_defconfig files. 
Signed-off-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3750 --- arch/arm64/configs/anolis-debug_defconfig | 7246 ------------------ arch/arm64/configs/anolis_defconfig | 7184 ------------------ arch/x86/configs/anolis-debug_defconfig | 8148 --------------------- arch/x86/configs/anolis_defconfig | 8082 -------------------- 4 files changed, 30660 deletions(-) delete mode 100644 arch/arm64/configs/anolis-debug_defconfig delete mode 100644 arch/arm64/configs/anolis_defconfig delete mode 100644 arch/x86/configs/anolis-debug_defconfig delete mode 100644 arch/x86/configs/anolis_defconfig diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig deleted file mode 100644 index 0d5d34dbb3ca..000000000000 --- a/arch/arm64/configs/anolis-debug_defconfig +++ /dev/null @@ -1,7246 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. -# Linux/arm64 6.6.25 Kernel Configuration -# -CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" -CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=200000 -CONFIG_CLANG_VERSION=0 -CONFIG_AS_IS_GNU=y -CONFIG_AS_VERSION=25000 -CONFIG_LD_IS_BFD=y -CONFIG_LD_VERSION=25000 -CONFIG_LLD_VERSION=0 -CONFIG_CC_CAN_LINK=y -CONFIG_CC_CAN_LINK_STATIC=y -CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y -CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y -CONFIG_TOOLS_SUPPORT_RELR=y -CONFIG_CC_HAS_ASM_INLINE=y -CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y -CONFIG_PAHOLE_VERSION=117 -CONFIG_CONSTRUCTORS=y -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_TABLE_SORT=y -CONFIG_THREAD_INFO_IN_TASK=y - -# -# General setup -# -CONFIG_INIT_ENV_ARG_LIMIT=32 -# CONFIG_COMPILE_TEST is not set -# CONFIG_WERROR is not set -CONFIG_LOCALVERSION="" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_BUILD_SALT="" -CONFIG_DEFAULT_INIT="" -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_SYSVIPC_COMPAT=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -# CONFIG_WATCH_QUEUE is not set -CONFIG_CROSS_MEMORY_ATTACH=y -# CONFIG_USELIB is not set 
-CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_AUDITSYSCALL=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_SHOW_LEVEL=y -CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y -CONFIG_GENERIC_IRQ_MIGRATION=y -CONFIG_GENERIC_IRQ_INJECTION=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y -CONFIG_GENERIC_IRQ_IPI=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_IRQ_MSI_IOMMU=y -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y -CONFIG_GENERIC_IRQ_DEBUGFS=y -# end of IRQ subsystem - -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_ARCH_HAS_TICK_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y -CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y -CONFIG_CONTEXT_TRACKING=y -CONFIG_CONTEXT_TRACKING_IDLE=y - -# -# Timers subsystem -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set -# CONFIG_NO_HZ_IDLE is not set -CONFIG_NO_HZ_FULL=y -CONFIG_CONTEXT_TRACKING_USER=y -# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -# end of Timers subsystem - -CONFIG_BPF=y -CONFIG_HAVE_EBPF_JIT=y -CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y - -# -# BPF subsystem -# -CONFIG_BPF_SYSCALL=y -CONFIG_BPF_JIT=y -CONFIG_BPF_JIT_ALWAYS_ON=y -CONFIG_BPF_JIT_DEFAULT_ON=y -CONFIG_BPF_UNPRIV_DEFAULT_OFF=y -# CONFIG_BPF_PRELOAD is not set -CONFIG_BPF_LSM=y -# end of BPF subsystem - -CONFIG_PREEMPT_BUILD=y -# CONFIG_PREEMPT_NONE is not set -CONFIG_PREEMPT_VOLUNTARY=y -# CONFIG_PREEMPT is not set -CONFIG_PREEMPT_COUNT=y -CONFIG_PREEMPTION=y -CONFIG_PREEMPT_DYNAMIC=y -CONFIG_SCHED_CORE=y - -# -# CPU/Task time and stats accounting -# -CONFIG_VIRT_CPU_ACCOUNTING=y -CONFIG_VIRT_CPU_ACCOUNTING_GEN=y -# CONFIG_IRQ_TIME_ACCOUNTING is not set -CONFIG_HAVE_SCHED_AVG_IRQ=y -CONFIG_SCHED_THERMAL_PRESSURE=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y 
-CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_PSI=y -CONFIG_PSI_DEFAULT_DISABLED=y -# end of CPU/Task time and stats accounting - -CONFIG_CPU_ISOLATION=y - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -CONFIG_PREEMPT_RCU=y -# CONFIG_RCU_EXPERT is not set -CONFIG_TREE_SRCU=y -CONFIG_TASKS_RCU_GENERIC=y -CONFIG_TASKS_RCU=y -CONFIG_TASKS_RUDE_RCU=y -CONFIG_TASKS_TRACE_RCU=y -CONFIG_RCU_STALL_COMMON=y -CONFIG_RCU_NEED_SEGCBLIST=y -CONFIG_RCU_NOCB_CPU=y -# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set -# CONFIG_RCU_LAZY is not set -# end of RCU Subsystem - -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -# CONFIG_IKHEADERS is not set -CONFIG_LOG_BUF_SHIFT=20 -CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 -# CONFIG_PRINTK_INDEX is not set -CONFIG_GENERIC_SCHED_CLOCK=y - -# -# Scheduler features -# -# end of Scheduler features - -CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y -CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y -CONFIG_CC_HAS_INT128=y -CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" -CONFIG_GCC10_NO_ARRAY_BOUNDS=y -CONFIG_CC_NO_ARRAY_BOUNDS=y -CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_NUMA_BALANCING=y -CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y -CONFIG_CGROUPS=y -CONFIG_PAGE_COUNTER=y -# CONFIG_CGROUP_FAVOR_DYNMODS is not set -CONFIG_MEMCG=y -CONFIG_MEMCG_KMEM=y -CONFIG_BLK_CGROUP=y -CONFIG_CGROUP_WRITEBACK=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_SCHED_MM_CID=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_RDMA=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_DEVICE=y -CONFIG_SCHED_SLI=y -CONFIG_RICH_CONTAINER=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_BPF=y -# CONFIG_CGROUP_MISC is not set -CONFIG_CGROUP_DEBUG=y -CONFIG_SOCK_CGROUP_DATA=y -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_TIME_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_CHECKPOINT_RESTORE=y -CONFIG_SCHED_AUTOGROUP=y 
-CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -CONFIG_RD_LZ4=y -CONFIG_RD_ZSTD=y -# CONFIG_BOOT_CONFIG is not set -CONFIG_INITRAMFS_PRESERVE_MTIME=y -CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_LD_ORPHAN_WARN=y -CONFIG_LD_ORPHAN_WARN_LEVEL="warn" -CONFIG_SYSCTL=y -CONFIG_HAVE_UID16=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -# CONFIG_EXPERT is not set -CONFIG_UID16=y -CONFIG_MULTIUSER=y -CONFIG_SYSFS_SYSCALL=y -CONFIG_FHANDLE=y -CONFIG_POSIX_TIMERS=y -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_FUTEX_PI=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_IO_URING=y -CONFIG_ADVISE_SYSCALLS=y -CONFIG_MEMBARRIER=y -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_SELFTEST is not set -CONFIG_KALLSYMS_ALL=y -CONFIG_KALLSYMS_BASE_RELATIVE=y -CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y -CONFIG_KCMP=y -CONFIG_RSEQ=y -CONFIG_CACHESTAT_SYSCALL=y -CONFIG_HAVE_PERF_EVENTS=y -CONFIG_GUEST_PERF_EVENTS=y -CONFIG_PERF_USE_VMALLOC=y - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -CONFIG_DEBUG_PERF_USE_VMALLOC=y -# end of Kernel Performance Events And Counters - -CONFIG_SYSTEM_DATA_VERIFICATION=y -CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y - -# -# Kexec and crash features -# -CONFIG_CRASH_CORE=y -CONFIG_KEXEC_CORE=y -CONFIG_HAVE_IMA_KEXEC=y -CONFIG_KEXEC=y -CONFIG_KEXEC_FILE=y -CONFIG_KEXEC_SIG=y -CONFIG_KEXEC_IMAGE_VERIFY_SIG=y -CONFIG_CRASH_DUMP=y -# end of Kexec and crash features -# end of General setup - -CONFIG_ARM64=y -CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y -CONFIG_64BIT=y -CONFIG_MMU=y -CONFIG_ARM64_PAGE_SHIFT=12 -CONFIG_ARM64_CONT_PTE_SHIFT=4 -CONFIG_ARM64_CONT_PMD_SHIFT=4 -CONFIG_ARCH_MMAP_RND_BITS_MIN=18 -CONFIG_ARCH_MMAP_RND_BITS_MAX=33 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_CSUM=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_SMP=y -CONFIG_KERNEL_MODE_NEON=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_PGTABLE_LEVELS=4 -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_ARCH_PROC_KCORE_TEXT=y -CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y -CONFIG_KASAN_SHADOW_OFFSET=0xdfff800000000000 - -# -# Platform selection -# -# CONFIG_ARCH_ACTIONS is not set -# CONFIG_ARCH_SUNXI is not set -# CONFIG_ARCH_ALPINE is not set -# CONFIG_ARCH_APPLE is not set -# CONFIG_ARCH_BCM is not set -# CONFIG_ARCH_BERLIN is not set -# CONFIG_ARCH_BITMAIN is not set -# CONFIG_ARCH_EXYNOS is not set -# CONFIG_ARCH_SPARX5 is not set -# CONFIG_ARCH_K3 is not set -# CONFIG_ARCH_LG1K is not set -CONFIG_ARCH_HISI=y -# CONFIG_ARCH_KEEMBAY is not set -# CONFIG_ARCH_MEDIATEK is not set -# CONFIG_ARCH_MESON is not set -# CONFIG_ARCH_MVEBU is not set -# CONFIG_ARCH_NXP is not set -# CONFIG_ARCH_MA35 is not set -# CONFIG_ARCH_NPCM is not set -CONFIG_ARCH_PHYTIUM=y -CONFIG_ARCH_QCOM=y -# CONFIG_ARCH_REALTEK is not set -# CONFIG_ARCH_RENESAS is not set -# CONFIG_ARCH_ROCKCHIP is not set -CONFIG_ARCH_SEATTLE=y -# CONFIG_ARCH_INTEL_SOCFPGA is not set -# CONFIG_ARCH_STM32 is not set -# CONFIG_ARCH_SYNQUACER is not set -# CONFIG_ARCH_TEGRA is not set -# CONFIG_ARCH_SPRD is not set -CONFIG_ARCH_THUNDER=y -CONFIG_ARCH_THUNDER2=y -# CONFIG_ARCH_UNIPHIER is not set -CONFIG_ARCH_VEXPRESS=y -# CONFIG_ARCH_VISCONTI is not set -CONFIG_ARCH_XGENE=y -# CONFIG_ARCH_ZYNQMP is not set -# end of Platform selection - -# -# Kernel Features -# - -# -# ARM errata workarounds via the alternatives framework -# -CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y -CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y -CONFIG_ARM64_ERRATUM_826319=y -CONFIG_ARM64_ERRATUM_827319=y -CONFIG_ARM64_ERRATUM_824069=y 
-CONFIG_ARM64_ERRATUM_819472=y -CONFIG_ARM64_ERRATUM_832075=y -CONFIG_ARM64_ERRATUM_834220=y -CONFIG_ARM64_ERRATUM_1742098=y -CONFIG_ARM64_ERRATUM_845719=y -CONFIG_ARM64_ERRATUM_843419=y -CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y -CONFIG_ARM64_ERRATUM_1024718=y -CONFIG_ARM64_ERRATUM_1418040=y -CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y -CONFIG_ARM64_ERRATUM_1165522=y -CONFIG_ARM64_ERRATUM_1319367=y -CONFIG_ARM64_ERRATUM_1530923=y -CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y -CONFIG_ARM64_ERRATUM_2441007=y -CONFIG_ARM64_ERRATUM_1286807=y -CONFIG_ARM64_ERRATUM_1463225=y -CONFIG_ARM64_ERRATUM_1542419=y -CONFIG_ARM64_ERRATUM_1508412=y -CONFIG_ARM64_ERRATUM_2051678=y -CONFIG_ARM64_ERRATUM_2077057=y -CONFIG_ARM64_ERRATUM_2658417=y -CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y -CONFIG_ARM64_ERRATUM_2054223=y -CONFIG_ARM64_ERRATUM_2067961=y -CONFIG_ARM64_ERRATUM_2441009=y -CONFIG_ARM64_ERRATUM_2457168=y -CONFIG_ARM64_ERRATUM_2645198=y -CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD=y -CONFIG_ARM64_ERRATUM_2966298=y -CONFIG_ARM64_ERRATUM_3117295=y -CONFIG_CAVIUM_ERRATUM_22375=y -CONFIG_CAVIUM_ERRATUM_23144=y -CONFIG_CAVIUM_ERRATUM_23154=y -CONFIG_CAVIUM_ERRATUM_27456=y -CONFIG_CAVIUM_ERRATUM_30115=y -CONFIG_CAVIUM_TX2_ERRATUM_219=y -CONFIG_FUJITSU_ERRATUM_010001=y -CONFIG_HISILICON_ERRATUM_161600802=y -CONFIG_QCOM_FALKOR_ERRATUM_1003=y -CONFIG_QCOM_FALKOR_ERRATUM_1009=y -CONFIG_QCOM_QDF2400_ERRATUM_0065=y -CONFIG_QCOM_FALKOR_ERRATUM_E1041=y -CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y -CONFIG_ROCKCHIP_ERRATUM_3588001=y -CONFIG_SOCIONEXT_SYNQUACER_PREITS=y -# end of ARM errata workarounds via the alternatives framework - -CONFIG_ARM64_4K_PAGES=y -# CONFIG_ARM64_16K_PAGES is not set -# CONFIG_ARM64_64K_PAGES is not set -# CONFIG_ARM64_VA_BITS_39 is not set -CONFIG_ARM64_VA_BITS_48=y -CONFIG_ARM64_VA_BITS=48 -CONFIG_ARM64_PA_BITS_48=y -CONFIG_ARM64_PA_BITS=48 -# CONFIG_CPU_BIG_ENDIAN is not set -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_SCHED_MC=y -CONFIG_SCHED_CLUSTER=y -CONFIG_SCHED_SMT=y 
-CONFIG_NR_CPUS=1024 -CONFIG_HOTPLUG_CPU=y -CONFIG_NUMA=y -CONFIG_NODES_SHIFT=6 -# CONFIG_HZ_100 is not set -CONFIG_HZ_250=y -# CONFIG_HZ_300 is not set -# CONFIG_HZ_1000 is not set -CONFIG_HZ=250 -CONFIG_SCHED_HRTICK=y -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_HW_PERF_EVENTS=y -CONFIG_CC_HAVE_SHADOW_CALL_STACK=y -CONFIG_PARAVIRT=y -CONFIG_PARAVIRT_TIME_ACCOUNTING=y -CONFIG_ARCH_SUPPORTS_KEXEC=y -CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y -CONFIG_ARCH_SELECTS_KEXEC_FILE=y -CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y -CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y -CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y -CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y -CONFIG_TRANS_TABLE=y -# CONFIG_XEN is not set -CONFIG_ARCH_FORCE_MAX_ORDER=10 -CONFIG_UNMAP_KERNEL_AT_EL0=y -CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y -# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set -# CONFIG_ARM64_SW_TTBR0_PAN is not set -CONFIG_ARM64_TAGGED_ADDR_ABI=y -CONFIG_COMPAT=y -CONFIG_KUSER_HELPERS=y -# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set -# CONFIG_ARMV8_DEPRECATED is not set - -# -# ARMv8.1 architectural features -# -CONFIG_ARM64_HW_AFDBM=y -CONFIG_ARM64_PAN=y -CONFIG_AS_HAS_LSE_ATOMICS=y -CONFIG_ARM64_LSE_ATOMICS=y -CONFIG_ARM64_USE_LSE_ATOMICS=y -# end of ARMv8.1 architectural features - -# -# ARMv8.2 architectural features -# -CONFIG_AS_HAS_ARMV8_2=y -CONFIG_AS_HAS_SHA3=y -CONFIG_ARM64_PMEM=y -CONFIG_ARM64_RAS_EXTN=y -CONFIG_ARM64_CNP=y -# end of ARMv8.2 architectural features - -# -# ARMv8.3 architectural features -# -# CONFIG_ARM64_PTR_AUTH is not set -CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y -CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y -CONFIG_AS_HAS_ARMV8_3=y -CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y -CONFIG_AS_HAS_LDAPR=y -# end of ARMv8.3 architectural features - -# -# ARMv8.4 architectural features -# -CONFIG_ARM64_AMU_EXTN=y -CONFIG_AS_HAS_ARMV8_4=y -CONFIG_ARM64_TLB_RANGE=y -CONFIG_ARM64_MPAM=y -# end of ARMv8.4 architectural features - -# -# ARMv8.5 architectural features -# -CONFIG_AS_HAS_ARMV8_5=y -# CONFIG_ARM64_BTI is not set 
-CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y -CONFIG_ARM64_E0PD=y -CONFIG_ARM64_AS_HAS_MTE=y -CONFIG_ARM64_MTE=y -# end of ARMv8.5 architectural features - -# -# ARMv8.7 architectural features -# -CONFIG_ARM64_EPAN=y -# end of ARMv8.7 architectural features - -CONFIG_ARM64_SVE=y -CONFIG_ARM64_SME=y -CONFIG_ARM64_PSEUDO_NMI=y -# CONFIG_ARM64_DEBUG_PRIORITY_MASKING is not set -CONFIG_RELOCATABLE=y -CONFIG_RANDOMIZE_BASE=y -CONFIG_RANDOMIZE_MODULE_REGION_FULL=y -CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y -CONFIG_STACKPROTECTOR_PER_TASK=y -# end of Kernel Features - -# -# Boot options -# -CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y -CONFIG_CMDLINE="console=ttyAMA0" -CONFIG_CMDLINE_FROM_BOOTLOADER=y -# CONFIG_CMDLINE_FORCE is not set -CONFIG_EFI_STUB=y -CONFIG_EFI=y -CONFIG_DMI=y -# end of Boot options - -# -# Power management options -# -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -CONFIG_HIBERNATE_CALLBACKS=y -CONFIG_HIBERNATION=y -CONFIG_HIBERNATION_SNAPSHOT_DEV=y -CONFIG_PM_STD_PARTITION="" -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -# CONFIG_PM_AUTOSLEEP is not set -# CONFIG_PM_USERSPACE_AUTOSLEEP is not set -# CONFIG_PM_WAKELOCKS is not set -CONFIG_PM=y -CONFIG_PM_DEBUG=y -CONFIG_PM_ADVANCED_DEBUG=y -CONFIG_PM_TEST_SUSPEND=y -CONFIG_PM_SLEEP_DEBUG=y -CONFIG_PM_CLK=y -CONFIG_PM_GENERIC_DOMAINS=y -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -CONFIG_PM_GENERIC_DOMAINS_SLEEP=y -CONFIG_PM_GENERIC_DOMAINS_OF=y -CONFIG_CPU_PM=y -# CONFIG_ENERGY_MODEL is not set -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_HIBERNATION_HEADER=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -# end of Power management options - -# -# CPU Power Management -# - -# -# CPU Idle -# -CONFIG_CPU_IDLE=y -# CONFIG_CPU_IDLE_GOV_LADDER is not set -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_CPU_IDLE_GOV_TEO is not set - -# -# ARM CPU Idle Drivers -# -# CONFIG_ARM_PSCI_CPUIDLE is not set -# end of ARM CPU Idle Drivers -# end of CPU Idle - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_ATTR_SET=y 
-CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set - -# -# CPU frequency scaling drivers -# -# CONFIG_CPUFREQ_DT is not set -# CONFIG_CPUFREQ_DT_PLATDEV is not set -CONFIG_ACPI_CPPC_CPUFREQ=m -CONFIG_ACPI_CPPC_CPUFREQ_FIE=y -CONFIG_ARM_SCPI_CPUFREQ=m -# CONFIG_ARM_QCOM_CPUFREQ_HW is not set -# end of CPU Frequency scaling -# end of CPU Power Management - -CONFIG_ARCH_SUPPORTS_ACPI=y -CONFIG_ACPI=y -CONFIG_ACPI_GENERIC_GSI=y -CONFIG_ACPI_CCA_REQUIRED=y -CONFIG_ACPI_TABLE_LIB=y -CONFIG_ACPI_DEBUGGER=y -CONFIG_ACPI_DEBUGGER_USER=m -CONFIG_ACPI_SPCR_TABLE=y -# CONFIG_ACPI_FPDT is not set -# CONFIG_ACPI_EC_DEBUGFS is not set -CONFIG_ACPI_AC=y -CONFIG_ACPI_BATTERY=y -CONFIG_ACPI_BUTTON=y -CONFIG_ACPI_VIDEO=m -CONFIG_ACPI_FAN=y -# CONFIG_ACPI_TAD is not set -# CONFIG_ACPI_DOCK is not set -CONFIG_ACPI_PROCESSOR_IDLE=y -CONFIG_ACPI_MCFG=y -CONFIG_ACPI_CPPC_LIB=y -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_IPMI=m -CONFIG_ACPI_HOTPLUG_CPU=y -CONFIG_ACPI_THERMAL=y -CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y -CONFIG_ACPI_TABLE_UPGRADE=y -CONFIG_ACPI_DEBUG=y -CONFIG_ACPI_PCI_SLOT=y -CONFIG_ACPI_CONTAINER=y -CONFIG_ACPI_HOTPLUG_MEMORY=y -CONFIG_ACPI_HED=y -CONFIG_ACPI_CUSTOM_METHOD=m -# CONFIG_ACPI_BGRT is not set -CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y -CONFIG_ACPI_NFIT=m -# CONFIG_NFIT_SECURITY_DEBUG is not set -CONFIG_ACPI_NUMA=y -CONFIG_ACPI_HMAT=y -CONFIG_HAVE_ACPI_APEI=y -CONFIG_ACPI_APEI=y -CONFIG_ACPI_APEI_GHES=y -CONFIG_ACPI_APEI_PCIEAER=y -CONFIG_ACPI_APEI_SEA=y 
-CONFIG_ACPI_APEI_MEMORY_FAILURE=y -CONFIG_ACPI_APEI_EINJ=m -CONFIG_ACPI_APEI_ERST_DEBUG=m -CONFIG_ACPI_CONFIGFS=m -# CONFIG_ACPI_PFRUT is not set -CONFIG_ACPI_IORT=y -CONFIG_ACPI_GTDT=y -CONFIG_ACPI_AGDI=y -CONFIG_ACPI_APMT=y -CONFIG_ACPI_MPAM=y -CONFIG_ACPI_PPTT=y -CONFIG_ACPI_PCC=y -# CONFIG_ACPI_FFH is not set -# CONFIG_PMIC_OPREGION is not set -CONFIG_ACPI_PRMT=y -CONFIG_IRQ_BYPASS_MANAGER=y -CONFIG_HAVE_KVM=y -CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_HAVE_KVM_IRQFD=y -CONFIG_HAVE_KVM_IRQ_ROUTING=y -CONFIG_HAVE_KVM_DIRTY_RING=y -CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y -CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_MMIO=y -CONFIG_HAVE_KVM_MSI=y -CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y -CONFIG_KVM_VFIO=y -CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y -CONFIG_HAVE_KVM_IRQ_BYPASS=y -CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y -CONFIG_KVM_XFER_TO_GUEST_WORK=y -CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=y -# CONFIG_NVHE_EL2_DEBUG is not set - -# -# General architecture-dependent options -# -CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y -CONFIG_HOTPLUG_CORE_SYNC=y -CONFIG_HOTPLUG_CORE_SYNC_DEAD=y -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set -CONFIG_UPROBES=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_KRETPROBES=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y -CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y -CONFIG_HAVE_NMI=y -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_GENERIC_IDLE_POLL_SETUP=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_KEEPINITRD=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_ARCH_HAS_SET_DIRECT_MAP=y -CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y -CONFIG_ARCH_WANTS_NO_INSTR=y -CONFIG_HAVE_ASM_MODVERSIONS=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y 
-CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_PERF_EVENTS_NMI=y -CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y -CONFIG_MMU_GATHER_TABLE_FREE=y -CONFIG_MMU_GATHER_RCU_TABLE_FREE=y -CONFIG_MMU_LAZY_TLB_REFCOUNT=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y -CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_HAVE_ARCH_SECCOMP=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP=y -CONFIG_SECCOMP_FILTER=y -# CONFIG_SECCOMP_CACHE_DEBUG is not set -CONFIG_HAVE_ARCH_STACKLEAK=y -CONFIG_HAVE_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR_STRONG=y -CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK=y -# CONFIG_SHADOW_CALL_STACK is not set -CONFIG_ARCH_SUPPORTS_LTO_CLANG=y -CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y -CONFIG_LTO_NONE=y -CONFIG_ARCH_SUPPORTS_CFI_CLANG=y -# CONFIG_CFI_CLANG is not set -CONFIG_HAVE_CONTEXT_TRACKING_USER=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_MOVE_PUD=y -CONFIG_HAVE_MOVE_PMD=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_HAVE_ARCH_HUGE_VMAP=y -CONFIG_HAVE_ARCH_HUGE_VMALLOC=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_ARCH_WANT_PMD_MKWRITE=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y -CONFIG_SOFTIRQ_ON_OWN_STACK=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_ARCH_MMAP_RND_BITS=18 -CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 -CONFIG_PAGE_SIZE_LESS_THAN_64KB=y -CONFIG_PAGE_SIZE_LESS_THAN_256KB=y -CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y -CONFIG_CLONE_BACKWARDS=y -CONFIG_OLD_SIGSUSPEND3=y -CONFIG_COMPAT_OLD_SIGACTION=y -CONFIG_COMPAT_32BIT_TIME=y -CONFIG_HAVE_ARCH_VMAP_STACK=y 
-CONFIG_VMAP_STACK=y -CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y -CONFIG_RANDOMIZE_KSTACK_OFFSET=y -# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_STRICT_MODULE_RWX=y -CONFIG_ARCH_HAS_CPU_RESCTRL=y -CONFIG_HAVE_ARCH_COMPILER_H=y -CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y -CONFIG_ARCH_USE_MEMREMAP_PROT=y -CONFIG_LOCK_EVENT_COUNTS=y -CONFIG_ARCH_HAS_RELR=y -CONFIG_RELR=y -CONFIG_HAVE_PREEMPT_DYNAMIC=y -CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y -CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y -CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -# end of GCOV-based kernel profiling - -CONFIG_HAVE_GCC_PLUGINS=y -CONFIG_GCC_PLUGINS=y -# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set -CONFIG_FUNCTION_ALIGNMENT_4B=y -CONFIG_FUNCTION_ALIGNMENT_8B=y -CONFIG_FUNCTION_ALIGNMENT=8 -# end of General architecture-dependent options - -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULE_SIG_FORMAT=y -CONFIG_MODULES=y -# CONFIG_MODULE_DEBUG is not set -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set -CONFIG_MODVERSIONS=y -CONFIG_ASM_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_MODULE_SIG=y -# CONFIG_MODULE_SIG_FORCE is not set -# CONFIG_MODULE_SIG_ALL is not set -# CONFIG_MODULE_SIG_SHA1 is not set -# CONFIG_MODULE_SIG_SHA224 is not set -CONFIG_MODULE_SIG_SHA256=y -# CONFIG_MODULE_SIG_SHA384 is not set -# CONFIG_MODULE_SIG_SHA512 is not set -CONFIG_MODULE_SIG_HASH="sha256" -CONFIG_MODULE_COMPRESS_NONE=y -# CONFIG_MODULE_COMPRESS_GZIP is not set -# CONFIG_MODULE_COMPRESS_XZ is not set -# CONFIG_MODULE_COMPRESS_ZSTD is not set -# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set -CONFIG_MODPROBE_PATH="/sbin/modprobe" 
-CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLOCK_LEGACY_AUTOLOAD=y -CONFIG_BLK_RQ_ALLOC_TIME=y -CONFIG_BLK_CGROUP_RWSTAT=y -CONFIG_BLK_CGROUP_PUNT_BIO=y -CONFIG_BLK_DEV_BSG_COMMON=y -CONFIG_BLK_ICQ=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_INTEGRITY_T10=m -CONFIG_BLK_DEV_ZONED=y -CONFIG_BLK_DEV_THROTTLING=y -# CONFIG_BLK_DEV_THROTTLING_LOW is not set -# CONFIG_BLK_WBT is not set -# CONFIG_BLK_CGROUP_IOLATENCY is not set -# CONFIG_BLK_CGROUP_FC_APPID is not set -CONFIG_BLK_CGROUP_IOCOST=y -# CONFIG_BLK_CGROUP_IOPRIO is not set -CONFIG_BLK_DEBUG_FS=y -CONFIG_BLK_DEBUG_FS_ZONED=y -# CONFIG_BLK_SED_OPAL is not set -# CONFIG_BLK_INLINE_ENCRYPTION is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -# CONFIG_AIX_PARTITION is not set -# CONFIG_OSF_PARTITION is not set -# CONFIG_AMIGA_PARTITION is not set -# CONFIG_ATARI_PARTITION is not set -# CONFIG_MAC_PARTITION is not set -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -# CONFIG_MINIX_SUBPARTITION is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set -# CONFIG_LDM_PARTITION is not set -# CONFIG_SGI_PARTITION is not set -# CONFIG_ULTRIX_PARTITION is not set -# CONFIG_SUN_PARTITION is not set -# CONFIG_KARMA_PARTITION is not set -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -# CONFIG_CMDLINE_PARTITION is not set -# end of Partition Types - -CONFIG_BLK_MQ_PCI=y -CONFIG_BLK_MQ_VIRTIO=y -CONFIG_BLK_PM=y -CONFIG_BLOCK_HOLDER_DEPRECATED=y -CONFIG_BLK_MQ_STACKING=y - -# -# IO Schedulers -# -CONFIG_MQ_IOSCHED_DEADLINE=y -CONFIG_MQ_IOSCHED_KYBER=y -CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y -# CONFIG_BFQ_CGROUP_DEBUG is not set -# end of IO Schedulers - -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_PADATA=y -CONFIG_ASN1=y -CONFIG_UNINLINE_SPIN_UNLOCK=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_LOCK_SPIN_ON_OWNER=y 
-CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y -CONFIG_QUEUED_SPINLOCKS=y -CONFIG_ARCH_USE_QUEUED_RWLOCKS=y -CONFIG_QUEUED_RWLOCKS=y -CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y -CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y -CONFIG_CK_KABI_RESERVE=y -CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y -CONFIG_FREEZER=y - -# -# Executable file formats -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ARCH_BINFMT_ELF_STATE=y -CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y -CONFIG_ARCH_HAVE_ELF_PROT=y -CONFIG_ARCH_USE_GNU_PROPERTY=y -CONFIG_ELFCORE=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_BINFMT_SCRIPT=y -CONFIG_BINFMT_MISC=m -CONFIG_COREDUMP=y -# end of Executable file formats - -# -# Memory Management options -# -CONFIG_ZPOOL=y -CONFIG_SWAP=y -CONFIG_ZSWAP=y -# CONFIG_ZSWAP_DEFAULT_ON is not set -# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set -CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set -CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" -CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y -# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set -# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set -CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" -CONFIG_ZBUD=y -# CONFIG_Z3FOLD is not set -CONFIG_ZSMALLOC=y -CONFIG_ZSMALLOC_STAT=y -CONFIG_ZSMALLOC_CHAIN_SIZE=8 - -# -# SLAB allocator options -# -# CONFIG_SLAB_DEPRECATED is not set -CONFIG_SLUB=y -# CONFIG_SLAB_MERGE_DEFAULT is not set -CONFIG_SLAB_FREELIST_RANDOM=y -# CONFIG_SLAB_FREELIST_HARDENED is not set -# CONFIG_SLUB_STATS is not set -CONFIG_SLUB_CPU_PARTIAL=y -# CONFIG_RANDOM_KMALLOC_CACHES is not set -# end of SLAB allocator options - -CONFIG_SHUFFLE_PAGE_ALLOCATOR=y -# CONFIG_COMPAT_BRK is not set -CONFIG_SPARSEMEM=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_FAST_GUP=y 
-CONFIG_ARCH_KEEP_MEMBLOCK=y -CONFIG_NUMA_KEEP_MEMINFO=y -CONFIG_MEMORY_ISOLATION=y -CONFIG_EXCLUSIVE_SYSTEM_RAM=y -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_MHP_MEMMAP_ON_MEMORY=y -CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y -CONFIG_MEMORY_BALLOON=y -CONFIG_BALLOON_COMPACTION=y -CONFIG_COMPACTION=y -CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 -CONFIG_PAGE_REPORTING=y -CONFIG_MIGRATION=y -CONFIG_DEVICE_MIGRATION=y -CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y -CONFIG_ARCH_ENABLE_THP_MIGRATION=y -CONFIG_CONTIG_ALLOC=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -CONFIG_MEMORY_FAILURE=y -CONFIG_HWPOISON_INJECT=m -CONFIG_ARCH_WANTS_THP_SWAP=y -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set -CONFIG_THP_SWAP=y -CONFIG_READ_ONLY_THP_FOR_FS=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -CONFIG_USE_PERCPU_NUMA_NODE_ID=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_CMA=y -# CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set -# CONFIG_CMA_SYSFS is not set -CONFIG_CMA_AREAS=19 -CONFIG_GENERIC_EARLY_IOREMAP=y -# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set -CONFIG_PAGE_IDLE_FLAG=y -CONFIG_IDLE_PAGE_TRACKING=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y -CONFIG_ARCH_HAS_PTE_DEVMAP=y -CONFIG_ZONE_DMA=y -CONFIG_ZONE_DMA32=y -CONFIG_ZONE_DEVICE=y -CONFIG_HMM_MIRROR=y -CONFIG_GET_FREE_REGION=y -# CONFIG_DEVICE_PRIVATE is not set -CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y -CONFIG_ARCH_USES_PG_ARCH_X=y -CONFIG_VM_EVENT_COUNTERS=y -# CONFIG_PERCPU_STATS is not set -# CONFIG_GUP_TEST is not set -# CONFIG_DMAPOOL_TEST is not set -CONFIG_ARCH_HAS_PTE_SPECIAL=y 
-CONFIG_MEMFD_CREATE=y -CONFIG_SECRETMEM=y -# CONFIG_ANON_VMA_NAME is not set -CONFIG_USERFAULTFD=y -CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y -CONFIG_LRU_GEN=y -# CONFIG_LRU_GEN_ENABLED is not set -# CONFIG_LRU_GEN_STATS is not set -CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y -CONFIG_PER_VMA_LOCK=y -CONFIG_LOCK_MM_AND_FIND_VMA=y - -# -# Data Access Monitoring -# -CONFIG_DAMON=y -CONFIG_DAMON_VADDR=y -CONFIG_DAMON_PADDR=y -# CONFIG_DAMON_SYSFS is not set -CONFIG_DAMON_DBGFS=y -# CONFIG_DAMON_RECLAIM is not set -# CONFIG_DAMON_LRU_SORT is not set -# end of Data Access Monitoring -# end of Memory Management options - -CONFIG_NET=y -CONFIG_NET_INGRESS=y -CONFIG_NET_EGRESS=y -CONFIG_NET_XGRESS=y -CONFIG_NET_REDIRECT=y -CONFIG_SKB_EXTENSIONS=y - -# -# Networking options -# -CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m -CONFIG_UNIX=y -CONFIG_UNIX_SCM=y -CONFIG_AF_UNIX_OOB=y -CONFIG_UNIX_DIAG=m -CONFIG_TLS=m -CONFIG_TLS_DEVICE=y -# CONFIG_TLS_TOE is not set -CONFIG_XFRM=y -CONFIG_XFRM_OFFLOAD=y -CONFIG_XFRM_ALGO=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_INTERFACE=m -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y -CONFIG_XFRM_STATISTICS=y -CONFIG_XFRM_AH=m -CONFIG_XFRM_ESP=m -CONFIG_XFRM_IPCOMP=m -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -CONFIG_SMC=m -CONFIG_SMC_DIAG=m -CONFIG_XDP_SOCKETS=y -CONFIG_XDP_SOCKETS_DIAG=m -CONFIG_NET_HANDSHAKE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -# CONFIG_IP_PNP is not set -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IP_TUNNEL=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE_COMMON=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m -CONFIG_NET_UDP_TUNNEL=m -# CONFIG_NET_FOU is not set -# CONFIG_NET_FOU_IP_TUNNELS is not set -CONFIG_INET_AH=m -CONFIG_INET_ESP=m 
-CONFIG_INET_ESP_OFFLOAD=m -# CONFIG_INET_ESPINTCP is not set -CONFIG_INET_IPCOMP=m -CONFIG_INET_TABLE_PERTURB_ORDER=16 -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=m -CONFIG_INET_DIAG=m -CONFIG_INET_TCP_DIAG=m -CONFIG_INET_UDP_DIAG=m -CONFIG_INET_RAW_DIAG=m -# CONFIG_INET_DIAG_DESTROY is not set -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m -CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=m -CONFIG_TCP_CONG_HTCP=m -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_VEGAS=m -CONFIG_TCP_CONG_NV=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_TCP_CONG_DCTCP=m -# CONFIG_TCP_CONG_CDG is not set -CONFIG_TCP_CONG_BBR=m -CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_ESP_OFFLOAD=m -# CONFIG_INET6_ESPINTCP is not set -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -# CONFIG_IPV6_ILA is not set -CONFIG_INET6_XFRM_TUNNEL=m -CONFIG_INET6_TUNNEL=m -CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=m -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_GRE=m -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y -CONFIG_IPV6_PIMSM_V2=y -# CONFIG_IPV6_SEG6_LWTUNNEL is not set -# CONFIG_IPV6_SEG6_HMAC is not set -# CONFIG_IPV6_RPL_LWTUNNEL is not set -# CONFIG_IPV6_IOAM6_LWTUNNEL is not set -CONFIG_NETLABEL=y -CONFIG_MPTCP=y -CONFIG_INET_MPTCP_DIAG=m -CONFIG_MPTCP_IPV6=y -CONFIG_NETWORK_SECMARK=y -CONFIG_NET_PTP_CLASSIFY=y -CONFIG_NETWORK_PHY_TIMESTAMPING=y -CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=m - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_INGRESS=y -CONFIG_NETFILTER_EGRESS=y -CONFIG_NETFILTER_SKIP_EGRESS=y -CONFIG_NETFILTER_NETLINK=m 
-CONFIG_NETFILTER_FAMILY_BRIDGE=y -CONFIG_NETFILTER_FAMILY_ARP=y -CONFIG_NETFILTER_BPF_LINK=y -# CONFIG_NETFILTER_NETLINK_HOOK is not set -CONFIG_NETFILTER_NETLINK_ACCT=m -CONFIG_NETFILTER_NETLINK_QUEUE=m -CONFIG_NETFILTER_NETLINK_LOG=m -CONFIG_NETFILTER_NETLINK_OSF=m -CONFIG_NF_CONNTRACK=m -CONFIG_NF_LOG_SYSLOG=m -CONFIG_NETFILTER_CONNCOUNT=m -CONFIG_NF_CONNTRACK_MARK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMEOUT=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_LABELS=y -CONFIG_NF_CONNTRACK_OVS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=y -CONFIG_NF_CT_PROTO_SCTP=y -CONFIG_NF_CT_PROTO_UDPLITE=y -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_BROADCAST=m -CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NF_CT_NETLINK_TIMEOUT=m -CONFIG_NF_CT_NETLINK_HELPER=m -CONFIG_NETFILTER_NETLINK_GLUE_CT=y -CONFIG_NF_NAT=m -CONFIG_NF_NAT_AMANDA=m -CONFIG_NF_NAT_FTP=m -CONFIG_NF_NAT_IRC=m -CONFIG_NF_NAT_SIP=m -CONFIG_NF_NAT_TFTP=m -CONFIG_NF_NAT_REDIRECT=y -CONFIG_NF_NAT_MASQUERADE=y -CONFIG_NF_NAT_OVS=y -CONFIG_NETFILTER_SYNPROXY=m -CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=y -CONFIG_NF_TABLES_NETDEV=y -CONFIG_NFT_NUMGEN=m -CONFIG_NFT_CT=m -CONFIG_NFT_FLOW_OFFLOAD=m -CONFIG_NFT_CONNLIMIT=m -CONFIG_NFT_LOG=m -CONFIG_NFT_LIMIT=m -CONFIG_NFT_MASQ=m -CONFIG_NFT_REDIR=m -CONFIG_NFT_NAT=m -CONFIG_NFT_TUNNEL=m -CONFIG_NFT_QUEUE=m -CONFIG_NFT_QUOTA=m -CONFIG_NFT_REJECT=m -CONFIG_NFT_REJECT_INET=m -CONFIG_NFT_COMPAT=m -CONFIG_NFT_HASH=m -CONFIG_NFT_FIB=m -CONFIG_NFT_FIB_INET=m -CONFIG_NFT_XFRM=m -CONFIG_NFT_SOCKET=m -CONFIG_NFT_OSF=m -CONFIG_NFT_TPROXY=m -# CONFIG_NFT_SYNPROXY is not set -CONFIG_NF_DUP_NETDEV=m 
-CONFIG_NFT_DUP_NETDEV=m -CONFIG_NFT_FWD_NETDEV=m -CONFIG_NFT_FIB_NETDEV=m -# CONFIG_NFT_REJECT_NETDEV is not set -CONFIG_NF_FLOW_TABLE_INET=m -CONFIG_NF_FLOW_TABLE=m -# CONFIG_NF_FLOW_TABLE_PROCFS is not set -CONFIG_NETFILTER_XTABLES=y -# CONFIG_NETFILTER_XTABLES_COMPAT is not set - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=m -CONFIG_NETFILTER_XT_CONNMARK=m -CONFIG_NETFILTER_XT_SET=m - -# -# Xtables targets -# -CONFIG_NETFILTER_XT_TARGET_AUDIT=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m -CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HL=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LED=m -CONFIG_NETFILTER_XT_TARGET_LOG=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_NAT=m -CONFIG_NETFILTER_XT_TARGET_NETMAP=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_RATEEST=m -CONFIG_NETFILTER_XT_TARGET_REDIRECT=m -CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m -CONFIG_NETFILTER_XT_TARGET_TEE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m - -# -# Xtables matches -# -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m -CONFIG_NETFILTER_XT_MATCH_CGROUP=m -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ECN=m 
-CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_HL=m -# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_IPVS=m -CONFIG_NETFILTER_XT_MATCH_L2TP=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SCTP=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -# end of Core Netfilter Configuration - -CONFIG_IP_SET=m -CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPMARK=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_IPMAC=m -CONFIG_IP_SET_HASH_MAC=m -CONFIG_IP_SET_HASH_NETPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETNET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -# CONFIG_IP_VS_DEBUG is not set -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# 
-CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_FO=m -CONFIG_IP_VS_OVF=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_MH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m -# CONFIG_IP_VS_TWOS is not set - -# -# IPVS SH scheduler -# -CONFIG_IP_VS_SH_TAB_BITS=8 - -# -# IPVS MH scheduler -# -CONFIG_IP_VS_MH_TAB_INDEX=12 - -# -# IPVS application helper -# -CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_NFCT=y -CONFIG_IP_VS_PE_SIP=m - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=m -CONFIG_NF_SOCKET_IPV4=m -CONFIG_NF_TPROXY_IPV4=m -CONFIG_NF_TABLES_IPV4=y -CONFIG_NFT_REJECT_IPV4=m -CONFIG_NFT_DUP_IPV4=m -CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=y -CONFIG_NF_DUP_IPV4=m -CONFIG_NF_LOG_ARP=m -CONFIG_NF_LOG_IPV4=m -CONFIG_NF_REJECT_IPV4=m -CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PPTP=m -CONFIG_NF_NAT_H323=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_SYNPROXY=m -CONFIG_IP_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m -# end of IP: Netfilter Configuration - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_SOCKET_IPV6=m -CONFIG_NF_TPROXY_IPV6=m -CONFIG_NF_TABLES_IPV6=y -CONFIG_NFT_REJECT_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_DUP_IPV6=m -CONFIG_NF_REJECT_IPV6=m -CONFIG_NF_LOG_IPV6=m -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m 
-CONFIG_IP6_NF_MATCH_RT=m -# CONFIG_IP6_NF_MATCH_SRH is not set -# CONFIG_IP6_NF_TARGET_HL is not set -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_TARGET_SYNPROXY=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_IP6_NF_SECURITY=m -CONFIG_IP6_NF_NAT=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m -# end of IPv6: Netfilter Configuration - -CONFIG_NF_DEFRAG_IPV6=m -CONFIG_NF_TABLES_BRIDGE=m -# CONFIG_NFT_BRIDGE_META is not set -CONFIG_NFT_BRIDGE_REJECT=m -# CONFIG_NF_CONNTRACK_BRIDGE is not set -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_NFLOG=m -# CONFIG_BPFILTER is not set -# CONFIG_IP_DCCP is not set -CONFIG_IP_SCTP=m -# CONFIG_SCTP_DBG_OBJCNT is not set -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set -CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set -CONFIG_SCTP_COOKIE_HMAC_MD5=y -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_INET_SCTP_DIAG=m -# CONFIG_RDS is not set -CONFIG_TIPC=m -CONFIG_TIPC_MEDIA_IB=y -CONFIG_TIPC_MEDIA_UDP=y -CONFIG_TIPC_CRYPTO=y -CONFIG_TIPC_DIAG=m -CONFIG_ATM=m -CONFIG_ATM_CLIP=m -# CONFIG_ATM_CLIP_NO_ICMP is not set -CONFIG_ATM_LANE=m -# CONFIG_ATM_MPOA is not set -CONFIG_ATM_BR2684=m -# CONFIG_ATM_BR2684_IPFILTER is not set -CONFIG_L2TP=m -CONFIG_L2TP_DEBUGFS=m -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=m -CONFIG_L2TP_ETH=m -CONFIG_STP=m -CONFIG_GARP=m -CONFIG_MRP=m -CONFIG_BRIDGE=m -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_BRIDGE_VLAN_FILTERING=y -# 
CONFIG_BRIDGE_MRP is not set -# CONFIG_BRIDGE_CFM is not set -# CONFIG_NET_DSA is not set -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_VLAN_8021Q_MVRP=y -CONFIG_LLC=m -# CONFIG_LLC2 is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_PHONET is not set -CONFIG_6LOWPAN=m -# CONFIG_6LOWPAN_DEBUGFS is not set -# CONFIG_6LOWPAN_NHC is not set -CONFIG_IEEE802154=m -# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set -CONFIG_IEEE802154_SOCKET=m -CONFIG_IEEE802154_6LOWPAN=m -CONFIG_MAC802154=m -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFB=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -# CONFIG_NET_SCH_CBS is not set -# CONFIG_NET_SCH_ETF is not set -CONFIG_NET_SCH_MQPRIO_LIB=m -# CONFIG_NET_SCH_TAPRIO is not set -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -# CONFIG_NET_SCH_SKBPRIO is not set -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -CONFIG_NET_SCH_CODEL=m -CONFIG_NET_SCH_FQ_CODEL=y -# CONFIG_NET_SCH_CAKE is not set -CONFIG_NET_SCH_FQ=m -CONFIG_NET_SCH_HHF=m -CONFIG_NET_SCH_PIE=m -# CONFIG_NET_SCH_FQ_PIE is not set -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_SCH_PLUG=m -# CONFIG_NET_SCH_ETS is not set -CONFIG_NET_SCH_DEFAULT=y -# CONFIG_DEFAULT_FQ is not set -# CONFIG_DEFAULT_CODEL is not set -CONFIG_DEFAULT_FQ_CODEL=y -# CONFIG_DEFAULT_SFQ is not set -# CONFIG_DEFAULT_PFIFO_FAST is not set -CONFIG_DEFAULT_NET_SCH="fq_codel" - -# -# Classification -# -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_CGROUP=y -CONFIG_NET_CLS_BPF=m -CONFIG_NET_CLS_FLOWER=m -CONFIG_NET_CLS_MATCHALL=m -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 -CONFIG_NET_EMATCH_CMP=m 
-CONFIG_NET_EMATCH_NBYTE=m -CONFIG_NET_EMATCH_U32=m -CONFIG_NET_EMATCH_META=m -CONFIG_NET_EMATCH_TEXT=m -CONFIG_NET_EMATCH_IPSET=m -# CONFIG_NET_EMATCH_IPT is not set -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=m -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_SAMPLE=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_NET_ACT_CSUM=m -# CONFIG_NET_ACT_MPLS is not set -CONFIG_NET_ACT_VLAN=m -CONFIG_NET_ACT_BPF=m -# CONFIG_NET_ACT_CONNMARK is not set -# CONFIG_NET_ACT_CTINFO is not set -CONFIG_NET_ACT_SKBMOD=m -# CONFIG_NET_ACT_IFE is not set -CONFIG_NET_ACT_TUNNEL_KEY=m -CONFIG_NET_ACT_CT=m -# CONFIG_NET_ACT_GATE is not set -CONFIG_NET_TC_SKB_EXT=y -CONFIG_NET_SCH_FIFO=y -CONFIG_DCB=y -CONFIG_DNS_RESOLVER=m -# CONFIG_BATMAN_ADV is not set -CONFIG_OPENVSWITCH=m -CONFIG_OPENVSWITCH_GRE=m -CONFIG_OPENVSWITCH_VXLAN=m -CONFIG_OPENVSWITCH_GENEVE=m -CONFIG_VSOCKETS=m -CONFIG_VSOCKETS_DIAG=m -CONFIG_VSOCKETS_LOOPBACK=m -CONFIG_VIRTIO_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS_COMMON=m -CONFIG_NETLINK_DIAG=m -CONFIG_MPLS=y -CONFIG_NET_MPLS_GSO=y -CONFIG_MPLS_ROUTING=m -CONFIG_MPLS_IPTUNNEL=m -CONFIG_NET_NSH=y -# CONFIG_HSR is not set -CONFIG_NET_SWITCHDEV=y -CONFIG_NET_L3_MASTER_DEV=y -# CONFIG_QRTR is not set -# CONFIG_NET_NCSI is not set -CONFIG_PCPU_DEV_REFCNT=y -CONFIG_MAX_SKB_FRAGS=17 -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_SOCK_RX_QUEUE_MAPPING=y -CONFIG_XPS=y -CONFIG_CGROUP_NET_PRIO=y -CONFIG_CGROUP_NET_CLASSID=y -CONFIG_NET_RX_BUSY_POLL=y -CONFIG_BQL=y -CONFIG_BPF_STREAM_PARSER=y -CONFIG_NET_FLOW_LIMIT=y - -# -# Network testing -# -CONFIG_NET_PKTGEN=m -CONFIG_NET_DROP_MONITOR=y -# end of Network testing -# end of Networking options - -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -# CONFIG_BT is not set -# CONFIG_AF_RXRPC is not set -# CONFIG_AF_KCM is not set -CONFIG_STREAM_PARSER=y -# CONFIG_MCTP is not set -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y 
-CONFIG_CFG80211=m -# CONFIG_NL80211_TESTMODE is not set -# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set -CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y -CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y -CONFIG_CFG80211_DEFAULT_PS=y -# CONFIG_CFG80211_DEBUGFS is not set -CONFIG_CFG80211_CRDA_SUPPORT=y -# CONFIG_CFG80211_WEXT is not set -CONFIG_MAC80211=m -CONFIG_MAC80211_HAS_RC=y -CONFIG_MAC80211_RC_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" -# CONFIG_MAC80211_MESH is not set -CONFIG_MAC80211_LEDS=y -CONFIG_MAC80211_DEBUGFS=y -# CONFIG_MAC80211_MESSAGE_TRACING is not set -# CONFIG_MAC80211_DEBUG_MENU is not set -CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 -CONFIG_RFKILL=m -CONFIG_RFKILL_LEDS=y -CONFIG_RFKILL_INPUT=y -CONFIG_RFKILL_GPIO=m -# CONFIG_NET_9P is not set -# CONFIG_CAIF is not set -CONFIG_CEPH_LIB=m -CONFIG_CEPH_LIB_PRETTYDEBUG=y -CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y -# CONFIG_NFC is not set -CONFIG_PSAMPLE=m -# CONFIG_NET_IFE is not set -CONFIG_LWTUNNEL=y -CONFIG_LWTUNNEL_BPF=y -CONFIG_DST_CACHE=y -CONFIG_GRO_CELLS=y -CONFIG_SOCK_VALIDATE_XMIT=y -CONFIG_NET_SELFTESTS=y -CONFIG_NET_SOCK_MSG=y -CONFIG_NET_DEVLINK=y -CONFIG_PAGE_POOL=y -# CONFIG_PAGE_POOL_STATS is not set -CONFIG_FAILOVER=m -CONFIG_ETHTOOL_NETLINK=y - -# -# Device Drivers -# -CONFIG_ARM_AMBA=y -CONFIG_HAVE_PCI=y -CONFIG_PCI=y -CONFIG_PCI_DOMAINS=y -CONFIG_PCI_DOMAINS_GENERIC=y -CONFIG_PCI_SYSCALL=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCIEAER=y -CONFIG_PCIEAER_INJECT=m -CONFIG_PCIE_ECRC=y -CONFIG_PCIEASPM=y -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCIE_PME=y -CONFIG_PCIE_DPC=y -# CONFIG_PCIE_PTM is not set -CONFIG_PCIE_EDR=y -CONFIG_PCI_MSI=y -CONFIG_PCI_QUIRKS=y -# CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set -CONFIG_PCI_STUB=y -# CONFIG_PCI_PF_STUB is not set -CONFIG_PCI_ATS=y -CONFIG_PCI_DOE=y 
-CONFIG_PCI_ECAM=y -CONFIG_PCI_IOV=y -CONFIG_PCI_PRI=y -CONFIG_PCI_PASID=y -# CONFIG_PCI_P2PDMA is not set -CONFIG_PCI_LABEL=y -# CONFIG_PCI_DYNAMIC_OF_NODES is not set -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=64 -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_HOTPLUG_PCI_ACPI_IBM=m -# CONFIG_HOTPLUG_PCI_CPCI is not set -# CONFIG_HOTPLUG_PCI_SHPC is not set - -# -# PCI controller drivers -# -# CONFIG_PCIE_ALTERA is not set -CONFIG_PCI_HOST_THUNDER_PEM=y -CONFIG_PCI_HOST_THUNDER_ECAM=y -# CONFIG_PCI_FTPCI100 is not set -CONFIG_PCI_HOST_COMMON=y -CONFIG_PCI_HOST_GENERIC=y -# CONFIG_PCIE_HISI_ERR is not set -# CONFIG_PCIE_MICROCHIP_HOST is not set -CONFIG_PCI_XGENE=y -CONFIG_PCI_XGENE_MSI=y -# CONFIG_PCIE_XILINX is not set - -# -# Cadence-based PCIe controllers -# -# CONFIG_PCIE_CADENCE_PLAT_HOST is not set -# CONFIG_PCI_J721E_HOST is not set -# end of Cadence-based PCIe controllers - -# -# DesignWare-based PCIe controllers -# -CONFIG_PCIE_DW=y -CONFIG_PCIE_DW_HOST=y -# CONFIG_PCIE_AL is not set -# CONFIG_PCI_MESON is not set -CONFIG_PCI_HISI=y -# CONFIG_PCIE_KIRIN is not set -# CONFIG_PCIE_HISI_STB is not set -# CONFIG_PCIE_DW_PLAT_HOST is not set -# CONFIG_PCIE_QCOM is not set -# end of DesignWare-based PCIe controllers - -# -# Mobiveil-based PCIe controllers -# -# end of Mobiveil-based PCIe controllers -# end of PCI controller drivers - -# -# PCI Endpoint -# -# CONFIG_PCI_ENDPOINT is not set -# end of PCI Endpoint - -# -# PCI switch controller drivers -# -# CONFIG_PCI_SW_SWITCHTEC is not set -# end of PCI switch controller drivers - -CONFIG_CXL_BUS=m -CONFIG_CXL_PCI=m -# CONFIG_CXL_MEM_RAW_COMMANDS is not set -CONFIG_CXL_ACPI=m -CONFIG_CXL_PMEM=m -CONFIG_CXL_MEM=m -CONFIG_CXL_PORT=m -CONFIG_CXL_SUSPEND=y -CONFIG_CXL_REGION=y -# CONFIG_CXL_REGION_INVALIDATION_TEST is not set -CONFIG_CXL_PMU=m -CONFIG_PCCARD=y -# CONFIG_PCMCIA is not set -CONFIG_CARDBUS=y - -# -# PC-card bridges -# -CONFIG_YENTA=m -CONFIG_YENTA_O2=y -CONFIG_YENTA_RICOH=y -CONFIG_YENTA_TI=y 
-CONFIG_YENTA_ENE_TUNE=y -CONFIG_YENTA_TOSHIBA=y -# CONFIG_RAPIDIO is not set - -# -# Generic Driver Options -# -CONFIG_AUXILIARY_BUS=y -# CONFIG_UEVENT_HELPER is not set -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_DEVTMPFS_SAFE is not set -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y - -# -# Firmware loader -# -CONFIG_FW_LOADER=y -CONFIG_FW_LOADER_DEBUG=y -CONFIG_FW_LOADER_PAGED_BUF=y -CONFIG_FW_LOADER_SYSFS=y -CONFIG_EXTRA_FIRMWARE="" -# CONFIG_FW_LOADER_USER_HELPER is not set -# CONFIG_FW_LOADER_COMPRESS is not set -CONFIG_FW_CACHE=y -CONFIG_FW_UPLOAD=y -# end of Firmware loader - -CONFIG_ALLOW_DEV_COREDUMP=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set -CONFIG_HMEM_REPORTING=y -# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set -CONFIG_GENERIC_CPU_AUTOPROBE=y -CONFIG_GENERIC_CPU_VULNERABILITIES=y -CONFIG_SOC_BUS=y -CONFIG_REGMAP=y -CONFIG_REGMAP_I2C=m -CONFIG_REGMAP_SPI=m -CONFIG_REGMAP_MMIO=y -CONFIG_DMA_SHARED_BUFFER=y -# CONFIG_DMA_FENCE_TRACE is not set -CONFIG_GENERIC_ARCH_TOPOLOGY=y -CONFIG_GENERIC_ARCH_NUMA=y -# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set -# end of Generic Driver Options - -# -# Bus devices -# -# CONFIG_BRCMSTB_GISB_ARB is not set -# CONFIG_MOXTET is not set -CONFIG_HISILICON_LPC=y -# CONFIG_QCOM_EBI2 is not set -# CONFIG_QCOM_SSC_BLOCK_BUS is not set -CONFIG_VEXPRESS_CONFIG=y -# CONFIG_MHI_BUS is not set -# CONFIG_MHI_BUS_EP is not set -# end of Bus devices - -# -# Cache Drivers -# -# end of Cache Drivers - -CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y - -# -# Firmware Drivers -# - -# -# ARM System Control and Management Interface Protocol -# -# CONFIG_ARM_SCMI_PROTOCOL is not set -# end of ARM System Control and Management Interface Protocol - -CONFIG_ARM_SCPI_PROTOCOL=m -CONFIG_ARM_SCPI_POWER_DOMAIN=m -CONFIG_ARM_SDE_INTERFACE=y -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=y -# CONFIG_ISCSI_IBFT is not set -CONFIG_FW_CFG_SYSFS=y -# CONFIG_FW_CFG_SYSFS_CMDLINE is not 
set -CONFIG_QCOM_SCM=y -# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set -CONFIG_SYSFB=y -# CONFIG_SYSFB_SIMPLEFB is not set -# CONFIG_ARM_FFA_TRANSPORT is not set -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# EFI (Extensible Firmware Interface) Support -# -CONFIG_EFI_ESRT=y -CONFIG_EFI_VARS_PSTORE=y -CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y -CONFIG_EFI_SOFT_RESERVE=y -CONFIG_EFI_PARAMS_FROM_FDT=y -CONFIG_EFI_RUNTIME_WRAPPERS=y -CONFIG_EFI_GENERIC_STUB=y -# CONFIG_EFI_ZBOOT is not set -CONFIG_EFI_ARMSTUB_DTB_LOADER=y -# CONFIG_EFI_BOOTLOADER_CONTROL is not set -# CONFIG_EFI_CAPSULE_LOADER is not set -# CONFIG_EFI_TEST is not set -# CONFIG_RESET_ATTACK_MITIGATION is not set -# CONFIG_EFI_DISABLE_PCI_DMA is not set -CONFIG_EFI_EARLYCON=y -CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y -# CONFIG_EFI_DISABLE_RUNTIME is not set -# CONFIG_EFI_COCO_SECRET is not set -# end of EFI (Extensible Firmware Interface) Support - -CONFIG_UEFI_CPER=y -CONFIG_UEFI_CPER_ARM=y -# CONFIG_YITIAN_CPER_RAWDATA is not set -CONFIG_ARM_PSCI_FW=y -# CONFIG_ARM_PSCI_CHECKER is not set -CONFIG_HAVE_ARM_SMCCC=y -CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y -CONFIG_ARM_SMCCC_SOC_ID=y - -# -# Tegra firmware driver -# -# end of Tegra firmware driver -# end of Firmware Drivers - -# CONFIG_GNSS is not set -CONFIG_MTD=m -# CONFIG_MTD_TESTS is not set - -# -# Partition parsers -# -# CONFIG_MTD_AR7_PARTS is not set -# CONFIG_MTD_CMDLINE_PARTS is not set -CONFIG_MTD_OF_PARTS=m -# CONFIG_MTD_AFS_PARTS is not set -# CONFIG_MTD_REDBOOT_PARTS is not set -# end of Partition parsers - -# -# User Modules And Translation Layers -# -CONFIG_MTD_BLKDEVS=m -CONFIG_MTD_BLOCK=m -# CONFIG_MTD_BLOCK_RO is not set - -# -# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
-# -# CONFIG_FTL is not set -# CONFIG_NFTL is not set -# CONFIG_INFTL is not set -# CONFIG_RFD_FTL is not set -# CONFIG_SSFDC is not set -# CONFIG_SM_FTL is not set -# CONFIG_MTD_OOPS is not set -# CONFIG_MTD_SWAP is not set -# CONFIG_MTD_PARTITIONED_MASTER is not set - -# -# RAM/ROM/Flash chip drivers -# -CONFIG_MTD_CFI=m -# CONFIG_MTD_JEDECPROBE is not set -CONFIG_MTD_GEN_PROBE=m -# CONFIG_MTD_CFI_ADV_OPTIONS is not set -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y -CONFIG_MTD_CFI_INTELEXT=m -CONFIG_MTD_CFI_AMDSTD=m -CONFIG_MTD_CFI_STAA=m -CONFIG_MTD_CFI_UTIL=m -# CONFIG_MTD_RAM is not set -# CONFIG_MTD_ROM is not set -# CONFIG_MTD_ABSENT is not set -# end of RAM/ROM/Flash chip drivers - -# -# Mapping drivers for chip access -# -# CONFIG_MTD_COMPLEX_MAPPINGS is not set -CONFIG_MTD_PHYSMAP=m -# CONFIG_MTD_PHYSMAP_COMPAT is not set -# CONFIG_MTD_PHYSMAP_OF is not set -# CONFIG_MTD_INTEL_VR_NOR is not set -# CONFIG_MTD_PLATRAM is not set -# end of Mapping drivers for chip access - -# -# Self-contained MTD device drivers -# -# CONFIG_MTD_PMC551 is not set -# CONFIG_MTD_DATAFLASH is not set -# CONFIG_MTD_MCHP23K256 is not set -# CONFIG_MTD_MCHP48L640 is not set -# CONFIG_MTD_SST25L is not set -# CONFIG_MTD_SLRAM is not set -# CONFIG_MTD_PHRAM is not set -# CONFIG_MTD_MTDRAM is not set -# CONFIG_MTD_BLOCK2MTD is not set - -# -# Disk-On-Chip Device Drivers -# -# CONFIG_MTD_DOCG3 is not set -# end of Self-contained MTD device drivers - -# -# NAND -# -# CONFIG_MTD_ONENAND is not set -# CONFIG_MTD_RAW_NAND is not set -# CONFIG_MTD_SPI_NAND is not set - -# -# ECC engine support -# -# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set -# CONFIG_MTD_NAND_ECC_SW_BCH is not set -# CONFIG_MTD_NAND_ECC_MXIC is not set -# end of ECC engine support -# end of NAND - -# -# LPDDR & LPDDR2 PCM memory drivers -# -# CONFIG_MTD_LPDDR is not set -# end of LPDDR & LPDDR2 PCM memory drivers - -# 
CONFIG_MTD_SPI_NOR is not set -CONFIG_MTD_UBI=m -CONFIG_MTD_UBI_WL_THRESHOLD=4096 -CONFIG_MTD_UBI_BEB_LIMIT=20 -# CONFIG_MTD_UBI_FASTMAP is not set -# CONFIG_MTD_UBI_GLUEBI is not set -# CONFIG_MTD_UBI_BLOCK is not set -# CONFIG_MTD_HYPERBUS is not set -CONFIG_DTC=y -CONFIG_OF=y -# CONFIG_OF_UNITTEST is not set -CONFIG_OF_FLATTREE=y -CONFIG_OF_EARLY_FLATTREE=y -CONFIG_OF_KOBJ=y -CONFIG_OF_DYNAMIC=y -CONFIG_OF_ADDRESS=y -CONFIG_OF_IRQ=y -CONFIG_OF_RESERVED_MEM=y -CONFIG_OF_RESOLVE=y -CONFIG_OF_OVERLAY=y -CONFIG_OF_NUMA=y -# CONFIG_PARPORT is not set -CONFIG_PNP=y -CONFIG_PNP_DEBUG_MESSAGES=y - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -CONFIG_BLK_DEV_NULL_BLK=m -CONFIG_CDROM=m -# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set -CONFIG_ZRAM=m -CONFIG_ZRAM_DEF_COMP_LZORLE=y -# CONFIG_ZRAM_DEF_COMP_ZSTD is not set -# CONFIG_ZRAM_DEF_COMP_LZ4 is not set -# CONFIG_ZRAM_DEF_COMP_LZO is not set -# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set -CONFIG_ZRAM_DEF_COMP="lzo-rle" -CONFIG_ZRAM_WRITEBACK=y -# CONFIG_ZRAM_MEMORY_TRACKING is not set -# CONFIG_ZRAM_MULTI_COMP is not set -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 -# CONFIG_BLK_DEV_DRBD is not set -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=m -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=16384 -CONFIG_CDROM_PKTCDVD=m -CONFIG_CDROM_PKTCDVD_BUFFERS=8 -# CONFIG_CDROM_PKTCDVD_WCACHE is not set -# CONFIG_ATA_OVER_ETH is not set -CONFIG_VIRTIO_BLK=m -CONFIG_BLK_DEV_RBD=m -CONFIG_BLK_DEV_UBLK=m -CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y - -# -# NVME Support -# -CONFIG_NVME_CORE=m -CONFIG_BLK_DEV_NVME=m -CONFIG_NVME_MULTIPATH=y -# CONFIG_NVME_VERBOSE_ERRORS is not set -# CONFIG_NVME_HWMON is not set -CONFIG_NVME_FABRICS=m -CONFIG_NVME_RDMA=m -CONFIG_NVME_FC=m -CONFIG_NVME_TCP=m -# CONFIG_NVME_AUTH is not set -CONFIG_NVME_TARGET=m -# CONFIG_NVME_TARGET_PASSTHRU is not set -CONFIG_NVME_TARGET_LOOP=m -CONFIG_NVME_TARGET_RDMA=m -CONFIG_NVME_TARGET_FC=m -CONFIG_NVME_TARGET_FCLOOP=m -CONFIG_NVME_TARGET_TCP=m -# 
CONFIG_NVME_TARGET_AUTH is not set -# end of NVME Support - -# -# Misc devices -# -# CONFIG_AD525X_DPOT is not set -# CONFIG_DUMMY_IRQ is not set -# CONFIG_PHANTOM is not set -CONFIG_TIFM_CORE=m -# CONFIG_TIFM_7XX1 is not set -# CONFIG_ICS932S401 is not set -CONFIG_ENCLOSURE_SERVICES=m -# CONFIG_HP_ILO is not set -# CONFIG_APDS9802ALS is not set -# CONFIG_ISL29003 is not set -# CONFIG_ISL29020 is not set -# CONFIG_SENSORS_TSL2550 is not set -# CONFIG_SENSORS_BH1770 is not set -# CONFIG_SENSORS_APDS990X is not set -# CONFIG_HMC6352 is not set -# CONFIG_DS1682 is not set -# CONFIG_LATTICE_ECP3_CONFIG is not set -# CONFIG_SRAM is not set -# CONFIG_DW_XDATA_PCIE is not set -# CONFIG_PCI_ENDPOINT_TEST is not set -# CONFIG_XILINX_SDFEC is not set -# CONFIG_HISI_HIKEY_USB is not set -# CONFIG_OPEN_DICE is not set -# CONFIG_VCPU_STALL_DETECTOR is not set -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -# CONFIG_EEPROM_AT24 is not set -# CONFIG_EEPROM_AT25 is not set -CONFIG_EEPROM_LEGACY=m -CONFIG_EEPROM_MAX6875=m -CONFIG_EEPROM_93CX6=m -# CONFIG_EEPROM_93XX46 is not set -# CONFIG_EEPROM_IDT_89HPESX is not set -# CONFIG_EEPROM_EE1004 is not set -# end of EEPROM support - -CONFIG_CB710_CORE=m -# CONFIG_CB710_DEBUG is not set -CONFIG_CB710_DEBUG_ASSUMPTIONS=y - -# -# Texas Instruments shared transport line discipline -# -# CONFIG_TI_ST is not set -# end of Texas Instruments shared transport line discipline - -# CONFIG_SENSORS_LIS3_I2C is not set -# CONFIG_ALTERA_STAPL is not set -# CONFIG_VMWARE_VMCI is not set -# CONFIG_GENWQE is not set -# CONFIG_ECHO is not set -# CONFIG_BCM_VK is not set -# CONFIG_MISC_ALCOR_PCI is not set -# CONFIG_MISC_RTSX_PCI is not set -# CONFIG_MISC_RTSX_USB is not set -CONFIG_UACCE=m -CONFIG_PVPANIC=y -# CONFIG_PVPANIC_MMIO is not set -# CONFIG_PVPANIC_PCI is not set -# CONFIG_GP_PCI1XXXX is not set -# end of Misc devices - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -CONFIG_RAID_ATTRS=m -CONFIG_SCSI_COMMON=y -CONFIG_SCSI=y 
-CONFIG_SCSI_DMA=y -CONFIG_SCSI_NETLINK=y -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=m -CONFIG_CHR_DEV_ST=m -CONFIG_BLK_DEV_SR=m -CONFIG_CHR_DEV_SG=m -CONFIG_BLK_DEV_BSG=y -CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_ENCLOSURE=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=m -CONFIG_SCSI_SAS_LIBSAS=m -CONFIG_SCSI_SAS_ATA=y -CONFIG_SCSI_SAS_HOST_SMP=y -CONFIG_SCSI_SRP_ATTRS=m -# end of SCSI Transports - -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m -# CONFIG_SCSI_CXGB3_ISCSI is not set -CONFIG_SCSI_CXGB4_ISCSI=m -# CONFIG_SCSI_BNX2_ISCSI is not set -# CONFIG_SCSI_BNX2X_FCOE is not set -CONFIG_BE2ISCSI=m -# CONFIG_BLK_DEV_3W_XXXX_RAID is not set -CONFIG_SCSI_HPSA=m -# CONFIG_SCSI_3W_9XXX is not set -# CONFIG_SCSI_3W_SAS is not set -# CONFIG_SCSI_ACARD is not set -# CONFIG_SCSI_AACRAID is not set -# CONFIG_SCSI_AIC7XXX is not set -# CONFIG_SCSI_AIC79XX is not set -# CONFIG_SCSI_AIC94XX is not set -CONFIG_SCSI_HISI_SAS=m -CONFIG_SCSI_HISI_SAS_PCI=m -# CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE is not set -# CONFIG_SCSI_MVSAS is not set -# CONFIG_SCSI_MVUMI is not set -# CONFIG_SCSI_ADVANSYS is not set -# CONFIG_SCSI_ARCMSR is not set -# CONFIG_SCSI_ESAS2R is not set -# CONFIG_MEGARAID_NEWGEN is not set -# CONFIG_MEGARAID_LEGACY is not set -CONFIG_MEGARAID_SAS=m -CONFIG_SCSI_MPT3SAS=m -CONFIG_SCSI_MPT2SAS_MAX_SGE=128 -CONFIG_SCSI_MPT3SAS_MAX_SGE=128 -CONFIG_SCSI_MPT2SAS=m -CONFIG_SCSI_MPI3MR=m -CONFIG_SCSI_SMARTPQI=m -# CONFIG_SCSI_HPTIOP is not set -# CONFIG_SCSI_BUSLOGIC is not set -# CONFIG_SCSI_MYRB is not set -# CONFIG_SCSI_MYRS is not set -CONFIG_LIBFC=m -CONFIG_LIBFCOE=m -CONFIG_FCOE=m -# CONFIG_SCSI_SNIC is not set -# CONFIG_SCSI_DMX3191D is not set -# CONFIG_SCSI_FDOMAIN_PCI is not set -# CONFIG_SCSI_IPS is not set -# CONFIG_SCSI_INITIO is not 
set -# CONFIG_SCSI_INIA100 is not set -# CONFIG_SCSI_STEX is not set -# CONFIG_SCSI_SYM53C8XX_2 is not set -CONFIG_SCSI_IPR=m -CONFIG_SCSI_IPR_TRACE=y -CONFIG_SCSI_IPR_DUMP=y -# CONFIG_SCSI_QLOGIC_1280 is not set -CONFIG_SCSI_QLA_FC=m -# CONFIG_TCM_QLA2XXX is not set -CONFIG_SCSI_QLA_ISCSI=m -CONFIG_QEDI=m -CONFIG_QEDF=m -CONFIG_SCSI_LPFC=m -# CONFIG_SCSI_LPFC_DEBUG_FS is not set -# CONFIG_SCSI_EFCT is not set -# CONFIG_SCSI_DC395x is not set -# CONFIG_SCSI_AM53C974 is not set -# CONFIG_SCSI_WD719X is not set -CONFIG_SCSI_DEBUG=m -# CONFIG_SCSI_PMCRAID is not set -# CONFIG_SCSI_PM8001 is not set -# CONFIG_SCSI_BFA_FC is not set -CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_CHELSIO_FCOE=m -CONFIG_SCSI_DH=y -CONFIG_SCSI_DH_RDAC=y -CONFIG_SCSI_DH_HP_SW=y -CONFIG_SCSI_DH_EMC=y -CONFIG_SCSI_DH_ALUA=y -# end of SCSI device support - -CONFIG_ATA=m -CONFIG_SATA_HOST=y -CONFIG_PATA_TIMINGS=y -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_FORCE=y -CONFIG_ATA_ACPI=y -# CONFIG_SATA_ZPODD is not set -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=m -CONFIG_SATA_MOBILE_LPM_POLICY=0 -CONFIG_SATA_AHCI_PLATFORM=m -# CONFIG_AHCI_DWC is not set -# CONFIG_AHCI_CEVA is not set -CONFIG_AHCI_XGENE=m -CONFIG_SATA_AHCI_SEATTLE=m -# CONFIG_SATA_INIC162X is not set -# CONFIG_SATA_ACARD_AHCI is not set -# CONFIG_SATA_SIL24 is not set -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -# CONFIG_PDC_ADMA is not set -# CONFIG_SATA_QSTOR is not set -# CONFIG_SATA_SX4 is not set -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -CONFIG_ATA_PIIX=m -# CONFIG_SATA_DWC is not set -# CONFIG_SATA_MV is not set -# CONFIG_SATA_NV is not set -# CONFIG_SATA_PROMISE is not set -# CONFIG_SATA_SIL is not set -# CONFIG_SATA_SIS is not set -# CONFIG_SATA_SVW is not set -# CONFIG_SATA_ULI is not set -# CONFIG_SATA_VIA is not set -# CONFIG_SATA_VITESSE is not set -# CONFIG_SATA_ZHAOXIN is not set - -# -# PATA SFF controllers with BMDMA -# -# CONFIG_PATA_ALI 
is not set -# CONFIG_PATA_AMD is not set -# CONFIG_PATA_ARTOP is not set -# CONFIG_PATA_ATIIXP is not set -# CONFIG_PATA_ATP867X is not set -# CONFIG_PATA_CMD64X is not set -# CONFIG_PATA_CYPRESS is not set -# CONFIG_PATA_EFAR is not set -# CONFIG_PATA_HPT366 is not set -# CONFIG_PATA_HPT37X is not set -# CONFIG_PATA_HPT3X2N is not set -# CONFIG_PATA_HPT3X3 is not set -# CONFIG_PATA_IT8213 is not set -# CONFIG_PATA_IT821X is not set -# CONFIG_PATA_JMICRON is not set -# CONFIG_PATA_MARVELL is not set -# CONFIG_PATA_NETCELL is not set -# CONFIG_PATA_NINJA32 is not set -# CONFIG_PATA_NS87415 is not set -# CONFIG_PATA_OLDPIIX is not set -# CONFIG_PATA_OPTIDMA is not set -# CONFIG_PATA_PDC2027X is not set -# CONFIG_PATA_PDC_OLD is not set -# CONFIG_PATA_RADISYS is not set -# CONFIG_PATA_RDC is not set -# CONFIG_PATA_SCH is not set -# CONFIG_PATA_SERVERWORKS is not set -# CONFIG_PATA_SIL680 is not set -# CONFIG_PATA_SIS is not set -# CONFIG_PATA_TOSHIBA is not set -# CONFIG_PATA_TRIFLEX is not set -# CONFIG_PATA_VIA is not set -# CONFIG_PATA_WINBOND is not set - -# -# PIO-only SFF controllers -# -# CONFIG_PATA_CMD640_PCI is not set -# CONFIG_PATA_MPIIX is not set -# CONFIG_PATA_NS87410 is not set -# CONFIG_PATA_OPTI is not set -# CONFIG_PATA_OF_PLATFORM is not set -# CONFIG_PATA_RZ1000 is not set - -# -# Generic fallback / legacy drivers -# -# CONFIG_PATA_ACPI is not set -CONFIG_ATA_GENERIC=m -# CONFIG_PATA_LEGACY is not set -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_AUTODETECT=y -CONFIG_MD_BITMAP_FILE=y -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m -# CONFIG_MD_MULTIPATH is not set -CONFIG_MD_FAULTY=m -CONFIG_MD_CLUSTER=m -# CONFIG_BCACHE is not set -CONFIG_BLK_DEV_DM_BUILTIN=y -CONFIG_BLK_DEV_DM=m -CONFIG_DM_DEBUG=y -CONFIG_DM_BUFIO=m -# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set -CONFIG_DM_BIO_PRISON=m -CONFIG_DM_PERSISTENT_DATA=m -# CONFIG_DM_UNSTRIPED is not set -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m 
-CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m -CONFIG_DM_CACHE_SMQ=m -CONFIG_DM_WRITECACHE=m -# CONFIG_DM_EBS is not set -CONFIG_DM_ERA=m -# CONFIG_DM_CLONE is not set -CONFIG_DM_MIRROR=m -CONFIG_DM_LOG_USERSPACE=m -CONFIG_DM_RAID=m -CONFIG_DM_ZERO=m -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m -CONFIG_DM_MULTIPATH_ST=m -# CONFIG_DM_MULTIPATH_HST is not set -# CONFIG_DM_MULTIPATH_IOA is not set -CONFIG_DM_DELAY=m -# CONFIG_DM_DUST is not set -CONFIG_DM_UEVENT=y -CONFIG_DM_FLAKEY=m -CONFIG_DM_VERITY=m -# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set -# CONFIG_DM_VERITY_FEC is not set -CONFIG_DM_SWITCH=m -CONFIG_DM_LOG_WRITES=m -CONFIG_DM_INTEGRITY=m -# CONFIG_DM_ZONED is not set -CONFIG_DM_AUDIT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m -CONFIG_TCM_USER2=m -CONFIG_LOOPBACK_TARGET=m -# CONFIG_TCM_FC is not set -CONFIG_ISCSI_TARGET=m -CONFIG_ISCSI_TARGET_CXGB4=m -# CONFIG_REMOTE_TARGET is not set -CONFIG_FUSION=y -CONFIG_FUSION_SPI=m -# CONFIG_FUSION_FC is not set -CONFIG_FUSION_SAS=m -CONFIG_FUSION_MAX_SGE=128 -# CONFIG_FUSION_CTL is not set -CONFIG_FUSION_LOGGING=y - -# -# IEEE 1394 (FireWire) support -# -# CONFIG_FIREWIRE is not set -# CONFIG_FIREWIRE_NOSY is not set -# end of IEEE 1394 (FireWire) support - -CONFIG_NETDEVICES=y -CONFIG_MII=m -CONFIG_NET_CORE=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_WIREGUARD=m -# CONFIG_WIREGUARD_DEBUG is not set -# CONFIG_EQUALIZER is not set -CONFIG_NET_FC=y -CONFIG_IFB=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_RANDOM=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_MACVLAN=m -CONFIG_MACVTAP=m -CONFIG_IPVLAN_L3S=y -CONFIG_IPVLAN=m -CONFIG_IPVTAP=m -CONFIG_VXLAN=m -CONFIG_GENEVE=m -# CONFIG_BAREUDP is not set -# CONFIG_GTP is not set -# CONFIG_AMT is not set -CONFIG_MACSEC=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y -# CONFIG_NETCONSOLE_EXTENDED_LOG is not set 
-CONFIG_NETPOLL=y -CONFIG_NET_POLL_CONTROLLER=y -CONFIG_TUN=m -CONFIG_TAP=m -# CONFIG_TUN_VNET_CROSS_LE is not set -CONFIG_VETH=m -CONFIG_VIRTIO_NET=m -CONFIG_NLMON=m -CONFIG_NET_VRF=m -CONFIG_VSOCKMON=m -# CONFIG_ARCNET is not set -# CONFIG_ATM_DRIVERS is not set -CONFIG_ETHERNET=y -CONFIG_MDIO=m -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ADAPTEC is not set -# CONFIG_NET_VENDOR_AGERE is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_ALTEON is not set -# CONFIG_ALTERA_TSE is not set -CONFIG_NET_VENDOR_AMAZON=y -CONFIG_ENA_ETHERNET=m -CONFIG_NET_VENDOR_AMD=y -# CONFIG_AMD8111_ETH is not set -# CONFIG_PCNET32 is not set -CONFIG_AMD_XGBE=m -# CONFIG_AMD_XGBE_DCB is not set -# CONFIG_PDS_CORE is not set -CONFIG_NET_XGENE=m -CONFIG_NET_XGENE_V2=m -CONFIG_NET_VENDOR_AQUANTIA=y -# CONFIG_AQTION is not set -# CONFIG_NET_VENDOR_ARC is not set -CONFIG_NET_VENDOR_ASIX=y -# CONFIG_SPI_AX88796C is not set -CONFIG_NET_VENDOR_ATHEROS=y -# CONFIG_ATL2 is not set -CONFIG_ATL1=m -CONFIG_ATL1E=m -CONFIG_ATL1C=m -CONFIG_ALX=m -CONFIG_NET_VENDOR_BROADCOM=y -# CONFIG_B44 is not set -# CONFIG_BCMGENET is not set -CONFIG_BNX2=m -# CONFIG_CNIC is not set -CONFIG_TIGON3=m -CONFIG_TIGON3_HWMON=y -CONFIG_BNX2X=m -CONFIG_BNX2X_SRIOV=y -# CONFIG_SYSTEMPORT is not set -CONFIG_BNXT=m -CONFIG_BNXT_SRIOV=y -CONFIG_BNXT_FLOWER_OFFLOAD=y -CONFIG_BNXT_DCB=y -CONFIG_BNXT_HWMON=y -# CONFIG_NET_VENDOR_CADENCE is not set -CONFIG_NET_VENDOR_CAVIUM=y -CONFIG_THUNDER_NIC_PF=m -CONFIG_THUNDER_NIC_VF=m -CONFIG_THUNDER_NIC_BGX=m -CONFIG_THUNDER_NIC_RGX=m -CONFIG_CAVIUM_PTP=y -CONFIG_LIQUIDIO_CORE=m -CONFIG_LIQUIDIO=m -CONFIG_LIQUIDIO_VF=m -CONFIG_NET_VENDOR_CHELSIO=y -# CONFIG_CHELSIO_T1 is not set -# CONFIG_CHELSIO_T3 is not set -CONFIG_CHELSIO_T4=m -# CONFIG_CHELSIO_T4_DCB is not set -CONFIG_CHELSIO_T4VF=m -CONFIG_CHELSIO_LIB=m -CONFIG_CHELSIO_INLINE_CRYPTO=y -CONFIG_CHELSIO_IPSEC_INLINE=m -# CONFIG_CHELSIO_TLS_DEVICE is not set -# CONFIG_NET_VENDOR_CISCO is not set -# 
CONFIG_NET_VENDOR_CORTINA is not set -CONFIG_NET_VENDOR_DAVICOM=y -# CONFIG_DM9051 is not set -CONFIG_DNET=m -# CONFIG_NET_VENDOR_DEC is not set -# CONFIG_NET_VENDOR_DLINK is not set -# CONFIG_NET_VENDOR_EMULEX is not set -CONFIG_NET_VENDOR_ENGLEDER=y -# CONFIG_TSNEP is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -CONFIG_NET_VENDOR_FUNGIBLE=y -# CONFIG_FUN_ETH is not set -CONFIG_NET_VENDOR_GOOGLE=y -CONFIG_GVE=m -CONFIG_NET_VENDOR_HISILICON=y -# CONFIG_HIX5HD2_GMAC is not set -# CONFIG_HISI_FEMAC is not set -# CONFIG_HIP04_ETH is not set -CONFIG_HNS_MDIO=m -CONFIG_HNS=m -CONFIG_HNS_DSAF=m -CONFIG_HNS_ENET=m -CONFIG_HNS3=m -CONFIG_HNS3_HCLGE=m -CONFIG_HNS3_DCB=y -CONFIG_HNS3_HCLGEVF=m -CONFIG_HNS3_ENET=m -CONFIG_NET_VENDOR_HUAWEI=y -CONFIG_HINIC=m -# CONFIG_NET_VENDOR_I825XX is not set -CONFIG_NET_VENDOR_INTEL=y -# CONFIG_E100 is not set -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_IGB=m -CONFIG_IGB_HWMON=y -CONFIG_IGBVF=m -CONFIG_IXGBE=m -CONFIG_IXGBE_HWMON=y -CONFIG_IXGBE_DCB=y -CONFIG_IXGBE_IPSEC=y -CONFIG_IXGBEVF=m -CONFIG_IXGBEVF_IPSEC=y -CONFIG_I40E=m -# CONFIG_I40E_DCB is not set -CONFIG_IAVF=m -CONFIG_I40EVF=m -CONFIG_ICE=m -CONFIG_ICE_SWITCHDEV=y -CONFIG_FM10K=m -CONFIG_IGC=m -# CONFIG_JME is not set -CONFIG_NET_VENDOR_ADI=y -# CONFIG_ADIN1110 is not set -CONFIG_NET_VENDOR_LITEX=y -# CONFIG_LITEX_LITEETH is not set -# CONFIG_NET_VENDOR_MARVELL is not set -CONFIG_NET_VENDOR_MELLANOX=y -CONFIG_MLX4_EN=m -CONFIG_MLX4_EN_DCB=y -CONFIG_MLX4_CORE=m -CONFIG_MLX4_DEBUG=y -# CONFIG_MLX4_CORE_GEN2 is not set -CONFIG_MLX5_CORE=m -CONFIG_MLX5_FPGA=y -CONFIG_MLX5_CORE_EN=y -CONFIG_MLX5_EN_ARFS=y -CONFIG_MLX5_EN_RXNFC=y -CONFIG_MLX5_MPFS=y -CONFIG_MLX5_ESWITCH=y -CONFIG_MLX5_BRIDGE=y -CONFIG_MLX5_CLS_ACT=y -CONFIG_MLX5_TC_CT=y -CONFIG_MLX5_TC_SAMPLE=y -CONFIG_MLX5_CORE_EN_DCB=y -CONFIG_MLX5_CORE_IPOIB=y -# CONFIG_MLX5_MACSEC is not set -# CONFIG_MLX5_EN_IPSEC is not set -# CONFIG_MLX5_EN_TLS is not set -CONFIG_MLX5_SW_STEERING=y -# CONFIG_MLX5_SF is not set 
-CONFIG_MLXSW_CORE=m -CONFIG_MLXSW_CORE_HWMON=y -CONFIG_MLXSW_CORE_THERMAL=y -CONFIG_MLXSW_PCI=m -CONFIG_MLXSW_I2C=m -CONFIG_MLXSW_SPECTRUM=m -CONFIG_MLXSW_SPECTRUM_DCB=y -CONFIG_MLXSW_MINIMAL=m -CONFIG_MLXFW=m -# CONFIG_MLXBF_GIGE is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROCHIP is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -CONFIG_NET_VENDOR_MICROSOFT=y -CONFIG_NET_VENDOR_MYRI=y -# CONFIG_MYRI10GE is not set -# CONFIG_FEALNX is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETERION is not set -CONFIG_NET_VENDOR_NETRONOME=y -CONFIG_NFP=m -CONFIG_NFP_APP_FLOWER=y -CONFIG_NFP_APP_ABM_NIC=y -CONFIG_NFP_NET_IPSEC=y -CONFIG_NFP_DEBUG=y -# CONFIG_NET_VENDOR_NVIDIA is not set -CONFIG_NET_VENDOR_OKI=y -CONFIG_ETHOC=m -# CONFIG_NET_VENDOR_PACKET_ENGINES is not set -CONFIG_NET_VENDOR_PENSANDO=y -# CONFIG_IONIC is not set -CONFIG_NET_VENDOR_QLOGIC=y -CONFIG_QLA3XXX=m -# CONFIG_QLCNIC is not set -CONFIG_NETXEN_NIC=m -CONFIG_QED=m -CONFIG_QED_LL2=y -CONFIG_QED_SRIOV=y -CONFIG_QEDE=m -CONFIG_QED_RDMA=y -CONFIG_QED_ISCSI=y -CONFIG_QED_FCOE=y -CONFIG_QED_OOO=y -# CONFIG_NET_VENDOR_BROCADE is not set -CONFIG_NET_VENDOR_QUALCOMM=y -# CONFIG_QCA7000_SPI is not set -CONFIG_QCOM_EMAC=m -# CONFIG_RMNET is not set -# CONFIG_NET_VENDOR_RDC is not set -CONFIG_NET_VENDOR_REALTEK=y -CONFIG_8139CP=m -CONFIG_8139TOO=m -# CONFIG_8139TOO_PIO is not set -# CONFIG_8139TOO_TUNE_TWISTER is not set -CONFIG_8139TOO_8129=y -# CONFIG_8139_OLD_RX_RESET is not set -CONFIG_R8169=m -# CONFIG_NET_VENDOR_RENESAS is not set -CONFIG_NET_VENDOR_ROCKER=y -CONFIG_ROCKER=m -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SILAN is not set -# CONFIG_NET_VENDOR_SIS is not set -CONFIG_NET_VENDOR_SOLARFLARE=y -# CONFIG_SFC is not set -# CONFIG_SFC_FALCON is not set -# CONFIG_SFC_SIENA is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# 
CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_TEHUTI is not set -# CONFIG_NET_VENDOR_TI is not set -CONFIG_NET_VENDOR_VERTEXCOM=y -# CONFIG_MSE102X is not set -# CONFIG_NET_VENDOR_VIA is not set -CONFIG_NET_VENDOR_WANGXUN=y -CONFIG_LIBWX=m -CONFIG_NGBE=m -CONFIG_TXGBE=m -# CONFIG_NET_VENDOR_WIZNET is not set -CONFIG_NET_VENDOR_XILINX=y -# CONFIG_XILINX_EMACLITE is not set -# CONFIG_XILINX_AXI_EMAC is not set -# CONFIG_XILINX_LL_TEMAC is not set -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set -# CONFIG_NET_SB1000 is not set -CONFIG_PHYLINK=m -CONFIG_PHYLIB=y -CONFIG_SWPHY=y -CONFIG_LED_TRIGGER_PHY=y -CONFIG_PHYLIB_LEDS=y -CONFIG_FIXED_PHY=y -CONFIG_SFP=m - -# -# MII PHY device drivers -# -CONFIG_AMD_PHY=m -# CONFIG_ADIN_PHY is not set -# CONFIG_ADIN1100_PHY is not set -CONFIG_AQUANTIA_PHY=m -CONFIG_AX88796B_PHY=m -CONFIG_BROADCOM_PHY=m -# CONFIG_BCM54140_PHY is not set -CONFIG_BCM7XXX_PHY=m -# CONFIG_BCM84881_PHY is not set -CONFIG_BCM87XX_PHY=m -CONFIG_BCM_NET_PHYLIB=m -CONFIG_BCM_NET_PHYPTP=m -CONFIG_CICADA_PHY=m -CONFIG_CORTINA_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_ICPLUS_PHY=m -CONFIG_LXT_PHY=m -CONFIG_INTEL_XWAY_PHY=m -CONFIG_LSI_ET1011C_PHY=m -CONFIG_MARVELL_PHY=m -CONFIG_MARVELL_10G_PHY=m -# CONFIG_MARVELL_88Q2XXX_PHY is not set -# CONFIG_MARVELL_88X2222_PHY is not set -# CONFIG_MAXLINEAR_GPHY is not set -# CONFIG_MEDIATEK_GE_PHY is not set -CONFIG_MICREL_PHY=m -# CONFIG_MICROCHIP_T1S_PHY is not set -CONFIG_MICROCHIP_PHY=m -CONFIG_MICROCHIP_T1_PHY=m -CONFIG_MICROSEMI_PHY=m -# CONFIG_MOTORCOMM_PHY is not set -CONFIG_NATIONAL_PHY=m -# CONFIG_NXP_CBTX_PHY is not set -# CONFIG_NXP_C45_TJA11XX_PHY is not set -# CONFIG_NXP_TJA11XX_PHY is not set -# CONFIG_NCN26000_PHY is not set -CONFIG_AT803X_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_REALTEK_PHY=m -CONFIG_RENESAS_PHY=m -CONFIG_ROCKCHIP_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_STE10XP=m -CONFIG_TERANETICS_PHY=m -CONFIG_DP83822_PHY=m 
-CONFIG_DP83TC811_PHY=m -CONFIG_DP83848_PHY=m -CONFIG_DP83867_PHY=m -# CONFIG_DP83869_PHY is not set -# CONFIG_DP83TD510_PHY is not set -CONFIG_VITESSE_PHY=m -CONFIG_XILINX_GMII2RGMII=m -CONFIG_MICREL_KS8995MA=m -# CONFIG_PSE_CONTROLLER is not set -CONFIG_MDIO_DEVICE=y -CONFIG_MDIO_BUS=y -CONFIG_FWNODE_MDIO=y -CONFIG_OF_MDIO=y -CONFIG_ACPI_MDIO=y -CONFIG_MDIO_DEVRES=y -CONFIG_MDIO_XGENE=m -CONFIG_MDIO_BITBANG=m -CONFIG_MDIO_BCM_UNIMAC=m -CONFIG_MDIO_CAVIUM=m -CONFIG_MDIO_GPIO=m -CONFIG_MDIO_HISI_FEMAC=m -CONFIG_MDIO_I2C=m -# CONFIG_MDIO_MVUSB is not set -CONFIG_MDIO_MSCC_MIIM=m -CONFIG_MDIO_OCTEON=m -# CONFIG_MDIO_IPQ4019 is not set -# CONFIG_MDIO_IPQ8064 is not set -CONFIG_MDIO_THUNDER=m - -# -# MDIO Multiplexers -# -# CONFIG_MDIO_BUS_MUX_GPIO is not set -# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set -# CONFIG_MDIO_BUS_MUX_MMIOREG is not set - -# -# PCS device drivers -# -CONFIG_PCS_XPCS=m -# end of PCS device drivers - -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOATM=m -CONFIG_PPPOE=m -# CONFIG_PPPOE_HASH_BITS_1 is not set -# CONFIG_PPPOE_HASH_BITS_2 is not set -CONFIG_PPPOE_HASH_BITS_4=y -# CONFIG_PPPOE_HASH_BITS_8 is not set -CONFIG_PPPOE_HASH_BITS=4 -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_SLIP=m -CONFIG_SLHC=m -CONFIG_SLIP_COMPRESSED=y -CONFIG_SLIP_SMART=y -# CONFIG_SLIP_MODE_SLIP6 is not set -CONFIG_USB_NET_DRIVERS=y -CONFIG_USB_CATC=m -CONFIG_USB_KAWETH=m -CONFIG_USB_PEGASUS=m -CONFIG_USB_RTL8150=m -CONFIG_USB_RTL8152=m -CONFIG_USB_LAN78XX=m -CONFIG_USB_USBNET=m -CONFIG_USB_NET_AX8817X=m -CONFIG_USB_NET_AX88179_178A=m -CONFIG_USB_NET_CDCETHER=m -CONFIG_USB_NET_CDC_EEM=m -CONFIG_USB_NET_CDC_NCM=m -CONFIG_USB_NET_HUAWEI_CDC_NCM=m -CONFIG_USB_NET_CDC_MBIM=m -CONFIG_USB_NET_DM9601=m -CONFIG_USB_NET_SR9700=m -# CONFIG_USB_NET_SR9800 is not set -CONFIG_USB_NET_SMSC75XX=m -CONFIG_USB_NET_SMSC95XX=m -CONFIG_USB_NET_GL620A=m 
-CONFIG_USB_NET_NET1080=m -CONFIG_USB_NET_PLUSB=m -CONFIG_USB_NET_MCS7830=m -CONFIG_USB_NET_RNDIS_HOST=m -CONFIG_USB_NET_CDC_SUBSET_ENABLE=m -CONFIG_USB_NET_CDC_SUBSET=m -CONFIG_USB_ALI_M5632=y -CONFIG_USB_AN2720=y -CONFIG_USB_BELKIN=y -CONFIG_USB_ARMLINUX=y -CONFIG_USB_EPSON2888=y -CONFIG_USB_KC2190=y -CONFIG_USB_NET_ZAURUS=m -CONFIG_USB_NET_CX82310_ETH=m -CONFIG_USB_NET_KALMIA=m -CONFIG_USB_NET_QMI_WWAN=m -CONFIG_USB_HSO=m -CONFIG_USB_NET_INT51X1=m -CONFIG_USB_IPHETH=m -CONFIG_USB_SIERRA_NET=m -CONFIG_USB_VL600=m -CONFIG_USB_NET_CH9200=m -# CONFIG_USB_NET_AQC111 is not set -CONFIG_USB_RTL8153_ECM=m -# CONFIG_WLAN is not set -CONFIG_WAN=y -CONFIG_HDLC=m -CONFIG_HDLC_RAW=m -# CONFIG_HDLC_RAW_ETH is not set -CONFIG_HDLC_CISCO=m -CONFIG_HDLC_FR=m -CONFIG_HDLC_PPP=m - -# -# X.25/LAPB support is disabled -# -# CONFIG_PCI200SYN is not set -# CONFIG_WANXL is not set -# CONFIG_PC300TOO is not set -# CONFIG_FARSYNC is not set -CONFIG_IEEE802154_DRIVERS=m -# CONFIG_IEEE802154_FAKELB is not set -# CONFIG_IEEE802154_AT86RF230 is not set -# CONFIG_IEEE802154_MRF24J40 is not set -# CONFIG_IEEE802154_CC2520 is not set -# CONFIG_IEEE802154_ATUSB is not set -# CONFIG_IEEE802154_ADF7242 is not set -# CONFIG_IEEE802154_CA8210 is not set -# CONFIG_IEEE802154_MCR20A is not set -# CONFIG_IEEE802154_HWSIM is not set - -# -# Wireless WAN -# -# CONFIG_WWAN is not set -# end of Wireless WAN - -# CONFIG_VMXNET3 is not set -# CONFIG_FUJITSU_ES is not set -CONFIG_NETDEVSIM=m -CONFIG_NET_FAILOVER=m -# CONFIG_ISDN is not set - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_LEDS=y -CONFIG_INPUT_FF_MEMLESS=m -CONFIG_INPUT_SPARSEKMAP=m -# CONFIG_INPUT_MATRIXKMAP is not set - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -# CONFIG_INPUT_JOYDEV is not set -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -CONFIG_INPUT_KEYBOARD=y 
-# CONFIG_KEYBOARD_ADP5588 is not set -# CONFIG_KEYBOARD_ADP5589 is not set -# CONFIG_KEYBOARD_ATKBD is not set -# CONFIG_KEYBOARD_QT1050 is not set -# CONFIG_KEYBOARD_QT1070 is not set -# CONFIG_KEYBOARD_QT2160 is not set -# CONFIG_KEYBOARD_DLINK_DIR685 is not set -# CONFIG_KEYBOARD_LKKBD is not set -CONFIG_KEYBOARD_GPIO=m -# CONFIG_KEYBOARD_GPIO_POLLED is not set -# CONFIG_KEYBOARD_TCA6416 is not set -# CONFIG_KEYBOARD_TCA8418 is not set -# CONFIG_KEYBOARD_MATRIX is not set -# CONFIG_KEYBOARD_LM8323 is not set -# CONFIG_KEYBOARD_LM8333 is not set -# CONFIG_KEYBOARD_MAX7359 is not set -# CONFIG_KEYBOARD_MCS is not set -# CONFIG_KEYBOARD_MPR121 is not set -# CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# CONFIG_KEYBOARD_PINEPHONE is not set -# CONFIG_KEYBOARD_SAMSUNG is not set -# CONFIG_KEYBOARD_STOWAWAY is not set -# CONFIG_KEYBOARD_SUNKBD is not set -# CONFIG_KEYBOARD_OMAP4 is not set -# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set -# CONFIG_KEYBOARD_XTKBD is not set -# CONFIG_KEYBOARD_CAP11XX is not set -# CONFIG_KEYBOARD_BCM is not set -# CONFIG_KEYBOARD_CYPRESS_SF is not set -CONFIG_INPUT_MOUSE=y -# CONFIG_MOUSE_PS2 is not set -# CONFIG_MOUSE_SERIAL is not set -# CONFIG_MOUSE_APPLETOUCH is not set -# CONFIG_MOUSE_BCM5974 is not set -# CONFIG_MOUSE_CYAPA is not set -CONFIG_MOUSE_ELAN_I2C=m -CONFIG_MOUSE_ELAN_I2C_I2C=y -CONFIG_MOUSE_ELAN_I2C_SMBUS=y -# CONFIG_MOUSE_VSXXXAA is not set -# CONFIG_MOUSE_GPIO is not set -CONFIG_MOUSE_SYNAPTICS_I2C=m -CONFIG_MOUSE_SYNAPTICS_USB=m -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -# CONFIG_INPUT_MISC is not set -CONFIG_RMI4_CORE=m -CONFIG_RMI4_I2C=m -CONFIG_RMI4_SPI=m -CONFIG_RMI4_SMB=m -CONFIG_RMI4_F03=y -CONFIG_RMI4_F03_SERIO=m -CONFIG_RMI4_2D_SENSOR=y -CONFIG_RMI4_F11=y -CONFIG_RMI4_F12=y -CONFIG_RMI4_F30=y -CONFIG_RMI4_F34=y -# CONFIG_RMI4_F3A is not set -CONFIG_RMI4_F55=y - -# -# Hardware I/O ports -# -CONFIG_SERIO=y 
-CONFIG_SERIO_SERPORT=y -CONFIG_SERIO_AMBAKMI=y -# CONFIG_SERIO_PCIPS2 is not set -CONFIG_SERIO_LIBPS2=y -CONFIG_SERIO_RAW=m -CONFIG_SERIO_ALTERA_PS2=m -# CONFIG_SERIO_PS2MULT is not set -CONFIG_SERIO_ARC_PS2=m -# CONFIG_SERIO_APBPS2 is not set -# CONFIG_SERIO_GPIO_PS2 is not set -# CONFIG_USERIO is not set -# CONFIG_GAMEPORT is not set -# end of Hardware I/O ports -# end of Input device support - -# -# Character devices -# -CONFIG_TTY=y -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_VT_CONSOLE_SLEEP=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -# CONFIG_LEGACY_PTYS is not set -CONFIG_LEGACY_TIOCSTI=y -CONFIG_LDISC_AUTOLOAD=y - -# -# Serial drivers -# -CONFIG_SERIAL_EARLYCON=y -CONFIG_SERIAL_8250=y -# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set -CONFIG_SERIAL_8250_PNP=y -CONFIG_SERIAL_8250_16550A_VARIANTS=y -# CONFIG_SERIAL_8250_FINTEK is not set -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_DMA=y -CONFIG_SERIAL_8250_PCILIB=y -CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_EXAR=y -CONFIG_SERIAL_8250_NR_UARTS=32 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -# CONFIG_SERIAL_8250_PCI1XXXX is not set -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_SERIAL_8250_DETECT_IRQ is not set -CONFIG_SERIAL_8250_RSA=y -CONFIG_SERIAL_8250_DWLIB=y -CONFIG_SERIAL_8250_FSL=y -CONFIG_SERIAL_8250_DW=y -CONFIG_SERIAL_8250_RT288X=y -CONFIG_SERIAL_8250_PERICOM=y -CONFIG_SERIAL_OF_PLATFORM=y - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_AMBA_PL010 is not set -CONFIG_SERIAL_AMBA_PL011=y -CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set -# CONFIG_SERIAL_KGDB_NMI is not set -# CONFIG_SERIAL_MAX3100 is not set -# CONFIG_SERIAL_MAX310X is not set -# CONFIG_SERIAL_UARTLITE is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_CONSOLE_POLL=y -# CONFIG_SERIAL_JSM is not set -# CONFIG_SERIAL_MSM is not set -# 
CONFIG_SERIAL_SIFIVE is not set -# CONFIG_SERIAL_SCCNXP is not set -# CONFIG_SERIAL_SC16IS7XX is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -# CONFIG_SERIAL_XILINX_PS_UART is not set -# CONFIG_SERIAL_ARC is not set -# CONFIG_SERIAL_RP2 is not set -# CONFIG_SERIAL_FSL_LPUART is not set -# CONFIG_SERIAL_FSL_LINFLEXUART is not set -# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set -# CONFIG_SERIAL_SPRD is not set -# end of Serial drivers - -CONFIG_SERIAL_MCTRL_GPIO=y -CONFIG_SERIAL_NONSTANDARD=y -# CONFIG_MOXA_INTELLIO is not set -# CONFIG_MOXA_SMARTIO is not set -CONFIG_N_HDLC=m -CONFIG_N_GSM=m -# CONFIG_NOZOMI is not set -# CONFIG_NULL_TTY is not set -CONFIG_HVC_DRIVER=y -# CONFIG_HVC_DCC is not set -# CONFIG_SERIAL_DEV_BUS is not set -CONFIG_VIRTIO_CONSOLE=m -CONFIG_IPMI_HANDLER=m -CONFIG_IPMI_DMI_DECODE=y -CONFIG_IPMI_PLAT_DATA=y -CONFIG_IPMI_PANIC_EVENT=y -CONFIG_IPMI_PANIC_STRING=y -CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m -CONFIG_IPMI_SSIF=m -# CONFIG_IPMI_IPMB is not set -CONFIG_IPMI_WATCHDOG=m -CONFIG_IPMI_POWEROFF=m -# CONFIG_SSIF_IPMI_BMC is not set -# CONFIG_IPMB_DEVICE_INTERFACE is not set -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=m -# CONFIG_HW_RANDOM_BA431 is not set -CONFIG_HW_RANDOM_VIRTIO=m -CONFIG_HW_RANDOM_HISI=y -CONFIG_HW_RANDOM_HISTB=y -CONFIG_HW_RANDOM_XGENE=m -CONFIG_HW_RANDOM_CAVIUM=m -# CONFIG_HW_RANDOM_CCTRNG is not set -# CONFIG_HW_RANDOM_XIPHERA is not set -CONFIG_HW_RANDOM_ARM_SMCCC_TRNG=y -CONFIG_HW_RANDOM_CN10K=y -# CONFIG_APPLICOM is not set -CONFIG_DEVMEM=y -# CONFIG_DEVPORT is not set -CONFIG_TCG_TPM=y -CONFIG_HW_RANDOM_TPM=y -CONFIG_TCG_TIS_CORE=y -CONFIG_TCG_TIS=y -CONFIG_TCG_TIS_SPI=m -# CONFIG_TCG_TIS_SPI_CR50 is not set -# CONFIG_TCG_TIS_I2C is not set -# CONFIG_TCG_TIS_I2C_CR50 is not set -# CONFIG_TCG_TIS_I2C_ATMEL is not set -# CONFIG_TCG_TIS_I2C_INFINEON is not set -# CONFIG_TCG_TIS_I2C_NUVOTON is not set -CONFIG_TCG_ATMEL=m -# CONFIG_TCG_INFINEON is not set 
-CONFIG_TCG_CRB=y -# CONFIG_TCG_VTPM_PROXY is not set -# CONFIG_TCG_TIS_ST33ZP24_I2C is not set -# CONFIG_TCG_TIS_ST33ZP24_SPI is not set -# CONFIG_XILLYBUS is not set -# CONFIG_XILLYUSB is not set -# end of Character devices - -# -# I2C support -# -CONFIG_I2C=y -CONFIG_ACPI_I2C_OPREGION=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=m -CONFIG_I2C_MUX=m - -# -# Multiplexer I2C Chip support -# -CONFIG_I2C_ARB_GPIO_CHALLENGE=m -CONFIG_I2C_MUX_GPIO=m -# CONFIG_I2C_MUX_GPMUX is not set -# CONFIG_I2C_MUX_LTC4306 is not set -CONFIG_I2C_MUX_PCA9541=m -CONFIG_I2C_MUX_PCA954x=m -CONFIG_I2C_MUX_PINCTRL=m -# CONFIG_I2C_MUX_REG is not set -# CONFIG_I2C_DEMUX_PINCTRL is not set -CONFIG_I2C_MUX_MLXCPLD=m -# end of Multiplexer I2C Chip support - -# CONFIG_I2C_HELPER_AUTO is not set -CONFIG_I2C_SMBUS=m - -# -# I2C Algorithms -# -CONFIG_I2C_ALGOBIT=m -CONFIG_I2C_ALGOPCF=m -CONFIG_I2C_ALGOPCA=m -# end of I2C Algorithms - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set -# CONFIG_I2C_AMD756 is not set -# CONFIG_I2C_AMD8111 is not set -# CONFIG_I2C_AMD_MP2 is not set -# CONFIG_I2C_HIX5HD2 is not set -# CONFIG_I2C_I801 is not set -# CONFIG_I2C_ISCH is not set -# CONFIG_I2C_PIIX4 is not set -CONFIG_I2C_NFORCE2=m -# CONFIG_I2C_NVIDIA_GPU is not set -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -# CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_VIA is not set -# CONFIG_I2C_VIAPRO is not set -# CONFIG_I2C_ZHAOXIN is not set - -# -# ACPI drivers -# -# CONFIG_I2C_SCMI is not set -# CONFIG_I2C_ZHAOXIN_SMBUS is not set - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_CADENCE is not set -# CONFIG_I2C_CBUS_GPIO is not set -CONFIG_I2C_DESIGNWARE_CORE=m -# CONFIG_I2C_DESIGNWARE_SLAVE is not set -CONFIG_I2C_DESIGNWARE_PLATFORM=m -# CONFIG_I2C_DESIGNWARE_PCI is not set -# CONFIG_I2C_EMEV2 is not set 
-CONFIG_I2C_GPIO=m -CONFIG_I2C_GPIO_FAULT_INJECTOR=y -CONFIG_I2C_HISI=m -# CONFIG_I2C_NOMADIK is not set -# CONFIG_I2C_OCORES is not set -CONFIG_I2C_PCA_PLATFORM=m -# CONFIG_I2C_QCOM_CCI is not set -CONFIG_I2C_QUP=m -# CONFIG_I2C_RK3X is not set -CONFIG_I2C_SIMTEC=m -CONFIG_I2C_VERSATILE=m -CONFIG_I2C_THUNDERX=m -# CONFIG_I2C_XILINX is not set -CONFIG_I2C_XLP9XX=m - -# -# External I2C/SMBus adapter drivers -# -CONFIG_I2C_DIOLAN_U2C=m -# CONFIG_I2C_CP2615 is not set -# CONFIG_I2C_PCI1XXXX is not set -# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -# CONFIG_I2C_TAOS_EVM is not set -CONFIG_I2C_TINY_USB=m - -# -# Other I2C/SMBus bus drivers -# -# CONFIG_I2C_MLXCPLD is not set -CONFIG_I2C_XGENE_SLIMPRO=m -# CONFIG_I2C_VIRTIO is not set -# end of I2C Hardware Bus support - -CONFIG_I2C_STUB=m -CONFIG_I2C_SLAVE=y -CONFIG_I2C_SLAVE_EEPROM=m -# CONFIG_I2C_SLAVE_TESTUNIT is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -# end of I2C support - -# CONFIG_I3C is not set -CONFIG_SPI=y -CONFIG_SPI_DEBUG=y -CONFIG_SPI_MASTER=y -# CONFIG_SPI_MEM is not set - -# -# SPI Master Controller Drivers -# -# CONFIG_SPI_ALTERA is not set -# CONFIG_SPI_AXI_SPI_ENGINE is not set -# CONFIG_SPI_BITBANG is not set -CONFIG_SPI_CADENCE=m -# CONFIG_SPI_CADENCE_QUADSPI is not set -CONFIG_SPI_DESIGNWARE=m -# CONFIG_SPI_DW_DMA is not set -# CONFIG_SPI_DW_PCI is not set -CONFIG_SPI_DW_MMIO=m -CONFIG_SPI_HISI_KUNPENG=m -CONFIG_SPI_HISI_SFC_V3XX=m -# CONFIG_SPI_GPIO is not set -# CONFIG_SPI_FSL_SPI is not set -# CONFIG_SPI_MICROCHIP_CORE is not set -# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set -# CONFIG_SPI_OC_TINY is not set -# CONFIG_SPI_PCI1XXXX is not set -CONFIG_SPI_PL022=m -# CONFIG_SPI_PXA2XX is not set -# CONFIG_SPI_QCOM_QSPI is not set -CONFIG_SPI_QUP=y -# CONFIG_SPI_SC18IS602 is not set -# CONFIG_SPI_SIFIVE is not set -# CONFIG_SPI_MXIC is not set -# CONFIG_SPI_THUNDERX is not set -# CONFIG_SPI_XCOMM is not set -# CONFIG_SPI_XILINX is 
not set -CONFIG_SPI_XLP=m -# CONFIG_SPI_AMD is not set - -# -# SPI Multiplexer support -# -# CONFIG_SPI_MUX is not set - -# -# SPI Protocol Masters -# -# CONFIG_SPI_SPIDEV is not set -# CONFIG_SPI_LOOPBACK_TEST is not set -# CONFIG_SPI_TLE62X0 is not set -# CONFIG_SPI_SLAVE is not set -CONFIG_SPI_DYNAMIC=y -# CONFIG_SPMI is not set -# CONFIG_HSI is not set -CONFIG_PPS=y -# CONFIG_PPS_DEBUG is not set - -# -# PPS clients support -# -# CONFIG_PPS_CLIENT_KTIMER is not set -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PPS_CLIENT_GPIO=m - -# -# PPS generators support -# - -# -# PTP clock support -# -CONFIG_PTP_1588_CLOCK=y -CONFIG_PTP_1588_CLOCK_OPTIONAL=y -CONFIG_DP83640_PHY=m -# CONFIG_PTP_1588_CLOCK_INES is not set -CONFIG_PTP_1588_CLOCK_KVM=y -# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set -# CONFIG_PTP_1588_CLOCK_IDTCM is not set -# CONFIG_PTP_1588_CLOCK_MOCK is not set -# CONFIG_PTP_1588_CLOCK_OCP is not set -# end of PTP clock support - -CONFIG_PINCTRL=y -CONFIG_PINMUX=y -CONFIG_PINCONF=y -CONFIG_GENERIC_PINCONF=y -# CONFIG_DEBUG_PINCTRL is not set -# CONFIG_PINCTRL_AMD is not set -# CONFIG_PINCTRL_CY8C95X0 is not set -# CONFIG_PINCTRL_MCP23S08 is not set -# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set -# CONFIG_PINCTRL_OCELOT is not set -# CONFIG_PINCTRL_SINGLE is not set -# CONFIG_PINCTRL_STMFX is not set -# CONFIG_PINCTRL_SX150X is not set -CONFIG_PINCTRL_MSM=y -# CONFIG_PINCTRL_IPQ5018 is not set -# CONFIG_PINCTRL_IPQ5332 is not set -# CONFIG_PINCTRL_IPQ8074 is not set -# CONFIG_PINCTRL_IPQ6018 is not set -# CONFIG_PINCTRL_IPQ9574 is not set -# CONFIG_PINCTRL_MDM9607 is not set -# CONFIG_PINCTRL_MSM8916 is not set -# CONFIG_PINCTRL_MSM8953 is not set -# CONFIG_PINCTRL_MSM8976 is not set -# CONFIG_PINCTRL_MSM8994 is not set -# CONFIG_PINCTRL_MSM8996 is not set -# CONFIG_PINCTRL_MSM8998 is not set -# CONFIG_PINCTRL_QCM2290 is not set -# CONFIG_PINCTRL_QCS404 is not set -CONFIG_PINCTRL_QDF2XXX=y -# CONFIG_PINCTRL_QDU1000 is not set -# CONFIG_PINCTRL_SA8775P is not set -# 
CONFIG_PINCTRL_SC7180 is not set -# CONFIG_PINCTRL_SC7280 is not set -# CONFIG_PINCTRL_SC8180X is not set -# CONFIG_PINCTRL_SC8280XP is not set -# CONFIG_PINCTRL_SDM660 is not set -# CONFIG_PINCTRL_SDM670 is not set -# CONFIG_PINCTRL_SDM845 is not set -# CONFIG_PINCTRL_SDX75 is not set -# CONFIG_PINCTRL_SM6115 is not set -# CONFIG_PINCTRL_SM6125 is not set -# CONFIG_PINCTRL_SM6350 is not set -# CONFIG_PINCTRL_SM6375 is not set -# CONFIG_PINCTRL_SM7150 is not set -# CONFIG_PINCTRL_SM8150 is not set -# CONFIG_PINCTRL_SM8250 is not set -# CONFIG_PINCTRL_SM8350 is not set -# CONFIG_PINCTRL_SM8450 is not set -# CONFIG_PINCTRL_SM8550 is not set -# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set -# CONFIG_PINCTRL_LPASS_LPI is not set - -# -# Renesas pinctrl drivers -# -# end of Renesas pinctrl drivers - -CONFIG_GPIOLIB=y -CONFIG_GPIOLIB_FASTPATH_LIMIT=512 -CONFIG_OF_GPIO=y -CONFIG_GPIO_ACPI=y -CONFIG_GPIOLIB_IRQCHIP=y -# CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_CDEV=y -CONFIG_GPIO_CDEV_V1=y -CONFIG_GPIO_GENERIC=m - -# -# Memory mapped GPIO drivers -# -# CONFIG_GPIO_74XX_MMIO is not set -# CONFIG_GPIO_ALTERA is not set -CONFIG_GPIO_AMDPT=m -# CONFIG_GPIO_CADENCE is not set -CONFIG_GPIO_DWAPB=m -# CONFIG_GPIO_EXAR is not set -# CONFIG_GPIO_FTGPIO010 is not set -CONFIG_GPIO_GENERIC_PLATFORM=m -# CONFIG_GPIO_GRGPIO is not set -CONFIG_GPIO_HISI=m -# CONFIG_GPIO_HLWD is not set -# CONFIG_GPIO_LOGICVC is not set -# CONFIG_GPIO_MB86S7X is not set -CONFIG_GPIO_PL061=y -# CONFIG_GPIO_SIFIVE is not set -# CONFIG_GPIO_SYSCON is not set -# CONFIG_GPIO_THUNDERX is not set -CONFIG_GPIO_XGENE=y -CONFIG_GPIO_XGENE_SB=m -# CONFIG_GPIO_XILINX is not set -CONFIG_GPIO_XLP=m -# CONFIG_GPIO_AMD_FCH is not set -# end of Memory mapped GPIO drivers - -# -# I2C GPIO expanders -# -# CONFIG_GPIO_ADNP is not set -# CONFIG_GPIO_FXL6408 is not set -# CONFIG_GPIO_DS4520 is not set -# CONFIG_GPIO_GW_PLD is not set -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not 
set -# CONFIG_GPIO_PCA9570 is not set -# CONFIG_GPIO_PCF857X is not set -# CONFIG_GPIO_TPIC2810 is not set -# end of I2C GPIO expanders - -# -# MFD GPIO expanders -# -# end of MFD GPIO expanders - -# -# PCI GPIO expanders -# -# CONFIG_GPIO_BT8XX is not set -# CONFIG_GPIO_PCI_IDIO_16 is not set -# CONFIG_GPIO_PCIE_IDIO_24 is not set -# CONFIG_GPIO_RDC321X is not set -# end of PCI GPIO expanders - -# -# SPI GPIO expanders -# -# CONFIG_GPIO_74X164 is not set -# CONFIG_GPIO_MAX3191X is not set -# CONFIG_GPIO_MAX7301 is not set -# CONFIG_GPIO_MC33880 is not set -# CONFIG_GPIO_PISOSR is not set -# CONFIG_GPIO_XRA1403 is not set -# end of SPI GPIO expanders - -# -# USB GPIO expanders -# -# end of USB GPIO expanders - -# -# Virtual GPIO drivers -# -# CONFIG_GPIO_AGGREGATOR is not set -# CONFIG_GPIO_LATCH is not set -# CONFIG_GPIO_MOCKUP is not set -# CONFIG_GPIO_VIRTIO is not set -# CONFIG_GPIO_SIM is not set -# end of Virtual GPIO drivers - -# CONFIG_W1 is not set -CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_BRCMSTB is not set -CONFIG_POWER_RESET_GPIO=y -CONFIG_POWER_RESET_GPIO_RESTART=y -CONFIG_POWER_RESET_HISI=y -# CONFIG_POWER_RESET_MSM is not set -# CONFIG_POWER_RESET_LTC2952 is not set -# CONFIG_POWER_RESET_REGULATOR is not set -CONFIG_POWER_RESET_RESTART=y -# CONFIG_POWER_RESET_VEXPRESS is not set -# CONFIG_POWER_RESET_XGENE is not set -CONFIG_POWER_RESET_SYSCON=y -# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set -# CONFIG_SYSCON_REBOOT_MODE is not set -# CONFIG_NVMEM_REBOOT_MODE is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -CONFIG_POWER_SUPPLY_HWMON=y -# CONFIG_IP5XXX_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_CHARGER_ADP5061 is not set -# CONFIG_BATTERY_CW2015 is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2781 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_SAMSUNG_SDI is not set -# CONFIG_BATTERY_SBS is not set -# CONFIG_CHARGER_SBS is not set -# CONFIG_MANAGER_SBS is not set -# 
CONFIG_BATTERY_BQ27XXX is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_GPIO is not set -# CONFIG_CHARGER_MANAGER is not set -# CONFIG_CHARGER_LT3651 is not set -# CONFIG_CHARGER_LTC4162L is not set -# CONFIG_CHARGER_DETECTOR_MAX14656 is not set -# CONFIG_CHARGER_MAX77976 is not set -# CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_BQ24190 is not set -# CONFIG_CHARGER_BQ24257 is not set -# CONFIG_CHARGER_BQ24735 is not set -# CONFIG_CHARGER_BQ2515X is not set -# CONFIG_CHARGER_BQ25890 is not set -# CONFIG_CHARGER_BQ25980 is not set -# CONFIG_CHARGER_BQ256XX is not set -CONFIG_CHARGER_SMB347=m -# CONFIG_BATTERY_GAUGE_LTC2941 is not set -# CONFIG_BATTERY_GOLDFISH is not set -# CONFIG_BATTERY_RT5033 is not set -# CONFIG_CHARGER_RT9455 is not set -# CONFIG_CHARGER_RT9467 is not set -# CONFIG_CHARGER_RT9471 is not set -# CONFIG_CHARGER_UCS1002 is not set -# CONFIG_CHARGER_BD99954 is not set -# CONFIG_BATTERY_UG3105 is not set -CONFIG_HWMON=y -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -CONFIG_SENSORS_AD7314=m -# CONFIG_SENSORS_AD7414 is not set -# CONFIG_SENSORS_AD7418 is not set -# CONFIG_SENSORS_ADM1021 is not set -# CONFIG_SENSORS_ADM1025 is not set -# CONFIG_SENSORS_ADM1026 is not set -# CONFIG_SENSORS_ADM1029 is not set -# CONFIG_SENSORS_ADM1031 is not set -# CONFIG_SENSORS_ADM1177 is not set -# CONFIG_SENSORS_ADM9240 is not set -# CONFIG_SENSORS_ADT7310 is not set -# CONFIG_SENSORS_ADT7410 is not set -# CONFIG_SENSORS_ADT7411 is not set -# CONFIG_SENSORS_ADT7462 is not set -# CONFIG_SENSORS_ADT7470 is not set -# CONFIG_SENSORS_ADT7475 is not set -# CONFIG_SENSORS_AHT10 is not set -# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set -# CONFIG_SENSORS_AS370 is not set -# CONFIG_SENSORS_ASC7621 is not set -# CONFIG_SENSORS_AXI_FAN_CONTROL is not set -CONFIG_SENSORS_ARM_SCPI=m -# CONFIG_SENSORS_ATXP1 is not set -# 
CONFIG_SENSORS_CORSAIR_CPRO is not set -# CONFIG_SENSORS_CORSAIR_PSU is not set -# CONFIG_SENSORS_DRIVETEMP is not set -# CONFIG_SENSORS_DS620 is not set -# CONFIG_SENSORS_DS1621 is not set -# CONFIG_SENSORS_I5K_AMB is not set -# CONFIG_SENSORS_F71805F is not set -# CONFIG_SENSORS_F71882FG is not set -# CONFIG_SENSORS_F75375S is not set -# CONFIG_SENSORS_FTSTEUTATES is not set -# CONFIG_SENSORS_GL518SM is not set -# CONFIG_SENSORS_GL520SM is not set -# CONFIG_SENSORS_G760A is not set -CONFIG_SENSORS_G762=m -# CONFIG_SENSORS_GPIO_FAN is not set -# CONFIG_SENSORS_HIH6130 is not set -# CONFIG_SENSORS_HS3001 is not set -# CONFIG_SENSORS_IBMAEM is not set -# CONFIG_SENSORS_IBMPEX is not set -# CONFIG_SENSORS_IT87 is not set -# CONFIG_SENSORS_JC42 is not set -CONFIG_SENSORS_POWR1220=m -# CONFIG_SENSORS_LINEAGE is not set -CONFIG_SENSORS_LTC2945=m -# CONFIG_SENSORS_LTC2947_I2C is not set -# CONFIG_SENSORS_LTC2947_SPI is not set -# CONFIG_SENSORS_LTC2990 is not set -# CONFIG_SENSORS_LTC2992 is not set -# CONFIG_SENSORS_LTC4151 is not set -# CONFIG_SENSORS_LTC4215 is not set -CONFIG_SENSORS_LTC4222=m -# CONFIG_SENSORS_LTC4245 is not set -CONFIG_SENSORS_LTC4260=m -# CONFIG_SENSORS_LTC4261 is not set -CONFIG_SENSORS_MAX1111=m -# CONFIG_SENSORS_MAX127 is not set -# CONFIG_SENSORS_MAX16065 is not set -# CONFIG_SENSORS_MAX1619 is not set -# CONFIG_SENSORS_MAX1668 is not set -# CONFIG_SENSORS_MAX197 is not set -# CONFIG_SENSORS_MAX31722 is not set -# CONFIG_SENSORS_MAX31730 is not set -# CONFIG_SENSORS_MAX31760 is not set -# CONFIG_MAX31827 is not set -# CONFIG_SENSORS_MAX6620 is not set -# CONFIG_SENSORS_MAX6621 is not set -# CONFIG_SENSORS_MAX6639 is not set -# CONFIG_SENSORS_MAX6642 is not set -# CONFIG_SENSORS_MAX6650 is not set -# CONFIG_SENSORS_MAX6697 is not set -CONFIG_SENSORS_MAX31790=m -# CONFIG_SENSORS_MC34VR500 is not set -# CONFIG_SENSORS_MCP3021 is not set -# CONFIG_SENSORS_TC654 is not set -# CONFIG_SENSORS_TPS23861 is not set -# CONFIG_SENSORS_MR75203 is not set 
-CONFIG_SENSORS_ADCXX=m -# CONFIG_SENSORS_LM63 is not set -CONFIG_SENSORS_LM70=m -# CONFIG_SENSORS_LM73 is not set -# CONFIG_SENSORS_LM75 is not set -# CONFIG_SENSORS_LM77 is not set -# CONFIG_SENSORS_LM78 is not set -# CONFIG_SENSORS_LM80 is not set -# CONFIG_SENSORS_LM83 is not set -# CONFIG_SENSORS_LM85 is not set -# CONFIG_SENSORS_LM87 is not set -# CONFIG_SENSORS_LM90 is not set -# CONFIG_SENSORS_LM92 is not set -# CONFIG_SENSORS_LM93 is not set -# CONFIG_SENSORS_LM95234 is not set -# CONFIG_SENSORS_LM95241 is not set -# CONFIG_SENSORS_LM95245 is not set -# CONFIG_SENSORS_PC87360 is not set -# CONFIG_SENSORS_PC87427 is not set -CONFIG_SENSORS_NCT6683=m -# CONFIG_SENSORS_NCT6775 is not set -# CONFIG_SENSORS_NCT6775_I2C is not set -CONFIG_SENSORS_NCT7802=m -CONFIG_SENSORS_NCT7904=m -# CONFIG_SENSORS_NPCM7XX is not set -# CONFIG_SENSORS_NZXT_KRAKEN2 is not set -# CONFIG_SENSORS_NZXT_SMART2 is not set -# CONFIG_SENSORS_OCC_P8_I2C is not set -# CONFIG_SENSORS_PCF8591 is not set -CONFIG_PMBUS=m -# CONFIG_SENSORS_PMBUS is not set -# CONFIG_SENSORS_ACBEL_FSG032 is not set -# CONFIG_SENSORS_ADM1266 is not set -# CONFIG_SENSORS_ADM1275 is not set -# CONFIG_SENSORS_BEL_PFE is not set -# CONFIG_SENSORS_BPA_RS600 is not set -# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set -# CONFIG_SENSORS_FSP_3Y is not set -# CONFIG_SENSORS_IBM_CFFPS is not set -# CONFIG_SENSORS_DPS920AB is not set -# CONFIG_SENSORS_INSPUR_IPSPS is not set -# CONFIG_SENSORS_IR35221 is not set -# CONFIG_SENSORS_IR36021 is not set -# CONFIG_SENSORS_IR38064 is not set -# CONFIG_SENSORS_IRPS5401 is not set -# CONFIG_SENSORS_ISL68137 is not set -# CONFIG_SENSORS_LM25066 is not set -# CONFIG_SENSORS_LT7182S is not set -# CONFIG_SENSORS_LTC2978 is not set -CONFIG_SENSORS_LTC3815=m -# CONFIG_SENSORS_MAX15301 is not set -# CONFIG_SENSORS_MAX16064 is not set -# CONFIG_SENSORS_MAX16601 is not set -# CONFIG_SENSORS_MAX20730 is not set -CONFIG_SENSORS_MAX20751=m -# CONFIG_SENSORS_MAX31785 is not set -# 
CONFIG_SENSORS_MAX34440 is not set -# CONFIG_SENSORS_MAX8688 is not set -# CONFIG_SENSORS_MP2888 is not set -# CONFIG_SENSORS_MP2975 is not set -# CONFIG_SENSORS_MP5023 is not set -# CONFIG_SENSORS_MPQ7932 is not set -# CONFIG_SENSORS_PIM4328 is not set -# CONFIG_SENSORS_PLI1209BC is not set -# CONFIG_SENSORS_PM6764TR is not set -# CONFIG_SENSORS_PXE1610 is not set -# CONFIG_SENSORS_Q54SJ108A2 is not set -# CONFIG_SENSORS_STPDDC60 is not set -# CONFIG_SENSORS_TDA38640 is not set -CONFIG_SENSORS_TPS40422=m -# CONFIG_SENSORS_TPS53679 is not set -# CONFIG_SENSORS_TPS546D24 is not set -# CONFIG_SENSORS_UCD9000 is not set -# CONFIG_SENSORS_UCD9200 is not set -# CONFIG_SENSORS_XDPE152 is not set -# CONFIG_SENSORS_XDPE122 is not set -# CONFIG_SENSORS_ZL6100 is not set -CONFIG_SENSORS_PWM_FAN=m -# CONFIG_SENSORS_SBTSI is not set -# CONFIG_SENSORS_SBRMI is not set -# CONFIG_SENSORS_SHT15 is not set -# CONFIG_SENSORS_SHT21 is not set -# CONFIG_SENSORS_SHT3x is not set -# CONFIG_SENSORS_SHT4x is not set -CONFIG_SENSORS_SHTC1=m -# CONFIG_SENSORS_SIS5595 is not set -# CONFIG_SENSORS_DME1737 is not set -# CONFIG_SENSORS_EMC1403 is not set -# CONFIG_SENSORS_EMC2103 is not set -# CONFIG_SENSORS_EMC2305 is not set -# CONFIG_SENSORS_EMC6W201 is not set -# CONFIG_SENSORS_SMSC47M1 is not set -# CONFIG_SENSORS_SMSC47M192 is not set -# CONFIG_SENSORS_SMSC47B397 is not set -# CONFIG_SENSORS_SCH5627 is not set -# CONFIG_SENSORS_SCH5636 is not set -# CONFIG_SENSORS_STTS751 is not set -CONFIG_SENSORS_ADC128D818=m -# CONFIG_SENSORS_ADS7828 is not set -CONFIG_SENSORS_ADS7871=m -# CONFIG_SENSORS_AMC6821 is not set -# CONFIG_SENSORS_INA209 is not set -# CONFIG_SENSORS_INA2XX is not set -# CONFIG_SENSORS_INA238 is not set -# CONFIG_SENSORS_INA3221 is not set -CONFIG_SENSORS_TC74=m -# CONFIG_SENSORS_THMC50 is not set -# CONFIG_SENSORS_TMP102 is not set -CONFIG_SENSORS_TMP103=m -# CONFIG_SENSORS_TMP108 is not set -# CONFIG_SENSORS_TMP401 is not set -# CONFIG_SENSORS_TMP421 is not set -# 
CONFIG_SENSORS_TMP464 is not set -# CONFIG_SENSORS_TMP513 is not set -CONFIG_SENSORS_VEXPRESS=m -# CONFIG_SENSORS_VIA686A is not set -# CONFIG_SENSORS_VT1211 is not set -# CONFIG_SENSORS_VT8231 is not set -# CONFIG_SENSORS_W83773G is not set -# CONFIG_SENSORS_W83781D is not set -# CONFIG_SENSORS_W83791D is not set -# CONFIG_SENSORS_W83792D is not set -# CONFIG_SENSORS_W83793 is not set -# CONFIG_SENSORS_W83795 is not set -# CONFIG_SENSORS_W83L785TS is not set -# CONFIG_SENSORS_W83L786NG is not set -# CONFIG_SENSORS_W83627HF is not set -# CONFIG_SENSORS_W83627EHF is not set -CONFIG_SENSORS_XGENE=m - -# -# ACPI drivers -# -CONFIG_SENSORS_ACPI_POWER=y -CONFIG_THERMAL=y -# CONFIG_THERMAL_NETLINK is not set -# CONFIG_THERMAL_STATISTICS is not set -CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_OF=y -# CONFIG_THERMAL_WRITABLE_TRIPS is not set -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set -# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set -CONFIG_THERMAL_GOV_FAIR_SHARE=y -CONFIG_THERMAL_GOV_STEP_WISE=y -# CONFIG_THERMAL_GOV_BANG_BANG is not set -CONFIG_THERMAL_GOV_USER_SPACE=y -CONFIG_CPU_THERMAL=y -CONFIG_CPU_FREQ_THERMAL=y -# CONFIG_THERMAL_EMULATION is not set -# CONFIG_THERMAL_MMIO is not set -CONFIG_HISI_THERMAL=m - -# -# Qualcomm thermal drivers -# -# CONFIG_QCOM_LMH is not set -# end of Qualcomm thermal drivers - -CONFIG_WATCHDOG=y -CONFIG_WATCHDOG_CORE=y -# CONFIG_WATCHDOG_NOWAYOUT is not set -CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y -CONFIG_WATCHDOG_OPEN_TIMEOUT=0 -CONFIG_WATCHDOG_SYSFS=y -# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set - -# -# Watchdog Pretimeout Governors -# -# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set - -# -# Watchdog Device Drivers -# -CONFIG_SOFT_WATCHDOG=m -CONFIG_GPIO_WATCHDOG=m -# CONFIG_WDAT_WDT is not set -# CONFIG_XILINX_WATCHDOG is not set -# CONFIG_XILINX_WINDOW_WATCHDOG is not set -# CONFIG_ZIIRAVE_WATCHDOG is not set -CONFIG_ARM_SP805_WATCHDOG=m 
-CONFIG_ARM_SBSA_WATCHDOG=m -# CONFIG_CADENCE_WATCHDOG is not set -# CONFIG_DW_WATCHDOG is not set -# CONFIG_MAX63XX_WATCHDOG is not set -# CONFIG_QCOM_WDT is not set -# CONFIG_ARM_SMC_WATCHDOG is not set -CONFIG_ALIM7101_WDT=m -CONFIG_I6300ESB_WDT=m -# CONFIG_HP_WATCHDOG is not set -CONFIG_MARVELL_GTI_WDT=y -# CONFIG_MEN_A21_WDT is not set - -# -# PCI-based Watchdog Cards -# -CONFIG_PCIPCWATCHDOG=m -CONFIG_WDTPCI=m - -# -# USB-based Watchdog Cards -# -CONFIG_USBPCWATCHDOG=m -CONFIG_SSB_POSSIBLE=y -# CONFIG_SSB is not set -CONFIG_BCMA_POSSIBLE=y -CONFIG_BCMA=m -CONFIG_BCMA_HOST_PCI_POSSIBLE=y -CONFIG_BCMA_HOST_PCI=y -# CONFIG_BCMA_HOST_SOC is not set -CONFIG_BCMA_DRIVER_PCI=y -CONFIG_BCMA_DRIVER_GMAC_CMN=y -CONFIG_BCMA_DRIVER_GPIO=y -# CONFIG_BCMA_DEBUG is not set - -# -# Multifunction device drivers -# -CONFIG_MFD_CORE=m -# CONFIG_MFD_ACT8945A is not set -# CONFIG_MFD_AS3711 is not set -# CONFIG_MFD_SMPRO is not set -# CONFIG_MFD_AS3722 is not set -# CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_AAT2870_CORE is not set -# CONFIG_MFD_ATMEL_FLEXCOM is not set -# CONFIG_MFD_ATMEL_HLCDC is not set -# CONFIG_MFD_BCM590XX is not set -# CONFIG_MFD_BD9571MWV is not set -# CONFIG_MFD_AXP20X_I2C is not set -# CONFIG_MFD_CS42L43_I2C is not set -# CONFIG_MFD_MADERA is not set -# CONFIG_MFD_MAX5970 is not set -# CONFIG_PMIC_DA903X is not set -# CONFIG_MFD_DA9052_SPI is not set -# CONFIG_MFD_DA9052_I2C is not set -# CONFIG_MFD_DA9055 is not set -# CONFIG_MFD_DA9062 is not set -# CONFIG_MFD_DA9063 is not set -# CONFIG_MFD_DA9150 is not set -# CONFIG_MFD_DLN2 is not set -# CONFIG_MFD_GATEWORKS_GSC is not set -# CONFIG_MFD_MC13XXX_SPI is not set -# CONFIG_MFD_MC13XXX_I2C is not set -# CONFIG_MFD_MP2629 is not set -# CONFIG_MFD_HI6421_PMIC is not set -# CONFIG_MFD_HI655X_PMIC is not set -# CONFIG_LPC_ICH is not set -# CONFIG_LPC_SCH is not set -# CONFIG_MFD_IQS62X is not set -# CONFIG_MFD_JANZ_CMODIO is not set -# CONFIG_MFD_KEMPLD is not set -# CONFIG_MFD_88PM800 is not set -# 
CONFIG_MFD_88PM805 is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_MFD_MAX14577 is not set -# CONFIG_MFD_MAX77541 is not set -# CONFIG_MFD_MAX77620 is not set -# CONFIG_MFD_MAX77650 is not set -# CONFIG_MFD_MAX77686 is not set -# CONFIG_MFD_MAX77693 is not set -# CONFIG_MFD_MAX77714 is not set -# CONFIG_MFD_MAX77843 is not set -# CONFIG_MFD_MAX8907 is not set -# CONFIG_MFD_MAX8925 is not set -# CONFIG_MFD_MAX8997 is not set -# CONFIG_MFD_MAX8998 is not set -# CONFIG_MFD_MT6360 is not set -# CONFIG_MFD_MT6370 is not set -# CONFIG_MFD_MT6397 is not set -# CONFIG_MFD_MENF21BMC is not set -# CONFIG_MFD_OCELOT is not set -# CONFIG_EZX_PCAP is not set -# CONFIG_MFD_CPCAP is not set -# CONFIG_MFD_VIPERBOARD is not set -# CONFIG_MFD_NTXEC is not set -# CONFIG_MFD_RETU is not set -# CONFIG_MFD_PCF50633 is not set -# CONFIG_MFD_QCOM_RPM is not set -# CONFIG_MFD_SY7636A is not set -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_RT4831 is not set -# CONFIG_MFD_RT5033 is not set -# CONFIG_MFD_RT5120 is not set -# CONFIG_MFD_RC5T583 is not set -# CONFIG_MFD_RK8XX_I2C is not set -# CONFIG_MFD_RK8XX_SPI is not set -# CONFIG_MFD_RN5T618 is not set -# CONFIG_MFD_SEC_CORE is not set -# CONFIG_MFD_SI476X_CORE is not set -# CONFIG_MFD_SM501 is not set -# CONFIG_MFD_SKY81452 is not set -# CONFIG_MFD_STMPE is not set -CONFIG_MFD_SYSCON=y -# CONFIG_MFD_LP3943 is not set -# CONFIG_MFD_LP8788 is not set -# CONFIG_MFD_TI_LMU is not set -# CONFIG_MFD_PALMAS is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TPS65086 is not set -# CONFIG_MFD_TPS65090 is not set -# CONFIG_MFD_TPS65217 is not set -# CONFIG_MFD_TI_LP873X is not set -# CONFIG_MFD_TI_LP87565 is not set -# CONFIG_MFD_TPS65218 is not set -# CONFIG_MFD_TPS65219 is not set -# CONFIG_MFD_TPS6586X is not set -# CONFIG_MFD_TPS65910 is not set -# CONFIG_MFD_TPS65912_I2C is not set -# CONFIG_MFD_TPS65912_SPI is not set -# CONFIG_MFD_TPS6594_I2C is not set -# 
CONFIG_MFD_TPS6594_SPI is not set -# CONFIG_TWL4030_CORE is not set -# CONFIG_TWL6040_CORE is not set -# CONFIG_MFD_WL1273_CORE is not set -# CONFIG_MFD_LM3533 is not set -# CONFIG_MFD_TC3589X is not set -# CONFIG_MFD_TQMX86 is not set -# CONFIG_MFD_VX855 is not set -# CONFIG_MFD_LOCHNAGAR is not set -# CONFIG_MFD_ARIZONA_I2C is not set -# CONFIG_MFD_ARIZONA_SPI is not set -# CONFIG_MFD_WM8400 is not set -# CONFIG_MFD_WM831X_I2C is not set -# CONFIG_MFD_WM831X_SPI is not set -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_MFD_ROHM_BD718XX is not set -# CONFIG_MFD_ROHM_BD71828 is not set -# CONFIG_MFD_ROHM_BD957XMUF is not set -# CONFIG_MFD_STPMIC1 is not set -# CONFIG_MFD_STMFX is not set -# CONFIG_MFD_ATC260X_I2C is not set -# CONFIG_MFD_QCOM_PM8008 is not set -# CONFIG_MFD_VEXPRESS_SYSREG is not set -# CONFIG_MFD_INTEL_M10_BMC_SPI is not set -# CONFIG_MFD_RSMU_I2C is not set -# CONFIG_MFD_RSMU_SPI is not set -# end of Multifunction device drivers - -CONFIG_REGULATOR=y -# CONFIG_REGULATOR_DEBUG is not set -# CONFIG_REGULATOR_FIXED_VOLTAGE is not set -# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set -# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set -# CONFIG_REGULATOR_88PG86X is not set -# CONFIG_REGULATOR_ACT8865 is not set -# CONFIG_REGULATOR_AD5398 is not set -# CONFIG_REGULATOR_AW37503 is not set -# CONFIG_REGULATOR_DA9121 is not set -# CONFIG_REGULATOR_DA9210 is not set -# CONFIG_REGULATOR_DA9211 is not set -# CONFIG_REGULATOR_FAN53555 is not set -# CONFIG_REGULATOR_FAN53880 is not set -# CONFIG_REGULATOR_GPIO is not set -# CONFIG_REGULATOR_ISL9305 is not set -# CONFIG_REGULATOR_ISL6271A is not set -# CONFIG_REGULATOR_LP3971 is not set -# CONFIG_REGULATOR_LP3972 is not set -# CONFIG_REGULATOR_LP872X is not set -# CONFIG_REGULATOR_LP8755 is not set -# CONFIG_REGULATOR_LTC3589 is not set -# CONFIG_REGULATOR_LTC3676 is not set -# CONFIG_REGULATOR_MAX1586 is not set -# CONFIG_REGULATOR_MAX77857 is not set -# CONFIG_REGULATOR_MAX8649 is 
not set -# CONFIG_REGULATOR_MAX8660 is not set -# CONFIG_REGULATOR_MAX8893 is not set -# CONFIG_REGULATOR_MAX8952 is not set -# CONFIG_REGULATOR_MAX8973 is not set -# CONFIG_REGULATOR_MAX20086 is not set -# CONFIG_REGULATOR_MAX20411 is not set -# CONFIG_REGULATOR_MAX77826 is not set -# CONFIG_REGULATOR_MCP16502 is not set -# CONFIG_REGULATOR_MP5416 is not set -# CONFIG_REGULATOR_MP8859 is not set -# CONFIG_REGULATOR_MP886X is not set -# CONFIG_REGULATOR_MPQ7920 is not set -# CONFIG_REGULATOR_MT6311 is not set -# CONFIG_REGULATOR_PCA9450 is not set -# CONFIG_REGULATOR_PF8X00 is not set -# CONFIG_REGULATOR_PFUZE100 is not set -# CONFIG_REGULATOR_PV88060 is not set -# CONFIG_REGULATOR_PV88080 is not set -# CONFIG_REGULATOR_PV88090 is not set -# CONFIG_REGULATOR_PWM is not set -# CONFIG_REGULATOR_QCOM_REFGEN is not set -# CONFIG_REGULATOR_RAA215300 is not set -# CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set -# CONFIG_REGULATOR_RT4801 is not set -# CONFIG_REGULATOR_RT4803 is not set -# CONFIG_REGULATOR_RT5190A is not set -# CONFIG_REGULATOR_RT5739 is not set -# CONFIG_REGULATOR_RT5759 is not set -# CONFIG_REGULATOR_RT6160 is not set -# CONFIG_REGULATOR_RT6190 is not set -# CONFIG_REGULATOR_RT6245 is not set -# CONFIG_REGULATOR_RTQ2134 is not set -# CONFIG_REGULATOR_RTMV20 is not set -# CONFIG_REGULATOR_RTQ6752 is not set -# CONFIG_REGULATOR_RTQ2208 is not set -# CONFIG_REGULATOR_SLG51000 is not set -# CONFIG_REGULATOR_SY8106A is not set -# CONFIG_REGULATOR_SY8824X is not set -# CONFIG_REGULATOR_SY8827N is not set -# CONFIG_REGULATOR_TPS51632 is not set -# CONFIG_REGULATOR_TPS62360 is not set -# CONFIG_REGULATOR_TPS6286X is not set -# CONFIG_REGULATOR_TPS6287X is not set -# CONFIG_REGULATOR_TPS65023 is not set -# CONFIG_REGULATOR_TPS6507X is not set -# CONFIG_REGULATOR_TPS65132 is not set -# CONFIG_REGULATOR_TPS6524X is not set -# CONFIG_REGULATOR_VCTRL is not set -# CONFIG_REGULATOR_VEXPRESS is not set -# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set -# 
CONFIG_RC_CORE is not set -CONFIG_CEC_CORE=m - -# -# CEC support -# -# CONFIG_MEDIA_CEC_SUPPORT is not set -# end of CEC support - -# CONFIG_MEDIA_SUPPORT is not set - -# -# Graphics support -# -CONFIG_APERTURE_HELPERS=y -CONFIG_VIDEO_CMDLINE=y -CONFIG_VIDEO_NOMODESET=y -# CONFIG_AUXDISPLAY is not set -CONFIG_DRM=m -CONFIG_DRM_KMS_HELPER=m -CONFIG_DRM_FBDEV_EMULATION=y -CONFIG_DRM_FBDEV_OVERALLOC=100 -CONFIG_DRM_LOAD_EDID_FIRMWARE=y -CONFIG_DRM_DISPLAY_HELPER=m -CONFIG_DRM_DISPLAY_DP_HELPER=y -CONFIG_DRM_DISPLAY_HDCP_HELPER=y -CONFIG_DRM_DISPLAY_HDMI_HELPER=y -CONFIG_DRM_DP_AUX_CHARDEV=y -CONFIG_DRM_DP_CEC=y -CONFIG_DRM_TTM=m -CONFIG_DRM_EXEC=m -CONFIG_DRM_BUDDY=m -CONFIG_DRM_VRAM_HELPER=m -CONFIG_DRM_TTM_HELPER=m -CONFIG_DRM_GEM_SHMEM_HELPER=m -CONFIG_DRM_SUBALLOC_HELPER=m -CONFIG_DRM_SCHED=m - -# -# I2C encoder or helper chips -# -CONFIG_DRM_I2C_CH7006=m -# CONFIG_DRM_I2C_SIL164 is not set -CONFIG_DRM_I2C_NXP_TDA998X=m -# CONFIG_DRM_I2C_NXP_TDA9950 is not set -# end of I2C encoder or helper chips - -# -# ARM devices -# -# CONFIG_DRM_HDLCD is not set -# CONFIG_DRM_MALI_DISPLAY is not set -# CONFIG_DRM_KOMEDA is not set -# end of ARM devices - -CONFIG_DRM_RADEON=m -CONFIG_DRM_RADEON_USERPTR=y -CONFIG_DRM_AMDGPU=m -# CONFIG_DRM_AMDGPU_SI is not set -CONFIG_DRM_AMDGPU_CIK=y -CONFIG_DRM_AMDGPU_USERPTR=y - -# -# ACP (Audio CoProcessor) Configuration -# -CONFIG_DRM_AMD_ACP=y -# end of ACP (Audio CoProcessor) Configuration - -# -# Display Engine Configuration -# -CONFIG_DRM_AMD_DC=y -CONFIG_DRM_AMD_DC_FP=y -# CONFIG_DEBUG_KERNEL_DC is not set -# CONFIG_DRM_AMD_SECURE_DISPLAY is not set -# end of Display Engine Configuration - -CONFIG_HSA_AMD=y -CONFIG_DRM_NOUVEAU=m -CONFIG_NOUVEAU_DEBUG=5 -CONFIG_NOUVEAU_DEBUG_DEFAULT=3 -CONFIG_NOUVEAU_DEBUG_MMU=y -# CONFIG_NOUVEAU_DEBUG_PUSH is not set -CONFIG_DRM_NOUVEAU_BACKLIGHT=y -# CONFIG_DRM_VGEM is not set -CONFIG_DRM_VKMS=m -# CONFIG_DRM_VMWGFX is not set -CONFIG_DRM_UDL=m -CONFIG_DRM_AST=m -CONFIG_DRM_MGAG200=m 
-CONFIG_DRM_QXL=m -CONFIG_DRM_VIRTIO_GPU=m -CONFIG_DRM_VIRTIO_GPU_KMS=y -# CONFIG_DRM_MSM is not set -CONFIG_DRM_PANEL=y - -# -# Display Panels -# -# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set -# CONFIG_DRM_PANEL_ARM_VERSATILE is not set -# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set -# CONFIG_DRM_PANEL_LVDS is not set -# CONFIG_DRM_PANEL_SIMPLE is not set -# CONFIG_DRM_PANEL_EDP is not set -# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set -# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set -# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set -# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set -# CONFIG_DRM_PANEL_LG_LB035Q02 is not set -# CONFIG_DRM_PANEL_LG_LG4573 is not set -# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set -# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set -# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set -# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set -# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set -# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set -# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set -# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set -# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set -# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set -# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set -# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set -# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set -# CONFIG_DRM_PANEL_TPO_TPG110 is not set -# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set -# end of Display Panels - -CONFIG_DRM_BRIDGE=y -CONFIG_DRM_PANEL_BRIDGE=y - -# -# Display Interface Bridges -# -# CONFIG_DRM_CHIPONE_ICN6211 is not set -# CONFIG_DRM_CHRONTEL_CH7033 is not set -# CONFIG_DRM_DISPLAY_CONNECTOR is not set -# CONFIG_DRM_ITE_IT6505 is not set -# CONFIG_DRM_LONTIUM_LT8912B is not set -# CONFIG_DRM_LONTIUM_LT9211 is not set -# CONFIG_DRM_LONTIUM_LT9611 
is not set -# CONFIG_DRM_LONTIUM_LT9611UXC is not set -# CONFIG_DRM_ITE_IT66121 is not set -# CONFIG_DRM_LVDS_CODEC is not set -# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set -# CONFIG_DRM_NWL_MIPI_DSI is not set -# CONFIG_DRM_NXP_PTN3460 is not set -# CONFIG_DRM_PARADE_PS8622 is not set -# CONFIG_DRM_PARADE_PS8640 is not set -# CONFIG_DRM_SAMSUNG_DSIM is not set -# CONFIG_DRM_SIL_SII8620 is not set -# CONFIG_DRM_SII902X is not set -# CONFIG_DRM_SII9234 is not set -# CONFIG_DRM_SIMPLE_BRIDGE is not set -# CONFIG_DRM_THINE_THC63LVD1024 is not set -# CONFIG_DRM_TOSHIBA_TC358762 is not set -# CONFIG_DRM_TOSHIBA_TC358764 is not set -# CONFIG_DRM_TOSHIBA_TC358767 is not set -# CONFIG_DRM_TOSHIBA_TC358768 is not set -# CONFIG_DRM_TOSHIBA_TC358775 is not set -# CONFIG_DRM_TI_DLPC3433 is not set -# CONFIG_DRM_TI_TFP410 is not set -# CONFIG_DRM_TI_SN65DSI83 is not set -# CONFIG_DRM_TI_SN65DSI86 is not set -# CONFIG_DRM_TI_TPD12S015 is not set -# CONFIG_DRM_ANALOGIX_ANX6345 is not set -# CONFIG_DRM_ANALOGIX_ANX78XX is not set -# CONFIG_DRM_ANALOGIX_ANX7625 is not set -# CONFIG_DRM_I2C_ADV7511 is not set -# CONFIG_DRM_CDNS_DSI is not set -# CONFIG_DRM_CDNS_MHDP8546 is not set -# end of Display Interface Bridges - -# CONFIG_DRM_LOONGSON is not set -# CONFIG_DRM_ETNAVIV is not set -CONFIG_DRM_HISI_HIBMC=m -# CONFIG_DRM_HISI_KIRIN is not set -# CONFIG_DRM_LOGICVC is not set -# CONFIG_DRM_ARCPGU is not set -CONFIG_DRM_BOCHS=m -CONFIG_DRM_CIRRUS_QEMU=m -# CONFIG_DRM_GM12U320 is not set -# CONFIG_DRM_PANEL_MIPI_DBI is not set -# CONFIG_DRM_SIMPLEDRM is not set -# CONFIG_TINYDRM_HX8357D is not set -# CONFIG_TINYDRM_ILI9163 is not set -# CONFIG_TINYDRM_ILI9225 is not set -# CONFIG_TINYDRM_ILI9341 is not set -# CONFIG_TINYDRM_ILI9486 is not set -# CONFIG_TINYDRM_MI0283QT is not set -# CONFIG_TINYDRM_REPAPER is not set -# CONFIG_TINYDRM_ST7586 is not set -# CONFIG_TINYDRM_ST7735R is not set -# CONFIG_DRM_PL111 is not set -# CONFIG_DRM_LIMA is not set -# CONFIG_DRM_PANFROST is 
not set -# CONFIG_DRM_TIDSS is not set -# CONFIG_DRM_GUD is not set -# CONFIG_DRM_SSD130X is not set -CONFIG_DRM_PHYTIUM=m -# CONFIG_DRM_LEGACY is not set -CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y -# CONFIG_HYDCU_FIXUP_HEADER is not set -CONFIG_DRM_INSPUR=m - -# -# Frame buffer Devices -# -CONFIG_FB=y -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -# CONFIG_FB_ARMCLCD is not set -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -# CONFIG_FB_UVESA is not set -CONFIG_FB_EFI=y -# CONFIG_FB_OPENCORES is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_I740 is not set -# CONFIG_FB_MATROX is not set -# CONFIG_FB_RADEON is not set -# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_VT8623 is not set -# CONFIG_FB_TRIDENT is not set -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -# CONFIG_FB_SMSCUFX is not set -# CONFIG_FB_UDL is not set -# CONFIG_FB_IBM_GXT4500 is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set -CONFIG_FB_SIMPLE=y -CONFIG_FB_SSD1307=m -# CONFIG_FB_SM712 is not set -# CONFIG_FB_LS2K500 is not set -CONFIG_FB_CORE=y -CONFIG_FB_NOTIFY=y -# CONFIG_FIRMWARE_EDID is not set -CONFIG_FB_DEVICE=y -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -CONFIG_FB_SYS_FILLRECT=y -CONFIG_FB_SYS_COPYAREA=y -CONFIG_FB_SYS_IMAGEBLIT=y -# CONFIG_FB_FOREIGN_ENDIAN is not set -CONFIG_FB_SYS_FOPS=y -CONFIG_FB_DEFERRED_IO=y -CONFIG_FB_IOMEM_HELPERS=y -CONFIG_FB_SYSMEM_HELPERS=y -CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y -CONFIG_FB_BACKLIGHT=m -# CONFIG_FB_MODE_HELPERS is not set -CONFIG_FB_TILEBLITTING=y -# end of Frame 
buffer Devices - -# -# Backlight & LCD device support -# -CONFIG_LCD_CLASS_DEVICE=m -# CONFIG_LCD_L4F00242T03 is not set -# CONFIG_LCD_LMS283GF05 is not set -# CONFIG_LCD_LTV350QV is not set -# CONFIG_LCD_ILI922X is not set -# CONFIG_LCD_ILI9320 is not set -# CONFIG_LCD_TDO24M is not set -# CONFIG_LCD_VGG2432A4 is not set -CONFIG_LCD_PLATFORM=m -# CONFIG_LCD_AMS369FG06 is not set -# CONFIG_LCD_LMS501KF03 is not set -# CONFIG_LCD_HX8357 is not set -# CONFIG_LCD_OTM3225A is not set -CONFIG_BACKLIGHT_CLASS_DEVICE=y -# CONFIG_BACKLIGHT_KTD253 is not set -# CONFIG_BACKLIGHT_KTZ8866 is not set -CONFIG_BACKLIGHT_PWM=m -# CONFIG_BACKLIGHT_QCOM_WLED is not set -# CONFIG_BACKLIGHT_ADP8860 is not set -# CONFIG_BACKLIGHT_ADP8870 is not set -# CONFIG_BACKLIGHT_LM3630A is not set -# CONFIG_BACKLIGHT_LM3639 is not set -CONFIG_BACKLIGHT_LP855X=m -CONFIG_BACKLIGHT_GPIO=m -# CONFIG_BACKLIGHT_LV5207LP is not set -# CONFIG_BACKLIGHT_BD6107 is not set -# CONFIG_BACKLIGHT_ARCXCNN is not set -# CONFIG_BACKLIGHT_LED is not set -# end of Backlight & LCD device support - -CONFIG_HDMI=y - -# -# Console display driver support -# -CONFIG_DUMMY_CONSOLE=y -CONFIG_DUMMY_CONSOLE_COLUMNS=80 -CONFIG_DUMMY_CONSOLE_ROWS=25 -CONFIG_FRAMEBUFFER_CONSOLE=y -# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y -# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set -# end of Console display driver support - -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_LOGO_LINUX_CLUT224=y -# end of Graphics support - -# CONFIG_DRM_ACCEL is not set -CONFIG_SOUND=m -# CONFIG_SND is not set -CONFIG_HID_SUPPORT=y -CONFIG_HID=y -CONFIG_HID_BATTERY_STRENGTH=y -CONFIG_HIDRAW=y -CONFIG_UHID=m -CONFIG_HID_GENERIC=y - -# -# Special HID drivers -# -CONFIG_HID_A4TECH=m -# CONFIG_HID_ACCUTOUCH is not set -CONFIG_HID_ACRUX=m -# CONFIG_HID_ACRUX_FF is not set -CONFIG_HID_APPLE=m 
-CONFIG_HID_APPLEIR=m -# CONFIG_HID_ASUS is not set -CONFIG_HID_AUREAL=m -CONFIG_HID_BELKIN=m -CONFIG_HID_BETOP_FF=m -# CONFIG_HID_BIGBEN_FF is not set -CONFIG_HID_CHERRY=m -CONFIG_HID_CHICONY=m -CONFIG_HID_CORSAIR=m -# CONFIG_HID_COUGAR is not set -# CONFIG_HID_MACALLY is not set -# CONFIG_HID_CMEDIA is not set -# CONFIG_HID_CP2112 is not set -# CONFIG_HID_CREATIVE_SB0540 is not set -CONFIG_HID_CYPRESS=m -CONFIG_HID_DRAGONRISE=m -# CONFIG_DRAGONRISE_FF is not set -# CONFIG_HID_EMS_FF is not set -CONFIG_HID_ELAN=m -CONFIG_HID_ELECOM=m -CONFIG_HID_ELO=m -# CONFIG_HID_EVISION is not set -CONFIG_HID_EZKEY=m -# CONFIG_HID_FT260 is not set -CONFIG_HID_GEMBIRD=m -CONFIG_HID_GFRM=m -# CONFIG_HID_GLORIOUS is not set -CONFIG_HID_HOLTEK=m -# CONFIG_HOLTEK_FF is not set -# CONFIG_HID_GOOGLE_STADIA_FF is not set -# CONFIG_HID_VIVALDI is not set -CONFIG_HID_GT683R=m -CONFIG_HID_KEYTOUCH=m -CONFIG_HID_KYE=m -CONFIG_HID_UCLOGIC=m -CONFIG_HID_WALTOP=m -# CONFIG_HID_VIEWSONIC is not set -# CONFIG_HID_VRC2 is not set -# CONFIG_HID_XIAOMI is not set -CONFIG_HID_GYRATION=m -CONFIG_HID_ICADE=m -CONFIG_HID_ITE=m -CONFIG_HID_JABRA=m -CONFIG_HID_TWINHAN=m -CONFIG_HID_KENSINGTON=m -CONFIG_HID_LCPOWER=m -CONFIG_HID_LED=m -CONFIG_HID_LENOVO=m -# CONFIG_HID_LETSKETCH is not set -CONFIG_HID_LOGITECH=m -CONFIG_HID_LOGITECH_DJ=m -CONFIG_HID_LOGITECH_HIDPP=m -# CONFIG_LOGITECH_FF is not set -# CONFIG_LOGIRUMBLEPAD2_FF is not set -# CONFIG_LOGIG940_FF is not set -# CONFIG_LOGIWHEELS_FF is not set -CONFIG_HID_MAGICMOUSE=y -# CONFIG_HID_MALTRON is not set -# CONFIG_HID_MAYFLASH is not set -# CONFIG_HID_MEGAWORLD_FF is not set -# CONFIG_HID_REDRAGON is not set -CONFIG_HID_MICROSOFT=m -CONFIG_HID_MONTEREY=m -CONFIG_HID_MULTITOUCH=m -# CONFIG_HID_NINTENDO is not set -CONFIG_HID_NTI=m -CONFIG_HID_NTRIG=y -CONFIG_HID_ORTEK=m -CONFIG_HID_PANTHERLORD=m -# CONFIG_PANTHERLORD_FF is not set -CONFIG_HID_PENMOUNT=m -CONFIG_HID_PETALYNX=m -CONFIG_HID_PICOLCD=m -CONFIG_HID_PICOLCD_FB=y 
-CONFIG_HID_PICOLCD_BACKLIGHT=y -CONFIG_HID_PICOLCD_LCD=y -CONFIG_HID_PICOLCD_LEDS=y -CONFIG_HID_PLANTRONICS=m -# CONFIG_HID_PXRC is not set -# CONFIG_HID_RAZER is not set -CONFIG_HID_PRIMAX=m -# CONFIG_HID_RETRODE is not set -CONFIG_HID_ROCCAT=m -CONFIG_HID_SAITEK=m -CONFIG_HID_SAMSUNG=m -# CONFIG_HID_SEMITEK is not set -# CONFIG_HID_SIGMAMICRO is not set -CONFIG_HID_SONY=m -CONFIG_SONY_FF=y -CONFIG_HID_SPEEDLINK=m -# CONFIG_HID_STEAM is not set -CONFIG_HID_STEELSERIES=m -CONFIG_HID_SUNPLUS=m -CONFIG_HID_RMI=m -CONFIG_HID_GREENASIA=m -# CONFIG_GREENASIA_FF is not set -CONFIG_HID_SMARTJOYPLUS=m -# CONFIG_SMARTJOYPLUS_FF is not set -CONFIG_HID_TIVO=m -CONFIG_HID_TOPSEED=m -# CONFIG_HID_TOPRE is not set -CONFIG_HID_THINGM=m -CONFIG_HID_THRUSTMASTER=m -# CONFIG_THRUSTMASTER_FF is not set -# CONFIG_HID_UDRAW_PS3 is not set -# CONFIG_HID_U2FZERO is not set -CONFIG_HID_WACOM=m -CONFIG_HID_WIIMOTE=m -CONFIG_HID_XINMO=m -CONFIG_HID_ZEROPLUS=m -# CONFIG_ZEROPLUS_FF is not set -CONFIG_HID_ZYDACRON=m -CONFIG_HID_SENSOR_HUB=m -# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set -# CONFIG_HID_ALPS is not set -# CONFIG_HID_MCP2221 is not set -# end of Special HID drivers - -# -# HID-BPF support -# -# CONFIG_HID_BPF is not set -# end of HID-BPF support - -# -# USB HID support -# -CONFIG_USB_HID=y -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y -# end of USB HID support - -CONFIG_I2C_HID=m -# CONFIG_I2C_HID_ACPI is not set -# CONFIG_I2C_HID_OF is not set -# CONFIG_I2C_HID_OF_ELAN is not set -# CONFIG_I2C_HID_OF_GOODIX is not set -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=y -CONFIG_USB_LED_TRIG=y -CONFIG_USB_ULPI_BUS=m -# CONFIG_USB_CONN_GPIO is not set -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB=y -CONFIG_USB_PCI=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEFAULT_PERSIST=y -# CONFIG_USB_FEW_INIT_RETRIES is not set -# CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_OTG is not set -# CONFIG_USB_OTG_PRODUCTLIST is not set 
-CONFIG_USB_LEDS_TRIGGER_USBPORT=m -CONFIG_USB_AUTOSUSPEND_DELAY=2 -CONFIG_USB_MON=y - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=y -# CONFIG_USB_XHCI_DBGCAP is not set -CONFIG_USB_XHCI_PCI=y -# CONFIG_USB_XHCI_PCI_RENESAS is not set -CONFIG_USB_XHCI_PLATFORM=y -# CONFIG_USB_XHCI_HISTB is not set -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=y -# CONFIG_USB_EHCI_FSL is not set -CONFIG_USB_EHCI_HCD_PLATFORM=m -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_MAX3421_HCD is not set -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PCI=y -# CONFIG_USB_OHCI_HCD_PLATFORM is not set -CONFIG_USB_UHCI_HCD=y -# CONFIG_USB_SL811_HCD is not set -# CONFIG_USB_R8A66597_HCD is not set -# CONFIG_USB_HCD_BCMA is not set -# CONFIG_USB_HCD_TEST_MODE is not set - -# -# USB Device Class drivers -# -CONFIG_USB_ACM=m -CONFIG_USB_PRINTER=m -CONFIG_USB_WDM=m -CONFIG_USB_TMC=m - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=m -# CONFIG_USB_STORAGE_DEBUG is not set -CONFIG_USB_STORAGE_REALTEK=m -CONFIG_REALTEK_AUTOPM=y -CONFIG_USB_STORAGE_DATAFAB=m -CONFIG_USB_STORAGE_FREECOM=m -CONFIG_USB_STORAGE_ISD200=m -CONFIG_USB_STORAGE_USBAT=m -CONFIG_USB_STORAGE_SDDR09=m -CONFIG_USB_STORAGE_SDDR55=m -CONFIG_USB_STORAGE_JUMPSHOT=m -CONFIG_USB_STORAGE_ALAUDA=m -CONFIG_USB_STORAGE_ONETOUCH=m -CONFIG_USB_STORAGE_KARMA=m -CONFIG_USB_STORAGE_CYPRESS_ATACB=m -CONFIG_USB_STORAGE_ENE_UB6250=m -CONFIG_USB_UAS=m - -# -# USB Imaging devices -# -CONFIG_USB_MDC800=m -CONFIG_USB_MICROTEK=m -# CONFIG_USBIP_CORE is not set - -# -# USB dual-mode controller drivers -# -# CONFIG_USB_CDNS_SUPPORT is not set -# CONFIG_USB_MUSB_HDRC is not set -# CONFIG_USB_DWC3 is not set -# CONFIG_USB_DWC2 is not set -# CONFIG_USB_CHIPIDEA is not set -# CONFIG_USB_ISP1760 is not set - -# -# USB port 
drivers -# -CONFIG_USB_SERIAL=y -# CONFIG_USB_SERIAL_CONSOLE is not set -CONFIG_USB_SERIAL_GENERIC=y -CONFIG_USB_SERIAL_SIMPLE=m -CONFIG_USB_SERIAL_AIRCABLE=m -CONFIG_USB_SERIAL_ARK3116=m -CONFIG_USB_SERIAL_BELKIN=m -CONFIG_USB_SERIAL_CH341=m -CONFIG_USB_SERIAL_WHITEHEAT=m -CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m -CONFIG_USB_SERIAL_CP210X=m -CONFIG_USB_SERIAL_CYPRESS_M8=m -CONFIG_USB_SERIAL_EMPEG=m -CONFIG_USB_SERIAL_FTDI_SIO=m -CONFIG_USB_SERIAL_VISOR=m -CONFIG_USB_SERIAL_IPAQ=m -CONFIG_USB_SERIAL_IR=m -CONFIG_USB_SERIAL_EDGEPORT=m -CONFIG_USB_SERIAL_EDGEPORT_TI=m -# CONFIG_USB_SERIAL_F81232 is not set -CONFIG_USB_SERIAL_F8153X=m -CONFIG_USB_SERIAL_GARMIN=m -CONFIG_USB_SERIAL_IPW=m -CONFIG_USB_SERIAL_IUU=m -CONFIG_USB_SERIAL_KEYSPAN_PDA=m -CONFIG_USB_SERIAL_KEYSPAN=m -CONFIG_USB_SERIAL_KLSI=m -CONFIG_USB_SERIAL_KOBIL_SCT=m -CONFIG_USB_SERIAL_MCT_U232=m -# CONFIG_USB_SERIAL_METRO is not set -CONFIG_USB_SERIAL_MOS7720=m -CONFIG_USB_SERIAL_MOS7840=m -CONFIG_USB_SERIAL_MXUPORT=m -CONFIG_USB_SERIAL_NAVMAN=m -CONFIG_USB_SERIAL_PL2303=m -CONFIG_USB_SERIAL_OTI6858=m -CONFIG_USB_SERIAL_QCAUX=m -CONFIG_USB_SERIAL_QUALCOMM=m -CONFIG_USB_SERIAL_SPCP8X5=m -CONFIG_USB_SERIAL_SAFE=m -CONFIG_USB_SERIAL_SAFE_PADDED=y -CONFIG_USB_SERIAL_SIERRAWIRELESS=m -CONFIG_USB_SERIAL_SYMBOL=m -CONFIG_USB_SERIAL_TI=m -CONFIG_USB_SERIAL_CYBERJACK=m -CONFIG_USB_SERIAL_WWAN=m -CONFIG_USB_SERIAL_OPTION=m -CONFIG_USB_SERIAL_OMNINET=m -CONFIG_USB_SERIAL_OPTICON=m -CONFIG_USB_SERIAL_XSENS_MT=m -# CONFIG_USB_SERIAL_WISHBONE is not set -CONFIG_USB_SERIAL_SSU100=m -CONFIG_USB_SERIAL_QT2=m -CONFIG_USB_SERIAL_UPD78F0730=m -# CONFIG_USB_SERIAL_XR is not set -CONFIG_USB_SERIAL_DEBUG=m - -# -# USB Miscellaneous drivers -# -CONFIG_USB_EMI62=m -CONFIG_USB_EMI26=m -CONFIG_USB_ADUTUX=m -CONFIG_USB_SEVSEG=m -CONFIG_USB_LEGOTOWER=m -CONFIG_USB_LCD=m -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set -CONFIG_USB_IDMOUSE=m -CONFIG_USB_APPLEDISPLAY=m -# CONFIG_USB_QCOM_EUD is not set -# 
CONFIG_APPLE_MFI_FASTCHARGE is not set -CONFIG_USB_SISUSBVGA=m -CONFIG_USB_LD=m -# CONFIG_USB_TRANCEVIBRATOR is not set -CONFIG_USB_IOWARRIOR=m -# CONFIG_USB_TEST is not set -# CONFIG_USB_EHSET_TEST_FIXTURE is not set -CONFIG_USB_ISIGHTFW=m -# CONFIG_USB_YUREX is not set -CONFIG_USB_EZUSB_FX2=m -# CONFIG_USB_HUB_USB251XB is not set -CONFIG_USB_HSIC_USB3503=m -# CONFIG_USB_HSIC_USB4604 is not set -# CONFIG_USB_LINK_LAYER_TEST is not set -CONFIG_USB_CHAOSKEY=m -# CONFIG_USB_ONBOARD_HUB is not set -CONFIG_USB_ATM=m -# CONFIG_USB_SPEEDTOUCH is not set -CONFIG_USB_CXACRU=m -CONFIG_USB_UEAGLEATM=m -CONFIG_USB_XUSBATM=m - -# -# USB Physical Layer drivers -# -# CONFIG_NOP_USB_XCEIV is not set -# CONFIG_USB_GPIO_VBUS is not set -# CONFIG_USB_ISP1301 is not set -# CONFIG_USB_ULPI is not set -# end of USB Physical Layer drivers - -# CONFIG_USB_GADGET is not set -CONFIG_TYPEC=y -CONFIG_TYPEC_TCPM=y -# CONFIG_TYPEC_TCPCI is not set -# CONFIG_TYPEC_FUSB302 is not set -# CONFIG_TYPEC_QCOM_PMIC is not set -CONFIG_TYPEC_UCSI=y -# CONFIG_UCSI_CCG is not set -CONFIG_UCSI_ACPI=y -# CONFIG_UCSI_STM32G0 is not set -CONFIG_TYPEC_TPS6598X=m -# CONFIG_TYPEC_ANX7411 is not set -# CONFIG_TYPEC_RT1719 is not set -# CONFIG_TYPEC_HD3SS3220 is not set -# CONFIG_TYPEC_STUSB160X is not set -# CONFIG_TYPEC_WUSB3801 is not set - -# -# USB Type-C Multiplexer/DeMultiplexer Switch support -# -# CONFIG_TYPEC_MUX_FSA4480 is not set -# CONFIG_TYPEC_MUX_GPIO_SBU is not set -CONFIG_TYPEC_MUX_PI3USB30532=m -# CONFIG_TYPEC_MUX_NB7VPQ904M is not set -# end of USB Type-C Multiplexer/DeMultiplexer Switch support - -# -# USB Type-C Alternate Mode drivers -# -CONFIG_TYPEC_DP_ALTMODE=m -# CONFIG_TYPEC_NVIDIA_ALTMODE is not set -# end of USB Type-C Alternate Mode drivers - -CONFIG_USB_ROLE_SWITCH=y -CONFIG_MMC=m -# CONFIG_PWRSEQ_EMMC is not set -# CONFIG_PWRSEQ_SIMPLE is not set -CONFIG_MMC_BLOCK=m -CONFIG_MMC_BLOCK_MINORS=8 -CONFIG_SDIO_UART=m -# CONFIG_MMC_TEST is not set - -# -# MMC/SD/SDIO Host Controller 
Drivers -# -# CONFIG_MMC_DEBUG is not set -CONFIG_MMC_ARMMMCI=m -CONFIG_MMC_STM32_SDMMC=y -CONFIG_MMC_SDHCI=m -CONFIG_MMC_SDHCI_IO_ACCESSORS=y -CONFIG_MMC_SDHCI_PCI=m -CONFIG_MMC_RICOH_MMC=y -CONFIG_MMC_SDHCI_ACPI=m -CONFIG_MMC_SDHCI_PLTFM=m -# CONFIG_MMC_SDHCI_OF_ARASAN is not set -# CONFIG_MMC_SDHCI_OF_AT91 is not set -# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set -# CONFIG_MMC_SDHCI_CADENCE is not set -# CONFIG_MMC_SDHCI_F_SDH30 is not set -# CONFIG_MMC_SDHCI_MILBEAUT is not set -# CONFIG_MMC_SDHCI_MSM is not set -CONFIG_MMC_TIFM_SD=m -# CONFIG_MMC_SPI is not set -CONFIG_MMC_CB710=m -CONFIG_MMC_VIA_SDMMC=m -CONFIG_MMC_DW=m -CONFIG_MMC_DW_PLTFM=m -CONFIG_MMC_DW_BLUEFIELD=m -# CONFIG_MMC_DW_EXYNOS is not set -# CONFIG_MMC_DW_HI3798CV200 is not set -# CONFIG_MMC_DW_K3 is not set -# CONFIG_MMC_DW_PCI is not set -CONFIG_MMC_VUB300=m -CONFIG_MMC_USHC=m -# CONFIG_MMC_USDHI6ROL0 is not set -CONFIG_MMC_CQHCI=m -# CONFIG_MMC_HSQ is not set -CONFIG_MMC_TOSHIBA_PCI=m -CONFIG_MMC_MTK=m -# CONFIG_MMC_SDHCI_XENON is not set -# CONFIG_SCSI_UFSHCD is not set -CONFIG_MEMSTICK=m -# CONFIG_MEMSTICK_DEBUG is not set - -# -# MemoryStick drivers -# -# CONFIG_MEMSTICK_UNSAFE_RESUME is not set -CONFIG_MSPRO_BLOCK=m -# CONFIG_MS_BLOCK is not set - -# -# MemoryStick Host Controller Drivers -# -CONFIG_MEMSTICK_TIFM_MS=m -CONFIG_MEMSTICK_JMICRON_38X=m -CONFIG_MEMSTICK_R592=m -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_CLASS_FLASH=m -# CONFIG_LEDS_CLASS_MULTICOLOR is not set -# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set - -# -# LED drivers -# -# CONFIG_LEDS_AN30259A is not set -# CONFIG_LEDS_AW200XX is not set -# CONFIG_LEDS_AW2013 is not set -# CONFIG_LEDS_BCM6328 is not set -# CONFIG_LEDS_BCM6358 is not set -# CONFIG_LEDS_CR0014114 is not set -# CONFIG_LEDS_EL15203000 is not set -CONFIG_LEDS_LM3530=m -# CONFIG_LEDS_LM3532 is not set -# CONFIG_LEDS_LM3642 is not set -# CONFIG_LEDS_LM3692X is not set -# CONFIG_LEDS_PCA9532 is not set -# CONFIG_LEDS_GPIO is not set -CONFIG_LEDS_LP3944=m 
-# CONFIG_LEDS_LP3952 is not set -# CONFIG_LEDS_LP50XX is not set -# CONFIG_LEDS_LP55XX_COMMON is not set -# CONFIG_LEDS_LP8860 is not set -# CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_PCA963X is not set -# CONFIG_LEDS_PCA995X is not set -# CONFIG_LEDS_DAC124S085 is not set -# CONFIG_LEDS_PWM is not set -# CONFIG_LEDS_REGULATOR is not set -# CONFIG_LEDS_BD2606MVV is not set -# CONFIG_LEDS_BD2802 is not set -CONFIG_LEDS_LT3593=m -# CONFIG_LEDS_TCA6507 is not set -# CONFIG_LEDS_TLC591XX is not set -# CONFIG_LEDS_LM355x is not set -# CONFIG_LEDS_IS31FL319X is not set -# CONFIG_LEDS_IS31FL32XX is not set - -# -# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) -# -CONFIG_LEDS_BLINKM=m -# CONFIG_LEDS_SYSCON is not set -# CONFIG_LEDS_MLXREG is not set -# CONFIG_LEDS_USER is not set -# CONFIG_LEDS_SPI_BYTE is not set -# CONFIG_LEDS_LM3697 is not set - -# -# Flash and Torch LED drivers -# -# CONFIG_LEDS_AAT1290 is not set -# CONFIG_LEDS_AS3645A is not set -# CONFIG_LEDS_KTD2692 is not set -# CONFIG_LEDS_LM3601X is not set -# CONFIG_LEDS_RT4505 is not set -# CONFIG_LEDS_RT8515 is not set -# CONFIG_LEDS_SGM3140 is not set - -# -# RGB LED drivers -# - -# -# LED Triggers -# -CONFIG_LEDS_TRIGGERS=y -CONFIG_LEDS_TRIGGER_TIMER=m -CONFIG_LEDS_TRIGGER_ONESHOT=m -# CONFIG_LEDS_TRIGGER_DISK is not set -# CONFIG_LEDS_TRIGGER_MTD is not set -CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_LEDS_TRIGGER_BACKLIGHT=m -# CONFIG_LEDS_TRIGGER_CPU is not set -# CONFIG_LEDS_TRIGGER_ACTIVITY is not set -CONFIG_LEDS_TRIGGER_DEFAULT_ON=m - -# -# iptables trigger is under Netfilter config (LED target) -# -CONFIG_LEDS_TRIGGER_TRANSIENT=m -CONFIG_LEDS_TRIGGER_CAMERA=m -# CONFIG_LEDS_TRIGGER_PANIC is not set -# CONFIG_LEDS_TRIGGER_NETDEV is not set -# CONFIG_LEDS_TRIGGER_PATTERN is not set -# CONFIG_LEDS_TRIGGER_AUDIO is not set -# CONFIG_LEDS_TRIGGER_TTY is not set - -# -# Simple LED drivers -# -# CONFIG_ACCESSIBILITY is not set -CONFIG_INFINIBAND=m 
-CONFIG_INFINIBAND_USER_MAD=m -CONFIG_INFINIBAND_USER_ACCESS=m -CONFIG_INFINIBAND_USER_MEM=y -CONFIG_INFINIBAND_ON_DEMAND_PAGING=y -CONFIG_INFINIBAND_ADDR_TRANS=y -CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y -CONFIG_INFINIBAND_VIRT_DMA=y -CONFIG_INFINIBAND_BNXT_RE=m -CONFIG_INFINIBAND_CXGB4=m -# CONFIG_INFINIBAND_EFA is not set -CONFIG_INFINIBAND_ERDMA=m -CONFIG_INFINIBAND_HNS=m -CONFIG_INFINIBAND_HNS_HIP08=y -# CONFIG_INFINIBAND_IRDMA is not set -CONFIG_MLX4_INFINIBAND=m -CONFIG_MLX5_INFINIBAND=m -# CONFIG_INFINIBAND_MTHCA is not set -# CONFIG_INFINIBAND_OCRDMA is not set -CONFIG_INFINIBAND_QEDR=m -CONFIG_RDMA_RXE=m -CONFIG_RDMA_SIW=m -CONFIG_INFINIBAND_IPOIB=m -CONFIG_INFINIBAND_IPOIB_CM=y -CONFIG_INFINIBAND_IPOIB_DEBUG=y -# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set -CONFIG_INFINIBAND_SRP=m -CONFIG_INFINIBAND_SRPT=m -CONFIG_INFINIBAND_ISER=m -CONFIG_INFINIBAND_ISERT=m -# CONFIG_INFINIBAND_RTRS_CLIENT is not set -# CONFIG_INFINIBAND_RTRS_SERVER is not set -CONFIG_EDAC_SUPPORT=y -CONFIG_EDAC=y -CONFIG_EDAC_LEGACY_SYSFS=y -CONFIG_EDAC_DEBUG=y -CONFIG_EDAC_GHES=y -CONFIG_EDAC_THUNDERX=m -CONFIG_EDAC_XGENE=m -# CONFIG_EDAC_DMC520 is not set -CONFIG_RTC_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -CONFIG_RTC_SYSTOHC=y -CONFIG_RTC_SYSTOHC_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set -CONFIG_RTC_NVMEM=y - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -CONFIG_RTC_DRV_ABB5ZES3=m -# CONFIG_RTC_DRV_ABEOZ9 is not set -CONFIG_RTC_DRV_ABX80X=m -CONFIG_RTC_DRV_DS1307=m -# CONFIG_RTC_DRV_DS1307_CENTURY is not set -CONFIG_RTC_DRV_DS1374=m -CONFIG_RTC_DRV_DS1374_WDT=y -CONFIG_RTC_DRV_DS1672=m -# CONFIG_RTC_DRV_HYM8563 is not set -CONFIG_RTC_DRV_MAX6900=m -# CONFIG_RTC_DRV_NCT3018Y is not set -CONFIG_RTC_DRV_RS5C372=m -CONFIG_RTC_DRV_ISL1208=m -CONFIG_RTC_DRV_ISL12022=m -# 
CONFIG_RTC_DRV_ISL12026 is not set -CONFIG_RTC_DRV_X1205=m -CONFIG_RTC_DRV_PCF8523=m -CONFIG_RTC_DRV_PCF85063=m -# CONFIG_RTC_DRV_PCF85363 is not set -CONFIG_RTC_DRV_PCF8563=m -CONFIG_RTC_DRV_PCF8583=m -CONFIG_RTC_DRV_M41T80=m -CONFIG_RTC_DRV_M41T80_WDT=y -CONFIG_RTC_DRV_BQ32K=m -# CONFIG_RTC_DRV_S35390A is not set -CONFIG_RTC_DRV_FM3130=m -CONFIG_RTC_DRV_RX8010=m -CONFIG_RTC_DRV_RX8581=m -CONFIG_RTC_DRV_RX8025=m -CONFIG_RTC_DRV_EM3027=m -# CONFIG_RTC_DRV_RV3028 is not set -# CONFIG_RTC_DRV_RV3032 is not set -# CONFIG_RTC_DRV_RV8803 is not set -# CONFIG_RTC_DRV_SD3078 is not set - -# -# SPI RTC drivers -# -CONFIG_RTC_DRV_M41T93=m -CONFIG_RTC_DRV_M41T94=m -# CONFIG_RTC_DRV_DS1302 is not set -CONFIG_RTC_DRV_DS1305=m -CONFIG_RTC_DRV_DS1343=m -CONFIG_RTC_DRV_DS1347=m -CONFIG_RTC_DRV_DS1390=m -# CONFIG_RTC_DRV_MAX6916 is not set -CONFIG_RTC_DRV_R9701=m -CONFIG_RTC_DRV_RX4581=m -CONFIG_RTC_DRV_RS5C348=m -CONFIG_RTC_DRV_MAX6902=m -CONFIG_RTC_DRV_PCF2123=m -CONFIG_RTC_DRV_MCP795=m -CONFIG_RTC_I2C_AND_SPI=y - -# -# SPI and I2C RTC drivers -# -CONFIG_RTC_DRV_DS3232=m -CONFIG_RTC_DRV_DS3232_HWMON=y -CONFIG_RTC_DRV_PCF2127=m -CONFIG_RTC_DRV_RV3029C2=m -# CONFIG_RTC_DRV_RV3029_HWMON is not set -# CONFIG_RTC_DRV_RX6110 is not set - -# -# Platform RTC drivers -# -CONFIG_RTC_DRV_DS1286=m -CONFIG_RTC_DRV_DS1511=m -CONFIG_RTC_DRV_DS1553=m -CONFIG_RTC_DRV_DS1685_FAMILY=m -CONFIG_RTC_DRV_DS1685=y -# CONFIG_RTC_DRV_DS1689 is not set -# CONFIG_RTC_DRV_DS17285 is not set -# CONFIG_RTC_DRV_DS17485 is not set -# CONFIG_RTC_DRV_DS17885 is not set -CONFIG_RTC_DRV_DS1742=m -CONFIG_RTC_DRV_DS2404=m -CONFIG_RTC_DRV_EFI=y -CONFIG_RTC_DRV_STK17TA8=m -# CONFIG_RTC_DRV_M48T86 is not set -CONFIG_RTC_DRV_M48T35=m -CONFIG_RTC_DRV_M48T59=m -CONFIG_RTC_DRV_MSM6242=m -CONFIG_RTC_DRV_RP5C01=m -# CONFIG_RTC_DRV_ZYNQMP is not set - -# -# on-CPU RTC drivers -# -# CONFIG_RTC_DRV_PL030 is not set -CONFIG_RTC_DRV_PL031=y -# CONFIG_RTC_DRV_CADENCE is not set -# CONFIG_RTC_DRV_FTRTC010 is not set -# 
CONFIG_RTC_DRV_XGENE is not set -# CONFIG_RTC_DRV_R7301 is not set - -# -# HID Sensor RTC drivers -# -# CONFIG_RTC_DRV_GOLDFISH is not set -CONFIG_DMADEVICES=y -CONFIG_DMADEVICES_DEBUG=y -CONFIG_DMADEVICES_VDEBUG=y - -# -# DMA Devices -# -CONFIG_DMA_ENGINE=y -CONFIG_DMA_ACPI=y -CONFIG_DMA_OF=y -# CONFIG_ALTERA_MSGDMA is not set -# CONFIG_AMBA_PL08X is not set -# CONFIG_BCM_SBA_RAID is not set -# CONFIG_DW_AXI_DMAC is not set -# CONFIG_FSL_EDMA is not set -# CONFIG_FSL_QDMA is not set -# CONFIG_HISI_DMA is not set -# CONFIG_INTEL_IDMA64 is not set -# CONFIG_K3_DMA is not set -# CONFIG_MV_XOR_V2 is not set -# CONFIG_PL330_DMA is not set -# CONFIG_PLX_DMA is not set -# CONFIG_XGENE_DMA is not set -# CONFIG_XILINX_DMA is not set -# CONFIG_XILINX_XDMA is not set -# CONFIG_XILINX_ZYNQMP_DMA is not set -# CONFIG_XILINX_ZYNQMP_DPDMA is not set -# CONFIG_QCOM_BAM_DMA is not set -# CONFIG_QCOM_GPI_DMA is not set -CONFIG_QCOM_HIDMA_MGMT=m -CONFIG_QCOM_HIDMA=m -CONFIG_DW_DMAC_CORE=m -CONFIG_DW_DMAC=m -CONFIG_DW_DMAC_PCI=m -# CONFIG_DW_EDMA is not set -# CONFIG_SF_PDMA is not set - -# -# DMA Clients -# -CONFIG_ASYNC_TX_DMA=y -CONFIG_DMATEST=m -CONFIG_DMA_ENGINE_RAID=y - -# -# DMABUF options -# -CONFIG_SYNC_FILE=y -# CONFIG_SW_SYNC is not set -# CONFIG_UDMABUF is not set -# CONFIG_DMABUF_MOVE_NOTIFY is not set -CONFIG_DMABUF_DEBUG=y -# CONFIG_DMABUF_SELFTESTS is not set -# CONFIG_DMABUF_HEAPS is not set -# CONFIG_DMABUF_SYSFS_STATS is not set -# end of DMABUF options - -CONFIG_UIO=m -CONFIG_UIO_CIF=m -CONFIG_UIO_PDRV_GENIRQ=m -# CONFIG_UIO_DMEM_GENIRQ is not set -CONFIG_UIO_AEC=m -CONFIG_UIO_SERCOS3=m -CONFIG_UIO_PCI_GENERIC=m -# CONFIG_UIO_NETX is not set -# CONFIG_UIO_PRUSS is not set -# CONFIG_UIO_MF624 is not set -CONFIG_VFIO=m -CONFIG_VFIO_GROUP=y -CONFIG_VFIO_CONTAINER=y -CONFIG_VFIO_IOMMU_TYPE1=m -CONFIG_VFIO_NOIOMMU=y -CONFIG_VFIO_VIRQFD=y - -# -# VFIO support for PCI devices -# -CONFIG_VFIO_PCI_CORE=m -CONFIG_VFIO_PCI_MMAP=y -CONFIG_VFIO_PCI_INTX=y -CONFIG_VFIO_PCI=m -# 
CONFIG_MLX5_VFIO_PCI is not set -# CONFIG_HISI_ACC_VFIO_PCI is not set -# end of VFIO support for PCI devices - -# -# VFIO support for platform devices -# -CONFIG_VFIO_PLATFORM_BASE=m -CONFIG_VFIO_PLATFORM=m -# CONFIG_VFIO_AMBA is not set - -# -# VFIO platform reset drivers -# -# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set -# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set -# end of VFIO platform reset drivers -# end of VFIO support for platform devices - -# CONFIG_VIRT_DRIVERS is not set -CONFIG_VIRTIO_ANCHOR=y -CONFIG_VIRTIO=m -CONFIG_VIRTIO_PCI_LIB=m -CONFIG_VIRTIO_PCI_LIB_LEGACY=m -CONFIG_VIRTIO_MENU=y -CONFIG_VIRTIO_PCI=m -CONFIG_VIRTIO_PCI_LEGACY=y -CONFIG_VIRTIO_PMEM=m -CONFIG_VIRTIO_BALLOON=m -CONFIG_VIRTIO_MEM=m -CONFIG_VIRTIO_INPUT=m -CONFIG_VIRTIO_MMIO=m -# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set -CONFIG_VIRTIO_DMA_SHARED_BUFFER=m -# CONFIG_VDPA is not set -CONFIG_VHOST_IOTLB=m -CONFIG_VHOST_TASK=y -CONFIG_VHOST=m -CONFIG_VHOST_MENU=y -CONFIG_VHOST_NET=m -CONFIG_VHOST_SCSI=m -CONFIG_VHOST_VSOCK=m -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set - -# -# Microsoft Hyper-V guest support -# -# CONFIG_HYPERV is not set -# end of Microsoft Hyper-V guest support - -# CONFIG_GREYBUS is not set -# CONFIG_COMEDI is not set -CONFIG_STAGING=y -# CONFIG_RTS5208 is not set -# CONFIG_VT6655 is not set -# CONFIG_FB_SM750 is not set -# CONFIG_STAGING_MEDIA is not set -# CONFIG_STAGING_BOARD is not set -# CONFIG_LTE_GDM724X is not set -# CONFIG_FB_TFT is not set -# CONFIG_KS7010 is not set -# CONFIG_PI433 is not set -# CONFIG_XIL_AXIS_FIFO is not set -# CONFIG_FIELDBUS_DEV is not set -# CONFIG_QLGE is not set -# CONFIG_VME_BUS is not set -# CONFIG_GOLDFISH is not set -CONFIG_CHROME_PLATFORMS=y -# CONFIG_CHROMEOS_ACPI is not set -# CONFIG_CHROMEOS_TBMC is not set -# CONFIG_CROS_EC is not set -# CONFIG_CROS_KBD_LED_BACKLIGHT is not set -# CONFIG_CROS_HPS_I2C is not set -# CONFIG_CHROMEOS_PRIVACY_SCREEN is not set -# CONFIG_MELLANOX_PLATFORM is not set 
-CONFIG_ARM_CPU_RESCTRL=y -CONFIG_SURFACE_PLATFORMS=y -# CONFIG_SURFACE_3_POWER_OPREGION is not set -# CONFIG_SURFACE_GPE is not set -# CONFIG_SURFACE_HOTPLUG is not set -# CONFIG_SURFACE_PRO3_BUTTON is not set -CONFIG_HAVE_CLK=y -CONFIG_HAVE_CLK_PREPARE=y -CONFIG_COMMON_CLK=y - -# -# Clock driver for ARM Reference designs -# -# CONFIG_CLK_ICST is not set -CONFIG_CLK_SP810=y -CONFIG_CLK_VEXPRESS_OSC=y -# end of Clock driver for ARM Reference designs - -# CONFIG_LMK04832 is not set -# CONFIG_COMMON_CLK_MAX9485 is not set -CONFIG_COMMON_CLK_SCPI=m -# CONFIG_COMMON_CLK_SI5341 is not set -# CONFIG_COMMON_CLK_SI5351 is not set -# CONFIG_COMMON_CLK_SI514 is not set -# CONFIG_COMMON_CLK_SI544 is not set -# CONFIG_COMMON_CLK_SI570 is not set -# CONFIG_COMMON_CLK_CDCE706 is not set -# CONFIG_COMMON_CLK_CDCE925 is not set -# CONFIG_COMMON_CLK_CS2000_CP is not set -# CONFIG_COMMON_CLK_AXI_CLKGEN is not set -CONFIG_COMMON_CLK_XGENE=y -# CONFIG_COMMON_CLK_PWM is not set -# CONFIG_COMMON_CLK_RS9_PCIE is not set -# CONFIG_COMMON_CLK_SI521XX is not set -# CONFIG_COMMON_CLK_VC3 is not set -# CONFIG_COMMON_CLK_VC5 is not set -# CONFIG_COMMON_CLK_VC7 is not set -# CONFIG_COMMON_CLK_FIXED_MMIO is not set -CONFIG_COMMON_CLK_HI3516CV300=y -CONFIG_COMMON_CLK_HI3519=y -CONFIG_COMMON_CLK_HI3559A=y -CONFIG_COMMON_CLK_HI3660=y -CONFIG_COMMON_CLK_HI3670=y -CONFIG_COMMON_CLK_HI3798CV200=y -# CONFIG_COMMON_CLK_HI6220 is not set -CONFIG_RESET_HISI=y -CONFIG_STUB_CLK_HI3660=y -# CONFIG_COMMON_CLK_QCOM is not set -# CONFIG_XILINX_VCU is not set -# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set -CONFIG_HWSPINLOCK=y -# CONFIG_HWSPINLOCK_QCOM is not set - -# -# Clock Source drivers -# -CONFIG_TIMER_OF=y -CONFIG_TIMER_ACPI=y -CONFIG_TIMER_PROBE=y -CONFIG_CLKSRC_MMIO=y -CONFIG_ARM_ARCH_TIMER=y -CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y -CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y -CONFIG_FSL_ERRATUM_A008585=y -CONFIG_HISILICON_ERRATUM_161010101=y -CONFIG_ARM64_ERRATUM_858921=y -CONFIG_ARM_TIMER_SP804=y -# end of Clock 
Source drivers - -CONFIG_MAILBOX=y -CONFIG_ARM_MHU=m -# CONFIG_ARM_MHU_V2 is not set -# CONFIG_PLATFORM_MHU is not set -# CONFIG_PL320_MBOX is not set -CONFIG_PCC=y -# CONFIG_ALTERA_MBOX is not set -CONFIG_HI3660_MBOX=y -CONFIG_HI6220_MBOX=y -CONFIG_MAILBOX_TEST=m -# CONFIG_QCOM_APCS_IPC is not set -CONFIG_XGENE_SLIMPRO_MBOX=m -# CONFIG_QCOM_IPCC is not set -CONFIG_IOMMU_IOVA=y -CONFIG_IOMMU_API=y -CONFIG_IOMMU_SUPPORT=y - -# -# Generic IOMMU Pagetable Support -# -CONFIG_IOMMU_IO_PGTABLE=y -CONFIG_IOMMU_IO_PGTABLE_LPAE=y -# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set -# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set -# CONFIG_IOMMU_IO_PGTABLE_DART is not set -# end of Generic IOMMU Pagetable Support - -# CONFIG_IOMMU_DEBUGFS is not set -CONFIG_IOMMU_DEFAULT_DMA_STRICT=y -# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set -# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set -CONFIG_OF_IOMMU=y -CONFIG_IOMMU_DMA=y -CONFIG_IOMMU_SVA=y -# CONFIG_IOMMUFD is not set -CONFIG_ARM_SMMU=y -# CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set -CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y -CONFIG_ARM_SMMU_QCOM=y -# CONFIG_ARM_SMMU_QCOM_DEBUG is not set -CONFIG_ARM_SMMU_V3=y -CONFIG_ARM_SMMU_V3_SVA=y -# CONFIG_QCOM_IOMMU is not set -# CONFIG_VIRTIO_IOMMU is not set - -# -# Remoteproc drivers -# -# CONFIG_REMOTEPROC is not set -# end of Remoteproc drivers - -# -# Rpmsg drivers -# -# CONFIG_RPMSG_QCOM_GLINK_RPM is not set -# CONFIG_RPMSG_VIRTIO is not set -# end of Rpmsg drivers - -# CONFIG_SOUNDWIRE is not set - -# -# SOC (System On Chip) specific Drivers -# - -# -# Amlogic SoC drivers -# -# end of Amlogic SoC drivers - -# -# Broadcom SoC drivers -# -# CONFIG_SOC_BRCMSTB is not set -# end of Broadcom SoC drivers - -# -# NXP/Freescale QorIQ SoC drivers -# -# CONFIG_QUICC_ENGINE is not set -# CONFIG_FSL_RCPM is not set -# end of NXP/Freescale QorIQ SoC drivers - -# -# fujitsu SoC drivers -# -# CONFIG_A64FX_DIAG is not set -# end of fujitsu SoC drivers - -# -# Hisilicon SoC drivers -# 
-CONFIG_KUNPENG_HCCS=m -# end of Hisilicon SoC drivers - -# -# i.MX SoC drivers -# -# end of i.MX SoC drivers - -# -# Enable LiteX SoC Builder specific drivers -# -# CONFIG_LITEX_SOC_CONTROLLER is not set -# end of Enable LiteX SoC Builder specific drivers - -# CONFIG_WPCM450_SOC is not set - -# -# Qualcomm SoC drivers -# -# CONFIG_QCOM_AOSS_QMP is not set -# CONFIG_QCOM_COMMAND_DB is not set -# CONFIG_QCOM_CPR is not set -# CONFIG_QCOM_GENI_SE is not set -# CONFIG_QCOM_GSBI is not set -# CONFIG_QCOM_LLCC is not set -CONFIG_QCOM_KRYO_L2_ACCESSORS=y -# CONFIG_QCOM_OCMEM is not set -# CONFIG_QCOM_RAMP_CTRL is not set -# CONFIG_QCOM_RMTFS_MEM is not set -# CONFIG_QCOM_RPM_MASTER_STATS is not set -# CONFIG_QCOM_RPMH is not set -# CONFIG_QCOM_SMEM is not set -# CONFIG_QCOM_SPM is not set -# CONFIG_QCOM_ICC_BWMON is not set -# end of Qualcomm SoC drivers - -# CONFIG_SOC_TI is not set - -# -# Xilinx SoC drivers -# -# end of Xilinx SoC drivers -# end of SOC (System On Chip) specific Drivers - -# CONFIG_PM_DEVFREQ is not set -CONFIG_EXTCON=y - -# -# Extcon Device Drivers -# -# CONFIG_EXTCON_FSA9480 is not set -CONFIG_EXTCON_GPIO=m -# CONFIG_EXTCON_MAX3355 is not set -# CONFIG_EXTCON_PTN5150 is not set -# CONFIG_EXTCON_QCOM_SPMI_MISC is not set -# CONFIG_EXTCON_RT8973A is not set -# CONFIG_EXTCON_SM5502 is not set -# CONFIG_EXTCON_USB_GPIO is not set -# CONFIG_EXTCON_USBC_TUSB320 is not set -# CONFIG_MEMORY is not set -# CONFIG_IIO is not set -CONFIG_NTB=m -# CONFIG_NTB_MSI is not set -# CONFIG_NTB_IDT is not set -# CONFIG_NTB_EPF is not set -# CONFIG_NTB_SWITCHTEC is not set -# CONFIG_NTB_PINGPONG is not set -# CONFIG_NTB_TOOL is not set -# CONFIG_NTB_PERF is not set -# CONFIG_NTB_TRANSPORT is not set -CONFIG_PWM=y -CONFIG_PWM_SYSFS=y -# CONFIG_PWM_DEBUG is not set -# CONFIG_PWM_ATMEL_TCB is not set -# CONFIG_PWM_CLK is not set -# CONFIG_PWM_DWC is not set -# CONFIG_PWM_FSL_FTM is not set -# CONFIG_PWM_HIBVT is not set -# CONFIG_PWM_PCA9685 is not set -# CONFIG_PWM_XILINX 
is not set - -# -# IRQ chip support -# -CONFIG_IRQCHIP=y -CONFIG_ARM_GIC=y -CONFIG_ARM_GIC_MAX_NR=1 -CONFIG_ARM_GIC_V2M=y -CONFIG_ARM_GIC_V3=y -CONFIG_ARM_GIC_V3_ITS=y -CONFIG_ARM_GIC_V3_ITS_PCI=y -CONFIG_ARM_GIC_PHYTIUM_2500=y -# CONFIG_AL_FIC is not set -CONFIG_HISILICON_IRQ_MBIGEN=y -# CONFIG_XILINX_INTC is not set -CONFIG_PARTITION_PERCPU=y -CONFIG_QCOM_IRQ_COMBINER=y -# CONFIG_QCOM_PDC is not set -# CONFIG_QCOM_MPM is not set -# end of IRQ chip support - -# CONFIG_IPACK_BUS is not set -CONFIG_RESET_CONTROLLER=y -# CONFIG_RESET_QCOM_AOSS is not set -# CONFIG_RESET_QCOM_PDC is not set -# CONFIG_RESET_TI_SYSCON is not set -# CONFIG_RESET_TI_TPS380X is not set -# CONFIG_COMMON_RESET_HI3660 is not set -CONFIG_COMMON_RESET_HI6220=m - -# -# PHY Subsystem -# -CONFIG_GENERIC_PHY=y -CONFIG_PHY_XGENE=y -# CONFIG_PHY_CAN_TRANSCEIVER is not set - -# -# PHY drivers for Broadcom platforms -# -# CONFIG_BCM_KONA_USB2_PHY is not set -# end of PHY drivers for Broadcom platforms - -# CONFIG_PHY_CADENCE_TORRENT is not set -# CONFIG_PHY_CADENCE_DPHY is not set -# CONFIG_PHY_CADENCE_DPHY_RX is not set -# CONFIG_PHY_CADENCE_SIERRA is not set -# CONFIG_PHY_CADENCE_SALVO is not set -CONFIG_PHY_HI6220_USB=m -# CONFIG_PHY_HI3660_USB is not set -# CONFIG_PHY_HI3670_USB is not set -# CONFIG_PHY_HI3670_PCIE is not set -# CONFIG_PHY_HISTB_COMBPHY is not set -# CONFIG_PHY_HISI_INNO_USB2 is not set -# CONFIG_PHY_PXA_28NM_HSIC is not set -# CONFIG_PHY_PXA_28NM_USB2 is not set -# CONFIG_PHY_LAN966X_SERDES is not set -# CONFIG_PHY_MAPPHONE_MDM6600 is not set -# CONFIG_PHY_OCELOT_SERDES is not set -# CONFIG_PHY_QCOM_APQ8064_SATA is not set -# CONFIG_PHY_QCOM_EDP is not set -# CONFIG_PHY_QCOM_IPQ4019_USB is not set -# CONFIG_PHY_QCOM_IPQ806X_SATA is not set -# CONFIG_PHY_QCOM_PCIE2 is not set -# CONFIG_PHY_QCOM_QMP is not set -# CONFIG_PHY_QCOM_QUSB2 is not set -# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set -# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set -# CONFIG_PHY_QCOM_M31_USB is not set -# 
CONFIG_PHY_QCOM_USB_HS is not set -# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set -# CONFIG_PHY_QCOM_USB_HSIC is not set -# CONFIG_PHY_QCOM_USB_HS_28NM is not set -# CONFIG_PHY_QCOM_USB_SS is not set -# CONFIG_PHY_QCOM_IPQ806X_USB is not set -# CONFIG_PHY_QCOM_SGMII_ETH is not set -# CONFIG_PHY_TUSB1210 is not set -# end of PHY Subsystem - -# CONFIG_POWERCAP is not set -# CONFIG_MCB is not set - -# -# Performance monitor support -# -# CONFIG_ARM_CCI_PMU is not set -CONFIG_ARM_CCN=y -CONFIG_ARM_CMN=y -CONFIG_ARM_PMU=y -CONFIG_ARM_PMU_ACPI=y -CONFIG_ARM_SMMU_V3_PMU=m -CONFIG_ARM_PMUV3=y -CONFIG_ARM_DSU_PMU=y -CONFIG_QCOM_L2_PMU=y -CONFIG_QCOM_L3_PMU=y -CONFIG_THUNDERX2_PMU=m -CONFIG_XGENE_PMU=y -CONFIG_ARM_SPE_PMU=m -# CONFIG_ARM_DMC620_PMU is not set -# CONFIG_MARVELL_CN10K_TAD_PMU is not set -CONFIG_ALIBABA_UNCORE_DRW_PMU=m -CONFIG_HISI_PMU=m -CONFIG_HISI_PCIE_PMU=m -# CONFIG_HNS3_PMU is not set -# CONFIG_MARVELL_CN10K_DDR_PMU is not set -CONFIG_DWC_PCIE_PMU=m -# CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set -# end of Performance monitor support - -CONFIG_RAS=y -# CONFIG_USB4 is not set - -# -# Android -# -# CONFIG_ANDROID_BINDER_IPC is not set -# end of Android - -CONFIG_LIBNVDIMM=m -CONFIG_BLK_DEV_PMEM=m -CONFIG_ND_CLAIM=y -CONFIG_ND_BTT=m -CONFIG_BTT=y -CONFIG_ND_PFN=m -CONFIG_NVDIMM_PFN=y -CONFIG_NVDIMM_DAX=y -CONFIG_OF_PMEM=m -CONFIG_NVDIMM_KEYS=y -# CONFIG_NVDIMM_SECURITY_TEST is not set -CONFIG_DAX=y -CONFIG_DEV_DAX=m -CONFIG_DEV_DAX_PMEM=m -CONFIG_DEV_DAX_HMEM=m -CONFIG_DEV_DAX_CXL=m -CONFIG_DEV_DAX_HMEM_DEVICES=y -# CONFIG_DEV_DAX_KMEM is not set -CONFIG_NVMEM=y -CONFIG_NVMEM_SYSFS=y - -# -# Layout Types -# -# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set -# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set -# end of Layout Types - -# CONFIG_NVMEM_QCOM_QFPROM is not set -# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set -# CONFIG_NVMEM_RMEM is not set -# CONFIG_NVMEM_U_BOOT_ENV is not set - -# -# HW tracing support -# -CONFIG_STM=m -# CONFIG_STM_PROTO_BASIC is not set -# 
CONFIG_STM_PROTO_SYS_T is not set -# CONFIG_STM_DUMMY is not set -# CONFIG_STM_SOURCE_CONSOLE is not set -# CONFIG_STM_SOURCE_HEARTBEAT is not set -# CONFIG_STM_SOURCE_FTRACE is not set -# CONFIG_INTEL_TH is not set -# CONFIG_HISI_PTT is not set -# end of HW tracing support - -# CONFIG_FPGA is not set -# CONFIG_FSI is not set -CONFIG_TEE=m -# CONFIG_OPTEE is not set -# CONFIG_SIOX is not set -# CONFIG_SLIMBUS is not set -# CONFIG_INTERCONNECT is not set -# CONFIG_COUNTER is not set -# CONFIG_MOST is not set -# CONFIG_PECI is not set -# CONFIG_HTE is not set -# CONFIG_CDX_BUS is not set -# end of Device Drivers - -# -# File systems -# -CONFIG_DCACHE_WORD_ACCESS=y -# CONFIG_VALIDATE_FS_PARSER is not set -CONFIG_FS_IOMAP=y -CONFIG_BUFFER_HEAD=y -CONFIG_LEGACY_DIRECT_IO=y -# CONFIG_EXT2_FS is not set -CONFIG_EXT3_FS=m -# CONFIG_EXT3_FS_POSIX_ACL is not set -# CONFIG_EXT3_FS_SECURITY is not set -CONFIG_EXT4_FS=m -CONFIG_EXT4_USE_FOR_EXT2=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_DEBUG=y -CONFIG_JBD2=m -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=m -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -CONFIG_XFS_FS=m -CONFIG_XFS_SUPPORT_V4=y -CONFIG_XFS_SUPPORT_ASCII_CI=y -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -# CONFIG_XFS_RT is not set -# CONFIG_XFS_ONLINE_SCRUB is not set -CONFIG_XFS_WARN=y -# CONFIG_XFS_DEBUG is not set -# CONFIG_GFS2_FS is not set -# CONFIG_OCFS2_FS is not set -CONFIG_BTRFS_FS=m -# CONFIG_BTRFS_FS_POSIX_ACL is not set -# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set -# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set -# CONFIG_BTRFS_DEBUG is not set -# CONFIG_BTRFS_ASSERT is not set -# CONFIG_BTRFS_FS_REF_VERIFY is not set -# CONFIG_NILFS2_FS is not set -# CONFIG_F2FS_FS is not set -# CONFIG_ZONEFS_FS is not set -CONFIG_FS_DAX=y -CONFIG_FS_DAX_PMD=y -CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=y -CONFIG_EXPORTFS_BLOCK_OPS=y -CONFIG_FILE_LOCKING=y -# CONFIG_FS_ENCRYPTION is not set -# CONFIG_FS_VERITY is not set 
-CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_QUOTA_DEBUG=y -CONFIG_QUOTA_TREE=y -# CONFIG_QFMT_V1 is not set -CONFIG_QFMT_V2=y -CONFIG_QUOTACTL=y -CONFIG_AUTOFS_FS=y -CONFIG_FUSE_FS=m -CONFIG_CUSE=m -CONFIG_VIRTIO_FS=m -CONFIG_FUSE_DAX=y -CONFIG_VIRT_FUSE=m -CONFIG_OVERLAY_FS=m -CONFIG_OVERLAY_FS_REDIRECT_DIR=y -CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y -CONFIG_OVERLAY_FS_INDEX=y -# CONFIG_OVERLAY_FS_NFS_EXPORT is not set -# CONFIG_OVERLAY_FS_XINO_AUTO is not set -# CONFIG_OVERLAY_FS_METACOPY is not set -# CONFIG_OVERLAY_FS_DEBUG is not set - -# -# Caches -# -CONFIG_NETFS_SUPPORT=m -CONFIG_NETFS_STATS=y -CONFIG_FSCACHE=m -CONFIG_FSCACHE_STATS=y -# CONFIG_FSCACHE_DEBUG is not set -CONFIG_CACHEFILES=m -# CONFIG_CACHEFILES_DEBUG is not set -# CONFIG_CACHEFILES_ERROR_INJECTION is not set -CONFIG_CACHEFILES_ONDEMAND=y -# end of Caches - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -# end of CD-ROM/DVD Filesystems - -# -# DOS/FAT/EXFAT/NT Filesystems -# -CONFIG_FAT_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="ascii" -# CONFIG_FAT_DEFAULT_UTF8 is not set -# CONFIG_EXFAT_FS is not set -# CONFIG_NTFS_FS is not set -CONFIG_NTFS3_FS=m -# CONFIG_NTFS3_64BIT_CLUSTER is not set -# CONFIG_NTFS3_LZX_XPRESS is not set -# CONFIG_NTFS3_FS_POSIX_ACL is not set -# end of DOS/FAT/EXFAT/NT Filesystems - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_PROC_VMCORE=y -CONFIG_PROC_VMCORE_DEVICE_DUMP=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_PROC_CHILDREN=y -CONFIG_PROC_CPU_RESCTRL=y -CONFIG_KERNFS=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -# CONFIG_TMPFS_INODE64 is not set -# CONFIG_TMPFS_QUOTA is not set -CONFIG_ARCH_SUPPORTS_HUGETLBFS=y -CONFIG_HUGETLBFS=y 
-CONFIG_HUGETLB_PAGE=y -CONFIG_ARCH_HAS_GIGANTIC_PAGE=y -CONFIG_CONFIGFS_FS=y -CONFIG_EFIVAR_FS=y -# end of Pseudo filesystems - -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ORANGEFS_FS is not set -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_ECRYPT_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_JFFS2_FS is not set -# CONFIG_UBIFS_FS is not set -CONFIG_CRAMFS=m -CONFIG_CRAMFS_BLOCKDEV=y -# CONFIG_CRAMFS_MTD is not set -CONFIG_SQUASHFS=m -# CONFIG_SQUASHFS_FILE_CACHE is not set -CONFIG_SQUASHFS_FILE_DIRECT=y -CONFIG_SQUASHFS_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set -CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set -# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y -CONFIG_SQUASHFS_LZ4=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -# CONFIG_SQUASHFS_ZSTD is not set -# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set -# CONFIG_SQUASHFS_EMBEDDED is not set -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_QNX6FS_FS is not set -CONFIG_RESCTRL_FS=y -CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID=y -# CONFIG_ROMFS_FS is not set -CONFIG_PSTORE=y -CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 -CONFIG_PSTORE_COMPRESS=y -CONFIG_PSTORE_CONSOLE=y -# CONFIG_PSTORE_PMSG is not set -# CONFIG_PSTORE_FTRACE is not set -CONFIG_PSTORE_RAM=y -# CONFIG_PSTORE_BLK is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -CONFIG_EROFS_FS=m -CONFIG_EROFS_FS_DEBUG=y -CONFIG_EROFS_FS_XATTR=y -CONFIG_EROFS_FS_POSIX_ACL=y -CONFIG_EROFS_FS_SECURITY=y -CONFIG_EROFS_FS_ZIP=y -CONFIG_EROFS_FS_ZIP_LZMA=y -# CONFIG_EROFS_FS_ZIP_DEFLATE is not set -CONFIG_EROFS_FS_ONDEMAND=y -# CONFIG_EROFS_FS_PCPU_KTHREAD is 
not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=m -# CONFIG_NFS_V2 is not set -CONFIG_NFS_V3=m -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=m -# CONFIG_NFS_SWAP is not set -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y -CONFIG_PNFS_FILE_LAYOUT=m -CONFIG_PNFS_BLOCK=m -CONFIG_PNFS_FLEXFILE_LAYOUT=m -CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" -# CONFIG_NFS_V4_1_MIGRATION is not set -CONFIG_NFS_V4_SECURITY_LABEL=y -CONFIG_NFS_FSCACHE=y -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -CONFIG_NFS_DEBUG=y -CONFIG_NFS_DISABLE_UDP_SUPPORT=y -# CONFIG_NFS_V4_2_READ_PLUS is not set -CONFIG_NFSD=m -# CONFIG_NFSD_V2 is not set -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -CONFIG_NFSD_PNFS=y -# CONFIG_NFSD_BLOCKLAYOUT is not set -CONFIG_NFSD_SCSILAYOUT=y -# CONFIG_NFSD_FLEXFILELAYOUT is not set -# CONFIG_NFSD_V4_2_INTER_SSC is not set -CONFIG_NFSD_V4_SECURITY_LABEL=y -CONFIG_GRACE_PERIOD=m -CONFIG_LOCKD=m -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=m -CONFIG_NFS_COMMON=y -CONFIG_NFS_V4_2_SSC_HELPER=y -CONFIG_SUNRPC=m -CONFIG_SUNRPC_GSS=m -CONFIG_SUNRPC_BACKCHANNEL=y -CONFIG_RPCSEC_GSS_KRB5=m -CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y -# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set -# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set -CONFIG_SUNRPC_DEBUG=y -CONFIG_SUNRPC_XPRT_RDMA=m -CONFIG_CEPH_FS=m -# CONFIG_CEPH_FSCACHE is not set -CONFIG_CEPH_FS_POSIX_ACL=y -# CONFIG_CEPH_FS_SECURITY_LABEL is not set -CONFIG_CIFS=m -# CONFIG_CIFS_STATS2 is not set -CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y -CONFIG_CIFS_UPCALL=y -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -CONFIG_CIFS_DEBUG=y -# CONFIG_CIFS_DEBUG2 is not set -# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set -CONFIG_CIFS_DFS_UPCALL=y -# CONFIG_CIFS_SWN_UPCALL is not set -# CONFIG_CIFS_SMB_DIRECT is not set -# CONFIG_CIFS_FSCACHE is not set -# CONFIG_SMB_SERVER is not set -CONFIG_SMBFS=m -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=y 
-CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m -CONFIG_NLS_UTF8=m -CONFIG_NLS_UCS2_UTILS=m -CONFIG_DLM=m -# CONFIG_DLM_DEBUG is not set -# CONFIG_UNICODE is not set -CONFIG_IO_WQ=y -# end of File systems - -# -# Security options -# -CONFIG_KEYS=y -# CONFIG_KEYS_REQUEST_CACHE is not set -CONFIG_PERSISTENT_KEYRINGS=y -CONFIG_TRUSTED_KEYS=y -CONFIG_TRUSTED_KEYS_TPM=y -CONFIG_ENCRYPTED_KEYS=y -# CONFIG_USER_DECRYPTED_DATA is not set -# CONFIG_KEY_DH_OPERATIONS is not set -# CONFIG_SECURITY_DMESG_RESTRICT is not set -CONFIG_SECURITY=y -CONFIG_SECURITYFS=y -CONFIG_SECURITY_NETWORK=y -CONFIG_SECURITY_INFINIBAND=y -CONFIG_SECURITY_NETWORK_XFRM=y -CONFIG_SECURITY_PATH=y -CONFIG_LSM_MMAP_MIN_ADDR=65535 -CONFIG_HARDENED_USERCOPY=y -CONFIG_FORTIFY_SOURCE=y -# CONFIG_STATIC_USERMODEHELPER is not set -CONFIG_SECURITY_SELINUX=y 
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_DEVELOP=y -CONFIG_SECURITY_SELINUX_AVC_STATS=y -CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 -CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 -# CONFIG_SECURITY_SELINUX_DEBUG is not set -# CONFIG_SECURITY_SMACK is not set -# CONFIG_SECURITY_TOMOYO is not set -# CONFIG_SECURITY_APPARMOR is not set -# CONFIG_SECURITY_LOADPIN is not set -CONFIG_SECURITY_YAMA=y -# CONFIG_SECURITY_SAFESETID is not set -# CONFIG_SECURITY_LOCKDOWN_LSM is not set -# CONFIG_SECURITY_LANDLOCK is not set -CONFIG_INTEGRITY=y -CONFIG_INTEGRITY_SIGNATURE=y -CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y -CONFIG_INTEGRITY_TRUSTED_KEYRING=y -CONFIG_INTEGRITY_PLATFORM_KEYRING=y -# CONFIG_INTEGRITY_MACHINE_KEYRING is not set -CONFIG_LOAD_UEFI_KEYS=y -CONFIG_INTEGRITY_AUDIT=y -CONFIG_IMA=y -# CONFIG_IMA_KEXEC is not set -CONFIG_IMA_MEASURE_PCR_IDX=10 -CONFIG_IMA_LSM_RULES=y -# CONFIG_IMA_NG_TEMPLATE is not set -CONFIG_IMA_SIG_TEMPLATE=y -CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" -# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set -CONFIG_IMA_DEFAULT_HASH_SHA256=y -# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set -# CONFIG_IMA_DEFAULT_HASH_SM3 is not set -CONFIG_IMA_DEFAULT_HASH="sha256" -CONFIG_IMA_WRITE_POLICY=y -CONFIG_IMA_READ_POLICY=y -CONFIG_IMA_APPRAISE=y -# CONFIG_IMA_ARCH_POLICY is not set -CONFIG_IMA_APPRAISE_BUILD_POLICY=y -# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set -# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set -# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set -# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set -CONFIG_IMA_APPRAISE_BOOTPARAM=y -# CONFIG_IMA_APPRAISE_MODSIG is not set -CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y -CONFIG_IMA_BLACKLIST_KEYRING=y -CONFIG_IMA_LOAD_X509=y -CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" -# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set -CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y -CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y -# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set -# 
CONFIG_IMA_DISABLE_HTABLE is not set -CONFIG_EVM=y -CONFIG_EVM_ATTR_FSUUID=y -# CONFIG_EVM_ADD_XATTRS is not set -CONFIG_EVM_LOAD_X509=y -CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" -CONFIG_DEFAULT_SECURITY_SELINUX=y -# CONFIG_DEFAULT_SECURITY_DAC is not set -CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" - -# -# Kernel hardening options -# - -# -# Memory initialization -# -CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y -CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y -CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y -CONFIG_INIT_STACK_NONE=y -# CONFIG_INIT_STACK_ALL_PATTERN is not set -# CONFIG_INIT_STACK_ALL_ZERO is not set -# CONFIG_GCC_PLUGIN_STACKLEAK is not set -# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set -# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set -CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y -# CONFIG_ZERO_CALL_USED_REGS is not set -# end of Memory initialization - -# -# Hardening of kernel data structures -# -CONFIG_LIST_HARDENED=y -CONFIG_BUG_ON_DATA_CORRUPTION=y -# end of Hardening of kernel data structures - -CONFIG_CC_HAS_RANDSTRUCT=y -CONFIG_RANDSTRUCT_NONE=y -# CONFIG_RANDSTRUCT_FULL is not set -# CONFIG_RANDSTRUCT_PERFORMANCE is not set -# end of Kernel hardening options -# end of Security options - -CONFIG_XOR_BLOCKS=m -CONFIG_ASYNC_CORE=m -CONFIG_ASYNC_MEMCPY=m -CONFIG_ASYNC_XOR=m -CONFIG_ASYNC_PQ=m -CONFIG_ASYNC_RAID6_RECOV=m -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_FIPS=y -CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" -# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_SIG2=y -CONFIG_CRYPTO_SKCIPHER=y -CONFIG_CRYPTO_SKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=m -CONFIG_CRYPTO_AKCIPHER2=y -CONFIG_CRYPTO_AKCIPHER=y -CONFIG_CRYPTO_KPP2=y -CONFIG_CRYPTO_KPP=m -CONFIG_CRYPTO_ACOMP2=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -CONFIG_CRYPTO_USER=m -# 
CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_NULL2=y -CONFIG_CRYPTO_PCRYPT=m -CONFIG_CRYPTO_CRYPTD=y -CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_TEST=m -# end of Crypto core or helper - -# -# Public-key cryptography -# -CONFIG_CRYPTO_RSA=y -CONFIG_CRYPTO_DH=m -# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set -CONFIG_CRYPTO_ECC=m -CONFIG_CRYPTO_ECDH=m -# CONFIG_CRYPTO_ECDSA is not set -# CONFIG_CRYPTO_ECRDSA is not set -CONFIG_CRYPTO_SM2=y -CONFIG_CRYPTO_CURVE25519=m -# end of Public-key cryptography - -# -# Block ciphers -# -CONFIG_CRYPTO_AES=y -# CONFIG_CRYPTO_AES_TI is not set -CONFIG_CRYPTO_ANUBIS=m -# CONFIG_CRYPTO_ARIA is not set -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_BLOWFISH_COMMON=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST_COMMON=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_DES=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SEED=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SM4_GENERIC=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_TWOFISH_COMMON=m -# end of Block ciphers - -# -# Length-preserving ciphers and modes -# -# CONFIG_CRYPTO_ADIANTUM is not set -CONFIG_CRYPTO_ARC4=m -CONFIG_CRYPTO_CHACHA20=m -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_CFB=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_CTS=y -CONFIG_CRYPTO_ECB=y -# CONFIG_CRYPTO_HCTR2 is not set -# CONFIG_CRYPTO_KEYWRAP is not set -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_OFB=y -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=y -# end of Length-preserving ciphers and modes - -# -# AEAD (authenticated encryption with associated data) ciphers -# -# CONFIG_CRYPTO_AEGIS128 is not set -CONFIG_CRYPTO_CHACHA20POLY1305=m -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=y -CONFIG_CRYPTO_GENIV=m -CONFIG_CRYPTO_SEQIV=m -CONFIG_CRYPTO_ECHAINIV=m -CONFIG_CRYPTO_ESSIV=m -# end of AEAD (authenticated encryption with associated data) ciphers - -# -# Hashes, digests, and MACs -# 
-CONFIG_CRYPTO_BLAKE2B=m -CONFIG_CRYPTO_CMAC=m -CONFIG_CRYPTO_GHASH=y -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_POLY1305=m -CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_SHA3=y -CONFIG_CRYPTO_SM3=y -CONFIG_CRYPTO_SM3_GENERIC=y -# CONFIG_CRYPTO_STREEBOG is not set -CONFIG_CRYPTO_VMAC=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_XXHASH=m -# end of Hashes, digests, and MACs - -# -# CRCs (cyclic redundancy checks) -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_CRCT10DIF=y -CONFIG_CRYPTO_CRC64_ROCKSOFT=y -# end of CRCs (cyclic redundancy checks) - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_LZO=y -# CONFIG_CRYPTO_842 is not set -CONFIG_CRYPTO_LZ4=m -CONFIG_CRYPTO_LZ4HC=m -CONFIG_CRYPTO_ZSTD=m -# end of Compression - -# -# Random number generation -# -CONFIG_CRYPTO_ANSI_CPRNG=m -CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_DRBG_HMAC=y -CONFIG_CRYPTO_DRBG_HASH=y -CONFIG_CRYPTO_DRBG_CTR=y -CONFIG_CRYPTO_DRBG=y -CONFIG_CRYPTO_JITTERENTROPY=y -# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set -# end of Random number generation - -# -# Userspace interface -# -CONFIG_CRYPTO_USER_API=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_USER_API_SKCIPHER=y -CONFIG_CRYPTO_USER_API_RNG=y -# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set -CONFIG_CRYPTO_USER_API_AEAD=y -CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y -# CONFIG_CRYPTO_STATS is not set -# end of Userspace interface - -CONFIG_CRYPTO_HASH_INFO=y -# CONFIG_CRYPTO_NHPOLY1305_NEON is not set -CONFIG_CRYPTO_CHACHA20_NEON=m - -# -# Accelerated Cryptographic Algorithms for CPU (arm64) -# -CONFIG_CRYPTO_GHASH_ARM64_CE=m -CONFIG_CRYPTO_POLY1305_NEON=m -CONFIG_CRYPTO_SHA1_ARM64_CE=m -CONFIG_CRYPTO_SHA256_ARM64=m -CONFIG_CRYPTO_SHA2_ARM64_CE=m -# CONFIG_CRYPTO_SHA512_ARM64 is not set -# CONFIG_CRYPTO_SHA512_ARM64_CE is not set -# CONFIG_CRYPTO_SHA3_ARM64 is not set 
-CONFIG_CRYPTO_SM3_NEON=m -CONFIG_CRYPTO_SM3_ARM64_CE=m -# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set -CONFIG_CRYPTO_AES_ARM64=y -CONFIG_CRYPTO_AES_ARM64_CE=y -CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y -CONFIG_CRYPTO_AES_ARM64_BS=m -CONFIG_CRYPTO_SM4_ARM64_CE=m -CONFIG_CRYPTO_SM4_ARM64_CE_BLK=m -CONFIG_CRYPTO_SM4_ARM64_NEON_BLK=m -CONFIG_CRYPTO_AES_ARM64_CE_CCM=y -CONFIG_CRYPTO_SM4_ARM64_CE_CCM=m -CONFIG_CRYPTO_SM4_ARM64_CE_GCM=m -CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m -# end of Accelerated Cryptographic Algorithms for CPU (arm64) - -CONFIG_CRYPTO_HW=y -# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set -# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set -# CONFIG_CRYPTO_DEV_CCP is not set -CONFIG_CRYPTO_DEV_CPT=m -CONFIG_CAVIUM_CPT=m -CONFIG_CRYPTO_DEV_NITROX=m -CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m -# CONFIG_CRYPTO_DEV_OCTEONTX_CPT is not set -# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set -# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set -# CONFIG_CRYPTO_DEV_QAT_C62X is not set -# CONFIG_CRYPTO_DEV_QAT_4XXX is not set -# CONFIG_CRYPTO_DEV_QAT_420XX is not set -# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set -# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set -# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set -CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m -# CONFIG_CRYPTO_DEV_QCE is not set -# CONFIG_CRYPTO_DEV_QCOM_RNG is not set -CONFIG_CRYPTO_DEV_CHELSIO=m -# CONFIG_CRYPTO_DEV_VIRTIO is not set -# CONFIG_CRYPTO_DEV_SAFEXCEL is not set -# CONFIG_CRYPTO_DEV_CCREE is not set -CONFIG_CRYPTO_DEV_HISI_SEC=m -CONFIG_CRYPTO_DEV_HISI_SEC2=m -CONFIG_CRYPTO_DEV_HISI_QM=m -CONFIG_CRYPTO_DEV_HISI_ZIP=m -CONFIG_CRYPTO_DEV_HISI_HPRE=m -CONFIG_CRYPTO_DEV_HISI_TRNG=m -# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_X509_CERTIFICATE_PARSER=y -# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set -CONFIG_PKCS7_MESSAGE_PARSER=y -# CONFIG_PKCS7_TEST_KEY is not set -CONFIG_SIGNED_PE_FILE_VERIFICATION=y -# CONFIG_FIPS_SIGNATURE_SELFTEST is not set - -# -# 
Certificates for signature checking -# -CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" -CONFIG_MODULE_SIG_KEY_TYPE_RSA=y -# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set -CONFIG_SYSTEM_TRUSTED_KEYRING=y -CONFIG_SYSTEM_TRUSTED_KEYS="" -CONFIG_SYSTEM_EXTRA_CERTIFICATE=y -CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 -CONFIG_SECONDARY_TRUSTED_KEYRING=y -CONFIG_SYSTEM_BLACKLIST_KEYRING=y -CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" -# CONFIG_SYSTEM_REVOCATION_LIST is not set -# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set -# end of Certificates for signature checking - -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=m -CONFIG_RAID6_PQ_BENCHMARK=y -CONFIG_LINEAR_RANGES=y -# CONFIG_PACKING is not set -CONFIG_BITREVERSE=y -CONFIG_HAVE_ARCH_BITREVERSE=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_NET_UTILS=y -CONFIG_CORDIC=m -# CONFIG_PRIME_NUMBERS is not set -CONFIG_RATIONAL=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_ARCH_HAS_FAST_MULTIPLIER=y -CONFIG_ARCH_USE_SYM_ANNOTATIONS=y -CONFIG_INDIRECT_PIO=y -# CONFIG_TRACE_MMIO_ACCESS is not set - -# -# Crypto library routines -# -CONFIG_CRYPTO_LIB_UTILS=y -CONFIG_CRYPTO_LIB_AES=y -CONFIG_CRYPTO_LIB_ARC4=m -CONFIG_CRYPTO_LIB_GF128MUL=y -CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y -CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m -CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m -CONFIG_CRYPTO_LIB_CHACHA=m -CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m -CONFIG_CRYPTO_LIB_CURVE25519=m -CONFIG_CRYPTO_LIB_DES=m -CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9 -CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m -CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m -CONFIG_CRYPTO_LIB_POLY1305=m -CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m -CONFIG_CRYPTO_LIB_SHA1=y -CONFIG_CRYPTO_LIB_SHA256=y -# end of Crypto library routines - -CONFIG_CRC_CCITT=y -CONFIG_CRC16=y -CONFIG_CRC_T10DIF=y -CONFIG_CRC64_ROCKSOFT=y -CONFIG_CRC_ITU_T=m -CONFIG_CRC32=y -# CONFIG_CRC32_SELFTEST is not set -CONFIG_CRC32_SLICEBY8=y -# CONFIG_CRC32_SLICEBY4 is not set -# 
CONFIG_CRC32_SARWATE is not set -# CONFIG_CRC32_BIT is not set -CONFIG_CRC64=y -# CONFIG_CRC4 is not set -CONFIG_CRC7=m -CONFIG_LIBCRC32C=m -CONFIG_CRC8=m -CONFIG_XXHASH=y -CONFIG_AUDIT_GENERIC=y -CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y -CONFIG_AUDIT_COMPAT_GENERIC=y -# CONFIG_RANDOM32_SELFTEST is not set -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_LZ4_COMPRESS=m -CONFIG_LZ4HC_COMPRESS=m -CONFIG_LZ4_DECOMPRESS=y -CONFIG_ZSTD_COMMON=y -CONFIG_ZSTD_COMPRESS=m -CONFIG_ZSTD_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_MICROLZMA=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_DECOMPRESS_LZ4=y -CONFIG_DECOMPRESS_ZSTD=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_REED_SOLOMON=y -CONFIG_REED_SOLOMON_ENC8=y -CONFIG_REED_SOLOMON_DEC8=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=m -CONFIG_TEXTSEARCH_BM=m -CONFIG_TEXTSEARCH_FSM=m -CONFIG_BTREE=y -CONFIG_INTERVAL_TREE=y -CONFIG_XARRAY_MULTI=y -CONFIG_ASSOCIATIVE_ARRAY=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT=y -CONFIG_HAS_IOPORT_MAP=y -CONFIG_HAS_DMA=y -CONFIG_DMA_OPS=y -CONFIG_NEED_SG_DMA_FLAGS=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_DMA_DECLARE_COHERENT=y -CONFIG_ARCH_HAS_SETUP_DMA_OPS=y -CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS=y -CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y -CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y -CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y -CONFIG_SWIOTLB=y -# CONFIG_SWIOTLB_DYNAMIC is not set -CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y -# CONFIG_DMA_RESTRICTED_POOL is not set -CONFIG_DMA_NONCOHERENT_MMAP=y -CONFIG_DMA_COHERENT_POOL=y -CONFIG_DMA_DIRECT_REMAP=y -CONFIG_DMA_CMA=y -# CONFIG_DMA_NUMA_CMA is not set - -# -# Default contiguous memory area size: 
-# -CONFIG_CMA_SIZE_MBYTES=64 -CONFIG_CMA_SIZE_SEL_MBYTES=y -# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set -# CONFIG_CMA_SIZE_SEL_MIN is not set -# CONFIG_CMA_SIZE_SEL_MAX is not set -CONFIG_CMA_ALIGNMENT=8 -CONFIG_DMA_API_DEBUG=y -CONFIG_DMA_API_DEBUG_SG=y -# CONFIG_DMA_MAP_BENCHMARK is not set -CONFIG_SGL_ALLOC=y -CONFIG_CHECK_SIGNATURE=y -# CONFIG_CPUMASK_OFFSTACK is not set -CONFIG_CPU_RMAP=y -CONFIG_DQL=y -CONFIG_GLOB=y -# CONFIG_GLOB_SELFTEST is not set -CONFIG_NLATTR=y -CONFIG_CLZ_TAB=y -CONFIG_IRQ_POLL=y -CONFIG_MPILIB=y -CONFIG_SIGNATURE=y -CONFIG_DIMLIB=y -CONFIG_LIBFDT=y -CONFIG_OID_REGISTRY=y -CONFIG_UCS2_STRING=y -CONFIG_HAVE_GENERIC_VDSO=y -CONFIG_GENERIC_GETTIMEOFDAY=y -CONFIG_GENERIC_VDSO_TIME_NS=y -CONFIG_FONT_SUPPORT=y -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -CONFIG_SG_SPLIT=y -CONFIG_SG_POOL=y -CONFIG_ARCH_HAS_PMEM_API=y -CONFIG_MEMREGION=y -CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y -CONFIG_ARCH_HAS_COPY_MC=y -CONFIG_ARCH_STACKWALK=y -CONFIG_STACKDEPOT=y -CONFIG_STACKDEPOT_ALWAYS_INIT=y -CONFIG_SBITMAP=y -CONFIG_PARMAN=m -CONFIG_OBJAGG=m -# end of Library routines - -CONFIG_GENERIC_IOREMAP=y -CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y -CONFIG_PLDMFW=y -CONFIG_ASN1_ENCODER=y - -# -# Kernel hacking -# - -# -# printk and dmesg options -# -CONFIG_PRINTK_TIME=y -# CONFIG_PRINTK_CALLER is not set -# CONFIG_STACKTRACE_BUILD_ID is not set -CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 -CONFIG_CONSOLE_LOGLEVEL_QUIET=4 -CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 -CONFIG_BOOT_PRINTK_DELAY=y -CONFIG_DYNAMIC_DEBUG=y -CONFIG_DYNAMIC_DEBUG_CORE=y -CONFIG_SYMBOLIC_ERRNAME=y -CONFIG_DEBUG_BUGVERBOSE=y -# end of printk and dmesg options - -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_MISC=y - -# -# Compile-time checks and compiler options -# -CONFIG_DEBUG_INFO=y -CONFIG_AS_HAS_NON_CONST_LEB128=y -# CONFIG_DEBUG_INFO_NONE is not set -CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y -# CONFIG_DEBUG_INFO_DWARF4 is not set -# CONFIG_DEBUG_INFO_DWARF5 is not set -# 
CONFIG_DEBUG_INFO_REDUCED is not set -CONFIG_DEBUG_INFO_COMPRESSED_NONE=y -# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set -# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set -# CONFIG_DEBUG_INFO_SPLIT is not set -CONFIG_DEBUG_INFO_BTF=y -# CONFIG_GDB_SCRIPTS is not set -CONFIG_FRAME_WARN=2048 -CONFIG_STRIP_ASM_SYMS=y -# CONFIG_READABLE_ASM is not set -# CONFIG_HEADERS_INSTALL is not set -CONFIG_DEBUG_SECTION_MISMATCH=y -CONFIG_SECTION_MISMATCH_WARN_ONLY=y -CONFIG_ARCH_WANT_FRAME_POINTERS=y -CONFIG_FRAME_POINTER=y -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -# end of Compile-time checks and compiler options - -# -# Generic Kernel Debugging Instruments -# -CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_MAGIC_SYSRQ_SERIAL=y -CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" -CONFIG_DEBUG_FS=y -CONFIG_DEBUG_FS_ALLOW_ALL=y -# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set -# CONFIG_DEBUG_FS_ALLOW_NONE is not set -CONFIG_HAVE_ARCH_KGDB=y -CONFIG_KGDB=y -CONFIG_KGDB_HONOUR_BLOCKLIST=y -CONFIG_KGDB_SERIAL_CONSOLE=y -CONFIG_KGDB_TESTS=y -# CONFIG_KGDB_TESTS_ON_BOOT is not set -CONFIG_KGDB_KDB=y -CONFIG_KDB_DEFAULT_ENABLE=0x0 -CONFIG_KDB_KEYBOARD=y -CONFIG_KDB_CONTINUE_CATASTROPHIC=0 -CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y -CONFIG_UBSAN=y -# CONFIG_UBSAN_TRAP is not set -CONFIG_CC_HAS_UBSAN_BOUNDS_STRICT=y -CONFIG_CC_HAS_UBSAN_ARRAY_BOUNDS=y -CONFIG_UBSAN_BOUNDS=y -CONFIG_UBSAN_BOUNDS_STRICT=y -CONFIG_UBSAN_ARRAY_BOUNDS=y -CONFIG_UBSAN_SHIFT=y -# CONFIG_UBSAN_DIV_ZERO is not set -# CONFIG_UBSAN_UNREACHABLE is not set -CONFIG_UBSAN_BOOL=y -CONFIG_UBSAN_ENUM=y -# CONFIG_UBSAN_ALIGNMENT is not set -CONFIG_UBSAN_SANITIZE_ALL=y -# CONFIG_TEST_UBSAN is not set -CONFIG_HAVE_KCSAN_COMPILER=y -# end of Generic Kernel Debugging Instruments - -# -# Networking Debugging -# -# CONFIG_NET_DEV_REFCNT_TRACKER is not set -# CONFIG_NET_NS_REFCNT_TRACKER is not set -# CONFIG_DEBUG_NET is not set -# end of Networking Debugging - -# -# Memory Debugging -# -CONFIG_PAGE_EXTENSION=y 
-CONFIG_DEBUG_PAGEALLOC=y -# CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT is not set -CONFIG_SLUB_DEBUG=y -# CONFIG_SLUB_DEBUG_ON is not set -# CONFIG_PAGE_OWNER is not set -# CONFIG_PAGE_TABLE_CHECK is not set -# CONFIG_PAGE_POISONING is not set -CONFIG_DEBUG_PAGE_REF=y -# CONFIG_DEBUG_RODATA_TEST is not set -CONFIG_ARCH_HAS_DEBUG_WX=y -# CONFIG_DEBUG_WX is not set -CONFIG_GENERIC_PTDUMP=y -# CONFIG_PTDUMP_DEBUGFS is not set -CONFIG_HAVE_DEBUG_KMEMLEAK=y -CONFIG_DEBUG_KMEMLEAK=y -CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000 -CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y -CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y -# CONFIG_PER_VMA_LOCK_STATS is not set -CONFIG_DEBUG_OBJECTS=y -# CONFIG_DEBUG_OBJECTS_SELFTEST is not set -CONFIG_DEBUG_OBJECTS_FREE=y -CONFIG_DEBUG_OBJECTS_TIMERS=y -CONFIG_DEBUG_OBJECTS_WORK=y -CONFIG_DEBUG_OBJECTS_RCU_HEAD=y -CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y -CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 -# CONFIG_SHRINKER_DEBUG is not set -CONFIG_DEBUG_STACK_USAGE=y -# CONFIG_SCHED_STACK_END_CHECK is not set -CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_VM_PGTABLE is not set -CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -# CONFIG_DEBUG_VIRTUAL is not set -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_DEBUG_PER_CPU_MAPS=y -CONFIG_HAVE_ARCH_KASAN=y -CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y -CONFIG_HAVE_ARCH_KASAN_HW_TAGS=y -CONFIG_HAVE_ARCH_KASAN_VMALLOC=y -CONFIG_CC_HAS_KASAN_GENERIC=y -CONFIG_CC_HAS_KASAN_SW_TAGS=y -CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y -CONFIG_KASAN=y -CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y -CONFIG_KASAN_GENERIC=y -# CONFIG_KASAN_SW_TAGS is not set -# CONFIG_KASAN_HW_TAGS is not set -# CONFIG_KASAN_OUTLINE is not set -CONFIG_KASAN_INLINE=y -CONFIG_KASAN_STACK=y -CONFIG_KASAN_VMALLOC=y -# CONFIG_KASAN_MODULE_TEST is not set -CONFIG_HAVE_ARCH_KFENCE=y -CONFIG_KFENCE=y -CONFIG_KFENCE_SAMPLE_INTERVAL=100 -CONFIG_KFENCE_NUM_OBJECTS=255 -CONFIG_KFENCE_DEFERRABLE=y -CONFIG_KFENCE_STRESS_TEST_FAULTS=0 -# end of Memory Debugging - -CONFIG_DEBUG_SHIRQ=y - -# -# 
Debug Oops, Lockups and Hangs -# -CONFIG_PANIC_ON_OOPS=y -CONFIG_PANIC_ON_OOPS_VALUE=1 -CONFIG_PANIC_TIMEOUT=1 -CONFIG_LOCKUP_DETECTOR=y -CONFIG_SOFTLOCKUP_DETECTOR=y -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y -CONFIG_SDEI_WATCHDOG=y -CONFIG_HARDLOCKUP_DETECTOR=y -# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set -CONFIG_HARDLOCKUP_DETECTOR_PERF=y -# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set -# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set -CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y -CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -CONFIG_WQ_WATCHDOG=y -# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set -# CONFIG_TEST_LOCKUP is not set -# end of Debug Oops, Lockups and Hangs - -# -# Scheduler Debugging -# -CONFIG_SCHED_DEBUG=y -CONFIG_SCHED_INFO=y -CONFIG_SCHEDSTATS=y -CONFIG_SCHED_ACPU=y -# end of Scheduler Debugging - -# CONFIG_DEBUG_TIMEKEEPING is not set -CONFIG_DEBUG_PREEMPT=y - -# -# Lock Debugging (spinlocks, mutexes, etc...) -# -CONFIG_LOCK_DEBUGGING_SUPPORT=y -CONFIG_PROVE_LOCKING=y -# CONFIG_PROVE_RAW_LOCK_NESTING is not set -# CONFIG_LOCK_STAT is not set -CONFIG_DEBUG_RT_MUTEXES=y -CONFIG_DEBUG_SPINLOCK=y -CONFIG_DEBUG_MUTEXES=y -CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y -CONFIG_DEBUG_RWSEMS=y -CONFIG_DEBUG_LOCK_ALLOC=y -CONFIG_LOCKDEP=y -CONFIG_LOCKDEP_BITS=15 -CONFIG_LOCKDEP_CHAINS_BITS=16 -CONFIG_LOCKDEP_STACK_TRACE_BITS=19 -CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS=14 -CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS=12 -# CONFIG_DEBUG_LOCKDEP is not set -CONFIG_DEBUG_ATOMIC_SLEEP=y -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -CONFIG_LOCK_TORTURE_TEST=m -# CONFIG_WW_MUTEX_SELFTEST is not set -# CONFIG_SCF_TORTURE_TEST is not set -# CONFIG_CSD_LOCK_WAIT_DEBUG is not set -# end of Lock Debugging (spinlocks, mutexes, etc...) 
- -CONFIG_TRACE_IRQFLAGS=y -CONFIG_TRACE_IRQFLAGS_NMI=y -# CONFIG_DEBUG_IRQFLAGS is not set -CONFIG_STACKTRACE=y -# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set -# CONFIG_DEBUG_KOBJECT is not set -# CONFIG_DEBUG_KOBJECT_RELEASE is not set - -# -# Debug kernel data structures -# -CONFIG_DEBUG_LIST=y -# CONFIG_DEBUG_PLIST is not set -CONFIG_DEBUG_SG=y -CONFIG_DEBUG_NOTIFIERS=y -# CONFIG_DEBUG_MAPLE_TREE is not set -# end of Debug kernel data structures - -# -# RCU Debugging -# -CONFIG_PROVE_RCU=y -CONFIG_TORTURE_TEST=m -# CONFIG_RCU_SCALE_TEST is not set -CONFIG_RCU_TORTURE_TEST=m -# CONFIG_RCU_REF_SCALE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 -# CONFIG_RCU_CPU_STALL_CPUTIME is not set -# CONFIG_RCU_TRACE is not set -# CONFIG_RCU_EQS_DEBUG is not set -# end of RCU Debugging - -# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -CONFIG_LATENCYTOP=y -# CONFIG_DEBUG_CGROUP_REF is not set -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACER_MAX_TRACE=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_PREEMPTIRQ_TRACEPOINTS=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -# CONFIG_BOOTTIME_TRACING is not set -CONFIG_FUNCTION_TRACER=y -CONFIG_FUNCTION_GRAPH_TRACER=y -# CONFIG_FUNCTION_GRAPH_RETVAL is not set -CONFIG_DYNAMIC_FTRACE=y -CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y -CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y -CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y -# CONFIG_FUNCTION_PROFILER is not set -CONFIG_STACK_TRACER=y -# CONFIG_IRQSOFF_TRACER is not set -# 
CONFIG_PREEMPT_TRACER is not set -CONFIG_SCHED_TRACER=y -CONFIG_HWLAT_TRACER=y -CONFIG_OSNOISE_TRACER=y -CONFIG_TIMERLAT_TRACER=y -CONFIG_FTRACE_SYSCALLS=y -CONFIG_TRACER_SNAPSHOT=y -# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_PROBE_EVENTS_BTF_ARGS=y -CONFIG_KPROBE_EVENTS=y -# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set -CONFIG_UPROBE_EVENTS=y -CONFIG_BPF_EVENTS=y -CONFIG_DYNAMIC_EVENTS=y -CONFIG_PROBE_EVENTS=y -# CONFIG_BPF_KPROBE_OVERRIDE is not set -CONFIG_FTRACE_MCOUNT_RECORD=y -CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y -CONFIG_TRACING_MAP=y -CONFIG_SYNTH_EVENTS=y -# CONFIG_USER_EVENTS is not set -CONFIG_HIST_TRIGGERS=y -# CONFIG_TRACE_EVENT_INJECT is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -CONFIG_RING_BUFFER_BENCHMARK=m -# CONFIG_TRACE_EVAL_MAP_FILE is not set -# CONFIG_FTRACE_RECORD_RECURSION is not set -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set -# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set -# CONFIG_PREEMPTIRQ_DELAY_TEST is not set -# CONFIG_SYNTH_EVENT_GEN_TEST is not set -# CONFIG_KPROBE_EVENT_GEN_TEST is not set -# CONFIG_HIST_TRIGGERS_DEBUG is not set -# CONFIG_RV is not set -# CONFIG_SAMPLES is not set -CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y -CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y -CONFIG_STRICT_DEVMEM=y -# CONFIG_IO_STRICT_DEVMEM is not set - -# -# arm64 Debugging -# -CONFIG_PID_IN_CONTEXTIDR=y -# CONFIG_DEBUG_EFI is not set -# CONFIG_ARM64_RELOC_TEST is not set -CONFIG_CORESIGHT=m -CONFIG_CORESIGHT_LINKS_AND_SINKS=m -CONFIG_CORESIGHT_LINK_AND_SINK_TMC=m -CONFIG_CORESIGHT_CATU=m -CONFIG_CORESIGHT_SINK_TPIU=m -CONFIG_CORESIGHT_SINK_ETBV10=m -CONFIG_CORESIGHT_SOURCE_ETM4X=m -CONFIG_ETM4X_IMPDEF_FEATURE=y -CONFIG_CORESIGHT_STM=m -CONFIG_CORESIGHT_CPU_DEBUG=m -# CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON is not set -CONFIG_CORESIGHT_CTI=m 
-CONFIG_CORESIGHT_CTI_INTEGRATION_REGS=y -# CONFIG_CORESIGHT_TRBE is not set -# CONFIG_ULTRASOC_SMB is not set -# CONFIG_CORESIGHT_TPDM is not set -# CONFIG_CORESIGHT_TPDA is not set -# CONFIG_CORESIGHT_DUMMY is not set -# end of arm64 Debugging - -# -# Kernel Testing and Coverage -# -# CONFIG_KUNIT is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -CONFIG_FUNCTION_ERROR_INJECTION=y -CONFIG_FAULT_INJECTION=y -CONFIG_FAILSLAB=y -CONFIG_FAIL_PAGE_ALLOC=y -# CONFIG_FAULT_INJECTION_USERCOPY is not set -CONFIG_FAIL_MAKE_REQUEST=y -CONFIG_FAIL_IO_TIMEOUT=y -# CONFIG_FAIL_FUTEX is not set -CONFIG_FAULT_INJECTION_DEBUG_FS=y -# CONFIG_FAIL_FUNCTION is not set -CONFIG_FAIL_MMC_REQUEST=y -# CONFIG_FAIL_SUNRPC is not set -# CONFIG_FAULT_INJECTION_CONFIGFS is not set -CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y -CONFIG_ARCH_HAS_KCOV=y -CONFIG_CC_HAS_SANCOV_TRACE_PC=y -# CONFIG_KCOV is not set -CONFIG_RUNTIME_TESTING_MENU=y -# CONFIG_TEST_DHRY is not set -# CONFIG_LKDTM is not set -# CONFIG_TEST_MIN_HEAP is not set -# CONFIG_TEST_DIV64 is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_TEST_REF_TRACKER is not set -# CONFIG_RBTREE_TEST is not set -# CONFIG_REED_SOLOMON_TEST is not set -# CONFIG_INTERVAL_TREE_TEST is not set -CONFIG_PERCPU_TEST=m -CONFIG_ATOMIC64_SELFTEST=y -CONFIG_ASYNC_RAID6_TEST=m -# CONFIG_TEST_HEXDUMP is not set -# CONFIG_STRING_SELFTEST is not set -CONFIG_TEST_STRING_HELPERS=m -CONFIG_TEST_KSTRTOX=y -# CONFIG_TEST_PRINTF is not set -# CONFIG_TEST_SCANF is not set -# CONFIG_TEST_BITMAP is not set -# CONFIG_TEST_UUID is not set -# CONFIG_TEST_XARRAY is not set -# CONFIG_TEST_MAPLE_TREE is not set -# CONFIG_TEST_RHASHTABLE is not set -# CONFIG_TEST_IDA is not set -# CONFIG_TEST_PARMAN is not set -# CONFIG_TEST_LKM is not set -# CONFIG_TEST_BITOPS is not set -# CONFIG_TEST_VMALLOC is not set -# CONFIG_TEST_USER_COPY is not set -CONFIG_TEST_BPF=m -# CONFIG_TEST_BLACKHOLE_DEV is not set -# CONFIG_FIND_BIT_BENCHMARK is not set -# 
CONFIG_TEST_FIRMWARE is not set -# CONFIG_TEST_SYSCTL is not set -# CONFIG_TEST_UDELAY is not set -# CONFIG_TEST_STATIC_KEYS is not set -# CONFIG_TEST_DYNAMIC_DEBUG is not set -# CONFIG_TEST_KMOD is not set -# CONFIG_TEST_MEMCAT_P is not set -# CONFIG_TEST_OBJAGG is not set -# CONFIG_TEST_MEMINIT is not set -# CONFIG_TEST_FREE_PAGES is not set -CONFIG_ARCH_USE_MEMTEST=y -# CONFIG_MEMTEST is not set -# end of Kernel Testing and Coverage - -# -# Rust hacking -# -# end of Rust hacking -# end of Kernel hacking diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig deleted file mode 100644 index 93c4f1d5c89d..000000000000 --- a/arch/arm64/configs/anolis_defconfig +++ /dev/null @@ -1,7184 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. -# Linux/arm64 6.6.25 Kernel Configuration -# -CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" -CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=200000 -CONFIG_CLANG_VERSION=0 -CONFIG_AS_IS_GNU=y -CONFIG_AS_VERSION=25000 -CONFIG_LD_IS_BFD=y -CONFIG_LD_VERSION=25000 -CONFIG_LLD_VERSION=0 -CONFIG_CC_CAN_LINK=y -CONFIG_CC_CAN_LINK_STATIC=y -CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y -CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y -CONFIG_TOOLS_SUPPORT_RELR=y -CONFIG_CC_HAS_ASM_INLINE=y -CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y -CONFIG_PAHOLE_VERSION=117 -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_TABLE_SORT=y -CONFIG_THREAD_INFO_IN_TASK=y - -# -# General setup -# -CONFIG_INIT_ENV_ARG_LIMIT=32 -# CONFIG_COMPILE_TEST is not set -# CONFIG_WERROR is not set -CONFIG_LOCALVERSION="" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_BUILD_SALT="" -CONFIG_DEFAULT_INIT="" -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_SYSVIPC_COMPAT=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -# CONFIG_WATCH_QUEUE is not set -CONFIG_CROSS_MEMORY_ATTACH=y -# CONFIG_USELIB is not set -CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_AUDITSYSCALL=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_PROBE=y 
-CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_SHOW_LEVEL=y -CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y -CONFIG_GENERIC_IRQ_MIGRATION=y -CONFIG_GENERIC_IRQ_INJECTION=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y -CONFIG_GENERIC_IRQ_IPI=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_IRQ_MSI_IOMMU=y -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y -# CONFIG_GENERIC_IRQ_DEBUGFS is not set -# end of IRQ subsystem - -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_ARCH_HAS_TICK_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y -CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y -CONFIG_CONTEXT_TRACKING=y -CONFIG_CONTEXT_TRACKING_IDLE=y - -# -# Timers subsystem -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set -# CONFIG_NO_HZ_IDLE is not set -CONFIG_NO_HZ_FULL=y -CONFIG_CONTEXT_TRACKING_USER=y -# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -# end of Timers subsystem - -CONFIG_BPF=y -CONFIG_HAVE_EBPF_JIT=y -CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y - -# -# BPF subsystem -# -CONFIG_BPF_SYSCALL=y -CONFIG_BPF_JIT=y -CONFIG_BPF_JIT_ALWAYS_ON=y -CONFIG_BPF_JIT_DEFAULT_ON=y -CONFIG_BPF_UNPRIV_DEFAULT_OFF=y -# CONFIG_BPF_PRELOAD is not set -CONFIG_BPF_LSM=y -# end of BPF subsystem - -CONFIG_PREEMPT_BUILD=y -# CONFIG_PREEMPT_NONE is not set -CONFIG_PREEMPT_VOLUNTARY=y -# CONFIG_PREEMPT is not set -CONFIG_PREEMPT_COUNT=y -CONFIG_PREEMPTION=y -CONFIG_PREEMPT_DYNAMIC=y -CONFIG_SCHED_CORE=y - -# -# CPU/Task time and stats accounting -# -CONFIG_VIRT_CPU_ACCOUNTING=y -CONFIG_VIRT_CPU_ACCOUNTING_GEN=y -# CONFIG_IRQ_TIME_ACCOUNTING is not set -CONFIG_HAVE_SCHED_AVG_IRQ=y -CONFIG_SCHED_THERMAL_PRESSURE=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_PSI=y -CONFIG_PSI_DEFAULT_DISABLED=y -# end 
of CPU/Task time and stats accounting - -CONFIG_CPU_ISOLATION=y - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -CONFIG_PREEMPT_RCU=y -# CONFIG_RCU_EXPERT is not set -CONFIG_TREE_SRCU=y -CONFIG_TASKS_RCU_GENERIC=y -CONFIG_TASKS_RCU=y -CONFIG_TASKS_RUDE_RCU=y -CONFIG_TASKS_TRACE_RCU=y -CONFIG_RCU_STALL_COMMON=y -CONFIG_RCU_NEED_SEGCBLIST=y -CONFIG_RCU_NOCB_CPU=y -# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set -# CONFIG_RCU_LAZY is not set -# end of RCU Subsystem - -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -# CONFIG_IKHEADERS is not set -CONFIG_LOG_BUF_SHIFT=20 -CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 -# CONFIG_PRINTK_INDEX is not set -CONFIG_GENERIC_SCHED_CLOCK=y - -# -# Scheduler features -# -# end of Scheduler features - -CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y -CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y -CONFIG_CC_HAS_INT128=y -CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" -CONFIG_GCC10_NO_ARRAY_BOUNDS=y -CONFIG_CC_NO_ARRAY_BOUNDS=y -CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_NUMA_BALANCING=y -CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y -CONFIG_CGROUPS=y -CONFIG_PAGE_COUNTER=y -# CONFIG_CGROUP_FAVOR_DYNMODS is not set -CONFIG_MEMCG=y -CONFIG_MEMCG_KMEM=y -CONFIG_BLK_CGROUP=y -CONFIG_CGROUP_WRITEBACK=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_SCHED_MM_CID=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_RDMA=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_DEVICE=y -CONFIG_SCHED_SLI=y -CONFIG_RICH_CONTAINER=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_BPF=y -# CONFIG_CGROUP_MISC is not set -# CONFIG_CGROUP_DEBUG is not set -CONFIG_SOCK_CGROUP_DATA=y -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_TIME_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_CHECKPOINT_RESTORE=y -CONFIG_SCHED_AUTOGROUP=y -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y 
-CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -CONFIG_RD_LZ4=y -CONFIG_RD_ZSTD=y -# CONFIG_BOOT_CONFIG is not set -CONFIG_INITRAMFS_PRESERVE_MTIME=y -CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_LD_ORPHAN_WARN=y -CONFIG_LD_ORPHAN_WARN_LEVEL="warn" -CONFIG_SYSCTL=y -CONFIG_HAVE_UID16=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -# CONFIG_EXPERT is not set -CONFIG_UID16=y -CONFIG_MULTIUSER=y -CONFIG_SYSFS_SYSCALL=y -CONFIG_FHANDLE=y -CONFIG_POSIX_TIMERS=y -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_FUTEX_PI=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_IO_URING=y -CONFIG_ADVISE_SYSCALLS=y -CONFIG_MEMBARRIER=y -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_SELFTEST is not set -CONFIG_KALLSYMS_ALL=y -CONFIG_KALLSYMS_BASE_RELATIVE=y -CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y -CONFIG_KCMP=y -CONFIG_RSEQ=y -CONFIG_CACHESTAT_SYSCALL=y -CONFIG_HAVE_PERF_EVENTS=y -CONFIG_GUEST_PERF_EVENTS=y -CONFIG_PERF_USE_VMALLOC=y - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -CONFIG_DEBUG_PERF_USE_VMALLOC=y -# end of Kernel Performance Events And Counters - -CONFIG_SYSTEM_DATA_VERIFICATION=y -CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y - -# -# Kexec and crash features -# -CONFIG_CRASH_CORE=y -CONFIG_KEXEC_CORE=y -CONFIG_HAVE_IMA_KEXEC=y -CONFIG_KEXEC=y -CONFIG_KEXEC_FILE=y -CONFIG_KEXEC_SIG=y -CONFIG_KEXEC_IMAGE_VERIFY_SIG=y -CONFIG_CRASH_DUMP=y -# end of Kexec and crash features -# end of General setup - -CONFIG_ARM64=y -CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y -CONFIG_64BIT=y -CONFIG_MMU=y -CONFIG_ARM64_PAGE_SHIFT=12 -CONFIG_ARM64_CONT_PTE_SHIFT=4 -CONFIG_ARM64_CONT_PMD_SHIFT=4 -CONFIG_ARCH_MMAP_RND_BITS_MIN=18 -CONFIG_ARCH_MMAP_RND_BITS_MAX=33 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 
-CONFIG_LOCKDEP_SUPPORT=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_CSUM=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_SMP=y -CONFIG_KERNEL_MODE_NEON=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_PGTABLE_LEVELS=4 -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_ARCH_PROC_KCORE_TEXT=y -CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y - -# -# Platform selection -# -# CONFIG_ARCH_ACTIONS is not set -# CONFIG_ARCH_SUNXI is not set -# CONFIG_ARCH_ALPINE is not set -# CONFIG_ARCH_APPLE is not set -# CONFIG_ARCH_BCM is not set -# CONFIG_ARCH_BERLIN is not set -# CONFIG_ARCH_BITMAIN is not set -# CONFIG_ARCH_EXYNOS is not set -# CONFIG_ARCH_SPARX5 is not set -# CONFIG_ARCH_K3 is not set -# CONFIG_ARCH_LG1K is not set -CONFIG_ARCH_HISI=y -# CONFIG_ARCH_KEEMBAY is not set -# CONFIG_ARCH_MEDIATEK is not set -# CONFIG_ARCH_MESON is not set -# CONFIG_ARCH_MVEBU is not set -# CONFIG_ARCH_NXP is not set -# CONFIG_ARCH_MA35 is not set -# CONFIG_ARCH_NPCM is not set -CONFIG_ARCH_PHYTIUM=y -CONFIG_ARCH_QCOM=y -# CONFIG_ARCH_REALTEK is not set -# CONFIG_ARCH_RENESAS is not set -# CONFIG_ARCH_ROCKCHIP is not set -CONFIG_ARCH_SEATTLE=y -# CONFIG_ARCH_INTEL_SOCFPGA is not set -# CONFIG_ARCH_STM32 is not set -# CONFIG_ARCH_SYNQUACER is not set -# CONFIG_ARCH_TEGRA is not set -# CONFIG_ARCH_SPRD is not set -CONFIG_ARCH_THUNDER=y -CONFIG_ARCH_THUNDER2=y -# CONFIG_ARCH_UNIPHIER is not set -CONFIG_ARCH_VEXPRESS=y -# CONFIG_ARCH_VISCONTI is not set -CONFIG_ARCH_XGENE=y -# CONFIG_ARCH_ZYNQMP is not set -# end of Platform selection - -# -# Kernel Features -# - -# -# ARM errata workarounds via the alternatives framework -# -CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y -CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y -CONFIG_ARM64_ERRATUM_826319=y -CONFIG_ARM64_ERRATUM_827319=y -CONFIG_ARM64_ERRATUM_824069=y -CONFIG_ARM64_ERRATUM_819472=y -CONFIG_ARM64_ERRATUM_832075=y -CONFIG_ARM64_ERRATUM_834220=y -CONFIG_ARM64_ERRATUM_1742098=y -CONFIG_ARM64_ERRATUM_845719=y 
-CONFIG_ARM64_ERRATUM_843419=y -CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y -CONFIG_ARM64_ERRATUM_1024718=y -CONFIG_ARM64_ERRATUM_1418040=y -CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y -CONFIG_ARM64_ERRATUM_1165522=y -CONFIG_ARM64_ERRATUM_1319367=y -CONFIG_ARM64_ERRATUM_1530923=y -CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y -CONFIG_ARM64_ERRATUM_2441007=y -CONFIG_ARM64_ERRATUM_1286807=y -CONFIG_ARM64_ERRATUM_1463225=y -CONFIG_ARM64_ERRATUM_1542419=y -CONFIG_ARM64_ERRATUM_1508412=y -CONFIG_ARM64_ERRATUM_2051678=y -CONFIG_ARM64_ERRATUM_2077057=y -CONFIG_ARM64_ERRATUM_2658417=y -CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y -CONFIG_ARM64_ERRATUM_2054223=y -CONFIG_ARM64_ERRATUM_2067961=y -CONFIG_ARM64_ERRATUM_2441009=y -CONFIG_ARM64_ERRATUM_2457168=y -CONFIG_ARM64_ERRATUM_2645198=y -CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD=y -CONFIG_ARM64_ERRATUM_2966298=y -CONFIG_ARM64_ERRATUM_3117295=y -CONFIG_CAVIUM_ERRATUM_22375=y -CONFIG_CAVIUM_ERRATUM_23144=y -CONFIG_CAVIUM_ERRATUM_23154=y -CONFIG_CAVIUM_ERRATUM_27456=y -CONFIG_CAVIUM_ERRATUM_30115=y -CONFIG_CAVIUM_TX2_ERRATUM_219=y -CONFIG_FUJITSU_ERRATUM_010001=y -CONFIG_HISILICON_ERRATUM_161600802=y -CONFIG_QCOM_FALKOR_ERRATUM_1003=y -CONFIG_QCOM_FALKOR_ERRATUM_1009=y -CONFIG_QCOM_QDF2400_ERRATUM_0065=y -CONFIG_QCOM_FALKOR_ERRATUM_E1041=y -CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y -CONFIG_ROCKCHIP_ERRATUM_3588001=y -CONFIG_SOCIONEXT_SYNQUACER_PREITS=y -# end of ARM errata workarounds via the alternatives framework - -CONFIG_ARM64_4K_PAGES=y -# CONFIG_ARM64_16K_PAGES is not set -# CONFIG_ARM64_64K_PAGES is not set -# CONFIG_ARM64_VA_BITS_39 is not set -CONFIG_ARM64_VA_BITS_48=y -CONFIG_ARM64_VA_BITS=48 -CONFIG_ARM64_PA_BITS_48=y -CONFIG_ARM64_PA_BITS=48 -# CONFIG_CPU_BIG_ENDIAN is not set -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_SCHED_MC=y -CONFIG_SCHED_CLUSTER=y -CONFIG_SCHED_SMT=y -CONFIG_NR_CPUS=1024 -CONFIG_HOTPLUG_CPU=y -CONFIG_NUMA=y -CONFIG_NODES_SHIFT=6 -# CONFIG_HZ_100 is not set -CONFIG_HZ_250=y -# CONFIG_HZ_300 is not set -# 
CONFIG_HZ_1000 is not set -CONFIG_HZ=250 -CONFIG_SCHED_HRTICK=y -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_HW_PERF_EVENTS=y -CONFIG_CC_HAVE_SHADOW_CALL_STACK=y -CONFIG_PARAVIRT=y -CONFIG_PARAVIRT_TIME_ACCOUNTING=y -CONFIG_ARCH_SUPPORTS_KEXEC=y -CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y -CONFIG_ARCH_SELECTS_KEXEC_FILE=y -CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y -CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y -CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y -CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y -CONFIG_TRANS_TABLE=y -# CONFIG_XEN is not set -CONFIG_ARCH_FORCE_MAX_ORDER=10 -CONFIG_UNMAP_KERNEL_AT_EL0=y -CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y -# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set -# CONFIG_ARM64_SW_TTBR0_PAN is not set -CONFIG_ARM64_TAGGED_ADDR_ABI=y -CONFIG_COMPAT=y -CONFIG_KUSER_HELPERS=y -# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set -# CONFIG_ARMV8_DEPRECATED is not set - -# -# ARMv8.1 architectural features -# -CONFIG_ARM64_HW_AFDBM=y -CONFIG_ARM64_PAN=y -CONFIG_AS_HAS_LSE_ATOMICS=y -CONFIG_ARM64_LSE_ATOMICS=y -CONFIG_ARM64_USE_LSE_ATOMICS=y -# end of ARMv8.1 architectural features - -# -# ARMv8.2 architectural features -# -CONFIG_AS_HAS_ARMV8_2=y -CONFIG_AS_HAS_SHA3=y -CONFIG_ARM64_PMEM=y -CONFIG_ARM64_RAS_EXTN=y -CONFIG_ARM64_CNP=y -# end of ARMv8.2 architectural features - -# -# ARMv8.3 architectural features -# -# CONFIG_ARM64_PTR_AUTH is not set -CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y -CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y -CONFIG_AS_HAS_ARMV8_3=y -CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y -CONFIG_AS_HAS_LDAPR=y -# end of ARMv8.3 architectural features - -# -# ARMv8.4 architectural features -# -CONFIG_ARM64_AMU_EXTN=y -CONFIG_AS_HAS_ARMV8_4=y -CONFIG_ARM64_TLB_RANGE=y -CONFIG_ARM64_MPAM=y -# end of ARMv8.4 architectural features - -# -# ARMv8.5 architectural features -# -CONFIG_AS_HAS_ARMV8_5=y -# CONFIG_ARM64_BTI is not set -CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y -CONFIG_ARM64_E0PD=y -CONFIG_ARM64_AS_HAS_MTE=y -CONFIG_ARM64_MTE=y -# end of ARMv8.5 architectural features - -# -# 
ARMv8.7 architectural features -# -CONFIG_ARM64_EPAN=y -# end of ARMv8.7 architectural features - -CONFIG_ARM64_SVE=y -CONFIG_ARM64_SME=y -CONFIG_ARM64_PSEUDO_NMI=y -# CONFIG_ARM64_DEBUG_PRIORITY_MASKING is not set -CONFIG_RELOCATABLE=y -CONFIG_RANDOMIZE_BASE=y -CONFIG_RANDOMIZE_MODULE_REGION_FULL=y -CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y -CONFIG_STACKPROTECTOR_PER_TASK=y -# end of Kernel Features - -# -# Boot options -# -CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y -CONFIG_CMDLINE="console=ttyAMA0" -CONFIG_CMDLINE_FROM_BOOTLOADER=y -# CONFIG_CMDLINE_FORCE is not set -CONFIG_EFI_STUB=y -CONFIG_EFI=y -CONFIG_DMI=y -# end of Boot options - -# -# Power management options -# -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -CONFIG_HIBERNATE_CALLBACKS=y -CONFIG_HIBERNATION=y -CONFIG_HIBERNATION_SNAPSHOT_DEV=y -CONFIG_PM_STD_PARTITION="" -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -# CONFIG_PM_AUTOSLEEP is not set -# CONFIG_PM_USERSPACE_AUTOSLEEP is not set -# CONFIG_PM_WAKELOCKS is not set -CONFIG_PM=y -CONFIG_PM_DEBUG=y -# CONFIG_PM_ADVANCED_DEBUG is not set -# CONFIG_PM_TEST_SUSPEND is not set -CONFIG_PM_SLEEP_DEBUG=y -CONFIG_PM_CLK=y -CONFIG_PM_GENERIC_DOMAINS=y -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -CONFIG_PM_GENERIC_DOMAINS_SLEEP=y -CONFIG_PM_GENERIC_DOMAINS_OF=y -CONFIG_CPU_PM=y -# CONFIG_ENERGY_MODEL is not set -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_HIBERNATION_HEADER=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -# end of Power management options - -# -# CPU Power Management -# - -# -# CPU Idle -# -CONFIG_CPU_IDLE=y -# CONFIG_CPU_IDLE_GOV_LADDER is not set -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_CPU_IDLE_GOV_TEO is not set - -# -# ARM CPU Idle Drivers -# -# CONFIG_ARM_PSCI_CPUIDLE is not set -# end of ARM CPU Idle Drivers -# end of CPU Idle - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_ATTR_SET=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set 
-# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set - -# -# CPU frequency scaling drivers -# -# CONFIG_CPUFREQ_DT is not set -# CONFIG_CPUFREQ_DT_PLATDEV is not set -CONFIG_ACPI_CPPC_CPUFREQ=m -CONFIG_ACPI_CPPC_CPUFREQ_FIE=y -CONFIG_ARM_SCPI_CPUFREQ=m -# CONFIG_ARM_QCOM_CPUFREQ_HW is not set -# end of CPU Frequency scaling -# end of CPU Power Management - -CONFIG_ARCH_SUPPORTS_ACPI=y -CONFIG_ACPI=y -CONFIG_ACPI_GENERIC_GSI=y -CONFIG_ACPI_CCA_REQUIRED=y -CONFIG_ACPI_TABLE_LIB=y -# CONFIG_ACPI_DEBUGGER is not set -CONFIG_ACPI_SPCR_TABLE=y -# CONFIG_ACPI_FPDT is not set -# CONFIG_ACPI_EC_DEBUGFS is not set -CONFIG_ACPI_AC=y -CONFIG_ACPI_BATTERY=y -CONFIG_ACPI_BUTTON=y -CONFIG_ACPI_VIDEO=m -CONFIG_ACPI_FAN=y -# CONFIG_ACPI_TAD is not set -# CONFIG_ACPI_DOCK is not set -CONFIG_ACPI_PROCESSOR_IDLE=y -CONFIG_ACPI_MCFG=y -CONFIG_ACPI_CPPC_LIB=y -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_IPMI=m -CONFIG_ACPI_HOTPLUG_CPU=y -CONFIG_ACPI_THERMAL=y -CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y -CONFIG_ACPI_TABLE_UPGRADE=y -# CONFIG_ACPI_DEBUG is not set -CONFIG_ACPI_PCI_SLOT=y -CONFIG_ACPI_CONTAINER=y -CONFIG_ACPI_HOTPLUG_MEMORY=y -CONFIG_ACPI_HED=y -# CONFIG_ACPI_CUSTOM_METHOD is not set -# CONFIG_ACPI_BGRT is not set -CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y -CONFIG_ACPI_NFIT=m -# CONFIG_NFIT_SECURITY_DEBUG is not set -CONFIG_ACPI_NUMA=y -CONFIG_ACPI_HMAT=y -CONFIG_HAVE_ACPI_APEI=y -CONFIG_ACPI_APEI=y -CONFIG_ACPI_APEI_GHES=y -CONFIG_ACPI_APEI_PCIEAER=y -CONFIG_ACPI_APEI_SEA=y -CONFIG_ACPI_APEI_MEMORY_FAILURE=y -CONFIG_ACPI_APEI_EINJ=m -# CONFIG_ACPI_APEI_ERST_DEBUG is not set -# CONFIG_ACPI_CONFIGFS is not set -# 
CONFIG_ACPI_PFRUT is not set -CONFIG_ACPI_IORT=y -CONFIG_ACPI_GTDT=y -CONFIG_ACPI_AGDI=y -CONFIG_ACPI_APMT=y -CONFIG_ACPI_MPAM=y -CONFIG_ACPI_PPTT=y -CONFIG_ACPI_PCC=y -# CONFIG_ACPI_FFH is not set -# CONFIG_PMIC_OPREGION is not set -CONFIG_ACPI_PRMT=y -CONFIG_IRQ_BYPASS_MANAGER=y -CONFIG_HAVE_KVM=y -CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_HAVE_KVM_IRQFD=y -CONFIG_HAVE_KVM_IRQ_ROUTING=y -CONFIG_HAVE_KVM_DIRTY_RING=y -CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y -CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_MMIO=y -CONFIG_HAVE_KVM_MSI=y -CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y -CONFIG_KVM_VFIO=y -CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y -CONFIG_HAVE_KVM_IRQ_BYPASS=y -CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y -CONFIG_KVM_XFER_TO_GUEST_WORK=y -CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=y -# CONFIG_NVHE_EL2_DEBUG is not set - -# -# General architecture-dependent options -# -CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y -CONFIG_HOTPLUG_CORE_SYNC=y -CONFIG_HOTPLUG_CORE_SYNC_DEAD=y -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set -CONFIG_UPROBES=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_KRETPROBES=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y -CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y -CONFIG_HAVE_NMI=y -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_GENERIC_IDLE_POLL_SETUP=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_KEEPINITRD=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_ARCH_HAS_SET_DIRECT_MAP=y -CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y -CONFIG_ARCH_WANTS_NO_INSTR=y -CONFIG_HAVE_ASM_MODVERSIONS=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y -CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_PERF_EVENTS_NMI=y 
-CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y -CONFIG_MMU_GATHER_TABLE_FREE=y -CONFIG_MMU_GATHER_RCU_TABLE_FREE=y -CONFIG_MMU_LAZY_TLB_REFCOUNT=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y -CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_HAVE_ARCH_SECCOMP=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP=y -CONFIG_SECCOMP_FILTER=y -# CONFIG_SECCOMP_CACHE_DEBUG is not set -CONFIG_HAVE_ARCH_STACKLEAK=y -CONFIG_HAVE_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR_STRONG=y -CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK=y -# CONFIG_SHADOW_CALL_STACK is not set -CONFIG_ARCH_SUPPORTS_LTO_CLANG=y -CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y -CONFIG_LTO_NONE=y -CONFIG_ARCH_SUPPORTS_CFI_CLANG=y -# CONFIG_CFI_CLANG is not set -CONFIG_HAVE_CONTEXT_TRACKING_USER=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_MOVE_PUD=y -CONFIG_HAVE_MOVE_PMD=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_HAVE_ARCH_HUGE_VMAP=y -CONFIG_HAVE_ARCH_HUGE_VMALLOC=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_ARCH_WANT_PMD_MKWRITE=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y -CONFIG_SOFTIRQ_ON_OWN_STACK=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_ARCH_MMAP_RND_BITS=18 -CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 -CONFIG_PAGE_SIZE_LESS_THAN_64KB=y -CONFIG_PAGE_SIZE_LESS_THAN_256KB=y -CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y -CONFIG_CLONE_BACKWARDS=y -CONFIG_OLD_SIGSUSPEND3=y -CONFIG_COMPAT_OLD_SIGACTION=y -CONFIG_COMPAT_32BIT_TIME=y -CONFIG_HAVE_ARCH_VMAP_STACK=y -CONFIG_VMAP_STACK=y -CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y -CONFIG_RANDOMIZE_KSTACK_OFFSET=y -# 
CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_STRICT_MODULE_RWX=y -CONFIG_ARCH_HAS_CPU_RESCTRL=y -CONFIG_HAVE_ARCH_COMPILER_H=y -CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y -CONFIG_ARCH_USE_MEMREMAP_PROT=y -# CONFIG_LOCK_EVENT_COUNTS is not set -CONFIG_ARCH_HAS_RELR=y -CONFIG_RELR=y -CONFIG_HAVE_PREEMPT_DYNAMIC=y -CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y -CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y -CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -# end of GCOV-based kernel profiling - -CONFIG_HAVE_GCC_PLUGINS=y -CONFIG_GCC_PLUGINS=y -# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set -CONFIG_FUNCTION_ALIGNMENT_4B=y -CONFIG_FUNCTION_ALIGNMENT_8B=y -CONFIG_FUNCTION_ALIGNMENT=8 -# end of General architecture-dependent options - -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULE_SIG_FORMAT=y -CONFIG_MODULES=y -# CONFIG_MODULE_DEBUG is not set -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set -CONFIG_MODVERSIONS=y -CONFIG_ASM_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_MODULE_SIG=y -# CONFIG_MODULE_SIG_FORCE is not set -# CONFIG_MODULE_SIG_ALL is not set -# CONFIG_MODULE_SIG_SHA1 is not set -# CONFIG_MODULE_SIG_SHA224 is not set -CONFIG_MODULE_SIG_SHA256=y -# CONFIG_MODULE_SIG_SHA384 is not set -# CONFIG_MODULE_SIG_SHA512 is not set -CONFIG_MODULE_SIG_HASH="sha256" -CONFIG_MODULE_COMPRESS_NONE=y -# CONFIG_MODULE_COMPRESS_GZIP is not set -# CONFIG_MODULE_COMPRESS_XZ is not set -# CONFIG_MODULE_COMPRESS_ZSTD is not set -# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set -CONFIG_MODPROBE_PATH="/sbin/modprobe" -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLOCK_LEGACY_AUTOLOAD=y 
-CONFIG_BLK_RQ_ALLOC_TIME=y -CONFIG_BLK_CGROUP_RWSTAT=y -CONFIG_BLK_CGROUP_PUNT_BIO=y -CONFIG_BLK_DEV_BSG_COMMON=y -CONFIG_BLK_ICQ=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_INTEGRITY_T10=m -CONFIG_BLK_DEV_ZONED=y -CONFIG_BLK_DEV_THROTTLING=y -# CONFIG_BLK_DEV_THROTTLING_LOW is not set -# CONFIG_BLK_WBT is not set -# CONFIG_BLK_CGROUP_IOLATENCY is not set -# CONFIG_BLK_CGROUP_FC_APPID is not set -CONFIG_BLK_CGROUP_IOCOST=y -# CONFIG_BLK_CGROUP_IOPRIO is not set -CONFIG_BLK_DEBUG_FS=y -CONFIG_BLK_DEBUG_FS_ZONED=y -# CONFIG_BLK_SED_OPAL is not set -# CONFIG_BLK_INLINE_ENCRYPTION is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -# CONFIG_AIX_PARTITION is not set -# CONFIG_OSF_PARTITION is not set -# CONFIG_AMIGA_PARTITION is not set -# CONFIG_ATARI_PARTITION is not set -# CONFIG_MAC_PARTITION is not set -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -# CONFIG_MINIX_SUBPARTITION is not set -# CONFIG_SOLARIS_X86_PARTITION is not set -# CONFIG_UNIXWARE_DISKLABEL is not set -# CONFIG_LDM_PARTITION is not set -# CONFIG_SGI_PARTITION is not set -# CONFIG_ULTRIX_PARTITION is not set -# CONFIG_SUN_PARTITION is not set -# CONFIG_KARMA_PARTITION is not set -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -# CONFIG_CMDLINE_PARTITION is not set -# end of Partition Types - -CONFIG_BLK_MQ_PCI=y -CONFIG_BLK_MQ_VIRTIO=y -CONFIG_BLK_PM=y -CONFIG_BLOCK_HOLDER_DEPRECATED=y -CONFIG_BLK_MQ_STACKING=y - -# -# IO Schedulers -# -CONFIG_MQ_IOSCHED_DEADLINE=y -CONFIG_MQ_IOSCHED_KYBER=y -CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y -# CONFIG_BFQ_CGROUP_DEBUG is not set -# end of IO Schedulers - -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_PADATA=y -CONFIG_ASN1=y -CONFIG_UNINLINE_SPIN_UNLOCK=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y -CONFIG_QUEUED_SPINLOCKS=y 
-CONFIG_ARCH_USE_QUEUED_RWLOCKS=y -CONFIG_QUEUED_RWLOCKS=y -CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y -CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y -CONFIG_CK_KABI_RESERVE=y -CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y -CONFIG_FREEZER=y - -# -# Executable file formats -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ARCH_BINFMT_ELF_STATE=y -CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y -CONFIG_ARCH_HAVE_ELF_PROT=y -CONFIG_ARCH_USE_GNU_PROPERTY=y -CONFIG_ELFCORE=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_BINFMT_SCRIPT=y -CONFIG_BINFMT_MISC=m -CONFIG_COREDUMP=y -# end of Executable file formats - -# -# Memory Management options -# -CONFIG_ZPOOL=y -CONFIG_SWAP=y -CONFIG_ZSWAP=y -# CONFIG_ZSWAP_DEFAULT_ON is not set -# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set -CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set -CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" -CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y -# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set -# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set -CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" -CONFIG_ZBUD=y -# CONFIG_Z3FOLD is not set -CONFIG_ZSMALLOC=y -CONFIG_ZSMALLOC_STAT=y -CONFIG_ZSMALLOC_CHAIN_SIZE=8 - -# -# SLAB allocator options -# -# CONFIG_SLAB_DEPRECATED is not set -CONFIG_SLUB=y -# CONFIG_SLAB_MERGE_DEFAULT is not set -CONFIG_SLAB_FREELIST_RANDOM=y -# CONFIG_SLAB_FREELIST_HARDENED is not set -# CONFIG_SLUB_STATS is not set -CONFIG_SLUB_CPU_PARTIAL=y -# CONFIG_RANDOM_KMALLOC_CACHES is not set -# end of SLAB allocator options - -CONFIG_SHUFFLE_PAGE_ALLOCATOR=y -# CONFIG_COMPAT_BRK is not set -CONFIG_SPARSEMEM=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_FAST_GUP=y -CONFIG_ARCH_KEEP_MEMBLOCK=y -CONFIG_NUMA_KEEP_MEMINFO=y 
-CONFIG_MEMORY_ISOLATION=y -CONFIG_EXCLUSIVE_SYSTEM_RAM=y -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_MHP_MEMMAP_ON_MEMORY=y -CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y -CONFIG_MEMORY_BALLOON=y -CONFIG_BALLOON_COMPACTION=y -CONFIG_COMPACTION=y -CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 -CONFIG_PAGE_REPORTING=y -CONFIG_MIGRATION=y -CONFIG_DEVICE_MIGRATION=y -CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y -CONFIG_ARCH_ENABLE_THP_MIGRATION=y -CONFIG_CONTIG_ALLOC=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -CONFIG_MEMORY_FAILURE=y -CONFIG_HWPOISON_INJECT=m -CONFIG_ARCH_WANTS_THP_SWAP=y -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set -CONFIG_THP_SWAP=y -CONFIG_READ_ONLY_THP_FOR_FS=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -CONFIG_USE_PERCPU_NUMA_NODE_ID=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_CMA=y -# CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set -# CONFIG_CMA_SYSFS is not set -CONFIG_CMA_AREAS=19 -CONFIG_GENERIC_EARLY_IOREMAP=y -# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set -CONFIG_PAGE_IDLE_FLAG=y -CONFIG_IDLE_PAGE_TRACKING=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y -CONFIG_ARCH_HAS_PTE_DEVMAP=y -CONFIG_ZONE_DMA=y -CONFIG_ZONE_DMA32=y -CONFIG_ZONE_DEVICE=y -CONFIG_HMM_MIRROR=y -CONFIG_GET_FREE_REGION=y -# CONFIG_DEVICE_PRIVATE is not set -CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y -CONFIG_ARCH_USES_PG_ARCH_X=y -CONFIG_VM_EVENT_COUNTERS=y -# CONFIG_PERCPU_STATS is not set -# CONFIG_GUP_TEST is not set -# CONFIG_DMAPOOL_TEST is not set -CONFIG_ARCH_HAS_PTE_SPECIAL=y -CONFIG_MEMFD_CREATE=y -CONFIG_SECRETMEM=y -# CONFIG_ANON_VMA_NAME is not 
set -CONFIG_USERFAULTFD=y -CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y -CONFIG_LRU_GEN=y -# CONFIG_LRU_GEN_ENABLED is not set -# CONFIG_LRU_GEN_STATS is not set -CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y -CONFIG_PER_VMA_LOCK=y -CONFIG_LOCK_MM_AND_FIND_VMA=y - -# -# Data Access Monitoring -# -CONFIG_DAMON=y -CONFIG_DAMON_VADDR=y -CONFIG_DAMON_PADDR=y -# CONFIG_DAMON_SYSFS is not set -CONFIG_DAMON_DBGFS=y -# CONFIG_DAMON_RECLAIM is not set -# CONFIG_DAMON_LRU_SORT is not set -# end of Data Access Monitoring -# end of Memory Management options - -CONFIG_NET=y -CONFIG_NET_INGRESS=y -CONFIG_NET_EGRESS=y -CONFIG_NET_XGRESS=y -CONFIG_NET_REDIRECT=y -CONFIG_SKB_EXTENSIONS=y - -# -# Networking options -# -CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m -CONFIG_UNIX=y -CONFIG_UNIX_SCM=y -CONFIG_AF_UNIX_OOB=y -CONFIG_UNIX_DIAG=m -CONFIG_TLS=m -CONFIG_TLS_DEVICE=y -# CONFIG_TLS_TOE is not set -CONFIG_XFRM=y -CONFIG_XFRM_OFFLOAD=y -CONFIG_XFRM_ALGO=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_INTERFACE=m -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y -CONFIG_XFRM_STATISTICS=y -CONFIG_XFRM_AH=m -CONFIG_XFRM_ESP=m -CONFIG_XFRM_IPCOMP=m -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -CONFIG_SMC=m -CONFIG_SMC_DIAG=m -CONFIG_XDP_SOCKETS=y -CONFIG_XDP_SOCKETS_DIAG=m -CONFIG_NET_HANDSHAKE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -# CONFIG_IP_PNP is not set -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IP_TUNNEL=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE_COMMON=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m -CONFIG_NET_UDP_TUNNEL=m -# CONFIG_NET_FOU is not set -# CONFIG_NET_FOU_IP_TUNNELS is not set -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_ESP_OFFLOAD=m -# CONFIG_INET_ESPINTCP is not set -CONFIG_INET_IPCOMP=m 
-CONFIG_INET_TABLE_PERTURB_ORDER=16 -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=m -CONFIG_INET_DIAG=m -CONFIG_INET_TCP_DIAG=m -CONFIG_INET_UDP_DIAG=m -CONFIG_INET_RAW_DIAG=m -# CONFIG_INET_DIAG_DESTROY is not set -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m -CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=m -CONFIG_TCP_CONG_HTCP=m -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_VEGAS=m -CONFIG_TCP_CONG_NV=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_TCP_CONG_DCTCP=m -# CONFIG_TCP_CONG_CDG is not set -CONFIG_TCP_CONG_BBR=m -CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_ESP_OFFLOAD=m -# CONFIG_INET6_ESPINTCP is not set -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -# CONFIG_IPV6_ILA is not set -CONFIG_INET6_XFRM_TUNNEL=m -CONFIG_INET6_TUNNEL=m -CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=m -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_GRE=m -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y -CONFIG_IPV6_PIMSM_V2=y -# CONFIG_IPV6_SEG6_LWTUNNEL is not set -# CONFIG_IPV6_SEG6_HMAC is not set -# CONFIG_IPV6_RPL_LWTUNNEL is not set -# CONFIG_IPV6_IOAM6_LWTUNNEL is not set -CONFIG_NETLABEL=y -CONFIG_MPTCP=y -CONFIG_INET_MPTCP_DIAG=m -CONFIG_MPTCP_IPV6=y -CONFIG_NETWORK_SECMARK=y -CONFIG_NET_PTP_CLASSIFY=y -CONFIG_NETWORK_PHY_TIMESTAMPING=y -CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=m - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_INGRESS=y -CONFIG_NETFILTER_EGRESS=y -CONFIG_NETFILTER_SKIP_EGRESS=y -CONFIG_NETFILTER_NETLINK=m -CONFIG_NETFILTER_FAMILY_BRIDGE=y -CONFIG_NETFILTER_FAMILY_ARP=y 
-CONFIG_NETFILTER_BPF_LINK=y -# CONFIG_NETFILTER_NETLINK_HOOK is not set -CONFIG_NETFILTER_NETLINK_ACCT=m -CONFIG_NETFILTER_NETLINK_QUEUE=m -CONFIG_NETFILTER_NETLINK_LOG=m -CONFIG_NETFILTER_NETLINK_OSF=m -CONFIG_NF_CONNTRACK=m -CONFIG_NF_LOG_SYSLOG=m -CONFIG_NETFILTER_CONNCOUNT=m -CONFIG_NF_CONNTRACK_MARK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMEOUT=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_LABELS=y -CONFIG_NF_CONNTRACK_OVS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=y -CONFIG_NF_CT_PROTO_SCTP=y -CONFIG_NF_CT_PROTO_UDPLITE=y -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_BROADCAST=m -CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NF_CT_NETLINK_TIMEOUT=m -CONFIG_NF_CT_NETLINK_HELPER=m -CONFIG_NETFILTER_NETLINK_GLUE_CT=y -CONFIG_NF_NAT=m -CONFIG_NF_NAT_AMANDA=m -CONFIG_NF_NAT_FTP=m -CONFIG_NF_NAT_IRC=m -CONFIG_NF_NAT_SIP=m -CONFIG_NF_NAT_TFTP=m -CONFIG_NF_NAT_REDIRECT=y -CONFIG_NF_NAT_MASQUERADE=y -CONFIG_NF_NAT_OVS=y -CONFIG_NETFILTER_SYNPROXY=m -CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=y -CONFIG_NF_TABLES_NETDEV=y -CONFIG_NFT_NUMGEN=m -CONFIG_NFT_CT=m -CONFIG_NFT_FLOW_OFFLOAD=m -CONFIG_NFT_CONNLIMIT=m -CONFIG_NFT_LOG=m -CONFIG_NFT_LIMIT=m -CONFIG_NFT_MASQ=m -CONFIG_NFT_REDIR=m -CONFIG_NFT_NAT=m -CONFIG_NFT_TUNNEL=m -CONFIG_NFT_QUEUE=m -CONFIG_NFT_QUOTA=m -CONFIG_NFT_REJECT=m -CONFIG_NFT_REJECT_INET=m -CONFIG_NFT_COMPAT=m -CONFIG_NFT_HASH=m -CONFIG_NFT_FIB=m -CONFIG_NFT_FIB_INET=m -CONFIG_NFT_XFRM=m -CONFIG_NFT_SOCKET=m -CONFIG_NFT_OSF=m -CONFIG_NFT_TPROXY=m -# CONFIG_NFT_SYNPROXY is not set -CONFIG_NF_DUP_NETDEV=m -CONFIG_NFT_DUP_NETDEV=m -CONFIG_NFT_FWD_NETDEV=m -CONFIG_NFT_FIB_NETDEV=m -# 
CONFIG_NFT_REJECT_NETDEV is not set -CONFIG_NF_FLOW_TABLE_INET=m -CONFIG_NF_FLOW_TABLE=m -# CONFIG_NF_FLOW_TABLE_PROCFS is not set -CONFIG_NETFILTER_XTABLES=y -# CONFIG_NETFILTER_XTABLES_COMPAT is not set - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=m -CONFIG_NETFILTER_XT_CONNMARK=m -CONFIG_NETFILTER_XT_SET=m - -# -# Xtables targets -# -CONFIG_NETFILTER_XT_TARGET_AUDIT=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m -CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HL=m -CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LED=m -CONFIG_NETFILTER_XT_TARGET_LOG=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_NAT=m -CONFIG_NETFILTER_XT_TARGET_NETMAP=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_RATEEST=m -CONFIG_NETFILTER_XT_TARGET_REDIRECT=m -CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m -CONFIG_NETFILTER_XT_TARGET_TEE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m - -# -# Xtables matches -# -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m -CONFIG_NETFILTER_XT_MATCH_CGROUP=m -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ECN=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m 
-CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_HL=m -# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_IPVS=m -CONFIG_NETFILTER_XT_MATCH_L2TP=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SCTP=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -# end of Core Netfilter Configuration - -CONFIG_IP_SET=m -CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPMARK=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_IPMAC=m -CONFIG_IP_SET_HASH_MAC=m -CONFIG_IP_SET_HASH_NETPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETNET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -# CONFIG_IP_VS_DEBUG is not set -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m 
-CONFIG_IP_VS_FO=m -CONFIG_IP_VS_OVF=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_MH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m -# CONFIG_IP_VS_TWOS is not set - -# -# IPVS SH scheduler -# -CONFIG_IP_VS_SH_TAB_BITS=8 - -# -# IPVS MH scheduler -# -CONFIG_IP_VS_MH_TAB_INDEX=12 - -# -# IPVS application helper -# -CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_NFCT=y -CONFIG_IP_VS_PE_SIP=m - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=m -CONFIG_NF_SOCKET_IPV4=m -CONFIG_NF_TPROXY_IPV4=m -CONFIG_NF_TABLES_IPV4=y -CONFIG_NFT_REJECT_IPV4=m -CONFIG_NFT_DUP_IPV4=m -CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=y -CONFIG_NF_DUP_IPV4=m -CONFIG_NF_LOG_ARP=m -CONFIG_NF_LOG_IPV4=m -CONFIG_NF_REJECT_IPV4=m -CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PPTP=m -CONFIG_NF_NAT_H323=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_SYNPROXY=m -CONFIG_IP_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m -# end of IP: Netfilter Configuration - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_SOCKET_IPV6=m -CONFIG_NF_TPROXY_IPV6=m -CONFIG_NF_TABLES_IPV6=y -CONFIG_NFT_REJECT_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_DUP_IPV6=m -CONFIG_NF_REJECT_IPV6=m -CONFIG_NF_LOG_IPV6=m -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m -CONFIG_IP6_NF_MATCH_RT=m -# CONFIG_IP6_NF_MATCH_SRH is not set -# CONFIG_IP6_NF_TARGET_HL 
is not set -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_TARGET_SYNPROXY=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_IP6_NF_SECURITY=m -CONFIG_IP6_NF_NAT=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m -# end of IPv6: Netfilter Configuration - -CONFIG_NF_DEFRAG_IPV6=m -CONFIG_NF_TABLES_BRIDGE=m -# CONFIG_NFT_BRIDGE_META is not set -CONFIG_NFT_BRIDGE_REJECT=m -# CONFIG_NF_CONNTRACK_BRIDGE is not set -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_NFLOG=m -# CONFIG_BPFILTER is not set -# CONFIG_IP_DCCP is not set -CONFIG_IP_SCTP=m -# CONFIG_SCTP_DBG_OBJCNT is not set -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set -CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set -CONFIG_SCTP_COOKIE_HMAC_MD5=y -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_INET_SCTP_DIAG=m -# CONFIG_RDS is not set -CONFIG_TIPC=m -CONFIG_TIPC_MEDIA_IB=y -CONFIG_TIPC_MEDIA_UDP=y -CONFIG_TIPC_CRYPTO=y -CONFIG_TIPC_DIAG=m -CONFIG_ATM=m -CONFIG_ATM_CLIP=m -# CONFIG_ATM_CLIP_NO_ICMP is not set -CONFIG_ATM_LANE=m -# CONFIG_ATM_MPOA is not set -CONFIG_ATM_BR2684=m -# CONFIG_ATM_BR2684_IPFILTER is not set -CONFIG_L2TP=m -CONFIG_L2TP_DEBUGFS=m -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=m -CONFIG_L2TP_ETH=m -CONFIG_STP=m -CONFIG_GARP=m -CONFIG_MRP=m -CONFIG_BRIDGE=m -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_BRIDGE_VLAN_FILTERING=y -# CONFIG_BRIDGE_MRP is not set -# CONFIG_BRIDGE_CFM is not set -# CONFIG_NET_DSA is not set 
-CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_VLAN_8021Q_MVRP=y -CONFIG_LLC=m -# CONFIG_LLC2 is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_PHONET is not set -CONFIG_6LOWPAN=m -# CONFIG_6LOWPAN_DEBUGFS is not set -# CONFIG_6LOWPAN_NHC is not set -CONFIG_IEEE802154=m -# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set -CONFIG_IEEE802154_SOCKET=m -CONFIG_IEEE802154_6LOWPAN=m -CONFIG_MAC802154=m -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFB=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -# CONFIG_NET_SCH_CBS is not set -# CONFIG_NET_SCH_ETF is not set -CONFIG_NET_SCH_MQPRIO_LIB=m -# CONFIG_NET_SCH_TAPRIO is not set -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -# CONFIG_NET_SCH_SKBPRIO is not set -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -CONFIG_NET_SCH_CODEL=m -CONFIG_NET_SCH_FQ_CODEL=y -# CONFIG_NET_SCH_CAKE is not set -CONFIG_NET_SCH_FQ=m -CONFIG_NET_SCH_HHF=m -CONFIG_NET_SCH_PIE=m -# CONFIG_NET_SCH_FQ_PIE is not set -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_SCH_PLUG=m -# CONFIG_NET_SCH_ETS is not set -CONFIG_NET_SCH_DEFAULT=y -# CONFIG_DEFAULT_FQ is not set -# CONFIG_DEFAULT_CODEL is not set -CONFIG_DEFAULT_FQ_CODEL=y -# CONFIG_DEFAULT_SFQ is not set -# CONFIG_DEFAULT_PFIFO_FAST is not set -CONFIG_DEFAULT_NET_SCH="fq_codel" - -# -# Classification -# -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_CGROUP=y -CONFIG_NET_CLS_BPF=m -CONFIG_NET_CLS_FLOWER=m -CONFIG_NET_CLS_MATCHALL=m -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 -CONFIG_NET_EMATCH_CMP=m -CONFIG_NET_EMATCH_NBYTE=m -CONFIG_NET_EMATCH_U32=m -CONFIG_NET_EMATCH_META=m -CONFIG_NET_EMATCH_TEXT=m 
-CONFIG_NET_EMATCH_IPSET=m -# CONFIG_NET_EMATCH_IPT is not set -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=m -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_SAMPLE=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_NET_ACT_CSUM=m -# CONFIG_NET_ACT_MPLS is not set -CONFIG_NET_ACT_VLAN=m -CONFIG_NET_ACT_BPF=m -# CONFIG_NET_ACT_CONNMARK is not set -# CONFIG_NET_ACT_CTINFO is not set -CONFIG_NET_ACT_SKBMOD=m -# CONFIG_NET_ACT_IFE is not set -CONFIG_NET_ACT_TUNNEL_KEY=m -CONFIG_NET_ACT_CT=m -# CONFIG_NET_ACT_GATE is not set -CONFIG_NET_TC_SKB_EXT=y -CONFIG_NET_SCH_FIFO=y -CONFIG_DCB=y -CONFIG_DNS_RESOLVER=m -# CONFIG_BATMAN_ADV is not set -CONFIG_OPENVSWITCH=m -CONFIG_OPENVSWITCH_GRE=m -CONFIG_OPENVSWITCH_VXLAN=m -CONFIG_OPENVSWITCH_GENEVE=m -CONFIG_VSOCKETS=m -CONFIG_VSOCKETS_DIAG=m -CONFIG_VSOCKETS_LOOPBACK=m -CONFIG_VIRTIO_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS_COMMON=m -CONFIG_NETLINK_DIAG=m -CONFIG_MPLS=y -CONFIG_NET_MPLS_GSO=y -CONFIG_MPLS_ROUTING=m -CONFIG_MPLS_IPTUNNEL=m -CONFIG_NET_NSH=y -# CONFIG_HSR is not set -CONFIG_NET_SWITCHDEV=y -CONFIG_NET_L3_MASTER_DEV=y -# CONFIG_QRTR is not set -# CONFIG_NET_NCSI is not set -CONFIG_PCPU_DEV_REFCNT=y -CONFIG_MAX_SKB_FRAGS=17 -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_SOCK_RX_QUEUE_MAPPING=y -CONFIG_XPS=y -CONFIG_CGROUP_NET_PRIO=y -CONFIG_CGROUP_NET_CLASSID=y -CONFIG_NET_RX_BUSY_POLL=y -CONFIG_BQL=y -CONFIG_BPF_STREAM_PARSER=y -CONFIG_NET_FLOW_LIMIT=y - -# -# Network testing -# -CONFIG_NET_PKTGEN=m -CONFIG_NET_DROP_MONITOR=y -# end of Network testing -# end of Networking options - -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -# CONFIG_BT is not set -# CONFIG_AF_RXRPC is not set -# CONFIG_AF_KCM is not set -CONFIG_STREAM_PARSER=y -# CONFIG_MCTP is not set -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y -CONFIG_CFG80211=m -# CONFIG_NL80211_TESTMODE is not set -# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set 
-CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y -CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y -CONFIG_CFG80211_DEFAULT_PS=y -# CONFIG_CFG80211_DEBUGFS is not set -CONFIG_CFG80211_CRDA_SUPPORT=y -# CONFIG_CFG80211_WEXT is not set -CONFIG_MAC80211=m -CONFIG_MAC80211_HAS_RC=y -CONFIG_MAC80211_RC_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" -# CONFIG_MAC80211_MESH is not set -CONFIG_MAC80211_LEDS=y -CONFIG_MAC80211_DEBUGFS=y -# CONFIG_MAC80211_MESSAGE_TRACING is not set -# CONFIG_MAC80211_DEBUG_MENU is not set -CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 -CONFIG_RFKILL=m -CONFIG_RFKILL_LEDS=y -CONFIG_RFKILL_INPUT=y -CONFIG_RFKILL_GPIO=m -# CONFIG_NET_9P is not set -# CONFIG_CAIF is not set -CONFIG_CEPH_LIB=m -# CONFIG_CEPH_LIB_PRETTYDEBUG is not set -CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y -# CONFIG_NFC is not set -CONFIG_PSAMPLE=m -# CONFIG_NET_IFE is not set -CONFIG_LWTUNNEL=y -CONFIG_LWTUNNEL_BPF=y -CONFIG_DST_CACHE=y -CONFIG_GRO_CELLS=y -CONFIG_SOCK_VALIDATE_XMIT=y -CONFIG_NET_SELFTESTS=y -CONFIG_NET_SOCK_MSG=y -CONFIG_NET_DEVLINK=y -CONFIG_PAGE_POOL=y -# CONFIG_PAGE_POOL_STATS is not set -CONFIG_FAILOVER=m -CONFIG_ETHTOOL_NETLINK=y - -# -# Device Drivers -# -CONFIG_ARM_AMBA=y -CONFIG_HAVE_PCI=y -CONFIG_PCI=y -CONFIG_PCI_DOMAINS=y -CONFIG_PCI_DOMAINS_GENERIC=y -CONFIG_PCI_SYSCALL=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCIEAER=y -CONFIG_PCIEAER_INJECT=m -CONFIG_PCIE_ECRC=y -CONFIG_PCIEASPM=y -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCIE_PME=y -CONFIG_PCIE_DPC=y -# CONFIG_PCIE_PTM is not set -CONFIG_PCIE_EDR=y -CONFIG_PCI_MSI=y -CONFIG_PCI_QUIRKS=y -# CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set -CONFIG_PCI_STUB=y -# CONFIG_PCI_PF_STUB is not set -CONFIG_PCI_ATS=y -CONFIG_PCI_DOE=y -CONFIG_PCI_ECAM=y -CONFIG_PCI_IOV=y -CONFIG_PCI_PRI=y -CONFIG_PCI_PASID=y -# CONFIG_PCI_P2PDMA is 
not set -CONFIG_PCI_LABEL=y -# CONFIG_PCI_DYNAMIC_OF_NODES is not set -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=64 -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_HOTPLUG_PCI_ACPI_IBM=m -# CONFIG_HOTPLUG_PCI_CPCI is not set -# CONFIG_HOTPLUG_PCI_SHPC is not set - -# -# PCI controller drivers -# -# CONFIG_PCIE_ALTERA is not set -CONFIG_PCI_HOST_THUNDER_PEM=y -CONFIG_PCI_HOST_THUNDER_ECAM=y -# CONFIG_PCI_FTPCI100 is not set -CONFIG_PCI_HOST_COMMON=y -CONFIG_PCI_HOST_GENERIC=y -# CONFIG_PCIE_HISI_ERR is not set -# CONFIG_PCIE_MICROCHIP_HOST is not set -CONFIG_PCI_XGENE=y -CONFIG_PCI_XGENE_MSI=y -# CONFIG_PCIE_XILINX is not set - -# -# Cadence-based PCIe controllers -# -# CONFIG_PCIE_CADENCE_PLAT_HOST is not set -# CONFIG_PCI_J721E_HOST is not set -# end of Cadence-based PCIe controllers - -# -# DesignWare-based PCIe controllers -# -CONFIG_PCIE_DW=y -CONFIG_PCIE_DW_HOST=y -# CONFIG_PCIE_AL is not set -# CONFIG_PCI_MESON is not set -CONFIG_PCI_HISI=y -# CONFIG_PCIE_KIRIN is not set -# CONFIG_PCIE_HISI_STB is not set -# CONFIG_PCIE_DW_PLAT_HOST is not set -# CONFIG_PCIE_QCOM is not set -# end of DesignWare-based PCIe controllers - -# -# Mobiveil-based PCIe controllers -# -# end of Mobiveil-based PCIe controllers -# end of PCI controller drivers - -# -# PCI Endpoint -# -# CONFIG_PCI_ENDPOINT is not set -# end of PCI Endpoint - -# -# PCI switch controller drivers -# -# CONFIG_PCI_SW_SWITCHTEC is not set -# end of PCI switch controller drivers - -CONFIG_CXL_BUS=m -CONFIG_CXL_PCI=m -# CONFIG_CXL_MEM_RAW_COMMANDS is not set -CONFIG_CXL_ACPI=m -CONFIG_CXL_PMEM=m -CONFIG_CXL_MEM=m -CONFIG_CXL_PORT=m -CONFIG_CXL_SUSPEND=y -CONFIG_CXL_REGION=y -# CONFIG_CXL_REGION_INVALIDATION_TEST is not set -CONFIG_CXL_PMU=m -CONFIG_PCCARD=y -# CONFIG_PCMCIA is not set -CONFIG_CARDBUS=y - -# -# PC-card bridges -# -CONFIG_YENTA=m -CONFIG_YENTA_O2=y -CONFIG_YENTA_RICOH=y -CONFIG_YENTA_TI=y -CONFIG_YENTA_ENE_TUNE=y -CONFIG_YENTA_TOSHIBA=y -# CONFIG_RAPIDIO is not set - -# -# Generic 
Driver Options -# -CONFIG_AUXILIARY_BUS=y -# CONFIG_UEVENT_HELPER is not set -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_DEVTMPFS_SAFE is not set -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y - -# -# Firmware loader -# -CONFIG_FW_LOADER=y -CONFIG_FW_LOADER_DEBUG=y -CONFIG_FW_LOADER_PAGED_BUF=y -CONFIG_FW_LOADER_SYSFS=y -CONFIG_EXTRA_FIRMWARE="" -# CONFIG_FW_LOADER_USER_HELPER is not set -# CONFIG_FW_LOADER_COMPRESS is not set -CONFIG_FW_CACHE=y -CONFIG_FW_UPLOAD=y -# end of Firmware loader - -CONFIG_ALLOW_DEV_COREDUMP=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set -CONFIG_HMEM_REPORTING=y -# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set -CONFIG_GENERIC_CPU_AUTOPROBE=y -CONFIG_GENERIC_CPU_VULNERABILITIES=y -CONFIG_SOC_BUS=y -CONFIG_REGMAP=y -CONFIG_REGMAP_I2C=m -CONFIG_REGMAP_SPI=m -CONFIG_REGMAP_MMIO=y -CONFIG_DMA_SHARED_BUFFER=y -# CONFIG_DMA_FENCE_TRACE is not set -CONFIG_GENERIC_ARCH_TOPOLOGY=y -CONFIG_GENERIC_ARCH_NUMA=y -# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set -# end of Generic Driver Options - -# -# Bus devices -# -# CONFIG_BRCMSTB_GISB_ARB is not set -# CONFIG_MOXTET is not set -CONFIG_HISILICON_LPC=y -# CONFIG_QCOM_EBI2 is not set -# CONFIG_QCOM_SSC_BLOCK_BUS is not set -CONFIG_VEXPRESS_CONFIG=y -# CONFIG_MHI_BUS is not set -# CONFIG_MHI_BUS_EP is not set -# end of Bus devices - -# -# Cache Drivers -# -# end of Cache Drivers - -CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y - -# -# Firmware Drivers -# - -# -# ARM System Control and Management Interface Protocol -# -# CONFIG_ARM_SCMI_PROTOCOL is not set -# end of ARM System Control and Management Interface Protocol - -CONFIG_ARM_SCPI_PROTOCOL=m -CONFIG_ARM_SCPI_POWER_DOMAIN=m -CONFIG_ARM_SDE_INTERFACE=y -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=y -# CONFIG_ISCSI_IBFT is not set -CONFIG_FW_CFG_SYSFS=y -# CONFIG_FW_CFG_SYSFS_CMDLINE is not set -CONFIG_QCOM_SCM=y -# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set -CONFIG_SYSFB=y -# 
CONFIG_SYSFB_SIMPLEFB is not set -# CONFIG_ARM_FFA_TRANSPORT is not set -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# EFI (Extensible Firmware Interface) Support -# -CONFIG_EFI_ESRT=y -CONFIG_EFI_VARS_PSTORE=y -CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y -CONFIG_EFI_SOFT_RESERVE=y -CONFIG_EFI_PARAMS_FROM_FDT=y -CONFIG_EFI_RUNTIME_WRAPPERS=y -CONFIG_EFI_GENERIC_STUB=y -# CONFIG_EFI_ZBOOT is not set -CONFIG_EFI_ARMSTUB_DTB_LOADER=y -# CONFIG_EFI_BOOTLOADER_CONTROL is not set -# CONFIG_EFI_CAPSULE_LOADER is not set -# CONFIG_EFI_TEST is not set -# CONFIG_RESET_ATTACK_MITIGATION is not set -# CONFIG_EFI_DISABLE_PCI_DMA is not set -CONFIG_EFI_EARLYCON=y -CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y -# CONFIG_EFI_DISABLE_RUNTIME is not set -# CONFIG_EFI_COCO_SECRET is not set -# end of EFI (Extensible Firmware Interface) Support - -CONFIG_UEFI_CPER=y -CONFIG_UEFI_CPER_ARM=y -# CONFIG_YITIAN_CPER_RAWDATA is not set -CONFIG_ARM_PSCI_FW=y -# CONFIG_ARM_PSCI_CHECKER is not set -CONFIG_HAVE_ARM_SMCCC=y -CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y -CONFIG_ARM_SMCCC_SOC_ID=y - -# -# Tegra firmware driver -# -# end of Tegra firmware driver -# end of Firmware Drivers - -# CONFIG_GNSS is not set -CONFIG_MTD=m -# CONFIG_MTD_TESTS is not set - -# -# Partition parsers -# -# CONFIG_MTD_AR7_PARTS is not set -# CONFIG_MTD_CMDLINE_PARTS is not set -CONFIG_MTD_OF_PARTS=m -# CONFIG_MTD_AFS_PARTS is not set -# CONFIG_MTD_REDBOOT_PARTS is not set -# end of Partition parsers - -# -# User Modules And Translation Layers -# -CONFIG_MTD_BLKDEVS=m -CONFIG_MTD_BLOCK=m -# CONFIG_MTD_BLOCK_RO is not set - -# -# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
-# -# CONFIG_FTL is not set -# CONFIG_NFTL is not set -# CONFIG_INFTL is not set -# CONFIG_RFD_FTL is not set -# CONFIG_SSFDC is not set -# CONFIG_SM_FTL is not set -# CONFIG_MTD_OOPS is not set -# CONFIG_MTD_SWAP is not set -# CONFIG_MTD_PARTITIONED_MASTER is not set - -# -# RAM/ROM/Flash chip drivers -# -CONFIG_MTD_CFI=m -# CONFIG_MTD_JEDECPROBE is not set -CONFIG_MTD_GEN_PROBE=m -# CONFIG_MTD_CFI_ADV_OPTIONS is not set -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y -CONFIG_MTD_CFI_INTELEXT=m -CONFIG_MTD_CFI_AMDSTD=m -CONFIG_MTD_CFI_STAA=m -CONFIG_MTD_CFI_UTIL=m -# CONFIG_MTD_RAM is not set -# CONFIG_MTD_ROM is not set -# CONFIG_MTD_ABSENT is not set -# end of RAM/ROM/Flash chip drivers - -# -# Mapping drivers for chip access -# -# CONFIG_MTD_COMPLEX_MAPPINGS is not set -CONFIG_MTD_PHYSMAP=m -# CONFIG_MTD_PHYSMAP_COMPAT is not set -# CONFIG_MTD_PHYSMAP_OF is not set -# CONFIG_MTD_INTEL_VR_NOR is not set -# CONFIG_MTD_PLATRAM is not set -# end of Mapping drivers for chip access - -# -# Self-contained MTD device drivers -# -# CONFIG_MTD_PMC551 is not set -# CONFIG_MTD_DATAFLASH is not set -# CONFIG_MTD_MCHP23K256 is not set -# CONFIG_MTD_MCHP48L640 is not set -# CONFIG_MTD_SST25L is not set -# CONFIG_MTD_SLRAM is not set -# CONFIG_MTD_PHRAM is not set -# CONFIG_MTD_MTDRAM is not set -# CONFIG_MTD_BLOCK2MTD is not set - -# -# Disk-On-Chip Device Drivers -# -# CONFIG_MTD_DOCG3 is not set -# end of Self-contained MTD device drivers - -# -# NAND -# -# CONFIG_MTD_ONENAND is not set -# CONFIG_MTD_RAW_NAND is not set -# CONFIG_MTD_SPI_NAND is not set - -# -# ECC engine support -# -# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set -# CONFIG_MTD_NAND_ECC_SW_BCH is not set -# CONFIG_MTD_NAND_ECC_MXIC is not set -# end of ECC engine support -# end of NAND - -# -# LPDDR & LPDDR2 PCM memory drivers -# -# CONFIG_MTD_LPDDR is not set -# end of LPDDR & LPDDR2 PCM memory drivers - -# 
CONFIG_MTD_SPI_NOR is not set -CONFIG_MTD_UBI=m -CONFIG_MTD_UBI_WL_THRESHOLD=4096 -CONFIG_MTD_UBI_BEB_LIMIT=20 -# CONFIG_MTD_UBI_FASTMAP is not set -# CONFIG_MTD_UBI_GLUEBI is not set -# CONFIG_MTD_UBI_BLOCK is not set -# CONFIG_MTD_HYPERBUS is not set -CONFIG_DTC=y -CONFIG_OF=y -# CONFIG_OF_UNITTEST is not set -CONFIG_OF_FLATTREE=y -CONFIG_OF_EARLY_FLATTREE=y -CONFIG_OF_KOBJ=y -CONFIG_OF_DYNAMIC=y -CONFIG_OF_ADDRESS=y -CONFIG_OF_IRQ=y -CONFIG_OF_RESERVED_MEM=y -CONFIG_OF_RESOLVE=y -CONFIG_OF_OVERLAY=y -CONFIG_OF_NUMA=y -# CONFIG_PARPORT is not set -CONFIG_PNP=y -CONFIG_PNP_DEBUG_MESSAGES=y - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -CONFIG_BLK_DEV_NULL_BLK=m -CONFIG_CDROM=m -# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set -CONFIG_ZRAM=m -CONFIG_ZRAM_DEF_COMP_LZORLE=y -# CONFIG_ZRAM_DEF_COMP_ZSTD is not set -# CONFIG_ZRAM_DEF_COMP_LZ4 is not set -# CONFIG_ZRAM_DEF_COMP_LZO is not set -# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set -CONFIG_ZRAM_DEF_COMP="lzo-rle" -CONFIG_ZRAM_WRITEBACK=y -# CONFIG_ZRAM_MEMORY_TRACKING is not set -# CONFIG_ZRAM_MULTI_COMP is not set -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 -# CONFIG_BLK_DEV_DRBD is not set -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=m -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=16384 -CONFIG_CDROM_PKTCDVD=m -CONFIG_CDROM_PKTCDVD_BUFFERS=8 -# CONFIG_CDROM_PKTCDVD_WCACHE is not set -# CONFIG_ATA_OVER_ETH is not set -CONFIG_VIRTIO_BLK=m -CONFIG_BLK_DEV_RBD=m -CONFIG_BLK_DEV_UBLK=m -CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y - -# -# NVME Support -# -CONFIG_NVME_CORE=m -CONFIG_BLK_DEV_NVME=m -CONFIG_NVME_MULTIPATH=y -# CONFIG_NVME_VERBOSE_ERRORS is not set -# CONFIG_NVME_HWMON is not set -CONFIG_NVME_FABRICS=m -CONFIG_NVME_RDMA=m -CONFIG_NVME_FC=m -CONFIG_NVME_TCP=m -# CONFIG_NVME_AUTH is not set -CONFIG_NVME_TARGET=m -# CONFIG_NVME_TARGET_PASSTHRU is not set -CONFIG_NVME_TARGET_LOOP=m -CONFIG_NVME_TARGET_RDMA=m -CONFIG_NVME_TARGET_FC=m -CONFIG_NVME_TARGET_FCLOOP=m -CONFIG_NVME_TARGET_TCP=m -# 
CONFIG_NVME_TARGET_AUTH is not set -# end of NVME Support - -# -# Misc devices -# -# CONFIG_AD525X_DPOT is not set -# CONFIG_DUMMY_IRQ is not set -# CONFIG_PHANTOM is not set -CONFIG_TIFM_CORE=m -# CONFIG_TIFM_7XX1 is not set -# CONFIG_ICS932S401 is not set -CONFIG_ENCLOSURE_SERVICES=m -# CONFIG_HP_ILO is not set -# CONFIG_APDS9802ALS is not set -# CONFIG_ISL29003 is not set -# CONFIG_ISL29020 is not set -# CONFIG_SENSORS_TSL2550 is not set -# CONFIG_SENSORS_BH1770 is not set -# CONFIG_SENSORS_APDS990X is not set -# CONFIG_HMC6352 is not set -# CONFIG_DS1682 is not set -# CONFIG_LATTICE_ECP3_CONFIG is not set -# CONFIG_SRAM is not set -# CONFIG_DW_XDATA_PCIE is not set -# CONFIG_PCI_ENDPOINT_TEST is not set -# CONFIG_XILINX_SDFEC is not set -# CONFIG_HISI_HIKEY_USB is not set -# CONFIG_OPEN_DICE is not set -# CONFIG_VCPU_STALL_DETECTOR is not set -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -# CONFIG_EEPROM_AT24 is not set -# CONFIG_EEPROM_AT25 is not set -CONFIG_EEPROM_LEGACY=m -CONFIG_EEPROM_MAX6875=m -CONFIG_EEPROM_93CX6=m -# CONFIG_EEPROM_93XX46 is not set -# CONFIG_EEPROM_IDT_89HPESX is not set -# CONFIG_EEPROM_EE1004 is not set -# end of EEPROM support - -CONFIG_CB710_CORE=m -# CONFIG_CB710_DEBUG is not set -CONFIG_CB710_DEBUG_ASSUMPTIONS=y - -# -# Texas Instruments shared transport line discipline -# -# CONFIG_TI_ST is not set -# end of Texas Instruments shared transport line discipline - -# CONFIG_SENSORS_LIS3_I2C is not set -# CONFIG_ALTERA_STAPL is not set -# CONFIG_VMWARE_VMCI is not set -# CONFIG_GENWQE is not set -# CONFIG_ECHO is not set -# CONFIG_BCM_VK is not set -# CONFIG_MISC_ALCOR_PCI is not set -# CONFIG_MISC_RTSX_PCI is not set -# CONFIG_MISC_RTSX_USB is not set -CONFIG_UACCE=m -CONFIG_PVPANIC=y -# CONFIG_PVPANIC_MMIO is not set -# CONFIG_PVPANIC_PCI is not set -# CONFIG_GP_PCI1XXXX is not set -# end of Misc devices - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -CONFIG_RAID_ATTRS=m -CONFIG_SCSI_COMMON=y -CONFIG_SCSI=y 
-CONFIG_SCSI_DMA=y -CONFIG_SCSI_NETLINK=y -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=m -CONFIG_CHR_DEV_ST=m -CONFIG_BLK_DEV_SR=m -CONFIG_CHR_DEV_SG=m -CONFIG_BLK_DEV_BSG=y -CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_ENCLOSURE=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=m -CONFIG_SCSI_SAS_LIBSAS=m -CONFIG_SCSI_SAS_ATA=y -CONFIG_SCSI_SAS_HOST_SMP=y -CONFIG_SCSI_SRP_ATTRS=m -# end of SCSI Transports - -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m -# CONFIG_SCSI_CXGB3_ISCSI is not set -CONFIG_SCSI_CXGB4_ISCSI=m -# CONFIG_SCSI_BNX2_ISCSI is not set -# CONFIG_SCSI_BNX2X_FCOE is not set -CONFIG_BE2ISCSI=m -# CONFIG_BLK_DEV_3W_XXXX_RAID is not set -CONFIG_SCSI_HPSA=m -# CONFIG_SCSI_3W_9XXX is not set -# CONFIG_SCSI_3W_SAS is not set -# CONFIG_SCSI_ACARD is not set -# CONFIG_SCSI_AACRAID is not set -# CONFIG_SCSI_AIC7XXX is not set -# CONFIG_SCSI_AIC79XX is not set -# CONFIG_SCSI_AIC94XX is not set -CONFIG_SCSI_HISI_SAS=m -CONFIG_SCSI_HISI_SAS_PCI=m -# CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE is not set -# CONFIG_SCSI_MVSAS is not set -# CONFIG_SCSI_MVUMI is not set -# CONFIG_SCSI_ADVANSYS is not set -# CONFIG_SCSI_ARCMSR is not set -# CONFIG_SCSI_ESAS2R is not set -# CONFIG_MEGARAID_NEWGEN is not set -# CONFIG_MEGARAID_LEGACY is not set -CONFIG_MEGARAID_SAS=m -CONFIG_SCSI_MPT3SAS=m -CONFIG_SCSI_MPT2SAS_MAX_SGE=128 -CONFIG_SCSI_MPT3SAS_MAX_SGE=128 -CONFIG_SCSI_MPT2SAS=m -CONFIG_SCSI_MPI3MR=m -CONFIG_SCSI_SMARTPQI=m -# CONFIG_SCSI_HPTIOP is not set -# CONFIG_SCSI_BUSLOGIC is not set -# CONFIG_SCSI_MYRB is not set -# CONFIG_SCSI_MYRS is not set -CONFIG_LIBFC=m -CONFIG_LIBFCOE=m -CONFIG_FCOE=m -# CONFIG_SCSI_SNIC is not set -# CONFIG_SCSI_DMX3191D is not set -# CONFIG_SCSI_FDOMAIN_PCI is not set -# CONFIG_SCSI_IPS is not set -# CONFIG_SCSI_INITIO is not 
set -# CONFIG_SCSI_INIA100 is not set -# CONFIG_SCSI_STEX is not set -# CONFIG_SCSI_SYM53C8XX_2 is not set -CONFIG_SCSI_IPR=m -CONFIG_SCSI_IPR_TRACE=y -CONFIG_SCSI_IPR_DUMP=y -# CONFIG_SCSI_QLOGIC_1280 is not set -CONFIG_SCSI_QLA_FC=m -# CONFIG_TCM_QLA2XXX is not set -CONFIG_SCSI_QLA_ISCSI=m -CONFIG_QEDI=m -CONFIG_QEDF=m -CONFIG_SCSI_LPFC=m -# CONFIG_SCSI_LPFC_DEBUG_FS is not set -# CONFIG_SCSI_EFCT is not set -# CONFIG_SCSI_DC395x is not set -# CONFIG_SCSI_AM53C974 is not set -# CONFIG_SCSI_WD719X is not set -CONFIG_SCSI_DEBUG=m -# CONFIG_SCSI_PMCRAID is not set -# CONFIG_SCSI_PM8001 is not set -# CONFIG_SCSI_BFA_FC is not set -CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_CHELSIO_FCOE=m -CONFIG_SCSI_DH=y -CONFIG_SCSI_DH_RDAC=y -CONFIG_SCSI_DH_HP_SW=y -CONFIG_SCSI_DH_EMC=y -CONFIG_SCSI_DH_ALUA=y -# end of SCSI device support - -CONFIG_ATA=m -CONFIG_SATA_HOST=y -CONFIG_PATA_TIMINGS=y -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_FORCE=y -CONFIG_ATA_ACPI=y -# CONFIG_SATA_ZPODD is not set -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=m -CONFIG_SATA_MOBILE_LPM_POLICY=0 -CONFIG_SATA_AHCI_PLATFORM=m -# CONFIG_AHCI_DWC is not set -# CONFIG_AHCI_CEVA is not set -CONFIG_AHCI_XGENE=m -CONFIG_SATA_AHCI_SEATTLE=m -# CONFIG_SATA_INIC162X is not set -# CONFIG_SATA_ACARD_AHCI is not set -# CONFIG_SATA_SIL24 is not set -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -# CONFIG_PDC_ADMA is not set -# CONFIG_SATA_QSTOR is not set -# CONFIG_SATA_SX4 is not set -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -CONFIG_ATA_PIIX=m -# CONFIG_SATA_DWC is not set -# CONFIG_SATA_MV is not set -# CONFIG_SATA_NV is not set -# CONFIG_SATA_PROMISE is not set -# CONFIG_SATA_SIL is not set -# CONFIG_SATA_SIS is not set -# CONFIG_SATA_SVW is not set -# CONFIG_SATA_ULI is not set -# CONFIG_SATA_VIA is not set -# CONFIG_SATA_VITESSE is not set -# CONFIG_SATA_ZHAOXIN is not set - -# -# PATA SFF controllers with BMDMA -# -# CONFIG_PATA_ALI 
is not set -# CONFIG_PATA_AMD is not set -# CONFIG_PATA_ARTOP is not set -# CONFIG_PATA_ATIIXP is not set -# CONFIG_PATA_ATP867X is not set -# CONFIG_PATA_CMD64X is not set -# CONFIG_PATA_CYPRESS is not set -# CONFIG_PATA_EFAR is not set -# CONFIG_PATA_HPT366 is not set -# CONFIG_PATA_HPT37X is not set -# CONFIG_PATA_HPT3X2N is not set -# CONFIG_PATA_HPT3X3 is not set -# CONFIG_PATA_IT8213 is not set -# CONFIG_PATA_IT821X is not set -# CONFIG_PATA_JMICRON is not set -# CONFIG_PATA_MARVELL is not set -# CONFIG_PATA_NETCELL is not set -# CONFIG_PATA_NINJA32 is not set -# CONFIG_PATA_NS87415 is not set -# CONFIG_PATA_OLDPIIX is not set -# CONFIG_PATA_OPTIDMA is not set -# CONFIG_PATA_PDC2027X is not set -# CONFIG_PATA_PDC_OLD is not set -# CONFIG_PATA_RADISYS is not set -# CONFIG_PATA_RDC is not set -# CONFIG_PATA_SCH is not set -# CONFIG_PATA_SERVERWORKS is not set -# CONFIG_PATA_SIL680 is not set -# CONFIG_PATA_SIS is not set -# CONFIG_PATA_TOSHIBA is not set -# CONFIG_PATA_TRIFLEX is not set -# CONFIG_PATA_VIA is not set -# CONFIG_PATA_WINBOND is not set - -# -# PIO-only SFF controllers -# -# CONFIG_PATA_CMD640_PCI is not set -# CONFIG_PATA_MPIIX is not set -# CONFIG_PATA_NS87410 is not set -# CONFIG_PATA_OPTI is not set -# CONFIG_PATA_OF_PLATFORM is not set -# CONFIG_PATA_RZ1000 is not set - -# -# Generic fallback / legacy drivers -# -# CONFIG_PATA_ACPI is not set -CONFIG_ATA_GENERIC=m -# CONFIG_PATA_LEGACY is not set -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_AUTODETECT=y -CONFIG_MD_BITMAP_FILE=y -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m -# CONFIG_MD_MULTIPATH is not set -CONFIG_MD_FAULTY=m -CONFIG_MD_CLUSTER=m -# CONFIG_BCACHE is not set -CONFIG_BLK_DEV_DM_BUILTIN=y -CONFIG_BLK_DEV_DM=m -CONFIG_DM_DEBUG=y -CONFIG_DM_BUFIO=m -# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set -CONFIG_DM_BIO_PRISON=m -CONFIG_DM_PERSISTENT_DATA=m -# CONFIG_DM_UNSTRIPED is not set -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m 
-CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m -CONFIG_DM_CACHE_SMQ=m -CONFIG_DM_WRITECACHE=m -# CONFIG_DM_EBS is not set -CONFIG_DM_ERA=m -# CONFIG_DM_CLONE is not set -CONFIG_DM_MIRROR=m -CONFIG_DM_LOG_USERSPACE=m -CONFIG_DM_RAID=m -CONFIG_DM_ZERO=m -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m -CONFIG_DM_MULTIPATH_ST=m -# CONFIG_DM_MULTIPATH_HST is not set -# CONFIG_DM_MULTIPATH_IOA is not set -CONFIG_DM_DELAY=m -# CONFIG_DM_DUST is not set -CONFIG_DM_UEVENT=y -CONFIG_DM_FLAKEY=m -CONFIG_DM_VERITY=m -# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set -# CONFIG_DM_VERITY_FEC is not set -CONFIG_DM_SWITCH=m -CONFIG_DM_LOG_WRITES=m -CONFIG_DM_INTEGRITY=m -# CONFIG_DM_ZONED is not set -CONFIG_DM_AUDIT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m -CONFIG_TCM_USER2=m -CONFIG_LOOPBACK_TARGET=m -# CONFIG_TCM_FC is not set -CONFIG_ISCSI_TARGET=m -CONFIG_ISCSI_TARGET_CXGB4=m -# CONFIG_REMOTE_TARGET is not set -CONFIG_FUSION=y -CONFIG_FUSION_SPI=m -# CONFIG_FUSION_FC is not set -CONFIG_FUSION_SAS=m -CONFIG_FUSION_MAX_SGE=128 -# CONFIG_FUSION_CTL is not set -CONFIG_FUSION_LOGGING=y - -# -# IEEE 1394 (FireWire) support -# -# CONFIG_FIREWIRE is not set -# CONFIG_FIREWIRE_NOSY is not set -# end of IEEE 1394 (FireWire) support - -CONFIG_NETDEVICES=y -CONFIG_MII=m -CONFIG_NET_CORE=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_WIREGUARD=m -# CONFIG_WIREGUARD_DEBUG is not set -# CONFIG_EQUALIZER is not set -CONFIG_NET_FC=y -CONFIG_IFB=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_RANDOM=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_MACVLAN=m -CONFIG_MACVTAP=m -CONFIG_IPVLAN_L3S=y -CONFIG_IPVLAN=m -CONFIG_IPVTAP=m -CONFIG_VXLAN=m -CONFIG_GENEVE=m -# CONFIG_BAREUDP is not set -# CONFIG_GTP is not set -# CONFIG_AMT is not set -CONFIG_MACSEC=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y -# CONFIG_NETCONSOLE_EXTENDED_LOG is not set 
-CONFIG_NETPOLL=y -CONFIG_NET_POLL_CONTROLLER=y -CONFIG_TUN=m -CONFIG_TAP=m -# CONFIG_TUN_VNET_CROSS_LE is not set -CONFIG_VETH=m -CONFIG_VIRTIO_NET=m -CONFIG_NLMON=m -CONFIG_NET_VRF=m -CONFIG_VSOCKMON=m -# CONFIG_ARCNET is not set -# CONFIG_ATM_DRIVERS is not set -CONFIG_ETHERNET=y -CONFIG_MDIO=m -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ADAPTEC is not set -# CONFIG_NET_VENDOR_AGERE is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_ALTEON is not set -# CONFIG_ALTERA_TSE is not set -CONFIG_NET_VENDOR_AMAZON=y -CONFIG_ENA_ETHERNET=m -CONFIG_NET_VENDOR_AMD=y -# CONFIG_AMD8111_ETH is not set -# CONFIG_PCNET32 is not set -CONFIG_AMD_XGBE=m -# CONFIG_AMD_XGBE_DCB is not set -# CONFIG_PDS_CORE is not set -CONFIG_NET_XGENE=m -CONFIG_NET_XGENE_V2=m -CONFIG_NET_VENDOR_AQUANTIA=y -# CONFIG_AQTION is not set -# CONFIG_NET_VENDOR_ARC is not set -CONFIG_NET_VENDOR_ASIX=y -# CONFIG_SPI_AX88796C is not set -CONFIG_NET_VENDOR_ATHEROS=y -# CONFIG_ATL2 is not set -CONFIG_ATL1=m -CONFIG_ATL1E=m -CONFIG_ATL1C=m -CONFIG_ALX=m -CONFIG_NET_VENDOR_BROADCOM=y -# CONFIG_B44 is not set -# CONFIG_BCMGENET is not set -CONFIG_BNX2=m -# CONFIG_CNIC is not set -CONFIG_TIGON3=m -CONFIG_TIGON3_HWMON=y -CONFIG_BNX2X=m -CONFIG_BNX2X_SRIOV=y -# CONFIG_SYSTEMPORT is not set -CONFIG_BNXT=m -CONFIG_BNXT_SRIOV=y -CONFIG_BNXT_FLOWER_OFFLOAD=y -CONFIG_BNXT_DCB=y -CONFIG_BNXT_HWMON=y -# CONFIG_NET_VENDOR_CADENCE is not set -CONFIG_NET_VENDOR_CAVIUM=y -CONFIG_THUNDER_NIC_PF=m -CONFIG_THUNDER_NIC_VF=m -CONFIG_THUNDER_NIC_BGX=m -CONFIG_THUNDER_NIC_RGX=m -CONFIG_CAVIUM_PTP=y -CONFIG_LIQUIDIO_CORE=m -CONFIG_LIQUIDIO=m -CONFIG_LIQUIDIO_VF=m -CONFIG_NET_VENDOR_CHELSIO=y -# CONFIG_CHELSIO_T1 is not set -# CONFIG_CHELSIO_T3 is not set -CONFIG_CHELSIO_T4=m -# CONFIG_CHELSIO_T4_DCB is not set -CONFIG_CHELSIO_T4VF=m -CONFIG_CHELSIO_LIB=m -CONFIG_CHELSIO_INLINE_CRYPTO=y -CONFIG_CHELSIO_IPSEC_INLINE=m -# CONFIG_CHELSIO_TLS_DEVICE is not set -# CONFIG_NET_VENDOR_CISCO is not set -# 
CONFIG_NET_VENDOR_CORTINA is not set -CONFIG_NET_VENDOR_DAVICOM=y -# CONFIG_DM9051 is not set -CONFIG_DNET=m -CONFIG_NET_VENDOR_BZWX=y -CONFIG_NCE=m -CONFIG_NE6X=m -CONFIG_NE6XVF=m -# CONFIG_NET_VENDOR_DEC is not set -# CONFIG_NET_VENDOR_DLINK is not set -# CONFIG_NET_VENDOR_EMULEX is not set -CONFIG_NET_VENDOR_ENGLEDER=y -# CONFIG_TSNEP is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -CONFIG_NET_VENDOR_FUNGIBLE=y -# CONFIG_FUN_ETH is not set -CONFIG_NET_VENDOR_GOOGLE=y -CONFIG_GVE=m -CONFIG_NET_VENDOR_HISILICON=y -# CONFIG_HIX5HD2_GMAC is not set -# CONFIG_HISI_FEMAC is not set -# CONFIG_HIP04_ETH is not set -CONFIG_HNS_MDIO=m -CONFIG_HNS=m -CONFIG_HNS_DSAF=m -CONFIG_HNS_ENET=m -CONFIG_HNS3=m -CONFIG_HNS3_HCLGE=m -CONFIG_HNS3_DCB=y -CONFIG_HNS3_HCLGEVF=m -CONFIG_HNS3_ENET=m -CONFIG_NET_VENDOR_HUAWEI=y -CONFIG_HINIC=m -# CONFIG_NET_VENDOR_I825XX is not set -CONFIG_NET_VENDOR_INTEL=y -# CONFIG_E100 is not set -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_IGB=m -CONFIG_IGB_HWMON=y -CONFIG_IGBVF=m -CONFIG_IXGBE=m -CONFIG_IXGBE_HWMON=y -CONFIG_IXGBE_DCB=y -CONFIG_IXGBE_IPSEC=y -CONFIG_IXGBEVF=m -CONFIG_IXGBEVF_IPSEC=y -CONFIG_I40E=m -# CONFIG_I40E_DCB is not set -CONFIG_IAVF=m -CONFIG_I40EVF=m -CONFIG_ICE=m -CONFIG_ICE_SWITCHDEV=y -CONFIG_FM10K=m -CONFIG_IGC=m -# CONFIG_JME is not set -CONFIG_NET_VENDOR_ADI=y -# CONFIG_ADIN1110 is not set -CONFIG_NET_VENDOR_LITEX=y -# CONFIG_LITEX_LITEETH is not set -# CONFIG_NET_VENDOR_MARVELL is not set -CONFIG_NET_VENDOR_MELLANOX=y -CONFIG_MLX4_EN=m -CONFIG_MLX4_EN_DCB=y -CONFIG_MLX4_CORE=m -CONFIG_MLX4_DEBUG=y -# CONFIG_MLX4_CORE_GEN2 is not set -CONFIG_MLX5_CORE=m -CONFIG_MLX5_FPGA=y -CONFIG_MLX5_CORE_EN=y -CONFIG_MLX5_EN_ARFS=y -CONFIG_MLX5_EN_RXNFC=y -CONFIG_MLX5_MPFS=y -CONFIG_MLX5_ESWITCH=y -CONFIG_MLX5_BRIDGE=y -CONFIG_MLX5_CLS_ACT=y -CONFIG_MLX5_TC_CT=y -CONFIG_MLX5_TC_SAMPLE=y -CONFIG_MLX5_CORE_EN_DCB=y -CONFIG_MLX5_CORE_IPOIB=y -# CONFIG_MLX5_MACSEC is not set -# CONFIG_MLX5_EN_IPSEC is not set -# CONFIG_MLX5_EN_TLS is not 
set -CONFIG_MLX5_SW_STEERING=y -# CONFIG_MLX5_SF is not set -CONFIG_MLXSW_CORE=m -CONFIG_MLXSW_CORE_HWMON=y -CONFIG_MLXSW_CORE_THERMAL=y -CONFIG_MLXSW_PCI=m -CONFIG_MLXSW_I2C=m -CONFIG_MLXSW_SPECTRUM=m -CONFIG_MLXSW_SPECTRUM_DCB=y -CONFIG_MLXSW_MINIMAL=m -CONFIG_MLXFW=m -# CONFIG_MLXBF_GIGE is not set -# CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROCHIP is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -CONFIG_NET_VENDOR_MICROSOFT=y -CONFIG_NET_VENDOR_MYRI=y -# CONFIG_MYRI10GE is not set -# CONFIG_FEALNX is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETERION is not set -CONFIG_NET_VENDOR_NETRONOME=y -CONFIG_NFP=m -CONFIG_NFP_APP_FLOWER=y -CONFIG_NFP_APP_ABM_NIC=y -CONFIG_NFP_NET_IPSEC=y -# CONFIG_NFP_DEBUG is not set -# CONFIG_NET_VENDOR_NVIDIA is not set -CONFIG_NET_VENDOR_OKI=y -CONFIG_ETHOC=m -# CONFIG_NET_VENDOR_PACKET_ENGINES is not set -CONFIG_NET_VENDOR_PENSANDO=y -# CONFIG_IONIC is not set -CONFIG_NET_VENDOR_QLOGIC=y -CONFIG_QLA3XXX=m -# CONFIG_QLCNIC is not set -CONFIG_NETXEN_NIC=m -CONFIG_QED=m -CONFIG_QED_LL2=y -CONFIG_QED_SRIOV=y -CONFIG_QEDE=m -CONFIG_QED_RDMA=y -CONFIG_QED_ISCSI=y -CONFIG_QED_FCOE=y -CONFIG_QED_OOO=y -# CONFIG_NET_VENDOR_BROCADE is not set -CONFIG_NET_VENDOR_QUALCOMM=y -# CONFIG_QCA7000_SPI is not set -CONFIG_QCOM_EMAC=m -# CONFIG_RMNET is not set -# CONFIG_NET_VENDOR_RDC is not set -CONFIG_NET_VENDOR_REALTEK=y -CONFIG_8139CP=m -CONFIG_8139TOO=m -# CONFIG_8139TOO_PIO is not set -# CONFIG_8139TOO_TUNE_TWISTER is not set -CONFIG_8139TOO_8129=y -# CONFIG_8139_OLD_RX_RESET is not set -CONFIG_R8169=m -# CONFIG_NET_VENDOR_RENESAS is not set -CONFIG_NET_VENDOR_ROCKER=y -CONFIG_ROCKER=m -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SILAN is not set -# CONFIG_NET_VENDOR_SIS is not set -CONFIG_NET_VENDOR_SOLARFLARE=y -# CONFIG_SFC is not set -# CONFIG_SFC_FALCON is not set -# CONFIG_SFC_SIENA is not set -# 
CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_TEHUTI is not set -# CONFIG_NET_VENDOR_TI is not set -CONFIG_NET_VENDOR_VERTEXCOM=y -# CONFIG_MSE102X is not set -# CONFIG_NET_VENDOR_VIA is not set -CONFIG_NET_VENDOR_WANGXUN=y -CONFIG_LIBWX=m -CONFIG_NGBE=m -CONFIG_TXGBE=m -# CONFIG_NET_VENDOR_WIZNET is not set -CONFIG_NET_VENDOR_XILINX=y -# CONFIG_XILINX_EMACLITE is not set -# CONFIG_XILINX_AXI_EMAC is not set -# CONFIG_XILINX_LL_TEMAC is not set -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set -# CONFIG_NET_SB1000 is not set -CONFIG_PHYLINK=m -CONFIG_PHYLIB=y -CONFIG_SWPHY=y -CONFIG_LED_TRIGGER_PHY=y -CONFIG_PHYLIB_LEDS=y -CONFIG_FIXED_PHY=y -CONFIG_SFP=m - -# -# MII PHY device drivers -# -CONFIG_AMD_PHY=m -# CONFIG_ADIN_PHY is not set -# CONFIG_ADIN1100_PHY is not set -CONFIG_AQUANTIA_PHY=m -CONFIG_AX88796B_PHY=m -CONFIG_BROADCOM_PHY=m -# CONFIG_BCM54140_PHY is not set -CONFIG_BCM7XXX_PHY=m -# CONFIG_BCM84881_PHY is not set -CONFIG_BCM87XX_PHY=m -CONFIG_BCM_NET_PHYLIB=m -CONFIG_BCM_NET_PHYPTP=m -CONFIG_CICADA_PHY=m -CONFIG_CORTINA_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_ICPLUS_PHY=m -CONFIG_LXT_PHY=m -CONFIG_INTEL_XWAY_PHY=m -CONFIG_LSI_ET1011C_PHY=m -CONFIG_MARVELL_PHY=m -CONFIG_MARVELL_10G_PHY=m -# CONFIG_MARVELL_88Q2XXX_PHY is not set -# CONFIG_MARVELL_88X2222_PHY is not set -# CONFIG_MAXLINEAR_GPHY is not set -# CONFIG_MEDIATEK_GE_PHY is not set -CONFIG_MICREL_PHY=m -# CONFIG_MICROCHIP_T1S_PHY is not set -CONFIG_MICROCHIP_PHY=m -CONFIG_MICROCHIP_T1_PHY=m -CONFIG_MICROSEMI_PHY=m -# CONFIG_MOTORCOMM_PHY is not set -CONFIG_NATIONAL_PHY=m -# CONFIG_NXP_CBTX_PHY is not set -# CONFIG_NXP_C45_TJA11XX_PHY is not set -# CONFIG_NXP_TJA11XX_PHY is not set -# CONFIG_NCN26000_PHY is not set -CONFIG_AT803X_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_REALTEK_PHY=m -CONFIG_RENESAS_PHY=m -CONFIG_ROCKCHIP_PHY=m 
-CONFIG_SMSC_PHY=m -CONFIG_STE10XP=m -CONFIG_TERANETICS_PHY=m -CONFIG_DP83822_PHY=m -CONFIG_DP83TC811_PHY=m -CONFIG_DP83848_PHY=m -CONFIG_DP83867_PHY=m -# CONFIG_DP83869_PHY is not set -# CONFIG_DP83TD510_PHY is not set -CONFIG_VITESSE_PHY=m -CONFIG_XILINX_GMII2RGMII=m -CONFIG_MICREL_KS8995MA=m -# CONFIG_PSE_CONTROLLER is not set -CONFIG_MDIO_DEVICE=y -CONFIG_MDIO_BUS=y -CONFIG_FWNODE_MDIO=y -CONFIG_OF_MDIO=y -CONFIG_ACPI_MDIO=y -CONFIG_MDIO_DEVRES=y -CONFIG_MDIO_XGENE=m -CONFIG_MDIO_BITBANG=m -CONFIG_MDIO_BCM_UNIMAC=m -CONFIG_MDIO_CAVIUM=m -CONFIG_MDIO_GPIO=m -CONFIG_MDIO_HISI_FEMAC=m -CONFIG_MDIO_I2C=m -# CONFIG_MDIO_MVUSB is not set -CONFIG_MDIO_MSCC_MIIM=m -CONFIG_MDIO_OCTEON=m -# CONFIG_MDIO_IPQ4019 is not set -# CONFIG_MDIO_IPQ8064 is not set -CONFIG_MDIO_THUNDER=m - -# -# MDIO Multiplexers -# -# CONFIG_MDIO_BUS_MUX_GPIO is not set -# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set -# CONFIG_MDIO_BUS_MUX_MMIOREG is not set - -# -# PCS device drivers -# -CONFIG_PCS_XPCS=m -# end of PCS device drivers - -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOATM=m -CONFIG_PPPOE=m -# CONFIG_PPPOE_HASH_BITS_1 is not set -# CONFIG_PPPOE_HASH_BITS_2 is not set -CONFIG_PPPOE_HASH_BITS_4=y -# CONFIG_PPPOE_HASH_BITS_8 is not set -CONFIG_PPPOE_HASH_BITS=4 -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_SLIP=m -CONFIG_SLHC=m -CONFIG_SLIP_COMPRESSED=y -CONFIG_SLIP_SMART=y -# CONFIG_SLIP_MODE_SLIP6 is not set -CONFIG_USB_NET_DRIVERS=y -CONFIG_USB_CATC=m -CONFIG_USB_KAWETH=m -CONFIG_USB_PEGASUS=m -CONFIG_USB_RTL8150=m -CONFIG_USB_RTL8152=m -CONFIG_USB_LAN78XX=m -CONFIG_USB_USBNET=m -CONFIG_USB_NET_AX8817X=m -CONFIG_USB_NET_AX88179_178A=m -CONFIG_USB_NET_CDCETHER=m -CONFIG_USB_NET_CDC_EEM=m -CONFIG_USB_NET_CDC_NCM=m -CONFIG_USB_NET_HUAWEI_CDC_NCM=m -CONFIG_USB_NET_CDC_MBIM=m -CONFIG_USB_NET_DM9601=m -CONFIG_USB_NET_SR9700=m -# CONFIG_USB_NET_SR9800 is not set 
-CONFIG_USB_NET_SMSC75XX=m -CONFIG_USB_NET_SMSC95XX=m -CONFIG_USB_NET_GL620A=m -CONFIG_USB_NET_NET1080=m -CONFIG_USB_NET_PLUSB=m -CONFIG_USB_NET_MCS7830=m -CONFIG_USB_NET_RNDIS_HOST=m -CONFIG_USB_NET_CDC_SUBSET_ENABLE=m -CONFIG_USB_NET_CDC_SUBSET=m -CONFIG_USB_ALI_M5632=y -CONFIG_USB_AN2720=y -CONFIG_USB_BELKIN=y -CONFIG_USB_ARMLINUX=y -CONFIG_USB_EPSON2888=y -CONFIG_USB_KC2190=y -CONFIG_USB_NET_ZAURUS=m -CONFIG_USB_NET_CX82310_ETH=m -CONFIG_USB_NET_KALMIA=m -CONFIG_USB_NET_QMI_WWAN=m -CONFIG_USB_HSO=m -CONFIG_USB_NET_INT51X1=m -CONFIG_USB_IPHETH=m -CONFIG_USB_SIERRA_NET=m -CONFIG_USB_VL600=m -CONFIG_USB_NET_CH9200=m -# CONFIG_USB_NET_AQC111 is not set -CONFIG_USB_RTL8153_ECM=m -# CONFIG_WLAN is not set -CONFIG_WAN=y -CONFIG_HDLC=m -CONFIG_HDLC_RAW=m -# CONFIG_HDLC_RAW_ETH is not set -CONFIG_HDLC_CISCO=m -CONFIG_HDLC_FR=m -CONFIG_HDLC_PPP=m - -# -# X.25/LAPB support is disabled -# -# CONFIG_PCI200SYN is not set -# CONFIG_WANXL is not set -# CONFIG_PC300TOO is not set -# CONFIG_FARSYNC is not set -CONFIG_IEEE802154_DRIVERS=m -# CONFIG_IEEE802154_FAKELB is not set -# CONFIG_IEEE802154_AT86RF230 is not set -# CONFIG_IEEE802154_MRF24J40 is not set -# CONFIG_IEEE802154_CC2520 is not set -# CONFIG_IEEE802154_ATUSB is not set -# CONFIG_IEEE802154_ADF7242 is not set -# CONFIG_IEEE802154_CA8210 is not set -# CONFIG_IEEE802154_MCR20A is not set -# CONFIG_IEEE802154_HWSIM is not set - -# -# Wireless WAN -# -# CONFIG_WWAN is not set -# end of Wireless WAN - -# CONFIG_VMXNET3 is not set -# CONFIG_FUJITSU_ES is not set -CONFIG_NETDEVSIM=m -CONFIG_NET_FAILOVER=m -# CONFIG_ISDN is not set - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_LEDS=y -CONFIG_INPUT_FF_MEMLESS=m -CONFIG_INPUT_SPARSEKMAP=m -# CONFIG_INPUT_MATRIXKMAP is not set - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -# CONFIG_INPUT_JOYDEV is not set -CONFIG_INPUT_EVDEV=y -# 
CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -CONFIG_INPUT_KEYBOARD=y -# CONFIG_KEYBOARD_ADP5588 is not set -# CONFIG_KEYBOARD_ADP5589 is not set -# CONFIG_KEYBOARD_ATKBD is not set -# CONFIG_KEYBOARD_QT1050 is not set -# CONFIG_KEYBOARD_QT1070 is not set -# CONFIG_KEYBOARD_QT2160 is not set -# CONFIG_KEYBOARD_DLINK_DIR685 is not set -# CONFIG_KEYBOARD_LKKBD is not set -CONFIG_KEYBOARD_GPIO=m -# CONFIG_KEYBOARD_GPIO_POLLED is not set -# CONFIG_KEYBOARD_TCA6416 is not set -# CONFIG_KEYBOARD_TCA8418 is not set -# CONFIG_KEYBOARD_MATRIX is not set -# CONFIG_KEYBOARD_LM8323 is not set -# CONFIG_KEYBOARD_LM8333 is not set -# CONFIG_KEYBOARD_MAX7359 is not set -# CONFIG_KEYBOARD_MCS is not set -# CONFIG_KEYBOARD_MPR121 is not set -# CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# CONFIG_KEYBOARD_PINEPHONE is not set -# CONFIG_KEYBOARD_SAMSUNG is not set -# CONFIG_KEYBOARD_STOWAWAY is not set -# CONFIG_KEYBOARD_SUNKBD is not set -# CONFIG_KEYBOARD_OMAP4 is not set -# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set -# CONFIG_KEYBOARD_XTKBD is not set -# CONFIG_KEYBOARD_CAP11XX is not set -# CONFIG_KEYBOARD_BCM is not set -# CONFIG_KEYBOARD_CYPRESS_SF is not set -CONFIG_INPUT_MOUSE=y -# CONFIG_MOUSE_PS2 is not set -# CONFIG_MOUSE_SERIAL is not set -# CONFIG_MOUSE_APPLETOUCH is not set -# CONFIG_MOUSE_BCM5974 is not set -# CONFIG_MOUSE_CYAPA is not set -CONFIG_MOUSE_ELAN_I2C=m -CONFIG_MOUSE_ELAN_I2C_I2C=y -CONFIG_MOUSE_ELAN_I2C_SMBUS=y -# CONFIG_MOUSE_VSXXXAA is not set -# CONFIG_MOUSE_GPIO is not set -CONFIG_MOUSE_SYNAPTICS_I2C=m -CONFIG_MOUSE_SYNAPTICS_USB=m -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -# CONFIG_INPUT_MISC is not set -CONFIG_RMI4_CORE=m -CONFIG_RMI4_I2C=m -CONFIG_RMI4_SPI=m -CONFIG_RMI4_SMB=m -CONFIG_RMI4_F03=y -CONFIG_RMI4_F03_SERIO=m -CONFIG_RMI4_2D_SENSOR=y -CONFIG_RMI4_F11=y -CONFIG_RMI4_F12=y -CONFIG_RMI4_F30=y -CONFIG_RMI4_F34=y -# CONFIG_RMI4_F3A is 
not set -CONFIG_RMI4_F55=y - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -CONFIG_SERIO_SERPORT=y -CONFIG_SERIO_AMBAKMI=y -# CONFIG_SERIO_PCIPS2 is not set -CONFIG_SERIO_LIBPS2=y -CONFIG_SERIO_RAW=m -CONFIG_SERIO_ALTERA_PS2=m -# CONFIG_SERIO_PS2MULT is not set -CONFIG_SERIO_ARC_PS2=m -# CONFIG_SERIO_APBPS2 is not set -# CONFIG_SERIO_GPIO_PS2 is not set -# CONFIG_USERIO is not set -# CONFIG_GAMEPORT is not set -# end of Hardware I/O ports -# end of Input device support - -# -# Character devices -# -CONFIG_TTY=y -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_VT_CONSOLE_SLEEP=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -# CONFIG_LEGACY_PTYS is not set -CONFIG_LEGACY_TIOCSTI=y -CONFIG_LDISC_AUTOLOAD=y - -# -# Serial drivers -# -CONFIG_SERIAL_EARLYCON=y -CONFIG_SERIAL_8250=y -# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set -CONFIG_SERIAL_8250_PNP=y -CONFIG_SERIAL_8250_16550A_VARIANTS=y -# CONFIG_SERIAL_8250_FINTEK is not set -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_DMA=y -CONFIG_SERIAL_8250_PCILIB=y -CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_EXAR=y -CONFIG_SERIAL_8250_NR_UARTS=32 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -# CONFIG_SERIAL_8250_PCI1XXXX is not set -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_SERIAL_8250_DETECT_IRQ is not set -CONFIG_SERIAL_8250_RSA=y -CONFIG_SERIAL_8250_DWLIB=y -CONFIG_SERIAL_8250_FSL=y -CONFIG_SERIAL_8250_DW=y -CONFIG_SERIAL_8250_RT288X=y -CONFIG_SERIAL_8250_PERICOM=y -CONFIG_SERIAL_OF_PLATFORM=y - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_AMBA_PL010 is not set -CONFIG_SERIAL_AMBA_PL011=y -CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set -# CONFIG_SERIAL_KGDB_NMI is not set -# CONFIG_SERIAL_MAX3100 is not set -# CONFIG_SERIAL_MAX310X is not set -# CONFIG_SERIAL_UARTLITE is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_CONSOLE_POLL=y -# 
CONFIG_SERIAL_JSM is not set -# CONFIG_SERIAL_MSM is not set -# CONFIG_SERIAL_SIFIVE is not set -# CONFIG_SERIAL_SCCNXP is not set -# CONFIG_SERIAL_SC16IS7XX is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -# CONFIG_SERIAL_XILINX_PS_UART is not set -# CONFIG_SERIAL_ARC is not set -# CONFIG_SERIAL_RP2 is not set -# CONFIG_SERIAL_FSL_LPUART is not set -# CONFIG_SERIAL_FSL_LINFLEXUART is not set -# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set -# CONFIG_SERIAL_SPRD is not set -# end of Serial drivers - -CONFIG_SERIAL_MCTRL_GPIO=y -CONFIG_SERIAL_NONSTANDARD=y -# CONFIG_MOXA_INTELLIO is not set -# CONFIG_MOXA_SMARTIO is not set -CONFIG_N_HDLC=m -CONFIG_N_GSM=m -# CONFIG_NOZOMI is not set -# CONFIG_NULL_TTY is not set -CONFIG_HVC_DRIVER=y -# CONFIG_HVC_DCC is not set -# CONFIG_SERIAL_DEV_BUS is not set -CONFIG_VIRTIO_CONSOLE=m -CONFIG_IPMI_HANDLER=m -CONFIG_IPMI_DMI_DECODE=y -CONFIG_IPMI_PLAT_DATA=y -CONFIG_IPMI_PANIC_EVENT=y -CONFIG_IPMI_PANIC_STRING=y -CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m -CONFIG_IPMI_SSIF=m -# CONFIG_IPMI_IPMB is not set -CONFIG_IPMI_WATCHDOG=m -CONFIG_IPMI_POWEROFF=m -# CONFIG_SSIF_IPMI_BMC is not set -# CONFIG_IPMB_DEVICE_INTERFACE is not set -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=m -# CONFIG_HW_RANDOM_BA431 is not set -CONFIG_HW_RANDOM_VIRTIO=m -CONFIG_HW_RANDOM_HISI=y -CONFIG_HW_RANDOM_HISTB=y -CONFIG_HW_RANDOM_XGENE=m -CONFIG_HW_RANDOM_CAVIUM=m -# CONFIG_HW_RANDOM_CCTRNG is not set -# CONFIG_HW_RANDOM_XIPHERA is not set -CONFIG_HW_RANDOM_ARM_SMCCC_TRNG=y -CONFIG_HW_RANDOM_CN10K=y -# CONFIG_APPLICOM is not set -CONFIG_DEVMEM=y -# CONFIG_DEVPORT is not set -CONFIG_TCG_TPM=y -CONFIG_HW_RANDOM_TPM=y -CONFIG_TCG_TIS_CORE=y -CONFIG_TCG_TIS=y -CONFIG_TCG_TIS_SPI=m -# CONFIG_TCG_TIS_SPI_CR50 is not set -# CONFIG_TCG_TIS_I2C is not set -# CONFIG_TCG_TIS_I2C_CR50 is not set -# CONFIG_TCG_TIS_I2C_ATMEL is not set -# CONFIG_TCG_TIS_I2C_INFINEON is not set -# CONFIG_TCG_TIS_I2C_NUVOTON is not 
set -CONFIG_TCG_ATMEL=m -# CONFIG_TCG_INFINEON is not set -CONFIG_TCG_CRB=y -# CONFIG_TCG_VTPM_PROXY is not set -# CONFIG_TCG_TIS_ST33ZP24_I2C is not set -# CONFIG_TCG_TIS_ST33ZP24_SPI is not set -# CONFIG_XILLYBUS is not set -# CONFIG_XILLYUSB is not set -# end of Character devices - -# -# I2C support -# -CONFIG_I2C=y -CONFIG_ACPI_I2C_OPREGION=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=m -CONFIG_I2C_MUX=m - -# -# Multiplexer I2C Chip support -# -CONFIG_I2C_ARB_GPIO_CHALLENGE=m -CONFIG_I2C_MUX_GPIO=m -# CONFIG_I2C_MUX_GPMUX is not set -# CONFIG_I2C_MUX_LTC4306 is not set -CONFIG_I2C_MUX_PCA9541=m -CONFIG_I2C_MUX_PCA954x=m -CONFIG_I2C_MUX_PINCTRL=m -# CONFIG_I2C_MUX_REG is not set -# CONFIG_I2C_DEMUX_PINCTRL is not set -CONFIG_I2C_MUX_MLXCPLD=m -# end of Multiplexer I2C Chip support - -# CONFIG_I2C_HELPER_AUTO is not set -CONFIG_I2C_SMBUS=m - -# -# I2C Algorithms -# -CONFIG_I2C_ALGOBIT=m -CONFIG_I2C_ALGOPCF=m -CONFIG_I2C_ALGOPCA=m -# end of I2C Algorithms - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set -# CONFIG_I2C_AMD756 is not set -# CONFIG_I2C_AMD8111 is not set -# CONFIG_I2C_AMD_MP2 is not set -# CONFIG_I2C_HIX5HD2 is not set -# CONFIG_I2C_I801 is not set -# CONFIG_I2C_ISCH is not set -# CONFIG_I2C_PIIX4 is not set -CONFIG_I2C_NFORCE2=m -# CONFIG_I2C_NVIDIA_GPU is not set -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -# CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_VIA is not set -# CONFIG_I2C_VIAPRO is not set -# CONFIG_I2C_ZHAOXIN is not set - -# -# ACPI drivers -# -# CONFIG_I2C_SCMI is not set -# CONFIG_I2C_ZHAOXIN_SMBUS is not set - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_CADENCE is not set -# CONFIG_I2C_CBUS_GPIO is not set -CONFIG_I2C_DESIGNWARE_CORE=m -# CONFIG_I2C_DESIGNWARE_SLAVE is not set -CONFIG_I2C_DESIGNWARE_PLATFORM=m -# 
CONFIG_I2C_DESIGNWARE_PCI is not set -# CONFIG_I2C_EMEV2 is not set -CONFIG_I2C_GPIO=m -# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set -CONFIG_I2C_HISI=m -# CONFIG_I2C_NOMADIK is not set -# CONFIG_I2C_OCORES is not set -CONFIG_I2C_PCA_PLATFORM=m -# CONFIG_I2C_QCOM_CCI is not set -CONFIG_I2C_QUP=m -# CONFIG_I2C_RK3X is not set -CONFIG_I2C_SIMTEC=m -CONFIG_I2C_VERSATILE=m -CONFIG_I2C_THUNDERX=m -# CONFIG_I2C_XILINX is not set -CONFIG_I2C_XLP9XX=m - -# -# External I2C/SMBus adapter drivers -# -CONFIG_I2C_DIOLAN_U2C=m -# CONFIG_I2C_CP2615 is not set -# CONFIG_I2C_PCI1XXXX is not set -# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -# CONFIG_I2C_TAOS_EVM is not set -CONFIG_I2C_TINY_USB=m - -# -# Other I2C/SMBus bus drivers -# -# CONFIG_I2C_MLXCPLD is not set -CONFIG_I2C_XGENE_SLIMPRO=m -# CONFIG_I2C_VIRTIO is not set -# end of I2C Hardware Bus support - -CONFIG_I2C_STUB=m -CONFIG_I2C_SLAVE=y -CONFIG_I2C_SLAVE_EEPROM=m -# CONFIG_I2C_SLAVE_TESTUNIT is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -# end of I2C support - -# CONFIG_I3C is not set -CONFIG_SPI=y -# CONFIG_SPI_DEBUG is not set -CONFIG_SPI_MASTER=y -# CONFIG_SPI_MEM is not set - -# -# SPI Master Controller Drivers -# -# CONFIG_SPI_ALTERA is not set -# CONFIG_SPI_AXI_SPI_ENGINE is not set -# CONFIG_SPI_BITBANG is not set -CONFIG_SPI_CADENCE=m -# CONFIG_SPI_CADENCE_QUADSPI is not set -CONFIG_SPI_DESIGNWARE=m -# CONFIG_SPI_DW_DMA is not set -# CONFIG_SPI_DW_PCI is not set -CONFIG_SPI_DW_MMIO=m -CONFIG_SPI_HISI_KUNPENG=m -CONFIG_SPI_HISI_SFC_V3XX=m -# CONFIG_SPI_GPIO is not set -# CONFIG_SPI_FSL_SPI is not set -# CONFIG_SPI_MICROCHIP_CORE is not set -# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set -# CONFIG_SPI_OC_TINY is not set -# CONFIG_SPI_PCI1XXXX is not set -CONFIG_SPI_PL022=m -# CONFIG_SPI_PXA2XX is not set -# CONFIG_SPI_QCOM_QSPI is not set -CONFIG_SPI_QUP=y -# CONFIG_SPI_SC18IS602 is not set -# CONFIG_SPI_SIFIVE is not set -# CONFIG_SPI_MXIC is not set 
-# CONFIG_SPI_THUNDERX is not set -# CONFIG_SPI_XCOMM is not set -# CONFIG_SPI_XILINX is not set -CONFIG_SPI_XLP=m -# CONFIG_SPI_AMD is not set - -# -# SPI Multiplexer support -# -# CONFIG_SPI_MUX is not set - -# -# SPI Protocol Masters -# -# CONFIG_SPI_SPIDEV is not set -# CONFIG_SPI_LOOPBACK_TEST is not set -# CONFIG_SPI_TLE62X0 is not set -# CONFIG_SPI_SLAVE is not set -CONFIG_SPI_DYNAMIC=y -# CONFIG_SPMI is not set -# CONFIG_HSI is not set -CONFIG_PPS=y -# CONFIG_PPS_DEBUG is not set - -# -# PPS clients support -# -# CONFIG_PPS_CLIENT_KTIMER is not set -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PPS_CLIENT_GPIO=m - -# -# PPS generators support -# - -# -# PTP clock support -# -CONFIG_PTP_1588_CLOCK=y -CONFIG_PTP_1588_CLOCK_OPTIONAL=y -CONFIG_DP83640_PHY=m -# CONFIG_PTP_1588_CLOCK_INES is not set -CONFIG_PTP_1588_CLOCK_KVM=y -# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set -# CONFIG_PTP_1588_CLOCK_IDTCM is not set -# CONFIG_PTP_1588_CLOCK_MOCK is not set -# CONFIG_PTP_1588_CLOCK_OCP is not set -# end of PTP clock support - -CONFIG_PINCTRL=y -CONFIG_PINMUX=y -CONFIG_PINCONF=y -CONFIG_GENERIC_PINCONF=y -# CONFIG_DEBUG_PINCTRL is not set -# CONFIG_PINCTRL_AMD is not set -# CONFIG_PINCTRL_CY8C95X0 is not set -# CONFIG_PINCTRL_MCP23S08 is not set -# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set -# CONFIG_PINCTRL_OCELOT is not set -# CONFIG_PINCTRL_SINGLE is not set -# CONFIG_PINCTRL_STMFX is not set -# CONFIG_PINCTRL_SX150X is not set -CONFIG_PINCTRL_MSM=y -# CONFIG_PINCTRL_IPQ5018 is not set -# CONFIG_PINCTRL_IPQ5332 is not set -# CONFIG_PINCTRL_IPQ8074 is not set -# CONFIG_PINCTRL_IPQ6018 is not set -# CONFIG_PINCTRL_IPQ9574 is not set -# CONFIG_PINCTRL_MDM9607 is not set -# CONFIG_PINCTRL_MSM8916 is not set -# CONFIG_PINCTRL_MSM8953 is not set -# CONFIG_PINCTRL_MSM8976 is not set -# CONFIG_PINCTRL_MSM8994 is not set -# CONFIG_PINCTRL_MSM8996 is not set -# CONFIG_PINCTRL_MSM8998 is not set -# CONFIG_PINCTRL_QCM2290 is not set -# CONFIG_PINCTRL_QCS404 is not set 
-CONFIG_PINCTRL_QDF2XXX=y -# CONFIG_PINCTRL_QDU1000 is not set -# CONFIG_PINCTRL_SA8775P is not set -# CONFIG_PINCTRL_SC7180 is not set -# CONFIG_PINCTRL_SC7280 is not set -# CONFIG_PINCTRL_SC8180X is not set -# CONFIG_PINCTRL_SC8280XP is not set -# CONFIG_PINCTRL_SDM660 is not set -# CONFIG_PINCTRL_SDM670 is not set -# CONFIG_PINCTRL_SDM845 is not set -# CONFIG_PINCTRL_SDX75 is not set -# CONFIG_PINCTRL_SM6115 is not set -# CONFIG_PINCTRL_SM6125 is not set -# CONFIG_PINCTRL_SM6350 is not set -# CONFIG_PINCTRL_SM6375 is not set -# CONFIG_PINCTRL_SM7150 is not set -# CONFIG_PINCTRL_SM8150 is not set -# CONFIG_PINCTRL_SM8250 is not set -# CONFIG_PINCTRL_SM8350 is not set -# CONFIG_PINCTRL_SM8450 is not set -# CONFIG_PINCTRL_SM8550 is not set -# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set -# CONFIG_PINCTRL_LPASS_LPI is not set - -# -# Renesas pinctrl drivers -# -# end of Renesas pinctrl drivers - -CONFIG_GPIOLIB=y -CONFIG_GPIOLIB_FASTPATH_LIMIT=512 -CONFIG_OF_GPIO=y -CONFIG_GPIO_ACPI=y -CONFIG_GPIOLIB_IRQCHIP=y -# CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_CDEV=y -CONFIG_GPIO_CDEV_V1=y -CONFIG_GPIO_GENERIC=m - -# -# Memory mapped GPIO drivers -# -# CONFIG_GPIO_74XX_MMIO is not set -# CONFIG_GPIO_ALTERA is not set -CONFIG_GPIO_AMDPT=m -# CONFIG_GPIO_CADENCE is not set -CONFIG_GPIO_DWAPB=m -# CONFIG_GPIO_EXAR is not set -# CONFIG_GPIO_FTGPIO010 is not set -CONFIG_GPIO_GENERIC_PLATFORM=m -# CONFIG_GPIO_GRGPIO is not set -CONFIG_GPIO_HISI=m -# CONFIG_GPIO_HLWD is not set -# CONFIG_GPIO_LOGICVC is not set -# CONFIG_GPIO_MB86S7X is not set -CONFIG_GPIO_PL061=y -# CONFIG_GPIO_SIFIVE is not set -# CONFIG_GPIO_SYSCON is not set -# CONFIG_GPIO_THUNDERX is not set -CONFIG_GPIO_XGENE=y -CONFIG_GPIO_XGENE_SB=m -# CONFIG_GPIO_XILINX is not set -CONFIG_GPIO_XLP=m -# CONFIG_GPIO_AMD_FCH is not set -# end of Memory mapped GPIO drivers - -# -# I2C GPIO expanders -# -# CONFIG_GPIO_ADNP is not set -# CONFIG_GPIO_FXL6408 is not set -# CONFIG_GPIO_DS4520 is not set -# CONFIG_GPIO_GW_PLD is not 
set -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not set -# CONFIG_GPIO_PCA9570 is not set -# CONFIG_GPIO_PCF857X is not set -# CONFIG_GPIO_TPIC2810 is not set -# end of I2C GPIO expanders - -# -# MFD GPIO expanders -# -# end of MFD GPIO expanders - -# -# PCI GPIO expanders -# -# CONFIG_GPIO_BT8XX is not set -# CONFIG_GPIO_PCI_IDIO_16 is not set -# CONFIG_GPIO_PCIE_IDIO_24 is not set -# CONFIG_GPIO_RDC321X is not set -# end of PCI GPIO expanders - -# -# SPI GPIO expanders -# -# CONFIG_GPIO_74X164 is not set -# CONFIG_GPIO_MAX3191X is not set -# CONFIG_GPIO_MAX7301 is not set -# CONFIG_GPIO_MC33880 is not set -# CONFIG_GPIO_PISOSR is not set -# CONFIG_GPIO_XRA1403 is not set -# end of SPI GPIO expanders - -# -# USB GPIO expanders -# -# end of USB GPIO expanders - -# -# Virtual GPIO drivers -# -# CONFIG_GPIO_AGGREGATOR is not set -# CONFIG_GPIO_LATCH is not set -# CONFIG_GPIO_MOCKUP is not set -# CONFIG_GPIO_VIRTIO is not set -# CONFIG_GPIO_SIM is not set -# end of Virtual GPIO drivers - -# CONFIG_W1 is not set -CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_BRCMSTB is not set -CONFIG_POWER_RESET_GPIO=y -CONFIG_POWER_RESET_GPIO_RESTART=y -CONFIG_POWER_RESET_HISI=y -# CONFIG_POWER_RESET_MSM is not set -# CONFIG_POWER_RESET_LTC2952 is not set -# CONFIG_POWER_RESET_REGULATOR is not set -CONFIG_POWER_RESET_RESTART=y -# CONFIG_POWER_RESET_VEXPRESS is not set -# CONFIG_POWER_RESET_XGENE is not set -CONFIG_POWER_RESET_SYSCON=y -# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set -# CONFIG_SYSCON_REBOOT_MODE is not set -# CONFIG_NVMEM_REBOOT_MODE is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -CONFIG_POWER_SUPPLY_HWMON=y -# CONFIG_IP5XXX_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_CHARGER_ADP5061 is not set -# CONFIG_BATTERY_CW2015 is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2781 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_SAMSUNG_SDI is not set -# 
CONFIG_BATTERY_SBS is not set -# CONFIG_CHARGER_SBS is not set -# CONFIG_MANAGER_SBS is not set -# CONFIG_BATTERY_BQ27XXX is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_GPIO is not set -# CONFIG_CHARGER_MANAGER is not set -# CONFIG_CHARGER_LT3651 is not set -# CONFIG_CHARGER_LTC4162L is not set -# CONFIG_CHARGER_DETECTOR_MAX14656 is not set -# CONFIG_CHARGER_MAX77976 is not set -# CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_BQ24190 is not set -# CONFIG_CHARGER_BQ24257 is not set -# CONFIG_CHARGER_BQ24735 is not set -# CONFIG_CHARGER_BQ2515X is not set -# CONFIG_CHARGER_BQ25890 is not set -# CONFIG_CHARGER_BQ25980 is not set -# CONFIG_CHARGER_BQ256XX is not set -CONFIG_CHARGER_SMB347=m -# CONFIG_BATTERY_GAUGE_LTC2941 is not set -# CONFIG_BATTERY_GOLDFISH is not set -# CONFIG_BATTERY_RT5033 is not set -# CONFIG_CHARGER_RT9455 is not set -# CONFIG_CHARGER_RT9467 is not set -# CONFIG_CHARGER_RT9471 is not set -# CONFIG_CHARGER_UCS1002 is not set -# CONFIG_CHARGER_BD99954 is not set -# CONFIG_BATTERY_UG3105 is not set -CONFIG_HWMON=y -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -CONFIG_SENSORS_AD7314=m -# CONFIG_SENSORS_AD7414 is not set -# CONFIG_SENSORS_AD7418 is not set -# CONFIG_SENSORS_ADM1021 is not set -# CONFIG_SENSORS_ADM1025 is not set -# CONFIG_SENSORS_ADM1026 is not set -# CONFIG_SENSORS_ADM1029 is not set -# CONFIG_SENSORS_ADM1031 is not set -# CONFIG_SENSORS_ADM1177 is not set -# CONFIG_SENSORS_ADM9240 is not set -# CONFIG_SENSORS_ADT7310 is not set -# CONFIG_SENSORS_ADT7410 is not set -# CONFIG_SENSORS_ADT7411 is not set -# CONFIG_SENSORS_ADT7462 is not set -# CONFIG_SENSORS_ADT7470 is not set -# CONFIG_SENSORS_ADT7475 is not set -# CONFIG_SENSORS_AHT10 is not set -# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set -# CONFIG_SENSORS_AS370 is not set -# CONFIG_SENSORS_ASC7621 is not set -# 
CONFIG_SENSORS_AXI_FAN_CONTROL is not set -CONFIG_SENSORS_ARM_SCPI=m -# CONFIG_SENSORS_ATXP1 is not set -# CONFIG_SENSORS_CORSAIR_CPRO is not set -# CONFIG_SENSORS_CORSAIR_PSU is not set -# CONFIG_SENSORS_DRIVETEMP is not set -# CONFIG_SENSORS_DS620 is not set -# CONFIG_SENSORS_DS1621 is not set -# CONFIG_SENSORS_I5K_AMB is not set -# CONFIG_SENSORS_F71805F is not set -# CONFIG_SENSORS_F71882FG is not set -# CONFIG_SENSORS_F75375S is not set -# CONFIG_SENSORS_FTSTEUTATES is not set -# CONFIG_SENSORS_GL518SM is not set -# CONFIG_SENSORS_GL520SM is not set -# CONFIG_SENSORS_G760A is not set -CONFIG_SENSORS_G762=m -# CONFIG_SENSORS_GPIO_FAN is not set -# CONFIG_SENSORS_HIH6130 is not set -# CONFIG_SENSORS_HS3001 is not set -# CONFIG_SENSORS_IBMAEM is not set -# CONFIG_SENSORS_IBMPEX is not set -# CONFIG_SENSORS_IT87 is not set -# CONFIG_SENSORS_JC42 is not set -CONFIG_SENSORS_POWR1220=m -# CONFIG_SENSORS_LINEAGE is not set -CONFIG_SENSORS_LTC2945=m -# CONFIG_SENSORS_LTC2947_I2C is not set -# CONFIG_SENSORS_LTC2947_SPI is not set -# CONFIG_SENSORS_LTC2990 is not set -# CONFIG_SENSORS_LTC2992 is not set -# CONFIG_SENSORS_LTC4151 is not set -# CONFIG_SENSORS_LTC4215 is not set -CONFIG_SENSORS_LTC4222=m -# CONFIG_SENSORS_LTC4245 is not set -CONFIG_SENSORS_LTC4260=m -# CONFIG_SENSORS_LTC4261 is not set -CONFIG_SENSORS_MAX1111=m -# CONFIG_SENSORS_MAX127 is not set -# CONFIG_SENSORS_MAX16065 is not set -# CONFIG_SENSORS_MAX1619 is not set -# CONFIG_SENSORS_MAX1668 is not set -# CONFIG_SENSORS_MAX197 is not set -# CONFIG_SENSORS_MAX31722 is not set -# CONFIG_SENSORS_MAX31730 is not set -# CONFIG_SENSORS_MAX31760 is not set -# CONFIG_MAX31827 is not set -# CONFIG_SENSORS_MAX6620 is not set -# CONFIG_SENSORS_MAX6621 is not set -# CONFIG_SENSORS_MAX6639 is not set -# CONFIG_SENSORS_MAX6642 is not set -# CONFIG_SENSORS_MAX6650 is not set -# CONFIG_SENSORS_MAX6697 is not set -CONFIG_SENSORS_MAX31790=m -# CONFIG_SENSORS_MC34VR500 is not set -# CONFIG_SENSORS_MCP3021 is not set -# 
CONFIG_SENSORS_TC654 is not set -# CONFIG_SENSORS_TPS23861 is not set -# CONFIG_SENSORS_MR75203 is not set -CONFIG_SENSORS_ADCXX=m -# CONFIG_SENSORS_LM63 is not set -CONFIG_SENSORS_LM70=m -# CONFIG_SENSORS_LM73 is not set -# CONFIG_SENSORS_LM75 is not set -# CONFIG_SENSORS_LM77 is not set -# CONFIG_SENSORS_LM78 is not set -# CONFIG_SENSORS_LM80 is not set -# CONFIG_SENSORS_LM83 is not set -# CONFIG_SENSORS_LM85 is not set -# CONFIG_SENSORS_LM87 is not set -# CONFIG_SENSORS_LM90 is not set -# CONFIG_SENSORS_LM92 is not set -# CONFIG_SENSORS_LM93 is not set -# CONFIG_SENSORS_LM95234 is not set -# CONFIG_SENSORS_LM95241 is not set -# CONFIG_SENSORS_LM95245 is not set -# CONFIG_SENSORS_PC87360 is not set -# CONFIG_SENSORS_PC87427 is not set -CONFIG_SENSORS_NCT6683=m -# CONFIG_SENSORS_NCT6775 is not set -# CONFIG_SENSORS_NCT6775_I2C is not set -CONFIG_SENSORS_NCT7802=m -CONFIG_SENSORS_NCT7904=m -# CONFIG_SENSORS_NPCM7XX is not set -# CONFIG_SENSORS_NZXT_KRAKEN2 is not set -# CONFIG_SENSORS_NZXT_SMART2 is not set -# CONFIG_SENSORS_OCC_P8_I2C is not set -# CONFIG_SENSORS_PCF8591 is not set -CONFIG_PMBUS=m -# CONFIG_SENSORS_PMBUS is not set -# CONFIG_SENSORS_ACBEL_FSG032 is not set -# CONFIG_SENSORS_ADM1266 is not set -# CONFIG_SENSORS_ADM1275 is not set -# CONFIG_SENSORS_BEL_PFE is not set -# CONFIG_SENSORS_BPA_RS600 is not set -# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set -# CONFIG_SENSORS_FSP_3Y is not set -# CONFIG_SENSORS_IBM_CFFPS is not set -# CONFIG_SENSORS_DPS920AB is not set -# CONFIG_SENSORS_INSPUR_IPSPS is not set -# CONFIG_SENSORS_IR35221 is not set -# CONFIG_SENSORS_IR36021 is not set -# CONFIG_SENSORS_IR38064 is not set -# CONFIG_SENSORS_IRPS5401 is not set -# CONFIG_SENSORS_ISL68137 is not set -# CONFIG_SENSORS_LM25066 is not set -# CONFIG_SENSORS_LT7182S is not set -# CONFIG_SENSORS_LTC2978 is not set -CONFIG_SENSORS_LTC3815=m -# CONFIG_SENSORS_MAX15301 is not set -# CONFIG_SENSORS_MAX16064 is not set -# CONFIG_SENSORS_MAX16601 is not set -# 
CONFIG_SENSORS_MAX20730 is not set -CONFIG_SENSORS_MAX20751=m -# CONFIG_SENSORS_MAX31785 is not set -# CONFIG_SENSORS_MAX34440 is not set -# CONFIG_SENSORS_MAX8688 is not set -# CONFIG_SENSORS_MP2888 is not set -# CONFIG_SENSORS_MP2975 is not set -# CONFIG_SENSORS_MP5023 is not set -# CONFIG_SENSORS_MPQ7932 is not set -# CONFIG_SENSORS_PIM4328 is not set -# CONFIG_SENSORS_PLI1209BC is not set -# CONFIG_SENSORS_PM6764TR is not set -# CONFIG_SENSORS_PXE1610 is not set -# CONFIG_SENSORS_Q54SJ108A2 is not set -# CONFIG_SENSORS_STPDDC60 is not set -# CONFIG_SENSORS_TDA38640 is not set -CONFIG_SENSORS_TPS40422=m -# CONFIG_SENSORS_TPS53679 is not set -# CONFIG_SENSORS_TPS546D24 is not set -# CONFIG_SENSORS_UCD9000 is not set -# CONFIG_SENSORS_UCD9200 is not set -# CONFIG_SENSORS_XDPE152 is not set -# CONFIG_SENSORS_XDPE122 is not set -# CONFIG_SENSORS_ZL6100 is not set -CONFIG_SENSORS_PWM_FAN=m -# CONFIG_SENSORS_SBTSI is not set -# CONFIG_SENSORS_SBRMI is not set -# CONFIG_SENSORS_SHT15 is not set -# CONFIG_SENSORS_SHT21 is not set -# CONFIG_SENSORS_SHT3x is not set -# CONFIG_SENSORS_SHT4x is not set -CONFIG_SENSORS_SHTC1=m -# CONFIG_SENSORS_SIS5595 is not set -# CONFIG_SENSORS_DME1737 is not set -# CONFIG_SENSORS_EMC1403 is not set -# CONFIG_SENSORS_EMC2103 is not set -# CONFIG_SENSORS_EMC2305 is not set -# CONFIG_SENSORS_EMC6W201 is not set -# CONFIG_SENSORS_SMSC47M1 is not set -# CONFIG_SENSORS_SMSC47M192 is not set -# CONFIG_SENSORS_SMSC47B397 is not set -# CONFIG_SENSORS_SCH5627 is not set -# CONFIG_SENSORS_SCH5636 is not set -# CONFIG_SENSORS_STTS751 is not set -CONFIG_SENSORS_ADC128D818=m -# CONFIG_SENSORS_ADS7828 is not set -CONFIG_SENSORS_ADS7871=m -# CONFIG_SENSORS_AMC6821 is not set -# CONFIG_SENSORS_INA209 is not set -# CONFIG_SENSORS_INA2XX is not set -# CONFIG_SENSORS_INA238 is not set -# CONFIG_SENSORS_INA3221 is not set -CONFIG_SENSORS_TC74=m -# CONFIG_SENSORS_THMC50 is not set -# CONFIG_SENSORS_TMP102 is not set -CONFIG_SENSORS_TMP103=m -# 
CONFIG_SENSORS_TMP108 is not set -# CONFIG_SENSORS_TMP401 is not set -# CONFIG_SENSORS_TMP421 is not set -# CONFIG_SENSORS_TMP464 is not set -# CONFIG_SENSORS_TMP513 is not set -CONFIG_SENSORS_VEXPRESS=m -# CONFIG_SENSORS_VIA686A is not set -# CONFIG_SENSORS_VT1211 is not set -# CONFIG_SENSORS_VT8231 is not set -# CONFIG_SENSORS_W83773G is not set -# CONFIG_SENSORS_W83781D is not set -# CONFIG_SENSORS_W83791D is not set -# CONFIG_SENSORS_W83792D is not set -# CONFIG_SENSORS_W83793 is not set -# CONFIG_SENSORS_W83795 is not set -# CONFIG_SENSORS_W83L785TS is not set -# CONFIG_SENSORS_W83L786NG is not set -# CONFIG_SENSORS_W83627HF is not set -# CONFIG_SENSORS_W83627EHF is not set -CONFIG_SENSORS_XGENE=m - -# -# ACPI drivers -# -CONFIG_SENSORS_ACPI_POWER=y -CONFIG_THERMAL=y -# CONFIG_THERMAL_NETLINK is not set -# CONFIG_THERMAL_STATISTICS is not set -CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_OF=y -# CONFIG_THERMAL_WRITABLE_TRIPS is not set -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set -# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set -CONFIG_THERMAL_GOV_FAIR_SHARE=y -CONFIG_THERMAL_GOV_STEP_WISE=y -# CONFIG_THERMAL_GOV_BANG_BANG is not set -CONFIG_THERMAL_GOV_USER_SPACE=y -CONFIG_CPU_THERMAL=y -CONFIG_CPU_FREQ_THERMAL=y -# CONFIG_THERMAL_EMULATION is not set -# CONFIG_THERMAL_MMIO is not set -CONFIG_HISI_THERMAL=m - -# -# Qualcomm thermal drivers -# -# CONFIG_QCOM_LMH is not set -# end of Qualcomm thermal drivers - -CONFIG_WATCHDOG=y -CONFIG_WATCHDOG_CORE=y -# CONFIG_WATCHDOG_NOWAYOUT is not set -CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y -CONFIG_WATCHDOG_OPEN_TIMEOUT=0 -CONFIG_WATCHDOG_SYSFS=y -# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set - -# -# Watchdog Pretimeout Governors -# -# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set - -# -# Watchdog Device Drivers -# -CONFIG_SOFT_WATCHDOG=m -CONFIG_GPIO_WATCHDOG=m -# CONFIG_WDAT_WDT is not set -# CONFIG_XILINX_WATCHDOG is not set -# 
CONFIG_XILINX_WINDOW_WATCHDOG is not set -# CONFIG_ZIIRAVE_WATCHDOG is not set -CONFIG_ARM_SP805_WATCHDOG=m -CONFIG_ARM_SBSA_WATCHDOG=m -# CONFIG_CADENCE_WATCHDOG is not set -# CONFIG_DW_WATCHDOG is not set -# CONFIG_MAX63XX_WATCHDOG is not set -# CONFIG_QCOM_WDT is not set -# CONFIG_ARM_SMC_WATCHDOG is not set -CONFIG_ALIM7101_WDT=m -CONFIG_I6300ESB_WDT=m -# CONFIG_HP_WATCHDOG is not set -CONFIG_MARVELL_GTI_WDT=y -# CONFIG_MEN_A21_WDT is not set - -# -# PCI-based Watchdog Cards -# -CONFIG_PCIPCWATCHDOG=m -CONFIG_WDTPCI=m - -# -# USB-based Watchdog Cards -# -CONFIG_USBPCWATCHDOG=m -CONFIG_SSB_POSSIBLE=y -# CONFIG_SSB is not set -CONFIG_BCMA_POSSIBLE=y -CONFIG_BCMA=m -CONFIG_BCMA_HOST_PCI_POSSIBLE=y -CONFIG_BCMA_HOST_PCI=y -# CONFIG_BCMA_HOST_SOC is not set -CONFIG_BCMA_DRIVER_PCI=y -CONFIG_BCMA_DRIVER_GMAC_CMN=y -CONFIG_BCMA_DRIVER_GPIO=y -# CONFIG_BCMA_DEBUG is not set - -# -# Multifunction device drivers -# -CONFIG_MFD_CORE=m -# CONFIG_MFD_ACT8945A is not set -# CONFIG_MFD_AS3711 is not set -# CONFIG_MFD_SMPRO is not set -# CONFIG_MFD_AS3722 is not set -# CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_AAT2870_CORE is not set -# CONFIG_MFD_ATMEL_FLEXCOM is not set -# CONFIG_MFD_ATMEL_HLCDC is not set -# CONFIG_MFD_BCM590XX is not set -# CONFIG_MFD_BD9571MWV is not set -# CONFIG_MFD_AXP20X_I2C is not set -# CONFIG_MFD_CS42L43_I2C is not set -# CONFIG_MFD_MADERA is not set -# CONFIG_MFD_MAX5970 is not set -# CONFIG_PMIC_DA903X is not set -# CONFIG_MFD_DA9052_SPI is not set -# CONFIG_MFD_DA9052_I2C is not set -# CONFIG_MFD_DA9055 is not set -# CONFIG_MFD_DA9062 is not set -# CONFIG_MFD_DA9063 is not set -# CONFIG_MFD_DA9150 is not set -# CONFIG_MFD_DLN2 is not set -# CONFIG_MFD_GATEWORKS_GSC is not set -# CONFIG_MFD_MC13XXX_SPI is not set -# CONFIG_MFD_MC13XXX_I2C is not set -# CONFIG_MFD_MP2629 is not set -# CONFIG_MFD_HI6421_PMIC is not set -# CONFIG_MFD_HI655X_PMIC is not set -# CONFIG_LPC_ICH is not set -# CONFIG_LPC_SCH is not set -# CONFIG_MFD_IQS62X is not set 
-# CONFIG_MFD_JANZ_CMODIO is not set -# CONFIG_MFD_KEMPLD is not set -# CONFIG_MFD_88PM800 is not set -# CONFIG_MFD_88PM805 is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_MFD_MAX14577 is not set -# CONFIG_MFD_MAX77541 is not set -# CONFIG_MFD_MAX77620 is not set -# CONFIG_MFD_MAX77650 is not set -# CONFIG_MFD_MAX77686 is not set -# CONFIG_MFD_MAX77693 is not set -# CONFIG_MFD_MAX77714 is not set -# CONFIG_MFD_MAX77843 is not set -# CONFIG_MFD_MAX8907 is not set -# CONFIG_MFD_MAX8925 is not set -# CONFIG_MFD_MAX8997 is not set -# CONFIG_MFD_MAX8998 is not set -# CONFIG_MFD_MT6360 is not set -# CONFIG_MFD_MT6370 is not set -# CONFIG_MFD_MT6397 is not set -# CONFIG_MFD_MENF21BMC is not set -# CONFIG_MFD_OCELOT is not set -# CONFIG_EZX_PCAP is not set -# CONFIG_MFD_CPCAP is not set -# CONFIG_MFD_VIPERBOARD is not set -# CONFIG_MFD_NTXEC is not set -# CONFIG_MFD_RETU is not set -# CONFIG_MFD_PCF50633 is not set -# CONFIG_MFD_QCOM_RPM is not set -# CONFIG_MFD_SY7636A is not set -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_RT4831 is not set -# CONFIG_MFD_RT5033 is not set -# CONFIG_MFD_RT5120 is not set -# CONFIG_MFD_RC5T583 is not set -# CONFIG_MFD_RK8XX_I2C is not set -# CONFIG_MFD_RK8XX_SPI is not set -# CONFIG_MFD_RN5T618 is not set -# CONFIG_MFD_SEC_CORE is not set -# CONFIG_MFD_SI476X_CORE is not set -# CONFIG_MFD_SM501 is not set -# CONFIG_MFD_SKY81452 is not set -# CONFIG_MFD_STMPE is not set -CONFIG_MFD_SYSCON=y -# CONFIG_MFD_LP3943 is not set -# CONFIG_MFD_LP8788 is not set -# CONFIG_MFD_TI_LMU is not set -# CONFIG_MFD_PALMAS is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TPS65086 is not set -# CONFIG_MFD_TPS65090 is not set -# CONFIG_MFD_TPS65217 is not set -# CONFIG_MFD_TI_LP873X is not set -# CONFIG_MFD_TI_LP87565 is not set -# CONFIG_MFD_TPS65218 is not set -# CONFIG_MFD_TPS65219 is not set -# CONFIG_MFD_TPS6586X is not set -# CONFIG_MFD_TPS65910 is not set -# 
CONFIG_MFD_TPS65912_I2C is not set -# CONFIG_MFD_TPS65912_SPI is not set -# CONFIG_MFD_TPS6594_I2C is not set -# CONFIG_MFD_TPS6594_SPI is not set -# CONFIG_TWL4030_CORE is not set -# CONFIG_TWL6040_CORE is not set -# CONFIG_MFD_WL1273_CORE is not set -# CONFIG_MFD_LM3533 is not set -# CONFIG_MFD_TC3589X is not set -# CONFIG_MFD_TQMX86 is not set -# CONFIG_MFD_VX855 is not set -# CONFIG_MFD_LOCHNAGAR is not set -# CONFIG_MFD_ARIZONA_I2C is not set -# CONFIG_MFD_ARIZONA_SPI is not set -# CONFIG_MFD_WM8400 is not set -# CONFIG_MFD_WM831X_I2C is not set -# CONFIG_MFD_WM831X_SPI is not set -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_MFD_ROHM_BD718XX is not set -# CONFIG_MFD_ROHM_BD71828 is not set -# CONFIG_MFD_ROHM_BD957XMUF is not set -# CONFIG_MFD_STPMIC1 is not set -# CONFIG_MFD_STMFX is not set -# CONFIG_MFD_ATC260X_I2C is not set -# CONFIG_MFD_QCOM_PM8008 is not set -# CONFIG_MFD_VEXPRESS_SYSREG is not set -# CONFIG_MFD_INTEL_M10_BMC_SPI is not set -# CONFIG_MFD_RSMU_I2C is not set -# CONFIG_MFD_RSMU_SPI is not set -# end of Multifunction device drivers - -CONFIG_REGULATOR=y -# CONFIG_REGULATOR_DEBUG is not set -# CONFIG_REGULATOR_FIXED_VOLTAGE is not set -# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set -# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set -# CONFIG_REGULATOR_88PG86X is not set -# CONFIG_REGULATOR_ACT8865 is not set -# CONFIG_REGULATOR_AD5398 is not set -# CONFIG_REGULATOR_AW37503 is not set -# CONFIG_REGULATOR_DA9121 is not set -# CONFIG_REGULATOR_DA9210 is not set -# CONFIG_REGULATOR_DA9211 is not set -# CONFIG_REGULATOR_FAN53555 is not set -# CONFIG_REGULATOR_FAN53880 is not set -# CONFIG_REGULATOR_GPIO is not set -# CONFIG_REGULATOR_ISL9305 is not set -# CONFIG_REGULATOR_ISL6271A is not set -# CONFIG_REGULATOR_LP3971 is not set -# CONFIG_REGULATOR_LP3972 is not set -# CONFIG_REGULATOR_LP872X is not set -# CONFIG_REGULATOR_LP8755 is not set -# CONFIG_REGULATOR_LTC3589 is not set -# CONFIG_REGULATOR_LTC3676 is not 
set -# CONFIG_REGULATOR_MAX1586 is not set -# CONFIG_REGULATOR_MAX77857 is not set -# CONFIG_REGULATOR_MAX8649 is not set -# CONFIG_REGULATOR_MAX8660 is not set -# CONFIG_REGULATOR_MAX8893 is not set -# CONFIG_REGULATOR_MAX8952 is not set -# CONFIG_REGULATOR_MAX8973 is not set -# CONFIG_REGULATOR_MAX20086 is not set -# CONFIG_REGULATOR_MAX20411 is not set -# CONFIG_REGULATOR_MAX77826 is not set -# CONFIG_REGULATOR_MCP16502 is not set -# CONFIG_REGULATOR_MP5416 is not set -# CONFIG_REGULATOR_MP8859 is not set -# CONFIG_REGULATOR_MP886X is not set -# CONFIG_REGULATOR_MPQ7920 is not set -# CONFIG_REGULATOR_MT6311 is not set -# CONFIG_REGULATOR_PCA9450 is not set -# CONFIG_REGULATOR_PF8X00 is not set -# CONFIG_REGULATOR_PFUZE100 is not set -# CONFIG_REGULATOR_PV88060 is not set -# CONFIG_REGULATOR_PV88080 is not set -# CONFIG_REGULATOR_PV88090 is not set -# CONFIG_REGULATOR_PWM is not set -# CONFIG_REGULATOR_QCOM_REFGEN is not set -# CONFIG_REGULATOR_RAA215300 is not set -# CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set -# CONFIG_REGULATOR_RT4801 is not set -# CONFIG_REGULATOR_RT4803 is not set -# CONFIG_REGULATOR_RT5190A is not set -# CONFIG_REGULATOR_RT5739 is not set -# CONFIG_REGULATOR_RT5759 is not set -# CONFIG_REGULATOR_RT6160 is not set -# CONFIG_REGULATOR_RT6190 is not set -# CONFIG_REGULATOR_RT6245 is not set -# CONFIG_REGULATOR_RTQ2134 is not set -# CONFIG_REGULATOR_RTMV20 is not set -# CONFIG_REGULATOR_RTQ6752 is not set -# CONFIG_REGULATOR_RTQ2208 is not set -# CONFIG_REGULATOR_SLG51000 is not set -# CONFIG_REGULATOR_SY8106A is not set -# CONFIG_REGULATOR_SY8824X is not set -# CONFIG_REGULATOR_SY8827N is not set -# CONFIG_REGULATOR_TPS51632 is not set -# CONFIG_REGULATOR_TPS62360 is not set -# CONFIG_REGULATOR_TPS6286X is not set -# CONFIG_REGULATOR_TPS6287X is not set -# CONFIG_REGULATOR_TPS65023 is not set -# CONFIG_REGULATOR_TPS6507X is not set -# CONFIG_REGULATOR_TPS65132 is not set -# CONFIG_REGULATOR_TPS6524X is not set -# 
CONFIG_REGULATOR_VCTRL is not set -# CONFIG_REGULATOR_VEXPRESS is not set -# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set -# CONFIG_RC_CORE is not set -CONFIG_CEC_CORE=m - -# -# CEC support -# -# CONFIG_MEDIA_CEC_SUPPORT is not set -# end of CEC support - -# CONFIG_MEDIA_SUPPORT is not set - -# -# Graphics support -# -CONFIG_APERTURE_HELPERS=y -CONFIG_VIDEO_CMDLINE=y -CONFIG_VIDEO_NOMODESET=y -# CONFIG_AUXDISPLAY is not set -CONFIG_DRM=m -CONFIG_DRM_KMS_HELPER=m -CONFIG_DRM_FBDEV_EMULATION=y -CONFIG_DRM_FBDEV_OVERALLOC=100 -CONFIG_DRM_LOAD_EDID_FIRMWARE=y -CONFIG_DRM_DISPLAY_HELPER=m -CONFIG_DRM_DISPLAY_DP_HELPER=y -CONFIG_DRM_DISPLAY_HDCP_HELPER=y -CONFIG_DRM_DISPLAY_HDMI_HELPER=y -CONFIG_DRM_DP_AUX_CHARDEV=y -CONFIG_DRM_DP_CEC=y -CONFIG_DRM_TTM=m -CONFIG_DRM_EXEC=m -CONFIG_DRM_BUDDY=m -CONFIG_DRM_VRAM_HELPER=m -CONFIG_DRM_TTM_HELPER=m -CONFIG_DRM_GEM_SHMEM_HELPER=m -CONFIG_DRM_SUBALLOC_HELPER=m -CONFIG_DRM_SCHED=m - -# -# I2C encoder or helper chips -# -CONFIG_DRM_I2C_CH7006=m -# CONFIG_DRM_I2C_SIL164 is not set -CONFIG_DRM_I2C_NXP_TDA998X=m -# CONFIG_DRM_I2C_NXP_TDA9950 is not set -# end of I2C encoder or helper chips - -# -# ARM devices -# -# CONFIG_DRM_HDLCD is not set -# CONFIG_DRM_MALI_DISPLAY is not set -# CONFIG_DRM_KOMEDA is not set -# end of ARM devices - -CONFIG_DRM_RADEON=m -CONFIG_DRM_RADEON_USERPTR=y -CONFIG_DRM_AMDGPU=m -# CONFIG_DRM_AMDGPU_SI is not set -CONFIG_DRM_AMDGPU_CIK=y -CONFIG_DRM_AMDGPU_USERPTR=y - -# -# ACP (Audio CoProcessor) Configuration -# -CONFIG_DRM_AMD_ACP=y -# end of ACP (Audio CoProcessor) Configuration - -# -# Display Engine Configuration -# -CONFIG_DRM_AMD_DC=y -CONFIG_DRM_AMD_DC_FP=y -# CONFIG_DEBUG_KERNEL_DC is not set -# CONFIG_DRM_AMD_SECURE_DISPLAY is not set -# end of Display Engine Configuration - -CONFIG_HSA_AMD=y -CONFIG_DRM_NOUVEAU=m -CONFIG_NOUVEAU_DEBUG=5 -CONFIG_NOUVEAU_DEBUG_DEFAULT=3 -# CONFIG_NOUVEAU_DEBUG_MMU is not set -# CONFIG_NOUVEAU_DEBUG_PUSH is not set -CONFIG_DRM_NOUVEAU_BACKLIGHT=y -# CONFIG_DRM_VGEM is 
not set -CONFIG_DRM_VKMS=m -# CONFIG_DRM_VMWGFX is not set -CONFIG_DRM_UDL=m -CONFIG_DRM_AST=m -CONFIG_DRM_MGAG200=m -CONFIG_DRM_QXL=m -CONFIG_DRM_VIRTIO_GPU=m -CONFIG_DRM_VIRTIO_GPU_KMS=y -# CONFIG_DRM_MSM is not set -CONFIG_DRM_PANEL=y - -# -# Display Panels -# -# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set -# CONFIG_DRM_PANEL_ARM_VERSATILE is not set -# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set -# CONFIG_DRM_PANEL_LVDS is not set -# CONFIG_DRM_PANEL_SIMPLE is not set -# CONFIG_DRM_PANEL_EDP is not set -# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set -# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set -# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set -# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set -# CONFIG_DRM_PANEL_LG_LB035Q02 is not set -# CONFIG_DRM_PANEL_LG_LG4573 is not set -# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set -# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set -# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set -# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set -# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set -# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set -# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set -# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set -# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set -# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set -# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set -# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set -# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set -# CONFIG_DRM_PANEL_TPO_TPG110 is not set -# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set -# end of Display Panels - -CONFIG_DRM_BRIDGE=y -CONFIG_DRM_PANEL_BRIDGE=y - -# -# Display Interface Bridges -# -# CONFIG_DRM_CHIPONE_ICN6211 is not set -# CONFIG_DRM_CHRONTEL_CH7033 is not set -# CONFIG_DRM_DISPLAY_CONNECTOR is not set -# CONFIG_DRM_ITE_IT6505 is 
not set -# CONFIG_DRM_LONTIUM_LT8912B is not set -# CONFIG_DRM_LONTIUM_LT9211 is not set -# CONFIG_DRM_LONTIUM_LT9611 is not set -# CONFIG_DRM_LONTIUM_LT9611UXC is not set -# CONFIG_DRM_ITE_IT66121 is not set -# CONFIG_DRM_LVDS_CODEC is not set -# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set -# CONFIG_DRM_NWL_MIPI_DSI is not set -# CONFIG_DRM_NXP_PTN3460 is not set -# CONFIG_DRM_PARADE_PS8622 is not set -# CONFIG_DRM_PARADE_PS8640 is not set -# CONFIG_DRM_SAMSUNG_DSIM is not set -# CONFIG_DRM_SIL_SII8620 is not set -# CONFIG_DRM_SII902X is not set -# CONFIG_DRM_SII9234 is not set -# CONFIG_DRM_SIMPLE_BRIDGE is not set -# CONFIG_DRM_THINE_THC63LVD1024 is not set -# CONFIG_DRM_TOSHIBA_TC358762 is not set -# CONFIG_DRM_TOSHIBA_TC358764 is not set -# CONFIG_DRM_TOSHIBA_TC358767 is not set -# CONFIG_DRM_TOSHIBA_TC358768 is not set -# CONFIG_DRM_TOSHIBA_TC358775 is not set -# CONFIG_DRM_TI_DLPC3433 is not set -# CONFIG_DRM_TI_TFP410 is not set -# CONFIG_DRM_TI_SN65DSI83 is not set -# CONFIG_DRM_TI_SN65DSI86 is not set -# CONFIG_DRM_TI_TPD12S015 is not set -# CONFIG_DRM_ANALOGIX_ANX6345 is not set -# CONFIG_DRM_ANALOGIX_ANX78XX is not set -# CONFIG_DRM_ANALOGIX_ANX7625 is not set -# CONFIG_DRM_I2C_ADV7511 is not set -# CONFIG_DRM_CDNS_DSI is not set -# CONFIG_DRM_CDNS_MHDP8546 is not set -# end of Display Interface Bridges - -# CONFIG_DRM_LOONGSON is not set -# CONFIG_DRM_ETNAVIV is not set -CONFIG_DRM_HISI_HIBMC=m -# CONFIG_DRM_HISI_KIRIN is not set -# CONFIG_DRM_LOGICVC is not set -# CONFIG_DRM_ARCPGU is not set -CONFIG_DRM_BOCHS=m -CONFIG_DRM_CIRRUS_QEMU=m -# CONFIG_DRM_GM12U320 is not set -# CONFIG_DRM_PANEL_MIPI_DBI is not set -# CONFIG_DRM_SIMPLEDRM is not set -# CONFIG_TINYDRM_HX8357D is not set -# CONFIG_TINYDRM_ILI9163 is not set -# CONFIG_TINYDRM_ILI9225 is not set -# CONFIG_TINYDRM_ILI9341 is not set -# CONFIG_TINYDRM_ILI9486 is not set -# CONFIG_TINYDRM_MI0283QT is not set -# CONFIG_TINYDRM_REPAPER is not set -# CONFIG_TINYDRM_ST7586 is not set -# 
CONFIG_TINYDRM_ST7735R is not set -# CONFIG_DRM_PL111 is not set -# CONFIG_DRM_LIMA is not set -# CONFIG_DRM_PANFROST is not set -# CONFIG_DRM_TIDSS is not set -# CONFIG_DRM_GUD is not set -# CONFIG_DRM_SSD130X is not set -CONFIG_DRM_PHYTIUM=m -# CONFIG_DRM_LEGACY is not set -CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y -# CONFIG_HYDCU_FIXUP_HEADER is not set -CONFIG_DRM_INSPUR=m - -# -# Frame buffer Devices -# -CONFIG_FB=y -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -# CONFIG_FB_ARMCLCD is not set -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -# CONFIG_FB_UVESA is not set -CONFIG_FB_EFI=y -# CONFIG_FB_OPENCORES is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_I740 is not set -# CONFIG_FB_MATROX is not set -# CONFIG_FB_RADEON is not set -# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_VT8623 is not set -# CONFIG_FB_TRIDENT is not set -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -# CONFIG_FB_SMSCUFX is not set -# CONFIG_FB_UDL is not set -# CONFIG_FB_IBM_GXT4500 is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set -CONFIG_FB_SIMPLE=y -CONFIG_FB_SSD1307=m -# CONFIG_FB_SM712 is not set -# CONFIG_FB_LS2K500 is not set -CONFIG_FB_CORE=y -CONFIG_FB_NOTIFY=y -# CONFIG_FIRMWARE_EDID is not set -CONFIG_FB_DEVICE=y -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -CONFIG_FB_SYS_FILLRECT=y -CONFIG_FB_SYS_COPYAREA=y -CONFIG_FB_SYS_IMAGEBLIT=y -# CONFIG_FB_FOREIGN_ENDIAN is not set -CONFIG_FB_SYS_FOPS=y -CONFIG_FB_DEFERRED_IO=y -CONFIG_FB_IOMEM_HELPERS=y -CONFIG_FB_SYSMEM_HELPERS=y 
-CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y -CONFIG_FB_BACKLIGHT=m -# CONFIG_FB_MODE_HELPERS is not set -CONFIG_FB_TILEBLITTING=y -# end of Frame buffer Devices - -# -# Backlight & LCD device support -# -CONFIG_LCD_CLASS_DEVICE=m -# CONFIG_LCD_L4F00242T03 is not set -# CONFIG_LCD_LMS283GF05 is not set -# CONFIG_LCD_LTV350QV is not set -# CONFIG_LCD_ILI922X is not set -# CONFIG_LCD_ILI9320 is not set -# CONFIG_LCD_TDO24M is not set -# CONFIG_LCD_VGG2432A4 is not set -CONFIG_LCD_PLATFORM=m -# CONFIG_LCD_AMS369FG06 is not set -# CONFIG_LCD_LMS501KF03 is not set -# CONFIG_LCD_HX8357 is not set -# CONFIG_LCD_OTM3225A is not set -CONFIG_BACKLIGHT_CLASS_DEVICE=y -# CONFIG_BACKLIGHT_KTD253 is not set -# CONFIG_BACKLIGHT_KTZ8866 is not set -CONFIG_BACKLIGHT_PWM=m -# CONFIG_BACKLIGHT_QCOM_WLED is not set -# CONFIG_BACKLIGHT_ADP8860 is not set -# CONFIG_BACKLIGHT_ADP8870 is not set -# CONFIG_BACKLIGHT_LM3630A is not set -# CONFIG_BACKLIGHT_LM3639 is not set -CONFIG_BACKLIGHT_LP855X=m -CONFIG_BACKLIGHT_GPIO=m -# CONFIG_BACKLIGHT_LV5207LP is not set -# CONFIG_BACKLIGHT_BD6107 is not set -# CONFIG_BACKLIGHT_ARCXCNN is not set -# CONFIG_BACKLIGHT_LED is not set -# end of Backlight & LCD device support - -CONFIG_HDMI=y - -# -# Console display driver support -# -CONFIG_DUMMY_CONSOLE=y -CONFIG_DUMMY_CONSOLE_COLUMNS=80 -CONFIG_DUMMY_CONSOLE_ROWS=25 -CONFIG_FRAMEBUFFER_CONSOLE=y -# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y -# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set -# end of Console display driver support - -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_LOGO_LINUX_CLUT224=y -# end of Graphics support - -# CONFIG_DRM_ACCEL is not set -CONFIG_SOUND=m -# CONFIG_SND is not set -CONFIG_HID_SUPPORT=y -CONFIG_HID=y -CONFIG_HID_BATTERY_STRENGTH=y -CONFIG_HIDRAW=y -CONFIG_UHID=m -CONFIG_HID_GENERIC=y - -# -# Special HID drivers -# 
-CONFIG_HID_A4TECH=m -# CONFIG_HID_ACCUTOUCH is not set -CONFIG_HID_ACRUX=m -# CONFIG_HID_ACRUX_FF is not set -CONFIG_HID_APPLE=m -CONFIG_HID_APPLEIR=m -# CONFIG_HID_ASUS is not set -CONFIG_HID_AUREAL=m -CONFIG_HID_BELKIN=m -CONFIG_HID_BETOP_FF=m -# CONFIG_HID_BIGBEN_FF is not set -CONFIG_HID_CHERRY=m -CONFIG_HID_CHICONY=m -CONFIG_HID_CORSAIR=m -# CONFIG_HID_COUGAR is not set -# CONFIG_HID_MACALLY is not set -# CONFIG_HID_CMEDIA is not set -# CONFIG_HID_CP2112 is not set -# CONFIG_HID_CREATIVE_SB0540 is not set -CONFIG_HID_CYPRESS=m -CONFIG_HID_DRAGONRISE=m -# CONFIG_DRAGONRISE_FF is not set -# CONFIG_HID_EMS_FF is not set -CONFIG_HID_ELAN=m -CONFIG_HID_ELECOM=m -CONFIG_HID_ELO=m -# CONFIG_HID_EVISION is not set -CONFIG_HID_EZKEY=m -# CONFIG_HID_FT260 is not set -CONFIG_HID_GEMBIRD=m -CONFIG_HID_GFRM=m -# CONFIG_HID_GLORIOUS is not set -CONFIG_HID_HOLTEK=m -# CONFIG_HOLTEK_FF is not set -# CONFIG_HID_GOOGLE_STADIA_FF is not set -# CONFIG_HID_VIVALDI is not set -CONFIG_HID_GT683R=m -CONFIG_HID_KEYTOUCH=m -CONFIG_HID_KYE=m -CONFIG_HID_UCLOGIC=m -CONFIG_HID_WALTOP=m -# CONFIG_HID_VIEWSONIC is not set -# CONFIG_HID_VRC2 is not set -# CONFIG_HID_XIAOMI is not set -CONFIG_HID_GYRATION=m -CONFIG_HID_ICADE=m -CONFIG_HID_ITE=m -CONFIG_HID_JABRA=m -CONFIG_HID_TWINHAN=m -CONFIG_HID_KENSINGTON=m -CONFIG_HID_LCPOWER=m -CONFIG_HID_LED=m -CONFIG_HID_LENOVO=m -# CONFIG_HID_LETSKETCH is not set -CONFIG_HID_LOGITECH=m -CONFIG_HID_LOGITECH_DJ=m -CONFIG_HID_LOGITECH_HIDPP=m -# CONFIG_LOGITECH_FF is not set -# CONFIG_LOGIRUMBLEPAD2_FF is not set -# CONFIG_LOGIG940_FF is not set -# CONFIG_LOGIWHEELS_FF is not set -CONFIG_HID_MAGICMOUSE=y -# CONFIG_HID_MALTRON is not set -# CONFIG_HID_MAYFLASH is not set -# CONFIG_HID_MEGAWORLD_FF is not set -# CONFIG_HID_REDRAGON is not set -CONFIG_HID_MICROSOFT=m -CONFIG_HID_MONTEREY=m -CONFIG_HID_MULTITOUCH=m -# CONFIG_HID_NINTENDO is not set -CONFIG_HID_NTI=m -CONFIG_HID_NTRIG=y -CONFIG_HID_ORTEK=m -CONFIG_HID_PANTHERLORD=m -# CONFIG_PANTHERLORD_FF 
is not set -CONFIG_HID_PENMOUNT=m -CONFIG_HID_PETALYNX=m -CONFIG_HID_PICOLCD=m -CONFIG_HID_PICOLCD_FB=y -CONFIG_HID_PICOLCD_BACKLIGHT=y -CONFIG_HID_PICOLCD_LCD=y -CONFIG_HID_PICOLCD_LEDS=y -CONFIG_HID_PLANTRONICS=m -# CONFIG_HID_PXRC is not set -# CONFIG_HID_RAZER is not set -CONFIG_HID_PRIMAX=m -# CONFIG_HID_RETRODE is not set -CONFIG_HID_ROCCAT=m -CONFIG_HID_SAITEK=m -CONFIG_HID_SAMSUNG=m -# CONFIG_HID_SEMITEK is not set -# CONFIG_HID_SIGMAMICRO is not set -CONFIG_HID_SONY=m -CONFIG_SONY_FF=y -CONFIG_HID_SPEEDLINK=m -# CONFIG_HID_STEAM is not set -CONFIG_HID_STEELSERIES=m -CONFIG_HID_SUNPLUS=m -CONFIG_HID_RMI=m -CONFIG_HID_GREENASIA=m -# CONFIG_GREENASIA_FF is not set -CONFIG_HID_SMARTJOYPLUS=m -# CONFIG_SMARTJOYPLUS_FF is not set -CONFIG_HID_TIVO=m -CONFIG_HID_TOPSEED=m -# CONFIG_HID_TOPRE is not set -CONFIG_HID_THINGM=m -CONFIG_HID_THRUSTMASTER=m -# CONFIG_THRUSTMASTER_FF is not set -# CONFIG_HID_UDRAW_PS3 is not set -# CONFIG_HID_U2FZERO is not set -CONFIG_HID_WACOM=m -CONFIG_HID_WIIMOTE=m -CONFIG_HID_XINMO=m -CONFIG_HID_ZEROPLUS=m -# CONFIG_ZEROPLUS_FF is not set -CONFIG_HID_ZYDACRON=m -CONFIG_HID_SENSOR_HUB=m -# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set -# CONFIG_HID_ALPS is not set -# CONFIG_HID_MCP2221 is not set -# end of Special HID drivers - -# -# HID-BPF support -# -# CONFIG_HID_BPF is not set -# end of HID-BPF support - -# -# USB HID support -# -CONFIG_USB_HID=y -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y -# end of USB HID support - -CONFIG_I2C_HID=m -# CONFIG_I2C_HID_ACPI is not set -# CONFIG_I2C_HID_OF is not set -# CONFIG_I2C_HID_OF_ELAN is not set -# CONFIG_I2C_HID_OF_GOODIX is not set -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=y -CONFIG_USB_LED_TRIG=y -CONFIG_USB_ULPI_BUS=m -# CONFIG_USB_CONN_GPIO is not set -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB=y -CONFIG_USB_PCI=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEFAULT_PERSIST=y -# CONFIG_USB_FEW_INIT_RETRIES is not set -# 
CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_OTG is not set -# CONFIG_USB_OTG_PRODUCTLIST is not set -CONFIG_USB_LEDS_TRIGGER_USBPORT=m -CONFIG_USB_AUTOSUSPEND_DELAY=2 -CONFIG_USB_MON=y - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=y -# CONFIG_USB_XHCI_DBGCAP is not set -CONFIG_USB_XHCI_PCI=y -# CONFIG_USB_XHCI_PCI_RENESAS is not set -CONFIG_USB_XHCI_PLATFORM=y -# CONFIG_USB_XHCI_HISTB is not set -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=y -# CONFIG_USB_EHCI_FSL is not set -CONFIG_USB_EHCI_HCD_PLATFORM=m -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_MAX3421_HCD is not set -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PCI=y -# CONFIG_USB_OHCI_HCD_PLATFORM is not set -CONFIG_USB_UHCI_HCD=y -# CONFIG_USB_SL811_HCD is not set -# CONFIG_USB_R8A66597_HCD is not set -# CONFIG_USB_HCD_BCMA is not set -# CONFIG_USB_HCD_TEST_MODE is not set - -# -# USB Device Class drivers -# -CONFIG_USB_ACM=m -CONFIG_USB_PRINTER=m -CONFIG_USB_WDM=m -CONFIG_USB_TMC=m - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=m -# CONFIG_USB_STORAGE_DEBUG is not set -CONFIG_USB_STORAGE_REALTEK=m -CONFIG_REALTEK_AUTOPM=y -CONFIG_USB_STORAGE_DATAFAB=m -CONFIG_USB_STORAGE_FREECOM=m -CONFIG_USB_STORAGE_ISD200=m -CONFIG_USB_STORAGE_USBAT=m -CONFIG_USB_STORAGE_SDDR09=m -CONFIG_USB_STORAGE_SDDR55=m -CONFIG_USB_STORAGE_JUMPSHOT=m -CONFIG_USB_STORAGE_ALAUDA=m -CONFIG_USB_STORAGE_ONETOUCH=m -CONFIG_USB_STORAGE_KARMA=m -CONFIG_USB_STORAGE_CYPRESS_ATACB=m -CONFIG_USB_STORAGE_ENE_UB6250=m -CONFIG_USB_UAS=m - -# -# USB Imaging devices -# -CONFIG_USB_MDC800=m -CONFIG_USB_MICROTEK=m -# CONFIG_USBIP_CORE is not set - -# -# USB dual-mode controller drivers -# -# CONFIG_USB_CDNS_SUPPORT is not set -# CONFIG_USB_MUSB_HDRC is not set -# CONFIG_USB_DWC3 is not set -# 
CONFIG_USB_DWC2 is not set -# CONFIG_USB_CHIPIDEA is not set -# CONFIG_USB_ISP1760 is not set - -# -# USB port drivers -# -CONFIG_USB_SERIAL=y -# CONFIG_USB_SERIAL_CONSOLE is not set -CONFIG_USB_SERIAL_GENERIC=y -CONFIG_USB_SERIAL_SIMPLE=m -CONFIG_USB_SERIAL_AIRCABLE=m -CONFIG_USB_SERIAL_ARK3116=m -CONFIG_USB_SERIAL_BELKIN=m -CONFIG_USB_SERIAL_CH341=m -CONFIG_USB_SERIAL_WHITEHEAT=m -CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m -CONFIG_USB_SERIAL_CP210X=m -CONFIG_USB_SERIAL_CYPRESS_M8=m -CONFIG_USB_SERIAL_EMPEG=m -CONFIG_USB_SERIAL_FTDI_SIO=m -CONFIG_USB_SERIAL_VISOR=m -CONFIG_USB_SERIAL_IPAQ=m -CONFIG_USB_SERIAL_IR=m -CONFIG_USB_SERIAL_EDGEPORT=m -CONFIG_USB_SERIAL_EDGEPORT_TI=m -# CONFIG_USB_SERIAL_F81232 is not set -CONFIG_USB_SERIAL_F8153X=m -CONFIG_USB_SERIAL_GARMIN=m -CONFIG_USB_SERIAL_IPW=m -CONFIG_USB_SERIAL_IUU=m -CONFIG_USB_SERIAL_KEYSPAN_PDA=m -CONFIG_USB_SERIAL_KEYSPAN=m -CONFIG_USB_SERIAL_KLSI=m -CONFIG_USB_SERIAL_KOBIL_SCT=m -CONFIG_USB_SERIAL_MCT_U232=m -# CONFIG_USB_SERIAL_METRO is not set -CONFIG_USB_SERIAL_MOS7720=m -CONFIG_USB_SERIAL_MOS7840=m -CONFIG_USB_SERIAL_MXUPORT=m -CONFIG_USB_SERIAL_NAVMAN=m -CONFIG_USB_SERIAL_PL2303=m -CONFIG_USB_SERIAL_OTI6858=m -CONFIG_USB_SERIAL_QCAUX=m -CONFIG_USB_SERIAL_QUALCOMM=m -CONFIG_USB_SERIAL_SPCP8X5=m -CONFIG_USB_SERIAL_SAFE=m -CONFIG_USB_SERIAL_SAFE_PADDED=y -CONFIG_USB_SERIAL_SIERRAWIRELESS=m -CONFIG_USB_SERIAL_SYMBOL=m -CONFIG_USB_SERIAL_TI=m -CONFIG_USB_SERIAL_CYBERJACK=m -CONFIG_USB_SERIAL_WWAN=m -CONFIG_USB_SERIAL_OPTION=m -CONFIG_USB_SERIAL_OMNINET=m -CONFIG_USB_SERIAL_OPTICON=m -CONFIG_USB_SERIAL_XSENS_MT=m -# CONFIG_USB_SERIAL_WISHBONE is not set -CONFIG_USB_SERIAL_SSU100=m -CONFIG_USB_SERIAL_QT2=m -CONFIG_USB_SERIAL_UPD78F0730=m -# CONFIG_USB_SERIAL_XR is not set -CONFIG_USB_SERIAL_DEBUG=m - -# -# USB Miscellaneous drivers -# -CONFIG_USB_EMI62=m -CONFIG_USB_EMI26=m -CONFIG_USB_ADUTUX=m -CONFIG_USB_SEVSEG=m -CONFIG_USB_LEGOTOWER=m -CONFIG_USB_LCD=m -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM 
is not set -CONFIG_USB_IDMOUSE=m -CONFIG_USB_APPLEDISPLAY=m -# CONFIG_USB_QCOM_EUD is not set -# CONFIG_APPLE_MFI_FASTCHARGE is not set -CONFIG_USB_SISUSBVGA=m -CONFIG_USB_LD=m -# CONFIG_USB_TRANCEVIBRATOR is not set -CONFIG_USB_IOWARRIOR=m -# CONFIG_USB_TEST is not set -# CONFIG_USB_EHSET_TEST_FIXTURE is not set -CONFIG_USB_ISIGHTFW=m -# CONFIG_USB_YUREX is not set -CONFIG_USB_EZUSB_FX2=m -# CONFIG_USB_HUB_USB251XB is not set -CONFIG_USB_HSIC_USB3503=m -# CONFIG_USB_HSIC_USB4604 is not set -# CONFIG_USB_LINK_LAYER_TEST is not set -CONFIG_USB_CHAOSKEY=m -# CONFIG_USB_ONBOARD_HUB is not set -CONFIG_USB_ATM=m -# CONFIG_USB_SPEEDTOUCH is not set -CONFIG_USB_CXACRU=m -CONFIG_USB_UEAGLEATM=m -CONFIG_USB_XUSBATM=m - -# -# USB Physical Layer drivers -# -# CONFIG_NOP_USB_XCEIV is not set -# CONFIG_USB_GPIO_VBUS is not set -# CONFIG_USB_ISP1301 is not set -# CONFIG_USB_ULPI is not set -# end of USB Physical Layer drivers - -# CONFIG_USB_GADGET is not set -CONFIG_TYPEC=y -CONFIG_TYPEC_TCPM=y -# CONFIG_TYPEC_TCPCI is not set -# CONFIG_TYPEC_FUSB302 is not set -# CONFIG_TYPEC_QCOM_PMIC is not set -CONFIG_TYPEC_UCSI=y -# CONFIG_UCSI_CCG is not set -CONFIG_UCSI_ACPI=y -# CONFIG_UCSI_STM32G0 is not set -CONFIG_TYPEC_TPS6598X=m -# CONFIG_TYPEC_ANX7411 is not set -# CONFIG_TYPEC_RT1719 is not set -# CONFIG_TYPEC_HD3SS3220 is not set -# CONFIG_TYPEC_STUSB160X is not set -# CONFIG_TYPEC_WUSB3801 is not set - -# -# USB Type-C Multiplexer/DeMultiplexer Switch support -# -# CONFIG_TYPEC_MUX_FSA4480 is not set -# CONFIG_TYPEC_MUX_GPIO_SBU is not set -CONFIG_TYPEC_MUX_PI3USB30532=m -# CONFIG_TYPEC_MUX_NB7VPQ904M is not set -# end of USB Type-C Multiplexer/DeMultiplexer Switch support - -# -# USB Type-C Alternate Mode drivers -# -CONFIG_TYPEC_DP_ALTMODE=m -# CONFIG_TYPEC_NVIDIA_ALTMODE is not set -# end of USB Type-C Alternate Mode drivers - -CONFIG_USB_ROLE_SWITCH=y -CONFIG_MMC=m -# CONFIG_PWRSEQ_EMMC is not set -# CONFIG_PWRSEQ_SIMPLE is not set -CONFIG_MMC_BLOCK=m 
-CONFIG_MMC_BLOCK_MINORS=8 -CONFIG_SDIO_UART=m -# CONFIG_MMC_TEST is not set - -# -# MMC/SD/SDIO Host Controller Drivers -# -# CONFIG_MMC_DEBUG is not set -CONFIG_MMC_ARMMMCI=m -CONFIG_MMC_STM32_SDMMC=y -CONFIG_MMC_SDHCI=m -CONFIG_MMC_SDHCI_IO_ACCESSORS=y -CONFIG_MMC_SDHCI_PCI=m -CONFIG_MMC_RICOH_MMC=y -CONFIG_MMC_SDHCI_ACPI=m -CONFIG_MMC_SDHCI_PLTFM=m -# CONFIG_MMC_SDHCI_OF_ARASAN is not set -# CONFIG_MMC_SDHCI_OF_AT91 is not set -# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set -# CONFIG_MMC_SDHCI_CADENCE is not set -# CONFIG_MMC_SDHCI_F_SDH30 is not set -# CONFIG_MMC_SDHCI_MILBEAUT is not set -# CONFIG_MMC_SDHCI_MSM is not set -CONFIG_MMC_TIFM_SD=m -# CONFIG_MMC_SPI is not set -CONFIG_MMC_CB710=m -CONFIG_MMC_VIA_SDMMC=m -CONFIG_MMC_DW=m -CONFIG_MMC_DW_PLTFM=m -CONFIG_MMC_DW_BLUEFIELD=m -# CONFIG_MMC_DW_EXYNOS is not set -# CONFIG_MMC_DW_HI3798CV200 is not set -# CONFIG_MMC_DW_K3 is not set -# CONFIG_MMC_DW_PCI is not set -CONFIG_MMC_VUB300=m -CONFIG_MMC_USHC=m -# CONFIG_MMC_USDHI6ROL0 is not set -CONFIG_MMC_CQHCI=m -# CONFIG_MMC_HSQ is not set -CONFIG_MMC_TOSHIBA_PCI=m -CONFIG_MMC_MTK=m -# CONFIG_MMC_SDHCI_XENON is not set -# CONFIG_SCSI_UFSHCD is not set -CONFIG_MEMSTICK=m -# CONFIG_MEMSTICK_DEBUG is not set - -# -# MemoryStick drivers -# -# CONFIG_MEMSTICK_UNSAFE_RESUME is not set -CONFIG_MSPRO_BLOCK=m -# CONFIG_MS_BLOCK is not set - -# -# MemoryStick Host Controller Drivers -# -CONFIG_MEMSTICK_TIFM_MS=m -CONFIG_MEMSTICK_JMICRON_38X=m -CONFIG_MEMSTICK_R592=m -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_CLASS_FLASH=m -# CONFIG_LEDS_CLASS_MULTICOLOR is not set -# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set - -# -# LED drivers -# -# CONFIG_LEDS_AN30259A is not set -# CONFIG_LEDS_AW200XX is not set -# CONFIG_LEDS_AW2013 is not set -# CONFIG_LEDS_BCM6328 is not set -# CONFIG_LEDS_BCM6358 is not set -# CONFIG_LEDS_CR0014114 is not set -# CONFIG_LEDS_EL15203000 is not set -CONFIG_LEDS_LM3530=m -# CONFIG_LEDS_LM3532 is not set -# CONFIG_LEDS_LM3642 is not set -# 
CONFIG_LEDS_LM3692X is not set -# CONFIG_LEDS_PCA9532 is not set -# CONFIG_LEDS_GPIO is not set -CONFIG_LEDS_LP3944=m -# CONFIG_LEDS_LP3952 is not set -# CONFIG_LEDS_LP50XX is not set -# CONFIG_LEDS_LP55XX_COMMON is not set -# CONFIG_LEDS_LP8860 is not set -# CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_PCA963X is not set -# CONFIG_LEDS_PCA995X is not set -# CONFIG_LEDS_DAC124S085 is not set -# CONFIG_LEDS_PWM is not set -# CONFIG_LEDS_REGULATOR is not set -# CONFIG_LEDS_BD2606MVV is not set -# CONFIG_LEDS_BD2802 is not set -CONFIG_LEDS_LT3593=m -# CONFIG_LEDS_TCA6507 is not set -# CONFIG_LEDS_TLC591XX is not set -# CONFIG_LEDS_LM355x is not set -# CONFIG_LEDS_IS31FL319X is not set -# CONFIG_LEDS_IS31FL32XX is not set - -# -# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) -# -CONFIG_LEDS_BLINKM=m -# CONFIG_LEDS_SYSCON is not set -# CONFIG_LEDS_MLXREG is not set -# CONFIG_LEDS_USER is not set -# CONFIG_LEDS_SPI_BYTE is not set -# CONFIG_LEDS_LM3697 is not set - -# -# Flash and Torch LED drivers -# -# CONFIG_LEDS_AAT1290 is not set -# CONFIG_LEDS_AS3645A is not set -# CONFIG_LEDS_KTD2692 is not set -# CONFIG_LEDS_LM3601X is not set -# CONFIG_LEDS_RT4505 is not set -# CONFIG_LEDS_RT8515 is not set -# CONFIG_LEDS_SGM3140 is not set - -# -# RGB LED drivers -# - -# -# LED Triggers -# -CONFIG_LEDS_TRIGGERS=y -CONFIG_LEDS_TRIGGER_TIMER=m -CONFIG_LEDS_TRIGGER_ONESHOT=m -# CONFIG_LEDS_TRIGGER_DISK is not set -# CONFIG_LEDS_TRIGGER_MTD is not set -CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_LEDS_TRIGGER_BACKLIGHT=m -# CONFIG_LEDS_TRIGGER_CPU is not set -# CONFIG_LEDS_TRIGGER_ACTIVITY is not set -CONFIG_LEDS_TRIGGER_DEFAULT_ON=m - -# -# iptables trigger is under Netfilter config (LED target) -# -CONFIG_LEDS_TRIGGER_TRANSIENT=m -CONFIG_LEDS_TRIGGER_CAMERA=m -# CONFIG_LEDS_TRIGGER_PANIC is not set -# CONFIG_LEDS_TRIGGER_NETDEV is not set -# CONFIG_LEDS_TRIGGER_PATTERN is not set -# CONFIG_LEDS_TRIGGER_AUDIO is not set -# CONFIG_LEDS_TRIGGER_TTY is not 
set - -# -# Simple LED drivers -# -# CONFIG_ACCESSIBILITY is not set -CONFIG_INFINIBAND=m -CONFIG_INFINIBAND_USER_MAD=m -CONFIG_INFINIBAND_USER_ACCESS=m -CONFIG_INFINIBAND_USER_MEM=y -CONFIG_INFINIBAND_ON_DEMAND_PAGING=y -CONFIG_INFINIBAND_ADDR_TRANS=y -CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y -CONFIG_INFINIBAND_VIRT_DMA=y -CONFIG_INFINIBAND_BNXT_RE=m -CONFIG_INFINIBAND_CXGB4=m -# CONFIG_INFINIBAND_EFA is not set -CONFIG_INFINIBAND_ERDMA=m -CONFIG_INFINIBAND_HNS=m -CONFIG_INFINIBAND_HNS_HIP08=y -# CONFIG_INFINIBAND_IRDMA is not set -CONFIG_MLX4_INFINIBAND=m -CONFIG_MLX5_INFINIBAND=m -# CONFIG_INFINIBAND_MTHCA is not set -# CONFIG_INFINIBAND_OCRDMA is not set -CONFIG_INFINIBAND_QEDR=m -CONFIG_RDMA_RXE=m -CONFIG_RDMA_SIW=m -CONFIG_INFINIBAND_IPOIB=m -CONFIG_INFINIBAND_IPOIB_CM=y -CONFIG_INFINIBAND_IPOIB_DEBUG=y -# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set -CONFIG_INFINIBAND_SRP=m -CONFIG_INFINIBAND_SRPT=m -CONFIG_INFINIBAND_ISER=m -CONFIG_INFINIBAND_ISERT=m -# CONFIG_INFINIBAND_RTRS_CLIENT is not set -# CONFIG_INFINIBAND_RTRS_SERVER is not set -CONFIG_EDAC_SUPPORT=y -CONFIG_EDAC=y -CONFIG_EDAC_LEGACY_SYSFS=y -# CONFIG_EDAC_DEBUG is not set -CONFIG_EDAC_GHES=y -CONFIG_EDAC_THUNDERX=m -CONFIG_EDAC_XGENE=m -# CONFIG_EDAC_DMC520 is not set -CONFIG_RTC_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -CONFIG_RTC_SYSTOHC=y -CONFIG_RTC_SYSTOHC_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set -CONFIG_RTC_NVMEM=y - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -CONFIG_RTC_DRV_ABB5ZES3=m -# CONFIG_RTC_DRV_ABEOZ9 is not set -CONFIG_RTC_DRV_ABX80X=m -CONFIG_RTC_DRV_DS1307=m -# CONFIG_RTC_DRV_DS1307_CENTURY is not set -CONFIG_RTC_DRV_DS1374=m -CONFIG_RTC_DRV_DS1374_WDT=y -CONFIG_RTC_DRV_DS1672=m -# CONFIG_RTC_DRV_HYM8563 is not set -CONFIG_RTC_DRV_MAX6900=m -# CONFIG_RTC_DRV_NCT3018Y is not set 
-CONFIG_RTC_DRV_RS5C372=m -CONFIG_RTC_DRV_ISL1208=m -CONFIG_RTC_DRV_ISL12022=m -# CONFIG_RTC_DRV_ISL12026 is not set -CONFIG_RTC_DRV_X1205=m -CONFIG_RTC_DRV_PCF8523=m -CONFIG_RTC_DRV_PCF85063=m -# CONFIG_RTC_DRV_PCF85363 is not set -CONFIG_RTC_DRV_PCF8563=m -CONFIG_RTC_DRV_PCF8583=m -CONFIG_RTC_DRV_M41T80=m -CONFIG_RTC_DRV_M41T80_WDT=y -CONFIG_RTC_DRV_BQ32K=m -# CONFIG_RTC_DRV_S35390A is not set -CONFIG_RTC_DRV_FM3130=m -CONFIG_RTC_DRV_RX8010=m -CONFIG_RTC_DRV_RX8581=m -CONFIG_RTC_DRV_RX8025=m -CONFIG_RTC_DRV_EM3027=m -# CONFIG_RTC_DRV_RV3028 is not set -# CONFIG_RTC_DRV_RV3032 is not set -# CONFIG_RTC_DRV_RV8803 is not set -# CONFIG_RTC_DRV_SD3078 is not set - -# -# SPI RTC drivers -# -CONFIG_RTC_DRV_M41T93=m -CONFIG_RTC_DRV_M41T94=m -# CONFIG_RTC_DRV_DS1302 is not set -CONFIG_RTC_DRV_DS1305=m -CONFIG_RTC_DRV_DS1343=m -CONFIG_RTC_DRV_DS1347=m -CONFIG_RTC_DRV_DS1390=m -# CONFIG_RTC_DRV_MAX6916 is not set -CONFIG_RTC_DRV_R9701=m -CONFIG_RTC_DRV_RX4581=m -CONFIG_RTC_DRV_RS5C348=m -CONFIG_RTC_DRV_MAX6902=m -CONFIG_RTC_DRV_PCF2123=m -CONFIG_RTC_DRV_MCP795=m -CONFIG_RTC_I2C_AND_SPI=y - -# -# SPI and I2C RTC drivers -# -CONFIG_RTC_DRV_DS3232=m -CONFIG_RTC_DRV_DS3232_HWMON=y -CONFIG_RTC_DRV_PCF2127=m -CONFIG_RTC_DRV_RV3029C2=m -# CONFIG_RTC_DRV_RV3029_HWMON is not set -# CONFIG_RTC_DRV_RX6110 is not set - -# -# Platform RTC drivers -# -CONFIG_RTC_DRV_DS1286=m -CONFIG_RTC_DRV_DS1511=m -CONFIG_RTC_DRV_DS1553=m -CONFIG_RTC_DRV_DS1685_FAMILY=m -CONFIG_RTC_DRV_DS1685=y -# CONFIG_RTC_DRV_DS1689 is not set -# CONFIG_RTC_DRV_DS17285 is not set -# CONFIG_RTC_DRV_DS17485 is not set -# CONFIG_RTC_DRV_DS17885 is not set -CONFIG_RTC_DRV_DS1742=m -CONFIG_RTC_DRV_DS2404=m -CONFIG_RTC_DRV_EFI=y -CONFIG_RTC_DRV_STK17TA8=m -# CONFIG_RTC_DRV_M48T86 is not set -CONFIG_RTC_DRV_M48T35=m -CONFIG_RTC_DRV_M48T59=m -CONFIG_RTC_DRV_MSM6242=m -CONFIG_RTC_DRV_RP5C01=m -# CONFIG_RTC_DRV_ZYNQMP is not set - -# -# on-CPU RTC drivers -# -# CONFIG_RTC_DRV_PL030 is not set -CONFIG_RTC_DRV_PL031=y -# 
CONFIG_RTC_DRV_CADENCE is not set -# CONFIG_RTC_DRV_FTRTC010 is not set -# CONFIG_RTC_DRV_XGENE is not set -# CONFIG_RTC_DRV_R7301 is not set - -# -# HID Sensor RTC drivers -# -# CONFIG_RTC_DRV_GOLDFISH is not set -CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set - -# -# DMA Devices -# -CONFIG_DMA_ENGINE=y -CONFIG_DMA_ACPI=y -CONFIG_DMA_OF=y -# CONFIG_ALTERA_MSGDMA is not set -# CONFIG_AMBA_PL08X is not set -# CONFIG_BCM_SBA_RAID is not set -# CONFIG_DW_AXI_DMAC is not set -# CONFIG_FSL_EDMA is not set -# CONFIG_FSL_QDMA is not set -# CONFIG_HISI_DMA is not set -# CONFIG_INTEL_IDMA64 is not set -# CONFIG_K3_DMA is not set -# CONFIG_MV_XOR_V2 is not set -# CONFIG_PL330_DMA is not set -# CONFIG_PLX_DMA is not set -# CONFIG_XGENE_DMA is not set -# CONFIG_XILINX_DMA is not set -# CONFIG_XILINX_XDMA is not set -# CONFIG_XILINX_ZYNQMP_DMA is not set -# CONFIG_XILINX_ZYNQMP_DPDMA is not set -# CONFIG_QCOM_BAM_DMA is not set -# CONFIG_QCOM_GPI_DMA is not set -CONFIG_QCOM_HIDMA_MGMT=m -CONFIG_QCOM_HIDMA=m -CONFIG_DW_DMAC_CORE=m -CONFIG_DW_DMAC=m -CONFIG_DW_DMAC_PCI=m -# CONFIG_DW_EDMA is not set -# CONFIG_SF_PDMA is not set - -# -# DMA Clients -# -CONFIG_ASYNC_TX_DMA=y -CONFIG_DMATEST=m -CONFIG_DMA_ENGINE_RAID=y - -# -# DMABUF options -# -CONFIG_SYNC_FILE=y -# CONFIG_SW_SYNC is not set -# CONFIG_UDMABUF is not set -# CONFIG_DMABUF_MOVE_NOTIFY is not set -# CONFIG_DMABUF_DEBUG is not set -# CONFIG_DMABUF_SELFTESTS is not set -# CONFIG_DMABUF_HEAPS is not set -# CONFIG_DMABUF_SYSFS_STATS is not set -# end of DMABUF options - -CONFIG_UIO=m -CONFIG_UIO_CIF=m -CONFIG_UIO_PDRV_GENIRQ=m -# CONFIG_UIO_DMEM_GENIRQ is not set -CONFIG_UIO_AEC=m -CONFIG_UIO_SERCOS3=m -CONFIG_UIO_PCI_GENERIC=m -# CONFIG_UIO_NETX is not set -# CONFIG_UIO_PRUSS is not set -# CONFIG_UIO_MF624 is not set -CONFIG_VFIO=m -CONFIG_VFIO_GROUP=y -CONFIG_VFIO_CONTAINER=y -CONFIG_VFIO_IOMMU_TYPE1=m -CONFIG_VFIO_NOIOMMU=y -CONFIG_VFIO_VIRQFD=y - -# -# VFIO support for PCI devices -# -CONFIG_VFIO_PCI_CORE=m 
-CONFIG_VFIO_PCI_MMAP=y -CONFIG_VFIO_PCI_INTX=y -CONFIG_VFIO_PCI=m -# CONFIG_MLX5_VFIO_PCI is not set -# CONFIG_HISI_ACC_VFIO_PCI is not set -# end of VFIO support for PCI devices - -# -# VFIO support for platform devices -# -CONFIG_VFIO_PLATFORM_BASE=m -CONFIG_VFIO_PLATFORM=m -# CONFIG_VFIO_AMBA is not set - -# -# VFIO platform reset drivers -# -# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set -# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set -# end of VFIO platform reset drivers -# end of VFIO support for platform devices - -# CONFIG_VIRT_DRIVERS is not set -CONFIG_VIRTIO_ANCHOR=y -CONFIG_VIRTIO=m -CONFIG_VIRTIO_PCI_LIB=m -CONFIG_VIRTIO_PCI_LIB_LEGACY=m -CONFIG_VIRTIO_MENU=y -CONFIG_VIRTIO_PCI=m -CONFIG_VIRTIO_PCI_LEGACY=y -CONFIG_VIRTIO_PMEM=m -CONFIG_VIRTIO_BALLOON=m -CONFIG_VIRTIO_MEM=m -CONFIG_VIRTIO_INPUT=m -CONFIG_VIRTIO_MMIO=m -# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set -CONFIG_VIRTIO_DMA_SHARED_BUFFER=m -# CONFIG_VDPA is not set -CONFIG_VHOST_IOTLB=m -CONFIG_VHOST_TASK=y -CONFIG_VHOST=m -CONFIG_VHOST_MENU=y -CONFIG_VHOST_NET=m -CONFIG_VHOST_SCSI=m -CONFIG_VHOST_VSOCK=m -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set - -# -# Microsoft Hyper-V guest support -# -# CONFIG_HYPERV is not set -# end of Microsoft Hyper-V guest support - -# CONFIG_GREYBUS is not set -# CONFIG_COMEDI is not set -CONFIG_STAGING=y -# CONFIG_RTS5208 is not set -# CONFIG_VT6655 is not set -# CONFIG_FB_SM750 is not set -# CONFIG_STAGING_MEDIA is not set -# CONFIG_STAGING_BOARD is not set -# CONFIG_LTE_GDM724X is not set -# CONFIG_FB_TFT is not set -# CONFIG_KS7010 is not set -# CONFIG_PI433 is not set -# CONFIG_XIL_AXIS_FIFO is not set -# CONFIG_FIELDBUS_DEV is not set -# CONFIG_QLGE is not set -# CONFIG_VME_BUS is not set -# CONFIG_GOLDFISH is not set -CONFIG_CHROME_PLATFORMS=y -# CONFIG_CHROMEOS_ACPI is not set -# CONFIG_CHROMEOS_TBMC is not set -# CONFIG_CROS_EC is not set -# CONFIG_CROS_KBD_LED_BACKLIGHT is not set -# CONFIG_CROS_HPS_I2C is not set -# 
CONFIG_CHROMEOS_PRIVACY_SCREEN is not set -# CONFIG_MELLANOX_PLATFORM is not set -CONFIG_ARM_CPU_RESCTRL=y -CONFIG_SURFACE_PLATFORMS=y -# CONFIG_SURFACE_3_POWER_OPREGION is not set -# CONFIG_SURFACE_GPE is not set -# CONFIG_SURFACE_HOTPLUG is not set -# CONFIG_SURFACE_PRO3_BUTTON is not set -CONFIG_HAVE_CLK=y -CONFIG_HAVE_CLK_PREPARE=y -CONFIG_COMMON_CLK=y - -# -# Clock driver for ARM Reference designs -# -# CONFIG_CLK_ICST is not set -CONFIG_CLK_SP810=y -CONFIG_CLK_VEXPRESS_OSC=y -# end of Clock driver for ARM Reference designs - -# CONFIG_LMK04832 is not set -# CONFIG_COMMON_CLK_MAX9485 is not set -CONFIG_COMMON_CLK_SCPI=m -# CONFIG_COMMON_CLK_SI5341 is not set -# CONFIG_COMMON_CLK_SI5351 is not set -# CONFIG_COMMON_CLK_SI514 is not set -# CONFIG_COMMON_CLK_SI544 is not set -# CONFIG_COMMON_CLK_SI570 is not set -# CONFIG_COMMON_CLK_CDCE706 is not set -# CONFIG_COMMON_CLK_CDCE925 is not set -# CONFIG_COMMON_CLK_CS2000_CP is not set -# CONFIG_COMMON_CLK_AXI_CLKGEN is not set -CONFIG_COMMON_CLK_XGENE=y -# CONFIG_COMMON_CLK_PWM is not set -# CONFIG_COMMON_CLK_RS9_PCIE is not set -# CONFIG_COMMON_CLK_SI521XX is not set -# CONFIG_COMMON_CLK_VC3 is not set -# CONFIG_COMMON_CLK_VC5 is not set -# CONFIG_COMMON_CLK_VC7 is not set -# CONFIG_COMMON_CLK_FIXED_MMIO is not set -CONFIG_COMMON_CLK_HI3516CV300=y -CONFIG_COMMON_CLK_HI3519=y -CONFIG_COMMON_CLK_HI3559A=y -CONFIG_COMMON_CLK_HI3660=y -CONFIG_COMMON_CLK_HI3670=y -CONFIG_COMMON_CLK_HI3798CV200=y -# CONFIG_COMMON_CLK_HI6220 is not set -CONFIG_RESET_HISI=y -CONFIG_STUB_CLK_HI3660=y -# CONFIG_COMMON_CLK_QCOM is not set -# CONFIG_XILINX_VCU is not set -# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set -CONFIG_HWSPINLOCK=y -# CONFIG_HWSPINLOCK_QCOM is not set - -# -# Clock Source drivers -# -CONFIG_TIMER_OF=y -CONFIG_TIMER_ACPI=y -CONFIG_TIMER_PROBE=y -CONFIG_CLKSRC_MMIO=y -CONFIG_ARM_ARCH_TIMER=y -CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y -CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y -CONFIG_FSL_ERRATUM_A008585=y 
-CONFIG_HISILICON_ERRATUM_161010101=y -CONFIG_ARM64_ERRATUM_858921=y -CONFIG_ARM_TIMER_SP804=y -# end of Clock Source drivers - -CONFIG_MAILBOX=y -CONFIG_ARM_MHU=m -# CONFIG_ARM_MHU_V2 is not set -# CONFIG_PLATFORM_MHU is not set -# CONFIG_PL320_MBOX is not set -CONFIG_PCC=y -# CONFIG_ALTERA_MBOX is not set -CONFIG_HI3660_MBOX=y -CONFIG_HI6220_MBOX=y -# CONFIG_MAILBOX_TEST is not set -# CONFIG_QCOM_APCS_IPC is not set -CONFIG_XGENE_SLIMPRO_MBOX=m -# CONFIG_QCOM_IPCC is not set -CONFIG_IOMMU_IOVA=y -CONFIG_IOMMU_API=y -CONFIG_IOMMU_SUPPORT=y - -# -# Generic IOMMU Pagetable Support -# -CONFIG_IOMMU_IO_PGTABLE=y -CONFIG_IOMMU_IO_PGTABLE_LPAE=y -# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set -# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set -# CONFIG_IOMMU_IO_PGTABLE_DART is not set -# end of Generic IOMMU Pagetable Support - -# CONFIG_IOMMU_DEBUGFS is not set -CONFIG_IOMMU_DEFAULT_DMA_STRICT=y -# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set -# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set -CONFIG_OF_IOMMU=y -CONFIG_IOMMU_DMA=y -CONFIG_IOMMU_SVA=y -# CONFIG_IOMMUFD is not set -CONFIG_ARM_SMMU=y -# CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set -CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y -CONFIG_ARM_SMMU_QCOM=y -# CONFIG_ARM_SMMU_QCOM_DEBUG is not set -CONFIG_ARM_SMMU_V3=y -CONFIG_ARM_SMMU_V3_SVA=y -# CONFIG_QCOM_IOMMU is not set -# CONFIG_VIRTIO_IOMMU is not set - -# -# Remoteproc drivers -# -# CONFIG_REMOTEPROC is not set -# end of Remoteproc drivers - -# -# Rpmsg drivers -# -# CONFIG_RPMSG_QCOM_GLINK_RPM is not set -# CONFIG_RPMSG_VIRTIO is not set -# end of Rpmsg drivers - -# CONFIG_SOUNDWIRE is not set - -# -# SOC (System On Chip) specific Drivers -# - -# -# Amlogic SoC drivers -# -# end of Amlogic SoC drivers - -# -# Broadcom SoC drivers -# -# CONFIG_SOC_BRCMSTB is not set -# end of Broadcom SoC drivers - -# -# NXP/Freescale QorIQ SoC drivers -# -# CONFIG_QUICC_ENGINE is not set -# CONFIG_FSL_RCPM is not set -# end of NXP/Freescale QorIQ SoC drivers - -# -# fujitsu SoC 
drivers -# -# CONFIG_A64FX_DIAG is not set -# end of fujitsu SoC drivers - -# -# Hisilicon SoC drivers -# -CONFIG_KUNPENG_HCCS=m -# end of Hisilicon SoC drivers - -# -# i.MX SoC drivers -# -# end of i.MX SoC drivers - -# -# Enable LiteX SoC Builder specific drivers -# -# CONFIG_LITEX_SOC_CONTROLLER is not set -# end of Enable LiteX SoC Builder specific drivers - -# CONFIG_WPCM450_SOC is not set - -# -# Qualcomm SoC drivers -# -# CONFIG_QCOM_AOSS_QMP is not set -# CONFIG_QCOM_COMMAND_DB is not set -# CONFIG_QCOM_CPR is not set -# CONFIG_QCOM_GENI_SE is not set -# CONFIG_QCOM_GSBI is not set -# CONFIG_QCOM_LLCC is not set -CONFIG_QCOM_KRYO_L2_ACCESSORS=y -# CONFIG_QCOM_OCMEM is not set -# CONFIG_QCOM_RAMP_CTRL is not set -# CONFIG_QCOM_RMTFS_MEM is not set -# CONFIG_QCOM_RPM_MASTER_STATS is not set -# CONFIG_QCOM_RPMH is not set -# CONFIG_QCOM_SMEM is not set -# CONFIG_QCOM_SPM is not set -# CONFIG_QCOM_ICC_BWMON is not set -# end of Qualcomm SoC drivers - -# CONFIG_SOC_TI is not set - -# -# Xilinx SoC drivers -# -# end of Xilinx SoC drivers -# end of SOC (System On Chip) specific Drivers - -# CONFIG_PM_DEVFREQ is not set -CONFIG_EXTCON=y - -# -# Extcon Device Drivers -# -# CONFIG_EXTCON_FSA9480 is not set -CONFIG_EXTCON_GPIO=m -# CONFIG_EXTCON_MAX3355 is not set -# CONFIG_EXTCON_PTN5150 is not set -# CONFIG_EXTCON_QCOM_SPMI_MISC is not set -# CONFIG_EXTCON_RT8973A is not set -# CONFIG_EXTCON_SM5502 is not set -# CONFIG_EXTCON_USB_GPIO is not set -# CONFIG_EXTCON_USBC_TUSB320 is not set -# CONFIG_MEMORY is not set -# CONFIG_IIO is not set -CONFIG_NTB=m -# CONFIG_NTB_MSI is not set -# CONFIG_NTB_IDT is not set -# CONFIG_NTB_EPF is not set -# CONFIG_NTB_SWITCHTEC is not set -# CONFIG_NTB_PINGPONG is not set -# CONFIG_NTB_TOOL is not set -# CONFIG_NTB_PERF is not set -# CONFIG_NTB_TRANSPORT is not set -CONFIG_PWM=y -CONFIG_PWM_SYSFS=y -# CONFIG_PWM_DEBUG is not set -# CONFIG_PWM_ATMEL_TCB is not set -# CONFIG_PWM_CLK is not set -# CONFIG_PWM_DWC is not set -# 
CONFIG_PWM_FSL_FTM is not set -# CONFIG_PWM_HIBVT is not set -# CONFIG_PWM_PCA9685 is not set -# CONFIG_PWM_XILINX is not set - -# -# IRQ chip support -# -CONFIG_IRQCHIP=y -CONFIG_ARM_GIC=y -CONFIG_ARM_GIC_MAX_NR=1 -CONFIG_ARM_GIC_V2M=y -CONFIG_ARM_GIC_V3=y -CONFIG_ARM_GIC_V3_ITS=y -CONFIG_ARM_GIC_V3_ITS_PCI=y -CONFIG_ARM_GIC_PHYTIUM_2500=y -# CONFIG_AL_FIC is not set -CONFIG_HISILICON_IRQ_MBIGEN=y -# CONFIG_XILINX_INTC is not set -CONFIG_PARTITION_PERCPU=y -CONFIG_QCOM_IRQ_COMBINER=y -# CONFIG_QCOM_PDC is not set -# CONFIG_QCOM_MPM is not set -# end of IRQ chip support - -# CONFIG_IPACK_BUS is not set -CONFIG_RESET_CONTROLLER=y -# CONFIG_RESET_QCOM_AOSS is not set -# CONFIG_RESET_QCOM_PDC is not set -# CONFIG_RESET_TI_SYSCON is not set -# CONFIG_RESET_TI_TPS380X is not set -# CONFIG_COMMON_RESET_HI3660 is not set -CONFIG_COMMON_RESET_HI6220=m - -# -# PHY Subsystem -# -CONFIG_GENERIC_PHY=y -CONFIG_PHY_XGENE=y -# CONFIG_PHY_CAN_TRANSCEIVER is not set - -# -# PHY drivers for Broadcom platforms -# -# CONFIG_BCM_KONA_USB2_PHY is not set -# end of PHY drivers for Broadcom platforms - -# CONFIG_PHY_CADENCE_TORRENT is not set -# CONFIG_PHY_CADENCE_DPHY is not set -# CONFIG_PHY_CADENCE_DPHY_RX is not set -# CONFIG_PHY_CADENCE_SIERRA is not set -# CONFIG_PHY_CADENCE_SALVO is not set -CONFIG_PHY_HI6220_USB=m -# CONFIG_PHY_HI3660_USB is not set -# CONFIG_PHY_HI3670_USB is not set -# CONFIG_PHY_HI3670_PCIE is not set -# CONFIG_PHY_HISTB_COMBPHY is not set -# CONFIG_PHY_HISI_INNO_USB2 is not set -# CONFIG_PHY_PXA_28NM_HSIC is not set -# CONFIG_PHY_PXA_28NM_USB2 is not set -# CONFIG_PHY_LAN966X_SERDES is not set -# CONFIG_PHY_MAPPHONE_MDM6600 is not set -# CONFIG_PHY_OCELOT_SERDES is not set -# CONFIG_PHY_QCOM_APQ8064_SATA is not set -# CONFIG_PHY_QCOM_EDP is not set -# CONFIG_PHY_QCOM_IPQ4019_USB is not set -# CONFIG_PHY_QCOM_IPQ806X_SATA is not set -# CONFIG_PHY_QCOM_PCIE2 is not set -# CONFIG_PHY_QCOM_QMP is not set -# CONFIG_PHY_QCOM_QUSB2 is not set -# 
CONFIG_PHY_QCOM_SNPS_EUSB2 is not set -# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set -# CONFIG_PHY_QCOM_M31_USB is not set -# CONFIG_PHY_QCOM_USB_HS is not set -# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set -# CONFIG_PHY_QCOM_USB_HSIC is not set -# CONFIG_PHY_QCOM_USB_HS_28NM is not set -# CONFIG_PHY_QCOM_USB_SS is not set -# CONFIG_PHY_QCOM_IPQ806X_USB is not set -# CONFIG_PHY_QCOM_SGMII_ETH is not set -# CONFIG_PHY_TUSB1210 is not set -# end of PHY Subsystem - -# CONFIG_POWERCAP is not set -# CONFIG_MCB is not set - -# -# Performance monitor support -# -# CONFIG_ARM_CCI_PMU is not set -CONFIG_ARM_CCN=y -CONFIG_ARM_CMN=y -CONFIG_ARM_PMU=y -CONFIG_ARM_PMU_ACPI=y -CONFIG_ARM_SMMU_V3_PMU=m -CONFIG_ARM_PMUV3=y -CONFIG_ARM_DSU_PMU=y -CONFIG_QCOM_L2_PMU=y -CONFIG_QCOM_L3_PMU=y -CONFIG_THUNDERX2_PMU=m -CONFIG_XGENE_PMU=y -CONFIG_ARM_SPE_PMU=m -# CONFIG_ARM_DMC620_PMU is not set -# CONFIG_MARVELL_CN10K_TAD_PMU is not set -CONFIG_ALIBABA_UNCORE_DRW_PMU=m -CONFIG_HISI_PMU=m -CONFIG_HISI_PCIE_PMU=m -# CONFIG_HNS3_PMU is not set -# CONFIG_MARVELL_CN10K_DDR_PMU is not set -CONFIG_DWC_PCIE_PMU=m -# CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set -# end of Performance monitor support - -CONFIG_RAS=y -# CONFIG_USB4 is not set - -# -# Android -# -# CONFIG_ANDROID_BINDER_IPC is not set -# end of Android - -CONFIG_LIBNVDIMM=m -CONFIG_BLK_DEV_PMEM=m -CONFIG_ND_CLAIM=y -CONFIG_ND_BTT=m -CONFIG_BTT=y -CONFIG_ND_PFN=m -CONFIG_NVDIMM_PFN=y -CONFIG_NVDIMM_DAX=y -CONFIG_OF_PMEM=m -CONFIG_NVDIMM_KEYS=y -# CONFIG_NVDIMM_SECURITY_TEST is not set -CONFIG_DAX=y -CONFIG_DEV_DAX=m -CONFIG_DEV_DAX_PMEM=m -CONFIG_DEV_DAX_HMEM=m -CONFIG_DEV_DAX_CXL=m -CONFIG_DEV_DAX_HMEM_DEVICES=y -# CONFIG_DEV_DAX_KMEM is not set -CONFIG_NVMEM=y -CONFIG_NVMEM_SYSFS=y - -# -# Layout Types -# -# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set -# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set -# end of Layout Types - -# CONFIG_NVMEM_QCOM_QFPROM is not set -# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set -# CONFIG_NVMEM_RMEM is not set 
-# CONFIG_NVMEM_U_BOOT_ENV is not set - -# -# HW tracing support -# -CONFIG_STM=m -# CONFIG_STM_PROTO_BASIC is not set -# CONFIG_STM_PROTO_SYS_T is not set -# CONFIG_STM_DUMMY is not set -# CONFIG_STM_SOURCE_CONSOLE is not set -# CONFIG_STM_SOURCE_HEARTBEAT is not set -# CONFIG_STM_SOURCE_FTRACE is not set -# CONFIG_INTEL_TH is not set -# CONFIG_HISI_PTT is not set -# end of HW tracing support - -# CONFIG_FPGA is not set -# CONFIG_FSI is not set -CONFIG_TEE=m -# CONFIG_OPTEE is not set -# CONFIG_SIOX is not set -# CONFIG_SLIMBUS is not set -# CONFIG_INTERCONNECT is not set -# CONFIG_COUNTER is not set -# CONFIG_MOST is not set -# CONFIG_PECI is not set -# CONFIG_HTE is not set -# CONFIG_CDX_BUS is not set -# end of Device Drivers - -# -# File systems -# -CONFIG_DCACHE_WORD_ACCESS=y -# CONFIG_VALIDATE_FS_PARSER is not set -CONFIG_FS_IOMAP=y -CONFIG_BUFFER_HEAD=y -CONFIG_LEGACY_DIRECT_IO=y -# CONFIG_EXT2_FS is not set -CONFIG_EXT3_FS=m -# CONFIG_EXT3_FS_POSIX_ACL is not set -# CONFIG_EXT3_FS_SECURITY is not set -CONFIG_EXT4_FS=m -CONFIG_EXT4_USE_FOR_EXT2=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -# CONFIG_EXT4_DEBUG is not set -CONFIG_JBD2=m -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=m -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -CONFIG_XFS_FS=m -CONFIG_XFS_SUPPORT_V4=y -CONFIG_XFS_SUPPORT_ASCII_CI=y -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -# CONFIG_XFS_RT is not set -# CONFIG_XFS_ONLINE_SCRUB is not set -# CONFIG_XFS_WARN is not set -# CONFIG_XFS_DEBUG is not set -# CONFIG_GFS2_FS is not set -# CONFIG_OCFS2_FS is not set -CONFIG_BTRFS_FS=m -# CONFIG_BTRFS_FS_POSIX_ACL is not set -# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set -# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set -# CONFIG_BTRFS_DEBUG is not set -# CONFIG_BTRFS_ASSERT is not set -# CONFIG_BTRFS_FS_REF_VERIFY is not set -# CONFIG_NILFS2_FS is not set -# CONFIG_F2FS_FS is not set -# CONFIG_ZONEFS_FS is not set -CONFIG_FS_DAX=y -CONFIG_FS_DAX_PMD=y 
-CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=y -CONFIG_EXPORTFS_BLOCK_OPS=y -CONFIG_FILE_LOCKING=y -# CONFIG_FS_ENCRYPTION is not set -# CONFIG_FS_VERITY is not set -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -# CONFIG_QUOTA_DEBUG is not set -CONFIG_QUOTA_TREE=y -# CONFIG_QFMT_V1 is not set -CONFIG_QFMT_V2=y -CONFIG_QUOTACTL=y -CONFIG_AUTOFS_FS=y -CONFIG_FUSE_FS=m -CONFIG_CUSE=m -CONFIG_VIRTIO_FS=m -CONFIG_FUSE_DAX=y -CONFIG_VIRT_FUSE=m -CONFIG_OVERLAY_FS=m -CONFIG_OVERLAY_FS_REDIRECT_DIR=y -CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y -CONFIG_OVERLAY_FS_INDEX=y -# CONFIG_OVERLAY_FS_NFS_EXPORT is not set -# CONFIG_OVERLAY_FS_XINO_AUTO is not set -# CONFIG_OVERLAY_FS_METACOPY is not set -# CONFIG_OVERLAY_FS_DEBUG is not set - -# -# Caches -# -CONFIG_NETFS_SUPPORT=m -CONFIG_NETFS_STATS=y -CONFIG_FSCACHE=m -CONFIG_FSCACHE_STATS=y -# CONFIG_FSCACHE_DEBUG is not set -CONFIG_CACHEFILES=m -# CONFIG_CACHEFILES_DEBUG is not set -# CONFIG_CACHEFILES_ERROR_INJECTION is not set -CONFIG_CACHEFILES_ONDEMAND=y -# end of Caches - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -# end of CD-ROM/DVD Filesystems - -# -# DOS/FAT/EXFAT/NT Filesystems -# -CONFIG_FAT_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="ascii" -# CONFIG_FAT_DEFAULT_UTF8 is not set -# CONFIG_EXFAT_FS is not set -# CONFIG_NTFS_FS is not set -CONFIG_NTFS3_FS=m -# CONFIG_NTFS3_64BIT_CLUSTER is not set -# CONFIG_NTFS3_LZX_XPRESS is not set -# CONFIG_NTFS3_FS_POSIX_ACL is not set -# end of DOS/FAT/EXFAT/NT Filesystems - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_PROC_VMCORE=y -CONFIG_PROC_VMCORE_DEVICE_DUMP=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_PROC_CHILDREN=y -CONFIG_PROC_CPU_RESCTRL=y -CONFIG_KERNFS=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y 
-CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -# CONFIG_TMPFS_INODE64 is not set -# CONFIG_TMPFS_QUOTA is not set -CONFIG_ARCH_SUPPORTS_HUGETLBFS=y -CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -CONFIG_ARCH_HAS_GIGANTIC_PAGE=y -CONFIG_CONFIGFS_FS=y -CONFIG_EFIVAR_FS=y -# end of Pseudo filesystems - -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ORANGEFS_FS is not set -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_ECRYPT_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_JFFS2_FS is not set -# CONFIG_UBIFS_FS is not set -CONFIG_CRAMFS=m -CONFIG_CRAMFS_BLOCKDEV=y -# CONFIG_CRAMFS_MTD is not set -CONFIG_SQUASHFS=m -# CONFIG_SQUASHFS_FILE_CACHE is not set -CONFIG_SQUASHFS_FILE_DIRECT=y -CONFIG_SQUASHFS_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set -CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set -# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y -CONFIG_SQUASHFS_LZ4=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -# CONFIG_SQUASHFS_ZSTD is not set -# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set -# CONFIG_SQUASHFS_EMBEDDED is not set -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_QNX6FS_FS is not set -CONFIG_RESCTRL_FS=y -CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID=y -# CONFIG_ROMFS_FS is not set -CONFIG_PSTORE=y -CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 -CONFIG_PSTORE_COMPRESS=y -CONFIG_PSTORE_CONSOLE=y -# CONFIG_PSTORE_PMSG is not set -# CONFIG_PSTORE_FTRACE is not set -CONFIG_PSTORE_RAM=y -# CONFIG_PSTORE_BLK is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -CONFIG_EROFS_FS=m -# CONFIG_EROFS_FS_DEBUG is not set -CONFIG_EROFS_FS_XATTR=y -CONFIG_EROFS_FS_POSIX_ACL=y 
-CONFIG_EROFS_FS_SECURITY=y -CONFIG_EROFS_FS_ZIP=y -CONFIG_EROFS_FS_ZIP_LZMA=y -CONFIG_EROFS_FS_ZIP_DEFLATE=y -CONFIG_EROFS_FS_ONDEMAND=y -# CONFIG_EROFS_FS_PCPU_KTHREAD is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=m -# CONFIG_NFS_V2 is not set -CONFIG_NFS_V3=m -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=m -# CONFIG_NFS_SWAP is not set -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y -CONFIG_PNFS_FILE_LAYOUT=m -CONFIG_PNFS_BLOCK=m -CONFIG_PNFS_FLEXFILE_LAYOUT=m -CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" -# CONFIG_NFS_V4_1_MIGRATION is not set -CONFIG_NFS_V4_SECURITY_LABEL=y -CONFIG_NFS_FSCACHE=y -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -CONFIG_NFS_DEBUG=y -CONFIG_NFS_DISABLE_UDP_SUPPORT=y -# CONFIG_NFS_V4_2_READ_PLUS is not set -CONFIG_NFSD=m -# CONFIG_NFSD_V2 is not set -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -CONFIG_NFSD_PNFS=y -# CONFIG_NFSD_BLOCKLAYOUT is not set -CONFIG_NFSD_SCSILAYOUT=y -# CONFIG_NFSD_FLEXFILELAYOUT is not set -# CONFIG_NFSD_V4_2_INTER_SSC is not set -CONFIG_NFSD_V4_SECURITY_LABEL=y -CONFIG_GRACE_PERIOD=m -CONFIG_LOCKD=m -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=m -CONFIG_NFS_COMMON=y -CONFIG_NFS_V4_2_SSC_HELPER=y -CONFIG_SUNRPC=m -CONFIG_SUNRPC_GSS=m -CONFIG_SUNRPC_BACKCHANNEL=y -CONFIG_RPCSEC_GSS_KRB5=m -CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y -# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set -# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set -CONFIG_SUNRPC_DEBUG=y -CONFIG_SUNRPC_XPRT_RDMA=m -CONFIG_CEPH_FS=m -# CONFIG_CEPH_FSCACHE is not set -CONFIG_CEPH_FS_POSIX_ACL=y -# CONFIG_CEPH_FS_SECURITY_LABEL is not set -CONFIG_CIFS=m -# CONFIG_CIFS_STATS2 is not set -CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y -CONFIG_CIFS_UPCALL=y -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -CONFIG_CIFS_DEBUG=y -# CONFIG_CIFS_DEBUG2 is not set -# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set -CONFIG_CIFS_DFS_UPCALL=y -# CONFIG_CIFS_SWN_UPCALL is not set -# CONFIG_CIFS_SMB_DIRECT is not set -# CONFIG_CIFS_FSCACHE is not set -# 
CONFIG_SMB_SERVER is not set -CONFIG_SMBFS=m -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m -CONFIG_NLS_UTF8=m -CONFIG_NLS_UCS2_UTILS=m -CONFIG_DLM=m -# CONFIG_DLM_DEBUG is not set -# CONFIG_UNICODE is not set -CONFIG_IO_WQ=y -# end of File systems - -# -# Security options -# -CONFIG_KEYS=y -# CONFIG_KEYS_REQUEST_CACHE is not set -CONFIG_PERSISTENT_KEYRINGS=y -CONFIG_TRUSTED_KEYS=y -CONFIG_TRUSTED_KEYS_TPM=y -CONFIG_ENCRYPTED_KEYS=y -# CONFIG_USER_DECRYPTED_DATA is not set -# CONFIG_KEY_DH_OPERATIONS is not set -# CONFIG_SECURITY_DMESG_RESTRICT is not set -CONFIG_SECURITY=y -CONFIG_SECURITYFS=y -CONFIG_SECURITY_NETWORK=y -CONFIG_SECURITY_INFINIBAND=y -CONFIG_SECURITY_NETWORK_XFRM=y -CONFIG_SECURITY_PATH=y 
-CONFIG_LSM_MMAP_MIN_ADDR=65535 -CONFIG_HARDENED_USERCOPY=y -CONFIG_FORTIFY_SOURCE=y -# CONFIG_STATIC_USERMODEHELPER is not set -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_DEVELOP=y -CONFIG_SECURITY_SELINUX_AVC_STATS=y -CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 -CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 -# CONFIG_SECURITY_SELINUX_DEBUG is not set -# CONFIG_SECURITY_SMACK is not set -# CONFIG_SECURITY_TOMOYO is not set -# CONFIG_SECURITY_APPARMOR is not set -# CONFIG_SECURITY_LOADPIN is not set -CONFIG_SECURITY_YAMA=y -# CONFIG_SECURITY_SAFESETID is not set -# CONFIG_SECURITY_LOCKDOWN_LSM is not set -# CONFIG_SECURITY_LANDLOCK is not set -CONFIG_INTEGRITY=y -CONFIG_INTEGRITY_SIGNATURE=y -CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y -CONFIG_INTEGRITY_TRUSTED_KEYRING=y -CONFIG_INTEGRITY_PLATFORM_KEYRING=y -# CONFIG_INTEGRITY_MACHINE_KEYRING is not set -CONFIG_LOAD_UEFI_KEYS=y -CONFIG_INTEGRITY_AUDIT=y -CONFIG_IMA=y -# CONFIG_IMA_KEXEC is not set -CONFIG_IMA_MEASURE_PCR_IDX=10 -CONFIG_IMA_LSM_RULES=y -# CONFIG_IMA_NG_TEMPLATE is not set -CONFIG_IMA_SIG_TEMPLATE=y -CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" -# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set -CONFIG_IMA_DEFAULT_HASH_SHA256=y -# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set -# CONFIG_IMA_DEFAULT_HASH_SM3 is not set -CONFIG_IMA_DEFAULT_HASH="sha256" -CONFIG_IMA_WRITE_POLICY=y -CONFIG_IMA_READ_POLICY=y -CONFIG_IMA_APPRAISE=y -# CONFIG_IMA_ARCH_POLICY is not set -CONFIG_IMA_APPRAISE_BUILD_POLICY=y -# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set -# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set -# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set -# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set -CONFIG_IMA_APPRAISE_BOOTPARAM=y -# CONFIG_IMA_APPRAISE_MODSIG is not set -CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y -CONFIG_IMA_BLACKLIST_KEYRING=y -CONFIG_IMA_LOAD_X509=y -CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" -# CONFIG_IMA_APPRAISE_SIGNED_INIT is not 
set -CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y -CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y -# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set -# CONFIG_IMA_DISABLE_HTABLE is not set -CONFIG_EVM=y -CONFIG_EVM_ATTR_FSUUID=y -# CONFIG_EVM_ADD_XATTRS is not set -CONFIG_EVM_LOAD_X509=y -CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" -CONFIG_DEFAULT_SECURITY_SELINUX=y -# CONFIG_DEFAULT_SECURITY_DAC is not set -CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" - -# -# Kernel hardening options -# - -# -# Memory initialization -# -CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y -CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y -CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y -CONFIG_INIT_STACK_NONE=y -# CONFIG_INIT_STACK_ALL_PATTERN is not set -# CONFIG_INIT_STACK_ALL_ZERO is not set -# CONFIG_GCC_PLUGIN_STACKLEAK is not set -# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set -# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set -CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y -# CONFIG_ZERO_CALL_USED_REGS is not set -# end of Memory initialization - -# -# Hardening of kernel data structures -# -CONFIG_LIST_HARDENED=y -CONFIG_BUG_ON_DATA_CORRUPTION=y -# end of Hardening of kernel data structures - -CONFIG_CC_HAS_RANDSTRUCT=y -CONFIG_RANDSTRUCT_NONE=y -# CONFIG_RANDSTRUCT_FULL is not set -# CONFIG_RANDSTRUCT_PERFORMANCE is not set -# end of Kernel hardening options -# end of Security options - -CONFIG_XOR_BLOCKS=m -CONFIG_ASYNC_CORE=m -CONFIG_ASYNC_MEMCPY=m -CONFIG_ASYNC_XOR=m -CONFIG_ASYNC_PQ=m -CONFIG_ASYNC_RAID6_RECOV=m -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_FIPS=y -CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" -# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_SIG2=y -CONFIG_CRYPTO_SKCIPHER=y -CONFIG_CRYPTO_SKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=m -CONFIG_CRYPTO_AKCIPHER2=y -CONFIG_CRYPTO_AKCIPHER=y 
-CONFIG_CRYPTO_KPP2=y -CONFIG_CRYPTO_KPP=m -CONFIG_CRYPTO_ACOMP2=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -CONFIG_CRYPTO_USER=m -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_NULL2=y -CONFIG_CRYPTO_PCRYPT=m -CONFIG_CRYPTO_CRYPTD=y -CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_TEST=m -# end of Crypto core or helper - -# -# Public-key cryptography -# -CONFIG_CRYPTO_RSA=y -CONFIG_CRYPTO_DH=m -# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set -CONFIG_CRYPTO_ECC=m -CONFIG_CRYPTO_ECDH=m -# CONFIG_CRYPTO_ECDSA is not set -# CONFIG_CRYPTO_ECRDSA is not set -CONFIG_CRYPTO_SM2=y -CONFIG_CRYPTO_CURVE25519=m -# end of Public-key cryptography - -# -# Block ciphers -# -CONFIG_CRYPTO_AES=y -# CONFIG_CRYPTO_AES_TI is not set -CONFIG_CRYPTO_ANUBIS=m -# CONFIG_CRYPTO_ARIA is not set -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_BLOWFISH_COMMON=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST_COMMON=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_DES=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SEED=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SM4_GENERIC=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_TWOFISH_COMMON=m -# end of Block ciphers - -# -# Length-preserving ciphers and modes -# -# CONFIG_CRYPTO_ADIANTUM is not set -CONFIG_CRYPTO_ARC4=m -CONFIG_CRYPTO_CHACHA20=m -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_CFB=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_CTS=y -CONFIG_CRYPTO_ECB=y -# CONFIG_CRYPTO_HCTR2 is not set -# CONFIG_CRYPTO_KEYWRAP is not set -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_OFB=y -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=y -# end of Length-preserving ciphers and modes - -# -# AEAD (authenticated encryption with associated data) ciphers -# -# CONFIG_CRYPTO_AEGIS128 is not set -CONFIG_CRYPTO_CHACHA20POLY1305=m -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=y -CONFIG_CRYPTO_GENIV=m -CONFIG_CRYPTO_SEQIV=m -CONFIG_CRYPTO_ECHAINIV=m 
-CONFIG_CRYPTO_ESSIV=m -# end of AEAD (authenticated encryption with associated data) ciphers - -# -# Hashes, digests, and MACs -# -CONFIG_CRYPTO_BLAKE2B=m -CONFIG_CRYPTO_CMAC=m -CONFIG_CRYPTO_GHASH=y -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_POLY1305=m -CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_SHA3=y -CONFIG_CRYPTO_SM3=y -CONFIG_CRYPTO_SM3_GENERIC=y -# CONFIG_CRYPTO_STREEBOG is not set -CONFIG_CRYPTO_VMAC=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_XXHASH=m -# end of Hashes, digests, and MACs - -# -# CRCs (cyclic redundancy checks) -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_CRCT10DIF=y -CONFIG_CRYPTO_CRC64_ROCKSOFT=y -# end of CRCs (cyclic redundancy checks) - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_LZO=y -# CONFIG_CRYPTO_842 is not set -CONFIG_CRYPTO_LZ4=m -CONFIG_CRYPTO_LZ4HC=m -CONFIG_CRYPTO_ZSTD=m -# end of Compression - -# -# Random number generation -# -CONFIG_CRYPTO_ANSI_CPRNG=m -CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_DRBG_HMAC=y -CONFIG_CRYPTO_DRBG_HASH=y -CONFIG_CRYPTO_DRBG_CTR=y -CONFIG_CRYPTO_DRBG=y -CONFIG_CRYPTO_JITTERENTROPY=y -# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set -# end of Random number generation - -# -# Userspace interface -# -CONFIG_CRYPTO_USER_API=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_USER_API_SKCIPHER=y -CONFIG_CRYPTO_USER_API_RNG=y -# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set -CONFIG_CRYPTO_USER_API_AEAD=y -CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y -# CONFIG_CRYPTO_STATS is not set -# end of Userspace interface - -CONFIG_CRYPTO_HASH_INFO=y -# CONFIG_CRYPTO_NHPOLY1305_NEON is not set -CONFIG_CRYPTO_CHACHA20_NEON=m - -# -# Accelerated Cryptographic Algorithms for CPU (arm64) -# -CONFIG_CRYPTO_GHASH_ARM64_CE=m -CONFIG_CRYPTO_POLY1305_NEON=m -CONFIG_CRYPTO_SHA1_ARM64_CE=m -CONFIG_CRYPTO_SHA256_ARM64=m 
-CONFIG_CRYPTO_SHA2_ARM64_CE=m -# CONFIG_CRYPTO_SHA512_ARM64 is not set -# CONFIG_CRYPTO_SHA512_ARM64_CE is not set -# CONFIG_CRYPTO_SHA3_ARM64 is not set -CONFIG_CRYPTO_SM3_NEON=m -CONFIG_CRYPTO_SM3_ARM64_CE=m -# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set -CONFIG_CRYPTO_AES_ARM64=y -CONFIG_CRYPTO_AES_ARM64_CE=y -CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y -CONFIG_CRYPTO_AES_ARM64_BS=m -CONFIG_CRYPTO_SM4_ARM64_CE=m -CONFIG_CRYPTO_SM4_ARM64_CE_BLK=m -CONFIG_CRYPTO_SM4_ARM64_NEON_BLK=m -CONFIG_CRYPTO_AES_ARM64_CE_CCM=y -CONFIG_CRYPTO_SM4_ARM64_CE_CCM=m -CONFIG_CRYPTO_SM4_ARM64_CE_GCM=m -CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m -# end of Accelerated Cryptographic Algorithms for CPU (arm64) - -CONFIG_CRYPTO_HW=y -# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set -# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set -# CONFIG_CRYPTO_DEV_CCP is not set -CONFIG_CRYPTO_DEV_CPT=m -CONFIG_CAVIUM_CPT=m -CONFIG_CRYPTO_DEV_NITROX=m -CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m -# CONFIG_CRYPTO_DEV_OCTEONTX_CPT is not set -# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set -# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set -# CONFIG_CRYPTO_DEV_QAT_C62X is not set -# CONFIG_CRYPTO_DEV_QAT_4XXX is not set -# CONFIG_CRYPTO_DEV_QAT_420XX is not set -# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set -# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set -# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set -CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m -# CONFIG_CRYPTO_DEV_QCE is not set -# CONFIG_CRYPTO_DEV_QCOM_RNG is not set -CONFIG_CRYPTO_DEV_CHELSIO=m -# CONFIG_CRYPTO_DEV_VIRTIO is not set -# CONFIG_CRYPTO_DEV_SAFEXCEL is not set -# CONFIG_CRYPTO_DEV_CCREE is not set -CONFIG_CRYPTO_DEV_HISI_SEC=m -CONFIG_CRYPTO_DEV_HISI_SEC2=m -CONFIG_CRYPTO_DEV_HISI_QM=m -CONFIG_CRYPTO_DEV_HISI_ZIP=m -CONFIG_CRYPTO_DEV_HISI_HPRE=m -CONFIG_CRYPTO_DEV_HISI_TRNG=m -# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_X509_CERTIFICATE_PARSER=y -# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set 
-CONFIG_PKCS7_MESSAGE_PARSER=y -# CONFIG_PKCS7_TEST_KEY is not set -CONFIG_SIGNED_PE_FILE_VERIFICATION=y -# CONFIG_FIPS_SIGNATURE_SELFTEST is not set - -# -# Certificates for signature checking -# -CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" -CONFIG_MODULE_SIG_KEY_TYPE_RSA=y -# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set -CONFIG_SYSTEM_TRUSTED_KEYRING=y -CONFIG_SYSTEM_TRUSTED_KEYS="" -CONFIG_SYSTEM_EXTRA_CERTIFICATE=y -CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 -CONFIG_SECONDARY_TRUSTED_KEYRING=y -CONFIG_SYSTEM_BLACKLIST_KEYRING=y -CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" -# CONFIG_SYSTEM_REVOCATION_LIST is not set -# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set -# end of Certificates for signature checking - -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=m -CONFIG_RAID6_PQ_BENCHMARK=y -CONFIG_LINEAR_RANGES=y -# CONFIG_PACKING is not set -CONFIG_BITREVERSE=y -CONFIG_HAVE_ARCH_BITREVERSE=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_NET_UTILS=y -CONFIG_CORDIC=m -# CONFIG_PRIME_NUMBERS is not set -CONFIG_RATIONAL=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_ARCH_HAS_FAST_MULTIPLIER=y -CONFIG_ARCH_USE_SYM_ANNOTATIONS=y -CONFIG_INDIRECT_PIO=y -# CONFIG_TRACE_MMIO_ACCESS is not set - -# -# Crypto library routines -# -CONFIG_CRYPTO_LIB_UTILS=y -CONFIG_CRYPTO_LIB_AES=y -CONFIG_CRYPTO_LIB_ARC4=m -CONFIG_CRYPTO_LIB_GF128MUL=y -CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y -CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m -CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m -CONFIG_CRYPTO_LIB_CHACHA=m -CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m -CONFIG_CRYPTO_LIB_CURVE25519=m -CONFIG_CRYPTO_LIB_DES=m -CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9 -CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m -CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m -CONFIG_CRYPTO_LIB_POLY1305=m -CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m -CONFIG_CRYPTO_LIB_SHA1=y -CONFIG_CRYPTO_LIB_SHA256=y -# end of Crypto library routines - -CONFIG_CRC_CCITT=y -CONFIG_CRC16=y -CONFIG_CRC_T10DIF=y 
-CONFIG_CRC64_ROCKSOFT=y -CONFIG_CRC_ITU_T=m -CONFIG_CRC32=y -# CONFIG_CRC32_SELFTEST is not set -CONFIG_CRC32_SLICEBY8=y -# CONFIG_CRC32_SLICEBY4 is not set -# CONFIG_CRC32_SARWATE is not set -# CONFIG_CRC32_BIT is not set -CONFIG_CRC64=y -# CONFIG_CRC4 is not set -CONFIG_CRC7=m -CONFIG_LIBCRC32C=m -CONFIG_CRC8=m -CONFIG_XXHASH=y -CONFIG_AUDIT_GENERIC=y -CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y -CONFIG_AUDIT_COMPAT_GENERIC=y -# CONFIG_RANDOM32_SELFTEST is not set -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_LZ4_COMPRESS=m -CONFIG_LZ4HC_COMPRESS=m -CONFIG_LZ4_DECOMPRESS=y -CONFIG_ZSTD_COMMON=y -CONFIG_ZSTD_COMPRESS=m -CONFIG_ZSTD_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_MICROLZMA=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_DECOMPRESS_LZ4=y -CONFIG_DECOMPRESS_ZSTD=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_REED_SOLOMON=y -CONFIG_REED_SOLOMON_ENC8=y -CONFIG_REED_SOLOMON_DEC8=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=m -CONFIG_TEXTSEARCH_BM=m -CONFIG_TEXTSEARCH_FSM=m -CONFIG_BTREE=y -CONFIG_INTERVAL_TREE=y -CONFIG_XARRAY_MULTI=y -CONFIG_ASSOCIATIVE_ARRAY=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT=y -CONFIG_HAS_IOPORT_MAP=y -CONFIG_HAS_DMA=y -CONFIG_DMA_OPS=y -CONFIG_NEED_SG_DMA_FLAGS=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_DMA_DECLARE_COHERENT=y -CONFIG_ARCH_HAS_SETUP_DMA_OPS=y -CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS=y -CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y -CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y -CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y -CONFIG_SWIOTLB=y -# CONFIG_SWIOTLB_DYNAMIC is not set -CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y -# CONFIG_DMA_RESTRICTED_POOL is not set 
-CONFIG_DMA_NONCOHERENT_MMAP=y -CONFIG_DMA_COHERENT_POOL=y -CONFIG_DMA_DIRECT_REMAP=y -CONFIG_DMA_CMA=y -# CONFIG_DMA_NUMA_CMA is not set - -# -# Default contiguous memory area size: -# -CONFIG_CMA_SIZE_MBYTES=64 -CONFIG_CMA_SIZE_SEL_MBYTES=y -# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set -# CONFIG_CMA_SIZE_SEL_MIN is not set -# CONFIG_CMA_SIZE_SEL_MAX is not set -CONFIG_CMA_ALIGNMENT=8 -# CONFIG_DMA_API_DEBUG is not set -# CONFIG_DMA_MAP_BENCHMARK is not set -CONFIG_SGL_ALLOC=y -CONFIG_CHECK_SIGNATURE=y -# CONFIG_CPUMASK_OFFSTACK is not set -CONFIG_CPU_RMAP=y -CONFIG_DQL=y -CONFIG_GLOB=y -# CONFIG_GLOB_SELFTEST is not set -CONFIG_NLATTR=y -CONFIG_CLZ_TAB=y -CONFIG_IRQ_POLL=y -CONFIG_MPILIB=y -CONFIG_SIGNATURE=y -CONFIG_DIMLIB=y -CONFIG_LIBFDT=y -CONFIG_OID_REGISTRY=y -CONFIG_UCS2_STRING=y -CONFIG_HAVE_GENERIC_VDSO=y -CONFIG_GENERIC_GETTIMEOFDAY=y -CONFIG_GENERIC_VDSO_TIME_NS=y -CONFIG_FONT_SUPPORT=y -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -CONFIG_SG_SPLIT=y -CONFIG_SG_POOL=y -CONFIG_ARCH_HAS_PMEM_API=y -CONFIG_MEMREGION=y -CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y -CONFIG_ARCH_HAS_COPY_MC=y -CONFIG_ARCH_STACKWALK=y -CONFIG_STACKDEPOT=y -CONFIG_SBITMAP=y -CONFIG_PARMAN=m -CONFIG_OBJAGG=m -# end of Library routines - -CONFIG_GENERIC_IOREMAP=y -CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y -CONFIG_PLDMFW=y -CONFIG_ASN1_ENCODER=y - -# -# Kernel hacking -# - -# -# printk and dmesg options -# -CONFIG_PRINTK_TIME=y -# CONFIG_PRINTK_CALLER is not set -# CONFIG_STACKTRACE_BUILD_ID is not set -CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 -CONFIG_CONSOLE_LOGLEVEL_QUIET=4 -CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 -CONFIG_BOOT_PRINTK_DELAY=y -CONFIG_DYNAMIC_DEBUG=y -CONFIG_DYNAMIC_DEBUG_CORE=y -CONFIG_SYMBOLIC_ERRNAME=y -CONFIG_DEBUG_BUGVERBOSE=y -# end of printk and dmesg options - -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_MISC=y - -# -# Compile-time checks and compiler options -# -CONFIG_DEBUG_INFO=y -CONFIG_AS_HAS_NON_CONST_LEB128=y -# CONFIG_DEBUG_INFO_NONE is not set 
-CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y -# CONFIG_DEBUG_INFO_DWARF4 is not set -# CONFIG_DEBUG_INFO_DWARF5 is not set -# CONFIG_DEBUG_INFO_REDUCED is not set -CONFIG_DEBUG_INFO_COMPRESSED_NONE=y -# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set -# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set -# CONFIG_DEBUG_INFO_SPLIT is not set -CONFIG_DEBUG_INFO_BTF=y -# CONFIG_GDB_SCRIPTS is not set -CONFIG_FRAME_WARN=2048 -CONFIG_STRIP_ASM_SYMS=y -# CONFIG_READABLE_ASM is not set -# CONFIG_HEADERS_INSTALL is not set -CONFIG_DEBUG_SECTION_MISMATCH=y -CONFIG_SECTION_MISMATCH_WARN_ONLY=y -CONFIG_ARCH_WANT_FRAME_POINTERS=y -CONFIG_FRAME_POINTER=y -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -# end of Compile-time checks and compiler options - -# -# Generic Kernel Debugging Instruments -# -CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_MAGIC_SYSRQ_SERIAL=y -CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" -CONFIG_DEBUG_FS=y -CONFIG_DEBUG_FS_ALLOW_ALL=y -# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set -# CONFIG_DEBUG_FS_ALLOW_NONE is not set -CONFIG_HAVE_ARCH_KGDB=y -CONFIG_KGDB=y -CONFIG_KGDB_HONOUR_BLOCKLIST=y -CONFIG_KGDB_SERIAL_CONSOLE=y -CONFIG_KGDB_TESTS=y -# CONFIG_KGDB_TESTS_ON_BOOT is not set -CONFIG_KGDB_KDB=y -CONFIG_KDB_DEFAULT_ENABLE=0x0 -CONFIG_KDB_KEYBOARD=y -CONFIG_KDB_CONTINUE_CATASTROPHIC=0 -CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y -# CONFIG_UBSAN is not set -CONFIG_HAVE_KCSAN_COMPILER=y -# end of Generic Kernel Debugging Instruments - -# -# Networking Debugging -# -# CONFIG_NET_DEV_REFCNT_TRACKER is not set -# CONFIG_NET_NS_REFCNT_TRACKER is not set -# CONFIG_DEBUG_NET is not set -# end of Networking Debugging - -# -# Memory Debugging -# -# CONFIG_PAGE_EXTENSION is not set -# CONFIG_DEBUG_PAGEALLOC is not set -CONFIG_SLUB_DEBUG=y -# CONFIG_SLUB_DEBUG_ON is not set -# CONFIG_PAGE_OWNER is not set -# CONFIG_PAGE_TABLE_CHECK is not set -# CONFIG_PAGE_POISONING is not set -# CONFIG_DEBUG_PAGE_REF is not set -# CONFIG_DEBUG_RODATA_TEST is not set 
-CONFIG_ARCH_HAS_DEBUG_WX=y -# CONFIG_DEBUG_WX is not set -CONFIG_GENERIC_PTDUMP=y -# CONFIG_PTDUMP_DEBUGFS is not set -CONFIG_HAVE_DEBUG_KMEMLEAK=y -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_PER_VMA_LOCK_STATS is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_SHRINKER_DEBUG is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_SCHED_STACK_END_CHECK is not set -CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_VM_PGTABLE is not set -CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -# CONFIG_DEBUG_VIRTUAL is not set -CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_DEBUG_PER_CPU_MAPS is not set -CONFIG_HAVE_ARCH_KASAN=y -CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y -CONFIG_HAVE_ARCH_KASAN_HW_TAGS=y -CONFIG_HAVE_ARCH_KASAN_VMALLOC=y -CONFIG_CC_HAS_KASAN_GENERIC=y -CONFIG_CC_HAS_KASAN_SW_TAGS=y -CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y -# CONFIG_KASAN is not set -CONFIG_HAVE_ARCH_KFENCE=y -CONFIG_KFENCE=y -CONFIG_KFENCE_SAMPLE_INTERVAL=0 -CONFIG_KFENCE_NUM_OBJECTS=255 -CONFIG_KFENCE_DEFERRABLE=y -CONFIG_KFENCE_STRESS_TEST_FAULTS=0 -# end of Memory Debugging - -CONFIG_DEBUG_SHIRQ=y - -# -# Debug Oops, Lockups and Hangs -# -CONFIG_PANIC_ON_OOPS=y -CONFIG_PANIC_ON_OOPS_VALUE=1 -CONFIG_PANIC_TIMEOUT=1 -CONFIG_LOCKUP_DETECTOR=y -CONFIG_SOFTLOCKUP_DETECTOR=y -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y -CONFIG_SDEI_WATCHDOG=y -CONFIG_HARDLOCKUP_DETECTOR=y -# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set -CONFIG_HARDLOCKUP_DETECTOR_PERF=y -# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set -# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set -CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y -CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -# CONFIG_WQ_WATCHDOG is not set -# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set -# CONFIG_TEST_LOCKUP is not set -# end of Debug Oops, Lockups and Hangs - -# -# Scheduler Debugging -# -CONFIG_SCHED_DEBUG=y 
-CONFIG_SCHED_INFO=y -CONFIG_SCHEDSTATS=y -CONFIG_SCHED_ACPU=y -# end of Scheduler Debugging - -# CONFIG_DEBUG_TIMEKEEPING is not set -# CONFIG_DEBUG_PREEMPT is not set - -# -# Lock Debugging (spinlocks, mutexes, etc...) -# -CONFIG_LOCK_DEBUGGING_SUPPORT=y -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -# CONFIG_DEBUG_RWSEMS is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_LOCK_TORTURE_TEST is not set -# CONFIG_WW_MUTEX_SELFTEST is not set -# CONFIG_SCF_TORTURE_TEST is not set -# CONFIG_CSD_LOCK_WAIT_DEBUG is not set -# end of Lock Debugging (spinlocks, mutexes, etc...) - -# CONFIG_DEBUG_IRQFLAGS is not set -CONFIG_STACKTRACE=y -# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set -# CONFIG_DEBUG_KOBJECT is not set - -# -# Debug kernel data structures -# -CONFIG_DEBUG_LIST=y -# CONFIG_DEBUG_PLIST is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_MAPLE_TREE is not set -# end of Debug kernel data structures - -# -# RCU Debugging -# -# CONFIG_RCU_SCALE_TEST is not set -# CONFIG_RCU_TORTURE_TEST is not set -# CONFIG_RCU_REF_SCALE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 -# CONFIG_RCU_CPU_STALL_CPUTIME is not set -# CONFIG_RCU_TRACE is not set -# CONFIG_RCU_EQS_DEBUG is not set -# end of RCU Debugging - -# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -# CONFIG_LATENCYTOP is not set -# CONFIG_DEBUG_CGROUP_REF is not set -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS=y 
-CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACER_MAX_TRACE=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -# CONFIG_BOOTTIME_TRACING is not set -CONFIG_FUNCTION_TRACER=y -CONFIG_FUNCTION_GRAPH_TRACER=y -# CONFIG_FUNCTION_GRAPH_RETVAL is not set -CONFIG_DYNAMIC_FTRACE=y -CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y -CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y -CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y -# CONFIG_FUNCTION_PROFILER is not set -CONFIG_STACK_TRACER=y -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_PREEMPT_TRACER is not set -CONFIG_SCHED_TRACER=y -CONFIG_HWLAT_TRACER=y -CONFIG_OSNOISE_TRACER=y -CONFIG_TIMERLAT_TRACER=y -CONFIG_FTRACE_SYSCALLS=y -CONFIG_TRACER_SNAPSHOT=y -# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_PROBE_EVENTS_BTF_ARGS=y -CONFIG_KPROBE_EVENTS=y -# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set -CONFIG_UPROBE_EVENTS=y -CONFIG_BPF_EVENTS=y -CONFIG_DYNAMIC_EVENTS=y -CONFIG_PROBE_EVENTS=y -# CONFIG_BPF_KPROBE_OVERRIDE is not set -CONFIG_FTRACE_MCOUNT_RECORD=y -CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y -CONFIG_TRACING_MAP=y -CONFIG_SYNTH_EVENTS=y -# CONFIG_USER_EVENTS is not set -CONFIG_HIST_TRIGGERS=y -# CONFIG_TRACE_EVENT_INJECT is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -CONFIG_RING_BUFFER_BENCHMARK=m -# CONFIG_TRACE_EVAL_MAP_FILE is not set -# CONFIG_FTRACE_RECORD_RECURSION is not set -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set -# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set -# CONFIG_PREEMPTIRQ_DELAY_TEST is not set -# CONFIG_SYNTH_EVENT_GEN_TEST is not set -# CONFIG_KPROBE_EVENT_GEN_TEST is not set -# CONFIG_HIST_TRIGGERS_DEBUG is 
not set -# CONFIG_RV is not set -# CONFIG_SAMPLES is not set -CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y -CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y -CONFIG_STRICT_DEVMEM=y -# CONFIG_IO_STRICT_DEVMEM is not set - -# -# arm64 Debugging -# -CONFIG_PID_IN_CONTEXTIDR=y -# CONFIG_DEBUG_EFI is not set -# CONFIG_ARM64_RELOC_TEST is not set -CONFIG_CORESIGHT=m -CONFIG_CORESIGHT_LINKS_AND_SINKS=m -CONFIG_CORESIGHT_LINK_AND_SINK_TMC=m -CONFIG_CORESIGHT_CATU=m -CONFIG_CORESIGHT_SINK_TPIU=m -CONFIG_CORESIGHT_SINK_ETBV10=m -CONFIG_CORESIGHT_SOURCE_ETM4X=m -CONFIG_ETM4X_IMPDEF_FEATURE=y -CONFIG_CORESIGHT_STM=m -CONFIG_CORESIGHT_CPU_DEBUG=m -# CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON is not set -CONFIG_CORESIGHT_CTI=m -CONFIG_CORESIGHT_CTI_INTEGRATION_REGS=y -# CONFIG_CORESIGHT_TRBE is not set -# CONFIG_ULTRASOC_SMB is not set -# CONFIG_CORESIGHT_TPDM is not set -# CONFIG_CORESIGHT_TPDA is not set -# CONFIG_CORESIGHT_DUMMY is not set -# end of arm64 Debugging - -# -# Kernel Testing and Coverage -# -# CONFIG_KUNIT is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -CONFIG_FUNCTION_ERROR_INJECTION=y -# CONFIG_FAULT_INJECTION is not set -CONFIG_ARCH_HAS_KCOV=y -CONFIG_CC_HAS_SANCOV_TRACE_PC=y -# CONFIG_KCOV is not set -CONFIG_RUNTIME_TESTING_MENU=y -# CONFIG_TEST_DHRY is not set -# CONFIG_LKDTM is not set -# CONFIG_TEST_MIN_HEAP is not set -# CONFIG_TEST_DIV64 is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_TEST_REF_TRACKER is not set -# CONFIG_RBTREE_TEST is not set -# CONFIG_REED_SOLOMON_TEST is not set -# CONFIG_INTERVAL_TREE_TEST is not set -# CONFIG_PERCPU_TEST is not set -CONFIG_ATOMIC64_SELFTEST=y -CONFIG_ASYNC_RAID6_TEST=m -# CONFIG_TEST_HEXDUMP is not set -# CONFIG_STRING_SELFTEST is not set -# CONFIG_TEST_STRING_HELPERS is not set -CONFIG_TEST_KSTRTOX=y -# CONFIG_TEST_PRINTF is not set -# CONFIG_TEST_SCANF is not set -# CONFIG_TEST_BITMAP is not set -# CONFIG_TEST_UUID is not set -# CONFIG_TEST_XARRAY is not set -# CONFIG_TEST_MAPLE_TREE is not set -# 
CONFIG_TEST_RHASHTABLE is not set -# CONFIG_TEST_IDA is not set -# CONFIG_TEST_PARMAN is not set -# CONFIG_TEST_LKM is not set -# CONFIG_TEST_BITOPS is not set -# CONFIG_TEST_VMALLOC is not set -# CONFIG_TEST_USER_COPY is not set -CONFIG_TEST_BPF=m -# CONFIG_TEST_BLACKHOLE_DEV is not set -# CONFIG_FIND_BIT_BENCHMARK is not set -# CONFIG_TEST_FIRMWARE is not set -# CONFIG_TEST_SYSCTL is not set -# CONFIG_TEST_UDELAY is not set -# CONFIG_TEST_STATIC_KEYS is not set -# CONFIG_TEST_DYNAMIC_DEBUG is not set -# CONFIG_TEST_KMOD is not set -# CONFIG_TEST_MEMCAT_P is not set -# CONFIG_TEST_OBJAGG is not set -# CONFIG_TEST_MEMINIT is not set -# CONFIG_TEST_FREE_PAGES is not set -CONFIG_ARCH_USE_MEMTEST=y -# CONFIG_MEMTEST is not set -# end of Kernel Testing and Coverage - -# -# Rust hacking -# -# end of Rust hacking -# end of Kernel hacking diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig deleted file mode 100644 index 7d728ee1318a..000000000000 --- a/arch/x86/configs/anolis-debug_defconfig +++ /dev/null @@ -1,8148 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. 
-# Linux/x86 6.6.25 Kernel Configuration -# -CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" -CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=200000 -CONFIG_CLANG_VERSION=0 -CONFIG_AS_IS_GNU=y -CONFIG_AS_VERSION=25000 -CONFIG_LD_IS_BFD=y -CONFIG_LD_VERSION=25000 -CONFIG_LLD_VERSION=0 -CONFIG_CC_CAN_LINK=y -CONFIG_CC_CAN_LINK_STATIC=y -CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y -CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y -CONFIG_TOOLS_SUPPORT_RELR=y -CONFIG_CC_HAS_ASM_INLINE=y -CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y -CONFIG_PAHOLE_VERSION=117 -CONFIG_CONSTRUCTORS=y -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_TABLE_SORT=y -CONFIG_THREAD_INFO_IN_TASK=y - -# -# General setup -# -CONFIG_INIT_ENV_ARG_LIMIT=32 -# CONFIG_COMPILE_TEST is not set -# CONFIG_WERROR is not set -CONFIG_LOCALVERSION="" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_BUILD_SALT="" -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_BZIP2=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_XZ=y -CONFIG_HAVE_KERNEL_LZO=y -CONFIG_HAVE_KERNEL_LZ4=y -CONFIG_HAVE_KERNEL_ZSTD=y -# CONFIG_KERNEL_GZIP is not set -# CONFIG_KERNEL_BZIP2 is not set -# CONFIG_KERNEL_LZMA is not set -# CONFIG_KERNEL_XZ is not set -# CONFIG_KERNEL_LZO is not set -# CONFIG_KERNEL_LZ4 is not set -CONFIG_KERNEL_ZSTD=y -CONFIG_DEFAULT_INIT="" -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_SYSVIPC_COMPAT=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -# CONFIG_WATCH_QUEUE is not set -CONFIG_CROSS_MEMORY_ATTACH=y -# CONFIG_USELIB is not set -CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_AUDITSYSCALL=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y -CONFIG_GENERIC_PENDING_IRQ=y -CONFIG_GENERIC_IRQ_MIGRATION=y -CONFIG_GENERIC_IRQ_INJECTION=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_IRQ_MSI_IOMMU=y -CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y -CONFIG_GENERIC_IRQ_RESERVATION_MODE=y 
-CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y -CONFIG_GENERIC_IRQ_DEBUGFS=y -# end of IRQ subsystem - -CONFIG_CLOCKSOURCE_WATCHDOG=y -CONFIG_ARCH_CLOCKSOURCE_INIT=y -CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y -CONFIG_GENERIC_CMOS_UPDATE=y -CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y -CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y -CONFIG_CONTEXT_TRACKING=y -CONFIG_CONTEXT_TRACKING_IDLE=y - -# -# Timers subsystem -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set -# CONFIG_NO_HZ_IDLE is not set -CONFIG_NO_HZ_FULL=y -CONFIG_CONTEXT_TRACKING_USER=y -# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US=125 -# end of Timers subsystem - -CONFIG_BPF=y -CONFIG_HAVE_EBPF_JIT=y -CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y - -# -# BPF subsystem -# -CONFIG_BPF_SYSCALL=y -CONFIG_BPF_JIT=y -CONFIG_BPF_JIT_ALWAYS_ON=y -CONFIG_BPF_JIT_DEFAULT_ON=y -CONFIG_BPF_UNPRIV_DEFAULT_OFF=y -# CONFIG_BPF_PRELOAD is not set -CONFIG_BPF_LSM=y -# end of BPF subsystem - -CONFIG_PREEMPT_BUILD=y -CONFIG_PREEMPT_NONE=y -# CONFIG_PREEMPT_VOLUNTARY is not set -# CONFIG_PREEMPT is not set -CONFIG_PREEMPT_COUNT=y -CONFIG_PREEMPTION=y -CONFIG_PREEMPT_DYNAMIC=y -CONFIG_SCHED_CORE=y - -# -# CPU/Task time and stats accounting -# -CONFIG_VIRT_CPU_ACCOUNTING=y -CONFIG_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_SCHED_AVG_IRQ=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_PSI=y -CONFIG_PSI_DEFAULT_DISABLED=y -# end of CPU/Task time and stats accounting - -CONFIG_CPU_ISOLATION=y - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -CONFIG_PREEMPT_RCU=y -# CONFIG_RCU_EXPERT is not set -CONFIG_TREE_SRCU=y -CONFIG_TASKS_RCU_GENERIC=y -CONFIG_TASKS_RCU=y 
-CONFIG_TASKS_RUDE_RCU=y -CONFIG_TASKS_TRACE_RCU=y -CONFIG_RCU_STALL_COMMON=y -CONFIG_RCU_NEED_SEGCBLIST=y -CONFIG_RCU_NOCB_CPU=y -# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set -# CONFIG_RCU_LAZY is not set -# end of RCU Subsystem - -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -# CONFIG_IKHEADERS is not set -CONFIG_LOG_BUF_SHIFT=21 -CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 -# CONFIG_PRINTK_INDEX is not set -CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y - -# -# Scheduler features -# -# CONFIG_UCLAMP_TASK is not set -# end of Scheduler features - -CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y -CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y -CONFIG_CC_HAS_INT128=y -CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" -CONFIG_GCC10_NO_ARRAY_BOUNDS=y -CONFIG_CC_NO_ARRAY_BOUNDS=y -CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_NUMA_BALANCING=y -CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y -CONFIG_CGROUPS=y -CONFIG_PAGE_COUNTER=y -# CONFIG_CGROUP_FAVOR_DYNMODS is not set -CONFIG_MEMCG=y -CONFIG_MEMCG_KMEM=y -CONFIG_BLK_CGROUP=y -CONFIG_CGROUP_WRITEBACK=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_SCHED_MM_CID=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_RDMA=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_DEVICE=y -CONFIG_SCHED_SLI=y -CONFIG_RICH_CONTAINER=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_BPF=y -# CONFIG_CGROUP_MISC is not set -CONFIG_CGROUP_DEBUG=y -CONFIG_SOCK_CGROUP_DATA=y -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_TIME_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_CHECKPOINT_RESTORE=y -CONFIG_SCHED_AUTOGROUP=y -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -CONFIG_RD_LZ4=y -CONFIG_RD_ZSTD=y -# CONFIG_BOOT_CONFIG is not set -CONFIG_INITRAMFS_PRESERVE_MTIME=y -CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -# 
CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_LD_ORPHAN_WARN=y -CONFIG_LD_ORPHAN_WARN_LEVEL="warn" -CONFIG_SYSCTL=y -CONFIG_HAVE_UID16=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -CONFIG_HAVE_PCSPKR_PLATFORM=y -# CONFIG_EXPERT is not set -CONFIG_UID16=y -CONFIG_MULTIUSER=y -CONFIG_SGETMASK_SYSCALL=y -CONFIG_SYSFS_SYSCALL=y -CONFIG_FHANDLE=y -CONFIG_POSIX_TIMERS=y -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_PCSPKR_PLATFORM=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_FUTEX_PI=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_IO_URING=y -CONFIG_ADVISE_SYSCALLS=y -CONFIG_MEMBARRIER=y -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_SELFTEST is not set -CONFIG_KALLSYMS_ALL=y -CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y -CONFIG_KALLSYMS_BASE_RELATIVE=y -CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y -CONFIG_KCMP=y -CONFIG_RSEQ=y -CONFIG_CACHESTAT_SYSCALL=y -CONFIG_HAVE_PERF_EVENTS=y -CONFIG_GUEST_PERF_EVENTS=y - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -# end of Kernel Performance Events And Counters - -CONFIG_SYSTEM_DATA_VERIFICATION=y -CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y - -# -# Kexec and crash features -# -CONFIG_CRASH_CORE=y -CONFIG_KEXEC_CORE=y -CONFIG_HAVE_IMA_KEXEC=y -CONFIG_KEXEC=y -CONFIG_KEXEC_FILE=y -CONFIG_KEXEC_SIG=y -# CONFIG_KEXEC_SIG_FORCE is not set -CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y -CONFIG_KEXEC_JUMP=y -CONFIG_CRASH_DUMP=y -CONFIG_CRASH_HOTPLUG=y -CONFIG_CRASH_MAX_MEMORY_RANGES=8192 -# end of Kexec and crash features -# end of General setup - -CONFIG_64BIT=y -CONFIG_X86_64=y -CONFIG_X86=y -CONFIG_INSTRUCTION_DECODER=y -CONFIG_OUTPUT_FORMAT="elf64-x86-64" -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_MMU=y -CONFIG_ARCH_MMAP_RND_BITS_MIN=28 -CONFIG_ARCH_MMAP_RND_BITS_MAX=32 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 -CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_CSUM=y 
-CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ARCH_HAS_CPU_RELAX=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_AUDIT_ARCH=y -CONFIG_KASAN_SHADOW_OFFSET=0xdffffc0000000000 -CONFIG_HAVE_INTEL_TXT=y -CONFIG_X86_64_SMP=y -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_DYNAMIC_PHYSICAL_MASK=y -CONFIG_PGTABLE_LEVELS=4 -CONFIG_CC_HAS_SANE_STACKPROTECTOR=y - -# -# Processor type and features -# -CONFIG_SMP=y -CONFIG_X86_X2APIC=y -CONFIG_X86_MPPARSE=y -# CONFIG_GOLDFISH is not set -CONFIG_X86_CPU_RESCTRL=y -CONFIG_X86_EXTENDED_PLATFORM=y -# CONFIG_X86_NUMACHIP is not set -# CONFIG_X86_VSMP is not set -CONFIG_X86_UV=y -# CONFIG_X86_GOLDFISH is not set -# CONFIG_X86_INTEL_MID is not set -CONFIG_X86_INTEL_LPSS=y -CONFIG_X86_AMD_PLATFORM_DEVICE=y -CONFIG_IOSF_MBI=y -# CONFIG_IOSF_MBI_DEBUG is not set -CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y -CONFIG_SCHED_OMIT_FRAME_POINTER=y -CONFIG_HYPERVISOR_GUEST=y -CONFIG_PARAVIRT=y -# CONFIG_PARAVIRT_DEBUG is not set -CONFIG_PARAVIRT_SPINLOCKS=y -CONFIG_X86_HV_CALLBACK_VECTOR=y -CONFIG_XEN=y -# CONFIG_XEN_PV is not set -CONFIG_XEN_PVHVM=y -CONFIG_XEN_PVHVM_SMP=y -CONFIG_XEN_PVHVM_GUEST=y -CONFIG_XEN_SAVE_RESTORE=y -# CONFIG_XEN_DEBUG_FS is not set -# CONFIG_XEN_PVH is not set -CONFIG_KVM_GUEST=y -CONFIG_ARCH_CPUIDLE_HALTPOLL=y -# CONFIG_PVH is not set -CONFIG_PARAVIRT_TIME_ACCOUNTING=y -CONFIG_PARAVIRT_CLOCK=y -# CONFIG_JAILHOUSE_GUEST is not set -# CONFIG_ACRN_GUEST is not set -CONFIG_INTEL_TDX_GUEST=y -# CONFIG_MK8 is not set -# CONFIG_MPSC is not set -# CONFIG_MCORE2 is not set -# CONFIG_MATOM is not set -CONFIG_GENERIC_CPU=y -CONFIG_X86_INTERNODE_CACHE_SHIFT=6 -CONFIG_X86_L1_CACHE_SHIFT=6 -CONFIG_X86_TSC=y -CONFIG_X86_CMPXCHG64=y -CONFIG_X86_CMOV=y -CONFIG_X86_MINIMUM_CPU_FAMILY=64 -CONFIG_X86_DEBUGCTLMSR=y -CONFIG_IA32_FEAT_CTL=y -CONFIG_X86_VMX_FEATURE_NAMES=y -CONFIG_CPU_SUP_INTEL=y 
-CONFIG_CPU_SUP_AMD=y -CONFIG_CPU_SUP_HYGON=y -CONFIG_CPU_SUP_CENTAUR=y -CONFIG_CPU_SUP_ZHAOXIN=y -CONFIG_HPET_TIMER=y -CONFIG_HPET_EMULATE_RTC=y -CONFIG_DMI=y -# CONFIG_GART_IOMMU is not set -CONFIG_BOOT_VESA_SUPPORT=y -# CONFIG_MAXSMP is not set -CONFIG_NR_CPUS_RANGE_BEGIN=2 -CONFIG_NR_CPUS_RANGE_END=8192 -CONFIG_NR_CPUS_DEFAULT=64 -CONFIG_NR_CPUS=1024 -CONFIG_SCHED_CLUSTER=y -CONFIG_SCHED_SMT=y -CONFIG_SCHED_MC=y -CONFIG_SCHED_MC_PRIO=y -CONFIG_X86_LOCAL_APIC=y -CONFIG_X86_IO_APIC=y -CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y -CONFIG_X86_MCE=y -CONFIG_X86_MCELOG_LEGACY=y -CONFIG_X86_MCE_INTEL=y -CONFIG_X86_MCE_AMD=y -CONFIG_X86_MCE_THRESHOLD=y -CONFIG_X86_MCE_INJECT=m - -# -# Performance monitoring -# -CONFIG_PERF_EVENTS_INTEL_UNCORE=m -CONFIG_PERF_EVENTS_INTEL_RAPL=m -CONFIG_PERF_EVENTS_INTEL_CSTATE=m -CONFIG_PERF_EVENTS_AMD_POWER=m -CONFIG_PERF_EVENTS_AMD_UNCORE=y -CONFIG_PERF_EVENTS_AMD_BRS=y -# end of Performance monitoring - -CONFIG_X86_16BIT=y -CONFIG_X86_ESPFIX64=y -CONFIG_X86_VSYSCALL_EMULATION=y -CONFIG_X86_IOPL_IOPERM=y -CONFIG_MICROCODE=y -# CONFIG_MICROCODE_LATE_LOADING is not set -CONFIG_X86_MSR=y -CONFIG_X86_CPUID=y -# CONFIG_X86_5LEVEL is not set -CONFIG_X86_DIRECT_GBPAGES=y -CONFIG_X86_CPA_STATISTICS=y -CONFIG_X86_MEM_ENCRYPT=y -CONFIG_AMD_MEM_ENCRYPT=y -CONFIG_NUMA=y -CONFIG_AMD_NUMA=y -CONFIG_X86_64_ACPI_NUMA=y -CONFIG_NUMA_EMU=y -CONFIG_NODES_SHIFT=6 -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -# CONFIG_ARCH_MEMORY_PROBE is not set -CONFIG_ARCH_PROC_KCORE_TEXT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_X86_PMEM_LEGACY_DEVICE=y -CONFIG_X86_PMEM_LEGACY=m -CONFIG_X86_CHECK_BIOS_CORRUPTION=y -CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y -CONFIG_MTRR=y -CONFIG_MTRR_SANITIZER=y -CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 -CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 -CONFIG_X86_PAT=y -CONFIG_ARCH_USES_PG_UNCACHED=y -CONFIG_X86_UMIP=y -CONFIG_CC_HAS_IBT=y -CONFIG_X86_CET=y -CONFIG_X86_KERNEL_IBT=y 
-CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y -# CONFIG_X86_INTEL_TSX_MODE_OFF is not set -# CONFIG_X86_INTEL_TSX_MODE_ON is not set -CONFIG_X86_INTEL_TSX_MODE_AUTO=y -CONFIG_X86_SGX=y -# CONFIG_X86_USER_SHADOW_STACK is not set -CONFIG_EFI=y -CONFIG_EFI_STUB=y -CONFIG_EFI_HANDOVER_PROTOCOL=y -CONFIG_EFI_MIXED=y -# CONFIG_EFI_FAKE_MEMMAP is not set -CONFIG_EFI_RUNTIME_MAP=y -CONFIG_HYGON_CSV=y -# CONFIG_HZ_100 is not set -# CONFIG_HZ_250 is not set -# CONFIG_HZ_300 is not set -CONFIG_HZ_1000=y -CONFIG_HZ=1000 -CONFIG_SCHED_HRTICK=y -CONFIG_ARCH_SUPPORTS_KEXEC=y -CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y -CONFIG_ARCH_SELECTS_KEXEC_FILE=y -CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY=y -CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y -CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE=y -CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG=y -CONFIG_ARCH_SUPPORTS_KEXEC_JUMP=y -CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y -CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG=y -CONFIG_PHYSICAL_START=0x1000000 -CONFIG_RELOCATABLE=y -CONFIG_RANDOMIZE_BASE=y -CONFIG_X86_NEED_RELOCS=y -CONFIG_PHYSICAL_ALIGN=0x1000000 -CONFIG_DYNAMIC_MEMORY_LAYOUT=y -CONFIG_RANDOMIZE_MEMORY=y -CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa -# CONFIG_ADDRESS_MASKING is not set -CONFIG_HOTPLUG_CPU=y -# CONFIG_COMPAT_VDSO is not set -CONFIG_LEGACY_VSYSCALL_XONLY=y -# CONFIG_LEGACY_VSYSCALL_NONE is not set -# CONFIG_CMDLINE_BOOL is not set -CONFIG_MODIFY_LDT_SYSCALL=y -# CONFIG_STRICT_SIGALTSTACK_SIZE is not set -CONFIG_HAVE_LIVEPATCH=y -CONFIG_LIVEPATCH=y -# end of Processor type and features - -CONFIG_CC_HAS_SLS=y -CONFIG_CC_HAS_RETURN_THUNK=y -CONFIG_CC_HAS_ENTRY_PADDING=y -CONFIG_FUNCTION_PADDING_CFI=11 -CONFIG_FUNCTION_PADDING_BYTES=16 -CONFIG_CALL_PADDING=y -CONFIG_HAVE_CALL_THUNKS=y -CONFIG_CALL_THUNKS=y -CONFIG_PREFIX_SYMBOLS=y -CONFIG_SPECULATION_MITIGATIONS=y -CONFIG_PAGE_TABLE_ISOLATION=y -CONFIG_RETPOLINE=y -CONFIG_RETHUNK=y -CONFIG_CPU_UNRET_ENTRY=y -CONFIG_CALL_DEPTH_TRACKING=y -# CONFIG_CALL_THUNKS_DEBUG is not set -CONFIG_CPU_IBPB_ENTRY=y -CONFIG_CPU_IBRS_ENTRY=y 
-CONFIG_CPU_SRSO=y -# CONFIG_SLS is not set -# CONFIG_GDS_FORCE_MITIGATION is not set -CONFIG_MITIGATION_RFDS=y -CONFIG_ARCH_HAS_ADD_PAGES=y - -# -# Power management and ACPI options -# -CONFIG_ARCH_HIBERNATION_HEADER=y -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -CONFIG_HIBERNATE_CALLBACKS=y -CONFIG_HIBERNATION=y -CONFIG_HIBERNATION_SNAPSHOT_DEV=y -CONFIG_PM_STD_PARTITION="" -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -# CONFIG_PM_AUTOSLEEP is not set -# CONFIG_PM_USERSPACE_AUTOSLEEP is not set -# CONFIG_PM_WAKELOCKS is not set -CONFIG_PM=y -CONFIG_PM_DEBUG=y -CONFIG_PM_ADVANCED_DEBUG=y -# CONFIG_PM_TEST_SUSPEND is not set -CONFIG_PM_SLEEP_DEBUG=y -CONFIG_PM_TRACE=y -CONFIG_PM_TRACE_RTC=y -CONFIG_PM_CLK=y -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -# CONFIG_ENERGY_MODEL is not set -CONFIG_ARCH_SUPPORTS_ACPI=y -CONFIG_ACPI=y -CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y -CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y -CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y -CONFIG_ACPI_TABLE_LIB=y -CONFIG_ACPI_DEBUGGER=y -CONFIG_ACPI_DEBUGGER_USER=m -CONFIG_ACPI_SPCR_TABLE=y -# CONFIG_ACPI_FPDT is not set -CONFIG_ACPI_LPIT=y -CONFIG_ACPI_SLEEP=y -CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y -CONFIG_ACPI_EC_DEBUGFS=m -CONFIG_ACPI_AC=y -CONFIG_ACPI_BATTERY=y -CONFIG_ACPI_BUTTON=y -CONFIG_ACPI_VIDEO=m -CONFIG_ACPI_FAN=y -CONFIG_ACPI_TAD=m -CONFIG_ACPI_DOCK=y -CONFIG_ACPI_CPU_FREQ_PSS=y -CONFIG_ACPI_PROCESSOR_CSTATE=y -CONFIG_ACPI_PROCESSOR_IDLE=y -CONFIG_ACPI_CPPC_LIB=y -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_IPMI=m -CONFIG_ACPI_HOTPLUG_CPU=y -CONFIG_ACPI_PROCESSOR_AGGREGATOR=m -CONFIG_ACPI_THERMAL=y -CONFIG_ACPI_PLATFORM_PROFILE=m -CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y -CONFIG_ACPI_TABLE_UPGRADE=y -CONFIG_ACPI_DEBUG=y -CONFIG_ACPI_PCI_SLOT=y -CONFIG_ACPI_CONTAINER=y -CONFIG_ACPI_HOTPLUG_MEMORY=y -CONFIG_ACPI_HOTPLUG_IOAPIC=y -CONFIG_ACPI_SBS=m -CONFIG_ACPI_HED=y -CONFIG_ACPI_CUSTOM_METHOD=m -CONFIG_ACPI_BGRT=y -CONFIG_ACPI_NFIT=m -# CONFIG_NFIT_SECURITY_DEBUG is not set -CONFIG_ACPI_NUMA=y -CONFIG_ACPI_HMAT=y 
-CONFIG_HAVE_ACPI_APEI=y -CONFIG_HAVE_ACPI_APEI_NMI=y -CONFIG_ACPI_APEI=y -CONFIG_ACPI_APEI_GHES=y -CONFIG_ACPI_APEI_PCIEAER=y -CONFIG_ACPI_APEI_MEMORY_FAILURE=y -CONFIG_ACPI_APEI_EINJ=m -CONFIG_ACPI_APEI_ERST_DEBUG=m -# CONFIG_ACPI_DPTF is not set -CONFIG_ACPI_WATCHDOG=y -CONFIG_ACPI_EXTLOG=m -CONFIG_ACPI_ADXL=y -CONFIG_ACPI_CONFIGFS=m -# CONFIG_ACPI_PFRUT is not set -CONFIG_ACPI_PCC=y -# CONFIG_ACPI_FFH is not set -CONFIG_PMIC_OPREGION=y -CONFIG_ACPI_PRMT=y -CONFIG_X86_PM_TIMER=y - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_ATTR_SET=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y - -# -# CPU frequency scaling drivers -# -CONFIG_X86_INTEL_PSTATE=y -# CONFIG_X86_PCC_CPUFREQ is not set -CONFIG_X86_AMD_PSTATE=y -CONFIG_X86_AMD_PSTATE_DEFAULT_MODE=3 -# CONFIG_X86_AMD_PSTATE_UT is not set -CONFIG_X86_ACPI_CPUFREQ=m -CONFIG_X86_ACPI_CPUFREQ_CPB=y -CONFIG_X86_POWERNOW_K8=m -CONFIG_X86_AMD_FREQ_SENSITIVITY=m -# CONFIG_X86_SPEEDSTEP_CENTRINO is not set -CONFIG_X86_P4_CLOCKMOD=m - -# -# shared options -# -CONFIG_X86_SPEEDSTEP_LIB=m -# end of CPU Frequency scaling - -# -# CPU Idle -# -CONFIG_CPU_IDLE=y -# CONFIG_CPU_IDLE_GOV_LADDER is not set -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_CPU_IDLE_GOV_TEO is not set -CONFIG_CPU_IDLE_GOV_HALTPOLL=y -CONFIG_HALTPOLL_CPUIDLE=y -# end of CPU Idle - -CONFIG_INTEL_IDLE=y -# end of Power management and ACPI options - -# -# Bus options (PCI etc.) -# -CONFIG_PCI_DIRECT=y -CONFIG_PCI_MMCONFIG=y -CONFIG_PCI_XEN=y -CONFIG_MMCONF_FAM10H=y -CONFIG_ISA_DMA_API=y -CONFIG_AMD_NB=y -# end of Bus options (PCI etc.) 
- -# -# Binary Emulations -# -CONFIG_IA32_EMULATION=y -# CONFIG_X86_X32_ABI is not set -CONFIG_COMPAT_32=y -CONFIG_COMPAT=y -CONFIG_COMPAT_FOR_U64_ALIGNMENT=y -# end of Binary Emulations - -CONFIG_HAVE_KVM=y -CONFIG_HAVE_KVM_PFNCACHE=y -CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_HAVE_KVM_IRQFD=y -CONFIG_HAVE_KVM_IRQ_ROUTING=y -CONFIG_HAVE_KVM_DIRTY_RING=y -CONFIG_HAVE_KVM_DIRTY_RING_TSO=y -CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_MMIO=y -CONFIG_KVM_ASYNC_PF=y -CONFIG_HAVE_KVM_MSI=y -CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y -CONFIG_KVM_VFIO=y -CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y -CONFIG_KVM_COMPAT=y -CONFIG_HAVE_KVM_IRQ_BYPASS=y -CONFIG_HAVE_KVM_NO_POLL=y -CONFIG_KVM_XFER_TO_GUEST_WORK=y -CONFIG_HAVE_KVM_PM_NOTIFIER=y -CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m -CONFIG_KVM_INTEL=m -CONFIG_X86_SGX_KVM=y -CONFIG_KVM_AMD=m -CONFIG_KVM_AMD_SEV=y -CONFIG_KVM_SMM=y -# CONFIG_KVM_XEN is not set -CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y -CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y -CONFIG_AS_AVX512=y -CONFIG_AS_SHA1_NI=y -CONFIG_AS_SHA256_NI=y -CONFIG_AS_TPAUSE=y -CONFIG_AS_GFNI=y -CONFIG_AS_WRUSS=y - -# -# General architecture-dependent options -# -CONFIG_HOTPLUG_SMT=y -CONFIG_HOTPLUG_CORE_SYNC=y -CONFIG_HOTPLUG_CORE_SYNC_DEAD=y -CONFIG_HOTPLUG_CORE_SYNC_FULL=y -CONFIG_HOTPLUG_SPLIT_STARTUP=y -CONFIG_HOTPLUG_PARALLEL=y -CONFIG_GENERIC_ENTRY=y -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set -# CONFIG_STATIC_CALL_SELFTEST is not set -CONFIG_OPTPROBES=y -CONFIG_KPROBES_ON_FTRACE=y -CONFIG_UPROBES=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_ARCH_USE_BUILTIN_BSWAP=y -CONFIG_KRETPROBES=y -CONFIG_KRETPROBE_ON_RETHOOK=y -CONFIG_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_OPTPROBES=y -CONFIG_HAVE_KPROBES_ON_FTRACE=y -CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y -CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y -CONFIG_HAVE_NMI=y 
-CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_ARCH_HAS_SET_DIRECT_MAP=y -CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y -CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y -CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y -CONFIG_ARCH_WANTS_NO_INSTR=y -CONFIG_HAVE_ASM_MODVERSIONS=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y -CONFIG_HAVE_RUST=y -CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y -CONFIG_HAVE_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_PERF_EVENTS_NMI=y -CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y -CONFIG_MMU_GATHER_TABLE_FREE=y -CONFIG_MMU_GATHER_RCU_TABLE_FREE=y -CONFIG_MMU_GATHER_MERGE_VMAS=y -CONFIG_MMU_LAZY_TLB_REFCOUNT=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y -CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y -CONFIG_HAVE_ARCH_SECCOMP=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP=y -CONFIG_SECCOMP_FILTER=y -# CONFIG_SECCOMP_CACHE_DEBUG is not set -CONFIG_HAVE_ARCH_STACKLEAK=y -CONFIG_HAVE_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR_STRONG=y -CONFIG_ARCH_SUPPORTS_LTO_CLANG=y -CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y -CONFIG_LTO_NONE=y -CONFIG_ARCH_SUPPORTS_CFI_CLANG=y -# CONFIG_CFI_CLANG is not set -CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y -CONFIG_HAVE_CONTEXT_TRACKING_USER=y -CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_MOVE_PUD=y -CONFIG_HAVE_MOVE_PMD=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y 
-CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y -CONFIG_HAVE_ARCH_HUGE_VMAP=y -CONFIG_HAVE_ARCH_HUGE_VMALLOC=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_ARCH_WANT_PMD_MKWRITE=y -CONFIG_HAVE_ARCH_SOFT_DIRTY=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y -CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y -CONFIG_SOFTIRQ_ON_OWN_STACK=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_HAVE_EXIT_THREAD=y -CONFIG_ARCH_MMAP_RND_BITS=28 -CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 -CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y -CONFIG_PAGE_SIZE_LESS_THAN_64KB=y -CONFIG_PAGE_SIZE_LESS_THAN_256KB=y -CONFIG_HAVE_OBJTOOL=y -CONFIG_HAVE_JUMP_LABEL_HACK=y -CONFIG_HAVE_NOINSTR_HACK=y -CONFIG_HAVE_NOINSTR_VALIDATION=y -CONFIG_HAVE_UACCESS_VALIDATION=y -CONFIG_HAVE_STACK_VALIDATION=y -CONFIG_HAVE_RELIABLE_STACKTRACE=y -CONFIG_OLD_SIGSUSPEND3=y -CONFIG_COMPAT_OLD_SIGACTION=y -CONFIG_COMPAT_32BIT_TIME=y -CONFIG_HAVE_ARCH_VMAP_STACK=y -CONFIG_VMAP_STACK=y -CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y -CONFIG_RANDOMIZE_KSTACK_OFFSET=y -# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_STRICT_MODULE_RWX=y -CONFIG_ARCH_HAS_CPU_RESCTRL=y -CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y -CONFIG_ARCH_USE_MEMREMAP_PROT=y -CONFIG_LOCK_EVENT_COUNTS=y -CONFIG_ARCH_HAS_MEM_ENCRYPT=y -CONFIG_ARCH_HAS_CC_PLATFORM=y -CONFIG_HAVE_STATIC_CALL=y -CONFIG_HAVE_STATIC_CALL_INLINE=y -CONFIG_HAVE_PREEMPT_DYNAMIC=y -CONFIG_HAVE_PREEMPT_DYNAMIC_CALL=y -CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y -CONFIG_ARCH_HAS_ELFCORE_COMPAT=y -CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH=y -CONFIG_DYNAMIC_SIGFRAME=y -CONFIG_HAVE_ARCH_NODE_DEV_GROUP=y -CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set 
-CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -# end of GCOV-based kernel profiling - -CONFIG_HAVE_GCC_PLUGINS=y -CONFIG_GCC_PLUGINS=y -# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set -CONFIG_FUNCTION_ALIGNMENT_4B=y -CONFIG_FUNCTION_ALIGNMENT_16B=y -CONFIG_FUNCTION_ALIGNMENT=16 -# end of General architecture-dependent options - -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULE_SIG_FORMAT=y -CONFIG_MODULES=y -# CONFIG_MODULE_DEBUG is not set -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set -CONFIG_MODVERSIONS=y -CONFIG_ASM_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_MODULE_SIG=y -# CONFIG_MODULE_SIG_FORCE is not set -# CONFIG_MODULE_SIG_ALL is not set -# CONFIG_MODULE_SIG_SHA1 is not set -# CONFIG_MODULE_SIG_SHA224 is not set -CONFIG_MODULE_SIG_SHA256=y -# CONFIG_MODULE_SIG_SHA384 is not set -# CONFIG_MODULE_SIG_SHA512 is not set -CONFIG_MODULE_SIG_HASH="sha256" -CONFIG_MODULE_COMPRESS_NONE=y -# CONFIG_MODULE_COMPRESS_GZIP is not set -# CONFIG_MODULE_COMPRESS_XZ is not set -# CONFIG_MODULE_COMPRESS_ZSTD is not set -# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set -CONFIG_MODPROBE_PATH="/sbin/modprobe" -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLOCK_LEGACY_AUTOLOAD=y -CONFIG_BLK_RQ_ALLOC_TIME=y -CONFIG_BLK_CGROUP_RWSTAT=y -CONFIG_BLK_CGROUP_PUNT_BIO=y -CONFIG_BLK_DEV_BSG_COMMON=y -CONFIG_BLK_ICQ=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_INTEGRITY_T10=m -CONFIG_BLK_DEV_ZONED=y -CONFIG_BLK_DEV_THROTTLING=y -# CONFIG_BLK_DEV_THROTTLING_LOW is not set -# CONFIG_BLK_WBT is not set -CONFIG_BLK_CGROUP_IOLATENCY=y -# CONFIG_BLK_CGROUP_FC_APPID is not set -CONFIG_BLK_CGROUP_IOCOST=y -# CONFIG_BLK_CGROUP_IOPRIO is not set -CONFIG_BLK_DEBUG_FS=y -CONFIG_BLK_DEBUG_FS_ZONED=y -# CONFIG_BLK_SED_OPAL is not set -# CONFIG_BLK_INLINE_ENCRYPTION is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION 
is not set -# CONFIG_AIX_PARTITION is not set -CONFIG_OSF_PARTITION=y -CONFIG_AMIGA_PARTITION=y -# CONFIG_ATARI_PARTITION is not set -CONFIG_MAC_PARTITION=y -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -# CONFIG_LDM_PARTITION is not set -CONFIG_SGI_PARTITION=y -# CONFIG_ULTRIX_PARTITION is not set -CONFIG_SUN_PARTITION=y -CONFIG_KARMA_PARTITION=y -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -# CONFIG_CMDLINE_PARTITION is not set -# end of Partition Types - -CONFIG_BLK_MQ_PCI=y -CONFIG_BLK_MQ_VIRTIO=y -CONFIG_BLK_PM=y -CONFIG_BLOCK_HOLDER_DEPRECATED=y -CONFIG_BLK_MQ_STACKING=y - -# -# IO Schedulers -# -CONFIG_MQ_IOSCHED_DEADLINE=y -CONFIG_MQ_IOSCHED_KYBER=y -CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y -CONFIG_BFQ_CGROUP_DEBUG=y -# end of IO Schedulers - -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_PADATA=y -CONFIG_ASN1=y -CONFIG_UNINLINE_SPIN_UNLOCK=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y -CONFIG_QUEUED_SPINLOCKS=y -CONFIG_ARCH_USE_QUEUED_RWLOCKS=y -CONFIG_QUEUED_RWLOCKS=y -CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y -CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y -CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y -CONFIG_CK_KABI_RESERVE=y -CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y -CONFIG_FREEZER=y - -# -# Executable file formats -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ELFCORE=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_BINFMT_SCRIPT=y -CONFIG_BINFMT_MISC=m -CONFIG_COREDUMP=y -# end of Executable file formats - -# -# Memory Management options -# -CONFIG_ZPOOL=y -CONFIG_SWAP=y -CONFIG_ZSWAP=y -# CONFIG_ZSWAP_DEFAULT_ON is not set -# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set -CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set -# 
CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set -CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" -CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y -# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set -# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set -CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" -CONFIG_ZBUD=y -# CONFIG_Z3FOLD is not set -CONFIG_ZSMALLOC=y -CONFIG_ZSMALLOC_STAT=y -CONFIG_ZSMALLOC_CHAIN_SIZE=8 - -# -# SLAB allocator options -# -# CONFIG_SLAB_DEPRECATED is not set -CONFIG_SLUB=y -# CONFIG_SLAB_MERGE_DEFAULT is not set -CONFIG_SLAB_FREELIST_RANDOM=y -# CONFIG_SLAB_FREELIST_HARDENED is not set -# CONFIG_SLUB_STATS is not set -CONFIG_SLUB_CPU_PARTIAL=y -# CONFIG_RANDOM_KMALLOC_CACHES is not set -# end of SLAB allocator options - -CONFIG_SHUFFLE_PAGE_ALLOCATOR=y -# CONFIG_COMPAT_BRK is not set -CONFIG_SPARSEMEM=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP=y -CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y -CONFIG_HAVE_FAST_GUP=y -CONFIG_NUMA_KEEP_MEMINFO=y -CONFIG_MEMORY_ISOLATION=y -CONFIG_EXCLUSIVE_SYSTEM_RAM=y -CONFIG_HAVE_BOOTMEM_INFO_NODE=y -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_MHP_MEMMAP_ON_MEMORY=y -CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y -CONFIG_MEMORY_BALLOON=y -CONFIG_BALLOON_COMPACTION=y -CONFIG_COMPACTION=y -CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 -CONFIG_PAGE_REPORTING=y -CONFIG_MIGRATION=y -CONFIG_DEVICE_MIGRATION=y -CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y -CONFIG_ARCH_ENABLE_THP_MIGRATION=y -CONFIG_CONTIG_ALLOC=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -CONFIG_MEMORY_FAILURE=y -CONFIG_HWPOISON_INJECT=m 
-CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -CONFIG_ARCH_WANTS_THP_SWAP=y -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set -CONFIG_THP_SWAP=y -CONFIG_READ_ONLY_THP_FOR_FS=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -CONFIG_USE_PERCPU_NUMA_NODE_ID=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_CMA=y -# CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set -# CONFIG_CMA_SYSFS is not set -CONFIG_CMA_AREAS=19 -CONFIG_MEM_SOFT_DIRTY=y -CONFIG_GENERIC_EARLY_IOREMAP=y -CONFIG_DEFERRED_STRUCT_PAGE_INIT=y -CONFIG_PAGE_IDLE_FLAG=y -CONFIG_IDLE_PAGE_TRACKING=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y -CONFIG_ARCH_HAS_PTE_DEVMAP=y -CONFIG_ZONE_DMA=y -CONFIG_ZONE_DMA32=y -CONFIG_ZONE_DEVICE=y -CONFIG_HMM_MIRROR=y -CONFIG_GET_FREE_REGION=y -CONFIG_DEVICE_PRIVATE=y -CONFIG_VMAP_PFN=y -CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y -CONFIG_ARCH_HAS_PKEYS=y -CONFIG_VM_EVENT_COUNTERS=y -# CONFIG_PERCPU_STATS is not set -# CONFIG_GUP_TEST is not set -# CONFIG_DMAPOOL_TEST is not set -CONFIG_ARCH_HAS_PTE_SPECIAL=y -CONFIG_MAPPING_DIRTY_HELPERS=y -CONFIG_MEMFD_CREATE=y -CONFIG_SECRETMEM=y -# CONFIG_ANON_VMA_NAME is not set -CONFIG_USERFAULTFD=y -CONFIG_HAVE_ARCH_USERFAULTFD_WP=y -CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y -CONFIG_PTE_MARKER_UFFD_WP=y -CONFIG_LRU_GEN=y -# CONFIG_LRU_GEN_ENABLED is not set -# CONFIG_LRU_GEN_STATS is not set -CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y -CONFIG_PER_VMA_LOCK=y -CONFIG_LOCK_MM_AND_FIND_VMA=y - -# -# Data Access Monitoring -# -CONFIG_DAMON=y -CONFIG_DAMON_VADDR=y -CONFIG_DAMON_PADDR=y -# CONFIG_DAMON_SYSFS is not set -CONFIG_DAMON_DBGFS=y -# CONFIG_DAMON_RECLAIM is not set -# CONFIG_DAMON_LRU_SORT is not set -# end of Data Access Monitoring -# end of Memory Management options - -CONFIG_NET=y -CONFIG_NET_INGRESS=y -CONFIG_NET_EGRESS=y -CONFIG_NET_XGRESS=y -CONFIG_NET_REDIRECT=y -CONFIG_SKB_EXTENSIONS=y - -# -# Networking options -# 
-CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m -CONFIG_UNIX=y -CONFIG_UNIX_SCM=y -CONFIG_AF_UNIX_OOB=y -CONFIG_UNIX_DIAG=m -CONFIG_TLS=m -CONFIG_TLS_DEVICE=y -# CONFIG_TLS_TOE is not set -CONFIG_XFRM=y -CONFIG_XFRM_OFFLOAD=y -CONFIG_XFRM_ALGO=y -CONFIG_XFRM_USER=y -# CONFIG_XFRM_USER_COMPAT is not set -CONFIG_XFRM_INTERFACE=m -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y -CONFIG_XFRM_STATISTICS=y -CONFIG_XFRM_AH=m -CONFIG_XFRM_ESP=m -CONFIG_XFRM_IPCOMP=m -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -CONFIG_SMC=m -CONFIG_SMC_DIAG=m -CONFIG_XDP_SOCKETS=y -CONFIG_XDP_SOCKETS_DIAG=m -CONFIG_NET_HANDSHAKE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -# CONFIG_IP_PNP is not set -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IP_TUNNEL=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE_COMMON=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m -CONFIG_NET_UDP_TUNNEL=m -# CONFIG_NET_FOU is not set -# CONFIG_NET_FOU_IP_TUNNELS is not set -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_ESP_OFFLOAD=m -# CONFIG_INET_ESPINTCP is not set -CONFIG_INET_IPCOMP=m -CONFIG_INET_TABLE_PERTURB_ORDER=16 -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=m -CONFIG_INET_DIAG=m -CONFIG_INET_TCP_DIAG=m -CONFIG_INET_UDP_DIAG=m -CONFIG_INET_RAW_DIAG=m -# CONFIG_INET_DIAG_DESTROY is not set -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m -CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=m -CONFIG_TCP_CONG_HTCP=m -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_VEGAS=m -CONFIG_TCP_CONG_NV=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_TCP_CONG_DCTCP=m -CONFIG_TCP_CONG_CDG=m -CONFIG_TCP_CONG_BBR=m 
-CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_ESP_OFFLOAD=m -# CONFIG_INET6_ESPINTCP is not set -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -# CONFIG_IPV6_ILA is not set -CONFIG_INET6_XFRM_TUNNEL=m -CONFIG_INET6_TUNNEL=m -CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=m -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_GRE=m -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y -CONFIG_IPV6_PIMSM_V2=y -# CONFIG_IPV6_SEG6_LWTUNNEL is not set -# CONFIG_IPV6_SEG6_HMAC is not set -# CONFIG_IPV6_RPL_LWTUNNEL is not set -# CONFIG_IPV6_IOAM6_LWTUNNEL is not set -CONFIG_NETLABEL=y -CONFIG_MPTCP=y -CONFIG_INET_MPTCP_DIAG=m -CONFIG_MPTCP_IPV6=y -CONFIG_NETWORK_SECMARK=y -CONFIG_NET_PTP_CLASSIFY=y -CONFIG_NETWORK_PHY_TIMESTAMPING=y -CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=m - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_INGRESS=y -CONFIG_NETFILTER_EGRESS=y -CONFIG_NETFILTER_SKIP_EGRESS=y -CONFIG_NETFILTER_NETLINK=m -CONFIG_NETFILTER_FAMILY_BRIDGE=y -CONFIG_NETFILTER_FAMILY_ARP=y -CONFIG_NETFILTER_BPF_LINK=y -# CONFIG_NETFILTER_NETLINK_HOOK is not set -CONFIG_NETFILTER_NETLINK_ACCT=m -CONFIG_NETFILTER_NETLINK_QUEUE=m -CONFIG_NETFILTER_NETLINK_LOG=m -CONFIG_NETFILTER_NETLINK_OSF=m -CONFIG_NF_CONNTRACK=m -CONFIG_NF_LOG_SYSLOG=m -CONFIG_NETFILTER_CONNCOUNT=m -CONFIG_NF_CONNTRACK_MARK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMEOUT=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_LABELS=y -CONFIG_NF_CONNTRACK_OVS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=y -CONFIG_NF_CT_PROTO_SCTP=y -CONFIG_NF_CT_PROTO_UDPLITE=y 
-CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_BROADCAST=m -CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NF_CT_NETLINK_TIMEOUT=m -CONFIG_NF_CT_NETLINK_HELPER=m -CONFIG_NETFILTER_NETLINK_GLUE_CT=y -CONFIG_NF_NAT=m -CONFIG_NF_NAT_AMANDA=m -CONFIG_NF_NAT_FTP=m -CONFIG_NF_NAT_IRC=m -CONFIG_NF_NAT_SIP=m -CONFIG_NF_NAT_TFTP=m -CONFIG_NF_NAT_REDIRECT=y -CONFIG_NF_NAT_MASQUERADE=y -CONFIG_NF_NAT_OVS=y -CONFIG_NETFILTER_SYNPROXY=m -CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=y -CONFIG_NF_TABLES_NETDEV=y -CONFIG_NFT_NUMGEN=m -CONFIG_NFT_CT=m -CONFIG_NFT_FLOW_OFFLOAD=m -CONFIG_NFT_CONNLIMIT=m -CONFIG_NFT_LOG=m -CONFIG_NFT_LIMIT=m -CONFIG_NFT_MASQ=m -CONFIG_NFT_REDIR=m -CONFIG_NFT_NAT=m -CONFIG_NFT_TUNNEL=m -CONFIG_NFT_QUEUE=m -CONFIG_NFT_QUOTA=m -CONFIG_NFT_REJECT=m -CONFIG_NFT_REJECT_INET=m -CONFIG_NFT_COMPAT=m -CONFIG_NFT_HASH=m -CONFIG_NFT_FIB=m -CONFIG_NFT_FIB_INET=m -CONFIG_NFT_XFRM=m -CONFIG_NFT_SOCKET=m -CONFIG_NFT_OSF=m -CONFIG_NFT_TPROXY=m -# CONFIG_NFT_SYNPROXY is not set -CONFIG_NF_DUP_NETDEV=m -CONFIG_NFT_DUP_NETDEV=m -CONFIG_NFT_FWD_NETDEV=m -CONFIG_NFT_FIB_NETDEV=m -# CONFIG_NFT_REJECT_NETDEV is not set -CONFIG_NF_FLOW_TABLE_INET=m -CONFIG_NF_FLOW_TABLE=m -# CONFIG_NF_FLOW_TABLE_PROCFS is not set -CONFIG_NETFILTER_XTABLES=y -# CONFIG_NETFILTER_XTABLES_COMPAT is not set - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=m -CONFIG_NETFILTER_XT_CONNMARK=m -CONFIG_NETFILTER_XT_SET=m - -# -# Xtables targets -# -CONFIG_NETFILTER_XT_TARGET_AUDIT=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m -CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HL=m 
-CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LED=m -CONFIG_NETFILTER_XT_TARGET_LOG=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_NAT=m -CONFIG_NETFILTER_XT_TARGET_NETMAP=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_RATEEST=m -CONFIG_NETFILTER_XT_TARGET_REDIRECT=m -CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m -CONFIG_NETFILTER_XT_TARGET_TEE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m - -# -# Xtables matches -# -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m -CONFIG_NETFILTER_XT_MATCH_CGROUP=m -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ECN=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_HL=m -# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_IPVS=m -CONFIG_NETFILTER_XT_MATCH_L2TP=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m 
-CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SCTP=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -# end of Core Netfilter Configuration - -CONFIG_IP_SET=m -CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPMARK=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_IPMAC=m -CONFIG_IP_SET_HASH_MAC=m -CONFIG_IP_SET_HASH_NETPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETNET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -CONFIG_IP_VS_DEBUG=y -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_FO=m -CONFIG_IP_VS_OVF=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_MH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m -# CONFIG_IP_VS_TWOS is not set - -# -# IPVS SH scheduler -# -CONFIG_IP_VS_SH_TAB_BITS=8 - -# -# IPVS MH scheduler -# -CONFIG_IP_VS_MH_TAB_INDEX=12 - -# -# IPVS application helper -# -CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_NFCT=y -CONFIG_IP_VS_PE_SIP=m - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=m -CONFIG_NF_SOCKET_IPV4=m -CONFIG_NF_TPROXY_IPV4=m -CONFIG_NF_TABLES_IPV4=y -CONFIG_NFT_REJECT_IPV4=m -CONFIG_NFT_DUP_IPV4=m -CONFIG_NFT_FIB_IPV4=m -CONFIG_NF_TABLES_ARP=y 
-CONFIG_NF_DUP_IPV4=m -CONFIG_NF_LOG_ARP=m -CONFIG_NF_LOG_IPV4=m -CONFIG_NF_REJECT_IPV4=m -CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PPTP=m -CONFIG_NF_NAT_H323=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_SYNPROXY=m -CONFIG_IP_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m -# end of IP: Netfilter Configuration - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_SOCKET_IPV6=m -CONFIG_NF_TPROXY_IPV6=m -CONFIG_NF_TABLES_IPV6=y -CONFIG_NFT_REJECT_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_DUP_IPV6=m -CONFIG_NF_REJECT_IPV6=m -CONFIG_NF_LOG_IPV6=m -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m -CONFIG_IP6_NF_MATCH_RT=m -# CONFIG_IP6_NF_MATCH_SRH is not set -# CONFIG_IP6_NF_TARGET_HL is not set -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_TARGET_SYNPROXY=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_IP6_NF_SECURITY=m -CONFIG_IP6_NF_NAT=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m -# end of IPv6: Netfilter Configuration - -CONFIG_NF_DEFRAG_IPV6=m -CONFIG_NF_TABLES_BRIDGE=m -# CONFIG_NFT_BRIDGE_META is not set -CONFIG_NFT_BRIDGE_REJECT=m -# CONFIG_NF_CONNTRACK_BRIDGE is not set -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m 
-CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_NFLOG=m -# CONFIG_BPFILTER is not set -# CONFIG_IP_DCCP is not set -CONFIG_IP_SCTP=m -# CONFIG_SCTP_DBG_OBJCNT is not set -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set -CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set -CONFIG_SCTP_COOKIE_HMAC_MD5=y -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_INET_SCTP_DIAG=m -# CONFIG_RDS is not set -CONFIG_TIPC=m -CONFIG_TIPC_MEDIA_IB=y -CONFIG_TIPC_MEDIA_UDP=y -CONFIG_TIPC_CRYPTO=y -CONFIG_TIPC_DIAG=m -CONFIG_ATM=m -CONFIG_ATM_CLIP=m -# CONFIG_ATM_CLIP_NO_ICMP is not set -CONFIG_ATM_LANE=m -# CONFIG_ATM_MPOA is not set -CONFIG_ATM_BR2684=m -# CONFIG_ATM_BR2684_IPFILTER is not set -CONFIG_L2TP=m -CONFIG_L2TP_DEBUGFS=m -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=m -CONFIG_L2TP_ETH=m -CONFIG_STP=m -CONFIG_GARP=m -CONFIG_MRP=m -CONFIG_BRIDGE=m -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_BRIDGE_VLAN_FILTERING=y -# CONFIG_BRIDGE_MRP is not set -# CONFIG_BRIDGE_CFM is not set -# CONFIG_NET_DSA is not set -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_VLAN_8021Q_MVRP=y -CONFIG_LLC=m -# CONFIG_LLC2 is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_PHONET is not set -CONFIG_6LOWPAN=m -# CONFIG_6LOWPAN_DEBUGFS is not set -# CONFIG_6LOWPAN_NHC is not set -CONFIG_IEEE802154=m -# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set -CONFIG_IEEE802154_SOCKET=m -CONFIG_IEEE802154_6LOWPAN=m -CONFIG_MAC802154=m -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFB=m 
-CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -# CONFIG_NET_SCH_CBS is not set -# CONFIG_NET_SCH_ETF is not set -CONFIG_NET_SCH_MQPRIO_LIB=m -# CONFIG_NET_SCH_TAPRIO is not set -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -# CONFIG_NET_SCH_SKBPRIO is not set -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -CONFIG_NET_SCH_CODEL=m -CONFIG_NET_SCH_FQ_CODEL=y -# CONFIG_NET_SCH_CAKE is not set -CONFIG_NET_SCH_FQ=m -CONFIG_NET_SCH_HHF=m -CONFIG_NET_SCH_PIE=m -# CONFIG_NET_SCH_FQ_PIE is not set -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_SCH_PLUG=m -# CONFIG_NET_SCH_ETS is not set -CONFIG_NET_SCH_DEFAULT=y -# CONFIG_DEFAULT_FQ is not set -# CONFIG_DEFAULT_CODEL is not set -CONFIG_DEFAULT_FQ_CODEL=y -# CONFIG_DEFAULT_SFQ is not set -# CONFIG_DEFAULT_PFIFO_FAST is not set -CONFIG_DEFAULT_NET_SCH="fq_codel" - -# -# Classification -# -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_CGROUP=y -CONFIG_NET_CLS_BPF=m -CONFIG_NET_CLS_FLOWER=m -CONFIG_NET_CLS_MATCHALL=m -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 -CONFIG_NET_EMATCH_CMP=m -CONFIG_NET_EMATCH_NBYTE=m -CONFIG_NET_EMATCH_U32=m -CONFIG_NET_EMATCH_META=m -CONFIG_NET_EMATCH_TEXT=m -CONFIG_NET_EMATCH_IPSET=m -# CONFIG_NET_EMATCH_IPT is not set -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=m -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_SAMPLE=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_NET_ACT_CSUM=m -# CONFIG_NET_ACT_MPLS is not set -CONFIG_NET_ACT_VLAN=m -CONFIG_NET_ACT_BPF=m -# CONFIG_NET_ACT_CONNMARK is not set -# CONFIG_NET_ACT_CTINFO is not set -CONFIG_NET_ACT_SKBMOD=m -# CONFIG_NET_ACT_IFE is not set -CONFIG_NET_ACT_TUNNEL_KEY=m -CONFIG_NET_ACT_CT=m -# CONFIG_NET_ACT_GATE is not set 
-CONFIG_NET_TC_SKB_EXT=y -CONFIG_NET_SCH_FIFO=y -CONFIG_DCB=y -CONFIG_DNS_RESOLVER=m -# CONFIG_BATMAN_ADV is not set -CONFIG_OPENVSWITCH=m -CONFIG_OPENVSWITCH_GRE=m -CONFIG_OPENVSWITCH_VXLAN=m -CONFIG_OPENVSWITCH_GENEVE=m -CONFIG_VSOCKETS=m -CONFIG_VSOCKETS_DIAG=m -CONFIG_VSOCKETS_LOOPBACK=m -CONFIG_VMWARE_VMCI_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS_COMMON=m -CONFIG_HYPERV_VSOCKETS=m -CONFIG_NETLINK_DIAG=m -CONFIG_MPLS=y -CONFIG_NET_MPLS_GSO=y -CONFIG_MPLS_ROUTING=m -CONFIG_MPLS_IPTUNNEL=m -CONFIG_NET_NSH=y -# CONFIG_HSR is not set -CONFIG_NET_SWITCHDEV=y -CONFIG_NET_L3_MASTER_DEV=y -# CONFIG_QRTR is not set -# CONFIG_NET_NCSI is not set -CONFIG_PCPU_DEV_REFCNT=y -CONFIG_MAX_SKB_FRAGS=17 -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_SOCK_RX_QUEUE_MAPPING=y -CONFIG_XPS=y -CONFIG_CGROUP_NET_PRIO=y -CONFIG_CGROUP_NET_CLASSID=y -CONFIG_NET_RX_BUSY_POLL=y -CONFIG_BQL=y -CONFIG_BPF_STREAM_PARSER=y -CONFIG_NET_FLOW_LIMIT=y - -# -# Network testing -# -CONFIG_NET_PKTGEN=m -CONFIG_NET_DROP_MONITOR=y -# end of Network testing -# end of Networking options - -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -CONFIG_BT=m -CONFIG_BT_BREDR=y -CONFIG_BT_RFCOMM=m -CONFIG_BT_RFCOMM_TTY=y -CONFIG_BT_BNEP=m -CONFIG_BT_BNEP_MC_FILTER=y -CONFIG_BT_BNEP_PROTO_FILTER=y -CONFIG_BT_CMTP=m -CONFIG_BT_HIDP=m -CONFIG_BT_LE=y -CONFIG_BT_LE_L2CAP_ECRED=y -# CONFIG_BT_6LOWPAN is not set -# CONFIG_BT_LEDS is not set -# CONFIG_BT_MSFTEXT is not set -# CONFIG_BT_AOSPEXT is not set -CONFIG_BT_DEBUGFS=y -# CONFIG_BT_SELFTEST is not set - -# -# Bluetooth device drivers -# -CONFIG_BT_INTEL=m -CONFIG_BT_BCM=m -CONFIG_BT_RTL=m -CONFIG_BT_HCIBTUSB=m -CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y -CONFIG_BT_HCIBTUSB_POLL_SYNC=y -CONFIG_BT_HCIBTUSB_BCM=y -# CONFIG_BT_HCIBTUSB_MTK is not set -CONFIG_BT_HCIBTUSB_RTL=y -CONFIG_BT_HCIBTSDIO=m -CONFIG_BT_HCIUART=m -CONFIG_BT_HCIUART_H4=y -CONFIG_BT_HCIUART_BCSP=y -CONFIG_BT_HCIUART_ATH3K=y -# CONFIG_BT_HCIUART_INTEL is not set -# CONFIG_BT_HCIUART_AG6XX 
is not set -CONFIG_BT_HCIBCM203X=m -# CONFIG_BT_HCIBCM4377 is not set -CONFIG_BT_HCIBPA10X=m -CONFIG_BT_HCIBFUSB=m -CONFIG_BT_HCIVHCI=m -CONFIG_BT_MRVL=m -CONFIG_BT_MRVL_SDIO=m -CONFIG_BT_ATH3K=m -# CONFIG_BT_MTKSDIO is not set -# CONFIG_BT_VIRTIO is not set -# end of Bluetooth device drivers - -# CONFIG_AF_RXRPC is not set -# CONFIG_AF_KCM is not set -CONFIG_STREAM_PARSER=y -# CONFIG_MCTP is not set -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y -CONFIG_CFG80211=m -# CONFIG_NL80211_TESTMODE is not set -# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set -CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y -CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y -CONFIG_CFG80211_DEFAULT_PS=y -CONFIG_CFG80211_DEBUGFS=y -CONFIG_CFG80211_CRDA_SUPPORT=y -# CONFIG_CFG80211_WEXT is not set -CONFIG_MAC80211=m -CONFIG_MAC80211_HAS_RC=y -CONFIG_MAC80211_RC_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" -# CONFIG_MAC80211_MESH is not set -CONFIG_MAC80211_LEDS=y -CONFIG_MAC80211_DEBUGFS=y -CONFIG_MAC80211_MESSAGE_TRACING=y -# CONFIG_MAC80211_DEBUG_MENU is not set -CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 -CONFIG_RFKILL=m -CONFIG_RFKILL_LEDS=y -CONFIG_RFKILL_INPUT=y -# CONFIG_RFKILL_GPIO is not set -# CONFIG_NET_9P is not set -# CONFIG_CAIF is not set -CONFIG_CEPH_LIB=m -CONFIG_CEPH_LIB_PRETTYDEBUG=y -CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y -# CONFIG_NFC is not set -CONFIG_PSAMPLE=m -# CONFIG_NET_IFE is not set -CONFIG_LWTUNNEL=y -CONFIG_LWTUNNEL_BPF=y -CONFIG_DST_CACHE=y -CONFIG_GRO_CELLS=y -CONFIG_SOCK_VALIDATE_XMIT=y -CONFIG_NET_SELFTESTS=y -CONFIG_NET_SOCK_MSG=y -CONFIG_NET_DEVLINK=y -CONFIG_PAGE_POOL=y -# CONFIG_PAGE_POOL_STATS is not set -CONFIG_FAILOVER=m -CONFIG_ETHTOOL_NETLINK=y - -# -# Device Drivers -# -CONFIG_HAVE_EISA=y -# CONFIG_EISA is not set -CONFIG_HAVE_PCI=y -CONFIG_PCI=y -CONFIG_PCI_DOMAINS=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCIEAER=y -CONFIG_PCIEAER_INJECT=m -CONFIG_PCIE_ECRC=y -CONFIG_PCIEASPM=y -CONFIG_PCIEASPM_DEFAULT=y -# 
CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCIE_PME=y -CONFIG_PCIE_DPC=y -# CONFIG_PCIE_PTM is not set -CONFIG_PCIE_EDR=y -CONFIG_PCI_MSI=y -CONFIG_PCI_QUIRKS=y -# CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set -CONFIG_PCI_STUB=y -CONFIG_PCI_PF_STUB=y -CONFIG_PCI_ATS=y -CONFIG_PCI_DOE=y -CONFIG_PCI_LOCKLESS_CONFIG=y -CONFIG_PCI_IOV=y -CONFIG_PCI_PRI=y -CONFIG_PCI_PASID=y -# CONFIG_PCI_P2PDMA is not set -CONFIG_PCI_LABEL=y -CONFIG_PCI_HYPERV=m -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=64 -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_HOTPLUG_PCI_ACPI_IBM=m -# CONFIG_HOTPLUG_PCI_CPCI is not set -CONFIG_HOTPLUG_PCI_SHPC=y - -# -# PCI controller drivers -# -CONFIG_VMD=y -CONFIG_PCI_HYPERV_INTERFACE=m - -# -# Cadence-based PCIe controllers -# -# end of Cadence-based PCIe controllers - -# -# DesignWare-based PCIe controllers -# -# CONFIG_PCI_MESON is not set -# CONFIG_PCIE_DW_PLAT_HOST is not set -# end of DesignWare-based PCIe controllers - -# -# Mobiveil-based PCIe controllers -# -# end of Mobiveil-based PCIe controllers -# end of PCI controller drivers - -# -# PCI Endpoint -# -# CONFIG_PCI_ENDPOINT is not set -# end of PCI Endpoint - -# -# PCI switch controller drivers -# -# CONFIG_PCI_SW_SWITCHTEC is not set -# end of PCI switch controller drivers - -CONFIG_CXL_BUS=m -CONFIG_CXL_PCI=m -# CONFIG_CXL_MEM_RAW_COMMANDS is not set -CONFIG_CXL_ACPI=m -CONFIG_CXL_PMEM=m -CONFIG_CXL_MEM=m -CONFIG_CXL_PORT=m -CONFIG_CXL_SUSPEND=y -CONFIG_CXL_REGION=y -# CONFIG_CXL_REGION_INVALIDATION_TEST is not set -CONFIG_CXL_PMU=m -CONFIG_PCCARD=y -# CONFIG_PCMCIA is not set -CONFIG_CARDBUS=y - -# -# PC-card bridges -# -CONFIG_YENTA=m -CONFIG_YENTA_O2=y -CONFIG_YENTA_RICOH=y -CONFIG_YENTA_TI=y -CONFIG_YENTA_ENE_TUNE=y -CONFIG_YENTA_TOSHIBA=y -# CONFIG_RAPIDIO is not set - -# -# Generic Driver Options -# -CONFIG_AUXILIARY_BUS=y -# CONFIG_UEVENT_HELPER is not set 
-CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_DEVTMPFS_SAFE is not set -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y - -# -# Firmware loader -# -CONFIG_FW_LOADER=y -CONFIG_FW_LOADER_DEBUG=y -CONFIG_FW_LOADER_PAGED_BUF=y -CONFIG_FW_LOADER_SYSFS=y -CONFIG_EXTRA_FIRMWARE="" -CONFIG_FW_LOADER_USER_HELPER=y -# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set -# CONFIG_FW_LOADER_COMPRESS is not set -CONFIG_FW_CACHE=y -CONFIG_FW_UPLOAD=y -# end of Firmware loader - -CONFIG_WANT_DEV_COREDUMP=y -CONFIG_ALLOW_DEV_COREDUMP=y -CONFIG_DEV_COREDUMP=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set -CONFIG_HMEM_REPORTING=y -# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set -CONFIG_SYS_HYPERVISOR=y -CONFIG_GENERIC_CPU_AUTOPROBE=y -CONFIG_GENERIC_CPU_VULNERABILITIES=y -CONFIG_REGMAP=y -CONFIG_REGMAP_I2C=m -CONFIG_REGMAP_SPI=m -CONFIG_DMA_SHARED_BUFFER=y -# CONFIG_DMA_FENCE_TRACE is not set -# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set -# end of Generic Driver Options - -# -# Bus devices -# -# CONFIG_MHI_BUS is not set -# CONFIG_MHI_BUS_EP is not set -# end of Bus devices - -# -# Cache Drivers -# -# end of Cache Drivers - -CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y - -# -# Firmware Drivers -# - -# -# ARM System Control and Management Interface Protocol -# -# end of ARM System Control and Management Interface Protocol - -CONFIG_EDD=m -# CONFIG_EDD_OFF is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=y -CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y -CONFIG_ISCSI_IBFT_FIND=y -CONFIG_ISCSI_IBFT=m -CONFIG_FW_CFG_SYSFS=y -# CONFIG_FW_CFG_SYSFS_CMDLINE is not set -CONFIG_SYSFB=y -# CONFIG_SYSFB_SIMPLEFB is not set -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# EFI (Extensible Firmware Interface) Support -# -CONFIG_EFI_ESRT=y -CONFIG_EFI_VARS_PSTORE=y -CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y -CONFIG_EFI_SOFT_RESERVE=y -CONFIG_EFI_DXE_MEM_ATTRIBUTES=y -CONFIG_EFI_RUNTIME_WRAPPERS=y -# 
CONFIG_EFI_BOOTLOADER_CONTROL is not set -# CONFIG_EFI_CAPSULE_LOADER is not set -# CONFIG_EFI_TEST is not set -# CONFIG_APPLE_PROPERTIES is not set -# CONFIG_RESET_ATTACK_MITIGATION is not set -CONFIG_EFI_RCI2_TABLE=y -# CONFIG_EFI_DISABLE_PCI_DMA is not set -CONFIG_EFI_EARLYCON=y -CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y -# CONFIG_EFI_DISABLE_RUNTIME is not set -CONFIG_EFI_COCO_SECRET=y -CONFIG_UNACCEPTED_MEMORY=y -# end of EFI (Extensible Firmware Interface) Support - -CONFIG_UEFI_CPER=y -CONFIG_UEFI_CPER_X86=y - -# -# Tegra firmware driver -# -# end of Tegra firmware driver -# end of Firmware Drivers - -# CONFIG_GNSS is not set -CONFIG_MTD=m -# CONFIG_MTD_TESTS is not set - -# -# Partition parsers -# -# CONFIG_MTD_AR7_PARTS is not set -# CONFIG_MTD_CMDLINE_PARTS is not set -# CONFIG_MTD_REDBOOT_PARTS is not set -# end of Partition parsers - -# -# User Modules And Translation Layers -# -CONFIG_MTD_BLKDEVS=m -CONFIG_MTD_BLOCK=m -# CONFIG_MTD_BLOCK_RO is not set - -# -# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
-# -# CONFIG_FTL is not set -# CONFIG_NFTL is not set -# CONFIG_INFTL is not set -# CONFIG_RFD_FTL is not set -# CONFIG_SSFDC is not set -# CONFIG_SM_FTL is not set -# CONFIG_MTD_OOPS is not set -# CONFIG_MTD_SWAP is not set -# CONFIG_MTD_PARTITIONED_MASTER is not set - -# -# RAM/ROM/Flash chip drivers -# -# CONFIG_MTD_CFI is not set -# CONFIG_MTD_JEDECPROBE is not set -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y -# CONFIG_MTD_RAM is not set -# CONFIG_MTD_ROM is not set -# CONFIG_MTD_ABSENT is not set -# end of RAM/ROM/Flash chip drivers - -# -# Mapping drivers for chip access -# -# CONFIG_MTD_COMPLEX_MAPPINGS is not set -# CONFIG_MTD_INTEL_VR_NOR is not set -# CONFIG_MTD_PLATRAM is not set -# end of Mapping drivers for chip access - -# -# Self-contained MTD device drivers -# -# CONFIG_MTD_PMC551 is not set -# CONFIG_MTD_DATAFLASH is not set -# CONFIG_MTD_MCHP23K256 is not set -# CONFIG_MTD_MCHP48L640 is not set -# CONFIG_MTD_SST25L is not set -# CONFIG_MTD_SLRAM is not set -# CONFIG_MTD_PHRAM is not set -# CONFIG_MTD_MTDRAM is not set -# CONFIG_MTD_BLOCK2MTD is not set - -# -# Disk-On-Chip Device Drivers -# -# CONFIG_MTD_DOCG3 is not set -# end of Self-contained MTD device drivers - -# -# NAND -# -# CONFIG_MTD_ONENAND is not set -# CONFIG_MTD_RAW_NAND is not set -# CONFIG_MTD_SPI_NAND is not set - -# -# ECC engine support -# -# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set -# CONFIG_MTD_NAND_ECC_SW_BCH is not set -# CONFIG_MTD_NAND_ECC_MXIC is not set -# end of ECC engine support -# end of NAND - -# -# LPDDR & LPDDR2 PCM memory drivers -# -# CONFIG_MTD_LPDDR is not set -# end of LPDDR & LPDDR2 PCM memory drivers - -# CONFIG_MTD_SPI_NOR is not set -CONFIG_MTD_UBI=m -CONFIG_MTD_UBI_WL_THRESHOLD=4096 -CONFIG_MTD_UBI_BEB_LIMIT=20 -# CONFIG_MTD_UBI_FASTMAP is not set -# CONFIG_MTD_UBI_GLUEBI is not set -# CONFIG_MTD_UBI_BLOCK is not set -# CONFIG_MTD_HYPERBUS is not set -# CONFIG_OF 
is not set -CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y -CONFIG_PARPORT=m -CONFIG_PARPORT_PC=m -CONFIG_PARPORT_SERIAL=m -# CONFIG_PARPORT_PC_FIFO is not set -# CONFIG_PARPORT_PC_SUPERIO is not set -CONFIG_PARPORT_1284=y -CONFIG_PARPORT_NOT_PC=y -CONFIG_PNP=y -# CONFIG_PNP_DEBUG_MESSAGES is not set - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -CONFIG_BLK_DEV_NULL_BLK=m -# CONFIG_BLK_DEV_FD is not set -CONFIG_CDROM=m -# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set -CONFIG_ZRAM=m -CONFIG_ZRAM_DEF_COMP_LZORLE=y -# CONFIG_ZRAM_DEF_COMP_ZSTD is not set -# CONFIG_ZRAM_DEF_COMP_LZ4 is not set -# CONFIG_ZRAM_DEF_COMP_LZO is not set -# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set -CONFIG_ZRAM_DEF_COMP="lzo-rle" -CONFIG_ZRAM_WRITEBACK=y -# CONFIG_ZRAM_MEMORY_TRACKING is not set -# CONFIG_ZRAM_MULTI_COMP is not set -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 -# CONFIG_BLK_DEV_DRBD is not set -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=m -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=16384 -CONFIG_CDROM_PKTCDVD=m -CONFIG_CDROM_PKTCDVD_BUFFERS=8 -# CONFIG_CDROM_PKTCDVD_WCACHE is not set -# CONFIG_ATA_OVER_ETH is not set -CONFIG_XEN_BLKDEV_FRONTEND=m -CONFIG_VIRTIO_BLK=y -CONFIG_BLK_DEV_RBD=m -CONFIG_BLK_DEV_UBLK=m -CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y - -# -# NVME Support -# -CONFIG_NVME_CORE=m -CONFIG_BLK_DEV_NVME=m -CONFIG_NVME_MULTIPATH=y -# CONFIG_NVME_VERBOSE_ERRORS is not set -# CONFIG_NVME_HWMON is not set -CONFIG_NVME_FABRICS=m -CONFIG_NVME_RDMA=m -CONFIG_NVME_FC=m -CONFIG_NVME_TCP=m -# CONFIG_NVME_AUTH is not set -CONFIG_NVME_TARGET=m -# CONFIG_NVME_TARGET_PASSTHRU is not set -CONFIG_NVME_TARGET_LOOP=m -CONFIG_NVME_TARGET_RDMA=m -CONFIG_NVME_TARGET_FC=m -CONFIG_NVME_TARGET_FCLOOP=m -CONFIG_NVME_TARGET_TCP=m -# CONFIG_NVME_TARGET_AUTH is not set -# end of NVME Support - -# -# Misc devices -# -CONFIG_SENSORS_LIS3LV02D=m -# CONFIG_AD525X_DPOT is not set -# CONFIG_DUMMY_IRQ is not set -# CONFIG_IBM_ASM is not set -# CONFIG_PHANTOM is not set -CONFIG_TIFM_CORE=m 
-CONFIG_TIFM_7XX1=m -# CONFIG_ICS932S401 is not set -CONFIG_ENCLOSURE_SERVICES=m -CONFIG_SGI_XP=m -CONFIG_HP_ILO=m -CONFIG_SGI_GRU=m -# CONFIG_SGI_GRU_DEBUG is not set -CONFIG_APDS9802ALS=m -CONFIG_ISL29003=m -CONFIG_ISL29020=m -CONFIG_SENSORS_TSL2550=m -CONFIG_SENSORS_BH1770=m -CONFIG_SENSORS_APDS990X=m -# CONFIG_HMC6352 is not set -# CONFIG_DS1682 is not set -CONFIG_VMWARE_BALLOON=m -# CONFIG_LATTICE_ECP3_CONFIG is not set -# CONFIG_SRAM is not set -# CONFIG_DW_XDATA_PCIE is not set -# CONFIG_PCI_ENDPOINT_TEST is not set -# CONFIG_XILINX_SDFEC is not set -CONFIG_MISC_RTSX=m -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -# CONFIG_EEPROM_AT24 is not set -# CONFIG_EEPROM_AT25 is not set -CONFIG_EEPROM_LEGACY=m -CONFIG_EEPROM_MAX6875=m -CONFIG_EEPROM_93CX6=m -# CONFIG_EEPROM_93XX46 is not set -# CONFIG_EEPROM_IDT_89HPESX is not set -# CONFIG_EEPROM_EE1004 is not set -# end of EEPROM support - -CONFIG_CB710_CORE=m -# CONFIG_CB710_DEBUG is not set -CONFIG_CB710_DEBUG_ASSUMPTIONS=y - -# -# Texas Instruments shared transport line discipline -# -# CONFIG_TI_ST is not set -# end of Texas Instruments shared transport line discipline - -CONFIG_SENSORS_LIS3_I2C=m -CONFIG_ALTERA_STAPL=m -CONFIG_INTEL_MEI=m -CONFIG_INTEL_MEI_ME=m -# CONFIG_INTEL_MEI_TXE is not set -# CONFIG_INTEL_MEI_GSC is not set -# CONFIG_INTEL_MEI_HDCP is not set -# CONFIG_INTEL_MEI_PXP is not set -# CONFIG_INTEL_MEI_GSC_PROXY is not set -CONFIG_VMWARE_VMCI=m -# CONFIG_GENWQE is not set -# CONFIG_ECHO is not set -# CONFIG_BCM_VK is not set -# CONFIG_MISC_ALCOR_PCI is not set -CONFIG_MISC_RTSX_PCI=m -CONFIG_MISC_RTSX_USB=m -CONFIG_UACCE=m -CONFIG_PVPANIC=y -# CONFIG_PVPANIC_MMIO is not set -# CONFIG_PVPANIC_PCI is not set -# CONFIG_GP_PCI1XXXX is not set -# end of Misc devices - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -CONFIG_RAID_ATTRS=m -CONFIG_SCSI_COMMON=y -CONFIG_SCSI=y -CONFIG_SCSI_DMA=y -CONFIG_SCSI_NETLINK=y -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# 
-CONFIG_BLK_DEV_SD=m -CONFIG_CHR_DEV_ST=m -CONFIG_BLK_DEV_SR=m -CONFIG_CHR_DEV_SG=m -CONFIG_BLK_DEV_BSG=y -CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_ENCLOSURE=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=m -CONFIG_SCSI_SAS_LIBSAS=m -CONFIG_SCSI_SAS_ATA=y -CONFIG_SCSI_SAS_HOST_SMP=y -CONFIG_SCSI_SRP_ATTRS=m -# end of SCSI Transports - -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m -# CONFIG_SCSI_CXGB3_ISCSI is not set -CONFIG_SCSI_CXGB4_ISCSI=m -CONFIG_SCSI_BNX2_ISCSI=m -CONFIG_SCSI_BNX2X_FCOE=m -CONFIG_BE2ISCSI=m -# CONFIG_BLK_DEV_3W_XXXX_RAID is not set -CONFIG_SCSI_HPSA=m -# CONFIG_SCSI_3W_9XXX is not set -# CONFIG_SCSI_3W_SAS is not set -# CONFIG_SCSI_ACARD is not set -CONFIG_SCSI_AACRAID=m -# CONFIG_SCSI_AIC7XXX is not set -# CONFIG_SCSI_AIC79XX is not set -# CONFIG_SCSI_AIC94XX is not set -# CONFIG_SCSI_MVSAS is not set -# CONFIG_SCSI_MVUMI is not set -# CONFIG_SCSI_ADVANSYS is not set -# CONFIG_SCSI_ARCMSR is not set -# CONFIG_SCSI_ESAS2R is not set -# CONFIG_MEGARAID_NEWGEN is not set -# CONFIG_MEGARAID_LEGACY is not set -CONFIG_MEGARAID_SAS=m -CONFIG_SCSI_MPT3SAS=m -CONFIG_SCSI_MPT2SAS_MAX_SGE=128 -CONFIG_SCSI_MPT3SAS_MAX_SGE=128 -CONFIG_SCSI_MPT2SAS=m -CONFIG_SCSI_MPI3MR=m -CONFIG_SCSI_SMARTPQI=m -# CONFIG_SCSI_HPTIOP is not set -# CONFIG_SCSI_BUSLOGIC is not set -# CONFIG_SCSI_MYRB is not set -# CONFIG_SCSI_MYRS is not set -CONFIG_VMWARE_PVSCSI=m -# CONFIG_XEN_SCSI_FRONTEND is not set -CONFIG_HYPERV_STORAGE=m -CONFIG_LIBFC=m -CONFIG_LIBFCOE=m -CONFIG_FCOE=m -CONFIG_FCOE_FNIC=m -# CONFIG_SCSI_SNIC is not set -# CONFIG_SCSI_DMX3191D is not set -# CONFIG_SCSI_FDOMAIN_PCI is not set -CONFIG_SCSI_ISCI=m -# CONFIG_SCSI_IPS is not set -# CONFIG_SCSI_INITIO is not set -# CONFIG_SCSI_INIA100 is not set -# CONFIG_SCSI_PPA is not set -# CONFIG_SCSI_IMM is not set -# CONFIG_SCSI_STEX is not set -# 
CONFIG_SCSI_SYM53C8XX_2 is not set -# CONFIG_SCSI_IPR is not set -# CONFIG_SCSI_QLOGIC_1280 is not set -CONFIG_SCSI_QLA_FC=m -# CONFIG_TCM_QLA2XXX is not set -CONFIG_SCSI_QLA_ISCSI=m -CONFIG_QEDI=m -CONFIG_QEDF=m -CONFIG_SCSI_LPFC=m -# CONFIG_SCSI_LPFC_DEBUG_FS is not set -# CONFIG_SCSI_EFCT is not set -# CONFIG_SCSI_DC395x is not set -# CONFIG_SCSI_AM53C974 is not set -# CONFIG_SCSI_WD719X is not set -CONFIG_SCSI_DEBUG=m -# CONFIG_SCSI_PMCRAID is not set -# CONFIG_SCSI_PM8001 is not set -# CONFIG_SCSI_BFA_FC is not set -CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_CHELSIO_FCOE=m -CONFIG_SCSI_DH=y -CONFIG_SCSI_DH_RDAC=y -CONFIG_SCSI_DH_HP_SW=y -CONFIG_SCSI_DH_EMC=y -CONFIG_SCSI_DH_ALUA=y -# end of SCSI device support - -CONFIG_ATA=m -CONFIG_SATA_HOST=y -CONFIG_PATA_TIMINGS=y -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_FORCE=y -CONFIG_ATA_ACPI=y -# CONFIG_SATA_ZPODD is not set -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=m -CONFIG_SATA_MOBILE_LPM_POLICY=0 -CONFIG_SATA_AHCI_PLATFORM=m -# CONFIG_AHCI_DWC is not set -# CONFIG_SATA_INIC162X is not set -# CONFIG_SATA_ACARD_AHCI is not set -# CONFIG_SATA_SIL24 is not set -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -# CONFIG_PDC_ADMA is not set -# CONFIG_SATA_QSTOR is not set -# CONFIG_SATA_SX4 is not set -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -CONFIG_ATA_PIIX=m -# CONFIG_SATA_DWC is not set -# CONFIG_SATA_MV is not set -# CONFIG_SATA_NV is not set -# CONFIG_SATA_PROMISE is not set -# CONFIG_SATA_SIL is not set -# CONFIG_SATA_SIS is not set -# CONFIG_SATA_SVW is not set -# CONFIG_SATA_ULI is not set -# CONFIG_SATA_VIA is not set -# CONFIG_SATA_VITESSE is not set -CONFIG_SATA_ZHAOXIN=m - -# -# PATA SFF controllers with BMDMA -# -# CONFIG_PATA_ALI is not set -# CONFIG_PATA_AMD is not set -# CONFIG_PATA_ARTOP is not set -# CONFIG_PATA_ATIIXP is not set -# CONFIG_PATA_ATP867X is not set -# CONFIG_PATA_CMD64X is not set -# CONFIG_PATA_CYPRESS is 
not set -# CONFIG_PATA_EFAR is not set -# CONFIG_PATA_HPT366 is not set -# CONFIG_PATA_HPT37X is not set -# CONFIG_PATA_HPT3X2N is not set -# CONFIG_PATA_HPT3X3 is not set -# CONFIG_PATA_IT8213 is not set -# CONFIG_PATA_IT821X is not set -# CONFIG_PATA_JMICRON is not set -# CONFIG_PATA_MARVELL is not set -# CONFIG_PATA_NETCELL is not set -# CONFIG_PATA_NINJA32 is not set -# CONFIG_PATA_NS87415 is not set -# CONFIG_PATA_OLDPIIX is not set -# CONFIG_PATA_OPTIDMA is not set -# CONFIG_PATA_PDC2027X is not set -# CONFIG_PATA_PDC_OLD is not set -# CONFIG_PATA_RADISYS is not set -# CONFIG_PATA_RDC is not set -# CONFIG_PATA_SCH is not set -# CONFIG_PATA_SERVERWORKS is not set -# CONFIG_PATA_SIL680 is not set -# CONFIG_PATA_SIS is not set -# CONFIG_PATA_TOSHIBA is not set -# CONFIG_PATA_TRIFLEX is not set -# CONFIG_PATA_VIA is not set -# CONFIG_PATA_WINBOND is not set - -# -# PIO-only SFF controllers -# -# CONFIG_PATA_CMD640_PCI is not set -# CONFIG_PATA_MPIIX is not set -# CONFIG_PATA_NS87410 is not set -# CONFIG_PATA_OPTI is not set -# CONFIG_PATA_RZ1000 is not set -# CONFIG_PATA_PARPORT is not set - -# -# Generic fallback / legacy drivers -# -# CONFIG_PATA_ACPI is not set -CONFIG_ATA_GENERIC=m -# CONFIG_PATA_LEGACY is not set -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_AUTODETECT=y -CONFIG_MD_BITMAP_FILE=y -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m -# CONFIG_MD_MULTIPATH is not set -CONFIG_MD_FAULTY=m -CONFIG_MD_CLUSTER=m -# CONFIG_BCACHE is not set -CONFIG_BLK_DEV_DM_BUILTIN=y -CONFIG_BLK_DEV_DM=m -CONFIG_DM_DEBUG=y -CONFIG_DM_BUFIO=m -# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set -CONFIG_DM_BIO_PRISON=m -CONFIG_DM_PERSISTENT_DATA=m -# CONFIG_DM_UNSTRIPED is not set -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m -CONFIG_DM_CACHE_SMQ=m -CONFIG_DM_WRITECACHE=m -# CONFIG_DM_EBS is not set -CONFIG_DM_ERA=m -# CONFIG_DM_CLONE is not set -CONFIG_DM_MIRROR=m 
-CONFIG_DM_LOG_USERSPACE=m -CONFIG_DM_RAID=m -CONFIG_DM_ZERO=m -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m -CONFIG_DM_MULTIPATH_ST=m -# CONFIG_DM_MULTIPATH_HST is not set -# CONFIG_DM_MULTIPATH_IOA is not set -CONFIG_DM_DELAY=m -# CONFIG_DM_DUST is not set -CONFIG_DM_UEVENT=y -CONFIG_DM_FLAKEY=m -CONFIG_DM_VERITY=m -# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set -# CONFIG_DM_VERITY_FEC is not set -CONFIG_DM_SWITCH=m -CONFIG_DM_LOG_WRITES=m -CONFIG_DM_INTEGRITY=m -# CONFIG_DM_ZONED is not set -CONFIG_DM_AUDIT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m -CONFIG_TCM_USER2=m -CONFIG_LOOPBACK_TARGET=m -# CONFIG_TCM_FC is not set -CONFIG_ISCSI_TARGET=m -CONFIG_ISCSI_TARGET_CXGB4=m -# CONFIG_SBP_TARGET is not set -# CONFIG_REMOTE_TARGET is not set -CONFIG_FUSION=y -CONFIG_FUSION_SPI=m -# CONFIG_FUSION_FC is not set -CONFIG_FUSION_SAS=m -CONFIG_FUSION_MAX_SGE=128 -# CONFIG_FUSION_CTL is not set -CONFIG_FUSION_LOGGING=y - -# -# IEEE 1394 (FireWire) support -# -CONFIG_FIREWIRE=m -CONFIG_FIREWIRE_OHCI=m -CONFIG_FIREWIRE_SBP2=m -CONFIG_FIREWIRE_NET=m -# CONFIG_FIREWIRE_NOSY is not set -# end of IEEE 1394 (FireWire) support - -CONFIG_MACINTOSH_DRIVERS=y -CONFIG_MAC_EMUMOUSEBTN=y -CONFIG_NETDEVICES=y -CONFIG_MII=m -CONFIG_NET_CORE=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_WIREGUARD=m -# CONFIG_WIREGUARD_DEBUG is not set -# CONFIG_EQUALIZER is not set -CONFIG_NET_FC=y -CONFIG_IFB=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_RANDOM=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_MACVLAN=m -CONFIG_MACVTAP=m -CONFIG_IPVLAN_L3S=y -CONFIG_IPVLAN=m -CONFIG_IPVTAP=m -CONFIG_VXLAN=m -CONFIG_GENEVE=m -# CONFIG_BAREUDP is not set -# CONFIG_GTP is not set -# CONFIG_AMT is not set -CONFIG_MACSEC=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y -# CONFIG_NETCONSOLE_EXTENDED_LOG is not set -CONFIG_NETPOLL=y -CONFIG_NET_POLL_CONTROLLER=y 
-CONFIG_TUN=m -CONFIG_TAP=m -# CONFIG_TUN_VNET_CROSS_LE is not set -CONFIG_VETH=m -CONFIG_VIRTIO_NET=m -CONFIG_NLMON=m -CONFIG_NET_VRF=m -CONFIG_VSOCKMON=m -# CONFIG_ARCNET is not set -# CONFIG_ATM_DRIVERS is not set -CONFIG_ETHERNET=y -CONFIG_MDIO=m -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ADAPTEC is not set -# CONFIG_NET_VENDOR_AGERE is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_ALTEON is not set -# CONFIG_ALTERA_TSE is not set -CONFIG_NET_VENDOR_AMAZON=y -CONFIG_ENA_ETHERNET=m -# CONFIG_NET_VENDOR_AMD is not set -CONFIG_NET_VENDOR_AQUANTIA=y -CONFIG_AQTION=m -# CONFIG_NET_VENDOR_ARC is not set -CONFIG_NET_VENDOR_ASIX=y -# CONFIG_SPI_AX88796C is not set -CONFIG_NET_VENDOR_ATHEROS=y -CONFIG_ATL2=m -CONFIG_ATL1=m -CONFIG_ATL1E=m -CONFIG_ATL1C=m -CONFIG_ALX=m -# CONFIG_CX_ECAT is not set -CONFIG_NET_VENDOR_BROADCOM=y -# CONFIG_B44 is not set -# CONFIG_BCMGENET is not set -CONFIG_BNX2=m -CONFIG_CNIC=m -CONFIG_TIGON3=m -CONFIG_TIGON3_HWMON=y -CONFIG_BNX2X=m -CONFIG_BNX2X_SRIOV=y -# CONFIG_SYSTEMPORT is not set -CONFIG_BNXT=m -CONFIG_BNXT_SRIOV=y -CONFIG_BNXT_FLOWER_OFFLOAD=y -CONFIG_BNXT_DCB=y -CONFIG_BNXT_HWMON=y -# CONFIG_NET_VENDOR_CADENCE is not set -CONFIG_NET_VENDOR_CAVIUM=y -# CONFIG_THUNDER_NIC_PF is not set -# CONFIG_THUNDER_NIC_VF is not set -# CONFIG_THUNDER_NIC_BGX is not set -# CONFIG_THUNDER_NIC_RGX is not set -CONFIG_CAVIUM_PTP=y -CONFIG_LIQUIDIO_CORE=m -CONFIG_LIQUIDIO=m -CONFIG_LIQUIDIO_VF=m -CONFIG_NET_VENDOR_CHELSIO=y -# CONFIG_CHELSIO_T1 is not set -# CONFIG_CHELSIO_T3 is not set -CONFIG_CHELSIO_T4=m -# CONFIG_CHELSIO_T4_DCB is not set -CONFIG_CHELSIO_T4VF=m -CONFIG_CHELSIO_LIB=m -CONFIG_CHELSIO_INLINE_CRYPTO=y -CONFIG_CHELSIO_IPSEC_INLINE=m -# CONFIG_CHELSIO_TLS_DEVICE is not set -CONFIG_NET_VENDOR_CISCO=y -CONFIG_ENIC=m -# CONFIG_NET_VENDOR_CORTINA is not set -CONFIG_NET_VENDOR_DAVICOM=y -# CONFIG_DM9051 is not set -CONFIG_DNET=m -CONFIG_NET_VENDOR_DEC=y -# CONFIG_NET_TULIP is not set -# 
CONFIG_NET_VENDOR_DLINK is not set -CONFIG_NET_VENDOR_EMULEX=y -CONFIG_BE2NET=m -CONFIG_BE2NET_HWMON=y -# CONFIG_BE2NET_BE2 is not set -# CONFIG_BE2NET_BE3 is not set -CONFIG_BE2NET_LANCER=y -CONFIG_BE2NET_SKYHAWK=y -CONFIG_NET_VENDOR_ENGLEDER=y -# CONFIG_TSNEP is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -CONFIG_NET_VENDOR_FUNGIBLE=y -# CONFIG_FUN_ETH is not set -CONFIG_NET_VENDOR_GOOGLE=y -CONFIG_GVE=m -CONFIG_NET_VENDOR_HUAWEI=y -CONFIG_HINIC=m -# CONFIG_NET_VENDOR_I825XX is not set -CONFIG_NET_VENDOR_INTEL=y -# CONFIG_E100 is not set -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_E1000E_HWTS=y -CONFIG_IGB=m -CONFIG_IGB_HWMON=y -CONFIG_IGB_DCA=y -CONFIG_IGBVF=m -CONFIG_IXGBE=m -CONFIG_IXGBE_HWMON=y -CONFIG_IXGBE_DCA=y -CONFIG_IXGBE_DCB=y -CONFIG_IXGBE_IPSEC=y -CONFIG_IXGBEVF=m -CONFIG_IXGBEVF_IPSEC=y -CONFIG_I40E=m -CONFIG_I40E_DCB=y -CONFIG_IAVF=m -CONFIG_I40EVF=m -CONFIG_ICE=m -CONFIG_ICE_SWITCHDEV=y -CONFIG_ICE_HWTS=y -CONFIG_FM10K=m -CONFIG_IGC=m -# CONFIG_JME is not set -CONFIG_NET_VENDOR_ADI=y -# CONFIG_ADIN1110 is not set -CONFIG_NET_VENDOR_LITEX=y -# CONFIG_NET_VENDOR_MARVELL is not set -CONFIG_NET_VENDOR_MELLANOX=y -CONFIG_MLX4_EN=m -CONFIG_MLX4_EN_DCB=y -CONFIG_MLX4_CORE=m -CONFIG_MLX4_DEBUG=y -# CONFIG_MLX4_CORE_GEN2 is not set -CONFIG_MLX5_CORE=m -CONFIG_MLX5_FPGA=y -CONFIG_MLX5_CORE_EN=y -CONFIG_MLX5_EN_ARFS=y -CONFIG_MLX5_EN_RXNFC=y -CONFIG_MLX5_MPFS=y -CONFIG_MLX5_ESWITCH=y -CONFIG_MLX5_BRIDGE=y -CONFIG_MLX5_CLS_ACT=y -CONFIG_MLX5_TC_CT=y -CONFIG_MLX5_TC_SAMPLE=y -CONFIG_MLX5_CORE_EN_DCB=y -# CONFIG_MLX5_CORE_IPOIB is not set -# CONFIG_MLX5_MACSEC is not set -# CONFIG_MLX5_EN_IPSEC is not set -# CONFIG_MLX5_EN_TLS is not set -CONFIG_MLX5_SW_STEERING=y -# CONFIG_MLX5_SF is not set -CONFIG_MLXSW_CORE=m -CONFIG_MLXSW_CORE_HWMON=y -CONFIG_MLXSW_CORE_THERMAL=y -CONFIG_MLXSW_PCI=m -CONFIG_MLXSW_I2C=m -CONFIG_MLXSW_SPECTRUM=m -CONFIG_MLXSW_SPECTRUM_DCB=y -CONFIG_MLXSW_MINIMAL=m -CONFIG_MLXFW=m -# CONFIG_NET_VENDOR_MICREL is not set -# 
CONFIG_NET_VENDOR_MICROCHIP is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -CONFIG_NET_VENDOR_MICROSOFT=y -# CONFIG_MICROSOFT_MANA is not set -CONFIG_NET_VENDOR_MYRI=y -CONFIG_MYRI10GE=m -CONFIG_MYRI10GE_DCA=y -# CONFIG_FEALNX is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETERION is not set -CONFIG_NET_VENDOR_NETRONOME=y -CONFIG_NFP=m -CONFIG_NFP_APP_FLOWER=y -CONFIG_NFP_APP_ABM_NIC=y -CONFIG_NFP_NET_IPSEC=y -CONFIG_NFP_DEBUG=y -# CONFIG_NET_VENDOR_NVIDIA is not set -CONFIG_NET_VENDOR_OKI=y -CONFIG_ETHOC=m -# CONFIG_NET_VENDOR_PACKET_ENGINES is not set -CONFIG_NET_VENDOR_PENSANDO=y -# CONFIG_IONIC is not set -CONFIG_NET_VENDOR_QLOGIC=y -CONFIG_QLA3XXX=m -# CONFIG_QLCNIC is not set -CONFIG_NETXEN_NIC=m -CONFIG_QED=m -CONFIG_QED_LL2=y -CONFIG_QED_SRIOV=y -CONFIG_QEDE=m -CONFIG_QED_RDMA=y -CONFIG_QED_ISCSI=y -CONFIG_QED_FCOE=y -CONFIG_QED_OOO=y -CONFIG_NET_VENDOR_BROCADE=y -# CONFIG_BNA is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RDC is not set -CONFIG_NET_VENDOR_REALTEK=y -# CONFIG_ATP is not set -CONFIG_8139CP=m -CONFIG_8139TOO=m -# CONFIG_8139TOO_PIO is not set -# CONFIG_8139TOO_TUNE_TWISTER is not set -CONFIG_8139TOO_8129=y -# CONFIG_8139_OLD_RX_RESET is not set -CONFIG_R8169=m -# CONFIG_NET_VENDOR_RENESAS is not set -CONFIG_NET_VENDOR_ROCKER=y -CONFIG_ROCKER=m -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SILAN is not set -# CONFIG_NET_VENDOR_SIS is not set -CONFIG_NET_VENDOR_SOLARFLARE=y -CONFIG_SFC=m -CONFIG_SFC_MTD=y -CONFIG_SFC_MCDI_MON=y -CONFIG_SFC_SRIOV=y -CONFIG_SFC_MCDI_LOGGING=y -# CONFIG_SFC_FALCON is not set -# CONFIG_SFC_SIENA is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# CONFIG_NET_VENDOR_TEHUTI is not set -# CONFIG_NET_VENDOR_TI is not set 
-CONFIG_NET_VENDOR_VERTEXCOM=y -# CONFIG_MSE102X is not set -# CONFIG_NET_VENDOR_VIA is not set -CONFIG_NET_VENDOR_WANGXUN=y -CONFIG_LIBWX=m -CONFIG_NGBE=m -CONFIG_TXGBE=m -# CONFIG_NET_VENDOR_WIZNET is not set -CONFIG_NET_VENDOR_XILINX=y -# CONFIG_XILINX_EMACLITE is not set -# CONFIG_XILINX_AXI_EMAC is not set -# CONFIG_XILINX_LL_TEMAC is not set -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set -# CONFIG_NET_SB1000 is not set -CONFIG_PHYLINK=m -CONFIG_PHYLIB=y -CONFIG_SWPHY=y -CONFIG_LED_TRIGGER_PHY=y -CONFIG_FIXED_PHY=y -CONFIG_SFP=m - -# -# MII PHY device drivers -# -CONFIG_AMD_PHY=m -# CONFIG_ADIN_PHY is not set -# CONFIG_ADIN1100_PHY is not set -CONFIG_AQUANTIA_PHY=m -CONFIG_AX88796B_PHY=m -CONFIG_BROADCOM_PHY=m -# CONFIG_BCM54140_PHY is not set -CONFIG_BCM7XXX_PHY=m -# CONFIG_BCM84881_PHY is not set -CONFIG_BCM87XX_PHY=m -CONFIG_BCM_NET_PHYLIB=m -CONFIG_BCM_NET_PHYPTP=m -CONFIG_CICADA_PHY=m -CONFIG_CORTINA_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_ICPLUS_PHY=m -CONFIG_LXT_PHY=m -CONFIG_INTEL_XWAY_PHY=m -CONFIG_LSI_ET1011C_PHY=m -CONFIG_MARVELL_PHY=m -CONFIG_MARVELL_10G_PHY=m -# CONFIG_MARVELL_88Q2XXX_PHY is not set -# CONFIG_MARVELL_88X2222_PHY is not set -# CONFIG_MAXLINEAR_GPHY is not set -# CONFIG_MEDIATEK_GE_PHY is not set -CONFIG_MICREL_PHY=m -# CONFIG_MICROCHIP_T1S_PHY is not set -CONFIG_MICROCHIP_PHY=m -CONFIG_MICROCHIP_T1_PHY=m -CONFIG_MICROSEMI_PHY=m -# CONFIG_MOTORCOMM_PHY is not set -CONFIG_NATIONAL_PHY=m -# CONFIG_NXP_CBTX_PHY is not set -# CONFIG_NXP_C45_TJA11XX_PHY is not set -# CONFIG_NXP_TJA11XX_PHY is not set -# CONFIG_NCN26000_PHY is not set -CONFIG_QSEMI_PHY=m -CONFIG_REALTEK_PHY=m -CONFIG_RENESAS_PHY=m -CONFIG_ROCKCHIP_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_STE10XP=m -CONFIG_TERANETICS_PHY=m -CONFIG_DP83822_PHY=m -CONFIG_DP83TC811_PHY=m -CONFIG_DP83848_PHY=m -CONFIG_DP83867_PHY=m -# CONFIG_DP83869_PHY is not set -# CONFIG_DP83TD510_PHY is not set -CONFIG_VITESSE_PHY=m -CONFIG_XILINX_GMII2RGMII=m -# CONFIG_MICREL_KS8995MA is not set -# 
CONFIG_PSE_CONTROLLER is not set -CONFIG_MDIO_DEVICE=y -CONFIG_MDIO_BUS=y -CONFIG_FWNODE_MDIO=y -CONFIG_ACPI_MDIO=y -CONFIG_MDIO_DEVRES=y -CONFIG_MDIO_BITBANG=m -CONFIG_MDIO_BCM_UNIMAC=m -CONFIG_MDIO_CAVIUM=m -# CONFIG_MDIO_GPIO is not set -CONFIG_MDIO_I2C=m -# CONFIG_MDIO_MVUSB is not set -CONFIG_MDIO_THUNDER=m - -# -# MDIO Multiplexers -# - -# -# PCS device drivers -# -CONFIG_PCS_XPCS=m -# end of PCS device drivers - -# CONFIG_PLIP is not set -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOATM=m -CONFIG_PPPOE=m -# CONFIG_PPPOE_HASH_BITS_1 is not set -# CONFIG_PPPOE_HASH_BITS_2 is not set -CONFIG_PPPOE_HASH_BITS_4=y -# CONFIG_PPPOE_HASH_BITS_8 is not set -CONFIG_PPPOE_HASH_BITS=4 -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_SLIP=m -CONFIG_SLHC=m -CONFIG_SLIP_COMPRESSED=y -CONFIG_SLIP_SMART=y -# CONFIG_SLIP_MODE_SLIP6 is not set -CONFIG_USB_NET_DRIVERS=y -CONFIG_USB_CATC=m -CONFIG_USB_KAWETH=m -CONFIG_USB_PEGASUS=m -CONFIG_USB_RTL8150=m -CONFIG_USB_RTL8152=m -CONFIG_USB_LAN78XX=m -CONFIG_USB_USBNET=m -CONFIG_USB_NET_AX8817X=m -CONFIG_USB_NET_AX88179_178A=m -CONFIG_USB_NET_CDCETHER=m -CONFIG_USB_NET_CDC_EEM=m -CONFIG_USB_NET_CDC_NCM=m -CONFIG_USB_NET_HUAWEI_CDC_NCM=m -CONFIG_USB_NET_CDC_MBIM=m -CONFIG_USB_NET_DM9601=m -# CONFIG_USB_NET_SR9700 is not set -# CONFIG_USB_NET_SR9800 is not set -CONFIG_USB_NET_SMSC75XX=m -CONFIG_USB_NET_SMSC95XX=m -CONFIG_USB_NET_GL620A=m -CONFIG_USB_NET_NET1080=m -CONFIG_USB_NET_PLUSB=m -CONFIG_USB_NET_MCS7830=m -CONFIG_USB_NET_RNDIS_HOST=m -CONFIG_USB_NET_CDC_SUBSET_ENABLE=m -CONFIG_USB_NET_CDC_SUBSET=m -CONFIG_USB_ALI_M5632=y -CONFIG_USB_AN2720=y -CONFIG_USB_BELKIN=y -CONFIG_USB_ARMLINUX=y -CONFIG_USB_EPSON2888=y -CONFIG_USB_KC2190=y -CONFIG_USB_NET_ZAURUS=m -CONFIG_USB_NET_CX82310_ETH=m -CONFIG_USB_NET_KALMIA=m -CONFIG_USB_NET_QMI_WWAN=m -CONFIG_USB_HSO=m -CONFIG_USB_NET_INT51X1=m -CONFIG_USB_IPHETH=m 
-CONFIG_USB_SIERRA_NET=m -CONFIG_USB_VL600=m -CONFIG_USB_NET_CH9200=m -# CONFIG_USB_NET_AQC111 is not set -CONFIG_USB_RTL8153_ECM=m -CONFIG_WLAN=y -# CONFIG_WLAN_VENDOR_ADMTEK is not set -CONFIG_ATH_COMMON=m -CONFIG_WLAN_VENDOR_ATH=y -CONFIG_ATH_DEBUG=y -# CONFIG_ATH_TRACEPOINTS is not set -# CONFIG_ATH5K is not set -# CONFIG_ATH5K_PCI is not set -CONFIG_ATH9K_HW=m -CONFIG_ATH9K_COMMON=m -CONFIG_ATH9K_COMMON_DEBUG=y -CONFIG_ATH9K_BTCOEX_SUPPORT=y -CONFIG_ATH9K=m -CONFIG_ATH9K_PCI=y -CONFIG_ATH9K_AHB=y -CONFIG_ATH9K_DEBUGFS=y -# CONFIG_ATH9K_STATION_STATISTICS is not set -# CONFIG_ATH9K_DYNACK is not set -CONFIG_ATH9K_WOW=y -CONFIG_ATH9K_RFKILL=y -# CONFIG_ATH9K_CHANNEL_CONTEXT is not set -CONFIG_ATH9K_PCOEM=y -# CONFIG_ATH9K_PCI_NO_EEPROM is not set -CONFIG_ATH9K_HTC=m -# CONFIG_ATH9K_HTC_DEBUGFS is not set -# CONFIG_ATH9K_HWRNG is not set -# CONFIG_ATH9K_COMMON_SPECTRAL is not set -# CONFIG_CARL9170 is not set -# CONFIG_ATH6KL is not set -# CONFIG_AR5523 is not set -# CONFIG_WIL6210 is not set -CONFIG_ATH10K=m -CONFIG_ATH10K_CE=y -CONFIG_ATH10K_PCI=m -# CONFIG_ATH10K_SDIO is not set -# CONFIG_ATH10K_USB is not set -CONFIG_ATH10K_DEBUG=y -# CONFIG_ATH10K_DEBUGFS is not set -CONFIG_ATH10K_TRACING=y -# CONFIG_WCN36XX is not set -# CONFIG_ATH11K is not set -# CONFIG_ATH12K is not set -# CONFIG_WLAN_VENDOR_ATMEL is not set -CONFIG_WLAN_VENDOR_BROADCOM=y -# CONFIG_B43 is not set -# CONFIG_B43LEGACY is not set -CONFIG_BRCMUTIL=m -CONFIG_BRCMSMAC=m -CONFIG_BRCMSMAC_LEDS=y -CONFIG_BRCMFMAC=m -CONFIG_BRCMFMAC_PROTO_BCDC=y -CONFIG_BRCMFMAC_PROTO_MSGBUF=y -CONFIG_BRCMFMAC_SDIO=y -CONFIG_BRCMFMAC_USB=y -CONFIG_BRCMFMAC_PCIE=y -# CONFIG_BRCM_TRACING is not set -# CONFIG_BRCMDBG is not set -# CONFIG_WLAN_VENDOR_CISCO is not set -CONFIG_WLAN_VENDOR_INTEL=y -# CONFIG_IPW2100 is not set -# CONFIG_IPW2200 is not set -# CONFIG_IWL4965 is not set -# CONFIG_IWL3945 is not set -CONFIG_IWLWIFI=m -CONFIG_IWLWIFI_LEDS=y -CONFIG_IWLDVM=m -CONFIG_IWLMVM=m -CONFIG_IWLWIFI_OPMODE_MODULAR=y - 
-# -# Debugging Options -# -CONFIG_IWLWIFI_DEBUG=y -CONFIG_IWLWIFI_DEBUGFS=y -CONFIG_IWLWIFI_DEVICE_TRACING=y -# end of Debugging Options - -# CONFIG_WLAN_VENDOR_INTERSIL is not set -CONFIG_WLAN_VENDOR_MARVELL=y -# CONFIG_LIBERTAS is not set -# CONFIG_LIBERTAS_THINFIRM is not set -CONFIG_MWIFIEX=m -CONFIG_MWIFIEX_SDIO=m -CONFIG_MWIFIEX_PCIE=m -CONFIG_MWIFIEX_USB=m -# CONFIG_MWL8K is not set -CONFIG_WLAN_VENDOR_MEDIATEK=y -CONFIG_MT7601U=m -CONFIG_MT76_CORE=m -CONFIG_MT76_LEDS=y -CONFIG_MT76_USB=m -CONFIG_MT76x02_LIB=m -CONFIG_MT76x02_USB=m -CONFIG_MT76x0_COMMON=m -CONFIG_MT76x0U=m -# CONFIG_MT76x0E is not set -CONFIG_MT76x2_COMMON=m -# CONFIG_MT76x2E is not set -CONFIG_MT76x2U=m -# CONFIG_MT7603E is not set -# CONFIG_MT7615E is not set -# CONFIG_MT7663U is not set -# CONFIG_MT7663S is not set -# CONFIG_MT7915E is not set -# CONFIG_MT7921E is not set -# CONFIG_MT7921S is not set -# CONFIG_MT7921U is not set -# CONFIG_MT7996E is not set -CONFIG_WLAN_VENDOR_MICROCHIP=y -# CONFIG_WILC1000_SDIO is not set -# CONFIG_WILC1000_SPI is not set -CONFIG_WLAN_VENDOR_PURELIFI=y -# CONFIG_PLFXLC is not set -CONFIG_WLAN_VENDOR_RALINK=y -CONFIG_RT2X00=m -# CONFIG_RT2400PCI is not set -# CONFIG_RT2500PCI is not set -# CONFIG_RT61PCI is not set -CONFIG_RT2800PCI=m -CONFIG_RT2800PCI_RT33XX=y -CONFIG_RT2800PCI_RT35XX=y -CONFIG_RT2800PCI_RT53XX=y -CONFIG_RT2800PCI_RT3290=y -# CONFIG_RT2500USB is not set -# CONFIG_RT73USB is not set -CONFIG_RT2800USB=m -CONFIG_RT2800USB_RT33XX=y -CONFIG_RT2800USB_RT35XX=y -CONFIG_RT2800USB_RT3573=y -CONFIG_RT2800USB_RT53XX=y -CONFIG_RT2800USB_RT55XX=y -CONFIG_RT2800USB_UNKNOWN=y -CONFIG_RT2800_LIB=m -CONFIG_RT2800_LIB_MMIO=m -CONFIG_RT2X00_LIB_MMIO=m -CONFIG_RT2X00_LIB_PCI=m -CONFIG_RT2X00_LIB_USB=m -CONFIG_RT2X00_LIB=m -CONFIG_RT2X00_LIB_FIRMWARE=y -CONFIG_RT2X00_LIB_CRYPTO=y -CONFIG_RT2X00_LIB_LEDS=y -CONFIG_RT2X00_LIB_DEBUGFS=y -# CONFIG_RT2X00_DEBUG is not set -CONFIG_WLAN_VENDOR_REALTEK=y -# CONFIG_RTL8180 is not set -# CONFIG_RTL8187 is not set 
-CONFIG_RTL_CARDS=m -CONFIG_RTL8192CE=m -CONFIG_RTL8192SE=m -CONFIG_RTL8192DE=m -CONFIG_RTL8723AE=m -CONFIG_RTL8723BE=m -CONFIG_RTL8188EE=m -CONFIG_RTL8192EE=m -CONFIG_RTL8821AE=m -CONFIG_RTL8192CU=m -CONFIG_RTLWIFI=m -CONFIG_RTLWIFI_PCI=m -CONFIG_RTLWIFI_USB=m -CONFIG_RTLWIFI_DEBUG=y -CONFIG_RTL8192C_COMMON=m -CONFIG_RTL8723_COMMON=m -CONFIG_RTLBTCOEXIST=m -CONFIG_RTL8XXXU=m -# CONFIG_RTL8XXXU_UNTESTED is not set -CONFIG_RTW88=m -CONFIG_RTW88_CORE=m -CONFIG_RTW88_PCI=m -CONFIG_RTW88_8822B=m -CONFIG_RTW88_8822C=m -CONFIG_RTW88_8822BE=m -# CONFIG_RTW88_8822BS is not set -# CONFIG_RTW88_8822BU is not set -CONFIG_RTW88_8822CE=m -# CONFIG_RTW88_8822CS is not set -# CONFIG_RTW88_8822CU is not set -# CONFIG_RTW88_8723DE is not set -# CONFIG_RTW88_8723DS is not set -# CONFIG_RTW88_8723DU is not set -# CONFIG_RTW88_8821CE is not set -# CONFIG_RTW88_8821CS is not set -# CONFIG_RTW88_8821CU is not set -CONFIG_RTW88_DEBUG=y -CONFIG_RTW88_DEBUGFS=y -# CONFIG_RTW89 is not set -# CONFIG_WLAN_VENDOR_RSI is not set -CONFIG_WLAN_VENDOR_SILABS=y -# CONFIG_WFX is not set -# CONFIG_WLAN_VENDOR_ST is not set -# CONFIG_WLAN_VENDOR_TI is not set -# CONFIG_WLAN_VENDOR_ZYDAS is not set -CONFIG_WLAN_VENDOR_QUANTENNA=y -# CONFIG_QTNFMAC_PCIE is not set -# CONFIG_USB_NET_RNDIS_WLAN is not set -CONFIG_MAC80211_HWSIM=m -# CONFIG_VIRT_WIFI is not set -CONFIG_WAN=y -CONFIG_HDLC=m -CONFIG_HDLC_RAW=m -# CONFIG_HDLC_RAW_ETH is not set -CONFIG_HDLC_CISCO=m -CONFIG_HDLC_FR=m -CONFIG_HDLC_PPP=m - -# -# X.25/LAPB support is disabled -# -# CONFIG_PCI200SYN is not set -# CONFIG_WANXL is not set -# CONFIG_PC300TOO is not set -# CONFIG_FARSYNC is not set -CONFIG_IEEE802154_DRIVERS=m -CONFIG_IEEE802154_FAKELB=m -# CONFIG_IEEE802154_AT86RF230 is not set -# CONFIG_IEEE802154_MRF24J40 is not set -# CONFIG_IEEE802154_CC2520 is not set -# CONFIG_IEEE802154_ATUSB is not set -# CONFIG_IEEE802154_ADF7242 is not set -# CONFIG_IEEE802154_CA8210 is not set -# CONFIG_IEEE802154_MCR20A is not set -# 
CONFIG_IEEE802154_HWSIM is not set - -# -# Wireless WAN -# -# CONFIG_WWAN is not set -# end of Wireless WAN - -CONFIG_XEN_NETDEV_FRONTEND=m -CONFIG_VMXNET3=m -CONFIG_FUJITSU_ES=m -CONFIG_HYPERV_NET=m -CONFIG_NETDEVSIM=m -CONFIG_NET_FAILOVER=m -CONFIG_ISDN=y -CONFIG_ISDN_CAPI=y -CONFIG_CAPI_TRACE=y -CONFIG_ISDN_CAPI_MIDDLEWARE=y -CONFIG_MISDN=m -CONFIG_MISDN_DSP=m -CONFIG_MISDN_L1OIP=m - -# -# mISDN hardware drivers -# -CONFIG_MISDN_HFCPCI=m -CONFIG_MISDN_HFCMULTI=m -CONFIG_MISDN_HFCUSB=m -CONFIG_MISDN_AVMFRITZ=m -CONFIG_MISDN_SPEEDFAX=m -CONFIG_MISDN_INFINEON=m -CONFIG_MISDN_W6692=m -CONFIG_MISDN_NETJET=m -CONFIG_MISDN_HDLC=m -CONFIG_MISDN_IPAC=m -CONFIG_MISDN_ISAR=m - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_LEDS=y -CONFIG_INPUT_FF_MEMLESS=m -CONFIG_INPUT_SPARSEKMAP=m -# CONFIG_INPUT_MATRIXKMAP is not set -CONFIG_INPUT_VIVALDIFMAP=y - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=m -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -CONFIG_INPUT_JOYDEV=m -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -CONFIG_INPUT_KEYBOARD=y -# CONFIG_KEYBOARD_ADC is not set -# CONFIG_KEYBOARD_ADP5588 is not set -# CONFIG_KEYBOARD_ADP5589 is not set -# CONFIG_KEYBOARD_APPLESPI is not set -CONFIG_KEYBOARD_ATKBD=y -# CONFIG_KEYBOARD_QT1050 is not set -# CONFIG_KEYBOARD_QT1070 is not set -# CONFIG_KEYBOARD_QT2160 is not set -# CONFIG_KEYBOARD_DLINK_DIR685 is not set -# CONFIG_KEYBOARD_LKKBD is not set -# CONFIG_KEYBOARD_GPIO is not set -# CONFIG_KEYBOARD_GPIO_POLLED is not set -# CONFIG_KEYBOARD_TCA6416 is not set -# CONFIG_KEYBOARD_TCA8418 is not set -# CONFIG_KEYBOARD_MATRIX is not set -# CONFIG_KEYBOARD_LM8323 is not set -# CONFIG_KEYBOARD_LM8333 is not set -# CONFIG_KEYBOARD_MAX7359 is not set -# CONFIG_KEYBOARD_MCS is not set -# CONFIG_KEYBOARD_MPR121 is not set -# CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# 
CONFIG_KEYBOARD_SAMSUNG is not set -# CONFIG_KEYBOARD_STOWAWAY is not set -# CONFIG_KEYBOARD_SUNKBD is not set -# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set -# CONFIG_KEYBOARD_XTKBD is not set -# CONFIG_KEYBOARD_CYPRESS_SF is not set -CONFIG_INPUT_MOUSE=y -CONFIG_MOUSE_PS2=m -CONFIG_MOUSE_PS2_ALPS=y -CONFIG_MOUSE_PS2_BYD=y -CONFIG_MOUSE_PS2_LOGIPS2PP=y -CONFIG_MOUSE_PS2_SYNAPTICS=y -CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y -CONFIG_MOUSE_PS2_CYPRESS=y -CONFIG_MOUSE_PS2_LIFEBOOK=y -CONFIG_MOUSE_PS2_TRACKPOINT=y -CONFIG_MOUSE_PS2_ELANTECH=y -CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y -CONFIG_MOUSE_PS2_SENTELIC=y -# CONFIG_MOUSE_PS2_TOUCHKIT is not set -CONFIG_MOUSE_PS2_FOCALTECH=y -CONFIG_MOUSE_PS2_VMMOUSE=y -CONFIG_MOUSE_PS2_SMBUS=y -CONFIG_MOUSE_SERIAL=m -CONFIG_MOUSE_APPLETOUCH=m -CONFIG_MOUSE_BCM5974=m -CONFIG_MOUSE_CYAPA=m -CONFIG_MOUSE_ELAN_I2C=m -CONFIG_MOUSE_ELAN_I2C_I2C=y -# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set -CONFIG_MOUSE_VSXXXAA=m -# CONFIG_MOUSE_GPIO is not set -CONFIG_MOUSE_SYNAPTICS_I2C=m -CONFIG_MOUSE_SYNAPTICS_USB=m -# CONFIG_INPUT_JOYSTICK is not set -CONFIG_INPUT_TABLET=y -CONFIG_TABLET_USB_ACECAD=m -CONFIG_TABLET_USB_AIPTEK=m -# CONFIG_TABLET_USB_HANWANG is not set -CONFIG_TABLET_USB_KBTAB=m -# CONFIG_TABLET_USB_PEGASUS is not set -CONFIG_TABLET_SERIAL_WACOM4=m -CONFIG_INPUT_TOUCHSCREEN=y -# CONFIG_TOUCHSCREEN_ADS7846 is not set -# CONFIG_TOUCHSCREEN_AD7877 is not set -# CONFIG_TOUCHSCREEN_AD7879 is not set -# CONFIG_TOUCHSCREEN_ADC is not set -# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set -# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set -# CONFIG_TOUCHSCREEN_BU21013 is not set -# CONFIG_TOUCHSCREEN_BU21029 is not set -# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set -# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set -# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set -# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set -# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set -# CONFIG_TOUCHSCREEN_CYTTSP5 is not set -# CONFIG_TOUCHSCREEN_DYNAPRO is not set -# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set -# 
CONFIG_TOUCHSCREEN_EETI is not set -# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set -# CONFIG_TOUCHSCREEN_EXC3000 is not set -# CONFIG_TOUCHSCREEN_FUJITSU is not set -# CONFIG_TOUCHSCREEN_GOODIX is not set -# CONFIG_TOUCHSCREEN_HIDEEP is not set -# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set -# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set -# CONFIG_TOUCHSCREEN_ILI210X is not set -# CONFIG_TOUCHSCREEN_ILITEK is not set -# CONFIG_TOUCHSCREEN_S6SY761 is not set -# CONFIG_TOUCHSCREEN_GUNZE is not set -# CONFIG_TOUCHSCREEN_EKTF2127 is not set -# CONFIG_TOUCHSCREEN_ELAN is not set -CONFIG_TOUCHSCREEN_ELO=m -CONFIG_TOUCHSCREEN_WACOM_W8001=m -CONFIG_TOUCHSCREEN_WACOM_I2C=m -# CONFIG_TOUCHSCREEN_MAX11801 is not set -# CONFIG_TOUCHSCREEN_MCS5000 is not set -# CONFIG_TOUCHSCREEN_MMS114 is not set -# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set -# CONFIG_TOUCHSCREEN_MSG2638 is not set -# CONFIG_TOUCHSCREEN_MTOUCH is not set -# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set -# CONFIG_TOUCHSCREEN_IMAGIS is not set -# CONFIG_TOUCHSCREEN_INEXIO is not set -# CONFIG_TOUCHSCREEN_PENMOUNT is not set -# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set -# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set -# CONFIG_TOUCHSCREEN_TOUCHWIN is not set -# CONFIG_TOUCHSCREEN_PIXCIR is not set -# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set -# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set -# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set -# CONFIG_TOUCHSCREEN_TSC_SERIO is not set -# CONFIG_TOUCHSCREEN_TSC2004 is not set -# CONFIG_TOUCHSCREEN_TSC2005 is not set -# CONFIG_TOUCHSCREEN_TSC2007 is not set -# CONFIG_TOUCHSCREEN_RM_TS is not set -# CONFIG_TOUCHSCREEN_SILEAD is not set -# CONFIG_TOUCHSCREEN_SIS_I2C is not set -# CONFIG_TOUCHSCREEN_ST1232 is not set -# CONFIG_TOUCHSCREEN_STMFTS is not set -# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set -# CONFIG_TOUCHSCREEN_SX8654 is not set -# CONFIG_TOUCHSCREEN_TPS6507X is not set -# CONFIG_TOUCHSCREEN_ZET6223 is not set -# CONFIG_TOUCHSCREEN_ZFORCE is not set -# 
CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set -# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set -# CONFIG_TOUCHSCREEN_IQS5XX is not set -# CONFIG_TOUCHSCREEN_IQS7211 is not set -# CONFIG_TOUCHSCREEN_ZINITIX is not set -# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set -CONFIG_INPUT_MISC=y -# CONFIG_INPUT_AD714X is not set -# CONFIG_INPUT_BMA150 is not set -# CONFIG_INPUT_E3X0_BUTTON is not set -CONFIG_INPUT_PCSPKR=m -# CONFIG_INPUT_MMA8450 is not set -CONFIG_INPUT_APANEL=m -# CONFIG_INPUT_GPIO_BEEPER is not set -# CONFIG_INPUT_GPIO_DECODER is not set -# CONFIG_INPUT_GPIO_VIBRA is not set -CONFIG_INPUT_ATLAS_BTNS=m -CONFIG_INPUT_ATI_REMOTE2=m -CONFIG_INPUT_KEYSPAN_REMOTE=m -# CONFIG_INPUT_KXTJ9 is not set -CONFIG_INPUT_POWERMATE=m -CONFIG_INPUT_YEALINK=m -CONFIG_INPUT_CM109=m -CONFIG_INPUT_UINPUT=m -# CONFIG_INPUT_PCF8574 is not set -# CONFIG_INPUT_PWM_BEEPER is not set -# CONFIG_INPUT_PWM_VIBRA is not set -CONFIG_INPUT_GPIO_ROTARY_ENCODER=m -# CONFIG_INPUT_DA7280_HAPTICS is not set -# CONFIG_INPUT_ADXL34X is not set -# CONFIG_INPUT_IMS_PCU is not set -# CONFIG_INPUT_IQS269A is not set -# CONFIG_INPUT_IQS626A is not set -# CONFIG_INPUT_IQS7222 is not set -# CONFIG_INPUT_CMA3000 is not set -CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m -# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set -# CONFIG_INPUT_DRV260X_HAPTICS is not set -# CONFIG_INPUT_DRV2665_HAPTICS is not set -# CONFIG_INPUT_DRV2667_HAPTICS is not set -CONFIG_RMI4_CORE=m -CONFIG_RMI4_I2C=m -# CONFIG_RMI4_SPI is not set -CONFIG_RMI4_SMB=m -CONFIG_RMI4_F03=y -CONFIG_RMI4_F03_SERIO=m -CONFIG_RMI4_2D_SENSOR=y -CONFIG_RMI4_F11=y -CONFIG_RMI4_F12=y -CONFIG_RMI4_F30=y -# CONFIG_RMI4_F34 is not set -# CONFIG_RMI4_F3A is not set -CONFIG_RMI4_F55=y - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y -CONFIG_SERIO_I8042=y -CONFIG_SERIO_SERPORT=y -# CONFIG_SERIO_CT82C710 is not set -# CONFIG_SERIO_PARKBD is not set -# CONFIG_SERIO_PCIPS2 is not set -CONFIG_SERIO_LIBPS2=y -CONFIG_SERIO_RAW=m -CONFIG_SERIO_ALTERA_PS2=m 
-# CONFIG_SERIO_PS2MULT is not set -CONFIG_SERIO_ARC_PS2=m -CONFIG_HYPERV_KEYBOARD=m -# CONFIG_SERIO_GPIO_PS2 is not set -# CONFIG_USERIO is not set -# CONFIG_GAMEPORT is not set -# end of Hardware I/O ports -# end of Input device support - -# -# Character devices -# -CONFIG_TTY=y -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_VT_CONSOLE_SLEEP=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -# CONFIG_LEGACY_PTYS is not set -CONFIG_LEGACY_TIOCSTI=y -CONFIG_LDISC_AUTOLOAD=y - -# -# Serial drivers -# -CONFIG_SERIAL_EARLYCON=y -CONFIG_SERIAL_8250=y -# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set -CONFIG_SERIAL_8250_PNP=y -# CONFIG_SERIAL_8250_16550A_VARIANTS is not set -# CONFIG_SERIAL_8250_FINTEK is not set -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_DMA=y -CONFIG_SERIAL_8250_PCILIB=y -CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_EXAR=y -CONFIG_SERIAL_8250_NR_UARTS=32 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -# CONFIG_SERIAL_8250_PCI1XXXX is not set -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_SERIAL_8250_DETECT_IRQ is not set -CONFIG_SERIAL_8250_RSA=y -CONFIG_SERIAL_8250_DWLIB=y -CONFIG_SERIAL_8250_DW=y -# CONFIG_SERIAL_8250_RT288X is not set -CONFIG_SERIAL_8250_LPSS=y -CONFIG_SERIAL_8250_MID=y -CONFIG_SERIAL_8250_PERICOM=y - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_KGDB_NMI is not set -# CONFIG_SERIAL_MAX3100 is not set -# CONFIG_SERIAL_MAX310X is not set -# CONFIG_SERIAL_UARTLITE is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_CONSOLE_POLL=y -CONFIG_SERIAL_JSM=m -# CONFIG_SERIAL_LANTIQ is not set -# CONFIG_SERIAL_SCCNXP is not set -# CONFIG_SERIAL_SC16IS7XX is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -CONFIG_SERIAL_ARC=m -CONFIG_SERIAL_ARC_NR_PORTS=1 -# CONFIG_SERIAL_RP2 is not set -# CONFIG_SERIAL_FSL_LPUART is not set -# CONFIG_SERIAL_FSL_LINFLEXUART is not 
set -# CONFIG_SERIAL_SPRD is not set -# end of Serial drivers - -CONFIG_SERIAL_MCTRL_GPIO=y -CONFIG_SERIAL_NONSTANDARD=y -# CONFIG_MOXA_INTELLIO is not set -# CONFIG_MOXA_SMARTIO is not set -CONFIG_N_HDLC=m -CONFIG_N_GSM=m -CONFIG_NOZOMI=m -# CONFIG_NULL_TTY is not set -CONFIG_HVC_DRIVER=y -CONFIG_HVC_IRQ=y -CONFIG_HVC_XEN=y -CONFIG_HVC_XEN_FRONTEND=y -# CONFIG_SERIAL_DEV_BUS is not set -CONFIG_PRINTER=m -# CONFIG_LP_CONSOLE is not set -CONFIG_PPDEV=m -CONFIG_VIRTIO_CONSOLE=m -CONFIG_IPMI_HANDLER=m -CONFIG_IPMI_DMI_DECODE=y -CONFIG_IPMI_PLAT_DATA=y -CONFIG_IPMI_PANIC_EVENT=y -CONFIG_IPMI_PANIC_STRING=y -CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m -CONFIG_IPMI_SSIF=m -CONFIG_IPMI_WATCHDOG=m -CONFIG_IPMI_POWEROFF=m -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=m -CONFIG_HW_RANDOM_INTEL=m -CONFIG_HW_RANDOM_AMD=m -# CONFIG_HW_RANDOM_BA431 is not set -CONFIG_HW_RANDOM_VIA=m -CONFIG_HW_RANDOM_ZHAOXIN=m -CONFIG_HW_RANDOM_VIRTIO=y -# CONFIG_HW_RANDOM_XIPHERA is not set -# CONFIG_APPLICOM is not set -# CONFIG_MWAVE is not set -CONFIG_DEVMEM=y -CONFIG_NVRAM=y -CONFIG_DEVPORT=y -CONFIG_HPET=y -CONFIG_HPET_MMAP=y -# CONFIG_HPET_MMAP_DEFAULT is not set -CONFIG_HANGCHECK_TIMER=m -CONFIG_UV_MMTIMER=m -CONFIG_TCG_TPM=y -CONFIG_HW_RANDOM_TPM=y -CONFIG_TCG_TIS_CORE=y -CONFIG_TCG_TIS=y -# CONFIG_TCG_TIS_SPI is not set -# CONFIG_TCG_TIS_I2C is not set -# CONFIG_TCG_TIS_I2C_CR50 is not set -CONFIG_TCG_TIS_I2C_ATMEL=m -CONFIG_TCG_TIS_I2C_INFINEON=m -CONFIG_TCG_TIS_I2C_NUVOTON=m -CONFIG_TCG_NSC=m -CONFIG_TCG_ATMEL=m -CONFIG_TCG_INFINEON=m -# CONFIG_TCG_XEN is not set -CONFIG_TCG_CRB=y -# CONFIG_TCG_VTPM_PROXY is not set -CONFIG_TCG_HYGON=m -CONFIG_TCM_HYGON=m -CONFIG_TCG_TIS_ST33ZP24=m -CONFIG_TCG_TIS_ST33ZP24_I2C=m -# CONFIG_TCG_TIS_ST33ZP24_SPI is not set -CONFIG_TELCLOCK=m -# CONFIG_XILLYBUS is not set -# CONFIG_XILLYUSB is not set -# end of Character devices - -# -# I2C support -# -CONFIG_I2C=y -CONFIG_ACPI_I2C_OPREGION=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y 
-CONFIG_I2C_CHARDEV=m -CONFIG_I2C_MUX=m - -# -# Multiplexer I2C Chip support -# -# CONFIG_I2C_MUX_GPIO is not set -# CONFIG_I2C_MUX_LTC4306 is not set -# CONFIG_I2C_MUX_PCA9541 is not set -# CONFIG_I2C_MUX_PCA954x is not set -# CONFIG_I2C_MUX_REG is not set -CONFIG_I2C_MUX_MLXCPLD=m -# end of Multiplexer I2C Chip support - -CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_SMBUS=m -CONFIG_I2C_ALGOBIT=m -CONFIG_I2C_ALGOPCA=m - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set -CONFIG_I2C_AMD756=m -CONFIG_I2C_AMD756_S4882=m -CONFIG_I2C_AMD8111=m -# CONFIG_I2C_AMD_MP2 is not set -CONFIG_I2C_I801=m -CONFIG_I2C_ISCH=m -CONFIG_I2C_ISMT=m -CONFIG_I2C_PIIX4=m -CONFIG_I2C_NFORCE2=m -CONFIG_I2C_NFORCE2_S4985=m -# CONFIG_I2C_NVIDIA_GPU is not set -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -CONFIG_I2C_SIS96X=m -CONFIG_I2C_VIA=m -CONFIG_I2C_VIAPRO=m -CONFIG_I2C_ZHAOXIN=m - -# -# ACPI drivers -# -CONFIG_I2C_SCMI=m -CONFIG_I2C_ZHAOXIN_SMBUS=m - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_CBUS_GPIO is not set -CONFIG_I2C_DESIGNWARE_CORE=m -# CONFIG_I2C_DESIGNWARE_SLAVE is not set -CONFIG_I2C_DESIGNWARE_PLATFORM=m -# CONFIG_I2C_DESIGNWARE_AMDPSP is not set -CONFIG_I2C_DESIGNWARE_BAYTRAIL=y -# CONFIG_I2C_DESIGNWARE_PCI is not set -# CONFIG_I2C_EMEV2 is not set -# CONFIG_I2C_GPIO is not set -# CONFIG_I2C_OCORES is not set -CONFIG_I2C_PCA_PLATFORM=m -CONFIG_I2C_SIMTEC=m -# CONFIG_I2C_XILINX is not set - -# -# External I2C/SMBus adapter drivers -# -CONFIG_I2C_DIOLAN_U2C=m -# CONFIG_I2C_CP2615 is not set -CONFIG_I2C_PARPORT=m -# CONFIG_I2C_PCI1XXXX is not set -# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -# CONFIG_I2C_TAOS_EVM is not set -CONFIG_I2C_TINY_USB=m -CONFIG_I2C_VIPERBOARD=m - -# -# Other I2C/SMBus bus drivers -# -CONFIG_I2C_MLXCPLD=m -# CONFIG_I2C_VIRTIO is not set -# end of I2C Hardware Bus support - 
-CONFIG_I2C_STUB=m -# CONFIG_I2C_SLAVE is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -# end of I2C support - -# CONFIG_I3C is not set -CONFIG_SPI=y -# CONFIG_SPI_DEBUG is not set -CONFIG_SPI_MASTER=y -# CONFIG_SPI_MEM is not set - -# -# SPI Master Controller Drivers -# -# CONFIG_SPI_ALTERA is not set -# CONFIG_SPI_AXI_SPI_ENGINE is not set -# CONFIG_SPI_BITBANG is not set -# CONFIG_SPI_BUTTERFLY is not set -# CONFIG_SPI_CADENCE is not set -# CONFIG_SPI_DESIGNWARE is not set -# CONFIG_SPI_GPIO is not set -# CONFIG_SPI_LM70_LLP is not set -# CONFIG_SPI_MICROCHIP_CORE is not set -# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set -# CONFIG_SPI_LANTIQ_SSC is not set -# CONFIG_SPI_OC_TINY is not set -# CONFIG_SPI_PCI1XXXX is not set -# CONFIG_SPI_PXA2XX is not set -# CONFIG_SPI_SC18IS602 is not set -# CONFIG_SPI_SIFIVE is not set -# CONFIG_SPI_MXIC is not set -# CONFIG_SPI_XCOMM is not set -# CONFIG_SPI_XILINX is not set -# CONFIG_SPI_AMD is not set - -# -# SPI Multiplexer support -# -# CONFIG_SPI_MUX is not set - -# -# SPI Protocol Masters -# -# CONFIG_SPI_SPIDEV is not set -# CONFIG_SPI_LOOPBACK_TEST is not set -# CONFIG_SPI_TLE62X0 is not set -# CONFIG_SPI_SLAVE is not set -CONFIG_SPI_DYNAMIC=y -# CONFIG_SPMI is not set -# CONFIG_HSI is not set -CONFIG_PPS=y -# CONFIG_PPS_DEBUG is not set - -# -# PPS clients support -# -# CONFIG_PPS_CLIENT_KTIMER is not set -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PPS_CLIENT_PARPORT=m -CONFIG_PPS_CLIENT_GPIO=m - -# -# PPS generators support -# - -# -# PTP clock support -# -CONFIG_PTP_1588_CLOCK=y -CONFIG_PTP_1588_CLOCK_OPTIONAL=y -CONFIG_DP83640_PHY=m -# CONFIG_PTP_1588_CLOCK_INES is not set -CONFIG_PTP_1588_CLOCK_KVM=m -# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set -# CONFIG_PTP_1588_CLOCK_IDTCM is not set -# CONFIG_PTP_1588_CLOCK_MOCK is not set -# CONFIG_PTP_1588_CLOCK_VMW is not set -# CONFIG_PTP_1588_CLOCK_OCP is not set -# end of PTP clock support - -CONFIG_PINCTRL=y 
-CONFIG_PINMUX=y -CONFIG_PINCONF=y -CONFIG_GENERIC_PINCONF=y -# CONFIG_DEBUG_PINCTRL is not set -# CONFIG_PINCTRL_AMD is not set -# CONFIG_PINCTRL_CY8C95X0 is not set -# CONFIG_PINCTRL_MCP23S08 is not set -# CONFIG_PINCTRL_SX150X is not set - -# -# Intel pinctrl drivers -# -CONFIG_PINCTRL_BAYTRAIL=y -# CONFIG_PINCTRL_CHERRYVIEW is not set -# CONFIG_PINCTRL_LYNXPOINT is not set -CONFIG_PINCTRL_INTEL=y -# CONFIG_PINCTRL_ALDERLAKE is not set -CONFIG_PINCTRL_BROXTON=m -CONFIG_PINCTRL_CANNONLAKE=m -CONFIG_PINCTRL_CEDARFORK=m -CONFIG_PINCTRL_DENVERTON=m -# CONFIG_PINCTRL_ELKHARTLAKE is not set -# CONFIG_PINCTRL_EMMITSBURG is not set -CONFIG_PINCTRL_GEMINILAKE=m -CONFIG_PINCTRL_ICELAKE=m -# CONFIG_PINCTRL_JASPERLAKE is not set -# CONFIG_PINCTRL_LAKEFIELD is not set -CONFIG_PINCTRL_LEWISBURG=m -# CONFIG_PINCTRL_METEORLAKE is not set -CONFIG_PINCTRL_SUNRISEPOINT=m -# CONFIG_PINCTRL_TIGERLAKE is not set -# end of Intel pinctrl drivers - -CONFIG_PINCTRL_ZHAOXIN=m -CONFIG_PINCTRL_KX7000=m - -# -# Renesas pinctrl drivers -# -# end of Renesas pinctrl drivers - -CONFIG_GPIOLIB=y -CONFIG_GPIOLIB_FASTPATH_LIMIT=512 -CONFIG_GPIO_ACPI=y -CONFIG_GPIOLIB_IRQCHIP=y -# CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_CDEV=y -CONFIG_GPIO_CDEV_V1=y -CONFIG_GPIO_GENERIC=m - -# -# Memory mapped GPIO drivers -# -CONFIG_GPIO_AMDPT=m -# CONFIG_GPIO_DWAPB is not set -# CONFIG_GPIO_EXAR is not set -# CONFIG_GPIO_GENERIC_PLATFORM is not set -CONFIG_GPIO_ICH=m -# CONFIG_GPIO_MB86S7X is not set -# CONFIG_GPIO_AMD_FCH is not set -# end of Memory mapped GPIO drivers - -# -# Port-mapped I/O GPIO drivers -# -# CONFIG_GPIO_VX855 is not set -# CONFIG_GPIO_F7188X is not set -# CONFIG_GPIO_IT87 is not set -# CONFIG_GPIO_SCH is not set -# CONFIG_GPIO_SCH311X is not set -# CONFIG_GPIO_WINBOND is not set -# CONFIG_GPIO_WS16C48 is not set -# end of Port-mapped I/O GPIO drivers - -# -# I2C GPIO expanders -# -# CONFIG_GPIO_FXL6408 is not set -# CONFIG_GPIO_DS4520 is not set -# CONFIG_GPIO_MAX7300 is not set -# 
CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not set -# CONFIG_GPIO_PCA9570 is not set -# CONFIG_GPIO_PCF857X is not set -# CONFIG_GPIO_TPIC2810 is not set -# end of I2C GPIO expanders - -# -# MFD GPIO expanders -# -# CONFIG_GPIO_ELKHARTLAKE is not set -# end of MFD GPIO expanders - -# -# PCI GPIO expanders -# -# CONFIG_GPIO_AMD8111 is not set -# CONFIG_GPIO_BT8XX is not set -# CONFIG_GPIO_ML_IOH is not set -# CONFIG_GPIO_PCI_IDIO_16 is not set -# CONFIG_GPIO_PCIE_IDIO_24 is not set -# CONFIG_GPIO_RDC321X is not set -# end of PCI GPIO expanders - -# -# SPI GPIO expanders -# -# CONFIG_GPIO_MAX3191X is not set -# CONFIG_GPIO_MAX7301 is not set -# CONFIG_GPIO_MC33880 is not set -# CONFIG_GPIO_PISOSR is not set -# CONFIG_GPIO_XRA1403 is not set -# end of SPI GPIO expanders - -# -# USB GPIO expanders -# -CONFIG_GPIO_VIPERBOARD=m -# end of USB GPIO expanders - -# -# Virtual GPIO drivers -# -# CONFIG_GPIO_AGGREGATOR is not set -# CONFIG_GPIO_LATCH is not set -# CONFIG_GPIO_MOCKUP is not set -# CONFIG_GPIO_VIRTIO is not set -# CONFIG_GPIO_SIM is not set -# end of Virtual GPIO drivers - -# CONFIG_W1 is not set -CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_RESTART is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -CONFIG_POWER_SUPPLY_HWMON=y -# CONFIG_GENERIC_ADC_BATTERY is not set -# CONFIG_IP5XXX_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_CHARGER_ADP5061 is not set -# CONFIG_BATTERY_CW2015 is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2781 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_SAMSUNG_SDI is not set -# CONFIG_BATTERY_SBS is not set -# CONFIG_CHARGER_SBS is not set -# CONFIG_MANAGER_SBS is not set -# CONFIG_BATTERY_BQ27XXX is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_GPIO is not set -# CONFIG_CHARGER_LT3651 is not set -# CONFIG_CHARGER_LTC4162L is 
not set -# CONFIG_CHARGER_MAX77976 is not set -# CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_BQ24257 is not set -# CONFIG_CHARGER_BQ24735 is not set -# CONFIG_CHARGER_BQ2515X is not set -# CONFIG_CHARGER_BQ25890 is not set -# CONFIG_CHARGER_BQ25980 is not set -# CONFIG_CHARGER_BQ256XX is not set -# CONFIG_BATTERY_GAUGE_LTC2941 is not set -# CONFIG_BATTERY_GOLDFISH is not set -# CONFIG_BATTERY_RT5033 is not set -# CONFIG_CHARGER_RT9455 is not set -# CONFIG_CHARGER_BD99954 is not set -# CONFIG_BATTERY_UG3105 is not set -CONFIG_HWMON=y -CONFIG_HWMON_VID=m -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -CONFIG_SENSORS_ABITUGURU=m -CONFIG_SENSORS_ABITUGURU3=m -# CONFIG_SENSORS_AD7314 is not set -CONFIG_SENSORS_AD7414=m -CONFIG_SENSORS_AD7418=m -CONFIG_SENSORS_ADM1025=m -CONFIG_SENSORS_ADM1026=m -CONFIG_SENSORS_ADM1029=m -CONFIG_SENSORS_ADM1031=m -# CONFIG_SENSORS_ADM1177 is not set -CONFIG_SENSORS_ADM9240=m -CONFIG_SENSORS_ADT7X10=m -# CONFIG_SENSORS_ADT7310 is not set -CONFIG_SENSORS_ADT7410=m -CONFIG_SENSORS_ADT7411=m -CONFIG_SENSORS_ADT7462=m -CONFIG_SENSORS_ADT7470=m -CONFIG_SENSORS_ADT7475=m -# CONFIG_SENSORS_AHT10 is not set -# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set -# CONFIG_SENSORS_AS370 is not set -CONFIG_SENSORS_ASC7621=m -# CONFIG_SENSORS_AXI_FAN_CONTROL is not set -CONFIG_SENSORS_K8TEMP=m -CONFIG_SENSORS_K10TEMP=m -CONFIG_SENSORS_FAM15H_POWER=m -CONFIG_SENSORS_APPLESMC=m -CONFIG_SENSORS_ASB100=m -CONFIG_SENSORS_ATXP1=m -# CONFIG_SENSORS_CORSAIR_CPRO is not set -# CONFIG_SENSORS_CORSAIR_PSU is not set -# CONFIG_SENSORS_DRIVETEMP is not set -CONFIG_SENSORS_DS620=m -CONFIG_SENSORS_DS1621=m -CONFIG_SENSORS_DELL_SMM=m -# CONFIG_I8K is not set -CONFIG_SENSORS_I5K_AMB=m -CONFIG_SENSORS_F71805F=m -CONFIG_SENSORS_F71882FG=m -CONFIG_SENSORS_F75375S=m -CONFIG_SENSORS_FSCHMD=m -# CONFIG_SENSORS_FTSTEUTATES is not set -CONFIG_SENSORS_GL518SM=m -CONFIG_SENSORS_GL520SM=m -CONFIG_SENSORS_G760A=m -# CONFIG_SENSORS_G762 is not set -# 
CONFIG_SENSORS_HIH6130 is not set -# CONFIG_SENSORS_HS3001 is not set -CONFIG_SENSORS_IBMAEM=m -CONFIG_SENSORS_IBMPEX=m -# CONFIG_SENSORS_IIO_HWMON is not set -CONFIG_SENSORS_I5500=m -CONFIG_SENSORS_CORETEMP=m -CONFIG_SENSORS_IT87=m -CONFIG_SENSORS_JC42=m -# CONFIG_SENSORS_POWR1220 is not set -CONFIG_SENSORS_LINEAGE=m -# CONFIG_SENSORS_LTC2945 is not set -# CONFIG_SENSORS_LTC2947_I2C is not set -# CONFIG_SENSORS_LTC2947_SPI is not set -# CONFIG_SENSORS_LTC2990 is not set -# CONFIG_SENSORS_LTC2992 is not set -CONFIG_SENSORS_LTC4151=m -CONFIG_SENSORS_LTC4215=m -# CONFIG_SENSORS_LTC4222 is not set -CONFIG_SENSORS_LTC4245=m -# CONFIG_SENSORS_LTC4260 is not set -CONFIG_SENSORS_LTC4261=m -# CONFIG_SENSORS_MAX1111 is not set -# CONFIG_SENSORS_MAX127 is not set -CONFIG_SENSORS_MAX16065=m -CONFIG_SENSORS_MAX1619=m -CONFIG_SENSORS_MAX1668=m -CONFIG_SENSORS_MAX197=m -# CONFIG_SENSORS_MAX31722 is not set -# CONFIG_SENSORS_MAX31730 is not set -# CONFIG_SENSORS_MAX31760 is not set -# CONFIG_MAX31827 is not set -# CONFIG_SENSORS_MAX6620 is not set -# CONFIG_SENSORS_MAX6621 is not set -CONFIG_SENSORS_MAX6639=m -CONFIG_SENSORS_MAX6650=m -CONFIG_SENSORS_MAX6697=m -# CONFIG_SENSORS_MAX31790 is not set -# CONFIG_SENSORS_MC34VR500 is not set -CONFIG_SENSORS_MCP3021=m -# CONFIG_SENSORS_MLXREG_FAN is not set -# CONFIG_SENSORS_TC654 is not set -# CONFIG_SENSORS_TPS23861 is not set -# CONFIG_SENSORS_MR75203 is not set -# CONFIG_SENSORS_ADCXX is not set -CONFIG_SENSORS_LM63=m -# CONFIG_SENSORS_LM70 is not set -CONFIG_SENSORS_LM73=m -CONFIG_SENSORS_LM75=m -CONFIG_SENSORS_LM77=m -CONFIG_SENSORS_LM78=m -CONFIG_SENSORS_LM80=m -CONFIG_SENSORS_LM83=m -CONFIG_SENSORS_LM85=m -CONFIG_SENSORS_LM87=m -CONFIG_SENSORS_LM90=m -CONFIG_SENSORS_LM92=m -CONFIG_SENSORS_LM93=m -CONFIG_SENSORS_LM95234=m -CONFIG_SENSORS_LM95241=m -CONFIG_SENSORS_LM95245=m -CONFIG_SENSORS_PC87360=m -CONFIG_SENSORS_PC87427=m -CONFIG_SENSORS_NTC_THERMISTOR=m -# CONFIG_SENSORS_NCT6683 is not set -CONFIG_SENSORS_NCT6775_CORE=m 
-CONFIG_SENSORS_NCT6775=m -# CONFIG_SENSORS_NCT6775_I2C is not set -# CONFIG_SENSORS_NCT7802 is not set -# CONFIG_SENSORS_NCT7904 is not set -# CONFIG_SENSORS_NPCM7XX is not set -# CONFIG_SENSORS_NZXT_KRAKEN2 is not set -# CONFIG_SENSORS_NZXT_SMART2 is not set -# CONFIG_SENSORS_OCC_P8_I2C is not set -# CONFIG_SENSORS_OXP is not set -CONFIG_SENSORS_PCF8591=m -CONFIG_PMBUS=m -CONFIG_SENSORS_PMBUS=m -# CONFIG_SENSORS_ACBEL_FSG032 is not set -# CONFIG_SENSORS_ADM1266 is not set -CONFIG_SENSORS_ADM1275=m -# CONFIG_SENSORS_BEL_PFE is not set -# CONFIG_SENSORS_BPA_RS600 is not set -# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set -# CONFIG_SENSORS_FSP_3Y is not set -# CONFIG_SENSORS_IBM_CFFPS is not set -# CONFIG_SENSORS_DPS920AB is not set -# CONFIG_SENSORS_INSPUR_IPSPS is not set -# CONFIG_SENSORS_IR35221 is not set -# CONFIG_SENSORS_IR36021 is not set -# CONFIG_SENSORS_IR38064 is not set -# CONFIG_SENSORS_IRPS5401 is not set -# CONFIG_SENSORS_ISL68137 is not set -CONFIG_SENSORS_LM25066=m -# CONFIG_SENSORS_LT7182S is not set -CONFIG_SENSORS_LTC2978=m -# CONFIG_SENSORS_LTC3815 is not set -# CONFIG_SENSORS_MAX15301 is not set -CONFIG_SENSORS_MAX16064=m -# CONFIG_SENSORS_MAX16601 is not set -# CONFIG_SENSORS_MAX20730 is not set -# CONFIG_SENSORS_MAX20751 is not set -# CONFIG_SENSORS_MAX31785 is not set -CONFIG_SENSORS_MAX34440=m -CONFIG_SENSORS_MAX8688=m -# CONFIG_SENSORS_MP2888 is not set -# CONFIG_SENSORS_MP2975 is not set -# CONFIG_SENSORS_MP5023 is not set -# CONFIG_SENSORS_MPQ7932 is not set -# CONFIG_SENSORS_PIM4328 is not set -# CONFIG_SENSORS_PLI1209BC is not set -# CONFIG_SENSORS_PM6764TR is not set -# CONFIG_SENSORS_PXE1610 is not set -# CONFIG_SENSORS_Q54SJ108A2 is not set -# CONFIG_SENSORS_STPDDC60 is not set -# CONFIG_SENSORS_TDA38640 is not set -# CONFIG_SENSORS_TPS40422 is not set -# CONFIG_SENSORS_TPS53679 is not set -# CONFIG_SENSORS_TPS546D24 is not set -CONFIG_SENSORS_UCD9000=m -CONFIG_SENSORS_UCD9200=m -# CONFIG_SENSORS_XDPE152 is not set -# 
CONFIG_SENSORS_XDPE122 is not set -CONFIG_SENSORS_ZL6100=m -# CONFIG_SENSORS_SBTSI is not set -# CONFIG_SENSORS_SBRMI is not set -CONFIG_SENSORS_SHT15=m -CONFIG_SENSORS_SHT21=m -# CONFIG_SENSORS_SHT3x is not set -# CONFIG_SENSORS_SHT4x is not set -# CONFIG_SENSORS_SHTC1 is not set -CONFIG_SENSORS_SIS5595=m -CONFIG_SENSORS_DME1737=m -CONFIG_SENSORS_EMC1403=m -# CONFIG_SENSORS_EMC2103 is not set -# CONFIG_SENSORS_EMC2305 is not set -CONFIG_SENSORS_EMC6W201=m -CONFIG_SENSORS_SMSC47M1=m -CONFIG_SENSORS_SMSC47M192=m -CONFIG_SENSORS_SMSC47B397=m -CONFIG_SENSORS_SCH56XX_COMMON=m -CONFIG_SENSORS_SCH5627=m -CONFIG_SENSORS_SCH5636=m -# CONFIG_SENSORS_STTS751 is not set -# CONFIG_SENSORS_ADC128D818 is not set -CONFIG_SENSORS_ADS7828=m -# CONFIG_SENSORS_ADS7871 is not set -CONFIG_SENSORS_AMC6821=m -CONFIG_SENSORS_INA209=m -CONFIG_SENSORS_INA2XX=m -# CONFIG_SENSORS_INA238 is not set -# CONFIG_SENSORS_INA3221 is not set -# CONFIG_SENSORS_TC74 is not set -CONFIG_SENSORS_THMC50=m -CONFIG_SENSORS_TMP102=m -# CONFIG_SENSORS_TMP103 is not set -# CONFIG_SENSORS_TMP108 is not set -CONFIG_SENSORS_TMP401=m -CONFIG_SENSORS_TMP421=m -# CONFIG_SENSORS_TMP464 is not set -# CONFIG_SENSORS_TMP513 is not set -CONFIG_SENSORS_VIA_CPUTEMP=m -CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m -CONFIG_SENSORS_VIA686A=m -CONFIG_SENSORS_VT1211=m -CONFIG_SENSORS_VT8231=m -# CONFIG_SENSORS_W83773G is not set -CONFIG_SENSORS_W83781D=m -CONFIG_SENSORS_W83791D=m -CONFIG_SENSORS_W83792D=m -CONFIG_SENSORS_W83793=m -CONFIG_SENSORS_W83795=m -# CONFIG_SENSORS_W83795_FANCTRL is not set -CONFIG_SENSORS_W83L785TS=m -CONFIG_SENSORS_W83L786NG=m -CONFIG_SENSORS_W83627HF=m -CONFIG_SENSORS_W83627EHF=m -# CONFIG_SENSORS_XGENE is not set - -# -# ACPI drivers -# -CONFIG_SENSORS_ACPI_POWER=m -CONFIG_SENSORS_ATK0110=m -# CONFIG_SENSORS_ASUS_WMI is not set -# CONFIG_SENSORS_ASUS_EC is not set -# CONFIG_SENSORS_HP_WMI is not set -CONFIG_THERMAL=y -# CONFIG_THERMAL_NETLINK is not set -# CONFIG_THERMAL_STATISTICS is not set 
-CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_ACPI=y -CONFIG_THERMAL_WRITABLE_TRIPS=y -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set -# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set -# CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG is not set -CONFIG_THERMAL_GOV_FAIR_SHARE=y -CONFIG_THERMAL_GOV_STEP_WISE=y -CONFIG_THERMAL_GOV_BANG_BANG=y -CONFIG_THERMAL_GOV_USER_SPACE=y -# CONFIG_THERMAL_EMULATION is not set - -# -# Intel thermal drivers -# -CONFIG_INTEL_POWERCLAMP=m -CONFIG_X86_THERMAL_VECTOR=y -CONFIG_INTEL_TCC=y -CONFIG_X86_PKG_TEMP_THERMAL=m -CONFIG_INTEL_SOC_DTS_IOSF_CORE=m -# CONFIG_INTEL_SOC_DTS_THERMAL is not set - -# -# ACPI INT340X thermal drivers -# -CONFIG_INT340X_THERMAL=m -CONFIG_ACPI_THERMAL_REL=m -# CONFIG_INT3406_THERMAL is not set -CONFIG_PROC_THERMAL_MMIO_RAPL=m -# end of ACPI INT340X thermal drivers - -CONFIG_INTEL_PCH_THERMAL=m -# CONFIG_INTEL_TCC_COOLING is not set -# CONFIG_INTEL_HFI_THERMAL is not set -# end of Intel thermal drivers - -# CONFIG_GENERIC_ADC_THERMAL is not set -CONFIG_WATCHDOG=y -CONFIG_WATCHDOG_CORE=y -# CONFIG_WATCHDOG_NOWAYOUT is not set -CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y -CONFIG_WATCHDOG_OPEN_TIMEOUT=0 -CONFIG_WATCHDOG_SYSFS=y -# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set - -# -# Watchdog Pretimeout Governors -# -# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set - -# -# Watchdog Device Drivers -# -CONFIG_SOFT_WATCHDOG=m -CONFIG_WDAT_WDT=m -# CONFIG_XILINX_WATCHDOG is not set -# CONFIG_ZIIRAVE_WATCHDOG is not set -# CONFIG_MLX_WDT is not set -# CONFIG_CADENCE_WATCHDOG is not set -# CONFIG_DW_WATCHDOG is not set -# CONFIG_MAX63XX_WATCHDOG is not set -# CONFIG_ACQUIRE_WDT is not set -# CONFIG_ADVANTECH_WDT is not set -# CONFIG_ADVANTECH_EC_WDT is not set -CONFIG_ALIM1535_WDT=m -CONFIG_ALIM7101_WDT=m -# CONFIG_EBC_C384_WDT is not set -# CONFIG_EXAR_WDT is not set -CONFIG_F71808E_WDT=m -CONFIG_SP5100_TCO=m -CONFIG_SBC_FITPC2_WATCHDOG=m -# 
CONFIG_EUROTECH_WDT is not set -CONFIG_IB700_WDT=m -CONFIG_IBMASR=m -# CONFIG_WAFER_WDT is not set -CONFIG_I6300ESB_WDT=m -CONFIG_IE6XX_WDT=m -CONFIG_ITCO_WDT=m -CONFIG_ITCO_VENDOR_SUPPORT=y -CONFIG_IT8712F_WDT=m -CONFIG_IT87_WDT=m -CONFIG_HP_WATCHDOG=m -CONFIG_HPWDT_NMI_DECODING=y -# CONFIG_SC1200_WDT is not set -# CONFIG_PC87413_WDT is not set -CONFIG_NV_TCO=m -# CONFIG_60XX_WDT is not set -# CONFIG_CPU5_WDT is not set -CONFIG_SMSC_SCH311X_WDT=m -# CONFIG_SMSC37B787_WDT is not set -# CONFIG_TQMX86_WDT is not set -CONFIG_VIA_WDT=m -CONFIG_W83627HF_WDT=m -CONFIG_W83877F_WDT=m -CONFIG_W83977F_WDT=m -CONFIG_MACHZ_WDT=m -# CONFIG_SBC_EPX_C3_WATCHDOG is not set -CONFIG_INTEL_MEI_WDT=m -# CONFIG_NI903X_WDT is not set -# CONFIG_NIC7018_WDT is not set -# CONFIG_MEN_A21_WDT is not set -CONFIG_XEN_WDT=m - -# -# PCI-based Watchdog Cards -# -CONFIG_PCIPCWATCHDOG=m -CONFIG_WDTPCI=m - -# -# USB-based Watchdog Cards -# -CONFIG_USBPCWATCHDOG=m -CONFIG_SSB_POSSIBLE=y -# CONFIG_SSB is not set -CONFIG_BCMA_POSSIBLE=y -CONFIG_BCMA=m -CONFIG_BCMA_HOST_PCI_POSSIBLE=y -CONFIG_BCMA_HOST_PCI=y -# CONFIG_BCMA_HOST_SOC is not set -CONFIG_BCMA_DRIVER_PCI=y -CONFIG_BCMA_DRIVER_GMAC_CMN=y -CONFIG_BCMA_DRIVER_GPIO=y -# CONFIG_BCMA_DEBUG is not set - -# -# Multifunction device drivers -# -CONFIG_MFD_CORE=y -# CONFIG_MFD_AS3711 is not set -# CONFIG_MFD_SMPRO is not set -# CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_AAT2870_CORE is not set -# CONFIG_MFD_BCM590XX is not set -# CONFIG_MFD_BD9571MWV is not set -# CONFIG_MFD_AXP20X_I2C is not set -# CONFIG_MFD_CS42L43_I2C is not set -# CONFIG_MFD_MADERA is not set -# CONFIG_PMIC_DA903X is not set -# CONFIG_MFD_DA9052_SPI is not set -# CONFIG_MFD_DA9052_I2C is not set -# CONFIG_MFD_DA9055 is not set -# CONFIG_MFD_DA9062 is not set -# CONFIG_MFD_DA9063 is not set -# CONFIG_MFD_DA9150 is not set -# CONFIG_MFD_DLN2 is not set -# CONFIG_MFD_MC13XXX_SPI is not set -# CONFIG_MFD_MC13XXX_I2C is not set -# CONFIG_MFD_MP2629 is not set -# 
CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set -CONFIG_LPC_ICH=m -CONFIG_LPC_SCH=m -CONFIG_MFD_INTEL_LPSS=m -CONFIG_MFD_INTEL_LPSS_ACPI=m -CONFIG_MFD_INTEL_LPSS_PCI=m -# CONFIG_MFD_INTEL_PMC_BXT is not set -# CONFIG_MFD_IQS62X is not set -# CONFIG_MFD_JANZ_CMODIO is not set -# CONFIG_MFD_KEMPLD is not set -# CONFIG_MFD_88PM800 is not set -# CONFIG_MFD_88PM805 is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_MFD_MAX14577 is not set -# CONFIG_MFD_MAX77541 is not set -# CONFIG_MFD_MAX77693 is not set -# CONFIG_MFD_MAX77843 is not set -# CONFIG_MFD_MAX8907 is not set -# CONFIG_MFD_MAX8925 is not set -# CONFIG_MFD_MAX8997 is not set -# CONFIG_MFD_MAX8998 is not set -# CONFIG_MFD_MT6360 is not set -# CONFIG_MFD_MT6370 is not set -# CONFIG_MFD_MT6397 is not set -# CONFIG_MFD_MENF21BMC is not set -# CONFIG_MFD_OCELOT is not set -# CONFIG_EZX_PCAP is not set -CONFIG_MFD_VIPERBOARD=m -# CONFIG_MFD_RETU is not set -# CONFIG_MFD_PCF50633 is not set -# CONFIG_MFD_SY7636A is not set -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_RT4831 is not set -# CONFIG_MFD_RT5033 is not set -# CONFIG_MFD_RT5120 is not set -# CONFIG_MFD_RC5T583 is not set -# CONFIG_MFD_SI476X_CORE is not set -CONFIG_MFD_SM501=m -CONFIG_MFD_SM501_GPIO=y -# CONFIG_MFD_SKY81452 is not set -# CONFIG_MFD_SYSCON is not set -# CONFIG_MFD_LP3943 is not set -# CONFIG_MFD_LP8788 is not set -# CONFIG_MFD_TI_LMU is not set -# CONFIG_MFD_PALMAS is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TPS65086 is not set -# CONFIG_MFD_TPS65090 is not set -# CONFIG_MFD_TI_LP873X is not set -# CONFIG_MFD_TPS6586X is not set -# CONFIG_MFD_TPS65910 is not set -# CONFIG_MFD_TPS65912_I2C is not set -# CONFIG_MFD_TPS65912_SPI is not set -# CONFIG_MFD_TPS6594_I2C is not set -# CONFIG_MFD_TPS6594_SPI is not set -# CONFIG_TWL4030_CORE is not set -# CONFIG_TWL6040_CORE is not set -# CONFIG_MFD_WL1273_CORE is not set -# CONFIG_MFD_LM3533 is not set -# CONFIG_MFD_TQMX86 is not 
set -CONFIG_MFD_VX855=m -# CONFIG_MFD_ARIZONA_I2C is not set -# CONFIG_MFD_ARIZONA_SPI is not set -# CONFIG_MFD_WM8400 is not set -# CONFIG_MFD_WM831X_I2C is not set -# CONFIG_MFD_WM831X_SPI is not set -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_MFD_ATC260X_I2C is not set -# CONFIG_MFD_INTEL_M10_BMC_SPI is not set -# end of Multifunction device drivers - -# CONFIG_REGULATOR is not set -CONFIG_RC_CORE=m -# CONFIG_LIRC is not set -CONFIG_RC_MAP=m -CONFIG_RC_DECODERS=y -CONFIG_IR_IMON_DECODER=m -CONFIG_IR_JVC_DECODER=m -CONFIG_IR_MCE_KBD_DECODER=m -CONFIG_IR_NEC_DECODER=m -CONFIG_IR_RC5_DECODER=m -CONFIG_IR_RC6_DECODER=m -# CONFIG_IR_RCMM_DECODER is not set -CONFIG_IR_SANYO_DECODER=m -# CONFIG_IR_SHARP_DECODER is not set -CONFIG_IR_SONY_DECODER=m -# CONFIG_IR_XMP_DECODER is not set -CONFIG_RC_DEVICES=y -CONFIG_IR_ENE=m -CONFIG_IR_FINTEK=m -# CONFIG_IR_IGORPLUGUSB is not set -CONFIG_IR_IGUANA=m -CONFIG_IR_IMON=m -CONFIG_IR_IMON_RAW=m -CONFIG_IR_ITE_CIR=m -CONFIG_IR_MCEUSB=m -CONFIG_IR_NUVOTON=m -CONFIG_IR_REDRAT3=m -CONFIG_IR_SERIAL=m -# CONFIG_IR_SERIAL_TRANSMITTER is not set -CONFIG_IR_STREAMZAP=m -# CONFIG_IR_TOY is not set -CONFIG_IR_TTUSBIR=m -CONFIG_IR_WINBOND_CIR=m -CONFIG_RC_ATI_REMOTE=m -CONFIG_RC_LOOPBACK=m -# CONFIG_RC_XBOX_DVD is not set -CONFIG_CEC_CORE=m - -# -# CEC support -# -# CONFIG_MEDIA_CEC_RC is not set -CONFIG_MEDIA_CEC_SUPPORT=y -# CONFIG_CEC_CH7322 is not set -# CONFIG_CEC_GPIO is not set -# CONFIG_CEC_SECO is not set -CONFIG_USB_PULSE8_CEC=m -CONFIG_USB_RAINSHADOW_CEC=m -# end of CEC support - -CONFIG_MEDIA_SUPPORT=m -CONFIG_MEDIA_SUPPORT_FILTER=y -CONFIG_MEDIA_SUBDRV_AUTOSELECT=y - -# -# Media device types -# -# CONFIG_MEDIA_CAMERA_SUPPORT is not set -# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set -# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set -# CONFIG_MEDIA_RADIO_SUPPORT is not set -# CONFIG_MEDIA_SDR_SUPPORT is not set -# CONFIG_MEDIA_PLATFORM_SUPPORT is not set -# CONFIG_MEDIA_TEST_SUPPORT is not set -# end of 
Media device types - -# -# Media drivers -# - -# -# Drivers filtered as selected at 'Filter media drivers' -# - -# -# Media drivers -# -CONFIG_MEDIA_USB_SUPPORT=y -CONFIG_MEDIA_PCI_SUPPORT=y -# CONFIG_IPU_BRIDGE is not set -# end of Media drivers - -CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV=y - -# -# Media ancillary drivers -# -# end of Media ancillary drivers - -# -# Graphics support -# -CONFIG_APERTURE_HELPERS=y -CONFIG_VIDEO_CMDLINE=y -CONFIG_VIDEO_NOMODESET=y -# CONFIG_AUXDISPLAY is not set -# CONFIG_PANEL is not set -# CONFIG_AGP is not set -CONFIG_INTEL_GTT=m -CONFIG_VGA_SWITCHEROO=y -CONFIG_DRM=m -CONFIG_DRM_MIPI_DSI=y -CONFIG_DRM_KMS_HELPER=m -CONFIG_DRM_FBDEV_EMULATION=y -CONFIG_DRM_FBDEV_OVERALLOC=100 -CONFIG_DRM_LOAD_EDID_FIRMWARE=y -CONFIG_DRM_DISPLAY_HELPER=m -CONFIG_DRM_DISPLAY_DP_HELPER=y -CONFIG_DRM_DISPLAY_HDCP_HELPER=y -CONFIG_DRM_DISPLAY_HDMI_HELPER=y -# CONFIG_DRM_DP_AUX_CHARDEV is not set -# CONFIG_DRM_DP_CEC is not set -CONFIG_DRM_TTM=m -CONFIG_DRM_EXEC=m -CONFIG_DRM_BUDDY=m -CONFIG_DRM_VRAM_HELPER=m -CONFIG_DRM_TTM_HELPER=m -CONFIG_DRM_GEM_SHMEM_HELPER=m -CONFIG_DRM_SUBALLOC_HELPER=m -CONFIG_DRM_SCHED=m - -# -# I2C encoder or helper chips -# -CONFIG_DRM_I2C_CH7006=m -CONFIG_DRM_I2C_SIL164=m -# CONFIG_DRM_I2C_NXP_TDA998X is not set -# CONFIG_DRM_I2C_NXP_TDA9950 is not set -# end of I2C encoder or helper chips - -# -# ARM devices -# -# end of ARM devices - -CONFIG_DRM_RADEON=m -CONFIG_DRM_RADEON_USERPTR=y -CONFIG_DRM_AMDGPU=m -# CONFIG_DRM_AMDGPU_SI is not set -# CONFIG_DRM_AMDGPU_CIK is not set -# CONFIG_DRM_AMDGPU_USERPTR is not set - -# -# ACP (Audio CoProcessor) Configuration -# -# CONFIG_DRM_AMD_ACP is not set -# end of ACP (Audio CoProcessor) Configuration - -# -# Display Engine Configuration -# -CONFIG_DRM_AMD_DC=y -CONFIG_DRM_AMD_DC_FP=y -# CONFIG_DEBUG_KERNEL_DC is not set -# CONFIG_DRM_AMD_SECURE_DISPLAY is not set -# end of Display Engine Configuration - -# CONFIG_HSA_AMD is not set -CONFIG_DRM_NOUVEAU=m -CONFIG_NOUVEAU_DEBUG=5 
-CONFIG_NOUVEAU_DEBUG_DEFAULT=3 -CONFIG_NOUVEAU_DEBUG_MMU=y -# CONFIG_NOUVEAU_DEBUG_PUSH is not set -CONFIG_DRM_NOUVEAU_BACKLIGHT=y -CONFIG_DRM_I915=m -CONFIG_DRM_I915_FORCE_PROBE="" -CONFIG_DRM_I915_CAPTURE_ERROR=y -CONFIG_DRM_I915_COMPRESS_ERROR=y -CONFIG_DRM_I915_USERPTR=y -CONFIG_DRM_I915_GVT_KVMGT=m -CONFIG_DRM_I915_REQUEST_TIMEOUT=20000 -CONFIG_DRM_I915_FENCE_TIMEOUT=10000 -CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250 -CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500 -CONFIG_DRM_I915_PREEMPT_TIMEOUT=640 -CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE=7500 -CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000 -CONFIG_DRM_I915_STOP_TIMEOUT=100 -CONFIG_DRM_I915_TIMESLICE_DURATION=1 -CONFIG_DRM_I915_GVT=y -# CONFIG_DRM_VGEM is not set -CONFIG_DRM_VKMS=m -CONFIG_DRM_VMWGFX=m -# CONFIG_DRM_VMWGFX_MKSSTATS is not set -CONFIG_DRM_GMA500=m -CONFIG_DRM_UDL=m -CONFIG_DRM_AST=m -CONFIG_DRM_MGAG200=m -CONFIG_DRM_QXL=m -CONFIG_DRM_VIRTIO_GPU=m -CONFIG_DRM_VIRTIO_GPU_KMS=y -CONFIG_DRM_PANEL=y - -# -# Display Panels -# -# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set -# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set -# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set -# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set -# end of Display Panels - -CONFIG_DRM_BRIDGE=y -CONFIG_DRM_PANEL_BRIDGE=y - -# -# Display Interface Bridges -# -# CONFIG_DRM_ANALOGIX_ANX78XX is not set -# end of Display Interface Bridges - -# CONFIG_DRM_LOONGSON is not set -# CONFIG_DRM_ETNAVIV is not set -CONFIG_DRM_BOCHS=m -CONFIG_DRM_CIRRUS_QEMU=m -# CONFIG_DRM_GM12U320 is not set -# CONFIG_DRM_PANEL_MIPI_DBI is not set -# CONFIG_DRM_SIMPLEDRM is not set -# CONFIG_TINYDRM_HX8357D is not set -# CONFIG_TINYDRM_ILI9163 is not set -# CONFIG_TINYDRM_ILI9225 is not set -# CONFIG_TINYDRM_ILI9341 is not set -# CONFIG_TINYDRM_ILI9486 is not set -# CONFIG_TINYDRM_MI0283QT is not set -# CONFIG_TINYDRM_REPAPER is not set -# CONFIG_TINYDRM_ST7586 is not set -# CONFIG_TINYDRM_ST7735R is not set -# CONFIG_DRM_XEN_FRONTEND is not set -# 
CONFIG_DRM_VBOXVIDEO is not set -# CONFIG_DRM_GUD is not set -# CONFIG_DRM_SSD130X is not set -# CONFIG_DRM_HYPERV is not set -# CONFIG_DRM_LEGACY is not set -CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y -CONFIG_DRM_PRIVACY_SCREEN=y -# CONFIG_HYDCU_FIXUP_HEADER is not set -CONFIG_DRM_INSPUR=m - -# -# Frame buffer Devices -# -CONFIG_FB=y -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ARC is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -# CONFIG_FB_VGA16 is not set -# CONFIG_FB_UVESA is not set -CONFIG_FB_VESA=y -CONFIG_FB_EFI=y -# CONFIG_FB_N411 is not set -# CONFIG_FB_HGA is not set -# CONFIG_FB_OPENCORES is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_I740 is not set -# CONFIG_FB_LE80578 is not set -# CONFIG_FB_MATROX is not set -# CONFIG_FB_RADEON is not set -# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_VIA is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_VT8623 is not set -# CONFIG_FB_TRIDENT is not set -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -# CONFIG_FB_SM501 is not set -# CONFIG_FB_SMSCUFX is not set -# CONFIG_FB_UDL is not set -# CONFIG_FB_IBM_GXT4500 is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_XEN_FBDEV_FRONTEND is not set -# CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set -CONFIG_FB_HYPERV=m -# CONFIG_FB_SIMPLE is not set -# CONFIG_FB_SSD1307 is not set -# CONFIG_FB_SM712 is not set -# CONFIG_FB_LS2K500 is not set -CONFIG_FB_CORE=y -CONFIG_FB_NOTIFY=y -CONFIG_FIRMWARE_EDID=y -CONFIG_FB_DEVICE=y -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -CONFIG_FB_SYS_FILLRECT=y -CONFIG_FB_SYS_COPYAREA=y 
-CONFIG_FB_SYS_IMAGEBLIT=y -# CONFIG_FB_FOREIGN_ENDIAN is not set -CONFIG_FB_SYS_FOPS=y -CONFIG_FB_DEFERRED_IO=y -CONFIG_FB_IOMEM_HELPERS=y -CONFIG_FB_SYSMEM_HELPERS=y -CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y -# CONFIG_FB_MODE_HELPERS is not set -CONFIG_FB_TILEBLITTING=y -# end of Frame buffer Devices - -# -# Backlight & LCD device support -# -CONFIG_LCD_CLASS_DEVICE=m -# CONFIG_LCD_L4F00242T03 is not set -# CONFIG_LCD_LMS283GF05 is not set -# CONFIG_LCD_LTV350QV is not set -# CONFIG_LCD_ILI922X is not set -# CONFIG_LCD_ILI9320 is not set -# CONFIG_LCD_TDO24M is not set -# CONFIG_LCD_VGG2432A4 is not set -CONFIG_LCD_PLATFORM=m -# CONFIG_LCD_AMS369FG06 is not set -# CONFIG_LCD_LMS501KF03 is not set -# CONFIG_LCD_HX8357 is not set -# CONFIG_LCD_OTM3225A is not set -CONFIG_BACKLIGHT_CLASS_DEVICE=y -# CONFIG_BACKLIGHT_KTD253 is not set -# CONFIG_BACKLIGHT_KTZ8866 is not set -# CONFIG_BACKLIGHT_PWM is not set -CONFIG_BACKLIGHT_APPLE=m -# CONFIG_BACKLIGHT_QCOM_WLED is not set -# CONFIG_BACKLIGHT_SAHARA is not set -# CONFIG_BACKLIGHT_ADP8860 is not set -# CONFIG_BACKLIGHT_ADP8870 is not set -# CONFIG_BACKLIGHT_LM3630A is not set -# CONFIG_BACKLIGHT_LM3639 is not set -CONFIG_BACKLIGHT_LP855X=m -# CONFIG_BACKLIGHT_GPIO is not set -# CONFIG_BACKLIGHT_LV5207LP is not set -# CONFIG_BACKLIGHT_BD6107 is not set -# CONFIG_BACKLIGHT_ARCXCNN is not set -# end of Backlight & LCD device support - -CONFIG_HDMI=y - -# -# Console display driver support -# -CONFIG_VGA_CONSOLE=y -CONFIG_DUMMY_CONSOLE=y -CONFIG_DUMMY_CONSOLE_COLUMNS=80 -CONFIG_DUMMY_CONSOLE_ROWS=25 -CONFIG_FRAMEBUFFER_CONSOLE=y -# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y -# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set -# end of Console display driver support - -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_LOGO_LINUX_CLUT224=y -# end of Graphics support - -# 
CONFIG_DRM_ACCEL is not set -CONFIG_SOUND=m -# CONFIG_SND is not set -CONFIG_HID_SUPPORT=y -CONFIG_HID=y -CONFIG_HID_BATTERY_STRENGTH=y -CONFIG_HIDRAW=y -CONFIG_UHID=m -CONFIG_HID_GENERIC=y - -# -# Special HID drivers -# -CONFIG_HID_A4TECH=m -# CONFIG_HID_ACCUTOUCH is not set -CONFIG_HID_ACRUX=m -# CONFIG_HID_ACRUX_FF is not set -CONFIG_HID_APPLE=m -CONFIG_HID_APPLEIR=m -CONFIG_HID_ASUS=m -CONFIG_HID_AUREAL=m -CONFIG_HID_BELKIN=m -CONFIG_HID_BETOP_FF=m -# CONFIG_HID_BIGBEN_FF is not set -CONFIG_HID_CHERRY=m -CONFIG_HID_CHICONY=m -CONFIG_HID_CORSAIR=m -# CONFIG_HID_COUGAR is not set -# CONFIG_HID_MACALLY is not set -CONFIG_HID_CMEDIA=m -# CONFIG_HID_CP2112 is not set -# CONFIG_HID_CREATIVE_SB0540 is not set -CONFIG_HID_CYPRESS=m -CONFIG_HID_DRAGONRISE=m -# CONFIG_DRAGONRISE_FF is not set -# CONFIG_HID_EMS_FF is not set -CONFIG_HID_ELAN=m -CONFIG_HID_ELECOM=m -CONFIG_HID_ELO=m -# CONFIG_HID_EVISION is not set -CONFIG_HID_EZKEY=m -# CONFIG_HID_FT260 is not set -CONFIG_HID_GEMBIRD=m -CONFIG_HID_GFRM=m -# CONFIG_HID_GLORIOUS is not set -CONFIG_HID_HOLTEK=m -# CONFIG_HOLTEK_FF is not set -# CONFIG_HID_GOOGLE_STADIA_FF is not set -# CONFIG_HID_VIVALDI is not set -CONFIG_HID_GT683R=m -CONFIG_HID_KEYTOUCH=m -CONFIG_HID_KYE=m -CONFIG_HID_UCLOGIC=m -CONFIG_HID_WALTOP=m -# CONFIG_HID_VIEWSONIC is not set -# CONFIG_HID_VRC2 is not set -# CONFIG_HID_XIAOMI is not set -CONFIG_HID_GYRATION=m -CONFIG_HID_ICADE=m -CONFIG_HID_ITE=m -CONFIG_HID_JABRA=m -CONFIG_HID_TWINHAN=m -CONFIG_HID_KENSINGTON=m -CONFIG_HID_LCPOWER=m -CONFIG_HID_LED=m -CONFIG_HID_LENOVO=m -# CONFIG_HID_LETSKETCH is not set -CONFIG_HID_LOGITECH=m -CONFIG_HID_LOGITECH_DJ=m -CONFIG_HID_LOGITECH_HIDPP=m -# CONFIG_LOGITECH_FF is not set -# CONFIG_LOGIRUMBLEPAD2_FF is not set -# CONFIG_LOGIG940_FF is not set -# CONFIG_LOGIWHEELS_FF is not set -CONFIG_HID_MAGICMOUSE=y -# CONFIG_HID_MALTRON is not set -# CONFIG_HID_MAYFLASH is not set -# CONFIG_HID_MEGAWORLD_FF is not set -# CONFIG_HID_REDRAGON is not set 
-CONFIG_HID_MICROSOFT=m -CONFIG_HID_MONTEREY=m -CONFIG_HID_MULTITOUCH=m -# CONFIG_HID_NINTENDO is not set -CONFIG_HID_NTI=m -CONFIG_HID_NTRIG=y -# CONFIG_HID_NVIDIA_SHIELD is not set -CONFIG_HID_ORTEK=m -CONFIG_HID_PANTHERLORD=m -# CONFIG_PANTHERLORD_FF is not set -CONFIG_HID_PENMOUNT=m -CONFIG_HID_PETALYNX=m -CONFIG_HID_PICOLCD=m -CONFIG_HID_PICOLCD_FB=y -CONFIG_HID_PICOLCD_BACKLIGHT=y -CONFIG_HID_PICOLCD_LCD=y -CONFIG_HID_PICOLCD_LEDS=y -CONFIG_HID_PICOLCD_CIR=y -CONFIG_HID_PLANTRONICS=m -# CONFIG_HID_PXRC is not set -# CONFIG_HID_RAZER is not set -CONFIG_HID_PRIMAX=m -# CONFIG_HID_RETRODE is not set -CONFIG_HID_ROCCAT=m -CONFIG_HID_SAITEK=m -CONFIG_HID_SAMSUNG=m -# CONFIG_HID_SEMITEK is not set -# CONFIG_HID_SIGMAMICRO is not set -CONFIG_HID_SONY=m -CONFIG_SONY_FF=y -CONFIG_HID_SPEEDLINK=m -# CONFIG_HID_STEAM is not set -CONFIG_HID_STEELSERIES=m -CONFIG_HID_SUNPLUS=m -CONFIG_HID_RMI=m -CONFIG_HID_GREENASIA=m -# CONFIG_GREENASIA_FF is not set -CONFIG_HID_HYPERV_MOUSE=m -CONFIG_HID_SMARTJOYPLUS=m -# CONFIG_SMARTJOYPLUS_FF is not set -CONFIG_HID_TIVO=m -CONFIG_HID_TOPSEED=m -# CONFIG_HID_TOPRE is not set -CONFIG_HID_THINGM=m -CONFIG_HID_THRUSTMASTER=m -# CONFIG_THRUSTMASTER_FF is not set -# CONFIG_HID_UDRAW_PS3 is not set -# CONFIG_HID_U2FZERO is not set -CONFIG_HID_WACOM=m -CONFIG_HID_WIIMOTE=m -CONFIG_HID_XINMO=m -CONFIG_HID_ZEROPLUS=m -# CONFIG_ZEROPLUS_FF is not set -CONFIG_HID_ZYDACRON=m -CONFIG_HID_SENSOR_HUB=y -CONFIG_HID_SENSOR_CUSTOM_SENSOR=m -CONFIG_HID_ALPS=m -# CONFIG_HID_MCP2221 is not set -# end of Special HID drivers - -# -# HID-BPF support -# -# CONFIG_HID_BPF is not set -# end of HID-BPF support - -# -# USB HID support -# -CONFIG_USB_HID=y -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y -# end of USB HID support - -CONFIG_I2C_HID=m -# CONFIG_I2C_HID_ACPI is not set -# CONFIG_I2C_HID_OF is not set - -# -# Intel ISH HID support -# -CONFIG_INTEL_ISH_HID=m -# CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER is not set -# end of Intel ISH HID support - -# -# AMD SFH HID 
Support -# -# CONFIG_AMD_SFH_HID is not set -# end of AMD SFH HID Support - -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=y -CONFIG_USB_LED_TRIG=y -# CONFIG_USB_ULPI_BUS is not set -# CONFIG_USB_CONN_GPIO is not set -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB=y -CONFIG_USB_PCI=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEFAULT_PERSIST=y -# CONFIG_USB_FEW_INIT_RETRIES is not set -# CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_OTG is not set -# CONFIG_USB_OTG_PRODUCTLIST is not set -CONFIG_USB_LEDS_TRIGGER_USBPORT=m -CONFIG_USB_AUTOSUSPEND_DELAY=2 -CONFIG_USB_MON=y - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_XHCI_DBGCAP=y -CONFIG_USB_XHCI_PCI=y -# CONFIG_USB_XHCI_PCI_RENESAS is not set -# CONFIG_USB_XHCI_PLATFORM is not set -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=y -# CONFIG_USB_EHCI_FSL is not set -# CONFIG_USB_EHCI_HCD_PLATFORM is not set -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_MAX3421_HCD is not set -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PCI=y -# CONFIG_USB_OHCI_HCD_PLATFORM is not set -# CONFIG_USB_UHCI_HCD is not set -# CONFIG_USB_SL811_HCD is not set -# CONFIG_USB_R8A66597_HCD is not set -# CONFIG_USB_HCD_BCMA is not set -# CONFIG_USB_HCD_TEST_MODE is not set -# CONFIG_USB_XEN_HCD is not set - -# -# USB Device Class drivers -# -CONFIG_USB_ACM=m -CONFIG_USB_PRINTER=m -CONFIG_USB_WDM=m -CONFIG_USB_TMC=m - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=m -# CONFIG_USB_STORAGE_DEBUG is not set -CONFIG_USB_STORAGE_REALTEK=m -CONFIG_REALTEK_AUTOPM=y -CONFIG_USB_STORAGE_DATAFAB=m -CONFIG_USB_STORAGE_FREECOM=m -CONFIG_USB_STORAGE_ISD200=m -CONFIG_USB_STORAGE_USBAT=m -CONFIG_USB_STORAGE_SDDR09=m 
-CONFIG_USB_STORAGE_SDDR55=m -CONFIG_USB_STORAGE_JUMPSHOT=m -CONFIG_USB_STORAGE_ALAUDA=m -CONFIG_USB_STORAGE_ONETOUCH=m -CONFIG_USB_STORAGE_KARMA=m -CONFIG_USB_STORAGE_CYPRESS_ATACB=m -CONFIG_USB_STORAGE_ENE_UB6250=m -CONFIG_USB_UAS=m - -# -# USB Imaging devices -# -CONFIG_USB_MDC800=m -CONFIG_USB_MICROTEK=m -# CONFIG_USBIP_CORE is not set - -# -# USB dual-mode controller drivers -# -# CONFIG_USB_CDNS_SUPPORT is not set -# CONFIG_USB_MUSB_HDRC is not set -# CONFIG_USB_DWC3 is not set -# CONFIG_USB_DWC2 is not set -# CONFIG_USB_CHIPIDEA is not set -# CONFIG_USB_ISP1760 is not set - -# -# USB port drivers -# -CONFIG_USB_SERIAL=y -CONFIG_USB_SERIAL_CONSOLE=y -CONFIG_USB_SERIAL_GENERIC=y -# CONFIG_USB_SERIAL_SIMPLE is not set -CONFIG_USB_SERIAL_AIRCABLE=m -CONFIG_USB_SERIAL_ARK3116=m -CONFIG_USB_SERIAL_BELKIN=m -CONFIG_USB_SERIAL_CH341=m -CONFIG_USB_SERIAL_WHITEHEAT=m -CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m -CONFIG_USB_SERIAL_CP210X=m -CONFIG_USB_SERIAL_CYPRESS_M8=m -CONFIG_USB_SERIAL_EMPEG=m -CONFIG_USB_SERIAL_FTDI_SIO=m -CONFIG_USB_SERIAL_VISOR=m -CONFIG_USB_SERIAL_IPAQ=m -CONFIG_USB_SERIAL_IR=m -CONFIG_USB_SERIAL_EDGEPORT=m -CONFIG_USB_SERIAL_EDGEPORT_TI=m -# CONFIG_USB_SERIAL_F81232 is not set -CONFIG_USB_SERIAL_F8153X=m -CONFIG_USB_SERIAL_GARMIN=m -CONFIG_USB_SERIAL_IPW=m -CONFIG_USB_SERIAL_IUU=m -CONFIG_USB_SERIAL_KEYSPAN_PDA=m -CONFIG_USB_SERIAL_KEYSPAN=m -CONFIG_USB_SERIAL_KLSI=m -CONFIG_USB_SERIAL_KOBIL_SCT=m -CONFIG_USB_SERIAL_MCT_U232=m -# CONFIG_USB_SERIAL_METRO is not set -CONFIG_USB_SERIAL_MOS7720=m -CONFIG_USB_SERIAL_MOS7715_PARPORT=y -CONFIG_USB_SERIAL_MOS7840=m -CONFIG_USB_SERIAL_MXUPORT=m -CONFIG_USB_SERIAL_NAVMAN=m -CONFIG_USB_SERIAL_PL2303=m -CONFIG_USB_SERIAL_OTI6858=m -CONFIG_USB_SERIAL_QCAUX=m -CONFIG_USB_SERIAL_QUALCOMM=m -CONFIG_USB_SERIAL_SPCP8X5=m -CONFIG_USB_SERIAL_SAFE=m -CONFIG_USB_SERIAL_SAFE_PADDED=y -CONFIG_USB_SERIAL_SIERRAWIRELESS=m -CONFIG_USB_SERIAL_SYMBOL=m -CONFIG_USB_SERIAL_TI=m -CONFIG_USB_SERIAL_CYBERJACK=m 
-CONFIG_USB_SERIAL_WWAN=m -CONFIG_USB_SERIAL_OPTION=m -CONFIG_USB_SERIAL_OMNINET=m -CONFIG_USB_SERIAL_OPTICON=m -CONFIG_USB_SERIAL_XSENS_MT=m -# CONFIG_USB_SERIAL_WISHBONE is not set -CONFIG_USB_SERIAL_SSU100=m -CONFIG_USB_SERIAL_QT2=m -CONFIG_USB_SERIAL_UPD78F0730=m -# CONFIG_USB_SERIAL_XR is not set -CONFIG_USB_SERIAL_DEBUG=m - -# -# USB Miscellaneous drivers -# -CONFIG_USB_USS720=m -CONFIG_USB_EMI62=m -CONFIG_USB_EMI26=m -CONFIG_USB_ADUTUX=m -CONFIG_USB_SEVSEG=m -CONFIG_USB_LEGOTOWER=m -CONFIG_USB_LCD=m -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set -CONFIG_USB_IDMOUSE=m -CONFIG_USB_APPLEDISPLAY=m -# CONFIG_APPLE_MFI_FASTCHARGE is not set -CONFIG_USB_SISUSBVGA=m -CONFIG_USB_LD=m -# CONFIG_USB_TRANCEVIBRATOR is not set -CONFIG_USB_IOWARRIOR=m -# CONFIG_USB_TEST is not set -# CONFIG_USB_EHSET_TEST_FIXTURE is not set -CONFIG_USB_ISIGHTFW=m -# CONFIG_USB_YUREX is not set -CONFIG_USB_EZUSB_FX2=m -# CONFIG_USB_HUB_USB251XB is not set -CONFIG_USB_HSIC_USB3503=m -# CONFIG_USB_HSIC_USB4604 is not set -# CONFIG_USB_LINK_LAYER_TEST is not set -# CONFIG_USB_CHAOSKEY is not set -CONFIG_USB_ATM=m -CONFIG_USB_SPEEDTOUCH=m -CONFIG_USB_CXACRU=m -CONFIG_USB_UEAGLEATM=m -CONFIG_USB_XUSBATM=m - -# -# USB Physical Layer drivers -# -# CONFIG_NOP_USB_XCEIV is not set -# CONFIG_USB_GPIO_VBUS is not set -# CONFIG_USB_ISP1301 is not set -# end of USB Physical Layer drivers - -# CONFIG_USB_GADGET is not set -CONFIG_TYPEC=y -CONFIG_TYPEC_TCPM=y -# CONFIG_TYPEC_TCPCI is not set -CONFIG_TYPEC_FUSB302=m -CONFIG_TYPEC_UCSI=y -# CONFIG_UCSI_CCG is not set -CONFIG_UCSI_ACPI=y -# CONFIG_UCSI_STM32G0 is not set -CONFIG_TYPEC_TPS6598X=m -# CONFIG_TYPEC_ANX7411 is not set -# CONFIG_TYPEC_RT1719 is not set -# CONFIG_TYPEC_HD3SS3220 is not set -# CONFIG_TYPEC_STUSB160X is not set -# CONFIG_TYPEC_WUSB3801 is not set - -# -# USB Type-C Multiplexer/DeMultiplexer Switch support -# -# CONFIG_TYPEC_MUX_FSA4480 is not set -# CONFIG_TYPEC_MUX_GPIO_SBU is not set 
-CONFIG_TYPEC_MUX_PI3USB30532=m -# CONFIG_TYPEC_MUX_NB7VPQ904M is not set -# end of USB Type-C Multiplexer/DeMultiplexer Switch support - -# -# USB Type-C Alternate Mode drivers -# -CONFIG_TYPEC_DP_ALTMODE=m -# CONFIG_TYPEC_NVIDIA_ALTMODE is not set -# end of USB Type-C Alternate Mode drivers - -CONFIG_USB_ROLE_SWITCH=y -CONFIG_USB_ROLES_INTEL_XHCI=y -CONFIG_MMC=m -CONFIG_MMC_BLOCK=m -CONFIG_MMC_BLOCK_MINORS=8 -CONFIG_SDIO_UART=m -# CONFIG_MMC_TEST is not set - -# -# MMC/SD/SDIO Host Controller Drivers -# -# CONFIG_MMC_DEBUG is not set -CONFIG_MMC_SDHCI=m -CONFIG_MMC_SDHCI_IO_ACCESSORS=y -CONFIG_MMC_SDHCI_PCI=m -CONFIG_MMC_RICOH_MMC=y -CONFIG_MMC_SDHCI_ACPI=m -CONFIG_MMC_SDHCI_PLTFM=m -# CONFIG_MMC_SDHCI_F_SDH30 is not set -# CONFIG_MMC_WBSD is not set -CONFIG_MMC_TIFM_SD=m -# CONFIG_MMC_SPI is not set -CONFIG_MMC_CB710=m -CONFIG_MMC_VIA_SDMMC=m -CONFIG_MMC_VUB300=m -CONFIG_MMC_USHC=m -# CONFIG_MMC_USDHI6ROL0 is not set -CONFIG_MMC_REALTEK_PCI=m -CONFIG_MMC_REALTEK_USB=m -CONFIG_MMC_CQHCI=m -# CONFIG_MMC_HSQ is not set -# CONFIG_MMC_TOSHIBA_PCI is not set -# CONFIG_MMC_MTK is not set -# CONFIG_MMC_SDHCI_XENON is not set -# CONFIG_SCSI_UFSHCD is not set -CONFIG_MEMSTICK=m -# CONFIG_MEMSTICK_DEBUG is not set - -# -# MemoryStick drivers -# -# CONFIG_MEMSTICK_UNSAFE_RESUME is not set -CONFIG_MSPRO_BLOCK=m -# CONFIG_MS_BLOCK is not set - -# -# MemoryStick Host Controller Drivers -# -CONFIG_MEMSTICK_TIFM_MS=m -CONFIG_MEMSTICK_JMICRON_38X=m -CONFIG_MEMSTICK_R592=m -CONFIG_MEMSTICK_REALTEK_PCI=m -CONFIG_MEMSTICK_REALTEK_USB=m -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -# CONFIG_LEDS_CLASS_FLASH is not set -# CONFIG_LEDS_CLASS_MULTICOLOR is not set -# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set - -# -# LED drivers -# -# CONFIG_LEDS_APU is not set -# CONFIG_LEDS_AW200XX is not set -CONFIG_LEDS_LM3530=m -# CONFIG_LEDS_LM3532 is not set -# CONFIG_LEDS_LM3642 is not set -# CONFIG_LEDS_PCA9532 is not set -# CONFIG_LEDS_GPIO is not set -CONFIG_LEDS_LP3944=m -# CONFIG_LEDS_LP3952 is 
not set -# CONFIG_LEDS_LP50XX is not set -# CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_PCA963X is not set -# CONFIG_LEDS_PCA995X is not set -# CONFIG_LEDS_DAC124S085 is not set -# CONFIG_LEDS_PWM is not set -# CONFIG_LEDS_BD2606MVV is not set -# CONFIG_LEDS_BD2802 is not set -CONFIG_LEDS_INTEL_SS4200=m -# CONFIG_LEDS_LT3593 is not set -# CONFIG_LEDS_TCA6507 is not set -# CONFIG_LEDS_TLC591XX is not set -# CONFIG_LEDS_LM355x is not set -# CONFIG_LEDS_IS31FL319X is not set - -# -# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) -# -CONFIG_LEDS_BLINKM=m -CONFIG_LEDS_MLXCPLD=m -# CONFIG_LEDS_MLXREG is not set -# CONFIG_LEDS_USER is not set -# CONFIG_LEDS_NIC78BX is not set - -# -# Flash and Torch LED drivers -# - -# -# RGB LED drivers -# - -# -# LED Triggers -# -CONFIG_LEDS_TRIGGERS=y -CONFIG_LEDS_TRIGGER_TIMER=m -CONFIG_LEDS_TRIGGER_ONESHOT=m -CONFIG_LEDS_TRIGGER_DISK=y -# CONFIG_LEDS_TRIGGER_MTD is not set -CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_LEDS_TRIGGER_BACKLIGHT=m -# CONFIG_LEDS_TRIGGER_CPU is not set -# CONFIG_LEDS_TRIGGER_ACTIVITY is not set -CONFIG_LEDS_TRIGGER_DEFAULT_ON=m - -# -# iptables trigger is under Netfilter config (LED target) -# -CONFIG_LEDS_TRIGGER_TRANSIENT=m -CONFIG_LEDS_TRIGGER_CAMERA=m -# CONFIG_LEDS_TRIGGER_PANIC is not set -# CONFIG_LEDS_TRIGGER_NETDEV is not set -# CONFIG_LEDS_TRIGGER_PATTERN is not set -CONFIG_LEDS_TRIGGER_AUDIO=m -# CONFIG_LEDS_TRIGGER_TTY is not set - -# -# Simple LED drivers -# -# CONFIG_ACCESSIBILITY is not set -CONFIG_INFINIBAND=m -CONFIG_INFINIBAND_USER_MAD=m -CONFIG_INFINIBAND_USER_ACCESS=m -CONFIG_INFINIBAND_USER_MEM=y -CONFIG_INFINIBAND_ON_DEMAND_PAGING=y -CONFIG_INFINIBAND_ADDR_TRANS=y -CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y -CONFIG_INFINIBAND_VIRT_DMA=y -CONFIG_INFINIBAND_BNXT_RE=m -CONFIG_INFINIBAND_CXGB4=m -# CONFIG_INFINIBAND_EFA is not set -CONFIG_INFINIBAND_ERDMA=m -CONFIG_INFINIBAND_HFI1=m -# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set -# CONFIG_SDMA_VERBOSITY is not set -# 
CONFIG_INFINIBAND_IRDMA is not set -CONFIG_MLX4_INFINIBAND=m -CONFIG_MLX5_INFINIBAND=m -# CONFIG_INFINIBAND_MTHCA is not set -# CONFIG_INFINIBAND_OCRDMA is not set -CONFIG_INFINIBAND_QEDR=m -# CONFIG_INFINIBAND_QIB is not set -CONFIG_INFINIBAND_USNIC=m -CONFIG_INFINIBAND_VMWARE_PVRDMA=m -CONFIG_INFINIBAND_RDMAVT=m -CONFIG_RDMA_RXE=m -CONFIG_RDMA_SIW=m -CONFIG_INFINIBAND_IPOIB=m -CONFIG_INFINIBAND_IPOIB_CM=y -CONFIG_INFINIBAND_IPOIB_DEBUG=y -# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set -CONFIG_INFINIBAND_SRP=m -CONFIG_INFINIBAND_SRPT=m -CONFIG_INFINIBAND_ISER=m -CONFIG_INFINIBAND_ISERT=m -# CONFIG_INFINIBAND_RTRS_CLIENT is not set -# CONFIG_INFINIBAND_RTRS_SERVER is not set -CONFIG_INFINIBAND_OPA_VNIC=m -CONFIG_EDAC_ATOMIC_SCRUB=y -CONFIG_EDAC_SUPPORT=y -CONFIG_EDAC=y -CONFIG_EDAC_LEGACY_SYSFS=y -CONFIG_EDAC_DEBUG=y -CONFIG_EDAC_DECODE_MCE=m -CONFIG_EDAC_GHES=y -CONFIG_EDAC_AMD64=m -CONFIG_EDAC_E752X=m -CONFIG_EDAC_I82975X=m -CONFIG_EDAC_I3000=m -CONFIG_EDAC_I3200=m -CONFIG_EDAC_IE31200=m -CONFIG_EDAC_X38=m -CONFIG_EDAC_I5400=m -CONFIG_EDAC_I7CORE=m -CONFIG_EDAC_I5100=m -CONFIG_EDAC_I7300=m -CONFIG_EDAC_SBRIDGE=m -CONFIG_EDAC_SKX=m -CONFIG_EDAC_I10NM=m -CONFIG_EDAC_PND2=m -# CONFIG_EDAC_IGEN6 is not set -CONFIG_RTC_LIB=y -CONFIG_RTC_MC146818_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -CONFIG_RTC_SYSTOHC=y -CONFIG_RTC_SYSTOHC_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set -CONFIG_RTC_NVMEM=y - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -# CONFIG_RTC_DRV_ABB5ZES3 is not set -# CONFIG_RTC_DRV_ABEOZ9 is not set -# CONFIG_RTC_DRV_ABX80X is not set -CONFIG_RTC_DRV_DS1307=m -# CONFIG_RTC_DRV_DS1307_CENTURY is not set -CONFIG_RTC_DRV_DS1374=m -# CONFIG_RTC_DRV_DS1374_WDT is not set -CONFIG_RTC_DRV_DS1672=m -CONFIG_RTC_DRV_MAX6900=m -CONFIG_RTC_DRV_RS5C372=m 
-CONFIG_RTC_DRV_ISL1208=m -CONFIG_RTC_DRV_ISL12022=m -CONFIG_RTC_DRV_X1205=m -CONFIG_RTC_DRV_PCF8523=m -# CONFIG_RTC_DRV_PCF85063 is not set -# CONFIG_RTC_DRV_PCF85363 is not set -CONFIG_RTC_DRV_PCF8563=m -CONFIG_RTC_DRV_PCF8583=m -CONFIG_RTC_DRV_M41T80=m -CONFIG_RTC_DRV_M41T80_WDT=y -CONFIG_RTC_DRV_BQ32K=m -# CONFIG_RTC_DRV_S35390A is not set -CONFIG_RTC_DRV_FM3130=m -# CONFIG_RTC_DRV_RX8010 is not set -CONFIG_RTC_DRV_RX8581=m -CONFIG_RTC_DRV_RX8025=m -CONFIG_RTC_DRV_EM3027=m -# CONFIG_RTC_DRV_RV3028 is not set -# CONFIG_RTC_DRV_RV3032 is not set -# CONFIG_RTC_DRV_RV8803 is not set -# CONFIG_RTC_DRV_SD3078 is not set - -# -# SPI RTC drivers -# -# CONFIG_RTC_DRV_M41T93 is not set -# CONFIG_RTC_DRV_M41T94 is not set -# CONFIG_RTC_DRV_DS1302 is not set -# CONFIG_RTC_DRV_DS1305 is not set -# CONFIG_RTC_DRV_DS1343 is not set -# CONFIG_RTC_DRV_DS1347 is not set -# CONFIG_RTC_DRV_DS1390 is not set -# CONFIG_RTC_DRV_MAX6916 is not set -# CONFIG_RTC_DRV_R9701 is not set -# CONFIG_RTC_DRV_RX4581 is not set -# CONFIG_RTC_DRV_RS5C348 is not set -# CONFIG_RTC_DRV_MAX6902 is not set -# CONFIG_RTC_DRV_PCF2123 is not set -# CONFIG_RTC_DRV_MCP795 is not set -CONFIG_RTC_I2C_AND_SPI=y - -# -# SPI and I2C RTC drivers -# -CONFIG_RTC_DRV_DS3232=m -CONFIG_RTC_DRV_DS3232_HWMON=y -# CONFIG_RTC_DRV_PCF2127 is not set -CONFIG_RTC_DRV_RV3029C2=m -# CONFIG_RTC_DRV_RV3029_HWMON is not set -# CONFIG_RTC_DRV_RX6110 is not set - -# -# Platform RTC drivers -# -CONFIG_RTC_DRV_CMOS=y -CONFIG_RTC_DRV_DS1286=m -CONFIG_RTC_DRV_DS1511=m -CONFIG_RTC_DRV_DS1553=m -# CONFIG_RTC_DRV_DS1685_FAMILY is not set -CONFIG_RTC_DRV_DS1742=m -CONFIG_RTC_DRV_DS2404=m -CONFIG_RTC_DRV_STK17TA8=m -# CONFIG_RTC_DRV_M48T86 is not set -CONFIG_RTC_DRV_M48T35=m -CONFIG_RTC_DRV_M48T59=m -CONFIG_RTC_DRV_MSM6242=m -CONFIG_RTC_DRV_RP5C01=m - -# -# on-CPU RTC drivers -# -# CONFIG_RTC_DRV_FTRTC010 is not set - -# -# HID Sensor RTC drivers -# -# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set -# CONFIG_RTC_DRV_GOLDFISH is not set 
-CONFIG_DMADEVICES=y -CONFIG_DMADEVICES_DEBUG=y -CONFIG_DMADEVICES_VDEBUG=y - -# -# DMA Devices -# -CONFIG_DMA_ENGINE=y -CONFIG_DMA_VIRTUAL_CHANNELS=y -CONFIG_DMA_ACPI=y -# CONFIG_ALTERA_MSGDMA is not set -CONFIG_INTEL_IDMA64=m -CONFIG_INTEL_IDXD_BUS=m -CONFIG_INTEL_IDXD=m -# CONFIG_INTEL_IDXD_COMPAT is not set -CONFIG_INTEL_IDXD_SVM=y -# CONFIG_INTEL_IDXD_PERFMON is not set -CONFIG_INTEL_IOATDMA=m -# CONFIG_PLX_DMA is not set -# CONFIG_XILINX_DMA is not set -# CONFIG_XILINX_XDMA is not set -CONFIG_AMD_PTDMA=y -# CONFIG_QCOM_HIDMA_MGMT is not set -# CONFIG_QCOM_HIDMA is not set -CONFIG_DW_DMAC_CORE=y -CONFIG_DW_DMAC=m -CONFIG_DW_DMAC_PCI=y -# CONFIG_DW_EDMA is not set -CONFIG_HSU_DMA=y -# CONFIG_SF_PDMA is not set -# CONFIG_INTEL_LDMA is not set - -# -# DMA Clients -# -CONFIG_ASYNC_TX_DMA=y -CONFIG_DMATEST=m -CONFIG_DMA_ENGINE_RAID=y - -# -# DMABUF options -# -CONFIG_SYNC_FILE=y -# CONFIG_SW_SYNC is not set -# CONFIG_UDMABUF is not set -# CONFIG_DMABUF_MOVE_NOTIFY is not set -CONFIG_DMABUF_DEBUG=y -# CONFIG_DMABUF_SELFTESTS is not set -# CONFIG_DMABUF_HEAPS is not set -# CONFIG_DMABUF_SYSFS_STATS is not set -# end of DMABUF options - -CONFIG_DCA=m -CONFIG_UIO=m -CONFIG_UIO_CIF=m -CONFIG_UIO_PDRV_GENIRQ=m -# CONFIG_UIO_DMEM_GENIRQ is not set -CONFIG_UIO_AEC=m -CONFIG_UIO_SERCOS3=m -CONFIG_UIO_PCI_GENERIC=m -# CONFIG_UIO_NETX is not set -# CONFIG_UIO_PRUSS is not set -# CONFIG_UIO_MF624 is not set -CONFIG_UIO_HV_GENERIC=m -CONFIG_VFIO=m -CONFIG_VFIO_GROUP=y -CONFIG_VFIO_CONTAINER=y -CONFIG_VFIO_IOMMU_TYPE1=m -CONFIG_VFIO_NOIOMMU=y -CONFIG_VFIO_VIRQFD=y - -# -# VFIO support for PCI devices -# -CONFIG_VFIO_PCI_CORE=m -CONFIG_VFIO_PCI_MMAP=y -CONFIG_VFIO_PCI_INTX=y -CONFIG_VFIO_PCI=m -# CONFIG_VFIO_PCI_VGA is not set -# CONFIG_VFIO_PCI_IGD is not set -# CONFIG_MLX5_VFIO_PCI is not set -CONFIG_QAT_VFIO_PCI=m -# end of VFIO support for PCI devices - -CONFIG_VFIO_MDEV=m -CONFIG_IRQ_BYPASS_MANAGER=m -CONFIG_VIRT_DRIVERS=y -CONFIG_VMGENID=y -# CONFIG_VBOXGUEST is not set -# 
CONFIG_NITRO_ENCLAVES is not set -CONFIG_EFI_SECRET=m -CONFIG_SEV_GUEST=m -CONFIG_TDX_GUEST_DRIVER=m -CONFIG_CSV_GUEST=m -CONFIG_VIRTIO_ANCHOR=y -CONFIG_VIRTIO=y -CONFIG_VIRTIO_PCI_LIB=y -CONFIG_VIRTIO_PCI_LIB_LEGACY=y -CONFIG_VIRTIO_MENU=y -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_PCI_LEGACY=y -CONFIG_VIRTIO_PMEM=m -CONFIG_VIRTIO_BALLOON=m -CONFIG_VIRTIO_MEM=m -CONFIG_VIRTIO_INPUT=m -CONFIG_VIRTIO_MMIO=m -CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y -CONFIG_VIRTIO_DMA_SHARED_BUFFER=m -# CONFIG_VDPA is not set -CONFIG_VHOST_IOTLB=m -CONFIG_VHOST_TASK=y -CONFIG_VHOST=m -CONFIG_VHOST_MENU=y -CONFIG_VHOST_NET=m -CONFIG_VHOST_SCSI=m -CONFIG_VHOST_VSOCK=m -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set - -# -# Microsoft Hyper-V guest support -# -CONFIG_HYPERV=m -# CONFIG_HYPERV_VTL_MODE is not set -CONFIG_HYPERV_TIMER=y -CONFIG_HYPERV_UTILS=m -CONFIG_HYPERV_BALLOON=m -# end of Microsoft Hyper-V guest support - -# -# Xen driver support -# -# CONFIG_XEN_BALLOON is not set -CONFIG_XEN_DEV_EVTCHN=m -# CONFIG_XEN_BACKEND is not set -CONFIG_XENFS=m -CONFIG_XEN_COMPAT_XENFS=y -CONFIG_XEN_SYS_HYPERVISOR=y -CONFIG_XEN_XENBUS_FRONTEND=y -# CONFIG_XEN_GNTDEV is not set -# CONFIG_XEN_GRANT_DEV_ALLOC is not set -# CONFIG_XEN_GRANT_DMA_ALLOC is not set -# CONFIG_XEN_PVCALLS_FRONTEND is not set -CONFIG_XEN_PRIVCMD=m -CONFIG_XEN_EFI=y -CONFIG_XEN_AUTO_XLATE=y -CONFIG_XEN_ACPI=y -# CONFIG_XEN_UNPOPULATED_ALLOC is not set -# CONFIG_XEN_VIRTIO is not set -# end of Xen driver support - -# CONFIG_GREYBUS is not set -# CONFIG_COMEDI is not set -# CONFIG_STAGING is not set -# CONFIG_CHROME_PLATFORMS is not set -CONFIG_MELLANOX_PLATFORM=y -CONFIG_MLXREG_HOTPLUG=m -# CONFIG_MLXREG_IO is not set -# CONFIG_MLXREG_LC is not set -# CONFIG_NVSW_SN2201 is not set -CONFIG_SURFACE_PLATFORMS=y -# CONFIG_SURFACE3_WMI is not set -# CONFIG_SURFACE_3_POWER_OPREGION is not set -# CONFIG_SURFACE_GPE is not set -# CONFIG_SURFACE_HOTPLUG is not set -# CONFIG_SURFACE_PRO3_BUTTON is not set -CONFIG_X86_PLATFORM_DEVICES=y 
-CONFIG_ACPI_WMI=m -CONFIG_WMI_BMOF=m -# CONFIG_HUAWEI_WMI is not set -# CONFIG_UV_SYSFS is not set -CONFIG_MXM_WMI=m -# CONFIG_NVIDIA_WMI_EC_BACKLIGHT is not set -# CONFIG_XIAOMI_WMI is not set -# CONFIG_GIGABYTE_WMI is not set -# CONFIG_YOGABOOK is not set -CONFIG_ACERHDF=m -# CONFIG_ACER_WIRELESS is not set -CONFIG_ACER_WMI=m -# CONFIG_AMD_PMF is not set -# CONFIG_AMD_PMC is not set -# CONFIG_AMD_HSMP is not set -# CONFIG_ADV_SWBUTTON is not set -CONFIG_APPLE_GMUX=m -CONFIG_ASUS_LAPTOP=m -# CONFIG_ASUS_WIRELESS is not set -CONFIG_ASUS_WMI=m -CONFIG_ASUS_NB_WMI=m -# CONFIG_ASUS_TF103C_DOCK is not set -# CONFIG_MERAKI_MX100 is not set -CONFIG_EEEPC_LAPTOP=m -CONFIG_EEEPC_WMI=m -# CONFIG_X86_PLATFORM_DRIVERS_DELL is not set -CONFIG_AMILO_RFKILL=m -CONFIG_FUJITSU_LAPTOP=m -CONFIG_FUJITSU_TABLET=m -# CONFIG_GPD_POCKET_FAN is not set -# CONFIG_X86_PLATFORM_DRIVERS_HP is not set -# CONFIG_WIRELESS_HOTKEY is not set -# CONFIG_IBM_RTL is not set -CONFIG_IDEAPAD_LAPTOP=m -# CONFIG_LENOVO_YMC is not set -CONFIG_SENSORS_HDAPS=m -CONFIG_THINKPAD_ACPI=m -# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set -# CONFIG_THINKPAD_ACPI_DEBUG is not set -# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set -CONFIG_THINKPAD_ACPI_VIDEO=y -CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y -# CONFIG_THINKPAD_LMI is not set -# CONFIG_INTEL_ATOMISP2_PM is not set -CONFIG_INTEL_IFS=m -# CONFIG_INTEL_SAR_INT1092 is not set -CONFIG_INTEL_PMC_CORE=m -CONFIG_INTEL_PMT_CLASS=m -CONFIG_INTEL_PMT_TELEMETRY=m -CONFIG_INTEL_PMT_CRASHLOG=m - -# -# Intel Speed Select Technology interface support -# -CONFIG_INTEL_SPEED_SELECT_TPMI=m -CONFIG_INTEL_SPEED_SELECT_INTERFACE=m -# end of Intel Speed Select Technology interface support - -CONFIG_INTEL_WMI=y -# CONFIG_INTEL_WMI_SBL_FW_UPDATE is not set -CONFIG_INTEL_WMI_THUNDERBOLT=m - -# -# Intel Uncore Frequency Control -# -# CONFIG_INTEL_UNCORE_FREQ_CONTROL is not set -# end of Intel Uncore Frequency Control - -CONFIG_INTEL_HID_EVENT=m -CONFIG_INTEL_VBTN=m -# 
CONFIG_INTEL_INT0002_VGPIO is not set -CONFIG_INTEL_OAKTRAIL=m -# CONFIG_INTEL_ISHTP_ECLITE is not set -# CONFIG_INTEL_PUNIT_IPC is not set -CONFIG_INTEL_RST=m -# CONFIG_INTEL_SDSI is not set -# CONFIG_INTEL_SMARTCONNECT is not set -CONFIG_INTEL_TPMI=m -CONFIG_INTEL_TURBO_MAX_3=y -CONFIG_INTEL_VSEC=y -# CONFIG_MSI_EC is not set -CONFIG_MSI_LAPTOP=m -CONFIG_MSI_WMI=m -# CONFIG_PCENGINES_APU2 is not set -# CONFIG_BARCO_P50_GPIO is not set -CONFIG_SAMSUNG_LAPTOP=m -CONFIG_SAMSUNG_Q10=m -# CONFIG_ACPI_TOSHIBA is not set -CONFIG_TOSHIBA_BT_RFKILL=m -# CONFIG_TOSHIBA_HAPS is not set -# CONFIG_TOSHIBA_WMI is not set -CONFIG_ACPI_CMPC=m -CONFIG_COMPAL_LAPTOP=m -# CONFIG_LG_LAPTOP is not set -CONFIG_PANASONIC_LAPTOP=m -CONFIG_SONY_LAPTOP=m -CONFIG_SONYPI_COMPAT=y -# CONFIG_SYSTEM76_ACPI is not set -CONFIG_TOPSTAR_LAPTOP=m -# CONFIG_SERIAL_MULTI_INSTANTIATE is not set -CONFIG_MLX_PLATFORM=m -CONFIG_INTEL_IPS=m -# CONFIG_INTEL_SCU_PCI is not set -# CONFIG_INTEL_SCU_PLATFORM is not set -# CONFIG_SIEMENS_SIMATIC_IPC is not set -# CONFIG_WINMATE_FM07_KEYS is not set -# CONFIG_SEL3350_PLATFORM is not set -CONFIG_P2SB=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_CLK_PREPARE=y -CONFIG_COMMON_CLK=y -# CONFIG_LMK04832 is not set -# CONFIG_COMMON_CLK_MAX9485 is not set -# CONFIG_COMMON_CLK_SI5341 is not set -# CONFIG_COMMON_CLK_SI5351 is not set -# CONFIG_COMMON_CLK_SI544 is not set -# CONFIG_COMMON_CLK_CDCE706 is not set -# CONFIG_COMMON_CLK_CS2000_CP is not set -# CONFIG_COMMON_CLK_PWM is not set -# CONFIG_XILINX_VCU is not set -CONFIG_HWSPINLOCK=y - -# -# Clock Source drivers -# -CONFIG_CLKEVT_I8253=y -CONFIG_I8253_LOCK=y -CONFIG_CLKBLD_I8253=y -# end of Clock Source drivers - -CONFIG_MAILBOX=y -CONFIG_PCC=y -# CONFIG_ALTERA_MBOX is not set -CONFIG_IOMMU_IOVA=y -CONFIG_IOMMU_API=y -CONFIG_IOMMU_SUPPORT=y - -# -# Generic IOMMU Pagetable Support -# -CONFIG_IOMMU_IO_PGTABLE=y -# end of Generic IOMMU Pagetable Support - -CONFIG_IOMMU_DEBUGFS=y -# CONFIG_IOMMU_DEFAULT_DMA_STRICT is not set -# 
CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set -CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y -CONFIG_IOMMU_DMA=y -CONFIG_IOMMU_SVA=y -CONFIG_AMD_IOMMU=y -CONFIG_AMD_IOMMU_V2=m -# CONFIG_AMD_IOMMU_DEBUGFS is not set -CONFIG_DMAR_TABLE=y -CONFIG_DMAR_PERF=y -CONFIG_DMAR_DEBUG=y -CONFIG_INTEL_IOMMU=y -CONFIG_INTEL_IOMMU_DEBUGFS=y -CONFIG_INTEL_IOMMU_SVM=y -# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set -CONFIG_INTEL_IOMMU_FLOPPY_WA=y -CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y -CONFIG_INTEL_IOMMU_PERF_EVENTS=y -CONFIG_IOMMUFD=m -CONFIG_IRQ_REMAP=y -CONFIG_HYPERV_IOMMU=y -# CONFIG_VIRTIO_IOMMU is not set - -# -# Remoteproc drivers -# -# CONFIG_REMOTEPROC is not set -# end of Remoteproc drivers - -# -# Rpmsg drivers -# -# CONFIG_RPMSG_QCOM_GLINK_RPM is not set -# CONFIG_RPMSG_VIRTIO is not set -# end of Rpmsg drivers - -# CONFIG_SOUNDWIRE is not set - -# -# SOC (System On Chip) specific Drivers -# - -# -# Amlogic SoC drivers -# -# end of Amlogic SoC drivers - -# -# Broadcom SoC drivers -# -# end of Broadcom SoC drivers - -# -# NXP/Freescale QorIQ SoC drivers -# -# end of NXP/Freescale QorIQ SoC drivers - -# -# fujitsu SoC drivers -# -# end of fujitsu SoC drivers - -# -# i.MX SoC drivers -# -# end of i.MX SoC drivers - -# -# Enable LiteX SoC Builder specific drivers -# -# end of Enable LiteX SoC Builder specific drivers - -# CONFIG_WPCM450_SOC is not set - -# -# Qualcomm SoC drivers -# -# end of Qualcomm SoC drivers - -# CONFIG_SOC_TI is not set - -# -# Xilinx SoC drivers -# -# end of Xilinx SoC drivers -# end of SOC (System On Chip) specific Drivers - -# CONFIG_PM_DEVFREQ is not set -# CONFIG_EXTCON is not set -# CONFIG_MEMORY is not set -CONFIG_IIO=m -CONFIG_IIO_BUFFER=y -# CONFIG_IIO_BUFFER_CB is not set -# CONFIG_IIO_BUFFER_DMA is not set -# CONFIG_IIO_BUFFER_DMAENGINE is not set -# CONFIG_IIO_BUFFER_HW_CONSUMER is not set -CONFIG_IIO_KFIFO_BUF=m -CONFIG_IIO_TRIGGERED_BUFFER=m -# CONFIG_IIO_CONFIGFS is not set -CONFIG_IIO_TRIGGER=y -CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 -# 
CONFIG_IIO_SW_DEVICE is not set -# CONFIG_IIO_SW_TRIGGER is not set -# CONFIG_IIO_TRIGGERED_EVENT is not set - -# -# Accelerometers -# -# CONFIG_ADIS16201 is not set -# CONFIG_ADIS16209 is not set -# CONFIG_ADXL313_I2C is not set -# CONFIG_ADXL313_SPI is not set -# CONFIG_ADXL345_I2C is not set -# CONFIG_ADXL345_SPI is not set -# CONFIG_ADXL355_I2C is not set -# CONFIG_ADXL355_SPI is not set -# CONFIG_ADXL367_SPI is not set -# CONFIG_ADXL367_I2C is not set -# CONFIG_ADXL372_SPI is not set -# CONFIG_ADXL372_I2C is not set -# CONFIG_BMA180 is not set -# CONFIG_BMA220 is not set -# CONFIG_BMA400 is not set -# CONFIG_BMC150_ACCEL is not set -# CONFIG_BMI088_ACCEL is not set -# CONFIG_DA280 is not set -# CONFIG_DA311 is not set -# CONFIG_DMARD06 is not set -# CONFIG_DMARD09 is not set -# CONFIG_DMARD10 is not set -# CONFIG_FXLS8962AF_I2C is not set -# CONFIG_FXLS8962AF_SPI is not set -CONFIG_HID_SENSOR_ACCEL_3D=m -# CONFIG_IIO_ST_ACCEL_3AXIS is not set -# CONFIG_IIO_KX022A_SPI is not set -# CONFIG_IIO_KX022A_I2C is not set -# CONFIG_KXSD9 is not set -# CONFIG_KXCJK1013 is not set -# CONFIG_MC3230 is not set -# CONFIG_MMA7455_I2C is not set -# CONFIG_MMA7455_SPI is not set -# CONFIG_MMA7660 is not set -# CONFIG_MMA8452 is not set -# CONFIG_MMA9551 is not set -# CONFIG_MMA9553 is not set -# CONFIG_MSA311 is not set -# CONFIG_MXC4005 is not set -# CONFIG_MXC6255 is not set -# CONFIG_SCA3000 is not set -# CONFIG_SCA3300 is not set -# CONFIG_STK8312 is not set -# CONFIG_STK8BA50 is not set -# end of Accelerometers - -# -# Analog to digital converters -# -# CONFIG_AD4130 is not set -# CONFIG_AD7091R5 is not set -# CONFIG_AD7124 is not set -# CONFIG_AD7192 is not set -# CONFIG_AD7266 is not set -# CONFIG_AD7280 is not set -# CONFIG_AD7291 is not set -# CONFIG_AD7292 is not set -# CONFIG_AD7298 is not set -# CONFIG_AD7476 is not set -# CONFIG_AD7606_IFACE_PARALLEL is not set -# CONFIG_AD7606_IFACE_SPI is not set -# CONFIG_AD7766 is not set -# CONFIG_AD7768_1 is not set -# 
CONFIG_AD7780 is not set -# CONFIG_AD7791 is not set -# CONFIG_AD7793 is not set -# CONFIG_AD7887 is not set -# CONFIG_AD7923 is not set -# CONFIG_AD7949 is not set -# CONFIG_AD799X is not set -# CONFIG_ENVELOPE_DETECTOR is not set -# CONFIG_HI8435 is not set -# CONFIG_HX711 is not set -# CONFIG_INA2XX_ADC is not set -# CONFIG_LTC2471 is not set -# CONFIG_LTC2485 is not set -# CONFIG_LTC2496 is not set -# CONFIG_LTC2497 is not set -# CONFIG_MAX1027 is not set -# CONFIG_MAX11100 is not set -# CONFIG_MAX1118 is not set -# CONFIG_MAX11205 is not set -# CONFIG_MAX11410 is not set -# CONFIG_MAX1241 is not set -# CONFIG_MAX1363 is not set -# CONFIG_MAX9611 is not set -# CONFIG_MCP320X is not set -# CONFIG_MCP3422 is not set -# CONFIG_MCP3911 is not set -# CONFIG_NAU7802 is not set -# CONFIG_RICHTEK_RTQ6056 is not set -# CONFIG_SD_ADC_MODULATOR is not set -# CONFIG_TI_ADC081C is not set -# CONFIG_TI_ADC0832 is not set -# CONFIG_TI_ADC084S021 is not set -# CONFIG_TI_ADC12138 is not set -# CONFIG_TI_ADC108S102 is not set -# CONFIG_TI_ADC128S052 is not set -# CONFIG_TI_ADC161S626 is not set -# CONFIG_TI_ADS1015 is not set -# CONFIG_TI_ADS7924 is not set -# CONFIG_TI_ADS1100 is not set -# CONFIG_TI_ADS7950 is not set -# CONFIG_TI_ADS8344 is not set -# CONFIG_TI_ADS8688 is not set -# CONFIG_TI_ADS124S08 is not set -# CONFIG_TI_ADS131E08 is not set -# CONFIG_TI_LMP92064 is not set -# CONFIG_TI_TLC4541 is not set -# CONFIG_TI_TSC2046 is not set -# CONFIG_VF610_ADC is not set -# CONFIG_VIPERBOARD_ADC is not set -# CONFIG_XILINX_XADC is not set -# end of Analog to digital converters - -# -# Analog to digital and digital to analog converters -# -# CONFIG_AD74115 is not set -# CONFIG_AD74413R is not set -# end of Analog to digital and digital to analog converters - -# -# Analog Front Ends -# -# CONFIG_IIO_RESCALE is not set -# end of Analog Front Ends - -# -# Amplifiers -# -# CONFIG_AD8366 is not set -# CONFIG_ADA4250 is not set -# CONFIG_HMC425 is not set -# end of Amplifiers - -# 
-# Capacitance to digital converters -# -# CONFIG_AD7150 is not set -# CONFIG_AD7746 is not set -# end of Capacitance to digital converters - -# -# Chemical Sensors -# -# CONFIG_ATLAS_PH_SENSOR is not set -# CONFIG_ATLAS_EZO_SENSOR is not set -# CONFIG_BME680 is not set -# CONFIG_CCS811 is not set -# CONFIG_IAQCORE is not set -# CONFIG_SCD30_CORE is not set -# CONFIG_SCD4X is not set -# CONFIG_SENSIRION_SGP30 is not set -# CONFIG_SENSIRION_SGP40 is not set -# CONFIG_SPS30_I2C is not set -# CONFIG_SENSEAIR_SUNRISE_CO2 is not set -# CONFIG_VZ89X is not set -# end of Chemical Sensors - -# -# Hid Sensor IIO Common -# -CONFIG_HID_SENSOR_IIO_COMMON=m -CONFIG_HID_SENSOR_IIO_TRIGGER=m -# end of Hid Sensor IIO Common - -# -# IIO SCMI Sensors -# -# end of IIO SCMI Sensors - -# -# SSP Sensor Common -# -# CONFIG_IIO_SSP_SENSORHUB is not set -# end of SSP Sensor Common - -# -# Digital to analog converters -# -# CONFIG_AD3552R is not set -# CONFIG_AD5064 is not set -# CONFIG_AD5360 is not set -# CONFIG_AD5380 is not set -# CONFIG_AD5421 is not set -# CONFIG_AD5446 is not set -# CONFIG_AD5449 is not set -# CONFIG_AD5592R is not set -# CONFIG_AD5593R is not set -# CONFIG_AD5504 is not set -# CONFIG_AD5624R_SPI is not set -# CONFIG_LTC2688 is not set -# CONFIG_AD5686_SPI is not set -# CONFIG_AD5696_I2C is not set -# CONFIG_AD5755 is not set -# CONFIG_AD5758 is not set -# CONFIG_AD5761 is not set -# CONFIG_AD5764 is not set -# CONFIG_AD5766 is not set -# CONFIG_AD5770R is not set -# CONFIG_AD5791 is not set -# CONFIG_AD7293 is not set -# CONFIG_AD7303 is not set -# CONFIG_AD8801 is not set -# CONFIG_DPOT_DAC is not set -# CONFIG_DS4424 is not set -# CONFIG_LTC1660 is not set -# CONFIG_LTC2632 is not set -# CONFIG_M62332 is not set -# CONFIG_MAX517 is not set -# CONFIG_MAX5522 is not set -# CONFIG_MAX5821 is not set -# CONFIG_MCP4725 is not set -# CONFIG_MCP4728 is not set -# CONFIG_MCP4922 is not set -# CONFIG_TI_DAC082S085 is not set -# CONFIG_TI_DAC5571 is not set -# 
CONFIG_TI_DAC7311 is not set -# CONFIG_TI_DAC7612 is not set -# CONFIG_VF610_DAC is not set -# end of Digital to analog converters - -# -# IIO dummy driver -# -# end of IIO dummy driver - -# -# Filters -# -# CONFIG_ADMV8818 is not set -# end of Filters - -# -# Frequency Synthesizers DDS/PLL -# - -# -# Clock Generator/Distribution -# -# CONFIG_AD9523 is not set -# end of Clock Generator/Distribution - -# -# Phase-Locked Loop (PLL) frequency synthesizers -# -# CONFIG_ADF4350 is not set -# CONFIG_ADF4371 is not set -# CONFIG_ADF4377 is not set -# CONFIG_ADMV1013 is not set -# CONFIG_ADMV1014 is not set -# CONFIG_ADMV4420 is not set -# CONFIG_ADRF6780 is not set -# end of Phase-Locked Loop (PLL) frequency synthesizers -# end of Frequency Synthesizers DDS/PLL - -# -# Digital gyroscope sensors -# -# CONFIG_ADIS16080 is not set -# CONFIG_ADIS16130 is not set -# CONFIG_ADIS16136 is not set -# CONFIG_ADIS16260 is not set -# CONFIG_ADXRS290 is not set -# CONFIG_ADXRS450 is not set -# CONFIG_BMG160 is not set -# CONFIG_FXAS21002C is not set -CONFIG_HID_SENSOR_GYRO_3D=m -# CONFIG_MPU3050_I2C is not set -# CONFIG_IIO_ST_GYRO_3AXIS is not set -# CONFIG_ITG3200 is not set -# end of Digital gyroscope sensors - -# -# Health Sensors -# - -# -# Heart Rate Monitors -# -# CONFIG_AFE4403 is not set -# CONFIG_AFE4404 is not set -# CONFIG_MAX30100 is not set -# CONFIG_MAX30102 is not set -# end of Heart Rate Monitors -# end of Health Sensors - -# -# Humidity sensors -# -# CONFIG_AM2315 is not set -# CONFIG_DHT11 is not set -# CONFIG_HDC100X is not set -# CONFIG_HDC2010 is not set -CONFIG_HID_SENSOR_HUMIDITY=m -# CONFIG_HTS221 is not set -# CONFIG_HTU21 is not set -# CONFIG_SI7005 is not set -# CONFIG_SI7020 is not set -# end of Humidity sensors - -# -# Inertial measurement units -# -# CONFIG_ADIS16400 is not set -# CONFIG_ADIS16460 is not set -# CONFIG_ADIS16475 is not set -# CONFIG_ADIS16480 is not set -# CONFIG_BMI160_I2C is not set -# CONFIG_BMI160_SPI is not set -# 
CONFIG_BOSCH_BNO055_I2C is not set -# CONFIG_FXOS8700_I2C is not set -# CONFIG_FXOS8700_SPI is not set -# CONFIG_KMX61 is not set -# CONFIG_INV_ICM42600_I2C is not set -# CONFIG_INV_ICM42600_SPI is not set -# CONFIG_INV_MPU6050_I2C is not set -# CONFIG_INV_MPU6050_SPI is not set -# CONFIG_IIO_ST_LSM6DSX is not set -# CONFIG_IIO_ST_LSM9DS0 is not set -# end of Inertial measurement units - -# -# Light sensors -# -# CONFIG_ACPI_ALS is not set -# CONFIG_ADJD_S311 is not set -# CONFIG_ADUX1020 is not set -# CONFIG_AL3010 is not set -# CONFIG_AL3320A is not set -# CONFIG_APDS9300 is not set -# CONFIG_APDS9960 is not set -# CONFIG_AS73211 is not set -# CONFIG_BH1750 is not set -# CONFIG_BH1780 is not set -# CONFIG_CM32181 is not set -# CONFIG_CM3232 is not set -# CONFIG_CM3323 is not set -# CONFIG_CM3605 is not set -# CONFIG_CM36651 is not set -# CONFIG_GP2AP002 is not set -# CONFIG_GP2AP020A00F is not set -# CONFIG_SENSORS_ISL29018 is not set -# CONFIG_SENSORS_ISL29028 is not set -# CONFIG_ISL29125 is not set -CONFIG_HID_SENSOR_ALS=m -CONFIG_HID_SENSOR_PROX=m -# CONFIG_JSA1212 is not set -# CONFIG_ROHM_BU27008 is not set -# CONFIG_ROHM_BU27034 is not set -# CONFIG_RPR0521 is not set -# CONFIG_LTR501 is not set -# CONFIG_LTRF216A is not set -# CONFIG_LV0104CS is not set -# CONFIG_MAX44000 is not set -# CONFIG_MAX44009 is not set -# CONFIG_NOA1305 is not set -# CONFIG_OPT3001 is not set -# CONFIG_OPT4001 is not set -# CONFIG_PA12203001 is not set -# CONFIG_SI1133 is not set -# CONFIG_SI1145 is not set -# CONFIG_STK3310 is not set -# CONFIG_ST_UVIS25 is not set -# CONFIG_TCS3414 is not set -# CONFIG_TCS3472 is not set -# CONFIG_SENSORS_TSL2563 is not set -# CONFIG_TSL2583 is not set -# CONFIG_TSL2591 is not set -# CONFIG_TSL2772 is not set -# CONFIG_TSL4531 is not set -# CONFIG_US5182D is not set -# CONFIG_VCNL4000 is not set -# CONFIG_VCNL4035 is not set -# CONFIG_VEML6030 is not set -# CONFIG_VEML6070 is not set -# CONFIG_VL6180 is not set -# CONFIG_ZOPT2201 is not set -# 
end of Light sensors - -# -# Magnetometer sensors -# -# CONFIG_AK8974 is not set -# CONFIG_AK8975 is not set -# CONFIG_AK09911 is not set -# CONFIG_BMC150_MAGN_I2C is not set -# CONFIG_BMC150_MAGN_SPI is not set -# CONFIG_MAG3110 is not set -CONFIG_HID_SENSOR_MAGNETOMETER_3D=m -# CONFIG_MMC35240 is not set -# CONFIG_IIO_ST_MAGN_3AXIS is not set -# CONFIG_SENSORS_HMC5843_I2C is not set -# CONFIG_SENSORS_HMC5843_SPI is not set -# CONFIG_SENSORS_RM3100_I2C is not set -# CONFIG_SENSORS_RM3100_SPI is not set -# CONFIG_TI_TMAG5273 is not set -# CONFIG_YAMAHA_YAS530 is not set -# end of Magnetometer sensors - -# -# Multiplexers -# -# CONFIG_IIO_MUX is not set -# end of Multiplexers - -# -# Inclinometer sensors -# -CONFIG_HID_SENSOR_INCLINOMETER_3D=m -CONFIG_HID_SENSOR_DEVICE_ROTATION=m -# end of Inclinometer sensors - -# -# Triggers - standalone -# -# CONFIG_IIO_INTERRUPT_TRIGGER is not set -# CONFIG_IIO_SYSFS_TRIGGER is not set -# end of Triggers - standalone - -# -# Linear and angular position sensors -# -# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set -# end of Linear and angular position sensors - -# -# Digital potentiometers -# -# CONFIG_AD5110 is not set -# CONFIG_AD5272 is not set -# CONFIG_DS1803 is not set -# CONFIG_MAX5432 is not set -# CONFIG_MAX5481 is not set -# CONFIG_MAX5487 is not set -# CONFIG_MCP4018 is not set -# CONFIG_MCP4131 is not set -# CONFIG_MCP4531 is not set -# CONFIG_MCP41010 is not set -# CONFIG_TPL0102 is not set -# CONFIG_X9250 is not set -# end of Digital potentiometers - -# -# Digital potentiostats -# -# CONFIG_LMP91000 is not set -# end of Digital potentiostats - -# -# Pressure sensors -# -# CONFIG_ABP060MG is not set -# CONFIG_BMP280 is not set -# CONFIG_DLHL60D is not set -# CONFIG_DPS310 is not set -CONFIG_HID_SENSOR_PRESS=m -# CONFIG_HP03 is not set -# CONFIG_ICP10100 is not set -# CONFIG_MPL115_I2C is not set -# CONFIG_MPL115_SPI is not set -# CONFIG_MPL3115 is not set -# CONFIG_MPRLS0025PA is not set -# CONFIG_MS5611 is not set 
-# CONFIG_MS5637 is not set -# CONFIG_IIO_ST_PRESS is not set -# CONFIG_T5403 is not set -# CONFIG_HP206C is not set -# CONFIG_ZPA2326 is not set -# end of Pressure sensors - -# -# Lightning sensors -# -# CONFIG_AS3935 is not set -# end of Lightning sensors - -# -# Proximity and distance sensors -# -# CONFIG_IRSD200 is not set -# CONFIG_ISL29501 is not set -# CONFIG_LIDAR_LITE_V2 is not set -# CONFIG_MB1232 is not set -# CONFIG_PING is not set -# CONFIG_RFD77402 is not set -# CONFIG_SRF04 is not set -# CONFIG_SX9310 is not set -# CONFIG_SX9324 is not set -# CONFIG_SX9360 is not set -# CONFIG_SX9500 is not set -# CONFIG_SRF08 is not set -# CONFIG_VCNL3020 is not set -# CONFIG_VL53L0X_I2C is not set -# end of Proximity and distance sensors - -# -# Resolver to digital converters -# -# CONFIG_AD2S90 is not set -# CONFIG_AD2S1200 is not set -# end of Resolver to digital converters - -# -# Temperature sensors -# -# CONFIG_LTC2983 is not set -# CONFIG_MAXIM_THERMOCOUPLE is not set -CONFIG_HID_SENSOR_TEMP=m -# CONFIG_MLX90614 is not set -# CONFIG_MLX90632 is not set -# CONFIG_TMP006 is not set -# CONFIG_TMP007 is not set -# CONFIG_TMP117 is not set -# CONFIG_TSYS01 is not set -# CONFIG_TSYS02D is not set -# CONFIG_MAX30208 is not set -# CONFIG_MAX31856 is not set -# CONFIG_MAX31865 is not set -# end of Temperature sensors - -CONFIG_NTB=m -# CONFIG_NTB_MSI is not set -# CONFIG_NTB_AMD is not set -# CONFIG_NTB_IDT is not set -# CONFIG_NTB_INTEL is not set -# CONFIG_NTB_EPF is not set -# CONFIG_NTB_SWITCHTEC is not set -# CONFIG_NTB_PINGPONG is not set -# CONFIG_NTB_TOOL is not set -# CONFIG_NTB_PERF is not set -# CONFIG_NTB_TRANSPORT is not set -CONFIG_PWM=y -CONFIG_PWM_SYSFS=y -# CONFIG_PWM_DEBUG is not set -# CONFIG_PWM_CLK is not set -# CONFIG_PWM_DWC is not set -CONFIG_PWM_LPSS=m -CONFIG_PWM_LPSS_PCI=m -CONFIG_PWM_LPSS_PLATFORM=m -# CONFIG_PWM_PCA9685 is not set - -# -# IRQ chip support -# -# end of IRQ chip support - -# CONFIG_IPACK_BUS is not set 
-CONFIG_RESET_CONTROLLER=y -# CONFIG_RESET_TI_SYSCON is not set -# CONFIG_RESET_TI_TPS380X is not set - -# -# PHY Subsystem -# -# CONFIG_GENERIC_PHY is not set -# CONFIG_USB_LGM_PHY is not set -# CONFIG_PHY_CAN_TRANSCEIVER is not set - -# -# PHY drivers for Broadcom platforms -# -# CONFIG_BCM_KONA_USB2_PHY is not set -# end of PHY drivers for Broadcom platforms - -# CONFIG_PHY_PXA_28NM_HSIC is not set -# CONFIG_PHY_PXA_28NM_USB2 is not set -# CONFIG_PHY_CPCAP_USB is not set -# CONFIG_PHY_INTEL_LGM_EMMC is not set -# end of PHY Subsystem - -CONFIG_POWERCAP=y -CONFIG_INTEL_RAPL_CORE=m -CONFIG_INTEL_RAPL=m -# CONFIG_INTEL_RAPL_TPMI is not set -CONFIG_IDLE_INJECT=y -# CONFIG_MCB is not set - -# -# Performance monitor support -# -# CONFIG_DWC_PCIE_PMU is not set -# end of Performance monitor support - -CONFIG_RAS=y -CONFIG_RAS_CEC=y -# CONFIG_RAS_CEC_DEBUG is not set -# CONFIG_USB4 is not set - -# -# Android -# -# CONFIG_ANDROID_BINDER_IPC is not set -# end of Android - -CONFIG_LIBNVDIMM=y -CONFIG_BLK_DEV_PMEM=y -CONFIG_ND_CLAIM=y -CONFIG_ND_BTT=y -CONFIG_BTT=y -CONFIG_ND_PFN=y -CONFIG_NVDIMM_PFN=y -CONFIG_NVDIMM_DAX=y -CONFIG_NVDIMM_KEYS=y -# CONFIG_NVDIMM_SECURITY_TEST is not set -CONFIG_DAX=y -CONFIG_DEV_DAX=y -CONFIG_DEV_DAX_PMEM=y -CONFIG_DEV_DAX_HMEM=y -CONFIG_DEV_DAX_CXL=m -CONFIG_DEV_DAX_HMEM_DEVICES=y -CONFIG_DEV_DAX_KMEM=y -CONFIG_NVMEM=y -CONFIG_NVMEM_SYSFS=y - -# -# Layout Types -# -# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set -# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set -# end of Layout Types - -# CONFIG_NVMEM_RMEM is not set - -# -# HW tracing support -# -CONFIG_STM=m -CONFIG_STM_PROTO_BASIC=m -CONFIG_STM_PROTO_SYS_T=m -CONFIG_STM_DUMMY=m -CONFIG_STM_SOURCE_CONSOLE=m -CONFIG_STM_SOURCE_HEARTBEAT=m -CONFIG_STM_SOURCE_FTRACE=m -CONFIG_INTEL_TH=m -CONFIG_INTEL_TH_PCI=m -CONFIG_INTEL_TH_ACPI=m -CONFIG_INTEL_TH_GTH=m -CONFIG_INTEL_TH_STH=m -CONFIG_INTEL_TH_MSU=m -CONFIG_INTEL_TH_PTI=m -# CONFIG_INTEL_TH_DEBUG is not set -# end of HW tracing support - -# CONFIG_FPGA 
is not set -CONFIG_TEE=m -CONFIG_AMDTEE=m -# CONFIG_SIOX is not set -# CONFIG_SLIMBUS is not set -# CONFIG_INTERCONNECT is not set -# CONFIG_COUNTER is not set -# CONFIG_MOST is not set -# CONFIG_PECI is not set -# CONFIG_HTE is not set -# end of Device Drivers - -# -# File systems -# -CONFIG_DCACHE_WORD_ACCESS=y -# CONFIG_VALIDATE_FS_PARSER is not set -CONFIG_FS_IOMAP=y -CONFIG_BUFFER_HEAD=y -CONFIG_LEGACY_DIRECT_IO=y -# CONFIG_EXT2_FS is not set -CONFIG_EXT3_FS=m -# CONFIG_EXT3_FS_POSIX_ACL is not set -# CONFIG_EXT3_FS_SECURITY is not set -CONFIG_EXT4_FS=y -CONFIG_EXT4_USE_FOR_EXT2=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -CONFIG_EXT4_DEBUG=y -CONFIG_JBD2=y -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=y -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -CONFIG_XFS_FS=m -CONFIG_XFS_SUPPORT_V4=y -CONFIG_XFS_SUPPORT_ASCII_CI=y -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -# CONFIG_XFS_RT is not set -# CONFIG_XFS_ONLINE_SCRUB is not set -CONFIG_XFS_WARN=y -# CONFIG_XFS_DEBUG is not set -CONFIG_GFS2_FS=m -CONFIG_GFS2_FS_LOCKING_DLM=y -# CONFIG_OCFS2_FS is not set -CONFIG_BTRFS_FS=m -# CONFIG_BTRFS_FS_POSIX_ACL is not set -# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set -# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set -# CONFIG_BTRFS_DEBUG is not set -# CONFIG_BTRFS_ASSERT is not set -# CONFIG_BTRFS_FS_REF_VERIFY is not set -# CONFIG_NILFS2_FS is not set -# CONFIG_F2FS_FS is not set -# CONFIG_ZONEFS_FS is not set -CONFIG_FS_DAX=y -CONFIG_FS_DAX_PMD=y -CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=y -CONFIG_EXPORTFS_BLOCK_OPS=y -CONFIG_FILE_LOCKING=y -# CONFIG_FS_ENCRYPTION is not set -# CONFIG_FS_VERITY is not set -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -CONFIG_QUOTA_DEBUG=y -CONFIG_QUOTA_TREE=y -# CONFIG_QFMT_V1 is not set -CONFIG_QFMT_V2=y -CONFIG_QUOTACTL=y -CONFIG_AUTOFS_FS=y -CONFIG_FUSE_FS=m -CONFIG_CUSE=m 
-CONFIG_VIRTIO_FS=m -CONFIG_FUSE_DAX=y -CONFIG_VIRT_FUSE=m -CONFIG_OVERLAY_FS=m -CONFIG_OVERLAY_FS_REDIRECT_DIR=y -CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y -CONFIG_OVERLAY_FS_INDEX=y -# CONFIG_OVERLAY_FS_NFS_EXPORT is not set -# CONFIG_OVERLAY_FS_XINO_AUTO is not set -# CONFIG_OVERLAY_FS_METACOPY is not set -# CONFIG_OVERLAY_FS_DEBUG is not set - -# -# Caches -# -CONFIG_NETFS_SUPPORT=m -CONFIG_NETFS_STATS=y -CONFIG_FSCACHE=m -CONFIG_FSCACHE_STATS=y -# CONFIG_FSCACHE_DEBUG is not set -CONFIG_CACHEFILES=m -# CONFIG_CACHEFILES_DEBUG is not set -# CONFIG_CACHEFILES_ERROR_INJECTION is not set -CONFIG_CACHEFILES_ONDEMAND=y -# end of Caches - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -# end of CD-ROM/DVD Filesystems - -# -# DOS/FAT/EXFAT/NT Filesystems -# -CONFIG_FAT_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="ascii" -# CONFIG_FAT_DEFAULT_UTF8 is not set -# CONFIG_EXFAT_FS is not set -# CONFIG_NTFS_FS is not set -CONFIG_NTFS3_FS=m -# CONFIG_NTFS3_64BIT_CLUSTER is not set -# CONFIG_NTFS3_LZX_XPRESS is not set -# CONFIG_NTFS3_FS_POSIX_ACL is not set -# end of DOS/FAT/EXFAT/NT Filesystems - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_PROC_VMCORE=y -CONFIG_PROC_VMCORE_DEVICE_DUMP=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_PROC_CHILDREN=y -CONFIG_PROC_PID_ARCH_STATUS=y -CONFIG_PROC_CPU_RESCTRL=y -CONFIG_KERNFS=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -# CONFIG_TMPFS_INODE64 is not set -# CONFIG_TMPFS_QUOTA is not set -CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y -CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y -CONFIG_ARCH_HAS_GIGANTIC_PAGE=y -CONFIG_CONFIGFS_FS=y -CONFIG_EFIVAR_FS=y -# end of Pseudo filesystems - -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ORANGEFS_FS is not set -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not 
set -# CONFIG_ECRYPT_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_JFFS2_FS is not set -# CONFIG_UBIFS_FS is not set -CONFIG_CRAMFS=m -CONFIG_CRAMFS_BLOCKDEV=y -# CONFIG_CRAMFS_MTD is not set -CONFIG_SQUASHFS=m -# CONFIG_SQUASHFS_FILE_CACHE is not set -CONFIG_SQUASHFS_FILE_DIRECT=y -CONFIG_SQUASHFS_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set -CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set -# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y -# CONFIG_SQUASHFS_LZ4 is not set -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -# CONFIG_SQUASHFS_ZSTD is not set -# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set -# CONFIG_SQUASHFS_EMBEDDED is not set -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_QNX6FS_FS is not set -CONFIG_RESCTRL_FS=y -CONFIG_RESCTRL_FS_PSEUDO_LOCK=y -# CONFIG_ROMFS_FS is not set -CONFIG_PSTORE=y -CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 -CONFIG_PSTORE_COMPRESS=y -CONFIG_PSTORE_CONSOLE=y -# CONFIG_PSTORE_PMSG is not set -# CONFIG_PSTORE_FTRACE is not set -CONFIG_PSTORE_RAM=y -# CONFIG_PSTORE_BLK is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -CONFIG_EROFS_FS=m -# CONFIG_EROFS_FS_DEBUG is not set -CONFIG_EROFS_FS_XATTR=y -CONFIG_EROFS_FS_POSIX_ACL=y -CONFIG_EROFS_FS_SECURITY=y -CONFIG_EROFS_FS_ZIP=y -CONFIG_EROFS_FS_ZIP_LZMA=y -# CONFIG_EROFS_FS_ZIP_DEFLATE is not set -CONFIG_EROFS_FS_ONDEMAND=y -# CONFIG_EROFS_FS_PCPU_KTHREAD is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=m -# CONFIG_NFS_V2 is not set -CONFIG_NFS_V3=m -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=m -# CONFIG_NFS_SWAP is not set -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y 
-CONFIG_PNFS_FILE_LAYOUT=m -CONFIG_PNFS_BLOCK=m -CONFIG_PNFS_FLEXFILE_LAYOUT=m -CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" -# CONFIG_NFS_V4_1_MIGRATION is not set -CONFIG_NFS_V4_SECURITY_LABEL=y -CONFIG_NFS_FSCACHE=y -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -CONFIG_NFS_DEBUG=y -CONFIG_NFS_DISABLE_UDP_SUPPORT=y -# CONFIG_NFS_V4_2_READ_PLUS is not set -CONFIG_NFSD=m -# CONFIG_NFSD_V2 is not set -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -CONFIG_NFSD_PNFS=y -# CONFIG_NFSD_BLOCKLAYOUT is not set -CONFIG_NFSD_SCSILAYOUT=y -# CONFIG_NFSD_FLEXFILELAYOUT is not set -# CONFIG_NFSD_V4_2_INTER_SSC is not set -CONFIG_NFSD_V4_SECURITY_LABEL=y -CONFIG_GRACE_PERIOD=m -CONFIG_LOCKD=m -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=m -CONFIG_NFS_COMMON=y -CONFIG_NFS_V4_2_SSC_HELPER=y -CONFIG_SUNRPC=m -CONFIG_SUNRPC_GSS=m -CONFIG_SUNRPC_BACKCHANNEL=y -CONFIG_RPCSEC_GSS_KRB5=m -CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y -# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set -# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set -CONFIG_SUNRPC_DEBUG=y -CONFIG_SUNRPC_XPRT_RDMA=m -CONFIG_CEPH_FS=m -# CONFIG_CEPH_FSCACHE is not set -CONFIG_CEPH_FS_POSIX_ACL=y -# CONFIG_CEPH_FS_SECURITY_LABEL is not set -CONFIG_CIFS=m -# CONFIG_CIFS_STATS2 is not set -CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y -CONFIG_CIFS_UPCALL=y -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -CONFIG_CIFS_DEBUG=y -# CONFIG_CIFS_DEBUG2 is not set -# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set -CONFIG_CIFS_DFS_UPCALL=y -# CONFIG_CIFS_SWN_UPCALL is not set -# CONFIG_CIFS_SMB_DIRECT is not set -# CONFIG_CIFS_FSCACHE is not set -# CONFIG_SMB_SERVER is not set -CONFIG_SMBFS=m -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m 
-CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m -CONFIG_NLS_UTF8=m -CONFIG_NLS_UCS2_UTILS=m -CONFIG_DLM=m -CONFIG_DLM_DEBUG=y -# CONFIG_UNICODE is not set -CONFIG_IO_WQ=y -# end of File systems - -# -# Security options -# -CONFIG_KEYS=y -# CONFIG_KEYS_REQUEST_CACHE is not set -CONFIG_PERSISTENT_KEYRINGS=y -CONFIG_TRUSTED_KEYS=y -CONFIG_TRUSTED_KEYS_TPM=y -CONFIG_ENCRYPTED_KEYS=y -# CONFIG_USER_DECRYPTED_DATA is not set -# CONFIG_KEY_DH_OPERATIONS is not set -# CONFIG_SECURITY_DMESG_RESTRICT is not set -CONFIG_SECURITY=y -CONFIG_SECURITYFS=y -CONFIG_SECURITY_NETWORK=y -CONFIG_SECURITY_INFINIBAND=y -CONFIG_SECURITY_NETWORK_XFRM=y -CONFIG_SECURITY_PATH=y -CONFIG_INTEL_TXT=y -CONFIG_LSM_MMAP_MIN_ADDR=65535 -CONFIG_HARDENED_USERCOPY=y -CONFIG_FORTIFY_SOURCE=y -# CONFIG_STATIC_USERMODEHELPER is not set -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_DEVELOP=y -CONFIG_SECURITY_SELINUX_AVC_STATS=y -CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 -CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 -# 
CONFIG_SECURITY_SELINUX_DEBUG is not set -# CONFIG_SECURITY_SMACK is not set -# CONFIG_SECURITY_TOMOYO is not set -# CONFIG_SECURITY_APPARMOR is not set -# CONFIG_SECURITY_LOADPIN is not set -CONFIG_SECURITY_YAMA=y -# CONFIG_SECURITY_SAFESETID is not set -# CONFIG_SECURITY_LOCKDOWN_LSM is not set -# CONFIG_SECURITY_LANDLOCK is not set -CONFIG_INTEGRITY=y -CONFIG_INTEGRITY_SIGNATURE=y -CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y -CONFIG_INTEGRITY_TRUSTED_KEYRING=y -CONFIG_INTEGRITY_PLATFORM_KEYRING=y -# CONFIG_INTEGRITY_MACHINE_KEYRING is not set -CONFIG_LOAD_UEFI_KEYS=y -CONFIG_INTEGRITY_AUDIT=y -CONFIG_IMA=y -# CONFIG_IMA_KEXEC is not set -CONFIG_IMA_MEASURE_PCR_IDX=10 -CONFIG_IMA_LSM_RULES=y -# CONFIG_IMA_NG_TEMPLATE is not set -CONFIG_IMA_SIG_TEMPLATE=y -CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" -# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set -CONFIG_IMA_DEFAULT_HASH_SHA256=y -# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set -# CONFIG_IMA_DEFAULT_HASH_SM3 is not set -CONFIG_IMA_DEFAULT_HASH="sha256" -CONFIG_IMA_WRITE_POLICY=y -CONFIG_IMA_READ_POLICY=y -CONFIG_IMA_APPRAISE=y -# CONFIG_IMA_ARCH_POLICY is not set -CONFIG_IMA_APPRAISE_BUILD_POLICY=y -# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set -# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set -# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set -# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set -CONFIG_IMA_APPRAISE_BOOTPARAM=y -# CONFIG_IMA_APPRAISE_MODSIG is not set -CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y -CONFIG_IMA_BLACKLIST_KEYRING=y -CONFIG_IMA_LOAD_X509=y -CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" -# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set -CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y -CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y -# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set -# CONFIG_IMA_DISABLE_HTABLE is not set -CONFIG_EVM=y -CONFIG_EVM_ATTR_FSUUID=y -# CONFIG_EVM_ADD_XATTRS is not set -CONFIG_EVM_LOAD_X509=y -CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" -CONFIG_DEFAULT_SECURITY_SELINUX=y -# 
CONFIG_DEFAULT_SECURITY_DAC is not set -CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" - -# -# Kernel hardening options -# - -# -# Memory initialization -# -CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y -CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y -CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y -CONFIG_INIT_STACK_NONE=y -# CONFIG_INIT_STACK_ALL_PATTERN is not set -# CONFIG_INIT_STACK_ALL_ZERO is not set -# CONFIG_GCC_PLUGIN_STACKLEAK is not set -# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set -# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set -CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y -# CONFIG_ZERO_CALL_USED_REGS is not set -# end of Memory initialization - -# -# Hardening of kernel data structures -# -CONFIG_LIST_HARDENED=y -CONFIG_BUG_ON_DATA_CORRUPTION=y -# end of Hardening of kernel data structures - -CONFIG_CC_HAS_RANDSTRUCT=y -CONFIG_RANDSTRUCT_NONE=y -# CONFIG_RANDSTRUCT_FULL is not set -# CONFIG_RANDSTRUCT_PERFORMANCE is not set -# end of Kernel hardening options -# end of Security options - -CONFIG_XOR_BLOCKS=m -CONFIG_ASYNC_CORE=m -CONFIG_ASYNC_MEMCPY=m -CONFIG_ASYNC_XOR=m -CONFIG_ASYNC_PQ=m -CONFIG_ASYNC_RAID6_RECOV=m -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_FIPS=y -CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" -# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_SIG2=y -CONFIG_CRYPTO_SKCIPHER=y -CONFIG_CRYPTO_SKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=m -CONFIG_CRYPTO_AKCIPHER2=y -CONFIG_CRYPTO_AKCIPHER=y -CONFIG_CRYPTO_KPP2=y -CONFIG_CRYPTO_KPP=m -CONFIG_CRYPTO_ACOMP2=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -CONFIG_CRYPTO_USER=m -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_NULL2=y -CONFIG_CRYPTO_PCRYPT=m -CONFIG_CRYPTO_CRYPTD=y -CONFIG_CRYPTO_AUTHENC=m 
-CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_SIMD=y -# end of Crypto core or helper - -# -# Public-key cryptography -# -CONFIG_CRYPTO_RSA=y -CONFIG_CRYPTO_DH=m -# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set -CONFIG_CRYPTO_ECC=m -CONFIG_CRYPTO_ECDH=m -# CONFIG_CRYPTO_ECDSA is not set -# CONFIG_CRYPTO_ECRDSA is not set -CONFIG_CRYPTO_SM2=y -# CONFIG_CRYPTO_CURVE25519 is not set -# end of Public-key cryptography - -# -# Block ciphers -# -CONFIG_CRYPTO_AES=y -# CONFIG_CRYPTO_AES_TI is not set -CONFIG_CRYPTO_ANUBIS=m -# CONFIG_CRYPTO_ARIA is not set -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_BLOWFISH_COMMON=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST_COMMON=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_DES=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SEED=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SM4_GENERIC=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_TWOFISH_COMMON=m -# end of Block ciphers - -# -# Length-preserving ciphers and modes -# -# CONFIG_CRYPTO_ADIANTUM is not set -CONFIG_CRYPTO_ARC4=m -CONFIG_CRYPTO_CHACHA20=m -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_CFB=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_CTS=y -CONFIG_CRYPTO_ECB=y -# CONFIG_CRYPTO_HCTR2 is not set -# CONFIG_CRYPTO_KEYWRAP is not set -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_OFB=y -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=y -# end of Length-preserving ciphers and modes - -# -# AEAD (authenticated encryption with associated data) ciphers -# -# CONFIG_CRYPTO_AEGIS128 is not set -CONFIG_CRYPTO_CHACHA20POLY1305=m -CONFIG_CRYPTO_CCM=y -CONFIG_CRYPTO_GCM=y -CONFIG_CRYPTO_GENIV=m -CONFIG_CRYPTO_SEQIV=m -CONFIG_CRYPTO_ECHAINIV=m -CONFIG_CRYPTO_ESSIV=m -# end of AEAD (authenticated encryption with associated data) ciphers - -# -# Hashes, digests, and MACs -# -CONFIG_CRYPTO_BLAKE2B=m -CONFIG_CRYPTO_CMAC=y -CONFIG_CRYPTO_GHASH=y -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_POLY1305=m 
-CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_SHA3=y -CONFIG_CRYPTO_SM3=y -CONFIG_CRYPTO_SM3_GENERIC=y -# CONFIG_CRYPTO_STREEBOG is not set -CONFIG_CRYPTO_VMAC=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_XXHASH=m -# end of Hashes, digests, and MACs - -# -# CRCs (cyclic redundancy checks) -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_CRCT10DIF=y -CONFIG_CRYPTO_CRC64_ROCKSOFT=m -# end of CRCs (cyclic redundancy checks) - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_LZO=y -# CONFIG_CRYPTO_842 is not set -CONFIG_CRYPTO_LZ4=m -CONFIG_CRYPTO_LZ4HC=m -CONFIG_CRYPTO_ZSTD=m -# end of Compression - -# -# Random number generation -# -CONFIG_CRYPTO_ANSI_CPRNG=m -CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_DRBG_HMAC=y -CONFIG_CRYPTO_DRBG_HASH=y -CONFIG_CRYPTO_DRBG_CTR=y -CONFIG_CRYPTO_DRBG=y -CONFIG_CRYPTO_JITTERENTROPY=y -# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set -# end of Random number generation - -# -# Userspace interface -# -CONFIG_CRYPTO_USER_API=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_USER_API_SKCIPHER=y -CONFIG_CRYPTO_USER_API_RNG=y -# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set -CONFIG_CRYPTO_USER_API_AEAD=y -CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y -# CONFIG_CRYPTO_STATS is not set -# end of Userspace interface - -CONFIG_CRYPTO_HASH_INFO=y - -# -# Accelerated Cryptographic Algorithms for CPU (x86) -# -CONFIG_CRYPTO_CURVE25519_X86=m -CONFIG_CRYPTO_AES_NI_INTEL=y -CONFIG_CRYPTO_BLOWFISH_X86_64=m -CONFIG_CRYPTO_CAMELLIA_X86_64=m -CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m -CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m -CONFIG_CRYPTO_CAST5_AVX_X86_64=m -CONFIG_CRYPTO_CAST6_AVX_X86_64=m -CONFIG_CRYPTO_DES3_EDE_X86_64=m -CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m -CONFIG_CRYPTO_SERPENT_AVX_X86_64=m -CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m -CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m -CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m -CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m 
-CONFIG_CRYPTO_TWOFISH_X86_64=m -CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m -CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m -# CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 is not set -# CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 is not set -# CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 is not set -CONFIG_CRYPTO_CHACHA20_X86_64=m -# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set -# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set -# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set -CONFIG_CRYPTO_BLAKE2S_X86=y -# CONFIG_CRYPTO_POLYVAL_CLMUL_NI is not set -CONFIG_CRYPTO_POLY1305_X86_64=m -CONFIG_CRYPTO_SHA1_SSSE3=y -CONFIG_CRYPTO_SHA256_SSSE3=y -CONFIG_CRYPTO_SHA512_SSSE3=y -CONFIG_CRYPTO_SM3_AVX_X86_64=m -CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m -CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m -CONFIG_CRYPTO_CRC32C_INTEL=m -CONFIG_CRYPTO_CRC32_PCLMUL=m -CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m -CONFIG_CRYPTO_SM2_ZHAOXIN_GMI=m -# end of Accelerated Cryptographic Algorithms for CPU (x86) - -CONFIG_CRYPTO_HW=y -CONFIG_CRYPTO_DEV_PADLOCK=m -CONFIG_CRYPTO_DEV_PADLOCK_AES=m -CONFIG_CRYPTO_DEV_PADLOCK_SHA=m -# CONFIG_CRYPTO_DEV_ZHAOXIN is not set -# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set -# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set -CONFIG_CRYPTO_DEV_CCP=y -CONFIG_CRYPTO_DEV_CCP_DD=m -CONFIG_CRYPTO_DEV_SP_CCP=y -CONFIG_CRYPTO_DEV_CCP_CRYPTO=m -CONFIG_CRYPTO_DEV_SP_PSP=y -CONFIG_HYGON_GM=y -CONFIG_HYGON_PSP2CPU_CMD=y -CONFIG_TDM_DEV_HYGON=y -CONFIG_TDM_KERNEL_GUARD=m -CONFIG_CRYPTO_DEV_HCT=m -# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set -CONFIG_CRYPTO_DEV_NITROX=m -CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m -CONFIG_CRYPTO_DEV_QAT=m -CONFIG_CRYPTO_DEV_QAT_DH895xCC=m -CONFIG_CRYPTO_DEV_QAT_C3XXX=m -CONFIG_CRYPTO_DEV_QAT_C62X=m -CONFIG_CRYPTO_DEV_QAT_4XXX=m -# CONFIG_CRYPTO_DEV_QAT_420XX is not set -CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m -CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m -CONFIG_CRYPTO_DEV_QAT_C62XVF=m -# CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION is not set -CONFIG_CRYPTO_DEV_CHELSIO=m -# CONFIG_CRYPTO_DEV_VIRTIO is not set -# CONFIG_CRYPTO_DEV_SAFEXCEL is not set -# 
CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set -CONFIG_CRYPTO_DEV_TSSE=m -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_X509_CERTIFICATE_PARSER=y -# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set -CONFIG_PKCS7_MESSAGE_PARSER=y -# CONFIG_PKCS7_TEST_KEY is not set -CONFIG_SIGNED_PE_FILE_VERIFICATION=y -# CONFIG_FIPS_SIGNATURE_SELFTEST is not set - -# -# Certificates for signature checking -# -CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" -CONFIG_MODULE_SIG_KEY_TYPE_RSA=y -# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set -CONFIG_SYSTEM_TRUSTED_KEYRING=y -CONFIG_SYSTEM_TRUSTED_KEYS="" -CONFIG_SYSTEM_EXTRA_CERTIFICATE=y -CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 -CONFIG_SECONDARY_TRUSTED_KEYRING=y -CONFIG_SYSTEM_BLACKLIST_KEYRING=y -CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" -# CONFIG_SYSTEM_REVOCATION_LIST is not set -# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set -# end of Certificates for signature checking - -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=m -CONFIG_RAID6_PQ_BENCHMARK=y -# CONFIG_PACKING is not set -CONFIG_BITREVERSE=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_NET_UTILS=y -CONFIG_CORDIC=m -# CONFIG_PRIME_NUMBERS is not set -CONFIG_RATIONAL=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_GENERIC_IOMAP=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_ARCH_HAS_FAST_MULTIPLIER=y -CONFIG_ARCH_USE_SYM_ANNOTATIONS=y - -# -# Crypto library routines -# -CONFIG_CRYPTO_LIB_UTILS=y -CONFIG_CRYPTO_LIB_AES=y -CONFIG_CRYPTO_LIB_ARC4=m -CONFIG_CRYPTO_LIB_GF128MUL=y -CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=y -CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y -CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m -CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m -CONFIG_CRYPTO_LIB_CHACHA=m -CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m -CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m -CONFIG_CRYPTO_LIB_CURVE25519=m -CONFIG_CRYPTO_LIB_DES=m -CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11 -CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m -CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m 
-CONFIG_CRYPTO_LIB_POLY1305=m -CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m -CONFIG_CRYPTO_LIB_SHA1=y -CONFIG_CRYPTO_LIB_SHA256=y -# end of Crypto library routines - -CONFIG_CRC_CCITT=y -CONFIG_CRC16=y -CONFIG_CRC_T10DIF=y -CONFIG_CRC64_ROCKSOFT=m -CONFIG_CRC_ITU_T=m -CONFIG_CRC32=y -# CONFIG_CRC32_SELFTEST is not set -CONFIG_CRC32_SLICEBY8=y -# CONFIG_CRC32_SLICEBY4 is not set -# CONFIG_CRC32_SARWATE is not set -# CONFIG_CRC32_BIT is not set -CONFIG_CRC64=m -# CONFIG_CRC4 is not set -CONFIG_CRC7=m -CONFIG_LIBCRC32C=m -CONFIG_CRC8=m -CONFIG_XXHASH=y -CONFIG_RANDOM32_SELFTEST=y -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_LZ4_COMPRESS=m -CONFIG_LZ4HC_COMPRESS=m -CONFIG_LZ4_DECOMPRESS=y -CONFIG_ZSTD_COMMON=y -CONFIG_ZSTD_COMPRESS=m -CONFIG_ZSTD_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_MICROLZMA=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_DECOMPRESS_LZ4=y -CONFIG_DECOMPRESS_ZSTD=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_REED_SOLOMON=y -CONFIG_REED_SOLOMON_ENC8=y -CONFIG_REED_SOLOMON_DEC8=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=m -CONFIG_TEXTSEARCH_BM=m -CONFIG_TEXTSEARCH_FSM=m -CONFIG_BTREE=y -CONFIG_INTERVAL_TREE=y -CONFIG_XARRAY_MULTI=y -CONFIG_ASSOCIATIVE_ARRAY=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT=y -CONFIG_HAS_IOPORT_MAP=y -CONFIG_HAS_DMA=y -CONFIG_DMA_OPS=y -CONFIG_NEED_SG_DMA_FLAGS=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y -CONFIG_SWIOTLB=y -# CONFIG_SWIOTLB_DYNAMIC is not set -CONFIG_DMA_COHERENT_POOL=y -CONFIG_DMA_CMA=y -# CONFIG_DMA_NUMA_CMA is not set - -# -# Default contiguous memory area size: -# -CONFIG_CMA_SIZE_MBYTES=0 
-CONFIG_CMA_SIZE_SEL_MBYTES=y -# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set -# CONFIG_CMA_SIZE_SEL_MIN is not set -# CONFIG_CMA_SIZE_SEL_MAX is not set -CONFIG_CMA_ALIGNMENT=8 -CONFIG_DMA_API_DEBUG=y -CONFIG_DMA_API_DEBUG_SG=y -# CONFIG_DMA_MAP_BENCHMARK is not set -CONFIG_SGL_ALLOC=y -CONFIG_CHECK_SIGNATURE=y -CONFIG_CPUMASK_OFFSTACK=y -CONFIG_CPU_RMAP=y -CONFIG_DQL=y -CONFIG_GLOB=y -# CONFIG_GLOB_SELFTEST is not set -CONFIG_NLATTR=y -CONFIG_CLZ_TAB=y -CONFIG_IRQ_POLL=y -CONFIG_MPILIB=y -CONFIG_SIGNATURE=y -CONFIG_DIMLIB=y -CONFIG_OID_REGISTRY=y -CONFIG_UCS2_STRING=y -CONFIG_HAVE_GENERIC_VDSO=y -CONFIG_GENERIC_GETTIMEOFDAY=y -CONFIG_GENERIC_VDSO_TIME_NS=y -CONFIG_FONT_SUPPORT=y -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -CONFIG_SG_POOL=y -CONFIG_ARCH_HAS_PMEM_API=y -CONFIG_MEMREGION=y -CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION=y -CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y -CONFIG_ARCH_HAS_COPY_MC=y -CONFIG_ARCH_STACKWALK=y -CONFIG_STACKDEPOT=y -CONFIG_STACKDEPOT_ALWAYS_INIT=y -CONFIG_SBITMAP=y -CONFIG_PARMAN=m -CONFIG_OBJAGG=m -# end of Library routines - -CONFIG_PLDMFW=y -CONFIG_ASN1_ENCODER=y - -# -# Kernel hacking -# - -# -# printk and dmesg options -# -CONFIG_PRINTK_TIME=y -# CONFIG_PRINTK_CALLER is not set -# CONFIG_STACKTRACE_BUILD_ID is not set -CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 -CONFIG_CONSOLE_LOGLEVEL_QUIET=4 -CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 -CONFIG_BOOT_PRINTK_DELAY=y -CONFIG_DYNAMIC_DEBUG=y -CONFIG_DYNAMIC_DEBUG_CORE=y -CONFIG_SYMBOLIC_ERRNAME=y -CONFIG_DEBUG_BUGVERBOSE=y -# end of printk and dmesg options - -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_MISC=y - -# -# Compile-time checks and compiler options -# -CONFIG_DEBUG_INFO=y -CONFIG_AS_HAS_NON_CONST_LEB128=y -# CONFIG_DEBUG_INFO_NONE is not set -CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y -# CONFIG_DEBUG_INFO_DWARF4 is not set -# CONFIG_DEBUG_INFO_DWARF5 is not set -# CONFIG_DEBUG_INFO_REDUCED is not set -CONFIG_DEBUG_INFO_COMPRESSED_NONE=y -# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is 
not set -# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set -# CONFIG_DEBUG_INFO_SPLIT is not set -CONFIG_DEBUG_INFO_BTF=y -# CONFIG_GDB_SCRIPTS is not set -CONFIG_FRAME_WARN=2048 -CONFIG_STRIP_ASM_SYMS=y -# CONFIG_READABLE_ASM is not set -# CONFIG_HEADERS_INSTALL is not set -CONFIG_DEBUG_SECTION_MISMATCH=y -CONFIG_SECTION_MISMATCH_WARN_ONLY=y -CONFIG_OBJTOOL=y -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -# end of Compile-time checks and compiler options - -# -# Generic Kernel Debugging Instruments -# -CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_MAGIC_SYSRQ_SERIAL=y -CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" -CONFIG_DEBUG_FS=y -CONFIG_DEBUG_FS_ALLOW_ALL=y -# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set -# CONFIG_DEBUG_FS_ALLOW_NONE is not set -CONFIG_HAVE_ARCH_KGDB=y -CONFIG_KGDB=y -CONFIG_KGDB_HONOUR_BLOCKLIST=y -CONFIG_KGDB_SERIAL_CONSOLE=y -CONFIG_KGDB_TESTS=y -# CONFIG_KGDB_TESTS_ON_BOOT is not set -CONFIG_KGDB_LOW_LEVEL_TRAP=y -CONFIG_KGDB_KDB=y -CONFIG_KDB_DEFAULT_ENABLE=0x1 -CONFIG_KDB_KEYBOARD=y -CONFIG_KDB_CONTINUE_CATASTROPHIC=0 -CONFIG_ARCH_HAS_EARLY_DEBUG=y -CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y -CONFIG_UBSAN=y -# CONFIG_UBSAN_TRAP is not set -CONFIG_CC_HAS_UBSAN_BOUNDS_STRICT=y -CONFIG_CC_HAS_UBSAN_ARRAY_BOUNDS=y -CONFIG_UBSAN_BOUNDS=y -CONFIG_UBSAN_BOUNDS_STRICT=y -CONFIG_UBSAN_ARRAY_BOUNDS=y -CONFIG_UBSAN_SHIFT=y -# CONFIG_UBSAN_DIV_ZERO is not set -CONFIG_UBSAN_BOOL=y -CONFIG_UBSAN_ENUM=y -# CONFIG_UBSAN_ALIGNMENT is not set -CONFIG_UBSAN_SANITIZE_ALL=y -# CONFIG_TEST_UBSAN is not set -CONFIG_HAVE_ARCH_KCSAN=y -CONFIG_HAVE_KCSAN_COMPILER=y -# end of Generic Kernel Debugging Instruments - -# -# Networking Debugging -# -# CONFIG_NET_DEV_REFCNT_TRACKER is not set -# CONFIG_NET_NS_REFCNT_TRACKER is not set -# CONFIG_DEBUG_NET is not set -# end of Networking Debugging - -# -# Memory Debugging -# -CONFIG_PAGE_EXTENSION=y -CONFIG_DEBUG_PAGEALLOC=y -# CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT is not set -CONFIG_SLUB_DEBUG=y -# CONFIG_SLUB_DEBUG_ON 
is not set -# CONFIG_PAGE_OWNER is not set -# CONFIG_PAGE_TABLE_CHECK is not set -# CONFIG_PAGE_POISONING is not set -CONFIG_DEBUG_PAGE_REF=y -# CONFIG_DEBUG_RODATA_TEST is not set -CONFIG_ARCH_HAS_DEBUG_WX=y -# CONFIG_DEBUG_WX is not set -CONFIG_GENERIC_PTDUMP=y -# CONFIG_PTDUMP_DEBUGFS is not set -CONFIG_HAVE_DEBUG_KMEMLEAK=y -CONFIG_DEBUG_KMEMLEAK=y -CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000 -CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y -CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y -# CONFIG_PER_VMA_LOCK_STATS is not set -CONFIG_DEBUG_OBJECTS=y -# CONFIG_DEBUG_OBJECTS_SELFTEST is not set -CONFIG_DEBUG_OBJECTS_FREE=y -CONFIG_DEBUG_OBJECTS_TIMERS=y -CONFIG_DEBUG_OBJECTS_WORK=y -CONFIG_DEBUG_OBJECTS_RCU_HEAD=y -CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y -CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 -# CONFIG_SHRINKER_DEBUG is not set -CONFIG_DEBUG_STACK_USAGE=y -# CONFIG_SCHED_STACK_END_CHECK is not set -CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y -CONFIG_DEBUG_VM_IRQSOFF=y -CONFIG_DEBUG_VM=y -# CONFIG_DEBUG_VM_MAPLE_TREE is not set -# CONFIG_DEBUG_VM_RB is not set -# CONFIG_DEBUG_VM_PGFLAGS is not set -CONFIG_DEBUG_VM_PGTABLE=y -CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -# CONFIG_DEBUG_VIRTUAL is not set -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_DEBUG_PER_CPU_MAPS=y -CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y -# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set -CONFIG_HAVE_ARCH_KASAN=y -CONFIG_HAVE_ARCH_KASAN_VMALLOC=y -CONFIG_CC_HAS_KASAN_GENERIC=y -CONFIG_CC_HAS_KASAN_SW_TAGS=y -CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y -CONFIG_KASAN=y -CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y -CONFIG_KASAN_GENERIC=y -# CONFIG_KASAN_OUTLINE is not set -CONFIG_KASAN_INLINE=y -CONFIG_KASAN_STACK=y -CONFIG_KASAN_VMALLOC=y -# CONFIG_KASAN_MODULE_TEST is not set -CONFIG_HAVE_ARCH_KFENCE=y -CONFIG_KFENCE=y -CONFIG_KFENCE_SAMPLE_INTERVAL=100 -CONFIG_KFENCE_NUM_OBJECTS=255 -CONFIG_KFENCE_DEFERRABLE=y -CONFIG_KFENCE_STRESS_TEST_FAULTS=0 -CONFIG_HAVE_ARCH_KMSAN=y -# end of Memory Debugging - -CONFIG_DEBUG_SHIRQ=y - -# -# Debug Oops, Lockups and 
Hangs -# -CONFIG_PANIC_ON_OOPS=y -CONFIG_PANIC_ON_OOPS_VALUE=1 -CONFIG_PANIC_TIMEOUT=1 -CONFIG_LOCKUP_DETECTOR=y -CONFIG_SOFTLOCKUP_DETECTOR=y -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y -# CONFIG_SDEI_WATCHDOG is not set -CONFIG_HARDLOCKUP_DETECTOR=y -# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set -CONFIG_HARDLOCKUP_DETECTOR_PERF=y -# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set -# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set -CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y -CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y -CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -# CONFIG_WQ_WATCHDOG is not set -# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set -# CONFIG_TEST_LOCKUP is not set -# end of Debug Oops, Lockups and Hangs - -# -# Scheduler Debugging -# -CONFIG_SCHED_DEBUG=y -CONFIG_SCHED_INFO=y -CONFIG_SCHEDSTATS=y -CONFIG_SCHED_ACPU=y -# end of Scheduler Debugging - -# CONFIG_DEBUG_TIMEKEEPING is not set -CONFIG_DEBUG_PREEMPT=y - -# -# Lock Debugging (spinlocks, mutexes, etc...) -# -CONFIG_LOCK_DEBUGGING_SUPPORT=y -CONFIG_PROVE_LOCKING=y -# CONFIG_PROVE_RAW_LOCK_NESTING is not set -CONFIG_LOCK_STAT=y -CONFIG_DEBUG_RT_MUTEXES=y -CONFIG_DEBUG_SPINLOCK=y -CONFIG_DEBUG_MUTEXES=y -CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y -CONFIG_DEBUG_RWSEMS=y -CONFIG_DEBUG_LOCK_ALLOC=y -CONFIG_LOCKDEP=y -CONFIG_LOCKDEP_BITS=15 -CONFIG_LOCKDEP_CHAINS_BITS=16 -CONFIG_LOCKDEP_STACK_TRACE_BITS=19 -CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS=14 -CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS=12 -# CONFIG_DEBUG_LOCKDEP is not set -CONFIG_DEBUG_ATOMIC_SLEEP=y -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -CONFIG_LOCK_TORTURE_TEST=m -# CONFIG_WW_MUTEX_SELFTEST is not set -# CONFIG_SCF_TORTURE_TEST is not set -# CONFIG_CSD_LOCK_WAIT_DEBUG is not set -# end of Lock Debugging (spinlocks, mutexes, etc...) 
- -CONFIG_TRACE_IRQFLAGS=y -CONFIG_TRACE_IRQFLAGS_NMI=y -# CONFIG_NMI_CHECK_CPU is not set -# CONFIG_DEBUG_IRQFLAGS is not set -CONFIG_STACKTRACE=y -# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set -# CONFIG_DEBUG_KOBJECT is not set -# CONFIG_DEBUG_KOBJECT_RELEASE is not set - -# -# Debug kernel data structures -# -CONFIG_DEBUG_LIST=y -# CONFIG_DEBUG_PLIST is not set -CONFIG_DEBUG_SG=y -CONFIG_DEBUG_NOTIFIERS=y -# CONFIG_DEBUG_MAPLE_TREE is not set -# end of Debug kernel data structures - -# -# RCU Debugging -# -CONFIG_PROVE_RCU=y -CONFIG_TORTURE_TEST=m -# CONFIG_RCU_SCALE_TEST is not set -CONFIG_RCU_TORTURE_TEST=m -# CONFIG_RCU_REF_SCALE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 -# CONFIG_RCU_CPU_STALL_CPUTIME is not set -# CONFIG_RCU_TRACE is not set -# CONFIG_RCU_EQS_DEBUG is not set -# end of RCU Debugging - -# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -CONFIG_LATENCYTOP=y -# CONFIG_DEBUG_CGROUP_REF is not set -CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y -CONFIG_HAVE_RETHOOK=y -CONFIG_RETHOOK=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y -CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_FENTRY=y -CONFIG_HAVE_OBJTOOL_MCOUNT=y -CONFIG_HAVE_OBJTOOL_NOP_MCOUNT=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_HAVE_BUILDTIME_MCOUNT_SORT=y -CONFIG_BUILDTIME_MCOUNT_SORT=y -CONFIG_TRACER_MAX_TRACE=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_PREEMPTIRQ_TRACEPOINTS=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -# CONFIG_BOOTTIME_TRACING is not set -CONFIG_FUNCTION_TRACER=y 
-CONFIG_FUNCTION_GRAPH_TRACER=y -# CONFIG_FUNCTION_GRAPH_RETVAL is not set -CONFIG_DYNAMIC_FTRACE=y -CONFIG_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y -CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y -# CONFIG_FPROBE is not set -CONFIG_FUNCTION_PROFILER=y -CONFIG_STACK_TRACER=y -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_PREEMPT_TRACER is not set -CONFIG_SCHED_TRACER=y -CONFIG_HWLAT_TRACER=y -CONFIG_OSNOISE_TRACER=y -CONFIG_TIMERLAT_TRACER=y -CONFIG_MMIOTRACE=y -CONFIG_FTRACE_SYSCALLS=y -CONFIG_TRACER_SNAPSHOT=y -# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_PROBE_EVENTS_BTF_ARGS=y -CONFIG_KPROBE_EVENTS=y -# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set -CONFIG_UPROBE_EVENTS=y -CONFIG_BPF_EVENTS=y -CONFIG_DYNAMIC_EVENTS=y -CONFIG_PROBE_EVENTS=y -# CONFIG_BPF_KPROBE_OVERRIDE is not set -CONFIG_FTRACE_MCOUNT_RECORD=y -CONFIG_FTRACE_MCOUNT_USE_CC=y -CONFIG_TRACING_MAP=y -CONFIG_SYNTH_EVENTS=y -# CONFIG_USER_EVENTS is not set -CONFIG_HIST_TRIGGERS=y -# CONFIG_TRACE_EVENT_INJECT is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -CONFIG_RING_BUFFER_BENCHMARK=m -# CONFIG_TRACE_EVAL_MAP_FILE is not set -# CONFIG_FTRACE_RECORD_RECURSION is not set -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_FTRACE_SORT_STARTUP_TEST is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set -# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set -# CONFIG_MMIOTRACE_TEST is not set -# CONFIG_PREEMPTIRQ_DELAY_TEST is not set -# CONFIG_SYNTH_EVENT_GEN_TEST is not set -# CONFIG_KPROBE_EVENT_GEN_TEST is not set -# CONFIG_HIST_TRIGGERS_DEBUG is not set -# CONFIG_RV is not set -CONFIG_PROVIDE_OHCI1394_DMA_INIT=y -# CONFIG_SAMPLES is not set -CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y -CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y -CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y -CONFIG_STRICT_DEVMEM=y -# CONFIG_IO_STRICT_DEVMEM is not set - -# -# x86 Debugging -# -CONFIG_EARLY_PRINTK_USB=y 
-# CONFIG_X86_VERBOSE_BOOTUP is not set -CONFIG_EARLY_PRINTK=y -CONFIG_EARLY_PRINTK_DBGP=y -CONFIG_EARLY_PRINTK_USB_XDBC=y -# CONFIG_EFI_PGT_DUMP is not set -# CONFIG_DEBUG_TLBFLUSH is not set -CONFIG_HAVE_MMIOTRACE_SUPPORT=y -CONFIG_X86_DECODER_SELFTEST=y -CONFIG_IO_DELAY_0X80=y -# CONFIG_IO_DELAY_0XED is not set -# CONFIG_IO_DELAY_UDELAY is not set -# CONFIG_IO_DELAY_NONE is not set -CONFIG_DEBUG_BOOT_PARAMS=y -# CONFIG_CPA_DEBUG is not set -# CONFIG_DEBUG_ENTRY is not set -# CONFIG_DEBUG_NMI_SELFTEST is not set -CONFIG_X86_DEBUG_FPU=y -# CONFIG_PUNIT_ATOM_DEBUG is not set -CONFIG_UNWINDER_ORC=y -# CONFIG_UNWINDER_FRAME_POINTER is not set -# end of x86 Debugging - -# -# Kernel Testing and Coverage -# -# CONFIG_KUNIT is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -CONFIG_FUNCTION_ERROR_INJECTION=y -CONFIG_FAULT_INJECTION=y -CONFIG_FAILSLAB=y -CONFIG_FAIL_PAGE_ALLOC=y -# CONFIG_FAULT_INJECTION_USERCOPY is not set -CONFIG_FAIL_MAKE_REQUEST=y -CONFIG_FAIL_IO_TIMEOUT=y -# CONFIG_FAIL_FUTEX is not set -CONFIG_FAULT_INJECTION_DEBUG_FS=y -# CONFIG_FAIL_FUNCTION is not set -CONFIG_FAIL_MMC_REQUEST=y -# CONFIG_FAIL_SUNRPC is not set -# CONFIG_FAULT_INJECTION_CONFIGFS is not set -# CONFIG_FAULT_INJECTION_STACKTRACE_FILTER is not set -CONFIG_ARCH_HAS_KCOV=y -CONFIG_CC_HAS_SANCOV_TRACE_PC=y -# CONFIG_KCOV is not set -CONFIG_RUNTIME_TESTING_MENU=y -# CONFIG_TEST_DHRY is not set -# CONFIG_LKDTM is not set -# CONFIG_TEST_MIN_HEAP is not set -# CONFIG_TEST_DIV64 is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_TEST_REF_TRACKER is not set -# CONFIG_RBTREE_TEST is not set -# CONFIG_REED_SOLOMON_TEST is not set -# CONFIG_INTERVAL_TREE_TEST is not set -# CONFIG_PERCPU_TEST is not set -CONFIG_ATOMIC64_SELFTEST=y -CONFIG_ASYNC_RAID6_TEST=m -# CONFIG_TEST_HEXDUMP is not set -# CONFIG_STRING_SELFTEST is not set -CONFIG_TEST_STRING_HELPERS=m -CONFIG_TEST_KSTRTOX=y -# CONFIG_TEST_PRINTF is not set -# CONFIG_TEST_SCANF is not set -# CONFIG_TEST_BITMAP is not set -# 
CONFIG_TEST_UUID is not set -# CONFIG_TEST_XARRAY is not set -# CONFIG_TEST_MAPLE_TREE is not set -# CONFIG_TEST_RHASHTABLE is not set -# CONFIG_TEST_IDA is not set -# CONFIG_TEST_PARMAN is not set -# CONFIG_TEST_LKM is not set -# CONFIG_TEST_BITOPS is not set -# CONFIG_TEST_VMALLOC is not set -# CONFIG_TEST_USER_COPY is not set -CONFIG_TEST_BPF=m -# CONFIG_TEST_BLACKHOLE_DEV is not set -# CONFIG_FIND_BIT_BENCHMARK is not set -# CONFIG_TEST_FIRMWARE is not set -# CONFIG_TEST_SYSCTL is not set -# CONFIG_TEST_UDELAY is not set -# CONFIG_TEST_STATIC_KEYS is not set -# CONFIG_TEST_DYNAMIC_DEBUG is not set -# CONFIG_TEST_KMOD is not set -# CONFIG_TEST_MEMCAT_P is not set -CONFIG_TEST_LIVEPATCH=m -# CONFIG_TEST_OBJAGG is not set -# CONFIG_TEST_MEMINIT is not set -# CONFIG_TEST_HMM is not set -# CONFIG_TEST_FREE_PAGES is not set -# CONFIG_TEST_FPU is not set -# CONFIG_TEST_CLOCKSOURCE_WATCHDOG is not set -CONFIG_ARCH_USE_MEMTEST=y -# CONFIG_MEMTEST is not set -# CONFIG_HYPERV_TESTING is not set -# end of Kernel Testing and Coverage - -# -# Rust hacking -# -# end of Rust hacking -# end of Kernel hacking diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig deleted file mode 100644 index 38730def197a..000000000000 --- a/arch/x86/configs/anolis_defconfig +++ /dev/null @@ -1,8082 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. 
-# Linux/x86 6.6.25 Kernel Configuration -# -CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" -CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=200000 -CONFIG_CLANG_VERSION=0 -CONFIG_AS_IS_GNU=y -CONFIG_AS_VERSION=25000 -CONFIG_LD_IS_BFD=y -CONFIG_LD_VERSION=25000 -CONFIG_LLD_VERSION=0 -CONFIG_CC_CAN_LINK=y -CONFIG_CC_CAN_LINK_STATIC=y -CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y -CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y -CONFIG_TOOLS_SUPPORT_RELR=y -CONFIG_CC_HAS_ASM_INLINE=y -CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y -CONFIG_PAHOLE_VERSION=117 -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_TABLE_SORT=y -CONFIG_THREAD_INFO_IN_TASK=y - -# -# General setup -# -CONFIG_INIT_ENV_ARG_LIMIT=32 -# CONFIG_COMPILE_TEST is not set -# CONFIG_WERROR is not set -CONFIG_LOCALVERSION="" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_BUILD_SALT="" -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_BZIP2=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_XZ=y -CONFIG_HAVE_KERNEL_LZO=y -CONFIG_HAVE_KERNEL_LZ4=y -CONFIG_HAVE_KERNEL_ZSTD=y -# CONFIG_KERNEL_GZIP is not set -# CONFIG_KERNEL_BZIP2 is not set -# CONFIG_KERNEL_LZMA is not set -# CONFIG_KERNEL_XZ is not set -# CONFIG_KERNEL_LZO is not set -# CONFIG_KERNEL_LZ4 is not set -CONFIG_KERNEL_ZSTD=y -CONFIG_DEFAULT_INIT="" -CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_SYSVIPC_COMPAT=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -# CONFIG_WATCH_QUEUE is not set -CONFIG_CROSS_MEMORY_ATTACH=y -# CONFIG_USELIB is not set -CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_AUDITSYSCALL=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y -CONFIG_GENERIC_PENDING_IRQ=y -CONFIG_GENERIC_IRQ_MIGRATION=y -CONFIG_GENERIC_IRQ_INJECTION=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_IRQ_MSI_IOMMU=y -CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y -CONFIG_GENERIC_IRQ_RESERVATION_MODE=y 
-CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y -# CONFIG_GENERIC_IRQ_DEBUGFS is not set -# end of IRQ subsystem - -CONFIG_CLOCKSOURCE_WATCHDOG=y -CONFIG_ARCH_CLOCKSOURCE_INIT=y -CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y -CONFIG_GENERIC_CMOS_UPDATE=y -CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y -CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y -CONFIG_CONTEXT_TRACKING=y -CONFIG_CONTEXT_TRACKING_IDLE=y - -# -# Timers subsystem -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set -# CONFIG_NO_HZ_IDLE is not set -CONFIG_NO_HZ_FULL=y -CONFIG_CONTEXT_TRACKING_USER=y -# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US=125 -# end of Timers subsystem - -CONFIG_BPF=y -CONFIG_HAVE_EBPF_JIT=y -CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y - -# -# BPF subsystem -# -CONFIG_BPF_SYSCALL=y -CONFIG_BPF_JIT=y -CONFIG_BPF_JIT_ALWAYS_ON=y -CONFIG_BPF_JIT_DEFAULT_ON=y -CONFIG_BPF_UNPRIV_DEFAULT_OFF=y -# CONFIG_BPF_PRELOAD is not set -CONFIG_BPF_LSM=y -# end of BPF subsystem - -CONFIG_PREEMPT_BUILD=y -CONFIG_PREEMPT_NONE=y -# CONFIG_PREEMPT_VOLUNTARY is not set -# CONFIG_PREEMPT is not set -CONFIG_PREEMPT_COUNT=y -CONFIG_PREEMPTION=y -CONFIG_PREEMPT_DYNAMIC=y -CONFIG_SCHED_CORE=y - -# -# CPU/Task time and stats accounting -# -CONFIG_VIRT_CPU_ACCOUNTING=y -CONFIG_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_SCHED_AVG_IRQ=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_PSI=y -CONFIG_PSI_DEFAULT_DISABLED=y -# end of CPU/Task time and stats accounting - -CONFIG_CPU_ISOLATION=y - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -CONFIG_PREEMPT_RCU=y -# CONFIG_RCU_EXPERT is not set -CONFIG_TREE_SRCU=y -CONFIG_TASKS_RCU_GENERIC=y 
-CONFIG_TASKS_RCU=y -CONFIG_TASKS_RUDE_RCU=y -CONFIG_TASKS_TRACE_RCU=y -CONFIG_RCU_STALL_COMMON=y -CONFIG_RCU_NEED_SEGCBLIST=y -CONFIG_RCU_NOCB_CPU=y -# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set -# CONFIG_RCU_LAZY is not set -# end of RCU Subsystem - -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -# CONFIG_IKHEADERS is not set -CONFIG_LOG_BUF_SHIFT=21 -CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 -# CONFIG_PRINTK_INDEX is not set -CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y - -# -# Scheduler features -# -# CONFIG_UCLAMP_TASK is not set -# end of Scheduler features - -CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y -CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y -CONFIG_CC_HAS_INT128=y -CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" -CONFIG_GCC10_NO_ARRAY_BOUNDS=y -CONFIG_CC_NO_ARRAY_BOUNDS=y -CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_NUMA_BALANCING=y -CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y -CONFIG_CGROUPS=y -CONFIG_PAGE_COUNTER=y -# CONFIG_CGROUP_FAVOR_DYNMODS is not set -CONFIG_MEMCG=y -CONFIG_MEMCG_KMEM=y -CONFIG_BLK_CGROUP=y -CONFIG_CGROUP_WRITEBACK=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y -CONFIG_SCHED_MM_CID=y -CONFIG_CGROUP_PIDS=y -CONFIG_CGROUP_RDMA=y -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_HUGETLB=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_DEVICE=y -CONFIG_SCHED_SLI=y -CONFIG_RICH_CONTAINER=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_BPF=y -# CONFIG_CGROUP_MISC is not set -# CONFIG_CGROUP_DEBUG is not set -CONFIG_SOCK_CGROUP_DATA=y -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_TIME_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_CHECKPOINT_RESTORE=y -CONFIG_SCHED_AUTOGROUP=y -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -CONFIG_RD_LZ4=y -CONFIG_RD_ZSTD=y -# CONFIG_BOOT_CONFIG is not set -CONFIG_INITRAMFS_PRESERVE_MTIME=y 
-CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_LD_ORPHAN_WARN=y -CONFIG_LD_ORPHAN_WARN_LEVEL="warn" -CONFIG_SYSCTL=y -CONFIG_HAVE_UID16=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -CONFIG_HAVE_PCSPKR_PLATFORM=y -# CONFIG_EXPERT is not set -CONFIG_UID16=y -CONFIG_MULTIUSER=y -CONFIG_SGETMASK_SYSCALL=y -CONFIG_SYSFS_SYSCALL=y -CONFIG_FHANDLE=y -CONFIG_POSIX_TIMERS=y -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_PCSPKR_PLATFORM=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_FUTEX_PI=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_IO_URING=y -CONFIG_ADVISE_SYSCALLS=y -CONFIG_MEMBARRIER=y -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_SELFTEST is not set -CONFIG_KALLSYMS_ALL=y -CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y -CONFIG_KALLSYMS_BASE_RELATIVE=y -CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y -CONFIG_KCMP=y -CONFIG_RSEQ=y -CONFIG_CACHESTAT_SYSCALL=y -CONFIG_HAVE_PERF_EVENTS=y -CONFIG_GUEST_PERF_EVENTS=y - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -# end of Kernel Performance Events And Counters - -CONFIG_SYSTEM_DATA_VERIFICATION=y -CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y - -# -# Kexec and crash features -# -CONFIG_CRASH_CORE=y -CONFIG_KEXEC_CORE=y -CONFIG_HAVE_IMA_KEXEC=y -CONFIG_KEXEC=y -CONFIG_KEXEC_FILE=y -CONFIG_KEXEC_SIG=y -# CONFIG_KEXEC_SIG_FORCE is not set -CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y -CONFIG_KEXEC_JUMP=y -CONFIG_CRASH_DUMP=y -CONFIG_CRASH_HOTPLUG=y -CONFIG_CRASH_MAX_MEMORY_RANGES=8192 -# end of Kexec and crash features -# end of General setup - -CONFIG_64BIT=y -CONFIG_X86_64=y -CONFIG_X86=y -CONFIG_INSTRUCTION_DECODER=y -CONFIG_OUTPUT_FORMAT="elf64-x86-64" -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_MMU=y -CONFIG_ARCH_MMAP_RND_BITS_MIN=28 -CONFIG_ARCH_MMAP_RND_BITS_MAX=32 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 -CONFIG_GENERIC_ISA_DMA=y 
-CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ARCH_HAS_CPU_RELAX=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_AUDIT_ARCH=y -CONFIG_HAVE_INTEL_TXT=y -CONFIG_X86_64_SMP=y -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_DYNAMIC_PHYSICAL_MASK=y -CONFIG_PGTABLE_LEVELS=4 -CONFIG_CC_HAS_SANE_STACKPROTECTOR=y - -# -# Processor type and features -# -CONFIG_SMP=y -CONFIG_X86_X2APIC=y -CONFIG_X86_MPPARSE=y -# CONFIG_GOLDFISH is not set -CONFIG_X86_CPU_RESCTRL=y -CONFIG_X86_EXTENDED_PLATFORM=y -# CONFIG_X86_NUMACHIP is not set -# CONFIG_X86_VSMP is not set -CONFIG_X86_UV=y -# CONFIG_X86_GOLDFISH is not set -# CONFIG_X86_INTEL_MID is not set -CONFIG_X86_INTEL_LPSS=y -CONFIG_X86_AMD_PLATFORM_DEVICE=y -CONFIG_IOSF_MBI=y -# CONFIG_IOSF_MBI_DEBUG is not set -CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y -CONFIG_SCHED_OMIT_FRAME_POINTER=y -CONFIG_HYPERVISOR_GUEST=y -CONFIG_PARAVIRT=y -# CONFIG_PARAVIRT_DEBUG is not set -CONFIG_PARAVIRT_SPINLOCKS=y -CONFIG_X86_HV_CALLBACK_VECTOR=y -CONFIG_XEN=y -# CONFIG_XEN_PV is not set -CONFIG_XEN_PVHVM=y -CONFIG_XEN_PVHVM_SMP=y -CONFIG_XEN_PVHVM_GUEST=y -CONFIG_XEN_SAVE_RESTORE=y -# CONFIG_XEN_DEBUG_FS is not set -# CONFIG_XEN_PVH is not set -CONFIG_KVM_GUEST=y -CONFIG_ARCH_CPUIDLE_HALTPOLL=y -# CONFIG_PVH is not set -CONFIG_PARAVIRT_TIME_ACCOUNTING=y -CONFIG_PARAVIRT_CLOCK=y -# CONFIG_JAILHOUSE_GUEST is not set -# CONFIG_ACRN_GUEST is not set -CONFIG_INTEL_TDX_GUEST=y -# CONFIG_MK8 is not set -# CONFIG_MPSC is not set -# CONFIG_MCORE2 is not set -# CONFIG_MATOM is not set -CONFIG_GENERIC_CPU=y -CONFIG_X86_INTERNODE_CACHE_SHIFT=6 -CONFIG_X86_L1_CACHE_SHIFT=6 -CONFIG_X86_TSC=y -CONFIG_X86_CMPXCHG64=y -CONFIG_X86_CMOV=y -CONFIG_X86_MINIMUM_CPU_FAMILY=64 -CONFIG_X86_DEBUGCTLMSR=y -CONFIG_IA32_FEAT_CTL=y -CONFIG_X86_VMX_FEATURE_NAMES=y -CONFIG_CPU_SUP_INTEL=y -CONFIG_CPU_SUP_AMD=y -CONFIG_CPU_SUP_HYGON=y 
-CONFIG_CPU_SUP_CENTAUR=y -CONFIG_CPU_SUP_ZHAOXIN=y -CONFIG_HPET_TIMER=y -CONFIG_HPET_EMULATE_RTC=y -CONFIG_DMI=y -# CONFIG_GART_IOMMU is not set -CONFIG_BOOT_VESA_SUPPORT=y -# CONFIG_MAXSMP is not set -CONFIG_NR_CPUS_RANGE_BEGIN=2 -CONFIG_NR_CPUS_RANGE_END=8192 -CONFIG_NR_CPUS_DEFAULT=64 -CONFIG_NR_CPUS=1024 -CONFIG_SCHED_CLUSTER=y -CONFIG_SCHED_SMT=y -CONFIG_SCHED_MC=y -CONFIG_SCHED_MC_PRIO=y -CONFIG_X86_LOCAL_APIC=y -CONFIG_X86_IO_APIC=y -CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y -CONFIG_X86_MCE=y -CONFIG_X86_MCELOG_LEGACY=y -CONFIG_X86_MCE_INTEL=y -CONFIG_X86_MCE_AMD=y -CONFIG_X86_MCE_THRESHOLD=y -CONFIG_X86_MCE_INJECT=m - -# -# Performance monitoring -# -CONFIG_PERF_EVENTS_INTEL_UNCORE=m -CONFIG_PERF_EVENTS_INTEL_RAPL=m -CONFIG_PERF_EVENTS_INTEL_CSTATE=m -CONFIG_PERF_EVENTS_AMD_POWER=m -CONFIG_PERF_EVENTS_AMD_UNCORE=y -CONFIG_PERF_EVENTS_AMD_BRS=y -# end of Performance monitoring - -CONFIG_X86_16BIT=y -CONFIG_X86_ESPFIX64=y -CONFIG_X86_VSYSCALL_EMULATION=y -CONFIG_X86_IOPL_IOPERM=y -CONFIG_MICROCODE=y -# CONFIG_MICROCODE_LATE_LOADING is not set -CONFIG_X86_MSR=y -CONFIG_X86_CPUID=y -# CONFIG_X86_5LEVEL is not set -CONFIG_X86_DIRECT_GBPAGES=y -CONFIG_X86_CPA_STATISTICS=y -CONFIG_X86_MEM_ENCRYPT=y -CONFIG_AMD_MEM_ENCRYPT=y -CONFIG_NUMA=y -CONFIG_AMD_NUMA=y -CONFIG_X86_64_ACPI_NUMA=y -CONFIG_NUMA_EMU=y -CONFIG_NODES_SHIFT=6 -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -# CONFIG_ARCH_MEMORY_PROBE is not set -CONFIG_ARCH_PROC_KCORE_TEXT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_X86_PMEM_LEGACY_DEVICE=y -CONFIG_X86_PMEM_LEGACY=m -CONFIG_X86_CHECK_BIOS_CORRUPTION=y -# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set -CONFIG_MTRR=y -CONFIG_MTRR_SANITIZER=y -CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 -CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 -CONFIG_X86_PAT=y -CONFIG_ARCH_USES_PG_UNCACHED=y -CONFIG_X86_UMIP=y -CONFIG_CC_HAS_IBT=y -CONFIG_X86_CET=y -CONFIG_X86_KERNEL_IBT=y -CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y -# 
CONFIG_X86_INTEL_TSX_MODE_OFF is not set -# CONFIG_X86_INTEL_TSX_MODE_ON is not set -CONFIG_X86_INTEL_TSX_MODE_AUTO=y -CONFIG_X86_SGX=y -# CONFIG_X86_USER_SHADOW_STACK is not set -CONFIG_EFI=y -CONFIG_EFI_STUB=y -CONFIG_EFI_HANDOVER_PROTOCOL=y -CONFIG_EFI_MIXED=y -# CONFIG_EFI_FAKE_MEMMAP is not set -CONFIG_EFI_RUNTIME_MAP=y -CONFIG_HYGON_CSV=y -# CONFIG_HZ_100 is not set -# CONFIG_HZ_250 is not set -# CONFIG_HZ_300 is not set -CONFIG_HZ_1000=y -CONFIG_HZ=1000 -CONFIG_SCHED_HRTICK=y -CONFIG_ARCH_SUPPORTS_KEXEC=y -CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y -CONFIG_ARCH_SELECTS_KEXEC_FILE=y -CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY=y -CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y -CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE=y -CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG=y -CONFIG_ARCH_SUPPORTS_KEXEC_JUMP=y -CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y -CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG=y -CONFIG_PHYSICAL_START=0x1000000 -CONFIG_RELOCATABLE=y -CONFIG_RANDOMIZE_BASE=y -CONFIG_X86_NEED_RELOCS=y -CONFIG_PHYSICAL_ALIGN=0x1000000 -CONFIG_DYNAMIC_MEMORY_LAYOUT=y -CONFIG_RANDOMIZE_MEMORY=y -CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa -# CONFIG_ADDRESS_MASKING is not set -CONFIG_HOTPLUG_CPU=y -# CONFIG_COMPAT_VDSO is not set -CONFIG_LEGACY_VSYSCALL_XONLY=y -# CONFIG_LEGACY_VSYSCALL_NONE is not set -# CONFIG_CMDLINE_BOOL is not set -CONFIG_MODIFY_LDT_SYSCALL=y -# CONFIG_STRICT_SIGALTSTACK_SIZE is not set -CONFIG_HAVE_LIVEPATCH=y -CONFIG_LIVEPATCH=y -# end of Processor type and features - -CONFIG_CC_HAS_SLS=y -CONFIG_CC_HAS_RETURN_THUNK=y -CONFIG_CC_HAS_ENTRY_PADDING=y -CONFIG_FUNCTION_PADDING_CFI=11 -CONFIG_FUNCTION_PADDING_BYTES=16 -CONFIG_CALL_PADDING=y -CONFIG_HAVE_CALL_THUNKS=y -CONFIG_CALL_THUNKS=y -CONFIG_PREFIX_SYMBOLS=y -CONFIG_SPECULATION_MITIGATIONS=y -CONFIG_PAGE_TABLE_ISOLATION=y -CONFIG_RETPOLINE=y -CONFIG_RETHUNK=y -CONFIG_CPU_UNRET_ENTRY=y -CONFIG_CALL_DEPTH_TRACKING=y -# CONFIG_CALL_THUNKS_DEBUG is not set -CONFIG_CPU_IBPB_ENTRY=y -CONFIG_CPU_IBRS_ENTRY=y -CONFIG_CPU_SRSO=y -# CONFIG_SLS is not set 
-# CONFIG_GDS_FORCE_MITIGATION is not set -CONFIG_MITIGATION_RFDS=y -CONFIG_ARCH_HAS_ADD_PAGES=y - -# -# Power management and ACPI options -# -CONFIG_ARCH_HIBERNATION_HEADER=y -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -CONFIG_HIBERNATE_CALLBACKS=y -CONFIG_HIBERNATION=y -CONFIG_HIBERNATION_SNAPSHOT_DEV=y -CONFIG_PM_STD_PARTITION="" -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -# CONFIG_PM_AUTOSLEEP is not set -# CONFIG_PM_USERSPACE_AUTOSLEEP is not set -# CONFIG_PM_WAKELOCKS is not set -CONFIG_PM=y -CONFIG_PM_DEBUG=y -# CONFIG_PM_ADVANCED_DEBUG is not set -# CONFIG_PM_TEST_SUSPEND is not set -CONFIG_PM_SLEEP_DEBUG=y -# CONFIG_PM_TRACE_RTC is not set -CONFIG_PM_CLK=y -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -# CONFIG_ENERGY_MODEL is not set -CONFIG_ARCH_SUPPORTS_ACPI=y -CONFIG_ACPI=y -CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y -CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y -CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y -CONFIG_ACPI_TABLE_LIB=y -# CONFIG_ACPI_DEBUGGER is not set -CONFIG_ACPI_SPCR_TABLE=y -# CONFIG_ACPI_FPDT is not set -CONFIG_ACPI_LPIT=y -CONFIG_ACPI_SLEEP=y -CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y -CONFIG_ACPI_EC_DEBUGFS=m -CONFIG_ACPI_AC=y -CONFIG_ACPI_BATTERY=y -CONFIG_ACPI_BUTTON=y -CONFIG_ACPI_VIDEO=m -CONFIG_ACPI_FAN=y -CONFIG_ACPI_TAD=m -CONFIG_ACPI_DOCK=y -CONFIG_ACPI_CPU_FREQ_PSS=y -CONFIG_ACPI_PROCESSOR_CSTATE=y -CONFIG_ACPI_PROCESSOR_IDLE=y -CONFIG_ACPI_CPPC_LIB=y -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_IPMI=m -CONFIG_ACPI_HOTPLUG_CPU=y -CONFIG_ACPI_PROCESSOR_AGGREGATOR=m -CONFIG_ACPI_THERMAL=y -CONFIG_ACPI_PLATFORM_PROFILE=m -CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y -CONFIG_ACPI_TABLE_UPGRADE=y -# CONFIG_ACPI_DEBUG is not set -CONFIG_ACPI_PCI_SLOT=y -CONFIG_ACPI_CONTAINER=y -CONFIG_ACPI_HOTPLUG_MEMORY=y -CONFIG_ACPI_HOTPLUG_IOAPIC=y -CONFIG_ACPI_SBS=m -CONFIG_ACPI_HED=y -# CONFIG_ACPI_CUSTOM_METHOD is not set -CONFIG_ACPI_BGRT=y -CONFIG_ACPI_NFIT=m -# CONFIG_NFIT_SECURITY_DEBUG is not set -CONFIG_ACPI_NUMA=y -CONFIG_ACPI_HMAT=y -CONFIG_HAVE_ACPI_APEI=y 
-CONFIG_HAVE_ACPI_APEI_NMI=y -CONFIG_ACPI_APEI=y -CONFIG_ACPI_APEI_GHES=y -CONFIG_ACPI_APEI_PCIEAER=y -CONFIG_ACPI_APEI_MEMORY_FAILURE=y -CONFIG_ACPI_APEI_EINJ=m -# CONFIG_ACPI_APEI_ERST_DEBUG is not set -# CONFIG_ACPI_DPTF is not set -CONFIG_ACPI_WATCHDOG=y -CONFIG_ACPI_EXTLOG=m -CONFIG_ACPI_ADXL=y -# CONFIG_ACPI_CONFIGFS is not set -# CONFIG_ACPI_PFRUT is not set -CONFIG_ACPI_PCC=y -# CONFIG_ACPI_FFH is not set -CONFIG_PMIC_OPREGION=y -CONFIG_ACPI_PRMT=y -CONFIG_X86_PM_TIMER=y - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_ATTR_SET=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y - -# -# CPU frequency scaling drivers -# -CONFIG_X86_INTEL_PSTATE=y -# CONFIG_X86_PCC_CPUFREQ is not set -CONFIG_X86_AMD_PSTATE=y -CONFIG_X86_AMD_PSTATE_DEFAULT_MODE=3 -# CONFIG_X86_AMD_PSTATE_UT is not set -CONFIG_X86_ACPI_CPUFREQ=m -CONFIG_X86_ACPI_CPUFREQ_CPB=y -CONFIG_X86_POWERNOW_K8=m -CONFIG_X86_AMD_FREQ_SENSITIVITY=m -# CONFIG_X86_SPEEDSTEP_CENTRINO is not set -CONFIG_X86_P4_CLOCKMOD=m - -# -# shared options -# -CONFIG_X86_SPEEDSTEP_LIB=m -# end of CPU Frequency scaling - -# -# CPU Idle -# -CONFIG_CPU_IDLE=y -# CONFIG_CPU_IDLE_GOV_LADDER is not set -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_CPU_IDLE_GOV_TEO is not set -CONFIG_CPU_IDLE_GOV_HALTPOLL=y -CONFIG_HALTPOLL_CPUIDLE=y -# end of CPU Idle - -CONFIG_INTEL_IDLE=y -# end of Power management and ACPI options - -# -# Bus options (PCI etc.) -# -CONFIG_PCI_DIRECT=y -CONFIG_PCI_MMCONFIG=y -CONFIG_PCI_XEN=y -CONFIG_MMCONF_FAM10H=y -CONFIG_ISA_DMA_API=y -CONFIG_AMD_NB=y -# end of Bus options (PCI etc.) 
- -# -# Binary Emulations -# -CONFIG_IA32_EMULATION=y -# CONFIG_X86_X32_ABI is not set -CONFIG_COMPAT_32=y -CONFIG_COMPAT=y -CONFIG_COMPAT_FOR_U64_ALIGNMENT=y -# end of Binary Emulations - -CONFIG_HAVE_KVM=y -CONFIG_HAVE_KVM_PFNCACHE=y -CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_HAVE_KVM_IRQFD=y -CONFIG_HAVE_KVM_IRQ_ROUTING=y -CONFIG_HAVE_KVM_DIRTY_RING=y -CONFIG_HAVE_KVM_DIRTY_RING_TSO=y -CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_MMIO=y -CONFIG_KVM_ASYNC_PF=y -CONFIG_HAVE_KVM_MSI=y -CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y -CONFIG_KVM_VFIO=y -CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y -CONFIG_KVM_COMPAT=y -CONFIG_HAVE_KVM_IRQ_BYPASS=y -CONFIG_HAVE_KVM_NO_POLL=y -CONFIG_KVM_XFER_TO_GUEST_WORK=y -CONFIG_HAVE_KVM_PM_NOTIFIER=y -CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m -CONFIG_KVM_INTEL=m -CONFIG_X86_SGX_KVM=y -CONFIG_KVM_AMD=m -CONFIG_KVM_AMD_SEV=y -CONFIG_KVM_SMM=y -# CONFIG_KVM_XEN is not set -CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y -CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y -CONFIG_AS_AVX512=y -CONFIG_AS_SHA1_NI=y -CONFIG_AS_SHA256_NI=y -CONFIG_AS_TPAUSE=y -CONFIG_AS_GFNI=y -CONFIG_AS_WRUSS=y - -# -# General architecture-dependent options -# -CONFIG_HOTPLUG_SMT=y -CONFIG_HOTPLUG_CORE_SYNC=y -CONFIG_HOTPLUG_CORE_SYNC_DEAD=y -CONFIG_HOTPLUG_CORE_SYNC_FULL=y -CONFIG_HOTPLUG_SPLIT_STARTUP=y -CONFIG_HOTPLUG_PARALLEL=y -CONFIG_GENERIC_ENTRY=y -CONFIG_KPROBES=y -CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set -# CONFIG_STATIC_CALL_SELFTEST is not set -CONFIG_OPTPROBES=y -CONFIG_KPROBES_ON_FTRACE=y -CONFIG_UPROBES=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_ARCH_USE_BUILTIN_BSWAP=y -CONFIG_KRETPROBES=y -CONFIG_KRETPROBE_ON_RETHOOK=y -CONFIG_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_OPTPROBES=y -CONFIG_HAVE_KPROBES_ON_FTRACE=y -CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y -CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y -CONFIG_HAVE_NMI=y 
-CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_ARCH_HAS_SET_DIRECT_MAP=y -CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y -CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y -CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y -CONFIG_ARCH_WANTS_NO_INSTR=y -CONFIG_HAVE_ASM_MODVERSIONS=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y -CONFIG_HAVE_RUST=y -CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y -CONFIG_HAVE_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_PERF_EVENTS_NMI=y -CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y -CONFIG_MMU_GATHER_TABLE_FREE=y -CONFIG_MMU_GATHER_RCU_TABLE_FREE=y -CONFIG_MMU_GATHER_MERGE_VMAS=y -CONFIG_MMU_LAZY_TLB_REFCOUNT=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y -CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y -CONFIG_HAVE_ARCH_SECCOMP=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP=y -CONFIG_SECCOMP_FILTER=y -# CONFIG_SECCOMP_CACHE_DEBUG is not set -CONFIG_HAVE_ARCH_STACKLEAK=y -CONFIG_HAVE_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR_STRONG=y -CONFIG_ARCH_SUPPORTS_LTO_CLANG=y -CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y -CONFIG_LTO_NONE=y -CONFIG_ARCH_SUPPORTS_CFI_CLANG=y -# CONFIG_CFI_CLANG is not set -CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y -CONFIG_HAVE_CONTEXT_TRACKING_USER=y -CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_MOVE_PUD=y -CONFIG_HAVE_MOVE_PMD=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y 
-CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y -CONFIG_HAVE_ARCH_HUGE_VMAP=y -CONFIG_HAVE_ARCH_HUGE_VMALLOC=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_ARCH_WANT_PMD_MKWRITE=y -CONFIG_HAVE_ARCH_SOFT_DIRTY=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y -CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y -CONFIG_SOFTIRQ_ON_OWN_STACK=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_HAVE_EXIT_THREAD=y -CONFIG_ARCH_MMAP_RND_BITS=28 -CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 -CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y -CONFIG_PAGE_SIZE_LESS_THAN_64KB=y -CONFIG_PAGE_SIZE_LESS_THAN_256KB=y -CONFIG_HAVE_OBJTOOL=y -CONFIG_HAVE_JUMP_LABEL_HACK=y -CONFIG_HAVE_NOINSTR_HACK=y -CONFIG_HAVE_NOINSTR_VALIDATION=y -CONFIG_HAVE_UACCESS_VALIDATION=y -CONFIG_HAVE_STACK_VALIDATION=y -CONFIG_HAVE_RELIABLE_STACKTRACE=y -CONFIG_OLD_SIGSUSPEND3=y -CONFIG_COMPAT_OLD_SIGACTION=y -CONFIG_COMPAT_32BIT_TIME=y -CONFIG_HAVE_ARCH_VMAP_STACK=y -CONFIG_VMAP_STACK=y -CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y -CONFIG_RANDOMIZE_KSTACK_OFFSET=y -# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_STRICT_MODULE_RWX=y -CONFIG_ARCH_HAS_CPU_RESCTRL=y -CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y -CONFIG_ARCH_USE_MEMREMAP_PROT=y -# CONFIG_LOCK_EVENT_COUNTS is not set -CONFIG_ARCH_HAS_MEM_ENCRYPT=y -CONFIG_ARCH_HAS_CC_PLATFORM=y -CONFIG_HAVE_STATIC_CALL=y -CONFIG_HAVE_STATIC_CALL_INLINE=y -CONFIG_HAVE_PREEMPT_DYNAMIC=y -CONFIG_HAVE_PREEMPT_DYNAMIC_CALL=y -CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y -CONFIG_ARCH_HAS_ELFCORE_COMPAT=y -CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH=y -CONFIG_DYNAMIC_SIGFRAME=y -CONFIG_HAVE_ARCH_NODE_DEV_GROUP=y -CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set 
-CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -# end of GCOV-based kernel profiling - -CONFIG_HAVE_GCC_PLUGINS=y -CONFIG_GCC_PLUGINS=y -# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set -CONFIG_FUNCTION_ALIGNMENT_4B=y -CONFIG_FUNCTION_ALIGNMENT_16B=y -CONFIG_FUNCTION_ALIGNMENT=16 -# end of General architecture-dependent options - -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULE_SIG_FORMAT=y -CONFIG_MODULES=y -# CONFIG_MODULE_DEBUG is not set -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set -CONFIG_MODVERSIONS=y -CONFIG_ASM_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_MODULE_SIG=y -# CONFIG_MODULE_SIG_FORCE is not set -# CONFIG_MODULE_SIG_ALL is not set -# CONFIG_MODULE_SIG_SHA1 is not set -# CONFIG_MODULE_SIG_SHA224 is not set -CONFIG_MODULE_SIG_SHA256=y -# CONFIG_MODULE_SIG_SHA384 is not set -# CONFIG_MODULE_SIG_SHA512 is not set -CONFIG_MODULE_SIG_HASH="sha256" -CONFIG_MODULE_COMPRESS_NONE=y -# CONFIG_MODULE_COMPRESS_GZIP is not set -# CONFIG_MODULE_COMPRESS_XZ is not set -# CONFIG_MODULE_COMPRESS_ZSTD is not set -# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set -CONFIG_MODPROBE_PATH="/sbin/modprobe" -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLOCK_LEGACY_AUTOLOAD=y -CONFIG_BLK_RQ_ALLOC_TIME=y -CONFIG_BLK_CGROUP_RWSTAT=y -CONFIG_BLK_CGROUP_PUNT_BIO=y -CONFIG_BLK_DEV_BSG_COMMON=y -CONFIG_BLK_ICQ=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_INTEGRITY_T10=m -CONFIG_BLK_DEV_ZONED=y -CONFIG_BLK_DEV_THROTTLING=y -# CONFIG_BLK_DEV_THROTTLING_LOW is not set -# CONFIG_BLK_WBT is not set -CONFIG_BLK_CGROUP_IOLATENCY=y -# CONFIG_BLK_CGROUP_FC_APPID is not set -CONFIG_BLK_CGROUP_IOCOST=y -# CONFIG_BLK_CGROUP_IOPRIO is not set -CONFIG_BLK_DEBUG_FS=y -CONFIG_BLK_DEBUG_FS_ZONED=y -# CONFIG_BLK_SED_OPAL is not set -# CONFIG_BLK_INLINE_ENCRYPTION is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION 
is not set -# CONFIG_AIX_PARTITION is not set -CONFIG_OSF_PARTITION=y -CONFIG_AMIGA_PARTITION=y -# CONFIG_ATARI_PARTITION is not set -CONFIG_MAC_PARTITION=y -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -# CONFIG_LDM_PARTITION is not set -CONFIG_SGI_PARTITION=y -# CONFIG_ULTRIX_PARTITION is not set -CONFIG_SUN_PARTITION=y -CONFIG_KARMA_PARTITION=y -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -# CONFIG_CMDLINE_PARTITION is not set -# end of Partition Types - -CONFIG_BLK_MQ_PCI=y -CONFIG_BLK_MQ_VIRTIO=y -CONFIG_BLK_PM=y -CONFIG_BLOCK_HOLDER_DEPRECATED=y -CONFIG_BLK_MQ_STACKING=y - -# -# IO Schedulers -# -CONFIG_MQ_IOSCHED_DEADLINE=y -CONFIG_MQ_IOSCHED_KYBER=y -CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y -# CONFIG_BFQ_CGROUP_DEBUG is not set -# end of IO Schedulers - -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_PADATA=y -CONFIG_ASN1=y -CONFIG_UNINLINE_SPIN_UNLOCK=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y -CONFIG_QUEUED_SPINLOCKS=y -CONFIG_ARCH_USE_QUEUED_RWLOCKS=y -CONFIG_QUEUED_RWLOCKS=y -CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y -CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y -CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y -CONFIG_CK_KABI_RESERVE=y -CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y -CONFIG_FREEZER=y - -# -# Executable file formats -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ELFCORE=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_BINFMT_SCRIPT=y -CONFIG_BINFMT_MISC=m -CONFIG_COREDUMP=y -# end of Executable file formats - -# -# Memory Management options -# -CONFIG_ZPOOL=y -CONFIG_SWAP=y -CONFIG_ZSWAP=y -# CONFIG_ZSWAP_DEFAULT_ON is not set -# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set -CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set -# 
CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set -CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" -CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y -# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set -# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set -CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" -CONFIG_ZBUD=y -# CONFIG_Z3FOLD is not set -CONFIG_ZSMALLOC=y -CONFIG_ZSMALLOC_STAT=y -CONFIG_ZSMALLOC_CHAIN_SIZE=8 - -# -# SLAB allocator options -# -# CONFIG_SLAB_DEPRECATED is not set -CONFIG_SLUB=y -# CONFIG_SLAB_MERGE_DEFAULT is not set -CONFIG_SLAB_FREELIST_RANDOM=y -# CONFIG_SLAB_FREELIST_HARDENED is not set -# CONFIG_SLUB_STATS is not set -CONFIG_SLUB_CPU_PARTIAL=y -# CONFIG_RANDOM_KMALLOC_CACHES is not set -# end of SLAB allocator options - -CONFIG_SHUFFLE_PAGE_ALLOCATOR=y -# CONFIG_COMPAT_BRK is not set -CONFIG_SPARSEMEM=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP=y -CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y -CONFIG_HAVE_FAST_GUP=y -CONFIG_NUMA_KEEP_MEMINFO=y -CONFIG_MEMORY_ISOLATION=y -CONFIG_EXCLUSIVE_SYSTEM_RAM=y -CONFIG_HAVE_BOOTMEM_INFO_NODE=y -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y -CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_MHP_MEMMAP_ON_MEMORY=y -CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y -CONFIG_MEMORY_BALLOON=y -CONFIG_BALLOON_COMPACTION=y -CONFIG_COMPACTION=y -CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 -CONFIG_PAGE_REPORTING=y -CONFIG_MIGRATION=y -CONFIG_DEVICE_MIGRATION=y -CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y -CONFIG_ARCH_ENABLE_THP_MIGRATION=y -CONFIG_CONTIG_ALLOC=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -CONFIG_MEMORY_FAILURE=y -CONFIG_HWPOISON_INJECT=m 
-CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -CONFIG_ARCH_WANTS_THP_SWAP=y -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set -CONFIG_THP_SWAP=y -CONFIG_READ_ONLY_THP_FOR_FS=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -CONFIG_USE_PERCPU_NUMA_NODE_ID=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_CMA=y -# CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set -# CONFIG_CMA_SYSFS is not set -CONFIG_CMA_AREAS=19 -CONFIG_MEM_SOFT_DIRTY=y -CONFIG_GENERIC_EARLY_IOREMAP=y -CONFIG_DEFERRED_STRUCT_PAGE_INIT=y -CONFIG_PAGE_IDLE_FLAG=y -CONFIG_IDLE_PAGE_TRACKING=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y -CONFIG_ARCH_HAS_PTE_DEVMAP=y -CONFIG_ZONE_DMA=y -CONFIG_ZONE_DMA32=y -CONFIG_ZONE_DEVICE=y -CONFIG_HMM_MIRROR=y -CONFIG_GET_FREE_REGION=y -CONFIG_DEVICE_PRIVATE=y -CONFIG_VMAP_PFN=y -CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y -CONFIG_ARCH_HAS_PKEYS=y -CONFIG_VM_EVENT_COUNTERS=y -# CONFIG_PERCPU_STATS is not set -# CONFIG_GUP_TEST is not set -# CONFIG_DMAPOOL_TEST is not set -CONFIG_ARCH_HAS_PTE_SPECIAL=y -CONFIG_MAPPING_DIRTY_HELPERS=y -CONFIG_MEMFD_CREATE=y -CONFIG_SECRETMEM=y -# CONFIG_ANON_VMA_NAME is not set -CONFIG_USERFAULTFD=y -CONFIG_HAVE_ARCH_USERFAULTFD_WP=y -CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y -CONFIG_PTE_MARKER_UFFD_WP=y -CONFIG_LRU_GEN=y -# CONFIG_LRU_GEN_ENABLED is not set -# CONFIG_LRU_GEN_STATS is not set -CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y -CONFIG_PER_VMA_LOCK=y -CONFIG_LOCK_MM_AND_FIND_VMA=y - -# -# Data Access Monitoring -# -CONFIG_DAMON=y -CONFIG_DAMON_VADDR=y -CONFIG_DAMON_PADDR=y -# CONFIG_DAMON_SYSFS is not set -CONFIG_DAMON_DBGFS=y -# CONFIG_DAMON_RECLAIM is not set -# CONFIG_DAMON_LRU_SORT is not set -# end of Data Access Monitoring -# end of Memory Management options - -CONFIG_NET=y -CONFIG_NET_INGRESS=y -CONFIG_NET_EGRESS=y -CONFIG_NET_XGRESS=y -CONFIG_NET_REDIRECT=y -CONFIG_SKB_EXTENSIONS=y - -# -# Networking options -# 
-CONFIG_PACKET=y -CONFIG_PACKET_DIAG=m -CONFIG_UNIX=y -CONFIG_UNIX_SCM=y -CONFIG_AF_UNIX_OOB=y -CONFIG_UNIX_DIAG=m -CONFIG_TLS=m -CONFIG_TLS_DEVICE=y -# CONFIG_TLS_TOE is not set -CONFIG_XFRM=y -CONFIG_XFRM_OFFLOAD=y -CONFIG_XFRM_ALGO=y -CONFIG_XFRM_USER=y -# CONFIG_XFRM_USER_COMPAT is not set -CONFIG_XFRM_INTERFACE=m -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y -CONFIG_XFRM_STATISTICS=y -CONFIG_XFRM_AH=m -CONFIG_XFRM_ESP=m -CONFIG_XFRM_IPCOMP=m -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -CONFIG_SMC=m -CONFIG_SMC_DIAG=m -CONFIG_XDP_SOCKETS=y -CONFIG_XDP_SOCKETS_DIAG=m -CONFIG_NET_HANDSHAKE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -# CONFIG_IP_PNP is not set -CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IP_TUNNEL=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE_COMMON=y -CONFIG_IP_MROUTE=y -CONFIG_IP_MROUTE_MULTIPLE_TABLES=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_NET_IPVTI=m -CONFIG_NET_UDP_TUNNEL=m -# CONFIG_NET_FOU is not set -# CONFIG_NET_FOU_IP_TUNNELS is not set -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_ESP_OFFLOAD=m -# CONFIG_INET_ESPINTCP is not set -CONFIG_INET_IPCOMP=m -CONFIG_INET_TABLE_PERTURB_ORDER=16 -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=m -CONFIG_INET_DIAG=m -CONFIG_INET_TCP_DIAG=m -CONFIG_INET_UDP_DIAG=m -CONFIG_INET_RAW_DIAG=m -# CONFIG_INET_DIAG_DESTROY is not set -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m -CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=m -CONFIG_TCP_CONG_HTCP=m -CONFIG_TCP_CONG_HSTCP=m -CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_VEGAS=m -CONFIG_TCP_CONG_NV=m -CONFIG_TCP_CONG_SCALABLE=m -CONFIG_TCP_CONG_LP=m -CONFIG_TCP_CONG_VENO=m -CONFIG_TCP_CONG_YEAH=m -CONFIG_TCP_CONG_ILLINOIS=m -CONFIG_TCP_CONG_DCTCP=m -CONFIG_TCP_CONG_CDG=m -CONFIG_TCP_CONG_BBR=m 
-CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_ESP_OFFLOAD=m -# CONFIG_INET6_ESPINTCP is not set -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_MIP6=m -# CONFIG_IPV6_ILA is not set -CONFIG_INET6_XFRM_TUNNEL=m -CONFIG_INET6_TUNNEL=m -CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=m -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_GRE=m -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y -CONFIG_IPV6_PIMSM_V2=y -# CONFIG_IPV6_SEG6_LWTUNNEL is not set -# CONFIG_IPV6_SEG6_HMAC is not set -# CONFIG_IPV6_RPL_LWTUNNEL is not set -# CONFIG_IPV6_IOAM6_LWTUNNEL is not set -CONFIG_NETLABEL=y -CONFIG_MPTCP=y -CONFIG_INET_MPTCP_DIAG=m -CONFIG_MPTCP_IPV6=y -CONFIG_NETWORK_SECMARK=y -CONFIG_NET_PTP_CLASSIFY=y -CONFIG_NETWORK_PHY_TIMESTAMPING=y -CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=m - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_INGRESS=y -CONFIG_NETFILTER_EGRESS=y -CONFIG_NETFILTER_SKIP_EGRESS=y -CONFIG_NETFILTER_NETLINK=m -CONFIG_NETFILTER_FAMILY_BRIDGE=y -CONFIG_NETFILTER_FAMILY_ARP=y -CONFIG_NETFILTER_BPF_LINK=y -# CONFIG_NETFILTER_NETLINK_HOOK is not set -CONFIG_NETFILTER_NETLINK_ACCT=m -CONFIG_NETFILTER_NETLINK_QUEUE=m -CONFIG_NETFILTER_NETLINK_LOG=m -CONFIG_NETFILTER_NETLINK_OSF=m -CONFIG_NF_CONNTRACK=m -CONFIG_NF_LOG_SYSLOG=m -CONFIG_NETFILTER_CONNCOUNT=m -CONFIG_NF_CONNTRACK_MARK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_TIMEOUT=y -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_LABELS=y -CONFIG_NF_CONNTRACK_OVS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=y -CONFIG_NF_CT_PROTO_SCTP=y -CONFIG_NF_CT_PROTO_UDPLITE=y 
-CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_BROADCAST=m -CONFIG_NF_CONNTRACK_NETBIOS_NS=m -CONFIG_NF_CONNTRACK_SNMP=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NF_CT_NETLINK_TIMEOUT=m -CONFIG_NF_CT_NETLINK_HELPER=m -CONFIG_NETFILTER_NETLINK_GLUE_CT=y -CONFIG_NF_NAT=m -CONFIG_NF_NAT_AMANDA=m -CONFIG_NF_NAT_FTP=m -CONFIG_NF_NAT_IRC=m -CONFIG_NF_NAT_SIP=m -CONFIG_NF_NAT_TFTP=m -CONFIG_NF_NAT_REDIRECT=y -CONFIG_NF_NAT_MASQUERADE=y -CONFIG_NF_NAT_OVS=y -CONFIG_NETFILTER_SYNPROXY=m -CONFIG_NF_TABLES=m -CONFIG_NF_TABLES_INET=y -CONFIG_NF_TABLES_NETDEV=y -CONFIG_NFT_NUMGEN=m -CONFIG_NFT_CT=m -CONFIG_NFT_FLOW_OFFLOAD=m -CONFIG_NFT_CONNLIMIT=m -CONFIG_NFT_LOG=m -CONFIG_NFT_LIMIT=m -CONFIG_NFT_MASQ=m -CONFIG_NFT_REDIR=m -CONFIG_NFT_NAT=m -CONFIG_NFT_TUNNEL=m -CONFIG_NFT_QUEUE=m -CONFIG_NFT_QUOTA=m -CONFIG_NFT_REJECT=m -CONFIG_NFT_REJECT_INET=m -CONFIG_NFT_COMPAT=m -CONFIG_NFT_HASH=m -CONFIG_NFT_FIB=m -CONFIG_NFT_FIB_INET=m -CONFIG_NFT_XFRM=m -CONFIG_NFT_SOCKET=m -CONFIG_NFT_OSF=m -CONFIG_NFT_TPROXY=m -# CONFIG_NFT_SYNPROXY is not set -CONFIG_NF_DUP_NETDEV=m -CONFIG_NFT_DUP_NETDEV=m -CONFIG_NFT_FWD_NETDEV=m -CONFIG_NFT_FIB_NETDEV=m -# CONFIG_NFT_REJECT_NETDEV is not set -CONFIG_NF_FLOW_TABLE_INET=m -CONFIG_NF_FLOW_TABLE=m -# CONFIG_NF_FLOW_TABLE_PROCFS is not set -CONFIG_NETFILTER_XTABLES=y -# CONFIG_NETFILTER_XTABLES_COMPAT is not set - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=m -CONFIG_NETFILTER_XT_CONNMARK=m -CONFIG_NETFILTER_XT_SET=m - -# -# Xtables targets -# -CONFIG_NETFILTER_XT_TARGET_AUDIT=m -CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m -CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HL=m 
-CONFIG_NETFILTER_XT_TARGET_HMARK=m -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m -CONFIG_NETFILTER_XT_TARGET_LED=m -CONFIG_NETFILTER_XT_TARGET_LOG=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_NAT=m -CONFIG_NETFILTER_XT_TARGET_NETMAP=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_RATEEST=m -CONFIG_NETFILTER_XT_TARGET_REDIRECT=m -CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m -CONFIG_NETFILTER_XT_TARGET_TEE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m - -# -# Xtables matches -# -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -CONFIG_NETFILTER_XT_MATCH_BPF=m -CONFIG_NETFILTER_XT_MATCH_CGROUP=m -CONFIG_NETFILTER_XT_MATCH_CLUSTER=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ECN=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_HL=m -# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_IPVS=m -CONFIG_NETFILTER_XT_MATCH_L2TP=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_NFACCT=m -CONFIG_NETFILTER_XT_MATCH_OSF=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m 
-CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SCTP=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -# end of Core Netfilter Configuration - -CONFIG_IP_SET=m -CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=m -CONFIG_IP_SET_BITMAP_IPMAC=m -CONFIG_IP_SET_BITMAP_PORT=m -CONFIG_IP_SET_HASH_IP=m -CONFIG_IP_SET_HASH_IPMARK=m -CONFIG_IP_SET_HASH_IPPORT=m -CONFIG_IP_SET_HASH_IPPORTIP=m -CONFIG_IP_SET_HASH_IPPORTNET=m -CONFIG_IP_SET_HASH_IPMAC=m -CONFIG_IP_SET_HASH_MAC=m -CONFIG_IP_SET_HASH_NETPORTNET=m -CONFIG_IP_SET_HASH_NET=m -CONFIG_IP_SET_HASH_NETNET=m -CONFIG_IP_SET_HASH_NETPORT=m -CONFIG_IP_SET_HASH_NETIFACE=m -CONFIG_IP_SET_LIST_SET=m -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -# CONFIG_IP_VS_DEBUG is not set -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_FO=m -CONFIG_IP_VS_OVF=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_MH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m -# CONFIG_IP_VS_TWOS is not set - -# -# IPVS SH scheduler -# -CONFIG_IP_VS_SH_TAB_BITS=8 - -# -# IPVS MH scheduler -# -CONFIG_IP_VS_MH_TAB_INDEX=12 - -# -# IPVS application helper -# -CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_NFCT=y -CONFIG_IP_VS_PE_SIP=m - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=m -CONFIG_NF_SOCKET_IPV4=m -CONFIG_NF_TPROXY_IPV4=m -CONFIG_NF_TABLES_IPV4=y -CONFIG_NFT_REJECT_IPV4=m -CONFIG_NFT_DUP_IPV4=m -CONFIG_NFT_FIB_IPV4=m 
-CONFIG_NF_TABLES_ARP=y -CONFIG_NF_DUP_IPV4=m -CONFIG_NF_LOG_ARP=m -CONFIG_NF_LOG_IPV4=m -CONFIG_NF_REJECT_IPV4=m -CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PPTP=m -CONFIG_NF_NAT_H323=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_RPFILTER=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_SYNPROXY=m -CONFIG_IP_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_SECURITY=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m -# end of IP: Netfilter Configuration - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_SOCKET_IPV6=m -CONFIG_NF_TPROXY_IPV6=m -CONFIG_NF_TABLES_IPV6=y -CONFIG_NFT_REJECT_IPV6=m -CONFIG_NFT_DUP_IPV6=m -CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_DUP_IPV6=m -CONFIG_NF_REJECT_IPV6=m -CONFIG_NF_LOG_IPV6=m -CONFIG_IP6_NF_IPTABLES=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RPFILTER=m -CONFIG_IP6_NF_MATCH_RT=m -# CONFIG_IP6_NF_MATCH_SRH is not set -# CONFIG_IP6_NF_TARGET_HL is not set -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_TARGET_SYNPROXY=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_IP6_NF_SECURITY=m -CONFIG_IP6_NF_NAT=m -CONFIG_IP6_NF_TARGET_MASQUERADE=m -CONFIG_IP6_NF_TARGET_NPT=m -# end of IPv6: Netfilter Configuration - -CONFIG_NF_DEFRAG_IPV6=m -CONFIG_NF_TABLES_BRIDGE=m -# CONFIG_NFT_BRIDGE_META is not set -CONFIG_NFT_BRIDGE_REJECT=m -# CONFIG_NF_CONNTRACK_BRIDGE is not set -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m 
-CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_NFLOG=m -# CONFIG_BPFILTER is not set -# CONFIG_IP_DCCP is not set -CONFIG_IP_SCTP=m -# CONFIG_SCTP_DBG_OBJCNT is not set -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set -CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set -CONFIG_SCTP_COOKIE_HMAC_MD5=y -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_INET_SCTP_DIAG=m -# CONFIG_RDS is not set -CONFIG_TIPC=m -CONFIG_TIPC_MEDIA_IB=y -CONFIG_TIPC_MEDIA_UDP=y -CONFIG_TIPC_CRYPTO=y -CONFIG_TIPC_DIAG=m -CONFIG_ATM=m -CONFIG_ATM_CLIP=m -# CONFIG_ATM_CLIP_NO_ICMP is not set -CONFIG_ATM_LANE=m -# CONFIG_ATM_MPOA is not set -CONFIG_ATM_BR2684=m -# CONFIG_ATM_BR2684_IPFILTER is not set -CONFIG_L2TP=m -CONFIG_L2TP_DEBUGFS=m -CONFIG_L2TP_V3=y -CONFIG_L2TP_IP=m -CONFIG_L2TP_ETH=m -CONFIG_STP=m -CONFIG_GARP=m -CONFIG_MRP=m -CONFIG_BRIDGE=m -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_BRIDGE_VLAN_FILTERING=y -# CONFIG_BRIDGE_MRP is not set -# CONFIG_BRIDGE_CFM is not set -# CONFIG_NET_DSA is not set -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_VLAN_8021Q_MVRP=y -CONFIG_LLC=m -# CONFIG_LLC2 is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_PHONET is not set -CONFIG_6LOWPAN=m -# CONFIG_6LOWPAN_DEBUGFS is not set -# CONFIG_6LOWPAN_NHC is not set -CONFIG_IEEE802154=m -# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set -CONFIG_IEEE802154_SOCKET=m -CONFIG_IEEE802154_6LOWPAN=m -CONFIG_MAC802154=m -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_MULTIQ=m -CONFIG_NET_SCH_RED=m 
-CONFIG_NET_SCH_SFB=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -# CONFIG_NET_SCH_CBS is not set -# CONFIG_NET_SCH_ETF is not set -CONFIG_NET_SCH_MQPRIO_LIB=m -# CONFIG_NET_SCH_TAPRIO is not set -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_DRR=m -CONFIG_NET_SCH_MQPRIO=m -# CONFIG_NET_SCH_SKBPRIO is not set -CONFIG_NET_SCH_CHOKE=m -CONFIG_NET_SCH_QFQ=m -CONFIG_NET_SCH_CODEL=m -CONFIG_NET_SCH_FQ_CODEL=y -# CONFIG_NET_SCH_CAKE is not set -CONFIG_NET_SCH_FQ=m -CONFIG_NET_SCH_HHF=m -CONFIG_NET_SCH_PIE=m -# CONFIG_NET_SCH_FQ_PIE is not set -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_SCH_PLUG=m -# CONFIG_NET_SCH_ETS is not set -CONFIG_NET_SCH_DEFAULT=y -# CONFIG_DEFAULT_FQ is not set -# CONFIG_DEFAULT_CODEL is not set -CONFIG_DEFAULT_FQ_CODEL=y -# CONFIG_DEFAULT_SFQ is not set -# CONFIG_DEFAULT_PFIFO_FAST is not set -CONFIG_DEFAULT_NET_SCH="fq_codel" - -# -# Classification -# -CONFIG_NET_CLS=y -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_CLS_U32_PERF=y -CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_CGROUP=y -CONFIG_NET_CLS_BPF=m -CONFIG_NET_CLS_FLOWER=m -CONFIG_NET_CLS_MATCHALL=m -CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 -CONFIG_NET_EMATCH_CMP=m -CONFIG_NET_EMATCH_NBYTE=m -CONFIG_NET_EMATCH_U32=m -CONFIG_NET_EMATCH_META=m -CONFIG_NET_EMATCH_TEXT=m -CONFIG_NET_EMATCH_IPSET=m -# CONFIG_NET_EMATCH_IPT is not set -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=m -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_SAMPLE=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_NET_ACT_CSUM=m -# CONFIG_NET_ACT_MPLS is not set -CONFIG_NET_ACT_VLAN=m -CONFIG_NET_ACT_BPF=m -# CONFIG_NET_ACT_CONNMARK is not set -# CONFIG_NET_ACT_CTINFO is not set -CONFIG_NET_ACT_SKBMOD=m -# CONFIG_NET_ACT_IFE is not set -CONFIG_NET_ACT_TUNNEL_KEY=m -CONFIG_NET_ACT_CT=m -# 
CONFIG_NET_ACT_GATE is not set -CONFIG_NET_TC_SKB_EXT=y -CONFIG_NET_SCH_FIFO=y -CONFIG_DCB=y -CONFIG_DNS_RESOLVER=m -# CONFIG_BATMAN_ADV is not set -CONFIG_OPENVSWITCH=m -CONFIG_OPENVSWITCH_GRE=m -CONFIG_OPENVSWITCH_VXLAN=m -CONFIG_OPENVSWITCH_GENEVE=m -CONFIG_VSOCKETS=m -CONFIG_VSOCKETS_DIAG=m -CONFIG_VSOCKETS_LOOPBACK=m -CONFIG_VMWARE_VMCI_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS_COMMON=m -CONFIG_HYPERV_VSOCKETS=m -CONFIG_NETLINK_DIAG=m -CONFIG_MPLS=y -CONFIG_NET_MPLS_GSO=y -CONFIG_MPLS_ROUTING=m -CONFIG_MPLS_IPTUNNEL=m -CONFIG_NET_NSH=y -# CONFIG_HSR is not set -CONFIG_NET_SWITCHDEV=y -CONFIG_NET_L3_MASTER_DEV=y -# CONFIG_QRTR is not set -# CONFIG_NET_NCSI is not set -CONFIG_PCPU_DEV_REFCNT=y -CONFIG_MAX_SKB_FRAGS=17 -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_SOCK_RX_QUEUE_MAPPING=y -CONFIG_XPS=y -CONFIG_CGROUP_NET_PRIO=y -CONFIG_CGROUP_NET_CLASSID=y -CONFIG_NET_RX_BUSY_POLL=y -CONFIG_BQL=y -CONFIG_BPF_STREAM_PARSER=y -CONFIG_NET_FLOW_LIMIT=y - -# -# Network testing -# -CONFIG_NET_PKTGEN=m -CONFIG_NET_DROP_MONITOR=y -# end of Network testing -# end of Networking options - -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -CONFIG_BT=m -CONFIG_BT_BREDR=y -CONFIG_BT_RFCOMM=m -CONFIG_BT_RFCOMM_TTY=y -CONFIG_BT_BNEP=m -CONFIG_BT_BNEP_MC_FILTER=y -CONFIG_BT_BNEP_PROTO_FILTER=y -CONFIG_BT_CMTP=m -CONFIG_BT_HIDP=m -CONFIG_BT_LE=y -CONFIG_BT_LE_L2CAP_ECRED=y -# CONFIG_BT_6LOWPAN is not set -# CONFIG_BT_LEDS is not set -# CONFIG_BT_MSFTEXT is not set -# CONFIG_BT_AOSPEXT is not set -CONFIG_BT_DEBUGFS=y -# CONFIG_BT_SELFTEST is not set - -# -# Bluetooth device drivers -# -CONFIG_BT_INTEL=m -CONFIG_BT_BCM=m -CONFIG_BT_RTL=m -CONFIG_BT_HCIBTUSB=m -CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y -CONFIG_BT_HCIBTUSB_POLL_SYNC=y -CONFIG_BT_HCIBTUSB_BCM=y -# CONFIG_BT_HCIBTUSB_MTK is not set -CONFIG_BT_HCIBTUSB_RTL=y -CONFIG_BT_HCIBTSDIO=m -CONFIG_BT_HCIUART=m -CONFIG_BT_HCIUART_H4=y -CONFIG_BT_HCIUART_BCSP=y -CONFIG_BT_HCIUART_ATH3K=y -# CONFIG_BT_HCIUART_INTEL is not 
set -# CONFIG_BT_HCIUART_AG6XX is not set -CONFIG_BT_HCIBCM203X=m -# CONFIG_BT_HCIBCM4377 is not set -CONFIG_BT_HCIBPA10X=m -CONFIG_BT_HCIBFUSB=m -CONFIG_BT_HCIVHCI=m -CONFIG_BT_MRVL=m -CONFIG_BT_MRVL_SDIO=m -CONFIG_BT_ATH3K=m -# CONFIG_BT_MTKSDIO is not set -# CONFIG_BT_VIRTIO is not set -# end of Bluetooth device drivers - -# CONFIG_AF_RXRPC is not set -# CONFIG_AF_KCM is not set -CONFIG_STREAM_PARSER=y -# CONFIG_MCTP is not set -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y -CONFIG_CFG80211=m -# CONFIG_NL80211_TESTMODE is not set -# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set -CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y -CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y -CONFIG_CFG80211_DEFAULT_PS=y -# CONFIG_CFG80211_DEBUGFS is not set -CONFIG_CFG80211_CRDA_SUPPORT=y -# CONFIG_CFG80211_WEXT is not set -CONFIG_MAC80211=m -CONFIG_MAC80211_HAS_RC=y -CONFIG_MAC80211_RC_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" -# CONFIG_MAC80211_MESH is not set -CONFIG_MAC80211_LEDS=y -CONFIG_MAC80211_DEBUGFS=y -# CONFIG_MAC80211_MESSAGE_TRACING is not set -# CONFIG_MAC80211_DEBUG_MENU is not set -CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 -CONFIG_RFKILL=m -CONFIG_RFKILL_LEDS=y -CONFIG_RFKILL_INPUT=y -# CONFIG_RFKILL_GPIO is not set -# CONFIG_NET_9P is not set -# CONFIG_CAIF is not set -CONFIG_CEPH_LIB=m -# CONFIG_CEPH_LIB_PRETTYDEBUG is not set -CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y -# CONFIG_NFC is not set -CONFIG_PSAMPLE=m -# CONFIG_NET_IFE is not set -CONFIG_LWTUNNEL=y -CONFIG_LWTUNNEL_BPF=y -CONFIG_DST_CACHE=y -CONFIG_GRO_CELLS=y -CONFIG_SOCK_VALIDATE_XMIT=y -CONFIG_NET_SELFTESTS=y -CONFIG_NET_SOCK_MSG=y -CONFIG_NET_DEVLINK=y -CONFIG_PAGE_POOL=y -# CONFIG_PAGE_POOL_STATS is not set -CONFIG_FAILOVER=m -CONFIG_ETHTOOL_NETLINK=y - -# -# Device Drivers -# -CONFIG_HAVE_EISA=y -# CONFIG_EISA is not set -CONFIG_HAVE_PCI=y -CONFIG_PCI=y -CONFIG_PCI_DOMAINS=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCIEAER=y -CONFIG_PCIEAER_INJECT=m 
-CONFIG_PCIE_ECRC=y -CONFIG_PCIEASPM=y -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCIE_PME=y -CONFIG_PCIE_DPC=y -# CONFIG_PCIE_PTM is not set -CONFIG_PCIE_EDR=y -CONFIG_PCI_MSI=y -CONFIG_PCI_QUIRKS=y -# CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set -CONFIG_PCI_STUB=y -CONFIG_PCI_PF_STUB=y -CONFIG_PCI_ATS=y -CONFIG_PCI_DOE=y -CONFIG_PCI_LOCKLESS_CONFIG=y -CONFIG_PCI_IOV=y -CONFIG_PCI_PRI=y -CONFIG_PCI_PASID=y -# CONFIG_PCI_P2PDMA is not set -CONFIG_PCI_LABEL=y -CONFIG_PCI_HYPERV=m -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=64 -CONFIG_HOTPLUG_PCI=y -CONFIG_HOTPLUG_PCI_ACPI=y -CONFIG_HOTPLUG_PCI_ACPI_IBM=m -# CONFIG_HOTPLUG_PCI_CPCI is not set -CONFIG_HOTPLUG_PCI_SHPC=y - -# -# PCI controller drivers -# -CONFIG_VMD=y -CONFIG_PCI_HYPERV_INTERFACE=m - -# -# Cadence-based PCIe controllers -# -# end of Cadence-based PCIe controllers - -# -# DesignWare-based PCIe controllers -# -# CONFIG_PCI_MESON is not set -# CONFIG_PCIE_DW_PLAT_HOST is not set -# end of DesignWare-based PCIe controllers - -# -# Mobiveil-based PCIe controllers -# -# end of Mobiveil-based PCIe controllers -# end of PCI controller drivers - -# -# PCI Endpoint -# -# CONFIG_PCI_ENDPOINT is not set -# end of PCI Endpoint - -# -# PCI switch controller drivers -# -# CONFIG_PCI_SW_SWITCHTEC is not set -# end of PCI switch controller drivers - -CONFIG_CXL_BUS=m -CONFIG_CXL_PCI=m -# CONFIG_CXL_MEM_RAW_COMMANDS is not set -CONFIG_CXL_ACPI=m -CONFIG_CXL_PMEM=m -CONFIG_CXL_MEM=m -CONFIG_CXL_PORT=m -CONFIG_CXL_SUSPEND=y -CONFIG_CXL_REGION=y -# CONFIG_CXL_REGION_INVALIDATION_TEST is not set -CONFIG_CXL_PMU=m -CONFIG_PCCARD=y -# CONFIG_PCMCIA is not set -CONFIG_CARDBUS=y - -# -# PC-card bridges -# -CONFIG_YENTA=m -CONFIG_YENTA_O2=y -CONFIG_YENTA_RICOH=y -CONFIG_YENTA_TI=y -CONFIG_YENTA_ENE_TUNE=y -CONFIG_YENTA_TOSHIBA=y -# CONFIG_RAPIDIO is not set - -# -# Generic Driver 
Options -# -CONFIG_AUXILIARY_BUS=y -# CONFIG_UEVENT_HELPER is not set -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_DEVTMPFS_SAFE is not set -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y - -# -# Firmware loader -# -CONFIG_FW_LOADER=y -CONFIG_FW_LOADER_DEBUG=y -CONFIG_FW_LOADER_PAGED_BUF=y -CONFIG_FW_LOADER_SYSFS=y -CONFIG_EXTRA_FIRMWARE="" -CONFIG_FW_LOADER_USER_HELPER=y -# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set -# CONFIG_FW_LOADER_COMPRESS is not set -CONFIG_FW_CACHE=y -CONFIG_FW_UPLOAD=y -# end of Firmware loader - -CONFIG_WANT_DEV_COREDUMP=y -CONFIG_ALLOW_DEV_COREDUMP=y -CONFIG_DEV_COREDUMP=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set -CONFIG_HMEM_REPORTING=y -# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set -CONFIG_SYS_HYPERVISOR=y -CONFIG_GENERIC_CPU_AUTOPROBE=y -CONFIG_GENERIC_CPU_VULNERABILITIES=y -CONFIG_REGMAP=y -CONFIG_REGMAP_I2C=m -CONFIG_REGMAP_SPI=m -CONFIG_DMA_SHARED_BUFFER=y -# CONFIG_DMA_FENCE_TRACE is not set -# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set -# end of Generic Driver Options - -# -# Bus devices -# -# CONFIG_MHI_BUS is not set -# CONFIG_MHI_BUS_EP is not set -# end of Bus devices - -# -# Cache Drivers -# -# end of Cache Drivers - -CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y - -# -# Firmware Drivers -# - -# -# ARM System Control and Management Interface Protocol -# -# end of ARM System Control and Management Interface Protocol - -CONFIG_EDD=m -# CONFIG_EDD_OFF is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=y -CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y -CONFIG_ISCSI_IBFT_FIND=y -CONFIG_ISCSI_IBFT=m -CONFIG_FW_CFG_SYSFS=y -# CONFIG_FW_CFG_SYSFS_CMDLINE is not set -CONFIG_SYSFB=y -# CONFIG_SYSFB_SIMPLEFB is not set -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# EFI (Extensible Firmware Interface) Support -# -CONFIG_EFI_ESRT=y -CONFIG_EFI_VARS_PSTORE=y -CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y -CONFIG_EFI_SOFT_RESERVE=y 
-CONFIG_EFI_DXE_MEM_ATTRIBUTES=y -CONFIG_EFI_RUNTIME_WRAPPERS=y -# CONFIG_EFI_BOOTLOADER_CONTROL is not set -# CONFIG_EFI_CAPSULE_LOADER is not set -# CONFIG_EFI_TEST is not set -# CONFIG_APPLE_PROPERTIES is not set -# CONFIG_RESET_ATTACK_MITIGATION is not set -CONFIG_EFI_RCI2_TABLE=y -# CONFIG_EFI_DISABLE_PCI_DMA is not set -CONFIG_EFI_EARLYCON=y -CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y -# CONFIG_EFI_DISABLE_RUNTIME is not set -CONFIG_EFI_COCO_SECRET=y -CONFIG_UNACCEPTED_MEMORY=y -# end of EFI (Extensible Firmware Interface) Support - -CONFIG_UEFI_CPER=y -CONFIG_UEFI_CPER_X86=y - -# -# Tegra firmware driver -# -# end of Tegra firmware driver -# end of Firmware Drivers - -# CONFIG_GNSS is not set -CONFIG_MTD=m -# CONFIG_MTD_TESTS is not set - -# -# Partition parsers -# -# CONFIG_MTD_AR7_PARTS is not set -# CONFIG_MTD_CMDLINE_PARTS is not set -# CONFIG_MTD_REDBOOT_PARTS is not set -# end of Partition parsers - -# -# User Modules And Translation Layers -# -CONFIG_MTD_BLKDEVS=m -CONFIG_MTD_BLOCK=m -# CONFIG_MTD_BLOCK_RO is not set - -# -# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
-# -# CONFIG_FTL is not set -# CONFIG_NFTL is not set -# CONFIG_INFTL is not set -# CONFIG_RFD_FTL is not set -# CONFIG_SSFDC is not set -# CONFIG_SM_FTL is not set -# CONFIG_MTD_OOPS is not set -# CONFIG_MTD_SWAP is not set -# CONFIG_MTD_PARTITIONED_MASTER is not set - -# -# RAM/ROM/Flash chip drivers -# -# CONFIG_MTD_CFI is not set -# CONFIG_MTD_JEDECPROBE is not set -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y -# CONFIG_MTD_RAM is not set -# CONFIG_MTD_ROM is not set -# CONFIG_MTD_ABSENT is not set -# end of RAM/ROM/Flash chip drivers - -# -# Mapping drivers for chip access -# -# CONFIG_MTD_COMPLEX_MAPPINGS is not set -# CONFIG_MTD_INTEL_VR_NOR is not set -# CONFIG_MTD_PLATRAM is not set -# end of Mapping drivers for chip access - -# -# Self-contained MTD device drivers -# -# CONFIG_MTD_PMC551 is not set -# CONFIG_MTD_DATAFLASH is not set -# CONFIG_MTD_MCHP23K256 is not set -# CONFIG_MTD_MCHP48L640 is not set -# CONFIG_MTD_SST25L is not set -# CONFIG_MTD_SLRAM is not set -# CONFIG_MTD_PHRAM is not set -# CONFIG_MTD_MTDRAM is not set -# CONFIG_MTD_BLOCK2MTD is not set - -# -# Disk-On-Chip Device Drivers -# -# CONFIG_MTD_DOCG3 is not set -# end of Self-contained MTD device drivers - -# -# NAND -# -# CONFIG_MTD_ONENAND is not set -# CONFIG_MTD_RAW_NAND is not set -# CONFIG_MTD_SPI_NAND is not set - -# -# ECC engine support -# -# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set -# CONFIG_MTD_NAND_ECC_SW_BCH is not set -# CONFIG_MTD_NAND_ECC_MXIC is not set -# end of ECC engine support -# end of NAND - -# -# LPDDR & LPDDR2 PCM memory drivers -# -# CONFIG_MTD_LPDDR is not set -# end of LPDDR & LPDDR2 PCM memory drivers - -# CONFIG_MTD_SPI_NOR is not set -CONFIG_MTD_UBI=m -CONFIG_MTD_UBI_WL_THRESHOLD=4096 -CONFIG_MTD_UBI_BEB_LIMIT=20 -# CONFIG_MTD_UBI_FASTMAP is not set -# CONFIG_MTD_UBI_GLUEBI is not set -# CONFIG_MTD_UBI_BLOCK is not set -# CONFIG_MTD_HYPERBUS is not set -# CONFIG_OF 
is not set -CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y -CONFIG_PARPORT=m -CONFIG_PARPORT_PC=m -CONFIG_PARPORT_SERIAL=m -# CONFIG_PARPORT_PC_FIFO is not set -# CONFIG_PARPORT_PC_SUPERIO is not set -CONFIG_PARPORT_1284=y -CONFIG_PARPORT_NOT_PC=y -CONFIG_PNP=y -# CONFIG_PNP_DEBUG_MESSAGES is not set - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -CONFIG_BLK_DEV_NULL_BLK=m -# CONFIG_BLK_DEV_FD is not set -CONFIG_CDROM=m -# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set -CONFIG_ZRAM=m -CONFIG_ZRAM_DEF_COMP_LZORLE=y -# CONFIG_ZRAM_DEF_COMP_ZSTD is not set -# CONFIG_ZRAM_DEF_COMP_LZ4 is not set -# CONFIG_ZRAM_DEF_COMP_LZO is not set -# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set -CONFIG_ZRAM_DEF_COMP="lzo-rle" -CONFIG_ZRAM_WRITEBACK=y -# CONFIG_ZRAM_MEMORY_TRACKING is not set -# CONFIG_ZRAM_MULTI_COMP is not set -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 -# CONFIG_BLK_DEV_DRBD is not set -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=m -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=16384 -CONFIG_CDROM_PKTCDVD=m -CONFIG_CDROM_PKTCDVD_BUFFERS=8 -# CONFIG_CDROM_PKTCDVD_WCACHE is not set -# CONFIG_ATA_OVER_ETH is not set -CONFIG_XEN_BLKDEV_FRONTEND=m -CONFIG_VIRTIO_BLK=y -CONFIG_BLK_DEV_RBD=m -CONFIG_BLK_DEV_UBLK=m -CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y - -# -# NVME Support -# -CONFIG_NVME_CORE=m -CONFIG_BLK_DEV_NVME=m -CONFIG_NVME_MULTIPATH=y -# CONFIG_NVME_VERBOSE_ERRORS is not set -# CONFIG_NVME_HWMON is not set -CONFIG_NVME_FABRICS=m -CONFIG_NVME_RDMA=m -CONFIG_NVME_FC=m -CONFIG_NVME_TCP=m -# CONFIG_NVME_AUTH is not set -CONFIG_NVME_TARGET=m -# CONFIG_NVME_TARGET_PASSTHRU is not set -CONFIG_NVME_TARGET_LOOP=m -CONFIG_NVME_TARGET_RDMA=m -CONFIG_NVME_TARGET_FC=m -CONFIG_NVME_TARGET_FCLOOP=m -CONFIG_NVME_TARGET_TCP=m -# CONFIG_NVME_TARGET_AUTH is not set -# end of NVME Support - -# -# Misc devices -# -CONFIG_SENSORS_LIS3LV02D=m -# CONFIG_AD525X_DPOT is not set -# CONFIG_DUMMY_IRQ is not set -# CONFIG_IBM_ASM is not set -# CONFIG_PHANTOM is not set -CONFIG_TIFM_CORE=m 
-CONFIG_TIFM_7XX1=m -# CONFIG_ICS932S401 is not set -CONFIG_ENCLOSURE_SERVICES=m -CONFIG_SGI_XP=m -CONFIG_HP_ILO=m -CONFIG_SGI_GRU=m -# CONFIG_SGI_GRU_DEBUG is not set -CONFIG_APDS9802ALS=m -CONFIG_ISL29003=m -CONFIG_ISL29020=m -CONFIG_SENSORS_TSL2550=m -CONFIG_SENSORS_BH1770=m -CONFIG_SENSORS_APDS990X=m -# CONFIG_HMC6352 is not set -# CONFIG_DS1682 is not set -CONFIG_VMWARE_BALLOON=m -# CONFIG_LATTICE_ECP3_CONFIG is not set -# CONFIG_SRAM is not set -# CONFIG_DW_XDATA_PCIE is not set -# CONFIG_PCI_ENDPOINT_TEST is not set -# CONFIG_XILINX_SDFEC is not set -CONFIG_MISC_RTSX=m -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -# CONFIG_EEPROM_AT24 is not set -# CONFIG_EEPROM_AT25 is not set -CONFIG_EEPROM_LEGACY=m -CONFIG_EEPROM_MAX6875=m -CONFIG_EEPROM_93CX6=m -# CONFIG_EEPROM_93XX46 is not set -# CONFIG_EEPROM_IDT_89HPESX is not set -# CONFIG_EEPROM_EE1004 is not set -# end of EEPROM support - -CONFIG_CB710_CORE=m -# CONFIG_CB710_DEBUG is not set -CONFIG_CB710_DEBUG_ASSUMPTIONS=y - -# -# Texas Instruments shared transport line discipline -# -# CONFIG_TI_ST is not set -# end of Texas Instruments shared transport line discipline - -CONFIG_SENSORS_LIS3_I2C=m -CONFIG_ALTERA_STAPL=m -CONFIG_INTEL_MEI=m -CONFIG_INTEL_MEI_ME=m -# CONFIG_INTEL_MEI_TXE is not set -# CONFIG_INTEL_MEI_GSC is not set -# CONFIG_INTEL_MEI_HDCP is not set -# CONFIG_INTEL_MEI_PXP is not set -# CONFIG_INTEL_MEI_GSC_PROXY is not set -CONFIG_VMWARE_VMCI=m -# CONFIG_GENWQE is not set -# CONFIG_ECHO is not set -# CONFIG_BCM_VK is not set -# CONFIG_MISC_ALCOR_PCI is not set -CONFIG_MISC_RTSX_PCI=m -CONFIG_MISC_RTSX_USB=m -CONFIG_UACCE=m -CONFIG_PVPANIC=y -# CONFIG_PVPANIC_MMIO is not set -# CONFIG_PVPANIC_PCI is not set -# CONFIG_GP_PCI1XXXX is not set -# end of Misc devices - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -CONFIG_RAID_ATTRS=m -CONFIG_SCSI_COMMON=y -CONFIG_SCSI=y -CONFIG_SCSI_DMA=y -CONFIG_SCSI_NETLINK=y -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# 
-CONFIG_BLK_DEV_SD=m -CONFIG_CHR_DEV_ST=m -CONFIG_BLK_DEV_SR=m -CONFIG_CHR_DEV_SG=m -CONFIG_BLK_DEV_BSG=y -CONFIG_CHR_DEV_SCH=m -CONFIG_SCSI_ENCLOSURE=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=m -CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=m -CONFIG_SCSI_SAS_LIBSAS=m -CONFIG_SCSI_SAS_ATA=y -CONFIG_SCSI_SAS_HOST_SMP=y -CONFIG_SCSI_SRP_ATTRS=m -# end of SCSI Transports - -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m -# CONFIG_SCSI_CXGB3_ISCSI is not set -CONFIG_SCSI_CXGB4_ISCSI=m -CONFIG_SCSI_BNX2_ISCSI=m -CONFIG_SCSI_BNX2X_FCOE=m -CONFIG_BE2ISCSI=m -# CONFIG_BLK_DEV_3W_XXXX_RAID is not set -CONFIG_SCSI_HPSA=m -# CONFIG_SCSI_3W_9XXX is not set -# CONFIG_SCSI_3W_SAS is not set -# CONFIG_SCSI_ACARD is not set -CONFIG_SCSI_AACRAID=m -# CONFIG_SCSI_AIC7XXX is not set -# CONFIG_SCSI_AIC79XX is not set -# CONFIG_SCSI_AIC94XX is not set -# CONFIG_SCSI_MVSAS is not set -# CONFIG_SCSI_MVUMI is not set -# CONFIG_SCSI_ADVANSYS is not set -# CONFIG_SCSI_ARCMSR is not set -# CONFIG_SCSI_ESAS2R is not set -# CONFIG_MEGARAID_NEWGEN is not set -# CONFIG_MEGARAID_LEGACY is not set -CONFIG_MEGARAID_SAS=m -CONFIG_SCSI_MPT3SAS=m -CONFIG_SCSI_MPT2SAS_MAX_SGE=128 -CONFIG_SCSI_MPT3SAS_MAX_SGE=128 -CONFIG_SCSI_MPT2SAS=m -CONFIG_SCSI_MPI3MR=m -CONFIG_SCSI_SMARTPQI=m -# CONFIG_SCSI_HPTIOP is not set -# CONFIG_SCSI_BUSLOGIC is not set -# CONFIG_SCSI_MYRB is not set -# CONFIG_SCSI_MYRS is not set -CONFIG_VMWARE_PVSCSI=m -# CONFIG_XEN_SCSI_FRONTEND is not set -CONFIG_HYPERV_STORAGE=m -CONFIG_LIBFC=m -CONFIG_LIBFCOE=m -CONFIG_FCOE=m -CONFIG_FCOE_FNIC=m -# CONFIG_SCSI_SNIC is not set -# CONFIG_SCSI_DMX3191D is not set -# CONFIG_SCSI_FDOMAIN_PCI is not set -CONFIG_SCSI_ISCI=m -# CONFIG_SCSI_IPS is not set -# CONFIG_SCSI_INITIO is not set -# CONFIG_SCSI_INIA100 is not set -# CONFIG_SCSI_PPA is not set -# CONFIG_SCSI_IMM is not set -# CONFIG_SCSI_STEX is not set -# 
CONFIG_SCSI_SYM53C8XX_2 is not set -# CONFIG_SCSI_IPR is not set -# CONFIG_SCSI_QLOGIC_1280 is not set -CONFIG_SCSI_QLA_FC=m -# CONFIG_TCM_QLA2XXX is not set -CONFIG_SCSI_QLA_ISCSI=m -CONFIG_QEDI=m -CONFIG_QEDF=m -CONFIG_SCSI_LPFC=m -# CONFIG_SCSI_LPFC_DEBUG_FS is not set -# CONFIG_SCSI_EFCT is not set -# CONFIG_SCSI_DC395x is not set -# CONFIG_SCSI_AM53C974 is not set -# CONFIG_SCSI_WD719X is not set -CONFIG_SCSI_DEBUG=m -# CONFIG_SCSI_PMCRAID is not set -# CONFIG_SCSI_PM8001 is not set -# CONFIG_SCSI_BFA_FC is not set -CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_CHELSIO_FCOE=m -CONFIG_SCSI_DH=y -CONFIG_SCSI_DH_RDAC=y -CONFIG_SCSI_DH_HP_SW=y -CONFIG_SCSI_DH_EMC=y -CONFIG_SCSI_DH_ALUA=y -# end of SCSI device support - -CONFIG_ATA=m -CONFIG_SATA_HOST=y -CONFIG_PATA_TIMINGS=y -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_FORCE=y -CONFIG_ATA_ACPI=y -# CONFIG_SATA_ZPODD is not set -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=m -CONFIG_SATA_MOBILE_LPM_POLICY=0 -CONFIG_SATA_AHCI_PLATFORM=m -# CONFIG_AHCI_DWC is not set -# CONFIG_SATA_INIC162X is not set -# CONFIG_SATA_ACARD_AHCI is not set -# CONFIG_SATA_SIL24 is not set -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -# CONFIG_PDC_ADMA is not set -# CONFIG_SATA_QSTOR is not set -# CONFIG_SATA_SX4 is not set -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -CONFIG_ATA_PIIX=m -# CONFIG_SATA_DWC is not set -# CONFIG_SATA_MV is not set -# CONFIG_SATA_NV is not set -# CONFIG_SATA_PROMISE is not set -# CONFIG_SATA_SIL is not set -# CONFIG_SATA_SIS is not set -# CONFIG_SATA_SVW is not set -# CONFIG_SATA_ULI is not set -# CONFIG_SATA_VIA is not set -# CONFIG_SATA_VITESSE is not set -CONFIG_SATA_ZHAOXIN=m - -# -# PATA SFF controllers with BMDMA -# -# CONFIG_PATA_ALI is not set -# CONFIG_PATA_AMD is not set -# CONFIG_PATA_ARTOP is not set -# CONFIG_PATA_ATIIXP is not set -# CONFIG_PATA_ATP867X is not set -# CONFIG_PATA_CMD64X is not set -# CONFIG_PATA_CYPRESS is 
not set -# CONFIG_PATA_EFAR is not set -# CONFIG_PATA_HPT366 is not set -# CONFIG_PATA_HPT37X is not set -# CONFIG_PATA_HPT3X2N is not set -# CONFIG_PATA_HPT3X3 is not set -# CONFIG_PATA_IT8213 is not set -# CONFIG_PATA_IT821X is not set -# CONFIG_PATA_JMICRON is not set -# CONFIG_PATA_MARVELL is not set -# CONFIG_PATA_NETCELL is not set -# CONFIG_PATA_NINJA32 is not set -# CONFIG_PATA_NS87415 is not set -# CONFIG_PATA_OLDPIIX is not set -# CONFIG_PATA_OPTIDMA is not set -# CONFIG_PATA_PDC2027X is not set -# CONFIG_PATA_PDC_OLD is not set -# CONFIG_PATA_RADISYS is not set -# CONFIG_PATA_RDC is not set -# CONFIG_PATA_SCH is not set -# CONFIG_PATA_SERVERWORKS is not set -# CONFIG_PATA_SIL680 is not set -# CONFIG_PATA_SIS is not set -# CONFIG_PATA_TOSHIBA is not set -# CONFIG_PATA_TRIFLEX is not set -# CONFIG_PATA_VIA is not set -# CONFIG_PATA_WINBOND is not set - -# -# PIO-only SFF controllers -# -# CONFIG_PATA_CMD640_PCI is not set -# CONFIG_PATA_MPIIX is not set -# CONFIG_PATA_NS87410 is not set -# CONFIG_PATA_OPTI is not set -# CONFIG_PATA_RZ1000 is not set -# CONFIG_PATA_PARPORT is not set - -# -# Generic fallback / legacy drivers -# -# CONFIG_PATA_ACPI is not set -CONFIG_ATA_GENERIC=m -# CONFIG_PATA_LEGACY is not set -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_AUTODETECT=y -CONFIG_MD_BITMAP_FILE=y -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m -# CONFIG_MD_MULTIPATH is not set -CONFIG_MD_FAULTY=m -CONFIG_MD_CLUSTER=m -# CONFIG_BCACHE is not set -CONFIG_BLK_DEV_DM_BUILTIN=y -CONFIG_BLK_DEV_DM=m -CONFIG_DM_DEBUG=y -CONFIG_DM_BUFIO=m -# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set -CONFIG_DM_BIO_PRISON=m -CONFIG_DM_PERSISTENT_DATA=m -# CONFIG_DM_UNSTRIPED is not set -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m -CONFIG_DM_THIN_PROVISIONING=m -CONFIG_DM_CACHE=m -CONFIG_DM_CACHE_SMQ=m -CONFIG_DM_WRITECACHE=m -# CONFIG_DM_EBS is not set -CONFIG_DM_ERA=m -# CONFIG_DM_CLONE is not set -CONFIG_DM_MIRROR=m 
-CONFIG_DM_LOG_USERSPACE=m -CONFIG_DM_RAID=m -CONFIG_DM_ZERO=m -CONFIG_DM_MULTIPATH=m -CONFIG_DM_MULTIPATH_QL=m -CONFIG_DM_MULTIPATH_ST=m -# CONFIG_DM_MULTIPATH_HST is not set -# CONFIG_DM_MULTIPATH_IOA is not set -CONFIG_DM_DELAY=m -# CONFIG_DM_DUST is not set -CONFIG_DM_UEVENT=y -CONFIG_DM_FLAKEY=m -CONFIG_DM_VERITY=m -# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set -# CONFIG_DM_VERITY_FEC is not set -CONFIG_DM_SWITCH=m -CONFIG_DM_LOG_WRITES=m -CONFIG_DM_INTEGRITY=m -# CONFIG_DM_ZONED is not set -CONFIG_DM_AUDIT=y -CONFIG_TARGET_CORE=m -CONFIG_TCM_IBLOCK=m -CONFIG_TCM_FILEIO=m -CONFIG_TCM_PSCSI=m -CONFIG_TCM_USER2=m -CONFIG_LOOPBACK_TARGET=m -# CONFIG_TCM_FC is not set -CONFIG_ISCSI_TARGET=m -CONFIG_ISCSI_TARGET_CXGB4=m -# CONFIG_SBP_TARGET is not set -# CONFIG_REMOTE_TARGET is not set -CONFIG_FUSION=y -CONFIG_FUSION_SPI=m -# CONFIG_FUSION_FC is not set -CONFIG_FUSION_SAS=m -CONFIG_FUSION_MAX_SGE=128 -# CONFIG_FUSION_CTL is not set -CONFIG_FUSION_LOGGING=y - -# -# IEEE 1394 (FireWire) support -# -CONFIG_FIREWIRE=m -CONFIG_FIREWIRE_OHCI=m -CONFIG_FIREWIRE_SBP2=m -CONFIG_FIREWIRE_NET=m -# CONFIG_FIREWIRE_NOSY is not set -# end of IEEE 1394 (FireWire) support - -CONFIG_MACINTOSH_DRIVERS=y -CONFIG_MAC_EMUMOUSEBTN=y -CONFIG_NETDEVICES=y -CONFIG_MII=m -CONFIG_NET_CORE=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_WIREGUARD=m -# CONFIG_WIREGUARD_DEBUG is not set -# CONFIG_EQUALIZER is not set -CONFIG_NET_FC=y -CONFIG_IFB=m -CONFIG_NET_TEAM=m -CONFIG_NET_TEAM_MODE_BROADCAST=m -CONFIG_NET_TEAM_MODE_ROUNDROBIN=m -CONFIG_NET_TEAM_MODE_RANDOM=m -CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m -CONFIG_NET_TEAM_MODE_LOADBALANCE=m -CONFIG_MACVLAN=m -CONFIG_MACVTAP=m -CONFIG_IPVLAN_L3S=y -CONFIG_IPVLAN=m -CONFIG_IPVTAP=m -CONFIG_VXLAN=m -CONFIG_GENEVE=m -# CONFIG_BAREUDP is not set -# CONFIG_GTP is not set -# CONFIG_AMT is not set -CONFIG_MACSEC=m -CONFIG_NETCONSOLE=m -CONFIG_NETCONSOLE_DYNAMIC=y -# CONFIG_NETCONSOLE_EXTENDED_LOG is not set -CONFIG_NETPOLL=y -CONFIG_NET_POLL_CONTROLLER=y 
-CONFIG_TUN=m -CONFIG_TAP=m -# CONFIG_TUN_VNET_CROSS_LE is not set -CONFIG_VETH=m -CONFIG_VIRTIO_NET=m -CONFIG_NLMON=m -CONFIG_NET_VRF=m -CONFIG_VSOCKMON=m -# CONFIG_ARCNET is not set -# CONFIG_ATM_DRIVERS is not set -CONFIG_ETHERNET=y -CONFIG_MDIO=m -# CONFIG_NET_VENDOR_3COM is not set -# CONFIG_NET_VENDOR_ADAPTEC is not set -# CONFIG_NET_VENDOR_AGERE is not set -# CONFIG_NET_VENDOR_ALACRITECH is not set -# CONFIG_NET_VENDOR_ALTEON is not set -# CONFIG_ALTERA_TSE is not set -CONFIG_NET_VENDOR_AMAZON=y -CONFIG_ENA_ETHERNET=m -# CONFIG_NET_VENDOR_AMD is not set -CONFIG_NET_VENDOR_AQUANTIA=y -CONFIG_AQTION=m -# CONFIG_NET_VENDOR_ARC is not set -CONFIG_NET_VENDOR_ASIX=y -# CONFIG_SPI_AX88796C is not set -CONFIG_NET_VENDOR_ATHEROS=y -CONFIG_ATL2=m -CONFIG_ATL1=m -CONFIG_ATL1E=m -CONFIG_ATL1C=m -CONFIG_ALX=m -# CONFIG_CX_ECAT is not set -CONFIG_NET_VENDOR_BROADCOM=y -# CONFIG_B44 is not set -# CONFIG_BCMGENET is not set -CONFIG_BNX2=m -CONFIG_CNIC=m -CONFIG_TIGON3=m -CONFIG_TIGON3_HWMON=y -CONFIG_BNX2X=m -CONFIG_BNX2X_SRIOV=y -# CONFIG_SYSTEMPORT is not set -CONFIG_BNXT=m -CONFIG_BNXT_SRIOV=y -CONFIG_BNXT_FLOWER_OFFLOAD=y -CONFIG_BNXT_DCB=y -CONFIG_BNXT_HWMON=y -# CONFIG_NET_VENDOR_CADENCE is not set -CONFIG_NET_VENDOR_CAVIUM=y -# CONFIG_THUNDER_NIC_PF is not set -# CONFIG_THUNDER_NIC_VF is not set -# CONFIG_THUNDER_NIC_BGX is not set -# CONFIG_THUNDER_NIC_RGX is not set -CONFIG_CAVIUM_PTP=y -CONFIG_LIQUIDIO_CORE=m -CONFIG_LIQUIDIO=m -CONFIG_LIQUIDIO_VF=m -CONFIG_NET_VENDOR_CHELSIO=y -# CONFIG_CHELSIO_T1 is not set -# CONFIG_CHELSIO_T3 is not set -CONFIG_CHELSIO_T4=m -# CONFIG_CHELSIO_T4_DCB is not set -CONFIG_CHELSIO_T4VF=m -CONFIG_CHELSIO_LIB=m -CONFIG_CHELSIO_INLINE_CRYPTO=y -CONFIG_CHELSIO_IPSEC_INLINE=m -# CONFIG_CHELSIO_TLS_DEVICE is not set -CONFIG_NET_VENDOR_CISCO=y -CONFIG_ENIC=m -# CONFIG_NET_VENDOR_CORTINA is not set -CONFIG_NET_VENDOR_DAVICOM=y -# CONFIG_DM9051 is not set -CONFIG_DNET=m -CONFIG_NET_VENDOR_BZWX=y -CONFIG_NCE=m -CONFIG_NE6X=m -CONFIG_NE6XVF=m 
-CONFIG_NET_VENDOR_DEC=y -# CONFIG_NET_TULIP is not set -# CONFIG_NET_VENDOR_DLINK is not set -CONFIG_NET_VENDOR_EMULEX=y -CONFIG_BE2NET=m -CONFIG_BE2NET_HWMON=y -# CONFIG_BE2NET_BE2 is not set -# CONFIG_BE2NET_BE3 is not set -CONFIG_BE2NET_LANCER=y -CONFIG_BE2NET_SKYHAWK=y -CONFIG_NET_VENDOR_ENGLEDER=y -# CONFIG_TSNEP is not set -# CONFIG_NET_VENDOR_EZCHIP is not set -CONFIG_NET_VENDOR_FUNGIBLE=y -# CONFIG_FUN_ETH is not set -CONFIG_NET_VENDOR_GOOGLE=y -CONFIG_GVE=m -CONFIG_NET_VENDOR_HUAWEI=y -CONFIG_HINIC=m -# CONFIG_NET_VENDOR_I825XX is not set -CONFIG_NET_VENDOR_INTEL=y -# CONFIG_E100 is not set -CONFIG_E1000=m -CONFIG_E1000E=m -CONFIG_E1000E_HWTS=y -CONFIG_IGB=m -CONFIG_IGB_HWMON=y -CONFIG_IGB_DCA=y -CONFIG_IGBVF=m -CONFIG_IXGBE=m -CONFIG_IXGBE_HWMON=y -CONFIG_IXGBE_DCA=y -CONFIG_IXGBE_DCB=y -CONFIG_IXGBE_IPSEC=y -CONFIG_IXGBEVF=m -CONFIG_IXGBEVF_IPSEC=y -CONFIG_I40E=m -CONFIG_I40E_DCB=y -CONFIG_IAVF=m -CONFIG_I40EVF=m -CONFIG_ICE=m -CONFIG_ICE_SWITCHDEV=y -CONFIG_ICE_HWTS=y -CONFIG_FM10K=m -CONFIG_IGC=m -# CONFIG_JME is not set -CONFIG_NET_VENDOR_ADI=y -# CONFIG_ADIN1110 is not set -CONFIG_NET_VENDOR_LITEX=y -# CONFIG_NET_VENDOR_MARVELL is not set -CONFIG_NET_VENDOR_MELLANOX=y -CONFIG_MLX4_EN=m -CONFIG_MLX4_EN_DCB=y -CONFIG_MLX4_CORE=m -CONFIG_MLX4_DEBUG=y -# CONFIG_MLX4_CORE_GEN2 is not set -CONFIG_MLX5_CORE=m -CONFIG_MLX5_FPGA=y -CONFIG_MLX5_CORE_EN=y -CONFIG_MLX5_EN_ARFS=y -CONFIG_MLX5_EN_RXNFC=y -CONFIG_MLX5_MPFS=y -CONFIG_MLX5_ESWITCH=y -CONFIG_MLX5_BRIDGE=y -CONFIG_MLX5_CLS_ACT=y -CONFIG_MLX5_TC_CT=y -CONFIG_MLX5_TC_SAMPLE=y -CONFIG_MLX5_CORE_EN_DCB=y -# CONFIG_MLX5_CORE_IPOIB is not set -# CONFIG_MLX5_MACSEC is not set -# CONFIG_MLX5_EN_IPSEC is not set -# CONFIG_MLX5_EN_TLS is not set -CONFIG_MLX5_SW_STEERING=y -# CONFIG_MLX5_SF is not set -CONFIG_MLXSW_CORE=m -CONFIG_MLXSW_CORE_HWMON=y -CONFIG_MLXSW_CORE_THERMAL=y -CONFIG_MLXSW_PCI=m -CONFIG_MLXSW_I2C=m -CONFIG_MLXSW_SPECTRUM=m -CONFIG_MLXSW_SPECTRUM_DCB=y -CONFIG_MLXSW_MINIMAL=m -CONFIG_MLXFW=m -# 
CONFIG_NET_VENDOR_MICREL is not set -# CONFIG_NET_VENDOR_MICROCHIP is not set -# CONFIG_NET_VENDOR_MICROSEMI is not set -CONFIG_NET_VENDOR_MICROSOFT=y -# CONFIG_MICROSOFT_MANA is not set -CONFIG_NET_VENDOR_MYRI=y -CONFIG_MYRI10GE=m -CONFIG_MYRI10GE_DCA=y -# CONFIG_FEALNX is not set -# CONFIG_NET_VENDOR_NI is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NETERION is not set -CONFIG_NET_VENDOR_NETRONOME=y -CONFIG_NFP=m -CONFIG_NFP_APP_FLOWER=y -CONFIG_NFP_APP_ABM_NIC=y -CONFIG_NFP_NET_IPSEC=y -# CONFIG_NFP_DEBUG is not set -# CONFIG_NET_VENDOR_NVIDIA is not set -CONFIG_NET_VENDOR_OKI=y -CONFIG_ETHOC=m -# CONFIG_NET_VENDOR_PACKET_ENGINES is not set -CONFIG_NET_VENDOR_PENSANDO=y -# CONFIG_IONIC is not set -CONFIG_NET_VENDOR_QLOGIC=y -CONFIG_QLA3XXX=m -# CONFIG_QLCNIC is not set -CONFIG_NETXEN_NIC=m -CONFIG_QED=m -CONFIG_QED_LL2=y -CONFIG_QED_SRIOV=y -CONFIG_QEDE=m -CONFIG_QED_RDMA=y -CONFIG_QED_ISCSI=y -CONFIG_QED_FCOE=y -CONFIG_QED_OOO=y -CONFIG_NET_VENDOR_BROCADE=y -# CONFIG_BNA is not set -# CONFIG_NET_VENDOR_QUALCOMM is not set -# CONFIG_NET_VENDOR_RDC is not set -CONFIG_NET_VENDOR_REALTEK=y -# CONFIG_ATP is not set -CONFIG_8139CP=m -CONFIG_8139TOO=m -# CONFIG_8139TOO_PIO is not set -# CONFIG_8139TOO_TUNE_TWISTER is not set -CONFIG_8139TOO_8129=y -# CONFIG_8139_OLD_RX_RESET is not set -CONFIG_R8169=m -# CONFIG_NET_VENDOR_RENESAS is not set -CONFIG_NET_VENDOR_ROCKER=y -CONFIG_ROCKER=m -# CONFIG_NET_VENDOR_SAMSUNG is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SILAN is not set -# CONFIG_NET_VENDOR_SIS is not set -CONFIG_NET_VENDOR_SOLARFLARE=y -CONFIG_SFC=m -CONFIG_SFC_MTD=y -CONFIG_SFC_MCDI_MON=y -CONFIG_SFC_SRIOV=y -CONFIG_SFC_MCDI_LOGGING=y -# CONFIG_SFC_FALCON is not set -# CONFIG_SFC_SIENA is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_SOCIONEXT is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set -# 
CONFIG_NET_VENDOR_TEHUTI is not set -# CONFIG_NET_VENDOR_TI is not set -CONFIG_NET_VENDOR_VERTEXCOM=y -# CONFIG_MSE102X is not set -# CONFIG_NET_VENDOR_VIA is not set -CONFIG_NET_VENDOR_WANGXUN=y -CONFIG_LIBWX=m -CONFIG_NGBE=m -CONFIG_TXGBE=m -# CONFIG_NET_VENDOR_WIZNET is not set -CONFIG_NET_VENDOR_XILINX=y -# CONFIG_XILINX_EMACLITE is not set -# CONFIG_XILINX_AXI_EMAC is not set -# CONFIG_XILINX_LL_TEMAC is not set -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set -# CONFIG_NET_SB1000 is not set -CONFIG_PHYLINK=m -CONFIG_PHYLIB=y -CONFIG_SWPHY=y -CONFIG_LED_TRIGGER_PHY=y -CONFIG_FIXED_PHY=y -CONFIG_SFP=m - -# -# MII PHY device drivers -# -CONFIG_AMD_PHY=m -# CONFIG_ADIN_PHY is not set -# CONFIG_ADIN1100_PHY is not set -CONFIG_AQUANTIA_PHY=m -CONFIG_AX88796B_PHY=m -CONFIG_BROADCOM_PHY=m -# CONFIG_BCM54140_PHY is not set -CONFIG_BCM7XXX_PHY=m -# CONFIG_BCM84881_PHY is not set -CONFIG_BCM87XX_PHY=m -CONFIG_BCM_NET_PHYLIB=m -CONFIG_BCM_NET_PHYPTP=m -CONFIG_CICADA_PHY=m -CONFIG_CORTINA_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_ICPLUS_PHY=m -CONFIG_LXT_PHY=m -CONFIG_INTEL_XWAY_PHY=m -CONFIG_LSI_ET1011C_PHY=m -CONFIG_MARVELL_PHY=m -CONFIG_MARVELL_10G_PHY=m -# CONFIG_MARVELL_88Q2XXX_PHY is not set -# CONFIG_MARVELL_88X2222_PHY is not set -# CONFIG_MAXLINEAR_GPHY is not set -# CONFIG_MEDIATEK_GE_PHY is not set -CONFIG_MICREL_PHY=m -# CONFIG_MICROCHIP_T1S_PHY is not set -CONFIG_MICROCHIP_PHY=m -CONFIG_MICROCHIP_T1_PHY=m -CONFIG_MICROSEMI_PHY=m -# CONFIG_MOTORCOMM_PHY is not set -CONFIG_NATIONAL_PHY=m -# CONFIG_NXP_CBTX_PHY is not set -# CONFIG_NXP_C45_TJA11XX_PHY is not set -# CONFIG_NXP_TJA11XX_PHY is not set -# CONFIG_NCN26000_PHY is not set -CONFIG_QSEMI_PHY=m -CONFIG_REALTEK_PHY=m -CONFIG_RENESAS_PHY=m -CONFIG_ROCKCHIP_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_STE10XP=m -CONFIG_TERANETICS_PHY=m -CONFIG_DP83822_PHY=m -CONFIG_DP83TC811_PHY=m -CONFIG_DP83848_PHY=m -CONFIG_DP83867_PHY=m -# CONFIG_DP83869_PHY is not set -# CONFIG_DP83TD510_PHY is not set -CONFIG_VITESSE_PHY=m 
-CONFIG_XILINX_GMII2RGMII=m -# CONFIG_MICREL_KS8995MA is not set -# CONFIG_PSE_CONTROLLER is not set -CONFIG_MDIO_DEVICE=y -CONFIG_MDIO_BUS=y -CONFIG_FWNODE_MDIO=y -CONFIG_ACPI_MDIO=y -CONFIG_MDIO_DEVRES=y -CONFIG_MDIO_BITBANG=m -CONFIG_MDIO_BCM_UNIMAC=m -CONFIG_MDIO_CAVIUM=m -# CONFIG_MDIO_GPIO is not set -CONFIG_MDIO_I2C=m -# CONFIG_MDIO_MVUSB is not set -CONFIG_MDIO_THUNDER=m - -# -# MDIO Multiplexers -# - -# -# PCS device drivers -# -CONFIG_PCS_XPCS=m -# end of PCS device drivers - -# CONFIG_PLIP is not set -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPP_FILTER=y -CONFIG_PPP_MPPE=m -CONFIG_PPP_MULTILINK=y -CONFIG_PPPOATM=m -CONFIG_PPPOE=m -# CONFIG_PPPOE_HASH_BITS_1 is not set -# CONFIG_PPPOE_HASH_BITS_2 is not set -CONFIG_PPPOE_HASH_BITS_4=y -# CONFIG_PPPOE_HASH_BITS_8 is not set -CONFIG_PPPOE_HASH_BITS=4 -CONFIG_PPTP=m -CONFIG_PPPOL2TP=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m -CONFIG_SLIP=m -CONFIG_SLHC=m -CONFIG_SLIP_COMPRESSED=y -CONFIG_SLIP_SMART=y -# CONFIG_SLIP_MODE_SLIP6 is not set -CONFIG_USB_NET_DRIVERS=y -CONFIG_USB_CATC=m -CONFIG_USB_KAWETH=m -CONFIG_USB_PEGASUS=m -CONFIG_USB_RTL8150=m -CONFIG_USB_RTL8152=m -CONFIG_USB_LAN78XX=m -CONFIG_USB_USBNET=m -CONFIG_USB_NET_AX8817X=m -CONFIG_USB_NET_AX88179_178A=m -CONFIG_USB_NET_CDCETHER=m -CONFIG_USB_NET_CDC_EEM=m -CONFIG_USB_NET_CDC_NCM=m -CONFIG_USB_NET_HUAWEI_CDC_NCM=m -CONFIG_USB_NET_CDC_MBIM=m -CONFIG_USB_NET_DM9601=m -# CONFIG_USB_NET_SR9700 is not set -# CONFIG_USB_NET_SR9800 is not set -CONFIG_USB_NET_SMSC75XX=m -CONFIG_USB_NET_SMSC95XX=m -CONFIG_USB_NET_GL620A=m -CONFIG_USB_NET_NET1080=m -CONFIG_USB_NET_PLUSB=m -CONFIG_USB_NET_MCS7830=m -CONFIG_USB_NET_RNDIS_HOST=m -CONFIG_USB_NET_CDC_SUBSET_ENABLE=m -CONFIG_USB_NET_CDC_SUBSET=m -CONFIG_USB_ALI_M5632=y -CONFIG_USB_AN2720=y -CONFIG_USB_BELKIN=y -CONFIG_USB_ARMLINUX=y -CONFIG_USB_EPSON2888=y -CONFIG_USB_KC2190=y -CONFIG_USB_NET_ZAURUS=m -CONFIG_USB_NET_CX82310_ETH=m -CONFIG_USB_NET_KALMIA=m -CONFIG_USB_NET_QMI_WWAN=m 
-CONFIG_USB_HSO=m -CONFIG_USB_NET_INT51X1=m -CONFIG_USB_IPHETH=m -CONFIG_USB_SIERRA_NET=m -CONFIG_USB_VL600=m -CONFIG_USB_NET_CH9200=m -# CONFIG_USB_NET_AQC111 is not set -CONFIG_USB_RTL8153_ECM=m -CONFIG_WLAN=y -# CONFIG_WLAN_VENDOR_ADMTEK is not set -CONFIG_ATH_COMMON=m -CONFIG_WLAN_VENDOR_ATH=y -# CONFIG_ATH_DEBUG is not set -# CONFIG_ATH5K is not set -# CONFIG_ATH5K_PCI is not set -CONFIG_ATH9K_HW=m -CONFIG_ATH9K_COMMON=m -CONFIG_ATH9K_COMMON_DEBUG=y -CONFIG_ATH9K_BTCOEX_SUPPORT=y -CONFIG_ATH9K=m -CONFIG_ATH9K_PCI=y -CONFIG_ATH9K_AHB=y -CONFIG_ATH9K_DEBUGFS=y -# CONFIG_ATH9K_STATION_STATISTICS is not set -# CONFIG_ATH9K_DYNACK is not set -CONFIG_ATH9K_WOW=y -CONFIG_ATH9K_RFKILL=y -# CONFIG_ATH9K_CHANNEL_CONTEXT is not set -CONFIG_ATH9K_PCOEM=y -# CONFIG_ATH9K_PCI_NO_EEPROM is not set -CONFIG_ATH9K_HTC=m -# CONFIG_ATH9K_HTC_DEBUGFS is not set -# CONFIG_ATH9K_HWRNG is not set -# CONFIG_ATH9K_COMMON_SPECTRAL is not set -# CONFIG_CARL9170 is not set -# CONFIG_ATH6KL is not set -# CONFIG_AR5523 is not set -# CONFIG_WIL6210 is not set -CONFIG_ATH10K=m -CONFIG_ATH10K_CE=y -CONFIG_ATH10K_PCI=m -# CONFIG_ATH10K_SDIO is not set -# CONFIG_ATH10K_USB is not set -# CONFIG_ATH10K_DEBUG is not set -# CONFIG_ATH10K_DEBUGFS is not set -# CONFIG_ATH10K_TRACING is not set -# CONFIG_WCN36XX is not set -# CONFIG_ATH11K is not set -# CONFIG_ATH12K is not set -# CONFIG_WLAN_VENDOR_ATMEL is not set -CONFIG_WLAN_VENDOR_BROADCOM=y -# CONFIG_B43 is not set -# CONFIG_B43LEGACY is not set -CONFIG_BRCMUTIL=m -CONFIG_BRCMSMAC=m -CONFIG_BRCMSMAC_LEDS=y -CONFIG_BRCMFMAC=m -CONFIG_BRCMFMAC_PROTO_BCDC=y -CONFIG_BRCMFMAC_PROTO_MSGBUF=y -CONFIG_BRCMFMAC_SDIO=y -CONFIG_BRCMFMAC_USB=y -CONFIG_BRCMFMAC_PCIE=y -# CONFIG_BRCM_TRACING is not set -# CONFIG_BRCMDBG is not set -# CONFIG_WLAN_VENDOR_CISCO is not set -CONFIG_WLAN_VENDOR_INTEL=y -# CONFIG_IPW2100 is not set -# CONFIG_IPW2200 is not set -# CONFIG_IWL4965 is not set -# CONFIG_IWL3945 is not set -CONFIG_IWLWIFI=m -CONFIG_IWLWIFI_LEDS=y 
-CONFIG_IWLDVM=m -CONFIG_IWLMVM=m -CONFIG_IWLWIFI_OPMODE_MODULAR=y - -# -# Debugging Options -# -# CONFIG_IWLWIFI_DEBUG is not set -CONFIG_IWLWIFI_DEBUGFS=y -# CONFIG_IWLWIFI_DEVICE_TRACING is not set -# end of Debugging Options - -# CONFIG_WLAN_VENDOR_INTERSIL is not set -CONFIG_WLAN_VENDOR_MARVELL=y -# CONFIG_LIBERTAS is not set -# CONFIG_LIBERTAS_THINFIRM is not set -CONFIG_MWIFIEX=m -CONFIG_MWIFIEX_SDIO=m -CONFIG_MWIFIEX_PCIE=m -CONFIG_MWIFIEX_USB=m -# CONFIG_MWL8K is not set -CONFIG_WLAN_VENDOR_MEDIATEK=y -CONFIG_MT7601U=m -CONFIG_MT76_CORE=m -CONFIG_MT76_LEDS=y -CONFIG_MT76_USB=m -CONFIG_MT76x02_LIB=m -CONFIG_MT76x02_USB=m -CONFIG_MT76x0_COMMON=m -CONFIG_MT76x0U=m -# CONFIG_MT76x0E is not set -CONFIG_MT76x2_COMMON=m -# CONFIG_MT76x2E is not set -CONFIG_MT76x2U=m -# CONFIG_MT7603E is not set -# CONFIG_MT7615E is not set -# CONFIG_MT7663U is not set -# CONFIG_MT7663S is not set -# CONFIG_MT7915E is not set -# CONFIG_MT7921E is not set -# CONFIG_MT7921S is not set -# CONFIG_MT7921U is not set -# CONFIG_MT7996E is not set -CONFIG_WLAN_VENDOR_MICROCHIP=y -# CONFIG_WILC1000_SDIO is not set -# CONFIG_WILC1000_SPI is not set -CONFIG_WLAN_VENDOR_PURELIFI=y -# CONFIG_PLFXLC is not set -CONFIG_WLAN_VENDOR_RALINK=y -CONFIG_RT2X00=m -# CONFIG_RT2400PCI is not set -# CONFIG_RT2500PCI is not set -# CONFIG_RT61PCI is not set -CONFIG_RT2800PCI=m -CONFIG_RT2800PCI_RT33XX=y -CONFIG_RT2800PCI_RT35XX=y -CONFIG_RT2800PCI_RT53XX=y -CONFIG_RT2800PCI_RT3290=y -# CONFIG_RT2500USB is not set -# CONFIG_RT73USB is not set -CONFIG_RT2800USB=m -CONFIG_RT2800USB_RT33XX=y -CONFIG_RT2800USB_RT35XX=y -CONFIG_RT2800USB_RT3573=y -CONFIG_RT2800USB_RT53XX=y -CONFIG_RT2800USB_RT55XX=y -CONFIG_RT2800USB_UNKNOWN=y -CONFIG_RT2800_LIB=m -CONFIG_RT2800_LIB_MMIO=m -CONFIG_RT2X00_LIB_MMIO=m -CONFIG_RT2X00_LIB_PCI=m -CONFIG_RT2X00_LIB_USB=m -CONFIG_RT2X00_LIB=m -CONFIG_RT2X00_LIB_FIRMWARE=y -CONFIG_RT2X00_LIB_CRYPTO=y -CONFIG_RT2X00_LIB_LEDS=y -CONFIG_RT2X00_LIB_DEBUGFS=y -# CONFIG_RT2X00_DEBUG is not set 
-CONFIG_WLAN_VENDOR_REALTEK=y -# CONFIG_RTL8180 is not set -# CONFIG_RTL8187 is not set -CONFIG_RTL_CARDS=m -CONFIG_RTL8192CE=m -CONFIG_RTL8192SE=m -CONFIG_RTL8192DE=m -CONFIG_RTL8723AE=m -CONFIG_RTL8723BE=m -CONFIG_RTL8188EE=m -CONFIG_RTL8192EE=m -CONFIG_RTL8821AE=m -CONFIG_RTL8192CU=m -CONFIG_RTLWIFI=m -CONFIG_RTLWIFI_PCI=m -CONFIG_RTLWIFI_USB=m -# CONFIG_RTLWIFI_DEBUG is not set -CONFIG_RTL8192C_COMMON=m -CONFIG_RTL8723_COMMON=m -CONFIG_RTLBTCOEXIST=m -CONFIG_RTL8XXXU=m -# CONFIG_RTL8XXXU_UNTESTED is not set -CONFIG_RTW88=m -CONFIG_RTW88_CORE=m -CONFIG_RTW88_PCI=m -CONFIG_RTW88_8822B=m -CONFIG_RTW88_8822C=m -CONFIG_RTW88_8822BE=m -# CONFIG_RTW88_8822BS is not set -# CONFIG_RTW88_8822BU is not set -CONFIG_RTW88_8822CE=m -# CONFIG_RTW88_8822CS is not set -# CONFIG_RTW88_8822CU is not set -# CONFIG_RTW88_8723DE is not set -# CONFIG_RTW88_8723DS is not set -# CONFIG_RTW88_8723DU is not set -# CONFIG_RTW88_8821CE is not set -# CONFIG_RTW88_8821CS is not set -# CONFIG_RTW88_8821CU is not set -# CONFIG_RTW88_DEBUG is not set -# CONFIG_RTW88_DEBUGFS is not set -# CONFIG_RTW89 is not set -# CONFIG_WLAN_VENDOR_RSI is not set -CONFIG_WLAN_VENDOR_SILABS=y -# CONFIG_WFX is not set -# CONFIG_WLAN_VENDOR_ST is not set -# CONFIG_WLAN_VENDOR_TI is not set -# CONFIG_WLAN_VENDOR_ZYDAS is not set -CONFIG_WLAN_VENDOR_QUANTENNA=y -# CONFIG_QTNFMAC_PCIE is not set -# CONFIG_USB_NET_RNDIS_WLAN is not set -CONFIG_MAC80211_HWSIM=m -# CONFIG_VIRT_WIFI is not set -CONFIG_WAN=y -CONFIG_HDLC=m -CONFIG_HDLC_RAW=m -# CONFIG_HDLC_RAW_ETH is not set -CONFIG_HDLC_CISCO=m -CONFIG_HDLC_FR=m -CONFIG_HDLC_PPP=m - -# -# X.25/LAPB support is disabled -# -# CONFIG_PCI200SYN is not set -# CONFIG_WANXL is not set -# CONFIG_PC300TOO is not set -# CONFIG_FARSYNC is not set -CONFIG_IEEE802154_DRIVERS=m -CONFIG_IEEE802154_FAKELB=m -# CONFIG_IEEE802154_AT86RF230 is not set -# CONFIG_IEEE802154_MRF24J40 is not set -# CONFIG_IEEE802154_CC2520 is not set -# CONFIG_IEEE802154_ATUSB is not set -# 
CONFIG_IEEE802154_ADF7242 is not set -# CONFIG_IEEE802154_CA8210 is not set -# CONFIG_IEEE802154_MCR20A is not set -# CONFIG_IEEE802154_HWSIM is not set - -# -# Wireless WAN -# -# CONFIG_WWAN is not set -# end of Wireless WAN - -CONFIG_XEN_NETDEV_FRONTEND=m -CONFIG_VMXNET3=m -CONFIG_FUJITSU_ES=m -CONFIG_HYPERV_NET=m -CONFIG_NETDEVSIM=m -CONFIG_NET_FAILOVER=m -CONFIG_ISDN=y -CONFIG_ISDN_CAPI=y -CONFIG_CAPI_TRACE=y -CONFIG_ISDN_CAPI_MIDDLEWARE=y -CONFIG_MISDN=m -CONFIG_MISDN_DSP=m -CONFIG_MISDN_L1OIP=m - -# -# mISDN hardware drivers -# -CONFIG_MISDN_HFCPCI=m -CONFIG_MISDN_HFCMULTI=m -CONFIG_MISDN_HFCUSB=m -CONFIG_MISDN_AVMFRITZ=m -CONFIG_MISDN_SPEEDFAX=m -CONFIG_MISDN_INFINEON=m -CONFIG_MISDN_W6692=m -CONFIG_MISDN_NETJET=m -CONFIG_MISDN_HDLC=m -CONFIG_MISDN_IPAC=m -CONFIG_MISDN_ISAR=m - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_LEDS=y -CONFIG_INPUT_FF_MEMLESS=m -CONFIG_INPUT_SPARSEKMAP=m -# CONFIG_INPUT_MATRIXKMAP is not set -CONFIG_INPUT_VIVALDIFMAP=y - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=m -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -CONFIG_INPUT_JOYDEV=m -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -CONFIG_INPUT_KEYBOARD=y -# CONFIG_KEYBOARD_ADC is not set -# CONFIG_KEYBOARD_ADP5588 is not set -# CONFIG_KEYBOARD_ADP5589 is not set -# CONFIG_KEYBOARD_APPLESPI is not set -CONFIG_KEYBOARD_ATKBD=y -# CONFIG_KEYBOARD_QT1050 is not set -# CONFIG_KEYBOARD_QT1070 is not set -# CONFIG_KEYBOARD_QT2160 is not set -# CONFIG_KEYBOARD_DLINK_DIR685 is not set -# CONFIG_KEYBOARD_LKKBD is not set -# CONFIG_KEYBOARD_GPIO is not set -# CONFIG_KEYBOARD_GPIO_POLLED is not set -# CONFIG_KEYBOARD_TCA6416 is not set -# CONFIG_KEYBOARD_TCA8418 is not set -# CONFIG_KEYBOARD_MATRIX is not set -# CONFIG_KEYBOARD_LM8323 is not set -# CONFIG_KEYBOARD_LM8333 is not set -# CONFIG_KEYBOARD_MAX7359 is not set -# CONFIG_KEYBOARD_MCS is not set -# 
CONFIG_KEYBOARD_MPR121 is not set -# CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# CONFIG_KEYBOARD_SAMSUNG is not set -# CONFIG_KEYBOARD_STOWAWAY is not set -# CONFIG_KEYBOARD_SUNKBD is not set -# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set -# CONFIG_KEYBOARD_XTKBD is not set -# CONFIG_KEYBOARD_CYPRESS_SF is not set -CONFIG_INPUT_MOUSE=y -CONFIG_MOUSE_PS2=m -CONFIG_MOUSE_PS2_ALPS=y -CONFIG_MOUSE_PS2_BYD=y -CONFIG_MOUSE_PS2_LOGIPS2PP=y -CONFIG_MOUSE_PS2_SYNAPTICS=y -CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y -CONFIG_MOUSE_PS2_CYPRESS=y -CONFIG_MOUSE_PS2_LIFEBOOK=y -CONFIG_MOUSE_PS2_TRACKPOINT=y -CONFIG_MOUSE_PS2_ELANTECH=y -CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y -CONFIG_MOUSE_PS2_SENTELIC=y -# CONFIG_MOUSE_PS2_TOUCHKIT is not set -CONFIG_MOUSE_PS2_FOCALTECH=y -CONFIG_MOUSE_PS2_VMMOUSE=y -CONFIG_MOUSE_PS2_SMBUS=y -CONFIG_MOUSE_SERIAL=m -CONFIG_MOUSE_APPLETOUCH=m -CONFIG_MOUSE_BCM5974=m -CONFIG_MOUSE_CYAPA=m -CONFIG_MOUSE_ELAN_I2C=m -CONFIG_MOUSE_ELAN_I2C_I2C=y -# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set -CONFIG_MOUSE_VSXXXAA=m -# CONFIG_MOUSE_GPIO is not set -CONFIG_MOUSE_SYNAPTICS_I2C=m -CONFIG_MOUSE_SYNAPTICS_USB=m -# CONFIG_INPUT_JOYSTICK is not set -CONFIG_INPUT_TABLET=y -CONFIG_TABLET_USB_ACECAD=m -CONFIG_TABLET_USB_AIPTEK=m -# CONFIG_TABLET_USB_HANWANG is not set -CONFIG_TABLET_USB_KBTAB=m -# CONFIG_TABLET_USB_PEGASUS is not set -CONFIG_TABLET_SERIAL_WACOM4=m -CONFIG_INPUT_TOUCHSCREEN=y -# CONFIG_TOUCHSCREEN_ADS7846 is not set -# CONFIG_TOUCHSCREEN_AD7877 is not set -# CONFIG_TOUCHSCREEN_AD7879 is not set -# CONFIG_TOUCHSCREEN_ADC is not set -# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set -# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set -# CONFIG_TOUCHSCREEN_BU21013 is not set -# CONFIG_TOUCHSCREEN_BU21029 is not set -# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set -# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set -# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set -# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set -# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set -# 
CONFIG_TOUCHSCREEN_CYTTSP5 is not set -# CONFIG_TOUCHSCREEN_DYNAPRO is not set -# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set -# CONFIG_TOUCHSCREEN_EETI is not set -# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set -# CONFIG_TOUCHSCREEN_EXC3000 is not set -# CONFIG_TOUCHSCREEN_FUJITSU is not set -# CONFIG_TOUCHSCREEN_GOODIX is not set -# CONFIG_TOUCHSCREEN_HIDEEP is not set -# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set -# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set -# CONFIG_TOUCHSCREEN_ILI210X is not set -# CONFIG_TOUCHSCREEN_ILITEK is not set -# CONFIG_TOUCHSCREEN_S6SY761 is not set -# CONFIG_TOUCHSCREEN_GUNZE is not set -# CONFIG_TOUCHSCREEN_EKTF2127 is not set -# CONFIG_TOUCHSCREEN_ELAN is not set -CONFIG_TOUCHSCREEN_ELO=m -CONFIG_TOUCHSCREEN_WACOM_W8001=m -CONFIG_TOUCHSCREEN_WACOM_I2C=m -# CONFIG_TOUCHSCREEN_MAX11801 is not set -# CONFIG_TOUCHSCREEN_MCS5000 is not set -# CONFIG_TOUCHSCREEN_MMS114 is not set -# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set -# CONFIG_TOUCHSCREEN_MSG2638 is not set -# CONFIG_TOUCHSCREEN_MTOUCH is not set -# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set -# CONFIG_TOUCHSCREEN_IMAGIS is not set -# CONFIG_TOUCHSCREEN_INEXIO is not set -# CONFIG_TOUCHSCREEN_PENMOUNT is not set -# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set -# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set -# CONFIG_TOUCHSCREEN_TOUCHWIN is not set -# CONFIG_TOUCHSCREEN_PIXCIR is not set -# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set -# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set -# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set -# CONFIG_TOUCHSCREEN_TSC_SERIO is not set -# CONFIG_TOUCHSCREEN_TSC2004 is not set -# CONFIG_TOUCHSCREEN_TSC2005 is not set -# CONFIG_TOUCHSCREEN_TSC2007 is not set -# CONFIG_TOUCHSCREEN_RM_TS is not set -# CONFIG_TOUCHSCREEN_SILEAD is not set -# CONFIG_TOUCHSCREEN_SIS_I2C is not set -# CONFIG_TOUCHSCREEN_ST1232 is not set -# CONFIG_TOUCHSCREEN_STMFTS is not set -# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set -# CONFIG_TOUCHSCREEN_SX8654 is not set -# 
CONFIG_TOUCHSCREEN_TPS6507X is not set -# CONFIG_TOUCHSCREEN_ZET6223 is not set -# CONFIG_TOUCHSCREEN_ZFORCE is not set -# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set -# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set -# CONFIG_TOUCHSCREEN_IQS5XX is not set -# CONFIG_TOUCHSCREEN_IQS7211 is not set -# CONFIG_TOUCHSCREEN_ZINITIX is not set -# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set -CONFIG_INPUT_MISC=y -# CONFIG_INPUT_AD714X is not set -# CONFIG_INPUT_BMA150 is not set -# CONFIG_INPUT_E3X0_BUTTON is not set -CONFIG_INPUT_PCSPKR=m -# CONFIG_INPUT_MMA8450 is not set -CONFIG_INPUT_APANEL=m -# CONFIG_INPUT_GPIO_BEEPER is not set -# CONFIG_INPUT_GPIO_DECODER is not set -# CONFIG_INPUT_GPIO_VIBRA is not set -CONFIG_INPUT_ATLAS_BTNS=m -CONFIG_INPUT_ATI_REMOTE2=m -CONFIG_INPUT_KEYSPAN_REMOTE=m -# CONFIG_INPUT_KXTJ9 is not set -CONFIG_INPUT_POWERMATE=m -CONFIG_INPUT_YEALINK=m -CONFIG_INPUT_CM109=m -CONFIG_INPUT_UINPUT=m -# CONFIG_INPUT_PCF8574 is not set -# CONFIG_INPUT_PWM_BEEPER is not set -# CONFIG_INPUT_PWM_VIBRA is not set -CONFIG_INPUT_GPIO_ROTARY_ENCODER=m -# CONFIG_INPUT_DA7280_HAPTICS is not set -# CONFIG_INPUT_ADXL34X is not set -# CONFIG_INPUT_IMS_PCU is not set -# CONFIG_INPUT_IQS269A is not set -# CONFIG_INPUT_IQS626A is not set -# CONFIG_INPUT_IQS7222 is not set -# CONFIG_INPUT_CMA3000 is not set -CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m -# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set -# CONFIG_INPUT_DRV260X_HAPTICS is not set -# CONFIG_INPUT_DRV2665_HAPTICS is not set -# CONFIG_INPUT_DRV2667_HAPTICS is not set -CONFIG_RMI4_CORE=m -CONFIG_RMI4_I2C=m -# CONFIG_RMI4_SPI is not set -CONFIG_RMI4_SMB=m -CONFIG_RMI4_F03=y -CONFIG_RMI4_F03_SERIO=m -CONFIG_RMI4_2D_SENSOR=y -CONFIG_RMI4_F11=y -CONFIG_RMI4_F12=y -CONFIG_RMI4_F30=y -# CONFIG_RMI4_F34 is not set -# CONFIG_RMI4_F3A is not set -CONFIG_RMI4_F55=y - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y -CONFIG_SERIO_I8042=y -CONFIG_SERIO_SERPORT=y -# CONFIG_SERIO_CT82C710 is not set -# 
CONFIG_SERIO_PARKBD is not set -# CONFIG_SERIO_PCIPS2 is not set -CONFIG_SERIO_LIBPS2=y -CONFIG_SERIO_RAW=m -CONFIG_SERIO_ALTERA_PS2=m -# CONFIG_SERIO_PS2MULT is not set -CONFIG_SERIO_ARC_PS2=m -CONFIG_HYPERV_KEYBOARD=m -# CONFIG_SERIO_GPIO_PS2 is not set -# CONFIG_USERIO is not set -# CONFIG_GAMEPORT is not set -# end of Hardware I/O ports -# end of Input device support - -# -# Character devices -# -CONFIG_TTY=y -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_VT_CONSOLE_SLEEP=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -# CONFIG_LEGACY_PTYS is not set -CONFIG_LEGACY_TIOCSTI=y -CONFIG_LDISC_AUTOLOAD=y - -# -# Serial drivers -# -CONFIG_SERIAL_EARLYCON=y -CONFIG_SERIAL_8250=y -# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set -CONFIG_SERIAL_8250_PNP=y -# CONFIG_SERIAL_8250_16550A_VARIANTS is not set -# CONFIG_SERIAL_8250_FINTEK is not set -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_DMA=y -CONFIG_SERIAL_8250_PCILIB=y -CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_EXAR=y -CONFIG_SERIAL_8250_NR_UARTS=32 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -# CONFIG_SERIAL_8250_PCI1XXXX is not set -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_SERIAL_8250_DETECT_IRQ is not set -CONFIG_SERIAL_8250_RSA=y -CONFIG_SERIAL_8250_DWLIB=y -CONFIG_SERIAL_8250_DW=y -# CONFIG_SERIAL_8250_RT288X is not set -CONFIG_SERIAL_8250_LPSS=y -CONFIG_SERIAL_8250_MID=y -CONFIG_SERIAL_8250_PERICOM=y - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_KGDB_NMI is not set -# CONFIG_SERIAL_MAX3100 is not set -# CONFIG_SERIAL_MAX310X is not set -# CONFIG_SERIAL_UARTLITE is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_CONSOLE_POLL=y -CONFIG_SERIAL_JSM=m -# CONFIG_SERIAL_LANTIQ is not set -# CONFIG_SERIAL_SCCNXP is not set -# CONFIG_SERIAL_SC16IS7XX is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -CONFIG_SERIAL_ARC=m 
-CONFIG_SERIAL_ARC_NR_PORTS=1 -# CONFIG_SERIAL_RP2 is not set -# CONFIG_SERIAL_FSL_LPUART is not set -# CONFIG_SERIAL_FSL_LINFLEXUART is not set -# CONFIG_SERIAL_SPRD is not set -# end of Serial drivers - -CONFIG_SERIAL_MCTRL_GPIO=y -CONFIG_SERIAL_NONSTANDARD=y -# CONFIG_MOXA_INTELLIO is not set -# CONFIG_MOXA_SMARTIO is not set -CONFIG_N_HDLC=m -CONFIG_N_GSM=m -CONFIG_NOZOMI=m -# CONFIG_NULL_TTY is not set -CONFIG_HVC_DRIVER=y -CONFIG_HVC_IRQ=y -CONFIG_HVC_XEN=y -CONFIG_HVC_XEN_FRONTEND=y -# CONFIG_SERIAL_DEV_BUS is not set -CONFIG_PRINTER=m -# CONFIG_LP_CONSOLE is not set -CONFIG_PPDEV=m -CONFIG_VIRTIO_CONSOLE=m -CONFIG_IPMI_HANDLER=m -CONFIG_IPMI_DMI_DECODE=y -CONFIG_IPMI_PLAT_DATA=y -CONFIG_IPMI_PANIC_EVENT=y -CONFIG_IPMI_PANIC_STRING=y -CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m -CONFIG_IPMI_SSIF=m -CONFIG_IPMI_WATCHDOG=m -CONFIG_IPMI_POWEROFF=m -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=m -CONFIG_HW_RANDOM_INTEL=m -CONFIG_HW_RANDOM_AMD=m -# CONFIG_HW_RANDOM_BA431 is not set -CONFIG_HW_RANDOM_VIA=m -CONFIG_HW_RANDOM_ZHAOXIN=m -CONFIG_HW_RANDOM_VIRTIO=y -# CONFIG_HW_RANDOM_XIPHERA is not set -# CONFIG_APPLICOM is not set -# CONFIG_MWAVE is not set -CONFIG_DEVMEM=y -CONFIG_NVRAM=y -CONFIG_DEVPORT=y -CONFIG_HPET=y -CONFIG_HPET_MMAP=y -# CONFIG_HPET_MMAP_DEFAULT is not set -CONFIG_HANGCHECK_TIMER=m -CONFIG_UV_MMTIMER=m -CONFIG_TCG_TPM=y -CONFIG_HW_RANDOM_TPM=y -CONFIG_TCG_TIS_CORE=y -CONFIG_TCG_TIS=y -# CONFIG_TCG_TIS_SPI is not set -# CONFIG_TCG_TIS_I2C is not set -# CONFIG_TCG_TIS_I2C_CR50 is not set -CONFIG_TCG_TIS_I2C_ATMEL=m -CONFIG_TCG_TIS_I2C_INFINEON=m -CONFIG_TCG_TIS_I2C_NUVOTON=m -CONFIG_TCG_NSC=m -CONFIG_TCG_ATMEL=m -CONFIG_TCG_INFINEON=m -# CONFIG_TCG_XEN is not set -CONFIG_TCG_CRB=y -# CONFIG_TCG_VTPM_PROXY is not set -CONFIG_TCG_HYGON=m -CONFIG_TCM_HYGON=m -CONFIG_TCG_TIS_ST33ZP24=m -CONFIG_TCG_TIS_ST33ZP24_I2C=m -# CONFIG_TCG_TIS_ST33ZP24_SPI is not set -CONFIG_TELCLOCK=m -# CONFIG_XILLYBUS is not set -# CONFIG_XILLYUSB is not set -# end 
of Character devices - -# -# I2C support -# -CONFIG_I2C=y -CONFIG_ACPI_I2C_OPREGION=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=m -CONFIG_I2C_MUX=m - -# -# Multiplexer I2C Chip support -# -# CONFIG_I2C_MUX_GPIO is not set -# CONFIG_I2C_MUX_LTC4306 is not set -# CONFIG_I2C_MUX_PCA9541 is not set -# CONFIG_I2C_MUX_PCA954x is not set -# CONFIG_I2C_MUX_REG is not set -CONFIG_I2C_MUX_MLXCPLD=m -# end of Multiplexer I2C Chip support - -CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_SMBUS=m -CONFIG_I2C_ALGOBIT=m -CONFIG_I2C_ALGOPCA=m - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set -CONFIG_I2C_AMD756=m -CONFIG_I2C_AMD756_S4882=m -CONFIG_I2C_AMD8111=m -# CONFIG_I2C_AMD_MP2 is not set -CONFIG_I2C_I801=m -CONFIG_I2C_ISCH=m -CONFIG_I2C_ISMT=m -CONFIG_I2C_PIIX4=m -CONFIG_I2C_NFORCE2=m -CONFIG_I2C_NFORCE2_S4985=m -# CONFIG_I2C_NVIDIA_GPU is not set -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -CONFIG_I2C_SIS96X=m -CONFIG_I2C_VIA=m -CONFIG_I2C_VIAPRO=m -CONFIG_I2C_ZHAOXIN=m - -# -# ACPI drivers -# -CONFIG_I2C_SCMI=m -CONFIG_I2C_ZHAOXIN_SMBUS=m - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_CBUS_GPIO is not set -CONFIG_I2C_DESIGNWARE_CORE=m -# CONFIG_I2C_DESIGNWARE_SLAVE is not set -CONFIG_I2C_DESIGNWARE_PLATFORM=m -# CONFIG_I2C_DESIGNWARE_AMDPSP is not set -CONFIG_I2C_DESIGNWARE_BAYTRAIL=y -# CONFIG_I2C_DESIGNWARE_PCI is not set -# CONFIG_I2C_EMEV2 is not set -# CONFIG_I2C_GPIO is not set -# CONFIG_I2C_OCORES is not set -CONFIG_I2C_PCA_PLATFORM=m -CONFIG_I2C_SIMTEC=m -# CONFIG_I2C_XILINX is not set - -# -# External I2C/SMBus adapter drivers -# -CONFIG_I2C_DIOLAN_U2C=m -# CONFIG_I2C_CP2615 is not set -CONFIG_I2C_PARPORT=m -# CONFIG_I2C_PCI1XXXX is not set -# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -# CONFIG_I2C_TAOS_EVM is not set -CONFIG_I2C_TINY_USB=m -CONFIG_I2C_VIPERBOARD=m - -# -# 
Other I2C/SMBus bus drivers -# -CONFIG_I2C_MLXCPLD=m -# CONFIG_I2C_VIRTIO is not set -# end of I2C Hardware Bus support - -CONFIG_I2C_STUB=m -# CONFIG_I2C_SLAVE is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -# end of I2C support - -# CONFIG_I3C is not set -CONFIG_SPI=y -# CONFIG_SPI_DEBUG is not set -CONFIG_SPI_MASTER=y -# CONFIG_SPI_MEM is not set - -# -# SPI Master Controller Drivers -# -# CONFIG_SPI_ALTERA is not set -# CONFIG_SPI_AXI_SPI_ENGINE is not set -# CONFIG_SPI_BITBANG is not set -# CONFIG_SPI_BUTTERFLY is not set -# CONFIG_SPI_CADENCE is not set -# CONFIG_SPI_DESIGNWARE is not set -# CONFIG_SPI_GPIO is not set -# CONFIG_SPI_LM70_LLP is not set -# CONFIG_SPI_MICROCHIP_CORE is not set -# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set -# CONFIG_SPI_LANTIQ_SSC is not set -# CONFIG_SPI_OC_TINY is not set -# CONFIG_SPI_PCI1XXXX is not set -# CONFIG_SPI_PXA2XX is not set -# CONFIG_SPI_SC18IS602 is not set -# CONFIG_SPI_SIFIVE is not set -# CONFIG_SPI_MXIC is not set -# CONFIG_SPI_XCOMM is not set -# CONFIG_SPI_XILINX is not set -# CONFIG_SPI_AMD is not set - -# -# SPI Multiplexer support -# -# CONFIG_SPI_MUX is not set - -# -# SPI Protocol Masters -# -# CONFIG_SPI_SPIDEV is not set -# CONFIG_SPI_LOOPBACK_TEST is not set -# CONFIG_SPI_TLE62X0 is not set -# CONFIG_SPI_SLAVE is not set -CONFIG_SPI_DYNAMIC=y -# CONFIG_SPMI is not set -# CONFIG_HSI is not set -CONFIG_PPS=y -# CONFIG_PPS_DEBUG is not set - -# -# PPS clients support -# -# CONFIG_PPS_CLIENT_KTIMER is not set -CONFIG_PPS_CLIENT_LDISC=m -CONFIG_PPS_CLIENT_PARPORT=m -CONFIG_PPS_CLIENT_GPIO=m - -# -# PPS generators support -# - -# -# PTP clock support -# -CONFIG_PTP_1588_CLOCK=y -CONFIG_PTP_1588_CLOCK_OPTIONAL=y -CONFIG_DP83640_PHY=m -# CONFIG_PTP_1588_CLOCK_INES is not set -CONFIG_PTP_1588_CLOCK_KVM=m -# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set -# CONFIG_PTP_1588_CLOCK_IDTCM is not set -# CONFIG_PTP_1588_CLOCK_MOCK is not set -# 
CONFIG_PTP_1588_CLOCK_VMW is not set -# CONFIG_PTP_1588_CLOCK_OCP is not set -# end of PTP clock support - -CONFIG_PINCTRL=y -CONFIG_PINMUX=y -CONFIG_PINCONF=y -CONFIG_GENERIC_PINCONF=y -# CONFIG_DEBUG_PINCTRL is not set -# CONFIG_PINCTRL_AMD is not set -# CONFIG_PINCTRL_CY8C95X0 is not set -# CONFIG_PINCTRL_MCP23S08 is not set -# CONFIG_PINCTRL_SX150X is not set - -# -# Intel pinctrl drivers -# -CONFIG_PINCTRL_BAYTRAIL=y -# CONFIG_PINCTRL_CHERRYVIEW is not set -# CONFIG_PINCTRL_LYNXPOINT is not set -CONFIG_PINCTRL_INTEL=y -# CONFIG_PINCTRL_ALDERLAKE is not set -CONFIG_PINCTRL_BROXTON=m -CONFIG_PINCTRL_CANNONLAKE=m -CONFIG_PINCTRL_CEDARFORK=m -CONFIG_PINCTRL_DENVERTON=m -# CONFIG_PINCTRL_ELKHARTLAKE is not set -# CONFIG_PINCTRL_EMMITSBURG is not set -CONFIG_PINCTRL_GEMINILAKE=m -CONFIG_PINCTRL_ICELAKE=m -# CONFIG_PINCTRL_JASPERLAKE is not set -# CONFIG_PINCTRL_LAKEFIELD is not set -CONFIG_PINCTRL_LEWISBURG=m -# CONFIG_PINCTRL_METEORLAKE is not set -CONFIG_PINCTRL_SUNRISEPOINT=m -# CONFIG_PINCTRL_TIGERLAKE is not set -# end of Intel pinctrl drivers - -CONFIG_PINCTRL_ZHAOXIN=m -CONFIG_PINCTRL_KX7000=m - -# -# Renesas pinctrl drivers -# -# end of Renesas pinctrl drivers - -CONFIG_GPIOLIB=y -CONFIG_GPIOLIB_FASTPATH_LIMIT=512 -CONFIG_GPIO_ACPI=y -CONFIG_GPIOLIB_IRQCHIP=y -# CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_CDEV=y -CONFIG_GPIO_CDEV_V1=y -CONFIG_GPIO_GENERIC=m - -# -# Memory mapped GPIO drivers -# -CONFIG_GPIO_AMDPT=m -# CONFIG_GPIO_DWAPB is not set -# CONFIG_GPIO_EXAR is not set -# CONFIG_GPIO_GENERIC_PLATFORM is not set -CONFIG_GPIO_ICH=m -# CONFIG_GPIO_MB86S7X is not set -# CONFIG_GPIO_AMD_FCH is not set -# end of Memory mapped GPIO drivers - -# -# Port-mapped I/O GPIO drivers -# -# CONFIG_GPIO_VX855 is not set -# CONFIG_GPIO_F7188X is not set -# CONFIG_GPIO_IT87 is not set -# CONFIG_GPIO_SCH is not set -# CONFIG_GPIO_SCH311X is not set -# CONFIG_GPIO_WINBOND is not set -# CONFIG_GPIO_WS16C48 is not set -# end of Port-mapped I/O GPIO drivers - -# -# I2C GPIO 
expanders -# -# CONFIG_GPIO_FXL6408 is not set -# CONFIG_GPIO_DS4520 is not set -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not set -# CONFIG_GPIO_PCA9570 is not set -# CONFIG_GPIO_PCF857X is not set -# CONFIG_GPIO_TPIC2810 is not set -# end of I2C GPIO expanders - -# -# MFD GPIO expanders -# -# CONFIG_GPIO_ELKHARTLAKE is not set -# end of MFD GPIO expanders - -# -# PCI GPIO expanders -# -# CONFIG_GPIO_AMD8111 is not set -# CONFIG_GPIO_BT8XX is not set -# CONFIG_GPIO_ML_IOH is not set -# CONFIG_GPIO_PCI_IDIO_16 is not set -# CONFIG_GPIO_PCIE_IDIO_24 is not set -# CONFIG_GPIO_RDC321X is not set -# end of PCI GPIO expanders - -# -# SPI GPIO expanders -# -# CONFIG_GPIO_MAX3191X is not set -# CONFIG_GPIO_MAX7301 is not set -# CONFIG_GPIO_MC33880 is not set -# CONFIG_GPIO_PISOSR is not set -# CONFIG_GPIO_XRA1403 is not set -# end of SPI GPIO expanders - -# -# USB GPIO expanders -# -CONFIG_GPIO_VIPERBOARD=m -# end of USB GPIO expanders - -# -# Virtual GPIO drivers -# -# CONFIG_GPIO_AGGREGATOR is not set -# CONFIG_GPIO_LATCH is not set -# CONFIG_GPIO_MOCKUP is not set -# CONFIG_GPIO_VIRTIO is not set -# CONFIG_GPIO_SIM is not set -# end of Virtual GPIO drivers - -# CONFIG_W1 is not set -CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_RESTART is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -CONFIG_POWER_SUPPLY_HWMON=y -# CONFIG_GENERIC_ADC_BATTERY is not set -# CONFIG_IP5XXX_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_CHARGER_ADP5061 is not set -# CONFIG_BATTERY_CW2015 is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2781 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_SAMSUNG_SDI is not set -# CONFIG_BATTERY_SBS is not set -# CONFIG_CHARGER_SBS is not set -# CONFIG_MANAGER_SBS is not set -# CONFIG_BATTERY_BQ27XXX is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_MAX8903 is not set -# 
CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_GPIO is not set -# CONFIG_CHARGER_LT3651 is not set -# CONFIG_CHARGER_LTC4162L is not set -# CONFIG_CHARGER_MAX77976 is not set -# CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_BQ24257 is not set -# CONFIG_CHARGER_BQ24735 is not set -# CONFIG_CHARGER_BQ2515X is not set -# CONFIG_CHARGER_BQ25890 is not set -# CONFIG_CHARGER_BQ25980 is not set -# CONFIG_CHARGER_BQ256XX is not set -# CONFIG_BATTERY_GAUGE_LTC2941 is not set -# CONFIG_BATTERY_GOLDFISH is not set -# CONFIG_BATTERY_RT5033 is not set -# CONFIG_CHARGER_RT9455 is not set -# CONFIG_CHARGER_BD99954 is not set -# CONFIG_BATTERY_UG3105 is not set -CONFIG_HWMON=y -CONFIG_HWMON_VID=m -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -CONFIG_SENSORS_ABITUGURU=m -CONFIG_SENSORS_ABITUGURU3=m -# CONFIG_SENSORS_AD7314 is not set -CONFIG_SENSORS_AD7414=m -CONFIG_SENSORS_AD7418=m -CONFIG_SENSORS_ADM1025=m -CONFIG_SENSORS_ADM1026=m -CONFIG_SENSORS_ADM1029=m -CONFIG_SENSORS_ADM1031=m -# CONFIG_SENSORS_ADM1177 is not set -CONFIG_SENSORS_ADM9240=m -CONFIG_SENSORS_ADT7X10=m -# CONFIG_SENSORS_ADT7310 is not set -CONFIG_SENSORS_ADT7410=m -CONFIG_SENSORS_ADT7411=m -CONFIG_SENSORS_ADT7462=m -CONFIG_SENSORS_ADT7470=m -CONFIG_SENSORS_ADT7475=m -# CONFIG_SENSORS_AHT10 is not set -# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set -# CONFIG_SENSORS_AS370 is not set -CONFIG_SENSORS_ASC7621=m -# CONFIG_SENSORS_AXI_FAN_CONTROL is not set -CONFIG_SENSORS_K8TEMP=m -CONFIG_SENSORS_K10TEMP=m -CONFIG_SENSORS_FAM15H_POWER=m -CONFIG_SENSORS_APPLESMC=m -CONFIG_SENSORS_ASB100=m -CONFIG_SENSORS_ATXP1=m -# CONFIG_SENSORS_CORSAIR_CPRO is not set -# CONFIG_SENSORS_CORSAIR_PSU is not set -# CONFIG_SENSORS_DRIVETEMP is not set -CONFIG_SENSORS_DS620=m -CONFIG_SENSORS_DS1621=m -CONFIG_SENSORS_DELL_SMM=m -# CONFIG_I8K is not set -CONFIG_SENSORS_I5K_AMB=m -CONFIG_SENSORS_F71805F=m -CONFIG_SENSORS_F71882FG=m -CONFIG_SENSORS_F75375S=m -CONFIG_SENSORS_FSCHMD=m -# CONFIG_SENSORS_FTSTEUTATES 
is not set -CONFIG_SENSORS_GL518SM=m -CONFIG_SENSORS_GL520SM=m -CONFIG_SENSORS_G760A=m -# CONFIG_SENSORS_G762 is not set -# CONFIG_SENSORS_HIH6130 is not set -# CONFIG_SENSORS_HS3001 is not set -CONFIG_SENSORS_IBMAEM=m -CONFIG_SENSORS_IBMPEX=m -# CONFIG_SENSORS_IIO_HWMON is not set -CONFIG_SENSORS_I5500=m -CONFIG_SENSORS_CORETEMP=m -CONFIG_SENSORS_IT87=m -CONFIG_SENSORS_JC42=m -# CONFIG_SENSORS_POWR1220 is not set -CONFIG_SENSORS_LINEAGE=m -# CONFIG_SENSORS_LTC2945 is not set -# CONFIG_SENSORS_LTC2947_I2C is not set -# CONFIG_SENSORS_LTC2947_SPI is not set -# CONFIG_SENSORS_LTC2990 is not set -# CONFIG_SENSORS_LTC2992 is not set -CONFIG_SENSORS_LTC4151=m -CONFIG_SENSORS_LTC4215=m -# CONFIG_SENSORS_LTC4222 is not set -CONFIG_SENSORS_LTC4245=m -# CONFIG_SENSORS_LTC4260 is not set -CONFIG_SENSORS_LTC4261=m -# CONFIG_SENSORS_MAX1111 is not set -# CONFIG_SENSORS_MAX127 is not set -CONFIG_SENSORS_MAX16065=m -CONFIG_SENSORS_MAX1619=m -CONFIG_SENSORS_MAX1668=m -CONFIG_SENSORS_MAX197=m -# CONFIG_SENSORS_MAX31722 is not set -# CONFIG_SENSORS_MAX31730 is not set -# CONFIG_SENSORS_MAX31760 is not set -# CONFIG_MAX31827 is not set -# CONFIG_SENSORS_MAX6620 is not set -# CONFIG_SENSORS_MAX6621 is not set -CONFIG_SENSORS_MAX6639=m -CONFIG_SENSORS_MAX6650=m -CONFIG_SENSORS_MAX6697=m -# CONFIG_SENSORS_MAX31790 is not set -# CONFIG_SENSORS_MC34VR500 is not set -CONFIG_SENSORS_MCP3021=m -# CONFIG_SENSORS_MLXREG_FAN is not set -# CONFIG_SENSORS_TC654 is not set -# CONFIG_SENSORS_TPS23861 is not set -# CONFIG_SENSORS_MR75203 is not set -# CONFIG_SENSORS_ADCXX is not set -CONFIG_SENSORS_LM63=m -# CONFIG_SENSORS_LM70 is not set -CONFIG_SENSORS_LM73=m -CONFIG_SENSORS_LM75=m -CONFIG_SENSORS_LM77=m -CONFIG_SENSORS_LM78=m -CONFIG_SENSORS_LM80=m -CONFIG_SENSORS_LM83=m -CONFIG_SENSORS_LM85=m -CONFIG_SENSORS_LM87=m -CONFIG_SENSORS_LM90=m -CONFIG_SENSORS_LM92=m -CONFIG_SENSORS_LM93=m -CONFIG_SENSORS_LM95234=m -CONFIG_SENSORS_LM95241=m -CONFIG_SENSORS_LM95245=m -CONFIG_SENSORS_PC87360=m 
-CONFIG_SENSORS_PC87427=m -CONFIG_SENSORS_NTC_THERMISTOR=m -# CONFIG_SENSORS_NCT6683 is not set -CONFIG_SENSORS_NCT6775_CORE=m -CONFIG_SENSORS_NCT6775=m -# CONFIG_SENSORS_NCT6775_I2C is not set -# CONFIG_SENSORS_NCT7802 is not set -# CONFIG_SENSORS_NCT7904 is not set -# CONFIG_SENSORS_NPCM7XX is not set -# CONFIG_SENSORS_NZXT_KRAKEN2 is not set -# CONFIG_SENSORS_NZXT_SMART2 is not set -# CONFIG_SENSORS_OCC_P8_I2C is not set -# CONFIG_SENSORS_OXP is not set -CONFIG_SENSORS_PCF8591=m -CONFIG_PMBUS=m -CONFIG_SENSORS_PMBUS=m -# CONFIG_SENSORS_ACBEL_FSG032 is not set -# CONFIG_SENSORS_ADM1266 is not set -CONFIG_SENSORS_ADM1275=m -# CONFIG_SENSORS_BEL_PFE is not set -# CONFIG_SENSORS_BPA_RS600 is not set -# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set -# CONFIG_SENSORS_FSP_3Y is not set -# CONFIG_SENSORS_IBM_CFFPS is not set -# CONFIG_SENSORS_DPS920AB is not set -# CONFIG_SENSORS_INSPUR_IPSPS is not set -# CONFIG_SENSORS_IR35221 is not set -# CONFIG_SENSORS_IR36021 is not set -# CONFIG_SENSORS_IR38064 is not set -# CONFIG_SENSORS_IRPS5401 is not set -# CONFIG_SENSORS_ISL68137 is not set -CONFIG_SENSORS_LM25066=m -# CONFIG_SENSORS_LT7182S is not set -CONFIG_SENSORS_LTC2978=m -# CONFIG_SENSORS_LTC3815 is not set -# CONFIG_SENSORS_MAX15301 is not set -CONFIG_SENSORS_MAX16064=m -# CONFIG_SENSORS_MAX16601 is not set -# CONFIG_SENSORS_MAX20730 is not set -# CONFIG_SENSORS_MAX20751 is not set -# CONFIG_SENSORS_MAX31785 is not set -CONFIG_SENSORS_MAX34440=m -CONFIG_SENSORS_MAX8688=m -# CONFIG_SENSORS_MP2888 is not set -# CONFIG_SENSORS_MP2975 is not set -# CONFIG_SENSORS_MP5023 is not set -# CONFIG_SENSORS_MPQ7932 is not set -# CONFIG_SENSORS_PIM4328 is not set -# CONFIG_SENSORS_PLI1209BC is not set -# CONFIG_SENSORS_PM6764TR is not set -# CONFIG_SENSORS_PXE1610 is not set -# CONFIG_SENSORS_Q54SJ108A2 is not set -# CONFIG_SENSORS_STPDDC60 is not set -# CONFIG_SENSORS_TDA38640 is not set -# CONFIG_SENSORS_TPS40422 is not set -# CONFIG_SENSORS_TPS53679 is not set -# 
CONFIG_SENSORS_TPS546D24 is not set -CONFIG_SENSORS_UCD9000=m -CONFIG_SENSORS_UCD9200=m -# CONFIG_SENSORS_XDPE152 is not set -# CONFIG_SENSORS_XDPE122 is not set -CONFIG_SENSORS_ZL6100=m -# CONFIG_SENSORS_SBTSI is not set -# CONFIG_SENSORS_SBRMI is not set -CONFIG_SENSORS_SHT15=m -CONFIG_SENSORS_SHT21=m -# CONFIG_SENSORS_SHT3x is not set -# CONFIG_SENSORS_SHT4x is not set -# CONFIG_SENSORS_SHTC1 is not set -CONFIG_SENSORS_SIS5595=m -CONFIG_SENSORS_DME1737=m -CONFIG_SENSORS_EMC1403=m -# CONFIG_SENSORS_EMC2103 is not set -# CONFIG_SENSORS_EMC2305 is not set -CONFIG_SENSORS_EMC6W201=m -CONFIG_SENSORS_SMSC47M1=m -CONFIG_SENSORS_SMSC47M192=m -CONFIG_SENSORS_SMSC47B397=m -CONFIG_SENSORS_SCH56XX_COMMON=m -CONFIG_SENSORS_SCH5627=m -CONFIG_SENSORS_SCH5636=m -# CONFIG_SENSORS_STTS751 is not set -# CONFIG_SENSORS_ADC128D818 is not set -CONFIG_SENSORS_ADS7828=m -# CONFIG_SENSORS_ADS7871 is not set -CONFIG_SENSORS_AMC6821=m -CONFIG_SENSORS_INA209=m -CONFIG_SENSORS_INA2XX=m -# CONFIG_SENSORS_INA238 is not set -# CONFIG_SENSORS_INA3221 is not set -# CONFIG_SENSORS_TC74 is not set -CONFIG_SENSORS_THMC50=m -CONFIG_SENSORS_TMP102=m -# CONFIG_SENSORS_TMP103 is not set -# CONFIG_SENSORS_TMP108 is not set -CONFIG_SENSORS_TMP401=m -CONFIG_SENSORS_TMP421=m -# CONFIG_SENSORS_TMP464 is not set -# CONFIG_SENSORS_TMP513 is not set -CONFIG_SENSORS_VIA_CPUTEMP=m -CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m -CONFIG_SENSORS_VIA686A=m -CONFIG_SENSORS_VT1211=m -CONFIG_SENSORS_VT8231=m -# CONFIG_SENSORS_W83773G is not set -CONFIG_SENSORS_W83781D=m -CONFIG_SENSORS_W83791D=m -CONFIG_SENSORS_W83792D=m -CONFIG_SENSORS_W83793=m -CONFIG_SENSORS_W83795=m -# CONFIG_SENSORS_W83795_FANCTRL is not set -CONFIG_SENSORS_W83L785TS=m -CONFIG_SENSORS_W83L786NG=m -CONFIG_SENSORS_W83627HF=m -CONFIG_SENSORS_W83627EHF=m -# CONFIG_SENSORS_XGENE is not set - -# -# ACPI drivers -# -CONFIG_SENSORS_ACPI_POWER=m -CONFIG_SENSORS_ATK0110=m -# CONFIG_SENSORS_ASUS_WMI is not set -# CONFIG_SENSORS_ASUS_EC is not set -# CONFIG_SENSORS_HP_WMI 
is not set -CONFIG_THERMAL=y -# CONFIG_THERMAL_NETLINK is not set -# CONFIG_THERMAL_STATISTICS is not set -CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_ACPI=y -CONFIG_THERMAL_WRITABLE_TRIPS=y -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set -# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set -# CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG is not set -CONFIG_THERMAL_GOV_FAIR_SHARE=y -CONFIG_THERMAL_GOV_STEP_WISE=y -CONFIG_THERMAL_GOV_BANG_BANG=y -CONFIG_THERMAL_GOV_USER_SPACE=y -# CONFIG_THERMAL_EMULATION is not set - -# -# Intel thermal drivers -# -CONFIG_INTEL_POWERCLAMP=m -CONFIG_X86_THERMAL_VECTOR=y -CONFIG_INTEL_TCC=y -CONFIG_X86_PKG_TEMP_THERMAL=m -CONFIG_INTEL_SOC_DTS_IOSF_CORE=m -# CONFIG_INTEL_SOC_DTS_THERMAL is not set - -# -# ACPI INT340X thermal drivers -# -CONFIG_INT340X_THERMAL=m -CONFIG_ACPI_THERMAL_REL=m -# CONFIG_INT3406_THERMAL is not set -CONFIG_PROC_THERMAL_MMIO_RAPL=m -# end of ACPI INT340X thermal drivers - -CONFIG_INTEL_PCH_THERMAL=m -# CONFIG_INTEL_TCC_COOLING is not set -# CONFIG_INTEL_HFI_THERMAL is not set -# end of Intel thermal drivers - -# CONFIG_GENERIC_ADC_THERMAL is not set -CONFIG_WATCHDOG=y -CONFIG_WATCHDOG_CORE=y -# CONFIG_WATCHDOG_NOWAYOUT is not set -CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y -CONFIG_WATCHDOG_OPEN_TIMEOUT=0 -CONFIG_WATCHDOG_SYSFS=y -# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set - -# -# Watchdog Pretimeout Governors -# -# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set - -# -# Watchdog Device Drivers -# -CONFIG_SOFT_WATCHDOG=m -CONFIG_WDAT_WDT=m -# CONFIG_XILINX_WATCHDOG is not set -# CONFIG_ZIIRAVE_WATCHDOG is not set -# CONFIG_MLX_WDT is not set -# CONFIG_CADENCE_WATCHDOG is not set -# CONFIG_DW_WATCHDOG is not set -# CONFIG_MAX63XX_WATCHDOG is not set -# CONFIG_ACQUIRE_WDT is not set -# CONFIG_ADVANTECH_WDT is not set -# CONFIG_ADVANTECH_EC_WDT is not set -CONFIG_ALIM1535_WDT=m -CONFIG_ALIM7101_WDT=m -# CONFIG_EBC_C384_WDT is not set -# 
CONFIG_EXAR_WDT is not set -CONFIG_F71808E_WDT=m -CONFIG_SP5100_TCO=m -CONFIG_SBC_FITPC2_WATCHDOG=m -# CONFIG_EUROTECH_WDT is not set -CONFIG_IB700_WDT=m -CONFIG_IBMASR=m -# CONFIG_WAFER_WDT is not set -CONFIG_I6300ESB_WDT=m -CONFIG_IE6XX_WDT=m -CONFIG_ITCO_WDT=m -CONFIG_ITCO_VENDOR_SUPPORT=y -CONFIG_IT8712F_WDT=m -CONFIG_IT87_WDT=m -CONFIG_HP_WATCHDOG=m -CONFIG_HPWDT_NMI_DECODING=y -# CONFIG_SC1200_WDT is not set -# CONFIG_PC87413_WDT is not set -CONFIG_NV_TCO=m -# CONFIG_60XX_WDT is not set -# CONFIG_CPU5_WDT is not set -CONFIG_SMSC_SCH311X_WDT=m -# CONFIG_SMSC37B787_WDT is not set -# CONFIG_TQMX86_WDT is not set -CONFIG_VIA_WDT=m -CONFIG_W83627HF_WDT=m -CONFIG_W83877F_WDT=m -CONFIG_W83977F_WDT=m -CONFIG_MACHZ_WDT=m -# CONFIG_SBC_EPX_C3_WATCHDOG is not set -CONFIG_INTEL_MEI_WDT=m -# CONFIG_NI903X_WDT is not set -# CONFIG_NIC7018_WDT is not set -# CONFIG_MEN_A21_WDT is not set -CONFIG_XEN_WDT=m - -# -# PCI-based Watchdog Cards -# -CONFIG_PCIPCWATCHDOG=m -CONFIG_WDTPCI=m - -# -# USB-based Watchdog Cards -# -CONFIG_USBPCWATCHDOG=m -CONFIG_SSB_POSSIBLE=y -# CONFIG_SSB is not set -CONFIG_BCMA_POSSIBLE=y -CONFIG_BCMA=m -CONFIG_BCMA_HOST_PCI_POSSIBLE=y -CONFIG_BCMA_HOST_PCI=y -# CONFIG_BCMA_HOST_SOC is not set -CONFIG_BCMA_DRIVER_PCI=y -CONFIG_BCMA_DRIVER_GMAC_CMN=y -CONFIG_BCMA_DRIVER_GPIO=y -# CONFIG_BCMA_DEBUG is not set - -# -# Multifunction device drivers -# -CONFIG_MFD_CORE=y -# CONFIG_MFD_AS3711 is not set -# CONFIG_MFD_SMPRO is not set -# CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_AAT2870_CORE is not set -# CONFIG_MFD_BCM590XX is not set -# CONFIG_MFD_BD9571MWV is not set -# CONFIG_MFD_AXP20X_I2C is not set -# CONFIG_MFD_CS42L43_I2C is not set -# CONFIG_MFD_MADERA is not set -# CONFIG_PMIC_DA903X is not set -# CONFIG_MFD_DA9052_SPI is not set -# CONFIG_MFD_DA9052_I2C is not set -# CONFIG_MFD_DA9055 is not set -# CONFIG_MFD_DA9062 is not set -# CONFIG_MFD_DA9063 is not set -# CONFIG_MFD_DA9150 is not set -# CONFIG_MFD_DLN2 is not set -# CONFIG_MFD_MC13XXX_SPI 
is not set -# CONFIG_MFD_MC13XXX_I2C is not set -# CONFIG_MFD_MP2629 is not set -# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set -CONFIG_LPC_ICH=m -CONFIG_LPC_SCH=m -CONFIG_MFD_INTEL_LPSS=m -CONFIG_MFD_INTEL_LPSS_ACPI=m -CONFIG_MFD_INTEL_LPSS_PCI=m -# CONFIG_MFD_INTEL_PMC_BXT is not set -# CONFIG_MFD_IQS62X is not set -# CONFIG_MFD_JANZ_CMODIO is not set -# CONFIG_MFD_KEMPLD is not set -# CONFIG_MFD_88PM800 is not set -# CONFIG_MFD_88PM805 is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_MFD_MAX14577 is not set -# CONFIG_MFD_MAX77541 is not set -# CONFIG_MFD_MAX77693 is not set -# CONFIG_MFD_MAX77843 is not set -# CONFIG_MFD_MAX8907 is not set -# CONFIG_MFD_MAX8925 is not set -# CONFIG_MFD_MAX8997 is not set -# CONFIG_MFD_MAX8998 is not set -# CONFIG_MFD_MT6360 is not set -# CONFIG_MFD_MT6370 is not set -# CONFIG_MFD_MT6397 is not set -# CONFIG_MFD_MENF21BMC is not set -# CONFIG_MFD_OCELOT is not set -# CONFIG_EZX_PCAP is not set -CONFIG_MFD_VIPERBOARD=m -# CONFIG_MFD_RETU is not set -# CONFIG_MFD_PCF50633 is not set -# CONFIG_MFD_SY7636A is not set -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_RT4831 is not set -# CONFIG_MFD_RT5033 is not set -# CONFIG_MFD_RT5120 is not set -# CONFIG_MFD_RC5T583 is not set -# CONFIG_MFD_SI476X_CORE is not set -CONFIG_MFD_SM501=m -CONFIG_MFD_SM501_GPIO=y -# CONFIG_MFD_SKY81452 is not set -# CONFIG_MFD_SYSCON is not set -# CONFIG_MFD_LP3943 is not set -# CONFIG_MFD_LP8788 is not set -# CONFIG_MFD_TI_LMU is not set -# CONFIG_MFD_PALMAS is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TPS65086 is not set -# CONFIG_MFD_TPS65090 is not set -# CONFIG_MFD_TI_LP873X is not set -# CONFIG_MFD_TPS6586X is not set -# CONFIG_MFD_TPS65910 is not set -# CONFIG_MFD_TPS65912_I2C is not set -# CONFIG_MFD_TPS65912_SPI is not set -# CONFIG_MFD_TPS6594_I2C is not set -# CONFIG_MFD_TPS6594_SPI is not set -# CONFIG_TWL4030_CORE is not set -# CONFIG_TWL6040_CORE is not set -# 
CONFIG_MFD_WL1273_CORE is not set -# CONFIG_MFD_LM3533 is not set -# CONFIG_MFD_TQMX86 is not set -CONFIG_MFD_VX855=m -# CONFIG_MFD_ARIZONA_I2C is not set -# CONFIG_MFD_ARIZONA_SPI is not set -# CONFIG_MFD_WM8400 is not set -# CONFIG_MFD_WM831X_I2C is not set -# CONFIG_MFD_WM831X_SPI is not set -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_MFD_ATC260X_I2C is not set -# CONFIG_MFD_INTEL_M10_BMC_SPI is not set -# end of Multifunction device drivers - -# CONFIG_REGULATOR is not set -CONFIG_RC_CORE=m -# CONFIG_LIRC is not set -CONFIG_RC_MAP=m -CONFIG_RC_DECODERS=y -CONFIG_IR_IMON_DECODER=m -CONFIG_IR_JVC_DECODER=m -CONFIG_IR_MCE_KBD_DECODER=m -CONFIG_IR_NEC_DECODER=m -CONFIG_IR_RC5_DECODER=m -CONFIG_IR_RC6_DECODER=m -# CONFIG_IR_RCMM_DECODER is not set -CONFIG_IR_SANYO_DECODER=m -# CONFIG_IR_SHARP_DECODER is not set -CONFIG_IR_SONY_DECODER=m -# CONFIG_IR_XMP_DECODER is not set -CONFIG_RC_DEVICES=y -CONFIG_IR_ENE=m -CONFIG_IR_FINTEK=m -# CONFIG_IR_IGORPLUGUSB is not set -CONFIG_IR_IGUANA=m -CONFIG_IR_IMON=m -CONFIG_IR_IMON_RAW=m -CONFIG_IR_ITE_CIR=m -CONFIG_IR_MCEUSB=m -CONFIG_IR_NUVOTON=m -CONFIG_IR_REDRAT3=m -CONFIG_IR_SERIAL=m -# CONFIG_IR_SERIAL_TRANSMITTER is not set -CONFIG_IR_STREAMZAP=m -# CONFIG_IR_TOY is not set -CONFIG_IR_TTUSBIR=m -CONFIG_IR_WINBOND_CIR=m -CONFIG_RC_ATI_REMOTE=m -# CONFIG_RC_LOOPBACK is not set -# CONFIG_RC_XBOX_DVD is not set -CONFIG_CEC_CORE=m - -# -# CEC support -# -# CONFIG_MEDIA_CEC_RC is not set -CONFIG_MEDIA_CEC_SUPPORT=y -# CONFIG_CEC_CH7322 is not set -# CONFIG_CEC_GPIO is not set -# CONFIG_CEC_SECO is not set -CONFIG_USB_PULSE8_CEC=m -CONFIG_USB_RAINSHADOW_CEC=m -# end of CEC support - -CONFIG_MEDIA_SUPPORT=m -CONFIG_MEDIA_SUPPORT_FILTER=y -CONFIG_MEDIA_SUBDRV_AUTOSELECT=y - -# -# Media device types -# -# CONFIG_MEDIA_CAMERA_SUPPORT is not set -# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set -# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set -# CONFIG_MEDIA_RADIO_SUPPORT is not set -# CONFIG_MEDIA_SDR_SUPPORT 
is not set -# CONFIG_MEDIA_PLATFORM_SUPPORT is not set -# CONFIG_MEDIA_TEST_SUPPORT is not set -# end of Media device types - -# -# Media drivers -# - -# -# Drivers filtered as selected at 'Filter media drivers' -# - -# -# Media drivers -# -CONFIG_MEDIA_USB_SUPPORT=y -CONFIG_MEDIA_PCI_SUPPORT=y -# CONFIG_IPU_BRIDGE is not set -# end of Media drivers - -CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV=y - -# -# Media ancillary drivers -# -# end of Media ancillary drivers - -# -# Graphics support -# -CONFIG_APERTURE_HELPERS=y -CONFIG_VIDEO_CMDLINE=y -CONFIG_VIDEO_NOMODESET=y -# CONFIG_AUXDISPLAY is not set -# CONFIG_PANEL is not set -# CONFIG_AGP is not set -CONFIG_INTEL_GTT=m -CONFIG_VGA_SWITCHEROO=y -CONFIG_DRM=m -CONFIG_DRM_MIPI_DSI=y -CONFIG_DRM_KMS_HELPER=m -CONFIG_DRM_FBDEV_EMULATION=y -CONFIG_DRM_FBDEV_OVERALLOC=100 -CONFIG_DRM_LOAD_EDID_FIRMWARE=y -CONFIG_DRM_DISPLAY_HELPER=m -CONFIG_DRM_DISPLAY_DP_HELPER=y -CONFIG_DRM_DISPLAY_HDCP_HELPER=y -CONFIG_DRM_DISPLAY_HDMI_HELPER=y -# CONFIG_DRM_DP_AUX_CHARDEV is not set -# CONFIG_DRM_DP_CEC is not set -CONFIG_DRM_TTM=m -CONFIG_DRM_EXEC=m -CONFIG_DRM_BUDDY=m -CONFIG_DRM_VRAM_HELPER=m -CONFIG_DRM_TTM_HELPER=m -CONFIG_DRM_GEM_SHMEM_HELPER=m -CONFIG_DRM_SUBALLOC_HELPER=m -CONFIG_DRM_SCHED=m - -# -# I2C encoder or helper chips -# -CONFIG_DRM_I2C_CH7006=m -CONFIG_DRM_I2C_SIL164=m -# CONFIG_DRM_I2C_NXP_TDA998X is not set -# CONFIG_DRM_I2C_NXP_TDA9950 is not set -# end of I2C encoder or helper chips - -# -# ARM devices -# -# end of ARM devices - -CONFIG_DRM_RADEON=m -CONFIG_DRM_RADEON_USERPTR=y -CONFIG_DRM_AMDGPU=m -# CONFIG_DRM_AMDGPU_SI is not set -# CONFIG_DRM_AMDGPU_CIK is not set -# CONFIG_DRM_AMDGPU_USERPTR is not set - -# -# ACP (Audio CoProcessor) Configuration -# -# CONFIG_DRM_AMD_ACP is not set -# end of ACP (Audio CoProcessor) Configuration - -# -# Display Engine Configuration -# -CONFIG_DRM_AMD_DC=y -CONFIG_DRM_AMD_DC_FP=y -# CONFIG_DEBUG_KERNEL_DC is not set -# CONFIG_DRM_AMD_SECURE_DISPLAY is not set -# end of Display 
Engine Configuration - -# CONFIG_HSA_AMD is not set -CONFIG_DRM_NOUVEAU=m -CONFIG_NOUVEAU_DEBUG=5 -CONFIG_NOUVEAU_DEBUG_DEFAULT=3 -# CONFIG_NOUVEAU_DEBUG_MMU is not set -# CONFIG_NOUVEAU_DEBUG_PUSH is not set -CONFIG_DRM_NOUVEAU_BACKLIGHT=y -CONFIG_DRM_I915=m -CONFIG_DRM_I915_FORCE_PROBE="" -CONFIG_DRM_I915_CAPTURE_ERROR=y -CONFIG_DRM_I915_COMPRESS_ERROR=y -CONFIG_DRM_I915_USERPTR=y -CONFIG_DRM_I915_GVT_KVMGT=m -CONFIG_DRM_I915_REQUEST_TIMEOUT=20000 -CONFIG_DRM_I915_FENCE_TIMEOUT=10000 -CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250 -CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500 -CONFIG_DRM_I915_PREEMPT_TIMEOUT=640 -CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE=7500 -CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000 -CONFIG_DRM_I915_STOP_TIMEOUT=100 -CONFIG_DRM_I915_TIMESLICE_DURATION=1 -CONFIG_DRM_I915_GVT=y -# CONFIG_DRM_VGEM is not set -CONFIG_DRM_VKMS=m -CONFIG_DRM_VMWGFX=m -# CONFIG_DRM_VMWGFX_MKSSTATS is not set -CONFIG_DRM_GMA500=m -CONFIG_DRM_UDL=m -CONFIG_DRM_AST=m -CONFIG_DRM_MGAG200=m -CONFIG_DRM_QXL=m -CONFIG_DRM_VIRTIO_GPU=m -CONFIG_DRM_VIRTIO_GPU_KMS=y -CONFIG_DRM_PANEL=y - -# -# Display Panels -# -# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set -# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set -# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set -# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set -# end of Display Panels - -CONFIG_DRM_BRIDGE=y -CONFIG_DRM_PANEL_BRIDGE=y - -# -# Display Interface Bridges -# -# CONFIG_DRM_ANALOGIX_ANX78XX is not set -# end of Display Interface Bridges - -# CONFIG_DRM_LOONGSON is not set -# CONFIG_DRM_ETNAVIV is not set -CONFIG_DRM_BOCHS=m -CONFIG_DRM_CIRRUS_QEMU=m -# CONFIG_DRM_GM12U320 is not set -# CONFIG_DRM_PANEL_MIPI_DBI is not set -# CONFIG_DRM_SIMPLEDRM is not set -# CONFIG_TINYDRM_HX8357D is not set -# CONFIG_TINYDRM_ILI9163 is not set -# CONFIG_TINYDRM_ILI9225 is not set -# CONFIG_TINYDRM_ILI9341 is not set -# CONFIG_TINYDRM_ILI9486 is not set -# CONFIG_TINYDRM_MI0283QT is not set -# CONFIG_TINYDRM_REPAPER is not set -# 
CONFIG_TINYDRM_ST7586 is not set -# CONFIG_TINYDRM_ST7735R is not set -# CONFIG_DRM_XEN_FRONTEND is not set -# CONFIG_DRM_VBOXVIDEO is not set -# CONFIG_DRM_GUD is not set -# CONFIG_DRM_SSD130X is not set -# CONFIG_DRM_HYPERV is not set -# CONFIG_DRM_LEGACY is not set -CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y -CONFIG_DRM_PRIVACY_SCREEN=y -# CONFIG_HYDCU_FIXUP_HEADER is not set -CONFIG_DRM_INSPUR=m - -# -# Frame buffer Devices -# -CONFIG_FB=y -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ARC is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -# CONFIG_FB_VGA16 is not set -# CONFIG_FB_UVESA is not set -CONFIG_FB_VESA=y -CONFIG_FB_EFI=y -# CONFIG_FB_N411 is not set -# CONFIG_FB_HGA is not set -# CONFIG_FB_OPENCORES is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_I740 is not set -# CONFIG_FB_LE80578 is not set -# CONFIG_FB_MATROX is not set -# CONFIG_FB_RADEON is not set -# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_VIA is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_VT8623 is not set -# CONFIG_FB_TRIDENT is not set -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -# CONFIG_FB_SM501 is not set -# CONFIG_FB_SMSCUFX is not set -# CONFIG_FB_UDL is not set -# CONFIG_FB_IBM_GXT4500 is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_XEN_FBDEV_FRONTEND is not set -# CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set -CONFIG_FB_HYPERV=m -# CONFIG_FB_SIMPLE is not set -# CONFIG_FB_SSD1307 is not set -# CONFIG_FB_SM712 is not set -# CONFIG_FB_LS2K500 is not set -CONFIG_FB_CORE=y -CONFIG_FB_NOTIFY=y -CONFIG_FIRMWARE_EDID=y -CONFIG_FB_DEVICE=y -CONFIG_FB_CFB_FILLRECT=y 
-CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -CONFIG_FB_SYS_FILLRECT=y -CONFIG_FB_SYS_COPYAREA=y -CONFIG_FB_SYS_IMAGEBLIT=y -# CONFIG_FB_FOREIGN_ENDIAN is not set -CONFIG_FB_SYS_FOPS=y -CONFIG_FB_DEFERRED_IO=y -CONFIG_FB_IOMEM_HELPERS=y -CONFIG_FB_SYSMEM_HELPERS=y -CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y -# CONFIG_FB_MODE_HELPERS is not set -CONFIG_FB_TILEBLITTING=y -# end of Frame buffer Devices - -# -# Backlight & LCD device support -# -CONFIG_LCD_CLASS_DEVICE=m -# CONFIG_LCD_L4F00242T03 is not set -# CONFIG_LCD_LMS283GF05 is not set -# CONFIG_LCD_LTV350QV is not set -# CONFIG_LCD_ILI922X is not set -# CONFIG_LCD_ILI9320 is not set -# CONFIG_LCD_TDO24M is not set -# CONFIG_LCD_VGG2432A4 is not set -CONFIG_LCD_PLATFORM=m -# CONFIG_LCD_AMS369FG06 is not set -# CONFIG_LCD_LMS501KF03 is not set -# CONFIG_LCD_HX8357 is not set -# CONFIG_LCD_OTM3225A is not set -CONFIG_BACKLIGHT_CLASS_DEVICE=y -# CONFIG_BACKLIGHT_KTD253 is not set -# CONFIG_BACKLIGHT_KTZ8866 is not set -# CONFIG_BACKLIGHT_PWM is not set -CONFIG_BACKLIGHT_APPLE=m -# CONFIG_BACKLIGHT_QCOM_WLED is not set -# CONFIG_BACKLIGHT_SAHARA is not set -# CONFIG_BACKLIGHT_ADP8860 is not set -# CONFIG_BACKLIGHT_ADP8870 is not set -# CONFIG_BACKLIGHT_LM3630A is not set -# CONFIG_BACKLIGHT_LM3639 is not set -CONFIG_BACKLIGHT_LP855X=m -# CONFIG_BACKLIGHT_GPIO is not set -# CONFIG_BACKLIGHT_LV5207LP is not set -# CONFIG_BACKLIGHT_BD6107 is not set -# CONFIG_BACKLIGHT_ARCXCNN is not set -# end of Backlight & LCD device support - -CONFIG_HDMI=y - -# -# Console display driver support -# -CONFIG_VGA_CONSOLE=y -CONFIG_DUMMY_CONSOLE=y -CONFIG_DUMMY_CONSOLE_COLUMNS=80 -CONFIG_DUMMY_CONSOLE_ROWS=25 -CONFIG_FRAMEBUFFER_CONSOLE=y -# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y -# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set -# end of Console display driver support - -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# 
CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_LOGO_LINUX_CLUT224=y -# end of Graphics support - -# CONFIG_DRM_ACCEL is not set -CONFIG_SOUND=m -# CONFIG_SND is not set -CONFIG_HID_SUPPORT=y -CONFIG_HID=y -CONFIG_HID_BATTERY_STRENGTH=y -CONFIG_HIDRAW=y -CONFIG_UHID=m -CONFIG_HID_GENERIC=y - -# -# Special HID drivers -# -CONFIG_HID_A4TECH=m -# CONFIG_HID_ACCUTOUCH is not set -CONFIG_HID_ACRUX=m -# CONFIG_HID_ACRUX_FF is not set -CONFIG_HID_APPLE=m -CONFIG_HID_APPLEIR=m -CONFIG_HID_ASUS=m -CONFIG_HID_AUREAL=m -CONFIG_HID_BELKIN=m -CONFIG_HID_BETOP_FF=m -# CONFIG_HID_BIGBEN_FF is not set -CONFIG_HID_CHERRY=m -CONFIG_HID_CHICONY=m -CONFIG_HID_CORSAIR=m -# CONFIG_HID_COUGAR is not set -# CONFIG_HID_MACALLY is not set -CONFIG_HID_CMEDIA=m -# CONFIG_HID_CP2112 is not set -# CONFIG_HID_CREATIVE_SB0540 is not set -CONFIG_HID_CYPRESS=m -CONFIG_HID_DRAGONRISE=m -# CONFIG_DRAGONRISE_FF is not set -# CONFIG_HID_EMS_FF is not set -CONFIG_HID_ELAN=m -CONFIG_HID_ELECOM=m -CONFIG_HID_ELO=m -# CONFIG_HID_EVISION is not set -CONFIG_HID_EZKEY=m -# CONFIG_HID_FT260 is not set -CONFIG_HID_GEMBIRD=m -CONFIG_HID_GFRM=m -# CONFIG_HID_GLORIOUS is not set -CONFIG_HID_HOLTEK=m -# CONFIG_HOLTEK_FF is not set -# CONFIG_HID_GOOGLE_STADIA_FF is not set -# CONFIG_HID_VIVALDI is not set -CONFIG_HID_GT683R=m -CONFIG_HID_KEYTOUCH=m -CONFIG_HID_KYE=m -CONFIG_HID_UCLOGIC=m -CONFIG_HID_WALTOP=m -# CONFIG_HID_VIEWSONIC is not set -# CONFIG_HID_VRC2 is not set -# CONFIG_HID_XIAOMI is not set -CONFIG_HID_GYRATION=m -CONFIG_HID_ICADE=m -CONFIG_HID_ITE=m -CONFIG_HID_JABRA=m -CONFIG_HID_TWINHAN=m -CONFIG_HID_KENSINGTON=m -CONFIG_HID_LCPOWER=m -CONFIG_HID_LED=m -CONFIG_HID_LENOVO=m -# CONFIG_HID_LETSKETCH is not set -CONFIG_HID_LOGITECH=m -CONFIG_HID_LOGITECH_DJ=m -CONFIG_HID_LOGITECH_HIDPP=m -# CONFIG_LOGITECH_FF is not set -# CONFIG_LOGIRUMBLEPAD2_FF is not set -# CONFIG_LOGIG940_FF is not set -# CONFIG_LOGIWHEELS_FF is not set -CONFIG_HID_MAGICMOUSE=y -# CONFIG_HID_MALTRON is not set -# CONFIG_HID_MAYFLASH is 
not set -# CONFIG_HID_MEGAWORLD_FF is not set -# CONFIG_HID_REDRAGON is not set -CONFIG_HID_MICROSOFT=m -CONFIG_HID_MONTEREY=m -CONFIG_HID_MULTITOUCH=m -# CONFIG_HID_NINTENDO is not set -CONFIG_HID_NTI=m -CONFIG_HID_NTRIG=y -# CONFIG_HID_NVIDIA_SHIELD is not set -CONFIG_HID_ORTEK=m -CONFIG_HID_PANTHERLORD=m -# CONFIG_PANTHERLORD_FF is not set -CONFIG_HID_PENMOUNT=m -CONFIG_HID_PETALYNX=m -CONFIG_HID_PICOLCD=m -CONFIG_HID_PICOLCD_FB=y -CONFIG_HID_PICOLCD_BACKLIGHT=y -CONFIG_HID_PICOLCD_LCD=y -CONFIG_HID_PICOLCD_LEDS=y -CONFIG_HID_PICOLCD_CIR=y -CONFIG_HID_PLANTRONICS=m -# CONFIG_HID_PXRC is not set -# CONFIG_HID_RAZER is not set -CONFIG_HID_PRIMAX=m -# CONFIG_HID_RETRODE is not set -CONFIG_HID_ROCCAT=m -CONFIG_HID_SAITEK=m -CONFIG_HID_SAMSUNG=m -# CONFIG_HID_SEMITEK is not set -# CONFIG_HID_SIGMAMICRO is not set -CONFIG_HID_SONY=m -CONFIG_SONY_FF=y -CONFIG_HID_SPEEDLINK=m -# CONFIG_HID_STEAM is not set -CONFIG_HID_STEELSERIES=m -CONFIG_HID_SUNPLUS=m -CONFIG_HID_RMI=m -CONFIG_HID_GREENASIA=m -# CONFIG_GREENASIA_FF is not set -CONFIG_HID_HYPERV_MOUSE=m -CONFIG_HID_SMARTJOYPLUS=m -# CONFIG_SMARTJOYPLUS_FF is not set -CONFIG_HID_TIVO=m -CONFIG_HID_TOPSEED=m -# CONFIG_HID_TOPRE is not set -CONFIG_HID_THINGM=m -CONFIG_HID_THRUSTMASTER=m -# CONFIG_THRUSTMASTER_FF is not set -# CONFIG_HID_UDRAW_PS3 is not set -# CONFIG_HID_U2FZERO is not set -CONFIG_HID_WACOM=m -CONFIG_HID_WIIMOTE=m -CONFIG_HID_XINMO=m -CONFIG_HID_ZEROPLUS=m -# CONFIG_ZEROPLUS_FF is not set -CONFIG_HID_ZYDACRON=m -CONFIG_HID_SENSOR_HUB=y -CONFIG_HID_SENSOR_CUSTOM_SENSOR=m -CONFIG_HID_ALPS=m -# CONFIG_HID_MCP2221 is not set -# end of Special HID drivers - -# -# HID-BPF support -# -# CONFIG_HID_BPF is not set -# end of HID-BPF support - -# -# USB HID support -# -CONFIG_USB_HID=y -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y -# end of USB HID support - -CONFIG_I2C_HID=m -# CONFIG_I2C_HID_ACPI is not set -# CONFIG_I2C_HID_OF is not set - -# -# Intel ISH HID support -# -CONFIG_INTEL_ISH_HID=m -# 
CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER is not set -# end of Intel ISH HID support - -# -# AMD SFH HID Support -# -# CONFIG_AMD_SFH_HID is not set -# end of AMD SFH HID Support - -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=y -CONFIG_USB_LED_TRIG=y -# CONFIG_USB_ULPI_BUS is not set -# CONFIG_USB_CONN_GPIO is not set -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB=y -CONFIG_USB_PCI=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEFAULT_PERSIST=y -# CONFIG_USB_FEW_INIT_RETRIES is not set -# CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_OTG is not set -# CONFIG_USB_OTG_PRODUCTLIST is not set -CONFIG_USB_LEDS_TRIGGER_USBPORT=m -CONFIG_USB_AUTOSUSPEND_DELAY=2 -CONFIG_USB_MON=y - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_XHCI_DBGCAP=y -CONFIG_USB_XHCI_PCI=y -# CONFIG_USB_XHCI_PCI_RENESAS is not set -# CONFIG_USB_XHCI_PLATFORM is not set -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=y -# CONFIG_USB_EHCI_FSL is not set -# CONFIG_USB_EHCI_HCD_PLATFORM is not set -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_MAX3421_HCD is not set -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PCI=y -# CONFIG_USB_OHCI_HCD_PLATFORM is not set -# CONFIG_USB_UHCI_HCD is not set -# CONFIG_USB_SL811_HCD is not set -# CONFIG_USB_R8A66597_HCD is not set -# CONFIG_USB_HCD_BCMA is not set -# CONFIG_USB_HCD_TEST_MODE is not set -# CONFIG_USB_XEN_HCD is not set - -# -# USB Device Class drivers -# -CONFIG_USB_ACM=m -CONFIG_USB_PRINTER=m -CONFIG_USB_WDM=m -CONFIG_USB_TMC=m - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=m -# CONFIG_USB_STORAGE_DEBUG is not set -CONFIG_USB_STORAGE_REALTEK=m -CONFIG_REALTEK_AUTOPM=y -CONFIG_USB_STORAGE_DATAFAB=m -CONFIG_USB_STORAGE_FREECOM=m 
-CONFIG_USB_STORAGE_ISD200=m -CONFIG_USB_STORAGE_USBAT=m -CONFIG_USB_STORAGE_SDDR09=m -CONFIG_USB_STORAGE_SDDR55=m -CONFIG_USB_STORAGE_JUMPSHOT=m -CONFIG_USB_STORAGE_ALAUDA=m -CONFIG_USB_STORAGE_ONETOUCH=m -CONFIG_USB_STORAGE_KARMA=m -CONFIG_USB_STORAGE_CYPRESS_ATACB=m -CONFIG_USB_STORAGE_ENE_UB6250=m -CONFIG_USB_UAS=m - -# -# USB Imaging devices -# -CONFIG_USB_MDC800=m -CONFIG_USB_MICROTEK=m -# CONFIG_USBIP_CORE is not set - -# -# USB dual-mode controller drivers -# -# CONFIG_USB_CDNS_SUPPORT is not set -# CONFIG_USB_MUSB_HDRC is not set -# CONFIG_USB_DWC3 is not set -# CONFIG_USB_DWC2 is not set -# CONFIG_USB_CHIPIDEA is not set -# CONFIG_USB_ISP1760 is not set - -# -# USB port drivers -# -CONFIG_USB_SERIAL=y -CONFIG_USB_SERIAL_CONSOLE=y -CONFIG_USB_SERIAL_GENERIC=y -# CONFIG_USB_SERIAL_SIMPLE is not set -CONFIG_USB_SERIAL_AIRCABLE=m -CONFIG_USB_SERIAL_ARK3116=m -CONFIG_USB_SERIAL_BELKIN=m -CONFIG_USB_SERIAL_CH341=m -CONFIG_USB_SERIAL_WHITEHEAT=m -CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m -CONFIG_USB_SERIAL_CP210X=m -CONFIG_USB_SERIAL_CYPRESS_M8=m -CONFIG_USB_SERIAL_EMPEG=m -CONFIG_USB_SERIAL_FTDI_SIO=m -CONFIG_USB_SERIAL_VISOR=m -CONFIG_USB_SERIAL_IPAQ=m -CONFIG_USB_SERIAL_IR=m -CONFIG_USB_SERIAL_EDGEPORT=m -CONFIG_USB_SERIAL_EDGEPORT_TI=m -# CONFIG_USB_SERIAL_F81232 is not set -CONFIG_USB_SERIAL_F8153X=m -CONFIG_USB_SERIAL_GARMIN=m -CONFIG_USB_SERIAL_IPW=m -CONFIG_USB_SERIAL_IUU=m -CONFIG_USB_SERIAL_KEYSPAN_PDA=m -CONFIG_USB_SERIAL_KEYSPAN=m -CONFIG_USB_SERIAL_KLSI=m -CONFIG_USB_SERIAL_KOBIL_SCT=m -CONFIG_USB_SERIAL_MCT_U232=m -# CONFIG_USB_SERIAL_METRO is not set -CONFIG_USB_SERIAL_MOS7720=m -CONFIG_USB_SERIAL_MOS7715_PARPORT=y -CONFIG_USB_SERIAL_MOS7840=m -CONFIG_USB_SERIAL_MXUPORT=m -CONFIG_USB_SERIAL_NAVMAN=m -CONFIG_USB_SERIAL_PL2303=m -CONFIG_USB_SERIAL_OTI6858=m -CONFIG_USB_SERIAL_QCAUX=m -CONFIG_USB_SERIAL_QUALCOMM=m -CONFIG_USB_SERIAL_SPCP8X5=m -CONFIG_USB_SERIAL_SAFE=m -CONFIG_USB_SERIAL_SAFE_PADDED=y -CONFIG_USB_SERIAL_SIERRAWIRELESS=m 
-CONFIG_USB_SERIAL_SYMBOL=m -CONFIG_USB_SERIAL_TI=m -CONFIG_USB_SERIAL_CYBERJACK=m -CONFIG_USB_SERIAL_WWAN=m -CONFIG_USB_SERIAL_OPTION=m -CONFIG_USB_SERIAL_OMNINET=m -CONFIG_USB_SERIAL_OPTICON=m -CONFIG_USB_SERIAL_XSENS_MT=m -# CONFIG_USB_SERIAL_WISHBONE is not set -CONFIG_USB_SERIAL_SSU100=m -CONFIG_USB_SERIAL_QT2=m -CONFIG_USB_SERIAL_UPD78F0730=m -# CONFIG_USB_SERIAL_XR is not set -CONFIG_USB_SERIAL_DEBUG=m - -# -# USB Miscellaneous drivers -# -CONFIG_USB_USS720=m -CONFIG_USB_EMI62=m -CONFIG_USB_EMI26=m -CONFIG_USB_ADUTUX=m -CONFIG_USB_SEVSEG=m -CONFIG_USB_LEGOTOWER=m -CONFIG_USB_LCD=m -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set -CONFIG_USB_IDMOUSE=m -CONFIG_USB_APPLEDISPLAY=m -# CONFIG_APPLE_MFI_FASTCHARGE is not set -CONFIG_USB_SISUSBVGA=m -CONFIG_USB_LD=m -# CONFIG_USB_TRANCEVIBRATOR is not set -CONFIG_USB_IOWARRIOR=m -# CONFIG_USB_TEST is not set -# CONFIG_USB_EHSET_TEST_FIXTURE is not set -CONFIG_USB_ISIGHTFW=m -# CONFIG_USB_YUREX is not set -CONFIG_USB_EZUSB_FX2=m -# CONFIG_USB_HUB_USB251XB is not set -CONFIG_USB_HSIC_USB3503=m -# CONFIG_USB_HSIC_USB4604 is not set -# CONFIG_USB_LINK_LAYER_TEST is not set -# CONFIG_USB_CHAOSKEY is not set -CONFIG_USB_ATM=m -CONFIG_USB_SPEEDTOUCH=m -CONFIG_USB_CXACRU=m -CONFIG_USB_UEAGLEATM=m -CONFIG_USB_XUSBATM=m - -# -# USB Physical Layer drivers -# -# CONFIG_NOP_USB_XCEIV is not set -# CONFIG_USB_GPIO_VBUS is not set -# CONFIG_USB_ISP1301 is not set -# end of USB Physical Layer drivers - -# CONFIG_USB_GADGET is not set -CONFIG_TYPEC=y -CONFIG_TYPEC_TCPM=y -# CONFIG_TYPEC_TCPCI is not set -CONFIG_TYPEC_FUSB302=m -CONFIG_TYPEC_UCSI=y -# CONFIG_UCSI_CCG is not set -CONFIG_UCSI_ACPI=y -# CONFIG_UCSI_STM32G0 is not set -CONFIG_TYPEC_TPS6598X=m -# CONFIG_TYPEC_ANX7411 is not set -# CONFIG_TYPEC_RT1719 is not set -# CONFIG_TYPEC_HD3SS3220 is not set -# CONFIG_TYPEC_STUSB160X is not set -# CONFIG_TYPEC_WUSB3801 is not set - -# -# USB Type-C Multiplexer/DeMultiplexer Switch support -# -# 
CONFIG_TYPEC_MUX_FSA4480 is not set -# CONFIG_TYPEC_MUX_GPIO_SBU is not set -CONFIG_TYPEC_MUX_PI3USB30532=m -# CONFIG_TYPEC_MUX_NB7VPQ904M is not set -# end of USB Type-C Multiplexer/DeMultiplexer Switch support - -# -# USB Type-C Alternate Mode drivers -# -CONFIG_TYPEC_DP_ALTMODE=m -# CONFIG_TYPEC_NVIDIA_ALTMODE is not set -# end of USB Type-C Alternate Mode drivers - -CONFIG_USB_ROLE_SWITCH=y -CONFIG_USB_ROLES_INTEL_XHCI=y -CONFIG_MMC=m -CONFIG_MMC_BLOCK=m -CONFIG_MMC_BLOCK_MINORS=8 -CONFIG_SDIO_UART=m -# CONFIG_MMC_TEST is not set - -# -# MMC/SD/SDIO Host Controller Drivers -# -# CONFIG_MMC_DEBUG is not set -CONFIG_MMC_SDHCI=m -CONFIG_MMC_SDHCI_IO_ACCESSORS=y -CONFIG_MMC_SDHCI_PCI=m -CONFIG_MMC_RICOH_MMC=y -CONFIG_MMC_SDHCI_ACPI=m -CONFIG_MMC_SDHCI_PLTFM=m -# CONFIG_MMC_SDHCI_F_SDH30 is not set -# CONFIG_MMC_WBSD is not set -CONFIG_MMC_TIFM_SD=m -# CONFIG_MMC_SPI is not set -CONFIG_MMC_CB710=m -CONFIG_MMC_VIA_SDMMC=m -CONFIG_MMC_VUB300=m -CONFIG_MMC_USHC=m -# CONFIG_MMC_USDHI6ROL0 is not set -CONFIG_MMC_REALTEK_PCI=m -CONFIG_MMC_REALTEK_USB=m -CONFIG_MMC_CQHCI=m -# CONFIG_MMC_HSQ is not set -# CONFIG_MMC_TOSHIBA_PCI is not set -# CONFIG_MMC_MTK is not set -# CONFIG_MMC_SDHCI_XENON is not set -# CONFIG_SCSI_UFSHCD is not set -CONFIG_MEMSTICK=m -# CONFIG_MEMSTICK_DEBUG is not set - -# -# MemoryStick drivers -# -# CONFIG_MEMSTICK_UNSAFE_RESUME is not set -CONFIG_MSPRO_BLOCK=m -# CONFIG_MS_BLOCK is not set - -# -# MemoryStick Host Controller Drivers -# -CONFIG_MEMSTICK_TIFM_MS=m -CONFIG_MEMSTICK_JMICRON_38X=m -CONFIG_MEMSTICK_R592=m -CONFIG_MEMSTICK_REALTEK_PCI=m -CONFIG_MEMSTICK_REALTEK_USB=m -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y -# CONFIG_LEDS_CLASS_FLASH is not set -# CONFIG_LEDS_CLASS_MULTICOLOR is not set -# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set - -# -# LED drivers -# -# CONFIG_LEDS_APU is not set -# CONFIG_LEDS_AW200XX is not set -CONFIG_LEDS_LM3530=m -# CONFIG_LEDS_LM3532 is not set -# CONFIG_LEDS_LM3642 is not set -# CONFIG_LEDS_PCA9532 is not set 
-# CONFIG_LEDS_GPIO is not set -CONFIG_LEDS_LP3944=m -# CONFIG_LEDS_LP3952 is not set -# CONFIG_LEDS_LP50XX is not set -# CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_PCA963X is not set -# CONFIG_LEDS_PCA995X is not set -# CONFIG_LEDS_DAC124S085 is not set -# CONFIG_LEDS_PWM is not set -# CONFIG_LEDS_BD2606MVV is not set -# CONFIG_LEDS_BD2802 is not set -CONFIG_LEDS_INTEL_SS4200=m -# CONFIG_LEDS_LT3593 is not set -# CONFIG_LEDS_TCA6507 is not set -# CONFIG_LEDS_TLC591XX is not set -# CONFIG_LEDS_LM355x is not set -# CONFIG_LEDS_IS31FL319X is not set - -# -# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) -# -CONFIG_LEDS_BLINKM=m -CONFIG_LEDS_MLXCPLD=m -# CONFIG_LEDS_MLXREG is not set -# CONFIG_LEDS_USER is not set -# CONFIG_LEDS_NIC78BX is not set - -# -# Flash and Torch LED drivers -# - -# -# RGB LED drivers -# - -# -# LED Triggers -# -CONFIG_LEDS_TRIGGERS=y -CONFIG_LEDS_TRIGGER_TIMER=m -CONFIG_LEDS_TRIGGER_ONESHOT=m -CONFIG_LEDS_TRIGGER_DISK=y -# CONFIG_LEDS_TRIGGER_MTD is not set -CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_LEDS_TRIGGER_BACKLIGHT=m -# CONFIG_LEDS_TRIGGER_CPU is not set -# CONFIG_LEDS_TRIGGER_ACTIVITY is not set -CONFIG_LEDS_TRIGGER_DEFAULT_ON=m - -# -# iptables trigger is under Netfilter config (LED target) -# -CONFIG_LEDS_TRIGGER_TRANSIENT=m -CONFIG_LEDS_TRIGGER_CAMERA=m -# CONFIG_LEDS_TRIGGER_PANIC is not set -# CONFIG_LEDS_TRIGGER_NETDEV is not set -# CONFIG_LEDS_TRIGGER_PATTERN is not set -CONFIG_LEDS_TRIGGER_AUDIO=m -# CONFIG_LEDS_TRIGGER_TTY is not set - -# -# Simple LED drivers -# -# CONFIG_ACCESSIBILITY is not set -CONFIG_INFINIBAND=m -CONFIG_INFINIBAND_USER_MAD=m -CONFIG_INFINIBAND_USER_ACCESS=m -CONFIG_INFINIBAND_USER_MEM=y -CONFIG_INFINIBAND_ON_DEMAND_PAGING=y -CONFIG_INFINIBAND_ADDR_TRANS=y -CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y -CONFIG_INFINIBAND_VIRT_DMA=y -CONFIG_INFINIBAND_BNXT_RE=m -CONFIG_INFINIBAND_CXGB4=m -# CONFIG_INFINIBAND_EFA is not set -CONFIG_INFINIBAND_ERDMA=m -CONFIG_INFINIBAND_HFI1=m -# 
CONFIG_HFI1_DEBUG_SDMA_ORDER is not set -# CONFIG_SDMA_VERBOSITY is not set -# CONFIG_INFINIBAND_IRDMA is not set -CONFIG_MLX4_INFINIBAND=m -CONFIG_MLX5_INFINIBAND=m -# CONFIG_INFINIBAND_MTHCA is not set -# CONFIG_INFINIBAND_OCRDMA is not set -CONFIG_INFINIBAND_QEDR=m -# CONFIG_INFINIBAND_QIB is not set -CONFIG_INFINIBAND_USNIC=m -CONFIG_INFINIBAND_VMWARE_PVRDMA=m -CONFIG_INFINIBAND_RDMAVT=m -CONFIG_RDMA_RXE=m -CONFIG_RDMA_SIW=m -CONFIG_INFINIBAND_IPOIB=m -CONFIG_INFINIBAND_IPOIB_CM=y -CONFIG_INFINIBAND_IPOIB_DEBUG=y -# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set -CONFIG_INFINIBAND_SRP=m -CONFIG_INFINIBAND_SRPT=m -CONFIG_INFINIBAND_ISER=m -CONFIG_INFINIBAND_ISERT=m -# CONFIG_INFINIBAND_RTRS_CLIENT is not set -# CONFIG_INFINIBAND_RTRS_SERVER is not set -CONFIG_INFINIBAND_OPA_VNIC=m -CONFIG_EDAC_ATOMIC_SCRUB=y -CONFIG_EDAC_SUPPORT=y -CONFIG_EDAC=y -CONFIG_EDAC_LEGACY_SYSFS=y -# CONFIG_EDAC_DEBUG is not set -CONFIG_EDAC_DECODE_MCE=m -CONFIG_EDAC_GHES=y -CONFIG_EDAC_AMD64=m -CONFIG_EDAC_E752X=m -CONFIG_EDAC_I82975X=m -CONFIG_EDAC_I3000=m -CONFIG_EDAC_I3200=m -CONFIG_EDAC_IE31200=m -CONFIG_EDAC_X38=m -CONFIG_EDAC_I5400=m -CONFIG_EDAC_I7CORE=m -CONFIG_EDAC_I5100=m -CONFIG_EDAC_I7300=m -CONFIG_EDAC_SBRIDGE=m -CONFIG_EDAC_SKX=m -CONFIG_EDAC_I10NM=m -CONFIG_EDAC_PND2=m -# CONFIG_EDAC_IGEN6 is not set -CONFIG_RTC_LIB=y -CONFIG_RTC_MC146818_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -CONFIG_RTC_SYSTOHC=y -CONFIG_RTC_SYSTOHC_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set -CONFIG_RTC_NVMEM=y - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -# CONFIG_RTC_DRV_ABB5ZES3 is not set -# CONFIG_RTC_DRV_ABEOZ9 is not set -# CONFIG_RTC_DRV_ABX80X is not set -CONFIG_RTC_DRV_DS1307=m -# CONFIG_RTC_DRV_DS1307_CENTURY is not set -CONFIG_RTC_DRV_DS1374=m -# CONFIG_RTC_DRV_DS1374_WDT is not set 
-CONFIG_RTC_DRV_DS1672=m -CONFIG_RTC_DRV_MAX6900=m -CONFIG_RTC_DRV_RS5C372=m -CONFIG_RTC_DRV_ISL1208=m -CONFIG_RTC_DRV_ISL12022=m -CONFIG_RTC_DRV_X1205=m -CONFIG_RTC_DRV_PCF8523=m -# CONFIG_RTC_DRV_PCF85063 is not set -# CONFIG_RTC_DRV_PCF85363 is not set -CONFIG_RTC_DRV_PCF8563=m -CONFIG_RTC_DRV_PCF8583=m -CONFIG_RTC_DRV_M41T80=m -CONFIG_RTC_DRV_M41T80_WDT=y -CONFIG_RTC_DRV_BQ32K=m -# CONFIG_RTC_DRV_S35390A is not set -CONFIG_RTC_DRV_FM3130=m -# CONFIG_RTC_DRV_RX8010 is not set -CONFIG_RTC_DRV_RX8581=m -CONFIG_RTC_DRV_RX8025=m -CONFIG_RTC_DRV_EM3027=m -# CONFIG_RTC_DRV_RV3028 is not set -# CONFIG_RTC_DRV_RV3032 is not set -# CONFIG_RTC_DRV_RV8803 is not set -# CONFIG_RTC_DRV_SD3078 is not set - -# -# SPI RTC drivers -# -# CONFIG_RTC_DRV_M41T93 is not set -# CONFIG_RTC_DRV_M41T94 is not set -# CONFIG_RTC_DRV_DS1302 is not set -# CONFIG_RTC_DRV_DS1305 is not set -# CONFIG_RTC_DRV_DS1343 is not set -# CONFIG_RTC_DRV_DS1347 is not set -# CONFIG_RTC_DRV_DS1390 is not set -# CONFIG_RTC_DRV_MAX6916 is not set -# CONFIG_RTC_DRV_R9701 is not set -# CONFIG_RTC_DRV_RX4581 is not set -# CONFIG_RTC_DRV_RS5C348 is not set -# CONFIG_RTC_DRV_MAX6902 is not set -# CONFIG_RTC_DRV_PCF2123 is not set -# CONFIG_RTC_DRV_MCP795 is not set -CONFIG_RTC_I2C_AND_SPI=y - -# -# SPI and I2C RTC drivers -# -CONFIG_RTC_DRV_DS3232=m -CONFIG_RTC_DRV_DS3232_HWMON=y -# CONFIG_RTC_DRV_PCF2127 is not set -CONFIG_RTC_DRV_RV3029C2=m -# CONFIG_RTC_DRV_RV3029_HWMON is not set -# CONFIG_RTC_DRV_RX6110 is not set - -# -# Platform RTC drivers -# -CONFIG_RTC_DRV_CMOS=y -CONFIG_RTC_DRV_DS1286=m -CONFIG_RTC_DRV_DS1511=m -CONFIG_RTC_DRV_DS1553=m -# CONFIG_RTC_DRV_DS1685_FAMILY is not set -CONFIG_RTC_DRV_DS1742=m -CONFIG_RTC_DRV_DS2404=m -CONFIG_RTC_DRV_STK17TA8=m -# CONFIG_RTC_DRV_M48T86 is not set -CONFIG_RTC_DRV_M48T35=m -CONFIG_RTC_DRV_M48T59=m -CONFIG_RTC_DRV_MSM6242=m -CONFIG_RTC_DRV_RP5C01=m - -# -# on-CPU RTC drivers -# -# CONFIG_RTC_DRV_FTRTC010 is not set - -# -# HID Sensor RTC drivers -# -# 
CONFIG_RTC_DRV_HID_SENSOR_TIME is not set -# CONFIG_RTC_DRV_GOLDFISH is not set -CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set - -# -# DMA Devices -# -CONFIG_DMA_ENGINE=y -CONFIG_DMA_VIRTUAL_CHANNELS=y -CONFIG_DMA_ACPI=y -# CONFIG_ALTERA_MSGDMA is not set -CONFIG_INTEL_IDMA64=m -CONFIG_INTEL_IDXD_BUS=m -CONFIG_INTEL_IDXD=m -# CONFIG_INTEL_IDXD_COMPAT is not set -CONFIG_INTEL_IDXD_SVM=y -# CONFIG_INTEL_IDXD_PERFMON is not set -CONFIG_INTEL_IOATDMA=m -# CONFIG_PLX_DMA is not set -# CONFIG_XILINX_DMA is not set -# CONFIG_XILINX_XDMA is not set -CONFIG_AMD_PTDMA=m -# CONFIG_QCOM_HIDMA_MGMT is not set -# CONFIG_QCOM_HIDMA is not set -CONFIG_DW_DMAC_CORE=y -CONFIG_DW_DMAC=m -CONFIG_DW_DMAC_PCI=y -# CONFIG_DW_EDMA is not set -CONFIG_HSU_DMA=y -# CONFIG_SF_PDMA is not set -# CONFIG_INTEL_LDMA is not set - -# -# DMA Clients -# -CONFIG_ASYNC_TX_DMA=y -CONFIG_DMATEST=m -CONFIG_DMA_ENGINE_RAID=y - -# -# DMABUF options -# -CONFIG_SYNC_FILE=y -# CONFIG_SW_SYNC is not set -# CONFIG_UDMABUF is not set -# CONFIG_DMABUF_MOVE_NOTIFY is not set -# CONFIG_DMABUF_DEBUG is not set -# CONFIG_DMABUF_SELFTESTS is not set -# CONFIG_DMABUF_HEAPS is not set -# CONFIG_DMABUF_SYSFS_STATS is not set -# end of DMABUF options - -CONFIG_DCA=m -CONFIG_UIO=m -CONFIG_UIO_CIF=m -CONFIG_UIO_PDRV_GENIRQ=m -# CONFIG_UIO_DMEM_GENIRQ is not set -CONFIG_UIO_AEC=m -CONFIG_UIO_SERCOS3=m -CONFIG_UIO_PCI_GENERIC=m -# CONFIG_UIO_NETX is not set -# CONFIG_UIO_PRUSS is not set -# CONFIG_UIO_MF624 is not set -CONFIG_UIO_HV_GENERIC=m -CONFIG_VFIO=m -CONFIG_VFIO_GROUP=y -CONFIG_VFIO_CONTAINER=y -CONFIG_VFIO_IOMMU_TYPE1=m -CONFIG_VFIO_NOIOMMU=y -CONFIG_VFIO_VIRQFD=y - -# -# VFIO support for PCI devices -# -CONFIG_VFIO_PCI_CORE=m -CONFIG_VFIO_PCI_MMAP=y -CONFIG_VFIO_PCI_INTX=y -CONFIG_VFIO_PCI=m -# CONFIG_VFIO_PCI_VGA is not set -# CONFIG_VFIO_PCI_IGD is not set -# CONFIG_MLX5_VFIO_PCI is not set -CONFIG_QAT_VFIO_PCI=m -# end of VFIO support for PCI devices - -CONFIG_VFIO_MDEV=m -CONFIG_IRQ_BYPASS_MANAGER=m 
-CONFIG_VIRT_DRIVERS=y -CONFIG_VMGENID=y -# CONFIG_VBOXGUEST is not set -# CONFIG_NITRO_ENCLAVES is not set -CONFIG_EFI_SECRET=m -CONFIG_SEV_GUEST=m -CONFIG_TDX_GUEST_DRIVER=m -CONFIG_CSV_GUEST=m -CONFIG_VIRTIO_ANCHOR=y -CONFIG_VIRTIO=y -CONFIG_VIRTIO_PCI_LIB=y -CONFIG_VIRTIO_PCI_LIB_LEGACY=y -CONFIG_VIRTIO_MENU=y -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_PCI_LEGACY=y -CONFIG_VIRTIO_PMEM=m -CONFIG_VIRTIO_BALLOON=m -CONFIG_VIRTIO_MEM=m -CONFIG_VIRTIO_INPUT=m -CONFIG_VIRTIO_MMIO=m -CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y -CONFIG_VIRTIO_DMA_SHARED_BUFFER=m -# CONFIG_VDPA is not set -CONFIG_VHOST_IOTLB=m -CONFIG_VHOST_TASK=y -CONFIG_VHOST=m -CONFIG_VHOST_MENU=y -CONFIG_VHOST_NET=m -CONFIG_VHOST_SCSI=m -CONFIG_VHOST_VSOCK=m -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set - -# -# Microsoft Hyper-V guest support -# -CONFIG_HYPERV=m -# CONFIG_HYPERV_VTL_MODE is not set -CONFIG_HYPERV_TIMER=y -CONFIG_HYPERV_UTILS=m -CONFIG_HYPERV_BALLOON=m -# end of Microsoft Hyper-V guest support - -# -# Xen driver support -# -# CONFIG_XEN_BALLOON is not set -CONFIG_XEN_DEV_EVTCHN=m -# CONFIG_XEN_BACKEND is not set -CONFIG_XENFS=m -CONFIG_XEN_COMPAT_XENFS=y -CONFIG_XEN_SYS_HYPERVISOR=y -CONFIG_XEN_XENBUS_FRONTEND=y -# CONFIG_XEN_GNTDEV is not set -# CONFIG_XEN_GRANT_DEV_ALLOC is not set -# CONFIG_XEN_GRANT_DMA_ALLOC is not set -# CONFIG_XEN_PVCALLS_FRONTEND is not set -CONFIG_XEN_PRIVCMD=m -CONFIG_XEN_EFI=y -CONFIG_XEN_AUTO_XLATE=y -CONFIG_XEN_ACPI=y -# CONFIG_XEN_UNPOPULATED_ALLOC is not set -# CONFIG_XEN_VIRTIO is not set -# end of Xen driver support - -# CONFIG_GREYBUS is not set -# CONFIG_COMEDI is not set -# CONFIG_STAGING is not set -# CONFIG_CHROME_PLATFORMS is not set -CONFIG_MELLANOX_PLATFORM=y -CONFIG_MLXREG_HOTPLUG=m -# CONFIG_MLXREG_IO is not set -# CONFIG_MLXREG_LC is not set -# CONFIG_NVSW_SN2201 is not set -CONFIG_SURFACE_PLATFORMS=y -# CONFIG_SURFACE3_WMI is not set -# CONFIG_SURFACE_3_POWER_OPREGION is not set -# CONFIG_SURFACE_GPE is not set -# CONFIG_SURFACE_HOTPLUG is not set -# 
CONFIG_SURFACE_PRO3_BUTTON is not set -CONFIG_X86_PLATFORM_DEVICES=y -CONFIG_ACPI_WMI=m -CONFIG_WMI_BMOF=m -# CONFIG_HUAWEI_WMI is not set -# CONFIG_UV_SYSFS is not set -CONFIG_MXM_WMI=m -# CONFIG_NVIDIA_WMI_EC_BACKLIGHT is not set -# CONFIG_XIAOMI_WMI is not set -# CONFIG_GIGABYTE_WMI is not set -# CONFIG_YOGABOOK is not set -CONFIG_ACERHDF=m -# CONFIG_ACER_WIRELESS is not set -CONFIG_ACER_WMI=m -# CONFIG_AMD_PMF is not set -# CONFIG_AMD_PMC is not set -# CONFIG_AMD_HSMP is not set -# CONFIG_ADV_SWBUTTON is not set -CONFIG_APPLE_GMUX=m -CONFIG_ASUS_LAPTOP=m -# CONFIG_ASUS_WIRELESS is not set -CONFIG_ASUS_WMI=m -CONFIG_ASUS_NB_WMI=m -# CONFIG_ASUS_TF103C_DOCK is not set -# CONFIG_MERAKI_MX100 is not set -CONFIG_EEEPC_LAPTOP=m -CONFIG_EEEPC_WMI=m -# CONFIG_X86_PLATFORM_DRIVERS_DELL is not set -CONFIG_AMILO_RFKILL=m -CONFIG_FUJITSU_LAPTOP=m -CONFIG_FUJITSU_TABLET=m -# CONFIG_GPD_POCKET_FAN is not set -# CONFIG_X86_PLATFORM_DRIVERS_HP is not set -# CONFIG_WIRELESS_HOTKEY is not set -# CONFIG_IBM_RTL is not set -CONFIG_IDEAPAD_LAPTOP=m -# CONFIG_LENOVO_YMC is not set -CONFIG_SENSORS_HDAPS=m -CONFIG_THINKPAD_ACPI=m -# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set -# CONFIG_THINKPAD_ACPI_DEBUG is not set -# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set -CONFIG_THINKPAD_ACPI_VIDEO=y -CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y -# CONFIG_THINKPAD_LMI is not set -# CONFIG_INTEL_ATOMISP2_PM is not set -CONFIG_INTEL_IFS=m -# CONFIG_INTEL_SAR_INT1092 is not set -CONFIG_INTEL_PMC_CORE=m -CONFIG_INTEL_PMT_CLASS=m -CONFIG_INTEL_PMT_TELEMETRY=m -CONFIG_INTEL_PMT_CRASHLOG=m - -# -# Intel Speed Select Technology interface support -# -CONFIG_INTEL_SPEED_SELECT_TPMI=m -CONFIG_INTEL_SPEED_SELECT_INTERFACE=m -# end of Intel Speed Select Technology interface support - -CONFIG_INTEL_WMI=y -# CONFIG_INTEL_WMI_SBL_FW_UPDATE is not set -CONFIG_INTEL_WMI_THUNDERBOLT=m - -# -# Intel Uncore Frequency Control -# -# CONFIG_INTEL_UNCORE_FREQ_CONTROL is not set -# end of Intel Uncore Frequency Control - 
-CONFIG_INTEL_HID_EVENT=m -CONFIG_INTEL_VBTN=m -# CONFIG_INTEL_INT0002_VGPIO is not set -CONFIG_INTEL_OAKTRAIL=m -# CONFIG_INTEL_ISHTP_ECLITE is not set -# CONFIG_INTEL_PUNIT_IPC is not set -CONFIG_INTEL_RST=m -# CONFIG_INTEL_SDSI is not set -# CONFIG_INTEL_SMARTCONNECT is not set -CONFIG_INTEL_TPMI=m -CONFIG_INTEL_TURBO_MAX_3=y -CONFIG_INTEL_VSEC=y -# CONFIG_MSI_EC is not set -CONFIG_MSI_LAPTOP=m -CONFIG_MSI_WMI=m -# CONFIG_PCENGINES_APU2 is not set -# CONFIG_BARCO_P50_GPIO is not set -CONFIG_SAMSUNG_LAPTOP=m -CONFIG_SAMSUNG_Q10=m -# CONFIG_ACPI_TOSHIBA is not set -CONFIG_TOSHIBA_BT_RFKILL=m -# CONFIG_TOSHIBA_HAPS is not set -# CONFIG_TOSHIBA_WMI is not set -CONFIG_ACPI_CMPC=m -CONFIG_COMPAL_LAPTOP=m -# CONFIG_LG_LAPTOP is not set -CONFIG_PANASONIC_LAPTOP=m -CONFIG_SONY_LAPTOP=m -CONFIG_SONYPI_COMPAT=y -# CONFIG_SYSTEM76_ACPI is not set -CONFIG_TOPSTAR_LAPTOP=m -# CONFIG_SERIAL_MULTI_INSTANTIATE is not set -CONFIG_MLX_PLATFORM=m -CONFIG_INTEL_IPS=m -# CONFIG_INTEL_SCU_PCI is not set -# CONFIG_INTEL_SCU_PLATFORM is not set -# CONFIG_SIEMENS_SIMATIC_IPC is not set -# CONFIG_WINMATE_FM07_KEYS is not set -# CONFIG_SEL3350_PLATFORM is not set -CONFIG_P2SB=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_CLK_PREPARE=y -CONFIG_COMMON_CLK=y -# CONFIG_LMK04832 is not set -# CONFIG_COMMON_CLK_MAX9485 is not set -# CONFIG_COMMON_CLK_SI5341 is not set -# CONFIG_COMMON_CLK_SI5351 is not set -# CONFIG_COMMON_CLK_SI544 is not set -# CONFIG_COMMON_CLK_CDCE706 is not set -# CONFIG_COMMON_CLK_CS2000_CP is not set -# CONFIG_COMMON_CLK_PWM is not set -# CONFIG_XILINX_VCU is not set -CONFIG_HWSPINLOCK=y - -# -# Clock Source drivers -# -CONFIG_CLKEVT_I8253=y -CONFIG_I8253_LOCK=y -CONFIG_CLKBLD_I8253=y -# end of Clock Source drivers - -CONFIG_MAILBOX=y -CONFIG_PCC=y -# CONFIG_ALTERA_MBOX is not set -CONFIG_IOMMU_IOVA=y -CONFIG_IOMMU_API=y -CONFIG_IOMMU_SUPPORT=y - -# -# Generic IOMMU Pagetable Support -# -CONFIG_IOMMU_IO_PGTABLE=y -# end of Generic IOMMU Pagetable Support - -# CONFIG_IOMMU_DEBUGFS is 
not set -# CONFIG_IOMMU_DEFAULT_DMA_STRICT is not set -# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set -CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y -CONFIG_IOMMU_DMA=y -CONFIG_IOMMU_SVA=y -CONFIG_AMD_IOMMU=y -CONFIG_AMD_IOMMU_V2=m -CONFIG_DMAR_TABLE=y -CONFIG_INTEL_IOMMU=y -CONFIG_INTEL_IOMMU_SVM=y -# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set -CONFIG_INTEL_IOMMU_FLOPPY_WA=y -CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y -CONFIG_INTEL_IOMMU_PERF_EVENTS=y -CONFIG_IOMMUFD=m -CONFIG_IRQ_REMAP=y -CONFIG_HYPERV_IOMMU=y -# CONFIG_VIRTIO_IOMMU is not set - -# -# Remoteproc drivers -# -# CONFIG_REMOTEPROC is not set -# end of Remoteproc drivers - -# -# Rpmsg drivers -# -# CONFIG_RPMSG_QCOM_GLINK_RPM is not set -# CONFIG_RPMSG_VIRTIO is not set -# end of Rpmsg drivers - -# CONFIG_SOUNDWIRE is not set - -# -# SOC (System On Chip) specific Drivers -# - -# -# Amlogic SoC drivers -# -# end of Amlogic SoC drivers - -# -# Broadcom SoC drivers -# -# end of Broadcom SoC drivers - -# -# NXP/Freescale QorIQ SoC drivers -# -# end of NXP/Freescale QorIQ SoC drivers - -# -# fujitsu SoC drivers -# -# end of fujitsu SoC drivers - -# -# i.MX SoC drivers -# -# end of i.MX SoC drivers - -# -# Enable LiteX SoC Builder specific drivers -# -# end of Enable LiteX SoC Builder specific drivers - -# CONFIG_WPCM450_SOC is not set - -# -# Qualcomm SoC drivers -# -# end of Qualcomm SoC drivers - -# CONFIG_SOC_TI is not set - -# -# Xilinx SoC drivers -# -# end of Xilinx SoC drivers -# end of SOC (System On Chip) specific Drivers - -# CONFIG_PM_DEVFREQ is not set -# CONFIG_EXTCON is not set -# CONFIG_MEMORY is not set -CONFIG_IIO=m -CONFIG_IIO_BUFFER=y -# CONFIG_IIO_BUFFER_CB is not set -# CONFIG_IIO_BUFFER_DMA is not set -# CONFIG_IIO_BUFFER_DMAENGINE is not set -# CONFIG_IIO_BUFFER_HW_CONSUMER is not set -CONFIG_IIO_KFIFO_BUF=m -CONFIG_IIO_TRIGGERED_BUFFER=m -# CONFIG_IIO_CONFIGFS is not set -CONFIG_IIO_TRIGGER=y -CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 -# CONFIG_IIO_SW_DEVICE is not set -# CONFIG_IIO_SW_TRIGGER is not 
set -# CONFIG_IIO_TRIGGERED_EVENT is not set - -# -# Accelerometers -# -# CONFIG_ADIS16201 is not set -# CONFIG_ADIS16209 is not set -# CONFIG_ADXL313_I2C is not set -# CONFIG_ADXL313_SPI is not set -# CONFIG_ADXL345_I2C is not set -# CONFIG_ADXL345_SPI is not set -# CONFIG_ADXL355_I2C is not set -# CONFIG_ADXL355_SPI is not set -# CONFIG_ADXL367_SPI is not set -# CONFIG_ADXL367_I2C is not set -# CONFIG_ADXL372_SPI is not set -# CONFIG_ADXL372_I2C is not set -# CONFIG_BMA180 is not set -# CONFIG_BMA220 is not set -# CONFIG_BMA400 is not set -# CONFIG_BMC150_ACCEL is not set -# CONFIG_BMI088_ACCEL is not set -# CONFIG_DA280 is not set -# CONFIG_DA311 is not set -# CONFIG_DMARD06 is not set -# CONFIG_DMARD09 is not set -# CONFIG_DMARD10 is not set -# CONFIG_FXLS8962AF_I2C is not set -# CONFIG_FXLS8962AF_SPI is not set -CONFIG_HID_SENSOR_ACCEL_3D=m -# CONFIG_IIO_ST_ACCEL_3AXIS is not set -# CONFIG_IIO_KX022A_SPI is not set -# CONFIG_IIO_KX022A_I2C is not set -# CONFIG_KXSD9 is not set -# CONFIG_KXCJK1013 is not set -# CONFIG_MC3230 is not set -# CONFIG_MMA7455_I2C is not set -# CONFIG_MMA7455_SPI is not set -# CONFIG_MMA7660 is not set -# CONFIG_MMA8452 is not set -# CONFIG_MMA9551 is not set -# CONFIG_MMA9553 is not set -# CONFIG_MSA311 is not set -# CONFIG_MXC4005 is not set -# CONFIG_MXC6255 is not set -# CONFIG_SCA3000 is not set -# CONFIG_SCA3300 is not set -# CONFIG_STK8312 is not set -# CONFIG_STK8BA50 is not set -# end of Accelerometers - -# -# Analog to digital converters -# -# CONFIG_AD4130 is not set -# CONFIG_AD7091R5 is not set -# CONFIG_AD7124 is not set -# CONFIG_AD7192 is not set -# CONFIG_AD7266 is not set -# CONFIG_AD7280 is not set -# CONFIG_AD7291 is not set -# CONFIG_AD7292 is not set -# CONFIG_AD7298 is not set -# CONFIG_AD7476 is not set -# CONFIG_AD7606_IFACE_PARALLEL is not set -# CONFIG_AD7606_IFACE_SPI is not set -# CONFIG_AD7766 is not set -# CONFIG_AD7768_1 is not set -# CONFIG_AD7780 is not set -# CONFIG_AD7791 is not set -# CONFIG_AD7793 
is not set -# CONFIG_AD7887 is not set -# CONFIG_AD7923 is not set -# CONFIG_AD7949 is not set -# CONFIG_AD799X is not set -# CONFIG_ENVELOPE_DETECTOR is not set -# CONFIG_HI8435 is not set -# CONFIG_HX711 is not set -# CONFIG_INA2XX_ADC is not set -# CONFIG_LTC2471 is not set -# CONFIG_LTC2485 is not set -# CONFIG_LTC2496 is not set -# CONFIG_LTC2497 is not set -# CONFIG_MAX1027 is not set -# CONFIG_MAX11100 is not set -# CONFIG_MAX1118 is not set -# CONFIG_MAX11205 is not set -# CONFIG_MAX11410 is not set -# CONFIG_MAX1241 is not set -# CONFIG_MAX1363 is not set -# CONFIG_MAX9611 is not set -# CONFIG_MCP320X is not set -# CONFIG_MCP3422 is not set -# CONFIG_MCP3911 is not set -# CONFIG_NAU7802 is not set -# CONFIG_RICHTEK_RTQ6056 is not set -# CONFIG_SD_ADC_MODULATOR is not set -# CONFIG_TI_ADC081C is not set -# CONFIG_TI_ADC0832 is not set -# CONFIG_TI_ADC084S021 is not set -# CONFIG_TI_ADC12138 is not set -# CONFIG_TI_ADC108S102 is not set -# CONFIG_TI_ADC128S052 is not set -# CONFIG_TI_ADC161S626 is not set -# CONFIG_TI_ADS1015 is not set -# CONFIG_TI_ADS7924 is not set -# CONFIG_TI_ADS1100 is not set -# CONFIG_TI_ADS7950 is not set -# CONFIG_TI_ADS8344 is not set -# CONFIG_TI_ADS8688 is not set -# CONFIG_TI_ADS124S08 is not set -# CONFIG_TI_ADS131E08 is not set -# CONFIG_TI_LMP92064 is not set -# CONFIG_TI_TLC4541 is not set -# CONFIG_TI_TSC2046 is not set -# CONFIG_VF610_ADC is not set -# CONFIG_VIPERBOARD_ADC is not set -# CONFIG_XILINX_XADC is not set -# end of Analog to digital converters - -# -# Analog to digital and digital to analog converters -# -# CONFIG_AD74115 is not set -# CONFIG_AD74413R is not set -# end of Analog to digital and digital to analog converters - -# -# Analog Front Ends -# -# CONFIG_IIO_RESCALE is not set -# end of Analog Front Ends - -# -# Amplifiers -# -# CONFIG_AD8366 is not set -# CONFIG_ADA4250 is not set -# CONFIG_HMC425 is not set -# end of Amplifiers - -# -# Capacitance to digital converters -# -# CONFIG_AD7150 is not set -# 
CONFIG_AD7746 is not set -# end of Capacitance to digital converters - -# -# Chemical Sensors -# -# CONFIG_ATLAS_PH_SENSOR is not set -# CONFIG_ATLAS_EZO_SENSOR is not set -# CONFIG_BME680 is not set -# CONFIG_CCS811 is not set -# CONFIG_IAQCORE is not set -# CONFIG_SCD30_CORE is not set -# CONFIG_SCD4X is not set -# CONFIG_SENSIRION_SGP30 is not set -# CONFIG_SENSIRION_SGP40 is not set -# CONFIG_SPS30_I2C is not set -# CONFIG_SENSEAIR_SUNRISE_CO2 is not set -# CONFIG_VZ89X is not set -# end of Chemical Sensors - -# -# Hid Sensor IIO Common -# -CONFIG_HID_SENSOR_IIO_COMMON=m -CONFIG_HID_SENSOR_IIO_TRIGGER=m -# end of Hid Sensor IIO Common - -# -# IIO SCMI Sensors -# -# end of IIO SCMI Sensors - -# -# SSP Sensor Common -# -# CONFIG_IIO_SSP_SENSORHUB is not set -# end of SSP Sensor Common - -# -# Digital to analog converters -# -# CONFIG_AD3552R is not set -# CONFIG_AD5064 is not set -# CONFIG_AD5360 is not set -# CONFIG_AD5380 is not set -# CONFIG_AD5421 is not set -# CONFIG_AD5446 is not set -# CONFIG_AD5449 is not set -# CONFIG_AD5592R is not set -# CONFIG_AD5593R is not set -# CONFIG_AD5504 is not set -# CONFIG_AD5624R_SPI is not set -# CONFIG_LTC2688 is not set -# CONFIG_AD5686_SPI is not set -# CONFIG_AD5696_I2C is not set -# CONFIG_AD5755 is not set -# CONFIG_AD5758 is not set -# CONFIG_AD5761 is not set -# CONFIG_AD5764 is not set -# CONFIG_AD5766 is not set -# CONFIG_AD5770R is not set -# CONFIG_AD5791 is not set -# CONFIG_AD7293 is not set -# CONFIG_AD7303 is not set -# CONFIG_AD8801 is not set -# CONFIG_DPOT_DAC is not set -# CONFIG_DS4424 is not set -# CONFIG_LTC1660 is not set -# CONFIG_LTC2632 is not set -# CONFIG_M62332 is not set -# CONFIG_MAX517 is not set -# CONFIG_MAX5522 is not set -# CONFIG_MAX5821 is not set -# CONFIG_MCP4725 is not set -# CONFIG_MCP4728 is not set -# CONFIG_MCP4922 is not set -# CONFIG_TI_DAC082S085 is not set -# CONFIG_TI_DAC5571 is not set -# CONFIG_TI_DAC7311 is not set -# CONFIG_TI_DAC7612 is not set -# CONFIG_VF610_DAC is 
not set -# end of Digital to analog converters - -# -# IIO dummy driver -# -# end of IIO dummy driver - -# -# Filters -# -# CONFIG_ADMV8818 is not set -# end of Filters - -# -# Frequency Synthesizers DDS/PLL -# - -# -# Clock Generator/Distribution -# -# CONFIG_AD9523 is not set -# end of Clock Generator/Distribution - -# -# Phase-Locked Loop (PLL) frequency synthesizers -# -# CONFIG_ADF4350 is not set -# CONFIG_ADF4371 is not set -# CONFIG_ADF4377 is not set -# CONFIG_ADMV1013 is not set -# CONFIG_ADMV1014 is not set -# CONFIG_ADMV4420 is not set -# CONFIG_ADRF6780 is not set -# end of Phase-Locked Loop (PLL) frequency synthesizers -# end of Frequency Synthesizers DDS/PLL - -# -# Digital gyroscope sensors -# -# CONFIG_ADIS16080 is not set -# CONFIG_ADIS16130 is not set -# CONFIG_ADIS16136 is not set -# CONFIG_ADIS16260 is not set -# CONFIG_ADXRS290 is not set -# CONFIG_ADXRS450 is not set -# CONFIG_BMG160 is not set -# CONFIG_FXAS21002C is not set -CONFIG_HID_SENSOR_GYRO_3D=m -# CONFIG_MPU3050_I2C is not set -# CONFIG_IIO_ST_GYRO_3AXIS is not set -# CONFIG_ITG3200 is not set -# end of Digital gyroscope sensors - -# -# Health Sensors -# - -# -# Heart Rate Monitors -# -# CONFIG_AFE4403 is not set -# CONFIG_AFE4404 is not set -# CONFIG_MAX30100 is not set -# CONFIG_MAX30102 is not set -# end of Heart Rate Monitors -# end of Health Sensors - -# -# Humidity sensors -# -# CONFIG_AM2315 is not set -# CONFIG_DHT11 is not set -# CONFIG_HDC100X is not set -# CONFIG_HDC2010 is not set -CONFIG_HID_SENSOR_HUMIDITY=m -# CONFIG_HTS221 is not set -# CONFIG_HTU21 is not set -# CONFIG_SI7005 is not set -# CONFIG_SI7020 is not set -# end of Humidity sensors - -# -# Inertial measurement units -# -# CONFIG_ADIS16400 is not set -# CONFIG_ADIS16460 is not set -# CONFIG_ADIS16475 is not set -# CONFIG_ADIS16480 is not set -# CONFIG_BMI160_I2C is not set -# CONFIG_BMI160_SPI is not set -# CONFIG_BOSCH_BNO055_I2C is not set -# CONFIG_FXOS8700_I2C is not set -# CONFIG_FXOS8700_SPI is not set 
-# CONFIG_KMX61 is not set -# CONFIG_INV_ICM42600_I2C is not set -# CONFIG_INV_ICM42600_SPI is not set -# CONFIG_INV_MPU6050_I2C is not set -# CONFIG_INV_MPU6050_SPI is not set -# CONFIG_IIO_ST_LSM6DSX is not set -# CONFIG_IIO_ST_LSM9DS0 is not set -# end of Inertial measurement units - -# -# Light sensors -# -# CONFIG_ACPI_ALS is not set -# CONFIG_ADJD_S311 is not set -# CONFIG_ADUX1020 is not set -# CONFIG_AL3010 is not set -# CONFIG_AL3320A is not set -# CONFIG_APDS9300 is not set -# CONFIG_APDS9960 is not set -# CONFIG_AS73211 is not set -# CONFIG_BH1750 is not set -# CONFIG_BH1780 is not set -# CONFIG_CM32181 is not set -# CONFIG_CM3232 is not set -# CONFIG_CM3323 is not set -# CONFIG_CM3605 is not set -# CONFIG_CM36651 is not set -# CONFIG_GP2AP002 is not set -# CONFIG_GP2AP020A00F is not set -# CONFIG_SENSORS_ISL29018 is not set -# CONFIG_SENSORS_ISL29028 is not set -# CONFIG_ISL29125 is not set -CONFIG_HID_SENSOR_ALS=m -CONFIG_HID_SENSOR_PROX=m -# CONFIG_JSA1212 is not set -# CONFIG_ROHM_BU27008 is not set -# CONFIG_ROHM_BU27034 is not set -# CONFIG_RPR0521 is not set -# CONFIG_LTR501 is not set -# CONFIG_LTRF216A is not set -# CONFIG_LV0104CS is not set -# CONFIG_MAX44000 is not set -# CONFIG_MAX44009 is not set -# CONFIG_NOA1305 is not set -# CONFIG_OPT3001 is not set -# CONFIG_OPT4001 is not set -# CONFIG_PA12203001 is not set -# CONFIG_SI1133 is not set -# CONFIG_SI1145 is not set -# CONFIG_STK3310 is not set -# CONFIG_ST_UVIS25 is not set -# CONFIG_TCS3414 is not set -# CONFIG_TCS3472 is not set -# CONFIG_SENSORS_TSL2563 is not set -# CONFIG_TSL2583 is not set -# CONFIG_TSL2591 is not set -# CONFIG_TSL2772 is not set -# CONFIG_TSL4531 is not set -# CONFIG_US5182D is not set -# CONFIG_VCNL4000 is not set -# CONFIG_VCNL4035 is not set -# CONFIG_VEML6030 is not set -# CONFIG_VEML6070 is not set -# CONFIG_VL6180 is not set -# CONFIG_ZOPT2201 is not set -# end of Light sensors - -# -# Magnetometer sensors -# -# CONFIG_AK8974 is not set -# CONFIG_AK8975 is 
not set -# CONFIG_AK09911 is not set -# CONFIG_BMC150_MAGN_I2C is not set -# CONFIG_BMC150_MAGN_SPI is not set -# CONFIG_MAG3110 is not set -CONFIG_HID_SENSOR_MAGNETOMETER_3D=m -# CONFIG_MMC35240 is not set -# CONFIG_IIO_ST_MAGN_3AXIS is not set -# CONFIG_SENSORS_HMC5843_I2C is not set -# CONFIG_SENSORS_HMC5843_SPI is not set -# CONFIG_SENSORS_RM3100_I2C is not set -# CONFIG_SENSORS_RM3100_SPI is not set -# CONFIG_TI_TMAG5273 is not set -# CONFIG_YAMAHA_YAS530 is not set -# end of Magnetometer sensors - -# -# Multiplexers -# -# CONFIG_IIO_MUX is not set -# end of Multiplexers - -# -# Inclinometer sensors -# -CONFIG_HID_SENSOR_INCLINOMETER_3D=m -CONFIG_HID_SENSOR_DEVICE_ROTATION=m -# end of Inclinometer sensors - -# -# Triggers - standalone -# -# CONFIG_IIO_INTERRUPT_TRIGGER is not set -# CONFIG_IIO_SYSFS_TRIGGER is not set -# end of Triggers - standalone - -# -# Linear and angular position sensors -# -# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set -# end of Linear and angular position sensors - -# -# Digital potentiometers -# -# CONFIG_AD5110 is not set -# CONFIG_AD5272 is not set -# CONFIG_DS1803 is not set -# CONFIG_MAX5432 is not set -# CONFIG_MAX5481 is not set -# CONFIG_MAX5487 is not set -# CONFIG_MCP4018 is not set -# CONFIG_MCP4131 is not set -# CONFIG_MCP4531 is not set -# CONFIG_MCP41010 is not set -# CONFIG_TPL0102 is not set -# CONFIG_X9250 is not set -# end of Digital potentiometers - -# -# Digital potentiostats -# -# CONFIG_LMP91000 is not set -# end of Digital potentiostats - -# -# Pressure sensors -# -# CONFIG_ABP060MG is not set -# CONFIG_BMP280 is not set -# CONFIG_DLHL60D is not set -# CONFIG_DPS310 is not set -CONFIG_HID_SENSOR_PRESS=m -# CONFIG_HP03 is not set -# CONFIG_ICP10100 is not set -# CONFIG_MPL115_I2C is not set -# CONFIG_MPL115_SPI is not set -# CONFIG_MPL3115 is not set -# CONFIG_MPRLS0025PA is not set -# CONFIG_MS5611 is not set -# CONFIG_MS5637 is not set -# CONFIG_IIO_ST_PRESS is not set -# CONFIG_T5403 is not set -# 
CONFIG_HP206C is not set -# CONFIG_ZPA2326 is not set -# end of Pressure sensors - -# -# Lightning sensors -# -# CONFIG_AS3935 is not set -# end of Lightning sensors - -# -# Proximity and distance sensors -# -# CONFIG_IRSD200 is not set -# CONFIG_ISL29501 is not set -# CONFIG_LIDAR_LITE_V2 is not set -# CONFIG_MB1232 is not set -# CONFIG_PING is not set -# CONFIG_RFD77402 is not set -# CONFIG_SRF04 is not set -# CONFIG_SX9310 is not set -# CONFIG_SX9324 is not set -# CONFIG_SX9360 is not set -# CONFIG_SX9500 is not set -# CONFIG_SRF08 is not set -# CONFIG_VCNL3020 is not set -# CONFIG_VL53L0X_I2C is not set -# end of Proximity and distance sensors - -# -# Resolver to digital converters -# -# CONFIG_AD2S90 is not set -# CONFIG_AD2S1200 is not set -# end of Resolver to digital converters - -# -# Temperature sensors -# -# CONFIG_LTC2983 is not set -# CONFIG_MAXIM_THERMOCOUPLE is not set -CONFIG_HID_SENSOR_TEMP=m -# CONFIG_MLX90614 is not set -# CONFIG_MLX90632 is not set -# CONFIG_TMP006 is not set -# CONFIG_TMP007 is not set -# CONFIG_TMP117 is not set -# CONFIG_TSYS01 is not set -# CONFIG_TSYS02D is not set -# CONFIG_MAX30208 is not set -# CONFIG_MAX31856 is not set -# CONFIG_MAX31865 is not set -# end of Temperature sensors - -CONFIG_NTB=m -# CONFIG_NTB_MSI is not set -# CONFIG_NTB_AMD is not set -# CONFIG_NTB_IDT is not set -# CONFIG_NTB_INTEL is not set -# CONFIG_NTB_EPF is not set -# CONFIG_NTB_SWITCHTEC is not set -# CONFIG_NTB_PINGPONG is not set -# CONFIG_NTB_TOOL is not set -# CONFIG_NTB_PERF is not set -# CONFIG_NTB_TRANSPORT is not set -CONFIG_PWM=y -CONFIG_PWM_SYSFS=y -# CONFIG_PWM_DEBUG is not set -# CONFIG_PWM_CLK is not set -# CONFIG_PWM_DWC is not set -CONFIG_PWM_LPSS=m -CONFIG_PWM_LPSS_PCI=m -CONFIG_PWM_LPSS_PLATFORM=m -# CONFIG_PWM_PCA9685 is not set - -# -# IRQ chip support -# -# end of IRQ chip support - -# CONFIG_IPACK_BUS is not set -CONFIG_RESET_CONTROLLER=y -# CONFIG_RESET_TI_SYSCON is not set -# CONFIG_RESET_TI_TPS380X is not set - -# -# PHY 
Subsystem -# -# CONFIG_GENERIC_PHY is not set -# CONFIG_USB_LGM_PHY is not set -# CONFIG_PHY_CAN_TRANSCEIVER is not set - -# -# PHY drivers for Broadcom platforms -# -# CONFIG_BCM_KONA_USB2_PHY is not set -# end of PHY drivers for Broadcom platforms - -# CONFIG_PHY_PXA_28NM_HSIC is not set -# CONFIG_PHY_PXA_28NM_USB2 is not set -# CONFIG_PHY_CPCAP_USB is not set -# CONFIG_PHY_INTEL_LGM_EMMC is not set -# end of PHY Subsystem - -CONFIG_POWERCAP=y -CONFIG_INTEL_RAPL_CORE=m -CONFIG_INTEL_RAPL=m -# CONFIG_INTEL_RAPL_TPMI is not set -CONFIG_IDLE_INJECT=y -# CONFIG_MCB is not set - -# -# Performance monitor support -# -# CONFIG_DWC_PCIE_PMU is not set -# end of Performance monitor support - -CONFIG_RAS=y -CONFIG_RAS_CEC=y -# CONFIG_RAS_CEC_DEBUG is not set -# CONFIG_USB4 is not set - -# -# Android -# -# CONFIG_ANDROID_BINDER_IPC is not set -# end of Android - -CONFIG_LIBNVDIMM=y -CONFIG_BLK_DEV_PMEM=y -CONFIG_ND_CLAIM=y -CONFIG_ND_BTT=y -CONFIG_BTT=y -CONFIG_ND_PFN=y -CONFIG_NVDIMM_PFN=y -CONFIG_NVDIMM_DAX=y -CONFIG_NVDIMM_KEYS=y -# CONFIG_NVDIMM_SECURITY_TEST is not set -CONFIG_DAX=y -CONFIG_DEV_DAX=y -CONFIG_DEV_DAX_PMEM=y -CONFIG_DEV_DAX_HMEM=y -CONFIG_DEV_DAX_CXL=m -CONFIG_DEV_DAX_HMEM_DEVICES=y -CONFIG_DEV_DAX_KMEM=y -CONFIG_NVMEM=y -CONFIG_NVMEM_SYSFS=y - -# -# Layout Types -# -# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set -# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set -# end of Layout Types - -# CONFIG_NVMEM_RMEM is not set - -# -# HW tracing support -# -CONFIG_STM=m -CONFIG_STM_PROTO_BASIC=m -CONFIG_STM_PROTO_SYS_T=m -CONFIG_STM_DUMMY=m -CONFIG_STM_SOURCE_CONSOLE=m -CONFIG_STM_SOURCE_HEARTBEAT=m -CONFIG_STM_SOURCE_FTRACE=m -CONFIG_INTEL_TH=m -CONFIG_INTEL_TH_PCI=m -CONFIG_INTEL_TH_ACPI=m -CONFIG_INTEL_TH_GTH=m -CONFIG_INTEL_TH_STH=m -CONFIG_INTEL_TH_MSU=m -CONFIG_INTEL_TH_PTI=m -# CONFIG_INTEL_TH_DEBUG is not set -# end of HW tracing support - -# CONFIG_FPGA is not set -CONFIG_TEE=m -CONFIG_AMDTEE=m -# CONFIG_SIOX is not set -# CONFIG_SLIMBUS is not set -# 
CONFIG_INTERCONNECT is not set -# CONFIG_COUNTER is not set -# CONFIG_MOST is not set -# CONFIG_PECI is not set -# CONFIG_HTE is not set -# end of Device Drivers - -# -# File systems -# -CONFIG_DCACHE_WORD_ACCESS=y -# CONFIG_VALIDATE_FS_PARSER is not set -CONFIG_FS_IOMAP=y -CONFIG_BUFFER_HEAD=y -CONFIG_LEGACY_DIRECT_IO=y -# CONFIG_EXT2_FS is not set -CONFIG_EXT3_FS=m -# CONFIG_EXT3_FS_POSIX_ACL is not set -# CONFIG_EXT3_FS_SECURITY is not set -CONFIG_EXT4_FS=y -CONFIG_EXT4_USE_FOR_EXT2=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -# CONFIG_EXT4_DEBUG is not set -CONFIG_JBD2=y -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=y -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -CONFIG_XFS_FS=m -CONFIG_XFS_SUPPORT_V4=y -CONFIG_XFS_SUPPORT_ASCII_CI=y -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -# CONFIG_XFS_RT is not set -# CONFIG_XFS_ONLINE_SCRUB is not set -# CONFIG_XFS_WARN is not set -# CONFIG_XFS_DEBUG is not set -CONFIG_GFS2_FS=m -CONFIG_GFS2_FS_LOCKING_DLM=y -# CONFIG_OCFS2_FS is not set -CONFIG_BTRFS_FS=m -# CONFIG_BTRFS_FS_POSIX_ACL is not set -# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set -# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set -# CONFIG_BTRFS_DEBUG is not set -# CONFIG_BTRFS_ASSERT is not set -# CONFIG_BTRFS_FS_REF_VERIFY is not set -# CONFIG_NILFS2_FS is not set -# CONFIG_F2FS_FS is not set -# CONFIG_ZONEFS_FS is not set -CONFIG_FS_DAX=y -CONFIG_FS_DAX_PMD=y -CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=y -CONFIG_EXPORTFS_BLOCK_OPS=y -CONFIG_FILE_LOCKING=y -# CONFIG_FS_ENCRYPTION is not set -# CONFIG_FS_VERITY is not set -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -CONFIG_QUOTA=y -CONFIG_QUOTA_NETLINK_INTERFACE=y -# CONFIG_QUOTA_DEBUG is not set -CONFIG_QUOTA_TREE=y -# CONFIG_QFMT_V1 is not set -CONFIG_QFMT_V2=y -CONFIG_QUOTACTL=y -CONFIG_AUTOFS_FS=y -CONFIG_FUSE_FS=m -CONFIG_CUSE=m -CONFIG_VIRTIO_FS=m -CONFIG_FUSE_DAX=y -CONFIG_VIRT_FUSE=m -CONFIG_OVERLAY_FS=m 
-CONFIG_OVERLAY_FS_REDIRECT_DIR=y -CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y -CONFIG_OVERLAY_FS_INDEX=y -# CONFIG_OVERLAY_FS_NFS_EXPORT is not set -# CONFIG_OVERLAY_FS_XINO_AUTO is not set -# CONFIG_OVERLAY_FS_METACOPY is not set -# CONFIG_OVERLAY_FS_DEBUG is not set - -# -# Caches -# -CONFIG_NETFS_SUPPORT=m -CONFIG_NETFS_STATS=y -CONFIG_FSCACHE=m -CONFIG_FSCACHE_STATS=y -# CONFIG_FSCACHE_DEBUG is not set -CONFIG_CACHEFILES=m -# CONFIG_CACHEFILES_DEBUG is not set -# CONFIG_CACHEFILES_ERROR_INJECTION is not set -CONFIG_CACHEFILES_ONDEMAND=y -# end of Caches - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -# end of CD-ROM/DVD Filesystems - -# -# DOS/FAT/EXFAT/NT Filesystems -# -CONFIG_FAT_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="ascii" -# CONFIG_FAT_DEFAULT_UTF8 is not set -# CONFIG_EXFAT_FS is not set -# CONFIG_NTFS_FS is not set -CONFIG_NTFS3_FS=m -# CONFIG_NTFS3_64BIT_CLUSTER is not set -# CONFIG_NTFS3_LZX_XPRESS is not set -# CONFIG_NTFS3_FS_POSIX_ACL is not set -# end of DOS/FAT/EXFAT/NT Filesystems - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_PROC_VMCORE=y -CONFIG_PROC_VMCORE_DEVICE_DUMP=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_PROC_CHILDREN=y -CONFIG_PROC_PID_ARCH_STATUS=y -CONFIG_PROC_CPU_RESCTRL=y -CONFIG_KERNFS=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -# CONFIG_TMPFS_INODE64 is not set -# CONFIG_TMPFS_QUOTA is not set -CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y -CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y -CONFIG_ARCH_HAS_GIGANTIC_PAGE=y -CONFIG_CONFIGFS_FS=y -CONFIG_EFIVAR_FS=y -# end of Pseudo filesystems - -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ORANGEFS_FS is not set -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_ECRYPT_FS is not set -# CONFIG_HFS_FS is not set -# 
CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_JFFS2_FS is not set -# CONFIG_UBIFS_FS is not set -CONFIG_CRAMFS=m -CONFIG_CRAMFS_BLOCKDEV=y -# CONFIG_CRAMFS_MTD is not set -CONFIG_SQUASHFS=m -# CONFIG_SQUASHFS_FILE_CACHE is not set -CONFIG_SQUASHFS_FILE_DIRECT=y -CONFIG_SQUASHFS_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set -CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set -# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y -# CONFIG_SQUASHFS_LZ4 is not set -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -# CONFIG_SQUASHFS_ZSTD is not set -# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set -# CONFIG_SQUASHFS_EMBEDDED is not set -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_QNX6FS_FS is not set -CONFIG_RESCTRL_FS=y -CONFIG_RESCTRL_FS_PSEUDO_LOCK=y -# CONFIG_ROMFS_FS is not set -CONFIG_PSTORE=y -CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 -CONFIG_PSTORE_COMPRESS=y -CONFIG_PSTORE_CONSOLE=y -# CONFIG_PSTORE_PMSG is not set -# CONFIG_PSTORE_FTRACE is not set -CONFIG_PSTORE_RAM=y -# CONFIG_PSTORE_BLK is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -CONFIG_EROFS_FS=m -# CONFIG_EROFS_FS_DEBUG is not set -CONFIG_EROFS_FS_XATTR=y -CONFIG_EROFS_FS_POSIX_ACL=y -CONFIG_EROFS_FS_SECURITY=y -CONFIG_EROFS_FS_ZIP=y -CONFIG_EROFS_FS_ZIP_LZMA=y -CONFIG_EROFS_FS_ZIP_DEFLATE=y -CONFIG_EROFS_FS_ONDEMAND=y -# CONFIG_EROFS_FS_PCPU_KTHREAD is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=m -# CONFIG_NFS_V2 is not set -CONFIG_NFS_V3=m -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=m -# CONFIG_NFS_SWAP is not set -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y -CONFIG_PNFS_FILE_LAYOUT=m -CONFIG_PNFS_BLOCK=m -CONFIG_PNFS_FLEXFILE_LAYOUT=m 
-CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" -# CONFIG_NFS_V4_1_MIGRATION is not set -CONFIG_NFS_V4_SECURITY_LABEL=y -CONFIG_NFS_FSCACHE=y -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -CONFIG_NFS_DEBUG=y -CONFIG_NFS_DISABLE_UDP_SUPPORT=y -# CONFIG_NFS_V4_2_READ_PLUS is not set -CONFIG_NFSD=m -# CONFIG_NFSD_V2 is not set -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -CONFIG_NFSD_PNFS=y -# CONFIG_NFSD_BLOCKLAYOUT is not set -CONFIG_NFSD_SCSILAYOUT=y -# CONFIG_NFSD_FLEXFILELAYOUT is not set -# CONFIG_NFSD_V4_2_INTER_SSC is not set -CONFIG_NFSD_V4_SECURITY_LABEL=y -CONFIG_GRACE_PERIOD=m -CONFIG_LOCKD=m -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=m -CONFIG_NFS_COMMON=y -CONFIG_NFS_V4_2_SSC_HELPER=y -CONFIG_SUNRPC=m -CONFIG_SUNRPC_GSS=m -CONFIG_SUNRPC_BACKCHANNEL=y -CONFIG_RPCSEC_GSS_KRB5=m -CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y -# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set -# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set -CONFIG_SUNRPC_DEBUG=y -CONFIG_SUNRPC_XPRT_RDMA=m -CONFIG_CEPH_FS=m -# CONFIG_CEPH_FSCACHE is not set -CONFIG_CEPH_FS_POSIX_ACL=y -# CONFIG_CEPH_FS_SECURITY_LABEL is not set -CONFIG_CIFS=m -# CONFIG_CIFS_STATS2 is not set -CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y -CONFIG_CIFS_UPCALL=y -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -CONFIG_CIFS_DEBUG=y -# CONFIG_CIFS_DEBUG2 is not set -# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set -CONFIG_CIFS_DFS_UPCALL=y -# CONFIG_CIFS_SWN_UPCALL is not set -# CONFIG_CIFS_SMB_DIRECT is not set -# CONFIG_CIFS_FSCACHE is not set -# CONFIG_SMB_SERVER is not set -CONFIG_SMBFS=m -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m 
-CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_NLS_MAC_ROMAN=m -CONFIG_NLS_MAC_CELTIC=m -CONFIG_NLS_MAC_CENTEURO=m -CONFIG_NLS_MAC_CROATIAN=m -CONFIG_NLS_MAC_CYRILLIC=m -CONFIG_NLS_MAC_GAELIC=m -CONFIG_NLS_MAC_GREEK=m -CONFIG_NLS_MAC_ICELAND=m -CONFIG_NLS_MAC_INUIT=m -CONFIG_NLS_MAC_ROMANIAN=m -CONFIG_NLS_MAC_TURKISH=m -CONFIG_NLS_UTF8=m -CONFIG_NLS_UCS2_UTILS=m -CONFIG_DLM=m -CONFIG_DLM_DEBUG=y -# CONFIG_UNICODE is not set -CONFIG_IO_WQ=y -# end of File systems - -# -# Security options -# -CONFIG_KEYS=y -# CONFIG_KEYS_REQUEST_CACHE is not set -CONFIG_PERSISTENT_KEYRINGS=y -CONFIG_TRUSTED_KEYS=y -CONFIG_TRUSTED_KEYS_TPM=y -CONFIG_ENCRYPTED_KEYS=y -# CONFIG_USER_DECRYPTED_DATA is not set -# CONFIG_KEY_DH_OPERATIONS is not set -# CONFIG_SECURITY_DMESG_RESTRICT is not set -CONFIG_SECURITY=y -CONFIG_SECURITYFS=y -CONFIG_SECURITY_NETWORK=y -CONFIG_SECURITY_INFINIBAND=y -CONFIG_SECURITY_NETWORK_XFRM=y -CONFIG_SECURITY_PATH=y -CONFIG_INTEL_TXT=y -CONFIG_LSM_MMAP_MIN_ADDR=65535 -CONFIG_HARDENED_USERCOPY=y -CONFIG_FORTIFY_SOURCE=y -# CONFIG_STATIC_USERMODEHELPER is not set -CONFIG_SECURITY_SELINUX=y -CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_DEVELOP=y -CONFIG_SECURITY_SELINUX_AVC_STATS=y -CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 -CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 -# CONFIG_SECURITY_SELINUX_DEBUG is not set -# CONFIG_SECURITY_SMACK is not set -# 
CONFIG_SECURITY_TOMOYO is not set -# CONFIG_SECURITY_APPARMOR is not set -# CONFIG_SECURITY_LOADPIN is not set -CONFIG_SECURITY_YAMA=y -# CONFIG_SECURITY_SAFESETID is not set -# CONFIG_SECURITY_LOCKDOWN_LSM is not set -# CONFIG_SECURITY_LANDLOCK is not set -CONFIG_INTEGRITY=y -CONFIG_INTEGRITY_SIGNATURE=y -CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y -CONFIG_INTEGRITY_TRUSTED_KEYRING=y -CONFIG_INTEGRITY_PLATFORM_KEYRING=y -# CONFIG_INTEGRITY_MACHINE_KEYRING is not set -CONFIG_LOAD_UEFI_KEYS=y -CONFIG_INTEGRITY_AUDIT=y -CONFIG_IMA=y -# CONFIG_IMA_KEXEC is not set -CONFIG_IMA_MEASURE_PCR_IDX=10 -CONFIG_IMA_LSM_RULES=y -# CONFIG_IMA_NG_TEMPLATE is not set -CONFIG_IMA_SIG_TEMPLATE=y -CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" -# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set -CONFIG_IMA_DEFAULT_HASH_SHA256=y -# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set -# CONFIG_IMA_DEFAULT_HASH_SM3 is not set -CONFIG_IMA_DEFAULT_HASH="sha256" -CONFIG_IMA_WRITE_POLICY=y -CONFIG_IMA_READ_POLICY=y -CONFIG_IMA_APPRAISE=y -# CONFIG_IMA_ARCH_POLICY is not set -CONFIG_IMA_APPRAISE_BUILD_POLICY=y -# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set -# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set -# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set -# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set -CONFIG_IMA_APPRAISE_BOOTPARAM=y -# CONFIG_IMA_APPRAISE_MODSIG is not set -CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y -CONFIG_IMA_BLACKLIST_KEYRING=y -CONFIG_IMA_LOAD_X509=y -CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" -# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set -CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y -CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y -# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set -# CONFIG_IMA_DISABLE_HTABLE is not set -CONFIG_EVM=y -CONFIG_EVM_ATTR_FSUUID=y -# CONFIG_EVM_ADD_XATTRS is not set -CONFIG_EVM_LOAD_X509=y -CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" -CONFIG_DEFAULT_SECURITY_SELINUX=y -# CONFIG_DEFAULT_SECURITY_DAC is not set 
-CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" - -# -# Kernel hardening options -# - -# -# Memory initialization -# -CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y -CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y -CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y -CONFIG_INIT_STACK_NONE=y -# CONFIG_INIT_STACK_ALL_PATTERN is not set -# CONFIG_INIT_STACK_ALL_ZERO is not set -# CONFIG_GCC_PLUGIN_STACKLEAK is not set -# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set -# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set -CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y -# CONFIG_ZERO_CALL_USED_REGS is not set -# end of Memory initialization - -# -# Hardening of kernel data structures -# -CONFIG_LIST_HARDENED=y -CONFIG_BUG_ON_DATA_CORRUPTION=y -# end of Hardening of kernel data structures - -CONFIG_CC_HAS_RANDSTRUCT=y -CONFIG_RANDSTRUCT_NONE=y -# CONFIG_RANDSTRUCT_FULL is not set -# CONFIG_RANDSTRUCT_PERFORMANCE is not set -# end of Kernel hardening options -# end of Security options - -CONFIG_XOR_BLOCKS=m -CONFIG_ASYNC_CORE=m -CONFIG_ASYNC_MEMCPY=m -CONFIG_ASYNC_XOR=m -CONFIG_ASYNC_PQ=m -CONFIG_ASYNC_RAID6_RECOV=m -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_FIPS=y -CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" -# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_SIG2=y -CONFIG_CRYPTO_SKCIPHER=y -CONFIG_CRYPTO_SKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=m -CONFIG_CRYPTO_AKCIPHER2=y -CONFIG_CRYPTO_AKCIPHER=y -CONFIG_CRYPTO_KPP2=y -CONFIG_CRYPTO_KPP=m -CONFIG_CRYPTO_ACOMP2=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -CONFIG_CRYPTO_USER=m -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_NULL2=y -CONFIG_CRYPTO_PCRYPT=m -CONFIG_CRYPTO_CRYPTD=y -CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_SIMD=y -# end 
of Crypto core or helper - -# -# Public-key cryptography -# -CONFIG_CRYPTO_RSA=y -CONFIG_CRYPTO_DH=m -# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set -CONFIG_CRYPTO_ECC=m -CONFIG_CRYPTO_ECDH=m -# CONFIG_CRYPTO_ECDSA is not set -# CONFIG_CRYPTO_ECRDSA is not set -CONFIG_CRYPTO_SM2=y -# CONFIG_CRYPTO_CURVE25519 is not set -# end of Public-key cryptography - -# -# Block ciphers -# -CONFIG_CRYPTO_AES=y -# CONFIG_CRYPTO_AES_TI is not set -CONFIG_CRYPTO_ANUBIS=m -# CONFIG_CRYPTO_ARIA is not set -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_BLOWFISH_COMMON=m -CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST_COMMON=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_DES=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SEED=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SM4=m -CONFIG_CRYPTO_SM4_GENERIC=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_TWOFISH_COMMON=m -# end of Block ciphers - -# -# Length-preserving ciphers and modes -# -# CONFIG_CRYPTO_ADIANTUM is not set -CONFIG_CRYPTO_ARC4=m -CONFIG_CRYPTO_CHACHA20=m -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_CFB=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_CTS=y -CONFIG_CRYPTO_ECB=y -# CONFIG_CRYPTO_HCTR2 is not set -# CONFIG_CRYPTO_KEYWRAP is not set -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_OFB=y -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_XTS=y -# end of Length-preserving ciphers and modes - -# -# AEAD (authenticated encryption with associated data) ciphers -# -# CONFIG_CRYPTO_AEGIS128 is not set -CONFIG_CRYPTO_CHACHA20POLY1305=m -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=y -CONFIG_CRYPTO_GENIV=m -CONFIG_CRYPTO_SEQIV=m -CONFIG_CRYPTO_ECHAINIV=m -CONFIG_CRYPTO_ESSIV=m -# end of AEAD (authenticated encryption with associated data) ciphers - -# -# Hashes, digests, and MACs -# -CONFIG_CRYPTO_BLAKE2B=m -CONFIG_CRYPTO_CMAC=m -CONFIG_CRYPTO_GHASH=y -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_POLY1305=m -CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_SHA1=y 
-CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_SHA3=y -CONFIG_CRYPTO_SM3=y -CONFIG_CRYPTO_SM3_GENERIC=y -# CONFIG_CRYPTO_STREEBOG is not set -CONFIG_CRYPTO_VMAC=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_XXHASH=m -# end of Hashes, digests, and MACs - -# -# CRCs (cyclic redundancy checks) -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_CRCT10DIF=y -CONFIG_CRYPTO_CRC64_ROCKSOFT=m -# end of CRCs (cyclic redundancy checks) - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_LZO=y -# CONFIG_CRYPTO_842 is not set -CONFIG_CRYPTO_LZ4=m -CONFIG_CRYPTO_LZ4HC=m -CONFIG_CRYPTO_ZSTD=m -# end of Compression - -# -# Random number generation -# -CONFIG_CRYPTO_ANSI_CPRNG=m -CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_DRBG_HMAC=y -CONFIG_CRYPTO_DRBG_HASH=y -CONFIG_CRYPTO_DRBG_CTR=y -CONFIG_CRYPTO_DRBG=y -CONFIG_CRYPTO_JITTERENTROPY=y -# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set -# end of Random number generation - -# -# Userspace interface -# -CONFIG_CRYPTO_USER_API=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_USER_API_SKCIPHER=y -CONFIG_CRYPTO_USER_API_RNG=y -# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set -CONFIG_CRYPTO_USER_API_AEAD=y -CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y -# CONFIG_CRYPTO_STATS is not set -# end of Userspace interface - -CONFIG_CRYPTO_HASH_INFO=y - -# -# Accelerated Cryptographic Algorithms for CPU (x86) -# -CONFIG_CRYPTO_CURVE25519_X86=m -CONFIG_CRYPTO_AES_NI_INTEL=y -CONFIG_CRYPTO_BLOWFISH_X86_64=m -CONFIG_CRYPTO_CAMELLIA_X86_64=m -CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m -CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m -CONFIG_CRYPTO_CAST5_AVX_X86_64=m -CONFIG_CRYPTO_CAST6_AVX_X86_64=m -CONFIG_CRYPTO_DES3_EDE_X86_64=m -CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m -CONFIG_CRYPTO_SERPENT_AVX_X86_64=m -CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m -CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m -CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m -CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m -CONFIG_CRYPTO_TWOFISH_X86_64=m 
-CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m -CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m -# CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 is not set -# CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 is not set -# CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 is not set -CONFIG_CRYPTO_CHACHA20_X86_64=m -# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set -# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set -# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set -CONFIG_CRYPTO_BLAKE2S_X86=y -# CONFIG_CRYPTO_POLYVAL_CLMUL_NI is not set -CONFIG_CRYPTO_POLY1305_X86_64=m -CONFIG_CRYPTO_SHA1_SSSE3=y -CONFIG_CRYPTO_SHA256_SSSE3=y -CONFIG_CRYPTO_SHA512_SSSE3=y -CONFIG_CRYPTO_SM3_AVX_X86_64=m -CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m -CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m -CONFIG_CRYPTO_CRC32C_INTEL=m -CONFIG_CRYPTO_CRC32_PCLMUL=m -CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m -CONFIG_CRYPTO_SM2_ZHAOXIN_GMI=m -# end of Accelerated Cryptographic Algorithms for CPU (x86) - -CONFIG_CRYPTO_HW=y -CONFIG_CRYPTO_DEV_PADLOCK=m -CONFIG_CRYPTO_DEV_PADLOCK_AES=m -CONFIG_CRYPTO_DEV_PADLOCK_SHA=m -CONFIG_CRYPTO_DEV_ZHAOXIN=m -CONFIG_CRYPTO_DEV_ZHAOXIN_AES=m -CONFIG_CRYPTO_DEV_ZHAOXIN_SHA=m -# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set -# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set -CONFIG_CRYPTO_DEV_CCP=y -CONFIG_CRYPTO_DEV_CCP_DD=m -CONFIG_CRYPTO_DEV_SP_CCP=y -CONFIG_CRYPTO_DEV_CCP_CRYPTO=m -CONFIG_CRYPTO_DEV_SP_PSP=y -CONFIG_HYGON_GM=y -CONFIG_HYGON_PSP2CPU_CMD=y -CONFIG_TDM_DEV_HYGON=y -CONFIG_TDM_KERNEL_GUARD=m -CONFIG_CRYPTO_DEV_HCT=m -# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set -CONFIG_CRYPTO_DEV_NITROX=m -CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m -CONFIG_CRYPTO_DEV_QAT=m -CONFIG_CRYPTO_DEV_QAT_DH895xCC=m -CONFIG_CRYPTO_DEV_QAT_C3XXX=m -CONFIG_CRYPTO_DEV_QAT_C62X=m -CONFIG_CRYPTO_DEV_QAT_4XXX=m -# CONFIG_CRYPTO_DEV_QAT_420XX is not set -CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m -CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m -CONFIG_CRYPTO_DEV_QAT_C62XVF=m -# CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION is not set -CONFIG_CRYPTO_DEV_IAA_CRYPTO=m -CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS=y -CONFIG_CRYPTO_DEV_CHELSIO=m -# 
CONFIG_CRYPTO_DEV_VIRTIO is not set -# CONFIG_CRYPTO_DEV_SAFEXCEL is not set -# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set -CONFIG_CRYPTO_DEV_TSSE=m -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_X509_CERTIFICATE_PARSER=y -# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set -CONFIG_PKCS7_MESSAGE_PARSER=y -# CONFIG_PKCS7_TEST_KEY is not set -CONFIG_SIGNED_PE_FILE_VERIFICATION=y -# CONFIG_FIPS_SIGNATURE_SELFTEST is not set - -# -# Certificates for signature checking -# -CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" -CONFIG_MODULE_SIG_KEY_TYPE_RSA=y -# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set -CONFIG_SYSTEM_TRUSTED_KEYRING=y -CONFIG_SYSTEM_TRUSTED_KEYS="" -CONFIG_SYSTEM_EXTRA_CERTIFICATE=y -CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 -CONFIG_SECONDARY_TRUSTED_KEYRING=y -CONFIG_SYSTEM_BLACKLIST_KEYRING=y -CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" -# CONFIG_SYSTEM_REVOCATION_LIST is not set -# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set -# end of Certificates for signature checking - -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=m -CONFIG_RAID6_PQ_BENCHMARK=y -# CONFIG_PACKING is not set -CONFIG_BITREVERSE=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_NET_UTILS=y -CONFIG_CORDIC=m -# CONFIG_PRIME_NUMBERS is not set -CONFIG_RATIONAL=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_GENERIC_IOMAP=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_ARCH_HAS_FAST_MULTIPLIER=y -CONFIG_ARCH_USE_SYM_ANNOTATIONS=y - -# -# Crypto library routines -# -CONFIG_CRYPTO_LIB_UTILS=y -CONFIG_CRYPTO_LIB_AES=y -CONFIG_CRYPTO_LIB_ARC4=m -CONFIG_CRYPTO_LIB_GF128MUL=y -CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=y -CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y -CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m -CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m -CONFIG_CRYPTO_LIB_CHACHA=m -CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m -CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m -CONFIG_CRYPTO_LIB_CURVE25519=m -CONFIG_CRYPTO_LIB_DES=m -CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11 
-CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m -CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m -CONFIG_CRYPTO_LIB_POLY1305=m -CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m -CONFIG_CRYPTO_LIB_SHA1=y -CONFIG_CRYPTO_LIB_SHA256=y -# end of Crypto library routines - -CONFIG_CRC_CCITT=y -CONFIG_CRC16=y -CONFIG_CRC_T10DIF=y -CONFIG_CRC64_ROCKSOFT=m -CONFIG_CRC_ITU_T=m -CONFIG_CRC32=y -# CONFIG_CRC32_SELFTEST is not set -CONFIG_CRC32_SLICEBY8=y -# CONFIG_CRC32_SLICEBY4 is not set -# CONFIG_CRC32_SARWATE is not set -# CONFIG_CRC32_BIT is not set -CONFIG_CRC64=m -# CONFIG_CRC4 is not set -CONFIG_CRC7=m -CONFIG_LIBCRC32C=m -CONFIG_CRC8=m -CONFIG_XXHASH=y -# CONFIG_RANDOM32_SELFTEST is not set -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_LZ4_COMPRESS=m -CONFIG_LZ4HC_COMPRESS=m -CONFIG_LZ4_DECOMPRESS=y -CONFIG_ZSTD_COMMON=y -CONFIG_ZSTD_COMPRESS=m -CONFIG_ZSTD_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_MICROLZMA=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_DECOMPRESS_LZ4=y -CONFIG_DECOMPRESS_ZSTD=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_REED_SOLOMON=y -CONFIG_REED_SOLOMON_ENC8=y -CONFIG_REED_SOLOMON_DEC8=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=m -CONFIG_TEXTSEARCH_BM=m -CONFIG_TEXTSEARCH_FSM=m -CONFIG_BTREE=y -CONFIG_INTERVAL_TREE=y -CONFIG_XARRAY_MULTI=y -CONFIG_ASSOCIATIVE_ARRAY=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT=y -CONFIG_HAS_IOPORT_MAP=y -CONFIG_HAS_DMA=y -CONFIG_DMA_OPS=y -CONFIG_NEED_SG_DMA_FLAGS=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y -CONFIG_SWIOTLB=y -# CONFIG_SWIOTLB_DYNAMIC is not set -CONFIG_DMA_COHERENT_POOL=y -CONFIG_DMA_CMA=y -# CONFIG_DMA_NUMA_CMA is 
not set - -# -# Default contiguous memory area size: -# -CONFIG_CMA_SIZE_MBYTES=0 -CONFIG_CMA_SIZE_SEL_MBYTES=y -# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set -# CONFIG_CMA_SIZE_SEL_MIN is not set -# CONFIG_CMA_SIZE_SEL_MAX is not set -CONFIG_CMA_ALIGNMENT=8 -# CONFIG_DMA_API_DEBUG is not set -# CONFIG_DMA_MAP_BENCHMARK is not set -CONFIG_SGL_ALLOC=y -CONFIG_CHECK_SIGNATURE=y -CONFIG_CPUMASK_OFFSTACK=y -CONFIG_CPU_RMAP=y -CONFIG_DQL=y -CONFIG_GLOB=y -# CONFIG_GLOB_SELFTEST is not set -CONFIG_NLATTR=y -CONFIG_CLZ_TAB=y -CONFIG_IRQ_POLL=y -CONFIG_MPILIB=y -CONFIG_SIGNATURE=y -CONFIG_DIMLIB=y -CONFIG_OID_REGISTRY=y -CONFIG_UCS2_STRING=y -CONFIG_HAVE_GENERIC_VDSO=y -CONFIG_GENERIC_GETTIMEOFDAY=y -CONFIG_GENERIC_VDSO_TIME_NS=y -CONFIG_FONT_SUPPORT=y -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -CONFIG_SG_POOL=y -CONFIG_ARCH_HAS_PMEM_API=y -CONFIG_MEMREGION=y -CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION=y -CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y -CONFIG_ARCH_HAS_COPY_MC=y -CONFIG_ARCH_STACKWALK=y -CONFIG_STACKDEPOT=y -CONFIG_SBITMAP=y -CONFIG_PARMAN=m -CONFIG_OBJAGG=m -# end of Library routines - -CONFIG_PLDMFW=y -CONFIG_ASN1_ENCODER=y - -# -# Kernel hacking -# - -# -# printk and dmesg options -# -CONFIG_PRINTK_TIME=y -# CONFIG_PRINTK_CALLER is not set -# CONFIG_STACKTRACE_BUILD_ID is not set -CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 -CONFIG_CONSOLE_LOGLEVEL_QUIET=4 -CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 -CONFIG_BOOT_PRINTK_DELAY=y -CONFIG_DYNAMIC_DEBUG=y -CONFIG_DYNAMIC_DEBUG_CORE=y -CONFIG_SYMBOLIC_ERRNAME=y -CONFIG_DEBUG_BUGVERBOSE=y -# end of printk and dmesg options - -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_MISC=y - -# -# Compile-time checks and compiler options -# -CONFIG_DEBUG_INFO=y -CONFIG_AS_HAS_NON_CONST_LEB128=y -# CONFIG_DEBUG_INFO_NONE is not set -CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y -# CONFIG_DEBUG_INFO_DWARF4 is not set -# CONFIG_DEBUG_INFO_DWARF5 is not set -# CONFIG_DEBUG_INFO_REDUCED is not set -CONFIG_DEBUG_INFO_COMPRESSED_NONE=y -# 
CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set -# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set -# CONFIG_DEBUG_INFO_SPLIT is not set -CONFIG_DEBUG_INFO_BTF=y -# CONFIG_GDB_SCRIPTS is not set -CONFIG_FRAME_WARN=2048 -CONFIG_STRIP_ASM_SYMS=y -# CONFIG_READABLE_ASM is not set -# CONFIG_HEADERS_INSTALL is not set -CONFIG_DEBUG_SECTION_MISMATCH=y -CONFIG_SECTION_MISMATCH_WARN_ONLY=y -CONFIG_OBJTOOL=y -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -# end of Compile-time checks and compiler options - -# -# Generic Kernel Debugging Instruments -# -CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_MAGIC_SYSRQ_SERIAL=y -CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" -CONFIG_DEBUG_FS=y -CONFIG_DEBUG_FS_ALLOW_ALL=y -# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set -# CONFIG_DEBUG_FS_ALLOW_NONE is not set -CONFIG_HAVE_ARCH_KGDB=y -CONFIG_KGDB=y -CONFIG_KGDB_HONOUR_BLOCKLIST=y -CONFIG_KGDB_SERIAL_CONSOLE=y -CONFIG_KGDB_TESTS=y -# CONFIG_KGDB_TESTS_ON_BOOT is not set -CONFIG_KGDB_LOW_LEVEL_TRAP=y -CONFIG_KGDB_KDB=y -CONFIG_KDB_DEFAULT_ENABLE=0x1 -CONFIG_KDB_KEYBOARD=y -CONFIG_KDB_CONTINUE_CATASTROPHIC=0 -CONFIG_ARCH_HAS_EARLY_DEBUG=y -CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y -# CONFIG_UBSAN is not set -CONFIG_HAVE_ARCH_KCSAN=y -CONFIG_HAVE_KCSAN_COMPILER=y -# CONFIG_KCSAN is not set -# end of Generic Kernel Debugging Instruments - -# -# Networking Debugging -# -# CONFIG_NET_DEV_REFCNT_TRACKER is not set -# CONFIG_NET_NS_REFCNT_TRACKER is not set -# CONFIG_DEBUG_NET is not set -# end of Networking Debugging - -# -# Memory Debugging -# -# CONFIG_PAGE_EXTENSION is not set -# CONFIG_DEBUG_PAGEALLOC is not set -CONFIG_SLUB_DEBUG=y -# CONFIG_SLUB_DEBUG_ON is not set -# CONFIG_PAGE_OWNER is not set -# CONFIG_PAGE_TABLE_CHECK is not set -# CONFIG_PAGE_POISONING is not set -# CONFIG_DEBUG_PAGE_REF is not set -# CONFIG_DEBUG_RODATA_TEST is not set -CONFIG_ARCH_HAS_DEBUG_WX=y -# CONFIG_DEBUG_WX is not set -CONFIG_GENERIC_PTDUMP=y -# CONFIG_PTDUMP_DEBUGFS is not set 
-CONFIG_HAVE_DEBUG_KMEMLEAK=y -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_PER_VMA_LOCK_STATS is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_SHRINKER_DEBUG is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_SCHED_STACK_END_CHECK is not set -CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_VM_PGTABLE is not set -CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -# CONFIG_DEBUG_VIRTUAL is not set -CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_DEBUG_PER_CPU_MAPS is not set -CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y -# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set -CONFIG_HAVE_ARCH_KASAN=y -CONFIG_HAVE_ARCH_KASAN_VMALLOC=y -CONFIG_CC_HAS_KASAN_GENERIC=y -CONFIG_CC_HAS_KASAN_SW_TAGS=y -CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y -# CONFIG_KASAN is not set -CONFIG_HAVE_ARCH_KFENCE=y -CONFIG_KFENCE=y -CONFIG_KFENCE_SAMPLE_INTERVAL=0 -CONFIG_KFENCE_NUM_OBJECTS=255 -CONFIG_KFENCE_DEFERRABLE=y -CONFIG_KFENCE_STRESS_TEST_FAULTS=0 -CONFIG_HAVE_ARCH_KMSAN=y -# end of Memory Debugging - -CONFIG_DEBUG_SHIRQ=y - -# -# Debug Oops, Lockups and Hangs -# -CONFIG_PANIC_ON_OOPS=y -CONFIG_PANIC_ON_OOPS_VALUE=1 -CONFIG_PANIC_TIMEOUT=1 -CONFIG_LOCKUP_DETECTOR=y -CONFIG_SOFTLOCKUP_DETECTOR=y -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y -# CONFIG_SDEI_WATCHDOG is not set -CONFIG_HARDLOCKUP_DETECTOR=y -# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set -CONFIG_HARDLOCKUP_DETECTOR_PERF=y -# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set -# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set -CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y -CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y -CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -# CONFIG_WQ_WATCHDOG is not set -# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set -# CONFIG_TEST_LOCKUP is not set -# end of Debug Oops, Lockups and Hangs - -# -# Scheduler Debugging -# -CONFIG_SCHED_DEBUG=y 
-CONFIG_SCHED_INFO=y -CONFIG_SCHEDSTATS=y -CONFIG_SCHED_ACPU=y -# end of Scheduler Debugging - -# CONFIG_DEBUG_TIMEKEEPING is not set -# CONFIG_DEBUG_PREEMPT is not set - -# -# Lock Debugging (spinlocks, mutexes, etc...) -# -CONFIG_LOCK_DEBUGGING_SUPPORT=y -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -# CONFIG_DEBUG_RWSEMS is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_LOCK_TORTURE_TEST is not set -# CONFIG_WW_MUTEX_SELFTEST is not set -# CONFIG_SCF_TORTURE_TEST is not set -# CONFIG_CSD_LOCK_WAIT_DEBUG is not set -# end of Lock Debugging (spinlocks, mutexes, etc...) - -# CONFIG_NMI_CHECK_CPU is not set -# CONFIG_DEBUG_IRQFLAGS is not set -CONFIG_STACKTRACE=y -# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set -# CONFIG_DEBUG_KOBJECT is not set - -# -# Debug kernel data structures -# -CONFIG_DEBUG_LIST=y -# CONFIG_DEBUG_PLIST is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_MAPLE_TREE is not set -# end of Debug kernel data structures - -# -# RCU Debugging -# -# CONFIG_RCU_SCALE_TEST is not set -# CONFIG_RCU_TORTURE_TEST is not set -# CONFIG_RCU_REF_SCALE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 -# CONFIG_RCU_CPU_STALL_CPUTIME is not set -# CONFIG_RCU_TRACE is not set -# CONFIG_RCU_EQS_DEBUG is not set -# end of RCU Debugging - -# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -# CONFIG_LATENCYTOP is not set -# CONFIG_DEBUG_CGROUP_REF is not set -CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y -CONFIG_HAVE_RETHOOK=y -CONFIG_RETHOOK=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y -CONFIG_HAVE_DYNAMIC_FTRACE=y 
-CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y -CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_FENTRY=y -CONFIG_HAVE_OBJTOOL_MCOUNT=y -CONFIG_HAVE_OBJTOOL_NOP_MCOUNT=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_HAVE_BUILDTIME_MCOUNT_SORT=y -CONFIG_BUILDTIME_MCOUNT_SORT=y -CONFIG_TRACER_MAX_TRACE=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -# CONFIG_BOOTTIME_TRACING is not set -CONFIG_FUNCTION_TRACER=y -CONFIG_FUNCTION_GRAPH_TRACER=y -# CONFIG_FUNCTION_GRAPH_RETVAL is not set -CONFIG_DYNAMIC_FTRACE=y -CONFIG_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y -CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y -# CONFIG_FPROBE is not set -CONFIG_FUNCTION_PROFILER=y -CONFIG_STACK_TRACER=y -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_PREEMPT_TRACER is not set -CONFIG_SCHED_TRACER=y -CONFIG_HWLAT_TRACER=y -CONFIG_OSNOISE_TRACER=y -CONFIG_TIMERLAT_TRACER=y -# CONFIG_MMIOTRACE is not set -CONFIG_FTRACE_SYSCALLS=y -CONFIG_TRACER_SNAPSHOT=y -# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_PROBE_EVENTS_BTF_ARGS=y -CONFIG_KPROBE_EVENTS=y -# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set -CONFIG_UPROBE_EVENTS=y -CONFIG_BPF_EVENTS=y -CONFIG_DYNAMIC_EVENTS=y -CONFIG_PROBE_EVENTS=y -# CONFIG_BPF_KPROBE_OVERRIDE is not set -CONFIG_FTRACE_MCOUNT_RECORD=y -CONFIG_FTRACE_MCOUNT_USE_CC=y -CONFIG_TRACING_MAP=y -CONFIG_SYNTH_EVENTS=y -# CONFIG_USER_EVENTS is not set -CONFIG_HIST_TRIGGERS=y -# CONFIG_TRACE_EVENT_INJECT is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -CONFIG_RING_BUFFER_BENCHMARK=m -# CONFIG_TRACE_EVAL_MAP_FILE is not set -# CONFIG_FTRACE_RECORD_RECURSION is not 
set -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_FTRACE_SORT_STARTUP_TEST is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set -# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set -# CONFIG_PREEMPTIRQ_DELAY_TEST is not set -# CONFIG_SYNTH_EVENT_GEN_TEST is not set -# CONFIG_KPROBE_EVENT_GEN_TEST is not set -# CONFIG_HIST_TRIGGERS_DEBUG is not set -# CONFIG_RV is not set -CONFIG_PROVIDE_OHCI1394_DMA_INIT=y -# CONFIG_SAMPLES is not set -CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y -CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y -CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y -CONFIG_STRICT_DEVMEM=y -# CONFIG_IO_STRICT_DEVMEM is not set - -# -# x86 Debugging -# -CONFIG_EARLY_PRINTK_USB=y -# CONFIG_X86_VERBOSE_BOOTUP is not set -CONFIG_EARLY_PRINTK=y -CONFIG_EARLY_PRINTK_DBGP=y -CONFIG_EARLY_PRINTK_USB_XDBC=y -# CONFIG_EFI_PGT_DUMP is not set -# CONFIG_DEBUG_TLBFLUSH is not set -CONFIG_HAVE_MMIOTRACE_SUPPORT=y -CONFIG_X86_DECODER_SELFTEST=y -CONFIG_IO_DELAY_0X80=y -# CONFIG_IO_DELAY_0XED is not set -# CONFIG_IO_DELAY_UDELAY is not set -# CONFIG_IO_DELAY_NONE is not set -CONFIG_DEBUG_BOOT_PARAMS=y -# CONFIG_CPA_DEBUG is not set -# CONFIG_DEBUG_ENTRY is not set -# CONFIG_DEBUG_NMI_SELFTEST is not set -# CONFIG_X86_DEBUG_FPU is not set -# CONFIG_PUNIT_ATOM_DEBUG is not set -CONFIG_UNWINDER_ORC=y -# CONFIG_UNWINDER_FRAME_POINTER is not set -# end of x86 Debugging - -# -# Kernel Testing and Coverage -# -# CONFIG_KUNIT is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -CONFIG_FUNCTION_ERROR_INJECTION=y -# CONFIG_FAULT_INJECTION is not set -CONFIG_ARCH_HAS_KCOV=y -CONFIG_CC_HAS_SANCOV_TRACE_PC=y -# CONFIG_KCOV is not set -CONFIG_RUNTIME_TESTING_MENU=y -# CONFIG_TEST_DHRY is not set -# CONFIG_LKDTM is not set -# CONFIG_TEST_MIN_HEAP is not set -# CONFIG_TEST_DIV64 is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_TEST_REF_TRACKER is not set -# CONFIG_RBTREE_TEST is not set -# CONFIG_REED_SOLOMON_TEST is not set -# CONFIG_INTERVAL_TREE_TEST is not set -# CONFIG_PERCPU_TEST 
is not set -CONFIG_ATOMIC64_SELFTEST=y -CONFIG_ASYNC_RAID6_TEST=m -# CONFIG_TEST_HEXDUMP is not set -# CONFIG_STRING_SELFTEST is not set -# CONFIG_TEST_STRING_HELPERS is not set -CONFIG_TEST_KSTRTOX=y -# CONFIG_TEST_PRINTF is not set -# CONFIG_TEST_SCANF is not set -# CONFIG_TEST_BITMAP is not set -# CONFIG_TEST_UUID is not set -# CONFIG_TEST_XARRAY is not set -# CONFIG_TEST_MAPLE_TREE is not set -# CONFIG_TEST_RHASHTABLE is not set -# CONFIG_TEST_IDA is not set -# CONFIG_TEST_PARMAN is not set -# CONFIG_TEST_LKM is not set -# CONFIG_TEST_BITOPS is not set -# CONFIG_TEST_VMALLOC is not set -# CONFIG_TEST_USER_COPY is not set -CONFIG_TEST_BPF=m -# CONFIG_TEST_BLACKHOLE_DEV is not set -# CONFIG_FIND_BIT_BENCHMARK is not set -# CONFIG_TEST_FIRMWARE is not set -# CONFIG_TEST_SYSCTL is not set -# CONFIG_TEST_UDELAY is not set -# CONFIG_TEST_STATIC_KEYS is not set -# CONFIG_TEST_DYNAMIC_DEBUG is not set -# CONFIG_TEST_KMOD is not set -# CONFIG_TEST_MEMCAT_P is not set -CONFIG_TEST_LIVEPATCH=m -# CONFIG_TEST_OBJAGG is not set -# CONFIG_TEST_MEMINIT is not set -# CONFIG_TEST_HMM is not set -# CONFIG_TEST_FREE_PAGES is not set -# CONFIG_TEST_FPU is not set -# CONFIG_TEST_CLOCKSOURCE_WATCHDOG is not set -CONFIG_ARCH_USE_MEMTEST=y -# CONFIG_MEMTEST is not set -# CONFIG_HYPERV_TESTING is not set -# end of Kernel Testing and Coverage - -# -# Rust hacking -# -# end of Rust hacking -# end of Kernel hacking -- Gitee From 3dea9ce45d94fd5defcce632a653bcb67a7fb0be Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Fri, 23 Aug 2024 11:12:28 +0800 Subject: [PATCH 1187/2138] anolis: config: open hyperv on arm64 ANBZ: #9806 Open hyperv and pci_hyperv kconfig on arm64 by default. 
Signed-off-by: Guixin Liu Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3759 --- anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_HYPERV | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_BALLOON | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_UTILS | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_VSOCKETS | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_UIO_HV_GENERIC | 1 + anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_HYPERV | 0 .../configs/L1-RECOMMEND/{x86 => default}/CONFIG_HYPERV_KEYBOARD | 0 .../configs/L1-RECOMMEND/{x86 => default}/CONFIG_HYPERV_STORAGE | 0 anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_PCI_HYPERV | 0 .../L1-RECOMMEND/{x86 => default}/CONFIG_PCI_HYPERV_INTERFACE | 0 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_HYPERV_MOUSE | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_HYPERV_NET | 1 + anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_DRM_HYPERV | 0 .../configs/L2-OPTIONAL/{x86 => default}/CONFIG_HYPERV_TESTING | 0 15 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_HYPERV delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_BALLOON create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_UTILS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_VSOCKETS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_UIO_HV_GENERIC rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_HYPERV (100%) rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_HYPERV_KEYBOARD (100%) rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_HYPERV_STORAGE (100%) rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_PCI_HYPERV (100%) rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_PCI_HYPERV_INTERFACE (100%) create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_HYPERV_MOUSE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HYPERV_NET rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_DRM_HYPERV (100%) rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_HYPERV_TESTING (100%) diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_HYPERV b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_HYPERV new file mode 100644 index 000000000000..5f8bd917319b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FB_HYPERV @@ -0,0 +1 @@ +# CONFIG_FB_HYPERV is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV deleted file mode 100644 index 94db0ce5eef1..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_HYPERV is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_BALLOON b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_BALLOON new file mode 100644 index 000000000000..c2575260ff60 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_BALLOON @@ -0,0 +1 @@ +# CONFIG_HYPERV_BALLOON is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_UTILS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_UTILS new file mode 100644 index 000000000000..fc3232dbe556 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_UTILS @@ -0,0 +1 @@ +# CONFIG_HYPERV_UTILS is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_VSOCKETS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_VSOCKETS new file mode 100644 index 000000000000..ad138e65d226 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HYPERV_VSOCKETS @@ -0,0 +1 @@ +# CONFIG_HYPERV_VSOCKETS is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UIO_HV_GENERIC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UIO_HV_GENERIC new file mode 100644 index 000000000000..ba62e18258ee --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UIO_HV_GENERIC @@ -0,0 +1 @@ +# CONFIG_UIO_HV_GENERIC is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV b/anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV rename to anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_KEYBOARD b/anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV_KEYBOARD similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_KEYBOARD rename to anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV_KEYBOARD diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_STORAGE b/anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV_STORAGE similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_HYPERV_STORAGE rename to anolis/configs/L1-RECOMMEND/default/CONFIG_HYPERV_STORAGE diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_HYPERV b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_HYPERV similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_HYPERV rename to anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_HYPERV diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_HYPERV_INTERFACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_HYPERV_INTERFACE similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_PCI_HYPERV_INTERFACE rename to anolis/configs/L1-RECOMMEND/default/CONFIG_PCI_HYPERV_INTERFACE diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_HYPERV_MOUSE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_HYPERV_MOUSE new file mode 100644 index 000000000000..4207656134b8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HID_HYPERV_MOUSE @@ -0,0 +1 @@ +# CONFIG_HID_HYPERV_MOUSE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HYPERV_NET b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HYPERV_NET new file mode 100644 index 000000000000..4868361bd13d --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HYPERV_NET @@ -0,0 +1 @@ +# CONFIG_HYPERV_NET is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_HYPERV b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_HYPERV similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_DRM_HYPERV rename to anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_HYPERV diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_TESTING b/anolis/configs/L2-OPTIONAL/default/CONFIG_HYPERV_TESTING similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_HYPERV_TESTING rename to anolis/configs/L2-OPTIONAL/default/CONFIG_HYPERV_TESTING -- Gitee From e0847e5e54867c400fe1d96eb290915f8bafd12c Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Thu, 22 Aug 2024 19:04:00 +0800 Subject: [PATCH 1188/2138] anolis: configs: set CONFIG_DEV_DAX_{HMEM,KMEM} to m ANBZ: #9805 Set them to m to allow out-of-tree modules override them. Signed-off-by: Qiao Ma Reviewed-by: Teng Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3758 --- .../configs/L1-RECOMMEND/{arm64 => default}/CONFIG_DEV_DAX_HMEM | 0 anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_HMEM | 1 - anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_KMEM | 2 +- 3 files changed, 1 insertion(+), 2 deletions(-) rename anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_DEV_DAX_HMEM (100%) delete mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_HMEM diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_HMEM b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_HMEM similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEV_DAX_HMEM rename to anolis/configs/L1-RECOMMEND/default/CONFIG_DEV_DAX_HMEM diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_HMEM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_HMEM deleted file mode 100644 index ebdd7cb1a555..000000000000 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_HMEM +++ /dev/null @@ -1 +0,0 @@ -CONFIG_DEV_DAX_HMEM=y diff --git 
a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_KMEM b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_KMEM index ab22bc2e6d92..e301a496b274 100644 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_KMEM +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEV_DAX_KMEM @@ -1 +1 @@ -CONFIG_DEV_DAX_KMEM=y +CONFIG_DEV_DAX_KMEM=m -- Gitee From 19f1d96ba74d7ce5c264b4d7b0d3017c37521e9e Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Thu, 22 Aug 2024 20:16:13 +0800 Subject: [PATCH 1189/2138] anolis: configs: add checking rule for CXL related configs ANBZ: #9805 It is important to make them to m, so add a checking rule to ensure they are m, to avoid unintentional errors and modifications. Signed-off-by: Qiao Ma Reviewed-by: Teng Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3758 --- anolis/configs/examination/EXTRA/x86.config | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 anolis/configs/examination/EXTRA/x86.config diff --git a/anolis/configs/examination/EXTRA/x86.config b/anolis/configs/examination/EXTRA/x86.config new file mode 100644 index 000000000000..67d4de0705b3 --- /dev/null +++ b/anolis/configs/examination/EXTRA/x86.config @@ -0,0 +1,12 @@ +## These CXL & DEV_DAX* related kconfigs must be m to allow out-of-tree modules override them. +CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +CONFIG_CXL_ACPI=m +CONFIG_CXL_PMEM=m +CONFIG_CXL_MEM=m +CONFIG_CXL_PORT=m +CONFIG_CXL_REGION=y +CONFIG_CXL_PMU=m +CONFIG_DEV_DAX_CXL=m +CONFIG_DEV_DAX_HMEM=m +CONFIG_DEV_DAX_KMEM=m \ No newline at end of file -- Gitee From 07d40c661da89acc3229bf5f7e18fa406c782455 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Thu, 22 Aug 2024 20:17:37 +0800 Subject: [PATCH 1190/2138] anolis: configs: teach `make dist-configs-check` check extra config rules. ANBZ: #9805 Teach command `make dist-configs-check` to check to new checking rule located in anolis/configs/examination/EXTRA/ directory. 
Signed-off-by: Qiao Ma Reviewed-by: Teng Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3758 --- anolis/configs/Makefile | 2 +- anolis/configs/examination/configs-check.sh | 24 +++++++++++++++------ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/anolis/configs/Makefile b/anolis/configs/Makefile index 797fdec6a21a..a6af2befecd4 100644 --- a/anolis/configs/Makefile +++ b/anolis/configs/Makefile @@ -25,7 +25,7 @@ dist-configs-import: dist-configs-export: @sh scripts/export_configs.sh -dist-configs-check: +dist-configs-check: dist-configs @sh examination/configs-check.sh dist-configs-modify: diff --git a/anolis/configs/examination/configs-check.sh b/anolis/configs/examination/configs-check.sh index fff2579be94a..468b8a00aa27 100644 --- a/anolis/configs/examination/configs-check.sh +++ b/anolis/configs/examination/configs-check.sh @@ -15,13 +15,25 @@ final_exit_status=0 function check_arch() { local arch=$1 + + local opt="--rules ${SCRIPT_DIR}/L0-MANDATORY/${arch}.config + --level L0-MANDATORY + --rules ${SCRIPT_DIR}/L1-RECOMMEND/${arch}.config + --level L1-RECOMMEND " + + if [ -f ${SCRIPT_DIR}/EXTRA/${arch}.config ]; then + opt="${opt} --rules ${SCRIPT_DIR}/EXTRA/${arch}.config + --level L0-MANDATORY " + fi + + if [ -f ${SCRIPT_DIR}/../../../arch/${arch}/configs/anolis_defconfig ]; then + opt="${opt} ${SCRIPT_DIR}/../../../arch/${arch}/configs/anolis_defconfig" + else + opt="${opt} ${DIST_OUTPUT}/kernel-ANCK-generic-${arch}.config" + fi + echo "* Checking configs for arch: $arch" - python3 ${SCRIPT_DIR}/anolis_kconfig_check.py check \ - --rules ${SCRIPT_DIR}/L0-MANDATORY/${arch}.config \ - --level L0-MANDATORY \ - --rules ${SCRIPT_DIR}/L1-RECOMMEND/${arch}.config \ - --level L1-RECOMMEND \ - ${SCRIPT_DIR}/../../../arch/${arch}/configs/anolis_defconfig + python3 ${SCRIPT_DIR}/anolis_kconfig_check.py check ${opt} local ret=$? 
if [ $final_exit_status -eq 0 ]; then -- Gitee From 9fbd216affabec218cfe58f6047f0b1ec44aaf22 Mon Sep 17 00:00:00 2001 From: Qinyun Tan Date: Tue, 27 Aug 2024 14:01:12 +0800 Subject: [PATCH 1191/2138] anolis: configs: bugfix for gcov and 64k kconfig_layout ANBZ: #9673 From commit 4da35abc6540 ("anolis: configs: add 64k config") and commit a1695d013580 ("anolis: configs: add gcov configs"), there has a mistake about gcov and 64k kconfig layout which is these kconfig are not based on debug config. Just correct it. Fixes: 4da35abc6540 ("anolis: configs: add 64k config") Fixes: a1695d013580 ("anolis: configs: add gcov configs") Signed-off-by: Qinyun Tan Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3775 --- anolis/configs/scripts/kconfig_layout | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/anolis/configs/scripts/kconfig_layout b/anolis/configs/scripts/kconfig_layout index 67772f224d11..a1933e7412ab 100644 --- a/anolis/configs/scripts/kconfig_layout +++ b/anolis/configs/scripts/kconfig_layout @@ -4,5 +4,5 @@ ANCK/debug-x86 x86 null generic/default;generic/x86;debug/default;debug/x86 ANCK/gcov-x86 x86 null generic/default;generic/x86;gcov/default ANCK/generic-arm64 arm64 null generic/default;generic/arm64 ANCK/debug-arm64 arm64 null generic/default;generic/arm64;debug/default;debug/arm64 -ANCK/gcov-arm64 arm64 null generic/default;generic/arm64;debug/default;debug/arm64;gcov/default -ANCK/arm64-64k arm64 null generic/default;generic/arm64;debug/default;debug/arm64;64k/arm64 +ANCK/gcov-arm64 arm64 null generic/default;generic/arm64;gcov/default +ANCK/arm64-64k arm64 null generic/default;generic/arm64;64k/arm64 -- Gitee From de4df3850a008c8149f3f9f10fdc8d6802f96d9a Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Mon, 26 Aug 2024 15:13:22 +0800 Subject: [PATCH 1192/2138] anolis: kabi: reserve some space for kABI ANBZ: #9814 Reserve some space for kABI. 
Signed-off-by: Guixin Liu Reviewed-by: Joseph Qi Reviewed-by: Tianchen Ding Reviewed-by: Yi Tao Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3764 --- arch/arm64/include/asm/kvm_host.h | 2 + arch/arm64/include/asm/probes.h | 4 + arch/x86/include/asm/kprobes.h | 3 + arch/x86/include/asm/kvm_host.h | 2 + arch/x86/include/asm/thread_info.h | 2 + block/blk-cgroup.h | 14 ++++ block/blk-mq.h | 4 + block/elevator.h | 10 +++ drivers/block/brd.c | 3 + drivers/pci/controller/dwc/pcie-designware.h | 6 ++ drivers/pci/pci.h | 9 +++ drivers/ptp/ptp_private.h | 5 ++ fs/eventfd.c | 3 + fs/mount.h | 3 + fs/ubifs/debug.h | 5 ++ fs/ubifs/ubifs.h | 12 +++ include/acpi/acpi_bus.h | 2 + include/drm/drm_atomic.h | 4 + include/drm/drm_client.h | 2 + include/drm/drm_connector.h | 24 ++++++ include/drm/drm_crtc.h | 4 + include/drm/drm_modeset_helper_vtables.h | 2 + include/drm/drm_modeset_lock.h | 2 + include/drm/drm_plane.h | 2 + include/drm/drm_writeback.h | 13 ++++ include/drm/ttm/ttm_resource.h | 3 + include/linux/backing-dev-defs.h | 11 +++ include/linux/binfmts.h | 3 + include/linux/bio.h | 10 +++ include/linux/blk-integrity.h | 3 + include/linux/blk-mq.h | 23 ++++++ include/linux/blk_types.h | 11 +++ include/linux/blkdev.h | 28 +++++++ include/linux/bpf-cgroup-defs.h | 4 + include/linux/bpf.h | 39 ++++++++++ include/linux/bpf_local_storage.h | 2 + include/linux/bpf_verifier.h | 10 +++ include/linux/cgroup-defs.h | 37 ++++++++++ include/linux/coredump.h | 2 + include/linux/cpuidle.h | 18 +++++ include/linux/cred.h | 9 +++ include/linux/crypto.h | 2 + include/linux/dcache.h | 9 +++ include/linux/delayacct.h | 7 ++ include/linux/device.h | 17 +++++ include/linux/device/bus.h | 3 + include/linux/device/class.h | 5 ++ include/linux/device/driver.h | 5 ++ include/linux/dma-fence.h | 2 + include/linux/dma-map-ops.h | 7 ++ include/linux/energy_model.h | 5 ++ include/linux/ethtool.h | 21 ++++++ include/linux/exportfs.h | 4 + include/linux/fb.h | 2 + 
include/linux/filelock.h | 10 +++ include/linux/filter.h | 3 + include/linux/fs.h | 33 +++++++++ include/linux/ftrace.h | 2 + include/linux/fwnode.h | 13 ++++ include/linux/hrtimer.h | 11 +++ include/linux/i2c.h | 9 +++ include/linux/if_macvlan.h | 3 + include/linux/inetdevice.h | 5 ++ include/linux/interrupt.h | 2 + include/linux/iommu.h | 17 +++++ include/linux/ioport.h | 7 ++ include/linux/ipv6.h | 6 ++ include/linux/irq.h | 12 +++ include/linux/irq_work.h | 5 ++ include/linux/irqdesc.h | 5 ++ include/linux/irqdomain.h | 6 ++ include/linux/kernfs.h | 11 +++ include/linux/key-type.h | 4 + include/linux/key.h | 3 + include/linux/kobject.h | 15 ++++ include/linux/kprobes.h | 2 + include/linux/kvm_host.h | 6 ++ include/linux/list_lru.h | 3 + include/linux/memcontrol.h | 16 ++++ include/linux/mempolicy.h | 4 + include/linux/mempool.h | 5 ++ include/linux/memremap.h | 13 ++++ include/linux/mm.h | 9 +++ include/linux/mm_types.h | 14 ++++ include/linux/mmzone.h | 13 ++++ include/linux/module.h | 5 ++ include/linux/mount.h | 3 + include/linux/msi.h | 3 + include/linux/net.h | 9 +++ include/linux/netdevice.h | 64 ++++++++++++++++ include/linux/netfilter.h | 2 + include/linux/netfilter/nf_conntrack_common.h | 3 + include/linux/netfilter/x_tables.h | 3 + include/linux/ns_common.h | 3 + include/linux/nsproxy.h | 9 +++ include/linux/of.h | 7 ++ include/linux/pagemap.h | 5 ++ include/linux/pci-epc.h | 4 + include/linux/pci-epf.h | 6 ++ include/linux/pci.h | 35 +++++++++ include/linux/pci_hotplug.h | 18 +++++ include/linux/perf_event.h | 22 ++++++ include/linux/pm.h | 13 ++++ include/linux/pm_qos.h | 6 ++ include/linux/pm_wakeup.h | 4 + include/linux/posix-clock.h | 14 ++++ include/linux/posix-timers.h | 6 ++ include/linux/psi_types.h | 3 + include/linux/ptp_clock_kernel.h | 6 ++ include/linux/quota.h | 12 +++ include/linux/rcu_segcblist.h | 6 ++ include/linux/rmap.h | 3 + include/linux/sbitmap.h | 2 + include/linux/sched.h | 49 +++++++++++++ include/linux/sched/signal.h | 5 ++ 
include/linux/sched/topology.h | 11 +++ include/linux/sched/user.h | 3 + include/linux/shrinker.h | 7 ++ include/linux/skbuff.h | 12 +++ include/linux/skmsg.h | 5 ++ include/linux/srcutree.h | 9 +++ include/linux/stat.h | 5 +- include/linux/stop_machine.h | 2 + include/linux/sunrpc/svc.h | 6 ++ include/linux/swap.h | 8 ++ include/linux/sysfs.h | 2 + include/linux/task_io_accounting.h | 9 +++ include/linux/tcp.h | 11 +++ include/linux/timer.h | 6 ++ include/linux/trace_events.h | 2 + include/linux/tracepoint-defs.h | 3 + include/linux/tty_port.h | 6 ++ include/linux/user_namespace.h | 5 ++ include/linux/watchdog.h | 6 ++ include/linux/workqueue.h | 11 +++ include/linux/writeback.h | 3 + include/linux/xattr.h | 2 + include/net/dcbnl.h | 10 +++ include/net/devlink.h | 9 +++ include/net/dst.h | 9 +++ include/net/dst_ops.h | 10 +++ include/net/fib_rules.h | 9 +++ include/net/flow.h | 12 +++ include/net/flow_dissector.h | 3 + include/net/flow_offload.h | 5 ++ include/net/genetlink.h | 11 +++ include/net/if_inet6.h | 17 +++++ include/net/ip6_fib.h | 7 ++ include/net/l3mdev.h | 5 ++ include/net/lwtunnel.h | 6 ++ include/net/neighbour.h | 15 ++++ include/net/net_namespace.h | 5 ++ include/net/netdev_rx_queue.h | 9 +++ include/net/netfilter/nf_conntrack.h | 3 + include/net/netfilter/nf_conntrack_ecache.h | 1 + include/net/netlink.h | 5 ++ include/net/netns/can.h | 2 + include/net/netns/ipv4.h | 5 ++ include/net/netns/ipv6.h | 4 + include/net/netns/netfilter.h | 2 + include/net/netns/nftables.h | 2 + include/net/netns/sctp.h | 3 + include/net/netns/smc.h | 33 +++++++++ include/net/netns/unix.h | 3 + include/net/netns/xfrm.h | 2 + include/net/nexthop.h | 7 ++ include/net/page_pool/types.h | 5 ++ include/net/rtnetlink.h | 9 +++ include/net/sch_generic.h | 16 ++++ include/net/snmp.h | 2 + include/net/sock.h | 20 +++++ include/net/sock_reuseport.h | 3 + include/net/tcp.h | 8 ++ include/net/tls.h | 17 +++++ include/net/xdp.h | 7 ++ include/net/xfrm.h | 2 + 
include/net/xsk_buff_pool.h | 4 + include/scsi/scsi_cmnd.h | 5 ++ include/scsi/scsi_device.h | 14 ++++ include/scsi/scsi_host.h | 12 +++ kernel/audit.h | 2 + kernel/bpf/devmap.c | 2 + kernel/locking/rtmutex_common.h | 2 + kernel/module/sysfs.c | 2 + kernel/sched/cpuacct.c | 7 ++ kernel/sched/cpudeadline.h | 5 ++ kernel/sched/fair.c | 6 ++ kernel/sched/sched.h | 73 +++++++++++++++++++ kernel/workqueue.c | 6 ++ kernel/workqueue_internal.h | 5 ++ mm/internal.h | 3 + mm/ksm.c | 3 + mm/memcontrol.c | 3 + net/devlink/devl_internal.h | 4 + net/ipv4/route.c | 3 + net/ipv6/route.c | 3 + 196 files changed, 1641 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index b84ed3ad91a9..a89b35070a35 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -277,6 +277,8 @@ struct kvm_arch { * the associated pKVM instance in the hypervisor. */ struct kvm_protected_vm pkvm; + + CK_KABI_RESERVE(1) }; struct kvm_vcpu_fault_info { diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h index 006946745352..c6da8b300c12 100644 --- a/arch/arm64/include/asm/probes.h +++ b/arch/arm64/include/asm/probes.h @@ -8,6 +8,7 @@ #define _ARM_PROBES_H #include +#include typedef u32 probe_opcode_t; typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *); @@ -24,6 +25,9 @@ struct arch_probe_insn { typedef u32 kprobe_opcode_t; struct arch_specific_insn { struct arch_probe_insn api; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #endif diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index a2e9317aad49..a5e4a019b4cc 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h @@ -80,6 +80,9 @@ struct arch_specific_insn { void (*emulate_op)(struct kprobe *p, struct pt_regs *regs); /* Number of bytes of text poked */ int tp_len; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct arch_optimized_insn { diff --git 
a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 508d6dccb3c4..b96fe390a9c4 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1463,6 +1463,8 @@ struct kvm_arch { */ #define SPLIT_DESC_CACHE_MIN_NR_OBJECTS (SPTE_ENT_PER_PAGE + 1) struct kvm_mmu_memory_cache split_desc_cache; + + CK_KABI_RESERVE(1) }; struct kvm_vm_stat { diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index d63b02940747..2c6554052ace 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -60,6 +60,8 @@ struct thread_info { #ifdef CONFIG_SMP u32 cpu; /* current CPU */ #endif + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #define INIT_THREAD_INFO(tsk) \ diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index 5b0bdc268ade..315e7eb6925a 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -49,6 +49,10 @@ struct blkg_iostat_set { int lqueued; /* queued in llist */ struct blkg_iostat cur; struct blkg_iostat last; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) }; /* association between a blk cgroup and a request queue */ @@ -88,6 +92,8 @@ struct blkcg_gq { int last_use; struct rcu_head rcu_head; + + CK_KABI_RESERVE(1) }; struct blkcg { @@ -114,6 +120,11 @@ struct blkcg { #ifdef CONFIG_CGROUP_WRITEBACK struct list_head cgwb_list; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css) @@ -183,6 +194,9 @@ struct blkcg_policy { blkcg_pol_free_pd_fn *pd_free_fn; blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn; blkcg_pol_stat_pd_fn *pd_stat_fn; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; extern struct blkcg blkcg_root; diff --git a/block/blk-mq.h b/block/blk-mq.h index 49e672e0211f..cc3dc4abb2e8 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -158,6 +158,10 @@ struct blk_mq_alloc_data { /* input & output parameter */ struct blk_mq_ctx 
*ctx; struct blk_mq_hw_ctx *hctx; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) }; struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, diff --git a/block/elevator.h b/block/elevator.h index 7ca3d7b6ed82..d07757ab6e34 100644 --- a/block/elevator.h +++ b/block/elevator.h @@ -48,6 +48,11 @@ struct elevator_mq_ops { struct request *(*next_request)(struct request_queue *, struct request *); void (*init_icq)(struct io_cq *); void (*exit_icq)(struct io_cq *); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define ELV_NAME_MAX (16) @@ -84,6 +89,11 @@ struct elevator_type /* managed by elevator core */ char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */ struct list_head list; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline bool elevator_tryget(struct elevator_type *e) diff --git a/drivers/block/brd.c b/drivers/block/brd.c index d816d1512531..e7be237c90d3 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -44,6 +44,9 @@ struct brd_device { */ struct xarray brd_pages; u64 brd_nr_pages; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index ef0b2efa9f93..74d500819e92 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -341,6 +341,8 @@ struct dw_pcie_ep_ops { * driver. 
*/ unsigned int (*func_conf_select)(struct dw_pcie_ep *ep, u8 func_no); + + CK_KABI_RESERVE(1) }; struct dw_pcie_ep_func { @@ -364,6 +366,8 @@ struct dw_pcie_ep { void __iomem *msi_mem; phys_addr_t msi_mem_phys; struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS]; + + CK_KABI_RESERVE(1) }; struct dw_pcie_ops { @@ -378,6 +382,8 @@ struct dw_pcie_ops { enum dw_pcie_ltssm (*get_ltssm)(struct dw_pcie *pcie); int (*start_link)(struct dw_pcie *pcie); void (*stop_link)(struct dw_pcie *pcie); + + CK_KABI_RESERVE(1) }; struct dw_pcie { diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index d69a17947ffc..f4b3d49ff8aa 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -314,6 +314,15 @@ struct pci_sriov { u16 subsystem_device; /* VF subsystem device */ resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ bool drivers_autoprobe; /* Auto probing of VFs by driver */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; #ifdef CONFIG_PCI_DOE diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index b8d4f61f14be..63a846e63b9b 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -53,6 +53,11 @@ struct ptp_clock { struct mutex n_vclocks_mux; /* protect concurrent n_vclocks access */ bool is_virtual_clock; bool has_cycles; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define info_to_vclock(d) container_of((d), struct ptp_vclock, info) diff --git a/fs/eventfd.c b/fs/eventfd.c index 33a918f9566c..3318d311e648 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -41,6 +41,9 @@ struct eventfd_ctx { __u64 count; unsigned int flags; int id; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask) diff --git a/fs/mount.h b/fs/mount.h index 130c07c2f8d2..97a62879ef8e 100644 --- a/fs/mount.h +++ b/fs/mount.h @@ -4,6 +4,7 @@ 
#include #include #include +#include struct mnt_namespace { struct ns_common ns; @@ -77,6 +78,8 @@ struct mount { int mnt_expiry_mark; /* true if marked for expiry */ struct hlist_head mnt_pins; struct hlist_head mnt_stuck_children; + + CK_KABI_RESERVE(1) } __randomize_layout; #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */ diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index ed966108da80..b477203de531 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -11,6 +11,8 @@ #ifndef __UBIFS_DEBUG_H__ #define __UBIFS_DEBUG_H__ +#include + /* Checking helper functions */ typedef int (*dbg_leaf_callback)(struct ubifs_info *c, struct ubifs_zbranch *zbr, void *priv); @@ -115,6 +117,9 @@ struct ubifs_debug_info { struct dentry *dfs_chk_fs; struct dentry *dfs_tst_rcvry; struct dentry *dfs_ro_error; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index ebb3ad6b5e7e..eff0e642a52e 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -428,6 +428,9 @@ struct ubifs_inode { pgoff_t read_in_a_row; int data_len; void *data; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** @@ -1522,6 +1525,15 @@ struct ubifs_info { struct ubifs_debug_info *dbg; struct ubifs_stats_info *stats; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; extern struct list_head ubifs_infos; diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index d9c20ae23b63..a867baf7beab 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -282,6 +282,8 @@ struct acpi_device_power { struct acpi_device_power_flags flags; struct acpi_device_power_state states[ACPI_D_STATE_COUNT]; /* Power states (D0-D3Cold) */ u8 state_for_enumeration; /* Deepest power state for enumeration */ + + CK_KABI_RESERVE(1) }; struct acpi_dep_data { diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h 
index 9a022caacf93..5eb7e205a9bc 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -239,6 +239,8 @@ struct drm_private_state_funcs { */ void (*atomic_print_state)(struct drm_printer *p, const struct drm_private_state *state); + + CK_KABI_RESERVE(1) }; /** @@ -338,6 +340,8 @@ struct drm_private_state { * @obj: backpointer to the private object */ struct drm_private_obj *obj; + + CK_KABI_RESERVE(1) }; struct __drm_private_objs_state { diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index c0a14b40c039..d3b9612b37da 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -153,6 +153,8 @@ struct drm_client_buffer { * @fb: DRM framebuffer */ struct drm_framebuffer *fb; + + CK_KABI_RESERVE(1) }; struct drm_client_buffer * diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index d300fde6c1a4..baa830c768a3 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -321,6 +321,9 @@ struct drm_hdmi_info { /** @dsc_cap: DSC capabilities of the sink */ struct drm_hdmi_dsc_cap dsc_cap; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** @@ -383,6 +386,8 @@ enum drm_panel_orientation { struct drm_monitor_range_info { u16 min_vfreq; u16 max_vfreq; + + CK_KABI_RESERVE(1) }; /** @@ -816,6 +821,9 @@ struct drm_display_info { * @quirks: EDID based quirks. Internal to EDID parsing. */ u32 quirks; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; int drm_display_info_set_bus_formats(struct drm_display_info *info, @@ -1335,6 +1343,9 @@ struct drm_connector_funcs { * Allows connectors to create connector-specific debugfs files. 
*/ void (*debugfs_init)(struct drm_connector *connector, struct dentry *root); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** @@ -1876,6 +1887,19 @@ struct drm_connector { /** @hdr_sink_metadata: HDR Metadata Information read from sink */ struct hdr_sink_metadata hdr_sink_metadata; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) }; #define obj_to_connector(x) container_of(x, struct drm_connector, base) diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 8b48a1974da3..6fcb4893a521 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -385,6 +385,8 @@ struct drm_crtc_state { /** @state: backpointer to global drm_atomic_state */ struct drm_atomic_state *state; + + CK_KABI_RESERVE(1) }; /** @@ -1175,6 +1177,8 @@ struct drm_crtc { * Initialized via drm_self_refresh_helper_init(). */ struct drm_self_refresh_data *self_refresh_data; + + CK_KABI_RESERVE(1) }; /** diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 159213786e6e..190ce62d5dc0 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -1509,6 +1509,8 @@ struct drm_mode_config_helper_funcs { * This hook is optional. */ int (*atomic_commit_setup)(struct drm_atomic_state *state); + + CK_KABI_RESERVE(1) }; #endif diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index ec4f543c3d95..b65d1ff10884 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -72,6 +72,8 @@ struct drm_modeset_acquire_ctx { /* Perform interruptible waits on this context. 
*/ bool interruptible; + + CK_KABI_RESERVE(1) }; /** diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index fef775200a81..e1ad7ee742dc 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -758,6 +758,8 @@ struct drm_plane { * scaling. */ struct drm_property *scaling_filter_property; + + CK_KABI_RESERVE(1) }; #define obj_to_plane(x) container_of(x, struct drm_plane, base) diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h index 17e576c80169..da540bf80762 100644 --- a/include/drm/drm_writeback.h +++ b/include/drm/drm_writeback.h @@ -84,6 +84,19 @@ struct drm_writeback_connector { * The name of the connector's fence timeline. */ char timeline_name[32]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) }; /** diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 78a226eba953..279733a10c56 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -174,6 +174,9 @@ struct ttm_resource_manager { * bdev->lru_lock. 
*/ uint64_t usage; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 2ad261082bba..edd7b9d621e9 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -13,6 +13,7 @@ #include #include #include +#include struct page; struct device; @@ -158,6 +159,11 @@ struct bdi_writeback { struct rcu_head rcu; }; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct backing_dev_info { @@ -201,6 +207,11 @@ struct backing_dev_info { #ifdef CONFIG_DEBUG_FS struct dentry *debug_dir; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct wb_lock_cookie { diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 8d51f69f9f5e..636c2b5b1bb9 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -62,6 +62,9 @@ struct linux_binprm { struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. 
*/ char buf[BINPRM_BUF_SIZE]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __randomize_layout; #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 diff --git a/include/linux/bio.h b/include/linux/bio.h index 797e17573e71..356c5d2b7895 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -9,6 +9,7 @@ /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ #include #include +#include #ifdef CONFIG_THP_SWAP #if HPAGE_PMD_NR > 256 @@ -369,6 +370,10 @@ struct bio_integrity_payload { struct work_struct bip_work; /* I/O completion */ struct bio_vec *bip_vec; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + struct bio_vec bip_inline_vecs[];/* embedded bvec array */ }; @@ -728,6 +733,11 @@ struct bio_set { * Hot un-plug notifier for the per-cpu cache, if used */ struct hlist_node cpuhp_dead; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline bool bioset_initialized(struct bio_set *bs) diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h index f7cc8080672c..a8d2a0f8faf3 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -33,6 +33,9 @@ struct blk_integrity_profile { integrity_prepare_fn *prepare_fn; integrity_complete_fn *complete_fn; const char *name; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #ifdef CONFIG_BLK_DEV_INTEGRITY diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 958ed7e89b30..991f87788b05 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -8,6 +8,7 @@ #include #include #include +#include struct blk_mq_tags; struct blk_flush_queue; @@ -429,6 +430,11 @@ struct blk_mq_hw_ctx { * q->unused_hctx_list. 
*/ struct list_head hctx_list; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /** @@ -515,6 +521,11 @@ struct blk_mq_tag_set { struct mutex tag_list_lock; struct list_head tag_list; struct srcu_struct *srcu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /** @@ -526,6 +537,8 @@ struct blk_mq_tag_set { struct blk_mq_queue_data { struct request *rq; bool last; + + CK_KABI_RESERVE(1) }; typedef bool (busy_tag_iter_fn)(struct request *, void *); @@ -645,6 +658,11 @@ struct blk_mq_ops { */ void (*show_rq)(struct seq_file *m, struct request *rq); #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; enum { @@ -749,6 +767,11 @@ struct blk_mq_tags { * request pool */ spinlock_t lock; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index ce14257bbf74..d8f5999e4748 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -11,6 +11,7 @@ #include #include #include +#include struct bio_set; struct bio; @@ -76,6 +77,11 @@ struct block_device { * path */ struct device bd_device; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __randomize_layout; #define bdev_whole(_bdev) \ @@ -325,6 +331,11 @@ struct bio { unsigned long bi_ext_flags; /* extend the bi_flags */ + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + /* * We can inline a number of vecs at the end of the bio, to avoid * double allocations for a small number of bio_vecs. 
This member diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 53ad9ddcc776..4412c7c64468 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -110,6 +110,9 @@ struct blk_integrity { unsigned char tuple_size; unsigned char interval_exp; unsigned char tag_size; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; typedef unsigned int __bitwise blk_mode_t; @@ -207,6 +210,11 @@ struct gendisk { * devices that do not have multiple independent access ranges. */ struct blk_independent_access_ranges *ia_ranges; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline bool disk_live(struct gendisk *disk) @@ -337,6 +345,11 @@ struct queue_limits { * due to possible offsets. */ unsigned int dma_alignment; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx, @@ -543,6 +556,11 @@ struct request_queue { struct mutex debugfs_mutex; bool mq_sysfs_init_done; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* Keep blk_queue_flag_name[] in sync with the definitions below */ @@ -995,6 +1013,11 @@ struct blk_plug { bool has_elevator; struct list_head cb_list; /* md requires an unplug callback */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct blk_plug_cb; @@ -1427,6 +1450,11 @@ struct block_device_operations { * driver. 
*/ int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #ifdef CONFIG_COMPAT diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h index 7b121bd780eb..cf06058b7381 100644 --- a/include/linux/bpf-cgroup-defs.h +++ b/include/linux/bpf-cgroup-defs.h @@ -7,6 +7,7 @@ #include #include #include +#include struct bpf_prog_array; @@ -70,6 +71,9 @@ struct cgroup_bpf { /* cgroup_bpf is released using a work queue */ struct work_struct release_work; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #else /* CONFIG_CGROUP_BPF */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 035e627f94f6..8dff572dedcd 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -172,6 +172,11 @@ struct bpf_map_ops { /* bpf_iter info used to open a seq_file */ const struct bpf_iter_seq_info *iter_seq_info; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; enum { @@ -300,6 +305,11 @@ struct bpf_map { bool free_after_rcu_gp; atomic64_t sleepable_refcnt; s64 __percpu *elem_count; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline const char *btf_field_type_name(enum btf_field_type type) @@ -918,6 +928,9 @@ struct bpf_insn_access_aux { }; }; struct bpf_verifier_log *log; /* for verbose logs */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static inline void @@ -1024,6 +1037,9 @@ struct btf_func_model { u8 nr_args; u8 arg_size[MAX_BPF_FUNC_ARGS]; u8 arg_flags[MAX_BPF_FUNC_ARGS]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* Restore arguments before returning from trampoline to let original function @@ -1124,6 +1140,8 @@ struct bpf_ksym { struct list_head lnode; struct latch_tree_node tnode; bool prog; + + CK_KABI_RESERVE(1) }; enum bpf_tramp_prog_type { @@ -1144,6 +1162,8 @@ struct bpf_tramp_image { struct rcu_head rcu; struct work_struct work; }; + + 
CK_KABI_RESERVE(1) }; struct bpf_trampoline { @@ -1172,6 +1192,9 @@ struct bpf_trampoline { /* Executable image of trampoline */ struct bpf_tramp_image *cur_image; struct module *mod; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct bpf_attach_target_info { @@ -1488,6 +1511,11 @@ struct bpf_prog_aux { struct work_struct work; struct rcu_head rcu; }; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct bpf_prog { @@ -1522,6 +1550,11 @@ struct bpf_prog { DECLARE_FLEX_ARRAY(struct sock_filter, insns); DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi); }; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct bpf_array_aux { @@ -1566,6 +1599,8 @@ struct bpf_link_ops { struct bpf_link_info *info); int (*update_map)(struct bpf_link *link, struct bpf_map *new_map, struct bpf_map *old_map); + + CK_KABI_RESERVE(1) }; struct bpf_tramp_link { @@ -2256,6 +2291,10 @@ struct bpf_iter_aux_info { enum bpf_iter_task_type type; u32 pid; } task; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) }; typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h index 173ec7f43ed1..f77c2087dd08 100644 --- a/include/linux/bpf_local_storage.h +++ b/include/linux/bpf_local_storage.h @@ -94,6 +94,8 @@ struct bpf_local_storage { */ struct rcu_head rcu; raw_spinlock_t lock; /* Protect adding/removing from the "list" */ + + CK_KABI_RESERVE(1) }; /* U16_MAX is much more than enough for sk local storage diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 92919d52f7e1..d7d586015a66 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -317,6 +317,8 @@ struct bpf_func_state { struct bpf_reference_state *refs; int allocated_stack; struct bpf_stack_state *stack; + + CK_KABI_RESERVE(1) }; struct bpf_idx_pair { @@ -522,6 +524,9 @@ struct bpf_insn_aux_data { * accepts 
callback function as a parameter. */ bool calls_callback; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ @@ -672,6 +677,11 @@ struct bpf_verifier_env { * e.g., in reg_type_str() to generate reg_type string */ char tmp_str_buf[TMP_STR_BUF_LEN]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index e96206d91c81..a2e401bed012 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -143,6 +143,9 @@ struct cgroup_file { struct kernfs_node *kn; unsigned long notified_at; struct timer_list notify_timer; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* @@ -195,6 +198,11 @@ struct cgroup_subsys_state { struct work_struct destroy_work; struct rcu_work destroy_rwork; + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + /* * PI: the parent css. Placed here for cache proximity to following * fields of the containing structure. 
@@ -297,6 +305,11 @@ struct css_set { /* For RCU-protected deletion */ struct rcu_head rcu_head; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct cgroup_base_stat { @@ -373,6 +386,11 @@ struct cgroup_rstat_cpu { */ struct cgroup *updated_children; /* terminated by self cgroup */ struct cgroup *updated_next; /* NULL iff not on the list */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct cgroup_freezer_state { @@ -530,6 +548,11 @@ struct cgroup { struct bpf_local_storage __rcu *bpf_cgrp_storage; #endif + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + /* All ancestors including self */ struct cgroup *ancestors[]; }; @@ -573,6 +596,11 @@ struct cgroup_root { /* The name for this hierarchy - may be empty */ char name[MAX_CGROUP_ROOT_NAMELEN]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* @@ -665,6 +693,8 @@ struct cftype { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lock_class_key lockdep_key; #endif + + CK_KABI_RESERVE(1) }; /* @@ -696,6 +726,11 @@ struct cgroup_subsys { void (*release)(struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css); + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + bool early_init:1; /* @@ -817,6 +852,8 @@ struct sock_cgroup_data { #ifdef CONFIG_CGROUP_NET_PRIO u16 prioidx; /* v1 */ #endif + + CK_KABI_RESERVE(1) }; static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd) diff --git a/include/linux/coredump.h b/include/linux/coredump.h index d3eba4360150..52c7c487c9fa 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -28,6 +28,8 @@ struct coredump_params { int vma_count; size_t vma_data_size; struct core_vma_metadata *vma_meta; + + CK_KABI_RESERVE(1) }; /* diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 3183aeb7f5b4..3fd1fe4e6531 100644 --- 
a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -44,6 +44,11 @@ struct cpuidle_state_usage { unsigned long long s2idle_usage; unsigned long long s2idle_time; /* in US */ #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct cpuidle_state { @@ -74,6 +79,11 @@ struct cpuidle_state { int (*enter_s2idle)(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* Idle State Flags */ @@ -111,6 +121,11 @@ struct cpuidle_device { cpumask_t coupled_cpus; struct cpuidle_coupled *coupled; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); @@ -165,6 +180,9 @@ struct cpuidle_driver { /* preferred governor to switch at register time */ const char *governor; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #ifdef CONFIG_CPU_IDLE diff --git a/include/linux/cred.h b/include/linux/cred.h index bb55703e1166..023712a4ca86 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -143,6 +143,15 @@ struct cred { int non_rcu; /* Can we skip RCU deletion? 
*/ struct rcu_head rcu; /* RCU deletion hook */ }; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) } __randomize_layout; extern void __put_cred(struct cred *); diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 31f6fee0c36c..772a8fcb13bb 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -430,6 +430,8 @@ struct crypto_tfm { struct crypto_alg *__crt_alg; + CK_KABI_RESERVE(1) + void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; }; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 344f41a3e052..05b9b6e86c3a 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -14,6 +14,7 @@ #include #include #include +#include struct path; struct file; @@ -111,6 +112,9 @@ struct dentry { struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */ struct rcu_head d_rcu; } d_u; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __randomize_layout; /* @@ -140,6 +144,11 @@ struct dentry_operations { struct vfsmount *(*d_automount)(struct path *); int (*d_manage)(const struct path *, bool); struct dentry *(*d_real)(struct dentry *, const struct inode *); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_aligned; /* diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 6639f48dac36..40f9990da3d9 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -55,6 +55,13 @@ struct task_delay_info { u32 compact_count; /* total count of memory compact */ u32 wpcopy_count; /* total count of write-protect copy */ u32 irq_count; /* total count of IRQ/SOFTIRQ */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) }; #endif diff --git a/include/linux/device.h b/include/linux/device.h index 3627b26b243e..092bbdb60cdf 100644 --- a/include/linux/device.h +++ 
b/include/linux/device.h @@ -805,6 +805,23 @@ struct device { #ifdef CONFIG_DMA_OPS_BYPASS bool dma_ops_bypass : 1; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) + CK_KABI_RESERVE(13) + CK_KABI_RESERVE(14) + CK_KABI_RESERVE(15) + CK_KABI_RESERVE(16) }; /** diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h index ae10c4322754..ccaa3cf10c56 100644 --- a/include/linux/device/bus.h +++ b/include/linux/device/bus.h @@ -107,6 +107,9 @@ struct bus_type { const struct iommu_ops *iommu_ops; bool need_parent_lock; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; int __must_check bus_register(const struct bus_type *bus); diff --git a/include/linux/device/class.h b/include/linux/device/class.h index abf3d3bfb6fe..3048719f63c2 100644 --- a/include/linux/device/class.h +++ b/include/linux/device/class.h @@ -69,6 +69,11 @@ struct class { void (*get_ownership)(const struct device *dev, kuid_t *uid, kgid_t *gid); const struct dev_pm_ops *pm; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct class_dev_iter { diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h index 7738f458995f..1d806b1d593c 100644 --- a/include/linux/device/driver.h +++ b/include/linux/device/driver.h @@ -119,6 +119,11 @@ struct device_driver { void (*coredump) (struct device *dev); struct driver_private *p; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index b3772edca2e6..f92fc1461977 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -277,6 +277,8 @@ struct dma_fence_ops { * This callback is optional. 
*/ void (*set_deadline)(struct dma_fence *fence, ktime_t deadline); + + CK_KABI_RESERVE(1) }; void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 0ce2ae6c944d..7b5b7dd2cd95 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -82,6 +82,13 @@ struct dma_map_ops { size_t (*max_mapping_size)(struct device *dev); size_t (*opt_mapping_size)(void); unsigned long (*get_merge_boundary)(struct device *dev); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) }; #ifdef CONFIG_DMA_OPS diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index b9caa01dfac4..2934c3a4edae 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -24,6 +24,8 @@ struct em_perf_state { unsigned long power; unsigned long cost; unsigned long flags; + + CK_KABI_RESERVE(1) }; /* @@ -56,6 +58,9 @@ struct em_perf_domain { struct em_perf_state *table; int nr_perf_states; unsigned long flags; + + CK_KABI_RESERVE(1) + unsigned long cpus[]; }; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 1b523fd48586..a01872f6b49a 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -172,6 +172,8 @@ struct ethtool_link_ksettings { __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); } link_modes; u32 lanes; + + CK_KABI_RESERVE(1) }; /** @@ -388,6 +390,8 @@ struct ethtool_pause_stats { u64 tx_pause_frames; u64 rx_pause_frames; ); + + CK_KABI_RESERVE(1) }; #define ETHTOOL_MAX_LANES 8 @@ -912,6 +916,23 @@ struct ethtool_ops { int (*set_mm)(struct net_device *dev, struct ethtool_mm_cfg *cfg, struct netlink_ext_ack *extack); void (*get_mm_stats)(struct net_device *dev, struct ethtool_mm_stats *stats); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + 
CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) + CK_KABI_RESERVE(13) + CK_KABI_RESERVE(14) + CK_KABI_RESERVE(15) + CK_KABI_RESERVE(16) }; int ethtool_check_ops(const struct ethtool_ops *ops); diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 11fbd0ee1370..14bc03473cc0 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -3,6 +3,7 @@ #define LINUX_EXPORTFS_H 1 #include +#include struct dentry; struct iattr; @@ -225,6 +226,9 @@ struct export_operations { */ #define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */ unsigned long flags; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid, diff --git a/include/linux/fb.h b/include/linux/fb.h index 322b4d20afa5..6b088a55932c 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -218,6 +218,8 @@ struct fb_deferred_io { struct list_head pagereflist; /* list of pagerefs for touched pages */ /* callback */ void (*deferred_io)(struct fb_info *info, struct list_head *pagelist); + + CK_KABI_RESERVE(1) }; #endif diff --git a/include/linux/filelock.h b/include/linux/filelock.h index 95e868e09e29..2225f53aa55f 100644 --- a/include/linux/filelock.h +++ b/include/linux/filelock.h @@ -31,6 +31,9 @@ struct file_lock; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct lock_manager_operations { @@ -45,6 +48,11 @@ struct lock_manager_operations { bool (*lm_breaker_owns_lease)(struct file_lock *); bool (*lm_lock_expirable)(struct file_lock *cfl); void (*lm_expire_lock)(void); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct lock_manager { @@ -124,6 +132,8 @@ struct file_lock { struct inode *inode; } ceph; } fl_u; + + CK_KABI_RESERVE(1) } __randomize_layout; struct 
file_lock_context { diff --git a/include/linux/filter.h b/include/linux/filter.h index 5090e940ba3e..dabd2ea9c7dd 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -570,6 +570,9 @@ struct bpf_prog_stats { u64_stats_t nsecs; u64_stats_t misses; struct u64_stats_sync syncp; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __aligned(2 * sizeof(u64)); struct sk_filter { diff --git a/include/linux/fs.h b/include/linux/fs.h index aeb033344a6b..f4e5f3b61833 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -443,6 +443,11 @@ struct address_space_operations { sector_t *span); void (*swap_deactivate)(struct file *file); int (*swap_rw)(struct kiocb *iocb, struct iov_iter *iter); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; extern const struct address_space_operations empty_aops; @@ -489,6 +494,11 @@ struct address_space { struct list_head private_list; struct rw_semaphore i_mmap_rwsem; void *private_data; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __attribute__((aligned(sizeof(long)))) __randomize_layout; /* * On most architectures that alignment is already the case; but @@ -748,6 +758,9 @@ struct inode { #endif void *i_private; /* fs or device private pointer */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __randomize_layout; struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode); @@ -1954,6 +1967,11 @@ struct file_operations { int (*uring_cmd)(struct io_uring_cmd *ioucmd, unsigned int issue_flags); int (*uring_cmd_iopoll)(struct io_uring_cmd *, struct io_comp_batch *, unsigned int poll_flags); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __randomize_layout; /* Wrap a directory iterator that needs exclusive inode access */ @@ -2004,6 +2022,11 @@ struct inode_operations { struct dentry *dentry, struct fileattr *fa); int (*fileattr_get)(struct dentry *dentry, struct fileattr *fa); struct offset_ctx 
*(*get_offset_ctx)(struct inode *inode); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_aligned; static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, @@ -2087,6 +2110,11 @@ struct super_operations { long (*free_cached_objects)(struct super_block *, struct shrink_control *); void (*shutdown)(struct super_block *sb); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* @@ -2391,6 +2419,11 @@ struct file_system_type { struct lock_class_key i_mutex_key; struct lock_class_key invalidate_lock_key; struct lock_class_key i_mutex_dir_key; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index e8921871ef9a..4281d27af70a 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -340,6 +340,8 @@ struct ftrace_ops { unsigned long direct_call; #endif #endif + + CK_KABI_RESERVE(1) }; extern struct ftrace_ops __rcu *ftrace_ops_list; diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 525cc031596b..64a0c689361c 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -13,6 +13,7 @@ #include #include #include +#include struct fwnode_operations; struct device; @@ -45,6 +46,13 @@ struct fwnode_handle { struct list_head suppliers; struct list_head consumers; u8 flags; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) }; /* @@ -166,6 +174,11 @@ struct fwnode_operations { void __iomem *(*iomap)(struct fwnode_handle *fwnode, int index); int (*irq_get)(const struct fwnode_handle *fwnode, unsigned int index); int (*add_links)(struct fwnode_handle *fwnode); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define fwnode_has_op(fwnode, op) \ diff --git 
a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 254d4a898179..461742279f61 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -20,6 +20,7 @@ #include #include #include +#include struct hrtimer_clock_base; struct hrtimer_cpu_base; @@ -124,6 +125,10 @@ struct hrtimer { u8 is_rel; u8 is_soft; u8 is_hard; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) }; /** @@ -165,6 +170,9 @@ struct hrtimer_clock_base { struct timerqueue_head active; ktime_t (*get_time)(void); ktime_t offset; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __hrtimer_clock_base_align; enum hrtimer_base_type { @@ -237,6 +245,9 @@ struct hrtimer_cpu_base { ktime_t softirq_expires_next; struct hrtimer *softirq_next_timer; struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } ____cacheline_aligned; static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) diff --git a/include/linux/i2c.h b/include/linux/i2c.h index a3166100f0cc..68ee918badc6 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -566,6 +566,9 @@ struct i2c_algorithm { int (*reg_slave)(struct i2c_client *client); int (*unreg_slave)(struct i2c_client *client); #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** @@ -655,6 +658,9 @@ struct i2c_bus_recovery_info { struct pinctrl *pinctrl; struct pinctrl_state *pins_default; struct pinctrl_state *pins_gpio; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; int i2c_recover_bus(struct i2c_adapter *adap); @@ -751,6 +757,9 @@ struct i2c_adapter { /* 7bit address space */ DECLARE_BITMAP(addrs_in_instantiation, 1 << 7); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 523025106a64..bbdf70d2166b 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -35,6 +35,9 @@ struct macvlan_dev { #ifdef CONFIG_NET_POLL_CONTROLLER 
struct netpoll *netpoll; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static inline void macvlan_count_rx(const struct macvlan_dev *vlan, diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index ddb27fc0ee8c..f2fa95b0c0e8 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -18,6 +18,8 @@ struct ipv4_devconf { void *sysctl; int data[IPV4_DEVCONF_MAX]; DECLARE_BITMAP(state, IPV4_DEVCONF_MAX); + + CK_KABI_RESERVE(1) }; #define MC_HASH_SZ_LOG 9 @@ -50,6 +52,9 @@ struct in_device { struct neigh_parms *arp_parms; struct ipv4_devconf cnf; struct rcu_head rcu_head; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #define IPV4_DEVCONF(cnf, attr) ((cnf).data[IPV4_DEVCONF_ ## attr - 1]) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 4a1dc88ddbff..46612e4aaa36 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -289,6 +289,8 @@ struct irq_affinity { unsigned int set_size[IRQ_AFFINITY_MAX_SETS]; void (*calc_sets)(struct irq_affinity *, unsigned int nvecs); void *priv; + + CK_KABI_RESERVE(1) }; /** diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 95c86501a7a2..2ff402412f8d 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -115,6 +115,11 @@ struct iommu_domain { int users; }; }; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline bool iommu_is_dma_domain(struct iommu_domain *domain) @@ -230,6 +235,9 @@ struct iommu_iotlb_gather { size_t pgsize; struct list_head freelist; bool queued; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** @@ -337,6 +345,15 @@ struct iommu_ops { const struct iommu_domain_ops *default_domain_ops; unsigned long pgsize_bitmap; struct module *owner; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; /** diff --git a/include/linux/ioport.h 
b/include/linux/ioport.h index 25d768d48970..8144354f6da2 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -14,6 +14,8 @@ #include #include #include +#include + /* * Resources are tree-like, allowing * nesting etc.. @@ -25,6 +27,11 @@ struct resource { unsigned long flags; unsigned long desc; struct resource *parent, *sibling, *child; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index af8a771a053c..443ded24c730 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -3,6 +3,7 @@ #define _IPV6_H #include +#include #define ipv6_optlen(p) (((p)->hdrlen+1) << 3) #define ipv6_authlen(p) (((p)->hdrlen+2) << 2) @@ -84,6 +85,11 @@ struct ipv6_devconf { __u8 ndisc_evict_nocarrier; struct ctl_table_header *sysctl_header; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct ipv6_params { diff --git a/include/linux/irq.h b/include/linux/irq.h index 90081afa10ce..528487d6be71 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -160,6 +160,9 @@ struct irq_common_data { #ifdef CONFIG_GENERIC_IRQ_IPI unsigned int ipi_offset; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** @@ -187,6 +190,11 @@ struct irq_data { struct irq_data *parent_data; #endif void *chip_data; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* @@ -548,6 +556,8 @@ struct irq_chip { void (*irq_nmi_teardown)(struct irq_data *data); unsigned long flags; + + CK_KABI_RESERVE(1) }; /* @@ -1023,6 +1033,8 @@ struct irq_chip_type { u32 type; u32 mask_cache_priv; u32 *mask_cache; + + CK_KABI_RESERVE(1) }; /** diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 136f2980cba3..e31f999896de 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -18,6 +18,11 @@ struct irq_work { struct __call_single_node node; void (*func)(struct irq_work *); struct rcuwait 
irqwait; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){ \ diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index d9451d456a73..6a0a88377073 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -105,6 +105,11 @@ struct irq_desc { #ifdef CONFIG_HARDIRQS_SW_RESEND struct hlist_node resend_node; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_internodealigned_in_smp; #ifdef CONFIG_SPARSE_IRQ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 51c254b7fec2..a57e45f80986 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -174,6 +174,12 @@ struct irq_domain { irq_hw_number_t hwirq_max; unsigned int revmap_size; struct radix_tree_root revmap_tree; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + struct irq_data __rcu *revmap[]; }; diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 5a952d00ea15..226cada23934 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -19,6 +19,7 @@ #include #include #include +#include struct file; struct dentry; @@ -165,6 +166,8 @@ struct kernfs_elem_dir { * node has changed during negative dentry revalidation. 
*/ unsigned long rev; + + CK_KABI_RESERVE(1) }; struct kernfs_elem_symlink { @@ -244,6 +247,11 @@ struct kernfs_syscall_ops { const char *new_name); int (*show_path)(struct seq_file *sf, struct kernfs_node *kn, struct kernfs_root *root); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct kernfs_node *kernfs_root_to_node(struct kernfs_root *root); @@ -318,6 +326,9 @@ struct kernfs_ops { struct poll_table_struct *pt); int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 5caf3ce82373..547cb56c4b8f 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -36,6 +36,8 @@ struct key_preparsed_payload { size_t datalen; /* Raw datalen */ size_t quotalen; /* Quota length for proposed payload */ time64_t expiry; /* Expiry time of key */ + + CK_KABI_RESERVE(1) } __randomize_layout; typedef int (*request_key_actor_t)(struct key *auth_key, void *aux); @@ -56,6 +58,8 @@ struct key_match_data { unsigned lookup_type; /* Type of lookup for this search. */ #define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */ #define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */ + + CK_KABI_RESERVE(1) }; /* diff --git a/include/linux/key.h b/include/linux/key.h index 938d7ecfb495..79207da6e10a 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -20,6 +20,7 @@ #include #include #include +#include #ifdef __KERNEL__ #include @@ -278,6 +279,8 @@ struct key { * restriction. 
*/ struct key_restriction *restrict_link; + + CK_KABI_RESERVE(1) }; extern struct key *key_alloc(struct key_type *type, diff --git a/include/linux/kobject.h b/include/linux/kobject.h index c30affcc43b4..a4545ddb3d55 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -79,6 +79,11 @@ struct kobject { #ifdef CONFIG_DEBUG_KOBJECT_RELEASE struct delayed_work release; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; __printf(2, 3) int kobject_set_name(struct kobject *kobj, const char *name, ...); @@ -120,6 +125,11 @@ struct kobj_type { const struct kobj_ns_type_operations *(*child_ns_type)(const struct kobject *kobj); const void *(*namespace)(const struct kobject *kobj); void (*get_ownership)(const struct kobject *kobj, kuid_t *uid, kgid_t *gid); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct kobj_uevent_env { @@ -170,6 +180,11 @@ struct kset { spinlock_t list_lock; struct kobject kobj; const struct kset_uevent_ops *uevent_ops; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __randomize_layout; void kset_init(struct kset *kset); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 45d5b0a76b0b..5d6adf9ac86d 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -92,6 +92,8 @@ struct kprobe { * Protected by kprobe_mutex after this kprobe is registered. 
*/ u32 flags; + + CK_KABI_RESERVE(1) }; /* Kprobe status flags */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 88ab1da9c255..f3f8e6112a7e 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -393,6 +393,8 @@ struct kvm_vcpu { */ struct kvm_memory_slot *last_used_slot; u64 last_used_slot_gen; + + CK_KABI_RESERVE(1) }; /* @@ -589,6 +591,8 @@ struct kvm_memory_slot { u32 flags; short id; u16 as_id; + + CK_KABI_RESERVE(1) }; static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot) @@ -809,6 +813,8 @@ struct kvm { struct notifier_block pm_notifier; #endif char stats_id[KVM_STATS_NAME_SIZE]; + + CK_KABI_RESERVE(1) }; #define kvm_err(fmt, ...) \ diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index b35968ee9fb5..069d0515b50c 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -54,6 +54,9 @@ struct list_lru { bool memcg_aware; struct xarray xa; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; void list_lru_destroy(struct list_lru *lru); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 2986a43acaef..bb10a4d9d17e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -65,6 +65,8 @@ struct mem_cgroup_reclaim_cookie { struct mem_cgroup_id { int id; refcount_t ref; + + CK_KABI_RESERVE(1) }; /* @@ -139,6 +141,11 @@ struct mem_cgroup_per_node { bool on_tree; struct mem_cgroup *memcg; /* Back pointer, we cannot */ /* use container_of */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct mem_cgroup_threshold { @@ -333,6 +340,15 @@ struct mem_cgroup { struct lru_gen_mm_list mm_list; #endif + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + struct mem_cgroup_per_node *nodeinfo[]; }; diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 
d232de7cdc56..71b4a50fb589 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -52,6 +52,10 @@ struct mempolicy { nodemask_t cpuset_mems_allowed; /* relative to these nodes */ nodemask_t user_nodemask; /* nodemask passed by user */ } w; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) }; /* diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 4aae6c06c5f2..e090794f89e1 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -5,6 +5,7 @@ #ifndef _LINUX_MEMPOOL_H #define _LINUX_MEMPOOL_H +#include #include #include @@ -23,6 +24,10 @@ typedef struct mempool_s { mempool_alloc_t *alloc; mempool_free_t *free; wait_queue_head_t wait; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) } mempool_t; static inline bool mempool_initialized(mempool_t *pool) diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 1314d9c5f05b..fae593fa9012 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -25,6 +25,9 @@ struct vmem_altmap { unsigned long free; unsigned long align; unsigned long alloc; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* @@ -99,6 +102,9 @@ struct dev_pagemap_ops { */ int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn, unsigned long nr_pages, int mf_flags); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #define PGMAP_ALTMAP_VALID (1 << 0) @@ -133,6 +139,13 @@ struct dev_pagemap { const struct dev_pagemap_ops *ops; void *owner; int nr_range; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + union { struct range range; DECLARE_FLEX_ARRAY(struct range, ranges); diff --git a/include/linux/mm.h b/include/linux/mm.h index 753ad4350795..5b5d6e89afa8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -553,6 +553,10 @@ struct vm_fault { * page table to avoid allocation from * atomic context. 
*/ + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* @@ -632,6 +636,11 @@ struct vm_operations_struct { */ struct page *(*find_special_page)(struct vm_area_struct *vma, unsigned long addr); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #ifdef CONFIG_NUMA_BALANCING diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 20c96ce98751..d11645aca1ba 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -690,6 +690,11 @@ struct vm_area_struct { struct vma_numab_state *numab_state; /* NUMA Balancing state */ #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __randomize_layout; #ifdef CONFIG_SCHED_MM_CID @@ -949,6 +954,15 @@ struct mm_struct { #endif /* CONFIG_LRU_GEN */ } __randomize_layout; + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + /* * The mm_cpumask needs to be at the end of mm_struct, because it * is dynamically sized based on nr_cpu_ids. 
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 05092c37a430..d797575c68c2 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -643,6 +643,7 @@ struct lruvec { #ifdef CONFIG_MEMCG struct pglist_data *pgdat; #endif + CK_KABI_RESERVE(1) }; /* Isolate unmapped pages */ @@ -713,6 +714,8 @@ struct per_cpu_zonestat { struct per_cpu_nodestat { s8 stat_threshold; s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS]; + + CK_KABI_RESERVE(1) }; #endif /* !__GENERATING_BOUNDS.H */ @@ -985,6 +988,11 @@ struct zone { /* Zone statistics */ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_internodealigned_in_smp; enum pgdat_flags { @@ -1403,6 +1411,11 @@ typedef struct pglist_data { #ifdef CONFIG_MEMORY_FAILURE struct memory_failure_stats mf_stats; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } pg_data_t; #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) diff --git a/include/linux/module.h b/include/linux/module.h index a98e188cf37b..362258391d38 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -583,6 +583,11 @@ struct module { #ifdef CONFIG_DYNAMIC_DEBUG_CORE struct _ddebug_info dyndbg_info; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_aligned __randomize_layout; #ifndef MODULE_ARCH_INIT #define MODULE_ARCH_INIT {} diff --git a/include/linux/mount.h b/include/linux/mount.h index 4f40b40306d0..802437cd61cb 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -12,6 +12,7 @@ #include #include +#include struct super_block; struct dentry; @@ -72,6 +73,8 @@ struct vfsmount { struct super_block *mnt_sb; /* pointer to superblock */ int mnt_flags; struct mnt_idmap *mnt_idmap; + + CK_KABI_RESERVE(1) } __randomize_layout; static inline struct mnt_idmap 
*mnt_idmap(const struct vfsmount *mnt) diff --git a/include/linux/msi.h b/include/linux/msi.h index ddace8c34dcf..e82058180004 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -203,6 +203,9 @@ struct msi_desc { struct pci_msi_desc pci; struct msi_desc_data data; }; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* diff --git a/include/linux/net.h b/include/linux/net.h index c9b4a63791a4..6b789115b39e 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -223,6 +223,15 @@ struct proto_ops { int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg, size_t size); int (*set_rcvlowat)(struct sock *sk, int val); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; #define DECLARE_SOCKADDR(type, dst, src) \ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1576e7443eee..210abf138efa 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -257,6 +257,8 @@ struct netdev_hw_addr_list { /* Auxiliary tree for faster lookup on addition and deletion */ struct rb_root tree; + + CK_KABI_RESERVE(1) }; #define netdev_hw_addr_list_count(l) ((l)->count) @@ -319,6 +321,9 @@ struct header_ops { const unsigned char *haddr); bool (*validate)(const char *ll_header, unsigned int len); __be16 (*parse_protocol)(const struct sk_buff *skb); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* These flag bits are private to the generic network queueing @@ -380,6 +385,11 @@ struct napi_struct { /* control-path-only fields follow */ struct list_head dev_list; struct hlist_node napi_hash_node; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; enum { @@ -658,6 +668,14 @@ struct netdev_queue { #ifdef CONFIG_BQL struct dql dql; #endif + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + 
CK_KABI_RESERVE(8) } ____cacheline_aligned_in_smp; extern int sysctl_fb_tunnels_only_for_init_net; @@ -823,6 +841,9 @@ struct xps_dev_maps { struct rcu_head rcu; unsigned int nr_ids; s16 num_tc; + + CK_KABI_RESERVE(1) + struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */ }; @@ -1038,6 +1059,11 @@ struct xfrmdev_ops { int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack); void (*xdo_dev_policy_delete) (struct xfrm_policy *x); void (*xdo_dev_policy_free) (struct xfrm_policy *x); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #endif @@ -1645,6 +1671,23 @@ struct net_device_ops { int (*ndo_hwtstamp_set)(struct net_device *dev, struct kernel_hwtstamp_config *kernel_config, struct netlink_ext_ack *extack); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) + CK_KABI_RESERVE(13) + CK_KABI_RESERVE(14) + CK_KABI_RESERVE(15) + CK_KABI_RESERVE(16) }; /** @@ -2417,6 +2460,22 @@ struct net_device { struct rtnl_hw_stats64 *offload_xstats_l3; struct devlink_port *devlink_port; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(13) + CK_KABI_RESERVE(14) + CK_KABI_RESERVE(15) + CK_KABI_RESERVE(16) }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -2719,6 +2778,11 @@ struct offload_callbacks { struct sk_buff *(*gro_receive)(struct list_head *head, struct sk_buff *skb); int (*gro_complete)(struct sk_buff *skb, int nhoff); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct packet_offload { diff --git a/include/linux/netfilter.h 
b/include/linux/netfilter.h index cc5a2a220af8..840254d1dbb3 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -179,6 +179,8 @@ struct nf_sockopt_ops { int (*get)(struct sock *sk, int optval, void __user *user, int *len); /* Use the module struct to lock set/get code in place */ struct module *owner; + + CK_KABI_RESERVE(1) }; /* Function to register/unregister hook points. */ diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 2770db2fa080..0b65d9b6597d 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -4,6 +4,7 @@ #include #include +#include struct ip_conntrack_stat { unsigned int found; @@ -19,6 +20,8 @@ struct ip_conntrack_stat { unsigned int expect_delete; unsigned int search_restart; unsigned int chaintoolong; + + CK_KABI_RESERVE(1) }; #define NFCT_INFOMASK 7UL diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 5897f3dbaf7c..743ece7a7bbb 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -217,6 +217,9 @@ struct xt_target { unsigned short proto; unsigned short family; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* Furniture shopping... 
*/ diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h index 0f1d024bd958..1a15faeaf768 100644 --- a/include/linux/ns_common.h +++ b/include/linux/ns_common.h @@ -3,6 +3,7 @@ #define _LINUX_NS_COMMON_H #include +#include struct proc_ns_operations; @@ -11,6 +12,8 @@ struct ns_common { const struct proc_ns_operations *ops; unsigned int inum; refcount_t count; + + CK_KABI_RESERVE(1) }; #endif diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index 771cb0285872..c9162ab4fd25 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -38,6 +38,15 @@ struct nsproxy { struct time_namespace *time_ns; struct time_namespace *time_ns_for_children; struct cgroup_namespace *cgroup_ns; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; extern struct nsproxy init_nsproxy; diff --git a/include/linux/of.h b/include/linux/of.h index 024dda54b9c7..8917b1e23e52 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -65,6 +65,13 @@ struct device_node { unsigned int unique_id; struct of_irq_controller *irq_trans; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) }; #define MAX_PHANDLE_ARGS 16 diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 15793a4af9d4..03046c9394d9 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1283,6 +1283,11 @@ struct readahead_control { unsigned int _batch_count; bool _workingset; unsigned long _pflags; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define DEFINE_READAHEAD(ractl, f, r, m, i) \ diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 5cb694031072..f4ce2e9b7e94 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -89,6 +89,8 @@ struct pci_epc_ops { const struct pci_epc_features* 
(*get_features)(struct pci_epc *epc, u8 func_no, u8 vfunc_no); struct module *owner; + + CK_KABI_RESERVE(1) }; /** @@ -150,6 +152,8 @@ struct pci_epc { /* mutex to protect against concurrent access of EP controller */ struct mutex lock; unsigned long function_num_map; + + CK_KABI_RESERVE(1) }; /** diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 3f44b6aec477..5e43a8af6e53 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -51,6 +51,8 @@ struct pci_epf_header { u16 subsys_vendor_id; u16 subsys_id; enum pci_interrupt_pin interrupt_pin; + + CK_KABI_RESERVE(1) }; /** @@ -121,6 +123,8 @@ struct pci_epf_bar { size_t size; enum pci_barno barno; int flags; + + CK_KABI_RESERVE(1) }; /** @@ -180,6 +184,8 @@ struct pci_epf { unsigned long vfunction_num_map; struct list_head pci_vepf; const struct pci_epc_event_ops *event_ops; + + CK_KABI_RESERVE(1) }; /** diff --git a/include/linux/pci.h b/include/linux/pci.h index 7b2404e7f540..379ac091df36 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -540,6 +540,23 @@ struct pci_dev { /* These methods index pci_reset_fn_methods[] */ u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) + CK_KABI_RESERVE(13) + CK_KABI_RESERVE(14) + CK_KABI_RESERVE(15) + CK_KABI_RESERVE(16) }; static inline struct pci_dev *pci_physfn(struct pci_dev *dev) @@ -689,6 +706,15 @@ struct pci_bus { struct bin_attribute *legacy_mem; /* Legacy mem */ unsigned int is_added:1; unsigned int unsafe_warn:1; /* warned about RW1C config write */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; #define to_pci_bus(n) container_of(n, 
struct pci_bus, dev) @@ -944,6 +970,15 @@ struct pci_driver { struct device_driver driver; struct pci_dynids dynids; bool driver_managed_dma; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; static inline struct pci_driver *to_pci_driver(struct device_driver *drv) diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 3a10d6ec3ee7..256b124d3d74 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -45,6 +45,15 @@ struct hotplug_slot_ops { int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); int (*reset_slot) (struct hotplug_slot *slot, bool probe); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; /** @@ -63,6 +72,15 @@ struct hotplug_slot { struct pci_slot *pci_slot; struct module *owner; const char *mod_name; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 43538ac32380..a6a0b4408016 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -540,6 +540,11 @@ struct pmu { * Check period value for PERF_EVENT_IOC_PERIOD ioctl. 
*/ int (*check_period) (struct perf_event *event, u64 value); /* optional */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; enum perf_addr_filter_action_t { @@ -841,6 +846,19 @@ struct perf_event { */ __u32 orig_type; #endif /* CONFIG_PERF_EVENTS */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) }; /* @@ -966,6 +984,8 @@ struct perf_event_context { * that until the signal is delivered. */ local_t nr_pending; + + CK_KABI_RESERVE(1) }; /* @@ -1009,6 +1029,8 @@ struct perf_cpu_context { int heap_size; struct perf_event **heap; struct perf_event *heap_default[2]; + + CK_KABI_RESERVE(1) }; struct perf_output_handle { diff --git a/include/linux/pm.h b/include/linux/pm.h index 629c1633bbd0..8d54915df1af 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -640,6 +640,13 @@ struct pm_subsys_data { #ifdef CONFIG_PM_GENERIC_DOMAINS struct pm_domain_data *domain_data; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) }; /* @@ -720,6 +727,9 @@ struct dev_pm_info { struct pm_subsys_data *subsys_data; /* Owned by the subsystem. 
*/ void (*set_latency_tolerance)(struct device *, s32); struct dev_pm_qos *qos; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; extern int dev_pm_get_subsys_data(struct device *dev); @@ -746,6 +756,9 @@ struct dev_pm_domain { int (*activate)(struct device *dev); void (*sync)(struct device *dev); void (*dismiss)(struct device *dev); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 4a69d4af3ff8..444b651f9f84 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -112,6 +112,9 @@ struct dev_pm_qos_request { struct freq_qos_request freq; } data; struct device *dev; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct dev_pm_qos { @@ -122,6 +125,9 @@ struct dev_pm_qos { struct dev_pm_qos_request *resume_latency_req; struct dev_pm_qos_request *latency_tolerance_req; struct dev_pm_qos_request *flags_req; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* Action requested to pm_qos_update_target */ diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 6eb9adaef52b..0427a8c37db7 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -14,6 +14,7 @@ #endif #include +#include struct wake_irq; @@ -61,6 +62,9 @@ struct wakeup_source { struct device *dev; bool active:1; bool autosleep_enabled:1; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #define for_each_wakeup_source(ws) \ diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index 468328b1e1dd..4df1db8896e3 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h @@ -62,6 +62,11 @@ struct posix_clock_operations { ssize_t (*read) (struct posix_clock *pc, uint flags, char __user *buf, size_t cnt); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /** @@ -88,6 +93,15 @@ struct posix_clock { struct device *dev; struct rw_semaphore rwsem; bool zombie; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + 
CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; /** diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index d607f51404fc..9a9db1da4723 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -2,6 +2,7 @@ #ifndef _linux_POSIX_TIMERS_H #define _linux_POSIX_TIMERS_H +#include #include #include #include @@ -145,6 +146,11 @@ struct posix_cputimers_work { struct callback_head work; struct mutex mutex; unsigned int scheduled; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline void posix_cputimers_init(struct posix_cputimers *pct) diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h index f1fd3a8044e0..4730a6bf41a1 100644 --- a/include/linux/psi_types.h +++ b/include/linux/psi_types.h @@ -205,6 +205,9 @@ struct psi_group { u64 rtpoll_total[NR_PSI_STATES - 1]; u64 rtpoll_next_update; u64 rtpoll_until; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #else /* CONFIG_PSI */ diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 1ef4e0f9bd2a..bae7ae9f23d9 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -193,6 +193,12 @@ struct ptp_clock_info { int (*verify)(struct ptp_clock_info *ptp, unsigned int pin, enum ptp_pin_function func, unsigned int chan); long (*do_aux_work)(struct ptp_clock_info *ptp); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) }; struct ptp_clock; diff --git a/include/linux/quota.h b/include/linux/quota.h index 07071e64abf3..574cc9ad5192 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -47,6 +47,7 @@ #include #include #include +#include #undef USRQUOTA #undef GRPQUOTA @@ -318,6 +319,9 @@ struct quota_format_ops { int (*commit_dqblk)(struct dquot *dquot); /* Write structure for one user */ int (*release_dqblk)(struct dquot *dquot); /* Called when last reference to dquot 
is being dropped */ int (*get_next_id)(struct super_block *sb, struct kqid *qid); /* Get next ID with existing structure in the quota file */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* Operations working with dquots */ @@ -337,6 +341,9 @@ struct dquot_operations { int (*get_inode_usage) (struct inode *, qsize_t *); /* Get next ID with active quota structure */ int (*get_next_id) (struct super_block *sb, struct kqid *qid); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct path; @@ -440,6 +447,11 @@ struct quotactl_ops { int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *); int (*get_state)(struct super_block *, struct qc_state *); int (*rm_xquota)(struct super_block *, unsigned int); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct quota_format_type { diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index 659d13a7ddaa..d1933edbce59 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -16,6 +16,7 @@ #include #include +#include /* Simple unsegmented callback lists. */ struct rcu_cblist { @@ -214,6 +215,11 @@ struct rcu_segcblist { #endif long seglen[RCU_CBLIST_NSEGS]; u8 flags; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define RCU_SEGCBLIST_INITIALIZER(n) \ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 3e093c29021a..0d157c0ee981 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -5,6 +5,7 @@ * Declarations for Reverse Mapping functions in mm/rmap.c */ +#include #include #include #include @@ -64,6 +65,8 @@ struct anon_vma { /* Interval tree of private "related" vmas */ struct rb_root_cached rb_root; + + CK_KABI_RESERVE(1) }; /* diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 189140bf11fc..bcd203c5d4ea 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -82,6 +82,8 @@ struct sbitmap { * cachelines until the map is exhausted. 
*/ unsigned int __percpu *alloc_hint; + + CK_KABI_RESERVE(1) }; #define SBQ_WAIT_QUEUES 8 diff --git a/include/linux/sched.h b/include/linux/sched.h index 7839b5feba6b..5a0298975a06 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -11,6 +11,7 @@ #include +#include #include #include #include @@ -396,6 +397,11 @@ struct sched_info { unsigned long long last_queued; #endif /* CONFIG_SCHED_INFO */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* @@ -415,6 +421,11 @@ struct sched_info { struct load_weight { unsigned long weight; u32 inv_weight; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /** @@ -508,6 +519,11 @@ struct sched_avg { unsigned long runnable_avg; unsigned long util_avg; struct util_est util_est; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_aligned; struct sched_statistics { @@ -558,6 +574,11 @@ struct sched_statistics { #endif #endif /* CONFIG_SCHEDSTATS */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_aligned; struct sched_entity { @@ -616,6 +637,15 @@ struct sched_entity { #if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) unsigned int ht_ratio; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; struct sched_rt_entity { @@ -634,6 +664,11 @@ struct sched_rt_entity { /* rq "owned" by this entity/group: */ struct rt_rq *my_q; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __randomize_layout; struct sched_dl_entity { @@ -707,6 +742,11 @@ struct sched_dl_entity { */ struct sched_dl_entity *pi_se; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #ifdef CONFIG_UCLAMP_TASK @@ -1577,6 +1617,15 @@ struct task_struct { }; 
unsigned long wait_moment; + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct. diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 0014d3adaf84..603ad8b4ada6 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -245,6 +245,11 @@ struct signal_struct { * and may have inconsistent * permissions. */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __randomize_layout; /* diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 4c14fe127223..e978832c93b2 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -82,6 +82,11 @@ struct sched_domain_shared { atomic_t nr_busy_cpus; int has_idle_cores; int nr_idle_scan; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct sched_domain { @@ -152,6 +157,12 @@ struct sched_domain { struct sched_domain_shared *shared; unsigned int span_weight; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + /* * Span of all CPUs in this domain. 
* diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 4cc52698e214..3fd7251ba2a6 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -34,6 +34,9 @@ struct user_struct { /* Miscellaneous per-user rate limit */ struct ratelimit_state ratelimit; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; extern int uids_sysfs_init(void); diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 224293b2dd06..ac9565dd6f46 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -4,6 +4,7 @@ #include #include +#include /* * This struct is used to pass information from page reclaim to the shrinkers. @@ -83,6 +84,12 @@ struct shrinker { #endif /* objs pending delete, per node */ atomic_long_t *nr_deferred; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) }; #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5f11f9873341..5703f351e0a7 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1043,6 +1043,18 @@ struct sk_buff { ); /* end headers group */ + /* + * kABI: The kmem_caches of struct sk_buff are initialized with + * SLAB_HWCACHE_ALIGN flag, such as skbuff_head_cache and + * skbuff_fclone_cache, which causes each skb to be forcibly + * aligned with cacheline size(64 bytes). + * Reserve 24 bytes, total 256 bytes, this will not break + * cacheline alignment. + */ + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + /* These elements must be at the end, see alloc_skb() for details. 
*/ sk_buff_data_t tail; sk_buff_data_t end; diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 6ccfd9236387..4d793604e574 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -108,6 +108,11 @@ struct sk_psock { struct delayed_work work; struct sock *sk_pair; struct rcu_work rwork; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len, diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 8f3f72480e78..2da11d07b547 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -41,6 +41,15 @@ struct srcu_data { /* ->srcu_data_have_cbs[]. */ int cpu; struct srcu_struct *ssp; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; /* diff --git a/include/linux/stat.h b/include/linux/stat.h index 52150570d37a..d487187976ff 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -2,7 +2,7 @@ #ifndef _LINUX_STAT_H #define _LINUX_STAT_H - +#include #include #include @@ -53,6 +53,9 @@ struct kstat { u32 dio_mem_align; u32 dio_offset_align; u64 change_cookie; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* These definitions are internal to the kernel for now. Mainly used by nfsd. 
*/ diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index ea7a74ea7389..59e41990f8b5 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h @@ -27,6 +27,8 @@ struct cpu_stop_work { unsigned long caller; void *arg; struct cpu_stop_done *done; + + CK_KABI_RESERVE(1) }; int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg); diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 3d8b215f32d5..54673955d109 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -44,6 +44,9 @@ struct svc_pool { struct percpu_counter sp_threads_woken; unsigned long sp_flags; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } ____cacheline_aligned_in_smp; /* bits for sp_flags */ @@ -96,6 +99,9 @@ struct svc_serv { * entries in the svc_cb_list */ bool sv_bc_enabled; /* service uses backchannel */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** diff --git a/include/linux/swap.h b/include/linux/swap.h index 0201dd8c49e7..d7f44a91dec1 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -321,6 +321,14 @@ struct swap_info_struct { */ struct work_struct discard_work; /* discard worker */ struct swap_cluster_list discard_clusters; /* discard clusters list */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + struct plist_node avail_lists[]; /* * entries in swap_avail_heads, one * entry per node. 
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index fd3fe5c8c17f..9f10e60e32db 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -183,6 +183,8 @@ struct bin_attribute { char *, loff_t, size_t); int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr, struct vm_area_struct *vma); + + CK_KABI_RESERVE(1) }; /** diff --git a/include/linux/task_io_accounting.h b/include/linux/task_io_accounting.h index 6f6acce064de..6b9e504b7a61 100644 --- a/include/linux/task_io_accounting.h +++ b/include/linux/task_io_accounting.h @@ -43,4 +43,13 @@ struct task_io_accounting { */ u64 cancelled_write_bytes; #endif /* CONFIG_TASK_IO_ACCOUNTING */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 9b371aa7c796..daafa1545818 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -165,6 +165,9 @@ struct tcp_request_sock { * after data-in-SYN. 
*/ u8 syn_tos; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req) @@ -460,6 +463,11 @@ struct tcp_sock { */ struct request_sock __rcu *fastopen_rsk; struct saved_syn *saved_syn; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; enum tsq_enum { @@ -505,6 +513,9 @@ struct tcp_timewait_sock { #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) diff --git a/include/linux/timer.h b/include/linux/timer.h index 9162f275819a..38fbcdc78310 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -7,6 +7,7 @@ #include #include #include +#include struct timer_list { /* @@ -21,6 +22,11 @@ struct timer_list { #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #ifdef CONFIG_LOCKDEP diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index aa1bc4172662..3a14307c19cb 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -126,6 +126,8 @@ struct trace_iterator { long idx; /* All new field here will be zeroed out in pipe_read */ + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; enum trace_iter_flags { diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h index 4dc4955f0fbf..00eb4f999952 100644 --- a/include/linux/tracepoint-defs.h +++ b/include/linux/tracepoint-defs.h @@ -10,6 +10,7 @@ #include #include +#include struct static_call_key; @@ -39,6 +40,8 @@ struct tracepoint { int (*regfunc)(void); void (*unregfunc)(void); struct tracepoint_func __rcu *funcs; + + CK_KABI_RESERVE(1) }; #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h index 6b367eb17979..7187b88d706c 100644 --- a/include/linux/tty_port.h +++ 
b/include/linux/tty_port.h @@ -44,6 +44,8 @@ struct tty_port_client_operations { void (*lookahead_buf)(struct tty_port *port, const u8 *cp, const u8 *fp, size_t count); void (*write_wakeup)(struct tty_port *port); + + CK_KABI_RESERVE(1) }; extern const struct tty_port_client_operations tty_port_default_client_ops; @@ -121,6 +123,10 @@ struct tty_port { int drain_delay; struct kref kref; void *client_data; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) }; /* tty_port::iflags bits -- use atomic bit ops */ diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 733f2a97589b..c814a21eff99 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -102,6 +102,11 @@ struct user_namespace { struct ucounts *ucounts; long ucount_max[UCOUNT_COUNTS]; long rlimit_max[UCOUNT_RLIMIT_COUNTS]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __randomize_layout; struct ucounts { diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 99660197a36c..7d07a1643076 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -53,6 +53,9 @@ struct watchdog_ops { unsigned int (*get_timeleft)(struct watchdog_device *); int (*restart)(struct watchdog_device *, unsigned long, void *); long (*ioctl)(struct watchdog_device *, unsigned int, unsigned long); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** struct watchdog_device - The structure that defines a watchdog device @@ -119,6 +122,9 @@ struct watchdog_device { #define WDOG_STOP_ON_UNREGISTER 4 /* Should be stopped on unregister */ #define WDOG_NO_PING_ON_SUSPEND 5 /* Ping worker should be stopped on suspend */ struct list_head deferred; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 52c6dd6d80ac..12d830469e60 100644 --- a/include/linux/workqueue.h +++ 
b/include/linux/workqueue.h @@ -14,6 +14,7 @@ #include #include #include +#include struct workqueue_struct; @@ -102,6 +103,11 @@ struct work_struct { #ifdef CONFIG_LOCKDEP struct lockdep_map lockdep_map; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define WORK_DATA_INIT() ATOMIC_LONG_INIT((unsigned long)WORK_STRUCT_NO_POOL) @@ -115,6 +121,11 @@ struct delayed_work { /* target workqueue and CPU ->timer uses to queue ->work */ struct workqueue_struct *wq; int cpu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct rcu_work { diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 083387c00f0c..6cc3a8bcb533 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -89,6 +89,9 @@ struct writeback_control { size_t wb_lcand_bytes; /* bytes written by last candidate */ size_t wb_tcand_bytes; /* bytes written by this candidate */ #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static inline blk_opf_t wbc_to_write_flags(struct writeback_control *wbc) diff --git a/include/linux/xattr.h b/include/linux/xattr.h index d20051865800..25d9bda1177b 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -45,6 +45,8 @@ struct xattr_handler { struct mnt_idmap *idmap, struct dentry *dentry, struct inode *inode, const char *name, const void *buffer, size_t size, int flags); + + CK_KABI_RESERVE(1) }; /** diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h index 42207fc44660..60da3f65e3e8 100644 --- a/include/net/dcbnl.h +++ b/include/net/dcbnl.h @@ -9,6 +9,7 @@ #define __NET_DCBNL_H__ #include +#include struct net_device; @@ -131,6 +132,15 @@ struct dcbnl_rtnl_ops { /* rewrite */ int (*dcbnl_setrewr)(struct net_device *dev, struct dcb_app *app); int (*dcbnl_delrewr)(struct net_device *dev, struct dcb_app *app); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + 
CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; #endif /* __NET_DCBNL_H__ */ diff --git a/include/net/devlink.h b/include/net/devlink.h index 29fd1b4ee654..d98e0604328f 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1506,6 +1506,15 @@ struct devlink_ops { enum devlink_selftest_status (*selftest_run)(struct devlink *devlink, unsigned int id, struct netlink_ext_ack *extack); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; void *devlink_priv(struct devlink *devlink); diff --git a/include/net/dst.h b/include/net/dst.h index 78884429deed..c2f211a628b6 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -92,6 +92,15 @@ struct dst_entry { #ifdef CONFIG_64BIT struct lwtunnel_state *lwtstate; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; struct dst_metrics { diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 3a9001a042a5..b4d97a2e5281 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NET_DST_OPS_H #define _NET_DST_OPS_H +#include #include #include #include @@ -41,6 +42,15 @@ struct dst_ops { struct kmem_cache *kmem_cachep; struct percpu_counter pcpuc_entries ____cacheline_aligned_in_smp; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; static inline int dst_entries_get_fast(struct dst_ops *dst) diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 82da359bca03..7e2adbcaa576 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -44,6 +44,15 @@ struct fib_rule { struct fib_rule_port_range sport_range; struct fib_rule_port_range dport_range; struct rcu_head 
rcu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; struct fib_lookup_arg { diff --git a/include/net/flow.h b/include/net/flow.h index 335bbc52171c..f8dfd041541d 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -12,6 +12,7 @@ #include #include #include +#include struct flow_keys; @@ -42,6 +43,9 @@ struct flowi_common { kuid_t flowic_uid; __u32 flowic_multipath_hash; struct flowi_tunnel flowic_tun_key; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; union flowi_uli { @@ -88,6 +92,9 @@ struct flowi4 { #define fl4_icmp_code uli.icmpt.code #define fl4_mh_type uli.mht.type #define fl4_gre_key uli.gre_key + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __attribute__((__aligned__(BITS_PER_LONG/8))); static inline void flowi4_init_output(struct flowi4 *fl4, int oif, @@ -149,6 +156,9 @@ struct flowi6 { #define fl6_mh_type uli.mht.type #define fl6_gre_key uli.gre_key __u32 mp_hash; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) } __attribute__((__aligned__(BITS_PER_LONG/8))); struct flowi { @@ -168,6 +178,8 @@ struct flowi { #define flowi_secid u.__fl_common.flowic_secid #define flowi_tun_key u.__fl_common.flowic_tun_key #define flowi_uid u.__fl_common.flowic_uid + + CK_KABI_RESERVE(1) } __attribute__((__aligned__(BITS_PER_LONG/8))); static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 1a7131d6cb0e..fcf794974311 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -6,6 +6,7 @@ #include #include #include +#include #include struct bpf_prog; @@ -382,6 +383,8 @@ struct flow_dissector { unsigned long long used_keys; /* each bit represents presence of one key id */ unsigned short int offset[FLOW_DISSECTOR_KEY_MAX]; + + CK_KABI_RESERVE(1) }; struct flow_keys_basic { diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h 
index 9efa9a59e81f..8e2ae4b44668 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -329,6 +329,11 @@ struct flow_action_entry { } pppoe; }; struct flow_action_cookie *user_cookie; /* user defined action cookie */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; struct flow_action { diff --git a/include/net/genetlink.h b/include/net/genetlink.h index e8c34aa4a640..fc52a9f716ff 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -18,6 +18,8 @@ struct genl_multicast_group { char name[GENL_NAMSIZ]; u8 flags; u8 cap_sys_admin:1; + + CK_KABI_RESERVE(1) }; struct genl_split_ops; @@ -89,6 +91,8 @@ struct genl_family { int id; /* starting number of multicast group IDs in this family */ unsigned int mcgrp_offset; + + CK_KABI_RESERVE(1) }; /** @@ -113,6 +117,8 @@ struct genl_info { possible_net_t _net; void * user_ptr[2]; struct netlink_ext_ack *extack; + + CK_KABI_RESERVE(1) }; static inline struct net *genl_info_net(const struct genl_info *info) @@ -195,6 +201,11 @@ struct genl_ops { u8 internal_flags; u8 flags; u8 validate; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /** diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 31bf475eca76..9382c06e0bd1 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -219,6 +219,23 @@ struct inet6_dev { struct rcu_head rcu; unsigned int ra_mtu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) + CK_KABI_RESERVE(13) + CK_KABI_RESERVE(14) + CK_KABI_RESERVE(15) + CK_KABI_RESERVE(16) }; static inline void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 1121d614942c..e2e9c7109efb 100644 --- a/include/net/ip6_fib.h 
+++ b/include/net/ip6_fib.h @@ -84,6 +84,8 @@ struct fib6_node { int fn_sernum; struct fib6_info __rcu *rr_ptr; struct rcu_head rcu; + + CK_KABI_RESERVE(1) }; struct fib6_gc_args { @@ -203,6 +205,9 @@ struct fib6_info { struct rcu_head rcu; struct nexthop *nh; + + CK_KABI_RESERVE(1) + struct fib6_nh fib6_nh[]; }; @@ -219,6 +224,8 @@ struct rt6_info { /* more non-fragment space at head required */ unsigned short rt6i_nfheader_len; + + CK_KABI_RESERVE(1) }; struct fib6_result { diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index 031c661aa14d..723a63f7cce6 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -43,6 +43,11 @@ struct l3mdev_ops { /* IPv6 ops */ struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *dev, struct flowi6 *fl6); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #ifdef CONFIG_NET_L3_MASTER_DEV diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 53bd2d02a4f0..c9d552c18f86 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -33,6 +33,12 @@ struct lwtunnel_state { int (*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*orig_input)(struct sk_buff *); struct rcu_head rcu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + __u8 data[]; }; diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 0d28172193fa..eccce6a78198 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -86,6 +86,8 @@ struct neigh_parms { u32 qlen; int data[NEIGH_VAR_DATA_MAX]; DECLARE_BITMAP(data_state, NEIGH_VAR_DATA_MAX); + + CK_KABI_RESERVE(1) }; static inline void neigh_var_set(struct neigh_parms *p, int index, int val) @@ -162,6 +164,10 @@ struct neighbour { struct rcu_head rcu; struct net_device *dev; netdevice_tracker dev_tracker; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + u8 primary_key[]; } __randomize_layout; @@ -234,6 +240,15 @@ struct neigh_table { struct 
neigh_statistics __percpu *stats; struct neigh_hash_table __rcu *nht; struct pneigh_entry **phash_buckets; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; enum { diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 958c805df191..4b70533d57cd 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -191,6 +191,11 @@ struct net { #if IS_ENABLED(CONFIG_SMC) struct netns_smc smc; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } __randomize_layout; #include diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h index cdcafb30d437..49a5dd290587 100644 --- a/include/net/netdev_rx_queue.h +++ b/include/net/netdev_rx_queue.h @@ -21,6 +21,15 @@ struct netdev_rx_queue { #ifdef CONFIG_XDP_SOCKETS struct xsk_buff_pool *pool; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) } ____cacheline_aligned_in_smp; /* diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 4085765c3370..e1ec22070b4d 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -123,6 +123,9 @@ struct nf_conn { /* Storage reserved for other modules, must be the last member */ union nf_conntrack_proto proto; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static inline struct nf_conn * diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index 0c1dac318e02..908f11a8cf83 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -63,6 +63,7 @@ struct nf_exp_event { struct nf_ct_event_notifier { int (*ct_event)(unsigned int events, const struct nf_ct_event *item); int 
(*exp_event)(unsigned int events, const struct nf_exp_event *item); + CK_KABI_RESERVE(1) }; void nf_conntrack_register_notifier(struct net *net, diff --git a/include/net/netlink.h b/include/net/netlink.h index 8a7cd1170e1f..117837ebcf6c 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -359,6 +359,11 @@ struct nla_policy { int (*validate)(const struct nlattr *attr, struct netlink_ext_ack *extack); }; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN) diff --git a/include/net/netns/can.h b/include/net/netns/can.h index 48b79f7e6236..ecee28c5e372 100644 --- a/include/net/netns/can.h +++ b/include/net/netns/can.h @@ -36,6 +36,8 @@ struct netns_can { /* CAN GW per-net gateway jobs */ struct hlist_head cgw_list; + + CK_KABI_RESERVE(1) }; #endif /* __NETNS_CAN_H__ */ diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 7a41c4791536..536145eb1228 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -239,5 +239,10 @@ struct netns_ipv4 { atomic_t rt_genid; siphash_key_t ip_id_key; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #endif diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 5f2cfd84570a..75a669da99b8 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -56,6 +56,8 @@ struct netns_sysctl_ipv6 { u8 skip_notify_on_dev_down; u8 fib_notify_on_flag_change; u8 icmpv6_error_anycast_as_unicast; + + CK_KABI_RESERVE(1) }; struct netns_ipv6 { @@ -119,6 +121,8 @@ struct netns_ipv6 { u32 seq; } ip6addrlbl_table; struct ioam6_pernet_data *ioam6_data; + + CK_KABI_RESERVE(1) }; #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h index a6a0bf4a247e..5f283ec350af 100644 --- a/include/net/netns/netfilter.h +++ b/include/net/netns/netfilter.h @@ -33,5 +33,7 @@ struct netns_nf { #if 
IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) unsigned int defrag_ipv6_users; #endif + + CK_KABI_RESERVE(1) }; #endif diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h index cc8060c017d5..221ce4a63eeb 100644 --- a/include/net/netns/nftables.h +++ b/include/net/netns/nftables.h @@ -4,6 +4,8 @@ struct netns_nftables { u8 gencursor; + + CK_KABI_RESERVE(1) }; #endif diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h index 7eff3d981b89..6322fcb6ab9d 100644 --- a/include/net/netns/sctp.h +++ b/include/net/netns/sctp.h @@ -179,6 +179,9 @@ struct netns_sctp { #ifdef CONFIG_NET_L3_MASTER_DEV int l3mdev_accept; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #endif /* __NETNS_SCTP_H__ */ diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h index 582212ada3ba..d55342489ef5 100644 --- a/include/net/netns/smc.h +++ b/include/net/netns/smc.h @@ -22,5 +22,38 @@ struct netns_smc { int sysctl_smcr_testlink_time; int sysctl_wmem; int sysctl_rmem; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + CK_KABI_RESERVE(9) + CK_KABI_RESERVE(10) + CK_KABI_RESERVE(11) + CK_KABI_RESERVE(12) + CK_KABI_RESERVE(13) + CK_KABI_RESERVE(14) + CK_KABI_RESERVE(15) + CK_KABI_RESERVE(16) + CK_KABI_RESERVE(17) + CK_KABI_RESERVE(18) + CK_KABI_RESERVE(19) + CK_KABI_RESERVE(20) + CK_KABI_RESERVE(21) + CK_KABI_RESERVE(22) + CK_KABI_RESERVE(23) + CK_KABI_RESERVE(24) + CK_KABI_RESERVE(25) + CK_KABI_RESERVE(26) + CK_KABI_RESERVE(27) + CK_KABI_RESERVE(28) + CK_KABI_RESERVE(29) + CK_KABI_RESERVE(30) + CK_KABI_RESERVE(31) + CK_KABI_RESERVE(32) }; #endif diff --git a/include/net/netns/unix.h b/include/net/netns/unix.h index 9859d134d5a8..7df05c34e7be 100644 --- a/include/net/netns/unix.h +++ b/include/net/netns/unix.h @@ -17,6 +17,9 @@ struct netns_unix { struct unix_table table; int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; + + 
CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #endif /* __NETNS_UNIX_H__ */ diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 423b52eca908..e53dc9e98cec 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -83,6 +83,8 @@ struct netns_xfrm { spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; + + CK_KABI_RESERVE(1) }; #endif diff --git a/include/net/nexthop.h b/include/net/nexthop.h index 2b12725de9c0..73d1071a1b3a 100644 --- a/include/net/nexthop.h +++ b/include/net/nexthop.h @@ -114,6 +114,10 @@ struct nh_grp_entry { struct list_head nh_list; struct nexthop *nh_parent; /* nexthop of group with this entry */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) }; struct nh_group { @@ -126,6 +130,9 @@ struct nh_group { bool has_v4; struct nh_res_table __rcu *res_table; + + CK_KABI_RESERVE(1) + struct nh_grp_entry nh_entries[]; }; diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h index 887e7946a597..7835c8ecbda8 100644 --- a/include/net/page_pool/types.h +++ b/include/net/page_pool/types.h @@ -68,6 +68,9 @@ struct page_pool_params { /* private: used by test code only */ void (*init_callback)(struct page *page, void *arg); void *init_arg; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; #ifdef CONFIG_PAGE_POOL_STATS @@ -180,6 +183,8 @@ struct page_pool { refcount_t user_cnt; u64 destroy_cnt; + + CK_KABI_RESERVE(1) }; struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp); diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index c1fa6fee0acf..135176bc4eec 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -152,6 +152,15 @@ struct rtnl_link_ops { int (*fill_linkxstats)(struct sk_buff *skb, const struct net_device *dev, int *prividx, int attr); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; int __rtnl_link_register(struct 
rtnl_link_ops *ops); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 326d3a322c10..0c1718df1ed7 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -127,6 +127,16 @@ struct Qdisc { struct rcu_head rcu; netdevice_tracker dev_tracker; struct lock_class_key root_lock_key; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) + /* private data */ long privdata[] ____cacheline_aligned; }; @@ -278,6 +288,8 @@ struct Qdisc_class_ops { struct sk_buff *skb, struct tcmsg*); int (*dump_stats)(struct Qdisc *, unsigned long, struct gnet_dump *); + + CK_KABI_RESERVE(1) }; /* Qdisc_class_ops flag values */ @@ -323,6 +335,8 @@ struct Qdisc_ops { u32 (*egress_block_get)(struct Qdisc *sch); struct module *owner; + + CK_KABI_RESERVE(1) }; @@ -398,6 +412,8 @@ struct tcf_proto_ops { struct module *owner; int flags; + + CK_KABI_RESERVE(1) }; /* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags diff --git a/include/net/snmp.h b/include/net/snmp.h index 468a67836e2f..58b51cf331ef 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -91,6 +91,8 @@ struct icmpv6msg_mib_device { #define TCP_MIB_MAX __TCP_MIB_MAX struct tcp_mib { unsigned long mibs[TCP_MIB_MAX]; + + CK_KABI_RESERVE(1) }; /* UDP */ diff --git a/include/net/sock.h b/include/net/sock.h index dc625f94ee37..e4fa259c920f 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -545,6 +545,15 @@ struct sock { struct rcu_head sk_rcu; netns_tracker ns_tracker; struct hlist_node sk_bind2_node; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; enum sk_pacing { @@ -1369,6 +1378,15 @@ struct proto { struct list_head node; int (*diag_destroy)(struct sock *sk, int err); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + 
CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) } __randomize_layout; int proto_register(struct proto *prot, int alloc_slab); @@ -1532,6 +1550,8 @@ proto_memory_pressure(struct proto *prot) struct prot_inuse { int all; int val[PROTO_INUSE_NR]; + + CK_KABI_RESERVE(1) }; static inline void sock_prot_inuse_add(const struct net *net, diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 6ec140b0a61b..d79eab371b3b 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -26,6 +26,9 @@ struct sock_reuseport { unsigned int bind_inany:1; unsigned int has_conns:1; struct bpf_prog __rcu *prog; /* optional BPF sock selector */ + + CK_KABI_RESERVE(1) + struct sock *socks[]; /* array of sock pointers */ }; diff --git a/include/net/tcp.h b/include/net/tcp.h index b3917af309e0..e6399c51acf8 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1060,6 +1060,9 @@ struct rate_sample { bool is_app_limited; /* is sample from packet with bubble in pipe? */ bool is_retrans; /* is sample from retransmission? */ bool is_ack_delayed; /* is this (likely) a delayed ACK? 
*/ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct tcp_congestion_ops { @@ -1112,6 +1115,11 @@ struct tcp_congestion_ops { void (*init)(struct sock *sk); /* cleanup private data (optional) */ void (*release)(struct sock *sk); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_aligned_in_smp; int tcp_register_congestion_control(struct tcp_congestion_ops *type); diff --git a/include/net/tls.h b/include/net/tls.h index 6c642ea18050..f0f8309c2db6 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -191,6 +191,11 @@ enum tls_context_flags { struct cipher_context { char *iv; char *rec_seq; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; union tls_crypto_context { @@ -261,6 +266,11 @@ struct tls_context { struct list_head list; refcount_t refcount; struct rcu_head rcu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; enum tls_offload_ctx_dir { @@ -279,6 +289,13 @@ struct tlsdev_ops { int (*tls_dev_resync)(struct net_device *netdev, struct sock *sk, u32 seq, u8 *rcd_sn, enum tls_offload_ctx_dir direction); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) }; enum tls_offload_sync_type { diff --git a/include/net/xdp.h b/include/net/xdp.h index de08c8e0d134..97bfe6ae09ae 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -64,6 +64,11 @@ struct xdp_rxq_info { struct xdp_mem_info mem; unsigned int napi_id; u32 frag_size; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) } ____cacheline_aligned; /* perf critical, avoid false-sharing */ struct xdp_txq_info { @@ -175,6 +180,8 @@ struct xdp_frame { struct net_device *dev_rx; /* used by cpumap */ u32 frame_sz; u32 flags; /* supported values defined in xdp_buff_flags */ + + CK_KABI_RESERVE(1) }; static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame) diff 
--git a/include/net/xfrm.h b/include/net/xfrm.h index 93a9866ee481..87bf8fa5de59 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -124,6 +124,8 @@ struct xfrm_state_walk { u8 proto; u32 seq; struct xfrm_address_filter *filter; + + CK_KABI_RESERVE(1) }; enum { diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index b0bdff26fc88..4d1797ec6526 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -87,6 +87,10 @@ struct xsk_buff_pool { * sockets share a single cq when the same netdev and queue id is shared. */ spinlock_t cq_lock; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + struct xdp_buff_xsk *free_heads[]; }; diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 6ae00983a612..ccb8e7ae1699 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -141,6 +141,11 @@ struct scsi_cmnd { * to be at an address < 16Mb). */ int result; /* Status code from lower level driver */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */ diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index c38f4fe5e64c..b3d2868c9a7e 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -280,6 +280,14 @@ struct scsi_device { struct mutex state_mutex; enum scsi_device_state sdev_state; struct task_struct *quiesced_by; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + unsigned long sdev_data[]; } __attribute__((aligned(sizeof(unsigned long)))); @@ -366,6 +374,12 @@ struct scsi_target { char scsi_level; enum scsi_target_state state; void *hostdata; /* available to low-level driver */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + unsigned long starget_data[]; /* for the transport */ /* starget_data must be the last element!!!! 
*/ } __attribute__((aligned(sizeof(unsigned long)))); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index f9d5ce6170a7..9232930d73c4 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -497,6 +497,11 @@ struct scsi_host_template { /* Delay for runtime autosuspend */ int rpm_autosuspend_delay; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /* @@ -710,6 +715,13 @@ struct Scsi_Host { */ struct device *dma_dev; + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + /* * We should ensure that this is aligned, both for better performance * and also because some compilers (m68k) don't automatically force diff --git a/kernel/audit.h b/kernel/audit.h index a60d2840559e..e71720eb3ebd 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -208,6 +208,8 @@ struct audit_context { }; int fds[2]; struct audit_proctitle proctitle; + + CK_KABI_RESERVE(1) }; extern bool audit_ever_enabled; diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 5f2356b47b2d..ebe0176900b8 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -60,6 +60,8 @@ struct xdp_dev_bulk_queue { struct net_device *dev_rx; struct bpf_prog *xdp_prog; unsigned int count; + + CK_KABI_RESERVE(1) }; struct bpf_dtab_netdev { diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index 1162e07cdaea..ce79c4dfe987 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -56,6 +56,8 @@ struct rt_mutex_waiter { struct rt_mutex_base *lock; unsigned int wake_state; struct ww_acquire_ctx *ww_ctx; + + CK_KABI_RESERVE(1) }; /** diff --git a/kernel/module/sysfs.c b/kernel/module/sysfs.c index c921bf044050..a5c59142f2e6 100644 --- a/kernel/module/sysfs.c +++ b/kernel/module/sysfs.c @@ -22,6 +22,8 @@ struct module_sect_attr { struct bin_attribute battr; unsigned long address; + + CK_KABI_RESERVE(1) }; struct 
module_sect_attrs { diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 6d87a617d00e..3322061596a2 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -7,6 +7,8 @@ * (balbir@in.ibm.com). */ +#include + /* Time spent by the tasks of the CPU accounting group executing in ... */ enum cpuacct_stat_index { CPUACCT_STAT_USER, /* ... user mode */ @@ -50,6 +52,11 @@ struct cpuacct { #ifdef CONFIG_SCHED_SLI unsigned long avenrun_r[3]; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) diff --git a/kernel/sched/cpudeadline.h b/kernel/sched/cpudeadline.h index 0adeda93b5fb..aad1c1c3e19a 100644 --- a/kernel/sched/cpudeadline.h +++ b/kernel/sched/cpudeadline.h @@ -13,6 +13,11 @@ struct cpudl { int size; cpumask_var_t free_cpus; struct cpudl_item *elements; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #ifdef CONFIG_SMP diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f64862a1978d..06d5e8a39e75 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1481,6 +1481,12 @@ struct numa_group { struct rcu_head rcu; unsigned long total_faults; unsigned long max_faults_cpu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + /* * faults[] array is split into two regions: faults_mem and faults_cpu. 
* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c6d22cef8dd6..ab688805a674 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -89,6 +89,7 @@ #endif #include +#include #include "cpupri.h" #include "cpudeadline.h" @@ -299,6 +300,11 @@ struct rt_bandwidth { u64 rt_runtime; struct hrtimer rt_period_timer; unsigned int rt_period_active; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline int dl_bandwidth_enabled(void) @@ -328,6 +334,11 @@ struct dl_bw { raw_spinlock_t lock; u64 bw; u64 total_bw; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; extern void init_dl_bw(struct dl_bw *dl_b); @@ -421,6 +432,11 @@ struct cfs_bandwidth { int nr_burst; u64 throttled_time; u64 burst_time; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) #endif }; @@ -483,6 +499,15 @@ struct task_group { #ifdef CONFIG_SCHED_SLI struct sched_cgroup_lat_stat_cpu __percpu *lat_stat_cpu; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -721,6 +746,15 @@ struct cfs_rq { #endif /* CONFIG_FAIR_GROUP_SCHED */ unsigned long nr_uninterruptible; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; static inline int rt_bandwidth_enabled(void) @@ -769,6 +803,11 @@ struct rt_rq { #endif unsigned long nr_uninterruptible; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) @@ -837,6 +876,11 @@ struct dl_rq { * by the GRUB algorithm. 
*/ u64 bw_ratio; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -963,6 +1007,11 @@ struct root_domain { * CPUs of the rd. Protected by RCU. */ struct perf_domain __rcu *pd; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; extern void init_defrootdomain(void); @@ -1261,6 +1310,15 @@ struct rq { u64 last_acpu_update_time; u64 last_acpu_update_time_task; #endif + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + CK_KABI_RESERVE(5) + CK_KABI_RESERVE(6) + CK_KABI_RESERVE(7) + CK_KABI_RESERVE(8) }; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -2002,6 +2060,11 @@ struct sched_group_capacity { int id; #endif + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + unsigned long cpumask[]; /* Balance mask */ }; @@ -2015,6 +2078,11 @@ struct sched_group { int asym_prefer_cpu; /* CPU of highest priority in group */ int flags; + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) + /* * The CPUs this group covers. * @@ -2407,6 +2475,11 @@ struct sched_class { #endif void (*update_nr_uninterruptible)(struct task_struct *p, long inc); void (*update_nr_iowait)(struct task_struct *p, long inc); + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; static inline void put_prev_task(struct rq *rq, struct task_struct *prev) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 7fa1c7c9151a..bb8c6520225e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -199,6 +199,9 @@ struct worker_pool { * from get_work_pool(). 
*/ struct rcu_head rcu; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* @@ -323,6 +326,9 @@ struct workqueue_struct { /* hot fields used during command issue, aligned to cacheline */ unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */ struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static struct kmem_cache *pwq_cache; diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h index f6275944ada7..f8158ed6637a 100644 --- a/kernel/workqueue_internal.h +++ b/kernel/workqueue_internal.h @@ -59,6 +59,11 @@ struct worker { /* used only by rescuers to point to the target workqueue */ struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + CK_KABI_RESERVE(3) + CK_KABI_RESERVE(4) }; /** diff --git a/mm/internal.h b/mm/internal.h index d971c8e67738..960547d85c40 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -579,6 +579,9 @@ struct compact_control { * ensure forward progress. 
*/ bool alloc_contig; /* alloc_contig_range allocation */ + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /* diff --git a/mm/ksm.c b/mm/ksm.c index 9aafdc73efa2..0bc985c93f5d 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -140,6 +140,9 @@ struct ksm_scan { unsigned long address; struct ksm_rmap_item **rmap_list; unsigned long seqnr; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; /** diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e6a0d5034741..116c87e0af8a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -741,6 +741,9 @@ struct memcg_vmstats_percpu { /* Cgroup1: threshold notifications & softlimit tree updates */ unsigned long nr_page_events; unsigned long targets[MEM_CGROUP_NTARGETS]; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; struct memcg_vmstats { diff --git a/net/devlink/devl_internal.h b/net/devlink/devl_internal.h index f6b5fea2e13c..4c5b1d13bcfa 100644 --- a/net/devlink/devl_internal.h +++ b/net/devlink/devl_internal.h @@ -52,6 +52,10 @@ struct devlink { */ struct mutex lock; struct lock_class_key lock_key; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) + u8 reload_failed:1; refcount_t refcount; struct rcu_work rwork; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 285482060082..2eaa8c1ba92f 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1494,6 +1494,9 @@ struct uncached_list { spinlock_t lock; struct list_head head; struct list_head quarantine; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index fc5c53462025..c214af56c126 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -132,6 +132,9 @@ struct uncached_list { spinlock_t lock; struct list_head head; struct list_head quarantine; + + CK_KABI_RESERVE(1) + CK_KABI_RESERVE(2) }; static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list); -- Gitee From b2102a03311d8cfd0f77c6a7cefd984607f7b056 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Thu, 15 Aug 2024 
18:42:01 +0800 Subject: [PATCH 1193/2138] anolis: x86/mce/zhaoxin: Update mcelog to decode PCIE, ZDI/ZPI and DRAM errors ANBZ: #9754 1. Adjusted some code logic Avoid having no log information when a CPER_SEC_PROC_GENERIC type error occurs on non-Zhaoxin platforms. 2. Optimized some code Removed some redundant function parameters and adjusted the types of some function parameters. Signed-off-by: leoliu-oc Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3706 --- arch/x86/include/asm/mce.h | 4 ++-- arch/x86/kernel/acpi/apei.c | 14 +++++++++----- arch/x86/kernel/cpu/mce/apei.c | 18 ++++++++++-------- drivers/acpi/apei/apei-base.c | 3 ++- drivers/acpi/apei/ghes.c | 19 +++++++++---------- include/acpi/apei.h | 2 +- 6 files changed, 33 insertions(+), 27 deletions(-) diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index eff1cf90895c..a02d2215a79f 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -289,11 +289,11 @@ struct cper_sec_mem_err; extern void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err); -extern void zx_apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err); +extern void zx_apei_mce_report_mem_error(struct cper_sec_mem_err *mem_err); struct cper_sec_pcie; extern void zx_apei_mce_report_pcie_error(int corrected, struct cper_sec_pcie *pcie_err); struct cper_sec_proc_generic; -extern void zx_apei_mce_report_zdi_error(int corrected, struct cper_sec_proc_generic *zdi_err); +extern void zx_apei_mce_report_zdi_error(struct cper_sec_proc_generic *zdi_err); /* * Enumerate new IP types and HWID values in AMD processors which support diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c index 26d9963b66bd..e3782035d7c3 100644 --- a/arch/x86/kernel/acpi/apei.c +++ b/arch/x86/kernel/acpi/apei.c @@ -42,7 +42,7 @@ void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) #ifdef CONFIG_X86_MCE if (boot_cpu_data.x86_vendor == 
X86_VENDOR_ZHAOXIN || boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) - zx_apei_mce_report_mem_error(sev, mem_err); + zx_apei_mce_report_mem_error(mem_err); else apei_mce_report_mem_error(sev, mem_err); #endif @@ -57,13 +57,17 @@ void arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err) #endif } -void arch_apei_report_zdi_error(int sev, struct cper_sec_proc_generic *zdi_err) +bool arch_apei_report_zdi_error(guid_t *sec_type, struct cper_sec_proc_generic *zdi_err) { #ifdef CONFIG_X86_MCE - if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || - boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) - zx_apei_mce_report_zdi_error(sev, zdi_err); + if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) && + (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC))) { + zx_apei_mce_report_zdi_error(zdi_err); + return true; + } #endif + return false; } int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id) diff --git a/arch/x86/kernel/cpu/mce/apei.c b/arch/x86/kernel/cpu/mce/apei.c index 7c23ae2e3006..c77cffffc696 100644 --- a/arch/x86/kernel/cpu/mce/apei.c +++ b/arch/x86/kernel/cpu/mce/apei.c @@ -63,7 +63,7 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) } EXPORT_SYMBOL_GPL(apei_mce_report_mem_error); -void zx_apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) +void zx_apei_mce_report_mem_error(struct cper_sec_mem_err *mem_err) { struct mce m; int apei_error = 0; @@ -92,18 +92,19 @@ void zx_apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err apei_error = apei_write_mce(&m); break; case 8: - if (mem_err->requestor_id == 2) + if (mem_err->requestor_id == 2) { m.status = 0x98200040000400b0; - else if (mem_err->requestor_id == 3) { + } else if (mem_err->requestor_id == 3) { m.status = 0xba400000000600a0; apei_error = apei_write_mce(&m); - } else if (mem_err->requestor_id == 4) + } else if (mem_err->requestor_id == 4) { 
m.status = 0x98200100000300b0; - else if (mem_err->requestor_id == 5) { + } else if (mem_err->requestor_id == 5) { m.status = 0xba000000000500b0; apei_error = apei_write_mce(&m); - } else + } else { pr_info("Undefined Parity error\n"); + } break; case 10: if (mem_err->requestor_id == 6) { @@ -112,8 +113,9 @@ void zx_apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err } else if (mem_err->requestor_id == 7) { m.status = 0xba000000000800b0; apei_error = apei_write_mce(&m); - } else + } else { pr_info("Undefined dvad error\n"); + } break; case 13: m.status = 0x9c200040000100c0; @@ -163,7 +165,7 @@ void zx_apei_mce_report_pcie_error(int severity, struct cper_sec_pcie *pcie_err) } EXPORT_SYMBOL_GPL(zx_apei_mce_report_pcie_error); -void zx_apei_mce_report_zdi_error(int severity, struct cper_sec_proc_generic *zdi_err) +void zx_apei_mce_report_zdi_error(struct cper_sec_proc_generic *zdi_err) { struct mce m; int apei_error = 0; diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 05ee09357bd7..ad8d5d5e97cc 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -778,8 +778,9 @@ void __weak arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err) } EXPORT_SYMBOL_GPL(arch_apei_report_pcie_error); -void __weak arch_apei_report_zdi_error(int sev, struct cper_sec_proc_generic *zdi_err) +bool __weak arch_apei_report_zdi_error(guid_t *sec_type, struct cper_sec_proc_generic *zdi_err) { + return false; } EXPORT_SYMBOL_GPL(arch_apei_report_zdi_error); diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 9ccf2a51c64a..6f8ee3041ee9 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -796,7 +796,7 @@ static bool ghes_do_proc(struct ghes *ghes, atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err); - arch_apei_report_mem_error(sec_sev, mem_err); + arch_apei_report_mem_error(sev, mem_err); queued = ghes_handle_memory_failure(gdata, sev, sync); } else if 
(guid_equal(sec_type, &CPER_SEC_PCIE)) { @@ -807,17 +807,16 @@ static bool ghes_do_proc(struct ghes *ghes, } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) { queued = ghes_handle_arm_hw_error(gdata, sev, sync); - } else if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) { - struct cper_sec_proc_generic *zdi_err = acpi_hest_get_payload(gdata); - - arch_apei_report_zdi_error(sec_sev, zdi_err); } else { void *err = acpi_hest_get_payload(gdata); - ghes_defer_non_standard_event(gdata, sev); - log_non_standard_event(sec_type, fru_id, fru_text, - sec_sev, err, - gdata->error_data_length); + if (!arch_apei_report_zdi_error(sec_type, + (struct cper_sec_proc_generic *)err)) { + ghes_defer_non_standard_event(gdata, sev); + log_non_standard_event(sec_type, fru_id, fru_text, + sec_sev, err, + gdata->error_data_length); + } } } @@ -1246,7 +1245,7 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, struct cper_sec_proc_generic *zdi_err = acpi_hest_get_payload(gdata); - arch_apei_report_zdi_error(sev, zdi_err); + arch_apei_report_zdi_error(sec_type, zdi_err); } } ghes_print_queued_estatus(); diff --git a/include/acpi/apei.h b/include/acpi/apei.h index fcb5814a3f43..808cfa7d16b1 100644 --- a/include/acpi/apei.h +++ b/include/acpi/apei.h @@ -53,7 +53,7 @@ int erst_clear(u64 record_id); int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data); void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err); void arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err); -void arch_apei_report_zdi_error(int sev, struct cper_sec_proc_generic *zdi_err); +bool arch_apei_report_zdi_error(guid_t *sec_type, struct cper_sec_proc_generic *zdi_err); #endif #endif -- Gitee From ef5fc250dabcd2826c111fae290c02b4b77febf8 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 29 Jan 2024 13:46:35 +0100 Subject: [PATCH 1194/2138] arm64/mm: make set_ptes() robust when OAs cross 48-bit boundary ANBZ: #9728 commit 
6e8f588708971e0626f5be808e8c4b6cdb86eb0b upstream Patch series "mm/memory: optimize fork() with PTE-mapped THP", v3. Now that the rmap overhaul[1] is upstream that provides a clean interface for rmap batching, let's implement PTE batching during fork when processing PTE-mapped THPs. This series is partially based on Ryan's previous work[2] to implement cont-pte support on arm64, but its a complete rewrite based on [1] to optimize all architectures independent of any such PTE bits, and to use the new rmap batching functions that simplify the code and prepare for further rmap accounting changes. We collect consecutive PTEs that map consecutive pages of the same large folio, making sure that the other PTE bits are compatible, and (a) adjust the refcount only once per batch, (b) call rmap handling functions only once per batch and (c) perform batch PTE setting/updates. While this series should be beneficial for adding cont-pte support on ARM64[2], it's one of the requirements for maintaining a total mapcount[3] for large folios with minimal added overhead and further changes[4] that build up on top of the total mapcount. Independent of all that, this series results in a speedup during fork with PTE-mapped THP, which is the default with THPs that are smaller than a PMD (for example, 16KiB to 1024KiB mTHPs for anonymous memory[5]). 
On an Intel Xeon Silver 4210R CPU, fork'ing with 1GiB of PTE-mapped folios of the same size (stddev < 1%) results in the following runtimes for fork() (shorter is better): Folio Size | v6.8-rc1 | New | Change ------------------------------------------ 4KiB | 0.014328 | 0.014035 | - 2% 16KiB | 0.014263 | 0.01196 | -16% 32KiB | 0.014334 | 0.01094 | -24% 64KiB | 0.014046 | 0.010444 | -26% 128KiB | 0.014011 | 0.010063 | -28% 256KiB | 0.013993 | 0.009938 | -29% 512KiB | 0.013983 | 0.00985 | -30% 1024KiB | 0.013986 | 0.00982 | -30% 2048KiB | 0.014305 | 0.010076 | -30% Note that these numbers are even better than the ones from v1 (verified over multiple reboots), even though there were only minimal code changes. Well, I removed a pte_mkclean() call for anon folios, maybe that also plays a role. But my experience is that fork() is extremely sensitive to code size, inlining, ... so I suspect we'll see on other architectures rather a change of -20% instead of -30%, and it will be easy to "lose" some of that speedup in the future by subtle code changes. Next up is PTE batching when unmapping. Only tested on x86-64. Compile-tested on most other architectures. [1] https://lkml.kernel.org/r/20231220224504.646757-1-david@redhat.com [2] https://lkml.kernel.org/r/20231218105100.172635-1-ryan.roberts@arm.com [3] https://lkml.kernel.org/r/20230809083256.699513-1-david@redhat.com [4] https://lkml.kernel.org/r/20231124132626.235350-1-david@redhat.com [5] https://lkml.kernel.org/r/20231207161211.2374093-1-ryan.roberts@arm.com This patch (of 15): Since the high bits [51:48] of an OA are not stored contiguously in the PTE, there is a theoretical bug in set_ptes(), which just adds PAGE_SIZE to the pte to get the pte with the next pfn. This works until the pfn crosses the 48-bit boundary, at which point we overflow into the upper attributes. 
Of course one could argue (and Matthew Wilcox has :) that we will never see a folio cross this boundary because we only allow naturally aligned power-of-2 allocation, so this would require a half-petabyte folio. So its only a theoretical bug. But its better that the code is robust regardless. I've implemented pte_next_pfn() as part of the fix, which is an opt-in core-mm interface. So that is now available to the core-mm, which will be needed shortly to support forthcoming fork()-batching optimizations. Link: https://lkml.kernel.org/r/20240129124649.189745-1-david@redhat.com Link: https://lkml.kernel.org/r/20240125173534.1659317-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20240129124649.189745-2-david@redhat.com Fixes: 4a169d61c2ed ("arm64: implement the new page table range API") Closes: https://lore.kernel.org/linux-mm/fdaeb9a5-d890-499a-92c8-d171df43ad01@arm.com/ Signed-off-by: Ryan Roberts Signed-off-by: David Hildenbrand Reviewed-by: Catalin Marinas Reviewed-by: David Hildenbrand Tested-by: Ryan Roberts Reviewed-by: Mike Rapoport (IBM) Cc: Albert Ou Cc: Alexander Gordeev Cc: Aneesh Kumar K.V Cc: Christian Borntraeger Cc: Christophe Leroy Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. 
Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Cc: Alexandre Ghiti Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- arch/arm64/include/asm/pgtable.h | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index a0a07932d6e6..82cf1de4c435 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -350,6 +350,21 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, set_pte(ptep, pte); } +/* + * Select all bits except the pfn + */ +static inline pgprot_t pte_pgprot(pte_t pte) +{ + unsigned long pfn = pte_pfn(pte); + + return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); +} + +#define pte_next_pfn pte_next_pfn +static inline pte_t pte_next_pfn(pte_t pte) +{ + return pfn_pte(pte_pfn(pte) + 1, pte_pgprot(pte)); +} static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr) @@ -362,7 +377,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, break; ptep++; addr += PAGE_SIZE; - pte_val(pte) += PAGE_SIZE; + pte = pte_next_pfn(pte); } } #define set_ptes set_ptes @@ -441,16 +456,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE)); } -/* - * Select all bits except the pfn - */ -static inline pgprot_t pte_pgprot(pte_t pte) -{ - unsigned long pfn = pte_pfn(pte); - - return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); -} - #ifdef CONFIG_NUMA_BALANCING /* * See the comment in include/linux/pgtable.h -- Gitee From f407412dd45b1b6c7d0b8897d49f6e26147f6902 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 29 Jan 2024 13:46:36 +0100 Subject: [PATCH 1195/2138] arm/pgtable: define 
PFN_PTE_SHIFT ANBZ: #9728 commit 12b884f2e09ab42d3879a3e2c703e7157691013c upstream We want to make use of pte_next_pfn() outside of set_ptes(). Let's simply define PFN_PTE_SHIFT, required by pte_next_pfn(). Link: https://lkml.kernel.org/r/20240129124649.189745-3-david@redhat.com Signed-off-by: David Hildenbrand Tested-by: Ryan Roberts Reviewed-by: Mike Rapoport (IBM) Cc: Albert Ou Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Aneesh Kumar K.V Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- arch/arm/include/asm/pgtable.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index d657b84b6bf7..be91e376df79 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -209,6 +209,8 @@ static inline void __sync_icache_dcache(pte_t pteval) extern void __sync_icache_dcache(pte_t pteval); #endif +#define PFN_PTE_SHIFT PAGE_SHIFT + void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval, unsigned int nr); #define set_ptes set_ptes -- Gitee From de94d4d3df33c99b5b3c58b0f6a274f501c32b0c Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 29 Jan 2024 13:46:37 +0100 Subject: [PATCH 1196/2138] nios2/pgtable: define PFN_PTE_SHIFT ANBZ: #9728 commit 3a6a6c3fbda8f50fc9f0e5fede8a0f70abdea033 upstream We want to make use of pte_next_pfn() outside of set_ptes(). Let's simply define PFN_PTE_SHIFT, required by pte_next_pfn(). 
Link: https://lkml.kernel.org/r/20240129124649.189745-4-david@redhat.com Signed-off-by: David Hildenbrand Tested-by: Ryan Roberts Reviewed-by: Mike Rapoport (IBM) Cc: Albert Ou Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Aneesh Kumar K.V Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- arch/nios2/include/asm/pgtable.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h index 5144506dfa69..d052dfcbe8d3 100644 --- a/arch/nios2/include/asm/pgtable.h +++ b/arch/nios2/include/asm/pgtable.h @@ -178,6 +178,8 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) *ptep = pteval; } +#define PFN_PTE_SHIFT 0 + static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr) { -- Gitee From 72c809ec9e21ca2bea410a3e99e586001f0d7032 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 29 Jan 2024 13:46:38 +0100 Subject: [PATCH 1197/2138] powerpc/pgtable: define PFN_PTE_SHIFT ANBZ: #9728 commit f7dc4d689e6fafe3d8424f600b924f2d59d1a3cf upstream We want to make use of pte_next_pfn() outside of set_ptes(). Let's simply define PFN_PTE_SHIFT, required by pte_next_pfn(). Link: https://lkml.kernel.org/r/20240129124649.189745-5-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Christophe Leroy Tested-by: Ryan Roberts Reviewed-by: Mike Rapoport (IBM) Cc: Albert Ou Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Aneesh Kumar K.V Cc: Catalin Marinas Cc: Christian Borntraeger Cc: David S. 
Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- arch/powerpc/include/asm/pgtable.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index d0ee46de248e..db2fe941e4c8 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -41,6 +41,8 @@ struct mm_struct; #ifndef __ASSEMBLY__ +#define PFN_PTE_SHIFT PTE_RPN_SHIFT + void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr); #define set_ptes set_ptes -- Gitee From 181b59310e16804dcc48295755296f57ba595e6e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 29 Jan 2024 13:46:39 +0100 Subject: [PATCH 1198/2138] riscv/pgtable: define PFN_PTE_SHIFT ANBZ: #9728 commit 57c254b2fb31f0160829f4bf1cb993a9e9c302a8 upstream We want to make use of pte_next_pfn() outside of set_ptes(). Let's simply define PFN_PTE_SHIFT, required by pte_next_pfn(). Link: https://lkml.kernel.org/r/20240129124649.189745-6-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Alexandre Ghiti Tested-by: Ryan Roberts Reviewed-by: Mike Rapoport (IBM) Cc: Albert Ou Cc: Alexander Gordeev Cc: Aneesh Kumar K.V Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. 
Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- arch/riscv/include/asm/pgtable.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 37829dab4a0a..8fccba6ae37c 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -528,6 +528,8 @@ static inline void __set_pte_at(pte_t *ptep, pte_t pteval) set_pte(ptep, pteval); } +#define PFN_PTE_SHIFT _PAGE_PFN_SHIFT + static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval, unsigned int nr) { -- Gitee From 6630267bf6f42ca7af1c97f6dedb1dc9b84a9253 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 29 Jan 2024 13:46:40 +0100 Subject: [PATCH 1199/2138] s390/pgtable: define PFN_PTE_SHIFT ANBZ: #9728 commit 4555ac8b3c16f67f74c04ff71ce8c4a8fcee973a upstream We want to make use of pte_next_pfn() outside of set_ptes(). Let's simply define PFN_PTE_SHIFT, required by pte_next_pfn(). Link: https://lkml.kernel.org/r/20240129124649.189745-7-david@redhat.com Signed-off-by: David Hildenbrand Tested-by: Ryan Roberts Reviewed-by: Mike Rapoport (IBM) Cc: Albert Ou Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Aneesh Kumar K.V Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. 
Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- arch/s390/include/asm/pgtable.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index da2e91b5b192..0250073f522f 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1324,6 +1324,8 @@ pgprot_t pgprot_writecombine(pgprot_t prot); #define pgprot_writethrough pgprot_writethrough pgprot_t pgprot_writethrough(pgprot_t prot); +#define PFN_PTE_SHIFT PAGE_SHIFT + /* * Set multiple PTEs to consecutive pages with a single call. All PTEs * are within the same folio, PMD and VMA. -- Gitee From ea380c3d16918c5587e19927d5352e911ec3e6bc Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 29 Jan 2024 13:46:41 +0100 Subject: [PATCH 1200/2138] sparc/pgtable: define PFN_PTE_SHIFT ANBZ: #9728 commit ce7a9de353da053e55a68e2441196114547e38d0 upstream We want to make use of pte_next_pfn() outside of set_ptes(). Let's simply define PFN_PTE_SHIFT, required by pte_next_pfn(). Link: https://lkml.kernel.org/r/20240129124649.189745-8-david@redhat.com Signed-off-by: David Hildenbrand Tested-by: Ryan Roberts Reviewed-by: Mike Rapoport (IBM) Cc: Albert Ou Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Aneesh Kumar K.V Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. 
Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- arch/sparc/include/asm/pgtable_64.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 5e41033bf4ca..be9bcc50e4cb 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -928,6 +928,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT); } +#define PFN_PTE_SHIFT PAGE_SHIFT + static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr) { -- Gitee From c0b70ec478dbadbcd27ba24d55550c5db2890f70 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 29 Jan 2024 13:46:42 +0100 Subject: [PATCH 1201/2138] mm/pgtable: make pte_next_pfn() independent of set_ptes() ANBZ: #9728 commit 6cdfa1d5d5d8285108495c33588c48cdda81b647 upstream Let's provide pte_next_pfn(), independently of set_ptes(). This allows for using the generic pte_next_pfn() version in some arch-specific set_ptes() implementations, and prepares for reusing pte_next_pfn() in other context. Link: https://lkml.kernel.org/r/20240129124649.189745-9-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Christophe Leroy Tested-by: Ryan Roberts Reviewed-by: Mike Rapoport (IBM) Cc: Albert Ou Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Aneesh Kumar K.V Cc: Catalin Marinas Cc: Christian Borntraeger Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. 
Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- include/linux/pgtable.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 8b7daccd11be..503a6e108e27 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -205,7 +205,6 @@ static inline int pmd_young(pmd_t pmd) #define arch_flush_lazy_mmu_mode() do {} while (0) #endif -#ifndef set_ptes #ifndef pte_next_pfn static inline pte_t pte_next_pfn(pte_t pte) @@ -214,6 +213,7 @@ static inline pte_t pte_next_pfn(pte_t pte) } #endif +#ifndef set_ptes /** * set_ptes - Map consecutive pages to a contiguous range of addresses. * @mm: Address space to map the pages into. -- Gitee From 97533cd4a5cfb2a2b77175d6fb327a890bab6ce6 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 29 Jan 2024 13:46:43 +0100 Subject: [PATCH 1202/2138] arm/mm: use pte_next_pfn() in set_ptes() ANBZ: #9728 commit e5ea320aec811c0e5cddefda17052579e0306415 upstream Let's use our handy helper now that it's available on all archs. Link: https://lkml.kernel.org/r/20240129124649.189745-10-david@redhat.com Signed-off-by: David Hildenbrand Tested-by: Ryan Roberts Reviewed-by: Mike Rapoport (IBM) Cc: Albert Ou Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Aneesh Kumar K.V Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. 
Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- arch/arm/mm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 073de5b24560..735cca0ccfe2 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1822,6 +1822,6 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, if (--nr == 0) break; ptep++; - pte_val(pteval) += PAGE_SIZE; + pteval = pte_next_pfn(pteval); } } -- Gitee From fec0f842d13b34e33e407ed1b9f5a545e2a2f292 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 29 Jan 2024 13:46:44 +0100 Subject: [PATCH 1203/2138] powerpc/mm: use pte_next_pfn() in set_ptes() ANBZ: #9728 commit 802cc2ab33b0d8a013c216ca7f4caa9034bfc257 upstream Let's use our handy new helper. Note that the implementation is slightly different, but shouldn't really make a difference in practice. Link: https://lkml.kernel.org/r/20240129124649.189745-11-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Christophe Leroy Tested-by: Ryan Roberts Reviewed-by: Mike Rapoport (IBM) Cc: Albert Ou Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Aneesh Kumar K.V Cc: Catalin Marinas Cc: Christian Borntraeger Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. 
Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- arch/powerpc/mm/pgtable.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 4d69bfb9bc11..79b7b35c4899 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c @@ -220,10 +220,7 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, break; ptep++; addr += PAGE_SIZE; - /* - * increment the pfn. - */ - pte = pfn_pte(pte_pfn(pte) + 1, pte_pgprot((pte))); + pte = pte_next_pfn(pte); } } -- Gitee From 4910530e49e0e6e58bd333ac47d182c53857aeb4 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 29 Jan 2024 13:46:45 +0100 Subject: [PATCH 1204/2138] mm/memory: factor out copying the actual PTE in copy_present_pte() ANBZ: #9728 commit 23ed190868a65525b8941370630fbb215f12ebe8 upstream Let's prepare for further changes. Link: https://lkml.kernel.org/r/20240129124649.189745-12-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Reviewed-by: Mike Rapoport (IBM) Cc: Albert Ou Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Aneesh Kumar K.V Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. 
Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- mm/memory.c | 63 ++++++++++++++++++++++++++++------------------------- 1 file changed, 33 insertions(+), 30 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 3d5aeb9213e4..ef59864aadfc 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -919,6 +919,29 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma return 0; } +static inline void __copy_present_pte(struct vm_area_struct *dst_vma, + struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte, + pte_t pte, unsigned long addr) +{ + struct mm_struct *src_mm = src_vma->vm_mm; + + /* If it's a COW mapping, write protect it both processes. */ + if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) { + ptep_set_wrprotect(src_mm, addr, src_pte); + pte = pte_wrprotect(pte); + } + + /* If it's a shared mapping, mark it clean in the child. */ + if (src_vma->vm_flags & VM_SHARED) + pte = pte_mkclean(pte); + pte = pte_mkold(pte); + + if (!userfaultfd_wp(dst_vma)) + pte = pte_clear_uffd_wp(pte); + + set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); +} + /* * Copy one pte. Returns 0 if succeeded, or -EAGAIN if one preallocated page * is required to copy this pte. 
@@ -928,23 +951,23 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss, struct folio **prealloc) { - struct mm_struct *src_mm = src_vma->vm_mm; - unsigned long vm_flags = src_vma->vm_flags; pte_t pte = ptep_get(src_pte); struct page *page; struct folio *folio; page = vm_normal_page(src_vma, addr, pte); - if (page) - folio = page_folio(page); - if (page && folio_test_anon(folio)) { + if (unlikely(!page)) + goto copy_pte; + + folio = page_folio(page); + folio_get(folio); + if (folio_test_anon(folio)) { /* * If this page may have been pinned by the parent process, * copy the page immediately for the child so that we'll always * guarantee the pinned page won't be randomly replaced in the * future. */ - folio_get(folio); if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) { /* Page may be pinned, we have to copy. */ folio_put(folio); @@ -952,34 +975,14 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, addr, rss, prealloc, page); } rss[MM_ANONPAGES]++; - } else if (page) { - folio_get(folio); + VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio); + } else { folio_dup_file_rmap_pte(folio, page); rss[mm_counter_file(page)]++; } - /* - * If it's a COW mapping, write protect it both - * in the parent and the child - */ - if (is_cow_mapping(vm_flags) && pte_write(pte)) { - ptep_set_wrprotect(src_mm, addr, src_pte); - pte = pte_wrprotect(pte); - } - VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page)); - - /* - * If it's a shared mapping, mark it clean in - * the child - */ - if (vm_flags & VM_SHARED) - pte = pte_mkclean(pte); - pte = pte_mkold(pte); - - if (!userfaultfd_wp(dst_vma)) - pte = pte_clear_uffd_wp(pte); - - set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); +copy_pte: + __copy_present_pte(dst_vma, src_vma, dst_pte, src_pte, pte, addr); return 0; } -- Gitee From 73eee4a9c7b153e3fad37a2f880668a6325ee8e4 Mon Sep 17 
00:00:00 2001 From: David Hildenbrand Date: Mon, 29 Jan 2024 13:46:46 +0100 Subject: [PATCH 1205/2138] mm/memory: pass PTE to copy_present_pte() ANBZ: #9728 commit 53723298ba436830fdf0744c19b57b2a18f44041 upstream We already read it, let's just forward it. This patch is based on work by Ryan Roberts. [david@redhat.com: fix the hmm "exclusive_cow" selftest] Link: https://lkml.kernel.org/r/13f296b8-e882-47fd-b939-c2141dc28717@redhat.com Link: https://lkml.kernel.org/r/20240129124649.189745-13-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Reviewed-by: Mike Rapoport (IBM) Cc: Albert Ou Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Aneesh Kumar K.V Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- mm/memory.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index ef59864aadfc..86deaafbca70 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -948,10 +948,9 @@ static inline void __copy_present_pte(struct vm_area_struct *dst_vma, */ static inline int copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, - pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss, - struct folio **prealloc) + pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr, + int *rss, struct folio **prealloc) { - pte_t pte = ptep_get(src_pte); struct page *page; struct folio *folio; @@ -1083,6 +1082,8 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, progress += 8; continue; } + ptent = ptep_get(src_pte); + VM_WARN_ON_ONCE(!pte_present(ptent)); 
/* * Device exclusive entry restored, continue by copying @@ -1092,7 +1093,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, } /* copy_present_pte() will clear `*prealloc' if consumed */ ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte, - addr, rss, &prealloc); + ptent, addr, rss, &prealloc); /* * If we need a pre-allocated page for this pte, drop the * locks, allocate, and try again. -- Gitee From 6772317f30dd7e8582752501693a6cda94a91825 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 29 Jan 2024 13:46:47 +0100 Subject: [PATCH 1206/2138] mm/memory: optimize fork() with PTE-mapped THP ANBZ: #9728 commit f8d937761d65c87e9987b88ea7beb7bddc333a0e upstream Let's implement PTE batching when consecutive (present) PTEs map consecutive pages of the same large folio, and all other PTE bits besides the PFNs are equal. We will optimize folio_pte_batch() separately, to ignore selected PTE bits. This patch is based on work by Ryan Roberts. Use __always_inline for __copy_present_ptes() and keep the handling for single PTEs completely separate from the multi-PTE case: we really want the compiler to optimize for the single-PTE case with small folios, to not degrade performance. Note that PTE batching will never exceed a single page table and will always stay within VMA boundaries. Further, processing PTE-mapped THP that maybe pinned and have PageAnonExclusive set on at least one subpage should work as expected, but there is room for improvement: We will repeatedly (1) detect a PTE batch (2) detect that we have to copy a page (3) fall back and allocate a single page to copy a single page. For now we won't care as pinned pages are a corner case, and we should rather look into maintaining only a single PageAnonExclusive bit for large folios. 
Link: https://lkml.kernel.org/r/20240129124649.189745-14-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Reviewed-by: Mike Rapoport (IBM) Cc: Albert Ou Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Aneesh Kumar K.V Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- include/linux/pgtable.h | 31 +++++++++++ mm/memory.c | 112 +++++++++++++++++++++++++++++++++------- 2 files changed, 124 insertions(+), 19 deletions(-) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 503a6e108e27..5f8989a863b8 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -643,6 +643,37 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres } #endif +#ifndef wrprotect_ptes +/** + * wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same + * folio. + * @mm: Address space the pages are mapped into. + * @addr: Address the first page is mapped at. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to write-protect. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over ptep_set_wrprotect(). + * + * Note that PTE bits in the PTE range besides the PFN can differ. For example, + * some PTEs might be write-protected. + * + * Context: The caller holds the page table lock. The PTEs map consecutive + * pages that belong to the same folio. The PTEs are all in the same PMD. 
+ */ +static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr) +{ + for (;;) { + ptep_set_wrprotect(mm, addr, ptep); + if (--nr == 0) + break; + ptep++; + addr += PAGE_SIZE; + } +} +#endif + /* * On some architectures hardware does not set page access bit when accessing * memory page, it is responsibility of software setting this bit. It brings diff --git a/mm/memory.c b/mm/memory.c index 86deaafbca70..f2114bf68982 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -919,15 +919,15 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma return 0; } -static inline void __copy_present_pte(struct vm_area_struct *dst_vma, +static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte, - pte_t pte, unsigned long addr) + pte_t pte, unsigned long addr, int nr) { struct mm_struct *src_mm = src_vma->vm_mm; /* If it's a COW mapping, write protect it both processes. */ if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) { - ptep_set_wrprotect(src_mm, addr, src_pte); + wrprotect_ptes(src_mm, addr, src_pte, nr); pte = pte_wrprotect(pte); } @@ -939,26 +939,93 @@ static inline void __copy_present_pte(struct vm_area_struct *dst_vma, if (!userfaultfd_wp(dst_vma)) pte = pte_clear_uffd_wp(pte); - set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); + set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr); +} + +/* + * Detect a PTE batch: consecutive (present) PTEs that map consecutive + * pages of the same folio. + * + * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN. 
+ */ +static inline int folio_pte_batch(struct folio *folio, unsigned long addr, + pte_t *start_ptep, pte_t pte, int max_nr) +{ + unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio); + const pte_t *end_ptep = start_ptep + max_nr; + pte_t expected_pte = pte_next_pfn(pte); + pte_t *ptep = start_ptep + 1; + + VM_WARN_ON_FOLIO(!pte_present(pte), folio); + + while (ptep != end_ptep) { + pte = ptep_get(ptep); + + if (!pte_same(pte, expected_pte)) + break; + + /* + * Stop immediately once we reached the end of the folio. In + * corner cases the next PFN might fall into a different + * folio. + */ + if (pte_pfn(pte) == folio_end_pfn) + break; + + expected_pte = pte_next_pfn(expected_pte); + ptep++; + } + + return ptep - start_ptep; } /* - * Copy one pte. Returns 0 if succeeded, or -EAGAIN if one preallocated page - * is required to copy this pte. + * Copy one present PTE, trying to batch-process subsequent PTEs that map + * consecutive pages of the same folio by copying them as well. + * + * Returns -EAGAIN if one preallocated page is required to copy the next PTE. + * Otherwise, returns the number of copied PTEs (at least 1). */ static inline int -copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, +copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte, pte_t pte, unsigned long addr, - int *rss, struct folio **prealloc) + int max_nr, int *rss, struct folio **prealloc) { struct page *page; struct folio *folio; + int err, nr; page = vm_normal_page(src_vma, addr, pte); if (unlikely(!page)) goto copy_pte; folio = page_folio(page); + + /* + * If we likely have to copy, just don't bother with batching. Make + * sure that the common "small folio" case is as fast as possible + * by keeping the batching logic separate. 
+ */ + if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) { + nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr); + folio_ref_add(folio, nr); + if (folio_test_anon(folio)) { + if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page, + nr, src_vma))) { + folio_ref_sub(folio, nr); + return -EAGAIN; + } + rss[MM_ANONPAGES] += nr; + VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio); + } else { + folio_dup_file_rmap_ptes(folio, page, nr); + rss[mm_counter_file(page)] += nr; + } + __copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, + addr, nr); + return nr; + } + folio_get(folio); if (folio_test_anon(folio)) { /* @@ -970,8 +1037,9 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) { /* Page may be pinned, we have to copy. */ folio_put(folio); - return copy_present_page(dst_vma, src_vma, dst_pte, src_pte, - addr, rss, prealloc, page); + err = copy_present_page(dst_vma, src_vma, dst_pte, src_pte, + addr, rss, prealloc, page); + return err ? 
err : 1; } rss[MM_ANONPAGES]++; VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio); @@ -981,8 +1049,8 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, } copy_pte: - __copy_present_pte(dst_vma, src_vma, dst_pte, src_pte, pte, addr); - return 0; + __copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, 1); + return 1; } static inline struct folio *folio_prealloc(struct mm_struct *src_mm, @@ -1019,10 +1087,11 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, pte_t *src_pte, *dst_pte; pte_t ptent; spinlock_t *src_ptl, *dst_ptl; - int progress, ret = 0; + int progress, max_nr, ret = 0; int rss[NR_MM_COUNTERS]; swp_entry_t entry = (swp_entry_t){0}; struct folio *prealloc = NULL; + int nr; again: progress = 0; @@ -1053,6 +1122,8 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, arch_enter_lazy_mmu_mode(); do { + nr = 1; + /* * We are holding two locks at this point - either of them * could generate latencies in another task on another CPU. @@ -1091,9 +1162,10 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, */ WARN_ON_ONCE(ret != -ENOENT); } - /* copy_present_pte() will clear `*prealloc' if consumed */ - ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte, - ptent, addr, rss, &prealloc); + /* copy_present_ptes() will clear `*prealloc' if consumed */ + max_nr = (end - addr) / PAGE_SIZE; + ret = copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, + ptent, addr, max_nr, rss, &prealloc); /* * If we need a pre-allocated page for this pte, drop the * locks, allocate, and try again. 
@@ -1110,8 +1182,10 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, folio_put(prealloc); prealloc = NULL; } - progress += 8; - } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); + nr = ret; + progress += 8 * nr; + } while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr, + addr != end); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(orig_src_pte, src_ptl); @@ -1132,7 +1206,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, prealloc = folio_prealloc(src_mm, src_vma, addr, false); if (!prealloc) return -ENOMEM; - } else if (ret) { + } else if (ret < 0) { VM_WARN_ON_ONCE(1); } -- Gitee From 874c91ec89774511cc5121350c32ca04bf6b2320 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 29 Jan 2024 13:46:48 +0100 Subject: [PATCH 1207/2138] mm/memory: ignore dirty/accessed/soft-dirty bits in folio_pte_batch() ANBZ: #9728 commit 25365e10699aa0e320345d019194fbea9f37a4ae upstream Let's always ignore the accessed/young bit: we'll always mark the PTE as old in our child process during fork, and upcoming users will similarly not care. Ignore the dirty bit only if we don't want to duplicate the dirty bit into the child process during fork. Maybe, we could just set all PTEs in the child dirty if any PTE is dirty. For now, let's keep the behavior unchanged, this can be optimized later if required. Ignore the soft-dirty bit only if the bit doesn't have any meaning in the src vma, and similarly won't have any in the copied dst vma. For now, we won't bother with the uffd-wp bit. Link: https://lkml.kernel.org/r/20240129124649.189745-15-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Cc: Albert Ou Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Aneesh Kumar K.V Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. 
Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Cc: Mike Rapoport (IBM) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- mm/memory.c | 36 +++++++++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index f2114bf68982..311734f5ed90 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -942,24 +942,44 @@ static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma, set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr); } +/* Flags for folio_pte_batch(). */ +typedef int __bitwise fpb_t; + +/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */ +#define FPB_IGNORE_DIRTY ((__force fpb_t)BIT(0)) + +/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */ +#define FPB_IGNORE_SOFT_DIRTY ((__force fpb_t)BIT(1)) + +static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags) +{ + if (flags & FPB_IGNORE_DIRTY) + pte = pte_mkclean(pte); + if (likely(flags & FPB_IGNORE_SOFT_DIRTY)) + pte = pte_clear_soft_dirty(pte); + return pte_mkold(pte); +} + /* * Detect a PTE batch: consecutive (present) PTEs that map consecutive * pages of the same folio. * - * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN. + * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN, + * the accessed bit, dirty bit (with FPB_IGNORE_DIRTY) and soft-dirty bit + * (with FPB_IGNORE_SOFT_DIRTY). 
*/ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, - pte_t *start_ptep, pte_t pte, int max_nr) + pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags) { unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio); const pte_t *end_ptep = start_ptep + max_nr; - pte_t expected_pte = pte_next_pfn(pte); + pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte), flags); pte_t *ptep = start_ptep + 1; VM_WARN_ON_FOLIO(!pte_present(pte), folio); while (ptep != end_ptep) { - pte = ptep_get(ptep); + pte = __pte_batch_clear_ignored(ptep_get(ptep), flags); if (!pte_same(pte, expected_pte)) break; @@ -993,6 +1013,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma { struct page *page; struct folio *folio; + fpb_t flags = 0; int err, nr; page = vm_normal_page(src_vma, addr, pte); @@ -1007,7 +1028,12 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma * by keeping the batching logic separate. */ if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) { - nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr); + if (src_vma->vm_flags & VM_SHARED) + flags |= FPB_IGNORE_DIRTY; + if (!vma_soft_dirty_enabled(src_vma)) + flags |= FPB_IGNORE_SOFT_DIRTY; + + nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags); folio_ref_add(folio, nr); if (folio_test_anon(folio)) { if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page, -- Gitee From 4cb3c318249b2509e7cac967cf8ff5ae43dbdaaa Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 29 Jan 2024 13:46:49 +0100 Subject: [PATCH 1208/2138] mm/memory: ignore writable bit in folio_pte_batch() ANBZ: #9728 commit d7c0e5f722ab229153c22efc836bf220479bdce6 upstream ... and conditionally return to the caller if any PTE except the first one is writable. fork() has to make sure to properly write-protect in case any PTE is writable. Other users (e.g., page unmaping) are expected to not care. 
Link: https://lkml.kernel.org/r/20240129124649.189745-16-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Cc: Albert Ou Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Aneesh Kumar K.V Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: David S. Miller Cc: Dinh Nguyen Cc: Gerald Schaefer Cc: Heiko Carstens Cc: Matthew Wilcox Cc: Michael Ellerman Cc: Naveen N. Rao Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Russell King (Oracle) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Cc: Mike Rapoport (IBM) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3781 --- mm/memory.c | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 311734f5ed90..a4c790c8eb9e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -957,7 +957,7 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags) pte = pte_mkclean(pte); if (likely(flags & FPB_IGNORE_SOFT_DIRTY)) pte = pte_clear_soft_dirty(pte); - return pte_mkold(pte); + return pte_wrprotect(pte_mkold(pte)); } /* @@ -965,21 +965,32 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags) * pages of the same folio. * * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN, - * the accessed bit, dirty bit (with FPB_IGNORE_DIRTY) and soft-dirty bit - * (with FPB_IGNORE_SOFT_DIRTY). + * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and + * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY). + * + * If "any_writable" is set, it will indicate if any other PTE besides the + * first (given) PTE is writable. 
*/ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, - pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags) + pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags, + bool *any_writable) { unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio); const pte_t *end_ptep = start_ptep + max_nr; pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte), flags); pte_t *ptep = start_ptep + 1; + bool writable; + + if (any_writable) + *any_writable = false; VM_WARN_ON_FOLIO(!pte_present(pte), folio); while (ptep != end_ptep) { - pte = __pte_batch_clear_ignored(ptep_get(ptep), flags); + pte = ptep_get(ptep); + if (any_writable) + writable = !!pte_write(pte); + pte = __pte_batch_clear_ignored(pte, flags); if (!pte_same(pte, expected_pte)) break; @@ -992,6 +1003,9 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, if (pte_pfn(pte) == folio_end_pfn) break; + if (any_writable) + *any_writable |= writable; + expected_pte = pte_next_pfn(expected_pte); ptep++; } @@ -1013,6 +1027,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma { struct page *page; struct folio *folio; + bool any_writable; fpb_t flags = 0; int err, nr; @@ -1033,7 +1048,8 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma if (!vma_soft_dirty_enabled(src_vma)) flags |= FPB_IGNORE_SOFT_DIRTY; - nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags); + nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags, + &any_writable); folio_ref_add(folio, nr); if (folio_test_anon(folio)) { if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page, @@ -1047,6 +1063,8 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma folio_dup_file_rmap_ptes(folio, page, nr); rss[mm_counter_file(page)] += nr; } + if (any_writable) + pte = pte_mkwrite(pte, src_vma); __copy_present_ptes(dst_vma, src_vma, dst_pte, src_pte, pte, addr, nr); return nr; -- Gitee From 
92cdf8704cbd1ad8294a2e714a5786ab77e3f278 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Thu, 11 Jan 2024 15:24:27 +0000 Subject: [PATCH 1209/2138] mm: convert to should_zap_page() to should_zap_folio() ANBZ: #9728 commit eabafaaa957553142cdafc8ae804fb679e5a5f5e upstream Make should_zap_page() take a folio and rename it to should_zap_folio() as preparation for converting mm counter functions to take a folio. Saves a call to compound_head() hidden inside PageAnon(). [wangkefeng.wang@huawei.com: fix used-uninitialized warning] Link: https://lkml.kernel.org/r/962a7993-fce9-4de8-85cd-25e290f25736@huawei.com Link: https://lkml.kernel.org/r/20240111152429.3374566-9-willy@infradead.org Signed-off-by: Kefeng Wang Signed-off-by: Matthew Wilcox (Oracle) Cc: David Hildenbrand Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3789 --- mm/memory.c | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index a4c790c8eb9e..edd88d50e45f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1480,19 +1480,20 @@ static inline bool should_zap_cows(struct zap_details *details) return details->even_cows; } -/* Decides whether we should zap this page with the page pointer specified */ -static inline bool should_zap_page(struct zap_details *details, struct page *page) +/* Decides whether we should zap this folio with the folio pointer specified */ +static inline bool should_zap_folio(struct zap_details *details, + struct folio *folio) { - /* If we can make a decision without *page.. */ + /* If we can make a decision without *folio.. */ if (should_zap_cows(details)) return true; - /* E.g. the caller passes NULL for the case of a zero page */ - if (!page) + /* E.g. 
the caller passes NULL for the case of a zero folio */ + if (!folio) return true; - /* Otherwise we should only zap non-anon pages */ - return !PageAnon(page); + /* Otherwise we should only zap non-anon folios */ + return !folio_test_anon(folio); } static inline bool zap_drop_file_uffd_wp(struct zap_details *details) @@ -1545,7 +1546,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, arch_enter_lazy_mmu_mode(); do { pte_t ptent = ptep_get(pte); - struct folio *folio; + struct folio *folio = NULL; struct page *page; if (pte_none(ptent)) @@ -1558,7 +1559,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, unsigned int delay_rmap; page = vm_normal_page(vma, addr, ptent); - if (unlikely(!should_zap_page(details, page))) + if (page) + folio = page_folio(page); + + if (unlikely(!should_zap_folio(details, folio))) continue; ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); @@ -1571,7 +1575,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, continue; } - folio = page_folio(page); delay_rmap = 0; if (!folio_test_anon(folio)) { if (pte_dirty(ptent)) { @@ -1603,7 +1606,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, is_device_exclusive_entry(entry)) { page = pfn_swap_entry_to_page(entry); folio = page_folio(page); - if (unlikely(!should_zap_page(details, page))) + if (unlikely(!should_zap_folio(details, folio))) continue; /* * Both device private/exclusive mappings should only @@ -1625,9 +1628,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, print_bad_pte(vma, addr, ptent, NULL); } else if (is_migration_entry(entry)) { page = pfn_swap_entry_to_page(entry); - if (!should_zap_page(details, page)) + folio = page_folio(page); + if (!should_zap_folio(details, folio)) continue; - rss[mm_counter(page)]--; + rss[mm_counter(&folio->page)]--; } else if (pte_marker_entry_uffd_wp(entry)) { /* * For anon: always drop the marker; for file: only -- Gitee From 2629c038bdfc650036aa18ffce7e05b91bbd64f4 Mon Sep 17 
00:00:00 2001 From: David Hildenbrand Date: Wed, 14 Feb 2024 21:44:26 +0100 Subject: [PATCH 1210/2138] mm/memory: factor out zapping of present pte into zap_present_pte() ANBZ: #9728 commit 789753e17c4d6593932f07e40b740373123296a6 upstream Patch series "mm/memory: optimize unmap/zap with PTE-mapped THP", v3. This series is based on [1]. Similar to what we did with fork(), let's implement PTE batching during unmap/zap when processing PTE-mapped THPs. We collect consecutive PTEs that map consecutive pages of the same large folio, making sure that the other PTE bits are compatible, and (a) adjust the refcount only once per batch, (b) call rmap handling functions only once per batch, (c) perform batch PTE setting/updates and (d) perform TLB entry removal once per batch. Ryan was previously working on this in the context of cont-pte for arm64, in its latest iteration [2] with a focus on arm64 with cont-pte only. This series implements the optimization for all architectures, independent of such PTE bits, teaches MMU gather/TLB code to be fully aware of such large-folio-pages batches as well, and makes use of our new rmap batching function when removing the rmap. To achieve that, we have to enlighten MMU gather / page freeing code (i.e., everything that consumes encoded_page) to process unmapping of consecutive pages that all belong to the same large folio. I'm being very careful to not degrade order-0 performance, and it looks like I managed to achieve that. While this series should -- similar to [1] -- be beneficial for adding cont-pte support on arm64[2], it's one of the requirements for maintaining a total mapcount[3] for large folios with minimal added overhead and further changes[4] that build up on top of the total mapcount. 
Independent of all that, this series results in a speedup during munmap() and similar unmapping (process teardown, MADV_DONTNEED on larger ranges) with PTE-mapped THP, which is the default with THPs that are smaller than a PMD (for example, 16KiB to 1024KiB mTHPs for anonymous memory[5]). On an Intel Xeon Silver 4210R CPU, munmap'ing a 1GiB VMA backed by PTE-mapped folios of the same size (stddev < 1%) results in the following runtimes for munmap() in seconds (shorter is better): Folio Size | mm-unstable | New | Change --------------------------------------------- 4KiB | 0.058110 | 0.057715 | - 1% 16KiB | 0.044198 | 0.035469 | -20% 32KiB | 0.034216 | 0.023522 | -31% 64KiB | 0.029207 | 0.018434 | -37% 128KiB | 0.026579 | 0.014026 | -47% 256KiB | 0.025130 | 0.011756 | -53% 512KiB | 0.024292 | 0.010703 | -56% 1024KiB | 0.023812 | 0.010294 | -57% 2048KiB | 0.023785 | 0.009910 | -58% [1] https://lkml.kernel.org/r/20240129124649.189745-1-david@redhat.com [2] https://lkml.kernel.org/r/20231218105100.172635-1-ryan.roberts@arm.com [3] https://lkml.kernel.org/r/20230809083256.699513-1-david@redhat.com [4] https://lkml.kernel.org/r/20231124132626.235350-1-david@redhat.com [5] https://lkml.kernel.org/r/20231207161211.2374093-1-ryan.roberts@arm.com This patch (of 10): Let's prepare for further changes by factoring out processing of present PTEs. Link: https://lkml.kernel.org/r/20240214204435.167852-1-david@redhat.com Link: https://lkml.kernel.org/r/20240214204435.167852-2-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Cc: Alexander Gordeev Cc: Aneesh Kumar K.V Cc: Arnd Bergmann Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Heiko Carstens Cc: linuxppc-dev@lists.ozlabs.org Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: Michal Hocko Cc: "Naveen N. 
Rao" Cc: Nicholas Piggin Cc: Peter Zijlstra (Intel) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3789 --- mm/memory.c | 94 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 53 insertions(+), 41 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index edd88d50e45f..3ae7a120cc72 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1523,13 +1523,61 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, pte_install_uffd_wp_if_needed(vma, addr, pte, pteval); } +static inline void zap_present_pte(struct mmu_gather *tlb, + struct vm_area_struct *vma, pte_t *pte, pte_t ptent, + unsigned long addr, struct zap_details *details, + int *rss, bool *force_flush, bool *force_break) +{ + struct mm_struct *mm = tlb->mm; + struct folio *folio = NULL; + bool delay_rmap = false; + struct page *page; + + page = vm_normal_page(vma, addr, ptent); + if (page) + folio = page_folio(page); + + if (unlikely(!should_zap_folio(details, folio))) + return; + ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); + arch_check_zapped_pte(vma, ptent); + tlb_remove_tlb_entry(tlb, pte, addr); + zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent); + if (unlikely(!page)) { + ksm_might_unmap_zero_page(mm, ptent); + return; + } + + if (!folio_test_anon(folio)) { + if (pte_dirty(ptent)) { + folio_mark_dirty(folio); + if (tlb_delay_rmap(tlb)) { + delay_rmap = true; + *force_flush = true; + } + } + if (pte_young(ptent) && likely(vma_has_recency(vma))) + folio_mark_accessed(folio); + } + rss[mm_counter(page)]--; + if (!delay_rmap) { + folio_remove_rmap_pte(folio, page, vma); + if (unlikely(page_mapcount(page) < 0)) + print_bad_pte(vma, addr, ptent, page); + } + if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) { + *force_flush = true; + *force_break = true; + } +} + static unsigned long zap_pte_range(struct mmu_gather 
*tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, struct zap_details *details) { + bool force_flush = false, force_break = false; struct mm_struct *mm = tlb->mm; - int force_flush = 0; int rss[NR_MM_COUNTERS]; spinlock_t *ptl; pte_t *start_pte; @@ -1546,7 +1594,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, arch_enter_lazy_mmu_mode(); do { pte_t ptent = ptep_get(pte); - struct folio *folio = NULL; + struct folio *folio; struct page *page; if (pte_none(ptent)) @@ -1556,45 +1604,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, break; if (pte_present(ptent)) { - unsigned int delay_rmap; - - page = vm_normal_page(vma, addr, ptent); - if (page) - folio = page_folio(page); - - if (unlikely(!should_zap_folio(details, folio))) - continue; - ptent = ptep_get_and_clear_full(mm, addr, pte, - tlb->fullmm); - arch_check_zapped_pte(vma, ptent); - tlb_remove_tlb_entry(tlb, pte, addr); - zap_install_uffd_wp_if_needed(vma, addr, pte, details, - ptent); - if (unlikely(!page)) { - ksm_might_unmap_zero_page(mm, ptent); - continue; - } - - delay_rmap = 0; - if (!folio_test_anon(folio)) { - if (pte_dirty(ptent)) { - folio_mark_dirty(folio); - if (tlb_delay_rmap(tlb)) { - delay_rmap = 1; - force_flush = 1; - } - } - if (pte_young(ptent) && likely(vma_has_recency(vma))) - folio_mark_accessed(folio); - } - rss[mm_counter(page)]--; - if (!delay_rmap) { - folio_remove_rmap_pte(folio, page, vma); - if (unlikely(page_mapcount(page) < 0)) - print_bad_pte(vma, addr, ptent, page); - } - if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) { - force_flush = 1; + zap_present_pte(tlb, vma, pte, ptent, addr, details, + rss, &force_flush, &force_break); + if (unlikely(force_break)) { addr += PAGE_SIZE; break; } -- Gitee From b8337b60ddc4ebf7b0d3f56283ba85b87503666b Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 14 Feb 2024 21:44:27 +0100 Subject: [PATCH 1211/2138] mm/memory: handle !page case in zap_present_pte() 
separately ANBZ: #9728 commit 0cf18e839f64fff9a58569cc9a596bf97310e044 upstream We don't need uptodate accessed/dirty bits, so in theory we could replace ptep_get_and_clear_full() by an optimized ptep_clear_full() function. Let's rely on the provided pte. Further, there is no scenario where we would have to insert uffd-wp markers when zapping something that is not a normal page (i.e., zeropage). Add a sanity check to make sure this remains true. should_zap_folio() no longer has to handle NULL pointers. This change replaces 2/3 "!page/!folio" checks by a single "!page" one. Note that arch_check_zapped_pte() on x86-64 checks the HW-dirty bit to detect shadow stack entries. But for shadow stack entries, the HW dirty bit (in combination with non-writable PTEs) is set by software. So for the arch_check_zapped_pte() check, we don't have to sync against HW setting the HW dirty bit concurrently, it is always set. Link: https://lkml.kernel.org/r/20240214204435.167852-3-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Cc: Alexander Gordeev Cc: Aneesh Kumar K.V Cc: Arnd Bergmann Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Heiko Carstens Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: Michal Hocko Cc: "Naveen N. Rao" Cc: Nicholas Piggin Cc: Peter Zijlstra (Intel) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3789 --- mm/memory.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 3ae7a120cc72..338d743fa81f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1488,10 +1488,6 @@ static inline bool should_zap_folio(struct zap_details *details, if (should_zap_cows(details)) return true; - /* E.g. 
the caller passes NULL for the case of a zero folio */ - if (!folio) - return true; - /* Otherwise we should only zap non-anon folios */ return !folio_test_anon(folio); } @@ -1529,24 +1525,28 @@ static inline void zap_present_pte(struct mmu_gather *tlb, int *rss, bool *force_flush, bool *force_break) { struct mm_struct *mm = tlb->mm; - struct folio *folio = NULL; bool delay_rmap = false; + struct folio *folio; struct page *page; page = vm_normal_page(vma, addr, ptent); - if (page) - folio = page_folio(page); + if (!page) { + /* We don't need up-to-date accessed/dirty bits. */ + ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); + arch_check_zapped_pte(vma, ptent); + tlb_remove_tlb_entry(tlb, pte, addr); + VM_WARN_ON_ONCE(userfaultfd_wp(vma)); + ksm_might_unmap_zero_page(mm, ptent); + return; + } + folio = page_folio(page); if (unlikely(!should_zap_folio(details, folio))) return; ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); arch_check_zapped_pte(vma, ptent); tlb_remove_tlb_entry(tlb, pte, addr); zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent); - if (unlikely(!page)) { - ksm_might_unmap_zero_page(mm, ptent); - return; - } if (!folio_test_anon(folio)) { if (pte_dirty(ptent)) { -- Gitee From adf497b5ef88f245f28eb35c17954afee2a6e3aa Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 14 Feb 2024 21:44:28 +0100 Subject: [PATCH 1212/2138] mm/memory: further separate anon and pagecache folio handling in zap_present_pte() ANBZ: #9728 commit d11838ed63ee842fc9ef335b9f3aee3aa26f2ab5 upstream We don't need up-to-date accessed-dirty information for anon folios and can simply work with the ptent we already have. Also, we know the RSS counter we want to update. We can safely move arch_check_zapped_pte() + tlb_remove_tlb_entry() + zap_install_uffd_wp_if_needed() after updating the folio and RSS. While at it, only call zap_install_uffd_wp_if_needed() if there is even any chance that pte_install_uffd_wp_if_needed() would do *something*. 
That is, just don't bother if uffd-wp does not apply. Link: https://lkml.kernel.org/r/20240214204435.167852-4-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Cc: Alexander Gordeev Cc: Aneesh Kumar K.V Cc: Arnd Bergmann Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Heiko Carstens Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: Michal Hocko Cc: "Naveen N. Rao" Cc: Nicholas Piggin Cc: Peter Zijlstra (Intel) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3789 --- mm/memory.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 338d743fa81f..73ba64f5d665 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1543,12 +1543,9 @@ static inline void zap_present_pte(struct mmu_gather *tlb, folio = page_folio(page); if (unlikely(!should_zap_folio(details, folio))) return; - ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); - arch_check_zapped_pte(vma, ptent); - tlb_remove_tlb_entry(tlb, pte, addr); - zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent); if (!folio_test_anon(folio)) { + ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); if (pte_dirty(ptent)) { folio_mark_dirty(folio); if (tlb_delay_rmap(tlb)) { @@ -1558,8 +1555,17 @@ static inline void zap_present_pte(struct mmu_gather *tlb, } if (pte_young(ptent) && likely(vma_has_recency(vma))) folio_mark_accessed(folio); + rss[mm_counter(page)]--; + } else { + /* We don't need up-to-date accessed/dirty bits. 
*/ + ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); + rss[MM_ANONPAGES]--; } - rss[mm_counter(page)]--; + arch_check_zapped_pte(vma, ptent); + tlb_remove_tlb_entry(tlb, pte, addr); + if (unlikely(userfaultfd_pte_wp(vma, ptent))) + zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent); + if (!delay_rmap) { folio_remove_rmap_pte(folio, page, vma); if (unlikely(page_mapcount(page) < 0)) -- Gitee From 67ed65c946fb5fcfc39f8310fbae74843c014350 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 14 Feb 2024 21:44:29 +0100 Subject: [PATCH 1213/2138] mm/memory: factor out zapping folio pte into zap_present_folio_pte() ANBZ: #9728 commit 2b42a7e531509577bd822aece610cd6d0dbf0dd7 upstream Let's prepare for further changes by factoring it out into a separate function. Link: https://lkml.kernel.org/r/20240214204435.167852-5-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Cc: Alexander Gordeev Cc: Aneesh Kumar K.V Cc: Arnd Bergmann Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Heiko Carstens Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: Michal Hocko Cc: "Naveen N. 
Rao" Cc: Nicholas Piggin Cc: Peter Zijlstra (Intel) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3789 --- mm/memory.c | 53 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 73ba64f5d665..7509ce1fb4da 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1519,30 +1519,14 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, pte_install_uffd_wp_if_needed(vma, addr, pte, pteval); } -static inline void zap_present_pte(struct mmu_gather *tlb, - struct vm_area_struct *vma, pte_t *pte, pte_t ptent, - unsigned long addr, struct zap_details *details, - int *rss, bool *force_flush, bool *force_break) +static inline void zap_present_folio_pte(struct mmu_gather *tlb, + struct vm_area_struct *vma, struct folio *folio, + struct page *page, pte_t *pte, pte_t ptent, unsigned long addr, + struct zap_details *details, int *rss, bool *force_flush, + bool *force_break) { struct mm_struct *mm = tlb->mm; bool delay_rmap = false; - struct folio *folio; - struct page *page; - - page = vm_normal_page(vma, addr, ptent); - if (!page) { - /* We don't need up-to-date accessed/dirty bits. 
*/ - ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); - arch_check_zapped_pte(vma, ptent); - tlb_remove_tlb_entry(tlb, pte, addr); - VM_WARN_ON_ONCE(userfaultfd_wp(vma)); - ksm_might_unmap_zero_page(mm, ptent); - return; - } - - folio = page_folio(page); - if (unlikely(!should_zap_folio(details, folio))) - return; if (!folio_test_anon(folio)) { ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); @@ -1577,6 +1561,33 @@ static inline void zap_present_pte(struct mmu_gather *tlb, } } +static inline void zap_present_pte(struct mmu_gather *tlb, + struct vm_area_struct *vma, pte_t *pte, pte_t ptent, + unsigned long addr, struct zap_details *details, + int *rss, bool *force_flush, bool *force_break) +{ + struct mm_struct *mm = tlb->mm; + struct folio *folio; + struct page *page; + + page = vm_normal_page(vma, addr, ptent); + if (!page) { + /* We don't need up-to-date accessed/dirty bits. */ + ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); + arch_check_zapped_pte(vma, ptent); + tlb_remove_tlb_entry(tlb, pte, addr); + VM_WARN_ON_ONCE(userfaultfd_wp(vma)); + ksm_might_unmap_zero_page(mm, ptent); + return; + } + + folio = page_folio(page); + if (unlikely(!should_zap_folio(details, folio))) + return; + zap_present_folio_pte(tlb, vma, folio, page, pte, ptent, addr, details, + rss, force_flush, force_break); +} + static unsigned long zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, -- Gitee From 43646bc11488772c5948d1dc06482331c08a975c Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 14 Feb 2024 21:44:30 +0100 Subject: [PATCH 1214/2138] mm/mmu_gather: pass "delay_rmap" instead of encoded page to __tlb_remove_page_size() ANBZ: #9728 commit c30d6bc8d0153630e600e8f67ba88c670d9e1b0c upstream We have two bits available in the encoded page pointer to store additional information. Currently, we use one bit to request delay of the rmap removal until after a TLB flush. 
We want to make use of the remaining bit internally for batching of multiple pages of the same folio, specifying that the next encoded page pointer in an array is actually "nr_pages". So pass page + delay_rmap flag instead of an encoded page, to handle the encoding internally. Link: https://lkml.kernel.org/r/20240214204435.167852-6-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Cc: Alexander Gordeev Cc: Aneesh Kumar K.V Cc: Arnd Bergmann Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Heiko Carstens Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: Michal Hocko Cc: "Naveen N. Rao" Cc: Nicholas Piggin Cc: Peter Zijlstra (Intel) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3789 --- arch/s390/include/asm/tlb.h | 13 ++++++------- include/asm-generic/tlb.h | 12 ++++++------ mm/mmu_gather.c | 7 ++++--- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 383b1f91442c..1eb1df478e0c 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -25,8 +25,7 @@ void __tlb_remove_table(void *_table); static inline void tlb_flush(struct mmu_gather *tlb); static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, - struct encoded_page *page, - int page_size); + struct page *page, bool delay_rmap, int page_size); #define tlb_flush tlb_flush #define pte_free_tlb pte_free_tlb @@ -42,14 +41,14 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page * has already been freed, so just do free_page_and_swap_cache. * - * s390 doesn't delay rmap removal, so there is nothing encoded in - * the page pointer. + * s390 doesn't delay rmap removal. 
*/ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, - struct encoded_page *page, - int page_size) + struct page *page, bool delay_rmap, int page_size) { - free_page_and_swap_cache(encoded_page_ptr(page)); + VM_WARN_ON_ONCE(delay_rmap); + + free_page_and_swap_cache(page); return false; } diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 129a3a759976..2eb7b0d4f5d2 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -260,9 +260,8 @@ struct mmu_gather_batch { */ #define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH) -extern bool __tlb_remove_page_size(struct mmu_gather *tlb, - struct encoded_page *page, - int page_size); +extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, + bool delay_rmap, int page_size); #ifdef CONFIG_SMP /* @@ -462,13 +461,14 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) static inline void tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size) { - if (__tlb_remove_page_size(tlb, encode_page(page, 0), page_size)) + if (__tlb_remove_page_size(tlb, page, false, page_size)) tlb_flush_mmu(tlb); } -static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page, unsigned int flags) +static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb, + struct page *page, bool delay_rmap) { - return __tlb_remove_page_size(tlb, encode_page(page, flags), PAGE_SIZE); + return __tlb_remove_page_size(tlb, page, delay_rmap, PAGE_SIZE); } /* tlb_remove_page diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index 604ddf08affe..ac733d81b112 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -116,7 +116,8 @@ static void tlb_batch_list_free(struct mmu_gather *tlb) tlb->local.next = NULL; } -bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, int page_size) +bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, + bool delay_rmap, int page_size) { struct 
mmu_gather_batch *batch; @@ -131,13 +132,13 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, i * Add the page and check if we are full. If so * force a flush. */ - batch->encoded_pages[batch->nr++] = page; + batch->encoded_pages[batch->nr++] = encode_page(page, delay_rmap); if (batch->nr == batch->max) { if (!tlb_next_batch(tlb)) return true; batch = tlb->active; } - VM_BUG_ON_PAGE(batch->nr > batch->max, encoded_page_ptr(page)); + VM_BUG_ON_PAGE(batch->nr > batch->max, page); return false; } -- Gitee From d223f1d48704913d9fc05c511aed87fcc68d35b3 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 14 Feb 2024 21:44:31 +0100 Subject: [PATCH 1215/2138] mm/mmu_gather: define ENCODED_PAGE_FLAG_DELAY_RMAP ANBZ: #9728 commit da510964c095cb5e070800ef38752c453d2aa71d upstream Nowadays, encoded pages are only used in mmu_gather handling. Let's update the documentation, and define ENCODED_PAGE_BIT_DELAY_RMAP. While at it, rename ENCODE_PAGE_BITS to ENCODED_PAGE_BITS. If encoded page pointers would ever be used in other context again, we'd likely want to change the defines to reflect their context (e.g., ENCODED_PAGE_FLAG_MMU_GATHER_DELAY_RMAP). For now, let's keep it simple. This is a preparation for using the remaining spare bit to indicate that the next item in an array of encoded pages is a "nr_pages" argument and not an encoded page. Link: https://lkml.kernel.org/r/20240214204435.167852-7-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Cc: Alexander Gordeev Cc: Aneesh Kumar K.V Cc: Arnd Bergmann Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Heiko Carstens Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: Michal Hocko Cc: "Naveen N. 
Rao" Cc: Nicholas Piggin Cc: Peter Zijlstra (Intel) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3789 --- include/linux/mm_types.h | 17 +++++++++++------ mm/mmu_gather.c | 5 +++-- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index d11645aca1ba..bb4f507df475 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -221,8 +221,8 @@ struct page { * * An 'encoded_page' pointer is a pointer to a regular 'struct page', but * with the low bits of the pointer indicating extra context-dependent - * information. Not super-common, but happens in mmu_gather and mlock - * handling, and this acts as a type system check on that use. + * information. Only used in mmu_gather handling, and this acts as a type + * system check on that use. * * We only really have two guaranteed bits in general, although you could * play with 'struct page' alignment (see CONFIG_HAVE_ALIGNED_STRUCT_PAGE) @@ -231,21 +231,26 @@ struct page { * Use the supplied helper functions to endcode/decode the pointer and bits. */ struct encoded_page; -#define ENCODE_PAGE_BITS 3ul + +#define ENCODED_PAGE_BITS 3ul + +/* Perform rmap removal after we have flushed the TLB. 
*/ +#define ENCODED_PAGE_BIT_DELAY_RMAP 1ul + static __always_inline struct encoded_page *encode_page(struct page *page, unsigned long flags) { - BUILD_BUG_ON(flags > ENCODE_PAGE_BITS); + BUILD_BUG_ON(flags > ENCODED_PAGE_BITS); return (struct encoded_page *)(flags | (unsigned long)page); } static inline unsigned long encoded_page_flags(struct encoded_page *page) { - return ENCODE_PAGE_BITS & (unsigned long)page; + return ENCODED_PAGE_BITS & (unsigned long)page; } static inline struct page *encoded_page_ptr(struct encoded_page *page) { - return (struct page *)(~ENCODE_PAGE_BITS & (unsigned long)page); + return (struct page *)(~ENCODED_PAGE_BITS & (unsigned long)page); } /* diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index ac733d81b112..6540c99c6758 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -53,7 +53,7 @@ static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_ for (int i = 0; i < batch->nr; i++) { struct encoded_page *enc = batch->encoded_pages[i]; - if (encoded_page_flags(enc)) { + if (encoded_page_flags(enc) & ENCODED_PAGE_BIT_DELAY_RMAP) { struct page *page = encoded_page_ptr(enc); folio_remove_rmap_pte(page_folio(page), page, vma); } @@ -119,6 +119,7 @@ static void tlb_batch_list_free(struct mmu_gather *tlb) bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, bool delay_rmap, int page_size) { + int flags = delay_rmap ? ENCODED_PAGE_BIT_DELAY_RMAP : 0; struct mmu_gather_batch *batch; VM_BUG_ON(!tlb->end); @@ -132,7 +133,7 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, * Add the page and check if we are full. If so * force a flush. 
*/ - batch->encoded_pages[batch->nr++] = encode_page(page, delay_rmap); + batch->encoded_pages[batch->nr++] = encode_page(page, flags); if (batch->nr == batch->max) { if (!tlb_next_batch(tlb)) return true; -- Gitee From 43e4e851e096407ec22228a3ac1e949e236e77a7 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 14 Feb 2024 21:44:32 +0100 Subject: [PATCH 1216/2138] mm/mmu_gather: add tlb_remove_tlb_entries() ANBZ: #9728 commit 4d5bf0b6183f79ea361dd506365d2a471270735c upstream Let's add a helper that lets us batch-process multiple consecutive PTEs. Note that the loop will get optimized out on all architectures except on powerpc. We have to add an early define of __tlb_remove_tlb_entry() on ppc to make the compiler happy (and avoid making tlb_remove_tlb_entries() a macro). [arnd@kernel.org: change __tlb_remove_tlb_entry() to an inline function] Link: https://lkml.kernel.org/r/20240221154549.2026073-1-arnd@kernel.org Link: https://lkml.kernel.org/r/20240214204435.167852-8-david@redhat.com Signed-off-by: David Hildenbrand Signed-off-by: Arnd Bergmann Reviewed-by: Ryan Roberts Cc: Alexander Gordeev Cc: Aneesh Kumar K.V Cc: Arnd Bergmann Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Heiko Carstens Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: Michal Hocko Cc: "Naveen N. 
Rao" Cc: Nicholas Piggin Cc: Peter Zijlstra (Intel) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3789 --- arch/powerpc/include/asm/tlb.h | 2 ++ include/asm-generic/tlb.h | 24 +++++++++++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index b3de6102a907..1ca7d4c4b90d 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -19,6 +19,8 @@ #include +static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, + unsigned long address); #define __tlb_remove_tlb_entry __tlb_remove_tlb_entry #define tlb_flush tlb_flush diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 2eb7b0d4f5d2..127a8230a40a 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -592,7 +592,9 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb, } #ifndef __tlb_remove_tlb_entry -#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) +static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address) +{ +} #endif /** @@ -608,6 +610,26 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb, __tlb_remove_tlb_entry(tlb, ptep, address); \ } while (0) +/** + * tlb_remove_tlb_entries - remember unmapping of multiple consecutive ptes for + * later tlb invalidation. + * + * Similar to tlb_remove_tlb_entry(), but remember unmapping of multiple + * consecutive ptes instead of only a single one. 
+ */ +static inline void tlb_remove_tlb_entries(struct mmu_gather *tlb, + pte_t *ptep, unsigned int nr, unsigned long address) +{ + tlb_flush_pte_range(tlb, address, PAGE_SIZE * nr); + for (;;) { + __tlb_remove_tlb_entry(tlb, ptep, address); + if (--nr == 0) + break; + ptep++; + address += PAGE_SIZE; + } +} + #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ do { \ unsigned long _sz = huge_page_size(h); \ -- Gitee From 1becd4107feab2dbcf6a7965fc3da08cc7519d5d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 14 Feb 2024 21:44:33 +0100 Subject: [PATCH 1217/2138] mm/mmu_gather: add __tlb_remove_folio_pages() ANBZ: #9728 commit d7f861b9c43aadbe384ab1382d2e76750bedc91e upstream Add __tlb_remove_folio_pages(), which will remove multiple consecutive pages that belong to the same large folio, instead of only a single page. We'll be using this function when optimizing unmapping/zapping of large folios that are mapped by PTEs. We're using the remaining spare bit in an encoded_page to indicate that the next enoced page in an array contains actually shifted "nr_pages". Teach swap/freeing code about putting multiple folio references, and delayed rmap handling to remove page ranges of a folio. This extension allows for still gathering almost as many small folios as we used to (-1, because we have to prepare for a possibly bigger next entry), but still allows for gathering consecutive pages that belong to the same large folio. Note that we don't pass the folio pointer, because it is not required for now. Further, we don't support page_size != PAGE_SIZE, it won't be required for simple PTE batching. We have to provide a separate s390 implementation, but it's fairly straight forward. Another, more invasive and likely more expensive, approach would be to use folio+range or a PFN range instead of page+nr_pages. But, we should do that consistently for the whole mmu_gather. For now, let's keep it simple and add "nr_pages" only. 
Note that it is now possible to gather significantly more pages: In the past, we were able to gather ~10000 pages, now we can also gather ~5000 folio fragments that span multiple pages. A folio fragment on x86-64 can span up to 512 pages (2 MiB THP) and on arm64 with 64k in theory 8192 pages (512 MiB THP). Gathering more memory is not considered something we should worry about, especially because these are already corner cases. While we can gather more total memory, we won't free more folio fragments. As long as page freeing time primarily only depends on the number of involved folios, there is no effective change for !preempt configurations. However, we'll adjust tlb_batch_pages_flush() separately to handle corner cases where page freeing time grows proportionally with the actual memory size. Link: https://lkml.kernel.org/r/20240214204435.167852-9-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Cc: Alexander Gordeev Cc: Aneesh Kumar K.V Cc: Arnd Bergmann Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Heiko Carstens Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: Michal Hocko Cc: "Naveen N. 
Rao" Cc: Nicholas Piggin Cc: Peter Zijlstra (Intel) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3789 --- arch/s390/include/asm/tlb.h | 17 +++++++++++ include/asm-generic/tlb.h | 8 +++++ include/linux/mm_types.h | 20 ++++++++++++ mm/mmu_gather.c | 61 +++++++++++++++++++++++++++++++------ mm/swap.c | 12 ++++++-- mm/swap_state.c | 15 +++++++-- 6 files changed, 119 insertions(+), 14 deletions(-) diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 1eb1df478e0c..b76c8f028bad 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -26,6 +26,8 @@ void __tlb_remove_table(void *_table); static inline void tlb_flush(struct mmu_gather *tlb); static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, bool delay_rmap, int page_size); +static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb, + struct page *page, unsigned int nr_pages, bool delay_rmap); #define tlb_flush tlb_flush #define pte_free_tlb pte_free_tlb @@ -52,6 +54,21 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, return false; } +static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb, + struct page *page, unsigned int nr_pages, bool delay_rmap) +{ + struct encoded_page *encoded_pages[] = { + encode_page(page, ENCODED_PAGE_BIT_NR_PAGES_NEXT), + encode_nr_pages(nr_pages), + }; + + VM_WARN_ON_ONCE(delay_rmap); + VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1)); + + free_pages_and_swap_cache(encoded_pages, ARRAY_SIZE(encoded_pages)); + return false; +} + static inline void tlb_flush(struct mmu_gather *tlb) { __tlb_flush_mm_lazy(tlb->mm); diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 127a8230a40a..709830274b75 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -69,6 +69,7 @@ * * - 
tlb_remove_page() / __tlb_remove_page() * - tlb_remove_page_size() / __tlb_remove_page_size() + * - __tlb_remove_folio_pages() * * __tlb_remove_page_size() is the basic primitive that queues a page for * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a @@ -78,6 +79,11 @@ * tlb_remove_page() and tlb_remove_page_size() imply the call to * tlb_flush_mmu() when required and has no return value. * + * __tlb_remove_folio_pages() is similar to __tlb_remove_page(), however, + * instead of removing a single page, remove the given number of consecutive + * pages that are all part of the same (large) folio: just like calling + * __tlb_remove_page() on each page individually. + * * - tlb_change_page_size() * * call before __tlb_remove_page*() to set the current page-size; implies a @@ -262,6 +268,8 @@ struct mmu_gather_batch { extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, bool delay_rmap, int page_size); +bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page, + unsigned int nr_pages, bool delay_rmap); #ifdef CONFIG_SMP /* diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index bb4f507df475..52be7d14012b 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -237,6 +237,15 @@ struct encoded_page; /* Perform rmap removal after we have flushed the TLB. */ #define ENCODED_PAGE_BIT_DELAY_RMAP 1ul +/* + * The next item in an encoded_page array is the "nr_pages" argument, specifying + * the number of consecutive pages starting from this page, that all belong to + * the same folio. For example, "nr_pages" corresponds to the number of folio + * references that must be dropped. If this bit is not set, "nr_pages" is + * implicitly 1. 
+ */ +#define ENCODED_PAGE_BIT_NR_PAGES_NEXT 2ul + static __always_inline struct encoded_page *encode_page(struct page *page, unsigned long flags) { BUILD_BUG_ON(flags > ENCODED_PAGE_BITS); @@ -253,6 +262,17 @@ static inline struct page *encoded_page_ptr(struct encoded_page *page) return (struct page *)(~ENCODED_PAGE_BITS & (unsigned long)page); } +static __always_inline struct encoded_page *encode_nr_pages(unsigned long nr) +{ + VM_WARN_ON_ONCE((nr << 2) >> 2 != nr); + return (struct encoded_page *)(nr << 2); +} + +static __always_inline unsigned long encoded_nr_pages(struct encoded_page *page) +{ + return ((unsigned long)page) >> 2; +} + /* * A swap entry has to fit into a "unsigned long", as the entry is hidden * in the "index" field of the swapper address space. diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index 6540c99c6758..d175c0f1e2c8 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -50,12 +50,21 @@ static bool tlb_next_batch(struct mmu_gather *tlb) #ifdef CONFIG_SMP static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_struct *vma) { + struct encoded_page **pages = batch->encoded_pages; + for (int i = 0; i < batch->nr; i++) { - struct encoded_page *enc = batch->encoded_pages[i]; + struct encoded_page *enc = pages[i]; if (encoded_page_flags(enc) & ENCODED_PAGE_BIT_DELAY_RMAP) { struct page *page = encoded_page_ptr(enc); - folio_remove_rmap_pte(page_folio(page), page, vma); + unsigned int nr_pages = 1; + + if (unlikely(encoded_page_flags(enc) & + ENCODED_PAGE_BIT_NR_PAGES_NEXT)) + nr_pages = encoded_nr_pages(pages[++i]); + + folio_remove_rmap_ptes(page_folio(page), page, nr_pages, + vma); } } } @@ -89,18 +98,26 @@ static void tlb_batch_pages_flush(struct mmu_gather *tlb) for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { struct encoded_page **pages = batch->encoded_pages; - do { + while (batch->nr) { /* * limit free batch count when PAGE_SIZE > 4K */ unsigned int nr = min(512U, batch->nr); + /* + * Make sure 
we cover page + nr_pages, and don't leave + * nr_pages behind when capping the number of entries. + */ + if (unlikely(encoded_page_flags(pages[nr - 1]) & + ENCODED_PAGE_BIT_NR_PAGES_NEXT)) + nr++; + free_pages_and_swap_cache(pages, nr); pages += nr; batch->nr -= nr; cond_resched(); - } while (batch->nr); + } } tlb->active = &tlb->local; } @@ -116,8 +133,9 @@ static void tlb_batch_list_free(struct mmu_gather *tlb) tlb->local.next = NULL; } -bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, - bool delay_rmap, int page_size) +static bool __tlb_remove_folio_pages_size(struct mmu_gather *tlb, + struct page *page, unsigned int nr_pages, bool delay_rmap, + int page_size) { int flags = delay_rmap ? ENCODED_PAGE_BIT_DELAY_RMAP : 0; struct mmu_gather_batch *batch; @@ -126,6 +144,8 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, #ifdef CONFIG_MMU_GATHER_PAGE_SIZE VM_WARN_ON(tlb->page_size != page_size); + VM_WARN_ON_ONCE(nr_pages != 1 && page_size != PAGE_SIZE); + VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1)); #endif batch = tlb->active; @@ -133,17 +153,40 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, * Add the page and check if we are full. If so * force a flush. */ - batch->encoded_pages[batch->nr++] = encode_page(page, flags); - if (batch->nr == batch->max) { + if (likely(nr_pages == 1)) { + batch->encoded_pages[batch->nr++] = encode_page(page, flags); + } else { + flags |= ENCODED_PAGE_BIT_NR_PAGES_NEXT; + batch->encoded_pages[batch->nr++] = encode_page(page, flags); + batch->encoded_pages[batch->nr++] = encode_nr_pages(nr_pages); + } + /* + * Make sure that we can always add another "page" + "nr_pages", + * requiring two entries instead of only a single one. 
+ */ + if (batch->nr >= batch->max - 1) { if (!tlb_next_batch(tlb)) return true; batch = tlb->active; } - VM_BUG_ON_PAGE(batch->nr > batch->max, page); + VM_BUG_ON_PAGE(batch->nr > batch->max - 1, page); return false; } +bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page, + unsigned int nr_pages, bool delay_rmap) +{ + return __tlb_remove_folio_pages_size(tlb, page, nr_pages, delay_rmap, + PAGE_SIZE); +} + +bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, + bool delay_rmap, int page_size) +{ + return __tlb_remove_folio_pages_size(tlb, page, 1, delay_rmap, page_size); +} + #endif /* MMU_GATHER_NO_GATHER */ #ifdef CONFIG_MMU_GATHER_TABLE_FREE diff --git a/mm/swap.c b/mm/swap.c index 42082eba42de..104da8994f27 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -959,11 +959,17 @@ void release_pages(release_pages_arg arg, int nr) unsigned int lock_batch; for (i = 0; i < nr; i++) { + unsigned int nr_refs = 1; struct folio *folio; /* Turn any of the argument types into a folio */ folio = page_folio(encoded_page_ptr(encoded[i])); + /* Is our next entry actually "nr_pages" -> "nr_refs" ? 
*/ + if (unlikely(encoded_page_flags(encoded[i]) & + ENCODED_PAGE_BIT_NR_PAGES_NEXT)) + nr_refs = encoded_nr_pages(encoded[++i]); + /* * Make sure the IRQ-safe lock-holding time does not get * excessive with a continuous string of pages from the @@ -982,14 +988,14 @@ void release_pages(release_pages_arg arg, int nr) unlock_page_lruvec_irqrestore(lruvec, flags); lruvec = NULL; } - if (put_devmap_managed_page(&folio->page)) + if (put_devmap_managed_page_refs(&folio->page, nr_refs)) continue; - if (folio_put_testzero(folio)) + if (folio_ref_sub_and_test(folio, nr_refs)) free_zone_device_page(&folio->page); continue; } - if (!folio_put_testzero(folio)) + if (!folio_ref_sub_and_test(folio, nr_refs)) continue; if (folio_test_large(folio)) { diff --git a/mm/swap_state.c b/mm/swap_state.c index b3b14bd0dd64..1e3497c7b634 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -310,8 +310,19 @@ void free_page_and_swap_cache(struct page *page) void free_pages_and_swap_cache(struct encoded_page **pages, int nr) { lru_add_drain(); - for (int i = 0; i < nr; i++) - free_swap_cache(encoded_page_ptr(pages[i])); + for (int i = 0; i < nr; i++) { + struct page *page = encoded_page_ptr(pages[i]); + + /* + * Skip over the "nr_pages" entry. It's sufficient to call + * free_swap_cache() only once per folio. + */ + if (unlikely(encoded_page_flags(pages[i]) & + ENCODED_PAGE_BIT_NR_PAGES_NEXT)) + i++; + + free_swap_cache(page); + } release_pages(pages, nr); } -- Gitee From 52ba33928a0adeec6dc2aa92114ac9eb154210c9 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 14 Feb 2024 21:44:34 +0100 Subject: [PATCH 1218/2138] mm/mmu_gather: improve cond_resched() handling with large folios and expensive page freeing ANBZ: #9728 commit e61abd4490684de379b4a2ef1be2dbde39ac1ced upstream In tlb_batch_pages_flush(), we can end up freeing up to 512 pages or now up to 256 folio fragments that span more than one page, before we conditionally reschedule. 
It's a pain that we have to handle cond_resched() in tlb_batch_pages_flush() manually and cannot simply handle it in release_pages() -- release_pages() can be called from atomic context. Well, in a perfect world we wouldn't have to make our code more complicated at all. With page poisoning and init_on_free, we might now run into soft lockups when we free a lot of rather large folio fragments, because page freeing time then depends on the actual memory size we are freeing instead of on the number of folios that are involved. In the absolute (unlikely) worst case, on arm64 with 64k we will be able to free up to 256 folio fragments that each span 512 MiB: zeroing out 128 GiB does sound like it might take a while. But instead of ignoring this unlikely case, let's just handle it. So, let's teach tlb_batch_pages_flush() that there are some configurations where page freeing is horribly slow, and let's reschedule more frequently -- similarly like we did for now before we had large folio fragments in there. Avoid yet another loop over all encoded pages in the common case by handling that separately. Note that with page poisoning/zeroing, we might now end up freeing only a single folio fragment at a time that might exceed the old 512 pages limit: but if we cannot even free a single MAX_ORDER page on a system without running into soft lockups, something else is already completely bogus. Freeing a PMD-mapped THP would similarly cause trouble. In theory, we might even free 511 order-0 pages + a single MAX_ORDER page, effectively having to zero out 8703 pages on arm64 with 64k, translating to ~544 MiB of memory: however, if 512 MiB doesn't result in soft lockups, 544 MiB is unlikely to result in soft lockups, so we won't care about that for the time being. In the future, we might want to detect if handling cond_resched() is required at all, and just not do any of that with full preemption enabled. 
Link: https://lkml.kernel.org/r/20240214204435.167852-10-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Cc: Alexander Gordeev Cc: Aneesh Kumar K.V Cc: Arnd Bergmann Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Heiko Carstens Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: Michal Hocko Cc: "Naveen N. Rao" Cc: Nicholas Piggin Cc: Peter Zijlstra (Intel) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3789 --- mm/mmu_gather.c | 58 ++++++++++++++++++++++++++++++++++++------------- 1 file changed, 43 insertions(+), 15 deletions(-) diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index d175c0f1e2c8..99b3e9408aa0 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -91,18 +91,21 @@ void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) } #endif -static void tlb_batch_pages_flush(struct mmu_gather *tlb) -{ - struct mmu_gather_batch *batch; +/* + * We might end up freeing a lot of pages. Reschedule on a regular + * basis to avoid soft lockups in configurations without full + * preemption enabled. The magic number of 512 folios seems to work. 
+ */ +#define MAX_NR_FOLIOS_PER_FREE 512 - for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { - struct encoded_page **pages = batch->encoded_pages; +static void __tlb_batch_free_encoded_pages(struct mmu_gather_batch *batch) +{ + struct encoded_page **pages = batch->encoded_pages; + unsigned int nr, nr_pages; - while (batch->nr) { - /* - * limit free batch count when PAGE_SIZE > 4K - */ - unsigned int nr = min(512U, batch->nr); + while (batch->nr) { + if (!page_poisoning_enabled_static() && !want_init_on_free()) { + nr = min(MAX_NR_FOLIOS_PER_FREE, batch->nr); /* * Make sure we cover page + nr_pages, and don't leave @@ -111,14 +114,39 @@ static void tlb_batch_pages_flush(struct mmu_gather *tlb) if (unlikely(encoded_page_flags(pages[nr - 1]) & ENCODED_PAGE_BIT_NR_PAGES_NEXT)) nr++; + } else { + /* + * With page poisoning and init_on_free, the time it + * takes to free memory grows proportionally with the + * actual memory size. Therefore, limit based on the + * actual memory size and not the number of involved + * folios. 
+ */ + for (nr = 0, nr_pages = 0; + nr < batch->nr && nr_pages < MAX_NR_FOLIOS_PER_FREE; + nr++) { + if (unlikely(encoded_page_flags(pages[nr]) & + ENCODED_PAGE_BIT_NR_PAGES_NEXT)) + nr_pages += encoded_nr_pages(pages[++nr]); + else + nr_pages++; + } + } - free_pages_and_swap_cache(pages, nr); - pages += nr; - batch->nr -= nr; + free_pages_and_swap_cache(pages, nr); + pages += nr; + batch->nr -= nr; - cond_resched(); - } + cond_resched(); } +} + +static void tlb_batch_pages_flush(struct mmu_gather *tlb) +{ + struct mmu_gather_batch *batch; + + for (batch = &tlb->local; batch && batch->nr; batch = batch->next) + __tlb_batch_free_encoded_pages(batch); tlb->active = &tlb->local; } -- Gitee From 9f67f148f1dac1d51e5cbc9c54474da9939b36fb Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 14 Feb 2024 21:44:35 +0100 Subject: [PATCH 1219/2138] mm/memory: optimize unmap/zap with PTE-mapped THP ANBZ: #9728 commit 10ebac4f95e7a9951c453d6c66d9beb5a35db338 upstream Similar to how we optimized fork(), let's implement PTE batching when consecutive (present) PTEs map consecutive pages of the same large folio. Most infrastructure we need for batching (mmu gather, rmap) is already there. We only have to add get_and_clear_full_ptes() and clear_full_ptes(). Similarly, extend zap_install_uffd_wp_if_needed() to process a PTE range. We won't bother sanity-checking the mapcount of all subpages, but only check the mapcount of the first subpage we process. If there is a real problem hiding somewhere, we can trigger it simply by using small folios, or when we zap single pages of a large folio. Ideally, we had that check in rmap code (including for delayed rmap), but then we cannot print the PTE. Let's keep it simple for now. If we ever have a cheap folio_mapcount(), we might just want to check for underflows there. To keep small folios as fast as possible force inlining of a specialized variant using __always_inline with nr=1. 
Link: https://lkml.kernel.org/r/20240214204435.167852-11-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Ryan Roberts Cc: Alexander Gordeev Cc: Aneesh Kumar K.V Cc: Arnd Bergmann Cc: Catalin Marinas Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Heiko Carstens Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: Michal Hocko Cc: "Naveen N. Rao" Cc: Nicholas Piggin Cc: Peter Zijlstra (Intel) Cc: Sven Schnelle Cc: Vasily Gorbik Cc: Will Deacon Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3789 --- include/linux/pgtable.h | 70 +++++++++++++++++++++++++++++++ mm/memory.c | 92 +++++++++++++++++++++++++++++------------ 2 files changed, 136 insertions(+), 26 deletions(-) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 5f8989a863b8..23949a9be594 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -573,6 +573,76 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, } #endif +#ifndef get_and_clear_full_ptes +/** + * get_and_clear_full_ptes - Clear present PTEs that map consecutive pages of + * the same folio, collecting dirty/accessed bits. + * @mm: Address space the pages are mapped into. + * @addr: Address the first page is mapped at. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to clear. + * @full: Whether we are clearing a full mm. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over ptep_get_and_clear_full(), merging dirty/accessed bits into the + * returned PTE. + * + * Note that PTE bits in the PTE range besides the PFN can differ. For example, + * some PTEs might be write-protected. + * + * Context: The caller holds the page table lock. The PTEs map consecutive + * pages that belong to the same folio. The PTEs are all in the same PMD. 
+ */ +static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, unsigned int nr, int full) +{ + pte_t pte, tmp_pte; + + pte = ptep_get_and_clear_full(mm, addr, ptep, full); + while (--nr) { + ptep++; + addr += PAGE_SIZE; + tmp_pte = ptep_get_and_clear_full(mm, addr, ptep, full); + if (pte_dirty(tmp_pte)) + pte = pte_mkdirty(pte); + if (pte_young(tmp_pte)) + pte = pte_mkyoung(pte); + } + return pte; +} +#endif + +#ifndef clear_full_ptes +/** + * clear_full_ptes - Clear present PTEs that map consecutive pages of the same + * folio. + * @mm: Address space the pages are mapped into. + * @addr: Address the first page is mapped at. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to clear. + * @full: Whether we are clearing a full mm. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over ptep_get_and_clear_full(). + * + * Note that PTE bits in the PTE range besides the PFN can differ. For example, + * some PTEs might be write-protected. + * + * Context: The caller holds the page table lock. The PTEs map consecutive + * pages that belong to the same folio. The PTEs are all in the same PMD. 
+ */ +static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr, int full) +{ + for (;;) { + ptep_get_and_clear_full(mm, addr, ptep, full); + if (--nr == 0) + break; + ptep++; + addr += PAGE_SIZE; + } +} +#endif /* * If two threads concurrently fault at the same page, the thread that diff --git a/mm/memory.c b/mm/memory.c index 7509ce1fb4da..bb2591de5ef1 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1506,7 +1506,7 @@ static inline bool zap_drop_file_uffd_wp(struct zap_details *details) */ static inline void zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, - unsigned long addr, pte_t *pte, + unsigned long addr, pte_t *pte, int nr, struct zap_details *details, pte_t pteval) { /* Zap on anonymous always means dropping everything */ @@ -1516,20 +1516,27 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, if (zap_drop_file_uffd_wp(details)) return; - pte_install_uffd_wp_if_needed(vma, addr, pte, pteval); + for (;;) { + /* the PFN in the PTE is irrelevant. 
*/ + pte_install_uffd_wp_if_needed(vma, addr, pte, pteval); + if (--nr == 0) + break; + pte++; + addr += PAGE_SIZE; + } } -static inline void zap_present_folio_pte(struct mmu_gather *tlb, +static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb, struct vm_area_struct *vma, struct folio *folio, - struct page *page, pte_t *pte, pte_t ptent, unsigned long addr, - struct zap_details *details, int *rss, bool *force_flush, - bool *force_break) + struct page *page, pte_t *pte, pte_t ptent, unsigned int nr, + unsigned long addr, struct zap_details *details, int *rss, + bool *force_flush, bool *force_break) { struct mm_struct *mm = tlb->mm; bool delay_rmap = false; if (!folio_test_anon(folio)) { - ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); + ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm); if (pte_dirty(ptent)) { folio_mark_dirty(folio); if (tlb_delay_rmap(tlb)) { @@ -1539,36 +1546,49 @@ static inline void zap_present_folio_pte(struct mmu_gather *tlb, } if (pte_young(ptent) && likely(vma_has_recency(vma))) folio_mark_accessed(folio); - rss[mm_counter(page)]--; + rss[mm_counter(page)] -= nr; } else { /* We don't need up-to-date accessed/dirty bits. */ - ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); - rss[MM_ANONPAGES]--; + clear_full_ptes(mm, addr, pte, nr, tlb->fullmm); + rss[MM_ANONPAGES] -= nr; } + /* Checking a single PTE in a batch is sufficient. */ arch_check_zapped_pte(vma, ptent); - tlb_remove_tlb_entry(tlb, pte, addr); + tlb_remove_tlb_entries(tlb, pte, nr, addr); if (unlikely(userfaultfd_pte_wp(vma, ptent))) - zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent); + zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, + ptent); if (!delay_rmap) { - folio_remove_rmap_pte(folio, page, vma); + folio_remove_rmap_ptes(folio, page, nr, vma); + + /* Only sanity-check the first page in a batch. 
*/ if (unlikely(page_mapcount(page) < 0)) print_bad_pte(vma, addr, ptent, page); } - if (unlikely(__tlb_remove_page(tlb, page, delay_rmap))) { + if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) { *force_flush = true; *force_break = true; } } -static inline void zap_present_pte(struct mmu_gather *tlb, +/* + * Zap or skip at least one present PTE, trying to batch-process subsequent + * PTEs that map consecutive pages of the same folio. + * + * Returns the number of processed (skipped or zapped) PTEs (at least 1). + */ +static inline int zap_present_ptes(struct mmu_gather *tlb, struct vm_area_struct *vma, pte_t *pte, pte_t ptent, - unsigned long addr, struct zap_details *details, - int *rss, bool *force_flush, bool *force_break) + unsigned int max_nr, unsigned long addr, + struct zap_details *details, int *rss, bool *force_flush, + bool *force_break) { + const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY; struct mm_struct *mm = tlb->mm; struct folio *folio; struct page *page; + int nr; page = vm_normal_page(vma, addr, ptent); if (!page) { @@ -1578,14 +1598,29 @@ static inline void zap_present_pte(struct mmu_gather *tlb, tlb_remove_tlb_entry(tlb, pte, addr); VM_WARN_ON_ONCE(userfaultfd_wp(vma)); ksm_might_unmap_zero_page(mm, ptent); - return; + return 1; } folio = page_folio(page); if (unlikely(!should_zap_folio(details, folio))) - return; - zap_present_folio_pte(tlb, vma, folio, page, pte, ptent, addr, details, - rss, force_flush, force_break); + return 1; + + /* + * Make sure that the common "small folio" case is as fast as possible + * by keeping the batching logic separate. 
+ */ + if (unlikely(folio_test_large(folio) && max_nr != 1)) { + nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags, + NULL); + + zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr, + addr, details, rss, force_flush, + force_break); + return nr; + } + zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr, + details, rss, force_flush, force_break); + return 1; } static unsigned long zap_pte_range(struct mmu_gather *tlb, @@ -1600,6 +1635,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, pte_t *start_pte; pte_t *pte; swp_entry_t entry; + int nr; tlb_change_page_size(tlb, PAGE_SIZE); init_rss_vec(rss); @@ -1613,7 +1649,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, pte_t ptent = ptep_get(pte); struct folio *folio; struct page *page; + int max_nr; + nr = 1; if (pte_none(ptent)) continue; @@ -1621,10 +1659,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, break; if (pte_present(ptent)) { - zap_present_pte(tlb, vma, pte, ptent, addr, details, - rss, &force_flush, &force_break); + max_nr = (end - addr) / PAGE_SIZE; + nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr, + addr, details, rss, &force_flush, + &force_break); if (unlikely(force_break)) { - addr += PAGE_SIZE; + addr += nr * PAGE_SIZE; break; } continue; @@ -1678,8 +1718,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, WARN_ON_ONCE(1); } pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); - zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent); - } while (pte++, addr += PAGE_SIZE, addr != end); + zap_install_uffd_wp_if_needed(vma, addr, pte, 1, details, ptent); + } while (pte += nr, addr += PAGE_SIZE * nr, addr != end); add_mm_rss_vec(mm, rss); arch_leave_lazy_mmu_mode(); -- Gitee From e635254dba0beb52a2b891d46eb868278106f43c Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Wed, 13 Mar 2024 17:31:07 -0400 Subject: [PATCH 1220/2138] mm/memory: fix missing pte marker for !page on pte zaps ANBZ: #9728 commit 
f8572367eaff6739e3bc238ba93b86cd7881c0ff upstream Commit 0cf18e839f64 of large folio zap work broke uffd-wp. Now mm's uffd unit test "wp-unpopulated" will trigger this WARN_ON_ONCE(). The WARN_ON_ONCE() asserts that an VMA cannot be registered with userfaultfd-wp if it contains a !normal page, but it's actually possible. One example is an anonymous vma, register with uffd-wp, read anything will install a zero page. Then when zap on it, this should trigger. What's more, removing that WARN_ON_ONCE may not be enough either, because we should also not rely on "whether it's a normal page" to decide whether pte marker is needed. For example, one can register wr-protect over some DAX regions to track writes when UFFD_FEATURE_WP_ASYNC enabled, in which case it can have page==NULL for a devmap but we may want to keep the marker around. Link: https://lkml.kernel.org/r/20240313213107.235067-1-peterx@redhat.com Fixes: 0cf18e839f64 ("mm/memory: handle !page case in zap_present_pte() separately") Signed-off-by: Peter Xu Acked-by: David Hildenbrand Cc: Muhammad Usama Anjum Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3789 --- mm/memory.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index bb2591de5ef1..94a53ead93a7 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1596,7 +1596,9 @@ static inline int zap_present_ptes(struct mmu_gather *tlb, ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); arch_check_zapped_pte(vma, ptent); tlb_remove_tlb_entry(tlb, pte, addr); - VM_WARN_ON_ONCE(userfaultfd_wp(vma)); + if (userfaultfd_pte_wp(vma, ptent)) + zap_install_uffd_wp_if_needed(vma, addr, pte, 1, + details, ptent); ksm_might_unmap_zero_page(mm, ptent); return 1; } -- Gitee From 24133a2c6bab76b4f2904d70e920f4500998c370 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 5 Oct 2023 15:07:30 +0100 Subject: [PATCH 1221/2138] arm64/mm: Hoist synchronization out of 
set_ptes() loop ANBZ: #9728 commit 3425cec42c3ce0f65fe74e412756b567b152e61d upstream set_ptes() sets a physically contiguous block of memory (which all belongs to the same folio) to a contiguous block of ptes. The arm64 implementation of this previously just looped, operating on each individual pte. But the __sync_icache_dcache() and mte_sync_tags() operations can both be hoisted out of the loop so that they are performed once for the contiguous set of pages (which may be less than the whole folio). This should result in minor performance gains. __sync_icache_dcache() already acts on the whole folio, and sets a flag in the folio so that it skips duplicate calls. But by hoisting the call, all the pte testing is done only once. mte_sync_tags() operates on each individual page with its own loop. But by passing the number of pages explicitly, we can rely solely on its loop and do the checks only once. This approach also makes it robust for the future, rather than assuming if a head page of a compound page is being mapped, then the whole compound page is being mapped, instead we explicitly know how many pages are being mapped. The old assumption may not continue to hold once the "anonymous large folios" feature is merged. 
Signed-off-by: Ryan Roberts Reviewed-by: Steven Price Link: https://lore.kernel.org/r/20231005140730.2191134-1-ryan.roberts@arm.com Signed-off-by: Catalin Marinas Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/include/asm/mte.h | 4 ++-- arch/arm64/include/asm/pgtable.h | 27 +++++++++++++++++---------- arch/arm64/kernel/mte.c | 4 ++-- 3 files changed, 21 insertions(+), 14 deletions(-) diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h index 79474232d413..cc83edbaef36 100644 --- a/arch/arm64/include/asm/mte.h +++ b/arch/arm64/include/asm/mte.h @@ -90,7 +90,7 @@ static inline bool try_page_mte_tagging(struct page *page) } void mte_zero_clear_page_tags(void *addr); -void mte_sync_tags(pte_t pte); +void mte_sync_tags(pte_t pte, unsigned int nr_pages); void mte_copy_page_tags(void *kto, const void *kfrom); void mte_thread_init_user(void); int mte_copy_mc_page_tags(void *kto, const void *kfrom); @@ -123,7 +123,7 @@ static inline bool try_page_mte_tagging(struct page *page) static inline void mte_zero_clear_page_tags(void *addr) { } -static inline void mte_sync_tags(pte_t pte) +static inline void mte_sync_tags(pte_t pte, unsigned int nr_pages) { } static inline void mte_copy_page_tags(void *kto, const void *kfrom) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 82cf1de4c435..65de7f262fa5 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -330,8 +330,7 @@ static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep, __func__, pte_val(old_pte), pte_val(pte)); } -static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) +static inline void __sync_cache_and_tags(pte_t pte, unsigned int nr_pages) { if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte)) __sync_icache_dcache(pte); @@ -344,11 +343,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long 
addr, */ if (system_supports_mte() && pte_access_permitted(pte, false) && !pte_special(pte) && pte_tagged(pte)) - mte_sync_tags(pte); - - __check_safe_pte_update(mm, ptep, pte); - - set_pte(ptep, pte); + mte_sync_tags(pte, nr_pages); } /* * Select all bits except the pfn @@ -370,9 +365,11 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr) { page_table_check_ptes_set(mm, ptep, pte, nr); + __sync_cache_and_tags(pte, nr); for (;;) { - __set_pte_at(mm, addr, ptep, pte); + __check_safe_pte_update(mm, ptep, pte); + set_pte(ptep, pte); if (--nr == 0) break; ptep++; @@ -542,18 +539,28 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd) #define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT) #define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) +static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned int nr) +{ + __sync_cache_and_tags(pte, nr); + __check_safe_pte_update(mm, ptep, pte); + set_pte(ptep, pte); +} + static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { page_table_check_pmd_set(mm, pmdp, pmd); - return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd)); + return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd), + PMD_SIZE >> PAGE_SHIFT); } static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, pud_t *pudp, pud_t pud) { page_table_check_pud_set(mm, pudp, pud); - return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud)); + return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud), + PUD_SIZE >> PAGE_SHIFT); } #define __p4d_to_phys(p4d) __pte_to_phys(p4d_pte(p4d)) diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 4edecaac8f91..2fb5e7a7a4d5 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -35,10 +35,10 @@ DEFINE_STATIC_KEY_FALSE(mte_async_or_asymm_mode); 
EXPORT_SYMBOL_GPL(mte_async_or_asymm_mode); #endif -void mte_sync_tags(pte_t pte) +void mte_sync_tags(pte_t pte, unsigned int nr_pages) { struct page *page = pte_page(pte); - long i, nr_pages = compound_nr(page); + unsigned int i; /* if PG_mte_tagged is set, tags have already been initialised */ for (i = 0; i < nr_pages; i++, page++) { -- Gitee From 5b8f435851f4a88a630fde273184601357fe70d0 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 17 Oct 2023 11:57:55 +0100 Subject: [PATCH 1222/2138] arm64: Mark the 'addr' argument to set_ptes() and __set_pte_at() as unused ANBZ: #9728 commit dba2ff4922b3cf573c25c3886e869258a6076030 upstream This argument is not used by the arm64 implementation. Mark it as __always_unused and also remove the unnecessary 'addr' increment in set_ptes(). Signed-off-by: Catalin Marinas Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202310140531.BQQwt3NQ-lkp@intel.com/ Cc: Will Deacon Tested-by: Ryan Roberts Link: https://lore.kernel.org/r/ZS6EvMiJ0QF5INkv@arm.com Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/include/asm/pgtable.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 65de7f262fa5..42917c7ed987 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -361,8 +361,9 @@ static inline pte_t pte_next_pfn(pte_t pte) return pfn_pte(pte_pfn(pte) + 1, pte_pgprot(pte)); } -static inline void set_ptes(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte, unsigned int nr) +static inline void set_ptes(struct mm_struct *mm, + unsigned long __always_unused addr, + pte_t *ptep, pte_t pte, unsigned int nr) { page_table_check_ptes_set(mm, ptep, pte, nr); __sync_cache_and_tags(pte, nr); @@ -373,7 +374,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, if (--nr == 0) break; ptep++; - addr += 
PAGE_SIZE; pte = pte_next_pfn(pte); } } @@ -539,7 +539,8 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd) #define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT) #define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) -static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, +static inline void __set_pte_at(struct mm_struct *mm, + unsigned long __always_unused addr, pte_t *ptep, pte_t pte, unsigned int nr) { __sync_cache_and_tags(pte, nr); -- Gitee From 94b65514134f050da43139510a2041b0da05b730 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:31:48 +0000 Subject: [PATCH 1223/2138] mm: clarify the spec for set_ptes() ANBZ: #9728 commit 6280d7317ccae19c776a3b6cf9848c964f958091 upstream Patch series "Transparent Contiguous PTEs for User Mappings", v6. This is a series to opportunistically and transparently use contpte mappings (set the contiguous bit in ptes) for user memory when those mappings meet the requirements. The change benefits arm64, but there is some (very) minor refactoring for x86 to enable its integration with core-mm. It is part of a wider effort to improve performance by allocating and mapping variable-sized blocks of memory (folios). One aim is for the 4K kernel to approach the performance of the 16K kernel, but without breaking compatibility and without the associated increase in memory. Another aim is to benefit the 16K and 64K kernels by enabling 2M THP, since this is the contpte size for those kernels. We have good performance data that demonstrates both aims are being met (see below). Of course this is only one half of the change. We require the mapped physical memory to be the correct size and alignment for this to actually be useful (i.e. 64K for 4K pages, or 2M for 16K/64K pages). Fortunately folios are solving this problem for us. Filesystems that support it (XFS, AFS, EROFS, tmpfs, ...) 
will allocate large folios up to the PMD size today, and more filesystems are coming. And for anonymous memory, "multi-size THP" is now upstream. Patch Layout ============ In this version, I've split the patches to better show each optimization: - 1-2: mm prep: misc code and docs cleanups - 3-6: mm,arm64,x86 prep: Add pte_advance_pfn() and make pte_next_pfn() a generic wrapper around it - 7-11: arm64 prep: Refactor ptep helpers into new layer - 12: functional contpte implementation - 23-18: various optimizations on top of the contpte implementation Testing ======= I've tested this series on both Ampere Altra (bare metal) and Apple M2 (VM): - mm selftests (inc new tests written for multi-size THP); no regressions - Speedometer Java script benchmark in Chromium web browser; no issues - Kernel compilation; no issues - Various tests under high memory pressure with swap enabled; no issues Performance =========== High Level Use Cases ~~~~~~~~~~~~~~~~~~~~ First some high level use cases (kernel compilation and speedometer JavaScript benchmarks). These are running on Ampere Altra (I've seen similar improvements on Android/Pixel 6). 
baseline: mm-unstable (mTHP switched off) mTHP: + enable 16K, 32K, 64K mTHP sizes "always" mTHP + contpte: + this series mTHP + contpte + exefolio: + patch at [6], which series supports Kernel Compilation with -j8 (negative is faster): | kernel | real-time | kern-time | user-time | |---------------------------|-----------|-----------|-----------| | baseline | 0.0% | 0.0% | 0.0% | | mTHP | -5.0% | -39.1% | -0.7% | | mTHP + contpte | -6.0% | -41.4% | -1.5% | | mTHP + contpte + exefolio | -7.8% | -43.1% | -3.4% | Kernel Compilation with -j80 (negative is faster): | kernel | real-time | kern-time | user-time | |---------------------------|-----------|-----------|-----------| | baseline | 0.0% | 0.0% | 0.0% | | mTHP | -5.0% | -36.6% | -0.6% | | mTHP + contpte | -6.1% | -38.2% | -1.6% | | mTHP + contpte + exefolio | -7.4% | -39.2% | -3.2% | Speedometer (positive is faster): | kernel | runs_per_min | |:--------------------------|--------------| | baseline | 0.0% | | mTHP | 1.5% | | mTHP + contpte | 3.2% | | mTHP + contpte + exefolio | 4.5% | Micro Benchmarks ~~~~~~~~~~~~~~~~ The following microbenchmarks are intended to demonstrate the performance of fork() and munmap() do not regress. I'm showing results for order-0 (4K) mappings, and for order-9 (2M) PTE-mapped THP. Thanks to David for sharing his benchmarks. baseline: mm-unstable + batch zap [7] series contpte-basic: + patches 0-19; functional contpte implementation contpte-batch: + patches 20-23; implement new batched APIs contpte-inline: + patch 24; __always_inline to help compiler contpte-fold: + patch 25; fold contpte mapping when sensible Primary platform is Ampere Altra bare metal. 
I'm also showing results for M2 VM (on top of MacOS) for reference, although experience suggests this might not be the most reliable for performance numbers of this sort: | FORK | order-0 | order-9 | | Ampere Altra |------------------------|------------------------| | (pte-map) | mean | stdev | mean | stdev | |----------------|------------|-----------|------------|-----------| | baseline | 0.0% | 2.7% | 0.0% | 0.2% | | contpte-basic | 6.3% | 1.4% | 1948.7% | 0.2% | | contpte-batch | 7.6% | 2.0% | -1.9% | 0.4% | | contpte-inline | 3.6% | 1.5% | -1.0% | 0.2% | | contpte-fold | 4.6% | 2.1% | -1.8% | 0.2% | | MUNMAP | order-0 | order-9 | | Ampere Altra |------------------------|------------------------| | (pte-map) | mean | stdev | mean | stdev | |----------------|------------|-----------|------------|-----------| | baseline | 0.0% | 0.5% | 0.0% | 0.3% | | contpte-basic | 1.8% | 0.3% | 1104.8% | 0.1% | | contpte-batch | -0.3% | 0.4% | 2.7% | 0.1% | | contpte-inline | -0.1% | 0.6% | 0.9% | 0.1% | | contpte-fold | 0.1% | 0.6% | 0.8% | 0.1% | | FORK | order-0 | order-9 | | Apple M2 VM |------------------------|------------------------| | (pte-map) | mean | stdev | mean | stdev | |----------------|------------|-----------|------------|-----------| | baseline | 0.0% | 1.4% | 0.0% | 0.8% | | contpte-basic | 6.8% | 1.2% | 469.4% | 1.4% | | contpte-batch | -7.7% | 2.0% | -8.9% | 0.7% | | contpte-inline | -6.0% | 2.1% | -6.0% | 2.0% | | contpte-fold | 5.9% | 1.4% | -6.4% | 1.4% | | MUNMAP | order-0 | order-9 | | Apple M2 VM |------------------------|------------------------| | (pte-map) | mean | stdev | mean | stdev | |----------------|------------|-----------|------------|-----------| | baseline | 0.0% | 0.6% | 0.0% | 0.4% | | contpte-basic | 1.6% | 0.6% | 233.6% | 0.7% | | contpte-batch | 1.9% | 0.3% | -3.9% | 0.4% | | contpte-inline | 2.2% | 0.8% | -1.6% | 0.9% | | contpte-fold | 1.5% | 0.7% | -1.7% | 0.7% | Misc ~~~~ John Hubbard at Nvidia has indicated dramatic 10x 
performance improvements for some workloads at [8], when using 64K base page kernel. [1] https://lore.kernel.org/linux-arm-kernel/20230622144210.2623299-1-ryan.roberts@arm.com/ [2] https://lore.kernel.org/linux-arm-kernel/20231115163018.1303287-1-ryan.roberts@arm.com/ [3] https://lore.kernel.org/linux-arm-kernel/20231204105440.61448-1-ryan.roberts@arm.com/ [4] https://lore.kernel.org/lkml/20231218105100.172635-1-ryan.roberts@arm.com/ [5] https://lore.kernel.org/linux-mm/633af0a7-0823-424f-b6ef-374d99483f05@arm.com/ [6] https://lore.kernel.org/lkml/08c16f7d-f3b3-4f22-9acc-da943f647dc3@arm.com/ [7] https://lore.kernel.org/linux-mm/20240214204435.167852-1-david@redhat.com/ [8] https://lore.kernel.org/linux-mm/c507308d-bdd4-5f9e-d4ff-e96e4520be85@nvidia.com/ [9] https://gitlab.arm.com/linux-arm/linux-rr/-/tree/features/granule_perf/contpte-lkml_v6 This patch (of 18): set_ptes() spec implies that it can only be used to set a present pte because it interprets the PFN field to increment it. However, set_pte_at() has been implemented on top of set_ptes() since set_ptes() was introduced, and set_pte_at() allows setting a pte to a not-present state. So clarify the spec to state that when nr==1, new state of pte may be present or not present. When nr>1, new state of all ptes must be present. While we are at it, tighten the spec to set requirements around the initial state of ptes; when nr==1 it may be either present or not-present. But when nr>1 all ptes must initially be not-present. All set_ptes() callsites already conform to this requirement. Stating it explicitly is useful because it allows for a simplification to the upcoming arm64 contpte implementation. 
Link: https://lkml.kernel.org/r/20240215103205.2607016-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20240215103205.2607016-2-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Catalin Marinas Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: John Hubbard Cc: Kefeng Wang Cc: Marc Zyngier Cc: Mark Rutland Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- include/linux/pgtable.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 23949a9be594..9d430a92ad21 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -222,6 +222,10 @@ static inline pte_t pte_next_pfn(pte_t pte) * @pte: Page table entry for the first page. * @nr: Number of pages to map. * + * When nr==1, initial state of pte may be present or not present, and new state + * may be present or not present. When nr>1, initial state of all ptes must be + * not present, and new state must be present. + * * May be overridden by the architecture, or the architecture can define * set_pte() and PFN_PTE_SHIFT. * -- Gitee From 3a81029dc10409a713d34ada088678cd60b8455a Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:31:49 +0000 Subject: [PATCH 1224/2138] mm: thp: batch-collapse PMD with set_ptes() ANBZ: #9728 commit 2bdba9868a4ffcb1492db7272f34b54387910177 upstream Refactor __split_huge_pmd_locked() so that a present PMD can be collapsed to PTEs in a single batch using set_ptes(). This should improve performance a little bit, but the real motivation is to remove the need for the arm64 backend to have to fold the contpte entries. 
Instead, since the ptes are set as a batch, the contpte blocks can be initially set up pre-folded (once the arm64 contpte support is added in the next few patches). This leads to noticeable performance improvement during split. Link: https://lkml.kernel.org/r/20240215103205.2607016-3-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Catalin Marinas Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: John Hubbard Cc: Kefeng Wang Cc: Marc Zyngier Cc: Mark Rutland Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- mm/huge_memory.c | 58 +++++++++++++++++++++++++++--------------------- 1 file changed, 33 insertions(+), 25 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d2246d61c035..983d50b04b56 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2394,15 +2394,16 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, pte = pte_offset_map(&_pmd, haddr); VM_BUG_ON(!pte); - for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { - pte_t entry; - /* - * Note that NUMA hinting access restrictions are not - * transferred to avoid any possibility of altering - * permissions across VMAs. - */ - if (freeze || pmd_migration) { + + /* + * Note that NUMA hinting access restrictions are not transferred to + * avoid any possibility of altering permissions across VMAs. 
+ */ + if (freeze || pmd_migration) { + for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { + pte_t entry; swp_entry_t swp_entry; + if (write) swp_entry = make_writable_migration_entry( page_to_pfn(page + i)); @@ -2421,25 +2422,32 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, entry = pte_swp_mksoft_dirty(entry); if (uffd_wp) entry = pte_swp_mkuffd_wp(entry); - } else { - entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); - if (write) - entry = pte_mkwrite(entry, vma); - if (!young) - entry = pte_mkold(entry); - /* NOTE: this may set soft-dirty too on some archs */ - if (dirty) - entry = pte_mkdirty(entry); - if (soft_dirty) - entry = pte_mksoft_dirty(entry); - if (uffd_wp) - entry = pte_mkuffd_wp(entry); + + VM_WARN_ON(!pte_none(ptep_get(pte + i))); + set_pte_at(mm, addr, pte + i, entry); } - VM_BUG_ON(!pte_none(ptep_get(pte))); - set_pte_at(mm, addr, pte, entry); - pte++; + } else { + pte_t entry; + + entry = mk_pte(page, READ_ONCE(vma->vm_page_prot)); + if (write) + entry = pte_mkwrite(entry, vma); + if (!young) + entry = pte_mkold(entry); + /* NOTE: this may set soft-dirty too on some archs */ + if (dirty) + entry = pte_mkdirty(entry); + if (soft_dirty) + entry = pte_mksoft_dirty(entry); + if (uffd_wp) + entry = pte_mkuffd_wp(entry); + + for (i = 0; i < HPAGE_PMD_NR; i++) + VM_WARN_ON(!pte_none(ptep_get(pte + i))); + + set_ptes(mm, haddr, pte, entry, HPAGE_PMD_NR); } - pte_unmap(pte - 1); + pte_unmap(pte); if (!pmd_migration) folio_remove_rmap_pmd(folio, page, vma); -- Gitee From 4180f10990da6fb8b975957920a58e5021179966 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:31:50 +0000 Subject: [PATCH 1225/2138] mm: introduce pte_advance_pfn() and use for pte_next_pfn() ANBZ: #9728 commit 583ceaaa339960e673ac0029f323bb1c6ffc96d7 upstream The goal is to be able to advance a PTE by an arbitrary number of PFNs. So introduce a new API that takes a nr param. 
Define the default implementation here and allow for architectures to override. pte_next_pfn() becomes a wrapper around pte_advance_pfn(). Follow up commits will convert each overriding architecture's pte_next_pfn() to pte_advance_pfn(). Link: https://lkml.kernel.org/r/20240215103205.2607016-4-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Catalin Marinas Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: John Hubbard Cc: Kefeng Wang Cc: Marc Zyngier Cc: Mark Rutland Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- include/linux/pgtable.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 9d430a92ad21..c9c8f5cfaaaa 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -205,14 +205,17 @@ static inline int pmd_young(pmd_t pmd) #define arch_flush_lazy_mmu_mode() do {} while (0) #endif - #ifndef pte_next_pfn -static inline pte_t pte_next_pfn(pte_t pte) +#ifndef pte_advance_pfn +static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) { - return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT)); + return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT)); } #endif +#define pte_next_pfn(pte) pte_advance_pfn(pte, 1) +#endif + #ifndef set_ptes /** * set_ptes - Map consecutive pages to a contiguous range of addresses. 
-- Gitee From a64db6c9ad07544fcf4a43dae1333075b1b65e56 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:31:51 +0000 Subject: [PATCH 1226/2138] arm64/mm: convert pte_next_pfn() to pte_advance_pfn() ANBZ: #9728 commit c1bd2b4028ae5b4d2ada64b31c40cc44cdf00972 upstream Core-mm needs to be able to advance the pfn by an arbitrary amount, so override the new pte_advance_pfn() API to do so. Link: https://lkml.kernel.org/r/20240215103205.2607016-5-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Acked-by: Mark Rutland Acked-by: Catalin Marinas Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: John Hubbard Cc: Kefeng Wang Cc: Marc Zyngier Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/include/asm/pgtable.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 42917c7ed987..40dbaa57cacf 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -355,10 +355,10 @@ static inline pgprot_t pte_pgprot(pte_t pte) return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); } -#define pte_next_pfn pte_next_pfn -static inline pte_t pte_next_pfn(pte_t pte) +#define pte_advance_pfn pte_advance_pfn +static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) { - return pfn_pte(pte_pfn(pte) + 1, pte_pgprot(pte)); + return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte)); } static inline void set_ptes(struct mm_struct *mm, @@ -374,7 +374,7 @@ static inline void set_ptes(struct mm_struct *mm, if (--nr == 0) break; ptep++; - pte = pte_next_pfn(pte); + pte = pte_advance_pfn(pte, 1); } } #define set_ptes set_ptes 
-- Gitee From 5ef42178b0dd5b6d82e9716b9330f1f72c008d49 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:31:52 +0000 Subject: [PATCH 1227/2138] x86/mm: convert pte_next_pfn() to pte_advance_pfn() ANBZ: #9728 commit 506b586769ecef8c83fff64de227e7fa84b7be42 upstream Core-mm needs to be able to advance the pfn by an arbitrary amount, so override the new pte_advance_pfn() API to do so. Link: https://lkml.kernel.org/r/20240215103205.2607016-6-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: David Hildenbrand Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Catalin Marinas Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: John Hubbard Cc: Kefeng Wang Cc: Marc Zyngier Cc: Mark Rutland Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/x86/include/asm/pgtable.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index d03fe4fb41f4..993d49cd379a 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -939,13 +939,13 @@ static inline int pte_same(pte_t a, pte_t b) return a.pte == b.pte; } -static inline pte_t pte_next_pfn(pte_t pte) +static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) { if (__pte_needs_invert(pte_val(pte))) - return __pte(pte_val(pte) - (1UL << PFN_PTE_SHIFT)); - return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT)); + return __pte(pte_val(pte) - (nr << PFN_PTE_SHIFT)); + return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT)); } -#define pte_next_pfn pte_next_pfn +#define pte_advance_pfn pte_advance_pfn static inline int pte_present(pte_t a) { -- Gitee From d7c7ab73f7c7b0699c276bd2467ffa049d3c0cfb Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 
Feb 2024 10:31:53 +0000 Subject: [PATCH 1228/2138] mm: tidy up pte_next_pfn() definition ANBZ: #9728 commit fb23bf6bd288db3187c27b971e558a3e9f70ae96 upstream Now that the all architecture overrides of pte_next_pfn() have been replaced with pte_advance_pfn(), we can simplify the definition of the generic pte_next_pfn() macro so that it is unconditionally defined. Link: https://lkml.kernel.org/r/20240215103205.2607016-7-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Catalin Marinas Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: John Hubbard Cc: Kefeng Wang Cc: Marc Zyngier Cc: Mark Rutland Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- include/linux/pgtable.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index c9c8f5cfaaaa..0651e89bf5dc 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -205,7 +205,6 @@ static inline int pmd_young(pmd_t pmd) #define arch_flush_lazy_mmu_mode() do {} while (0) #endif -#ifndef pte_next_pfn #ifndef pte_advance_pfn static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) { @@ -214,7 +213,6 @@ static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) #endif #define pte_next_pfn(pte) pte_advance_pfn(pte, 1) -#endif #ifndef set_ptes /** -- Gitee From b959b0b571190cff965562449bc1cb788b253851 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:31:54 +0000 Subject: [PATCH 1229/2138] arm64/mm: convert READ_ONCE(*ptep) to ptep_get(ptep) ANBZ: #9728 commit 532736558e8ef2865eae1d84b52dda4422cac810 upstream There are a number of places in the arch code that read a pte by using the READ_ONCE() macro. 
Refactor these call sites to instead use the ptep_get() helper, which itself is a READ_ONCE(). Generated code should be the same. This will benefit us when we shortly introduce the transparent contpte support. In this case, ptep_get() will become more complex so we now have all the code abstracted through it. Link: https://lkml.kernel.org/r/20240215103205.2607016-8-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Tested-by: John Hubbard Acked-by: Mark Rutland Acked-by: Catalin Marinas Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Dave Hansen Cc: David Hildenbrand Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: Kefeng Wang Cc: Marc Zyngier Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/include/asm/pgtable.h | 12 +++++++++--- arch/arm64/kernel/efi.c | 2 +- arch/arm64/mm/fault.c | 4 ++-- arch/arm64/mm/hugetlbpage.c | 6 +++--- arch/arm64/mm/kasan_init.c | 2 +- arch/arm64/mm/mmu.c | 12 ++++++------ arch/arm64/mm/pageattr.c | 4 ++-- arch/arm64/mm/trans_pgd.c | 2 +- 8 files changed, 25 insertions(+), 19 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 40dbaa57cacf..69732847a6c6 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -280,6 +280,12 @@ static inline void set_pte(pte_t *ptep, pte_t pte) } } +#define ptep_get ptep_get +static inline pte_t ptep_get(pte_t *ptep) +{ + return READ_ONCE(*ptep); +} + extern void __sync_icache_dcache(pte_t pteval); bool pgattr_change_is_safe(u64 old, u64 new); @@ -307,7 +313,7 @@ static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep, if (!IS_ENABLED(CONFIG_DEBUG_VM)) return; - old_pte = READ_ONCE(*ptep); + old_pte = ptep_get(ptep); if (!pte_valid(old_pte) || !pte_valid(pte)) 
return; @@ -912,7 +918,7 @@ static inline int __ptep_test_and_clear_young(pte_t *ptep) { pte_t old_pte, pte; - pte = READ_ONCE(*ptep); + pte = ptep_get(ptep); do { old_pte = pte; pte = pte_mkold(pte); @@ -994,7 +1000,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres { pte_t old_pte, pte; - pte = READ_ONCE(*ptep); + pte = ptep_get(ptep); do { old_pte = pte; pte = pte_wrprotect(pte); diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 2b478ca356b0..e72d62416b1a 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -107,7 +107,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) { struct set_perm_data *spd = data; const efi_memory_desc_t *md = spd->md; - pte_t pte = READ_ONCE(*ptep); + pte_t pte = ptep_get(ptep); if (md->attribute & EFI_MEMORY_RO) pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index a6d1c333719f..57bbd8a8a9b9 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -191,7 +191,7 @@ static void show_pte(unsigned long addr) if (!ptep) break; - pte = READ_ONCE(*ptep); + pte = ptep_get(ptep); pr_cont(", pte=%016llx", pte_val(pte)); pte_unmap(ptep); } while(0); @@ -214,7 +214,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, pte_t entry, int dirty) { pteval_t old_pteval, pteval; - pte_t pte = READ_ONCE(*ptep); + pte_t pte = ptep_get(ptep); if (pte_same(pte, entry)) return 0; diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 13fd592228b1..de1a7102b5ae 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -492,7 +492,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm, size_t pgsize; pte_t pte; - if (!pte_cont(READ_ONCE(*ptep))) { + if (!pte_cont(ptep_get(ptep))) { ptep_set_wrprotect(mm, addr, ptep); return; } @@ -517,7 +517,7 @@ pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, size_t pgsize; int ncontig; - if 
(!pte_cont(READ_ONCE(*ptep))) + if (!pte_cont(ptep_get(ptep))) return ptep_clear_flush(vma, addr, ptep); ncontig = find_num_contig(mm, addr, ptep, &pgsize); @@ -551,7 +551,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr * when the permission changes from executable to non-executable * in cases where cpu is affected with errata #2645198. */ - if (pte_user_exec(READ_ONCE(*ptep))) + if (pte_user_exec(ptep_get(ptep))) return huge_ptep_clear_flush(vma, addr, ptep); } return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index f17d066e85eb..1b96e0ad6661 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -113,7 +113,7 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE); next = addr + PAGE_SIZE; set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); - } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep))); + } while (ptep++, addr = next, addr != end && pte_none(ptep_get(ptep))); } static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 1884c70f359b..ee8641b3f139 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -225,7 +225,7 @@ static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, ptep = pte_set_fixmap_offset(pmdp, addr); do { - pte_t old_pte = READ_ONCE(*ptep); + pte_t old_pte = ptep_get(ptep); set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot)); @@ -234,7 +234,7 @@ static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, * only allow updates to the permission attributes. 
*/ BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), - READ_ONCE(pte_val(*ptep)))); + pte_val(ptep_get(ptep)))); phys += PAGE_SIZE; } while (ptep++, addr += PAGE_SIZE, addr != end); @@ -919,7 +919,7 @@ static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr, do { ptep = pte_offset_kernel(pmdp, addr); - pte = READ_ONCE(*ptep); + pte = ptep_get(ptep); if (pte_none(pte)) continue; @@ -1052,7 +1052,7 @@ static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr, do { ptep = pte_offset_kernel(pmdp, addr); - pte = READ_ONCE(*ptep); + pte = ptep_get(ptep); /* * This is just a sanity check here which verifies that @@ -1071,7 +1071,7 @@ static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr, */ ptep = pte_offset_kernel(pmdp, 0UL); for (i = 0; i < PTRS_PER_PTE; i++) { - if (!pte_none(READ_ONCE(ptep[i]))) + if (!pte_none(ptep_get(&ptep[i]))) return; } @@ -1541,7 +1541,7 @@ pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte * when the permission changes from executable to non-executable * in cases where cpu is affected with errata #2645198. 
*/ - if (pte_user_exec(READ_ONCE(*ptep))) + if (pte_user_exec(ptep_get(ptep))) return ptep_clear_flush(vma, addr, ptep); } return ptep_get_and_clear(vma->vm_mm, addr, ptep); diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 801ac339298a..76ea4f20c384 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -32,7 +32,7 @@ bool can_set_block_and_cont_map(void) static int change_page_range(pte_t *ptep, unsigned long addr, void *data) { struct page_change_data *cdata = data; - pte_t pte = READ_ONCE(*ptep); + pte_t pte = ptep_get(ptep); pte = clear_pte_bit(pte, cdata->clear_mask); pte = set_pte_bit(pte, cdata->set_mask); @@ -262,5 +262,5 @@ bool kernel_page_present(struct page *page) return true; ptep = pte_offset_kernel(pmdp, addr); - return pte_valid(READ_ONCE(*ptep)); + return pte_valid(ptep_get(ptep)); } diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c index 7b14df3c6477..f71ab4704cce 100644 --- a/arch/arm64/mm/trans_pgd.c +++ b/arch/arm64/mm/trans_pgd.c @@ -33,7 +33,7 @@ static void *trans_alloc(struct trans_pgd_info *info) static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr) { - pte_t pte = READ_ONCE(*src_ptep); + pte_t pte = ptep_get(src_ptep); if (pte_valid(pte)) { /* -- Gitee From 81262bb92625a74c6403fda576b5b02647d64e9a Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:31:55 +0000 Subject: [PATCH 1230/2138] arm64/mm: convert set_pte_at() to set_ptes(..., 1) ANBZ: #9728 commit 659e193027910a5d3083e34b488ab459d2ef5082 upstream Since set_ptes() was introduced, set_pte_at() has been implemented as a generic macro around set_ptes(..., 1). So this change should continue to generate the same code. However, making this change prepares us for the transparent contpte support. It means we can reroute set_ptes() to __set_ptes(). Since set_pte_at() is a generic macro, there will be no equivalent __set_pte_at() to reroute to. 
Note that a couple of calls to set_pte_at() remain in the arch code. This is intentional, since those call sites are acting on behalf of core-mm and should continue to call into the public set_ptes() rather than the arch-private __set_ptes(). Link: https://lkml.kernel.org/r/20240215103205.2607016-9-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Tested-by: John Hubbard Acked-by: Mark Rutland Acked-by: Catalin Marinas Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Dave Hansen Cc: David Hildenbrand Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: Kefeng Wang Cc: Marc Zyngier Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/include/asm/pgtable.h | 2 +- arch/arm64/kernel/mte.c | 2 +- arch/arm64/kvm/guest.c | 2 +- arch/arm64/mm/fault.c | 2 +- arch/arm64/mm/hugetlbpage.c | 10 +++++----- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 69732847a6c6..0f4f303de9ae 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -1092,7 +1092,7 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio) #endif /* CONFIG_ARM64_MTE */ /* - * On AArch64, the cache coherency is handled via the set_pte_at() function. + * On AArch64, the cache coherency is handled via the set_ptes() function. 
*/ static inline void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 2fb5e7a7a4d5..b99b718164ed 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -67,7 +67,7 @@ int memcmp_pages(struct page *page1, struct page *page2) /* * If the page content is identical but at least one of the pages is * tagged, return non-zero to avoid KSM merging. If only one of the - * pages is tagged, set_pte_at() may zero or change the tags of the + * pages is tagged, set_ptes() may zero or change the tags of the * other page via mte_sync_tags(). */ if (page_mte_tagged(page1) || page_mte_tagged(page2)) diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index efe82cc86bd1..ce238ef9e113 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -1073,7 +1073,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, } else { /* * Only locking to serialise with a concurrent - * set_pte_at() in the VMM but still overriding the + * set_ptes() in the VMM but still overriding the * tags, hence ignoring the return value. */ try_page_mte_tagging(page); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 57bbd8a8a9b9..06869643278a 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -205,7 +205,7 @@ static void show_pte(unsigned long addr) * * It needs to cope with hardware update of the accessed/dirty state by other * agents in the system and can safely skip the __sync_icache_dcache() call as, - * like set_pte_at(), the PTE is never changed from no-exec to exec here. + * like set_ptes(), the PTE is never changed from no-exec to exec here. * * Returns whether or not the PTE actually changed. 
*/ diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index de1a7102b5ae..dce6a0e59a67 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -254,12 +254,12 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, if (!pte_present(pte)) { for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) - set_pte_at(mm, addr, ptep, pte); + set_ptes(mm, addr, ptep, pte, 1); return; } if (!pte_cont(pte)) { - set_pte_at(mm, addr, ptep, pte); + set_ptes(mm, addr, ptep, pte, 1); return; } @@ -270,7 +270,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, clear_flush(mm, addr, ptep, pgsize, ncontig); for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) - set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); + set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1); } pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, @@ -478,7 +478,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, hugeprot = pte_pgprot(pte); for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) - set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); + set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1); return 1; } @@ -507,7 +507,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm, pfn = pte_pfn(pte); for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) - set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); + set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1); } pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, -- Gitee From 69d5a8e33fb0ac9bd0abc68401b712d6d4943885 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:31:56 +0000 Subject: [PATCH 1231/2138] arm64/mm: convert ptep_clear() to ptep_get_and_clear() ANBZ: #9728 commit cbb0294fdd72a5f63ec59fad5c0a98d63bd572fc upstream ptep_clear() is a generic wrapper around the arch-implemented ptep_get_and_clear(). 
We are about to convert ptep_get_and_clear() into a public version and private version (__ptep_get_and_clear()) to support the transparent contpte work. We won't have a private version of ptep_clear() so let's convert it to directly call ptep_get_and_clear(). Link: https://lkml.kernel.org/r/20240215103205.2607016-10-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Tested-by: John Hubbard Acked-by: Mark Rutland Acked-by: Catalin Marinas Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Dave Hansen Cc: David Hildenbrand Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: Kefeng Wang Cc: Marc Zyngier Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/mm/hugetlbpage.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index dce6a0e59a67..d2a874f283a8 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -236,7 +236,7 @@ static void clear_flush(struct mm_struct *mm, unsigned long i, saddr = addr; for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) - ptep_clear(mm, addr, ptep); + ptep_get_and_clear(mm, addr, ptep); flush_tlb_range(&vma, saddr, addr); } -- Gitee From 1282e1394873299d4d194061a95f68be435f5c7d Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:31:57 +0000 Subject: [PATCH 1232/2138] arm64/mm: new ptep layer to manage contig bit ANBZ: #9728 commit 5a00bfd6a52cf31e93d5f1b734087deb32a3cffa upstream Create a new layer for the in-table PTE manipulation APIs. For now, The existing API is prefixed with double underscore to become the arch-private API and the public API is just a simple wrapper that calls the private API. 
The public API implementation will subsequently be used to transparently manipulate the contiguous bit where appropriate. But since there are already some contig-aware users (e.g. hugetlb, kernel mapper), we must first ensure those users use the private API directly so that the future contig-bit manipulations in the public API do not interfere with those existing uses. The following APIs are treated this way: - ptep_get - set_pte - set_ptes - pte_clear - ptep_get_and_clear - ptep_test_and_clear_young - ptep_clear_flush_young - ptep_set_wrprotect - ptep_set_access_flags Link: https://lkml.kernel.org/r/20240215103205.2607016-11-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Tested-by: John Hubbard Acked-by: Mark Rutland Acked-by: Catalin Marinas Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Dave Hansen Cc: David Hildenbrand Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: Kefeng Wang Cc: Marc Zyngier Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/include/asm/pgtable.h | 83 +++++++++++++++++--------------- arch/arm64/kernel/efi.c | 4 +- arch/arm64/kernel/mte.c | 2 +- arch/arm64/kvm/guest.c | 2 +- arch/arm64/mm/fault.c | 12 ++--- arch/arm64/mm/fixmap.c | 4 +- arch/arm64/mm/hugetlbpage.c | 40 +++++++-------- arch/arm64/mm/kasan_init.c | 6 +-- arch/arm64/mm/mmu.c | 14 +++--- arch/arm64/mm/pageattr.c | 6 +-- arch/arm64/mm/trans_pgd.c | 6 +-- 11 files changed, 93 insertions(+), 86 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 0f4f303de9ae..6c9866648273 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -93,7 +93,8 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys) __pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | 
pgprot_val(prot)) #define pte_none(pte) (!pte_val(pte)) -#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) +#define __pte_clear(mm, addr, ptep) \ + __set_pte(ptep, __pte(0)) #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) /* @@ -137,7 +138,7 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys) * so that we don't erroneously return false for pages that have been * remapped as PROT_NONE but are yet to be flushed from the TLB. * Note that we can't make any assumptions based on the state of the access - * flag, since ptep_clear_flush_young() elides a DSB when invalidating the + * flag, since __ptep_clear_flush_young() elides a DSB when invalidating the * TLB. */ #define pte_accessible(mm, pte) \ @@ -266,7 +267,7 @@ static inline pte_t pte_mkdevmap(pte_t pte) return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL)); } -static inline void set_pte(pte_t *ptep, pte_t pte) +static inline void __set_pte(pte_t *ptep, pte_t pte) { WRITE_ONCE(*ptep, pte); @@ -280,8 +281,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) } } -#define ptep_get ptep_get -static inline pte_t ptep_get(pte_t *ptep) +static inline pte_t __ptep_get(pte_t *ptep) { return READ_ONCE(*ptep); } @@ -313,7 +313,7 @@ static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep, if (!IS_ENABLED(CONFIG_DEBUG_VM)) return; - old_pte = ptep_get(ptep); + old_pte = __ptep_get(ptep); if (!pte_valid(old_pte) || !pte_valid(pte)) return; @@ -322,7 +322,7 @@ static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep, /* * Check for potential race with hardware updates of the pte - * (ptep_set_access_flags safely changes valid ptes without going + * (__ptep_set_access_flags safely changes valid ptes without going * through an invalid entry). 
*/ VM_WARN_ONCE(!pte_young(pte), @@ -367,23 +367,22 @@ static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) return pfn_pte(pte_pfn(pte) + nr, pte_pgprot(pte)); } -static inline void set_ptes(struct mm_struct *mm, - unsigned long __always_unused addr, - pte_t *ptep, pte_t pte, unsigned int nr) +static inline void __set_ptes(struct mm_struct *mm, + unsigned long __always_unused addr, + pte_t *ptep, pte_t pte, unsigned int nr) { page_table_check_ptes_set(mm, ptep, pte, nr); __sync_cache_and_tags(pte, nr); for (;;) { __check_safe_pte_update(mm, ptep, pte); - set_pte(ptep, pte); + __set_pte(ptep, pte); if (--nr == 0) break; ptep++; pte = pte_advance_pfn(pte, 1); } } -#define set_ptes set_ptes /* * Huge pte definitions. @@ -551,7 +550,7 @@ static inline void __set_pte_at(struct mm_struct *mm, { __sync_cache_and_tags(pte, nr); __check_safe_pte_update(mm, ptep, pte); - set_pte(ptep, pte); + __set_pte(ptep, pte); } static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, @@ -868,8 +867,7 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) return pte_pmd(pte_modify(pmd_pte(pmd), newprot)); } -#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS -extern int ptep_set_access_flags(struct vm_area_struct *vma, +extern int __ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty); @@ -879,7 +877,8 @@ static inline int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty) { - return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty); + return __ptep_set_access_flags(vma, address, (pte_t *)pmdp, + pmd_pte(entry), dirty); } static inline int pud_devmap(pud_t pud) @@ -913,12 +912,13 @@ static inline bool pud_user_accessible_page(pud_t pud) /* * Atomic pte/pmd modifications. 
*/ -#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -static inline int __ptep_test_and_clear_young(pte_t *ptep) +static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep) { pte_t old_pte, pte; - pte = ptep_get(ptep); + pte = __ptep_get(ptep); do { old_pte = pte; pte = pte_mkold(pte); @@ -929,18 +929,10 @@ static inline int __ptep_test_and_clear_young(pte_t *ptep) return pte_young(pte); } -static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, - unsigned long address, - pte_t *ptep) -{ - return __ptep_test_and_clear_young(ptep); -} - -#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH -static inline int ptep_clear_flush_young(struct vm_area_struct *vma, +static inline int __ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { - int young = ptep_test_and_clear_young(vma, address, ptep); + int young = __ptep_test_and_clear_young(vma, address, ptep); if (young) { /* @@ -963,12 +955,11 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { - return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp); + return __ptep_test_and_clear_young(vma, address, (pte_t *)pmdp); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -#define __HAVE_ARCH_PTEP_GET_AND_CLEAR -static inline pte_t ptep_get_and_clear(struct mm_struct *mm, +static inline pte_t __ptep_get_and_clear(struct mm_struct *mm, unsigned long address, pte_t *ptep) { pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0)); @@ -992,15 +983,15 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* - * ptep_set_wrprotect - mark read-only while trasferring potential hardware + * __ptep_set_wrprotect - mark read-only while trasferring potential hardware * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit. 
*/ -#define __HAVE_ARCH_PTEP_SET_WRPROTECT -static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) +static inline void __ptep_set_wrprotect(struct mm_struct *mm, + unsigned long address, pte_t *ptep) { pte_t old_pte, pte; - pte = ptep_get(ptep); + pte = __ptep_get(ptep); do { old_pte = pte; pte = pte_wrprotect(pte); @@ -1014,7 +1005,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { - ptep_set_wrprotect(mm, address, (pte_t *)pmdp); + __ptep_set_wrprotect(mm, address, (pte_t *)pmdp); } #define pmdp_establish pmdp_establish @@ -1092,7 +1083,7 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio) #endif /* CONFIG_ARM64_MTE */ /* - * On AArch64, the cache coherency is handled via the set_ptes() function. + * On AArch64, the cache coherency is handled via the __set_ptes() function. */ static inline void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, @@ -1144,6 +1135,22 @@ extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma, extern void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t new_pte); + +#define ptep_get __ptep_get +#define set_pte __set_pte +#define set_ptes __set_ptes +#define pte_clear __pte_clear +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +#define ptep_get_and_clear __ptep_get_and_clear +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +#define ptep_test_and_clear_young __ptep_test_and_clear_young +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +#define ptep_clear_flush_young __ptep_clear_flush_young +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +#define ptep_set_wrprotect __ptep_set_wrprotect +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +#define ptep_set_access_flags __ptep_set_access_flags + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_PGTABLE_H */ diff 
--git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index e72d62416b1a..89d104c0bce6 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -107,7 +107,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) { struct set_perm_data *spd = data; const efi_memory_desc_t *md = spd->md; - pte_t pte = ptep_get(ptep); + pte_t pte = __ptep_get(ptep); if (md->attribute & EFI_MEMORY_RO) pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); @@ -116,7 +116,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) else if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti() && spd->has_bti) pte = set_pte_bit(pte, __pgprot(PTE_GP)); - set_pte(ptep, pte); + __set_pte(ptep, pte); return 0; } diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index b99b718164ed..cea96ee75d22 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -67,7 +67,7 @@ int memcmp_pages(struct page *page1, struct page *page2) /* * If the page content is identical but at least one of the pages is * tagged, return non-zero to avoid KSM merging. If only one of the - * pages is tagged, set_ptes() may zero or change the tags of the + * pages is tagged, __set_ptes() may zero or change the tags of the * other page via mte_sync_tags(). */ if (page_mte_tagged(page1) || page_mte_tagged(page2)) diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index ce238ef9e113..135fcf3fc4bb 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -1073,7 +1073,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, } else { /* * Only locking to serialise with a concurrent - * set_ptes() in the VMM but still overriding the + * __set_ptes() in the VMM but still overriding the * tags, hence ignoring the return value. 
*/ try_page_mte_tagging(page); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 06869643278a..0f3983c4769c 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -191,7 +191,7 @@ static void show_pte(unsigned long addr) if (!ptep) break; - pte = ptep_get(ptep); + pte = __ptep_get(ptep); pr_cont(", pte=%016llx", pte_val(pte)); pte_unmap(ptep); } while(0); @@ -205,16 +205,16 @@ static void show_pte(unsigned long addr) * * It needs to cope with hardware update of the accessed/dirty state by other * agents in the system and can safely skip the __sync_icache_dcache() call as, - * like set_ptes(), the PTE is never changed from no-exec to exec here. + * like __set_ptes(), the PTE is never changed from no-exec to exec here. * * Returns whether or not the PTE actually changed. */ -int ptep_set_access_flags(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep, - pte_t entry, int dirty) +int __ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty) { pteval_t old_pteval, pteval; - pte_t pte = ptep_get(ptep); + pte_t pte = __ptep_get(ptep); if (pte_same(pte, entry)) return 0; diff --git a/arch/arm64/mm/fixmap.c b/arch/arm64/mm/fixmap.c index c0a3301203bd..bfc02568805a 100644 --- a/arch/arm64/mm/fixmap.c +++ b/arch/arm64/mm/fixmap.c @@ -121,9 +121,9 @@ void __set_fixmap(enum fixed_addresses idx, ptep = fixmap_pte(addr); if (pgprot_val(flags)) { - set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags)); + __set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags)); } else { - pte_clear(&init_mm, addr, ptep); + __pte_clear(&init_mm, addr, ptep); flush_tlb_kernel_range(addr, addr+PAGE_SIZE); } } diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index d2a874f283a8..b1e8d8f8dae4 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -152,14 +152,14 @@ pte_t huge_ptep_get(pte_t *ptep) { int ncontig, i; size_t pgsize; - pte_t orig_pte = ptep_get(ptep); + pte_t 
orig_pte = __ptep_get(ptep); if (!pte_present(orig_pte) || !pte_cont(orig_pte)) return orig_pte; ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize); for (i = 0; i < ncontig; i++, ptep++) { - pte_t pte = ptep_get(ptep); + pte_t pte = __ptep_get(ptep); if (pte_dirty(pte)) orig_pte = pte_mkdirty(orig_pte); @@ -184,11 +184,11 @@ static pte_t get_clear_contig(struct mm_struct *mm, unsigned long pgsize, unsigned long ncontig) { - pte_t orig_pte = ptep_get(ptep); + pte_t orig_pte = __ptep_get(ptep); unsigned long i; for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) { - pte_t pte = ptep_get_and_clear(mm, addr, ptep); + pte_t pte = __ptep_get_and_clear(mm, addr, ptep); /* * If HW_AFDBM is enabled, then the HW could turn on @@ -236,7 +236,7 @@ static void clear_flush(struct mm_struct *mm, unsigned long i, saddr = addr; for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) - ptep_get_and_clear(mm, addr, ptep); + __ptep_get_and_clear(mm, addr, ptep); flush_tlb_range(&vma, saddr, addr); } @@ -254,12 +254,12 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, if (!pte_present(pte)) { for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) - set_ptes(mm, addr, ptep, pte, 1); + __set_ptes(mm, addr, ptep, pte, 1); return; } if (!pte_cont(pte)) { - set_ptes(mm, addr, ptep, pte, 1); + __set_ptes(mm, addr, ptep, pte, 1); return; } @@ -270,7 +270,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, clear_flush(mm, addr, ptep, pgsize, ncontig); for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) - set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1); + __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1); } pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, @@ -400,7 +400,7 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr, ncontig = num_contig_ptes(sz, &pgsize); for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) - pte_clear(mm, addr, ptep); + __pte_clear(mm, addr, ptep); } pte_t 
huge_ptep_get_and_clear(struct mm_struct *mm, @@ -408,10 +408,10 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, { int ncontig; size_t pgsize; - pte_t orig_pte = ptep_get(ptep); + pte_t orig_pte = __ptep_get(ptep); if (!pte_cont(orig_pte)) - return ptep_get_and_clear(mm, addr, ptep); + return __ptep_get_and_clear(mm, addr, ptep); ncontig = find_num_contig(mm, addr, ptep, &pgsize); @@ -431,11 +431,11 @@ static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig) { int i; - if (pte_write(pte) != pte_write(ptep_get(ptep))) + if (pte_write(pte) != pte_write(__ptep_get(ptep))) return 1; for (i = 0; i < ncontig; i++) { - pte_t orig_pte = ptep_get(ptep + i); + pte_t orig_pte = __ptep_get(ptep + i); if (pte_dirty(pte) != pte_dirty(orig_pte)) return 1; @@ -459,7 +459,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, pte_t orig_pte; if (!pte_cont(pte)) - return ptep_set_access_flags(vma, addr, ptep, pte, dirty); + return __ptep_set_access_flags(vma, addr, ptep, pte, dirty); ncontig = find_num_contig(mm, addr, ptep, &pgsize); dpfn = pgsize >> PAGE_SHIFT; @@ -478,7 +478,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, hugeprot = pte_pgprot(pte); for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) - set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1); + __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1); return 1; } @@ -492,8 +492,8 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm, size_t pgsize; pte_t pte; - if (!pte_cont(ptep_get(ptep))) { - ptep_set_wrprotect(mm, addr, ptep); + if (!pte_cont(__ptep_get(ptep))) { + __ptep_set_wrprotect(mm, addr, ptep); return; } @@ -507,7 +507,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm, pfn = pte_pfn(pte); for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) - set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1); + __set_ptes(mm, addr, ptep, pfn_pte(pfn, hugeprot), 1); } pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, @@ -517,7 +517,7 @@ 
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, size_t pgsize; int ncontig; - if (!pte_cont(ptep_get(ptep))) + if (!pte_cont(__ptep_get(ptep))) return ptep_clear_flush(vma, addr, ptep); ncontig = find_num_contig(mm, addr, ptep, &pgsize); @@ -551,7 +551,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr * when the permission changes from executable to non-executable * in cases where cpu is affected with errata #2645198. */ - if (pte_user_exec(ptep_get(ptep))) + if (pte_user_exec(__ptep_get(ptep))) return huge_ptep_clear_flush(vma, addr, ptep); } return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 1b96e0ad6661..28856f511fb6 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -112,8 +112,8 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, if (!early) memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE); next = addr + PAGE_SIZE; - set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); - } while (ptep++, addr = next, addr != end && pte_none(ptep_get(ptep))); + __set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); + } while (ptep++, addr = next, addr != end && pte_none(__ptep_get(ptep))); } static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, @@ -266,7 +266,7 @@ static void __init kasan_init_shadow(void) * so we should make sure that it maps the zero page read-only. 
*/ for (i = 0; i < PTRS_PER_PTE; i++) - set_pte(&kasan_early_shadow_pte[i], + __set_pte(&kasan_early_shadow_pte[i], pfn_pte(sym_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO)); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index ee8641b3f139..9c975759cff0 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -225,16 +225,16 @@ static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, ptep = pte_set_fixmap_offset(pmdp, addr); do { - pte_t old_pte = ptep_get(ptep); + pte_t old_pte = __ptep_get(ptep); - set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot)); + __set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot)); /* * After the PTE entry has been populated once, we * only allow updates to the permission attributes. */ BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), - pte_val(ptep_get(ptep)))); + pte_val(__ptep_get(ptep)))); phys += PAGE_SIZE; } while (ptep++, addr += PAGE_SIZE, addr != end); @@ -919,12 +919,12 @@ static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr, do { ptep = pte_offset_kernel(pmdp, addr); - pte = ptep_get(ptep); + pte = __ptep_get(ptep); if (pte_none(pte)) continue; WARN_ON(!pte_present(pte)); - pte_clear(&init_mm, addr, ptep); + __pte_clear(&init_mm, addr, ptep); flush_tlb_kernel_range(addr, addr + PAGE_SIZE); if (free_mapped) free_hotplug_page_range(pte_page(pte), @@ -1052,7 +1052,7 @@ static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr, do { ptep = pte_offset_kernel(pmdp, addr); - pte = ptep_get(ptep); + pte = __ptep_get(ptep); /* * This is just a sanity check here which verifies that @@ -1071,7 +1071,7 @@ static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr, */ ptep = pte_offset_kernel(pmdp, 0UL); for (i = 0; i < PTRS_PER_PTE; i++) { - if (!pte_none(ptep_get(&ptep[i]))) + if (!pte_none(__ptep_get(&ptep[i]))) return; } diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 76ea4f20c384..32ed0e6bb611 100644 --- a/arch/arm64/mm/pageattr.c +++ 
b/arch/arm64/mm/pageattr.c @@ -32,12 +32,12 @@ bool can_set_block_and_cont_map(void) static int change_page_range(pte_t *ptep, unsigned long addr, void *data) { struct page_change_data *cdata = data; - pte_t pte = ptep_get(ptep); + pte_t pte = __ptep_get(ptep); pte = clear_pte_bit(pte, cdata->clear_mask); pte = set_pte_bit(pte, cdata->set_mask); - set_pte(ptep, pte); + __set_pte(ptep, pte); return 0; } @@ -262,5 +262,5 @@ bool kernel_page_present(struct page *page) return true; ptep = pte_offset_kernel(pmdp, addr); - return pte_valid(ptep_get(ptep)); + return pte_valid(__ptep_get(ptep)); } diff --git a/arch/arm64/mm/trans_pgd.c b/arch/arm64/mm/trans_pgd.c index f71ab4704cce..5139a28130c0 100644 --- a/arch/arm64/mm/trans_pgd.c +++ b/arch/arm64/mm/trans_pgd.c @@ -33,7 +33,7 @@ static void *trans_alloc(struct trans_pgd_info *info) static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr) { - pte_t pte = ptep_get(src_ptep); + pte_t pte = __ptep_get(src_ptep); if (pte_valid(pte)) { /* @@ -41,7 +41,7 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr) * read only (code, rodata). Clear the RDONLY bit from * the temporary mappings we use during restore. 
*/ - set_pte(dst_ptep, pte_mkwrite_novma(pte)); + __set_pte(dst_ptep, pte_mkwrite_novma(pte)); } else if ((debug_pagealloc_enabled() || is_kfence_address((void *)addr)) && !pte_none(pte)) { /* @@ -55,7 +55,7 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr) */ BUG_ON(!pfn_valid(pte_pfn(pte))); - set_pte(dst_ptep, pte_mkpresent(pte_mkwrite_novma(pte))); + __set_pte(dst_ptep, pte_mkpresent(pte_mkwrite_novma(pte))); } } -- Gitee From 97421d8878e686deb81483c42ac4c878c59cb1d7 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:31:58 +0000 Subject: [PATCH 1233/2138] arm64/mm: dplit __flush_tlb_range() to elide trailing DSB ANBZ: #9728 commit d9d8dc2bd3fb2689309f704fe85e6dde2b1bd73a upstream Split __flush_tlb_range() into __flush_tlb_range_nosync() + __flush_tlb_range(), in the same way as the existing flush_tlb_page() arrangement. This allows calling __flush_tlb_range_nosync() to elide the trailing DSB. Forthcoming "contpte" code will take advantage of this when clearing the young bit from a contiguous range of ptes. Ordering between dsb and mmu_notifier_arch_invalidate_secondary_tlbs() has changed, but now aligns with the ordering of __flush_tlb_page(). It has been discussed that __flush_tlb_page() may be wrong though. Regardless, both will be resolved separately if needed. Link: https://lkml.kernel.org/r/20240215103205.2607016-12-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: David Hildenbrand Tested-by: John Hubbard Acked-by: Mark Rutland Acked-by: Catalin Marinas Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Dave Hansen Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: Kefeng Wang Cc: Marc Zyngier Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/include/asm/tlbflush.h | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 557a8fdaf6c3..141b47351bc5 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -406,7 +406,7 @@ do { \ #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \ __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false) -static inline void __flush_tlb_range(struct vm_area_struct *vma, +static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long stride, bool last_level, int tlb_level) @@ -438,10 +438,19 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, else __flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true); - dsb(ish); mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end); } +static inline void __flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end, + unsigned long stride, bool last_level, + int tlb_level) +{ + __flush_tlb_range_nosync(vma, start, end, stride, + last_level, tlb_level); + dsb(ish); +} + static inline void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { -- Gitee From b73d0e2f381c2a514bbfe8eb71222e03cddf85cb Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:31:59 +0000 Subject: [PATCH 1234/2138] arm64/mm: wire up PTE_CONT for user mappings ANBZ: #9728 commit 4602e5757bcceb231c3a13c36c373ad4a750eddb upstream With the ptep API sufficiently refactored, we can now introduce a new "contpte" API layer, which transparently 
manages the PTE_CONT bit for user mappings. In this initial implementation, only suitable batches of PTEs, set via set_ptes(), are mapped with the PTE_CONT bit. Any subsequent modification of individual PTEs will cause an "unfold" operation to repaint the contpte block as individual PTEs before performing the requested operation. While, a modification of a single PTE could cause the block of PTEs to which it belongs to become eligible for "folding" into a contpte entry, "folding" is not performed in this initial implementation due to the costs of checking the requirements are met. Due to this, contpte mappings will degrade back to normal pte mappings over time if/when protections are changed. This will be solved in a future patch. Since a contpte block only has a single access and dirty bit, the semantic here changes slightly; when getting a pte (e.g. ptep_get()) that is part of a contpte mapping, the access and dirty information are pulled from the block (so all ptes in the block return the same access/dirty info). When changing the access/dirty info on a pte (e.g. ptep_set_access_flags()) that is part of a contpte mapping, this change will affect the whole contpte block. This is works fine in practice since we guarantee that only a single folio is mapped by a contpte block, and the core-mm tracks access/dirty information per folio. In order for the public functions, which used to be pure inline, to continue to be callable by modules, export all the contpte_* symbols that are now called by those public inline functions. The feature is enabled/disabled with the ARM64_CONTPTE Kconfig parameter at build time. It defaults to enabled as long as its dependency, TRANSPARENT_HUGEPAGE is also enabled. The core-mm depends upon TRANSPARENT_HUGEPAGE to be able to allocate large folios, so if its not enabled, then there is no chance of meeting the physical contiguity requirement for contpte mappings. 
Link: https://lkml.kernel.org/r/20240215103205.2607016-13-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: Ard Biesheuvel Tested-by: John Hubbard Acked-by: Mark Rutland Reviewed-by: Catalin Marinas Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Dave Hansen Cc: David Hildenbrand Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: Kefeng Wang Cc: Marc Zyngier Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/Kconfig | 9 + arch/arm64/include/asm/pgtable.h | 167 ++++++++++++++++++ arch/arm64/mm/Makefile | 1 + arch/arm64/mm/contpte.c | 285 +++++++++++++++++++++++++++++++ include/linux/efi.h | 5 + 5 files changed, 467 insertions(+) create mode 100644 arch/arm64/mm/contpte.c diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index df023d477f4b..931d0dcd12fd 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2285,6 +2285,15 @@ config UNWIND_PATCH_PAC_INTO_SCS select UNWIND_TABLES select DYNAMIC_SCS +config ARM64_CONTPTE + bool "Contiguous PTE mappings for user memory" if EXPERT + depends on TRANSPARENT_HUGEPAGE + default y + help + When enabled, user mappings are configured using the PTE contiguous + bit, for any mappings that meet the size and alignment requirements. + This reduces TLB pressure and improves performance. + endmenu # "Kernel Features" menu "Boot options" diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 6c9866648273..49ee410ed4b9 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -133,6 +133,10 @@ static inline pteval_t __phys_to_pte_val(phys_addr_t phys) */ #define pte_valid_not_user(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN)) +/* + * Returns true if the pte is valid and has the contiguous bit set. 
+ */ +#define pte_valid_cont(pte) (pte_valid(pte) && pte_cont(pte)) /* * Could the pte be present in the TLB? We must check mm_tlb_flush_pending * so that we don't erroneously return false for pages that have been @@ -1136,6 +1140,167 @@ extern void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t old_pte, pte_t new_pte); +#ifdef CONFIG_ARM64_CONTPTE + +/* + * The contpte APIs are used to transparently manage the contiguous bit in ptes + * where it is possible and makes sense to do so. The PTE_CONT bit is considered + * a private implementation detail of the public ptep API (see below). + */ +extern void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte); +extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte); +extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep); +extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned int nr); +extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep); +extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep); +extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t entry, int dirty); + +static inline void contpte_try_unfold(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + if (unlikely(pte_valid_cont(pte))) + __contpte_try_unfold(mm, addr, ptep, pte); +} + +/* + * The below functions constitute the public API that arm64 presents to the + * core-mm to manipulate PTE entries within their page tables (or at least this + * is the subset of the API that arm64 needs to implement). These public + * versions will automatically and transparently apply the contiguous bit where + * it makes sense to do so. Therefore any users that are contig-aware (e.g. 
+ * hugetlb, kernel mapper) should NOT use these APIs, but instead use the + * private versions, which are prefixed with double underscore. All of these + * APIs except for ptep_get_lockless() are expected to be called with the PTL + * held. Although the contiguous bit is considered private to the + * implementation, it is deliberately allowed to leak through the getters (e.g. + * ptep_get()), back to core code. This is required so that pte_leaf_size() can + * provide an accurate size for perf_get_pgtable_size(). But this leakage means + * its possible a pte will be passed to a setter with the contiguous bit set, so + * we explicitly clear the contiguous bit in those cases to prevent accidentally + * setting it in the pgtable. + */ + +#define ptep_get ptep_get +static inline pte_t ptep_get(pte_t *ptep) +{ + pte_t pte = __ptep_get(ptep); + + if (likely(!pte_valid_cont(pte))) + return pte; + + return contpte_ptep_get(ptep, pte); +} + +#define ptep_get_lockless ptep_get_lockless +static inline pte_t ptep_get_lockless(pte_t *ptep) +{ + pte_t pte = __ptep_get(ptep); + + if (likely(!pte_valid_cont(pte))) + return pte; + + return contpte_ptep_get_lockless(ptep); +} + +static inline void set_pte(pte_t *ptep, pte_t pte) +{ + /* + * We don't have the mm or vaddr so cannot unfold contig entries (since + * it requires tlb maintenance). set_pte() is not used in core code, so + * this should never even be called. Regardless do our best to service + * any call and emit a warning if there is any attempt to set a pte on + * top of an existing contig range. 
+ */ + pte_t orig_pte = __ptep_get(ptep); + + WARN_ON_ONCE(pte_valid_cont(orig_pte)); + __set_pte(ptep, pte_mknoncont(pte)); +} + +#define set_ptes set_ptes +static inline void set_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned int nr) +{ + pte = pte_mknoncont(pte); + + if (likely(nr == 1)) { + contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); + __set_ptes(mm, addr, ptep, pte, 1); + } else { + contpte_set_ptes(mm, addr, ptep, pte, nr); + } +} + +static inline void pte_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); + __pte_clear(mm, addr, ptep); +} + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); + return __ptep_get_and_clear(mm, addr, ptep); +} + +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + pte_t orig_pte = __ptep_get(ptep); + + if (likely(!pte_valid_cont(orig_pte))) + return __ptep_test_and_clear_young(vma, addr, ptep); + + return contpte_ptep_test_and_clear_young(vma, addr, ptep); +} + +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +static inline int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + pte_t orig_pte = __ptep_get(ptep); + + if (likely(!pte_valid_cont(orig_pte))) + return __ptep_clear_flush_young(vma, addr, ptep); + + return contpte_ptep_clear_flush_young(vma, addr, ptep); +} + +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +static inline void ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); + __ptep_set_wrprotect(mm, addr, ptep); +} + +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +static inline int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long 
addr, pte_t *ptep, + pte_t entry, int dirty) +{ + pte_t orig_pte = __ptep_get(ptep); + + entry = pte_mknoncont(entry); + + if (likely(!pte_valid_cont(orig_pte))) + return __ptep_set_access_flags(vma, addr, ptep, entry, dirty); + + return contpte_ptep_set_access_flags(vma, addr, ptep, entry, dirty); +} + +#else /* CONFIG_ARM64_CONTPTE */ + #define ptep_get __ptep_get #define set_pte __set_pte #define set_ptes __set_ptes @@ -1151,6 +1316,8 @@ extern void ptep_modify_prot_commit(struct vm_area_struct *vma, #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS #define ptep_set_access_flags __ptep_set_access_flags +#endif /* CONFIG_ARM64_CONTPTE */ + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_PGTABLE_H */ diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile index dbd1bc95967d..60454256945b 100644 --- a/arch/arm64/mm/Makefile +++ b/arch/arm64/mm/Makefile @@ -3,6 +3,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ cache.o copypage.o flush.o \ ioremap.o mmap.o pgd.o mmu.o \ context.o proc.o pageattr.o fixmap.o +obj-$(CONFIG_ARM64_CONTPTE) += contpte.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_PTDUMP_CORE) += ptdump.o obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c new file mode 100644 index 000000000000..6d7f40667fa2 --- /dev/null +++ b/arch/arm64/mm/contpte.c @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023 ARM Ltd. + */ + +#include +#include +#include +#include + +static inline bool mm_is_user(struct mm_struct *mm) +{ + /* + * Don't attempt to apply the contig bit to kernel mappings, because + * dynamically adding/removing the contig bit can cause page faults. + * These racing faults are ok for user space, since they get serialized + * on the PTL. But kernel mappings can't tolerate faults. 
+ */ + if (unlikely(mm_is_efi(mm))) + return false; + return mm != &init_mm; +} + +static inline pte_t *contpte_align_down(pte_t *ptep) +{ + return PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES); +} + +static void contpte_convert(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); + unsigned long start_addr; + pte_t *start_ptep; + int i; + + start_ptep = ptep = contpte_align_down(ptep); + start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE); + pte = pfn_pte(ALIGN_DOWN(pte_pfn(pte), CONT_PTES), pte_pgprot(pte)); + + for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) { + pte_t ptent = __ptep_get_and_clear(mm, addr, ptep); + + if (pte_dirty(ptent)) + pte = pte_mkdirty(pte); + + if (pte_young(ptent)) + pte = pte_mkyoung(pte); + } + + __flush_tlb_range(&vma, start_addr, addr, PAGE_SIZE, true, 3); + + __set_ptes(mm, start_addr, start_ptep, pte, CONT_PTES); +} + +void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + /* + * We have already checked that the ptes are contiguous in + * contpte_try_unfold(), so just check that the mm is user space. + */ + if (!mm_is_user(mm)) + return; + + pte = pte_mknoncont(pte); + contpte_convert(mm, addr, ptep, pte); +} +EXPORT_SYMBOL(__contpte_try_unfold); + +pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte) +{ + /* + * Gather access/dirty bits, which may be populated in any of the ptes + * of the contig range. We are guaranteed to be holding the PTL, so any + * contiguous range cannot be unfolded or otherwise modified under our + * feet. 
+ */ + + pte_t pte; + int i; + + ptep = contpte_align_down(ptep); + + for (i = 0; i < CONT_PTES; i++, ptep++) { + pte = __ptep_get(ptep); + + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + + return orig_pte; +} +EXPORT_SYMBOL(contpte_ptep_get); + +pte_t contpte_ptep_get_lockless(pte_t *orig_ptep) +{ + /* + * Gather access/dirty bits, which may be populated in any of the ptes + * of the contig range. We may not be holding the PTL, so any contiguous + * range may be unfolded/modified/refolded under our feet. Therefore we + * ensure we read a _consistent_ contpte range by checking that all ptes + * in the range are valid and have CONT_PTE set, that all pfns are + * contiguous and that all pgprots are the same (ignoring access/dirty). + * If we find a pte that is not consistent, then we must be racing with + * an update so start again. If the target pte does not have CONT_PTE + * set then that is considered consistent on its own because it is not + * part of a contpte range. 
+ */ + + pgprot_t orig_prot; + unsigned long pfn; + pte_t orig_pte; + pgprot_t prot; + pte_t *ptep; + pte_t pte; + int i; + +retry: + orig_pte = __ptep_get(orig_ptep); + + if (!pte_valid_cont(orig_pte)) + return orig_pte; + + orig_prot = pte_pgprot(pte_mkold(pte_mkclean(orig_pte))); + ptep = contpte_align_down(orig_ptep); + pfn = pte_pfn(orig_pte) - (orig_ptep - ptep); + + for (i = 0; i < CONT_PTES; i++, ptep++, pfn++) { + pte = __ptep_get(ptep); + prot = pte_pgprot(pte_mkold(pte_mkclean(pte))); + + if (!pte_valid_cont(pte) || + pte_pfn(pte) != pfn || + pgprot_val(prot) != pgprot_val(orig_prot)) + goto retry; + + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + + return orig_pte; +} +EXPORT_SYMBOL(contpte_ptep_get_lockless); + +void contpte_set_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned int nr) +{ + unsigned long next; + unsigned long end; + unsigned long pfn; + pgprot_t prot; + + /* + * The set_ptes() spec guarantees that when nr > 1, the initial state of + * all ptes is not-present. Therefore we never need to unfold or + * otherwise invalidate a range before we set the new ptes. + * contpte_set_ptes() should never be called for nr < 2. 
+ */ + VM_WARN_ON(nr == 1); + + if (!mm_is_user(mm)) + return __set_ptes(mm, addr, ptep, pte, nr); + + end = addr + (nr << PAGE_SHIFT); + pfn = pte_pfn(pte); + prot = pte_pgprot(pte); + + do { + next = pte_cont_addr_end(addr, end); + nr = (next - addr) >> PAGE_SHIFT; + pte = pfn_pte(pfn, prot); + + if (((addr | next | (pfn << PAGE_SHIFT)) & ~CONT_PTE_MASK) == 0) + pte = pte_mkcont(pte); + else + pte = pte_mknoncont(pte); + + __set_ptes(mm, addr, ptep, pte, nr); + + addr = next; + ptep += nr; + pfn += nr; + + } while (addr != end); +} +EXPORT_SYMBOL(contpte_set_ptes); + +int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + /* + * ptep_clear_flush_young() technically requires us to clear the access + * flag for a _single_ pte. However, the core-mm code actually tracks + * access/dirty per folio, not per page. And since we only create a + * contig range when the range is covered by a single folio, we can get + * away with clearing young for the whole contig range here, so we avoid + * having to unfold. + */ + + int young = 0; + int i; + + ptep = contpte_align_down(ptep); + addr = ALIGN_DOWN(addr, CONT_PTE_SIZE); + + for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) + young |= __ptep_test_and_clear_young(vma, addr, ptep); + + return young; +} +EXPORT_SYMBOL(contpte_ptep_test_and_clear_young); + +int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + int young; + + young = contpte_ptep_test_and_clear_young(vma, addr, ptep); + + if (young) { + /* + * See comment in __ptep_clear_flush_young(); same rationale for + * eliding the trailing DSB applies here. 
+ */ + addr = ALIGN_DOWN(addr, CONT_PTE_SIZE); + __flush_tlb_range_nosync(vma, addr, addr + CONT_PTE_SIZE, + PAGE_SIZE, true, 3); + } + + return young; +} +EXPORT_SYMBOL(contpte_ptep_clear_flush_young); + +int contpte_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t entry, int dirty) +{ + unsigned long start_addr; + pte_t orig_pte; + int i; + + /* + * Gather the access/dirty bits for the contiguous range. If nothing has + * changed, its a noop. + */ + orig_pte = pte_mknoncont(ptep_get(ptep)); + if (pte_val(orig_pte) == pte_val(entry)) + return 0; + + /* + * We can fix up access/dirty bits without having to unfold the contig + * range. But if the write bit is changing, we must unfold. + */ + if (pte_write(orig_pte) == pte_write(entry)) { + /* + * For HW access management, we technically only need to update + * the flag on a single pte in the range. But for SW access + * management, we need to update all the ptes to prevent extra + * faults. Avoid per-page tlb flush in __ptep_set_access_flags() + * and instead flush the whole range at the end. 
+ */ + ptep = contpte_align_down(ptep); + start_addr = addr = ALIGN_DOWN(addr, CONT_PTE_SIZE); + + for (i = 0; i < CONT_PTES; i++, ptep++, addr += PAGE_SIZE) + __ptep_set_access_flags(vma, addr, ptep, entry, 0); + + if (dirty) + __flush_tlb_range(vma, start_addr, addr, + PAGE_SIZE, true, 3); + } else { + __contpte_try_unfold(vma->vm_mm, addr, ptep, orig_pte); + __ptep_set_access_flags(vma, addr, ptep, entry, dirty); + } + + return 1; +} +EXPORT_SYMBOL(contpte_ptep_set_access_flags); diff --git a/include/linux/efi.h b/include/linux/efi.h index 80b21d1c6eaf..aab980c98a23 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -694,6 +694,11 @@ extern struct efi { extern struct mm_struct efi_mm; +static inline bool mm_is_efi(struct mm_struct *mm) +{ + return IS_ENABLED(CONFIG_EFI) && mm == &efi_mm; +} + static inline int efi_guidcmp (efi_guid_t left, efi_guid_t right) { -- Gitee From 51d07e3cefee76a203a8bbc4b6b1d184cf3c0326 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:32:00 +0000 Subject: [PATCH 1235/2138] arm64/mm: implement new wrprotect_ptes() batch API ANBZ: #9728 commit 311a6cf29690bb8295327bad0e76e0ad48cadcc4 upstream Optimize the contpte implementation to fix some of the fork performance regression introduced by the initial contpte commit. Subsequent patches will solve it entirely. During fork(), any private memory in the parent must be write-protected. Previously this was done 1 PTE at a time. But the core-mm supports batched wrprotect via the new wrprotect_ptes() API. So let's implement that API and for fully covered contpte mappings, we no longer need to unfold the contpte. This has 2 benefits: - reduced unfolding, reduces the number of tlbis that must be issued. - The memory remains contpte-mapped ("folded") in the parent, so it continues to benefit from the more efficient use of the TLB after the fork. 
The optimization to wrprotect a whole contpte block without unfolding is possible thanks to the tightening of the Arm ARM in respect to the definition and behaviour when 'Misprogramming the Contiguous bit'. See section D21194 at https://developer.arm.com/documentation/102105/ja-07/ Link: https://lkml.kernel.org/r/20240215103205.2607016-14-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Tested-by: John Hubbard Acked-by: Mark Rutland Acked-by: Catalin Marinas Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Dave Hansen Cc: David Hildenbrand Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: Kefeng Wang Cc: Marc Zyngier Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/include/asm/pgtable.h | 61 ++++++++++++++++++++++++++------ arch/arm64/mm/contpte.c | 38 ++++++++++++++++++++ 2 files changed, 89 insertions(+), 10 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 49ee410ed4b9..629e14120ba3 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -986,16 +986,12 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -/* - * __ptep_set_wrprotect - mark read-only while trasferring potential hardware - * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit. 
- */ -static inline void __ptep_set_wrprotect(struct mm_struct *mm, - unsigned long address, pte_t *ptep) +static inline void ___ptep_set_wrprotect(struct mm_struct *mm, + unsigned long address, pte_t *ptep, + pte_t pte) { - pte_t old_pte, pte; + pte_t old_pte; - pte = __ptep_get(ptep); do { old_pte = pte; pte = pte_wrprotect(pte); @@ -1004,6 +1000,25 @@ static inline void __ptep_set_wrprotect(struct mm_struct *mm, } while (pte_val(pte) != pte_val(old_pte)); } +/* + * __ptep_set_wrprotect - mark read-only while trasferring potential hardware + * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit. + */ +static inline void __ptep_set_wrprotect(struct mm_struct *mm, + unsigned long address, pte_t *ptep) +{ + ___ptep_set_wrprotect(mm, address, ptep, __ptep_get(ptep)); +} + +static inline void __wrprotect_ptes(struct mm_struct *mm, unsigned long address, + pte_t *ptep, unsigned int nr) +{ + unsigned int i; + + for (i = 0; i < nr; i++, address += PAGE_SIZE, ptep++) + __ptep_set_wrprotect(mm, address, ptep); +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __HAVE_ARCH_PMDP_SET_WRPROTECT static inline void pmdp_set_wrprotect(struct mm_struct *mm, @@ -1157,6 +1172,8 @@ extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep); extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep); +extern void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr); extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t entry, int dirty); @@ -1276,12 +1293,35 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, return contpte_ptep_clear_flush_young(vma, addr, ptep); } +#define wrprotect_ptes wrprotect_ptes +static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr) +{ + if (likely(nr == 1)) { + /* + * Optimization: 
wrprotect_ptes() can only be called for present + * ptes so we only need to check contig bit as condition for + * unfold, and we can remove the contig bit from the pte we read + * to avoid re-reading. This speeds up fork() which is sensitive + * for order-0 folios. Equivalent to contpte_try_unfold(). + */ + pte_t orig_pte = __ptep_get(ptep); + + if (unlikely(pte_cont(orig_pte))) { + __contpte_try_unfold(mm, addr, ptep, orig_pte); + orig_pte = pte_mknoncont(orig_pte); + } + ___ptep_set_wrprotect(mm, addr, ptep, orig_pte); + } else { + contpte_wrprotect_ptes(mm, addr, ptep, nr); + } +} + #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); - __ptep_set_wrprotect(mm, addr, ptep); + wrprotect_ptes(mm, addr, ptep, 1); } #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS @@ -1313,6 +1353,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma, #define ptep_clear_flush_young __ptep_clear_flush_young #define __HAVE_ARCH_PTEP_SET_WRPROTECT #define ptep_set_wrprotect __ptep_set_wrprotect +#define wrprotect_ptes __wrprotect_ptes #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS #define ptep_set_access_flags __ptep_set_access_flags diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c index 6d7f40667fa2..bedb58524535 100644 --- a/arch/arm64/mm/contpte.c +++ b/arch/arm64/mm/contpte.c @@ -26,6 +26,26 @@ static inline pte_t *contpte_align_down(pte_t *ptep) return PTR_ALIGN_DOWN(ptep, sizeof(*ptep) * CONT_PTES); } +static void contpte_try_unfold_partial(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr) +{ + /* + * Unfold any partially covered contpte block at the beginning and end + * of the range. 
+ */ + + if (ptep != contpte_align_down(ptep) || nr < CONT_PTES) + contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); + + if (ptep + nr != contpte_align_down(ptep + nr)) { + unsigned long last_addr = addr + PAGE_SIZE * (nr - 1); + pte_t *last_ptep = ptep + nr - 1; + + contpte_try_unfold(mm, last_addr, last_ptep, + __ptep_get(last_ptep)); + } +} + static void contpte_convert(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { @@ -238,6 +258,24 @@ int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, } EXPORT_SYMBOL(contpte_ptep_clear_flush_young); +void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr) +{ + /* + * If wrprotecting an entire contig range, we can avoid unfolding. Just + * set wrprotect and wait for the later mmu_gather flush to invalidate + * the tlb. Until the flush, the page may or may not be wrprotected. + * After the flush, it is guaranteed wrprotected. If it's a partial + * range though, we must unfold, because we can't have a case where + * CONT_PTE is set but wrprotect applies to a subset of the PTEs; this + * would cause it to continue to be unpredictable after the flush. + */ + + contpte_try_unfold_partial(mm, addr, ptep, nr); + __wrprotect_ptes(mm, addr, ptep, nr); +} +EXPORT_SYMBOL(contpte_wrprotect_ptes); + int contpte_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t entry, int dirty) -- Gitee From f7a77289745b75e6dc0cf18559c69859f8df2128 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:32:01 +0000 Subject: [PATCH 1236/2138] arm64/mm: implement new [get_and_]clear_full_ptes() batch APIs ANBZ: #9728 commit 6b1e4efb6f5499ae8f9f5cdda7502285a0edbf51 upstream Optimize the contpte implementation to fix some of the exit/munmap/dontneed performance regression introduced by the initial contpte commit. Subsequent patches will solve it entirely. 
During exit(), munmap() or madvise(MADV_DONTNEED), mappings must be cleared. Previously this was done 1 PTE at a time. But the core-mm supports batched clear via the new [get_and_]clear_full_ptes() APIs. So let's implement those APIs and for fully covered contpte mappings, we no longer need to unfold the contpte. This significantly reduces unfolding operations, reducing the number of tlbis that must be issued. Link: https://lkml.kernel.org/r/20240215103205.2607016-15-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Tested-by: John Hubbard Acked-by: Mark Rutland Acked-by: Catalin Marinas Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Dave Hansen Cc: David Hildenbrand Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: Kefeng Wang Cc: Marc Zyngier Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/include/asm/pgtable.h | 67 ++++++++++++++++++++++++++++++++ arch/arm64/mm/contpte.c | 17 ++++++++ 2 files changed, 84 insertions(+) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 629e14120ba3..18751a7e1201 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -973,6 +973,37 @@ static inline pte_t __ptep_get_and_clear(struct mm_struct *mm, return pte; } +static inline void __clear_full_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr, int full) +{ + for (;;) { + __ptep_get_and_clear(mm, addr, ptep); + if (--nr == 0) + break; + ptep++; + addr += PAGE_SIZE; + } +} + +static inline pte_t __get_and_clear_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned int nr, int full) +{ + pte_t pte, tmp_pte; + + pte = __ptep_get_and_clear(mm, addr, ptep); + while (--nr) { + ptep++; + addr += PAGE_SIZE; + tmp_pte = 
__ptep_get_and_clear(mm, addr, ptep); + if (pte_dirty(tmp_pte)) + pte = pte_mkdirty(pte); + if (pte_young(tmp_pte)) + pte = pte_mkyoung(pte); + } + return pte; +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, @@ -1168,6 +1199,11 @@ extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte); extern pte_t contpte_ptep_get_lockless(pte_t *orig_ptep); extern void contpte_set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr); +extern void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr, int full); +extern pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned int nr, int full); extern int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep); extern int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, @@ -1261,6 +1297,35 @@ static inline void pte_clear(struct mm_struct *mm, __pte_clear(mm, addr, ptep); } +#define clear_full_ptes clear_full_ptes +static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr, int full) +{ + if (likely(nr == 1)) { + contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); + __clear_full_ptes(mm, addr, ptep, nr, full); + } else { + contpte_clear_full_ptes(mm, addr, ptep, nr, full); + } +} + +#define get_and_clear_full_ptes get_and_clear_full_ptes +static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned int nr, int full) +{ + pte_t pte; + + if (likely(nr == 1)) { + contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); + pte = __get_and_clear_full_ptes(mm, addr, ptep, nr, full); + } else { + pte = contpte_get_and_clear_full_ptes(mm, addr, ptep, nr, full); + } + + return pte; +} + #define __HAVE_ARCH_PTEP_GET_AND_CLEAR static inline pte_t 
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) @@ -1345,6 +1410,8 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma, #define set_pte __set_pte #define set_ptes __set_ptes #define pte_clear __pte_clear +#define clear_full_ptes __clear_full_ptes +#define get_and_clear_full_ptes __get_and_clear_full_ptes #define __HAVE_ARCH_PTEP_GET_AND_CLEAR #define ptep_get_and_clear __ptep_get_and_clear #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c index bedb58524535..50e0173dc5ee 100644 --- a/arch/arm64/mm/contpte.c +++ b/arch/arm64/mm/contpte.c @@ -212,6 +212,23 @@ void contpte_set_ptes(struct mm_struct *mm, unsigned long addr, } EXPORT_SYMBOL(contpte_set_ptes); +void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr, int full) +{ + contpte_try_unfold_partial(mm, addr, ptep, nr); + __clear_full_ptes(mm, addr, ptep, nr, full); +} +EXPORT_SYMBOL(contpte_clear_full_ptes); + +pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned int nr, int full) +{ + contpte_try_unfold_partial(mm, addr, ptep, nr); + return __get_and_clear_full_ptes(mm, addr, ptep, nr, full); +} +EXPORT_SYMBOL(contpte_get_and_clear_full_ptes); + int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { -- Gitee From 1d7782e29827ee6885540438194bb5c0d6ae7034 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:32:02 +0000 Subject: [PATCH 1237/2138] mm: add pte_batch_hint() to reduce scanning in folio_pte_batch() ANBZ: #9728 commit c6ec76a2ebc5829e5826b218d2e1475ec11b333e upstream Some architectures (e.g. arm64) can tell from looking at a pte, if some follow-on ptes also map contiguous physical memory with the same pgprot. (for arm64, these are contpte mappings). 
Take advantage of this knowledge to optimize folio_pte_batch() so that it can skip these ptes when scanning to create a batch. By default, if an arch does not opt-in, folio_pte_batch() returns a compile-time 1, so the changes are optimized out and the behaviour is as before. arm64 will opt-in to providing this hint in the next patch, which will greatly reduce the cost of ptep_get() when scanning a range of contptes. Link: https://lkml.kernel.org/r/20240215103205.2607016-16-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Tested-by: John Hubbard Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Catalin Marinas Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: Kefeng Wang Cc: Marc Zyngier Cc: Mark Rutland Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- include/linux/pgtable.h | 21 +++++++++++++++++++++ mm/memory.c | 19 ++++++++++++------- 2 files changed, 33 insertions(+), 7 deletions(-) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 0651e89bf5dc..4a8a984b9054 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -205,6 +205,27 @@ static inline int pmd_young(pmd_t pmd) #define arch_flush_lazy_mmu_mode() do {} while (0) #endif +#ifndef pte_batch_hint +/** + * pte_batch_hint - Number of pages that can be added to batch without scanning. + * @ptep: Page table pointer for the entry. + * @pte: Page table entry. + * + * Some architectures know that a set of contiguous ptes all map the same + * contiguous memory with the same permissions. In this case, it can provide a + * hint to aid pte batching without the core code needing to scan every pte. + * + * An architecture implementation may ignore the PTE accessed state. 
Further, + * the dirty state must apply atomically to all the PTEs described by the hint. + * + * May be overridden by the architecture, else pte_batch_hint is always 1. + */ +static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte) +{ + return 1; +} +#endif + #ifndef pte_advance_pfn static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr) { diff --git a/mm/memory.c b/mm/memory.c index 94a53ead93a7..34e76ff266de 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -977,16 +977,20 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, { unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio); const pte_t *end_ptep = start_ptep + max_nr; - pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte), flags); - pte_t *ptep = start_ptep + 1; + pte_t expected_pte, *ptep; bool writable; + int nr; if (any_writable) *any_writable = false; VM_WARN_ON_FOLIO(!pte_present(pte), folio); - while (ptep != end_ptep) { + nr = pte_batch_hint(start_ptep, pte); + expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags); + ptep = start_ptep + nr; + + while (ptep < end_ptep) { pte = ptep_get(ptep); if (any_writable) writable = !!pte_write(pte); @@ -1000,17 +1004,18 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, * corner cases the next PFN might fall into a different * folio. 
*/ - if (pte_pfn(pte) == folio_end_pfn) + if (pte_pfn(pte) >= folio_end_pfn) break; if (any_writable) *any_writable |= writable; - expected_pte = pte_next_pfn(expected_pte); - ptep++; + nr = pte_batch_hint(ptep, pte); + expected_pte = pte_advance_pfn(expected_pte, nr); + ptep += nr; } - return ptep - start_ptep; + return min(ptep - start_ptep, max_nr); } /* -- Gitee From 4f0489534ec460f8487cbff798379ce881c4ad24 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:32:03 +0000 Subject: [PATCH 1238/2138] arm64/mm: implement pte_batch_hint() ANBZ: #9728 commit fb5451e5f72b31002760083a99fbb41771c4f1ad upstream When core code iterates over a range of ptes and calls ptep_get() for each of them, if the range happens to cover contpte mappings, the number of pte reads becomes amplified by a factor of the number of PTEs in a contpte block. This is because for each call to ptep_get(), the implementation must read all of the ptes in the contpte block to which it belongs to gather the access and dirty bits. This causes a hotspot for fork(), as well as operations that unmap memory such as munmap(), exit and madvise(MADV_DONTNEED). Fortunately we can fix this by implementing pte_batch_hint() which allows their iterators to skip getting the contpte tail ptes when gathering the batch of ptes to operate on. This results in the number of PTE reads returning to 1 per pte. Link: https://lkml.kernel.org/r/20240215103205.2607016-17-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: Mark Rutland Reviewed-by: David Hildenbrand Tested-by: John Hubbard Acked-by: Catalin Marinas Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Dave Hansen Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: Kefeng Wang Cc: Marc Zyngier Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/include/asm/pgtable.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 18751a7e1201..6302a1f56d87 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -1221,6 +1221,15 @@ static inline void contpte_try_unfold(struct mm_struct *mm, unsigned long addr, __contpte_try_unfold(mm, addr, ptep, pte); } +#define pte_batch_hint pte_batch_hint +static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte) +{ + if (!pte_valid_cont(pte)) + return 1; + + return CONT_PTES - (((unsigned long)ptep >> 3) & (CONT_PTES - 1)); +} + /* * The below functions constitute the public API that arm64 presents to the * core-mm to manipulate PTE entries within their page tables (or at least this -- Gitee From eab4956206bb033a6e7801136c72f0af06d5be15 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:32:04 +0000 Subject: [PATCH 1239/2138] arm64/mm: __always_inline to improve fork() perf ANBZ: #9728 commit b972fc6afba002319fe23bc698ce6431ee43868c upstream As set_ptes() and wrprotect_ptes() become a bit more complex, the compiler may choose not to inline them. But this is critical for fork() performance. So mark the functions, along with contpte_try_unfold() which is called by them, as __always_inline. This is worth ~1% on the fork() microbenchmark with order-0 folios (the common case). 
Link: https://lkml.kernel.org/r/20240215103205.2607016-18-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: Mark Rutland Acked-by: Catalin Marinas Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Dave Hansen Cc: David Hildenbrand Cc: "H. Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: John Hubbard Cc: Kefeng Wang Cc: Marc Zyngier Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/include/asm/pgtable.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 6302a1f56d87..4794c4f6e53c 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -1214,8 +1214,8 @@ extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t entry, int dirty); -static inline void contpte_try_unfold(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte) +static __always_inline void contpte_try_unfold(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, pte_t pte) { if (unlikely(pte_valid_cont(pte))) __contpte_try_unfold(mm, addr, ptep, pte); @@ -1286,7 +1286,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte) } #define set_ptes set_ptes -static inline void set_ptes(struct mm_struct *mm, unsigned long addr, +static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr) { pte = pte_mknoncont(pte); @@ -1368,8 +1368,8 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, } #define wrprotect_ptes wrprotect_ptes -static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, unsigned int nr) +static __always_inline void wrprotect_ptes(struct mm_struct *mm, + 
unsigned long addr, pte_t *ptep, unsigned int nr) { if (likely(nr == 1)) { /* -- Gitee From 219c3f6219d2435f2456e93863b2cf8f4de832b7 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 15 Feb 2024 10:32:05 +0000 Subject: [PATCH 1240/2138] arm64/mm: automatically fold contpte mappings ANBZ: #9728 commit f0c2264958e18bc7bc35b567d51b99461e4de34f upstream There are situations where a change to a single PTE could cause the contpte block in which it resides to become foldable (i.e. could be repainted with the contiguous bit). Such situations arise, for example, when user space temporarily changes protections, via mprotect, for individual pages, such can be the case for certain garbage collectors. We would like to detect when such a PTE change occurs. However this can be expensive due to the amount of checking required. Therefore only perform the checks when an indiviual PTE is modified via mprotect (ptep_modify_prot_commit() -> set_pte_at() -> set_ptes(nr=1)) and only when we are setting the final PTE in a contpte-aligned block. Link: https://lkml.kernel.org/r/20240215103205.2607016-19-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: Mark Rutland Acked-by: Catalin Marinas Cc: Alistair Popple Cc: Andrey Ryabinin Cc: Ard Biesheuvel Cc: Barry Song <21cnbao@gmail.com> Cc: Borislav Petkov (AMD) Cc: Dave Hansen Cc: David Hildenbrand Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: James Morse Cc: John Hubbard Cc: Kefeng Wang Cc: Marc Zyngier Cc: Matthew Wilcox (Oracle) Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/include/asm/pgtable.h | 26 +++++++++++++ arch/arm64/mm/contpte.c | 64 ++++++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 4794c4f6e53c..1e4e385b6e4a 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -1193,6 +1193,8 @@ extern void ptep_modify_prot_commit(struct vm_area_struct *vma, * where it is possible and makes sense to do so. The PTE_CONT bit is considered * a private implementation detail of the public ptep API (see below). */ +extern void __contpte_try_fold(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte); extern void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); extern pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte); @@ -1214,6 +1216,29 @@ extern int contpte_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t entry, int dirty); +static __always_inline void contpte_try_fold(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, pte_t pte) +{ + /* + * Only bother trying if both the virtual and physical addresses are + * aligned and correspond to the last entry in a contig range. The core + * code mostly modifies ranges from low to high, so this is the likely + * the last modification in the contig range, so a good time to fold. + * We can't fold special mappings, because there is no associated folio. 
+ */ + + const unsigned long contmask = CONT_PTES - 1; + bool valign = ((addr >> PAGE_SHIFT) & contmask) == contmask; + + if (unlikely(valign)) { + bool palign = (pte_pfn(pte) & contmask) == contmask; + + if (unlikely(palign && + pte_valid(pte) && !pte_cont(pte) && !pte_special(pte))) + __contpte_try_fold(mm, addr, ptep, pte); + } +} + static __always_inline void contpte_try_unfold(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { @@ -1294,6 +1319,7 @@ static __always_inline void set_ptes(struct mm_struct *mm, unsigned long addr, if (likely(nr == 1)) { contpte_try_unfold(mm, addr, ptep, __ptep_get(ptep)); __set_ptes(mm, addr, ptep, pte, 1); + contpte_try_fold(mm, addr, ptep, pte); } else { contpte_set_ptes(mm, addr, ptep, pte, nr); } diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c index 50e0173dc5ee..16788f07716d 100644 --- a/arch/arm64/mm/contpte.c +++ b/arch/arm64/mm/contpte.c @@ -73,6 +73,70 @@ static void contpte_convert(struct mm_struct *mm, unsigned long addr, __set_ptes(mm, start_addr, start_ptep, pte, CONT_PTES); } +void __contpte_try_fold(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + /* + * We have already checked that the virtual and pysical addresses are + * correctly aligned for a contpte mapping in contpte_try_fold() so the + * remaining checks are to ensure that the contpte range is fully + * covered by a single folio, and ensure that all the ptes are valid + * with contiguous PFNs and matching prots. We ignore the state of the + * access and dirty bits for the purpose of deciding if its a contiguous + * range; the folding process will generate a single contpte entry which + * has a single access and dirty bit. Those 2 bits are the logical OR of + * their respective bits in the constituent pte entries. In order to + * ensure the contpte range is covered by a single folio, we must + * recover the folio from the pfn, but special mappings don't have a + * folio backing them. 
Fortunately contpte_try_fold() already checked + * that the pte is not special - we never try to fold special mappings. + * Note we can't use vm_normal_page() for this since we don't have the + * vma. + */ + + unsigned long folio_start, folio_end; + unsigned long cont_start, cont_end; + pte_t expected_pte, subpte; + struct folio *folio; + struct page *page; + unsigned long pfn; + pte_t *orig_ptep; + pgprot_t prot; + + int i; + + if (!mm_is_user(mm)) + return; + + page = pte_page(pte); + folio = page_folio(page); + folio_start = addr - (page - &folio->page) * PAGE_SIZE; + folio_end = folio_start + folio_nr_pages(folio) * PAGE_SIZE; + cont_start = ALIGN_DOWN(addr, CONT_PTE_SIZE); + cont_end = cont_start + CONT_PTE_SIZE; + + if (folio_start > cont_start || folio_end < cont_end) + return; + + pfn = ALIGN_DOWN(pte_pfn(pte), CONT_PTES); + prot = pte_pgprot(pte_mkold(pte_mkclean(pte))); + expected_pte = pfn_pte(pfn, prot); + orig_ptep = ptep; + ptep = contpte_align_down(ptep); + + for (i = 0; i < CONT_PTES; i++) { + subpte = pte_mkold(pte_mkclean(__ptep_get(ptep))); + if (!pte_same(subpte, expected_pte)) + return; + expected_pte = pte_advance_pfn(expected_pte, 1); + ptep++; + } + + pte = pte_mkcont(pte); + contpte_convert(mm, addr, orig_ptep, pte); +} +EXPORT_SYMBOL(__contpte_try_fold); + void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { -- Gitee From 04b7dadd1ce459d9e25158bc218b82b1a7ce03df Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 26 Feb 2024 12:03:20 +0000 Subject: [PATCH 1241/2138] arm64/mm: export contpte symbols only to GPL users ANBZ: #9728 commit 912609e96cd728766373d84903f12a6d836de518 upstream Patch series "Address some contpte nits". These 2 patches address some nits raised by Catalin late in the review cycle for my contpte series [1]. 
[1] https://lore.kernel.org/linux-mm/20240215103205.2607016-1-ryan.roberts@arm.com/ This patch (of 2): The contpte symbols must be exported since some of the public inline ptep_* APIs are called from modules and these inlines now call the contpte functions. Originally they were exported as EXPORT_SYMBOL() for fear of breaking out-of-tree modules. But we subsequently concluded that EXPORT_SYMBOL_GPL() should be safe since these functions are deeply core mm routines, and any module operating at this level is not going to be able to survive on EXPORT_SYMBOL alone. Link: https://lkml.kernel.org/r/20240226120321.1055731-1-ryan.roberts@arm.com Link: https://lore.kernel.org/linux-mm/f9fc2b31-11cb-4969-8961-9c89fea41b74@nvidia.com/ Link: https://lkml.kernel.org/r/20240226120321.1055731-2-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Acked-by: Catalin Marinas Cc: John Hubbard Cc: Mark Rutland Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/mm/contpte.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c index 16788f07716d..be0a226c4ff9 100644 --- a/arch/arm64/mm/contpte.c +++ b/arch/arm64/mm/contpte.c @@ -135,7 +135,7 @@ void __contpte_try_fold(struct mm_struct *mm, unsigned long addr, pte = pte_mkcont(pte); contpte_convert(mm, addr, orig_ptep, pte); } -EXPORT_SYMBOL(__contpte_try_fold); +EXPORT_SYMBOL_GPL(__contpte_try_fold); void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) @@ -150,7 +150,7 @@ void __contpte_try_unfold(struct mm_struct *mm, unsigned long addr, pte = pte_mknoncont(pte); contpte_convert(mm, addr, ptep, pte); } -EXPORT_SYMBOL(__contpte_try_unfold); +EXPORT_SYMBOL_GPL(__contpte_try_unfold); pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte) { @@ -178,7 +178,7 @@ pte_t contpte_ptep_get(pte_t *ptep, pte_t orig_pte) return 
orig_pte; } -EXPORT_SYMBOL(contpte_ptep_get); +EXPORT_SYMBOL_GPL(contpte_ptep_get); pte_t contpte_ptep_get_lockless(pte_t *orig_ptep) { @@ -231,7 +231,7 @@ pte_t contpte_ptep_get_lockless(pte_t *orig_ptep) return orig_pte; } -EXPORT_SYMBOL(contpte_ptep_get_lockless); +EXPORT_SYMBOL_GPL(contpte_ptep_get_lockless); void contpte_set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned int nr) @@ -274,7 +274,7 @@ void contpte_set_ptes(struct mm_struct *mm, unsigned long addr, } while (addr != end); } -EXPORT_SYMBOL(contpte_set_ptes); +EXPORT_SYMBOL_GPL(contpte_set_ptes); void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned int nr, int full) @@ -282,7 +282,7 @@ void contpte_clear_full_ptes(struct mm_struct *mm, unsigned long addr, contpte_try_unfold_partial(mm, addr, ptep, nr); __clear_full_ptes(mm, addr, ptep, nr, full); } -EXPORT_SYMBOL(contpte_clear_full_ptes); +EXPORT_SYMBOL_GPL(contpte_clear_full_ptes); pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, @@ -291,7 +291,7 @@ pte_t contpte_get_and_clear_full_ptes(struct mm_struct *mm, contpte_try_unfold_partial(mm, addr, ptep, nr); return __get_and_clear_full_ptes(mm, addr, ptep, nr, full); } -EXPORT_SYMBOL(contpte_get_and_clear_full_ptes); +EXPORT_SYMBOL_GPL(contpte_get_and_clear_full_ptes); int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) @@ -316,7 +316,7 @@ int contpte_ptep_test_and_clear_young(struct vm_area_struct *vma, return young; } -EXPORT_SYMBOL(contpte_ptep_test_and_clear_young); +EXPORT_SYMBOL_GPL(contpte_ptep_test_and_clear_young); int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) @@ -337,7 +337,7 @@ int contpte_ptep_clear_flush_young(struct vm_area_struct *vma, return young; } -EXPORT_SYMBOL(contpte_ptep_clear_flush_young); +EXPORT_SYMBOL_GPL(contpte_ptep_clear_flush_young); void 
contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned int nr) @@ -355,7 +355,7 @@ void contpte_wrprotect_ptes(struct mm_struct *mm, unsigned long addr, contpte_try_unfold_partial(mm, addr, ptep, nr); __wrprotect_ptes(mm, addr, ptep, nr); } -EXPORT_SYMBOL(contpte_wrprotect_ptes); +EXPORT_SYMBOL_GPL(contpte_wrprotect_ptes); int contpte_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, @@ -401,4 +401,4 @@ int contpte_ptep_set_access_flags(struct vm_area_struct *vma, return 1; } -EXPORT_SYMBOL(contpte_ptep_set_access_flags); +EXPORT_SYMBOL_GPL(contpte_ptep_set_access_flags); -- Gitee From 4ada7a4beda58e151f29e865fde23d219df5d1d7 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 26 Feb 2024 12:03:21 +0000 Subject: [PATCH 1242/2138] arm64/mm: improve comment in contpte_ptep_get_lockless() ANBZ: #9728 commit 94c18d5f7e0d612ce3fb9cb4aa8cfb1308d57a0a upstream Make clear the atomicity/consistency requirements of the API and how we achieve them. Link: https://lore.kernel.org/linux-mm/Zc-Tqqfksho3BHmU@arm.com/ Link: https://lkml.kernel.org/r/20240226120321.1055731-3-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Reviewed-by: Catalin Marinas Cc: John Hubbard Cc: Mark Rutland Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3792 --- arch/arm64/mm/contpte.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/arch/arm64/mm/contpte.c b/arch/arm64/mm/contpte.c index be0a226c4ff9..1b64b4c3f8bf 100644 --- a/arch/arm64/mm/contpte.c +++ b/arch/arm64/mm/contpte.c @@ -183,16 +183,20 @@ EXPORT_SYMBOL_GPL(contpte_ptep_get); pte_t contpte_ptep_get_lockless(pte_t *orig_ptep) { /* - * Gather access/dirty bits, which may be populated in any of the ptes - * of the contig range. We may not be holding the PTL, so any contiguous - * range may be unfolded/modified/refolded under our feet. 
Therefore we - * ensure we read a _consistent_ contpte range by checking that all ptes - * in the range are valid and have CONT_PTE set, that all pfns are - * contiguous and that all pgprots are the same (ignoring access/dirty). - * If we find a pte that is not consistent, then we must be racing with - * an update so start again. If the target pte does not have CONT_PTE - * set then that is considered consistent on its own because it is not - * part of a contpte range. + * The ptep_get_lockless() API requires us to read and return *orig_ptep + * so that it is self-consistent, without the PTL held, so we may be + * racing with other threads modifying the pte. Usually a READ_ONCE() + * would suffice, but for the contpte case, we also need to gather the + * access and dirty bits from across all ptes in the contiguous block, + * and we can't read all of those neighbouring ptes atomically, so any + * contiguous range may be unfolded/modified/refolded under our feet. + * Therefore we ensure we read a _consistent_ contpte range by checking + * that all ptes in the range are valid and have CONT_PTE set, that all + * pfns are contiguous and that all pgprots are the same (ignoring + * access/dirty). If we find a pte that is not consistent, then we must + * be racing with an update so start again. If the target pte does not + * have CONT_PTE set then that is considered consistent on its own + * because it is not part of a contpte range. */ pgprot_t orig_prot; -- Gitee From f33cbaf9c0a89ca73937b98434ae743f1993e022 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Tue, 3 Sep 2024 10:11:32 +0800 Subject: [PATCH 1243/2138] anolis: configs: generate config files with dummy pahole ANBZ: #9853 Some kconfigs are enable/disabled depends on the toolchain's version. 
Most of these differences could be avoided by using dummy gcc, ld and nm located in scripts/dummy-tools/, with cmd `make CROSS_COMPILE=scripts/dummy-tools/ xxxconfig`, but CONFIG_PAHOLE_VERSION is an exception, it still depends on the host's pahole tool. If pahole is not installed on host, CONFIG_MODULE_ALLOW_BTF_MISMATCH will be invisible in the final generated config files. And then if `make listnewconfig` is checked on another host with pahole installed, it will warn that this config is newly imported. To fix this problem, generate config files with the dummy pahole tool. Fixes: 924c80eb23ee ("anolis: configs: add kconfigs baseline framework") Signed-off-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3793 --- .../configs/L2-OPTIONAL/default/CONFIG_PAHOLE_VERSION | 2 +- anolis/configs/scripts/anolis_kconfig.py | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_VERSION b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_VERSION index d86b86b274da..d21e5c7d60c5 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_VERSION +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PAHOLE_VERSION @@ -1 +1 @@ -CONFIG_PAHOLE_VERSION=124 +CONFIG_PAHOLE_VERSION=9999 diff --git a/anolis/configs/scripts/anolis_kconfig.py b/anolis/configs/scripts/anolis_kconfig.py index 3db9215f0eb5..5a19d9df5945 100644 --- a/anolis/configs/scripts/anolis_kconfig.py +++ b/anolis/configs/scripts/anolis_kconfig.py @@ -483,8 +483,9 @@ class ImportOpTranslater(): self.files_info[(dist, arch, None)] = new_path cmd = f"cp {path} {new_path}\n" if refresh == "REFRESH": - cmd += f"KCONFIG_CONFIG={new_path} ARCH={arch} CROSS_COMPILE=scripts/dummy-tools/ " - cmd += f"make -C {self.src_root} olddefconfig > /dev/null\n" + cmd += f"make KCONFIG_CONFIG={new_path} ARCH={arch} CROSS_COMPILE=scripts/dummy-tools/ " + cmd += f"PAHOLE=scripts/dummy-tools/pahole " + cmd += f"-C {self.src_root} olddefconfig > /dev/null\n" cmd += 
f"rm -f {new_path}.old \n" return cmd @@ -669,8 +670,9 @@ class GenerateTranslater(): # refresh configs cmd += f"echo \"* generated file: {final_path}\"\n" - cmd += f"KCONFIG_CONFIG={final_path} ARCH={e.arch} CROSS_COMPILE=scripts/dummy-tools/ " - cmd += f"make -C {self.src_root} olddefconfig > /dev/null\n" + cmd += f"make KCONFIG_CONFIG={final_path} ARCH={e.arch} CROSS_COMPILE=scripts/dummy-tools/ " + cmd += f"PAHOLE=scripts/dummy-tools/pahole " + cmd += f"-C {self.src_root} olddefconfig > /dev/null\n" cmd += f"rm -f {final_path}.old \n" cmd += f"echo \"* processed file: {final_path}\"\n" -- Gitee From 19774942f36c3eaa434d410a573949590846964e Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 20 Feb 2024 14:16:31 +0800 Subject: [PATCH 1244/2138] mm: compaction: update the cc->nr_migratepages when allocating or freeing the freepages ANBZ: #9728 commit ab755bf4249b992fc2140d615ab0a686d50765b4 upstream Currently we will use 'cc->nr_freepages >= cc->nr_migratepages' comparison to ensure that enough freepages are isolated in isolate_freepages(), however it just decreases the cc->nr_freepages without updating cc->nr_migratepages in compaction_alloc(), which will waste more CPU cycles and cause too many freepages to be isolated. So we should also update the cc->nr_migratepages when allocating or freeing the freepages to avoid isolating excess freepages. And I can see fewer free pages are scanned and isolated when running thpcompact on my Arm64 server: k6.7 k6.7_patched Ops Compaction pages isolated 120692036.00 118160797.00 Ops Compaction migrate scanned 131210329.00 154093268.00 Ops Compaction free scanned 1090587971.00 1080632536.00 Ops Compact scan efficiency 12.03 14.26 Moreover, I did not see an obvious latency improvements, this is likely because isolating freepages is not the bottleneck in the thpcompact test case. 
k6.7 k6.7_patched Amean fault-both-1 1089.76 ( 0.00%) 1080.16 * 0.88%* Amean fault-both-3 1616.48 ( 0.00%) 1636.65 * -1.25%* Amean fault-both-5 2266.66 ( 0.00%) 2219.20 * 2.09%* Amean fault-both-7 2909.84 ( 0.00%) 2801.90 * 3.71%* Amean fault-both-12 4861.26 ( 0.00%) 4733.25 * 2.63%* Amean fault-both-18 7351.11 ( 0.00%) 6950.51 * 5.45%* Amean fault-both-24 9059.30 ( 0.00%) 9159.99 * -1.11%* Amean fault-both-30 10685.68 ( 0.00%) 11399.02 * -6.68%* Link: https://lkml.kernel.org/r/6440493f18da82298152b6305d6b41c2962a3ce6.1708409245.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Mel Gorman Reviewed-by: Vlastimil Babka Cc: Masami Hiramatsu Cc: Mathieu Desnoyers Cc: Steven Rostedt Cc: Zi Yan Signed-off-by: Andrew Morton Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3795 --- include/trace/events/compaction.h | 6 +++--- mm/compaction.c | 12 ++++++++++-- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h index 2b2a975efd20..d05759d18538 100644 --- a/include/trace/events/compaction.h +++ b/include/trace/events/compaction.h @@ -78,10 +78,10 @@ DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_fast_isolate_freepage #ifdef CONFIG_COMPACTION TRACE_EVENT(mm_compaction_migratepages, - TP_PROTO(struct compact_control *cc, + TP_PROTO(unsigned int nr_migratepages, unsigned int nr_succeeded), - TP_ARGS(cc, nr_succeeded), + TP_ARGS(nr_migratepages, nr_succeeded), TP_STRUCT__entry( __field(unsigned long, nr_migrated) @@ -90,7 +90,7 @@ TRACE_EVENT(mm_compaction_migratepages, TP_fast_assign( __entry->nr_migrated = nr_succeeded; - __entry->nr_failed = cc->nr_migratepages - nr_succeeded; + __entry->nr_failed = nr_migratepages - nr_succeeded; ), TP_printk("nr_migrated=%lu nr_failed=%lu", diff --git a/mm/compaction.c b/mm/compaction.c index 61c741f11e9b..8fc17fde0d12 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1775,6 +1775,7 @@ static struct 
folio *compaction_alloc(struct folio *src, unsigned long data) dst = list_entry(cc->freepages.next, struct folio, lru); list_del(&dst->lru); cc->nr_freepages--; + cc->nr_migratepages--; return dst; } @@ -1790,6 +1791,7 @@ static void compaction_free(struct folio *dst, unsigned long data) list_add(&dst->lru, &cc->freepages); cc->nr_freepages++; + cc->nr_migratepages++; } /* possible outcome of isolate_migratepages */ @@ -2386,7 +2388,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) unsigned long last_migrated_pfn; const bool sync = cc->mode != MIGRATE_ASYNC; bool update_cached; - unsigned int nr_succeeded = 0; + unsigned int nr_succeeded = 0, nr_migratepages; /* * These counters track activities during zone compaction. Initialize @@ -2512,11 +2514,17 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) pageblock_start_pfn(cc->migrate_pfn - 1)); } + /* + * Record the number of pages to migrate since the + * compaction_alloc/free() will update cc->nr_migratepages + * properly. + */ + nr_migratepages = cc->nr_migratepages; err = migrate_pages(&cc->migratepages, compaction_alloc, compaction_free, (unsigned long)cc, cc->mode, MR_COMPACTION, &nr_succeeded); - trace_mm_compaction_migratepages(cc, nr_succeeded); + trace_mm_compaction_migratepages(nr_migratepages, nr_succeeded); /* All pages were either migrated or will be released */ cc->nr_migratepages = 0; -- Gitee From c2f9616549fea2daec5baf6947aa3e4c5c667691 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 22 Jan 2024 21:01:53 +0800 Subject: [PATCH 1245/2138] mm: compaction: limit the suitable target page order to be less than cc->order ANBZ: #9728 commit 1883e8ac96ddd73a87db7f2f8c06111148a3db6f upstream It can not improve the fragmentation if we isolate the target free pages exceeding cc->order, especially when the cc->order is less than pageblock_order. 
For example, suppose the pageblock_order is MAX_ORDER (size is 4M) and cc->order is 2M THP size, we should not isolate other 2M free pages to be the migration target, which can not improve the fragmentation. Moreover this is also applicable for large folio compaction. Link: https://lkml.kernel.org/r/afcd9377351c259df7a25a388a4a0d5862b986f4.1705928395.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Mel Gorman Cc: Vlastimil Babka Cc: Zi Yan Signed-off-by: Andrew Morton Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3795 --- mm/compaction.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mm/compaction.c b/mm/compaction.c index 8fc17fde0d12..61b74e8c1e19 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1347,12 +1347,14 @@ static bool suitable_migration_target(struct compact_control *cc, { /* If the page is a large free page, then disallow migration */ if (PageBuddy(page)) { + int order = cc->order > 0 ? cc->order : pageblock_order; + /* * We are checking page_order without zone->lock taken. But * the only small danger is that we skip a potentially suitable * pageblock, so it's not worth to check order for valid range. */ - if (buddy_order_unsafe(page) >= pageblock_order) + if (buddy_order_unsafe(page) >= order) return false; } -- Gitee From aa6955946c2f4d324369b598869830adad68d52a Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Tue, 20 Feb 2024 13:32:17 -0500 Subject: [PATCH 1246/2138] mm/page_alloc: remove unused fpi_flags in free_pages_prepare() ANBZ: #9728 commit 5267fe5d092e80a83740e5a1f6d5638d88ac7309 upstream Patch series "Enable >0 order folio memory compaction", v7. This patchset enables >0 order folio memory compaction, which is one of the prerequisites for large folio support[1]. 
I am aware of that split free pages is necessary for folio migration in compaction, since if >0 order free pages are never split and no order-0 free page is scanned, compaction will end prematurely due to migration returns -ENOMEM. Free page split becomes a must instead of an optimization. lkp ncompare results (on a 8-CPU (Intel Xeon E5-2650 v4 @2.20GHz) 16G VM) for default LRU (-no-mglru) and CONFIG_LRU_GEN are shown at the bottom, copied from V3[4]. In sum, most of vm-scalability applications do not see performance change, and the others see ~4% to ~26% performance boost under default LRU and ~2% to ~6% performance boost under CONFIG_LRU_GEN. Overview === To support >0 order folio compaction, the patchset changes how free pages used for migration are kept during compaction. Free pages used to be split into order-0 pages that are post allocation processed (i.e., PageBuddy flag cleared, page order stored in page->private is zeroed, and page reference is set to 1). Now all free pages are kept in a NR_PAGE_ORDER array of page lists based on their order without post allocation process. When migrate_pages() asks for a new page, one of the free pages, based on the requested page order, is then processed and given out. And THP <2MB would need this feature. 
[1] https://lore.kernel.org/linux-mm/f8d47176-03a8-99bf-a813-b5942830fd73@arm.com/ [2] https://lore.kernel.org/linux-mm/20231113170157.280181-1-zi.yan@sent.com/ [3] https://lore.kernel.org/linux-mm/20240123034636.1095672-1-zi.yan@sent.com/ [4] https://lore.kernel.org/linux-mm/20240202161554.565023-1-zi.yan@sent.com/ [5] https://lore.kernel.org/linux-mm/20240212163510.859822-1-zi.yan@sent.com/ [6] https://lore.kernel.org/linux-mm/20240214220420.1229173-1-zi.yan@sent.com/ [7] https://lore.kernel.org/linux-mm/20240216170432.1268753-1-zi.yan@sent.com/ This patch (of 4): Commit 0a54864f8dfb ("kasan: remove PG_skip_kasan_poison flag") removes the use of fpi_flags in should_skip_kasan_poison() and fpi_flags is only passed to should_skip_kasan_poison() in free_pages_prepare(). Remove the unused parameter. Link: https://lkml.kernel.org/r/20240220183220.1451315-1-zi.yan@sent.com Link: https://lkml.kernel.org/r/20240220183220.1451315-2-zi.yan@sent.com Signed-off-by: Zi Yan Reviewed-by: Vlastimil Babka Reviewed-by: David Hildenbrand Cc: Adam Manzanares Cc: Baolin Wang Cc: "Huang, Ying" Cc: Johannes Weiner Cc: Kemeng Shi Cc: Kirill A. Shutemov Cc: Luis Chamberlain Cc: Matthew Wilcox (Oracle) Cc: Mel Gorman Cc: Ryan Roberts Cc: Vishal Moola (Oracle) Cc: Yin Fengwei Cc: Yu Zhao Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3795 --- mm/page_alloc.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 245d0348e4a9..5e89aa4fa683 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1064,7 +1064,7 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) * on-demand allocation and then freed again before the deferred pages * initialization is done, but this is not likely to happen. 
*/ -static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) +static inline bool should_skip_kasan_poison(struct page *page) { if (IS_ENABLED(CONFIG_KASAN_GENERIC)) return deferred_pages_enabled(); @@ -1084,10 +1084,10 @@ static void kernel_init_pages(struct page *page, int numpages) } static __always_inline bool free_pages_prepare(struct page *page, - unsigned int order, fpi_t fpi_flags) + unsigned int order) { int bad = 0; - bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags); + bool skip_kasan_poison = should_skip_kasan_poison(page); bool init = want_init_on_free(); struct folio *folio = page_folio(page); @@ -1289,7 +1289,7 @@ static void __free_pages_ok(struct page *page, unsigned int order, unsigned long pfn = page_to_pfn(page); struct zone *zone = page_zone(page); - if (!free_pages_prepare(page, order, fpi_flags)) + if (!free_pages_prepare(page, order)) return; if (unlikely(!order && kfence_free_page(page))) @@ -2344,7 +2344,7 @@ static bool free_unref_page_prepare(struct page *page, unsigned long pfn, { int migratetype; - if (!free_pages_prepare(page, order, FPI_NONE)) + if (!free_pages_prepare(page, order)) return false; migratetype = get_pfnblock_migratetype(page, pfn); -- Gitee From 6d6820dac01da1548606cb92e9064261d7e16d90 Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Tue, 20 Feb 2024 13:32:18 -0500 Subject: [PATCH 1247/2138] mm/compaction: enable compacting >0 order folios. ANBZ: #9728 commit ee6f62fd34f0bb99ef93f799bcf5fc6a6b24945b upstream migrate_pages() supports >0 order folio migration and during compaction, even if compaction_alloc() cannot provide >0 order free pages, migrate_pages() can split the source page and try to migrate the base pages from the split. It can be a baseline and start point for adding support for compacting >0 order folios. 
Link: https://lkml.kernel.org/r/20240220183220.1451315-3-zi.yan@sent.com Signed-off-by: Zi Yan Suggested-by: Huang Ying Reviewed-by: Baolin Wang Reviewed-by: Vlastimil Babka Tested-by: Baolin Wang Tested-by: Yu Zhao Cc: Adam Manzanares Cc: David Hildenbrand Cc: Johannes Weiner Cc: Kemeng Shi Cc: Kirill A. Shutemov Cc: Luis Chamberlain Cc: Matthew Wilcox (Oracle) Cc: Mel Gorman Cc: Ryan Roberts Cc: Vishal Moola (Oracle) Cc: Vlastimil Babka Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3795 --- mm/compaction.c | 99 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 76 insertions(+), 23 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index 61b74e8c1e19..4c22fef5877b 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -40,9 +40,22 @@ static inline void count_compact_events(enum vm_event_item item, long delta) { count_vm_events(item, delta); } + +/* + * order == -1 is expected when compacting proactively via + * 1. /proc/sys/vm/compact_memory + * 2. /sys/devices/system/node/nodex/compact + * 3. /proc/sys/vm/compaction_proactiveness + */ +static inline bool is_via_compact_memory(int order) +{ + return order == -1; +} + #else #define count_compact_event(item) do { } while (0) #define count_compact_events(item, delta) do { } while (0) +static inline bool is_via_compact_memory(int order) { return false; } #endif #if defined CONFIG_COMPACTION || defined CONFIG_CMA @@ -817,6 +830,32 @@ static bool too_many_isolated(struct compact_control *cc) return too_many; } +/** + * skip_isolation_on_order() - determine when to skip folio isolation based on + * folio order and compaction target order + * @order: to-be-isolated folio order + * @target_order: compaction target order + * + * This avoids unnecessary folio isolations during compaction. 
+ */ +static bool skip_isolation_on_order(int order, int target_order) +{ + /* + * Unless we are performing global compaction (i.e., + * is_via_compact_memory), skip any folios that are larger than the + * target order: we wouldn't be here if we'd have a free folio with + * the desired target_order, so migrating this folio would likely fail + * later. + */ + if (!is_via_compact_memory(target_order) && order >= target_order) + return true; + /* + * We limit memory compaction to pageblocks and won't try + * creating free blocks of memory that are larger than that. + */ + return order >= pageblock_order; +} + /** * isolate_migratepages_block() - isolate all migrate-able pages within * a single pageblock @@ -947,7 +986,22 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, valid_page = page; } - if (PageHuge(page) && cc->alloc_contig) { + if (PageHuge(page)) { + /* + * skip hugetlbfs if we are not compacting for pages + * bigger than its order. THPs and other compound pages + * are handled below. + */ + if (!cc->alloc_contig) { + const unsigned int order = compound_order(page); + + if (order <= MAX_ORDER) { + low_pfn += (1UL << order) - 1; + nr_scanned += (1UL << order) - 1; + } + goto isolate_fail; + } + /* for alloc_contig case */ if (locked) { unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; @@ -1008,21 +1062,24 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, } /* - * Regardless of being on LRU, compound pages such as THP and - * hugetlbfs are not to be compacted unless we are attempting - * an allocation much larger than the huge page size (eg CMA). - * We can potentially save a lot of iterations if we skip them - * at once. The check is racy, but we can consider only valid - * values and the only danger is skipping too much. 
+ * Regardless of being on LRU, compound pages such as THP + * (hugetlbfs is handled above) are not to be compacted unless + * we are attempting an allocation larger than the compound + * page size. We can potentially save a lot of iterations if we + * skip them at once. The check is racy, but we can consider + * only valid values and the only danger is skipping too much. */ if (PageCompound(page) && !cc->alloc_contig) { const unsigned int order = compound_order(page); - if (likely(order <= MAX_ORDER)) { - low_pfn += (1UL << order) - 1; - nr_scanned += (1UL << order) - 1; + /* Skip based on page order and compaction target order. */ + if (skip_isolation_on_order(order, cc->order)) { + if (order <= MAX_ORDER) { + low_pfn += (1UL << order) - 1; + nr_scanned += (1UL << order) - 1; + } + goto isolate_fail; } - goto isolate_fail; } /* @@ -1147,10 +1204,11 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, } /* - * folio become large since the non-locked check, - * and it's on LRU. + * Check LRU folio order under the lock */ - if (unlikely(folio_test_large(folio) && !cc->alloc_contig)) { + if (unlikely(skip_isolation_on_order(folio_order(folio), + cc->order) && + !cc->alloc_contig)) { low_pfn += folio_nr_pages(folio) - 1; nr_scanned += folio_nr_pages(folio) - 1; folio_set_lru(folio); @@ -1767,6 +1825,10 @@ static struct folio *compaction_alloc(struct folio *src, unsigned long data) struct compact_control *cc = (struct compact_control *)data; struct folio *dst; + /* this makes migrate_pages() split the source page and retry */ + if (folio_test_large(src)) + return NULL; + if (list_empty(&cc->freepages)) { isolate_freepages(cc); @@ -2069,15 +2131,6 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc) return cc->nr_migratepages ? 
ISOLATE_SUCCESS : ISOLATE_NONE; } -/* - * order == -1 is expected when compacting via - * /proc/sys/vm/compact_memory - */ -static inline bool is_via_compact_memory(int order) -{ - return order == -1; -} - /* * Determine whether kswapd is (or recently was!) running on this node. * -- Gitee From dda62e03f84194e326c233ba1abc727a71a3d046 Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Tue, 20 Feb 2024 13:32:19 -0500 Subject: [PATCH 1248/2138] mm/compaction: add support for >0 order folio memory compaction. ANBZ: #9728 commit 733aea0b3a7bba0451dfc19322665de13a5b7af4 upstream Before last commit, memory compaction only migrates order-0 folios and skips >0 order folios. Last commit splits all >0 order folios during compaction. This commit migrates >0 order folios during compaction by keeping isolated free pages at their original size without splitting them into order-0 pages and using them directly during migration process. What is different from the prior implementation: 1. All isolated free pages are kept in a NR_PAGE_ORDERS array of page lists, where each page list stores free pages in the same order. 2. All free pages are not post_alloc_hook() processed nor buddy pages, although their orders are stored in first page's private like buddy pages. 3. During migration, in new page allocation time (i.e., in compaction_alloc()), free pages are then processed by post_alloc_hook(). When migration fails and a new page is returned (i.e., in compaction_free()), free pages are restored by reversing the post_alloc_hook() operations using newly added free_pages_prepare_fpi_none(). Step 3 is done for a latter optimization that splitting and/or merging free pages during compaction becomes easier. Note: without splitting free pages, compaction can end prematurely due to migration will return -ENOMEM even if there is free pages. This happens when no order-0 free page exist and compaction_alloc() return NULL. 
Link: https://lkml.kernel.org/r/20240220183220.1451315-4-zi.yan@sent.com Signed-off-by: Zi Yan Reviewed-by: Baolin Wang Reviewed-by: Vlastimil Babka Tested-by: Baolin Wang Tested-by: Yu Zhao Cc: Adam Manzanares Cc: David Hildenbrand Cc: Huang Ying Cc: Johannes Weiner Cc: Kemeng Shi Cc: Kirill A. Shutemov Cc: Luis Chamberlain Cc: Matthew Wilcox (Oracle) Cc: Mel Gorman Cc: Ryan Roberts Cc: Vishal Moola (Oracle) Cc: Vlastimil Babka Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3795 --- mm/compaction.c | 140 +++++++++++++++++++++++++++--------------------- mm/internal.h | 4 +- mm/page_alloc.c | 2 +- 3 files changed, 83 insertions(+), 63 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index 4c22fef5877b..bc87474ac83d 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -79,45 +79,56 @@ static inline bool is_via_compact_memory(int order) { return false; } #define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT) #endif -static unsigned long release_freepages(struct list_head *freelist) +static void split_map_pages(struct list_head *freepages) { + unsigned int i, order; struct page *page, *next; - unsigned long high_pfn = 0; + LIST_HEAD(tmp_list); - list_for_each_entry_safe(page, next, freelist, lru) { - unsigned long pfn = page_to_pfn(page); - list_del(&page->lru); - __free_page(page); - if (pfn > high_pfn) - high_pfn = pfn; - } + for (order = 0; order < NR_PAGE_ORDERS; order++) { + list_for_each_entry_safe(page, next, &freepages[order], lru) { + unsigned int nr_pages; - return high_pfn; + list_del(&page->lru); + + nr_pages = 1 << order; + + post_alloc_hook(page, order, __GFP_MOVABLE); + if (order) + split_page(page, order); + + for (i = 0; i < nr_pages; i++) { + list_add(&page->lru, &tmp_list); + page++; + } + } + list_splice_init(&tmp_list, &freepages[0]); + } } -static void split_map_pages(struct list_head *list) +static unsigned long release_free_list(struct 
list_head *freepages) { - unsigned int i, order, nr_pages; - struct page *page, *next; - LIST_HEAD(tmp_list); - - list_for_each_entry_safe(page, next, list, lru) { - list_del(&page->lru); + int order; + unsigned long high_pfn = 0; - order = page_private(page); - nr_pages = 1 << order; + for (order = 0; order < NR_PAGE_ORDERS; order++) { + struct page *page, *next; - post_alloc_hook(page, order, __GFP_MOVABLE); - if (order) - split_page(page, order); + list_for_each_entry_safe(page, next, &freepages[order], lru) { + unsigned long pfn = page_to_pfn(page); - for (i = 0; i < nr_pages; i++) { - list_add(&page->lru, &tmp_list); - page++; + list_del(&page->lru); + /* + * Convert free pages into post allocation pages, so + * that we can free them via __free_page. + */ + post_alloc_hook(page, order, __GFP_MOVABLE); + __free_pages(page, order); + if (pfn > high_pfn) + high_pfn = pfn; } } - - list_splice(&tmp_list, list); + return high_pfn; } #ifdef CONFIG_COMPACTION @@ -670,7 +681,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, nr_scanned += isolated - 1; total_isolated += isolated; cc->nr_freepages += isolated; - list_add_tail(&page->lru, freelist); + list_add_tail(&page->lru, &freelist[order]); if (!strict && cc->nr_migratepages <= cc->nr_freepages) { blockpfn += isolated; @@ -736,7 +747,11 @@ isolate_freepages_range(struct compact_control *cc, unsigned long start_pfn, unsigned long end_pfn) { unsigned long isolated, pfn, block_start_pfn, block_end_pfn; - LIST_HEAD(freelist); + int order; + struct list_head tmp_freepages[NR_PAGE_ORDERS]; + + for (order = 0; order < NR_PAGE_ORDERS; order++) + INIT_LIST_HEAD(&tmp_freepages[order]); pfn = start_pfn; block_start_pfn = pageblock_start_pfn(pfn); @@ -767,7 +782,7 @@ isolate_freepages_range(struct compact_control *cc, break; isolated = isolate_freepages_block(cc, &isolate_start_pfn, - block_end_pfn, &freelist, 0, true); + block_end_pfn, tmp_freepages, 0, true); /* * In strict mode, 
isolate_freepages_block() returns 0 if @@ -784,15 +799,15 @@ isolate_freepages_range(struct compact_control *cc, */ } - /* __isolate_free_page() does not map the pages */ - split_map_pages(&freelist); - if (pfn < end_pfn) { /* Loop terminated early, cleanup. */ - release_freepages(&freelist); + release_free_list(tmp_freepages); return 0; } + /* __isolate_free_page() does not map the pages */ + split_map_pages(tmp_freepages); + /* We don't use freelists for anything. */ return pfn; } @@ -1500,7 +1515,7 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn) if (!page) return; - isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); + isolate_freepages_block(cc, &start_pfn, end_pfn, cc->freepages, 1, false); /* Skip this pageblock in the future as it's full or nearly full */ if (start_pfn == end_pfn && !cc->no_set_skip_hint) @@ -1629,7 +1644,7 @@ static void fast_isolate_freepages(struct compact_control *cc) nr_scanned += nr_isolated - 1; total_isolated += nr_isolated; cc->nr_freepages += nr_isolated; - list_add_tail(&page->lru, &cc->freepages); + list_add_tail(&page->lru, &cc->freepages[order]); count_compact_events(COMPACTISOLATED, nr_isolated); } else { /* If isolation fails, abort the search */ @@ -1703,13 +1718,12 @@ static void isolate_freepages(struct compact_control *cc) unsigned long isolate_start_pfn; /* exact pfn we start at */ unsigned long block_end_pfn; /* end of current pageblock */ unsigned long low_pfn; /* lowest pfn scanner is able to scan */ - struct list_head *freelist = &cc->freepages; unsigned int stride; /* Try a small search of the free lists for a candidate */ fast_isolate_freepages(cc); if (cc->nr_freepages) - goto splitmap; + return; /* * Initialise the free scanner. The starting point is where we last @@ -1769,7 +1783,7 @@ static void isolate_freepages(struct compact_control *cc) /* Found a block suitable for isolating free pages from. 
*/ nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn, - block_end_pfn, freelist, stride, false); + block_end_pfn, cc->freepages, stride, false); /* Update the skip hint if the full pageblock was scanned */ if (isolate_start_pfn == block_end_pfn) @@ -1810,10 +1824,6 @@ static void isolate_freepages(struct compact_control *cc) * and the loop terminated due to isolate_start_pfn < low_pfn */ cc->free_pfn = isolate_start_pfn; - -splitmap: - /* __isolate_free_page() does not map the pages */ - split_map_pages(freelist); } /* @@ -1824,24 +1834,22 @@ static struct folio *compaction_alloc(struct folio *src, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; struct folio *dst; + int order = folio_order(src); - /* this makes migrate_pages() split the source page and retry */ - if (folio_test_large(src)) - return NULL; - - if (list_empty(&cc->freepages)) { + if (list_empty(&cc->freepages[order])) { isolate_freepages(cc); - - if (list_empty(&cc->freepages)) + if (list_empty(&cc->freepages[order])) return NULL; } - dst = list_entry(cc->freepages.next, struct folio, lru); + dst = list_first_entry(&cc->freepages[order], struct folio, lru); list_del(&dst->lru); - cc->nr_freepages--; - cc->nr_migratepages--; - - return dst; + post_alloc_hook(&dst->page, order, __GFP_MOVABLE); + if (order) + prep_compound_page(&dst->page, order); + cc->nr_freepages -= 1 << order; + cc->nr_migratepages -= 1 << order; + return page_rmappable_folio(&dst->page); } /* @@ -1852,10 +1860,19 @@ static struct folio *compaction_alloc(struct folio *src, unsigned long data) static void compaction_free(struct folio *dst, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; + int order = folio_order(dst); + struct page *page = &dst->page; - list_add(&dst->lru, &cc->freepages); - cc->nr_freepages++; - cc->nr_migratepages++; + if (folio_put_testzero(dst)) { + free_pages_prepare(page, order); + list_add(&dst->lru, &cc->freepages[order]); + 
cc->nr_freepages += 1 << order; + } + cc->nr_migratepages += 1 << order; + /* + * someone else has referenced the page, we cannot take it back to our + * free list. + */ } /* possible outcome of isolate_migratepages */ @@ -2444,6 +2461,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) const bool sync = cc->mode != MIGRATE_ASYNC; bool update_cached; unsigned int nr_succeeded = 0, nr_migratepages; + int order; /* * These counters track activities during zone compaction. Initialize @@ -2453,7 +2471,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) cc->total_free_scanned = 0; cc->nr_migratepages = 0; cc->nr_freepages = 0; - INIT_LIST_HEAD(&cc->freepages); + for (order = 0; order < NR_PAGE_ORDERS; order++) + INIT_LIST_HEAD(&cc->freepages[order]); INIT_LIST_HEAD(&cc->migratepages); cc->migratetype = gfp_migratetype(cc->gfp_mask); @@ -2653,7 +2672,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) * so we don't leave any returned pages behind in the next attempt. 
*/ if (cc->nr_freepages > 0) { - unsigned long free_pfn = release_freepages(&cc->freepages); + unsigned long free_pfn = release_free_list(cc->freepages); cc->nr_freepages = 0; VM_BUG_ON(free_pfn == 0); @@ -2672,7 +2691,6 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret); - VM_BUG_ON(!list_empty(&cc->freepages)); VM_BUG_ON(!list_empty(&cc->migratepages)); return ret; diff --git a/mm/internal.h b/mm/internal.h index 960547d85c40..20736585cb98 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -508,6 +508,8 @@ extern void prep_compound_page(struct page *page, unsigned int order); extern void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags); +extern bool free_pages_prepare(struct page *page, unsigned int order); + extern int user_min_free_kbytes; extern void free_unref_page(struct page *page, unsigned int order); @@ -542,7 +544,7 @@ int split_free_page(struct page *free_page, * completes when free_pfn <= migrate_pfn */ struct compact_control { - struct list_head freepages; /* List of free pages to migrate to */ + struct list_head freepages[NR_PAGE_ORDERS]; /* List of free pages to migrate to */ struct list_head migratepages; /* List of pages being migrated */ unsigned int nr_freepages; /* Number of isolated free pages */ unsigned int nr_migratepages; /* Number of pages to migrate */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5e89aa4fa683..9131776ceadc 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1083,7 +1083,7 @@ static void kernel_init_pages(struct page *page, int numpages) kasan_enable_current(); } -static __always_inline bool free_pages_prepare(struct page *page, +__always_inline bool free_pages_prepare(struct page *page, unsigned int order) { int bad = 0; -- Gitee From 6a2b72ed6a542a3f76babf6f7086cdcbfa9bd49e Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Tue, 20 Feb 2024 13:32:20 -0500 Subject: [PATCH 1249/2138] mm/compaction: optimize >0 order 
folio compaction with free page split. ANBZ: #9728 commit 73318e2cafe53e8b7c8899d990cf8eaca32184d0 upstream During migration in a memory compaction, free pages are placed in an array of page lists based on their order. But the desired free page order (i.e., the order of a source page) might not be always present, thus leading to migration failures and premature compaction termination. Split a high order free pages when source migration page has a lower order to increase migration successful rate. Note: merging free pages when a migration fails and a lower order free page is returned via compaction_free() is possible, but there is too much work. Since the free pages are not buddy pages, it is hard to identify these free pages using existing PFN-based page merging algorithm. Link: https://lkml.kernel.org/r/20240220183220.1451315-5-zi.yan@sent.com Signed-off-by: Zi Yan Reviewed-by: Baolin Wang Reviewed-by: Vlastimil Babka Tested-by: Baolin Wang Tested-by: Yu Zhao Cc: Adam Manzanares Cc: David Hildenbrand Cc: Huang Ying Cc: Johannes Weiner Cc: Kemeng Shi Cc: Kirill A. 
Shutemov Cc: Luis Chamberlain Cc: Matthew Wilcox (Oracle) Cc: Mel Gorman Cc: Ryan Roberts Cc: Vishal Moola (Oracle) Cc: Vlastimil Babka Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3795 --- mm/compaction.c | 35 ++++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index bc87474ac83d..1b583b5cb1dd 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1835,15 +1835,40 @@ static struct folio *compaction_alloc(struct folio *src, unsigned long data) struct compact_control *cc = (struct compact_control *)data; struct folio *dst; int order = folio_order(src); + bool has_isolated_pages = false; + int start_order; + struct page *freepage; + unsigned long size; + +again: + for (start_order = order; start_order < NR_PAGE_ORDERS; start_order++) + if (!list_empty(&cc->freepages[start_order])) + break; - if (list_empty(&cc->freepages[order])) { - isolate_freepages(cc); - if (list_empty(&cc->freepages[order])) + /* no free pages in the list */ + if (start_order == NR_PAGE_ORDERS) { + if (has_isolated_pages) return NULL; + isolate_freepages(cc); + has_isolated_pages = true; + goto again; + } + + freepage = list_first_entry(&cc->freepages[start_order], struct page, + lru); + size = 1 << start_order; + + list_del(&freepage->lru); + + while (start_order > order) { + start_order--; + size >>= 1; + + list_add(&freepage[size].lru, &cc->freepages[start_order]); + set_page_private(&freepage[size], start_order); } + dst = (struct folio *)freepage; - dst = list_first_entry(&cc->freepages[order], struct folio, lru); - list_del(&dst->lru); post_alloc_hook(&dst->page, order, __GFP_MOVABLE); if (order) prep_compound_page(&dst->page, order); -- Gitee From 4541785464304a30acc94c185adcf70896119349 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 28 Jun 2024 17:02:12 +0800 Subject: [PATCH 1250/2138] anolis: x86/hpet: Read 
HPET directly if panic in progress MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #9451 When the clocksource of the system is HPET,a CPU executing read_hpet might be interrupted by #GP/#PF to executing the panic,this may lead to read_hpet dead loops: CPU x CPU x ---- ---- read_hpet() arch_spin_trylock(&hpet.lock) [CPU x got the hpet.lock] #GP/#PF happened panic() kmsg_dump() pstore_dump() pstore_record_init() ktime_get_real_fast_ns() read_hpet() [dead loops] To avoid this dead loops, read HPET directly if panic in progress. Signed-off-by: leoliu-oc Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3449 --- arch/x86/kernel/hpet.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 046bc9d57e99..2626fa052b45 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -804,6 +804,12 @@ static u64 read_hpet(struct clocksource *cs) if (in_nmi()) return (u64)hpet_readl(HPET_COUNTER); + /* + * Read HPET directly if panic in progress. + */ + if (unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID)) + return (u64)hpet_readl(HPET_COUNTER); + /* * Read the current state of the lock and HPET value atomically. */ -- Gitee From 671f7c5fd8b1912c58aa752cb6eafe170f908a57 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Tue, 10 Sep 2024 10:26:49 +0800 Subject: [PATCH 1251/2138] anolis: configs: disable CONFIG_RAS_CEC ANBZ: #10825 CONFIG_RAS_CEC enabled will cause mcelog and CE-offline unavailable, so disable it. 
Signed-off-by: Qiao Ma Reviewed-by: Cruz Zhao Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/3816 --- anolis/configs/L0-MANDATORY/x86/CONFIG_RAS_CEC | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC | 1 - anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC_DEBUG | 1 - anolis/configs/examination/EXTRA/x86.config | 5 ++++- 4 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_RAS_CEC delete mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC delete mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC_DEBUG diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_RAS_CEC b/anolis/configs/L0-MANDATORY/x86/CONFIG_RAS_CEC new file mode 100644 index 000000000000..d1b75f27c7d8 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_RAS_CEC @@ -0,0 +1 @@ +# CONFIG_RAS_CEC is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC deleted file mode 100644 index 7b0901ca1fb1..000000000000 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC +++ /dev/null @@ -1 +0,0 @@ -CONFIG_RAS_CEC=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC_DEBUG b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC_DEBUG deleted file mode 100644 index 116af5def7d5..000000000000 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RAS_CEC_DEBUG +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_RAS_CEC_DEBUG is not set diff --git a/anolis/configs/examination/EXTRA/x86.config b/anolis/configs/examination/EXTRA/x86.config index 67d4de0705b3..8a1cd75223ae 100644 --- a/anolis/configs/examination/EXTRA/x86.config +++ b/anolis/configs/examination/EXTRA/x86.config @@ -9,4 +9,7 @@ CONFIG_CXL_REGION=y CONFIG_CXL_PMU=m CONFIG_DEV_DAX_CXL=m CONFIG_DEV_DAX_HMEM=m -CONFIG_DEV_DAX_KMEM=m \ No newline at end of file +CONFIG_DEV_DAX_KMEM=m + +## (ANBZ#10825) +# CONFIG_RAS_CEC is not set \ No newline at end of file -- Gitee From 8cc158da9d7709245a3cb970b8ab6f15960d446e Mon Sep 17 
00:00:00 2001 From: Qiao Ma Date: Mon, 9 Sep 2024 19:31:32 +0800 Subject: [PATCH 1252/2138] anolis: configs: mark CONFIG_FCOE important ANBZ: #10820 Some platform depends on it, mark it important. Signed-off-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3813 --- .../{L1-RECOMMEND => L0-MANDATORY}/default/CONFIG_FCOE | 0 anolis/configs/examination/EXTRA/arm64.config | 2 ++ anolis/configs/examination/EXTRA/x86.config | 5 ++++- 3 files changed, 6 insertions(+), 1 deletion(-) rename anolis/configs/{L1-RECOMMEND => L0-MANDATORY}/default/CONFIG_FCOE (100%) create mode 100644 anolis/configs/examination/EXTRA/arm64.config diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FCOE b/anolis/configs/L0-MANDATORY/default/CONFIG_FCOE similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_FCOE rename to anolis/configs/L0-MANDATORY/default/CONFIG_FCOE diff --git a/anolis/configs/examination/EXTRA/arm64.config b/anolis/configs/examination/EXTRA/arm64.config new file mode 100644 index 000000000000..58855622c402 --- /dev/null +++ b/anolis/configs/examination/EXTRA/arm64.config @@ -0,0 +1,2 @@ +## (ANBZ#10820) +CONFIG_FCOE=m diff --git a/anolis/configs/examination/EXTRA/x86.config b/anolis/configs/examination/EXTRA/x86.config index 8a1cd75223ae..66f96bae3a6c 100644 --- a/anolis/configs/examination/EXTRA/x86.config +++ b/anolis/configs/examination/EXTRA/x86.config @@ -12,4 +12,7 @@ CONFIG_DEV_DAX_HMEM=m CONFIG_DEV_DAX_KMEM=m ## (ANBZ#10825) -# CONFIG_RAS_CEC is not set \ No newline at end of file +# CONFIG_RAS_CEC is not set + +## (ANBZ#10820) +CONFIG_FCOE=m -- Gitee From 3de52d634b48fef08b9e084f2277c3764bd0dab2 Mon Sep 17 00:00:00 2001 From: "zhouzhixin.zzx" Date: Mon, 9 Sep 2024 14:44:24 +0800 Subject: [PATCH 1253/2138] anolis: mm: async fork: introduce async fork interface ANBZ: #10835 Under normal circumstances, parent will be blocked synchronously in copy_mm() when fork(2) is called, and the duration may be long with large memory usage. 
It will cause the parent out of service from the user's view. For example, a redis instance creates a memory snapshot based on fork(2), it's trival to produce latency spikes. Async fork is designed to reduce block duration when parent process calls fork(2). It can be controlled by each memcg and can be switched on/off in the fly. It's disabled by default, user can enable it by: # echo 1 > /path/to/memcg/memory.async_fork And disable it by: # echo 0 > /path/to/memcg/memory.async_fork Child memcg will inherit parent memcg's configuration. This interfaces the interfaces and stubs of async fork, which split the copy_page_range() function into fast path and slow path. The parent is expected to excute only the fast path in fork(2), while the child process will excute the slow path before return to user mode. Signed-off-by: zhouzhixin.zzx Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3820 --- arch/x86/include/asm/pgtable.h | 4 +- include/linux/memcontrol.h | 28 ++++++++++ include/linux/mm.h | 95 ++++++++++++++++++++++++++++++++++ include/linux/mm_types.h | 8 +++ kernel/fork.c | 41 ++++++++++++++- kernel/sched/core.c | 2 + mm/Kconfig | 12 +++++ mm/Makefile | 1 + mm/async_fork.c | 40 ++++++++++++++ mm/gup.c | 9 ++++ mm/khugepaged.c | 6 +++ mm/madvise.c | 2 + mm/memcontrol.c | 36 +++++++++++++ mm/memory.c | 5 ++ mm/mempolicy.c | 2 + mm/mlock.c | 2 + mm/mmap.c | 17 ++++++ mm/mprotect.c | 2 + mm/mremap.c | 2 + 19 files changed, 311 insertions(+), 3 deletions(-) create mode 100644 mm/async_fork.c diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 993d49cd379a..2f291fe56c1d 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -1038,8 +1038,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) static inline int pmd_bad(pmd_t pmd) { - return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED)) != - (_KERNPG_TABLE & ~_PAGE_ACCESSED); + return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED | 
_PAGE_RW)) != + (_KERNPG_TABLE & ~(_PAGE_ACCESSED | _PAGE_RW)); } static inline unsigned long pages_to_mb(unsigned long npg) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index bb10a4d9d17e..9a1e6b5cdb31 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -340,6 +340,10 @@ struct mem_cgroup { struct lru_gen_mm_list mm_list; #endif +#ifdef CONFIG_ASYNC_FORK + unsigned long async_fork; +#endif + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) @@ -1613,6 +1617,30 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, } #endif /* CONFIG_MEMCG */ +#ifdef CONFIG_ASYNC_FORK +static inline unsigned long task_async_fork(struct task_struct *p) +{ + struct mem_cgroup *task_memcg; + unsigned long async_fork = 0UL; + + if (!async_fork_enabled() || mem_cgroup_disabled()) + return 0UL; + + rcu_read_lock(); + task_memcg = mem_cgroup_from_task(p); + if (task_memcg) + async_fork = task_memcg->async_fork; + rcu_read_unlock(); + + return async_fork; +} +#else +static inline unsigned long task_async_fork(struct task_struct *p) +{ + return 0UL; +} +#endif + static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx) { __mod_lruvec_kmem_state(p, idx, 1); diff --git a/include/linux/mm.h b/include/linux/mm.h index 5b5d6e89afa8..6af90773f09a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -4081,4 +4081,99 @@ static inline void accept_memory(phys_addr_t start, phys_addr_t end) #endif +#ifdef CONFIG_ASYNC_FORK +#define ASYNC_FORK_CANDIDATE 0 +DECLARE_STATIC_KEY_FALSE(async_fork_enabled_key); +DECLARE_STATIC_KEY_FALSE(async_fork_staging_key); +static inline bool async_fork_enabled(void) +{ + return static_branch_unlikely(&async_fork_enabled_key); +} +static inline bool async_fork_staging(void) +{ + return static_branch_unlikely(&async_fork_staging_key); +} + +int async_fork_cpr_fast(struct vm_area_struct *vma, struct vm_area_struct *mpnt); +void async_fork_cpr_bind(struct mm_struct *oldmm, 
struct mm_struct *mm, int err); +void async_fork_cpr_rest(void); +void async_fork_cpr_done(struct mm_struct *mm, bool r, bool l); + +bool __is_pmd_async_fork(pmd_t pmd); +void __async_fork_fixup_pmd(struct vm_area_struct *mpnt, pmd_t *pmd, + unsigned long addr); +void __async_fork_fixup_vma(struct vm_area_struct *mpnt); + +static inline bool is_pmd_async_fork(pmd_t pmd) +{ + if (async_fork_staging()) + return __is_pmd_async_fork(pmd); + return false; +} +static inline void async_fork_fixup_pmd(struct vm_area_struct *mpnt, pmd_t *pmd, + unsigned long addr) +{ + if (async_fork_staging()) + __async_fork_fixup_pmd(mpnt, pmd, addr); +} +static inline void async_fork_fixup_vma(struct vm_area_struct *mpnt) +{ + if (async_fork_staging()) + __async_fork_fixup_vma(mpnt); +} +#else +static inline bool async_fork_enabled(void) +{ + return false; +} +static inline bool async_fork_staging(void) +{ + return false; +} + +static inline int async_fork_cpr_fast(struct vm_area_struct *vma, + struct vm_area_struct *mpnt) +{ + return -EOPNOTSUPP; +} +static inline void async_fork_cpr_bind(struct mm_struct *oldmm, + struct mm_struct *mm, int err) +{ +} +static inline void async_fork_cpr_rest(void) +{ +} +static inline void async_fork_cpr_done(struct mm_struct *mm, bool r, bool l) +{ +} + +static inline bool is_pmd_async_fork(pmd_t pmd) +{ + return false; +} +static inline void async_fork_fixup_pmd(struct vm_area_struct *mpnt, + pmd_t *pmd, unsigned long addr) +{ +} +static inline void async_fork_fixup_vma(struct vm_area_struct *mpnt) +{ +} +#endif + +static inline bool is_pmd_transient(pmd_t pmd) +{ + if (is_pmd_async_fork(pmd)) + return true; + return false; +} +static inline void fixup_pmd(struct vm_area_struct *vma, + pmd_t *pmd, unsigned long addr) +{ + async_fork_fixup_pmd(vma, pmd, addr); +} +static inline void fixup_vma(struct vm_area_struct *vma) +{ + async_fork_fixup_vma(vma); +} + #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 
52be7d14012b..88e0b3f3ff8e 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -716,6 +716,10 @@ struct vm_area_struct { #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +#ifdef CONFIG_ASYNC_FORK + struct vm_area_struct *async_fork_vma; +#endif + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) @@ -977,6 +981,10 @@ struct mm_struct { #endif } lru_gen; #endif /* CONFIG_LRU_GEN */ +#ifdef CONFIG_ASYNC_FORK + struct mm_struct *async_fork_mm; + unsigned long async_fork_flags; +#endif } __randomize_layout; CK_KABI_RESERVE(1) diff --git a/kernel/fork.c b/kernel/fork.c index 50cca073320e..23325fefb0ea 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -505,6 +505,8 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) { struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + fixup_vma(orig); + if (!new) return NULL; @@ -658,12 +660,23 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, LIST_HEAD(uf); VMA_ITERATOR(old_vmi, oldmm, 0); VMA_ITERATOR(vmi, mm, 0); +#ifdef CONFIG_ASYNC_FORK + unsigned long async_fork; +#endif uprobe_start_dup_mmap(); if (mmap_write_lock_killable(oldmm)) { retval = -EINTR; goto fail_uprobe_end; } +#ifdef CONFIG_ASYNC_FORK + /* Get task_async_fork with oldmm's mmap write lock hold. 
*/ + rcu_read_lock(); + async_fork = task_async_fork(current); + if (async_fork) + set_bit(ASYNC_FORK_CANDIDATE, &oldmm->async_fork_flags); + rcu_read_unlock(); +#endif flush_cache_dup_mm(oldmm); uprobe_dup_mmap(oldmm, mm); /* @@ -760,8 +773,16 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, goto fail_nomem_vmi_store; mm->map_count++; - if (!(tmp->vm_flags & VM_WIPEONFORK)) + if (!(tmp->vm_flags & VM_WIPEONFORK)) { +#ifdef CONFIG_ASYNC_FORK + if (async_fork) + retval = async_fork_cpr_fast(tmp, mpnt); + else + retval = copy_page_range(tmp, mpnt); +#else retval = copy_page_range(tmp, mpnt); +#endif + } if (tmp->vm_ops && tmp->vm_ops->open) tmp->vm_ops->open(tmp); @@ -778,6 +799,10 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, out: mmap_write_unlock(mm); flush_tlb_mm(oldmm); +#ifdef CONFIG_ASYNC_FORK + if (async_fork) + async_fork_cpr_bind(oldmm, mm, retval); +#endif mmap_write_unlock(oldmm); dup_userfaultfd_complete(&uf); fail_uprobe_end: @@ -922,6 +947,9 @@ void __mmdrop(struct mm_struct *mm) cleanup_lazy_tlbs(mm); WARN_ON_ONCE(mm == current->active_mm); +#ifdef CONFIG_ASYNC_FORK + BUG_ON(mm->async_fork_mm); +#endif mm_free_pgd(mm); destroy_context(mm); mmu_notifier_subscriptions_destroy(mm); @@ -1293,6 +1321,11 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, mm_init_uprobes_state(mm); hugetlb_count_init(mm); +#ifdef CONFIG_ASYNC_FORK + mm->async_fork_mm = NULL; + mm->async_fork_flags = 0; +#endif + if (current->mm) { mm->flags = mmf_init_flags(current->mm->flags); mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; @@ -2767,6 +2800,12 @@ __latent_entropy struct task_struct *copy_process( exit_task_namespaces(p); bad_fork_cleanup_mm: if (p->mm) { +#ifdef CONFIG_ASYNC_FORK + if (p->mm->async_fork_mm) { + WARN_ON_ONCE(clone_flags & CLONE_VM); + async_fork_cpr_done(p->mm, true, false); + } +#endif mm_clear_owner(p->mm, p); mmput(p->mm); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 
7b76f57ee83a..71c397cba58b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5478,6 +5478,8 @@ asmlinkage __visible void schedule_tail(struct task_struct *prev) finish_task_switch(prev); preempt_enable(); + async_fork_cpr_rest(); + if (current->set_child_tid) put_user(task_pid_vnr(current), current->set_child_tid); diff --git a/mm/Kconfig b/mm/Kconfig index c11cd01169e8..1f5ad4ec7f1d 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -1282,6 +1282,18 @@ config LOCK_MM_AND_FIND_VMA bool depends on !STACK_GROWSUP +config ASYNC_FORK + bool "Support copy mm asynchronously when fork" + depends on MMU && MEMCG + default y + help + This introduces a new mechanism to reduce block duration when parent + process calls fork(2). It will be very effective for processes with + large memory usage. For example, DB uses fork(2) to create a memory + snapshot, and wants fork(2) returns as soon as possible to reduce + unserviceable duration. Note that it won't speed up child's return + from fork(2). + source "mm/damon/Kconfig" endmenu diff --git a/mm/Makefile b/mm/Makefile index ec65984e2ade..08b22aa58959 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -138,3 +138,4 @@ obj-$(CONFIG_IO_MAPPING) += io-mapping.o obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o obj-$(CONFIG_GENERIC_IOREMAP) += ioremap.o obj-$(CONFIG_SHRINKER_DEBUG) += shrinker_debug.o +obj-$(CONFIG_ASYNC_FORK) += async_fork.o diff --git a/mm/async_fork.c b/mm/async_fork.c new file mode 100644 index 000000000000..0a2981086429 --- /dev/null +++ b/mm/async_fork.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +DEFINE_STATIC_KEY_FALSE(async_fork_enabled_key); +DEFINE_STATIC_KEY_FALSE(async_fork_staging_key); + +noinline int async_fork_cpr_fast(struct vm_area_struct *vma, + struct vm_area_struct *mpnt) +{ + return -EOPNOTSUPP; +} + +noinline void async_fork_cpr_bind(struct mm_struct *oldmm, + struct mm_struct *mm, int err) +{ +} + +noinline void async_fork_cpr_rest(void) +{ +} + 
+noinline void async_fork_cpr_done(struct mm_struct *mm, bool r, bool l) +{ +} + +noinline bool __is_pmd_async_fork(pmd_t pmd) +{ + return false; +} + +noinline void __async_fork_fixup_pmd(struct vm_area_struct *mpnt, pmd_t *pmd, + unsigned long addr) +{ +} + +noinline void __async_fork_fixup_vma(struct vm_area_struct *mpnt) +{ +} diff --git a/mm/gup.c b/mm/gup.c index 2576962d4538..32249c7f94da 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -503,6 +503,12 @@ static struct page *follow_page_pte(struct vm_area_struct *vma, (FOLL_PIN | FOLL_GET))) return ERR_PTR(-EINVAL); + if (is_pmd_transient(*pmd)) { + fixup_pmd(vma, pmd, address); + if (is_pmd_transient(*pmd)) + return no_page_table(vma, flags); + } + ptep = pte_offset_map_lock(mm, pmd, address, &ptl); if (!ptep) return no_page_table(vma, flags); @@ -2576,6 +2582,9 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, int nr_start = *nr, ret = 0; pte_t *ptep, *ptem; + if (is_pmd_transient(pmd)) + return 0; + ptem = ptep = pte_offset_map(&pmd, addr); if (!ptep) return 0; diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 60b445bbc6e9..c923465c6af5 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2346,6 +2346,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, if (unlikely(!mmap_read_trylock(mm))) goto breakouterloop_mmap_lock; +#ifdef CONFIG_ASYNC_FORK + /* Don't scan processes in the state of async fork. 
*/ + if (mm->async_fork_mm) + vma = NULL; +#endif + progress++; if (unlikely(hpage_collapse_test_exit(mm))) goto breakouterloop; diff --git a/mm/madvise.c b/mm/madvise.c index 98fdb9288a68..8969f30279cb 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -1017,6 +1017,8 @@ static int madvise_vma_behavior(struct vm_area_struct *vma, struct anon_vma_name *anon_name; unsigned long new_flags = vma->vm_flags; + fixup_vma(vma); + switch (behavior) { case MADV_REMOVE: return madvise_remove(vma, prev, start, end); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 116c87e0af8a..3d41a19fb3b0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4206,6 +4206,25 @@ static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, return 0; } +#ifdef CONFIG_ASYNC_FORK +static u64 mem_cgroup_async_fork_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return memcg->async_fork; +} + +static int mem_cgroup_async_fork_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + memcg->async_fork = val; + return 0; +} +#endif + static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) { struct mem_cgroup_threshold_ary *t; @@ -5150,6 +5169,13 @@ static struct cftype mem_cgroup_legacy_files[] = { .write = mem_cgroup_reset, .read_u64 = mem_cgroup_read_u64, }, +#ifdef CONFIG_ASYNC_FORK + { + .name = "async_fork", + .read_u64 = mem_cgroup_async_fork_read, + .write_u64 = mem_cgroup_async_fork_write, + }, +#endif { }, /* terminate */ }; @@ -5397,6 +5423,9 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) if (parent) { WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); +#ifdef CONFIG_ASYNC_FORK + memcg->async_fork = parent->async_fork; +#endif page_counter_init(&memcg->memory, &parent->memory); page_counter_init(&memcg->swap, 
&parent->swap); @@ -6854,6 +6883,13 @@ static struct cftype memory_files[] = { .flags = CFTYPE_NS_DELEGATABLE, .write = memory_reclaim, }, +#ifdef CONFIG_ASYNC_FORK + { + .name = "async_fork", + .read_u64 = mem_cgroup_async_fork_read, + .write_u64 = mem_cgroup_async_fork_write, + }, +#endif { } /* terminate */ }; diff --git a/mm/memory.c b/mm/memory.c index 34e76ff266de..48bad37414d9 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1780,6 +1780,9 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, */ spin_unlock(ptl); } + + fixup_pmd(vma, pmd, addr); + if (pmd_none(*pmd)) { addr = next; continue; @@ -5639,6 +5642,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, return 0; } } + + fixup_pmd(vma, vmf.pmd, address); } return handle_pte_fault(&vmf); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 94c74c594d10..ef427cb86dc4 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -634,6 +634,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, struct mmu_gather tlb; long nr_updated; + fixup_vma(vma); + tlb_gather_mmu(&tlb, vma->vm_mm); nr_updated = change_protection(&tlb, vma, addr, end, MM_CP_PROT_NUMA); diff --git a/mm/mlock.c b/mm/mlock.c index f79d8262c1a0..7e984843d733 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -491,6 +491,8 @@ static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma, /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */ goto out; + fixup_vma(vma); + pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); *prev = vma_merge(vmi, mm, *prev, start, end, newflags, vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), diff --git a/mm/mmap.c b/mm/mmap.c index e4dfeaef668a..70de32960581 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -350,6 +350,10 @@ anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) { struct anon_vma_chain *avc; +#ifdef CONFIG_ASYNC_FORK + WARN_ON_ONCE(vma->async_fork_vma); +#endif + list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) 
anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); } @@ -888,6 +892,9 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm, if (vm_flags & VM_SPECIAL) return NULL; + if (prev) + fixup_vma(prev); + /* Does the input range span an existing VMA? (cases 5 - 8) */ curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end); @@ -897,6 +904,9 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm, else next = NULL; /* case 5 */ + if (next) + fixup_vma(next); + if (prev) { vma_start = prev->vm_start; vma_pgoff = prev->vm_pgoff; @@ -2000,6 +2010,8 @@ static int expand_upwards(struct vm_area_struct *vma, unsigned long address) return -ENOMEM; } + fixup_vma(vma); + /* Lock the VMA before expanding to prevent concurrent page faults */ vma_start_write(vma); /* @@ -2093,6 +2105,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) return -ENOMEM; } + fixup_vma(vma); + /* Lock the VMA before expanding to prevent concurrent page faults */ vma_start_write(vma); /* @@ -2357,6 +2371,8 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, struct vm_area_struct *new; int err; + fixup_vma(vma); + WARN_ON(vma->vm_start >= addr); WARN_ON(vma->vm_end <= addr); @@ -2505,6 +2521,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, if (error) goto end_split_failed; } + fixup_vma(next); vma_start_write(next); mas_set(&mas_detach, count); error = mas_store_gfp(&mas_detach, next, GFP_KERNEL); diff --git a/mm/mprotect.c b/mm/mprotect.c index 7e870a8c9402..1fb086eb0e83 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -765,6 +765,8 @@ static int do_mprotect_pkey(unsigned long start, size_t len, unsigned long newflags; int new_vma_pkey; + fixup_vma(vma); + if (vma->vm_start != tmp) { error = -ENOMEM; break; diff --git a/mm/mremap.c b/mm/mremap.c index df71010baabe..b688b238b61f 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -761,6 +761,8 @@ static struct 
vm_area_struct *vma_to_resize(unsigned long addr, if (!vma) return ERR_PTR(-EFAULT); + fixup_vma(vma); + /* * !old_len is a special case where an attempt is made to 'duplicate' * a mapping. This makes no sense for private mappings as it will -- Gitee From 28efdfbc971cdc1bee592ae32cea7060bac2b746 Mon Sep 17 00:00:00 2001 From: "zhouzhixin.zzx" Date: Tue, 10 Sep 2024 15:46:08 +0800 Subject: [PATCH 1254/2138] anolis: configs: enable CONFIG_ASYNC_FORK by default ANBZ: #10835 This enables CONFIG_ASYNC_FORK for both x86 and arm64. Signed-off-by: zhouzhixin.zzx Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3820 --- anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_FORK | 1 + 1 file changed, 1 insertion(+) create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_FORK diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_FORK b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_FORK new file mode 100644 index 000000000000..16d0f9953bbf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ASYNC_FORK @@ -0,0 +1 @@ +CONFIG_ASYNC_FORK=y -- Gitee From 5d46a551b9d919cf1fa7077eb7e425e1f3c66e30 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 13 Sep 2023 17:51:24 +0800 Subject: [PATCH 1255/2138] mm: migrate: remove PageTransHuge check in numamigrate_isolate_page() ANBZ: #9728 commit a8ac4a767dcd9d87d8229045904d9fe15ea5e0e8 upstream Patch series "mm: migrate: more folio conversion and unification", v3. Convert more migrate functions to use a folio, it is also a preparation for large folio migration support when balancing numa. 
This patch (of 8): The assert VM_BUG_ON_PAGE(order && !PageTransHuge(page), page) is not very useful, 1) for a tail/base page, order = 0, for a head page, the order > 0 && PageTransHuge() is true 2) there is a PageCompound() check and only base page is handled in do_numa_page(), and do_huge_pmd_numa_page() only handle PMD-mapped THP 3) even though the page is a tail page, isolate_lru_page() will post a warning, and fail to isolate the page 4) if large folio/pte-mapped THP migration supported in the future, we could migrate the entire folio if numa fault on a tail page so just remove the check. Link: https://lkml.kernel.org/r/20230913095131.2426871-1-wangkefeng.wang@huawei.com Link: https://lkml.kernel.org/r/20230913095131.2426871-2-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Suggested-by: Matthew Wilcox (Oracle) Cc: David Hildenbrand Cc: Huang Ying Cc: Hugh Dickins Cc: Mike Kravetz Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/migrate.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index d9e464f96f0e..116e082985dd 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2506,8 +2506,6 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) int nr_pages = thp_nr_pages(page); int order = compound_order(page); - VM_BUG_ON_PAGE(order && !PageTransHuge(page), page); - /* Do not migrate THP mapped by multiple processes */ if (PageTransHuge(page) && total_mapcount(page) > 1) return 0; -- Gitee From 756dda2050addfbb25d9667903f80dde25dfcb05 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 13 Sep 2023 17:51:25 +0800 Subject: [PATCH 1256/2138] mm: migrate: remove THP mapcount check in numamigrate_isolate_page() ANBZ: #9728 commit 728be28fae8c838d52c91dce4867133798146357 upstream The check of THP mapped by multiple processes was introduced by commit 04fa5d6a6547 ("mm: migrate: check page_count of THP before 
migrating") and refactor by commit 340ef3902cf2 ("mm: numa: cleanup flow of transhuge page migration"), which is out of date, since migrate_misplaced_page() is now using the standard migrate_pages() for small pages and THPs, the reference count checking is in folio_migrate_mapping(), so let's remove the special check for THP. Link: https://lkml.kernel.org/r/20230913095131.2426871-3-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Suggested-by: Matthew Wilcox (Oracle) Reviewed-by: "Huang, Ying" Cc: David Hildenbrand Cc: Hugh Dickins Cc: Mike Kravetz Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/migrate.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index 116e082985dd..d07e1c7fd671 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2506,10 +2506,6 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) int nr_pages = thp_nr_pages(page); int order = compound_order(page); - /* Do not migrate THP mapped by multiple processes */ - if (PageTransHuge(page) && total_mapcount(page) > 1) - return 0; - /* Avoid migrating to a node that is nearly full */ if (!migrate_balanced_pgdat(pgdat, nr_pages)) { int z; -- Gitee From 15376b35473e9f47b92831f4c44d79e422589eee Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 13 Sep 2023 17:51:26 +0800 Subject: [PATCH 1257/2138] mm: migrate: convert numamigrate_isolate_page() to numamigrate_isolate_folio() ANBZ: #9728 commit 2ac9e99f3b21b2864305fbfba4bae5913274c409 upstream Rename numamigrate_isolate_page() to numamigrate_isolate_folio(), then make it takes a folio and use folio API to save compound_head() calls. 
Link: https://lkml.kernel.org/r/20230913095131.2426871-4-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Zi Yan Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Mike Kravetz Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/migrate.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index d07e1c7fd671..c4805c527c89 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2501,10 +2501,9 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src, return __folio_alloc_node(gfp, order, nid); } -static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) +static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio) { - int nr_pages = thp_nr_pages(page); - int order = compound_order(page); + int nr_pages = folio_nr_pages(folio); /* Avoid migrating to a node that is nearly full */ if (!migrate_balanced_pgdat(pgdat, nr_pages)) { @@ -2524,22 +2523,23 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) if (z < 0) return 0; - wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE); + wakeup_kswapd(pgdat->node_zones + z, 0, + folio_order(folio), ZONE_MOVABLE); return 0; } - if (!isolate_lru_page(page)) + if (!folio_isolate_lru(folio)) return 0; - mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_is_file_lru(page), + node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio), nr_pages); /* - * Isolating the page has taken another reference, so the - * caller's reference can be safely dropped without the page + * Isolating the folio has taken another reference, so the + * caller's reference can be safely dropped without the folio * disappearing underneath us during migration. 
*/ - put_page(page); + folio_put(folio); return 1; } @@ -2573,7 +2573,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, if (page_is_file_lru(page) && PageDirty(page)) goto out; - isolated = numamigrate_isolate_page(pgdat, page); + isolated = numamigrate_isolate_folio(pgdat, page_folio(page)); if (!isolated) goto out; -- Gitee From 212598adf3ab461466ebf600a42b736c286b0070 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 13 Sep 2023 17:51:27 +0800 Subject: [PATCH 1258/2138] mm: migrate: convert migrate_misplaced_page() to migrate_misplaced_folio() ANBZ: #9728 commit 73eab3ca481e5be0f1fd8140365d604482f84ee1 upstream At present, numa balance only support base page and PMD-mapped THP, but we will expand to support to migrate large folio/pte-mapped THP in the future, it is better to make migrate_misplaced_page() to take a folio instead of a page, and rename it to migrate_misplaced_folio(), it is a preparation, also this remove several compound_head() calls. Link: https://lkml.kernel.org/r/20230913095131.2426871-5-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Zi Yan Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Mike Kravetz Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- include/linux/migrate.h | 4 ++-- mm/huge_memory.c | 2 +- mm/memory.c | 2 +- mm/migrate.c | 39 +++++++++++++++++++++------------------ 4 files changed, 25 insertions(+), 22 deletions(-) diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 711dd9412561..2ce13e8a309b 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -142,10 +142,10 @@ const struct movable_operations *page_movable_ops(struct page *page) } #ifdef CONFIG_NUMA_BALANCING -int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, +int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int 
node); #else -static inline int migrate_misplaced_page(struct page *page, +static inline int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, int node) { return -EAGAIN; /* can't migrate now */ diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 983d50b04b56..2cf56a22895f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1719,7 +1719,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) spin_unlock(vmf->ptl); writable = false; - migrated = migrate_misplaced_page(page, vma, target_nid); + migrated = migrate_misplaced_folio(page_folio(page), vma, target_nid); if (migrated) { flags |= TNF_MIGRATED; page_nid = target_nid; diff --git a/mm/memory.c b/mm/memory.c index 48bad37414d9..255a2d643ffb 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5343,7 +5343,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) writable = false; /* Migrate to the requested node */ - if (migrate_misplaced_page(page, vma, target_nid)) { + if (migrate_misplaced_folio(page_folio(page), vma, target_nid)) { page_nid = target_nid; flags |= TNF_MIGRATED; task_numa_fault(last_cpupid, page_nid, 1, flags); diff --git a/mm/migrate.c b/mm/migrate.c index c4805c527c89..fc0737363e60 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2544,55 +2544,58 @@ static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio) } /* - * Attempt to migrate a misplaced page to the specified destination + * Attempt to migrate a misplaced folio to the specified destination * node. Caller is expected to have an elevated reference count on - * the page that will be dropped by this function before returning. + * the folio that will be dropped by this function before returning. 
*/ -int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, - int node) +int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, + int node) { pg_data_t *pgdat = NODE_DATA(node); int isolated; int nr_remaining; unsigned int nr_succeeded; LIST_HEAD(migratepages); - int nr_pages = thp_nr_pages(page); + int nr_pages = folio_nr_pages(folio); /* - * Don't migrate file pages that are mapped in multiple processes + * Don't migrate file folios that are mapped in multiple processes * with execute permissions as they are probably shared libraries. + * To check if the folio is shared, ideally we want to make sure + * every page is mapped to the same process. Doing that is very + * expensive, so check the estimated mapcount of the folio instead. */ - if (page_mapcount(page) != 1 && page_is_file_lru(page) && + if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) && (vma->vm_flags & VM_EXEC)) goto out; /* - * Also do not migrate dirty pages as not all filesystems can move - * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles. + * Also do not migrate dirty folios as not all filesystems can move + * dirty folios in MIGRATE_ASYNC mode which is a waste of cycles. 
*/ - if (page_is_file_lru(page) && PageDirty(page)) + if (folio_is_file_lru(folio) && folio_test_dirty(folio)) goto out; - isolated = numamigrate_isolate_folio(pgdat, page_folio(page)); + isolated = numamigrate_isolate_folio(pgdat, folio); if (!isolated) goto out; - list_add(&page->lru, &migratepages); + list_add(&folio->lru, &migratepages); nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio, NULL, node, MIGRATE_ASYNC, MR_NUMA_MISPLACED, &nr_succeeded); if (nr_remaining) { if (!list_empty(&migratepages)) { - list_del(&page->lru); - mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + - page_is_file_lru(page), -nr_pages); - putback_lru_page(page); + list_del(&folio->lru); + node_stat_mod_folio(folio, NR_ISOLATED_ANON + + folio_is_file_lru(folio), -nr_pages); + folio_putback_lru(folio); } isolated = 0; } if (nr_succeeded) { count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded); - if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node)) + if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node)) mod_node_page_state(pgdat, PGPROMOTE_SUCCESS, nr_succeeded); } @@ -2600,7 +2603,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, return isolated; out: - put_page(page); + folio_put(folio); return 0; } #endif /* CONFIG_NUMA_BALANCING */ -- Gitee From e54feef5b745184c4bb0f6b6e7aabb944c4ba631 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 13 Sep 2023 17:51:28 +0800 Subject: [PATCH 1259/2138] mm: migrate: use __folio_test_movable() ANBZ: #9728 commit 7e2a5e5ab217d5e4166cdbdf4af8c5e34b6200bb upstream Use __folio_test_movable(), no need to convert from folio to page again. 
Link: https://lkml.kernel.org/r/20230913095131.2426871-6-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Reviewed-by: Zi Yan Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Mike Kravetz Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/migrate.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index fc0737363e60..6ef8be894fdb 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -157,8 +157,8 @@ void putback_movable_pages(struct list_head *l) list_del(&folio->lru); /* * We isolated non-lru movable folio so here we can use - * __PageMovable because LRU folio's mapping cannot have - * PAGE_MAPPING_MOVABLE. + * __folio_test_movable because LRU folio's mapping cannot + * have PAGE_MAPPING_MOVABLE. */ if (unlikely(__folio_test_movable(folio))) { VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio); @@ -953,7 +953,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src, enum migrate_mode mode) { int rc = -EAGAIN; - bool is_lru = !__PageMovable(&src->page); + bool is_lru = !__folio_test_movable(src); VM_BUG_ON_FOLIO(!folio_test_locked(src), src); VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst); @@ -1000,7 +1000,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src, * src is freed; but stats require that PageAnon be left as PageAnon. */ if (rc == MIGRATEPAGE_SUCCESS) { - if (__PageMovable(&src->page)) { + if (__folio_test_movable(src)) { VM_BUG_ON_FOLIO(!folio_test_isolated(src), src); /* @@ -1091,7 +1091,7 @@ static void migrate_folio_done(struct folio *src, /* * Compaction can migrate also non-LRU pages which are * not accounted to NR_ISOLATED_*. 
They can be recognized - * as __PageMovable + * as __folio_test_movable */ if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION) mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON + @@ -1112,7 +1112,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio, int rc = -EAGAIN; int old_page_state = 0; struct anon_vma *anon_vma = NULL; - bool is_lru = !__PageMovable(&src->page); + bool is_lru = !__folio_test_movable(src); bool locked = false; bool dst_locked = false; @@ -1273,7 +1273,7 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private, int rc; int old_page_state = 0; struct anon_vma *anon_vma = NULL; - bool is_lru = !__PageMovable(&src->page); + bool is_lru = !__folio_test_movable(src); struct list_head *prev; __migrate_folio_extract(dst, &old_page_state, &anon_vma); -- Gitee From dd6089c7052380ab414a34b5f3b62dbff8be2692 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 13 Sep 2023 17:51:29 +0800 Subject: [PATCH 1260/2138] mm: migrate: use a folio in add_page_for_migration() ANBZ: #9728 commit d64cfccbc805663a2c5691f638cf9198b9676a9f upstream Use a folio in add_page_for_migration() to save compound_head() calls. 
Link: https://lkml.kernel.org/r/20230913095131.2426871-7-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Zi Yan Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Mike Kravetz Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/migrate.c | 40 +++++++++++++++++++--------------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index 6ef8be894fdb..35fcb5738bf6 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2070,6 +2070,7 @@ static int add_page_for_migration(struct mm_struct *mm, const void __user *p, struct vm_area_struct *vma; unsigned long addr; struct page *page; + struct folio *folio; int err; bool isolated; @@ -2092,45 +2093,42 @@ static int add_page_for_migration(struct mm_struct *mm, const void __user *p, if (!page) goto out; - if (is_zone_device_page(page)) - goto out_putpage; + folio = page_folio(page); + if (folio_is_zone_device(folio)) + goto out_putfolio; err = 0; - if (page_to_nid(page) == node) - goto out_putpage; + if (folio_nid(folio) == node) + goto out_putfolio; err = -EACCES; if (page_mapcount(page) > 1 && !migrate_all) - goto out_putpage; + goto out_putfolio; - if (PageHuge(page)) { + if (folio_test_hugetlb(folio)) { if (PageHead(page)) { - isolated = isolate_hugetlb(page_folio(page), pagelist); + isolated = isolate_hugetlb(folio, pagelist); err = isolated ? 
1 : -EBUSY; } } else { - struct page *head; - - head = compound_head(page); - isolated = isolate_lru_page(head); + isolated = folio_isolate_lru(folio); if (!isolated) { err = -EBUSY; - goto out_putpage; + goto out_putfolio; } err = 1; - list_add_tail(&head->lru, pagelist); - mod_node_page_state(page_pgdat(head), - NR_ISOLATED_ANON + page_is_file_lru(head), - thp_nr_pages(head)); + list_add_tail(&folio->lru, pagelist); + node_stat_mod_folio(folio, + NR_ISOLATED_ANON + folio_is_file_lru(folio), + folio_nr_pages(folio)); } -out_putpage: +out_putfolio: /* - * Either remove the duplicate refcount from - * isolate_lru_page() or drop the page ref if it was - * not isolated. + * Either remove the duplicate refcount from folio_isolate_lru() + * or drop the folio ref if it was not isolated. */ - put_page(page); + folio_put(folio); out: mmap_read_unlock(mm); return err; -- Gitee From d43fd80ac3f01df6b58ffb9b83becf43b23fd540 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 13 Sep 2023 17:51:30 +0800 Subject: [PATCH 1261/2138] mm: migrate: remove PageHead() check for HugeTLB in add_page_for_migration() ANBZ: #9728 commit b426ed7889be80359cb4edef142e5c5fa697b068 upstream There is some different between hugeTLB and THP behave when passed the address of a tail page, for THP, it will migrate the entire THP page, but for HugeTLB, it will return -EACCES, or -ENOENT before commit e66f17ff7177 ("mm/hugetlb: take page table lock in follow_huge_pmd()"), -EACCES The page is mapped by multiple processes and can be moved only if MPOL_MF_MOVE_ALL is specified. -ENOENT The page is not present. But when check manual[1], both of the two errnos are not suitable, it is better to keep the same behave between hugetlb and THP when passed the address of a tail page, so let's just remove the PageHead() check for HugeTLB. 
[1] https://man7.org/linux/man-pages/man2/move_pages.2.html Link: https://lkml.kernel.org/r/20230913095131.2426871-8-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Suggested-by: Mike Kravetz Acked-by: Zi Yan Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/migrate.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index 35fcb5738bf6..e73df539a461 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2106,10 +2106,8 @@ static int add_page_for_migration(struct mm_struct *mm, const void __user *p, goto out_putfolio; if (folio_test_hugetlb(folio)) { - if (PageHead(page)) { - isolated = isolate_hugetlb(folio, pagelist); - err = isolated ? 1 : -EBUSY; - } + isolated = isolate_hugetlb(folio, pagelist); + err = isolated ? 1 : -EBUSY; } else { isolated = folio_isolate_lru(folio); if (!isolated) { -- Gitee From 4d1099622884ea6eedbde378cfb571b905748a31 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 13 Sep 2023 17:51:31 +0800 Subject: [PATCH 1262/2138] mm: migrate: remove isolated variable in add_page_for_migration() ANBZ: #9728 commit fa1df3f6287e1e1fd8b5309828238e2c728e985f upstream Directly check the return of isolate_hugetlb() and folio_isolate_lru() to remove isolated variable, also setup err = -EBUSY in advance before isolation, and update err only when successfully queued for migration, which could help us to unify and simplify code a bit. 
Link: https://lkml.kernel.org/r/20230913095131.2426871-9-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Zi Yan Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Mike Kravetz Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/migrate.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index e73df539a461..6795c1a4637a 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2072,7 +2072,6 @@ static int add_page_for_migration(struct mm_struct *mm, const void __user *p, struct page *page; struct folio *folio; int err; - bool isolated; mmap_read_lock(mm); addr = (unsigned long)untagged_addr_remote(mm, p); @@ -2105,15 +2104,13 @@ static int add_page_for_migration(struct mm_struct *mm, const void __user *p, if (page_mapcount(page) > 1 && !migrate_all) goto out_putfolio; + err = -EBUSY; if (folio_test_hugetlb(folio)) { - isolated = isolate_hugetlb(folio, pagelist); - err = isolated ? 1 : -EBUSY; + if (isolate_hugetlb(folio, pagelist)) + err = 1; } else { - isolated = folio_isolate_lru(folio); - if (!isolated) { - err = -EBUSY; + if (!folio_isolate_lru(folio)) goto out_putfolio; - } err = 1; list_add_tail(&folio->lru, pagelist); -- Gitee From 271173b5a148e829572dcecd67acb33e811a1e69 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Thu, 21 Sep 2023 15:44:12 +0800 Subject: [PATCH 1263/2138] mm: memory: add vm_normal_folio_pmd() ANBZ: #9728 commit 65610453459f9048678a0daef89d592e412ec00a upstream Patch series "mm: convert numa balancing functions to use a folio", v2. do_numa_pages() only handles non-compound pages, and only PMD-mapped THPs are handled in do_huge_pmd_numa_page(). But a large, PTE-mapped folio will be supported so let's convert more numa balancing functions to use/take a folio in preparation for that, no functional change intended for now. 
This patch (of 6): The new vm_normal_folio_pmd() wrapper is similar to vm_normal_folio(), which allow them to completely replace the struct page variables with struct folio variables. Link: https://lkml.kernel.org/r/20230921074417.24004-1-wangkefeng.wang@huawei.com Link: https://lkml.kernel.org/r/20230921074417.24004-2-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Mike Kravetz Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- include/linux/mm.h | 2 ++ mm/memory.c | 10 ++++++++++ 2 files changed, 12 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index 6af90773f09a..8518369a60ab 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2342,6 +2342,8 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr, pte_t pte); struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t pmd); struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd); diff --git a/mm/memory.c b/mm/memory.c index 255a2d643ffb..4b6751eee205 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -691,6 +691,16 @@ struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, out: return pfn_to_page(pfn); } + +struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t pmd) +{ + struct page *page = vm_normal_page_pmd(vma, addr, pmd); + + if (page) + return page_folio(page); + return NULL; +} #endif static void restore_exclusive_pte(struct vm_area_struct *vma, -- Gitee From d43e9fc5c6f632a9ebb7cbc1280ec0e064cc139b Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Thu, 21 Sep 2023 15:44:13 +0800 Subject: [PATCH 1264/2138] mm: huge_memory: use a folio in do_huge_pmd_numa_page() 
ANBZ: #9728 commit 667ffc31aa95e7023707924b08415523208bce9d upstream Use a folio in do_huge_pmd_numa_page(), reduce three page_folio() calls to one, no functional change intended. Link: https://lkml.kernel.org/r/20230921074417.24004-3-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Mike Kravetz Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 [Fixes conflicts] Signed-off-by: Qinyun Tan --- mm/huge_memory.c | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 2cf56a22895f..988b82d30a5a 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1669,9 +1669,9 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) struct vm_area_struct *vma = vmf->vma; pmd_t oldpmd = vmf->orig_pmd; pmd_t pmd; - struct page *page; + struct folio *folio; unsigned long haddr = vmf->address & HPAGE_PMD_MASK; - int page_nid = NUMA_NO_NODE; + int nid = NUMA_NO_NODE; int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK); bool migrated = false, writable = false; int flags = 0; @@ -1693,37 +1693,35 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) can_change_pmd_writable(vma, vmf->address, pmd)) writable = true; - page = vm_normal_page_pmd(vma, haddr, pmd); - if (!page) + folio = vm_normal_folio_pmd(vma, haddr, pmd); + if (!folio) goto out_map; /* See similar comment in do_numa_page for explanation */ if (!writable) flags |= TNF_NO_GROUP; - page_nid = page_to_nid(page); + nid = folio_nid(folio); /* * For memory tiering mode, cpupid of slow memory page is used * to record page access time. So use default value. 
*/ - if (node_is_toptier(page_nid)) - last_cpupid = page_cpupid_last(page); - target_nid = numa_migrate_prep(page, vma, haddr, page_nid, - &flags); - + if (node_is_toptier(nid)) + last_cpupid = page_cpupid_last(&folio->page); + target_nid = numa_migrate_prep(&folio->page, vma, haddr, nid, &flags); if (target_nid == NUMA_NO_NODE) { - put_page(page); + folio_put(folio); goto out_map; } spin_unlock(vmf->ptl); writable = false; - migrated = migrate_misplaced_folio(page_folio(page), vma, target_nid); + migrated = migrate_misplaced_folio(folio, vma, target_nid); if (migrated) { flags |= TNF_MIGRATED; - page_nid = target_nid; - task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags); + nid = target_nid; + task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); return 0; } @@ -1743,8 +1741,8 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); spin_unlock(vmf->ptl); - if (page_nid != NUMA_NO_NODE) - task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags); + if (nid != NUMA_NO_NODE) + task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); return 0; } -- Gitee From 0a54cb48cfb2f7d09cd28ec25b2d82d342da49bc Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Thu, 21 Sep 2023 15:44:14 +0800 Subject: [PATCH 1265/2138] mm: memory: use a folio in do_numa_page() ANBZ: #9728 commit 6695cf68b15c215d33b8add64c33e01e3cbe236c upstream Numa balancing only try to migrate non-compound page in do_numa_page(), use a folio in it to save several compound_head calls, note we use folio_estimated_sharers(), it is enough to check the folio sharers since only normal page is handled, if large folio numa balancing is supported, a precise folio sharers check would be used, no functional change intended. 
Link: https://lkml.kernel.org/r/20230921074417.24004-4-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Mike Kravetz Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 [Fixes conflicts] Signed-off-by: Qinyun Tan --- mm/memory.c | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 4b6751eee205..029855a624ae 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5275,8 +5275,8 @@ int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, static vm_fault_t do_numa_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; - struct page *page = NULL; - int page_nid = NUMA_NO_NODE; + struct folio *folio = NULL; + int nid = NUMA_NO_NODE; bool writable = false; int last_cpupid; int target_nid; @@ -5307,12 +5307,12 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) can_change_pte_writable(vma, vmf->address, pte)) writable = true; - page = vm_normal_page(vma, vmf->address, pte); - if (!page || is_zone_device_page(page)) + folio = vm_normal_folio(vma, vmf->address, pte); + if (!folio || folio_is_zone_device(folio)) goto out_map; /* TODO: handle PTE-mapped THP */ - if (PageCompound(page)) + if (folio_test_large(folio)) goto out_map; /* @@ -5327,36 +5327,36 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) flags |= TNF_NO_GROUP; /* - * Flag if the page is shared between multiple address spaces. This + * Flag if the folio is shared between multiple address spaces. 
This * is later used when determining whether to group tasks together */ - if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) + if (folio_estimated_sharers(folio) > 1 && (vma->vm_flags & VM_SHARED)) flags |= TNF_SHARED; - page_nid = page_to_nid(page); + nid = folio_nid(folio); /* * For memory tiering mode, cpupid of slow memory page is used * to record page access time. So use default value. */ if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && - !node_is_toptier(page_nid)) + !node_is_toptier(nid)) last_cpupid = (-1 & LAST_CPUPID_MASK); else - last_cpupid = page_cpupid_last(page); - target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid, - &flags); + last_cpupid = page_cpupid_last(&folio->page); + target_nid = numa_migrate_prep(&folio->page, vma, vmf->address, nid, + &flags); if (target_nid == NUMA_NO_NODE) { - put_page(page); + folio_put(folio); goto out_map; } pte_unmap_unlock(vmf->pte, vmf->ptl); writable = false; /* Migrate to the requested node */ - if (migrate_misplaced_folio(page_folio(page), vma, target_nid)) { - page_nid = target_nid; + if (migrate_misplaced_folio(folio, vma, target_nid)) { + nid = target_nid; flags |= TNF_MIGRATED; - task_numa_fault(last_cpupid, page_nid, 1, flags); + task_numa_fault(last_cpupid, nid, 1, flags); return 0; } @@ -5369,6 +5369,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } + out_map: /* * Make it present again, depending on how arch implements @@ -5383,8 +5384,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); pte_unmap_unlock(vmf->pte, vmf->ptl); - if (page_nid != NUMA_NO_NODE) - task_numa_fault(last_cpupid, page_nid, 1, flags); + if (nid != NUMA_NO_NODE) + task_numa_fault(last_cpupid, nid, 1, flags); return 0; } -- Gitee From de97afcf0804b9481162ff05ac540eda0b54ce51 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Thu, 21 Sep 2023 15:44:15 +0800 Subject: [PATCH 
1266/2138] mm: memory: make numa_migrate_prep() to take a folio ANBZ: #9728 commit cda6d93672ac5dd8af778a3f3e6082e12233b65b upstream In preparation for large folio numa balancing, make numa_migrate_prep() to take a folio, no functional change intended. Link: https://lkml.kernel.org/r/20230921074417.24004-5-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Mike Kravetz Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/huge_memory.c | 2 +- mm/internal.h | 2 +- mm/memory.c | 9 ++++----- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 988b82d30a5a..d97525ffae28 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1708,7 +1708,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) */ if (node_is_toptier(nid)) last_cpupid = page_cpupid_last(&folio->page); - target_nid = numa_migrate_prep(&folio->page, vma, haddr, nid, &flags); + target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags); if (target_nid == NUMA_NO_NODE) { folio_put(folio); goto out_map; diff --git a/mm/internal.h b/mm/internal.h index 20736585cb98..ce29c2e837b4 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1057,7 +1057,7 @@ void vunmap_range_noflush(unsigned long start, unsigned long end); void __vunmap_range_noflush(unsigned long start, unsigned long end); -int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, +int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, int page_nid, int *flags); void free_zone_device_page(struct page *page); diff --git a/mm/memory.c b/mm/memory.c index 029855a624ae..c166348c2282 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5255,10 +5255,10 @@ static vm_fault_t do_fault(struct vm_fault *vmf) return ret; } -int numa_migrate_prep(struct page *page, struct 
vm_area_struct *vma, +int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, int page_nid, int *flags) { - get_page(page); + folio_get(folio); /* Record the current PID acceesing VMA */ vma_set_access_pid_bit(vma); @@ -5269,7 +5269,7 @@ int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, *flags |= TNF_FAULT_LOCAL; } - return mpol_misplaced(page, vma, addr); + return mpol_misplaced(&folio->page, vma, addr); } static vm_fault_t do_numa_page(struct vm_fault *vmf) @@ -5343,8 +5343,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) last_cpupid = (-1 & LAST_CPUPID_MASK); else last_cpupid = page_cpupid_last(&folio->page); - target_nid = numa_migrate_prep(&folio->page, vma, vmf->address, nid, - &flags); + target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags); if (target_nid == NUMA_NO_NODE) { folio_put(folio); goto out_map; -- Gitee From a9a7da299962df49e758e774e0bc937cf3cf5e34 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Thu, 21 Sep 2023 15:44:16 +0800 Subject: [PATCH 1267/2138] mm: mempolicy: make mpol_misplaced() to take a folio ANBZ: #9728 commit 75c70128a67311070115b90d826a229d4bbbb2b5 upstream In preparation for large folio numa balancing, make mpol_misplaced() to take a folio, no functional change intended. 
Link: https://lkml.kernel.org/r/20230921074417.24004-6-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Mike Kravetz Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- include/linux/mempolicy.h | 5 +++-- mm/memory.c | 2 +- mm/mempolicy.c | 22 ++++++++++++---------- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 71b4a50fb589..4888f8ee09b3 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -178,7 +178,7 @@ extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); /* Check if a vma is migratable */ extern bool vma_migratable(struct vm_area_struct *vma); -extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); +int mpol_misplaced(struct folio *, struct vm_area_struct *, unsigned long); extern void mpol_put_task_policy(struct task_struct *); static inline bool mpol_is_preferred_many(struct mempolicy *pol) @@ -282,7 +282,8 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol) } #endif -static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, +static inline int mpol_misplaced(struct folio *folio, + struct vm_area_struct *vma, unsigned long address) { return -1; /* no node preference */ diff --git a/mm/memory.c b/mm/memory.c index c166348c2282..a135ccb6e7a9 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5269,7 +5269,7 @@ int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma, *flags |= TNF_FAULT_LOCAL; } - return mpol_misplaced(&folio->page, vma, addr); + return mpol_misplaced(folio, vma, addr); } static vm_fault_t do_numa_page(struct vm_fault *vmf) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index ef427cb86dc4..a2cea3f092bf 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2545,24 +2545,25 
@@ static void sp_free(struct sp_node *n) } /** - * mpol_misplaced - check whether current page node is valid in policy + * mpol_misplaced - check whether current folio node is valid in policy * - * @page: page to be checked - * @vma: vm area where page mapped - * @addr: virtual address where page mapped + * @folio: folio to be checked + * @vma: vm area where folio mapped + * @addr: virtual address in @vma for shared policy lookup and interleave policy * - * Lookup current policy node id for vma,addr and "compare to" page's + * Lookup current policy node id for vma,addr and "compare to" folio's * node id. Policy determination "mimics" alloc_page_vma(). * Called from fault path where we know the vma and faulting address. * * Return: NUMA_NO_NODE if the page is in a node that is valid for this - * policy, or a suitable node ID to allocate a replacement page from. + * policy, or a suitable node ID to allocate a replacement folio from. */ -int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) +int mpol_misplaced(struct folio *folio, struct vm_area_struct *vma, + unsigned long addr) { struct mempolicy *pol; struct zoneref *z; - int curnid = page_to_nid(page); + int curnid = folio_nid(folio); unsigned long pgoff; int thiscpu = raw_smp_processor_id(); int thisnid = cpu_to_node(thiscpu); @@ -2618,11 +2619,12 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long BUG(); } - /* Migrate the page towards the node whose CPU is referencing it */ + /* Migrate the folio towards the node whose CPU is referencing it */ if (pol->flags & MPOL_F_MORON) { polnid = thisnid; - if (!should_numa_migrate_memory(current, page, curnid, thiscpu)) + if (!should_numa_migrate_memory(current, &folio->page, curnid, + thiscpu)) goto out; } -- Gitee From de80035fd1dd559b98eeb6a5df0327433b6fc779 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Thu, 21 Sep 2023 15:44:17 +0800 Subject: [PATCH 1268/2138] sched/numa, mm: make numa migrate 
functions to take a folio ANBZ: #9728 commit 8c9ae56dc73b5ae48a14000b96292bd4f2aeb710 upstream The cpupid (or access time) is stored in the head page for THP, so it is safe to make should_numa_migrate_memory() and numa_hint_fault_latency() take a folio. This is in preparation for large folio numa balancing. Link: https://lkml.kernel.org/r/20230921074417.24004-7-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Cc: Mike Kravetz Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- include/linux/sched/numa_balancing.h | 6 +++--- kernel/sched/fair.c | 12 ++++++------ mm/mempolicy.c | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h index b69afb8630db..52b22c5c396d 100644 --- a/include/linux/sched/numa_balancing.h +++ b/include/linux/sched/numa_balancing.h @@ -30,8 +30,8 @@ extern void task_numa_fault(int last_node, int node, int pages, int flags); extern pid_t task_numa_group_id(struct task_struct *p); extern void set_numabalancing_state(bool enabled); extern void task_numa_free(struct task_struct *p, bool final); -extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page, - int src_nid, int dst_cpu); +bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio, + int src_nid, int dst_cpu); #else static inline void task_numa_fault(int last_node, int node, int pages, int flags) @@ -48,7 +48,7 @@ static inline void task_numa_free(struct task_struct *p, bool final) { } static inline bool should_numa_migrate_memory(struct task_struct *p, - struct page *page, int src_nid, int dst_cpu) + struct folio *folio, int src_nid, int dst_cpu) { return true; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 06d5e8a39e75..861905a9813a 100644 --- 
a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1870,12 +1870,12 @@ static bool pgdat_free_space_enough(struct pglist_data *pgdat) * The smaller the hint page fault latency, the higher the possibility * for the page to be hot. */ -static int numa_hint_fault_latency(struct page *page) +static int numa_hint_fault_latency(struct folio *folio) { int last_time, time; time = jiffies_to_msecs(jiffies); - last_time = xchg_page_access_time(page, time); + last_time = xchg_page_access_time(&folio->page, time); return (time - last_time) & PAGE_ACCESS_TIME_MASK; } @@ -1932,7 +1932,7 @@ static void numa_promotion_adjust_threshold(struct pglist_data *pgdat, } } -bool should_numa_migrate_memory(struct task_struct *p, struct page * page, +bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio, int src_nid, int dst_cpu) { struct numa_group *ng = deref_curr_numa_group(p); @@ -1962,16 +1962,16 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page, numa_promotion_adjust_threshold(pgdat, rate_limit, def_th); th = pgdat->nbp_threshold ? 
: def_th; - latency = numa_hint_fault_latency(page); + latency = numa_hint_fault_latency(folio); if (latency >= th) return false; return !numa_promotion_rate_limit(pgdat, rate_limit, - thp_nr_pages(page)); + folio_nr_pages(folio)); } this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); - last_cpupid = page_cpupid_xchg_last(page, this_cpupid); + last_cpupid = page_cpupid_xchg_last(&folio->page, this_cpupid); if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && !node_is_toptier(src_nid) && !cpupid_valid(last_cpupid)) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index a2cea3f092bf..4966f6644c8c 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2623,7 +2623,7 @@ int mpol_misplaced(struct folio *folio, struct vm_area_struct *vma, if (pol->flags & MPOL_F_MORON) { polnid = thisnid; - if (!should_numa_migrate_memory(current, &folio->page, curnid, + if (!should_numa_migrate_memory(current, folio, curnid, thiscpu)) goto out; } -- Gitee From 50a953f308610c2e0cd3e914eafbf3d8281d9678 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:07:48 +0800 Subject: [PATCH 1269/2138] mm_types: add virtual and _last_cpupid into struct folio ANBZ: #9728 commit 1d44f2e6d178163a94980fd5f9a4b04b6b36535b upstream Patch series "mm: convert page cpupid functions to folios", v3. The cpupid(or access time) used by numa balancing is stored in flags or _last_cpupid(if LAST_CPUPID_NOT_IN_PAGE_FLAGS) of page, this is to convert page cpupid to folio cpupid, a new _last_cpupid is added into folio, which make us to use folio->_last_cpupid directly, and the page cpupid functions are converted to folio ones. 
page_cpupid_last() -> folio_last_cpupid() xchg_page_access_time() -> folio_xchg_access_time() page_cpupid_xchg_last() -> folio_xchg_last_cpupid() This patch (of 19): If WANT_PAGE_VIRTUAL and LAST_CPUPID_NOT_IN_PAGE_FLAGS defined, the 'virtual' and '_last_cpupid' are in struct page, and since _last_cpupid is used by numa balancing feature, it is better to move it before KMSAN metadata from struct page, also add them into struct folio to make us to access them from folio directly. Link: https://lkml.kernel.org/r/20231018140806.2783514-1-wangkefeng.wang@huawei.com Link: https://lkml.kernel.org/r/20231018140806.2783514-2-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- include/linux/mm_types.h | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 88e0b3f3ff8e..fc1c23a6fb17 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -199,6 +199,10 @@ struct page { not kmapped, ie. highmem) */ #endif /* WANT_PAGE_VIRTUAL */ +#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS + int _last_cpupid; +#endif + #ifdef CONFIG_KMSAN /* * KMSAN metadata for this page: @@ -210,10 +214,6 @@ struct page { struct page *kmsan_shadow; struct page *kmsan_origin; #endif - -#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS - int _last_cpupid; -#endif } _struct_page_alignment; /* @@ -297,6 +297,8 @@ typedef struct { * @_refcount: Do not access this member directly. Use folio_ref_count() * to find how many references there are to this folio. * @memcg_data: Memory Control Group data. + * @virtual: Virtual address in the kernel direct map. + * @_last_cpupid: IDs of last CPU and last process that accessed the folio. 
* @_entire_mapcount: Do not use directly, call folio_entire_mapcount(). * @_nr_pages_mapped: Do not use directly, call folio_mapcount(). * @_pincount: Do not use directly, call folio_maybe_dma_pinned(). @@ -342,6 +344,12 @@ struct folio { atomic_t _refcount; #ifdef CONFIG_MEMCG unsigned long memcg_data; +#endif +#if defined(WANT_PAGE_VIRTUAL) + void *virtual; +#endif +#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS + int _last_cpupid; #endif /* private: the union with struct page is transitional */ }; @@ -398,6 +406,12 @@ FOLIO_MATCH(_refcount, _refcount); #ifdef CONFIG_MEMCG FOLIO_MATCH(memcg_data, memcg_data); #endif +#if defined(WANT_PAGE_VIRTUAL) +FOLIO_MATCH(virtual, virtual); +#endif +#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS +FOLIO_MATCH(_last_cpupid, _last_cpupid); +#endif #undef FOLIO_MATCH #define FOLIO_MATCH(pg, fl) \ static_assert(offsetof(struct folio, fl) == \ -- Gitee From b2e32dcfabd516e3b9a08ac405877a3e6935cff2 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:07:49 +0800 Subject: [PATCH 1270/2138] mm: add folio_last_cpupid() ANBZ: #9728 commit 155c98cfcf961327adedabd629edfc2301cf354b upstream Add folio_last_cpupid() wrapper, which is required to convert page_cpupid_last() to folio version later in the series. 
Link: https://lkml.kernel.org/r/20231018140806.2783514-3-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- include/linux/mm.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index 8518369a60ab..84722bf7caf3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1800,6 +1800,11 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) } #endif /* CONFIG_NUMA_BALANCING */ +static inline int folio_last_cpupid(struct folio *folio) +{ + return page_cpupid_last(&folio->page); +} + #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) /* -- Gitee From be0ebb32574ac78736cba5571a451f0770857f81 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:07:50 +0800 Subject: [PATCH 1271/2138] mm: memory: use folio_last_cpupid() in do_numa_page() ANBZ: #9728 commit 67b33e3ff58374b3fca929933ccc04a1858fda6a upstream Convert to use folio_last_cpupid() in do_numa_page(). 
Link: https://lkml.kernel.org/r/20231018140806.2783514-4-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index a135ccb6e7a9..beb2e5bfc542 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5342,7 +5342,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) !node_is_toptier(nid)) last_cpupid = (-1 & LAST_CPUPID_MASK); else - last_cpupid = page_cpupid_last(&folio->page); + last_cpupid = folio_last_cpupid(folio); target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags); if (target_nid == NUMA_NO_NODE) { folio_put(folio); -- Gitee From 324f45c2ae8b2a2912601138d375c033ca4d7879 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:07:51 +0800 Subject: [PATCH 1272/2138] mm: huge_memory: use folio_last_cpupid() in do_huge_pmd_numa_page() ANBZ: #9728 commit c4a8d2faab1f9165df1543795254b1c2470ce7f8 upstream Convert to use folio_last_cpupid() in do_huge_pmd_numa_page(). Link: https://lkml.kernel.org/r/20231018140806.2783514-5-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/huge_memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d97525ffae28..2549f92ce4dd 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1707,7 +1707,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) * to record page access time. 
So use default value. */ if (node_is_toptier(nid)) - last_cpupid = page_cpupid_last(&folio->page); + last_cpupid = folio_last_cpupid(folio); target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags); if (target_nid == NUMA_NO_NODE) { folio_put(folio); -- Gitee From 5f6ed5c2e0b7848b08356f6dfc62a57df6eb83d4 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:07:52 +0800 Subject: [PATCH 1273/2138] mm: huge_memory: use folio_last_cpupid() in __split_huge_page_tail() ANBZ: #9728 commit 19c1ac02ce02158fa22eb53f2750525ae93da9ef upstream Convert to use folio_last_cpupid() in __split_huge_page_tail(). Link: https://lkml.kernel.org/r/20231018140806.2783514-6-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/huge_memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 2549f92ce4dd..f6329fd280a4 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2676,7 +2676,7 @@ static void __split_huge_page_tail(struct folio *folio, int tail, if (page_is_idle(head)) set_page_idle(page_tail); - page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); + page_cpupid_xchg_last(page_tail, folio_last_cpupid(folio)); /* * always add to the tail because some iterators expect new -- Gitee From 458336a426bbd45e33718a9d6bf476eb1f68fa62 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:07:53 +0800 Subject: [PATCH 1274/2138] mm: remove page_cpupid_last() ANBZ: #9728 commit f39eac30a8f334f0765ef78fe4d13b3fd5bfa3fd upstream Since all calls use folio_last_cpupid(), remove page_cpupid_last(). 
Link: https://lkml.kernel.org/r/20231018140806.2783514-7-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- include/linux/mm.h | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 84722bf7caf3..202eec990085 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1706,18 +1706,18 @@ static inline int page_cpupid_xchg_last(struct page *page, int cpupid) return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); } -static inline int page_cpupid_last(struct page *page) +static inline int folio_last_cpupid(struct folio *folio) { - return page->_last_cpupid; + return folio->_last_cpupid; } static inline void page_cpupid_reset_last(struct page *page) { page->_last_cpupid = -1 & LAST_CPUPID_MASK; } #else -static inline int page_cpupid_last(struct page *page) +static inline int folio_last_cpupid(struct folio *folio) { - return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; + return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; } extern int page_cpupid_xchg_last(struct page *page, int cpupid); @@ -1756,9 +1756,9 @@ static inline int xchg_page_access_time(struct page *page, int time) return 0; } -static inline int page_cpupid_last(struct page *page) +static inline int folio_last_cpupid(struct folio *folio) { - return page_to_nid(page); /* XXX */ + return folio_nid(folio); /* XXX */ } static inline int cpupid_to_nid(int cpupid) @@ -1800,11 +1800,6 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) } #endif /* CONFIG_NUMA_BALANCING */ -static inline int folio_last_cpupid(struct folio *folio) -{ - return page_cpupid_last(&folio->page); -} - #if defined(CONFIG_KASAN_SW_TAGS) 
|| defined(CONFIG_KASAN_HW_TAGS) /* -- Gitee From bb3a6d6f5eb0f7b570c248677f75065a5c109aae Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:07:54 +0800 Subject: [PATCH 1275/2138] mm: add folio_xchg_access_time() ANBZ: #9728 commit 55c199385c4465e9abe1a3d6d1aba348d0356e03 upstream Add folio_xchg_access_time() wrapper, which is required to convert xchg_page_access_time() to folio version later in the series. Link: https://lkml.kernel.org/r/20231018140806.2783514-8-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- include/linux/mm.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index 202eec990085..c88ac6d839d9 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1800,6 +1800,11 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) } #endif /* CONFIG_NUMA_BALANCING */ +static inline int folio_xchg_access_time(struct folio *folio, int time) +{ + return xchg_page_access_time(&folio->page, time); +} + #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) /* -- Gitee From a7a2a269eaeeb2529266309d93e1c834cc951013 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:07:55 +0800 Subject: [PATCH 1276/2138] sched/fair: use folio_xchg_access_time() in numa_hint_fault_latency() ANBZ: #9728 commit 0b201c3624ae9f58ebfff8484f304f3008fb01b8 upstream Convert to use folio_xchg_access_time() in numa_hint_fault_latency(). 
Link: https://lkml.kernel.org/r/20231018140806.2783514-9-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- kernel/sched/fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 861905a9813a..7fa914fe6f57 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1875,7 +1875,7 @@ static int numa_hint_fault_latency(struct folio *folio) int last_time, time; time = jiffies_to_msecs(jiffies); - last_time = xchg_page_access_time(&folio->page, time); + last_time = folio_xchg_access_time(folio, time); return (time - last_time) & PAGE_ACCESS_TIME_MASK; } -- Gitee From 01b66cf4bcdcd281f1edfae4c66bf3eadcbe22ad Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:07:56 +0800 Subject: [PATCH 1277/2138] mm: mprotect: use a folio in change_pte_range() ANBZ: #9728 commit ec1778807a8053d14cde7cfd75fbd66e0c7b9c9f upstream Use a folio in change_pte_range() to save three compound_head() calls. Since now only normal and PMD-mapped page is handled by numa balancing, it is enough to only update the entire folio's access time. 
Link: https://lkml.kernel.org/r/20231018140806.2783514-10-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/mprotect.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/mm/mprotect.c b/mm/mprotect.c index 1fb086eb0e83..534ad4d0ecac 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -114,7 +114,7 @@ static long change_pte_range(struct mmu_gather *tlb, * pages. See similar comment in change_huge_pmd. */ if (prot_numa) { - struct page *page; + struct folio *folio; int nid; bool toptier; @@ -122,13 +122,14 @@ static long change_pte_range(struct mmu_gather *tlb, if (pte_protnone(oldpte)) continue; - page = vm_normal_page(vma, addr, oldpte); - if (!page || is_zone_device_page(page) || PageKsm(page)) + folio = vm_normal_folio(vma, addr, oldpte); + if (!folio || folio_is_zone_device(folio) || + folio_test_ksm(folio)) continue; /* Also skip shared copy-on-write pages */ if (is_cow_mapping(vma->vm_flags) && - page_count(page) != 1) + folio_ref_count(folio) != 1) continue; /* @@ -136,14 +137,15 @@ static long change_pte_range(struct mmu_gather *tlb, * it cannot move them all from MIGRATE_ASYNC * context. */ - if (page_is_file_lru(page) && PageDirty(page)) + if (folio_is_file_lru(folio) && + folio_test_dirty(folio)) continue; /* * Don't mess with PTEs if page is already on the node * a single-threaded process is running on. 
*/ - nid = page_to_nid(page); + nid = folio_nid(folio); if (target_node == nid) continue; toptier = node_is_toptier(nid); @@ -157,7 +159,7 @@ static long change_pte_range(struct mmu_gather *tlb, continue; if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING && !toptier) - xchg_page_access_time(page, + folio_xchg_access_time(folio, jiffies_to_msecs(jiffies)); } -- Gitee From 97eb574e978c976c4aa6a3d4d37e9117fa6d1570 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:07:57 +0800 Subject: [PATCH 1278/2138] mm: huge_memory: use a folio in change_huge_pmd() ANBZ: #9728 commit d986ba2b1953f761d3859c22160e82c58ed4287d upstream Use a folio in change_huge_pmd(), which helps to remove last xchg_page_access_time() caller. Link: https://lkml.kernel.org/r/20231018140806.2783514-11-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/huge_memory.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index f6329fd280a4..36b4317b2346 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1998,7 +1998,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION if (is_swap_pmd(*pmd)) { swp_entry_t entry = pmd_to_swp_entry(*pmd); - struct page *page = pfn_swap_entry_to_page(entry); + struct folio *folio = page_folio(pfn_swap_entry_to_page(entry)); pmd_t newpmd; VM_BUG_ON(!is_pmd_migration_entry(*pmd)); @@ -2007,7 +2007,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, * A protection check is difficult so * just be safe and disable write */ - if (PageAnon(page)) + if (folio_test_anon(folio)) entry = 
make_readable_exclusive_migration_entry(swp_offset(entry)); else entry = make_readable_migration_entry(swp_offset(entry)); @@ -2029,7 +2029,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, #endif if (prot_numa) { - struct page *page; + struct folio *folio; bool toptier; /* * Avoid trapping faults against the zero page. The read-only @@ -2042,8 +2042,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, if (pmd_protnone(*pmd)) goto unlock; - page = pmd_page(*pmd); - toptier = node_is_toptier(page_to_nid(page)); + folio = page_folio(pmd_page(*pmd)); + toptier = node_is_toptier(folio_nid(folio)); /* * Skip scanning top tier node if normal numa * balancing is disabled @@ -2054,7 +2054,8 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING && !toptier) - xchg_page_access_time(page, jiffies_to_msecs(jiffies)); + folio_xchg_access_time(folio, + jiffies_to_msecs(jiffies)); } /* * In case prot_numa, we are under mmap_read_lock(mm). It's critical -- Gitee From 47bb8b6e69637a0ec5a80c8fa37c7a52c75d734a Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:07:58 +0800 Subject: [PATCH 1279/2138] mm: remove xchg_page_access_time() ANBZ: #9728 commit f393084382fa3bbd5840b428d538dbcb33be0186 upstream Since all calls use folio_xchg_access_time(), remove xchg_page_access_time(). 
Link: https://lkml.kernel.org/r/20231018140806.2783514-12-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- include/linux/mm.h | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index c88ac6d839d9..f5abf79f9bba 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1728,11 +1728,12 @@ static inline void page_cpupid_reset_last(struct page *page) } #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */ -static inline int xchg_page_access_time(struct page *page, int time) +static inline int folio_xchg_access_time(struct folio *folio, int time) { int last_time; - last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS); + last_time = page_cpupid_xchg_last(&folio->page, + time >> PAGE_ACCESS_TIME_BUCKETS); return last_time << PAGE_ACCESS_TIME_BUCKETS; } @@ -1751,7 +1752,7 @@ static inline int page_cpupid_xchg_last(struct page *page, int cpupid) return page_to_nid(page); /* XXX */ } -static inline int xchg_page_access_time(struct page *page, int time) +static inline int folio_xchg_access_time(struct folio *folio, int time) { return 0; } @@ -1800,11 +1801,6 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) } #endif /* CONFIG_NUMA_BALANCING */ -static inline int folio_xchg_access_time(struct folio *folio, int time) -{ - return xchg_page_access_time(&folio->page, time); -} - #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) /* -- Gitee From d2784375a560855fee12a9f30023d7f62507a704 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:07:59 +0800 Subject: [PATCH 1280/2138] mm: add folio_xchg_last_cpupid() ANBZ: #9728 commit 136d0b47576f8701d68c2d504e7237d9fdc4ebbd 
upstream Add folio_xchg_last_cpupid() wrapper, which is required to convert page_cpupid_xchg_last() to folio version later in the series. Link: https://lkml.kernel.org/r/20231018140806.2783514-13-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- include/linux/mm.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index f5abf79f9bba..a15985c8a3f6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1801,6 +1801,11 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) } #endif /* CONFIG_NUMA_BALANCING */ +static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid) +{ + return page_cpupid_xchg_last(&folio->page, cpupid); +} + #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) /* -- Gitee From 7bfa9427083a727ce2c4a3314324dd868331eda6 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:08:00 +0800 Subject: [PATCH 1281/2138] sched/fair: use folio_xchg_last_cpupid() in should_numa_migrate_memory() ANBZ: #9728 commit 1b143cc77f2074dd43b610d6bfffc822d20b6e16 upstream Convert to use folio_xchg_last_cpupid() in should_numa_migrate_memory(). 
Link: https://lkml.kernel.org/r/20231018140806.2783514-14-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- kernel/sched/fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7fa914fe6f57..98abc91a9a56 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1971,7 +1971,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio, } this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid); - last_cpupid = page_cpupid_xchg_last(&folio->page, this_cpupid); + last_cpupid = folio_xchg_last_cpupid(folio, this_cpupid); if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && !node_is_toptier(src_nid) && !cpupid_valid(last_cpupid)) -- Gitee From 17b38bbe4739a63fb21980b58c862a0d4a2bbc17 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:08:01 +0800 Subject: [PATCH 1282/2138] mm: migrate: use folio_xchg_last_cpupid() in folio_migrate_flags() ANBZ: #9728 commit 4e694fe4d2fa3031392bdbeaa88066f67c886a0c upstream Convert to use folio_xchg_last_cpupid() in folio_migrate_flags(), also directly use folio_nid() instead of page_to_nid(&folio->page). 
Link: https://lkml.kernel.org/r/20231018140806.2783514-15-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/migrate.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index 6795c1a4637a..356b1b3fd955 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -595,20 +595,20 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio) * Copy NUMA information to the new page, to prevent over-eager * future migrations of this same page. */ - cpupid = page_cpupid_xchg_last(&folio->page, -1); + cpupid = folio_xchg_last_cpupid(folio, -1); /* * For memory tiering mode, when migrate between slow and fast * memory node, reset cpupid, because that is used to record * page access time in slow memory node. */ if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) { - bool f_toptier = node_is_toptier(page_to_nid(&folio->page)); - bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page)); + bool f_toptier = node_is_toptier(folio_nid(folio)); + bool t_toptier = node_is_toptier(folio_nid(newfolio)); if (f_toptier != t_toptier) cpupid = -1; } - page_cpupid_xchg_last(&newfolio->page, cpupid); + folio_xchg_last_cpupid(newfolio, cpupid); folio_migrate_ksm(newfolio, folio); /* -- Gitee From 33d87729319bdfa76a0127f3abcc74472b8d0a7b Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:08:02 +0800 Subject: [PATCH 1283/2138] mm: huge_memory: use folio_xchg_last_cpupid() in __split_huge_page_tail() ANBZ: #9728 commit c82530113480f8db9dd9584c51ec9326e6ce9790 upstream Convert to use folio_xchg_last_cpupid() in __split_huge_page_tail(). 
Link: https://lkml.kernel.org/r/20231018140806.2783514-16-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/huge_memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 36b4317b2346..41bc1a86afb5 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2677,7 +2677,7 @@ static void __split_huge_page_tail(struct folio *folio, int tail, if (page_is_idle(head)) set_page_idle(page_tail); - page_cpupid_xchg_last(page_tail, folio_last_cpupid(folio)); + folio_xchg_last_cpupid(new_folio, folio_last_cpupid(folio)); /* * always add to the tail because some iterators expect new -- Gitee From 1a7e376d631afa4963651a52ed76bd47e1d57324 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:08:03 +0800 Subject: [PATCH 1284/2138] mm: make finish_mkwrite_fault() static ANBZ: #9728 commit c08b7e3830dbf24dd2552ddeea84f00393842f1b upstream Make finish_mkwrite_fault static since it is not used outside of memory.c. 
Link: https://lkml.kernel.org/r/20231018140806.2783514-17-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- include/linux/mm.h | 1 - mm/memory.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index a15985c8a3f6..ea68644c4341 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1352,7 +1352,6 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio, struct page *page, unsigned int nr, unsigned long addr); vm_fault_t finish_fault(struct vm_fault *vmf); -vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); #endif /* diff --git a/mm/memory.c b/mm/memory.c index beb2e5bfc542..8b1a6399ce8e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3637,7 +3637,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before * we acquired PTE lock. */ -vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) +static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) { WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, -- Gitee From cfd746be8de8842fd5d7ce95ba0bbd002bfc7470 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:08:04 +0800 Subject: [PATCH 1285/2138] mm: convert wp_page_reuse() and finish_mkwrite_fault() to take a folio ANBZ: #9728 commit a86bc96b77df40c27ead5ef4ac3837904b7eb53f upstream Saves one compound_head() call, also in preparation for page_cpupid_xchg_last() conversion. 
Link: https://lkml.kernel.org/r/20231018140806.2783514-18-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/memory.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 8b1a6399ce8e..226c15525629 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3426,7 +3426,7 @@ static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf) * case, all we need to do here is to mark the page as writable and update * any related book-keeping. */ -static inline void wp_page_reuse(struct vm_fault *vmf) +static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; @@ -3434,7 +3434,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf) pte_t entry; VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); - VM_BUG_ON(page && PageAnon(page) && !PageAnonExclusive(page)); + VM_BUG_ON(folio && folio_test_anon(folio) && !PageAnonExclusive(page)); /* * Clear the pages cpupid information as the existing @@ -3626,6 +3626,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * writeable once the page is prepared * * @vmf: structure describing the fault + * @folio: the folio of vmf->page * * This function handles all that is needed to finish a write page fault in a * shared mapping due to PTE being read-only once the mapped page is prepared. @@ -3637,7 +3638,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * Return: %0 on success, %VM_FAULT_NOPAGE when PTE got changed before * we acquired PTE lock. 
*/ -static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) +static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf, struct folio *folio) { WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, @@ -3653,7 +3654,7 @@ static vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return VM_FAULT_NOPAGE; } - wp_page_reuse(vmf); + wp_page_reuse(vmf, folio); return 0; } @@ -3678,9 +3679,9 @@ static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) ret = vma->vm_ops->pfn_mkwrite(vmf); if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) return ret; - return finish_mkwrite_fault(vmf); + return finish_mkwrite_fault(vmf, NULL); } - wp_page_reuse(vmf); + wp_page_reuse(vmf, NULL); return 0; } @@ -3708,14 +3709,14 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio) folio_put(folio); return tmp; } - tmp = finish_mkwrite_fault(vmf); + tmp = finish_mkwrite_fault(vmf, folio); if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) { folio_unlock(folio); folio_put(folio); return tmp; } } else { - wp_page_reuse(vmf); + wp_page_reuse(vmf, folio); folio_lock(folio); } ret |= fault_dirty_shared_page(vmf); @@ -3843,7 +3844,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; } - wp_page_reuse(vmf); + wp_page_reuse(vmf, folio); return 0; } -- Gitee From d9b94815b1a8f59e619edcdc7a37d1a004633ee6 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:08:05 +0800 Subject: [PATCH 1286/2138] mm: use folio_xchg_last_cpupid() in wp_page_reuse() ANBZ: #9728 commit c2c3b5148052cef670d359b81d338d20b96bf47f upstream Convert to use folio_xchg_last_cpupid() in wp_page_reuse(), and remove page variable. Since now only normal and PMD-mapped page is handled by numa balancing, it's enough to only update the entire folio's last cpupid. 
Link: https://lkml.kernel.org/r/20231018140806.2783514-19-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/memory.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 226c15525629..3c18afc8741a 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3430,19 +3430,20 @@ static inline void wp_page_reuse(struct vm_fault *vmf, struct folio *folio) __releases(vmf->ptl) { struct vm_area_struct *vma = vmf->vma; - struct page *page = vmf->page; pte_t entry; VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE)); - VM_BUG_ON(folio && folio_test_anon(folio) && !PageAnonExclusive(page)); - /* - * Clear the pages cpupid information as the existing - * information potentially belongs to a now completely - * unrelated process. - */ - if (page) - page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1); + if (folio) { + VM_BUG_ON(folio_test_anon(folio) && + !PageAnonExclusive(vmf->page)); + /* + * Clear the folio's cpupid information as the existing + * information potentially belongs to a now completely + * unrelated process. + */ + folio_xchg_last_cpupid(folio, (1 << LAST_CPUPID_SHIFT) - 1); + } flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); entry = pte_mkyoung(vmf->orig_pte); -- Gitee From 39ad3b69a222bb87fa0df1a299ddced22caf1c5d Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 18 Oct 2023 22:08:06 +0800 Subject: [PATCH 1287/2138] mm: remove page_cpupid_xchg_last() ANBZ: #9728 commit 8f0f4788b1247c2f92ecacd8f86ce0b379b807b9 upstream Since all calls use folio_xchg_last_cpupid(), remove page_cpupid_xchg_last(). 
Link: https://lkml.kernel.org/r/20231018140806.2783514-20-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: Huang Ying Cc: Ingo Molnar Cc: Juri Lelli Cc: Matthew Wilcox (Oracle) Cc: Peter Zijlstra Cc: Vincent Guittot Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- include/linux/mm.h | 19 +++++++------------ mm/mmzone.c | 6 +++--- 2 files changed, 10 insertions(+), 15 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index ea68644c4341..acb5f7808f58 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1700,9 +1700,9 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid) #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid) #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS -static inline int page_cpupid_xchg_last(struct page *page, int cpupid) +static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid) { - return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); + return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK); } static inline int folio_last_cpupid(struct folio *folio) @@ -1719,7 +1719,7 @@ static inline int folio_last_cpupid(struct folio *folio) return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; } -extern int page_cpupid_xchg_last(struct page *page, int cpupid); +int folio_xchg_last_cpupid(struct folio *folio, int cpupid); static inline void page_cpupid_reset_last(struct page *page) { @@ -1731,8 +1731,8 @@ static inline int folio_xchg_access_time(struct folio *folio, int time) { int last_time; - last_time = page_cpupid_xchg_last(&folio->page, - time >> PAGE_ACCESS_TIME_BUCKETS); + last_time = folio_xchg_last_cpupid(folio, + time >> PAGE_ACCESS_TIME_BUCKETS); return last_time << PAGE_ACCESS_TIME_BUCKETS; } @@ -1746,9 +1746,9 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) } } #else /* !CONFIG_NUMA_BALANCING */ -static inline 
int page_cpupid_xchg_last(struct page *page, int cpupid) +static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid) { - return page_to_nid(page); /* XXX */ + return folio_nid(folio); /* XXX */ } static inline int folio_xchg_access_time(struct folio *folio, int time) @@ -1800,11 +1800,6 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) } #endif /* CONFIG_NUMA_BALANCING */ -static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid) -{ - return page_cpupid_xchg_last(&folio->page, cpupid); -} - #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) /* diff --git a/mm/mmzone.c b/mm/mmzone.c index 68e1511be12d..b594d3f268fe 100644 --- a/mm/mmzone.c +++ b/mm/mmzone.c @@ -93,19 +93,19 @@ void lruvec_init(struct lruvec *lruvec) } #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) -int page_cpupid_xchg_last(struct page *page, int cpupid) +int folio_xchg_last_cpupid(struct folio *folio, int cpupid) { unsigned long old_flags, flags; int last_cpupid; - old_flags = READ_ONCE(page->flags); + old_flags = READ_ONCE(folio->flags); do { flags = old_flags; last_cpupid = (flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK; flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT); flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT; - } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags))); + } while (unlikely(!try_cmpxchg(&folio->flags, &old_flags, flags))); return last_cpupid; } -- Gitee From 6f1a3b4987408b4b20af91dd524374070f98aa76 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 27 Feb 2024 21:15:48 +0100 Subject: [PATCH 1288/2138] mm: convert folio_estimated_sharers() to folio_likely_mapped_shared() ANBZ: #9728 commit ebb34f78d72c2320620ba6d55cb22a52949047a1 upstream Callers of folio_estimated_sharers() only care about "mapped shared vs. mapped exclusively", not the exact estimate of sharers. Let's consolidate and unify the condition users are checking. 
While at it clarify the semantics and extend the discussion on the fuzziness. Use the "likely mapped shared" terminology to better express what the (adjusted) function actually checks. Whether a partially-mappable folio is more likely to not be partially mapped than partially mapped is debatable. In the future, we might be able to improve our estimate for partially-mappable folios, though. Note that we will now consistently detect "mapped shared" only if the first subpage is actually mapped multiple times. When the first subpage is not mapped, we will consistently detect it as "mapped exclusively". This change should currently only affect the usage in madvise_free_pte_range() and queue_folios_pte_range() for large folios: if the first page was already unmapped, we would have skipped the folio. [david@redhat.com: folio_likely_mapped_shared() kerneldoc fixup] Link: https://lkml.kernel.org/r/dd0ad9f2-2d7a-45f3-9ba3-979488c7dd27@redhat.com Link: https://lkml.kernel.org/r/20240227201548.857831-1-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Khalid Aziz Acked-by: Barry Song Reviewed-by: Vishal Moola (Oracle) Reviewed-by: Ryan Roberts Reviewed-by: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- include/linux/mm.h | 48 ++++++++++++++++++++++++++++++++++++---------- mm/huge_memory.c | 2 +- mm/madvise.c | 6 +++--- mm/memory.c | 2 +- mm/mempolicy.c | 9 ++++----- mm/migrate.c | 8 ++++---- 6 files changed, 51 insertions(+), 24 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index acb5f7808f58..44b8711bdc20 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2137,21 +2137,49 @@ static inline size_t folio_size(struct folio *folio) } /** - * folio_estimated_sharers - Estimate the number of sharers of a folio. + * folio_likely_mapped_shared - Estimate if the folio is mapped into the page + * tables of more than one MM * @folio: The folio. 
* - * folio_estimated_sharers() aims to serve as a function to efficiently - * estimate the number of processes sharing a folio. This is done by - * looking at the precise mapcount of the first subpage in the folio, and - * assuming the other subpages are the same. This may not be true for large - * folios. If you want exact mapcounts for exact calculations, look at - * page_mapcount() or folio_total_mapcount(). + * This function checks if the folio is currently mapped into more than one + * MM ("mapped shared"), or if the folio is only mapped into a single MM + * ("mapped exclusively"). * - * Return: The estimated number of processes sharing a folio. + * As precise information is not easily available for all folios, this function + * estimates the number of MMs ("sharers") that are currently mapping a folio + * using the number of times the first page of the folio is currently mapped + * into page tables. + * + * For small anonymous folios (except KSM folios) and anonymous hugetlb folios, + * the return value will be exactly correct, because they can only be mapped + * at most once into an MM, and they cannot be partially mapped. + * + * For other folios, the result can be fuzzy: + * #. For partially-mappable large folios (THP), the return value can wrongly + * indicate "mapped exclusively" (false negative) when the folio is + * only partially mapped into at least one MM. + * #. For pagecache folios (including hugetlb), the return value can wrongly + * indicate "mapped shared" (false positive) when two VMAs in the same MM + * cover the same file range. + * #. For (small) KSM folios, the return value can wrongly indicate "mapped + * shared" (false negative), when the folio is mapped multiple times into + * the same MM. + * + * Further, this function only considers current page table mappings that + * are tracked using the folio mapcount(s). + * + * This function does not consider: + * #. 
If the folio might get mapped in the (near) future (e.g., swapcache, + * pagecache, temporary unmapping for migration). + * #. If the folio is mapped differently (VM_PFNMAP). + * #. If hugetlb page table sharing applies. Callers might want to check + * hugetlb_pmd_shared(). + * + * Return: Whether the folio is estimated to be mapped into more than one MM. */ -static inline int folio_estimated_sharers(struct folio *folio) +static inline bool folio_likely_mapped_shared(struct folio *folio) { - return page_mapcount(folio_page(folio, 0)); + return page_mapcount(folio_page(folio, 0)) > 1; } #ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 41bc1a86afb5..f22a139d53c6 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1780,7 +1780,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, * If other processes are mapping this folio, we couldn't discard * the folio unless they all do MADV_FREE so let's skip the folio. */ - if (folio_estimated_sharers(folio) != 1) + if (folio_likely_mapped_shared(folio)) goto out; if (!folio_trylock(folio)) diff --git a/mm/madvise.c b/mm/madvise.c index 8969f30279cb..68b727918ade 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -383,7 +383,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, folio = pfn_folio(pmd_pfn(orig_pmd)); /* Do not interfere with other mappings of this folio */ - if (folio_estimated_sharers(folio) != 1) + if (folio_likely_mapped_shared(folio)) goto huge_unlock; if (pageout_anon_only_filter && !folio_test_anon(folio)) @@ -459,7 +459,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, if (folio_test_large(folio)) { int err; - if (folio_estimated_sharers(folio) != 1) + if (folio_likely_mapped_shared(folio)) break; if (pageout_anon_only_filter && !folio_test_anon(folio)) break; @@ -683,7 +683,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, if (folio_test_large(folio)) { int err; - if 
(folio_estimated_sharers(folio) != 1) + if (folio_likely_mapped_shared(folio)) break; if (!folio_trylock(folio)) break; diff --git a/mm/memory.c b/mm/memory.c index 3c18afc8741a..636c3bdbff46 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5332,7 +5332,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) * Flag if the folio is shared between multiple address spaces. This * is later used when determining whether to group tasks together */ - if (folio_estimated_sharers(folio) > 1 && (vma->vm_flags & VM_SHARED)) + if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) flags |= TNF_SHARED; nid = folio_nid(folio); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 4966f6644c8c..39d703a41a5c 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -607,7 +607,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask, * expensive, so check the estimated sharers of the folio instead. */ if ((flags & MPOL_MF_MOVE_ALL) || - (folio_estimated_sharers(folio) == 1 && !hugetlb_pmd_shared(pte))) + (!folio_likely_mapped_shared(folio) && !hugetlb_pmd_shared(pte))) if (!isolate_hugetlb(folio, qp->pagelist)) qp->nr_failed++; unlock: @@ -1028,11 +1028,10 @@ static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist, * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio. * Choosing not to migrate a shared folio is not counted as a failure. * - * To check if the folio is shared, ideally we want to make sure - * every page is mapped to the same process. Doing that is very - * expensive, so check the estimated sharers of the folio instead. + * See folio_likely_mapped_shared() on possible imprecision when we + * cannot easily detect if a folio is shared. 
*/ - if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) { + if ((flags & MPOL_MF_MOVE_ALL) || !folio_likely_mapped_shared(folio)) { if (folio_isolate_lru(folio)) { list_add_tail(&folio->lru, foliolist); node_stat_mod_folio(folio, diff --git a/mm/migrate.c b/mm/migrate.c index 356b1b3fd955..6fa2da281aed 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2554,11 +2554,11 @@ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, /* * Don't migrate file folios that are mapped in multiple processes * with execute permissions as they are probably shared libraries. - * To check if the folio is shared, ideally we want to make sure - * every page is mapped to the same process. Doing that is very - * expensive, so check the estimated mapcount of the folio instead. + * + * See folio_likely_mapped_shared() on possible imprecision when we + * cannot easily detect if a folio is shared. */ - if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) && + if (folio_likely_mapped_shared(folio) && folio_is_file_lru(folio) && (vma->vm_flags & VM_EXEC)) goto out; -- Gitee From 886ec14a27b018ef41499db7e128598dd4a689ac Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 29 Mar 2024 14:56:45 +0800 Subject: [PATCH 1289/2138] mm: factor out the numa mapping rebuilding into a new helper ANBZ: #9728 commit 6b0ed7b3c77547d2308983a26db11a0d14a60ace upstream Patch series "support multi-size THP numa balancing", v2. This patchset tries to support mTHP numa balancing, as a simple solution to start, the NUMA balancing algorithm for mTHP will follow the THP strategy as the basic support. Please find details in each patch. This patch (of 2): To support large folio's numa balancing, factor out the numa mapping rebuilding into a new helper as a preparation. 
Link: https://lkml.kernel.org/r/cover.1712132950.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/cover.1711683069.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/8bc2586bdd8dbbe6d83c09b77b360ec8fcac3736.1711683069.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: "Huang, Ying" Cc: David Hildenbrand Cc: John Hubbard Cc: Kefeng Wang Cc: Mel Gorman Cc: Ryan Roberts Signed-off-by: Andrew Morton Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/memory.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 636c3bdbff46..53fbb983f213 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5274,6 +5274,20 @@ int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma, return mpol_misplaced(folio, vma, addr); } +static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, + bool writable) +{ + pte_t pte, old_pte; + + old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); + pte = pte_modify(old_pte, vma->vm_page_prot); + pte = pte_mkyoung(pte); + if (writable) + pte = pte_mkwrite(pte, vma); + ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); + update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); +} + static vm_fault_t do_numa_page(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; @@ -5376,13 +5390,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) * Make it present again, depending on how arch implements * non-accessible ptes, some can allow access by kernel mode. 
*/ - old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); - pte = pte_modify(old_pte, vma->vm_page_prot); - pte = pte_mkyoung(pte); - if (writable) - pte = pte_mkwrite(pte, vma); - ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); - update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); + numa_rebuild_single_mapping(vmf, vma, writable); pte_unmap_unlock(vmf->pte, vmf->ptl); if (nid != NUMA_NO_NODE) -- Gitee From c531ab64c7e3da1a5927239d9971395b0e260868 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 29 Mar 2024 14:56:46 +0800 Subject: [PATCH 1290/2138] mm: support multi-size THP numa balancing ANBZ: #9728 commit d2136d749d76af980b3accd72704eea4eab625bd upstream Now the anonymous page allocation already supports multi-size THP (mTHP), but the numa balancing still prohibits mTHP migration even though it is an exclusive mapping, which is unreasonable. Allow scanning mTHP: Commit 859d4adc3415 ("mm: numa: do not trap faults on shared data section pages") skips shared CoW pages' NUMA page migration to avoid shared data segment migration. In addition, commit 80d47f5de5e3 ("mm: don't try to NUMA-migrate COW pages that have other uses") change to use page_count() to avoid GUP pages migration, that will also skip the mTHP numa scanning. Theoretically, we can use folio_maybe_dma_pinned() to detect the GUP issue, although there is still a GUP race, the issue seems to have been resolved by commit 80d47f5de5e3. Meanwhile, use the folio_likely_mapped_shared() to skip shared CoW pages though this is not a precise sharers count. To check if the folio is shared, ideally we want to make sure every page is mapped to the same process, but doing that seems expensive and using the estimated mapcount seems can work when running autonuma benchmark. 
Allow migrating mTHP: As mentioned in the previous thread[1], large folios (including THP) are more susceptible to false sharing issues among threads than 4K base page, leading to pages ping-pong back and forth during numa balancing, which is currently not easy to resolve. Therefore, as a start to support mTHP numa balancing, we can follow the PMD mapped THP's strategy, that means we can reuse the 2-stage filter in should_numa_migrate_memory() to check if the mTHP is being heavily contended among threads (through checking the CPU id and pid of the last access) to avoid false sharing at some degree. Thus, we can restore all PTE maps upon the first hint page fault of a large folio to follow the PMD mapped THP's strategy. In the future, we can continue to optimize the NUMA balancing algorithm to avoid the false sharing issue with large folios as much as possible. Performance data: Machine environment: 2 nodes, 128 cores Intel(R) Xeon(R) Platinum Base: 2024-03-25 mm-unstable branch Enable mTHP to run autonuma-benchmark mTHP:16K Base Patched numa01 numa01 224.70 143.48 numa01_THREAD_ALLOC numa01_THREAD_ALLOC 118.05 47.43 numa02 numa02 13.45 9.29 numa02_SMT numa02_SMT 14.80 7.50 mTHP:64K Base Patched numa01 numa01 216.15 114.40 numa01_THREAD_ALLOC numa01_THREAD_ALLOC 115.35 47.41 numa02 numa02 13.24 9.25 numa02_SMT numa02_SMT 14.67 7.34 mTHP:128K Base Patched numa01 numa01 205.13 144.45 numa01_THREAD_ALLOC numa01_THREAD_ALLOC 112.93 41.88 numa02 numa02 13.16 9.18 numa02_SMT numa02_SMT 14.81 7.49 [1] https://lore.kernel.org/all/20231117100745.fnpijbk4xgmals3k@techsingularity.net/ [baolin.wang@linux.alibaba.com: v3] Link: https://lkml.kernel.org/r/c33a5c0b0a0323b1f8ed53772f50501f4b196e25.1712132950.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/d28d276d599c26df7f38c9de8446f60e22dd1950.1711683069.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: "Huang, Ying" Cc: David Hildenbrand Cc: John Hubbard Cc: Kefeng Wang Cc: Mel Gorman Cc: 
Ryan Roberts Signed-off-by: Andrew Morton Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/memory.c | 64 ++++++++++++++++++++++++++++++++++++++++----------- mm/mprotect.c | 3 ++- 2 files changed, 53 insertions(+), 14 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 53fbb983f213..ba1daf2cabe2 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5275,17 +5275,51 @@ int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma, } static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, + unsigned long fault_addr, pte_t *fault_pte, bool writable) { pte_t pte, old_pte; - old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte); + old_pte = ptep_modify_prot_start(vma, fault_addr, fault_pte); pte = pte_modify(old_pte, vma->vm_page_prot); pte = pte_mkyoung(pte); if (writable) pte = pte_mkwrite(pte, vma); - ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte); - update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); + ptep_modify_prot_commit(vma, fault_addr, fault_pte, old_pte, pte); + update_mmu_cache_range(vmf, vma, fault_addr, fault_pte, 1); +} + +static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_struct *vma, + struct folio *folio, pte_t fault_pte, + bool ignore_writable, bool pte_write_upgrade) +{ + int nr = pte_pfn(fault_pte) - folio_pfn(folio); + unsigned long start = max(vmf->address - nr * PAGE_SIZE, vma->vm_start); + unsigned long end = min(vmf->address + (folio_nr_pages(folio) - nr) * PAGE_SIZE, vma->vm_end); + pte_t *start_ptep = vmf->pte - (vmf->address - start) / PAGE_SIZE; + unsigned long addr; + + /* Restore all PTEs' mapping of the large folio */ + for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) { + pte_t ptent = ptep_get(start_ptep); + bool writable = false; + + if (!pte_present(ptent) || !pte_protnone(ptent)) + continue; + + if (pfn_folio(pte_pfn(ptent)) != folio) + continue; + + if (!ignore_writable) { + ptent = 
pte_modify(ptent, vma->vm_page_prot); + writable = pte_write(ptent); + if (!writable && pte_write_upgrade && + can_change_pte_writable(vma, addr, ptent)) + writable = true; + } + + numa_rebuild_single_mapping(vmf, vma, addr, start_ptep, writable); + } } static vm_fault_t do_numa_page(struct vm_fault *vmf) @@ -5293,11 +5327,12 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) struct vm_area_struct *vma = vmf->vma; struct folio *folio = NULL; int nid = NUMA_NO_NODE; - bool writable = false; + bool writable = false, ignore_writable = false; + bool pte_write_upgrade = vma_wants_manual_pte_write_upgrade(vma); int last_cpupid; int target_nid; pte_t pte, old_pte; - int flags = 0; + int flags = 0, nr_pages; /* * The "pte" at this point cannot be used safely without @@ -5319,7 +5354,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) * is only valid while holding the PT lock. */ writable = pte_write(pte); - if (!writable && vma_wants_manual_pte_write_upgrade(vma) && + if (!writable && pte_write_upgrade && can_change_pte_writable(vma, vmf->address, pte)) writable = true; @@ -5327,10 +5362,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) if (!folio || folio_is_zone_device(folio)) goto out_map; - /* TODO: handle PTE-mapped THP */ - if (folio_test_large(folio)) - goto out_map; - /* * Avoid grouping on RO pages in general. RO pages shouldn't hurt as * much anyway since they can be in shared cache state. This misses @@ -5350,6 +5381,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) flags |= TNF_SHARED; nid = folio_nid(folio); + nr_pages = folio_nr_pages(folio); /* * For memory tiering mode, cpupid of slow memory page is used * to record page access time. So use default value. 
@@ -5366,12 +5398,13 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) } pte_unmap_unlock(vmf->pte, vmf->ptl); writable = false; + ignore_writable = true; /* Migrate to the requested node */ if (migrate_misplaced_folio(folio, vma, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED; - task_numa_fault(last_cpupid, nid, 1, flags); + task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0; } @@ -5390,11 +5423,16 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) * Make it present again, depending on how arch implements * non-accessible ptes, some can allow access by kernel mode. */ - numa_rebuild_single_mapping(vmf, vma, writable); + if (folio && folio_test_large(folio)) + numa_rebuild_large_mapping(vmf, vma, folio, pte, ignore_writable, + pte_write_upgrade); + else + numa_rebuild_single_mapping(vmf, vma, vmf->address, vmf->pte, + writable); pte_unmap_unlock(vmf->pte, vmf->ptl); if (nid != NUMA_NO_NODE) - task_numa_fault(last_cpupid, nid, 1, flags); + task_numa_fault(last_cpupid, nid, nr_pages, flags); return 0; } diff --git a/mm/mprotect.c b/mm/mprotect.c index 534ad4d0ecac..c295085068ed 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -129,7 +129,8 @@ static long change_pte_range(struct mmu_gather *tlb, /* Also skip shared copy-on-write pages */ if (is_cow_mapping(vma->vm_flags) && - folio_ref_count(folio) != 1) + (folio_maybe_dma_pinned(folio) || + folio_likely_mapped_shared(folio))) continue; /* -- Gitee From 156daa2e59a49c738bacb2ef6661b16727831bae Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 12 Jun 2024 20:28:22 +0800 Subject: [PATCH 1291/2138] mm: fix possible OOB in numa_rebuild_large_mapping() ANBZ: #9728 commit cfdd12b48202398a879e8bc4e7fa023f4d473f62 upstream The large folio is mapped with folio size(not greater PMD_SIZE) aligned virtual address during the pagefault, ie, 'addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE)' in do_anonymous_page(). But after the mremap(), the virtual address only requires PAGE_SIZE alignment. 
Also pte is moved to new in move_page_tables(), then traversal of the new pte in the numa_rebuild_large_mapping() could hit the following issue, Unable to handle kernel paging request at virtual address 00000a80c021a788 Mem abort info: ESR = 0x0000000096000004 EC = 0x25: DABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 FSC = 0x04: level 0 translation fault Data abort info: ISV = 0, ISS = 0x00000004, ISS2 = 0x00000000 CM = 0, WnR = 0, TnD = 0, TagAccess = 0 GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0 user pgtable: 4k pages, 48-bit VAs, pgdp=00002040341a6000 [00000a80c021a788] pgd=0000000000000000, p4d=0000000000000000 Internal error: Oops: 0000000096000004 [#1] SMP ... CPU: 76 PID: 15187 Comm: git Kdump: loaded Tainted: G W 6.10.0-rc2+ #209 Hardware name: Huawei TaiShan 2280 V2/BC82AMDD, BIOS 1.79 08/21/2021 pstate: 60400009 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) pc : numa_rebuild_large_mapping+0x338/0x638 lr : numa_rebuild_large_mapping+0x320/0x638 sp : ffff8000b41c3b00 x29: ffff8000b41c3b30 x28: ffff8000812a0000 x27: 00000000000a8000 x26: 00000000000000a8 x25: 0010000000000001 x24: ffff20401c7170f0 x23: 0000ffff33a1e000 x22: 0000ffff33a76000 x21: ffff20400869eca0 x20: 0000ffff33976000 x19: 00000000000000a8 x18: ffffffffffffffff x17: 0000000000000000 x16: 0000000000000020 x15: ffff8000b41c36a8 x14: 0000000000000000 x13: 205d373831353154 x12: 5b5d333331363732 x11: 000000000011ff78 x10: 000000000011ff10 x9 : ffff800080273f30 x8 : 000000320400869e x7 : c0000000ffffd87f x6 : 00000000001e6ba8 x5 : ffff206f3fb5af88 x4 : 0000000000000000 x3 : 0000000000000000 x2 : 0000000000000000 x1 : fffffdffc0000000 x0 : 00000a80c021a780 Call trace: numa_rebuild_large_mapping+0x338/0x638 do_numa_page+0x3e4/0x4e0 handle_pte_fault+0x1bc/0x238 __handle_mm_fault+0x20c/0x400 handle_mm_fault+0xa8/0x288 do_page_fault+0x124/0x498 do_translation_fault+0x54/0x80 do_mem_abort+0x4c/0xa8 el0_da+0x40/0x110 el0t_64_sync_handler+0xe4/0x158 el0t_64_sync+0x188/0x190 Fix it by 
making the start and end not only within the vma range, but also within the page table range. Link: https://lkml.kernel.org/r/20240612122822.4033433-1-wangkefeng.wang@huawei.com Fixes: d2136d749d76 ("mm: support multi-size THP numa balancing") Signed-off-by: Kefeng Wang Acked-by: David Hildenbrand Reviewed-by: Baolin Wang Cc: "Huang, Ying" Cc: John Hubbard Cc: Liu Shixin Cc: Mel Gorman Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3799 --- mm/memory.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index ba1daf2cabe2..49b5e8f4d626 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5294,10 +5294,16 @@ static void numa_rebuild_large_mapping(struct vm_fault *vmf, struct vm_area_stru bool ignore_writable, bool pte_write_upgrade) { int nr = pte_pfn(fault_pte) - folio_pfn(folio); - unsigned long start = max(vmf->address - nr * PAGE_SIZE, vma->vm_start); - unsigned long end = min(vmf->address + (folio_nr_pages(folio) - nr) * PAGE_SIZE, vma->vm_end); - pte_t *start_ptep = vmf->pte - (vmf->address - start) / PAGE_SIZE; - unsigned long addr; + unsigned long start, end, addr = vmf->address; + unsigned long addr_start = addr - (nr << PAGE_SHIFT); + unsigned long pt_start = ALIGN_DOWN(addr, PMD_SIZE); + pte_t *start_ptep; + + /* Stay within the VMA and within the page table. 
*/ + start = max3(addr_start, pt_start, vma->vm_start); + end = min3(addr_start + folio_size(folio), pt_start + PMD_SIZE, + vma->vm_end); + start_ptep = vmf->pte - ((addr - start) >> PAGE_SHIFT); /* Restore all PTEs' mapping of the large folio */ for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) { -- Gitee From f2dc7e06441657917cdb5fd086d90ec75f3f6227 Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Wed, 11 Sep 2024 11:29:47 +0800 Subject: [PATCH 1292/2138] anolis: module: introduce the sig_enforce_subsys param ANBZ: #10838 Introduce the sig_enforce_subsys module parameter to allow users to control the enforcement of signature verification for a specific subsystem module. Now we support gpu, block and net subsys. Signed-off-by: Guixin Liu Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3823 --- .../admin-guide/kernel-parameters.txt | 7 +++ kernel/module/internal.h | 11 ++++ kernel/module/main.c | 7 ++- kernel/module/signing.c | 56 +++++++++++++++++++ 4 files changed, 80 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 96f2afba6d90..4c28ffed07a1 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3452,6 +3452,13 @@ Note that if CONFIG_MODULE_SIG_FORCE is set, that is always true, so this option does nothing. + module.sig_enforce_subsys + [KNL] When CONFIG_MODULE_SIG is set, this means that + modules the user set without (valid) signatures will + fail to load. Note that CONFIG_MODULE_SIG_FORCE is set, + that is always true, so this option does nothing. + Now we support gpu, block and net. + module_blacklist= [KNL] Do not load a comma-separated list of modules. Useful for debugging problem modules. 
diff --git a/kernel/module/internal.h b/kernel/module/internal.h index c8b7b4dcf782..bf1b643ef970 100644 --- a/kernel/module/internal.h +++ b/kernel/module/internal.h @@ -82,6 +82,8 @@ struct load_info { struct { unsigned int sym, str, mod, vers, info, pcpu; } index; + + unsigned long subsys; }; enum mod_license { @@ -330,11 +332,20 @@ int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, #ifdef CONFIG_MODULE_SIG int module_sig_check(struct load_info *info, int flags); +int force_subsys_sig_check(struct load_info *info); +void set_module_subsys(struct load_info *info, const char *name); #else /* !CONFIG_MODULE_SIG */ static inline int module_sig_check(struct load_info *info, int flags) { return 0; } + +static inline int force_subsys_sig_check(struct load_info *info) +{ + return 0; +} + +static inline void set_module_subsys(struct load_info *info, const char *name) { } #endif /* !CONFIG_MODULE_SIG */ #ifdef CONFIG_DEBUG_KMEMLEAK diff --git a/kernel/module/main.c b/kernel/module/main.c index b00e31721a73..a2cfac94394a 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -1364,7 +1364,7 @@ static bool ignore_undef_symbol(Elf_Half emachine, const char *name) } /* Change all symbols so that st_value encodes the pointer directly. */ -static int simplify_symbols(struct module *mod, const struct load_info *info) +static int simplify_symbols(struct module *mod, struct load_info *info) { Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; Elf_Sym *sym = (void *)symsec->sh_addr; @@ -1406,6 +1406,7 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) ksym = resolve_symbol_wait(mod, info, name); /* Ok if resolved. 
*/ if (ksym && !IS_ERR(ksym)) { + set_module_subsys(info, name); sym[i].st_value = kernel_symbol_value(ksym); break; } @@ -2921,6 +2922,10 @@ static int load_module(struct load_info *info, const char __user *uargs, if (err < 0) goto free_modinfo; + err = force_subsys_sig_check(info); + if (err < 0) + goto free_modinfo; + err = apply_relocations(mod, info); if (err < 0) goto free_modinfo; diff --git a/kernel/module/signing.c b/kernel/module/signing.c index a2ff4242e623..67919f8b2037 100644 --- a/kernel/module/signing.c +++ b/kernel/module/signing.c @@ -22,6 +22,62 @@ static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE); module_param(sig_enforce, bool_enable_only, 0644); +static char *sig_enforce_subsys = ""; +module_param(sig_enforce_subsys, charp, 0644); +MODULE_PARM_DESC(sig_enforce_subsys, "Enforce subsys modules signature check"); + +enum modules_subsys { + MODULE_SUBSYS_GPU, + MODULE_SUBSYS_BLOCK, + MODULE_SUBSYS_NET, +}; + +void set_module_subsys(struct load_info *info, const char *name) +{ + char *key_intf_blk = "device_add_disk"; + char *key_intf_scsi = "scsi_host_alloc"; + char *key_intf_net = "register_netdev"; + char *key_intf_gpu = "drm_"; + + if (info->subsys) + return; + + if (!strncmp(name, key_intf_gpu, strlen(key_intf_gpu))) + set_bit(MODULE_SUBSYS_GPU, &info->subsys); + + if (!strncmp(name, key_intf_blk, strlen(key_intf_blk)) || + !strncmp(name, key_intf_scsi, strlen(key_intf_scsi))) + set_bit(MODULE_SUBSYS_BLOCK, &info->subsys); + + /* register_netdev or register_netdevice */ + if (!strncmp(name, key_intf_net, strlen(key_intf_net))) + set_bit(MODULE_SUBSYS_NET, &info->subsys); +} + +int force_subsys_sig_check(struct load_info *info) +{ + if (info->sig_ok) + return 0; + + if (test_bit(MODULE_SUBSYS_GPU, &info->subsys) && + parse_option_str(sig_enforce_subsys, "gpu")) + goto err; + + if (test_bit(MODULE_SUBSYS_BLOCK, &info->subsys) && + parse_option_str(sig_enforce_subsys, "block")) + goto err; + + if (test_bit(MODULE_SUBSYS_NET, 
&info->subsys) && + parse_option_str(sig_enforce_subsys, "net")) + goto err; + + return 0; +err: + pr_notice("%s: Loading is rejected, because of wrong signature or key missing!\n", + info->name); + return -EKEYREJECTED; +} + /* * Export sig_enforce kernel cmdline parameter to allow other subsystems rely * on that instead of directly to CONFIG_MODULE_SIG_FORCE config. -- Gitee From 8e26939f964416bd1c42200f7550e87213fbc412 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Tue, 27 Feb 2024 23:42:01 +1300 Subject: [PATCH 1293/2138] mm: make folio_pte_batch available outside of mm/memory.c ANBZ: #9728 commit ac96cc4d1ceda01d08deda1e45b9f1b55b0624d2 upstream. madvise, mprotect and some others might need folio_pte_batch to check if a range of PTEs are completely mapped to a large folio with contiguous physical addresses. Let's make it available in mm/internal.h. While at it, add proper kernel doc and sanity-check more input parameters using two additional VM_WARN_ON_FOLIO(). [21cnbao@gmail.com: build fix] Link: https://lkml.kernel.org/r/CAGsJ_4wWzG-37D82vqP_zt+Fcbz+URVe5oXLBc4M5wbN8A_gpQ@mail.gmail.com [david@redhat.com: improve the doc for the exported func] Link: https://lkml.kernel.org/r/20240227104201.337988-1-21cnbao@gmail.com Signed-off-by: David Hildenbrand Signed-off-by: Barry Song Suggested-by: David Hildenbrand Reviewed-by: Ryan Roberts Acked-by: David Hildenbrand Cc: Lance Yang Cc: Yin Fengwei Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3825 --- mm/internal.h | 92 +++++++++++++++++++++++++++++++++++++++++++++++++++ mm/memory.c | 76 ------------------------------------------ 2 files changed, 92 insertions(+), 76 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index ce29c2e837b4..10d798d6be51 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -127,6 +127,98 @@ static inline void vma_close(struct vm_area_struct *vma) vma->vm_ops = &vma_dummy_vm_ops; } } 
+#ifdef CONFIG_MMU + +/* Flags for folio_pte_batch(). */ +typedef int __bitwise fpb_t; + +/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */ +#define FPB_IGNORE_DIRTY ((__force fpb_t)BIT(0)) + +/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. */ +#define FPB_IGNORE_SOFT_DIRTY ((__force fpb_t)BIT(1)) + +static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags) +{ + if (flags & FPB_IGNORE_DIRTY) + pte = pte_mkclean(pte); + if (likely(flags & FPB_IGNORE_SOFT_DIRTY)) + pte = pte_clear_soft_dirty(pte); + return pte_wrprotect(pte_mkold(pte)); +} + +/** + * folio_pte_batch - detect a PTE batch for a large folio + * @folio: The large folio to detect a PTE batch for. + * @addr: The user virtual address the first page is mapped at. + * @start_ptep: Page table pointer for the first entry. + * @pte: Page table entry for the first page. + * @max_nr: The maximum number of table entries to consider. + * @flags: Flags to modify the PTE batch semantics. + * @any_writable: Optional pointer to indicate whether any entry except the + * first one is writable. + * + * Detect a PTE batch: consecutive (present) PTEs that map consecutive + * pages of the same large folio. + * + * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN, + * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and + * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY). + * + * start_ptep must map any page of the folio. max_nr must be at least one and + * must be limited by the caller so scanning cannot exceed a single page table. + * + * Return: the number of table entries in the batch. 
+ */ +static inline int folio_pte_batch(struct folio *folio, unsigned long addr, + pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags, + bool *any_writable) +{ + unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio); + const pte_t *end_ptep = start_ptep + max_nr; + pte_t expected_pte, *ptep; + bool writable; + int nr; + + if (any_writable) + *any_writable = false; + + VM_WARN_ON_FOLIO(!pte_present(pte), folio); + VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio); + VM_WARN_ON_FOLIO(page_folio(pfn_to_page(pte_pfn(pte))) != folio, folio); + + nr = pte_batch_hint(start_ptep, pte); + expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags); + ptep = start_ptep + nr; + + while (ptep < end_ptep) { + pte = ptep_get(ptep); + if (any_writable) + writable = !!pte_write(pte); + pte = __pte_batch_clear_ignored(pte, flags); + + if (!pte_same(pte, expected_pte)) + break; + + /* + * Stop immediately once we reached the end of the folio. In + * corner cases the next PFN might fall into a different + * folio. + */ + if (pte_pfn(pte) >= folio_end_pfn) + break; + + if (any_writable) + *any_writable |= writable; + + nr = pte_batch_hint(ptep, pte); + expected_pte = pte_advance_pfn(expected_pte, nr); + ptep += nr; + } + + return min(ptep - start_ptep, max_nr); +} +#endif /* CONFIG_MMU */ void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, int nr_throttled); diff --git a/mm/memory.c b/mm/memory.c index 49b5e8f4d626..3a4b0a4f1df6 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -952,82 +952,6 @@ static __always_inline void __copy_present_ptes(struct vm_area_struct *dst_vma, set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr); } -/* Flags for folio_pte_batch(). */ -typedef int __bitwise fpb_t; - -/* Compare PTEs after pte_mkclean(), ignoring the dirty bit. */ -#define FPB_IGNORE_DIRTY ((__force fpb_t)BIT(0)) - -/* Compare PTEs after pte_clear_soft_dirty(), ignoring the soft-dirty bit. 
*/ -#define FPB_IGNORE_SOFT_DIRTY ((__force fpb_t)BIT(1)) - -static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags) -{ - if (flags & FPB_IGNORE_DIRTY) - pte = pte_mkclean(pte); - if (likely(flags & FPB_IGNORE_SOFT_DIRTY)) - pte = pte_clear_soft_dirty(pte); - return pte_wrprotect(pte_mkold(pte)); -} - -/* - * Detect a PTE batch: consecutive (present) PTEs that map consecutive - * pages of the same folio. - * - * All PTEs inside a PTE batch have the same PTE bits set, excluding the PFN, - * the accessed bit, writable bit, dirty bit (with FPB_IGNORE_DIRTY) and - * soft-dirty bit (with FPB_IGNORE_SOFT_DIRTY). - * - * If "any_writable" is set, it will indicate if any other PTE besides the - * first (given) PTE is writable. - */ -static inline int folio_pte_batch(struct folio *folio, unsigned long addr, - pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags, - bool *any_writable) -{ - unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio); - const pte_t *end_ptep = start_ptep + max_nr; - pte_t expected_pte, *ptep; - bool writable; - int nr; - - if (any_writable) - *any_writable = false; - - VM_WARN_ON_FOLIO(!pte_present(pte), folio); - - nr = pte_batch_hint(start_ptep, pte); - expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags); - ptep = start_ptep + nr; - - while (ptep < end_ptep) { - pte = ptep_get(ptep); - if (any_writable) - writable = !!pte_write(pte); - pte = __pte_batch_clear_ignored(pte, flags); - - if (!pte_same(pte, expected_pte)) - break; - - /* - * Stop immediately once we reached the end of the folio. In - * corner cases the next PFN might fall into a different - * folio. 
- */ - if (pte_pfn(pte) >= folio_end_pfn) - break; - - if (any_writable) - *any_writable |= writable; - - nr = pte_batch_hint(ptep, pte); - expected_pte = pte_advance_pfn(expected_pte, nr); - ptep += nr; - } - - return min(ptep - start_ptep, max_nr); -} - /* * Copy one present PTE, trying to batch-process subsequent PTEs that map * consecutive pages of the same folio by copying them as well. -- Gitee From b49a541b18434b63ae2d1dfa9d762001f1662c42 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 8 Apr 2024 19:39:40 +0100 Subject: [PATCH 1294/2138] mm: swap: remove CLUSTER_FLAG_HUGE from swap_cluster_info:flags ANBZ: #9728 commit d7d0d389ff90644546ffcb8e15ea3ccaf6138958 upstream. Patch series "Swap-out mTHP without splitting", v7. This series adds support for swapping out multi-size THP (mTHP) without needing to first split the large folio via split_huge_page_to_list_to_order(). It closely follows the approach already used to swap-out PMD-sized THP. There are a couple of reasons for swapping out mTHP without splitting: - Performance: It is expensive to split a large folio and under extreme memory pressure some workloads regressed performance when using 64K mTHP vs 4K small folios because of this extra cost in the swap-out path. This series not only eliminates the regression but makes it faster to swap out 64K mTHP vs 4K small folios. - Memory fragmentation avoidance: If we can avoid splitting a large folio memory is less likely to become fragmented, making it easier to re-allocate a large folio in future. - Performance: Enables a separate series [7] to swap-in whole mTHPs, which means we won't lose the TLB-efficiency benefits of mTHP once the memory has been through a swap cycle. I've done what I thought was the smallest change possible, and as a result, this approach is only employed when the swap is backed by a non-rotating block device (just as PMD-sized THP is supported today). Discussion against the RFC concluded that this is sufficient. 
Performance Testing =================== I've run some swap performance tests on Ampere Altra VM (arm64) with 8 CPUs. The VM is set up with a 35G block ram device as the swap device and the test is run from inside a memcg limited to 40G memory. I've then run `usemem` from vm-scalability with 70 processes, each allocating and writing 1G of memory. I've repeated everything 6 times and taken the mean performance improvement relative to 4K page baseline: | alloc size | baseline | + this series | | | mm-unstable (~v6.9-rc1) | | |:-----------|------------------------:|------------------------:| | 4K Page | 0.0% | 1.3% | | 64K THP | -13.6% | 46.3% | | 2M THP | 91.4% | 89.6% | So with this change, the 64K swap performance goes from a 14% regression to a 46% improvement. While 2M shows a small regression I'm confident that this is just noise. [1] https://lore.kernel.org/linux-mm/20231010142111.3997780-1-ryan.roberts@arm.com/ [2] https://lore.kernel.org/linux-mm/20231017161302.2518826-1-ryan.roberts@arm.com/ [3] https://lore.kernel.org/linux-mm/20231025144546.577640-1-ryan.roberts@arm.com/ [4] https://lore.kernel.org/linux-mm/20240311150058.1122862-1-ryan.roberts@arm.com/ [5] https://lore.kernel.org/linux-mm/20240327144537.4165578-1-ryan.roberts@arm.com/ [6] https://lore.kernel.org/linux-mm/20240403114032.1162100-1-ryan.roberts@arm.com/ [7] https://lore.kernel.org/linux-mm/20240304081348.197341-1-21cnbao@gmail.com/ [8] https://lore.kernel.org/linux-mm/CAGsJ_4yMOow27WDvN2q=E4HAtDd2PJ=OQ5Pj9DG+6FLWwNuXUw@mail.gmail.com/ [9] https://lore.kernel.org/linux-mm/579d5127-c763-4001-9625-4563a9316ac3@redhat.com/ This patch (of 7): As preparation for supporting small-sized THP in the swap-out path, without first needing to split to order-0, Remove the CLUSTER_FLAG_HUGE, which, when present, always implies PMD-sized THP, which is the same as the cluster size. 
The only use of the flag was to determine whether a swap entry refers to a single page or a PMD-sized THP in swap_page_trans_huge_swapped(). Instead of relying on the flag, we now pass in order, which originates from the folio's order. This allows the logic to work for folios of any order. The one snag is that one of the swap_page_trans_huge_swapped() call sites does not have the folio. But it was only being called there to shortcut a call __try_to_reclaim_swap() in some cases. __try_to_reclaim_swap() gets the folio and (via some other functions) calls swap_page_trans_huge_swapped(). So I've removed the problematic call site and believe the new logic should be functionally equivalent. That said, removing the fast path means that we will take a reference and trylock a large folio much more often, which we would like to avoid. The next patch will solve this. Removing CLUSTER_FLAG_HUGE also means we can remove split_swap_cluster() which used to be called during folio splitting, since split_swap_cluster()'s only job was to remove the flag. 
Link: https://lkml.kernel.org/r/20240408183946.2991168-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20240408183946.2991168-2-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: "Huang, Ying" Acked-by: Chris Li Acked-by: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Gao Xiang Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Yang Shi Cc: Yu Zhao Cc: Barry Song Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3825 --- include/linux/swap.h | 10 ---------- mm/huge_memory.c | 3 --- mm/swapfile.c | 47 ++++++++------------------------------------ 3 files changed, 8 insertions(+), 52 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index d7f44a91dec1..6f8eb804ef7e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -259,7 +259,6 @@ struct swap_cluster_info { }; #define CLUSTER_FLAG_FREE 1 /* This cluster is free */ #define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */ -#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */ /* * We assign a cluster to each CPU, so each CPU can allocate swap entry from @@ -605,15 +604,6 @@ static inline int add_swap_extent(struct swap_info_struct *sis, } #endif /* CONFIG_SWAP */ -#ifdef CONFIG_THP_SWAP -extern int split_swap_cluster(swp_entry_t entry); -#else -static inline int split_swap_cluster(swp_entry_t entry) -{ - return 0; -} -#endif - #ifdef CONFIG_MEMCG static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index f22a139d53c6..207d13f92185 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2760,9 +2760,6 @@ static void __split_huge_page(struct page *page, struct list_head *list, shmem_uncharge(head->mapping->host, nr_dropped); remap_page(folio, nr); - if (folio_test_swapcache(folio)) - split_swap_cluster(folio->swap); - for (i = 0; 
i < nr; i++) { struct page *subpage = head + i; if (subpage == page) diff --git a/mm/swapfile.c b/mm/swapfile.c index d59faced1133..c7e2488e832c 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -342,18 +342,6 @@ static inline void cluster_set_null(struct swap_cluster_info *info) info->data = 0; } -static inline bool cluster_is_huge(struct swap_cluster_info *info) -{ - if (IS_ENABLED(CONFIG_THP_SWAP)) - return info->flags & CLUSTER_FLAG_HUGE; - return false; -} - -static inline void cluster_clear_huge(struct swap_cluster_info *info) -{ - info->flags &= ~CLUSTER_FLAG_HUGE; -} - static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, unsigned long offset) { @@ -1021,7 +1009,7 @@ static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot) offset = idx * SWAPFILE_CLUSTER; ci = lock_cluster(si, offset); alloc_cluster(si, idx); - cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE); + cluster_set_count(ci, SWAPFILE_CLUSTER); memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER); unlock_cluster(ci); @@ -1359,7 +1347,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) ci = lock_cluster_or_swap_info(si, offset); if (size == SWAPFILE_CLUSTER) { - VM_BUG_ON(!cluster_is_huge(ci)); map = si->swap_map + offset; for (i = 0; i < SWAPFILE_CLUSTER; i++) { val = map[i]; @@ -1367,7 +1354,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) if (val == SWAP_HAS_CACHE) free_entries++; } - cluster_clear_huge(ci); if (free_entries == SWAPFILE_CLUSTER) { unlock_cluster_or_swap_info(si, ci); spin_lock(&si->lock); @@ -1389,23 +1375,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) unlock_cluster_or_swap_info(si, ci); } -#ifdef CONFIG_THP_SWAP -int split_swap_cluster(swp_entry_t entry) -{ - struct swap_info_struct *si; - struct swap_cluster_info *ci; - unsigned long offset = swp_offset(entry); - - si = _swap_info_get(entry); - if (!si) - return -EBUSY; - ci = lock_cluster(si, offset); - 
cluster_clear_huge(ci); - unlock_cluster(ci); - return 0; -} -#endif - static int swp_entry_cmp(const void *ent1, const void *ent2) { const swp_entry_t *e1 = ent1, *e2 = ent2; @@ -1513,22 +1482,23 @@ int swp_swapcount(swp_entry_t entry) } static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, - swp_entry_t entry) + swp_entry_t entry, int order) { struct swap_cluster_info *ci; unsigned char *map = si->swap_map; + unsigned int nr_pages = 1 << order; unsigned long roffset = swp_offset(entry); - unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER); + unsigned long offset = round_down(roffset, nr_pages); int i; bool ret = false; ci = lock_cluster_or_swap_info(si, offset); - if (!ci || !cluster_is_huge(ci)) { + if (!ci || nr_pages == 1) { if (swap_count(map[roffset])) ret = true; goto unlock_out; } - for (i = 0; i < SWAPFILE_CLUSTER; i++) { + for (i = 0; i < nr_pages; i++) { if (swap_count(map[offset + i])) { ret = true; break; @@ -1550,7 +1520,7 @@ static bool folio_swapped(struct folio *folio) if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio))) return swap_swapcount(si, entry) != 0; - return swap_page_trans_huge_swapped(si, entry); + return swap_page_trans_huge_swapped(si, entry, folio_order(folio)); } /** @@ -1616,8 +1586,7 @@ int free_swap_and_cache(swp_entry_t entry) } count = __swap_entry_free(p, entry); - if (count == SWAP_HAS_CACHE && - !swap_page_trans_huge_swapped(p, entry)) + if (count == SWAP_HAS_CACHE) __try_to_reclaim_swap(p, swp_offset(entry), TTRS_UNMAPPED | TTRS_FULL); put_swap_device(p); -- Gitee From df7fc187b1b71ba864fb7474507055de3c383ee1 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 8 Apr 2024 19:39:41 +0100 Subject: [PATCH 1295/2138] mm: swap: free_swap_and_cache_nr() as batched free_swap_and_cache() ANBZ: #9728 commit a62fb92ac12ed39df4930dca599a3b427552882a upstream. 
Now that we no longer have a convenient flag in the cluster to determine if a folio is large, free_swap_and_cache() will take a reference and lock a large folio much more often, which could lead to contention and (e.g.) failure to split large folios, etc. Let's solve that problem by batch freeing swap and cache with a new function, free_swap_and_cache_nr(), to free a contiguous range of swap entries together. This allows us to first drop a reference to each swap slot before we try to release the cache folio. This means we only try to release the folio once, only taking the reference and lock once - much better than the previous 512 times for the 2M THP case. Contiguous swap entries are gathered in zap_pte_range() and madvise_free_pte_range() in a similar way to how present ptes are already gathered in zap_pte_range(). While we are at it, let's simplify by converting the return type of both functions to void. The return value was used only by zap_pte_range() to print a bad pte, and was ignored by everyone else, so the extra reporting wasn't exactly guaranteed. We will still get the warning with most of the information from get_swap_device(). With the batch version, we wouldn't know which pte was bad anyway so could print the wrong one. 
[ryan.roberts@arm.com: fix a build warning on parisc] Link: https://lkml.kernel.org/r/20240409111840.3173122-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20240408183946.2991168-3-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Barry Song Cc: Chris Li Cc: Gao Xiang Cc: "Huang, Ying" Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3825 --- include/linux/pgtable.h | 29 ++++++++++++ include/linux/swap.h | 12 +++-- mm/internal.h | 64 +++++++++++++++++++++++++++ mm/madvise.c | 12 +++-- mm/memory.c | 13 +++--- mm/swapfile.c | 97 +++++++++++++++++++++++++++++++++-------- 6 files changed, 196 insertions(+), 31 deletions(-) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 4a8a984b9054..186c1bbd3a5f 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -701,6 +701,35 @@ static inline void pte_clear_not_present_full(struct mm_struct *mm, } #endif +#ifndef clear_not_present_full_ptes +/** + * clear_not_present_full_ptes - Clear multiple not present PTEs which are + * consecutive in the pgtable. + * @mm: Address space the ptes represent. + * @addr: Address of the first pte. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to clear. + * @full: Whether we are clearing a full mm. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over pte_clear_not_present_full(). + * + * Context: The caller holds the page table lock. The PTEs are all not present. + * The PTEs are all in the same PMD. 
+ */ +static inline void clear_not_present_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, unsigned int nr, int full) +{ + for (;;) { + pte_clear_not_present_full(mm, addr, ptep, full); + if (--nr == 0) + break; + ptep++; + addr += PAGE_SIZE; + } +} +#endif + #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH extern pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, diff --git a/include/linux/swap.h b/include/linux/swap.h index 6f8eb804ef7e..e3781e297bd4 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -485,7 +485,7 @@ extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t); extern void swap_free(swp_entry_t); extern void swapcache_free_entries(swp_entry_t *entries, int n); -extern int free_swap_and_cache(swp_entry_t); +extern void free_swap_and_cache_nr(swp_entry_t entry, int nr); int swap_type_of(dev_t device, sector_t offset); int find_first_swap(dev_t *device); extern unsigned int count_swap_pages(int, int); @@ -535,8 +535,9 @@ static inline void put_swap_device(struct swap_info_struct *si) #define free_pages_and_swap_cache(pages, nr) \ release_pages((pages), (nr)); -/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */ -#define free_swap_and_cache(e) is_pfn_swap_entry(e) +static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr) +{ +} static inline void free_swap_cache(struct page *page) { @@ -604,6 +605,11 @@ static inline int add_swap_extent(struct swap_info_struct *sis, } #endif /* CONFIG_SWAP */ +static inline void free_swap_and_cache(swp_entry_t entry) +{ + free_swap_and_cache_nr(entry, 1); +} + #ifdef CONFIG_MEMCG static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) { diff --git a/mm/internal.h b/mm/internal.h index 10d798d6be51..8e5db8a11896 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -11,6 +11,8 @@ #include #include #include +#include +#include #include struct folio_batch; @@ -218,6 +220,68 @@ static inline int folio_pte_batch(struct folio 
*folio, unsigned long addr, return min(ptep - start_ptep, max_nr); } + +/** + * pte_next_swp_offset - Increment the swap entry offset field of a swap pte. + * @pte: The initial pte state; is_swap_pte(pte) must be true and + * non_swap_entry() must be false. + * + * Increments the swap offset, while maintaining all other fields, including + * swap type, and any swp pte bits. The resulting pte is returned. + */ +static inline pte_t pte_next_swp_offset(pte_t pte) +{ + swp_entry_t entry = pte_to_swp_entry(pte); + pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry), + (swp_offset(entry) + 1))); + + if (pte_swp_soft_dirty(pte)) + new = pte_swp_mksoft_dirty(new); + if (pte_swp_exclusive(pte)) + new = pte_swp_mkexclusive(new); + if (pte_swp_uffd_wp(pte)) + new = pte_swp_mkuffd_wp(new); + + return new; +} + +/** + * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries + * @start_ptep: Page table pointer for the first entry. + * @max_nr: The maximum number of table entries to consider. + * @pte: Page table entry for the first entry. + * + * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs + * containing swap entries all with consecutive offsets and targeting the same + * swap type, all with matching swp pte bits. + * + * max_nr must be at least one and must be limited by the caller so scanning + * cannot exceed a single page table. + * + * Return: the number of table entries in the batch. 
+ */ +static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte) +{ + pte_t expected_pte = pte_next_swp_offset(pte); + const pte_t *end_ptep = start_ptep + max_nr; + pte_t *ptep = start_ptep + 1; + + VM_WARN_ON(max_nr < 1); + VM_WARN_ON(!is_swap_pte(pte)); + VM_WARN_ON(non_swap_entry(pte_to_swp_entry(pte))); + + while (ptep < end_ptep) { + pte = ptep_get(ptep); + + if (!pte_same(pte, expected_pte)) + break; + + expected_pte = pte_next_swp_offset(expected_pte); + ptep++; + } + + return ptep - start_ptep; +} #endif /* CONFIG_MMU */ void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, diff --git a/mm/madvise.c b/mm/madvise.c index 68b727918ade..bc7e6689db4c 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -634,6 +634,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, struct folio *folio; int nr_swap = 0; unsigned long next; + int nr, max_nr; next = pmd_addr_end(addr, end); if (pmd_trans_huge(*pmd)) @@ -646,7 +647,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, return 0; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); - for (; addr != end; pte++, addr += PAGE_SIZE) { + for (; addr != end; pte += nr, addr += PAGE_SIZE * nr) { + nr = 1; ptent = ptep_get(pte); if (pte_none(ptent)) @@ -661,9 +663,11 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, entry = pte_to_swp_entry(ptent); if (!non_swap_entry(entry)) { - nr_swap--; - free_swap_and_cache(entry); - pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); + max_nr = (end - addr) / PAGE_SIZE; + nr = swap_pte_batch(pte, max_nr, ptent); + nr_swap -= nr; + free_swap_and_cache_nr(entry, nr); + clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm); } else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) { pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); diff --git a/mm/memory.c b/mm/memory.c index 3a4b0a4f1df6..556ab033c0df 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1630,12 +1630,13 @@ static 
unsigned long zap_pte_range(struct mmu_gather *tlb, folio_remove_rmap_pte(folio, page, vma); folio_put(folio); } else if (!non_swap_entry(entry)) { - /* Genuine swap entry, hence a private anon page */ + max_nr = (end - addr) / PAGE_SIZE; + nr = swap_pte_batch(pte, max_nr, ptent); + /* Genuine swap entries, hence a private anon pages */ if (!should_zap_cows(details)) continue; - rss[MM_SWAPENTS]--; - if (unlikely(!free_swap_and_cache(entry))) - print_bad_pte(vma, addr, ptent, NULL); + rss[MM_SWAPENTS] -= nr; + free_swap_and_cache_nr(entry, nr); } else if (is_migration_entry(entry)) { page = pfn_swap_entry_to_page(entry); folio = page_folio(page); @@ -1658,8 +1659,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, /* We should have covered all the swap entry types */ WARN_ON_ONCE(1); } - pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); - zap_install_uffd_wp_if_needed(vma, addr, pte, 1, details, ptent); + clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm); + zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent); } while (pte += nr, addr += PAGE_SIZE * nr, addr != end); add_mm_rss_vec(mm, rss); diff --git a/mm/swapfile.c b/mm/swapfile.c index c7e2488e832c..20a455beda1e 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -129,7 +129,11 @@ static inline unsigned char swap_count(unsigned char ent) /* Reclaim the swap entry if swap is getting full*/ #define TTRS_FULL 0x4 -/* returns 1 if swap entry is freed */ +/* + * returns number of pages in the folio that backs the swap entry. If positive, + * the folio was reclaimed. If negative, the folio was not reclaimed. If 0, no + * folio was associated with the swap entry. + */ static int __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset, unsigned long flags) { @@ -154,6 +158,7 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, ret = folio_free_swap(folio); folio_unlock(folio); } + ret = ret ? 
folio_nr_pages(folio) : -folio_nr_pages(folio); folio_put(folio); return ret; } @@ -889,7 +894,7 @@ static int scan_swap_map_slots(struct swap_info_struct *si, swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); spin_lock(&si->lock); /* entry was freed successfully, try to use this again */ - if (swap_was_freed) + if (swap_was_freed > 0) goto checks; goto scan; /* check next one */ } @@ -1566,32 +1571,88 @@ bool folio_free_swap(struct folio *folio) return true; } -/* - * Free the swap entry like above, but also try to - * free the page cache entry if it is the last user. +/** + * free_swap_and_cache_nr() - Release reference on range of swap entries and + * reclaim their cache if no more references remain. + * @entry: First entry of range. + * @nr: Number of entries in range. + * + * For each swap entry in the contiguous range, release a reference. If any swap + * entries become free, try to reclaim their underlying folios, if present. The + * offset range is defined by [entry.offset, entry.offset + nr). */ -int free_swap_and_cache(swp_entry_t entry) +void free_swap_and_cache_nr(swp_entry_t entry, int nr) { - struct swap_info_struct *p; + const unsigned long start_offset = swp_offset(entry); + const unsigned long end_offset = start_offset + nr; + unsigned int type = swp_type(entry); + struct swap_info_struct *si; + bool any_only_cache = false; + unsigned long offset; unsigned char count; if (non_swap_entry(entry)) - return 1; + return; - p = get_swap_device(entry); - if (p) { - if (WARN_ON(data_race(!p->swap_map[swp_offset(entry)]))) { - put_swap_device(p); - return 0; + si = get_swap_device(entry); + if (!si) + return; + + if (WARN_ON(end_offset > si->max)) + goto out; + + /* + * First free all entries in the range. 
+ */ + for (offset = start_offset; offset < end_offset; offset++) { + if (data_race(si->swap_map[offset])) { + count = __swap_entry_free(si, swp_entry(type, offset)); + if (count == SWAP_HAS_CACHE) + any_only_cache = true; + } else { + WARN_ON_ONCE(1); } + } + + /* + * Short-circuit the below loop if none of the entries had their + * reference drop to zero. + */ + if (!any_only_cache) + goto out; - count = __swap_entry_free(p, entry); - if (count == SWAP_HAS_CACHE) - __try_to_reclaim_swap(p, swp_offset(entry), + /* + * Now go back over the range trying to reclaim the swap cache. This is + * more efficient for large folios because we will only try to reclaim + * the swap once per folio in the common case. If we do + * __swap_entry_free() and __try_to_reclaim_swap() in the same loop, the + * latter will get a reference and lock the folio for every individual + * page but will only succeed once the swap slot for every subpage is + * zero. + */ + for (offset = start_offset; offset < end_offset; offset += nr) { + nr = 1; + if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { + /* + * Folios are always naturally aligned in swap so + * advance forward to the next boundary. Zero means no + * folio was found for the swap entry, so advance by 1 + * in this case. Negative value means folio was found + * but could not be reclaimed. Here we can still advance + * to the next boundary. + */ + nr = __try_to_reclaim_swap(si, offset, TTRS_UNMAPPED | TTRS_FULL); - put_swap_device(p); + if (nr == 0) + nr = 1; + else if (nr < 0) + nr = -nr; + nr = ALIGN(offset + 1, nr) - offset; + } } - return p != NULL; + +out: + put_swap_device(si); } #ifdef CONFIG_HIBERNATION -- Gitee From a01013f19e2e3aa5c6b1c39e58b0617a296cd970 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 8 Apr 2024 19:39:42 +0100 Subject: [PATCH 1296/2138] mm: swap: simplify struct percpu_cluster ANBZ: #9728 commit 14c62da21b2b865f4fc0c49edd74ed7299927d35 upstream. 
struct percpu_cluster stores the index of cpu's current cluster and the offset of the next entry that will be allocated for the cpu. These two pieces of information are redundant because the cluster index is just (offset / SWAPFILE_CLUSTER). The only reason for explicitly keeping the cluster index is because the structure used for it also has a flag to indicate "no cluster". However this data structure also contains a spin lock, which is never used in this context, as a side effect the code copies the spinlock_t structure, which is questionable coding practice in my view. So let's clean this up and store only the next offset, and use a sentinel value (SWAP_NEXT_INVALID) to indicate "no cluster". SWAP_NEXT_INVALID is chosen to be 0, because 0 will never be seen legitimately; The first page in the swap file is the swap header, which is always marked bad to prevent it from being allocated as an entry. This also prevents the cluster to which it belongs being marked free, so it will never appear on the free list. This change saves 16 bytes per cpu. And given we are shortly going to extend this mechanism to be per-cpu-AND-per-order, we will end up saving 16 * 9 = 144 bytes per cpu, which adds up if you have 256 cpus in the system. 
Link: https://lkml.kernel.org/r/20240408183946.2991168-4-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: "Huang, Ying" Cc: Barry Song <21cnbao@gmail.com> Cc: Barry Song Cc: Chris Li Cc: David Hildenbrand Cc: Gao Xiang Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3825 --- include/linux/swap.h | 9 ++++++++- mm/swapfile.c | 22 +++++++++++----------- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index e3781e297bd4..981bfeea9785 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -260,13 +260,20 @@ struct swap_cluster_info { #define CLUSTER_FLAG_FREE 1 /* This cluster is free */ #define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */ +/* + * The first page in the swap file is the swap header, which is always marked + * bad to prevent it from being allocated as an entry. This also prevents the + * cluster to which it belongs being marked free. Therefore 0 is safe to use as + * a sentinel to indicate next is not valid in percpu_cluster. + */ +#define SWAP_NEXT_INVALID 0 + /* * We assign a cluster to each CPU, so each CPU can allocate swap entry from * its own cluster and swapout sequentially. The purpose is to optimize swapout * throughput. 
*/ struct percpu_cluster { - struct swap_cluster_info index; /* Current cluster index */ unsigned int next; /* Likely next allocation offset */ }; diff --git a/mm/swapfile.c b/mm/swapfile.c index 20a455beda1e..0562bab703f6 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -608,7 +608,7 @@ scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, return false; percpu_cluster = this_cpu_ptr(si->percpu_cluster); - cluster_set_null(&percpu_cluster->index); + percpu_cluster->next = SWAP_NEXT_INVALID; return true; } @@ -621,14 +621,14 @@ static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, { struct percpu_cluster *cluster; struct swap_cluster_info *ci; - unsigned long tmp, max; + unsigned int tmp, max; new_cluster: cluster = this_cpu_ptr(si->percpu_cluster); - if (cluster_is_null(&cluster->index)) { + tmp = cluster->next; + if (tmp == SWAP_NEXT_INVALID) { if (!cluster_list_empty(&si->free_clusters)) { - cluster->index = si->free_clusters.head; - cluster->next = cluster_next(&cluster->index) * + tmp = cluster_next(&si->free_clusters.head) * SWAPFILE_CLUSTER; } else if (!cluster_list_empty(&si->discard_clusters)) { /* @@ -648,9 +648,7 @@ static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, * Other CPUs can use our cluster if they can't find a free cluster, * check if there is still free entry in the cluster */ - tmp = cluster->next; - max = min_t(unsigned long, si->max, - (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER); + max = min_t(unsigned long, si->max, ALIGN(tmp + 1, SWAPFILE_CLUSTER)); if (tmp < max) { ci = lock_cluster(si, tmp); while (tmp < max) { @@ -661,12 +659,13 @@ static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, unlock_cluster(ci); } if (tmp >= max) { - cluster_set_null(&cluster->index); + cluster->next = SWAP_NEXT_INVALID; goto new_cluster; } - cluster->next = tmp + 1; *offset = tmp; *scan_base = tmp; + tmp += 1; + cluster->next = tmp < max ? 
tmp : SWAP_NEXT_INVALID; return true; } @@ -3153,8 +3152,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) } for_each_possible_cpu(cpu) { struct percpu_cluster *cluster; + cluster = per_cpu_ptr(p->percpu_cluster, cpu); - cluster_set_null(&cluster->index); + cluster->next = SWAP_NEXT_INVALID; } } else { atomic_inc(&nr_rotate_swap); -- Gitee From 4b373d5bc7eb214bfc748d5d4f065809cdfdf969 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 8 Apr 2024 19:39:43 +0100 Subject: [PATCH 1297/2138] mm: swap: update get_swap_pages() to take folio order ANBZ: #9728 commit 9faaa0f8168bfcd81469b0724b25ba3093097a08 upstream. We are about to allow swap storage of any mTHP size. To prepare for that, let's change get_swap_pages() to take a folio order parameter instead of nr_pages. This makes the interface self-documenting; a power-of-2 number of pages must be provided. We will also need the order internally so this simplifies accessing it. Link: https://lkml.kernel.org/r/20240408183946.2991168-5-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: "Huang, Ying" Reviewed-by: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Barry Song Cc: Chris Li Cc: Gao Xiang Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3825 --- include/linux/swap.h | 2 +- mm/swap_slots.c | 6 +++--- mm/swapfile.c | 13 +++++++------ 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 981bfeea9785..8ede96bad860 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -485,7 +485,7 @@ swp_entry_t folio_alloc_swap(struct folio *folio); bool folio_free_swap(struct folio *folio); void put_swap_folio(struct folio *folio, swp_entry_t entry); extern swp_entry_t get_swap_page_of_type(int); -extern int 
get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size); +extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order); extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern void swap_shmem_alloc(swp_entry_t); extern int swap_duplicate(swp_entry_t); diff --git a/mm/swap_slots.c b/mm/swap_slots.c index 0bec1f705f8e..dceef07c7a5d 100644 --- a/mm/swap_slots.c +++ b/mm/swap_slots.c @@ -264,7 +264,7 @@ static int refill_swap_slots_cache(struct swap_slots_cache *cache) cache->cur = 0; if (swap_slot_cache_active) cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, - cache->slots, 1); + cache->slots, 0); return cache->nr; } @@ -308,7 +308,7 @@ swp_entry_t folio_alloc_swap(struct folio *folio) if (folio_test_large(folio)) { if (IS_ENABLED(CONFIG_THP_SWAP) && arch_thp_swp_supported()) - get_swap_pages(1, &entry, folio_nr_pages(folio)); + get_swap_pages(1, &entry, folio_order(folio)); goto out; } @@ -340,7 +340,7 @@ swp_entry_t folio_alloc_swap(struct folio *folio) goto out; } - get_swap_pages(1, &entry, 1); + get_swap_pages(1, &entry, 0); out: if (mem_cgroup_try_charge_swap(folio, entry)) { put_swap_folio(folio, entry); diff --git a/mm/swapfile.c b/mm/swapfile.c index 0562bab703f6..043e41483fb1 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -277,15 +277,15 @@ static void discard_swap_cluster(struct swap_info_struct *si, #ifdef CONFIG_THP_SWAP #define SWAPFILE_CLUSTER HPAGE_PMD_NR -#define swap_entry_size(size) (size) +#define swap_entry_order(order) (order) #else #define SWAPFILE_CLUSTER 256 /* - * Define swap_entry_size() as constant to let compiler to optimize + * Define swap_entry_order() as constant to let compiler to optimize * out some code if !CONFIG_THP_SWAP */ -#define swap_entry_size(size) 1 +#define swap_entry_order(order) 0 #endif #define LATENCY_LIMIT 256 @@ -1036,9 +1036,10 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx) swap_range_free(si, offset, SWAPFILE_CLUSTER); } -int get_swap_pages(int 
n_goal, swp_entry_t swp_entries[], int entry_size) +int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) { - unsigned long size = swap_entry_size(entry_size); + int order = swap_entry_order(entry_order); + unsigned long size = 1 << order; struct swap_info_struct *si, *next; long avail_pgs; int n_ret = 0; @@ -1343,7 +1344,7 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) unsigned char *map; unsigned int i, free_entries = 0; unsigned char val; - int size = swap_entry_size(folio_nr_pages(folio)); + int size = 1 << swap_entry_order(folio_order(folio)); si = _swap_info_get(entry); if (!si) -- Gitee From 822f2dc92b75c17e60b6fc7e03c0371480b86e0f Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 8 Apr 2024 19:39:44 +0100 Subject: [PATCH 1298/2138] mm: swap: allow storage of all mTHP orders ANBZ: #9728 commit 845982eb264bc64b0c3242ace217fb574f56a299 upstream. Multi-size THP enables performance improvements by allocating large, pte-mapped folios for anonymous memory. However I've observed that on an arm64 system running a parallel workload (e.g. kernel compilation) across many cores, under high memory pressure, the speed regresses. This is due to bottlenecking on the increased number of TLBIs added due to all the extra folio splitting when the large folios are swapped out. Therefore, solve this regression by adding support for swapping out mTHP without needing to split the folio, just like is already done for PMD-sized THP. This change only applies when CONFIG_THP_SWAP is enabled, and when the swap backing store is a non-rotating block device. These are the same constraints as for the existing PMD-sized THP swap-out support. Note that no attempt is made to swap-in (m)THP here - this is still done page-by-page, like for PMD-sized THP. But swapping-out mTHP is a prerequisite for swapping-in mTHP. 
The main change here is to improve the swap entry allocator so that it can allocate any power-of-2 number of contiguous entries between [1, (1 << PMD_ORDER)]. This is done by allocating a cluster for each distinct order and allocating sequentially from it until the cluster is full. This ensures that we don't need to search the map and we get no fragmentation due to alignment padding for different orders in the cluster. If there is no current cluster for a given order, we attempt to allocate a free cluster from the list. If there are no free clusters, we fail the allocation and the caller can fall back to splitting the folio and allocate individual entries (as per existing PMD-sized THP fallback). The per-order current clusters are maintained per-cpu using the existing infrastructure. This is done to avoid interleaving pages from different tasks, which would prevent IO being batched. This is already done for the order-0 allocations so we follow the same pattern. As is done for order-0 per-cpu clusters, the scanner now can steal order-0 entries from any per-cpu-per-order reserved cluster. This ensures that when the swap file is getting full, space doesn't get tied up in the per-cpu reserves. This change only modifies swap to be able to accept any order mTHP. It doesn't change the callers to elide doing the actual split. That will be done in separate changes. 
Link: https://lkml.kernel.org/r/20240408183946.2991168-6-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: "Huang, Ying" Cc: Barry Song <21cnbao@gmail.com> Cc: Barry Song Cc: Chris Li Cc: David Hildenbrand Cc: Gao Xiang Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3825 --- include/linux/swap.h | 8 ++- mm/swapfile.c | 162 ++++++++++++++++++++++++------------------- 2 files changed, 98 insertions(+), 72 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 8ede96bad860..2089db1cab10 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -268,13 +268,19 @@ struct swap_cluster_info { */ #define SWAP_NEXT_INVALID 0 +#ifdef CONFIG_THP_SWAP +#define SWAP_NR_ORDERS (PMD_ORDER + 1) +#else +#define SWAP_NR_ORDERS 1 +#endif + /* * We assign a cluster to each CPU, so each CPU can allocate swap entry from * its own cluster and swapout sequentially. The purpose is to optimize swapout * throughput. */ struct percpu_cluster { - unsigned int next; /* Likely next allocation offset */ + unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */ }; struct swap_cluster_list { diff --git a/mm/swapfile.c b/mm/swapfile.c index 043e41483fb1..16cd196c04ac 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -550,10 +550,12 @@ static void free_cluster(struct swap_info_struct *si, unsigned long idx) /* * The cluster corresponding to page_nr will be used. The cluster will be - * removed from free cluster list and its usage counter will be increased. + * removed from free cluster list and its usage counter will be increased by + * count. 
*/ -static void inc_cluster_info_page(struct swap_info_struct *p, - struct swap_cluster_info *cluster_info, unsigned long page_nr) +static void add_cluster_info_page(struct swap_info_struct *p, + struct swap_cluster_info *cluster_info, unsigned long page_nr, + unsigned long count) { unsigned long idx = page_nr / SWAPFILE_CLUSTER; @@ -562,9 +564,19 @@ static void inc_cluster_info_page(struct swap_info_struct *p, if (cluster_is_free(&cluster_info[idx])) alloc_cluster(p, idx); - VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER); + VM_BUG_ON(cluster_count(&cluster_info[idx]) + count > SWAPFILE_CLUSTER); cluster_set_count(&cluster_info[idx], - cluster_count(&cluster_info[idx]) + 1); + cluster_count(&cluster_info[idx]) + count); +} + +/* + * The cluster corresponding to page_nr will be used. The cluster will be + * removed from free cluster list and its usage counter will be increased by 1. + */ +static void inc_cluster_info_page(struct swap_info_struct *p, + struct swap_cluster_info *cluster_info, unsigned long page_nr) +{ + add_cluster_info_page(p, cluster_info, page_nr, 1); } /* @@ -594,7 +606,7 @@ static void dec_cluster_info_page(struct swap_info_struct *p, */ static bool scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, - unsigned long offset) + unsigned long offset, int order) { struct percpu_cluster *percpu_cluster; bool conflict; @@ -608,24 +620,39 @@ scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, return false; percpu_cluster = this_cpu_ptr(si->percpu_cluster); - percpu_cluster->next = SWAP_NEXT_INVALID; + percpu_cluster->next[order] = SWAP_NEXT_INVALID; + return true; +} + +static inline bool swap_range_empty(char *swap_map, unsigned int start, + unsigned int nr_pages) +{ + unsigned int i; + + for (i = 0; i < nr_pages; i++) { + if (swap_map[start + i]) + return false; + } + return true; } /* - * Try to get a swap entry from current cpu's swap entry pool (a cluster). 
This - * might involve allocating a new cluster for current CPU too. + * Try to get swap entries with specified order from current cpu's swap entry + * pool (a cluster). This might involve allocating a new cluster for current CPU + * too. */ static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, - unsigned long *offset, unsigned long *scan_base) + unsigned long *offset, unsigned long *scan_base, int order) { + unsigned int nr_pages = 1 << order; struct percpu_cluster *cluster; struct swap_cluster_info *ci; unsigned int tmp, max; new_cluster: cluster = this_cpu_ptr(si->percpu_cluster); - tmp = cluster->next; + tmp = cluster->next[order]; if (tmp == SWAP_NEXT_INVALID) { if (!cluster_list_empty(&si->free_clusters)) { tmp = cluster_next(&si->free_clusters.head) * @@ -646,26 +673,27 @@ static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, /* * Other CPUs can use our cluster if they can't find a free cluster, - * check if there is still free entry in the cluster + * check if there is still free entry in the cluster, maintaining + * natural alignment. */ max = min_t(unsigned long, si->max, ALIGN(tmp + 1, SWAPFILE_CLUSTER)); if (tmp < max) { ci = lock_cluster(si, tmp); while (tmp < max) { - if (!si->swap_map[tmp]) + if (swap_range_empty(si->swap_map, tmp, nr_pages)) break; - tmp++; + tmp += nr_pages; } unlock_cluster(ci); } if (tmp >= max) { - cluster->next = SWAP_NEXT_INVALID; + cluster->next[order] = SWAP_NEXT_INVALID; goto new_cluster; } *offset = tmp; *scan_base = tmp; - tmp += 1; - cluster->next = tmp < max ? tmp : SWAP_NEXT_INVALID; + tmp += nr_pages; + cluster->next[order] = tmp < max ? 
tmp : SWAP_NEXT_INVALID; return true; } @@ -790,13 +818,14 @@ static bool swap_offset_available_and_locked(struct swap_info_struct *si, static int scan_swap_map_slots(struct swap_info_struct *si, unsigned char usage, int nr, - swp_entry_t slots[]) + swp_entry_t slots[], int order) { struct swap_cluster_info *ci; unsigned long offset; unsigned long scan_base; unsigned long last_in_cluster = 0; int latency_ration = LATENCY_LIMIT; + unsigned int nr_pages = 1 << order; int n_ret = 0; bool scanned_many = false; @@ -811,6 +840,25 @@ static int scan_swap_map_slots(struct swap_info_struct *si, * And we let swap pages go all over an SSD partition. Hugh */ + if (order > 0) { + /* + * Should not even be attempting large allocations when huge + * page swap is disabled. Warn and fail the allocation. + */ + if (!IS_ENABLED(CONFIG_THP_SWAP) || + nr_pages > SWAPFILE_CLUSTER) { + VM_WARN_ON_ONCE(1); + return 0; + } + + /* + * Swapfile is not block device or not using clusters so unable + * to allocate large entries. 
+ */ + if (!(si->flags & SWP_BLKDEV) || !si->cluster_info) + return 0; + } + si->flags += SWP_SCANNING; /* * Use percpu scan base for SSD to reduce lock contention on @@ -825,8 +873,11 @@ static int scan_swap_map_slots(struct swap_info_struct *si, /* SSD algorithm */ if (si->cluster_info) { - if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) + if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base, order)) { + if (order > 0) + goto no_page; goto scan; + } } else if (unlikely(!si->cluster_nr--)) { if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { si->cluster_nr = SWAPFILE_CLUSTER - 1; @@ -868,13 +919,16 @@ static int scan_swap_map_slots(struct swap_info_struct *si, checks: if (si->cluster_info) { - while (scan_swap_map_ssd_cluster_conflict(si, offset)) { + while (scan_swap_map_ssd_cluster_conflict(si, offset, order)) { /* take a break if we already got some slots */ if (n_ret) goto done; if (!scan_swap_map_try_ssd_cluster(si, &offset, - &scan_base)) + &scan_base, order)) { + if (order > 0) + goto no_page; goto scan; + } } } if (!(si->flags & SWP_WRITEOK)) @@ -905,11 +959,11 @@ static int scan_swap_map_slots(struct swap_info_struct *si, else goto done; } - WRITE_ONCE(si->swap_map[offset], usage); - inc_cluster_info_page(si, si->cluster_info, offset); + memset(si->swap_map + offset, usage, nr_pages); + add_cluster_info_page(si, si->cluster_info, offset, nr_pages); unlock_cluster(ci); - swap_range_alloc(si, offset, 1); + swap_range_alloc(si, offset, nr_pages); slots[n_ret++] = swp_entry(si->type, offset); /* got enough slots or reach max slots? 
*/ @@ -930,8 +984,10 @@ static int scan_swap_map_slots(struct swap_info_struct *si, /* try to get more slots in cluster */ if (si->cluster_info) { - if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) + if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base, order)) goto checks; + if (order > 0) + goto done; } else if (si->cluster_nr && !si->swap_map[++offset]) { /* non-ssd case, still more slots in cluster? */ --si->cluster_nr; @@ -958,11 +1014,13 @@ static int scan_swap_map_slots(struct swap_info_struct *si, } done: - set_cluster_next(si, offset + 1); + if (order == 0) + set_cluster_next(si, offset + 1); si->flags -= SWP_SCANNING; return n_ret; scan: + VM_WARN_ON(order > 0); spin_unlock(&si->lock); while (++offset <= READ_ONCE(si->highest_bit)) { if (unlikely(--latency_ration < 0)) { @@ -991,38 +1049,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si, return n_ret; } -static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot) -{ - unsigned long idx; - struct swap_cluster_info *ci; - unsigned long offset; - - /* - * Should not even be attempting cluster allocations when huge - * page swap is disabled. Warn and fail the allocation. 
- */ - if (!IS_ENABLED(CONFIG_THP_SWAP)) { - VM_WARN_ON_ONCE(1); - return 0; - } - - if (cluster_list_empty(&si->free_clusters)) - return 0; - - idx = cluster_list_first(&si->free_clusters); - offset = idx * SWAPFILE_CLUSTER; - ci = lock_cluster(si, offset); - alloc_cluster(si, idx); - cluster_set_count(ci, SWAPFILE_CLUSTER); - - memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER); - unlock_cluster(ci); - swap_range_alloc(si, offset, SWAPFILE_CLUSTER); - *slot = swp_entry(si->type, offset); - - return 1; -} - static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx) { unsigned long offset = idx * SWAPFILE_CLUSTER; @@ -1045,9 +1071,6 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) int n_ret = 0; int node; - /* Only single cluster request supported */ - WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER); - spin_lock(&swap_avail_lock); avail_pgs = atomic_long_read(&nr_swap_pages) / size; @@ -1083,14 +1106,10 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) spin_unlock(&si->lock); goto nextsi; } - if (size == SWAPFILE_CLUSTER) { - if (si->flags & SWP_BLKDEV) - n_ret = swap_alloc_cluster(si, swp_entries); - } else - n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, - n_goal, swp_entries); + n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, + n_goal, swp_entries, order); spin_unlock(&si->lock); - if (n_ret || size == SWAPFILE_CLUSTER) + if (n_ret || size > 1) goto check_out; cond_resched(); @@ -1667,7 +1686,7 @@ swp_entry_t get_swap_page_of_type(int type) /* This is called for allocating swap entry, not cache */ spin_lock(&si->lock); - if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry)) + if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0)) atomic_long_dec(&nr_swap_pages); spin_unlock(&si->lock); fail: @@ -3117,7 +3136,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) p->flags |= SWP_SYNCHRONOUS_IO; if (p->bdev 
&& bdev_nonrot(p->bdev)) { - int cpu; + int cpu, i; unsigned long ci, nr_cluster; p->flags |= SWP_SOLIDSTATE; @@ -3155,7 +3174,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) struct percpu_cluster *cluster; cluster = per_cpu_ptr(p->percpu_cluster, cpu); - cluster->next = SWAP_NEXT_INVALID; + for (i = 0; i < SWAP_NR_ORDERS; i++) + cluster->next[i] = SWAP_NEXT_INVALID; } } else { atomic_inc(&nr_rotate_swap); -- Gitee From 46f47eb1175d1fdc72f60621d1cbf072bf3ac52e Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 8 Apr 2024 19:39:45 +0100 Subject: [PATCH 1299/2138] mm: vmscan: avoid split during shrink_folio_list() ANBZ: #9728 commit 5ed890ce5147855c5360affd5e5419ed68a54100 upstream. Now that swap supports storing all mTHP sizes, avoid splitting large folios before swap-out. This benefits performance of the swap-out path by eliding split_folio_to_list(), which is expensive, and also sets us up for swapping in large folios in a future series. If the folio is partially mapped, we continue to split it since we want to avoid the extra IO overhead and storage of writing out pages uneccessarily. THP_SWPOUT and THP_SWPOUT_FALLBACK counters should continue to count events only for PMD-mappable folios to avoid user confusion. THP_SWPOUT already has the appropriate guard. Add a guard for THP_SWPOUT_FALLBACK. It may be appropriate to add per-size counters in future. 
Link: https://lkml.kernel.org/r/20240408183946.2991168-7-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: David Hildenbrand Reviewed-by: Barry Song Cc: Barry Song <21cnbao@gmail.com> Cc: Chris Li Cc: Gao Xiang Cc: "Huang, Ying" Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3825 --- mm/vmscan.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 49456b725755..74178d8d368d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1905,24 +1905,22 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, if (!can_split_folio(folio, NULL)) goto activate_locked; /* - * Split folios without a PMD map right - * away. Chances are some or all of the - * tail pages can be freed without IO. + * Split partially mapped folios right away. + * We can free the unmapped pages without IO. 
*/ - if (!folio_entire_mapcount(folio) && - split_folio_to_list(folio, - folio_list)) + if (data_race(!list_empty(&folio->_deferred_list)) && + split_folio_to_list(folio, folio_list)) goto activate_locked; } if (!add_to_swap(folio)) { if (!folio_test_large(folio)) goto activate_locked_split; /* Fallback to swap normal pages */ - if (split_folio_to_list(folio, - folio_list)) + if (split_folio_to_list(folio, folio_list)) goto activate_locked; #ifdef CONFIG_TRANSPARENT_HUGEPAGE - count_vm_event(THP_SWPOUT_FALLBACK); + if (nr_pages >= HPAGE_PMD_NR) + count_vm_event(THP_SWPOUT_FALLBACK); #endif if (!add_to_swap(folio)) goto activate_locked_split; -- Gitee From f23b7b2bd4974fa9ef9fc45f811ff0a20852a3a1 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 8 Apr 2024 19:39:46 +0100 Subject: [PATCH 1300/2138] mm: madvise: avoid split during MADV_PAGEOUT and MADV_COLD ANBZ: #9728 commit 3931b871c4936c00c4e27c469056d8da47a3493f upstream. Rework madvise_cold_or_pageout_pte_range() to avoid splitting any large folio that is fully and contiguously mapped in the pageout/cold vm range. This change means that large folios will be maintained all the way to swap storage. This both improves performance during swap-out, by eliding the cost of splitting the folio, and sets us up nicely for maintaining the large folio when it is swapped back in (to be covered in a separate series). Folios that are not fully mapped in the target range are still split, but note that behavior is changed so that if the split fails for any reason (folio locked, shared, etc) we now leave it as is and move to the next pte in the range and continue work on the proceeding folios. Previously any failure of this sort would cause the entire operation to give up and no folios mapped at higher addresses were paged out or made cold. Given large folios are becoming more common, this old behavior would have likely lead to wasted opportunities. 
While we are at it, change the code that clears young from the ptes to use ptep_test_and_clear_young(), via the new mkold_ptes() batch helper function. This is more efficent than get_and_clear/modify/set, especially for contpte mappings on arm64, where the old approach would require unfolding/refolding and the new approach can be done in place. Link: https://lkml.kernel.org/r/20240408183946.2991168-8-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: Barry Song Acked-by: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Chris Li Cc: Gao Xiang Cc: "Huang, Ying" Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3825 --- include/linux/pgtable.h | 30 ++++++++++++++ mm/internal.h | 12 +++++- mm/madvise.c | 87 +++++++++++++++++++++++------------------ mm/memory.c | 4 +- 4 files changed, 92 insertions(+), 41 deletions(-) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 186c1bbd3a5f..8ac0192ea210 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -354,6 +354,36 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, } #endif +#ifndef mkold_ptes +/** + * mkold_ptes - Mark PTEs that map consecutive pages of the same folio as old. + * @vma: VMA the pages are mapped into. + * @addr: Address the first page is mapped at. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to mark old. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over ptep_test_and_clear_young(). + * + * Note that PTE bits in the PTE range besides the PFN can differ. For example, + * some PTEs might be write-protected. + * + * Context: The caller holds the page table lock. The PTEs map consecutive + * pages that belong to the same folio. The PTEs are all in the same PMD. 
+ */ +static inline void mkold_ptes(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, unsigned int nr) +{ + for (;;) { + ptep_test_and_clear_young(vma, addr, ptep); + if (--nr == 0) + break; + ptep++; + addr += PAGE_SIZE; + } +} +#endif + #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, diff --git a/mm/internal.h b/mm/internal.h index 8e5db8a11896..8dca6db71749 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -159,6 +159,8 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags) * @flags: Flags to modify the PTE batch semantics. * @any_writable: Optional pointer to indicate whether any entry except the * first one is writable. + * @any_young: Optional pointer to indicate whether any entry except the + * first one is young. * * Detect a PTE batch: consecutive (present) PTEs that map consecutive * pages of the same large folio. 
@@ -174,16 +176,18 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags) */ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags, - bool *any_writable) + bool *any_writable, bool *any_young) { unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio); const pte_t *end_ptep = start_ptep + max_nr; pte_t expected_pte, *ptep; - bool writable; + bool writable, young; int nr; if (any_writable) *any_writable = false; + if (any_young) + *any_young = false; VM_WARN_ON_FOLIO(!pte_present(pte), folio); VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio); @@ -197,6 +201,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, pte = ptep_get(ptep); if (any_writable) writable = !!pte_write(pte); + if (any_young) + young = !!pte_young(pte); pte = __pte_batch_clear_ignored(pte, flags); if (!pte_same(pte, expected_pte)) @@ -212,6 +218,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, if (any_writable) *any_writable |= writable; + if (any_young) + *any_young |= young; nr = pte_batch_hint(ptep, pte); expected_pte = pte_advance_pfn(expected_pte, nr); diff --git a/mm/madvise.c b/mm/madvise.c index bc7e6689db4c..9dce9cb03883 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -353,6 +353,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, struct folio *folio = NULL; LIST_HEAD(folio_list); bool pageout_anon_only_filter; + int nr; if (fatal_signal_pending(current)) return -EINTR; @@ -439,7 +440,8 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, return 0; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); - for (; addr < end; pte++, addr += PAGE_SIZE) { + for (; addr < end; pte += nr, addr += nr * PAGE_SIZE) { + nr = 1; ptent = ptep_get(pte); if (pte_none(ptent)) @@ -453,55 +455,66 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, continue; /* - * Creating a THP page is expensive so 
split it only if we - * are sure it's worth. Split it if we are only owner. + * If we encounter a large folio, only split it if it is not + * fully mapped within the range we are operating on. Otherwise + * leave it as is so that it can be swapped out whole. If we + * fail to split a folio, leave it in place and advance to the + * next pte in the range. */ if (folio_test_large(folio)) { - int err; - - if (folio_likely_mapped_shared(folio)) - break; - if (pageout_anon_only_filter && !folio_test_anon(folio)) - break; - if (!folio_trylock(folio)) - break; - folio_get(folio); - arch_leave_lazy_mmu_mode(); - pte_unmap_unlock(start_pte, ptl); - start_pte = NULL; - err = split_folio(folio); - folio_unlock(folio); - folio_put(folio); - if (err) - break; - start_pte = pte = - pte_offset_map_lock(mm, pmd, addr, &ptl); - if (!start_pte) - break; - arch_enter_lazy_mmu_mode(); - pte--; - addr -= PAGE_SIZE; - continue; + const fpb_t fpb_flags = FPB_IGNORE_DIRTY | + FPB_IGNORE_SOFT_DIRTY; + int max_nr = (end - addr) / PAGE_SIZE; + bool any_young; + + nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, + fpb_flags, NULL, &any_young); + if (any_young) + ptent = pte_mkyoung(ptent); + + if (nr < folio_nr_pages(folio)) { + int err; + + if (folio_likely_mapped_shared(folio)) + continue; + if (pageout_anon_only_filter && !folio_test_anon(folio)) + continue; + if (!folio_trylock(folio)) + continue; + folio_get(folio); + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(start_pte, ptl); + start_pte = NULL; + err = split_folio(folio); + folio_unlock(folio); + folio_put(folio); + start_pte = pte = + pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!start_pte) + break; + arch_enter_lazy_mmu_mode(); + if (!err) + nr = 0; + continue; + } } /* * Do not interfere with other mappings of this folio and - * non-LRU folio. + * non-LRU folio. If we have a large folio at this point, we + * know it is fully mapped so if its mapcount is the same as its + * number of pages, it must be exclusive. 
*/ - if (!folio_test_lru(folio) || folio_mapcount(folio) != 1) + if (!folio_test_lru(folio) || + folio_mapcount(folio) != folio_nr_pages(folio)) continue; if (pageout_anon_only_filter && !folio_test_anon(folio)) continue; - VM_BUG_ON_FOLIO(folio_test_large(folio), folio); - if (pte_young(ptent)) { - ptent = ptep_get_and_clear_full(mm, addr, pte, - tlb->fullmm); - ptent = pte_mkold(ptent); - set_pte_at(mm, addr, pte, ptent); - tlb_remove_tlb_entry(tlb, pte, addr); + mkold_ptes(vma, addr, pte, nr); + tlb_remove_tlb_entries(tlb, pte, nr, addr); } /* diff --git a/mm/memory.c b/mm/memory.c index 556ab033c0df..511249d7a7d6 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -988,7 +988,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma flags |= FPB_IGNORE_SOFT_DIRTY; nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags, - &any_writable); + &any_writable, NULL); folio_ref_add(folio, nr); if (folio_test_anon(folio)) { if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page, @@ -1552,7 +1552,7 @@ static inline int zap_present_ptes(struct mmu_gather *tlb, */ if (unlikely(folio_test_large(folio) && max_nr != 1)) { nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags, - NULL); + NULL, NULL); zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr, addr, details, rss, force_flush, -- Gitee From c2fc224985bdd92d7e980708052c683ae9b11623 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Mon, 15 Jan 2024 11:09:14 +0800 Subject: [PATCH 1301/2138] virtio_fs: remove duplicate check if queue is broken ANBZ: #10812 commit f9c29137392e77319f9974c2cdf27d087f05abee upstream. 
virtqueue_enable_cb() will call virtqueue_poll() which will check if queue is broken at beginning, so remove the virtqueue_is_broken() call Signed-off-by: Li RongQing Reviewed-by: Stefan Hajnoczi Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3810 --- fs/fuse/virtio_fs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 624997d8fc50..97689e079d60 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -360,7 +360,7 @@ static void virtio_fs_hiprio_done_work(struct work_struct *work) kfree(req); dec_in_flight_req(fsvq); } - } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq))); + } while (!virtqueue_enable_cb(vq)); spin_unlock(&fsvq->lock); } @@ -642,7 +642,7 @@ static void virtio_fs_requests_done_work(struct work_struct *work) list_move_tail(&req->list, &reqs); spin_unlock(&fpq->lock); } - } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq))); + } while (!virtqueue_enable_cb(vq)); spin_unlock(&fsvq->lock); /* End requests */ -- Gitee From 59592dfe0eee66a15f04a12a42c8e6d52b2a5712 Mon Sep 17 00:00:00 2001 From: Peter-Jan Gootzen Date: Fri, 17 May 2024 21:04:34 +0200 Subject: [PATCH 1302/2138] virtio-fs: let -ENOMEM bubble up or burst gently ANBZ: #10812 commit 2106e1f444d9d79f5c3784e5847bfdb6cc3ca69f upstream. Currently, when the enqueueing of a request or forget operation fails with -ENOMEM, the enqueueing is retried after a timeout. This patch removes this behavior and treats -ENOMEM in these scenarios like any other error. By bubbling up the error to user space in the case of a request, and by dropping the operation in case of a forget. This behavior matches that of the FUSE layer above, and also simplifies the error handling. The latter will come in handy for upcoming patches that optimize the retrying of operations in case of -ENOSPC. 
Signed-off-by: Peter-Jan Gootzen Reviewed-by: Max Gurtovoy Reviewed-by: Yoray Zack Message-Id: <20240517190435.152096-2-pgootzen@nvidia.com> Signed-off-by: Michael S. Tsirkin Reviewed-by: Stefan Hajnoczi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3810 --- fs/fuse/virtio_fs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 97689e079d60..6b19d1a23594 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -400,7 +400,7 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work) ret = virtio_fs_enqueue_req(fsvq, req, true); if (ret < 0) { - if (ret == -ENOMEM || ret == -ENOSPC) { + if (ret == -ENOSPC) { spin_lock(&fsvq->lock); list_add_tail(&req->list, &fsvq->queued_reqs); schedule_delayed_work(&fsvq->dispatch_work, @@ -447,7 +447,7 @@ static int send_forget_request(struct virtio_fs_vq *fsvq, ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC); if (ret < 0) { - if (ret == -ENOMEM || ret == -ENOSPC) { + if (ret == -ENOSPC) { pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n", ret); list_add_tail(&forget->list, &fsvq->queued_reqs); @@ -1314,7 +1314,7 @@ __releases(fiq->lock) fsvq = &fs->vqs[queue_id]; ret = virtio_fs_enqueue_req(fsvq, req, false); if (ret < 0) { - if (ret == -ENOMEM || ret == -ENOSPC) { + if (ret == -ENOSPC) { /* * Virtqueue full. Retry submission from worker * context as we might be holding fc->bg_lock. -- Gitee From 40d836e3429361ef92e5e8bb5b3f6a41a1d4eaa8 Mon Sep 17 00:00:00 2001 From: Peter-Jan Gootzen Date: Fri, 17 May 2024 21:04:35 +0200 Subject: [PATCH 1303/2138] virtio-fs: improved request latencies when Virtio queue is full ANBZ: #10812 commit 106e4df1206b2c239ba13a9ec2fd1e9b754bd455 upstream. Currently, when the Virtio queue is full, a work item is scheduled to execute in 1ms that retries adding the request to the queue. 
This is a large amount of time on the scale on which a virtio-fs device can operate. When using a DPU this is around 30-40us baseline without going to a remote server (4k, QD=1). This patch changes the retrying behavior to immediately filling the Virtio queue up again when a completion has been received. This reduces the 99.9th percentile latencies in our tests by 60x and slightly increases the overall throughput, when using a workload IO depth 2x the size of the Virtio queue and a DPU-powered virtio-fs device (NVIDIA BlueField DPU). Signed-off-by: Peter-Jan Gootzen Reviewed-by: Max Gurtovoy Reviewed-by: Yoray Zack Message-Id: <20240517190435.152096-3-pgootzen@nvidia.com> Signed-off-by: Michael S. Tsirkin Reviewed-by: Stefan Hajnoczi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3810 --- fs/fuse/virtio_fs.c | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 6b19d1a23594..b0a52bb015ae 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -47,7 +47,7 @@ struct virtio_fs_vq { struct work_struct done_work; struct list_head queued_reqs; struct list_head end_reqs; /* End these requests */ - struct delayed_work dispatch_work; + struct work_struct dispatch_work; struct fuse_dev *fud; bool connected; long in_flight; @@ -207,7 +207,7 @@ static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq) } flush_work(&fsvq->done_work); - flush_delayed_work(&fsvq->dispatch_work); + flush_work(&fsvq->dispatch_work); } static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs) @@ -361,6 +361,10 @@ static void virtio_fs_hiprio_done_work(struct work_struct *work) dec_in_flight_req(fsvq); } } while (!virtqueue_enable_cb(vq)); + + if (!list_empty(&fsvq->queued_reqs)) + schedule_work(&fsvq->dispatch_work); + spin_unlock(&fsvq->lock); } @@ -368,7 +372,7 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work) 
{ struct fuse_req *req; struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, - dispatch_work.work); + dispatch_work); int ret; pr_debug("virtio-fs: worker %s called.\n", __func__); @@ -403,8 +407,6 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work) if (ret == -ENOSPC) { spin_lock(&fsvq->lock); list_add_tail(&req->list, &fsvq->queued_reqs); - schedule_delayed_work(&fsvq->dispatch_work, - msecs_to_jiffies(1)); spin_unlock(&fsvq->lock); return; } @@ -451,8 +453,6 @@ static int send_forget_request(struct virtio_fs_vq *fsvq, pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n", ret); list_add_tail(&forget->list, &fsvq->queued_reqs); - schedule_delayed_work(&fsvq->dispatch_work, - msecs_to_jiffies(1)); if (!in_flight) inc_in_flight_req(fsvq); /* Queue is full */ @@ -484,7 +484,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work) { struct virtio_fs_forget *forget; struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, - dispatch_work.work); + dispatch_work); pr_debug("virtio-fs: worker %s called.\n", __func__); while (1) { spin_lock(&fsvq->lock); @@ -662,6 +662,12 @@ static void virtio_fs_requests_done_work(struct work_struct *work) virtio_fs_request_complete(req, fsvq); } } + + /* Try to push previously queued requests, as the queue might no longer be full */ + spin_lock(&fsvq->lock); + if (!list_empty(&fsvq->queued_reqs)) + schedule_work(&fsvq->dispatch_work); + spin_unlock(&fsvq->lock); } static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs) @@ -723,12 +729,12 @@ static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name, if (vq_type == VQ_REQUEST) { INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work); - INIT_DELAYED_WORK(&fsvq->dispatch_work, - virtio_fs_request_dispatch_work); + INIT_WORK(&fsvq->dispatch_work, + virtio_fs_request_dispatch_work); } else { INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work); - 
INIT_DELAYED_WORK(&fsvq->dispatch_work, - virtio_fs_hiprio_dispatch_work); + INIT_WORK(&fsvq->dispatch_work, + virtio_fs_hiprio_dispatch_work); } } @@ -1322,8 +1328,6 @@ __releases(fiq->lock) spin_lock(&fsvq->lock); list_add_tail(&req->list, &fsvq->queued_reqs); inc_in_flight_req(fsvq); - schedule_delayed_work(&fsvq->dispatch_work, - msecs_to_jiffies(1)); spin_unlock(&fsvq->lock); return; } @@ -1333,7 +1337,7 @@ __releases(fiq->lock) /* Can't end request in submission context. Use a worker */ spin_lock(&fsvq->lock); list_add_tail(&req->list, &fsvq->end_reqs); - schedule_delayed_work(&fsvq->dispatch_work, 0); + schedule_work(&fsvq->dispatch_work); spin_unlock(&fsvq->lock); return; } -- Gitee From 2bdce01a693db36b30e22388856cabecadef73aa Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 25 Jul 2024 10:53:34 -0700 Subject: [PATCH 1304/2138] fuse: check aborted connection before adding requests to pending list for resending ANBZ: #10812 commit 97f30876c94382d1b01d45c2c76be8911b196527 upstream. There is a race condition where inflight requests will not be aborted if they are in the middle of being re-sent when the connection is aborted. If fuse_resend has already moved all the requests in the fpq->processing lists to its private queue ("to_queue") and then the connection starts and finishes aborting, these requests will be added to the pending queue and remain on it indefinitely. 
Fixes: 760eac73f9f6 ("fuse: Introduce a new notification type for resend pending requests") Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Reviewed-by: Jingbo Xu Cc: # v6.9 Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3810 --- fs/fuse/dev.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 60f0bd6b0099..bf979dba93cd 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -31,6 +31,8 @@ MODULE_ALIAS("devname:fuse"); static struct kmem_cache *fuse_req_cachep; +static void end_requests(struct list_head *head); + static struct fuse_dev *fuse_get_dev(struct file *file) { /* @@ -1822,6 +1824,13 @@ static void fuse_resend(struct fuse_conn *fc) } spin_lock(&fiq->lock); + if (!fiq->connected) { + spin_unlock(&fiq->lock); + list_for_each_entry(req, &to_queue, list) + clear_bit(FR_PENDING, &req->flags); + end_requests(&to_queue); + return; + } /* iq and pq requests are both oldest to newest */ list_splice(&to_queue, &fiq->pending); fiq->ops->wake_pending_and_unlock(fiq); -- Gitee From 7790e8cfcadb1822e2c5473284d294fee822a7f1 Mon Sep 17 00:00:00 2001 From: Aubrey Li Date: Thu, 19 Sep 2024 09:52:32 +0800 Subject: [PATCH 1305/2138] anolis: configs: enable CONFIG_FW_LOADER_COMPRESS by default ANBZ: #10846 This enables the following three configs for x86 to fix graphics driver firmware load failed issue. 
- CONFIG_FW_LOADER_COMPRESS - CONFIG_FW_LOADER_COMPRESS_XZ - CONFIG_FW_LOADER_COMPRESS_ZSTD Signed-off-by: Aubrey Li Reviewed-by: Xuchun Shang Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3828 --- .../default => L1-RECOMMEND/arm64}/CONFIG_FW_LOADER_COMPRESS | 0 anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_XZ | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_ZSTD | 1 + 4 files changed, 3 insertions(+) rename anolis/configs/{L2-OPTIONAL/default => L1-RECOMMEND/arm64}/CONFIG_FW_LOADER_COMPRESS (100%) create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_XZ create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_ZSTD diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_COMPRESS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FW_LOADER_COMPRESS similarity index 100% rename from anolis/configs/L2-OPTIONAL/default/CONFIG_FW_LOADER_COMPRESS rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_FW_LOADER_COMPRESS diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS new file mode 100644 index 000000000000..da865e89877f --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS @@ -0,0 +1 @@ +CONFIG_FW_LOADER_COMPRESS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_XZ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_XZ new file mode 100644 index 000000000000..fc7cc884ba61 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_XZ @@ -0,0 +1 @@ +CONFIG_FW_LOADER_COMPRESS_XZ=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_ZSTD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_ZSTD new file mode 100644 index 000000000000..71aa01bf1211 --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FW_LOADER_COMPRESS_ZSTD @@ -0,0 +1 @@ +CONFIG_FW_LOADER_COMPRESS_ZSTD=y -- Gitee From 273ce02d15ebf57bac6ece06f0c32da141da7daa Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Thu, 14 Sep 2023 00:49:37 +0800 Subject: [PATCH 1306/2138] mm: memcg: add THP swap out info for anonymous reclaim ANBZ: #9728 commit 811244a501b967b00fecb1ae906d5dc6329c91e0 upstream. At present, we support per-memcg reclaim strategy, however we do not know the number of transparent huge pages being reclaimed, as we know the transparent huge pages need to be splited before reclaim them, and they will bring some performance bottleneck effect. for example, when two memcg (A & B) are doing reclaim for anonymous pages at same time, and 'A' memcg is reclaiming a large number of transparent huge pages, we can better analyze that the performance bottleneck will be caused by 'A' memcg. therefore, in order to better analyze such problems, there add THP swap out info for per-memcg. [akpm@linux-foundation.orgL fix swap_writepage_fs(), per Johannes] Link: https://lkml.kernel.org/r/20230913213343.GB48476@cmpxchg.org Link: https://lkml.kernel.org/r/20230913164938.16918-1-vernhao@tencent.com Signed-off-by: Xin Hao Suggested-by: Johannes Weiner Acked-by: Johannes Weiner Cc: Michal Hocko Cc: Roman Gushchin Cc: Shakeel Butt Cc: Muchun Song Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3837 --- Documentation/admin-guide/cgroup-v2.rst | 9 +++++++++ mm/memcontrol.c | 2 ++ mm/page_io.c | 8 ++++---- mm/vmscan.c | 4 +++- 4 files changed, 18 insertions(+), 5 deletions(-) diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index aa0edf0d07f0..6f53cb81067a 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1533,6 +1533,15 @@ PAGE_SIZE multiple when read back. 
collapsing an existing range of pages. This counter is not present when CONFIG_TRANSPARENT_HUGEPAGE is not set. + thp_swpout (npn) + Number of transparent hugepages which are swapout in one piece + without splitting. + + thp_swpout_fallback (npn) + Number of transparent hugepages which were split before swapout. + Usually because failed to allocate some continuous swap space + for the huge page. + memory.numa_stat A read-only nested-keyed file which exists on non-root cgroups. diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3d41a19fb3b0..01733b9fb62d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -710,6 +710,8 @@ static const unsigned int memcg_vm_event_stat[] = { #ifdef CONFIG_TRANSPARENT_HUGEPAGE THP_FAULT_ALLOC, THP_COLLAPSE_ALLOC, + THP_SWPOUT, + THP_SWPOUT_FALLBACK, #endif }; diff --git a/mm/page_io.c b/mm/page_io.c index fe4c21af23f2..cb559ae324c6 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -208,8 +208,10 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) static inline void count_swpout_vm_event(struct folio *folio) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (unlikely(folio_test_pmd_mappable(folio))) + if (unlikely(folio_test_pmd_mappable(folio))) { + count_memcg_folio_events(folio, THP_SWPOUT, 1); count_vm_event(THP_SWPOUT); + } #endif count_vm_events(PSWPOUT, folio_nr_pages(folio)); } @@ -278,9 +280,6 @@ static void sio_write_complete(struct kiocb *iocb, long ret) set_page_dirty(page); ClearPageReclaim(page); } - } else { - for (p = 0; p < sio->pages; p++) - count_swpout_vm_event(page_folio(sio->bvec[p].bv_page)); } for (p = 0; p < sio->pages; p++) @@ -296,6 +295,7 @@ static void swap_writepage_fs(struct page *page, struct writeback_control *wbc) struct file *swap_file = sis->swap_file; loff_t pos = page_file_offset(page); + count_swpout_vm_event(page_folio(page)); set_page_writeback(page); unlock_page(page); if (wbc->swap_plug) diff --git a/mm/vmscan.c b/mm/vmscan.c index 74178d8d368d..617a57aedc71 100644 --- 
a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1919,8 +1919,10 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, if (split_folio_to_list(folio, folio_list)) goto activate_locked; #ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (nr_pages >= HPAGE_PMD_NR) + if (nr_pages >= HPAGE_PMD_NR) { + count_memcg_folio_events(folio, THP_SWPOUT_FALLBACK, 1); count_vm_event(THP_SWPOUT_FALLBACK); + } #endif if (!add_to_swap(folio)) goto activate_locked_split; -- Gitee From 464416c9e5a29ea6360ec8e570de2cb0c20262d3 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Tue, 16 Jan 2024 14:12:35 +0000 Subject: [PATCH 1307/2138] tools/mm: add thpmaps script to dump THP usage info ANBZ: #9728 commit 2444172cfde45a3d6e655f50c620727c76bab4a2 upstream. With the proliferation of large folios for file-backed memory, and more recently the introduction of multi-size THP for anonymous memory, it is becoming useful to be able to see exactly how large folios are mapped into processes. For some architectures (e.g. arm64), if most memory is mapped using contpte-sized and -aligned blocks, TLB usage can be optimized so it's useful to see where these requirements are and are not being met. thpmaps is a Python utility that reads /proc//smaps, /proc//pagemap and /proc/kpageflags to print information about how transparent huge pages (both file and anon) are mapped to a specified process or cgroup. It aims to help users debug and optimize their workloads. In future we may wish to introduce stats directly into the kernel (e.g. smaps or similar), but for now this provides a short term solution without the need to introduce any new ABI. Run with help option for a full listing of the arguments: # ./thpmaps --help --8<-- usage: thpmaps [-h] [--pid pid | --cgroup path] [--rollup] [--cont size[KMG]] [--inc-smaps] [--inc-empty] [--periodic sleep_ms] Prints information about how transparent huge pages are mapped, either system-wide, or for a specified process or cgroup. 
When run with --pid, the user explicitly specifies the set of pids to scan. e.g. "--pid 10 [--pid 134 ...]". When run with --cgroup, the user passes either a v1 or v2 cgroup and all pids that belong to the cgroup subtree are scanned. When run with neither --pid nor --cgroup, the full set of pids on the system is gathered from /proc and scanned as if the user had provided "--pid 1 --pid 2 ...". A default set of statistics is always generated for THP mappings. However, it is also possible to generate additional statistics for "contiguous block mappings" where the block size is user-defined. Statistics are maintained independently for anonymous and file-backed (pagecache) memory and are shown both in kB and as a percentage of either total anonymous or total file-backed memory as appropriate. THP Statistics -------------- Statistics are always generated for fully- and contiguously-mapped THPs whose mapping address is aligned to their size, for each supported by the system. Separate counters describe THPs mapped by PTE vs those mapped by PMD. (Although note a THP can only be mapped by PMD if it is PMD-sized): - anon-thp-pte-aligned-kB - file-thp-pte-aligned-kB - anon-thp-pmd-aligned-kB - file-thp-pmd-aligned-kB Similarly, statistics are always generated for fully- and contiguously- mapped THPs whose mapping address is *not* aligned to their size, for each supported by the system. Due to the unaligned mapping, it is impossible to map by PMD, so there are only PTE counters for this case: - anon-thp-pte-unaligned-kB - file-thp-pte-unaligned-kB Statistics are also always generated for mapped pages that belong to a THP but where the is THP is *not* fully- and contiguously- mapped. 
These "partial" mappings are all counted in the same counter regardless of the size of the THP that is partially mapped: - anon-thp-pte-partial - file-thp-pte-partial Contiguous Block Statistics --------------------------- An optional, additional set of statistics is generated for every contiguous block size specified with `--cont `. These statistics show how much memory is mapped in contiguous blocks of and also aligned to . A given contiguous block must all belong to the same THP, but there is no requirement for it to be the *whole* THP. Separate counters describe contiguous blocks mapped by PTE vs those mapped by PMD: - anon-cont-pte-aligned-kB - file-cont-pte-aligned-kB - anon-cont-pmd-aligned-kB - file-cont-pmd-aligned-kB As an example, if monitoring 64K contiguous blocks (--cont 64K), there are a number of sources that could provide such blocks: a fully- and contiguously-mapped 64K THP that is aligned to a 64K boundary would provide 1 block. A fully- and contiguously-mapped 128K THP that is aligned to at least a 64K boundary would provide 2 blocks. Or a 128K THP that maps its first 100K, but contiguously and starting at a 64K boundary would provide 1 block. A fully- and contiguously-mapped 2M THP would provide 32 blocks. There are many other possible permutations. options: -h, --help show this help message and exit --pid pid Process id of the target process. Maybe issued multiple times to scan multiple processes. --pid and --cgroup are mutually exclusive. If neither are provided, all processes are scanned to provide system-wide information. --cgroup path Path to the target cgroup in sysfs. Iterates over every pid in the cgroup and its children. --pid and --cgroup are mutually exclusive. If neither are provided, all processes are scanned to provide system-wide information. --rollup Sum the per-vma statistics to provide a summary over the whole system, process or cgroup. 
--cont size[KMG] Adds stats for memory that is mapped in contiguous blocks of and also aligned to . May be issued multiple times to track multiple sized blocks. Useful to infer e.g. arm64 contpte and hpa mappings. Size must be a power-of-2 number of pages. --inc-smaps Include all numerical, additive /proc//smaps stats in the output. --inc-empty Show all statistics including those whose value is 0. --periodic sleep_ms Run in a loop, polling every sleep_ms milliseconds. Requires root privilege to access pagemap and kpageflags. --8<-- Example command to summarise fully and partially mapped THPs and 64K contiguous blocks over all VMAs in all processes in the system (--inc-empty forces printing stats that are 0): # ./thpmaps --cont 64K --rollup --inc-empty --8<-- anon-thp-pmd-aligned-2048kB: 139264 kB ( 6%) file-thp-pmd-aligned-2048kB: 0 kB ( 0%) anon-thp-pte-aligned-16kB: 0 kB ( 0%) anon-thp-pte-aligned-32kB: 0 kB ( 0%) anon-thp-pte-aligned-64kB: 72256 kB ( 3%) anon-thp-pte-aligned-128kB: 0 kB ( 0%) anon-thp-pte-aligned-256kB: 0 kB ( 0%) anon-thp-pte-aligned-512kB: 0 kB ( 0%) anon-thp-pte-aligned-1024kB: 0 kB ( 0%) anon-thp-pte-aligned-2048kB: 0 kB ( 0%) anon-thp-pte-unaligned-16kB: 0 kB ( 0%) anon-thp-pte-unaligned-32kB: 0 kB ( 0%) anon-thp-pte-unaligned-64kB: 0 kB ( 0%) anon-thp-pte-unaligned-128kB: 0 kB ( 0%) anon-thp-pte-unaligned-256kB: 0 kB ( 0%) anon-thp-pte-unaligned-512kB: 0 kB ( 0%) anon-thp-pte-unaligned-1024kB: 0 kB ( 0%) anon-thp-pte-unaligned-2048kB: 0 kB ( 0%) anon-thp-pte-partial: 63232 kB ( 3%) file-thp-pte-aligned-16kB: 809024 kB (47%) file-thp-pte-aligned-32kB: 43168 kB ( 3%) file-thp-pte-aligned-64kB: 98496 kB ( 6%) file-thp-pte-aligned-128kB: 17536 kB ( 1%) file-thp-pte-aligned-256kB: 0 kB ( 0%) file-thp-pte-aligned-512kB: 0 kB ( 0%) file-thp-pte-aligned-1024kB: 0 kB ( 0%) file-thp-pte-aligned-2048kB: 0 kB ( 0%) file-thp-pte-unaligned-16kB: 21712 kB ( 1%) file-thp-pte-unaligned-32kB: 704 kB ( 0%) file-thp-pte-unaligned-64kB: 896 kB ( 0%) 
file-thp-pte-unaligned-128kB: 44928 kB ( 3%) file-thp-pte-unaligned-256kB: 0 kB ( 0%) file-thp-pte-unaligned-512kB: 0 kB ( 0%) file-thp-pte-unaligned-1024kB: 0 kB ( 0%) file-thp-pte-unaligned-2048kB: 0 kB ( 0%) file-thp-pte-partial: 9252 kB ( 1%) anon-cont-pmd-aligned-64kB: 139264 kB ( 6%) file-cont-pmd-aligned-64kB: 0 kB ( 0%) anon-cont-pte-aligned-64kB: 100672 kB ( 4%) file-cont-pte-aligned-64kB: 161856 kB ( 9%) --8<-- Link: https://lkml.kernel.org/r/20240116141235.960842-1-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Tested-by: Barry Song Cc: Alistair Popple Cc: David Hildenbrand Cc: John Hubbard Cc: Kefeng Wang Cc: Matthew Wilcox (Oracle) Cc: William Kucharski Cc: Zenghui Yu Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3837 --- tools/mm/Makefile | 9 +- tools/mm/thpmaps | 675 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 680 insertions(+), 4 deletions(-) create mode 100644 tools/mm/thpmaps diff --git a/tools/mm/Makefile b/tools/mm/Makefile index 1c5606cc3334..7bb03606b9ea 100644 --- a/tools/mm/Makefile +++ b/tools/mm/Makefile @@ -3,7 +3,8 @@ # include ../scripts/Makefile.include -TARGETS=page-types slabinfo page_owner_sort +BUILD_TARGETS=page-types slabinfo page_owner_sort +INSTALL_TARGETS = $(BUILD_TARGETS) thpmaps LIB_DIR = ../lib/api LIBS = $(LIB_DIR)/libapi.a @@ -11,9 +12,9 @@ LIBS = $(LIB_DIR)/libapi.a CFLAGS += -Wall -Wextra -I../lib/ -pthread LDFLAGS += $(LIBS) -pthread -all: $(TARGETS) +all: $(BUILD_TARGETS) -$(TARGETS): $(LIBS) +$(BUILD_TARGETS): $(LIBS) $(LIBS): make -C $(LIB_DIR) @@ -29,4 +30,4 @@ sbindir ?= /usr/sbin install: all install -d $(DESTDIR)$(sbindir) - install -m 755 -p $(TARGETS) $(DESTDIR)$(sbindir) + install -m 755 -p $(INSTALL_TARGETS) $(DESTDIR)$(sbindir) diff --git a/tools/mm/thpmaps b/tools/mm/thpmaps new file mode 100644 index 000000000000..803e0318f2fe --- /dev/null +++ b/tools/mm/thpmaps @@ -0,0 
+1,675 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2024 ARM Ltd. +# +# Utility providing smaps-like output detailing transparent hugepage usage. +# For more info, run: +# ./thpmaps --help +# +# Requires numpy: +# pip3 install numpy + + +import argparse +import collections +import math +import os +import re +import resource +import shutil +import sys +import textwrap +import time +import numpy as np + + +with open('/sys/kernel/mm/transparent_hugepage/hpage_pmd_size') as f: + PAGE_SIZE = resource.getpagesize() + PAGE_SHIFT = int(math.log2(PAGE_SIZE)) + PMD_SIZE = int(f.read()) + PMD_ORDER = int(math.log2(PMD_SIZE / PAGE_SIZE)) + + +def align_forward(v, a): + return (v + (a - 1)) & ~(a - 1) + + +def align_offset(v, a): + return v & (a - 1) + + +def kbnr(kb): + # Convert KB to number of pages. + return (kb << 10) >> PAGE_SHIFT + + +def nrkb(nr): + # Convert number of pages to KB. + return (nr << PAGE_SHIFT) >> 10 + + +def odkb(order): + # Convert page order to KB. + return (PAGE_SIZE << order) >> 10 + + +def cont_ranges_all(search, index): + # Given a list of arrays, find the ranges for which values are monotonically + # incrementing in all arrays. all arrays in search and index must be the + # same size. + sz = len(search[0]) + r = np.full(sz, 2) + d = np.diff(search[0]) == 1 + for dd in [np.diff(arr) == 1 for arr in search[1:]]: + d &= dd + r[1:] -= d + r[:-1] -= d + return [np.repeat(arr, r).reshape(-1, 2) for arr in index] + + +class ArgException(Exception): + pass + + +class FileIOException(Exception): + pass + + +class BinArrayFile: + # Base class used to read /proc//pagemap and /proc/kpageflags into a + # numpy array. Use inherrited class in a with clause to ensure file is + # closed when it goes out of scope. 
+ def __init__(self, filename, element_size): + self.element_size = element_size + self.filename = filename + self.fd = os.open(self.filename, os.O_RDONLY) + + def cleanup(self): + os.close(self.fd) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.cleanup() + + def _readin(self, offset, buffer): + length = os.preadv(self.fd, (buffer,), offset) + if len(buffer) != length: + raise FileIOException('error: {} failed to read {} bytes at {:x}' + .format(self.filename, len(buffer), offset)) + + def _toarray(self, buf): + assert(self.element_size == 8) + return np.frombuffer(buf, dtype=np.uint64) + + def getv(self, vec): + vec *= self.element_size + offsets = vec[:, 0] + lengths = (np.diff(vec) + self.element_size).reshape(len(vec)) + buf = bytearray(int(np.sum(lengths))) + view = memoryview(buf) + pos = 0 + for offset, length in zip(offsets, lengths): + offset = int(offset) + length = int(length) + self._readin(offset, view[pos:pos+length]) + pos += length + return self._toarray(buf) + + def get(self, index, nr=1): + offset = index * self.element_size + length = nr * self.element_size + buf = bytearray(length) + self._readin(offset, buf) + return self._toarray(buf) + + +PM_PAGE_PRESENT = 1 << 63 +PM_PFN_MASK = (1 << 55) - 1 + +class PageMap(BinArrayFile): + # Read ranges of a given pid's pagemap into a numpy array. + def __init__(self, pid='self'): + super().__init__(f'/proc/{pid}/pagemap', 8) + + +KPF_ANON = 1 << 12 +KPF_COMPOUND_HEAD = 1 << 15 +KPF_COMPOUND_TAIL = 1 << 16 +KPF_THP = 1 << 22 + +class KPageFlags(BinArrayFile): + # Read ranges of /proc/kpageflags into a numpy array. 
+ def __init__(self): + super().__init__(f'/proc/kpageflags', 8) + + +vma_all_stats = set([ + "Size", + "Rss", + "Pss", + "Pss_Dirty", + "Shared_Clean", + "Shared_Dirty", + "Private_Clean", + "Private_Dirty", + "Referenced", + "Anonymous", + "KSM", + "LazyFree", + "AnonHugePages", + "ShmemPmdMapped", + "FilePmdMapped", + "Shared_Hugetlb", + "Private_Hugetlb", + "Swap", + "SwapPss", + "Locked", +]) + +vma_min_stats = set([ + "Rss", + "Anonymous", + "AnonHugePages", + "ShmemPmdMapped", + "FilePmdMapped", +]) + +VMA = collections.namedtuple('VMA', [ + 'name', + 'start', + 'end', + 'read', + 'write', + 'execute', + 'private', + 'pgoff', + 'major', + 'minor', + 'inode', + 'stats', +]) + +class VMAList: + # A container for VMAs, parsed from /proc//smaps. Iterate over the + # instance to receive VMAs. + def __init__(self, pid='self', stats=[]): + self.vmas = [] + with open(f'/proc/{pid}/smaps', 'r') as file: + for line in file: + elements = line.split() + if '-' in elements[0]: + start, end = map(lambda x: int(x, 16), elements[0].split('-')) + major, minor = map(lambda x: int(x, 16), elements[3].split(':')) + self.vmas.append(VMA( + name=elements[5] if len(elements) == 6 else '', + start=start, + end=end, + read=elements[1][0] == 'r', + write=elements[1][1] == 'w', + execute=elements[1][2] == 'x', + private=elements[1][3] == 'p', + pgoff=int(elements[2], 16), + major=major, + minor=minor, + inode=int(elements[4], 16), + stats={}, + )) + else: + param = elements[0][:-1] + if param in stats: + value = int(elements[1]) + self.vmas[-1].stats[param] = {'type': None, 'value': value} + + def __iter__(self): + yield from self.vmas + + +def thp_parse(vma, kpageflags, ranges, indexes, vfns, pfns, anons, heads): + # Given 4 same-sized arrays representing a range within a page table backed + # by THPs (vfns: virtual frame numbers, pfns: physical frame numbers, anons: + # True if page is anonymous, heads: True if page is head of a THP), return a + # dictionary of statistics describing 
the mapped THPs. + stats = { + 'file': { + 'partial': 0, + 'aligned': [0] * (PMD_ORDER + 1), + 'unaligned': [0] * (PMD_ORDER + 1), + }, + 'anon': { + 'partial': 0, + 'aligned': [0] * (PMD_ORDER + 1), + 'unaligned': [0] * (PMD_ORDER + 1), + }, + } + + for rindex, rpfn in zip(ranges[0], ranges[2]): + index_next = int(rindex[0]) + index_end = int(rindex[1]) + 1 + pfn_end = int(rpfn[1]) + 1 + + folios = indexes[index_next:index_end][heads[index_next:index_end]] + + # Account pages for any partially mapped THP at the front. In that case, + # the first page of the range is a tail. + nr = (int(folios[0]) if len(folios) else index_end) - index_next + stats['anon' if anons[index_next] else 'file']['partial'] += nr + + # Account pages for any partially mapped THP at the back. In that case, + # the next page after the range is a tail. + if len(folios): + flags = int(kpageflags.get(pfn_end)[0]) + if flags & KPF_COMPOUND_TAIL: + nr = index_end - int(folios[-1]) + folios = folios[:-1] + index_end -= nr + stats['anon' if anons[index_end - 1] else 'file']['partial'] += nr + + # Account fully mapped THPs in the middle of the range. + if len(folios): + folio_nrs = np.append(np.diff(folios), np.uint64(index_end - folios[-1])) + folio_orders = np.log2(folio_nrs).astype(np.uint64) + for index, order in zip(folios, folio_orders): + index = int(index) + order = int(order) + nr = 1 << order + vfn = int(vfns[index]) + align = 'aligned' if align_forward(vfn, nr) == vfn else 'unaligned' + anon = 'anon' if anons[index] else 'file' + stats[anon][align][order] += nr + + # Account PMD-mapped THPs spearately, so filter out of the stats. There is a + # race between acquiring the smaps stats and reading pagemap, where memory + # could be deallocated. So clamp to zero incase it would have gone negative. 
+ anon_pmd_mapped = vma.stats['AnonHugePages']['value'] + file_pmd_mapped = vma.stats['ShmemPmdMapped']['value'] + \ + vma.stats['FilePmdMapped']['value'] + stats['anon']['aligned'][PMD_ORDER] = max(0, stats['anon']['aligned'][PMD_ORDER] - kbnr(anon_pmd_mapped)) + stats['file']['aligned'][PMD_ORDER] = max(0, stats['file']['aligned'][PMD_ORDER] - kbnr(file_pmd_mapped)) + + rstats = { + f"anon-thp-pmd-aligned-{odkb(PMD_ORDER)}kB": {'type': 'anon', 'value': anon_pmd_mapped}, + f"file-thp-pmd-aligned-{odkb(PMD_ORDER)}kB": {'type': 'file', 'value': file_pmd_mapped}, + } + + def flatten_sub(type, subtype, stats): + param = f"{type}-thp-pte-{subtype}-{{}}kB" + for od, nr in enumerate(stats[2:], 2): + rstats[param.format(odkb(od))] = {'type': type, 'value': nrkb(nr)} + + def flatten_type(type, stats): + flatten_sub(type, 'aligned', stats['aligned']) + flatten_sub(type, 'unaligned', stats['unaligned']) + rstats[f"{type}-thp-pte-partial"] = {'type': type, 'value': nrkb(stats['partial'])} + + flatten_type('anon', stats['anon']) + flatten_type('file', stats['file']) + + return rstats + + +def cont_parse(vma, order, ranges, anons, heads): + # Given 4 same-sized arrays representing a range within a page table backed + # by THPs (vfns: virtual frame numbers, pfns: physical frame numbers, anons: + # True if page is anonymous, heads: True if page is head of a THP), return a + # dictionary of statistics describing the contiguous blocks. 
+ nr_cont = 1 << order + nr_anon = 0 + nr_file = 0 + + for rindex, rvfn, rpfn in zip(*ranges): + index_next = int(rindex[0]) + index_end = int(rindex[1]) + 1 + vfn_start = int(rvfn[0]) + pfn_start = int(rpfn[0]) + + if align_offset(pfn_start, nr_cont) != align_offset(vfn_start, nr_cont): + continue + + off = align_forward(vfn_start, nr_cont) - vfn_start + index_next += off + + while index_next + nr_cont <= index_end: + folio_boundary = heads[index_next+1:index_next+nr_cont].any() + if not folio_boundary: + if anons[index_next]: + nr_anon += nr_cont + else: + nr_file += nr_cont + index_next += nr_cont + + # Account blocks that are PMD-mapped spearately, so filter out of the stats. + # There is a race between acquiring the smaps stats and reading pagemap, + # where memory could be deallocated. So clamp to zero incase it would have + # gone negative. + anon_pmd_mapped = vma.stats['AnonHugePages']['value'] + file_pmd_mapped = vma.stats['ShmemPmdMapped']['value'] + \ + vma.stats['FilePmdMapped']['value'] + nr_anon = max(0, nr_anon - kbnr(anon_pmd_mapped)) + nr_file = max(0, nr_file - kbnr(file_pmd_mapped)) + + rstats = { + f"anon-cont-pmd-aligned-{nrkb(nr_cont)}kB": {'type': 'anon', 'value': anon_pmd_mapped}, + f"file-cont-pmd-aligned-{nrkb(nr_cont)}kB": {'type': 'file', 'value': file_pmd_mapped}, + } + + rstats[f"anon-cont-pte-aligned-{nrkb(nr_cont)}kB"] = {'type': 'anon', 'value': nrkb(nr_anon)} + rstats[f"file-cont-pte-aligned-{nrkb(nr_cont)}kB"] = {'type': 'file', 'value': nrkb(nr_file)} + + return rstats + + +def vma_print(vma, pid): + # Prints a VMA instance in a format similar to smaps. The main difference is + # that the pid is included as the first value. 
+ print("{:010d}: {:016x}-{:016x} {}{}{}{} {:08x} {:02x}:{:02x} {:08x} {}" + .format( + pid, vma.start, vma.end, + 'r' if vma.read else '-', 'w' if vma.write else '-', + 'x' if vma.execute else '-', 'p' if vma.private else 's', + vma.pgoff, vma.major, vma.minor, vma.inode, vma.name + )) + + +def stats_print(stats, tot_anon, tot_file, inc_empty): + # Print a statistics dictionary. + label_field = 32 + for label, stat in stats.items(): + type = stat['type'] + value = stat['value'] + if value or inc_empty: + pad = max(0, label_field - len(label) - 1) + if type == 'anon' and tot_anon > 0: + percent = f' ({value / tot_anon:3.0%})' + elif type == 'file' and tot_file > 0: + percent = f' ({value / tot_file:3.0%})' + else: + percent = '' + print(f"{label}:{' ' * pad}{value:8} kB{percent}") + + +def vma_parse(vma, pagemap, kpageflags, contorders): + # Generate thp and cont statistics for a single VMA. + start = vma.start >> PAGE_SHIFT + end = vma.end >> PAGE_SHIFT + + pmes = pagemap.get(start, end - start) + present = pmes & PM_PAGE_PRESENT != 0 + pfns = pmes & PM_PFN_MASK + pfns = pfns[present] + vfns = np.arange(start, end, dtype=np.uint64) + vfns = vfns[present] + + pfn_vec = cont_ranges_all([pfns], [pfns])[0] + flags = kpageflags.getv(pfn_vec) + anons = flags & KPF_ANON != 0 + heads = flags & KPF_COMPOUND_HEAD != 0 + thps = flags & KPF_THP != 0 + + vfns = vfns[thps] + pfns = pfns[thps] + anons = anons[thps] + heads = heads[thps] + + indexes = np.arange(len(vfns), dtype=np.uint64) + ranges = cont_ranges_all([vfns, pfns], [indexes, vfns, pfns]) + + thpstats = thp_parse(vma, kpageflags, ranges, indexes, vfns, pfns, anons, heads) + contstats = [cont_parse(vma, order, ranges, anons, heads) for order in contorders] + + tot_anon = vma.stats['Anonymous']['value'] + tot_file = vma.stats['Rss']['value'] - tot_anon + + return { + **thpstats, + **{k: v for s in contstats for k, v in s.items()} + }, tot_anon, tot_file + + +def do_main(args): + pids = set() + rollup = {} + rollup_anon 
= 0 + rollup_file = 0 + + if args.cgroup: + strict = False + for walk_info in os.walk(args.cgroup): + cgroup = walk_info[0] + with open(f'{cgroup}/cgroup.procs') as pidfile: + for line in pidfile.readlines(): + pids.add(int(line.strip())) + elif args.pid: + strict = True + pids = pids.union(args.pid) + else: + strict = False + for pid in os.listdir('/proc'): + if pid.isdigit(): + pids.add(int(pid)) + + if not args.rollup: + print(" PID START END PROT OFFSET DEV INODE OBJECT") + + for pid in pids: + try: + with PageMap(pid) as pagemap: + with KPageFlags() as kpageflags: + for vma in VMAList(pid, vma_all_stats if args.inc_smaps else vma_min_stats): + if (vma.read or vma.write or vma.execute) and vma.stats['Rss']['value'] > 0: + stats, vma_anon, vma_file = vma_parse(vma, pagemap, kpageflags, args.cont) + else: + stats = {} + vma_anon = 0 + vma_file = 0 + if args.inc_smaps: + stats = {**vma.stats, **stats} + if args.rollup: + for k, v in stats.items(): + if k in rollup: + assert(rollup[k]['type'] == v['type']) + rollup[k]['value'] += v['value'] + else: + rollup[k] = v + rollup_anon += vma_anon + rollup_file += vma_file + else: + vma_print(vma, pid) + stats_print(stats, vma_anon, vma_file, args.inc_empty) + except (FileNotFoundError, ProcessLookupError, FileIOException): + if strict: + raise + + if args.rollup: + stats_print(rollup, rollup_anon, rollup_file, args.inc_empty) + + +def main(): + docs_width = shutil.get_terminal_size().columns + docs_width -= 2 + docs_width = min(80, docs_width) + + def format(string): + text = re.sub(r'\s+', ' ', string) + text = re.sub(r'\s*\\n\s*', '\n', text) + paras = text.split('\n') + paras = [textwrap.fill(p, width=docs_width) for p in paras] + return '\n'.join(paras) + + def formatter(prog): + return argparse.RawDescriptionHelpFormatter(prog, width=docs_width) + + def size2order(human): + units = { + "K": 2**10, "M": 2**20, "G": 2**30, + "k": 2**10, "m": 2**20, "g": 2**30, + } + unit = 1 + if human[-1] in units: + unit = 
units[human[-1]] + human = human[:-1] + try: + size = int(human) + except ValueError: + raise ArgException('error: --cont value must be integer size with optional KMG unit') + size *= unit + order = int(math.log2(size / PAGE_SIZE)) + if order < 1: + raise ArgException('error: --cont value must be size of at least 2 pages') + if (1 << order) * PAGE_SIZE != size: + raise ArgException('error: --cont value must be size of power-of-2 pages') + if order > PMD_ORDER: + raise ArgException('error: --cont value must be less than or equal to PMD order') + return order + + parser = argparse.ArgumentParser(formatter_class=formatter, + description=format("""Prints information about how transparent huge + pages are mapped, either system-wide, or for a specified + process or cgroup.\\n + \\n + When run with --pid, the user explicitly specifies the set + of pids to scan. e.g. "--pid 10 [--pid 134 ...]". When run + with --cgroup, the user passes either a v1 or v2 cgroup and + all pids that belong to the cgroup subtree are scanned. When + run with neither --pid nor --cgroup, the full set of pids on + the system is gathered from /proc and scanned as if the user + had provided "--pid 1 --pid 2 ...".\\n + \\n + A default set of statistics is always generated for THP + mappings. However, it is also possible to generate + additional statistics for "contiguous block mappings" where + the block size is user-defined.\\n + \\n + Statistics are maintained independently for anonymous and + file-backed (pagecache) memory and are shown both in kB and + as a percentage of either total anonymous or total + file-backed memory as appropriate.\\n + \\n + THP Statistics\\n + --------------\\n + \\n + Statistics are always generated for fully- and + contiguously-mapped THPs whose mapping address is aligned to + their size, for each supported by the system. + Separate counters describe THPs mapped by PTE vs those + mapped by PMD. 
(Although note a THP can only be mapped by + PMD if it is PMD-sized):\\n + \\n + - anon-thp-pte-aligned-kB\\n + - file-thp-pte-aligned-kB\\n + - anon-thp-pmd-aligned-kB\\n + - file-thp-pmd-aligned-kB\\n + \\n + Similarly, statistics are always generated for fully- and + contiguously-mapped THPs whose mapping address is *not* + aligned to their size, for each supported by the + system. Due to the unaligned mapping, it is impossible to + map by PMD, so there are only PTE counters for this case:\\n + \\n + - anon-thp-pte-unaligned-kB\\n + - file-thp-pte-unaligned-kB\\n + \\n + Statistics are also always generated for mapped pages that + belong to a THP but where the is THP is *not* fully- and + contiguously- mapped. These "partial" mappings are all + counted in the same counter regardless of the size of the + THP that is partially mapped:\\n + \\n + - anon-thp-pte-partial\\n + - file-thp-pte-partial\\n + \\n + Contiguous Block Statistics\\n + ---------------------------\\n + \\n + An optional, additional set of statistics is generated for + every contiguous block size specified with `--cont `. + These statistics show how much memory is mapped in + contiguous blocks of and also aligned to . A + given contiguous block must all belong to the same THP, but + there is no requirement for it to be the *whole* THP. + Separate counters describe contiguous blocks mapped by PTE + vs those mapped by PMD:\\n + \\n + - anon-cont-pte-aligned-kB\\n + - file-cont-pte-aligned-kB\\n + - anon-cont-pmd-aligned-kB\\n + - file-cont-pmd-aligned-kB\\n + \\n + As an example, if monitoring 64K contiguous blocks (--cont + 64K), there are a number of sources that could provide such + blocks: a fully- and contiguously-mapped 64K THP that is + aligned to a 64K boundary would provide 1 block. A fully- + and contiguously-mapped 128K THP that is aligned to at least + a 64K boundary would provide 2 blocks. 
Or a 128K THP that + maps its first 100K, but contiguously and starting at a 64K + boundary would provide 1 block. A fully- and + contiguously-mapped 2M THP would provide 32 blocks. There + are many other possible permutations.\\n"""), + epilog=format("""Requires root privilege to access pagemap and + kpageflags.""")) + + group = parser.add_mutually_exclusive_group(required=False) + group.add_argument('--pid', + metavar='pid', required=False, type=int, default=[], action='append', + help="""Process id of the target process. Maybe issued multiple times to + scan multiple processes. --pid and --cgroup are mutually exclusive. + If neither are provided, all processes are scanned to provide + system-wide information.""") + + group.add_argument('--cgroup', + metavar='path', required=False, + help="""Path to the target cgroup in sysfs. Iterates over every pid in + the cgroup and its children. --pid and --cgroup are mutually + exclusive. If neither are provided, all processes are scanned to + provide system-wide information.""") + + parser.add_argument('--rollup', + required=False, default=False, action='store_true', + help="""Sum the per-vma statistics to provide a summary over the whole + system, process or cgroup.""") + + parser.add_argument('--cont', + metavar='size[KMG]', required=False, default=[], action='append', + help="""Adds stats for memory that is mapped in contiguous blocks of + and also aligned to . May be issued multiple times to + track multiple sized blocks. Useful to infer e.g. arm64 contpte and + hpa mappings. 
Size must be a power-of-2 number of pages.""") + + parser.add_argument('--inc-smaps', + required=False, default=False, action='store_true', + help="""Include all numerical, additive /proc//smaps stats in the + output.""") + + parser.add_argument('--inc-empty', + required=False, default=False, action='store_true', + help="""Show all statistics including those whose value is 0.""") + + parser.add_argument('--periodic', + metavar='sleep_ms', required=False, type=int, + help="""Run in a loop, polling every sleep_ms milliseconds.""") + + args = parser.parse_args() + + try: + args.cont = [size2order(cont) for cont in args.cont] + except ArgException as e: + parser.print_usage() + raise + + if args.periodic: + while True: + do_main(args) + print() + time.sleep(args.periodic / 1000) + else: + do_main(args) + + +if __name__ == "__main__": + try: + main() + except Exception as e: + prog = os.path.basename(sys.argv[0]) + print(f'{prog}: {e}') + exit(1) -- Gitee From 8ce1de40eaa0dd25dabd700d7ace92110377a0eb Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 17 Jan 2024 18:39:54 +0800 Subject: [PATCH 1308/2138] mm: memory: move mem_cgroup_charge() into alloc_anon_folio() ANBZ: #9728 commit 085ff35e76368455c629b194bf3cb62dd82eadf6 upstream. The GFP flags from vma_thp_gfp_mask() according to user configuration only used for large folio allocation but not for memory cgroup charge, and GFP_KERNEL is used for both order-0 and large order folio when memory cgroup charge at present. However, mem_cgroup_charge() uses the GFP flags in a fairly sophisticated way. In addition to checking gfpflags_allow_blocking(), it pays attention to __GFP_NORETRY and __GFP_RETRY_MAYFAIL to ensure that processes within this memcg do not exceed their quotas. 
So we'd better to move mem_cgroup_charge() into alloc_anon_folio(), 1) it will make us to allocate as much as possible large order folio, because we could try the next order if mem_cgroup_charge() fails, although the memcg's memory usage is close to its limits. 2) using same GFP flags for allocation and charge is to be consistent with PMD THP firstly, in addition, according to GFP flag returned from vma_thp_gfp_mask(), GFP_TRANSHUGE_LIGHT could make us skip direct reclaim, _GFP_NORETRY will make us skip mem_cgroup_oom() and won't trigger memory cgroup oom from large order(order <= COSTLY_ORDER) folio charging. Link: https://lkml.kernel.org/r/20240122011612.501029-1-wangkefeng.wang@huawei.com Link: https://lkml.kernel.org/r/20240117103954.2756050-1-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Ryan Roberts Cc: David Hildenbrand Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Roman Gushchin Cc: Johannes Weiner Cc: Shakeel Butt Cc: Muchun Song Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3837 --- mm/memory.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 511249d7a7d6..3053a6121541 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4439,8 +4439,8 @@ static bool pte_range_none(pte_t *pte, int nr_pages) static struct folio *alloc_anon_folio(struct vm_fault *vmf) { -#ifdef CONFIG_TRANSPARENT_HUGEPAGE struct vm_area_struct *vma = vmf->vma; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE unsigned long orders; struct folio *folio; unsigned long addr; @@ -4492,15 +4492,21 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf) addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); folio = vma_alloc_folio(gfp, order, vma, addr, true); if (folio) { + if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { + folio_put(folio); + goto next; + } + folio_throttle_swaprate(folio, gfp); 
clear_huge_page(&folio->page, vmf->address, 1 << order); return folio; } +next: order = next_order(&orders, order); } fallback: #endif - return vma_alloc_zeroed_movable_folio(vmf->vma, vmf->address); + return folio_prealloc(vma->vm_mm, vma, vmf->address, true); } /* @@ -4567,10 +4573,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) nr_pages = folio_nr_pages(folio); addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); - if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL)) - goto oom_free_page; - folio_throttle_swaprate(folio, GFP_KERNEL); - /* * The memory barrier inside __folio_mark_uptodate makes sure that * preceding stores to the page contents become visible before @@ -4624,8 +4626,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) release: folio_put(folio); goto unlock; -oom_free_page: - folio_put(folio); oom: return VM_FAULT_OOM; } -- Gitee From 25ed7c6773d002f93800dac68e985f7af4ee8ff0 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 12 Apr 2024 23:48:55 +1200 Subject: [PATCH 1309/2138] mm: add per-order mTHP anon_fault_alloc and anon_fault_fallback counters ANBZ: #9728 commit ec33687c674934dfefd782a8ffd58370b080b503 upstream. Patch series "mm: add per-order mTHP alloc and swpout counters", v6. The patchset introduces a framework to facilitate mTHP counters, starting with the allocation and swap-out counters. Currently, only four new nodes are appended to the stats directory for each mTHP size. /sys/kernel/mm/transparent_hugepage/hugepages-/stats anon_fault_alloc anon_fault_fallback anon_fault_fallback_charge anon_swpout anon_swpout_fallback These nodes are crucial for us to monitor the fragmentation levels of both the buddy system and the swap partitions. In the future, we may consider adding additional nodes for further insights. This patch (of 4): Profiling a system blindly with mTHP has become challenging due to the lack of visibility into its operations. 
Presenting the success rate of mTHP allocations appears to be pressing need. Recently, I've been experiencing significant difficulty debugging performance improvements and regressions without these figures. It's crucial for us to understand the true effectiveness of mTHP in real-world scenarios, especially in systems with fragmented memory. This patch establishes the framework for per-order mTHP counters. It begins by introducing the anon_fault_alloc and anon_fault_fallback counters. Additionally, to maintain consistency with thp_fault_fallback_charge in /proc/vmstat, this patch also tracks anon_fault_fallback_charge when mem_cgroup_charge fails for mTHP. Incorporating additional counters should now be straightforward as well. Link: https://lkml.kernel.org/r/20240412114858.407208-1-21cnbao@gmail.com Link: https://lkml.kernel.org/r/20240412114858.407208-2-21cnbao@gmail.com Signed-off-by: Barry Song Acked-by: David Hildenbrand Cc: Chris Li Cc: Domenico Cerasuolo Cc: Kairui Song Cc: Matthew Wilcox (Oracle) Cc: Peter Xu Cc: Ryan Roberts Cc: Suren Baghdasaryan Cc: Yosry Ahmed Cc: Yu Zhao Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3837 --- include/linux/huge_mm.h | 21 +++++++++++++++++ mm/huge_memory.c | 52 +++++++++++++++++++++++++++++++++++++++++ mm/memory.c | 5 ++++ 3 files changed, 78 insertions(+) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 09179237dac4..27f4987fc3dc 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -257,6 +257,27 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, enforce_sysfs, orders); } +enum mthp_stat_item { + MTHP_STAT_ANON_FAULT_ALLOC, + MTHP_STAT_ANON_FAULT_FALLBACK, + MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, + __MTHP_STAT_COUNT +}; + +struct mthp_stat { + unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT]; +}; + +DECLARE_PER_CPU(struct 
mthp_stat, mthp_stats); + +static inline void count_mthp_stat(int order, enum mthp_stat_item item) +{ + if (order <= 0 || order > PMD_ORDER) + return; + + this_cpu_inc(mthp_stats.stats[order][item]); +} + #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ (1<stats[order][item]; + } + + return sum; +} + +#define DEFINE_MTHP_STAT_ATTR(_name, _index) \ +static ssize_t _name##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ +{ \ + int order = to_thpsize(kobj)->order; \ + \ + return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index)); \ +} \ +static struct kobj_attribute _name##_attr = __ATTR_RO(_name) + +DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC); +DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK); +DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); + +static struct attribute *stats_attrs[] = { + &anon_fault_alloc_attr.attr, + &anon_fault_fallback_attr.attr, + &anon_fault_fallback_charge_attr.attr, + NULL, +}; + +static struct attribute_group stats_attr_group = { + .name = "stats", + .attrs = stats_attrs, +}; + static struct thpsize *thpsize_create(int order, struct kobject *parent) { unsigned long size = (PAGE_SIZE << order) / SZ_1K; @@ -537,6 +579,12 @@ static struct thpsize *thpsize_create(int order, struct kobject *parent) return ERR_PTR(ret); } + ret = sysfs_create_group(&thpsize->kobj, &stats_attr_group); + if (ret) { + kobject_put(&thpsize->kobj); + return ERR_PTR(ret); + } + thpsize->order = order; return thpsize; } @@ -834,6 +882,8 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, folio_put(folio); count_vm_event(THP_FAULT_FALLBACK); count_vm_event(THP_FAULT_FALLBACK_CHARGE); + count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK); + count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); return VM_FAULT_FALLBACK; } folio_throttle_swaprate(folio, gfp); @@ -883,6 +933,7 @@ 
static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, mm_inc_nr_ptes(vma->vm_mm); spin_unlock(vmf->ptl); count_vm_event(THP_FAULT_ALLOC); + count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_ALLOC); count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC); } @@ -1003,6 +1054,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf) folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, vma, haddr, true); if (unlikely(!folio)) { count_vm_event(THP_FAULT_FALLBACK); + count_mthp_stat(HPAGE_PMD_ORDER, MTHP_STAT_ANON_FAULT_FALLBACK); return VM_FAULT_FALLBACK; } return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp); diff --git a/mm/memory.c b/mm/memory.c index 3053a6121541..e76086e1b6ef 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4493,6 +4493,7 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf) folio = vma_alloc_folio(gfp, order, vma, addr, true); if (folio) { if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { + count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); folio_put(folio); goto next; } @@ -4501,6 +4502,7 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf) return folio; } next: + count_mthp_stat(order, MTHP_STAT_ANON_FAULT_FALLBACK); order = next_order(&orders, order); } @@ -4610,6 +4612,9 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) folio_ref_add(folio, nr_pages - 1); add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC); +#endif folio_add_new_anon_rmap(folio, vma, addr); folio_add_lru_vma(folio, vma); setpte: -- Gitee From 10f2484f3446ea5cf07270008db02dd261fa48b4 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 12 Apr 2024 23:48:56 +1200 Subject: [PATCH 1310/2138] mm: add per-order mTHP anon_swpout and anon_swpout_fallback counters ANBZ: #9728 commit d0f048ac39f6a71566d3f49a5922dfd7fa0d585b upstream. 
This helps to display the fragmentation situation of the swapfile, knowing the proportion of how much we haven't split large folios. So far, we only support non-split swapout for anon memory, with the possibility of expanding to shmem in the future. So, we add the "anon" prefix to the counter names. Link: https://lkml.kernel.org/r/20240412114858.407208-3-21cnbao@gmail.com Signed-off-by: Barry Song Reviewed-by: Ryan Roberts Acked-by: David Hildenbrand Cc: Chris Li Cc: Domenico Cerasuolo Cc: Kairui Song Cc: Matthew Wilcox (Oracle) Cc: Peter Xu Cc: Ryan Roberts Cc: Suren Baghdasaryan Cc: Yosry Ahmed Cc: Yu Zhao Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3837 --- include/linux/huge_mm.h | 2 ++ mm/huge_memory.c | 4 ++++ mm/page_io.c | 1 + mm/vmscan.c | 3 +++ 4 files changed, 10 insertions(+) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 27f4987fc3dc..eaf43afc411e 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -261,6 +261,8 @@ enum mthp_stat_item { MTHP_STAT_ANON_FAULT_ALLOC, MTHP_STAT_ANON_FAULT_FALLBACK, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, + MTHP_STAT_ANON_SWPOUT, + MTHP_STAT_ANON_SWPOUT_FALLBACK, __MTHP_STAT_COUNT }; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 7f84bee7a848..50f4afb130c1 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -543,11 +543,15 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC); DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK); DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); +DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT); +DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK); static struct attribute *stats_attrs[] = { &anon_fault_alloc_attr.attr, &anon_fault_fallback_attr.attr, 
&anon_fault_fallback_charge_attr.attr, + &anon_swpout_attr.attr, + &anon_swpout_fallback_attr.attr, NULL, }; diff --git a/mm/page_io.c b/mm/page_io.c index cb559ae324c6..9302070d463e 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -212,6 +212,7 @@ static inline void count_swpout_vm_event(struct folio *folio) count_memcg_folio_events(folio, THP_SWPOUT, 1); count_vm_event(THP_SWPOUT); } + count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_SWPOUT); #endif count_vm_events(PSWPOUT, folio_nr_pages(folio)); } diff --git a/mm/vmscan.c b/mm/vmscan.c index 617a57aedc71..e2da0582ecc6 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1913,6 +1913,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, goto activate_locked; } if (!add_to_swap(folio)) { + int __maybe_unused order = folio_order(folio); + if (!folio_test_large(folio)) goto activate_locked_split; /* Fallback to swap normal pages */ @@ -1923,6 +1925,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, count_memcg_folio_events(folio, THP_SWPOUT_FALLBACK, 1); count_vm_event(THP_SWPOUT_FALLBACK); } + count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK); #endif if (!add_to_swap(folio)) goto activate_locked_split; -- Gitee From ebb01290dd73a75ce2721ad3000b3d0590a72b52 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 12 Apr 2024 23:48:57 +1200 Subject: [PATCH 1311/2138] mm: add docs for per-order mTHP counters and transhuge_page ABI ANBZ: #9728 commit 42248b9d34ea1be1b959d343fff465906cb787fc upstream. This patch includes documentation for mTHP counters and an ABI file for sys-kernel-mm-transparent-hugepage, which appears to have been missing for some time. 
[v-songbaohua@oppo.com: fix the name and unexpected indentation] Link: https://lkml.kernel.org/r/20240415054538.17071-1-21cnbao@gmail.com Link: https://lkml.kernel.org/r/20240412114858.407208-4-21cnbao@gmail.com Signed-off-by: Barry Song Reviewed-by: Ryan Roberts Reviewed-by: David Hildenbrand Cc: Chris Li Cc: Domenico Cerasuolo Cc: Kairui Song Cc: Matthew Wilcox (Oracle) Cc: Peter Xu Cc: Ryan Roberts Cc: Suren Baghdasaryan Cc: Yosry Ahmed Cc: Yu Zhao Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3837 --- .../sysfs-kernel-mm-transparent-hugepage | 18 ++++++++++++ Documentation/admin-guide/mm/transhuge.rst | 28 +++++++++++++++++++ 2 files changed, 46 insertions(+) create mode 100644 Documentation/ABI/testing/sysfs-kernel-mm-transparent-hugepage diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-transparent-hugepage b/Documentation/ABI/testing/sysfs-kernel-mm-transparent-hugepage new file mode 100644 index 000000000000..7bfbb9cc2c11 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-kernel-mm-transparent-hugepage @@ -0,0 +1,18 @@ +What: /sys/kernel/mm/transparent_hugepage/ +Date: April 2024 +Contact: Linux memory management mailing list +Description: + /sys/kernel/mm/transparent_hugepage/ contains a number of files and + subdirectories, + + - defrag + - enabled + - hpage_pmd_size + - khugepaged + - shmem_enabled + - use_zero_page + - subdirectories of the form hugepages-kB, where + is the page size of the hugepages supported by the kernel/CPU + combination. + + See Documentation/admin-guide/mm/transhuge.rst for details. 
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 04eb45a2f940..e0fe17affeb3 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -447,6 +447,34 @@ thp_swpout_fallback Usually because failed to allocate some continuous swap space for the huge page. +In /sys/kernel/mm/transparent_hugepage/hugepages-kB/stats, There are +also individual counters for each huge page size, which can be utilized to +monitor the system's effectiveness in providing huge pages for usage. Each +counter has its own corresponding file. + +anon_fault_alloc + is incremented every time a huge page is successfully + allocated and charged to handle a page fault. + +anon_fault_fallback + is incremented if a page fault fails to allocate or charge + a huge page and instead falls back to using huge pages with + lower orders or small pages. + +anon_fault_fallback_charge + is incremented if a page fault fails to charge a huge page and + instead falls back to using huge pages with lower orders or + small pages even though the allocation was successful. + +anon_swpout + is incremented every time a huge page is swapped out in one + piece without splitting. + +anon_swpout_fallback + is incremented if a huge page has to be split before swapout. + Usually because failed to allocate some continuous swap space + for the huge page. + As the system ages, allocating huge pages may be expensive as the system uses memory compaction to copy data around memory to free a huge page for use. There are some counters in ``/proc/vmstat`` to help -- Gitee From 054ab9b23523a6d320e57b96bcf4c3ad2f34382e Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 12 Apr 2024 23:48:58 +1200 Subject: [PATCH 1312/2138] mm: correct the docs for thp_fault_alloc and thp_fault_fallback ANBZ: #9728 commit a14421ae2a99378c4103bb03606465ab13e75509 upstream. The documentation does not align with the code. 
In __do_huge_pmd_anonymous_page(), THP_FAULT_FALLBACK is incremented when mem_cgroup_charge() fails, despite the allocation succeeding, whereas THP_FAULT_ALLOC is only incremented after a successful charge. Link: https://lkml.kernel.org/r/20240412114858.407208-5-21cnbao@gmail.com Signed-off-by: Barry Song Reviewed-by: Ryan Roberts Reviewed-by: David Hildenbrand Cc: Chris Li Cc: Domenico Cerasuolo Cc: Kairui Song Cc: Matthew Wilcox (Oracle) Cc: Peter Xu Cc: Ryan Roberts Cc: Suren Baghdasaryan Cc: Yosry Ahmed Cc: Yu Zhao Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3837 --- Documentation/admin-guide/mm/transhuge.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index e0fe17affeb3..f82300b9193f 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -369,7 +369,7 @@ monitor how successfully the system is providing huge pages for use. thp_fault_alloc is incremented every time a huge page is successfully - allocated to handle a page fault. + allocated and charged to handle a page fault. thp_collapse_alloc is incremented by khugepaged when it has found @@ -377,7 +377,7 @@ thp_collapse_alloc successfully allocated a new huge page to store the data. thp_fault_fallback - is incremented if a page fault fails to allocate + is incremented if a page fault fails to allocate or charge a huge page and instead falls back to using small pages. thp_fault_fallback_charge -- Gitee From d1a337bff49ea2d1b49bdd02156a6bb684fd213c Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 23 May 2024 10:36:39 +0800 Subject: [PATCH 1313/2138] mm: drop the 'anon_' prefix for swap-out mTHP counters ANBZ: #9728 commit 0d648dd5c899f33154b98a6aef6e3dab0f4de613 upstream. 
The mTHP swap related counters: 'anon_swpout' and 'anon_swpout_fallback' are confusing with an 'anon_' prefix, since the shmem can swap out non-anonymous pages. So drop the 'anon_' prefix to keep consistent with the old swap counter names. This is needed in 6.10-rcX to avoid having an inconsistent ABI out in the field. Link: https://lkml.kernel.org/r/7a8989c13299920d7589007a30065c3e2c19f0e0.1716431702.git.baolin.wang@linux.alibaba.com Fixes: d0f048ac39f6 ("mm: add per-order mTHP anon_swpout and anon_swpout_fallback counters") Fixes: 42248b9d34ea ("mm: add docs for per-order mTHP counters and transhuge_page ABI") Signed-off-by: Baolin Wang Suggested-by: "Huang, Ying" Acked-by: Barry Song Cc: David Hildenbrand Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Ryan Roberts Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3837 --- Documentation/admin-guide/mm/transhuge.rst | 4 ++-- include/linux/huge_mm.h | 4 ++-- mm/huge_memory.c | 8 ++++---- mm/page_io.c | 2 +- mm/vmscan.c | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index f82300b9193f..9ca1f3a7f5bb 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -466,11 +466,11 @@ anon_fault_fallback_charge instead falls back to using huge pages with lower orders or small pages even though the allocation was successful. -anon_swpout +swpout is incremented every time a huge page is swapped out in one piece without splitting. -anon_swpout_fallback +swpout_fallback is incremented if a huge page has to be split before swapout. Usually because failed to allocate some continuous swap space for the huge page. 
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index eaf43afc411e..7821ff1fc133 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -261,8 +261,8 @@ enum mthp_stat_item { MTHP_STAT_ANON_FAULT_ALLOC, MTHP_STAT_ANON_FAULT_FALLBACK, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, - MTHP_STAT_ANON_SWPOUT, - MTHP_STAT_ANON_SWPOUT_FALLBACK, + MTHP_STAT_SWPOUT, + MTHP_STAT_SWPOUT_FALLBACK, __MTHP_STAT_COUNT }; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 50f4afb130c1..858406c8bf65 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -543,15 +543,15 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC); DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK); DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); -DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT); -DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK); +DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT); +DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK); static struct attribute *stats_attrs[] = { &anon_fault_alloc_attr.attr, &anon_fault_fallback_attr.attr, &anon_fault_fallback_charge_attr.attr, - &anon_swpout_attr.attr, - &anon_swpout_fallback_attr.attr, + &swpout_attr.attr, + &swpout_fallback_attr.attr, NULL, }; diff --git a/mm/page_io.c b/mm/page_io.c index 9302070d463e..7d402c66a01f 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -212,7 +212,7 @@ static inline void count_swpout_vm_event(struct folio *folio) count_memcg_folio_events(folio, THP_SWPOUT, 1); count_vm_event(THP_SWPOUT); } - count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_SWPOUT); + count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT); #endif count_vm_events(PSWPOUT, folio_nr_pages(folio)); } diff --git a/mm/vmscan.c b/mm/vmscan.c index e2da0582ecc6..f47ee6f06381 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1925,7 +1925,7 @@ 
static unsigned int shrink_folio_list(struct list_head *folio_list, count_memcg_folio_events(folio, THP_SWPOUT_FALLBACK, 1); count_vm_event(THP_SWPOUT_FALLBACK); } - count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK); + count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK); #endif if (!add_to_swap(folio)) goto activate_locked_split; -- Gitee From a647e02ca3ebf969972902398531b9697f023228 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Fri, 24 May 2024 08:50:48 +1200 Subject: [PATCH 1314/2138] mm: huge_mm: fix undefined reference to `mthp_stats' for CONFIG_SYSFS=n ANBZ: #9728 commit 94d46bf17916965e918bd2f3d2eec057f7c5578d upstream. if CONFIG_SYSFS is not enabled in config, we get the below error, All errors (new ones prefixed by >>): s390-linux-ld: mm/memory.o: in function `count_mthp_stat': >> include/linux/huge_mm.h:285:(.text+0x191c): undefined reference to `mthp_stats' s390-linux-ld: mm/huge_memory.o:(.rodata+0x10): undefined reference to `mthp_stats' vim +285 include/linux/huge_mm.h 279 280 static inline void count_mthp_stat(int order, enum mthp_stat_item item) 281 { 282 if (order <= 0 || order > PMD_ORDER) 283 return; 284 > 285 this_cpu_inc(mthp_stats.stats[order][item]); 286 } 287 Link: https://lkml.kernel.org/r/20240523210045.40444-1-21cnbao@gmail.com Fixes: ec33687c6749 ("mm: add per-order mTHP anon_fault_alloc and anon_fault_fallback counters") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202405231728.tCAogiSI-lkp@intel.com/ Signed-off-by: Barry Song Tested-by: Yujie Liu Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3837 --- include/linux/huge_mm.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 7821ff1fc133..ad909792c113 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -270,6 +270,7 @@ struct mthp_stat { unsigned long 
stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT]; }; +#ifdef CONFIG_SYSFS DECLARE_PER_CPU(struct mthp_stat, mthp_stats); static inline void count_mthp_stat(int order, enum mthp_stat_item item) @@ -279,6 +280,11 @@ static inline void count_mthp_stat(int order, enum mthp_stat_item item) this_cpu_inc(mthp_stats.stats[order][item]); } +#else +static inline void count_mthp_stat(int order, enum mthp_stat_item item) +{ +} +#endif #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ -- Gitee From ee5d7aac208d9a5de184f2518fb7cacd48d33049 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 29 Mar 2024 14:59:33 +0800 Subject: [PATCH 1315/2138] mm: huge_memory: add the missing folio_test_pmd_mappable() for THP split statistics ANBZ: #9728 commit 835c3a25aa373d486514e4e0f5a7450ea82ae489 upstream. Now the mTHP can also be split or added into the deferred list, so add folio_test_pmd_mappable() validation for PMD mapped THP, to avoid confusion with PMD mapped THP related statistics. 
[baolin.wang@linux.alibaba.com: check THP earlier in case folio is split, per Lance] Link: https://lkml.kernel.org/r/b99f8cb14bc85fdb6ab43721d1331cb5ebed2581.1713771041.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/a5341defeef27c9ac7b85c97f030f93e4368bbc1.1711694852.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: David Hildenbrand Reviewed-by: Lance Yang Cc: Muchun Song Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3837 --- mm/huge_memory.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 858406c8bf65..cd0e72c24c31 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2875,6 +2875,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) XA_STATE(xas, &folio->mapping->i_pages, folio->index); struct anon_vma *anon_vma = NULL; struct address_space *mapping = NULL; + bool is_thp = folio_test_pmd_mappable(folio); int extra_pins, ret; pgoff_t end; bool is_hzp; @@ -3018,7 +3019,8 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) i_mmap_unlock_read(mapping); out: xas_destroy(&xas); - count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); + if (is_thp) + count_vm_event(!ret ? 
THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); return ret; } @@ -3086,7 +3088,8 @@ void deferred_split_folio(struct folio *folio) spin_lock_irqsave(&ds_queue->split_queue_lock, flags); if (list_empty(&folio->_deferred_list)) { - count_vm_event(THP_DEFERRED_SPLIT_PAGE); + if (folio_test_pmd_mappable(folio)) + count_vm_event(THP_DEFERRED_SPLIT_PAGE); list_add_tail(&folio->_deferred_list, &ds_queue->split_queue); ds_queue->split_queue_len++; #ifdef CONFIG_MEMCG -- Gitee From 61971ac86d1aa706a811aa0cc3d31912ca83b1dd Mon Sep 17 00:00:00 2001 From: Lance Yang Date: Fri, 28 Jun 2024 21:07:49 +0800 Subject: [PATCH 1316/2138] mm: add per-order mTHP split counters ANBZ: #9728 commit f216c845f3c772e54d27fe209fd300b10e7bf54a upstream. Patch series "mm: introduce per-order mTHP split counters", v3. At present, the split counters in THP statistics no longer include PTE-mapped mTHP. Therefore, we want to introduce per-order mTHP split counters to monitor the frequency of mTHP splits. This will assist developers in better analyzing and optimizing system performance. /sys/kernel/mm/transparent_hugepage/hugepages-/stats split split_failed split_deferred This patch (of 2): Currently, the split counters in THP statistics no longer include PTE-mapped mTHP. Therefore, we propose introducing per-order mTHP split counters to monitor the frequency of mTHP splits. This will help developers better analyze and optimize system performance. 
/sys/kernel/mm/transparent_hugepage/hugepages-/stats split split_failed split_deferred [ioworker0@gmail.com: make things more readable, per Barry and Baolin] Link: https://lkml.kernel.org/r/20240704012905.42971-2-ioworker0@gmail.com [ioworker0@gmail.com: use == for `order' test, per David] Link: https://lkml.kernel.org/r/20240705113119.82210-1-ioworker0@gmail.com Link: https://lkml.kernel.org/r/20240704012905.42971-1-ioworker0@gmail.com Link: https://lkml.kernel.org/r/20240704012905.42971-2-ioworker0@gmail.com Link: https://lkml.kernel.org/r/20240628130750.73097-1-ioworker0@gmail.com Link: https://lkml.kernel.org/r/20240628130750.73097-2-ioworker0@gmail.com Signed-off-by: Mingzhe Yang Signed-off-by: Lance Yang Reviewed-by: Ryan Roberts Acked-by: Barry Song Reviewed-by: Baolin Wang Acked-by: David Hildenbrand Cc: Bang Li Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3837 --- include/linux/huge_mm.h | 3 +++ mm/huge_memory.c | 12 ++++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index ad909792c113..e969dc607752 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -263,6 +263,9 @@ enum mthp_stat_item { MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, MTHP_STAT_SWPOUT, MTHP_STAT_SWPOUT_FALLBACK, + MTHP_STAT_SPLIT, + MTHP_STAT_SPLIT_FAILED, + MTHP_STAT_SPLIT_DEFERRED, __MTHP_STAT_COUNT }; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index cd0e72c24c31..87d8dc892ea7 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -545,6 +545,9 @@ DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK); DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT); DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK); +DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT); 
+DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED); +DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED); static struct attribute *stats_attrs[] = { &anon_fault_alloc_attr.attr, @@ -552,6 +555,9 @@ static struct attribute *stats_attrs[] = { &anon_fault_fallback_charge_attr.attr, &swpout_attr.attr, &swpout_fallback_attr.attr, + &split_attr.attr, + &split_failed_attr.attr, + &split_deferred_attr.attr, NULL, }; @@ -2875,7 +2881,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) XA_STATE(xas, &folio->mapping->i_pages, folio->index); struct anon_vma *anon_vma = NULL; struct address_space *mapping = NULL; - bool is_thp = folio_test_pmd_mappable(folio); + int order = folio_order(folio); int extra_pins, ret; pgoff_t end; bool is_hzp; @@ -3019,8 +3025,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) i_mmap_unlock_read(mapping); out: xas_destroy(&xas); - if (is_thp) + if (order == HPAGE_PMD_ORDER) count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); + count_mthp_stat(order, !ret ? MTHP_STAT_SPLIT : MTHP_STAT_SPLIT_FAILED); return ret; } @@ -3090,6 +3097,7 @@ void deferred_split_folio(struct folio *folio) if (list_empty(&folio->_deferred_list)) { if (folio_test_pmd_mappable(folio)) count_vm_event(THP_DEFERRED_SPLIT_PAGE); + count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED); list_add_tail(&folio->_deferred_list, &ds_queue->split_queue); ds_queue->split_queue_len++; #ifdef CONFIG_MEMCG -- Gitee From 74d18b5a0f4ed7a8e5c12ca3ac1cb67d17c5cb0b Mon Sep 17 00:00:00 2001 From: Lance Yang Date: Fri, 28 Jun 2024 21:07:50 +0800 Subject: [PATCH 1317/2138] mm: add docs for per-order mTHP split counters ANBZ: #9728 commit 9b89e018990de47c72ef8b2ca29204f88fda8f05 upstream. This commit introduces documentation for mTHP split counters in transhuge.rst. 
[ioworker0@gmail.com: improve the doc as suggested by Ryan] Link: https://lkml.kernel.org/r/20240704012905.42971-3-ioworker0@gmail.com [ioworker0@gmail.com: tweak Documentation/admin-guide/mm/transhuge.rst] Link: https://lkml.kernel.org/r/20240707013659.1151-1-ioworker0@gmail.com Link: https://lkml.kernel.org/r/20240628130750.73097-3-ioworker0@gmail.com Signed-off-by: Mingzhe Yang Signed-off-by: Lance Yang Reviewed-by: Barry Song Reviewed-by: Ryan Roberts Acked-by: David Hildenbrand Cc: Bang Li Cc: Baolin Wang Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3837 --- Documentation/admin-guide/mm/transhuge.rst | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 9ca1f3a7f5bb..53cf4f24e5e9 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -343,10 +343,6 @@ also applies to the regions registered in khugepaged. Monitoring usage ================ -.. note:: - Currently the below counters only record events relating to - PMD-sized THP. Events relating to other THP sizes are not included. - The number of PMD-sized anonymous transparent huge pages currently used by the system is available by reading the AnonHugePages field in ``/proc/meminfo``. To identify what applications are using PMD-sized anonymous transparent huge @@ -475,6 +471,21 @@ swpout_fallback Usually because failed to allocate some continuous swap space for the huge page. +split + is incremented every time a huge page is successfully split into + smaller orders. This can happen for a variety of reasons but a + common reason is that a huge page is old and is being reclaimed. + +split_failed + is incremented if kernel fails to split huge + page. This can happen if the page was pinned by somebody. 
+ +split_deferred + is incremented when a huge page is put onto split queue. + This happens when a huge page is partially unmapped and splitting + it would free up some memory. Pages on split queue are going to + be split under memory pressure, if splitting is possible. + As the system ages, allocating huge pages may be expensive as the system uses memory compaction to copy data around memory to free a huge page for use. There are some counters in ``/proc/vmstat`` to help -- Gitee From b0f735b6148d59db5efe8c599638649078ca1e90 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Wed, 12 May 2021 10:34:50 +0800 Subject: [PATCH 1318/2138] anolis: dax: add helper to copy pmem range ANBZ: #9794 Add a helper to copy two pmem rage in a file system, following patch will use this helper. Signed-off-by: Xiaoguang Wang Signed-off-by: Gao Xiang [ joe: revive bdev_dax_pgoff and adapt the new dax_direct_access API ] Signed-off-by: Joseph Qi Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3831 --- drivers/dax/super.c | 14 ++++++++++++++ fs/dax.c | 40 ++++++++++++++++++++++++++++++++++++++++ include/linux/dax.h | 4 ++++ 3 files changed, 58 insertions(+) diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 0da9232ea175..6f2bc78f6184 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -71,6 +71,20 @@ void dax_remove_host(struct gendisk *disk) } EXPORT_SYMBOL_GPL(dax_remove_host); +int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size, + pgoff_t *pgoff) +{ + sector_t start_sect = bdev ? 
get_start_sect(bdev) : 0; + phys_addr_t phys_off = (start_sect + sector) * 512; + + if (pgoff) + *pgoff = PHYS_PFN(phys_off); + if (phys_off % PAGE_SIZE || size % PAGE_SIZE) + return -EINVAL; + return 0; +} +EXPORT_SYMBOL(bdev_dax_pgoff); + /** * fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax * @bdev: block device to find a dax_device for diff --git a/fs/dax.c b/fs/dax.c index 8c09578fa035..2cf9efed9059 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -1063,6 +1063,46 @@ int dax_writeback_mapping_range(struct address_space *mapping, } EXPORT_SYMBOL_GPL(dax_writeback_mapping_range); +int dax_copy_range(struct block_device *bdev, struct dax_device *dax_dev, + u64 src_addr, u64 dst_addr, size_t size) +{ + const sector_t src_sector = src_addr >> SECTOR_SHIFT; + const sector_t dst_sector = dst_addr >> SECTOR_SHIFT; + pgoff_t spgoff, dpgoff; + int id, rc; + long length; + void *saddr, *daddr; + + rc = bdev_dax_pgoff(bdev, src_sector, size, &spgoff); + if (rc) + return rc; + + rc = bdev_dax_pgoff(bdev, dst_sector, size, &dpgoff); + if (rc) + return rc; + + id = dax_read_lock(); + length = dax_direct_access(dax_dev, spgoff, PHYS_PFN(size), DAX_ACCESS, + &saddr, NULL); + if (length < 0) { + rc = length; + goto out; + } + + length = dax_direct_access(dax_dev, dpgoff, PHYS_PFN(size), DAX_ACCESS, + &daddr, NULL); + if (length < 0) { + rc = length; + goto out; + } + + rc = copy_mc_to_kernel(daddr, saddr, size); +out: + dax_read_unlock(id); + return rc; +} +EXPORT_SYMBOL_GPL(dax_copy_range); + static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos, size_t size, void **kaddr, pfn_t *pfnp) { diff --git a/include/linux/dax.h b/include/linux/dax.h index b463502b16e1..66e663664acc 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -128,6 +128,8 @@ void set_dax_nocache(struct dax_device *dax_dev); void set_dax_nomc(struct dax_device *dax_dev); struct writeback_control; +int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t 
size, + pgoff_t *pgoff); #if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX) int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk); void dax_remove_host(struct gendisk *disk); @@ -248,6 +250,8 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry_sync(struct address_space *mapping, pgoff_t index); +int dax_copy_range(struct block_device *bdev, struct dax_device *dax_dev, + u64 src_addr, u64 dst_addr, size_t size); int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff, struct inode *dest, loff_t destoff, loff_t len, bool *is_same, -- Gitee From 258401cc4c161d18ec2be1e3ee26c0ac623c7463 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 15 Jan 2024 14:20:26 +0800 Subject: [PATCH 1319/2138] anolis: xfs: add capability to unshare shared range of files that don't trigger cow ANBZ: #9794 In current implementation, if user modifies a file range which shares physical data blocks with other files, xfs will allocate new physical data blocks for this range, map new physical data blocks to modified file, and unshare old physical data blocks. Unfortunately, this behavior may result in that modified file has many fragments. Say this example workload, initially app creates a file with one big written extent, app will do random read/write operations in this file, and user will often do snapshot for this file in at some point. User very cares about the time consumed to doing reflink. In current xfs cow implementation, we'll always allocate new physical data blocks for source file, and source file may have many small extents, which will increase the time consumed to doing reflink. To improve this situation, add a choice that user can unshare other files' data range which share same physical data blocks. 
For example, when source file is modified, we query files that share same physical data blocks by querying reverse map tree and unshare shared physical data blocks for found files, then source file's physical data blocks will remain stable. Signed-off-by: Xiaoguang Wang Signed-off-by: Gao Xiang [ joe: xfs_iunlock before call into xfs_reflink_unshare_other_owners, remove xfs_trans_unreserve_quota_nblks, clear i_reflink_flags in xfs_iget_recycle, and adpat the new APIs ] Signed-off-by: Joseph Qi Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3831 --- fs/xfs/libxfs/xfs_fs.h | 8 ++ fs/xfs/xfs_icache.c | 2 + fs/xfs/xfs_inode.h | 3 + fs/xfs/xfs_ioctl.c | 18 ++++ fs/xfs/xfs_reflink.c | 230 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 261 insertions(+) diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h index 6360073865db..cbd68a3699bf 100644 --- a/fs/xfs/libxfs/xfs_fs.h +++ b/fs/xfs/libxfs/xfs_fs.h @@ -766,6 +766,11 @@ struct xfs_scrub_metadata { # define XFS_XATTR_LIST_MAX 65536 #endif +enum { + XFS_REFLINK_NORMAL = 0, + XFS_REFLINK_PRIMARY = (1 << 0), + XFS_REFLINK_SECONDARY = (1 << 1), +}; /* * ioctl commands that are used by Linux filesystems @@ -840,6 +845,9 @@ struct xfs_scrub_metadata { /* XFS_IOC_GETFSUUID ---------- deprecated 140 */ +#define XFS_IOC_SET_REFLINK_FLAGS _IOW('X', 200, uint32_t) +#define XFS_IOC_GET_REFLINK_FLAGS _IOR('X', 201, uint32_t) + #ifndef HAVE_BBMACROS /* * Block I/O parameterization. 
A basic block (BB) is the lowest size of diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 57a9f2317525..1f74d7a2ac41 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -107,6 +107,7 @@ xfs_inode_alloc( ip->i_diflags2 = mp->m_ino_geo.new_diflags2; ip->i_nblocks = 0; ip->i_forkoff = 0; + ip->i_reflink_flags = 0; ip->i_sick = 0; ip->i_checked = 0; INIT_WORK(&ip->i_ioend_work, xfs_end_io); @@ -385,6 +386,7 @@ xfs_iget_recycle( */ ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; ip->i_flags |= XFS_INEW; + ip->i_reflink_flags = 0; xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); inode->i_state = I_NEW; diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 0f2999b84e7d..298ca93c0bc5 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -85,6 +85,9 @@ typedef struct xfs_inode { */ xfs_agino_t i_prev_unlinked; + /* flags for controlling reflink cow behavior */ + uint32_t i_reflink_flags; + /* VFS inode */ struct inode i_vnode; /* embedded VFS inode */ diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 32e718043e0e..b3ff5fa49c85 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -2171,6 +2171,24 @@ xfs_file_ioctl( return error; } + case XFS_IOC_SET_REFLINK_FLAGS: { + uint32_t in; + + if (get_user(in, (uint32_t __user *)arg)) + return -EFAULT; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + ip->i_reflink_flags = in; + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return 0; + } + + case XFS_IOC_GET_REFLINK_FLAGS: { + if (put_user(ip->i_reflink_flags, (uint32_t __user *)arg)) + return -EFAULT; + return 0; + } + default: return -ENOTTY; } diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 3431d0d8b6f3..055dfca636cd 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -27,6 +27,8 @@ #include "xfs_quota.h" #include "xfs_reflink.h" #include "xfs_iomap.h" +#include "xfs_rmap.h" +#include "xfs_rmap_btree.h" #include "xfs_ag.h" #include "xfs_ag_resv.h" @@ -500,6 +502,227 @@ xfs_reflink_fill_delalloc( 
return error; } +struct xfs_rmap_info { + bool found; /* output */ + uint64_t owner; /* intput */ + xfs_agblock_t startblock; /* input */; + struct xfs_rmap_irec rec; /* output */ +}; + +STATIC int +xfs_reflink_query_rmap_owner_helper( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xfs_rmap_info *info = priv; + + if ((rec->rm_owner != info->owner) && + (info->startblock >= rec->rm_startblock) && + (info->startblock < (rec->rm_startblock + rec->rm_blockcount))) { + info->rec = *rec; + info->found = true; + return -ECANCELED; + } + return 0; +} + +STATIC int +xfs_reflink_unshare_range( + struct xfs_mount *mp, + struct xfs_bmbt_irec *oimap, + struct xfs_rmap_irec *rmap) +{ + struct xfs_inode *ip; + xfs_fileoff_t offset_fsb = oimap->br_startoff; + xfs_filblks_t count_fsb = oimap->br_blockcount; + struct xfs_trans *tp; + int nimaps, error = 0; + bool shared, found; + xfs_filblks_t resaligned; + xfs_extlen_t resblks = 0; + uint lockmode = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL; + struct xfs_bmbt_irec imap = *oimap; + struct xfs_bmbt_irec cmap; + + error = xfs_iget(mp, NULL, rmap->rm_owner, 0, 0, &ip); + if (error < 0) + return error; + + xfs_ilock(ip, lockmode); + xfs_flush_unmap_range(ip, XFS_FSB_TO_B(mp, imap.br_startoff), + XFS_FSB_TO_B(mp, imap.br_blockcount)); + + error = xfs_find_trim_cow_extent(ip, &imap, &cmap, &shared, &found); + if (error || !shared) + goto error; + + if (found) + goto convert; + + resaligned = xfs_aligned_fsb_count(imap.br_startoff, + imap.br_blockcount, xfs_get_cowextsz_hint(ip)); + resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); + + xfs_iunlock(ip, XFS_ILOCK_EXCL); + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp); + if (error) { + lockmode = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL; + goto error; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + + error = xfs_qm_dqattach_locked(ip, false); + if (error) + goto out_trans_cancel; + + /* + * Check for an overlapping extent again 
now that we dropped the ilock. + */ + error = xfs_find_trim_cow_extent(ip, &imap, &cmap, &shared, &found); + if (error || !shared) + goto out_trans_cancel; + if (found) { + xfs_trans_cancel(tp); + goto convert; + } + + error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0, + XFS_QMOPT_RES_REGBLKS); + if (error) + goto out_trans_cancel; + + xfs_trans_ijoin(tp, ip, 0); + + /* Allocate the entire reservation as zeroed blocks. */ + nimaps = 1; + error = xfs_bmapi_write(tp, ip, imap.br_startoff, imap.br_blockcount, + XFS_BMAPI_COWFORK | XFS_BMAPI_ZERO, resblks, &cmap, &nimaps); + if (error) + goto out_trans_cancel; + + xfs_inode_set_cowblocks_tag(ip); + error = xfs_trans_commit(tp); + if (error) + goto error; + + /* + * Allocation succeeded but the requested range was not even partially + * satisfied? Bail out! + */ + if (nimaps == 0) { + error = -ENOSPC; + goto error; + } +convert: + xfs_trim_extent(&cmap, offset_fsb, count_fsb); + trace_xfs_reflink_convert_cow(ip, &cmap); + error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb); + if (error) + goto error; + cmap.br_state = XFS_EXT_NORM; + dax_copy_range(xfs_inode_buftarg(ip)->bt_bdev, + xfs_inode_buftarg(ip)->bt_daxdev, + BBTOB(xfs_fsb_to_db(ip, oimap->br_startblock)), + BBTOB(xfs_fsb_to_db(ip, cmap.br_startblock)), + XFS_FSB_TO_B(mp, cmap.br_blockcount)); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_reflink_end_cow(ip, XFS_FSB_TO_B(mp, cmap.br_startoff), + XFS_FSB_TO_B(mp, cmap.br_blockcount)); + xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL); + xfs_irele(ip); + return error; + +out_trans_cancel: + xfs_trans_cancel(tp); + +error: + xfs_iunlock(ip, lockmode); + xfs_irele(ip); + return error; +} + +STATIC int +xfs_reflink_query_rmap_owner( + struct xfs_inode *ip, + struct xfs_mount *mp, + struct xfs_bmbt_irec *imap, + struct xfs_rmap_info *info) +{ + struct xfs_trans *tp = NULL; + struct xfs_btree_cur *cur = NULL; + struct xfs_rmap_irec rmap_low, rmap_high; + struct xfs_perag *pag; + struct xfs_buf 
*agbp = NULL; + xfs_fsblock_t fsbno = imap->br_startblock; + xfs_filblks_t bcnt = imap->br_blockcount; + xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, fsbno); + xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, fsbno); + int error = 0; + + error = xfs_trans_alloc_empty(mp, &tp); + if (error) + return error; + + pag = xfs_perag_get(mp, agno); + error = xfs_alloc_read_agf(pag, tp, 0, &agbp); + if (error) { + xfs_perag_put(pag); + goto out_cancel_tp; + } + + cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag); + xfs_perag_put(pag); + + /* Construct a range for rmap query */ + memset(&rmap_low, 0, sizeof(rmap_low)); + memset(&rmap_high, 0xFF, sizeof(rmap_high)); + rmap_low.rm_startblock = rmap_high.rm_startblock = agbno; + rmap_low.rm_blockcount = rmap_high.rm_blockcount = bcnt; + + error = xfs_rmap_query_range(cur, &rmap_low, &rmap_high, + xfs_reflink_query_rmap_owner_helper, info); + if (error == -ECANCELED) + error = 0; + + xfs_btree_del_cursor(cur, error); + xfs_trans_brelse(tp, agbp); + +out_cancel_tp: + xfs_trans_cancel(tp); + return error; +} + +STATIC int +xfs_reflink_unshare_other_owners( + struct xfs_inode *ip, + struct xfs_mount *mp, + struct xfs_bmbt_irec *imap) +{ + int error; + struct xfs_rmap_info info; + int i = 0; + + do { + info.found = false; + info.owner = ip->i_ino; + info.startblock = XFS_FSB_TO_AGBNO(mp, imap->br_startblock); + error = xfs_reflink_query_rmap_owner(ip, mp, imap, &info); + if (error < 0 || !info.found) + return error; + + xfs_reflink_unshare_range(mp, imap, &info.rec); + /* + * FIXME: 64 is chosen as limition intentionally, in case there + * are too many snapshot files, unshare operations here will take + * too much time, needs a better solution. + */ + } while (++i <= 64); + + return 0; +} + /* Allocate all CoW reservations covering a range of blocks in a file. 
*/ int xfs_reflink_allocate_cow( @@ -510,6 +733,7 @@ xfs_reflink_allocate_cow( uint *lockmode, bool convert_now) { + struct xfs_mount *mp = ip->i_mount; int error; bool found; @@ -528,6 +752,12 @@ xfs_reflink_allocate_cow( return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now); + if (ip->i_reflink_flags & XFS_REFLINK_PRIMARY) { + xfs_iunlock(ip, *lockmode); + xfs_reflink_unshare_other_owners(ip, mp, imap); + xfs_ilock(ip, *lockmode); + } + /* * CoW fork does not have an extent and data extent is shared. * Allocate a real extent in the CoW fork. -- Gitee From f99c7391d1696bf7f7cc9327dd2ae8fbdbb95b6a Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Tue, 7 Sep 2021 14:28:19 +0800 Subject: [PATCH 1320/2138] anolis: xfs: bypass xfs_break_layouts() for inodes flagged with XFS_REFLINK_{PRIMARY, SECONDARY} ANBZ: #9794 xfs_break_layouts(BREAK_UNMAP) is a very expensive operation, especially for files whose size are more than dozens of GBs, it'll traverse whole dax pages to check whether there are dax pages currently under dio, see https://lwn.net/Articles/737273/ for detailed info. But if users can ensure that they won't issue dio operations on some files, we can bypass expensive xfs_break_layouts(BREAK_UNMAP) calls. Currently, users can use XFS_REFLINK_{PRIMARY, SECONDARY} to inform kernel that xfs_break_layouts(BREAK_UNMAP) can be bypassed safely. 
Signed-off-by: Xiaoguang Wang Signed-off-by: Gao Xiang Signed-off-by: Joseph Qi Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3831 --- fs/xfs/xfs_file.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 3b9d43d5c746..ffc4675e28f5 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -892,6 +892,7 @@ xfs_break_layouts( { bool retry; int error; + struct xfs_inode *ip = XFS_I(inode); ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)); @@ -899,6 +900,17 @@ xfs_break_layouts( retry = false; switch (reason) { case BREAK_UNMAP: + /* + * For inodes flagged with XFS_REFLINK_{PRIMARY, SECONDARY}, users + * can ensure there are no inflight dio operations on these inodes, + * so we can bypass xfs_break_dax_layouts(BREAK_UNMAP) safely. + */ + if (ip->i_reflink_flags & (XFS_REFLINK_PRIMARY | + XFS_REFLINK_SECONDARY)) { + error = 0; + break; + } + error = xfs_break_dax_layouts(inode, &retry); if (error || retry) break; -- Gitee From 83f630bd6c7403094f7cb7781647d154af8f01e4 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Tue, 23 Jan 2024 11:44:30 +0800 Subject: [PATCH 1321/2138] anolis: xfs: relocate XFS_REFLINK_{PRIMARY, SECONDARY} check ANBZ: #9794 This relocates XFS_REFLINK_{PRIMARY, SECONDARY} check in xfs_break_dax_layouts, instead of xfs_break_layouts. 
Signed-off-by: Xu Yu Signed-off-by: Joseph Qi Acked-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/3831 --- fs/xfs/xfs_file.c | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index ffc4675e28f5..cd399696f269 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -870,9 +870,18 @@ xfs_break_dax_layouts( struct inode *inode, bool *retry) { + struct xfs_inode *ip = XFS_I(inode); struct page *page; - ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL)); + ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL)); + + /* + * For inodes flagged with XFS_REFLINK_{PRIMARY, SECONDARY}, users + * can ensure there are no inflight dio operations on these inodes, + * so we can bypass xfs_break_dax_layouts(BREAK_UNMAP) safely. + */ + if (ip->i_reflink_flags & (XFS_REFLINK_PRIMARY | XFS_REFLINK_SECONDARY)) + return 0; page = dax_layout_busy_page(inode->i_mapping); if (!page) @@ -892,7 +901,6 @@ xfs_break_layouts( { bool retry; int error; - struct xfs_inode *ip = XFS_I(inode); ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)); @@ -900,17 +908,6 @@ xfs_break_layouts( retry = false; switch (reason) { case BREAK_UNMAP: - /* - * For inodes flagged with XFS_REFLINK_{PRIMARY, SECONDARY}, users - * can ensure there are no inflight dio operations on these inodes, - * so we can bypass xfs_break_dax_layouts(BREAK_UNMAP) safely. - */ - if (ip->i_reflink_flags & (XFS_REFLINK_PRIMARY | - XFS_REFLINK_SECONDARY)) { - error = 0; - break; - } - error = xfs_break_dax_layouts(inode, &retry); if (error || retry) break; -- Gitee From f1c74747e787a574212337dae6baea4177ec03e6 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Thu, 25 Jan 2024 19:20:35 +0800 Subject: [PATCH 1322/2138] anolis: mm: support fast reflink ANBZ: #9794 Reflink will takes too much times to copy-on-write in the dirty pages. 
Redis will block the progress to wait for completion, hence that will not acceptiable for us to make the feature productization. The patch will handle with the copy-on-write in the asynchronous thread. and we will not handle with pmd aligned entry directly, and let it behind for background work. [ Xu Yu: - Employ async fork framework. - Fix wrong usage of pmd_write. - Move work_struct of fast reflink from vm_area_struct to address_space, and update corresponding routines. ] Signed-off-by: zhongjiang-ali Signed-off-by: Xu Yu [ joe: change pmdp_clear_tbl_wrprotect to accept vma to adapt the new pmd_mkwrite ] Signed-off-by: Joseph Qi Acked-by: Gao Xiang Link: https://gitee.com/anolis/cloud-kernel/pulls/3831 --- fs/remap_range.c | 16 +- fs/xfs/xfs_reflink.c | 9 ++ include/linux/fs.h | 4 + include/linux/mm.h | 16 ++ include/linux/mm_types.h | 2 + mm/memory.c | 333 +++++++++++++++++++++++++++++++++++++++ mm/truncate.c | 8 + 7 files changed, 384 insertions(+), 4 deletions(-) diff --git a/fs/remap_range.c b/fs/remap_range.c index 87ae4f0dc3aa..2fdc13c901ef 100644 --- a/fs/remap_range.c +++ b/fs/remap_range.c @@ -315,10 +315,18 @@ __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, if (!same_inode) inode_dio_wait(inode_out); - ret = filemap_write_and_wait_range(inode_in->i_mapping, - pos_in, pos_in + *len - 1); - if (ret) - return ret; + if (remap_flags & REMAP_FILE_FAST_REFLINK) { + ret = fast_reflink_apply(inode_in->i_mapping, + pos_in >> PAGE_SHIFT, + (pos_in + *len - 1) >> PAGE_SHIFT); + if (ret) + return ret; + } else { + ret = filemap_write_and_wait_range(inode_in->i_mapping, + pos_in, pos_in + *len - 1); + if (ret) + return ret; + } ret = filemap_write_and_wait_range(inode_out->i_mapping, pos_out, pos_out + *len - 1); diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 055dfca636cd..56ed2f9c7154 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -1710,6 +1710,15 @@ xfs_reflink_remap_prep( if (IS_DAX(inode_in) != 
IS_DAX(inode_out)) goto out_unlock; + /* + * For inodes flagged with XFS_REFLINK_{PRIMARY, SECONDARY}, + * users do not need persistence, so we can apply fast reflink, + * i.e., write protect without flushing dirty. + */ + if (src->i_reflink_flags & (XFS_REFLINK_PRIMARY | + XFS_REFLINK_SECONDARY)) + remap_flags |= REMAP_FILE_FAST_REFLINK; + if (!IS_DAX(inode_in)) ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out, len, remap_flags); diff --git a/include/linux/fs.h b/include/linux/fs.h index f4e5f3b61833..2d5a642e56f7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -495,6 +495,8 @@ struct address_space { struct rw_semaphore i_mmap_rwsem; void *private_data; + struct fast_reflink_work *fast_reflink_work; + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) @@ -1912,6 +1914,8 @@ struct dir_context { */ #define REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN) +#define REMAP_FILE_FAST_REFLINK (1 << 2) + /* * These flags control the behavior of vfs_copy_file_range(). * They are not available to the user via syscall. 
diff --git a/include/linux/mm.h b/include/linux/mm.h index 44b8711bdc20..a61842b79ae3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -4190,8 +4190,22 @@ static inline void async_fork_fixup_vma(struct vm_area_struct *mpnt) } #endif +struct fast_reflink_work { + struct work_struct work; + struct address_space *mapping; +}; + +int fast_reflink_apply(struct address_space *mapping, pgoff_t start, + pgoff_t end); +bool is_pmd_fast_reflink(pmd_t pmd); +void fast_reflink_fixup_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr); +void fast_reflink_fixup_vma(struct vm_area_struct *vma); + static inline bool is_pmd_transient(pmd_t pmd) { + if (is_pmd_fast_reflink(pmd)) + return true; if (is_pmd_async_fork(pmd)) return true; return false; @@ -4199,10 +4213,12 @@ static inline bool is_pmd_transient(pmd_t pmd) static inline void fixup_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr) { + fast_reflink_fixup_pmd(vma, pmd, addr); async_fork_fixup_pmd(vma, pmd, addr); } static inline void fixup_vma(struct vm_area_struct *vma) { + fast_reflink_fixup_vma(vma); async_fork_fixup_vma(vma); } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index fc1c23a6fb17..4fd65d52ccfa 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -734,6 +734,8 @@ struct vm_area_struct { struct vm_area_struct *async_fork_vma; #endif + bool fast_reflink; + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) diff --git a/mm/memory.c b/mm/memory.c index e76086e1b6ef..44ca6de266da 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -6643,3 +6643,336 @@ void ptlock_free(struct ptdesc *ptdesc) kmem_cache_free(page_ptl_cachep, ptdesc->ptl); } #endif + +/* Fast reflink */ +static inline bool is_pmd_tbl_wrprotect(pmd_t pmd) +{ +#if defined(CONFIG_ARM64) +#define PMD_SECT_AP_WRPROTECT (_AT(pmdval_t, 2) << 61) /* APTable[1:0] */ + return (pmd_val(pmd) & PMD_TABLE_BIT) && + (pmd_val(pmd) & PMD_SECT_AP_WRPROTECT); +#elif defined(CONFIG_X86) + return 
(pmd_flags(pmd) & ~_PAGE_USER) == (_KERNPG_TABLE & ~_PAGE_RW); +#else + return false; +#endif +} + +static inline void pmdp_set_tbl_wrprotect(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ +#if defined(CONFIG_ARM64) + set_pmd(pmdp, __pmd(pmd_val(*pmdp) | PMD_SECT_AP_WRPROTECT)); +#elif defined(CONFIG_X86) + pmdp_set_wrprotect(mm, addr, pmdp); +#endif +} + +static inline void pmdp_clear_tbl_wrprotect(pmd_t *pmdp, + struct vm_area_struct *vma) +{ +#if defined(CONFIG_ARM64) + set_pmd(pmdp, __pmd(pmd_val(*pmdp) & ~PMD_SECT_AP_WRPROTECT)); +#elif defined(CONFIG_X86) + set_pmd(pmdp, pmd_mkwrite(*pmdp, vma)); +#endif +} + +bool is_pmd_fast_reflink(pmd_t pmd) +{ + return !is_swap_pmd(pmd) && !pmd_trans_huge(pmd) && + !pmd_devmap(pmd) && is_pmd_tbl_wrprotect(pmd); +} + +static int follow_pmd(struct mm_struct *mm, unsigned long address, + pmd_t **pmdp) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset(mm, address); + if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) + goto out; + + p4d = p4d_offset(pgd, address); + if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d))) + goto out; + + pud = pud_offset(p4d, address); + if (pud_none(*pud) || unlikely(pud_bad(*pud))) + goto out; + + pmd = pmd_offset(pud, address); + if (pmd_huge(*pmd)) + goto found; + + if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) + goto out; + +found: + *pmdp = pmd; + return 0; +out: + return -EINVAL; +} + +static void fr_apply_pte_range(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long start, unsigned long end) +{ + pte_t *start_pte; + pte_t *ptep, pte; + spinlock_t *ptl; + + start_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); + ptep = start_pte; + + do { + pte = *ptep; + if (pte_none(pte)) + continue; + + if (!pte_dirty(pte) && !pte_write(pte)) + continue; + + /* The caller is responsible for tlb flush. 
*/ + pte = ptep_get_and_clear(vma->vm_mm, start, ptep); + pte = pte_wrprotect(pte); + pte = pte_mkclean(pte); + set_pte_at(vma->vm_mm, start, ptep, pte); + } while (ptep++, start += PAGE_SIZE, start != end); + + pte_unmap_unlock(start_pte, ptl); +} + +static void fr_apply_vma(struct vm_area_struct *vma) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long start = vma->vm_start; + unsigned long end = vma->vm_end; + unsigned long next; + spinlock_t *pml; + pmd_t *pmdp = NULL; + pmd_t pmd; + bool applied = false; + + do { + next = pmd_addr_end(start, end); + if (follow_pmd(mm, start, &pmdp)) + continue; + + pml = pmd_lock(mm, pmdp); + if (pmd_huge(*pmdp)) { +#ifdef CONFIG_FS_DAX_PMD + if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp)) + goto unlock_pmd; + + pmd = pmdp_invalidate(vma, start, pmdp); + pmd = pmd_wrprotect(pmd); + pmd = pmd_mkclean(pmd); + set_pmd_at(mm, start, pmdp, pmd); +unlock_pmd: +#endif + spin_unlock(pml); + continue; + } + + if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp))) { + spin_unlock(pml); + continue; + } + + if (IS_ALIGNED(start, PMD_SIZE) && (start + PMD_SIZE <= end)) { + pmdp_set_tbl_wrprotect(mm, start, pmdp); + flush_tlb_range(vma, start, start + PMD_SIZE); + applied = true; + spin_unlock(pml); + continue; + } else { + spin_unlock(pml); + fr_apply_pte_range(vma, pmdp, start, next); + flush_tlb_range(vma, start, next); + continue; + } + } while (start = next, start != end); + + if (applied) + vma->fast_reflink = applied; +} + +static void fast_reflink_fixup(struct work_struct *work); +int fast_reflink_apply(struct address_space *mapping, pgoff_t start, + pgoff_t end) +{ + struct vm_area_struct *vma; + + i_mmap_lock_read(mapping); + vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) { + if (!(vma->vm_flags & VM_SHARED)) + continue; + + fr_apply_vma(vma); + } + i_mmap_unlock_read(mapping); + + if (!mapping->fast_reflink_work) { + struct fast_reflink_work *fr_work; + + fr_work = kmalloc(sizeof(*fr_work), GFP_KERNEL|__GFP_NOFAIL); + 
INIT_WORK(&fr_work->work, fast_reflink_fixup); + fr_work->mapping = mapping; + mapping->fast_reflink_work = fr_work; + } + schedule_work(&mapping->fast_reflink_work->work); + + return 0; +} + +static void fr_fixup_pte_range(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long start, unsigned long end) +{ + pte_t *start_pte; + pte_t *ptep, pte; + spinlock_t *ptl; + + start_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl); + ptep = start_pte; + + /* Already fixed up */ + if (unlikely(!is_pmd_fast_reflink(*pmd))) + goto out; + + do { + pte = *ptep; + if (pte_none(pte)) + continue; + + if (!pte_dirty(pte) && !pte_write(pte)) + continue; + + /* The caller is responsible for tlb flush. */ + pte = ptep_get_and_clear(vma->vm_mm, start, ptep); + pte = pte_wrprotect(pte); + pte = pte_mkclean(pte); + set_pte_at(vma->vm_mm, start, ptep, pte); + } while (ptep++, start += PAGE_SIZE, start != end); + +out: + pte_unmap_unlock(start_pte, ptl); +} + +static void fr_fixup_pmd_range(struct vm_area_struct *vma, pud_t *pud, + unsigned long start, unsigned long end) +{ + pmd_t *pmd; + unsigned long next; + spinlock_t *pml; + + pmd = pmd_offset(pud, start); + do { + next = pmd_addr_end(start, end); + if (pmd_none(*pmd)) + continue; + + pml = pmd_lock(vma->vm_mm, pmd); + if (is_pmd_fast_reflink(*pmd)) { + spin_unlock(pml); + fr_fixup_pte_range(vma, pmd, start, next); + + pml = pmd_lock(vma->vm_mm, pmd); + if (is_pmd_fast_reflink(*pmd)) + pmdp_clear_tbl_wrprotect(pmd, vma); + } + spin_unlock(pml); + } while (pmd++, start = next, start != end); +} + +static void fr_fixup_pud_range(struct vm_area_struct *vma, p4d_t *p4d, + unsigned long start, unsigned long end) +{ + pud_t *pud; + unsigned long next; + + pud = pud_offset(p4d, start); + do { + next = pud_addr_end(start, end); + if (pud_none_or_clear_bad(pud)) + continue; + fr_fixup_pmd_range(vma, pud, start, next); + } while (pud++, start = next, start != end); +} + +static void fr_fixup_p4d_range(struct vm_area_struct *vma, pgd_t 
*pgd, + unsigned long start, unsigned long end) +{ + p4d_t *p4d; + unsigned long next; + + p4d = p4d_offset(pgd, start); + do { + next = p4d_addr_end(start, end); + if (p4d_none_or_clear_bad(p4d)) + continue; + fr_fixup_pud_range(vma, p4d, start, next); + } while (p4d++, start = next, start != end); +} + +static void fr_fixup_page_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + pgd_t *pgd; + unsigned long next; + + pgd = pgd_offset(vma->vm_mm, start); + do { + next = pgd_addr_end(start, end); + if (pgd_none_or_clear_bad(pgd)) + continue; + fr_fixup_p4d_range(vma, pgd, start, next); + } while (pgd++, start = next, start != end); +} + +/* The mmap_lock (read/write) of vma->vm_mm is held */ +void fast_reflink_fixup_vma(struct vm_area_struct *vma) +{ + if (!vma->fast_reflink) + return; + + fr_fixup_page_range(vma, vma->vm_start, vma->vm_end); + vma->fast_reflink = false; +#ifdef CONFIG_ARM64 + flush_tlb_range(vma, vma->vm_start, vma->vm_end); +#endif +} + +/* The mmap_lock (read) of vma->vm_mm is held */ +void fast_reflink_fixup_pmd(struct vm_area_struct *vma, pmd_t *pmd, + unsigned long addr) +{ + if (!is_pmd_fast_reflink(*pmd) || !vma->fast_reflink) + return; + + addr &= PMD_MASK; + fr_fixup_page_range(vma, addr, addr + PMD_SIZE); + VM_WARN_ON_ONCE(is_pmd_fast_reflink(*pmd)); + +#ifdef CONFIG_ARM64 + flush_tlb_range(vma, addr & PMD_MASK, (addr & PMD_MASK) + PMD_SIZE); +#endif +} + +static void fast_reflink_fixup(struct work_struct *work) +{ + struct fast_reflink_work *fr_work; + struct address_space *mapping; + struct vm_area_struct *vma; + + fr_work = container_of(work, struct fast_reflink_work, work); + mapping = fr_work->mapping; + + i_mmap_lock_read(mapping); + vma_interval_tree_foreach(vma, &mapping->i_mmap, 0, ULONG_MAX) + fast_reflink_fixup_vma(vma); + i_mmap_unlock_read(mapping); +} diff --git a/mm/truncate.c b/mm/truncate.c index 70c09213bb92..daaaf558db60 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -469,6 +469,14 @@ 
void truncate_inode_pages_final(struct address_space *mapping) */ mapping_set_exiting(mapping); + /* Flush fast reflink work if any. */ + if (unlikely(mapping->fast_reflink_work)) { + flush_work(&mapping->fast_reflink_work->work); + + kfree(mapping->fast_reflink_work); + mapping->fast_reflink_work = NULL; + } + if (!mapping_empty(mapping)) { /* * As truncation uses a lockless tree lookup, cycle -- Gitee From 7722a6de70339b11a34e85a0c75457c757ca9bc0 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Tue, 9 Apr 2024 14:50:34 +0800 Subject: [PATCH 1323/2138] anolis: xfs: eliminate rmap when unshare range ANBZ: #9794 Currently we'll only have one reflink file at the same time under XFS_REFLINK_PRIMARY, so we don't have to use rmap since it's a bit costly. So record reflink file when doing file clone range, then we can directly do unshare range without rmap query. Signed-off-by: Joseph Qi Signed-off-by: Gao Xiang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3831 --- fs/xfs/xfs_file.c | 6 +++ fs/xfs/xfs_icache.c | 2 + fs/xfs/xfs_inode.h | 5 ++ fs/xfs/xfs_reflink.c | 119 +++---------------------------------------- 4 files changed, 21 insertions(+), 111 deletions(-) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index cd399696f269..232b2bed1286 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1218,6 +1218,12 @@ xfs_file_remap_range( if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out)) xfs_log_force_inode(dest); + + if (src->i_reflink_flags & XFS_REFLINK_PRIMARY) { + /* TODO: WARN_ON if src->i_reflink_ino is still valid */ + src->i_reflink_ino = dest->i_ino; + } + out_unlock: xfs_iunlock2_remapping(src, dest); if (ret) diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 1f74d7a2ac41..86485e2e9324 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -108,6 +108,7 @@ xfs_inode_alloc( ip->i_nblocks = 0; ip->i_forkoff = 0; ip->i_reflink_flags = 0; + ip->i_reflink_ino = 0; ip->i_sick = 0; ip->i_checked = 0; 
INIT_WORK(&ip->i_ioend_work, xfs_end_io); @@ -387,6 +388,7 @@ xfs_iget_recycle( ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; ip->i_flags |= XFS_INEW; ip->i_reflink_flags = 0; + ip->i_reflink_ino = 0; xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); inode->i_state = I_NEW; diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 298ca93c0bc5..c0383220acb8 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -87,6 +87,11 @@ typedef struct xfs_inode { /* flags for controlling reflink cow behavior */ uint32_t i_reflink_flags; + /* + * Saved reflink ino for the sake of quick unshare, currently we + * only support one reflink file under flag XFS_REFLINK_PRIMARY + */ + xfs_ino_t i_reflink_ino; /* VFS inode */ struct inode i_vnode; /* embedded VFS inode */ diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 56ed2f9c7154..84a253e990ce 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -502,37 +502,12 @@ xfs_reflink_fill_delalloc( return error; } -struct xfs_rmap_info { - bool found; /* output */ - uint64_t owner; /* intput */ - xfs_agblock_t startblock; /* input */; - struct xfs_rmap_irec rec; /* output */ -}; - -STATIC int -xfs_reflink_query_rmap_owner_helper( - struct xfs_btree_cur *cur, - const struct xfs_rmap_irec *rec, - void *priv) -{ - struct xfs_rmap_info *info = priv; - - if ((rec->rm_owner != info->owner) && - (info->startblock >= rec->rm_startblock) && - (info->startblock < (rec->rm_startblock + rec->rm_blockcount))) { - info->rec = *rec; - info->found = true; - return -ECANCELED; - } - return 0; -} - STATIC int xfs_reflink_unshare_range( - struct xfs_mount *mp, - struct xfs_bmbt_irec *oimap, - struct xfs_rmap_irec *rmap) + struct xfs_inode *src, + struct xfs_bmbt_irec *oimap) { + struct xfs_mount *mp = src->i_mount; struct xfs_inode *ip; xfs_fileoff_t offset_fsb = oimap->br_startoff; xfs_filblks_t count_fsb = oimap->br_blockcount; @@ -545,7 +520,10 @@ xfs_reflink_unshare_range( struct 
xfs_bmbt_irec imap = *oimap; struct xfs_bmbt_irec cmap; - error = xfs_iget(mp, NULL, rmap->rm_owner, 0, 0, &ip); + if (WARN_ON(!src->i_reflink_ino)) + return -EINVAL; + + error = xfs_iget(mp, NULL, src->i_reflink_ino, 0, 0, &ip); if (error < 0) return error; @@ -643,86 +621,6 @@ xfs_reflink_unshare_range( return error; } -STATIC int -xfs_reflink_query_rmap_owner( - struct xfs_inode *ip, - struct xfs_mount *mp, - struct xfs_bmbt_irec *imap, - struct xfs_rmap_info *info) -{ - struct xfs_trans *tp = NULL; - struct xfs_btree_cur *cur = NULL; - struct xfs_rmap_irec rmap_low, rmap_high; - struct xfs_perag *pag; - struct xfs_buf *agbp = NULL; - xfs_fsblock_t fsbno = imap->br_startblock; - xfs_filblks_t bcnt = imap->br_blockcount; - xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, fsbno); - xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, fsbno); - int error = 0; - - error = xfs_trans_alloc_empty(mp, &tp); - if (error) - return error; - - pag = xfs_perag_get(mp, agno); - error = xfs_alloc_read_agf(pag, tp, 0, &agbp); - if (error) { - xfs_perag_put(pag); - goto out_cancel_tp; - } - - cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag); - xfs_perag_put(pag); - - /* Construct a range for rmap query */ - memset(&rmap_low, 0, sizeof(rmap_low)); - memset(&rmap_high, 0xFF, sizeof(rmap_high)); - rmap_low.rm_startblock = rmap_high.rm_startblock = agbno; - rmap_low.rm_blockcount = rmap_high.rm_blockcount = bcnt; - - error = xfs_rmap_query_range(cur, &rmap_low, &rmap_high, - xfs_reflink_query_rmap_owner_helper, info); - if (error == -ECANCELED) - error = 0; - - xfs_btree_del_cursor(cur, error); - xfs_trans_brelse(tp, agbp); - -out_cancel_tp: - xfs_trans_cancel(tp); - return error; -} - -STATIC int -xfs_reflink_unshare_other_owners( - struct xfs_inode *ip, - struct xfs_mount *mp, - struct xfs_bmbt_irec *imap) -{ - int error; - struct xfs_rmap_info info; - int i = 0; - - do { - info.found = false; - info.owner = ip->i_ino; - info.startblock = XFS_FSB_TO_AGBNO(mp, imap->br_startblock); - error = 
xfs_reflink_query_rmap_owner(ip, mp, imap, &info); - if (error < 0 || !info.found) - return error; - - xfs_reflink_unshare_range(mp, imap, &info.rec); - /* - * FIXME: 64 is chosen as limition intentionally, in case there - * are too many snapshot files, unshare operations here will take - * too much time, needs a better solution. - */ - } while (++i <= 64); - - return 0; -} - /* Allocate all CoW reservations covering a range of blocks in a file. */ int xfs_reflink_allocate_cow( @@ -733,7 +631,6 @@ xfs_reflink_allocate_cow( uint *lockmode, bool convert_now) { - struct xfs_mount *mp = ip->i_mount; int error; bool found; @@ -754,7 +651,7 @@ xfs_reflink_allocate_cow( if (ip->i_reflink_flags & XFS_REFLINK_PRIMARY) { xfs_iunlock(ip, *lockmode); - xfs_reflink_unshare_other_owners(ip, mp, imap); + xfs_reflink_unshare_range(ip, imap); xfs_ilock(ip, *lockmode); } -- Gitee From c5f91334f77be72a71648c3a1ba238138019b650 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 23 Apr 2024 06:50:22 +0800 Subject: [PATCH 1324/2138] anolis: xfs: replace `i_reflink_ino` with pointers `i_reflink_opt_ip` ANBZ: #9794 It's better to directly inode pointers to get the other reflink file. And it becomes quite easy to (dis)connect the reflink pair with inode pointers other than inode numbers. 
Signed-off-by: Gao Xiang Signed-off-by: Joseph Qi Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3831 --- fs/xfs/xfs_file.c | 8 +++++--- fs/xfs/xfs_icache.c | 4 ++-- fs/xfs/xfs_inode.c | 27 +++++++++++++++++++++++++++ fs/xfs/xfs_inode.h | 4 ++-- fs/xfs/xfs_mount.h | 2 ++ fs/xfs/xfs_reflink.c | 28 ++++++++++++++++++++++++---- fs/xfs/xfs_super.c | 2 ++ 7 files changed, 64 insertions(+), 11 deletions(-) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 232b2bed1286..9f82b86e659c 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1219,9 +1219,11 @@ xfs_file_remap_range( if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out)) xfs_log_force_inode(dest); - if (src->i_reflink_flags & XFS_REFLINK_PRIMARY) { - /* TODO: WARN_ON if src->i_reflink_ino is still valid */ - src->i_reflink_ino = dest->i_ino; + if (remapped && (src->i_reflink_flags & XFS_REFLINK_PRIMARY)) { + mutex_lock(&mp->m_reflink_opt_lock); + src->i_reflink_opt_ip = dest; + dest->i_reflink_opt_ip = src; + mutex_unlock(&mp->m_reflink_opt_lock); } out_unlock: diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 86485e2e9324..fc980ae5e82a 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -108,7 +108,7 @@ xfs_inode_alloc( ip->i_nblocks = 0; ip->i_forkoff = 0; ip->i_reflink_flags = 0; - ip->i_reflink_ino = 0; + ip->i_reflink_opt_ip = NULL; ip->i_sick = 0; ip->i_checked = 0; INIT_WORK(&ip->i_ioend_work, xfs_end_io); @@ -388,7 +388,7 @@ xfs_iget_recycle( ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; ip->i_flags |= XFS_INEW; ip->i_reflink_flags = 0; - ip->i_reflink_ino = 0; + ip->i_reflink_opt_ip = NULL; xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); inode->i_state = I_NEW; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 6f7dca1c14c7..db11c8577e90 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1698,6 +1698,30 @@ xfs_inode_needs_inactive( return xfs_can_free_eofblocks(ip); } +STATIC void 
+xfs_reflink_opt_disconnect( + struct xfs_mount *mp, + struct xfs_inode *ip, + bool unexpected) +{ + bool valid = false; + + if (!(ip->i_reflink_flags & (XFS_REFLINK_PRIMARY | + XFS_REFLINK_SECONDARY))) + return; + + mutex_lock(&mp->m_reflink_opt_lock); + if (ip->i_reflink_opt_ip) { + ip->i_reflink_opt_ip->i_reflink_opt_ip = NULL; + ip->i_reflink_opt_ip = NULL; + valid = true; + } + mutex_unlock(&mp->m_reflink_opt_lock); + if (valid && unexpected) + xfs_warn(mp, "unexpectedly, inactive reflink file in advance %llu", + ip->i_ino); +} + /* * xfs_inactive * @@ -1750,6 +1774,7 @@ xfs_inactive( if (xfs_can_free_eofblocks(ip)) error = xfs_free_eofblocks(ip); + xfs_reflink_opt_disconnect(mp, ip, true); goto out; } @@ -1781,6 +1806,8 @@ xfs_inactive( if (error) goto out; + xfs_reflink_opt_disconnect(mp, ip, false); + /* * If there are attributes associated with the file then blow them away * now. The code calls a routine that recursively deconstructs the diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index c0383220acb8..fb73dbc2e88c 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -88,10 +88,10 @@ typedef struct xfs_inode { /* flags for controlling reflink cow behavior */ uint32_t i_reflink_flags; /* - * Saved reflink ino for the sake of quick unshare, currently we + * Saved reflink ip for the sake of quick unshare, currently we * only support one reflink file under flag XFS_REFLINK_PRIMARY */ - xfs_ino_t i_reflink_ino; + struct xfs_inode *i_reflink_opt_ip; /* VFS inode */ struct inode i_vnode; /* embedded VFS inode */ diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index d19cca099bc3..5fa454b3b7f6 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -250,6 +250,8 @@ typedef struct xfs_mount { /* cpus that have inodes queued for inactivation */ struct cpumask m_inodegc_cpumask; + + struct mutex m_reflink_opt_lock; } xfs_mount_t; #define M_IGEO(mp) (&(mp)->m_ino_geo) diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 
84a253e990ce..e623a995a675 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -520,12 +520,20 @@ xfs_reflink_unshare_range( struct xfs_bmbt_irec imap = *oimap; struct xfs_bmbt_irec cmap; - if (WARN_ON(!src->i_reflink_ino)) +retry: + mutex_lock(&mp->m_reflink_opt_lock); + if (WARN_ON(!src->i_reflink_opt_ip)) { + mutex_unlock(&mp->m_reflink_opt_lock); return -EINVAL; + } - error = xfs_iget(mp, NULL, src->i_reflink_ino, 0, 0, &ip); - if (error < 0) - return error; + if (!igrab(VFS_I(src->i_reflink_opt_ip))) { + mutex_unlock(&mp->m_reflink_opt_lock); + delay(1); + goto retry; + } + ip = src->i_reflink_opt_ip; + mutex_unlock(&mp->m_reflink_opt_lock); xfs_ilock(ip, lockmode); xfs_flush_unmap_range(ip, XFS_FSB_TO_B(mp, imap.br_startoff), @@ -1607,6 +1615,18 @@ xfs_reflink_remap_prep( if (IS_DAX(inode_in) != IS_DAX(inode_out)) goto out_unlock; + if (src->i_reflink_flags & XFS_REFLINK_PRIMARY) { + if (!(dest->i_reflink_flags & XFS_REFLINK_SECONDARY)) + goto out_unlock; + if (pos_in != pos_out) + goto out_unlock; + if (src->i_reflink_opt_ip || dest->i_reflink_opt_ip) { + xfs_warn(src->i_mount, + "src(XFS_REFLINK_PRIMARY) and/or dest(XFS_REFLINK_SECONDARY) is already paired with FICLONE"); + goto out_unlock; + } + } + /* * For inodes flagged with XFS_REFLINK_{PRIMARY, SECONDARY}, * users do not need persistence, so we can apply fast reflink, diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 13007b6bc9f3..e5308687b9ae 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -2002,6 +2002,8 @@ static int xfs_init_fs_context( INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC); spin_lock_init(&mp->m_perag_lock); mutex_init(&mp->m_growlock); + mutex_init(&mp->m_reflink_opt_lock); + INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker); INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker); mp->m_kobj.kobject.kset = xfs_kset; -- Gitee From eaa4e7ca3d5a3ba505c36005875d126fed0e00cb Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 23 Apr 2024 
07:39:06 +0800 Subject: [PATCH 1325/2138] anolis: dax, xfs: support memory failure without XFS rmapbt ANBZ: #9794 Record the inode mapping into `page->mapping` instead of just marking PAGE_MAPPING_DAX_SHARED. Since `page->private` is not used for now, let's record `pgoff` into `page->private` instead of `page->index` to match the upstream logic. Finally, use `ip->i_reflink_opt_ip` to get the other reflink file since such files are only reflinked by FICLONE rather than FICLONERANGE. Signed-off-by: Gao Xiang [ joe: fix code conflicts ] Signed-off-by: Joseph Qi Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3831 --- fs/dax.c | 23 +++++-- fs/xfs/xfs_file.c | 5 ++ fs/xfs/xfs_notify_failure.c | 131 ++++++++++++++++++++++++++++++++++-- include/linux/page-flags.h | 2 +- include/linux/pagemap.h | 2 + 5 files changed, 151 insertions(+), 12 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index 2cf9efed9059..400ad547dee1 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -322,23 +322,34 @@ static unsigned long dax_end_pfn(void *entry) static inline bool dax_page_is_shared(struct page *page) { - return page->mapping == PAGE_MAPPING_DAX_SHARED; + return (unsigned long)READ_ONCE(page->mapping) & PAGE_MAPPING_DAX_SHARED; } /* * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the * refcount. */ -static inline void dax_page_share_get(struct page *page) +static inline void dax_page_share_get(struct page *page, + struct address_space *mapping, pgoff_t index) { - if (page->mapping != PAGE_MAPPING_DAX_SHARED) { + struct address_space *oldmapping = READ_ONCE(page->mapping); + + if (!((unsigned long)oldmapping & PAGE_MAPPING_DAX_SHARED)) { /* * Reset the index if the page was already mapped * regularly before. 
*/ - if (page->mapping) + if (oldmapping) page->share = 1; - page->mapping = PAGE_MAPPING_DAX_SHARED; + + if (test_bit(AS_FSDAX_NORMAP, &mapping->flags)) { + /* Note that we (ab)use page->private to keep index for now */ + WRITE_ONCE(page->private, index); + /* paired with smp_mb() in xfs_dax_notify_ddev_failure2() */ + smp_mb(); + } + WRITE_ONCE(page->mapping, + (void *)((unsigned long)mapping | PAGE_MAPPING_DAX_SHARED)); } page->share++; } @@ -367,7 +378,7 @@ static void dax_associate_entry(void *entry, struct address_space *mapping, struct page *page = pfn_to_page(pfn); if (shared) { - dax_page_share_get(page); + dax_page_share_get(page, mapping, index); } else { WARN_ON_ONCE(page->mapping); page->mapping = mapping; diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 9f82b86e659c..e4948a481438 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1224,6 +1224,11 @@ xfs_file_remap_range( src->i_reflink_opt_ip = dest; dest->i_reflink_opt_ip = src; mutex_unlock(&mp->m_reflink_opt_lock); + + if (!xfs_has_rmapbt(mp)) { + set_bit(AS_FSDAX_NORMAP, &VFS_I(src)->i_mapping->flags); + set_bit(AS_FSDAX_NORMAP, &VFS_I(dest)->i_mapping->flags); + } } out_unlock: diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c index a7daa522e00f..30655551dc75 100644 --- a/fs/xfs/xfs_notify_failure.c +++ b/fs/xfs/xfs_notify_failure.c @@ -22,6 +22,7 @@ #include #include +#include struct xfs_failure_info { xfs_agblock_t startblock; @@ -173,6 +174,128 @@ xfs_dax_notify_ddev_failure( return error; } +static int +xfs_mf_dax_kill_procs( + struct xfs_mount *mp, + struct address_space *mapping, + pgoff_t pgoff, + unsigned long nrpages, + int mf_flags, + bool share) +{ + int rc, rc2 = 0; + + if (share) { + struct xfs_inode *ip = XFS_I(mapping->host); + + mutex_lock(&mp->m_reflink_opt_lock); + if (ip->i_reflink_opt_ip) { + rc2 = mf_dax_kill_procs(VFS_I(ip->i_reflink_opt_ip)->i_mapping, + pgoff, nrpages, mf_flags); + } else { + xfs_warn(mp, "this mode should be only used 
with REFLINK_PRIMARY|REFLINK_SECONDARY @ ino %llu", + ip->i_ino); + } + mutex_unlock(&mp->m_reflink_opt_lock); + } + rc = mf_dax_kill_procs(mapping, pgoff, nrpages, mf_flags); + iput(mapping->host); + return rc ? rc : rc2; +} + +static int +xfs_dax_notify_ddev_failure2( + struct dax_device *dax_dev, + struct xfs_mount *mp, + loff_t pos, + size_t size, + int mf_flags) +{ + struct address_space *lmapping = NULL; + bool lshare = false; + pfn_t pfn; + pgoff_t pgoff, lpgoff; + unsigned long nrpages; + long length; + int rc, id; + + rc = bdev_dax_pgoff(mp->m_ddev_targp->bt_bdev, pos >> SECTOR_SHIFT, + size, &pgoff); + if (rc) + return rc; + id = dax_read_lock(); + length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), DAX_ACCESS, + NULL, &pfn); + if (length < 0) { + rc = length; + goto out; + } + + if (PFN_PHYS(length) < size) { + rc = -EINVAL; + goto out; + } + rc = 0; + while (length) { + struct page *page; + struct address_space *mapping; + bool share = false; + + page = pfn_t_to_page(pfn); + pfn.val++; + --length; + +retry: + rcu_read_lock(); + mapping = page ? 
READ_ONCE(page->mapping) : NULL; + if (mapping) { + share = (unsigned long)mapping & PAGE_MAPPING_DAX_SHARED; + mapping = (void *)((unsigned long)mapping & ~PAGE_MAPPING_DAX_SHARED); + if (!igrab(mapping->host)) { + rcu_read_unlock(); + goto retry; + } + /* paired with smp_mb() in dax_page_share_get() to ensure valid index */ + smp_mb(); + if (!share) { + pgoff = READ_ONCE(page->index); + } else { + WARN_ON(!test_bit(AS_FSDAX_NORMAP, &mapping->flags)); + pgoff = READ_ONCE(page->private); + } + } + rcu_read_unlock(); + + if (lmapping) { + if (mapping != lmapping || share != lshare || + lpgoff + nrpages != pgoff) { + rc = xfs_mf_dax_kill_procs(mp, lmapping, lpgoff, + nrpages, mf_flags, lshare); + if (rc) + break; + } else { + nrpages++; + continue; + } + } + lmapping = mapping; + lpgoff = pgoff; + lshare = share; + nrpages = 1; + } + + if (lmapping) { + int rc2; + + rc2 = xfs_mf_dax_kill_procs(mp, lmapping, lpgoff, nrpages, mf_flags, lshare); + if (!rc) + rc = rc2; + } +out: + dax_read_unlock(id); + return rc; +} + static int xfs_dax_notify_failure( struct dax_device *dax_dev, @@ -202,11 +325,6 @@ xfs_dax_notify_failure( return -EFSCORRUPTED; } - if (!xfs_has_rmapbt(mp)) { - xfs_debug(mp, "notify_failure() needs rmapbt enabled!"); - return -EOPNOTSUPP; - } - ddev_start = mp->m_ddev_targp->bt_dax_part_off; ddev_end = ddev_start + bdev_nr_bytes(mp->m_ddev_targp->bt_bdev) - 1; @@ -226,6 +344,9 @@ xfs_dax_notify_failure( if (offset + len - 1 > ddev_end) len = ddev_end - offset + 1; + if (!xfs_has_rmapbt(mp)) + return xfs_dax_notify_ddev_failure2(dax_dev, mp, offset, len, + mf_flags); return xfs_dax_notify_ddev_failure(mp, BTOBB(offset), BTOBB(len), mf_flags); } diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index aae9f6230dba..56052b705f9a 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -662,7 +662,7 @@ __PAGEFLAG(Kfence, kfence, PF_ANY) * Different with flags above, this flag is used only for fsdax mode. 
It * indicates that this page->mapping is now under reflink case. */ -#define PAGE_MAPPING_DAX_SHARED ((void *)0x1) +#define PAGE_MAPPING_DAX_SHARED 0x1UL static __always_inline bool folio_mapping_flags(struct folio *folio) { diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 03046c9394d9..c50f811fbf4a 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -206,6 +206,8 @@ enum mapping_flags { AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */ AS_STABLE_WRITES, /* must wait for writeback before modifying folio contents */ + + AS_FSDAX_NORMAP = 30, }; /** -- Gitee From c4f9d0bc08258229024a0645435953bc83a58472 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Mon, 13 May 2024 11:46:56 +0800 Subject: [PATCH 1326/2138] anolis: xfs: fix build failure when CONFIG_FS_DAX is off ANBZ: #9794 Currently xfs_reflink_unshare_range() is useless if CONFIG_FS_DAX is off. I'm not sure who will need it without PMEM. Signed-off-by: Gao Xiang Signed-off-by: Joseph Qi Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3831 --- fs/xfs/xfs_reflink.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index e623a995a675..59ef06076bd3 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -502,6 +502,7 @@ xfs_reflink_fill_delalloc( return error; } +#ifdef CONFIG_FS_DAX STATIC int xfs_reflink_unshare_range( struct xfs_inode *src, @@ -628,6 +629,15 @@ xfs_reflink_unshare_range( xfs_irele(ip); return error; } +#else +STATIC int +xfs_reflink_unshare_range( + struct xfs_inode *src, + struct xfs_bmbt_irec *oimap) +{ + return 0; +} +#endif /* Allocate all CoW reservations covering a range of blocks in a file. 
*/ int -- Gitee From b3f7b20561626ff6cc3125ff68118736640ddc81 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Sun, 28 Apr 2024 16:52:31 +0800 Subject: [PATCH 1327/2138] anolis: xfs: never block page fault during inode inactivation ANBZ: #9794 Inode block truncation takes time. Before unmapping these blocks, they are still shared in the primary-secondary inode pair. When a page fault happens, we still need to unshare such extents but it's unneeded since the secondary inode here is no longer valid. In order to resolve that, let's COW-bypass such reclaiming inodes explicitly. Another issue here is sync `iput()` can do the final inode eviction. So deferred inode inactivation is also needed, a dedicated doubly list is designed to maintain such inactivation jobs since a new ioctl needs to wait for a single inactivation job, IOWs, we need to remove a single node in the list. Thus, the original lockless list is unsuitable for us. Signed-off-by: Gao Xiang [ joe: move secondary_evicting logic right after unshare and fix code conflicts ] Signed-off-by: Joseph Qi Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3831 --- fs/xfs/libxfs/xfs_fs.h | 1 + fs/xfs/xfs_icache.c | 54 +++++++++++++++++++++++- fs/xfs/xfs_icache.h | 2 + fs/xfs/xfs_inode.c | 9 ++-- fs/xfs/xfs_inode.h | 1 + fs/xfs/xfs_ioctl.c | 96 ++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_mount.h | 4 ++ fs/xfs/xfs_reflink.c | 34 +++++++++------ fs/xfs/xfs_super.c | 5 +++ 9 files changed, 189 insertions(+), 17 deletions(-) diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h index cbd68a3699bf..7a38c22787bc 100644 --- a/fs/xfs/libxfs/xfs_fs.h +++ b/fs/xfs/libxfs/xfs_fs.h @@ -847,6 +847,7 @@ enum { #define XFS_IOC_SET_REFLINK_FLAGS _IOW('X', 200, uint32_t) #define XFS_IOC_GET_REFLINK_FLAGS _IOR('X', 201, uint32_t) +#define XFS_IOC_WAIT_REFLINK_SECONDARY _IOW('X', 202, uint32_t) #ifndef HAVE_BBMACROS /* diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index fc980ae5e82a..6f80df4f6bd9 
100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -116,7 +116,7 @@ xfs_inode_alloc( spin_lock_init(&ip->i_ioend_lock); ip->i_next_unlinked = NULLAGINO; ip->i_prev_unlinked = 0; - + INIT_LIST_HEAD(&ip->i_reflink_opt_gclist); return ip; } @@ -1828,7 +1828,7 @@ xfs_inodegc_set_reclaimable( * This is the last chance to make changes to an otherwise unreferenced file * before incore reclamation happens. */ -static int +int xfs_inodegc_inactivate( struct xfs_inode *ip) { @@ -1841,6 +1841,40 @@ xfs_inodegc_inactivate( } +void +xfs_inodegc_reflink_opt_worker( + struct work_struct *work) +{ + struct xfs_mount *mp = container_of(work, struct xfs_mount, + m_reflink_opt_gcwork); + struct xfs_inode *ip; + + while (1) { + spin_lock(&mp->m_reflink_opt_gclock); + /* + * fg ioctl can handle a specific inode too. In that case, + * we will not see such inode on the list anymore. + */ + if (list_empty(&mp->m_reflink_opt_gclist)) { + spin_unlock(&mp->m_reflink_opt_gclock); + break; + } + ip = list_first_entry(&mp->m_reflink_opt_gclist, + struct xfs_inode, i_reflink_opt_gclist); + /* + * Or we detach it ourselves in the gclock, in that case, + * fg ioctl will hit list_empty (fg ioctl also check + * list_empty under gclock). 
+ */ + list_del_init(&ip->i_reflink_opt_gclist); + spin_unlock(&mp->m_reflink_opt_gclock); + + ASSERT(ip->i_flags & XFS_NEED_INACTIVE); + xfs_iflags_set(ip, XFS_INACTIVATING); + xfs_inodegc_inactivate(ip); + } +} + void xfs_inodegc_worker( struct work_struct *work) @@ -2075,6 +2109,22 @@ xfs_inodegc_queue( unsigned long queue_delay = 1; trace_xfs_inode_set_need_inactive(ip); + + if ((ip->i_reflink_flags & XFS_REFLINK_SECONDARY) && + /* ip->i_reflink_opt_ip won't be changed here since we're the owner */ + READ_ONCE(ip->i_reflink_opt_ip)) { + /* gclist will be attached before marking XFS_NEED_INACTIVE */ + spin_lock(&mp->m_reflink_opt_gclock); + list_add_tail(&ip->i_reflink_opt_gclist, + &mp->m_reflink_opt_gclist); + queue_work(mp->m_inodegc_wq, + &mp->m_reflink_opt_gcwork); + spin_unlock(&mp->m_reflink_opt_gclock); + wake_up_all(&mp->m_reflink_opt_wait); + xfs_iflags_set(ip, XFS_NEED_INACTIVE); + return; + } + spin_lock(&ip->i_flags_lock); ip->i_flags |= XFS_NEED_INACTIVE; spin_unlock(&ip->i_flags_lock); diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h index 905944dafbe5..6646eb2a7654 100644 --- a/fs/xfs/xfs_icache.h +++ b/fs/xfs/xfs_icache.h @@ -70,10 +70,12 @@ void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip); void xfs_inode_set_cowblocks_tag(struct xfs_inode *ip); void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip); +int xfs_inodegc_inactivate(struct xfs_inode *ip); void xfs_blockgc_worker(struct work_struct *work); void xfs_blockgc_stop(struct xfs_mount *mp); void xfs_blockgc_start(struct xfs_mount *mp); +void xfs_inodegc_reflink_opt_worker(struct work_struct *work); void xfs_inodegc_worker(struct work_struct *work); void xfs_inodegc_push(struct xfs_mount *mp); int xfs_inodegc_flush(struct xfs_mount *mp); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index db11c8577e90..0c9970e3359f 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1717,9 +1717,12 @@ xfs_reflink_opt_disconnect( valid = true; } 
mutex_unlock(&mp->m_reflink_opt_lock); - if (valid && unexpected) - xfs_warn(mp, "unexpectedly, inactive reflink file in advance %llu", - ip->i_ino); + if (valid) { + wake_up_all(&mp->m_reflink_opt_wait); + if (unexpected) + xfs_warn(mp, "unexpectedly, inactive reflink file in advance %llu", + ip->i_ino); + } } /* diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index fb73dbc2e88c..1736c10333aa 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -92,6 +92,7 @@ typedef struct xfs_inode { * only support one reflink file under flag XFS_REFLINK_PRIMARY */ struct xfs_inode *i_reflink_opt_ip; + struct list_head i_reflink_opt_gclist; /* VFS inode */ struct inode i_vnode; /* embedded VFS inode */ diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index b3ff5fa49c85..fe49b92f4926 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -1892,6 +1892,93 @@ xfs_fs_eofblocks_from_user( #define XFS_IOC_ALLOCSP64 _IOW ('X', 36, struct xfs_flock64) #define XFS_IOC_FREESP64 _IOW ('X', 37, struct xfs_flock64) +static bool +xfs_need_wait_reflink_secondary( + struct xfs_mount *mp, + struct xfs_inode *ip) +{ + struct xfs_inode *sip; + + mutex_lock(&mp->m_reflink_opt_lock); + sip = ip->i_reflink_opt_ip; + if (!sip /* pair nolonger valid */ || + (READ_ONCE(sip->i_flags) & XFS_NEED_INACTIVE) /* retry now */) { + mutex_unlock(&mp->m_reflink_opt_lock); + return false; + } + mutex_unlock(&mp->m_reflink_opt_lock); + return true; +} + +int +xfs_ioc_wait_reflink_secondary( + struct xfs_mount *mp, + struct xfs_inode *ip, + u32 timeout_sec) +{ + struct xfs_inode *sip; + unsigned long expire = 0; + + if (!(ip->i_reflink_flags & XFS_REFLINK_PRIMARY)) + return -EINVAL; + if (timeout_sec) + expire = jiffies + HZ * timeout_sec; +retry: + mutex_lock(&mp->m_reflink_opt_lock); + sip = ip->i_reflink_opt_ip; + if (!sip) { + mutex_unlock(&mp->m_reflink_opt_lock); + return 0; + } + spin_lock(&sip->i_flags_lock); + /* + * We need to consider if this inode needs to be inactive + * immediately 
here. + */ + /* already inactivating now by others? */ + if ((sip->i_flags & XFS_INACTIVATING) || + /* the inode isn't reclaimable (active or race). */ + !(sip->i_flags & (XFS_NEED_INACTIVE | XFS_INACTIVATING))) { + spin_unlock(&sip->i_flags_lock); + mutex_unlock(&mp->m_reflink_opt_lock); + if (fatal_signal_pending(current)) + return -EINTR; + if (timeout_sec) { + if (time_after(jiffies, expire)) + return -ETIMEDOUT; + wait_event_killable_timeout(mp->m_reflink_opt_wait, + !xfs_need_wait_reflink_secondary(mp, ip), + HZ * timeout_sec); + } else { + wait_event_killable(mp->m_reflink_opt_wait, + !xfs_need_wait_reflink_secondary(mp, ip)); + } + goto retry; + } + spin_unlock(&sip->i_flags_lock); + + /* + * gcwork is already on the list since XFS_NEED_INACTIVE is + * set afterwards, let's try to drop this from gcwork list. + */ + spin_lock(&mp->m_reflink_opt_gclock); + /* if the bg kworker decides to handle instead, list_empty will be hit */ + if (list_empty(&sip->i_reflink_opt_gclist)) { + spin_unlock(&mp->m_reflink_opt_gclock); + mutex_unlock(&mp->m_reflink_opt_lock); + goto retry; + } + list_del_init(&sip->i_reflink_opt_gclist); + spin_unlock(&mp->m_reflink_opt_gclock); + mutex_unlock(&mp->m_reflink_opt_lock); + + /* XFS_NEED_INACTIVE will be stable here. */ + ASSERT(sip->i_flags & XFS_NEED_INACTIVE); + xfs_iflags_set(sip, XFS_INACTIVATING); + xfs_inodegc_inactivate(sip); + return 0; +} + /* * Note: some of the ioctl's return positive numbers as a * byte count indicating success, such as readlink_by_handle. 
@@ -2189,6 +2276,15 @@ xfs_file_ioctl( return 0; } + case XFS_IOC_WAIT_REFLINK_SECONDARY: { + u32 timeout_sec; + + if (get_user(timeout_sec, (uint32_t __user *)arg)) + return -EFAULT; + + return xfs_ioc_wait_reflink_secondary(mp, ip, timeout_sec); + } + default: return -ENOTTY; } diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 5fa454b3b7f6..348d2eb9f649 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -252,6 +252,10 @@ typedef struct xfs_mount { struct cpumask m_inodegc_cpumask; struct mutex m_reflink_opt_lock; + spinlock_t m_reflink_opt_gclock; + struct list_head m_reflink_opt_gclist; + struct work_struct m_reflink_opt_gcwork; + struct wait_queue_head m_reflink_opt_wait; } xfs_mount_t; #define M_IGEO(mp) (&(mp)->m_ino_geo) diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 59ef06076bd3..cce649e5e1e0 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -506,7 +506,8 @@ xfs_reflink_fill_delalloc( STATIC int xfs_reflink_unshare_range( struct xfs_inode *src, - struct xfs_bmbt_irec *oimap) + struct xfs_bmbt_irec *oimap, + bool *secondary_evicting) { struct xfs_mount *mp = src->i_mount; struct xfs_inode *ip; @@ -521,19 +522,13 @@ xfs_reflink_unshare_range( struct xfs_bmbt_irec imap = *oimap; struct xfs_bmbt_irec cmap; -retry: mutex_lock(&mp->m_reflink_opt_lock); - if (WARN_ON(!src->i_reflink_opt_ip)) { - mutex_unlock(&mp->m_reflink_opt_lock); - return -EINVAL; - } - - if (!igrab(VFS_I(src->i_reflink_opt_ip))) { + ip = src->i_reflink_opt_ip; + if (!ip || !igrab(VFS_I(ip))) { mutex_unlock(&mp->m_reflink_opt_lock); - delay(1); - goto retry; + *secondary_evicting = true; + return 0; } - ip = src->i_reflink_opt_ip; mutex_unlock(&mp->m_reflink_opt_lock); xfs_ilock(ip, lockmode); @@ -651,6 +646,7 @@ xfs_reflink_allocate_cow( { int error; bool found; + bool secondary_evicting = false; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (!ip->i_cowfp) { @@ -669,8 +665,22 @@ xfs_reflink_allocate_cow( if (ip->i_reflink_flags & 
XFS_REFLINK_PRIMARY) { xfs_iunlock(ip, *lockmode); - xfs_reflink_unshare_range(ip, imap); + error = xfs_reflink_unshare_range(ip, imap, + &secondary_evicting); xfs_ilock(ip, *lockmode); + if (error) { + xfs_warn(ip->i_mount, + "failed to unshare secondary range @ ino %llu", + ip->i_ino); + } else if (secondary_evicting) { + /* + * It's impossible to have another reflink here (racing with + * FICLONE) since ip takes XFS_MMAPLOCK_SHARED lock and FICLONE + * needs XFS_MMAPLOCK_EXEC. + */ + *shared = false; + return 0; + } } /* diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index e5308687b9ae..b6db17213816 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1528,6 +1528,11 @@ xfs_fs_fill_super( #endif sb->s_op = &xfs_super_operations; + spin_lock_init(&mp->m_reflink_opt_gclock); + INIT_LIST_HEAD(&mp->m_reflink_opt_gclist); + INIT_WORK(&mp->m_reflink_opt_gcwork, xfs_inodegc_reflink_opt_worker); + init_waitqueue_head(&mp->m_reflink_opt_wait); + /* * Delay mount work if the debug hook is set. This is debug * instrumention to coordinate simulation of xfs mount failures with -- Gitee From 1ba9eada5d014c3cbe0c39497339cc13dd046fd9 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 17 May 2024 10:40:13 +0800 Subject: [PATCH 1328/2138] anolis: xfs: add more checks for XFS_IOC_SET_REFLINK_FLAGS ANBZ: #9794 Otherwise the related paths will be risky. 
Signed-off-by: Gao Xiang Signed-off-by: Joseph Qi Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3831 --- fs/xfs/xfs_ioctl.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index fe49b92f4926..720b9ed7c072 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -2264,8 +2264,23 @@ xfs_file_ioctl( if (get_user(in, (uint32_t __user *)arg)) return -EFAULT; + /* invalid values */ + if ((in & ~(XFS_REFLINK_PRIMARY | XFS_REFLINK_SECONDARY)) || + (in & (XFS_REFLINK_PRIMARY | XFS_REFLINK_SECONDARY)) == + (XFS_REFLINK_PRIMARY | XFS_REFLINK_SECONDARY)) + return -EINVAL; + + /* clearing all flags is unallowed */ + if (!in) + return -EINVAL; + xfs_ilock(ip, XFS_ILOCK_EXCL); - ip->i_reflink_flags = in; + if (!ip->i_reflink_flags) { + ip->i_reflink_flags = in; + } else if (ip->i_reflink_flags != in) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + return -EINVAL; + } xfs_iunlock(ip, XFS_ILOCK_EXCL); return 0; } -- Gitee From c107cff5b45620cc67455b3932f9e480418bfeb4 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 17 May 2024 10:46:39 +0800 Subject: [PATCH 1329/2138] anolis: xfs: export xfs_wait_reflink_secondary() for khotfixes ANBZ: #9794 It's used for khotfixes only to workaround old userspace applications. I'm not sure if it has other use cases for now. 
Signed-off-by: Gao Xiang Signed-off-by: Joseph Qi Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3831 --- fs/xfs/xfs_ioctl.c | 4 ++-- fs/xfs/xfs_ioctl.h | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 720b9ed7c072..458c79ea81d3 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -1911,7 +1911,7 @@ xfs_need_wait_reflink_secondary( } int -xfs_ioc_wait_reflink_secondary( +xfs_wait_reflink_secondary( struct xfs_mount *mp, struct xfs_inode *ip, u32 timeout_sec) @@ -2297,7 +2297,7 @@ xfs_file_ioctl( if (get_user(timeout_sec, (uint32_t __user *)arg)) return -EFAULT; - return xfs_ioc_wait_reflink_secondary(mp, ip, timeout_sec); + return xfs_wait_reflink_secondary(mp, ip, timeout_sec); } default: diff --git a/fs/xfs/xfs_ioctl.h b/fs/xfs/xfs_ioctl.h index 38be600b5e1e..f74bb55133d9 100644 --- a/fs/xfs/xfs_ioctl.h +++ b/fs/xfs/xfs_ioctl.h @@ -69,4 +69,6 @@ int xfs_fsbulkstat_one_fmt(struct xfs_ibulk *breq, const struct xfs_bulkstat *bstat); int xfs_fsinumbers_fmt(struct xfs_ibulk *breq, const struct xfs_inumbers *igrp); +int xfs_wait_reflink_secondary(struct xfs_mount *mp, struct xfs_inode *ip, u32 timeout_sec); + #endif -- Gitee From 86e20077f594689aa29a76c65670dec52a49c693 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 23 May 2024 23:13:45 +0800 Subject: [PATCH 1330/2138] anolis: xfs: try to flush logs for secondary inodes ANBZ: #9794 Currently, "xfs_reflink_find_shared" could be stuck for tens of milliseconds because the AGF lock is taken while doing xfs_log_force(). For more details, see [1]. Let's xfs_log_force() in advance periodically to work around the issue. It also reduces the number of pending logs. 
[1] https://lore.kernel.org/r/20240527061006.4045908-1-hsiangkao@linux.alibaba.com Signed-off-by: Gao Xiang Signed-off-by: Joseph Qi Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3831 --- fs/xfs/xfs_globals.c | 1 + fs/xfs/xfs_inode.c | 14 ++++++++++++++ fs/xfs/xfs_linux.h | 1 + fs/xfs/xfs_sysctl.c | 9 +++++++++ fs/xfs/xfs_sysctl.h | 1 + 5 files changed, 26 insertions(+) diff --git a/fs/xfs/xfs_globals.c b/fs/xfs/xfs_globals.c index 9edc1f2bc939..afeeef53a7b0 100644 --- a/fs/xfs/xfs_globals.c +++ b/fs/xfs/xfs_globals.c @@ -30,6 +30,7 @@ xfs_param_t xfs_params = { .inherit_nodfrg = { 0, 1, 1 }, .fstrm_timer = { 1, 30*100, 3600*100}, .blockgc_timer = { 1, 300, 3600*24}, + .reflink_inactive_force_log_period = { 0, 5, 1000 }, }; struct xfs_globals xfs_globals = { diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 0c9970e3359f..73c8b74d9405 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1356,6 +1356,8 @@ xfs_itruncate_extents_flags( xfs_fileoff_t first_unmap_block; xfs_filblks_t unmap_len; int error = 0; + bool secondary_inactive = false; + int force_count = 0; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(!atomic_read(&VFS_I(ip)->i_count) || @@ -1386,9 +1388,13 @@ xfs_itruncate_extents_flags( return 0; } + if (!new_size && (ip->i_reflink_flags & XFS_REFLINK_SECONDARY)) + secondary_inactive = true; + unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1; while (unmap_len > 0) { ASSERT(tp->t_highest_agno == NULLAGNUMBER); + error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len, flags, XFS_ITRUNC_MAX_EXTENTS); if (error) @@ -1398,6 +1404,14 @@ xfs_itruncate_extents_flags( error = xfs_defer_finish(&tp); if (error) goto out; + + if (secondary_inactive) { + if (xfs_reflink_inactive_force_log_period && + ++force_count >= xfs_reflink_inactive_force_log_period) { + xfs_log_force(mp, 0); + force_count = 0; + } + } } if (whichfork == XFS_DATA_FORK) { diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index 
e9d317a3dafe..41828f4e5d7c 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h @@ -103,6 +103,7 @@ typedef __u32 xfs_nlink_t; #define xfs_inherit_nodefrag xfs_params.inherit_nodfrg.val #define xfs_fstrm_centisecs xfs_params.fstrm_timer.val #define xfs_blockgc_secs xfs_params.blockgc_timer.val +#define xfs_reflink_inactive_force_log_period xfs_params.reflink_inactive_force_log_period.val #define current_cpu() (raw_smp_processor_id()) #define current_set_flags_nested(sp, f) \ diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c index fade33735393..6b93b230166c 100644 --- a/fs/xfs/xfs_sysctl.c +++ b/fs/xfs/xfs_sysctl.c @@ -113,6 +113,15 @@ static struct ctl_table xfs_table[] = { .extra1 = &xfs_params.syncd_timer.min, .extra2 = &xfs_params.syncd_timer.max }, + { + .procname = "reflink_inactive_force_log_period", + .data = &xfs_params.reflink_inactive_force_log_period.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &xfs_params.reflink_inactive_force_log_period.min, + .extra2 = &xfs_params.reflink_inactive_force_log_period.max + }, { .procname = "inherit_sync", .data = &xfs_params.inherit_sync.val, diff --git a/fs/xfs/xfs_sysctl.h b/fs/xfs/xfs_sysctl.h index f78ad6b10ea5..726eb447bb49 100644 --- a/fs/xfs/xfs_sysctl.h +++ b/fs/xfs/xfs_sysctl.h @@ -36,6 +36,7 @@ typedef struct xfs_param { xfs_sysctl_val_t inherit_nodfrg;/* Inherit the "nodefrag" inode flag. */ xfs_sysctl_val_t fstrm_timer; /* Filestream dir-AG assoc'n timeout. */ xfs_sysctl_val_t blockgc_timer; /* Interval between blockgc scans */ + xfs_sysctl_val_t reflink_inactive_force_log_period; } xfs_param_t; /* -- Gitee From 223a164461af2d8462f6bef2cb1e7ebf2aa9dd7d Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Thu, 14 Dec 2023 14:34:23 -0800 Subject: [PATCH 1331/2138] mm: align larger anonymous mappings on THP boundaries ANBZ: #9728 commit efa7df3e3bb5da8e6abbe37727417f32a37fba47 upstream. 
Align larger anonymous memory mappings on THP boundaries by going through thp_get_unmapped_area if THPs are enabled for the current process. With this patch, larger anonymous mappings are now THP aligned. When a malloc library allocates a 2MB or larger arena, that arena can now be mapped with THPs right from the start, which can result in better TLB hit rates and execution time. Link: https://lkml.kernel.org/r/20220809142457.4751229f@imladris.surriel.com Link: https://lkml.kernel.org/r/20231214223423.1133074-1-yang@os.amperecomputing.com Signed-off-by: Rik van Riel Reviewed-by: Yang Shi Cc: Matthew Wilcox Cc: Christopher Lameter Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3846 --- mm/mmap.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/mmap.c b/mm/mmap.c index 70de32960581..a1c16ca47670 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1841,6 +1841,9 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, */ pgoff = 0; get_area = shmem_get_unmapped_area; + } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { + /* Ensures that larger anonymous mappings are THP aligned. */ + get_area = thp_get_unmapped_area; } addr = get_area(file, addr, len, pgoff, flags); -- Gitee From 16e4615216359f2810d07187603cbbbde2e5512e Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Wed, 20 Dec 2023 22:59:43 -0800 Subject: [PATCH 1332/2138] mm: mmap: map MAP_STACK to VM_NOHUGEPAGE ANBZ: #9728 commit c4608d1bf7c6536d1a3d233eb21e50678681564e upstream. commit efa7df3e3bb5 ("mm: align larger anonymous mappings on THP boundaries") incured regression for stress-ng pthread benchmark [1]. It is because THP get allocated to pthread's stack area much more possible than before. Pthread's stack area is allocated by mmap without VM_GROWSDOWN or VM_GROWSUP flag, so kernel can't tell whether it is a stack area or not. 
The MAP_STACK flag is used to mark the stack area, but it is a no-op on Linux. Mapping MAP_STACK to VM_NOHUGEPAGE to prevent from allocating THP for such stack area. With this change the stack area looks like: fffd18e10000-fffd19610000 rw-p 00000000 00:00 0 Size: 8192 kB KernelPageSize: 4 kB MMUPageSize: 4 kB Rss: 12 kB Pss: 12 kB Pss_Dirty: 12 kB Shared_Clean: 0 kB Shared_Dirty: 0 kB Private_Clean: 0 kB Private_Dirty: 12 kB Referenced: 12 kB Anonymous: 12 kB KSM: 0 kB LazyFree: 0 kB AnonHugePages: 0 kB ShmemPmdMapped: 0 kB FilePmdMapped: 0 kB Shared_Hugetlb: 0 kB Private_Hugetlb: 0 kB Swap: 0 kB SwapPss: 0 kB Locked: 0 kB THPeligible: 0 VmFlags: rd wr mr mw me ac nh The "nh" flag is set. [1] https://lore.kernel.org/linux-mm/202312192310.56367035-oliver.sang@intel.com/ Link: https://lkml.kernel.org/r/20231221065943.2803551-2-shy828301@gmail.com Fixes: efa7df3e3bb5 ("mm: align larger anonymous mappings on THP boundaries") Signed-off-by: Yang Shi Reported-by: kernel test robot Tested-by: Oliver Sang Reviewed-by: Yin Fengwei Cc: Rik van Riel Cc: Matthew Wilcox Cc: Christopher Lameter Cc: Huang, Ying Cc: Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3846 --- include/linux/mman.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/include/linux/mman.h b/include/linux/mman.h index b2e2677ea156..1199d73d56cb 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -154,10 +154,11 @@ calc_vm_prot_bits(unsigned long prot, unsigned long pkey) static inline unsigned long calc_vm_flag_bits(struct file *file, unsigned long flags) { - return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | - _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | - _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | - arch_calc_vm_flag_bits(file, flags); + return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) | + _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED) | + 
_calc_vm_trans(flags, MAP_SYNC, VM_SYNC) | + _calc_vm_trans(flags, MAP_STACK, VM_NOHUGEPAGE) | + arch_calc_vm_flag_bits(file, flags); } unsigned long vm_commit_limit(void); -- Gitee From 4645bd2e90a89cd3ac172ff3e5b52a5faeb369bd Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Tue, 23 Jan 2024 17:14:20 +0000 Subject: [PATCH 1333/2138] mm: thp_get_unmapped_area must honour topdown preference ANBZ: #9728 commit 96204e15310c218fd9355bdcacd02fed1d18070e upstream. The addition of commit efa7df3e3bb5 ("mm: align larger anonymous mappings on THP boundaries") caused the "virtual_address_range" mm selftest to start failing on arm64. Let's fix that regression. There were 2 visible problems when running the test; 1) it takes much longer to execute, and 2) the test fails. Both are related: The (first part of the) test allocates as many 1GB anonymous blocks as it can in the low 256TB of address space, passing NULL as the addr hint to mmap. Before the faulty patch, all allocations were abutted and contained in a single, merged VMA. However, after this patch, each allocation is in its own VMA, and there is a 2M gap between each VMA. This causes the 2 problems in the test: 1) mmap becomes MUCH slower because there are so many VMAs to check to find a new 1G gap. 2) mmap fails once it hits the VMA limit (/proc/sys/vm/max_map_count). Hitting this limit then causes a subsequent calloc() to fail, which causes the test to fail. The problem is that arm64 (unlike x86) selects ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT. But __thp_get_unmapped_area() allocates len+2M then always aligns to the bottom of the discovered gap. That causes the 2M hole. Fix this by detecting cases where we can still achive the alignment goal when moved to the top of the allocated area, if configured to prefer top-down allocation. While we are at it, fix thp_get_unmapped_area's use of pgoff, which should always be zero for anonymous mappings. 
Prior to the faulty change, while it was possible for user space to pass in pgoff!=0, the old mm->get_unmapped_area() handler would not use it. thp_get_unmapped_area() does use it, so let's explicitly zero it before calling the handler. This should also be the correct behavior for arches that define their own get_unmapped_area() handler. Link: https://lkml.kernel.org/r/20240123171420.3970220-1-ryan.roberts@arm.com Fixes: efa7df3e3bb5 ("mm: align larger anonymous mappings on THP boundaries") Closes: https://lore.kernel.org/linux-mm/1e8f5ac7-54ce-433a-ae53-81522b2320e1@arm.com/ Signed-off-by: Ryan Roberts Reviewed-by: Yang Shi Cc: Matthew Wilcox (Oracle) Cc: Rik van Riel Cc: Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3846 --- mm/huge_memory.c | 10 ++++++++-- mm/mmap.c | 6 ++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 87d8dc892ea7..065f55725c25 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -830,7 +830,7 @@ static unsigned long __thp_get_unmapped_area(struct file *filp, { loff_t off_end = off + len; loff_t off_align = round_up(off, size); - unsigned long len_pad, ret; + unsigned long len_pad, ret, off_sub; if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall()) return 0; @@ -859,7 +859,13 @@ static unsigned long __thp_get_unmapped_area(struct file *filp, if (ret == addr) return addr; - ret += (off - ret) & (size - 1); + off_sub = (off - ret) & (size - 1); + + if (current->mm->get_unmapped_area == arch_get_unmapped_area_topdown && + !off_sub) + return ret + size; + + ret += off_sub; return ret; } diff --git a/mm/mmap.c b/mm/mmap.c index a1c16ca47670..41f1ddf071b8 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1837,15 +1837,17 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, /* * mmap_region() will call shmem_zero_setup() to create a file, * so use shmem's 
get_unmapped_area in case it can be huge. - * do_mmap() will clear pgoff, so match alignment. */ - pgoff = 0; get_area = shmem_get_unmapped_area; } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { /* Ensures that larger anonymous mappings are THP aligned. */ get_area = thp_get_unmapped_area; } + /* Always treat pgoff as zero for anonymous memory. */ + if (!file) + pgoff = 0; + addr = get_area(file, addr, len, pgoff, flags); if (IS_ERR_VALUE(addr)) return addr; -- Gitee From e8b56e160f5670f816c91faa51370be4567e2395 Mon Sep 17 00:00:00 2001 From: Chuanhua Han Date: Wed, 29 May 2024 20:28:19 +1200 Subject: [PATCH 1334/2138] mm: swap: introduce swap_free_nr() for batched swap_free() ANBZ: #9728 commit ebfba0045176cb013f49cb3e5bd9f0b16eba203c upstream. Patch series "large folios swap-in: handle refault cases first", v5. This patchset is extracted from the large folio swapin series[1], primarily addressing the handling of scenarios involving large folios in the swap cache. Currently, it is particularly focused on addressing the refaulting of mTHP, which is still undergoing reclamation. This approach aims to streamline code review and expedite the integration of this segment into the MM tree. It relies on Ryan's swap-out series[2], leveraging the helper function swap_pte_batch() introduced by that series. Presently, do_swap_page only encounters a large folio in the swap cache before the large folio is released by vmscan. However, the code should remain equally useful once we support large folio swap-in via swapin_readahead(). This approach can effectively reduce page faults and eliminate most redundant checks and early exits for MTE restoration in recent MTE patchset[3]. The large folio swap-in for SWP_SYNCHRONOUS_IO and swapin_readahead() will be split into separate patch sets and sent at a later time. 
[1] https://lore.kernel.org/linux-mm/20240304081348.197341-1-21cnbao@gmail.com/ [2] https://lore.kernel.org/linux-mm/20240408183946.2991168-1-ryan.roberts@arm.com/ [3] https://lore.kernel.org/linux-mm/20240322114136.61386-1-21cnbao@gmail.com/ This patch (of 6): While swapping in a large folio, we need to free swaps related to the whole folio. To avoid frequently acquiring and releasing swap locks, it is better to introduce an API for batched free. Furthermore, this new function, swap_free_nr(), is designed to efficiently handle various scenarios for releasing a specified number, nr, of swap entries. Link: https://lkml.kernel.org/r/20240529082824.150954-1-21cnbao@gmail.com Link: https://lkml.kernel.org/r/20240529082824.150954-2-21cnbao@gmail.com Signed-off-by: Chuanhua Han Co-developed-by: Barry Song Signed-off-by: Barry Song Reviewed-by: Ryan Roberts Acked-by: Chris Li Reviewed-by: "Huang, Ying" Cc: Baolin Wang Cc: David Hildenbrand Cc: Gao Xiang Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kairui Song Cc: Matthew Wilcox (Oracle) Cc: Suren Baghdasaryan Cc: Yosry Ahmed Cc: Yu Zhao Cc: Zi Yan Cc: Andreas Larsson Cc: Christoph Hellwig Cc: "David S. Miller" Cc: Khalid Aziz Cc: Len Brown Cc: Pavel Machek Cc: "Rafael J. 
Wysocki" Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3852 --- include/linux/swap.h | 5 +++++ mm/swapfile.c | 47 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+) diff --git a/include/linux/swap.h b/include/linux/swap.h index 2089db1cab10..8237ad35af60 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -497,6 +497,7 @@ extern void swap_shmem_alloc(swp_entry_t); extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t); extern void swap_free(swp_entry_t); +extern void swap_free_nr(swp_entry_t entry, int nr_pages); extern void swapcache_free_entries(swp_entry_t *entries, int n); extern void free_swap_and_cache_nr(swp_entry_t entry, int nr); int swap_type_of(dev_t device, sector_t offset); @@ -579,6 +580,10 @@ static inline void swap_free(swp_entry_t swp) { } +static inline void swap_free_nr(swp_entry_t entry, int nr_pages) +{ +} + static inline void put_swap_folio(struct folio *folio, swp_entry_t swp) { } diff --git a/mm/swapfile.c b/mm/swapfile.c index 16cd196c04ac..7129cf6678b7 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1351,6 +1351,53 @@ void swap_free(swp_entry_t entry) __swap_entry_free(p, entry); } +static void cluster_swap_free_nr(struct swap_info_struct *sis, + unsigned long offset, int nr_pages) +{ + struct swap_cluster_info *ci; + DECLARE_BITMAP(to_free, BITS_PER_LONG) = { 0 }; + int i, nr; + + ci = lock_cluster_or_swap_info(sis, offset); + while (nr_pages) { + nr = min(BITS_PER_LONG, nr_pages); + for (i = 0; i < nr; i++) { + if (!__swap_entry_free_locked(sis, offset + i, 1)) + bitmap_set(to_free, i, 1); + } + if (!bitmap_empty(to_free, BITS_PER_LONG)) { + unlock_cluster_or_swap_info(sis, ci); + for_each_set_bit(i, to_free, BITS_PER_LONG) + free_swap_slot(swp_entry(sis->type, offset + i)); + if (nr == nr_pages) + return; + bitmap_clear(to_free, 0, BITS_PER_LONG); + ci = 
lock_cluster_or_swap_info(sis, offset); + } + offset += nr; + nr_pages -= nr; + } + unlock_cluster_or_swap_info(sis, ci); +} + +void swap_free_nr(swp_entry_t entry, int nr_pages) +{ + int nr; + struct swap_info_struct *sis; + unsigned long offset = swp_offset(entry); + + sis = _swap_info_get(entry); + if (!sis) + return; + + while (nr_pages) { + nr = min_t(int, nr_pages, SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER); + cluster_swap_free_nr(sis, offset, nr); + offset += nr; + nr_pages -= nr; + } +} + /* * Called after dropping swapcache to decrease refcnt to swap entries. */ -- Gitee From 076e2b483afddc884e384434d719e43d20c0251d Mon Sep 17 00:00:00 2001 From: Barry Song Date: Wed, 29 May 2024 20:28:20 +1200 Subject: [PATCH 1335/2138] mm: remove the implementation of swap_free() and always use swap_free_nr() ANBZ: #9728 commit 54f7a49c20ebb5189980c53e6e66709d22bee572 upstream. To streamline maintenance efforts, we propose removing the implementation of swap_free(). Instead, we can simply invoke swap_free_nr() with nr set to 1. swap_free_nr() is designed with a bitmap consisting of only one long, resulting in overhead that can be ignored for cases where nr equals 1. A prime candidate for leveraging swap_free_nr() lies within kernel/power/swap.c. Implementing this change facilitates the adoption of batch processing for hibernation. Link: https://lkml.kernel.org/r/20240529082824.150954-3-21cnbao@gmail.com Signed-off-by: Barry Song Suggested-by: "Huang, Ying" Reviewed-by: "Huang, Ying" Acked-by: Chris Li Reviewed-by: Ryan Roberts Cc: "Rafael J. Wysocki" Cc: Pavel Machek Cc: Len Brown Cc: Hugh Dickins Cc: Christoph Hellwig Cc: Andreas Larsson Cc: Baolin Wang Cc: Chuanhua Han Cc: David Hildenbrand Cc: "David S. 
Miller" Cc: Gao Xiang Cc: Johannes Weiner Cc: Kairui Song Cc: Khalid Aziz Cc: Matthew Wilcox (Oracle) Cc: Suren Baghdasaryan Cc: Yosry Ahmed Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3852 --- include/linux/swap.h | 10 +++++----- kernel/power/swap.c | 5 ++--- mm/swapfile.c | 17 ++++------------- 3 files changed, 11 insertions(+), 21 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 8237ad35af60..dfbd47e66ec0 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -496,7 +496,6 @@ extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern void swap_shmem_alloc(swp_entry_t); extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t); -extern void swap_free(swp_entry_t); extern void swap_free_nr(swp_entry_t entry, int nr_pages); extern void swapcache_free_entries(swp_entry_t *entries, int n); extern void free_swap_and_cache_nr(swp_entry_t entry, int nr); @@ -576,10 +575,6 @@ static inline int swapcache_prepare(swp_entry_t swp) return 0; } -static inline void swap_free(swp_entry_t swp) -{ -} - static inline void swap_free_nr(swp_entry_t entry, int nr_pages) { } @@ -628,6 +623,11 @@ static inline void free_swap_and_cache(swp_entry_t entry) free_swap_and_cache_nr(entry, 1); } +static inline void swap_free(swp_entry_t entry) +{ + swap_free_nr(entry, 1); +} + #ifdef CONFIG_MEMCG static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) { diff --git a/kernel/power/swap.c b/kernel/power/swap.c index d71c590550d2..b1896346fde1 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -201,12 +201,11 @@ void free_all_swap_pages(int swap) while ((node = swsusp_extents.rb_node)) { struct swsusp_extent *ext; - unsigned long offset; ext = rb_entry(node, struct swsusp_extent, node); rb_erase(node, &swsusp_extents); - for (offset = ext->start; offset <= ext->end; offset++) - 
swap_free(swp_entry(swap, offset)); + swap_free_nr(swp_entry(swap, ext->start), + ext->end - ext->start + 1); kfree(ext); } diff --git a/mm/swapfile.c b/mm/swapfile.c index 7129cf6678b7..eb5aaac66f4d 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1338,19 +1338,6 @@ static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry) swap_range_free(p, offset, 1); } -/* - * Caller has made sure that the swap device corresponding to entry - * is still around or has not been recycled. - */ -void swap_free(swp_entry_t entry) -{ - struct swap_info_struct *p; - - p = _swap_info_get(entry); - if (p) - __swap_entry_free(p, entry); -} - static void cluster_swap_free_nr(struct swap_info_struct *sis, unsigned long offset, int nr_pages) { @@ -1380,6 +1367,10 @@ static void cluster_swap_free_nr(struct swap_info_struct *sis, unlock_cluster_or_swap_info(sis, ci); } +/* + * Caller has made sure that the swap device corresponding to entry + * is still around or has not been recycled. + */ void swap_free_nr(swp_entry_t entry, int nr_pages) { int nr; -- Gitee From 31253fee80bffe373531eee7aa1bff3a084f352e Mon Sep 17 00:00:00 2001 From: Barry Song Date: Wed, 29 May 2024 20:28:21 +1200 Subject: [PATCH 1336/2138] mm: introduce pte_move_swp_offset() helper which can move offset bidirectionally ANBZ: #9728 commit 3f9abcaa3e9c3910893ccbe6085aa0452e72896d upstream. There could arise a necessity to obtain the first pte_t from a swap pte_t located in the middle. For instance, this may occur within the context of do_swap_page(), where a page fault can potentially occur in any PTE of a large folio. To address this, the following patch introduces pte_move_swp_offset(), a function capable of bidirectional movement by a specified delta argument. Consequently, pte_next_swp_offset() will directly invoke it with delta = 1. 
Link: https://lkml.kernel.org/r/20240529082824.150954-4-21cnbao@gmail.com Signed-off-by: Barry Song Suggested-by: "Huang, Ying" Reviewed-by: Ryan Roberts Reviewed-by: "Huang, Ying" Cc: Andreas Larsson Cc: Baolin Wang Cc: Chris Li Cc: Christoph Hellwig Cc: Chuanhua Han Cc: David Hildenbrand Cc: "David S. Miller" Cc: Gao Xiang Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kairui Song Cc: Khalid Aziz Cc: Len Brown Cc: Matthew Wilcox (Oracle) Cc: Pavel Machek Cc: "Rafael J. Wysocki" Cc: Suren Baghdasaryan Cc: Yosry Ahmed Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3852 --- mm/internal.h | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index 8dca6db71749..3c90a44ac7b3 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -230,18 +230,21 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, } /** - * pte_next_swp_offset - Increment the swap entry offset field of a swap pte. + * pte_move_swp_offset - Move the swap entry offset field of a swap pte + * forward or backward by delta * @pte: The initial pte state; is_swap_pte(pte) must be true and * non_swap_entry() must be false. + * @delta: The direction and the offset we are moving; forward if delta + * is positive; backward if delta is negative * - * Increments the swap offset, while maintaining all other fields, including + * Moves the swap offset, while maintaining all other fields, including * swap type, and any swp pte bits. The resulting pte is returned. 
*/ -static inline pte_t pte_next_swp_offset(pte_t pte) +static inline pte_t pte_move_swp_offset(pte_t pte, long delta) { swp_entry_t entry = pte_to_swp_entry(pte); pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry), - (swp_offset(entry) + 1))); + (swp_offset(entry) + delta))); if (pte_swp_soft_dirty(pte)) new = pte_swp_mksoft_dirty(new); @@ -253,6 +256,20 @@ static inline pte_t pte_next_swp_offset(pte_t pte) return new; } + +/** + * pte_next_swp_offset - Increment the swap entry offset field of a swap pte. + * @pte: The initial pte state; is_swap_pte(pte) must be true and + * non_swap_entry() must be false. + * + * Increments the swap offset, while maintaining all other fields, including + * swap type, and any swp pte bits. The resulting pte is returned. + */ +static inline pte_t pte_next_swp_offset(pte_t pte) +{ + return pte_move_swp_offset(pte, 1); +} + /** * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries * @start_ptep: Page table pointer for the first entry. -- Gitee From f8e876ad826df894e970eca24beb59b1deb815fb Mon Sep 17 00:00:00 2001 From: Barry Song Date: Wed, 29 May 2024 20:28:22 +1200 Subject: [PATCH 1337/2138] mm: introduce arch_do_swap_page_nr() which allows restore metadata for nr pages ANBZ: #9728 commit 29f252cdc293f4a50b5d3dcbed53701d8444614d upstream. Should do_swap_page() have the capability to directly map a large folio, metadata restoration becomes necessary for a specified number of pages denoted as nr. It's important to highlight that metadata restoration is solely required by the SPARC platform, which, however, does not enable THP_SWAP. Consequently, in the present kernel configuration, there exists no practical scenario where users necessitate the restoration of nr metadata. Platforms implementing THP_SWAP might invoke this function with nr values exceeding 1, subsequent to do_swap_page() successfully mapping an entire large folio. Nonetheless, their arch_do_swap_page_nr() functions remain empty. 
Link: https://lkml.kernel.org/r/20240529082824.150954-5-21cnbao@gmail.com Signed-off-by: Barry Song Reviewed-by: Ryan Roberts Reviewed-by: Khalid Aziz Cc: "David S. Miller" Cc: Andreas Larsson Cc: Baolin Wang Cc: Chris Li Cc: Christoph Hellwig Cc: Chuanhua Han Cc: David Hildenbrand Cc: Gao Xiang Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kairui Song Cc: Len Brown Cc: Matthew Wilcox (Oracle) Cc: Pavel Machek Cc: "Rafael J. Wysocki" Cc: Suren Baghdasaryan Cc: Yosry Ahmed Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3852 --- include/linux/pgtable.h | 26 ++++++++++++++++++++------ mm/memory.c | 3 ++- 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 8ac0192ea210..e06b7ab08770 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1063,6 +1063,15 @@ static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) }) #ifndef __HAVE_ARCH_DO_SWAP_PAGE +static inline void arch_do_swap_page_nr(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long addr, + pte_t pte, pte_t oldpte, + int nr) +{ + +} +#else /* * Some architectures support metadata associated with a page. When a * page is being swapped out, this metadata must be saved so it can be @@ -1071,12 +1080,17 @@ static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) * page as metadata for the page. arch_do_swap_page() can restore this * metadata when a page is swapped back in. 
*/ -static inline void arch_do_swap_page(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long addr, - pte_t pte, pte_t oldpte) -{ - +static inline void arch_do_swap_page_nr(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long addr, + pte_t pte, pte_t oldpte, + int nr) +{ + for (int i = 0; i < nr; i++) { + arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE, + pte_advance_pfn(pte, i), + pte_advance_pfn(oldpte, i)); + } } #endif diff --git a/mm/memory.c b/mm/memory.c index 44ca6de266da..8dbfb97c2fa8 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4372,7 +4372,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) VM_BUG_ON(!folio_test_anon(folio) || (pte_write(pte) && !PageAnonExclusive(page))); set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); - arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte); + arch_do_swap_page_nr(vma->vm_mm, vma, vmf->address, + pte, vmf->orig_pte, 1); folio_unlock(folio); if (folio != swapcache && swapcache) { -- Gitee From 05a97518cb83d5bac7d984b04f04397d5d8fb531 Mon Sep 17 00:00:00 2001 From: Chuanhua Han Date: Wed, 29 May 2024 20:28:23 +1200 Subject: [PATCH 1338/2138] mm: swap: make should_try_to_free_swap() support large-folio ANBZ: #9728 commit 4c3f966436873435600b00e5c2c6c8933607e236 upstream. The function should_try_to_free_swap() operates under the assumption that swap-in always occurs at the normal page granularity, i.e., folio_nr_pages() = 1. However, in reality, for large folios, add_to_swap_cache() will invoke folio_ref_add(folio, nr). To accommodate large folio swap-in, this patch eliminates this assumption. Link: https://lkml.kernel.org/r/20240529082824.150954-6-21cnbao@gmail.com Signed-off-by: Chuanhua Han Co-developed-by: Barry Song Signed-off-by: Barry Song Acked-by: Chris Li Reviewed-by: Ryan Roberts Reviewed-by: "Huang, Ying" Reviewed-by: David Hildenbrand Cc: Andreas Larsson Cc: Baolin Wang Cc: Christoph Hellwig Cc: "David S. 
Miller" Cc: Gao Xiang Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kairui Song Cc: Khalid Aziz Cc: Len Brown Cc: Matthew Wilcox (Oracle) Cc: Pavel Machek Cc: "Rafael J. Wysocki" Cc: Suren Baghdasaryan Cc: Yosry Ahmed Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3852 --- mm/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index 8dbfb97c2fa8..8c2080271e21 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3983,7 +3983,7 @@ static inline bool should_try_to_free_swap(struct folio *folio, * reference only in case it's likely that we'll be the exlusive user. */ return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) && - folio_ref_count(folio) == 2; + folio_ref_count(folio) == (1 + folio_nr_pages(folio)); } static vm_fault_t pte_marker_clear(struct vm_fault *vmf) -- Gitee From 047f69d9a10dcc717c54016b7bbb5f7f1c81d9b6 Mon Sep 17 00:00:00 2001 From: Chuanhua Han Date: Wed, 29 May 2024 20:28:24 +1200 Subject: [PATCH 1339/2138] mm: swap: entirely map large folios found in swapcache ANBZ: #9728 commit 508758960b8d89fa464abce2f9897973c8e8d4f0 upstream. When a large folio is found in the swapcache, the current implementation requires calling do_swap_page() nr_pages times, resulting in nr_pages page faults. This patch opts to map the entire large folio at once to minimize page faults. Additionally, redundant checks and early exits for ARM64 MTE restoring are removed. Link: https://lkml.kernel.org/r/20240529082824.150954-7-21cnbao@gmail.com Signed-off-by: Chuanhua Han Co-developed-by: Barry Song Signed-off-by: Barry Song Reviewed-by: Ryan Roberts Reviewed-by: "Huang, Ying" Cc: Andreas Larsson Cc: Baolin Wang Cc: Chris Li Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "David S. 
Miller" Cc: Gao Xiang Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kairui Song Cc: Khalid Aziz Cc: Len Brown Cc: Matthew Wilcox (Oracle) Cc: Pavel Machek Cc: "Rafael J. Wysocki" Cc: Suren Baghdasaryan Cc: Yosry Ahmed Cc: Yu Zhao Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3852 --- mm/memory.c | 59 +++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 48 insertions(+), 11 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 8c2080271e21..496ce1483e46 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4074,6 +4074,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) pte_t pte; vm_fault_t ret = 0; void *shadow = NULL; + int nr_pages; + unsigned long page_idx; + unsigned long address; + pte_t *ptep; if (!pte_unmap_same(vmf)) goto out; @@ -4272,6 +4276,38 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) goto out_nomap; } + nr_pages = 1; + page_idx = 0; + address = vmf->address; + ptep = vmf->pte; + if (folio_test_large(folio) && folio_test_swapcache(folio)) { + int nr = folio_nr_pages(folio); + unsigned long idx = folio_page_idx(folio, page); + unsigned long folio_start = address - idx * PAGE_SIZE; + unsigned long folio_end = folio_start + nr * PAGE_SIZE; + pte_t *folio_ptep; + pte_t folio_pte; + + if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start))) + goto check_folio; + if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end))) + goto check_folio; + + folio_ptep = vmf->pte - idx; + folio_pte = ptep_get(folio_ptep); + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || + swap_pte_batch(folio_ptep, nr, folio_pte) != nr) + goto check_folio; + + page_idx = idx; + address = folio_start; + ptep = folio_ptep; + nr_pages = nr; + entry = folio->swap; + page = &folio->page; + } + +check_folio: /* * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. 
A swap pte * must never point at an anonymous page in the swapcache that is @@ -4331,12 +4367,12 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) * We're already holding a reference on the page but haven't mapped it * yet. */ - swap_free(entry); + swap_free_nr(entry, nr_pages); if (should_try_to_free_swap(folio, vma, vmf->flags)) folio_free_swap(folio); - inc_mm_counter(vma->vm_mm, MM_ANONPAGES); - dec_mm_counter(vma->vm_mm, MM_SWAPENTS); + add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); + add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages); pte = mk_pte(page, vma->vm_page_prot); /* @@ -4353,27 +4389,28 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) } rmap_flags |= RMAP_EXCLUSIVE; } - flush_icache_page(vma, page); + folio_ref_add(folio, nr_pages - 1); + flush_icache_pages(vma, page, nr_pages); if (pte_swp_soft_dirty(vmf->orig_pte)) pte = pte_mksoft_dirty(pte); if (pte_swp_uffd_wp(vmf->orig_pte)) pte = pte_mkuffd_wp(pte); - vmf->orig_pte = pte; + vmf->orig_pte = pte_advance_pfn(pte, page_idx); /* ksm created a completely new copy */ if (unlikely(folio != swapcache && swapcache)) { - folio_add_new_anon_rmap(folio, vma, vmf->address); + folio_add_new_anon_rmap(folio, vma, address); folio_add_lru_vma(folio, vma); } else { - folio_add_anon_rmap_pte(folio, page, vma, vmf->address, + folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address, rmap_flags); } VM_BUG_ON(!folio_test_anon(folio) || (pte_write(pte) && !PageAnonExclusive(page))); - set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte); - arch_do_swap_page_nr(vma->vm_mm, vma, vmf->address, - pte, vmf->orig_pte, 1); + set_ptes(vma->vm_mm, address, ptep, pte, nr_pages); + arch_do_swap_page_nr(vma->vm_mm, vma, address, + pte, pte, nr_pages); folio_unlock(folio); if (folio != swapcache && swapcache) { @@ -4397,7 +4434,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) } /* No need to invalidate - it was non-present before */ - update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); + 
update_mmu_cache_range(vmf, vma, address, ptep, nr_pages); unlock: if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); -- Gitee From 115ecef3e2853760ed9bcadfd51051034e554930 Mon Sep 17 00:00:00 2001 From: Song Gao Date: Thu, 13 Jun 2024 20:05:39 +0800 Subject: [PATCH 1340/2138] anolis: LoongArch: KVM: Fix pmu build error ANBZ: #9569 This bug from the PR: https://gitee.com/anolis/cloud-kernel/pulls/3517 [...] arch/loongarch/kvm/vcpu.c:1253:13: error: redefinition of 'kvm_lose_pmu' 1253 | static void kvm_lose_pmu(struct kvm_vcpu *vcpu) | ^~~~~~~~~~~~ arch/loongarch/kvm/vcpu.c:202:13: note: previous definition of 'kvm_lose_pmu' with type 'void(struct kvm_vcpu *)' 202 | static void kvm_lose_pmu(struct kvm_vcpu *vcpu) | ^~~~~~~~~~~~ arch/loongarch/kvm/vcpu.c: In function 'kvm_lose_pmu': arch/loongarch/kvm/vcpu.c:1257:38: error: 'KVM_LARCH_PERF' undeclared (first use in this function); did you mean 'KVM_LARCH_LSX'? 1257 | if (!(vcpu->arch.aux_inuse & KVM_LARCH_PERF)) | ^~~~~~~~~~~~~~ | KVM_LARCH_LSX arch/loongarch/kvm/vcpu.c:1280:35: error: 'KVM_PMU_PLV_ENABLE' undeclared (first use in this function); did you mean 'KVM_PV_ENABLE'? 1280 | & KVM_PMU_PLV_ENABLE) == 0) | ^~~~~~~~~~~~~~~~~~ | KVM_PV_ENABLE [..] 
Signed-off-by: Song Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3535 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_vcpu.h | 2 - arch/loongarch/include/uapi/asm/kvm.h | 4 +- arch/loongarch/kvm/vcpu.c | 74 +-------------------------- 3 files changed, 3 insertions(+), 77 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 1da24994b838..9f53950959da 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -75,8 +75,6 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { } static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { } #endif -int kvm_own_pmu(struct kvm_vcpu *vcpu); - void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); void kvm_reset_timer(struct kvm_vcpu *vcpu); void kvm_save_timer(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index dc6ae66771c5..af676247dd60 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -91,8 +91,8 @@ struct kvm_fpu { #define KVM_LOONGARCH_VCPU_PVTIME_GPA 0 /* Device Control API on vm fd */ -#define KVM_LOONGARCH_VM_FEAT_CTRL 0 -#define KVM_LOONGARCH_VM_FEAT_PMU 0 +#define KVM_LOONGARCH_VM_FEAT_CTRL 1000 +#define KVM_LOONGARCH_VM_FEAT_PMU 1000 struct kvm_debug_exit_arch { }; diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index b91612200662..c1f6363b6372 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -1233,77 +1233,6 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) preempt_enable(); } -int kvm_own_pmu(struct kvm_vcpu *vcpu) -{ - unsigned long val; - - if (!kvm_guest_has_pmu(&vcpu->arch)) - return -EINVAL; - - preempt_disable(); - val = read_csr_gcfg() & ~CSR_GCFG_GPERF; - val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; - write_csr_gcfg(val); - - vcpu->arch.aux_inuse |= KVM_LARCH_PERF; - preempt_enable(); - return 
0; -} - -static void kvm_lose_pmu(struct kvm_vcpu *vcpu) -{ - struct loongarch_csrs *csr = vcpu->arch.csr; - - if (!(vcpu->arch.aux_inuse & KVM_LARCH_PERF)) - return; - - /* save guest pmu csr */ - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); - kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL0, 0); - kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL1, 0); - kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL2, 0); - kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL3, 0); - /* Disable pmu access from guest */ - write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF); - - if (((kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) | - kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) | - kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) | - kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3)) - & KVM_PMU_PLV_ENABLE) == 0) - vcpu->arch.aux_inuse &= ~KVM_LARCH_PERF; -} - -static void kvm_restore_pmu(struct kvm_vcpu *vcpu) -{ - unsigned long val; - struct loongarch_csrs *csr = vcpu->arch.csr; - - if (!(vcpu->arch.aux_inuse & KVM_LARCH_PERF)) - return; - - /* Set PM0-PM(num) to Guest */ - val = read_csr_gcfg() & ~CSR_GCFG_GPERF; - val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; - write_csr_gcfg(val); - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); -} - - int kvm_vcpu_ioctl_interrupt(struct 
kvm_vcpu *vcpu, struct kvm_interrupt *irq) { int intr = (int)irq->irq; @@ -1446,7 +1375,7 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) /* Control guest page CCA attribute */ change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT); - /* Restore hardware perf csr */ + /* Restore hardware PMU CSRs */ kvm_restore_pmu(vcpu); kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); @@ -1534,7 +1463,6 @@ static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu) struct loongarch_csrs *csr = vcpu->arch.csr; kvm_lose_fpu(vcpu); - kvm_lose_pmu(vcpu); /* * Update CSR state from hardware if software CSR state is stale, -- Gitee From 6090fdba12957ea4c65cfbe91599385b2c268a32 Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 3 Jan 2024 10:08:49 +0800 Subject: [PATCH 1341/2138] net: ngbe: convert phylib to phylink ANBZ: #8484 commit bc2426d74aa35cd8ec9c97a253ef57c2c5cd730c upstream. Implement phylink in ngbe driver, to handle phy uniformly for Wangxun ethernet devices. Signed-off-by: Jiawen Wu Reviewed-by: Russell King (Oracle) Signed-off-by: David S. 
Miller Link: https://lore.kernel.org/all/20240103020854.1656604-4-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- .../net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 6 +- drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 12 +- drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c | 118 +++++++++--------- drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h | 1 - 4 files changed, 70 insertions(+), 67 deletions(-) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index afbdf6919071..0f87898a55b2 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -44,9 +44,9 @@ static int ngbe_set_wol(struct net_device *netdev, static const struct ethtool_ops ngbe_ethtool_ops = { .get_drvinfo = wx_get_drvinfo, .get_link = ethtool_op_get_link, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, - .nway_reset = phy_ethtool_nway_reset, + .get_link_ksettings = wx_get_link_ksettings, + .set_link_ksettings = wx_set_link_ksettings, + .nway_reset = wx_nway_reset, .get_wol = ngbe_get_wol, .set_wol = ngbe_set_wol, .get_sset_count = wx_get_sset_count, diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index a5c623fd023e..db5cae8384e5 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -336,7 +336,7 @@ static void ngbe_disable_device(struct wx *wx) static void ngbe_down(struct wx *wx) { - phy_stop(wx->phydev); + phylink_stop(wx->phylink); ngbe_disable_device(wx); wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); @@ -359,7 +359,7 @@ static void ngbe_up(struct wx *wx) if (wx->gpio_ctrl) ngbe_sfp_modules_txrx_powerctl(wx, true); - phy_start(wx->phydev); + phylink_start(wx->phylink); } /** @@ -388,7 +388,7 @@ static int ngbe_open(struct net_device 
*netdev) if (err) goto err_free_resources; - err = ngbe_phy_connect(wx); + err = phylink_connect_phy(wx->phylink, wx->phydev); if (err) goto err_free_irq; @@ -404,7 +404,7 @@ static int ngbe_open(struct net_device *netdev) return 0; err_dis_phy: - phy_disconnect(wx->phydev); + phylink_disconnect_phy(wx->phylink); err_free_irq: wx_free_irq(wx); err_free_resources: @@ -430,7 +430,7 @@ static int ngbe_close(struct net_device *netdev) ngbe_down(wx); wx_free_irq(wx); wx_free_resources(wx); - phy_disconnect(wx->phydev); + phylink_disconnect_phy(wx->phylink); wx_control_hw(wx, false); return 0; @@ -681,6 +681,7 @@ static int ngbe_probe(struct pci_dev *pdev, return 0; err_register: + phylink_destroy(wx->phylink); wx_control_hw(wx, false); err_clear_interrupt_scheme: wx_clear_interrupt_scheme(wx); @@ -710,6 +711,7 @@ static void ngbe_remove(struct pci_dev *pdev) netdev = wx->netdev; unregister_netdev(netdev); + phylink_destroy(wx->phylink); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c index 2afae24c0c69..cc75856f231a 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -56,22 +56,26 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr, return ret; } -static void ngbe_handle_link_change(struct net_device *dev) +static void ngbe_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) { - struct wx *wx = netdev_priv(dev); - struct phy_device *phydev; - u32 lan_speed, reg; +} + +static void ngbe_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ +} - phydev = wx->phydev; - if (!(wx->link != phydev->link || - wx->speed != phydev->speed || - wx->duplex != phydev->duplex)) - return; +static void ngbe_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned 
int mode, phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct wx *wx = phylink_to_wx(config); + u32 lan_speed, reg; - wx->link = phydev->link; - wx->speed = phydev->speed; - wx->duplex = phydev->duplex; - switch (phydev->speed) { + switch (speed) { case SPEED_10: lan_speed = 0; break; @@ -83,58 +87,51 @@ static void ngbe_handle_link_change(struct net_device *dev) lan_speed = 2; break; } + wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed); - if (phydev->link) { - reg = rd32(wx, WX_MAC_TX_CFG); - reg &= ~WX_MAC_TX_CFG_SPEED_MASK; - reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE; - wr32(wx, WX_MAC_TX_CFG, reg); - /* Re configure MAC RX */ - reg = rd32(wx, WX_MAC_RX_CFG); - wr32(wx, WX_MAC_RX_CFG, reg); - wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); - reg = rd32(wx, WX_MAC_WDG_TIMEOUT); - wr32(wx, WX_MAC_WDG_TIMEOUT, reg); - } - phy_print_status(phydev); + reg = rd32(wx, WX_MAC_TX_CFG); + reg &= ~WX_MAC_TX_CFG_SPEED_MASK; + reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE; + wr32(wx, WX_MAC_TX_CFG, reg); + + /* Re configure MAC Rx */ + reg = rd32(wx, WX_MAC_RX_CFG); + wr32(wx, WX_MAC_RX_CFG, reg); + wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); + reg = rd32(wx, WX_MAC_WDG_TIMEOUT); + wr32(wx, WX_MAC_WDG_TIMEOUT, reg); } -int ngbe_phy_connect(struct wx *wx) +static const struct phylink_mac_ops ngbe_mac_ops = { + .mac_config = ngbe_mac_config, + .mac_link_down = ngbe_mac_link_down, + .mac_link_up = ngbe_mac_link_up, +}; + +static int ngbe_phylink_init(struct wx *wx) { - int ret; + struct phylink_config *config; + phy_interface_t phy_mode; + struct phylink *phylink; - /* The MAC only has add the Tx delay and it can not be modified. - * So just disable TX delay in PHY, and it is does not matter to - * internal phy. 
- */ - ret = phy_connect_direct(wx->netdev, - wx->phydev, - ngbe_handle_link_change, - PHY_INTERFACE_MODE_RGMII_RXID); - if (ret) { - wx_err(wx, "PHY connect failed.\n"); - return ret; - } + config = &wx->phylink_config; + config->dev = &wx->netdev->dev; + config->type = PHYLINK_NETDEV; + config->mac_capabilities = MAC_1000FD | MAC_100FD | MAC_10FD | + MAC_SYM_PAUSE | MAC_ASYM_PAUSE; + config->mac_managed_pm = true; - return 0; -} + phy_mode = PHY_INTERFACE_MODE_RGMII_ID; + __set_bit(PHY_INTERFACE_MODE_RGMII_ID, config->supported_interfaces); -static void ngbe_phy_fixup(struct wx *wx) -{ - struct phy_device *phydev = wx->phydev; - struct ethtool_eee eee; - - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - - phydev->mac_managed_pm = true; - if (wx->mac_type != em_mac_type_mdi) - return; - /* disable EEE, internal phy does not support eee */ - memset(&eee, 0, sizeof(eee)); - phy_ethtool_set_eee(phydev, &eee); + phylink = phylink_create(config, NULL, phy_mode, &ngbe_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + wx->phylink = phylink; + + return 0; } int ngbe_mdio_init(struct wx *wx) @@ -169,11 +166,16 @@ int ngbe_mdio_init(struct wx *wx) return -ENODEV; phy_attached_info(wx->phydev); - ngbe_phy_fixup(wx); wx->link = 0; wx->speed = 0; wx->duplex = 0; + ret = ngbe_phylink_init(wx); + if (ret) { + wx_err(wx, "failed to init phylink: %d\n", ret); + return ret; + } + return 0; } diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h index 0a6400dd89c4..f610b771888a 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h @@ -7,6 +7,5 @@ #ifndef _NGBE_MDIO_H_ #define _NGBE_MDIO_H_ -int ngbe_phy_connect(struct wx *wx); int ngbe_mdio_init(struct wx *wx); #endif /* _NGBE_MDIO_H_ */ -- Gitee From 
081f49c08b7d8d7bdf274a2821c96295eb7c9f4e Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 3 Jan 2024 10:08:50 +0800 Subject: [PATCH 1342/2138] net: wangxun: add flow control support ANBZ: #8484 commit 2fe2ca09da953bac778eab5dfb309b4e7d274b1a upstream. Add support to set pause params with ethtool -A and get pause params with ethtool -a, for ethernet driver txgbe and ngbe. Signed-off-by: Jiawen Wu Reviewed-by: Russell King (Oracle) Signed-off-by: David S. Miller Link: https://lore.kernel.org/all/20240103020854.1656604-5-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- .../net/ethernet/wangxun/libwx/wx_ethtool.c | 18 ++ .../net/ethernet/wangxun/libwx/wx_ethtool.h | 4 + drivers/net/ethernet/wangxun/libwx/wx_hw.c | 172 ++++++++++++++++++ drivers/net/ethernet/wangxun/libwx/wx_hw.h | 1 + drivers/net/ethernet/wangxun/libwx/wx_type.h | 48 +++++ .../net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 2 + drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c | 2 + .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 2 + .../net/ethernet/wangxun/txgbe/txgbe_phy.c | 2 + 9 files changed, 251 insertions(+) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index 12feb8a5ee75..e4d2bbf7dad6 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -211,3 +211,21 @@ int wx_set_link_ksettings(struct net_device *netdev, return phylink_ethtool_ksettings_set(wx->phylink, cmd); } EXPORT_SYMBOL(wx_set_link_ksettings); + +void wx_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct wx *wx = netdev_priv(netdev); + + phylink_ethtool_get_pauseparam(wx->phylink, pause); +} +EXPORT_SYMBOL(wx_get_pauseparam); + +int wx_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_set_pauseparam(wx->phylink, pause); +} 
+EXPORT_SYMBOL(wx_set_pauseparam); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index f15cc445ae0f..7d3d85f212eb 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -18,4 +18,8 @@ int wx_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd); int wx_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd); +void wx_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause); +int wx_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index 533e912af089..d11f7d8db194 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1158,6 +1158,81 @@ static void wx_set_rxpba(struct wx *wx) wr32(wx, WX_TDM_PB_THRE(0), txpbthresh); } +#define WX_ETH_FRAMING 20 + +/** + * wx_hpbthresh - calculate high water mark for flow control + * + * @wx: board private structure to calculate for + **/ +static int wx_hpbthresh(struct wx *wx) +{ + struct net_device *dev = wx->netdev; + int link, tc, kb, marker; + u32 dv_id, rx_pba; + + /* Calculate max LAN frame size */ + link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + WX_ETH_FRAMING; + tc = link; + + /* Calculate delay value for device */ + dv_id = WX_DV(link, tc); + + /* Delay value is calculated in bit times convert to KB */ + kb = WX_BT2KB(dv_id); + rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT; + + marker = rx_pba - kb; + + /* It is possible that the packet buffer is not large enough + * to provide required headroom. In this case throw an error + * to user and a do the best we can. + */ + if (marker < 0) { + dev_warn(&wx->pdev->dev, + "Packet Buffer can not provide enough headroom to support flow control. 
Decrease MTU or number of traffic classes\n"); + marker = tc + 1; + } + + return marker; +} + +/** + * wx_lpbthresh - calculate low water mark for flow control + * + * @wx: board private structure to calculate for + **/ +static int wx_lpbthresh(struct wx *wx) +{ + struct net_device *dev = wx->netdev; + u32 dv_id; + int tc; + + /* Calculate max LAN frame size */ + tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Calculate delay value for device */ + dv_id = WX_LOW_DV(tc); + + /* Delay value is calculated in bit times convert to KB */ + return WX_BT2KB(dv_id); +} + +/** + * wx_pbthresh_setup - calculate and setup high low water marks + * + * @wx: board private structure to calculate for + **/ +static void wx_pbthresh_setup(struct wx *wx) +{ + wx->fc.high_water = wx_hpbthresh(wx); + wx->fc.low_water = wx_lpbthresh(wx); + + /* Low water marks must not be larger than high water marks */ + if (wx->fc.low_water > wx->fc.high_water) + wx->fc.low_water = 0; +} + static void wx_configure_port(struct wx *wx) { u32 value, i; @@ -1584,6 +1659,7 @@ static void wx_configure_isb(struct wx *wx) void wx_configure(struct wx *wx) { wx_set_rxpba(wx); + wx_pbthresh_setup(wx); wx_configure_port(wx); wx_set_rx_mode(wx->netdev); @@ -2003,6 +2079,102 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) } EXPORT_SYMBOL(wx_vlan_rx_kill_vid); +static void wx_enable_rx_drop(struct wx *wx, struct wx_ring *ring) +{ + u16 reg_idx = ring->reg_idx; + u32 srrctl; + + srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + srrctl |= WX_PX_RR_CFG_DROP_EN; + + wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl); +} + +static void wx_disable_rx_drop(struct wx *wx, struct wx_ring *ring) +{ + u16 reg_idx = ring->reg_idx; + u32 srrctl; + + srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + srrctl &= ~WX_PX_RR_CFG_DROP_EN; + + wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl); +} + +int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause) +{ + u16 pause_time = WX_DEFAULT_FCPAUSE; + u32 mflcn_reg, fccfg_reg, reg; + u32 
fcrtl, fcrth; + int i; + + /* Low water mark of zero causes XOFF floods */ + if (tx_pause && wx->fc.high_water) { + if (!wx->fc.low_water || wx->fc.low_water >= wx->fc.high_water) { + wx_err(wx, "Invalid water mark configuration\n"); + return -EINVAL; + } + } + + /* Disable any previous flow control settings */ + mflcn_reg = rd32(wx, WX_MAC_RX_FLOW_CTRL); + mflcn_reg &= ~WX_MAC_RX_FLOW_CTRL_RFE; + + fccfg_reg = rd32(wx, WX_RDB_RFCC); + fccfg_reg &= ~WX_RDB_RFCC_RFCE_802_3X; + + if (rx_pause) + mflcn_reg |= WX_MAC_RX_FLOW_CTRL_RFE; + if (tx_pause) + fccfg_reg |= WX_RDB_RFCC_RFCE_802_3X; + + /* Set 802.3x based flow control settings. */ + wr32(wx, WX_MAC_RX_FLOW_CTRL, mflcn_reg); + wr32(wx, WX_RDB_RFCC, fccfg_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + if (tx_pause && wx->fc.high_water) { + fcrtl = (wx->fc.low_water << 10) | WX_RDB_RFCL_XONE; + wr32(wx, WX_RDB_RFCL, fcrtl); + fcrth = (wx->fc.high_water << 10) | WX_RDB_RFCH_XOFFE; + } else { + wr32(wx, WX_RDB_RFCL, 0); + /* In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. + */ + fcrth = rd32(wx, WX_RDB_PB_SZ(0)) - 24576; + } + + wr32(wx, WX_RDB_RFCH, fcrth); + + /* Configure pause time */ + reg = pause_time * 0x00010001; + wr32(wx, WX_RDB_RFCV, reg); + + /* Configure flow control refresh threshold value */ + wr32(wx, WX_RDB_RFCRT, pause_time / 2); + + /* We should set the drop enable bit if: + * Number of Rx queues > 1 and flow control is disabled + * + * This allows us to avoid head of line blocking for security + * and performance reasons. 
+ */ + if (wx->num_rx_queues > 1 && !tx_pause) { + for (i = 0; i < wx->num_rx_queues; i++) + wx_enable_rx_drop(wx, wx->rx_ring[i]); + } else { + for (i = 0; i < wx->num_rx_queues; i++) + wx_disable_rx_drop(wx, wx->rx_ring[i]); + } + + return 0; +} +EXPORT_SYMBOL(wx_fc_enable); + /** * wx_update_stats - Update the board statistics counters. * @wx: board private structure diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index 12c20a7c364d..9e219fa717a2 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -41,6 +41,7 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); int wx_sw_init(struct wx *wx); int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); +int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause); void wx_update_stats(struct wx *wx); void wx_clear_hw_cntrs(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 5b064c434053..561f752defec 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -131,6 +131,15 @@ #define WX_RDB_PFCMACDAH 0x19214 #define WX_RDB_LXOFFTXC 0x19218 #define WX_RDB_LXONTXC 0x1921C +/* Flow Control Registers */ +#define WX_RDB_RFCV 0x19200 +#define WX_RDB_RFCL 0x19220 +#define WX_RDB_RFCL_XONE BIT(31) +#define WX_RDB_RFCH 0x19260 +#define WX_RDB_RFCH_XOFFE BIT(31) +#define WX_RDB_RFCRT 0x192A0 +#define WX_RDB_RFCC 0x192A4 +#define WX_RDB_RFCC_RFCE_802_3X BIT(3) /* ring assignment */ #define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) #define WX_RDB_PL_CFG_L4HDR BIT(1) @@ -331,6 +340,7 @@ enum WX_MSCA_CMD_value { #define WX_PX_MPRC(_i) (0x01020 + ((_i) * 0x40)) /* PX_RR_CFG bit definitions */ #define WX_PX_RR_CFG_VLAN BIT(31) +#define WX_PX_RR_CFG_DROP_EN BIT(30) #define 
WX_PX_RR_CFG_SPLIT_MODE BIT(26) #define WX_PX_RR_CFG_RR_THER_SHIFT 16 #define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12) @@ -368,6 +378,38 @@ enum WX_MSCA_CMD_value { #define WX_MAC_STATE_MODIFIED 0x2 #define WX_MAC_STATE_IN_USE 0x4 +/* BitTimes (BT) conversion */ +#define WX_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024)) +#define WX_B2BT(BT) ((BT) * 8) + +/* Calculate Delay to respond to PFC */ +#define WX_PFC_D 672 +/* Calculate Cable Delay */ +#define WX_CABLE_DC 5556 /* Delay Copper */ +/* Calculate Delay incurred from higher layer */ +#define WX_HD 6144 + +/* Calculate Interface Delay */ +#define WX_PHY_D 12800 +#define WX_MAC_D 4096 +#define WX_XAUI_D (2 * 1024) +#define WX_ID (WX_MAC_D + WX_XAUI_D + WX_PHY_D) +/* Calculate PCI Bus delay for low thresholds */ +#define WX_PCI_DELAY 10000 + +/* Calculate delay value in bit times */ +#define WX_DV(_max_frame_link, _max_frame_tc) \ + ((36 * (WX_B2BT(_max_frame_link) + WX_PFC_D + \ + (2 * WX_CABLE_DC) + (2 * WX_ID) + WX_HD) / 25 + 1) + \ + 2 * WX_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define WX_LOW_DV(_max_frame_tc) \ + (2 * (2 * WX_B2BT(_max_frame_tc) + (36 * WX_PCI_DELAY / 25) + 1)) + +/* flow control */ +#define WX_DEFAULT_FCPAUSE 0xFFFF + #define WX_MAX_RXD 8192 #define WX_MAX_TXD 8192 @@ -880,6 +922,11 @@ enum wx_isb_idx { WX_ISB_MAX }; +struct wx_fc_info { + u32 high_water; /* Flow Ctrl High-water */ + u32 low_water; /* Flow Ctrl Low-water */ +}; + /* Statistics counters collected by the MAC */ struct wx_hw_stats { u64 gprc; @@ -920,6 +967,7 @@ struct wx { enum sp_media_type media_type; struct wx_eeprom_info eeprom; struct wx_addr_filter_info addr_ctrl; + struct wx_fc_info fc; struct wx_mac_addr *mac_table; u16 device_id; u16 vendor_id; diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 0f87898a55b2..9a89f9576180 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ 
b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -54,6 +54,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = { .get_ethtool_stats = wx_get_ethtool_stats, .get_eth_mac_stats = wx_get_mac_stats, .get_pause_stats = wx_get_pause_stats, + .get_pauseparam = wx_get_pauseparam, + .set_pauseparam = wx_set_pauseparam, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c index cc75856f231a..ec54b18c5fe7 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -75,6 +75,8 @@ static void ngbe_mac_link_up(struct phylink_config *config, struct wx *wx = phylink_to_wx(config); u32 lan_speed, reg; + wx_fc_enable(wx, tx_pause, rx_pause); + switch (speed) { case SPEED_10: lan_speed = 0; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 60f351a3b89d..cdaa19528248 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -21,6 +21,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .get_ethtool_stats = wx_get_ethtool_stats, .get_eth_mac_stats = wx_get_mac_stats, .get_pause_stats = wx_get_pause_stats, + .get_pauseparam = wx_get_pauseparam, + .set_pauseparam = wx_set_pauseparam, }; void txgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 3c0524d19866..b1b5cdc04a92 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -190,6 +190,8 @@ static void txgbe_mac_link_up(struct phylink_config *config, struct wx *wx = phylink_to_wx(config); u32 txcfg, wdg; + wx_fc_enable(wx, tx_pause, rx_pause); + txcfg = rd32(wx, WX_MAC_TX_CFG); txcfg &= ~WX_MAC_TX_CFG_SPEED_MASK; -- Gitee From 
f9e9bae9ed4e5bd733448f325c90cc7654a39aba Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 3 Jan 2024 10:08:51 +0800 Subject: [PATCH 1343/2138] net: wangxun: add ethtool_ops for ring parameters ANBZ: #8484 commit 883b5984a5d2900468af5ab979cae90547a78da4 upstream. Support to query RX/TX depth with ethtool -g, and change RX/TX depth with ethtool -G. Signed-off-by: Jiawen Wu Signed-off-by: David S. Miller Link: https://lore.kernel.org/all/20231212080438.1361308-6-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- .../net/ethernet/wangxun/libwx/wx_ethtool.c | 18 +++++ .../net/ethernet/wangxun/libwx/wx_ethtool.h | 4 ++ drivers/net/ethernet/wangxun/libwx/wx_lib.c | 66 +++++++++++++++++++ drivers/net/ethernet/wangxun/libwx/wx_lib.h | 2 + drivers/net/ethernet/wangxun/libwx/wx_type.h | 6 ++ .../net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 53 +++++++++++++++ drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 4 +- drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 3 + .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 50 ++++++++++++++ .../net/ethernet/wangxun/txgbe/txgbe_main.c | 8 ++- .../net/ethernet/wangxun/txgbe/txgbe_type.h | 3 + 11 files changed, 214 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index e4d2bbf7dad6..77da6111fbce 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -229,3 +229,21 @@ int wx_set_pauseparam(struct net_device *netdev, return phylink_ethtool_set_pauseparam(wx->phylink, pause); } EXPORT_SYMBOL(wx_set_pauseparam); + +void wx_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + + ring->rx_max_pending = WX_MAX_RXD; + ring->tx_max_pending = WX_MAX_TXD; + ring->rx_mini_max_pending = 0; + 
ring->rx_jumbo_max_pending = 0; + ring->rx_pending = wx->rx_ring_count; + ring->tx_pending = wx->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} +EXPORT_SYMBOL(wx_get_ringparam); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index 7d3d85f212eb..7651ec4b7dd9 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -22,4 +22,8 @@ void wx_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause); int wx_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause); +void wx_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index eea9fc0df873..d5d4ff0c9450 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -2674,4 +2674,70 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) } EXPORT_SYMBOL(wx_set_features); +void wx_set_ring(struct wx *wx, u32 new_tx_count, + u32 new_rx_count, struct wx_ring *temp_ring) +{ + int i, err = 0; + + /* Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. 
+ */ + if (new_tx_count != wx->tx_ring_count) { + for (i = 0; i < wx->num_tx_queues; i++) { + memcpy(&temp_ring[i], wx->tx_ring[i], + sizeof(struct wx_ring)); + + temp_ring[i].count = new_tx_count; + err = wx_setup_tx_resources(&temp_ring[i]); + if (err) { + wx_err(wx, "setup new tx resources failed, keep using the old config\n"); + while (i) { + i--; + wx_free_tx_resources(&temp_ring[i]); + } + return; + } + } + + for (i = 0; i < wx->num_tx_queues; i++) { + wx_free_tx_resources(wx->tx_ring[i]); + + memcpy(wx->tx_ring[i], &temp_ring[i], + sizeof(struct wx_ring)); + } + + wx->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != wx->rx_ring_count) { + for (i = 0; i < wx->num_rx_queues; i++) { + memcpy(&temp_ring[i], wx->rx_ring[i], + sizeof(struct wx_ring)); + + temp_ring[i].count = new_rx_count; + err = wx_setup_rx_resources(&temp_ring[i]); + if (err) { + wx_err(wx, "setup new rx resources failed, keep using the old config\n"); + while (i) { + i--; + wx_free_rx_resources(&temp_ring[i]); + } + return; + } + } + + for (i = 0; i < wx->num_rx_queues; i++) { + wx_free_rx_resources(wx->rx_ring[i]); + memcpy(wx->rx_ring[i], &temp_ring[i], + sizeof(struct wx_ring)); + } + + wx->rx_ring_count = new_rx_count; + } +} +EXPORT_SYMBOL(wx_set_ring); + MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h index df1f4a5951f0..af1381c13d9e 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -29,5 +29,7 @@ int wx_setup_resources(struct wx *wx); void wx_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats); int wx_set_features(struct net_device *netdev, netdev_features_t features); +void wx_set_ring(struct wx *wx, u32 new_tx_count, + u32 new_rx_count, struct wx_ring *temp_ring); #endif /* _NGBE_LIB_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h 
b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 561f752defec..24588bc1eb57 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -412,6 +412,12 @@ enum WX_MSCA_CMD_value { #define WX_MAX_RXD 8192 #define WX_MAX_TXD 8192 +#define WX_MIN_RXD 128 +#define WX_MIN_TXD 128 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8 #define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ #define VMDQ_P(p) p diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 9a89f9576180..52d4167dcabe 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -7,7 +7,10 @@ #include "../libwx/wx_ethtool.h" #include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" +#include "../libwx/wx_hw.h" #include "ngbe_ethtool.h" +#include "ngbe_type.h" static void ngbe_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -41,6 +44,54 @@ static int ngbe_set_wol(struct net_device *netdev, return 0; } +static int ngbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; + struct wx_ring *temp_ring; + int i; + + new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == wx->tx_ring_count && + new_rx_count == wx->rx_ring_count) + return 0; + + if (!netif_running(wx->netdev)) { + for (i = 0; i < wx->num_tx_queues; i++) + wx->tx_ring[i]->count = new_tx_count; + for (i = 0; i < 
wx->num_rx_queues; i++) + wx->rx_ring[i]->count = new_rx_count; + wx->tx_ring_count = new_tx_count; + wx->rx_ring_count = new_rx_count; + + return 0; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, wx->num_tx_queues, wx->num_rx_queues); + temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL); + if (!temp_ring) + return -ENOMEM; + + ngbe_down(wx); + + wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring); + kvfree(temp_ring); + + wx_configure(wx); + ngbe_up(wx); + + return 0; +} + static const struct ethtool_ops ngbe_ethtool_ops = { .get_drvinfo = wx_get_drvinfo, .get_link = ethtool_op_get_link, @@ -56,6 +107,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = { .get_pause_stats = wx_get_pause_stats, .get_pauseparam = wx_get_pauseparam, .set_pauseparam = wx_set_pauseparam, + .get_ringparam = wx_get_ringparam, + .set_ringparam = ngbe_set_ringparam, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index db5cae8384e5..96d80c595cb8 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -334,7 +334,7 @@ static void ngbe_disable_device(struct wx *wx) wx_update_stats(wx); } -static void ngbe_down(struct wx *wx) +void ngbe_down(struct wx *wx) { phylink_stop(wx->phylink); ngbe_disable_device(wx); @@ -342,7 +342,7 @@ static void ngbe_down(struct wx *wx) wx_clean_all_rx_rings(wx); } -static void ngbe_up(struct wx *wx) +void ngbe_up(struct wx *wx) { wx_configure_vectors(wx); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index ff754d69bdf6..0a98080a197a 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -130,4 +130,7 @@ extern char ngbe_driver_name[]; +void ngbe_down(struct wx *wx); +void ngbe_up(struct wx *wx); + #endif /* _NGBE_TYPE_H_ */ 
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index cdaa19528248..bd817248a831 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -7,9 +7,57 @@ #include "../libwx/wx_ethtool.h" #include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" #include "txgbe_type.h" #include "txgbe_ethtool.h" +static int txgbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; + struct wx_ring *temp_ring; + int i; + + new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == wx->tx_ring_count && + new_rx_count == wx->rx_ring_count) + return 0; + + if (!netif_running(wx->netdev)) { + for (i = 0; i < wx->num_tx_queues; i++) + wx->tx_ring[i]->count = new_tx_count; + for (i = 0; i < wx->num_rx_queues; i++) + wx->rx_ring[i]->count = new_rx_count; + wx->tx_ring_count = new_tx_count; + wx->rx_ring_count = new_rx_count; + + return 0; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, wx->num_tx_queues, wx->num_rx_queues); + temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL); + if (!temp_ring) + return -ENOMEM; + + txgbe_down(wx); + + wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring); + kvfree(temp_ring); + + txgbe_up(wx); + + return 0; +} + static const struct ethtool_ops txgbe_ethtool_ops = { .get_drvinfo = wx_get_drvinfo, .nway_reset = wx_nway_reset, @@ -23,6 +71,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .get_pause_stats = wx_get_pause_stats, .get_pauseparam = 
wx_get_pauseparam, .set_pauseparam = wx_set_pauseparam, + .get_ringparam = wx_get_ringparam, + .set_ringparam = txgbe_set_ringparam, }; void txgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 1007ae2541ce..bcc47bc6264a 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -288,7 +288,7 @@ static void txgbe_disable_device(struct wx *wx) wx_update_stats(wx); } -static void txgbe_down(struct wx *wx) +void txgbe_down(struct wx *wx) { txgbe_disable_device(wx); txgbe_reset(wx); @@ -298,6 +298,12 @@ static void txgbe_down(struct wx *wx) wx_clean_all_rx_rings(wx); } +void txgbe_up(struct wx *wx) +{ + wx_configure(wx); + txgbe_up_complete(wx); +} + /** * txgbe_init_type_code - Initialize the shared code * @wx: pointer to hardware structure diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 5494ea88df0a..801fd0aed1ff 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -129,6 +129,9 @@ extern char txgbe_driver_name[]; +void txgbe_down(struct wx *wx); +void txgbe_up(struct wx *wx); + #define NODE_PROP(_NAME, _PROP) \ (const struct software_node) { \ .name = _NAME, \ -- Gitee From fc56c4fb919c0816580f955d314beb83ef0b8885 Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 3 Jan 2024 10:08:52 +0800 Subject: [PATCH 1344/2138] net: wangxun: add coalesce options support ANBZ: #8484 commit 4ac2d9dff4b01fb210f951dcb67badcc2a1aa427 upstream. Support to show RX/TX coalesce with ethtool -c and set RX/TX coalesce with ethtool -C. Signed-off-by: Jiawen Wu Signed-off-by: David S. 
Miller Link: https://lore.kernel.org/all/20240103020854.1656604-7-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- .../net/ethernet/wangxun/libwx/wx_ethtool.c | 101 ++++++++++++++++++ .../net/ethernet/wangxun/libwx/wx_ethtool.h | 8 ++ drivers/net/ethernet/wangxun/libwx/wx_lib.c | 2 +- drivers/net/ethernet/wangxun/libwx/wx_lib.h | 1 + drivers/net/ethernet/wangxun/libwx/wx_type.h | 1 + .../net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 4 + .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 4 + 7 files changed, 120 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index 77da6111fbce..ccc3f1697a76 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -8,6 +8,7 @@ #include "wx_type.h" #include "wx_ethtool.h" #include "wx_hw.h" +#include "wx_lib.h" struct wx_stats { char stat_string[ETH_GSTRING_LEN]; @@ -247,3 +248,103 @@ void wx_get_ringparam(struct net_device *netdev, ring->rx_jumbo_pending = 0; } EXPORT_SYMBOL(wx_get_ringparam); + +int wx_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + + ec->tx_max_coalesced_frames_irq = wx->tx_work_limit; + /* only valid if in constant ITR mode */ + if (wx->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = wx->rx_itr_setting; + else + ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (wx->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = wx->tx_itr_setting; + else + ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2; + + return 0; +} +EXPORT_SYMBOL(wx_get_coalesce); + +int wx_set_coalesce(struct 
net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + u16 tx_itr_param, rx_itr_param; + struct wx_q_vector *q_vector; + u16 max_eitr; + int i; + + if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EOPNOTSUPP; + } + + if (ec->tx_max_coalesced_frames_irq) + wx->tx_work_limit = ec->tx_max_coalesced_frames_irq; + + if (wx->mac.type == wx_mac_sp) + max_eitr = WX_SP_MAX_EITR; + else + max_eitr = WX_EM_MAX_EITR; + + if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) || + (ec->tx_coalesce_usecs > (max_eitr >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + wx->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else + wx->rx_itr_setting = ec->rx_coalesce_usecs; + + if (wx->rx_itr_setting == 1) + rx_itr_param = WX_20K_ITR; + else + rx_itr_param = wx->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + wx->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + wx->tx_itr_setting = ec->tx_coalesce_usecs; + + if (wx->tx_itr_setting == 1) { + if (wx->mac.type == wx_mac_sp) + tx_itr_param = WX_12K_ITR; + else + tx_itr_param = WX_20K_ITR; + } else { + tx_itr_param = wx->tx_itr_setting; + } + + /* mixed Rx/Tx */ + if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) + wx->tx_itr_setting = wx->rx_itr_setting; + + for (i = 0; i < wx->num_q_vectors; i++) { + q_vector = wx->q_vector[i]; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + wx_write_eitr(q_vector); + } + + return 0; +} +EXPORT_SYMBOL(wx_set_coalesce); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index 7651ec4b7dd9..3cd0495a6fbb 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ 
b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -26,4 +26,12 @@ void wx_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack); +int wx_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); +int wx_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index d5d4ff0c9450..0587d2d49c86 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -2083,7 +2083,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction, * when it needs to update EITR registers at runtime. Hardware * specific quirks/differences are taken care of here. 
*/ -static void wx_write_eitr(struct wx_q_vector *q_vector) +void wx_write_eitr(struct wx_q_vector *q_vector) { struct wx *wx = q_vector->wx; int v_idx = q_vector->v_idx; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h index af1381c13d9e..ec909e876720 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -21,6 +21,7 @@ void wx_free_irq(struct wx *wx); int wx_setup_isb_resources(struct wx *wx); void wx_free_isb_resources(struct wx *wx); u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx); +void wx_write_eitr(struct wx_q_vector *q_vector); void wx_configure_vectors(struct wx *wx); void wx_clean_all_rx_rings(struct wx *wx); void wx_clean_all_tx_rings(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 24588bc1eb57..17cdffe388d0 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -315,6 +315,7 @@ enum WX_MSCA_CMD_value { #define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ #define WX_7K_ITR 595 #define WX_12K_ITR 336 +#define WX_20K_ITR 200 #define WX_SP_MAX_EITR 0x00000FF8U #define WX_EM_MAX_EITR 0x00007FFCU diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 52d4167dcabe..81cb1c23fa84 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -93,6 +93,8 @@ static int ngbe_set_ringparam(struct net_device *netdev, } static const struct ethtool_ops ngbe_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, .get_drvinfo = wx_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = wx_get_link_ksettings, @@ -109,6 +111,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = { .set_pauseparam = wx_set_pauseparam, 
.get_ringparam = wx_get_ringparam, .set_ringparam = ngbe_set_ringparam, + .get_coalesce = wx_get_coalesce, + .set_coalesce = wx_set_coalesce, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index bd817248a831..9a6856cca411 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -59,6 +59,8 @@ static int txgbe_set_ringparam(struct net_device *netdev, } static const struct ethtool_ops txgbe_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, .get_drvinfo = wx_get_drvinfo, .nway_reset = wx_nway_reset, .get_link = ethtool_op_get_link, @@ -73,6 +75,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .set_pauseparam = wx_set_pauseparam, .get_ringparam = wx_get_ringparam, .set_ringparam = txgbe_set_ringparam, + .get_coalesce = wx_get_coalesce, + .set_coalesce = wx_set_coalesce, }; void txgbe_set_ethtool_ops(struct net_device *netdev) -- Gitee From aa902beb50d6ca0de54e7c1fc723c20ea58bf545 Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 3 Jan 2024 10:08:54 +0800 Subject: [PATCH 1345/2138] net: wangxun: add ethtool_ops for msglevel ANBZ: #8484 commit b746dc6bdde5a9a03309f208733a08665d4a0cb4 upstream. Add support to get and set msglevel for driver txgbe and ngbe. Signed-off-by: Jiawen Wu Reviewed-by: Andrew Lunn Signed-off-by: David S.
Miller Link: https://lore.kernel.org/all/20240103020854.1656604-9-jiawenwu@trustnetic.com Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- drivers/net/ethernet/wangxun/libwx/wx_ethtool.c | 16 ++++++++++++++++ drivers/net/ethernet/wangxun/libwx/wx_ethtool.h | 2 ++ drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 2 ++ .../net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 2 ++ 4 files changed, 22 insertions(+) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index ccc3f1697a76..f3c7e19dff5c 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -348,3 +348,19 @@ int wx_set_coalesce(struct net_device *netdev, return 0; } EXPORT_SYMBOL(wx_set_coalesce); + +u32 wx_get_msglevel(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + return wx->msg_enable; +} +EXPORT_SYMBOL(wx_get_msglevel); + +void wx_set_msglevel(struct net_device *netdev, u32 data) +{ + struct wx *wx = netdev_priv(netdev); + + wx->msg_enable = data; +} +EXPORT_SYMBOL(wx_set_msglevel); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index 3cd0495a6fbb..d79157532d3d 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -34,4 +34,6 @@ int wx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack); +u32 wx_get_msglevel(struct net_device *netdev); +void wx_set_msglevel(struct net_device *netdev, u32 data); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 81cb1c23fa84..5800bd8c8696 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -113,6 +113,8 @@ static 
const struct ethtool_ops ngbe_ethtool_ops = { .set_ringparam = ngbe_set_ringparam, .get_coalesce = wx_get_coalesce, .set_coalesce = wx_set_coalesce, + .get_msglevel = wx_get_msglevel, + .set_msglevel = wx_set_msglevel, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 9a6856cca411..fa83cac320d3 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -77,6 +77,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .set_ringparam = txgbe_set_ringparam, .get_coalesce = wx_get_coalesce, .set_coalesce = wx_set_coalesce, + .get_msglevel = wx_get_msglevel, + .set_msglevel = wx_set_msglevel, }; void txgbe_set_ethtool_ops(struct net_device *netdev) -- Gitee From 29f6154be4d7b02f09eefa8e42819bf23737d7c2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 11 Jan 2024 11:33:11 -0800 Subject: [PATCH 1346/2138] net: fill in MODULE_DESCRIPTION()s for wx_lib ANBZ: #8484 commit 907ee6681788556b9ade3ad0a1f6f4aea192399c upstream. W=1 builds now warn if module is built without a MODULE_DESCRIPTION(). Add a description to Wangxun's common code lib. Signed-off-by: Jakub Kicinski Signed-off-by: David S.
Miller Link: https://lore.kernel.org/all/20240111193311.4152859-1-kuba@kernel.org Signed-off-by: Duanqiang Wen Link: https://gitee.com/anolis/cloud-kernel/pulls/2984 --- drivers/net/ethernet/wangxun/libwx/wx_lib.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 0587d2d49c86..9ebe5493b7f2 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -2740,4 +2740,5 @@ void wx_set_ring(struct wx *wx, u32 new_tx_count, } EXPORT_SYMBOL(wx_set_ring); +MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers."); MODULE_LICENSE("GPL"); -- Gitee From 9fdcb45941ef90ca5f023a7d00800e89de8a81e4 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Fri, 29 Sep 2023 20:25:38 -0700 Subject: [PATCH 1347/2138] shmem: shrink shmem_inode_info: dir_offsets in a union ANBZ: #9728 commit ee615d4585cfc305bf6c218a62123c3051f8b4a3 upstream. Patch series "shmem,tmpfs: general maintenance". Mostly just cosmetic mods in mm/shmem.c, but the last two enforcing the "size=" limit better. 8/8 goes into percpu counter territory, and could stand alone. This patch (of 8): Shave 32 bytes off (the 64-bit) shmem_inode_info. There was a 4-byte pahole after stop_eviction, better filled by fsflags. And the 24-byte dir_offsets can only be used by directories, whereas shrinklist and swaplist only by shmem_mapping() inodes (regular files or long symlinks): so put those into a union. No change in mm/shmem.c is required for this. Link: https://lkml.kernel.org/r/c7441dc6-f3bb-dd60-c670-9f5cbd9f266@google.com Link: https://lkml.kernel.org/r/86ebb4b-c571-b9e8-27f5-cb82ec50357e@google.com Signed-off-by: Hugh Dickins Reviewed-by: Chuck Lever Reviewed-by: Jan Kara Cc: Axel Rasmussen Cc: Carlos Maiolino Cc: Christian Brauner Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Cc: Darrick J. 
Wong Cc: Dave Chinner Cc: Tim Chen Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Acked-by: Joseph Qi Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3858 --- include/linux/shmem_fs.h | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 134c686c8676..f0c6bf982832 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -23,18 +23,22 @@ struct shmem_inode_info { unsigned long flags; unsigned long alloced; /* data pages alloced to file */ unsigned long swapped; /* subtotal assigned to swap */ - pgoff_t fallocend; /* highest fallocate endindex */ - struct list_head shrinklist; /* shrinkable hpage inodes */ - struct list_head swaplist; /* chain of maybes on swap */ + union { + struct offset_ctx dir_offsets; /* stable directory offsets */ + struct { + struct list_head shrinklist; /* shrinkable hpage inodes */ + struct list_head swaplist; /* chain of maybes on swap */ + }; + }; + struct timespec64 i_crtime; /* file creation time */ struct shared_policy policy; /* NUMA memory alloc policy */ struct simple_xattrs xattrs; /* list of xattrs */ + pgoff_t fallocend; /* highest fallocate endindex */ + unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */ atomic_t stop_eviction; /* hold when working on inode */ - struct timespec64 i_crtime; /* file creation time */ - unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */ #ifdef CONFIG_TMPFS_QUOTA struct dquot __rcu *i_dquot[MAXQUOTAS]; #endif - struct offset_ctx dir_offsets; /* stable entry offsets */ struct inode vfs_inode; }; -- Gitee From 50c48c284fff1fd1d4c4c2fb204684079bf8f129 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Fri, 29 Sep 2023 20:26:53 -0700 Subject: [PATCH 1348/2138] shmem: remove vma arg from shmem_get_folio_gfp() ANBZ: #9728 commit e3e1a5067fd2f1b3f4f7c651f5b33082962d1aa1 upstream. The vma is already there in vmf->vma, so no need for a separate arg. 
Link: https://lkml.kernel.org/r/d9ce6f65-a2ed-48f4-4299-fdb0544875c5@google.com Signed-off-by: Hugh Dickins Reviewed-by: Jan Kara Cc: Axel Rasmussen Cc: Carlos Maiolino Cc: Christian Brauner Cc: Chuck Lever Cc: Darrick J. Wong Cc: Dave Chinner Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Cc: Tim Chen Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Acked-by: Joseph Qi Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3858 --- mm/shmem.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index db7dd45c9181..86837950793d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1943,14 +1943,13 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, * vm. If we swap it in we mark it dirty since we also free the swap * entry since a page cannot live in both the swap and page cache. * - * vma, vmf, and fault_type are only supplied by shmem_fault: - * otherwise they are NULL. + * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL. */ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp, gfp_t gfp, - struct vm_area_struct *vma, struct vm_fault *vmf, - vm_fault_t *fault_type) + struct vm_fault *vmf, vm_fault_t *fault_type) { + struct vm_area_struct *vma = vmf ? 
vmf->vma : NULL; struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo; @@ -2163,7 +2162,7 @@ int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp) { return shmem_get_folio_gfp(inode, index, foliop, sgp, - mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL); + mapping_gfp_mask(inode->i_mapping), NULL, NULL); } /* @@ -2247,7 +2246,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf) } err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE, - gfp, vma, vmf, &ret); + gfp, vmf, &ret); if (err) return vmf_error(err); if (folio) @@ -4916,7 +4915,7 @@ struct folio *shmem_read_folio_gfp(struct address_space *mapping, BUG_ON(!shmem_mapping(mapping)); error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE, - gfp, NULL, NULL, NULL); + gfp, NULL, NULL); if (error) return ERR_PTR(error); -- Gitee From b30fd71d76ad6a8b92df10553d6190356defdd2f Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Fri, 29 Sep 2023 20:27:53 -0700 Subject: [PATCH 1349/2138] shmem: factor shmem_falloc_wait() out of shmem_fault() ANBZ: #9728 commit f0a9ad1d4d9ba3c694bca91d8d67be9a4a33b902 upstream. That Trinity livelock shmem_falloc avoidance block is unlikely, and a distraction from the proper business of shmem_fault(): separate it out. (This used to help compilers save stack on the fault path too, but both gcc and clang nowadays seem to make better choices anyway.) Link: https://lkml.kernel.org/r/6fe379a4-6176-9225-9263-fe60d2633c0@google.com Signed-off-by: Hugh Dickins Reviewed-by: Jan Kara Cc: Axel Rasmussen Cc: Carlos Maiolino Cc: Christian Brauner Cc: Chuck Lever Cc: Darrick J. 
Wong Cc: Dave Chinner Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Cc: Tim Chen Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Acked-by: Joseph Qi Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3858 --- mm/shmem.c | 126 +++++++++++++++++++++++++++++------------------------ 1 file changed, 69 insertions(+), 57 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 86837950793d..a70dfc034f21 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2170,87 +2170,99 @@ int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop, * entry unconditionally - even if something else had already woken the * target. */ -static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) +static int synchronous_wake_function(wait_queue_entry_t *wait, + unsigned int mode, int sync, void *key) { int ret = default_wake_function(wait, mode, sync, key); list_del_init(&wait->entry); return ret; } +/* + * Trinity finds that probing a hole which tmpfs is punching can + * prevent the hole-punch from ever completing: which in turn + * locks writers out with its hold on i_rwsem. So refrain from + * faulting pages into the hole while it's being punched. Although + * shmem_undo_range() does remove the additions, it may be unable to + * keep up, as each new page needs its own unmap_mapping_range() call, + * and the i_mmap tree grows ever slower to scan if new vmas are added. + * + * It does not matter if we sometimes reach this check just before the + * hole-punch begins, so that one fault then races with the punch: + * we just need to make racing faults a rare case. + * + * The implementation below would be much simpler if we just used a + * standard mutex or completion: but we cannot take i_rwsem in fault, + * and bloating every shmem inode for this unlikely case would be sad. 
+ */ +static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode) +{ + struct shmem_falloc *shmem_falloc; + struct file *fpin = NULL; + vm_fault_t ret = 0; + + spin_lock(&inode->i_lock); + shmem_falloc = inode->i_private; + if (shmem_falloc && + shmem_falloc->waitq && + vmf->pgoff >= shmem_falloc->start && + vmf->pgoff < shmem_falloc->next) { + wait_queue_head_t *shmem_falloc_waitq; + DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); + + ret = VM_FAULT_NOPAGE; + fpin = maybe_unlock_mmap_for_io(vmf, NULL); + shmem_falloc_waitq = shmem_falloc->waitq; + prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, + TASK_UNINTERRUPTIBLE); + spin_unlock(&inode->i_lock); + schedule(); + + /* + * shmem_falloc_waitq points into the shmem_fallocate() + * stack of the hole-punching task: shmem_falloc_waitq + * is usually invalid by the time we reach here, but + * finish_wait() does not dereference it in that case; + * though i_lock needed lest racing with wake_up_all(). + */ + spin_lock(&inode->i_lock); + finish_wait(shmem_falloc_waitq, &shmem_fault_wait); + } + spin_unlock(&inode->i_lock); + if (fpin) { + fput(fpin); + ret = VM_FAULT_RETRY; + } + return ret; +} + static vm_fault_t shmem_fault(struct vm_fault *vmf) { - struct vm_area_struct *vma = vmf->vma; - struct inode *inode = file_inode(vma->vm_file); + struct inode *inode = file_inode(vmf->vma->vm_file); gfp_t gfp = mapping_gfp_mask(inode->i_mapping); struct folio *folio = NULL; + vm_fault_t ret = 0; int err; - vm_fault_t ret = VM_FAULT_LOCKED; /* * Trinity finds that probing a hole which tmpfs is punching can - * prevent the hole-punch from ever completing: which in turn - * locks writers out with its hold on i_rwsem. So refrain from - * faulting pages into the hole while it's being punched. 
Although - * shmem_undo_range() does remove the additions, it may be unable to - * keep up, as each new page needs its own unmap_mapping_range() call, - * and the i_mmap tree grows ever slower to scan if new vmas are added. - * - * It does not matter if we sometimes reach this check just before the - * hole-punch begins, so that one fault then races with the punch: - * we just need to make racing faults a rare case. - * - * The implementation below would be much simpler if we just used a - * standard mutex or completion: but we cannot take i_rwsem in fault, - * and bloating every shmem inode for this unlikely case would be sad. + * prevent the hole-punch from ever completing: noted in i_private. */ if (unlikely(inode->i_private)) { - struct shmem_falloc *shmem_falloc; - - spin_lock(&inode->i_lock); - shmem_falloc = inode->i_private; - if (shmem_falloc && - shmem_falloc->waitq && - vmf->pgoff >= shmem_falloc->start && - vmf->pgoff < shmem_falloc->next) { - struct file *fpin; - wait_queue_head_t *shmem_falloc_waitq; - DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); - - ret = VM_FAULT_NOPAGE; - fpin = maybe_unlock_mmap_for_io(vmf, NULL); - if (fpin) - ret = VM_FAULT_RETRY; - - shmem_falloc_waitq = shmem_falloc->waitq; - prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, - TASK_UNINTERRUPTIBLE); - spin_unlock(&inode->i_lock); - schedule(); - - /* - * shmem_falloc_waitq points into the shmem_fallocate() - * stack of the hole-punching task: shmem_falloc_waitq - * is usually invalid by the time we reach here, but - * finish_wait() does not dereference it in that case; - * though i_lock needed lest racing with wake_up_all(). 
- */ - spin_lock(&inode->i_lock); - finish_wait(shmem_falloc_waitq, &shmem_fault_wait); - spin_unlock(&inode->i_lock); - - if (fpin) - fput(fpin); + ret = shmem_falloc_wait(vmf, inode); + if (ret) return ret; - } - spin_unlock(&inode->i_lock); } + WARN_ON_ONCE(vmf->page != NULL); err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE, gfp, vmf, &ret); if (err) return vmf_error(err); - if (folio) + if (folio) { vmf->page = folio_file_page(folio, vmf->pgoff); + ret |= VM_FAULT_LOCKED; + } return ret; } -- Gitee From b12340ed027add43507b6bffcf6079dd017ffc0e Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Fri, 29 Sep 2023 20:28:50 -0700 Subject: [PATCH 1350/2138] shmem: trivial tidyups, removing extra blank lines, etc ANBZ: #9728 commit 9be7d5b06648b808989e99c5d0bea1be47c5a384 upstream. Mostly removing a few superfluous blank lines, joining short arglines, imposing some 80-column observance, correcting a couple of comments. None of it more interesting than deleting a repeated INIT_LIST_HEAD(). Link: https://lkml.kernel.org/r/b3983d28-5d3f-8649-36af-b819285d7a9e@google.com Signed-off-by: Hugh Dickins Reviewed-by: Jan Kara Cc: Axel Rasmussen Cc: Carlos Maiolino Cc: Christian Brauner Cc: Chuck Lever Cc: Darrick J. Wong Cc: Dave Chinner Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Cc: Tim Chen Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Acked-by: Joseph Qi Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3858 --- mm/shmem.c | 56 ++++++++++++++++++++---------------------------------- 1 file changed, 21 insertions(+), 35 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index a70dfc034f21..cac807e77dbc 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -761,7 +761,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* - * Like filemap_add_folio, but error if expected item has gone. 
+ * Somewhat like filemap_add_folio, but error if expected item has gone. */ static int shmem_add_to_page_cache(struct folio *folio, struct address_space *mapping, @@ -830,7 +830,7 @@ static int shmem_add_to_page_cache(struct folio *folio, } /* - * Like delete_from_page_cache, but substitutes swap for @folio. + * Somewhat like filemap_remove_folio, but substitutes swap for @folio. */ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap) { @@ -892,7 +892,6 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping, cond_resched_rcu(); } } - rcu_read_unlock(); return swapped << PAGE_SHIFT; @@ -1235,7 +1234,6 @@ static int shmem_setattr(struct mnt_idmap *idmap, if (i_uid_needs_update(idmap, attr, inode) || i_gid_needs_update(idmap, attr, inode)) { error = dquot_transfer(idmap, inode, attr); - if (error) return error; } @@ -2475,7 +2473,6 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap, if (err) return ERR_PTR(err); - inode = new_inode(sb); if (!inode) { shmem_free_inode(sb, 0); @@ -2500,11 +2497,10 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap, shmem_set_inode_flags(inode, info->fsflags); INIT_LIST_HEAD(&info->shrinklist); INIT_LIST_HEAD(&info->swaplist); - INIT_LIST_HEAD(&info->swaplist); - if (sbinfo->noswap) - mapping_set_unevictable(inode->i_mapping); simple_xattrs_init(&info->xattrs); cache_no_acl(inode); + if (sbinfo->noswap) + mapping_set_unevictable(inode->i_mapping); mapping_set_large_folios(inode->i_mapping); switch (mode & S_IFMT) { @@ -2716,7 +2712,6 @@ shmem_write_begin(struct file *file, struct address_space *mapping, } ret = shmem_get_folio(inode, index, &folio, SGP_WRITE); - if (ret) return ret; @@ -3248,8 +3243,7 @@ shmem_mknod(struct mnt_idmap *idmap, struct inode *dir, error = simple_acl_create(dir, inode); if (error) goto out_iput; - error = security_inode_init_security(inode, dir, - &dentry->d_name, + error = security_inode_init_security(inode, dir, &dentry->d_name, 
shmem_initxattrs, NULL); if (error && error != -EOPNOTSUPP) goto out_iput; @@ -3278,14 +3272,11 @@ shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir, int error; inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE); - if (IS_ERR(inode)) { error = PTR_ERR(inode); goto err_out; } - - error = security_inode_init_security(inode, dir, - NULL, + error = security_inode_init_security(inode, dir, NULL, shmem_initxattrs, NULL); if (error && error != -EOPNOTSUPP) goto out_iput; @@ -3322,7 +3313,8 @@ static int shmem_create(struct mnt_idmap *idmap, struct inode *dir, /* * Link a file.. */ -static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) +static int shmem_link(struct dentry *old_dentry, struct inode *dir, + struct dentry *dentry) { struct inode *inode = d_inode(old_dentry); int ret = 0; @@ -3353,7 +3345,7 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr inode_inc_iversion(dir); inc_nlink(inode); ihold(inode); /* New dentry reference */ - dget(dentry); /* Extra pinning count for the created dentry */ + dget(dentry); /* Extra pinning count for the created dentry */ d_instantiate(dentry, inode); out: return ret; @@ -3373,7 +3365,7 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry) inode_set_ctime_current(inode)); inode_inc_iversion(dir); drop_nlink(inode); - dput(dentry); /* Undo the count from "create" - this does all the work */ + dput(dentry); /* Undo the count from "create" - does all the work */ return 0; } @@ -3483,7 +3475,6 @@ static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir, inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0, VM_NORESERVE); - if (IS_ERR(inode)) return PTR_ERR(inode); @@ -3537,8 +3528,7 @@ static void shmem_put_link(void *arg) folio_put(arg); } -static const char *shmem_get_link(struct dentry *dentry, - struct inode *inode, +static const char *shmem_get_link(struct dentry *dentry, struct inode *inode, struct 
delayed_call *done) { struct folio *folio = NULL; @@ -3612,8 +3602,7 @@ static int shmem_fileattr_set(struct mnt_idmap *idmap, * Callback for security_inode_init_security() for acquiring xattrs. */ static int shmem_initxattrs(struct inode *inode, - const struct xattr *xattr_array, - void *fs_info) + const struct xattr *xattr_array, void *fs_info) { struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); @@ -3797,7 +3786,6 @@ static struct dentry *shmem_find_alias(struct inode *inode) return alias ?: d_find_any_alias(inode); } - static struct dentry *shmem_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { @@ -4381,8 +4369,8 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) } #endif /* CONFIG_TMPFS_QUOTA */ - inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0, - VM_NORESERVE); + inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, + S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); if (IS_ERR(inode)) { error = PTR_ERR(inode); goto failed; @@ -4685,11 +4673,9 @@ static ssize_t shmem_enabled_show(struct kobject *kobj, for (i = 0; i < ARRAY_SIZE(values); i++) { len += sysfs_emit_at(buf, len, - shmem_huge == values[i] ? "%s[%s]" : "%s%s", - i ? " " : "", - shmem_format_huge(values[i])); + shmem_huge == values[i] ? "%s[%s]" : "%s%s", + i ? 
" " : "", shmem_format_huge(values[i])); } - len += sysfs_emit_at(buf, len, "\n"); return len; @@ -4786,8 +4772,9 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range); #define shmem_acct_size(flags, size) 0 #define shmem_unacct_size(flags, size) do {} while (0) -static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb, struct inode *dir, - umode_t mode, dev_t dev, unsigned long flags) +static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, + struct super_block *sb, struct inode *dir, + umode_t mode, dev_t dev, unsigned long flags) { struct inode *inode = ramfs_get_inode(sb, dir, mode, dev); return inode ? inode : ERR_PTR(-ENOSPC); @@ -4797,8 +4784,8 @@ static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct supe /* common code */ -static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size, - unsigned long flags, unsigned int i_flags) +static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, + loff_t size, unsigned long flags, unsigned int i_flags) { struct inode *inode; struct file *res; @@ -4817,7 +4804,6 @@ static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, l inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); - if (IS_ERR(inode)) { shmem_unacct_size(flags, size); return ERR_CAST(inode); -- Gitee From c3844aec90e8a9a243753471e6f0d773bcb456ae Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Fri, 29 Sep 2023 20:30:03 -0700 Subject: [PATCH 1351/2138] shmem: shmem_acct_blocks() and shmem_inode_acct_blocks() ANBZ: #9728 commit 4199f51a7eb2054d68964efbd8d39c68053a8714 upstream. By historical accident, shmem_acct_block() and shmem_inode_acct_block() were never pluralized when the pages argument was added, despite their complements being shmem_unacct_blocks() and shmem_inode_unacct_blocks() all along. It has been an irritation: fix their naming at last. 
Link: https://lkml.kernel.org/r/9124094-e4ab-8be7-ef80-9a87bdc2e4fc@google.com Signed-off-by: Hugh Dickins Reviewed-by: Jan Kara Cc: Axel Rasmussen Cc: Carlos Maiolino Cc: Christian Brauner Cc: Chuck Lever Cc: Darrick J. Wong Cc: Dave Chinner Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Cc: Tim Chen Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Acked-by: Joseph Qi Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3858 --- mm/shmem.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index cac807e77dbc..0baadb70745e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -189,10 +189,10 @@ static inline int shmem_reacct_size(unsigned long flags, /* * ... whereas tmpfs objects are accounted incrementally as * pages are allocated, in order to allow large sparse files. - * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM, + * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM, * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM. 
*/ -static inline int shmem_acct_block(unsigned long flags, long pages) +static inline int shmem_acct_blocks(unsigned long flags, long pages) { if (!(flags & VM_NORESERVE)) return 0; @@ -207,13 +207,13 @@ static inline void shmem_unacct_blocks(unsigned long flags, long pages) vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE)); } -static int shmem_inode_acct_block(struct inode *inode, long pages) +static int shmem_inode_acct_blocks(struct inode *inode, long pages) { struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); int err = -ENOSPC; - if (shmem_acct_block(info->flags, pages)) + if (shmem_acct_blocks(info->flags, pages)) return err; might_sleep(); /* when quotas */ @@ -447,7 +447,7 @@ bool shmem_charge(struct inode *inode, long pages) { struct address_space *mapping = inode->i_mapping; - if (shmem_inode_acct_block(inode, pages)) + if (shmem_inode_acct_blocks(inode, pages)) return false; /* nrpages adjustment first, then shmem_recalc_inode() when balanced */ @@ -1693,7 +1693,7 @@ static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode, huge = false; nr = huge ? HPAGE_PMD_NR : 1; - err = shmem_inode_acct_block(inode, nr); + err = shmem_inode_acct_blocks(inode, nr); if (err) goto failed; @@ -2591,7 +2591,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd, int ret; pgoff_t max_off; - if (shmem_inode_acct_block(inode, 1)) { + if (shmem_inode_acct_blocks(inode, 1)) { /* * We may have got a page, returned -ENOENT triggering a retry, * and now we find ourselves with -ENOMEM. Release the page, to -- Gitee From 7460fc6c524a70640af417390a5f4f3ef578cbf2 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Fri, 29 Sep 2023 20:31:27 -0700 Subject: [PATCH 1352/2138] shmem: move memcg charge out of shmem_add_to_page_cache() ANBZ: #9728 commit 054a9f7ccd0a60607fb9bbe1e06ca671494971bf upstream. 
Extract shmem's memcg charging out of shmem_add_to_page_cache(): it's misleading done there, because many calls are dealing with a swapcache page, whose memcg is nowadays always remembered while swapped out, then the charge re-levied when it's brought back into swapcache. Temporarily move it back up to the shmem_get_folio_gfp() level, where the memcg was charged before v5.8; but the next commit goes on to move it back down to a new home. In making this change, it becomes clear that shmem_swapin_folio() does not need to know the vma, just the fault mm (if any): call it fault_mm rather than charge_mm - let mem_cgroup_charge() decide whom to charge. Link: https://lkml.kernel.org/r/4b2143c5-bf32-64f0-841-81a81158dac@google.com Signed-off-by: Hugh Dickins Reviewed-by: Jan Kara Cc: Axel Rasmussen Cc: Carlos Maiolino Cc: Christian Brauner Cc: Chuck Lever Cc: Darrick J. Wong Cc: Dave Chinner Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Cc: Tim Chen Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Acked-by: Joseph Qi Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3858 --- mm/shmem.c | 68 +++++++++++++++++++++++------------------------------- 1 file changed, 29 insertions(+), 39 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 0baadb70745e..cd5805ee3f2e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -146,9 +146,8 @@ static unsigned long shmem_default_max_inodes(void) #endif static int shmem_swapin_folio(struct inode *inode, pgoff_t index, - struct folio **foliop, enum sgp_type sgp, - gfp_t gfp, struct vm_area_struct *vma, - vm_fault_t *fault_type); + struct folio **foliop, enum sgp_type sgp, gfp_t gfp, + struct mm_struct *fault_mm, vm_fault_t *fault_type); static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb) { @@ -765,12 +764,10 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, */ static int shmem_add_to_page_cache(struct folio *folio, struct address_space *mapping, - 
pgoff_t index, void *expected, gfp_t gfp, - struct mm_struct *charge_mm) + pgoff_t index, void *expected, gfp_t gfp) { XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); long nr = folio_nr_pages(folio); - int error; VM_BUG_ON_FOLIO(index != round_down(index, nr), folio); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); @@ -781,16 +778,7 @@ static int shmem_add_to_page_cache(struct folio *folio, folio->mapping = mapping; folio->index = index; - if (!folio_test_swapcache(folio)) { - error = mem_cgroup_charge(folio, charge_mm, gfp); - if (error) { - if (folio_test_pmd_mappable(folio)) { - count_vm_event(THP_FILE_FALLBACK); - count_vm_event(THP_FILE_FALLBACK_CHARGE); - } - goto error; - } - } + gfp &= GFP_RECLAIM_MASK; folio_throttle_swaprate(folio, gfp); do { @@ -818,15 +806,12 @@ static int shmem_add_to_page_cache(struct folio *folio, } while (xas_nomem(&xas, gfp)); if (xas_error(&xas)) { - error = xas_error(&xas); - goto error; + folio->mapping = NULL; + folio_ref_sub(folio, nr); + return xas_error(&xas); } return 0; -error: - folio->mapping = NULL; - folio_ref_sub(folio, nr); - return error; } /* @@ -1346,10 +1331,8 @@ static int shmem_unuse_swap_entries(struct inode *inode, if (!xa_is_value(folio)) continue; - error = shmem_swapin_folio(inode, indices[i], - &folio, SGP_CACHE, - mapping_gfp_mask(mapping), - NULL, NULL); + error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE, + mapping_gfp_mask(mapping), NULL, NULL); if (error == 0) { folio_unlock(folio); folio_put(folio); @@ -1832,12 +1815,11 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, */ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp, - gfp_t gfp, struct vm_area_struct *vma, + gfp_t gfp, struct mm_struct *fault_mm, vm_fault_t *fault_type) { struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); - struct mm_struct *charge_mm = vma ? 
vma->vm_mm : NULL; struct swap_info_struct *si; struct folio *folio = NULL; swp_entry_t swap; @@ -1865,7 +1847,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, if (fault_type) { *fault_type |= VM_FAULT_MAJOR; count_vm_event(PGMAJFAULT); - count_memcg_event_mm(charge_mm, PGMAJFAULT); + count_memcg_event_mm(fault_mm, PGMAJFAULT); } /* Here we actually start the io */ folio = shmem_swapin(swap, gfp, info, index); @@ -1902,8 +1884,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, } error = shmem_add_to_page_cache(folio, mapping, index, - swp_to_radix_entry(swap), gfp, - charge_mm); + swp_to_radix_entry(swap), gfp); if (error) goto failed; @@ -1951,7 +1932,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo; - struct mm_struct *charge_mm; + struct mm_struct *fault_mm; struct folio *folio; pgoff_t hindex; gfp_t huge_gfp; @@ -1968,7 +1949,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, } sbinfo = SHMEM_SB(inode->i_sb); - charge_mm = vma ? vma->vm_mm : NULL; + fault_mm = vma ? 
vma->vm_mm : NULL; folio = filemap_get_entry(mapping, index); if (folio && vma && userfaultfd_minor(vma)) { @@ -1980,7 +1961,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, if (xa_is_value(folio)) { error = shmem_swapin_folio(inode, index, &folio, - sgp, gfp, vma, fault_type); + sgp, gfp, fault_mm, fault_type); if (error == -EEXIST) goto repeat; @@ -2066,9 +2047,16 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, if (sgp == SGP_WRITE) __folio_set_referenced(folio); - error = shmem_add_to_page_cache(folio, mapping, hindex, - NULL, gfp & GFP_RECLAIM_MASK, - charge_mm); + error = mem_cgroup_charge(folio, fault_mm, gfp); + if (error) { + if (folio_test_pmd_mappable(folio)) { + count_vm_event(THP_FILE_FALLBACK); + count_vm_event(THP_FILE_FALLBACK_CHARGE); + } + goto unacct; + } + + error = shmem_add_to_page_cache(folio, mapping, hindex, NULL, gfp); if (error) goto unacct; @@ -2663,8 +2651,10 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd, if (unlikely(pgoff >= max_off)) goto out_release; - ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, - gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm); + ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp); + if (ret) + goto out_release; + ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp); if (ret) goto out_release; -- Gitee From 16d4b0922bc4fba911e41ea105508e8cf0dc75fb Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Fri, 29 Sep 2023 20:32:40 -0700 Subject: [PATCH 1353/2138] shmem: _add_to_page_cache() before shmem_inode_acct_blocks() ANBZ: #9728 commit 3022fd7af9604d44ec43da8a4398872989599b18 upstream. There has been a recurring problem, that when a tmpfs volume is being filled by racing threads, some fail with ENOSPC (or consequent SIGBUS or EFAULT) even though all allocations were within the permitted size. This was a problem since early days, but magnified and complicated by the addition of huge pages. 
We have often worked around it by adding some slop to the tmpfs size, but it's hard to say how much is needed, and some users prefer not to do that e.g. keeping sparse files in a tightly tailored tmpfs helps to prevent accidental writing to holes. This comes from the allocation sequence: 1. check page cache for existing folio 2. check and reserve from vm_enough_memory 3. check and account from size of tmpfs 4. if huge, check page cache for overlapping folio 5. allocate physical folio, huge or small 6. check and charge from mem cgroup limit 7. add to page cache (but maybe another folio already got in). Concurrent tasks allocating at the same position could deplete the size allowance and fail. Doing vm_enough_memory and size checks before the folio allocation was intentional (to limit the load on the page allocator from this source) and still has some virtue; but memory cgroup never did that, so I think it's better reordered to favour predictable behaviour. 1. check page cache for existing folio 2. if huge, check page cache for overlapping folio 3. allocate physical folio, huge or small 4. check and charge from mem cgroup limit 5. add to page cache (but maybe another folio already got in) 6. check and reserve from vm_enough_memory 7. check and account from size of tmpfs. The folio lock held from allocation onwards ensures that the !uptodate folio cannot be used by others, and can safely be deleted from the cache if checks 6 or 7 subsequently fail (and those waiting on folio lock already check that the folio was not truncated once they get the lock); and the early addition to page cache ensures that racers find it before they try to duplicate the accounting. 
Seize the opportunity to tidy up shmem_get_folio_gfp()'s ENOSPC retrying, which can be combined inside the new shmem_alloc_and_add_folio(): doing 2 splits twice (once huge, once nonhuge) is not exactly equivalent to trying 5 splits (and giving up early on huge), but let's keep it simple unless more complication proves necessary. Userfaultfd is a foreign country: they do things differently there, and for good reason - to avoid mmap_lock deadlock. Leave ordering in shmem_mfill_atomic_pte() untouched for now, but I would rather like to mesh it better with shmem_get_folio_gfp() in the future. Link: https://lkml.kernel.org/r/22ddd06-d919-33b-1219-56335c1bf28e@google.com Signed-off-by: Hugh Dickins Cc: Axel Rasmussen Cc: Carlos Maiolino Cc: Christian Brauner Cc: Chuck Lever Cc: Darrick J. Wong Cc: Dave Chinner Cc: Jan Kara Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Cc: Tim Chen Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Acked-by: Joseph Qi Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3858 --- mm/shmem.c | 229 +++++++++++++++++++++++++++-------------------------- 1 file changed, 118 insertions(+), 111 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index cd5805ee3f2e..aa78b71eba12 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -794,13 +794,11 @@ static int shmem_add_to_page_cache(struct folio *folio, xas_store(&xas, folio); if (xas_error(&xas)) goto unlock; - if (folio_test_pmd_mappable(folio)) { - count_vm_event(THP_FILE_ALLOC); + if (folio_test_pmd_mappable(folio)) __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr); - } - mapping->nrpages += nr; __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr); __lruvec_stat_mod_folio(folio, NR_SHMEM, nr); + mapping->nrpages += nr; unlock: xas_unlock_irq(&xas); } while (xas_nomem(&xas, gfp)); @@ -1634,25 +1632,17 @@ static struct folio *shmem_alloc_hugefolio(gfp_t gfp, struct shmem_inode_info *info, pgoff_t index) { struct vm_area_struct pvma; - struct address_space 
*mapping = info->vfs_inode.i_mapping; - pgoff_t hindex; struct folio *folio; - hindex = round_down(index, HPAGE_PMD_NR); - if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, - XA_PRESENT)) - return NULL; - - shmem_pseudo_vma_init(&pvma, info, hindex); + shmem_pseudo_vma_init(&pvma, info, index); folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true); shmem_pseudo_vma_destroy(&pvma); - if (!folio) - count_vm_event(THP_FILE_FALLBACK); + return folio; } static struct folio *shmem_alloc_folio(gfp_t gfp, - struct shmem_inode_info *info, pgoff_t index) + struct shmem_inode_info *info, pgoff_t index) { struct vm_area_struct pvma; struct folio *folio; @@ -1664,36 +1654,101 @@ static struct folio *shmem_alloc_folio(gfp_t gfp, return folio; } -static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode, - pgoff_t index, bool huge) +static struct folio *shmem_alloc_and_add_folio(gfp_t gfp, + struct inode *inode, pgoff_t index, + struct mm_struct *fault_mm, bool huge) { + struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); struct folio *folio; - int nr; - int err; + long pages; + int error; if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) huge = false; - nr = huge ? HPAGE_PMD_NR : 1; - err = shmem_inode_acct_blocks(inode, nr); - if (err) - goto failed; + if (huge) { + pages = HPAGE_PMD_NR; + index = round_down(index, HPAGE_PMD_NR); + + /* + * Check for conflict before waiting on a huge allocation. + * Conflict might be that a huge page has just been allocated + * and added to page cache by a racing thread, or that there + * is already at least one small page in the huge extent. + * Be careful to retry when appropriate, but not forever! + * Elsewhere -EEXIST would be the right code, but not here. 
+ */ + if (xa_find(&mapping->i_pages, &index, + index + HPAGE_PMD_NR - 1, XA_PRESENT)) + return ERR_PTR(-E2BIG); - if (huge) folio = shmem_alloc_hugefolio(gfp, info, index); - else + if (!folio) + count_vm_event(THP_FILE_FALLBACK); + } else { + pages = 1; folio = shmem_alloc_folio(gfp, info, index); - if (folio) { - __folio_set_locked(folio); - __folio_set_swapbacked(folio); - return folio; + } + if (!folio) + return ERR_PTR(-ENOMEM); + + __folio_set_locked(folio); + __folio_set_swapbacked(folio); + + gfp &= GFP_RECLAIM_MASK; + error = mem_cgroup_charge(folio, fault_mm, gfp); + if (error) { + if (xa_find(&mapping->i_pages, &index, + index + pages - 1, XA_PRESENT)) { + error = -EEXIST; + } else if (huge) { + count_vm_event(THP_FILE_FALLBACK); + count_vm_event(THP_FILE_FALLBACK_CHARGE); + } + goto unlock; } - err = -ENOMEM; - shmem_inode_unacct_blocks(inode, nr); -failed: - return ERR_PTR(err); + error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp); + if (error) + goto unlock; + + error = shmem_inode_acct_blocks(inode, pages); + if (error) { + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + long freed; + /* + * Try to reclaim some space by splitting a few + * large folios beyond i_size on the filesystem. + */ + shmem_unused_huge_shrink(sbinfo, NULL, 2); + /* + * And do a shmem_recalc_inode() to account for freed pages: + * except our folio is there in cache, so not quite balanced. 
+ */ + spin_lock(&info->lock); + freed = pages + info->alloced - info->swapped - + READ_ONCE(mapping->nrpages); + if (freed > 0) + info->alloced -= freed; + spin_unlock(&info->lock); + if (freed > 0) + shmem_inode_unacct_blocks(inode, freed); + error = shmem_inode_acct_blocks(inode, pages); + if (error) { + filemap_remove_folio(folio); + goto unlock; + } + } + + shmem_recalc_inode(inode, pages, 0); + folio_add_lru(folio); + return folio; + +unlock: + folio_unlock(folio); + folio_put(folio); + return ERR_PTR(error); } /* @@ -1929,29 +1984,22 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, struct vm_fault *vmf, vm_fault_t *fault_type) { struct vm_area_struct *vma = vmf ? vmf->vma : NULL; - struct address_space *mapping = inode->i_mapping; - struct shmem_inode_info *info = SHMEM_I(inode); - struct shmem_sb_info *sbinfo; struct mm_struct *fault_mm; struct folio *folio; - pgoff_t hindex; - gfp_t huge_gfp; int error; - int once = 0; - int alloced = 0; + bool alloced; if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) return -EFBIG; repeat: if (sgp <= SGP_CACHE && - ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { + ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) return -EINVAL; - } - sbinfo = SHMEM_SB(inode->i_sb); + alloced = false; fault_mm = vma ? vma->vm_mm : NULL; - folio = filemap_get_entry(mapping, index); + folio = filemap_get_entry(inode->i_mapping, index); if (folio && vma && userfaultfd_minor(vma)) { if (!xa_is_value(folio)) folio_put(folio); @@ -1973,7 +2021,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, folio_lock(folio); /* Has the folio been truncated or swapped out? */ - if (unlikely(folio->mapping != mapping)) { + if (unlikely(folio->mapping != inode->i_mapping)) { folio_unlock(folio); folio_put(folio); goto repeat; @@ -2008,65 +2056,38 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, return 0; } - if (!shmem_is_huge(inode, index, false, - vma ? vma->vm_mm : NULL, vma ? 
vma->vm_flags : 0)) - goto alloc_nohuge; + if (shmem_is_huge(inode, index, false, fault_mm, + vma ? vma->vm_flags : 0)) { + gfp_t huge_gfp; - huge_gfp = vma_thp_gfp_mask(vma); - huge_gfp = limit_gfp_mask(huge_gfp, gfp); - folio = shmem_alloc_and_acct_folio(huge_gfp, inode, index, true); - if (IS_ERR(folio)) { -alloc_nohuge: - folio = shmem_alloc_and_acct_folio(gfp, inode, index, false); + huge_gfp = vma_thp_gfp_mask(vma); + huge_gfp = limit_gfp_mask(huge_gfp, gfp); + folio = shmem_alloc_and_add_folio(huge_gfp, + inode, index, fault_mm, true); + if (!IS_ERR(folio)) { + count_vm_event(THP_FILE_ALLOC); + goto alloced; + } + if (PTR_ERR(folio) == -EEXIST) + goto repeat; } - if (IS_ERR(folio)) { - int retry = 5; + folio = shmem_alloc_and_add_folio(gfp, inode, index, fault_mm, false); + if (IS_ERR(folio)) { error = PTR_ERR(folio); + if (error == -EEXIST) + goto repeat; folio = NULL; - if (error != -ENOSPC) - goto unlock; - /* - * Try to reclaim some space by splitting a large folio - * beyond i_size on the filesystem. 
- */ - while (retry--) { - int ret; - - ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); - if (ret == SHRINK_STOP) - break; - if (ret) - goto alloc_nohuge; - } goto unlock; } - hindex = round_down(index, folio_nr_pages(folio)); - - if (sgp == SGP_WRITE) - __folio_set_referenced(folio); - - error = mem_cgroup_charge(folio, fault_mm, gfp); - if (error) { - if (folio_test_pmd_mappable(folio)) { - count_vm_event(THP_FILE_FALLBACK); - count_vm_event(THP_FILE_FALLBACK_CHARGE); - } - goto unacct; - } - - error = shmem_add_to_page_cache(folio, mapping, hindex, NULL, gfp); - if (error) - goto unacct; - - folio_add_lru(folio); - shmem_recalc_inode(inode, folio_nr_pages(folio), 0); +alloced: alloced = true; - if (folio_test_pmd_mappable(folio) && DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < folio_next_index(folio) - 1) { + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + struct shmem_inode_info *info = SHMEM_I(inode); /* * Part of the large folio is beyond i_size: subject * to shrink under memory pressure. @@ -2084,6 +2105,8 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, spin_unlock(&sbinfo->shrinklist_lock); } + if (sgp == SGP_WRITE) + folio_set_referenced(folio); /* * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio. */ @@ -2107,11 +2130,6 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, /* Perhaps the file has been truncated since we checked */ if (sgp <= SGP_CACHE && ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { - if (alloced) { - folio_clear_dirty(folio); - filemap_remove_folio(folio); - shmem_recalc_inode(inode, 0, 0); - } error = -EINVAL; goto unlock; } @@ -2122,25 +2140,14 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, /* * Error recovery. 
*/ -unacct: - shmem_inode_unacct_blocks(inode, folio_nr_pages(folio)); - - if (folio_test_large(folio)) { - folio_unlock(folio); - folio_put(folio); - goto alloc_nohuge; - } unlock: + if (alloced) + filemap_remove_folio(folio); + shmem_recalc_inode(inode, 0, 0); if (folio) { folio_unlock(folio); folio_put(folio); } - if (error == -ENOSPC && !once++) { - shmem_recalc_inode(inode, 0, 0); - goto repeat; - } - if (error == -EEXIST) - goto repeat; return error; } -- Gitee From 0e799cdb2f49027437e0d50170152af61e6b59a2 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Fri, 29 Sep 2023 20:42:45 -0700 Subject: [PATCH 1354/2138] shmem,percpu_counter: add _limited_add(fbc, limit, amount) ANBZ: #9728 commit beb9868628445306958fd7b2da1cd369a4a381cc upstream. Percpu counter's compare and add are separate functions: without locking around them (which would defeat their purpose), it has been possible to overflow the intended limit. Imagine all the other CPUs fallocating tmpfs huge pages to the limit, in between this CPU's compare and its add. I have not seen reports of that happening; but tmpfs's recent addition of dquot_alloc_block_nodirty() in between the compare and the add makes it even more likely, and I'd be uncomfortable to leave it unfixed. Introduce percpu_counter_limited_add(fbc, limit, amount) to prevent it. I believe this implementation is correct, and slightly more efficient than the combination of compare and add (taking the lock once rather than twice when nearing full - the last 128MiB of a tmpfs volume on a machine with 128 CPUs and 4KiB pages); but it does beg for a better design - when nearing full, there is no new batching, but the costly percpu counter sum across CPUs still has to be done, while locked. 
Follow __percpu_counter_sum()'s example, including cpu_dying_mask as well as cpu_online_mask: but shouldn't __percpu_counter_compare() and __percpu_counter_limited_add() then be adding a num_dying_cpus() to num_online_cpus(), when they calculate the maximum which could be held across CPUs? But the times when it matters would be vanishingly rare. Link: https://lkml.kernel.org/r/bb817848-2d19-bcc8-39ca-ea179af0f0b4@google.com Signed-off-by: Hugh Dickins Reviewed-by: Jan Kara Cc: Tim Chen Cc: Dave Chinner Cc: Darrick J. Wong Cc: Axel Rasmussen Cc: Carlos Maiolino Cc: Christian Brauner Cc: Chuck Lever Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Acked-by: Joseph Qi Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3858 --- include/linux/percpu_counter.h | 23 +++++++++++++++ lib/percpu_counter.c | 53 ++++++++++++++++++++++++++++++++++ mm/shmem.c | 10 +++---- 3 files changed, 81 insertions(+), 5 deletions(-) diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index d01351b1526f..8cb7c071bd5c 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -57,6 +57,8 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch); s64 __percpu_counter_sum(struct percpu_counter *fbc); int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); +bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, + s64 amount, s32 batch); void percpu_counter_sync(struct percpu_counter *fbc); static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) @@ -69,6 +71,13 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) percpu_counter_add_batch(fbc, amount, percpu_counter_batch); } +static inline bool +percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount) +{ + return __percpu_counter_limited_add(fbc, limit, amount, + 
percpu_counter_batch); +} + /* * With percpu_counter_add_local() and percpu_counter_sub_local(), counts * are accumulated in local per cpu counter and not in fbc->count until @@ -185,6 +194,20 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount) local_irq_restore(flags); } +static inline bool +percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount) +{ + unsigned long flags; + s64 count; + + local_irq_save(flags); + count = fbc->count + amount; + if (count <= limit) + fbc->count = count; + local_irq_restore(flags); + return count <= limit; +} + /* non-SMP percpu_counter_add_local is the same with percpu_counter_add */ static inline void percpu_counter_add_local(struct percpu_counter *fbc, s64 amount) diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 9073430dc865..58a3392f471b 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -278,6 +278,59 @@ int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) } EXPORT_SYMBOL(__percpu_counter_compare); +/* + * Compare counter, and add amount if the total is within limit. + * Return true if amount was added, false if it would exceed limit. 
+ */ +bool __percpu_counter_limited_add(struct percpu_counter *fbc, + s64 limit, s64 amount, s32 batch) +{ + s64 count; + s64 unknown; + unsigned long flags; + bool good; + + if (amount > limit) + return false; + + local_irq_save(flags); + unknown = batch * num_online_cpus(); + count = __this_cpu_read(*fbc->counters); + + /* Skip taking the lock when safe */ + if (abs(count + amount) <= batch && + fbc->count + unknown <= limit) { + this_cpu_add(*fbc->counters, amount); + local_irq_restore(flags); + return true; + } + + raw_spin_lock(&fbc->lock); + count = fbc->count + amount; + + /* Skip percpu_counter_sum() when safe */ + if (count + unknown > limit) { + s32 *pcount; + int cpu; + + for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) { + pcount = per_cpu_ptr(fbc->counters, cpu); + count += *pcount; + } + } + + good = count <= limit; + if (good) { + count = __this_cpu_read(*fbc->counters); + fbc->count += count + amount; + __this_cpu_sub(*fbc->counters, count); + } + + raw_spin_unlock(&fbc->lock); + local_irq_restore(flags); + return good; +} + static int __init percpu_counter_startup(void) { int ret; diff --git a/mm/shmem.c b/mm/shmem.c index aa78b71eba12..39b10a656bf8 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -217,15 +217,15 @@ static int shmem_inode_acct_blocks(struct inode *inode, long pages) might_sleep(); /* when quotas */ if (sbinfo->max_blocks) { - if (percpu_counter_compare(&sbinfo->used_blocks, - sbinfo->max_blocks - pages) > 0) + if (!percpu_counter_limited_add(&sbinfo->used_blocks, + sbinfo->max_blocks, pages)) goto unacct; err = dquot_alloc_block_nodirty(inode, pages); - if (err) + if (err) { + percpu_counter_sub(&sbinfo->used_blocks, pages); goto unacct; - - percpu_counter_add(&sbinfo->used_blocks, pages); + } } else { err = dquot_alloc_block_nodirty(inode, pages); if (err) -- Gitee From 07f9c9e0ade32b90274adfa58a9ef066eee05929 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Wed, 11 Oct 2023 21:40:09 -0700 Subject: [PATCH 1355/2138] 
percpu_counter: extend _limited_add() to negative amounts ANBZ: #9728 commit 1431996bf9088ee59f8017637ab9a7f89909ae6 upstream. Though tmpfs does not need it, percpu_counter_limited_add() can be twice as useful if it works sensibly with negative amounts (subs) - typically decrements towards a limit of 0 or nearby: as suggested by Dave Chinner. And in the course of that reworking, skip the percpu counter sum if it is already obvious that the limit would be passed: as suggested by Tim Chen. Extend the comment above __percpu_counter_limited_add(), defining the behaviour with positive and negative amounts, allowing negative limits, but not bothering about overflow beyond S64_MAX. Link: https://lkml.kernel.org/r/8f86083b-c452-95d4-365b-f16a2e4ebcd4@google.com Signed-off-by: Hugh Dickins Cc: Axel Rasmussen Cc: Carlos Maiolino Cc: Christian Brauner Cc: Chuck Lever Cc: Darrick J. Wong Cc: Dave Chinner Cc: Jan Kara Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Cc: Tim Chen Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Acked-by: Joseph Qi Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3858 --- include/linux/percpu_counter.h | 11 +++++-- lib/percpu_counter.c | 54 +++++++++++++++++++++++++--------- 2 files changed, 49 insertions(+), 16 deletions(-) diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 8cb7c071bd5c..3a44dd1e33d2 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -198,14 +198,21 @@ static inline bool percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount) { unsigned long flags; + bool good = false; s64 count; + if (amount == 0) + return true; + local_irq_save(flags); count = fbc->count + amount; - if (count <= limit) + if ((amount > 0 && count <= limit) || + (amount < 0 && count >= limit)) { fbc->count = count; + good = true; + } local_irq_restore(flags); - return count <= limit; + return good; } /* non-SMP 
percpu_counter_add_local is the same with percpu_counter_add */ diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 58a3392f471b..44dd133594d4 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -279,8 +279,16 @@ int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) EXPORT_SYMBOL(__percpu_counter_compare); /* - * Compare counter, and add amount if the total is within limit. - * Return true if amount was added, false if it would exceed limit. + * Compare counter, and add amount if total is: less than or equal to limit if + * amount is positive, or greater than or equal to limit if amount is negative. + * Return true if amount is added, or false if total would be beyond the limit. + * + * Negative limit is allowed, but unusual. + * When negative amounts (subs) are given to percpu_counter_limited_add(), + * the limit would most naturally be 0 - but other limits are also allowed. + * + * Overflow beyond S64_MAX is not allowed for: counter, limit and amount + * are all assumed to be sane (far from S64_MIN and S64_MAX). 
*/ bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount, s32 batch) @@ -288,10 +296,10 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 count; s64 unknown; unsigned long flags; - bool good; + bool good = false; - if (amount > limit) - return false; + if (amount == 0) + return true; local_irq_save(flags); unknown = batch * num_online_cpus(); @@ -299,7 +307,8 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc, /* Skip taking the lock when safe */ if (abs(count + amount) <= batch && - fbc->count + unknown <= limit) { + ((amount > 0 && fbc->count + unknown <= limit) || + (amount < 0 && fbc->count - unknown >= limit))) { this_cpu_add(*fbc->counters, amount); local_irq_restore(flags); return true; @@ -309,7 +318,19 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc, count = fbc->count + amount; /* Skip percpu_counter_sum() when safe */ - if (count + unknown > limit) { + if (amount > 0) { + if (count - unknown > limit) + goto out; + if (count + unknown <= limit) + good = true; + } else { + if (count + unknown < limit) + goto out; + if (count - unknown >= limit) + good = true; + } + + if (!good) { s32 *pcount; int cpu; @@ -317,15 +338,20 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc, pcount = per_cpu_ptr(fbc->counters, cpu); count += *pcount; } + if (amount > 0) { + if (count > limit) + goto out; + } else { + if (count < limit) + goto out; + } + good = true; } - good = count <= limit; - if (good) { - count = __this_cpu_read(*fbc->counters); - fbc->count += count + amount; - __this_cpu_sub(*fbc->counters, count); - } - + count = __this_cpu_read(*fbc->counters); + fbc->count += count + amount; + __this_cpu_sub(*fbc->counters, count); +out: raw_spin_unlock(&fbc->lock); local_irq_restore(flags); return good; -- Gitee From 156a6b52c0928b944995b76222d64e7c1ef6e433 Mon Sep 17 00:00:00 2001 From: Ashok Raj Date: Thu, 25 Jan 2024 00:22:51 -0800 Subject: [PATCH 1356/2138] 
platform/x86/intel/ifs: Trace on all HT threads when executing a test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #10908 commit def1ed0db2a66eed5de593748ffe131615edb45e upstream. Enable the trace function on all HT threads. Currently, the trace is called from some arbitrary CPU where the test was invoked. This change gives visibility to the exact errors as seen by each participating HT threads, and not just what was seen from the primary thread. Sample output below. # TASK-PID CPU# ||||| TIMESTAMP FUNCTION # | | | ||||| | | migration/0-18 [000] d..1. 527287.084668: start: 0000, stop: 007f, status: 0000000000007f80 migration/128-785 [128] d..1. 527287.084669: start: 0000, stop: 007f, status: 0000000000007f80 Intel-SIG: commit def1ed0db2a6 platform/x86/intel/ifs: Trace on all HT threads when executing a test Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Signed-off-by: Ashok Raj Reviewed-by: Tony Luck Link: https://lore.kernel.org/r/20240125082254.424859-3-ashok.raj@intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- drivers/platform/x86/intel/ifs/runtest.c | 46 +++++++++++++++++------- include/trace/events/intel_ifs.h | 9 ++--- 2 files changed, 36 insertions(+), 19 deletions(-) diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index e9d7a8c84e05..e945938b9d65 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -23,6 +23,12 @@ /* Max retries on the same chunk */ #define MAX_IFS_RETRIES 5 +struct run_params { + struct ifs_data *ifsd; + union ifs_scan *activate; + union ifs_status status; +}; + /* * Number of TSC cycles that a logical CPU will wait for the other * logical CPU on the core in the WRMSR(ACTIVATE_SCAN). 
@@ -140,10 +146,22 @@ static bool can_restart(union ifs_status status) */ static int doscan(void *data) { - int cpu = smp_processor_id(); - u64 *msrs = data; + int cpu = smp_processor_id(), start, stop; + struct run_params *params = data; + union ifs_status status; + struct ifs_data *ifsd; int first; + ifsd = params->ifsd; + + if (ifsd->generation) { + start = params->activate->gen2.start; + stop = params->activate->gen2.stop; + } else { + start = params->activate->gen0.start; + stop = params->activate->gen0.stop; + } + /* Only the first logical CPU on a core reports result */ first = cpumask_first(cpu_smt_mask(cpu)); @@ -155,12 +173,14 @@ static int doscan(void *data) * take up to 200 milliseconds (in the case where all chunks * are processed in a single pass) before it retires. */ - wrmsrl(MSR_ACTIVATE_SCAN, msrs[0]); + wrmsrl(MSR_ACTIVATE_SCAN, params->activate->data); + rdmsrl(MSR_SCAN_STATUS, status.data); - if (cpu == first) { - /* Pass back the result of the scan */ - rdmsrl(MSR_SCAN_STATUS, msrs[1]); - } + trace_ifs_status(start, stop, status.data); + + /* Pass back the result of the scan */ + if (cpu == first) + params->status = status; return 0; } @@ -179,7 +199,7 @@ static void ifs_test_core(int cpu, struct device *dev) struct ifs_data *ifsd; int to_start, to_stop; int status_chunk; - u64 msrvals[2]; + struct run_params params; int retries; ifsd = ifs_get_data(dev); @@ -190,6 +210,8 @@ static void ifs_test_core(int cpu, struct device *dev) to_start = 0; to_stop = ifsd->valid_chunks - 1; + params.ifsd = ifs_get_data(dev); + if (ifsd->generation) { activate.gen2.start = to_start; activate.gen2.stop = to_stop; @@ -207,12 +229,10 @@ static void ifs_test_core(int cpu, struct device *dev) break; } - msrvals[0] = activate.data; - stop_core_cpuslocked(cpu, doscan, msrvals); - - status.data = msrvals[1]; + params.activate = &activate; + stop_core_cpuslocked(cpu, doscan, ¶ms); - trace_ifs_status(cpu, to_start, to_stop, status.data); + status = params.status; /* 
Some cases can be retried, give up for others */ if (!can_restart(status)) diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h index af0af3f1d9b7..8462dfb7a020 100644 --- a/include/trace/events/intel_ifs.h +++ b/include/trace/events/intel_ifs.h @@ -10,26 +10,23 @@ TRACE_EVENT(ifs_status, - TP_PROTO(int cpu, int start, int stop, u64 status), + TP_PROTO(int start, int stop, u64 status), - TP_ARGS(cpu, start, stop, status), + TP_ARGS(start, stop, status), TP_STRUCT__entry( __field( u64, status ) - __field( int, cpu ) __field( u16, start ) __field( u16, stop ) ), TP_fast_assign( - __entry->cpu = cpu; __entry->start = start; __entry->stop = stop; __entry->status = status; ), - TP_printk("cpu: %d, start: %.4x, stop: %.4x, status: %.16llx", - __entry->cpu, + TP_printk("start: %.4x, stop: %.4x, status: %.16llx", __entry->start, __entry->stop, __entry->status) -- Gitee From a15ad5905db4413ba886bc1e62b575e528208348 Mon Sep 17 00:00:00 2001 From: Ashok Raj Date: Thu, 25 Jan 2024 00:22:52 -0800 Subject: [PATCH 1357/2138] platform/x86/intel/ifs: Add current batch number to trace output MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #10908 commit e272d1e1188e55259dd0e3ba2f8f744a531fdd59 upstream. Add the current batch number in the trace output. When there are failures, it's important to know which test content resulted in failure. # TASK-PID CPU# ||||| TIMESTAMP FUNCTION # | | | ||||| | | migration/0-18 [000] d..1. 527287.084668: ifs_status: batch: 02, start: 0000, stop: 007f, status: 0000000000007f80 migration/128-785 [128] d..1. 
527287.084669: ifs_status: batch: 02, start: 0000, stop: 007f, status: 0000000000007f80 Intel-SIG: commit e272d1e1188e platform/x86/intel/ifs: Add current batch number to trace output Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Signed-off-by: Ashok Raj Reviewed-by: Tony Luck Link: https://lore.kernel.org/r/20240125082254.424859-4-ashok.raj@intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- drivers/platform/x86/intel/ifs/runtest.c | 2 +- include/trace/events/intel_ifs.h | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index e945938b9d65..3f2d3c98092b 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -176,7 +176,7 @@ static int doscan(void *data) wrmsrl(MSR_ACTIVATE_SCAN, params->activate->data); rdmsrl(MSR_SCAN_STATUS, status.data); - trace_ifs_status(start, stop, status.data); + trace_ifs_status(ifsd->cur_batch, start, stop, status.data); /* Pass back the result of the scan */ if (cpu == first) diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h index 8462dfb7a020..8ce2de120f2d 100644 --- a/include/trace/events/intel_ifs.h +++ b/include/trace/events/intel_ifs.h @@ -10,23 +10,26 @@ TRACE_EVENT(ifs_status, - TP_PROTO(int start, int stop, u64 status), + TP_PROTO(int batch, int start, int stop, u64 status), - TP_ARGS(start, stop, status), + TP_ARGS(batch, start, stop, status), TP_STRUCT__entry( + __field( int, batch ) __field( u64, status ) __field( u16, start ) __field( u16, stop ) ), TP_fast_assign( + __entry->batch = batch; __entry->start = start; __entry->stop = stop; __entry->status = status; ), - TP_printk("start: %.4x, stop: %.4x, status: %.16llx", + TP_printk("batch: 
%.2d, start: %.4x, stop: %.4x, status: %.16llx", + __entry->batch, __entry->start, __entry->stop, __entry->status) -- Gitee From aa726da48cce99dd3c5cd0604d6e897c59655d02 Mon Sep 17 00:00:00 2001 From: Ashok Raj Date: Thu, 25 Jan 2024 00:22:53 -0800 Subject: [PATCH 1358/2138] platform/x86/intel/ifs: Replace the exit rendezvous with an entry rendezvous for ARRAY_BIST MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #10908 commit ea15f34d5fb77a0db0dd9f983b647fe5b613cf73 upstream. ARRAY_BIST requires the test to be invoked only from one of the HT siblings of a core. If the other sibling was in mwait(), that didn't permit the test to complete and resulted in several retries before the test could finish. The exit rendezvous was introduced to keep the HT sibling busy until the primary CPU completed the test to avoid those retries. What is actually needed is to ensure that both the threads rendezvous *before* the wrmsr to trigger the test to give good chance to complete the test. The `stop_machine()` function returns only after all the CPUs complete running the function, and provides an exit rendezvous implicitly. In kernel/stop_machine.c::multi_cpu_stop(), every CPU in the mask needs to complete reaching MULTI_STOP_RUN. When all CPUs complete, the state machine moves to next state, i.e MULTI_STOP_EXIT. Thus the underlying API stop_core_cpuslocked() already provides an exit rendezvous. Add the rendezvous earlier in order to ensure the wrmsr is triggered after all CPUs reach the do_array_test(). Remove the exit rendezvous since stop_core_cpuslocked() already guarantees that. 
Intel-SIG: commit ea15f34d5fb7 platform/x86/intel/ifs: Replace the exit rendezvous with an entry rendezvous for ARRAY_BIST Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Signed-off-by: Ashok Raj Reviewed-by: Tony Luck Link: https://lore.kernel.org/r/20240125082254.424859-5-ashok.raj@intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- drivers/platform/x86/intel/ifs/runtest.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index 3f2d3c98092b..5d4f440710a2 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -271,7 +271,7 @@ static void ifs_test_core(int cpu, struct device *dev) } #define SPINUNIT 100 /* 100 nsec */ -static atomic_t array_cpus_out; +static atomic_t array_cpus_in; /* * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus() @@ -298,6 +298,8 @@ static int do_array_test(void *data) int cpu = smp_processor_id(); int first; + wait_for_sibling_cpu(&array_cpus_in, NSEC_PER_SEC); + /* * Only one logical CPU on a core needs to trigger the Array test via MSR write. 
*/ @@ -309,9 +311,6 @@ static int do_array_test(void *data) rdmsrl(MSR_ARRAY_BIST, command->data); } - /* Tests complete faster if the sibling is spinning here */ - wait_for_sibling_cpu(&array_cpus_out, NSEC_PER_SEC); - return 0; } @@ -332,7 +331,7 @@ static void ifs_array_test_core(int cpu, struct device *dev) timed_out = true; break; } - atomic_set(&array_cpus_out, 0); + atomic_set(&array_cpus_in, 0); stop_core_cpuslocked(cpu, do_array_test, &command); if (command.ctrl_result) -- Gitee From 581e4b481cf4f68823668ef3108af2a1cb0853ba Mon Sep 17 00:00:00 2001 From: Ashok Raj Date: Thu, 25 Jan 2024 00:22:54 -0800 Subject: [PATCH 1359/2138] platform/x86/intel/ifs: Add an entry rendezvous for SAF MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #10908 commit ad630f5d92717632a15e1d0b92e5421e6eac7c8d upstream. The activation for Scan at Field (SAF) includes a parameter to make microcode wait for both threads to join. It's preferable to perform an entry rendezvous before the activation to ensure that they start the `wrmsr` close enough to each other. In some cases it has been observed that one of the threads might be just a bit late to arrive. An entry rendezvous reduces the likelihood of these cases occurring. Add an entry rendezvous to ensure the activation on both threads happen close enough to each other. 
Intel-SIG: commit ad630f5d9271 platform/x86/intel/ifs: Add an entry rendezvous for SAF Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Signed-off-by: Ashok Raj Reviewed-by: Tony Luck Link: https://lore.kernel.org/r/20240125082254.424859-6-ashok.raj@intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- drivers/platform/x86/intel/ifs/runtest.c | 48 +++++++++++++----------- 1 file changed, 26 insertions(+), 22 deletions(-) diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index 5d4f440710a2..1f4065fba282 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -140,6 +140,29 @@ static bool can_restart(union ifs_status status) return false; } +#define SPINUNIT 100 /* 100 nsec */ +static atomic_t array_cpus_in; +static atomic_t scan_cpus_in; + +/* + * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus() + */ +static void wait_for_sibling_cpu(atomic_t *t, long long timeout) +{ + int cpu = smp_processor_id(); + const struct cpumask *smt_mask = cpu_smt_mask(cpu); + int all_cpus = cpumask_weight(smt_mask); + + atomic_inc(t); + while (atomic_read(t) < all_cpus) { + if (timeout < SPINUNIT) + return; + ndelay(SPINUNIT); + timeout -= SPINUNIT; + touch_nmi_watchdog(); + } +} + /* * Execute the scan. Called "simultaneously" on all threads of a core * at high priority using the stop_cpus mechanism. @@ -165,6 +188,8 @@ static int doscan(void *data) /* Only the first logical CPU on a core reports result */ first = cpumask_first(cpu_smt_mask(cpu)); + wait_for_sibling_cpu(&scan_cpus_in, NSEC_PER_SEC); + /* * This WRMSR will wait for other HT threads to also write * to this MSR (at most for activate.delay cycles). 
Then it @@ -230,6 +255,7 @@ static void ifs_test_core(int cpu, struct device *dev) } params.activate = &activate; + atomic_set(&scan_cpus_in, 0); stop_core_cpuslocked(cpu, doscan, ¶ms); status = params.status; @@ -270,28 +296,6 @@ static void ifs_test_core(int cpu, struct device *dev) } } -#define SPINUNIT 100 /* 100 nsec */ -static atomic_t array_cpus_in; - -/* - * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus() - */ -static void wait_for_sibling_cpu(atomic_t *t, long long timeout) -{ - int cpu = smp_processor_id(); - const struct cpumask *smt_mask = cpu_smt_mask(cpu); - int all_cpus = cpumask_weight(smt_mask); - - atomic_inc(t); - while (atomic_read(t) < all_cpus) { - if (timeout < SPINUNIT) - return; - ndelay(SPINUNIT); - timeout -= SPINUNIT; - touch_nmi_watchdog(); - } -} - static int do_array_test(void *data) { union ifs_array *command = data; -- Gitee From a8b4143b2ba878775a5a04c17468af402eb27107 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= Date: Thu, 25 Jan 2024 15:03:28 +0200 Subject: [PATCH 1360/2138] platform/x86/intel/ifs: Remove unnecessary initialization of 'ret' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #10908 commit 682c259a849610c7864cc75d52415c782c78653a upstream. The ret variable is unconditionally assigned in ifs_load_firmware(). Therefore, remove its unnecessary initialization. 
Intel-SIG: commit 682c259a8496 platform/x86/intel/ifs: Remove unnecessary initialization of 'ret' Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Reviewed-by: Ashok Raj Link: https://lore.kernel.org/r/20240125130328.11253-1-ilpo.jarvinen@linux.intel.com Signed-off-by: Ilpo Järvinen [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- drivers/platform/x86/intel/ifs/load.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 2cf3b4a8813f..584c44387e10 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -383,7 +383,7 @@ int ifs_load_firmware(struct device *dev) unsigned int expected_size; const struct firmware *fw; char scan_path[64]; - int ret = -EINVAL; + int ret; snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.scan", test->test_num, boot_cpu_data.x86, boot_cpu_data.x86_model, -- Gitee From a300470a903683d5611a8e4e9f70b605c98dbe7a Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Fri, 12 Apr 2024 10:23:47 -0700 Subject: [PATCH 1361/2138] platform/x86/intel/ifs: Classify error scenarios correctly ANBZ: #10908 commit 02153e5dcb361d4a8538363362d78e3a88adf6ee upstream. "Scan controller error" means that scan hardware encountered an error prior to doing an actual test on the target CPU. It does not mean that there is an actual cpu/core failure. "scan signature failure" indicates that the test result on the target core did not match the expected value and should be treated as a cpu failure. Current driver classifies both these scenarios as failures. Modify the driver to classify this situation with a more appropriate "untested" status instead of "fail" status. 
Intel-SIG: commit 02153e5dcb36 platform/x86/intel/ifs: Classify error scenarios correctly Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Reviewed-by: Ashok Raj Reviewed-by: Kuppuswamy Sathyanarayanan Link: https://lore.kernel.org/r/20240412172349.544064-2-jithu.joseph@intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- drivers/platform/x86/intel/ifs/runtest.c | 27 +++++++++++++----------- 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index 1f4065fba282..be3d51ed0e47 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -69,6 +69,19 @@ static const char * const scan_test_status[] = { static void message_not_tested(struct device *dev, int cpu, union ifs_status status) { + struct ifs_data *ifsd = ifs_get_data(dev); + + /* + * control_error is set when the microcode runs into a problem + * loading the image from the reserved BIOS memory, or it has + * been corrupted. Reloading the image may fix this issue. + */ + if (status.control_error) { + dev_warn(dev, "CPU(s) %*pbl: Scan controller error. Batch: %02x version: 0x%x\n", + cpumask_pr_args(cpu_smt_mask(cpu)), ifsd->cur_batch, ifsd->loaded_version); + return; + } + if (status.error_code < ARRAY_SIZE(scan_test_status)) { dev_info(dev, "CPU(s) %*pbl: SCAN operation did not start. %s\n", cpumask_pr_args(cpu_smt_mask(cpu)), @@ -90,16 +103,6 @@ static void message_fail(struct device *dev, int cpu, union ifs_status status) { struct ifs_data *ifsd = ifs_get_data(dev); - /* - * control_error is set when the microcode runs into a problem - * loading the image from the reserved BIOS memory, or it has - * been corrupted. 
Reloading the image may fix this issue. - */ - if (status.control_error) { - dev_err(dev, "CPU(s) %*pbl: could not execute from loaded scan image. Batch: %02x version: 0x%x\n", - cpumask_pr_args(cpu_smt_mask(cpu)), ifsd->cur_batch, ifsd->loaded_version); - } - /* * signature_error is set when the output from the scan chains does not * match the expected signature. This might be a transient problem (e.g. @@ -285,10 +288,10 @@ static void ifs_test_core(int cpu, struct device *dev) /* Update status for this core */ ifsd->scan_details = status.data; - if (status.control_error || status.signature_error) { + if (status.signature_error) { ifsd->status = SCAN_TEST_FAIL; message_fail(dev, cpu, status); - } else if (status.error_code) { + } else if (status.control_error || status.error_code) { ifsd->status = SCAN_NOT_TESTED; message_not_tested(dev, cpu, status); } else { -- Gitee From cba887bb83f006e20eed2516ea4a551feb512410 Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Fri, 12 Apr 2024 10:23:48 -0700 Subject: [PATCH 1362/2138] platform/x86/intel/ifs: trace: display batch num in hex ANBZ: #10908 commit 15b429f4e047dc4f55bc38bc8e2557a812a7d822 upstream. In Field Scan test image files are named in ff-mm-ss-<batch02x>.scan format. Current trace output prints the batch number in decimal format. Make it easier to correlate the trace line to a test image file by showing the batch number also in hex format. Add 0x prefix to all fields in the trace line to make the type explicit.
Intel-SIG: commit 15b429f4e047 platform/x86/intel/ifs: trace: display batch num in hex Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Reviewed-by: Ashok Raj Reviewed-by: Kuppuswamy Sathyanarayanan Link: https://lore.kernel.org/r/20240412172349.544064-3-jithu.joseph@intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- include/trace/events/intel_ifs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h index 8ce2de120f2d..0d88ebf2c980 100644 --- a/include/trace/events/intel_ifs.h +++ b/include/trace/events/intel_ifs.h @@ -28,7 +28,7 @@ TRACE_EVENT(ifs_status, __entry->status = status; ), - TP_printk("batch: %.2d, start: %.4x, stop: %.4x, status: %.16llx", + TP_printk("batch: 0x%.2x, start: 0x%.4x, stop: 0x%.4x, status: 0x%.16llx", __entry->batch, __entry->start, __entry->stop, -- Gitee From 96f064eaade441aa1696bdb5806af461be9ffeaa Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Fri, 12 Apr 2024 10:23:49 -0700 Subject: [PATCH 1363/2138] platform/x86/intel/ifs: Disable irq during one load stage ANBZ: #10908 commit bd25a3f5ed51540d873c6c581f4dab08aedc73ea upstream. One of the stages in IFS image loading process involves loading individual chunks (test patterns) from test image file to secure memory. Driver issues a WRMSR(MSR_AUTHENTICATE_AND_COPY_CHUNK) operation to do this. This operation can take up to 5 msec, and if an interrupt occurs in between, the AUTH_AND_COPY_CHUNK u-code implementation aborts the operation. Interrupt sources such as NMI or SMI are handled by retrying. Regular interrupts may occur frequently enough to prevent this operation from ever completing. 
Disable irq on local cpu around the aforementioned WRMSR to allow the operation to complete. Intel-SIG: commit bd25a3f5ed51 platform/x86/intel/ifs: Disable irq during one load stage Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Signed-off-by: Jithu Joseph Reviewed-by: Tony Luck Reviewed-by: Ashok Raj Reviewed-by: Kuppuswamy Sathyanarayanan Link: https://lore.kernel.org/r/20240412172349.544064-4-jithu.joseph@intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- drivers/platform/x86/intel/ifs/load.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 584c44387e10..39f19cb51749 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -233,7 +233,9 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) chunk_table[0] = starting_chunk_nr + i; chunk_table[1] = linear_addr; do { + local_irq_disable(); wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, (u64)chunk_table); + local_irq_enable(); rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); err_code = chunk_status.error_code; } while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count); -- Gitee From 99a1f28eebbb4044975cdfcf004d015beeb0eeb1 Mon Sep 17 00:00:00 2001 From: Pengfei Xu Date: Fri, 31 May 2024 15:53:47 +0800 Subject: [PATCH 1364/2138] selftests: ifs: verify test interfaces are created by the driver ANBZ: #10908 commit 8e51106d02d32cd83807fa56c602020c2309ace0 upstream. IFS (In Field Scan) driver exposes its functionality via sysfs interfaces. Applications prepare and exercise the tests by interacting with the aforementioned sysfs files. Verify that the necessary sysfs entries are created after loading the IFS driver. 
Initialize test variables needed for building subsequent kself-test cases. Intel-SIG: commit 8e51106d02d3 selftests: ifs: verify test interfaces are created by the driver Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Reviewed-by: Jithu Joseph Reviewed-by: Kuppuswamy Sathyanarayanan Co-developed-by: Ashok Raj Signed-off-by: Ashok Raj Signed-off-by: Pengfei Xu Acked-by: Jithu Joseph Signed-off-by: Shuah Khan [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- MAINTAINERS | 1 + tools/testing/selftests/Makefile | 1 + .../drivers/platform/x86/intel/ifs/Makefile | 6 + .../platform/x86/intel/ifs/test_ifs.sh | 179 ++++++++++++++++++ 4 files changed, 187 insertions(+) create mode 100644 tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile create mode 100755 tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh diff --git a/MAINTAINERS b/MAINTAINERS index 02ec5c36d214..1617ce15cdd2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10603,6 +10603,7 @@ R: Tony Luck S: Maintained F: drivers/platform/x86/intel/ifs F: include/trace/events/intel_ifs.h +F: tools/testing/selftests/drivers/platform/x86/intel/ifs/ INTEL INTEGRATED SENSOR HUB DRIVER M: Srinivas Pandruvada diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 5b61b8bb29f8..5c77c4e00443 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -18,6 +18,7 @@ TARGETS += drivers/dma-buf TARGETS += drivers/s390x/uvdevice TARGETS += drivers/net/bonding TARGETS += drivers/net/team +TARGETS += drivers/platform/x86/intel/ifs TARGETS += efivarfs TARGETS += exec TARGETS += fchmodat2 diff --git a/tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile b/tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile new file mode 100644 index 000000000000..03d0449d307c --- /dev/null +++ 
b/tools/testing/selftests/drivers/platform/x86/intel/ifs/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# Makefile for ifs(In Field Scan) selftests + +TEST_PROGS := test_ifs.sh + +include ../../../../../lib.mk diff --git a/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh b/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh new file mode 100755 index 000000000000..90d099578199 --- /dev/null +++ b/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh @@ -0,0 +1,179 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Test the functionality of the Intel IFS(In Field Scan) driver. +# + +# Matched with kselftest framework: tools/testing/selftests/kselftest.h +readonly KSFT_PASS=0 +readonly KSFT_FAIL=1 +readonly KSFT_XFAIL=2 +readonly KSFT_SKIP=4 + +readonly IFS_SCAN_MODE="0" +readonly IFS_PATH="/sys/devices/virtual/misc/intel_ifs" +readonly IFS_SCAN_SYSFS_PATH="${IFS_PATH}_${IFS_SCAN_MODE}" +readonly PASS="PASS" +readonly FAIL="FAIL" +readonly INFO="INFO" +readonly XFAIL="XFAIL" +readonly SKIP="SKIP" +readonly IFS_NAME="intel_ifs" + +# Matches arch/x86/include/asm/intel-family.h and +# drivers/platform/x86/intel/ifs/core.c requirement as follows +readonly SAPPHIRERAPIDS_X="8f" +readonly EMERALDRAPIDS_X="cf" + +readonly INTEL_FAM6="06" + +FML="" +MODEL="" + +TRUE="true" +FALSE="false" +RESULT=$KSFT_PASS +export INTERVAL_TIME=1 +# For IFS cleanup tags +ORIGIN_IFS_LOADED="" +IFS_LOG="/tmp/ifs_logs.$$" + +append_log() +{ + echo -e "$1" | tee -a "$IFS_LOG" +} + +ifs_scan_result_summary() +{ + local failed_info pass_num skip_num fail_num + + if [[ -e "$IFS_LOG" ]]; then + failed_info=$(grep ^"\[${FAIL}\]" "$IFS_LOG") + fail_num=$(grep -c ^"\[${FAIL}\]" "$IFS_LOG") + skip_num=$(grep -c ^"\[${SKIP}\]" "$IFS_LOG") + pass_num=$(grep -c ^"\[${PASS}\]" "$IFS_LOG") + + if [[ "$fail_num" -ne 0 ]]; then + RESULT=$KSFT_FAIL + echo "[$INFO] IFS test failure summary:" + echo "$failed_info" + elif [[ "$skip_num" -ne 0 ]]; 
then + RESULT=$KSFT_SKIP + fi + echo "[$INFO] IFS test pass:$pass_num, skip:$skip_num, fail:$fail_num" + else + echo "[$INFO] No file $IFS_LOG for IFS scan summary" + fi +} + +ifs_cleanup() +{ + lsmod | grep -q "$IFS_NAME" && [[ "$ORIGIN_IFS_LOADED" == "$FALSE" ]] && { + echo "[$INFO] modprobe -r $IFS_NAME" + modprobe -r "$IFS_NAME" + } + + ifs_scan_result_summary + [[ -e "$IFS_LOG" ]] && rm -rf "$IFS_LOG" + + echo "[RESULT] IFS test exit with $RESULT" + exit "$RESULT" +} + +test_exit() +{ + local info=$1 + RESULT=$2 + + declare -A EXIT_MAP + EXIT_MAP[$KSFT_PASS]=$PASS + EXIT_MAP[$KSFT_FAIL]=$FAIL + EXIT_MAP[$KSFT_XFAIL]=$XFAIL + EXIT_MAP[$KSFT_SKIP]=$SKIP + + append_log "[${EXIT_MAP[$RESULT]}] $info" + ifs_cleanup +} + +get_cpu_fms() +{ + FML=$(grep -m 1 "family" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}') + MODEL=$(grep -m 1 "model" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}') +} + +check_cpu_ifs_support_interval_time() +{ + get_cpu_fms + + if [[ "$FML" != "$INTEL_FAM6" ]]; then + test_exit "CPU family:$FML does not support IFS" "$KSFT_SKIP" + fi + + # Ucode has time interval requirement for IFS scan on same CPU as follows: + case $MODEL in + "$SAPPHIRERAPIDS_X") + INTERVAL_TIME=180; + ;; + "$EMERALDRAPIDS_X") + INTERVAL_TIME=30; + ;; + *) + # Set default interval time for other platforms + INTERVAL_TIME=1; + append_log "[$INFO] CPU FML:$FML model:0x$MODEL, default: 1s interval time" + ;; + esac +} + +check_ifs_loaded() +{ + local ifs_info="" + + ifs_info=$(lsmod | grep "$IFS_NAME") + if [[ -z "$ifs_info" ]]; then + append_log "[$INFO] modprobe $IFS_NAME" + modprobe "$IFS_NAME" || { + test_exit "Check if CONFIG_INTEL_IFS is set to m or \ +platform doesn't support ifs" "$KSFT_SKIP" + } + ifs_info=$(lsmod | grep "$IFS_NAME") + [[ -n "$ifs_info" ]] || test_exit "No ifs module listed by lsmod" "$KSFT_FAIL" + fi +} + +test_ifs_scan_entry() +{ + local ifs_info="" + + ifs_info=$(lsmod | grep "$IFS_NAME") + + if [[ -z "$ifs_info" ]]; then + 
ORIGIN_IFS_LOADED="$FALSE" + check_ifs_loaded + else + ORIGIN_IFS_LOADED="$TRUE" + append_log "[$INFO] Module $IFS_NAME is already loaded" + fi + + if [[ -d "$IFS_SCAN_SYSFS_PATH" ]]; then + append_log "[$PASS] IFS sysfs $IFS_SCAN_SYSFS_PATH entry is created\n" + else + test_exit "No sysfs entry in $IFS_SCAN_SYSFS_PATH" "$KSFT_FAIL" + fi +} + +prepare_ifs_test_env() +{ + check_cpu_ifs_support_interval_time +} + +test_ifs() +{ + prepare_ifs_test_env + + test_ifs_scan_entry +} + +trap ifs_cleanup SIGTERM SIGINT +test_ifs +ifs_cleanup -- Gitee From 378b5857c07c3b30f7b79cf9a0c13dff1f186715 Mon Sep 17 00:00:00 2001 From: Pengfei Xu Date: Fri, 31 May 2024 15:53:48 +0800 Subject: [PATCH 1365/2138] selftests: ifs: verify test image loading functionality ANBZ: #10908 commit 20cef3039dcd6930e1a08c948a360eac5c0fce88 upstream. Scan test image files have to be loaded before starting IFS test. Verify that In Field scan driver is able to load valid test image files. Also check if loading an invalid test image file fails. 
Intel-SIG: commit 20cef3039dcd selftests: ifs: verify test image loading functionality Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Reviewed-by: Jithu Joseph Reviewed-by: Kuppuswamy Sathyanarayanan Co-developed-by: Ashok Raj Signed-off-by: Ashok Raj Signed-off-by: Pengfei Xu Acked-by: Jithu Joseph Signed-off-by: Shuah Khan [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- .../platform/x86/intel/ifs/test_ifs.sh | 121 +++++++++++++++++- 1 file changed, 120 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh b/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh index 90d099578199..dc78ad9100ca 100755 --- a/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh +++ b/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh @@ -10,6 +10,7 @@ readonly KSFT_FAIL=1 readonly KSFT_XFAIL=2 readonly KSFT_SKIP=4 +readonly IMG_PATH="/lib/firmware/intel/ifs_0" readonly IFS_SCAN_MODE="0" readonly IFS_PATH="/sys/devices/virtual/misc/intel_ifs" readonly IFS_SCAN_SYSFS_PATH="${IFS_PATH}_${IFS_SCAN_MODE}" @@ -29,14 +30,18 @@ readonly INTEL_FAM6="06" FML="" MODEL="" - +STEPPING="" +CPU_FMS="" TRUE="true" FALSE="false" RESULT=$KSFT_PASS +IMAGE_NAME="" export INTERVAL_TIME=1 # For IFS cleanup tags ORIGIN_IFS_LOADED="" +IFS_IMAGE_NEED_RESTORE=$FALSE IFS_LOG="/tmp/ifs_logs.$$" +DEFAULT_IMG_ID="" append_log() { @@ -68,6 +73,13 @@ ifs_scan_result_summary() ifs_cleanup() { + echo "[$INFO] Restore environment after IFS test" + + # Restore ifs origin image if origin image backup step is needed + [[ "$IFS_IMAGE_NEED_RESTORE" == "$TRUE" ]] && { + mv -f "$IMG_PATH"/"$IMAGE_NAME"_origin "$IMG_PATH"/"$IMAGE_NAME" + } + lsmod | grep -q "$IFS_NAME" && [[ "$ORIGIN_IFS_LOADED" == "$FALSE" ]] && { echo "[$INFO] modprobe -r $IFS_NAME" modprobe -r "$IFS_NAME" @@ -80,6 +92,21 
@@ ifs_cleanup() exit "$RESULT" } +do_cmd() +{ + local cmd=$* + local ret="" + + append_log "[$INFO] $cmd" + eval "$cmd" + ret=$? + if [[ $ret -ne 0 ]]; then + append_log "[$FAIL] $cmd failed. Return code is $ret" + RESULT=$KSFT_XFAIL + ifs_cleanup + fi +} + test_exit() { local info=$1 @@ -99,6 +126,8 @@ get_cpu_fms() { FML=$(grep -m 1 "family" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}') MODEL=$(grep -m 1 "model" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}') + STEPPING=$(grep -m 1 "stepping" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}') + CPU_FMS="${FML}-${MODEL}-${STEPPING}" } check_cpu_ifs_support_interval_time() @@ -162,9 +191,93 @@ test_ifs_scan_entry() fi } +load_image() +{ + local image_id=$1 + local image_info="" + local ret="" + + check_ifs_loaded + if [[ -e "${IMG_PATH}/${IMAGE_NAME}" ]]; then + append_log "[$INFO] echo 0x$image_id > ${IFS_SCAN_SYSFS_PATH}/current_batch" + echo "0x$image_id" > "$IFS_SCAN_SYSFS_PATH"/current_batch 2>/dev/null + ret=$? + [[ "$ret" -eq 0 ]] || { + append_log "[$FAIL] Load ifs image $image_id failed with ret:$ret\n" + return "$ret" + } + image_info=$(cat ${IFS_SCAN_SYSFS_PATH}/current_batch) + if [[ "$image_info" == 0x"$image_id" ]]; then + append_log "[$PASS] load IFS current_batch:$image_info" + else + append_log "[$FAIL] current_batch:$image_info is not expected:$image_id" + return "$KSFT_FAIL" + fi + else + append_log "[$FAIL] No IFS image file ${IMG_PATH}/${IMAGE_NAME}"\ + return "$KSFT_FAIL" + fi + return 0 +} + +test_load_origin_ifs_image() +{ + local image_id=$1 + + IMAGE_NAME="${CPU_FMS}-${image_id}.scan" + + load_image "$image_id" || return $? 
+ return 0 +} + +test_load_bad_ifs_image() +{ + local image_id=$1 + + IMAGE_NAME="${CPU_FMS}-${image_id}.scan" + + do_cmd "mv -f ${IMG_PATH}/${IMAGE_NAME} ${IMG_PATH}/${IMAGE_NAME}_origin" + + # Set IFS_IMAGE_NEED_RESTORE to true before corrupt the origin ifs image file + IFS_IMAGE_NEED_RESTORE=$TRUE + do_cmd "dd if=/dev/urandom of=${IMG_PATH}/${IMAGE_NAME} bs=1K count=6 2>/dev/null" + + # Use the specified judgment for negative testing + append_log "[$INFO] echo 0x$image_id > ${IFS_SCAN_SYSFS_PATH}/current_batch" + echo "0x$image_id" > "$IFS_SCAN_SYSFS_PATH"/current_batch 2>/dev/null + ret=$? + if [[ "$ret" -ne 0 ]]; then + append_log "[$PASS] Load invalid ifs image failed with ret:$ret not 0 as expected" + else + append_log "[$FAIL] Load invalid ifs image ret:$ret unexpectedly" + fi + + do_cmd "mv -f ${IMG_PATH}/${IMAGE_NAME}_origin ${IMG_PATH}/${IMAGE_NAME}" + IFS_IMAGE_NEED_RESTORE=$FALSE +} + +test_bad_and_origin_ifs_image() +{ + local image_id=$1 + + append_log "[$INFO] Test loading bad and then loading original IFS image:" + test_load_origin_ifs_image "$image_id" || return $? + test_load_bad_ifs_image "$image_id" + # Load origin image again and make sure it's worked + test_load_origin_ifs_image "$image_id" || return $? + append_log "[$INFO] Loading invalid IFS image and then loading initial image passed.\n" +} + prepare_ifs_test_env() { check_cpu_ifs_support_interval_time + + DEFAULT_IMG_ID=$(find $IMG_PATH -maxdepth 1 -name "${CPU_FMS}-[0-9a-fA-F][0-9a-fA-F].scan" \ + 2>/dev/null \ + | sort \ + | head -n 1 \ + | awk -F "-" '{print $NF}' \ + | cut -d "." 
-f 1) } test_ifs() @@ -172,6 +285,12 @@ test_ifs() prepare_ifs_test_env test_ifs_scan_entry + + if [[ -z "$DEFAULT_IMG_ID" ]]; then + append_log "[$SKIP] No proper ${IMG_PATH}/${CPU_FMS}-*.scan, skip ifs_0 scan" + else + test_bad_and_origin_ifs_image "$DEFAULT_IMG_ID" + fi } trap ifs_cleanup SIGTERM SIGINT -- Gitee From e8c2cbd43872bc97d6dc0344dc4aec03d06236bf Mon Sep 17 00:00:00 2001 From: Pengfei Xu Date: Fri, 31 May 2024 15:53:49 +0800 Subject: [PATCH 1366/2138] selftests: ifs: verify IFS scan test functionality ANBZ: #10908 commit 3170f7acfba15895844dc2c0f2d2ff6fd77ad2e1 upstream. Two selftests are added to verify IFS scan test feature: 1. Perform IFS scan test once on each CPU using all the available image files. 2. Perform IFS scan test with the default image on a random cpu for 3 rounds. Intel-SIG: commit 3170f7acfba1 selftests: ifs: verify IFS scan test functionality Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Reviewed-by: Jithu Joseph Reviewed-by: Kuppuswamy Sathyanarayanan Co-developed-by: Ashok Raj Signed-off-by: Ashok Raj Signed-off-by: Pengfei Xu Acked-by: Jithu Joseph Signed-off-by: Shuah Khan [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- .../platform/x86/intel/ifs/test_ifs.sh | 190 +++++++++++++++++- 1 file changed, 189 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh b/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh index dc78ad9100ca..82fc5a461b12 100755 --- a/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh +++ b/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh @@ -10,16 +10,25 @@ readonly KSFT_FAIL=1 readonly KSFT_XFAIL=2 readonly KSFT_SKIP=4 +readonly CPU_SYSFS="/sys/devices/system/cpu" +readonly CPU_OFFLINE_SYSFS="${CPU_SYSFS}/offline" readonly IMG_PATH="/lib/firmware/intel/ifs_0" readonly 
IFS_SCAN_MODE="0" +readonly IFS_ARRAY_BIST_SCAN_MODE="1" readonly IFS_PATH="/sys/devices/virtual/misc/intel_ifs" readonly IFS_SCAN_SYSFS_PATH="${IFS_PATH}_${IFS_SCAN_MODE}" +readonly RUN_TEST="run_test" +readonly STATUS="status" +readonly DETAILS="details" +readonly STATUS_PASS="pass" readonly PASS="PASS" readonly FAIL="FAIL" readonly INFO="INFO" readonly XFAIL="XFAIL" readonly SKIP="SKIP" readonly IFS_NAME="intel_ifs" +readonly ALL="all" +readonly SIBLINGS="siblings" # Matches arch/x86/include/asm/intel-family.h and # drivers/platform/x86/intel/ifs/core.c requirement as follows @@ -28,6 +37,7 @@ readonly EMERALDRAPIDS_X="cf" readonly INTEL_FAM6="06" +LOOP_TIMES=3 FML="" MODEL="" STEPPING="" @@ -36,11 +46,13 @@ TRUE="true" FALSE="false" RESULT=$KSFT_PASS IMAGE_NAME="" -export INTERVAL_TIME=1 +INTERVAL_TIME=1 +OFFLINE_CPUS="" # For IFS cleanup tags ORIGIN_IFS_LOADED="" IFS_IMAGE_NEED_RESTORE=$FALSE IFS_LOG="/tmp/ifs_logs.$$" +RANDOM_CPU="" DEFAULT_IMG_ID="" append_log() @@ -48,6 +60,35 @@ append_log() echo -e "$1" | tee -a "$IFS_LOG" } +online_offline_cpu_list() +{ + local on_off=$1 + local target_cpus=$2 + local cpu="" + local cpu_start="" + local cpu_end="" + local i="" + + if [[ -n "$target_cpus" ]]; then + for cpu in $(echo "$target_cpus" | tr ',' ' '); do + if [[ "$cpu" == *"-"* ]]; then + cpu_start="" + cpu_end="" + i="" + cpu_start=$(echo "$cpu" | cut -d "-" -f 1) + cpu_end=$(echo "$cpu" | cut -d "-" -f 2) + for((i=cpu_start;i<=cpu_end;i++)); do + append_log "[$INFO] echo $on_off > \ +${CPU_SYSFS}/cpu${i}/online" + echo "$on_off" > "$CPU_SYSFS"/cpu"$i"/online + done + else + set_target_cpu "$on_off" "$cpu" + fi + done + fi +} + ifs_scan_result_summary() { local failed_info pass_num skip_num fail_num @@ -80,6 +121,9 @@ ifs_cleanup() mv -f "$IMG_PATH"/"$IMAGE_NAME"_origin "$IMG_PATH"/"$IMAGE_NAME" } + # Restore the CPUs to the state before testing + [[ -z "$OFFLINE_CPUS" ]] || online_offline_cpu_list "0" "$OFFLINE_CPUS" + lsmod | grep -q "$IFS_NAME" && [[ 
"$ORIGIN_IFS_LOADED" == "$FALSE" ]] && { echo "[$INFO] modprobe -r $IFS_NAME" modprobe -r "$IFS_NAME" @@ -122,6 +166,23 @@ test_exit() ifs_cleanup } +online_all_cpus() +{ + local off_cpus="" + + OFFLINE_CPUS=$(cat "$CPU_OFFLINE_SYSFS") + online_offline_cpu_list "1" "$OFFLINE_CPUS" + + off_cpus=$(cat "$CPU_OFFLINE_SYSFS") + if [[ -z "$off_cpus" ]]; then + append_log "[$INFO] All CPUs are online." + else + append_log "[$XFAIL] There is offline cpu:$off_cpus after online all cpu!" + RESULT=$KSFT_XFAIL + ifs_cleanup + fi +} + get_cpu_fms() { FML=$(grep -m 1 "family" /proc/cpuinfo | awk -F ":" '{printf "%02x",$2;}') @@ -268,10 +329,135 @@ test_bad_and_origin_ifs_image() append_log "[$INFO] Loading invalid IFS image and then loading initial image passed.\n" } +ifs_test_cpu() +{ + local ifs_mode=$1 + local cpu_num=$2 + local image_id status details ret result result_info + + echo "$cpu_num" > "$IFS_PATH"_"$ifs_mode"/"$RUN_TEST" + ret=$? + + status=$(cat "${IFS_PATH}_${ifs_mode}/${STATUS}") + details=$(cat "${IFS_PATH}_${ifs_mode}/${DETAILS}") + + if [[ "$ret" -eq 0 && "$status" == "$STATUS_PASS" ]]; then + result="$PASS" + else + result="$FAIL" + fi + + cpu_num=$(cat "${CPU_SYSFS}/cpu${cpu_num}/topology/thread_siblings_list") + + # There is no image file for IFS ARRAY BIST scan + if [[ -e "${IFS_PATH}_${ifs_mode}/current_batch" ]]; then + image_id=$(cat "${IFS_PATH}_${ifs_mode}/current_batch") + result_info=$(printf "[%s] ifs_%1d cpu(s):%s, current_batch:0x%02x, \ +ret:%2d, status:%s, details:0x%016x" \ + "$result" "$ifs_mode" "$cpu_num" "$image_id" "$ret" \ + "$status" "$details") + else + result_info=$(printf "[%s] ifs_%1d cpu(s):%s, ret:%2d, status:%s, details:0x%016x" \ + "$result" "$ifs_mode" "$cpu_num" "$ret" "$status" "$details") + fi + + append_log "$result_info" +} + +ifs_test_cpus() +{ + local cpus_type=$1 + local ifs_mode=$2 + local image_id=$3 + local cpu_max_num="" + local cpu_num="" + + case "$cpus_type" in + "$ALL") + cpu_max_num=$(($(nproc) - 1)) + 
cpus=$(seq 0 $cpu_max_num) + ;; + "$SIBLINGS") + cpus=$(cat ${CPU_SYSFS}/cpu*/topology/thread_siblings_list \ + | sed -e 's/,.*//' \ + | sed -e 's/-.*//' \ + | sort -n \ + | uniq) + ;; + *) + test_exit "Invalid cpus_type:$cpus_type" "$KSFT_XFAIL" + ;; + esac + + for cpu_num in $cpus; do + ifs_test_cpu "$ifs_mode" "$cpu_num" + done + + if [[ -z "$image_id" ]]; then + append_log "[$INFO] ifs_$ifs_mode test $cpus_type cpus completed\n" + else + append_log "[$INFO] ifs_$ifs_mode $cpus_type cpus with $CPU_FMS-$image_id.scan \ +completed\n" + fi +} + +test_ifs_same_cpu_loop() +{ + local ifs_mode=$1 + local cpu_num=$2 + local loop_times=$3 + + append_log "[$INFO] Test ifs mode $ifs_mode on CPU:$cpu_num for $loop_times rounds:" + [[ "$ifs_mode" == "$IFS_SCAN_MODE" ]] && { + load_image "$DEFAULT_IMG_ID" || return $? + } + for (( i=1; i<=loop_times; i++ )); do + append_log "[$INFO] Loop iteration: $i in total of $loop_times" + # Only IFS scan needs the interval time + if [[ "$ifs_mode" == "$IFS_SCAN_MODE" ]]; then + do_cmd "sleep $INTERVAL_TIME" + elif [[ "$ifs_mode" == "$IFS_ARRAY_BIST_SCAN_MODE" ]]; then + true + else + test_exit "Invalid ifs_mode:$ifs_mode" "$KSFT_XFAIL" + fi + + ifs_test_cpu "$ifs_mode" "$cpu_num" + done + append_log "[$INFO] $loop_times rounds of ifs_$ifs_mode test on CPU:$cpu_num completed.\n" +} + +test_ifs_scan_available_imgs() +{ + local image_ids="" + local image_id="" + + append_log "[$INFO] Test ifs scan with available images:" + image_ids=$(find "$IMG_PATH" -maxdepth 1 -name "${CPU_FMS}-[0-9a-fA-F][0-9a-fA-F].scan" \ + 2>/dev/null \ + | sort \ + | awk -F "-" '{print $NF}' \ + | cut -d "." -f 1) + + for image_id in $image_ids; do + load_image "$image_id" || return $? 
+ + ifs_test_cpus "$SIBLINGS" "$IFS_SCAN_MODE" "$image_id" + # IFS scan requires time interval for the scan on the same CPU + do_cmd "sleep $INTERVAL_TIME" + done +} + prepare_ifs_test_env() { + local max_cpu="" + check_cpu_ifs_support_interval_time + online_all_cpus + max_cpu=$(($(nproc) - 1)) + RANDOM_CPU=$(shuf -i 0-$max_cpu -n 1) + DEFAULT_IMG_ID=$(find $IMG_PATH -maxdepth 1 -name "${CPU_FMS}-[0-9a-fA-F][0-9a-fA-F].scan" \ 2>/dev/null \ | sort \ @@ -290,6 +476,8 @@ test_ifs() append_log "[$SKIP] No proper ${IMG_PATH}/${CPU_FMS}-*.scan, skip ifs_0 scan" else test_bad_and_origin_ifs_image "$DEFAULT_IMG_ID" + test_ifs_scan_available_imgs + test_ifs_same_cpu_loop "$IFS_SCAN_MODE" "$RANDOM_CPU" "$LOOP_TIMES" fi } -- Gitee From 3b1f118552801963ce8ec9cc350876b409cb78c7 Mon Sep 17 00:00:00 2001 From: Pengfei Xu Date: Fri, 31 May 2024 15:53:50 +0800 Subject: [PATCH 1367/2138] selftests: ifs: verify IFS ARRAY BIST functionality ANBZ: #10908 commit bb408dae9e73803eab8a648115d6c4a1bca4dba3 upstream. There are two selftest scenarios for ARRAY BIST(Board Integrated System Test) tests: 1. Perform IFS ARRAY BIST tests once on each CPU. 2. Perform IFS ARRAY BIST tests on a random CPU with 3 rounds. These are not meant to be exhaustive, but are some minimal tests for for checking IFS ARRAY BIST. 
Intel-SIG: commit bb408dae9e73 selftests: ifs: verify IFS ARRAY BIST functionality Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Reviewed-by: Jithu Joseph Reviewed-by: Kuppuswamy Sathyanarayanan Co-developed-by: Ashok Raj Signed-off-by: Ashok Raj Signed-off-by: Pengfei Xu Acked-by: Jithu Joseph Signed-off-by: Shuah Khan [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- .../selftests/drivers/platform/x86/intel/ifs/test_ifs.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh b/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh index 82fc5a461b12..8b68964b29f4 100755 --- a/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh +++ b/tools/testing/selftests/drivers/platform/x86/intel/ifs/test_ifs.sh @@ -17,6 +17,7 @@ readonly IFS_SCAN_MODE="0" readonly IFS_ARRAY_BIST_SCAN_MODE="1" readonly IFS_PATH="/sys/devices/virtual/misc/intel_ifs" readonly IFS_SCAN_SYSFS_PATH="${IFS_PATH}_${IFS_SCAN_MODE}" +readonly IFS_ARRAY_BIST_SYSFS_PATH="${IFS_PATH}_${IFS_ARRAY_BIST_SCAN_MODE}" readonly RUN_TEST="run_test" readonly STATUS="status" readonly DETAILS="details" @@ -479,6 +480,13 @@ test_ifs() test_ifs_scan_available_imgs test_ifs_same_cpu_loop "$IFS_SCAN_MODE" "$RANDOM_CPU" "$LOOP_TIMES" fi + + if [[ -d "$IFS_ARRAY_BIST_SYSFS_PATH" ]]; then + ifs_test_cpus "$SIBLINGS" "$IFS_ARRAY_BIST_SCAN_MODE" + test_ifs_same_cpu_loop "$IFS_ARRAY_BIST_SCAN_MODE" "$RANDOM_CPU" "$LOOP_TIMES" + else + append_log "[$SKIP] No $IFS_ARRAY_BIST_SYSFS_PATH, skip IFS ARRAY BIST scan" + fi } trap ifs_cleanup SIGTERM SIGINT -- Gitee From 2123aaa73ed053ad6c9d689d03bd926bb676c4ce Mon Sep 17 00:00:00 2001 From: Kuppuswamy Sathyanarayanan Date: Thu, 1 Aug 2024 05:18:11 +0000 Subject: [PATCH 1368/2138] platform/x86/intel/ifs: Refactor MSR usage in IFS test code 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #10908 commit 7e597d496dfd69c8940a924bc2cc96f1666d33a9 upstream. IFS tests such as Scan at Field (SAF) or Structural Based Functional Test at Field (SBAF), require the user to load a test image. The image loading process is similar across these tests, with the only difference being MSR addresses used. To reuse the code between these tests, remove the hard coding of MSR addresses and allow the driver to pass the MSR addresses per IFS test (via driver device data). Add a new structure named "struct ifs_test_msrs" to specify the test-specific MSR addresses. Each IFS test will provide this structure, enabling them to reuse the common code. This is a preliminary patch in preparation for the addition of SBAF support. Intel-SIG: commit 7e597d496dfd platform/x86/intel/ifs: Refactor MSR usage in IFS test code Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Reviewed-by: Ashok Raj Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Signed-off-by: Kuppuswamy Sathyanarayanan Link: https://lore.kernel.org/r/20240801051814.1935149-2-sathyanarayanan.kuppuswamy@linux.intel.com Signed-off-by: Hans de Goede [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- drivers/platform/x86/intel/ifs/core.c | 9 +++++++++ drivers/platform/x86/intel/ifs/ifs.h | 25 +++++++++++++++++++++++++ drivers/platform/x86/intel/ifs/load.c | 24 ++++++++++++++---------- 3 files changed, 48 insertions(+), 10 deletions(-) diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 7b11198d85a1..1a7ca74abb61 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -40,9 +40,18 @@ static const struct ifs_test_caps array_test = { .test_num = IFS_TYPE_ARRAY_BIST, }; +static const struct ifs_test_msrs scan_msrs = { + 
.copy_hashes = MSR_COPY_SCAN_HASHES, + .copy_hashes_status = MSR_SCAN_HASHES_STATUS, + .copy_chunks = MSR_AUTHENTICATE_AND_COPY_CHUNK, + .copy_chunks_status = MSR_CHUNKS_AUTHENTICATION_STATUS, + .test_ctrl = MSR_SAF_CTRL, +}; + static struct ifs_device ifs_devices[] = { [IFS_TYPE_SAF] = { .test_caps = &scan_test, + .test_msrs = &scan_msrs, .misc = { .name = "intel_ifs_0", .minor = MISC_DYNAMIC_MINOR, diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index 56b9f3e3cf76..738cbc7a5d00 100644 --- a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -266,6 +266,22 @@ struct ifs_test_caps { int test_num; }; +/** + * struct ifs_test_msrs - MSRs used in IFS tests + * @copy_hashes: Copy test hash data + * @copy_hashes_status: Status of copied test hash data + * @copy_chunks: Copy chunks of the test data + * @copy_chunks_status: Status of the copied test data chunks + * @test_ctrl: Control the test attributes + */ +struct ifs_test_msrs { + u32 copy_hashes; + u32 copy_hashes_status; + u32 copy_chunks; + u32 copy_chunks_status; + u32 test_ctrl; +}; + /** * struct ifs_data - attributes related to intel IFS driver * @loaded_version: stores the currently loaded ifs image version. 
@@ -299,6 +315,7 @@ struct ifs_work { struct ifs_device { const struct ifs_test_caps *test_caps; + const struct ifs_test_msrs *test_msrs; struct ifs_data rw_data; struct miscdevice misc; }; @@ -319,6 +336,14 @@ static inline const struct ifs_test_caps *ifs_get_test_caps(struct device *dev) return d->test_caps; } +static inline const struct ifs_test_msrs *ifs_get_test_msrs(struct device *dev) +{ + struct miscdevice *m = dev_get_drvdata(dev); + struct ifs_device *d = container_of(m, struct ifs_device, misc); + + return d->test_msrs; +} + extern bool *ifs_pkg_auth; int ifs_load_firmware(struct device *dev); int do_core_test(int cpu, struct device *dev); diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index 39f19cb51749..ad0c107f0922 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -118,15 +118,17 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) union ifs_scan_hashes_status hashes_status; union ifs_chunks_auth_status chunk_status; struct device *dev = local_work->dev; + const struct ifs_test_msrs *msrs; int i, num_chunks, chunk_size; struct ifs_data *ifsd; u64 linear_addr, base; u32 err_code; ifsd = ifs_get_data(dev); + msrs = ifs_get_test_msrs(dev); /* run scan hash copy */ - wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr); - rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data); + wrmsrl(msrs->copy_hashes, ifs_hash_ptr); + rdmsrl(msrs->copy_hashes_status, hashes_status.data); /* enumerate the scan image information */ num_chunks = hashes_status.num_chunks; @@ -147,8 +149,8 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) linear_addr = base + i * chunk_size; linear_addr |= i; - wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, linear_addr); - rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + wrmsrl(msrs->copy_chunks, linear_addr); + rdmsrl(msrs->copy_chunks_status, chunk_status.data); ifsd->valid_chunks = chunk_status.valid_chunks; 
err_code = chunk_status.error_code; @@ -180,6 +182,7 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) union ifs_scan_hashes_status_gen2 hashes_status; union ifs_chunks_auth_status_gen2 chunk_status; u32 err_code, valid_chunks, total_chunks; + const struct ifs_test_msrs *msrs; int i, num_chunks, chunk_size; union meta_data *ifs_meta; int starting_chunk_nr; @@ -189,10 +192,11 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) int retry_count; ifsd = ifs_get_data(dev); + msrs = ifs_get_test_msrs(dev); if (need_copy_scan_hashes(ifsd)) { - wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr); - rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data); + wrmsrl(msrs->copy_hashes, ifs_hash_ptr); + rdmsrl(msrs->copy_hashes_status, hashes_status.data); /* enumerate the scan image information */ chunk_size = hashes_status.chunk_size * SZ_1K; @@ -212,8 +216,8 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) } if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) { - wrmsrl(MSR_SAF_CTRL, INVALIDATE_STRIDE); - rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + wrmsrl(msrs->test_ctrl, INVALIDATE_STRIDE); + rdmsrl(msrs->copy_chunks_status, chunk_status.data); if (chunk_status.valid_chunks != 0) { dev_err(dev, "Couldn't invalidate installed stride - %d\n", chunk_status.valid_chunks); @@ -234,9 +238,9 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) chunk_table[1] = linear_addr; do { local_irq_disable(); - wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, (u64)chunk_table); + wrmsrl(msrs->copy_chunks, (u64)chunk_table); local_irq_enable(); - rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + rdmsrl(msrs->copy_chunks_status, chunk_status.data); err_code = chunk_status.error_code; } while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count); -- Gitee From a97ebb6803b2817c0e9ad716f8cfcae81ff07a4f Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 1 Aug 2024 05:18:12 +0000 Subject: [PATCH 1369/2138] 
platform/x86/intel/ifs: Add SBAF test image loading support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #10908 commit 0a3e4e94d137daacd5ec092365080eed847f8f01 upstream. Structural Based Functional Test at Field (SBAF) is a new type of testing that provides comprehensive core test coverage complementing existing IFS tests like Scan at Field (SAF) or ArrayBist. SBAF device will appear as a new device instance (intel_ifs_2) under /sys/devices/virtual/misc. The user interaction necessary to load the test image and test a particular core is the same as the existing scan test (intel_ifs_0). During the loading stage, the driver will look for a file named ff-mm-ss-.sbft in the /lib/firmware/intel/ifs_2 directory. The hardware interaction needed for loading the image is similar to SAF, with the only difference being the MSR addresses used. Reuse the SAF image loading code, passing the SBAF-specific MSR addresses via struct ifs_test_msrs in the driver device data. Unlike SAF, the SBAF test image chunks are further divided into smaller logical entities called bundles. Since the SBAF test is initiated per bundle, cache the maximum number of bundles in the current image, which is used for iterating through bundles during SBAF test execution. 
Intel-SIG: commit 0a3e4e94d137 platform/x86/intel/ifs: Add SBAF test image loading support Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Reviewed-by: Ashok Raj Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Signed-off-by: Jithu Joseph Co-developed-by: Kuppuswamy Sathyanarayanan Signed-off-by: Kuppuswamy Sathyanarayanan Link: https://lore.kernel.org/r/20240801051814.1935149-3-sathyanarayanan.kuppuswamy@linux.intel.com Signed-off-by: Hans de Goede [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- arch/x86/include/asm/msr-index.h | 2 ++ drivers/platform/x86/intel/ifs/core.c | 24 +++++++++++++++++ drivers/platform/x86/intel/ifs/ifs.h | 37 ++++++++++++++++++++++++++- drivers/platform/x86/intel/ifs/load.c | 16 +++++++++--- 4 files changed, 74 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index edb0f0a2c57b..d449f6dad529 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -248,6 +248,8 @@ #define MSR_INTEGRITY_CAPS_ARRAY_BIST BIT(MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT) #define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT 4 #define MSR_INTEGRITY_CAPS_PERIODIC_BIST BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT) +#define MSR_INTEGRITY_CAPS_SBAF_BIT 8 +#define MSR_INTEGRITY_CAPS_SBAF BIT(MSR_INTEGRITY_CAPS_SBAF_BIT) #define MSR_INTEGRITY_CAPS_SAF_GEN_MASK GENMASK_ULL(10, 9) #define MSR_LBR_NHM_FROM 0x00000680 diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 1a7ca74abb61..4f571b79f028 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -33,6 +33,7 @@ bool *ifs_pkg_auth; static const struct ifs_test_caps scan_test = { .integrity_cap_bit = MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT, .test_num = IFS_TYPE_SAF, + .image_suffix = "scan", }; static const struct ifs_test_caps 
array_test = { @@ -48,6 +49,20 @@ static const struct ifs_test_msrs scan_msrs = { .test_ctrl = MSR_SAF_CTRL, }; +static const struct ifs_test_msrs sbaf_msrs = { + .copy_hashes = MSR_COPY_SBAF_HASHES, + .copy_hashes_status = MSR_SBAF_HASHES_STATUS, + .copy_chunks = MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK, + .copy_chunks_status = MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS, + .test_ctrl = MSR_SBAF_CTRL, +}; + +static const struct ifs_test_caps sbaf_test = { + .integrity_cap_bit = MSR_INTEGRITY_CAPS_SBAF_BIT, + .test_num = IFS_TYPE_SBAF, + .image_suffix = "sbft", +}; + static struct ifs_device ifs_devices[] = { [IFS_TYPE_SAF] = { .test_caps = &scan_test, @@ -66,6 +81,15 @@ static struct ifs_device ifs_devices[] = { .groups = plat_ifs_array_groups, }, }, + [IFS_TYPE_SBAF] = { + .test_caps = &sbaf_test, + .test_msrs = &sbaf_msrs, + .misc = { + .name = "intel_ifs_2", + .minor = MISC_DYNAMIC_MINOR, + .groups = plat_ifs_groups, + }, + }, }; #define IFS_NUMTESTS ARRAY_SIZE(ifs_devices) diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index 738cbc7a5d00..600bb8a1b285 100644 --- a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -126,11 +126,38 @@ * The driver does not make use of this, it only tests one core at a time. * * .. [#f1] https://github.com/intel/TBD + * + * + * Structural Based Functional Test at Field (SBAF): + * ------------------------------------------------ + * + * SBAF is a new type of testing that provides comprehensive core test + * coverage complementing Scan at Field (SAF) testing. SBAF mimics the + * manufacturing screening environment and leverages the same test suite. + * It makes use of Design For Test (DFT) observation sites and features + * to maximize coverage in minimum time. + * + * Similar to the SAF test, SBAF isolates the core under test from the + * rest of the system during execution. 
Upon completion, the core + * seamlessly resets to its pre-test state and resumes normal operation. + * Any machine checks or hangs encountered during the test are confined to + * the isolated core, preventing disruption to the overall system. + * + * Like the SAF test, the SBAF test is also divided into multiple batches, + * and each batch test can take hundreds of milliseconds (100-200 ms) to + * complete. If such a lengthy interruption is undesirable, it is + * recommended to relocate the time-sensitive applications to other cores. */ #include #include #define MSR_ARRAY_BIST 0x00000105 + +#define MSR_COPY_SBAF_HASHES 0x000002b8 +#define MSR_SBAF_HASHES_STATUS 0x000002b9 +#define MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK 0x000002ba +#define MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS 0x000002bb + #define MSR_COPY_SCAN_HASHES 0x000002c2 #define MSR_SCAN_HASHES_STATUS 0x000002c3 #define MSR_AUTHENTICATE_AND_COPY_CHUNK 0x000002c4 @@ -140,6 +167,7 @@ #define MSR_ARRAY_TRIGGER 0x000002d6 #define MSR_ARRAY_STATUS 0x000002d7 #define MSR_SAF_CTRL 0x000004f0 +#define MSR_SBAF_CTRL 0x000004f8 #define SCAN_NOT_TESTED 0 #define SCAN_TEST_PASS 1 @@ -147,6 +175,7 @@ #define IFS_TYPE_SAF 0 #define IFS_TYPE_ARRAY_BIST 1 +#define IFS_TYPE_SBAF 2 #define ARRAY_GEN0 0 #define ARRAY_GEN1 1 @@ -196,7 +225,8 @@ union ifs_chunks_auth_status_gen2 { u16 valid_chunks; u16 total_chunks; u32 error_code :8; - u32 rsvd2 :24; + u32 rsvd2 :8; + u32 max_bundle :16; }; }; @@ -261,9 +291,12 @@ union ifs_array { #define IFS_SW_TIMEOUT 0xFD #define IFS_SW_PARTIAL_COMPLETION 0xFE +#define IFS_SUFFIX_SZ 5 + struct ifs_test_caps { int integrity_cap_bit; int test_num; + char image_suffix[IFS_SUFFIX_SZ]; }; /** @@ -294,6 +327,7 @@ struct ifs_test_msrs { * @generation: IFS test generation enumerated by hardware * @chunk_size: size of a test chunk * @array_gen: test generation of array test + * @max_bundle: maximum bundle index */ struct ifs_data { int loaded_version; @@ -306,6 +340,7 @@ struct ifs_data { u32 
generation; u32 chunk_size; u32 array_gen; + u32 max_bundle; }; struct ifs_work { diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index ad0c107f0922..de54bd1a5970 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -261,20 +261,22 @@ static int copy_hashes_authenticate_chunks_gen2(struct device *dev) return -EIO; } ifsd->valid_chunks = valid_chunks; + ifsd->max_bundle = chunk_status.max_bundle; return 0; } static int validate_ifs_metadata(struct device *dev) { + const struct ifs_test_caps *test = ifs_get_test_caps(dev); struct ifs_data *ifsd = ifs_get_data(dev); union meta_data *ifs_meta; char test_file[64]; int ret = -EINVAL; - snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.scan", + snprintf(test_file, sizeof(test_file), "%02x-%02x-%02x-%02x.%s", boot_cpu_data.x86, boot_cpu_data.x86_model, - boot_cpu_data.x86_stepping, ifsd->cur_batch); + boot_cpu_data.x86_stepping, ifsd->cur_batch, test->image_suffix); ifs_meta = (union meta_data *)find_meta_data(ifs_header_ptr, META_TYPE_IFS); if (!ifs_meta) { @@ -304,6 +306,12 @@ static int validate_ifs_metadata(struct device *dev) return ret; } + if (ifs_meta->test_type != test->test_num) { + dev_warn(dev, "Metadata test_type %d mismatches with device type\n", + ifs_meta->test_type); + return ret; + } + return 0; } @@ -391,9 +399,9 @@ int ifs_load_firmware(struct device *dev) char scan_path[64]; int ret; - snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.scan", + snprintf(scan_path, sizeof(scan_path), "intel/ifs_%d/%02x-%02x-%02x-%02x.%s", test->test_num, boot_cpu_data.x86, boot_cpu_data.x86_model, - boot_cpu_data.x86_stepping, ifsd->cur_batch); + boot_cpu_data.x86_stepping, ifsd->cur_batch, test->image_suffix); ret = request_firmware_direct(&fw, scan_path, dev); if (ret) { -- Gitee From af650818c8b69ee3602cd53e2ada704e14dccbe1 Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 1 Aug 2024 05:18:13 
+0000 Subject: [PATCH 1370/2138] platform/x86/intel/ifs: Add SBAF test support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #10908 commit 3c4d06bd6e3713235fba5aa5eed9d1898239ec1f upstream. In a core, the SBAF test engine is shared between sibling CPUs. An SBAF test image contains multiple bundles. Each bundle is further composed of subunits called programs. When a SBAF test (for a particular core) is triggered by the user, each SBAF bundle from the loaded test image is executed sequentially on all the threads on the core using the stop_core_cpuslocked mechanism. Each bundle execution is initiated by writing to MSR_ACTIVATE_SBAF. SBAF test bundle execution may be aborted when an interrupt occurs or if the CPU does not have enough power budget for the test. In these cases the kernel restarts the test from the aborted bundle. SBAF execution is not retried if the test fails or if the test makes no forward progress after 5 retries. Intel-SIG: commit 3c4d06bd6e37 platform/x86/intel/ifs: Add SBAF test support Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Reviewed-by: Ashok Raj Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Signed-off-by: Jithu Joseph Signed-off-by: Kuppuswamy Sathyanarayanan Link: https://lore.kernel.org/r/20240801051814.1935149-4-sathyanarayanan.kuppuswamy@linux.intel.com Signed-off-by: Hans de Goede [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- drivers/platform/x86/intel/ifs/ifs.h | 30 +++ drivers/platform/x86/intel/ifs/runtest.c | 232 +++++++++++++++++++++++ 2 files changed, 262 insertions(+) diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index 600bb8a1b285..b261be46bce8 100644 --- a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -157,6 +157,8 @@ #define MSR_SBAF_HASHES_STATUS 
0x000002b9 #define MSR_AUTHENTICATE_AND_COPY_SBAF_CHUNK 0x000002ba #define MSR_SBAF_CHUNKS_AUTHENTICATION_STATUS 0x000002bb +#define MSR_ACTIVATE_SBAF 0x000002bc +#define MSR_SBAF_STATUS 0x000002bd #define MSR_COPY_SCAN_HASHES 0x000002c2 #define MSR_SCAN_HASHES_STATUS 0x000002c3 @@ -283,6 +285,34 @@ union ifs_array { }; }; +/* MSR_ACTIVATE_SBAF bit fields */ +union ifs_sbaf { + u64 data; + struct { + u32 bundle_idx :9; + u32 rsvd1 :5; + u32 pgm_idx :2; + u32 rsvd2 :16; + u32 delay :31; + u32 sigmce :1; + }; +}; + +/* MSR_SBAF_STATUS bit fields */ +union ifs_sbaf_status { + u64 data; + struct { + u32 bundle_idx :9; + u32 rsvd1 :5; + u32 pgm_idx :2; + u32 rsvd2 :16; + u32 error_code :8; + u32 rsvd3 :21; + u32 test_fail :1; + u32 sbaf_status :2; + }; +}; + /* * Driver populated error-codes * 0xFD: Test timed out before completing all the chunks. diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index be3d51ed0e47..3e1cdde80a4f 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -29,6 +29,13 @@ struct run_params { union ifs_status status; }; +struct sbaf_run_params { + struct ifs_data *ifsd; + int *retry_cnt; + union ifs_sbaf *activate; + union ifs_sbaf_status status; +}; + /* * Number of TSC cycles that a logical CPU will wait for the other * logical CPU on the core in the WRMSR(ACTIVATE_SCAN). 
@@ -146,6 +153,7 @@ static bool can_restart(union ifs_status status) #define SPINUNIT 100 /* 100 nsec */ static atomic_t array_cpus_in; static atomic_t scan_cpus_in; +static atomic_t sbaf_cpus_in; /* * Simplified cpu sibling rendezvous loop based on microcode loader __wait_for_cpus() @@ -387,6 +395,224 @@ static void ifs_array_test_gen1(int cpu, struct device *dev) ifsd->status = SCAN_TEST_PASS; } +#define SBAF_STATUS_PASS 0 +#define SBAF_STATUS_SIGN_FAIL 1 +#define SBAF_STATUS_INTR 2 +#define SBAF_STATUS_TEST_FAIL 3 + +enum sbaf_status_err_code { + IFS_SBAF_NO_ERROR = 0, + IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN = 1, + IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS = 2, + IFS_SBAF_UNASSIGNED_ERROR_CODE3 = 3, + IFS_SBAF_INVALID_BUNDLE_INDEX = 4, + IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS = 5, + IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY = 6, + IFS_SBAF_UNASSIGNED_ERROR_CODE7 = 7, + IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT = 8, + IFS_SBAF_INTERRUPTED_DURING_EXECUTION = 9, + IFS_SBAF_INVALID_PROGRAM_INDEX = 0xA, + IFS_SBAF_CORRUPTED_CHUNK = 0xB, + IFS_SBAF_DID_NOT_START = 0xC, +}; + +static const char * const sbaf_test_status[] = { + [IFS_SBAF_NO_ERROR] = "SBAF no error", + [IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN] = "Other thread could not join.", + [IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS] = "Interrupt occurred prior to SBAF coordination.", + [IFS_SBAF_UNASSIGNED_ERROR_CODE3] = "Unassigned error code 0x3", + [IFS_SBAF_INVALID_BUNDLE_INDEX] = "Non-valid sbaf bundles. 
Reload test image", + [IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS] = "Mismatch in arguments between threads T0/T1.", + [IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY] = "Core not capable of performing SBAF currently", + [IFS_SBAF_UNASSIGNED_ERROR_CODE7] = "Unassigned error code 0x7", + [IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT] = "Exceeded number of Logical Processors (LP) allowed to run Scan-At-Field concurrently", + [IFS_SBAF_INTERRUPTED_DURING_EXECUTION] = "Interrupt occurred prior to SBAF start", + [IFS_SBAF_INVALID_PROGRAM_INDEX] = "SBAF program index not valid", + [IFS_SBAF_CORRUPTED_CHUNK] = "SBAF operation aborted due to corrupted chunk", + [IFS_SBAF_DID_NOT_START] = "SBAF operation did not start", +}; + +static void sbaf_message_not_tested(struct device *dev, int cpu, u64 status_data) +{ + union ifs_sbaf_status status = (union ifs_sbaf_status)status_data; + + if (status.error_code < ARRAY_SIZE(sbaf_test_status)) { + dev_info(dev, "CPU(s) %*pbl: SBAF operation did not start. %s\n", + cpumask_pr_args(cpu_smt_mask(cpu)), + sbaf_test_status[status.error_code]); + } else if (status.error_code == IFS_SW_TIMEOUT) { + dev_info(dev, "CPU(s) %*pbl: software timeout during scan\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } else if (status.error_code == IFS_SW_PARTIAL_COMPLETION) { + dev_info(dev, "CPU(s) %*pbl: %s\n", + cpumask_pr_args(cpu_smt_mask(cpu)), + "Not all SBAF bundles executed. 
Maximum forward progress retries exceeded"); + } else { + dev_info(dev, "CPU(s) %*pbl: SBAF unknown status %llx\n", + cpumask_pr_args(cpu_smt_mask(cpu)), status.data); + } +} + +static void sbaf_message_fail(struct device *dev, int cpu, union ifs_sbaf_status status) +{ + /* Failed signature check is set when SBAF signature did not match the expected value */ + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL) { + dev_err(dev, "CPU(s) %*pbl: Failed signature check\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } + + /* Failed to reach end of test */ + if (status.sbaf_status == SBAF_STATUS_TEST_FAIL) { + dev_err(dev, "CPU(s) %*pbl: Failed to complete test\n", + cpumask_pr_args(cpu_smt_mask(cpu))); + } +} + +static bool sbaf_bundle_completed(union ifs_sbaf_status status) +{ + return !(status.sbaf_status || status.error_code); +} + +static bool sbaf_can_restart(union ifs_sbaf_status status) +{ + enum sbaf_status_err_code err_code = status.error_code; + + /* Signature for chunk is bad, or scan test failed */ + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL || + status.sbaf_status == SBAF_STATUS_TEST_FAIL) + return false; + + switch (err_code) { + case IFS_SBAF_NO_ERROR: + case IFS_SBAF_OTHER_THREAD_COULD_NOT_JOIN: + case IFS_SBAF_INTERRUPTED_BEFORE_RENDEZVOUS: + case IFS_SBAF_EXCEED_NUMBER_OF_THREADS_CONCURRENT: + case IFS_SBAF_INTERRUPTED_DURING_EXECUTION: + return true; + case IFS_SBAF_UNASSIGNED_ERROR_CODE3: + case IFS_SBAF_INVALID_BUNDLE_INDEX: + case IFS_SBAF_MISMATCH_ARGS_BETWEEN_THREADS: + case IFS_SBAF_CORE_NOT_CAPABLE_CURRENTLY: + case IFS_SBAF_UNASSIGNED_ERROR_CODE7: + case IFS_SBAF_INVALID_PROGRAM_INDEX: + case IFS_SBAF_CORRUPTED_CHUNK: + case IFS_SBAF_DID_NOT_START: + break; + } + return false; +} + +/* + * Execute the SBAF test. Called "simultaneously" on all threads of a core + * at high priority using the stop_cpus mechanism. 
+ */ +static int dosbaf(void *data) +{ + struct sbaf_run_params *run_params = data; + int cpu = smp_processor_id(); + union ifs_sbaf_status status; + struct ifs_data *ifsd; + int first; + + ifsd = run_params->ifsd; + + /* Only the first logical CPU on a core reports result */ + first = cpumask_first(cpu_smt_mask(cpu)); + wait_for_sibling_cpu(&sbaf_cpus_in, NSEC_PER_SEC); + + /* + * This WRMSR will wait for other HT threads to also write + * to this MSR (at most for activate.delay cycles). Then it + * starts scan of each requested bundle. The core test happens + * during the "execution" of the WRMSR. + */ + wrmsrl(MSR_ACTIVATE_SBAF, run_params->activate->data); + rdmsrl(MSR_SBAF_STATUS, status.data); + + /* Pass back the result of the test */ + if (cpu == first) + run_params->status = status; + + return 0; +} + +static void ifs_sbaf_test_core(int cpu, struct device *dev) +{ + struct sbaf_run_params run_params; + union ifs_sbaf_status status = {}; + union ifs_sbaf activate; + unsigned long timeout; + struct ifs_data *ifsd; + int stop_bundle; + int retries; + + ifsd = ifs_get_data(dev); + + activate.data = 0; + activate.delay = IFS_THREAD_WAIT; + + timeout = jiffies + 2 * HZ; + retries = MAX_IFS_RETRIES; + activate.bundle_idx = 0; + stop_bundle = ifsd->max_bundle; + + while (activate.bundle_idx <= stop_bundle) { + if (time_after(jiffies, timeout)) { + status.error_code = IFS_SW_TIMEOUT; + break; + } + + atomic_set(&sbaf_cpus_in, 0); + + run_params.ifsd = ifsd; + run_params.activate = &activate; + run_params.retry_cnt = &retries; + stop_core_cpuslocked(cpu, dosbaf, &run_params); + + status = run_params.status; + + if (sbaf_bundle_completed(status)) { + activate.bundle_idx = status.bundle_idx + 1; + activate.pgm_idx = 0; + retries = MAX_IFS_RETRIES; + continue; + } + + /* Some cases can be retried, give up for others */ + if (!sbaf_can_restart(status)) + break; + + if (status.pgm_idx == activate.pgm_idx) { + /* If no progress retry */ + if (--retries == 0) { + if 
(status.error_code == IFS_NO_ERROR) + status.error_code = IFS_SW_PARTIAL_COMPLETION; + break; + } + } else { + /* if some progress, more pgms remaining in bundle, reset retries */ + retries = MAX_IFS_RETRIES; + activate.bundle_idx = status.bundle_idx; + activate.pgm_idx = status.pgm_idx; + } + } + + /* Update status for this core */ + ifsd->scan_details = status.data; + + if (status.sbaf_status == SBAF_STATUS_SIGN_FAIL || + status.sbaf_status == SBAF_STATUS_TEST_FAIL) { + ifsd->status = SCAN_TEST_FAIL; + sbaf_message_fail(dev, cpu, status); + } else if (status.error_code || status.sbaf_status == SBAF_STATUS_INTR || + (activate.bundle_idx < stop_bundle)) { + ifsd->status = SCAN_NOT_TESTED; + sbaf_message_not_tested(dev, cpu, status.data); + } else { + ifsd->status = SCAN_TEST_PASS; + } +} + /* * Initiate per core test. It wakes up work queue threads on the target cpu and * its sibling cpu. Once all sibling threads wake up, the scan test gets executed and @@ -420,6 +646,12 @@ int do_core_test(int cpu, struct device *dev) else ifs_array_test_gen1(cpu, dev); break; + case IFS_TYPE_SBAF: + if (!ifsd->loaded) + ret = -EPERM; + else + ifs_sbaf_test_core(cpu, dev); + break; default: ret = -EINVAL; } -- Gitee From c0b6cdabd0953e84416e6d4bd0c37173cc4c936f Mon Sep 17 00:00:00 2001 From: Jithu Joseph Date: Thu, 1 Aug 2024 05:18:14 +0000 Subject: [PATCH 1371/2138] trace: platform/x86/intel/ifs: Add SBAF trace support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #10908 commit 61b74964536e86445d43acff5cff6ad907ba9321 upstream. Add tracing support for the SBAF IFS tests, which may be useful for debugging systems that fail these tests. Log details like test content batch number, SBAF bundle ID, program index and the exact errors or warnings encountered by each HT thread during the test. 
Intel-SIG: commit 61b74964536e trace: platform/x86/intel/ifs: Add SBAF trace support Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Reviewed-by: Ashok Raj Reviewed-by: Tony Luck Reviewed-by: Ilpo Järvinen Reviewed-by: Steven Rostedt (Google) Signed-off-by: Jithu Joseph Signed-off-by: Kuppuswamy Sathyanarayanan Link: https://lore.kernel.org/r/20240801051814.1935149-5-sathyanarayanan.kuppuswamy@linux.intel.com Signed-off-by: Hans de Goede [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- drivers/platform/x86/intel/ifs/runtest.c | 1 + include/trace/events/intel_ifs.h | 27 ++++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index 3e1cdde80a4f..f978dd05d4d8 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -528,6 +528,7 @@ static int dosbaf(void *data) */ wrmsrl(MSR_ACTIVATE_SBAF, run_params->activate->data); rdmsrl(MSR_SBAF_STATUS, status.data); + trace_ifs_sbaf(ifsd->cur_batch, *run_params->activate, status); /* Pass back the result of the test */ if (cpu == first) diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h index 0d88ebf2c980..70323acde1de 100644 --- a/include/trace/events/intel_ifs.h +++ b/include/trace/events/intel_ifs.h @@ -35,6 +35,33 @@ TRACE_EVENT(ifs_status, __entry->status) ); +TRACE_EVENT(ifs_sbaf, + + TP_PROTO(int batch, union ifs_sbaf activate, union ifs_sbaf_status status), + + TP_ARGS(batch, activate, status), + + TP_STRUCT__entry( + __field( u64, status ) + __field( int, batch ) + __field( u16, bundle ) + __field( u16, pgm ) + ), + + TP_fast_assign( + __entry->status = status.data; + __entry->batch = batch; + __entry->bundle = activate.bundle_idx; + __entry->pgm = activate.pgm_idx; + ), + + TP_printk("batch: 0x%.2x, 
bundle_idx: 0x%.4x, pgm_idx: 0x%.4x, status: 0x%.16llx", + __entry->batch, + __entry->bundle, + __entry->pgm, + __entry->status) +); + #endif /* _TRACE_IFS_H */ /* This part must be outside protection */ -- Gitee From 46523f9221ae8bd681d96afa23d69a24999bbfd8 Mon Sep 17 00:00:00 2001 From: Kuppuswamy Sathyanarayanan Date: Fri, 23 Aug 2024 18:43:37 +0000 Subject: [PATCH 1372/2138] platform/x86/intel/ifs: Fix SBAF title underline length ANBZ: #10908 commit 1e701372d7ac1939d5f8a1dc8172de00192394a8 upstream. In commit # 0a3e4e94d137 ("platform/x86/intel/ifs: Add SBAF test image loading support"), the documentation for "Structural Based Functional Test at Field (SBAF)" had an incomplete underline. This resulted in the following build warning: Documentation/arch/x86/ifs:2: drivers/platform/x86/intel/ifs/ifs.h:131: WARNING: Title underline too short. Fix it by extending the dotted lines to match the length of the title. Intel-SIG: commit 1e701372d7ac platform/x86/intel/ifs: Fix SBAF title underline length Backport to support Intel IFS(In Field Scan) SBAF(Structural Based Functional Test at Field) Fixes: 0a3e4e94d137 ("platform/x86/intel/ifs: Add SBAF test image loading support") Reported-by: Stephen Rothwell Closes: https://lore.kernel.org/lkml/20240820134354.2aec355d@canb.auug.org.au/T/#u Signed-off-by: Kuppuswamy Sathyanarayanan Reviewed-by: Jithu Joseph Link: https://lore.kernel.org/r/20240823184337.2923179-1-sathyanarayanan.kuppuswamy@linux.intel.com Signed-off-by: Hans de Goede [ Aichun Shi: amend commit log ] Signed-off-by: Aichun Shi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3860 --- drivers/platform/x86/intel/ifs/ifs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index b261be46bce8..5c3c0dfa1bf8 100644 --- a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -129,7 +129,7 @@ * * * Structural Based 
Functional Test at Field (SBAF): - * ------------------------------------------------ + * ------------------------------------------------- * * SBAF is a new type of testing that provides comprehensive core test * coverage complementing Scan at Field (SAF) testing. SBAF mimics the -- Gitee From e1924cbb6039d0658772cc01b1daf1b07c380126 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 6 Aug 2024 19:03:10 +0300 Subject: [PATCH 1373/2138] intel_idle: add Granite Rapids Xeon support ANBZ: #9700 commit 370406bf5738dade8ac95a2ee95c29299d4ac902 upstream. Add Granite Rapids Xeon C-states, which are C1, C1E, C6, and C6P. Comparing to previous Xeon Generations (e.g., Emerald Rapids), C6 requests end up only in core C6 state, and no package C-state promotion takes place even if all cores in the package are in core C6. C6P requests also end up in core C6, but if all cores have requested C6P, the SoC will enter the package C6 state. Intel-SIG: commit 370406bf5738 intel_idle: add Granite Rapids Xeon support. Backport intel_idle GNR support Signed-off-by: Artem Bityutskiy Link: https://patch.msgid.link/20240806160310.3719205-1-artem.bityutskiy@linux.intel.com [ rjw: Changelog edits ] Signed-off-by: Rafael J. 
Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3865 --- drivers/idle/intel_idle.c | 46 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 670a041eb910..cd6100f10581 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -993,6 +993,45 @@ static struct cpuidle_state spr_cstates[] __initdata = { .enter = NULL } }; +static struct cpuidle_state gnr_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00), + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 4, + .target_residency = 4, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_INIT_XSTATE, + .exit_latency = 170, + .target_residency = 650, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6P", + .desc = "MWAIT 0x21", + .flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_INIT_XSTATE, + .exit_latency = 210, + .target_residency = 1000, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static struct cpuidle_state atom_cstates[] __initdata = { { .name = "C1E", @@ -1391,6 +1430,12 @@ static const struct idle_cpu idle_cpu_spr __initconst = { .use_acpi = true, }; +static const struct idle_cpu idle_cpu_gnr __initconst = { + .state_table = gnr_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct idle_cpu idle_cpu_avn __initconst = { .state_table = avn_cstates, .disable_promotion_to_c1e = true, @@ -1464,6 +1509,7 @@ static const struct x86_cpu_id 
intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &idle_cpu_gmt), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr), X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr), + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &idle_cpu_gnr), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl), X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt), -- Gitee From c442b704dc8417e3130479465df12c883c77fb0f Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 3 Jan 2024 10:08:53 +0800 Subject: [PATCH 1374/2138] net: wangxun: add ethtool_ops for channel number ANBZ: #9013 commit 937d46ecc5f941b26270bdf7ce37495f12b25955 upstream. Add support to get RX/TX queue number with ethtool -l, and set RX/TX queue number with ethtool -L. Since interrupts need to be rescheduled, adjust the allocation of msix enties. Signed-off-by: Duanqiang Wen Signed-off-by: Jiawen Wu Link: https://lore.kernel.org/all/20240103020854.1656604-8-jiawenwu@trustnetic.com Signed-off-by: David S. 
Miller Link: https://gitee.com/anolis/cloud-kernel/pulls/3172 --- .../net/ethernet/wangxun/libwx/wx_ethtool.c | 57 ++++++++++ .../net/ethernet/wangxun/libwx/wx_ethtool.h | 4 + drivers/net/ethernet/wangxun/libwx/wx_hw.c | 103 +++++++++++++++++- drivers/net/ethernet/wangxun/libwx/wx_lib.c | 86 ++++++++++----- drivers/net/ethernet/wangxun/libwx/wx_type.h | 31 +++++- .../net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 15 +++ drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 69 +++++++----- drivers/net/ethernet/wangxun/ngbe/ngbe_type.h | 4 +- .../ethernet/wangxun/txgbe/txgbe_ethtool.c | 15 +++ .../net/ethernet/wangxun/txgbe/txgbe_main.c | 46 +++++++- .../net/ethernet/wangxun/txgbe/txgbe_phy.c | 12 +- .../net/ethernet/wangxun/txgbe/txgbe_type.h | 6 +- 12 files changed, 380 insertions(+), 68 deletions(-) diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index f3c7e19dff5c..152049600148 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -364,3 +364,60 @@ void wx_set_msglevel(struct net_device *netdev, u32 data) wx->msg_enable = data; } EXPORT_SYMBOL(wx_set_msglevel); + +static unsigned int wx_max_channels(struct wx *wx) +{ + unsigned int max_combined; + + if (!wx->msix_q_entries) { + /* We only support one q_vector without MSI-X */ + max_combined = 1; + } else { + /* support up to max allowed queues with RSS */ + if (wx->mac.type == wx_mac_sp) + max_combined = 63; + else + max_combined = 8; + } + + return max_combined; +} + +void wx_get_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + struct wx *wx = netdev_priv(dev); + + /* report maximum channels */ + ch->max_combined = wx_max_channels(wx); + + /* report info for other vector */ + if (wx->msix_q_entries) { + ch->max_other = 1; + ch->other_count = 1; + } + + /* record RSS queues */ + ch->combined_count = wx->ring_feature[RING_F_RSS].indices; +} +EXPORT_SYMBOL(wx_get_channels); + +int 
wx_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + unsigned int count = ch->combined_count; + struct wx *wx = netdev_priv(dev); + + /* verify other_count has not changed */ + if (ch->other_count != 1) + return -EINVAL; + + /* verify the number of channels does not exceed hardware limits */ + if (count > wx_max_channels(wx)) + return -EINVAL; + + wx->ring_feature[RING_F_RSS].limit = count; + + return 0; +} +EXPORT_SYMBOL(wx_set_channels); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index d79157532d3d..fee7260384ef 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -36,4 +36,8 @@ int wx_set_coalesce(struct net_device *netdev, struct netlink_ext_ack *extack); u32 wx_get_msglevel(struct net_device *netdev); void wx_set_msglevel(struct net_device *netdev, u32 data); +void wx_get_channels(struct net_device *dev, + struct ethtool_channels *ch); +int wx_set_channels(struct net_device *dev, + struct ethtool_channels *ch); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index d11f7d8db194..1db754615cca 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -149,9 +149,9 @@ void wx_irq_disable(struct wx *wx) int vector; for (vector = 0; vector < wx->num_q_vectors; vector++) - synchronize_irq(wx->msix_entries[vector].vector); + synchronize_irq(wx->msix_q_entries[vector].vector); - synchronize_irq(wx->msix_entries[vector].vector); + synchronize_irq(wx->msix_entry->vector); } else { synchronize_irq(pdev->irq); } @@ -1597,6 +1597,72 @@ static void wx_restore_vlan(struct wx *wx) wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid); } +static void wx_store_reta(struct wx *wx) +{ + u8 *indir_tbl = wx->rss_indir_tbl; + u32 reta = 0; + u32 i; + + /* Fill out the redirection table as follows: + 
* - 8 bit wide entries containing 4 bit RSS index + */ + for (i = 0; i < WX_MAX_RETA_ENTRIES; i++) { + reta |= indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + wr32(wx, WX_RDB_RSSTBL(i >> 2), reta); + reta = 0; + } + } +} + +static void wx_setup_reta(struct wx *wx) +{ + u16 rss_i = wx->ring_feature[RING_F_RSS].indices; + u32 random_key_size = WX_RSS_KEY_SIZE / 4; + u32 i, j; + + /* Fill out hash function seeds */ + for (i = 0; i < random_key_size; i++) + wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]); + + /* Fill out redirection table */ + memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl)); + + for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) { + if (j == rss_i) + j = 0; + + wx->rss_indir_tbl[i] = j; + } + + wx_store_reta(wx); +} + +static void wx_setup_mrqc(struct wx *wx) +{ + u32 rss_field = 0; + + /* Disable indicating checksum in descriptor, enables RSS hash */ + wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD); + + /* Perform hash on these packet types */ + rss_field = WX_RDB_RA_CTL_RSS_IPV4 | + WX_RDB_RA_CTL_RSS_IPV4_TCP | + WX_RDB_RA_CTL_RSS_IPV4_UDP | + WX_RDB_RA_CTL_RSS_IPV6 | + WX_RDB_RA_CTL_RSS_IPV6_TCP | + WX_RDB_RA_CTL_RSS_IPV6_UDP; + + netdev_rss_key_fill(wx->rss_key, sizeof(wx->rss_key)); + + wx_setup_reta(wx); + + if (wx->rss_enabled) + rss_field |= WX_RDB_RA_CTL_RSS_EN; + + wr32(wx, WX_RDB_RA_CTL, rss_field); +} + /** * wx_configure_rx - Configure Receive Unit after Reset * @wx: pointer to private structure @@ -1629,6 +1695,8 @@ void wx_configure_rx(struct wx *wx) wr32(wx, WX_PSR_CTL, psrctl); } + wx_setup_mrqc(wx); + /* set_rx_buffer_len must be called before ring initialization */ wx_set_rx_buffer_len(wx); @@ -1826,6 +1894,28 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count) } EXPORT_SYMBOL(wx_get_pcie_msix_counts); +/** + * wx_init_rss_key - Initialize wx RSS key + * @wx: device handle + * + * Allocates and initializes the RSS key if it is not allocated. 
+ **/ +static int wx_init_rss_key(struct wx *wx) +{ + u32 *rss_key; + + if (!wx->rss_key) { + rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL); + if (unlikely(!rss_key)) + return -ENOMEM; + + netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE); + wx->rss_key = rss_key; + } + + return 0; +} + int wx_sw_init(struct wx *wx) { struct pci_dev *pdev = wx->pdev; @@ -1853,14 +1943,23 @@ int wx_sw_init(struct wx *wx) wx->subsystem_device_id = swab16((u16)ssid); } + err = wx_init_rss_key(wx); + if (err < 0) { + wx_err(wx, "rss key allocation failed\n"); + return err; + } + wx->mac_table = kcalloc(wx->mac.num_rar_entries, sizeof(struct wx_mac_addr), GFP_KERNEL); if (!wx->mac_table) { wx_err(wx, "mac_table allocation failed\n"); + kfree(wx->rss_key); return -ENOMEM; } + wx->msix_in_use = false; + return 0; } EXPORT_SYMBOL(wx_sw_init); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 9ebe5493b7f2..59ac53185ab8 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -1568,8 +1568,14 @@ EXPORT_SYMBOL(wx_napi_disable_all); **/ static void wx_set_rss_queues(struct wx *wx) { - wx->num_rx_queues = wx->mac.max_rx_queues; - wx->num_tx_queues = wx->mac.max_tx_queues; + struct wx_ring_feature *f; + + /* set mask for 16 queue limit of RSS */ + f = &wx->ring_feature[RING_F_RSS]; + f->indices = f->limit; + + wx->num_rx_queues = f->limit; + wx->num_tx_queues = f->limit; } static void wx_set_num_queues(struct wx *wx) @@ -1595,35 +1601,51 @@ static int wx_acquire_msix_vectors(struct wx *wx) struct irq_affinity affd = {0, }; int nvecs, i; - nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors); + /* We start by asking for one vector per queue pair */ + nvecs = max(wx->num_rx_queues, wx->num_tx_queues); + nvecs = min_t(int, nvecs, num_online_cpus()); + nvecs = min_t(int, nvecs, wx->mac.max_msix_vectors); - wx->msix_entries = kcalloc(nvecs, - sizeof(struct msix_entry), - 
GFP_KERNEL); - if (!wx->msix_entries) + wx->msix_q_entries = kcalloc(nvecs, sizeof(struct msix_entry), + GFP_KERNEL); + if (!wx->msix_q_entries) return -ENOMEM; + /* One for non-queue interrupts */ + nvecs += 1; + + if (!wx->msix_in_use) { + wx->msix_entry = kcalloc(1, sizeof(struct msix_entry), + GFP_KERNEL); + if (!wx->msix_entry) { + kfree(wx->msix_q_entries); + wx->msix_q_entries = NULL; + return -ENOMEM; + } + } + nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs, nvecs, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &affd); if (nvecs < 0) { wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs); - kfree(wx->msix_entries); - wx->msix_entries = NULL; + kfree(wx->msix_q_entries); + wx->msix_q_entries = NULL; + kfree(wx->msix_entry); + wx->msix_entry = NULL; return nvecs; } + wx->msix_entry->entry = 0; + wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0); + nvecs -= 1; for (i = 0; i < nvecs; i++) { - wx->msix_entries[i].entry = i; - wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i); + wx->msix_q_entries[i].entry = i; + wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1); } - /* one for msix_other */ - nvecs -= 1; wx->num_q_vectors = nvecs; - wx->num_rx_queues = nvecs; - wx->num_tx_queues = nvecs; return 0; } @@ -1645,9 +1667,11 @@ static int wx_set_interrupt_capability(struct wx *wx) if (ret == 0 || (ret == -ENOMEM)) return ret; - wx->num_rx_queues = 1; - wx->num_tx_queues = 1; - wx->num_q_vectors = 1; + /* Disable RSS */ + dev_warn(&wx->pdev->dev, "Disabling RSS support\n"); + wx->ring_feature[RING_F_RSS].limit = 1; + + wx_set_num_queues(wx); /* minmum one for queue, one for misc*/ nvecs = 1; @@ -1906,8 +1930,12 @@ void wx_reset_interrupt_capability(struct wx *wx) return; if (pdev->msix_enabled) { - kfree(wx->msix_entries); - wx->msix_entries = NULL; + kfree(wx->msix_q_entries); + wx->msix_q_entries = NULL; + if (!wx->msix_in_use) { + kfree(wx->msix_entry); + wx->msix_entry = NULL; + } } pci_free_irq_vectors(wx->pdev); } @@ -1979,7 
+2007,7 @@ void wx_free_irq(struct wx *wx) for (vector = 0; vector < wx->num_q_vectors; vector++) { struct wx_q_vector *q_vector = wx->q_vector[vector]; - struct msix_entry *entry = &wx->msix_entries[vector]; + struct msix_entry *entry = &wx->msix_q_entries[vector]; /* free only the irqs that were actually requested */ if (!q_vector->rx.ring && !q_vector->tx.ring) @@ -1989,7 +2017,7 @@ void wx_free_irq(struct wx *wx) } if (wx->mac.type == wx_mac_em) - free_irq(wx->msix_entries[vector].vector, wx); + free_irq(wx->msix_entry->vector, wx); } EXPORT_SYMBOL(wx_free_irq); @@ -2066,6 +2094,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction, wr32(wx, WX_PX_MISC_IVAR, ivar); } else { /* tx or rx causes */ + msix_vector += 1; /* offset for queue vectors */ msix_vector |= WX_PX_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = rd32(wx, WX_PX_IVAR(queue >> 1)); @@ -2096,7 +2125,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector) itr_reg |= WX_PX_ITR_CNT_WDIS; - wr32(wx, WX_PX_ITR(v_idx), itr_reg); + wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg); } /** @@ -2142,9 +2171,9 @@ void wx_configure_vectors(struct wx *wx) wx_write_eitr(q_vector); } - wx_set_ivar(wx, -1, 0, v_idx); + wx_set_ivar(wx, -1, 0, 0); if (pdev->msix_enabled) - wr32(wx, WX_PX_ITR(v_idx), 1950); + wr32(wx, WX_PX_ITR(0), 1950); } EXPORT_SYMBOL(wx_configure_vectors); @@ -2657,11 +2686,14 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) netdev_features_t changed = netdev->features ^ features; struct wx *wx = netdev_priv(netdev); - if (changed & NETIF_F_RXHASH) + if (features & NETIF_F_RXHASH) { wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, WX_RDB_RA_CTL_RSS_EN); - else + wx->rss_enabled = true; + } else { wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0); + wx->rss_enabled = false; + } netdev->features = features; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 17cdffe388d0..b4dc4f341117 100644 --- 
a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -147,8 +147,16 @@ #define WX_RDB_PL_CFG_L2HDR BIT(3) #define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4) #define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5) +#define WX_RDB_RSSTBL(_i) (0x19400 + ((_i) * 4)) +#define WX_RDB_RSSRK(_i) (0x19480 + ((_i) * 4)) #define WX_RDB_RA_CTL 0x194F4 #define WX_RDB_RA_CTL_RSS_EN BIT(2) /* RSS Enable */ +#define WX_RDB_RA_CTL_RSS_IPV4_TCP BIT(16) +#define WX_RDB_RA_CTL_RSS_IPV4 BIT(17) +#define WX_RDB_RA_CTL_RSS_IPV6 BIT(20) +#define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21) +#define WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22) +#define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23) /******************************* PSR Registers *******************************/ /* psr control */ @@ -921,6 +929,19 @@ struct wx_q_vector { struct wx_ring ring[] ____cacheline_internodealigned_in_smp; }; +struct wx_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +}; + +enum wx_ring_f_enum { + RING_F_NONE = 0, + RING_F_RSS, + RING_F_ARRAY_SIZE /* must be last in enum set */ +}; + enum wx_isb_idx { WX_ISB_HEADER, WX_ISB_MISC, @@ -1024,7 +1045,10 @@ struct wx { struct wx_q_vector *q_vector[64]; unsigned int queues_per_pool; - struct msix_entry *msix_entries; + struct msix_entry *msix_q_entries; + struct msix_entry *msix_entry; + bool msix_in_use; + struct wx_ring_feature ring_feature[RING_F_ARRAY_SIZE]; /* misc interrupt status block */ dma_addr_t isb_dma; @@ -1032,8 +1056,9 @@ struct wx { u32 isb_tag[WX_ISB_MAX]; #define WX_MAX_RETA_ENTRIES 128 +#define WX_RSS_INDIR_TBL_MAX 64 u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES]; - + bool rss_enabled; #define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ u32 *rss_key; u32 wol; @@ -1050,7 +1075,7 @@ struct wx { }; #define WX_INTR_ALL (~0ULL) -#define WX_INTR_Q(i) BIT(i) +#define WX_INTR_Q(i) BIT((i) 
+ 1) /* register operations */ #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 5800bd8c8696..cdf35733705f 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -92,6 +92,19 @@ static int ngbe_set_ringparam(struct net_device *netdev, return 0; } +static int ngbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + int err; + + err = wx_set_channels(dev, ch); + if (err < 0) + return err; + + /* use setup TC to update any traffic class queue mapping */ + return ngbe_setup_tc(dev, netdev_get_num_tc(dev)); +} + static const struct ethtool_ops ngbe_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, @@ -115,6 +128,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = { .set_coalesce = wx_set_coalesce, .get_msglevel = wx_get_msglevel, .set_msglevel = wx_set_msglevel, + .get_channels = wx_get_channels, + .set_channels = ngbe_set_channels, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index 96d80c595cb8..fdd6b4f70b7a 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -79,28 +79,6 @@ static void ngbe_init_type_code(struct wx *wx) } } -/** - * ngbe_init_rss_key - Initialize wx RSS key - * @wx: device handle - * - * Allocates and initializes the RSS key if it is not allocated. 
- **/ -static inline int ngbe_init_rss_key(struct wx *wx) -{ - u32 *rss_key; - - if (!wx->rss_key) { - rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL); - if (unlikely(!rss_key)) - return -ENOMEM; - - netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE); - wx->rss_key = rss_key; - } - - return 0; -} - /** * ngbe_sw_init - Initialize general software structures * @wx: board private structure to initialize @@ -134,8 +112,9 @@ static int ngbe_sw_init(struct wx *wx) dev_err(&pdev->dev, "Do not support MSI-X\n"); wx->mac.max_msix_vectors = msix_count; - if (ngbe_init_rss_key(wx)) - return -ENOMEM; + wx->ring_feature[RING_F_RSS].limit = min_t(int, NGBE_MAX_RSS_INDICES, + num_online_cpus()); + wx->rss_enabled = true; /* enable itr by default in dynamic mode */ wx->rx_itr_setting = 1; @@ -175,7 +154,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues) if (queues) wx_intr_enable(wx, NGBE_INTR_ALL); else - wx_intr_enable(wx, NGBE_INTR_MISC(wx)); + wx_intr_enable(wx, NGBE_INTR_MISC); } /** @@ -241,7 +220,7 @@ static int ngbe_request_msix_irqs(struct wx *wx) for (vector = 0; vector < wx->num_q_vectors; vector++) { struct wx_q_vector *q_vector = wx->q_vector[vector]; - struct msix_entry *entry = &wx->msix_entries[vector]; + struct msix_entry *entry = &wx->msix_q_entries[vector]; if (q_vector->tx.ring && q_vector->rx.ring) snprintf(q_vector->name, sizeof(q_vector->name) - 1, @@ -259,7 +238,7 @@ static int ngbe_request_msix_irqs(struct wx *wx) } } - err = request_irq(wx->msix_entries[vector].vector, + err = request_irq(wx->msix_entry->vector, ngbe_msix_other, 0, netdev->name, wx); if (err) { @@ -272,7 +251,7 @@ static int ngbe_request_msix_irqs(struct wx *wx) free_queue_irqs: while (vector) { vector--; - free_irq(wx->msix_entries[vector].vector, + free_irq(wx->msix_q_entries[vector].vector, wx->q_vector[vector]); } wx_reset_interrupt_capability(wx); @@ -480,6 +459,39 @@ static void ngbe_shutdown(struct pci_dev *pdev) } } +/** + * ngbe_setup_tc - routine to configure net_device for 
multiple traffic + * classes. + * + * @dev: net device to configure + * @tc: number of traffic classes to enable + */ +int ngbe_setup_tc(struct net_device *dev, u8 tc) +{ + struct wx *wx = netdev_priv(dev); + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. + */ + if (netif_running(dev)) + ngbe_close(dev); + + wx_clear_interrupt_scheme(wx); + + if (tc) + netdev_set_num_tc(dev, tc); + else + netdev_reset_tc(dev); + + wx_init_interrupt_scheme(wx); + + if (netif_running(dev)) + ngbe_open(dev); + + return 0; +} + static const struct net_device_ops ngbe_netdev_ops = { .ndo_open = ngbe_open, .ndo_stop = ngbe_close, @@ -715,6 +727,7 @@ static void ngbe_remove(struct pci_dev *pdev) pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); + kfree(wx->rss_key); kfree(wx->mac_table); wx_clear_interrupt_scheme(wx); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index 0a98080a197a..f48ed7fc1805 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -80,7 +80,7 @@ NGBE_PX_MISC_IEN_GPIO) #define NGBE_INTR_ALL 0x1FF -#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors) +#define NGBE_INTR_MISC BIT(0) #define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4)) #define NGBE_CFG_LAN_SPEED 0x14440 @@ -105,6 +105,7 @@ #define NGBE_FW_CMD_ST_FAIL 0x70657376 #define NGBE_MAX_FDIR_INDICES 7 +#define NGBE_MAX_RSS_INDICES 8 #define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) #define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1) @@ -132,5 +133,6 @@ extern char ngbe_driver_name[]; void ngbe_down(struct wx *wx); void ngbe_up(struct wx *wx); +int ngbe_setup_tc(struct net_device *dev, u8 tc); #endif /* _NGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c 
b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index fa83cac320d3..084e2faf9db1 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -58,6 +58,19 @@ static int txgbe_set_ringparam(struct net_device *netdev, return 0; } +static int txgbe_set_channels(struct net_device *dev, + struct ethtool_channels *ch) +{ + int err; + + err = wx_set_channels(dev, ch); + if (err < 0) + return err; + + /* use setup TC to update any traffic class queue mapping */ + return txgbe_setup_tc(dev, netdev_get_num_tc(dev)); +} + static const struct ethtool_ops txgbe_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, @@ -79,6 +92,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .set_coalesce = wx_set_coalesce, .get_msglevel = wx_get_msglevel, .set_msglevel = wx_set_msglevel, + .get_channels = wx_get_channels, + .set_channels = txgbe_set_channels, }; void txgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index bcc47bc6264a..3b151c410a5c 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -86,7 +86,7 @@ static void txgbe_irq_enable(struct wx *wx, bool queues) wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK); /* unmask interrupt */ - wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); + wx_intr_enable(wx, TXGBE_INTR_MISC); if (queues) wx_intr_enable(wx, TXGBE_INTR_QALL(wx)); } @@ -145,7 +145,7 @@ static int txgbe_request_msix_irqs(struct wx *wx) for (vector = 0; vector < wx->num_q_vectors; vector++) { struct wx_q_vector *q_vector = wx->q_vector[vector]; - struct msix_entry *entry = &wx->msix_entries[vector]; + struct msix_entry *entry = &wx->msix_q_entries[vector]; if (q_vector->tx.ring && q_vector->rx.ring) snprintf(q_vector->name, sizeof(q_vector->name) - 1, @@ -168,7 +168,7 @@ static int 
txgbe_request_msix_irqs(struct wx *wx) free_queue_irqs: while (vector) { vector--; - free_irq(wx->msix_entries[vector].vector, + free_irq(wx->msix_q_entries[vector].vector, wx->q_vector[vector]); } wx_reset_interrupt_capability(wx); @@ -378,6 +378,10 @@ static int txgbe_sw_init(struct wx *wx) wx_err(wx, "Do not support MSI-X\n"); wx->mac.max_msix_vectors = msix_count; + wx->ring_feature[RING_F_RSS].limit = min_t(int, TXGBE_MAX_RSS_INDICES, + num_online_cpus()); + wx->rss_enabled = true; + /* enable itr by default in dynamic mode */ wx->rx_itr_setting = 1; wx->tx_itr_setting = 1; @@ -504,6 +508,41 @@ static void txgbe_shutdown(struct pci_dev *pdev) } } +/** + * txgbe_setup_tc - routine to configure net_device for multiple traffic + * classes. + * + * @dev: net device to configure + * @tc: number of traffic classes to enable + */ +int txgbe_setup_tc(struct net_device *dev, u8 tc) +{ + struct wx *wx = netdev_priv(dev); + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the + * hardware is not flexible enough to do this dynamically. 
+ */ + if (netif_running(dev)) + txgbe_close(dev); + else + txgbe_reset(wx); + + wx_clear_interrupt_scheme(wx); + + if (tc) + netdev_set_num_tc(dev, tc); + else + netdev_reset_tc(dev); + + wx_init_interrupt_scheme(wx); + + if (netif_running(dev)) + txgbe_open(dev); + + return 0; +} + static const struct net_device_ops txgbe_netdev_ops = { .ndo_open = txgbe_open, .ndo_stop = txgbe_close, @@ -778,6 +817,7 @@ static void txgbe_remove(struct pci_dev *pdev) pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); + kfree(wx->rss_key); kfree(wx->mac_table); wx_clear_interrupt_scheme(wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index b1b5cdc04a92..1b84d495d14e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -487,7 +487,7 @@ static void txgbe_irq_handler(struct irq_desc *desc) } /* unmask interrupt */ - wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); + wx_intr_enable(wx, TXGBE_INTR_MISC); } static int txgbe_gpio_init(struct txgbe *txgbe) @@ -531,7 +531,12 @@ static int txgbe_gpio_init(struct txgbe *txgbe) sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) return -ENOMEM; - girq->parents[0] = wx->msix_entries[wx->num_q_vectors].vector; + + /* now only suuported on MSI-X interrupt */ + if (!wx->msix_entry) + return -EPERM; + + girq->parents[0] = wx->msix_entry->vector; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; @@ -749,6 +754,8 @@ int txgbe_init_phy(struct txgbe *txgbe) goto err_unregister_i2c; } + wx->msix_in_use = true; + return 0; err_unregister_i2c: @@ -781,4 +788,5 @@ void txgbe_remove_phy(struct txgbe *txgbe) phylink_destroy(txgbe->wx->phylink); xpcs_destroy(txgbe->xpcs); software_node_unregister_node_group(txgbe->nodes.group); + txgbe->wx->msix_in_use = false; } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 
801fd0aed1ff..270a6fd9ad0b 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -98,6 +98,7 @@ #define TXGBE_MAX_MSIX_VECTORS 64 #define TXGBE_MAX_FDIR_INDICES 63 +#define TXGBE_MAX_RSS_INDICES 63 #define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) #define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) @@ -122,8 +123,8 @@ #define TXGBE_DEFAULT_RX_WORK 128 #endif -#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors) -#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1) +#define TXGBE_INTR_MISC BIT(0) +#define TXGBE_INTR_QALL(A) GENMASK((A)->num_q_vectors, 1) #define TXGBE_MAX_EITR GENMASK(11, 3) @@ -131,6 +132,7 @@ extern char txgbe_driver_name[]; void txgbe_down(struct wx *wx); void txgbe_up(struct wx *wx); +int txgbe_setup_tc(struct net_device *dev, u8 tc); #define NODE_PROP(_NAME, _PROP) \ (const struct software_node) { \ -- Gitee From 953e1c56cd9342801a898edfa72ed2a0bbd98e5b Mon Sep 17 00:00:00 2001 From: Liu Shixin Date: Sat, 31 Aug 2024 15:07:49 +0800 Subject: [PATCH 1375/2138] mm: shmem: Merge shmem_alloc_hugefolio() with shmem_alloc_folio() ANBZ: #9728 commit c7fcbe1041758d0dedc32502609a73a22884d7b8 openEuler. hulk inclusion category: cleanup bugzilla: https://gitee.com/openeuler/kernel/issues/IAIHPC -------------------------------- Commit 6f775463d002 ("mm: shmem: use folio_alloc_mpol() in shmem_alloc_folio()") merge shmem_alloc_hugefolio() with shmem_alloc_folio(). To avoid context conflicts in the subsequent patches, merge them. 
Dep-of: 3d95bc21cea5 ("mm: shmem: add THP validation for PMD-mapped THP related statistics") Signed-off-by: Liu Shixin [ cherry-picked from https://gitee.com/openeuler/kernel/tree/OLK-6.6/ ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- mm/shmem.c | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 39b10a656bf8..a5705263d047 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1628,27 +1628,14 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) return result; } -static struct folio *shmem_alloc_hugefolio(gfp_t gfp, +static struct folio *shmem_alloc_folio(gfp_t gfp, int order, struct shmem_inode_info *info, pgoff_t index) { struct vm_area_struct pvma; struct folio *folio; shmem_pseudo_vma_init(&pvma, info, index); - folio = vma_alloc_folio(gfp, HPAGE_PMD_ORDER, &pvma, 0, true); - shmem_pseudo_vma_destroy(&pvma); - - return folio; -} - -static struct folio *shmem_alloc_folio(gfp_t gfp, - struct shmem_inode_info *info, pgoff_t index) -{ - struct vm_area_struct pvma; - struct folio *folio; - - shmem_pseudo_vma_init(&pvma, info, index); - folio = vma_alloc_folio(gfp, 0, &pvma, 0, false); + folio = vma_alloc_folio(gfp, order, &pvma, 0, order == HPAGE_PMD_ORDER); shmem_pseudo_vma_destroy(&pvma); return folio; @@ -1683,12 +1670,12 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp, index + HPAGE_PMD_NR - 1, XA_PRESENT)) return ERR_PTR(-E2BIG); - folio = shmem_alloc_hugefolio(gfp, info, index); + folio = shmem_alloc_folio(gfp, HPAGE_PMD_ORDER, info, index); if (!folio) count_vm_event(THP_FILE_FALLBACK); } else { pages = 1; - folio = shmem_alloc_folio(gfp, info, index); + folio = shmem_alloc_folio(gfp, 0, info, index); } if (!folio) return ERR_PTR(-ENOMEM); @@ -1788,7 +1775,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, */ gfp &= ~GFP_CONSTRAINT_MASK; 
VM_BUG_ON_FOLIO(folio_test_large(old), old); - new = shmem_alloc_folio(gfp, info, index); + new = shmem_alloc_folio(gfp, 0, info, index); if (!new) return -ENOMEM; @@ -2601,7 +2588,7 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd, if (!*foliop) { ret = -ENOMEM; - folio = shmem_alloc_folio(gfp, info, pgoff); + folio = shmem_alloc_folio(gfp, 0, info, pgoff); if (!folio) goto out_unacct_blocks; -- Gitee From 0174e36dd35d86a453c328a23d60f817aadb56df Mon Sep 17 00:00:00 2001 From: Bang Li Date: Wed, 22 May 2024 14:12:02 +0800 Subject: [PATCH 1376/2138] mm: add update_mmu_tlb_range() ANBZ: #9728 commit 23b1b44e6c61295084284aa7d87db863a7802b92 upstream. Patch series "Add update_mmu_tlb_range() to simplify code", v4. This series of commits mainly adds the update_mmu_tlb_range() to batch update tlb in an address range and implement update_mmu_tlb() using update_mmu_tlb_range(). After commit 19eaf44954df ("mm: thp: support allocation of anonymous multi-size THP"), We may need to batch update tlb of a certain address range by calling update_mmu_tlb() in a loop. Using the update_mmu_tlb_range(), we can simplify the code and possibly reduce the execution of some unnecessary code in some architectures. This patch (of 3): Add update_mmu_tlb_range(), we can batch update tlb of an address range. 
Link: https://lkml.kernel.org/r/20240522061204.117421-1-libang.li@antgroup.com Link: https://lkml.kernel.org/r/20240522061204.117421-2-libang.li@antgroup.com Signed-off-by: Bang Li Acked-by: David Hildenbrand Cc: Chris Zankel Cc: Huacai Chen Cc: Lance Yang Cc: Max Filippov Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Ryan Roberts Cc: Thomas Bogendoerfer Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- arch/loongarch/include/asm/pgtable.h | 2 ++ arch/mips/include/asm/pgtable.h | 2 ++ arch/riscv/include/asm/pgtable.h | 2 ++ arch/xtensa/include/asm/pgtable.h | 3 +++ arch/xtensa/mm/tlb.c | 6 ++++++ include/linux/pgtable.h | 7 +++++++ 6 files changed, 22 insertions(+) diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index 29d9b12298bc..e48efd4a3e3e 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -472,6 +472,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, #define __HAVE_ARCH_UPDATE_MMU_TLB #define update_mmu_tlb update_mmu_cache +#define update_mmu_tlb_range(vma, addr, ptep, nr) \ + update_mmu_cache_range(NULL, vma, addr, ptep, nr) static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 430b208c0130..58ada9791e5a 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -596,6 +596,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, #define __HAVE_ARCH_UPDATE_MMU_TLB #define update_mmu_tlb update_mmu_cache +#define update_mmu_tlb_range(vma, address, ptep, nr) \ + update_mmu_cache_range(NULL, vma, address, ptep, nr) static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) diff --git a/arch/riscv/include/asm/pgtable.h 
b/arch/riscv/include/asm/pgtable.h index 8fccba6ae37c..a634a53dbd44 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -493,6 +493,8 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, #define __HAVE_ARCH_UPDATE_MMU_TLB #define update_mmu_tlb update_mmu_cache +#define update_mmu_tlb_range(vma, addr, ptep, nr) \ + update_mmu_cache_range(NULL, vma, addr, ptep, nr) static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 9a7e5e57ee9a..436158bd9030 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -413,6 +413,9 @@ typedef pte_t *pte_addr_t; void update_mmu_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); #define __HAVE_ARCH_UPDATE_MMU_TLB +void update_mmu_tlb_range(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, unsigned int nr); +#define update_mmu_tlb_range update_mmu_tlb_range #endif /* !defined (__ASSEMBLY__) */ diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index 4f974b74883c..b1e1f63de72b 100644 --- a/arch/xtensa/mm/tlb.c +++ b/arch/xtensa/mm/tlb.c @@ -169,6 +169,12 @@ void update_mmu_tlb(struct vm_area_struct *vma, local_flush_tlb_page(vma, address); } +void update_mmu_tlb_range(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, unsigned int nr) +{ + local_flush_tlb_range(vma, address, address + PAGE_SIZE * nr); +} + #ifdef CONFIG_DEBUG_TLB_SANITY static unsigned get_pte_for_vaddr(unsigned vaddr) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index e06b7ab08770..189d689ed159 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -708,6 +708,13 @@ static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr, * fault. This function updates TLB only, do nothing with cache or others. 
* It is the difference with function update_mmu_cache. */ +#ifndef update_mmu_tlb_range +static inline void update_mmu_tlb_range(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, unsigned int nr) +{ +} +#endif + #ifndef __HAVE_ARCH_UPDATE_MMU_TLB static inline void update_mmu_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) -- Gitee From 01314faf8009766adb5e0804cd674f83fda0130e Mon Sep 17 00:00:00 2001 From: Bang Li Date: Wed, 22 May 2024 14:12:03 +0800 Subject: [PATCH 1377/2138] mm: implement update_mmu_tlb() using update_mmu_tlb_range() ANBZ: #9728 commit 8f65aa32239f1c3f11b7a25bd5921223bafc5fed upstream. Let's make update_mmu_tlb() simply a generic wrapper around update_mmu_tlb_range(). Only the latter can now be overridden by the architecture. We can now remove __HAVE_ARCH_UPDATE_MMU_TLB as well. Link: https://lkml.kernel.org/r/20240522061204.117421-3-libang.li@antgroup.com Signed-off-by: Bang Li Acked-by: David Hildenbrand Cc: Chris Zankel Cc: Huacai Chen Cc: Lance Yang Cc: Max Filippov Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Ryan Roberts Cc: Thomas Bogendoerfer Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- arch/loongarch/include/asm/pgtable.h | 2 -- arch/mips/include/asm/pgtable.h | 2 -- arch/riscv/include/asm/pgtable.h | 2 -- arch/xtensa/include/asm/pgtable.h | 3 --- arch/xtensa/mm/tlb.c | 6 ------ include/linux/pgtable.h | 4 +--- 6 files changed, 1 insertion(+), 18 deletions(-) diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index e48efd4a3e3e..f5300b66a39d 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -470,8 +470,6 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, #define update_mmu_cache(vma, addr, ptep) \ update_mmu_cache_range(NULL, vma, addr, ptep, 1) -#define __HAVE_ARCH_UPDATE_MMU_TLB -#define 
update_mmu_tlb update_mmu_cache #define update_mmu_tlb_range(vma, addr, ptep, nr) \ update_mmu_cache_range(NULL, vma, addr, ptep, nr) diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 58ada9791e5a..daa48f28ce5e 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -594,8 +594,6 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, #define update_mmu_cache(vma, address, ptep) \ update_mmu_cache_range(NULL, vma, address, ptep, 1) -#define __HAVE_ARCH_UPDATE_MMU_TLB -#define update_mmu_tlb update_mmu_cache #define update_mmu_tlb_range(vma, address, ptep, nr) \ update_mmu_cache_range(NULL, vma, address, ptep, nr) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index a634a53dbd44..63d8a84826e9 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -491,8 +491,6 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, #define update_mmu_cache(vma, addr, ptep) \ update_mmu_cache_range(NULL, vma, addr, ptep, 1) -#define __HAVE_ARCH_UPDATE_MMU_TLB -#define update_mmu_tlb update_mmu_cache #define update_mmu_tlb_range(vma, addr, ptep, nr) \ update_mmu_cache_range(NULL, vma, addr, ptep, nr) diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 436158bd9030..1647a7cc3fbf 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -410,9 +410,6 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, typedef pte_t *pte_addr_t; -void update_mmu_tlb(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep); -#define __HAVE_ARCH_UPDATE_MMU_TLB void update_mmu_tlb_range(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int nr); #define update_mmu_tlb_range update_mmu_tlb_range diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c index b1e1f63de72b..f69feee19d59 100644 --- a/arch/xtensa/mm/tlb.c +++ 
b/arch/xtensa/mm/tlb.c @@ -163,12 +163,6 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) } } -void update_mmu_tlb(struct vm_area_struct *vma, - unsigned long address, pte_t *ptep) -{ - local_flush_tlb_page(vma, address); -} - void update_mmu_tlb_range(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, unsigned int nr) { diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 189d689ed159..4db811acb030 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -715,13 +715,11 @@ static inline void update_mmu_tlb_range(struct vm_area_struct *vma, } #endif -#ifndef __HAVE_ARCH_UPDATE_MMU_TLB static inline void update_mmu_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { + update_mmu_tlb_range(vma, address, ptep, 1); } -#define __HAVE_ARCH_UPDATE_MMU_TLB -#endif /* * Some architectures may be able to avoid expensive synchronization -- Gitee From 28c9928a4830914e5eee58405d94fe15cc47c321 Mon Sep 17 00:00:00 2001 From: Bang Li Date: Wed, 22 May 2024 14:12:04 +0800 Subject: [PATCH 1378/2138] mm: use update_mmu_tlb_range() to simplify code ANBZ: #9728 commit 6faa49d1c4404e0b949fd92f1e891c24870d4f86 upstream. Let us simplify the code by update_mmu_tlb_range(). 
Link: https://lkml.kernel.org/r/20240522061204.117421-4-libang.li@antgroup.com Signed-off-by: Bang Li Reviewed-by: Lance Yang Acked-by: David Hildenbrand Cc: Chris Zankel Cc: Huacai Chen Cc: Max Filippov Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Ryan Roberts Cc: Thomas Bogendoerfer Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- mm/memory.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 496ce1483e46..1bafa57b0b27 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4563,7 +4563,6 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) vm_fault_t ret = 0; int nr_pages = 1; pte_t entry; - int i; /* File mapping without ->vm_ops ? */ if (vma->vm_flags & VM_SHARED) @@ -4632,8 +4631,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) update_mmu_tlb(vma, addr, vmf->pte); goto release; } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { - for (i = 0; i < nr_pages; i++) - update_mmu_tlb(vma, addr + PAGE_SIZE * i, vmf->pte + i); + update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); goto release; } -- Gitee From c265b38d1c001df54fba70c9b1cad657fbfb09c0 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 11 Jun 2024 18:11:05 +0800 Subject: [PATCH 1379/2138] mm: memory: extend finish_fault() to support large folio ANBZ: #9728 commit 43e027e414232b1ce4fa6c96a582417e2c027f2d upstream. Patch series "add mTHP support for anonymous shmem", v5. Anonymous pages have already been supported for multi-size (mTHP) allocation through commit 19eaf44954df, that can allow THP to be configured through the sysfs interface located at '/sys/kernel/mm/transparent_hugepage/hugepage-XXkb/enabled'. However, the anonymous shmem will ignore the anonymous mTHP rule configured through the sysfs interface, and can only use the PMD-mapped THP, that is not reasonable. 
Many implement anonymous page sharing through mmap(MAP_SHARED | MAP_ANONYMOUS), especially in database usage scenarios, therefore, users expect to apply an unified mTHP strategy for anonymous pages, also including the anonymous shared pages, in order to enjoy the benefits of mTHP. For example, lower latency than PMD-mapped THP, smaller memory bloat than PMD-mapped THP, contiguous PTEs on ARM architecture to reduce TLB miss etc. As discussed in the bi-weekly MM meeting[1], the mTHP controls should control all of shmem, not only anonymous shmem, but support will be added iteratively. Therefore, this patch set starts with support for anonymous shmem. The primary strategy is similar to supporting anonymous mTHP. Introduce a new interface '/mm/transparent_hugepage/hugepage-XXkb/shmem_enabled', which can have almost the same values as the top-level '/sys/kernel/mm/transparent_hugepage/shmem_enabled', with adding a new additional "inherit" option and dropping the testing options 'force' and 'deny'. By default all sizes will be set to "never" except PMD size, which is set to "inherit". This ensures backward compatibility with the anonymous shmem enabled of the top level, meanwhile also allows independent control of anonymous shmem enabled for each mTHP. Use the page fault latency tool to measure the performance of 1G anonymous shmem with 32 threads on my machine environment with: ARM64 Architecture, 32 cores, 125G memory: base: mm-unstable user-time sys_time faults_per_sec_per_cpu faults_per_sec 0.04s 3.10s 83516.416 2669684.890 mm-unstable + patchset, anon shmem mTHP disabled user-time sys_time faults_per_sec_per_cpu faults_per_sec 0.02s 3.14s 82936.359 2630746.027 mm-unstable + patchset, anon shmem 64K mTHP enabled user-time sys_time faults_per_sec_per_cpu faults_per_sec 0.08s 0.31s 678630.231 17082522.495 From the data above, it is observed that the patchset has a minimal impact when mTHP is not enabled (some fluctuations observed during testing). 
When enabling 64K mTHP, there is a significant improvement of the page fault latency. [1] https://lore.kernel.org/all/f1783ff0-65bd-4b2b-8952-52b6822a0835@redhat.com/ This patch (of 6): Add large folio mapping establishment support for finish_fault() as a preparation, to support multi-size THP allocation of anonymous shmem pages in the following patches. Keep the same behavior (per-page fault) for non-anon shmem to avoid inflating the RSS unintentionally, and we can discuss what size of mapping to build when extending mTHP to control non-anon shmem in the future. [baolin.wang@linux.alibaba.com: avoid going beyond the PMD pagetable size] Link: https://lkml.kernel.org/r/b0e6a8b1-a32c-459e-ae67-fde5d28773e6@linux.alibaba.com [baolin.wang@linux.alibaba.com: use 'PTRS_PER_PTE' instead of 'PTRS_PER_PTE - 1'] Link: https://lkml.kernel.org/r/e1f5767a-2c9b-4e37-afe6-1de26fe54e41@linux.alibaba.com Link: https://lkml.kernel.org/r/cover.1718090413.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/3a190892355989d42f59cf9f2f98b94694b0d24d.1718090413.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: Zi Yan Reviewed-by: Kefeng Wang Cc: Daniel Gomez Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Lance Yang Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Barry Song Signed-off-by: Andrew Morton [ shawnwang: remove unused type and add_mm_counter() ] Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- mm/memory.c | 57 +++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 49 insertions(+), 8 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 1bafa57b0b27..1e59371071ca 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4899,7 +4899,10 @@ vm_fault_t finish_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct page *page; + struct folio *folio; vm_fault_t ret; + int nr_pages; + unsigned long addr = vmf->address; /* Did we COW the page? 
*/ if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) @@ -4930,22 +4933,60 @@ vm_fault_t finish_fault(struct vm_fault *vmf) return VM_FAULT_OOM; } + folio = page_folio(page); + nr_pages = folio_nr_pages(folio); + + /* + * Using per-page fault to maintain the uffd semantics, and same + * approach also applies to non-anonymous-shmem faults to avoid + * inflating the RSS of the process. + */ + if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) { + nr_pages = 1; + } else if (nr_pages > 1) { + pgoff_t idx = folio_page_idx(folio, page); + /* The page offset of vmf->address within the VMA. */ + pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff; + /* The index of the entry in the pagetable for fault page. */ + pgoff_t pte_off = pte_index(vmf->address); + + /* + * Fallback to per-page fault in case the folio size in page + * cache beyond the VMA limits and PMD pagetable limits. + */ + if (unlikely(vma_off < idx || + vma_off + (nr_pages - idx) > vma_pages(vma) || + pte_off < idx || + pte_off + (nr_pages - idx) > PTRS_PER_PTE)) { + nr_pages = 1; + } else { + /* Now we can set mappings for the whole large folio. 
*/ + addr = vmf->address - idx * PAGE_SIZE; + page = &folio->page; + } + } + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, - vmf->address, &vmf->ptl); + addr, &vmf->ptl); if (!vmf->pte) return VM_FAULT_NOPAGE; /* Re-check under ptl */ - if (likely(!vmf_pte_changed(vmf))) { - struct folio *folio = page_folio(page); - - set_pte_range(vmf, folio, page, 1, vmf->address); - ret = 0; - } else { - update_mmu_tlb(vma, vmf->address, vmf->pte); + if (nr_pages == 1 && unlikely(vmf_pte_changed(vmf))) { + update_mmu_tlb(vma, addr, vmf->pte); + ret = VM_FAULT_NOPAGE; + goto unlock; + } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { + update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); ret = VM_FAULT_NOPAGE; + goto unlock; } + folio_ref_add(folio, nr_pages - 1); + set_pte_range(vmf, folio, page, nr_pages, addr); + ret = 0; + +unlock: pte_unmap_unlock(vmf->pte, vmf->ptl); return ret; } -- Gitee From a579065fa9be91c08d48218b6b3e3f37ea8e7449 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 11 Jun 2024 18:11:06 +0800 Subject: [PATCH 1380/2138] mm: shmem: add THP validation for PMD-mapped THP related statistics ANBZ: #9728 commit 3d95bc21cea558c7cdb2942b4d0223a571e93f27 upstream. In order to extend support for mTHP, add THP validation for PMD-mapped THP related statistics to avoid statistical confusion. 
Link: https://lkml.kernel.org/r/c4b04cbd51e6951cc2436a87be8eaa4a1516faec.1718090413.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: Barry Song Cc: Daniel Gomez Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- mm/shmem.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index a5705263d047..250d35ed59d9 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1671,7 +1671,7 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp, return ERR_PTR(-E2BIG); folio = shmem_alloc_folio(gfp, HPAGE_PMD_ORDER, info, index); - if (!folio) + if (!folio && pages == HPAGE_PMD_NR) count_vm_event(THP_FILE_FALLBACK); } else { pages = 1; @@ -1689,7 +1689,7 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp, if (xa_find(&mapping->i_pages, &index, index + pages - 1, XA_PRESENT)) { error = -EEXIST; - } else if (huge) { + } else if (pages == HPAGE_PMD_NR) { count_vm_event(THP_FILE_FALLBACK); count_vm_event(THP_FILE_FALLBACK_CHARGE); } @@ -2052,7 +2052,8 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, folio = shmem_alloc_and_add_folio(huge_gfp, inode, index, fault_mm, true); if (!IS_ERR(folio)) { - count_vm_event(THP_FILE_ALLOC); + if (folio_test_pmd_mappable(folio)) + count_vm_event(THP_FILE_ALLOC); goto alloced; } if (PTR_ERR(folio) == -EEXIST) -- Gitee From cb62ab8d938524daab57ceb95af33c0781b90dec Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 11 Jun 2024 18:11:07 +0800 Subject: [PATCH 1381/2138] mm: shmem: add multi-size THP sysfs interface for anonymous shmem ANBZ: #9728 commit 4b98995530b77a97912230d8e1564ba7738db19c upstream. 
To support the use of mTHP with anonymous shmem, add a new sysfs interface 'shmem_enabled' in the '/sys/kernel/mm/transparent_hugepage/hugepages-kB/' directory for each mTHP to control whether shmem is enabled for that mTHP, with a value similar to the top level 'shmem_enabled', which can be set to: "always", "inherit (to inherit the top level setting)", "within_size", "advise", "never". An 'inherit' option is added to ensure compatibility with these global settings, and the options 'force' and 'deny' are dropped, which are rather testing artifacts from the old ages. By default, PMD-sized hugepages have enabled="inherit" and all other hugepage sizes have enabled="never" for '/sys/kernel/mm/transparent_hugepage/hugepages-xxkB/shmem_enabled'. In addition, if top level value is 'force', then only PMD-sized hugepages have enabled="inherit", otherwise configuration will be failed and vice versa. That means now we will avoid using non-PMD sized THP to override the global huge allocation. [baolin.wang@linux.alibaba.com: fix transhuge.rst indentation] Link: https://lkml.kernel.org/r/b189d815-998b-4dfd-ba89-218ff51313f8@linux.alibaba.com [akpm@linux-foundation.org: reflow transhuge.rst addition to 80 cols] [baolin.wang@linux.alibaba.com: move huge_shmem_orders_lock under CONFIG_SYSFS] Link: https://lkml.kernel.org/r/eb34da66-7f12-44f3-a39e-2bcc90c33354@linux.alibaba.com [akpm@linux-foundation.org: huge_memory.c needs mm_types.h] Link: https://lkml.kernel.org/r/ffddfa8b3cb4266ff963099ab78cfd7184c57ac7.1718090413.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: Barry Song Cc: Daniel Gomez Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- Documentation/admin-guide/mm/transhuge.rst | 25 ++++++ include/linux/huge_mm.h | 10 +++ 
mm/huge_memory.c | 12 +-- mm/shmem.c | 96 ++++++++++++++++++++++ 4 files changed, 135 insertions(+), 8 deletions(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 53cf4f24e5e9..d4133529a97d 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -331,6 +331,31 @@ deny force Force the huge option on for all - very useful for testing; +Shmem can also use "multi-size THP" (mTHP) by adding a new sysfs knob to +control mTHP allocation: +'/sys/kernel/mm/transparent_hugepage/hugepages-kB/shmem_enabled', +and its value for each mTHP is essentially consistent with the global +setting. An 'inherit' option is added to ensure compatibility with these +global settings. Conversely, the options 'force' and 'deny' are dropped, +which are rather testing artifacts from the old ages. + +always + Attempt to allocate huge pages every time we need a new page; + +inherit + Inherit the top-level "shmem_enabled" value. By default, PMD-sized hugepages + have enabled="inherit" and all other hugepage sizes have enabled="never"; + +never + Do not allocate huge pages; + +within_size + Only allocate huge page if it will be fully within i_size. 
+ Also respect fadvise()/madvise() hints; + +advise + Only allocate huge pages if requested with fadvise()/madvise(); + Need of application restart =========================== diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index e969dc607752..a63b50a868b1 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -6,6 +6,7 @@ #include #include /* only for vma_is_dax() */ +#include vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf); int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, @@ -63,6 +64,7 @@ ssize_t single_hugepage_flag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, enum transparent_hugepage_flag flag); extern struct kobj_attribute shmem_enabled_attr; +extern struct kobj_attribute thpsize_shmem_enabled_attr; #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) #define HPAGE_PMD_NR (1< #include #include +#include #include #include #include @@ -434,14 +435,6 @@ static void thpsize_release(struct kobject *kobj); static DEFINE_SPINLOCK(huge_anon_orders_lock); static LIST_HEAD(thpsize_list); -struct thpsize { - struct kobject kobj; - struct list_head node; - int order; -}; - -#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj) - static ssize_t thpsize_enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -502,6 +495,9 @@ static struct kobj_attribute thpsize_enabled_attr = static struct attribute *thpsize_attrs[] = { &thpsize_enabled_attr.attr, +#ifdef CONFIG_SHMEM + &thpsize_shmem_enabled_attr.attr, +#endif NULL, }; diff --git a/mm/shmem.c b/mm/shmem.c index 250d35ed59d9..9b21e601040f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -130,6 +130,13 @@ struct shmem_options { #define SHMEM_SEEN_QUOTA 32 }; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static unsigned long huge_shmem_orders_always __read_mostly; +static unsigned long huge_shmem_orders_madvise __read_mostly; +static unsigned long huge_shmem_orders_inherit __read_mostly; +static unsigned long 
huge_shmem_orders_within_size __read_mostly; +#endif + #ifdef CONFIG_TMPFS static unsigned long shmem_default_max_blocks(void) { @@ -4627,6 +4634,12 @@ void __init shmem_init(void) SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; else shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */ + + /* + * Default to setting PMD-sized THP to inherit the global setting and + * disable all other multi-size THPs. + */ + huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER); #endif return; @@ -4686,6 +4699,11 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) return -EINVAL; + /* Do not override huge allocation policy with non-PMD sized mTHP */ + if (huge == SHMEM_HUGE_FORCE && + huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER)) + return -EINVAL; + shmem_huge = huge; if (shmem_huge > SHMEM_HUGE_DENY) SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; @@ -4693,6 +4711,84 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, } struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled); +static DEFINE_SPINLOCK(huge_shmem_orders_lock); + +static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int order = to_thpsize(kobj)->order; + const char *output; + + if (test_bit(order, &huge_shmem_orders_always)) + output = "[always] inherit within_size advise never"; + else if (test_bit(order, &huge_shmem_orders_inherit)) + output = "always [inherit] within_size advise never"; + else if (test_bit(order, &huge_shmem_orders_within_size)) + output = "always inherit [within_size] advise never"; + else if (test_bit(order, &huge_shmem_orders_madvise)) + output = "always inherit within_size [advise] never"; + else + output = "always inherit within_size advise [never]"; + + return sysfs_emit(buf, "%s\n", output); +} + +static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int order = 
to_thpsize(kobj)->order; + ssize_t ret = count; + + if (sysfs_streq(buf, "always")) { + spin_lock(&huge_shmem_orders_lock); + clear_bit(order, &huge_shmem_orders_inherit); + clear_bit(order, &huge_shmem_orders_madvise); + clear_bit(order, &huge_shmem_orders_within_size); + set_bit(order, &huge_shmem_orders_always); + spin_unlock(&huge_shmem_orders_lock); + } else if (sysfs_streq(buf, "inherit")) { + /* Do not override huge allocation policy with non-PMD sized mTHP */ + if (shmem_huge == SHMEM_HUGE_FORCE && + order != HPAGE_PMD_ORDER) + return -EINVAL; + + spin_lock(&huge_shmem_orders_lock); + clear_bit(order, &huge_shmem_orders_always); + clear_bit(order, &huge_shmem_orders_madvise); + clear_bit(order, &huge_shmem_orders_within_size); + set_bit(order, &huge_shmem_orders_inherit); + spin_unlock(&huge_shmem_orders_lock); + } else if (sysfs_streq(buf, "within_size")) { + spin_lock(&huge_shmem_orders_lock); + clear_bit(order, &huge_shmem_orders_always); + clear_bit(order, &huge_shmem_orders_inherit); + clear_bit(order, &huge_shmem_orders_madvise); + set_bit(order, &huge_shmem_orders_within_size); + spin_unlock(&huge_shmem_orders_lock); + } else if (sysfs_streq(buf, "madvise")) { + spin_lock(&huge_shmem_orders_lock); + clear_bit(order, &huge_shmem_orders_always); + clear_bit(order, &huge_shmem_orders_inherit); + clear_bit(order, &huge_shmem_orders_within_size); + set_bit(order, &huge_shmem_orders_madvise); + spin_unlock(&huge_shmem_orders_lock); + } else if (sysfs_streq(buf, "never")) { + spin_lock(&huge_shmem_orders_lock); + clear_bit(order, &huge_shmem_orders_always); + clear_bit(order, &huge_shmem_orders_inherit); + clear_bit(order, &huge_shmem_orders_within_size); + clear_bit(order, &huge_shmem_orders_madvise); + spin_unlock(&huge_shmem_orders_lock); + } else { + ret = -EINVAL; + } + + return ret; +} + +struct kobj_attribute thpsize_shmem_enabled_attr = + __ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store); #endif /* 
CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ #else /* !CONFIG_SHMEM */ -- Gitee From 30f4773e5b2a703f78e14692d36152dbc0c428d9 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 11 Jun 2024 18:11:08 +0800 Subject: [PATCH 1382/2138] mm: shmem: add mTHP support for anonymous shmem ANBZ: #9728 commit e7a2ab7b3bb5d87f99f2ea3d4481d52fc5ceb52d upstream. Commit 19eaf44954df adds multi-size THP (mTHP) for anonymous pages, that can allow THP to be configured through the sysfs interface located at '/sys/kernel/mm/transparent_hugepage/hugepage-XXkb/enabled'. However, the anonymous shmem will ignore the anonymous mTHP rule configured through the sysfs interface, and can only use the PMD-mapped THP, that is not reasonable. Users expect to apply the mTHP rule for all anonymous pages, including the anonymous shmem, in order to enjoy the benefits of mTHP. For example, lower latency than PMD-mapped THP, smaller memory bloat than PMD-mapped THP, contiguous PTEs on ARM architecture to reduce TLB miss etc. In addition, the mTHP interfaces can be extended to support all shmem/tmpfs scenarios in the future, especially for the shmem mmap() case. The primary strategy is similar to supporting anonymous mTHP. Introduce a new interface '/mm/transparent_hugepage/hugepage-XXkb/shmem_enabled', which can have almost the same values as the top-level '/sys/kernel/mm/transparent_hugepage/shmem_enabled', with adding a new additional "inherit" option and dropping the testing options 'force' and 'deny'. By default all sizes will be set to "never" except PMD size, which is set to "inherit". This ensures backward compatibility with the anonymous shmem enabled of the top level, meanwhile also allows independent control of anonymous shmem enabled for each mTHP. 
Link: https://lkml.kernel.org/r/65796c1e72e51e15f3410195b5c2d5b6c160d411.1718090413.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: Barry Song Cc: Daniel Gomez Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- include/linux/huge_mm.h | 10 +++ mm/shmem.c | 187 +++++++++++++++++++++++++++++++++------- 2 files changed, 167 insertions(+), 30 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index a63b50a868b1..57159e0f5c09 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -578,6 +578,16 @@ static inline bool thp_migration_supported(void) { return false; } + +static inline int highest_order(unsigned long orders) +{ + return 0; +} + +static inline int next_order(unsigned long *orders, int prev) +{ + return 0; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int split_folio_to_list(struct folio *folio, diff --git a/mm/shmem.c b/mm/shmem.c index 9b21e601040f..ae9b08a03714 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1635,6 +1635,107 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) return result; } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static unsigned long shmem_allowable_huge_orders(struct inode *inode, + struct vm_area_struct *vma, pgoff_t index, + bool global_huge) +{ + unsigned long mask = READ_ONCE(huge_shmem_orders_always); + unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size); + unsigned long vm_flags = vma->vm_flags; + /* + * Check all the (large) orders below HPAGE_PMD_ORDER + 1 that + * are enabled for this vma. 
+ */ + unsigned long orders = BIT(PMD_ORDER + 1) - 1; + loff_t i_size; + int order; + + if ((vm_flags & VM_NOHUGEPAGE) || + test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) + return 0; + + /* If the hardware/firmware marked hugepage support disabled. */ + if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED)) + return 0; + + /* + * Following the 'deny' semantics of the top level, force the huge + * option off from all mounts. + */ + if (shmem_huge == SHMEM_HUGE_DENY) + return 0; + + /* + * Only allow inherit orders if the top-level value is 'force', which + * means non-PMD sized THP can not override 'huge' mount option now. + */ + if (shmem_huge == SHMEM_HUGE_FORCE) + return READ_ONCE(huge_shmem_orders_inherit); + + /* Allow mTHP that will be fully within i_size. */ + order = highest_order(within_size_orders); + while (within_size_orders) { + index = round_up(index + 1, order); + i_size = round_up(i_size_read(inode), PAGE_SIZE); + if (i_size >> PAGE_SHIFT >= index) { + mask |= within_size_orders; + break; + } + + order = next_order(&within_size_orders, order); + } + + if (vm_flags & VM_HUGEPAGE) + mask |= READ_ONCE(huge_shmem_orders_madvise); + + if (global_huge) + mask |= READ_ONCE(huge_shmem_orders_inherit); + + return orders & mask; +} + +static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf, + struct address_space *mapping, pgoff_t index, + unsigned long orders) +{ + struct vm_area_struct *vma = vmf->vma; + unsigned long pages; + int order; + + orders = thp_vma_suitable_orders(vma, vmf->address, orders); + if (!orders) + return 0; + + /* Find the highest order that can add into the page cache */ + order = highest_order(orders); + while (orders) { + pages = 1UL << order; + index = round_down(index, pages); + if (!xa_find(&mapping->i_pages, &index, + index + pages - 1, XA_PRESENT)) + break; + order = next_order(&orders, order); + } + + return orders; +} +#else +static unsigned long shmem_allowable_huge_orders(struct 
inode *inode, + struct vm_area_struct *vma, pgoff_t index, + bool global_huge) +{ + return 0; +} + +static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf, + struct address_space *mapping, pgoff_t index, + unsigned long orders) +{ + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + static struct folio *shmem_alloc_folio(gfp_t gfp, int order, struct shmem_inode_info *info, pgoff_t index) { @@ -1648,38 +1749,55 @@ static struct folio *shmem_alloc_folio(gfp_t gfp, int order, return folio; } -static struct folio *shmem_alloc_and_add_folio(gfp_t gfp, - struct inode *inode, pgoff_t index, - struct mm_struct *fault_mm, bool huge) +static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf, + gfp_t gfp, struct inode *inode, pgoff_t index, + struct mm_struct *fault_mm, unsigned long orders) { struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); - struct folio *folio; + struct vm_area_struct *vma = vmf ? vmf->vma : NULL; + unsigned long suitable_orders = 0; + struct folio *folio = NULL; long pages; - int error; + int error, order; if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) - huge = false; + orders = 0; - if (huge) { - pages = HPAGE_PMD_NR; - index = round_down(index, HPAGE_PMD_NR); + if (orders > 0) { + if (vma && vma_is_anon_shmem(vma)) { + suitable_orders = shmem_suitable_orders(inode, vmf, + mapping, index, orders); + } else if (orders & BIT(HPAGE_PMD_ORDER)) { + pages = HPAGE_PMD_NR; + suitable_orders = BIT(HPAGE_PMD_ORDER); + index = round_down(index, HPAGE_PMD_NR); - /* - * Check for conflict before waiting on a huge allocation. - * Conflict might be that a huge page has just been allocated - * and added to page cache by a racing thread, or that there - * is already at least one small page in the huge extent. - * Be careful to retry when appropriate, but not forever! - * Elsewhere -EEXIST would be the right code, but not here. 
- */ - if (xa_find(&mapping->i_pages, &index, - index + HPAGE_PMD_NR - 1, XA_PRESENT)) - return ERR_PTR(-E2BIG); + /* + * Check for conflict before waiting on a huge allocation. + * Conflict might be that a huge page has just been allocated + * and added to page cache by a racing thread, or that there + * is already at least one small page in the huge extent. + * Be careful to retry when appropriate, but not forever! + * Elsewhere -EEXIST would be the right code, but not here. + */ + if (xa_find(&mapping->i_pages, &index, + index + HPAGE_PMD_NR - 1, XA_PRESENT)) + return ERR_PTR(-E2BIG); + } - folio = shmem_alloc_folio(gfp, HPAGE_PMD_ORDER, info, index); - if (!folio && pages == HPAGE_PMD_NR) - count_vm_event(THP_FILE_FALLBACK); + order = highest_order(suitable_orders); + while (suitable_orders) { + pages = 1UL << order; + index = round_down(index, pages); + folio = shmem_alloc_folio(gfp, order, info, index); + if (folio) + goto allocated; + + if (pages == HPAGE_PMD_NR) + count_vm_event(THP_FILE_FALLBACK); + order = next_order(&suitable_orders, order); + } } else { pages = 1; folio = shmem_alloc_folio(gfp, 0, info, index); @@ -1687,6 +1805,7 @@ static struct folio *shmem_alloc_and_add_folio(gfp_t gfp, if (!folio) return ERR_PTR(-ENOMEM); +allocated: __folio_set_locked(folio); __folio_set_swapbacked(folio); @@ -1981,7 +2100,8 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, struct mm_struct *fault_mm; struct folio *folio; int error; - bool alloced; + bool alloced, huge; + unsigned long orders = 0; if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) return -EFBIG; @@ -2050,14 +2170,21 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, return 0; } - if (shmem_is_huge(inode, index, false, fault_mm, - vma ? vma->vm_flags : 0)) { + huge = shmem_is_huge(inode, index, false, fault_mm, + vma ? vma->vm_flags : 0); + /* Find hugepage orders that are allowed for anonymous shmem. 
*/ + if (vma && vma_is_anon_shmem(vma)) + orders = shmem_allowable_huge_orders(inode, vma, index, huge); + else if (huge) + orders = BIT(HPAGE_PMD_ORDER); + + if (orders > 0) { gfp_t huge_gfp; huge_gfp = vma_thp_gfp_mask(vma); huge_gfp = limit_gfp_mask(huge_gfp, gfp); - folio = shmem_alloc_and_add_folio(huge_gfp, - inode, index, fault_mm, true); + folio = shmem_alloc_and_add_folio(vmf, huge_gfp, + inode, index, fault_mm, orders); if (!IS_ERR(folio)) { if (folio_test_pmd_mappable(folio)) count_vm_event(THP_FILE_ALLOC); @@ -2067,7 +2194,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, goto repeat; } - folio = shmem_alloc_and_add_folio(gfp, inode, index, fault_mm, false); + folio = shmem_alloc_and_add_folio(vmf, gfp, inode, index, fault_mm, 0); if (IS_ERR(folio)) { error = PTR_ERR(folio); if (error == -EEXIST) @@ -2078,7 +2205,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, alloced: alloced = true; - if (folio_test_pmd_mappable(folio) && + if (folio_test_large(folio) && DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < folio_next_index(folio) - 1) { struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); -- Gitee From 0f38fa9fb75a5437f2e60f4ffee427bf464cc614 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 11 Jun 2024 18:11:09 +0800 Subject: [PATCH 1383/2138] mm: shmem: add mTHP size alignment in shmem_get_unmapped_area ANBZ: #9728 commit 5a9dd10380a16b343aa87d80d5bcc24409a03f5b upstream. Although the top-level hugepage allocation can be turned off, anonymous shmem can still use mTHP by configuring the sysfs interface located at '/sys/kernel/mm/transparent_hugepage/hugepage-XXkb/shmem_enabled'. Therefore, add alignment for mTHP size to provide a suitable alignment address in shmem_get_unmapped_area(). 
Link: https://lkml.kernel.org/r/0c549b57cf7db07503af692d8546ecfad0fcce52.1718090413.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Tested-by: Lance Yang Cc: Barry Song Cc: Daniel Gomez Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kefeng Wang Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- mm/shmem.c | 40 +++++++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index ae9b08a03714..8b1ee4b2010f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2392,6 +2392,7 @@ unsigned long shmem_get_unmapped_area(struct file *file, unsigned long inflated_len; unsigned long inflated_addr; unsigned long inflated_offset; + unsigned long hpage_size; if (len > TASK_SIZE) return -ENOMEM; @@ -2410,8 +2411,6 @@ unsigned long shmem_get_unmapped_area(struct file *file, if (shmem_huge == SHMEM_HUGE_DENY) return addr; - if (len < HPAGE_PMD_SIZE) - return addr; if (flags & MAP_FIXED) return addr; /* @@ -2423,8 +2422,11 @@ unsigned long shmem_get_unmapped_area(struct file *file, if (uaddr == addr) return addr; + hpage_size = HPAGE_PMD_SIZE; if (shmem_huge != SHMEM_HUGE_FORCE) { struct super_block *sb; + unsigned long __maybe_unused hpage_orders; + int order = 0; if (file) { VM_BUG_ON(file->f_op != &shmem_file_operations); @@ -2437,18 +2439,38 @@ unsigned long shmem_get_unmapped_area(struct file *file, if (IS_ERR(shm_mnt)) return addr; sb = shm_mnt->mnt_sb; + + /* + * Find the highest mTHP order used for anonymous shmem to + * provide a suitable alignment address. 
+ */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + hpage_orders = READ_ONCE(huge_shmem_orders_always); + hpage_orders |= READ_ONCE(huge_shmem_orders_within_size); + hpage_orders |= READ_ONCE(huge_shmem_orders_madvise); + if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER) + hpage_orders |= READ_ONCE(huge_shmem_orders_inherit); + + if (hpage_orders > 0) { + order = highest_order(hpage_orders); + hpage_size = PAGE_SIZE << order; + } +#endif } - if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) + if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER && !order) return addr; } - offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); - if (offset && offset + len < 2 * HPAGE_PMD_SIZE) + if (len < hpage_size) + return addr; + + offset = (pgoff << PAGE_SHIFT) & (hpage_size - 1); + if (offset && offset + len < 2 * hpage_size) return addr; - if ((addr & (HPAGE_PMD_SIZE-1)) == offset) + if ((addr & (hpage_size - 1)) == offset) return addr; - inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; + inflated_len = len + hpage_size - PAGE_SIZE; if (inflated_len > TASK_SIZE) return addr; if (inflated_len < len) @@ -2460,10 +2482,10 @@ unsigned long shmem_get_unmapped_area(struct file *file, if (inflated_addr & ~PAGE_MASK) return addr; - inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); + inflated_offset = inflated_addr & (hpage_size - 1); inflated_addr += offset - inflated_offset; if (inflated_offset > offset) - inflated_addr += HPAGE_PMD_SIZE; + inflated_addr += hpage_size; if (inflated_addr > TASK_SIZE - len) return addr; -- Gitee From 03d103bf1eb18e2e914332fa1182e7d2ef6e12ea Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 11 Jun 2024 18:11:10 +0800 Subject: [PATCH 1384/2138] mm: shmem: add mTHP counters for anonymous shmem ANBZ: #9728 commit 66f44583f9b617d74ffa2487e75a9c3adf344ddb upstream. Add mTHP counters for anonymous shmem. 
[baolin.wang@linux.alibaba.com: update Documentation/admin-guide/mm/transhuge.rst] Link: https://lkml.kernel.org/r/d86e2e7f-4141-432b-b2ba-c6691f36ef0b@linux.alibaba.com Link: https://lkml.kernel.org/r/4fd9e467d49ae4a747e428bcd821c7d13125ae67.1718090413.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: Lance Yang Cc: Barry Song Cc: Daniel Gomez Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kefeng Wang Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- Documentation/admin-guide/mm/transhuge.rst | 13 +++++++++++++ include/linux/huge_mm.h | 3 +++ mm/huge_memory.c | 6 ++++++ mm/shmem.c | 18 +++++++++++++++--- 4 files changed, 37 insertions(+), 3 deletions(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index d4133529a97d..2b57aabcb929 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -496,6 +496,19 @@ swpout_fallback Usually because failed to allocate some continuous swap space for the huge page. +file_alloc + is incremented every time a file huge page is successfully + allocated. + +file_fallback + is incremented if a file huge page is attempted to be allocated + but fails and instead falls back to using small pages. + +file_fallback_charge + is incremented if a file huge page cannot be charged and instead + falls back to using small pages even though the allocation was + successful. + split is incremented every time a huge page is successfully split into smaller orders. 
This can happen for a variety of reasons but a diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 57159e0f5c09..49fb66826460 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -273,6 +273,9 @@ enum mthp_stat_item { MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, MTHP_STAT_SWPOUT, MTHP_STAT_SWPOUT_FALLBACK, + MTHP_STAT_FILE_ALLOC, + MTHP_STAT_FILE_FALLBACK, + MTHP_STAT_FILE_FALLBACK_CHARGE, MTHP_STAT_SPLIT, MTHP_STAT_SPLIT_FAILED, MTHP_STAT_SPLIT_DEFERRED, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index b940443bba53..05fb23684785 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -541,6 +541,9 @@ DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK); DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT); DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK); +DEFINE_MTHP_STAT_ATTR(file_alloc, MTHP_STAT_FILE_ALLOC); +DEFINE_MTHP_STAT_ATTR(file_fallback, MTHP_STAT_FILE_FALLBACK); +DEFINE_MTHP_STAT_ATTR(file_fallback_charge, MTHP_STAT_FILE_FALLBACK_CHARGE); DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT); DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED); DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED); @@ -551,6 +554,9 @@ static struct attribute *stats_attrs[] = { &anon_fault_fallback_charge_attr.attr, &swpout_attr.attr, &swpout_fallback_attr.attr, + &file_alloc_attr.attr, + &file_fallback_attr.attr, + &file_fallback_charge_attr.attr, &split_attr.attr, &split_failed_attr.attr, &split_deferred_attr.attr, diff --git a/mm/shmem.c b/mm/shmem.c index 8b1ee4b2010f..17d2e5bd8870 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1796,6 +1796,9 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf, if (pages == HPAGE_PMD_NR) count_vm_event(THP_FILE_FALLBACK); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + count_mthp_stat(order, MTHP_STAT_FILE_FALLBACK); +#endif order = next_order(&suitable_orders, 
order); } } else { @@ -1815,9 +1818,15 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf, if (xa_find(&mapping->i_pages, &index, index + pages - 1, XA_PRESENT)) { error = -EEXIST; - } else if (pages == HPAGE_PMD_NR) { - count_vm_event(THP_FILE_FALLBACK); - count_vm_event(THP_FILE_FALLBACK_CHARGE); + } else if (pages > 1) { + if (pages == HPAGE_PMD_NR) { + count_vm_event(THP_FILE_FALLBACK); + count_vm_event(THP_FILE_FALLBACK_CHARGE); + } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_FALLBACK); + count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_FALLBACK_CHARGE); +#endif } goto unlock; } @@ -2188,6 +2197,9 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, if (!IS_ERR(folio)) { if (folio_test_pmd_mappable(folio)) count_vm_event(THP_FILE_ALLOC); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_ALLOC); +#endif goto alloced; } if (PTR_ERR(folio) == -EEXIST) -- Gitee From ba509f5df47bc8948a6a190b7041a39c7b681535 Mon Sep 17 00:00:00 2001 From: Bang Li Date: Fri, 28 Jun 2024 11:23:27 +0800 Subject: [PATCH 1385/2138] mm/shmem: fix input and output inconsistencies ANBZ: #9728 commit 843a2e24c24c5311831860c6b78ceacdd4627000 upstream. Commit 19eaf44954df ("mm: thp: support allocation of anonymous multi-size THP") added mTHP support for anonymous shmem. We can configure different policies through the multi-size THP sysfs interface for anonymous shmem. But when we configure the "advise" policy of /sys/kernel/mm/transparent_hugepage/hugepages-xxxkB/shmem_enabled, we cannot write the "advise", but write the "madvise", which is unreasonable. We should keep the output and input values consistent, which is more convenient for users. 
Link: https://lkml.kernel.org/r/20240628032327.16987-1-libang.li@antgroup.com Fixes: 61a57f1b1da9 ("mm: shmem: add multi-size THP sysfs interface for anonymous shmem") Signed-off-by: Bang Li Reviewed-by: Baolin Wang Cc: Bang Li Cc: David Hildenbrand Cc: Hugh Dickins Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- mm/shmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/shmem.c b/mm/shmem.c index 17d2e5bd8870..72380d272a47 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -4927,7 +4927,7 @@ static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj, clear_bit(order, &huge_shmem_orders_madvise); set_bit(order, &huge_shmem_orders_within_size); spin_unlock(&huge_shmem_orders_lock); - } else if (sysfs_streq(buf, "madvise")) { + } else if (sysfs_streq(buf, "advise")) { spin_lock(&huge_shmem_orders_lock); clear_bit(order, &huge_shmem_orders_always); clear_bit(order, &huge_shmem_orders_inherit); -- Gitee From 9f935d0f798443b7c52bcbfd00a46f0cb41a3145 Mon Sep 17 00:00:00 2001 From: Bang Li Date: Fri, 5 Jul 2024 11:23:09 +0800 Subject: [PATCH 1386/2138] mm: thp: support "THPeligible" semantics for mTHP with anonymous shmem ANBZ: #9728 commit 26c7d8413aaf113a54b54f63e151416a5c5c2a88 upstream. After the commit 7fb1b252afb5 ("mm: shmem: add mTHP support for anonymous shmem"), we can configure different policies through the multi-size THP sysfs interface for anonymous shmem. But currently "THPeligible" indicates only whether the mapping is eligible for allocating THP-pages as well as the THP is PMD mappable or not for anonymous shmem, we need to support semantics for mTHP with anonymous shmem similar to those for mTHP with anonymous memory. 
Link: https://lkml.kernel.org/r/20240705032309.24933-1-libang.li@antgroup.com Signed-off-by: Bang Li Reviewed-by: Baolin Wang Cc: David Hildenbrand Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Ryan Roberts Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- include/linux/shmem_fs.h | 9 +++++++++ mm/huge_memory.c | 13 +++++++++---- mm/shmem.c | 9 +-------- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index f0c6bf982832..41aa4e0d6dbc 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -117,12 +117,21 @@ int shmem_unuse(unsigned int type); #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, struct mm_struct *mm, unsigned long vm_flags); +unsigned long shmem_allowable_huge_orders(struct inode *inode, + struct vm_area_struct *vma, pgoff_t index, + bool global_huge); #else static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, struct mm_struct *mm, unsigned long vm_flags) { return false; } +static inline unsigned long shmem_allowable_huge_orders(struct inode *inode, + struct vm_area_struct *vma, pgoff_t index, + bool global_huge) +{ + return 0; +} #endif #ifdef CONFIG_SHMEM diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 05fb23684785..b5046fdb84e1 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -132,10 +132,15 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, * Must be done before hugepage flags check since shmem has its * own flags. */ - if (!in_pf && shmem_file(vma->vm_file)) - return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff, - !enforce_sysfs, vma->vm_mm, vm_flags) - ? 
orders : 0; + if (!in_pf && shmem_file(vma->vm_file)) { + bool global_huge = shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff, + !enforce_sysfs, vma->vm_mm, vm_flags); + + if (!vma_is_anon_shmem(vma)) + return global_huge ? orders : 0; + return shmem_allowable_huge_orders(file_inode(vma->vm_file), + vma, vma->vm_pgoff, global_huge); + } if (!vma_is_anonymous(vma)) { /* diff --git a/mm/shmem.c b/mm/shmem.c index 72380d272a47..7dd675a90139 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1636,7 +1636,7 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) } #ifdef CONFIG_TRANSPARENT_HUGEPAGE -static unsigned long shmem_allowable_huge_orders(struct inode *inode, +unsigned long shmem_allowable_huge_orders(struct inode *inode, struct vm_area_struct *vma, pgoff_t index, bool global_huge) { @@ -1721,13 +1721,6 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault return orders; } #else -static unsigned long shmem_allowable_huge_orders(struct inode *inode, - struct vm_area_struct *vma, pgoff_t index, - bool global_huge) -{ - return 0; -} - static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf, struct address_space *mapping, pgoff_t index, unsigned long orders) -- Gitee From b1ad17b9c0e9268969c43df7c7dfc3568d86585f Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Thu, 27 Jun 2024 10:39:50 +1000 Subject: [PATCH 1387/2138] mm/readahead: limit page cache size in page_cache_ra_order() ANBZ: #9728 commit 1f789a45c3f1aa77531db21768fca70b66c0eeb1 upstream. In page_cache_ra_order(), the maximal order of the page cache to be allocated shouldn't be larger than MAX_PAGECACHE_ORDER. Otherwise, it's possible the large page cache can't be supported by xarray when the corresponding xarray entry is split. For example, HPAGE_PMD_ORDER is 13 on ARM64 when the base page size is 64KB. The PMD-sized page cache can't be supported by xarray. 
Link: https://lkml.kernel.org/r/20240627003953.1262512-3-gshan@redhat.com Fixes: 793917d997df ("mm/readahead: Add large folio readahead") Signed-off-by: Gavin Shan Acked-by: David Hildenbrand Cc: Darrick J. Wong Cc: Don Dutile Cc: Hugh Dickins Cc: Linus Torvalds Cc: Matthew Wilcox (Oracle) Cc: Ryan Roberts Cc: William Kucharski Cc: Zhenyu Zhang Cc: [5.18+] Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- mm/readahead.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/mm/readahead.c b/mm/readahead.c index 5ea79401c31d..41500002fbc4 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -502,13 +502,13 @@ void page_cache_ra_order(struct readahead_control *ractl, limit = min(limit, index + ra->size - 1); - if (new_order < MAX_PAGECACHE_ORDER) { + if (new_order < MAX_PAGECACHE_ORDER) new_order += 2; - if (new_order > MAX_PAGECACHE_ORDER) - new_order = MAX_PAGECACHE_ORDER; - while ((1 << new_order) > ra->size) - new_order--; - } + + if (new_order > MAX_PAGECACHE_ORDER) + new_order = MAX_PAGECACHE_ORDER; + while ((1 << new_order) > ra->size) + new_order--; /* See comment in page_cache_ra_unbounded() */ nofs = memalloc_nofs_save(); -- Gitee From 1cb946dc200b1e32d577e9a3d1e3db42269a7a72 Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Mon, 15 Jul 2024 10:04:23 +1000 Subject: [PATCH 1388/2138] mm/huge_memory: avoid PMD-size page cache if needed ANBZ: #9728 commit d659b715e94ac039803d7601505d3473393fc0be upstream. xarray can't support arbitrary page cache size. the largest and supported page cache size is defined as MAX_PAGECACHE_ORDER by commit 099d90642a71 ("mm/filemap: make MAX_PAGECACHE_ORDER acceptable to xarray"). However, it's possible to have 512MB page cache in the huge memory's collapsing path on ARM64 system whose base page size is 64KB. 
512MB page cache is breaking the limitation and a warning is raised when the xarray entry is split as shown in the following example. [root@dhcp-10-26-1-207 ~]# cat /proc/1/smaps | grep KernelPageSize KernelPageSize: 64 kB [root@dhcp-10-26-1-207 ~]# cat /tmp/test.c : int main(int argc, char **argv) { const char *filename = TEST_XFS_FILENAME; int fd = 0; void *buf = (void *)-1, *p; int pgsize = getpagesize(); int ret = 0; if (pgsize != 0x10000) { fprintf(stdout, "System with 64KB base page size is required!\n"); return -EPERM; } system("echo 0 > /sys/devices/virtual/bdi/253:0/read_ahead_kb"); system("echo 1 > /proc/sys/vm/drop_caches"); /* Open the xfs file */ fd = open(filename, O_RDONLY); assert(fd > 0); /* Create VMA */ buf = mmap(NULL, TEST_MEM_SIZE, PROT_READ, MAP_SHARED, fd, 0); assert(buf != (void *)-1); fprintf(stdout, "mapped buffer at 0x%p\n", buf); /* Populate VMA */ ret = madvise(buf, TEST_MEM_SIZE, MADV_NOHUGEPAGE); assert(ret == 0); ret = madvise(buf, TEST_MEM_SIZE, MADV_POPULATE_READ); assert(ret == 0); /* Collapse VMA */ ret = madvise(buf, TEST_MEM_SIZE, MADV_HUGEPAGE); assert(ret == 0); ret = madvise(buf, TEST_MEM_SIZE, MADV_COLLAPSE); if (ret) { fprintf(stdout, "Error %d to madvise(MADV_COLLAPSE)\n", errno); goto out; } /* Split xarray entry. 
Write permission is needed */ munmap(buf, TEST_MEM_SIZE); buf = (void *)-1; close(fd); fd = open(filename, O_RDWR); assert(fd > 0); fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, TEST_MEM_SIZE - pgsize, pgsize); out: if (buf != (void *)-1) munmap(buf, TEST_MEM_SIZE); if (fd > 0) close(fd); return ret; } [root@dhcp-10-26-1-207 ~]# gcc /tmp/test.c -o /tmp/test [root@dhcp-10-26-1-207 ~]# /tmp/test ------------[ cut here ]------------ WARNING: CPU: 25 PID: 7560 at lib/xarray.c:1025 xas_split_alloc+0xf8/0x128 Modules linked in: nft_fib_inet nft_fib_ipv4 nft_fib_ipv6 nft_fib \ nft_reject_inet nf_reject_ipv4 nf_reject_ipv6 nft_reject nft_ct \ nft_chain_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 \ ip_set rfkill nf_tables nfnetlink vfat fat virtio_balloon drm fuse \ xfs libcrc32c crct10dif_ce ghash_ce sha2_ce sha256_arm64 virtio_net \ sha1_ce net_failover virtio_blk virtio_console failover dimlib virtio_mmio CPU: 25 PID: 7560 Comm: test Kdump: loaded Not tainted 6.10.0-rc7-gavin+ #9 Hardware name: QEMU KVM Virtual Machine, BIOS edk2-20240524-1.el9 05/24/2024 pstate: 83400005 (Nzcv daif +PAN -UAO +TCO +DIT -SSBS BTYPE=--) pc : xas_split_alloc+0xf8/0x128 lr : split_huge_page_to_list_to_order+0x1c4/0x780 sp : ffff8000ac32f660 x29: ffff8000ac32f660 x28: ffff0000e0969eb0 x27: ffff8000ac32f6c0 x26: 0000000000000c40 x25: ffff0000e0969eb0 x24: 000000000000000d x23: ffff8000ac32f6c0 x22: ffffffdfc0700000 x21: 0000000000000000 x20: 0000000000000000 x19: ffffffdfc0700000 x18: 0000000000000000 x17: 0000000000000000 x16: ffffd5f3708ffc70 x15: 0000000000000000 x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000000 x11: ffffffffffffffc0 x10: 0000000000000040 x9 : ffffd5f3708e692c x8 : 0000000000000003 x7 : 0000000000000000 x6 : ffff0000e0969eb8 x5 : ffffd5f37289e378 x4 : 0000000000000000 x3 : 0000000000000c40 x2 : 000000000000000d x1 : 000000000000000c x0 : 0000000000000000 Call trace: xas_split_alloc+0xf8/0x128 split_huge_page_to_list_to_order+0x1c4/0x780 
truncate_inode_partial_folio+0xdc/0x160 truncate_inode_pages_range+0x1b4/0x4a8 truncate_pagecache_range+0x84/0xa0 xfs_flush_unmap_range+0x70/0x90 [xfs] xfs_file_fallocate+0xfc/0x4d8 [xfs] vfs_fallocate+0x124/0x2f0 ksys_fallocate+0x4c/0xa0 __arm64_sys_fallocate+0x24/0x38 invoke_syscall.constprop.0+0x7c/0xd8 do_el0_svc+0xb4/0xd0 el0_svc+0x44/0x1d8 el0t_64_sync_handler+0x134/0x150 el0t_64_sync+0x17c/0x180 Fix it by correcting the supported page cache orders, different sets for DAX and other files. With it corrected, 512MB page cache becomes disallowed on all non-DAX files on ARM64 system where the base page size is 64KB. After this patch is applied, the test program fails with error -EINVAL returned from __thp_vma_allowable_orders() and the madvise() system call to collapse the page caches. Link: https://lkml.kernel.org/r/20240715000423.316491-1-gshan@redhat.com Fixes: 6b24ca4a1a8d ("mm: Use multi-index entries in the page cache") Signed-off-by: Gavin Shan Acked-by: David Hildenbrand Reviewed-by: Ryan Roberts Acked-by: Zi Yan Cc: Baolin Wang Cc: Barry Song Cc: Don Dutile Cc: Matthew Wilcox (Oracle) Cc: Peter Xu Cc: Ryan Roberts Cc: William Kucharski Cc: [5.17+] Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- include/linux/huge_mm.h | 12 +++++++++--- mm/huge_memory.c | 12 ++++++++++-- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 49fb66826460..bac9a83d407b 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -77,14 +77,20 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr; #define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1))) /* - * Mask of all large folio orders supported for file THP. + * Mask of all large folio orders supported for file THP. Folios in a DAX + * file is never split and the MAX_PAGECACHE_ORDER limit does not apply to + * it. 
*/ -#define THP_ORDERS_ALL_FILE (BIT(PMD_ORDER) | BIT(PUD_ORDER)) +#define THP_ORDERS_ALL_FILE_DAX \ + (BIT(PMD_ORDER) | BIT(PUD_ORDER)) +#define THP_ORDERS_ALL_FILE_DEFAULT \ + ((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0)) /* * Mask of all large folio orders supported for THP. */ -#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE) +#define THP_ORDERS_ALL \ + (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DAX | THP_ORDERS_ALL_FILE_DEFAULT) #define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \ (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order))) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index b5046fdb84e1..32fcc9320dbe 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -81,9 +81,17 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, bool in_pf, bool enforce_sysfs, unsigned long orders) { + unsigned long supported_orders; + /* Check the intersection of requested and supported orders. */ - orders &= vma_is_anonymous(vma) ? - THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE; + if (vma_is_anonymous(vma)) + supported_orders = THP_ORDERS_ALL_ANON; + else if (vma_is_dax(vma)) + supported_orders = THP_ORDERS_ALL_FILE_DAX; + else + supported_orders = THP_ORDERS_ALL_FILE_DEFAULT; + + orders &= supported_orders; if (!orders) return 0; -- Gitee From 14540416b30e0eb31651fb89f6e6ce19123862aa Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Wed, 31 Jul 2024 13:46:19 +0800 Subject: [PATCH 1389/2138] mm: shmem: avoid allocating huge pages larger than MAX_PAGECACHE_ORDER for shmem ANBZ: #9728 commit b66b1b71d7ff5464d23a0ac6f73fae461b7264fd upstream. Similar to commit d659b715e94ac ("mm/huge_memory: avoid PMD-size page cache if needed"), ARM64 can support 512MB PMD-sized THP when the base page size is 64KB, which is larger than the maximum supported page cache size MAX_PAGECACHE_ORDER. This is not expected. 
To fix this issue, use THP_ORDERS_ALL_FILE_DEFAULT for shmem to filter allowable huge orders. [baolin.wang@linux.alibaba.com: remove comment, per Barry] Link: https://lkml.kernel.org/r/c55d7ef7-78aa-4ed6-b897-c3e03a3f3ab7@linux.alibaba.com [wangkefeng.wang@huawei.com: remove local `orders'] Link: https://lkml.kernel.org/r/87769ae8-b6c6-4454-925d-1864364af9c8@huawei.com Link: https://lkml.kernel.org/r/117121665254442c3c7f585248296495e5e2b45c.1722404078.git.baolin.wang@linux.alibaba.com Fixes: e7a2ab7b3bb5 ("mm: shmem: add mTHP support for anonymous shmem") Signed-off-by: Baolin Wang Signed-off-by: Kefeng Wang Reviewed-by: Barry Song Cc: Barry Song <21cnbao@gmail.com> Cc: David Hildenbrand Cc: Gavin Shan Cc: Hugh Dickins Cc: Lance Yang Cc: Matthew Wilcox Cc: Ryan Roberts Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- mm/shmem.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 7dd675a90139..8feb39f9a2c2 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1643,11 +1643,6 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode, unsigned long mask = READ_ONCE(huge_shmem_orders_always); unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size); unsigned long vm_flags = vma->vm_flags; - /* - * Check all the (large) orders below HPAGE_PMD_ORDER + 1 that - * are enabled for this vma. 
- */ - unsigned long orders = BIT(PMD_ORDER + 1) - 1; loff_t i_size; int order; @@ -1692,7 +1687,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode, if (global_huge) mask |= READ_ONCE(huge_shmem_orders_inherit); - return orders & mask; + return THP_ORDERS_ALL_FILE_DEFAULT & mask; } static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf, -- Gitee From a4126b8ced36520884086c32a91d72a4e1f9506b Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Wed, 31 Jul 2024 13:46:20 +0800 Subject: [PATCH 1390/2138] mm: shmem: fix incorrect aligned index when checking conflicts ANBZ: #9728 commit 4cbf320b1500fe64fcef8c96ed74dfc1ae2c9e2c upstream. In the shmem_suitable_orders() function, xa_find() is used to check for conflicts in the pagecache to select suitable huge orders. However, when checking each huge order in every loop, the aligned index is calculated from the previous iteration, which may cause suitable huge orders to be missed. We should use the original index each time in the loop to calculate a new aligned index for checking conflicts to avoid this issue. 
Link: https://lkml.kernel.org/r/07433b0f16a152bffb8cee34934a5c040e8e2ad6.1722404078.git.baolin.wang@linux.alibaba.com Fixes: e7a2ab7b3bb5 ("mm: shmem: add mTHP support for anonymous shmem") Signed-off-by: Baolin Wang Acked-by: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Gavin Shan Cc: Hugh Dickins Cc: Lance Yang Cc: Matthew Wilcox Cc: Ryan Roberts Cc: Zi Yan Cc: Barry Song Cc: Kefeng Wang Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- mm/shmem.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 8feb39f9a2c2..16d38af5509c 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1695,6 +1695,7 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault unsigned long orders) { struct vm_area_struct *vma = vmf->vma; + pgoff_t aligned_index; unsigned long pages; int order; @@ -1706,9 +1707,9 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault order = highest_order(orders); while (orders) { pages = 1UL << order; - index = round_down(index, pages); - if (!xa_find(&mapping->i_pages, &index, - index + pages - 1, XA_PRESENT)) + aligned_index = round_down(index, pages); + if (!xa_find(&mapping->i_pages, &aligned_index, + aligned_index + pages - 1, XA_PRESENT)) break; order = next_order(&orders, order); } -- Gitee From 4b0dd1b01098fb456982050576ce0e9f3435715e Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Wed, 10 Jul 2024 10:55:01 +0100 Subject: [PATCH 1391/2138] mm: shmem: rename mTHP shmem counters ANBZ: #9728 commit 63d9866ab01ffd0d0835d5564107283a4afc0a38 upstream. The legacy PMD-sized THP counters at /proc/vmstat include thp_file_alloc, thp_file_fallback and thp_file_fallback_charge, which rather confusingly refer to shmem THP and do not include any other types of file pages. 
This is inconsistent since in most other places in the kernel, THP counters are explicitly separated for anon, shmem and file flavours. However, we are stuck with it since it constitutes a user ABI. Recently, commit 66f44583f9b6 ("mm: shmem: add mTHP counters for anonymous shmem") added equivalent mTHP stats for shmem, keeping the same "file_" prefix in the names. But in future, we may want to add extra stats to cover actual file pages, at which point, it would all become very confusing. So let's take the opportunity to rename these new counters "shmem_" before the change makes it upstream and the ABI becomes immutable. While we are at it, let's improve the documentation for the legacy counters to make it clear that they count shmem pages only. Link: https://lkml.kernel.org/r/20240710095503.3193901-1-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: Baolin Wang Reviewed-by: Lance Yang Reviewed-by: Zi Yan Reviewed-by: Barry Song Acked-by: David Hildenbrand Cc: Daniel Gomez Cc: Hugh Dickins Cc: Jonathan Corbet Cc: Matthew Wilcox (Oracle) Cc: Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- Documentation/admin-guide/mm/transhuge.rst | 29 ++++++++++++---------- include/linux/huge_mm.h | 6 ++--- mm/huge_memory.c | 12 ++++----- mm/shmem.c | 8 +++--- 4 files changed, 29 insertions(+), 26 deletions(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 2b57aabcb929..ee0e1b545007 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -412,20 +412,23 @@ thp_collapse_alloc_failed the allocation. thp_file_alloc - is incremented every time a file huge page is successfully - allocated. + is incremented every time a shmem huge page is successfully + allocated (Note that despite being named after "file", the counter + measures only shmem). 
thp_file_fallback - is incremented if a file huge page is attempted to be allocated - but fails and instead falls back to using small pages. + is incremented if a shmem huge page is attempted to be allocated + but fails and instead falls back to using small pages. (Note that + despite being named after "file", the counter measures only shmem). thp_file_fallback_charge - is incremented if a file huge page cannot be charged and instead + is incremented if a shmem huge page cannot be charged and instead falls back to using small pages even though the allocation was - successful. + successful. (Note that despite being named after "file", the + counter measures only shmem). thp_file_mapped - is incremented every time a file huge page is mapped into + is incremented every time a file or shmem huge page is mapped into user address space. thp_split_page @@ -496,16 +499,16 @@ swpout_fallback Usually because failed to allocate some continuous swap space for the huge page. -file_alloc - is incremented every time a file huge page is successfully +shmem_alloc + is incremented every time a shmem huge page is successfully allocated. -file_fallback - is incremented if a file huge page is attempted to be allocated +shmem_fallback + is incremented if a shmem huge page is attempted to be allocated but fails and instead falls back to using small pages. -file_fallback_charge - is incremented if a file huge page cannot be charged and instead +shmem_fallback_charge + is incremented if a shmem huge page cannot be charged and instead falls back to using small pages even though the allocation was successful. 
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index bac9a83d407b..ce1865838133 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -279,9 +279,9 @@ enum mthp_stat_item { MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, MTHP_STAT_SWPOUT, MTHP_STAT_SWPOUT_FALLBACK, - MTHP_STAT_FILE_ALLOC, - MTHP_STAT_FILE_FALLBACK, - MTHP_STAT_FILE_FALLBACK_CHARGE, + MTHP_STAT_SHMEM_ALLOC, + MTHP_STAT_SHMEM_FALLBACK, + MTHP_STAT_SHMEM_FALLBACK_CHARGE, MTHP_STAT_SPLIT, MTHP_STAT_SPLIT_FAILED, MTHP_STAT_SPLIT_DEFERRED, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 32fcc9320dbe..73021810be57 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -554,9 +554,9 @@ DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK); DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT); DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK); -DEFINE_MTHP_STAT_ATTR(file_alloc, MTHP_STAT_FILE_ALLOC); -DEFINE_MTHP_STAT_ATTR(file_fallback, MTHP_STAT_FILE_FALLBACK); -DEFINE_MTHP_STAT_ATTR(file_fallback_charge, MTHP_STAT_FILE_FALLBACK_CHARGE); +DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC); +DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK); +DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE); DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT); DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED); DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED); @@ -567,9 +567,9 @@ static struct attribute *stats_attrs[] = { &anon_fault_fallback_charge_attr.attr, &swpout_attr.attr, &swpout_fallback_attr.attr, - &file_alloc_attr.attr, - &file_fallback_attr.attr, - &file_fallback_charge_attr.attr, + &shmem_alloc_attr.attr, + &shmem_fallback_attr.attr, + &shmem_fallback_charge_attr.attr, &split_attr.attr, &split_failed_attr.attr, &split_deferred_attr.attr, diff --git a/mm/shmem.c b/mm/shmem.c index 
16d38af5509c..193a99a5c399 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1786,7 +1786,7 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf, if (pages == HPAGE_PMD_NR) count_vm_event(THP_FILE_FALLBACK); #ifdef CONFIG_TRANSPARENT_HUGEPAGE - count_mthp_stat(order, MTHP_STAT_FILE_FALLBACK); + count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK); #endif order = next_order(&suitable_orders, order); } @@ -1813,8 +1813,8 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf, count_vm_event(THP_FILE_FALLBACK_CHARGE); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE - count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_FALLBACK); - count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_FALLBACK_CHARGE); + count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK); + count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE); #endif } goto unlock; @@ -2187,7 +2187,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, if (folio_test_pmd_mappable(folio)) count_vm_event(THP_FILE_ALLOC); #ifdef CONFIG_TRANSPARENT_HUGEPAGE - count_mthp_stat(folio_order(folio), MTHP_STAT_FILE_ALLOC); + count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC); #endif goto alloced; } -- Gitee From 4b9c35c52dc14d0976c22778ba40454128ad19a0 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 8 Aug 2024 12:18:46 +0100 Subject: [PATCH 1392/2138] mm: cleanup count_mthp_stat() definition ANBZ: #9728 commit 246d3aa3e53151fa150f10257ddd8a4facd31a6a upstream. Patch series "Shmem mTHP controls and stats improvements", v3. This is a small series to tidy up the way the shmem controls and stats are exposed. These patches were previously part of the series at [2], but I decided to split them out since they can go in independently. This patch (of 2): Let's move count_mthp_stat() so that it's always defined, even when THP is disabled. Previously uses of the function in files such as shmem.c, which are compiled even when THP is disabled, required ugly THP ifdeferry. 
With this cleanup, we can remove those ifdefs and the function resolves to a nop when THP is disabled. I shortly plan to call count_mthp_stat() from more THP-invariant source files. Link: https://lkml.kernel.org/r/20240808111849.651867-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20240808111849.651867-2-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: Barry Song Reviewed-by: Baolin Wang Reviewed-by: Lance Yang Acked-by: David Hildenbrand Cc: Gavin Shan Cc: Hugh Dickins Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- include/linux/huge_mm.h | 70 ++++++++++++++++++++--------------------- mm/memory.c | 2 -- mm/shmem.c | 6 ---- 3 files changed, 35 insertions(+), 43 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index ce1865838133..ee72945f942b 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -95,6 +95,41 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr; #define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \ (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order))) +enum mthp_stat_item { + MTHP_STAT_ANON_FAULT_ALLOC, + MTHP_STAT_ANON_FAULT_FALLBACK, + MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, + MTHP_STAT_SWPOUT, + MTHP_STAT_SWPOUT_FALLBACK, + MTHP_STAT_SHMEM_ALLOC, + MTHP_STAT_SHMEM_FALLBACK, + MTHP_STAT_SHMEM_FALLBACK_CHARGE, + MTHP_STAT_SPLIT, + MTHP_STAT_SPLIT_FAILED, + MTHP_STAT_SPLIT_DEFERRED, + __MTHP_STAT_COUNT +}; + +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS) +struct mthp_stat { + unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT]; +}; + +DECLARE_PER_CPU(struct mthp_stat, mthp_stats); + +static inline void count_mthp_stat(int order, enum mthp_stat_item item) +{ + if (order <= 0 || order > PMD_ORDER) + return; + + this_cpu_inc(mthp_stats.stats[order][item]); +} +#else +static inline 
void count_mthp_stat(int order, enum mthp_stat_item item) +{ +} +#endif + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define HPAGE_PMD_SHIFT PMD_SHIFT #define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) @@ -273,41 +308,6 @@ struct thpsize { #define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj) -enum mthp_stat_item { - MTHP_STAT_ANON_FAULT_ALLOC, - MTHP_STAT_ANON_FAULT_FALLBACK, - MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, - MTHP_STAT_SWPOUT, - MTHP_STAT_SWPOUT_FALLBACK, - MTHP_STAT_SHMEM_ALLOC, - MTHP_STAT_SHMEM_FALLBACK, - MTHP_STAT_SHMEM_FALLBACK_CHARGE, - MTHP_STAT_SPLIT, - MTHP_STAT_SPLIT_FAILED, - MTHP_STAT_SPLIT_DEFERRED, - __MTHP_STAT_COUNT -}; - -struct mthp_stat { - unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT]; -}; - -#ifdef CONFIG_SYSFS -DECLARE_PER_CPU(struct mthp_stat, mthp_stats); - -static inline void count_mthp_stat(int order, enum mthp_stat_item item) -{ - if (order <= 0 || order > PMD_ORDER) - return; - - this_cpu_inc(mthp_stats.stats[order][item]); -} -#else -static inline void count_mthp_stat(int order, enum mthp_stat_item item) -{ -} -#endif - #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ (1<vm_mm, MM_ANONPAGES, nr_pages); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC); -#endif folio_add_new_anon_rmap(folio, vma, addr); folio_add_lru_vma(folio, vma); setpte: diff --git a/mm/shmem.c b/mm/shmem.c index 193a99a5c399..8fe32afd81c8 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1785,9 +1785,7 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf, if (pages == HPAGE_PMD_NR) count_vm_event(THP_FILE_FALLBACK); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE count_mthp_stat(order, MTHP_STAT_SHMEM_FALLBACK); -#endif order = next_order(&suitable_orders, order); } } else { @@ -1812,10 +1810,8 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf, count_vm_event(THP_FILE_FALLBACK); count_vm_event(THP_FILE_FALLBACK_CHARGE); } -#ifdef 
CONFIG_TRANSPARENT_HUGEPAGE count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK); count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_FALLBACK_CHARGE); -#endif } goto unlock; } @@ -2186,9 +2182,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, if (!IS_ERR(folio)) { if (folio_test_pmd_mappable(folio)) count_vm_event(THP_FILE_ALLOC); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE count_mthp_stat(folio_order(folio), MTHP_STAT_SHMEM_ALLOC); -#endif goto alloced; } if (PTR_ERR(folio) == -EEXIST) -- Gitee From fb3a684ad80c92157b496c30667ec3745e0b5ad9 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 8 Aug 2024 12:18:47 +0100 Subject: [PATCH 1393/2138] mm: tidy up shmem mTHP controls and stats ANBZ: #9728 commit 70e59a75283bdcc49c8ee104c2d49a22e4912305 upstream. Previously we had a situation where shmem mTHP controls and stats were not exposed for some supported sizes and were exposed for some unsupported sizes. So let's clean that up. Anon mTHP can support all large orders [2, PMD_ORDER]. But shmem can support all large orders [1, MAX_PAGECACHE_ORDER]. However, per-size shmem controls and stats were previously being exposed for all the anon mTHP orders, meaning order-1 was not present, and for arm64 64K base pages, orders 12 and 13 were exposed but were not supported internally. Tidy this all up by defining ctrl and stats attribute groups for anon and file separately. Anon ctrl and stats groups are populated for all orders in THP_ORDERS_ALL_ANON and file ctrl and stats groups are populated for all orders in THP_ORDERS_ALL_FILE_DEFAULT. Additionally, create "any" ctrl and stats attribute groups which are populated for all orders in (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT). swpout stats use this since they apply to anon and shmem. The side-effect of all this is that different hugepage-*kB directories contain different sets of controls and stats, depending on which memory types support that size. 
This approach is preferred over the alternative, which is to populate dummy controls and stats for memory types that do not support a given size. [ryan.roberts@arm.com: file pages and shmem can also be split] Link: https://lkml.kernel.org/r/f7ced14c-8bc5-405f-bee7-94f63980f525@arm.comLink: https://lkml.kernel.org/r/20240808111849.651867-3-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Tested-by: Barry Song Reviewed-by: Baolin Wang Cc: David Hildenbrand Cc: Gavin Shan Cc: Hugh Dickins Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- mm/huge_memory.c | 140 +++++++++++++++++++++++++++++++++++++---------- 1 file changed, 112 insertions(+), 28 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 73021810be57..e0c9f00ec597 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -448,8 +448,8 @@ static void thpsize_release(struct kobject *kobj); static DEFINE_SPINLOCK(huge_anon_orders_lock); static LIST_HEAD(thpsize_list); -static ssize_t thpsize_enabled_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) +static ssize_t anon_enabled_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { int order = to_thpsize(kobj)->order; const char *output; @@ -466,9 +466,9 @@ static ssize_t thpsize_enabled_show(struct kobject *kobj, return sysfs_emit(buf, "%s\n", output); } -static ssize_t thpsize_enabled_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count) +static ssize_t anon_enabled_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) { int order = to_thpsize(kobj)->order; ssize_t ret = count; @@ -503,19 +503,35 @@ static ssize_t thpsize_enabled_store(struct kobject *kobj, return ret; } -static struct kobj_attribute thpsize_enabled_attr = - __ATTR(enabled, 0644, thpsize_enabled_show, thpsize_enabled_store); +static struct 
kobj_attribute anon_enabled_attr = + __ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store); -static struct attribute *thpsize_attrs[] = { - &thpsize_enabled_attr.attr, +static struct attribute *anon_ctrl_attrs[] = { + &anon_enabled_attr.attr, + NULL, +}; + +static const struct attribute_group anon_ctrl_attr_grp = { + .attrs = anon_ctrl_attrs, +}; + +static struct attribute *file_ctrl_attrs[] = { #ifdef CONFIG_SHMEM &thpsize_shmem_enabled_attr.attr, #endif NULL, }; -static const struct attribute_group thpsize_attr_group = { - .attrs = thpsize_attrs, +static const struct attribute_group file_ctrl_attr_grp = { + .attrs = file_ctrl_attrs, +}; + +static struct attribute *any_ctrl_attrs[] = { + NULL, +}; + +static const struct attribute_group any_ctrl_attr_grp = { + .attrs = any_ctrl_attrs, }; static const struct kobj_type thpsize_ktype = { @@ -554,64 +570,132 @@ DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK); DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT); DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK); +#ifdef CONFIG_SHMEM DEFINE_MTHP_STAT_ATTR(shmem_alloc, MTHP_STAT_SHMEM_ALLOC); DEFINE_MTHP_STAT_ATTR(shmem_fallback, MTHP_STAT_SHMEM_FALLBACK); DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE); +#endif DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT); DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED); DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED); -static struct attribute *stats_attrs[] = { +static struct attribute *anon_stats_attrs[] = { &anon_fault_alloc_attr.attr, &anon_fault_fallback_attr.attr, &anon_fault_fallback_charge_attr.attr, +#ifndef CONFIG_SHMEM &swpout_attr.attr, &swpout_fallback_attr.attr, +#endif + &split_deferred_attr.attr, + NULL, +}; + +static struct attribute_group anon_stats_attr_grp = { + .name = "stats", + .attrs = anon_stats_attrs, +}; + +static struct 
attribute *file_stats_attrs[] = { +#ifdef CONFIG_SHMEM &shmem_alloc_attr.attr, &shmem_fallback_attr.attr, &shmem_fallback_charge_attr.attr, +#endif + NULL, +}; + +static struct attribute_group file_stats_attr_grp = { + .name = "stats", + .attrs = file_stats_attrs, +}; + +static struct attribute *any_stats_attrs[] = { +#ifdef CONFIG_SHMEM + &swpout_attr.attr, + &swpout_fallback_attr.attr, +#endif &split_attr.attr, &split_failed_attr.attr, - &split_deferred_attr.attr, NULL, }; -static struct attribute_group stats_attr_group = { +static struct attribute_group any_stats_attr_grp = { .name = "stats", - .attrs = stats_attrs, + .attrs = any_stats_attrs, }; +static int sysfs_add_group(struct kobject *kobj, + const struct attribute_group *grp) +{ + int ret = -ENOENT; + + /* + * If the group is named, try to merge first, assuming the subdirectory + * was already created. This avoids the warning emitted by + * sysfs_create_group() if the directory already exists. + */ + if (grp->name) + ret = sysfs_merge_group(kobj, grp); + if (ret) + ret = sysfs_create_group(kobj, grp); + + return ret; +} + static struct thpsize *thpsize_create(int order, struct kobject *parent) { unsigned long size = (PAGE_SIZE << order) / SZ_1K; struct thpsize *thpsize; - int ret; + int ret = -ENOMEM; thpsize = kzalloc(sizeof(*thpsize), GFP_KERNEL); if (!thpsize) - return ERR_PTR(-ENOMEM); + goto err; + + thpsize->order = order; ret = kobject_init_and_add(&thpsize->kobj, &thpsize_ktype, parent, "hugepages-%lukB", size); if (ret) { kfree(thpsize); - return ERR_PTR(ret); + goto err; } - ret = sysfs_create_group(&thpsize->kobj, &thpsize_attr_group); - if (ret) { - kobject_put(&thpsize->kobj); - return ERR_PTR(ret); + + ret = sysfs_add_group(&thpsize->kobj, &any_ctrl_attr_grp); + if (ret) + goto err_put; + + ret = sysfs_add_group(&thpsize->kobj, &any_stats_attr_grp); + if (ret) + goto err_put; + + if (BIT(order) & THP_ORDERS_ALL_ANON) { + ret = sysfs_add_group(&thpsize->kobj, &anon_ctrl_attr_grp); + if (ret) + 
goto err_put; + + ret = sysfs_add_group(&thpsize->kobj, &anon_stats_attr_grp); + if (ret) + goto err_put; } - ret = sysfs_create_group(&thpsize->kobj, &stats_attr_group); - if (ret) { - kobject_put(&thpsize->kobj); - return ERR_PTR(ret); + if (BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT) { + ret = sysfs_add_group(&thpsize->kobj, &file_ctrl_attr_grp); + if (ret) + goto err_put; + + ret = sysfs_add_group(&thpsize->kobj, &file_stats_attr_grp); + if (ret) + goto err_put; } - thpsize->order = order; return thpsize; +err_put: + kobject_put(&thpsize->kobj); +err: + return ERR_PTR(ret); } static void thpsize_release(struct kobject *kobj) @@ -651,7 +735,7 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) goto remove_hp_group; } - orders = THP_ORDERS_ALL_ANON; + orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT; order = highest_order(orders); while (orders) { thpsize = thpsize_create(order, *hugepage_kobj); -- Gitee From 855e607cc8fc8392378df4681eb8124312d85d0d Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Fri, 27 Sep 2024 17:18:41 +0800 Subject: [PATCH 1394/2138] anolis: mm: shmem: extend hugepage order checking in shmem_alloc_folio() ANBZ: #9728 Since mTHP support for anonymous shmem has been added, extend the hugepage checking of vma_alloc_folio() in shmem_alloc_folio() to all orders greater than 0. 
Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3864 --- mm/shmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/shmem.c b/mm/shmem.c index 8fe32afd81c8..ba6f639170e6 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1732,7 +1732,7 @@ static struct folio *shmem_alloc_folio(gfp_t gfp, int order, struct folio *folio; shmem_pseudo_vma_init(&pvma, info, index); - folio = vma_alloc_folio(gfp, order, &pvma, 0, order == HPAGE_PMD_ORDER); + folio = vma_alloc_folio(gfp, order, &pvma, 0, order > 0); shmem_pseudo_vma_destroy(&pvma); return folio; -- Gitee From 95cb1e194e48747e3dbc58cfa994126b6221a6e8 Mon Sep 17 00:00:00 2001 From: Ran Xiaokai Date: Wed, 15 May 2024 10:47:54 +0800 Subject: [PATCH 1395/2138] mm/huge_memory: mark racy access onhuge_anon_orders_always ANBZ: #9728 commit 7f83bf14603ef41a44dc907594d749a283e22c37 upstream. huge_anon_orders_always is accessed lockless, it is better to use the READ_ONCE() wrapper. This is not fixing any visible bug, hopefully this can cease some KCSAN complains in the future. Also do that for huge_anon_orders_madvise. Link: https://lkml.kernel.org/r/20240515104754889HqrahFPePOIE1UlANHVAh@zte.com.cn Signed-off-by: Ran Xiaokai Acked-by: David Hildenbrand Reviewed-by: Lu Zhongjun Reviewed-by: xu xin Cc: Yang Yang Cc: Matthew Wilcox (Oracle) Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3867 --- include/linux/huge_mm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index ee72945f942b..0058562d2490 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -165,8 +165,8 @@ static inline bool hugepage_flags_enabled(void) * So we don't need to look at huge_anon_orders_inherit. 
*/ return hugepage_global_enabled() || - huge_anon_orders_always || - huge_anon_orders_madvise; + READ_ONCE(huge_anon_orders_always) || + READ_ONCE(huge_anon_orders_madvise); } static inline int highest_order(unsigned long orders) -- Gitee From b21f9f941e2676735bc174acd2e85d4b7860c669 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Thu, 4 Jul 2024 10:10:50 +0100 Subject: [PATCH 1396/2138] mm: fix khugepaged activation policy ANBZ: #9728 commit 00f58104202c472e487f0866fbd38832523fd4f9 upstream. Since the introduction of mTHP, the docuementation has stated that khugepaged would be enabled when any mTHP size is enabled, and disabled when all mTHP sizes are disabled. There are 2 problems with this; 1. this is not what was implemented by the code and 2. this is not the desirable behavior. Desirable behavior is for khugepaged to be enabled when any PMD-sized THP is enabled, anon or file. (Note that file THP is still controlled by the top-level control so we must always consider that, as well as the PMD-size mTHP control for anon). khugepaged only supports collapsing to PMD-sized THP so there is no value in enabling it when PMD-sized THP is disabled. So let's change the code and documentation to reflect this policy. Further, per-size enabled control modification events were not previously forwarded to khugepaged to give it an opportunity to start or stop. 
Consequently the following was resulting in khugepaged eroneously not being activated: echo never > /sys/kernel/mm/transparent_hugepage/enabled echo always > /sys/kernel/mm/transparent_hugepage/hugepages-2048kB/enabled [ryan.roberts@arm.com: v3] Link: https://lkml.kernel.org/r/20240705102849.2479686-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20240705102849.2479686-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20240704091051.2411934-1-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Fixes: 3485b88390b0 ("mm: thp: introduce multi-size THP sysfs interface") Closes: https://lore.kernel.org/linux-mm/7a0bbe69-1e3d-4263-b206-da007791a5c4@redhat.com/ Acked-by: David Hildenbrand Cc: Baolin Wang Cc: Barry Song Cc: Jonathan Corbet Cc: Lance Yang Cc: Yang Shi Cc: Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3867 --- Documentation/admin-guide/mm/transhuge.rst | 11 ++++---- include/linux/huge_mm.h | 12 -------- mm/huge_memory.c | 7 +++++ mm/khugepaged.c | 33 +++++++++++++++++----- 4 files changed, 38 insertions(+), 25 deletions(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index ee0e1b545007..a71aa21a6874 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -202,12 +202,11 @@ PMD-mappable transparent hugepage:: cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size -khugepaged will be automatically started when one or more hugepage -sizes are enabled (either by directly setting "always" or "madvise", -or by setting "inherit" while the top-level enabled is set to "always" -or "madvise"), and it'll be automatically shutdown when the last -hugepage size is disabled (either by directly setting "never", or by -setting "inherit" while the top-level enabled is set to "never"). 
+khugepaged will be automatically started when PMD-sized THP is enabled +(either of the per-size anon control or the top-level control are set +to "always" or "madvise"), and it'll be automatically shutdown when +PMD-sized THP is disabled (when both the per-size anon control and the +top-level control are "never") Khugepaged controls ------------------- diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 0058562d2490..95fe4fa24c24 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -157,18 +157,6 @@ static inline bool hugepage_global_always(void) (1< 0) { + int err; + + err = start_stop_khugepaged(); + if (err) + ret = err; + } return ret; } diff --git a/mm/khugepaged.c b/mm/khugepaged.c index c923465c6af5..20327d0d1472 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -409,6 +409,26 @@ static inline int hpage_collapse_test_exit(struct mm_struct *mm) return atomic_read(&mm->mm_users) == 0; } +static bool hugepage_pmd_enabled(void) +{ + /* + * We cover both the anon and the file-backed case here; file-backed + * hugepages, when configured in, are determined by the global control. + * Anon pmd-sized hugepages are determined by the pmd-size control. 
+ */ + if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && + hugepage_global_enabled()) + return true; + if (test_bit(PMD_ORDER, &huge_anon_orders_always)) + return true; + if (test_bit(PMD_ORDER, &huge_anon_orders_madvise)) + return true; + if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) && + hugepage_global_enabled()) + return true; + return false; +} + void __khugepaged_enter(struct mm_struct *mm) { struct khugepaged_mm_slot *mm_slot; @@ -445,7 +465,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma, unsigned long vm_flags) { if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) && - hugepage_flags_enabled()) { + hugepage_pmd_enabled()) { if (thp_vma_allowable_order(vma, vm_flags, false, false, true, PMD_ORDER)) __khugepaged_enter(vma->vm_mm); @@ -2468,8 +2488,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, static int khugepaged_has_work(void) { - return !list_empty(&khugepaged_scan.mm_head) && - hugepage_flags_enabled(); + return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled(); } static int khugepaged_wait_event(void) @@ -2542,7 +2561,7 @@ static void khugepaged_wait_work(void) return; } - if (hugepage_flags_enabled()) + if (hugepage_pmd_enabled()) wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); } @@ -2573,7 +2592,7 @@ static void set_recommended_min_free_kbytes(void) int nr_zones = 0; unsigned long recommended_min; - if (!hugepage_flags_enabled()) { + if (!hugepage_pmd_enabled()) { calculate_min_free_kbytes(); goto update_wmarks; } @@ -2623,7 +2642,7 @@ int start_stop_khugepaged(void) int err = 0; mutex_lock(&khugepaged_mutex); - if (hugepage_flags_enabled()) { + if (hugepage_pmd_enabled()) { if (!khugepaged_thread) khugepaged_thread = kthread_run(khugepaged, NULL, "khugepaged"); @@ -2649,7 +2668,7 @@ int start_stop_khugepaged(void) void khugepaged_min_free_kbytes_update(void) { mutex_lock(&khugepaged_mutex); - if (hugepage_flags_enabled() && khugepaged_thread) + if (hugepage_pmd_enabled() && 
khugepaged_thread) set_recommended_min_free_kbytes(); mutex_unlock(&khugepaged_mutex); } -- Gitee From 8498165357fe31804877f90fbe5d3f46e9d04fda Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Wed, 3 Apr 2024 21:47:21 +0800 Subject: [PATCH 1397/2138] mm: page_alloc: use the correct THP order for THP PCP ANBZ: #9728 commit 6303d1c553c8d758f068de70a41668622b7a917c upstream. Commit 44042b449872 ("mm/page_alloc: allow high-order pages to be stored on the per-cpu lists") extends the PCP allocator to store THP pages, and it determines whether to cache THP pages in PCP by comparing with pageblock_order. But the pageblock_order is not always equal to THP order. It might also be MAX_PAGE_ORDER, which could prevent PCP from caching THP pages. Therefore, using HPAGE_PMD_ORDER instead to determine the need for caching THP for PCP will fix this issue Link: https://lkml.kernel.org/r/a25c9e14cd03907d5978b60546a69e6aa3fc2a7d.1712151833.git.baolin.wang@linux.alibaba.com Fixes: 44042b449872 ("mm/page_alloc: allow high-order pages to be stored on the per-cpu lists") Signed-off-by: Baolin Wang Acked-by: Vlastimil Babka Cc: Mel Gorman Reviewed-by: Barry Song Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3867 --- mm/page_alloc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9131776ceadc..88d16b47badb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -524,7 +524,7 @@ static inline unsigned int order_to_pindex(int migratetype, int order) #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (order > PAGE_ALLOC_COSTLY_ORDER) { - VM_BUG_ON(order != pageblock_order); + VM_BUG_ON(order != HPAGE_PMD_ORDER); movable = migratetype == MIGRATE_MOVABLE; @@ -543,7 +543,7 @@ static inline int pindex_to_order(unsigned int pindex) #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (pindex >= NR_LOWORDER_PCP_LISTS) - order = pageblock_order; + order = HPAGE_PMD_ORDER; #else 
VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); #endif @@ -556,7 +556,7 @@ static inline bool pcp_allowed_order(unsigned int order) if (order <= PAGE_ALLOC_COSTLY_ORDER) return true; #ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (order == pageblock_order) + if (order == HPAGE_PMD_ORDER) return true; #endif return false; -- Gitee From a912925ae6ce82a5bd55348046fb19494b14dcf8 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 15 Jul 2024 02:04:32 +0900 Subject: [PATCH 1398/2138] fortify: fix warnings in fortify tests with KASAN ANBZ: #11097 commit 84679f04ceafd58d9b35f790203520b2930f1a03 upstream. When a software KASAN mode is enabled, the fortify tests emit warnings on some architectures. For example, for ARCH=arm, the combination of CONFIG_FORTIFY_SOURCE=y and CONFIG_KASAN=y produces the following warnings: TEST lib/test_fortify/read_overflow-memchr.log warning: unsafe memchr() usage lacked '__read_overflow' warning in lib/test_fortify/read_overflow-memchr.c TEST lib/test_fortify/read_overflow-memchr_inv.log warning: unsafe memchr_inv() usage lacked '__read_overflow' symbol in lib/test_fortify/read_overflow-memchr_inv.c TEST lib/test_fortify/read_overflow-memcmp.log warning: unsafe memcmp() usage lacked '__read_overflow' warning in lib/test_fortify/read_overflow-memcmp.c TEST lib/test_fortify/read_overflow-memscan.log warning: unsafe memscan() usage lacked '__read_overflow' symbol in lib/test_fortify/read_overflow-memscan.c TEST lib/test_fortify/read_overflow2-memcmp.log warning: unsafe memcmp() usage lacked '__read_overflow2' warning in lib/test_fortify/read_overflow2-memcmp.c [ more and more similar warnings... ] Commit 9c2d1328f88a ("kbuild: provide reasonable defaults for tool coverage") removed KASAN flags from non-kernel objects by default. It was an intended behavior because lib/test_fortify/*.c are unit tests that are not linked to the kernel. 
As it turns out, some architectures require -fsanitize=kernel-(hw)address to define __SANITIZE_ADDRESS__ for the fortify tests. Without __SANITIZE_ADDRESS__ defined, arch/arm/include/asm/string.h defines __NO_FORTIFY, thus excluding . This issue does not occur on x86 thanks to commit 4ec4190be4cf ("kasan, x86: don't rename memintrinsics in uninstrumented files"), but there are still some architectures that define __NO_FORTIFY in such a situation. Set KASAN_SANITIZE=y explicitly to the fortify tests. Fixes: 9c2d1328f88a ("kbuild: provide reasonable defaults for tool coverage") Reported-by: Arnd Bergmann Closes: https://lore.kernel.org/all/0e8dee26-41cc-41ae-9493-10cd1a8e3268@app.fastmail.com/ Signed-off-by: Masahiro Yamada Signed-off-by: Qiao Ma Reviewed-by: Tianchen Ding Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3866 --- lib/Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/Makefile b/lib/Makefile index 740109b6e2c8..af9c7479648b 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -438,3 +438,7 @@ $(obj)/$(TEST_FORTIFY_LOG): $(addprefix $(obj)/, $(TEST_FORTIFY_LOGS)) FORCE ifeq ($(CONFIG_FORTIFY_SOURCE),y) $(obj)/string.o: $(obj)/$(TEST_FORTIFY_LOG) endif + +# Some architectures define __NO_FORTIFY if __SANITIZE_ADDRESS__ is undefined. +# Pass CFLAGS_KASAN to avoid warnings. +$(foreach x, $(patsubst %.log,%.o,$(TEST_FORTIFY_LOGS)), $(eval KASAN_SANITIZE_$(x) := y)) -- Gitee From 178ec83c8a0784f9c3cae4f436ff546b539ef9f6 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 28 Jul 2024 00:02:36 +0900 Subject: [PATCH 1399/2138] fortify: refactor test_fortify Makefile to fix some build problems ANBZ: #11097 commit 4e9903b0861c9df3464b82db4a7025863bac1897 upstream. There are some issues in the test_fortify Makefile code. 
Problem 1: cc-disable-warning invokes compiler dozens of times To see how many times the cc-disable-warning is evaluated, change this code: $(call cc-disable-warning,fortify-source) to: $(call cc-disable-warning,$(shell touch /tmp/fortify-$$$$)fortify-source) Then, build the kernel with CONFIG_FORTIFY_SOURCE=y. You will see a large number of '/tmp/fortify-' files created: $ ls -1 /tmp/fortify-* | wc 80 80 1600 This means the compiler was invoked 80 times just for checking the -Wno-fortify-source flag support. $(call cc-disable-warning,fortify-source) should be added to a simple variable instead of a recursive variable. Problem 2: do not recompile string.o when the test code is updated The test cases are independent of the kernel. However, when the test code is updated, $(obj)/string.o is rebuilt and vmlinux is relinked due to this dependency: $(obj)/string.o: $(obj)/$(TEST_FORTIFY_LOG) always-y is suitable for building the log files. Problem 3: redundant code clean-files += $(addsuffix .o, $(TEST_FORTIFY_LOGS)) ... is unneeded because the top Makefile globally cleans *.o files. This commit fixes these issues and makes the code readable. 
Signed-off-by: Masahiro Yamada Link: https://lore.kernel.org/r/20240727150302.1823750-2-masahiroy@kernel.org Signed-off-by: Kees Cook Signed-off-by: Qiao Ma Reviewed-by: Tianchen Ding Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3866 --- lib/.gitignore | 2 -- lib/Makefile | 38 +------------------------------------ lib/test_fortify/.gitignore | 2 ++ lib/test_fortify/Makefile | 28 +++++++++++++++++++++++++++ scripts/remove-stale-files | 2 ++ 5 files changed, 33 insertions(+), 39 deletions(-) create mode 100644 lib/test_fortify/.gitignore create mode 100644 lib/test_fortify/Makefile diff --git a/lib/.gitignore b/lib/.gitignore index 54596b634ecb..101a4aa92fb5 100644 --- a/lib/.gitignore +++ b/lib/.gitignore @@ -5,5 +5,3 @@ /gen_crc32table /gen_crc64table /oid_registry_data.c -/test_fortify.log -/test_fortify/*.log diff --git a/lib/Makefile b/lib/Makefile index af9c7479648b..7ab8f09de8ec 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -405,40 +405,4 @@ obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o -# FORTIFY_SOURCE compile-time behavior tests -TEST_FORTIFY_SRCS = $(wildcard $(srctree)/$(src)/test_fortify/*-*.c) -TEST_FORTIFY_LOGS = $(patsubst $(srctree)/$(src)/%.c, %.log, $(TEST_FORTIFY_SRCS)) -TEST_FORTIFY_LOG = test_fortify.log - -quiet_cmd_test_fortify = TEST $@ - cmd_test_fortify = $(CONFIG_SHELL) $(srctree)/scripts/test_fortify.sh \ - $< $@ "$(NM)" $(CC) $(c_flags) \ - $(call cc-disable-warning,fortify-source) \ - -DKBUILD_EXTRA_WARN1 - -targets += $(TEST_FORTIFY_LOGS) -clean-files += $(TEST_FORTIFY_LOGS) -clean-files += $(addsuffix .o, $(TEST_FORTIFY_LOGS)) -$(obj)/test_fortify/%.log: $(src)/test_fortify/%.c \ - $(src)/test_fortify/test_fortify.h \ - $(srctree)/include/linux/fortify-string.h \ - $(srctree)/scripts/test_fortify.sh \ - FORCE - $(call if_changed,test_fortify) - -quiet_cmd_gen_fortify_log = GEN $@ - cmd_gen_fortify_log = cat /dev/null > $@ || true - 
-targets += $(TEST_FORTIFY_LOG) -clean-files += $(TEST_FORTIFY_LOG) -$(obj)/$(TEST_FORTIFY_LOG): $(addprefix $(obj)/, $(TEST_FORTIFY_LOGS)) FORCE - $(call if_changed,gen_fortify_log) - -# Fake dependency to trigger the fortify tests. -ifeq ($(CONFIG_FORTIFY_SOURCE),y) -$(obj)/string.o: $(obj)/$(TEST_FORTIFY_LOG) -endif - -# Some architectures define __NO_FORTIFY if __SANITIZE_ADDRESS__ is undefined. -# Pass CFLAGS_KASAN to avoid warnings. -$(foreach x, $(patsubst %.log,%.o,$(TEST_FORTIFY_LOGS)), $(eval KASAN_SANITIZE_$(x) := y)) +subdir-$(CONFIG_FORTIFY_SOURCE) += test_fortify diff --git a/lib/test_fortify/.gitignore b/lib/test_fortify/.gitignore new file mode 100644 index 000000000000..c1ba37d14b50 --- /dev/null +++ b/lib/test_fortify/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +/*.log diff --git a/lib/test_fortify/Makefile b/lib/test_fortify/Makefile new file mode 100644 index 000000000000..3907a2242ef9 --- /dev/null +++ b/lib/test_fortify/Makefile @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0 + +ccflags-y := $(call cc-disable-warning,fortify-source) + +quiet_cmd_test_fortify = TEST $@ + cmd_test_fortify = $(CONFIG_SHELL) $(srctree)/scripts/test_fortify.sh \ + $< $@ "$(NM)" $(CC) $(c_flags) -DKBUILD_EXTRA_WARN1 + +$(obj)/%.log: $(src)/%.c $(srctree)/scripts/test_fortify.sh \ + $(src)/test_fortify.h \ + $(srctree)/include/linux/fortify-string.h \ + FORCE + $(call if_changed,test_fortify) + +logs = $(patsubst $(src)/%.c, %.log, $(wildcard $(src)/*-*.c)) +targets += $(logs) + +quiet_cmd_gen_fortify_log = CAT $@ + cmd_gen_fortify_log = cat $(or $(real-prereqs),/dev/null) > $@ + +$(obj)/test_fortify.log: $(addprefix $(obj)/, $(logs)) FORCE + $(call if_changed,gen_fortify_log) + +always-y += test_fortify.log + +# Some architectures define __NO_FORTIFY if __SANITIZE_ADDRESS__ is undefined. +# Pass CFLAGS_KASAN to avoid warnings. 
+KASAN_SANITIZE := y diff --git a/scripts/remove-stale-files b/scripts/remove-stale-files index 8b1a636f8543..38eb84eb605b 100755 --- a/scripts/remove-stale-files +++ b/scripts/remove-stale-files @@ -39,3 +39,5 @@ rm -rf include/ksym find . -name '*.usyms' | xargs rm -f rm -f binkernel.spec + +rm -f lib/test_fortify.log -- Gitee From 7802a79d6ef9eb45434ba039a63af1059520299e Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 28 Jul 2024 00:02:37 +0900 Subject: [PATCH 1400/2138] fortify: move test_fortify.sh to lib/test_fortify/ ANBZ: #11097 commit 5a8d0c46c9e024bed4805a9335fe6124d8a78d3a upstream. This script is only used in lib/test_fortify/. There is no reason to keep it in scripts/. Signed-off-by: Masahiro Yamada Link: https://lore.kernel.org/r/20240727150302.1823750-3-masahiroy@kernel.org Signed-off-by: Kees Cook Signed-off-by: Qiao Ma Reviewed-by: Tianchen Ding Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3866 --- MAINTAINERS | 1 - lib/test_fortify/Makefile | 4 ++-- {scripts => lib/test_fortify}/test_fortify.sh | 0 3 files changed, 2 insertions(+), 3 deletions(-) rename {scripts => lib/test_fortify}/test_fortify.sh (100%) diff --git a/MAINTAINERS b/MAINTAINERS index 1617ce15cdd2..3181aef3d470 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8163,7 +8163,6 @@ F: lib/memcpy_kunit.c F: lib/strcat_kunit.c F: lib/strscpy_kunit.c F: lib/test_fortify/* -F: scripts/test_fortify.sh K: \b__NO_FORTIFY\b FPGA DFL DRIVERS diff --git a/lib/test_fortify/Makefile b/lib/test_fortify/Makefile index 3907a2242ef9..1826172c32d4 100644 --- a/lib/test_fortify/Makefile +++ b/lib/test_fortify/Makefile @@ -3,10 +3,10 @@ ccflags-y := $(call cc-disable-warning,fortify-source) quiet_cmd_test_fortify = TEST $@ - cmd_test_fortify = $(CONFIG_SHELL) $(srctree)/scripts/test_fortify.sh \ + cmd_test_fortify = $(CONFIG_SHELL) $(src)/test_fortify.sh \ $< $@ "$(NM)" $(CC) $(c_flags) -DKBUILD_EXTRA_WARN1 -$(obj)/%.log: $(src)/%.c $(srctree)/scripts/test_fortify.sh \ 
+$(obj)/%.log: $(src)/%.c $(src)/test_fortify.sh \ $(src)/test_fortify.h \ $(srctree)/include/linux/fortify-string.h \ FORCE diff --git a/scripts/test_fortify.sh b/lib/test_fortify/test_fortify.sh similarity index 100% rename from scripts/test_fortify.sh rename to lib/test_fortify/test_fortify.sh -- Gitee From c341857d18583b6b65ef65fbf6002dc08010c660 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 28 Jul 2024 00:02:38 +0900 Subject: [PATCH 1401/2138] fortify: use if_changed_dep to record header dependency in *.cmd files ANBZ: #11097 commit 9c6b7fbbd7a2c2772a592adf9b2835371927a1d3 upstream. After building with CONFIG_FORTIFY_SOURCE=y, many .*.d files are left in lib/test_fortify/ because the compiler outputs header dependencies into *.d without fixdep being invoked. When compiling C files, if_changed_dep should be used so that the auto-generated header dependencies are recorded in .*.cmd files. Currently, if_changed is incorrectly used, and only two headers are hard-coded in lib/Makefile. In the previous patch version, the kbuild test robot detected new errors on GCC 7. GCC 7 or older does not produce test.d with the following test code: $ echo 'void b(void) __attribute__((__error__(""))); void a(void) { b(); }' | gcc -Wp,-MMD,test.d -c -o /dev/null -x c - Perhaps, this was a bug that existed in older GCC versions. Skip the tests for GCC<=7 for now, as this will be eventually solved when we bump the minimal supported GCC version. 
Link: https://lore.kernel.org/oe-kbuild-all/CAK7LNARmJcyyzL-jVJfBPi3W684LTDmuhMf1koF0TXoCpKTmcw@mail.gmail.com/T/#m13771bf78ae21adff22efc4d310c973fb4bcaf67 Signed-off-by: Masahiro Yamada Link: https://lore.kernel.org/r/20240727150302.1823750-4-masahiroy@kernel.org Signed-off-by: Kees Cook Signed-off-by: Qiao Ma Reviewed-by: Tianchen Ding Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3866 --- lib/test_fortify/Makefile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/test_fortify/Makefile b/lib/test_fortify/Makefile index 1826172c32d4..1c3f82ad8bb2 100644 --- a/lib/test_fortify/Makefile +++ b/lib/test_fortify/Makefile @@ -6,11 +6,8 @@ quiet_cmd_test_fortify = TEST $@ cmd_test_fortify = $(CONFIG_SHELL) $(src)/test_fortify.sh \ $< $@ "$(NM)" $(CC) $(c_flags) -DKBUILD_EXTRA_WARN1 -$(obj)/%.log: $(src)/%.c $(src)/test_fortify.sh \ - $(src)/test_fortify.h \ - $(srctree)/include/linux/fortify-string.h \ - FORCE - $(call if_changed,test_fortify) +$(obj)/%.log: $(src)/%.c $(src)/test_fortify.sh FORCE + $(call if_changed_dep,test_fortify) logs = $(patsubst $(src)/%.c, %.log, $(wildcard $(src)/*-*.c)) targets += $(logs) @@ -21,7 +18,10 @@ quiet_cmd_gen_fortify_log = CAT $@ $(obj)/test_fortify.log: $(addprefix $(obj)/, $(logs)) FORCE $(call if_changed,gen_fortify_log) -always-y += test_fortify.log +# GCC<=7 does not always produce *.d files. +# Run the tests only for GCC>=8 or Clang. +always-$(call gcc-min-version, 80000) += test_fortify.log +always-$(CONFIG_CC_IS_CLANG) += test_fortify.log # Some architectures define __NO_FORTIFY if __SANITIZE_ADDRESS__ is undefined. # Pass CFLAGS_KASAN to avoid warnings. -- Gitee From 77d8eee33ca00ae7839d908b22c19a9057910283 Mon Sep 17 00:00:00 2001 From: Qinyun Tan Date: Tue, 8 Oct 2024 19:09:41 +0800 Subject: [PATCH 1402/2138] anolis: dmaengine: xgene-dma: workaround for compilation errors. 
ANBZ: #11232 A compiler internal error was found when compiling allyesconfig with GCC-12.3.0-2 on x86_64, with the following error message: during GIMPLE pass: slp drivers/dma/xgene-dma.c: In function 'xgene_dma_prep_xor_desc.isra': drivers/dma/xgene-dma.c:414:13: internal compiler error: Segmentation fault 414 | static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, After analysis, we identified that the error is an internal issue with GCC. The problem arises during the compilation of the function xgene_dma_prep_xor_desc located at drivers/dma/xgene-dma.c:414. Specifically, the function call xgene_dma_lookup_ext8(desc2, i - 1) within the expression: xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 : xgene_dma_lookup_ext8(desc2, i - 1), &len, &src[i]); caused a compiler error. To avoid this issue, the first argument of xgene_dma_set_src_buffer can be evaluated outside of the function. No Functional Change! Signed-off-by: Qinyun Tan Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3911 --- drivers/dma/xgene-dma.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index bb4ff8c86733..1e6cc5cdf52a 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -419,6 +419,7 @@ static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, { struct xgene_dma_desc_hw *desc1, *desc2; size_t len = *nbytes; + __le64 *ext8; int i; desc1 = &desc_sw->desc1; @@ -440,9 +441,12 @@ static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, /* Set 1st to 5th source addresses */ for (i = 0; i < src_cnt; i++) { len = *nbytes; - xgene_dma_set_src_buffer((i == 0) ? 
&desc1->m1 : - xgene_dma_lookup_ext8(desc2, i - 1), - &len, &src[i]); + if (i == 0) + ext8 = &desc1->m1; + else + ext8 = xgene_dma_lookup_ext8(desc2, i - 1); + + xgene_dma_set_src_buffer(ext8, &len, &src[i]); desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8))); } -- Gitee From 074cdf9715898b4ec6355ae2dd3072af283c1c4f Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 28 Jun 2024 10:46:02 +0800 Subject: [PATCH 1403/2138] anolis: Add support for Zhaoxin I2C controller ANBZ: #9437 Zhaoxin I2C Linux driver support all bidirectional bus protocols speed specified in the I2C Specification 7.0. Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/3626 --- drivers/i2c/busses/i2c-zhaoxin.c | 439 ++++++++++++++++--------------- 1 file changed, 226 insertions(+), 213 deletions(-) diff --git a/drivers/i2c/busses/i2c-zhaoxin.c b/drivers/i2c/busses/i2c-zhaoxin.c index 3d4cb36c1f17..ef6b03ec7fa3 100644 --- a/drivers/i2c/busses/i2c-zhaoxin.c +++ b/drivers/i2c/busses/i2c-zhaoxin.c @@ -1,29 +1,30 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Copyright(c) 2021 Shanghai Zhaoxin Semiconductor Corporation. + * Copyright(c) 2024 Shanghai Zhaoxin Semiconductor Corporation. * All rights reserved. 
*/ +#define DRIVER_VERSION "1.6.1" + #include #include #include #include #include #include +#include #include #include #include -#define DRIVER_VERSION "1.5.1" - -#define ZX_I2C_NAME "i2c_zhaoxin" +#define ZX_I2C_NAME "i2c_zhaoxin" /* REG_CR Bit fields */ #define ZXI2C_REG_CR 0x00 #define ZXI2C_CR_ENABLE BIT(0) #define ZXI2C_CR_RX_END BIT(1) #define ZXI2C_CR_TX_END BIT(2) -#define ZXI2C_CR_END_MASK GENMASK(2, 1) +#define ZXI2C_CR_END_MASK GENMASK(2, 1) #define ZXI2C_CR_CPU_RDY BIT(3) #define ZXI2C_CR_MST_RST BIT(7) #define ZXI2C_CR_FIFO_MODE BIT(14) @@ -31,36 +32,36 @@ /* REG_TCR Bit fields */ #define ZXI2C_REG_TCR 0x02 #define ZXI2C_TCR_HS_MODE BIT(13) -#define ZXI2C_TCR_MASTER_READ BIT(14) +#define ZXI2C_TCR_READ BIT(14) #define ZXI2C_TCR_FAST BIT(15) /* REG_CSR Bit fields */ #define ZXI2C_REG_CSR 0x04 -#define ZXI2C_CSR_RCV_NOT_ACK BIT(0) -#define ZXI2C_CSR_READY_MASK BIT(1) +#define ZXI2C_CSR_RCV_NOT_ACK BIT(0) +#define ZXI2C_CSR_READY_MASK BIT(1) /* REG_ISR Bit fields */ #define ZXI2C_REG_ISR 0x06 #define ZXI2C_ISR_NACK_ADDR BIT(0) #define ZXI2C_ISR_BYTE_END BIT(1) -#define ZXI2C_ISR_SCL_TIMEOUT BIT(2) -#define ZXI2C_ISR_MASK_ALL GENMASK(2, 0) +#define ZXI2C_ISR_SCL_TIMEOUT BIT(2) +#define ZXI2C_ISR_MASK_ALL GENMASK(2, 0) #define ZXI2C_IRQ_FIFOEND BIT(3) #define ZXI2C_IRQ_FIFONACK BIT(4) -#define ZXI2C_IRQ_MASK (ZXI2C_ISR_MASK_ALL | ZXI2C_IRQ_FIFOEND | ZXI2C_IRQ_FIFONACK) +#define ZXI2C_IRQ_MASK (ZXI2C_ISR_MASK_ALL | ZXI2C_IRQ_FIFOEND | ZXI2C_IRQ_FIFONACK) /* REG_IMR Bit fields */ #define ZXI2C_REG_IMR 0x08 #define ZXI2C_IMR_ADDRNACK BIT(0) #define ZXI2C_IMR_BYTE BIT(1) -#define ZXI2C_IMR_SCL_TIMEOUT BIT(2) +#define ZXI2C_IMR_SCL_TIMEOUT BIT(2) #define ZXI2C_IMR_ENABLE_ALL GENMASK(2, 0) #define ZXI2C_REG_CLK 0x10 #define ZXI2C_CLK_50M BIT(0) #define ZXI2C_REG_REV 0x11 #define ZXI2C_REG_HCR 0x12 -#define ZXI2C_HCR_RST_FIFO GENMASK(1, 0) +#define ZXI2C_HCR_RST_FIFO GENMASK(1, 0) #define ZXI2C_REG_HTDR 0x13 #define ZXI2C_REG_HRDR 0x14 #define ZXI2C_REG_HTLR 0x15 
@@ -72,30 +73,41 @@ #define ZXI2C_REG_TR 0x0C #define ZXI2C_REG_MCR 0x0E +enum { + ZXI2C_BYTE_MODE, + ZXI2C_FIFO_MODE +}; + struct zxi2c { - struct i2c_adapter adapter; - struct completion complete; - struct device *dev; - void __iomem *base; - struct clk *clk; - u16 tcr; - int irq; - u16 cmd_status; - u16 tr; - u16 mcr; - u16 csr; - u8 fstp; + struct i2c_adapter adapter; + struct completion complete; + struct device *dev; + void __iomem *base; + struct clk *clk; + struct i2c_msg *msg; + int irq; + int ret; + u16 tcr; + u16 tr; + u16 mcr; + u16 csr; + u8 fstp; u8 hrv; + bool last; + u16 xfer_len; + u16 xfered_len; + unsigned int mode; }; /* parameters Constants */ #define ZXI2C_GOLD_FSTP_100K 0xF3 #define ZXI2C_GOLD_FSTP_400K 0x38 -#define ZXI2C_GOLD_FSTP_1M 0x13 +#define ZXI2C_GOLD_FSTP_1M 0x13 #define ZXI2C_GOLD_FSTP_3400K 0x37 +#define ZXI2C_HS_MASTER_CODE (0x08 << 8) +#define ZXI2C_FIFO_SIZE 32 -#define ZXI2C_HS_MASTER_CODE (0x08 << 8) -#define ZXI2C_FIFO_SIZE 32 +#define ZXI2C_TIMEOUT 200 static int zxi2c_wait_bus_ready(struct zxi2c *i2c) { @@ -109,8 +121,8 @@ static int zxi2c_wait_bus_ready(struct zxi2c *i2c) dev_warn(i2c->dev, "timeout waiting for bus ready\n"); return -EBUSY; } - tmp = ioread16(i2c->base + ZXI2C_REG_CR); - iowrite16(tmp | ZXI2C_CR_END_MASK, i2c->base + ZXI2C_REG_CR); + tmp = ioread16(base + ZXI2C_REG_CR); + iowrite16(tmp | ZXI2C_CR_END_MASK, base + ZXI2C_REG_CR); msleep(20); } @@ -118,83 +130,159 @@ static int zxi2c_wait_bus_ready(struct zxi2c *i2c) return 0; } -static int zxi2c_wait_status(struct zxi2c *i2c, u8 status) +static int zxi2c_irq_xfer(struct zxi2c *i2c) { - unsigned long time_left; + u16 val; + struct i2c_msg *msg = i2c->msg; + u8 read = msg->flags & I2C_M_RD; + void __iomem *base = i2c->base; - time_left = wait_for_completion_timeout(&i2c->complete, msecs_to_jiffies(500)); - if (time_left <= 1) - return -ETIMEDOUT; + if (read) { + msg->buf[i2c->xfered_len] = readw(base + ZXI2C_REG_CDR) >> 8; + + val = readw(base + ZXI2C_REG_CR) 
| ZXI2C_CR_CPU_RDY; + if (i2c->xfered_len == msg->len - 2) + val |= ZXI2C_CR_RX_END; + writew(val, base + ZXI2C_REG_CR); + } else { + + val = readw(base + ZXI2C_REG_CSR); + if (val & ZXI2C_CSR_RCV_NOT_ACK) { + dev_dbg_ratelimited(i2c->dev, "write RCV NACK error\n"); + return -EIO; + } + + if (msg->len == 0) { + val = ZXI2C_CR_TX_END | ZXI2C_CR_CPU_RDY | ZXI2C_CR_ENABLE; + writew(val, base + ZXI2C_REG_CR); + return 0; + } + + if ((i2c->xfered_len + 1) == msg->len) { + if (i2c->last) + writeb(ZXI2C_CR_TX_END, base + ZXI2C_REG_CR); + } else { + writew(msg->buf[i2c->xfered_len + 1] & 0xFF, base + ZXI2C_REG_CDR); + writew(ZXI2C_CR_CPU_RDY | ZXI2C_CR_ENABLE, base + ZXI2C_REG_CR); + } + } + + i2c->xfered_len++; + + return i2c->xfered_len == msg->len; +} + +/* 'irq == true' means in interrupt context */ +int zxi2c_fifo_irq_xfer(struct zxi2c *i2c, bool irq) +{ + u16 i; + u8 tmp; + struct i2c_msg *msg = i2c->msg; + void __iomem *base = i2c->base; + bool read = !!(msg->flags & I2C_M_RD); - if (i2c->cmd_status & status) - return 0; + if (irq) { + /* get the received data */ + if (read) + for (i = 0; i < i2c->xfer_len; i++) + msg->buf[i2c->xfered_len + i] = ioread8(base + ZXI2C_REG_HRDR); + + i2c->xfered_len += i2c->xfer_len; + if (i2c->xfered_len == msg->len) + return 1; + } + + /* reset fifo buffer */ + tmp = ioread8(base + ZXI2C_REG_HCR); + iowrite8(tmp | ZXI2C_HCR_RST_FIFO, base + ZXI2C_REG_HCR); + + /* set xfer len */ + i2c->xfer_len = min_t(u16, msg->len - i2c->xfered_len, ZXI2C_FIFO_SIZE); + if (read) { + iowrite8(i2c->xfer_len - 1, base + ZXI2C_REG_HRLR); + } else { + iowrite8(i2c->xfer_len - 1, base + ZXI2C_REG_HTLR); + /* set write data */ + for (i = 0; i < i2c->xfer_len; i++) + iowrite8(msg->buf[i2c->xfered_len + i], base + ZXI2C_REG_HTDR); + } + + /* prepare to stop transmission */ + if (i2c->hrv && msg->len == (i2c->xfered_len + i2c->xfer_len)) { + tmp = ioread8(base + ZXI2C_REG_CR); + tmp |= read ? 
ZXI2C_CR_RX_END : ZXI2C_CR_TX_END; + iowrite8(tmp, base + ZXI2C_REG_CR); + } + + if (irq) { + /* continue transmission */ + tmp = ioread8(base + ZXI2C_REG_CR); + iowrite8(tmp |= ZXI2C_CR_CPU_RDY, base + ZXI2C_REG_CR); + } else { + u16 tcr_val = i2c->tcr; + + /* start transmission */ + tcr_val |= read ? ZXI2C_TCR_READ : 0; + writew(tcr_val | msg->addr, base + ZXI2C_REG_TCR); + } - return -EIO; + return 0; } static irqreturn_t zxi2c_isr(int irq, void *data) { struct zxi2c *i2c = data; + void __iomem *base = i2c->base; + u8 status; /* save the status and write-clear it */ - i2c->cmd_status = readw(i2c->base + ZXI2C_REG_ISR); - if (!i2c->cmd_status) + status = readw(base + ZXI2C_REG_ISR); + if (!status) return IRQ_NONE; - writew(i2c->cmd_status, i2c->base + ZXI2C_REG_ISR); + writew(status, base + ZXI2C_REG_ISR); + + i2c->ret = 0; + if (status & ZXI2C_ISR_NACK_ADDR) + i2c->ret = -EIO; + + if (!i2c->ret) { + if (i2c->mode == ZXI2C_BYTE_MODE) + i2c->ret = zxi2c_irq_xfer(i2c); + else + i2c->ret = zxi2c_fifo_irq_xfer(i2c, true); + } - complete(&i2c->complete); + if (i2c->ret) + complete(&i2c->complete); return IRQ_HANDLED; } -static int zxi2c_write(struct zxi2c *i2c, struct i2c_msg *msg, bool last) +static int zxi2c_write(struct zxi2c *i2c, struct i2c_msg *msg, int last) { - u16 val, tcr_val = i2c->tcr; - int xfer_len = 0; + u16 tcr_val = i2c->tcr; void __iomem *base = i2c->base; - writew(msg->buf[0] & 0xFF, base + ZXI2C_REG_CDR); - reinit_completion(&i2c->complete); - writew(tcr_val | msg->addr, base + ZXI2C_REG_TCR); - - while (xfer_len < msg->len) { - int err; + i2c->last = last; - err = zxi2c_wait_status(i2c, ZXI2C_ISR_BYTE_END); - if (err) - return err; + writew(msg->buf[0] & 0xFF, base + ZXI2C_REG_CDR); - xfer_len++; + reinit_completion(&i2c->complete); - val = readw(base + ZXI2C_REG_CSR); - if (val & ZXI2C_CSR_RCV_NOT_ACK) { - dev_dbg(i2c->dev, "write RCV NACK error\n"); - return -EIO; - } + tcr_val |= msg->addr & 0x7f; - if (msg->len == 0) { - val = ZXI2C_CR_TX_END 
| ZXI2C_CR_CPU_RDY | ZXI2C_CR_ENABLE; - writew(val, base + ZXI2C_REG_CR); - break; - } + writew(tcr_val, base + ZXI2C_REG_TCR); - if (xfer_len == msg->len) { - if (last) - writeb(ZXI2C_CR_TX_END, base + ZXI2C_REG_CR); - } else { - writew(msg->buf[xfer_len] & 0xFF, base + ZXI2C_REG_CDR); - writew(ZXI2C_CR_CPU_RDY | ZXI2C_CR_ENABLE, base + ZXI2C_REG_CR); - } - } + if (!wait_for_completion_timeout(&i2c->complete, ZXI2C_TIMEOUT)) + return -ETIMEDOUT; - return 0; + return i2c->ret; } static int zxi2c_read(struct zxi2c *i2c, struct i2c_msg *msg, bool first) { u16 val, tcr_val = i2c->tcr; - u32 xfer_len = 0; void __iomem *base = i2c->base; val = readw(base + ZXI2C_REG_CR); @@ -207,7 +295,7 @@ static int zxi2c_read(struct zxi2c *i2c, struct i2c_msg *msg, bool first) reinit_completion(&i2c->complete); - tcr_val |= ZXI2C_TCR_MASTER_READ | msg->addr; + tcr_val |= ZXI2C_TCR_READ | (msg->addr & 0x7f); writew(tcr_val, base + ZXI2C_REG_TCR); @@ -217,151 +305,85 @@ static int zxi2c_read(struct zxi2c *i2c, struct i2c_msg *msg, bool first) writew(val, base + ZXI2C_REG_CR); } - while (xfer_len < msg->len) { - int err; - - err = zxi2c_wait_status(i2c, ZXI2C_ISR_BYTE_END); - if (err) - return err; - - msg->buf[xfer_len] = readw(base + ZXI2C_REG_CDR) >> 8; - xfer_len++; - - val = readw(base + ZXI2C_REG_CR) | ZXI2C_CR_CPU_RDY; - if (xfer_len == msg->len - 1) - val |= ZXI2C_CR_RX_END; - writew(val, base + ZXI2C_REG_CR); - } + if (!wait_for_completion_timeout(&i2c->complete, ZXI2C_TIMEOUT)) + return -ETIMEDOUT; - return 0; + return i2c->ret; } -static int zxi2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) +int zxi2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct i2c_msg *msg; int i; int ret = 0; struct zxi2c *i2c = i2c_get_adapdata(adap); + i2c->mode = ZXI2C_BYTE_MODE; for (i = 0; ret >= 0 && i < num; i++) { - msg = &msgs[i]; - if (msg->len == 0) { - dev_dbg(i2c->dev, "zero len unsupported\n"); - return -ENODEV; - } + i2c->msg = msg = &msgs[i]; + 
i2c->xfered_len = 0; + if (msg->len == 0) + return -EIO; + if (msg->flags & I2C_M_RD) ret = zxi2c_read(i2c, msg, i == 0); else - ret = zxi2c_write(i2c, msg, i == (num - 1)); + ret = zxi2c_write(i2c, msg, (i + 1) == num); } return (ret < 0) ? ret : i; } -static int zxi2c_fifo_xfer(struct zxi2c *i2c, struct i2c_msg *msg) -{ - u16 xfered_len = 0; - u16 byte_left = msg->len; - u16 tcr_val = i2c->tcr; - void __iomem *base = i2c->base; - bool read = !!(msg->flags & I2C_M_RD); - - while (byte_left) { - u16 i; - u8 tmp; - int error; - u16 xfer_len = min_t(u16, byte_left, ZXI2C_FIFO_SIZE); - - byte_left -= xfer_len; - - /* reset fifo buffer */ - tmp = ioread8(base + ZXI2C_REG_HCR); - iowrite8(tmp | ZXI2C_HCR_RST_FIFO, base + ZXI2C_REG_HCR); - - /* set xfer len */ - if (read) { - iowrite8(xfer_len - 1, base + ZXI2C_REG_HRLR); - } else { - iowrite8(xfer_len - 1, base + ZXI2C_REG_HTLR); - /* set write data */ - for (i = 0; i < xfer_len; i++) - iowrite8(msg->buf[xfered_len + i], base + ZXI2C_REG_HTDR); - } - - /* prepare to stop transmission */ - if (i2c->hrv && !byte_left) { - tmp = ioread8(i2c->base + ZXI2C_REG_CR); - tmp |= read ? ZXI2C_CR_RX_END : ZXI2C_CR_TX_END; - iowrite8(tmp, base + ZXI2C_REG_CR); - } - - reinit_completion(&i2c->complete); - - if (xfered_len) { - /* continue transmission */ - tmp = ioread8(i2c->base + ZXI2C_REG_CR); - iowrite8(tmp |= ZXI2C_CR_CPU_RDY, i2c->base + ZXI2C_REG_CR); - } else { - /* start transmission */ - tcr_val |= (read ? 
ZXI2C_TCR_MASTER_READ : 0); - writew(tcr_val | msg->addr, base + ZXI2C_REG_TCR); - } - - error = zxi2c_wait_status(i2c, ZXI2C_IRQ_FIFOEND); - if (error) - return error; - - /* get the received data */ - if (read) - for (i = 0; i < xfer_len; i++) - msg->buf[xfered_len + i] = ioread8(base + ZXI2C_REG_HRDR); - - xfered_len += xfer_len; - } - - return 1; -} - static int zxi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { u8 tmp; int ret; struct zxi2c *i2c = (struct zxi2c *)i2c_get_adapdata(adap); + void __iomem *base = i2c->base; ret = zxi2c_wait_bus_ready(i2c); if (ret) return ret; - tmp = ioread8(i2c->base + ZXI2C_REG_CR); + tmp = ioread8(base + ZXI2C_REG_CR); tmp &= ~(ZXI2C_CR_RX_END | ZXI2C_CR_TX_END); if (num == 1 && msgs->len >= 2 && (i2c->hrv || msgs->len <= ZXI2C_FIFO_SIZE)) { /* enable fifo mode */ - iowrite16(ZXI2C_CR_FIFO_MODE | tmp, i2c->base + ZXI2C_REG_CR); + iowrite16(ZXI2C_CR_FIFO_MODE | tmp, base + ZXI2C_REG_CR); /* clear irq status */ - iowrite8(ZXI2C_IRQ_MASK, i2c->base + ZXI2C_REG_ISR); + iowrite8(ZXI2C_IRQ_MASK, base + ZXI2C_REG_ISR); /* enable fifo irq */ - iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IRQ_FIFOEND, i2c->base + ZXI2C_REG_IMR); - ret = zxi2c_fifo_xfer(i2c, msgs); + iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IRQ_FIFOEND, base + ZXI2C_REG_IMR); + + i2c->msg = msgs; + i2c->mode = ZXI2C_FIFO_MODE; + i2c->xfer_len = i2c->xfered_len = 0; + + zxi2c_fifo_irq_xfer(i2c, 0); + + if (!wait_for_completion_timeout(&i2c->complete, ZXI2C_TIMEOUT)) { + dev_dbg(i2c->dev, "fifo mode timeout\n"); + return -ETIMEDOUT; + } + + ret = i2c->ret; } else { /* enable byte mode */ - iowrite16(tmp, i2c->base + ZXI2C_REG_CR); + iowrite16(tmp, base + ZXI2C_REG_CR); /* clear irq status */ - iowrite8(ZXI2C_IRQ_MASK, i2c->base + ZXI2C_REG_ISR); + iowrite8(ZXI2C_IRQ_MASK, base + ZXI2C_REG_ISR); /* enable byte irq */ - iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IMR_BYTE, i2c->base + ZXI2C_REG_IMR); + iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IMR_BYTE, base + 
ZXI2C_REG_IMR); + ret = zxi2c_xfer(adap, msgs, num); - if (ret < 0) - iowrite16(tmp | ZXI2C_CR_END_MASK, i2c->base + ZXI2C_REG_CR); - /* make sure the state machine is stopped */ - usleep_range(1, 2); + if (ret == -ETIMEDOUT) { + dev_dbg(i2c->dev, "byte mode timeout\n"); + iowrite16(tmp | ZXI2C_CR_END_MASK, base + ZXI2C_REG_CR); + } } /* dis interrupt */ - iowrite8(0, i2c->base + ZXI2C_REG_IMR); - - /* timeout may caused by another high-priority process, try again */ - if (ret == -ETIMEDOUT) - ret = -EAGAIN; + iowrite8(0, base + ZXI2C_REG_IMR); return ret; } @@ -386,7 +408,7 @@ static const u32 zxi2c_speed_params_table[][3] = { { I2C_MAX_FAST_MODE_FREQ, ZXI2C_TCR_FAST, ZXI2C_GOLD_FSTP_400K }, { I2C_MAX_FAST_MODE_PLUS_FREQ, ZXI2C_TCR_FAST, ZXI2C_GOLD_FSTP_1M }, { I2C_MAX_HIGH_SPEED_MODE_FREQ, ZXI2C_TCR_HS_MODE | ZXI2C_TCR_FAST, - ZXI2C_GOLD_FSTP_3400K }, + ZXI2C_GOLD_FSTP_3400K }, /* never reached, keep for debug. freq src is 27M mode */ { I2C_MAX_STANDARD_MODE_FREQ, 0, 0x83 }, { I2C_MAX_FAST_MODE_FREQ, ZXI2C_TCR_FAST, 0x1e }, @@ -395,9 +417,11 @@ static const u32 zxi2c_speed_params_table[][3] = { static void zxi2c_set_bus_speed(struct zxi2c *i2c) { - iowrite16(i2c->tr, i2c->base + ZXI2C_REG_TR); - iowrite8(ZXI2C_CLK_50M, i2c->base + ZXI2C_REG_CLK); - iowrite16(i2c->mcr, i2c->base + ZXI2C_REG_MCR); + void __iomem *base = i2c->base; + + iowrite16(i2c->tr, base + ZXI2C_REG_TR); + iowrite8(ZXI2C_CLK_50M, base + ZXI2C_REG_CLK); + iowrite16(i2c->mcr, base + ZXI2C_REG_MCR); } static void zxi2c_get_bus_speed(struct zxi2c *i2c) @@ -405,6 +429,7 @@ static void zxi2c_get_bus_speed(struct zxi2c *i2c) u8 i, count; u8 fstp; const u32 *params; + void __iomem *base = i2c->base; u32 acpi_speed = i2c_acpi_find_bus_speed(i2c->dev); @@ -416,20 +441,21 @@ static void zxi2c_get_bus_speed(struct zxi2c *i2c) i = i < count ? 
i : 1; params = zxi2c_speed_params_table[i]; - fstp = ioread8(i2c->base + ZXI2C_REG_TR); + fstp = ioread8(base + ZXI2C_REG_TR); if (abs(fstp - params[2]) > 0x10) { /* * if BIOS setting value far from golden value, * use golden value and warn user */ - dev_warn(i2c->dev, "speed:%d, fstp:0x%x, golden:0x%x\n", - params[0], fstp, params[2]); + dev_warn(i2c->dev, "speed:%d, fstp:0x%x, golden:0x%x\n", params[0], fstp, + params[2]); i2c->tr = params[2] | 0xff00; - } else + } else { i2c->tr = fstp | 0xff00; + } i2c->tcr = params[1]; - i2c->mcr = ioread16(i2c->base + ZXI2C_REG_MCR); + i2c->mcr = ioread16(base + ZXI2C_REG_MCR); /* for Hs-mode, use 0000 1000 as master code */ if (params[0] == I2C_MAX_HIGH_SPEED_MODE_FREQ) i2c->mcr |= ZXI2C_HS_MASTER_CODE; @@ -461,14 +487,17 @@ static int zxi2c_init(struct platform_device *pdev, struct zxi2c **pi2c) return i2c->irq; err = devm_request_irq(&pdev->dev, i2c->irq, zxi2c_isr, IRQF_SHARED, pdev->name, i2c); - if (err) - return dev_err_probe(&pdev->dev, err, "failed to request irq %i\n", i2c->irq); + if (err) { + dev_err(&pdev->dev, "failed to request irq %i\n", i2c->irq); + return err; + } i2c->dev = &pdev->dev; init_completion(&i2c->complete); platform_set_drvdata(pdev, i2c); *pi2c = i2c; + return 0; } @@ -489,36 +518,21 @@ static int zxi2c_probe(struct platform_device *pdev) adap = &i2c->adapter; adap->owner = THIS_MODULE; adap->algo = &zxi2c_algorithm; - adap->retries = 2; + adap->quirks = &zxi2c_quirks; adap->dev.parent = &pdev->dev; ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); snprintf(adap->name, sizeof(adap->name), "zhaoxin-%s-%s", dev_name(pdev->dev.parent), - dev_name(i2c->dev)); + dev_name(i2c->dev)); i2c_set_adapdata(adap, i2c); - error = i2c_add_adapter(adap); + error = devm_i2c_add_adapter(&pdev->dev, adap); if (error) return error; - dev_info(i2c->dev, "adapter /dev/i2c-%d registered. 
version %s\n", - adap->nr, DRIVER_VERSION); - - return 0; -} - -static int zxi2c_remove(struct platform_device *pdev) -{ - struct zxi2c *i2c = platform_get_drvdata(pdev); - - devm_free_irq(&pdev->dev, i2c->irq, i2c); - - i2c_del_adapter(&i2c->adapter); - - platform_set_drvdata(pdev, NULL); - - devm_kfree(&pdev->dev, i2c); + dev_info(i2c->dev, "adapter /dev/i2c-%d registered. version %s\n", adap->nr, + DRIVER_VERSION); return 0; } @@ -545,10 +559,9 @@ MODULE_DEVICE_TABLE(acpi, zxi2c_acpi_match); static struct platform_driver zxi2c_driver = { .probe = zxi2c_probe, - .remove = zxi2c_remove, .driver = { .name = ZX_I2C_NAME, - .acpi_match_table = ACPI_PTR(zxi2c_acpi_match), + .acpi_match_table = zxi2c_acpi_match, .pm = &zxi2c_pm, }, }; -- Gitee From e7250b815215939852af4a03173680027e0c00c3 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Tue, 18 Jun 2024 18:53:26 +0800 Subject: [PATCH 1404/2138] anolis: ata: ahci: Add support for AHCI SGPIO Enclosure Management ANBZ: #9438 To monitor and control auxiliary service in a drive enclosure, Zhaoxin AHCI controller adds enclosure management support in SGPIO protocols with two messages types: LED message type and SGPIO register interface message type. The LED message type uses a genernal ahci specific interface which has already been supported by default ahci driver, the SGPIO register interface message type based on SFF-8485 which defined by vendor specific, this patch adds support for it. 
Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/3439 --- drivers/ata/Kconfig | 10 + drivers/ata/Makefile | 1 + drivers/ata/ahci_zhaoxin_sgpio.c | 706 +++++++++++++++++++++++++++++++ drivers/ata/ahci_zhaoxin_sgpio.h | 221 ++++++++++ drivers/ata/libahci.c | 6 + 5 files changed, 944 insertions(+) create mode 100644 drivers/ata/ahci_zhaoxin_sgpio.c create mode 100644 drivers/ata/ahci_zhaoxin_sgpio.h diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 0fd5a5bce3e4..423b4194f06b 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -115,6 +115,16 @@ config SATA_AHCI If unsure, say N. +config AHCI_ZHAOXIN_SGPIO + tristate "zhaoxin AHCI SGPIO support" + depends on SATA_AHCI + default y + help + This option enables support for Zhaoxin AHCI SGPIO. + Add support SGPIO mode and SGPIO GP mode. + + If unsure, say N. + config SATA_MOBILE_LPM_POLICY int "Default SATA Link Power Management policy for low power chipsets" range 0 4 diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 4b846692e365..ee2cb6367b66 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_AHCI_ST) += ahci_st.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_TEGRA) += ahci_tegra.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_XGENE) += ahci_xgene.o libahci.o libahci_platform.o obj-$(CONFIG_AHCI_QORIQ) += ahci_qoriq.o libahci.o libahci_platform.o +obj-$(CONFIG_AHCI_ZHAOXIN_SGPIO) += ahci_zhaoxin_sgpio.o # SFF w/ custom DMA obj-$(CONFIG_PDC_ADMA) += pdc_adma.o diff --git a/drivers/ata/ahci_zhaoxin_sgpio.c b/drivers/ata/ahci_zhaoxin_sgpio.c new file mode 100644 index 000000000000..ad0715bc389e --- /dev/null +++ b/drivers/ata/ahci_zhaoxin_sgpio.c @@ -0,0 +1,706 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * ahci_zhaoxin_sgpio.c - Driver for Zhaoxin sgpio + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + 
+#include "ahci.h" +#include "libata.h" +#include "ahci_zhaoxin_sgpio.h" + +static LIST_HEAD(sgpio_zhaoxin_list); + +static unsigned int zhaoxin_em_type __read_mostly = AHCI_EM_MSG_LED_MODE; /*LED protocol*/ +module_param(zhaoxin_em_type, int, 0644); +MODULE_PARM_DESC(zhaoxin_em_type, + "AHCI Enclosure Management Message type control (1 = led on, 2 = sgpio on,3 = sgpio gp on)"); + +int ahci_wait_em_reset(struct sgpio_zhaoxin *sgpio_zhaoxin, u32 retry) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + u32 em_ctl; + + if (!sgpio_zhaoxin || retry == 0) { + pr_err("In ahci wait em reset, invalid param\n"); + return -EINVAL; + } + + while (retry--) { /*EM_CTL needs reset at least 64ms*/ + em_ctl = readl(mmio + HOST_EM_CTL); + if (em_ctl & EM_CTL_RST) + usleep_range(10000, 20000); /*EM_CTL still in reset, usleep 10ms*/ + else + break; + + if (!retry) + pr_err("Wait for EM_CTL reset, time out\n"); + } + + return 0; +} + +void ahci_zhaoxin_set_em_sgpio(struct sgpio_zhaoxin *sgpio_zhaoxin) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + + u32 read; + + sgpio_zhaoxin->sgpio_reg.cfg_0.enable = 1; + + sgpio_zhaoxin->sgpio_reg.cfg_1.blink_gen_a = 0x7; + sgpio_zhaoxin->sgpio_reg.cfg_1.blink_gen_b = 0x3; + sgpio_zhaoxin->sgpio_reg.cfg_1.blink_gen_c = 0x0; + sgpio_zhaoxin->sgpio_reg.cfg_1.stretch_act_on = 0; + sgpio_zhaoxin->sgpio_reg.cfg_1.stretch_act_off = 0; + sgpio_zhaoxin->sgpio_reg.cfg_1.max_act_on = 2; + sgpio_zhaoxin->sgpio_reg.cfg_1.force_act_off = 1; + + sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sload = 0xf; + sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.count = 0x0; + + sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 = 0; + sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 = 0; + sgpio_zhaoxin->sgpio_reg.gp_transmit_reg.sgpio_tx_gp = 0; + + sgpio_zhaoxin->sgpio_reg.receive_reg.sgpio_rx = 0x07070707; + sgpio_zhaoxin->sgpio_reg.gp_receive_reg.sgpio_rx_gp = 0; + + /*Setup SGPIO type*/ + read = readl(mmio + sgpio_zhaoxin->em_loc); + 
read = read | SGPIO_MESSAGE_HEAD; /*LED register MSG_HEAD, select SGPIO*/ + writel(read, mmio + sgpio_zhaoxin->em_loc); + + /*Setup gp mode*/ + writel(sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sgpio_tx_gp_cfg, em_mmio + 0x38); + + /*Initial SGPIO CFG1*/ + writel(sgpio_zhaoxin->sgpio_reg.cfg_1.sgpio_cfg_1, em_mmio + 0x4); + + /*Initial SGPIO CFG0*/ + read = readl(em_mmio); + read |= sgpio_zhaoxin->sgpio_reg.cfg_0.sgpio_cfg_0; + writel(read, em_mmio); +} + +void ahci_zhaoxin_set_em_sgpio_gpmode(struct sgpio_zhaoxin *sgpio_zhaoxin) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + u32 read; + + sgpio_zhaoxin->sgpio_reg.cfg_0.enable = 1; + + sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sload = 0xf; + sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.count = 0xff; + + sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 = 0; + sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 = 0; + sgpio_zhaoxin->sgpio_reg.gp_transmit_reg.sgpio_tx_gp = 0; + + sgpio_zhaoxin->sgpio_reg.receive_reg.sgpio_rx = 0; + sgpio_zhaoxin->sgpio_reg.gp_receive_reg.sgpio_rx_gp = 0xff0f0000; + + /*Setup SGPIO type*/ + read = readl(mmio + sgpio_zhaoxin->em_loc); + read |= SGPIO_MESSAGE_HEAD; + writel(read, mmio + sgpio_zhaoxin->em_loc); + + /*Setup gp mode*/ + writel(sgpio_zhaoxin->sgpio_reg.gp_transmit_cfg.sgpio_tx_gp_cfg, em_mmio + 0x38); + + /*Enable SGPIO*/ + writel(sgpio_zhaoxin->sgpio_reg.cfg_0.sgpio_cfg_0, em_mmio); +} + +static ssize_t ahci_em_type_sys_show(struct sgpio_zhaoxin *sgpio_zhaoxin, char *buf) +{ + return sprintf(buf, "0x%x\n", zhaoxin_em_type); +} +static ssize_t ahci_em_type_sys_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, + size_t count) +{ + int code = 0; + int rc = 0; + + if (kstrtouint(buf, 0, &code)) + return count; + + if (code == AHCI_EM_MSG_LED_MODE) { + zhaoxin_em_type = code; + } else if (code == AHCI_EM_MSG_SGPIO_MODE) { + rc = ahci_wait_em_reset(sgpio_zhaoxin, 7); /*wait at least 64ms*/ + if (rc < 0) { + pr_err("ahci wait em reset 
failed!\n"); + return rc; + } + zhaoxin_em_type = code; + ahci_zhaoxin_set_em_sgpio(sgpio_zhaoxin); + } else if (code == AHCI_EM_MSG_SGPIO_GP_MODE) { + rc = ahci_wait_em_reset(sgpio_zhaoxin, 7); /*wait at least 64ms*/ + if (rc < 0) { + pr_err("ahci wait em reset failed!\n"); + return rc; + } + zhaoxin_em_type = code; + ahci_zhaoxin_set_em_sgpio_gpmode(sgpio_zhaoxin); + } else + pr_err("Incorrect value:1 = LED on, 2 = SGPIO normal on, 3 = SGPIO GP on)\n"); + + return count; +} + +static ssize_t ahci_transmit_sgpio_message(unsigned long port_num, + struct sgpio_zhaoxin *sgpio_zhaoxin, u16 state, + ssize_t size) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + unsigned long flags; + + if (!(sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO)) + return -EINVAL; + + spin_lock_irqsave(&sgpio_zhaoxin->wr_lock, flags); + + switch (port_num) { + case 0: + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writew(state, em_mmio + 0x22); + sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 &= 0x0000ffff; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_0_active = (state & 0x3c0) >> 6; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_0_locate = (state & 0x38) >> 3; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_0_error = state & 0x7; + break; + case 1: + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writew(state, em_mmio + 0x20); + sgpio_zhaoxin->sgpio_reg.transmit_0.sgpio_tx_0 &= 0xffff0000; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_1_active = (state & 0x3c0) >> 6; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_1_locate = (state & 0x38) >> 3; + sgpio_zhaoxin->sgpio_reg.transmit_0.drive_1_error = state & 0x7; + break; + case 2: + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writew(state, em_mmio + 0x26); + sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 &= 0x0000ffff; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_2_active = (state & 0x3c0) >> 6; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_2_locate = (state & 0x38) >> 
3; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_2_error = state & 0x7; + break; + case 3: + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writew(state, em_mmio + 0x24); + sgpio_zhaoxin->sgpio_reg.transmit_1.sgpio_tx_1 &= 0xffff0000; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_3_active = (state & 0x3c0) >> 6; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_3_locate = (state & 0x38) >> 3; + sgpio_zhaoxin->sgpio_reg.transmit_1.drive_3_error = state & 0x7; + break; + default: + pr_err("Unsupported port number in this controller\n"); + break; + } + + spin_unlock_irqrestore(&sgpio_zhaoxin->wr_lock, flags); + + return size; +} + +static ssize_t ahci_transmit_sgpio_indicator(unsigned long port_num, + struct sgpio_zhaoxin *sgpio_zhaoxin, + u8 indicator_code, enum SGPIO_INDICATOR type, + ssize_t size) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + u16 state; + + if (!(sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO)) + return -EINVAL; + + if (get_ahci_em_messages() && (zhaoxin_em_type != AHCI_EM_MSG_SGPIO_MODE)) { + pr_err("Current setting not SGPIO normal mode, quit\n"); + return -EINVAL; + } + + switch (port_num) { + case 0: + state = readw(em_mmio + 0x22); + break; + case 1: + state = readw(em_mmio + 0x20); + break; + case 2: + state = readw(em_mmio + 0x26); + break; + case 3: + state = readw(em_mmio + 0x24); + break; + default: + return -EINVAL; + } + + if (type == SGPIO_ACTIVITY) { + state &= 0xfc3f; + state |= (indicator_code&0xf) << 6; + } else if (type == SGPIO_LOCATE) { + state &= 0xffc7; + state |= (indicator_code&0x7) << 3; + } else if (type == SGPIO_ERROR) { + state &= 0xfff8; + state |= indicator_code & 0x7; + } else { + return -EINVAL; + } + + return ahci_transmit_sgpio_message(port_num, sgpio_zhaoxin, state, size); +} + +static ssize_t ahci_transmit_sgpio_indicator_gp(unsigned long port_num, + struct sgpio_zhaoxin *sgpio_zhaoxin, + u8 indicator_code, enum SGPIO_INDICATOR type, + ssize_t size) +{ + void 
__iomem *mmio = sgpio_zhaoxin->mmio; + void __iomem *em_mmio = mmio + SGPIO_OFFSET; + union SGPIO_TX_GP state; + unsigned long flags; + + if (!(sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO)) + return -EINVAL; + + if (get_ahci_em_messages() && (zhaoxin_em_type != AHCI_EM_MSG_SGPIO_GP_MODE)) { + pr_err("Current setting not SGPIO_GP mode, quit\n"); + return -EINVAL; + } + + spin_lock_irqsave(&sgpio_zhaoxin->wr_lock, flags); + + state.sgpio_tx_gp = readl(em_mmio + 0x3c); + switch (port_num) { + case 0: + if (type == SGPIO_ACTIVITY) + state.D00 = indicator_code & 0x1; + else if (type == SGPIO_LOCATE) + state.D01 = indicator_code & 0x1; + else if (type == SGPIO_ERROR) + state.D02 = indicator_code & 0x1; + break; + case 1: + if (type == SGPIO_ACTIVITY) + state.D10 = indicator_code & 0x1; + else if (type == SGPIO_LOCATE) + state.D11 = indicator_code & 0x1; + else if (type == SGPIO_ERROR) + state.D12 = indicator_code & 0x1; + break; + case 2: + if (type == SGPIO_ACTIVITY) + state.D20 = indicator_code & 0x1; + else if (type == SGPIO_LOCATE) + state.D21 = indicator_code & 0x1; + else if (type == SGPIO_ERROR) + state.D22 = indicator_code & 0x1; + break; + case 3: + if (type == SGPIO_ACTIVITY) + state.D30 = indicator_code & 0x1; + else if (type == SGPIO_LOCATE) + state.D31 = indicator_code & 0x1; + else if (type == SGPIO_ERROR) + state.D32 = indicator_code & 0x1; + break; + default: + return -EINVAL; + } + + writel(SGPIO_MESSAGE_HEAD, mmio + sgpio_zhaoxin->em_loc); + writel(state.sgpio_tx_gp, em_mmio + 0x3c); + sgpio_zhaoxin->sgpio_reg.gp_transmit_reg.sgpio_tx_gp = state.sgpio_tx_gp; + + spin_unlock_irqrestore(&sgpio_zhaoxin->wr_lock, flags); + return size; +} + +static ssize_t sgpio_activity_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, + size_t count) +{ + unsigned long val = 0; + unsigned long port_num = 0; + unsigned long code = 0; + + if (kstrtoul(buf, 0, &val)) + return count; + + port_num = val & 0xf; + code = val >> 4; + + if (sgpio_zhaoxin->em_msg_type 
& EM_MSG_TYPE_SGPIO) { + switch (code) { + case 0x0: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_DISABLE, SGPIO_ACTIVITY, 1); + break; + case 0x1: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_ENABLE, SGPIO_ACTIVITY, 1); + break; + case 0x2: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GA_FON, SGPIO_ACTIVITY, 1); + break; + case 0x3: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GA_FOFF, SGPIO_ACTIVITY, 1); + break; + case 0x4: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_BRIEF_EN_EOF, SGPIO_ACTIVITY, 1); + break; + case 0x5: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_BRIEF_EN_SOF, SGPIO_ACTIVITY, 1); + break; + case 0x6: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GB_FON, SGPIO_ACTIVITY, 1); + break; + case 0x7: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GB_FOFF, SGPIO_ACTIVITY, 1); + break; + case 0x8: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GC_FON, SGPIO_ACTIVITY, 1); + break; + case 0x9: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + ACTIVITY_GC_FOFF, SGPIO_ACTIVITY, 1); + break; + case 0x10: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_OFF, SGPIO_ACTIVITY, 1); + break; + case 0x11: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_ON, SGPIO_ACTIVITY, 1); + break; + default: + pr_err("Unsupported command for activity indicator, cmd:0x%lx\n", val); + break; + } + + return count; + } + + return -EINVAL; +} + +static ssize_t sgpio_locate_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, + size_t count) +{ + unsigned long val = 0; + unsigned long port_num = 0; + unsigned long code = 0; + + if (kstrtoul(buf, 0, &val)) + return count; + + port_num = val & 0xf; + code = val >> 4; + + if (sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO) { + switch (code) { + case 0x0: + 
ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_DISABLE, SGPIO_LOCATE, 1); + break; + case 0x1: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_ENABLE, SGPIO_LOCATE, 1); + break; + case 0x2: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GA_FON, SGPIO_LOCATE, 1); + break; + case 0x3: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GA_FOFF, SGPIO_LOCATE, 1); + break; + case 0x4: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GB_FON, SGPIO_LOCATE, 1); + break; + case 0x5: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GB_FOFF, SGPIO_LOCATE, 1); + break; + case 0x6: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GC_FON, SGPIO_LOCATE, 1); + break; + case 0x7: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GC_FOFF, SGPIO_LOCATE, 1); + break; + case 0x10: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_OFF, SGPIO_LOCATE, 1); + break; + case 0x11: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, GP_ON, + SGPIO_LOCATE, 1); + break; + default: + pr_err("Unsupported command for locate indicator, cmd:0x%lx\n", val); + break; + } + + return count; + } + return -EINVAL; +} + +static ssize_t sgpio_error_store(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, size_t count) +{ + unsigned long val = 0; + unsigned long port_num = 0; + unsigned long code = 0; + + if (kstrtoul(buf, 0, &val)) + return count; + + port_num = val & 0xf; + code = val >> 4; + + if (sgpio_zhaoxin->em_msg_type & EM_MSG_TYPE_SGPIO) { + switch (code) { + case 0x0: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_DISABLE, SGPIO_ERROR, 1); + break; + case 0x1: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_ENABLE, SGPIO_ERROR, 1); + break; + case 0x2: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GA_FON, SGPIO_ERROR, 
1); + break; + case 0x3: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GA_FOFF, SGPIO_ERROR, 1); + break; + case 0x4: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GB_FON, SGPIO_ERROR, 1); + break; + case 0x5: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GB_FOFF, SGPIO_ERROR, 1); + break; + case 0x6: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GC_FON, SGPIO_ERROR, 1); + break; + case 0x7: + ahci_transmit_sgpio_indicator(port_num, sgpio_zhaoxin, + LOCATE_ERROR_GC_FOFF, SGPIO_ERROR, 1); + break; + case 0x10: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_OFF, SGPIO_ERROR, 1); + break; + case 0x11: + ahci_transmit_sgpio_indicator_gp(port_num, sgpio_zhaoxin, + GP_ON, SGPIO_ERROR, 1); + break; + default: + pr_err("Unsupport command for error indicator, cmd:0x%lx\n", val); + break; + } + + return count; + } + + return -EINVAL; +} + +static struct sgpio_zhaoxin_sysfs_attr dev_attr_ahci_em_type_sys = + __ATTR(ahci_em_type_sys, 0644, ahci_em_type_sys_show, + ahci_em_type_sys_store); +static struct sgpio_zhaoxin_sysfs_attr dev_attr_sgpio_activity = + __ATTR(sgpio_activity, 0200, NULL, sgpio_activity_store); +static struct sgpio_zhaoxin_sysfs_attr dev_attr_sgpio_locate = + __ATTR(sgpio_locate, 0200, NULL, sgpio_locate_store); +static struct sgpio_zhaoxin_sysfs_attr dev_attr_sgpio_error = + __ATTR(sgpio_error, 0200, NULL, sgpio_error_store); + +struct attribute *sgpio_attrs[] = { + &dev_attr_ahci_em_type_sys.attr, + &dev_attr_sgpio_activity.attr, + &dev_attr_sgpio_locate.attr, + &dev_attr_sgpio_error.attr, + NULL +}; + +static const struct attribute_group sgpio_attrs_group = { + .attrs = sgpio_attrs +}; +const struct attribute_group *sgpio_groups[] = { + &sgpio_attrs_group, + NULL +}; + +static ssize_t sgpio_zhaoxin_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) +{ + struct sgpio_zhaoxin_sysfs_attr *sgpio_zhaoxin_sysfs_attr = 
to_sgpio_attr(attr); + struct sgpio_zhaoxin *sgpio_zhaoxin = to_sgpio_obj(kobj); + + if (!sgpio_zhaoxin_sysfs_attr->show) + return -EIO; + + return sgpio_zhaoxin_sysfs_attr->show(sgpio_zhaoxin, buf); +} + +static ssize_t sgpio_zhaoxin_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t len) +{ + struct sgpio_zhaoxin_sysfs_attr *sgpio_zhaoxin_sysfs_attr = to_sgpio_attr(attr); + struct sgpio_zhaoxin *sgpio_zhaoxin = to_sgpio_obj(kobj); + + if (!sgpio_zhaoxin_sysfs_attr->store) + return -EIO; + + return sgpio_zhaoxin_sysfs_attr->store(sgpio_zhaoxin, buf, len); +} + +const struct sysfs_ops sgpio_zhaoxin_sysfs_ops = { + .show = sgpio_zhaoxin_attr_show, + .store = sgpio_zhaoxin_attr_store, +}; + +const struct kobj_type sgpio_zhaoxin_ktype = { + .sysfs_ops = &sgpio_zhaoxin_sysfs_ops, + .default_groups = sgpio_groups, +}; + +void set_em_messages(struct sgpio_zhaoxin *sgpio_zhaoxin) +{ + void __iomem *mmio = sgpio_zhaoxin->mmio; + u32 em_loc = readl(mmio + HOST_EM_LOC); + u32 em_ctl = readl(mmio + HOST_EM_CTL); + u8 messages; + + if (!get_ahci_em_messages()) + return; + + messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16; + + if (messages) { + /* store em_loc */ + sgpio_zhaoxin->em_loc = ((em_loc >> 16) * 4); + sgpio_zhaoxin->em_buf_sz = ((em_loc & 0xff) * 4); + sgpio_zhaoxin->em_msg_type = messages; + } +} + +int add_sgpio_zhaoxin(void) +{ + struct pci_dev *pdev_cur = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x9083, NULL); + struct pci_dev *pdev_next = pdev_cur; + struct sgpio_zhaoxin *sgpio_zhaoxin; + int ret = 0; + + if (!get_ahci_em_messages()) + return 0; + + while (pdev_next) { + pdev_next = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x9083, pdev_cur); + + WARN_ON(MAX_TEST_RESULT_LEN <= 0); + + sgpio_zhaoxin = (struct sgpio_zhaoxin *)get_zeroed_page(GFP_KERNEL); + if (!sgpio_zhaoxin) + return -ENOMEM; + + list_add(&sgpio_zhaoxin->list, &sgpio_zhaoxin_list); + ret = kobject_init_and_add(&sgpio_zhaoxin->kobj, &sgpio_zhaoxin_ktype, + 
&(&pdev_cur->dev)->kobj, "zx_sgpio"); + if (ret) { + kobject_put(&sgpio_zhaoxin->kobj); + return -1; + } + + kobject_uevent(&sgpio_zhaoxin->kobj, KOBJ_ADD); + spin_lock_init(&sgpio_zhaoxin->wr_lock); + sgpio_zhaoxin->kobj_valid = 1; + sgpio_zhaoxin->mmio = pcim_iomap_table(pdev_cur)[5]; + set_em_messages(sgpio_zhaoxin); + ret = ahci_wait_em_reset(sgpio_zhaoxin, 7); /*wait at least 64ms*/ + if (ret < 0) { + pr_err("ahci wait em reset failed!\n"); + return ret; + } + + sgpio_zhaoxin->kobj_valid = 1; + + if (zhaoxin_em_type == AHCI_EM_MSG_SGPIO_GP_MODE) + ahci_zhaoxin_set_em_sgpio_gpmode(sgpio_zhaoxin); + else if (zhaoxin_em_type == AHCI_EM_MSG_SGPIO_MODE) + ahci_zhaoxin_set_em_sgpio(sgpio_zhaoxin); + + pdev_cur = pdev_next; + } + + return 0; +} + + +void remove_sgpio_zhaoxin(void) +{ + struct sgpio_zhaoxin *cur = NULL, *next = NULL; + + if (!get_ahci_em_messages()) + return; + + list_for_each_entry_safe(cur, next, &sgpio_zhaoxin_list, list) { + list_del(&cur->list); + if (cur->kobj_valid) + kobject_put(&cur->kobj); + + free_page((unsigned long)cur); + if (!next) + break; + } +} + +static int __init zhaoxin_sgpio_init(void) +{ + return add_sgpio_zhaoxin(); +} + +static void __exit zhaoxin_sgpio_exit(void) +{ + remove_sgpio_zhaoxin(); +} + +late_initcall(zhaoxin_sgpio_init); +module_exit(zhaoxin_sgpio_exit); + +MODULE_DESCRIPTION("Zhaoxin SGPIO driver"); +MODULE_AUTHOR("XanderChen"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ata/ahci_zhaoxin_sgpio.h b/drivers/ata/ahci_zhaoxin_sgpio.h new file mode 100644 index 000000000000..b9fd7c665602 --- /dev/null +++ b/drivers/ata/ahci_zhaoxin_sgpio.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ACHI_ZHAOXIN_SGPIO_H +#define _ACHI_ZHAOXIN_SGPIO_H + +#define SGPIO_OFFSET 0x580 + +#define SGPIO_MESSAGE_HEAD 0x3000000 + +#define ACTIVITY_DISABLE 0x0 +#define ACTIVITY_ENABLE 0x1 +#define ACTIVITY_GA_FON 0x2 +#define ACTIVITY_GA_FOFF 0x3 +#define ACTIVITY_BRIEF_EN_EOF 0x4 +#define ACTIVITY_BRIEF_EN_SOF 0x5 
+#define ACTIVITY_GB_FON 0x6 +#define ACTIVITY_GB_FOFF 0x7 +#define ACTIVITY_GC_FON 0x8 +#define ACTIVITY_GC_FOFF 0x9 +#define LOCATE_ERROR_DISABLE 0x0 +#define LOCATE_ERROR_ENABLE 0x1 +#define LOCATE_ERROR_GA_FON 0x2 +#define LOCATE_ERROR_GA_FOFF 0x3 +#define LOCATE_ERROR_GB_FON 0x4 +#define LOCATE_ERROR_GB_FOFF 0x5 +#define LOCATE_ERROR_GC_FON 0x6 +#define LOCATE_ERROR_GC_FOFF 0x7 + +#define GP_OFF 0x10 +#define GP_ON 0x11 + +#define to_sgpio_attr(x) container_of(x, struct sgpio_zhaoxin_sysfs_attr, attr) +#define to_sgpio_obj(x) container_of(x, struct sgpio_zhaoxin, kobj) +#define MAX_TEST_RESULT_LEN (PAGE_SIZE - sizeof(struct sgpio_zhaoxin) - 8) + +//SGPIO module parameter: 0-off, 1-LED, 2-SGPIO, 3-SGPIO_GP +enum ahci_em_msg_modes { + AHCI_EM_MSG_OFF = 0, + AHCI_EM_MSG_LED_MODE, + AHCI_EM_MSG_SGPIO_MODE, + AHCI_EM_MSG_SGPIO_GP_MODE, + AHCI_EM_MSG_NULL, +}; + +enum SGPIO_INDICATOR { + SGPIO_ACTIVITY, + SGPIO_LOCATE, + SGPIO_ERROR +}; + +enum SGPIO_CFG1 { + STRETCH_ACTIVITY_OFF, + STRETCH_ACTIVITY_ON, + FORCE_ACTIVITY_OFF, + MAXIMUM_ACTIVITY_ON, + BLINK_GENERATIOR_RATE_B, + BLINK_GENERATIOR_RATE_A, + BLINK_GENERATIOR_RATE_C +}; + +union SGPIO_CFG_0 { + struct { + u32 reserved0 :8; + u32 version :4; + u32 reserved1 :4; + u32 gp_register_count :4; + u32 cfg_register_count :3; + u32 enable :1; + u32 supported_drive_count :8; + }; + u32 sgpio_cfg_0; +}; + +union SGPIO_CFG_1 { + struct { + u32 reserved0 :4; + u32 blink_gen_c :4; + u32 blink_gen_a :4; + u32 blink_gen_b :4; + u32 max_act_on :4; + u32 force_act_off :4; + u32 stretch_act_on :4; + u32 stretch_act_off :4; + }; + u32 sgpio_cfg_1; +}; + +union SGPIO_RX { + struct { + u32 drive_3_input :3; + u32 reserved3 :5; + u32 drive_2_input :3; + u32 reserved2 :5; + u32 drive_1_input :3; + u32 reserved1 :5; + u32 drive_0_input :3; + u32 reserved0 :5; + }; + u32 sgpio_rx; +}; + +union SGPIO_RX_GP_CFG { + struct { + u32 reserved0 :16; + u32 count :8; + u32 reserved1 :8; + }; + u32 sgpio_rx_gp_cfg; +}; +union SGPIO_RX_GP { + 
struct { + u32 reserved0 :16; + u32 D22 :1; + u32 D30 :1; + u32 D31 :1; + u32 D32 :1; + u32 reserved1:4; + u32 D00 :1; + u32 D01 :1; + u32 D02 :1; + u32 D10 :1; + u32 D11 :1; + u32 D12 :1; + u32 D20 :1; + u32 D21 :1; + }; + u32 sgpio_rx_gp; +}; + +union SGPIO_TX_0 { + struct { + u32 drive_1_error :3; + u32 drive_1_locate :3; + u32 drive_1_active :4; + u32 reserved1 :6; + u32 drive_0_error :3; + u32 drive_0_locate :3; + u32 drive_0_active :4; + u32 reserved0 :6; + }; + u32 sgpio_tx_0; +}; + +union SGPIO_TX_1 { + struct { + u32 drive_3_error :3; + u32 drive_3_locate :3; + u32 drive_3_active :4; + u32 reserved3 :6; + u32 drive_2_error :3; + u32 drive_2_locate :3; + u32 drive_2_active :4; + u32 reserved2 :6; + }; + u32 sgpio_tx_1; +}; + +union SGPIO_TX_GP_CFG { + struct { + u32 reserved0 :16; + u32 count :8; + u32 sload :4; + u32 reserved1 :4; + }; + u32 sgpio_tx_gp_cfg; +}; + +union SGPIO_TX_GP { + struct { + u32 reserved0 :16; + u32 D22 :1; + u32 D30 :1; + u32 D31 :1; + u32 D32 :1; + u32 reserved1:4; + u32 D00 :1; + u32 D01 :1; + u32 D02 :1; + u32 D10 :1; + u32 D11 :1; + u32 D12 :1; + u32 D20 :1; + u32 D21 :1; + }; + u32 sgpio_tx_gp; +}; + +struct AHCI_SGPIO_REG { + union SGPIO_CFG_0 cfg_0; + union SGPIO_CFG_1 cfg_1; + union SGPIO_RX receive_reg; + union SGPIO_RX_GP_CFG gp_receive_cfg; + union SGPIO_RX_GP gp_receive_reg; + union SGPIO_TX_0 transmit_0; + union SGPIO_TX_1 transmit_1; + union SGPIO_TX_GP_CFG gp_transmit_cfg; + union SGPIO_TX_GP gp_transmit_reg; +}; + +struct sgpio_zhaoxin { + struct kobject kobj; + struct list_head list; + unsigned int kobj_valid; + unsigned int index; + u32 em_loc; /* enclosure management location */ + u32 em_buf_sz; /* EM buffer size in byte */ + u32 em_msg_type; /* EM message type */ + void __iomem *mmio; + spinlock_t wr_lock; /* protects sgpio register */ + struct AHCI_SGPIO_REG sgpio_reg; /* saved sgpio register */ +}; + +struct sgpio_zhaoxin_sysfs_attr { + struct attribute attr; + ssize_t (*show)(struct sgpio_zhaoxin 
*sgpio_zhaoxin, char *buf); + ssize_t (*store)(struct sgpio_zhaoxin *sgpio_zhaoxin, const char *buf, size_t count); +}; + +int get_ahci_em_messages(void); + +#endif /* _ACHI_ZHAOXIN_SGPIO_H */ diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index f1263364fa97..6524c5a02648 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -207,6 +207,12 @@ static int devslp_idle_timeout __read_mostly = 1000; module_param(devslp_idle_timeout, int, 0644); MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout"); +int get_ahci_em_messages(void) +{ + return ahci_em_messages; +} +EXPORT_SYMBOL_GPL(get_ahci_em_messages); + static void ahci_enable_ahci(void __iomem *mmio) { int i; -- Gitee From 3b3fd204d44ffdf374cabf7bb2dd199c5fbcae40 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Mon, 5 Aug 2024 11:34:45 +0800 Subject: [PATCH 1405/2138] anolis: efi: cper: Add Zhaoxin/Centaur ZDI/ZPI error decode ANBZ: #9439 ZPI is the interconnection interface between sockets; ZDI is the interconnection interface between dies. When an error occurs on either ZDI or ZPI, it triggers an SMI interrupt. The SMI handler reads the error information from the ZDI/ZPI configuration space, fills it into the CPER structure associated with the error, and raises an SCI or NMI interrupt to notify the OS. The OS driver then decodes the CPER structure to help the user analyze the error. Because the UEFI spec does not define a section type for ZDI/ZPI errors, Zhaoxin reports ZDI/ZPI errors using the error format defined by the Generic Processor Error Section type. When the error occurs, the BIOS fills the error information into the data structure corresponding to the Generic Processor Error Section type in the SMI handler. However, the error information printed by the default APEI driver is not easy to read. This patch adds some printed logs to make the ZDI/ZPI error information on the Zhaoxin/Centaur CPU vendor easier to read. 
Signed-off-by: leoliu-oc Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/3440 --- drivers/firmware/efi/cper.c | 55 +++++++++++++++++++++++++++++++++++++ include/linux/cper.h | 1 + 2 files changed, 56 insertions(+) diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 30a62a97ae98..993467ea49df 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -141,6 +141,59 @@ static const char * const proc_flag_strs[] = { "corrected", }; +static const char *const zdi_zpi_err_type_strs[] = { + "No Error", + "Training Error Status (PHY)", + "Data Link Protocol Error Status (DLL)", + "Surprise Down Error Status", + "Flow Control Protocol Error Status (TL)", + "Receiver Overflow Status (TL)", + "Receiver Error Status (PHY)", + "Bad TLP Status (DLL)", + "Bad Data Link Layer Packet (DLLP) Status (DLL)", + "REPLAY_NUM Rollover Status (DLL)", + "Replay Timer Timeout Status (DLL)", + "X16 Link Width Unreliable Status", + "ZPI X8 Link Width Unreliable Status", + "ZPI X4 Link Width Unreliable Status", + "ZPI X2 Link Width Unreliable Status", + "ZPI Gen3 Link Speed Unreliable Status", + "ZPI Gen2 Link Speed Unreliable Status", + "ZDI Gen3 Link Speed Unreliable Status", + "ZDI Gen4 Link Speed Unreliable Status", +}; + +const char *cper_zdi_zpi_err_type_str(unsigned int etype) +{ + return etype < ARRAY_SIZE(zdi_zpi_err_type_strs) ? 
+ zdi_zpi_err_type_strs[etype] : "unknown error"; +} +EXPORT_SYMBOL_GPL(cper_zdi_zpi_err_type_str); + +static void cper_print_proc_generic_zdi_zpi(const char *pfx, + const struct cper_sec_proc_generic *zdi_zpi) +{ +#if IS_ENABLED(CONFIG_X86) + u8 etype = zdi_zpi->responder_id; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) { + if ((zdi_zpi->requestor_id & 0xff) == 7) { + pr_info("%s general processor error(zpi error)\n", pfx); + } else if ((zdi_zpi->requestor_id & 0xff) == 6) { + pr_info("%s general processor error(zdi error)\n", pfx); + } else { + pr_info("%s general processor error(unknown error)\n", pfx); + return; + } + pr_info("%s bus number %llx device number %llx function number 0\n", pfx, + ((zdi_zpi->requestor_id)>>8) & 0xff, zdi_zpi->requestor_id & 0xff); + pr_info("%s apic id %lld error_type: %s\n", pfx, zdi_zpi->proc_id, + cper_zdi_zpi_err_type_str(etype)); + } +#endif +} + static void cper_print_proc_generic(const char *pfx, const struct cper_sec_proc_generic *proc) { @@ -184,6 +237,8 @@ static void cper_print_proc_generic(const char *pfx, pfx, proc->responder_id); if (proc->validation_bits & CPER_PROC_VALID_IP) printk("%s""IP: 0x%016llx\n", pfx, proc->ip); + + cper_print_proc_generic_zdi_zpi(pfx, proc); } static const char * const mem_err_type_strs[] = { diff --git a/include/linux/cper.h b/include/linux/cper.h index c1a7dc325121..ba5ee2355370 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -578,4 +578,5 @@ void cper_estatus_print(const char *pfx, int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus); int cper_estatus_check(const struct acpi_hest_generic_status *estatus); +const char *cper_zdi_zpi_err_type_str(unsigned int etype); #endif -- Gitee From 4f81bc7eeda8c7ea9aa8762c3b28c9c031611df4 Mon Sep 17 00:00:00 2001 From: Gerd Hoffmann Date: Wed, 13 Mar 2024 13:58:43 +0100 Subject: [PATCH 1406/2138] KVM: x86: Advertise max mappable GPA in 
CPUID.0x80000008.GuestPhysBits ANBZ: #11146 commit b628cb523c65420031b310050a3733aa7fbe2e88 upstream. Use the GuestPhysBits field in CPUID.0x80000008 to communicate the max mappable GPA to userspace, i.e. the max GPA that is addressable by the CPU itself. Typically this is identical to the max effective GPA, except in the case where the CPU supports MAXPHYADDR > 48 but does not support 5-level TDP (the CPU consults bits 51:48 of the GPA only when walking the fifth level TDP page table entry). Enumerating the max mappable GPA via CPUID will allow guest firmware to map resources like PCI bars in the highest possible address space, while ensuring that the GPA is addressable by the CPU. Without precise knowledge about the max mappable GPA, the guest must assume that 5-level paging is unsupported and thus restrict its mappings to the lower 48 bits. Advertise the max mappable GPA via KVM_GET_SUPPORTED_CPUID as userspace doesn't have easy access to whether or not 5-level paging is supported, and to play nice with userspace VMMs that reflect the supported CPUID directly into the guest. AMD's APM (3.35) defines GuestPhysBits (EAX[23:16]) as: Maximum guest physical address size in bits. This number applies only to guests using nested paging. When this field is zero, refer to the PhysAddrSize field for the maximum guest physical address size. Tom Lendacky confirmed that the purpose of GuestPhysBits is software use and KVM can use it as described above. Real hardware always returns zero. Leave GuestPhysBits as '0' when TDP is disabled in order to comply with the APM's statement that GuestPhysBits "applies only to guest using nested paging". As above, guest firmware will likely create suboptimal mappings, but that is a very minor issue and not a functional concern. 
Intel-SIG: commit b628cb523c65 KVM: x86: Advertise max mappable GPA in CPUID.0x80000008.GuestPhysBits Backport SRF MAXPHYADDR support Signed-off-by: Gerd Hoffmann Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20240313125844.912415-3-kraxel@redhat.com [sean: massage changelog] Signed-off-by: Sean Christopherson [jz: amend commit log] Signed-off-by: Jason Zeng Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3889 --- arch/x86/kvm/cpuid.c | 28 +++++++++++++++++++++++++--- arch/x86/kvm/mmu.h | 2 ++ arch/x86/kvm/mmu/mmu.c | 5 +++++ 3 files changed, 32 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index a9f265216582..4c8e631bef59 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -1236,8 +1236,22 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->eax = entry->ebx = entry->ecx = 0; break; case 0x80000008: { + /* + * GuestPhysAddrSize (EAX[23:16]) is intended for software + * use. + * + * KVM's ABI is to report the effective MAXPHYADDR for the + * guest in PhysAddrSize (phys_as), and the maximum + * *addressable* GPA in GuestPhysAddrSize (g_phys_as). + * + * GuestPhysAddrSize is valid if and only if TDP is enabled, + * in which case the max GPA that can be addressed by KVM may + * be less than the max GPA that can be legally generated by + * the guest, e.g. if MAXPHYADDR>48 but the CPU doesn't + * support 5-level TDP. + */ unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U); - unsigned int phys_as; + unsigned int phys_as, g_phys_as; /* * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as @@ -1246,15 +1260,23 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) * paging, too. * * If TDP is enabled, use the raw bare metal MAXPHYADDR as - * reductions to the HPAs do not affect GPAs. + * reductions to the HPAs do not affect GPAs. 
The max + * addressable GPA is the same as the max effective GPA, except + * that it's capped at 48 bits if 5-level TDP isn't supported + * (hardware processes bits 51:48 only when walking the fifth + * level page table). */ if (!tdp_enabled) { phys_as = boot_cpu_data.x86_phys_bits; + g_phys_as = 0; } else { phys_as = entry->eax & 0xff; + g_phys_as = phys_as; + if (kvm_mmu_get_max_tdp_level() < 5) + g_phys_as = min(g_phys_as, 48); } - entry->eax = phys_as | (virt_as << 8); + entry->eax = phys_as | (virt_as << 8) | (g_phys_as << 16); entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8)); entry->edx = 0; cpuid_entry_override(entry, CPUID_8000_0008_EBX); diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index f04cc5ade1cd..ade33a54306d 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -100,6 +100,8 @@ static inline u8 kvm_get_shadow_phys_bits(void) return boot_cpu_data.x86_phys_bits; } +u8 kvm_mmu_get_max_tdp_level(void); + void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask); void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask); void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 3517d7763fbc..31c5562f51b2 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5181,6 +5181,11 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu) return max_tdp_level; } +u8 kvm_mmu_get_max_tdp_level(void) +{ + return tdp_root_level ? tdp_root_level : max_tdp_level; +} + static union kvm_mmu_page_role kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, union kvm_cpu_role cpu_role) -- Gitee From 5c02868ec65d075b10e9394e96e3aa78c5df44f8 Mon Sep 17 00:00:00 2001 From: Tao Su Date: Mon, 13 May 2024 09:40:03 +0800 Subject: [PATCH 1407/2138] KVM: selftests: x86: Prioritize getting max_gfn from GuestPhysBits ANBZ: #11146 commit 980b8bc01938c8bcc9742c1051f64b5f0ed178ac upstream. 
Use the max mappable GPA via GuestPhysBits advertised by KVM to calculate max_gfn. Currently some selftests (e.g. access_tracking_perf_test, dirty_log_test...) add RAM regions close to max_gfn, so guest may access GPA beyond its mappable range and cause infinite loop. Adjust max_gfn in vm_compute_max_gfn() since x86 selftests already overrides vm_compute_max_gfn() specifically to deal with goofy edge cases. Intel-SIG: commit 980b8bc01938 KVM: selftests: x86: Prioritize getting max_gfn from GuestPhysBits Backport SRF MAXPHYADDR support Reported-by: Yi Lai Signed-off-by: Tao Su Tested-by: Yi Lai Reviewed-by: Xiaoyao Li Link: https://lore.kernel.org/r/20240513014003.104593-1-tao1.su@linux.intel.com [sean: tweak name, add comment and sanity check] Signed-off-by: Sean Christopherson [jz: amend commit log] Signed-off-by: Jason Zeng Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3889 --- .../selftests/kvm/include/x86_64/processor.h | 1 + .../testing/selftests/kvm/lib/x86_64/processor.c | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 25bc61dac5fb..490a0a7efb3c 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -272,6 +272,7 @@ struct kvm_x86_cpu_property { #define X86_PROPERTY_MAX_EXT_LEAF KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31) #define X86_PROPERTY_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7) #define X86_PROPERTY_MAX_VIRT_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15) +#define X86_PROPERTY_GUEST_MAX_PHY_ADDR KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 16, 23) #define X86_PROPERTY_PHYS_ADDR_REDUCTION KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11) #define X86_PROPERTY_MAX_CENTAUR_LEAF KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31) diff --git 
a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index d8288374078e..5a035bc2b9c1 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1248,9 +1248,20 @@ unsigned long vm_compute_max_gfn(struct kvm_vm *vm) { const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */ unsigned long ht_gfn, max_gfn, max_pfn; - uint8_t maxphyaddr; + uint8_t maxphyaddr, guest_maxphyaddr; - max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1; + /* + * Use "guest MAXPHYADDR" from KVM if it's available. Guest MAXPHYADDR + * enumerates the max _mappable_ GPA, which can be less than the raw + * MAXPHYADDR, e.g. if MAXPHYADDR=52, KVM is using TDP, and the CPU + * doesn't support 5-level TDP. + */ + guest_maxphyaddr = kvm_cpu_property(X86_PROPERTY_GUEST_MAX_PHY_ADDR); + guest_maxphyaddr = guest_maxphyaddr ?: vm->pa_bits; + TEST_ASSERT(guest_maxphyaddr <= vm->pa_bits, + "Guest MAXPHYADDR should never be greater than raw MAXPHYADDR"); + + max_gfn = (1ULL << (guest_maxphyaddr - vm->page_shift)) - 1; /* Avoid reserved HyperTransport region on AMD processors. */ if (!host_cpu_is_amd) -- Gitee From e2f4c28532394a1a7486130d903154e89c00b6dc Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Mon, 12 Aug 2024 17:55:17 +0800 Subject: [PATCH 1408/2138] kfence: save freeing stack trace at calling time instead of freeing time ANBZ: #8499 commit c36be0cdf63d64dfd65bcf27b8ed400696b1c27a upstream. For kmem_cache with SLAB_TYPESAFE_BY_RCU, the freeing trace stack at calling kmem_cache_free() is more useful. 
While the following stack is meaningless and provides no help: freed by task 46 on cpu 0 at 656.840729s: rcu_do_batch+0x1ab/0x540 nocb_cb_wait+0x8f/0x260 rcu_nocb_cb_kthread+0x25/0x80 kthread+0xd2/0x100 ret_from_fork+0x34/0x50 ret_from_fork_asm+0x1a/0x30 Link: https://lkml.kernel.org/r/20240812095517.2357-1-dtcccc@linux.alibaba.com Signed-off-by: Tianchen Ding Reviewed-by: Marco Elver Cc: Alexander Potapenko Cc: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Tianchen Ding Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3901 --- mm/kfence/core.c | 41 +++++++++++++++++++++++++++++++---------- mm/kfence/kfence.h | 1 + mm/kfence/report.c | 7 ++++--- 3 files changed, 36 insertions(+), 13 deletions(-) diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 2fe1b509778a..8e6c412ca380 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -474,6 +474,13 @@ static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *m return pageaddr; } +static inline bool kfence_obj_allocated(const struct kfence_metadata *meta) +{ + enum kfence_object_state state = READ_ONCE(meta->state); + + return state == KFENCE_OBJECT_ALLOCATED || state == KFENCE_OBJECT_RCU_FREEING; +} + /* * Update the object's metadata state, including updating the alloc/free stacks * depending on the state transition. @@ -483,7 +490,13 @@ metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state nex unsigned long *stack_entries, size_t num_stack_entries) { struct kfence_track *track = - next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track; + next == KFENCE_OBJECT_ALLOCATED ? &meta->alloc_track : &meta->free_track; + + lockdep_assert_held(&meta->lock); + + /* Stack has been saved when calling rcu, skip. 
*/ + if (READ_ONCE(meta->state) == KFENCE_OBJECT_RCU_FREEING) + goto out; if (stack_entries) { memcpy(track->stack_entries, stack_entries, @@ -500,6 +513,7 @@ metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state nex track->cpu = raw_smp_processor_id(); track->ts_nsec = local_clock(); /* Same source as printk timestamps. */ +out: /* * Pairs with READ_ONCE() in * kfence_shutdown_cache(), @@ -946,7 +960,7 @@ static inline bool __free_meta(void *addr, struct kfence_metadata *meta, bool zo raw_spin_lock_irqsave(&meta->lock, flags); - if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) { + if (!kfence_obj_allocated(meta) || meta->addr != (unsigned long)addr) { /* Invalid or double-free, bail out. */ this_cpu_counter->counter[KFENCE_COUNTER_BUGS]++; kfence_report_error((unsigned long)addr, false, NULL, meta, @@ -1870,7 +1884,7 @@ static void kfence_check_all_canary(void) for (i = 0; i < kpa->nr_objects; i++) { struct kfence_metadata *meta = &kpa->meta[i]; - if (meta->state == KFENCE_OBJECT_ALLOCATED) + if (kfence_obj_allocated(meta)) check_canary(meta); } } @@ -2349,12 +2363,11 @@ static void kfence_shutdown_cache_area(struct kmem_cache *s, struct kfence_pool_ * the lock will not help, as different critical section * serialization will have the same outcome. */ - if (READ_ONCE(meta->cache) != s || - READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED) + if (READ_ONCE(meta->cache) != s || !kfence_obj_allocated(meta)) continue; raw_spin_lock_irqsave(&meta->lock, flags); - in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED; + in_use = meta->cache == s && kfence_obj_allocated(meta); raw_spin_unlock_irqrestore(&meta->lock, flags); if (in_use) { @@ -2583,11 +2596,19 @@ void __kfence_free(void *addr) * the object, as the object page may be recycled for other-typed * objects once it has been freed. meta->cache may be NULL if the cache * was destroyed. 
+ * Save the stack trace here so that reports show where the user freed + * the object. */ - if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) + if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) { + unsigned long flags; + + raw_spin_lock_irqsave(&meta->lock, flags); + metadata_update_state(meta, KFENCE_OBJECT_RCU_FREEING, NULL, 0); + raw_spin_unlock_irqrestore(&meta->lock, flags); call_rcu(&meta->rcu_head, rcu_guarded_free); - else + } else { kfence_guarded_free(addr, meta, false); + } } void __kfence_free_page(struct page *page, void *addr) @@ -2625,14 +2646,14 @@ bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs int distance = 0; meta = addr_to_metadata(addr - PAGE_SIZE); - if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) { + if (meta && kfence_obj_allocated(meta)) { to_report = meta; /* Data race ok; distance calculation approximate. */ distance = addr - data_race(meta->addr + meta->size); } meta = addr_to_metadata(addr + PAGE_SIZE); - if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) { + if (meta && kfence_obj_allocated(meta)) { /* Data race ok; distance calculation approximate. */ if (!to_report || distance > data_race(meta->addr) - addr) to_report = meta; diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h index 071aec5feb96..e30b7578c52f 100644 --- a/mm/kfence/kfence.h +++ b/mm/kfence/kfence.h @@ -38,6 +38,7 @@ enum kfence_object_state { KFENCE_OBJECT_UNUSED, /* Object is unused. */ KFENCE_OBJECT_ALLOCATED, /* Object is currently allocated. */ + KFENCE_OBJECT_RCU_FREEING, /* Object was allocated, and then being freed by rcu. */ KFENCE_OBJECT_FREED, /* Object was allocated, and then freed. 
*/ }; diff --git a/mm/kfence/report.c b/mm/kfence/report.c index e2f051e223ef..f6c4b5ff3785 100644 --- a/mm/kfence/report.c +++ b/mm/kfence/report.c @@ -111,7 +111,8 @@ static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadat /* Timestamp matches printk timestamp format. */ seq_con_printf(seq, "%s by task %d on cpu %d at %lu.%06lus:\n", - show_alloc ? "allocated" : "freed", track->pid, + show_alloc ? "allocated" : meta->state == KFENCE_OBJECT_RCU_FREEING ? + "rcu freeing" : "freed", track->pid, track->cpu, (unsigned long)ts_sec, rem_nsec / 1000); if (track->num_stack_entries) { @@ -148,7 +149,7 @@ void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *met kfence_print_stack(seq, meta, true); - if (meta->state == KFENCE_OBJECT_FREED) { + if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING) { seq_con_printf(seq, "\n"); kfence_print_stack(seq, meta, false); } @@ -325,7 +326,7 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla kpp->kp_slab_cache = meta->cache; kpp->kp_objp = (void *)meta->addr; kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack); - if (meta->state == KFENCE_OBJECT_FREED) + if (meta->state == KFENCE_OBJECT_FREED || meta->state == KFENCE_OBJECT_RCU_FREEING) kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack); /* get_stack_skipnr() ensures the first entry is outside allocator. */ kpp->kp_ret = kpp->kp_stack[0]; -- Gitee From d7fff0f664f59b2c89e38f34bcb1ebebc9de6155 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Wed, 29 May 2024 17:09:07 +0200 Subject: [PATCH 1409/2138] fuse: cleanup request queuing towards virtiofs ANBZ: #11247 commit 5de8acb41c86f1d335d165e0a350441ea3a1f480 upstream. Virtiofs has its own queuing mechanism, but still requests are first queued on fiq->pending to be immediately dequeued and queued onto the virtio queue. 
The queuing on fiq->pending is unnecessary and might even have some performance impact due to being a contention point. Forget requests are handled similarly. Move the queuing of requests and forgets into the fiq->ops->*. fuse_iqueue_ops are renamed to reflect the new semantics. Reviewed-by: Stefan Hajnoczi Fixed-by: Jingbo Xu Reviewed-by: Jingbo Xu Tested-by: Peter-Jan Gootzen Reviewed-by: Peter-Jan Gootzen Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3931 --- fs/fuse/dev.c | 159 ++++++++++++++++++++++++-------------------- fs/fuse/fuse_i.h | 19 ++---- fs/fuse/virtio_fs.c | 41 ++++-------- 3 files changed, 106 insertions(+), 113 deletions(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index bf979dba93cd..a7a9c10e262f 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -194,11 +194,22 @@ unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args) } EXPORT_SYMBOL_GPL(fuse_len_args); -u64 fuse_get_unique(struct fuse_iqueue *fiq) +static u64 fuse_get_unique_locked(struct fuse_iqueue *fiq) { fiq->reqctr += FUSE_REQ_ID_STEP; return fiq->reqctr; } + +u64 fuse_get_unique(struct fuse_iqueue *fiq) +{ + u64 ret; + + spin_lock(&fiq->lock); + ret = fuse_get_unique_locked(fiq); + spin_unlock(&fiq->lock); + + return ret; +} EXPORT_SYMBOL_GPL(fuse_get_unique); static unsigned int fuse_req_hash(u64 unique) @@ -217,22 +228,68 @@ __releases(fiq->lock) spin_unlock(&fiq->lock); } +static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget) +{ + spin_lock(&fiq->lock); + if (fiq->connected) { + fiq->forget_list_tail->next = forget; + fiq->forget_list_tail = forget; + fuse_dev_wake_and_unlock(fiq); + } else { + kfree(forget); + spin_unlock(&fiq->lock); + } +} + +static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) +{ + spin_lock(&fiq->lock); + if (list_empty(&req->intr_entry)) { + list_add_tail(&req->intr_entry, &fiq->interrupts); + /* 
+ * Pairs with smp_mb() implied by test_and_set_bit() + * from fuse_request_end(). + */ + smp_mb(); + if (test_bit(FR_FINISHED, &req->flags)) { + list_del_init(&req->intr_entry); + spin_unlock(&fiq->lock); + } else { + fuse_dev_wake_and_unlock(fiq); + } + } else { + spin_unlock(&fiq->lock); + } +} + +static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req) +{ + spin_lock(&fiq->lock); + if (fiq->connected) { + if (req->in.h.opcode != FUSE_NOTIFY_REPLY) + req->in.h.unique = fuse_get_unique_locked(fiq); + list_add_tail(&req->list, &fiq->pending); + fuse_dev_wake_and_unlock(fiq); + } else { + spin_unlock(&fiq->lock); + req->out.h.error = -ENOTCONN; + fuse_request_end(req); + } +} + const struct fuse_iqueue_ops fuse_dev_fiq_ops = { - .wake_forget_and_unlock = fuse_dev_wake_and_unlock, - .wake_interrupt_and_unlock = fuse_dev_wake_and_unlock, - .wake_pending_and_unlock = fuse_dev_wake_and_unlock, + .send_forget = fuse_dev_queue_forget, + .send_interrupt = fuse_dev_queue_interrupt, + .send_req = fuse_dev_queue_req, }; EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops); -static void queue_request_and_unlock(struct fuse_iqueue *fiq, - struct fuse_req *req) -__releases(fiq->lock) +static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req) { req->in.h.len = sizeof(struct fuse_in_header) + fuse_len_args(req->args->in_numargs, (struct fuse_arg *) req->args->in_args); - list_add_tail(&req->list, &fiq->pending); - fiq->ops->wake_pending_and_unlock(fiq); + fiq->ops->send_req(fiq, req); } void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, @@ -243,15 +300,7 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, forget->forget_one.nodeid = nodeid; forget->forget_one.nlookup = nlookup; - spin_lock(&fiq->lock); - if (fiq->connected) { - fiq->forget_list_tail->next = forget; - fiq->forget_list_tail = forget; - fiq->ops->wake_forget_and_unlock(fiq); - } else { - kfree(forget); - spin_unlock(&fiq->lock); - } + 
fiq->ops->send_forget(fiq, forget); } static void flush_bg_queue(struct fuse_conn *fc) @@ -265,9 +314,7 @@ static void flush_bg_queue(struct fuse_conn *fc) req = list_first_entry(&fc->bg_queue, struct fuse_req, list); list_del(&req->list); fc->active_background++; - spin_lock(&fiq->lock); - req->in.h.unique = fuse_get_unique(fiq); - queue_request_and_unlock(fiq, req); + fuse_send_one(fiq, req); } } @@ -337,29 +384,12 @@ static int queue_interrupt(struct fuse_req *req) { struct fuse_iqueue *fiq = &req->fm->fc->iq; - spin_lock(&fiq->lock); /* Check for we've sent request to interrupt this req */ - if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) { - spin_unlock(&fiq->lock); + if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) return -EINVAL; - } - if (list_empty(&req->intr_entry)) { - list_add_tail(&req->intr_entry, &fiq->interrupts); - /* - * Pairs with smp_mb() implied by test_and_set_bit() - * from fuse_request_end(). - */ - smp_mb(); - if (test_bit(FR_FINISHED, &req->flags)) { - list_del_init(&req->intr_entry); - spin_unlock(&fiq->lock); - return 0; - } - fiq->ops->wake_interrupt_and_unlock(fiq); - } else { - spin_unlock(&fiq->lock); - } + fiq->ops->send_interrupt(fiq, req); + return 0; } @@ -414,21 +444,15 @@ static void __fuse_request_send(struct fuse_req *req) struct fuse_iqueue *fiq = &req->fm->fc->iq; BUG_ON(test_bit(FR_BACKGROUND, &req->flags)); - spin_lock(&fiq->lock); - if (!fiq->connected) { - spin_unlock(&fiq->lock); - req->out.h.error = -ENOTCONN; - } else { - req->in.h.unique = fuse_get_unique(fiq); - /* acquire extra reference, since request is still needed - after fuse_request_end() */ - __fuse_get_request(req); - queue_request_and_unlock(fiq, req); - request_wait_answer(req); - /* Pairs with smp_wmb() in fuse_request_end() */ - smp_rmb(); - } + /* acquire extra reference, since request is still needed after + fuse_request_end() */ + __fuse_get_request(req); + fuse_send_one(fiq, req); + + request_wait_answer(req); + /* Pairs with smp_wmb() 
in fuse_request_end() */ + smp_rmb(); } static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args) @@ -583,7 +607,6 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm, { struct fuse_req *req; struct fuse_iqueue *fiq = &fm->fc->iq; - int err = 0; req = fuse_get_req(fm, false); if (IS_ERR(req)) @@ -594,16 +617,9 @@ static int fuse_simple_notify_reply(struct fuse_mount *fm, fuse_args_to_req(req, args); - spin_lock(&fiq->lock); - if (fiq->connected) { - queue_request_and_unlock(fiq, req); - } else { - err = -ENODEV; - spin_unlock(&fiq->lock); - fuse_put_request(req); - } + fuse_send_one(fiq, req); - return err; + return 0; } /* @@ -1078,9 +1094,9 @@ __releases(fiq->lock) return err ? err : reqsize; } -struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq, - unsigned int max, - unsigned int *countp) +static struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq, + unsigned int max, + unsigned int *countp) { struct fuse_forget_link *head = fiq->forget_list_head.next; struct fuse_forget_link **newhead = &head; @@ -1099,7 +1115,6 @@ struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq, return head; } -EXPORT_SYMBOL(fuse_dequeue_forget); static int fuse_read_single_forget(struct fuse_iqueue *fiq, struct fuse_copy_state *cs, @@ -1114,7 +1129,7 @@ __releases(fiq->lock) struct fuse_in_header ih = { .opcode = FUSE_FORGET, .nodeid = forget->forget_one.nodeid, - .unique = fuse_get_unique(fiq), + .unique = fuse_get_unique_locked(fiq), .len = sizeof(ih) + sizeof(arg), }; @@ -1145,7 +1160,7 @@ __releases(fiq->lock) struct fuse_batch_forget_in arg = { .count = 0 }; struct fuse_in_header ih = { .opcode = FUSE_BATCH_FORGET, - .unique = fuse_get_unique(fiq), + .unique = fuse_get_unique_locked(fiq), .len = sizeof(ih) + sizeof(arg), }; @@ -1833,7 +1848,7 @@ static void fuse_resend(struct fuse_conn *fc) } /* iq and pq requests are both oldest to newest */ list_splice(&to_queue, &fiq->pending); - 
fiq->ops->wake_pending_and_unlock(fiq); + fuse_dev_wake_and_unlock(fiq); } static int fuse_notify_resend(struct fuse_conn *fc) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 554fed2c8320..776705e62ec3 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -422,22 +422,19 @@ struct fuse_iqueue; */ struct fuse_iqueue_ops { /** - * Signal that a forget has been queued + * Send one forget */ - void (*wake_forget_and_unlock)(struct fuse_iqueue *fiq) - __releases(fiq->lock); + void (*send_forget)(struct fuse_iqueue *fiq, struct fuse_forget_link *link); /** - * Signal that an INTERRUPT request has been queued + * Send interrupt for request */ - void (*wake_interrupt_and_unlock)(struct fuse_iqueue *fiq) - __releases(fiq->lock); + void (*send_interrupt)(struct fuse_iqueue *fiq, struct fuse_req *req); /** - * Signal that a request has been queued + * Send one request */ - void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq) - __releases(fiq->lock); + void (*send_req)(struct fuse_iqueue *fiq, struct fuse_req *req); /** * Clean up when fuse_iqueue is destroyed @@ -1015,10 +1012,6 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, struct fuse_forget_link *fuse_alloc_forget(void); -struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq, - unsigned int max, - unsigned int *countp); - /* * Initialize READ or READDIR request */ diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index b0a52bb015ae..ac57d4ed8a45 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -1042,22 +1042,13 @@ static struct virtio_driver virtio_fs_driver = { #endif }; -static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq) -__releases(fiq->lock) +static void virtio_fs_send_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *link) { - struct fuse_forget_link *link; struct virtio_fs_forget *forget; struct virtio_fs_forget_req *req; - struct virtio_fs *fs; - struct virtio_fs_vq *fsvq; - u64 unique; - - link = 
fuse_dequeue_forget(fiq, 1, NULL); - unique = fuse_get_unique(fiq); - - fs = fiq->priv; - fsvq = &fs->vqs[VQ_HIPRIO]; - spin_unlock(&fiq->lock); + struct virtio_fs *fs = fiq->priv; + struct virtio_fs_vq *fsvq = &fs->vqs[VQ_HIPRIO]; + u64 unique = fuse_get_unique(fiq); /* Allocate a buffer for the request */ forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL); @@ -1077,8 +1068,7 @@ __releases(fiq->lock) kfree(link); } -static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq) -__releases(fiq->lock) +static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) { /* * TODO interrupts. @@ -1087,7 +1077,6 @@ __releases(fiq->lock) * Exceptions are blocking lock operations; for example fcntl(F_SETLKW) * with shared lock between host and guest. */ - spin_unlock(&fiq->lock); } /* Count number of scatter-gather elements required */ @@ -1292,21 +1281,17 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, return ret; } -static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq) -__releases(fiq->lock) +static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req) { unsigned int queue_id; struct virtio_fs *fs; - struct fuse_req *req; struct virtio_fs_vq *fsvq; int ret; - WARN_ON(list_empty(&fiq->pending)); - req = list_last_entry(&fiq->pending, struct fuse_req, list); + if (req->in.h.opcode != FUSE_NOTIFY_REPLY) + req->in.h.unique = fuse_get_unique(fiq); + clear_bit(FR_PENDING, &req->flags); - list_del_init(&req->list); - WARN_ON(!list_empty(&fiq->pending)); - spin_unlock(&fiq->lock); fs = fiq->priv; queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()]; @@ -1344,10 +1329,10 @@ __releases(fiq->lock) } static const struct fuse_iqueue_ops virtio_fs_fiq_ops = { - .wake_forget_and_unlock = virtio_fs_wake_forget_and_unlock, - .wake_interrupt_and_unlock = virtio_fs_wake_interrupt_and_unlock, - .wake_pending_and_unlock = virtio_fs_wake_pending_and_unlock, - .release = virtio_fs_fiq_release, + 
.send_forget = virtio_fs_send_forget, + .send_interrupt = virtio_fs_send_interrupt, + .send_req = virtio_fs_send_req, + .release = virtio_fs_fiq_release, }; static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx) -- Gitee From 69ab4c5a25405564269d8f708c2d20d1e5167296 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 24 Sep 2024 10:47:23 +0200 Subject: [PATCH 1410/2138] fuse: clear FR_PENDING if abort is detected when sending request ANBZ: #11247 commit fcd2d9e1fdcd7cada612f2e8737fb13a2bce7d0e upstream. The (!fiq->connected) check was moved into the queuing method resulting in the following: Fixes: 5de8acb41c86 ("fuse: cleanup request queuing towards virtiofs") Reported-by: Lai, Yi Closes: https://lore.kernel.org/all/ZvFEAM6JfrBKsOU0@ly-workstation/ Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3931 --- fs/fuse/dev.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index a7a9c10e262f..6de23e156485 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -273,6 +273,7 @@ static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req) } else { spin_unlock(&fiq->lock); req->out.h.error = -ENOTCONN; + clear_bit(FR_PENDING, &req->flags); fuse_request_end(req); } } -- Gitee From b39c9fa62636520513921af96d49718a3689f1af Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 28 Jun 2024 10:36:46 +0800 Subject: [PATCH 1411/2138] anolis: iommu/vt-d: Add support for detecting ACPI namespace device in RMRR ANBZ: #9436 As below, ZX-200 xHCI mcu is a RMRR ANDD device in some case. 
[060h 0096 2] Subtable Type : 0001 [Reserved Memory Region [062h 0098 2] Length : 0020 [064h 0100 2] Reserved : 0000 [066h 0102 2] PCI Segment Number : 0000 [068h 0104 8] Base Address : 00000000B5DA5000 [070h 0112 8] End Address (limit) : 00000000B5DDDFFF [078h 0120 1] Device Scope Type : 05 [Namespace Device] [079h 0121 1] Entry Length : 08 [07Ah 0122 2] Reserved : 0000 [07Ch 0124 1] Enumeration ID : 02 [07Dh 0125 1] PCI Bus Number : 09 [07Eh 0126 2] PCI Path : 12,00 iommu driver cannot find this device and build identity map for the RMRR region, DMAR faults would occur for xHCI controller. Add func dmar_acpi_bus_add_dev to find the RMRR ANDD device. Add func acpi_rmrr_andd_probe to build identity map for the RMRR region into the domain of the corresponding xHCI controller. Add func iova_reserve_domain_addr to keep away from RMRR region when using dma iova. Signed-off-by: leoliu-oc Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3629 --- drivers/iommu/dma-iommu.c | 19 ++++++++++++ drivers/iommu/intel/dmar.c | 59 ++++++++++++++++++++++++++++++++++++- drivers/iommu/intel/iommu.c | 59 +++++++++++++++++++++++++++++++++++++ drivers/iommu/iommu.c | 13 +++++++- include/linux/dmar.h | 9 ++++++ include/linux/iommu.h | 15 ++++++++++ 6 files changed, 172 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 2da969fc8990..4982f561adb1 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -478,6 +478,25 @@ static int iova_reserve_pci_windows(struct pci_dev *dev, return 0; } +int iova_reserve_domain_addr(struct iommu_domain *domain, dma_addr_t start, dma_addr_t end) +{ + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + + unsigned long lo, hi; + + lo = iova_pfn(iovad, start); + hi = iova_pfn(iovad, end); + + if (!cookie) + return -EINVAL; + + reserve_iova(iovad, lo, hi); + + return 0; +} +EXPORT_SYMBOL_GPL(iova_reserve_domain_addr); 
+ static int iova_reserve_iommu_regions(struct device *dev, struct iommu_domain *domain) { diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 7a38e18b1819..e14d496a9cbd 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -767,6 +767,59 @@ static void __init dmar_acpi_insert_dev_scope(u8 device_number, device_number, dev_name(&adev->dev)); } +/* Return: > 0 if match found, 0 if no match found */ +bool dmar_rmrr_acpi_insert_dev_scope(u8 device_number, + struct acpi_device *adev, + void *start, void *end, + struct dmar_dev_scope *devices, + int devices_cnt) +{ + struct acpi_dmar_device_scope *scope; + struct device *tmp; + int i; + struct acpi_dmar_pci_path *path; + + for (; start < end; start += scope->length) { + scope = start; + if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE) + continue; + if (scope->enumeration_id != device_number) + continue; + path = (void *)(scope + 1); + pr_info("ACPI device \"%s\" under DMAR as %02x:%02x.%d\n", dev_name(&adev->dev), + scope->bus, path->device, path->function); + for_each_dev_scope(devices, devices_cnt, i, tmp) + if (tmp == NULL) { + devices[i].bus = scope->bus; + devices[i].devfn = PCI_DEVFN(path->device, path->function); + rcu_assign_pointer(devices[i].dev, get_device(&adev->dev)); + return true; + } + WARN_ON(i >= devices_cnt); + } + return false; +} + +static int dmar_acpi_bus_add_dev(u8 device_number, struct acpi_device *adev) +{ + struct dmar_drhd_unit *dmaru; + struct acpi_dmar_hardware_unit *drhd; + int ret; + + for_each_drhd_unit(dmaru) { + drhd = container_of(dmaru->hdr, struct acpi_dmar_hardware_unit, header); + ret = dmar_rmrr_acpi_insert_dev_scope(device_number, adev, (void *)(drhd+1), + ((void *)drhd)+drhd->header.length, + dmaru->devices, dmaru->devices_cnt); + if (ret) + break; + } + if (ret > 0) + ret = dmar_rmrr_add_acpi_dev(device_number, adev); + + return ret; +} + static int __init dmar_acpi_dev_scope_init(void) { struct acpi_dmar_andd *andd; @@ 
-794,7 +847,11 @@ static int __init dmar_acpi_dev_scope_init(void) andd->device_name); continue; } - dmar_acpi_insert_dev_scope(andd->device_number, adev); + + if (apply_zhaoxin_dmar_acpi_a_behavior()) + dmar_acpi_bus_add_dev(andd->device_number, adev); + else + dmar_acpi_insert_dev_scope(andd->device_number, adev); } } return 0; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 7f63a60337ce..6f993e34bb94 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3480,6 +3480,24 @@ static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu) return ret; } +int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev) +{ + int ret; + struct dmar_rmrr_unit *rmrru; + struct acpi_dmar_reserved_memory *rmrr; + + list_for_each_entry(rmrru, &dmar_rmrr_units, list) { + rmrr = container_of(rmrru->hdr, struct acpi_dmar_reserved_memory, header); + ret = dmar_rmrr_acpi_insert_dev_scope(device_number, adev, (void *)(rmrr + 1), + ((void *)rmrr) + rmrr->header.length, + rmrru->devices, rmrru->devices_cnt); + if (ret) + break; + } + + return 0; +} + int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) { int ret; @@ -3738,6 +3756,43 @@ static int __init platform_optin_force_iommu(void) return 1; } +static inline int acpi_rmrr_device_create_direct_mappings(struct iommu_domain *domain, + struct device *dev) +{ + int ret; + + pr_info("rmrr andd dev:%s enter to %s\n", dev_name(dev), __func__); + ret = __acpi_rmrr_device_create_direct_mappings(domain, dev); + + return ret; +} + +static inline int acpi_rmrr_andd_probe(struct device *dev) +{ + struct intel_iommu *iommu = NULL; + struct pci_dev *pci_device = NULL; + u8 bus, devfn; + int ret = 0; + + ret = iommu_probe_device(dev); + + iommu = device_to_iommu(dev, &bus, &devfn); + if (!iommu) { + pr_info("dpoint-- cannot get acpi device corresponding iommu\n"); + return -EINVAL; + } + + pci_device = pci_get_domain_bus_and_slot(iommu->segment, bus, devfn); 
+ if (!pci_device) { + pr_info("dpoint-- cannot get acpi devie corresponding pci_device\n"); + return -EINVAL; + } + ret = acpi_rmrr_device_create_direct_mappings(iommu_get_domain_for_dev(&pci_device->dev), + dev); + + return ret; +} + static int __init probe_acpi_namespace_devices(void) { struct dmar_drhd_unit *drhd; @@ -3760,6 +3815,10 @@ static int __init probe_acpi_namespace_devices(void) list_for_each_entry(pn, &adev->physical_node_list, node) { ret = iommu_probe_device(pn->dev); + + if (apply_zhaoxin_dmar_acpi_a_behavior()) + ret = acpi_rmrr_andd_probe(dev); + if (ret) break; } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3f1029c0825e..1bb7a4a39d28 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1103,7 +1103,8 @@ static int iommu_create_device_direct_mappings(struct iommu_domain *domain, map_size = 0; } } - + if (apply_zhaoxin_dmar_acpi_a_behavior()) + iova_reserve_domain_addr(domain, start, end); } if (!list_empty(&mappings) && iommu_is_dma_domain(domain)) @@ -1171,6 +1172,16 @@ static struct group_device *iommu_group_alloc_device(struct iommu_group *group, return ERR_PTR(ret); } +int __acpi_rmrr_device_create_direct_mappings(struct iommu_domain *domain, struct device *dev) +{ + int ret; + + ret = iommu_create_device_direct_mappings(domain, dev); + + return ret; +} +EXPORT_SYMBOL_GPL(__acpi_rmrr_device_create_direct_mappings); + /** * iommu_group_add_device - add a device to an iommu group * @group: the group into which to add the device (reference should be held) diff --git a/include/linux/dmar.h b/include/linux/dmar.h index e34b601b71fd..543c53e84a70 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -112,6 +112,9 @@ extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, void *start, void*end, u16 segment, struct dmar_dev_scope *devices, int devices_cnt); +extern bool dmar_rmrr_acpi_insert_dev_scope(u8 device_number, + struct acpi_device *adev, void *start, void *end, + struct dmar_dev_scope 
*devices, int devices_cnt); extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment, struct dmar_dev_scope *devices, int count); @@ -144,6 +147,7 @@ extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg); extern int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg); extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg); extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert); +extern int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev); extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); #else /* !CONFIG_INTEL_IOMMU: */ static inline int intel_iommu_init(void) { return -ENODEV; } @@ -155,6 +159,11 @@ static inline void intel_iommu_shutdown(void) { } #define dmar_release_one_atsr dmar_res_noop #define dmar_parse_one_satc dmar_res_noop +static inline int dmar_rmrr_add_acpi_dev(u8 device_number, struct acpi_device *adev) +{ + return 0; +} + static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) { return 0; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 2ff402412f8d..eb86d5b80110 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -601,6 +601,21 @@ void iommu_set_dma_strict(void); extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); +static inline bool apply_zhaoxin_dmar_acpi_a_behavior(void) +{ +#if defined(CONFIG_CPU_SUP_ZHAOXIN) || defined(CONFIG_CPU_SUP_CENTAUR) + if (((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) || + (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)) && + ((boot_cpu_data.x86 == 7) && (boot_cpu_data.x86_model == 0x3b))) + return true; +#endif + return false; +} + +extern int iova_reserve_domain_addr(struct iommu_domain *domain, dma_addr_t start, dma_addr_t end); + +int __acpi_rmrr_device_create_direct_mappings(struct iommu_domain *domain, struct device *dev); + static inline void 
iommu_flush_iotlb_all(struct iommu_domain *domain) { if (domain->ops->flush_iotlb_all) -- Gitee From 1ebddf2760d75c52e04407eade7e18050ca1cbe5 Mon Sep 17 00:00:00 2001 From: Cruz Zhao Date: Fri, 13 Oct 2023 15:16:06 +0800 Subject: [PATCH 1412/2138] anolis: watchdog: enlarge watchdog_thresh limit ANBZ: #6805 During the stress test, there're always false alarms of soft lockup, because of long loop, instead of true lockup. So we need to enlarge watchdog_thresh limit to 150 to avoid this kind of case. Signed-off-by: Cruz Zhao Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/2286 Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/3914 --- kernel/watchdog.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 68c0b1d8e467..616b21b4ca6a 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -853,7 +853,7 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write, return err; } -static const int sixty = 60; +static int one_hundred_fifty = 150; static struct ctl_table watchdog_sysctls[] = { { @@ -872,7 +872,7 @@ static struct ctl_table watchdog_sysctls[] = { .mode = 0644, .proc_handler = proc_watchdog_thresh, .extra1 = SYSCTL_ZERO, - .extra2 = (void *)&sixty, + .extra2 = &one_hundred_fifty, }, { .procname = "watchdog_cpumask", -- Gitee From a2c47dbfe55a1e41d88c9021c82ac39820e674f0 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 28 Feb 2024 18:29:37 +0000 Subject: [PATCH 1413/2138] fuse: Remove fuse_writepage ANBZ: #11250 commit e1c420ac9968f40cc266ec648cce12fa55c891db upstream. The writepage operation is deprecated as it leads to worse performance under high memory pressure due to folios being written out in LRU order rather than sequentially within a file. Use filemap_migrate_folio() to support dirty folio migration instead of writepage. 
Signed-off-by: "Matthew Wilcox (Oracle)" Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3936 --- fs/fuse/file.c | 30 +----------------------------- 1 file changed, 1 insertion(+), 29 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index ef5a16c267b2..1671ec654410 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2079,34 +2079,6 @@ static int fuse_writepage_locked(struct page *page) return error; } -static int fuse_writepage(struct page *page, struct writeback_control *wbc) -{ - struct fuse_conn *fc = get_fuse_conn(page->mapping->host); - int err; - - if (fuse_page_is_writeback(page->mapping->host, page->index)) { - /* - * ->writepages() should be called for sync() and friends. We - * should only get here on direct reclaim and then we are - * allowed to skip a page which is already in flight - */ - WARN_ON(wbc->sync_mode == WB_SYNC_ALL); - - redirty_page_for_writepage(wbc, page); - unlock_page(page); - return 0; - } - - if (wbc->sync_mode == WB_SYNC_NONE && - fc->num_background >= fc->congestion_threshold) - return AOP_WRITEPAGE_ACTIVATE; - - err = fuse_writepage_locked(page); - unlock_page(page); - - return err; -} - struct fuse_fill_wb_data { struct fuse_writepage_args *wpa; struct fuse_file *ff; @@ -3311,10 +3283,10 @@ static const struct file_operations fuse_file_operations = { static const struct address_space_operations fuse_file_aops = { .read_folio = fuse_read_folio, .readahead = fuse_readahead, - .writepage = fuse_writepage, .writepages = fuse_writepages, .launder_folio = fuse_launder_folio, .dirty_folio = filemap_dirty_folio, + .migrate_folio = filemap_migrate_folio, .bmap = fuse_bmap, .direct_IO = fuse_direct_IO, .write_begin = fuse_write_begin, -- Gitee From e7881111fdc68209e20b8fed3ba24fd8543f70ae Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 28 Feb 2024 18:29:38 +0000 Subject: [PATCH 1414/2138] fuse: Convert fuse_writepage_locked to take 
a folio ANBZ: #11250 commit e0887e095a803d238bd3e2b280baa4c5e70c650c upstream. The one remaining caller of fuse_writepage_locked() already has a folio, so convert this function entirely. Saves a few calls to compound_head() but no attempt is made to support large folios in this patch. Signed-off-by: "Matthew Wilcox (Oracle)" Signed-off-by: Miklos Szeredi [jingbo: export folio_copy()] Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3936 --- fs/fuse/file.c | 30 +++++++++++++++--------------- mm/util.c | 1 + 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 1671ec654410..6fef2b09c632 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2015,26 +2015,26 @@ static void fuse_writepage_add_to_bucket(struct fuse_conn *fc, rcu_read_unlock(); } -static int fuse_writepage_locked(struct page *page) +static int fuse_writepage_locked(struct folio *folio) { - struct address_space *mapping = page->mapping; + struct address_space *mapping = folio->mapping; struct inode *inode = mapping->host; struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_writepage_args *wpa; struct fuse_args_pages *ap; - struct page *tmp_page; + struct folio *tmp_folio; int error = -ENOMEM; - set_page_writeback(page); + folio_start_writeback(folio); wpa = fuse_writepage_args_alloc(); if (!wpa) goto err; ap = &wpa->ia.ap; - tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); - if (!tmp_page) + tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0); + if (!tmp_folio) goto err_free; error = -EIO; @@ -2043,21 +2043,21 @@ static int fuse_writepage_locked(struct page *page) goto err_nofile; fuse_writepage_add_to_bucket(fc, wpa); - fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0); + fuse_write_args_fill(&wpa->ia, wpa->ia.ff, folio_pos(folio), 0); - copy_highpage(tmp_page, page); + folio_copy(tmp_folio, folio); wpa->ia.write.in.write_flags |= 
FUSE_WRITE_CACHE; wpa->next = NULL; ap->args.in_pages = true; ap->num_pages = 1; - ap->pages[0] = tmp_page; + ap->pages[0] = &tmp_folio->page; ap->descs[0].offset = 0; ap->descs[0].length = PAGE_SIZE; ap->args.end = fuse_writepage_end; wpa->inode = inode; inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); - inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP); + node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP); spin_lock(&fi->lock); tree_insert(&fi->writepages, wpa); @@ -2065,17 +2065,17 @@ static int fuse_writepage_locked(struct page *page) fuse_flush_writepages(inode); spin_unlock(&fi->lock); - end_page_writeback(page); + folio_end_writeback(folio); return 0; err_nofile: - __free_page(tmp_page); + folio_put(tmp_folio); err_free: kfree(wpa); err: - mapping_set_error(page->mapping, error); - end_page_writeback(page); + mapping_set_error(folio->mapping, error); + folio_end_writeback(folio); return error; } @@ -2441,7 +2441,7 @@ static int fuse_launder_folio(struct folio *folio) /* Serialize with pending writeback for the same page */ fuse_wait_on_page_writeback(inode, folio->index); - err = fuse_writepage_locked(&folio->page); + err = fuse_writepage_locked(folio); if (!err) fuse_wait_on_page_writeback(inode, folio->index); } diff --git a/mm/util.c b/mm/util.c index 08d494896552..2f5c912cc0a2 100644 --- a/mm/util.c +++ b/mm/util.c @@ -809,6 +809,7 @@ void folio_copy(struct folio *dst, struct folio *src) cond_resched(); } } +EXPORT_SYMBOL(folio_copy); int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; int sysctl_overcommit_ratio __read_mostly = 50; -- Gitee From 48232247a6189672f7d71530ea20c5270154fcab Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 26 Aug 2024 14:19:02 -0700 Subject: [PATCH 1415/2138] fuse: drop unused fuse_mount arg in fuse_writepage_finish() ANBZ: #11250 commit 509a6458b44f72bb6854854c89cf76e56f11c9f1 upstream. Drop the unused "struct fuse_mount *fm" arg in fuse_writepage_finish(). No functional changes added. 
Signed-off-by: Joanne Koong Reviewed-by: Jingbo Xu Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3936 --- fs/fuse/file.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 6fef2b09c632..0b8062e61f59 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1735,8 +1735,7 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa) kfree(wpa); } -static void fuse_writepage_finish(struct fuse_mount *fm, - struct fuse_writepage_args *wpa) +static void fuse_writepage_finish(struct fuse_writepage_args *wpa) { struct fuse_args_pages *ap = &wpa->ia.ap; struct inode *inode = wpa->inode; @@ -1795,7 +1794,7 @@ __acquires(fi->lock) out_free: fi->writectr--; rb_erase(&wpa->writepages_entry, &fi->writepages); - fuse_writepage_finish(fm, wpa); + fuse_writepage_finish(wpa); spin_unlock(&fi->lock); /* After rb_erase() aux request list is private */ @@ -1931,7 +1930,7 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args, fuse_send_writepage(fm, next, inarg->offset + inarg->size); } fi->writectr--; - fuse_writepage_finish(fm, wpa); + fuse_writepage_finish(wpa); spin_unlock(&fi->lock); fuse_writepage_free(wpa); } -- Gitee From 366f4c01b8537312c3c40d2c73f4df47a3e4c948 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 26 Aug 2024 14:19:03 -0700 Subject: [PATCH 1416/2138] fuse: refactor finished writeback stats updates into helper function ANBZ: #11250 commit c04e3b2118192384153b4eac595768e2ffb7ac4a upstream. Move the logic for updating the bdi and page stats for a finished writeback into a separate helper function, where it can be called from both fuse_writepage_finish() and fuse_writepage_add() (in the case where there is already an auxiliary write request for the page). No functional changes added. 
Suggested by: Jingbo Xu Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3936 --- fs/fuse/file.c | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 0b8062e61f59..426920523d25 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1735,19 +1735,25 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa) kfree(wpa); } +static void fuse_writepage_finish_stat(struct inode *inode, struct page *page) +{ + struct backing_dev_info *bdi = inode_to_bdi(inode); + + dec_wb_stat(&bdi->wb, WB_WRITEBACK); + dec_node_page_state(page, NR_WRITEBACK_TEMP); + wb_writeout_inc(&bdi->wb); +} + static void fuse_writepage_finish(struct fuse_writepage_args *wpa) { struct fuse_args_pages *ap = &wpa->ia.ap; struct inode *inode = wpa->inode; struct fuse_inode *fi = get_fuse_inode(inode); - struct backing_dev_info *bdi = inode_to_bdi(inode); int i; - for (i = 0; i < ap->num_pages; i++) { - dec_wb_stat(&bdi->wb, WB_WRITEBACK); - dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP); - wb_writeout_inc(&bdi->wb); - } + for (i = 0; i < ap->num_pages; i++) + fuse_writepage_finish_stat(inode, ap->pages[i]); + wake_up(&fi->page_waitq); } @@ -1799,14 +1805,9 @@ __acquires(fi->lock) /* After rb_erase() aux request list is private */ for (aux = wpa->next; aux; aux = next) { - struct backing_dev_info *bdi = inode_to_bdi(aux->inode); - next = aux->next; aux->next = NULL; - - dec_wb_stat(&bdi->wb, WB_WRITEBACK); - dec_node_page_state(aux->ia.ap.pages[0], NR_WRITEBACK_TEMP); - wb_writeout_inc(&bdi->wb); + fuse_writepage_finish_stat(aux->inode, aux->ia.ap.pages[0]); fuse_writepage_free(aux); } @@ -2175,11 +2176,7 @@ static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa, spin_unlock(&fi->lock); if (tmp) { - struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode); - - 
dec_wb_stat(&bdi->wb, WB_WRITEBACK); - dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP); - wb_writeout_inc(&bdi->wb); + fuse_writepage_finish_stat(new_wpa->inode, new_ap->pages[0]); fuse_writepage_free(new_wpa); } -- Gitee From 1cb29742dce56500dc9a5cfa959a3449e705d761 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 26 Aug 2024 14:19:05 -0700 Subject: [PATCH 1417/2138] fuse: move initialization of fuse_file to fuse_writepages() instead of in callback ANBZ: #11250 commit 672c3b7457fcee9656c36a29a4b21ec4a652433e upstream. Prior to this change, data->ff is checked and if not initialized then initialized in the fuse_writepages_fill() callback, which gets called for every dirty page in the address space mapping. This logic is better placed in the main fuse_writepages() caller where data.ff is initialized before walking the dirty pages. No functional changes added. Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3936 --- fs/fuse/file.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 426920523d25..f2c59bd3a485 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2229,13 +2229,6 @@ static int fuse_writepages_fill(struct folio *folio, struct page *tmp_page; int err; - if (!data->ff) { - err = -EIO; - data->ff = fuse_write_file_get(fi); - if (!data->ff) - goto out_unlock; - } - if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) { fuse_writepages_send(data); data->wpa = NULL; @@ -2314,13 +2307,13 @@ static int fuse_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; + struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_fill_wb_data data; int err; - err = -EIO; if (fuse_is_bad(inode)) - goto out; + return -EIO; if (wbc->sync_mode == 
WB_SYNC_NONE && fc->num_background >= fc->congestion_threshold) @@ -2328,7 +2321,9 @@ static int fuse_writepages(struct address_space *mapping, data.inode = inode; data.wpa = NULL; - data.ff = NULL; + data.ff = fuse_write_file_get(fi); + if (!data.ff) + return -EIO; err = -ENOMEM; data.orig_pages = kcalloc(fc->max_pages, @@ -2342,11 +2337,10 @@ static int fuse_writepages(struct address_space *mapping, WARN_ON(!data.wpa->ia.ap.num_pages); fuse_writepages_send(&data); } - if (data.ff) - fuse_file_put(data.ff, false); kfree(data.orig_pages); out: + fuse_file_put(data.ff, false); return err; } -- Gitee From 6948a8aaeae8f90d14395297e9baab71e825ae49 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 26 Aug 2024 14:19:06 -0700 Subject: [PATCH 1418/2138] fuse: convert fuse_writepages_fill() to use a folio for its tmp page ANBZ: #11250 commit 9a8ebcf5e04e6cc9472bfcdd90b2aeef35a2f8f6 upstream. To pave the way for refactoring out the shared logic in fuse_writepages_fill() and fuse_writepage_locked(), this change converts the temporary page in fuse_writepages_fill() to use the folio API. This is similar to the change in commit e0887e095a80 ("fuse: Convert fuse_writepage_locked to take a folio"), which converted the tmp page in fuse_writepage_locked() to use the folio API. inc_node_page_state() is intentionally preserved here instead of converting to node_stat_add_folio() since it is updating the stat of the underlying page and to better maintain API symmetry with dec_node_page_stat() in fuse_writepage_finish_stat(). No functional changes added. 
Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3936 --- fs/fuse/file.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index f2c59bd3a485..dd0e8dee19fb 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2226,7 +2226,7 @@ static int fuse_writepages_fill(struct folio *folio, struct inode *inode = data->inode; struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = get_fuse_conn(inode); - struct page *tmp_page; + struct folio *tmp_folio; int err; if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) { @@ -2235,8 +2235,8 @@ static int fuse_writepages_fill(struct folio *folio, } err = -ENOMEM; - tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); - if (!tmp_page) + tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0); + if (!tmp_folio) goto out_unlock; /* @@ -2256,7 +2256,7 @@ static int fuse_writepages_fill(struct folio *folio, err = -ENOMEM; wpa = fuse_writepage_args_alloc(); if (!wpa) { - __free_page(tmp_page); + folio_put(tmp_folio); goto out_unlock; } fuse_writepage_add_to_bucket(fc, wpa); @@ -2274,14 +2274,14 @@ static int fuse_writepages_fill(struct folio *folio, } folio_start_writeback(folio); - copy_highpage(tmp_page, &folio->page); - ap->pages[ap->num_pages] = tmp_page; + folio_copy(tmp_folio, folio); + ap->pages[ap->num_pages] = &tmp_folio->page; ap->descs[ap->num_pages].offset = 0; ap->descs[ap->num_pages].length = PAGE_SIZE; data->orig_pages[ap->num_pages] = &folio->page; inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); - inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP); + inc_node_page_state(&tmp_folio->page, NR_WRITEBACK_TEMP); err = 0; if (data->wpa) { -- Gitee From a9713634602bb5ce455479b1c554cc003cc59180 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 26 Aug 2024 14:19:07 -0700 Subject: [PATCH 1419/2138] fuse: move fuse 
file initialization to wpa allocation time ANBZ: #11250 commit 4046d3adcca42b7678f11c71e46bd32bafb4dad1 upstream. Before this change, wpa->ia.ff is initialized with an acquired reference on the fuse file right before it submits the writeback request. If there are auxiliary writebacks, then the initialization and reference acquisition needs to also be set before we submit the auxiliary writeback request. To make the logic simpler and to pave the way for a subsequent refactoring of fuse_writepages_fill() and fuse_writepage_locked(), this change initializes and acquires wpa->ia.ff when the wpa is allocated. No functional changes added. Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3936 --- fs/fuse/file.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index dd0e8dee19fb..2ddc543e4b25 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1728,8 +1728,7 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa) for (i = 0; i < ap->num_pages; i++) __free_page(ap->pages[i]); - if (wpa->ia.ff) - fuse_file_put(wpa->ia.ff, false); + fuse_file_put(wpa->ia.ff, false); kfree(ap->pages); kfree(wpa); @@ -1902,7 +1901,6 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args, wpa->next = next->next; next->next = NULL; - next->ia.ff = fuse_file_get(wpa->ia.ff); tree_insert(&fi->writepages, next); /* @@ -2121,7 +2119,6 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data) int num_pages = wpa->ia.ap.num_pages; int i; - wpa->ia.ff = fuse_file_get(data->ff); spin_lock(&fi->lock); list_add_tail(&wpa->queue_entry, &fi->queued_writes); fuse_flush_writepages(inode); @@ -2266,6 +2263,7 @@ static int fuse_writepages_fill(struct folio *folio, ap = &wpa->ia.ap; fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0); wpa->ia.write.in.write_flags |= 
FUSE_WRITE_CACHE; + wpa->ia.ff = fuse_file_get(data->ff); wpa->next = NULL; ap->args.in_pages = true; ap->args.end = fuse_writepage_end; -- Gitee From ba624dfc4ca4f6eff04f2ad5c229d151015ee3bf Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 26 Aug 2024 14:19:08 -0700 Subject: [PATCH 1420/2138] fuse: refactor out shared logic in fuse_writepages_fill() and fuse_writepage_locked() ANBZ: #11250 commit 0acad9289be33d324537d6c51988be0541b1139d upstream. This change refactors the shared logic in fuse_writepages_fill() and fuse_writepages_locked() into two separate helper functions, fuse_writepage_args_page_fill() and fuse_writepage_args_setup(). No functional changes added. Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3936 --- fs/fuse/file.c | 103 +++++++++++++++++++++++++++---------------------- 1 file changed, 57 insertions(+), 46 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 2ddc543e4b25..9dd011f02796 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2013,49 +2013,77 @@ static void fuse_writepage_add_to_bucket(struct fuse_conn *fc, rcu_read_unlock(); } +static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio, + struct folio *tmp_folio, uint32_t page_index) +{ + struct inode *inode = folio->mapping->host; + struct fuse_args_pages *ap = &wpa->ia.ap; + + folio_copy(tmp_folio, folio); + + ap->pages[page_index] = &tmp_folio->page; + ap->descs[page_index].offset = 0; + ap->descs[page_index].length = PAGE_SIZE; + + inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); + inc_node_page_state(&tmp_folio->page, NR_WRITEBACK_TEMP); +} + +static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio, + struct fuse_file *ff) +{ + struct inode *inode = folio->mapping->host; + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_writepage_args *wpa; + struct 
fuse_args_pages *ap; + + wpa = fuse_writepage_args_alloc(); + if (!wpa) + return NULL; + + fuse_writepage_add_to_bucket(fc, wpa); + fuse_write_args_fill(&wpa->ia, ff, folio_pos(folio), 0); + wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE; + wpa->inode = inode; + wpa->ia.ff = ff; + + ap = &wpa->ia.ap; + ap->args.in_pages = true; + ap->args.end = fuse_writepage_end; + + return wpa; +} + static int fuse_writepage_locked(struct folio *folio) { struct address_space *mapping = folio->mapping; struct inode *inode = mapping->host; - struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_writepage_args *wpa; struct fuse_args_pages *ap; struct folio *tmp_folio; + struct fuse_file *ff; int error = -ENOMEM; - folio_start_writeback(folio); - - wpa = fuse_writepage_args_alloc(); - if (!wpa) - goto err; - ap = &wpa->ia.ap; - tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0); if (!tmp_folio) - goto err_free; + goto err; error = -EIO; - wpa->ia.ff = fuse_write_file_get(fi); - if (!wpa->ia.ff) + ff = fuse_write_file_get(fi); + if (!ff) goto err_nofile; - fuse_writepage_add_to_bucket(fc, wpa); - fuse_write_args_fill(&wpa->ia, wpa->ia.ff, folio_pos(folio), 0); + wpa = fuse_writepage_args_setup(folio, ff); + error = -ENOMEM; + if (!wpa) + goto err_writepage_args; - folio_copy(tmp_folio, folio); - wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE; - wpa->next = NULL; - ap->args.in_pages = true; + ap = &wpa->ia.ap; ap->num_pages = 1; - ap->pages[0] = &tmp_folio->page; - ap->descs[0].offset = 0; - ap->descs[0].length = PAGE_SIZE; - ap->args.end = fuse_writepage_end; - wpa->inode = inode; - inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); - node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP); + folio_start_writeback(folio); + fuse_writepage_args_page_fill(wpa, folio, tmp_folio, 0); spin_lock(&fi->lock); tree_insert(&fi->writepages, wpa); @@ -2067,13 +2095,12 @@ static int fuse_writepage_locked(struct folio *folio) return 0; 
+err_writepage_args: + fuse_file_put(ff, false); err_nofile: folio_put(tmp_folio); -err_free: - kfree(wpa); err: mapping_set_error(folio->mapping, error); - folio_end_writeback(folio); return error; } @@ -2251,36 +2278,20 @@ static int fuse_writepages_fill(struct folio *folio, */ if (data->wpa == NULL) { err = -ENOMEM; - wpa = fuse_writepage_args_alloc(); + wpa = fuse_writepage_args_setup(folio, data->ff); if (!wpa) { folio_put(tmp_folio); goto out_unlock; } - fuse_writepage_add_to_bucket(fc, wpa); - + fuse_file_get(wpa->ia.ff); data->max_pages = 1; - ap = &wpa->ia.ap; - fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0); - wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE; - wpa->ia.ff = fuse_file_get(data->ff); - wpa->next = NULL; - ap->args.in_pages = true; - ap->args.end = fuse_writepage_end; - ap->num_pages = 0; - wpa->inode = inode; } folio_start_writeback(folio); - folio_copy(tmp_folio, folio); - ap->pages[ap->num_pages] = &tmp_folio->page; - ap->descs[ap->num_pages].offset = 0; - ap->descs[ap->num_pages].length = PAGE_SIZE; + fuse_writepage_args_page_fill(wpa, folio, tmp_folio, ap->num_pages); data->orig_pages[ap->num_pages] = &folio->page; - inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); - inc_node_page_state(&tmp_folio->page, NR_WRITEBACK_TEMP); - err = 0; if (data->wpa) { /* -- Gitee From 981b55ea295701604e35586d4c234cd03ee795b5 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Thu, 26 Sep 2024 11:02:22 +0800 Subject: [PATCH 1421/2138] anolis: spec: ensure vmlinux's BTF section unchanged ANBZ: #11097 The kernel building has 2 steps: 1. make bzImage. It generates BTF sections for vmlinuz and vmlinux, and the sections in vmlinuz has been compressed, and won't be modified later. 2. make modules. It generates BTF sections for modules, and it depends on the vmlinux's BTF. 
Usually, the vmlinux won't be recompiled in step 2, but some makefile bugs may trigger a vmlinux recompile, causing vmlinux's BTF sections to be regenerated, which are not the same as vmlinuz's BTF. Because a module's BTF is based on the regenerated vmlinux's BTF, the module's BTF will mismatch the vmlinuz's BTF, causing module loading to fail, which looks like: BPF: [119013] STRUCT BPF: size=64 vlen=5 BPF: BPF: Invalid name BPF: failed to validate module [libata] BTF: -22 This commit checks vmlinux relink actions, and ensures vmlinuz's BTF section is unchanged. Since extracting the btf section from the arm64 Image.bz is too difficult, we check for x86 only. Signed-off-by: Qiao Ma Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3905 --- anolis/rpm/kernel.spec.template | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template index 89c843aa78c2..f8eb18ee9952 100644 --- a/anolis/rpm/kernel.spec.template +++ b/anolis/rpm/kernel.spec.template @@ -199,6 +199,9 @@ BuildRequires: bzip2, xz, findutils, gzip, m4, perl-interpreter, perl-Carp, perl BuildRequires: gcc, binutils, system-rpm-config, hmaccalc, python3-devel BuildRequires: net-tools, hostname, bc, bison, flex, elfutils-devel, dwarves BuildRequires: libnl3-devel +%ifarch x86_64 +BuildRequires: llvm +%endif %if %{with_doc} BuildRequires: xmlto, asciidoc, python3-sphinx %endif @@ -1040,6 +1043,20 @@ BuildKernel() { # build a BLS config for this kernel %{SOURCE43} "$KernelVer" "$RPM_BUILD_ROOT" "%{?variant}" + +%ifarch x86_64 + # ensure vmlinuz' btf section is same as vmlinux + # since extract btf section from arm64 Image.bz is too difficult, + # we check for x86 only. + ./scripts/extract-vmlinux $KernelImage > tmp_vmlinux + llvm-objcopy --dump-section=.BTF=tmp_btf_vmlinuz tmp_vmlinux + llvm-objcopy --dump-section=.BTF=tmp_btf_vmlinux vmlinux + if ! 
cmp tmp_btf_vmlinuz tmp_btf_vmlinux ; then + echo "detected BTF section in vmlinuz is not same as vmlinux !!!" + exit 1 + fi + rm -f tmp_btf_vmlinuz tmp_btf_vmlinux tmp_vmlinux +%endif } ### -- Gitee From e7196e1c3499089efab88b6a15d956bb1324f8b3 Mon Sep 17 00:00:00 2001 From: yangyun Date: Wed, 14 Aug 2024 17:36:00 +0800 Subject: [PATCH 1422/2138] fuse: add fast path for fuse_range_is_writeback ANBZ: #11284 commit ac5cffec53be0b0231b89470a357bd3a5814f599 upstream. In some cases, the fi->writepages may be empty. And there is no need to check fi->writepages with spin_lock, which may have an impact on performance due to lock contention. For example, in scenarios where multiple readers read the same file without any writers, or where the page cache is not enabled. Also remove the outdated comment since commit 6b2fb79963fb ("fuse: optimize writepages search") has optimize the situation by replacing list with rb-tree. Signed-off-by: yangyun Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/fuse/file.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 9dd011f02796..483611f1821d 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -449,9 +449,6 @@ static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi, /* * Check if any page in a range is under writeback - * - * This is currently done by walking the list of writepage requests - * for the inode, which can be pretty inefficient. 
*/ static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from, pgoff_t idx_to) @@ -459,6 +456,9 @@ static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from, struct fuse_inode *fi = get_fuse_inode(inode); bool found; + if (RB_EMPTY_ROOT(&fi->writepages)) + return false; + spin_lock(&fi->lock); found = fuse_find_writeback(fi, idx_from, idx_to); spin_unlock(&fi->lock); -- Gitee From 65c91eb6670f68252f52d9f2701128e55a60ca94 Mon Sep 17 00:00:00 2001 From: Aurelien Aptel Date: Fri, 17 May 2024 16:10:28 +0000 Subject: [PATCH 1423/2138] fuse: use correct name fuse_conn_list in docstring ANBZ: #11284 commit 506b21c945b9716a1e092189c260d9400c52fa14 upstream. fuse_mount_list doesn't exist, use fuse_conn_list. Signed-off-by: Aurelien Aptel Reviewed-by: Bernd Schubert Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/fuse/fuse_i.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 776705e62ec3..c57c5132d0ab 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -833,7 +833,7 @@ struct fuse_conn { /** Negotiated minor version */ unsigned minor; - /** Entry on the fuse_mount_list */ + /** Entry on the fuse_conn_list */ struct list_head entry; /** Device ID from the root super block */ -- Gitee From 06ef083723125ee1bc9f5fd2e01f76f3852ca8de Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 3 Jul 2024 10:38:42 -0400 Subject: [PATCH 1424/2138] fuse: add simple request tracepoints ANBZ: #11284 commit 396b209e405a571ce8e06d3760ffc3e389a944f1 upstream. I've been timing various fuse operations and it's quite annoying to do with kprobes. Add two tracepoints for sending and ending fuse requests to make it easier to debug and time various operations. 
Signed-off-by: Josef Bacik Reviewed-by: Bernd Schubert Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/fuse/Makefile | 3 + fs/fuse/dev.c | 5 ++ fs/fuse/fuse_trace.h | 132 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 140 insertions(+) create mode 100644 fs/fuse/fuse_trace.h diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile index d2bad0c85d8c..2b5cf7bc5b58 100644 --- a/fs/fuse/Makefile +++ b/fs/fuse/Makefile @@ -3,6 +3,9 @@ # Makefile for the FUSE filesystem. # +# Needed for trace events +ccflags-y = -I$(src) + obj-$(CONFIG_FUSE_FS) += fuse.o obj-$(CONFIG_CUSE) += cuse.o obj-$(CONFIG_VIRTIO_FS) += virtiofs.o diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 6de23e156485..1067d322fda0 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -22,6 +22,9 @@ #include #include +#define CREATE_TRACE_POINTS +#include "fuse_trace.h" + MODULE_ALIAS_MISCDEV(FUSE_MINOR); MODULE_ALIAS("devname:fuse"); @@ -290,6 +293,7 @@ static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req) req->in.h.len = sizeof(struct fuse_in_header) + fuse_len_args(req->args->in_numargs, (struct fuse_arg *) req->args->in_args); + trace_fuse_request_send(req); fiq->ops->send_req(fiq, req); } @@ -336,6 +340,7 @@ void fuse_request_end(struct fuse_req *req) if (test_and_set_bit(FR_FINISHED, &req->flags)) goto put_request; + trace_fuse_request_end(req); /* * test_and_set_bit() implies smp_mb() between bit * changing and below FR_INTERRUPTED check. 
Pairs with diff --git a/fs/fuse/fuse_trace.h b/fs/fuse/fuse_trace.h new file mode 100644 index 000000000000..bbe9ddd8c716 --- /dev/null +++ b/fs/fuse/fuse_trace.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM fuse + +#if !defined(_TRACE_FUSE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_FUSE_H + +#include + +#define OPCODES \ + EM( FUSE_LOOKUP, "FUSE_LOOKUP") \ + EM( FUSE_FORGET, "FUSE_FORGET") \ + EM( FUSE_GETATTR, "FUSE_GETATTR") \ + EM( FUSE_SETATTR, "FUSE_SETATTR") \ + EM( FUSE_READLINK, "FUSE_READLINK") \ + EM( FUSE_SYMLINK, "FUSE_SYMLINK") \ + EM( FUSE_MKNOD, "FUSE_MKNOD") \ + EM( FUSE_MKDIR, "FUSE_MKDIR") \ + EM( FUSE_UNLINK, "FUSE_UNLINK") \ + EM( FUSE_RMDIR, "FUSE_RMDIR") \ + EM( FUSE_RENAME, "FUSE_RENAME") \ + EM( FUSE_LINK, "FUSE_LINK") \ + EM( FUSE_OPEN, "FUSE_OPEN") \ + EM( FUSE_READ, "FUSE_READ") \ + EM( FUSE_WRITE, "FUSE_WRITE") \ + EM( FUSE_STATFS, "FUSE_STATFS") \ + EM( FUSE_RELEASE, "FUSE_RELEASE") \ + EM( FUSE_FSYNC, "FUSE_FSYNC") \ + EM( FUSE_SETXATTR, "FUSE_SETXATTR") \ + EM( FUSE_GETXATTR, "FUSE_GETXATTR") \ + EM( FUSE_LISTXATTR, "FUSE_LISTXATTR") \ + EM( FUSE_REMOVEXATTR, "FUSE_REMOVEXATTR") \ + EM( FUSE_FLUSH, "FUSE_FLUSH") \ + EM( FUSE_INIT, "FUSE_INIT") \ + EM( FUSE_OPENDIR, "FUSE_OPENDIR") \ + EM( FUSE_READDIR, "FUSE_READDIR") \ + EM( FUSE_RELEASEDIR, "FUSE_RELEASEDIR") \ + EM( FUSE_FSYNCDIR, "FUSE_FSYNCDIR") \ + EM( FUSE_GETLK, "FUSE_GETLK") \ + EM( FUSE_SETLK, "FUSE_SETLK") \ + EM( FUSE_SETLKW, "FUSE_SETLKW") \ + EM( FUSE_ACCESS, "FUSE_ACCESS") \ + EM( FUSE_CREATE, "FUSE_CREATE") \ + EM( FUSE_INTERRUPT, "FUSE_INTERRUPT") \ + EM( FUSE_BMAP, "FUSE_BMAP") \ + EM( FUSE_DESTROY, "FUSE_DESTROY") \ + EM( FUSE_IOCTL, "FUSE_IOCTL") \ + EM( FUSE_POLL, "FUSE_POLL") \ + EM( FUSE_NOTIFY_REPLY, "FUSE_NOTIFY_REPLY") \ + EM( FUSE_BATCH_FORGET, "FUSE_BATCH_FORGET") \ + EM( FUSE_FALLOCATE, "FUSE_FALLOCATE") \ + EM( FUSE_READDIRPLUS, "FUSE_READDIRPLUS") \ + EM( FUSE_RENAME2, "FUSE_RENAME2") \ 
+ EM( FUSE_LSEEK, "FUSE_LSEEK") \ + EM( FUSE_COPY_FILE_RANGE, "FUSE_COPY_FILE_RANGE") \ + EM( FUSE_SETUPMAPPING, "FUSE_SETUPMAPPING") \ + EM( FUSE_REMOVEMAPPING, "FUSE_REMOVEMAPPING") \ + EM( FUSE_SYNCFS, "FUSE_SYNCFS") \ + EM( FUSE_TMPFILE, "FUSE_TMPFILE") \ + EM( FUSE_STATX, "FUSE_STATX") \ + EMe(CUSE_INIT, "CUSE_INIT") + +/* + * This will turn the above table into TRACE_DEFINE_ENUM() for each of the + * entries. + */ +#undef EM +#undef EMe +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define EMe(a, b) TRACE_DEFINE_ENUM(a); + +OPCODES + +/* Now we redfine it with the table that __print_symbolic needs. */ +#undef EM +#undef EMe +#define EM(a, b) {a, b}, +#define EMe(a, b) {a, b} + +TRACE_EVENT(fuse_request_send, + TP_PROTO(const struct fuse_req *req), + + TP_ARGS(req), + + TP_STRUCT__entry( + __field(dev_t, connection) + __field(uint64_t, unique) + __field(enum fuse_opcode, opcode) + __field(uint32_t, len) + ), + + TP_fast_assign( + __entry->connection = req->fm->fc->dev; + __entry->unique = req->in.h.unique; + __entry->opcode = req->in.h.opcode; + __entry->len = req->in.h.len; + ), + + TP_printk("connection %u req %llu opcode %u (%s) len %u ", + __entry->connection, __entry->unique, __entry->opcode, + __print_symbolic(__entry->opcode, OPCODES), __entry->len) +); + +TRACE_EVENT(fuse_request_end, + TP_PROTO(const struct fuse_req *req), + + TP_ARGS(req), + + TP_STRUCT__entry( + __field(dev_t, connection) + __field(uint64_t, unique) + __field(uint32_t, len) + __field(int32_t, error) + ), + + TP_fast_assign( + __entry->connection = req->fm->fc->dev; + __entry->unique = req->in.h.unique; + __entry->len = req->out.h.len; + __entry->error = req->out.h.error; + ), + + TP_printk("connection %u req %llu len %u error %d", __entry->connection, + __entry->unique, __entry->len, __entry->error) +); + +#endif /* _TRACE_FUSE_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#define TRACE_INCLUDE_FILE fuse_trace +#include -- Gitee From 11052711ec5d5b5e6f100956319cbb26bfc75ef0 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Thu, 31 Aug 2023 23:07:55 +0300 Subject: [PATCH 1425/2138] fuse: factor out helper for FUSE_DEV_IOC_CLONE ANBZ: #11284 commit aed918310ea2542059eeab6c74defca95c30f77b upstream. In preparation to adding more fuse dev ioctls. Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/fuse/dev.c | 59 ++++++++++++++++++++++++++++----------------------- 1 file changed, 33 insertions(+), 26 deletions(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 1067d322fda0..42d167ceceac 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -2342,43 +2342,50 @@ static int fuse_device_clone(struct fuse_conn *fc, struct file *new) return 0; } -static long fuse_dev_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +static long fuse_dev_ioctl_clone(struct file *file, __u32 __user *argp) { int res; int oldfd; struct fuse_dev *fud = NULL; struct fd f; + if (get_user(oldfd, argp)) + return -EFAULT; + + f = fdget(oldfd); + if (!f.file) + return -EINVAL; + + /* + * Check against file->f_op because CUSE + * uses the same ioctl handler. + */ + if (f.file->f_op == file->f_op) + fud = fuse_get_dev(f.file); + + res = -EINVAL; + if (fud) { + mutex_lock(&fuse_mutex); + res = fuse_device_clone(fud->fc, file); + mutex_unlock(&fuse_mutex); + } + + fdput(f); + return res; +} + +static long fuse_dev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + switch (cmd) { case FUSE_DEV_IOC_CLONE: - if (get_user(oldfd, (__u32 __user *)arg)) - return -EFAULT; + return fuse_dev_ioctl_clone(file, argp); - f = fdget(oldfd); - if (!f.file) - return -EINVAL; - - /* - * Check against file->f_op because CUSE - * uses the same ioctl handler. 
- */ - if (f.file->f_op == file->f_op) - fud = fuse_get_dev(f.file); - - res = -EINVAL; - if (fud) { - mutex_lock(&fuse_mutex); - res = fuse_device_clone(fud->fc, file); - mutex_unlock(&fuse_mutex); - } - fdput(f); - break; default: - res = -ENOTTY; - break; + return -ENOTTY; } - return res; } const struct file_operations fuse_dev_operations = { -- Gitee From 4a8bf63e8e6f49b59adfdf2043afd2ba68ee7f64 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Wed, 28 Feb 2024 16:50:49 +0100 Subject: [PATCH 1426/2138] fuse: use FUSE_ROOT_ID in fuse_get_root_inode() ANBZ: #11284 commit 253e52437119719eb87293ef6852e13bb5ad0960 upstream. ...when calling fuse_iget(). Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/fuse/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 74c05299dcbd..3eef231e47c8 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -999,7 +999,7 @@ static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode) attr.mode = mode; attr.ino = FUSE_ROOT_ID; attr.nlink = 1; - return fuse_iget(sb, 1, 0, &attr, 0, 0); + return fuse_iget(sb, FUSE_ROOT_ID, 0, &attr, 0, 0); } struct fuse_inode_handle { -- Gitee From cac03eb5a553fd9249e9e18098032310fe7261b2 Mon Sep 17 00:00:00 2001 From: Alexander Mikhalitsyn Date: Fri, 5 Jan 2024 16:21:27 +0100 Subject: [PATCH 1427/2138] fuse: fix typo for fuse_permission comment ANBZ: #11284 commit 2d09ab2203ece3d20aad6b3ba82a574c23da7556 upstream. Found by chance while working on support for idmapped mounts in fuse. 
Signed-off-by: Alexander Mikhalitsyn Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/fuse/dir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index ca865e7c4b55..dddcd8c630e0 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1492,7 +1492,7 @@ static int fuse_perm_getattr(struct inode *inode, int mask) * * 1) Local access checking ('default_permissions' mount option) based * on file mode. This is the plain old disk filesystem permission - * modell. + * model. * * 2) "Remote" access checking, where server is responsible for * checking permission in each inode operation. An exception to this -- Gitee From 6b1fc545ff971c60060e76f03f3f441f1e8acda0 Mon Sep 17 00:00:00 2001 From: Alexander Mikhalitsyn Date: Fri, 5 Jan 2024 16:21:29 +0100 Subject: [PATCH 1428/2138] fuse: __kuid_val/__kgid_val helpers in fuse_fill_attr_from_inode() ANBZ: #11284 commit 5a4d888e9f9beeb5062fbddf789278de5295e9f8 upstream. For the sake of consistency, let's use these helpers to extract {u,g}id_t values from k{u,g}id_t ones. There are no functional changes, just to make code cleaner. 
Signed-off-by: Alexander Mikhalitsyn Reviewed-by: Christian Brauner Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/fuse/inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 3eef231e47c8..d33b04e6f616 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -1529,8 +1529,8 @@ static void fuse_fill_attr_from_inode(struct fuse_attr *attr, .ctimensec = ctime.tv_nsec, .mode = fi->inode.i_mode, .nlink = fi->inode.i_nlink, - .uid = fi->inode.i_uid.val, - .gid = fi->inode.i_gid.val, + .uid = __kuid_val(fi->inode.i_uid), + .gid = __kgid_val(fi->inode.i_gid), .rdev = fi->inode.i_rdev, .blksize = 1u << fi->inode.i_blkbits, }; -- Gitee From 134d2d4eda73475912f15b28bb1509baa5d72b81 Mon Sep 17 00:00:00 2001 From: Zhou Jifeng Date: Tue, 7 Nov 2023 16:13:50 +0800 Subject: [PATCH 1429/2138] fuse: Track process write operations in both direct and writethrough modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #11284 commit 2e3f7dd08d70eca86a8cc9b4baf3da77c032d5fc upstream. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Due to the fact that fuse does not count the write IO of processes in the direct and writethrough write modes, user processes cannot track write_bytes through the “/proc/[pid]/io” path. For example, the system tool iotop cannot count the write operations of the corresponding process. 
Signed-off-by: Zhou Jifeng Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/fuse/file.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 483611f1821d..c563bc353d81 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -19,6 +19,7 @@ #include #include #include +#include static int fuse_send_open(struct fuse_mount *fm, u64 nodeid, unsigned int open_flags, int opcode, @@ -1405,7 +1406,7 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) struct address_space *mapping = file->f_mapping; ssize_t written = 0; struct inode *inode = mapping->host; - ssize_t err; + ssize_t err, count; struct fuse_conn *fc = get_fuse_conn(inode); if (fc->writeback_cache) { @@ -1427,10 +1428,12 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) writethrough: inode_lock(inode); - err = generic_write_checks(iocb, from); + err = count = generic_write_checks(iocb, from); if (err <= 0) goto out; + task_io_account_write(count); + err = file_remove_privs(file); if (err) goto out; @@ -1668,6 +1671,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) fuse_dio_lock(iocb, from, &exclusive); res = generic_write_checks(iocb, from); if (res > 0) { + task_io_account_write(res); if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) { res = fuse_direct_IO(iocb, from); } else { -- Gitee From f0afd1ad55f5ab130b34ccbd6b0812d19ef25965 Mon Sep 17 00:00:00 2001 From: Jiachen Zhang Date: Thu, 8 Jun 2023 16:46:09 +0800 Subject: [PATCH 1430/2138] fuse: remove an unnecessary if statement ANBZ: #11284 commit 8a5fb186431326886ccc7b71d40aaf5e53b5d91a upstream. FUSE remote locking code paths never add any locking state to inode->i_flctx, so the locks_remove_posix() function called on file close will return without calling fuse_setlk(). 
Therefore, as the if statement to be removed in this commit will always be false, remove it for clearness. Signed-off-by: Jiachen Zhang Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/fuse/file.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index c563bc353d81..76353487c345 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2641,10 +2641,6 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock) return -ENOLCK; } - /* Unlock on close is handled by the flush method */ - if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX) - return 0; - fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg); err = fuse_simple_request(fm, &args); -- Gitee From d87d8021af5731c2b79d2ab4103e217a31a402e2 Mon Sep 17 00:00:00 2001 From: Lei Huang Date: Tue, 29 Aug 2023 14:36:33 -0400 Subject: [PATCH 1431/2138] fuse: Fix missing FOLL_PIN for direct-io ANBZ: #11284 commit 738adade96b2ec414a44f3b1ed891fec3e0c03dd upstream. Our user space filesystem relies on fuse to provide POSIX interface. In our test, a known string is written into a file and the content is read back later to verify correct data returned. We observed wrong data returned in read buffer in rare cases although correct data are stored in our filesystem. Fuse kernel module calls iov_iter_get_pages2() to get the physical pages of the user-space read buffer passed in read(). The pages are not pinned to avoid page migration. When page migration occurs, the consequence are two-folds. 1) Applications do not receive correct data in read buffer. 2) fuse kernel writes data into a wrong place. Using iov_iter_extract_pages() to pin pages fixes the issue in our test. An auxiliary variable "struct page **pt_pages" is used in the patch to prepare the 2nd parameter for iov_iter_extract_pages() since iov_iter_get_pages2() uses a different type for the 2nd parameter. 
[SzM] add iov_iter_extract_will_pin(ii) and unpin only if true. Signed-off-by: Lei Huang Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/fuse/file.c | 15 ++++++++++----- fs/fuse/fuse_i.h | 1 + 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 76353487c345..68ab69c08020 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -655,7 +655,8 @@ static void fuse_release_user_pages(struct fuse_args_pages *ap, for (i = 0; i < ap->num_pages; i++) { if (should_dirty) set_page_dirty_lock(ap->pages[i]); - put_page(ap->pages[i]); + if (ap->args.is_pinned) + unpin_user_page(ap->pages[i]); } } @@ -1495,10 +1496,13 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, while (nbytes < *nbytesp && ap->num_pages < max_pages) { unsigned npages; size_t start; - ret = iov_iter_get_pages2(ii, &ap->pages[ap->num_pages], - *nbytesp - nbytes, - max_pages - ap->num_pages, - &start); + struct page **pt_pages; + + pt_pages = &ap->pages[ap->num_pages]; + ret = iov_iter_extract_pages(ii, &pt_pages, + *nbytesp - nbytes, + max_pages - ap->num_pages, + 0, &start); if (ret < 0) break; @@ -1515,6 +1519,7 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, (PAGE_SIZE - ret) & (PAGE_SIZE - 1); } + ap->args.is_pinned = iov_iter_extract_will_pin(ii); ap->args.user_pages = true; if (write) ap->args.in_pages = true; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index c57c5132d0ab..fd68d78e43e2 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -295,6 +295,7 @@ struct fuse_args { bool page_replace:1; bool may_block:1; bool is_ext:1; + bool is_pinned:1; struct fuse_in_arg in_args[3]; struct fuse_arg out_args[2]; void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error); -- Gitee From c5a055eb19f3b6e7e986fa5852b987c7f30e0d25 Mon Sep 17 00:00:00 2001 From: Kemeng Shi Date: Sat, 7 Oct 2023 
23:39:56 +0800 Subject: [PATCH 1432/2138] fuse: remove unneeded lock which protecting update of congestion_threshold ANBZ: #11284 commit efc4105a4cf9e300b8e9150147415fa235059293 upstream. Commit 670d21c6e17f6 ("fuse: remove reliance on bdi congestion") change how congestion_threshold is used and lock in fuse_conn_congestion_threshold_write is not needed anymore. 1. Access to supe_block is removed along with removing of bdi congestion. Then down_read(&fc->killsb) which protecting access to super_block is no needed. 2. Compare num_background and congestion_threshold without holding bg_lock. Then there is no need to hold bg_lock to update congestion_threshold. Signed-off-by: Kemeng Shi Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/fuse/control.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/fs/fuse/control.c b/fs/fuse/control.c index ab62e4624256..1bf928e277fe 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -174,11 +174,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file, if (!fc) goto out; - down_read(&fc->killsb); - spin_lock(&fc->bg_lock); - fc->congestion_threshold = val; - spin_unlock(&fc->bg_lock); - up_read(&fc->killsb); + WRITE_ONCE(fc->congestion_threshold, val); fuse_conn_put(fc); out: return ret; -- Gitee From 119b0898c8ada893e98dd21af4f3a23d6a7497d8 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Wed, 6 Mar 2024 16:20:58 +0100 Subject: [PATCH 1433/2138] fuse: get rid of ff->readdir.lock ANBZ: #11284 commit cdf6ac2a03d253f05d3e798f60f23dea1b176b92 upstream. The same protection is provided by file->f_pos_lock. Note, this relies on the fact that file->f_mode has FMODE_ATOMIC_POS. This flag is cleared by stream_open(), which would prevent locking of f_pos_lock. Prior to commit 7de64d521bf9 ("fuse: break up fuse_open_common()") FOPEN_STREAM on a directory would cause stream_open() to be called. 
After this commit this is not done anymore, so f_pos_lock will always be locked. Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/fuse/file.c | 2 -- fs/fuse/fuse_i.h | 6 ------ fs/fuse/readdir.c | 4 ---- 3 files changed, 12 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 68ab69c08020..2211df69e0d1 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -75,7 +75,6 @@ struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release) } INIT_LIST_HEAD(&ff->write_entry); - mutex_init(&ff->readdir.lock); refcount_set(&ff->count, 1); RB_CLEAR_NODE(&ff->polled_node); init_waitqueue_head(&ff->poll_wait); @@ -88,7 +87,6 @@ struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release) void fuse_file_free(struct fuse_file *ff) { kfree(ff->release_args); - mutex_destroy(&ff->readdir.lock); kfree(ff); } diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index fd68d78e43e2..c33e34c04b81 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -230,12 +230,6 @@ struct fuse_file { /* Readdir related */ struct { - /* - * Protects below fields against (crazy) parallel readdir on - * same open file. Uncontended in the normal case. 
- */ - struct mutex lock; - /* Dir stream position */ loff_t pos; diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c index 9e6d587b3e67..d3f685104d48 100644 --- a/fs/fuse/readdir.c +++ b/fs/fuse/readdir.c @@ -590,15 +590,11 @@ int fuse_readdir(struct file *file, struct dir_context *ctx) if (fuse_is_bad(inode)) return -EIO; - mutex_lock(&ff->readdir.lock); - err = UNCACHED; if (ff->open_flags & FOPEN_CACHE_DIR) err = fuse_readdir_cached(file, ctx); if (err == UNCACHED) err = fuse_readdir_uncached(file, ctx); - mutex_unlock(&ff->readdir.lock); - return err; } -- Gitee From 9be9b443f3ba76361d9ec3e69400728bb4b9e94b Mon Sep 17 00:00:00 2001 From: Youling Tang Date: Thu, 20 Jun 2024 11:23:33 +0800 Subject: [PATCH 1434/2138] fs: Export in_group_or_capable() ANBZ: #11284 commit 9b6a14f08b4875aa22ea0b5bc35042e2580b311b upstream. Export in_group_or_capable() as a VFS helper function. Signed-off-by: Youling Tang Link: https://lore.kernel.org/r/20240620032335.147136-1-youling.tang@linux.dev Signed-off-by: Christian Brauner Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/attr.c | 2 -- fs/inode.c | 1 + include/linux/fs.h | 2 ++ 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/attr.c b/fs/attr.c index a8ae5f6d9b16..5f0211c6f983 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -19,8 +19,6 @@ #include #include -#include "internal.h" - /** * setattr_should_drop_sgid - determine whether the setgid bit needs to be * removed diff --git a/fs/inode.c b/fs/inode.c index 030e07b169c2..2c44dda61a69 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -2609,6 +2609,7 @@ bool in_group_or_capable(struct mnt_idmap *idmap, return true; return false; } +EXPORT_SYMBOL(in_group_or_capable); /** * mode_strip_sgid - handle the sgid bit for non-directories diff --git a/include/linux/fs.h b/include/linux/fs.h index 2d5a642e56f7..45991a7a9a2e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1858,6 +1858,8 @@ void 
inode_init_owner(struct mnt_idmap *idmap, struct inode *inode, extern bool may_open_dev(const struct path *path); umode_t mode_strip_sgid(struct mnt_idmap *idmap, const struct inode *dir, umode_t mode); +bool in_group_or_capable(struct mnt_idmap *idmap, + const struct inode *inode, vfsgid_t vfsgid); /* * This is the "filldir" function type, used by readdir() to let -- Gitee From a23035e751feca0867b9463ce463b5175966ecb6 Mon Sep 17 00:00:00 2001 From: Youling Tang Date: Thu, 20 Jun 2024 11:23:35 +0800 Subject: [PATCH 1435/2138] fuse: Use in_group_or_capable() helper ANBZ: #11284 commit 153216cf7bd50842ffc3d500ea96adeb65db63ef upstream. Use the in_group_or_capable() helper function to simplify the code. Signed-off-by: Youling Tang Link: https://lore.kernel.org/r/20240620032335.147136-3-youling.tang@linux.dev Signed-off-by: Christian Brauner Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/fuse/acl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/fuse/acl.c b/fs/fuse/acl.c index 3d192b80a561..04cfd8fee992 100644 --- a/fs/fuse/acl.c +++ b/fs/fuse/acl.c @@ -146,8 +146,8 @@ int fuse_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, * be stripped. */ if (fc->posix_acl && - !vfsgid_in_group_p(i_gid_into_vfsgid(&nop_mnt_idmap, inode)) && - !capable_wrt_inode_uidgid(&nop_mnt_idmap, inode, CAP_FSETID)) + !in_group_or_capable(&nop_mnt_idmap, inode, + i_gid_into_vfsgid(&nop_mnt_idmap, inode))) extra_flags |= FUSE_SETXATTR_ACL_KILL_SGID; ret = fuse_setxattr(inode, name, value, size, 0, extra_flags); -- Gitee From 386ebaf4f003aebb964da082af33ee23efdc61c4 Mon Sep 17 00:00:00 2001 From: yangyun Date: Sat, 14 Sep 2024 16:51:31 +0800 Subject: [PATCH 1436/2138] fuse: use exclusive lock when FUSE_I_CACHE_IO_MODE is set ANBZ: #11284 commit 2f3d8ff457982f4055fe8f7bf19d3821ba22c376 upstream. This may be a typo. The comment has said shared locks are not allowed when this bit is set. 
If using shared lock, the wait in `fuse_file_cached_io_open` may be forever. Fixes: 205c1d802683 ("fuse: allow parallel dio writes with FUSE_DIRECT_IO_ALLOW_MMAP") CC: stable@vger.kernel.org # v6.9 Signed-off-by: yangyun Reviewed-by: Bernd Schubert Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3953 --- fs/fuse/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 2211df69e0d1..adb8afcecf67 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1349,7 +1349,7 @@ static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from /* shared locks are not allowed with parallel page cache IO */ if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state)) - return false; + return true; /* Parallel dio beyond EOF is not supported, at least for now. */ if (fuse_io_past_eof(iocb, from)) -- Gitee From ebc28ff0f50c39c143de8fef2531e18df342a485 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Mon, 12 Feb 2024 19:11:48 -0500 Subject: [PATCH 1437/2138] virtiofs: export filesystem tags through sysfs ANBZ: #11285 commit a8f62f50b4e4ea92a938fca2ec1bd108d7f210e9 upstream. The virtiofs filesystem is mounted using a "tag" which is exported by the virtiofs device: # mount -t virtiofs /mnt The virtiofs driver knows about all the available tags but these are currently not exported to user space. People have asked for these tags to be exported to user space. Most recently Lennart Poettering has asked for it as he wants to scan the tags and mount virtiofs automatically in certain cases. https://gitlab.com/virtio-fs/virtiofsd/-/issues/128 This patch exports tags at /sys/fs/virtiofs//tag where N is the id of the virtiofs device. The filesystem tag can be obtained by reading this "tag" file. There is also a symlink at /sys/fs/virtiofs//device that points to the virtiofs device that exports this tag. 
This patch converts the existing struct virtio_fs into a full kobject. It already had a refcount so it's an easy change. The virtio_fs objects can then be exposed in a kset at /sys/fs/virtiofs/. Note that virtio_fs objects may live slightly longer than we wish for them to be exposed to userspace, so kobject_del() is called explicitly when the underlying virtio_device is removed. The virtio_fs object is freed when all references are dropped (e.g. active mounts) but disappears as soon as the virtiofs device is gone. Originally-by: Vivek Goyal Signed-off-by: Stefan Hajnoczi Reviewed-by: Vivek Goyal Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3954 --- Documentation/ABI/testing/sysfs-fs-virtiofs | 11 ++ fs/fuse/virtio_fs.c | 112 ++++++++++++++++---- 2 files changed, 101 insertions(+), 22 deletions(-) create mode 100644 Documentation/ABI/testing/sysfs-fs-virtiofs diff --git a/Documentation/ABI/testing/sysfs-fs-virtiofs b/Documentation/ABI/testing/sysfs-fs-virtiofs new file mode 100644 index 000000000000..4839dbce997e --- /dev/null +++ b/Documentation/ABI/testing/sysfs-fs-virtiofs @@ -0,0 +1,11 @@ +What: /sys/fs/virtiofs//tag +Date: Feb 2024 +Contact: virtio-fs@lists.linux.dev +Description: + [RO] The mount "tag" that can be used to mount this filesystem. + +What: /sys/fs/virtiofs//device +Date: Feb 2024 +Contact: virtio-fs@lists.linux.dev +Description: + Symlink to the virtio device that exports this filesystem. 
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index ac57d4ed8a45..17d7fb3e9724 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -33,6 +33,9 @@ static DEFINE_MUTEX(virtio_fs_mutex); static LIST_HEAD(virtio_fs_instances); +/* The /sys/fs/virtio_fs/ kset */ +static struct kset *virtio_fs_kset; + enum { VQ_HIPRIO, VQ_REQUEST @@ -57,7 +60,7 @@ struct virtio_fs_vq { /* A virtio-fs device instance */ struct virtio_fs { - struct kref refcount; + struct kobject kobj; struct list_head list; /* on virtio_fs_instances */ char *tag; struct virtio_fs_vq *vqs; @@ -165,19 +168,41 @@ static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq) complete(&fsvq->in_flight_zero); } -static void release_virtio_fs_obj(struct kref *ref) +static ssize_t tag_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj); + + return sysfs_emit(buf, fs->tag); +} + +static struct kobj_attribute virtio_fs_tag_attr = __ATTR_RO(tag); + +static struct attribute *virtio_fs_attrs[] = { + &virtio_fs_tag_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(virtio_fs); + +static void virtio_fs_ktype_release(struct kobject *kobj) { - struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount); + struct virtio_fs *vfs = container_of(kobj, struct virtio_fs, kobj); kfree(vfs->mq_map); kfree(vfs->vqs); kfree(vfs); } +static const struct kobj_type virtio_fs_ktype = { + .release = virtio_fs_ktype_release, + .sysfs_ops = &kobj_sysfs_ops, + .default_groups = virtio_fs_groups, +}; + /* Make sure virtiofs_mutex is held */ static void virtio_fs_put(struct virtio_fs *fs) { - kref_put(&fs->refcount, release_virtio_fs_obj); + kobject_put(&fs->kobj); } static void virtio_fs_fiq_release(struct fuse_iqueue *fiq) @@ -248,25 +273,44 @@ static void virtio_fs_start_all_queues(struct virtio_fs *fs) } /* Add a new instance to the list or return -EEXIST if tag name exists*/ -static int virtio_fs_add_instance(struct virtio_fs *fs) 
+static int virtio_fs_add_instance(struct virtio_device *vdev, + struct virtio_fs *fs) { struct virtio_fs *fs2; - bool duplicate = false; + int ret; mutex_lock(&virtio_fs_mutex); list_for_each_entry(fs2, &virtio_fs_instances, list) { - if (strcmp(fs->tag, fs2->tag) == 0) - duplicate = true; + if (strcmp(fs->tag, fs2->tag) == 0) { + mutex_unlock(&virtio_fs_mutex); + return -EEXIST; + } + } + + /* Use the virtio_device's index as a unique identifier, there is no + * need to allocate our own identifiers because the virtio_fs instance + * is only visible to userspace as long as the underlying virtio_device + * exists. + */ + fs->kobj.kset = virtio_fs_kset; + ret = kobject_add(&fs->kobj, NULL, "%d", vdev->index); + if (ret < 0) { + mutex_unlock(&virtio_fs_mutex); + return ret; + } + + ret = sysfs_create_link(&fs->kobj, &vdev->dev.kobj, "device"); + if (ret < 0) { + kobject_del(&fs->kobj); + mutex_unlock(&virtio_fs_mutex); + return ret; } - if (!duplicate) - list_add_tail(&fs->list, &virtio_fs_instances); + list_add_tail(&fs->list, &virtio_fs_instances); mutex_unlock(&virtio_fs_mutex); - if (duplicate) - return -EEXIST; return 0; } @@ -279,7 +323,7 @@ static struct virtio_fs *virtio_fs_find_instance(const char *tag) list_for_each_entry(fs, &virtio_fs_instances, list) { if (strcmp(fs->tag, tag) == 0) { - kref_get(&fs->refcount); + kobject_get(&fs->kobj); goto found; } } @@ -936,7 +980,7 @@ static int virtio_fs_probe(struct virtio_device *vdev) fs = kzalloc(sizeof(*fs), GFP_KERNEL); if (!fs) return -ENOMEM; - kref_init(&fs->refcount); + kobject_init(&fs->kobj, &virtio_fs_ktype); vdev->priv = fs; ret = virtio_fs_read_tag(vdev, fs); @@ -958,7 +1002,7 @@ static int virtio_fs_probe(struct virtio_device *vdev) */ virtio_device_ready(vdev); - ret = virtio_fs_add_instance(fs); + ret = virtio_fs_add_instance(vdev, fs); if (ret < 0) goto out_vqs; @@ -967,11 +1011,10 @@ static int virtio_fs_probe(struct virtio_device *vdev) out_vqs: virtio_reset_device(vdev); 
virtio_fs_cleanup_vqs(vdev); - kfree(fs->vqs); out: vdev->priv = NULL; - kfree(fs); + kobject_put(&fs->kobj); return ret; } @@ -995,6 +1038,8 @@ static void virtio_fs_remove(struct virtio_device *vdev) mutex_lock(&virtio_fs_mutex); /* This device is going away. No one should get new reference */ list_del_init(&fs->list); + sysfs_remove_link(&fs->kobj, "device"); + kobject_del(&fs->kobj); virtio_fs_stop_all_queues(fs); virtio_fs_drain_all_queues_locked(fs); virtio_reset_device(vdev); @@ -1566,21 +1611,43 @@ static struct file_system_type virtio_fs_type = { .kill_sb = virtio_kill_sb, }; +static int __init virtio_fs_sysfs_init(void) +{ + virtio_fs_kset = kset_create_and_add("virtiofs", NULL, fs_kobj); + if (!virtio_fs_kset) + return -ENOMEM; + return 0; +} + +static void __exit virtio_fs_sysfs_exit(void) +{ + kset_unregister(virtio_fs_kset); + virtio_fs_kset = NULL; +} + static int __init virtio_fs_init(void) { int ret; - ret = register_virtio_driver(&virtio_fs_driver); + ret = virtio_fs_sysfs_init(); if (ret < 0) return ret; + ret = register_virtio_driver(&virtio_fs_driver); + if (ret < 0) + goto sysfs_exit; + ret = register_filesystem(&virtio_fs_type); - if (ret < 0) { - unregister_virtio_driver(&virtio_fs_driver); - return ret; - } + if (ret < 0) + goto unregister_virtio_driver; return 0; + +unregister_virtio_driver: + unregister_virtio_driver(&virtio_fs_driver); +sysfs_exit: + virtio_fs_sysfs_exit(); + return ret; } module_init(virtio_fs_init); @@ -1588,6 +1655,7 @@ static void __exit virtio_fs_exit(void) { unregister_filesystem(&virtio_fs_type); unregister_virtio_driver(&virtio_fs_driver); + virtio_fs_sysfs_exit(); } module_exit(virtio_fs_exit); -- Gitee From cb8f96f3056456a04fe61dbb902ec85ee346cb96 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Mon, 12 Feb 2024 19:11:49 -0500 Subject: [PATCH 1438/2138] virtiofs: emit uevents on filesystem events ANBZ: #11285 commit 9086b2d9e9f3da0b0f939aa1d7ff74e9bf5b54c8 upstream. 
Alyssa Ross requested that virtiofs notifies userspace when filesytems become available. This can be used to detect when a filesystem with a given tag is hotplugged, for example. uevents allow userspace to detect changes without resorting to polling. The tag is included as a uevent property so it's easy for userspace to identify the filesystem in question even when the sysfs directory goes away during removal. Here are example uevents: # udevadm monitor -k -p KERNEL[111.113221] add /fs/virtiofs/2 (virtiofs) ACTION=add DEVPATH=/fs/virtiofs/2 SUBSYSTEM=virtiofs TAG=test KERNEL[165.527167] remove /fs/virtiofs/2 (virtiofs) ACTION=remove DEVPATH=/fs/virtiofs/2 SUBSYSTEM=virtiofs TAG=test Signed-off-by: Stefan Hajnoczi Reviewed-by: Vivek Goyal Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3954 --- fs/fuse/virtio_fs.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 17d7fb3e9724..6d5fcea68c41 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -311,6 +311,8 @@ static int virtio_fs_add_instance(struct virtio_device *vdev, mutex_unlock(&virtio_fs_mutex); + kobject_uevent(&fs->kobj, KOBJ_ADD); + return 0; } @@ -1611,9 +1613,22 @@ static struct file_system_type virtio_fs_type = { .kill_sb = virtio_kill_sb, }; +static int virtio_fs_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) +{ + const struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj); + + add_uevent_var(env, "TAG=%s", fs->tag); + return 0; +} + +static const struct kset_uevent_ops virtio_fs_uevent_ops = { + .uevent = virtio_fs_uevent, +}; + static int __init virtio_fs_sysfs_init(void) { - virtio_fs_kset = kset_create_and_add("virtiofs", NULL, fs_kobj); + virtio_fs_kset = kset_create_and_add("virtiofs", &virtio_fs_uevent_ops, + fs_kobj); if (!virtio_fs_kset) return -ENOMEM; return 0; -- Gitee From 
c056d2448981e87c873659837b92ea0d0d8a127b Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Tue, 27 Feb 2024 10:57:56 -0500 Subject: [PATCH 1439/2138] virtiofs: drop __exit from virtio_fs_sysfs_exit() ANBZ: #11285 commit d30ff89870482d88807393b592d5f0d1d4bc5e2a upstream. virtio_fs_sysfs_exit() is called by: - static int __init virtio_fs_init(void) - static void __exit virtio_fs_exit(void) Remove __exit from virtio_fs_sysfs_exit() since virtio_fs_init() is not an __exit function. Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202402270649.GYjNX0yw-lkp@intel.com/ Signed-off-by: Stefan Hajnoczi Reviewed-by: Randy Dunlap Tested-by: Randy Dunlap # build-tested Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3954 --- fs/fuse/virtio_fs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 6d5fcea68c41..ebdbbb13a08b 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -1634,7 +1634,7 @@ static int __init virtio_fs_sysfs_init(void) return 0; } -static void __exit virtio_fs_sysfs_exit(void) +static void virtio_fs_sysfs_exit(void) { kset_unregister(virtio_fs_kset); virtio_fs_kset = NULL; -- Gitee From 6add5c075dcbc2bf11ad16be67c649cc354eccd4 Mon Sep 17 00:00:00 2001 From: Brian Foster Date: Thu, 25 Apr 2024 06:44:00 -0400 Subject: [PATCH 1440/2138] virtiofs: include a newline in sysfs tag ANBZ: #11285 commit 96d88f65adfbcaca153afd7d3e20d74ba379c599 upstream. The internal tag string doesn't contain a newline. Append one when emitting the tag via sysfs. [Stefan] Orthogonal to the newline issue, sysfs_emit(buf, "%s", fs->tag) is needed to prevent format string injection. 
Signed-off-by: Brian Foster Fixes: a8f62f50b4e4 ("virtiofs: export filesystem tags through sysfs") Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3954 --- fs/fuse/virtio_fs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index ebdbbb13a08b..2fbf58ca21ac 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -173,7 +173,7 @@ static ssize_t tag_show(struct kobject *kobj, { struct virtio_fs *fs = container_of(kobj, struct virtio_fs, kobj); - return sysfs_emit(buf, fs->tag); + return sysfs_emit(buf, "%s\n", fs->tag); } static struct kobj_attribute virtio_fs_tag_attr = __ATTR_RO(tag); -- Gitee From 752acdb107add54cf539cb3c4bb985a6e920e99f Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Sun, 25 Aug 2024 16:07:15 +0300 Subject: [PATCH 1441/2138] virtio_fs: introduce virtio_fs_put_locked helper ANBZ: #11285 commit 4045b6429874e07f14b5b41e326d4e6f866f8bbf upstream. Introduce a new helper function virtio_fs_put_locked to encapsulate the common pattern of releasing a virtio_fs reference while holding a lock. The existing virtio_fs_put helper will be used to release a virtio_fs reference while not holding a lock. Also add an assertion in case the lock is not taken when it should. Reviewed-by: Idan Zach Reviewed-by: Shai Malin Signed-off-by: Max Gurtovoy Message-Id: <20240825130716.9506-1-mgurtovoy@nvidia.com> Signed-off-by: Michael S. 
Tsirkin Reviewed-by: Stefan Hajnoczi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3954 --- fs/fuse/virtio_fs.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 2fbf58ca21ac..17526383b478 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -200,18 +200,25 @@ static const struct kobj_type virtio_fs_ktype = { }; /* Make sure virtiofs_mutex is held */ -static void virtio_fs_put(struct virtio_fs *fs) +static void virtio_fs_put_locked(struct virtio_fs *fs) { + lockdep_assert_held(&virtio_fs_mutex); + kobject_put(&fs->kobj); } +static void virtio_fs_put(struct virtio_fs *fs) +{ + mutex_lock(&virtio_fs_mutex); + virtio_fs_put_locked(fs); + mutex_unlock(&virtio_fs_mutex); +} + static void virtio_fs_fiq_release(struct fuse_iqueue *fiq) { struct virtio_fs *vfs = fiq->priv; - mutex_lock(&virtio_fs_mutex); virtio_fs_put(vfs); - mutex_unlock(&virtio_fs_mutex); } static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq) @@ -1049,7 +1056,7 @@ static void virtio_fs_remove(struct virtio_device *vdev) vdev->priv = NULL; /* Put device reference on virtio_fs object */ - virtio_fs_put(fs); + virtio_fs_put_locked(fs); mutex_unlock(&virtio_fs_mutex); } @@ -1579,9 +1586,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc) out_err: kfree(fc); - mutex_lock(&virtio_fs_mutex); virtio_fs_put(fs); - mutex_unlock(&virtio_fs_mutex); return err; } -- Gitee From cc5f944ee923b528ad8058d6782d8917faf45b68 Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Sun, 25 Aug 2024 16:07:16 +0300 Subject: [PATCH 1442/2138] virtio_fs: add sysfs entries for queue information ANBZ: #11285 commit 87cbdc396a31ce29b0849705e565c81564d5ed4b upstream. Introduce sysfs entries to provide visibility to the multiple queues used by the Virtio FS device. This enhancement allows users to query information about these queues. Specifically, add two sysfs entries: 1. 
Queue name: Provides the name of each queue (e.g. hiprio/requests.8). 2. CPU list: Shows the list of CPUs that can process requests for each queue. The CPU list feature is inspired by similar functionality in the block MQ layer, which provides analogous sysfs entries for block devices. These new sysfs entries will improve observability and aid in debugging and performance tuning of Virtio FS devices. Reviewed-by: Idan Zach Reviewed-by: Shai Malin Signed-off-by: Max Gurtovoy Message-Id: <20240825130716.9506-2-mgurtovoy@nvidia.com> Signed-off-by: Michael S. Tsirkin Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3954 --- fs/fuse/virtio_fs.c | 147 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 139 insertions(+), 8 deletions(-) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 17526383b478..4916ef656b49 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -55,12 +55,14 @@ struct virtio_fs_vq { bool connected; long in_flight; struct completion in_flight_zero; /* No inflight requests */ + struct kobject *kobj; char name[VQ_NAME_LEN]; } ____cacheline_aligned_in_smp; /* A virtio-fs device instance */ struct virtio_fs { struct kobject kobj; + struct kobject *mqs_kobj; struct list_head list; /* on virtio_fs_instances */ char *tag; struct virtio_fs_vq *vqs; @@ -199,6 +201,74 @@ static const struct kobj_type virtio_fs_ktype = { .default_groups = virtio_fs_groups, }; +static struct virtio_fs_vq *virtio_fs_kobj_to_vq(struct virtio_fs *fs, + struct kobject *kobj) +{ + int i; + + for (i = 0; i < fs->nvqs; i++) { + if (kobj == fs->vqs[i].kobj) + return &fs->vqs[i]; + } + return NULL; +} + +static ssize_t name_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj); + struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj); + + if (!fsvq) + return -EINVAL; + return sysfs_emit(buf, "%s\n", 
fsvq->name); +} + +static struct kobj_attribute virtio_fs_vq_name_attr = __ATTR_RO(name); + +static ssize_t cpu_list_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct virtio_fs *fs = container_of(kobj->parent->parent, struct virtio_fs, kobj); + struct virtio_fs_vq *fsvq = virtio_fs_kobj_to_vq(fs, kobj); + unsigned int cpu, qid; + const size_t size = PAGE_SIZE - 1; + bool first = true; + int ret = 0, pos = 0; + + if (!fsvq) + return -EINVAL; + + qid = fsvq->vq->index; + for (cpu = 0; cpu < nr_cpu_ids; cpu++) { + if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid - VQ_REQUEST)) { + if (first) + ret = snprintf(buf + pos, size - pos, "%u", cpu); + else + ret = snprintf(buf + pos, size - pos, ", %u", cpu); + + if (ret >= size - pos) + break; + first = false; + pos += ret; + } + } + ret = snprintf(buf + pos, size + 1 - pos, "\n"); + return pos + ret; +} + +static struct kobj_attribute virtio_fs_vq_cpu_list_attr = __ATTR_RO(cpu_list); + +static struct attribute *virtio_fs_vq_attrs[] = { + &virtio_fs_vq_name_attr.attr, + &virtio_fs_vq_cpu_list_attr.attr, + NULL +}; + +static struct attribute_group virtio_fs_vq_attr_group = { + .attrs = virtio_fs_vq_attrs, +}; + /* Make sure virtiofs_mutex is held */ static void virtio_fs_put_locked(struct virtio_fs *fs) { @@ -279,6 +349,50 @@ static void virtio_fs_start_all_queues(struct virtio_fs *fs) } } +static void virtio_fs_delete_queues_sysfs(struct virtio_fs *fs) +{ + struct virtio_fs_vq *fsvq; + int i; + + for (i = 0; i < fs->nvqs; i++) { + fsvq = &fs->vqs[i]; + kobject_put(fsvq->kobj); + } +} + +static int virtio_fs_add_queues_sysfs(struct virtio_fs *fs) +{ + struct virtio_fs_vq *fsvq; + char buff[12]; + int i, j, ret; + + for (i = 0; i < fs->nvqs; i++) { + fsvq = &fs->vqs[i]; + + sprintf(buff, "%d", i); + fsvq->kobj = kobject_create_and_add(buff, fs->mqs_kobj); + if (!fs->mqs_kobj) { + ret = -ENOMEM; + goto out_del; + } + + ret = sysfs_create_group(fsvq->kobj, &virtio_fs_vq_attr_group); + if (ret) { + 
kobject_put(fsvq->kobj); + goto out_del; + } + } + + return 0; + +out_del: + for (j = 0; j < i; j++) { + fsvq = &fs->vqs[j]; + kobject_put(fsvq->kobj); + } + return ret; +} + /* Add a new instance to the list or return -EEXIST if tag name exists*/ static int virtio_fs_add_instance(struct virtio_device *vdev, struct virtio_fs *fs) @@ -302,17 +416,22 @@ static int virtio_fs_add_instance(struct virtio_device *vdev, */ fs->kobj.kset = virtio_fs_kset; ret = kobject_add(&fs->kobj, NULL, "%d", vdev->index); - if (ret < 0) { - mutex_unlock(&virtio_fs_mutex); - return ret; + if (ret < 0) + goto out_unlock; + + fs->mqs_kobj = kobject_create_and_add("mqs", &fs->kobj); + if (!fs->mqs_kobj) { + ret = -ENOMEM; + goto out_del; } ret = sysfs_create_link(&fs->kobj, &vdev->dev.kobj, "device"); - if (ret < 0) { - kobject_del(&fs->kobj); - mutex_unlock(&virtio_fs_mutex); - return ret; - } + if (ret < 0) + goto out_put; + + ret = virtio_fs_add_queues_sysfs(fs); + if (ret) + goto out_remove; list_add_tail(&fs->list, &virtio_fs_instances); @@ -321,6 +440,16 @@ static int virtio_fs_add_instance(struct virtio_device *vdev, kobject_uevent(&fs->kobj, KOBJ_ADD); return 0; + +out_remove: + sysfs_remove_link(&fs->kobj, "device"); +out_put: + kobject_put(fs->mqs_kobj); +out_del: + kobject_del(&fs->kobj); +out_unlock: + mutex_unlock(&virtio_fs_mutex); + return ret; } /* Return the virtio_fs with a given tag, or NULL */ @@ -1047,7 +1176,9 @@ static void virtio_fs_remove(struct virtio_device *vdev) mutex_lock(&virtio_fs_mutex); /* This device is going away. 
No one should get new reference */ list_del_init(&fs->list); + virtio_fs_delete_queues_sysfs(fs); sysfs_remove_link(&fs->kobj, "device"); + kobject_put(fs->mqs_kobj); kobject_del(&fs->kobj); virtio_fs_stop_all_queues(fs); virtio_fs_drain_all_queues_locked(fs); -- Gitee From f5798a250d341a5ca16e0ff05e822b25c272113c Mon Sep 17 00:00:00 2001 From: gaojuxin Date: Sat, 12 Oct 2024 14:06:24 +0800 Subject: [PATCH 1443/2138] anolis: There need drm-direct.h for loongarch under current kernel version ANBZ: #11294 Besides, If you do not delete this patch, the following error will be reported when compiling the kernel image of the loongarch architecture. ./include/linux/dma-direct.h:53:10: fatal error: asm/dma-direct.h: No such file or directory Revert "LoongArch: Remove the unused dma-direct.h" This reverts commit f6758eb7928e8a069c6348010ecd4e635b03aeda. Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3969 --- arch/loongarch/include/asm/dma-direct.h | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 arch/loongarch/include/asm/dma-direct.h diff --git a/arch/loongarch/include/asm/dma-direct.h b/arch/loongarch/include/asm/dma-direct.h new file mode 100644 index 000000000000..75ccd808a2af --- /dev/null +++ b/arch/loongarch/include/asm/dma-direct.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _LOONGARCH_DMA_DIRECT_H +#define _LOONGARCH_DMA_DIRECT_H + +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); + +#endif /* _LOONGARCH_DMA_DIRECT_H */ -- Gitee From bd82e2e6d8d423640879971fa678c6cb4671ae87 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Sat, 12 Oct 2024 15:50:20 +0800 Subject: [PATCH 1444/2138] anolis: iommu/dma: Fix not fully traversing iova reservations issue ANBZ: #9443 For multiple devices in the same iommu group, sorted later device (based on Bus:Dev.Func) 
have the RMRR. Sorted earlier device (without RMRR) initialized the iova domain causing the sorted later device goto done_unlock. Then, the sorted later device (with RMRR) cannot execute the iova_reserve_iommu_regions to reserve the RMRR in the group's iova domain, and other devices (in the same group) alloc iova in RMRR are permitted. DMA iova addresses conflict with RMRR in this case. Goto iova_reserve_iommu_regions could avoid the problem (make sure all devices of the same group execute reserve iova) Signed-off-by: leoliu-oc Acked-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3442 --- drivers/iommu/dma-iommu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 4982f561adb1..9a33d5435ea5 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -626,7 +626,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, } ret = 0; - goto done_unlock; + goto iova_reserve; } init_iova_domain(iovad, 1UL << order, base_pfn); @@ -639,6 +639,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, (!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain))) domain->type = IOMMU_DOMAIN_DMA; +iova_reserve: ret = iova_reserve_iommu_regions(dev, domain); done_unlock: -- Gitee From 5b81bcc3ada7ac54a1814ba6a86fd8126fcb4394 Mon Sep 17 00:00:00 2001 From: gaojuxin Date: Sat, 12 Oct 2024 15:34:29 +0800 Subject: [PATCH 1445/2138] anolis: xfs: fix build failure when CONFIG_FS_DAX is off ANBZ: #11294 This commit dddb2795c19d changes the parameters of the function xfs_reflink_unshare_range from two to three, but does not take into account when the CONFIG_FS_DAX is turned off Fixes: dddb2795c19d ("anolis: xfs: never block page fault during inode inactivation") Signed-off-by: gaojuxin Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/3970 --- fs/xfs/xfs_reflink.c | 3 ++- 1 file 
changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index cce649e5e1e0..387322536ff3 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -628,7 +628,8 @@ xfs_reflink_unshare_range( STATIC int xfs_reflink_unshare_range( struct xfs_inode *src, - struct xfs_bmbt_irec *oimap) + struct xfs_bmbt_irec *oimap, + bool *secondary_evicting) { return 0; } -- Gitee From 43428427287f42c281eecc0e7ab5c7169cf46ea0 Mon Sep 17 00:00:00 2001 From: Shanpei Chen Date: Wed, 12 Jun 2019 17:08:58 +0800 Subject: [PATCH 1446/2138] anolis: sched: disable auto group by default ANBZ: #11234 Autogroup feature is used to improve interactivity for desktop application. Since our kernel runs on server, just like RHEL8, disable it by default to avoid unnecessary computing. More details, please refer https://lwn.net/Articles/416641/ Signed-off-by: Shanpei Chen Reviewed-by: Caspar Zhang Acked-by: Joseph Qi Signed-off-by: Cruz Zhao Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3912 --- kernel/sched/autogroup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c index 991fc9002535..e2c2d4e5735f 100644 --- a/kernel/sched/autogroup.c +++ b/kernel/sched/autogroup.c @@ -4,7 +4,7 @@ * Auto-group scheduling implementation: */ -unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; +unsigned int __read_mostly sysctl_sched_autogroup_enabled; static struct autogroup autogroup_default; static atomic_t autogroup_seq_nr; -- Gitee From 03f59c3c3f6d52d2db87ad174909bdff41450235 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Mon, 14 Oct 2024 17:46:39 +0800 Subject: [PATCH 1447/2138] anolis: Revert "selftests/bpf: dummy_st_ops should reject 0 for non-nullable params" ANBZ: #11306 This reverts commit e7d193073a223663612301c659e53795b991ca89. We cannot backport [1] directly now due to too much dependency. So just revert the commit from stable. 
[1] https://lore.kernel.org/all/20240424012821.595216-1-eddyz87@gmail.com/ Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/3977 --- .../selftests/bpf/prog_tests/dummy_st_ops.c | 27 ------------------- 1 file changed, 27 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c index d3d94596ab79..dd926c00f414 100644 --- a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c +++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c @@ -147,31 +147,6 @@ static void test_dummy_sleepable(void) dummy_st_ops_success__destroy(skel); } -/* dummy_st_ops.test_sleepable() parameter is not marked as nullable, - * thus bpf_prog_test_run_opts() below should be rejected as it tries - * to pass NULL for this parameter. - */ -static void test_dummy_sleepable_reject_null(void) -{ - __u64 args[1] = {0}; - LIBBPF_OPTS(bpf_test_run_opts, attr, - .ctx_in = args, - .ctx_size_in = sizeof(args), - ); - struct dummy_st_ops_success *skel; - int fd, err; - - skel = dummy_st_ops_success__open_and_load(); - if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load")) - return; - - fd = bpf_program__fd(skel->progs.test_sleepable); - err = bpf_prog_test_run_opts(fd, &attr); - ASSERT_EQ(err, -EINVAL, "test_run"); - - dummy_st_ops_success__destroy(skel); -} - void test_dummy_st_ops(void) { if (test__start_subtest("dummy_st_ops_attach")) @@ -184,8 +159,6 @@ void test_dummy_st_ops(void) test_dummy_multiple_args(); if (test__start_subtest("dummy_sleepable")) test_dummy_sleepable(); - if (test__start_subtest("dummy_sleepable_reject_null")) - test_dummy_sleepable_reject_null(); RUN_TESTS(dummy_st_ops_fail); } -- Gitee From af1e9826d3fdcab73650522030d38efa37404a06 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Mon, 14 Oct 2024 17:48:59 +0800 Subject: [PATCH 1448/2138] anolis: Revert "selftests/bpf: do not pass NULL for non-nullable params in dummy_st_ops" ANBZ: #11306 This reverts 
commit a1a629fc373c9179d34f5f86c1bc8222edfa0898. We cannot backport [1] directly now due to too much dependency. So just revert the commit from stable. [1] https://lore.kernel.org/all/20240424012821.595216-1-eddyz87@gmail.com/ Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/3977 --- tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c | 7 ++----- tools/testing/selftests/bpf/progs/dummy_st_ops_success.c | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c index dd926c00f414..f43fcb13d2c4 100644 --- a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c +++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c @@ -98,8 +98,7 @@ static void test_dummy_init_ptr_arg(void) static void test_dummy_multiple_args(void) { - struct bpf_dummy_ops_state st = { 7 }; - __u64 args[5] = {(__u64)&st, -100, 0x8a5f, 'c', 0x1234567887654321ULL}; + __u64 args[5] = {0, -100, 0x8a5f, 'c', 0x1234567887654321ULL}; LIBBPF_OPTS(bpf_test_run_opts, attr, .ctx_in = args, .ctx_size_in = sizeof(args), @@ -116,7 +115,6 @@ static void test_dummy_multiple_args(void) fd = bpf_program__fd(skel->progs.test_2); err = bpf_prog_test_run_opts(fd, &attr); ASSERT_OK(err, "test_run"); - args[0] = 7; for (i = 0; i < ARRAY_SIZE(args); i++) { snprintf(name, sizeof(name), "arg %zu", i); ASSERT_EQ(skel->bss->test_2_args[i], args[i], name); @@ -127,8 +125,7 @@ static void test_dummy_multiple_args(void) static void test_dummy_sleepable(void) { - struct bpf_dummy_ops_state st; - __u64 args[1] = {(__u64)&st}; + __u64 args[1] = {0}; LIBBPF_OPTS(bpf_test_run_opts, attr, .ctx_in = args, .ctx_size_in = sizeof(args), diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c index ec0c595d47af..cc7b69b001aa 100644 --- 
a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c +++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c @@ -34,7 +34,7 @@ SEC("struct_ops/test_2") int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a2, char a3, unsigned long a4) { - test_2_args[0] = state->val; + test_2_args[0] = (unsigned long)state; test_2_args[1] = a1; test_2_args[2] = a2; test_2_args[3] = a3; -- Gitee From 3b248fabd7ec080584e89edef71d6e27cf15d945 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Mon, 14 Oct 2024 17:49:09 +0800 Subject: [PATCH 1449/2138] anolis: Revert "selftests/bpf: adjust dummy_st_ops_success to detect additional error" ANBZ: #11306 This reverts commit 264451a364dba5ca6cb2878126a9798dfc0b1a06. We cannot backport [1] directly now due to too much dependency. So just revert the commit from stable. [1] https://lore.kernel.org/all/20240424012821.595216-1-eddyz87@gmail.com/ Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/3977 --- .../selftests/bpf/progs/dummy_st_ops_success.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c index cc7b69b001aa..1efa746c25dc 100644 --- a/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c +++ b/tools/testing/selftests/bpf/progs/dummy_st_ops_success.c @@ -11,17 +11,8 @@ int BPF_PROG(test_1, struct bpf_dummy_ops_state *state) { int ret; - /* Check that 'state' nullable status is detected correctly. - * If 'state' argument would be assumed non-null by verifier - * the code below would be deleted as dead (which it shouldn't). - * Hide it from the compiler behind 'asm' block to avoid - * unnecessary optimizations. 
- */ - asm volatile ( - "if %[state] != 0 goto +2;" - "r0 = 0xf2f3f4f5;" - "exit;" - ::[state]"p"(state)); + if (!state) + return 0xf2f3f4f5; ret = state->val; state->val = 0x5a; -- Gitee From 626d9ef5ae5e9fa2c3baf0c5a638195f6495c08b Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Thu, 14 Sep 2023 12:35:06 -0400 Subject: [PATCH 1450/2138] tracing/selftests: Update kprobe args char/string to match new functions ANBZ: #11302 commit f5d9e8e08f81c9e7c723de7abcce106808f0770c upstream. The function that the kprobe_args_char and kprobes_arg_string attaches to for its test has changed its name once again. Now we need to check for eventfs_create_dir(), and if it exists, use that, otherwise check for eventfs_add_dir() and if that exists use that, otherwise use the original tracefs_create_dir()! Link: https://lore.kernel.org/linux-trace-kernel/20230914163535.487267410@goodmis.org Cc: Mark Rutland Cc: Andrew Morton Cc: Ajay Kaher Acked-by: Masami Hiramatsu (Google) Signed-off-by: Steven Rostedt (Google) Signed-off-by: Yuanhe Shu Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3974 --- .../selftests/ftrace/test.d/kprobe/kprobe_args_char.tc | 4 +++- .../selftests/ftrace/test.d/kprobe/kprobe_args_string.tc | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc index ce5d2e62731f..cd89d9ddfecd 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc @@ -34,7 +34,9 @@ mips*) esac : "Test get argument (1)" -if grep -q eventfs_add_dir available_filter_functions; then +if grep -q eventfs_create_dir available_filter_functions; then + DIR_NAME="eventfs_create_dir" +elif grep -q eventfs_add_dir available_filter_functions; then DIR_NAME="eventfs_add_dir" else DIR_NAME="tracefs_create_dir" diff --git 
a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc index dbc76ca50ab5..07707e81c152 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc @@ -40,7 +40,9 @@ sw_64) esac : "Test get argument (1)" -if grep -q eventfs_add_dir available_filter_functions; then +if grep -q eventfs_create_dir available_filter_functions; then + DIR_NAME="eventfs_create_dir" +elif grep -q eventfs_add_dir available_filter_functions; then DIR_NAME="eventfs_add_dir" else DIR_NAME="tracefs_create_dir" -- Gitee From 8ea7288bbddf832cd99b191906f3fd9355fee45d Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 28 Jun 2024 15:40:54 +0800 Subject: [PATCH 1451/2138] anolis: perf/x86/zhaoxin/uncore: update KX-7000 support ANBZ: #9444 1. Enhance perf kvm guest/host support to allow monitoring of either the host or guest independently. 2. Add architecture print information after successful loading of the KX7000 pmc core driver, indicating that the architecture of KX7000 is shijidadao. 3. Modify the KX8000 in the uncore driver to KX7000. 4. Add logic_op support for the KX7000 uncore. 5. For the KX7000 platform, it is necessary to configure the bit16 (bsPMCDynamicEn_P) of msr1877h to 0 (previously it was defaulted to 1) during the PMC driver loading, so that the KX7000 PMC HIF module can operate normally. 
Signed-off-by: leoliu-oc Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/3443 --- arch/x86/events/zhaoxin/core.c | 84 ++- arch/x86/events/zhaoxin/uncore.c | 1089 ++++++++++++++++-------------- arch/x86/events/zhaoxin/uncore.h | 77 +-- 3 files changed, 667 insertions(+), 583 deletions(-) diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c index 2957b416a6db..e493b176b336 100644 --- a/arch/x86/events/zhaoxin/core.c +++ b/arch/x86/events/zhaoxin/core.c @@ -259,7 +259,10 @@ static void zhaoxin_pmu_disable_all(void) static void zhaoxin_pmu_enable_all(int added) { - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, + x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask); } static inline u64 zhaoxin_pmu_get_status(void) @@ -286,13 +289,31 @@ static inline void zxc_pmu_ack_status(u64 ack) zhaoxin_pmu_disable_all(); } -static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc) +static inline void zhaoxin_set_masks(struct perf_event *event, int idx) { - int idx = hwc->idx - INTEL_PMC_IDX_FIXED; - u64 ctrl_val, mask; + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - mask = 0xfULL << (idx * 4); + if (event->attr.exclude_host) + __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); + if (event->attr.exclude_guest) + __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); +} +static inline void zhaoxin_clear_masks(struct perf_event *event, int idx) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); + __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); +} + +static void zhaoxin_pmu_disable_fixed(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + u64 ctrl_val, mask; + int idx = hwc->idx; + + mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4); 
rdmsrl(hwc->config_base, ctrl_val); ctrl_val &= ~mask; wrmsrl(hwc->config_base, ctrl_val); @@ -301,19 +322,23 @@ static void zhaoxin_pmu_disable_fixed(struct hw_perf_event *hwc) static void zhaoxin_pmu_disable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + zhaoxin_clear_masks(event, idx); if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - zhaoxin_pmu_disable_fixed(hwc); + zhaoxin_pmu_disable_fixed(event); return; } x86_pmu_disable_event(event); } -static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc) +static void zhaoxin_pmu_enable_fixed(struct perf_event *event) { - int idx = hwc->idx - INTEL_PMC_IDX_FIXED; - u64 ctrl_val, bits, mask; + struct hw_perf_event *hwc = &event->hw; + u64 ctrl_val, mask, bits = 0; + int idx = hwc->idx; /* * Enable IRQ generation (0x8), @@ -326,6 +351,7 @@ static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc) if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) bits |= 0x1; + idx -= INTEL_PMC_IDX_FIXED; bits <<= (idx * 4); mask = 0xfULL << (idx * 4); @@ -338,9 +364,12 @@ static void zhaoxin_pmu_enable_fixed(struct hw_perf_event *hwc) static void zhaoxin_pmu_enable_event(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + zhaoxin_set_masks(event, idx); if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { - zhaoxin_pmu_enable_fixed(hwc); + zhaoxin_pmu_enable_fixed(event); return; } @@ -456,6 +485,19 @@ static ssize_t zhaoxin_event_sysfs_show(char *page, u64 config) return x86_event_sysfs_show(page, config, event); } +static struct perf_guest_switch_msr *zhaoxin_guest_get_msrs(int *nr, void *data) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; + + arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; + arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask; + arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask; + *nr 
= 1; + + return arr; +} + static const struct x86_pmu zhaoxin_pmu __initconst = { .name = "zhaoxin", .handle_irq = zhaoxin_pmu_handle_irq, @@ -478,6 +520,8 @@ static const struct x86_pmu zhaoxin_pmu __initconst = { .format_attrs = zx_arch_formats_attr, .events_sysfs_show = zhaoxin_event_sysfs_show, + + .guest_get_msrs = zhaoxin_guest_get_msrs, }; static const struct { int id; char *name; } zx_arch_events_map[] __initconst = { @@ -581,8 +625,8 @@ __init int zhaoxin_pmu_init(void) x86_pmu.event_constraints = wudaokou_event_constraints; - zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515; - zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0x051a; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0x051a; zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0700; zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0709; @@ -595,8 +639,8 @@ __init int zhaoxin_pmu_init(void) x86_pmu.event_constraints = wudaokou_event_constraints; - zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515; - zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0x051a; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0x051a; pr_cont("Lujiazui events, "); break; @@ -604,19 +648,22 @@ __init int zhaoxin_pmu_init(void) case 0x6b: zx_pmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = X86_CONFIG(.event = 0x02, .umask = 0x01, .inv = 0x01, - .cmask = 0x01); + .cmask = 0x01); memcpy(hw_cache_event_ids, lujiazui_hw_cache_event_ids, - sizeof(hw_cache_event_ids)); + sizeof(hw_cache_event_ids)); x86_pmu.event_constraints = wudaokou_event_constraints; - zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x051a; - zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x051a; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0; if (boot_cpu_data.x86_model == 0x5b) pr_cont("Yongfeng events, "); + if 
(boot_cpu_data.x86_model == 0x6b) + pr_cont("Shijidadao events, "); + break; default: return -ENODEV; @@ -639,4 +686,3 @@ __init int zhaoxin_pmu_init(void) return 0; } - diff --git a/arch/x86/events/zhaoxin/uncore.c b/arch/x86/events/zhaoxin/uncore.c index 8d898a10d953..12f331334c40 100644 --- a/arch/x86/events/zhaoxin/uncore.c +++ b/arch/x86/events/zhaoxin/uncore.c @@ -7,7 +7,6 @@ static struct zhaoxin_uncore_type **uncore_msr_uncores = empty_uncore; static struct zhaoxin_uncore_type **uncore_pci_uncores = empty_uncore; static struct zhaoxin_uncore_type **uncore_mmio_uncores = empty_uncore; - static bool pcidrv_registered; static struct pci_driver *uncore_pci_driver; @@ -35,7 +34,7 @@ static int kh40000_pcibus_limit[KH40000_MAX_SUBNODE_NUMBER]; /* KX5000/KX6000 event control */ #define KX5000_UNC_CTL_EV_SEL_MASK 0x000000ff #define KX5000_UNC_CTL_UMASK_MASK 0x0000ff00 -#define KX5000_UNC_CTL_EDGE_DET (1 << 18) +#define KX5000_UNC_CTL_EDGE_DET (1 << 18) #define KX5000_UNC_CTL_EN (1 << 22) #define KX5000_UNC_CTL_INVERT (1 << 23) #define KX5000_UNC_CTL_CMASK_MASK 0x7000000 @@ -53,7 +52,7 @@ static int kh40000_pcibus_limit[KH40000_MAX_SUBNODE_NUMBER]; #define KX5000_UNC_FIXED_CTR_CTRL 0x395 /* KX5000/KX6000 uncore global control */ -#define KX5000_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 4) - 1) +#define KX5000_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 4) - 1) #define KX5000_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) /* KX5000/KX6000 uncore register */ @@ -65,15 +64,14 @@ static int kh40000_pcibus_limit[KH40000_MAX_SUBNODE_NUMBER]; #define KH40000_PMON_CTL_UMASK_MASK 0x0000ff00 #define KH40000_PMON_CTL_RST (1 << 17) #define KH40000_PMON_CTL_EDGE_DET (1 << 18) -#define KH40000_PMON_CTL_EV_SEL_EXT (1 << 21) #define KH40000_PMON_CTL_EN (1 << 22) -#define KH40000_PMON_CTL_INVERT (1 << 23) -#define KH40000_PMON_CTL_TRESH_MASK 0xff000000 +#define KH40000_PMON_CTL_INVERT (1 << 23) +#define KH40000_PMON_CTL_THRESH_MASK 0xff000000 #define KH40000_PMON_RAW_EVENT_MASK (KH40000_PMON_CTL_EV_SEL_MASK | \ 
KH40000_PMON_CTL_UMASK_MASK | \ KH40000_PMON_CTL_EDGE_DET | \ KH40000_PMON_CTL_INVERT | \ - KH40000_PMON_CTL_TRESH_MASK) + KH40000_PMON_CTL_THRESH_MASK) /* KH40000 LLC register*/ #define KH40000_LLC_MSR_PMON_CTL0 0x1660 @@ -142,76 +140,94 @@ static int kh40000_pcibus_limit[KH40000_MAX_SUBNODE_NUMBER]; KH40000_PMON_BOX_CTL_RST_CTRS | \ KH40000_PMON_PCI_BOX_PMON_EN) -/* KX8000 LLC register*/ -#define KX8000_LLC_MSR_PMON_CTL0 0x1979 -#define KX8000_LLC_MSR_PMON_CTR0 0x1975 -#define KX8000_LLC_MSR_PMON_BLK_CTL 0x197e - -/* KX8000 MESH register*/ -#define KX8000_MESH_MSR_PMON_CTL0 0x1983 -#define KX8000_MESH_MSR_PMON_CTR0 0x197f -#define KX8000_MESH_MSR_PMON_BLK_CTL 0x1987 - -/* KX8000 HOMESTOP register*/ -#define KX8000_HOMESTOP_MSR_PMON_CTL0 0x196a -#define KX8000_HOMESTOP_MSR_PMON_CTR0 0x1966 -#define KX8000_HOMESTOP_MSR_PMON_BLK_CTL 0x196e -#define KX8000_HOMESTOP_MSR_PMON_FIXED_CTR 0x1970 -#define KX8000_HOMESTOP_MSR_PMON_FIXED_CTL 0x1971 - -/* KX8000 CCDie ZDI_PL register*/ -#define KX8000_CCD_ZDI_PL_MSR_PMON_CTL0 0x1960 -#define KX8000_CCD_ZDI_PL_MSR_PMON_CTR0 0x195c -#define KX8000_CCD_ZDI_PL_MSR_PMON_BLK_CTL 0x1964 - -/* KX8000 cIODie ZDI_PL register*/ -#define KX8000_IOD_ZDI_PL_MSR_PMON_CTL0 0x1894 -#define KX8000_IOD_ZDI_PL_MSR_PMON_CTR0 0x1890 -#define KX8000_IOD_ZDI_PL_MSR_PMON_BLK_CTL 0x1898 -#define KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR 0x189A -#define KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL 0x189B - -/* KX8000 MC register*/ -#define KX8000_MC_A0_CHy_PMON_FIXED_CTL 0xe30 -#define KX8000_MC_A0_CHy_PMON_FIXED_CTR 0xe08 -#define KX8000_MC_A0_CHy_PMON_CTR0 0xe00 -#define KX8000_MC_A0_CHy_PMON_CTL0 0xe20 -#define KX8000_MC_A0_CHy_PMON_BLK_CTL 0xe34 - -#define KX8000_MC_A1_CHy_PMON_FIXED_CTL 0xe70 -#define KX8000_MC_A1_CHy_PMON_FIXED_CTR 0xe48 -#define KX8000_MC_A1_CHy_PMON_CTR0 0xe40 -#define KX8000_MC_A1_CHy_PMON_CTL0 0xe60 -#define KX8000_MC_A1_CHy_PMON_BLK_CTL 0xe74 - -#define KX8000_MC_B0_CHy_PMON_FIXED_CTL 0xeb0 -#define KX8000_MC_B0_CHy_PMON_FIXED_CTR 0xe88 
-#define KX8000_MC_B0_CHy_PMON_CTR0 0xe80 -#define KX8000_MC_B0_CHy_PMON_CTL0 0xea0 -#define KX8000_MC_B0_CHy_PMON_BLK_CTL 0xeb4 - -#define KX8000_MC_B1_CHy_PMON_FIXED_CTL 0xef0 -#define KX8000_MC_B1_CHy_PMON_FIXED_CTR 0xec8 -#define KX8000_MC_B1_CHy_PMON_CTR0 0xec0 -#define KX8000_MC_B1_CHy_PMON_CTL0 0xee0 -#define KX8000_MC_B1_CHy_PMON_BLK_CTL 0xef4 - -#define KX8000_ZDI_DL_MMIO_PMON_CTR0 0xf00 -#define KX8000_ZDI_DL_MMIO_PMON_CTL0 0xf28 -#define KX8000_ZDI_DL_MMIO_PMON_BLK_CTL 0xf44 -#define KX8000_IOD_ZDI_DL_MMIO_BASE_OFFSET 0x168 -#define KX8000_CCD_ZDI_DL_MMIO_BASE_OFFSET 0x170 -#define KX8000_ZDI_DL_MMIO_BASE_MASK 0x3fff -#define KX8000_ZDI_DL_MMIO_BASE_MASK 0x3fff -#define KX8000_ZDI_DL_MMIO_MEM0_MASK 0xfffff000 -#define KX8000_ZDI_DL_MMIO_SIZE 0x1000 - - +/* KX7000 event control */ +#define KX7000_PMON_CTL_EV_SEL_MASK 0x000000ff +#define KX7000_PMON_CTL_UMASK_MASK 0x0000ff00 +#define KX7000_PMON_CTL_RST (1 << 17) +#define KX7000_PMON_CTL_EDGE_DET (1 << 18) +#define KX7000_PMON_CTL_LOGIC_OP0 (1 << 19) +#define KX7000_PMON_CTL_LOGIC_OP1 (1 << 21) +#define KX7000_PMON_CTL_EN (1 << 22) +#define KX7000_PMON_CTL_INVERT (1 << 23) +#define KX7000_PMON_CTL_THRESH_MASK 0xff000000 +#define KX7000_PMON_RAW_EVENT_MASK (KX7000_PMON_CTL_EV_SEL_MASK | \ + KX7000_PMON_CTL_UMASK_MASK | \ + KX7000_PMON_CTL_EDGE_DET | \ + KX7000_PMON_CTL_LOGIC_OP0 | \ + KX7000_PMON_CTL_LOGIC_OP1 | \ + KX7000_PMON_CTL_INVERT | \ + KX7000_PMON_CTL_THRESH_MASK) + +/* KX7000 LLC register*/ +#define KX7000_LLC_MSR_PMON_CTL0 0x1979 +#define KX7000_LLC_MSR_PMON_CTR0 0x1975 +#define KX7000_LLC_MSR_PMON_BLK_CTL 0x197e + +/* KX7000 MESH register*/ +#define KX7000_MESH_MSR_PMON_CTL0 0x1983 +#define KX7000_MESH_MSR_PMON_CTR0 0x197f +#define KX7000_MESH_MSR_PMON_BLK_CTL 0x1987 + +/* KX7000 HOMESTOP register*/ +#define KX7000_HOMESTOP_MSR_PMON_CTL0 0x196a +#define KX7000_HOMESTOP_MSR_PMON_CTR0 0x1966 +#define KX7000_HOMESTOP_MSR_PMON_BLK_CTL 0x196e +#define KX7000_HOMESTOP_MSR_PMON_FIXED_CTR 0x1970 
+#define KX7000_HOMESTOP_MSR_PMON_FIXED_CTL 0x1971 + +/* KX7000 CCDie ZDI_PL register*/ +#define KX7000_CCD_ZDI_PL_MSR_PMON_CTL0 0x1960 +#define KX7000_CCD_ZDI_PL_MSR_PMON_CTR0 0x195c +#define KX7000_CCD_ZDI_PL_MSR_PMON_BLK_CTL 0x1964 + +/* KX7000 cIODie ZDI_PL register*/ +#define KX7000_IOD_ZDI_PL_MSR_PMON_CTL0 0x1894 +#define KX7000_IOD_ZDI_PL_MSR_PMON_CTR0 0x1890 +#define KX7000_IOD_ZDI_PL_MSR_PMON_BLK_CTL 0x1898 +#define KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR 0x189A +#define KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL 0x189B + +/* KX7000 MC register*/ +#define KX7000_MC_A0_CHy_PMON_FIXED_CTL 0xe30 +#define KX7000_MC_A0_CHy_PMON_FIXED_CTR 0xe08 +#define KX7000_MC_A0_CHy_PMON_CTR0 0xe00 +#define KX7000_MC_A0_CHy_PMON_CTL0 0xe20 +#define KX7000_MC_A0_CHy_PMON_BLK_CTL 0xe34 + +#define KX7000_MC_A1_CHy_PMON_FIXED_CTL 0xe70 +#define KX7000_MC_A1_CHy_PMON_FIXED_CTR 0xe48 +#define KX7000_MC_A1_CHy_PMON_CTR0 0xe40 +#define KX7000_MC_A1_CHy_PMON_CTL0 0xe60 +#define KX7000_MC_A1_CHy_PMON_BLK_CTL 0xe74 + +#define KX7000_MC_B0_CHy_PMON_FIXED_CTL 0xeb0 +#define KX7000_MC_B0_CHy_PMON_FIXED_CTR 0xe88 +#define KX7000_MC_B0_CHy_PMON_CTR0 0xe80 +#define KX7000_MC_B0_CHy_PMON_CTL0 0xea0 +#define KX7000_MC_B0_CHy_PMON_BLK_CTL 0xeb4 + +#define KX7000_MC_B1_CHy_PMON_FIXED_CTL 0xef0 +#define KX7000_MC_B1_CHy_PMON_FIXED_CTR 0xec8 +#define KX7000_MC_B1_CHy_PMON_CTR0 0xec0 +#define KX7000_MC_B1_CHy_PMON_CTL0 0xee0 +#define KX7000_MC_B1_CHy_PMON_BLK_CTL 0xef4 + +#define KX7000_ZDI_DL_MMIO_PMON_CTR0 0xf00 +#define KX7000_ZDI_DL_MMIO_PMON_CTL0 0xf28 +#define KX7000_ZDI_DL_MMIO_PMON_BLK_CTL 0xf44 +#define KX7000_IOD_ZDI_DL_MMIO_BASE_OFFSET 0x168 +#define KX7000_CCD_ZDI_DL_MMIO_BASE_OFFSET 0x170 +#define KX7000_ZDI_DL_MMIO_BASE_MASK 0x3fff +#define KX7000_ZDI_DL_MMIO_BASE_MASK 0x3fff +#define KX7000_ZDI_DL_MMIO_MEM0_MASK 0xfffff000 +#define KX7000_ZDI_DL_MMIO_SIZE 0x1000 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); 
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); +DEFINE_UNCORE_FORMAT_ATTR(logic_op0, logic_op0, "config:19"); +DEFINE_UNCORE_FORMAT_ATTR(logic_op1, logic_op1, "config:21"); DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); DEFINE_UNCORE_FORMAT_ATTR(cmask3, cmask, "config:24-26"); DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); @@ -382,24 +398,24 @@ DEFINE_PER_CPU(cpumask_t, zx_subnode_core_bits); static void zx_gen_core_map(void) { - int i, nr, cpu; + int cpu, i; int cluster_id, subnode_id; for_each_present_cpu(cpu) { cluster_id = zx_topology_cluster_id(cpu); - for (i = 0; i < 4; i++) { - nr = (cluster_id << 2) + i; - cpumask_set_cpu(nr, &per_cpu(zx_cluster_core_bits, cpu)); + for_each_present_cpu(i) { + if (zx_topology_cluster_id(i) == cluster_id) + cpumask_set_cpu(i, &per_cpu(zx_cluster_core_bits, cpu)); } } for_each_present_cpu(cpu) { subnode_id = zx_topology_subnode_id(cpu); - for (i = 0; i < 8; i++) { - nr = (subnode_id << 3) + i; - cpumask_set_cpu(nr, &per_cpu(zx_subnode_core_bits, cpu)); + for_each_present_cpu(i) { + if (zx_topology_subnode_id(i) == subnode_id) + cpumask_set_cpu(i, &per_cpu(zx_subnode_core_bits, cpu)); } } } @@ -452,8 +468,8 @@ static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_e return count; } -static void uncore_assign_hw_event(struct zhaoxin_uncore_box *box, - struct perf_event *event, int idx) +static void uncore_assign_hw_event(struct zhaoxin_uncore_box *box, struct perf_event *event, + int idx) { struct hw_perf_event *hwc = &event->hw; @@ -495,7 +511,7 @@ void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event /*KX5000/KX6000 uncore ops start*/ static void kx5000_uncore_msr_disable_event(struct zhaoxin_uncore_box *box, - struct perf_event *event) + struct perf_event *event) { wrmsrl(event->hw.config_base, 0); } @@ -511,8 +527,7 @@ static void kx5000_uncore_msr_enable_box(struct zhaoxin_uncore_box *box) KX5000_UNC_GLOBAL_CTL_EN_PC_ALL | KX5000_UNC_GLOBAL_CTL_EN_FC); } 
-static void kx5000_uncore_msr_enable_event(struct zhaoxin_uncore_box *box, - struct perf_event *event) +static void kx5000_uncore_msr_enable_event(struct zhaoxin_uncore_box *box, struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -550,7 +565,7 @@ static struct zhaoxin_uncore_ops kx5000_uncore_msr_ops = { static struct zhaoxin_uncore_type kx5000_uncore_box = { .name = "", - .num_counters = 4, + .num_counters = 4, .num_boxes = 1, .perf_ctr_bits = 48, .fixed_ctr_bits = 48, @@ -572,7 +587,7 @@ static struct zhaoxin_uncore_type *kx5000_msr_uncores[] = { /*KH40000 msr ops start*/ static void kh40000_uncore_msr_disable_event(struct zhaoxin_uncore_box *box, - struct perf_event *event) + struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -580,7 +595,7 @@ static void kh40000_uncore_msr_disable_event(struct zhaoxin_uncore_box *box, } static void kh40000_uncore_msr_enable_event(struct zhaoxin_uncore_box *box, - struct perf_event *event) + struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -650,57 +665,57 @@ static struct uncore_event_desc kh40000_uncore_zzi_box_events[] = { }; static struct zhaoxin_uncore_ops kh40000_uncore_msr_ops = { - .init_box = kh40000_uncore_msr_init_box, - .disable_box = kh40000_uncore_msr_disable_box, - .enable_box = kh40000_uncore_msr_enable_box, - .disable_event = kh40000_uncore_msr_disable_event, - .enable_event = kh40000_uncore_msr_enable_event, - .read_counter = uncore_msr_read_counter, + .init_box = kh40000_uncore_msr_init_box, + .disable_box = kh40000_uncore_msr_disable_box, + .enable_box = kh40000_uncore_msr_enable_box, + .disable_event = kh40000_uncore_msr_disable_event, + .enable_event = kh40000_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, }; static struct zhaoxin_uncore_type kh40000_uncore_llc_box = { - .name = "llc", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .event_ctl = KH40000_LLC_MSR_PMON_CTL0, - .perf_ctr = 
KH40000_LLC_MSR_PMON_CTR0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_LLC_MSR_PMON_BLK_CTL, - .event_descs = kh40000_uncore_llc_box_events, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kh40000_uncore_format_group, + .name = "llc", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = KH40000_LLC_MSR_PMON_CTL0, + .perf_ctr = KH40000_LLC_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_LLC_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_llc_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, }; static struct zhaoxin_uncore_type kh40000_uncore_hif_box = { - .name = "hif", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .event_ctl = KH40000_HIF_MSR_PMON_CTL0, - .perf_ctr = KH40000_HIF_MSR_PMON_CTR0, - .fixed_ctr = KH40000_HIF_MSR_PMON_FIXED_CTR, - .fixed_ctl = KH40000_HIF_MSR_PMON_FIXED_CTL, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_HIF_MSR_PMON_BLK_CTL, - .event_descs = kh40000_uncore_hif_box_events, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kh40000_uncore_format_group, + .name = "hif", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KH40000_HIF_MSR_PMON_CTL0, + .perf_ctr = KH40000_HIF_MSR_PMON_CTR0, + .fixed_ctr = KH40000_HIF_MSR_PMON_FIXED_CTR, + .fixed_ctl = KH40000_HIF_MSR_PMON_FIXED_CTL, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_HIF_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_hif_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, }; static struct zhaoxin_uncore_type kh40000_uncore_zzi_box = { - .name = "zzi", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .event_ctl = KH40000_ZZI_MSR_PMON_CTL0, - .perf_ctr = KH40000_ZZI_MSR_PMON_CTR0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_ZZI_MSR_PMON_BLK_CTL, 
- .event_descs = kh40000_uncore_zzi_box_events, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kh40000_uncore_format_group, + .name = "zzi", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = KH40000_ZZI_MSR_PMON_CTL0, + .perf_ctr = KH40000_ZZI_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZZI_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_zzi_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, }; static struct zhaoxin_uncore_type *kh40000_msr_uncores[] = { @@ -713,7 +728,7 @@ static struct zhaoxin_uncore_type *kh40000_msr_uncores[] = { /*KH40000 pci ops start*/ static void kh40000_uncore_pci_disable_event(struct zhaoxin_uncore_box *box, - struct perf_event *event) + struct perf_event *event) { struct pci_dev *pdev = box->pci_dev; struct hw_perf_event *hwc = &event->hw; @@ -722,7 +737,7 @@ static void kh40000_uncore_pci_disable_event(struct zhaoxin_uncore_box *box, } static void kh40000_uncore_pci_enable_event(struct zhaoxin_uncore_box *box, - struct perf_event *event) + struct perf_event *event) { struct pci_dev *pdev = box->pci_dev; struct hw_perf_event *hwc = &event->hw; @@ -754,8 +769,7 @@ static void kh40000_uncore_pci_enable_box(struct zhaoxin_uncore_box *box) } } -static u64 kh40000_uncore_pci_read_counter(struct zhaoxin_uncore_box *box, - struct perf_event *event) +static u64 kh40000_uncore_pci_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) { struct pci_dev *pdev = box->pci_dev; struct hw_perf_event *hwc = &event->hw; @@ -796,102 +810,102 @@ static struct uncore_event_desc kh40000_uncore_pxptrf_events[] = { }; static struct zhaoxin_uncore_ops kh40000_uncore_pci_ops = { - .init_box = kh40000_uncore_pci_init_box, - .disable_box = kh40000_uncore_pci_disable_box, - .enable_box = kh40000_uncore_pci_enable_box, - .disable_event = kh40000_uncore_pci_disable_event, - .enable_event = kh40000_uncore_pci_enable_event, - .read_counter 
= kh40000_uncore_pci_read_counter + .init_box = kh40000_uncore_pci_init_box, + .disable_box = kh40000_uncore_pci_disable_box, + .enable_box = kh40000_uncore_pci_enable_box, + .disable_event = kh40000_uncore_pci_disable_event, + .enable_event = kh40000_uncore_pci_enable_event, + .read_counter = kh40000_uncore_pci_read_counter }; static struct zhaoxin_uncore_type kh40000_uncore_mc0 = { - .name = "mc0", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .fixed_ctr = KH40000_MC0_CHy_PMON_FIXED_CTR, - .fixed_ctl = KH40000_MC0_CHy_PMON_FIXED_CTL, - .event_descs = kh40000_uncore_imc_events, - .perf_ctr = KH40000_MC0_CHy_PMON_CTR0, - .event_ctl = KH40000_MC0_CHy_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_MC0_CHy_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = &kh40000_uncore_format_group + .name = "mc0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KH40000_MC0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KH40000_MC0_CHy_PMON_FIXED_CTL, + .event_descs = kh40000_uncore_imc_events, + .perf_ctr = KH40000_MC0_CHy_PMON_CTR0, + .event_ctl = KH40000_MC0_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_MC0_CHy_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group }; static struct zhaoxin_uncore_type kh40000_uncore_mc1 = { - .name = "mc1", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .fixed_ctr = KH40000_MC1_CHy_PMON_FIXED_CTR, - .fixed_ctl = KH40000_MC1_CHy_PMON_FIXED_CTL, - .event_descs = kh40000_uncore_imc_events, - .perf_ctr = KH40000_MC1_CHy_PMON_CTR0, - .event_ctl = KH40000_MC1_CHy_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_MC1_CHy_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = &kh40000_uncore_format_group + .name = "mc1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits 
= 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KH40000_MC1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KH40000_MC1_CHy_PMON_FIXED_CTL, + .event_descs = kh40000_uncore_imc_events, + .perf_ctr = KH40000_MC1_CHy_PMON_CTR0, + .event_ctl = KH40000_MC1_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_MC1_CHy_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group }; static struct zhaoxin_uncore_type kh40000_uncore_pci = { - .name = "pci", - .num_counters = 4, - .num_boxes = 10, - .perf_ctr_bits = 48, - .event_descs = kh40000_uncore_pci_events, - .perf_ctr = KH40000_PCI_PMON_CTR0, - .event_ctl = KH40000_PCI_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_PCI_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = &kh40000_uncore_format_group + .name = "pci", + .num_counters = 4, + .num_boxes = 10, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pci_events, + .perf_ctr = KH40000_PCI_PMON_CTR0, + .event_ctl = KH40000_PCI_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PCI_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group }; static struct zhaoxin_uncore_type kh40000_uncore_zpi_dll = { - .name = "zpi_dll", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .event_descs = kh40000_uncore_zpi_dll_events, - .perf_ctr = KH40000_ZPI_DLL_PMON_CTR0, - .event_ctl = KH40000_ZPI_DLL_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_ZPI_DLL_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = &kh40000_uncore_format_group + .name = "zpi_dll", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_zpi_dll_events, + .perf_ctr = KH40000_ZPI_DLL_PMON_CTR0, + .event_ctl = KH40000_ZPI_DLL_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZPI_DLL_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + 
.format_group = &kh40000_uncore_format_group }; static struct zhaoxin_uncore_type kh40000_uncore_zdi_dll = { - .name = "zdi_dll", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .event_descs = kh40000_uncore_zdi_dll_events, - .perf_ctr = KH40000_ZDI_DLL_PMON_CTR0, - .event_ctl = KH40000_ZDI_DLL_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_ZDI_DLL_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = &kh40000_uncore_format_group + .name = "zdi_dll", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_zdi_dll_events, + .perf_ctr = KH40000_ZDI_DLL_PMON_CTR0, + .event_ctl = KH40000_ZDI_DLL_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZDI_DLL_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group }; static struct zhaoxin_uncore_type kh40000_uncore_pxptrf = { - .name = "pxptrf", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .event_descs = kh40000_uncore_pxptrf_events, - .perf_ctr = KH40000_PXPTRF_PMON_CTR0, - .event_ctl = KH40000_PXPTRF_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_PXPTRF_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = &kh40000_uncore_format_group + .name = "pxptrf", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pxptrf_events, + .perf_ctr = KH40000_PXPTRF_PMON_CTR0, + .event_ctl = KH40000_PXPTRF_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PXPTRF_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group }; enum { @@ -904,12 +918,12 @@ enum { }; static struct zhaoxin_uncore_type *kh40000_pci_uncores[] = { - [KH40000_PCI_UNCORE_MC0] = &kh40000_uncore_mc0, - [KH40000_PCI_UNCORE_MC1] = &kh40000_uncore_mc1, - [KH40000_PCI_UNCORE_PCI] = &kh40000_uncore_pci, - [KH40000_PCI_UNCORE_ZPI_DLL] = 
&kh40000_uncore_zpi_dll, - [KH40000_PCI_UNCORE_ZDI_DLL] = &kh40000_uncore_zdi_dll, - [KH40000_PCI_UNCORE_PXPTRF] = &kh40000_uncore_pxptrf, + [KH40000_PCI_UNCORE_MC0] = &kh40000_uncore_mc0, + [KH40000_PCI_UNCORE_MC1] = &kh40000_uncore_mc1, + [KH40000_PCI_UNCORE_PCI] = &kh40000_uncore_pci, + [KH40000_PCI_UNCORE_ZPI_DLL] = &kh40000_uncore_zpi_dll, + [KH40000_PCI_UNCORE_ZDI_DLL] = &kh40000_uncore_zdi_dll, + [KH40000_PCI_UNCORE_PXPTRF] = &kh40000_uncore_pxptrf, NULL, }; @@ -988,337 +1002,379 @@ static const struct pci_device_id kh40000_uncore_pci_ids[] = { }; static struct pci_driver kh40000_uncore_pci_driver = { - .name = "kh40000_uncore", - .id_table = kh40000_uncore_pci_ids, + .name = "kh40000_uncore", + .id_table = kh40000_uncore_pci_ids, }; /*KH40000 pci ops end*/ - -/*KX8000 msr ops start*/ -static unsigned int kx8000_uncore_msr_offsets[] = { +/*KX7000 msr ops start*/ +static unsigned int kx7000_uncore_msr_offsets[] = { 0x0, 0x13, 0x27, 0x3b, 0x4f, 0x63, 0x77, 0x8b }; -static struct zhaoxin_uncore_type kx8000_uncore_mesh_box = { - .name = "mesh", - .num_counters = 4, - .num_boxes = 8, - .perf_ctr_bits = 48, - .event_ctl = KX8000_MESH_MSR_PMON_CTL0, - .perf_ctr = KX8000_MESH_MSR_PMON_CTR0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KX8000_MESH_MSR_PMON_BLK_CTL, - .msr_offsets = kx8000_uncore_msr_offsets, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kh40000_uncore_format_group, -}; - -static struct zhaoxin_uncore_type kx8000_uncore_llc_box = { - .name = "llc", - .num_counters = 4, - .num_boxes = 8, - .perf_ctr_bits = 48, - .event_ctl = KX8000_LLC_MSR_PMON_CTL0, - .perf_ctr = KX8000_LLC_MSR_PMON_CTR0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KX8000_LLC_MSR_PMON_BLK_CTL, - .msr_offsets = kx8000_uncore_msr_offsets, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kh40000_uncore_format_group, -}; - -static struct zhaoxin_uncore_type kx8000_uncore_homestop = { - .name = "homestop", - .num_counters = 4, - .num_boxes = 1, - 
.perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .event_ctl = KX8000_HOMESTOP_MSR_PMON_CTL0, - .perf_ctr = KX8000_HOMESTOP_MSR_PMON_CTR0, - .fixed_ctr = KX8000_HOMESTOP_MSR_PMON_FIXED_CTR, - .fixed_ctl = KX8000_HOMESTOP_MSR_PMON_FIXED_CTL, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KX8000_HOMESTOP_MSR_PMON_BLK_CTL, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kh40000_uncore_format_group, -}; - -static struct zhaoxin_uncore_type kx8000_uncore_ccd_zdi_pl = { - .name = "ccd_zdi_pl", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .event_ctl = KX8000_CCD_ZDI_PL_MSR_PMON_CTL0, - .perf_ctr = KX8000_CCD_ZDI_PL_MSR_PMON_CTR0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KX8000_CCD_ZDI_PL_MSR_PMON_BLK_CTL, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kh40000_uncore_format_group, -}; - -static struct zhaoxin_uncore_type kx8000_uncore_iod_zdi_pl = { - .name = "iod_zdi_pl", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .event_ctl = KX8000_IOD_ZDI_PL_MSR_PMON_CTL0, - .perf_ctr = KX8000_IOD_ZDI_PL_MSR_PMON_CTR0, - .fixed_ctr = KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR, - .fixed_ctl = KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KX8000_IOD_ZDI_PL_MSR_PMON_BLK_CTL, - .ops = &kh40000_uncore_msr_ops, - .format_group = &kh40000_uncore_format_group, -}; - - -static struct zhaoxin_uncore_type *kx8000_msr_uncores[] = { - &kx8000_uncore_llc_box, - &kx8000_uncore_mesh_box, - &kh40000_uncore_hif_box, - &kx8000_uncore_homestop, - &kx8000_uncore_ccd_zdi_pl, - &kx8000_uncore_iod_zdi_pl, +static struct attribute *kx7000_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_logic_op0.attr, + &format_attr_logic_op1.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, NULL, }; -/*KX8000 msr ops end*/ -/*KX8000 pci ops start*/ -static unsigned int 
kx8000_mc_ctr_lh_offsets[] = { +static struct attribute_group kx7000_uncore_format_group = { + .name = "format", + .attrs = kx7000_uncore_formats_attr, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_mesh_box = { + .name = "mesh", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = KX7000_MESH_MSR_PMON_CTL0, + .perf_ctr = KX7000_MESH_MSR_PMON_CTR0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MESH_MSR_PMON_BLK_CTL, + .msr_offsets = kx7000_uncore_msr_offsets, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_llc_box = { + .name = "llc", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = KX7000_LLC_MSR_PMON_CTL0, + .perf_ctr = KX7000_LLC_MSR_PMON_CTR0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_LLC_MSR_PMON_BLK_CTL, + .msr_offsets = kx7000_uncore_msr_offsets, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_hif_box = { + .name = "hif", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KH40000_HIF_MSR_PMON_CTL0, + .perf_ctr = KH40000_HIF_MSR_PMON_CTR0, + .fixed_ctr = KH40000_HIF_MSR_PMON_FIXED_CTR, + .fixed_ctl = KH40000_HIF_MSR_PMON_FIXED_CTL, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_HIF_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_homestop = { + .name = "homestop", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX7000_HOMESTOP_MSR_PMON_CTL0, + .perf_ctr = KX7000_HOMESTOP_MSR_PMON_CTR0, + .fixed_ctr = KX7000_HOMESTOP_MSR_PMON_FIXED_CTR, + .fixed_ctl = KX7000_HOMESTOP_MSR_PMON_FIXED_CTL, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = 
KX7000_HOMESTOP_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_ccd_zdi_pl = { + .name = "ccd_zdi_pl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX7000_CCD_ZDI_PL_MSR_PMON_CTL0, + .perf_ctr = KX7000_CCD_ZDI_PL_MSR_PMON_CTR0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_CCD_ZDI_PL_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_iod_zdi_pl = { + .name = "iod_zdi_pl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_CTL0, + .perf_ctr = KX7000_IOD_ZDI_PL_MSR_PMON_CTR0, + .fixed_ctr = KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR, + .fixed_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_IOD_ZDI_PL_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kx7000_uncore_format_group, +}; + + +static struct zhaoxin_uncore_type *kx7000_msr_uncores[] = { + &kx7000_uncore_llc_box, + &kx7000_uncore_mesh_box, + &kx7000_uncore_hif_box, + &kx7000_uncore_homestop, + &kx7000_uncore_ccd_zdi_pl, + &kx7000_uncore_iod_zdi_pl, + NULL, +}; +/*KX7000 msr ops end*/ + +/*KX7000 pci ops start*/ +static unsigned int kx7000_mc_ctr_lh_offsets[] = { 0xc, 0xe, 0x10, 0x12, 0x14 }; -static u64 kx8000_uncore_pci_mc_read_counter(struct zhaoxin_uncore_box *box, - struct perf_event *event) +static u64 kx7000_uncore_pci_mc_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) { struct pci_dev *pdev = box->pci_dev; struct hw_perf_event *hwc = &event->hw; u64 count = 0; pci_read_config_word(pdev, hwc->event_base, (u16 *)&count + 3); - pci_read_config_dword(pdev, hwc->event_base + kx8000_mc_ctr_lh_offsets[hwc->idx], - (u32 *)&count); + pci_read_config_dword(pdev, 
hwc->event_base + kx7000_mc_ctr_lh_offsets[hwc->idx], + (u32 *)&count); return count; } -static struct zhaoxin_uncore_ops kx8000_uncore_pci_mc_ops = { - .init_box = kh40000_uncore_pci_init_box, - .disable_box = kh40000_uncore_pci_disable_box, - .enable_box = kh40000_uncore_pci_enable_box, - .disable_event = kh40000_uncore_pci_disable_event, - .enable_event = kh40000_uncore_pci_enable_event, - .read_counter = kx8000_uncore_pci_mc_read_counter +static struct zhaoxin_uncore_ops kx7000_uncore_pci_mc_ops = { + .init_box = kh40000_uncore_pci_init_box, + .disable_box = kh40000_uncore_pci_disable_box, + .enable_box = kh40000_uncore_pci_enable_box, + .disable_event = kh40000_uncore_pci_disable_event, + .enable_event = kh40000_uncore_pci_enable_event, + .read_counter = kx7000_uncore_pci_mc_read_counter }; -static struct zhaoxin_uncore_type kx8000_uncore_mc_a0 = { - .name = "mc_a0", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .fixed_ctr = KX8000_MC_A0_CHy_PMON_FIXED_CTR, - .fixed_ctl = KX8000_MC_A0_CHy_PMON_FIXED_CTL, - .perf_ctr = KX8000_MC_A0_CHy_PMON_CTR0, - .event_ctl = KX8000_MC_A0_CHy_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KX8000_MC_A0_CHy_PMON_BLK_CTL, - .ops = &kx8000_uncore_pci_mc_ops, - .format_group = &kh40000_uncore_format_group -}; - -static struct zhaoxin_uncore_type kx8000_uncore_mc_a1 = { - .name = "mc_a1", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .fixed_ctr = KX8000_MC_A1_CHy_PMON_FIXED_CTR, - .fixed_ctl = KX8000_MC_A1_CHy_PMON_FIXED_CTL, - .perf_ctr = KX8000_MC_A1_CHy_PMON_CTR0, - .event_ctl = KX8000_MC_A1_CHy_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KX8000_MC_A1_CHy_PMON_BLK_CTL, - .ops = &kx8000_uncore_pci_mc_ops, - .format_group = &kh40000_uncore_format_group -}; - -static struct zhaoxin_uncore_type kx8000_uncore_mc_b0 = { - .name = "mc_b0", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - 
.fixed_ctr_bits = 48, - .fixed_ctr = KX8000_MC_B0_CHy_PMON_FIXED_CTR, - .fixed_ctl = KX8000_MC_B0_CHy_PMON_FIXED_CTL, - .perf_ctr = KX8000_MC_B0_CHy_PMON_CTR0, - .event_ctl = KX8000_MC_B0_CHy_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KX8000_MC_B0_CHy_PMON_BLK_CTL, - .ops = &kx8000_uncore_pci_mc_ops, - .format_group = &kh40000_uncore_format_group -}; - -static struct zhaoxin_uncore_type kx8000_uncore_mc_b1 = { - .name = "mc_b1", - .num_counters = 4, - .num_boxes = 1, - .perf_ctr_bits = 48, - .fixed_ctr_bits = 48, - .fixed_ctr = KX8000_MC_B1_CHy_PMON_FIXED_CTR, - .fixed_ctl = KX8000_MC_B1_CHy_PMON_FIXED_CTL, - .perf_ctr = KX8000_MC_B1_CHy_PMON_CTR0, - .event_ctl = KX8000_MC_B1_CHy_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KX8000_MC_B1_CHy_PMON_BLK_CTL, - .ops = &kx8000_uncore_pci_mc_ops, - .format_group = &kh40000_uncore_format_group -}; - -static struct zhaoxin_uncore_type kx8000_uncore_pci = { - .name = "pci", - .num_counters = 4, - .num_boxes = 17, - .perf_ctr_bits = 48, - .event_descs = kh40000_uncore_pci_events, - .perf_ctr = KH40000_PCI_PMON_CTR0, - .event_ctl = KH40000_PCI_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KH40000_PCI_PMON_BLK_CTL, - .ops = &kh40000_uncore_pci_ops, - .format_group = &kh40000_uncore_format_group +static struct zhaoxin_uncore_type kx7000_uncore_mc_a0 = { + .name = "mc_a0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_A0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_A0_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_A0_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_A0_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_A0_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_mc_a1 = { + .name = "mc_a1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + 
.fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_A1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_A1_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_A1_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_A1_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_A1_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_mc_b0 = { + .name = "mc_b0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_B0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_B0_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_B0_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_B0_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_B0_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_mc_b1 = { + .name = "mc_b1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX7000_MC_B1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX7000_MC_B1_CHy_PMON_FIXED_CTL, + .perf_ctr = KX7000_MC_B1_CHy_PMON_CTR0, + .event_ctl = KX7000_MC_B1_CHy_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_MC_B1_CHy_PMON_BLK_CTL, + .ops = &kx7000_uncore_pci_mc_ops, + .format_group = &kx7000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx7000_uncore_pci = { + .name = "pci", + .num_counters = 4, + .num_boxes = 17, + .perf_ctr_bits = 48, + .perf_ctr = KH40000_PCI_PMON_CTR0, + .event_ctl = KH40000_PCI_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PCI_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kx7000_uncore_format_group, }; +static struct zhaoxin_uncore_type kx7000_uncore_pxptrf = { + .name = "pxptrf", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pxptrf_events, + 
.perf_ctr = KH40000_PXPTRF_PMON_CTR0, + .event_ctl = KH40000_PXPTRF_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PXPTRF_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kx7000_uncore_format_group, +}; enum { - KX8000_PCI_UNCORE_MC_A0, - KX8000_PCI_UNCORE_MC_A1, - KX8000_PCI_UNCORE_MC_B0, - KX8000_PCI_UNCORE_MC_B1, - KX8000_PCI_UNCORE_PCI, - KX8000_PCI_UNCORE_PXPTRF, -}; - -static struct zhaoxin_uncore_type *kx8000_pci_uncores[] = { - [KX8000_PCI_UNCORE_MC_A0] = &kx8000_uncore_mc_a0, - [KX8000_PCI_UNCORE_MC_A1] = &kx8000_uncore_mc_a1, - [KX8000_PCI_UNCORE_MC_B0] = &kx8000_uncore_mc_b0, - [KX8000_PCI_UNCORE_MC_B1] = &kx8000_uncore_mc_b1, - [KX8000_PCI_UNCORE_PCI] = &kx8000_uncore_pci, - [KX8000_PCI_UNCORE_PXPTRF] = &kh40000_uncore_pxptrf, + KX7000_PCI_UNCORE_MC_A0, + KX7000_PCI_UNCORE_MC_A1, + KX7000_PCI_UNCORE_MC_B0, + KX7000_PCI_UNCORE_MC_B1, + KX7000_PCI_UNCORE_PCI, + KX7000_PCI_UNCORE_PXPTRF, +}; + +static struct zhaoxin_uncore_type *kx7000_pci_uncores[] = { + [KX7000_PCI_UNCORE_MC_A0] = &kx7000_uncore_mc_a0, + [KX7000_PCI_UNCORE_MC_A1] = &kx7000_uncore_mc_a1, + [KX7000_PCI_UNCORE_MC_B0] = &kx7000_uncore_mc_b0, + [KX7000_PCI_UNCORE_MC_B1] = &kx7000_uncore_mc_b1, + [KX7000_PCI_UNCORE_PCI] = &kx7000_uncore_pci, + [KX7000_PCI_UNCORE_PXPTRF] = &kx7000_uncore_pxptrf, NULL, }; -static const struct pci_device_id kx8000_uncore_pci_ids[] = { +static const struct pci_device_id kx7000_uncore_pci_ids[] = { { /* MC Channe A0/A1/B0/B1 */ PCI_DEVICE(0x1D17, 0x31B2), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_MC_A0, 0), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_MC_A0, 0), }, { /* PCIE D2F0 */ PCI_DEVICE(0x1D17, 0x0717), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 0), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 0), }, { /* PCIE D2F1 */ PCI_DEVICE(0x1D17, 0x0718), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 1), + .driver_data = 
UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 1), }, { /* PCIE D2F2 */ PCI_DEVICE(0x1D17, 0x0733), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 2), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 2), }, { /* PCIE D2F3 */ PCI_DEVICE(0x1D17, 0x0734), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 3), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 3), }, { /* PCIE D3F0 */ PCI_DEVICE(0x1D17, 0x0719), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 4), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 4), }, { /* PCIE D3F1 */ PCI_DEVICE(0x1D17, 0x0735), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 5), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 5), }, { /* PCIE D3F2 */ PCI_DEVICE(0x1D17, 0x0739), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 6), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 6), }, { /* PCIE D3F3 */ PCI_DEVICE(0x1D17, 0x073A), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 7), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 7), }, { /* PCIE D4F0 */ PCI_DEVICE(0x1D17, 0x071B), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 8), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 8), }, { /* PCIE D4F1 */ PCI_DEVICE(0x1D17, 0x071C), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 9), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 9), }, { /* PCIE D4F2 */ PCI_DEVICE(0x1D17, 0x0736), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 10), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 10), }, { /* PCIE D4F3 */ PCI_DEVICE(0x1D17, 0x0737), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 11), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 11), }, { /* PCIE D4F4 */ PCI_DEVICE(0x1D17, 0x0738), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 12), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 12), 
}, { /* PCIE D5F0 */ PCI_DEVICE(0x1D17, 0x071D), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 13), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 13), }, { /* PCIE D5F1 */ PCI_DEVICE(0x1D17, 0x071E), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 14), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 14), }, { /* PCIE D5F2 */ PCI_DEVICE(0x1D17, 0x0732), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 15), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 15), }, { /* PCIE D5F3 */ PCI_DEVICE(0x1D17, 0x073B), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 16), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 16), }, { /* PXPTRF */ PCI_DEVICE(0x1D17, 0x31B4), - .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PXPTRF, 0), + .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PXPTRF, 0), }, { /* end: all zeroes */ } }; - -static struct pci_driver kx8000_uncore_pci_driver = { - .name = "kx8000_uncore", - .id_table = kx8000_uncore_pci_ids, +static struct pci_driver kx7000_uncore_pci_driver = { + .name = "kx7000_uncore", + .id_table = kx7000_uncore_pci_ids, }; -/*KX8000 pci ops end*/ +/*KX7000 pci ops end*/ -/*KX8000 mmio ops start*/ -static void kx8000_uncore_mmio_init_box(struct zhaoxin_uncore_box *box) +/*KX7000 mmio ops start*/ +static void kx7000_uncore_mmio_init_box(struct zhaoxin_uncore_box *box) { struct pci_dev *pdev = NULL; unsigned int box_ctl = uncore_mmio_box_ctl(box); @@ -1331,24 +1387,24 @@ static void kx8000_uncore_mmio_init_box(struct zhaoxin_uncore_box *box) return; if (!strcmp(box->pmu->name, "iod_zdi_dl")) - mmio_base_offset = KX8000_IOD_ZDI_DL_MMIO_BASE_OFFSET; + mmio_base_offset = KX7000_IOD_ZDI_DL_MMIO_BASE_OFFSET; else - mmio_base_offset = KX8000_CCD_ZDI_DL_MMIO_BASE_OFFSET; + mmio_base_offset = KX7000_CCD_ZDI_DL_MMIO_BASE_OFFSET; pci_read_config_dword(pdev, mmio_base_offset, &pci_dword); - addr = (u64)(pci_dword & KX8000_ZDI_DL_MMIO_BASE_MASK) << 
32; + addr = (u64)(pci_dword & KX7000_ZDI_DL_MMIO_BASE_MASK) << 32; pci_read_config_dword(pdev, mmio_base_offset + 4, &pci_dword); - addr |= pci_dword & KX8000_ZDI_DL_MMIO_MEM0_MASK; + addr |= pci_dword & KX7000_ZDI_DL_MMIO_MEM0_MASK; - box->io_addr = ioremap(addr, KX8000_ZDI_DL_MMIO_SIZE); + box->io_addr = ioremap(addr, KX7000_ZDI_DL_MMIO_SIZE); if (!box->io_addr) return; writel(KH40000_PMON_PCI_BOX_CTL_INT, box->io_addr + box_ctl); } -static void kx8000_uncore_mmio_disable_box(struct zhaoxin_uncore_box *box) +static void kx7000_uncore_mmio_disable_box(struct zhaoxin_uncore_box *box) { u32 config; unsigned int box_ctl = uncore_mmio_box_ctl(box); @@ -1361,7 +1417,7 @@ static void kx8000_uncore_mmio_disable_box(struct zhaoxin_uncore_box *box) writel(config, box->io_addr + box_ctl); } -static void kx8000_uncore_mmio_enable_box(struct zhaoxin_uncore_box *box) +static void kx7000_uncore_mmio_enable_box(struct zhaoxin_uncore_box *box) { u32 config; unsigned int box_ctl = uncore_mmio_box_ctl(box); @@ -1374,8 +1430,8 @@ static void kx8000_uncore_mmio_enable_box(struct zhaoxin_uncore_box *box) writel(config, box->io_addr + box_ctl); } -static void kx8000_uncore_mmio_enable_event(struct zhaoxin_uncore_box *box, - struct perf_event *event) +static void kx7000_uncore_mmio_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -1385,8 +1441,8 @@ static void kx8000_uncore_mmio_enable_event(struct zhaoxin_uncore_box *box, writel(hwc->config | KH40000_PMON_CTL_EN, box->io_addr + hwc->config_base); } -static void kx8000_uncore_mmio_disable_event(struct zhaoxin_uncore_box *box, - struct perf_event *event) +static void kx7000_uncore_mmio_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -1402,8 +1458,7 @@ static void uncore_mmio_exit_box(struct zhaoxin_uncore_box *box) iounmap(box->io_addr); } -static u64 uncore_mmio_read_counter(struct 
zhaoxin_uncore_box *box, - struct perf_event *event) +static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) { u64 count = 0; u64 count_low = 0; @@ -1419,54 +1474,51 @@ static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, return count; } -static struct zhaoxin_uncore_ops kx8000_uncore_mmio_ops = { - .init_box = kx8000_uncore_mmio_init_box, +static struct zhaoxin_uncore_ops kx7000_uncore_mmio_ops = { + .init_box = kx7000_uncore_mmio_init_box, .exit_box = uncore_mmio_exit_box, - .disable_box = kx8000_uncore_mmio_disable_box, - .enable_box = kx8000_uncore_mmio_enable_box, - .disable_event = kx8000_uncore_mmio_disable_event, - .enable_event = kx8000_uncore_mmio_enable_event, + .disable_box = kx7000_uncore_mmio_disable_box, + .enable_box = kx7000_uncore_mmio_enable_box, + .disable_event = kx7000_uncore_mmio_disable_event, + .enable_event = kx7000_uncore_mmio_enable_event, .read_counter = uncore_mmio_read_counter, }; -static struct zhaoxin_uncore_type kx8000_uncore_iod_zdi_dl = { +static struct zhaoxin_uncore_type kx7000_uncore_iod_zdi_dl = { .name = "iod_zdi_dl", .num_counters = 4, .num_boxes = 1, .perf_ctr_bits = 48, .fixed_ctr_bits = 48, - .perf_ctr = KX8000_ZDI_DL_MMIO_PMON_CTR0, - .event_ctl = KX8000_ZDI_DL_MMIO_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KX8000_ZDI_DL_MMIO_PMON_BLK_CTL, - .ops = &kx8000_uncore_mmio_ops, - .format_group = &kh40000_uncore_format_group, + .perf_ctr = KX7000_ZDI_DL_MMIO_PMON_CTR0, + .event_ctl = KX7000_ZDI_DL_MMIO_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_ZDI_DL_MMIO_PMON_BLK_CTL, + .ops = &kx7000_uncore_mmio_ops, + .format_group = &kx7000_uncore_format_group, }; -static struct zhaoxin_uncore_type kx8000_uncore_ccd_zdi_dl = { +static struct zhaoxin_uncore_type kx7000_uncore_ccd_zdi_dl = { .name = "ccd_zdi_dl", .num_counters = 4, .num_boxes = 1, .perf_ctr_bits = 48, .fixed_ctr_bits = 48, - .perf_ctr = 
KX8000_ZDI_DL_MMIO_PMON_CTR0, - .event_ctl = KX8000_ZDI_DL_MMIO_PMON_CTL0, - .event_mask = KH40000_PMON_RAW_EVENT_MASK, - .box_ctl = KX8000_ZDI_DL_MMIO_PMON_BLK_CTL, - .ops = &kx8000_uncore_mmio_ops, - .format_group = &kh40000_uncore_format_group, + .perf_ctr = KX7000_ZDI_DL_MMIO_PMON_CTR0, + .event_ctl = KX7000_ZDI_DL_MMIO_PMON_CTL0, + .event_mask = KX7000_PMON_RAW_EVENT_MASK, + .box_ctl = KX7000_ZDI_DL_MMIO_PMON_BLK_CTL, + .ops = &kx7000_uncore_mmio_ops, + .format_group = &kx7000_uncore_format_group, }; -static struct zhaoxin_uncore_type *kx8000_mmio_uncores[] = { - &kx8000_uncore_iod_zdi_dl, - &kx8000_uncore_ccd_zdi_dl, +static struct zhaoxin_uncore_type *kx7000_mmio_uncores[] = { + &kx7000_uncore_iod_zdi_dl, + &kx7000_uncore_ccd_zdi_dl, NULL, }; -/*KX8000 mmio ops end*/ - - - +/*KX7000 mmio ops end*/ static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer) { struct zhaoxin_uncore_box *box; @@ -1517,8 +1569,7 @@ static void uncore_pmu_init_hrtimer(struct zhaoxin_uncore_box *box) box->hrtimer.function = uncore_pmu_hrtimer; } -static struct zhaoxin_uncore_box *uncore_alloc_box(struct zhaoxin_uncore_type *type, - int node) +static struct zhaoxin_uncore_box *uncore_alloc_box(struct zhaoxin_uncore_type *type, int node) { int i, size, numshared = type->num_shared_regs; struct zhaoxin_uncore_box *box; @@ -1551,9 +1602,8 @@ static bool is_box_event(struct zhaoxin_uncore_box *box, struct perf_event *even return &box->pmu->pmu == event->pmu; } -static int -uncore_collect_events(struct zhaoxin_uncore_box *box, struct perf_event *leader, - bool dogrp) +static int uncore_collect_events(struct zhaoxin_uncore_box *box, struct perf_event *leader, + bool dogrp) { struct perf_event *event; int n, max_count; @@ -1589,8 +1639,8 @@ uncore_collect_events(struct zhaoxin_uncore_box *box, struct perf_event *leader, return n; } -static struct event_constraint * -uncore_get_event_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event) +static struct 
event_constraint *uncore_get_event_constraint(struct zhaoxin_uncore_box *box, + struct perf_event *event) { struct zhaoxin_uncore_type *type = box->pmu->type; struct event_constraint *c; @@ -1614,8 +1664,7 @@ uncore_get_event_constraint(struct zhaoxin_uncore_box *box, struct perf_event *e return &type->unconstrainted; } -static void uncore_put_event_constraint(struct zhaoxin_uncore_box *box, - struct perf_event *event) +static void uncore_put_event_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event) { if (box->pmu->type->ops->put_constraint) box->pmu->type->ops->put_constraint(box, event); @@ -1812,8 +1861,7 @@ static void uncore_pmu_event_read(struct perf_event *event) uncore_perf_event_update(box, event); } -static int uncore_validate_group(struct zhaoxin_uncore_pmu *pmu, - struct perf_event *event) +static int uncore_validate_group(struct zhaoxin_uncore_pmu *pmu, struct perf_event *event) { struct perf_event *leader = event->group_leader; struct zhaoxin_uncore_box *fake_box; @@ -1950,8 +1998,7 @@ static void uncore_pmu_disable(struct pmu *pmu) uncore_pmu->type->ops->disable_box(box); } -static ssize_t cpumask_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr, char *buf) { cpumask_t *active_mask; struct pmu *pmu; @@ -1968,6 +2015,7 @@ static ssize_t cpumask_show(struct device *dev, } else { active_mask = &uncore_cpu_mask; } + return cpumap_print_to_pagebuf(true, buf, active_mask); } static DEVICE_ATTR_RO(cpumask); @@ -2018,6 +2066,7 @@ static int uncore_pmu_register(struct zhaoxin_uncore_pmu *pmu) ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); if (!ret) pmu->registered = true; + return ret; } @@ -2136,8 +2185,7 @@ static int __init uncore_type_init(struct zhaoxin_uncore_type *type, bool setid) return -ENOMEM; } -static int __init -uncore_types_init(struct zhaoxin_uncore_type **types, bool setid) +static int __init uncore_types_init(struct 
zhaoxin_uncore_type **types, bool setid) { int ret; @@ -2173,7 +2221,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id strscpy(mc_dev, "mc0", sizeof("mc0")); if (!strcmp(type->name, mc_dev)) loop = 2; - } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX8000) { + } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX7000) { strscpy(mc_dev, "mc_a0", sizeof("mc_a0")); if (!strcmp(type->name, mc_dev)) loop = 4; @@ -2252,14 +2300,13 @@ static void uncore_pci_remove(struct pci_dev *pdev) loop = 2; else loop = 1; - } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX8000) { + } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX7000) { if (!strcmp(boxes[0]->pmu->type->name, "mc_a0")) loop = 4; else loop = 1; } - for (i = 0; i < loop; i++) { box = boxes[i]; pmu = box->pmu; @@ -2313,8 +2360,7 @@ static void uncore_pci_exit(void) } } -static void uncore_change_type_ctx(struct zhaoxin_uncore_type *type, int old_cpu, - int new_cpu) +static void uncore_change_type_ctx(struct zhaoxin_uncore_type *type, int old_cpu, int new_cpu) { struct zhaoxin_uncore_pmu *pmu = type->pmus; struct zhaoxin_uncore_box *box; @@ -2360,8 +2406,7 @@ static void uncore_change_type_ctx(struct zhaoxin_uncore_type *type, int old_cpu } } -static void uncore_change_context(struct zhaoxin_uncore_type **uncores, - int old_cpu, int new_cpu) +static void uncore_change_context(struct zhaoxin_uncore_type **uncores, int old_cpu, int new_cpu) { for (; *uncores; uncores++) uncore_change_type_ctx(*uncores, old_cpu, new_cpu); @@ -2467,7 +2512,6 @@ static void kh40000_event_cpu_offline(int cpu) } else { uncore_box_unref(uncore_msr_subnode_uncores, subnode_id); } - } static int uncore_event_cpu_offline(unsigned int cpu) @@ -2484,8 +2528,8 @@ static int uncore_event_cpu_offline(unsigned int cpu) return 0; } -static int kx5000_allocate_boxes(struct zhaoxin_uncore_type **types, - unsigned int id, unsigned int cpu) +static int kx5000_allocate_boxes(struct zhaoxin_uncore_type **types, 
unsigned int id, + unsigned int cpu) { struct zhaoxin_uncore_box *box, *tmp; struct zhaoxin_uncore_type *type; @@ -2525,8 +2569,8 @@ static int kx5000_allocate_boxes(struct zhaoxin_uncore_type **types, return -ENOMEM; } -static int kh40000_allocate_boxes(struct zhaoxin_uncore_type **types, - unsigned int id, unsigned int cpu) +static int kh40000_allocate_boxes(struct zhaoxin_uncore_type **types, unsigned int id, + unsigned int cpu) { struct zhaoxin_uncore_box *box, *tmp; struct zhaoxin_uncore_type *type; @@ -2568,8 +2612,7 @@ static int kh40000_allocate_boxes(struct zhaoxin_uncore_type **types, return -ENOMEM; } -static int uncore_box_ref(struct zhaoxin_uncore_type **types, - int id, unsigned int cpu) +static int uncore_box_ref(struct zhaoxin_uncore_type **types, int id, unsigned int cpu) { struct zhaoxin_uncore_type *type; struct zhaoxin_uncore_pmu *pmu; @@ -2789,39 +2832,49 @@ static const struct zhaoxin_uncore_init_fun kh40000_uncore_init __initconst = { .pci_init = kh40000_uncore_pci_init, }; -void kx8000_uncore_cpu_init(void) +void kx7000_uncore_cpu_init(void) { - uncore_msr_uncores = kx8000_msr_uncores; + u64 val; + int cpu; + + uncore_msr_uncores = kx7000_msr_uncores; + + /* clear bit 16 of MSR 0x1877 so that HIF can work normally */ + for_each_present_cpu(cpu) { + rdmsrl_on_cpu(cpu, 0x1877, &val); + val = val & 0xfffffffffffeffffULL; + wrmsrl_on_cpu(cpu, 0x1877, val); + } } -int kx8000_uncore_pci_init(void) +int kx7000_uncore_pci_init(void) { - uncore_pci_uncores = kx8000_pci_uncores; - uncore_pci_driver = &kx8000_uncore_pci_driver; + uncore_pci_uncores = kx7000_pci_uncores; + uncore_pci_driver = &kx7000_uncore_pci_driver; return 0; } -void kx8000_uncore_mmio_init(void) +void kx7000_uncore_mmio_init(void) { - uncore_mmio_uncores = kx8000_mmio_uncores; + uncore_mmio_uncores = kx7000_mmio_uncores; } -static const struct zhaoxin_uncore_init_fun kx8000_uncore_init __initconst = { - .cpu_init = kx8000_uncore_cpu_init, - .pci_init = kx8000_uncore_pci_init, - 
.mmio_init = kx8000_uncore_mmio_init, +static const struct zhaoxin_uncore_init_fun kx7000_uncore_init __initconst = { + .cpu_init = kx7000_uncore_cpu_init, + .pci_init = kx7000_uncore_pci_init, + .mmio_init = kx7000_uncore_mmio_init, }; static const struct x86_cpu_id zhaoxin_uncore_match[] __initconst = { X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX5000, &kx5000_uncore_init), X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX6000, &kx5000_uncore_init), X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KH40000, &kh40000_uncore_init), - X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX8000, &kx8000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX7000, &kx7000_uncore_init), X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX5000, &kx5000_uncore_init), X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX6000, &kx5000_uncore_init), X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KH40000, &kh40000_uncore_init), - X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX8000, &kx8000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX7000, &kx7000_uncore_init), {}, }; MODULE_DEVICE_TABLE(x86cpu, zhaoxin_uncore_match); diff --git a/arch/x86/events/zhaoxin/uncore.h b/arch/x86/events/zhaoxin/uncore.h index 5d09696f8bc7..43ea06364175 100644 --- a/arch/x86/events/zhaoxin/uncore.h +++ b/arch/x86/events/zhaoxin/uncore.h @@ -9,10 +9,8 @@ #define ZHAOXIN_FAM7_KX5000 0x1b #define ZHAOXIN_FAM7_KX6000 0x3b -#define ZHAOXIN_FAM7_KH40000 0x5b -#define ZHAOXIN_FAM7_KX8000 0x6b - - +#define ZHAOXIN_FAM7_KH40000 0x5b +#define ZHAOXIN_FAM7_KX7000 0x6b #define UNCORE_PMU_NAME_LEN 32 #define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC) @@ -82,14 +80,14 @@ struct zhaoxin_uncore_ops { }; struct zhaoxin_uncore_pmu { - struct pmu pmu; - char name[UNCORE_PMU_NAME_LEN]; - int pmu_idx; - int func_id; - bool registered; - atomic_t activeboxes; - struct zhaoxin_uncore_type *type; - struct zhaoxin_uncore_box **boxes; + struct pmu 
pmu; + char name[UNCORE_PMU_NAME_LEN]; + int pmu_idx; + int func_id; + bool registered; + atomic_t activeboxes; + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_box **boxes; }; struct zhaoxin_uncore_extra_reg { @@ -123,7 +121,7 @@ struct zhaoxin_uncore_box { struct zhaoxin_uncore_extra_reg shared_regs[]; }; -#define UNCORE_BOX_FLAG_INITIATED 0 +#define UNCORE_BOX_FLAG_INITIATED 0 struct uncore_event_desc { struct device_attribute attr; @@ -135,8 +133,7 @@ struct hw_info { u64 active_state; }; -ssize_t zx_uncore_event_show(struct device *dev, - struct device_attribute *attr, char *buf); +ssize_t zx_uncore_event_show(struct device *dev, struct device_attribute *attr, char *buf); #define ZHAOXIN_UNCORE_EVENT_DESC(_name, _config) \ { \ @@ -160,8 +157,7 @@ static inline bool uncore_pmc_fixed(int idx) return idx == UNCORE_PMC_IDX_FIXED; } -static inline -unsigned int uncore_mmio_box_ctl(struct zhaoxin_uncore_box *box) +static inline unsigned int uncore_mmio_box_ctl(struct zhaoxin_uncore_box *box) { return box->pmu->type->box_ctl + box->pmu->type->mmio_offset * box->pmu->pmu_idx; @@ -182,14 +178,12 @@ static inline unsigned int uncore_pci_fixed_ctr(struct zhaoxin_uncore_box *box) return box->pmu->type->fixed_ctr; } -static inline -unsigned int uncore_pci_event_ctl(struct zhaoxin_uncore_box *box, int idx) +static inline unsigned int uncore_pci_event_ctl(struct zhaoxin_uncore_box *box, int idx) { return idx * 4 + box->pmu->type->event_ctl; } -static inline -unsigned int uncore_pci_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +static inline unsigned int uncore_pci_perf_ctr(struct zhaoxin_uncore_box *box, int idx) { if (!strncmp(box->pmu->type->name, "mc_", 3)) return idx * 2 + box->pmu->type->perf_ctr; @@ -225,24 +219,21 @@ static inline unsigned int uncore_msr_fixed_ctr(struct zhaoxin_uncore_box *box) return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); } -static inline -unsigned int uncore_msr_event_ctl(struct zhaoxin_uncore_box *box, int idx) 
+static inline unsigned int uncore_msr_event_ctl(struct zhaoxin_uncore_box *box, int idx) { return box->pmu->type->event_ctl + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + uncore_msr_box_offset(box); } -static inline -unsigned int uncore_msr_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +static inline unsigned int uncore_msr_perf_ctr(struct zhaoxin_uncore_box *box, int idx) { return box->pmu->type->perf_ctr + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + uncore_msr_box_offset(box); } -static inline -unsigned int uncore_fixed_ctl(struct zhaoxin_uncore_box *box) +static inline unsigned int uncore_fixed_ctl(struct zhaoxin_uncore_box *box) { if (box->pci_dev) return uncore_pci_fixed_ctl(box); @@ -250,8 +241,7 @@ unsigned int uncore_fixed_ctl(struct zhaoxin_uncore_box *box) return uncore_msr_fixed_ctl(box); } -static inline -unsigned int uncore_fixed_ctr(struct zhaoxin_uncore_box *box) +static inline unsigned int uncore_fixed_ctr(struct zhaoxin_uncore_box *box) { if (box->pci_dev) return uncore_pci_fixed_ctr(box); @@ -259,17 +249,17 @@ unsigned int uncore_fixed_ctr(struct zhaoxin_uncore_box *box) return uncore_msr_fixed_ctr(box); } -static inline -unsigned int uncore_event_ctl(struct zhaoxin_uncore_box *box, int idx) -{ if (box->pci_dev || box->io_addr) +static inline unsigned int uncore_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ + if (box->pci_dev || box->io_addr) return uncore_pci_event_ctl(box, idx); else return uncore_msr_event_ctl(box, idx); } -static inline -unsigned int uncore_perf_ctr(struct zhaoxin_uncore_box *box, int idx) -{ if (box->pci_dev || box->io_addr) +static inline unsigned int uncore_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ + if (box->pci_dev || box->io_addr) return uncore_pci_perf_ctr(box, idx); else return uncore_msr_perf_ctr(box, idx); @@ -302,20 +292,17 @@ static inline void uncore_enable_box(struct zhaoxin_uncore_box *box) box->pmu->type->ops->enable_box(box); } -static inline void uncore_disable_event(struct 
zhaoxin_uncore_box *box, - struct perf_event *event) +static inline void uncore_disable_event(struct zhaoxin_uncore_box *box, struct perf_event *event) { box->pmu->type->ops->disable_event(box, event); } -static inline void uncore_enable_event(struct zhaoxin_uncore_box *box, - struct perf_event *event) +static inline void uncore_enable_event(struct zhaoxin_uncore_box *box, struct perf_event *event) { box->pmu->type->ops->enable_event(box, event); } -static inline u64 uncore_read_counter(struct zhaoxin_uncore_box *box, - struct perf_event *event) +static inline u64 uncore_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) { return box->pmu->type->ops->read_counter(box, event); } @@ -351,12 +338,10 @@ static inline struct zhaoxin_uncore_box *uncore_event_to_box(struct perf_event * return event->pmu_private; } - static struct zhaoxin_uncore_box *uncore_pmu_to_box(struct zhaoxin_uncore_pmu *pmu, int cpu); static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event); static void uncore_mmio_exit_box(struct zhaoxin_uncore_box *box); -static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, - struct perf_event *event); +static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event); static void uncore_pmu_start_hrtimer(struct zhaoxin_uncore_box *box); static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box); static void uncore_pmu_event_start(struct perf_event *event, int flags); @@ -365,7 +350,7 @@ static int uncore_pmu_event_add(struct perf_event *event, int flags); static void uncore_pmu_event_del(struct perf_event *event, int flags); static void uncore_pmu_event_read(struct perf_event *event); static void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event); -struct event_constraint * -uncore_get_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event); +struct event_constraint *uncore_get_constraint(struct 
zhaoxin_uncore_box *box, + struct perf_event *event); void uncore_put_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event); u64 uncore_shared_reg_config(struct zhaoxin_uncore_box *box, int idx); -- Gitee From 7957d48c36b74cfbd92a1264feda5f8241fce6a8 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Tue, 15 Oct 2024 13:19:00 +0800 Subject: [PATCH 1452/2138] LoongArch: Fix kernel panic if no initrd with defconfig ANBZ: #11337 In the current code, if the rootfs is located on a SCSI (or NVME) device and root=/dev/sda3 (or root=/dev/nvme0n1p3) is specified as boot option, kernel boots failed with CONFIG_BLK_DEV_SD=m (or CONFIG_BLK_DEV_NVME=m) when there is no initrd with defconfig, here are the boot messages via the serial console: /dev/root: Can't open blockdev VFS: Cannot open root device "/dev/sda3" or unknown-block(0,0): error -6 /dev/root: Can't open blockdev VFS: Cannot open root device "/dev/nvme0n1p3" or unknown-block(0,0): error -6 Set CONFIG_BLK_DEV_SD=y and CONFIG_BLK_DEV_NVME=y in defconfig to avoid the potential failures, this is very useful for the developers because there is no need to make initrd everytime for testing. 
Signed-off-by: Tiezhu Yang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/3981 --- arch/loongarch/configs/anolis-debug_defconfig | 4 ++-- arch/loongarch/configs/anolis_defconfig | 4 ++-- arch/loongarch/configs/loongson3_defconfig | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/loongarch/configs/anolis-debug_defconfig b/arch/loongarch/configs/anolis-debug_defconfig index e13de1212610..d750f96d973f 100644 --- a/arch/loongarch/configs/anolis-debug_defconfig +++ b/arch/loongarch/configs/anolis-debug_defconfig @@ -2069,7 +2069,7 @@ CONFIG_BLK_DEV_RBD=m # NVME Support # CONFIG_NVME_CORE=m -CONFIG_BLK_DEV_NVME=m +CONFIG_BLK_DEV_NVME=y CONFIG_NVME_MULTIPATH=y # CONFIG_NVME_VERBOSE_ERRORS is not set # CONFIG_NVME_HWMON is not set @@ -2171,7 +2171,7 @@ CONFIG_SCSI_PROC_FS=y # # SCSI support type (disk, tape, CD-ROM) # -CONFIG_BLK_DEV_SD=m +CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=m diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig index e13de1212610..d750f96d973f 100644 --- a/arch/loongarch/configs/anolis_defconfig +++ b/arch/loongarch/configs/anolis_defconfig @@ -2069,7 +2069,7 @@ CONFIG_BLK_DEV_RBD=m # NVME Support # CONFIG_NVME_CORE=m -CONFIG_BLK_DEV_NVME=m +CONFIG_BLK_DEV_NVME=y CONFIG_NVME_MULTIPATH=y # CONFIG_NVME_VERBOSE_ERRORS is not set # CONFIG_NVME_HWMON is not set @@ -2171,7 +2171,7 @@ CONFIG_SCSI_PROC_FS=y # # SCSI support type (disk, tape, CD-ROM) # -CONFIG_BLK_DEV_SD=m +CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=m diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 4e2867185627..2b544d4b76d0 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -654,7 +654,7 @@ CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_CDROM_PKTCDVD=m CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_RBD=m -CONFIG_BLK_DEV_NVME=m 
+CONFIG_BLK_DEV_NVME=y CONFIG_NVME_MULTIPATH=y CONFIG_NVME_RDMA=m CONFIG_NVME_FC=m @@ -681,7 +681,7 @@ CONFIG_MISC_RTSX_PCI=m CONFIG_MISC_RTSX_USB=m CONFIG_UACCE=m CONFIG_PVPANIC=y -CONFIG_BLK_DEV_SD=m +CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=m -- Gitee From 2c2eecec6399df58bb44ba6a2a24da3e884d22f6 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 27 Nov 2023 10:20:56 -0800 Subject: [PATCH 1453/2138] bpftool: mark orphaned programs during prog show ANBZ: #11330 commit 876843ce1e4897e8ceade50bfa3d9a4ec483abf3 upstream. Commit ef01f4e25c17 ("bpf: restore the ebpf program ID for BPF_AUDIT_UNLOAD and PERF_BPF_EVENT_PROG_UNLOAD") stopped removing program's id from idr when the offloaded/bound netdev goes away. I was supposed to take a look and check in [0], but apparently I did not. Martin points out it might be useful to keep it that way for observability sake, but we at least need to mark those programs as unusable. Mark those programs as 'orphaned' and keep printing the list when we encounter ENODEV. 
0: unspec tag 0000000000000000 xlated 0B not jited memlock 4096B orphaned [0]: https://lore.kernel.org/all/CAKH8qBtyR20ZWAc11z1-6pGb3Hd47AQUTbE_cfoktG59TqaJ7Q@mail.gmail.com/ v3: * use two spaces for " orphaned" (Quentin) Cc: netdev@vger.kernel.org Fixes: ef01f4e25c17 ("bpf: restore the ebpf program ID for BPF_AUDIT_UNLOAD and PERF_BPF_EVENT_PROG_UNLOAD") Signed-off-by: Stanislav Fomichev Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/r/20231127182057.1081138-1-sdf@google.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/3980 --- tools/bpf/bpftool/prog.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 90ae2ea61324..1a49233761e9 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -442,7 +442,7 @@ static void print_prog_header_json(struct bpf_prog_info *info, int fd) jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses); } -static void print_prog_json(struct bpf_prog_info *info, int fd) +static void print_prog_json(struct bpf_prog_info *info, int fd, bool orphaned) { char *memlock; @@ -461,6 +461,7 @@ static void print_prog_json(struct bpf_prog_info *info, int fd) jsonw_uint_field(json_wtr, "uid", info->created_by_uid); } + jsonw_bool_field(json_wtr, "orphaned", orphaned); jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len); if (info->jited_prog_len) { @@ -527,7 +528,7 @@ static void print_prog_header_plain(struct bpf_prog_info *info, int fd) printf("\n"); } -static void print_prog_plain(struct bpf_prog_info *info, int fd) +static void print_prog_plain(struct bpf_prog_info *info, int fd, bool orphaned) { char *memlock; @@ -554,6 +555,9 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd) printf(" memlock %sB", memlock); free(memlock); + if (orphaned) + printf(" orphaned"); + if (info->nr_map_ids) show_prog_maps(fd, 
info->nr_map_ids); @@ -581,15 +585,15 @@ static int show_prog(int fd) int err; err = bpf_prog_get_info_by_fd(fd, &info, &len); - if (err) { + if (err && err != -ENODEV) { p_err("can't get prog info: %s", strerror(errno)); return -1; } if (json_output) - print_prog_json(&info, fd); + print_prog_json(&info, fd, err == -ENODEV); else - print_prog_plain(&info, fd); + print_prog_plain(&info, fd, err == -ENODEV); return 0; } -- Gitee From 4c80f3b801052c10979a8b6c968fbebbb20f55a6 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 27 Nov 2023 10:20:57 -0800 Subject: [PATCH 1454/2138] selftests/bpf: update test_offload to use new orphaned property ANBZ: #11330 commit cf9791631027a476f7cdb0e1b3ac6add16eff264 upstream. - filter orphaned programs by default - when trying to query orphaned program, don't expect bpftool failure Cc: netdev@vger.kernel.org Signed-off-by: Stanislav Fomichev Link: https://lore.kernel.org/r/20231127182057.1081138-2-sdf@google.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/3980 --- tools/testing/selftests/bpf/test_offload.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index 40cba8d368d9..6157f884d091 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -169,12 +169,14 @@ def bpftool(args, JSON=True, ns="", fail=True, include_stderr=False): return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail, include_stderr=include_stderr) -def bpftool_prog_list(expected=None, ns=""): +def bpftool_prog_list(expected=None, ns="", exclude_orphaned=True): _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True) # Remove the base progs for p in base_progs: if p in progs: progs.remove(p) + if exclude_orphaned: + progs = [ p for p in progs if not p['orphaned'] ] if 
expected is not None: if len(progs) != expected: fail(True, "%d BPF programs loaded, expected %d" % @@ -612,11 +614,9 @@ def pin_map(file_name, idx=0, expected=1): def check_dev_info_removed(prog_file=None, map_file=None): bpftool_prog_list(expected=0) + bpftool_prog_list(expected=1, exclude_orphaned=False) ret, err = bpftool("prog show pin %s" % (prog_file), fail=False) - fail(ret == 0, "Showing prog with removed device did not fail") - fail(err["error"].find("No such device") == -1, - "Showing prog with removed device expected ENODEV, error is %s" % - (err["error"])) + fail(ret != 0, "failed to show prog with removed device") bpftool_map_list(expected=0) ret, err = bpftool("map show pin %s" % (map_file), fail=False) @@ -1395,10 +1395,7 @@ try: start_test("Test multi-dev ASIC cross-dev destruction - orphaned...") ret, out = bpftool("prog show %s" % (progB), fail=False) - fail(ret == 0, "got information about orphaned program") - fail("error" not in out, "no error reported for get info on orphaned") - fail(out["error"] != "can't get prog info: No such device", - "wrong error for get info on orphaned") + fail(ret != 0, "couldn't get information about orphaned program") print("%s: OK" % (os.path.basename(__file__))) -- Gitee From 26308c55651be094234c81fd838639aa6754e8c0 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Thu, 15 Aug 2024 15:59:51 +0800 Subject: [PATCH 1455/2138] selftests: udpgro: no need to load xdp for gro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #11339 commit d7818402b1d80347c764001583f6d63fa68c2e1a upstream. After commit d7db7775ea2e ("net: veth: do not manipulate GRO when using XDP"), there is no need to load XDP program to enable GRO. On the other hand, the current test is failed due to loading the XDP program. e.g. # selftests: net: udpgro.sh # ipv4 # no GRO ok # no GRO chk cmsg ok # GRO ./udpgso_bench_rx: recv: bad packet len, got 1472, expected 14720 # # failed [...] 
# bad GRO lookup ok # multiple GRO socks ./udpgso_bench_rx: recv: bad packet len, got 1452, expected 14520 # # ./udpgso_bench_rx: recv: bad packet len, got 1452, expected 14520 # # failed ok 1 selftests: net: udpgro.sh After fix, all the test passed. # ./udpgro.sh ipv4 no GRO ok [...] multiple GRO socks ok Fixes: d7db7775ea2e ("net: veth: do not manipulate GRO when using XDP") Reported-by: Yi Chen Closes: https://issues.redhat.com/browse/RHEL-53858 Reviewed-by: Toke Høiland-Jørgensen Acked-by: Paolo Abeni Signed-off-by: Hangbin Liu Signed-off-by: David S. Miller Signed-off-by: Philo Lu Reviewed-by: D. Wythe Link: https://gitee.com/anolis/cloud-kernel/pulls/3982 --- tools/testing/selftests/net/udpgro.sh | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh index 53341c8135e8..d5ffd8c9172e 100755 --- a/tools/testing/selftests/net/udpgro.sh +++ b/tools/testing/selftests/net/udpgro.sh @@ -7,8 +7,6 @@ source net_helper.sh readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)" -BPF_FILE="xdp_dummy.o" - # set global exit status, but never reset nonzero one. check_err() { @@ -38,7 +36,7 @@ cfg_veth() { ip -netns "${PEER_NS}" addr add dev veth1 192.168.1.1/24 ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad ip -netns "${PEER_NS}" link set dev veth1 up - ip -n "${PEER_NS}" link set veth1 xdp object ${BPF_FILE} section xdp + ip netns exec "${PEER_NS}" ethtool -K veth1 gro on } run_one() { @@ -206,11 +204,6 @@ run_all() { return $ret } -if [ ! -f ${BPF_FILE} ]; then - echo "Missing ${BPF_FILE}. 
Run 'make' first" - exit -1 -fi - if [[ $# -eq 0 ]]; then run_all elif [[ $1 == "__subprocess" ]]; then -- Gitee From 2a596dd2fbd50295857046440aae19da9fa353b7 Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Wed, 7 Feb 2024 17:12:22 +0800 Subject: [PATCH 1456/2138] perf test: Skip metric w/o event name on arm64 in stat STD output linter ANBZ: #8895 commit 5f70c6c559908984ea93d61a62108b2aff017a99 upstream. stat+std_output.sh test fails on my arm64 machine: [root@localhost shell]# ./stat+std_output.sh Checking STD output: no args Unknown event name in TopDownL1 # 0.18 retiring [root@localhost shell]# ./stat+std_output.sh Checking STD output: no args [Success] Checking STD output: system wide [Success] Checking STD output: interval [Success] Checking STD output: per thread Unknown event name in tmux: server-1114960 # 0.41 frontend_bound When no args specified `perf stat` will add TopdownL1 metric group and the output will be like: [root@localhost shell]# perf stat -- stress-ng --vm 1 --timeout 1 stress-ng: info: [3351733] setting to a 1 second run per stressor stress-ng: info: [3351733] dispatching hogs: 1 vm stress-ng: info: [3351733] successful run completed in 1.02s Performance counter stats for 'stress-ng --vm 1 --timeout 1': 1,037.71 msec task-clock # 1.000 CPUs utilized 13 context-switches # 12.528 /sec 1 cpu-migrations # 0.964 /sec 67,544 page-faults # 65.090 K/sec 2,691,932,561 cycles # 2.594 GHz (74.56%) 6,571,333,653 instructions # 2.44 insn per cycle (74.92%) 521,863,142 branches # 502.901 M/sec (75.21%) 425,879 branch-misses # 0.08% of all branches (87.57%) TopDownL1 # 0.61 retiring (87.67%) # 0.03 frontend_bound (87.67%) # 0.02 bad_speculation (87.67%) # 0.34 backend_bound (74.61%) 1.038138390 seconds time elapsed 0.844849000 seconds user 0.189053000 seconds sys Metrics in group TopDownL1 don't have event name on arm64 but are not listed in the $skip_metric list which they should be listed. Add them to the skip list as what does for x86 platforms in [1]. 
[1] commit 4d60e83dfcee ("perf test: Skip metrics w/o event name in stat STD output linter") Signed-off-by: Yicong Yang Reviewed-by: Ian Rogers Cc: linuxarm@huawei.com Cc: kan.liang@linux.intel.com Signed-off-by: Namhyung Kim Link: https://lore.kernel.org/r/20240207091222.54096-1-yangyicong@huawei.com Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/3988 --- tools/perf/tests/shell/stat+std_output.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh index fb2b10547a11..25f8f8cf0485 100755 --- a/tools/perf/tests/shell/stat+std_output.sh +++ b/tools/perf/tests/shell/stat+std_output.sh @@ -12,7 +12,7 @@ stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX) event_name=(cpu-clock task-clock context-switches cpu-migrations page-faults stalled-cycles-frontend stalled-cycles-backend cycles instructions branches branch-misses) event_metric=("CPUs utilized" "CPUs utilized" "/sec" "/sec" "/sec" "frontend cycles idle" "backend cycles idle" "GHz" "insn per cycle" "/sec" "of all branches") -skip_metric=("stalled cycles per insn" "tma_") +skip_metric=("stalled cycles per insn" "tma_" "retiring" "frontend_bound" "bad_speculation" "backend_bound") cleanup() { rm -f "${stat_output}" -- Gitee From 10d6ebd1e75ab737e8772db10bb777e77b6cb82a Mon Sep 17 00:00:00 2001 From: Likhitha Korrapati Date: Sun, 26 Nov 2023 02:09:14 -0500 Subject: [PATCH 1457/2138] perf test record+probe_libc_inet_pton: Fix call chain match on powerpc ANBZ: #4342 commit 72a2a0a494ec9aefbca4ad64f46b8e3370809993 upstream. 
The perf test "probe libc's inet_pton & backtrace it with ping" fails on powerpc as below: # perf test -v "probe libc's inet_pton & backtrace it with ping" 85: probe libc's inet_pton & backtrace it with ping : --- start --- test child forked, pid 96028 ping 96056 [002] 127271.101961: probe_libc:inet_pton: (7fffa1779a60) 7fffa1779a60 __GI___inet_pton+0x0 (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 7fffa172a73c getaddrinfo+0x121c (/usr/lib64/glibc-hwcaps/power10/libc.so.6) FAIL: expected backtrace entry "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\(/usr/lib64/glibc-hwcaps/power10/libc.so.6\)$" got "7fffa172a73c getaddrinfo+0x121c (/usr/lib64/glibc-hwcaps/power10/libc.so.6)" test child finished with -1 ---- end ---- probe libc's inet_pton & backtrace it with ping: FAILED! This test installs a probe on libc's inet_pton function, which will use uprobes and then uses perf trace on a ping to localhost. It gets 3 levels deep backtrace and checks whether it is what we expected or not. The test started failing from RHEL 9.4 where as it works in previous distro version (RHEL 9.2). Test expects gaih_inet function to be part of backtrace. But in the glibc version (2.34-86) which is part of distro where it fails, this function is missing and hence the test is failing. 
From nm and ping command output we can confirm that gaih_inet function is not present in the expected backtrace for glibc version glibc-2.34-86 [root@xxx perf]# nm /usr/lib64/glibc-hwcaps/power10/libc.so.6 | grep gaih_inet 00000000001273e0 t gaih_inet_serv 00000000001cd8d8 r gaih_inet_typeproto [root@xxx perf]# perf script -i /tmp/perf.data.6E8 ping 104048 [000] 128582.508976: probe_libc:inet_pton: (7fff83779a60) 7fff83779a60 __GI___inet_pton+0x0 (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 7fff8372a73c getaddrinfo+0x121c (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 11dc73534 [unknown] (/usr/bin/ping) 7fff8362a8c4 __libc_start_call_main+0x84 (/usr/lib64/glibc-hwcaps/power10/libc.so.6) FAIL: expected backtrace entry "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\(/usr/lib64/glibc-hwcaps/power10/libc.so.6\)$" got "7fff9d52a73c getaddrinfo+0x121c (/usr/lib64/glibc-hwcaps/power10/libc.so.6)" With version glibc-2.34-60 gaih_inet function is present as part of the expected backtrace. So we cannot just remove the gaih_inet function from the backtrace. [root@xxx perf]# nm /usr/lib64/glibc-hwcaps/power10/libc.so.6 | grep gaih_inet 0000000000130490 t gaih_inet.constprop.0 000000000012e830 t gaih_inet_serv 00000000001d45e4 r gaih_inet_typeproto [root@xxx perf]# ./perf script -i /tmp/perf.data.b6S ping 67906 [000] 22699.591699: probe_libc:inet_pton_3: (7fffbdd80820) 7fffbdd80820 __GI___inet_pton+0x0 (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 7fffbdd31160 gaih_inet.constprop.0+0xcd0 (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 7fffbdd31c7c getaddrinfo+0x14c (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 1140d3558 [unknown] (/usr/bin/ping) This patch solves this issue by doing a conditional skip. If there is a gaih_inet function present in the libc then it will be added to the expected backtrace else the function will be skipped from being added to the expected backtrace. 
Output with the patch [root@xxx perf]# ./perf test -v "probe libc's inet_pton & backtrace it with ping" 83: probe libc's inet_pton & backtrace it with ping : --- start --- test child forked, pid 102662 ping 102692 [000] 127935.549973: probe_libc:inet_pton: (7fff93379a60) 7fff93379a60 __GI___inet_pton+0x0 (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 7fff9332a73c getaddrinfo+0x121c (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 11ef03534 [unknown] (/usr/bin/ping) test child finished with 0 ---- end ---- probe libc's inet_pton & backtrace it with ping: Ok Reported-by: Disha Goel Reviewed-by: Athira Jajeev Reviewed-by: Ian Rogers Signed-off-by: Likhitha Korrapati Tested-by: Disha Goel Cc: Adrian Hunter Cc: Disha Goel Cc: James Clark Cc: Jiri Olsa Cc: Kajol Jain Cc: Madhavan Srinivasan Cc: Namhyung Kim Cc: linuxppc-dev@lists.ozlabs.org Link: https://lore.kernel.org/r/20231126070914.175332-1-likhitha@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/3989 --- tools/perf/tests/shell/record+probe_libc_inet_pton.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index 89214a6d9951..1dcb91f8a847 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh @@ -43,7 +43,10 @@ trace_libc_inet_pton_backtrace() { ;; ppc64|ppc64le) eventattr='max-stack=4' - echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected + # Add gaih_inet to expected backtrace only if it is part of libc. 
+ if nm $libc | grep -F -q gaih_inet.; then + echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected + fi echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected ;; -- Gitee From 9196ec4a912472a31151f6081ca759be30ad8f5e Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 15 Oct 2024 15:26:20 +0800 Subject: [PATCH 1458/2138] perf/tests: fix record+probe_libc_inet_pton test on aarch64 ANBZ: #4342 cherry-picked from https://lore.kernel.org/lkml/1728978807-81116-1-git-send-email-renyu.zj@linux.alibaba.com/ Since commit 1f85d016768f ("perf test record+probe_libc_inet_pton: Fix call chain match on x86_64") remove function getaddrinfo() on expected file, the test failed on aarch64. On aarch64, function getaddrinfo() show up in the call chain. $perf script -i /tmp/perf.data.1PV ping 2588319 [125] 500119.122843: probe_libc:inet_pton: (ffff9a4f7410) ffff9a4f7410 __GI___inet_pton+0x0 (/usr/lib64/libc-2.32.so) ffff9a4c5f7c getaddrinfo+0xec (/usr/lib64/libc-2.32.so) aaaad6d32b38 [unknown] (/usr/bin/ping) So just remove getaddrinfo() on x86_64. 
Fixes: 1f85d016768f ("perf test record+probe_libc_inet_pton: Fix call chain match on x86_64") Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/3989 --- tools/perf/tests/shell/record+probe_libc_inet_pton.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index 1dcb91f8a847..6e79f024b536 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh @@ -50,8 +50,12 @@ trace_libc_inet_pton_backtrace() { echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected ;; + x86_64) + eventattr='max-stack=3' + echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected *) eventattr='max-stack=3' + echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected ;; esac -- Gitee From c59054c3b23af9026a4e1d557ee89fc753793e02 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:42 +0200 Subject: [PATCH 1459/2138] perf tests lib: Add perf_has_symbol.sh ANBZ: #11351 commit 96ba5999e8d86138cea90422f8c00309a7eedd3b upstream. Some shell tests depend on finding symbols for perf itself, and fail if perf has been stripped and no debug object is available. Add helper functions to check if perf has a needed symbol. This is preparation for amending the tests themselves to be skipped if a needed symbol is not found. The functions make use of the "Symbols" test which reads and checks symbols from a dso, perf itself by default. Note the "Symbols" test will find symbols using the same method as other perf tests, including, for example, looking in the buildid cache. 
An alternative would be to prevent the needed symbols from being stripped, which seems to work with gcc's externally_visible attribute, but that attribute is not supported by clang. Another alternative would be to use option -Wl,-E (which is already used when perf is built with perl support) which causes the linker to add all (global) symbols to the dynamic symbol table. Then the required symbols need only be made global in scope to avoid being strippable. However that goes beyond what is needed. Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-3-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/3991 --- tools/perf/tests/shell/lib/perf_has_symbol.sh | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 tools/perf/tests/shell/lib/perf_has_symbol.sh diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh new file mode 100644 index 000000000000..5d59c32ae3e7 --- /dev/null +++ b/tools/perf/tests/shell/lib/perf_has_symbol.sh @@ -0,0 +1,21 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +perf_has_symbol() +{ + if perf test -vv "Symbols" 2>&1 | grep "[[:space:]]$1$"; then + echo "perf does have symbol '$1'" + return 0 + fi + echo "perf does not have symbol '$1'" + return 1 +} + +skip_test_missing_symbol() +{ + if ! perf_has_symbol "$1" ; then + echo "perf is missing symbols - skipping test" + exit 2 + fi + return 0 +} -- Gitee From d58dffb3b9dbe41061d583b66a54fb9f50b05056 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 20 Feb 2024 19:41:50 -0800 Subject: [PATCH 1460/2138] perf tests: Avoid fork in perf_has_symbol test ANBZ: #11351 commit 526f2ac9f6a1d668fddf925897b55341bef22644 upstream. 
perf test -vv Symbols is used to indentify symbols within the perf binary. Add the -F flag so that the test command doesn't fork the test before running. This removes a little overhead. Acked-by: Adrian Hunter Signed-off-by: Ian Rogers Cc: James Clark Cc: Justin Stitt Cc: Bill Wendling Cc: Nick Desaulniers Cc: Yang Jihong Cc: Nathan Chancellor Cc: Kan Liang Cc: Athira Jajeev Cc: llvm@lists.linux.dev Signed-off-by: Namhyung Kim Link: https://lore.kernel.org/r/20240221034155.1500118-4-irogers@google.com Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/3991 --- tools/perf/tests/shell/lib/perf_has_symbol.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh index 5d59c32ae3e7..561c93b75d77 100644 --- a/tools/perf/tests/shell/lib/perf_has_symbol.sh +++ b/tools/perf/tests/shell/lib/perf_has_symbol.sh @@ -3,7 +3,7 @@ perf_has_symbol() { - if perf test -vv "Symbols" 2>&1 | grep "[[:space:]]$1$"; then + if perf test -vv -F "Symbols" 2>&1 | grep "[[:space:]]$1$"; then echo "perf does have symbol '$1'" return 0 fi -- Gitee From c540bce410dc4060846faf79e09af30e6f99eb9f Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:43 +0200 Subject: [PATCH 1461/2138] perf tests: Skip pipe test if noploop symbol is missing ANBZ: #11351 commit c9526a735082bba57da322332cbcef1bbdff5698 upstream. perf pipe recording and injection test depends on finding symbol noploop in perf, and fails if perf has been stripped and no debug object is available. In that case, skip the test instead. 
Example: Before: $ strip tools/perf/perf $ tools/perf/perf buildid-cache -p `realpath tools/perf/perf` $ tools/perf/perf test -v pipe 86: perf pipe recording and injection test : --- start --- test child forked, pid 47734 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.000 MB - ] 47741 47741 -1 |perf [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.000 MB - ] cannot find noploop function in pipe #1 test child finished with -1 ---- end ---- perf pipe recording and injection test: FAILED! After: $ tools/perf/perf test -v pipe 86: perf pipe recording and injection test : --- start --- test child forked, pid 48996 perf does not have symbol 'noploop' perf is missing symbols - skipping test test child finished with -2 ---- end ---- perf pipe recording and injection test: Skip Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-4-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/3991 --- tools/perf/tests/shell/pipe_test.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/shell/pipe_test.sh b/tools/perf/tests/shell/pipe_test.sh index 8dd115dd35a7..a78d35d2cff0 100755 --- a/tools/perf/tests/shell/pipe_test.sh +++ b/tools/perf/tests/shell/pipe_test.sh @@ -2,10 +2,17 @@ # perf pipe recording and injection test # SPDX-License-Identifier: GPL-2.0 +shelldir=$(dirname "$0") +# shellcheck source=lib/perf_has_symbol.sh +. "${shelldir}"/lib/perf_has_symbol.sh + +sym="noploop" + +skip_test_missing_symbol ${sym} + data=$(mktemp /tmp/perf.data.XXXXXX) prog="perf test -w noploop" task="perf" -sym="noploop" if ! 
perf record -e task-clock:u -o - ${prog} | perf report -i - --task | grep ${task}; then echo "cannot find the test file in the perf report" -- Gitee From 706b48222a871234a96d6b5bf783739eaeb0a59b Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Wed, 1 Nov 2023 11:28:31 +0100 Subject: [PATCH 1462/2138] x86/CPU/AMD: Move the Zen3 BTC_NO detection to the Zen3 init function ANBZ: #11170 commit affc66cb96f865b3763a8e18add52e133d864f04 upstream. No functional changes. Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Nikolay Borisov Link: http://lore.kernel.org/r/20231120104152.13740-4-bp@alien8.de Signed-off-by: PrithivishS Reviewed-by: Kun(llfl) Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3946 --- arch/x86/kernel/cpu/amd.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 9413fb767c6a..f9a978768136 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1027,14 +1027,6 @@ static void init_amd_zen1(struct cpuinfo_x86 *c) /* Erratum 1076: CPB feature bit not being set in CPUID. */ if (!cpu_has(c, X86_FEATURE_CPB)) set_cpu_cap(c, X86_FEATURE_CPB); - - /* - * Zen3 (Fam19 model < 0x10) parts are not susceptible to - * Branch Type Confusion, but predate the allocation of the - * BTC_NO bit. - */ - if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO)) - set_cpu_cap(c, X86_FEATURE_BTC_NO); } pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); @@ -1087,6 +1079,15 @@ static void init_amd_zen2(struct cpuinfo_x86 *c) static void init_amd_zen3(struct cpuinfo_x86 *c) { + if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) { + /* + * Zen3 (Fam19 model < 0x10) parts are not susceptible to + * Branch Type Confusion, but predate the allocation of the + * BTC_NO bit. 
+ */ + if (!cpu_has(c, X86_FEATURE_BTC_NO)) + set_cpu_cap(c, X86_FEATURE_BTC_NO); + } } static void init_amd_zen4(struct cpuinfo_x86 *c) -- Gitee From e9d531cabcb02d7389334300354517c4585f411d Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Wed, 1 Nov 2023 11:20:01 +0100 Subject: [PATCH 1463/2138] x86/CPU/AMD: Call the spectral chicken in the Zen2 init function ANBZ: #11170 commit cfbf4f992bfce1fa9f2f347a79cbbea0368e7971 upstream. No functional change. Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Nikolay Borisov Link: http://lore.kernel.org/r/20231120104152.13740-6-bp@alien8.de Signed-off-by: PrithivishS Reviewed-by: Kun(llfl) Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3946 --- arch/x86/kernel/cpu/amd.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index f9a978768136..7c14aba629e4 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -997,10 +997,8 @@ void init_spectral_chicken(struct cpuinfo_x86 *c) * * This suppresses speculation from the middle of a basic block, i.e. it * suppresses non-branch predictions. 
- * - * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H */ - if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) { + if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) { if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) { value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT; wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value); @@ -1073,6 +1071,7 @@ static void zen2_zenbleed_check(struct cpuinfo_x86 *c) static void init_amd_zen2(struct cpuinfo_x86 *c) { + init_spectral_chicken(c); fix_erratum_1386(c); zen2_zenbleed_check(c); } @@ -1127,7 +1126,7 @@ static void init_amd(struct cpuinfo_x86 *c) case 0x12: init_amd_ln(c); break; case 0x15: init_amd_bd(c); break; case 0x16: init_amd_jg(c); break; - case 0x17: init_spectral_chicken(c); + case 0x17: fallthrough; case 0x19: init_amd_zn(c); break; } -- Gitee From 36edb75c2653b2ce757bcdb8eb688a24393056a2 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Wed, 1 Nov 2023 12:34:29 +0100 Subject: [PATCH 1464/2138] x86/CPU/AMD: Rename init_amd_zn() to init_amd_zen_common() ANBZ: #11170 commit 7c81ad8e8bc28a1847e87c5afe1bae6bffb2f73e upstream. Call it from all Zen init functions. 
Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Nikolay Borisov Link: http://lore.kernel.org/r/20231120104152.13740-7-bp@alien8.de Signed-off-by: PrithivishS Reviewed-by: Kun(llfl) Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3946 --- arch/x86/kernel/cpu/amd.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 7c14aba629e4..5ef6a2c62ccb 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1007,7 +1007,7 @@ void init_spectral_chicken(struct cpuinfo_x86 *c) #endif } -static void init_amd_zn(struct cpuinfo_x86 *c) +static void init_amd_zen_common(void) { setup_force_cpu_cap(X86_FEATURE_ZEN); #ifdef CONFIG_NUMA @@ -1017,6 +1017,7 @@ static void init_amd_zn(struct cpuinfo_x86 *c) static void init_amd_zen1(struct cpuinfo_x86 *c) { + init_amd_zen_common(); fix_erratum_1386(c); /* Fix up CPUID bits, but only if not virtualised. */ @@ -1067,10 +1068,12 @@ static void zen2_zenbleed_check(struct cpuinfo_x86 *c) } else { msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT); } + } static void init_amd_zen2(struct cpuinfo_x86 *c) { + init_amd_zen_common(); init_spectral_chicken(c); fix_erratum_1386(c); zen2_zenbleed_check(c); @@ -1078,6 +1081,8 @@ static void init_amd_zen2(struct cpuinfo_x86 *c) static void init_amd_zen3(struct cpuinfo_x86 *c) { + init_amd_zen_common(); + if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) { /* * Zen3 (Fam19 model < 0x10) parts are not susceptible to @@ -1091,6 +1096,7 @@ static void init_amd_zen3(struct cpuinfo_x86 *c) static void init_amd_zen4(struct cpuinfo_x86 *c) { + init_amd_zen_common(); } static void init_amd(struct cpuinfo_x86 *c) @@ -1126,9 +1132,6 @@ static void init_amd(struct cpuinfo_x86 *c) case 0x12: init_amd_ln(c); break; case 0x15: init_amd_bd(c); break; case 0x16: init_amd_jg(c); break; - case 0x17: - fallthrough; - case 0x19: init_amd_zn(c); break; } if 
(boot_cpu_has(X86_FEATURE_ZEN1)) -- Gitee From 49a706676210048088cea5e6ffddfec8fc3764da Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Fri, 3 Nov 2023 19:58:53 +0100 Subject: [PATCH 1465/2138] x86/CPU/AMD: Get rid of amd_erratum_383[] ANBZ: #11170 commit 1709528f73d475d3c9ec514bc0dee0b41cadd871 upstream. Set it in init_amd_gh() unconditionally as that is the F10h init function. No functional changes. Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Nikolay Borisov Link: http://lore.kernel.org/r/20231120104152.13740-11-bp@alien8.de Signed-off-by: PrithivishS Reviewed-by: Kun(llfl) Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3946 --- arch/x86/kernel/cpu/amd.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 5ef6a2c62ccb..f26503c1bef9 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -63,9 +63,6 @@ static const int amd_erratum_400[] = AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); -static const int amd_erratum_383[] = - AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); - static const int amd_erratum_1485[] = AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf), AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf)); @@ -880,8 +877,7 @@ static void init_amd_gh(struct cpuinfo_x86 *c) */ msr_clear_bit(MSR_AMD64_BU_CFG2, 24); - if (cpu_has_amd_erratum(c, amd_erratum_383)) - set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); + set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); } static void init_amd_ln(struct cpuinfo_x86 *c) -- Gitee From 6bae0568bf9f7efd066f6225d83e4668436569f8 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Fri, 3 Nov 2023 23:20:11 +0100 Subject: [PATCH 1466/2138] x86/CPU/AMD: Get rid of amd_erratum_400[] ANBZ: #11170 commit b3ffbbd282d4eb79f489853a171242c2a06bd8b8 upstream. Setting X86_BUG_AMD_E400 in init_amd() is early enough. 
No functional changes. Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Nikolay Borisov Link: http://lore.kernel.org/r/20231120104152.13740-12-bp@alien8.de Signed-off-by: PrithivishS Reviewed-by: Kun(llfl) Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3946 --- arch/x86/kernel/cpu/amd.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index f26503c1bef9..3137c389989a 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -59,10 +59,6 @@ static u32 nodes_per_socket = 1; #define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) #define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) -static const int amd_erratum_400[] = - AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), - AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); - static const int amd_erratum_1485[] = AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf), AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf)); @@ -769,15 +765,6 @@ static void early_init_amd(struct cpuinfo_x86 *c) if (c->x86 == 0x16 && c->x86_model <= 0xf) msr_set_bit(MSR_AMD64_LS_CFG, 15); - /* - * Check whether the machine is affected by erratum 400. This is - * used to select the proper idle routine and to enable the check - * whether the machine is affected in arch_post_acpi_init(), which - * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check. - */ - if (cpu_has_amd_erratum(c, amd_erratum_400)) - set_cpu_bug(c, X86_BUG_AMD_E400); - early_detect_mem_encrypt(c); /* Re-enable TopologyExtensions if switched off by BIOS */ @@ -844,6 +831,16 @@ static void init_amd_k8(struct cpuinfo_x86 *c) msr_set_bit(MSR_K7_HWCR, 6); #endif set_cpu_bug(c, X86_BUG_SWAPGS_FENCE); + + /* + * Check models and steppings affected by erratum 400. 
This is + * used to select the proper idle routine and to enable the + * check whether the machine is affected in arch_post_acpi_subsys_init() + * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check. + */ + if (c->x86_model > 0x41 || + (c->x86_model == 0x41 && c->x86_stepping >= 0x2)) + setup_force_cpu_bug(X86_BUG_AMD_E400); } static void init_amd_gh(struct cpuinfo_x86 *c) @@ -878,6 +875,16 @@ static void init_amd_gh(struct cpuinfo_x86 *c) msr_clear_bit(MSR_AMD64_BU_CFG2, 24); set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); + + /* + * Check models and steppings affected by erratum 400. This is + * used to select the proper idle routine and to enable the + * check whether the machine is affected in arch_post_acpi_subsys_init() + * which sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check. + */ + if (c->x86_model > 0x2 || + (c->x86_model == 0x2 && c->x86_stepping >= 0x1)) + setup_force_cpu_bug(X86_BUG_AMD_E400); } static void init_amd_ln(struct cpuinfo_x86 *c) -- Gitee From 9d592de82ff99c0fca1d91e6b09b706f2b2a08a9 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Fri, 3 Nov 2023 23:21:56 +0100 Subject: [PATCH 1467/2138] x86/CPU/AMD: Get rid of amd_erratum_1485[] ANBZ: #11170 commit 794c68b20408bb6899f90314e36e256924cc85a1 upstream. No functional changes. 
Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Nikolay Borisov Link: http://lore.kernel.org/r/20231120104152.13740-13-bp@alien8.de Signed-off-by: PrithivishS Reviewed-by: Kun(llfl) Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3946 --- arch/x86/kernel/cpu/amd.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 3137c389989a..94a42ee5753b 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -59,10 +59,6 @@ static u32 nodes_per_socket = 1; #define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) #define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) -static const int amd_erratum_1485[] = - AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf), - AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf)); - static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) { int osvw_id = *erratum++; @@ -1100,6 +1096,9 @@ static void init_amd_zen3(struct cpuinfo_x86 *c) static void init_amd_zen4(struct cpuinfo_x86 *c) { init_amd_zen_common(); + + if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) + msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT); } static void init_amd(struct cpuinfo_x86 *c) @@ -1213,10 +1212,6 @@ static void init_amd(struct cpuinfo_x86 *c) cpu_has(c, X86_FEATURE_AUTOIBRS)) WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS) < 0); - if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && - cpu_has_amd_erratum(c, amd_erratum_1485)) - msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT); - /* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. 
*/ clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE); } -- Gitee From 62ba69bfb5327e12638652a45f0b8890549c4211 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Fri, 3 Nov 2023 23:40:48 +0100 Subject: [PATCH 1468/2138] x86/CPU/AMD: Drop now unused CPU erratum checking function ANBZ: #11170 commit 05f5f73936fa4c1bc0a852702edf53789398d278 upstream. Bye bye. Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Nikolay Borisov Link: http://lore.kernel.org/r/20231120104152.13740-14-bp@alien8.de Signed-off-by: PrithivishS Reviewed-by: Kun(llfl) Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3946 --- arch/x86/kernel/cpu/amd.c | 56 --------------------------------------- 1 file changed, 56 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 94a42ee5753b..eb4673a43d7c 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -34,62 +34,6 @@ */ static u32 nodes_per_socket = 1; -/* - * AMD errata checking - * - * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or - * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that - * have an OSVW id assigned, which it takes as first argument. Both take a - * variable number of family-specific model-stepping ranges created by - * AMD_MODEL_RANGE(). - * - * Example: - * - * const int amd_erratum_319[] = - * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), - * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), - * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); - */ - -#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } -#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } -#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ - ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) -#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) -#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) -#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) - -static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) -{ - int osvw_id = *erratum++; - u32 range; - u32 ms; - - if (osvw_id >= 0 && osvw_id < 65536 && - cpu_has(cpu, X86_FEATURE_OSVW)) { - u64 osvw_len; - - rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); - if (osvw_id < osvw_len) { - u64 osvw_bits; - - rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), - osvw_bits); - return osvw_bits & (1ULL << (osvw_id & 0x3f)); - } - } - - /* OSVW unavailable or ID unknown, match family-model-stepping range */ - ms = (cpu->x86_model << 4) | cpu->x86_stepping; - while ((range = *erratum++)) - if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && - (ms >= AMD_MODEL_RANGE_START(range)) && - (ms <= AMD_MODEL_RANGE_END(range))) - return true; - - return false; -} - static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) { u32 gprs[8] = { 0 }; -- Gitee From 0471cc0203c5704a01ff9a763e79bda3fe653d0a Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Thu, 4 Jan 2024 21:11:37 +0100 Subject: [PATCH 1469/2138] x86/CPU/AMD: Add X86_FEATURE_ZEN5 ANBZ: #11170 commit 3e4147f33f8b647775357bae0248b9a2aeebfcd2 upstream. Add a synthetic feature flag for Zen5. 
Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20240104201138.5072-1-bp@alien8.de Signed-off-by: PrithivishS Reviewed-by: Kun(llfl) Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3946 --- arch/x86/include/asm/cpufeatures.h | 4 +--- arch/x86/kernel/cpu/amd.c | 25 +++++++++++++++++++++---- 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index dc1811133da0..f6d3ab0bed37 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -81,10 +81,8 @@ #define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ #define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ #define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ - -/* CPU types for specific tunings: */ #define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ -/* FREE, was #define X86_FEATURE_K7 ( 3*32+ 5) "" Athlon */ +#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* "" CPU based on Zen5 microarchitecture */ #define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ #define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ #define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index eb4673a43d7c..19dc33b26f5f 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -542,7 +542,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) /* Figure out Zen generations: */ switch (c->x86) { - case 0x17: { + case 0x17: switch (c->x86_model) { case 0x00 ... 0x2f: case 0x50 ... 0x5f: @@ -558,8 +558,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) goto warn; } break; - } - case 0x19: { + + case 0x19: switch (c->x86_model) { case 0x00 ... 0x0f: case 0x20 ... 0x5f: @@ -573,7 +573,17 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) goto warn; } break; - } + + case 0x1a: + switch (c->x86_model) { + case 0x00 ... 
0x0f: + setup_force_cpu_cap(X86_FEATURE_ZEN5); + break; + default: + goto warn; + } + break; + default: break; } @@ -1045,6 +1055,11 @@ static void init_amd_zen4(struct cpuinfo_x86 *c) msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT); } +static void init_amd_zen5(struct cpuinfo_x86 *c) +{ + init_amd_zen_common(); +} + static void init_amd(struct cpuinfo_x86 *c) { early_init_amd(c); @@ -1088,6 +1103,8 @@ static void init_amd(struct cpuinfo_x86 *c) init_amd_zen3(c); else if (boot_cpu_has(X86_FEATURE_ZEN4)) init_amd_zen4(c); + else if (boot_cpu_has(X86_FEATURE_ZEN5)) + init_amd_zen5(c); /* * Enable workaround for FXSAVE leak on CPUs -- Gitee From 46b178b7d6f406e1f751e37e9cba049357963b40 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Wed, 24 Jan 2024 16:07:49 -0600 Subject: [PATCH 1470/2138] x86/CPU/AMD: Add more models to X86_FEATURE_ZEN5 ANBZ: #11170 commit b9328fd636bd50da89e792e135b234ba8e6fe59f upstream. Add model ranges starting at 0x20, 0x40 and 0x70 to the synthetic feature flag X86_FEATURE_ZEN5. Signed-off-by: Mario Limonciello Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20240124220749.2983-1-mario.limonciello@amd.com Signed-off-by: PrithivishS Reviewed-by: Kun(llfl) Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3946 --- arch/x86/kernel/cpu/amd.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 19dc33b26f5f..86dd7471f303 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -577,6 +577,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) case 0x1a: switch (c->x86_model) { case 0x00 ... 0x0f: + case 0x20 ... 0x2f: + case 0x40 ... 0x4f: + case 0x70 ... 
0x7f: setup_force_cpu_cap(X86_FEATURE_ZEN5); break; default: -- Gitee From feb8455612cffbe688462decd49dc01a918e4119 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Thu, 1 Feb 2024 17:10:24 +0100 Subject: [PATCH 1471/2138] x86/CPU/AMD: Do the common init on future Zens too ANBZ: #11170 commit 03ceaf678d444e67fb9c1a372458ba869aa37a60 upstream. There's no need to enable the common Zen init stuff for each new family - just do it by default on everything >= 0x17 family. Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Tom Lendacky Link: https://lore.kernel.org/r/20240201161024.30839-1-bp@alien8.de Signed-off-by: PrithivishS Reviewed-by: Kun(llfl) Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3946 --- arch/x86/kernel/cpu/amd.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 86dd7471f303..309908814702 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -973,7 +973,6 @@ static void init_amd_zen_common(void) static void init_amd_zen1(struct cpuinfo_x86 *c) { - init_amd_zen_common(); fix_erratum_1386(c); /* Fix up CPUID bits, but only if not virtualised. 
*/ @@ -1029,7 +1028,6 @@ static void zen2_zenbleed_check(struct cpuinfo_x86 *c) static void init_amd_zen2(struct cpuinfo_x86 *c) { - init_amd_zen_common(); init_spectral_chicken(c); fix_erratum_1386(c); zen2_zenbleed_check(c); @@ -1037,8 +1035,6 @@ static void init_amd_zen2(struct cpuinfo_x86 *c) static void init_amd_zen3(struct cpuinfo_x86 *c) { - init_amd_zen_common(); - if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) { /* * Zen3 (Fam19 model < 0x10) parts are not susceptible to @@ -1052,15 +1048,12 @@ static void init_amd_zen3(struct cpuinfo_x86 *c) static void init_amd_zen4(struct cpuinfo_x86 *c) { - init_amd_zen_common(); - if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT); } static void init_amd_zen5(struct cpuinfo_x86 *c) { - init_amd_zen_common(); } static void init_amd(struct cpuinfo_x86 *c) @@ -1098,6 +1091,13 @@ static void init_amd(struct cpuinfo_x86 *c) case 0x16: init_amd_jg(c); break; } + /* + * Save up on some future enablement work and do common Zen + * settings. + */ + if (c->x86 >= 0x17) + init_amd_zen_common(); + if (boot_cpu_has(X86_FEATURE_ZEN1)) init_amd_zen1(c); else if (boot_cpu_has(X86_FEATURE_ZEN2)) -- Gitee From 87e64401a802874f95a8138c55995764a49fb358 Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Mon, 25 Mar 2024 13:17:54 +0530 Subject: [PATCH 1472/2138] perf/x86/amd/core: Define a proper ref-cycles event for Zen 4 and later ANBZ: #11170 commit 68cdf1e6e8f2ce78ed7d8f5d80844fd75a9c54ff upstream. Add the "ref-cycles" event for AMD processors based on Zen 4 and later microarchitectures. The backing event is based on PMCx120 which counts cycles not in halt state in P0 frequency (same as MPERF). 
Signed-off-by: Sandipan Das Signed-off-by: Ingo Molnar Reviewed-by: Ian Rogers Link: https://lore.kernel.org/r/089155f19f7c7e65aeb1caa727a882e2ca9b8b04.1711352180.git.sandipan.das@amd.com Signed-off-by: PrithivishS Reviewed-by: Kun(llfl) Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3946 --- arch/x86/events/amd/core.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 8411b91e1307..4eacac39a501 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -273,8 +273,23 @@ static const u64 amd_zen2_perfmon_event_map[PERF_COUNT_HW_MAX] = [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a9, }; +static const u64 amd_zen4_perfmon_event_map[PERF_COUNT_HW_MAX] = +{ + [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60, + [PERF_COUNT_HW_CACHE_MISSES] = 0x0964, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a9, + [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x100000120, +}; + static u64 amd_pmu_event_map(int hw_event) { + if (cpu_feature_enabled(X86_FEATURE_ZEN4) || boot_cpu_data.x86 >= 0x1a) + return amd_zen4_perfmon_event_map[hw_event]; + if (cpu_feature_enabled(X86_FEATURE_ZEN2) || boot_cpu_data.x86 >= 0x19) return amd_zen2_perfmon_event_map[hw_event]; -- Gitee From ef537503c29e9fafa69a83eb8bc0aea56679dcd1 Mon Sep 17 00:00:00 2001 From: Wenkuan Wang Date: Wed, 10 Apr 2024 11:53:08 +0800 Subject: [PATCH 1473/2138] x86/CPU/AMD: Add models 0x10-0x1f to the Zen5 range ANBZ: #11170 commit 2718a7fdf292b2dcb49c856fa8a6a955ebbbc45f upstream. Add some more Zen5 models. 
Fixes: 3e4147f33f8b ("x86/CPU/AMD: Add X86_FEATURE_ZEN5") Signed-off-by: Wenkuan Wang Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20240423144111.1362-1-bp@kernel.org Signed-off-by: PrithivishS Reviewed-by: Kun(llfl) Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3946 --- arch/x86/kernel/cpu/amd.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 309908814702..8118839daa08 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -576,8 +576,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c) case 0x1a: switch (c->x86_model) { - case 0x00 ... 0x0f: - case 0x20 ... 0x2f: + case 0x00 ... 0x2f: case 0x40 ... 0x4f: case 0x70 ... 0x7f: setup_force_cpu_cap(X86_FEATURE_ZEN5); break; default: -- Gitee From 973d652fe7d70b961d24079ed83c06097685ca2c Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Wed, 27 Sep 2023 23:47:03 +0530 Subject: [PATCH 1474/2138] perf test: Fix parse-events tests to skip parametrized events ANBZ: #11402 commit ee33a0ef8468063b34eed4330b0023c1a8d62f8f upstream. Testcase "Parsing of all PMU events from sysfs" parse events for all PMUs, and not just cpu. In case of powerpc, the PowerVM environment supports events from hv_24x7 and hv_gpci PMU which is of example format like below: - hv_24x7/CPM_ADJUNCT_INST,domain=?,core=?/ - hv_gpci/event,partition_id=?/ The value for "?" needs to be filled in depending on system configuration. It is better to skip these parametrized events in this test as it is done in: 'commit b50d691e50e6 ("perf test: Fix "all PMU test" to skip parametrized events")' which handled a similar instance with "all PMU test". Fix parse-events test to skip parametrized events since it needs proper setup of the parameters. 
Signed-off-by: Athira Rajeev Tested-by: Ian Rogers Tested-by: Sachin Sant Reviewed-by: Kajol Jain Cc: maddy@linux.ibm.com Cc: disgoel@linux.vnet.ibm.com Cc: linuxppc-dev@lists.ozlabs.org Link: https://lore.kernel.org/r/20230927181703.80936-1-atrajeev@linux.vnet.ibm.com Signed-off-by: Namhyung Kim Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/3996 --- tools/perf/tests/parse-events.c | 39 +++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index d47f1f871164..2b66ffba3bb0 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -2514,9 +2514,14 @@ static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest while ((pmu = perf_pmus__scan(pmu)) != NULL) { struct stat st; char path[PATH_MAX]; + char pmu_event[PATH_MAX]; + char *buf = NULL; + FILE *file; struct dirent *ent; + size_t len = 0; DIR *dir; int err; + int n; snprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/events/", sysfs__mountpoint(), pmu->name); @@ -2538,11 +2543,45 @@ static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest struct evlist_test e = { .name = NULL, }; char name[2 * NAME_MAX + 1 + 12 + 3]; int test_ret; + bool is_event_parameterized = 0; /* Names containing . 
are special and cannot be used directly */ if (strchr(ent->d_name, '.')) continue; + /* exclude parametrized ones (name contains '?') */ + n = snprintf(pmu_event, sizeof(pmu_event), "%s%s", path, ent->d_name); + if (n >= PATH_MAX) { + pr_err("pmu event name crossed PATH_MAX(%d) size\n", PATH_MAX); + continue; + } + + file = fopen(pmu_event, "r"); + if (!file) { + pr_debug("can't open pmu event file for '%s'\n", ent->d_name); + ret = combine_test_results(ret, TEST_FAIL); + continue; + } + + if (getline(&buf, &len, file) < 0) { + pr_debug(" pmu event: %s is a null event\n", ent->d_name); + ret = combine_test_results(ret, TEST_FAIL); + fclose(file); + continue; + } + + if (strchr(buf, '?')) + is_event_parameterized = 1; + + free(buf); + buf = NULL; + fclose(file); + + if (is_event_parameterized == 1) { + pr_debug("skipping parametrized PMU event: %s which contains ?\n", pmu_event); + continue; + } + snprintf(name, sizeof(name), "%s/event=%s/u", pmu->name, ent->d_name); e.name = name; -- Gitee From 0d2704d0a0be0e6d5eaee4288e400cd771a29801 Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Thu, 17 Oct 2024 15:03:37 +0800 Subject: [PATCH 1475/2138] anolis: configs: enable CONFIG_PCI_PF_STUB by default ANBZ: #11408 Now alibaba moc use pf_stub to support Alibaba PCIe IOHub SRIOV, open CONFIG_PCI_PF_STUB by default on arm64. 
Signed-off-by: Guixin Liu Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3998 --- anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB index 35de7b23ab9d..46eee76194b0 100644 --- a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB +++ b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB @@ -1 +1 @@ -# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_PF_STUB=y -- Gitee From 89b7f01442676439d4beb1fa1d859b0d0fa5638a Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Thu, 17 Oct 2024 15:09:20 +0800 Subject: [PATCH 1476/2138] anolis: configs: open CONFIG_BCACHE by default ANBZ: #11408 Open bcache by default on x86_64 and arm64, some users will use bcache to setup ceph. Signed-off-by: Guixin Liu Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3998 --- anolis/configs/L1-RECOMMEND/default/CONFIG_BCACHE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BCACHE b/anolis/configs/L1-RECOMMEND/default/CONFIG_BCACHE index 7091077fc7d0..c7813b08191a 100644 --- a/anolis/configs/L1-RECOMMEND/default/CONFIG_BCACHE +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_BCACHE @@ -1 +1 @@ -# CONFIG_BCACHE is not set +CONFIG_BCACHE=m -- Gitee From de335e51b483bffdb0c2ed6706222baba9287876 Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Thu, 17 Oct 2024 15:21:12 +0800 Subject: [PATCH 1477/2138] anolis: configs: open CONFIG_SCSI_MVSAS by default ANBZ: #11408 Open CONFIG_SCSI_MVSAS by default on x86_64 and arm64. 
Signed-off-by: Guixin Liu Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3998 --- anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS index dd17532f6815..1576ebf4984b 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS @@ -1 +1 @@ -# CONFIG_SCSI_MVSAS is not set +CONFIG_SCSI_MVSAS=m -- Gitee From 259e0f945797c8e168e037951ffad4273fa1262b Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Thu, 17 Oct 2024 15:52:30 +0800 Subject: [PATCH 1478/2138] anolis: configs: open some ata vendor configs by default ANBZ: #11408 Open some ata vendor configs by default on x86_64 and arm64. Signed-off-by: Guixin Liu Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3998 --- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ACPI | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ALI | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_AMD | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ARTOP | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATIIXP | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATP867X | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD64X | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT366 | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT37X | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X2N | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3 | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT8213 | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT821X | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_JMICRON | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MARVELL | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NETCELL | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NINJA32 | 2 +- 
anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OLDPIIX | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC2027X | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC_OLD | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RDC | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SCH | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SERVERWORKS | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIL680 | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIS | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TOSHIBA | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_VIA | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_MV | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_NV | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_PROMISE | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_QSTOR | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL24 | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIS | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SVW | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SX4 | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ULI | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VIA | 2 +- anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VITESSE | 2 +- 39 files changed, 39 insertions(+), 39 deletions(-) diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ACPI b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ACPI index e40bacb6f515..c3bc7866f76d 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ACPI +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ACPI @@ -1 +1 @@ -# CONFIG_PATA_ACPI is not set +CONFIG_PATA_ACPI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ALI b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ALI index 15c8144544cf..fbd742a57098 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ALI +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ALI @@ -1 +1 @@ -# 
CONFIG_PATA_ALI is not set +CONFIG_PATA_ALI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_AMD b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_AMD index 33a58facc902..acf7db321ac7 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_AMD +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_AMD @@ -1 +1 @@ -# CONFIG_PATA_AMD is not set +CONFIG_PATA_AMD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ARTOP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ARTOP index a4e7eb3b0085..db2c10844088 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ARTOP +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ARTOP @@ -1 +1 @@ -# CONFIG_PATA_ARTOP is not set +CONFIG_PATA_ARTOP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATIIXP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATIIXP index 2730e0e76380..0089d45c97a8 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATIIXP +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATIIXP @@ -1 +1 @@ -# CONFIG_PATA_ATIIXP is not set +CONFIG_PATA_ATIIXP=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATP867X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATP867X index 5e66402b82d9..f5a03f81db1a 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATP867X +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_ATP867X @@ -1 +1 @@ -# CONFIG_PATA_ATP867X is not set +CONFIG_PATA_ATP867X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD64X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD64X index eb4773083247..b065f35ad51a 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD64X +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_CMD64X @@ -1 +1 @@ -# CONFIG_PATA_CMD64X is not set +CONFIG_PATA_CMD64X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT366 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT366 index b67ac339dfd8..0f87284135d0 100644 --- 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT366 +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT366 @@ -1 +1 @@ -# CONFIG_PATA_HPT366 is not set +CONFIG_PATA_HPT366=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT37X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT37X index 990af0509a62..c0f919942e5c 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT37X +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT37X @@ -1 +1 @@ -# CONFIG_PATA_HPT37X is not set +CONFIG_PATA_HPT37X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X2N b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X2N index 471fcff45292..ab1bf6eb11d1 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X2N +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X2N @@ -1 +1 @@ -# CONFIG_PATA_HPT3X2N is not set +CONFIG_PATA_HPT3X2N=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3 index 9fc36a00b62b..aba3749b399d 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3 +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3 @@ -1 +1 @@ -# CONFIG_PATA_HPT3X3 is not set +CONFIG_PATA_HPT3X3=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT8213 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT8213 index 47fbeb7f5a46..1118a468f51d 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT8213 +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT8213 @@ -1 +1 @@ -# CONFIG_PATA_IT8213 is not set +CONFIG_PATA_IT8213=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT821X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT821X index c29d7f7cb42c..3b64260864a5 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT821X +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_IT821X @@ -1 +1 @@ -# CONFIG_PATA_IT821X is not set +CONFIG_PATA_IT821X=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_JMICRON b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_JMICRON index 55695c65f458..b2fb1f796515 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_JMICRON +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_JMICRON @@ -1 +1 @@ -# CONFIG_PATA_JMICRON is not set +CONFIG_PATA_JMICRON=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MARVELL b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MARVELL index 83041a0030e0..7e569fb2947c 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MARVELL +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_MARVELL @@ -1 +1 @@ -# CONFIG_PATA_MARVELL is not set +CONFIG_PATA_MARVELL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NETCELL b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NETCELL index e598ae30341e..d3ad10131bd2 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NETCELL +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NETCELL @@ -1 +1 @@ -# CONFIG_PATA_NETCELL is not set +CONFIG_PATA_NETCELL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NINJA32 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NINJA32 index 9deb5dff7190..018ab5bca4d5 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NINJA32 +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_NINJA32 @@ -1 +1 @@ -# CONFIG_PATA_NINJA32 is not set +CONFIG_PATA_NINJA32=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OLDPIIX b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OLDPIIX index b35bb81d0ab3..a3a6f6f6fe64 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OLDPIIX +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_OLDPIIX @@ -1 +1 @@ -# CONFIG_PATA_OLDPIIX is not set +CONFIG_PATA_OLDPIIX=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC2027X b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC2027X index c8ad0b82611b..30d5e6f20f88 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC2027X 
+++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC2027X @@ -1 +1 @@ -# CONFIG_PATA_PDC2027X is not set +CONFIG_PATA_PDC2027X=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC_OLD b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC_OLD index d77240fcf1b5..10f98240cbbc 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC_OLD +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_PDC_OLD @@ -1 +1 @@ -# CONFIG_PATA_PDC_OLD is not set +CONFIG_PATA_PDC_OLD=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RDC b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RDC index 48afa962bf61..011a98d5ac72 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RDC +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_RDC @@ -1 +1 @@ -# CONFIG_PATA_RDC is not set +CONFIG_PATA_RDC=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SCH b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SCH index 52035bf53a84..2b0924997583 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SCH +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SCH @@ -1 +1 @@ -# CONFIG_PATA_SCH is not set +CONFIG_PATA_SCH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SERVERWORKS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SERVERWORKS index 5cacac7a670b..3cded75960c5 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SERVERWORKS +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SERVERWORKS @@ -1 +1 @@ -# CONFIG_PATA_SERVERWORKS is not set +CONFIG_PATA_SERVERWORKS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIL680 b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIL680 index 75224a38b3bb..2558da4e3845 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIL680 +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIL680 @@ -1 +1 @@ -# CONFIG_PATA_SIL680 is not set +CONFIG_PATA_SIL680=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIS b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIS 
index 8c547495a6a1..238622337136 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIS +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_SIS @@ -1 +1 @@ -# CONFIG_PATA_SIS is not set +CONFIG_PATA_SIS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TOSHIBA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TOSHIBA index ffe9957f10dd..1fe4524d6153 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TOSHIBA +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_TOSHIBA @@ -1 +1 @@ -# CONFIG_PATA_TOSHIBA is not set +CONFIG_PATA_TOSHIBA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_VIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_VIA index 9deaa6c3c904..b86e4cc69ab0 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_VIA +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_VIA @@ -1 +1 @@ -# CONFIG_PATA_VIA is not set +CONFIG_PATA_VIA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_MV b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_MV index 82f4fb00dca7..cb1877ac78c1 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_MV +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_MV @@ -1 +1 @@ -# CONFIG_SATA_MV is not set +CONFIG_SATA_MV=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_NV b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_NV index c8419327af60..47109d86c90b 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_NV +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_NV @@ -1 +1 @@ -# CONFIG_SATA_NV is not set +CONFIG_SATA_NV=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_PROMISE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_PROMISE index 2af59fe8ef91..0376859cb9ad 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_PROMISE +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_PROMISE @@ -1 +1 @@ -# CONFIG_SATA_PROMISE is not set +CONFIG_SATA_PROMISE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_QSTOR 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_QSTOR index 8b3cce9abcf6..e49e1046976f 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_QSTOR +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_QSTOR @@ -1 +1 @@ -# CONFIG_SATA_QSTOR is not set +CONFIG_SATA_QSTOR=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL index 5d551ee6a8c8..831bdc200600 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL @@ -1 +1 @@ -# CONFIG_SATA_SIL is not set +CONFIG_SATA_SIL=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL24 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL24 index 7526a06a0f31..999a46eb59c5 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL24 +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIL24 @@ -1 +1 @@ -# CONFIG_SATA_SIL24 is not set +CONFIG_SATA_SIL24=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIS b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIS index 1cd4ae75f556..fa6474deeae5 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIS +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SIS @@ -1 +1 @@ -# CONFIG_SATA_SIS is not set +CONFIG_SATA_SIS=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SVW b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SVW index 625275c8d6cc..9eac60e9c2a2 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SVW +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SVW @@ -1 +1 @@ -# CONFIG_SATA_SVW is not set +CONFIG_SATA_SVW=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SX4 b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SX4 index abbd89112264..2423653ce6c6 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SX4 +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_SX4 @@ -1 +1 @@ -# CONFIG_SATA_SX4 is not set +CONFIG_SATA_SX4=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ULI b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ULI index b50271fc3780..51607fa85595 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ULI +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_ULI @@ -1 +1 @@ -# CONFIG_SATA_ULI is not set +CONFIG_SATA_ULI=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VIA b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VIA index f5754463a7d8..f7822f142fc4 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VIA +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VIA @@ -1 +1 @@ -# CONFIG_SATA_VIA is not set +CONFIG_SATA_VIA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VITESSE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VITESSE index 59856ec7bcee..ca2d317e5e44 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VITESSE +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SATA_VITESSE @@ -1 +1 @@ -# CONFIG_SATA_VITESSE is not set +CONFIG_SATA_VITESSE=m -- Gitee From 7f3ba756ba189a6a46637f86c1add243633676dd Mon Sep 17 00:00:00 2001 From: Guixin Liu Date: Thu, 17 Oct 2024 16:21:07 +0800 Subject: [PATCH 1479/2138] anolis: configs: open CONFIG_INFINIBAND_IRDMA by default ANBZ: #11408 Open CONFIG_INFINIBAND_IRDMA by default on x86_64 and arm64. 
Signed-off-by: Guixin Liu Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/3998 --- anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IRDMA | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IRDMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IRDMA index fbb0af9a95b2..967fdb3b13a1 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IRDMA +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_INFINIBAND_IRDMA @@ -1 +1 @@ -# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_INFINIBAND_IRDMA=m -- Gitee From 1501c83e04fb3a255c061d55e256aa33b8c32a8c Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Fri, 18 Oct 2024 09:46:33 +0800 Subject: [PATCH 1480/2138] anolis: Revert "perf/tests: fix record+probe_libc_inet_pton test on aarch64" ANBZ: #4342 This reverts commit 755b563a6714ef37924109392c5a56c41ddc13fe. glibc-2.32 version has getaddrinfo() in the call chain, but glibc-2.38 version hasn't, So commit 755b563a6714 can't fix it and revert it. 
Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4002 --- tools/perf/tests/shell/record+probe_libc_inet_pton.sh | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index 6e79f024b536..1dcb91f8a847 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh @@ -50,12 +50,8 @@ trace_libc_inet_pton_backtrace() { echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected ;; - x86_64) - eventattr='max-stack=3' - echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected *) eventattr='max-stack=3' - echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected ;; esac -- Gitee From dc5b82f7ea2526ff5b7c59ef895a4c7ffe759941 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 17 Oct 2024 11:58:24 +0800 Subject: [PATCH 1481/2138] anolis: crypto: ccp: Introduce hygon specific interface to support driver ANBZ: #11419 Hygon secure processors provide a lot of security functions, which require a lot of code to support. In order to prevent Hygon function code from invading the driver's native code, we introduce specific files for Hygon. We'll leave the native code unchanged as much as possible. In this patch, we add files as below: a. files for codes to support Hygon secure processor: drivers/crypto/ccp/hygon/sp-dev.h drivers/crypto/ccp/hygon/sp-pci.c drivers/crypto/ccp/hygon/psp-dev.c drivers/crypto/ccp/hygon/psp-dev.h b. header file to define data types and structures for HYGON Platform Security Processor: include/linux/psp-hygon.h c. 
header file to define userspace interface for HYGON Platform Security Processor: include/uapi/linux/psp-hygon.h We'll add more Hygon specific code in the following commits. In the following commits, we'll move the code for Hygon from the native file to the Hygon specific file. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4009 --- drivers/crypto/ccp/Makefile | 4 +- drivers/crypto/ccp/hygon/psp-dev.c | 19 ++++++++ drivers/crypto/ccp/hygon/psp-dev.h | 30 ++++++++++++ drivers/crypto/ccp/hygon/sp-dev.h | 30 ++++++++++++ drivers/crypto/ccp/hygon/sp-pci.c | 74 ++++++++++++++++++++++++++++++ drivers/crypto/ccp/sev-dev.c | 19 ++++++++ include/linux/psp-hygon.h | 17 +++++++ include/uapi/linux/psp-hygon.h | 14 ++++++ 8 files changed, 206 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/ccp/hygon/psp-dev.c create mode 100644 drivers/crypto/ccp/hygon/psp-dev.h create mode 100644 drivers/crypto/ccp/hygon/sp-dev.h create mode 100644 drivers/crypto/ccp/hygon/sp-pci.c create mode 100644 include/linux/psp-hygon.h create mode 100644 include/uapi/linux/psp-hygon.h diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 88086af2412e..fbce73f0deb8 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -8,12 +8,14 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \ ccp-dmaengine.o \ hygon/ccp-dev-v5.o ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o -ccp-$(CONFIG_PCI) += sp-pci.o +ccp-$(CONFIG_PCI) += sp-pci.o \ + hygon/sp-pci.o ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ sev-dev.o \ tee-dev.o \ platform-access.o \ dbc.o \ + hygon/psp-dev.o \ psp-ringbuf.o \ csv-dev.o \ vpsp.o diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c new file mode 100644 index 000000000000..736f9aaaa37a --- /dev/null +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Platform 
Security Processor (PSP) interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +#include "psp-dev.h" + +/* Function and variable pointers for hooks */ +struct hygon_psp_hooks_table hygon_psp_hooks; diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h new file mode 100644 index 000000000000..ebeade987053 --- /dev/null +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) driver interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef __CCP_HYGON_PSP_DEV_H__ +#define __CCP_HYGON_PSP_DEV_H__ + +#include + +#include "sp-dev.h" + +#include "../psp-dev.h" +#include "../sev-dev.h" + +/* + * Hooks table: a table of function and variable pointers filled in + * when psp init. + */ +extern struct hygon_psp_hooks_table { + bool sev_dev_hooks_installed; + struct mutex *sev_cmd_mutex; + int (*__sev_do_cmd_locked)(int cmd, void *data, int *psp_ret); +} hygon_psp_hooks; + +#endif /* __CCP_HYGON_PSP_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/sp-dev.h b/drivers/crypto/ccp/hygon/sp-dev.h new file mode 100644 index 000000000000..e1996fc3b7c6 --- /dev/null +++ b/drivers/crypto/ccp/hygon/sp-dev.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Secure Processor interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. 
+ * + * Author: Liyang Han + */ + +#ifndef __CCP_HYGON_SP_DEV_H__ +#define __CCP_HYGON_SP_DEV_H__ + +#include +#include + +#include "../ccp-dev.h" +#include "../sp-dev.h" + +#ifdef CONFIG_X86_64 +static inline bool is_vendor_hygon(void) +{ + return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON; +} +#else +static inline bool is_vendor_hygon(void) { return false; } +#endif + +extern const struct sp_dev_vdata hygon_dev_vdata[]; + +#endif /* __CCP_HYGON_SP_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/sp-pci.c b/drivers/crypto/ccp/hygon/sp-pci.c new file mode 100644 index 000000000000..691127a0007b --- /dev/null +++ b/drivers/crypto/ccp/hygon/sp-pci.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Secure Processor interface driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include "sp-dev.h" + +#ifdef CONFIG_CRYPTO_DEV_SP_PSP +static const struct sev_vdata csvv1 = { + .cmdresp_reg = 0x10580, /* C2PMSG_32 */ + .cmdbuff_addr_lo_reg = 0x105e0, /* C2PMSG_56 */ + .cmdbuff_addr_hi_reg = 0x105e4, /* C2PMSG_57 */ +}; + +static const struct psp_vdata pspv1 = { + .sev = &csvv1, + .feature_reg = 0x105fc, /* C2PMSG_63 */ + .inten_reg = 0x10610, /* P2CMSG_INTEN */ + .intsts_reg = 0x10614, /* P2CMSG_INTSTS */ +#ifdef CONFIG_HYGON_PSP2CPU_CMD + .p2c_cmdresp_reg = 0x105e8, + .p2c_cmdbuff_addr_lo_reg = 0x105ec, + .p2c_cmdbuff_addr_hi_reg = 0x105f0, +#endif +}; + +static const struct psp_vdata pspv2 = { + .sev = &csvv1, + .feature_reg = 0x105fc, + .inten_reg = 0x10670, + .intsts_reg = 0x10674, +#ifdef CONFIG_HYGON_PSP2CPU_CMD + .p2c_cmdresp_reg = 0x105e8, + .p2c_cmdbuff_addr_lo_reg = 0x105ec, + .p2c_cmdbuff_addr_hi_reg = 0x105f0, +#endif +}; + +#endif + +const struct sp_dev_vdata hygon_dev_vdata[] = { + { /* 0 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5a_hygon, +#endif +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &pspv1, +#endif + }, + { /* 1 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5b_hygon, +#endif + }, + { /* 2 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5a_hygon, +#endif +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &pspv2, +#endif + }, +}; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index e5f41abb0129..54bea4a842a0 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -35,6 +35,8 @@ #include "sev-dev.h" #include "csv-dev.h" +#include "hygon/psp-dev.h" + #define DEVICE_NAME "sev" #define SEV_FW_FILE "amd/sev.fw" #define CSV_FW_FILE "hygon/csv.fw" @@ -2434,12 +2436,29 @@ static int sev_misc_init(struct sev_device *sev) return 0; } +/* Code to set all of the function and variable pointers */ +static void sev_dev_install_hooks(void) +{ + hygon_psp_hooks.sev_cmd_mutex = &sev_cmd_mutex; + 
hygon_psp_hooks.__sev_do_cmd_locked = __sev_do_cmd_locked; + + hygon_psp_hooks.sev_dev_hooks_installed = true; +} + int sev_dev_init(struct psp_device *psp) { struct device *dev = psp->dev; struct sev_device *sev; int ret = -ENOMEM; + /* + * Install sev-dev related function and variable pointers hooks only + * for Hygon vendor, install these hooks here, even though the + * following initialization fails. + */ + if (is_vendor_hygon()) + sev_dev_install_hooks(); + if (!boot_cpu_has(X86_FEATURE_SEV)) { dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n"); return 0; diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h new file mode 100644 index 000000000000..944db2e2ecc0 --- /dev/null +++ b/include/linux/psp-hygon.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) driver interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef __PSP_HYGON_H__ +#define __PSP_HYGON_H__ + +#ifdef CONFIG_CRYPTO_DEV_SP_PSP +#else /* !CONFIG_CRYPTO_DEV_SP_PSP */ +#endif /* CONFIG_CRYPTO_DEV_SP_PSP */ + +#endif /* __PSP_HYGON_H__ */ diff --git a/include/uapi/linux/psp-hygon.h b/include/uapi/linux/psp-hygon.h new file mode 100644 index 000000000000..e1ac9c04dc55 --- /dev/null +++ b/include/uapi/linux/psp-hygon.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Userspace interface for HYGON Platform Security Processor (PSP) + * commands. + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. 
+ * + * Author: Liyang Han + */ + +#ifndef __PSP_HYGON_USER_H__ +#define __PSP_HYGON_USER_H__ + +#endif /* __PSP_HYGON_USER_H__ */ -- Gitee From be9ebcc7cba3e137a3ea557a923f3262e79bc68a Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 17 Oct 2024 14:21:39 +0800 Subject: [PATCH 1482/2138] anolis: crypto: ccp: Bind specific sp_dev_vdata for Hygon secure processor ANBZ: #11419 We have provided new sp_dev_vdata for Hygon secure processors in previous commit. To reduce the code intrusion, we bind Hygon secure processors to the new sp_dev_vdata. Fixes: 3b093e03f15b ("anolis: crypto: ccp: Add support to detect Hygon PSP on Hygon 2nd/3rd CPUs") Fixes: e940920b1f09 ("anolis: crypto: ccp: Add support to detect Hygon PSP on Hygon 4th CPUs") Fixes: 91329d1dd4c1 ("anolis: crypto: command co-processor: Add another mailbox interrupt support for PSP sending command to X86") Fixes: b119dce9caeb ("anolis:ccp: ccp-crypto support sm2 on Hygon generation 4th CPU") Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4009 --- drivers/crypto/ccp/sp-pci.c | 65 +++++-------------------------------- 1 file changed, 8 insertions(+), 57 deletions(-) diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 8c5a34019aa2..d093ff250910 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -25,6 +25,8 @@ #include "ccp-dev.h" #include "psp-dev.h" +#include "hygon/sp-dev.h" + /* used for version string AA.BB.CC.DD */ #define AA GENMASK(31, 24) #define BB GENMASK(23, 16) @@ -129,13 +131,9 @@ static umode_t psp_firmware_is_visible(struct kobject *kobj, struct attribute *a if (!psp) return 0; -#ifdef CONFIG_X86 - if (attr == &dev_attr_bootloader_version.attr && - psp->vdata->bootloader_info_reg && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) -#else + if (attr == &dev_attr_bootloader_version.attr && psp->vdata->bootloader_info_reg) -#endif val = ioread32(psp->io_regs + 
psp->vdata->bootloader_info_reg); if (attr == &dev_attr_tee_version.attr && @@ -421,12 +419,6 @@ static const struct sev_vdata sevv2 = { .cmdbuff_addr_hi_reg = 0x109e4, /* C2PMSG_57 */ }; -static const struct sev_vdata csvv1 = { - .cmdresp_reg = 0x10580, - .cmdbuff_addr_lo_reg = 0x105e0, - .cmdbuff_addr_hi_reg = 0x105e4, -}; - static const struct tee_vdata teev1 = { .cmdresp_reg = 0x10544, /* C2PMSG_17 */ .cmdbuff_addr_lo_reg = 0x10548, /* C2PMSG_18 */ @@ -463,11 +455,6 @@ static const struct psp_vdata pspv1 = { .feature_reg = 0x105fc, /* C2PMSG_63 */ .inten_reg = 0x10610, /* P2CMSG_INTEN */ .intsts_reg = 0x10614, /* P2CMSG_INTSTS */ -#ifdef CONFIG_HYGON_PSP2CPU_CMD - .p2c_cmdresp_reg = 0x105e8, - .p2c_cmdbuff_addr_lo_reg = 0x105ec, - .p2c_cmdbuff_addr_hi_reg = 0x105f0, -#endif }; static const struct psp_vdata pspv2 = { @@ -513,18 +500,6 @@ static const struct psp_vdata pspv6 = { .intsts_reg = 0x10514, /* P2CMSG_INTSTS */ }; -static const struct psp_vdata psp_csvv1 = { - .sev = &csvv1, - .feature_reg = 0x105fc, - .inten_reg = 0x10670, - .intsts_reg = 0x10674, -#ifdef CONFIG_HYGON_PSP2CPU_CMD - .p2c_cmdresp_reg = 0x105e8, - .p2c_cmdbuff_addr_lo_reg = 0x105ec, - .p2c_cmdbuff_addr_hi_reg = 0x105f0, -#endif -}; - #endif static const struct sp_dev_vdata dev_vdata[] = { @@ -589,30 +564,6 @@ static const struct sp_dev_vdata dev_vdata[] = { .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &pspv6, -#endif - }, - { /* 9 */ - .bar = 2, -#ifdef CONFIG_CRYPTO_DEV_SP_CCP - .ccp_vdata = &ccpv5a_hygon, -#endif -#ifdef CONFIG_CRYPTO_DEV_SP_PSP - .psp_vdata = &pspv1, -#endif - }, - { /* 10 */ - .bar = 2, -#ifdef CONFIG_CRYPTO_DEV_SP_CCP - .ccp_vdata = &ccpv5b_hygon, -#endif - }, - { /* 11 */ - .bar = 2, -#ifdef CONFIG_CRYPTO_DEV_SP_CCP - .ccp_vdata = &ccpv5a_hygon, -#endif -#ifdef CONFIG_CRYPTO_DEV_SP_PSP - .psp_vdata = &psp_csvv1, #endif }, }; @@ -627,11 +578,11 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] 
}, { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, - { PCI_VDEVICE(HYGON, 0x1456), (kernel_ulong_t)&dev_vdata[9] }, - { PCI_VDEVICE(HYGON, 0x1468), (kernel_ulong_t)&dev_vdata[10] }, - { PCI_VDEVICE(HYGON, 0x1486), (kernel_ulong_t)&dev_vdata[11] }, - { PCI_VDEVICE(HYGON, 0x14b8), (kernel_ulong_t)&dev_vdata[10] }, - { PCI_VDEVICE(HYGON, 0x14a6), (kernel_ulong_t)&dev_vdata[11] }, + { PCI_VDEVICE(HYGON, 0x1456), (kernel_ulong_t)&hygon_dev_vdata[0] }, + { PCI_VDEVICE(HYGON, 0x1468), (kernel_ulong_t)&hygon_dev_vdata[1] }, + { PCI_VDEVICE(HYGON, 0x1486), (kernel_ulong_t)&hygon_dev_vdata[2] }, + { PCI_VDEVICE(HYGON, 0x14b8), (kernel_ulong_t)&hygon_dev_vdata[1] }, + { PCI_VDEVICE(HYGON, 0x14a6), (kernel_ulong_t)&hygon_dev_vdata[2] }, /* Last entry must be zero */ { 0, } }; -- Gitee From ce04cb85bd6876e2d4bb64b3ea92dfc6f730c4a4 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 17 Oct 2024 15:14:48 +0800 Subject: [PATCH 1483/2138] anolis: crypto: ccp: Move the fixup code for Hygon psp to Hygon specific files ANBZ: #11419 So far, we have introduced specific files to prevent intrusion into the native code. Move the fixup code for Hygon psp to these specific files. If the Hygon psp was not configured with CSV capability, we should report that the device is unavailable. 
Fixes: 39e18cb04c1f ("anolis: crypto: ccp: Fixup the capability of Hygon PSP during initialization") Fixes: 152dd47fca6c ("anolis: crypto: ccp: Return -ENODEV if Hygon PSP is not configured with CSV capability") Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4009 --- drivers/crypto/ccp/hygon/psp-dev.c | 11 +++++++++++ drivers/crypto/ccp/hygon/psp-dev.h | 2 ++ drivers/crypto/ccp/psp-dev.c | 17 ++++------------- 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 736f9aaaa37a..dd5285e1ba37 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -17,3 +17,14 @@ /* Function and variable pointers for hooks */ struct hygon_psp_hooks_table hygon_psp_hooks; + +int fixup_hygon_psp_caps(struct psp_device *psp) +{ + /* the hygon psp is unavailable if bit0 is cleared in feature reg */ + if (!(psp->capability & PSP_CAPABILITY_SEV)) + return -ENODEV; + + psp->capability &= ~(PSP_CAPABILITY_TEE | + PSP_CAPABILITY_PSP_SECURITY_REPORTING); + return 0; +} diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index ebeade987053..e187d3f24bdf 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -27,4 +27,6 @@ extern struct hygon_psp_hooks_table { int (*__sev_do_cmd_locked)(int cmd, void *data, int *psp_ret); } hygon_psp_hooks; +int fixup_hygon_psp_caps(struct psp_device *psp); + #endif /* __CCP_HYGON_PSP_DEV_H__ */ diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index b4aea8dbdc28..48338cb2dcb6 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -25,6 +25,8 @@ #include "tdm-dev.h" #endif +#include "hygon/psp-dev.h" + struct psp_device *psp_master; struct psp_misc_dev *psp_misc; @@ -236,17 +238,6 @@ static irqreturn_t psp_irq_handler_hygon(int irq, void *data) } 
#endif -static int hygon_fixup_psp_caps(struct psp_device *psp) -{ - /* the hygon psp is unavailable if bit0 cleared in feature reg */ - if (!(psp->capability & PSP_CAPABILITY_SEV)) - return -ENODEV; - - psp->capability &= ~(PSP_CAPABILITY_TEE | - PSP_CAPABILITY_PSP_SECURITY_REPORTING); - return 0; -} - static unsigned int psp_get_capability(struct psp_device *psp) { unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg); @@ -270,8 +261,8 @@ static unsigned int psp_get_capability(struct psp_device *psp) * Return -ENODEV directly if hygon psp not configured with CSV * capability. */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { - if (hygon_fixup_psp_caps(psp)) + if (is_vendor_hygon()) { + if (fixup_hygon_psp_caps(psp)) return -ENODEV; } -- Gitee From 915e2e2a34d4fad95c52555808948f11eb86c3fb Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 22 Sep 2022 10:59:03 +0800 Subject: [PATCH 1484/2138] anolis: crypto: ccp: Reduce code intrusion in linux/psp-sev.h and uapi/linux/psp-sev.h ANBZ: #11419 The former patches have added definitions for Hygon secure functions to include/linux/psp-sev.h and include/uapi/linux/psp-sev.h. The commit e25c51b7ae05 ("anolis: crypto: ccp: Introduce hygon specific interface to support driver") provide Hygon specific files, we move the Hygon definitions from ../psp-sev.h to ../psp-hygon.h. In addition, move definitions from include/linux/psp-csv.h to include/linux/psp-hygon.h, and delete include/linux/psp-csv.h. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4009 --- arch/x86/kvm/svm/csv.c | 2 +- arch/x86/kvm/svm/sev.c | 1 + arch/x86/kvm/svm/svm.c | 1 + drivers/char/tpm/tcm_hygon.c | 2 +- drivers/char/tpm/tpm_hygon.c | 2 +- drivers/crypto/ccp/csv-dev.c | 2 +- drivers/crypto/ccp/psp-dev.c | 1 + drivers/crypto/ccp/psp-ringbuf.c | 1 + drivers/crypto/ccp/sev-dev.c | 3 +- drivers/crypto/ccp/tdm-dev.c | 2 +- drivers/crypto/ccp/vpsp.c | 2 +- include/linux/psp-csv.h | 288 ------------------ include/linux/psp-hygon.h | 508 +++++++++++++++++++++++++++++++ include/linux/psp-sev.h | 225 -------------- include/uapi/linux/psp-hygon.h | 44 +++ include/uapi/linux/psp-sev.h | 38 --- 16 files changed, 564 insertions(+), 558 deletions(-) delete mode 100644 include/linux/psp-csv.h diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 9b9d86169537..07dc910edb79 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 35258bcc1b62..b8740bca7312 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 3e0cfbd269e7..484a04089234 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include diff --git a/drivers/char/tpm/tcm_hygon.c b/drivers/char/tpm/tcm_hygon.c index ef63d1a0a902..63f5e61d9b3e 100644 --- a/drivers/char/tpm/tcm_hygon.c +++ b/drivers/char/tpm/tcm_hygon.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/char/tpm/tpm_hygon.c b/drivers/char/tpm/tpm_hygon.c index 37e2e1f19c8d..8e509df90290 100644 --- a/drivers/char/tpm/tpm_hygon.c +++ 
b/drivers/char/tpm/tpm_hygon.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/crypto/ccp/csv-dev.c b/drivers/crypto/ccp/csv-dev.c index b9a9ca4fa3c7..78d8d5c5a089 100644 --- a/drivers/crypto/ccp/csv-dev.c +++ b/drivers/crypto/ccp/csv-dev.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include "sev-dev.h" #include "csv-dev.h" diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 48338cb2dcb6..9cf5f0e86532 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "sp-dev.h" #include "psp-dev.h" diff --git a/drivers/crypto/ccp/psp-ringbuf.c b/drivers/crypto/ccp/psp-ringbuf.c index 9b5f886c0b40..09768c9bce31 100644 --- a/drivers/crypto/ccp/psp-ringbuf.c +++ b/drivers/crypto/ccp/psp-ringbuf.c @@ -11,6 +11,7 @@ * published by the Free Software Foundation. */ +#include #include "psp-ringbuf.h" static void enqueue_data(struct csv_queue *queue, diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 54bea4a842a0..b93902e37caa 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -26,7 +26,8 @@ #include #include #include -#include +#include +#include #include #include diff --git a/drivers/crypto/ccp/tdm-dev.c b/drivers/crypto/ccp/tdm-dev.c index 99f6e8f7416d..71ab3f6caaab 100644 --- a/drivers/crypto/ccp/tdm-dev.c +++ b/drivers/crypto/ccp/tdm-dev.c @@ -9,7 +9,7 @@ */ #include #include -#include +#include #include #include #include diff --git a/drivers/crypto/ccp/vpsp.c b/drivers/crypto/ccp/vpsp.c index 3f18530c5353..183355b1c222 100644 --- a/drivers/crypto/ccp/vpsp.c +++ b/drivers/crypto/ccp/vpsp.c @@ -10,7 +10,7 @@ #include #include #include -#include +#include #include #ifdef pr_fmt diff --git a/include/linux/psp-csv.h b/include/linux/psp-csv.h deleted file mode 100644 index 2da1adea8d33..000000000000 --- a/include/linux/psp-csv.h +++ 
/dev/null @@ -1,288 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Hygon Secure Virtualization feature CSV driver interface - * - * Copyright (C) Hygon Info Technologies Ltd. - */ - -#ifndef __PSP_CSV_H__ -#define __PSP_CSV_H__ - -#include - -/** - * Guest/platform management commands for CSV3 - */ -enum csv3_cmd { - /* Guest launch commands */ - CSV3_CMD_SET_GUEST_PRIVATE_MEMORY = 0x200, - CSV3_CMD_LAUNCH_ENCRYPT_DATA = 0x201, - CSV3_CMD_LAUNCH_ENCRYPT_VMCB = 0x202, - /* Guest NPT(Nested Page Table) management commands */ - CSV3_CMD_UPDATE_NPT = 0x203, - - /* Guest migration commands */ - CSV3_CMD_SEND_ENCRYPT_DATA = 0x210, - CSV3_CMD_SEND_ENCRYPT_CONTEXT = 0x211, - CSV3_CMD_RECEIVE_ENCRYPT_DATA = 0x212, - CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT = 0x213, - - /* Guest debug commands */ - CSV3_CMD_DBG_READ_VMSA = 0x220, - CSV3_CMD_DBG_READ_MEM = 0x221, - - /* Platform secure memory management commands */ - CSV3_CMD_SET_SMR = 0x230, - CSV3_CMD_SET_SMCR = 0x231, - - CSV3_CMD_MAX, -}; - -/** - * struct csv3_data_launch_encrypt_data - CSV3_CMD_LAUNCH_ENCRYPT_DATA command - * - * @handle: handle of the VM to update - * @gpa: guest address where data is copied - * @length: len of memory to be encrypted - * @data_blocks: memory regions to hold data page address - */ -struct csv3_data_launch_encrypt_data { - u32 handle; /* In */ - u32 reserved; /* In */ - u64 gpa; /* In */ - u32 length; /* In */ - u32 reserved1; /* In */ - u64 data_blocks[8]; /* In */ -} __packed; - -/** - * struct csv3_data_launch_encrypt_vmcb - CSV3_CMD_LAUNCH_ENCRYPT_VMCB command - * - * @handle: handle of the VM - * @vcpu_id: id of vcpu per vmsa/vmcb - * @vmsa_addr: memory address of initial vmsa data - * @vmsa_len: len of initial vmsa data - * @shadow_vmcb_addr: memory address of shadow vmcb data - * @shadow_vmcb_len: len of shadow vmcb data - * @secure_vmcb_addr: memory address of secure vmcb data - * @secure_vmcb_len: len of secure vmcb data - */ -struct csv3_data_launch_encrypt_vmcb { - u32 
handle; /* In */ - u32 reserved; /* In */ - u32 vcpu_id; /* In */ - u32 reserved1; /* In */ - u64 vmsa_addr; /* In */ - u32 vmsa_len; /* In */ - u32 reserved2; /* In */ - u64 shadow_vmcb_addr; /* In */ - u32 shadow_vmcb_len; /* In */ - u32 reserved3; /* In */ - u64 secure_vmcb_addr; /* Out */ - u32 secure_vmcb_len; /* Out */ -} __packed; - -/** - * struct csv3_data_update_npt - CSV3_CMD_UPDATE_NPT command - * - * @handle: handle assigned to the VM - * @error_code: nested page fault error code - * @gpa: guest page address where npf happens - * @spa: physical address which maps to gpa in host page table - * @level: page level which can be mapped in nested page table - * @page_attr: page attribute for gpa - * @page_attr_mask: which page attribute bit should be set - * @npages: number of pages from gpa is handled. - */ -struct csv3_data_update_npt { - u32 handle; /* In */ - u32 reserved; /* In */ - u32 error_code; /* In */ - u32 reserved1; /* In */ - u64 gpa; /* In */ - u64 spa; /* In */ - u64 level; /* In */ - u64 page_attr; /* In */ - u64 page_attr_mask; /* In */ - u32 npages; /* In/Out */ -} __packed; - -/** - * struct csv3_data_mem_region - define a memory region - * - * @base_address: base address of a memory region - * @size: size of memory region - */ -struct csv3_data_memory_region { - u64 base_address; /* In */ - u64 size; /* In */ -} __packed; - -/** - * struct csv3_data_set_guest_private_memory - CSV3_CMD_SET_GUEST_PRIVATE_MEMORY - * command parameters - * - * @handle: handle assigned to the VM - * @nregions: number of memory regions - * @regions_paddr: address of memory containing multiple memory regions - */ -struct csv3_data_set_guest_private_memory { - u32 handle; /* In */ - u32 nregions; /* In */ - u64 regions_paddr; /* In */ -} __packed; - -/** - * struct csv3_data_set_smr - CSV3_CMD_SET_SMR command parameters - * - * @smr_entry_size: size of SMR entry - * @nregions: number of memory regions - * @regions_paddr: address of memory containing multiple 
memory regions - */ -struct csv3_data_set_smr { - u32 smr_entry_size; /* In */ - u32 nregions; /* In */ - u64 regions_paddr; /* In */ -} __packed; - -/** - * struct csv3_data_set_smcr - CSV3_CMD_SET_SMCR command parameters - * - * @base_address: start address of SMCR memory - * @size: size of SMCR memory - */ -struct csv3_data_set_smcr { - u64 base_address; /* In */ - u64 size; /* In */ -} __packed; - -/** - * struct csv3_data_dbg_read_vmsa - CSV3_CMD_DBG_READ_VMSA command parameters - * - * @handle: handle assigned to the VM - * @spa: system physical address of memory to get vmsa of the specific vcpu - * @size: size of the host memory - * @vcpu_id: the specific vcpu - */ -struct csv3_data_dbg_read_vmsa { - u32 handle; /* In */ - u32 reserved; /* In */ - u64 spa; /* In */ - u32 size; /* In */ - u32 vcpu_id; /* In */ -} __packed; - -/** - * struct csv3_data_dbg_read_mem - CSV3_CMD_DBG_READ_MEM command parameters - * - * @handle: handle assigned to the VM - * @gpa: guest physical address of the memory to access - * @spa: system physical address of memory to get data from gpa - * @size: size of guest memory to access - */ -struct csv3_data_dbg_read_mem { - u32 handle; /* In */ - u32 reserved; /* In */ - u64 gpa; /* In */ - u64 spa; /* In */ - u32 size; /* In */ -} __packed; - -/** - * struct csv3_data_send_encrypt_data - SEND_ENCRYPT_DATA command parameters - * - * @handle: handle of the VM to process - * @hdr_address: physical address containing packet header - * @hdr_len: len of packet header - * @guest_block: physical address containing multiple guest address - * @guest_len: len of guest block - * @flag: flag of send encrypt data - * 0x00000000: migrate pages in guest block - * 0x00000001: set readonly of pages in guest block - * others: invalid - * @trans_block: physical address of a page containing multiple host memory pages - * @trans_len: len of host memory region - */ -struct csv3_data_send_encrypt_data { - u32 handle; /* In */ - u32 reserved; /* In */ - u64 
hdr_address; /* In */ - u32 hdr_len; /* In/Out */ - u32 reserved1; /* In */ - u64 guest_block; /* In */ - u32 guest_len; /* In */ - u32 flag; /* In */ - u64 trans_block; /* In */ - u32 trans_len; /* In/Out */ -} __packed; - -/** - * struct csv3_data_send_encrypt_context - SEND_ENCRYPT_CONTEXT command parameters - * - * @handle: handle of the VM to process - * @hdr_address: physical address containing packet header - * @hdr_len: len of packet header - * @trans_block: physical address of a page containing multiple host memory pages - * @trans_len: len of host memory region - */ -struct csv3_data_send_encrypt_context { - u32 handle; /* In */ - u32 reserved; /* In */ - u64 hdr_address; /* In */ - u32 hdr_len; /* In/Out */ - u32 reserved1; /* In */ - u64 trans_block; /* In */ - u32 trans_len; /* In/Out */ -} __packed; - -/** - * struct csv3_data_receive_encrypt_data - RECEIVE_ENCRYPT_DATA command parameters - * - * @handle: handle of the VM to process - * @hdr_address: physical address containing packet header blob - * @hdr_len: len of packet header - * @guest_block: system physical address containing multiple guest address - * @guest_len: len of guest block memory region - * @trans_block: physical address of a page containing multiple host memory pages - * @trans_len: len of host memory region - */ -struct csv3_data_receive_encrypt_data { - u32 handle; /* In */ - u32 reserved; /* In */ - u64 hdr_address; /* In */ - u32 hdr_len; /* In */ - u32 reserved1; /* In */ - u64 guest_block; /* In */ - u32 guest_len; /* In */ - u32 reserved2; /* In */ - u64 trans_block; /* In */ - u32 trans_len; /* In */ -} __packed; - -/** - * struct csv3_data_receive_encrypt_context - RECEIVE_ENCRYPT_CONTEXT command parameters - * - * @handle: handle of the VM to process - * @hdr_address: physical address containing packet header - * @hdr_len: len of packet header - * @trans_block: physical address of a page containing multiple host memory pages - * @trans_len: len of host memory region - * 
@shadow_vmcb_block: physical address of a page containing multiple shadow vmcb address - * @secure_vmcb_block: physical address of a page containing multiple secure vmcb address - * @vmcb_block_len: len of shadow/secure vmcb block - */ -struct csv3_data_receive_encrypt_context { - u32 handle; /* In */ - u32 reserved; /* In */ - u64 hdr_address; /* In */ - u32 hdr_len; /* In */ - u32 reserved1; /* In */ - u64 trans_block; /* In */ - u32 trans_len; /* In */ - u32 reserved2; /* In */ - u64 shadow_vmcb_block; /* In */ - u64 secure_vmcb_block; /* In */ - u32 vmcb_block_len; /* In */ -} __packed; - -#endif diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 944db2e2ecc0..aae8a7da1dac 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -10,8 +10,516 @@ #ifndef __PSP_HYGON_H__ #define __PSP_HYGON_H__ +#include +#include + +/*****************************************************************************/ +/***************************** CSV interface *********************************/ +/*****************************************************************************/ + +#define CSV_FW_MAX_SIZE 0x80000 /* 512KB */ + +/** + * Guest/platform management commands for CSV + */ +enum csv_cmd { + CSV_CMD_RING_BUFFER = 0x00F, + CSV_CMD_HGSC_CERT_IMPORT = 0x300, + CSV_CMD_MAX, +}; + +/** + * Guest/platform management commands for CSV3 + */ +enum csv3_cmd { + /* Guest launch commands */ + CSV3_CMD_SET_GUEST_PRIVATE_MEMORY = 0x200, + CSV3_CMD_LAUNCH_ENCRYPT_DATA = 0x201, + CSV3_CMD_LAUNCH_ENCRYPT_VMCB = 0x202, + /* Guest NPT(Nested Page Table) management commands */ + CSV3_CMD_UPDATE_NPT = 0x203, + + /* Guest migration commands */ + CSV3_CMD_SEND_ENCRYPT_DATA = 0x210, + CSV3_CMD_SEND_ENCRYPT_CONTEXT = 0x211, + CSV3_CMD_RECEIVE_ENCRYPT_DATA = 0x212, + CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT = 0x213, + + /* Guest debug commands */ + CSV3_CMD_DBG_READ_VMSA = 0x220, + CSV3_CMD_DBG_READ_MEM = 0x221, + + /* Platform secure memory management commands */ + 
CSV3_CMD_SET_SMR = 0x230, + CSV3_CMD_SET_SMCR = 0x231, + + CSV3_CMD_MAX, +}; + +/** + * CSV communication state + */ +enum csv_comm_state { + CSV_COMM_MAILBOX_ON = 0x0, + CSV_COMM_RINGBUFFER_ON = 0x1, + + CSV_COMM_MAX +}; + +/** + * Ring Buffer Mode regions: + * There are 4 regions and every region is a 4K area that must be 4K aligned. + * To accomplish this allocate an amount that is the size of area and the + * required alignment. + * The aligned address will be calculated from the returned address. + */ +#define CSV_RING_BUFFER_SIZE (32 * 1024) +#define CSV_RING_BUFFER_ALIGN (4 * 1024) +#define CSV_RING_BUFFER_LEN (CSV_RING_BUFFER_SIZE + CSV_RING_BUFFER_ALIGN) +#define CSV_RING_BUFFER_ESIZE 16 + +/** + * struct csv_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters + * + * @hgscsk_cert_address: HGSCSK certificate chain + * @hgscsk_cert_len: len of HGSCSK certificate + * @hgsc_cert_address: HGSC certificate chain + * @hgsc_cert_len: len of HGSC certificate + */ +struct csv_data_hgsc_cert_import { + u64 hgscsk_cert_address; /* In */ + u32 hgscsk_cert_len; /* In */ + u32 reserved; /* In */ + u64 hgsc_cert_address; /* In */ + u32 hgsc_cert_len; /* In */ +} __packed; + +#define CSV_COMMAND_PRIORITY_HIGH 0 +#define CSV_COMMAND_PRIORITY_LOW 1 +#define CSV_COMMAND_PRIORITY_NUM 2 + +struct csv_cmdptr_entry { + u16 cmd_id; + u16 cmd_flags; + u32 sw_data; + u64 cmd_buf_ptr; +} __packed; + +struct csv_statval_entry { + u16 status; + u16 reserved0; + u32 reserved1; + u64 reserved2; +} __packed; + +struct csv_queue { + u32 head; + u32 tail; + u32 mask; /* mask = (size - 1), inicates the elements max count */ + u32 esize; /* size of an element */ + u64 data; + u64 data_align; +} __packed; + +struct csv_ringbuffer_queue { + struct csv_queue cmd_ptr; + struct csv_queue stat_val; +} __packed; + +/** + * struct csv_data_ring_buffer - RING_BUFFER command parameters + * + * @queue_lo_cmdptr_address: physical address of the region to be used for + * low priority queue's 
CmdPtr ring buffer + * @queue_lo_statval_address: physical address of the region to be used for + * low priority queue's StatVal ring buffer + * @queue_hi_cmdptr_address: physical address of the region to be used for + * high priority queue's CmdPtr ring buffer + * @queue_hi_statval_address: physical address of the region to be used for + * high priority queue's StatVal ring buffer + * @queue_lo_size: size of the low priority queue in 4K pages. Must be 1 + * @queue_hi_size: size of the high priority queue in 4K pages. Must be 1 + * @queue_lo_threshold: queue(low) size, below which an interrupt may be generated + * @queue_hi_threshold: queue(high) size, below which an interrupt may be generated + * @int_on_empty: unconditionally interrupt when both queues are found empty + */ +struct csv_data_ring_buffer { + u64 queue_lo_cmdptr_address; /* In */ + u64 queue_lo_statval_address; /* In */ + u64 queue_hi_cmdptr_address; /* In */ + u64 queue_hi_statval_address; /* In */ + u8 queue_lo_size; /* In */ + u8 queue_hi_size; /* In */ + u16 queue_lo_threshold; /* In */ + u16 queue_hi_threshold; /* In */ + u16 int_on_empty; /* In */ +} __packed; + +/** + * struct csv3_data_launch_encrypt_data - CSV3_CMD_LAUNCH_ENCRYPT_DATA command + * + * @handle: handle of the VM to update + * @gpa: guest address where data is copied + * @length: len of memory to be encrypted + * @data_blocks: memory regions to hold data page address + */ +struct csv3_data_launch_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 gpa; /* In */ + u32 length; /* In */ + u32 reserved1; /* In */ + u64 data_blocks[8]; /* In */ +} __packed; + +/** + * struct csv3_data_launch_encrypt_vmcb - CSV3_CMD_LAUNCH_ENCRYPT_VMCB command + * + * @handle: handle of the VM + * @vcpu_id: id of vcpu per vmsa/vmcb + * @vmsa_addr: memory address of initial vmsa data + * @vmsa_len: len of initial vmsa data + * @shadow_vmcb_addr: memory address of shadow vmcb data + * @shadow_vmcb_len: len of shadow vmcb data + * 
@secure_vmcb_addr: memory address of secure vmcb data + * @secure_vmcb_len: len of secure vmcb data + */ +struct csv3_data_launch_encrypt_vmcb { + u32 handle; /* In */ + u32 reserved; /* In */ + u32 vcpu_id; /* In */ + u32 reserved1; /* In */ + u64 vmsa_addr; /* In */ + u32 vmsa_len; /* In */ + u32 reserved2; /* In */ + u64 shadow_vmcb_addr; /* In */ + u32 shadow_vmcb_len; /* In */ + u32 reserved3; /* In */ + u64 secure_vmcb_addr; /* Out */ + u32 secure_vmcb_len; /* Out */ +} __packed; + +/** + * struct csv3_data_update_npt - CSV3_CMD_UPDATE_NPT command + * + * @handle: handle assigned to the VM + * @error_code: nested page fault error code + * @gpa: guest page address where npf happens + * @spa: physical address which maps to gpa in host page table + * @level: page level which can be mapped in nested page table + * @page_attr: page attribute for gpa + * @page_attr_mask: which page attribute bit should be set + * @npages: number of pages from gpa is handled. + */ +struct csv3_data_update_npt { + u32 handle; /* In */ + u32 reserved; /* In */ + u32 error_code; /* In */ + u32 reserved1; /* In */ + u64 gpa; /* In */ + u64 spa; /* In */ + u64 level; /* In */ + u64 page_attr; /* In */ + u64 page_attr_mask; /* In */ + u32 npages; /* In/Out */ +} __packed; + +/** + * struct csv3_data_mem_region - define a memory region + * + * @base_address: base address of a memory region + * @size: size of memory region + */ +struct csv3_data_memory_region { + u64 base_address; /* In */ + u64 size; /* In */ +} __packed; + +/** + * struct csv3_data_set_guest_private_memory - CSV3_CMD_SET_GUEST_PRIVATE_MEMORY + * command parameters + * + * @handle: handle assigned to the VM + * @nregions: number of memory regions + * @regions_paddr: address of memory containing multiple memory regions + */ +struct csv3_data_set_guest_private_memory { + u32 handle; /* In */ + u32 nregions; /* In */ + u64 regions_paddr; /* In */ +} __packed; + +/** + * struct csv3_data_set_smr - CSV3_CMD_SET_SMR command 
parameters + * + * @smr_entry_size: size of SMR entry + * @nregions: number of memory regions + * @regions_paddr: address of memory containing multiple memory regions + */ +struct csv3_data_set_smr { + u32 smr_entry_size; /* In */ + u32 nregions; /* In */ + u64 regions_paddr; /* In */ +} __packed; + +/** + * struct csv3_data_set_smcr - CSV3_CMD_SET_SMCR command parameters + * + * @base_address: start address of SMCR memory + * @size: size of SMCR memory + */ +struct csv3_data_set_smcr { + u64 base_address; /* In */ + u64 size; /* In */ +} __packed; + +/** + * struct csv3_data_dbg_read_vmsa - CSV3_CMD_DBG_READ_VMSA command parameters + * + * @handle: handle assigned to the VM + * @spa: system physical address of memory to get vmsa of the specific vcpu + * @size: size of the host memory + * @vcpu_id: the specific vcpu + */ +struct csv3_data_dbg_read_vmsa { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 spa; /* In */ + u32 size; /* In */ + u32 vcpu_id; /* In */ +} __packed; + +/** + * struct csv3_data_dbg_read_mem - CSV3_CMD_DBG_READ_MEM command parameters + * + * @handle: handle assigned to the VM + * @gpa: guest physical address of the memory to access + * @spa: system physical address of memory to get data from gpa + * @size: size of guest memory to access + */ +struct csv3_data_dbg_read_mem { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 gpa; /* In */ + u64 spa; /* In */ + u32 size; /* In */ +} __packed; + +/** + * struct csv3_data_send_encrypt_data - SEND_ENCRYPT_DATA command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @guest_block: physical address containing multiple guest address + * @guest_len: len of guest block + * @flag: flag of send encrypt data + * 0x00000000: migrate pages in guest block + * 0x00000001: set readonly of pages in guest block + * others: invalid + * @trans_block: physical address of a page containing multiple host 
memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_send_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In/Out */ + u32 reserved1; /* In */ + u64 guest_block; /* In */ + u32 guest_len; /* In */ + u32 flag; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In/Out */ +} __packed; + +/** + * struct csv3_data_send_encrypt_context - SEND_ENCRYPT_CONTEXT command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_send_encrypt_context { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In/Out */ + u32 reserved1; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In/Out */ +} __packed; + +/** + * struct csv3_data_receive_encrypt_data - RECEIVE_ENCRYPT_DATA command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header blob + * @hdr_len: len of packet header + * @guest_block: system physical address containing multiple guest address + * @guest_len: len of guest block memory region + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_receive_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved1; /* In */ + u64 guest_block; /* In */ + u32 guest_len; /* In */ + u32 reserved2; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In */ +} __packed; + +/** + * struct csv3_data_receive_encrypt_context - RECEIVE_ENCRYPT_CONTEXT command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: 
len of packet header + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + * @shadow_vmcb_block: physical address of a page containing multiple shadow vmcb address + * @secure_vmcb_block: physical address of a page containing multiple secure vmcb address + * @vmcb_block_len: len of shadow/secure vmcb block + */ +struct csv3_data_receive_encrypt_context { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved1; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In */ + u32 reserved2; /* In */ + u64 shadow_vmcb_block; /* In */ + u64 secure_vmcb_block; /* In */ + u32 vmcb_block_len; /* In */ +} __packed; + +/** + * enum VPSP_CMD_STATUS - virtual psp command status + * + * @VPSP_INIT: the initial command from guest + * @VPSP_RUNNING: the middle command to check and run ringbuffer command + * @VPSP_FINISH: inform the guest that the command ran successfully + */ +enum VPSP_CMD_STATUS { + VPSP_INIT = 0, + VPSP_RUNNING, + VPSP_FINISH, + VPSP_MAX +}; + +/** + * struct vpsp_cmd - virtual psp command + * + * @cmd_id: the command id is used to distinguish different commands + * @is_high_rb: indicates the ringbuffer level in which the command is placed + */ +struct vpsp_cmd { + u32 cmd_id : 31; + u32 is_high_rb : 1; +}; + +/** + * struct vpsp_ret - virtual psp return result + * + * @pret: the return code from device + * @resv: reserved bits + * @index: used to distinguish the position of command in the ringbuffer + * @status: indicates the current status of the related command + */ +struct vpsp_ret { + u32 pret : 16; + u32 resv : 2; + u32 index : 12; + u32 status : 2; +}; + +struct kvm_vpsp { + struct kvm *kvm; + int (*write_guest)(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); + int (*read_guest)(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); +}; + +#define PSP_VID_MASK 0xff +#define PSP_VID_SHIFT 56 +#define 
PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) +#define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) +#define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) + #ifdef CONFIG_CRYPTO_DEV_SP_PSP + +int psp_do_cmd(int cmd, void *data, int *psp_ret); + +int csv_ring_buffer_queue_init(void); +int csv_ring_buffer_queue_free(void); +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); +int csv_check_stat_queue_status(int *psp_ret); + +/** + * csv_issue_ringbuf_cmds_external_user - issue CSV commands into a ring + * buffer. + */ +int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); + +int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, + void *data, struct vpsp_ret *psp_ret); + +int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret); + +int vpsp_get_vid(uint32_t *vid, pid_t pid); + +int vpsp_get_default_vid_permission(void); + +int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, + gpa_t table_gpa); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ + +static inline int psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; } + +static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } +static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } +static inline +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; } +static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } + +static inline int +csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; } + +static inline int +vpsp_try_get_result(uint8_t prio, uint32_t index, + void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } + +static inline int +vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } + +static inline int +vpsp_get_default_vid_permission(void) { 
return -ENODEV; } + +static inline int +kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, + gpa_t psp_ret_gpa, gpa_t table_gpa) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ +typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); + +#ifdef CONFIG_HYGON_PSP2CPU_CMD + +int psp_register_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier); +int psp_unregister_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier); + +#else /* !CONFIG_HYGON_PSP2CPU_CMD */ + +int psp_register_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) { return -ENODEV; } +int psp_unregister_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) { return -ENODEV; } + +#endif /* CONFIG_HYGON_PSP2CPU_CMD */ + #endif /* __PSP_HYGON_H__ */ diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 9a144026f89a..76ee067a962c 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -13,12 +13,9 @@ #define __PSP_SEV_H__ #include -#include #define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */ -#define CSV_FW_MAX_SIZE 0x80000 /* 512KB */ - /** * SEV platform state */ @@ -84,34 +81,6 @@ enum sev_cmd { SEV_CMD_MAX, }; -/** - * CSV communication state - */ -enum csv_comm_state { - CSV_COMM_MAILBOX_ON = 0x0, - CSV_COMM_RINGBUFFER_ON = 0x1, - - CSV_COMM_MAX -}; - -enum csv_cmd { - CSV_CMD_RING_BUFFER = 0x00F, - CSV_CMD_HGSC_CERT_IMPORT = 0x300, - CSV_CMD_MAX, -}; - -/** - * Ring Buffer Mode regions: - * There are 4 regions and every region is a 4K area that must be 4K aligned. - * To accomplish this allocate an amount that is the size of area and the - * required alignment. - * The aligned address will be calculated from the returned address. 
- */ -#define CSV_RING_BUFFER_SIZE (32 * 1024) -#define CSV_RING_BUFFER_ALIGN (4 * 1024) -#define CSV_RING_BUFFER_LEN (CSV_RING_BUFFER_SIZE + CSV_RING_BUFFER_ALIGN) -#define CSV_RING_BUFFER_ESIZE 16 - /** * struct sev_data_init - INIT command parameters * @@ -555,149 +524,8 @@ struct sev_data_attestation_report { u32 len; /* In/Out */ } __packed; -/** - * struct csv_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters - * - * @hgscsk_cert_address: HGSCSK certificate chain - * @hgscsk_cert_len: len of HGSCSK certificate - * @hgsc_cert_address: HGSC certificate chain - * @hgsc_cert_len: len of HGSC certificate - */ -struct csv_data_hgsc_cert_import { - u64 hgscsk_cert_address; /* In */ - u32 hgscsk_cert_len; /* In */ - u32 reserved; /* In */ - u64 hgsc_cert_address; /* In */ - u32 hgsc_cert_len; /* In */ -} __packed; - -#define CSV_COMMAND_PRIORITY_HIGH 0 -#define CSV_COMMAND_PRIORITY_LOW 1 -#define CSV_COMMAND_PRIORITY_NUM 2 - -struct csv_cmdptr_entry { - u16 cmd_id; - u16 cmd_flags; - u32 sw_data; - u64 cmd_buf_ptr; -} __packed; - -struct csv_statval_entry { - u16 status; - u16 reserved0; - u32 reserved1; - u64 reserved2; -} __packed; - -struct csv_queue { - u32 head; - u32 tail; - u32 mask; /* mask = (size - 1), inicates the elements max count */ - u32 esize; /* size of an element */ - u64 data; - u64 data_align; -} __packed; - -struct csv_ringbuffer_queue { - struct csv_queue cmd_ptr; - struct csv_queue stat_val; -} __packed; - -/** - * struct csv_data_ring_buffer - RING_BUFFER command parameters - * - * @queue_lo_cmdptr_address: physical address of the region to be used for - * low priority queue's CmdPtr ring buffer - * @queue_lo_statval_address: physical address of the region to be used for - * low priority queue's StatVal ring buffer - * @queue_hi_cmdptr_address: physical address of the region to be used for - * high priority queue's CmdPtr ring buffer - * @queue_hi_statval_address: physical address of the region to be used for - * high priority 
queue's StatVal ring buffer - * @queue_lo_size: size of the low priority queue in 4K pages. Must be 1 - * @queue_hi_size: size of the high priority queue in 4K pages. Must be 1 - * @queue_lo_threshold: queue(low) size, below which an interrupt may be generated - * @queue_hi_threshold: queue(high) size, below which an interrupt may be generated - * @int_on_empty: unconditionally interrupt when both queues are found empty - */ -struct csv_data_ring_buffer { - u64 queue_lo_cmdptr_address; /* In */ - u64 queue_lo_statval_address; /* In */ - u64 queue_hi_cmdptr_address; /* In */ - u64 queue_hi_statval_address; /* In */ - u8 queue_lo_size; /* In */ - u8 queue_hi_size; /* In */ - u16 queue_lo_threshold; /* In */ - u16 queue_hi_threshold; /* In */ - u16 int_on_empty; /* In */ -} __packed; - -/** - * enum VPSP_CMD_STATUS - virtual psp command status - * - * @VPSP_INIT: the initial command from guest - * @VPSP_RUNNING: the middle command to check and run ringbuffer command - * @VPSP_FINISH: inform the guest that the command ran successfully - */ -enum VPSP_CMD_STATUS { - VPSP_INIT = 0, - VPSP_RUNNING, - VPSP_FINISH, - VPSP_MAX -}; - -/** - * struct vpsp_cmd - virtual psp command - * - * @cmd_id: the command id is used to distinguish different commands - * @is_high_rb: indicates the ringbuffer level in which the command is placed - */ -struct vpsp_cmd { - u32 cmd_id : 31; - u32 is_high_rb : 1; -}; - -/** - * struct vpsp_ret - virtual psp return result - * - * @pret: the return code from device - * @resv: reserved bits - * @index: used to distinguish the position of command in the ringbuffer - * @status: indicates the current status of the related command - */ -struct vpsp_ret { - u32 pret : 16; - u32 resv : 2; - u32 index : 12; - u32 status : 2; -}; - -struct kvm_vpsp { - struct kvm *kvm; - int (*write_guest)(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); - int (*read_guest)(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); -}; - -#define 
PSP_VID_MASK 0xff -#define PSP_VID_SHIFT 56 -#define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) -#define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) -#define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) - -#ifdef CONFIG_HYGON_PSP2CPU_CMD - -typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); - -int psp_register_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)); - -int psp_unregister_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)); - -#endif - #ifdef CONFIG_CRYPTO_DEV_SP_PSP -int psp_do_cmd(int cmd, void *data, int *psp_ret); - /** * sev_platform_init - perform SEV INIT command * @@ -812,36 +640,8 @@ int sev_guest_decommission(struct sev_data_decommission *data, int *error); void *psp_copy_user_blob(u64 uaddr, u32 len); -int csv_ring_buffer_queue_init(void); - -int csv_ring_buffer_queue_free(void); - -int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); - -int csv_check_stat_queue_status(int *psp_ret); - -/** - * csv_issue_ringbuf_cmds_external_user - issue CSV commands into a ring - * buffer. 
- */ -int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); - -int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, - void *data, struct vpsp_ret *psp_ret); - -int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret); - -int vpsp_get_vid(uint32_t *vid, pid_t pid); - -int vpsp_get_default_vid_permission(void); - -int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, - gpa_t table_gpa); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ -static inline int -psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; } - static inline int sev_platform_status(struct sev_user_data_status *status, int *error) { return -ENODEV; } @@ -863,31 +663,6 @@ sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int static inline void *psp_copy_user_blob(u64 __user uaddr, u32 len) { return ERR_PTR(-EINVAL); } -static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } - -static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } - -static inline -int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; } - -static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } - -static inline int -csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; } - -static inline int -vpsp_try_get_result(uint8_t prio, uint32_t index, - void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } - -static inline int -vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } - -static inline int -vpsp_get_default_vid_permission(void) { return -ENODEV; } - -static inline int -kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, - gpa_t psp_ret_gpa, gpa_t table_gpa) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_SEV_H__ */ diff --git a/include/uapi/linux/psp-hygon.h b/include/uapi/linux/psp-hygon.h index 
e1ac9c04dc55..0e65afbeea3c 100644 --- a/include/uapi/linux/psp-hygon.h +++ b/include/uapi/linux/psp-hygon.h @@ -11,4 +11,48 @@ #ifndef __PSP_HYGON_USER_H__ #define __PSP_HYGON_USER_H__ +#include + +/*****************************************************************************/ +/***************************** CSV interface *********************************/ +/*****************************************************************************/ + +/** + * CSV guest/platform commands + */ +enum { + CSV_PLATFORM_INIT = 101, + CSV_PLATFORM_SHUTDOWN = 102, + CSV_DOWNLOAD_FIRMWARE = 128, + CSV_HGSC_CERT_IMPORT = 201, + + CSV_MAX, +}; + +/** + * struct csv_user_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters + * + * @hgscsk_cert_address: HGSCSK certificate chain + * @hgscsk_cert_len: length of HGSCSK certificate + * @hgsc_cert_address: HGSC certificate chain + * @hgsc_cert_len: length of HGSC certificate + */ +struct csv_user_data_hgsc_cert_import { + __u64 hgscsk_cert_address; /* In */ + __u32 hgscsk_cert_len; /* In */ + __u64 hgsc_cert_address; /* In */ + __u32 hgsc_cert_len; /* In */ +} __packed; + +/** + * struct csv_user_data_download_firmware - DOWNLOAD_FIRMWARE command parameters + * + * @address: physical address of CSV firmware image + * @length: length of the CSV firmware image + */ +struct csv_user_data_download_firmware { + __u64 address; /* In */ + __u32 length; /* In */ +} __packed; + #endif /* __PSP_HYGON_USER_H__ */ diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 07db804852a2..1c9da485318f 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -32,18 +32,6 @@ enum { SEV_MAX, }; -/** - * CSV platform commands - */ -enum { - CSV_PLATFORM_INIT = 101, - CSV_PLATFORM_SHUTDOWN = 102, - CSV_DOWNLOAD_FIRMWARE = 128, - CSV_HGSC_CERT_IMPORT = 201, - - CSV_MAX, -}; - /** * SEV Firmware status code */ @@ -166,32 +154,6 @@ struct sev_user_data_get_id2 { __u32 length; /* In/Out */ } __packed; -/** - * struct 
csv_user_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters - * - * @hgscsk_cert_address: HGSCSK certificate chain - * @hgscsk_cert_len: length of HGSCSK certificate - * @hgsc_cert_address: HGSC certificate chain - * @hgsc_cert_len: length of HGSC certificate - */ -struct csv_user_data_hgsc_cert_import { - __u64 hgscsk_cert_address; /* In */ - __u32 hgscsk_cert_len; /* In */ - __u64 hgsc_cert_address; /* In */ - __u32 hgsc_cert_len; /* In */ -} __packed; - -/** - * struct csv_user_data_download_firmware - DOWNLOAD_FIRMWARE command parameters - * - * @address: physical address of CSV firmware image - * @length: length of the CSV firmware image - */ -struct csv_user_data_download_firmware { - __u64 address; /* In */ - __u32 length; /* In */ -} __packed; - /** * struct sev_issue_cmd - SEV ioctl parameters * -- Gitee From 1aad2cd4859f0bb59df2e051e5875453e6171cc3 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 22 Sep 2022 10:59:03 +0800 Subject: [PATCH 1485/2138] anolis: crypto: ccp: Move CSV_HGSC_CERT_IMPORT ioctl handler to hygon/csv-dev.c ANBZ: #11419 To reduce the code intrusion, we move CSV_HGSC_CERT_IMPORT ioctl handler from .../sev-dev.c to .../hygon/csv-dev.c. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4009 --- drivers/crypto/ccp/Makefile | 1 + drivers/crypto/ccp/hygon/csv-dev.c | 92 ++++++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/csv-dev.h | 18 ++++++ drivers/crypto/ccp/sev-dev.c | 88 ++++------------------------ 4 files changed, 122 insertions(+), 77 deletions(-) create mode 100644 drivers/crypto/ccp/hygon/csv-dev.c create mode 100644 drivers/crypto/ccp/hygon/csv-dev.h diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index fbce73f0deb8..3de149462f87 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -16,6 +16,7 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ platform-access.o \ dbc.o \ hygon/psp-dev.o \ + hygon/csv-dev.o \ psp-ringbuf.o \ csv-dev.o \ vpsp.o diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c new file mode 100644 index 000000000000..051eab8261a2 --- /dev/null +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON CSV interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include + +#include "csv-dev.h" +#include "psp-dev.h" + +int csv_cmd_buffer_len(int cmd) +{ + switch (cmd) { + case CSV_CMD_HGSC_CERT_IMPORT: return sizeof(struct csv_data_hgsc_cert_import); + case CSV_CMD_RING_BUFFER: return sizeof(struct csv_data_ring_buffer); + case CSV3_CMD_LAUNCH_ENCRYPT_DATA: return sizeof(struct csv3_data_launch_encrypt_data); + case CSV3_CMD_LAUNCH_ENCRYPT_VMCB: return sizeof(struct csv3_data_launch_encrypt_vmcb); + case CSV3_CMD_UPDATE_NPT: return sizeof(struct csv3_data_update_npt); + case CSV3_CMD_SET_SMR: return sizeof(struct csv3_data_set_smr); + case CSV3_CMD_SET_SMCR: return sizeof(struct csv3_data_set_smcr); + case CSV3_CMD_SET_GUEST_PRIVATE_MEMORY: + return sizeof(struct csv3_data_set_guest_private_memory); + case CSV3_CMD_DBG_READ_VMSA: return sizeof(struct csv3_data_dbg_read_vmsa); + case CSV3_CMD_DBG_READ_MEM: return sizeof(struct csv3_data_dbg_read_mem); + case CSV3_CMD_SEND_ENCRYPT_DATA: return sizeof(struct csv3_data_send_encrypt_data); + case CSV3_CMD_SEND_ENCRYPT_CONTEXT: + return sizeof(struct csv3_data_send_encrypt_context); + case CSV3_CMD_RECEIVE_ENCRYPT_DATA: + return sizeof(struct csv3_data_receive_encrypt_data); + case CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT: + return sizeof(struct csv3_data_receive_encrypt_context); + default: return 0; + } +} + +int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp) +{ + struct csv_user_data_hgsc_cert_import input; + struct csv_data_hgsc_cert_import *data; + void *hgscsk_blob, *hgsc_blob; + int ret; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* copy HGSCSK certificate blobs from userspace */ + hgscsk_blob = psp_copy_user_blob(input.hgscsk_cert_address, input.hgscsk_cert_len); + if (IS_ERR(hgscsk_blob)) { + ret = PTR_ERR(hgscsk_blob); + goto e_free; + } + + 
data->hgscsk_cert_address = __psp_pa(hgscsk_blob); + data->hgscsk_cert_len = input.hgscsk_cert_len; + + /* copy HGSC certificate blobs from userspace */ + hgsc_blob = psp_copy_user_blob(input.hgsc_cert_address, input.hgsc_cert_len); + if (IS_ERR(hgsc_blob)) { + ret = PTR_ERR(hgsc_blob); + goto e_free_hgscsk; + } + + data->hgsc_cert_address = __psp_pa(hgsc_blob); + data->hgsc_cert_len = input.hgsc_cert_len; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(CSV_CMD_HGSC_CERT_IMPORT, + data, &argp->error); + + kfree(hgsc_blob); +e_free_hgscsk: + kfree(hgscsk_blob); +e_free: + kfree(data); + return ret; +} diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h new file mode 100644 index 000000000000..0dfe81b21396 --- /dev/null +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON CSV driver interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef __CCP_HYGON_CSV_DEV_H__ +#define __CCP_HYGON_CSV_DEV_H__ + +#include + +int csv_cmd_buffer_len(int cmd); +int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp); + +#endif /* __CCP_HYGON_CSV_DEV_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index b93902e37caa..956526c83d22 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -37,6 +37,7 @@ #include "csv-dev.h" #include "hygon/psp-dev.h" +#include "hygon/csv-dev.h" #define DEVICE_NAME "sev" #define SEV_FW_FILE "amd/sev.fw" @@ -185,39 +186,16 @@ static int csv_wait_cmd_ioc_ring_buffer(struct sev_device *sev, static int sev_cmd_buffer_len(int cmd) { - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { - switch (cmd) { - case CSV_CMD_HGSC_CERT_IMPORT: - return sizeof(struct csv_data_hgsc_cert_import); - case CSV_CMD_RING_BUFFER: - return sizeof(struct csv_data_ring_buffer); - case CSV3_CMD_LAUNCH_ENCRYPT_DATA: - return sizeof(struct csv3_data_launch_encrypt_data); - case 
CSV3_CMD_LAUNCH_ENCRYPT_VMCB: - return sizeof(struct csv3_data_launch_encrypt_vmcb); - case CSV3_CMD_UPDATE_NPT: - return sizeof(struct csv3_data_update_npt); - case CSV3_CMD_SET_SMR: - return sizeof(struct csv3_data_set_smr); - case CSV3_CMD_SET_SMCR: - return sizeof(struct csv3_data_set_smcr); - case CSV3_CMD_SET_GUEST_PRIVATE_MEMORY: - return sizeof(struct csv3_data_set_guest_private_memory); - case CSV3_CMD_DBG_READ_VMSA: - return sizeof(struct csv3_data_dbg_read_vmsa); - case CSV3_CMD_DBG_READ_MEM: - return sizeof(struct csv3_data_dbg_read_mem); - case CSV3_CMD_SEND_ENCRYPT_DATA: - return sizeof(struct csv3_data_send_encrypt_data); - case CSV3_CMD_SEND_ENCRYPT_CONTEXT: - return sizeof(struct csv3_data_send_encrypt_context); - case CSV3_CMD_RECEIVE_ENCRYPT_DATA: - return sizeof(struct csv3_data_receive_encrypt_data); - case CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT: - return sizeof(struct csv3_data_receive_encrypt_context); - default: - break; - } + /* + * The Hygon CSV command may conflict with AMD SEV command, so it's + * preferred to check whether it's a CSV-specific command for Hygon + * psp. 
+ */ + if (is_vendor_hygon()) { + int r = csv_cmd_buffer_len(cmd); + + if (r) + return r; } switch (cmd) { @@ -1587,50 +1565,6 @@ static int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) return ret; } -static int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp) -{ - struct csv_user_data_hgsc_cert_import input; - struct csv_data_hgsc_cert_import *data; - void *hgscsk_blob, *hgsc_blob; - int ret; - - if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) - return -EFAULT; - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - /* copy HGSCSK certificate blobs from userspace */ - hgscsk_blob = psp_copy_user_blob(input.hgscsk_cert_address, input.hgscsk_cert_len); - if (IS_ERR(hgscsk_blob)) { - ret = PTR_ERR(hgscsk_blob); - goto e_free; - } - - data->hgscsk_cert_address = __psp_pa(hgscsk_blob); - data->hgscsk_cert_len = input.hgscsk_cert_len; - - /* copy HGSC certificate blobs from userspace */ - hgsc_blob = psp_copy_user_blob(input.hgsc_cert_address, input.hgsc_cert_len); - if (IS_ERR(hgsc_blob)) { - ret = PTR_ERR(hgsc_blob); - goto e_free_hgscsk; - } - - data->hgsc_cert_address = __psp_pa(hgsc_blob); - data->hgsc_cert_len = input.hgsc_cert_len; - - ret = __sev_do_cmd_locked(CSV_CMD_HGSC_CERT_IMPORT, data, &argp->error); - - kfree(hgsc_blob); -e_free_hgscsk: - kfree(hgscsk_blob); -e_free: - kfree(data); - return ret; -} - static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; -- Gitee From 1625aecae2eaa98569a0f92bc86d0c943334546c Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 17 Oct 2024 18:04:51 +0800 Subject: [PATCH 1486/2138] anolis: crypto: ccp: Move Hygon CSV api version interface to hygon/csv-dev.c ANBZ: #11419 To reduce the code intrusion, we move the code about Hygon CSV API version from .../sev-dev.c to .../hygon/csv-dev.c. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4009 --- drivers/crypto/ccp/hygon/csv-dev.c | 20 ++++++++++++++++++++ drivers/crypto/ccp/hygon/csv-dev.h | 8 ++++++++ drivers/crypto/ccp/sev-dev.c | 23 +++++++---------------- 3 files changed, 35 insertions(+), 16 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 051eab8261a2..a4d36e5fc31f 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -18,6 +18,26 @@ #include "csv-dev.h" #include "psp-dev.h" +/* + * Hygon CSV build info: + * Hygon CSV build info is 32-bit in length other than 8-bit as that + * in AMD SEV. + */ +u32 hygon_csv_build; + +/* + * csv_update_api_version used to update the api version of HYGON CSV + * firmwareat driver side. + * Currently, we only need to update @hygon_csv_build. + */ +void csv_update_api_version(struct sev_user_data_status *status) +{ + if (status) { + hygon_csv_build = (status->flags >> 9) | + ((u32)status->build << 23); + } +} + int csv_cmd_buffer_len(int cmd) { switch (cmd) { diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h index 0dfe81b21396..e26d46ac5110 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.h +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -12,7 +12,15 @@ #include +extern u32 hygon_csv_build; + +void csv_update_api_version(struct sev_user_data_status *status); int csv_cmd_buffer_len(int cmd); int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp); +static inline bool csv_version_greater_or_equal(u32 build) +{ + return hygon_csv_build >= build; +} + #endif /* __CCP_HYGON_CSV_DEV_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 956526c83d22..24060a79adc3 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -109,13 +109,6 @@ static void *sev_es_tmr; #define NV_LENGTH (32 * 1024) static void *sev_init_ex_buffer; 
-/* - * Hygon CSV build info: - * Hygon CSV build info is 32-bit in length other than 8-bit as that - * in AMD SEV. - */ -static u32 hygon_csv_build; - static inline bool sev_version_greater_or_equal(u8 maj, u8 min) { struct sev_device *sev = psp_master->sev_data; @@ -129,11 +122,6 @@ static inline bool sev_version_greater_or_equal(u8 maj, u8 min) return false; } -static inline bool csv_version_greater_or_equal(u32 build) -{ - return hygon_csv_build >= build; -} - static void sev_irq_handler(int irq, void *data, unsigned int status) { struct sev_device *sev = data; @@ -880,7 +868,7 @@ static int __sev_platform_init_locked(int *error) dev_dbg(sev->dev, "SEV firmware initialized\n"); - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + if (is_vendor_hygon()) dev_info(sev->dev, "CSV API:%d.%d build:%d\n", sev->api_major, sev->api_minor, hygon_csv_build); else @@ -1132,9 +1120,12 @@ static int sev_get_api_version(void) sev->build = status.build; sev->state = status.state; - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) - hygon_csv_build = (status.flags >> 9) | - ((u32)status.build << 23); + /* + * The api version fields of HYGON CSV firmware are not consistent + * with AMD SEV firmware. + */ + if (is_vendor_hygon()) + csv_update_api_version(&status); return 0; } -- Gitee From 74fc668e1dcd2a2431da04c9e75378e42c22d85f Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 17 Oct 2024 19:53:41 +0800 Subject: [PATCH 1487/2138] anolis: crypto: ccp: Move CSV_DOWNLOAD_FIRMWARE ioctl handler to hygon/csv-dev.c ANBZ: #11419 To reduce the code intrusion, we move CSV_DOWNLOAD_FIRMWARE ioctl handler from .../sev-dev.c to .../hygon/csv-dev.c. Replace 'boot_cpu_data.x86_vendor' with is_vendor_hygon() in the code path for update firmware. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4009 --- drivers/crypto/ccp/hygon/csv-dev.c | 71 +++++++++++++++++++++++++ drivers/crypto/ccp/hygon/csv-dev.h | 3 ++ drivers/crypto/ccp/sev-dev.c | 83 +++--------------------------- 3 files changed, 80 insertions(+), 77 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index a4d36e5fc31f..d2036aa43ff0 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -110,3 +110,74 @@ int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp) kfree(data); return ret; } + +int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) +{ + struct sev_data_download_firmware *data = NULL; + struct csv_user_data_download_firmware input; + int ret, order; + struct page *p; + u64 data_size; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + /* Only support DOWNLOAD_FIRMWARE if build greater or equal 1667 */ + if (!csv_version_greater_or_equal(1667)) { + pr_err("DOWNLOAD_FIRMWARE not supported\n"); + return -EIO; + } + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + if (!input.address) { + argp->error = SEV_RET_INVALID_ADDRESS; + return -EINVAL; + } + + if (!input.length || input.length > CSV_FW_MAX_SIZE) { + argp->error = SEV_RET_INVALID_LEN; + return -EINVAL; + } + + /* + * CSV FW expects the physical address given to it to be 32 + * byte aligned. Memory allocated has structure placed at the + * beginning followed by the firmware being passed to the CSV + * FW. Allocate enough memory for data structure + alignment + * padding + CSV FW. + */ + data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32); + + order = get_order(input.length + data_size); + p = alloc_pages(GFP_KERNEL, order); + if (!p) + return -ENOMEM; + + /* + * Copy firmware data to a kernel allocated contiguous + * memory region. 
+ */ + data = page_address(p); + if (copy_from_user((void *)(page_address(p) + data_size), + (void *)input.address, input.length)) { + ret = -EFAULT; + goto err_free_page; + } + + data->address = __psp_pa(page_address(p) + data_size); + data->len = input.length; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_DOWNLOAD_FIRMWARE, + data, &argp->error); + if (ret) + pr_err("Failed to update CSV firmware: %#x\n", argp->error); + else + pr_info("CSV firmware update successful\n"); + +err_free_page: + __free_pages(p, order); + + return ret; +} diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h index e26d46ac5110..a9bac6eae97b 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.h +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -12,11 +12,14 @@ #include +#define CSV_FW_FILE "hygon/csv.fw" + extern u32 hygon_csv_build; void csv_update_api_version(struct sev_user_data_status *status); int csv_cmd_buffer_len(int cmd); int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp); +int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp); static inline bool csv_version_greater_or_equal(u32 build) { diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 24060a79adc3..66137b7c0f11 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -41,7 +41,6 @@ #define DEVICE_NAME "sev" #define SEV_FW_FILE "amd/sev.fw" -#define CSV_FW_FILE "hygon/csv.fw" #define SEV_FW_NAME_SIZE 64 DEFINE_MUTEX(sev_cmd_mutex); @@ -1136,7 +1135,7 @@ static int sev_get_firmware(struct device *dev, char fw_name_specific[SEV_FW_NAME_SIZE]; char fw_name_subset[SEV_FW_NAME_SIZE]; - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (is_vendor_hygon()) { /* Check for CSV FW to using generic name: csv.fw */ if (firmware_request_nowarn(firmware, CSV_FW_FILE, dev) >= 0) return 0; @@ -1183,14 +1182,14 @@ static int sev_update_firmware(struct device *dev) u64 data_size; if (!sev_version_greater_or_equal(0, 15) && - 
(boot_cpu_data.x86_vendor != X86_VENDOR_HYGON || - !csv_version_greater_or_equal(1667))) { + !(is_vendor_hygon() && csv_version_greater_or_equal(1667))) { dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n"); return -1; } if (sev_get_firmware(dev, &firmware) == -ENOENT) { - dev_dbg(dev, "No SEV firmware file present\n"); + dev_dbg(dev, "No %s firmware file present\n", + is_vendor_hygon() ? "CSV" : "SEV"); return -1; } @@ -1231,13 +1230,10 @@ static int sev_update_firmware(struct device *dev) if (ret) dev_dbg(dev, "Failed to update %s firmware: %#x\n", - (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) - ? "CSV" : "SEV", - error); + is_vendor_hygon() ? "CSV" : "SEV", error); else dev_info(dev, "%s firmware update successful\n", - (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) - ? "CSV" : "SEV"); + is_vendor_hygon() ? "CSV" : "SEV"); __free_pages(p, order); @@ -1489,73 +1485,6 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) return ret; } -static int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) -{ - struct sev_data_download_firmware *data = NULL; - struct csv_user_data_download_firmware input; - int ret, order; - struct page *p; - u64 data_size; - - /* Only support DOWNLOAD_FIRMWARE if build greater or equal 1667 */ - if (!csv_version_greater_or_equal(1667)) { - pr_err("DOWNLOAD_FIRMWARE not supported\n"); - return -EIO; - } - - if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) - return -EFAULT; - - if (!input.address) { - argp->error = SEV_RET_INVALID_ADDRESS; - return -EINVAL; - } - - if (!input.length || input.length > CSV_FW_MAX_SIZE) { - argp->error = SEV_RET_INVALID_LEN; - return -EINVAL; - } - - /* - * CSV FW expects the physical address given to it to be 32 - * byte aligned. Memory allocated has structure placed at the - * beginning followed by the firmware being passed to the CSV - * FW. Allocate enough memory for data structure + alignment - * padding + CSV FW. 
- */ - data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32); - - order = get_order(input.length + data_size); - p = alloc_pages(GFP_KERNEL, order); - if (!p) - return -ENOMEM; - - /* - * Copy firmware data to a kernel allocated contiguous - * memory region. - */ - data = page_address(p); - if (copy_from_user((void *)(page_address(p) + data_size), - (void *)input.address, input.length)) { - ret = -EFAULT; - goto err_free_page; - } - - data->address = __psp_pa(page_address(p) + data_size); - data->len = input.length; - - ret = __sev_do_cmd_locked(SEV_CMD_DOWNLOAD_FIRMWARE, data, &argp->error); - if (ret) - pr_err("Failed to update CSV firmware: %#x\n", argp->error); - else - pr_info("CSV firmware update successful\n"); - -err_free_page: - __free_pages(p, order); - - return ret; -} - static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; -- Gitee From 4d9f2d741b2480158937ad744c3e74c48237408b Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 18 Oct 2024 11:08:27 +0800 Subject: [PATCH 1488/2138] anolis: crypto: ccp: Reduce more code intrusion in psp-dev.c and sev-dev.c ANBZ: #11419 So far, the CSV RING BUFFER interfaces and PSP communication interfaces that to support Hygon secure functions are placed in psp-dev.c and sev-dev.c. We move most of these code to .../hygon/psp-dev.c and .../hygon/csv-dev.c. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4009 --- drivers/crypto/ccp/hygon/csv-dev.c | 819 ++++++++++++++++++++++ drivers/crypto/ccp/hygon/csv-dev.h | 8 + drivers/crypto/ccp/hygon/psp-dev.c | 692 +++++++++++++++++++ drivers/crypto/ccp/hygon/psp-dev.h | 31 + drivers/crypto/ccp/psp-dev.c | 528 +-------------- drivers/crypto/ccp/psp-dev.h | 15 - drivers/crypto/ccp/psp-ringbuf.c | 5 + drivers/crypto/ccp/psp-ringbuf.h | 3 +- drivers/crypto/ccp/sev-dev.c | 1017 +--------------------------- include/linux/psp-hygon.h | 4 + 10 files changed, 1605 insertions(+), 1517 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index d2036aa43ff0..1847a8b9f465 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -25,6 +25,22 @@ */ u32 hygon_csv_build; +int csv_comm_mode = CSV_COMM_MAILBOX_ON; + +/* defination of variabled used by virtual psp */ +enum VPSP_RB_CHECK_STATUS { + RB_NOT_CHECK = 0, + RB_CHECKING, + RB_CHECKED, + RB_CHECK_MAX +}; +#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) +#define VPSP_CMD_STATUS_RUNNING 0xffff +static DEFINE_MUTEX(vpsp_rb_mutex); +struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; +static uint8_t vpsp_rb_supported; +static atomic_t vpsp_rb_check_status = ATOMIC_INIT(RB_NOT_CHECK); + /* * csv_update_api_version used to update the api version of HYGON CSV * firmwareat driver side. @@ -181,3 +197,806 @@ int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) return ret; } + +/* + * __csv_ring_buffer_enter_locked issues command to switch to RING BUFFER + * mode, the caller must acquire the mutex lock. 
+ */ +static int __csv_ring_buffer_enter_locked(int *error) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + int ret = 0; + + if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + sev = psp->sev_data; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + +static int csv_wait_cmd_ioc_ring_buffer(struct sev_device *sev, + unsigned int *reg, + unsigned int timeout) +{ + int ret; + + ret = wait_event_timeout(sev->int_queue, + sev->int_rcvd, timeout * HZ); + if (!ret) + return -ETIMEDOUT; + + *reg = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + + return 0; +} + +static int csv_get_cmd_status(struct sev_device *sev, int prio, int index) +{ + struct csv_queue *queue = &sev->ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)queue->data; + + return statval[index].status; +} + +static int __csv_do_ringbuf_cmds_locked(int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int rb_tail; 
+ unsigned int rb_ctl; + int last_cmd_index; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH].cmd_ptr.tail + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW].cmd_ptr.tail; + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + + /* PSP response to x86 only when all queue is empty or error happends */ + rb_ctl = PSP_RBCTL_X86_WRITES | + PSP_RBCTL_RBMODE_ACT | + PSP_RBCTL_CLR_INTSTAT; + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in ring buffer completed */ + ret = csv_wait_cmd_ioc_ring_buffer(sev, ®, + (*hygon_psp_hooks.psp_timeout) * 10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + dev_err(sev->dev, "csv ringbuffer mode command timed out, disabling PSP\n"); + *hygon_psp_hooks.psp_dead = true; + + return ret; + } + + /* cmd error happends */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + + if (psp_ret) { + last_cmd_index = (reg & PSP_RBHEAD_QHI_HEAD_MASK) + >> PSP_RBHEAD_QHI_HEAD_SHIFT; + *psp_ret = csv_get_cmd_status(sev, CSV_COMMAND_PRIORITY_HIGH, + last_cmd_index); + if (*psp_ret == 0) { + last_cmd_index = reg & PSP_RBHEAD_QLO_HEAD_MASK; + *psp_ret = csv_get_cmd_status(sev, + CSV_COMMAND_PRIORITY_LOW, last_cmd_index); + } + } + + return ret; +} + +/* + * csv_do_ringbuf_cmds will enter RING BUFFER mode and handling commands + * queued in RING BUFFER queues, the user is obligate to manage RING + * BUFFER queues including allocate, enqueue and free, etc. 
+ */ +int csv_do_ringbuf_cmds(int *psp_ret) +{ + struct sev_user_data_status data; + int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } + + rc = __csv_ring_buffer_enter_locked(psp_ret); + if (rc) + goto cmd_unlock; + + rc = __csv_do_ringbuf_cmds_locked(psp_ret); + + /* exit ringbuf mode by send CMD in mailbox mode */ + hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + +cmd_unlock: + if (mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return rc; +} + +void csv_restore_mailbox_mode_postprocess(void) +{ + csv_comm_mode = CSV_COMM_MAILBOX_ON; + csv_ring_buffer_queue_free(); +} + +/* + * __csv_ring_buffer_queue_init will allocate memory for command queue + * and status queue. If error occurs, this function will return directly, + * the caller must free the memories allocated for queues. + * + * Function csv_ring_buffer_queue_free() can be used to handling error + * return by this function and cleanup ring buffer queues when exiting + * from RING BUFFER mode. 
+ * + * Return -ENOMEM if fail to allocate memory for queues, otherwise 0 + */ +static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer) +{ + void *cmd_ptr_buffer = NULL; + void *stat_val_buffer = NULL; + + /* If reach here, the command and status queues must be NULL */ + WARN_ON(ring_buffer->cmd_ptr.data || + ring_buffer->stat_val.data); + + cmd_ptr_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!cmd_ptr_buffer) + return -ENOMEM; + + /* the command queue will points to @cmd_ptr_buffer */ + csv_queue_init(&ring_buffer->cmd_ptr, cmd_ptr_buffer, + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); + + stat_val_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!stat_val_buffer) + return -ENOMEM; + + /* the status queue will points to @stat_val_buffer */ + csv_queue_init(&ring_buffer->stat_val, stat_val_buffer, + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); + return 0; +} + +int csv_ring_buffer_queue_init(void) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + int i, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ret = __csv_ring_buffer_queue_init(&sev->ring_buffer[i]); + if (ret) + goto e_free; + } + + return 0; + +e_free: + csv_ring_buffer_queue_free(); + return ret; +} +EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_init); + +int csv_ring_buffer_queue_free(void) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_ringbuffer_queue *ring_buffer; + int i; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (i = 0; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ring_buffer = &sev->ring_buffer[i]; + + /* + * If command queue is not NULL, it must points to memory + * that allocated in __csv_ring_buffer_queue_init(). 
+ */ + if (ring_buffer->cmd_ptr.data) { + kfree((void *)ring_buffer->cmd_ptr.data); + csv_queue_cleanup(&ring_buffer->cmd_ptr); + } + + /* + * If status queue is not NULL, it must points to memory + * that allocated in __csv_ring_buffer_queue_init(). + */ + if (ring_buffer->stat_val.data) { + kfree((void *)ring_buffer->stat_val.data); + csv_queue_cleanup(&ring_buffer->stat_val); + } + } + return 0; +} +EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_free); + +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_cmdptr_entry cmdptr = { }; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + cmdptr.cmd_buf_ptr = __psp_pa(data); + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + if (csv_enqueue_cmd(&sev->ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL_GPL(csv_fill_cmd_queue); + +int csv_check_stat_queue_status(int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int len; + int prio; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (prio = CSV_COMMAND_PRIORITY_HIGH; + prio < CSV_COMMAND_PRIORITY_NUM; prio++) { + do { + struct csv_statval_entry statval; + + len = csv_dequeue_stat(&sev->ring_buffer[prio].stat_val, + &statval, 1); + if (len) { + if (statval.status != 0) { + *psp_ret = statval.status; + return -EFAULT; + } + } + } while (len); + } + + return 0; +} +EXPORT_SYMBOL_GPL(csv_check_stat_queue_status); + +static int get_queue_tail(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.tail & ringbuffer->cmd_ptr.mask; +} + +static int get_queue_head(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.head & ringbuffer->cmd_ptr.mask; +} + +static void vpsp_set_cmd_status(int prio, int index, int status) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct 
csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + statval[index].status = status; +} + +static int vpsp_get_cmd_status(int prio, int index) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + return statval[index].status; +} + +static unsigned int vpsp_queue_cmd_size(int prio) +{ + return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); +} + +static int vpsp_dequeue_cmd(int prio, int index, + struct csv_cmdptr_entry *cmd_ptr) +{ + mutex_lock(&vpsp_rb_mutex); + + /* The status update must be before the head update */ + vpsp_set_cmd_status(prio, index, 0); + csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); + + mutex_unlock(&vpsp_rb_mutex); + + return 0; +} + +/* + * Populate the command from the virtual machine to the queue to + * support execution in ringbuffer mode + */ +static int vpsp_fill_cmd_queue(uint32_t vid, int prio, int cmd, void *data, uint16_t flags) +{ + struct csv_cmdptr_entry cmdptr = { }; + int index = -1; + + cmdptr.cmd_buf_ptr = PUT_PSP_VID(__psp_pa(data), vid); + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + mutex_lock(&vpsp_rb_mutex); + index = get_queue_tail(&vpsp_ring_buffer[prio]); + + /* If status is equal to VPSP_CMD_STATUS_RUNNING, then the queue is full */ + if (vpsp_get_cmd_status(prio, index) == VPSP_CMD_STATUS_RUNNING) { + index = -1; + goto out; + } + + /* The status must be written first, and then the cmd can be enqueued */ + vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); + if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { + vpsp_set_cmd_status(prio, index, 0); + index = -1; + goto out; + } + +out: + mutex_unlock(&vpsp_rb_mutex); + return index; +} + +static void vpsp_ring_update_head(struct csv_ringbuffer_queue *ring_buffer, + uint32_t new_head) +{ + uint32_t orig_head = get_queue_head(ring_buffer); + uint32_t comple_num = 0; + + if (new_head 
>= orig_head) + comple_num = new_head - orig_head; + else + comple_num = ring_buffer->cmd_ptr.mask - (orig_head - new_head) + + 1; + + ring_buffer->cmd_ptr.head += comple_num; +} + +static int vpsp_ring_buffer_queue_init(void) +{ + int i; + int ret; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ret = __csv_ring_buffer_queue_init(&vpsp_ring_buffer[i]); + if (ret) + return ret; + } + + return 0; +} + +static int vpsp_psp_mutex_trylock(void) +{ + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (mutex_enabled) + return psp_mutex_trylock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + return mutex_trylock(hygon_psp_hooks.sev_cmd_mutex); +} + +static int vpsp_psp_mutex_unlock(void) +{ + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return 0; +} + +static int __vpsp_ring_buffer_enter_locked(int *error) +{ + int ret; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + struct sev_device *sev = psp_master->sev_data; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, 
sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + +static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + struct psp_device *psp = psp_master; + unsigned int reg, ret = 0; + unsigned int rb_tail, rb_head; + unsigned int rb_ctl; + struct sev_device *sev; + + if (!psp) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb head */ + rb_head = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + rb_head &= (~PSP_RBHEAD_QHI_HEAD_MASK); + rb_head |= (get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBHEAD_QHI_HEAD_SHIFT); + rb_head &= (~PSP_RBHEAD_QLO_HEAD_MASK); + rb_head |= get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_head, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + /* PSP response to x86 only when all queue is empty or error happends */ + rb_ctl = (PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT); + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in ring buffer completed */ + ret = csv_wait_cmd_ioc_ring_buffer(sev, ®, (*hygon_psp_hooks.psp_timeout) * 10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(psp->dev, "csv command in ringbuffer mode timed out, disabling PSP\n"); + *hygon_psp_hooks.psp_dead = true; + return ret; + } + /* cmd error happends */ + if (reg & 
PSP_RBHEAD_QPAUSE_INT_STAT)
+		ret = -EFAULT;
+
+	/* update head */
+	vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH],
+			(reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT);
+	vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW],
+			reg & PSP_RBHEAD_QLO_HEAD_MASK);
+
+	if (psp_ret)
+		*psp_ret = vpsp_get_cmd_status(prio, index);
+
+	return ret;
+}
+
+/*
+ * Run the queued ring-buffer commands, then drop back to mailbox mode.
+ *
+ * The dummy PLATFORM_STATUS command used to leave ring-buffer mode is
+ * handed to the PSP by physical address, so its buffer must be valid
+ * for __psp_pa(); with CONFIG_VMAP_STACK an on-stack buffer may not be,
+ * hence the heap allocation here (the old code used a stack variable).
+ */
+static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index)
+{
+	struct sev_user_data_status *data;
+	int rc;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	rc = __vpsp_ring_buffer_enter_locked(psp_ret);
+	if (rc)
+		goto end;
+
+	rc = __vpsp_do_ringbuf_cmds_locked(psp_ret, prio, index);
+
+	/* exit ringbuf mode by send CMD in mailbox mode */
+	hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
+			data, NULL);
+	csv_comm_mode = CSV_COMM_MAILBOX_ON;
+
+end:
+	kfree(data);
+	return rc;
+}
+
+/**
+ * struct user_data_status - PLATFORM_STATUS command parameters
+ *
+ * @api_major: major API version
+ * @api_minor: minor API version
+ * @state: platform state
+ * @owner: self-owned or externally owned
+ * @chip_secure: ES or MP chip
+ * @fw_enc: whether this FW is encrypted
+ * @fw_sign: whether this FW is signed
+ * @reserved1: reserved, must be zero
+ * @config_es: platform config flags for csv-es
+ * @build: Firmware Build ID for this API version
+ * @guest_count: number of active guests
+ */
+struct user_data_status {
+	uint8_t api_major;		/* Out */
+	uint8_t api_minor;		/* Out */
+	uint8_t state;			/* Out */
+	uint8_t owner : 1,		/* Out */
+		chip_secure : 1,	/* Out */
+		fw_enc : 1,		/* Out */
+		fw_sign : 1,		/* Out */
+		reserved1 : 4;		/* reserved */
+	uint32_t config_es : 1,		/* Out */
+		build : 31;		/* Out */
+	uint32_t guest_count;		/* Out */
+} __packed;
+
+/*
+ * Check whether the firmware supports ringbuffer mode and parse
+ * commands from the virtual machine
+ */
+static int
vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio,
+				struct vpsp_cmd *vcmd)
+{
+	int ret, error;
+	int rb_supported;
+	int rb_check_old = RB_NOT_CHECK;
+	struct user_data_status *status = NULL;
+
+	if (atomic_try_cmpxchg(&vpsp_rb_check_status, &rb_check_old,
+				RB_CHECKING)) {
+		/* get buildid to check if the firmware supports ringbuffer mode */
+		status = kzalloc(sizeof(*status), GFP_KERNEL);
+		if (!status) {
+			atomic_set(&vpsp_rb_check_status, RB_CHECKED);
+			goto end;
+		}
+		ret = sev_platform_status((struct sev_user_data_status *)status,
+				&error);
+		if (ret) {
+			pr_warn("failed to get status[%#x], use default command mode.\n", error);
+			atomic_set(&vpsp_rb_check_status, RB_CHECKED);
+			goto end;
+		}
+
+		/* check if the firmware supports the ringbuffer mode */
+		if (VPSP_RB_IS_SUPPORTED(status->build)) {
+			if (vpsp_ring_buffer_queue_init()) {
+				pr_warn("vpsp_ring_buffer_queue_init fail, use default command mode\n");
+				atomic_set(&vpsp_rb_check_status, RB_CHECKED);
+				goto end;
+			}
+			WRITE_ONCE(vpsp_rb_supported, 1);
+		}
+
+		atomic_set(&vpsp_rb_check_status, RB_CHECKED);
+	}
+
+end:
+	rb_supported = READ_ONCE(vpsp_rb_supported);
+	/* parse prio by vcmd */
+	if (rb_supported && vcmd->is_high_rb)
+		*prio = CSV_COMMAND_PRIORITY_HIGH;
+	else
+		*prio = CSV_COMMAND_PRIORITY_LOW;
+	/* clear rb level bit in vcmd */
+	vcmd->is_high_rb = 0;
+
+	kfree(status);
+	return rb_supported;
+}
+
+int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret);
+/*
+ * Try to obtain the result again by the command index, this
+ * interface is used in ringbuffer mode
+ */
+int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data,
+			struct vpsp_ret *psp_ret)
+{
+	int ret = 0;
+	int cur_status;
+	struct csv_cmdptr_entry cmd = {0};
+
+	/*
+	 * Return the result directly if the command has been executed.
+	 * 'index' is unsigned, so the old 'index >= 0' guard was always
+	 * true and has been removed; the status is read once so the value
+	 * reported back cannot change between the test and the use.
+	 */
+	cur_status = vpsp_get_cmd_status(prio, index);
+	if (cur_status != VPSP_CMD_STATUS_RUNNING) {
+		psp_ret->pret = cur_status;
+		psp_ret->status = VPSP_FINISH;
+		return 0;
+	}
+ + if (vpsp_psp_mutex_trylock()) { + /* Use mailbox mode to execute a command if there is only one command */ + if (vpsp_queue_cmd_size(prio) == 1) { + /* dequeue command from queue*/ + vpsp_dequeue_cmd(prio, index, &cmd); + + ret = __vpsp_do_cmd_locked(vid, cmd.cmd_id, data, + (int *)psp_ret); + psp_ret->status = VPSP_FINISH; + vpsp_psp_mutex_unlock(); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + } else { + ret = vpsp_do_ringbuf_cmds_locked((int *)psp_ret, prio, + index); + psp_ret->status = VPSP_FINISH; + vpsp_psp_mutex_unlock(); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed %d\n", + __func__, ret); + goto end; + } + } + } else { + /* Change the command to the running state if getting the mutex fails */ + psp_ret->index = index; + psp_ret->status = VPSP_RUNNING; + return 0; + } +end: + return ret; +} +EXPORT_SYMBOL_GPL(vpsp_try_get_result); + +/* + * Send the virtual psp command to the PSP device and try to get the + * execution result, the interface and the vpsp_try_get_result + * interface are executed asynchronously. If the execution succeeds, + * the result is returned to the VM. 
If the execution fails, the + * vpsp_try_get_result interface will be used to obtain the result + * later again + */ +int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) +{ + int ret = 0; + int rb_supported; + int index = -1; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + + /* ringbuffer mode check and parse command prio*/ + rb_supported = vpsp_rb_check_and_cmd_prio_parse(&prio, + (struct vpsp_cmd *)&cmd); + if (rb_supported) { + /* fill command in ringbuffer's queue and get index */ + index = vpsp_fill_cmd_queue(vid, prio, cmd, data, 0); + if (unlikely(index < 0)) { + /* do mailbox command if queuing failed*/ + ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + goto end; + } + + /* try to get result from the ringbuffer command */ + ret = vpsp_try_get_result(vid, prio, index, data, psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret); + goto end; + } + } else { + /* mailbox mode */ + ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + } + +end: + return ret; +} +EXPORT_SYMBOL_GPL(vpsp_try_do_cmd); diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h index a9bac6eae97b..b2b46ae779a9 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.h +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -15,9 +15,12 @@ #define CSV_FW_FILE "hygon/csv.fw" extern u32 hygon_csv_build; +extern int csv_comm_mode; void csv_update_api_version(struct sev_user_data_status *status); int csv_cmd_buffer_len(int cmd); +void csv_restore_mailbox_mode_postprocess(void); +int csv_do_ringbuf_cmds(int *psp_ret); 
int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp); int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp); @@ -26,4 +29,9 @@ static inline bool csv_version_greater_or_equal(u32 build) return hygon_csv_build >= build; } +static inline bool csv_in_ring_buffer_mode(void) +{ + return csv_comm_mode == CSV_COMM_RINGBUFFER_ON; +} + #endif /* __CCP_HYGON_CSV_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index dd5285e1ba37..d60009a5e0d7 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -11,13 +11,412 @@ * published by the Free Software Foundation. */ +#include #include +#include +#include +#include +#include +#include #include "psp-dev.h" /* Function and variable pointers for hooks */ struct hygon_psp_hooks_table hygon_psp_hooks; +static struct psp_misc_dev *psp_misc; +#define HYGON_PSP_IOC_TYPE 'H' +enum HYGON_PSP_OPCODE { + HYGON_PSP_MUTEX_ENABLE = 1, + HYGON_PSP_MUTEX_DISABLE, + HYGON_VPSP_CTRL_OPT, + HYGON_PSP_OPCODE_MAX_NR, +}; + +enum VPSP_DEV_CTRL_OPCODE { + VPSP_OP_VID_ADD, + VPSP_OP_VID_DEL, + VPSP_OP_SET_DEFAULT_VID_PERMISSION, + VPSP_OP_GET_DEFAULT_VID_PERMISSION, +}; + +struct vpsp_dev_ctrl { + unsigned char op; + union { + unsigned int vid; + // Set or check the permissions for the default VID + unsigned int def_vid_perm; + unsigned char reserved[128]; + } data; +}; + +uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) +{ + return xchg(dst, val); +} + +int psp_mutex_init(struct psp_mutex *mutex) +{ + if (!mutex) + return -1; + mutex->locked = 0; + return 0; +} + +int psp_mutex_trylock(struct psp_mutex *mutex) +{ + if (atomic64_exchange(&mutex->locked, 1)) + return 0; + else + return 1; +} + +int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms) +{ + int ret = 0; + unsigned long je; + + je = jiffies + msecs_to_jiffies(ms); + do { + if (psp_mutex_trylock(mutex)) { + ret = 1; + break; + } + } while ((ms == 0) || time_before(jiffies, je)); + + 
return ret; +} + +int psp_mutex_unlock(struct psp_mutex *mutex) +{ + if (!mutex) + return -1; + + atomic64_exchange(&mutex->locked, 0); + return 0; +} + +static int mmap_psp(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long page; + + page = virt_to_phys((void *)psp_misc->data_pg_aligned) >> PAGE_SHIFT; + + if (remap_pfn_range(vma, vma->vm_start, page, (vma->vm_end - vma->vm_start), + vma->vm_page_prot)) { + printk(KERN_INFO "remap failed..."); + return -1; + } + vm_flags_mod(vma, VM_DONTDUMP | VM_DONTEXPAND, 0); + printk(KERN_INFO "remap_pfn_rang page:[%lu] ok.\n", page); + return 0; +} + +static ssize_t read_psp(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t remaining; + + if ((*ppos + count) > PAGE_SIZE) { + printk(KERN_INFO "%s: invalid address range, pos %llx, count %lx\n", + __func__, *ppos, count); + return -EFAULT; + } + + remaining = copy_to_user(buf, (char *)psp_misc->data_pg_aligned + *ppos, count); + if (remaining) + return -EFAULT; + + *ppos += count; + + return count; +} + +static ssize_t write_psp(struct file *file, const char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t remaining, written; + + if ((*ppos + count) > PAGE_SIZE) { + printk(KERN_INFO "%s: invalid address range, pos %llx, count %lx\n", + __func__, *ppos, count); + return -EFAULT; + } + + remaining = copy_from_user((char *)psp_misc->data_pg_aligned + *ppos, buf, count); + written = count - remaining; + if (!written) + return -EFAULT; + + *ppos += written; + + return written; +} + +DEFINE_RWLOCK(vpsp_rwlock); + +/* VPSP_VID_MAX_ENTRIES determines the maximum number of vms that can set vid. + * but, the performance of finding vid is determined by g_vpsp_vid_num, + * so VPSP_VID_MAX_ENTRIES can be set larger. 
+ */ +#define VPSP_VID_MAX_ENTRIES 2048 +#define VPSP_VID_NUM_MAX 64 + +struct vpsp_vid_entry { + uint32_t vid; + pid_t pid; +}; +static struct vpsp_vid_entry g_vpsp_vid_array[VPSP_VID_MAX_ENTRIES]; +static uint32_t g_vpsp_vid_num; +static int compare_vid_entries(const void *a, const void *b) +{ + return ((struct vpsp_vid_entry *)a)->pid - ((struct vpsp_vid_entry *)b)->pid; +} +static void swap_vid_entries(void *a, void *b, int size) +{ + struct vpsp_vid_entry entry; + + memcpy(&entry, a, size); + memcpy(a, b, size); + memcpy(b, &entry, size); +} + +/** + * When 'allow_default_vid' is set to 1, + * QEMU is allowed to use 'vid 0' by default + * in the absence of a valid 'vid' setting. + */ +uint32_t allow_default_vid = 1; +void vpsp_set_default_vid_permission(uint32_t is_allow) +{ + allow_default_vid = is_allow; +} + +int vpsp_get_default_vid_permission(void) +{ + return allow_default_vid; +} +EXPORT_SYMBOL_GPL(vpsp_get_default_vid_permission); + +/** + * When the virtual machine executes the 'tkm' command, + * it needs to retrieve the corresponding 'vid' + * by performing a binary search using 'kvm->userspace_pid'. + */ +int vpsp_get_vid(uint32_t *vid, pid_t pid) +{ + struct vpsp_vid_entry new_entry = {.pid = pid}; + struct vpsp_vid_entry *existing_entry = NULL; + + read_lock(&vpsp_rwlock); + existing_entry = bsearch(&new_entry, g_vpsp_vid_array, g_vpsp_vid_num, + sizeof(struct vpsp_vid_entry), compare_vid_entries); + read_unlock(&vpsp_rwlock); + + if (!existing_entry) + return -ENOENT; + if (vid) { + *vid = existing_entry->vid; + pr_debug("PSP: %s %d, by pid %d\n", __func__, *vid, pid); + } + return 0; +} +EXPORT_SYMBOL_GPL(vpsp_get_vid); + +/** + * Upon qemu startup, this section checks whether + * the '-device psp,vid' parameter is specified. + * If set, it utilizes the 'vpsp_add_vid' function + * to insert the 'vid' and 'pid' values into the 'g_vpsp_vid_array'. + * The insertion is done in ascending order of 'pid'. 
+ */
+static int vpsp_add_vid(uint32_t vid)
+{
+	pid_t cur_pid = task_pid_nr(current);
+	struct vpsp_vid_entry new_entry = {.vid = vid, .pid = cur_pid};
+	int ret = 0;
+
+	if (vid >= VPSP_VID_NUM_MAX)
+		return -EINVAL;
+
+	/*
+	 * Do the duplicate-pid and capacity checks under the write lock:
+	 * the old code performed them before taking the lock, so two
+	 * concurrent adds could both pass and either duplicate a pid or
+	 * overflow the array.  vpsp_get_vid() is not reused here because
+	 * it takes the (non-recursive) read lock itself.
+	 */
+	write_lock(&vpsp_rwlock);
+	if (bsearch(&new_entry, g_vpsp_vid_array, g_vpsp_vid_num,
+			sizeof(struct vpsp_vid_entry), compare_vid_entries)) {
+		ret = -EEXIST;
+		goto end;
+	}
+	if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) {
+		ret = -ENOMEM;
+		goto end;
+	}
+	memcpy(&g_vpsp_vid_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_vid_entry));
+	sort(g_vpsp_vid_array, g_vpsp_vid_num, sizeof(struct vpsp_vid_entry),
+		compare_vid_entries, swap_vid_entries);
+	pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num);
+end:
+	write_unlock(&vpsp_rwlock);
+	return ret;
+}
+
+/**
+ * Upon the virtual machine is shut down,
+ * the 'vpsp_del_vid' function is employed to remove
+ * the 'vid' associated with the current 'pid'.
+ */
+static int vpsp_del_vid(void)
+{
+	pid_t cur_pid = task_pid_nr(current);
+	int i, ret = -ENOENT;
+
+	write_lock(&vpsp_rwlock);
+	for (i = 0; i < g_vpsp_vid_num; ++i) {
+		if (g_vpsp_vid_array[i].pid == cur_pid) {
+			--g_vpsp_vid_num;
+			pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n",
+				g_vpsp_vid_array[i].vid, cur_pid, g_vpsp_vid_num);
+			memcpy(&g_vpsp_vid_array[i], &g_vpsp_vid_array[i + 1],
+				sizeof(struct vpsp_vid_entry) * (g_vpsp_vid_num - i));
+			ret = 0;
+			goto end;
+		}
+	}
+
+end:
+	write_unlock(&vpsp_rwlock);
+	return ret;
+}
+
+static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl)
+{
+	int ret = 0;
+	unsigned char op = ctrl->op;
+
+	switch (op) {
+	case VPSP_OP_VID_ADD:
+		ret = vpsp_add_vid(ctrl->data.vid);
+		break;
+
+	case VPSP_OP_VID_DEL:
+		ret = vpsp_del_vid();
+		break;
+
+	case VPSP_OP_SET_DEFAULT_VID_PERMISSION:
+		vpsp_set_default_vid_permission(ctrl->data.def_vid_perm);
+		break;
+
+	case VPSP_OP_GET_DEFAULT_VID_PERMISSION:
+		ctrl->data.def_vid_perm = vpsp_get_default_vid_permission();
+		break;
+
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static long ioctl_psp(struct file *file,
unsigned int ioctl, unsigned long arg) +{ + unsigned int opcode = 0; + struct vpsp_dev_ctrl vpsp_ctrl_op; + int ret = -EFAULT; + + if (_IOC_TYPE(ioctl) != HYGON_PSP_IOC_TYPE) { + printk(KERN_INFO "%s: invalid ioctl type: 0x%x\n", __func__, _IOC_TYPE(ioctl)); + return -EINVAL; + } + opcode = _IOC_NR(ioctl); + switch (opcode) { + case HYGON_PSP_MUTEX_ENABLE: + psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); + // And get the sev lock to make sure no one is using it now. + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + hygon_psp_hooks.psp_mutex_enabled = 1; + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + // Wait 10ms just in case someone is right before getting the psp lock. + mdelay(10); + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + ret = 0; + break; + + case HYGON_PSP_MUTEX_DISABLE: + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + // And get the psp lock to make sure no one is using it now. + psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); + hygon_psp_hooks.psp_mutex_enabled = 0; + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + // Wait 10ms just in case someone is right before getting the sev lock. 
+ mdelay(10); + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + ret = 0; + break; + + case HYGON_VPSP_CTRL_OPT: + if (copy_from_user(&vpsp_ctrl_op, (void __user *)arg, + sizeof(struct vpsp_dev_ctrl))) + return -EFAULT; + ret = do_vpsp_op_ioctl(&vpsp_ctrl_op); + if (!ret && copy_to_user((void __user *)arg, &vpsp_ctrl_op, + sizeof(struct vpsp_dev_ctrl))) + return -EFAULT; + break; + + default: + printk(KERN_INFO "%s: invalid ioctl number: %d\n", __func__, opcode); + return -EINVAL; + } + return ret; +} + +static const struct file_operations psp_fops = { + .owner = THIS_MODULE, + .mmap = mmap_psp, + .read = read_psp, + .write = write_psp, + .unlocked_ioctl = ioctl_psp, +}; + +int hygon_psp_additional_setup(struct sp_device *sp) +{ + struct device *dev = sp->dev; + int ret = 0; + + if (!psp_misc) { + struct miscdevice *misc; + + psp_misc = devm_kzalloc(dev, sizeof(*psp_misc), GFP_KERNEL); + if (!psp_misc) + return -ENOMEM; + psp_misc->data_pg_aligned = (struct psp_dev_data *)get_zeroed_page(GFP_KERNEL); + if (!psp_misc->data_pg_aligned) { + dev_err(dev, "alloc psp data page failed\n"); + devm_kfree(dev, psp_misc); + psp_misc = NULL; + return -ENOMEM; + } + SetPageReserved(virt_to_page(psp_misc->data_pg_aligned)); + psp_mutex_init(&psp_misc->data_pg_aligned->mb_mutex); + + *(uint32_t *)((void *)psp_misc->data_pg_aligned + 8) = 0xdeadbeef; + misc = &psp_misc->misc; + misc->minor = MISC_DYNAMIC_MINOR; + misc->name = "hygon_psp_config"; + misc->fops = &psp_fops; + + ret = misc_register(misc); + if (ret) + return ret; + kref_init(&psp_misc->refcount); + hygon_psp_hooks.psp_misc = psp_misc; + } else { + kref_get(&psp_misc->refcount); + } + + return ret; +} + +void hygon_psp_exit(struct kref *ref) +{ + struct psp_misc_dev *misc_dev = container_of(ref, struct psp_misc_dev, refcount); + + misc_deregister(&misc_dev->misc); + ClearPageReserved(virt_to_page(misc_dev->data_pg_aligned)); + free_page((unsigned long)misc_dev->data_pg_aligned); + psp_misc = NULL; + 
hygon_psp_hooks.psp_misc = NULL; +} + int fixup_hygon_psp_caps(struct psp_device *psp) { /* the hygon psp is unavailable if bit0 is cleared in feature reg */ @@ -28,3 +427,296 @@ int fixup_hygon_psp_caps(struct psp_device *psp) PSP_CAPABILITY_PSP_SECURITY_REPORTING); return 0; } + +static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* Get the physical address of the command buffer */ + phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; + phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); + + print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, + hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, ®, *hygon_psp_hooks.psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + *hygon_psp_hooks.psp_dead = true; + + return ret; + } + + *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 
16, 2, data, + hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); + + return ret; +} + +int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + phys_addr_t phys_addr; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + if (data && WARN_ON_ONCE(!virt_addr_valid(data))) + return -EINVAL; + + /* Get the physical address of the command buffer */ + phys_addr = PUT_PSP_VID(__psp_pa(data), vid); + phys_lsb = data ? lower_32_bits(phys_addr) : 0; + phys_msb = data ? upper_32_bits(phys_addr) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); + + print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, + hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, ®, *hygon_psp_hooks.psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + *hygon_psp_hooks.psp_dead = true; + + return ret; + } + + *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, + hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); + + return ret; +} + +int 
vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) { + return -EBUSY; + } + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } + + rc = __vpsp_do_cmd_locked(vid, cmd, data, psp_ret); + + if (mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return rc; +} + +int psp_do_cmd(int cmd, void *data, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } + rc = __psp_do_cmd_locked(cmd, data, psp_ret); + if (mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return rc; +} +EXPORT_SYMBOL_GPL(psp_do_cmd); + +#ifdef CONFIG_HYGON_PSP2CPU_CMD + +static DEFINE_SPINLOCK(p2c_notifier_lock); +static p2c_notifier_t p2c_notifiers[P2C_NOTIFIERS_MAX] = {NULL}; + +int psp_register_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) +{ + int ret = -ENODEV; + unsigned long flags; + + spin_lock_irqsave(&p2c_notifier_lock, flags); + + if (cmd_id < P2C_NOTIFIERS_MAX && !p2c_notifiers[cmd_id]) { + p2c_notifiers[cmd_id] = notifier; + ret = 0; + } + + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(psp_register_cmd_notifier); + +int psp_unregister_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) +{ + int ret = -ENODEV; + unsigned long flags; + + spin_lock_irqsave(&p2c_notifier_lock, flags); + + if (cmd_id < P2C_NOTIFIERS_MAX && p2c_notifiers[cmd_id] == notifier) { + p2c_notifiers[cmd_id] = NULL; + ret = 0; + } + + 
spin_unlock_irqrestore(&p2c_notifier_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(psp_unregister_cmd_notifier); + +#define PSP2CPU_MAX_LOOP 100 + +static irqreturn_t psp_irq_handler_hygon(int irq, void *data) +{ + struct psp_device *psp = data; + struct sev_device *sev = psp->sev_irq_data; + unsigned int status; + int reg; + unsigned long flags; + int count = 0; + uint32_t p2c_cmd; + uint32_t p2c_lo_data; + uint32_t p2c_hi_data; + uint64_t p2c_data; + + /* Read the interrupt status: */ + status = ioread32(psp->io_regs + psp->vdata->intsts_reg); + + while (status && (count++ < PSP2CPU_MAX_LOOP)) { + /* Clear the interrupt status by writing the same value we read. */ + iowrite32(status, psp->io_regs + psp->vdata->intsts_reg); + + /* Check if it is command completion: */ + if (status & SEV_CMD_COMPLETE) { + /* Check if it is SEV command completion: */ + reg = ioread32(psp->io_regs + psp->vdata->sev->cmdresp_reg); + if (reg & PSP_CMDRESP_RESP) { + sev->int_rcvd = 1; + wake_up(&sev->int_queue); + } + } + + if (status & PSP_X86_CMD) { + /* Check if it is P2C command completion: */ + reg = ioread32(psp->io_regs + psp->vdata->p2c_cmdresp_reg); + if (!(reg & PSP_CMDRESP_RESP)) { + p2c_lo_data = ioread32(psp->io_regs + + psp->vdata->p2c_cmdbuff_addr_lo_reg); + p2c_hi_data = ioread32(psp->io_regs + + psp->vdata->p2c_cmdbuff_addr_hi_reg); + p2c_data = (((uint64_t)(p2c_hi_data) << 32) + + ((uint64_t)(p2c_lo_data))); + p2c_cmd = (uint32_t)(reg & SEV_CMDRESP_IOC); + if (p2c_cmd < P2C_NOTIFIERS_MAX) { + spin_lock_irqsave(&p2c_notifier_lock, flags); + if (p2c_notifiers[p2c_cmd]) + p2c_notifiers[p2c_cmd](p2c_cmd, p2c_data); + + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + } + + reg |= PSP_CMDRESP_RESP; + iowrite32(reg, psp->io_regs + psp->vdata->p2c_cmdresp_reg); + } + } + status = ioread32(psp->io_regs + psp->vdata->intsts_reg); + } + + return IRQ_HANDLED; +} + +int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data) 
+{ + return sp_request_psp_irq(sp, psp_irq_handler_hygon, name, data); +} + +#else /* !CONFIG_HYGON_PSP2CPU_CMD */ + +int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data) +{ + return sp_request_psp_irq(sp, handler, name, data); +} + +#endif /* CONFIG_HYGON_PSP2CPU_CMD */ diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index e187d3f24bdf..d4339c51d085 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -11,6 +11,7 @@ #define __CCP_HYGON_PSP_DEV_H__ #include +#include #include "sp-dev.h" @@ -24,9 +25,39 @@ extern struct hygon_psp_hooks_table { bool sev_dev_hooks_installed; struct mutex *sev_cmd_mutex; + struct psp_misc_dev *psp_misc; + bool psp_mutex_enabled; + bool *psp_dead; + int *psp_timeout; + int *psp_cmd_timeout; + int (*sev_cmd_buffer_len)(int cmd); int (*__sev_do_cmd_locked)(int cmd, void *data, int *psp_ret); + int (*sev_wait_cmd_ioc)(struct sev_device *sev, + unsigned int *reg, unsigned int timeout); } hygon_psp_hooks; +#define PSP_MUTEX_TIMEOUT 600000 +struct psp_mutex { + uint64_t locked; +}; + +struct psp_dev_data { + struct psp_mutex mb_mutex; +}; + +struct psp_misc_dev { + struct kref refcount; + struct psp_dev_data *data_pg_aligned; + struct miscdevice misc; +}; + +int hygon_psp_additional_setup(struct sp_device *sp); +void hygon_psp_exit(struct kref *ref); +int psp_mutex_trylock(struct psp_mutex *mutex); +int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms); +int psp_mutex_unlock(struct psp_mutex *mutex); int fixup_hygon_psp_caps(struct psp_device *psp); +int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data); #endif /* __CCP_HYGON_PSP_DEV_H__ */ diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 9cf5f0e86532..d24ab07d90e5 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -9,12 +9,6 @@ #include #include 
-#include -#include -#include -#include -#include -#include #include "sp-dev.h" #include "psp-dev.h" @@ -22,90 +16,14 @@ #include "tee-dev.h" #include "platform-access.h" #include "dbc.h" + +#include "hygon/psp-dev.h" #ifdef CONFIG_TDM_DEV_HYGON #include "tdm-dev.h" #endif -#include "hygon/psp-dev.h" - struct psp_device *psp_master; -struct psp_misc_dev *psp_misc; -int is_hygon_psp; -#define HYGON_PSP_IOC_TYPE 'H' -enum HYGON_PSP_OPCODE { - HYGON_PSP_MUTEX_ENABLE = 1, - HYGON_PSP_MUTEX_DISABLE, - HYGON_VPSP_CTRL_OPT, - HYGON_PSP_OPCODE_MAX_NR, -}; - -enum VPSP_DEV_CTRL_OPCODE { - VPSP_OP_VID_ADD, - VPSP_OP_VID_DEL, - VPSP_OP_SET_DEFAULT_VID_PERMISSION, - VPSP_OP_GET_DEFAULT_VID_PERMISSION, -}; - -struct vpsp_dev_ctrl { - unsigned char op; - union { - unsigned int vid; - // Set or check the permissions for the default VID - unsigned int def_vid_perm; - unsigned char reserved[128]; - } data; -}; - -int psp_mutex_enabled; -extern struct mutex sev_cmd_mutex; - -uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) -{ - return xchg(dst, val); -} - -int psp_mutex_init(struct psp_mutex *mutex) -{ - if (!mutex) - return -1; - mutex->locked = 0; - return 0; -} - -int psp_mutex_trylock(struct psp_mutex *mutex) -{ - if (atomic64_exchange(&mutex->locked, 1)) - return 0; - else - return 1; -} - -int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms) -{ - int ret = 0; - unsigned long je; - - je = jiffies + msecs_to_jiffies(ms); - do { - if (psp_mutex_trylock(mutex)) { - ret = 1; - break; - } - } while ((ms == 0) || time_before(jiffies, je)); - - return ret; -} - -int psp_mutex_unlock(struct psp_mutex *mutex) -{ - if (!mutex) - return -1; - - atomic64_exchange(&mutex->locked, 0); - return 0; -} - static struct psp_device *psp_alloc_struct(struct sp_device *sp) { struct device *dev = sp->dev; @@ -143,102 +61,6 @@ static irqreturn_t psp_irq_handler(int irq, void *data) return IRQ_HANDLED; } -#ifdef CONFIG_HYGON_PSP2CPU_CMD -static DEFINE_SPINLOCK(p2c_notifier_lock); 
-static p2c_notifier_t p2c_notifiers[P2C_NOTIFIERS_MAX] = {NULL}; -int psp_register_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)) -{ - int ret = -ENODEV; - unsigned long flags; - - spin_lock_irqsave(&p2c_notifier_lock, flags); - if (cmd_id < P2C_NOTIFIERS_MAX && !p2c_notifiers[cmd_id]) { - p2c_notifiers[cmd_id] = notifier; - ret = 0; - } - spin_unlock_irqrestore(&p2c_notifier_lock, flags); - - return ret; -} -EXPORT_SYMBOL_GPL(psp_register_cmd_notifier); - -int psp_unregister_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)) -{ - int ret = -ENODEV; - unsigned long flags; - - spin_lock_irqsave(&p2c_notifier_lock, flags); - if (cmd_id < P2C_NOTIFIERS_MAX && p2c_notifiers[cmd_id] == notifier) { - p2c_notifiers[cmd_id] = NULL; - ret = 0; - } - spin_unlock_irqrestore(&p2c_notifier_lock, flags); - - return ret; -} -EXPORT_SYMBOL_GPL(psp_unregister_cmd_notifier); - -#define PSP2CPU_MAX_LOOP 100 -static irqreturn_t psp_irq_handler_hygon(int irq, void *data) -{ - struct psp_device *psp = data; - struct sev_device *sev = psp->sev_irq_data; - unsigned int status; - int reg; - unsigned long flags; - int count = 0; - uint32_t p2c_cmd; - uint32_t p2c_lo_data; - uint32_t p2c_hi_data; - uint64_t p2c_data; - - /* Read the interrupt status: */ - status = ioread32(psp->io_regs + psp->vdata->intsts_reg); - - while (status && (count++ < PSP2CPU_MAX_LOOP)) { - /* Clear the interrupt status by writing the same value we read. 
*/ - iowrite32(status, psp->io_regs + psp->vdata->intsts_reg); - - /* Check if it is command completion: */ - if (status & SEV_CMD_COMPLETE) { - /* Check if it is SEV command completion: */ - reg = ioread32(psp->io_regs + psp->vdata->sev->cmdresp_reg); - if (reg & PSP_CMDRESP_RESP) { - sev->int_rcvd = 1; - wake_up(&sev->int_queue); - } - } - - if (status & PSP_X86_CMD) { - /* Check if it is P2C command completion: */ - reg = ioread32(psp->io_regs + psp->vdata->p2c_cmdresp_reg); - if (!(reg & PSP_CMDRESP_RESP)) { - p2c_lo_data = ioread32(psp->io_regs + - psp->vdata->p2c_cmdbuff_addr_lo_reg); - p2c_hi_data = ioread32(psp->io_regs + - psp->vdata->p2c_cmdbuff_addr_hi_reg); - p2c_data = (((uint64_t)(p2c_hi_data) << 32) + - ((uint64_t)(p2c_lo_data))); - p2c_cmd = (uint32_t)(reg & SEV_CMDRESP_IOC); - if (p2c_cmd < P2C_NOTIFIERS_MAX) { - spin_lock_irqsave(&p2c_notifier_lock, flags); - if (p2c_notifiers[p2c_cmd]) - p2c_notifiers[p2c_cmd](p2c_cmd, p2c_data); - - spin_unlock_irqrestore(&p2c_notifier_lock, flags); - } - - reg |= PSP_CMDRESP_RESP; - iowrite32(reg, psp->io_regs + psp->vdata->p2c_cmdresp_reg); - } - } - status = ioread32(psp->io_regs + psp->vdata->intsts_reg); - } - - return IRQ_HANDLED; -} -#endif - static unsigned int psp_get_capability(struct psp_device *psp) { unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg); @@ -335,7 +157,7 @@ static int psp_init(struct psp_device *psp) psp_init_platform_access(psp); #ifdef CONFIG_TDM_DEV_HYGON - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (is_vendor_hygon()) { ret = tdm_dev_init(); if (ret) return ret; @@ -345,330 +167,9 @@ static int psp_init(struct psp_device *psp) return 0; } -static int mmap_psp(struct file *filp, struct vm_area_struct *vma) -{ - unsigned long page; - - page = virt_to_phys((void *)psp_misc->data_pg_aligned) >> PAGE_SHIFT; - - if (remap_pfn_range(vma, vma->vm_start, page, (vma->vm_end - vma->vm_start), - vma->vm_page_prot)) { - printk(KERN_ERR "remap failed..."); - return 
-1; - } - vm_flags_mod(vma, VM_DONTDUMP | VM_DONTEXPAND, 0); - printk(KERN_INFO "remap_pfn_rang page:[%lu] ok.\n", page); - return 0; -} - -static ssize_t read_psp(struct file *file, char __user *buf, size_t count, loff_t *ppos) -{ - ssize_t remaining; - - if ((*ppos + count) > PAGE_SIZE) { - printk(KERN_ERR "%s: invalid address range, pos %llx, count %lx\n", - __func__, *ppos, count); - return -EFAULT; - } - - remaining = copy_to_user(buf, (char *)psp_misc->data_pg_aligned + *ppos, count); - if (remaining) - return -EFAULT; - - *ppos += count; - - return count; -} - -static ssize_t write_psp(struct file *file, const char __user *buf, size_t count, loff_t *ppos) -{ - ssize_t remaining, written; - - if ((*ppos + count) > PAGE_SIZE) { - printk(KERN_ERR "%s: invalid address range, pos %llx, count %lx\n", - __func__, *ppos, count); - return -EFAULT; - } - - remaining = copy_from_user((char *)psp_misc->data_pg_aligned + *ppos, buf, count); - written = count - remaining; - if (!written) - return -EFAULT; - - *ppos += written; - - return written; -} - -DEFINE_RWLOCK(vpsp_rwlock); - -/* VPSP_VID_MAX_ENTRIES determines the maximum number of vms that can set vid. - * but, the performance of finding vid is determined by g_vpsp_vid_num, - * so VPSP_VID_MAX_ENTRIES can be set larger. - */ -#define VPSP_VID_MAX_ENTRIES 2048 -#define VPSP_VID_NUM_MAX 64 - -struct vpsp_vid_entry { - uint32_t vid; - pid_t pid; -}; -static struct vpsp_vid_entry g_vpsp_vid_array[VPSP_VID_MAX_ENTRIES]; -static uint32_t g_vpsp_vid_num; -static int compare_vid_entries(const void *a, const void *b) -{ - return ((struct vpsp_vid_entry *)a)->pid - ((struct vpsp_vid_entry *)b)->pid; -} -static void swap_vid_entries(void *a, void *b, int size) -{ - struct vpsp_vid_entry entry; - - memcpy(&entry, a, size); - memcpy(a, b, size); - memcpy(b, &entry, size); -} - -/** - * When 'allow_default_vid' is set to 1, - * QEMU is allowed to use 'vid 0' by default - * in the absence of a valid 'vid' setting. 
- */ -uint32_t allow_default_vid = 1; -void vpsp_set_default_vid_permission(uint32_t is_allow) -{ - allow_default_vid = is_allow; -} - -int vpsp_get_default_vid_permission(void) -{ - return allow_default_vid; -} -EXPORT_SYMBOL_GPL(vpsp_get_default_vid_permission); - -/** - * When the virtual machine executes the 'tkm' command, - * it needs to retrieve the corresponding 'vid' - * by performing a binary search using 'kvm->userspace_pid'. - */ -int vpsp_get_vid(uint32_t *vid, pid_t pid) -{ - struct vpsp_vid_entry new_entry = {.pid = pid}; - struct vpsp_vid_entry *existing_entry = NULL; - - read_lock(&vpsp_rwlock); - existing_entry = bsearch(&new_entry, g_vpsp_vid_array, g_vpsp_vid_num, - sizeof(struct vpsp_vid_entry), compare_vid_entries); - read_unlock(&vpsp_rwlock); - - if (!existing_entry) - return -ENOENT; - - if (vid) { - *vid = existing_entry->vid; - pr_debug("PSP: %s %d, by pid %d\n", __func__, *vid, pid); - } - return 0; -} -EXPORT_SYMBOL_GPL(vpsp_get_vid); - -/** - * Upon qemu startup, this section checks whether - * the '-device psp,vid' parameter is specified. - * If set, it utilizes the 'vpsp_add_vid' function - * to insert the 'vid' and 'pid' values into the 'g_vpsp_vid_array'. - * The insertion is done in ascending order of 'pid'. 
- */ -static int vpsp_add_vid(uint32_t vid) -{ - pid_t cur_pid = task_pid_nr(current); - struct vpsp_vid_entry new_entry = {.vid = vid, .pid = cur_pid}; - - if (vpsp_get_vid(NULL, cur_pid) == 0) - return -EEXIST; - if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) - return -ENOMEM; - if (vid >= VPSP_VID_NUM_MAX) - return -EINVAL; - - write_lock(&vpsp_rwlock); - memcpy(&g_vpsp_vid_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_vid_entry)); - sort(g_vpsp_vid_array, g_vpsp_vid_num, sizeof(struct vpsp_vid_entry), - compare_vid_entries, swap_vid_entries); - pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num); - write_unlock(&vpsp_rwlock); - return 0; -} - -/** - * Upon the virtual machine is shut down, - * the 'vpsp_del_vid' function is employed to remove - * the 'vid' associated with the current 'pid'. - */ -static int vpsp_del_vid(void) -{ - pid_t cur_pid = task_pid_nr(current); - int i, ret = -ENOENT; - - write_lock(&vpsp_rwlock); - for (i = 0; i < g_vpsp_vid_num; ++i) { - if (g_vpsp_vid_array[i].pid == cur_pid) { - --g_vpsp_vid_num; - pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", - g_vpsp_vid_array[i].vid, cur_pid, g_vpsp_vid_num); - memcpy(&g_vpsp_vid_array[i], &g_vpsp_vid_array[i + 1], - sizeof(struct vpsp_vid_entry) * (g_vpsp_vid_num - i)); - ret = 0; - goto end; - } - } - -end: - write_unlock(&vpsp_rwlock); - return ret; -} - -static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) -{ - int ret = 0; - unsigned char op = ctrl->op; - - switch (op) { - case VPSP_OP_VID_ADD: - ret = vpsp_add_vid(ctrl->data.vid); - break; - - case VPSP_OP_VID_DEL: - ret = vpsp_del_vid(); - break; - - case VPSP_OP_SET_DEFAULT_VID_PERMISSION: - vpsp_set_default_vid_permission(ctrl->data.def_vid_perm); - break; - - case VPSP_OP_GET_DEFAULT_VID_PERMISSION: - ctrl->data.def_vid_perm = vpsp_get_default_vid_permission(); - break; - - default: - ret = -EINVAL; - break; - } - return ret; -} - -static long ioctl_psp(struct file *file, 
unsigned int ioctl, unsigned long arg) -{ - unsigned int opcode = 0; - struct vpsp_dev_ctrl vpsp_ctrl_op; - int ret = -EFAULT; - - if (_IOC_TYPE(ioctl) != HYGON_PSP_IOC_TYPE) { - printk(KERN_ERR "%s: invalid ioctl type: 0x%x\n", __func__, _IOC_TYPE(ioctl)); - return -EINVAL; - } - opcode = _IOC_NR(ioctl); - switch (opcode) { - case HYGON_PSP_MUTEX_ENABLE: - psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); - // And get the sev lock to make sure no one is using it now. - mutex_lock(&sev_cmd_mutex); - psp_mutex_enabled = 1; - mutex_unlock(&sev_cmd_mutex); - // Wait 10ms just in case someone is right before getting the psp lock. - mdelay(10); - psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); - ret = 0; - break; - - case HYGON_PSP_MUTEX_DISABLE: - mutex_lock(&sev_cmd_mutex); - // And get the psp lock to make sure no one is using it now. - psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); - psp_mutex_enabled = 0; - psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); - // Wait 10ms just in case someone is right before getting the sev lock. 
- mdelay(10); - mutex_unlock(&sev_cmd_mutex); - ret = 0; - break; - - case HYGON_VPSP_CTRL_OPT: - if (copy_from_user(&vpsp_ctrl_op, (void __user *)arg, - sizeof(struct vpsp_dev_ctrl))) - return -EFAULT; - ret = do_vpsp_op_ioctl(&vpsp_ctrl_op); - if (!ret && copy_to_user((void __user *)arg, &vpsp_ctrl_op, - sizeof(struct vpsp_dev_ctrl))) - return -EFAULT; - break; - - default: - printk(KERN_ERR "%s: invalid ioctl number: %d\n", __func__, opcode); - return -EINVAL; - } - return ret; -} - -static const struct file_operations psp_fops = { - .owner = THIS_MODULE, - .mmap = mmap_psp, - .read = read_psp, - .write = write_psp, - .unlocked_ioctl = ioctl_psp, -}; - -static int hygon_psp_additional_setup(struct sp_device *sp) -{ - struct device *dev = sp->dev; - int ret = 0; - - if (!psp_misc) { - struct miscdevice *misc; - - psp_misc = devm_kzalloc(dev, sizeof(*psp_misc), GFP_KERNEL); - if (!psp_misc) - return -ENOMEM; - psp_misc->data_pg_aligned = (struct psp_dev_data *)get_zeroed_page(GFP_KERNEL); - if (!psp_misc->data_pg_aligned) { - dev_err(dev, "alloc psp data page failed\n"); - devm_kfree(dev, psp_misc); - psp_misc = NULL; - return -ENOMEM; - } - SetPageReserved(virt_to_page(psp_misc->data_pg_aligned)); - psp_mutex_init(&psp_misc->data_pg_aligned->mb_mutex); - - *(uint32_t *)((void *)psp_misc->data_pg_aligned + 8) = 0xdeadbeef; - misc = &psp_misc->misc; - misc->minor = MISC_DYNAMIC_MINOR; - misc->name = "hygon_psp_config"; - misc->fops = &psp_fops; - - ret = misc_register(misc); - if (ret) - return ret; - kref_init(&psp_misc->refcount); - } else { - kref_get(&psp_misc->refcount); - } - - return ret; -} - -static void hygon_psp_exit(struct kref *ref) -{ - struct psp_misc_dev *misc_dev = container_of(ref, struct psp_misc_dev, refcount); - - misc_deregister(&misc_dev->misc); - ClearPageReserved(virt_to_page(misc_dev->data_pg_aligned)); - free_page((unsigned long)misc_dev->data_pg_aligned); - psp_misc = NULL; -} - int psp_dev_init(struct sp_device *sp) { struct device *dev 
= sp->dev; - struct pci_dev *pdev = to_pci_dev(dev); struct psp_device *psp; int ret; @@ -696,23 +197,14 @@ int psp_dev_init(struct sp_device *sp) iowrite32(0, psp->io_regs + psp->vdata->inten_reg); iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg); - if (pdev->vendor == PCI_VENDOR_ID_HYGON) { - is_hygon_psp = 1; - psp_mutex_enabled = 0; + /* Request an irq */ + if (is_vendor_hygon()) { ret = hygon_psp_additional_setup(sp); if (ret) { dev_err(dev, "psp: unable to do additional setup\n"); goto e_err; } - } - - /* Request an irq */ - if (pdev->vendor == PCI_VENDOR_ID_HYGON) { -#ifdef CONFIG_HYGON_PSP2CPU_CMD - ret = sp_request_psp_irq(psp->sp, psp_irq_handler_hygon, psp->name, psp); -#else - ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); -#endif + ret = sp_request_hygon_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); } else { ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); } @@ -762,7 +254,7 @@ void psp_dev_destroy(struct sp_device *sp) return; #ifdef CONFIG_TDM_DEV_HYGON - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + if (is_vendor_hygon()) tdm_dev_destroy(); #endif @@ -770,15 +262,15 @@ void psp_dev_destroy(struct sp_device *sp) tee_dev_destroy(psp); - if (is_hygon_psp && psp_misc) - kref_put(&psp_misc->refcount, hygon_psp_exit); - dbc_dev_destroy(psp); platform_access_dev_destroy(psp); sp_free_psp_irq(sp, psp); + if (is_vendor_hygon() && hygon_psp_hooks.psp_misc) + kref_put(&hygon_psp_hooks.psp_misc->refcount, hygon_psp_exit); + if (sp->clear_psp_master_device) sp->clear_psp_master_device(sp); } diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index 694bb3faf8be..2b06659bcf24 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -64,21 +64,6 @@ struct psp_device { unsigned int capability; }; -#define PSP_MUTEX_TIMEOUT 600000 -struct psp_mutex { - uint64_t locked; -}; - -struct psp_dev_data { - struct psp_mutex mb_mutex; -}; - -struct psp_misc_dev { - struct kref 
refcount; - struct psp_dev_data *data_pg_aligned; - struct miscdevice misc; -}; - void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler, void *data); void psp_clear_sev_irq_handler(struct psp_device *psp); diff --git a/drivers/crypto/ccp/psp-ringbuf.c b/drivers/crypto/ccp/psp-ringbuf.c index 09768c9bce31..8a461a0be6f7 100644 --- a/drivers/crypto/ccp/psp-ringbuf.c +++ b/drivers/crypto/ccp/psp-ringbuf.c @@ -67,6 +67,11 @@ int csv_queue_init(struct csv_queue *queue, return 0; } +void csv_queue_cleanup(struct csv_queue *queue) +{ + memset((void *)queue, 0, sizeof(struct csv_queue)); +} + unsigned int csv_enqueue_cmd(struct csv_queue *queue, const void *buf, unsigned int len) { diff --git a/drivers/crypto/ccp/psp-ringbuf.h b/drivers/crypto/ccp/psp-ringbuf.h index 336352cc7a66..2179834425b4 100644 --- a/drivers/crypto/ccp/psp-ringbuf.h +++ b/drivers/crypto/ccp/psp-ringbuf.h @@ -21,12 +21,13 @@ #include #include #include -#include +#include #include #include int csv_queue_init(struct csv_queue *queue, void *buffer, unsigned int size, size_t esize); +void csv_queue_cleanup(struct csv_queue *queue); unsigned int csv_enqueue_cmd(struct csv_queue *queue, const void *buf, unsigned int len); unsigned int csv_dequeue_stat(struct csv_queue *queue, diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 66137b7c0f11..ec1f6d79bd0f 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -43,7 +43,7 @@ #define SEV_FW_FILE "amd/sev.fw" #define SEV_FW_NAME_SIZE 64 -DEFINE_MUTEX(sev_cmd_mutex); +static DEFINE_MUTEX(sev_cmd_mutex); static struct sev_misc_dev *misc_dev; static int psp_cmd_timeout = 100; @@ -70,28 +70,6 @@ MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */ static bool psp_dead; static int psp_timeout; -static int csv_comm_mode = CSV_COMM_MAILBOX_ON; -extern int is_hygon_psp; -extern struct psp_misc_dev *psp_misc; -extern int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t 
ms); -extern int psp_mutex_trylock(struct psp_mutex *mutex); -extern int psp_mutex_unlock(struct psp_mutex *mutex); -extern int psp_mutex_enabled; - -/* defination of variabled used by virtual psp */ -enum VPSP_RB_CHECK_STATUS { - RB_NOT_CHECK = 0, - RB_CHECKING, - RB_CHECKED, - RB_CHECK_MAX -}; -#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) -#define VPSP_CMD_STATUS_RUNNING 0xffff -static DEFINE_MUTEX(vpsp_rb_mutex); -struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; -static uint8_t vpsp_rb_supported; -static atomic_t vpsp_rb_check_status = ATOMIC_INIT(RB_NOT_CHECK); - /* Trusted Memory Region (TMR): * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator * to allocate the memory, which will return aligned memory for the specified @@ -133,8 +111,7 @@ static void sev_irq_handler(int irq, void *data, unsigned int status) /* Check if it is SEV command completion: */ reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); if (FIELD_GET(PSP_CMDRESP_RESP, reg) || - ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && - (csv_comm_mode == CSV_COMM_RINGBUFFER_ON))) { + (is_vendor_hygon() && csv_in_ring_buffer_mode())) { sev->int_rcvd = 1; wake_up(&sev->int_queue); } @@ -155,22 +132,6 @@ static int sev_wait_cmd_ioc(struct sev_device *sev, return 0; } -static int csv_wait_cmd_ioc_ring_buffer(struct sev_device *sev, - unsigned int *reg, - unsigned int timeout) -{ - int ret; - - ret = wait_event_timeout(sev->int_queue, - sev->int_rcvd, timeout * HZ); - if (!ret) - return -ETIMEDOUT; - - *reg = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); - - return 0; -} - static int sev_cmd_buffer_len(int cmd) { /* @@ -447,326 +408,27 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) return ret; } -static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) -{ - struct psp_device *psp = psp_master; - struct sev_device *sev; - unsigned int phys_lsb, phys_msb; - unsigned int reg, ret = 0; - - if (!psp || 
!psp->sev_data) - return -ENODEV; - - if (psp_dead) - return -EBUSY; - - sev = psp->sev_data; - - /* Get the physical address of the command buffer */ - phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; - phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0; - - dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", - cmd, phys_msb, phys_lsb, psp_timeout); - - print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, - sev_cmd_buffer_len(cmd), false); - - iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); - iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - - sev->int_rcvd = 0; - - reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; - iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); - - /* wait for command completion */ - ret = sev_wait_cmd_ioc(sev, ®, psp_timeout); - if (ret) { - if (psp_ret) - *psp_ret = 0; - - dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); - psp_dead = true; - - return ret; - } - - psp_timeout = psp_cmd_timeout; - - if (psp_ret) - *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); - - if (FIELD_GET(PSP_CMDRESP_STS, reg)) { - dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", - cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); - ret = -EIO; - } - - print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, - sev_cmd_buffer_len(cmd), false); - - return ret; -} - -static int __csv_ring_buffer_enter_locked(int *error) -{ - struct psp_device *psp = psp_master; - struct sev_device *sev; - struct csv_data_ring_buffer *data; - struct csv_ringbuffer_queue *low_queue; - struct csv_ringbuffer_queue *hi_queue; - int ret = 0; - - if (!psp || !psp->sev_data) - return -ENODEV; - - sev = psp->sev_data; - - if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) - return -EEXIST; - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - low_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW]; - hi_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; - - 
data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); - data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); - data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); - data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); - data->queue_lo_size = 1; - data->queue_hi_size = 1; - data->int_on_empty = 1; - - ret = __sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); - if (!ret) { - iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - csv_comm_mode = CSV_COMM_RINGBUFFER_ON; - } - - kfree(data); - return ret; -} - -static int csv_get_cmd_status(struct sev_device *sev, int prio, int index) -{ - struct csv_queue *queue = &sev->ring_buffer[prio].stat_val; - struct csv_statval_entry *statval = (struct csv_statval_entry *)queue->data; - - return statval[index].status; -} - -static int __csv_do_ringbuf_cmds_locked(int *psp_ret) -{ - struct psp_device *psp = psp_master; - struct sev_device *sev; - unsigned int rb_tail; - unsigned int rb_ctl; - int last_cmd_index; - unsigned int reg, ret = 0; - - if (!psp || !psp->sev_data) - return -ENODEV; - - if (psp_dead) - return -EBUSY; - - sev = psp->sev_data; - - /* update rb tail */ - rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); - rb_tail |= (sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH].cmd_ptr.tail - << PSP_RBTAIL_QHI_TAIL_SHIFT); - rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); - rb_tail |= sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW].cmd_ptr.tail; - iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - - /* update rb ctl to trigger psp irq */ - sev->int_rcvd = 0; - - /* PSP response to x86 only when all queue is empty or error happends */ - rb_ctl = PSP_RBCTL_X86_WRITES | - PSP_RBCTL_RBMODE_ACT | - PSP_RBCTL_CLR_INTSTAT; - iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); - - /* wait for all commands in ring buffer completed */ - ret = 
csv_wait_cmd_ioc_ring_buffer(sev, ®, psp_timeout * 10); - if (ret) { - if (psp_ret) - *psp_ret = 0; - dev_err(sev->dev, "csv ringbuffer mode command timed out, disabling PSP\n"); - psp_dead = true; - - return ret; - } - - /* cmd error happends */ - if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) - ret = -EFAULT; - - if (psp_ret) { - last_cmd_index = (reg & PSP_RBHEAD_QHI_HEAD_MASK) - >> PSP_RBHEAD_QHI_HEAD_SHIFT; - *psp_ret = csv_get_cmd_status(sev, CSV_COMMAND_PRIORITY_HIGH, - last_cmd_index); - if (*psp_ret == 0) { - last_cmd_index = reg & PSP_RBHEAD_QLO_HEAD_MASK; - *psp_ret = csv_get_cmd_status(sev, - CSV_COMMAND_PRIORITY_LOW, last_cmd_index); - } - } - - return ret; -} - -static int csv_do_ringbuf_cmds(int *psp_ret) -{ - struct sev_user_data_status data; - int rc; - int mutex_enabled = READ_ONCE(psp_mutex_enabled); - - if (is_hygon_psp && mutex_enabled) { - if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, - PSP_MUTEX_TIMEOUT) != 1) - return -EBUSY; - } else { - mutex_lock(&sev_cmd_mutex); - } - - rc = __csv_ring_buffer_enter_locked(psp_ret); - if (rc) - goto cmd_unlock; - - rc = __csv_do_ringbuf_cmds_locked(psp_ret); - - /* exit ringbuf mode by send CMD in mailbox mode */ - __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, NULL); - csv_comm_mode = CSV_COMM_MAILBOX_ON; - -cmd_unlock: - if (is_hygon_psp && mutex_enabled) - psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); - else - mutex_unlock(&sev_cmd_mutex); - - return rc; -} - static int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; - int mutex_enabled = READ_ONCE(psp_mutex_enabled); + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - if (is_hygon_psp && mutex_enabled) { - if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, - PSP_MUTEX_TIMEOUT) != 1) + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; } else { mutex_lock(&sev_cmd_mutex); } 
rc = __sev_do_cmd_locked(cmd, data, psp_ret); - if (is_hygon_psp && mutex_enabled) - psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); return rc; } -static int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) -{ - struct psp_device *psp = psp_master; - struct sev_device *sev; - phys_addr_t phys_addr; - unsigned int phys_lsb, phys_msb; - unsigned int reg, ret = 0; - - if (!psp || !psp->sev_data) - return -ENODEV; - - if (psp_dead) - return -EBUSY; - - sev = psp->sev_data; - - if (data && WARN_ON_ONCE(!virt_addr_valid(data))) - return -EINVAL; - - /* Get the physical address of the command buffer */ - phys_addr = PUT_PSP_VID(__psp_pa(data), vid); - phys_lsb = data ? lower_32_bits(phys_addr) : 0; - phys_msb = data ? upper_32_bits(phys_addr) : 0; - - dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", - cmd, phys_msb, phys_lsb, psp_timeout); - - print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, - sev_cmd_buffer_len(cmd), false); - - iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); - iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - - sev->int_rcvd = 0; - - reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; - iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); - - /* wait for command completion */ - ret = sev_wait_cmd_ioc(sev, ®, psp_timeout); - if (ret) { - if (psp_ret) - *psp_ret = 0; - - dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); - psp_dead = true; - - return ret; - } - - psp_timeout = psp_cmd_timeout; - - if (psp_ret) - *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); - - if (FIELD_GET(PSP_CMDRESP_STS, reg)) { - dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", - cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); - ret = -EIO; - } - - print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 
2, data, - sev_cmd_buffer_len(cmd), false); - - return ret; -} - -int psp_do_cmd(int cmd, void *data, int *psp_ret) -{ - int rc; - int mutex_enabled = READ_ONCE(psp_mutex_enabled); - - if (is_hygon_psp && mutex_enabled) { - if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, - PSP_MUTEX_TIMEOUT) != 1) - return -EBUSY; - } else { - mutex_lock(&sev_cmd_mutex); - } - rc = __psp_do_cmd_locked(cmd, data, psp_ret); - if (is_hygon_psp && mutex_enabled) - psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); - else - mutex_unlock(&sev_cmd_mutex); - - return rc; -} -EXPORT_SYMBOL_GPL(psp_do_cmd); - static int __sev_init_locked(int *error) { struct sev_data_init data; @@ -880,18 +542,18 @@ static int __sev_platform_init_locked(int *error) int sev_platform_init(int *error) { int rc; - int mutex_enabled = READ_ONCE(psp_mutex_enabled); + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - if (is_hygon_psp && mutex_enabled) { - if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, - PSP_MUTEX_TIMEOUT) != 1) + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; } else { mutex_lock(&sev_cmd_mutex); } rc = __sev_platform_init_locked(error); - if (is_hygon_psp && mutex_enabled) - psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); @@ -917,10 +579,9 @@ static int __sev_platform_shutdown_locked(int *error) if (ret) return ret; - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { - csv_comm_mode = CSV_COMM_MAILBOX_ON; - csv_ring_buffer_queue_free(); - } + /* RING BUFFER mode exits if a SHUTDOWN command is executed */ + if (is_vendor_hygon() && csv_in_ring_buffer_mode()) + csv_restore_mailbox_mode_postprocess(); sev->state = SEV_STATE_UNINIT; dev_dbg(sev->dev, "SEV firmware 
shutdown\n"); @@ -931,18 +592,18 @@ static int __sev_platform_shutdown_locked(int *error) static int sev_platform_shutdown(int *error) { int rc; - int mutex_enabled = READ_ONCE(psp_mutex_enabled); + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - if (is_hygon_psp && mutex_enabled) { - if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, - PSP_MUTEX_TIMEOUT) != 1) + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; } else { mutex_lock(&sev_cmd_mutex); } rc = __sev_platform_shutdown_locked(NULL); - if (is_hygon_psp && mutex_enabled) - psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); @@ -1491,7 +1152,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) struct sev_issue_cmd input; int ret = -EFAULT; bool writable = file->f_mode & FMODE_WRITE; - int mutex_enabled = READ_ONCE(psp_mutex_enabled); + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); if (!psp_master || !psp_master->sev_data) return -ENODEV; @@ -1510,9 +1171,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) return -EINVAL; } - if (is_hygon_psp && mutex_enabled) { - if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, - PSP_MUTEX_TIMEOUT) != 1) + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; } else { mutex_lock(&sev_cmd_mutex); @@ -1576,8 +1237,8 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; out: - if (is_hygon_psp && mutex_enabled) - 
psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); @@ -1619,621 +1280,6 @@ int sev_guest_df_flush(int *error) } EXPORT_SYMBOL_GPL(sev_guest_df_flush); -int csv_ring_buffer_queue_free(void); - -static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer) -{ - int ret = 0; - void *cmd_ptr_buffer = NULL; - void *stat_val_buffer = NULL; - - memset((void *)ring_buffer, 0, sizeof(struct csv_ringbuffer_queue)); - - cmd_ptr_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); - if (!cmd_ptr_buffer) - return -ENOMEM; - - csv_queue_init(&ring_buffer->cmd_ptr, cmd_ptr_buffer, - CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); - - stat_val_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); - if (!stat_val_buffer) { - ret = -ENOMEM; - goto free_cmdptr; - } - - csv_queue_init(&ring_buffer->stat_val, stat_val_buffer, - CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); - return 0; - -free_cmdptr: - kfree(cmd_ptr_buffer); - - return ret; -} - -int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) -{ - struct psp_device *psp = psp_master; - struct sev_device *sev; - struct csv_cmdptr_entry cmdptr = { }; - - if (!psp || !psp->sev_data) - return -ENODEV; - - sev = psp->sev_data; - - cmdptr.cmd_buf_ptr = __psp_pa(data); - cmdptr.cmd_id = cmd; - cmdptr.cmd_flags = flags; - - if (csv_enqueue_cmd(&sev->ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) - return -EFAULT; - - return 0; -} -EXPORT_SYMBOL_GPL(csv_fill_cmd_queue); - -int csv_check_stat_queue_status(int *psp_ret) -{ - struct psp_device *psp = psp_master; - struct sev_device *sev; - unsigned int len; - int prio; - - if (!psp || !psp->sev_data) - return -ENODEV; - - sev = psp->sev_data; - - for (prio = CSV_COMMAND_PRIORITY_HIGH; - prio < CSV_COMMAND_PRIORITY_NUM; prio++) { - do { - struct csv_statval_entry statval; - - len = 
csv_dequeue_stat(&sev->ring_buffer[prio].stat_val, - &statval, 1); - if (len) { - if (statval.status != 0) { - *psp_ret = statval.status; - return -EFAULT; - } - } - } while (len); - } - - return 0; -} -EXPORT_SYMBOL_GPL(csv_check_stat_queue_status); - -int csv_ring_buffer_queue_init(void) -{ - struct psp_device *psp = psp_master; - struct sev_device *sev; - int i, ret = 0; - - if (!psp || !psp->sev_data) - return -ENODEV; - - sev = psp->sev_data; - - for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { - ret = __csv_ring_buffer_queue_init(&sev->ring_buffer[i]); - if (ret) - goto e_free; - } - - return 0; - -e_free: - csv_ring_buffer_queue_free(); - return ret; -} -EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_init); - -int csv_ring_buffer_queue_free(void) -{ - struct psp_device *psp = psp_master; - struct sev_device *sev; - struct csv_ringbuffer_queue *ring_buffer; - int i; - - if (!psp || !psp->sev_data) - return -ENODEV; - - sev = psp->sev_data; - - for (i = 0; i < CSV_COMMAND_PRIORITY_NUM; i++) { - ring_buffer = &sev->ring_buffer[i]; - - if (ring_buffer->cmd_ptr.data) { - kfree((void *)ring_buffer->cmd_ptr.data); - ring_buffer->cmd_ptr.data = 0; - } - - if (ring_buffer->stat_val.data) { - kfree((void *)ring_buffer->stat_val.data); - ring_buffer->stat_val.data = 0; - } - } - return 0; -} -EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_free); - -static int get_queue_tail(struct csv_ringbuffer_queue *ringbuffer) -{ - return ringbuffer->cmd_ptr.tail & ringbuffer->cmd_ptr.mask; -} - -static int get_queue_head(struct csv_ringbuffer_queue *ringbuffer) -{ - return ringbuffer->cmd_ptr.head & ringbuffer->cmd_ptr.mask; -} - -static void vpsp_set_cmd_status(int prio, int index, int status) -{ - struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; - struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; - - statval[index].status = status; -} - -static int vpsp_get_cmd_status(int prio, int index) -{ - struct csv_queue *ringbuf = 
&vpsp_ring_buffer[prio].stat_val; - struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; - - return statval[index].status; -} - -static unsigned int vpsp_queue_cmd_size(int prio) -{ - return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); -} - -static int vpsp_dequeue_cmd(int prio, int index, - struct csv_cmdptr_entry *cmd_ptr) -{ - mutex_lock(&vpsp_rb_mutex); - - /* The status update must be before the head update */ - vpsp_set_cmd_status(prio, index, 0); - csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); - - mutex_unlock(&vpsp_rb_mutex); - - return 0; -} - -/* - * Populate the command from the virtual machine to the queue to - * support execution in ringbuffer mode - */ -static int vpsp_fill_cmd_queue(uint32_t vid, int prio, int cmd, void *data, uint16_t flags) -{ - struct csv_cmdptr_entry cmdptr = { }; - int index = -1; - - cmdptr.cmd_buf_ptr = PUT_PSP_VID(__psp_pa(data), vid); - cmdptr.cmd_id = cmd; - cmdptr.cmd_flags = flags; - - mutex_lock(&vpsp_rb_mutex); - index = get_queue_tail(&vpsp_ring_buffer[prio]); - - /* If status is equal to VPSP_CMD_STATUS_RUNNING, then the queue is full */ - if (vpsp_get_cmd_status(prio, index) == VPSP_CMD_STATUS_RUNNING) { - index = -1; - goto out; - } - - /* The status must be written first, and then the cmd can be enqueued */ - vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); - if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { - vpsp_set_cmd_status(prio, index, 0); - index = -1; - goto out; - } - -out: - mutex_unlock(&vpsp_rb_mutex); - return index; -} - -static void vpsp_ring_update_head(struct csv_ringbuffer_queue *ring_buffer, - uint32_t new_head) -{ - uint32_t orig_head = get_queue_head(ring_buffer); - uint32_t comple_num = 0; - - if (new_head >= orig_head) - comple_num = new_head - orig_head; - else - comple_num = ring_buffer->cmd_ptr.mask - (orig_head - new_head) - + 1; - - ring_buffer->cmd_ptr.head += comple_num; -} - -static int 
vpsp_ring_buffer_queue_init(void) -{ - int i; - int ret; - - for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { - ret = __csv_ring_buffer_queue_init(&vpsp_ring_buffer[i]); - if (ret) - return ret; - } - - return 0; -} - -static int __vpsp_ring_buffer_enter_locked(int *error) -{ - int ret; - struct csv_data_ring_buffer *data; - struct csv_ringbuffer_queue *low_queue; - struct csv_ringbuffer_queue *hi_queue; - struct sev_device *sev = psp_master->sev_data; - - if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) - return -EEXIST; - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; - - low_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]; - hi_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; - - data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); - data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); - data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); - data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); - data->queue_lo_size = 1; - data->queue_hi_size = 1; - data->int_on_empty = 1; - - ret = __sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); - if (!ret) { - iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - csv_comm_mode = CSV_COMM_RINGBUFFER_ON; - } - - kfree(data); - return ret; -} - -static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) -{ - struct psp_device *psp = psp_master; - unsigned int reg, ret = 0; - unsigned int rb_tail, rb_head; - unsigned int rb_ctl; - struct sev_device *sev; - - if (!psp) - return -ENODEV; - - if (psp_dead) - return -EBUSY; - - sev = psp->sev_data; - - /* update rb tail */ - rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); - rb_tail |= (get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) - << PSP_RBTAIL_QHI_TAIL_SHIFT); - rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); - rb_tail |= 
get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); - iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - - /* update rb head */ - rb_head = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); - rb_head &= (~PSP_RBHEAD_QHI_HEAD_MASK); - rb_head |= (get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) - << PSP_RBHEAD_QHI_HEAD_SHIFT); - rb_head &= (~PSP_RBHEAD_QLO_HEAD_MASK); - rb_head |= get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); - iowrite32(rb_head, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); - - /* update rb ctl to trigger psp irq */ - sev->int_rcvd = 0; - /* PSP response to x86 only when all queue is empty or error happends */ - rb_ctl = (PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT); - iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); - - /* wait for all commands in ring buffer completed */ - ret = csv_wait_cmd_ioc_ring_buffer(sev, ®, psp_timeout*10); - if (ret) { - if (psp_ret) - *psp_ret = 0; - - dev_err(psp->dev, "sev command in ringbuffer mode timed out, disabling PSP\n"); - psp_dead = true; - return ret; - } - /* cmd error happends */ - if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) - ret = -EFAULT; - - /* update head */ - vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH], - (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); - vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW], - reg & PSP_RBHEAD_QLO_HEAD_MASK); - - if (psp_ret) - *psp_ret = vpsp_get_cmd_status(prio, index); - - return ret; -} - -static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) -{ - struct sev_user_data_status data; - int rc; - - rc = __vpsp_ring_buffer_enter_locked(psp_ret); - if (rc) - goto end; - - rc = __vpsp_do_ringbuf_cmds_locked(psp_ret, prio, index); - - /* exit ringbuf mode by send CMD in mailbox mode */ - __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, - &data, NULL); - csv_comm_mode = CSV_COMM_MAILBOX_ON; - -end: - return 
rc; -} - -/** - * struct user_data_status - PLATFORM_STATUS command parameters - * - * @major: major API version - * @minor: minor API version - * @state: platform state - * @owner: self-owned or externally owned - * @chip_secure: ES or MP chip - * @fw_enc: is this FW is encrypted - * @fw_sign: is this FW is signed - * @config_es: platform config flags for csv-es - * @build: Firmware Build ID for this API version - * @bl_version_debug: Bootloader VERSION_DEBUG field - * @bl_version_minor: Bootloader VERSION_MINOR field - * @bl_version_major: Bootloader VERSION_MAJOR field - * @guest_count: number of active guests - * @reserved: should set to zero - */ -struct user_data_status { - uint8_t api_major; /* Out */ - uint8_t api_minor; /* Out */ - uint8_t state; /* Out */ - uint8_t owner : 1, /* Out */ - chip_secure : 1, /* Out */ - fw_enc : 1, /* Out */ - fw_sign : 1, /* Out */ - reserved1 : 4; /*reserved*/ - uint32_t config_es : 1, /* Out */ - build : 31; /* Out */ - uint32_t guest_count; /* Out */ -} __packed; - -/* - * Check whether the firmware supports ringbuffer mode and parse - * commands from the virtual machine - */ -static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, - struct vpsp_cmd *vcmd) -{ - int ret, error; - int rb_supported; - int rb_check_old = RB_NOT_CHECK; - struct user_data_status *status = NULL; - - if (atomic_try_cmpxchg(&vpsp_rb_check_status, &rb_check_old, - RB_CHECKING)) { - /* get buildid to check if the firmware supports ringbuffer mode */ - status = kzalloc(sizeof(*status), GFP_KERNEL); - if (!status) { - atomic_set(&vpsp_rb_check_status, RB_CHECKED); - goto end; - } - ret = sev_platform_status((struct sev_user_data_status *)status, - &error); - if (ret) { - pr_warn("failed to get status[%#x], use default command mode.\n", error); - atomic_set(&vpsp_rb_check_status, RB_CHECKED); - goto end; - } - - /* check if the firmware supports the ringbuffer mode */ - if (VPSP_RB_IS_SUPPORTED(status->build)) { - if (vpsp_ring_buffer_queue_init()) 
{ - pr_warn("vpsp_ring_buffer_queue_init fail, use default command mode\n"); - atomic_set(&vpsp_rb_check_status, RB_CHECKED); - goto end; - } - WRITE_ONCE(vpsp_rb_supported, 1); - } - - atomic_set(&vpsp_rb_check_status, RB_CHECKED); - } - -end: - rb_supported = READ_ONCE(vpsp_rb_supported); - /* parse prio by vcmd */ - if (rb_supported && vcmd->is_high_rb) - *prio = CSV_COMMAND_PRIORITY_HIGH; - else - *prio = CSV_COMMAND_PRIORITY_LOW; - /* clear rb level bit in vcmd */ - vcmd->is_high_rb = 0; - - kfree(status); - return rb_supported; -} - -/* - * Try to obtain the result again by the command index, this - * interface is used in ringbuffer mode - */ -int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data, - struct vpsp_ret *psp_ret) -{ - int ret = 0; - struct csv_cmdptr_entry cmd = {0}; - int mutex_enabled = READ_ONCE(psp_mutex_enabled); - - /* Get the retult directly if the command has been executed */ - if (index >= 0 && vpsp_get_cmd_status(prio, index) != - VPSP_CMD_STATUS_RUNNING) { - psp_ret->pret = vpsp_get_cmd_status(prio, index); - psp_ret->status = VPSP_FINISH; - return 0; - } - - if (is_hygon_psp && mutex_enabled) - ret = psp_mutex_trylock(&psp_misc->data_pg_aligned->mb_mutex); - else - ret = mutex_trylock(&sev_cmd_mutex); - - if (ret) { - /* Use mailbox mode to execute a command if there is only one command */ - if (vpsp_queue_cmd_size(prio) == 1) { - /* dequeue command from queue*/ - vpsp_dequeue_cmd(prio, index, &cmd); - ret = __vpsp_do_cmd_locked(vid, cmd.cmd_id, data, - (int *)psp_ret); - psp_ret->status = VPSP_FINISH; - if (unlikely(ret)) { - if (ret == -EIO) { - ret = 0; - } else { - pr_err("[%s]: psp do cmd error, %d\n", - __func__, psp_ret->pret); - ret = -EIO; - goto end; - } - } - } else { - ret = vpsp_do_ringbuf_cmds_locked((int *)psp_ret, prio, - index); - psp_ret->status = VPSP_FINISH; - if (unlikely(ret)) { - pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed %d\n", - __func__, ret); - goto end; - } - } - } else { - /* 
Change the command to the running state if getting the mutex fails */ - psp_ret->index = index; - psp_ret->status = VPSP_RUNNING; - return 0; - } -end: - if (is_hygon_psp && mutex_enabled) - psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); - else - mutex_unlock(&sev_cmd_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(vpsp_try_get_result); - -int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) -{ - int rc; - int mutex_enabled = READ_ONCE(psp_mutex_enabled); - - if (is_hygon_psp && mutex_enabled) { - if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, - PSP_MUTEX_TIMEOUT) != 1) { - return -EBUSY; - } - } else { - mutex_lock(&sev_cmd_mutex); - } - - rc = __vpsp_do_cmd_locked(vid, cmd, data, psp_ret); - - if (is_hygon_psp && mutex_enabled) - psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); - else - mutex_unlock(&sev_cmd_mutex); - - return rc; -} - -/* - * Send the virtual psp command to the PSP device and try to get the - * execution result, the interface and the vpsp_try_get_result - * interface are executed asynchronously. If the execution succeeds, - * the result is returned to the VM. 
If the execution fails, the - * vpsp_try_get_result interface will be used to obtain the result - * later again - */ -int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) -{ - int ret = 0; - int rb_supported; - int index = -1; - uint8_t prio = CSV_COMMAND_PRIORITY_LOW; - - /* ringbuffer mode check and parse command prio*/ - rb_supported = vpsp_rb_check_and_cmd_prio_parse(&prio, - (struct vpsp_cmd *)&cmd); - if (rb_supported) { - /* fill command in ringbuffer's queue and get index */ - index = vpsp_fill_cmd_queue(vid, prio, cmd, data, 0); - if (unlikely(index < 0)) { - /* do mailbox command if queuing failed*/ - ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); - if (unlikely(ret)) { - if (ret == -EIO) { - ret = 0; - } else { - pr_err("[%s]: psp do cmd error, %d\n", - __func__, psp_ret->pret); - ret = -EIO; - goto end; - } - } - psp_ret->status = VPSP_FINISH; - goto end; - } - - /* try to get result from the ringbuffer command */ - ret = vpsp_try_get_result(vid, prio, index, data, psp_ret); - if (unlikely(ret)) { - pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret); - goto end; - } - } else { - /* mailbox mode */ - ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); - if (unlikely(ret)) { - if (ret == -EIO) { - ret = 0; - } else { - pr_err("[%s]: psp do cmd error, %d\n", - __func__, psp_ret->pret); - ret = -EIO; - goto end; - } - } - psp_ret->status = VPSP_FINISH; - } - -end: - return ret; -} -EXPORT_SYMBOL_GPL(vpsp_try_do_cmd); - static void sev_exit(struct kref *ref) { misc_deregister(&misc_dev->misc); @@ -2295,7 +1341,12 @@ static int sev_misc_init(struct sev_device *sev) static void sev_dev_install_hooks(void) { hygon_psp_hooks.sev_cmd_mutex = &sev_cmd_mutex; + hygon_psp_hooks.psp_dead = &psp_dead; + hygon_psp_hooks.psp_timeout = &psp_timeout; + hygon_psp_hooks.psp_cmd_timeout = &psp_cmd_timeout; + hygon_psp_hooks.sev_cmd_buffer_len = sev_cmd_buffer_len; hygon_psp_hooks.__sev_do_cmd_locked = __sev_do_cmd_locked; + 
hygon_psp_hooks.sev_wait_cmd_ioc = sev_wait_cmd_ioc; hygon_psp_hooks.sev_dev_hooks_installed = true; } diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index aae8a7da1dac..7d95340c40ff 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -456,6 +456,8 @@ struct kvm_vpsp { #ifdef CONFIG_CRYPTO_DEV_SP_PSP +int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret); + int psp_do_cmd(int cmd, void *data, int *psp_ret); int csv_ring_buffer_queue_init(void); @@ -482,6 +484,8 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_ gpa_t table_gpa); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ +static inline int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) { return -ENODEV; } + static inline int psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; } static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } -- Gitee From b59e99184b1ed6748be84d2c444e2108f861758b Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 17 Oct 2024 20:31:58 +0800 Subject: [PATCH 1489/2138] anolis: crypto: ccp: Use csv_ioctl to process Hygon CSV ioctl request ANBZ: #11419 Introduce csv_ioctl, when user request Hygon CSV ioctl command, the csv_ioctl will process first. The csv_ioctl interface is placed in .../hygon/csv-dev.c, this will further reduce the code intrusion in .../sev-dev.c. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4009 --- drivers/crypto/ccp/hygon/csv-dev.c | 94 +++++++++++++++++++++++++++--- drivers/crypto/ccp/hygon/csv-dev.h | 5 +- drivers/crypto/ccp/hygon/psp-dev.h | 3 + drivers/crypto/ccp/sev-dev.c | 50 ++++------------ 4 files changed, 102 insertions(+), 50 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 1847a8b9f465..e38e7b74235b 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -79,16 +79,13 @@ int csv_cmd_buffer_len(int cmd) } } -int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp) +static int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp) { struct csv_user_data_hgsc_cert_import input; struct csv_data_hgsc_cert_import *data; void *hgscsk_blob, *hgsc_blob; int ret; - if (!hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) return -EFAULT; @@ -127,7 +124,7 @@ int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp) return ret; } -int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) +static int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) { struct sev_data_download_firmware *data = NULL; struct csv_user_data_download_firmware input; @@ -135,9 +132,6 @@ int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) struct page *p; u64 data_size; - if (!hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - /* Only support DOWNLOAD_FIRMWARE if build greater or equal 1667 */ if (!csv_version_greater_or_equal(1667)) { pr_err("DOWNLOAD_FIRMWARE not supported\n"); @@ -198,6 +192,79 @@ int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) return ret; } +static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct sev_issue_cmd input; + int ret = -EFAULT; + int 
mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (!psp_master || !psp_master->sev_data) + return -ENODEV; + + if (ioctl != SEV_ISSUE_CMD) + return -EINVAL; + + if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) + return -EFAULT; + + if (input.cmd > CSV_MAX) + return -EINVAL; + + if (mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } + + switch (input.cmd) { + case CSV_HGSC_CERT_IMPORT: + ret = csv_ioctl_do_hgsc_import(&input); + break; + case CSV_PLATFORM_INIT: + ret = hygon_psp_hooks.__sev_platform_init_locked(&input.error); + break; + case CSV_PLATFORM_SHUTDOWN: + ret = hygon_psp_hooks.__sev_platform_shutdown_locked(&input.error); + break; + case CSV_DOWNLOAD_FIRMWARE: + ret = csv_ioctl_do_download_firmware(&input); + break; + default: + /* + * If the command is compatible between CSV and SEV, the + * native implementation of the driver is invoked. + * Release the mutex before calling the native ioctl function + * because it will acquires the mutex. + */ + if (mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + return hygon_psp_hooks.sev_ioctl(file, ioctl, arg); + } + + if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) + ret = -EFAULT; + + if (mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return ret; +} + +const struct file_operations csv_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = csv_ioctl, +}; + /* * __csv_ring_buffer_enter_locked issues command to switch to RING BUFFER * mode, the caller must acquire the mutex lock. 
@@ -339,7 +406,7 @@ static int __csv_do_ringbuf_cmds_locked(int *psp_ret) * queued in RING BUFFER queues, the user is obligate to manage RING * BUFFER queues including allocate, enqueue and free, etc. */ -int csv_do_ringbuf_cmds(int *psp_ret) +static int csv_do_ringbuf_cmds(int *psp_ret) { struct sev_user_data_status data; int rc; @@ -375,6 +442,15 @@ int csv_do_ringbuf_cmds(int *psp_ret) return rc; } +int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) +{ + if (!filep || filep->f_op != &csv_fops) + return -EBADF; + + return csv_do_ringbuf_cmds(psp_ret); +} +EXPORT_SYMBOL_GPL(csv_issue_ringbuf_cmds_external_user); + void csv_restore_mailbox_mode_postprocess(void) { csv_comm_mode = CSV_COMM_MAILBOX_ON; diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h index b2b46ae779a9..187aedef084a 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.h +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -10,19 +10,18 @@ #ifndef __CCP_HYGON_CSV_DEV_H__ #define __CCP_HYGON_CSV_DEV_H__ +#include #include #define CSV_FW_FILE "hygon/csv.fw" extern u32 hygon_csv_build; extern int csv_comm_mode; +extern const struct file_operations csv_fops; void csv_update_api_version(struct sev_user_data_status *status); int csv_cmd_buffer_len(int cmd); void csv_restore_mailbox_mode_postprocess(void); -int csv_do_ringbuf_cmds(int *psp_ret); -int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp); -int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp); static inline bool csv_version_greater_or_equal(u32 build) { diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index d4339c51d085..4f570b361f2c 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -32,8 +32,11 @@ extern struct hygon_psp_hooks_table { int *psp_cmd_timeout; int (*sev_cmd_buffer_len)(int cmd); int (*__sev_do_cmd_locked)(int cmd, void *data, int *psp_ret); + int (*__sev_platform_init_locked)(int *error); + int 
(*__sev_platform_shutdown_locked)(int *error); int (*sev_wait_cmd_ioc)(struct sev_device *sev, unsigned int *reg, unsigned int timeout); + long (*sev_ioctl)(struct file *file, unsigned int ioctl, unsigned long arg); } hygon_psp_hooks; #define PSP_MUTEX_TIMEOUT 600000 diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index ec1f6d79bd0f..74ef73377108 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1163,13 +1163,8 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) return -EFAULT; - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { - if (input.cmd > CSV_MAX) - return -EINVAL; - } else { - if (input.cmd > SEV_MAX) - return -EINVAL; - } + if (input.cmd > SEV_MAX) + return -EINVAL; if (is_vendor_hygon() && mutex_enabled) { if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, @@ -1179,25 +1174,6 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) mutex_lock(&sev_cmd_mutex); } - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { - switch (input.cmd) { - case CSV_PLATFORM_INIT: - ret = __sev_platform_init_locked(&input.error); - goto result_to_user; - case CSV_PLATFORM_SHUTDOWN: - ret = __sev_platform_shutdown_locked(&input.error); - goto result_to_user; - case CSV_DOWNLOAD_FIRMWARE: - ret = csv_ioctl_do_download_firmware(&input); - goto result_to_user; - case CSV_HGSC_CERT_IMPORT: - ret = csv_ioctl_do_hgsc_import(&input); - goto result_to_user; - default: - break; - } - } - switch (input.cmd) { case SEV_FACTORY_RESET: @@ -1233,7 +1209,6 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) goto out; } -result_to_user: if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; out: @@ -1316,7 +1291,11 @@ static int sev_misc_init(struct sev_device *sev) misc = &misc_dev->misc; misc->minor = MISC_DYNAMIC_MINOR; 
misc->name = DEVICE_NAME; - misc->fops = &sev_fops; + + if (is_vendor_hygon()) + misc->fops = &csv_fops; + else + misc->fops = &sev_fops; ret = misc_register(misc); if (ret) @@ -1346,7 +1325,10 @@ static void sev_dev_install_hooks(void) hygon_psp_hooks.psp_cmd_timeout = &psp_cmd_timeout; hygon_psp_hooks.sev_cmd_buffer_len = sev_cmd_buffer_len; hygon_psp_hooks.__sev_do_cmd_locked = __sev_do_cmd_locked; + hygon_psp_hooks.__sev_platform_init_locked = __sev_platform_init_locked; + hygon_psp_hooks.__sev_platform_shutdown_locked = __sev_platform_shutdown_locked; hygon_psp_hooks.sev_wait_cmd_ioc = sev_wait_cmd_ioc; + hygon_psp_hooks.sev_ioctl = sev_ioctl; hygon_psp_hooks.sev_dev_hooks_installed = true; } @@ -1454,22 +1436,14 @@ void sev_dev_destroy(struct psp_device *psp) int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd, void *data, int *error) { - if (!filep || filep->f_op != &sev_fops) + if (!filep || filep->f_op != (is_vendor_hygon() + ? &csv_fops : &sev_fops)) return -EBADF; return sev_do_cmd(cmd, data, error); } EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); -int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) -{ - if (!filep || filep->f_op != &sev_fops) - return -EBADF; - - return csv_do_ringbuf_cmds(psp_ret); -} -EXPORT_SYMBOL_GPL(csv_issue_ringbuf_cmds_external_user); - void sev_pci_init(void) { struct sev_device *sev = psp_master->sev_data; -- Gitee From 3eeb32d3745e8982e1dfffc7a858f0593490e2f2 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 18 Oct 2024 17:23:34 +0800 Subject: [PATCH 1490/2138] anolis: crypto: ccp: Rename psp-ringbuf.{c,h} to hygon/ring-buffer.{c,h} ANBZ: #11419 Move CSV RING BUFFER helpers from .../psp-ringbuf.{c,h} to .../hygon/ring-buffer.{c,h}. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4009 --- drivers/crypto/ccp/Makefile | 2 +- drivers/crypto/ccp/hygon/csv-dev.c | 1 + drivers/crypto/ccp/hygon/csv-dev.h | 13 ++++ drivers/crypto/ccp/hygon/psp-dev.h | 5 ++ .../{psp-ringbuf.c => hygon/ring-buffer.c} | 73 ++++++++++--------- .../{psp-ringbuf.h => hygon/ring-buffer.h} | 23 ++---- drivers/crypto/ccp/psp-dev.h | 19 ----- drivers/crypto/ccp/sev-dev.h | 3 +- 8 files changed, 66 insertions(+), 73 deletions(-) rename drivers/crypto/ccp/{psp-ringbuf.c => hygon/ring-buffer.c} (86%) rename drivers/crypto/ccp/{psp-ringbuf.h => hygon/ring-buffer.h} (59%) diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 3de149462f87..ce3e54939b89 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -17,7 +17,7 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ dbc.o \ hygon/psp-dev.o \ hygon/csv-dev.o \ - psp-ringbuf.o \ + hygon/ring-buffer.o \ csv-dev.o \ vpsp.o diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index e38e7b74235b..2657a6bd66ac 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -17,6 +17,7 @@ #include "csv-dev.h" #include "psp-dev.h" +#include "ring-buffer.h" /* * Hygon CSV build info: diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h index 187aedef084a..19f7de9a338c 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.h +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -15,6 +15,19 @@ #define CSV_FW_FILE "hygon/csv.fw" +#define PSP_RBCTL_X86_WRITES BIT(31) +#define PSP_RBCTL_RBMODE_ACT BIT(30) +#define PSP_RBCTL_CLR_INTSTAT BIT(29) +#define PSP_RBTAIL_QHI_TAIL_SHIFT 16 +#define PSP_RBTAIL_QHI_TAIL_MASK 0x7FF0000 +#define PSP_RBTAIL_QLO_TAIL_MASK 0x7FF + +#define PSP_RBHEAD_QHI_HEAD_SHIFT 16 +#define PSP_RBHEAD_QHI_HEAD_MASK 0x7FF0000 +#define PSP_RBHEAD_QLO_HEAD_MASK 0x7FF + +#define 
PSP_RBHEAD_QPAUSE_INT_STAT BIT(30) + extern u32 hygon_csv_build; extern int csv_comm_mode; extern const struct file_operations csv_fops; diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index 4f570b361f2c..cad42d1e9530 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -18,6 +18,11 @@ #include "../psp-dev.h" #include "../sev-dev.h" +#ifdef CONFIG_HYGON_PSP2CPU_CMD +#define PSP_X86_CMD BIT(2) +#define P2C_NOTIFIERS_MAX 16 +#endif + /* * Hooks table: a table of function and variable pointers filled in * when psp init. diff --git a/drivers/crypto/ccp/psp-ringbuf.c b/drivers/crypto/ccp/hygon/ring-buffer.c similarity index 86% rename from drivers/crypto/ccp/psp-ringbuf.c rename to drivers/crypto/ccp/hygon/ring-buffer.c index 8a461a0be6f7..0c9ea0217b2e 100644 --- a/drivers/crypto/ccp/psp-ringbuf.c +++ b/drivers/crypto/ccp/hygon/ring-buffer.c @@ -11,8 +11,13 @@ * published by the Free Software Foundation. */ -#include -#include "psp-ringbuf.h" +#include +#include +#include + +#include + +#include "ring-buffer.h" static void enqueue_data(struct csv_queue *queue, const void *src, @@ -42,6 +47,31 @@ static void enqueue_data(struct csv_queue *queue, smp_wmb(); } +static void dequeue_data(struct csv_queue *queue, + void *dst, unsigned int len, unsigned int off) +{ + unsigned int size = queue->mask + 1; + unsigned int esize = queue->esize; + unsigned int l; + + off &= queue->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + memcpy(dst, (void *)(queue->data + off), l); + memcpy((void *)((uintptr_t)dst + l), (void *)queue->data, len - l); + + /* + * Make sure that the data is copied before incrementing the + * queue->tail index counter. 
+ */ + smp_wmb(); +} + static unsigned int queue_avail_size(struct csv_queue *queue) { /* @@ -86,31 +116,6 @@ unsigned int csv_enqueue_cmd(struct csv_queue *queue, return len; } -static void dequeue_data(struct csv_queue *queue, - void *dst, unsigned int len, unsigned int off) -{ - unsigned int size = queue->mask + 1; - unsigned int esize = queue->esize; - unsigned int l; - - off &= queue->mask; - if (esize != 1) { - off *= esize; - size *= esize; - len *= esize; - } - l = min(len, size - off); - - memcpy(dst, (void *)(queue->data + off), l); - memcpy((void *)((uintptr_t)dst + l), (void *)queue->data, len - l); - - /* - * Make sure that the data is copied before incrementing the - * queue->tail index counter. - */ - smp_wmb(); -} - unsigned int csv_dequeue_stat(struct csv_queue *queue, void *buf, unsigned int len) { @@ -125,24 +130,24 @@ unsigned int csv_dequeue_stat(struct csv_queue *queue, return len; } -unsigned int csv_dequeue_cmd(struct csv_queue *queue, +unsigned int csv_dequeue_cmd(struct csv_queue *ring_buf, void *buf, unsigned int len) { unsigned int size; - size = queue->tail - queue->head; + size = ring_buf->tail - ring_buf->head; if (len > size) len = size; - dequeue_data(queue, buf, len, queue->head); - queue->head += len; + dequeue_data(ring_buf, buf, len, ring_buf->head); + ring_buf->head += len; return len; } -unsigned int csv_cmd_queue_size(struct csv_queue *queue) +unsigned int csv_cmd_queue_size(struct csv_queue *ring_buf) { unsigned int free_size; - free_size = queue_avail_size(queue); - return queue->mask - free_size; + free_size = queue_avail_size(ring_buf); + return ring_buf->mask - free_size; } diff --git a/drivers/crypto/ccp/psp-ringbuf.h b/drivers/crypto/ccp/hygon/ring-buffer.h similarity index 59% rename from drivers/crypto/ccp/psp-ringbuf.h rename to drivers/crypto/ccp/hygon/ring-buffer.h index 2179834425b4..bf97aa6df36a 100644 --- a/drivers/crypto/ccp/psp-ringbuf.h +++ b/drivers/crypto/ccp/hygon/ring-buffer.h @@ -7,23 +7,10 @@ * Author: 
Baoshun Fang */ -#ifndef __PSP_RINGBUF_H__ -#define __PSP_RINGBUF_H__ +#ifndef __CCP_HYGON_RINGBUF_H__ +#define __CCP_HYGON_RINGBUF_H__ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include int csv_queue_init(struct csv_queue *queue, void *buffer, unsigned int size, size_t esize); @@ -32,9 +19,9 @@ unsigned int csv_enqueue_cmd(struct csv_queue *queue, const void *buf, unsigned int len); unsigned int csv_dequeue_stat(struct csv_queue *queue, void *buf, unsigned int len); - unsigned int csv_dequeue_cmd(struct csv_queue *ring_buf, - void *buf, unsigned int len); + void *buf, unsigned int len); unsigned int csv_cmd_queue_size(struct csv_queue *ring_buf); -#endif /* __PSP_RINGBUF_H__ */ + +#endif /* __CCP_HYGON_RINGBUF_H__ */ diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index 2b06659bcf24..8a4de69399c5 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -14,30 +14,11 @@ #include #include #include -#include #include "sp-dev.h" -#define PSP_RBCTL_X86_WRITES BIT(31) -#define PSP_RBCTL_RBMODE_ACT BIT(30) -#define PSP_RBCTL_CLR_INTSTAT BIT(29) -#define PSP_RBTAIL_QHI_TAIL_SHIFT 16 -#define PSP_RBTAIL_QHI_TAIL_MASK 0x7FF0000 -#define PSP_RBTAIL_QLO_TAIL_MASK 0x7FF - -#define PSP_RBHEAD_QHI_HEAD_SHIFT 16 -#define PSP_RBHEAD_QHI_HEAD_MASK 0x7FF0000 -#define PSP_RBHEAD_QLO_HEAD_MASK 0x7FF - -#define PSP_RBHEAD_QPAUSE_INT_STAT BIT(30) - #define MAX_PSP_NAME_LEN 16 -#ifdef CONFIG_HYGON_PSP2CPU_CMD -#define PSP_X86_CMD BIT(2) -#define P2C_NOTIFIERS_MAX 16 -#endif - extern struct psp_device *psp_master; typedef void (*psp_irq_handler_t)(int, void *, unsigned int); diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h index 372183b8c58f..a137ae695973 100644 --- a/drivers/crypto/ccp/sev-dev.h +++ b/drivers/crypto/ccp/sev-dev.h @@ -25,7 +25,7 @@ #include #include -#include "psp-ringbuf.h" +#include "hygon/ring-buffer.h" #define 
SEV_CMDRESP_CMD GENMASK(26, 16) #define SEV_CMD_COMPLETE BIT(1) @@ -55,6 +55,7 @@ struct sev_device { void *cmd_buf; + /* Management for the Hygon RING BUFFER mode */ struct csv_ringbuffer_queue ring_buffer[CSV_COMMAND_PRIORITY_NUM]; }; -- Gitee From 10a28ccce53c3922e8c3203a342ced0c440fe969 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 18 Oct 2024 17:53:09 +0800 Subject: [PATCH 1491/2138] anolis: crypto: ccp: Merge multiple csv-dev.{c,h} ANBZ: #11419 Place all the code for Hygon CSV in file .../hygon/csv-dev.{c,h}. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4009 --- drivers/crypto/ccp/Makefile | 1 - drivers/crypto/ccp/csv-dev.c | 101 ----------------------------- drivers/crypto/ccp/csv-dev.h | 31 --------- drivers/crypto/ccp/hygon/csv-dev.c | 100 ++++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/csv-dev.h | 3 + drivers/crypto/ccp/hygon/psp-dev.h | 1 + drivers/crypto/ccp/sev-dev.c | 16 +---- 7 files changed, 106 insertions(+), 147 deletions(-) delete mode 100644 drivers/crypto/ccp/csv-dev.c delete mode 100644 drivers/crypto/ccp/csv-dev.h diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index ce3e54939b89..633ac4597ea9 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -18,7 +18,6 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ hygon/psp-dev.o \ hygon/csv-dev.o \ hygon/ring-buffer.o \ - csv-dev.o \ vpsp.o ccp-$(CONFIG_TDM_DEV_HYGON) += tdm-dev.o diff --git a/drivers/crypto/ccp/csv-dev.c b/drivers/crypto/ccp/csv-dev.c deleted file mode 100644 index 78d8d5c5a089..000000000000 --- a/drivers/crypto/ccp/csv-dev.c +++ /dev/null @@ -1,101 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * HYGON Platform Security Processor (PSP) interface - * - * Copyright (C) 2024 Hygon Info Technologies Ltd. 
- * - * Author: Liyang Han - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include "sev-dev.h" -#include "csv-dev.h" - -/* Function pointers for hooks */ -struct csv_hooks_table csv_hooks; - -#ifdef CONFIG_HYGON_CSV - -int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) -{ - int ret = 0; - unsigned int i = 0; - struct csv3_data_set_smr *cmd_set_smr; - struct csv3_data_set_smcr *cmd_set_smcr; - struct csv3_data_memory_region *smr_regions; - - if (!csv_smr || !csv_smr_num) - return -EINVAL; - - cmd_set_smr = kzalloc(sizeof(*cmd_set_smr), GFP_KERNEL); - if (!cmd_set_smr) - return -ENOMEM; - - smr_regions = kcalloc(csv_smr_num, sizeof(*smr_regions), GFP_KERNEL); - if (!smr_regions) { - ret = -ENOMEM; - goto e_free_cmd_set_smr; - } - - for (i = 0; i < csv_smr_num; i++) { - smr_regions[i].base_address = csv_smr[i].start; - smr_regions[i].size = csv_smr[i].size; - } - cmd_set_smr->smr_entry_size = 1 << csv_get_smr_entry_shift(); - cmd_set_smr->regions_paddr = __psp_pa(smr_regions); - cmd_set_smr->nregions = csv_smr_num; - ret = csv_hooks.sev_do_cmd(CSV3_CMD_SET_SMR, cmd_set_smr, error); - if (ret) { - pr_err("Fail to set SMR, ret %#x, error %#x\n", ret, *error); - goto e_free_smr_area; - } - - cmd_set_smcr = kzalloc(sizeof(*cmd_set_smcr), GFP_KERNEL); - if (!cmd_set_smcr) { - ret = -ENOMEM; - goto e_free_smr_area; - } - - cmd_set_smcr->base_address = csv_alloc_from_contiguous(1UL << CSV_MR_ALIGN_BITS, - &node_online_map, - get_order(1 << CSV_MR_ALIGN_BITS)); - if (!cmd_set_smcr->base_address) { - pr_err("Fail to alloc SMCR memory\n"); - ret = -ENOMEM; - goto e_free_cmd_set_smcr; - } - - cmd_set_smcr->size = 1UL << CSV_MR_ALIGN_BITS; - ret = csv_hooks.sev_do_cmd(CSV3_CMD_SET_SMCR, cmd_set_smcr, error); - if (ret) { - if (*error == 
SEV_RET_INVALID_COMMAND) - ret = 0; - else - pr_err("set smcr ret %#x, error %#x\n", ret, *error); - - csv_release_to_contiguous(cmd_set_smcr->base_address, - 1UL << CSV_MR_ALIGN_BITS); - } - -e_free_cmd_set_smcr: - kfree((void *)cmd_set_smcr); -e_free_smr_area: - kfree((void *)smr_regions); -e_free_cmd_set_smr: - kfree((void *)cmd_set_smr); - - if (ret) - dev_warn(sev->dev, - "CSV3: fail to set secure memory region, CSV3 support unavailable\n"); - - return ret; -} - -#endif /* CONFIG_HYGON_CSV */ diff --git a/drivers/crypto/ccp/csv-dev.h b/drivers/crypto/ccp/csv-dev.h deleted file mode 100644 index 8865b945728b..000000000000 --- a/drivers/crypto/ccp/csv-dev.h +++ /dev/null @@ -1,31 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * HYGON Platform Security Processor (PSP) interface driver - * - * Copyright (C) 2024 Hygon Info Technologies Ltd. - * - * Author: Liyang Han - */ - -#ifndef __CSV_DEV_H__ -#define __CSV_DEV_H__ - -#include - -/* Hooks table: a table of function pointers filled in when psp init */ -extern struct csv_hooks_table { - int (*sev_do_cmd)(int cmd, void *data, int *psp_ret); -} csv_hooks; - -#ifdef CONFIG_HYGON_CSV - -int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error); - -#else /* !CONFIG_HYGON_CSV */ - -static inline int -csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) { return 0; } - -#endif /* CONFIG_HYGON_CSV */ - -#endif /* __CSV_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 2657a6bd66ac..9006e5b1a374 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -13,8 +13,11 @@ #include #include +#include #include +#include + #include "csv-dev.h" #include "psp-dev.h" #include "ring-buffer.h" @@ -612,6 +615,103 @@ int csv_check_stat_queue_status(int *psp_ret) } EXPORT_SYMBOL_GPL(csv_check_stat_queue_status); +#ifdef CONFIG_HYGON_CSV + +int csv_platform_cmd_set_secure_memory_region(struct 
sev_device *sev, int *error) +{ + int ret = 0; + unsigned int i = 0; + struct csv3_data_set_smr *cmd_set_smr; + struct csv3_data_set_smcr *cmd_set_smcr; + struct csv3_data_memory_region *smr_regions; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) { + ret = -ENODEV; + goto l_end; + } + + if (!csv_smr || !csv_smr_num) { + ret = -EINVAL; + goto l_end; + } + + cmd_set_smr = kzalloc(sizeof(*cmd_set_smr), GFP_KERNEL); + if (!cmd_set_smr) { + ret = -ENOMEM; + goto l_end; + } + + smr_regions = kcalloc(csv_smr_num, sizeof(*smr_regions), GFP_KERNEL); + if (!smr_regions) { + ret = -ENOMEM; + goto e_free_cmd_set_smr; + } + + for (i = 0; i < csv_smr_num; i++) { + smr_regions[i].base_address = csv_smr[i].start; + smr_regions[i].size = csv_smr[i].size; + } + cmd_set_smr->smr_entry_size = 1 << csv_get_smr_entry_shift(); + cmd_set_smr->regions_paddr = __psp_pa(smr_regions); + cmd_set_smr->nregions = csv_smr_num; + ret = hygon_psp_hooks.sev_do_cmd(CSV3_CMD_SET_SMR, cmd_set_smr, error); + if (ret) { + pr_err("Fail to set SMR, ret %#x, error %#x\n", ret, *error); + goto e_free_smr_area; + } + + cmd_set_smcr = kzalloc(sizeof(*cmd_set_smcr), GFP_KERNEL); + if (!cmd_set_smcr) { + ret = -ENOMEM; + goto e_free_smr_area; + } + + cmd_set_smcr->base_address = csv_alloc_from_contiguous(1UL << CSV_MR_ALIGN_BITS, + &node_online_map, + get_order(1 << CSV_MR_ALIGN_BITS)); + if (!cmd_set_smcr->base_address) { + pr_err("Fail to alloc SMCR memory\n"); + ret = -ENOMEM; + goto e_free_cmd_set_smcr; + } + + cmd_set_smcr->size = 1UL << CSV_MR_ALIGN_BITS; + ret = hygon_psp_hooks.sev_do_cmd(CSV3_CMD_SET_SMCR, cmd_set_smcr, error); + if (ret) { + if (*error == SEV_RET_INVALID_COMMAND) + ret = 0; + else + pr_err("set smcr ret %#x, error %#x\n", ret, *error); + + csv_release_to_contiguous(cmd_set_smcr->base_address, + 1UL << CSV_MR_ALIGN_BITS); + } + +e_free_cmd_set_smcr: + kfree((void *)cmd_set_smcr); +e_free_smr_area: + kfree((void *)smr_regions); +e_free_cmd_set_smr: + kfree((void *)cmd_set_smr); + 
+l_end: + if (ret) + dev_warn(sev->dev, + "CSV3: fail to set secure memory region, CSV3 support unavailable\n"); + return ret; +} + +#else /* !CONFIG_HYGON_CSV */ + +int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) +{ + dev_warn(sev->dev, + "CSV3: needs CONFIG_HYGON_CSV, CSV3 support unavailable\n"); + return -EFAULT; +} + +#endif /* CONFIG_HYGON_CSV */ + static int get_queue_tail(struct csv_ringbuffer_queue *ringbuffer) { return ringbuffer->cmd_ptr.tail & ringbuffer->cmd_ptr.mask; diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h index 19f7de9a338c..664685338e2c 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.h +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -13,6 +13,8 @@ #include #include +#include "../sev-dev.h" + #define CSV_FW_FILE "hygon/csv.fw" #define PSP_RBCTL_X86_WRITES BIT(31) @@ -35,6 +37,7 @@ extern const struct file_operations csv_fops; void csv_update_api_version(struct sev_user_data_status *status); int csv_cmd_buffer_len(int cmd); void csv_restore_mailbox_mode_postprocess(void); +int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error); static inline bool csv_version_greater_or_equal(u32 build) { diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index cad42d1e9530..1d180e46b1a3 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -41,6 +41,7 @@ extern struct hygon_psp_hooks_table { int (*__sev_platform_shutdown_locked)(int *error); int (*sev_wait_cmd_ioc)(struct sev_device *sev, unsigned int *reg, unsigned int timeout); + int (*sev_do_cmd)(int cmd, void *data, int *psp_ret); long (*sev_ioctl)(struct file *file, unsigned int ioctl, unsigned long arg); } hygon_psp_hooks; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 74ef73377108..947dc26e139c 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -26,15 +26,12 @@ #include #include 
#include -#include -#include #include #include #include "psp-dev.h" #include "sev-dev.h" -#include "csv-dev.h" #include "hygon/psp-dev.h" #include "hygon/csv-dev.h" @@ -1262,13 +1259,6 @@ static void sev_exit(struct kref *ref) misc_dev = NULL; } -/* Code to set all of the function pointers for CSV. */ -static inline void csv_install_hooks(void) -{ - /* Install the hook functions for CSV. */ - csv_hooks.sev_do_cmd = sev_do_cmd; -} - static int sev_misc_init(struct sev_device *sev) { struct device *dev = sev->dev; @@ -1302,9 +1292,6 @@ static int sev_misc_init(struct sev_device *sev) return ret; kref_init(&misc_dev->refcount); - - /* Install the hook functions for CSV */ - csv_install_hooks(); } else { kref_get(&misc_dev->refcount); } @@ -1327,6 +1314,7 @@ static void sev_dev_install_hooks(void) hygon_psp_hooks.__sev_do_cmd_locked = __sev_do_cmd_locked; hygon_psp_hooks.__sev_platform_init_locked = __sev_platform_init_locked; hygon_psp_hooks.__sev_platform_shutdown_locked = __sev_platform_shutdown_locked; + hygon_psp_hooks.sev_do_cmd = sev_do_cmd; hygon_psp_hooks.sev_wait_cmd_ioc = sev_wait_cmd_ioc; hygon_psp_hooks.sev_ioctl = sev_ioctl; @@ -1485,7 +1473,7 @@ void sev_pci_init(void) return; /* Set SMR for HYGON CSV3 */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + if (is_vendor_hygon() && boot_cpu_has(X86_FEATURE_CSV3)) csv_platform_cmd_set_secure_memory_region(sev, &error); /* Initialize the platform */ -- Gitee From 35d21326b552238c5d47adbcbbe0187f23c4a7c6 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 18 Oct 2024 20:43:31 +0800 Subject: [PATCH 1492/2138] anolis: crypto: ccp: Move Hygon tdm and vpsp code to drivers/crypto/ccp/hygon/ ANBZ: #11419 Just to keep the directoy cleaner. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4009 --- drivers/crypto/ccp/Makefile | 6 +++--- drivers/crypto/ccp/{ => hygon}/tdm-dev.c | 0 drivers/crypto/ccp/{ => hygon}/tdm-dev.h | 0 drivers/crypto/ccp/{ => hygon}/tdm-kernel-guard.c | 0 drivers/crypto/ccp/{ => hygon}/vpsp.c | 0 drivers/crypto/ccp/psp-dev.c | 2 +- 6 files changed, 4 insertions(+), 4 deletions(-) rename drivers/crypto/ccp/{ => hygon}/tdm-dev.c (100%) rename drivers/crypto/ccp/{ => hygon}/tdm-dev.h (100%) rename drivers/crypto/ccp/{ => hygon}/tdm-kernel-guard.c (100%) rename drivers/crypto/ccp/{ => hygon}/vpsp.c (100%) diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 633ac4597ea9..74fe6611bac4 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -18,9 +18,9 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ hygon/psp-dev.o \ hygon/csv-dev.o \ hygon/ring-buffer.o \ - vpsp.o + hygon/vpsp.o -ccp-$(CONFIG_TDM_DEV_HYGON) += tdm-dev.o +ccp-$(CONFIG_TDM_DEV_HYGON) += hygon/tdm-dev.o obj-$(CONFIG_CRYPTO_DEV_HCT) += hygon/hct.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o @@ -32,7 +32,7 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-des3.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o -obj-$(CONFIG_TDM_KERNEL_GUARD) += tdm-kernel-guard.o +obj-$(CONFIG_TDM_KERNEL_GUARD) += hygon/tdm-kernel-guard.o $(obj)/ccp_sm2_sign.asn1.o: $(obj)/ccp_sm2_sign.asn1.c $(obj)/ccp_sm2_sign.asn1.h $(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h diff --git a/drivers/crypto/ccp/tdm-dev.c b/drivers/crypto/ccp/hygon/tdm-dev.c similarity index 100% rename from drivers/crypto/ccp/tdm-dev.c rename to drivers/crypto/ccp/hygon/tdm-dev.c diff --git a/drivers/crypto/ccp/tdm-dev.h b/drivers/crypto/ccp/hygon/tdm-dev.h similarity index 100% rename from drivers/crypto/ccp/tdm-dev.h rename to drivers/crypto/ccp/hygon/tdm-dev.h diff --git a/drivers/crypto/ccp/tdm-kernel-guard.c 
b/drivers/crypto/ccp/hygon/tdm-kernel-guard.c similarity index 100% rename from drivers/crypto/ccp/tdm-kernel-guard.c rename to drivers/crypto/ccp/hygon/tdm-kernel-guard.c diff --git a/drivers/crypto/ccp/vpsp.c b/drivers/crypto/ccp/hygon/vpsp.c similarity index 100% rename from drivers/crypto/ccp/vpsp.c rename to drivers/crypto/ccp/hygon/vpsp.c diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index d24ab07d90e5..49dde0baae41 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -19,7 +19,7 @@ #include "hygon/psp-dev.h" #ifdef CONFIG_TDM_DEV_HYGON -#include "tdm-dev.h" +#include "hygon/tdm-dev.h" #endif struct psp_device *psp_master; -- Gitee From 878e0746639ab40b74ce5b266180775eef441c6b Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Sat, 14 Sep 2024 14:08:34 +0800 Subject: [PATCH 1493/2138] anolis: crypto: ccp: add more checks for sev_dev_hooks_installed ANBZ: #11419 In some Hygon platform-related functions, `hygon_psp_hooks` must be initialized. Therefore, additional checks are needed to ensure that `sev_dev_hooks_installed` value is true before continuing execution. 
Signed-off-by: xiongmengbiao Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4009 --- drivers/crypto/ccp/hygon/csv-dev.c | 20 +++++++++++++++++++- drivers/crypto/ccp/hygon/psp-dev.c | 14 +++++++++++++- drivers/crypto/ccp/psp-dev.c | 17 ++++++++++++----- 3 files changed, 44 insertions(+), 7 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 9006e5b1a374..f49a0ecebb3c 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -825,6 +825,9 @@ static int vpsp_psp_mutex_trylock(void) { int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + if (mutex_enabled) return psp_mutex_trylock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else @@ -835,6 +838,9 @@ static int vpsp_psp_mutex_unlock(void) { int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + if (mutex_enabled) psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else @@ -851,6 +857,9 @@ static int __vpsp_ring_buffer_enter_locked(int *error) struct csv_ringbuffer_queue *hi_queue; struct sev_device *sev = psp_master->sev_data; + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) return -EEXIST; @@ -887,7 +896,7 @@ static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) unsigned int rb_ctl; struct sev_device *sev; - if (!psp) + if (!psp || !hygon_psp_hooks.sev_dev_hooks_installed) return -ENODEV; if (*hygon_psp_hooks.psp_dead) @@ -950,6 +959,9 @@ static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) struct sev_user_data_status data; int rc; + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + rc = __vpsp_ring_buffer_enter_locked(psp_ret); if (rc) goto end; 
@@ -1063,6 +1075,9 @@ int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data, int ret = 0; struct csv_cmdptr_entry cmd = {0}; + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + /* Get the retult directly if the command has been executed */ if (index >= 0 && vpsp_get_cmd_status(prio, index) != VPSP_CMD_STATUS_RUNNING) { @@ -1128,6 +1143,9 @@ int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) int index = -1; uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + /* ringbuffer mode check and parse command prio*/ rb_supported = vpsp_rb_check_and_cmd_prio_parse(&prio, (struct vpsp_cmd *)&cmd); diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index d60009a5e0d7..124d305c956b 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -312,6 +312,9 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) struct vpsp_dev_ctrl vpsp_ctrl_op; int ret = -EFAULT; + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + if (_IOC_TYPE(ioctl) != HYGON_PSP_IOC_TYPE) { printk(KERN_INFO "%s: invalid ioctl type: 0x%x\n", __func__, _IOC_TYPE(ioctl)); return -EINVAL; @@ -372,6 +375,9 @@ int hygon_psp_additional_setup(struct sp_device *sp) struct device *dev = sp->dev; int ret = 0; + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + if (!psp_misc) { struct miscdevice *misc; @@ -498,7 +504,7 @@ int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) unsigned int phys_lsb, phys_msb; unsigned int reg, ret = 0; - if (!psp || !psp->sev_data) + if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed) return -ENODEV; if (*hygon_psp_hooks.psp_dead) @@ -562,6 +568,9 @@ int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) int rc; int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + if 
(!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + if (mutex_enabled) { if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) { @@ -586,6 +595,9 @@ int psp_do_cmd(int cmd, void *data, int *psp_ret) int rc; int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + if (mutex_enabled) { if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 49dde0baae41..ccb605bf8618 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -199,11 +199,6 @@ int psp_dev_init(struct sp_device *sp) /* Request an irq */ if (is_vendor_hygon()) { - ret = hygon_psp_additional_setup(sp); - if (ret) { - dev_err(dev, "psp: unable to do additional setup\n"); - goto e_err; - } ret = sp_request_hygon_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); } else { ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); @@ -221,6 +216,18 @@ int psp_dev_init(struct sp_device *sp) if (ret) goto e_irq; + /** + * hygon_psp_additional_setup() needs to wait for + * sev_dev_install_hooks() to complete before it can be called. 
+ */ + if (is_vendor_hygon()) { + ret = hygon_psp_additional_setup(sp); + if (ret) { + dev_err(dev, "psp: unable to do additional setup\n"); + goto e_irq; + } + } + /* Enable interrupt */ iowrite32(-1, psp->io_regs + psp->vdata->inten_reg); -- Gitee From b8e3c9edacc7c15951c5aafc7b6a0edc642811c2 Mon Sep 17 00:00:00 2001 From: Jia He Date: Fri, 13 Sep 2024 11:48:41 +0000 Subject: [PATCH 1494/2138] anolis: configs: Fix bug in if statement of generate_configs.sh ANBZ: #9262 cherry-picked from https://lore.kernel.org/linux-arm-kernel/20240806055444.528932-1-justin.he@arm.com/T/ Otherwise, the command "make anolis_defconfig" will report the error: scripts/generate_configs.sh: 35: [: Y: unexpected operator Signed-off-by: Jia He Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- anolis/configs/scripts/generate_configs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/anolis/configs/scripts/generate_configs.sh b/anolis/configs/scripts/generate_configs.sh index 95f2190189d3..a101b469c511 100644 --- a/anolis/configs/scripts/generate_configs.sh +++ b/anolis/configs/scripts/generate_configs.sh @@ -32,7 +32,7 @@ fi sh ${DIST_OUTPUT}/generate.sh | tee ${FILE_LIST} -if [ "${DIST_DO_GENERATE_DOT_CONFIG}" == "Y" ]; then +if [ "x${DIST_DO_GENERATE_DOT_CONFIG}" = "xY" ]; then file=$(cat ${FILE_LIST} | grep "generated" | awk '{print $4}' | head -1) cp -f ${file} ${DIST_SRCROOT}.config fi -- Gitee From c4a3d8ca3608981465c2842afea3d627d974d40b Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Wed, 14 Dec 2022 10:10:36 -0600 Subject: [PATCH 1495/2138] objtool: Reorganize CFI code ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ check.c implements static stack validation. But the CFI code that it contains can be shared with other types of validation. E.g., dynamic FP validation. 
Move the CFI code to its own files - cfi.h and cfi.c. Signed-off-by: Madhavan T. Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- tools/objtool/Build | 1 + tools/objtool/cfi.c | 111 ++++++++++++++++++++++++++++ tools/objtool/check.c | 98 ------------------------ tools/objtool/include/objtool/cfi.h | 13 ++++ 4 files changed, 125 insertions(+), 98 deletions(-) create mode 100644 tools/objtool/cfi.c diff --git a/tools/objtool/Build b/tools/objtool/Build index a3cdf8af6635..9f23d1f4c716 100644 --- a/tools/objtool/Build +++ b/tools/objtool/Build @@ -5,6 +5,7 @@ objtool-y += weak.o objtool-y += check.o objtool-y += special.o objtool-y += builtin-check.o +objtool-y += cfi.o objtool-y += elf.o objtool-y += objtool.o diff --git a/tools/objtool/cfi.c b/tools/objtool/cfi.c new file mode 100644 index 000000000000..bc3e216f1a94 --- /dev/null +++ b/tools/objtool/cfi.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ + +#include +#include + +#include +#include +#include +#include + +unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; + +struct cfi_init_state initial_func_cfi; +struct cfi_state init_cfi; +struct cfi_state func_cfi; +struct cfi_state force_undefined_cfi; + +void init_cfi_state(struct cfi_state *cfi) +{ + int i; + + for (i = 0; i < CFI_NUM_REGS; i++) { + cfi->regs[i].base = CFI_UNDEFINED; + cfi->vals[i].base = CFI_UNDEFINED; + } + cfi->cfa.base = CFI_UNDEFINED; + cfi->drap_reg = CFI_UNDEFINED; + cfi->drap_offset = -1; +} + +static struct cfi_state *cfi_alloc(void) +{ + struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1); + + if (!cfi) { + WARN("calloc failed"); + exit(1); + } + nr_cfi++; + return cfi; +} + +static int cfi_bits; +static struct hlist_head *cfi_hash; + +inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2) +{ + return memcmp((void *)cfi1 + sizeof(cfi1->hash), + (void *)cfi2 + 
sizeof(cfi2->hash), + sizeof(struct cfi_state) - sizeof(struct hlist_node)); +} + +static inline u32 cfi_key(struct cfi_state *cfi) +{ + return jhash((void *)cfi + sizeof(cfi->hash), + sizeof(*cfi) - sizeof(cfi->hash), 0); +} + +struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi) +{ + struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; + struct cfi_state *obj; + + hlist_for_each_entry(obj, head, hash) { + if (!cficmp(cfi, obj)) { + nr_cfi_cache++; + return obj; + } + } + + obj = cfi_alloc(); + *obj = *cfi; + hlist_add_head(&obj->hash, head); + + return obj; +} + +void cfi_hash_add(struct cfi_state *cfi) +{ + struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; + + hlist_add_head(&cfi->hash, head); +} + +void *cfi_hash_alloc(unsigned long size) +{ + cfi_bits = max(10, ilog2(size)); + cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits, + PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANON, -1, 0); + if (cfi_hash == (void *)-1L) { + WARN("mmap fail cfi_hash"); + cfi_hash = NULL; + } else if (opts.stats) { + printf("cfi_bits: %d\n", cfi_bits); + } + + return cfi_hash; +} + +void set_func_state(struct cfi_state *state) +{ + state->cfa = initial_func_cfi.cfa; + memcpy(&state->regs, &initial_func_cfi.regs, + CFI_NUM_REGS * sizeof(struct cfi_reg)); + state->stack_size = initial_func_cfi.cfa.offset; + state->type = UNWIND_HINT_TYPE_CALL; +} diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 1b242c3c2d45..77099ea9e2aa 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -27,13 +27,6 @@ struct alternative { bool skip_orig; }; -static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; - -static struct cfi_init_state initial_func_cfi; -static struct cfi_state init_cfi; -static struct cfi_state func_cfi; -static struct cfi_state force_undefined_cfi; - struct instruction *find_insn(struct objtool_file *file, struct section *sec, unsigned long offset) { @@ -261,19 +254,6 @@ static bool 
dead_end_function(struct objtool_file *file, struct symbol *func) return __dead_end_function(file, func, 0); } -static void init_cfi_state(struct cfi_state *cfi) -{ - int i; - - for (i = 0; i < CFI_NUM_REGS; i++) { - cfi->regs[i].base = CFI_UNDEFINED; - cfi->vals[i].base = CFI_UNDEFINED; - } - cfi->cfa.base = CFI_UNDEFINED; - cfi->drap_reg = CFI_UNDEFINED; - cfi->drap_offset = -1; -} - static void init_insn_state(struct objtool_file *file, struct insn_state *state, struct section *sec) { @@ -289,75 +269,6 @@ static void init_insn_state(struct objtool_file *file, struct insn_state *state, state->noinstr = sec->noinstr; } -static struct cfi_state *cfi_alloc(void) -{ - struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1); - if (!cfi) { - WARN("calloc failed"); - exit(1); - } - nr_cfi++; - return cfi; -} - -static int cfi_bits; -static struct hlist_head *cfi_hash; - -static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2) -{ - return memcmp((void *)cfi1 + sizeof(cfi1->hash), - (void *)cfi2 + sizeof(cfi2->hash), - sizeof(struct cfi_state) - sizeof(struct hlist_node)); -} - -static inline u32 cfi_key(struct cfi_state *cfi) -{ - return jhash((void *)cfi + sizeof(cfi->hash), - sizeof(*cfi) - sizeof(cfi->hash), 0); -} - -static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi) -{ - struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; - struct cfi_state *obj; - - hlist_for_each_entry(obj, head, hash) { - if (!cficmp(cfi, obj)) { - nr_cfi_cache++; - return obj; - } - } - - obj = cfi_alloc(); - *obj = *cfi; - hlist_add_head(&obj->hash, head); - - return obj; -} - -static void cfi_hash_add(struct cfi_state *cfi) -{ - struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; - - hlist_add_head(&cfi->hash, head); -} - -static void *cfi_hash_alloc(unsigned long size) -{ - cfi_bits = max(10, ilog2(size)); - cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits, - PROT_READ|PROT_WRITE, - MAP_PRIVATE|MAP_ANON, 
-1, 0); - if (cfi_hash == (void *)-1L) { - WARN("mmap fail cfi_hash"); - cfi_hash = NULL; - } else if (opts.stats) { - printf("cfi_bits: %d\n", cfi_bits); - } - - return cfi_hash; -} - static unsigned long nr_insns; static unsigned long nr_insns_visited; @@ -2192,15 +2103,6 @@ static int add_jump_table_alts(struct objtool_file *file) return 0; } -static void set_func_state(struct cfi_state *state) -{ - state->cfa = initial_func_cfi.cfa; - memcpy(&state->regs, &initial_func_cfi.regs, - CFI_NUM_REGS * sizeof(struct cfi_reg)); - state->stack_size = initial_func_cfi.cfa.offset; - state->type = UNWIND_HINT_TYPE_CALL; -} - static int read_unwind_hints(struct objtool_file *file) { struct cfi_state cfi = init_cfi; diff --git a/tools/objtool/include/objtool/cfi.h b/tools/objtool/include/objtool/cfi.h index c8a6bec4f6b9..557366799315 100644 --- a/tools/objtool/include/objtool/cfi.h +++ b/tools/objtool/include/objtool/cfi.h @@ -39,4 +39,17 @@ struct cfi_state { bool force_undefined; }; +void init_cfi_state(struct cfi_state *cfi); +bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2); +struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi); +void cfi_hash_add(struct cfi_state *cfi); +void *cfi_hash_alloc(unsigned long size); +void set_func_state(struct cfi_state *state); + +extern unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; +extern struct cfi_init_state initial_func_cfi; +extern struct cfi_state init_cfi; +extern struct cfi_state func_cfi; +extern struct cfi_state force_undefined_cfi; + #endif /* _OBJTOOL_CFI_H */ -- Gitee From 57c9caf825dbd298ac0076778000bd085b08f772 Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Wed, 14 Dec 2022 10:18:57 -0600 Subject: [PATCH 1496/2138] objtool: Reorganize instruction-related code ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ check.c implements static stack validation. 
But the instruction-related code that it contains can be shared with other types of validation. E.g., dynamic FP validation. Move the instruction-related code to its own files - insn.h and insn.c. Signed-off-by: Madhavan T. Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- tools/objtool/Build | 1 + tools/objtool/check.c | 210 -------------------------- tools/objtool/include/objtool/check.h | 92 +---------- tools/objtool/include/objtool/insn.h | 144 ++++++++++++++++++ tools/objtool/insn.c | 192 +++++++++++++++++++++++ 5 files changed, 338 insertions(+), 301 deletions(-) create mode 100644 tools/objtool/include/objtool/insn.h create mode 100644 tools/objtool/insn.c diff --git a/tools/objtool/Build b/tools/objtool/Build index 9f23d1f4c716..c04e36267379 100644 --- a/tools/objtool/Build +++ b/tools/objtool/Build @@ -6,6 +6,7 @@ objtool-y += check.o objtool-y += special.o objtool-y += builtin-check.o objtool-y += cfi.o +objtool-y += insn.o objtool-y += elf.o objtool-y += objtool.o diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 77099ea9e2aa..70276f3354b7 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -27,103 +27,6 @@ struct alternative { bool skip_orig; }; -struct instruction *find_insn(struct objtool_file *file, - struct section *sec, unsigned long offset) -{ - struct instruction *insn; - - hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) { - if (insn->sec == sec && insn->offset == offset) - return insn; - } - - return NULL; -} - -struct instruction *next_insn_same_sec(struct objtool_file *file, - struct instruction *insn) -{ - if (insn->idx == INSN_CHUNK_MAX) - return find_insn(file, insn->sec, insn->offset + insn->len); - - insn++; - if (!insn->len) - return NULL; - - return insn; -} - -static struct instruction *next_insn_same_func(struct objtool_file *file, - struct instruction *insn) -{ - struct instruction 
*next = next_insn_same_sec(file, insn); - struct symbol *func = insn_func(insn); - - if (!func) - return NULL; - - if (next && insn_func(next) == func) - return next; - - /* Check if we're already in the subfunction: */ - if (func == func->cfunc) - return NULL; - - /* Move to the subfunction: */ - return find_insn(file, func->cfunc->sec, func->cfunc->offset); -} - -static struct instruction *prev_insn_same_sec(struct objtool_file *file, - struct instruction *insn) -{ - if (insn->idx == 0) { - if (insn->prev_len) - return find_insn(file, insn->sec, insn->offset - insn->prev_len); - return NULL; - } - - return insn - 1; -} - -static struct instruction *prev_insn_same_sym(struct objtool_file *file, - struct instruction *insn) -{ - struct instruction *prev = prev_insn_same_sec(file, insn); - - if (prev && insn_func(prev) == insn_func(insn)) - return prev; - - return NULL; -} - -#define for_each_insn(file, insn) \ - for (struct section *__sec, *__fake = (struct section *)1; \ - __fake; __fake = NULL) \ - for_each_sec(file, __sec) \ - sec_for_each_insn(file, __sec, insn) - -#define func_for_each_insn(file, func, insn) \ - for (insn = find_insn(file, func->sec, func->offset); \ - insn; \ - insn = next_insn_same_func(file, insn)) - -#define sym_for_each_insn(file, sym, insn) \ - for (insn = find_insn(file, sym->sec, sym->offset); \ - insn && insn->offset < sym->offset + sym->len; \ - insn = next_insn_same_sec(file, insn)) - -#define sym_for_each_insn_continue_reverse(file, sym, insn) \ - for (insn = prev_insn_same_sec(file, insn); \ - insn && insn->offset >= sym->offset; \ - insn = prev_insn_same_sec(file, insn)) - -#define sec_for_each_insn_from(file, insn) \ - for (; insn; insn = next_insn_same_sec(file, insn)) - -#define sec_for_each_insn_continue(file, insn) \ - for (insn = next_insn_same_sec(file, insn); insn; \ - insn = next_insn_same_sec(file, insn)) - static inline struct symbol *insn_call_dest(struct instruction *insn) { if (insn->type == INSN_JUMP_DYNAMIC || @@ 
-254,21 +157,6 @@ static bool dead_end_function(struct objtool_file *file, struct symbol *func) return __dead_end_function(file, func, 0); } -static void init_insn_state(struct objtool_file *file, struct insn_state *state, - struct section *sec) -{ - memset(state, 0, sizeof(*state)); - init_cfi_state(&state->cfi); - - /* - * We need the full vmlinux for noinstr validation, otherwise we can - * not correctly determine insn_call_dest(insn)->sec (external symbols - * do not have a section). - */ - if (opts.link && opts.noinstr && sec) - state->noinstr = sec->noinstr; -} - static unsigned long nr_insns; static unsigned long nr_insns_visited; @@ -474,19 +362,6 @@ static int init_pv_ops(struct objtool_file *file) return 0; } -static struct instruction *find_last_insn(struct objtool_file *file, - struct section *sec) -{ - struct instruction *insn = NULL; - unsigned int offset; - unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0; - - for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--) - insn = find_insn(file, sec, offset); - - return insn; -} - /* * Mark "ud2" instructions and manually annotated dead ends. 
*/ @@ -1226,26 +1101,6 @@ __weak bool arch_is_embedded_insn(struct symbol *sym) return false; } -static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) -{ - struct reloc *reloc; - - if (insn->no_reloc) - return NULL; - - if (!file) - return NULL; - - reloc = find_reloc_by_dest_range(file->elf, insn->sec, - insn->offset, insn->len); - if (!reloc) { - insn->no_reloc = 1; - return NULL; - } - - return reloc; -} - static void remove_insn_ops(struct instruction *insn) { struct stack_op *op, *next; @@ -1405,24 +1260,6 @@ static void add_return_call(struct objtool_file *file, struct instruction *insn, list_add_tail(&insn->call_node, &file->return_thunk_list); } -static bool is_first_func_insn(struct objtool_file *file, - struct instruction *insn, struct symbol *sym) -{ - if (insn->offset == sym->offset) - return true; - - /* Allow direct CALL/JMP past ENDBR */ - if (opts.ibt) { - struct instruction *prev = prev_insn_same_sym(file, insn); - - if (prev && prev->type == INSN_ENDBR && - insn->offset == sym->offset + prev->len) - return true; - } - - return false; -} - /* * A sibling call is a tail-call to another symbol -- to differentiate from a * recursive tail-call which is to the same symbol. 
@@ -3194,53 +3031,6 @@ static int handle_insn_ops(struct instruction *insn, return 0; } -static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2) -{ - struct cfi_state *cfi1 = insn->cfi; - int i; - - if (!cfi1) { - WARN("CFI missing"); - return false; - } - - if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) { - - WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d", - cfi1->cfa.base, cfi1->cfa.offset, - cfi2->cfa.base, cfi2->cfa.offset); - - } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) { - for (i = 0; i < CFI_NUM_REGS; i++) { - if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], - sizeof(struct cfi_reg))) - continue; - - WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d", - i, cfi1->regs[i].base, cfi1->regs[i].offset, - i, cfi2->regs[i].base, cfi2->regs[i].offset); - break; - } - - } else if (cfi1->type != cfi2->type) { - - WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d", - cfi1->type, cfi2->type); - - } else if (cfi1->drap != cfi2->drap || - (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) || - (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) { - - WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)", - cfi1->drap, cfi1->drap_reg, cfi1->drap_offset, - cfi2->drap, cfi2->drap_reg, cfi2->drap_offset); - - } else - return true; - - return false; -} - static inline bool func_uaccess_safe(struct symbol *func) { if (func) diff --git a/tools/objtool/include/objtool/check.h b/tools/objtool/include/objtool/check.h index daa46f1f0965..1f63eca11ddd 100644 --- a/tools/objtool/include/objtool/check.h +++ b/tools/objtool/include/objtool/check.h @@ -7,17 +7,7 @@ #define _CHECK_H #include -#include -#include - -struct insn_state { - struct cfi_state cfi; - unsigned int uaccess_stack; - bool uaccess; - bool df; - bool noinstr; - s8 instr; -}; +#include struct alt_group { /* @@ -36,89 +26,9 @@ struct alt_group { struct cfi_state **cfi; }; -#define INSN_CHUNK_BITS 8 -#define 
INSN_CHUNK_SIZE (1 << INSN_CHUNK_BITS) -#define INSN_CHUNK_MAX (INSN_CHUNK_SIZE - 1) - -struct instruction { - struct hlist_node hash; - struct list_head call_node; - struct section *sec; - unsigned long offset; - unsigned long immediate; - - u8 len; - u8 prev_len; - u8 type; - s8 instr; - - u32 idx : INSN_CHUNK_BITS, - dead_end : 1, - ignore : 1, - ignore_alts : 1, - hint : 1, - save : 1, - restore : 1, - retpoline_safe : 1, - noendbr : 1, - unret : 1, - visited : 4, - no_reloc : 1; - /* 10 bit hole */ - - struct alt_group *alt_group; - struct instruction *jump_dest; - struct instruction *first_jump_src; - union { - struct symbol *_call_dest; - struct reloc *_jump_table; - }; - struct alternative *alts; - struct symbol *sym; - struct stack_op *stack_ops; - struct cfi_state *cfi; -}; - -static inline struct symbol *insn_func(struct instruction *insn) -{ - struct symbol *sym = insn->sym; - - if (sym && sym->type != STT_FUNC) - sym = NULL; - - return sym; -} - #define VISITED_BRANCH 0x01 #define VISITED_BRANCH_UACCESS 0x02 #define VISITED_BRANCH_MASK 0x03 #define VISITED_UNRET 0x04 -static inline bool is_static_jump(struct instruction *insn) -{ - return insn->type == INSN_JUMP_CONDITIONAL || - insn->type == INSN_JUMP_UNCONDITIONAL; -} - -static inline bool is_dynamic_jump(struct instruction *insn) -{ - return insn->type == INSN_JUMP_DYNAMIC || - insn->type == INSN_JUMP_DYNAMIC_CONDITIONAL; -} - -static inline bool is_jump(struct instruction *insn) -{ - return is_static_jump(insn) || is_dynamic_jump(insn); -} - -struct instruction *find_insn(struct objtool_file *file, - struct section *sec, unsigned long offset); - -struct instruction *next_insn_same_sec(struct objtool_file *file, struct instruction *insn); - -#define sec_for_each_insn(file, _sec, insn) \ - for (insn = find_insn(file, _sec, 0); \ - insn && insn->sec == _sec; \ - insn = next_insn_same_sec(file, insn)) - #endif /* _CHECK_H */ diff --git a/tools/objtool/include/objtool/insn.h 
b/tools/objtool/include/objtool/insn.h new file mode 100644 index 000000000000..0c4cff3a2bf4 --- /dev/null +++ b/tools/objtool/include/objtool/insn.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Josh Poimboeuf + */ + +#ifndef _INSN_H +#define _INSN_H + +#include +#include + +struct insn_state { + struct cfi_state cfi; + unsigned int uaccess_stack; + bool uaccess; + bool df; + bool noinstr; + s8 instr; +}; + +#define INSN_CHUNK_BITS 8 +#define INSN_CHUNK_SIZE (1 << INSN_CHUNK_BITS) +#define INSN_CHUNK_MAX (INSN_CHUNK_SIZE - 1) + +struct instruction { + struct hlist_node hash; + struct list_head call_node; + struct section *sec; + unsigned long offset; + unsigned long immediate; + + u8 len; + u8 prev_len; + u8 type; + s8 instr; + + u32 idx : INSN_CHUNK_BITS, + dead_end : 1, + ignore : 1, + ignore_alts : 1, + hint : 1, + save : 1, + restore : 1, + retpoline_safe : 1, + noendbr : 1, + unret : 1, + visited : 4, + no_reloc : 1; + /* 10 bit hole */ + + struct alt_group *alt_group; + struct instruction *jump_dest; + struct instruction *first_jump_src; + union { + struct symbol *_call_dest; + struct reloc *_jump_table; + }; + struct alternative *alts; + struct symbol *sym; + struct stack_op *stack_ops; + struct cfi_state *cfi; +}; + +static inline struct symbol *insn_func(struct instruction *insn) +{ + struct symbol *sym = insn->sym; + + if (sym && sym->type != STT_FUNC) + sym = NULL; + + return sym; +} + +static inline bool is_static_jump(struct instruction *insn) +{ + return insn->type == INSN_JUMP_CONDITIONAL || + insn->type == INSN_JUMP_UNCONDITIONAL; +} + +static inline bool is_dynamic_jump(struct instruction *insn) +{ + return insn->type == INSN_JUMP_DYNAMIC || + insn->type == INSN_JUMP_DYNAMIC_CONDITIONAL; +} + +static inline bool is_jump(struct instruction *insn) +{ + return is_static_jump(insn) || is_dynamic_jump(insn); +} + +void init_insn_state(struct objtool_file *file, struct insn_state *state, + struct section 
*sec); +struct instruction *find_insn(struct objtool_file *file, + struct section *sec, unsigned long offset); +struct instruction *find_last_insn(struct objtool_file *file, + struct section *sec); +struct instruction *next_insn_same_sec(struct objtool_file *file, + struct instruction *insn); +struct instruction *next_insn_same_func(struct objtool_file *file, + struct instruction *insn); +struct instruction *prev_insn_same_sec(struct objtool_file *file, + struct instruction *insn); +struct instruction *prev_insn_same_sym(struct objtool_file *file, + struct instruction *insn); + +struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn); +bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2); +bool is_first_func_insn(struct objtool_file *file, + struct instruction *insn, struct symbol *sym); + +#define sec_for_each_insn(file, _sec, insn) \ + for (insn = find_insn(file, _sec, 0); \ + insn && insn->sec == _sec; \ + insn = next_insn_same_sec(file, insn)) + +#define for_each_insn(file, insn) \ + for (struct section *__sec, *__fake = (struct section *)1; \ + __fake; __fake = NULL) \ + for_each_sec(file, __sec) \ + sec_for_each_insn(file, __sec, insn) + +#define func_for_each_insn(file, func, insn) \ + for (insn = find_insn(file, func->sec, func->offset); \ + insn; \ + insn = next_insn_same_func(file, insn)) + +#define sym_for_each_insn(file, sym, insn) \ + for (insn = find_insn(file, sym->sec, sym->offset); \ + insn && insn->offset < sym->offset + sym->len; \ + insn = next_insn_same_sec(file, insn)) + +#define sym_for_each_insn_continue_reverse(file, sym, insn) \ + for (insn = prev_insn_same_sec(file, insn); \ + insn && insn->offset >= sym->offset; \ + insn = prev_insn_same_sec(file, insn)) + +#define sec_for_each_insn_from(file, insn) \ + for (; insn; insn = next_insn_same_sec(file, insn)) + +#define sec_for_each_insn_continue(file, insn) \ + for (insn = next_insn_same_sec(file, insn); insn; \ + insn = next_insn_same_sec(file, insn)) 
+ +#endif /* _INSN_H */ diff --git a/tools/objtool/insn.c b/tools/objtool/insn.c new file mode 100644 index 000000000000..894b1d94e475 --- /dev/null +++ b/tools/objtool/insn.c @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ + +#include + +#include +#include +#include + +struct instruction *find_insn(struct objtool_file *file, + struct section *sec, unsigned long offset) +{ + struct instruction *insn; + + hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) { + if (insn->sec == sec && insn->offset == offset) + return insn; + } + + return NULL; +} + +struct instruction *next_insn_same_sec(struct objtool_file *file, + struct instruction *insn) +{ + if (insn->idx == INSN_CHUNK_MAX) + return find_insn(file, insn->sec, insn->offset + insn->len); + + insn++; + if (!insn->len) + return NULL; + + return insn; +} + +struct instruction *next_insn_same_func(struct objtool_file *file, + struct instruction *insn) +{ + struct instruction *next = next_insn_same_sec(file, insn); + struct symbol *func = insn_func(insn); + + if (!func) + return NULL; + + if (next && insn_func(next) == func) + return next; + + /* Check if we're already in the subfunction: */ + if (func == func->cfunc) + return NULL; + + /* Move to the subfunction: */ + return find_insn(file, func->cfunc->sec, func->cfunc->offset); +} + +struct instruction *prev_insn_same_sec(struct objtool_file *file, + struct instruction *insn) +{ + if (insn->idx == 0) { + if (insn->prev_len) + return find_insn(file, insn->sec, insn->offset - insn->prev_len); + return NULL; + } + + return insn - 1; +} + +struct instruction *prev_insn_same_sym(struct objtool_file *file, + struct instruction *insn) +{ + struct instruction *prev = prev_insn_same_sec(file, insn); + + if (prev && insn_func(prev) == insn_func(insn)) + return prev; + + return NULL; +} + +void init_insn_state(struct objtool_file *file, struct insn_state *state, + struct section *sec) 
+{ + memset(state, 0, sizeof(*state)); + init_cfi_state(&state->cfi); + + /* + * We need the full vmlinux for noinstr validation, otherwise we can + * not correctly determine insn_call_dest(insn)->sec (external symbols + * do not have a section). + */ + if (opts.link && opts.noinstr && sec) + state->noinstr = sec->noinstr; +} + +struct instruction *find_last_insn(struct objtool_file *file, + struct section *sec) +{ + struct instruction *insn = NULL; + unsigned int offset; + unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0; + + for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--) + insn = find_insn(file, sec, offset); + + return insn; +} + +struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) +{ + struct reloc *reloc; + + if (insn->no_reloc) + return NULL; + + if (!file) + return NULL; + + reloc = find_reloc_by_dest_range(file->elf, insn->sec, + insn->offset, insn->len); + if (!reloc) { + insn->no_reloc = 1; + return NULL; + } + + return reloc; +} + +bool is_first_func_insn(struct objtool_file *file, + struct instruction *insn, struct symbol *sym) +{ + if (insn->offset == sym->offset) + return true; + + /* Allow direct CALL/JMP past ENDBR */ + if (opts.ibt) { + struct instruction *prev = prev_insn_same_sym(file, insn); + + if (prev && prev->type == INSN_ENDBR && + insn->offset == sym->offset + prev->len) + return true; + } + + return false; +} + +bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2) +{ + struct cfi_state *cfi1 = insn->cfi; + int i; + + if (!cfi1) { + WARN("CFI missing"); + return false; + } + + if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) { + + WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d", + cfi1->cfa.base, cfi1->cfa.offset, + cfi2->cfa.base, cfi2->cfa.offset); + + } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) { + for (i = 0; i < CFI_NUM_REGS; i++) { + if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], + sizeof(struct cfi_reg))) + 
continue; + + WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d", + i, cfi1->regs[i].base, cfi1->regs[i].offset, + i, cfi2->regs[i].base, cfi2->regs[i].offset); + break; + } + + } else if (cfi1->type != cfi2->type) { + + WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d", + cfi1->type, cfi2->type); + + } else if (cfi1->drap != cfi2->drap || + (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) || + (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) { + + WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)", + cfi1->drap, cfi1->drap_reg, cfi1->drap_offset, + cfi2->drap, cfi2->drap_reg, cfi2->drap_offset); + + } else + return true; + + return false; +} -- Gitee From da8b5d5dbc96013ee4752c1efd6eb88984de103f Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Wed, 14 Dec 2022 10:20:37 -0600 Subject: [PATCH 1497/2138] objtool: Move decode_instructions() to a separate file ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ check.c implements static stack validation. But decode_instructions() which resides in it can be shared with other types of validation. E.g., dynamic FP validation. Move the function to its own file - decode.c. nr_insns is shared to use after commit 1c34496e5856 ("objtool: Remove instruction::list"), hence tweak it. Signed-off-by: Madhavan T. 
Venkataraman Signed-off-by: Wei Chen Signed-off-by: Justin He Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/x86/coco/tdx/tdcall.S | 1 + tools/objtool/Build | 1 + tools/objtool/check.c | 126 ------------------------- tools/objtool/decode.c | 136 +++++++++++++++++++++++++++ tools/objtool/include/objtool/insn.h | 3 + 5 files changed, 141 insertions(+), 126 deletions(-) create mode 100644 tools/objtool/decode.c diff --git a/arch/x86/coco/tdx/tdcall.S b/arch/x86/coco/tdx/tdcall.S index 56b9cd32895e..0658012149a2 100644 --- a/arch/x86/coco/tdx/tdcall.S +++ b/arch/x86/coco/tdx/tdcall.S @@ -7,6 +7,7 @@ #include #include #include +#include #include "../../virt/vmx/tdx/tdxcall.S" diff --git a/tools/objtool/Build b/tools/objtool/Build index c04e36267379..64ccae49cd5f 100644 --- a/tools/objtool/Build +++ b/tools/objtool/Build @@ -7,6 +7,7 @@ objtool-y += special.o objtool-y += builtin-check.o objtool-y += cfi.o objtool-y += insn.o +objtool-y += decode.o objtool-y += elf.o objtool-y += objtool.o diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 70276f3354b7..3ea857e7f45c 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -157,134 +157,8 @@ static bool dead_end_function(struct objtool_file *file, struct symbol *func) return __dead_end_function(file, func, 0); } -static unsigned long nr_insns; static unsigned long nr_insns_visited; -/* - * Call the arch-specific instruction decoder for all the instructions and add - * them to the global instruction list. 
- */ -static int decode_instructions(struct objtool_file *file) -{ - struct section *sec; - struct symbol *func; - unsigned long offset; - struct instruction *insn; - int ret; - - for_each_sec(file, sec) { - struct instruction *insns = NULL; - u8 prev_len = 0; - u8 idx = 0; - - if (!(sec->sh.sh_flags & SHF_EXECINSTR)) - continue; - - if (strcmp(sec->name, ".altinstr_replacement") && - strcmp(sec->name, ".altinstr_aux") && - strncmp(sec->name, ".discard.", 9)) - sec->text = true; - - if (!strcmp(sec->name, ".noinstr.text") || - !strcmp(sec->name, ".entry.text") || - !strcmp(sec->name, ".cpuidle.text") || - !strncmp(sec->name, ".text..__x86.", 13)) - sec->noinstr = true; - - /* - * .init.text code is ran before userspace and thus doesn't - * strictly need retpolines, except for modules which are - * loaded late, they very much do need retpoline in their - * .init.text - */ - if (!strcmp(sec->name, ".init.text") && !opts.module) - sec->init = true; - - for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { - if (!insns || idx == INSN_CHUNK_MAX) { - insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE); - if (!insns) { - WARN("malloc failed"); - return -1; - } - idx = 0; - } else { - idx++; - } - insn = &insns[idx]; - insn->idx = idx; - - INIT_LIST_HEAD(&insn->call_node); - insn->sec = sec; - insn->offset = offset; - insn->prev_len = prev_len; - - ret = arch_decode_instruction(file, sec, offset, - sec->sh.sh_size - offset, - insn); - if (ret) - return ret; - - prev_len = insn->len; - - /* - * By default, "ud2" is a dead end unless otherwise - * annotated, because GCC 7 inserts it for certain - * divide-by-zero cases. 
- */ - if (insn->type == INSN_BUG) - insn->dead_end = true; - - hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); - nr_insns++; - } - -// printf("%s: last chunk used: %d\n", sec->name, (int)idx); - - sec_for_each_sym(sec, func) { - if (func->type != STT_NOTYPE && func->type != STT_FUNC) - continue; - - if (func->offset == sec->sh.sh_size) { - /* Heuristic: likely an "end" symbol */ - if (func->type == STT_NOTYPE) - continue; - WARN("%s(): STT_FUNC at end of section", - func->name); - return -1; - } - - if (func->embedded_insn || func->alias != func) - continue; - - if (!find_insn(file, sec, func->offset)) { - WARN("%s(): can't find starting instruction", - func->name); - return -1; - } - - sym_for_each_insn(file, func, insn) { - insn->sym = func; - if (func->type == STT_FUNC && - insn->type == INSN_ENDBR && - list_empty(&insn->call_node)) { - if (insn->offset == func->offset) { - list_add_tail(&insn->call_node, &file->endbr_list); - file->nr_endbr++; - } else { - file->nr_endbr_int++; - } - } - } - } - } - - if (opts.stats) - printf("nr_insns: %lu\n", nr_insns); - - return 0; -} - /* * Read the pv_ops[] .data table to find the static initialized values. */ diff --git a/tools/objtool/decode.c b/tools/objtool/decode.c new file mode 100644 index 000000000000..59fea7e1d35b --- /dev/null +++ b/tools/objtool/decode.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ +#include + +#include +#include +#include + +unsigned long nr_insns; + +/* + * Call the arch-specific instruction decoder for all the instructions and add + * them to the global instruction list. 
+ */ +int decode_instructions(struct objtool_file *file) +{ + struct section *sec; + struct symbol *func; + unsigned long offset; + struct instruction *insn; + int ret; + + for_each_sec(file, sec) { + struct instruction *insns = NULL; + u8 prev_len = 0; + u8 idx = 0; + + if (!(sec->sh.sh_flags & SHF_EXECINSTR)) + continue; + + if (strcmp(sec->name, ".altinstr_replacement") && + strcmp(sec->name, ".altinstr_aux") && + strncmp(sec->name, ".discard.", 9)) + sec->text = true; + + if (!strcmp(sec->name, ".noinstr.text") || + !strcmp(sec->name, ".entry.text") || + !strcmp(sec->name, ".cpuidle.text") || + !strncmp(sec->name, ".text..__x86.", 13)) + sec->noinstr = true; + + /* + * .init.text code is ran before userspace and thus doesn't + * strictly need retpolines, except for modules which are + * loaded late, they very much do need retpoline in their + * .init.text + */ + if (!strcmp(sec->name, ".init.text") && !opts.module) + sec->init = true; + + for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { + if (!insns || idx == INSN_CHUNK_MAX) { + insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE); + if (!insns) { + WARN("malloc failed"); + return -1; + } + idx = 0; + } else { + idx++; + } + insn = &insns[idx]; + insn->idx = idx; + + INIT_LIST_HEAD(&insn->call_node); + insn->sec = sec; + insn->offset = offset; + insn->prev_len = prev_len; + + ret = arch_decode_instruction(file, sec, offset, + sec->sh.sh_size - offset, + insn); + if (ret) + return ret; + + prev_len = insn->len; + + /* + * By default, "ud2" is a dead end unless otherwise + * annotated, because GCC 7 inserts it for certain + * divide-by-zero cases. 
+ */ + if (insn->type == INSN_BUG) + insn->dead_end = true; + + hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); + nr_insns++; + } + +// printf("%s: last chunk used: %d\n", sec->name, (int)idx); + + sec_for_each_sym(sec, func) { + if (func->type != STT_NOTYPE && func->type != STT_FUNC) + continue; + + if (func->offset == sec->sh.sh_size) { + /* Heuristic: likely an "end" symbol */ + if (func->type == STT_NOTYPE) + continue; + WARN("%s(): STT_FUNC at end of section", + func->name); + return -1; + } + + if (func->embedded_insn || func->alias != func) + continue; + + if (!find_insn(file, sec, func->offset)) { + WARN("%s(): can't find starting instruction", + func->name); + return -1; + } + + sym_for_each_insn(file, func, insn) { + insn->sym = func; + if (func->type == STT_FUNC && + insn->type == INSN_ENDBR && + list_empty(&insn->call_node)) { + if (insn->offset == func->offset) { + list_add_tail(&insn->call_node, &file->endbr_list); + file->nr_endbr++; + } else { + file->nr_endbr_int++; + } + } + } + } + } + + if (opts.stats) + printf("nr_insns: %lu\n", nr_insns); + + return 0; +} diff --git a/tools/objtool/include/objtool/insn.h b/tools/objtool/include/objtool/insn.h index 0c4cff3a2bf4..1c1ab4e3b666 100644 --- a/tools/objtool/include/objtool/insn.h +++ b/tools/objtool/include/objtool/insn.h @@ -108,6 +108,8 @@ bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2); bool is_first_func_insn(struct objtool_file *file, struct instruction *insn, struct symbol *sym); +int decode_instructions(struct objtool_file *file); + #define sec_for_each_insn(file, _sec, insn) \ for (insn = find_insn(file, _sec, 0); \ insn && insn->sec == _sec; \ @@ -141,4 +143,5 @@ bool is_first_func_insn(struct objtool_file *file, for (insn = next_insn_same_sec(file, insn); insn; \ insn = next_insn_same_sec(file, insn)) +extern unsigned long nr_insns; #endif /* _INSN_H */ -- Gitee From 635d7c6c7a3faaa406dabe5601e8921519225bb6 Mon Sep 17 00:00:00 2001 From: 
"Madhavan T. Venkataraman" Date: Wed, 11 Sep 2024 05:40:59 +0000 Subject: [PATCH 1498/2138] objtool: Reorganize Unwind hint code ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Unwind hint macros and struct unwind_hint are arch-specific. Move them into the arch-specific file asm/unwind_hints.h. But the unwind hint types are generic. Retain them in linux/objtool.h. Unwind hints can be used with static stack validation as well as other forms of validation such as dynamic FP validation. Move the function read_unwind_hints() from check.c to a new file unwind_hints.c so that it can be shared across validation schemes. Signed-off-by: Madhavan T. Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/x86/entry/entry.S | 1 + arch/x86/include/asm/unwind_hints.h | 67 ++++++++- arch/x86/kernel/unwind_orc.c | 3 +- include/linux/objtool.h | 49 ------- tools/arch/x86/include/asm/unwind_hints.h | 158 ++++++++++++++++++++++ tools/objtool/Build | 1 + tools/objtool/check.c | 91 ------------- tools/objtool/include/objtool/insn.h | 1 + tools/objtool/sync-check.sh | 1 + tools/objtool/unwind_hints.c | 101 ++++++++++++++ 10 files changed, 331 insertions(+), 142 deletions(-) create mode 100644 tools/arch/x86/include/asm/unwind_hints.h create mode 100644 tools/objtool/unwind_hints.c diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S index 2143358d0c4c..75cf576c7c4c 100644 --- a/arch/x86/entry/entry.S +++ b/arch/x86/entry/entry.S @@ -4,6 +4,7 @@ */ #include +#include #include #include #include diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h index 85cc57cb6539..196b82f806e4 100644 --- a/arch/x86/include/asm/unwind_hints.h +++ b/arch/x86/include/asm/unwind_hints.h @@ -1,10 +1,75 @@ #ifndef _ASM_X86_UNWIND_HINTS_H #define _ASM_X86_UNWIND_HINTS_H -#include +#include #include "orc_types.h" 
+#ifdef CONFIG_OBJTOOL + +#ifndef __ASSEMBLY__ + +#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \ + "987: \n\t" \ + ".pushsection .discard.unwind_hints\n\t" \ + /* struct unwind_hint */ \ + ".long 987b - .\n\t" \ + ".short " __stringify(sp_offset) "\n\t" \ + ".byte " __stringify(sp_reg) "\n\t" \ + ".byte " __stringify(type) "\n\t" \ + ".byte " __stringify(signal) "\n\t" \ + ".balign 4 \n\t" \ + ".popsection\n\t" + +#else /* __ASSEMBLY__ */ + +/* + * In asm, there are two kinds of code: normal C-type callable functions and + * the rest. The normal callable functions can be called by other code, and + * don't do anything unusual with the stack. Such normal callable functions + * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this + * category. In this case, no special debugging annotations are needed because + * objtool can automatically generate the ORC data for the ORC unwinder to read + * at runtime. + * + * Anything which doesn't fall into the above category, such as syscall and + * interrupt handlers, tends to not be called directly by other functions, and + * often does unusual non-C-function-type things with the stack pointer. Such + * code needs to be annotated such that objtool can understand it. The + * following CFI hint macros are for this type of code. + * + * These macros provide hints to objtool about the state of the stack at each + * instruction. Objtool starts from the hints and follows the code flow, + * making automatic CFI adjustments when it sees pushes and pops, filling out + * the debuginfo as necessary. It will also warn if it sees any + * inconsistencies. + */ +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 +.Lhere_\@: + .pushsection .discard.unwind_hints + /* struct unwind_hint */ + .long .Lhere_\@ - . 
+ .short \sp_offset + .byte \sp_reg + .byte \type + .byte \signal + .balign 4 + .popsection +.endm + +#endif /* __ASSEMBLY__ */ + +#else /* !CONFIG_OBJTOOL */ + +#ifndef __ASSEMBLY__ +#define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t" +#else +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 +.endm +#endif + +#endif /* CONFIG_OBJTOOL */ + #ifdef __ASSEMBLY__ .macro UNWIND_HINT_END_OF_STACK diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 7784076819de..a75d9a827594 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -1,10 +1,11 @@ // SPDX-License-Identifier: GPL-2.0-only -#include #include +#include #include #include #include #include +#include #include #include #include diff --git a/include/linux/objtool.h b/include/linux/objtool.h index 33212e93f4a6..82bf1042a7cf 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -10,18 +10,6 @@ #ifndef __ASSEMBLY__ -#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \ - "987: \n\t" \ - ".pushsection .discard.unwind_hints\n\t" \ - /* struct unwind_hint */ \ - ".long 987b - .\n\t" \ - ".short " __stringify(sp_offset) "\n\t" \ - ".byte " __stringify(sp_reg) "\n\t" \ - ".byte " __stringify(type) "\n\t" \ - ".byte " __stringify(signal) "\n\t" \ - ".balign 4 \n\t" \ - ".popsection\n\t" - /* * This macro marks the given function's stack frame as "non-standard", which * tells objtool to ignore the function when doing stack metadata validation. @@ -69,40 +57,6 @@ .long 999b; \ .popsection; -/* - * In asm, there are two kinds of code: normal C-type callable functions and - * the rest. The normal callable functions can be called by other code, and - * don't do anything unusual with the stack. Such normal callable functions - * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this - * category. 
In this case, no special debugging annotations are needed because - * objtool can automatically generate the ORC data for the ORC unwinder to read - * at runtime. - * - * Anything which doesn't fall into the above category, such as syscall and - * interrupt handlers, tends to not be called directly by other functions, and - * often does unusual non-C-function-type things with the stack pointer. Such - * code needs to be annotated such that objtool can understand it. The - * following CFI hint macros are for this type of code. - * - * These macros provide hints to objtool about the state of the stack at each - * instruction. Objtool starts from the hints and follows the code flow, - * making automatic CFI adjustments when it sees pushes and pops, filling out - * the debuginfo as necessary. It will also warn if it sees any - * inconsistencies. - */ -.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 -.Lhere_\@: - .pushsection .discard.unwind_hints - /* struct unwind_hint */ - .long .Lhere_\@ - . - .short \sp_offset - .byte \sp_reg - .byte \type - .byte \signal - .balign 4 - .popsection -.endm - .macro STACK_FRAME_NON_STANDARD func:req .pushsection .discard.func_stack_frame_non_standard, "aw" .long \func - . 
@@ -152,15 +106,12 @@ #ifndef __ASSEMBLY__ -#define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t" #define STACK_FRAME_NON_STANDARD(func) #define STACK_FRAME_NON_STANDARD_FP(func) #define ANNOTATE_NOENDBR #define ASM_REACHABLE #else #define ANNOTATE_INTRA_FUNCTION_CALL -.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 -.endm .macro STACK_FRAME_NON_STANDARD func:req .endm .macro ANNOTATE_NOENDBR diff --git a/tools/arch/x86/include/asm/unwind_hints.h b/tools/arch/x86/include/asm/unwind_hints.h new file mode 100644 index 000000000000..196b82f806e4 --- /dev/null +++ b/tools/arch/x86/include/asm/unwind_hints.h @@ -0,0 +1,158 @@ +#ifndef _ASM_X86_UNWIND_HINTS_H +#define _ASM_X86_UNWIND_HINTS_H + +#include + +#include "orc_types.h" + +#ifdef CONFIG_OBJTOOL + +#ifndef __ASSEMBLY__ + +#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \ + "987: \n\t" \ + ".pushsection .discard.unwind_hints\n\t" \ + /* struct unwind_hint */ \ + ".long 987b - .\n\t" \ + ".short " __stringify(sp_offset) "\n\t" \ + ".byte " __stringify(sp_reg) "\n\t" \ + ".byte " __stringify(type) "\n\t" \ + ".byte " __stringify(signal) "\n\t" \ + ".balign 4 \n\t" \ + ".popsection\n\t" + +#else /* __ASSEMBLY__ */ + +/* + * In asm, there are two kinds of code: normal C-type callable functions and + * the rest. The normal callable functions can be called by other code, and + * don't do anything unusual with the stack. Such normal callable functions + * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this + * category. In this case, no special debugging annotations are needed because + * objtool can automatically generate the ORC data for the ORC unwinder to read + * at runtime. + * + * Anything which doesn't fall into the above category, such as syscall and + * interrupt handlers, tends to not be called directly by other functions, and + * often does unusual non-C-function-type things with the stack pointer. 
Such + * code needs to be annotated such that objtool can understand it. The + * following CFI hint macros are for this type of code. + * + * These macros provide hints to objtool about the state of the stack at each + * instruction. Objtool starts from the hints and follows the code flow, + * making automatic CFI adjustments when it sees pushes and pops, filling out + * the debuginfo as necessary. It will also warn if it sees any + * inconsistencies. + */ +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 +.Lhere_\@: + .pushsection .discard.unwind_hints + /* struct unwind_hint */ + .long .Lhere_\@ - . + .short \sp_offset + .byte \sp_reg + .byte \type + .byte \signal + .balign 4 + .popsection +.endm + +#endif /* __ASSEMBLY__ */ + +#else /* !CONFIG_OBJTOOL */ + +#ifndef __ASSEMBLY__ +#define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t" +#else +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 +.endm +#endif + +#endif /* CONFIG_OBJTOOL */ + +#ifdef __ASSEMBLY__ + +.macro UNWIND_HINT_END_OF_STACK + UNWIND_HINT type=UNWIND_HINT_TYPE_END_OF_STACK +.endm + +.macro UNWIND_HINT_UNDEFINED + UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED +.endm + +.macro UNWIND_HINT_ENTRY + VALIDATE_UNRET_BEGIN + UNWIND_HINT_END_OF_STACK +.endm + +.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 partial=0 signal=1 + .if \base == %rsp + .if \indirect + .set sp_reg, ORC_REG_SP_INDIRECT + .else + .set sp_reg, ORC_REG_SP + .endif + .elseif \base == %rbp + .set sp_reg, ORC_REG_BP + .elseif \base == %rdi + .set sp_reg, ORC_REG_DI + .elseif \base == %rdx + .set sp_reg, ORC_REG_DX + .elseif \base == %r10 + .set sp_reg, ORC_REG_R10 + .else + .error "UNWIND_HINT_REGS: bad base register" + .endif + + .set sp_offset, \offset + + .if \partial + .set type, UNWIND_HINT_TYPE_REGS_PARTIAL + .elseif \extra == 0 + .set type, UNWIND_HINT_TYPE_REGS_PARTIAL + .set sp_offset, \offset + (16*8) + .else + .set type, UNWIND_HINT_TYPE_REGS + .endif + + UNWIND_HINT sp_reg=sp_reg 
sp_offset=sp_offset type=type signal=\signal +.endm + +.macro UNWIND_HINT_IRET_REGS base=%rsp offset=0 signal=1 + UNWIND_HINT_REGS base=\base offset=\offset partial=1 signal=\signal +.endm + +.macro UNWIND_HINT_IRET_ENTRY base=%rsp offset=0 signal=1 + VALIDATE_UNRET_BEGIN + UNWIND_HINT_IRET_REGS base=\base offset=\offset signal=\signal +.endm + +.macro UNWIND_HINT_FUNC + UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=8 type=UNWIND_HINT_TYPE_FUNC +.endm + +.macro UNWIND_HINT_SAVE + UNWIND_HINT type=UNWIND_HINT_TYPE_SAVE +.endm + +.macro UNWIND_HINT_RESTORE + UNWIND_HINT type=UNWIND_HINT_TYPE_RESTORE +.endm + +#else + +#define UNWIND_HINT_UNDEFINED \ + UNWIND_HINT(UNWIND_HINT_TYPE_UNDEFINED, 0, 0, 0) + +#define UNWIND_HINT_FUNC \ + UNWIND_HINT(UNWIND_HINT_TYPE_FUNC, ORC_REG_SP, 8, 0) + +#define UNWIND_HINT_SAVE \ + UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0) + +#define UNWIND_HINT_RESTORE \ + UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0) + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_X86_UNWIND_HINTS_H */ diff --git a/tools/objtool/Build b/tools/objtool/Build index 64ccae49cd5f..4e9ec210f134 100644 --- a/tools/objtool/Build +++ b/tools/objtool/Build @@ -8,6 +8,7 @@ objtool-y += builtin-check.o objtool-y += cfi.o objtool-y += insn.o objtool-y += decode.o +objtool-y += unwind_hints.o objtool-y += elf.o objtool-y += objtool.o diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 3ea857e7f45c..04ea17dec1c7 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -1814,97 +1814,6 @@ static int add_jump_table_alts(struct objtool_file *file) return 0; } -static int read_unwind_hints(struct objtool_file *file) -{ - struct cfi_state cfi = init_cfi; - struct section *sec; - struct unwind_hint *hint; - struct instruction *insn; - struct reloc *reloc; - int i; - - sec = find_section_by_name(file->elf, ".discard.unwind_hints"); - if (!sec) - return 0; - - if (!sec->rsec) { - WARN("missing .rela.discard.unwind_hints section"); - return -1; - } - - if (sec->sh.sh_size 
% sizeof(struct unwind_hint)) { - WARN("struct unwind_hint size mismatch"); - return -1; - } - - file->hints = true; - - for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) { - hint = (struct unwind_hint *)sec->data->d_buf + i; - - reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); - if (!reloc) { - WARN("can't find reloc for unwind_hints[%d]", i); - return -1; - } - - insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc)); - if (!insn) { - WARN("can't find insn for unwind_hints[%d]", i); - return -1; - } - - insn->hint = true; - - if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) { - insn->cfi = &force_undefined_cfi; - continue; - } - - if (hint->type == UNWIND_HINT_TYPE_SAVE) { - insn->hint = false; - insn->save = true; - continue; - } - - if (hint->type == UNWIND_HINT_TYPE_RESTORE) { - insn->restore = true; - continue; - } - - if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) { - struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); - - if (sym && sym->bind == STB_GLOBAL) { - if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) { - WARN_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR"); - } - } - } - - if (hint->type == UNWIND_HINT_TYPE_FUNC) { - insn->cfi = &func_cfi; - continue; - } - - if (insn->cfi) - cfi = *(insn->cfi); - - if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { - WARN_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg); - return -1; - } - - cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset); - cfi.type = hint->type; - cfi.signal = hint->signal; - - insn->cfi = cfi_hash_find_or_add(&cfi); - } - - return 0; -} - static int read_noendbr_hints(struct objtool_file *file) { struct instruction *insn; diff --git a/tools/objtool/include/objtool/insn.h b/tools/objtool/include/objtool/insn.h index 1c1ab4e3b666..31fddeec9540 100644 --- a/tools/objtool/include/objtool/insn.h +++ b/tools/objtool/include/objtool/insn.h @@ -109,6 +109,7 @@ bool is_first_func_insn(struct 
objtool_file *file, struct instruction *insn, struct symbol *sym); int decode_instructions(struct objtool_file *file); +int read_unwind_hints(struct objtool_file *file); #define sec_for_each_insn(file, _sec, insn) \ for (insn = find_insn(file, _sec, 0); \ diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh index 81d120d05442..77e40cf3cf1f 100755 --- a/tools/objtool/sync-check.sh +++ b/tools/objtool/sync-check.sh @@ -14,6 +14,7 @@ arch/x86/include/asm/nops.h arch/x86/include/asm/inat_types.h arch/x86/include/asm/orc_types.h arch/x86/include/asm/emulate_prefix.h +arch/x86/include/asm/unwind_hints.h arch/x86/lib/x86-opcode-map.txt arch/x86/tools/gen-insn-attr-x86.awk include/linux/static_call_types.h diff --git a/tools/objtool/unwind_hints.c b/tools/objtool/unwind_hints.c new file mode 100644 index 000000000000..40c54ce21110 --- /dev/null +++ b/tools/objtool/unwind_hints.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ +#include + +#include +#include +#include +#include + +int read_unwind_hints(struct objtool_file *file) +{ + struct cfi_state cfi = init_cfi; + struct section *sec; + struct unwind_hint *hint; + struct instruction *insn; + struct reloc *reloc; + int i; + + sec = find_section_by_name(file->elf, ".discard.unwind_hints"); + if (!sec) + return 0; + + if (!sec->rsec) { + WARN("missing .rela.discard.unwind_hints section"); + return -1; + } + + if (sec->sh.sh_size % sizeof(struct unwind_hint)) { + WARN("struct unwind_hint size mismatch"); + return -1; + } + + file->hints = true; + + for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) { + hint = (struct unwind_hint *)sec->data->d_buf + i; + + reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); + if (!reloc) { + WARN("can't find reloc for unwind_hints[%d]", i); + return -1; + } + + insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc)); + if (!insn) { + WARN("can't find insn for unwind_hints[%d]", 
i); + return -1; + } + + insn->hint = true; + + if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) { + insn->cfi = &force_undefined_cfi; + continue; + } + + if (hint->type == UNWIND_HINT_TYPE_SAVE) { + insn->hint = false; + insn->save = true; + continue; + } + + if (hint->type == UNWIND_HINT_TYPE_RESTORE) { + insn->restore = true; + continue; + } + + if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) { + struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); + + if (sym && sym->bind == STB_GLOBAL) { + if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) { + WARN_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR"); + } + } + } + + if (hint->type == UNWIND_HINT_TYPE_FUNC) { + insn->cfi = &func_cfi; + continue; + } + + if (insn->cfi) + cfi = *(insn->cfi); + + if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { + WARN_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg); + return -1; + } + + cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset); + cfi.type = hint->type; + cfi.signal = hint->signal; + + insn->cfi = cfi_hash_find_or_add(&cfi); + } + + return 0; +} -- Gitee From 445dd0b8ffcb020cf1d4d696814eb89e4f79ec5e Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Wed, 14 Dec 2022 10:33:29 -0600 Subject: [PATCH 1499/2138] objtool: Reorganize ORC types ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ The ORC code needs to be reorganized into arch-specific and generic parts so that architectures other than X86 can use the generic parts. orc_types.h contains the following ORC definitions shared between objtool and the kernel: - ORC register definitions which are arch-specific. - orc_entry structure which is generic. Move orc_entry into a new file include/linux/orc_entry.h. Also, the field names bp_reg and bp_offset in struct orc_entry are x86-specific. Change them to fp_reg and fp_offset. FP stands for frame pointer. 
Currently, the type field in orc_entry is only 2 bits. For other architectures, we will need more. So, expand this to 3 bits. Signed-off-by: Madhavan T. Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/x86/include/asm/orc_types.h | 37 +++++------------------- include/linux/orc_entry.h | 39 ++++++++++++++++++++++++++ tools/arch/x86/include/asm/orc_types.h | 37 +++++------------------- tools/include/linux/orc_entry.h | 39 ++++++++++++++++++++++++++ tools/objtool/orc_gen.c | 7 +++-- tools/objtool/sync-check.sh | 1 + 6 files changed, 98 insertions(+), 62 deletions(-) create mode 100644 include/linux/orc_entry.h create mode 100644 tools/include/linux/orc_entry.h diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h index 46d7e06763c9..45f21662ac21 100644 --- a/arch/x86/include/asm/orc_types.h +++ b/arch/x86/include/asm/orc_types.h @@ -8,6 +8,13 @@ #include #include +#include + +/* + * For x86, use the appripriate name for the frame pointer in orc_entry. + */ +#define bp_offset fp_offset +#define bp_reg fp_reg /* * The ORC_REG_* registers are base registers which are used to find other @@ -45,34 +52,4 @@ #define ORC_TYPE_REGS 3 #define ORC_TYPE_REGS_PARTIAL 4 -#ifndef __ASSEMBLY__ -#include - -/* - * This struct is more or less a vastly simplified version of the DWARF Call - * Frame Information standard. It contains only the necessary parts of DWARF - * CFI, simplified for ease of access by the in-kernel unwinder. It tells the - * unwinder how to find the previous SP and BP (and sometimes entry regs) on - * the stack for a given code address. Each instance of the struct corresponds - * to one or more code locations. 
- */ -struct orc_entry { - s16 sp_offset; - s16 bp_offset; -#if defined(__LITTLE_ENDIAN_BITFIELD) - unsigned sp_reg:4; - unsigned bp_reg:4; - unsigned type:3; - unsigned signal:1; -#elif defined(__BIG_ENDIAN_BITFIELD) - unsigned bp_reg:4; - unsigned sp_reg:4; - unsigned unused:4; - unsigned signal:1; - unsigned type:3; -#endif -} __packed; - -#endif /* __ASSEMBLY__ */ - #endif /* _ORC_TYPES_H */ diff --git a/include/linux/orc_entry.h b/include/linux/orc_entry.h new file mode 100644 index 000000000000..f8182edade4f --- /dev/null +++ b/include/linux/orc_entry.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Josh Poimboeuf + */ + +#ifndef _ORC_ENTRY_H +#define _ORC_ENTRY_H + +#ifndef __ASSEMBLY__ +#include + +/* + * This struct is more or less a vastly simplified version of the DWARF Call + * Frame Information standard. It contains only the necessary parts of DWARF + * CFI, simplified for ease of access by the in-kernel unwinder. It tells the + * unwinder how to find the previous SP and BP (and sometimes entry regs) on + * the stack for a given code address. Each instance of the struct corresponds + * to one or more code locations. + */ +struct orc_entry { + s16 sp_offset; + s16 fp_offset; +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned sp_reg:4; + unsigned fp_reg:4; + unsigned type:3; + unsigned signal:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned fp_reg:4; + unsigned sp_reg:4; + unsigned unused:4; + unsigned signal:1; + unsigned type:3; +#endif +} __packed; + +#endif /* __ASSEMBLY__ */ + +#endif /* _ORC_ENTRY_H */ diff --git a/tools/arch/x86/include/asm/orc_types.h b/tools/arch/x86/include/asm/orc_types.h index 46d7e06763c9..45f21662ac21 100644 --- a/tools/arch/x86/include/asm/orc_types.h +++ b/tools/arch/x86/include/asm/orc_types.h @@ -8,6 +8,13 @@ #include #include +#include + +/* + * For x86, use the appripriate name for the frame pointer in orc_entry. 
+ */ +#define bp_offset fp_offset +#define bp_reg fp_reg /* * The ORC_REG_* registers are base registers which are used to find other @@ -45,34 +52,4 @@ #define ORC_TYPE_REGS 3 #define ORC_TYPE_REGS_PARTIAL 4 -#ifndef __ASSEMBLY__ -#include - -/* - * This struct is more or less a vastly simplified version of the DWARF Call - * Frame Information standard. It contains only the necessary parts of DWARF - * CFI, simplified for ease of access by the in-kernel unwinder. It tells the - * unwinder how to find the previous SP and BP (and sometimes entry regs) on - * the stack for a given code address. Each instance of the struct corresponds - * to one or more code locations. - */ -struct orc_entry { - s16 sp_offset; - s16 bp_offset; -#if defined(__LITTLE_ENDIAN_BITFIELD) - unsigned sp_reg:4; - unsigned bp_reg:4; - unsigned type:3; - unsigned signal:1; -#elif defined(__BIG_ENDIAN_BITFIELD) - unsigned bp_reg:4; - unsigned sp_reg:4; - unsigned unused:4; - unsigned signal:1; - unsigned type:3; -#endif -} __packed; - -#endif /* __ASSEMBLY__ */ - #endif /* _ORC_TYPES_H */ diff --git a/tools/include/linux/orc_entry.h b/tools/include/linux/orc_entry.h new file mode 100644 index 000000000000..f8182edade4f --- /dev/null +++ b/tools/include/linux/orc_entry.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Josh Poimboeuf + */ + +#ifndef _ORC_ENTRY_H +#define _ORC_ENTRY_H + +#ifndef __ASSEMBLY__ +#include + +/* + * This struct is more or less a vastly simplified version of the DWARF Call + * Frame Information standard. It contains only the necessary parts of DWARF + * CFI, simplified for ease of access by the in-kernel unwinder. It tells the + * unwinder how to find the previous SP and BP (and sometimes entry regs) on + * the stack for a given code address. Each instance of the struct corresponds + * to one or more code locations. 
+ */ +struct orc_entry { + s16 sp_offset; + s16 fp_offset; +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned sp_reg:4; + unsigned fp_reg:4; + unsigned type:3; + unsigned signal:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned fp_reg:4; + unsigned sp_reg:4; + unsigned unused:4; + unsigned signal:1; + unsigned type:3; +#endif +} __packed; + +#endif /* __ASSEMBLY__ */ + +#endif /* _ORC_ENTRY_H */ diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index bae343908867..ceed7b00cced 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -115,7 +115,7 @@ static int write_orc_entry(struct elf *elf, struct section *orc_sec, orc = (struct orc_entry *)orc_sec->data->d_buf + idx; memcpy(orc, o, sizeof(*orc)); orc->sp_offset = bswap_if_needed(elf, orc->sp_offset); - orc->bp_offset = bswap_if_needed(elf, orc->bp_offset); + orc->fp_offset = bswap_if_needed(elf, orc->fp_offset); /* populate reloc for ip */ if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx, @@ -164,7 +164,10 @@ int orc_create(struct objtool_file *file) struct orc_list_entry *entry; struct list_head orc_list; - struct orc_entry null = { .type = ORC_TYPE_UNDEFINED }; + struct orc_entry null = { + .fp_reg = ORC_REG_UNDEFINED, + .type = UNWIND_HINT_TYPE_CALL, + }; /* Build a deduplicated list of ORC entries: */ INIT_LIST_HEAD(&orc_list); diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh index 77e40cf3cf1f..bc3613c10dff 100755 --- a/tools/objtool/sync-check.sh +++ b/tools/objtool/sync-check.sh @@ -18,6 +18,7 @@ arch/x86/include/asm/unwind_hints.h arch/x86/lib/x86-opcode-map.txt arch/x86/tools/gen-insn-attr-x86.awk include/linux/static_call_types.h +include/linux/orc_entry.h " SYNC_CHECK_FILES=' -- Gitee From 050b045c08f0d00a247ff8d975032615f2da5565 Mon Sep 17 00:00:00 2001 From: "Madhavan T. 
Venkataraman" Date: Wed, 14 Dec 2022 10:40:29 -0600 Subject: [PATCH 1500/2138] objtool: Reorganize ORC code ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ The ORC code needs to be reorganized into arch-specific and generic parts so that architectures other than X86 can avail the generic parts. Some arch-specific ORC code is present in orc_gen.c and orc_dump.c. Create the following two files for such code: - tools/objtool/include/objtool/orc.h - tools/objtool/arch/x86/orc.c Move the following arch-specific function from tools/objtool/orc_gen.c to tools/objtool/arch/x86/orc.c: - init_orc_entry() Move the following arch-specific functions from tools/objtool/orc_dump.c to tools/objtool/arch/x86/orc.c: - reg_name() - orc_type_name() - print_reg() Create arch-specific functions to print the names of the SP and FP registers. The relocation type for relocation entries for ORC structures is arch-specific. Define it in tools/objtool/arch/x86/include/arch/elf.h: #define R_PCREL R_X86_64_PC32 and use that in orc_gen.c so each architecture can provide its own relocation type. Signed-off-by: Madhavan T. 
Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- tools/objtool/arch/x86/Build | 1 + tools/objtool/arch/x86/include/arch/elf.h | 1 + tools/objtool/arch/x86/orc.c | 161 ++++++++++++++++++++++ tools/objtool/include/objtool/orc.h | 18 +++ tools/objtool/orc_dump.c | 65 +-------- tools/objtool/orc_gen.c | 94 +------------ 6 files changed, 188 insertions(+), 152 deletions(-) create mode 100644 tools/objtool/arch/x86/orc.c create mode 100644 tools/objtool/include/objtool/orc.h diff --git a/tools/objtool/arch/x86/Build b/tools/objtool/arch/x86/Build index 9f7869b5c5e0..77b9a66cd6da 100644 --- a/tools/objtool/arch/x86/Build +++ b/tools/objtool/arch/x86/Build @@ -1,5 +1,6 @@ objtool-y += special.o objtool-y += decode.o +objtool-$(BUILD_ORC) += orc.o inat_tables_script = ../arch/x86/tools/gen-insn-attr-x86.awk inat_tables_maps = ../arch/x86/lib/x86-opcode-map.txt diff --git a/tools/objtool/arch/x86/include/arch/elf.h b/tools/objtool/arch/x86/include/arch/elf.h index 7131f7f51a4e..39f23cb55352 100644 --- a/tools/objtool/arch/x86/include/arch/elf.h +++ b/tools/objtool/arch/x86/include/arch/elf.h @@ -9,5 +9,6 @@ #define R_DATA64 R_X86_64_PC32 #define R_TEXT32 R_X86_64_PC32 #define R_TEXT64 R_X86_64_PC32 +#define R_PCREL R_X86_64_PC32 #endif /* _OBJTOOL_ARCH_ELF */ diff --git a/tools/objtool/arch/x86/orc.c b/tools/objtool/arch/x86/orc.c new file mode 100644 index 000000000000..891b85ae314e --- /dev/null +++ b/tools/objtool/arch/x86/orc.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017 Josh Poimboeuf + */ + +#include +#include + +#include + +#include +#include +#include +#include + +int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, + struct instruction *insn) +{ + struct cfi_reg *bp = &cfi->regs[CFI_BP]; + + memset(orc, 0, sizeof(*orc)); + + if (!cfi) { + /* + * This is usually either unreachable nops/traps (which don't + * trigger 
unreachable instruction warnings), or + * STACK_FRAME_NON_STANDARD functions. + */ + orc->type = ORC_TYPE_UNDEFINED; + return 0; + } + + switch (cfi->type) { + case UNWIND_HINT_TYPE_UNDEFINED: + orc->type = ORC_TYPE_UNDEFINED; + return 0; + case UNWIND_HINT_TYPE_END_OF_STACK: + orc->type = ORC_TYPE_END_OF_STACK; + return 0; + case UNWIND_HINT_TYPE_CALL: + orc->type = ORC_TYPE_CALL; + break; + case UNWIND_HINT_TYPE_REGS: + orc->type = ORC_TYPE_REGS; + break; + case UNWIND_HINT_TYPE_REGS_PARTIAL: + orc->type = ORC_TYPE_REGS_PARTIAL; + break; + default: + WARN_INSN(insn, "unknown unwind hint type %d", cfi->type); + return -1; + } + + orc->signal = cfi->signal; + + switch (cfi->cfa.base) { + case CFI_SP: + orc->sp_reg = ORC_REG_SP; + break; + case CFI_SP_INDIRECT: + orc->sp_reg = ORC_REG_SP_INDIRECT; + break; + case CFI_BP: + orc->sp_reg = ORC_REG_BP; + break; + case CFI_BP_INDIRECT: + orc->sp_reg = ORC_REG_BP_INDIRECT; + break; + case CFI_R10: + orc->sp_reg = ORC_REG_R10; + break; + case CFI_R13: + orc->sp_reg = ORC_REG_R13; + break; + case CFI_DI: + orc->sp_reg = ORC_REG_DI; + break; + case CFI_DX: + orc->sp_reg = ORC_REG_DX; + break; + default: + WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base); + return -1; + } + + switch (bp->base) { + case CFI_UNDEFINED: + orc->bp_reg = ORC_REG_UNDEFINED; + break; + case CFI_CFA: + orc->bp_reg = ORC_REG_PREV_SP; + break; + case CFI_BP: + orc->bp_reg = ORC_REG_BP; + break; + default: + WARN_INSN(insn, "unknown BP base reg %d", bp->base); + return -1; + } + + orc->sp_offset = cfi->cfa.offset; + orc->bp_offset = bp->offset; + + return 0; +} + +static const char *reg_name(unsigned int reg) +{ + switch (reg) { + case ORC_REG_PREV_SP: + return "prevsp"; + case ORC_REG_DX: + return "dx"; + case ORC_REG_DI: + return "di"; + case ORC_REG_BP: + return "bp"; + case ORC_REG_SP: + return "sp"; + case ORC_REG_R10: + return "r10"; + case ORC_REG_R13: + return "r13"; + case ORC_REG_BP_INDIRECT: + return "bp(ind)"; + case 
ORC_REG_SP_INDIRECT: + return "sp(ind)"; + default: + return "?"; + } +} + +const char *orc_type_name(unsigned int type) +{ + switch (type) { + case ORC_TYPE_UNDEFINED: + return "(und)"; + case ORC_TYPE_END_OF_STACK: + return "end"; + case ORC_TYPE_CALL: + return "call"; + case ORC_TYPE_REGS: + return "regs"; + case ORC_TYPE_REGS_PARTIAL: + return "regs (partial)"; + default: + return "?"; + } +} + +void orc_print_reg(unsigned int reg, int offset) +{ + if (reg == ORC_REG_BP_INDIRECT) + printf("(bp%+d)", offset); + else if (reg == ORC_REG_SP_INDIRECT) + printf("(sp)%+d", offset); + else if (reg == ORC_REG_UNDEFINED) + printf("(und)"); + else + printf("%s%+d", reg_name(reg), offset); +} diff --git a/tools/objtool/include/objtool/orc.h b/tools/objtool/include/objtool/orc.h new file mode 100644 index 000000000000..11e746786fb4 --- /dev/null +++ b/tools/objtool/include/objtool/orc.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ + +#ifndef _OBJTOOL_ORC_H +#define _OBJTOOL_ORC_H + +#include + +int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, + struct instruction *insn); +const char *orc_type_name(unsigned int type); +void orc_print_reg(unsigned int reg, int offset); +void orc_print_sp(void); +void orc_print_fp(void); + +#endif /* _OBJTOOL_ORC_H */ diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c index 0e183bb1c720..84545d5e694e 100644 --- a/tools/objtool/orc_dump.c +++ b/tools/objtool/orc_dump.c @@ -4,67 +4,12 @@ */ #include -#include #include +#include +#include #include #include -static const char *reg_name(unsigned int reg) -{ - switch (reg) { - case ORC_REG_PREV_SP: - return "prevsp"; - case ORC_REG_DX: - return "dx"; - case ORC_REG_DI: - return "di"; - case ORC_REG_BP: - return "bp"; - case ORC_REG_SP: - return "sp"; - case ORC_REG_R10: - return "r10"; - case ORC_REG_R13: - return "r13"; - case ORC_REG_BP_INDIRECT: - return "bp(ind)"; - case ORC_REG_SP_INDIRECT: - 
return "sp(ind)"; - default: - return "?"; - } -} - -static const char *orc_type_name(unsigned int type) -{ - switch (type) { - case ORC_TYPE_UNDEFINED: - return "(und)"; - case ORC_TYPE_END_OF_STACK: - return "end"; - case ORC_TYPE_CALL: - return "call"; - case ORC_TYPE_REGS: - return "regs"; - case ORC_TYPE_REGS_PARTIAL: - return "regs (partial)"; - default: - return "?"; - } -} - -static void print_reg(unsigned int reg, int offset) -{ - if (reg == ORC_REG_BP_INDIRECT) - printf("(bp%+d)", offset); - else if (reg == ORC_REG_SP_INDIRECT) - printf("(sp)%+d", offset); - else if (reg == ORC_REG_UNDEFINED) - printf("(und)"); - else - printf("%s%+d", reg_name(reg), offset); -} - int orc_dump(const char *_objname) { int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0; @@ -209,11 +154,11 @@ int orc_dump(const char *_objname) printf(" sp:"); - print_reg(orc[i].sp_reg, bswap_if_needed(&dummy_elf, orc[i].sp_offset)); + orc_print_reg(orc[i].sp_reg, bswap_if_needed(&dummy_elf, orc[i].sp_offset)); - printf(" bp:"); + printf(" fp:"); - print_reg(orc[i].bp_reg, bswap_if_needed(&dummy_elf, orc[i].bp_offset)); + orc_print_reg(orc[i].fp_reg, bswap_if_needed(&dummy_elf, orc[i].fp_offset)); printf(" signal:%d\n", orc[i].signal); } diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index ceed7b00cced..f90770a71080 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -7,103 +7,13 @@ #include #include -#include +#include #include +#include #include #include -static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, - struct instruction *insn) -{ - struct cfi_reg *bp = &cfi->regs[CFI_BP]; - - memset(orc, 0, sizeof(*orc)); - - if (!cfi) { - /* - * This is usually either unreachable nops/traps (which don't - * trigger unreachable instruction warnings), or - * STACK_FRAME_NON_STANDARD functions. 
- */ - orc->type = ORC_TYPE_UNDEFINED; - return 0; - } - - switch (cfi->type) { - case UNWIND_HINT_TYPE_UNDEFINED: - orc->type = ORC_TYPE_UNDEFINED; - return 0; - case UNWIND_HINT_TYPE_END_OF_STACK: - orc->type = ORC_TYPE_END_OF_STACK; - return 0; - case UNWIND_HINT_TYPE_CALL: - orc->type = ORC_TYPE_CALL; - break; - case UNWIND_HINT_TYPE_REGS: - orc->type = ORC_TYPE_REGS; - break; - case UNWIND_HINT_TYPE_REGS_PARTIAL: - orc->type = ORC_TYPE_REGS_PARTIAL; - break; - default: - WARN_INSN(insn, "unknown unwind hint type %d", cfi->type); - return -1; - } - - orc->signal = cfi->signal; - - switch (cfi->cfa.base) { - case CFI_SP: - orc->sp_reg = ORC_REG_SP; - break; - case CFI_SP_INDIRECT: - orc->sp_reg = ORC_REG_SP_INDIRECT; - break; - case CFI_BP: - orc->sp_reg = ORC_REG_BP; - break; - case CFI_BP_INDIRECT: - orc->sp_reg = ORC_REG_BP_INDIRECT; - break; - case CFI_R10: - orc->sp_reg = ORC_REG_R10; - break; - case CFI_R13: - orc->sp_reg = ORC_REG_R13; - break; - case CFI_DI: - orc->sp_reg = ORC_REG_DI; - break; - case CFI_DX: - orc->sp_reg = ORC_REG_DX; - break; - default: - WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base); - return -1; - } - - switch (bp->base) { - case CFI_UNDEFINED: - orc->bp_reg = ORC_REG_UNDEFINED; - break; - case CFI_CFA: - orc->bp_reg = ORC_REG_PREV_SP; - break; - case CFI_BP: - orc->bp_reg = ORC_REG_BP; - break; - default: - WARN_INSN(insn, "unknown BP base reg %d", bp->base); - return -1; - } - - orc->sp_offset = cfi->cfa.offset; - orc->bp_offset = bp->offset; - - return 0; -} - static int write_orc_entry(struct elf *elf, struct section *orc_sec, struct section *ip_sec, unsigned int idx, struct section *insn_sec, unsigned long insn_off, -- Gitee From 44e64175edfa258b1c0659a704b13177d3e542c2 Mon Sep 17 00:00:00 2001 From: justinhe2 Date: Thu, 12 Sep 2024 04:07:05 +0000 Subject: [PATCH 1501/2138] objtool: Reorganize ORC kernel code ANBZ: #9262 cherry-picked from 
https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ All of the ORC code in the kernel is currently under arch/x86. The following parts of that code can be shared by other architectures that wish to use ORC. (1) ORC lookup initialization for vmlinux (2) ORC lookup initialization for modules (3) ORC lookup functions Move arch/x86/include/asm/orc_lookup.h to include/asm-generic/orc_lookup.h. Move the ORC lookup code into kernel/orc_lookup.c. Rename the following init functions: unwind_module_init ==> orc_lookup_module_init unwind_init ==> orc_lookup_init since that is exactly what they do. orc_find() is the function that locates the ORC entry for a given PC. Currently, it contains an architecture-specific part to locate ftrace entries. Introduce a new arch-specific function called arch_orc_find() and move the ftrace-related lookup there. If orc_find() is unable to locate the ORC entry for a given PC in vmlinux or in the modules, it can call arch_orc_find() to find architecture-specific entries. Signed-off-by: Madhavan T. 
Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/x86/include/asm/unwind.h | 5 - arch/x86/kernel/module.c | 7 +- arch/x86/kernel/unwind_orc.c | 257 +----------------- arch/x86/kernel/vmlinux.lds.S | 2 +- .../asm => include/asm-generic}/orc_lookup.h | 50 +++- kernel/Makefile | 2 + kernel/orc_lookup.c | 247 +++++++++++++++++ tools/objtool/arch/x86/orc.c | 1 + 8 files changed, 311 insertions(+), 260 deletions(-) rename {arch/x86/include/asm => include/asm-generic}/orc_lookup.h (45%) create mode 100644 kernel/orc_lookup.c diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 7cede4dc21f0..71af8246c69e 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -94,13 +94,8 @@ static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state, #ifdef CONFIG_UNWINDER_ORC void unwind_init(void); -void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, - void *orc, size_t orc_size); #else static inline void unwind_init(void) {} -static inline -void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, - void *orc, size_t orc_size) {} #endif static inline diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 5f71a0cf4399..2fc4411a22d9 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #if 0 #define DEBUGP(fmt, ...) 
\ @@ -370,8 +370,9 @@ int module_finalize(const Elf_Ehdr *hdr, } if (orc && orc_ip) - unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size, - (void *)orc->sh_addr, orc->sh_size); + orc_lookup_module_init(me, + (void *)orc_ip->sh_addr, orc_ip->sh_size, + (void *)orc->sh_addr, orc->sh_size); return 0; } diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index a75d9a827594..0f2f51dbc7e3 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -7,34 +7,12 @@ #include #include #include -#include #include +#include ORC_HEADER; -#define orc_warn(fmt, ...) \ - printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__) - -#define orc_warn_current(args...) \ -({ \ - static bool dumped_before; \ - if (state->task == current && !state->error) { \ - orc_warn(args); \ - if (unwind_debug && !dumped_before) { \ - dumped_before = true; \ - unwind_dump(state); \ - } \ - } \ -}) - -extern int __start_orc_unwind_ip[]; -extern int __stop_orc_unwind_ip[]; -extern struct orc_entry __start_orc_unwind[]; -extern struct orc_entry __stop_orc_unwind[]; - -static bool orc_init __ro_after_init; static bool unwind_debug __ro_after_init; -static unsigned int lookup_num_blocks __ro_after_init; static int __init unwind_debug_cmdline(char *str) { @@ -76,60 +54,9 @@ static void unwind_dump(struct unwind_state *state) } } -static inline unsigned long orc_ip(const int *ip) -{ - return (unsigned long)ip + *ip; -} - -static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table, - unsigned int num_entries, unsigned long ip) -{ - int *first = ip_table; - int *last = ip_table + num_entries - 1; - int *mid = first, *found = first; - - if (!num_entries) - return NULL; - - /* - * Do a binary range search to find the rightmost duplicate of a given - * starting address. Some entries are section terminators which are - * "weak" entries for ensuring there are no gaps. They should be - * ignored when they conflict with a real entry. 
- */ - while (first <= last) { - mid = first + ((last - first) / 2); - - if (orc_ip(mid) <= ip) { - found = mid; - first = mid + 1; - } else - last = mid - 1; - } - - return u_table + (found - ip_table); -} - -#ifdef CONFIG_MODULES -static struct orc_entry *orc_module_find(unsigned long ip) -{ - struct module *mod; - - mod = __module_address(ip); - if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip) - return NULL; - return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind, - mod->arch.num_orcs, ip); -} -#else -static struct orc_entry *orc_module_find(unsigned long ip) -{ - return NULL; -} -#endif +#include #ifdef CONFIG_DYNAMIC_FTRACE -static struct orc_entry *orc_find(unsigned long ip); /* * Ftrace dynamic trampolines do not have orc entries of their own. @@ -173,19 +100,10 @@ static struct orc_entry *orc_ftrace_find(unsigned long ip) } #endif -/* - * If we crash with IP==0, the last successfully executed instruction - * was probably an indirect function call with a NULL function pointer, - * and we don't have unwind information for NULL. - * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function - * pointer into its parent and then continue normally from there. 
- */ -static struct orc_entry null_orc_entry = { - .sp_offset = sizeof(long), - .sp_reg = ORC_REG_SP, - .bp_reg = ORC_REG_UNDEFINED, - .type = ORC_TYPE_CALL -}; +struct orc_entry *arch_orc_find(unsigned long ip) +{ + return orc_ftrace_find(ip); +} /* Fake frame pointer entry -- used as a fallback for generated code */ static struct orc_entry orc_fp_entry = { @@ -196,170 +114,9 @@ static struct orc_entry orc_fp_entry = { .bp_offset = -16, }; -static struct orc_entry *orc_find(unsigned long ip) -{ - static struct orc_entry *orc; - - if (ip == 0) - return &null_orc_entry; - - /* For non-init vmlinux addresses, use the fast lookup table: */ - if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) { - unsigned int idx, start, stop; - - idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE; - - if (unlikely((idx >= lookup_num_blocks-1))) { - orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n", - idx, lookup_num_blocks, (void *)ip); - return NULL; - } - - start = orc_lookup[idx]; - stop = orc_lookup[idx + 1] + 1; - - if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) || - (__start_orc_unwind + stop > __stop_orc_unwind))) { - orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n", - idx, lookup_num_blocks, start, stop, (void *)ip); - return NULL; - } - - return __orc_find(__start_orc_unwind_ip + start, - __start_orc_unwind + start, stop - start, ip); - } - - /* vmlinux .init slow lookup: */ - if (is_kernel_inittext(ip)) - return __orc_find(__start_orc_unwind_ip, __start_orc_unwind, - __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); - - /* Module lookup: */ - orc = orc_module_find(ip); - if (orc) - return orc; - - return orc_ftrace_find(ip); -} - -#ifdef CONFIG_MODULES - -static DEFINE_MUTEX(sort_mutex); -static int *cur_orc_ip_table = __start_orc_unwind_ip; -static struct orc_entry *cur_orc_table = __start_orc_unwind; - -static void orc_sort_swap(void *_a, void *_b, int size) -{ - struct orc_entry *orc_a, *orc_b; - int *a = _a, *b = _b, 
tmp; - int delta = _b - _a; - - /* Swap the .orc_unwind_ip entries: */ - tmp = *a; - *a = *b + delta; - *b = tmp - delta; - - /* Swap the corresponding .orc_unwind entries: */ - orc_a = cur_orc_table + (a - cur_orc_ip_table); - orc_b = cur_orc_table + (b - cur_orc_ip_table); - swap(*orc_a, *orc_b); -} - -static int orc_sort_cmp(const void *_a, const void *_b) -{ - struct orc_entry *orc_a; - const int *a = _a, *b = _b; - unsigned long a_val = orc_ip(a); - unsigned long b_val = orc_ip(b); - - if (a_val > b_val) - return 1; - if (a_val < b_val) - return -1; - - /* - * The "weak" section terminator entries need to always be first - * to ensure the lookup code skips them in favor of real entries. - * These terminator entries exist to handle any gaps created by - * whitelisted .o files which didn't get objtool generation. - */ - orc_a = cur_orc_table + (a - cur_orc_ip_table); - return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1; -} - -void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size, - void *_orc, size_t orc_size) -{ - int *orc_ip = _orc_ip; - struct orc_entry *orc = _orc; - unsigned int num_entries = orc_ip_size / sizeof(int); - - WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 || - orc_size % sizeof(*orc) != 0 || - num_entries != orc_size / sizeof(*orc)); - - /* - * The 'cur_orc_*' globals allow the orc_sort_swap() callback to - * associate an .orc_unwind_ip table entry with its corresponding - * .orc_unwind entry so they can both be swapped. 
- */ - mutex_lock(&sort_mutex); - cur_orc_ip_table = orc_ip; - cur_orc_table = orc; - sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap); - mutex_unlock(&sort_mutex); - - mod->arch.orc_unwind_ip = orc_ip; - mod->arch.orc_unwind = orc; - mod->arch.num_orcs = num_entries; -} -#endif - void __init unwind_init(void) { - size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip; - size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind; - size_t num_entries = orc_ip_size / sizeof(int); - struct orc_entry *orc; - int i; - - if (!num_entries || orc_ip_size % sizeof(int) != 0 || - orc_size % sizeof(struct orc_entry) != 0 || - num_entries != orc_size / sizeof(struct orc_entry)) { - orc_warn("WARNING: Bad or missing .orc_unwind table. Disabling unwinder.\n"); - return; - } - - /* - * Note, the orc_unwind and orc_unwind_ip tables were already - * sorted at build time via the 'sorttable' tool. - * It's ready for binary search straight away, no need to sort it. - */ - - /* Initialize the fast lookup table: */ - lookup_num_blocks = orc_lookup_end - orc_lookup; - for (i = 0; i < lookup_num_blocks-1; i++) { - orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, - num_entries, - LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i)); - if (!orc) { - orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n"); - return; - } - - orc_lookup[i] = orc - __start_orc_unwind; - } - - /* Initialize the ending block: */ - orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries, - LOOKUP_STOP_IP); - if (!orc) { - orc_warn("WARNING: Corrupt .orc_unwind table. 
Disabling unwinder.\n"); - return; - } - orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind; - - orc_init = true; + orc_lookup_init(); } unsigned long unwind_get_return_address(struct unwind_state *state) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 60eb8baa44d7..cb6c9527fdde 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/x86/include/asm/orc_lookup.h b/include/asm-generic/orc_lookup.h similarity index 45% rename from arch/x86/include/asm/orc_lookup.h rename to include/asm-generic/orc_lookup.h index 241631282e43..b883758e4c85 100644 --- a/arch/x86/include/asm/orc_lookup.h +++ b/include/asm-generic/orc_lookup.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2017 Josh Poimboeuf */ @@ -23,6 +23,8 @@ #ifndef LINKER_SCRIPT +#include + extern unsigned int orc_lookup[]; extern unsigned int orc_lookup_end[]; @@ -31,4 +33,50 @@ extern unsigned int orc_lookup_end[]; #endif /* LINKER_SCRIPT */ +#ifndef __ASSEMBLY__ + +#include + +#ifdef CONFIG_UNWINDER_ORC +void orc_lookup_init(void); +void orc_lookup_module_init(struct module *mod, + void *orc_ip, size_t orc_ip_size, + void *orc, size_t orc_size); +#else +static inline void orc_lookup_init(void) {} +static inline +void orc_lookup_module_init(struct module *mod, + void *orc_ip, size_t orc_ip_size, + void *orc, size_t orc_size) +{ +} +#endif + +struct orc_entry *arch_orc_find(unsigned long ip); + +#define orc_warn(fmt, ...) \ + printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__) + +#define orc_warn_current(args...) 
\ +({ \ + static bool dumped_before; \ + if (state->task == current && !state->error) { \ + orc_warn(args); \ + if (unwind_debug && !dumped_before) { \ + dumped_before = true; \ + unwind_dump(state); \ + } \ + } \ +}) + +struct orc_entry *orc_find(unsigned long ip); + +extern bool orc_init; +extern int __start_orc_unwind_ip[]; +extern int __stop_orc_unwind_ip[]; +extern struct orc_entry __start_orc_unwind[]; +extern struct orc_entry __stop_orc_unwind[]; + +#endif /* __ASSEMBLY__ */ + #endif /* _ORC_LOOKUP_H */ diff --git a/kernel/Makefile b/kernel/Makefile index ce105a5558fc..680be35dffc6 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -132,6 +132,8 @@ obj-$(CONFIG_WATCH_QUEUE) += watch_queue.o obj-$(CONFIG_RESOURCE_KUNIT_TEST) += resource_kunit.o obj-$(CONFIG_SYSCTL_KUNIT_TEST) += sysctl-test.o +obj-$(CONFIG_UNWINDER_ORC) += orc_lookup.o + CFLAGS_stackleak.o += $(DISABLE_STACKLEAK_PLUGIN) obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o KASAN_SANITIZE_stackleak.o := n diff --git a/kernel/orc_lookup.c b/kernel/orc_lookup.c new file mode 100644 index 000000000000..ad845da546b4 --- /dev/null +++ b/kernel/orc_lookup.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include + +bool orc_init __ro_after_init; +static unsigned int lookup_num_blocks __ro_after_init; + +static inline unsigned long orc_ip(const int *ip) +{ + return (unsigned long)ip + *ip; +} + +static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table, + unsigned int num_entries, unsigned long ip) +{ + int *first = ip_table; + int *last = ip_table + num_entries - 1; + int *mid = first, *found = first; + + if (!num_entries) + return NULL; + + /* + * Do a binary range search to find the rightmost duplicate of a given + * starting address. Some entries are section terminators which are + * "weak" entries for ensuring there are no gaps. They should be + * ignored when they conflict with a real entry. 
+ */ + while (first <= last) { + mid = first + ((last - first) / 2); + + if (orc_ip(mid) <= ip) { + found = mid; + first = mid + 1; + } else + last = mid - 1; + } + + return u_table + (found - ip_table); +} + +#ifdef CONFIG_MODULES +static struct orc_entry *orc_module_find(unsigned long ip) +{ + struct module *mod; + + mod = __module_address(ip); + if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip) + return NULL; + return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind, + mod->arch.num_orcs, ip); +} +#else +static struct orc_entry *orc_module_find(unsigned long ip) +{ + return NULL; +} +#endif + +/* + * If we crash with IP==0, the last successfully executed instruction + * was probably an indirect function call with a NULL function pointer, + * and we don't have unwind information for NULL. + * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function + * pointer into its parent and then continue normally from there. + */ +static struct orc_entry null_orc_entry = { + .sp_offset = sizeof(long), + .sp_reg = ORC_REG_SP, + .fp_reg = ORC_REG_UNDEFINED, + .type = ORC_TYPE_CALL +}; + +struct orc_entry *orc_find(unsigned long ip) +{ + static struct orc_entry *orc; + + if (ip == 0) + return &null_orc_entry; + + /* For non-init vmlinux addresses, use the fast lookup table: */ + if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) { + unsigned int idx, start, stop; + + idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE; + + if (unlikely((idx >= lookup_num_blocks-1))) { + orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n", + idx, lookup_num_blocks, (void *)ip); + return NULL; + } + + start = orc_lookup[idx]; + stop = orc_lookup[idx + 1] + 1; + + if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) || + (__start_orc_unwind + stop > __stop_orc_unwind))) { + orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n", + idx, lookup_num_blocks, start, stop, (void *)ip); + return NULL; + } + + return 
__orc_find(__start_orc_unwind_ip + start, + __start_orc_unwind + start, stop - start, ip); + } + + /* vmlinux .init slow lookup: */ + if (is_kernel_inittext(ip)) + return __orc_find(__start_orc_unwind_ip, __start_orc_unwind, + __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); + + /* Module lookup: */ + orc = orc_module_find(ip); + if (orc) + return orc; + + return arch_orc_find(ip); +} + +#ifdef CONFIG_MODULES + +static DEFINE_MUTEX(sort_mutex); +static int *cur_orc_ip_table = __start_orc_unwind_ip; +static struct orc_entry *cur_orc_table = __start_orc_unwind; + +static void orc_sort_swap(void *_a, void *_b, int size) +{ + struct orc_entry *orc_a, *orc_b; + int *a = _a, *b = _b, tmp; + int delta = _b - _a; + + /* Swap the .orc_unwind_ip entries: */ + tmp = *a; + *a = *b + delta; + *b = tmp - delta; + + /* Swap the corresponding .orc_unwind entries: */ + orc_a = cur_orc_table + (a - cur_orc_ip_table); + orc_b = cur_orc_table + (b - cur_orc_ip_table); + swap(*orc_a, *orc_b); +} + +static int orc_sort_cmp(const void *_a, const void *_b) +{ + struct orc_entry *orc_a; + const int *a = _a, *b = _b; + unsigned long a_val = orc_ip(a); + unsigned long b_val = orc_ip(b); + + if (a_val > b_val) + return 1; + if (a_val < b_val) + return -1; + + /* + * The "weak" section terminator entries need to always be first + * to ensure the lookup code skips them in favor of real entries. + * These terminator entries exist to handle any gaps created by + * whitelisted .o files which didn't get objtool generation. + */ + orc_a = cur_orc_table + (a - cur_orc_ip_table); + return orc_a->type == ORC_TYPE_UNDEFINED ? 
-1 : 1; +} + +void orc_lookup_module_init(struct module *mod, + void *_orc_ip, size_t orc_ip_size, + void *_orc, size_t orc_size) +{ + int *orc_ip = _orc_ip; + struct orc_entry *orc = _orc; + unsigned int num_entries = orc_ip_size / sizeof(int); + + WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 || + orc_size % sizeof(*orc) != 0 || + num_entries != orc_size / sizeof(*orc)); + + /* + * The 'cur_orc_*' globals allow the orc_sort_swap() callback to + * associate an .orc_unwind_ip table entry with its corresponding + * .orc_unwind entry so they can both be swapped. + */ + mutex_lock(&sort_mutex); + cur_orc_ip_table = orc_ip; + cur_orc_table = orc; + sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap); + mutex_unlock(&sort_mutex); + + mod->arch.orc_unwind_ip = orc_ip; + mod->arch.orc_unwind = orc; + mod->arch.num_orcs = num_entries; +} +#endif + +void __init orc_lookup_init(void) +{ + size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip; + size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind; + size_t num_entries = orc_ip_size / sizeof(int); + struct orc_entry *orc; + int i; + + if (!num_entries || orc_ip_size % sizeof(int) != 0 || + orc_size % sizeof(struct orc_entry) != 0 || + num_entries != orc_size / sizeof(struct orc_entry)) { + orc_warn("WARNING: Bad or missing .orc_unwind table. Disabling unwinder.\n"); + return; + } + + /* + * Note, the orc_unwind and orc_unwind_ip tables were already + * sorted at build time via the 'sorttable' tool. + * It's ready for binary search straight away, no need to sort it. + */ + + /* Initialize the fast lookup table: */ + lookup_num_blocks = orc_lookup_end - orc_lookup; + for (i = 0; i < lookup_num_blocks-1; i++) { + orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, + num_entries, + LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i)); + if (!orc) { + orc_warn("WARNING: Corrupt .orc_unwind table. 
Disabling unwinder.\n"); + return; + } + + orc_lookup[i] = orc - __start_orc_unwind; + } + + /* Initialize the ending block: */ + orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries, + LOOKUP_STOP_IP); + if (!orc) { + orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n"); + return; + } + orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind; + + orc_init = true; +} + +__weak struct orc_entry *arch_orc_find(unsigned long ip) +{ + return NULL; +} diff --git a/tools/objtool/arch/x86/orc.c b/tools/objtool/arch/x86/orc.c index 891b85ae314e..3526dfd9749d 100644 --- a/tools/objtool/arch/x86/orc.c +++ b/tools/objtool/arch/x86/orc.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include -- Gitee From 25b97027f1f99c6e6a5b32d4688f97a6c913857b Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Sun, 8 Jan 2023 11:51:05 -0600 Subject: [PATCH 1502/2138] objtool: Introduce STATIC_CHECK ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Objtool currently implements static stack validation. Another method called dynamic validation can be supported for other architectures. Define STATIC_CHECK to select the files required for static validation in objtool build. Signed-off-by: Madhavan T. 
Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- tools/objtool/Build | 6 +++--- tools/objtool/Makefile | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/tools/objtool/Build b/tools/objtool/Build index 4e9ec210f134..0eec935b52c9 100644 --- a/tools/objtool/Build +++ b/tools/objtool/Build @@ -2,13 +2,13 @@ objtool-y += arch/$(SRCARCH)/ objtool-y += weak.o -objtool-y += check.o -objtool-y += special.o +objtool-$(STATIC_CHECK) += check.o +objtool-$(STATIC_CHECK) += special.o objtool-y += builtin-check.o objtool-y += cfi.o objtool-y += insn.o objtool-y += decode.o -objtool-y += unwind_hints.o +objtool-$(STATIC_CHECK) += unwind_hints.o objtool-y += elf.o objtool-y += objtool.o diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 83b100c1e7f6..2262b49691b8 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -55,9 +55,10 @@ BUILD_ORC := n ifeq ($(SRCARCH),x86) BUILD_ORC := y + STATIC_CHECK := y endif -export BUILD_ORC +export BUILD_ORC STATIC_CHECK export srctree OUTPUT CFLAGS SRCARCH AWK include $(srctree)/tools/build/Makefile.include -- Gitee From 0f5220c6ff9b1b594471498b588f1abec1b60b63 Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Wed, 14 Dec 2022 13:18:46 -0600 Subject: [PATCH 1503/2138] objtool: arm64: Add basic definitions and compile ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Add CFI definitions and Endianness for ARM64. Add DYNAMIC_CHECK option for ARM64. Provide stubs for arch_decode_instructions() and check() just to get Objtool to build on ARM64. Signed-off-by: Madhavan T. 
Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- tools/objtool/Build | 1 + tools/objtool/Makefile | 6 +++++- tools/objtool/arch/arm64/Build | 1 + tools/objtool/arch/arm64/decode.c | 21 +++++++++++++++++++ .../arch/arm64/include/arch/cfi_regs.h | 13 ++++++++++++ .../arch/arm64/include/arch/endianness.h | 9 ++++++++ tools/objtool/dcheck.c | 16 ++++++++++++++ 7 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 tools/objtool/arch/arm64/Build create mode 100644 tools/objtool/arch/arm64/decode.c create mode 100644 tools/objtool/arch/arm64/include/arch/cfi_regs.h create mode 100644 tools/objtool/arch/arm64/include/arch/endianness.h create mode 100644 tools/objtool/dcheck.c diff --git a/tools/objtool/Build b/tools/objtool/Build index 0eec935b52c9..5dbab24ad586 100644 --- a/tools/objtool/Build +++ b/tools/objtool/Build @@ -4,6 +4,7 @@ objtool-y += weak.o objtool-$(STATIC_CHECK) += check.o objtool-$(STATIC_CHECK) += special.o +objtool-$(DYNAMIC_CHECK) += dcheck.o objtool-y += builtin-check.o objtool-y += cfi.o objtool-y += insn.o diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 2262b49691b8..caef8b6a5dc6 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -58,7 +58,11 @@ ifeq ($(SRCARCH),x86) STATIC_CHECK := y endif -export BUILD_ORC STATIC_CHECK +ifeq ($(SRCARCH),arm64) + DYNAMIC_CHECK := y +endif + +export BUILD_ORC STATIC_CHECK DYNAMIC_CHECK export srctree OUTPUT CFLAGS SRCARCH AWK include $(srctree)/tools/build/Makefile.include diff --git a/tools/objtool/arch/arm64/Build b/tools/objtool/arch/arm64/Build new file mode 100644 index 000000000000..3ff1f00c6a47 --- /dev/null +++ b/tools/objtool/arch/arm64/Build @@ -0,0 +1 @@ +objtool-y += decode.o diff --git a/tools/objtool/arch/arm64/decode.c b/tools/objtool/arch/arm64/decode.c new file mode 100644 index 000000000000..1b99c230cf63 --- /dev/null +++ b/tools/objtool/arch/arm64/decode.c @@ 
-0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Author: Madhavan T. Venkataraman (madvenka@linux.microsoft.com) + * + * Copyright (C) 2022 Microsoft Corporation + */ + +#include +#include + +#include + +int arch_decode_instruction(struct objtool_file *file, + const struct section *sec, + unsigned long offset, unsigned int maxlen, + unsigned int *len, enum insn_type *type, + unsigned long *immediate, + struct list_head *ops_list) +{ + return 0; +} diff --git a/tools/objtool/arch/arm64/include/arch/cfi_regs.h b/tools/objtool/arch/arm64/include/arch/cfi_regs.h new file mode 100644 index 000000000000..3b36ddeeddda --- /dev/null +++ b/tools/objtool/arch/arm64/include/arch/cfi_regs.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _OBJTOOL_CFI_REGS_H +#define _OBJTOOL_CFI_REGS_H + +#define CFI_FP 29 +#define CFI_BP CFI_FP +#define CFI_RA 30 +#define CFI_SP 31 + +#define CFI_NUM_REGS 32 + +#endif /* _OBJTOOL_CFI_REGS_H */ diff --git a/tools/objtool/arch/arm64/include/arch/endianness.h b/tools/objtool/arch/arm64/include/arch/endianness.h new file mode 100644 index 000000000000..092401687c3c --- /dev/null +++ b/tools/objtool/arch/arm64/include/arch/endianness.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ARCH_ENDIANNESS_H +#define _ARCH_ENDIANNESS_H + +#include + +#define __TARGET_BYTE_ORDER __LITTLE_ENDIAN + +#endif /* _ARCH_ENDIANNESS_H */ diff --git a/tools/objtool/dcheck.c b/tools/objtool/dcheck.c new file mode 100644 index 000000000000..8663611e2ff4 --- /dev/null +++ b/tools/objtool/dcheck.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ + +#include +#include +#include +#include + +#include + +int check(struct objtool_file *file) +{ + return 0; +} -- Gitee From 40e628ceb70f51cdc3df7c9195ac7b9f316eba36 Mon Sep 17 00:00:00 2001 From: "Madhavan T. 
Venkataraman" Date: Sun, 15 Jan 2023 15:46:07 -0600 Subject: [PATCH 1504/2138] objtool: arm64: Implement decoder for Dynamic FP validation ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Implement arch_decode_instruction() for ARM64. For Dynamic FP validation, we need to walk each function's code and determine the stack and frame offsets at each instruction. So, the following instructions are completely decoded: Instructions that affect the SP and FP: - Load-Store instructions - Add/Sub/Mov instructions Instructions that affect control flow: - Branch instructions - Call instructions - Return instructions Miscellaneous instructions: - Break instruction used for bugs - Paciasp instruction that occurs at the beginning of the frame pointer prolog The rest of the instructions are either dont-care from an unwind perspective or unexpected from the compiler. Add checks for the unexpected ones to catch them if the compiler ever generates them. Signed-off-by: Madhavan T. Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- tools/objtool/arch/arm64/decode.c | 518 ++++++++++++++++++++++++++- tools/objtool/include/objtool/arch.h | 2 + 2 files changed, 517 insertions(+), 3 deletions(-) diff --git a/tools/objtool/arch/arm64/decode.c b/tools/objtool/arch/arm64/decode.c index 1b99c230cf63..0279bbf69143 100644 --- a/tools/objtool/arch/arm64/decode.c +++ b/tools/objtool/arch/arm64/decode.c @@ -1,5 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-only /* + * decode.c - ARM64 instruction decoder for dynamic FP validation. Only a + * small subset of the instructions need to be decoded. The rest + * only need to be sanity checked. + * * Author: Madhavan T. 
Venkataraman (madvenka@linux.microsoft.com) * * Copyright (C) 2022 Microsoft Corporation @@ -7,15 +11,523 @@ #include #include +#include #include +#include +#include + +/* ARM64 instructions are all 4 bytes wide. */ +#define INSN_SIZE 4 + +/* --------------------- instruction decode structs ------------------------ */ + +struct decode_var { + u32 insn; + enum insn_type type; + s64 imm; + unsigned int mode1; + unsigned int mode2; + unsigned int check_reg; + struct stack_op **ops_list; +}; + +struct decode { + unsigned long opmask; + unsigned long op; + unsigned int width; + unsigned int shift; + unsigned int bits; + unsigned int sign_extend; + unsigned int mult; + unsigned int mode1; + unsigned int mode2; + void (*func)(struct decode *decode, struct decode_var *var); +}; + +struct class { + unsigned long opmask; + unsigned long op; + void (*check)(struct decode_var *var); +}; + +/* ------------------------ stack operations ------------------------------- */ + +static void add_stack_op(unsigned char src_reg, enum op_src_type src_type, + s64 src_offset, + unsigned char dest_reg, enum op_dest_type dest_type, + s64 dest_offset, + struct stack_op **ops_list) +{ + struct stack_op *op, *tmp; + + op = calloc(1, sizeof(*op)); + if (!op) { + WARN("calloc failed"); + return; + } + + op->src.reg = src_reg; + op->src.type = src_type; + op->src.offset = src_offset; + op->dest.reg = dest_reg; + op->dest.type = dest_type; + op->dest.offset = dest_offset; + + op->next = NULL; + + if (*ops_list == NULL) + *ops_list = op; + else { + tmp = *ops_list; + while (tmp->next) + tmp = tmp->next; + tmp->next = op; + } +} + +static void add_op(struct decode_var *var, + unsigned char rn, s64 offset, unsigned char rd) +{ + add_stack_op(rn, OP_SRC_ADD, offset, rd, OP_DEST_REG, 0, + var->ops_list); +} + +static void load_op(struct decode_var *var, s64 offset, unsigned char rd) +{ + add_stack_op(CFI_SP, OP_SRC_REG_INDIRECT, offset, rd, OP_DEST_REG, 0, + var->ops_list); +} + +static void 
store_op(struct decode_var *var, s64 offset, unsigned char rd) +{ + add_stack_op(CFI_SP, OP_SRC_REG, 0, rd, OP_DEST_REG_INDIRECT, offset, + var->ops_list); +} + +/* ------------------------ decode functions ------------------------------- */ + +#define is_saved_reg(rt) ((rt) == CFI_FP || (rt) == CFI_RA) +#define is_frame_reg(rt) ((rt) == CFI_FP || (rt) == CFI_SP) + +/* ----- Add/Subtract instructions. ----- */ + +#define CMN_OP 0x31000000 /* Alias of ADDS imm */ +#define CMP_OP 0x71000000 /* Alias of SUBS imm */ + +static void add(struct decode *decode, struct decode_var *var) +{ + unsigned int rd = var->insn & 0x1F; + unsigned int rn = (var->insn >> 5) & 0x1F; + unsigned int shift = (var->insn >> 22) & 1; + + if (decode->op == CMN_OP || decode->op == CMP_OP) + return; + + if (!is_frame_reg(rd)) + return; + + if (is_frame_reg(rn)) { + if (shift) + var->imm <<= 12; + add_op(var, rn, var->imm, rd); + } else { + var->type = INSN_UNRELIABLE; + } +} + +#define CMN_EXT_OP 0x2B200000 /* Alias of ADDS ext */ +#define CMP_EXT_OP 0x6B200000 /* Alias of SUBS ext */ + +static void addc(struct decode *decode, struct decode_var *var) +{ + unsigned int rd = var->insn & 0x1F; + + if (decode->op == CMN_EXT_OP || decode->op == CMP_EXT_OP) + return; + + if (is_frame_reg(rd)) + var->type = INSN_UNRELIABLE; +} + +static void sub(struct decode *decode, struct decode_var *var) +{ + var->imm = -var->imm; + return add(decode, var); +} + +/* ----- Load instructions. ----- */ + +/* + * For some instructions, the target register cannot be FP. There are 3 cases: + * + * - The register width is 32 bits. FP cannot be 32 bits. + * - The register is loaded from one that is not the SP. We do not track + * the value of other registers in static analysis. + * - The instruction does not make sense for the FP to be the target. 
+ */ +static void check_reg(unsigned int reg, struct decode_var *var) +{ + if (reg == CFI_FP) + var->type = INSN_UNRELIABLE; +} + +static void ldp(struct decode *decode, struct decode_var *var) +{ + unsigned int rt1 = var->insn & 0x1F; + unsigned int rt2 = (var->insn >> 10) & 0x1F; + unsigned int rn = (var->insn >> 5) & 0x1F; + s64 imm; + + if (rn != CFI_SP || var->check_reg) { + check_reg(rt1, var); + check_reg(rt2, var); + } + + if (rn == CFI_SP) { + if (var->mode1 && var->mode2) /* Pre-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + + imm = var->mode1 ? 0 : var->imm; + if (is_saved_reg(rt1)) + load_op(var, imm, rt1); + if (is_saved_reg(rt2)) + load_op(var, imm + 8, rt2); + + if (var->mode1 && !var->mode2) /* Post-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + } +} + +static void ldpc(struct decode *decode, struct decode_var *var) +{ + var->check_reg = 1; + ldp(decode, var); +} + +static void ldr(struct decode *decode, struct decode_var *var) +{ + unsigned int rd = var->insn & 0x1F; + unsigned int rn = (var->insn >> 5) & 0x1F; + s64 imm; + + if (rn != CFI_SP || var->check_reg) + check_reg(rd, var); + + if (rn == CFI_SP) { + if (var->mode1 && var->mode2) /* Pre-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + + imm = var->mode1 ? 0 : var->imm; + if (is_saved_reg(rd)) + load_op(var, imm, rd); + + if (var->mode1 && !var->mode2) /* Post-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + } +} + +/* ----- Store instructions. ----- */ + +static void stp(struct decode *decode, struct decode_var *var) +{ + unsigned int rt1 = var->insn & 0x1F; + unsigned int rt2 = (var->insn >> 10) & 0x1F; + unsigned int rn = (var->insn >> 5) & 0x1F; + s64 imm; + + if (var->check_reg) { + check_reg(rt1, var); + check_reg(rt2, var); + } + + if (rn == CFI_SP) { + if (var->mode1 && var->mode2) /* Pre-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + + imm = var->mode1 ? 
0 : var->imm; + if (is_saved_reg(rt1)) + store_op(var, imm, rt1); + if (is_saved_reg(rt2)) + store_op(var, imm + 8, rt2); + + if (var->mode1 && !var->mode2) /* Post-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + } +} + +static void stpc(struct decode *decode, struct decode_var *var) +{ + var->check_reg = 1; + stp(decode, var); +} + +static void str(struct decode *decode, struct decode_var *var) +{ + unsigned int rd = var->insn & 0x1F; + unsigned int rn = (var->insn >> 5) & 0x1F; + s64 imm; + + if (var->check_reg) + check_reg(rd, var); + + if (rn == CFI_SP) { + if (var->mode1 && var->mode2) /* Pre-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + + imm = var->mode1 ? 0 : var->imm; + if (is_saved_reg(rd)) + store_op(var, imm, rd); + + if (var->mode1 && !var->mode2) /* Post-index */ + add_op(var, CFI_SP, var->imm, CFI_SP); + } +} + +static void strc(struct decode *decode, struct decode_var *var) +{ + var->check_reg = 1; + str(decode, var); +} + +/* ----- Control transfer instructions. ----- */ + +#define BR_UNCONDITIONAL 0x14000000 + +static void bra(struct decode *decode, struct decode_var *var) +{ + if (var->imm) { + if (decode->op == BR_UNCONDITIONAL) + var->type = INSN_JUMP_UNCONDITIONAL; + else + var->type = INSN_JUMP_CONDITIONAL; + } else { + var->type = INSN_JUMP_DYNAMIC; + } +} + +static void call(struct decode *decode, struct decode_var *var) +{ + var->type = var->imm ? INSN_CALL : INSN_CALL_DYNAMIC; +} + +static void ret(struct decode *decode, struct decode_var *var) +{ + var->type = INSN_RETURN; +} + +/* ----- Miscellaneous instructions. ----- */ + +static void bug(struct decode *decode, struct decode_var *var) +{ + var->type = INSN_BUG; +} + +static void pac(struct decode *decode, struct decode_var *var) +{ + var->type = INSN_START; +} + +/* ------------------------ Instruction decode ----------------------------- */ + +struct decode decode_array[] = { +/* + * mask OP code mask + * opcode OP code + * width Target register width. 
Values can be: + * 64 (64-bit) + * 32 (32-bit), + * X (64-bit if bit X in the instruction is set) + * -X (32-bit if bit X in the instruction is set) + * shift Shift for the immediate value + * bits Number of bits in the immediate value + * sign Sign extend the immediate value + * mult Multiplier for the immediate value + * am1 Addressing mode bit 1 + * am2 Addressing mode bit 2 + * func Decode function + * + * =============================== INSTRUCTIONS =============================== + * mask opcode width shift bits sign mult am1 am2 func + * ============================================================================ + */ +{ 0x7E400000, 0x28400000, 31, 15, 7, 1, 0, 23, 24, ldp /* LDP */}, +{ 0x7E400000, 0x68400000, 32, 15, 7, 1, 4, 23, 24, ldp /* LDPSW */}, +{ 0x7FC00000, 0x28400000, 31, 15, 7, 1, 0, 0, 0, ldpc /* LDNP */}, +{ 0xBFE00000, 0xB8400000, 30, 12, 9, 1, 1, 10, 11, ldr /* LDR */}, +{ 0xBFC00000, 0xB9400000, 30, 10, 12, 0, 0, 0, 0, ldr /* LDR off */}, +{ 0xFF200400, 0xF8200400, 64, 12, 9, 1, 8, 11, 11, ldr /* LDRA */}, +{ 0xFFC00000, 0x39400000, 32, 10, 12, 0, 1, 0, 0, ldr /* LDRB off */}, +{ 0xFFE00000, 0x38400000, 32, 12, 9, 1, 1, 10, 11, ldr /* LDRB */}, +{ 0xFFC00000, 0x79400000, 32, 10, 12, 0, 2, 0, 0, ldr /* LDRH off */}, +{ 0xFFE00000, 0x78400000, 32, 12, 9, 1, 1, 10, 11, ldr /* LDRH */}, +{ 0xFF800000, 0x39800000, -22, 10, 12, 0, 1, 0, 0, ldr /* LDRSB off */}, +{ 0xFFA00000, 0x38800000, -22, 12, 9, 1, 1, 10, 11, ldr /* LDRSB */}, +{ 0xFF800000, 0x79800000, -22, 10, 12, 0, 2, 0, 0, ldr /* LDRSH off */}, +{ 0xFFA00000, 0x78800000, -22, 12, 9, 1, 1, 10, 11, ldr /* LDRSH */}, +{ 0xFFC00000, 0xB9800000, 32, 10, 12, 0, 4, 0, 0, ldr /* LDRSW off */}, +{ 0xFFE00000, 0xB8800000, 32, 12, 9, 1, 1, 10, 11, ldr /* LDRSW */}, +{ 0x7E000000, 0x28000000, 31, 15, 7, 1, 0, 23, 24, stp /* STP */}, +{ 0x7E400000, 0x28000000, 31, 15, 7, 1, 0, 23, 24, stp /* STG */}, +{ 0xFE400000, 0x68000000, 64, 15, 7, 1, 16, 23, 24, stpc /* STGP */}, +{ 0x7FC00000, 0x28000000, 31, 
15, 7, 1, 0, 0, 0, stpc /* STNP */}, +{ 0xBFC00000, 0xB9000000, 30, 10, 12, 0, 0, 0, 0, str /* STR off */}, +{ 0xBFE00000, 0xB8000000, 30, 12, 9, 1, 1, 10, 11, str /* STR */}, +{ 0xFFE00000, 0xD9200000, 64, 12, 9, 1, 16, 10, 11, strc /* STG */}, +{ 0xFFE00000, 0xD9A00000, 64, 12, 9, 1, 16, 10, 11, strc /* ST2G */}, +{ 0x7F800000, 0x11000000, 31, 10, 12, 0, 1, 0, 0, add /* ADD imm */}, +{ 0x7FE00000, 0x0B200000, 31, 10, 3, 0, 1, 0, 0, addc /* ADD ext */}, +{ 0x7F800000, 0x31000000, 31, 10, 12, 0, 1, 0, 0, add /* ADDS imm */}, +{ 0x7FE00000, 0x2B200000, 31, 10, 3, 0, 1, 0, 0, addc /* ADDS ext */}, +{ 0x7F800000, 0x51000000, 31, 10, 12, 0, 1, 0, 0, sub /* SUB imm */}, +{ 0x7FE00000, 0x4B200000, 31, 10, 3, 0, 1, 0, 0, addc /* SUB ext */}, +{ 0x7F800000, 0x71000000, 31, 10, 12, 0, 1, 0, 0, sub /* SUBS imm */}, +{ 0x7FE00000, 0x6B200000, 31, 10, 3, 0, 1, 0, 0, addc /* SUBS ext */}, +{ 0xFC000000, 0x14000000, 64, 0, 26, 1, 4, 0, 0, bra /* B */}, +{ 0xFF000010, 0x54000000, 64, 5, 19, 1, 4, 0, 0, bra /* B.cond */}, +{ 0xFF000010, 0x54000010, 64, 5, 19, 1, 4, 0, 0, bra /* BC.cond */}, +{ 0xFFFFFC1F, 0xD61F0000, 64, 0, 0, 0, 0, 0, 0, bra /* BR */}, +{ 0xFEFFF800, 0xD61F0800, 64, 0, 0, 0, 0, 0, 0, bra /* BRA */}, +{ 0x7E000000, 0x34000000, 31, 5, 19, 1, 4, 0, 0, bra /* CBZ/CBNZ */}, +{ 0x7E000000, 0x36000000, 31, 5, 14, 1, 4, 0, 0, bra /* TBZ/TBNZ */}, +{ 0xFC000000, 0x94000000, 64, 0, 26, 1, 4, 0, 0, call /* BL */}, +{ 0xFFFFFC1F, 0xD63F0000, 64, 0, 0, 0, 0, 0, 0, call /* BLR */}, +{ 0xFEFFF800, 0xD63F0800, 64, 0, 0, 0, 0, 0, 0, call /* BLRA */}, +{ 0xFFFFFC1F, 0xD65F0000, 64, 0, 0, 0, 0, 0, 0, ret /* RET */}, +{ 0xFFFFFBFF, 0xD65F0BFF, 64, 0, 0, 0, 0, 0, 0, ret /* RETA */}, +{ 0xFFFFFFFF, 0xD69F03E0, 64, 0, 0, 0, 0, 0, 0, ret /* ERET */}, +{ 0xFFFFFBFF, 0xD69F0BFF, 64, 0, 0, 0, 0, 0, 0, ret /* ERETA */}, +{ 0xFFE00000, 0xD4200000, 64, 5, 16, 0, 1, 0, 0, bug /* BRK */}, +{ 0xFFFFFFFF, 0xD503233F, 64, 0, 0, 0, 1, 0, 0, pac /* PACIASP */}, +}; +unsigned int ndecode = 
ARRAY_SIZE(decode_array); + +static void ignore(struct decode_var *var) +{ +} + +static void check_target(struct decode_var *var) +{ + unsigned int rd = var->insn & 0x1F; + + check_reg(rd, var); +} + +struct class class_array[] = { +/* + * mask Class OP mask + * opcode Class OP code + * check Function to perform checks + * + * ========================== INSTRUCTION CLASSES ============================= + * mask opcode check + * ============================================================================ + */ +{ 0x1E000000, 0x00000000, ignore /* RSVD_00 */ }, +{ 0x1E000000, 0x02000000, ignore /* UNALLOC_01 */ }, +{ 0x1E000000, 0x04000000, ignore /* SVE_02 */ }, +{ 0x1E000000, 0x06000000, ignore /* UNALLOC_03 */ }, +{ 0x1E000000, 0x08000000, check_target /* LOAD_STORE_04 */ }, +{ 0x1E000000, 0x0A000000, check_target /* DP_REGISTER_05 */ }, +{ 0x1E000000, 0x0C000000, ignore /* LOAD_STORE_06 */ }, +{ 0x1E000000, 0x0E000000, ignore /* SIMD_FP_07 */ }, +{ 0x1E000000, 0x12000000, check_target /* DP_IMMEDIATE_09 */ }, +{ 0x1E000000, 0x10000000, check_target /* DP_IMMEDIATE_08 */ }, +{ 0x1E000000, 0x14000000, check_target /* BR_SYS_10 */ }, +{ 0x1E000000, 0x16000000, check_target /* BR_SYS_11 */ }, +{ 0x1E000000, 0x18000000, check_target /* LOAD_STORE_12 */ }, +{ 0x1E000000, 0x1A000000, ignore /* DP_REGISTER_13 */ }, +{ 0x1E000000, 0x1C000000, check_target /* LOAD_STORE_14 */ }, +{ 0x1E000000, 0x1E000000, ignore /* SIMD_FP_15 */ }, +}; +unsigned int nclass = ARRAY_SIZE(class_array); + +static inline s64 sign_extend(s64 imm, unsigned int bits) +{ + return (imm << (64 - bits)) >> (64 - bits); +} int arch_decode_instruction(struct objtool_file *file, const struct section *sec, unsigned long offset, unsigned int maxlen, - unsigned int *len, enum insn_type *type, - unsigned long *immediate, - struct list_head *ops_list) + struct instruction *insn) { + struct decode *decode; + struct decode_var var; + struct class *class; + unsigned int width, mask, mult, i; + + if (maxlen < 
INSN_SIZE) + return -1; + insn->len = INSN_SIZE; + + var.insn = *(u32 *)(sec->data->d_buf + offset); + var.type = INSN_OTHER; + var.imm = 0; + var.ops_list = &insn->stack_ops; + + insn->type = INSN_OTHER; + + /* Decode the instruction, if listed. */ + for (i = 0; i < ndecode; i++) { + decode = &decode_array[i]; + + if ((var.insn & decode->opmask) != decode->op) + continue; + + /* Extract addressing mode (for some instructions). */ + var.mode1 = 0; + var.mode2 = 0; + if (decode->mode1) + var.mode1 = (var.insn >> decode->mode1) & 1; + if (decode->mode2) + var.mode2 = (var.insn >> decode->mode2) & 1; + + /* Determine target register width. */ + width = decode->width; + if (width < 0) + width = (var.insn & (1 << -width)) ? 32 : 64; + else if (width < 32) + width = (var.insn & (1 << width)) ? 64 : 32; + + /* + * If the target register width is 32 bits, set the check flag + * so that the target registers are checked to make sure they + * are not the FP or the RA. We should not be using 32-bit + * values in these registers. + */ + var.check_reg = (width == 32); + + /* Extract the immediate value. */ + mask = (1 << decode->bits) - 1; + var.imm = (var.insn >> decode->shift) & mask; + if (decode->sign_extend) + var.imm = sign_extend(var.imm, decode->bits); + + /* Scale the immediate value. */ + mult = decode->mult; + if (!mult) + mult = (width == 32) ? 4 : 8; + var.imm *= mult; + + /* Decode the instruction. */ + decode->func(decode, &var); + goto out; + } + + /* + * Sanity check to make sure that the compiler has not generated + * code that modifies the FP or the RA in an unexpected way. 
+ */ + for (i = 0; i < nclass; i++) { + class = &class_array[i]; + if ((var.insn & class->opmask) == class->op) { + class->check(&var); + goto out; + } + } +out: + insn->immediate = var.imm; + insn->type = var.type; return 0; } diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h index 0b303eba660e..aad28c7a6de9 100644 --- a/tools/objtool/include/objtool/arch.h +++ b/tools/objtool/include/objtool/arch.h @@ -29,6 +29,8 @@ enum insn_type { INSN_TRAP, INSN_ENDBR, INSN_OTHER, + INSN_START, + INSN_UNRELIABLE, }; enum op_dest_type { -- Gitee From becfd53ad5483680491d9e5fae6c7b0bb9d9a3b0 Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Fri, 16 Dec 2022 08:13:00 -0600 Subject: [PATCH 1505/2138] objtool: arm64: Invoke the decoder ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Invoke decode_instructions() from check(). For Dynamic Validation of the frame pointer, we only need the "-s" option for objtool. Signed-off-by: Madhavan T. Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- tools/objtool/dcheck.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/objtool/dcheck.c b/tools/objtool/dcheck.c index 8663611e2ff4..12e91f4d18d8 100644 --- a/tools/objtool/dcheck.c +++ b/tools/objtool/dcheck.c @@ -9,8 +9,13 @@ #include #include +#include +#include int check(struct objtool_file *file) { - return 0; + if (!opts.stackval) + return 1; + + return decode_instructions(file); } -- Gitee From 8aeec14ddb6d287d6227eb52c056f5a38f35497d Mon Sep 17 00:00:00 2001 From: "Madhavan T. 
Venkataraman" Date: Fri, 16 Dec 2022 08:21:39 -0600 Subject: [PATCH 1506/2138] objtool: arm64: Compute destinations for call and jump instructions ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Compute the destination address of each call and jump instruction after decoding all the instructions. Signed-off-by: Madhavan T. Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- tools/objtool/arch/arm64/decode.c | 12 ++++++++ tools/objtool/dcheck.c | 47 ++++++++++++++++++++++++++++++- 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/tools/objtool/arch/arm64/decode.c b/tools/objtool/arch/arm64/decode.c index 0279bbf69143..1002ccd039e8 100644 --- a/tools/objtool/arch/arm64/decode.c +++ b/tools/objtool/arch/arm64/decode.c @@ -20,6 +20,18 @@ /* ARM64 instructions are all 4 bytes wide. */ #define INSN_SIZE 4 +/* --------------------- arch support functions ------------------------- */ + +unsigned long arch_dest_reloc_offset(int addend) +{ + return addend; +} + +unsigned long arch_jump_destination(struct instruction *insn) +{ + return insn->offset + insn->immediate; +} + /* --------------------- instruction decode structs ------------------------ */ struct decode_var { diff --git a/tools/objtool/dcheck.c b/tools/objtool/dcheck.c index 12e91f4d18d8..459cd0abd656 100644 --- a/tools/objtool/dcheck.c +++ b/tools/objtool/dcheck.c @@ -12,10 +12,55 @@ #include #include +/* + * Find the destination instructions for all jumps. 
+ */ +static void add_jump_destinations(struct objtool_file *file) +{ + struct instruction *insn; + struct reloc *reloc; + struct section *dest_sec; + unsigned long dest_off; + + for_each_insn(file, insn) { + if (insn->type != INSN_CALL && + insn->type != INSN_JUMP_CONDITIONAL && + insn->type != INSN_JUMP_UNCONDITIONAL) { + continue; + } + + reloc = insn_reloc(file, insn); + if (!reloc) { + dest_sec = insn->sec; + dest_off = arch_jump_destination(insn); + } else if (reloc->sym->type == STT_SECTION) { + dest_sec = reloc->sym->sec; + dest_off = arch_dest_reloc_offset(reloc_addend(reloc)); + } else if (reloc->sym->sec->idx) { + dest_sec = reloc->sym->sec; + dest_off = reloc->sym->sym.st_value + + arch_dest_reloc_offset(reloc_addend(reloc)); + } else { + /* non-func asm code jumping to another file */ + continue; + } + + insn->jump_dest = find_insn(file, dest_sec, dest_off); + } +} + int check(struct objtool_file *file) { + int ret; + if (!opts.stackval) return 1; - return decode_instructions(file); + ret = decode_instructions(file); + if (ret) + return ret; + + add_jump_destinations(file); + + return 0; } -- Gitee From 50402b7ae5b6912c74e2fd0f942121f0d58f6bea Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Wed, 11 Sep 2024 03:23:42 +0000 Subject: [PATCH 1507/2138] objtool: arm64: Walk instructions and compute CFI for each instruction ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Implement arch_initial_func_cfi_state() to initialize the CFI for a function. Add code to check() in dcheck.c to walk the instructions in every function and compute the CFI information for each instruction. Perform the following checks to validate the CFI: - Make sure that there is exactly one frame pointer prolog for an epilog. - Make sure that the frame pointer register is initialized to the location at which the previous frame pointer is stored on the stack. 
- Make sure that the frame pointer is restored in the epilog from the same location on stack where it was saved. - Make sure that the return address is restored in the epilog from the same location on stack where it was saved. - Make sure that the frame pointer and return address are saved on the stack adjacent to each other in the correct order as specified in the ABI. - If an instruction can be reached via two different code paths, make sure that the CFIs computed from traversing each path match for the instruction. - Every time the frame pointer or stack offset is changed, make sure the offsets have legal values. insn_cfi_match() is used to compare CFIs to see if they match. When there is a mismatch, the function emits error messages. With static checking, these errors result in failure. With dynamic checking, these errors only resulting in marking those instructions as unreliable for unwind. In the latter case, suppress the warning messages. Signed-off-by: Madhavan T. Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- tools/objtool/arch/arm64/decode.c | 15 ++ tools/objtool/check.c | 2 +- tools/objtool/dcheck.c | 284 +++++++++++++++++++++++++++ tools/objtool/include/objtool/insn.h | 3 +- tools/objtool/insn.c | 38 ++-- 5 files changed, 323 insertions(+), 19 deletions(-) diff --git a/tools/objtool/arch/arm64/decode.c b/tools/objtool/arch/arm64/decode.c index 1002ccd039e8..1668c288786c 100644 --- a/tools/objtool/arch/arm64/decode.c +++ b/tools/objtool/arch/arm64/decode.c @@ -22,6 +22,21 @@ /* --------------------- arch support functions ------------------------- */ +void arch_initial_func_cfi_state(struct cfi_init_state *state) +{ + int i; + + for (i = 0; i < CFI_NUM_REGS; i++) { + state->regs[i].base = CFI_UNDEFINED; + state->regs[i].offset = 0; + } + state->regs[CFI_FP].base = CFI_CFA; + + /* initial CFA (call frame address) */ + state->cfa.base = CFI_SP; + state->cfa.offset = 
0; +} + unsigned long arch_dest_reloc_offset(int addend) { return addend; diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 04ea17dec1c7..1322c6f0dc0b 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -3047,7 +3047,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, visited = VISITED_BRANCH << state.uaccess; if (insn->visited & VISITED_BRANCH_MASK) { - if (!insn->hint && !insn_cfi_match(insn, &state.cfi)) + if (!insn->hint && !insn_cfi_match(insn, &state.cfi, true)) return 1; if (insn->visited & visited) diff --git a/tools/objtool/dcheck.c b/tools/objtool/dcheck.c index 459cd0abd656..3cd9ae0d2cd3 100644 --- a/tools/objtool/dcheck.c +++ b/tools/objtool/dcheck.c @@ -49,6 +49,283 @@ static void add_jump_destinations(struct objtool_file *file) } } +static bool update_cfi_state(struct cfi_state *cfi, struct stack_op *op) +{ + struct cfi_reg *cfa = &cfi->cfa; + struct cfi_reg *fp_reg = &cfi->regs[CFI_FP]; + struct cfi_reg *fp_val = &cfi->vals[CFI_FP]; + struct cfi_reg *ra_val = &cfi->vals[CFI_RA]; + enum op_src_type src_type = op->src.type; + enum op_dest_type dest_type = op->dest.type; + unsigned char dest_reg = op->dest.reg; + int offset; + + if (src_type == OP_SRC_ADD && dest_type == OP_DEST_REG) { + + if (op->src.reg == CFI_SP) { + if (op->dest.reg == CFI_SP) { + cfa->offset -= op->src.offset; + } else { + if (fp_reg->offset) { + /* FP is already set. */ + return false; + } + fp_reg->offset = -cfa->offset + op->src.offset; + if (fp_reg->offset != fp_val->offset) { + /* + * FP does not match the location + * where FP is stored on stack. + */ + return false; + } + } + } else { + if (op->dest.reg == CFI_SP) { + cfa->offset = + -(fp_reg->offset + op->src.offset); + } else { + /* Setting the FP from itself is unreliable. */ + return false; + } + } + /* + * When the stack pointer is restored in the frame pointer + * epilog, forget where the FP and RA were stored. 
+ */ + if (cfa->offset < -fp_val->offset) + fp_val->offset = 0; + if (cfa->offset < -ra_val->offset) + ra_val->offset = 0; + goto out; + } + + if (src_type == OP_SRC_REG_INDIRECT && dest_type == OP_DEST_REG) { + offset = -cfa->offset + op->src.offset; + if (dest_reg == CFI_FP) { + if (!fp_val->offset || fp_val->offset != offset) { + /* + * Loading the FP from a different place than + * where it is stored. + */ + return false; + } + if (!ra_val->offset || + (ra_val->offset - fp_val->offset) != 8) { + /* FP and RA must be adjacent in a frame. */ + return false; + } + fp_reg->offset = 0; + } + goto out; + } + + if (src_type == OP_SRC_REG && dest_type == OP_DEST_REG_INDIRECT) { + offset = -cfa->offset + op->dest.offset; + if (dest_reg == CFI_FP) { + /* Record where the FP is stored on the stack. */ + fp_val->offset = offset; + } else { + /* Record where the RA is stored on the stack. */ + if (fp_val->offset && (offset - fp_val->offset) == 8) + ra_val->offset = offset; + } + goto out; + } + return false; +out: + if (cfa->offset < 0 || fp_reg->offset > 0 || + fp_val->offset > 0 || ra_val->offset > 0) { + /* Unexpected SP and FP offset values. 
*/ + return false; + } + return true; +} + +static bool do_stack_ops(struct instruction *insn, struct insn_state *state) +{ + struct stack_op *op; + + for (op = insn->stack_ops; op; op = op->next) { + if (!update_cfi_state(&state->cfi, op)) + return false; + } + return true; +} + +static bool validate_branch(struct objtool_file *file, struct section *sec, + struct symbol *func, struct instruction *insn, + struct insn_state *state) +{ + struct symbol *insn_func = insn->sym; + struct instruction *dest; + struct cfi_state save_cfi; + struct cfi_reg *cfa; + struct cfi_reg *regs; + unsigned long start, end; + + for (; insn; insn = next_insn_same_sec(file, insn)) { + + if (insn->sym != insn_func) + return true; + + if (insn->cfi) + return insn_cfi_match(insn, &state->cfi, false); + + insn->cfi = cfi_hash_find_or_add(&state->cfi); + dest = insn->jump_dest; + + if (!do_stack_ops(insn, state)) + return false; + + switch (insn->type) { + case INSN_BUG: + return true; + + case INSN_UNRELIABLE: + return false; + + case INSN_RETURN: + cfa = &state->cfi.cfa; + regs = state->cfi.regs; + if (cfa->offset || regs[CFI_FP].offset) { + /* SP and FP offsets should be 0 on return. */ + return false; + } + return true; + + case INSN_CALL: + case INSN_CALL_DYNAMIC: + start = func->offset; + end = start + func->len; + /* Treat intra-function calls as jumps. 
*/ + if (!dest || dest->sec != sec || + dest->offset <= start || dest->offset >= end) { + break; + } + + case INSN_JUMP_UNCONDITIONAL: + case INSN_JUMP_CONDITIONAL: + case INSN_JUMP_DYNAMIC: + if (dest) { + save_cfi = state->cfi; + if (!validate_branch(file, sec, func, dest, + state)) { + return false; + } + state->cfi = save_cfi; + } + if (insn->type == INSN_JUMP_UNCONDITIONAL || + insn->type == INSN_JUMP_DYNAMIC) { + return true; + } + break; + + default: + break; + } + } + return true; +} + +static bool walk_reachable(struct objtool_file *file, struct section *sec, + struct symbol *func) +{ + struct instruction *insn = find_insn(file, sec, func->offset); + struct insn_state state; + + func_for_each_insn(file, func, insn) { + + if (insn->offset != func->offset && + (insn->type != INSN_START || insn->cfi)) { + continue; + } + + init_insn_state(file, &state, sec); + set_func_state(&state.cfi); + + if (!validate_branch(file, sec, func, insn, &state)) + return false; + } + return true; +} + +static void remove_cfi(struct objtool_file *file, struct symbol *func) +{ + struct instruction *insn; + + func_for_each_insn(file, func, insn) { + insn->cfi = NULL; + } +} + +/* + * Instructions that were not visited by walk_reachable() would not have a + * CFI. Try to initialize their CFI. For instance, there could be a table of + * unconditional branches like for a switch statement. Or, code can be patched + * by the kernel at runtime. After patching, some of the previously unreachable + * code may become reachable. + * + * This follows the same pattern as the DWARF info generated by the compiler. 
+ */ +static bool walk_unreachable(struct objtool_file *file, struct section *sec, + struct symbol *func) +{ + struct instruction *insn, *prev; + struct insn_state state; + + func_for_each_insn(file, func, insn) { + + if (insn->cfi) + continue; + + prev = prev_insn_same_sec(file, insn); + if (!prev || prev->sym != insn->sym || !prev->cfi) + continue; + + if (prev->type != INSN_JUMP_UNCONDITIONAL && + prev->type != INSN_JUMP_DYNAMIC && + prev->type != INSN_BUG) { + continue; + } + + /* Propagate the CFI. */ + state.cfi = *prev->cfi; + if (!validate_branch(file, sec, func, insn, &state)) + return false; + } + return true; +} + +static void walk_section(struct objtool_file *file, struct section *sec) +{ + struct symbol *func; + + list_for_each_entry(func, &sec->symbol_list, list) { + + if (func->type != STT_FUNC || !func->len || + func->pfunc != func || func->alias != func) { + /* No CFI generated for this function. */ + continue; + } + + if (!walk_reachable(file, sec, func) || + !walk_unreachable(file, sec, func)) { + remove_cfi(file, func); + continue; + } + } +} + +static void walk_sections(struct objtool_file *file) +{ + struct section *sec; + + for_each_sec(file, sec) { + if (sec->sh.sh_flags & SHF_EXECINSTR) + walk_section(file, sec); + } +} + int check(struct objtool_file *file) { int ret; @@ -56,11 +333,18 @@ int check(struct objtool_file *file) if (!opts.stackval) return 1; + arch_initial_func_cfi_state(&initial_func_cfi); + + if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3))) + return -1; + ret = decode_instructions(file); if (ret) return ret; add_jump_destinations(file); + walk_sections(file); + return 0; } diff --git a/tools/objtool/include/objtool/insn.h b/tools/objtool/include/objtool/insn.h index 31fddeec9540..cc54e72234d8 100644 --- a/tools/objtool/include/objtool/insn.h +++ b/tools/objtool/include/objtool/insn.h @@ -104,7 +104,8 @@ struct instruction *prev_insn_same_sym(struct objtool_file *file, struct instruction *insn); struct reloc 
*insn_reloc(struct objtool_file *file, struct instruction *insn); -bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2); +bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2, + bool print); bool is_first_func_insn(struct objtool_file *file, struct instruction *insn, struct symbol *sym); diff --git a/tools/objtool/insn.c b/tools/objtool/insn.c index 894b1d94e475..c26e2e43c5da 100644 --- a/tools/objtool/insn.c +++ b/tools/objtool/insn.c @@ -144,7 +144,8 @@ bool is_first_func_insn(struct objtool_file *file, return false; } -bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2) +bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2, + bool print) { struct cfi_state *cfi1 = insn->cfi; int i; @@ -155,35 +156,38 @@ bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2) } if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) { - - WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d", - cfi1->cfa.base, cfi1->cfa.offset, - cfi2->cfa.base, cfi2->cfa.offset); - + if (print) { + WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d", + cfi1->cfa.base, cfi1->cfa.offset, + cfi2->cfa.base, cfi2->cfa.offset); + } } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) { for (i = 0; i < CFI_NUM_REGS; i++) { if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], sizeof(struct cfi_reg))) continue; - - WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d", - i, cfi1->regs[i].base, cfi1->regs[i].offset, - i, cfi2->regs[i].base, cfi2->regs[i].offset); + if (print) { + WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d", + i, cfi1->regs[i].base, cfi1->regs[i].offset, + i, cfi2->regs[i].base, cfi2->regs[i].offset); + } break; } } else if (cfi1->type != cfi2->type) { - - WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d", - cfi1->type, cfi2->type); + if (print) { + WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d", + cfi1->type, cfi2->type); + } } 
else if (cfi1->drap != cfi2->drap || (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) || (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) { - - WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)", - cfi1->drap, cfi1->drap_reg, cfi1->drap_offset, - cfi2->drap, cfi2->drap_reg, cfi2->drap_offset); + if (print) { + WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)", + cfi1->drap, cfi1->drap_reg, cfi1->drap_offset, + cfi2->drap, cfi2->drap_reg, cfi2->drap_offset); + } } else return true; -- Gitee From 15534c5110a88e20e08f3e96ce2316ebb5cf78b0 Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Wed, 11 Sep 2024 04:15:11 +0000 Subject: [PATCH 1508/2138] objtool: arm64: Generate ORC data from CFI for object files ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Enable ORC data for ARM64. Call orc_create() from check() in dcheck.c to generate the ORC sections in object files for dynamic frame pointer validation. Define support functions for ORC data creation. Signed-off-by: Madhavan T. 
Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/arm64/include/asm/orc_types.h | 41 ++++++++++ tools/arch/arm64/include/asm/orc_types.h | 41 ++++++++++ tools/objtool/Makefile | 1 + tools/objtool/arch/arm64/Build | 1 + tools/objtool/arch/arm64/include/arch/elf.h | 15 ++++ tools/objtool/arch/arm64/orc.c | 87 +++++++++++++++++++++ tools/objtool/dcheck.c | 5 +- tools/objtool/include/objtool/insn.h | 1 + tools/objtool/include/objtool/objtool.h | 1 + tools/objtool/insn.c | 20 +++++ tools/objtool/orc_gen.c | 12 ++- tools/objtool/sync-check.sh | 7 ++ 12 files changed, 229 insertions(+), 3 deletions(-) create mode 100644 arch/arm64/include/asm/orc_types.h create mode 100644 tools/arch/arm64/include/asm/orc_types.h create mode 100644 tools/objtool/arch/arm64/include/arch/elf.h create mode 100644 tools/objtool/arch/arm64/orc.c diff --git a/arch/arm64/include/asm/orc_types.h b/arch/arm64/include/asm/orc_types.h new file mode 100644 index 000000000000..d7e8089f80da --- /dev/null +++ b/arch/arm64/include/asm/orc_types.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Author: Madhavan T. Venkataraman (madvenka@linux.microsoft.com) + * + * Copyright (C) 2022 Microsoft Corporation + */ + +#ifndef _ORC_TYPES_H +#define _ORC_TYPES_H + +#include +#include +#include + +/* + * The ORC_REG_* registers are base registers which are used to find other + * registers on the stack. + * + * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the + * address of the previous frame: the caller's SP before it called the current + * function. + * + * ORC_REG_UNDEFINED means the corresponding register's value didn't change in + * the current frame. + * + * We only use base registers SP and FP -- which the previous SP is based on -- + * and PREV_SP and UNDEFINED -- which the previous FP is based on. 
+ */ +#define ORC_REG_UNDEFINED 0 +#define ORC_REG_PREV_SP 1 +#define ORC_REG_SP 2 +#define ORC_REG_FP 3 +#define ORC_REG_MAX 4 + +#define ORC_TYPE_UNDEFINED 0 +#define ORC_TYPE_END_OF_STACK 1 +#define ORC_TYPE_CALL 2 +#define ORC_TYPE_REGS 3 +#define ORC_TYPE_REGS_PARTIAL 4 + +#endif /* _ORC_TYPES_H */ diff --git a/tools/arch/arm64/include/asm/orc_types.h b/tools/arch/arm64/include/asm/orc_types.h new file mode 100644 index 000000000000..d7e8089f80da --- /dev/null +++ b/tools/arch/arm64/include/asm/orc_types.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Author: Madhavan T. Venkataraman (madvenka@linux.microsoft.com) + * + * Copyright (C) 2022 Microsoft Corporation + */ + +#ifndef _ORC_TYPES_H +#define _ORC_TYPES_H + +#include +#include +#include + +/* + * The ORC_REG_* registers are base registers which are used to find other + * registers on the stack. + * + * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the + * address of the previous frame: the caller's SP before it called the current + * function. + * + * ORC_REG_UNDEFINED means the corresponding register's value didn't change in + * the current frame. + * + * We only use base registers SP and FP -- which the previous SP is based on -- + * and PREV_SP and UNDEFINED -- which the previous FP is based on. 
+ */ +#define ORC_REG_UNDEFINED 0 +#define ORC_REG_PREV_SP 1 +#define ORC_REG_SP 2 +#define ORC_REG_FP 3 +#define ORC_REG_MAX 4 + +#define ORC_TYPE_UNDEFINED 0 +#define ORC_TYPE_END_OF_STACK 1 +#define ORC_TYPE_CALL 2 +#define ORC_TYPE_REGS 3 +#define ORC_TYPE_REGS_PARTIAL 4 + +#endif /* _ORC_TYPES_H */ diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index caef8b6a5dc6..fcedbad726c0 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -59,6 +59,7 @@ ifeq ($(SRCARCH),x86) endif ifeq ($(SRCARCH),arm64) + BUILD_ORC := y DYNAMIC_CHECK := y endif diff --git a/tools/objtool/arch/arm64/Build b/tools/objtool/arch/arm64/Build index 3ff1f00c6a47..8615abfb12cf 100644 --- a/tools/objtool/arch/arm64/Build +++ b/tools/objtool/arch/arm64/Build @@ -1 +1,2 @@ objtool-y += decode.o +objtool-y += orc.o diff --git a/tools/objtool/arch/arm64/include/arch/elf.h b/tools/objtool/arch/arm64/include/arch/elf.h new file mode 100644 index 000000000000..9f75e8a3210c --- /dev/null +++ b/tools/objtool/arch/arm64/include/arch/elf.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ + +#ifndef _OBJTOOL_ARCH_ELF +#define _OBJTOOL_ARCH_ELF + +#define R_NONE R_AARCH64_NONE +#define R_ABS32 R_AARCH64_ABS32 +#define R_ABS64 R_AARCH64_ABS64 +#define R_DATA32 R_AARCH64_PREL32 +#define R_DATA64 R_AARCH64_PREL32 +#define R_TEXT32 R_AARCH64_PREL32 +#define R_TEXT64 R_AARCH64_PREL32 +#define R_PCREL R_AARCH64_PREL32 + +#endif /* _OBJTOOL_ARCH_ELF */ diff --git a/tools/objtool/arch/arm64/orc.c b/tools/objtool/arch/arm64/orc.c new file mode 100644 index 000000000000..82c73abedf6a --- /dev/null +++ b/tools/objtool/arch/arm64/orc.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Author: Madhavan T. 
Venkataraman (madvenka@linux.microsoft.com) + * + * Copyright (C) 2022 Microsoft Corporation + */ +#include + +#include + +#include +#include + +int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, + struct instruction *insn) +{ + struct cfi_reg *fp = &cfi->regs[CFI_FP]; + + memset(orc, 0, sizeof(*orc)); + + orc->sp_reg = ORC_REG_SP; + orc->fp_reg = ORC_REG_PREV_SP; + orc->type = UNWIND_HINT_TYPE_CALL; + + if (!cfi || cfi->cfa.base == CFI_UNDEFINED || + (cfi->type == UNWIND_HINT_TYPE_CALL && !fp->offset)) { + /* + * The frame pointer has not been set up. This instruction is + * unreliable from an unwind perspective. + */ + return 0; + } + + orc->sp_offset = cfi->cfa.offset; + orc->fp_offset = fp->offset; + orc->type = cfi->type; + orc->signal = cfi->end; + + return 0; +} + +static const char *reg_name(unsigned int reg) +{ + switch (reg) { + case ORC_REG_PREV_SP: + return "cfa"; + case ORC_REG_FP: + return "x29"; + case ORC_REG_SP: + return "sp"; + default: + return "?"; + } +} + +const char *orc_type_name(unsigned int type) +{ + switch (type) { + case UNWIND_HINT_TYPE_CALL: + return "call"; + default: + return "?"; + } +} + +void orc_print_reg(unsigned int reg, int offset) +{ + if (reg == ORC_REG_UNDEFINED) + printf("(und)"); + else + printf("%s%+d", reg_name(reg), offset); +} + +void orc_print_sp(void) +{ + printf(" cfa:"); +} + +void orc_print_fp(void) +{ + printf(" x29:"); +} + +bool orc_ignore_section(struct section *sec) +{ + return !strcmp(sec->name, ".head.text"); +} diff --git a/tools/objtool/dcheck.c b/tools/objtool/dcheck.c index 3cd9ae0d2cd3..f356220509cb 100644 --- a/tools/objtool/dcheck.c +++ b/tools/objtool/dcheck.c @@ -346,5 +346,8 @@ int check(struct objtool_file *file) walk_sections(file); - return 0; + if (opts.orc) + ret = orc_create(file); + + return ret; } diff --git a/tools/objtool/include/objtool/insn.h b/tools/objtool/include/objtool/insn.h index cc54e72234d8..d19cd2c12f06 100644 --- a/tools/objtool/include/objtool/insn.h +++ 
b/tools/objtool/include/objtool/insn.h @@ -104,6 +104,7 @@ struct instruction *prev_insn_same_sym(struct objtool_file *file, struct instruction *insn); struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn); +bool insn_can_reloc(struct instruction *insn); bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2, bool print); bool is_first_func_insn(struct objtool_file *file, diff --git a/tools/objtool/include/objtool/objtool.h b/tools/objtool/include/objtool/objtool.h index 94a33ee7b363..e5f93745aa26 100644 --- a/tools/objtool/include/objtool/objtool.h +++ b/tools/objtool/include/objtool/objtool.h @@ -46,5 +46,6 @@ void objtool_pv_add(struct objtool_file *file, int idx, struct symbol *func); int check(struct objtool_file *file); int orc_dump(const char *objname); int orc_create(struct objtool_file *file); +bool orc_ignore_section(struct section *sec); #endif /* _OBJTOOL_H */ diff --git a/tools/objtool/insn.c b/tools/objtool/insn.c index c26e2e43c5da..b63ec049696a 100644 --- a/tools/objtool/insn.c +++ b/tools/objtool/insn.c @@ -194,3 +194,23 @@ bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2, return false; } + +/* + * This is a hack for Clang. Clang is aggressive about removing section + * symbols and then some. If we cannot find something to relocate an + * instruction against, we must not generate CFI for it or the ORC + * generation will fail later. + */ +bool insn_can_reloc(struct instruction *insn) +{ + struct section *insn_sec = insn->sec; + unsigned long insn_off = insn->offset; + + if (insn_sec->sym || + find_symbol_containing(insn_sec, insn_off) || + find_symbol_containing(insn_sec, insn_off - 1)) { + /* See elf_add_reloc_to_insn(). 
*/ + return true; + } + return false; +} diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index f90770a71080..a146666ea9d1 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -14,6 +14,11 @@ #include #include +bool __weak orc_ignore_section(struct section *sec) +{ + return false; +} + static int write_orc_entry(struct elf *elf, struct section *orc_sec, struct section *ip_sec, unsigned int idx, struct section *insn_sec, unsigned long insn_off, @@ -86,13 +91,16 @@ int orc_create(struct objtool_file *file) struct instruction *insn; bool empty = true; - if (!sec->text) + if (!sec->text || orc_ignore_section(sec)) continue; sec_for_each_insn(file, sec, insn) { struct alt_group *alt_group = insn->alt_group; int i; + if (!insn_can_reloc(insn)) + continue; + if (!alt_group) { if (init_orc_entry(&orc, insn->cfi, insn)) return -1; @@ -136,7 +144,7 @@ int orc_create(struct objtool_file *file) } /* Add a section terminator */ - if (!empty) { + if (!empty && sec->sym) { orc_list_add(&orc_list, &null, sec, sec->sh.sh_size); nr++; } diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh index bc3613c10dff..29f06de57659 100755 --- a/tools/objtool/sync-check.sh +++ b/tools/objtool/sync-check.sh @@ -29,6 +29,13 @@ arch/x86/lib/insn.c ' fi +if [ "$SRCARCH" = "arm64" ]; then +FILES="$FILES +arch/arm64/include/asm/orc_types.h +include/linux/orc_entry.h +" +fi + check_2 () { file1=$1 file2=$2 -- Gitee From cbd215a42c63f587785193ba20462da72a970788 Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Tue, 10 Jan 2023 16:20:29 -0600 Subject: [PATCH 1509/2138] objtool: arm64: Add unwind hint support ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Implement the unwind hint macros for ARM64. Define the unwind hint types as well. Process the unwind hints section for dynamic FP validation for ARM64. Signed-off-by: Madhavan T. 
Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/arm64/include/asm/unwind_hints.h | 99 +++++++++++++++++++++ include/linux/objtool_types.h | 3 + tools/arch/arm64/include/asm/unwind_hints.h | 99 +++++++++++++++++++++ tools/include/linux/objtool_types.h | 3 + tools/objtool/Build | 2 +- tools/objtool/arch/arm64/decode.c | 21 +++++ tools/objtool/arch/arm64/orc.c | 4 + tools/objtool/dcheck.c | 4 + tools/objtool/include/objtool/endianness.h | 2 + tools/objtool/sync-check.sh | 1 + tools/objtool/unwind_hints.c | 20 +++-- 11 files changed, 249 insertions(+), 9 deletions(-) create mode 100644 arch/arm64/include/asm/unwind_hints.h create mode 100644 tools/arch/arm64/include/asm/unwind_hints.h diff --git a/arch/arm64/include/asm/unwind_hints.h b/arch/arm64/include/asm/unwind_hints.h new file mode 100644 index 000000000000..e11a0586b434 --- /dev/null +++ b/arch/arm64/include/asm/unwind_hints.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_ARM64_UNWIND_HINTS_H +#define _ASM_ARM64_UNWIND_HINTS_H + +#include + +#include "orc_types.h" + +#ifdef CONFIG_STACK_VALIDATION + +#ifndef __ASSEMBLY__ + +#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \ + "987: \n\t" \ + ".pushsection .discard.unwind_hints\n\t" \ + /* struct unwind_hint */ \ + ".long 987b - .\n\t" \ + ".short " __stringify(sp_offset) "\n\t" \ + ".byte " __stringify(sp_reg) "\n\t" \ + ".byte " __stringify(type) "\n\t" \ + ".byte " __stringify(signal) "\n\t" \ + ".balign 4 \n\t" \ + ".popsection\n\t" + +#else /* __ASSEMBLY__ */ + +/* + * In asm, there are two kinds of code: normal C-type callable functions and + * the rest. The normal callable functions can be called by other code, and + * don't do anything unusual with the stack. Such normal callable functions + * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this + * category. 
In this case, no special debugging annotations are needed because + * objtool can automatically generate the ORC data for the ORC unwinder to read + * at runtime. + * + * Anything which doesn't fall into the above category, such as syscall and + * interrupt handlers, tends to not be called directly by other functions, and + * often does unusual non-C-function-type things with the stack pointer. Such + * code needs to be annotated such that objtool can understand it. The + * following CFI hint macros are for this type of code. + * + * These macros provide hints to objtool about the state of the stack at each + * instruction. Objtool starts from the hints and follows the code flow, + * making automatic CFI adjustments when it sees pushes and pops, filling out + * the debuginfo as necessary. It will also warn if it sees any + * inconsistencies. + */ +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 +.Lhere_\@: + .pushsection .discard.unwind_hints + /* struct unwind_hint */ + .long .Lhere_\@ - . 
+ .short \sp_offset + .byte \sp_reg + .byte \type + .byte \signal + .balign 4 + .popsection +.endm + +#endif /* __ASSEMBLY__ */ + +#else /* !CONFIG_STACK_VALIDATION */ + +#ifndef __ASSEMBLY__ + +#define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t" +#else +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 +.endm +#endif + +#endif /* CONFIG_STACK_VALIDATION */ +#ifdef __ASSEMBLY__ + +.macro UNWIND_HINT_FTRACE, offset + .set sp_reg, ORC_REG_SP + .set sp_offset, \offset + .set type, UNWIND_HINT_TYPE_FTRACE + UNWIND_HINT type=type sp_reg=sp_reg sp_offset=sp_offset +.endm + +.macro UNWIND_HINT_REGS, offset + .set sp_reg, ORC_REG_SP + .set sp_offset, \offset + .set type, UNWIND_HINT_TYPE_REGS + UNWIND_HINT type=type sp_reg=sp_reg sp_offset=sp_offset +.endm + +.macro UNWIND_HINT_IRQ, offset + .set sp_reg, ORC_REG_SP + .set sp_offset, \offset + .set type, UNWIND_HINT_TYPE_IRQ_STACK + UNWIND_HINT type=type sp_reg=sp_reg sp_offset=sp_offset +.endm + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_ARM64_UNWIND_HINTS_H */ diff --git a/include/linux/objtool_types.h b/include/linux/objtool_types.h index 453a4f4ef39d..ca51cc50dc82 100644 --- a/include/linux/objtool_types.h +++ b/include/linux/objtool_types.h @@ -43,6 +43,8 @@ struct unwind_hint { * * UNWIND_HINT_TYPE_{SAVE,RESTORE}: Save the unwind metadata at a certain * location so that it can be restored later. + * + * UNWIND_HINT_TYPE_IRQ_STACK: Used to unwind through the IRQ stack. 
*/ #define UNWIND_HINT_TYPE_UNDEFINED 0 #define UNWIND_HINT_TYPE_END_OF_STACK 1 @@ -53,5 +55,6 @@ struct unwind_hint { #define UNWIND_HINT_TYPE_FUNC 5 #define UNWIND_HINT_TYPE_SAVE 6 #define UNWIND_HINT_TYPE_RESTORE 7 +#define UNWIND_HINT_TYPE_IRQ_STACK 8 #endif /* _LINUX_OBJTOOL_TYPES_H */ diff --git a/tools/arch/arm64/include/asm/unwind_hints.h b/tools/arch/arm64/include/asm/unwind_hints.h new file mode 100644 index 000000000000..e11a0586b434 --- /dev/null +++ b/tools/arch/arm64/include/asm/unwind_hints.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_ARM64_UNWIND_HINTS_H +#define _ASM_ARM64_UNWIND_HINTS_H + +#include + +#include "orc_types.h" + +#ifdef CONFIG_STACK_VALIDATION + +#ifndef __ASSEMBLY__ + +#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \ + "987: \n\t" \ + ".pushsection .discard.unwind_hints\n\t" \ + /* struct unwind_hint */ \ + ".long 987b - .\n\t" \ + ".short " __stringify(sp_offset) "\n\t" \ + ".byte " __stringify(sp_reg) "\n\t" \ + ".byte " __stringify(type) "\n\t" \ + ".byte " __stringify(signal) "\n\t" \ + ".balign 4 \n\t" \ + ".popsection\n\t" + +#else /* __ASSEMBLY__ */ + +/* + * In asm, there are two kinds of code: normal C-type callable functions and + * the rest. The normal callable functions can be called by other code, and + * don't do anything unusual with the stack. Such normal callable functions + * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this + * category. In this case, no special debugging annotations are needed because + * objtool can automatically generate the ORC data for the ORC unwinder to read + * at runtime. + * + * Anything which doesn't fall into the above category, such as syscall and + * interrupt handlers, tends to not be called directly by other functions, and + * often does unusual non-C-function-type things with the stack pointer. Such + * code needs to be annotated such that objtool can understand it. 
The + * following CFI hint macros are for this type of code. + * + * These macros provide hints to objtool about the state of the stack at each + * instruction. Objtool starts from the hints and follows the code flow, + * making automatic CFI adjustments when it sees pushes and pops, filling out + * the debuginfo as necessary. It will also warn if it sees any + * inconsistencies. + */ +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 +.Lhere_\@: + .pushsection .discard.unwind_hints + /* struct unwind_hint */ + .long .Lhere_\@ - . + .short \sp_offset + .byte \sp_reg + .byte \type + .byte \signal + .balign 4 + .popsection +.endm + +#endif /* __ASSEMBLY__ */ + +#else /* !CONFIG_STACK_VALIDATION */ + +#ifndef __ASSEMBLY__ + +#define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t" +#else +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 +.endm +#endif + +#endif /* CONFIG_STACK_VALIDATION */ +#ifdef __ASSEMBLY__ + +.macro UNWIND_HINT_FTRACE, offset + .set sp_reg, ORC_REG_SP + .set sp_offset, \offset + .set type, UNWIND_HINT_TYPE_FTRACE + UNWIND_HINT type=type sp_reg=sp_reg sp_offset=sp_offset +.endm + +.macro UNWIND_HINT_REGS, offset + .set sp_reg, ORC_REG_SP + .set sp_offset, \offset + .set type, UNWIND_HINT_TYPE_REGS + UNWIND_HINT type=type sp_reg=sp_reg sp_offset=sp_offset +.endm + +.macro UNWIND_HINT_IRQ, offset + .set sp_reg, ORC_REG_SP + .set sp_offset, \offset + .set type, UNWIND_HINT_TYPE_IRQ_STACK + UNWIND_HINT type=type sp_reg=sp_reg sp_offset=sp_offset +.endm + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_ARM64_UNWIND_HINTS_H */ diff --git a/tools/include/linux/objtool_types.h b/tools/include/linux/objtool_types.h index 453a4f4ef39d..ca51cc50dc82 100644 --- a/tools/include/linux/objtool_types.h +++ b/tools/include/linux/objtool_types.h @@ -43,6 +43,8 @@ struct unwind_hint { * * UNWIND_HINT_TYPE_{SAVE,RESTORE}: Save the unwind metadata at a certain * location so that it can be restored later. 
+ * + * UNWIND_HINT_TYPE_IRQ_STACK: Used to unwind through the IRQ stack. */ #define UNWIND_HINT_TYPE_UNDEFINED 0 #define UNWIND_HINT_TYPE_END_OF_STACK 1 @@ -53,5 +55,6 @@ struct unwind_hint { #define UNWIND_HINT_TYPE_FUNC 5 #define UNWIND_HINT_TYPE_SAVE 6 #define UNWIND_HINT_TYPE_RESTORE 7 +#define UNWIND_HINT_TYPE_IRQ_STACK 8 #endif /* _LINUX_OBJTOOL_TYPES_H */ diff --git a/tools/objtool/Build b/tools/objtool/Build index 5dbab24ad586..9da7ebdae86c 100644 --- a/tools/objtool/Build +++ b/tools/objtool/Build @@ -9,7 +9,7 @@ objtool-y += builtin-check.o objtool-y += cfi.o objtool-y += insn.o objtool-y += decode.o -objtool-$(STATIC_CHECK) += unwind_hints.o +objtool-y += unwind_hints.o objtool-y += elf.o objtool-y += objtool.o diff --git a/tools/objtool/arch/arm64/decode.c b/tools/objtool/arch/arm64/decode.c index 1668c288786c..19ef9b22a734 100644 --- a/tools/objtool/arch/arm64/decode.c +++ b/tools/objtool/arch/arm64/decode.c @@ -17,6 +17,8 @@ #include #include +#include + /* ARM64 instructions are all 4 bytes wide. 
*/ #define INSN_SIZE 4 @@ -47,6 +49,25 @@ unsigned long arch_jump_destination(struct instruction *insn) return insn->offset + insn->immediate; } +int arch_decode_hint_reg(u8 sp_reg, int *base) +{ + switch (sp_reg) { + case ORC_REG_UNDEFINED: + *base = CFI_UNDEFINED; + break; + case ORC_REG_SP: + *base = CFI_SP; + break; + case ORC_REG_FP: + *base = CFI_FP; + break; + default: + return -1; + } + + return 0; +} + /* --------------------- instruction decode structs ------------------------ */ struct decode_var { diff --git a/tools/objtool/arch/arm64/orc.c b/tools/objtool/arch/arm64/orc.c index 82c73abedf6a..98e930991ef1 100644 --- a/tools/objtool/arch/arm64/orc.c +++ b/tools/objtool/arch/arm64/orc.c @@ -58,6 +58,10 @@ const char *orc_type_name(unsigned int type) switch (type) { case UNWIND_HINT_TYPE_CALL: return "call"; + case UNWIND_HINT_TYPE_REGS: + return "regs"; + case UNWIND_HINT_TYPE_IRQ_STACK: + return "irqstack"; default: return "?"; } diff --git a/tools/objtool/dcheck.c b/tools/objtool/dcheck.c index f356220509cb..39dcd0a30f46 100644 --- a/tools/objtool/dcheck.c +++ b/tools/objtool/dcheck.c @@ -346,6 +346,10 @@ int check(struct objtool_file *file) walk_sections(file); + ret = read_unwind_hints(file); + if (ret) + return ret; + if (opts.orc) ret = orc_create(file); diff --git a/tools/objtool/include/objtool/endianness.h b/tools/objtool/include/objtool/endianness.h index 4d2aa9b0fe2f..8ea0818bd9a0 100644 --- a/tools/objtool/include/objtool/endianness.h +++ b/tools/objtool/include/objtool/endianness.h @@ -29,6 +29,8 @@ static inline bool need_bswap(struct elf *elf) __ret = __need_bswap ? bswap_32(val) : (val); break; \ case 2: \ __ret = __need_bswap ? 
bswap_16(val) : (val); break; \ + case 1: \ + __ret = (val); break; \ default: \ BUILD_BUG(); break; \ } \ diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh index 29f06de57659..9bacf219bfe0 100755 --- a/tools/objtool/sync-check.sh +++ b/tools/objtool/sync-check.sh @@ -31,6 +31,7 @@ fi if [ "$SRCARCH" = "arm64" ]; then FILES="$FILES +arch/arm64/include/asm/unwind_hints.h arch/arm64/include/asm/orc_types.h include/linux/orc_entry.h " diff --git a/tools/objtool/unwind_hints.c b/tools/objtool/unwind_hints.c index 40c54ce21110..c59d259d0392 100644 --- a/tools/objtool/unwind_hints.c +++ b/tools/objtool/unwind_hints.c @@ -16,6 +16,7 @@ int read_unwind_hints(struct objtool_file *file) struct unwind_hint *hint; struct instruction *insn; struct reloc *reloc; + u8 sp_reg, type; int i; sec = find_section_by_name(file->elf, ".discard.unwind_hints"); @@ -37,6 +38,9 @@ int read_unwind_hints(struct objtool_file *file) for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) { hint = (struct unwind_hint *)sec->data->d_buf + i; + sp_reg = bswap_if_needed(file->elf, hint->sp_reg); + type = bswap_if_needed(file->elf, hint->type); + reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); if (!reloc) { WARN("can't find reloc for unwind_hints[%d]", i); @@ -51,23 +55,23 @@ int read_unwind_hints(struct objtool_file *file) insn->hint = true; - if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) { + if (type == UNWIND_HINT_TYPE_UNDEFINED) { insn->cfi = &force_undefined_cfi; continue; } - if (hint->type == UNWIND_HINT_TYPE_SAVE) { + if (type == UNWIND_HINT_TYPE_SAVE) { insn->hint = false; insn->save = true; continue; } - if (hint->type == UNWIND_HINT_TYPE_RESTORE) { + if (type == UNWIND_HINT_TYPE_RESTORE) { insn->restore = true; continue; } - if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) { + if (type == UNWIND_HINT_TYPE_REGS_PARTIAL) { struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); if (sym && sym->bind == STB_GLOBAL) { @@ -77,7 +81,7 @@ 
int read_unwind_hints(struct objtool_file *file) } } - if (hint->type == UNWIND_HINT_TYPE_FUNC) { + if (type == UNWIND_HINT_TYPE_FUNC) { insn->cfi = &func_cfi; continue; } @@ -85,13 +89,13 @@ int read_unwind_hints(struct objtool_file *file) if (insn->cfi) cfi = *(insn->cfi); - if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { - WARN_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg); + if (arch_decode_hint_reg(sp_reg, &cfi.cfa.base)) { + WARN_INSN(insn, "unsupported unwind_hint sp base reg %d", sp_reg); return -1; } cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset); - cfi.type = hint->type; + cfi.type = type; cfi.signal = hint->signal; insn->cfi = cfi_hash_find_or_add(&cfi); -- Gitee From 83d071169be386d70979d394527ab35791aba19d Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Fri, 6 Jan 2023 12:11:41 -0600 Subject: [PATCH 1510/2138] arm64: Add unwind hints to exception handlers ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Add unwind hints to Interrupt and Exception handlers. Signed-off-by: Madhavan T. 
Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/arm64/kernel/entry.S | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 7fcbee0f6c0e..b69ffa44c031 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -28,6 +28,7 @@ #include #include #include +#include .macro clear_gp_regs .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 @@ -578,6 +579,7 @@ SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label) .if \el == 0 b ret_to_user .else + UNWIND_HINT_REGS PT_REGS_SIZE b ret_to_kernel .endif SYM_CODE_END(el\el\ht\()_\regsize\()_\label) @@ -888,6 +890,7 @@ SYM_FUNC_START(call_on_irq_stack) /* Move to the new stack and call the function there */ add sp, x16, #IRQ_STACK_SIZE blr x1 + UNWIND_HINT_IRQ 16 /* * Restore the SP from the FP, and restore the FP and LR from the frame -- Gitee From 14ebc36c57295061cebe94381dec2ca3863b4eb2 Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Sun, 22 May 2022 12:32:52 -0500 Subject: [PATCH 1511/2138] arm64: Add kernel and module support for ORC ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Call orc_lookup_init() from setup_arch() to perform ORC lookup initialization for vmlinux. Call orc_lookup_module_init() in module load to perform ORC lookup initialization for modules. Signed-off-by: Madhavan T. 
Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/arm64/kernel/module.c | 13 ++++++++++++- arch/arm64/kernel/setup.c | 2 ++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index dd851297596e..09251ad0ff4e 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -25,6 +25,7 @@ #include #include #include +#include static u64 module_direct_base __ro_after_init = 0; static u64 module_plt_base __ro_after_init = 0; @@ -587,7 +588,8 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) { - const Elf_Shdr *s; + const Elf_Shdr *s, *orc, *orc_ip; + s = find_section(hdr, sechdrs, ".altinstructions"); if (s) apply_alternatives_module((void *)s->sh_addr, s->sh_size); @@ -598,5 +600,14 @@ int module_finalize(const Elf_Ehdr *hdr, scs_patch((void *)s->sh_addr, s->sh_size); } + orc = find_section(hdr, sechdrs, ".orc_unwind"); + orc_ip = find_section(hdr, sechdrs, ".orc_unwind_ip"); + + if (orc && orc_ip) { + orc_lookup_module_init(me, + (void *)orc_ip->sh_addr, orc_ip->sh_size, + (void *)orc->sh_addr, orc->sh_size); + } + return module_init_ftrace_plt(hdr, sechdrs, me); } diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 040b0175334c..90a2e3aceb00 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -53,6 +53,7 @@ #include #include #include +#include static int num_standard_resources; static struct resource *standard_resources; @@ -390,6 +391,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) "This indicates a broken bootloader or old kernel\n", boot_args[1], boot_args[2], boot_args[3]); } + orc_lookup_init(); } static inline bool cpu_can_disable(unsigned int cpu) -- Gitee From 309c87da3b7298c20f0f5c2de5898a76cd4f5664 Mon Sep 17 00:00:00 2001 From: "Madhavan T. 
Venkataraman" Date: Sun, 29 Jan 2023 12:54:06 -0600 Subject: [PATCH 1512/2138] arm64: Build the kernel with ORC information ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Add code to scripts/Makefile.lib to define objtool options to generate ORC data for frame pointer validation. Define kernel configs: - to enable dynamic FRAME_POINTER_VALIDATION - to enable the generation of ORC data using objtool When these configs are enabled, objtool is invoked on relocatable files during kernel build with the following command: objtool --stackval --orc Objtool creates special sections in the object files: .orc_unwind_ip PC array. .orc_unwind ORC structure table. .orc_lookup ORC lookup table. Change arch/arm64/kernel/vmlinux.lds.S to include ORC_UNWIND_TABLE in the data section so that the special sections get included there. For modules, these sections will be added to the kernel during module load. In the future, the kernel can use these sections to find the ORC for a given instruction address. The unwinder can then compute the FP at an instruction address and validate the actual FP with that. Signed-off-by: Madhavan T. 
Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/arm64/Kconfig | 2 ++ arch/arm64/Kconfig.debug | 32 ++++++++++++++++++++++++++++++++ arch/arm64/include/asm/module.h | 7 +++++++ arch/arm64/kernel/vmlinux.lds.S | 3 +++ include/linux/objtool.h | 2 ++ scripts/Makefile | 4 +++- scripts/Makefile.lib | 9 +++++++++ 7 files changed, 58 insertions(+), 1 deletion(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 931d0dcd12fd..ea02335e54dd 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -257,6 +257,8 @@ config ARM64 select TRACE_IRQFLAGS_SUPPORT select TRACE_IRQFLAGS_NMI_SUPPORT select HAVE_SOFTIRQ_ON_OWN_STACK + select HAVE_STACK_VALIDATION if FRAME_POINTER_VALIDATION + select STACK_VALIDATION if HAVE_STACK_VALIDATION help ARM 64-bit (AArch64) Linux support. diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index 265c4461031f..a50caabdb18e 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -20,4 +20,36 @@ config ARM64_RELOC_TEST depends on m tristate "Relocation testing module" +config UNWINDER_ORC + bool "ORC unwinder" + depends on FRAME_POINTER_VALIDATION + select HAVE_MOD_ARCH_SPECIFIC + select OBJTOOL + help + This option enables ORC (Oops Rewind Capability) for ARM64. This + allows the unwinder to look up ORC data for an instruction address + and compute the frame pointer at that address. The computed frame + pointer is used to validate the actual frame pointer. + +config UNWINDER_FRAME_POINTER + bool "Frame pointer unwinder" + depends on FRAME_POINTER_VALIDATION + select FRAME_POINTER + help + ARM64 already uses the frame pointer for unwinding kernel stack + traces. We need to enable this config to enable STACK_VALIDATION. + STACK_VALIDATION is needed to get objtool to do static analysis + of kernel code. 
+ +config FRAME_POINTER_VALIDATION + bool "Dynamic Frame pointer validation" + select UNWINDER_FRAME_POINTER + select UNWINDER_ORC + help + This invokes objtool on every object file causing it to + generate ORC data for the object file. ORC data is in a custom + data format which is a simplified version of the DWARF + Call Frame Information standard. See UNWINDER_ORC for more + details. + source "drivers/hwtracing/coresight/Kconfig" diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h index bfa6638b4c93..57e97c23e768 100644 --- a/arch/arm64/include/asm/module.h +++ b/arch/arm64/include/asm/module.h @@ -6,6 +6,7 @@ #define __ASM_MODULE_H #include +#include struct mod_plt_sec { int plt_shndx; @@ -19,6 +20,12 @@ struct mod_arch_specific { /* for CONFIG_DYNAMIC_FTRACE */ struct plt_entry *ftrace_trampolines; + +#ifdef CONFIG_UNWINDER_ORC + unsigned int num_orcs; + int *orc_unwind_ip; + struct orc_entry *orc_unwind; +#endif }; u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs, diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index a553dae9a0d4..42af43ce8d1b 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -61,6 +61,7 @@ #define RUNTIME_DISCARD_EXIT #include +#include #include #include #include @@ -310,6 +311,8 @@ SECTIONS __mmuoff_data_end = .; } + ORC_UNWIND_TABLE + PECOFF_EDATA_PADDING __pecoff_data_rawsize = ABSOLUTE(. 
- __initdata_begin); _edata = .; diff --git a/include/linux/objtool.h b/include/linux/objtool.h index 82bf1042a7cf..865caa2d1232 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -6,7 +6,9 @@ #ifdef CONFIG_OBJTOOL +#ifndef CONFIG_ARM64 #include +#endif #ifndef __ASSEMBLY__ diff --git a/scripts/Makefile b/scripts/Makefile index 576cf64be667..72af39a5945c 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -32,8 +32,10 @@ HOSTLDLIBS_sign-file = $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null | ifdef CONFIG_UNWINDER_ORC ifeq ($(ARCH),x86_64) ARCH := x86 -endif HOSTCFLAGS_sorttable.o += -I$(srctree)/tools/arch/x86/include +else +HOSTCFLAGS_sorttable.o += -I$(srctree)/tools/arch/$(ARCH)/include +endif HOSTCFLAGS_sorttable.o += -DUNWINDER_ORC_ENABLED endif diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index e702552fb131..f614b138b046 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -252,6 +252,13 @@ ifdef CONFIG_OBJTOOL objtool := $(objtree)/tools/objtool/objtool +ifdef CONFIG_FRAME_POINTER_VALIDATION + +objtool-args-$(CONFIG_STACK_VALIDATION) += --stackval +objtool-args-$(CONFIG_UNWINDER_ORC) += --orc + +else + objtool-args-$(CONFIG_HAVE_JUMP_LABEL_HACK) += --hacks=jump_label objtool-args-$(CONFIG_HAVE_NOINSTR_HACK) += --hacks=noinstr objtool-args-$(CONFIG_CALL_DEPTH_TRACKING) += --hacks=skylake @@ -271,6 +278,8 @@ objtool-args-$(CONFIG_HAVE_UACCESS_VALIDATION) += --uaccess objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable objtool-args-$(CONFIG_PREFIX_SYMBOLS) += --prefix=$(CONFIG_FUNCTION_PADDING_BYTES) +endif + objtool-args = $(objtool-args-y) \ $(if $(delay-objtool), --link) \ $(if $(part-of-module), --module) -- Gitee From cd21ee0249f2eb3527000ecffe415aa48cf512c4 Mon Sep 17 00:00:00 2001 From: "Madhavan T. 
Venkataraman" Date: Sun, 29 Jan 2023 12:58:52 -0600 Subject: [PATCH 1513/2138] arm64: unwinder: Add a reliability check in the unwinder based on ORC ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Introduce a reliability flag in struct unwind_state. This will be set to false if the PC does not have a valid ORC or if the frame pointer computed from the ORC does not match the actual frame pointer. Now that the unwinder can validate the frame pointer, introduce arch_stack_walk_reliable(). Signed-off-by: Madhavan T. Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/arm64/include/asm/stacktrace/common.h | 15 ++ arch/arm64/kernel/stacktrace.c | 178 +++++++++++++++++++-- 2 files changed, 182 insertions(+), 11 deletions(-) diff --git a/arch/arm64/include/asm/stacktrace/common.h b/arch/arm64/include/asm/stacktrace/common.h index 508f734de46e..064aaf5dc3a0 100644 --- a/arch/arm64/include/asm/stacktrace/common.h +++ b/arch/arm64/include/asm/stacktrace/common.h @@ -11,6 +11,7 @@ #include #include +#include struct stack_info { unsigned long low; @@ -23,6 +24,7 @@ struct stack_info { * @fp: The fp value in the frame record (or the real fp) * @pc: The lr value in the frame record (or the real lr) * + * @prev_pc: The lr value in the previous frame record. * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance * associated with the most recently encountered replacement lr * value. @@ -32,10 +34,15 @@ struct stack_info { * @stack: The stack currently being unwound. * @stacks: An array of stacks which can be unwound. * @nr_stacks: The number of stacks in @stacks. + * + * @cfa: The sp value at the call site of the current function. + * @unwind_type The previous frame's unwind type. + * @reliable: Stack trace is reliable. 
*/ struct unwind_state { unsigned long fp; unsigned long pc; + unsigned long prev_pc; #ifdef CONFIG_KRETPROBES struct llist_node *kr_cur; #endif @@ -44,6 +51,9 @@ struct unwind_state { struct stack_info stack; struct stack_info *stacks; int nr_stacks; + unsigned long cfa; + int unwind_type; + bool reliable; }; static inline struct stack_info stackinfo_get_unknown(void) @@ -70,11 +80,15 @@ static inline void unwind_init_common(struct unwind_state *state, struct task_struct *task) { state->task = task; + state->prev_pc = 0; #ifdef CONFIG_KRETPROBES state->kr_cur = NULL; #endif state->stack = stackinfo_get_unknown(); + state->reliable = true; + state->cfa = 0; + state->unwind_type = UNWIND_HINT_TYPE_CALL; } static struct stack_info *unwind_find_next_stack(const struct unwind_state *state, @@ -167,6 +181,7 @@ unwind_next_frame_record(struct unwind_state *state) /* * Record this frame record's values. */ + state->prev_pc = state->pc; state->fp = READ_ONCE(*(unsigned long *)(fp)); state->pc = READ_ONCE(*(unsigned long *)(fp + 8)); diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 17f66a74c745..71157c0eb77b 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -5,6 +5,8 @@ * Copyright (C) 2012 ARM Ltd. */ #include +#include +#include #include #include #include @@ -18,6 +20,122 @@ #include #include +static inline bool unwind_completed(struct unwind_state *state) +{ + if (state->fp == (unsigned long)task_pt_regs(state->task)->stackframe) { + /* Final frame; nothing to unwind */ + return true; + } + return false; +} + +#ifdef CONFIG_FRAME_POINTER_VALIDATION + +static void unwind_check_reliable(struct unwind_state *state) +{ + unsigned long pc, fp; + struct orc_entry *orc; + bool adjust_pc = false; + + if (unwind_completed(state)) + return; + + /* + * If a previous frame was unreliable, the CFA cannot be reliably + * computed anymore. 
+ */ + if (!state->reliable) + return; + + pc = state->pc; + + /* Don't let modules unload while we're reading their ORC data. */ + preempt_disable(); + + orc = orc_find(pc); + if (!orc || (!orc->fp_offset && orc->type == UNWIND_HINT_TYPE_CALL)) { + /* + * If the final instruction in a function happens to be a call + * instruction, the return address would fall outside of the + * function. That could be the case here. This can happen, for + * instance, if the called function is a "noreturn" function. + * The compiler can optimize away the instructions after the + * call. So, adjust the PC so it falls inside the function and + * retry. + * + * We only do this if the current and the previous frames + * are call frames and not hint frames. + */ + if (state->unwind_type == UNWIND_HINT_TYPE_CALL) { + pc -= 4; + adjust_pc = true; + orc = orc_find(pc); + } + } + if (!orc) { + state->reliable = false; + goto out; + } + state->unwind_type = orc->type; + + if (!state->cfa) { + /* Set up the initial CFA and return. */ + state->cfa = state->fp - orc->fp_offset; + goto out; + } + + /* Compute the next CFA and FP. */ + switch (orc->type) { + case UNWIND_HINT_TYPE_CALL: + /* Normal call */ + state->cfa += orc->sp_offset; + fp = state->cfa + orc->fp_offset; + break; + + case UNWIND_HINT_TYPE_REGS: + /* + * pt_regs hint: The frame pointer points to either the + * synthetic frame within pt_regs or to the place where + * x29 and x30 are saved in the register save area in + * pt_regs. + */ + state->cfa += orc->sp_offset; + fp = state->cfa + offsetof(struct pt_regs, stackframe) - + sizeof(struct pt_regs); + if (state->fp != fp) { + fp = state->cfa + offsetof(struct pt_regs, regs[29]) - + sizeof(struct pt_regs); + } + break; + + case UNWIND_HINT_TYPE_IRQ_STACK: + /* Hint to unwind from the IRQ stack to the task stack. */ + state->cfa = state->fp + orc->sp_offset; + fp = state->fp; + break; + + default: + fp = 0; + break; + } + + /* Validate the actual FP with the computed one. 
*/ + if (state->fp != fp) + state->reliable = false; +out: + if (state->reliable && adjust_pc) + state->pc = pc; + preempt_enable(); +} + +#else /* !CONFIG_FRAME_POINTER_VALIDATION */ + +static void unwind_check_reliable(struct unwind_state *state) +{ +} + +#endif /* CONFIG_FRAME_POINTER_VALIDATION */ + /* * Start an unwind from a pt_regs. * @@ -108,12 +226,9 @@ unwind_recover_return_address(struct unwind_state *state) static __always_inline int unwind_next(struct unwind_state *state) { - struct task_struct *tsk = state->task; - unsigned long fp = state->fp; int err; - /* Final frame; nothing to unwind */ - if (fp == (unsigned long)task_pt_regs(tsk)->stackframe) + if (unwind_completed(state)) return -ENOENT; err = unwind_next_frame_record(state); @@ -125,22 +240,28 @@ unwind_next(struct unwind_state *state) return unwind_recover_return_address(state); } -static __always_inline void -unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry, - void *cookie) +static __always_inline int +unwind(struct unwind_state *state, bool need_reliable, + stack_trace_consume_fn consume_entry, void *cookie) { - if (unwind_recover_return_address(state)) - return; + int ret = unwind_recover_return_address(state); + + if (ret) + return ret; while (1) { - int ret; + if (need_reliable && !state->reliable) + return -EINVAL; if (!consume_entry(cookie, state->pc)) break; ret = unwind_next(state); + if (need_reliable && !ret) + unwind_check_reliable(state); if (ret < 0) break; } + return ret; } /* @@ -205,7 +326,42 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, unwind_init_from_task(&state, task); } - unwind(&state, consume_entry, cookie); + unwind(&state, false, consume_entry, cookie); +} + +noinline notrace int arch_stack_walk_reliable( + stack_trace_consume_fn consume_entry, + void *cookie, struct task_struct *task) +{ + struct stack_info stacks[] = { + stackinfo_get_task(task), + STACKINFO_CPU(irq), +#if defined(CONFIG_VMAP_STACK) + 
STACKINFO_CPU(overflow), +#endif +#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE) + STACKINFO_SDEI(normal), + STACKINFO_SDEI(critical), +#endif +#ifdef CONFIG_EFI + STACKINFO_EFI, +#endif + }; + struct unwind_state state = { + .stacks = stacks, + .nr_stacks = ARRAY_SIZE(stacks), + }; + int ret; + + if (task == current) + unwind_init_from_caller(&state); + else + unwind_init_from_task(&state, task); + unwind_check_reliable(&state); + + ret = unwind(&state, true, consume_entry, cookie); + + return ret == -ENOENT ? 0 : -EINVAL; } static bool dump_backtrace_entry(void *arg, unsigned long where) -- Gitee From 263a5ca3acd8504c2cd44f796e16af6f744f442b Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Sun, 29 Jan 2023 13:27:01 -0600 Subject: [PATCH 1514/2138] arm64: Define HAVE_DYNAMIC_FTRACE_WITH_ARGS ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ - Define HAVE_DYNAMIC_FTRACE_WITH_ARGS to support livepatch. - Supply the arch code for HAVE_DYNAMIC_FTRACE_WITH_ARGS. Signed-off-by: Madhavan T. Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/arm64/Kconfig.debug | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index a50caabdb18e..6d5dc90a0a52 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug @@ -45,6 +45,7 @@ config FRAME_POINTER_VALIDATION bool "Dynamic Frame pointer validation" select UNWINDER_FRAME_POINTER select UNWINDER_ORC + select HAVE_DYNAMIC_FTRACE_WITH_ARGS help This invokes objtool on every object file causing it to generate ORC data for the object file. ORC data is in a custom -- Gitee From ba7e45349f1b00c84543680a386fbff81d1cf531 Mon Sep 17 00:00:00 2001 From: "Madhavan T. 
Venkataraman" Date: Sun, 29 Jan 2023 14:58:42 -0600 Subject: [PATCH 1515/2138] arm64: Define TIF_PATCH_PENDING for livepatch ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ - Define TIF_PATCH_PENDING in arch/arm64/include/asm/thread_info.h for livepatch. - Check TIF_PATCH_PENDING in do_notify_resume() to patch the current task for livepatch. Signed-off-by: Suraj Jitindar Singh Signed-off-by: Madhavan T. Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/arm64/include/asm/thread_info.h | 4 +++- arch/arm64/kernel/signal.c | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index c57b33de0ed1..b7d2412a0f5f 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -67,6 +67,7 @@ void arch_setup_new_exec(void); #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ #define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */ #define TIF_NOTIFY_SIGNAL 6 /* signal notifications exist */ +#define TIF_PATCH_PENDING 7 /* pending live patching update */ #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ @@ -99,11 +100,12 @@ void arch_setup_new_exec(void); #define _TIF_SVE (1 << TIF_SVE) #define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) +#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \ - _TIF_NOTIFY_SIGNAL) + _TIF_NOTIFY_SIGNAL | _TIF_PATCH_PENDING) #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SYSCALL_TRACEPOINT | 
_TIF_SECCOMP | \ diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 425b1bc17a3f..27528e91b675 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -1298,6 +1299,9 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) (void __user *)NULL, current); } + if (thread_flags & _TIF_PATCH_PENDING) + klp_update_patch_state(current); + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) do_signal(regs); -- Gitee From 833f7d2e7ee6d61e32818d97cc5bf1f6151478e7 Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Sun, 29 Jan 2023 15:42:10 -0600 Subject: [PATCH 1516/2138] arm64: Enable livepatch for ARM64 ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Enable livepatch in arch/arm64/Kconfig. As the invoke_syscall will generate instructions to add random offset in sp when RANDOMIZE_KSTACK_OFFSET=y, this will make reliability check failed in livepatch selftest [1]. So when LIVEPATCH support is enabled the RANDOMIZE_KSTACK_OFFSET support will be disabled. [1] https://lore.kernel.org/linux-arm-kernel/TYCPR01MB6993C08EEC0FF360E3FBC2A3E5B39@TYCPR01MB6993.jpnprd01.prod.outlook.com/ Signed-off-by: Madhavan T. 
Venkataraman Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/arm64/Kconfig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index ea02335e54dd..c0db32c45f57 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -174,7 +174,7 @@ config ARM64 select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT select HAVE_ARCH_PREL32_RELOCATIONS - select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET + select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET if !HAVE_LIVEPATCH select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_STACKLEAK select HAVE_ARCH_THREAD_STRUCT_WHITELIST @@ -259,6 +259,8 @@ config ARM64 select HAVE_SOFTIRQ_ON_OWN_STACK select HAVE_STACK_VALIDATION if FRAME_POINTER_VALIDATION select STACK_VALIDATION if HAVE_STACK_VALIDATION + select HAVE_RELIABLE_STACKTRACE if STACK_VALIDATION + select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_ARGS && HAVE_RELIABLE_STACKTRACE help ARM 64-bit (AArch64) Linux support. 
@@ -2406,3 +2408,4 @@ source "drivers/acpi/Kconfig" source "arch/arm64/kvm/Kconfig" +source "kernel/livepatch/Kconfig" -- Gitee From 8c89abf054a494a9c0b5435e07e5bcd0ea8568f7 Mon Sep 17 00:00:00 2001 From: Wei Chen Date: Wed, 22 May 2024 19:09:01 +0800 Subject: [PATCH 1517/2138] arm64/orc: Add ELF section with ORC version identifier ANBZ: #9262 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/arm64/Makefile | 12 ++++++++++++ arch/arm64/include/asm/orc_header.h | 19 +++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 arch/arm64/include/asm/orc_header.h diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 11782860717f..14ffc627e1e5 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -63,6 +63,18 @@ stack_protector_prepare: prepare0 include/generated/asm-offsets.h)) endif +ifdef CONFIG_UNWINDER_ORC +orc_hash_h := arch/$(SRCARCH)/include/generated/asm/orc_hash.h +orc_hash_sh := $(srctree)/scripts/orc_hash.sh +targets += $(orc_hash_h) +quiet_cmd_orc_hash = GEN $@ + cmd_orc_hash = mkdir -p $(dir $@); \ + $(CONFIG_SHELL) $(orc_hash_sh) < $< > $@ +$(orc_hash_h): $(srctree)/arch/arm64/include/asm/orc_types.h $(orc_hash_sh) FORCE + $(call if_changed,orc_hash) +prepare: $(orc_hash_h) +endif + ifeq ($(CONFIG_ARM64_BTI_KERNEL),y) KBUILD_CFLAGS += -mbranch-protection=pac-ret+bti else ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y) diff --git a/arch/arm64/include/asm/orc_header.h b/arch/arm64/include/asm/orc_header.h new file mode 100644 index 000000000000..a7857588fb39 --- /dev/null +++ b/arch/arm64/include/asm/orc_header.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#ifndef _ORC_HEADER_H +#define _ORC_HEADER_H + +#include +#include +#include + +/* + * The header is currently a 20-byte hash of the ORC entry definition; see + * scripts/orc_hash.sh. + */ +#define ORC_HEADER \ + __used __section(".orc_header") __aligned(4) \ + static const u8 orc_header[] = { ORC_HASH } + +#endif /* _ORC_HEADER_H */ -- Gitee From 0e0ba31dab528e1f50851942306dfc5db550d608 Mon Sep 17 00:00:00 2001 From: Jia He Date: Fri, 13 Sep 2024 11:49:04 +0000 Subject: [PATCH 1518/2138] crypto: arm64/poly1305 - move data to rodata section ANBZ: #9262 cherry-picked from https://lore.kernel.org/linux-arm-kernel/20240806055444.528932-1-justin.he@arm.com/T/ When objtool gains support for ARM in the future, it may encounter issues disassembling the following data in the .text section: > .Lzeros: > .long 0,0,0,0,0,0,0,0 > .asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm" > .align 2 Move it to .rodata which is a more appropriate section for read-only data. There is a limit on how far the label can be from the instruction, hence use "adrp" and low 12bits offset of the label to avoid the compilation error. 
Signed-off-by: Jia He Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/arm64/crypto/poly1305-armv8.pl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/arm64/crypto/poly1305-armv8.pl b/arch/arm64/crypto/poly1305-armv8.pl index cbc980fb02e3..22c9069c0650 100644 --- a/arch/arm64/crypto/poly1305-armv8.pl +++ b/arch/arm64/crypto/poly1305-armv8.pl @@ -473,7 +473,8 @@ poly1305_blocks_neon: subs $len,$len,#64 ldp x9,x13,[$inp,#48] add $in2,$inp,#96 - adr $zeros,.Lzeros + adrp $zeros,.Lzeros + add $zeros,$zeros,#:lo12:.Lzeros lsl $padbit,$padbit,#24 add x15,$ctx,#48 @@ -885,10 +886,13 @@ poly1305_blocks_neon: ret .size poly1305_blocks_neon,.-poly1305_blocks_neon +.pushsection .rodata .align 5 .Lzeros: .long 0,0,0,0,0,0,0,0 .asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm" +.popsection + .align 2 #if !defined(__KERNEL__) && !defined(_WIN64) .comm OPENSSL_armcap_P,4,4 -- Gitee From 0db04864bc5b53efb19fd589d7fe2d8c55ee9814 Mon Sep 17 00:00:00 2001 From: Jia He Date: Fri, 13 Sep 2024 11:49:14 +0000 Subject: [PATCH 1519/2138] irqchip/gic-v3: Use BUG() instead of unreachable() in gic_dist_base_alias() ANBZ: #9262 cherry-picked from https://lore.kernel.org/linux-arm-kernel/20240806055444.528932-1-justin.he@arm.com/T/ __get_intid_range() might be returned with more values besides SPI_RANGE and ESPI_RANGE. Hence use BUG() instead to avoid the complaining in some static valiation tools. 
Signed-off-by: Jia He Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- anolis/configs/examination/L1-RECOMMEND/arm64.config | 1 + drivers/irqchip/irq-gic-v3.c | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/anolis/configs/examination/L1-RECOMMEND/arm64.config b/anolis/configs/examination/L1-RECOMMEND/arm64.config index 7146c58229ef..0d625ac05fd4 100644 --- a/anolis/configs/examination/L1-RECOMMEND/arm64.config +++ b/anolis/configs/examination/L1-RECOMMEND/arm64.config @@ -125,6 +125,7 @@ CONFIG_TASKSTATS=y CONFIG_TCP_CONG_BBR=m CONFIG_TXGBE=m CONFIG_UACCE=m +CONFIG_UNWINDER_ORC=y CONFIG_USB_ACM=m CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_PCI=y diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index e7f000f90bb4..741e3775a1db 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -216,11 +216,10 @@ static inline void __iomem *gic_dist_base_alias(struct irq_data *d) chip = 3; break; default: - unreachable(); + BUG(); } return t241_dist_base_alias[chip]; } - return gic_data.dist_base; } -- Gitee From 391af2d525b20b5230f132571a7968beb0ad0ab5 Mon Sep 17 00:00:00 2001 From: Jia He Date: Fri, 13 Sep 2024 11:49:30 +0000 Subject: [PATCH 1520/2138] objtool: Fix the size of bit field "type" in struct orc_entry ANBZ: #9262 cherry-picked from https://lore.kernel.org/linux-arm-kernel/20240806055444.528932-1-justin.he@arm.com/T/ > #define UNWIND_HINT_TYPE_IRQ_STACK 8 Because the macro UNWIND_HINT_TYPE_IRQ_STACK defined in objtool_types.h is larger than 3 bits, enlarge it to 4 instead. 
Signed-off-by: Jia He Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- include/linux/orc_entry.h | 6 +++--- tools/include/linux/orc_entry.h | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/linux/orc_entry.h b/include/linux/orc_entry.h index f8182edade4f..194a6c41476e 100644 --- a/include/linux/orc_entry.h +++ b/include/linux/orc_entry.h @@ -23,14 +23,14 @@ struct orc_entry { #if defined(__LITTLE_ENDIAN_BITFIELD) unsigned sp_reg:4; unsigned fp_reg:4; - unsigned type:3; + unsigned type:4; unsigned signal:1; #elif defined(__BIG_ENDIAN_BITFIELD) unsigned fp_reg:4; unsigned sp_reg:4; - unsigned unused:4; + unsigned unused:3; unsigned signal:1; - unsigned type:3; + unsigned type:4; #endif } __packed; diff --git a/tools/include/linux/orc_entry.h b/tools/include/linux/orc_entry.h index f8182edade4f..194a6c41476e 100644 --- a/tools/include/linux/orc_entry.h +++ b/tools/include/linux/orc_entry.h @@ -23,14 +23,14 @@ struct orc_entry { #if defined(__LITTLE_ENDIAN_BITFIELD) unsigned sp_reg:4; unsigned fp_reg:4; - unsigned type:3; + unsigned type:4; unsigned signal:1; #elif defined(__BIG_ENDIAN_BITFIELD) unsigned fp_reg:4; unsigned sp_reg:4; - unsigned unused:4; + unsigned unused:3; unsigned signal:1; - unsigned type:3; + unsigned type:4; #endif } __packed; -- Gitee From 4778330725427982903209f21ce38de673765be9 Mon Sep 17 00:00:00 2001 From: Jia He Date: Fri, 13 Sep 2024 11:49:43 +0000 Subject: [PATCH 1521/2138] init/Kconfig: Downgrade LD_ORPHAN_WARN_LEVEL to warn if ARM_64 && LIVEPATCH ANBZ: #9262 cherry-picked from https://lore.kernel.org/linux-arm-kernel/20240806055444.528932-1-justin.he@arm.com/T/ The warnings exist in the original patch series and are awaiting a fix from the author. > ld: warning: orphan section `.init.orc_unwind' from `arch/arm64/kernel/pi/lib-fdt.pi.o' being placed in section `.init.orc_unwind'. 
> ld: warning: orphan section `.init.orc_unwind_ip' from `arch/arm64/kernel/pi/lib-fdt.pi.o' being placed in section `.init.orc_unwind_ip'. Downgrade the LD_ORPHAN_WARN_LEVEL to warn instead of exiting ld is a workaround. Signed-off-by: Jia He Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- init/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/init/Kconfig b/init/Kconfig index 065310947818..6b458f16330d 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1461,7 +1461,7 @@ config LD_ORPHAN_WARN config LD_ORPHAN_WARN_LEVEL string depends on LD_ORPHAN_WARN - default "error" if WERROR + default "error" if WERROR && !(ARM64 && LIVEPATCH) default "warn" config SYSCTL -- Gitee From 5497906f0c0c1b9fd6fff6d7dc03a188abc4f79f Mon Sep 17 00:00:00 2001 From: Jia He Date: Fri, 13 Sep 2024 11:50:04 +0000 Subject: [PATCH 1522/2138] arm64: Generate orc_hash.h ANBZ: #9262 cherry-picked from https://lore.kernel.org/linux-arm-kernel/20240806055444.528932-1-justin.he@arm.com/T/ Otherwise, the kernel build will report error as follows: In file included from arch/arm64/kernel/arm64-reloc-test.mod.c:10: ./arch/arm64/include/asm/orc_header.h:9:10: fatal error: asm/orc_hash.h: No such file or directory 9 | #include | ^~~~~~~~~~~~~~~~ Fix it by keep the orc_hash.h unremoved. 
Signed-off-by: Jia He Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- arch/arm64/include/asm/Kbuild | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index 5c8ee5a541d2..654a4b174a36 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -8,3 +8,4 @@ generic-y += user.h generated-y += cpucaps.h generated-y += sysreg-defs.h +generated-y += orc_hash.h -- Gitee From 053011673f0ebfe6b62a25e40e79634fc0164a54 Mon Sep 17 00:00:00 2001 From: Jia He Date: Fri, 13 Sep 2024 11:50:30 +0000 Subject: [PATCH 1523/2138] anolis: configs: Enable CONFIG_LIVEPATCH for arm64 ANBZ: #9262 cherry-picked from https://lore.kernel.org/linux-arm-kernel/20240806055444.528932-1-justin.he@arm.com/T/ Enable CONFIG_LIVEPATCH and its dependency configs for anolis_defconfig and anolis-debug_defconfig on arm64. Signed-off-by: Jia He Signed-off-by: Wei Chen Acked-by: ydzhang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/3299 --- .../configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION | 1 + .../L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH | 1 + .../configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET | 1 + .../L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH | 1 + 13 files changed, 13 insertions(+) create mode 
100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION new file mode 100644 index 000000000000..cc041e559182 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION @@ -0,0 +1 @@ +CONFIG_FRAME_POINTER_VALIDATION=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET new file mode 100644 index 000000000000..c7daa4f60d5d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET @@ -0,0 +1 @@ +# CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH new file mode 100644 index 000000000000..7ebdb924703e --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH @@ -0,0 +1 @@ +CONFIG_HAVE_LIVEPATCH=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE new file mode 100644 index 000000000000..2ce8faabc4cf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE @@ -0,0 +1 @@ +CONFIG_HAVE_RELIABLE_STACKTRACE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION new file mode 100644 index 000000000000..6f36a32d84ae --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION @@ -0,0 +1 @@ +CONFIG_HAVE_STACK_VALIDATION=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH new file mode 100644 index 000000000000..1b05d0d1a109 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH @@ -0,0 +1 @@ +CONFIG_LIVEPATCH=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL new file mode 100644 index 000000000000..cf3a9f20f93d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL @@ -0,0 +1 @@ +CONFIG_OBJTOOL=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET new file mode 100644 index 000000000000..759cb13e424c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET @@ -0,0 +1 @@ +# CONFIG_RANDOMIZE_KSTACK_OFFSET is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT new file mode 100644 index 000000000000..d680659c1703 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT @@ -0,0 +1 @@ +# 
CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION new file mode 100644 index 000000000000..e335fefdd9be --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION @@ -0,0 +1 @@ +CONFIG_STACK_VALIDATION=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER new file mode 100644 index 000000000000..0938fde11ffe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER @@ -0,0 +1 @@ +CONFIG_UNWINDER_FRAME_POINTER=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC new file mode 100644 index 000000000000..6b6908419acb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC @@ -0,0 +1 @@ +CONFIG_UNWINDER_ORC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH new file mode 100644 index 000000000000..0dd7700464a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH @@ -0,0 +1 @@ +CONFIG_TEST_LIVEPATCH=m -- Gitee From a0f57977706f3d0d5f216f70954e673cef136c39 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 Dec 2023 10:12:17 +0100 Subject: [PATCH 1524/2138] cfi: Flip headers ANBZ: #11469 commit 4382159696c9af67ee047ed55f2dbf05480f52f6 upstream. Normal include order is that linux/foo.h should include asm/foo.h, CFI has it the wrong way around. 
Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Sami Tolvanen Link: https://lore.kernel.org/r/20231215092707.231038174@infradead.org Signed-off-by: Alexei Starovoitov Signed-off-by: Tianchen Ding Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4026 --- arch/riscv/include/asm/cfi.h | 3 ++- arch/riscv/kernel/cfi.c | 2 +- arch/x86/include/asm/cfi.h | 3 ++- arch/x86/kernel/cfi.c | 4 ++-- include/asm-generic/Kbuild | 1 + include/asm-generic/cfi.h | 5 +++++ include/linux/cfi.h | 1 + 7 files changed, 14 insertions(+), 5 deletions(-) create mode 100644 include/asm-generic/cfi.h diff --git a/arch/riscv/include/asm/cfi.h b/arch/riscv/include/asm/cfi.h index 56bf9d69d5e3..8f7a62257044 100644 --- a/arch/riscv/include/asm/cfi.h +++ b/arch/riscv/include/asm/cfi.h @@ -7,8 +7,9 @@ * * Copyright (C) 2023 Google LLC */ +#include -#include +struct pt_regs; #ifdef CONFIG_CFI_CLANG enum bug_trap_type handle_cfi_failure(struct pt_regs *regs); diff --git a/arch/riscv/kernel/cfi.c b/arch/riscv/kernel/cfi.c index 820158d7a291..6ec9dbd7292e 100644 --- a/arch/riscv/kernel/cfi.c +++ b/arch/riscv/kernel/cfi.c @@ -4,7 +4,7 @@ * * Copyright (C) 2023 Google LLC */ -#include +#include #include /* diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h index 58dacd90daef..2a494643089d 100644 --- a/arch/x86/include/asm/cfi.h +++ b/arch/x86/include/asm/cfi.h @@ -7,8 +7,9 @@ * * Copyright (C) 2022 Google LLC */ +#include -#include +struct pt_regs; #ifdef CONFIG_CFI_CLANG enum bug_trap_type handle_cfi_failure(struct pt_regs *regs); diff --git a/arch/x86/kernel/cfi.c b/arch/x86/kernel/cfi.c index 8674a5c0c031..e6bf78fac146 100644 --- a/arch/x86/kernel/cfi.c +++ b/arch/x86/kernel/cfi.c @@ -4,10 +4,10 @@ * * Copyright (C) 2022 Google LLC */ -#include +#include +#include #include #include -#include /* * Returns the target address and the expected type when regs->ip points diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 
941be574bbe0..ca2be8eaba5e 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -11,6 +11,7 @@ mandatory-y += bitops.h mandatory-y += bug.h mandatory-y += bugs.h mandatory-y += cacheflush.h +mandatory-y += cfi.h mandatory-y += checksum.h mandatory-y += compat.h mandatory-y += current.h diff --git a/include/asm-generic/cfi.h b/include/asm-generic/cfi.h new file mode 100644 index 000000000000..41fac3537bf9 --- /dev/null +++ b/include/asm-generic/cfi.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_CFI_H +#define __ASM_GENERIC_CFI_H + +#endif /* __ASM_GENERIC_CFI_H */ diff --git a/include/linux/cfi.h b/include/linux/cfi.h index 3552ec82b725..2309d74e77e6 100644 --- a/include/linux/cfi.h +++ b/include/linux/cfi.h @@ -9,6 +9,7 @@ #include #include +#include #ifdef CONFIG_CFI_CLANG enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr, -- Gitee From ed3b24cf265163301bc64f03a1e22f3874513167 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 Dec 2023 10:12:18 +0100 Subject: [PATCH 1525/2138] x86/cfi,bpf: Fix BPF JIT call ANBZ: #11469 commit 4f9087f16651aca4a5f32da840a53f6660f0579a upstream. The current BPF call convention is __nocfi, except when it calls !JIT things, then it calls regular C functions. It so happens that with FineIBT the __nocfi and C calling conventions are incompatible. Specifically __nocfi will call at func+0, while FineIBT will have endbr-poison there, which is not a valid indirect target. Causing #CP. Notably this only triggers on IBT enabled hardware, which is probably why this hasn't been reported (also, most people will have JIT on anyway). Implement proper CFI prologues for the BPF JIT codegen and drop __nocfi for x86. 
Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20231215092707.345270396@infradead.org Signed-off-by: Alexei Starovoitov Signed-off-by: Tianchen Ding Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4026 --- arch/x86/include/asm/cfi.h | 110 ++++++++++++++++++++++++++++++++++ arch/x86/kernel/alternative.c | 47 ++++++++++++--- arch/x86/net/bpf_jit_comp.c | 82 +++++++++++++++++++++++-- include/linux/bpf.h | 12 +++- include/linux/cfi.h | 7 +++ kernel/bpf/core.c | 25 ++++++++ 6 files changed, 269 insertions(+), 14 deletions(-) diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h index 2a494643089d..7a7b0b823a98 100644 --- a/arch/x86/include/asm/cfi.h +++ b/arch/x86/include/asm/cfi.h @@ -9,15 +9,125 @@ */ #include +/* + * An overview of the various calling conventions... + * + * Traditional: + * + * foo: + * ... code here ... + * ret + * + * direct caller: + * call foo + * + * indirect caller: + * lea foo(%rip), %r11 + * ... + * call *%r11 + * + * + * IBT: + * + * foo: + * endbr64 + * ... code here ... + * ret + * + * direct caller: + * call foo / call foo+4 + * + * indirect caller: + * lea foo(%rip), %r11 + * ... + * call *%r11 + * + * + * kCFI: + * + * __cfi_foo: + * movl $0x12345678, %eax + * # 11 nops when CONFIG_CALL_PADDING + * foo: + * endbr64 # when IBT + * ... code here ... + * ret + * + * direct call: + * call foo # / call foo+4 when IBT + * + * indirect call: + * lea foo(%rip), %r11 + * ... + * movl $(-0x12345678), %r10d + * addl -4(%r11), %r10d # -15 when CONFIG_CALL_PADDING + * jz 1f + * ud2 + * 1:call *%r11 + * + * + * FineIBT (builds as kCFI + CALL_PADDING + IBT + RETPOLINE and runtime patches into): + * + * __cfi_foo: + * endbr64 + * subl 0x12345678, %r10d + * jz foo + * ud2 + * nop + * foo: + * osp nop3 # was endbr64 + * ... code here ... + * ret + * + * direct caller: + * call foo / call foo+4 + * + * indirect caller: + * lea foo(%rip), %r11 + * ... 
+ * movl $0x12345678, %r10d + * subl $16, %r11 + * nop4 + * call *%r11 + * + */ +enum cfi_mode { + CFI_DEFAULT, /* FineIBT if hardware has IBT, otherwise kCFI */ + CFI_OFF, /* Taditional / IBT depending on .config */ + CFI_KCFI, /* Optionally CALL_PADDING, IBT, RETPOLINE */ + CFI_FINEIBT, /* see arch/x86/kernel/alternative.c */ +}; + +extern enum cfi_mode cfi_mode; + struct pt_regs; #ifdef CONFIG_CFI_CLANG enum bug_trap_type handle_cfi_failure(struct pt_regs *regs); +#define __bpfcall +extern u32 cfi_bpf_hash; + +static inline int cfi_get_offset(void) +{ + switch (cfi_mode) { + case CFI_FINEIBT: + return 16; + case CFI_KCFI: + if (IS_ENABLED(CONFIG_CALL_PADDING)) + return 16; + return 5; + default: + return 0; + } +} +#define cfi_get_offset cfi_get_offset + #else static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs) { return BUG_TRAP_TYPE_NONE; } +#define cfi_bpf_hash 0U #endif /* CONFIG_CFI_CLANG */ #endif /* _ASM_X86_CFI_H */ diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index aae7456ece07..811f4a1f0fdf 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -30,6 +30,7 @@ #include #include #include +#include int __read_mostly alternatives_patched; @@ -842,15 +843,43 @@ void __init_or_module apply_seal_endbr(s32 *start, s32 *end) { } #endif /* CONFIG_X86_KERNEL_IBT */ #ifdef CONFIG_FINEIBT +#define __CFI_DEFAULT CFI_DEFAULT +#elif defined(CONFIG_CFI_CLANG) +#define __CFI_DEFAULT CFI_KCFI +#else +#define __CFI_DEFAULT CFI_OFF +#endif -enum cfi_mode { - CFI_DEFAULT, - CFI_OFF, - CFI_KCFI, - CFI_FINEIBT, -}; +enum cfi_mode cfi_mode __ro_after_init = __CFI_DEFAULT; + +#ifdef CONFIG_CFI_CLANG +struct bpf_insn; + +/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */ +extern unsigned int __bpf_prog_runX(const void *ctx, + const struct bpf_insn *insn); + +/* + * Force a reference to the external symbol so the compiler generates + * __kcfi_typid. 
+ */ +__ADDRESSABLE(__bpf_prog_runX); + +/* u32 __ro_after_init cfi_bpf_hash = __kcfi_typeid___bpf_prog_runX; */ +asm ( +" .pushsection .data..ro_after_init,\"aw\",@progbits \n" +" .type cfi_bpf_hash,@object \n" +" .globl cfi_bpf_hash \n" +" .p2align 2, 0x0 \n" +"cfi_bpf_hash: \n" +" .long __kcfi_typeid___bpf_prog_runX \n" +" .size cfi_bpf_hash, 4 \n" +" .popsection \n" +); +#endif + +#ifdef CONFIG_FINEIBT -static enum cfi_mode cfi_mode __ro_after_init = CFI_DEFAULT; static bool cfi_rand __ro_after_init = true; static u32 cfi_seed __ro_after_init; @@ -1159,8 +1188,10 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, goto err; if (cfi_rand) { - if (builtin) + if (builtin) { cfi_seed = get_random_u32(); + cfi_bpf_hash = cfi_rehash(cfi_bpf_hash); + } ret = cfi_rand_preamble(start_cfi, end_cfi); if (ret) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index a50c99e9b5c0..022ab02b1887 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -16,6 +16,7 @@ #include #include #include +#include static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) { @@ -48,9 +49,11 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) #ifdef CONFIG_X86_KERNEL_IBT -#define EMIT_ENDBR() EMIT(gen_endbr(), 4) +#define EMIT_ENDBR() EMIT(gen_endbr(), 4) +#define EMIT_ENDBR_POISON() EMIT(gen_endbr_poison(), 4) #else #define EMIT_ENDBR() +#define EMIT_ENDBR_POISON() #endif static bool is_imm8(int value) @@ -335,6 +338,69 @@ static void pop_callee_regs(u8 **pprog, bool *callee_regs_used) *pprog = prog; } +/* + * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT + * in arch/x86/kernel/alternative.c + */ + +static void emit_fineibt(u8 **pprog) +{ + u8 *prog = *pprog; + + EMIT_ENDBR(); + EMIT3_off32(0x41, 0x81, 0xea, cfi_bpf_hash); /* subl $hash, %r10d */ + EMIT2(0x74, 0x07); /* jz.d8 +7 */ + EMIT2(0x0f, 0x0b); /* ud2 */ + EMIT1(0x90); /* nop 
*/ + EMIT_ENDBR_POISON(); + + *pprog = prog; +} + +static void emit_kcfi(u8 **pprog) +{ + u8 *prog = *pprog; + + EMIT1_off32(0xb8, cfi_bpf_hash); /* movl $hash, %eax */ +#ifdef CONFIG_CALL_PADDING + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); + EMIT1(0x90); +#endif + EMIT_ENDBR(); + + *pprog = prog; +} + +static void emit_cfi(u8 **pprog) +{ + u8 *prog = *pprog; + + switch (cfi_mode) { + case CFI_FINEIBT: + emit_fineibt(&prog); + break; + + case CFI_KCFI: + emit_kcfi(&prog); + break; + + default: + EMIT_ENDBR(); + break; + } + + *pprog = prog; +} + /* * Emit x86-64 prologue code for BPF program. * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes @@ -345,10 +411,10 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, { u8 *prog = *pprog; + emit_cfi(&prog); /* BPF trampoline can be made to work without these nops, * but let's waste 5 bytes for now and optimize later */ - EMIT_ENDBR(); memcpy(prog, x86_nops[5], X86_PATCH_SIZE); prog += X86_PATCH_SIZE; if (!ebpf_from_cbpf) { @@ -2908,9 +2974,16 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) jit_data->header = header; jit_data->rw_header = rw_header; } - prog->bpf_func = (void *)image; + /* + * ctx.prog_offset is used when CFI preambles put code *before* + * the function. See emit_cfi(). For FineIBT specifically this code + * can also be executed and bpf_prog_kallsyms_add() will + * generate an additional symbol to cover this, hence also + * decrement proglen. 
+ */ + prog->bpf_func = (void *)image + cfi_get_offset(); prog->jited = 1; - prog->jited_len = proglen; + prog->jited_len = proglen - cfi_get_offset(); } else { prog = orig_prog; } @@ -2965,6 +3038,7 @@ void bpf_jit_free(struct bpf_prog *prog) kvfree(jit_data->addrs); kfree(jit_data); } + prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset(); hdr = bpf_jit_binary_pack_hdr(prog); bpf_jit_binary_pack_free(hdr, NULL); WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog)); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8dff572dedcd..61b3ba435294 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -29,6 +29,7 @@ #include #include #include +#include struct bpf_verifier_env; struct bpf_verifier_log; @@ -1228,7 +1229,11 @@ struct bpf_dispatcher { #endif }; -static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func( +#ifndef __bpfcall +#define __bpfcall __nocfi +#endif + +static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func( const void *ctx, const struct bpf_insn *insnsi, bpf_func_t bpf_func) @@ -1318,7 +1323,7 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func #define DEFINE_BPF_DISPATCHER(name) \ __BPF_DISPATCHER_SC(name); \ - noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \ + noinline __bpfcall unsigned int bpf_dispatcher_##name##_func( \ const void *ctx, \ const struct bpf_insn *insnsi, \ bpf_func_t bpf_func) \ @@ -1463,6 +1468,9 @@ struct bpf_prog_aux { struct bpf_kfunc_desc_tab *kfunc_tab; struct bpf_kfunc_btf_tab *kfunc_btf_tab; u32 size_poke_tab; +#ifdef CONFIG_FINEIBT + struct bpf_ksym ksym_prefix; +#endif struct bpf_ksym ksym; const struct bpf_prog_ops *ops; struct bpf_map **used_maps; diff --git a/include/linux/cfi.h b/include/linux/cfi.h index 2309d74e77e6..1ed2d96c0cfc 100644 --- a/include/linux/cfi.h +++ b/include/linux/cfi.h @@ -11,6 +11,13 @@ #include #include +#ifndef cfi_get_offset +static inline int cfi_get_offset(void) +{ + return 0; +} +#endif + #ifdef 
CONFIG_CFI_CLANG enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr, unsigned long *target, u32 type); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 02f327f05fd6..a94e90ce0b2f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -121,6 +121,9 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag #endif INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode); +#ifdef CONFIG_FINEIBT + INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode); +#endif mutex_init(&fp->aux->used_maps_mutex); mutex_init(&fp->aux->dst_mutex); @@ -691,6 +694,23 @@ void bpf_prog_kallsyms_add(struct bpf_prog *fp) fp->aux->ksym.prog = true; bpf_ksym_add(&fp->aux->ksym); + +#ifdef CONFIG_FINEIBT + /* + * When FineIBT, code in the __cfi_foo() symbols can get executed + * and hence unwinder needs help. + */ + if (cfi_mode != CFI_FINEIBT) + return; + + snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN, + "__cfi_%s", fp->aux->ksym.name); + + fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16; + fp->aux->ksym_prefix.end = (unsigned long) fp->bpf_func; + + bpf_ksym_add(&fp->aux->ksym_prefix); +#endif } void bpf_prog_kallsyms_del(struct bpf_prog *fp) @@ -699,6 +719,11 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp) return; bpf_ksym_del(&fp->aux->ksym); +#ifdef CONFIG_FINEIBT + if (cfi_mode != CFI_FINEIBT) + return; + bpf_ksym_del(&fp->aux->ksym_prefix); +#endif } static struct bpf_ksym *bpf_ksym_find(unsigned long addr) -- Gitee From 9bf3431f1cf42254f5b4506d2ea6e400a8927273 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 Dec 2023 10:12:19 +0100 Subject: [PATCH 1526/2138] x86/cfi,bpf: Fix bpf_callback_t CFI ANBZ: #11469 commit e72d88d18df4e03c80e64c2535f70c64f1dc6fc1 upstream. Where the main BPF program is expected to match bpf_func_t, sub-programs are expected to match bpf_callback_t. 
This fixes things like: tools/testing/selftests/bpf/progs/bloom_filter_bench.c: bpf_for_each_map_elem(&array_map, bloom_callback, &data, 0); Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20231215092707.451956710@infradead.org Signed-off-by: Alexei Starovoitov Signed-off-by: Tianchen Ding Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4026 --- arch/x86/include/asm/cfi.h | 2 ++ arch/x86/kernel/alternative.c | 18 ++++++++++++++++++ arch/x86/net/bpf_jit_comp.c | 18 ++++++++++-------- 3 files changed, 30 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h index 7a7b0b823a98..8779abd217b7 100644 --- a/arch/x86/include/asm/cfi.h +++ b/arch/x86/include/asm/cfi.h @@ -106,6 +106,7 @@ struct pt_regs; enum bug_trap_type handle_cfi_failure(struct pt_regs *regs); #define __bpfcall extern u32 cfi_bpf_hash; +extern u32 cfi_bpf_subprog_hash; static inline int cfi_get_offset(void) { @@ -128,6 +129,7 @@ static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs) return BUG_TRAP_TYPE_NONE; } #define cfi_bpf_hash 0U +#define cfi_bpf_subprog_hash 0U #endif /* CONFIG_CFI_CLANG */ #endif /* _ASM_X86_CFI_H */ diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 811f4a1f0fdf..287065ad3feb 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -876,6 +876,23 @@ asm ( " .size cfi_bpf_hash, 4 \n" " .popsection \n" ); + +/* Must match bpf_callback_t */ +extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64); + +__ADDRESSABLE(__bpf_callback_fn); + +/* u32 __ro_after_init cfi_bpf_subprog_hash = __kcfi_typeid___bpf_callback_fn; */ +asm ( +" .pushsection .data..ro_after_init,\"aw\",@progbits \n" +" .type cfi_bpf_subprog_hash,@object \n" +" .globl cfi_bpf_subprog_hash \n" +" .p2align 2, 0x0 \n" +"cfi_bpf_subprog_hash: \n" +" .long __kcfi_typeid___bpf_callback_fn \n" +" .size cfi_bpf_subprog_hash, 4 \n" +" .popsection \n" +); #endif 
#ifdef CONFIG_FINEIBT @@ -1191,6 +1208,7 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline, if (builtin) { cfi_seed = get_random_u32(); cfi_bpf_hash = cfi_rehash(cfi_bpf_hash); + cfi_bpf_subprog_hash = cfi_rehash(cfi_bpf_subprog_hash); } ret = cfi_rand_preamble(start_cfi, end_cfi); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 022ab02b1887..0ea8c0d91064 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -343,12 +343,13 @@ static void pop_callee_regs(u8 **pprog, bool *callee_regs_used) * in arch/x86/kernel/alternative.c */ -static void emit_fineibt(u8 **pprog) +static void emit_fineibt(u8 **pprog, bool is_subprog) { + u32 hash = is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash; u8 *prog = *pprog; EMIT_ENDBR(); - EMIT3_off32(0x41, 0x81, 0xea, cfi_bpf_hash); /* subl $hash, %r10d */ + EMIT3_off32(0x41, 0x81, 0xea, hash); /* subl $hash, %r10d */ EMIT2(0x74, 0x07); /* jz.d8 +7 */ EMIT2(0x0f, 0x0b); /* ud2 */ EMIT1(0x90); /* nop */ @@ -357,11 +358,12 @@ static void emit_fineibt(u8 **pprog) *pprog = prog; } -static void emit_kcfi(u8 **pprog) +static void emit_kcfi(u8 **pprog, bool is_subprog) { + u32 hash = is_subprog ? 
cfi_bpf_subprog_hash : cfi_bpf_hash; u8 *prog = *pprog; - EMIT1_off32(0xb8, cfi_bpf_hash); /* movl $hash, %eax */ + EMIT1_off32(0xb8, hash); /* movl $hash, %eax */ #ifdef CONFIG_CALL_PADDING EMIT1(0x90); EMIT1(0x90); @@ -380,17 +382,17 @@ static void emit_kcfi(u8 **pprog) *pprog = prog; } -static void emit_cfi(u8 **pprog) +static void emit_cfi(u8 **pprog, bool is_subprog) { u8 *prog = *pprog; switch (cfi_mode) { case CFI_FINEIBT: - emit_fineibt(&prog); + emit_fineibt(&prog, is_subprog); break; case CFI_KCFI: - emit_kcfi(&prog); + emit_kcfi(&prog, is_subprog); break; default: @@ -411,7 +413,7 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, { u8 *prog = *pprog; - emit_cfi(&prog); + emit_cfi(&prog, is_subprog); /* BPF trampoline can be made to work without these nops, * but let's waste 5 bytes for now and optimize later */ -- Gitee From bd04e1589bf47d2a427de2227c842a8ab8aa70ad Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 Dec 2023 10:12:20 +0100 Subject: [PATCH 1527/2138] x86/cfi,bpf: Fix bpf_struct_ops CFI ANBZ: #11469 commit 2cd3e3772e41377f32d6eea643e0590774e9187c upstream. BPF struct_ops uses __arch_prepare_bpf_trampoline() to write trampolines for indirect function calls. These tramplines much have matching CFI. In order to obtain the correct CFI hash for the various methods, add a matching structure that contains stub functions, the compiler will generate correct CFI which we can pilfer for the trampolines. 
Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20231215092707.566977112@infradead.org Signed-off-by: Alexei Starovoitov Signed-off-by: Tianchen Ding Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4026 --- arch/x86/include/asm/cfi.h | 6 +++ arch/x86/kernel/alternative.c | 22 +++++++++++ arch/x86/net/bpf_jit_comp.c | 66 ++++++++++++++++++++------------ include/linux/bpf.h | 13 +++++++ kernel/bpf/bpf_struct_ops.c | 17 +++++---- net/bpf/bpf_dummy_struct_ops.c | 31 ++++++++++++++- net/ipv4/bpf_tcp_ca.c | 69 ++++++++++++++++++++++++++++++++++ 7 files changed, 192 insertions(+), 32 deletions(-) diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h index 8779abd217b7..1a50b2cd4713 100644 --- a/arch/x86/include/asm/cfi.h +++ b/arch/x86/include/asm/cfi.h @@ -123,6 +123,8 @@ static inline int cfi_get_offset(void) } #define cfi_get_offset cfi_get_offset +extern u32 cfi_get_func_hash(void *func); + #else static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs) { @@ -130,6 +132,10 @@ static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs) } #define cfi_bpf_hash 0U #define cfi_bpf_subprog_hash 0U +static inline u32 cfi_get_func_hash(void *func) +{ + return 0; +} #endif /* CONFIG_CFI_CLANG */ #endif /* _ASM_X86_CFI_H */ diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 287065ad3feb..183d42302243 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -893,6 +893,28 @@ asm ( " .size cfi_bpf_subprog_hash, 4 \n" " .popsection \n" ); + +u32 cfi_get_func_hash(void *func) +{ + u32 hash; + + func -= cfi_get_offset(); + switch (cfi_mode) { + case CFI_FINEIBT: + func += 7; + break; + case CFI_KCFI: + func += 1; + break; + default: + return 0; + } + + if (get_kernel_nofault(hash, func)) + return 0; + + return hash; +} #endif #ifdef CONFIG_FINEIBT diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 
0ea8c0d91064..7d631623a950 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -343,9 +343,8 @@ static void pop_callee_regs(u8 **pprog, bool *callee_regs_used) * in arch/x86/kernel/alternative.c */ -static void emit_fineibt(u8 **pprog, bool is_subprog) +static void emit_fineibt(u8 **pprog, u32 hash) { - u32 hash = is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash; u8 *prog = *pprog; EMIT_ENDBR(); @@ -358,9 +357,8 @@ static void emit_fineibt(u8 **pprog, bool is_subprog) *pprog = prog; } -static void emit_kcfi(u8 **pprog, bool is_subprog) +static void emit_kcfi(u8 **pprog, u32 hash) { - u32 hash = is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash; u8 *prog = *pprog; EMIT1_off32(0xb8, hash); /* movl $hash, %eax */ @@ -382,17 +380,17 @@ static void emit_kcfi(u8 **pprog, bool is_subprog) *pprog = prog; } -static void emit_cfi(u8 **pprog, bool is_subprog) +static void emit_cfi(u8 **pprog, u32 hash) { u8 *prog = *pprog; switch (cfi_mode) { case CFI_FINEIBT: - emit_fineibt(&prog, is_subprog); + emit_fineibt(&prog, hash); break; case CFI_KCFI: - emit_kcfi(&prog, is_subprog); + emit_kcfi(&prog, hash); break; default: @@ -413,7 +411,7 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf, { u8 *prog = *pprog; - emit_cfi(&prog, is_subprog); + emit_cfi(&prog, is_subprog ? cfi_bpf_subprog_hash : cfi_bpf_hash); /* BPF trampoline can be made to work without these nops, * but let's waste 5 bytes for now and optimize later */ @@ -2478,10 +2476,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i u8 *prog; bool save_ret; + /* + * F_INDIRECT is only compatible with F_RET_FENTRY_RET, it is + * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG + * because @func_addr. 
+ */ + WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) && + (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET))); + /* extra registers for struct arguments */ - for (i = 0; i < m->nr_args; i++) + for (i = 0; i < m->nr_args; i++) { if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) nr_regs += (m->arg_size[i] + 7) / 8 - 1; + } /* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. 1-6 * are passed through regs, the remains are through stack. @@ -2564,20 +2571,27 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i prog = image; - EMIT_ENDBR(); - /* - * This is the direct-call trampoline, as such it needs accounting - * for the __fentry__ call. - */ - x86_call_depth_emit_accounting(&prog, NULL); + if (flags & BPF_TRAMP_F_INDIRECT) { + /* + * Indirect call for bpf_struct_ops + */ + emit_cfi(&prog, cfi_get_func_hash(func_addr)); + } else { + /* + * Direct-call fentry stub, as such it needs accounting for the + * __fentry__ call. + */ + x86_call_depth_emit_accounting(&prog, NULL); + } EMIT1(0x55); /* push rbp */ EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ - if (!is_imm8(stack_size)) + if (!is_imm8(stack_size)) { /* sub rsp, stack_size */ EMIT3_off32(0x48, 0x81, 0xEC, stack_size); - else + } else { /* sub rsp, stack_size */ EMIT4(0x48, 0x83, 0xEC, stack_size); + } if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) EMIT1(0x50); /* push rax */ /* mov QWORD PTR [rbp - rbx_off], rbx */ @@ -2610,10 +2624,11 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i } } - if (fentry->nr_links) + if (fentry->nr_links) { if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off, flags & BPF_TRAMP_F_RET_FENTRY_RET)) return -EINVAL; + } if (fmod_ret->nr_links) { branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *), @@ -2632,11 +2647,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i restore_regs(m, &prog, regs_off); save_args(m, &prog, arg_stack_off, true); - if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + if 
(flags & BPF_TRAMP_F_TAIL_CALL_CTX) { /* Before calling the original function, restore the * tail_call_cnt from stack to rax. */ RESTORE_TAIL_CALL_CNT(stack_size); + } if (flags & BPF_TRAMP_F_ORIG_STACK) { emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8); @@ -2665,16 +2681,18 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i /* Update the branches saved in invoke_bpf_mod_ret with the * aligned address of do_fexit. */ - for (i = 0; i < fmod_ret->nr_links; i++) + for (i = 0; i < fmod_ret->nr_links; i++) { emit_cond_near_jump(&branches[i], prog, branches[i], X86_JNE); + } } - if (fexit->nr_links) + if (fexit->nr_links) { if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) { ret = -EINVAL; goto cleanup; } + } if (flags & BPF_TRAMP_F_RESTORE_REGS) restore_regs(m, &prog, regs_off); @@ -2691,11 +2709,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i ret = -EINVAL; goto cleanup; } - } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { /* Before running the original function, restore the * tail_call_cnt from stack to rax. 
*/ RESTORE_TAIL_CALL_CNT(stack_size); + } /* restore return value of orig_call or fentry prog back into RAX */ if (save_ret) @@ -2703,9 +2722,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off); EMIT1(0xC9); /* leave */ - if (flags & BPF_TRAMP_F_SKIP_FRAME) + if (flags & BPF_TRAMP_F_SKIP_FRAME) { /* skip our return address and return to parent */ EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ + } emit_return(&prog, prog); /* Make sure the trampoline generation logic doesn't overflow */ if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) { diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 61b3ba435294..72409d7e926f 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1078,6 +1078,17 @@ struct btf_func_model { */ #define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7) +/* + * Indicate the trampoline should be suitable to receive indirect calls; + * without this indirectly calling the generated code can result in #UD/#CP, + * depending on the CFI options. + * + * Used by bpf_struct_ops. + * + * Incompatible with FENTRY usage, overloads @func_addr argument. + */ +#define BPF_TRAMP_F_INDIRECT BIT(8) + /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 * bytes on x86. 
*/ @@ -1706,6 +1717,7 @@ struct bpf_struct_ops { struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; u32 type_id; u32 value_id; + void *cfi_stubs; }; #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) @@ -1719,6 +1731,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, struct bpf_tramp_link *link, const struct btf_func_model *model, + void *stub_func, void *image, void *image_end); static inline bool bpf_try_module_get(const void *data, struct module *owner) { diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index fdc3e8705a3c..81eb5f4370ee 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -352,18 +352,18 @@ const struct bpf_link_ops bpf_struct_ops_link_lops = { int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, struct bpf_tramp_link *link, const struct btf_func_model *model, - void *image, void *image_end) + void *stub_func, void *image, void *image_end) { - u32 flags; + u32 flags = BPF_TRAMP_F_INDIRECT; tlinks[BPF_TRAMP_FENTRY].links[0] = link; tlinks[BPF_TRAMP_FENTRY].nr_links = 1; - /* BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops, - * and it must be used alone. - */ - flags = model->ret_size > 0 ? 
BPF_TRAMP_F_RET_FENTRY_RET : 0; + + if (model->ret_size > 0) + flags |= BPF_TRAMP_F_RET_FENTRY_RET; + return arch_prepare_bpf_trampoline(NULL, image, image_end, - model, flags, tlinks, NULL); + model, flags, tlinks, stub_func); } static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, @@ -497,11 +497,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, err = bpf_struct_ops_prepare_trampoline(tlinks, link, &st_ops->func_models[i], + *(void **)(st_ops->cfi_stubs + moff), image, image_end); if (err < 0) goto reset_unlock; - *(void **)(kdata + moff) = image; + *(void **)(kdata + moff) = image + cfi_get_offset(); image += err; /* put prog_id to udata */ diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c index 5918d1b32e19..c639bdafe6b0 100644 --- a/net/bpf/bpf_dummy_struct_ops.c +++ b/net/bpf/bpf_dummy_struct_ops.c @@ -12,6 +12,11 @@ extern struct bpf_struct_ops bpf_bpf_dummy_ops; /* A common type for test_N with return value in bpf_dummy_ops */ typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...); +static int dummy_ops_test_ret_function(struct bpf_dummy_ops_state *state, ...) 
+{ + return 0; +} + struct bpf_dummy_ops_test_args { u64 args[MAX_BPF_FUNC_ARGS]; struct bpf_dummy_ops_state state; @@ -62,7 +67,7 @@ static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args) static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args) { - dummy_ops_test_ret_fn test = (void *)image; + dummy_ops_test_ret_fn test = (void *)image + cfi_get_offset(); struct bpf_dummy_ops_state *state = NULL; /* state needs to be NULL if args[0] is 0 */ @@ -120,6 +125,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, op_idx = prog->expected_attach_type; err = bpf_struct_ops_prepare_trampoline(tlinks, link, &st_ops->func_models[op_idx], + &dummy_ops_test_ret_function, image, image + PAGE_SIZE); if (err < 0) goto out; @@ -220,6 +226,28 @@ static void bpf_dummy_unreg(void *kdata) { } +static int bpf_dummy_test_1(struct bpf_dummy_ops_state *cb) +{ + return 0; +} + +static int bpf_dummy_test_2(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2, + char a3, unsigned long a4) +{ + return 0; +} + +static int bpf_dummy_test_sleepable(struct bpf_dummy_ops_state *cb) +{ + return 0; +} + +static struct bpf_dummy_ops __bpf_bpf_dummy_ops = { + .test_1 = bpf_dummy_test_1, + .test_2 = bpf_dummy_test_2, + .test_sleepable = bpf_dummy_test_sleepable, +}; + struct bpf_struct_ops bpf_bpf_dummy_ops = { .verifier_ops = &bpf_dummy_verifier_ops, .init = bpf_dummy_init, @@ -228,4 +256,5 @@ struct bpf_struct_ops bpf_bpf_dummy_ops = { .reg = bpf_dummy_reg, .unreg = bpf_dummy_unreg, .name = "bpf_dummy_ops", + .cfi_stubs = &__bpf_bpf_dummy_ops, }; diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index 39dcccf0f174..ae8b15e6896f 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -271,6 +271,74 @@ static int bpf_tcp_ca_validate(void *kdata) return tcp_validate_congestion_control(kdata); } +static u32 bpf_tcp_ca_ssthresh(struct sock *sk) +{ + return 0; +} + +static void bpf_tcp_ca_cong_avoid(struct sock *sk, 
u32 ack, u32 acked) +{ +} + +static void bpf_tcp_ca_set_state(struct sock *sk, u8 new_state) +{ +} + +static void bpf_tcp_ca_cwnd_event(struct sock *sk, enum tcp_ca_event ev) +{ +} + +static void bpf_tcp_ca_in_ack_event(struct sock *sk, u32 flags) +{ +} + +static void bpf_tcp_ca_pkts_acked(struct sock *sk, const struct ack_sample *sample) +{ +} + +static u32 bpf_tcp_ca_min_tso_segs(struct sock *sk) +{ + return 0; +} + +static void bpf_tcp_ca_cong_control(struct sock *sk, const struct rate_sample *rs) +{ +} + +static u32 bpf_tcp_ca_undo_cwnd(struct sock *sk) +{ + return 0; +} + +static u32 bpf_tcp_ca_sndbuf_expand(struct sock *sk) +{ + return 0; +} + +static void __bpf_tcp_ca_init(struct sock *sk) +{ +} + +static void __bpf_tcp_ca_release(struct sock *sk) +{ +} + +static struct tcp_congestion_ops __bpf_ops_tcp_congestion_ops = { + .ssthresh = bpf_tcp_ca_ssthresh, + .cong_avoid = bpf_tcp_ca_cong_avoid, + .set_state = bpf_tcp_ca_set_state, + .cwnd_event = bpf_tcp_ca_cwnd_event, + .in_ack_event = bpf_tcp_ca_in_ack_event, + .pkts_acked = bpf_tcp_ca_pkts_acked, + .min_tso_segs = bpf_tcp_ca_min_tso_segs, + .cong_control = bpf_tcp_ca_cong_control, + .undo_cwnd = bpf_tcp_ca_undo_cwnd, + .sndbuf_expand = bpf_tcp_ca_sndbuf_expand, + + .init = __bpf_tcp_ca_init, + .release = __bpf_tcp_ca_release, +}; + struct bpf_struct_ops bpf_tcp_congestion_ops = { .verifier_ops = &bpf_tcp_ca_verifier_ops, .reg = bpf_tcp_ca_reg, @@ -281,6 +349,7 @@ struct bpf_struct_ops bpf_tcp_congestion_ops = { .init = bpf_tcp_ca_init, .validate = bpf_tcp_ca_validate, .name = "tcp_congestion_ops", + .cfi_stubs = &__bpf_ops_tcp_congestion_ops, }; static int __init bpf_tcp_ca_kfunc_init(void) -- Gitee From 01bd3d837dcaa36cb3d29b46758f309ae73ae20a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 Dec 2023 10:12:21 +0100 Subject: [PATCH 1528/2138] cfi: Add CFI_NOSEAL() ANBZ: #11469 commit e9d13b9d2f99ccf7afeab490d97eaa5ac9846598 upstream. 
Add a CFI_NOSEAL() helper to mark functions that need to retain their CFI information, despite not otherwise leaking their address. Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20231215092707.669401084@infradead.org Signed-off-by: Alexei Starovoitov Signed-off-by: Tianchen Ding Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4026 --- arch/x86/include/asm/cfi.h | 5 +++++ include/linux/cfi.h | 4 ++++ 2 files changed, 9 insertions(+) diff --git a/arch/x86/include/asm/cfi.h b/arch/x86/include/asm/cfi.h index 1a50b2cd4713..7cd752557905 100644 --- a/arch/x86/include/asm/cfi.h +++ b/arch/x86/include/asm/cfi.h @@ -8,6 +8,7 @@ * Copyright (C) 2022 Google LLC */ #include +#include /* * An overview of the various calling conventions... @@ -138,4 +139,8 @@ static inline u32 cfi_get_func_hash(void *func) } #endif /* CONFIG_CFI_CLANG */ +#if HAS_KERNEL_IBT == 1 +#define CFI_NOSEAL(x) asm(IBT_NOSEAL(__stringify(x))) +#endif + #endif /* _ASM_X86_CFI_H */ diff --git a/include/linux/cfi.h b/include/linux/cfi.h index 1ed2d96c0cfc..f0df518e11dd 100644 --- a/include/linux/cfi.h +++ b/include/linux/cfi.h @@ -46,4 +46,8 @@ static inline void module_cfi_finalize(const Elf_Ehdr *hdr, #endif /* CONFIG_ARCH_USES_CFI_TRAPS */ #endif /* CONFIG_MODULES */ +#ifndef CFI_NOSEAL +#define CFI_NOSEAL(x) +#endif + #endif /* _LINUX_CFI_H */ -- Gitee From 6aca7c16db0362a7efab64703285caaebdfc7a12 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 15 Dec 2023 10:12:22 +0100 Subject: [PATCH 1529/2138] bpf: Fix dtor CFI ANBZ: #11469 commit e4c00339891c074c76f626ac82981963cbba5332 upstream. Ensure the various dtor functions match their prototype and retain their CFI signatures, since they don't have their address taken, they are prone to not getting CFI, making them impossible to call indirectly. 
Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20231215092707.799451071@infradead.org Signed-off-by: Alexei Starovoitov Signed-off-by: Tianchen Ding Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4026 --- kernel/bpf/cpumask.c | 8 +++++++- kernel/bpf/helpers.c | 16 ++++++++++++++-- net/bpf/test_run.c | 15 +++++++++++++-- 3 files changed, 34 insertions(+), 5 deletions(-) diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c index 6983af8e093c..09cb2a71e850 100644 --- a/kernel/bpf/cpumask.c +++ b/kernel/bpf/cpumask.c @@ -98,6 +98,12 @@ __bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask) migrate_enable(); } +__bpf_kfunc void bpf_cpumask_release_dtor(void *cpumask) +{ + bpf_cpumask_release(cpumask); +} +CFI_NOSEAL(bpf_cpumask_release_dtor); + /** * bpf_cpumask_first() - Get the index of the first nonzero bit in the cpumask. * @cpumask: The cpumask being queried. @@ -443,7 +449,7 @@ static const struct btf_kfunc_id_set cpumask_kfunc_set = { BTF_ID_LIST(cpumask_dtor_ids) BTF_ID(struct, bpf_cpumask) -BTF_ID(func, bpf_cpumask_release) +BTF_ID(func, bpf_cpumask_release_dtor) static int __init cpumask_kfunc_init(void) { diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 41d62405c852..4e77fa1bf41e 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2212,6 +2212,12 @@ __bpf_kfunc void bpf_task_release(struct task_struct *p) put_task_struct_rcu_user(p); } +__bpf_kfunc void bpf_task_release_dtor(void *p) +{ + put_task_struct_rcu_user(p); +} +CFI_NOSEAL(bpf_task_release_dtor); + #ifdef CONFIG_CGROUPS /** * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by @@ -2236,6 +2242,12 @@ __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp) cgroup_put(cgrp); } +__bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp) +{ + cgroup_put(cgrp); +} +CFI_NOSEAL(bpf_cgroup_release_dtor); + /** * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor * array. 
A cgroup returned by this kfunc which is not subsequently stored in a @@ -2566,10 +2578,10 @@ static const struct btf_kfunc_id_set generic_kfunc_set = { BTF_ID_LIST(generic_dtor_ids) BTF_ID(struct, task_struct) -BTF_ID(func, bpf_task_release) +BTF_ID(func, bpf_task_release_dtor) #ifdef CONFIG_CGROUPS BTF_ID(struct, cgroup) -BTF_ID(func, bpf_cgroup_release) +BTF_ID(func, bpf_cgroup_release_dtor) #endif BTF_SET8_START(common_btf_ids) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 905de361f862..835efb0246c9 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -602,10 +602,21 @@ __bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) refcount_dec(&p->cnt); } +__bpf_kfunc void bpf_kfunc_call_test_release_dtor(void *p) +{ + bpf_kfunc_call_test_release(p); +} +CFI_NOSEAL(bpf_kfunc_call_test_release_dtor); + __bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p) { } +__bpf_kfunc void bpf_kfunc_call_memb_release_dtor(void *p) +{ +} +CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor); + __diag_pop(); BTF_SET8_START(bpf_test_modify_return_ids) @@ -1679,9 +1690,9 @@ static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = { BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids) BTF_ID(struct, prog_test_ref_kfunc) -BTF_ID(func, bpf_kfunc_call_test_release) +BTF_ID(func, bpf_kfunc_call_test_release_dtor) BTF_ID(struct, prog_test_member) -BTF_ID(func, bpf_kfunc_call_memb_release) +BTF_ID(func, bpf_kfunc_call_memb_release_dtor) static int __init bpf_prog_test_run_init(void) { -- Gitee From 53fabc63770875d1da91a1f380edd7c526c1ba24 Mon Sep 17 00:00:00 2001 From: zhenghaowei Date: Thu, 18 Jul 2024 08:58:33 +0800 Subject: [PATCH 1530/2138] PCI/ACPI: Increase Loongson max PCI hosts to 8 ANBZ: #11460 commit 1f35a0c74e441e1a21b5414c25bc01f06e9cca31 upstream. Beginning with Loongson-3C6000, there can be up to 8 PCI hosts for multi-chip machines. To support these machines, increase the number of entries in mcfg_quirks to 8. 
Link: https://lore.kernel.org/r/20240726092911.2042656-1-chenhuacai@loongson.cn Signed-off-by: Haowei Zheng Signed-off-by: Huacai Chen Signed-off-by: Bjorn Helgaas Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4017 --- arch/loongarch/include/asm/irq.h | 4 ++-- drivers/acpi/pci_mcfg.c | 13 +++++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index 85a3315597b6..efc55a9bae04 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -42,8 +42,8 @@ void spurious_interrupt(void); #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu); -#define MAX_IO_PICS 2 -#define NR_IRQS (64 + (256 * MAX_IO_PICS)) +#define MAX_IO_PICS 16 +#define NR_IRQS (64 + NR_VECTORS * (NR_CPUS + MAX_IO_PICS)) struct acpi_vector_group { int node; diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index 1dccb26b2b7f..98bbb01981c5 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -181,6 +181,19 @@ static struct mcfg_fixup mcfg_quirks[] = { LOONGSON_ECAM_MCFG("LOONGSON", 0), LOONGSON_ECAM_MCFG("\0", 1), LOONGSON_ECAM_MCFG("LOONGSON", 1), + LOONGSON_ECAM_MCFG("\0", 2), + LOONGSON_ECAM_MCFG("LOONGSON", 2), + LOONGSON_ECAM_MCFG("\0", 3), + LOONGSON_ECAM_MCFG("LOONGSON", 3), + LOONGSON_ECAM_MCFG("\0", 4), + LOONGSON_ECAM_MCFG("LOONGSON", 4), + LOONGSON_ECAM_MCFG("\0", 5), + LOONGSON_ECAM_MCFG("LOONGSON", 5), + LOONGSON_ECAM_MCFG("\0", 6), + LOONGSON_ECAM_MCFG("LOONGSON", 6), + LOONGSON_ECAM_MCFG("\0", 7), + LOONGSON_ECAM_MCFG("LOONGSON", 7), + #endif /* LOONGARCH */ #ifdef CONFIG_SW64 -- Gitee From 1ae52c9956c57819807bb9e6969725adaa2e1786 Mon Sep 17 00:00:00 2001 From: zhangtianyang Date: Thu, 26 Sep 2024 20:55:06 +0800 Subject: [PATCH 1531/2138] anolis: Loongarch: Cancel CONFIG_RANDOMIZE_BASE=y ANBZ: #11463 Temporarily close the 
address space randomization function under the Loongson platform. This function currently affects the testing of s4. Signed-off-by: zhangtianyang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4020 --- arch/loongarch/configs/anolis_defconfig | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig index d750f96d973f..aeff56d28799 100644 --- a/arch/loongarch/configs/anolis_defconfig +++ b/arch/loongarch/configs/anolis_defconfig @@ -337,8 +337,6 @@ CONFIG_ARCH_SUPPORTS_KEXEC=y CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y CONFIG_ARCH_SELECTS_CRASH_DUMP=y CONFIG_RELOCATABLE=y -CONFIG_RANDOMIZE_BASE=y -CONFIG_RANDOMIZE_BASE_MAX_OFFSET=0x01000000 CONFIG_SECCOMP=y # end of Kernel type and options -- Gitee From 7a0cbada74553cc1133eecc51f627ec23ebc08a2 Mon Sep 17 00:00:00 2001 From: gaojuxin Date: Tue, 22 Oct 2024 20:15:29 +0800 Subject: [PATCH 1532/2138] irqchip/loongarch-avec: Add AVEC irqchip support ANBZ: #11462 commit ae16f05c928a1336d5d9d19fd805d7bf29c3f0c8 upstream. Introduce the advanced extended interrupt controllers (AVECINTC). This feature will allow each core to have 256 independent interrupt vectors and MSI interrupts can be independently routed to any vector on any CPU. 
The whole topology of irqchips in LoongArch machines looks like this if AVECINTC is supported: +-----+ +-----------------------+ +-------+ | IPI | --> | CPUINTC | <-- | Timer | +-----+ +-----------------------+ +-------+ ^ ^ ^ | | | +---------+ +----------+ +---------+ +-------+ | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs | +---------+ +----------+ +---------+ +-------+ ^ ^ | | +---------+ +---------+ | PCH-PIC | | PCH-MSI | +---------+ +---------+ ^ ^ ^ | | | +---------+ +---------+ +---------+ | Devices | | PCH-LPC | | Devices | +---------+ +---------+ +---------+ ^ | +---------+ | Devices | +---------+ Co-developed-by: Jianmin Lv Signed-off-by: Jianmin Lv Co-developed-by: Liupu Wang Signed-off-by: Liupu Wang Co-developed-by: Huacai Chen Signed-off-by: Huacai Chen Signed-off-by: Tianyang Zhang Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/all/20240823104337.25577-2-zhangtianyang@loongson.cn Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4019 --- .../arch/loongarch/irq-chip-model.rst | 32 ++ .../zh_CN/arch/loongarch/irq-chip-model.rst | 32 ++ arch/loongarch/Kconfig | 1 + arch/loongarch/include/asm/cpu-features.h | 1 + arch/loongarch/include/asm/cpu.h | 2 + arch/loongarch/include/asm/hardirq.h | 3 +- arch/loongarch/include/asm/irq.h | 29 +- arch/loongarch/include/asm/loongarch.h | 18 +- arch/loongarch/include/asm/smp.h | 2 + arch/loongarch/kernel/cpu-probe.c | 3 +- arch/loongarch/kernel/irq.c | 13 + arch/loongarch/kernel/legacy_boot.c | 9 + arch/loongarch/kernel/smp.c | 6 + drivers/irqchip/Makefile | 2 +- drivers/irqchip/irq-loongarch-avec.c | 449 ++++++++++++++++++ drivers/irqchip/irq-loongarch-cpu.c | 7 +- drivers/irqchip/irq-loongson-eiointc.c | 9 +- drivers/irqchip/irq-loongson-htvec.c | 2 + drivers/irqchip/irq-loongson-liointc.c | 2 + drivers/irqchip/irq-loongson-pch-lpc.c | 2 + drivers/irqchip/irq-loongson-pch-msi.c | 44 +- drivers/irqchip/irq-loongson-pch-pic.c | 2 + include/linux/cpuhotplug.h | 3 +- 23 files 
changed, 642 insertions(+), 31 deletions(-) create mode 100644 drivers/irqchip/irq-loongarch-avec.c diff --git a/Documentation/arch/loongarch/irq-chip-model.rst b/Documentation/arch/loongarch/irq-chip-model.rst index 7988f4192363..6dd48256e39f 100644 --- a/Documentation/arch/loongarch/irq-chip-model.rst +++ b/Documentation/arch/loongarch/irq-chip-model.rst @@ -85,6 +85,38 @@ to CPUINTC directly:: | Devices | +---------+ +Advanced Extended IRQ model +=========================== + +In this model, IPI (Inter-Processor Interrupt) and CPU Local Timer interrupt go +to CPUINTC directly, CPU UARTS interrupts go to LIOINTC, PCH-MSI interrupts go +to AVECINTC, and then go to CPUINTC directly, while all other devices interrupts +go to PCH-PIC/PCH-LPC and gathered by EIOINTC, and then go to CPUINTC directly:: + + +-----+ +-----------------------+ +-------+ + | IPI | --> | CPUINTC | <-- | Timer | + +-----+ +-----------------------+ +-------+ + ^ ^ ^ + | | | + +---------+ +----------+ +---------+ +-------+ + | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs | + +---------+ +----------+ +---------+ +-------+ + ^ ^ + | | + +---------+ +---------+ + | PCH-PIC | | PCH-MSI | + +---------+ +---------+ + ^ ^ ^ + | | | + +---------+ +---------+ +---------+ + | Devices | | PCH-LPC | | Devices | + +---------+ +---------+ +---------+ + ^ + | + +---------+ + | Devices | + +---------+ + ACPI-related definitions ======================== diff --git a/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst b/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst index f1e9ab18206c..472761938682 100644 --- a/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst +++ b/Documentation/translations/zh_CN/arch/loongarch/irq-chip-model.rst @@ -87,6 +87,38 @@ PCH-LPC/PCH-MSI,然后被EIOINTC统一收集,再直接到达CPUINTC:: | Devices | +---------+ +高级扩展IRQ模型 +=============== + +在这种模型里面,IPI(Inter-Processor Interrupt)和CPU本地时钟中断直接发送到CPUINTC, 
+CPU串口(UARTs)中断发送到LIOINTC,PCH-MSI中断发送到AVECINTC,而后通过AVECINTC直接 +送达CPUINTC,而其他所有设备的中断则分别发送到所连接的PCH-PIC/PCH-LPC,然后由EIOINTC +统一收集,再直接到达CPUINTC:: + + +-----+ +-----------------------+ +-------+ + | IPI | --> | CPUINTC | <-- | Timer | + +-----+ +-----------------------+ +-------+ + ^ ^ ^ + | | | + +---------+ +----------+ +---------+ +-------+ + | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs | + +---------+ +----------+ +---------+ +-------+ + ^ ^ + | | + +---------+ +---------+ + | PCH-PIC | | PCH-MSI | + +---------+ +---------+ + ^ ^ ^ + | | | + +---------+ +---------+ +---------+ + | Devices | | PCH-LPC | | Devices | + +---------+ +---------+ +---------+ + ^ + | + +---------+ + | Devices | + +---------+ + ACPI相关的定义 ============== diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 7b82992af3c4..a3b39567442f 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -75,6 +75,7 @@ config LOONGARCH select GENERIC_ENTRY select GENERIC_GETTIMEOFDAY select GENERIC_IOREMAP if !ARCH_IOREMAP + select GENERIC_IRQ_MATRIX_ALLOCATOR select GENERIC_IRQ_MULTI_HANDLER select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h index 2eafe6a6aca8..16a716f88a5c 100644 --- a/arch/loongarch/include/asm/cpu-features.h +++ b/arch/loongarch/include/asm/cpu-features.h @@ -65,5 +65,6 @@ #define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID) #define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR) #define cpu_has_ptw cpu_opt(LOONGARCH_CPU_PTW) +#define cpu_has_avecint cpu_opt(LOONGARCH_CPU_AVECINT) #endif /* __ASM_CPU_FEATURES_H */ diff --git a/arch/loongarch/include/asm/cpu.h b/arch/loongarch/include/asm/cpu.h index 48b9f7168bcc..843f9c4ec980 100644 --- a/arch/loongarch/include/asm/cpu.h +++ b/arch/loongarch/include/asm/cpu.h @@ -99,6 +99,7 @@ enum cpu_type_enum { #define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */ #define CPU_FEATURE_HYPERVISOR 25 /* CPU has 
hypervisor (running in VM) */ #define CPU_FEATURE_PTW 26 /* CPU has hardware page table walker */ +#define CPU_FEATURE_AVECINT 27 /* CPU has avec interrupt */ #define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG) #define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM) @@ -127,5 +128,6 @@ enum cpu_type_enum { #define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID) #define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR) #define LOONGARCH_CPU_PTW BIT_ULL(CPU_FEATURE_PTW) +#define LOONGARCH_CPU_AVECINT BIT_ULL(CPU_FEATURE_AVECINT) #endif /* _ASM_CPU_H */ diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h index b26d596a73aa..5f70cb77b54d 100644 --- a/arch/loongarch/include/asm/hardirq.h +++ b/arch/loongarch/include/asm/hardirq.h @@ -15,8 +15,9 @@ extern void ack_bad_irq(unsigned int irq); enum ipi_msg_type { IPI_RESCHEDULE, IPI_CALL_FUNCTION, + IPI_CLEAR_VECTOR, }; -#define NR_IPI 2 +#define NR_IPI 3 typedef struct { unsigned int ipi_irqs[NR_IPI]; diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index efc55a9bae04..a43cbd2f1dd5 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -38,6 +38,17 @@ static inline bool on_irq_stack(int cpu, unsigned long sp) void spurious_interrupt(void); #define NR_IRQS_LEGACY 16 +/* + * 256 Vectors Mapping for AVECINTC: + * + * 0 - 15: Mapping classic IPs, e.g. IP0-12. + * 16 - 255: Mapping vectors for external IRQ. 
+ * + */ +#define NR_VECTORS 256 +#define NR_LEGACY_VECTORS 16 +#define IRQ_MATRIX_BITS NR_VECTORS + #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace void arch_trigger_cpumask_backtrace(const struct cpumask *mask, int exclude_cpu); @@ -66,7 +77,7 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS]; #define LOONGSON_LPC_LAST_IRQ (LOONGSON_LPC_IRQ_BASE + 15) #define LOONGSON_CPU_IRQ_BASE 16 -#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 14) +#define LOONGSON_CPU_LAST_IRQ (LOONGSON_CPU_IRQ_BASE + 15) #define LOONGSON_PCH_IRQ_BASE 64 #define LOONGSON_PCH_ACPI_IRQ (LOONGSON_PCH_IRQ_BASE + 47) @@ -89,20 +100,8 @@ struct acpi_madt_bio_pic; struct acpi_madt_msi_pic; struct acpi_madt_lpc_pic; -int liointc_acpi_init(struct irq_domain *parent, - struct acpi_madt_lio_pic *acpi_liointc); -int eiointc_acpi_init(struct irq_domain *parent, - struct acpi_madt_eio_pic *acpi_eiointc); - -int htvec_acpi_init(struct irq_domain *parent, - struct acpi_madt_ht_pic *acpi_htvec); -int pch_lpc_acpi_init(struct irq_domain *parent, - struct acpi_madt_lpc_pic *acpi_pchlpc); -int pch_msi_acpi_init(struct irq_domain *parent, - struct acpi_madt_msi_pic *acpi_pchmsi); -int pch_pic_acpi_init(struct irq_domain *parent, - struct acpi_madt_bio_pic *acpi_pchpic); -int find_pch_pic(u32 gsi); +void complete_irq_moving(void); + struct fwnode_handle *get_pch_msi_handle(int pci_segment); extern struct acpi_madt_lio_pic *acpi_liointc; diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index 8594c55ec171..f1fecf9eea8d 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -254,8 +254,8 @@ #define CSR_ESTAT_EXC_WIDTH 6 #define CSR_ESTAT_EXC (_ULCAST_(0x3f) << CSR_ESTAT_EXC_SHIFT) #define CSR_ESTAT_IS_SHIFT 0 -#define CSR_ESTAT_IS_WIDTH 14 -#define CSR_ESTAT_IS (_ULCAST_(0x3fff) << CSR_ESTAT_IS_SHIFT) +#define CSR_ESTAT_IS_WIDTH 15 +#define CSR_ESTAT_IS (_ULCAST_(0x7fff) << 
CSR_ESTAT_IS_SHIFT) #define LOONGARCH_CSR_ERA 0x6 /* Exception return address */ @@ -650,6 +650,13 @@ #define LOONGARCH_CSR_CTAG 0x98 /* TagLo + TagHi */ +#define LOONGARCH_CSR_ISR0 0xa0 +#define LOONGARCH_CSR_ISR1 0xa1 +#define LOONGARCH_CSR_ISR2 0xa2 +#define LOONGARCH_CSR_ISR3 0xa3 + +#define LOONGARCH_CSR_IRR 0xa4 + #define LOONGARCH_CSR_PRID 0xc0 /* Shadow MCSR : 0xc0 ~ 0xff */ @@ -1012,7 +1019,7 @@ /* * CSR_ECFG IM */ -#define ECFG0_IM 0x00001fff +#define ECFG0_IM 0x00005fff #define ECFGB_SIP0 0 #define ECFGF_SIP0 (_ULCAST_(1) << ECFGB_SIP0) #define ECFGB_SIP1 1 @@ -1055,6 +1062,7 @@ #define IOCSRF_EIODECODE BIT_ULL(9) #define IOCSRF_FLATMODE BIT_ULL(10) #define IOCSRF_VM BIT_ULL(11) +#define IOCSRF_AVEC BIT_ULL(15) #define LOONGARCH_IOCSR_VENDOR 0x10 @@ -1065,6 +1073,7 @@ #define LOONGARCH_IOCSR_MISC_FUNC 0x420 #define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21) #define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48) +#define IOCSR_MISC_FUNC_AVEC_EN BIT_ULL(51) #define LOONGARCH_IOCSR_CPUTEMP 0x428 @@ -1386,9 +1395,10 @@ __BUILD_CSR_OP(tlbidx) #define INT_TI 11 /* Timer */ #define INT_IPI 12 #define INT_NMI 13 +#define INT_AVEC 14 /* ExcCodes corresponding to interrupts */ -#define EXCCODE_INT_NUM (INT_NMI + 1) +#define EXCCODE_INT_NUM (INT_AVEC + 1) #define EXCCODE_INT_START 64 #define EXCCODE_INT_END (EXCCODE_INT_START + EXCCODE_INT_NUM - 1) diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h index 75d30529748c..630e5ebec21c 100644 --- a/arch/loongarch/include/asm/smp.h +++ b/arch/loongarch/include/asm/smp.h @@ -67,9 +67,11 @@ extern int __cpu_logical_map[NR_CPUS]; #define ACTION_BOOT_CPU 0 #define ACTION_RESCHEDULE 1 #define ACTION_CALL_FUNCTION 2 +#define ACTION_CLEAR_VECTOR 3 #define SMP_BOOT_CPU BIT(ACTION_BOOT_CPU) #define SMP_RESCHEDULE BIT(ACTION_RESCHEDULE) #define SMP_CALL_FUNCTION BIT(ACTION_CALL_FUNCTION) +#define SMP_CLEAR_VECTOR BIT(ACTION_CLEAR_VECTOR) struct secondary_data { unsigned long stack; diff --git 
a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c index 55320813ee08..14f0449f5452 100644 --- a/arch/loongarch/kernel/cpu-probe.c +++ b/arch/loongarch/kernel/cpu-probe.c @@ -106,7 +106,6 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) elf_hwcap |= HWCAP_LOONGARCH_CRC32; } - config = read_cpucfg(LOONGARCH_CPUCFG2); if (config & CPUCFG2_LAM) { c->options |= LOONGARCH_CPU_LAM; @@ -174,6 +173,8 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c) c->options |= LOONGARCH_CPU_FLATMODE; if (config & IOCSRF_EIODECODE) c->options |= LOONGARCH_CPU_EIODECODE; + if (config & IOCSRF_AVEC) + c->options |= LOONGARCH_CPU_AVECINT; if (config & IOCSRF_VM) c->options |= LOONGARCH_CPU_HYPERVISOR; diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index 0a2243c8847a..1b9eec76699a 100644 --- a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -122,6 +122,19 @@ void fixup_irqs(void) } #endif +int __init arch_probe_nr_irqs(void) +{ + int nr_io_pics = bitmap_weight(&loongson_sysconf.cores_io_master, NR_CPUS); + + if (!cpu_has_avecint) + nr_irqs = (64 + NR_VECTORS * nr_io_pics); + else + nr_irqs = (64 + NR_VECTORS * (nr_cpu_ids + nr_io_pics)); + + return NR_IRQS_LEGACY; +} + + void __init init_IRQ(void) { int i, ret; diff --git a/arch/loongarch/kernel/legacy_boot.c b/arch/loongarch/kernel/legacy_boot.c index 35a0a118486f..6503d5f0c034 100644 --- a/arch/loongarch/kernel/legacy_boot.c +++ b/arch/loongarch/kernel/legacy_boot.c @@ -25,6 +25,15 @@ #define MSI_MSG_ADDRESS 0x2FF00000 #define MSI_MSG_DEFAULT_COUNT 0xC0 +extern int liointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lio_pic *acpi_liointc); +extern int eiointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_eio_pic *acpi_eiointc); +extern int htvec_acpi_init(struct irq_domain *parent, + struct acpi_madt_ht_pic *acpi_htvec); +extern int pch_lpc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lpc_pic *acpi_pchlpc); + struct 
boot_params *efi_bp; struct loongsonlist_mem_map *g_mmap; struct acpi_madt_lio_pic *acpi_liointc; diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 974303b6084c..ed3b539c786b 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -71,6 +71,7 @@ static DEFINE_PER_CPU(int, cpu_state); static const char *ipi_types[NR_IPI] __tracepoint_string = { [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNCTION] = "Function call interrupts", + [IPI_CLEAR_VECTOR] = "Clear vector interrupts", }; void show_ipi_list(struct seq_file *p, int prec) @@ -245,6 +246,11 @@ static irqreturn_t loongson_ipi_interrupt(int irq, void *dev) per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++; } + if (action & SMP_CLEAR_VECTOR) { + complete_irq_moving(); + per_cpu(irq_stat, cpu).ipi_irqs[IPI_CLEAR_VECTOR]++; + } + return IRQ_HANDLED; } diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 246aa0603d6e..787206e166fc 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -116,7 +116,7 @@ obj-$(CONFIG_LS1X_IRQ) += irq-ls1x.o obj-$(CONFIG_TI_SCI_INTR_IRQCHIP) += irq-ti-sci-intr.o obj-$(CONFIG_TI_SCI_INTA_IRQCHIP) += irq-ti-sci-inta.o obj-$(CONFIG_TI_PRUSS_INTC) += irq-pruss-intc.o -obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o +obj-$(CONFIG_IRQ_LOONGARCH_CPU) += irq-loongarch-cpu.o irq-loongarch-avec.o obj-$(CONFIG_LOONGSON_LIOINTC) += irq-loongson-liointc.o obj-$(CONFIG_LOONGSON_EIOINTC) += irq-loongson-eiointc.o obj-$(CONFIG_LOONGSON_HTPIC) += irq-loongson-htpic.o diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c new file mode 100644 index 000000000000..5c9dcc488e21 --- /dev/null +++ b/drivers/irqchip/irq-loongarch-avec.c @@ -0,0 +1,449 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2024 Loongson Technologies, Inc. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "irq-loongson.h" + +#define VECTORS_PER_REG 64 +#define IRR_VECTOR_MASK 0xffUL +#define IRR_INVALID_MASK 0x80000000UL +#define AVEC_MSG_OFFSET 0x100000 + +#ifdef CONFIG_SMP +struct pending_list { + struct list_head head; +}; + +static struct cpumask intersect_mask; +static DEFINE_PER_CPU(struct pending_list, pending_list); +#endif + +static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map); + +struct avecintc_chip { + raw_spinlock_t lock; + struct fwnode_handle *fwnode; + struct irq_domain *domain; + struct irq_matrix *vector_matrix; + phys_addr_t msi_base_addr; +}; + +static struct avecintc_chip loongarch_avec; + +struct avecintc_data { + struct list_head entry; + unsigned int cpu; + unsigned int vec; + unsigned int prev_cpu; + unsigned int prev_vec; + unsigned int moving; +}; + +static inline void avecintc_ack_irq(struct irq_data *d) +{ +} + +static inline void avecintc_mask_irq(struct irq_data *d) +{ +} + +static inline void avecintc_unmask_irq(struct irq_data *d) +{ +} + +#ifdef CONFIG_SMP +static inline void pending_list_init(int cpu) +{ + struct pending_list *plist = per_cpu_ptr(&pending_list, cpu); + + INIT_LIST_HEAD(&plist->head); +} + +static void avecintc_sync(struct avecintc_data *adata) +{ + struct pending_list *plist; + + if (cpu_online(adata->prev_cpu)) { + plist = per_cpu_ptr(&pending_list, adata->prev_cpu); + list_add_tail(&adata->entry, &plist->head); + adata->moving = 1; + smp_ops.send_ipi_single(adata->prev_cpu, SMP_CLEAR_VECTOR); + } +} + +static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force) +{ + int cpu, ret, vector; + struct avecintc_data *adata; + + raw_spin_lock(&loongarch_avec.lock); + adata = irq_data_get_irq_chip_data(data); + + if (adata->moving) { + raw_spin_unlock(&loongarch_avec.lock); + return -EBUSY; + } + + if (adata->vec == UINT_MAX) 
{ + raw_spin_unlock(&loongarch_avec.lock); + return -EINVAL; + } + + if (cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest)) { + raw_spin_unlock(&loongarch_avec.lock); + return 0; + } + + cpumask_and(&intersect_mask, dest, cpu_online_mask); + + ret = irq_matrix_alloc(loongarch_avec.vector_matrix, &intersect_mask, false, &cpu); + if (ret < 0) { + raw_spin_unlock(&loongarch_avec.lock); + return ret; + } + + vector = ret; + adata->cpu = cpu; + adata->vec = vector; + per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data); + avecintc_sync(adata); + + raw_spin_unlock(&loongarch_avec.lock); + irq_data_update_effective_affinity(data, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK; +} + +static int avecintc_cpu_online(unsigned int cpu) +{ + if (!loongarch_avec.vector_matrix) + return 0; + + raw_spin_lock(&loongarch_avec.lock); + + irq_matrix_online(loongarch_avec.vector_matrix); + + pending_list_init(cpu); + + raw_spin_unlock(&loongarch_avec.lock); + + return 0; +} + +static int avecintc_cpu_offline(unsigned int cpu) +{ + struct pending_list *plist = per_cpu_ptr(&pending_list, cpu); + + if (!loongarch_avec.vector_matrix) + return 0; + + raw_spin_lock(&loongarch_avec.lock); + + if (!list_empty(&plist->head)) + pr_warn("CPU#%d vector is busy\n", cpu); + irq_matrix_offline(loongarch_avec.vector_matrix); + + raw_spin_unlock(&loongarch_avec.lock); + + return 0; +} + +void complete_irq_moving(void) +{ + struct pending_list *plist = this_cpu_ptr(&pending_list); + struct avecintc_data *adata, *tdata; + int cpu, vector, bias; + uint64_t isr; + + raw_spin_lock(&loongarch_avec.lock); + + list_for_each_entry_safe(adata, tdata, &plist->head, entry) { + cpu = adata->prev_cpu; + vector = adata->prev_vec; + bias = vector / VECTORS_PER_REG; + switch (bias) { + case 0: + isr = csr_read64(LOONGARCH_CSR_ISR0); + break; + case 1: + isr = csr_read64(LOONGARCH_CSR_ISR1); + break; + case 2: + isr = csr_read64(LOONGARCH_CSR_ISR2); + break; + case 3: + isr = 
csr_read64(LOONGARCH_CSR_ISR3); + break; + } + + if (isr & (1UL << (vector % VECTORS_PER_REG))) { + smp_ops.send_ipi_single(cpu, SMP_CLEAR_VECTOR); + continue; + } + list_del(&adata->entry); + irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, false); + this_cpu_write(irq_map[vector], NULL); + adata->moving = 0; + adata->prev_cpu = adata->cpu; + adata->prev_vec = adata->vec; + } + + raw_spin_unlock(&loongarch_avec.lock); +} +#endif + +static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct avecintc_data *adata = irq_data_get_irq_chip_data(d); + + msg->address_hi = 0x0; + msg->address_lo = (loongarch_avec.msi_base_addr | (adata->vec & 0xff) << 4) + | ((cpu_logical_map(adata->cpu & 0xffff)) << 12); + msg->data = 0x0; +} + +static struct irq_chip avec_irq_controller = { + .name = "AVECINTC", + .irq_ack = avecintc_ack_irq, + .irq_mask = avecintc_mask_irq, + .irq_unmask = avecintc_unmask_irq, +#ifdef CONFIG_SMP + .irq_set_affinity = avecintc_set_affinity, +#endif + .irq_compose_msi_msg = avecintc_compose_msi_msg, +}; + +static void avecintc_irq_dispatch(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct irq_desc *d; + + chained_irq_enter(chip, desc); + + while (true) { + unsigned long vector = csr_read64(LOONGARCH_CSR_IRR); + if (vector & IRR_INVALID_MASK) + break; + + vector &= IRR_VECTOR_MASK; + + d = this_cpu_read(irq_map[vector]); + if (d) { + generic_handle_irq_desc(d); + } else { + spurious_interrupt(); + pr_warn("Unexpected IRQ occurs on CPU#%d [vector %ld]\n", smp_processor_id(), vector); + } + } + + chained_irq_exit(chip, desc); +} + +static int avecintc_alloc_vector(struct irq_data *irqd, struct avecintc_data *adata) +{ + int cpu, ret; + unsigned long flags; + + raw_spin_lock_irqsave(&loongarch_avec.lock, flags); + + ret = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu); + if (ret < 0) { + raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); + return 
ret; + } + + adata->prev_cpu = adata->cpu = cpu; + adata->prev_vec = adata->vec = ret; + per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd); + + raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); + + return 0; +} + +static int avecintc_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, void *arg) +{ + for (unsigned int i = 0; i < nr_irqs; i++) { + struct irq_data *irqd = irq_domain_get_irq_data(domain, virq + i); + struct avecintc_data *adata = kzalloc(sizeof(*adata), GFP_KERNEL); + int ret; + + if (!adata) + return -ENOMEM; + + ret = avecintc_alloc_vector(irqd, adata); + if (ret < 0) { + kfree(adata); + return ret; + } + + irq_domain_set_info(domain, virq + i, virq + i, &avec_irq_controller, + adata, handle_edge_irq, NULL, NULL); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); + } + + return 0; +} + +static void avecintc_free_vector(struct irq_data *irqd, struct avecintc_data *adata) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&loongarch_avec.lock, flags); + + per_cpu(irq_map, adata->cpu)[adata->vec] = NULL; + irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, false); + +#ifdef CONFIG_SMP + if (!adata->moving) { + raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); + return; + } + + per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL; + irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, adata->prev_vec, false); + list_del_init(&adata->entry); +#endif + raw_spin_unlock_irqrestore(&loongarch_avec.lock, flags); +} + +static void avecintc_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + for (unsigned int i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + + if (d) { + struct avecintc_data *adata = irq_data_get_irq_chip_data(d); + + avecintc_free_vector(d, adata); + irq_domain_reset_irq_data(d); + kfree(adata); + } + } +} + +static const struct irq_domain_ops 
avecintc_domain_ops = { + .alloc = avecintc_domain_alloc, + .free = avecintc_domain_free, +}; + +static int __init irq_matrix_init(void) +{ + loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS); + if (!loongarch_avec.vector_matrix) + return -ENOMEM; + + for (int i = 0; i < NR_LEGACY_VECTORS; i++) + irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false); + + irq_matrix_online(loongarch_avec.vector_matrix); + + return 0; +} + +static int __init avecintc_init(struct irq_domain *parent) +{ + int ret, parent_irq; + unsigned long value; + + raw_spin_lock_init(&loongarch_avec.lock); + + loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("AVECINTC"); + if (!loongarch_avec.fwnode) { + pr_err("Unable to allocate domain handle\n"); + ret = -ENOMEM; + goto out; + } + + loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode, + &avecintc_domain_ops, NULL); + if (!loongarch_avec.domain) { + pr_err("Unable to create IRQ domain\n"); + ret = -ENOMEM; + goto out_free_handle; + } + + parent_irq = irq_create_mapping(parent, INT_AVEC); + if (!parent_irq) { + pr_err("Failed to mapping hwirq\n"); + ret = -EINVAL; + goto out_remove_domain; + } + + ret = irq_matrix_init(); + if (ret < 0) { + pr_err("Failed to init irq matrix\n"); + goto out_remove_domain; + } + irq_set_chained_handler_and_data(parent_irq, avecintc_irq_dispatch, NULL); + +#ifdef CONFIG_SMP + pending_list_init(0); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_AVECINTC_STARTING, + "irqchip/loongarch/avecintc:starting", + avecintc_cpu_online, avecintc_cpu_offline); +#endif + value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC); + value |= IOCSR_MISC_FUNC_AVEC_EN; + iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC); + + return ret; + +out_remove_domain: + irq_domain_remove(loongarch_avec.domain); +out_free_handle: + irq_domain_free_fwnode(loongarch_avec.fwnode); +out: + return ret; +} + +static int __init pch_msi_parse_madt(union acpi_subtable_headers *header, + const unsigned long end) +{ + 
struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header; + + loongarch_avec.msi_base_addr = pchmsi_entry->msg_address - AVEC_MSG_OFFSET; + + return pch_msi_acpi_init_avec(loongarch_avec.domain); +} + +static inline int __init acpi_cascade_irqdomain_init(void) +{ + return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1); +} + +int __init avecintc_acpi_init(struct irq_domain *parent) +{ + int ret = avecintc_init(parent); + if (ret < 0) { + pr_err("Failed to init IRQ domain\n"); + return ret; + } + + ret = acpi_cascade_irqdomain_init(); + if (ret < 0) { + pr_err("Failed to init cascade IRQ domain\n"); + return ret; + } + + return ret; +} diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c index 4380b4d8dd20..c6e0c9849ba9 100644 --- a/drivers/irqchip/irq-loongarch-cpu.c +++ b/drivers/irqchip/irq-loongarch-cpu.c @@ -13,6 +13,8 @@ #include #include +#include "irq-loongson.h" + static struct irq_domain *irq_domain; struct fwnode_handle *cpuintc_handle; @@ -140,7 +142,10 @@ static int __init acpi_cascade_irqdomain_init(void) if (r < 0) return r; - return 0; + if (cpu_has_avecint) + r = avecintc_acpi_init(irq_domain); + + return r; } struct irq_domain *get_cpudomain(void) diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index 5c5462681c03..ca70e443c5a5 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -17,6 +17,8 @@ #include #include +#include "irq-loongson.h" + #define EIOINTC_REG_NODEMAP 0x14a0 #define EIOINTC_REG_IPMAP 0x14c0 #define EIOINTC_REG_ENABLE 0x1600 @@ -396,6 +398,9 @@ static int __init acpi_cascade_irqdomain_init(void) if (r < 0) return r; + if (cpu_has_avecint) + return 0; + r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1); if (r < 0) return r; @@ -443,8 +448,8 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq, if (nr_pics == 1) { 
register_syscore_ops(&eiointc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING, - "irqchip/loongarch/intc:starting", + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_EIOINTC_STARTING, + "irqchip/loongarch/eiointc:starting", eiointc_router_init, NULL); } diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c index 0bff728b25e3..5da02c7ad0b3 100644 --- a/drivers/irqchip/irq-loongson-htvec.c +++ b/drivers/irqchip/irq-loongson-htvec.c @@ -17,6 +17,8 @@ #include #include +#include "irq-loongson.h" + /* Registers */ #define HTVEC_EN_OFF 0x20 #define HTVEC_MAX_PARENT_IRQ 8 diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c index 7c4fe7ab4b83..2b1bd4a96665 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -22,6 +22,8 @@ #include #endif +#include "irq-loongson.h" + #define LIOINTC_CHIP_IRQ 32 #define LIOINTC_NUM_PARENT 4 #define LIOINTC_NUM_CORES 4 diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c index 9b35492fb6be..2d4c3ec128b8 100644 --- a/drivers/irqchip/irq-loongson-pch-lpc.c +++ b/drivers/irqchip/irq-loongson-pch-lpc.c @@ -15,6 +15,8 @@ #include #include +#include "irq-loongson.h" + /* Registers */ #define LPC_INT_CTL 0x00 #define LPC_INT_ENA 0x04 diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c index dd4d699170f4..2c9f58536fce 100644 --- a/drivers/irqchip/irq-loongson-pch-msi.c +++ b/drivers/irqchip/irq-loongson-pch-msi.c @@ -15,6 +15,8 @@ #include #include +#include "irq-loongson.h" + static int nr_pics; struct pch_msi_data { @@ -266,17 +268,17 @@ IRQCHIP_DECLARE(pch_msi, "loongson,pch-msi-1.0", pch_msi_of_init); #ifdef CONFIG_ACPI struct fwnode_handle *get_pch_msi_handle(int pci_segment) { - int i; + if (cpu_has_avecint) + return pch_msi_handle[0]; - for (i = 0; i < MAX_IO_PICS; i++) { + for (int i = 0; i < MAX_IO_PICS; i++) { if 
(msi_group[i].pci_segment == pci_segment) return pch_msi_handle[i]; } - return NULL; + return pch_msi_handle[0]; } -int __init pch_msi_acpi_init(struct irq_domain *parent, - struct acpi_madt_msi_pic *acpi_pchmsi) +int __init pch_msi_acpi_init(struct irq_domain *parent, struct acpi_madt_msi_pic *acpi_pchmsi) { int ret; struct fwnode_handle *domain_handle; @@ -289,4 +291,36 @@ int __init pch_msi_acpi_init(struct irq_domain *parent, return ret; } + +static struct irq_chip pch_msi_irq_chip_avec = { + .name = "PCH PCI MSI", + .irq_ack = irq_chip_ack_parent, +}; + +static struct msi_domain_info pch_msi_domain_info_avec = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + .chip = &pch_msi_irq_chip_avec, +}; + +int __init pch_msi_acpi_init_avec(struct irq_domain *parent) +{ + struct irq_domain *msi_domain; + + if (pch_msi_handle[0]) + return 0; + + pch_msi_handle[0] = parent->fwnode; + irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS); + + msi_domain = pci_msi_create_irq_domain(pch_msi_handle[0], + &pch_msi_domain_info_avec, parent); + if (!msi_domain) { + pr_err("Failed to create PCI MSI domain\n"); + kfree(pch_msi_handle[0]); + return -ENOMEM; + } + + return 0; +} #endif diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index 79bc3d132657..d2356e63e4d4 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -17,6 +17,8 @@ #include #include +#include "irq-loongson.h" + /* Registers */ #define PCH_PIC_MASK 0x20 #define PCH_PIC_HTMSI_EN 0x40 diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 44f1e762b1ec..2b5bc17a9ae7 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -152,7 +152,8 @@ enum cpuhp_state { CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING, CPUHP_AP_IRQ_RISCV_STARTING, - CPUHP_AP_IRQ_LOONGARCH_STARTING, + CPUHP_AP_IRQ_EIOINTC_STARTING, + 
CPUHP_AP_IRQ_AVECINTC_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_MICROCODE_LOADER, -- Gitee From 6017f6938cdf1738141acab6e4338e71eecb2e4a Mon Sep 17 00:00:00 2001 From: zhangtianyang Date: Wed, 16 Oct 2024 09:40:01 +0800 Subject: [PATCH 1533/2138] anolis: Loongarch: Adjust SHMLBA to PAGE_SIZE ANBZ: #11466 Signed-off-by: zhangtianyang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4022 --- arch/loongarch/include/asm/shmparam.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/loongarch/include/asm/shmparam.h b/arch/loongarch/include/asm/shmparam.h index c9554f48d2df..a564ee7f136b 100644 --- a/arch/loongarch/include/asm/shmparam.h +++ b/arch/loongarch/include/asm/shmparam.h @@ -7,6 +7,4 @@ #define __ARCH_FORCE_SHMLBA 1 -#define SHMLBA SZ_64K /* attach addr a multiple of this */ - #endif /* _ASM_SHMPARAM_H */ -- Gitee From 930f0014af9714e8a592b8248fdaf3cf6a91ad9d Mon Sep 17 00:00:00 2001 From: zhangtianyang Date: Thu, 17 Oct 2024 10:23:51 +0800 Subject: [PATCH 1534/2138] anolis: Loongarch: Use generic SHMLBA ANBZ: #11466 Signed-off-by: zhangtianyang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4022 --- arch/loongarch/include/asm/shmparam.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/loongarch/include/asm/shmparam.h b/arch/loongarch/include/asm/shmparam.h index a564ee7f136b..8af1e70cbf2c 100644 --- a/arch/loongarch/include/asm/shmparam.h +++ b/arch/loongarch/include/asm/shmparam.h @@ -6,5 +6,6 @@ #define _ASM_SHMPARAM_H #define __ARCH_FORCE_SHMLBA 1 +#include #endif /* _ASM_SHMPARAM_H */ -- Gitee From 02d57ff7524c1c8973d6964af2b87e1949e00761 Mon Sep 17 00:00:00 2001 From: gaojuxin Date: Sun, 29 Sep 2024 09:55:40 +0800 Subject: [PATCH 1535/2138] anolis: Loongarch: remove ARCH_WRITECOMBINE ANBZ: #11465 Temporarily close the WRITECOMBINE under the Loongson platform. 
Judging from the current tests, this function will have some impact on the stability testing of some peripherals. Signed-off-by: gaojuxin Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4021 --- arch/loongarch/configs/anolis_defconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig index aeff56d28799..c94c2e59b383 100644 --- a/arch/loongarch/configs/anolis_defconfig +++ b/arch/loongarch/configs/anolis_defconfig @@ -326,7 +326,6 @@ CONFIG_NUMA=y CONFIG_NODES_SHIFT=6 CONFIG_ARCH_FORCE_MAX_ORDER=11 CONFIG_ARCH_IOREMAP=y -CONFIG_ARCH_WRITECOMBINE=y CONFIG_ARCH_STRICT_ALIGN=y CONFIG_CPU_HAS_FPU=y CONFIG_CPU_HAS_LSX=y -- Gitee From 92275649bd37a0e79a8fbd9abc2e8f7ff85cc7f7 Mon Sep 17 00:00:00 2001 From: zhangtianyang Date: Mon, 14 Oct 2024 11:45:17 +0800 Subject: [PATCH 1536/2138] anolis: Loongarch: Dynamic enable writecombined ANBZ: #11465 Add the dynamic switch writecombined function under the loongson platform. 
Signed-off-by: zhangtianyang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4021 --- arch/loongarch/kernel/setup.c | 28 ++++++++++++++++++++++++++-- include/drm/drm_cache.h | 2 +- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 83d8e7662b06..721f89e00e2b 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -185,12 +185,14 @@ bool wc_enabled = false; EXPORT_SYMBOL(wc_enabled); +static int wc_arg = -1; + static int __init setup_writecombine(char *p) { if (!strcmp(p, "on")) - wc_enabled = true; + wc_arg = true; else if (!strcmp(p, "off")) - wc_enabled = false; + wc_arg = false; else pr_warn("Unknown writecombine setting \"%s\".\n", p); @@ -372,6 +374,26 @@ static void __init bootcmdline_init(char **cmdline_p) *cmdline_p = boot_command_line; } +static void __init writecombine_detect(void) +{ + u64 cpuname; + + if (wc_arg >= 0) { + wc_enabled = wc_arg; + return; + } + + cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME); + + switch (cpuname) { + case 0x0000303030364333: + wc_enabled = true; + break; + default: + break; + } +} + void __init platform_init(void) { arch_reserve_vmcore(); @@ -395,6 +417,8 @@ void __init platform_init(void) smbios_parse(); pr_info("The BIOS Version: %s\n", b_info.bios_version); + writecombine_detect(); + pr_info("WriteCombine: %s\n", wc_enabled ? "on":"off"); efi_runtime_init(); } diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h index 08e0e3ffad13..667fb0368ef6 100644 --- a/include/drm/drm_cache.h +++ b/include/drm/drm_cache.h @@ -74,7 +74,7 @@ static inline bool drm_arch_can_wc_memory(void) * cache coherency machanism. This means WUC can only used for write-only * memory regions. 
*/ - return false; + return wc_enabled; #else return true; #endif -- Gitee From 31e6c55dc822a77774949e8b00c846340a61845c Mon Sep 17 00:00:00 2001 From: Barry Song Date: Wed, 24 Jul 2024 14:00:56 +1200 Subject: [PATCH 1537/2138] mm: extend 'usage' parameter so that cluster_swap_free_nr() can be reused ANBZ: #9728 commit d2539ed7ee3b042e4503c304603d0eaa50c9c476 upstream. Extend a usage parameter so that cluster_swap_free_nr() can be reused by both swapcache_clear() and swap_free(). __swap_entry_free() is quite similar but more tricky as it requires the return value of __swap_entry_free_locked() which cluster_swap_free_nr() doesn't support. Link: https://lkml.kernel.org/r/20240724020056.65838-1-21cnbao@gmail.com Signed-off-by: Barry Song Reviewed-by: Ryan Roberts Acked-by: Chris Li Cc: "Huang, Ying" Cc: Kairui Song Cc: David Hildenbrand Cc: Chuanhua Han Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- mm/swapfile.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index eb5aaac66f4d..76217558d3aa 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1339,7 +1339,8 @@ static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry) } static void cluster_swap_free_nr(struct swap_info_struct *sis, - unsigned long offset, int nr_pages) + unsigned long offset, int nr_pages, + unsigned char usage) { struct swap_cluster_info *ci; DECLARE_BITMAP(to_free, BITS_PER_LONG) = { 0 }; @@ -1349,7 +1350,7 @@ static void cluster_swap_free_nr(struct swap_info_struct *sis, while (nr_pages) { nr = min(BITS_PER_LONG, nr_pages); for (i = 0; i < nr; i++) { - if (!__swap_entry_free_locked(sis, offset + i, 1)) + if (!__swap_entry_free_locked(sis, offset + i, usage)) bitmap_set(to_free, i, 1); } if (!bitmap_empty(to_free, BITS_PER_LONG)) { @@ -1383,7 +1384,7 @@ void swap_free_nr(swp_entry_t entry, int nr_pages) while 
(nr_pages) { nr = min_t(int, nr_pages, SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER); - cluster_swap_free_nr(sis, offset, nr); + cluster_swap_free_nr(sis, offset, nr, 1); offset += nr; nr_pages -= nr; } @@ -3465,15 +3466,9 @@ int swapcache_prepare(swp_entry_t entry) void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry) { - struct swap_cluster_info *ci; unsigned long offset = swp_offset(entry); - unsigned char usage; - ci = lock_cluster_or_swap_info(si, offset); - usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE); - unlock_cluster_or_swap_info(si, ci); - if (!usage) - free_swap_slot(entry); + cluster_swap_free_nr(si, offset, 1, SWAP_HAS_CACHE); } struct swap_info_struct *swp_swap_info(swp_entry_t entry) -- Gitee From 5fc65e5f665dc59d8c310e2f55008da1dfc1a8fc Mon Sep 17 00:00:00 2001 From: Barry Song Date: Wed, 21 Feb 2024 22:10:28 +1300 Subject: [PATCH 1538/2138] mm/swapfile:__swap_duplicate: drop redundant WRITE_ONCE on swap_map for err cases ANBZ: #9728 commit e26f0b939df49d17bda8d5faa4813a255734e8c8 upstream. The code is quite hard to read, we are still writing swap_map after errors happen. Though the written value is as before, has_cache = count & SWAP_HAS_CACHE; count &= ~SWAP_HAS_CACHE; [snipped] WRITE_ONCE(p->swap_map[offset], count | has_cache); It would be better to entirely drop the WRITE_ONCE for both performance and readability. 
[akpm@linux-foundation.org: avoid using goto] Link: https://lkml.kernel.org/r/20240221091028.123122-1-21cnbao@gmail.com Signed-off-by: Barry Song Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- mm/swapfile.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 76217558d3aa..fb963169c612 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3419,7 +3419,8 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage) } else err = -ENOENT; /* unused swap entry */ - WRITE_ONCE(p->swap_map[offset], count | has_cache); + if (!err) + WRITE_ONCE(p->swap_map[offset], count | has_cache); unlock_out: unlock_cluster_or_swap_info(p, ci); -- Gitee From 0983d1867c1f65aaef62afae13d2983816b49e17 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Tue, 30 Jul 2024 19:13:39 +1200 Subject: [PATCH 1539/2138] mm: swap: add nr argument in swapcache_prepare and swapcache_clear to support large folios ANBZ: #9728 commit 9f101bef408a3f70c44b6e4de44d3d4e2655ed10 upstream. Right now, swapcache_prepare() and swapcache_clear() supports one entry only, to support large folios, we need to handle multiple swap entries. To optimize stack usage, we iterate twice in __swap_duplicate(): the first time to verify that all entries are valid, and the second time to apply the modifications to the entries. Currently, we're using nr=1 for the existing users. 
[v-songbaohua@oppo.com: clarify swap_count_continued and improve readability for __swap_duplicate] Link: https://lkml.kernel.org/r/20240802071817.47081-1-21cnbao@gmail.com Link: https://lkml.kernel.org/r/20240730071339.107447-2-21cnbao@gmail.com Signed-off-by: Barry Song Reviewed-by: Baolin Wang Acked-by: David Hildenbrand Tested-by: Baolin Wang Cc: Chris Li Cc: Gao Xiang Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kairui Song Cc: Kalesh Singh Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Minchan Kim Cc: Nhat Pham Cc: Ryan Roberts Cc: Sergey Senozhatsky Cc: Shakeel Butt Cc: Suren Baghdasaryan Cc: Yang Shi Cc: Yosry Ahmed Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- include/linux/swap.h | 4 +- mm/memory.c | 6 +-- mm/swap.h | 5 ++- mm/swap_state.c | 2 +- mm/swapfile.c | 95 +++++++++++++++++++++++++------------------- 5 files changed, 63 insertions(+), 49 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index dfbd47e66ec0..a803fdd1db42 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -495,7 +495,7 @@ extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order); extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern void swap_shmem_alloc(swp_entry_t); extern int swap_duplicate(swp_entry_t); -extern int swapcache_prepare(swp_entry_t); +extern int swapcache_prepare(swp_entry_t entry, int nr); extern void swap_free_nr(swp_entry_t entry, int nr_pages); extern void swapcache_free_entries(swp_entry_t *entries, int n); extern void free_swap_and_cache_nr(swp_entry_t entry, int nr); @@ -570,7 +570,7 @@ static inline int swap_duplicate(swp_entry_t swp) return 0; } -static inline int swapcache_prepare(swp_entry_t swp) +static inline int swapcache_prepare(swp_entry_t swp, int nr) { return 0; } diff --git a/mm/memory.c b/mm/memory.c index 7b25e8831c9b..0c58ee25d8a4 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4148,7 
+4148,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) * reusing the same entry. It's undetectable as * pte_same() returns true due to entry reuse. */ - if (swapcache_prepare(entry)) { + if (swapcache_prepare(entry, 1)) { /* Relax a bit to prevent rapid repeated page faults */ schedule_timeout_uninterruptible(1); goto out; @@ -4441,7 +4441,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) out: /* Clear the swap cache pin for direct swapin after PTL unlock */ if (need_clear_cache) - swapcache_clear(si, entry); + swapcache_clear(si, entry, 1); if (si) put_swap_device(si); return ret; @@ -4457,7 +4457,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) folio_put(swapcache); } if (need_clear_cache) - swapcache_clear(si, entry); + swapcache_clear(si, entry, 1); if (si) put_swap_device(si); return ret; diff --git a/mm/swap.h b/mm/swap.h index 693d1b281559..500f99202776 100644 --- a/mm/swap.h +++ b/mm/swap.h @@ -38,7 +38,7 @@ void __delete_from_swap_cache(struct folio *folio, void delete_from_swap_cache(struct folio *folio); void clear_shadow_from_swap_cache(int type, unsigned long begin, unsigned long end); -void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry); +void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr); struct folio *swap_cache_get_folio(swp_entry_t entry, struct vm_area_struct *vma, unsigned long addr); struct folio *filemap_get_incore_folio(struct address_space *mapping, @@ -97,7 +97,7 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc) return 0; } -static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry) +static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr) { } @@ -149,4 +149,5 @@ static inline unsigned int folio_swap_flags(struct folio *folio) return 0; } #endif /* CONFIG_SWAP */ + #endif /* _MM_SWAP_H */ diff --git a/mm/swap_state.c b/mm/swap_state.c index 1e3497c7b634..cdbeb9e4a5b0 100644 --- a/mm/swap_state.c +++ 
b/mm/swap_state.c @@ -471,7 +471,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, /* * Swap entry may have been freed since our caller observed it. */ - err = swapcache_prepare(entry); + err = swapcache_prepare(entry, 1); if (!err) break; diff --git a/mm/swapfile.c b/mm/swapfile.c index fb963169c612..ddd9f65de953 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3357,7 +3357,7 @@ void si_swapinfo(struct sysinfo *val) } /* - * Verify that a swap entry is valid and increment its swap map count. + * Verify that nr swap entries are valid and increment their swap map counts. * * Returns error code in following case. * - success -> 0 @@ -3367,60 +3367,73 @@ void si_swapinfo(struct sysinfo *val) * - swap-cache reference is requested but the entry is not used. -> ENOENT * - swap-mapped reference requested but needs continued swap count. -> ENOMEM */ -static int __swap_duplicate(swp_entry_t entry, unsigned char usage) +static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr) { struct swap_info_struct *p; struct swap_cluster_info *ci; unsigned long offset; unsigned char count; unsigned char has_cache; - int err; + int err, i; p = swp_swap_info(entry); offset = swp_offset(entry); + VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER); + VM_WARN_ON(usage == 1 && nr > 1); ci = lock_cluster_or_swap_info(p, offset); - count = p->swap_map[offset]; - - /* - * swapin_readahead() doesn't check if a swap entry is valid, so the - * swap entry could be SWAP_MAP_BAD. Check here with lock held. - */ - if (unlikely(swap_count(count) == SWAP_MAP_BAD)) { - err = -ENOENT; - goto unlock_out; - } - - has_cache = count & SWAP_HAS_CACHE; - count &= ~SWAP_HAS_CACHE; err = 0; + for (i = 0; i < nr; i++) { + count = p->swap_map[offset + i]; - if (usage == SWAP_HAS_CACHE) { + /* + * swapin_readahead() doesn't check if a swap entry is valid, so the + * swap entry could be SWAP_MAP_BAD. Check here with lock held. 
+ */ + if (unlikely(swap_count(count) == SWAP_MAP_BAD)) { + err = -ENOENT; + goto unlock_out; + } - /* set SWAP_HAS_CACHE if there is no cache and entry is used */ - if (!has_cache && count) - has_cache = SWAP_HAS_CACHE; - else if (has_cache) /* someone else added cache */ - err = -EEXIST; - else /* no users remaining */ + has_cache = count & SWAP_HAS_CACHE; + count &= ~SWAP_HAS_CACHE; + + if (!count && !has_cache) { err = -ENOENT; + } else if (usage == SWAP_HAS_CACHE) { + if (has_cache) + err = -EEXIST; + } else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) { + err = -EINVAL; + } + + if (err) + goto unlock_out; + } - } else if (count || has_cache) { + for (i = 0; i < nr; i++) { + count = p->swap_map[offset + i]; + has_cache = count & SWAP_HAS_CACHE; + count &= ~SWAP_HAS_CACHE; - if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) + if (usage == SWAP_HAS_CACHE) + has_cache = SWAP_HAS_CACHE; + else if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) count += usage; - else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) - err = -EINVAL; - else if (swap_count_continued(p, offset, count)) + else if (swap_count_continued(p, offset + i, count)) count = COUNT_CONTINUED; - else + else { + /* + * Don't need to rollback changes, because if + * usage == 1, there must be nr == 1. 
+ */ err = -ENOMEM; - } else - err = -ENOENT; /* unused swap entry */ + goto unlock_out; + } - if (!err) - WRITE_ONCE(p->swap_map[offset], count | has_cache); + WRITE_ONCE(p->swap_map[offset + i], count | has_cache); + } unlock_out: unlock_cluster_or_swap_info(p, ci); @@ -3433,7 +3446,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage) */ void swap_shmem_alloc(swp_entry_t entry) { - __swap_duplicate(entry, SWAP_MAP_SHMEM); + __swap_duplicate(entry, SWAP_MAP_SHMEM, 1); } /* @@ -3447,29 +3460,29 @@ int swap_duplicate(swp_entry_t entry) { int err = 0; - while (!err && __swap_duplicate(entry, 1) == -ENOMEM) + while (!err && __swap_duplicate(entry, 1, 1) == -ENOMEM) err = add_swap_count_continuation(entry, GFP_ATOMIC); return err; } /* - * @entry: swap entry for which we allocate swap cache. + * @entry: first swap entry from which we allocate nr swap cache. * - * Called when allocating swap cache for existing swap entry, + * Called when allocating swap cache for existing swap entries, * This can return error codes. Returns 0 at success. * -EEXIST means there is a swap cache. * Note: return code is different from swap_duplicate(). 
*/ -int swapcache_prepare(swp_entry_t entry) +int swapcache_prepare(swp_entry_t entry, int nr) { - return __swap_duplicate(entry, SWAP_HAS_CACHE); + return __swap_duplicate(entry, SWAP_HAS_CACHE, nr); } -void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry) +void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr) { unsigned long offset = swp_offset(entry); - cluster_swap_free_nr(si, offset, 1, SWAP_HAS_CACHE); + cluster_swap_free_nr(si, offset, nr, SWAP_HAS_CACHE); } struct swap_info_struct *swp_swap_info(swp_entry_t entry) -- Gitee From 8c1b2ca2dba88d9e12d1f84aab2d609cbcbbfd13 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Mon, 6 May 2024 19:29:24 +0000 Subject: [PATCH 1540/2138] mm: do not update memcg stats for NR_{FILE/SHMEM}_PMDMAPPED ANBZ: #9728 commit 4f687281012e2da1a34cfe08ab10ea1361c600d2 upstream. Previously, all NR_VM_EVENT_ITEMS stats were maintained per-memcg, although some of those fields are not exposed anywhere. Commit 14e0f6c957e39 ("memcg: reduce memory for the lruvec and memcg stats") changed this such that we only maintain the stats we actually expose per-memcg via a translation table. Additionally, commit 514462bbe927b ("memcg: warn for unexpected events and stats") added a warning if a per-memcg stat update is attempted for a stat that is not in the translation table. The warning started firing for the NR_{FILE/SHMEM}_PMDMAPPED stat updates in the rmap code. These stats are not maintained per-memcg, and hence are not in the translation table. Do not use __lruvec_stat_mod_folio() when updating NR_FILE_PMDMAPPED and NR_SHMEM_PMDMAPPED. Use __mod_node_page_state() instead, which updates the global per-node stats only. 
Link: https://lkml.kernel.org/r/20240506192924.271999-1-yosryahmed@google.com Fixes: 514462bbe927 ("memcg: warn for unexpected events and stats") Signed-off-by: Yosry Ahmed Reported-by: syzbot+9319a4268a640e26b72b@syzkaller.appspotmail.com Closes: https://lore.kernel.org/lkml/0000000000001b9d500617c8b23c@google.com Acked-by: Shakeel Butt Acked-by: David Hildenbrand Reviewed-by: Roman Gushchin Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- mm/rmap.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index 0066ae501a42..78499559519c 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1415,13 +1415,14 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, enum rmap_level level) { + pg_data_t *pgdat = folio_pgdat(folio); int nr, nr_pmdmapped = 0; VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); if (nr_pmdmapped) - __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ? + __mod_node_page_state(pgdat, folio_test_swapbacked(folio) ? 
NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped); if (nr) __lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr); @@ -1473,6 +1474,7 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, enum rmap_level level) { atomic_t *mapped = &folio->_nr_pages_mapped; + pg_data_t *pgdat = folio_pgdat(folio); int last, nr = 0, nr_pmdmapped = 0; enum node_stat_item idx; @@ -1510,13 +1512,14 @@ static __always_inline void __folio_remove_rmap(struct folio *folio, } if (nr_pmdmapped) { + /* NR_{FILE/SHMEM}_PMDMAPPED are not maintained per-memcg */ if (folio_test_anon(folio)) - idx = NR_ANON_THPS; - else if (folio_test_swapbacked(folio)) - idx = NR_SHMEM_PMDMAPPED; + __lruvec_stat_mod_folio(folio, NR_ANON_THPS, -nr_pmdmapped); else - idx = NR_FILE_PMDMAPPED; - __lruvec_stat_mod_folio(folio, idx, -nr_pmdmapped); + __mod_node_page_state(pgdat, + folio_test_swapbacked(folio) ? + NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, + -nr_pmdmapped); } if (nr) { idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED; -- Gitee From 0935f236b072211026f99f1756397649b9187d09 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Tue, 18 Jun 2024 11:11:35 +1200 Subject: [PATCH 1541/2138] mm: extend rmap flags arguments for folio_add_new_anon_rmap MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #9728 commit 15bde4abab734c687c1f81704886aba3a70c268e upstream. Patch series "mm: clarify folio_add_new_anon_rmap() and __folio_add_anon_rmap()", v2. This patchset is preparatory work for mTHP swapin. folio_add_new_anon_rmap() assumes that new anon rmaps are always exclusive. However, this assumption doesn’t hold true for cases like do_swap_page(), where a new anon might be added to the swapcache and is not necessarily exclusive. The patchset extends the rmap flags to allow folio_add_new_anon_rmap() to handle both exclusive and non-exclusive new anon folios. The do_swap_page() function is updated to use this extended API with rmap flags. 
Consequently, all new anon folios now consistently use folio_add_new_anon_rmap(). The special case for !folio_test_anon() in __folio_add_anon_rmap() can be safely removed. In conclusion, new anon folios always use folio_add_new_anon_rmap(), regardless of exclusivity. Old anon folios continue to use __folio_add_anon_rmap() via folio_add_anon_rmap_pmd() and folio_add_anon_rmap_ptes(). This patch (of 3): In the case of a swap-in, a new anonymous folio is not necessarily exclusive. This patch updates the rmap flags to allow a new anonymous folio to be treated as either exclusive or non-exclusive. To maintain the existing behavior, we always use EXCLUSIVE as the default setting. [akpm@linux-foundation.org: cleanup and constifications per David and akpm] [v-songbaohua@oppo.com: fix missing doc for flags of folio_add_new_anon_rmap()] Link: https://lkml.kernel.org/r/20240619210641.62542-1-21cnbao@gmail.com [v-songbaohua@oppo.com: enhance doc for extend rmap flags arguments for folio_add_new_anon_rmap] Link: https://lkml.kernel.org/r/20240622030256.43775-1-21cnbao@gmail.com Link: https://lkml.kernel.org/r/20240617231137.80726-1-21cnbao@gmail.com Link: https://lkml.kernel.org/r/20240617231137.80726-2-21cnbao@gmail.com Signed-off-by: Barry Song Suggested-by: David Hildenbrand Tested-by: Shuai Yuan Acked-by: David Hildenbrand Cc: Baolin Wang Cc: Chris Li Cc: "Huang, Ying" Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Ryan Roberts Cc: Suren Baghdasaryan Cc: Yang Shi Cc: Yosry Ahmed Cc: Yu Zhao Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- include/linux/rmap.h | 2 +- kernel/events/uprobes.c | 2 +- mm/huge_memory.c | 2 +- mm/khugepaged.c | 2 +- mm/memory.c | 10 +++++----- mm/migrate_device.c | 2 +- mm/rmap.c | 25 ++++++++++++++++--------- mm/swapfile.c | 2 +- mm/userfaultfd.c | 2 +- 9 files changed, 28 insertions(+), 21 deletions(-) diff --git a/include/linux/rmap.h 
b/include/linux/rmap.h index 0d157c0ee981..fac9366b2dc7 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -239,7 +239,7 @@ void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages, void folio_add_anon_rmap_pmd(struct folio *, struct page *, struct vm_area_struct *, unsigned long address, rmap_t flags); void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *, - unsigned long address); + unsigned long address, rmap_t flags); void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages, struct vm_area_struct *); #define folio_add_file_rmap_pte(folio, page, vma) \ diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 7668f9219353..9b870747abb0 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -181,7 +181,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, if (new_page) { folio_get(new_folio); - folio_add_new_anon_rmap(new_folio, vma, addr); + folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(new_folio, vma); } else /* no new page, just dec_mm_counter for old_page */ diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 472548b7157a..2a8ae9f5522c 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1046,7 +1046,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf, entry = mk_huge_pmd(page, vma->vm_page_prot); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); - folio_add_new_anon_rmap(folio, vma, haddr); + folio_add_new_anon_rmap(folio, vma, haddr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 20327d0d1472..996087bc0fad 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1230,7 +1230,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, spin_lock(pmd_ptl); BUG_ON(!pmd_none(*pmd)); - 
folio_add_new_anon_rmap(folio, vma, address); + folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); pgtable_trans_huge_deposit(mm, pmd, pgtable); set_pmd_at(mm, address, pmd, _pmd); diff --git a/mm/memory.c b/mm/memory.c index 0c58ee25d8a4..51193f6b34dc 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -915,7 +915,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma *prealloc = NULL; copy_user_highpage(&new_folio->page, page, addr, src_vma); __folio_mark_uptodate(new_folio); - folio_add_new_anon_rmap(new_folio, dst_vma, addr); + folio_add_new_anon_rmap(new_folio, dst_vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(new_folio, dst_vma); rss[MM_ANONPAGES]++; @@ -3482,7 +3482,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * some TLBs while the old PTE remains in others. */ ptep_clear_flush(vma, vmf->address, vmf->pte); - folio_add_new_anon_rmap(new_folio, vma, vmf->address); + folio_add_new_anon_rmap(new_folio, vma, vmf->address, RMAP_EXCLUSIVE); folio_add_lru_vma(new_folio, vma); /* * We call the notify macro here because, when using secondary @@ -4399,7 +4399,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) /* ksm created a completely new copy */ if (unlikely(folio != swapcache && swapcache)) { - folio_add_new_anon_rmap(folio, vma, address); + folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); } else { folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address, @@ -4649,7 +4649,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) folio_ref_add(folio, nr_pages - 1); add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages); count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC); - folio_add_new_anon_rmap(folio, vma, addr); + folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); setpte: if (uffd_wp) @@ -4858,7 +4858,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio, if (write && 
!(vma->vm_flags & VM_SHARED)) { add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr); VM_BUG_ON_FOLIO(nr != 1, folio); - folio_add_new_anon_rmap(folio, vma, addr); + folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); } else { add_mm_counter(vma->vm_mm, mm_counter_file(page), nr); diff --git a/mm/migrate_device.c b/mm/migrate_device.c index b6c27c76e1a0..1bebdfae2286 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -656,7 +656,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, goto unlock_abort; inc_mm_counter(mm, MM_ANONPAGES); - folio_add_new_anon_rmap(folio, vma, addr); + folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); if (!folio_is_zone_device(folio)) folio_add_lru_vma(folio, vma); folio_get(folio); diff --git a/mm/rmap.c b/mm/rmap.c index 78499559519c..7791ad341857 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1365,29 +1365,34 @@ void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, * @folio: The folio to add the mapping to. * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped + * @flags: The rmap flags * * Like folio_add_anon_rmap_*() but must only be called on *new* folios. * This means the inc-and-test can be bypassed. - * The folio does not have to be locked. + * The folio doesn't necessarily need to be locked while it's exclusive + * unless two threads map it concurrently. However, the folio must be + * locked if it's shared. * - * If the folio is pmd-mappable, it is accounted as a THP. As the folio - * is new, it's assumed to be mapped exclusively by a single process. + * If the folio is pmd-mappable, it is accounted as a THP. 
*/ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, - unsigned long address) + unsigned long address, rmap_t flags) { - int nr = folio_nr_pages(folio); + const int nr = folio_nr_pages(folio); + const bool exclusive = flags & RMAP_EXCLUSIVE; VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio); VM_BUG_ON_VMA(address < vma->vm_start || address + (nr << PAGE_SHIFT) > vma->vm_end, vma); __folio_set_swapbacked(folio); - __folio_set_anon(folio, vma, address, true); + __folio_set_anon(folio, vma, address, exclusive); if (likely(!folio_test_large(folio))) { /* increment count (starts at -1) */ atomic_set(&folio->_mapcount, 0); - SetPageAnonExclusive(&folio->page); + if (exclusive) + SetPageAnonExclusive(&folio->page); } else if (!folio_test_pmd_mappable(folio)) { int i; @@ -1396,7 +1401,8 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, /* increment count (starts at -1) */ atomic_set(&page->_mapcount, 0); - SetPageAnonExclusive(page); + if (exclusive) + SetPageAnonExclusive(page); } atomic_set(&folio->_nr_pages_mapped, nr); @@ -1404,7 +1410,8 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, /* increment count (starts at -1) */ atomic_set(&folio->_entire_mapcount, 0); atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); - SetPageAnonExclusive(&folio->page); + if (exclusive) + SetPageAnonExclusive(&folio->page); __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr); } diff --git a/mm/swapfile.c b/mm/swapfile.c index ddd9f65de953..752412f129d0 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1907,7 +1907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags); } else { /* ksm created a completely new copy */ - folio_add_new_anon_rmap(folio, vma, addr); + folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); } new_pte 
= pte_mkold(mk_pte(page, vma->vm_page_prot)); diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 2031e1d5b2d7..79ec6c3387bb 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -116,7 +116,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd, folio_add_lru(folio); folio_add_file_rmap_pte(folio, page, dst_vma); } else { - folio_add_new_anon_rmap(folio, dst_vma, dst_addr); + folio_add_new_anon_rmap(folio, dst_vma, dst_addr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, dst_vma); } -- Gitee From 306a7589997d8723a38966999ce306398e37e681 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Tue, 18 Jun 2024 11:11:36 +1200 Subject: [PATCH 1542/2138] mm: use folio_add_new_anon_rmap() if folio_test_anon(folio)==false ANBZ: #9728 commit 9ae2feacedde16067014f11414675f385c68eedc upstream. For the !folio_test_anon(folio) case, we can now invoke folio_add_new_anon_rmap() with the rmap flags set to either EXCLUSIVE or non-EXCLUSIVE. This action will suppress the VM_WARN_ON_FOLIO check within __folio_add_anon_rmap() while initiating the process of bringing up mTHP swapin. static __always_inline void __folio_add_anon_rmap(struct folio *folio, struct page *page, int nr_pages, struct vm_area_struct *vma, unsigned long address, rmap_t flags, enum rmap_level level) { ... if (unlikely(!folio_test_anon(folio))) { VM_WARN_ON_FOLIO(folio_test_large(folio) && level != RMAP_LEVEL_PMD, folio); } ... } It also improves the code's readability. Currently, all new anonymous folios calling folio_add_anon_rmap_ptes() are order-0. This ensures that new folios cannot be partially exclusive; they are either entirely exclusive or entirely shared. A useful comment from Hugh's fix: : Commit "mm: use folio_add_new_anon_rmap() if folio_test_anon(folio)== : false" has extended folio_add_new_anon_rmap() to use on non-exclusive : folios, already visible to others in swap cache and on LRU. 
: : That renders its non-atomic __folio_set_swapbacked() unsafe: it risks : overwriting concurrent atomic operations on folio->flags, losing bits : added or restoring bits cleared. Since it's only used in this risky way : when folio_test_locked and !folio_test_anon, many such races are excluded; : but, for example, isolations by folio_test_clear_lru() are vulnerable, and : setting or clearing active. : : It could just use the atomic folio_set_swapbacked(); but this function : does try to avoid atomics where it can, so use a branch instead: just : avoid setting swapbacked when it is already set, that is good enough. : (Swapbacked is normally stable once set: lazyfree can undo it, but only : later, when found anon in a page table.) : : This fixes a lot of instability under compaction and swapping loads: : assorted "Bad page"s, VM_BUG_ON_FOLIO()s, apparently even page double : frees - though I've not worked out what races could lead to the latter. [akpm@linux-foundation.org: comment fixes, per David and akpm] [v-songbaohua@oppo.com: lock the folio to avoid race] Link: https://lkml.kernel.org/r/20240622032002.53033-1-21cnbao@gmail.com [hughd@google.com: folio_add_new_anon_rmap() careful __folio_set_swapbacked()] Link: https://lkml.kernel.org/r/f3599b1d-8323-0dc5-e9e0-fdb3cfc3dd5a@google.com Link: https://lkml.kernel.org/r/20240617231137.80726-3-21cnbao@gmail.com Signed-off-by: Barry Song Signed-off-by: Hugh Dickins Suggested-by: David Hildenbrand Tested-by: Shuai Yuan Acked-by: David Hildenbrand Cc: Baolin Wang Cc: Chris Li Cc: "Huang, Ying" Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Ryan Roberts Cc: Suren Baghdasaryan Cc: Yang Shi Cc: Yosry Ahmed Cc: Yu Zhao Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- mm/memory.c | 9 +++++++++ mm/rmap.c | 4 +++- mm/swapfile.c | 14 ++++++++++++-- 3 files changed, 24 insertions(+), 3 deletions(-) diff --git 
a/mm/memory.c b/mm/memory.c index 51193f6b34dc..52a1a3c3165d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4401,6 +4401,15 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) if (unlikely(folio != swapcache && swapcache)) { folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); + } else if (!folio_test_anon(folio)) { + /* + * We currently only expect small !anon folios, which are either + * fully exclusive or fully shared. If we ever get large folios + * here, we have to be careful. + */ + VM_WARN_ON_ONCE(folio_test_large(folio)); + VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); + folio_add_new_anon_rmap(folio, vma, address, rmap_flags); } else { folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address, rmap_flags); diff --git a/mm/rmap.c b/mm/rmap.c index 7791ad341857..0eb12fadb2d4 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1385,7 +1385,9 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio); VM_BUG_ON_VMA(address < vma->vm_start || address + (nr << PAGE_SHIFT) > vma->vm_end, vma); - __folio_set_swapbacked(folio); + + if (!folio_test_swapbacked(folio)) + __folio_set_swapbacked(folio); __folio_set_anon(folio, vma, address, exclusive); if (likely(!folio_test_large(folio))) { diff --git a/mm/swapfile.c b/mm/swapfile.c index 752412f129d0..a171a00cd097 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1904,8 +1904,18 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio); if (pte_swp_exclusive(old_pte)) rmap_flags |= RMAP_EXCLUSIVE; - - folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags); + /* + * We currently only expect small !anon folios, which are either + * fully exclusive or fully shared. If we ever get large folios + * here, we have to be careful. 
+ */ + if (!folio_test_anon(folio)) { + VM_WARN_ON_ONCE(folio_test_large(folio)); + VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); + folio_add_new_anon_rmap(folio, vma, addr, rmap_flags); + } else { + folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags); + } } else { /* ksm created a completely new copy */ folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); -- Gitee From 6431dd9f3ce649b6e039625c1211fea3f37de748 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Tue, 18 Jun 2024 11:11:37 +1200 Subject: [PATCH 1543/2138] mm: remove folio_test_anon(folio)==false path in __folio_add_anon_rmap() ANBZ: #9728 commit 4c1171f1d22484f2419b07ab688548350db521cb upstream. The folio_test_anon(folio)==false cases has been relocated to folio_add_new_anon_rmap(). Additionally, four other callers consistently pass anonymous folios. stack 1: remove_migration_pmd -> folio_add_anon_rmap_pmd -> __folio_add_anon_rmap stack 2: __split_huge_pmd_locked -> folio_add_anon_rmap_ptes -> __folio_add_anon_rmap stack 3: remove_migration_pmd -> folio_add_anon_rmap_pmd -> __folio_add_anon_rmap (RMAP_LEVEL_PMD) stack 4: try_to_merge_one_page -> replace_page -> folio_add_anon_rmap_pte -> __folio_add_anon_rmap __folio_add_anon_rmap() only needs to handle the cases folio_test_anon(folio)==true now. We can remove the !folio_test_anon(folio)) path within __folio_add_anon_rmap() now. 
Link: https://lkml.kernel.org/r/20240617231137.80726-4-21cnbao@gmail.com Signed-off-by: Barry Song Suggested-by: David Hildenbrand Tested-by: Shuai Yuan Acked-by: David Hildenbrand Cc: Baolin Wang Cc: Chris Li Cc: "Huang, Ying" Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Ryan Roberts Cc: Suren Baghdasaryan Cc: Yang Shi Cc: Yosry Ahmed Cc: Yu Zhao Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- mm/rmap.c | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/mm/rmap.c b/mm/rmap.c index 0eb12fadb2d4..cea4983acbe0 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1259,27 +1259,16 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio, { int i, nr, nr_pmdmapped = 0; + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped); if (nr_pmdmapped) __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr_pmdmapped); if (nr) __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); - if (unlikely(!folio_test_anon(folio))) { - VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); - /* - * For a PTE-mapped large folio, we only know that the single - * PTE is exclusive. Further, __folio_set_anon() might not get - * folio->index right when not given the address of the head - * page. 
- */ - VM_WARN_ON_FOLIO(folio_test_large(folio) && - level != RMAP_LEVEL_PMD, folio); - __folio_set_anon(folio, vma, address, - !!(flags & RMAP_EXCLUSIVE)); - } else if (likely(!folio_test_ksm(folio))) { + if (likely(!folio_test_ksm(folio))) __page_check_anon_rmap(folio, page, vma, address); - } if (flags & RMAP_EXCLUSIVE) { switch (level) { -- Gitee From 030d28162886051a057b778b2a988eac80bd5a77 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Tue, 11 Jun 2024 02:45:15 +0000 Subject: [PATCH 1544/2138] mm: zswap: add zswap_never_enabled() ANBZ: #9728 commit 2d4d2b1cfb85cc07f6d5619acb882d8b11e55cf4 upstream. Add zswap_never_enabled() to skip the xarray lookup in zswap_load() if zswap was never enabled on the system. It is implemented using static branches for efficiency, as enabling zswap should be a rare event. This could shave some cycles off zswap_load() when CONFIG_ZSWAP is used but zswap is never enabled. However, the real motivation behind this patch is two-fold: - Incoming large folio swapin work will need to fallback to order-0 folios if zswap was ever enabled, because any part of the folio could be in zswap, until proper handling of large folios with zswap is added. - A warning and recovery attempt will be added in a following change in case the above was not done incorrectly. Zswap will fail the read if the folio is large and it was ever enabled. Expose zswap_never_enabled() in the header for the swapin work to use it later. 
[yosryahmed@google.com: expose zswap_never_enabled() in the header] Link: https://lkml.kernel.org/r/Zmjf0Dr8s9xSW41X@google.com Link: https://lkml.kernel.org/r/20240611024516.1375191-2-yosryahmed@google.com Signed-off-by: Yosry Ahmed Reviewed-by: Nhat Pham Cc: Barry Song Cc: Chengming Zhou Cc: Chris Li Cc: David Hildenbrand Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- include/linux/zswap.h | 6 ++++++ mm/zswap.c | 10 ++++++++++ 2 files changed, 16 insertions(+) diff --git a/include/linux/zswap.h b/include/linux/zswap.h index 2a60ce39cfde..4683a8fe0e5b 100644 --- a/include/linux/zswap.h +++ b/include/linux/zswap.h @@ -15,6 +15,7 @@ bool zswap_load(struct folio *folio); void zswap_invalidate(int type, pgoff_t offset); void zswap_swapon(int type); void zswap_swapoff(int type); +bool zswap_never_enabled(void); #else @@ -32,6 +33,11 @@ static inline void zswap_invalidate(int type, pgoff_t offset) {} static inline void zswap_swapon(int type) {} static inline void zswap_swapoff(int type) {} +static inline bool zswap_never_enabled(void) +{ + return false; +} + #endif #endif /* _LINUX_ZSWAP_H */ diff --git a/mm/zswap.c b/mm/zswap.c index 69681b9173fd..8ad7c97fac19 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -84,6 +84,7 @@ static bool zswap_pool_reached_full; static int zswap_setup(void); /* Enable/disable zswap */ +static DEFINE_STATIC_KEY_MAYBE(CONFIG_ZSWAP_DEFAULT_ON, zswap_ever_enabled); static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON); static int zswap_enabled_param_set(const char *, const struct kernel_param *); @@ -144,6 +145,11 @@ module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644); /* Number of zpools in zswap_pool (empirically determined for scalability) */ #define ZSWAP_NR_ZPOOLS 32 +bool zswap_never_enabled(void) +{ + return 
!static_branch_maybe(CONFIG_ZSWAP_DEFAULT_ON, &zswap_ever_enabled); +} + /********************************* * data structures **********************************/ @@ -1410,6 +1416,9 @@ bool zswap_load(struct folio *folio) VM_WARN_ON_ONCE(!folio_test_locked(folio)); + if (zswap_never_enabled()) + return false; + /* find */ spin_lock(&tree->lock); entry = zswap_entry_find_get(&tree->rbroot, offset); @@ -1611,6 +1620,7 @@ static int zswap_setup(void) zpool_get_type(pool->zpools[0])); list_add(&pool->list, &zswap_pools); zswap_has_pool = true; + static_branch_enable(&zswap_ever_enabled); } else { pr_err("pool creation failed\n"); zswap_enabled = false; -- Gitee From c480062f80fc64c82d9e7c29b37d8fd3f86c652c Mon Sep 17 00:00:00 2001 From: Barry Song Date: Sun, 30 Jun 2024 11:22:31 +1200 Subject: [PATCH 1545/2138] mm: zswap: fix zswap_never_enabled() for CONFIG_ZSWAP==N ANBZ: #9728 commit 259043e3b730e0aa6408bff27af7edf7a5c9101c upstream. If CONFIG_ZSWAP is set to N, it means zswap cannot be enabled. zswap_never_enabled() should return true. The only effect of this issue is that with Barry's latest large folio swapin patches for zram ("mm: support mTHP swap-in for zRAM-like swapfile"), we will always fallback to order-0 swapin, even mistakenly when !CONFIG_ZSWAP. Basically this bug makes Barry's in progress patches not work at all. The API was created to inform the mm core that zswap has never been enabled, allowing the mm core to perform mTHP swap-in. This is a transitional solution until zswap supports mTHP. If zswap has been enabled, performing mTHP swap-in will result in corrupted data. 
You may find the answer in the mTHP swap-in series: https://lore.kernel.org/linux-mm/CAJD7tkZ4FQr6HZpduOdvmqgg_-whuZYE-Bz5O2t6yzw6Yg+v1A@mail.gmail.com/ Link: https://lkml.kernel.org/r/20240629232231.42394-1-21cnbao@gmail.com Fixes: 0300e17d67c3 ("mm: zswap: add zswap_never_enabled()") Signed-off-by: Barry Song Reviewed-by: Chengming Zhou Acked-by: Yosry Ahmed Acked-by: Chris Li Acked-by: David Hildenbrand Reviewed-by: Nhat Pham Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- include/linux/zswap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/zswap.h b/include/linux/zswap.h index 4683a8fe0e5b..f647ee38598a 100644 --- a/include/linux/zswap.h +++ b/include/linux/zswap.h @@ -35,7 +35,7 @@ static inline void zswap_swapoff(int type) {} static inline bool zswap_never_enabled(void) { - return false; + return true; } #endif -- Gitee From e1ec0b505b54589936e9c73d19c8e79970a9a8c6 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Tue, 11 Jun 2024 02:45:16 +0000 Subject: [PATCH 1546/2138] mm: zswap: handle incorrect attempts to load large folios ANBZ: #9728 commit c63f210d4891f5b1b1057a0d7c91d2b0d15431d1 upstream. Zswap does not support storing or loading large folios. Until proper support is added, attempts to load large folios from zswap are a bug. For example, if a swapin fault observes that contiguous PTEs are pointing to contiguous swap entries and tries to swap them in as a large folio, swap_read_folio() will pass in a large folio to zswap_load(), but zswap_load() will only effectively load the first page in the folio. If the first page is not in zswap, the folio will be read from disk, even though other pages may be in zswap. In both cases, this will lead to silent data corruption. Proper support needs to be added before large folio swapins and zswap can work together. 
Looking at callers of swap_read_folio(), it seems like they are either allocated from __read_swap_cache_async() or do_swap_page() in the SWP_SYNCHRONOUS_IO path. Both of which allocate order-0 folios, so everything is fine for now. However, there is ongoing work to add to support large folio swapins [1]. To make sure new development does not break zswap (or get broken by zswap), add minimal handling of incorrect loads of large folios to zswap. First, move the call folio_mark_uptodate() inside zswap_load(). If a large folio load is attempted, and zswap was ever enabled on the system, return 'true' without calling folio_mark_uptodate(). This will prevent the folio from being read from disk, and will emit an IO error because the folio is not uptodate (e.g. do_swap_fault() will return VM_FAULT_SIGBUS). It may not be reliable recovery in all cases, but it is better than nothing. This was tested by hacking the allocation in __read_swap_cache_async() to use order 2 and __GFP_COMP. In the future, to handle this correctly, the swapin code should: (a) Fall back to order-0 swapins if zswap was ever used on the machine, because compressed pages remain in zswap after it is disabled. (b) Add proper support to swapin large folios from zswap (fully or partially). Probably start with (a) then followup with (b). 
[1]https://lore.kernel.org/linux-mm/20240304081348.197341-6-21cnbao@gmail.com/ Link: https://lkml.kernel.org/r/20240611024516.1375191-3-yosryahmed@google.com Signed-off-by: Yosry Ahmed Acked-by: Barry Song Cc: Barry Song Cc: Chengming Zhou Cc: Chris Li Cc: David Hildenbrand Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Cc: Nhat Pham Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- mm/page_io.c | 1 - mm/zswap.c | 13 +++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/mm/page_io.c b/mm/page_io.c index 7d402c66a01f..42a11cee4a46 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -515,7 +515,6 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug) delayacct_swapin_start(); if (zswap_load(folio)) { - folio_mark_uptodate(folio); folio_unlock(folio); } else if (data_race(sis->flags & SWP_FS_OPS)) { swap_readpage_fs(page, plug); diff --git a/mm/zswap.c b/mm/zswap.c index 8ad7c97fac19..25ee19e45be8 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -1419,6 +1419,17 @@ bool zswap_load(struct folio *folio) if (zswap_never_enabled()) return false; + /* + * Large folios should not be swapped in while zswap is being used, as + * they are not properly handled. Zswap does not properly load large + * folios, and a large folio may only be partially in zswap. + * + * Return true without marking the folio uptodate so that an IO error is + * emitted (e.g. do_swap_page() will sigbus). 
+ */ + if (WARN_ON_ONCE(folio_test_large(folio))) + return true; + /* find */ spin_lock(&tree->lock); entry = zswap_entry_find_get(&tree->rbroot, offset); @@ -1488,6 +1499,8 @@ bool zswap_load(struct folio *folio) zswap_entry_put(tree, entry); spin_unlock(&tree->lock); + if (ret) + folio_mark_uptodate(folio); return ret; } -- Gitee From 68e4f1937b5dec6655f13153329d76a05ba8a79a Mon Sep 17 00:00:00 2001 From: Chris Li Date: Tue, 30 Jul 2024 23:49:13 -0700 Subject: [PATCH 1547/2138] mm: swap: swap cluster switch to double link list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #9728 commit 73ed0baae66df50359c876f65f41179d6ebd2716 upstream. Patch series "mm: swap: mTHP swap allocator base on swap cluster order", v5. This is the short term solutions "swap cluster order" listed in my "Swap Abstraction" discussion slice 8 in the recent LSF/MM conference. When commit 845982eb264bc "mm: swap: allow storage of all mTHP orders" is introduced, it only allocates the mTHP swap entries from the new empty cluster list.  It has a fragmentation issue reported by Barry. https://lore.kernel.org/all/CAGsJ_4zAcJkuW016Cfi6wicRr8N9X+GJJhgMQdSMp+Ah+NSgNQ@mail.gmail.com/ The reason is that all the empty clusters have been exhausted while there are plenty of free swap entries in the cluster that are not 100% free. Remember the swap allocation order in the cluster. Keep track of the per order non full cluster list for later allocation. This series gives the swap SSD allocation a new separate code path from the HDD allocation. The new allocator use cluster list only and do not global scan swap_map[] without lock any more. This streamline the swap allocation for SSD. The code matches the execution flow much better. User impact: For users that allocate and free mix order mTHP swapping, It greatly improves the success rate of the mTHP swap allocation after the initial phase. 
It also performs faster when the swapfile is close to full, because the allocator can get the non full cluster from a list rather than scanning a lot of swap_map entries.  With Barry's mthp test program V2: Without: $ ./thp_swap_allocator_test -a Iteration 1: swpout inc: 32, swpout fallback inc: 192, Fallback percentage: 85.71% Iteration 2: swpout inc: 0, swpout fallback inc: 231, Fallback percentage: 100.00% Iteration 3: swpout inc: 0, swpout fallback inc: 227, Fallback percentage: 100.00% ... Iteration 98: swpout inc: 0, swpout fallback inc: 224, Fallback percentage: 100.00% Iteration 99: swpout inc: 0, swpout fallback inc: 215, Fallback percentage: 100.00% Iteration 100: swpout inc: 0, swpout fallback inc: 222, Fallback percentage: 100.00% $ ./thp_swap_allocator_test -a -s Iteration 1: swpout inc: 0, swpout fallback inc: 224, Fallback percentage: 100.00% Iteration 2: swpout inc: 0, swpout fallback inc: 218, Fallback percentage: 100.00% Iteration 3: swpout inc: 0, swpout fallback inc: 222, Fallback percentage: 100.00% .. Iteration 98: swpout inc: 0, swpout fallback inc: 228, Fallback percentage: 100.00% Iteration 99: swpout inc: 0, swpout fallback inc: 230, Fallback percentage: 100.00% Iteration 100: swpout inc: 0, swpout fallback inc: 229, Fallback percentage: 100.00% $ ./thp_swap_allocator_test -s Iteration 1: swpout inc: 0, swpout fallback inc: 224, Fallback percentage: 100.00% Iteration 2: swpout inc: 0, swpout fallback inc: 218, Fallback percentage: 100.00% Iteration 3: swpout inc: 0, swpout fallback inc: 222, Fallback percentage: 100.00% .. 
Iteration 98: swpout inc: 0, swpout fallback inc: 228, Fallback percentage: 100.00% Iteration 99: swpout inc: 0, swpout fallback inc: 230, Fallback percentage: 100.00% Iteration 100: swpout inc: 0, swpout fallback inc: 229, Fallback percentage: 100.00% $ ./thp_swap_allocator_test Iteration 1: swpout inc: 0, swpout fallback inc: 224, Fallback percentage: 100.00% Iteration 2: swpout inc: 0, swpout fallback inc: 218, Fallback percentage: 100.00% Iteration 3: swpout inc: 0, swpout fallback inc: 222, Fallback percentage: 100.00% .. Iteration 98: swpout inc: 0, swpout fallback inc: 228, Fallback percentage: 100.00% Iteration 99: swpout inc: 0, swpout fallback inc: 230, Fallback percentage: 100.00% Iteration 100: swpout inc: 0, swpout fallback inc: 229, Fallback percentage: 100.00% With: # with all 0.00% filter out $ ./thp_swap_allocator_test -a | grep -v "0.00%" $ # all result are 0.00% $ ./thp_swap_allocator_test -a -s | grep -v "0.00%" ./thp_swap_allocator_test -a -s | grep -v "0.00%" Iteration 14: swpout inc: 223, swpout fallback inc: 3, Fallback percentage: 1.33% Iteration 19: swpout inc: 219, swpout fallback inc: 7, Fallback percentage: 3.10% Iteration 28: swpout inc: 225, swpout fallback inc: 1, Fallback percentage: 0.44% Iteration 29: swpout inc: 227, swpout fallback inc: 1, Fallback percentage: 0.44% Iteration 34: swpout inc: 220, swpout fallback inc: 8, Fallback percentage: 3.51% Iteration 35: swpout inc: 222, swpout fallback inc: 11, Fallback percentage: 4.72% Iteration 38: swpout inc: 217, swpout fallback inc: 4, Fallback percentage: 1.81% Iteration 40: swpout inc: 222, swpout fallback inc: 6, Fallback percentage: 2.63% Iteration 42: swpout inc: 221, swpout fallback inc: 2, Fallback percentage: 0.90% Iteration 43: swpout inc: 215, swpout fallback inc: 7, Fallback percentage: 3.15% Iteration 47: swpout inc: 226, swpout fallback inc: 2, Fallback percentage: 0.88% Iteration 49: swpout inc: 217, swpout fallback inc: 1, Fallback percentage: 0.46% Iteration 52: 
swpout inc: 221, swpout fallback inc: 8, Fallback percentage: 3.49% Iteration 56: swpout inc: 224, swpout fallback inc: 4, Fallback percentage: 1.75% Iteration 58: swpout inc: 214, swpout fallback inc: 5, Fallback percentage: 2.28% Iteration 62: swpout inc: 220, swpout fallback inc: 3, Fallback percentage: 1.35% Iteration 64: swpout inc: 224, swpout fallback inc: 1, Fallback percentage: 0.44% Iteration 67: swpout inc: 221, swpout fallback inc: 1, Fallback percentage: 0.45% Iteration 75: swpout inc: 220, swpout fallback inc: 9, Fallback percentage: 3.93% Iteration 82: swpout inc: 227, swpout fallback inc: 1, Fallback percentage: 0.44% Iteration 86: swpout inc: 211, swpout fallback inc: 12, Fallback percentage: 5.38% Iteration 89: swpout inc: 226, swpout fallback inc: 2, Fallback percentage: 0.88% Iteration 93: swpout inc: 220, swpout fallback inc: 1, Fallback percentage: 0.45% Iteration 94: swpout inc: 224, swpout fallback inc: 1, Fallback percentage: 0.44% Iteration 96: swpout inc: 221, swpout fallback inc: 6, Fallback percentage: 2.64% Iteration 98: swpout inc: 227, swpout fallback inc: 1, Fallback percentage: 0.44% Iteration 99: swpout inc: 227, swpout fallback inc: 3, Fallback percentage: 1.30% $ ./thp_swap_allocator_test ./thp_swap_allocator_test Iteration 1: swpout inc: 233, swpout fallback inc: 0, Fallback percentage: 0.00% Iteration 2: swpout inc: 131, swpout fallback inc: 101, Fallback percentage: 43.53% Iteration 3: swpout inc: 71, swpout fallback inc: 155, Fallback percentage: 68.58% Iteration 4: swpout inc: 55, swpout fallback inc: 168, Fallback percentage: 75.34% Iteration 5: swpout inc: 35, swpout fallback inc: 191, Fallback percentage: 84.51% Iteration 6: swpout inc: 25, swpout fallback inc: 199, Fallback percentage: 88.84% Iteration 7: swpout inc: 23, swpout fallback inc: 205, Fallback percentage: 89.91% Iteration 8: swpout inc: 9, swpout fallback inc: 219, Fallback percentage: 96.05% Iteration 9: swpout inc: 13, swpout fallback inc: 213, Fallback 
percentage: 94.25% Iteration 10: swpout inc: 12, swpout fallback inc: 216, Fallback percentage: 94.74% Iteration 11: swpout inc: 16, swpout fallback inc: 213, Fallback percentage: 93.01% Iteration 12: swpout inc: 10, swpout fallback inc: 210, Fallback percentage: 95.45% Iteration 13: swpout inc: 16, swpout fallback inc: 212, Fallback percentage: 92.98% Iteration 14: swpout inc: 12, swpout fallback inc: 212, Fallback percentage: 94.64% Iteration 15: swpout inc: 15, swpout fallback inc: 211, Fallback percentage: 93.36% Iteration 16: swpout inc: 15, swpout fallback inc: 200, Fallback percentage: 93.02% Iteration 17: swpout inc: 9, swpout fallback inc: 220, Fallback percentage: 96.07% $ ./thp_swap_allocator_test -s ./thp_swap_allocator_test -s Iteration 1: swpout inc: 233, swpout fallback inc: 0, Fallback percentage: 0.00% Iteration 2: swpout inc: 97, swpout fallback inc: 135, Fallback percentage: 58.19% Iteration 3: swpout inc: 42, swpout fallback inc: 192, Fallback percentage: 82.05% Iteration 4: swpout inc: 19, swpout fallback inc: 214, Fallback percentage: 91.85% Iteration 5: swpout inc: 12, swpout fallback inc: 213, Fallback percentage: 94.67% Iteration 6: swpout inc: 11, swpout fallback inc: 217, Fallback percentage: 95.18% Iteration 7: swpout inc: 9, swpout fallback inc: 214, Fallback percentage: 95.96% Iteration 8: swpout inc: 8, swpout fallback inc: 213, Fallback percentage: 96.38% Iteration 9: swpout inc: 2, swpout fallback inc: 223, Fallback percentage: 99.11% Iteration 10: swpout inc: 2, swpout fallback inc: 228, Fallback percentage: 99.13% Iteration 11: swpout inc: 4, swpout fallback inc: 214, Fallback percentage: 98.17% Iteration 12: swpout inc: 5, swpout fallback inc: 226, Fallback percentage: 97.84% Iteration 13: swpout inc: 3, swpout fallback inc: 212, Fallback percentage: 98.60% Iteration 14: swpout inc: 0, swpout fallback inc: 222, Fallback percentage: 100.00% Iteration 15: swpout inc: 3, swpout fallback inc: 222, Fallback percentage: 98.67% 
Iteration 16: swpout inc: 4, swpout fallback inc: 223, Fallback percentage: 98.24% ========= Kernel compile under tmpfs with cgroup memory.max = 470M. 12 core 24 hyperthreading, 32 jobs. 10 Run each group SSD swap 10 runs average, 20G swap partition: With: user 2929.064 system 1479.381 : 1376.89 1398.22 1444.64 1477.39 1479.04 1497.27 1504.47 1531.4 1532.92 1551.57 real 1441.324 Without: user 2910.872 system 1482.732 : 1440.01 1451.4 1462.01 1467.47 1467.51 1469.3 1470.19 1496.32 1544.1 1559.01 real 1580.822 Two zram swap: zram0 3.0G zram1 20G. The idea is forcing the zram0 almost full then overflow to zram1: With: user 4320.301 system 4272.403 : 4236.24 4262.81 4264.75 4269.13 4269.44 4273.06 4279.85 4285.98 4289.64 4293.13 real 431.759 Without user 4301.393 system 4387.672 : 4374.47 4378.3 4380.95 4382.84 4383.06 4388.05 4389.76 4397.16 4398.23 4403.9 real 433.979 ------ more test result from Kaiui ---------- Test with build linux kernel using a 4G ZRAM, 1G memory.max limit on top of shmem: System info: 32 Core AMD Zen2, 64G total memory. 
Test 3 times using only 4K pages: ================================= With: ----- 1838.74user 2411.21system 2:37.86elapsed 2692%CPU (0avgtext+0avgdata 847060maxresident)k 1839.86user 2465.77system 2:39.35elapsed 2701%CPU (0avgtext+0avgdata 847060maxresident)k 1840.26user 2454.68system 2:39.43elapsed 2693%CPU (0avgtext+0avgdata 847060maxresident)k Summary (~4.6% improment of system time): User: 1839.62 System: 2443.89: 2465.77 2454.68 2411.21 Real: 158.88 Without: -------- 1837.99user 2575.95system 2:43.09elapsed 2706%CPU (0avgtext+0avgdata 846520maxresident)k 1838.32user 2555.15system 2:42.52elapsed 2709%CPU (0avgtext+0avgdata 846520maxresident)k 1843.02user 2561.55system 2:43.35elapsed 2702%CPU (0avgtext+0avgdata 846520maxresident)k Summary: User: 1839.78 System: 2564.22: 2575.95 2555.15 2561.55 Real: 162.99 Test 5 times using enabled all mTHP pages: ========================================== With: ----- 1796.44user 2937.33system 2:59.09elapsed 2643%CPU (0avgtext+0avgdata 846936maxresident)k 1802.55user 3002.32system 2:54.68elapsed 2750%CPU (0avgtext+0avgdata 847072maxresident)k 1806.59user 2986.53system 2:55.17elapsed 2736%CPU (0avgtext+0avgdata 847092maxresident)k 1803.27user 2982.40system 2:54.49elapsed 2742%CPU (0avgtext+0avgdata 846796maxresident)k 1807.43user 3036.08system 2:56.06elapsed 2751%CPU (0avgtext+0avgdata 846488maxresident)k Summary (~8.4% improvement of system time): User: 1803.25 System: 2988.93: 2937.33 3002.32 2986.53 2982.40 3036.08 Real: 175.90 mTHP swapout status: /sys/kernel/mm/transparent_hugepage/hugepages-32kB/stats/swpout:347721 /sys/kernel/mm/transparent_hugepage/hugepages-32kB/stats/swpout_fallback:3110 /sys/kernel/mm/transparent_hugepage/hugepages-512kB/stats/swpout:3365 /sys/kernel/mm/transparent_hugepage/hugepages-512kB/stats/swpout_fallback:8269 /sys/kernel/mm/transparent_hugepage/hugepages-2048kB/stats/swpout:24 /sys/kernel/mm/transparent_hugepage/hugepages-2048kB/stats/swpout_fallback:3341 
/sys/kernel/mm/transparent_hugepage/hugepages-1024kB/stats/swpout:145 /sys/kernel/mm/transparent_hugepage/hugepages-1024kB/stats/swpout_fallback:5038 /sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/swpout:322737 /sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/swpout_fallback:36808 /sys/kernel/mm/transparent_hugepage/hugepages-16kB/stats/swpout:380455 /sys/kernel/mm/transparent_hugepage/hugepages-16kB/stats/swpout_fallback:1010 /sys/kernel/mm/transparent_hugepage/hugepages-256kB/stats/swpout:24973 /sys/kernel/mm/transparent_hugepage/hugepages-256kB/stats/swpout_fallback:13223 /sys/kernel/mm/transparent_hugepage/hugepages-128kB/stats/swpout:197348 /sys/kernel/mm/transparent_hugepage/hugepages-128kB/stats/swpout_fallback:80541 Without: -------- 1794.41user 3151.29system 3:05.97elapsed 2659%CPU (0avgtext+0avgdata 846704maxresident)k 1810.27user 3304.48system 3:05.38elapsed 2759%CPU (0avgtext+0avgdata 846636maxresident)k 1809.84user 3254.85system 3:03.83elapsed 2755%CPU (0avgtext+0avgdata 846952maxresident)k 1813.54user 3259.56system 3:04.28elapsed 2752%CPU (0avgtext+0avgdata 846848maxresident)k 1829.97user 3338.40system 3:07.32elapsed 2759%CPU (0avgtext+0avgdata 847024maxresident)k Summary: User: 1811.61 System: 3261.72 : 3151.29 3304.48 3254.85 3259.56 3338.40 Real: 185.356 mTHP swapout status: hugepages-32kB/stats/swpout:35630 hugepages-32kB/stats/swpout_fallback:1809908 hugepages-512kB/stats/swpout:523 hugepages-512kB/stats/swpout_fallback:55235 hugepages-2048kB/stats/swpout:53 hugepages-2048kB/stats/swpout_fallback:17264 hugepages-1024kB/stats/swpout:85 hugepages-1024kB/stats/swpout_fallback:24979 hugepages-64kB/stats/swpout:30117 hugepages-64kB/stats/swpout_fallback:1825399 hugepages-16kB/stats/swpout:42775 hugepages-16kB/stats/swpout_fallback:1951123 hugepages-256kB/stats/swpout:2326 hugepages-256kB/stats/swpout_fallback:170165 hugepages-128kB/stats/swpout:17925 hugepages-128kB/stats/swpout_fallback:1309757 This patch (of 9): Previously, the 
swap cluster used a cluster index as a pointer to construct a custom single link list type "swap_cluster_list". The next cluster pointer is shared with the cluster->count. It prevents putting the non free cluster into a list. Change the cluster to use the standard double link list instead. This allows tracing the nonfull cluster in the follow up patch. That way, it is faster to get to the nonfull cluster of that order. Remove the cluster getter/setter for accessing the cluster struct member. The list operation is protected by the swap_info_struct->lock. Change cluster code to use "struct swap_cluster_info *" to reference the cluster rather than by using index. That is more consistent with the list manipulation. It avoids the repeat adding index to the cluster_info. The code is easier to understand. Remove the cluster next pointer is NULL flag, the double link list can handle the empty list pretty well. The "swap_cluster_info" struct is two pointer bigger, because 512 swap entries share one swap_cluster_info struct, it has very little impact on the average memory usage per swap entry. For 1TB swapfile, the swap cluster data structure increases from 8MB to 24MB. Other than the list conversion, there is no real function change in this patch. 
Link: https://lkml.kernel.org/r/20240730-swap-allocator-v5-0-cb9c148b9297@kernel.org Link: https://lkml.kernel.org/r/20240730-swap-allocator-v5-1-cb9c148b9297@kernel.org Signed-off-by: Chris Li Reported-by: Barry Song <21cnbao@gmail.com> Reviewed-by: "Huang, Ying" Cc: Hugh Dickins Cc: Kairui Song Cc: Kalesh Singh Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- include/linux/swap.h | 25 ++--- mm/swapfile.c | 226 ++++++++++++------------------------------- 2 files changed, 71 insertions(+), 180 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index a803fdd1db42..1b70ddb8c573 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -243,22 +243,20 @@ enum { * free clusters are organized into a list. We fetch an entry from the list to * get a free cluster. * - * The data field stores next cluster if the cluster is free or cluster usage - * counter otherwise. The flags field determines if a cluster is free. This is - * protected by swap_info_struct.lock. + * The flags field determines if a cluster is free. This is + * protected by cluster lock. */ struct swap_cluster_info { spinlock_t lock; /* * Protect swap_cluster_info fields - * and swap_info_struct->swap_map - * elements correspond to the swap - * cluster + * other than list, and swap_info_struct->swap_map + * elements corresponding to the swap cluster. 
*/ - unsigned int data:24; - unsigned int flags:8; + u16 count; + u8 flags; + struct list_head list; }; #define CLUSTER_FLAG_FREE 1 /* This cluster is free */ -#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */ /* * The first page in the swap file is the swap header, which is always marked @@ -283,11 +281,6 @@ struct percpu_cluster { unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */ }; -struct swap_cluster_list { - struct swap_cluster_info head; - struct swap_cluster_info tail; -}; - /* * The in-memory structure used to track swap areas. */ @@ -300,7 +293,7 @@ struct swap_info_struct { unsigned int max; /* extent of the swap_map */ unsigned char *swap_map; /* vmalloc'ed array of usage counts */ struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */ - struct swap_cluster_list free_clusters; /* free clusters list */ + struct list_head free_clusters; /* free clusters list */ unsigned int lowest_bit; /* index of first free in swap_map */ unsigned int highest_bit; /* index of last free in swap_map */ unsigned int pages; /* total of usable pages of swap */ @@ -332,7 +325,7 @@ struct swap_info_struct { * list. 
*/ struct work_struct discard_work; /* discard worker */ - struct swap_cluster_list discard_clusters; /* discard clusters list */ + struct list_head discard_clusters; /* discard clusters list */ CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) diff --git a/mm/swapfile.c b/mm/swapfile.c index a171a00cd097..e4533ab587af 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -289,62 +289,15 @@ static void discard_swap_cluster(struct swap_info_struct *si, #endif #define LATENCY_LIMIT 256 -static inline void cluster_set_flag(struct swap_cluster_info *info, - unsigned int flag) -{ - info->flags = flag; -} - -static inline unsigned int cluster_count(struct swap_cluster_info *info) -{ - return info->data; -} - -static inline void cluster_set_count(struct swap_cluster_info *info, - unsigned int c) -{ - info->data = c; -} - -static inline void cluster_set_count_flag(struct swap_cluster_info *info, - unsigned int c, unsigned int f) -{ - info->flags = f; - info->data = c; -} - -static inline unsigned int cluster_next(struct swap_cluster_info *info) -{ - return info->data; -} - -static inline void cluster_set_next(struct swap_cluster_info *info, - unsigned int n) -{ - info->data = n; -} - -static inline void cluster_set_next_flag(struct swap_cluster_info *info, - unsigned int n, unsigned int f) -{ - info->flags = f; - info->data = n; -} - static inline bool cluster_is_free(struct swap_cluster_info *info) { return info->flags & CLUSTER_FLAG_FREE; } -static inline bool cluster_is_null(struct swap_cluster_info *info) -{ - return info->flags & CLUSTER_FLAG_NEXT_NULL; -} - -static inline void cluster_set_null(struct swap_cluster_info *info) +static inline unsigned int cluster_index(struct swap_info_struct *si, + struct swap_cluster_info *ci) { - info->flags = CLUSTER_FLAG_NEXT_NULL; - info->data = 0; + return ci - si->cluster_info; } static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, @@ -393,65 +346,11 @@ static inline void unlock_cluster_or_swap_info(struct 
swap_info_struct *si, spin_unlock(&si->lock); } -static inline bool cluster_list_empty(struct swap_cluster_list *list) -{ - return cluster_is_null(&list->head); -} - -static inline unsigned int cluster_list_first(struct swap_cluster_list *list) -{ - return cluster_next(&list->head); -} - -static void cluster_list_init(struct swap_cluster_list *list) -{ - cluster_set_null(&list->head); - cluster_set_null(&list->tail); -} - -static void cluster_list_add_tail(struct swap_cluster_list *list, - struct swap_cluster_info *ci, - unsigned int idx) -{ - if (cluster_list_empty(list)) { - cluster_set_next_flag(&list->head, idx, 0); - cluster_set_next_flag(&list->tail, idx, 0); - } else { - struct swap_cluster_info *ci_tail; - unsigned int tail = cluster_next(&list->tail); - - /* - * Nested cluster lock, but both cluster locks are - * only acquired when we held swap_info_struct->lock - */ - ci_tail = ci + tail; - spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING); - cluster_set_next(ci_tail, idx); - spin_unlock(&ci_tail->lock); - cluster_set_next_flag(&list->tail, idx, 0); - } -} - -static unsigned int cluster_list_del_first(struct swap_cluster_list *list, - struct swap_cluster_info *ci) -{ - unsigned int idx; - - idx = cluster_next(&list->head); - if (cluster_next(&list->tail) == idx) { - cluster_set_null(&list->head); - cluster_set_null(&list->tail); - } else - cluster_set_next_flag(&list->head, - cluster_next(&ci[idx]), 0); - - return idx; -} - /* Add a cluster to discard list and schedule it to do discard */ static void swap_cluster_schedule_discard(struct swap_info_struct *si, - unsigned int idx) + struct swap_cluster_info *ci) { + unsigned int idx = cluster_index(si, ci); /* * If scan_swap_map_slots() can't find a free cluster, it will check * si->swap_map directly. 
To make sure the discarding cluster isn't @@ -461,17 +360,14 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si, memset(si->swap_map + idx * SWAPFILE_CLUSTER, SWAP_MAP_BAD, SWAPFILE_CLUSTER); - cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx); - + list_add_tail(&ci->list, &si->discard_clusters); schedule_work(&si->discard_work); } -static void __free_cluster(struct swap_info_struct *si, unsigned long idx) +static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { - struct swap_cluster_info *ci = si->cluster_info; - - cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE); - cluster_list_add_tail(&si->free_clusters, ci, idx); + ci->flags = CLUSTER_FLAG_FREE; + list_add_tail(&ci->list, &si->free_clusters); } /* @@ -480,24 +376,25 @@ static void __free_cluster(struct swap_info_struct *si, unsigned long idx) */ static void swap_do_scheduled_discard(struct swap_info_struct *si) { - struct swap_cluster_info *info, *ci; + struct swap_cluster_info *ci; unsigned int idx; - info = si->cluster_info; - - while (!cluster_list_empty(&si->discard_clusters)) { - idx = cluster_list_del_first(&si->discard_clusters, info); + while (!list_empty(&si->discard_clusters)) { + ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list); + list_del(&ci->list); + idx = cluster_index(si, ci); spin_unlock(&si->lock); discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, SWAPFILE_CLUSTER); spin_lock(&si->lock); - ci = lock_cluster(si, idx * SWAPFILE_CLUSTER); - __free_cluster(si, idx); + + spin_lock(&ci->lock); + __free_cluster(si, ci); memset(si->swap_map + idx * SWAPFILE_CLUSTER, 0, SWAPFILE_CLUSTER); - unlock_cluster(ci); + spin_unlock(&ci->lock); } } @@ -520,20 +417,21 @@ static void swap_users_ref_free(struct percpu_ref *ref) complete(&si->comp); } -static void alloc_cluster(struct swap_info_struct *si, unsigned long idx) +static struct swap_cluster_info *alloc_cluster(struct swap_info_struct *si, unsigned long idx) 
{ - struct swap_cluster_info *ci = si->cluster_info; + struct swap_cluster_info *ci = list_first_entry(&si->free_clusters, + struct swap_cluster_info, list); - VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx); - cluster_list_del_first(&si->free_clusters, ci); - cluster_set_count_flag(ci + idx, 0, 0); + VM_BUG_ON(cluster_index(si, ci) != idx); + list_del(&ci->list); + ci->count = 0; + ci->flags = 0; + return ci; } -static void free_cluster(struct swap_info_struct *si, unsigned long idx) +static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { - struct swap_cluster_info *ci = si->cluster_info + idx; - - VM_BUG_ON(cluster_count(ci) != 0); + VM_BUG_ON(ci->count != 0); /* * If the swap is discardable, prepare discard the cluster * instead of free it immediately. The cluster will be freed @@ -541,11 +439,11 @@ static void free_cluster(struct swap_info_struct *si, unsigned long idx) */ if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) == (SWP_WRITEOK | SWP_PAGE_DISCARD)) { - swap_cluster_schedule_discard(si, idx); + swap_cluster_schedule_discard(si, ci); return; } - __free_cluster(si, idx); + __free_cluster(si, ci); } /* @@ -558,15 +456,15 @@ static void add_cluster_info_page(struct swap_info_struct *p, unsigned long count) { unsigned long idx = page_nr / SWAPFILE_CLUSTER; + struct swap_cluster_info *ci = cluster_info + idx; if (!cluster_info) return; - if (cluster_is_free(&cluster_info[idx])) + if (cluster_is_free(ci)) alloc_cluster(p, idx); - VM_BUG_ON(cluster_count(&cluster_info[idx]) + count > SWAPFILE_CLUSTER); - cluster_set_count(&cluster_info[idx], - cluster_count(&cluster_info[idx]) + count); + VM_BUG_ON(ci->count + count > SWAPFILE_CLUSTER); + ci->count += count; } /* @@ -580,24 +478,20 @@ static void inc_cluster_info_page(struct swap_info_struct *p, } /* - * The cluster corresponding to page_nr decreases one usage. 
If the usage - * counter becomes 0, which means no page in the cluster is in using, we can - * optionally discard the cluster and add it to free cluster list. + * The cluster ci decreases one usage. If the usage counter becomes 0, + * which means no page in the cluster is in use, we can optionally discard + * the cluster and add it to free cluster list. */ -static void dec_cluster_info_page(struct swap_info_struct *p, - struct swap_cluster_info *cluster_info, unsigned long page_nr) +static void dec_cluster_info_page(struct swap_info_struct *p, struct swap_cluster_info *ci) { - unsigned long idx = page_nr / SWAPFILE_CLUSTER; - - if (!cluster_info) + if (!p->cluster_info) return; - VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0); - cluster_set_count(&cluster_info[idx], - cluster_count(&cluster_info[idx]) - 1); + VM_BUG_ON(ci->count == 0); + ci->count--; - if (cluster_count(&cluster_info[idx]) == 0) - free_cluster(p, idx); + if (!ci->count) + free_cluster(p, ci); } /* @@ -610,10 +504,12 @@ scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, { struct percpu_cluster *percpu_cluster; bool conflict; + struct swap_cluster_info *first = list_first_entry(&si->free_clusters, + struct swap_cluster_info, list); offset /= SWAPFILE_CLUSTER; - conflict = !cluster_list_empty(&si->free_clusters) && - offset != cluster_list_first(&si->free_clusters) && + conflict = !list_empty(&si->free_clusters) && + offset != cluster_index(si, first) && cluster_is_free(&si->cluster_info[offset]); if (!conflict) @@ -654,10 +550,10 @@ static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, cluster = this_cpu_ptr(si->percpu_cluster); tmp = cluster->next[order]; if (tmp == SWAP_NEXT_INVALID) { - if (!cluster_list_empty(&si->free_clusters)) { - tmp = cluster_next(&si->free_clusters.head) * - SWAPFILE_CLUSTER; - } else if (!cluster_list_empty(&si->discard_clusters)) { + if (!list_empty(&si->free_clusters)) { + ci = list_first_entry(&si->free_clusters, struct 
swap_cluster_info, list); + tmp = cluster_index(si, ci) * SWAPFILE_CLUSTER; + } else if (!list_empty(&si->discard_clusters)) { /* * we don't have free cluster but have some clusters in * discarding, do discard now and reclaim them, then @@ -1056,8 +952,9 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx) ci = lock_cluster(si, offset); memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER); - cluster_set_count_flag(ci, 0, 0); - free_cluster(si, idx); + ci->count = 0; + ci->flags = 0; + free_cluster(si, ci); unlock_cluster(ci); swap_range_free(si, offset, SWAPFILE_CLUSTER); } @@ -1331,7 +1228,7 @@ static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry) count = p->swap_map[offset]; VM_BUG_ON(count != SWAP_HAS_CACHE); p->swap_map[offset] = 0; - dec_cluster_info_page(p, p->cluster_info, offset); + dec_cluster_info_page(p, ci); unlock_cluster(ci); mem_cgroup_uncharge_swap(entry, 1); @@ -3020,8 +2917,8 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p, nr_good_pages = maxpages - 1; /* omit header page */ - cluster_list_init(&p->free_clusters); - cluster_list_init(&p->discard_clusters); + INIT_LIST_HEAD(&p->free_clusters); + INIT_LIST_HEAD(&p->discard_clusters); for (i = 0; i < swap_header->info.nr_badpages; i++) { unsigned int page_nr = swap_header->info.badpages[i]; @@ -3072,14 +2969,15 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p, for (k = 0; k < SWAP_CLUSTER_COLS; k++) { j = (k + col) % SWAP_CLUSTER_COLS; for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) { + struct swap_cluster_info *ci; idx = i * SWAP_CLUSTER_COLS + j; + ci = cluster_info + idx; if (idx >= nr_clusters) continue; - if (cluster_count(&cluster_info[idx])) + if (ci->count) continue; - cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE); - cluster_list_add_tail(&p->free_clusters, cluster_info, - idx); + ci->flags = CLUSTER_FLAG_FREE; + list_add_tail(&ci->list, &p->free_clusters); } } return 
nr_extents; -- Gitee From 17d6357f7d691f28b30b35d8061df2654230a445 Mon Sep 17 00:00:00 2001 From: Chris Li Date: Tue, 30 Jul 2024 23:49:14 -0700 Subject: [PATCH 1548/2138] mm: swap: mTHP allocate swap entries from nonfull list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #9728 commit d07a46a4ac18786e7f4c98fb08525ed80dd1f642 upstream. Track the nonfull cluster as well as the empty cluster on lists. Each order has one nonfull cluster list. The cluster will remember which order it was used during new cluster allocation. When the cluster has free entry, add to the nonfull[order] list.  When the free cluster list is empty, also allocate from the nonempty list of that order. This improves the mTHP swap allocation success rate. There are limitations if the distribution of numbers of different orders of mTHP changes a lot. e.g. there are a lot of nonfull cluster assign to order A while later time there are a lot of order B allocation while very little allocation in order A. Currently the cluster used by order A will not reused by order B unless the cluster is 100% empty. 
Link: https://lkml.kernel.org/r/20240730-swap-allocator-v5-2-cb9c148b9297@kernel.org Signed-off-by: Chris Li Reported-by: Barry Song <21cnbao@gmail.com> Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kairui Song Cc: Kalesh Singh Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- include/linux/swap.h | 4 ++++ mm/swapfile.c | 38 +++++++++++++++++++++++++++++++++++--- 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 1b70ddb8c573..eb17203bbbad 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -254,9 +254,11 @@ struct swap_cluster_info { */ u16 count; u8 flags; + u8 order; struct list_head list; }; #define CLUSTER_FLAG_FREE 1 /* This cluster is free */ +#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */ /* * The first page in the swap file is the swap header, which is always marked @@ -294,6 +296,8 @@ struct swap_info_struct { unsigned char *swap_map; /* vmalloc'ed array of usage counts */ struct swap_cluster_info *cluster_info; /* cluster info. 
Only for SSD */ struct list_head free_clusters; /* free clusters list */ + struct list_head nonfull_clusters[SWAP_NR_ORDERS]; + /* list of cluster that contains at least one free slot */ unsigned int lowest_bit; /* index of first free in swap_map */ unsigned int highest_bit; /* index of last free in swap_map */ unsigned int pages; /* total of usable pages of swap */ diff --git a/mm/swapfile.c b/mm/swapfile.c index e4533ab587af..125cc49f69f8 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -360,14 +360,22 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si, memset(si->swap_map + idx * SWAPFILE_CLUSTER, SWAP_MAP_BAD, SWAPFILE_CLUSTER); - list_add_tail(&ci->list, &si->discard_clusters); + VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE); + if (ci->flags & CLUSTER_FLAG_NONFULL) + list_move_tail(&ci->list, &si->discard_clusters); + else + list_add_tail(&ci->list, &si->discard_clusters); + ci->flags = 0; schedule_work(&si->discard_work); } static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { + if (ci->flags & CLUSTER_FLAG_NONFULL) + list_move_tail(&ci->list, &si->free_clusters); + else + list_add_tail(&ci->list, &si->free_clusters); ci->flags = CLUSTER_FLAG_FREE; - list_add_tail(&ci->list, &si->free_clusters); } /* @@ -490,8 +498,15 @@ static void dec_cluster_info_page(struct swap_info_struct *p, struct swap_cluste VM_BUG_ON(ci->count == 0); ci->count--; - if (!ci->count) + if (!ci->count) { free_cluster(p, ci); + return; + } + + if (!(ci->flags & CLUSTER_FLAG_NONFULL)) { + list_add_tail(&ci->list, &p->nonfull_clusters[ci->order]); + ci->flags |= CLUSTER_FLAG_NONFULL; + } } /* @@ -552,6 +567,19 @@ static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, if (tmp == SWAP_NEXT_INVALID) { if (!list_empty(&si->free_clusters)) { ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list); + list_del(&ci->list); + spin_lock(&ci->lock); + ci->order = order; + ci->flags = 0; + spin_unlock(&ci->lock); + 
tmp = cluster_index(si, ci) * SWAPFILE_CLUSTER; + } else if (!list_empty(&si->nonfull_clusters[order])) { + ci = list_first_entry(&si->nonfull_clusters[order], + struct swap_cluster_info, list); + list_del(&ci->list); + spin_lock(&ci->lock); + ci->flags = 0; + spin_unlock(&ci->lock); tmp = cluster_index(si, ci) * SWAPFILE_CLUSTER; } else if (!list_empty(&si->discard_clusters)) { /* @@ -953,6 +981,7 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx) ci = lock_cluster(si, offset); memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER); ci->count = 0; + ci->order = 0; ci->flags = 0; free_cluster(si, ci); unlock_cluster(ci); @@ -2920,6 +2949,9 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p, INIT_LIST_HEAD(&p->free_clusters); INIT_LIST_HEAD(&p->discard_clusters); + for (i = 0; i < SWAP_NR_ORDERS; i++) + INIT_LIST_HEAD(&p->nonfull_clusters[i]); + for (i = 0; i < swap_header->info.nr_badpages; i++) { unsigned int page_nr = swap_header->info.badpages[i]; if (page_nr == 0 || page_nr > swap_header->info.last_page) -- Gitee From 62f4a4dce051463130769f483010bde4d92ff33e Mon Sep 17 00:00:00 2001 From: Chris Li Date: Tue, 30 Jul 2024 23:49:15 -0700 Subject: [PATCH 1549/2138] mm: swap: separate SSD allocation from scan_swap_map_slots() ANBZ: #9728 commit 5f843a9a3a1e865fbf349419bde39977c2e7d3d1 upstream. Previously the SSD and HDD share the same swap_map scan loop in scan_swap_map_slots(). This function is complex and hard to flow the execution flow. scan_swap_map_try_ssd_cluster() can already do most of the heavy lifting to locate the candidate swap range in the cluster. However it needs to go back to scan_swap_map_slots() to check conflict and then perform the allocation. When scan_swap_map_try_ssd_cluster() failed, it still depended on the scan_swap_map_slots() to do brute force scanning of the swap_map. When the swapfile is large and almost full, it will take some CPU time to go through the swap_map array. 
Get rid of the cluster allocation dependency on the swap_map scan loop in scan_swap_map_slots(). Streamline the cluster allocation code path. No more conflict checks. For order 0 swap entry, when run out of free and nonfull list. It will allocate from the higher order nonfull cluster list. Users should see less CPU time spent on searching the free swap slot when swapfile is almost full. [ryncsn@gmail.com: fix array-bounds error with CONFIG_THP_SWAP=n] Link: https://lkml.kernel.org/r/CAMgjq7Bz0DY+rY0XgCoH7-Q=uHLdo3omi8kUr4ePDweNyofsbQ@mail.gmail.com Link: https://lkml.kernel.org/r/20240730-swap-allocator-v5-3-cb9c148b9297@kernel.org Signed-off-by: Chris Li Signed-off-by: Kairui Song Reported-by: Barry Song <21cnbao@gmail.com> Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kalesh Singh Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- mm/swapfile.c | 300 ++++++++++++++++++++++++++++---------------------- 1 file changed, 168 insertions(+), 132 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 125cc49f69f8..d4d00726f7ad 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -52,6 +52,8 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t, unsigned char); static void free_swap_count_continuations(struct swap_info_struct *); +static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset, + unsigned int nr_entries); static DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; @@ -300,6 +302,12 @@ static inline unsigned int cluster_index(struct swap_info_struct *si, return ci - si->cluster_info; } +static inline unsigned int cluster_offset(struct swap_info_struct *si, + struct swap_cluster_info *ci) +{ + return cluster_index(si, ci) * SWAPFILE_CLUSTER; +} + static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, unsigned long offset) { @@ -371,11 +379,15 @@ static void 
swap_cluster_schedule_discard(struct swap_info_struct *si, static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { + lockdep_assert_held(&si->lock); + lockdep_assert_held(&ci->lock); + if (ci->flags & CLUSTER_FLAG_NONFULL) list_move_tail(&ci->list, &si->free_clusters); else list_add_tail(&ci->list, &si->free_clusters); ci->flags = CLUSTER_FLAG_FREE; + ci->order = 0; } /* @@ -430,9 +442,11 @@ static struct swap_cluster_info *alloc_cluster(struct swap_info_struct *si, unsi struct swap_cluster_info *ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list); + lockdep_assert_held(&si->lock); + lockdep_assert_held(&ci->lock); VM_BUG_ON(cluster_index(si, ci) != idx); + VM_BUG_ON(ci->count); list_del(&ci->list); - ci->count = 0; ci->flags = 0; return ci; } @@ -440,6 +454,8 @@ static struct swap_cluster_info *alloc_cluster(struct swap_info_struct *si, unsi static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { VM_BUG_ON(ci->count != 0); + lockdep_assert_held(&si->lock); + lockdep_assert_held(&ci->lock); /* * If the swap is discardable, prepare discard the cluster * instead of free it immediately. The cluster will be freed @@ -496,6 +512,9 @@ static void dec_cluster_info_page(struct swap_info_struct *p, struct swap_cluste return; VM_BUG_ON(ci->count == 0); + VM_BUG_ON(cluster_is_free(ci)); + lockdep_assert_held(&p->lock); + lockdep_assert_held(&ci->lock); ci->count--; if (!ci->count) { @@ -504,48 +523,88 @@ static void dec_cluster_info_page(struct swap_info_struct *p, struct swap_cluste } if (!(ci->flags & CLUSTER_FLAG_NONFULL)) { + VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE); list_add_tail(&ci->list, &p->nonfull_clusters[ci->order]); - ci->flags |= CLUSTER_FLAG_NONFULL; + ci->flags = CLUSTER_FLAG_NONFULL; } } -/* - * It's possible scan_swap_map_slots() uses a free cluster in the middle of free - * cluster list. Avoiding such abuse to avoid list corruption. 
- */ -static bool -scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, - unsigned long offset, int order) +static inline bool cluster_scan_range(struct swap_info_struct *si, unsigned int start, + unsigned int nr_pages) { - struct percpu_cluster *percpu_cluster; - bool conflict; - struct swap_cluster_info *first = list_first_entry(&si->free_clusters, - struct swap_cluster_info, list); - - offset /= SWAPFILE_CLUSTER; - conflict = !list_empty(&si->free_clusters) && - offset != cluster_index(si, first) && - cluster_is_free(&si->cluster_info[offset]); + unsigned char *p = si->swap_map + start; + unsigned char *end = p + nr_pages; - if (!conflict) - return false; + while (p < end) + if (*p++) + return false; - percpu_cluster = this_cpu_ptr(si->percpu_cluster); - percpu_cluster->next[order] = SWAP_NEXT_INVALID; return true; } -static inline bool swap_range_empty(char *swap_map, unsigned int start, - unsigned int nr_pages) + +static inline void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci, + unsigned int start, unsigned char usage, + unsigned int order) { - unsigned int i; + unsigned int nr_pages = 1 << order; - for (i = 0; i < nr_pages; i++) { - if (swap_map[start + i]) - return false; + if (cluster_is_free(ci)) { + if (nr_pages < SWAPFILE_CLUSTER) { + list_move_tail(&ci->list, &si->nonfull_clusters[order]); + ci->flags = CLUSTER_FLAG_NONFULL; + } + ci->order = order; } - return true; + memset(si->swap_map + start, usage, nr_pages); + swap_range_alloc(si, start, nr_pages); + ci->count += nr_pages; + + if (ci->count == SWAPFILE_CLUSTER) { + VM_BUG_ON(!(ci->flags & (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL))); + list_del(&ci->list); + ci->flags = 0; + } +} + +static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigned long offset, + unsigned int *foundp, unsigned int order, + unsigned char usage) +{ + unsigned long start = offset & ~(SWAPFILE_CLUSTER - 1); + unsigned long end = min(start + SWAPFILE_CLUSTER, 
si->max); + unsigned int nr_pages = 1 << order; + struct swap_cluster_info *ci; + + if (end < nr_pages) + return SWAP_NEXT_INVALID; + end -= nr_pages; + + ci = lock_cluster(si, offset); + if (ci->count + nr_pages > SWAPFILE_CLUSTER) { + offset = SWAP_NEXT_INVALID; + goto done; + } + + while (offset <= end) { + if (cluster_scan_range(si, offset, nr_pages)) { + cluster_alloc_range(si, ci, offset, usage, order); + *foundp = offset; + if (ci->count == SWAPFILE_CLUSTER) { + offset = SWAP_NEXT_INVALID; + goto done; + } + offset += nr_pages; + break; + } + offset += nr_pages; + } + if (offset > end) + offset = SWAP_NEXT_INVALID; +done: + unlock_cluster(ci); + return offset; } /* @@ -553,72 +612,66 @@ static inline bool swap_range_empty(char *swap_map, unsigned int start, * pool (a cluster). This might involve allocating a new cluster for current CPU * too. */ -static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, - unsigned long *offset, unsigned long *scan_base, int order) +static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order, + unsigned char usage) { - unsigned int nr_pages = 1 << order; struct percpu_cluster *cluster; - struct swap_cluster_info *ci; - unsigned int tmp, max; + struct swap_cluster_info *ci, *n; + unsigned int offset, found = 0; new_cluster: + lockdep_assert_held(&si->lock); cluster = this_cpu_ptr(si->percpu_cluster); - tmp = cluster->next[order]; - if (tmp == SWAP_NEXT_INVALID) { - if (!list_empty(&si->free_clusters)) { - ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list); - list_del(&ci->list); - spin_lock(&ci->lock); - ci->order = order; - ci->flags = 0; - spin_unlock(&ci->lock); - tmp = cluster_index(si, ci) * SWAPFILE_CLUSTER; - } else if (!list_empty(&si->nonfull_clusters[order])) { - ci = list_first_entry(&si->nonfull_clusters[order], - struct swap_cluster_info, list); - list_del(&ci->list); - spin_lock(&ci->lock); - ci->flags = 0; - spin_unlock(&ci->lock); - tmp = 
cluster_index(si, ci) * SWAPFILE_CLUSTER; - } else if (!list_empty(&si->discard_clusters)) { - /* - * we don't have free cluster but have some clusters in - * discarding, do discard now and reclaim them, then - * reread cluster_next_cpu since we dropped si->lock - */ - swap_do_scheduled_discard(si); - *scan_base = this_cpu_read(*si->cluster_next_cpu); - *offset = *scan_base; - goto new_cluster; - } else - return false; + offset = cluster->next[order]; + if (offset) { + offset = alloc_swap_scan_cluster(si, offset, &found, order, usage); + if (found) + goto done; } - /* - * Other CPUs can use our cluster if they can't find a free cluster, - * check if there is still free entry in the cluster, maintaining - * natural alignment. - */ - max = min_t(unsigned long, si->max, ALIGN(tmp + 1, SWAPFILE_CLUSTER)); - if (tmp < max) { - ci = lock_cluster(si, tmp); - while (tmp < max) { - if (swap_range_empty(si->swap_map, tmp, nr_pages)) - break; - tmp += nr_pages; + if (!list_empty(&si->free_clusters)) { + ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list); + offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage); + VM_BUG_ON(!found); + goto done; + } + + if (order < PMD_ORDER) { + list_for_each_entry_safe(ci, n, &si->nonfull_clusters[order], list) { + offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), + &found, order, usage); + if (found) + goto done; } - unlock_cluster(ci); } - if (tmp >= max) { - cluster->next[order] = SWAP_NEXT_INVALID; + + if (!list_empty(&si->discard_clusters)) { + /* + * we don't have free cluster but have some clusters in + * discarding, do discard now and reclaim them, then + * reread cluster_next_cpu since we dropped si->lock + */ + swap_do_scheduled_discard(si); goto new_cluster; } - *offset = tmp; - *scan_base = tmp; - tmp += nr_pages; - cluster->next[order] = tmp < max ? 
tmp : SWAP_NEXT_INVALID; - return true; + + if (order) + goto done; + + for (int o = 1; o < SWAP_NR_ORDERS; o++) { + if (!list_empty(&si->nonfull_clusters[o])) { + ci = list_first_entry(&si->nonfull_clusters[o], struct swap_cluster_info, + list); + offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), + &found, 0, usage); + VM_BUG_ON(!found); + goto done; + } + } + +done: + cluster->next[order] = offset; + return found; } static void __del_from_avail_list(struct swap_info_struct *p) @@ -740,11 +793,29 @@ static bool swap_offset_available_and_locked(struct swap_info_struct *si, return false; } +static int cluster_alloc_swap(struct swap_info_struct *si, + unsigned char usage, int nr, + swp_entry_t slots[], int order) +{ + int n_ret = 0; + + VM_BUG_ON(!si->cluster_info); + + while (n_ret < nr) { + unsigned long offset = cluster_alloc_swap_entry(si, order, usage); + + if (!offset) + break; + slots[n_ret++] = swp_entry(si->type, offset); + } + + return n_ret; +} + static int scan_swap_map_slots(struct swap_info_struct *si, unsigned char usage, int nr, swp_entry_t slots[], int order) { - struct swap_cluster_info *ci; unsigned long offset; unsigned long scan_base; unsigned long last_in_cluster = 0; @@ -783,26 +854,16 @@ static int scan_swap_map_slots(struct swap_info_struct *si, return 0; } + if (si->cluster_info) + return cluster_alloc_swap(si, usage, nr, slots, order); + si->flags += SWP_SCANNING; - /* - * Use percpu scan base for SSD to reduce lock contention on - * cluster and swap cache. For HDD, sequential access is more - * important. - */ - if (si->flags & SWP_SOLIDSTATE) - scan_base = this_cpu_read(*si->cluster_next_cpu); - else - scan_base = si->cluster_next; + + /* For HDD, sequential access is more important. 
*/ + scan_base = si->cluster_next; offset = scan_base; - /* SSD algorithm */ - if (si->cluster_info) { - if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base, order)) { - if (order > 0) - goto no_page; - goto scan; - } - } else if (unlikely(!si->cluster_nr--)) { + if (unlikely(!si->cluster_nr--)) { if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { si->cluster_nr = SWAPFILE_CLUSTER - 1; goto checks; @@ -813,8 +874,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si, /* * If seek is expensive, start searching for new cluster from * start of partition, to minimize the span of allocated swap. - * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info - * case, just handled by scan_swap_map_try_ssd_cluster() above. */ scan_base = offset = si->lowest_bit; last_in_cluster = offset + SWAPFILE_CLUSTER - 1; @@ -842,19 +901,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si, } checks: - if (si->cluster_info) { - while (scan_swap_map_ssd_cluster_conflict(si, offset, order)) { - /* take a break if we already got some slots */ - if (n_ret) - goto done; - if (!scan_swap_map_try_ssd_cluster(si, &offset, - &scan_base, order)) { - if (order > 0) - goto no_page; - goto scan; - } - } - } if (!(si->flags & SWP_WRITEOK)) goto no_page; if (!si->highest_bit) @@ -862,11 +908,9 @@ static int scan_swap_map_slots(struct swap_info_struct *si, if (offset > si->highest_bit) scan_base = offset = si->lowest_bit; - ci = lock_cluster(si, offset); /* reuse swap entry of cache-only swap if not busy. 
*/ if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { int swap_was_freed; - unlock_cluster(ci); spin_unlock(&si->lock); swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); spin_lock(&si->lock); @@ -877,15 +921,12 @@ static int scan_swap_map_slots(struct swap_info_struct *si, } if (si->swap_map[offset]) { - unlock_cluster(ci); if (!n_ret) goto scan; else goto done; } memset(si->swap_map + offset, usage, nr_pages); - add_cluster_info_page(si, si->cluster_info, offset, nr_pages); - unlock_cluster(ci); swap_range_alloc(si, offset, nr_pages); slots[n_ret++] = swp_entry(si->type, offset); @@ -906,13 +947,7 @@ static int scan_swap_map_slots(struct swap_info_struct *si, latency_ration = LATENCY_LIMIT; } - /* try to get more slots in cluster */ - if (si->cluster_info) { - if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base, order)) - goto checks; - if (order > 0) - goto done; - } else if (si->cluster_nr && !si->swap_map[++offset]) { + if (si->cluster_nr && !si->swap_map[++offset]) { /* non-ssd case, still more slots in cluster? 
*/ --si->cluster_nr; goto checks; @@ -981,8 +1016,6 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx) ci = lock_cluster(si, offset); memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER); ci->count = 0; - ci->order = 0; - ci->flags = 0; free_cluster(si, ci); unlock_cluster(ci); swap_range_free(si, offset, SWAPFILE_CLUSTER); @@ -3006,8 +3039,11 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p, ci = cluster_info + idx; if (idx >= nr_clusters) continue; - if (ci->count) + if (ci->count) { + ci->flags = CLUSTER_FLAG_NONFULL; + list_add_tail(&ci->list, &p->nonfull_clusters[0]); continue; + } ci->flags = CLUSTER_FLAG_FREE; list_add_tail(&ci->list, &p->free_clusters); } -- Gitee From 00a9858b92ee43b1282be88a0f0401a74774388f Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 30 Jul 2024 23:49:16 -0700 Subject: [PATCH 1550/2138] mm: swap: clean up initialization helper ANBZ: #9728 commit 3b2561b5daeb3531c011491e9a6d2b934cc8f49f upstream. At this point, alloc_cluster is never called already, and inc_cluster_info_page is called by initialization only, a lot of dead code can be dropped. 
Link: https://lkml.kernel.org/r/20240730-swap-allocator-v5-4-cb9c148b9297@kernel.org Signed-off-by: Kairui Song Reported-by: Barry Song <21cnbao@gmail.com> Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kalesh Singh Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- mm/swapfile.c | 44 ++++++++++---------------------------------- 1 file changed, 10 insertions(+), 34 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index d4d00726f7ad..232551ccd534 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -437,20 +437,6 @@ static void swap_users_ref_free(struct percpu_ref *ref) complete(&si->comp); } -static struct swap_cluster_info *alloc_cluster(struct swap_info_struct *si, unsigned long idx) -{ - struct swap_cluster_info *ci = list_first_entry(&si->free_clusters, - struct swap_cluster_info, list); - - lockdep_assert_held(&si->lock); - lockdep_assert_held(&ci->lock); - VM_BUG_ON(cluster_index(si, ci) != idx); - VM_BUG_ON(ci->count); - list_del(&ci->list); - ci->flags = 0; - return ci; -} - static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { VM_BUG_ON(ci->count != 0); @@ -471,34 +457,24 @@ static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info * } /* - * The cluster corresponding to page_nr will be used. The cluster will be - * removed from free cluster list and its usage counter will be increased by - * count. + * The cluster corresponding to page_nr will be used. The cluster will not be + * added to free cluster list and its usage counter will be increased by 1. + * Only used for initialization. 
*/ -static void add_cluster_info_page(struct swap_info_struct *p, - struct swap_cluster_info *cluster_info, unsigned long page_nr, - unsigned long count) +static void inc_cluster_info_page(struct swap_info_struct *p, + struct swap_cluster_info *cluster_info, unsigned long page_nr) { unsigned long idx = page_nr / SWAPFILE_CLUSTER; - struct swap_cluster_info *ci = cluster_info + idx; + struct swap_cluster_info *ci; if (!cluster_info) return; - if (cluster_is_free(ci)) - alloc_cluster(p, idx); - VM_BUG_ON(ci->count + count > SWAPFILE_CLUSTER); - ci->count += count; -} + ci = cluster_info + idx; + ci->count++; -/* - * The cluster corresponding to page_nr will be used. The cluster will be - * removed from free cluster list and its usage counter will be increased by 1. - */ -static void inc_cluster_info_page(struct swap_info_struct *p, - struct swap_cluster_info *cluster_info, unsigned long page_nr) -{ - add_cluster_info_page(p, cluster_info, page_nr, 1); + VM_BUG_ON(ci->count > SWAPFILE_CLUSTER); + VM_BUG_ON(ci->flags); } /* -- Gitee From ff39313ae00f1aa0629f23b472c55367c6f443b2 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 30 Jul 2024 23:49:17 -0700 Subject: [PATCH 1551/2138] mm: swap: skip slot cache on freeing for mTHP ANBZ: #9728 commit 650975d2b181e30c9017c42cb3f6535287555b1e upstream. Currently when we are freeing mTHP folios from swap cache, we free then one by one and put each entry into swap slot cache. Slot cache is designed to reduce the overhead by batching the freeing, but mTHP swap entries are already continuous so they can be batch freed without it already, it saves litle overhead, or even increase overhead for larger mTHP. What's more, mTHP entries could stay in swap cache for a while. Contiguous swap entry is an rather rare resource so releasing them directly can help improve mTHP allocation success rate when under pressure. 
Link: https://lkml.kernel.org/r/20240730-swap-allocator-v5-5-cb9c148b9297@kernel.org Signed-off-by: Kairui Song Reported-by: Barry Song <21cnbao@gmail.com> Acked-by: Barry Song Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kalesh Singh Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- mm/swapfile.c | 59 +++++++++++++++++++++++---------------------------- 1 file changed, 26 insertions(+), 33 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 232551ccd534..2ffafd46ce5e 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -478,20 +478,21 @@ static void inc_cluster_info_page(struct swap_info_struct *p, } /* - * The cluster ci decreases one usage. If the usage counter becomes 0, + * The cluster ci decreases @nr_pages usage. If the usage counter becomes 0, * which means no page in the cluster is in use, we can optionally discard * the cluster and add it to free cluster list. 
*/ -static void dec_cluster_info_page(struct swap_info_struct *p, struct swap_cluster_info *ci) +static void dec_cluster_info_page(struct swap_info_struct *p, + struct swap_cluster_info *ci, int nr_pages) { if (!p->cluster_info) return; - VM_BUG_ON(ci->count == 0); + VM_BUG_ON(ci->count < nr_pages); VM_BUG_ON(cluster_is_free(ci)); lockdep_assert_held(&p->lock); lockdep_assert_held(&ci->lock); - ci->count--; + ci->count -= nr_pages; if (!ci->count) { free_cluster(p, ci); @@ -984,19 +985,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si, return n_ret; } -static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx) -{ - unsigned long offset = idx * SWAPFILE_CLUSTER; - struct swap_cluster_info *ci; - - ci = lock_cluster(si, offset); - memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER); - ci->count = 0; - free_cluster(si, ci); - unlock_cluster(ci); - swap_range_free(si, offset, SWAPFILE_CLUSTER); -} - int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) { int order = swap_entry_order(entry_order); @@ -1256,21 +1244,28 @@ static unsigned char __swap_entry_free(struct swap_info_struct *p, return usage; } -static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry) +/* + * Drop the last HAS_CACHE flag of swap entries, caller have to + * ensure all entries belong to the same cgroup. 
+ */ +static void swap_entry_range_free(struct swap_info_struct *p, swp_entry_t entry, + unsigned int nr_pages) { - struct swap_cluster_info *ci; unsigned long offset = swp_offset(entry); - unsigned char count; + unsigned char *map = p->swap_map + offset; + unsigned char *map_end = map + nr_pages; + struct swap_cluster_info *ci; ci = lock_cluster(p, offset); - count = p->swap_map[offset]; - VM_BUG_ON(count != SWAP_HAS_CACHE); - p->swap_map[offset] = 0; - dec_cluster_info_page(p, ci); + do { + VM_BUG_ON(*map != SWAP_HAS_CACHE); + *map = 0; + } while (++map < map_end); + dec_cluster_info_page(p, ci, nr_pages); unlock_cluster(ci); - mem_cgroup_uncharge_swap(entry, 1); - swap_range_free(p, offset, 1); + mem_cgroup_uncharge_swap(entry, nr_pages); + swap_range_free(p, offset, nr_pages); } static void cluster_swap_free_nr(struct swap_info_struct *sis, @@ -1331,7 +1326,6 @@ void swap_free_nr(swp_entry_t entry, int nr_pages) void put_swap_folio(struct folio *folio, swp_entry_t entry) { unsigned long offset = swp_offset(entry); - unsigned long idx = offset / SWAPFILE_CLUSTER; struct swap_cluster_info *ci; struct swap_info_struct *si; unsigned char *map; @@ -1344,19 +1338,18 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) return; ci = lock_cluster_or_swap_info(si, offset); - if (size == SWAPFILE_CLUSTER) { + if (size > 1) { map = si->swap_map + offset; - for (i = 0; i < SWAPFILE_CLUSTER; i++) { + for (i = 0; i < size; i++) { val = map[i]; VM_BUG_ON(!(val & SWAP_HAS_CACHE)); if (val == SWAP_HAS_CACHE) free_entries++; } - if (free_entries == SWAPFILE_CLUSTER) { + if (free_entries == size) { unlock_cluster_or_swap_info(si, ci); spin_lock(&si->lock); - mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER); - swap_free_cluster(si, idx); + swap_entry_range_free(si, entry, size); spin_unlock(&si->lock); return; } @@ -1401,7 +1394,7 @@ void swapcache_free_entries(swp_entry_t *entries, int n) for (i = 0; i < n; ++i) { p = swap_info_get_cont(entries[i], prev); if (p) - 
swap_entry_free(p, entries[i]); + swap_entry_range_free(p, entries[i], 1); prev = p; } if (p) -- Gitee From fc0221edd23a04ae09d440b1818ae30a1988acc5 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 30 Jul 2024 23:49:18 -0700 Subject: [PATCH 1552/2138] mm: swap: allow cache reclaim to skip slot cache ANBZ: #9728 commit 862590ac3708e1cbbfb02a8ed78587b86ecba4ba upstream. Currently we free the reclaimed slots through slot cache even if the slot is required to be empty immediately. As a result the reclaim caller will see the slot still occupied even after a successful reclaim, and need to keep reclaiming until slot cache get flushed. This caused ineffective or over reclaim when SWAP is under stress. So introduce a new flag allowing the slot to be emptied bypassing the slot cache. [21cnbao@gmail.com: small folios should have nr_pages == 1 but not nr_page == 0] Link: https://lkml.kernel.org/r/20240805015324.45134-1-21cnbao@gmail.com Link: https://lkml.kernel.org/r/20240730-swap-allocator-v5-6-cb9c148b9297@kernel.org Signed-off-by: Kairui Song Reported-by: Barry Song <21cnbao@gmail.com> Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kalesh Singh Cc: Ryan Roberts Signed-off-by: Andrew Morton [ keep the old params of zswap_invalidate ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- mm/swapfile.c | 152 ++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 109 insertions(+), 43 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 2ffafd46ce5e..7cfec865a146 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -52,8 +52,15 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t, unsigned char); static void free_swap_count_continuations(struct swap_info_struct *); +static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry, + unsigned int nr_pages); static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset, 
unsigned int nr_entries); +static bool folio_swapcache_freeable(struct folio *folio); +static struct swap_cluster_info *lock_cluster_or_swap_info( + struct swap_info_struct *si, unsigned long offset); +static void unlock_cluster_or_swap_info(struct swap_info_struct *si, + struct swap_cluster_info *ci); static DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; @@ -128,8 +135,25 @@ static inline unsigned char swap_count(unsigned char ent) * corresponding page */ #define TTRS_UNMAPPED 0x2 -/* Reclaim the swap entry if swap is getting full*/ +/* Reclaim the swap entry if swap is getting full */ #define TTRS_FULL 0x4 +/* Reclaim directly, bypass the slot cache and don't touch device lock */ +#define TTRS_DIRECT 0x8 + +static bool swap_is_has_cache(struct swap_info_struct *si, + unsigned long offset, int nr_pages) +{ + unsigned char *map = si->swap_map + offset; + unsigned char *map_end = map + nr_pages; + + do { + VM_BUG_ON(!(*map & SWAP_HAS_CACHE)); + if (*map != SWAP_HAS_CACHE) + return false; + } while (++map < map_end); + + return true; +} /* * returns number of pages in the folio that backs the swap entry. If positive, @@ -140,12 +164,22 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset, unsigned long flags) { swp_entry_t entry = swp_entry(si->type, offset); + struct address_space *address_space = swap_address_space(entry); + struct swap_cluster_info *ci; struct folio *folio; - int ret = 0; + int ret, nr_pages; + bool need_reclaim; - folio = filemap_get_folio(swap_address_space(entry), offset); + folio = filemap_get_folio(address_space, offset); if (IS_ERR(folio)) return 0; + + /* offset could point to the middle of a large folio */ + entry = folio->swap; + offset = swp_offset(entry); + nr_pages = folio_nr_pages(folio); + ret = -nr_pages; + /* * When this function is called from scan_swap_map_slots() and it's * called by vmscan.c at reclaiming folios. 
So we hold a folio lock @@ -153,14 +187,50 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, * case and you should use folio_free_swap() with explicit folio_lock() * in usual operations. */ - if (folio_trylock(folio)) { - if ((flags & TTRS_ANYWAY) || - ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) || - ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio))) - ret = folio_free_swap(folio); - folio_unlock(folio); + if (!folio_trylock(folio)) + goto out; + + need_reclaim = ((flags & TTRS_ANYWAY) || + ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) || + ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio))); + if (!need_reclaim || !folio_swapcache_freeable(folio)) + goto out_unlock; + + /* + * It's safe to delete the folio from swap cache only if the folio's + * swap_map is HAS_CACHE only, which means the slots have no page table + * reference or pending writeback, and can't be allocated to others. + */ + ci = lock_cluster_or_swap_info(si, offset); + need_reclaim = swap_is_has_cache(si, offset, nr_pages); + unlock_cluster_or_swap_info(si, ci); + if (!need_reclaim) + goto out_unlock; + + if (!(flags & TTRS_DIRECT)) { + /* Free through slot cache */ + delete_from_swap_cache(folio); + folio_set_dirty(folio); + ret = nr_pages; + goto out_unlock; } - ret = ret ? 
folio_nr_pages(folio) : -folio_nr_pages(folio); + + xa_lock_irq(&address_space->i_pages); + __delete_from_swap_cache(folio, entry, NULL); + xa_unlock_irq(&address_space->i_pages); + folio_ref_sub(folio, nr_pages); + folio_set_dirty(folio); + + spin_lock(&si->lock); + /* Only sinple page folio can be backed by zswap */ + if (nr_pages == 1) + zswap_invalidate(si->type, offset); + swap_entry_range_free(si, entry, nr_pages); + spin_unlock(&si->lock); + ret = nr_pages; +out_unlock: + folio_unlock(folio); +out: folio_put(folio); return ret; } @@ -889,7 +959,7 @@ static int scan_swap_map_slots(struct swap_info_struct *si, if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { int swap_was_freed; spin_unlock(&si->lock); - swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); + swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT); spin_lock(&si->lock); /* entry was freed successfully, try to use this again */ if (swap_was_freed > 0) @@ -1328,9 +1398,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) unsigned long offset = swp_offset(entry); struct swap_cluster_info *ci; struct swap_info_struct *si; - unsigned char *map; - unsigned int i, free_entries = 0; - unsigned char val; int size = 1 << swap_entry_order(folio_order(folio)); si = _swap_info_get(entry); @@ -1338,23 +1405,14 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) return; ci = lock_cluster_or_swap_info(si, offset); - if (size > 1) { - map = si->swap_map + offset; - for (i = 0; i < size; i++) { - val = map[i]; - VM_BUG_ON(!(val & SWAP_HAS_CACHE)); - if (val == SWAP_HAS_CACHE) - free_entries++; - } - if (free_entries == size) { - unlock_cluster_or_swap_info(si, ci); - spin_lock(&si->lock); - swap_entry_range_free(si, entry, size); - spin_unlock(&si->lock); - return; - } + if (size > 1 && swap_is_has_cache(si, offset, size)) { + unlock_cluster_or_swap_info(si, ci); + spin_lock(&si->lock); + swap_entry_range_free(si, entry, size); + 
spin_unlock(&si->lock); + return; } - for (i = 0; i < size; i++, entry.val++) { + for (int i = 0; i < size; i++, entry.val++) { if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) { unlock_cluster_or_swap_info(si, ci); free_swap_slot(entry); @@ -1514,16 +1572,7 @@ static bool folio_swapped(struct folio *folio) return swap_page_trans_huge_swapped(si, entry, folio_order(folio)); } -/** - * folio_free_swap() - Free the swap space used for this folio. - * @folio: The folio to remove. - * - * If swap is getting full, or if there are no more mappings of this folio, - * then call folio_free_swap to free its swap space. - * - * Return: true if we were able to release the swap space. - */ -bool folio_free_swap(struct folio *folio) +static bool folio_swapcache_freeable(struct folio *folio) { VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); @@ -1531,8 +1580,6 @@ bool folio_free_swap(struct folio *folio) return false; if (folio_test_writeback(folio)) return false; - if (folio_swapped(folio)) - return false; /* * Once hibernation has begun to create its image of memory, @@ -1552,6 +1599,25 @@ bool folio_free_swap(struct folio *folio) if (pm_suspended_storage()) return false; + return true; +} + +/** + * folio_free_swap() - Free the swap space used for this folio. + * @folio: The folio to remove. + * + * If swap is getting full, or if there are no more mappings of this folio, + * then call folio_free_swap to free its swap space. + * + * Return: true if we were able to release the swap space. + */ +bool folio_free_swap(struct folio *folio) +{ + if (!folio_swapcache_freeable(folio)) + return false; + if (folio_swapped(folio)) + return false; + delete_from_swap_cache(folio); folio_set_dirty(folio); return true; @@ -1628,7 +1694,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr) * to the next boundary. 
*/ nr = __try_to_reclaim_swap(si, offset, - TTRS_UNMAPPED | TTRS_FULL); + TTRS_UNMAPPED | TTRS_FULL); if (nr == 0) nr = 1; else if (nr < 0) -- Gitee From 45509766dee81ac1d6f9815d1ea76dd10f5bb25c Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 30 Jul 2024 23:49:19 -0700 Subject: [PATCH 1553/2138] mm: swap: add a fragment cluster list ANBZ: #9728 commit 477cb7ba28892eda112c79d8f75d10edabfc3050 upstream. Now swap cluster allocator arranges the clusters in LRU style, so the "cold" cluster stay at the head of nonfull lists are the ones that were used for allocation long time ago and still partially occupied. So if allocator can't find enough contiguous slots to satisfy an high order allocation, it's unlikely there will be slot being free on them to satisfy the allocation, at least in a short period. As a result, nonfull cluster scanning will waste time repeatly scanning the unusable head of the list. Also, multiple CPUs could content on the same head cluster of nonfull list. Unlike free clusters which are removed from the list when any CPU starts using it, nonfull cluster stays on the head. So introduce a new list frag list, all scanned nonfull clusters will be moved to this list. Both for avoiding repeated scanning and contention. Frag list is still used as fallback for allocations, so if one CPU failed to allocate one order of slots, it can still steal other CPU's clusters. And order 0 will favor the fragmented clusters to better protect nonfull clusters If any slots on a fragment list are being freed, move the fragment list back to nonfull list indicating it worth another scan on the cluster. Compared to scan upon freeing a slot, this keep the scanning lazy and save some CPU if there are still other clusters to use. It may seems unneccessay to keep the fragmented cluster on list at all if they can't be used for specific order allocation. But this will start to make sense once reclaim dring scanning is ready. 
Link: https://lkml.kernel.org/r/20240730-swap-allocator-v5-7-cb9c148b9297@kernel.org Signed-off-by: Kairui Song Reported-by: Barry Song <21cnbao@gmail.com> Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kalesh Singh Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- include/linux/swap.h | 3 +++ mm/swapfile.c | 41 +++++++++++++++++++++++++++++++++++++---- 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index eb17203bbbad..059ca48cec43 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -259,6 +259,7 @@ struct swap_cluster_info { }; #define CLUSTER_FLAG_FREE 1 /* This cluster is free */ #define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */ +#define CLUSTER_FLAG_FRAG 4 /* This cluster is on nonfull list */ /* * The first page in the swap file is the swap header, which is always marked @@ -298,6 +299,8 @@ struct swap_info_struct { struct list_head free_clusters; /* free clusters list */ struct list_head nonfull_clusters[SWAP_NR_ORDERS]; /* list of cluster that contains at least one free slot */ + struct list_head frag_clusters[SWAP_NR_ORDERS]; + /* list of cluster that are fragmented or contented */ unsigned int lowest_bit; /* index of first free in swap_map */ unsigned int highest_bit; /* index of last free in swap_map */ unsigned int pages; /* total of usable pages of swap */ diff --git a/mm/swapfile.c b/mm/swapfile.c index 7cfec865a146..042499af4cc6 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -571,7 +571,10 @@ static void dec_cluster_info_page(struct swap_info_struct *p, if (!(ci->flags & CLUSTER_FLAG_NONFULL)) { VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE); - list_add_tail(&ci->list, &p->nonfull_clusters[ci->order]); + if (ci->flags & CLUSTER_FLAG_FRAG) + list_move_tail(&ci->list, &p->nonfull_clusters[ci->order]); + else + list_add_tail(&ci->list, 
&p->nonfull_clusters[ci->order]); ci->flags = CLUSTER_FLAG_NONFULL; } } @@ -609,7 +612,8 @@ static inline void cluster_alloc_range(struct swap_info_struct *si, struct swap_ ci->count += nr_pages; if (ci->count == SWAPFILE_CLUSTER) { - VM_BUG_ON(!(ci->flags & (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL))); + VM_BUG_ON(!(ci->flags & + (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL | CLUSTER_FLAG_FRAG))); list_del(&ci->list); ci->flags = 0; } @@ -665,6 +669,7 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o struct percpu_cluster *cluster; struct swap_cluster_info *ci, *n; unsigned int offset, found = 0; + LIST_HEAD(fraged); new_cluster: lockdep_assert_held(&si->lock); @@ -685,13 +690,29 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o if (order < PMD_ORDER) { list_for_each_entry_safe(ci, n, &si->nonfull_clusters[order], list) { + list_move_tail(&ci->list, &fraged); + ci->flags = CLUSTER_FLAG_FRAG; offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage); if (found) - goto done; + break; } + + if (!found) { + list_for_each_entry_safe(ci, n, &si->frag_clusters[order], list) { + offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), + &found, order, usage); + if (found) + break; + } + } + + list_splice_tail(&fraged, &si->frag_clusters[order]); } + if (found) + goto done; + if (!list_empty(&si->discard_clusters)) { /* * we don't have free cluster but have some clusters in @@ -705,7 +726,17 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o if (order) goto done; + /* Order 0 stealing from higher order */ for (int o = 1; o < SWAP_NR_ORDERS; o++) { + if (!list_empty(&si->frag_clusters[o])) { + ci = list_first_entry(&si->frag_clusters[o], + struct swap_cluster_info, list); + offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, + 0, usage); + VM_BUG_ON(!found); + goto done; + } + if (!list_empty(&si->nonfull_clusters[o])) { ci = 
list_first_entry(&si->nonfull_clusters[o], struct swap_cluster_info, list); @@ -3017,8 +3048,10 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p, INIT_LIST_HEAD(&p->free_clusters); INIT_LIST_HEAD(&p->discard_clusters); - for (i = 0; i < SWAP_NR_ORDERS; i++) + for (i = 0; i < SWAP_NR_ORDERS; i++) { INIT_LIST_HEAD(&p->nonfull_clusters[i]); + INIT_LIST_HEAD(&p->frag_clusters[i]); + } for (i = 0; i < swap_header->info.nr_badpages; i++) { unsigned int page_nr = swap_header->info.badpages[i]; -- Gitee From 972876ee3c82c2df7faef35d78bedb6296cf820a Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 30 Jul 2024 23:49:20 -0700 Subject: [PATCH 1554/2138] mm: swap: relaim the cached parts that got scanned ANBZ: #9728 commit 661383c6111a38c88df61af6bfbcfacd2ff20a67 upstream. This commit implements reclaim during scan for cluster allocator. Cluster scanning were unable to reuse SWAP_HAS_CACHE slots, which could result in low allocation success rate or early OOM. So to ensure maximum allocation success rate, integrate reclaiming with scanning. If found a range of suitable swap slots but fragmented due to HAS_CACHE, just try to reclaim the slots. 
Link: https://lkml.kernel.org/r/20240730-swap-allocator-v5-8-cb9c148b9297@kernel.org Signed-off-by: Kairui Song Reported-by: Barry Song <21cnbao@gmail.com> Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kalesh Singh Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- include/linux/swap.h | 1 + mm/swapfile.c | 140 +++++++++++++++++++++++++++++++++---------- 2 files changed, 110 insertions(+), 31 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 059ca48cec43..839fa32b0658 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -301,6 +301,7 @@ struct swap_info_struct { /* list of cluster that contains at least one free slot */ struct list_head frag_clusters[SWAP_NR_ORDERS]; /* list of cluster that are fragmented or contented */ + unsigned int frag_cluster_nr[SWAP_NR_ORDERS]; unsigned int lowest_bit; /* index of first free in swap_map */ unsigned int highest_bit; /* index of last free in swap_map */ unsigned int pages; /* total of usable pages of swap */ diff --git a/mm/swapfile.c b/mm/swapfile.c index 042499af4cc6..92bff311cdc6 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -512,6 +512,10 @@ static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info * VM_BUG_ON(ci->count != 0); lockdep_assert_held(&si->lock); lockdep_assert_held(&ci->lock); + + if (ci->flags & CLUSTER_FLAG_FRAG) + si->frag_cluster_nr[ci->order]--; + /* * If the swap is discardable, prepare discard the cluster * instead of free it immediately. 
The cluster will be freed @@ -571,31 +575,84 @@ static void dec_cluster_info_page(struct swap_info_struct *p, if (!(ci->flags & CLUSTER_FLAG_NONFULL)) { VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE); - if (ci->flags & CLUSTER_FLAG_FRAG) + if (ci->flags & CLUSTER_FLAG_FRAG) { + p->frag_cluster_nr[ci->order]--; list_move_tail(&ci->list, &p->nonfull_clusters[ci->order]); - else + } else { list_add_tail(&ci->list, &p->nonfull_clusters[ci->order]); + } ci->flags = CLUSTER_FLAG_NONFULL; } } -static inline bool cluster_scan_range(struct swap_info_struct *si, unsigned int start, - unsigned int nr_pages) +static bool cluster_reclaim_range(struct swap_info_struct *si, + struct swap_cluster_info *ci, + unsigned long start, unsigned long end) { - unsigned char *p = si->swap_map + start; - unsigned char *end = p + nr_pages; + unsigned char *map = si->swap_map; + unsigned long offset; + + spin_unlock(&ci->lock); + spin_unlock(&si->lock); + + for (offset = start; offset < end; offset++) { + switch (READ_ONCE(map[offset])) { + case 0: + continue; + case SWAP_HAS_CACHE: + if (__try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT) > 0) + continue; + goto out; + default: + goto out; + } + } +out: + spin_lock(&si->lock); + spin_lock(&ci->lock); - while (p < end) - if (*p++) + /* + * Recheck the range no matter reclaim succeeded or not, the slot + * could have been be freed while we are not holding the lock. 
+ */ + for (offset = start; offset < end; offset++) + if (READ_ONCE(map[offset])) return false; return true; } +static bool cluster_scan_range(struct swap_info_struct *si, + struct swap_cluster_info *ci, + unsigned long start, unsigned int nr_pages) +{ + unsigned long offset, end = start + nr_pages; + unsigned char *map = si->swap_map; + bool need_reclaim = false; -static inline void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci, - unsigned int start, unsigned char usage, - unsigned int order) + for (offset = start; offset < end; offset++) { + switch (READ_ONCE(map[offset])) { + case 0: + continue; + case SWAP_HAS_CACHE: + if (!vm_swap_full()) + return false; + need_reclaim = true; + continue; + default: + return false; + } + } + + if (need_reclaim) + return cluster_reclaim_range(si, ci, start, end); + + return true; +} + +static void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci, + unsigned int start, unsigned char usage, + unsigned int order) { unsigned int nr_pages = 1 << order; @@ -614,6 +671,8 @@ static inline void cluster_alloc_range(struct swap_info_struct *si, struct swap_ if (ci->count == SWAPFILE_CLUSTER) { VM_BUG_ON(!(ci->flags & (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL | CLUSTER_FLAG_FRAG))); + if (ci->flags & CLUSTER_FLAG_FRAG) + si->frag_cluster_nr[ci->order]--; list_del(&ci->list); ci->flags = 0; } @@ -639,7 +698,7 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigne } while (offset <= end) { - if (cluster_scan_range(si, offset, nr_pages)) { + if (cluster_scan_range(si, ci, offset, nr_pages)) { cluster_alloc_range(si, ci, offset, usage, order); *foundp = offset; if (ci->count == SWAPFILE_CLUSTER) { @@ -667,9 +726,8 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o unsigned char usage) { struct percpu_cluster *cluster; - struct swap_cluster_info *ci, *n; + struct swap_cluster_info *ci; unsigned int offset, found = 0; - 
LIST_HEAD(fraged); new_cluster: lockdep_assert_held(&si->lock); @@ -689,25 +747,42 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o } if (order < PMD_ORDER) { - list_for_each_entry_safe(ci, n, &si->nonfull_clusters[order], list) { - list_move_tail(&ci->list, &fraged); + unsigned int frags = 0; + + while (!list_empty(&si->nonfull_clusters[order])) { + ci = list_first_entry(&si->nonfull_clusters[order], + struct swap_cluster_info, list); + list_move_tail(&ci->list, &si->frag_clusters[order]); ci->flags = CLUSTER_FLAG_FRAG; + si->frag_cluster_nr[order]++; offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage); + frags++; if (found) break; } if (!found) { - list_for_each_entry_safe(ci, n, &si->frag_clusters[order], list) { + /* + * Nonfull clusters are moved to frag tail if we reached + * here, count them too, don't over scan the frag list. + */ + while (frags < si->frag_cluster_nr[order]) { + ci = list_first_entry(&si->frag_clusters[order], + struct swap_cluster_info, list); + /* + * Rotate the frag list to iterate, they were all failing + * high order allocation or moved here due to per-CPU usage, + * this help keeping usable cluster ahead. + */ + list_move_tail(&ci->list, &si->frag_clusters[order]); offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage); + frags++; if (found) break; } } - - list_splice_tail(&fraged, &si->frag_clusters[order]); } if (found) @@ -728,25 +803,28 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o /* Order 0 stealing from higher order */ for (int o = 1; o < SWAP_NR_ORDERS; o++) { - if (!list_empty(&si->frag_clusters[o])) { + /* + * Clusters here have at least one usable slots and can't fail order 0 + * allocation, but reclaim may drop si->lock and race with another user. 
+ */ + while (!list_empty(&si->frag_clusters[o])) { ci = list_first_entry(&si->frag_clusters[o], struct swap_cluster_info, list); - offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, - 0, usage); - VM_BUG_ON(!found); - goto done; + offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), + &found, 0, usage); + if (found) + goto done; } - if (!list_empty(&si->nonfull_clusters[o])) { - ci = list_first_entry(&si->nonfull_clusters[o], struct swap_cluster_info, - list); + while (!list_empty(&si->nonfull_clusters[o])) { + ci = list_first_entry(&si->nonfull_clusters[o], + struct swap_cluster_info, list); offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, 0, usage); - VM_BUG_ON(!found); - goto done; + if (found) + goto done; } } - done: cluster->next[order] = offset; return found; @@ -3051,6 +3129,7 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p, for (i = 0; i < SWAP_NR_ORDERS; i++) { INIT_LIST_HEAD(&p->nonfull_clusters[i]); INIT_LIST_HEAD(&p->frag_clusters[i]); + p->frag_cluster_nr[i] = 0; } for (i = 0; i < swap_header->info.nr_badpages; i++) { @@ -3094,7 +3173,6 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p, if (!cluster_info) return nr_extents; - /* * Reduce false cache line sharing between cluster_info and * sharing same address space. -- Gitee From 78add56ba40176ec7227b4593d0c2ff73609bd6d Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 30 Jul 2024 23:49:21 -0700 Subject: [PATCH 1555/2138] mm: swap: add a adaptive full cluster cache reclaim ANBZ: #9728 commit 2cacbdfdee65b18f9952620e762eab043d71b564 upstream. Link all full cluster with one full list, and reclaim from it when the allocation have ran out of all usable clusters. There are many reason a folio can end up being in the swap cache while having no swap count reference. So the best way to search for such slots is still by iterating the swap clusters. 
With the list as an LRU, iterating from the oldest cluster and keep them rotating is a very doable and clean way to free up potentially not inuse clusters. When any allocation failure, try reclaim and rotate only one cluster. This is adaptive for high order allocations they can tolerate fallback. So this avoids latency, and give the full cluster list an fair chance to get reclaimed. It release the usage stress for the fallback order 0 allocation or following up high order allocation. If the swap device is getting very full, reclaim more aggresively to ensure no OOM will happen. This ensures order 0 heavy workload won't go OOM as order 0 won't fail if any cluster still have any space. [ryncsn@gmail.com: fix discard of full cluster] Link: https://lkml.kernel.org/r/CAMgjq7CWwK75_2Zi5P40K08pk9iqOcuWKL6khu=x4Yg_nXaQag@mail.gmail.com Link: https://lkml.kernel.org/r/20240730-swap-allocator-v5-9-cb9c148b9297@kernel.org Signed-off-by: Kairui Song Reported-by: Barry Song <21cnbao@gmail.com> Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kalesh Singh Cc: Ryan Roberts Cc: David Hildenbrand Cc: Kairui Song Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- include/linux/swap.h | 2 ++ mm/swapfile.c | 68 +++++++++++++++++++++++++++++++++++--------- 2 files changed, 57 insertions(+), 13 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 839fa32b0658..a5fe71b945e4 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -260,6 +260,7 @@ struct swap_cluster_info { #define CLUSTER_FLAG_FREE 1 /* This cluster is free */ #define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */ #define CLUSTER_FLAG_FRAG 4 /* This cluster is on nonfull list */ +#define CLUSTER_FLAG_FULL 8 /* This cluster is on full list */ /* * The first page in the swap file is the swap header, which is always marked @@ -297,6 +298,7 @@ struct swap_info_struct { 
unsigned char *swap_map; /* vmalloc'ed array of usage counts */ struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */ struct list_head free_clusters; /* free clusters list */ + struct list_head full_clusters; /* full clusters list */ struct list_head nonfull_clusters[SWAP_NR_ORDERS]; /* list of cluster that contains at least one free slot */ struct list_head frag_clusters[SWAP_NR_ORDERS]; diff --git a/mm/swapfile.c b/mm/swapfile.c index 92bff311cdc6..b7f851ad0126 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -439,10 +439,7 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si, SWAP_MAP_BAD, SWAPFILE_CLUSTER); VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE); - if (ci->flags & CLUSTER_FLAG_NONFULL) - list_move_tail(&ci->list, &si->discard_clusters); - else - list_add_tail(&ci->list, &si->discard_clusters); + list_move_tail(&ci->list, &si->discard_clusters); ci->flags = 0; schedule_work(&si->discard_work); } @@ -452,7 +449,7 @@ static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info lockdep_assert_held(&si->lock); lockdep_assert_held(&ci->lock); - if (ci->flags & CLUSTER_FLAG_NONFULL) + if (ci->flags) list_move_tail(&ci->list, &si->free_clusters); else list_add_tail(&ci->list, &si->free_clusters); @@ -479,7 +476,6 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si) SWAPFILE_CLUSTER); spin_lock(&si->lock); - spin_lock(&ci->lock); __free_cluster(si, ci); memset(si->swap_map + idx * SWAPFILE_CLUSTER, @@ -575,12 +571,9 @@ static void dec_cluster_info_page(struct swap_info_struct *p, if (!(ci->flags & CLUSTER_FLAG_NONFULL)) { VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE); - if (ci->flags & CLUSTER_FLAG_FRAG) { + if (ci->flags & CLUSTER_FLAG_FRAG) p->frag_cluster_nr[ci->order]--; - list_move_tail(&ci->list, &p->nonfull_clusters[ci->order]); - } else { - list_add_tail(&ci->list, &p->nonfull_clusters[ci->order]); - } + list_move_tail(&ci->list, &p->nonfull_clusters[ci->order]); ci->flags = 
CLUSTER_FLAG_NONFULL; } } @@ -673,8 +666,8 @@ static void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL | CLUSTER_FLAG_FRAG))); if (ci->flags & CLUSTER_FLAG_FRAG) si->frag_cluster_nr[ci->order]--; - list_del(&ci->list); - ci->flags = 0; + list_move_tail(&ci->list, &si->full_clusters); + ci->flags = CLUSTER_FLAG_FULL; } } @@ -717,6 +710,46 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigne return offset; } +static void swap_reclaim_full_clusters(struct swap_info_struct *si) +{ + long to_scan = 1; + unsigned long offset, end; + struct swap_cluster_info *ci; + unsigned char *map = si->swap_map; + int nr_reclaim, total_reclaimed = 0; + + if (atomic_long_read(&nr_swap_pages) <= SWAPFILE_CLUSTER) + to_scan = si->inuse_pages / SWAPFILE_CLUSTER; + + while (!list_empty(&si->full_clusters)) { + ci = list_first_entry(&si->full_clusters, struct swap_cluster_info, list); + list_move_tail(&ci->list, &si->full_clusters); + offset = cluster_offset(si, ci); + end = min(si->max, offset + SWAPFILE_CLUSTER); + to_scan--; + + while (offset < end) { + if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) { + spin_unlock(&si->lock); + nr_reclaim = __try_to_reclaim_swap(si, offset, + TTRS_ANYWAY | TTRS_DIRECT); + spin_lock(&si->lock); + if (nr_reclaim > 0) { + offset += nr_reclaim; + total_reclaimed += nr_reclaim; + continue; + } else if (nr_reclaim < 0) { + offset += -nr_reclaim; + continue; + } + } + offset++; + } + if (to_scan <= 0 || total_reclaimed) + break; + } +} + /* * Try to get swap entries with specified order from current cpu's swap entry * pool (a cluster). 
This might involve allocating a new cluster for current CPU @@ -825,7 +858,15 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o goto done; } } + done: + /* Try reclaim from full clusters if device is nearfull */ + if (vm_swap_full() && (!found || (si->pages - si->inuse_pages) < SWAPFILE_CLUSTER)) { + swap_reclaim_full_clusters(si); + if (!found && !order && si->pages != si->inuse_pages) + goto new_cluster; + } + cluster->next[order] = offset; return found; } @@ -3124,6 +3165,7 @@ static int setup_swap_map_and_extents(struct swap_info_struct *p, nr_good_pages = maxpages - 1; /* omit header page */ INIT_LIST_HEAD(&p->free_clusters); + INIT_LIST_HEAD(&p->full_clusters); INIT_LIST_HEAD(&p->discard_clusters); for (i = 0; i < SWAP_NR_ORDERS; i++) { -- Gitee From 2359c30a7aca288d5ed610220df9c332bc3caaae Mon Sep 17 00:00:00 2001 From: Barry Song Date: Mon, 9 Sep 2024 11:21:18 +1200 Subject: [PATCH 1556/2138] mm: add nr argument in mem_cgroup_swapin_uncharge_swap() helper to support large folios ANBZ: #9728 commit 325efb16da2c840e165d9b620fec8049d4d664cc upstream. With large folios swap-in, we might need to uncharge multiple entries all together, add nr argument in mem_cgroup_swapin_uncharge_swap(). For the existing two users, just pass nr=1. 
Link: https://lkml.kernel.org/r/20240908232119.2157-3-21cnbao@gmail.com Signed-off-by: Barry Song Acked-by: Chris Li Reviewed-by: Yosry Ahmed Cc: Shakeel Butt Cc: Baolin Wang Cc: Christoph Hellwig Cc: David Hildenbrand Cc: Gao Xiang Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kairui Song Cc: Kairui Song Cc: Kalesh Singh Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Minchan Kim Cc: Nhat Pham Cc: Ryan Roberts Cc: Sergey Senozhatsky Cc: Suren Baghdasaryan Cc: Yang Shi Cc: Chuanhua Han Cc: Kanchana P Sridhar Cc: Usama Arif Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- include/linux/memcontrol.h | 5 +++-- mm/memcontrol.c | 7 ++++--- mm/memory.c | 2 +- mm/swap_state.c | 2 +- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 9a1e6b5cdb31..5bfa630a7635 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -708,7 +708,8 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry); -void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry); + +void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages); void __mem_cgroup_uncharge(struct folio *folio); @@ -1284,7 +1285,7 @@ static inline int mem_cgroup_swapin_charge_folio(struct folio *folio, return 0; } -static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) +static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr) { } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 01733b9fb62d..cfc65fabc6f8 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -7153,14 +7153,15 @@ int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, /* * mem_cgroup_swapin_uncharge_swap - uncharge swap slot - * @entry: 
swap entry for which the page is charged + * @entry: the first swap entry for which the pages are charged + * @nr_pages: number of pages which will be uncharged * * Call this function after successfully adding the charged page to swapcache. * * Note: This function assumes the page for which swap slot is being uncharged * is order 0 page. */ -void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) +void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) { /* * Cgroup1's unified memory+swap counter has been charged with the @@ -7180,7 +7181,7 @@ void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) * let's not wait for it. The page already received a * memory+swap charge, drop the swap entry duplicate. */ - mem_cgroup_uncharge_swap(entry, 1); + mem_cgroup_uncharge_swap(entry, nr_pages); } } diff --git a/mm/memory.c b/mm/memory.c index 52a1a3c3165d..25cce06fe6a2 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4169,7 +4169,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) ret = VM_FAULT_OOM; goto out_page; } - mem_cgroup_swapin_uncharge_swap(entry); + mem_cgroup_swapin_uncharge_swap(entry, 1); shadow = get_shadow_from_swap_cache(entry); if (shadow) diff --git a/mm/swap_state.c b/mm/swap_state.c index cdbeb9e4a5b0..d331f21749f8 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -503,7 +503,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) goto fail_unlock; - mem_cgroup_swapin_uncharge_swap(entry); + mem_cgroup_swapin_uncharge_swap(entry, 1); if (shadow) workingset_refault(folio, shadow); -- Gitee From 9b13a727d85e9d501d826e476692d7ba3c9f732d Mon Sep 17 00:00:00 2001 From: Chuanhua Han Date: Mon, 9 Sep 2024 11:21:19 +1200 Subject: [PATCH 1557/2138] mm: support large folios swap-in for sync io devices ANBZ: #9728 commit 242d12c98174584a18965cfab95778893872d650 upstream. 
Currently, we have mTHP features, but unfortunately, without support for large folio swap-ins, once these large folios are swapped out, they are lost because mTHP swap is a one-way process. The lack of mTHP swap-in functionality prevents mTHP from being used on devices like Android that heavily rely on swap. This patch introduces mTHP swap-in support. It starts from sync devices such as zRAM. This is probably the simplest and most common use case, benefiting billions of Android phones and similar devices with minimal implementation cost. In this straightforward scenario, large folios are always exclusive, eliminating the need to handle complex rmap and swapcache issues. It offers several benefits: 1. Enables bidirectional mTHP swapping, allowing retrieval of mTHP after swap-out and swap-in. Large folios in the buddy system are also preserved as much as possible, rather than being fragmented due to swap-in. 2. Eliminates fragmentation in swap slots and supports successful THP_SWPOUT. w/o this patch (Refer to the data from Chris's and Kairui's latest swap allocator optimization while running ./thp_swap_allocator_test w/o "-a" option [1]): ./thp_swap_allocator_test Iteration 1: swpout inc: 233, swpout fallback inc: 0, Fallback percentage: 0.00% Iteration 2: swpout inc: 131, swpout fallback inc: 101, Fallback percentage: 43.53% Iteration 3: swpout inc: 71, swpout fallback inc: 155, Fallback percentage: 68.58% Iteration 4: swpout inc: 55, swpout fallback inc: 168, Fallback percentage: 75.34% Iteration 5: swpout inc: 35, swpout fallback inc: 191, Fallback percentage: 84.51% Iteration 6: swpout inc: 25, swpout fallback inc: 199, Fallback percentage: 88.84% Iteration 7: swpout inc: 23, swpout fallback inc: 205, Fallback percentage: 89.91% Iteration 8: swpout inc: 9, swpout fallback inc: 219, Fallback percentage: 96.05% Iteration 9: swpout inc: 13, swpout fallback inc: 213, Fallback percentage: 94.25% Iteration 10: swpout inc: 12, swpout fallback inc: 216, Fallback 
percentage: 94.74% Iteration 11: swpout inc: 16, swpout fallback inc: 213, Fallback percentage: 93.01% Iteration 12: swpout inc: 10, swpout fallback inc: 210, Fallback percentage: 95.45% Iteration 13: swpout inc: 16, swpout fallback inc: 212, Fallback percentage: 92.98% Iteration 14: swpout inc: 12, swpout fallback inc: 212, Fallback percentage: 94.64% Iteration 15: swpout inc: 15, swpout fallback inc: 211, Fallback percentage: 93.36% Iteration 16: swpout inc: 15, swpout fallback inc: 200, Fallback percentage: 93.02% Iteration 17: swpout inc: 9, swpout fallback inc: 220, Fallback percentage: 96.07% w/ this patch (always 0%): Iteration 1: swpout inc: 948, swpout fallback inc: 0, Fallback percentage: 0.00% Iteration 2: swpout inc: 953, swpout fallback inc: 0, Fallback percentage: 0.00% Iteration 3: swpout inc: 950, swpout fallback inc: 0, Fallback percentage: 0.00% Iteration 4: swpout inc: 952, swpout fallback inc: 0, Fallback percentage: 0.00% Iteration 5: swpout inc: 950, swpout fallback inc: 0, Fallback percentage: 0.00% Iteration 6: swpout inc: 950, swpout fallback inc: 0, Fallback percentage: 0.00% Iteration 7: swpout inc: 947, swpout fallback inc: 0, Fallback percentage: 0.00% Iteration 8: swpout inc: 950, swpout fallback inc: 0, Fallback percentage: 0.00% Iteration 9: swpout inc: 950, swpout fallback inc: 0, Fallback percentage: 0.00% Iteration 10: swpout inc: 945, swpout fallback inc: 0, Fallback percentage: 0.00% Iteration 11: swpout inc: 947, swpout fallback inc: 0, Fallback percentage: 0.00% ... 3. With both mTHP swap-out and swap-in supported, we offer the option to enable zsmalloc compression/decompression with larger granularity[2]. The upcoming optimization in zsmalloc will significantly increase swap speed and improve compression efficiency. 
Tested by running 100 iterations of swapping 100MiB of anon memory, the swap speed improved dramatically: time consumption of swapin(ms) time consumption of swapout(ms) lz4 4k 45274 90540 lz4 64k 22942 55667 zstdn 4k 85035 186585 zstdn 64k 46558 118533 The compression ratio also improved, as evaluated with 1 GiB of data: granularity orig_data_size compr_data_size 4KiB-zstd 1048576000 246876055 64KiB-zstd 1048576000 199763892 Without mTHP swap-in, the potential optimizations in zsmalloc cannot be realized. 4. Even mTHP swap-in itself can reduce swap-in page faults by a factor of nr_pages. Swapping in content filled with the same data 0x11, w/o and w/ the patch for five rounds (Since the content is the same, decompression will be very fast. This primarily assesses the impact of reduced page faults): swp in bandwidth(bytes/ms) w/o w/ round1 624152 1127501 round2 631672 1127501 round3 620459 1139756 round4 606113 1139756 round5 624152 1152281 avg 621310 1137359 +83% 5. With both mTHP swap-out and swap-in supported, we offer the option to enable hardware accelerators(Intel IAA) to do parallel decompression with which Kanchana reported 7X improvement on zRAM read latency[3]. 
[1] https://lore.kernel.org/all/20240730-swap-allocator-v5-0-cb9c148b9297@kernel.org/ [2] https://lore.kernel.org/all/20240327214816.31191-1-21cnbao@gmail.com/ [3] https://lore.kernel.org/all/cover.1714581792.git.andre.glover@linux.intel.com/ Link: https://lkml.kernel.org/r/20240908232119.2157-4-21cnbao@gmail.com Signed-off-by: Chuanhua Han Co-developed-by: Barry Song Signed-off-by: Barry Song Cc: Baolin Wang Cc: Chris Li Cc: Christoph Hellwig Cc: David Hildenbrand Cc: Gao Xiang Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kairui Song Cc: Kalesh Singh Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Minchan Kim Cc: Nhat Pham Cc: Ryan Roberts Cc: Sergey Senozhatsky Cc: Shakeel Butt Cc: Suren Baghdasaryan Cc: Yang Shi Cc: Yosry Ahmed Cc: Usama Arif Cc: Kanchana P Sridhar Cc: Kairui Song Signed-off-by: Andrew Morton [ shawnwang: remove zeromap checking ] [ shawnwang: add include/linux/zswap.h to mm/memory.c ] [ shawnwang: refactor thp_vma_allowable_orders's params ] Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3997 --- mm/memory.c | 260 ++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 233 insertions(+), 27 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 25cce06fe6a2..1e8f547eaa62 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -76,6 +76,7 @@ #include #include #include +#include #include #include @@ -4053,6 +4054,192 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } +static struct folio *__alloc_swap_folio(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct folio *folio; + swp_entry_t entry; + + folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, + vmf->address, false); + if (!folio) + return NULL; + + entry = pte_to_swp_entry(vmf->orig_pte); + if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, + GFP_KERNEL, entry)) { + folio_put(folio); + return NULL; + } + + return folio; +} + +#ifdef 
CONFIG_TRANSPARENT_HUGEPAGE +static inline int non_swapcache_batch(swp_entry_t entry, int max_nr) +{ + struct swap_info_struct *si = swp_swap_info(entry); + pgoff_t offset = swp_offset(entry); + int i; + + /* + * While allocating a large folio and doing swap_read_folio, which is + * the case the being faulted pte doesn't have swapcache. We need to + * ensure all PTEs have no cache as well, otherwise, we might go to + * swap devices while the content is in swapcache. + */ + for (i = 0; i < max_nr; i++) { + if ((si->swap_map[offset + i] & SWAP_HAS_CACHE)) + return i; + } + + return i; +} + +/* + * Check if the PTEs within a range are contiguous swap entries + * and have consistent swapcache, zeromap. + */ +static bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages) +{ + unsigned long addr; + swp_entry_t entry; + int idx; + pte_t pte; + + addr = ALIGN_DOWN(vmf->address, nr_pages * PAGE_SIZE); + idx = (vmf->address - addr) / PAGE_SIZE; + pte = ptep_get(ptep); + + if (!pte_same(pte, pte_move_swp_offset(vmf->orig_pte, -idx))) + return false; + entry = pte_to_swp_entry(pte); + if (swap_pte_batch(ptep, nr_pages, pte) != nr_pages) + return false; + + /* + * swap_read_folio() can't handle the case a large folio is hybridly + * from different backends. And they are likely corner cases. Similar + * things might be added once zswap support large folios. + */ + if (unlikely(non_swapcache_batch(entry, nr_pages) != nr_pages)) + return false; + + return true; +} + +static inline unsigned long thp_swap_suitable_orders(pgoff_t swp_offset, + unsigned long addr, + unsigned long orders) +{ + int order, nr; + + order = highest_order(orders); + + /* + * To swap in a THP with nr pages, we require that its first swap_offset + * is aligned with that number, as it was when the THP was swapped out. + * This helps filter out most invalid entries. 
+ */ + while (orders) { + nr = 1 << order; + if ((addr >> PAGE_SHIFT) % nr == swp_offset % nr) + break; + order = next_order(&orders, order); + } + + return orders; +} + +static struct folio *alloc_swap_folio(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + unsigned long orders; + struct folio *folio; + unsigned long addr; + swp_entry_t entry; + spinlock_t *ptl; + pte_t *pte; + gfp_t gfp; + int order; + + /* + * If uffd is active for the vma we need per-page fault fidelity to + * maintain the uffd semantics. + */ + if (unlikely(userfaultfd_armed(vma))) + goto fallback; + + /* + * A large swapped out folio could be partially or fully in zswap. We + * lack handling for such cases, so fallback to swapping in order-0 + * folio. + */ + if (!zswap_never_enabled()) + goto fallback; + + entry = pte_to_swp_entry(vmf->orig_pte); + /* + * Get a list of all the (large) orders below PMD_ORDER that are enabled + * and suitable for swapping THP. + */ + orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true, + BIT(PMD_ORDER) - 1); + orders = thp_vma_suitable_orders(vma, vmf->address, orders); + orders = thp_swap_suitable_orders(swp_offset(entry), + vmf->address, orders); + + if (!orders) + goto fallback; + + pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, + vmf->address & PMD_MASK, &ptl); + if (unlikely(!pte)) + goto fallback; + + /* + * For do_swap_page, find the highest order where the aligned range is + * completely swap entries with contiguous swap offsets. + */ + order = highest_order(orders); + while (orders) { + addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); + if (can_swapin_thp(vmf, pte + pte_index(addr), 1 << order)) + break; + order = next_order(&orders, order); + } + + pte_unmap_unlock(pte, ptl); + + /* Try allocating the highest of the remaining orders. 
*/ + gfp = vma_thp_gfp_mask(vma); + while (orders) { + addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order); + folio = vma_alloc_folio(gfp, order, vma, addr, true); + if (folio) { + if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, + gfp, entry)) + return folio; + folio_put(folio); + } + order = next_order(&orders, order); + } + +fallback: + return __alloc_swap_folio(vmf); +} +#else /* !CONFIG_TRANSPARENT_HUGEPAGE */ +static inline bool can_swapin_thp(struct vm_fault *vmf, pte_t *ptep, int nr_pages) +{ + return false; +} + +static struct folio *alloc_swap_folio(struct vm_fault *vmf) +{ + return __alloc_swap_folio(vmf); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + /* * We enter with non-exclusive mmap_lock (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. @@ -4141,35 +4328,35 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) if (!folio) { if (data_race(si->flags & SWP_SYNCHRONOUS_IO) && __swap_count(entry) == 1) { - /* - * Prevent parallel swapin from proceeding with - * the cache flag. Otherwise, another thread may - * finish swapin first, free the entry, and swapout - * reusing the same entry. It's undetectable as - * pte_same() returns true due to entry reuse. - */ - if (swapcache_prepare(entry, 1)) { - /* Relax a bit to prevent rapid repeated page faults */ - schedule_timeout_uninterruptible(1); - goto out; - } - need_clear_cache = true; - /* skip swapcache */ - folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, - vma, vmf->address, false); + folio = alloc_swap_folio(vmf); page = &folio->page; if (folio) { __folio_set_locked(folio); __folio_set_swapbacked(folio); - if (mem_cgroup_swapin_charge_folio(folio, - vma->vm_mm, GFP_KERNEL, - entry)) { - ret = VM_FAULT_OOM; + nr_pages = folio_nr_pages(folio); + if (folio_test_large(folio)) + entry.val = ALIGN_DOWN(entry.val, nr_pages); + /* + * Prevent parallel swapin from proceeding with + * the cache flag. 
Otherwise, another thread + * may finish swapin first, free the entry, and + * swapout reusing the same entry. It's + * undetectable as pte_same() returns true due + * to entry reuse. + */ + if (swapcache_prepare(entry, nr_pages)) { + /* + * Relax a bit to prevent rapid + * repeated page faults. + */ + schedule_timeout_uninterruptible(1); goto out_page; } - mem_cgroup_swapin_uncharge_swap(entry, 1); + need_clear_cache = true; + + mem_cgroup_swapin_uncharge_swap(entry, nr_pages); shadow = get_shadow_from_swap_cache(entry); if (shadow) @@ -4276,6 +4463,24 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) goto out_nomap; } + /* allocated large folios for SWP_SYNCHRONOUS_IO */ + if (folio_test_large(folio) && !folio_test_swapcache(folio)) { + unsigned long nr = folio_nr_pages(folio); + unsigned long folio_start = ALIGN_DOWN(vmf->address, nr * PAGE_SIZE); + unsigned long idx = (vmf->address - folio_start) / PAGE_SIZE; + pte_t *folio_ptep = vmf->pte - idx; + pte_t folio_pte = ptep_get(folio_ptep); + + if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) || + swap_pte_batch(folio_ptep, nr, folio_pte) != nr) + goto out_nomap; + + page_idx = idx; + address = folio_start; + ptep = folio_ptep; + goto check_folio; + } + nr_pages = 1; page_idx = 0; address = vmf->address; @@ -4403,11 +4608,12 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) folio_add_lru_vma(folio, vma); } else if (!folio_test_anon(folio)) { /* - * We currently only expect small !anon folios, which are either - * fully exclusive or fully shared. If we ever get large folios - * here, we have to be careful. + * We currently only expect small !anon folios which are either + * fully exclusive or fully shared, or new allocated large + * folios which are fully exclusive. If we ever get large + * folios within swapcache here, we have to be careful. 
*/ - VM_WARN_ON_ONCE(folio_test_large(folio)); + VM_WARN_ON_ONCE(folio_test_large(folio) && folio_test_swapcache(folio)); VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); folio_add_new_anon_rmap(folio, vma, address, rmap_flags); } else { @@ -4450,7 +4656,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) out: /* Clear the swap cache pin for direct swapin after PTL unlock */ if (need_clear_cache) - swapcache_clear(si, entry, 1); + swapcache_clear(si, entry, nr_pages); if (si) put_swap_device(si); return ret; @@ -4466,7 +4672,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) folio_put(swapcache); } if (need_clear_cache) - swapcache_clear(si, entry, 1); + swapcache_clear(si, entry, nr_pages); if (si) put_swap_device(si); return ret; -- Gitee From c3e56d60cdd09120bae2a0f19bd088d82efe6341 Mon Sep 17 00:00:00 2001 From: Zhao Qunqin Date: Thu, 19 Sep 2024 15:56:32 +0800 Subject: [PATCH 1558/2138] anolis: Loongarch: Driver for loongson SE SDF ANBZ: #11461 Signed-off-by: Zhao Qunqin Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4018 --- .../soc/loongson/loongson,ls3c6000se.yaml | 43 ++ MAINTAINERS | 6 + arch/loongarch/include/asm/se.h | 146 +++++ drivers/char/Kconfig | 18 + drivers/char/Makefile | 2 + drivers/char/loongson_se.c | 599 ++++++++++++++++++ drivers/char/lsse_sdf_cdev.c | 379 +++++++++++ 7 files changed, 1193 insertions(+) create mode 100644 Documentation/devicetree/bindings/soc/loongson/loongson,ls3c6000se.yaml create mode 100644 arch/loongarch/include/asm/se.h create mode 100644 drivers/char/loongson_se.c create mode 100644 drivers/char/lsse_sdf_cdev.c diff --git a/Documentation/devicetree/bindings/soc/loongson/loongson,ls3c6000se.yaml b/Documentation/devicetree/bindings/soc/loongson/loongson,ls3c6000se.yaml new file mode 100644 index 000000000000..6ac073de2125 --- /dev/null +++ b/Documentation/devicetree/bindings/soc/loongson/loongson,ls3c6000se.yaml @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 
+%YAML 1.2 +--- +$id: http://devicetree.org/schemas/soc/loongson/loongson,ls3c6000se.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# +title: Loongson Security Module (SE) +maintainers: + - Qunqin Zhao +description: + This binding describes the Loongson Security Module which provides control for + hardware encryption acceleration devices. +properties: + compatible: + items: + - enum: + - "loongson,ls3xse" + reg: + maxItems: 1 + interrupts: + minItems: 1 + maxItems: 32 + dmam_size: + $ref: /schemas/types.yaml#/definitions/uint32 +required: + - compatible + - reg + - interrupts + - dmam_size +additionalProperties: false +examples: + - | + #include + soc { + #address-cells = <2>; + #size-cells = <2>; + lsse@c00e0000000 { + compatible = "loongson,ls3c6000se"; + reg = <0xc00 0xe0000000 0x0 0x1000>; + interrupt-parent = <&liointc>; + interrupts = <0x20 IRQ_TYPE_LEVEL_HIGH>; + dmam_size = <0x800000>; + }; + }; diff --git a/MAINTAINERS b/MAINTAINERS index 3181aef3d470..be976c69839c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12412,6 +12412,12 @@ S: Maintained F: Documentation/devicetree/bindings/hwinfo/loongson,ls2k-chipid.yaml F: drivers/soc/loongson/loongson2_guts.c +LOONGSON SECURITY MODULE DRIVER +M: Qunqin Zhao +L: loongarch@lists.linux.dev +S: Maintained +F: Documentation/devicetree/bindings/soc/loongson/loongson,ls3c6000se.yaml + LOONGSON-2 SOC SERIES PM DRIVER M: Yinbo Zhu L: linux-pm@vger.kernel.org diff --git a/arch/loongarch/include/asm/se.h b/arch/loongarch/include/asm/se.h new file mode 100644 index 000000000000..a6b968d2d545 --- /dev/null +++ b/arch/loongarch/include/asm/se.h @@ -0,0 +1,146 @@ +/* + * Copyright (C) 2012 IBM Corporation + * + * Copyright 2023 Loongson Technology, Inc. + * Yinggang Gu + * + * Device driver for Loongson SE module. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + */ +#ifndef __LOONGSON_SE_H__ +#define __LOONGSON_SE_H__ + +#define SE_MAILBOX_S 0x0 +#define SE_MAILBOX_L 0x20 +#define SE_S2LINT_STAT 0x88 +#define SE_S2LINT_EN 0x8c +#define SE_S2LINT_SET 0x90 +#define SE_S2LINT_CL 0x94 +#define SE_L2SINT_STAT 0x98 +#define SE_L2SINT_EN 0x9c +#define SE_L2SINT_SET 0xa0 +#define SE_L2SINT_CL 0xa4 + +/* INT bit definition */ +#define SE_INT_SETUP BIT(0) +#define SE_INT_SM2 BIT(0) +#define SE_INT_SM3 BIT(0) +#define SE_INT_SM4 BIT(0) +#define SE_INT_RNG BIT(0) +#define SE_INT_TPM BIT(5) +#define SE_INT_ALL 0xffffffff + +#define SE_CMD_START 0x0 +#define SE_CMD_STOP 0x1 +#define SE_CMD_GETVER 0x2 +#define SE_CMD_SETBUF 0x3 +#define SE_CMD_SETMSG 0x4 + +#define SE_CMD_RNG 0x100 + +#define SE_CMD_SM2_SIGN 0x200 +#define SE_CMD_SM2_VSIGN 0x201 + +#define SE_CMD_SM3_DIGEST 0x300 +#define SE_CMD_SM3_UPDATE 0x301 +#define SE_CMD_SM3_FINISH 0x302 + +#define SE_CMD_SM4_ECB_ENCRY 0x400 +#define SE_CMD_SM4_ECB_DECRY 0x401 +#define SE_CMD_SM4_CBC_ENCRY 0x402 +#define SE_CMD_SM4_CBC_DECRY 0x403 +#define SE_CMD_SM4_CTR 0x404 + +#define SE_CMD_TPM 0x500 +#define SE_CMD_ZUC_INIT_READ 0x600 +#define SE_CMD_ZUC_READ 0x601 + +#define SE_CMD_SDF 0x700 + +#define SE_CH_MAX 32 + +#define SE_CH_RNG 1 +#define SE_CH_SM2 2 +#define SE_CH_SM3 3 +#define SE_CH_SM4 4 +#define SE_CH_TPM 5 +#define SE_CH_ZUC 6 +#define SE_CH_SDF 7 + +struct se_msg { + u32 cmd; + u32 data_off; + u32 data_len; + u32 info[5]; +}; + +struct se_cmd { + u32 cmd; + u32 info[7]; +}; + +struct se_res { + u32 cmd; + u32 cmd_ret; + u32 info[6]; +}; + +struct se_mailbox_data { + u32 int_bit; + union { + u32 mailbox[8]; + struct se_cmd gcmd; + struct se_res res; + } u; +}; + +struct lsse_ch { + u32 id; + u32 int_bit; + struct loongson_se *se; + void *priv; + spinlock_t 
ch_lock; + void *smsg; + void *rmsg; + int msg_size; + void *data_buffer; + dma_addr_t data_addr; + int data_size; + + void (*complete)(struct lsse_ch *se_ch); +}; + +struct loongson_se { + struct device *dev; + void __iomem *base; + u32 version; + u32 ch_status; + spinlock_t cmd_lock; + spinlock_t dev_lock; + + /* Interaction memory */ + void *mem_base; + dma_addr_t mem_addr; + unsigned long *mem_map; + int mem_map_size; + void *smsg; + void *rmsg; + + /* Synchronous CMD */ + struct completion cmd_completion; + + /* Virtual Channel */ + struct lsse_ch chs[SE_CH_MAX]; +}; + +struct lsse_ch *se_init_ch(int id, int data_size, int msg_size, void *priv, + void (*complete)(struct lsse_ch *se_ch)); +void se_deinit_ch(struct lsse_ch *ch); +int se_send_ch_requeset(struct lsse_ch *ch); + +#endif diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 625af75833fc..8a5e272bdc78 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -391,6 +391,24 @@ config UV_MMTIMER The uv_mmtimer device allows direct userspace access to the UV system timer. +config LOONGSON_SE + tristate "LOONGSON SECURITY MODULE Interface" + depends on LOONGARCH + default m + help + If you have LOONGSON security module (SE) support say Yes and it + will be accessible from within Linux. To compile this driver + as a module, choose M here; the module will be called loongson-se. + +config LOONGSON_SE_SDF + tristate "LOONGSON SECURITY MODULE SDF Interface" + depends on LOONGARCH && LOONGSON_SE + default m + help + If you want to use LOONGSON security module (SE) as SDF say Yes + and it will be accessible from within Linux. 
To compile this driver + as a module, choose M here; + source "drivers/char/tpm/Kconfig" config TELCLOCK diff --git a/drivers/char/Makefile b/drivers/char/Makefile index c5f532e412f1..109af71c5416 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -32,6 +32,8 @@ obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o obj-$(CONFIG_TELCLOCK) += tlclk.o +obj-$(CONFIG_LOONGSON_SE) += loongson_se.o +obj-$(CONFIG_LOONGSON_SE_SDF) += lsse_sdf_cdev.o obj-$(CONFIG_MWAVE) += mwave/ obj-y += agp/ diff --git a/drivers/char/loongson_se.c b/drivers/char/loongson_se.c new file mode 100644 index 000000000000..3eeb348fc711 --- /dev/null +++ b/drivers/char/loongson_se.c @@ -0,0 +1,599 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int se_mem_size = 0x800000; +module_param(se_mem_size, int, 0444); +MODULE_PARM_DESC(se_mem_size, "LOONGSON SE shared memory size"); + +static int se_mem_page = PAGE_SIZE; +module_param(se_mem_page, int, 0444); +MODULE_PARM_DESC(se_mem_page, "LOONGSON SE shared memory page size"); + +static struct loongson_se se_dev; + +static int lsse_open(struct inode *inode, struct file *filp) +{ + return 0; +} + +static ssize_t lsse_write(struct file *filp, const char __user *buf, + size_t cnt, loff_t *offt) +{ + return 0; +} + +static const struct file_operations lsse_fops = { + .owner = THIS_MODULE, + .open = lsse_open, + .write = lsse_write, +}; + +static struct miscdevice lsse_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "loongson-se", + .fops = &lsse_fops, +}; + +static inline u32 se_readl(u64 addr) +{ + return readl(se_dev.base + addr); +} + +static inline void se_writel(u32 val, u64 addr) +{ + writel(val, se_dev.base + addr); +} + +static inline bool se_ch_status(struct loongson_se *se, u32 int_bit) +{ + return !!(se->ch_status & int_bit) == 1; +} + +static 
void se_enable_int(struct loongson_se *se, u32 int_bit) +{ + unsigned long flag; + u32 tmp; + + if (!int_bit) + return; + + spin_lock_irqsave(&se->dev_lock, flag); + + tmp = se_readl(SE_S2LINT_EN); + tmp |= int_bit; + se_writel(tmp, SE_S2LINT_EN); + + spin_unlock_irqrestore(&se->dev_lock, flag); +} + +static void se_disable_int(struct loongson_se *se, u32 int_bit) +{ + unsigned long flag; + u32 tmp; + + if (!int_bit) + return; + + spin_lock_irqsave(&se->dev_lock, flag); + + tmp = se_readl(SE_S2LINT_EN); + tmp &= ~(int_bit); + se_writel(tmp, SE_S2LINT_EN); + + spin_unlock_irqrestore(&se->dev_lock, flag); +} + +static int se_send_requeset(struct loongson_se *se, + struct se_mailbox_data *req) +{ + unsigned long flag; + u32 status; + int err = 0; + int i; + + if (!se || !req) + return -EINVAL; + + if (se_readl(SE_L2SINT_STAT) || + !(se_readl(SE_L2SINT_EN) & req->int_bit)) + return -EBUSY; + + spin_lock_irqsave(&se->cmd_lock, flag); + + for (i = 0; i < ARRAY_SIZE(req->u.mailbox); i++) + se_writel(req->u.mailbox[i], SE_MAILBOX_S + i * 4); + + se_writel(req->int_bit, SE_L2SINT_SET); + + err = readl_relaxed_poll_timeout_atomic(se->base + SE_L2SINT_STAT, status, + !(status & req->int_bit), 10, 10000); + + spin_unlock_irqrestore(&se->cmd_lock, flag); + + return err; +} + +static int se_get_response(struct loongson_se *se, + struct se_mailbox_data *res) +{ + unsigned long flag; + int i; + + if (!se || !res) + return -EINVAL; + + if ((se_readl(SE_S2LINT_STAT) & res->int_bit) == 0) + return -EBUSY; + + spin_lock_irqsave(&se->cmd_lock, flag); + + for (i = 0; i < ARRAY_SIZE(res->u.mailbox); i++) + res->u.mailbox[i] = se_readl(SE_MAILBOX_L + i * 4); + + se_writel(res->int_bit, SE_S2LINT_CL); + + spin_unlock_irqrestore(&se->cmd_lock, flag); + + return 0; +} + +static int loongson_se_get_res(struct loongson_se *se, u32 int_bit, u32 cmd, + struct se_mailbox_data *res) +{ + int err = 0; + + res->int_bit = int_bit; + + if (se_get_response(se, res)) { + dev_err(se->dev, "Int 0x%x get 
response fail.\n", int_bit); + return -EFAULT; + } + + /* Check response */ + if (res->u.res.cmd == cmd) + err = 0; + else { + dev_err(se->dev, "Response cmd is 0x%x, not expect cmd 0x%x.\n", + res->u.res.cmd, cmd); + err = -EFAULT; + } + + return err; +} + +static int se_send_genl_cmd(struct loongson_se *se, struct se_mailbox_data *req, + struct se_mailbox_data *res, int retry) +{ + int err = 0, cnt = 0; + +try_again: + if (cnt++ >= retry) { + err = -ETIMEDOUT; + goto out; + } + + dev_dbg(se->dev, "%d time send cmd 0x%x\n", cnt, req->u.gcmd.cmd); + + err = se_send_requeset(se, req); + if (err) + goto try_again; + + if (!wait_for_completion_timeout(&se->cmd_completion, + msecs_to_jiffies(0x1000))) { + se_enable_int(se, req->int_bit); + goto try_again; + } + + err = loongson_se_get_res(se, req->int_bit, req->u.gcmd.cmd, res); + if (err || res->u.res.cmd_ret) { + se_enable_int(se, req->int_bit); + goto try_again; + } + +out: + se_enable_int(se, req->int_bit); + + return err; +} + +static int loongson_se_set_msg(struct lsse_ch *ch) +{ + struct loongson_se *se = ch->se; + struct se_mailbox_data req = {0}; + struct se_mailbox_data res = {0}; + int err; + + req.int_bit = SE_INT_SETUP; + req.u.gcmd.cmd = SE_CMD_SETMSG; + /* MSG off */ + req.u.gcmd.info[0] = ch->id; + req.u.gcmd.info[1] = ch->smsg - se->mem_base; + req.u.gcmd.info[2] = ch->msg_size; + + dev_dbg(se->dev, "Set Channel %d msg off 0x%x, msg size %d\n", ch->id, + req.u.gcmd.info[1], req.u.gcmd.info[2]); + + err = se_send_genl_cmd(se, &req, &res, 5); + if (res.u.res.cmd_ret) + return res.u.res.cmd_ret; + + return err; +} + +static irqreturn_t loongson_se_irq(int irq, void *dev_id) +{ + struct loongson_se *se = (struct loongson_se *)dev_id; + struct lsse_ch *ch; + u32 int_status; + + int_status = se_readl(SE_S2LINT_STAT); + + dev_dbg(se->dev, "%s int status is 0x%x\n", __func__, int_status); + + se_disable_int(se, int_status); + + if (int_status & SE_INT_SETUP) { + complete(&se->cmd_completion); + int_status &= 
~SE_INT_SETUP; + } + + while (int_status) { + int id = __ffs(int_status); + + ch = &se->chs[id]; + if (ch->complete) + ch->complete(ch); + int_status &= ~BIT(id); + se_writel(BIT(id), SE_S2LINT_CL); + } + + return IRQ_HANDLED; +} + +static int se_init_hw(struct loongson_se *se) +{ + struct se_mailbox_data req = {0}; + struct se_mailbox_data res = {0}; + struct device *dev = se->dev; + int err, retry = 5; + u64 size; + + size = se_mem_size; + + if (size & (size - 1)) { + size = roundup_pow_of_two(size); + se_mem_size = size; + } + + se_enable_int(se, SE_INT_SETUP); + + /* Start engine */ + memset(&req, 0, sizeof(struct se_mailbox_data)); + memset(&res, 0, sizeof(struct se_mailbox_data)); + req.int_bit = SE_INT_SETUP; + req.u.gcmd.cmd = SE_CMD_START; + err = se_send_genl_cmd(se, &req, &res, retry); + if (err) + return err; + + /* Get Version */ + memset(&req, 0, sizeof(struct se_mailbox_data)); + memset(&res, 0, sizeof(struct se_mailbox_data)); + req.int_bit = SE_INT_SETUP; + req.u.gcmd.cmd = SE_CMD_GETVER; + err = se_send_genl_cmd(se, &req, &res, retry); + if (err) + return err; + + se->version = res.u.res.info[0]; + + /* Setup data buffer */ + se->mem_base = dmam_alloc_coherent(dev, size, + &se->mem_addr, GFP_KERNEL); + if (!se->mem_base) + return -ENOMEM; + + memset(se->mem_base, 0, size); + + memset(&req, 0, sizeof(struct se_mailbox_data)); + memset(&res, 0, sizeof(struct se_mailbox_data)); + req.int_bit = SE_INT_SETUP; + req.u.gcmd.cmd = SE_CMD_SETBUF; + /* MMAP */ + req.u.gcmd.info[0] = (se->mem_addr & 0xffffffff) | 0x80; + req.u.gcmd.info[1] = se->mem_addr >> 32; + /* MASK */ + req.u.gcmd.info[2] = ~(size - 1); + req.u.gcmd.info[3] = 0xffffffff; + + pr_debug("Set win mmap 0x%llx, mask 0x%llx\n", + ((u64)req.u.gcmd.info[1] << 32) | req.u.gcmd.info[0], + ((u64)req.u.gcmd.info[3] << 32) | req.u.gcmd.info[2]); + + err = se_send_genl_cmd(se, &req, &res, retry); + if (err) + return err; + + se->mem_map_size = size / se_mem_page; + se->mem_map = 
bitmap_zalloc(se->mem_map_size, GFP_KERNEL); + if (!se->mem_map) + return -ENOMEM; + + dev_info(se->dev, "SE module setup down, shared memory size is 0x%x bytes, " + "memory page size is 0x%x bytes\n", + se_mem_size, se_mem_page); + + return err; +} + +static void loongson_se_disable_hw(struct loongson_se *se) +{ + struct se_mailbox_data req = {0}; + struct se_mailbox_data res = {0}; + int retry = 5; + + /* Stop engine */ + req.int_bit = SE_INT_SETUP; + req.u.gcmd.cmd = SE_CMD_STOP; + se_send_genl_cmd(se, &req, &res, retry); + + se_disable_int(se, SE_INT_ALL); + kfree(se->mem_map); +} + +int se_send_ch_requeset(struct lsse_ch *ch) +{ + struct loongson_se *se; + u32 status, int_bit; + int err = 0; + + if (!ch) + return -EINVAL; + + se = ch->se; + int_bit = ch->int_bit; + + if ((se_readl(SE_L2SINT_STAT) & int_bit) || + !(se_readl(SE_L2SINT_EN) & int_bit)) + return -EBUSY; + + se_enable_int(se, int_bit); + se_writel(int_bit, SE_L2SINT_SET); + + err = readl_relaxed_poll_timeout_atomic(se->base + SE_L2SINT_STAT, status, + !(status & int_bit), 10, 10000); + + return err; +} +EXPORT_SYMBOL_GPL(se_send_ch_requeset); + +struct lsse_ch *se_init_ch(int id, int data_size, int msg_size, void *priv, + void (*complete)(struct lsse_ch *se_ch)) +{ + struct loongson_se *se = &se_dev; + struct lsse_ch *ch; + unsigned long flag; + int data_first, data_nr; + int msg_first, msg_nr; + + if (!se) { + pr_err("SE has bot been initialized\n"); + return NULL; + } + + if (id == 0 || id > SE_CH_MAX) { + dev_err(se->dev, "Channel number %d is invalid\n", id); + return NULL; + } + + if (se_ch_status(se, BIT(id))) { + dev_err(se->dev, "Channel number %d has been initialized\n", id); + return NULL; + } + + spin_lock_irqsave(&se->dev_lock, flag); + + ch = &se_dev.chs[id]; + ch->se = se; + ch->id = id; + ch->int_bit = BIT(id); + se->ch_status |= BIT(id); + + data_nr = round_up(data_size, se_mem_page) / se_mem_page; + data_first = bitmap_find_next_zero_area(se->mem_map, se->mem_map_size, + 0, data_nr, 
0); + if (data_first >= se->mem_map_size) { + dev_err(se->dev, "Insufficient memory space\n"); + spin_unlock_irqrestore(&se->dev_lock, flag); + return NULL; + } + + bitmap_set(se->mem_map, data_first, data_nr); + ch->data_buffer = se->mem_base + data_first * se_mem_page; + ch->data_addr = se->mem_addr + data_first * se_mem_page; + ch->data_size = data_size; + + msg_nr = round_up(msg_size, se_mem_page) / se_mem_page; + msg_first = bitmap_find_next_zero_area(se->mem_map, se->mem_map_size, + 0, msg_nr, 0); + if (msg_first >= se->mem_map_size) { + dev_err(se->dev, "Insufficient memory space\n"); + bitmap_clear(se->mem_map, data_first, data_nr); + spin_unlock_irqrestore(&se->dev_lock, flag); + return NULL; + } + + bitmap_set(se->mem_map, msg_first, msg_nr); + ch->smsg = se->mem_base + msg_first * se_mem_page; + ch->rmsg = ch->smsg + msg_size / 2; + ch->msg_size = msg_size; + + ch->complete = complete; + ch->priv = priv; + + spin_lock_init(&ch->ch_lock); + + spin_unlock_irqrestore(&se->dev_lock, flag); + + if (loongson_se_set_msg(ch)) { + dev_err(se->dev, "Channel %d setup message address failed\n", id); + return NULL; + } + + se_enable_int(se, ch->int_bit); + + return ch; +} +EXPORT_SYMBOL_GPL(se_init_ch); + +void se_deinit_ch(struct lsse_ch *ch) +{ + struct loongson_se *se = &se_dev; + unsigned long flag; + int first, nr; + int id = ch->id; + + if (!se) { + pr_err("SE has bot been initialized\n"); + return; + } + + if (id == 0 || id > SE_CH_MAX) { + dev_err(se->dev, "Channel number %d is invalid\n", id); + return; + } + + if (!se_ch_status(se, BIT(id))) { + dev_err(se->dev, "Channel number %d has not been initialized\n", id); + return; + } + + spin_lock_irqsave(&se->dev_lock, flag); + + se->ch_status &= ~BIT(ch->id); + + first = (ch->data_buffer - se->mem_base) / se_mem_page; + nr = round_up(ch->data_size, se_mem_page) / se_mem_page; + bitmap_clear(se->mem_map, first, nr); + + first = (ch->smsg - se->mem_base) / se_mem_page; + nr = round_up(ch->msg_size, se_mem_page) / 
se_mem_page; + bitmap_clear(se->mem_map, first, nr); + + spin_unlock_irqrestore(&se->dev_lock, flag); + + se_disable_int(se, ch->int_bit); +} +EXPORT_SYMBOL_GPL(se_deinit_ch); + +static struct platform_device lsse_sdf_pdev = { + .name = "loongson-sdf", + .id = -1, +}; + +static const struct of_device_id loongson_se_of_match[] = { + { .compatible = "loongson,ls3c6000se", }, + {} +}; +MODULE_DEVICE_TABLE(of, loongson_se_of_match); + +static int loongson_se_probe(struct platform_device *pdev) +{ + struct loongson_se *se = &se_dev; + struct resource *res; + struct device *dev = &pdev->dev; + int nr_irq, err, i; + int irq[8]; + + nr_irq = platform_irq_count(pdev); + if (nr_irq < 0) + return -ENODEV; + + for (i = 0; i < nr_irq; i++) { + irq[i] = platform_get_irq(pdev, i); + if (irq[i] < 0) + return -ENODEV; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + se->base = devm_ioremap_resource(dev, res); + if (IS_ERR(se->base)) + return PTR_ERR(se->base); + + se->dev = &pdev->dev; + platform_set_drvdata(pdev, se); + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + init_completion(&se->cmd_completion); + spin_lock_init(&se->cmd_lock); + spin_lock_init(&se->dev_lock); + + for (i = 0; i < nr_irq; i++) { + err = devm_request_irq(dev, irq[i], loongson_se_irq, 0, + "loongson-se", se); + if (err) + goto out; + } + + err = se_init_hw(se); + if (err) + goto disable_hw; + + err = misc_register(&lsse_miscdev); + if (err) + goto disable_hw; + + err = platform_device_register(&lsse_sdf_pdev); + if (err) + pr_err("register sdf device failed\n"); + + return 0; + +disable_hw: + loongson_se_disable_hw(se); +out: + for (; i >= 0; i--) + devm_free_irq(dev, irq[i], se); + return err; +} + +static int loongson_se_remove(struct platform_device *pdev) +{ + struct loongson_se *se = platform_get_drvdata(pdev); + + misc_deregister(&lsse_miscdev); + loongson_se_disable_hw(se); + platform_device_unregister(&lsse_sdf_pdev); + + return 0; +} + 
+static struct platform_driver loongson_se_driver = { + .probe = loongson_se_probe, + .remove = loongson_se_remove, + .driver = { + .name = "loongson-se", + .of_match_table = loongson_se_of_match, + }, +}; + +module_platform_driver(loongson_se_driver); + +MODULE_AUTHOR("Yinggang Gu"); +MODULE_DESCRIPTION("Loongson SE driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/lsse_sdf_cdev.c b/drivers/char/lsse_sdf_cdev.c new file mode 100644 index 000000000000..a4df08003686 --- /dev/null +++ b/drivers/char/lsse_sdf_cdev.c @@ -0,0 +1,379 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SE_SDF_BUFSIZE (PAGE_SIZE * 2) +#define SDF_OPENSESSION 0x204 +#define SDF_CLOSESESSION 0x205 + +struct lsse_sdf_dev { + struct lsse_ch *se_ch; + struct mutex data_lock; + bool processing_cmd; + + /* Synchronous CMD */ + wait_queue_head_t wq; +}; + +struct se_sdf_msg { + u32 cmd; + u32 data_off; + u32 data_len; + u32 info[5]; +}; + +struct sdf_command_header { + int command; + union { + int param_cnt; + int ret; + } u; + int param_len[14]; +}; + +struct sdf_kernel_command { + struct sdf_command_header header; + void *handle; +}; + +#define KERNEL_COMMAND_SIZE (sizeof(struct sdf_kernel_command)) + +struct sdf_handle { + struct list_head handle_list; + void *handle; +}; + +struct sdf_file_pvt_data { + struct lsse_sdf_dev *se; + struct list_head handle_list; + struct sdf_kernel_command skc; + struct sdf_handle *ph; +}; + +static struct lsse_sdf_dev *se_sdf_dev; + +static void lsse_sdf_complete(struct lsse_ch *ch) +{ + struct lsse_sdf_dev *se = (struct lsse_sdf_dev *)ch->priv; + + se->processing_cmd = false; + wake_up(&se->wq); +} + +static int se_send_sdf_cmd(struct lsse_sdf_dev *se, int len, int retry) +{ + struct se_sdf_msg *smsg = (struct se_sdf_msg *)se->se_ch->smsg; + unsigned long flag; + int err; + + spin_lock_irqsave(&se->se_ch->ch_lock, flag); + + smsg->cmd = SE_CMD_SDF; + 
/* One time one cmd */ + smsg->data_off = se->se_ch->data_buffer - se->se_ch->se->mem_base; + smsg->data_len = len; + +try_again: + if (!retry--) + goto out; + + pr_debug("Send sdf cmd, last retry %d times\n", retry); + + err = se_send_ch_requeset(se->se_ch); + if (err) { + udelay(5); + goto try_again; + } + +out: + spin_unlock_irqrestore(&se->se_ch->ch_lock, flag); + + return err; +} + +static int lsse_sdf_recv(struct sdf_file_pvt_data *pvt, char *buf, + size_t size, int user, int *se_ret) +{ + int len, time, ret = 0; + struct se_sdf_msg *rmsg; + struct sdf_kernel_command *skc; + struct sdf_handle *ph; + struct lsse_sdf_dev *se = pvt->se; + + if (!se->se_ch->rmsg) { + pr_err("se device is not ready\n"); + return -EBUSY; + } + + time = wait_event_timeout(se->wq, !se->processing_cmd, HZ*30); + if (!time) + return -ETIME; + + rmsg = (struct se_sdf_msg *)se->se_ch->rmsg; + if (rmsg->cmd != SE_CMD_SDF) { + pr_err("se get wrong response\n"); + return -EIO; + } + len = rmsg->data_len; + + if ((!user && len > KERNEL_COMMAND_SIZE) || len > SE_SDF_BUFSIZE + || (size && len > size)) + return -E2BIG; + + if (user) { + ret = copy_to_user((char __user *)buf, + se->se_ch->data_buffer + rmsg->data_off, len); + if (!se_ret) + return ret; + + skc = (struct sdf_kernel_command *) + (se->se_ch->data_buffer + rmsg->data_off); + *se_ret = skc->header.u.ret; + if (skc->header.command == SDF_OPENSESSION && !*se_ret) { + ph = kmalloc(sizeof(*ph), GFP_KERNEL); + if (!ph) + return -ENOMEM; + ph->handle = skc->handle; + list_add(&ph->handle_list, &pvt->handle_list); + } + } else { + memcpy(buf, se->se_ch->data_buffer + rmsg->data_off, len); + } + return ret; +} + +static struct sdf_handle *find_sdf_handle(void *handle, + struct sdf_file_pvt_data *pvt) +{ + struct sdf_handle *ph; + + list_for_each_entry(ph, &pvt->handle_list, handle_list) { + if (ph->handle == handle) + return ph; + } + + return NULL; +} + +static int lsse_sdf_send(struct sdf_file_pvt_data *pvt, const char *buf, + size_t 
count, int user) +{ + int ret, se_ret; + struct sdf_handle *ph = NULL; + struct sdf_kernel_command *skc; + struct lsse_sdf_dev *se = pvt->se; + + if (!se->se_ch->smsg) { + pr_err("se device is not ready\n"); + return 0; + } + + if (count > se->se_ch->data_size) { + pr_err("Invalid size in send: count=%zd, size=%d\n", + count, se->se_ch->data_size); + return -EIO; + } + + if (user) { + ret = mutex_lock_interruptible(&se->data_lock); + if (ret) + goto out; + } else + mutex_lock(&se->data_lock); + + if (user) { + ret = copy_from_user(se->se_ch->data_buffer, buf, count); + if (ret) { + ret = -EFAULT; + goto out_unlock; + } + skc = (struct sdf_kernel_command *)se->se_ch->data_buffer; + if (skc->header.command == SDF_CLOSESESSION) + ph = find_sdf_handle(skc->handle, pvt); + } else { + memcpy(se->se_ch->data_buffer, buf, count); + } + + se->processing_cmd = true; + ret = se_send_sdf_cmd(se, count, 5); + if (ret) { + pr_err("se_send_sdf_cmd failed\n"); + goto out_unlock; + } + + ret = lsse_sdf_recv(pvt, (char *)buf, 0, user, &se_ret); + if (ret) { + pr_err("recv failed ret: %x\n", ret); + goto out_unlock; + } + if (ph && !se_ret) { + list_del(&ph->handle_list); + kfree(ph); + } +out_unlock: + mutex_unlock(&se->data_lock); +out: + return ret; +} + +static ssize_t lsse_sdf_write(struct file *filp, const char __user *buf, + size_t cnt, loff_t *offt) +{ + struct sdf_file_pvt_data *pvt = filp->private_data; + + if (cnt > SE_SDF_BUFSIZE) + return -E2BIG; + + if (lsse_sdf_send(pvt, buf, cnt, 1)) + return -EFAULT; + + return cnt; +} + +static ssize_t lsse_sdf_read(struct file *filp, char __user *buf, + size_t size, loff_t *off) +{ + return lsse_sdf_recv(filp->private_data, buf, size, 1, NULL); +} + +static int close_one_handle(struct sdf_file_pvt_data *pvt, struct sdf_handle *ph) +{ + struct sdf_kernel_command *skc = &pvt->skc; + + skc->header.command = 0x205; + skc->header.u.param_cnt = 1; + skc->header.param_len[0] = 8; + skc->handle = ph->handle; + /* close one session */ + 
lsse_sdf_send(pvt, (char *)&pvt->skc, KERNEL_COMMAND_SIZE, 0); + if (skc->header.u.ret) { + pr_err("Auto Close Session failed, session handle: %llx, ret: %d\n", + (u64)ph->handle, skc->header.u.ret); + return skc->header.u.ret; + } + kfree(ph); + + return 0; +} + +static int close_all_handle(struct sdf_file_pvt_data *pvt) +{ + int ret = 0; + struct sdf_handle *ph, *tmp; + + list_for_each_entry_safe(ph, tmp, &pvt->handle_list, handle_list) { + list_del(&ph->handle_list); + ret = close_one_handle(pvt, ph); + if (ret) + return ret; + } + + return 0; +} + +static int lsse_sdf_release(struct inode *inode, struct file *filp) +{ + int ret; + struct sdf_file_pvt_data *pvt = filp->private_data; + + ret = close_all_handle(pvt); + filp->private_data = NULL; + kfree(pvt); + + if (ret) + ret = -EFAULT; + return ret; +} + +static int lsse_sdf_open(struct inode *inode, struct file *filp) +{ + struct sdf_file_pvt_data *pvt = kmalloc(sizeof(*pvt), GFP_KERNEL); + + if (!pvt) + return -ENOMEM; + + INIT_LIST_HEAD(&pvt->handle_list); + pvt->se = se_sdf_dev; + filp->private_data = pvt; + + return 0; +} + +static const struct file_operations lsse_sdf_fops = { + .owner = THIS_MODULE, + .open = lsse_sdf_open, + .write = lsse_sdf_write, + .read = lsse_sdf_read, + .release = lsse_sdf_release, +}; + +static struct miscdevice lsse_sdf_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "lsse_sdf", + .fops = &lsse_sdf_fops, +}; + +static int lsse_sdf_probe(struct platform_device *pdev) +{ + int msg_size; + int ret; + + se_sdf_dev = kzalloc(sizeof(*se_sdf_dev), GFP_KERNEL); + if (IS_ERR_OR_NULL(se_sdf_dev)) + return PTR_ERR(se_sdf_dev); + + mutex_init(&se_sdf_dev->data_lock); + init_waitqueue_head(&se_sdf_dev->wq); + se_sdf_dev->processing_cmd = false; + + msg_size = 2 * sizeof(struct se_sdf_msg); + se_sdf_dev->se_ch = se_init_ch(SE_CH_SDF, SE_SDF_BUFSIZE, msg_size, + se_sdf_dev, lsse_sdf_complete); + + ret = misc_register(&lsse_sdf_miscdev); + if (ret < 0) { + pr_err("register sdf dev 
failed!\n"); + goto out; + } + + return 0; + +out: + kfree(se_sdf_dev); + + return ret; +} + +static int lsse_sdf_remove(struct platform_device *pdev) +{ + misc_deregister(&lsse_sdf_miscdev); + se_deinit_ch(se_sdf_dev->se_ch); + kfree(se_sdf_dev); + + return 0; +} + +static struct platform_driver loongson_sdf_driver = { + .probe = lsse_sdf_probe, + .remove = lsse_sdf_remove, + .driver = { + .name = "loongson-sdf", + }, +}; +module_platform_driver(loongson_sdf_driver); + +MODULE_ALIAS("platform:loongson-sdf"); +MODULE_AUTHOR("Yinggang Gu"); +MODULE_DESCRIPTION("Loongson SE sdf driver"); +MODULE_LICENSE("GPL"); -- Gitee From 1f0d0de6046a959ca0a93542a087211cc4662a0c Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Fri, 27 Sep 2024 11:50:51 +0800 Subject: [PATCH 1559/2138] anolis: Fix the compile error ANBZ: #11464 Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/include/asm/kvm_para.h | 4 +++- arch/loongarch/include/uapi/asm/kvm.h | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index 032101b941d9..fd6937931482 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -18,12 +18,13 @@ /* * LoongArch hypercall return code */ -#define KVM_HCALL_STATUS_SUCCESS 0 +#define KVM_HCALL_SUCCESS 0 #define KVM_HCALL_INVALID_CODE -1UL #define KVM_HCALL_INVALID_PARAMETER -2UL #define KVM_STEAL_PHYS_VALID BIT_ULL(0) #define KVM_STEAL_PHYS_MASK GENMASK_ULL(63, 6) + struct kvm_steal_time { __u64 steal; __u32 version; @@ -165,4 +166,5 @@ static inline bool kvm_check_and_clear_guest_paused(void) { return false; } + #endif /* _ASM_LOONGARCH_KVM_PARA_H */ diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index af676247dd60..9e20a8071216 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -22,6 
+22,7 @@ #define __KVM_HAVE_IRQ_LINE #define KVM_GUESTDBG_USE_SW_BP 0x00010000 + /* * for KVM_GET_REGS and KVM_SET_REGS */ -- Gitee From 1b3f11622abfb3861ca387a3631f0cb73849f37e Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Mon, 6 May 2024 22:00:47 +0800 Subject: [PATCH 1560/2138] LoongArch: KVM: Add PV IPI support on host side ANBZ: #11464 commit e33bda7ee50c3c20d80f5ca6dc5ca2cd37863518 upstream. On LoongArch system, IPI hw uses iocsr registers. There are one iocsr register access on IPI sending, and two iocsr access on IPI receiving for the IPI interrupt handler. In VM mode all iocsr accessing will cause VM to trap into hypervisor. So with one IPI hw notification there will be three times of trap. In this patch PV IPI is added for VM, hypercall instruction is used for IPI sender, and hypervisor will inject an SWI to the destination vcpu. During the SWI interrupt handler, only CSR.ESTAT register is written to clear irq. CSR.ESTAT register access will not trap into hypervisor, so with PV IPI supported, there is one trap with IPI sender, and no trap with IPI receiver, there is only one trap with IPI notification. Also this patch adds IPI multicast support, the method is similar with x86. With IPI multicast support, IPI notification can be sent to at most 128 vcpus at one time. It greatly reduces the times of trapping into hypervisor. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/include/asm/kvm_para.h | 58 ++++++++++++----------- arch/loongarch/include/asm/kvm_vcpu.h | 10 ++++ arch/loongarch/kvm/exit.c | 67 +++++++++++++-------------- 3 files changed, 72 insertions(+), 63 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index fd6937931482..335fb86778e2 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -7,13 +7,16 @@ */ #define HYPERVISOR_KVM 1 #define HYPERVISOR_VENDOR_SHIFT 8 -#define HYPERCALL_CODE(vendor, code) ((vendor << HYPERVISOR_VENDOR_SHIFT) + code) -#define KVM_HCALL_CODE_PV_SERVICE 0 +#define HYPERCALL_ENCODE(vendor, code) ((vendor << HYPERVISOR_VENDOR_SHIFT) + code) + +#define KVM_HCALL_CODE_SERVICE 0 #define KVM_HCALL_CODE_SWDBG 1 -#define KVM_HCALL_PV_SERVICE HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_PV_SERVICE) -#define KVM_HCALL_FUNC_PV_IPI 1 + +#define KVM_HCALL_SERVICE HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE) +#define KVM_HCALL_FUNC_IPI 1 #define KVM_HCALL_FUNC_NOTIFY 2 -#define KVM_HCALL_SWDBG HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG) + +#define KVM_HCALL_SWDBG HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG) /* * LoongArch hypercall return code @@ -37,16 +40,16 @@ struct kvm_steal_time { * * a0: function identifier * a1-a6: args - * Return value will be placed in v0. + * Return value will be placed in a0. * Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6. 
*/ -static __always_inline long kvm_hypercall(u64 fid) +static __always_inline long kvm_hypercall0(u64 fid) { - register long ret asm("v0"); + register long ret asm("a0"); register unsigned long fun asm("a0") = fid; __asm__ __volatile__( - "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + "hvcl "__stringify(KVM_HCALL_SERVICE) : "=r" (ret) : "r" (fun) : "memory" @@ -57,12 +60,12 @@ static __always_inline long kvm_hypercall(u64 fid) static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0) { - register long ret asm("v0"); + register long ret asm("a0"); register unsigned long fun asm("a0") = fid; register unsigned long a1 asm("a1") = arg0; __asm__ __volatile__( - "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + "hvcl "__stringify(KVM_HCALL_SERVICE) : "=r" (ret) : "r" (fun), "r" (a1) : "memory" @@ -74,17 +77,17 @@ static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0) static __always_inline long kvm_hypercall2(u64 fid, unsigned long arg0, unsigned long arg1) { - register long ret asm("v0"); + register long ret asm("a0"); register unsigned long fun asm("a0") = fid; register unsigned long a1 asm("a1") = arg0; register unsigned long a2 asm("a2") = arg1; __asm__ __volatile__( - "hvcl "__stringify(KVM_HCALL_PV_SERVICE) - : "=r" (ret) - : "r" (fun), "r" (a1), "r" (a2) - : "memory" - ); + "hvcl "__stringify(KVM_HCALL_SERVICE) + : "=r" (ret) + : "r" (fun), "r" (a1), "r" (a2) + : "memory" + ); return ret; } @@ -92,14 +95,14 @@ static __always_inline long kvm_hypercall2(u64 fid, static __always_inline long kvm_hypercall3(u64 fid, unsigned long arg0, unsigned long arg1, unsigned long arg2) { - register long ret asm("v0"); + register long ret asm("a0"); register unsigned long fun asm("a0") = fid; register unsigned long a1 asm("a1") = arg0; register unsigned long a2 asm("a2") = arg1; register unsigned long a3 asm("a3") = arg2; __asm__ __volatile__( - "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + "hvcl "__stringify(KVM_HCALL_SERVICE) : "=r" (ret) : "r" (fun), "r" 
(a1), "r" (a2), "r" (a3) : "memory" @@ -109,10 +112,10 @@ static __always_inline long kvm_hypercall3(u64 fid, } static __always_inline long kvm_hypercall4(u64 fid, - unsigned long arg0, unsigned long arg1, unsigned long arg2, - unsigned long arg3) + unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3) { - register long ret asm("v0"); + register long ret asm("a0"); register unsigned long fun asm("a0") = fid; register unsigned long a1 asm("a1") = arg0; register unsigned long a2 asm("a2") = arg1; @@ -120,7 +123,7 @@ static __always_inline long kvm_hypercall4(u64 fid, register unsigned long a4 asm("a4") = arg3; __asm__ __volatile__( - "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + "hvcl "__stringify(KVM_HCALL_SERVICE) : "=r" (ret) : "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4) : "memory" @@ -130,10 +133,10 @@ static __always_inline long kvm_hypercall4(u64 fid, } static __always_inline long kvm_hypercall5(u64 fid, - unsigned long arg0, unsigned long arg1, unsigned long arg2, - unsigned long arg3, unsigned long arg4) + unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3, unsigned long arg4) { - register long ret asm("v0"); + register long ret asm("a0"); register unsigned long fun asm("a0") = fid; register unsigned long a1 asm("a1") = arg0; register unsigned long a2 asm("a2") = arg1; @@ -142,7 +145,7 @@ static __always_inline long kvm_hypercall5(u64 fid, register unsigned long a5 asm("a5") = arg4; __asm__ __volatile__( - "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + "hvcl "__stringify(KVM_HCALL_SERVICE) : "=r" (ret) : "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5) : "memory" @@ -151,7 +154,6 @@ static __always_inline long kvm_hypercall5(u64 fid, return ret; } - static inline unsigned int kvm_arch_para_features(void) { return 0; diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 9f53950959da..590a92cb5416 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ 
b/arch/loongarch/include/asm/kvm_vcpu.h @@ -110,4 +110,14 @@ static inline int kvm_queue_exception(struct kvm_vcpu *vcpu, return -1; } +static inline unsigned long kvm_read_reg(struct kvm_vcpu *vcpu, int num) +{ + return vcpu->arch.gprs[num]; +} + +static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long val) +{ + vcpu->arch.gprs[num] = val; +} + #endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */ diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 74028ad40b24..e903753e44de 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -160,6 +160,9 @@ int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu) run->iocsr_io.len = 8; run->iocsr_io.is_write = 1; break; + case CPUCFG_KVM_FEATURE: + vcpu->arch.gprs[rd] = KVM_FEATURE_IPI; + break; default: ret = EMULATE_FAIL; return ret; @@ -761,29 +764,26 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } -static int kvm_pv_send_ipi(struct kvm_vcpu *vcpu) +static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu) { - unsigned long ipi_bitmap; unsigned int min, cpu, i; + unsigned long ipi_bitmap; struct kvm_vcpu *dest; - min = vcpu->arch.gprs[LOONGARCH_GPR_A3]; + min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3); for (i = 0; i < 2; i++, min += BITS_PER_LONG) { - ipi_bitmap = vcpu->arch.gprs[LOONGARCH_GPR_A1 + i]; + ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i); if (!ipi_bitmap) continue; cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG); while (cpu < BITS_PER_LONG) { dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min); - cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, - cpu + 1); + cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1); if (!dest) continue; - /* - * Send SWI0 to dest vcpu to emulate IPI interrupt - */ + /* Send SWI0 to dest vcpu to emulate IPI interrupt */ kvm_queue_irq(dest, INT_SWI0); kvm_vcpu_kick(dest); } @@ -792,6 +792,27 @@ static int kvm_pv_send_ipi(struct kvm_vcpu *vcpu) return 0; 
} +/* + * Hypercall emulation always return to guest, Caller should check retval. + */ +static void kvm_handle_service(struct kvm_vcpu *vcpu) +{ + unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0); + long ret; + + switch (func) { + case KVM_HCALL_FUNC_IPI: + kvm_send_pv_ipi(vcpu); + ret = KVM_HCALL_SUCCESS; + break; + default: + ret = KVM_HCALL_INVALID_CODE; + break; + }; + + kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret); +} + static int kvm_save_notify(struct kvm_vcpu *vcpu) { unsigned long id, data; @@ -811,30 +832,6 @@ static int kvm_save_notify(struct kvm_vcpu *vcpu) return 0; }; -/* - * hypercall emulation always return to guest, Caller should check retval. - */ -static void kvm_handle_pv_service(struct kvm_vcpu *vcpu) -{ - unsigned long func = vcpu->arch.gprs[LOONGARCH_GPR_A0]; - long ret; - - switch (func) { - case KVM_HCALL_FUNC_PV_IPI: - kvm_pv_send_ipi(vcpu); - ret = KVM_HCALL_STATUS_SUCCESS; - break; - case KVM_HCALL_FUNC_NOTIFY: - ret = kvm_save_notify(vcpu); - break; - default: - ret = KVM_HCALL_INVALID_CODE; - break; - }; - - vcpu->arch.gprs[LOONGARCH_GPR_A0] = ret; -} - static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) { larch_inst inst; @@ -846,9 +843,9 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) ret = RESUME_GUEST; switch (code) { - case KVM_HCALL_PV_SERVICE: + case KVM_HCALL_SERVICE: vcpu->stat.hypercall_exits++; - kvm_handle_pv_service(vcpu); + kvm_handle_service(vcpu); break; case KVM_HCALL_SWDBG: /* KVM_HC_SWDBG only in effective when SW_BP is enabled */ -- Gitee From bb396182f8e3c4a9b97f58cd7515f4ddf5d7da84 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Mon, 6 May 2024 22:00:47 +0800 Subject: [PATCH 1561/2138] LoongArch: KVM: Add PV IPI support on guest side ANBZ: #11464 commit 74c16b2e2b0c3b193324f47300fd30cf03a606b7 upstream. PARAVIRT config option and PV IPI is added for the guest side, function pv_ipi_init() is used to add IPI sending and IPI receiving hooks. 
This function firstly checks whether system runs in VM mode, and if kernel runs in VM mode, it will call function kvm_para_available() to detect the current hypervisor type (now only KVM type detection is supported). The paravirt functions can work only if current hypervisor type is KVM, since there is only KVM supported on LoongArch now. PV IPI uses virtual IPI sender and virtual IPI receiver functions. With virtual IPI sender, IPI message is stored in memory rather than emulated HW. IPI multicast is also supported, and 128 vcpus can receive IPIs at the same time like X86 KVM method. Hypercall method is used for IPI sending. With virtual IPI receiver, HW SWI0 is used rather than real IPI HW. Since VCPU has separate HW SWI0 like HW timer, there is no trap in IPI interrupt acknowledge. Since IPI message is stored in memory, there is no trap in getting IPI message. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/Kconfig | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index a3b39567442f..b29a0943cbd6 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -619,6 +619,15 @@ config RANDOMIZE_BASE_MAX_OFFSET This is limited by the size of the lower address memory, 256MB. +config PARAVIRT + bool "Enable paravirtualization code" + depends on AS_HAS_LVZ_EXTENSION + help + This changes the kernel so it can modify itself when it is run + under a hypervisor, potentially improving performance significantly + over full virtualization. However, when run without a hypervisor + the kernel is theoretically slower and slightly larger. 
+ endmenu config ARCH_SELECT_MEMORY_MODEL -- Gitee From ddd39d190cb67fd70bba1f50f1c8038b706a6870 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Mon, 6 May 2024 22:00:47 +0800 Subject: [PATCH 1562/2138] LoongArch: KVM: Add software breakpoint support ANBZ: #11464 commit 163e9fc6957fc24d1d6c0a30a3febfd2ecade039 upstream. When VM runs in kvm mode, system will not exit to host mode when executing a general software breakpoint instruction such as INSN_BREAK, trap exception happens in guest mode rather than host mode. In order to debug guest kernel on host side, one mechanism should be used to let VM exit to host mode. Here a hypercall instruction with a special code is used for software breakpoint usage. VM exits to host mode and kvm hypervisor identifies the special hypercall code and sets exit_reason with KVM_EXIT_DEBUG. And then let qemu handle it. Idea comes from ppc kvm, one api KVM_REG_LOONGARCH_DEBUG_INST is added to get the hypercall code. VMM needs get sw breakpoint instruction with this api and set the corresponding sw break point for guest kernel. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/include/asm/kvm_host.h | 5 +++++ arch/loongarch/kvm/exit.c | 12 ++++++------ arch/loongarch/kvm/vcpu.c | 2 +- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 32a22532da76..4dd3cdb1ff91 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -35,6 +35,11 @@ #define KVM_HALT_POLL_NS_DEFAULT 500000 #define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(1) +#define KVM_GUESTDBG_SW_BP_MASK \ + (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP) +#define KVM_GUESTDBG_VALID_MASK \ + (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP) + /* KVM_IRQ_LINE irq field index values */ #define KVM_LOONGARCH_IRQ_TYPE_SHIFT 24 #define KVM_LOONGARCH_IRQ_TYPE_MASK 0xff diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index e903753e44de..7a132ce416b4 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -834,9 +834,9 @@ static int kvm_save_notify(struct kvm_vcpu *vcpu) static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) { + int ret; larch_inst inst; unsigned int code; - int ret; inst.word = vcpu->arch.badi; code = inst.reg0i15_format.immediate; @@ -848,13 +848,13 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) kvm_handle_service(vcpu); break; case KVM_HCALL_SWDBG: - /* KVM_HC_SWDBG only in effective when SW_BP is enabled */ - if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { + /* KVM_HCALL_SWDBG only in effective when SW_BP is enabled */ + if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) { vcpu->run->exit_reason = KVM_EXIT_DEBUG; ret = RESUME_HOST; - } else - vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; - break; + break; + } + fallthrough; default: /* Treat it as noop intruction, only set return value */ 
vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index c1f6363b6372..6e6ff28b0cd0 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -782,7 +782,7 @@ static int kvm_get_one_reg(struct kvm_vcpu *vcpu, *v = drdtime() + vcpu->kvm->arch.time_offset; break; case KVM_REG_LOONGARCH_DEBUG_INST: - *v = INSN_HVCL + KVM_HCALL_SWDBG; + *v = INSN_HVCL | KVM_HCALL_SWDBG; break; default: ret = -EINVAL; -- Gitee From d851c2815bf76eedb9e0282e45712cc0bf4a0d3b Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Mon, 6 May 2024 22:00:47 +0800 Subject: [PATCH 1563/2138] LoongArch: KVM: Add mmio trace events support ANBZ: #11464 commit 7b7e584f90bf670d5c6f2b1fff884bf3b972cad4 upstream. Add mmio trace events support, currently generic mmio events KVM_TRACE_MMIO_WRITE/xxx_READ/xx_READ_UNSATISFIED are added here. Also vcpu id field is added for all kvm trace events, since perf KVM tool parses vcpu id information for kvm entry event. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/kvm/exit.c | 8 ++++++++ arch/loongarch/kvm/trace.h | 20 ++++++++++++++------ 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 7a132ce416b4..48a59a906b4b 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -484,6 +485,8 @@ int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst) vcpu->arch.io_gpr = rd; run->mmio.is_write = 0; vcpu->mmio_is_write = 0; + trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, run->mmio.len, + run->mmio.phys_addr, NULL); return EMULATE_DO_MMIO; } @@ -531,6 +534,9 @@ int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run) break; } + trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len, + run->mmio.phys_addr, run->mmio.data); + return er; } @@ -642,6 +648,8 @@ int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) run->mmio.is_write = 1; vcpu->mmio_needed = 1; vcpu->mmio_is_write = 1; + trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len, + run->mmio.phys_addr, data); return EMULATE_DO_MMIO; } diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h index c2484ad4cffa..1783397b1bc8 100644 --- a/arch/loongarch/kvm/trace.h +++ b/arch/loongarch/kvm/trace.h @@ -19,14 +19,16 @@ DECLARE_EVENT_CLASS(kvm_transition, TP_PROTO(struct kvm_vcpu *vcpu), TP_ARGS(vcpu), TP_STRUCT__entry( + __field(unsigned int, vcpu_id) __field(unsigned long, pc) ), TP_fast_assign( + __entry->vcpu_id = vcpu->vcpu_id; __entry->pc = vcpu->arch.pc; ), - TP_printk("PC: 0x%08lx", __entry->pc) + TP_printk("vcpu %u PC: 0x%08lx", __entry->vcpu_id, __entry->pc) ); DEFINE_EVENT(kvm_transition, kvm_enter, @@ -54,19 +56,22 @@ DECLARE_EVENT_CLASS(kvm_exit, TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), 
TP_ARGS(vcpu, reason), TP_STRUCT__entry( + __field(unsigned int, vcpu_id) __field(unsigned long, pc) __field(unsigned int, reason) ), TP_fast_assign( + __entry->vcpu_id = vcpu->vcpu_id; __entry->pc = vcpu->arch.pc; __entry->reason = reason; ), - TP_printk("[%s]PC: 0x%08lx", - __print_symbolic(__entry->reason, - kvm_trace_symbol_exit_types), - __entry->pc) + TP_printk("vcpu %u [%s] PC: 0x%08lx", + __entry->vcpu_id, + __print_symbolic(__entry->reason, + kvm_trace_symbol_exit_types), + __entry->pc) ); DEFINE_EVENT(kvm_exit, kvm_exit_idle, @@ -85,14 +90,17 @@ TRACE_EVENT(kvm_exit_gspr, TP_PROTO(struct kvm_vcpu *vcpu, unsigned int inst_word), TP_ARGS(vcpu, inst_word), TP_STRUCT__entry( + __field(unsigned int, vcpu_id) __field(unsigned int, inst_word) ), TP_fast_assign( + __entry->vcpu_id = vcpu->vcpu_id; __entry->inst_word = inst_word; ), - TP_printk("Inst word: 0x%08x", __entry->inst_word) + TP_printk("vcpu %u Inst word: 0x%08x", __entry->vcpu_id, + __entry->inst_word) ); #define KVM_TRACE_AUX_SAVE 0 -- Gitee From 3d296996f3120268097fc7754a2a70216708c112 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 9 Jul 2024 16:25:50 +0800 Subject: [PATCH 1564/2138] LoongArch: KVM: Sync pending interrupt when getting ESTAT from user mode ANBZ: #11464 commit e306e514906c444c3678b9c94dd92584b0859859 upstream. Currently interrupts are posted and cleared with the asynchronous mode, meanwhile they are saved in SW state vcpu::arch::irq_pending and vcpu:: arch::irq_clear. When vcpu is ready to run, pending interrupt is written back to CSR.ESTAT register from SW state vcpu::arch::irq_pending at the guest entrance. During VM migration stage, vcpu is put into stopped state, however pending interrupts are not synced to CSR.ESTAT register. So there will be interrupt lost when VCPU is migrated to another host machines. Here in this patch when ESTAT CSR register is read from VMM user mode, pending interrupts are synchronized to ESTAT also. So that VMM can get correct pending interrupts. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/kvm/vcpu.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 6e6ff28b0cd0..0dade3a96800 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -508,6 +508,17 @@ static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) return -EINVAL; if (id == LOONGARCH_CSR_ESTAT) { + preempt_disable(); + vcpu_load(vcpu); + /* + * Sync pending interrupts into ESTAT so that interrupt + * remains during VM migration stage + */ + kvm_deliver_intr(vcpu); + vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; + vcpu_put(vcpu); + preempt_enable(); + /* ESTAT IP0~IP7 get from GINTC */ gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff; *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2); -- Gitee From 04a44a056e8b72aaac2e58eda384402af7ed23a7 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 9 Jul 2024 16:25:50 +0800 Subject: [PATCH 1565/2138] LoongArch: KVM: Delay secondary mmu tlb flush until guest entry ANBZ: #11464 commit b5d4e2325db29e063ff23772adb5846f1299b2e2 upstream. With hardware assisted virtualization, there are two level HW mmu, one is GVA to GPA mapping, the other is GPA to HPA mapping which is called secondary mmu in generic. If there is page fault for secondary mmu, there needs tlb flush operation indexed with fault GPA address and VMID. VMID is stored at register CSR.GSTAT and will be reload or recalculated before guest entry. Currently CSR.GSTAT is not saved and restored during VCPU context switch, instead it is recalculated during guest entry. So CSR.GSTAT is effective only when a VCPU runs in guest mode, however it may not be effective if the VCPU exits to host mode. 
Since register CSR.GSTAT may be stale, it may record the VMID of the last schedule-out VCPU, rather than the current VCPU. Function kvm_flush_tlb_gpa() should be called with its real VMID, so here move it to the guest entrance. Also an arch-specific request id KVM_REQ_TLB_FLUSH_GPA is added to flush tlb for secondary mmu, and it can be optimized if VMID is updated, since all guest tlb entries will be invalid if VMID is updated. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/include/asm/kvm_host.h | 2 ++ arch/loongarch/kvm/main.c | 1 + arch/loongarch/kvm/mmu.c | 3 ++- arch/loongarch/kvm/tlb.c | 5 +---- arch/loongarch/kvm/vcpu.c | 18 ++++++++++++++++++ 5 files changed, 24 insertions(+), 5 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 4dd3cdb1ff91..f4a2104bb454 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -33,6 +33,7 @@ #define KVM_PRIVATE_MEM_SLOTS 0 #define KVM_HALT_POLL_NS_DEFAULT 500000 +#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0) #define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(1) #define KVM_GUESTDBG_SW_BP_MASK \ @@ -230,6 +231,7 @@ struct kvm_vcpu_arch { /* vcpu's vpid */ u64 vpid; + gpa_t flush_gpa; /* Frequency of stable timer in Hz */ u64 timer_mhz; diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index 285bd4126e54..1f50f6723739 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -244,6 +244,7 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu) kvm_update_vpid(vcpu, cpu); trace_kvm_vpid_change(vcpu, vcpu->arch.vpid); vcpu->cpu = cpu; + kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu); } /* Restore GSTAT(0x50).vpid */ diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index 915f17527893..03379ebd8f6a 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ 
-940,7 +940,8 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) return ret; /* Invalidate this entry in the TLB */ - kvm_flush_tlb_gpa(vcpu, gpa); + vcpu->arch.flush_gpa = gpa; + kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu); return 0; } diff --git a/arch/loongarch/kvm/tlb.c b/arch/loongarch/kvm/tlb.c index 02535df6b51f..ebdbe9264e9c 100644 --- a/arch/loongarch/kvm/tlb.c +++ b/arch/loongarch/kvm/tlb.c @@ -23,10 +23,7 @@ void kvm_flush_tlb_all(void) void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa) { - unsigned long flags; - - local_irq_save(flags); + lockdep_assert_irqs_disabled(); gpa &= (PAGE_MASK << 1); invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa); - local_irq_restore(flags); } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 0dade3a96800..4a0d4642a5fb 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -288,6 +288,16 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +static void kvm_late_check_requests(struct kvm_vcpu *vcpu) +{ + lockdep_assert_irqs_disabled(); + if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu)) + if (vcpu->arch.flush_gpa != INVALID_GPA) { + kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa); + vcpu->arch.flush_gpa = INVALID_GPA; + } +} + /* * Check and handle pending signal and vCPU requests etc * Run with irq enabled and preempt enabled @@ -339,6 +349,13 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) smp_store_mb(vcpu->mode, IN_GUEST_MODE); kvm_check_vpid(vcpu); kvm_check_pmu(vcpu); + + /* + * Called after function kvm_check_vpid() + * Since it updates CSR.GSTAT used by kvm_flush_tlb_gpa(), + * and it may also clear KVM_REQ_TLB_FLUSH_GPA pending bit + */ + kvm_late_check_requests(vcpu); vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); /* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */ vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; @@ -1293,6 +1310,7 @@ int 
kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) struct loongarch_csrs *csr; vcpu->arch.vpid = 0; + vcpu->arch.flush_gpa = INVALID_GPA; hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); vcpu->arch.swtimer.function = kvm_swtimer_wakeup; -- Gitee From 7151e4e9d6b170f441a1ee45fcb503adf6ec8dce Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 9 Jul 2024 16:25:51 +0800 Subject: [PATCH 1566/2138] LoongArch: KVM: Select huge page only if secondary mmu supports it ANBZ: #11464 commit 2f56f9ea4dc3892c1265751a1c09038f365107ed upstream. Currently page level selection about secondary mmu depends on memory slot and page level about host mmu. There will be problems if page level of secondary mmu is zero already. Huge page cannot be selected if there is normal page mapped in secondary mmu already, since it is not supported to merge normal pages into huge pages now. So page level selection should depend on the following three conditions. 1. Memslot is aligned for huge page and vm is not migrating. 2. Page level of host mmu is also huge page. 3. Page level of secondary mmu is suitable for huge page. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/kvm/mmu.c | 16 +++++++++++++--- arch/loongarch/kvm/vcpu.c | 2 -- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index 03379ebd8f6a..67d92b38b0a9 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -890,10 +890,20 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) /* Disable dirty logging on HugePages */ level = 0; - if (!fault_supports_huge_mapping(memslot, hva, write)) { - level = 0; - } else { + if (fault_supports_huge_mapping(memslot, hva, write)) { + /* Check page level about host mmu*/ level = host_pfn_mapping_level(kvm, gfn, memslot); + if (level == 1) { + /* + * Check page level about secondary mmu + * Disable hugepage if it is normal page on + * secondary mmu already + */ + ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + if (ptep && !kvm_pte_huge(*ptep)) + level = 0; + } + if (level == 1) { gfn = gfn & ~(PTRS_PER_PTE - 1); pfn = pfn & ~(PTRS_PER_PTE - 1); diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 4a0d4642a5fb..781640239d16 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -1407,8 +1407,6 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) /* Restore hardware PMU CSRs */ kvm_restore_pmu(vcpu); - kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); - /* Don't bother restoring registers multiple times unless necessary */ if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) return 0; -- Gitee From 5348927ff4fa51af76e793b75d430e966291cfed Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 9 Jul 2024 16:25:51 +0800 Subject: [PATCH 1567/2138] LoongArch: KVM: Discard dirty page tracking on readonly memslot ANBZ: #11464 commit b072cbf0233b1fd9d84730cbe5cd1706dcacd354 upstream. 
For readonly memslot such as UEFI BIOS or UEFI var space, guest cannot write this memory space directly. So it is not necessary to track dirty pages for readonly memslot. Here we make such optimization in function kvm_arch_commit_memory_region(). Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/kvm/mmu.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index 67d92b38b0a9..0a1a8f49cc81 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -444,6 +444,17 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, enum kvm_mr_change change) { int needs_flush; + u32 old_flags = old ? old->flags : 0; + u32 new_flags = new ? new->flags : 0; + bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES; + + /* Only track memslot flags changed */ + if (change != KVM_MR_FLAGS_ONLY) + return; + + /* Discard dirty page tracking on readonly memslot */ + if ((old_flags & new_flags) & KVM_MEM_READONLY) + return; /* * If dirty page logging is enabled, write protect all pages in the slot @@ -454,9 +465,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, * MOVE/DELETE: The old mappings will already have been cleaned up by * kvm_arch_flush_shadow_memslot() */ - if (change == KVM_MR_FLAGS_ONLY && - (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) && - new->flags & KVM_MEM_LOG_DIRTY_PAGES)) { + if (!(old_flags & KVM_MEM_LOG_DIRTY_PAGES) && log_dirty_pages) { spin_lock(&kvm->mmu_lock); /* Write protect GPA page table entries */ needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn, -- Gitee From 5d3278c684304eee36846b9380d14324bbaf77ce Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 9 Jul 2024 16:25:51 +0800 Subject: [PATCH 1568/2138] LoongArch: KVM: Add memory barrier before update pmd entry ANBZ: #11464 commit 32d4b999dadee0a84ac7fe709cae21d29364e1d1 upstream. 
When updating pmd entry such as allocating new pmd page or splitting huge page into normal page, it is necessary to firstly update all pte entries, and then update pmd entry. It is weak order with LoongArch system, there will be problem if other VCPUs see pmd update firstly while ptes are not updated. Here smp_wmb() is added to assure this. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/kvm/mmu.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index 0a1a8f49cc81..a3c7c1059f70 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -163,6 +163,7 @@ static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm, child = kvm_mmu_memory_cache_alloc(cache); _kvm_pte_init(child, ctx.invalid_ptes[ctx.level - 1]); + smp_wmb(); /* Make pte visible before pmd */ kvm_set_pte(entry, __pa(child)); } else if (kvm_pte_huge(*entry)) { return entry; @@ -778,6 +779,7 @@ static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t g val += PAGE_SIZE; } + smp_wmb(); /* Make pte visible before pmd */ /* The later kvm_flush_tlb_gpa() will flush hugepage tlb */ kvm_set_pte(ptep, __pa(child)); -- Gitee From 3e0a3384f5d1d43f4763068f7cf0a4cfefbee826 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 9 Jul 2024 16:25:51 +0800 Subject: [PATCH 1569/2138] LoongArch: KVM: Add dirty bitmap initially all set support ANBZ: #11464 commit 8c347042527058976e8a1cb10c0ae31e55145f76 upstream. Add KVM_DIRTY_LOG_INITIALLY_SET support on LoongArch system, this feature comes from other architectures like x86 and arm64. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/include/asm/kvm_host.h | 4 ++++ arch/loongarch/kvm/mmu.c | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index f4a2104bb454..7b17712bf57b 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -59,6 +59,10 @@ #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \ KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP) + +#define KVM_DIRTY_LOG_MANUAL_CAPS \ + (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | KVM_DIRTY_LOG_INITIALLY_SET) + struct kvm_vm_stat { struct kvm_vm_stat_generic generic; u64 pages; diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index a3c7c1059f70..fa2306c2df4b 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -467,6 +467,13 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, * kvm_arch_flush_shadow_memslot() */ if (!(old_flags & KVM_MEM_LOG_DIRTY_PAGES) && log_dirty_pages) { + /* + * Initially-all-set does not require write protecting any page + * because they're all assumed to be dirty. + */ + if (kvm_dirty_log_manual_protect_and_init_set(kvm)) + return; + spin_lock(&kvm->mmu_lock); /* Write protect GPA page table entries */ needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn, -- Gitee From 7222c32a10dc512c1d8f04eb4c39d3249e4a1ed0 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 9 Jul 2024 16:25:51 +0800 Subject: [PATCH 1570/2138] LoongArch: KVM: Mark page accessed and dirty with page ref added ANBZ: #11464 commit ebf00272da5c32ecd9f28e56b71bdfd5f11227e6 upstream. Function kvm_map_page_fast() is fast path of secondary mmu page fault flow, pfn is parsed from secondary mmu page table walker. However the corresponding page reference is not added, it is dangerous to access page out of mmu_lock. 
Here page ref is added inside mmu_lock, function kvm_set_pfn_accessed() and kvm_set_pfn_dirty() is called with page ref added, so that the page will not be freed by others. Also kvm_set_pfn_accessed() is removed here since it is called in the following function kvm_release_pfn_clean(). Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/kvm/mmu.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index fa2306c2df4b..539f03e7cf42 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -589,6 +589,7 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ gfn_t gfn = gpa >> PAGE_SHIFT; struct kvm *kvm = vcpu->kvm; struct kvm_memory_slot *slot; + struct page *page; spin_lock(&kvm->mmu_lock); @@ -631,19 +632,22 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ if (changed) { kvm_set_pte(ptep, new); pfn = kvm_pte_pfn(new); + page = kvm_pfn_to_refcounted_page(pfn); + if (page) + get_page(page); } spin_unlock(&kvm->mmu_lock); - /* - * Fixme: pfn may be freed after mmu_lock - * kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this? 
- */ - if (kvm_pte_young(changed)) - kvm_set_pfn_accessed(pfn); + if (changed) { + if (kvm_pte_young(changed)) + kvm_set_pfn_accessed(pfn); - if (kvm_pte_dirty(changed)) { - mark_page_dirty(kvm, gfn); - kvm_set_pfn_dirty(pfn); + if (kvm_pte_dirty(changed)) { + mark_page_dirty(kvm, gfn); + kvm_set_pfn_dirty(pfn); + } + if (page) + put_page(page); } return ret; out: @@ -952,7 +956,6 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) kvm_set_pfn_dirty(pfn); } - kvm_set_pfn_accessed(pfn); kvm_release_pfn_clean(pfn); out: srcu_read_unlock(&kvm->srcu, srcu_idx); -- Gitee From f812a24c90bd92cca776a4d7022703994e4ed0a6 Mon Sep 17 00:00:00 2001 From: Jia Qingtong Date: Tue, 9 Jul 2024 16:25:51 +0800 Subject: [PATCH 1571/2138] LoongArch: KVM: always make pte young in page map's fast path ANBZ: #11464 commit d7ad41a31d91abd01a4d9b040074d808899636ea upstream. It seems redundant to check if pte is young before the call to kvm_pte_mkyoung() in kvm_map_page_fast(). Just remove the check. 
Reviewed-by: Bibo Mao Signed-off-by: Jia Qingtong Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/kvm/mmu.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index 539f03e7cf42..d312921f3ab9 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -601,10 +601,8 @@ static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool writ } /* Track access to pages marked old */ - new = *ptep; - if (!kvm_pte_young(new)) - new = kvm_pte_mkyoung(new); - /* call kvm_set_pfn_accessed() after unlock */ + new = kvm_pte_mkyoung(*ptep); + /* call kvm_set_pfn_accessed() after unlock */ if (write && !kvm_pte_dirty(new)) { if (!kvm_pte_write(new)) { -- Gitee From afb07f02bad69554c0f3f3c2f8f26d363bea48b9 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 9 Jul 2024 16:25:51 +0800 Subject: [PATCH 1572/2138] LoongArch: KVM: Add PV steal time support in host side ANBZ: #11464 commit 03779999ac3053122c33173a652100c8fa6c61c5 upstream. Add ParaVirt steal time feature in host side, VM can search supported features provided by KVM hypervisor, a feature KVM_FEATURE_STEAL_TIME is added here. Like x86, steal time structure is saved in guest memory, one hypercall function KVM_HCALL_FUNC_NOTIFY is added to notify KVM to enable this feature. One CPU attr ioctl command KVM_LOONGARCH_VCPU_PVTIME_CTRL is added to save and restore the base address of steal time structure when a VM is migrated. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/include/asm/kvm_host.h | 2 +- arch/loongarch/include/asm/kvm_para.h | 1 + arch/loongarch/include/asm/kvm_vcpu.h | 5 + arch/loongarch/kvm/Kconfig | 1 + arch/loongarch/kvm/exit.c | 55 ++--- arch/loongarch/kvm/vcpu.c | 277 +++++++------------------- 6 files changed, 113 insertions(+), 228 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 7b17712bf57b..5d92e0a11f35 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -34,7 +34,7 @@ #define KVM_HALT_POLL_NS_DEFAULT 500000 #define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0) -#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(1) +#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1) #define KVM_GUESTDBG_SW_BP_MASK \ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP) diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index 335fb86778e2..a0ed1aacb8b4 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -35,6 +35,7 @@ struct kvm_steal_time { __u32 pad[12]; }; + /* * Hypercall interface for KVM hypervisor * diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 590a92cb5416..c416cb7125c0 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -120,4 +120,9 @@ static inline void kvm_write_reg(struct kvm_vcpu *vcpu, int num, unsigned long v vcpu->arch.gprs[num] = val; } +static inline bool kvm_pvtime_supported(void) +{ + return !!sched_info_on(); +} + #endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */ diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig index 2f44176a45b5..461a465e49fd 100644 --- a/arch/loongarch/kvm/Kconfig +++ b/arch/loongarch/kvm/Kconfig @@ -32,6 +32,7 @@ config KVM select 
KVM_GENERIC_HARDWARE_ENABLING select KVM_MMIO select KVM_XFER_TO_GUEST_WORK + select SCHED_INFO select MMU_NOTIFIER select PREEMPT_NOTIFIERS help diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 48a59a906b4b..8e415503e8e8 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -259,12 +259,9 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) vcpu->arch.gprs[rd] = 0; break; case CPUCFG_KVM_FEATURE: - ret = 0; - if ((plv & CSR_CRMD_PLV) == PLV_KERN) { - ret = KVM_FEATURE_PV_IPI; - if (sched_info_on()) - ret |= KVM_FEATURE_STEAL_TIME; - } + ret = KVM_FEATURE_IPI; + if (kvm_pvtime_supported()) + ret |= KVM_FEATURE_STEAL_TIME; vcpu->arch.gprs[rd] = ret; break; default: @@ -742,6 +739,31 @@ static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +static long kvm_save_notify(struct kvm_vcpu *vcpu) +{ + unsigned long id, data; + + id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1); + data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2); + switch (id) { + case BIT(KVM_FEATURE_STEAL_TIME): + if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID)) + return KVM_HCALL_INVALID_PARAMETER; + + vcpu->arch.st.guest_addr = data; + if (!(data & KVM_STEAL_PHYS_VALID)) + return 0; + + vcpu->arch.st.last_steal = current->sched_info.run_delay; + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + return 0; + default: + return KVM_HCALL_INVALID_CODE; + }; + + return KVM_HCALL_INVALID_CODE; +}; + /* * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root. * @vcpu: Virtual CPU context. 
@@ -821,25 +843,6 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu) kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret); } -static int kvm_save_notify(struct kvm_vcpu *vcpu) -{ - unsigned long id, data; - - id = vcpu->arch.gprs[LOONGARCH_GPR_A1]; - data = vcpu->arch.gprs[LOONGARCH_GPR_A2]; - switch (id) { - case KVM_FEATURE_STEAL_TIME: - vcpu->arch.st.guest_addr = data; - vcpu->arch.st.last_steal = current->sched_info.run_delay; - kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); - break; - default: - break; - }; - - return 0; -}; - static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) { int ret; @@ -865,7 +868,7 @@ static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) fallthrough; default: /* Treat it as noop intruction, only set return value */ - vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; + kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE); break; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 781640239d16..ddfbdd117704 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -33,12 +33,12 @@ const struct kvm_stats_header kvm_vcpu_stats_header = { static void kvm_update_stolen_time(struct kvm_vcpu *vcpu) { + u32 version; + u64 steal; + gpa_t gpa; + struct kvm_memslots *slots; struct kvm_steal_time __user *st; struct gfn_to_hva_cache *ghc; - struct kvm_memslots *slots; - gpa_t gpa; - u64 steal; - u32 version; ghc = &vcpu->arch.st.cache; gpa = vcpu->arch.st.guest_addr; @@ -48,8 +48,7 @@ static void kvm_update_stolen_time(struct kvm_vcpu *vcpu) gpa &= KVM_STEAL_PHYS_MASK; slots = kvm_memslots(vcpu->kvm); if (slots->generation != ghc->generation || gpa != ghc->gpa) { - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, - sizeof(*st))) { + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) { ghc->gpa = INVALID_GPA; return; } @@ -58,19 +57,17 @@ static void kvm_update_stolen_time(struct kvm_vcpu *vcpu) st = (struct kvm_steal_time __user *)ghc->hva; unsafe_get_user(version, &st->version, 
out); if (version & 1) - version += 1; + version += 1; /* first time write, random junk */ + version += 1; unsafe_put_user(version, &st->version, out); - /* Make sure st->version is written first */ smp_wmb(); unsafe_get_user(steal, &st->steal, out); - steal += current->sched_info.run_delay - - vcpu->arch.st.last_steal; + steal += current->sched_info.run_delay - vcpu->arch.st.last_steal; vcpu->arch.st.last_steal = current->sched_info.run_delay; unsafe_put_user(steal, &st->steal, out); - /* Make sure st->steal is written first */ smp_wmb(); version += 1; unsafe_put_user(version, &st->version, out); @@ -78,193 +75,6 @@ static void kvm_update_stolen_time(struct kvm_vcpu *vcpu) mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); } -static bool kvm_pvtime_supported(void) -{ - return !!sched_info_on(); -} - -static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu, - struct kvm_device_attr *attr) -{ - u64 __user *user = (u64 __user *)attr->addr; - struct kvm *kvm = vcpu->kvm; - u64 gpa; - int ret = 0; - int idx; - - if (!kvm_pvtime_supported() || - attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) - return -ENXIO; - - if (get_user(gpa, user)) - return -EFAULT; - - /* Check the address is in a valid memslot */ - idx = srcu_read_lock(&kvm->srcu); - if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT))) - ret = -EINVAL; - srcu_read_unlock(&kvm->srcu, idx); - - if (!ret) - vcpu->arch.st.guest_addr = gpa; - - return ret; -} - -static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu, - struct kvm_device_attr *attr) -{ - u64 __user *user = (u64 __user *)attr->addr; - u64 gpa; - - if (!kvm_pvtime_supported() || - attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) - return -ENXIO; - - gpa = vcpu->arch.st.guest_addr; - if (put_user(gpa, user)) - return -EFAULT; - - return 0; -} - -static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu, - struct kvm_device_attr *attr) -{ - switch (attr->attr) { - case KVM_LOONGARCH_VCPU_PVTIME_GPA: - if 
(kvm_pvtime_supported()) - return 0; - } - - return -ENXIO; -} - -static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu) -{ - struct kvm_context *context; - - context = this_cpu_ptr(vcpu->kvm->arch.vmcs); - context->perf_ctrl[0] = write_csr_perfctrl0(0); - context->perf_ctrl[1] = write_csr_perfctrl1(0); - context->perf_ctrl[2] = write_csr_perfctrl2(0); - context->perf_ctrl[3] = write_csr_perfctrl3(0); - context->perf_cntr[0] = read_csr_perfcntr0(); - context->perf_cntr[1] = read_csr_perfcntr1(); - context->perf_cntr[2] = read_csr_perfcntr2(); - context->perf_cntr[3] = read_csr_perfcntr3(); -} - -static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu) -{ - struct kvm_context *context; - - context = this_cpu_ptr(vcpu->kvm->arch.vmcs); - write_csr_perfcntr0(context->perf_cntr[0]); - write_csr_perfcntr1(context->perf_cntr[1]); - write_csr_perfcntr2(context->perf_cntr[2]); - write_csr_perfcntr3(context->perf_cntr[3]); - write_csr_perfctrl0(context->perf_ctrl[0]); - write_csr_perfctrl1(context->perf_ctrl[1]); - write_csr_perfctrl2(context->perf_ctrl[2]); - write_csr_perfctrl3(context->perf_ctrl[3]); -} - - -static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu) -{ - struct loongarch_csrs *csr = vcpu->arch.csr; - - kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); - kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); - kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); - kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); - kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); -} - -static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu) -{ - struct loongarch_csrs *csr = vcpu->arch.csr; - - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); - kvm_restore_hw_gcsr(csr, 
LOONGARCH_CSR_PERFCNTR3); - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); - kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); -} - -static void kvm_lose_pmu(struct kvm_vcpu *vcpu) -{ - unsigned long val; - struct loongarch_csrs *csr = vcpu->arch.csr; - - if (!(vcpu->arch.aux_inuse & KVM_GUEST_PMU_ENABLE)) - return; - if (!(vcpu->arch.aux_inuse & KVM_GUEST_PMU_ACTIVE)) - return; - - kvm_save_guest_pmu(vcpu); - /* Disable pmu access from guest */ - write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF); - - /* - * Clear KVM_GUEST_PMU_ENABLE if the guest is not using PMU CSRs - * when exiting the guest, so that the next time trap into the guest. - * we don't need to deal with PMU CSRs contexts. - */ - val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); - val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); - val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); - val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); - if (!(val & KVM_PMU_EVENT_ENABLED)) - vcpu->arch.aux_inuse &= ~KVM_GUEST_PMU_ENABLE; - kvm_restore_host_pmu(vcpu); - - /* KVM_GUEST_PMU_ACTIVE needs to be cleared when exiting the guest */ - vcpu->arch.aux_inuse &= ~KVM_GUEST_PMU_ACTIVE; -} - -static void kvm_own_pmu(struct kvm_vcpu *vcpu) -{ - unsigned long val; - - kvm_save_host_pmu(vcpu); - /* Set PM0-PM(num) to guest */ - val = read_csr_gcfg() & ~CSR_GCFG_GPERF; - val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; - write_csr_gcfg(val); - kvm_restore_guest_pmu(vcpu); -} - -static void kvm_restore_pmu(struct kvm_vcpu *vcpu) -{ - if (!(vcpu->arch.aux_inuse & KVM_GUEST_PMU_ENABLE)) - return; - - kvm_make_request(KVM_REQ_PMU, vcpu); -} - -static void kvm_check_pmu(struct kvm_vcpu *vcpu) -{ - if (!kvm_check_request(KVM_REQ_PMU, vcpu)) - return; - - kvm_own_pmu(vcpu); - - /* - * Set KVM_GUEST PMU_ENABLE and GUEST_PMU_ACTIVE - * when guest has KVM_REQ_PMU request. 
- */ - vcpu->arch.aux_inuse |= KVM_GUEST_PMU_ENABLE; - vcpu->arch.aux_inuse |= KVM_GUEST_PMU_ACTIVE; -} - /* * kvm_check_requests - check and handle pending vCPU requests * @@ -282,7 +92,7 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu) if (kvm_dirty_ring_check_request(vcpu)) return RESUME_HOST; - if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu)) + if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) kvm_update_stolen_time(vcpu); return RESUME_GUEST; @@ -969,6 +779,16 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, return -ENXIO; } +static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + if (!kvm_pvtime_supported() || + attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + return -ENXIO; + + return 0; +} + static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { @@ -988,7 +808,7 @@ static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu, return ret; } -static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu, +static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { int ret = 0; @@ -1004,6 +824,23 @@ static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu, return ret; } +static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + u64 gpa; + u64 __user *user = (u64 __user *)attr->addr; + + if (!kvm_pvtime_supported() || + attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + return -ENXIO; + + gpa = vcpu->arch.st.guest_addr; + if (put_user(gpa, user)) + return -EFAULT; + + return 0; +} + static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { @@ -1011,7 +848,7 @@ static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu, switch (attr->group) { case KVM_LOONGARCH_VCPU_CPUCFG: - ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr); + ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr); break; case KVM_LOONGARCH_VCPU_PVTIME_CTRL: ret = 
kvm_loongarch_pvtime_get_attr(vcpu, attr); @@ -1029,6 +866,43 @@ static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu, return -ENXIO; } +static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + int idx, ret = 0; + u64 gpa, __user *user = (u64 __user *)attr->addr; + struct kvm *kvm = vcpu->kvm; + + if (!kvm_pvtime_supported() || + attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + return -ENXIO; + + if (get_user(gpa, user)) + return -EFAULT; + + if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID)) + return -EINVAL; + + if (!(gpa & KVM_STEAL_PHYS_VALID)) { + vcpu->arch.st.guest_addr = gpa; + return 0; + } + + /* Check the address is in a valid memslot */ + idx = srcu_read_lock(&kvm->srcu); + if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT))) + ret = -EINVAL; + srcu_read_unlock(&kvm->srcu, idx); + + if (!ret) { + vcpu->arch.st.guest_addr = gpa; + vcpu->arch.st.last_steal = current->sched_info.run_delay; + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + } + + return ret; +} + static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { @@ -1403,6 +1277,7 @@ static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) /* Control guest page CCA attribute */ change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT); + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); /* Restore hardware PMU CSRs */ kvm_restore_pmu(vcpu); -- Gitee From 9e8b971d252c5ed8b92b467e24fb3ae467818be0 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Tue, 9 Jul 2024 16:25:51 +0800 Subject: [PATCH 1573/2138] LoongArch: KVM: Add PV steal time support in guest side ANBZ: #11464 commit 03779999ac3053122c33173a652100c8fa6c61c5 upstream. Per-cpu struct kvm_steal_time is added here, its size is 64 bytes and also defined as 64 bytes, so that the whole structure is in one physical page. When a VCPU is online, function pv_enable_steal_time() is called. 
This function will pass guest physical address of struct kvm_steal_time and tells hypervisor to enable steal time. When a vcpu is offline, physical address is set as 0 and tells hypervisor to disable steal time. Here is an output of vmstat on guest when there is workload on both host and guest. It shows steal time stat information. procs -----------memory---------- -----io---- -system-- ------cpu----- r b swpd free inact active bi bo in cs us sy id wa st 15 1 0 7583616 184112 72208 20 0 162 52 31 6 43 0 20 17 0 0 7583616 184704 72192 0 0 6318 6885 5 60 8 5 22 16 0 0 7583616 185392 72144 0 0 1766 1081 0 49 0 1 50 16 0 0 7583616 184816 72304 0 0 6300 6166 4 62 12 2 20 18 0 0 7583632 184480 72240 0 0 2814 1754 2 58 4 1 35 Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- .../admin-guide/kernel-parameters.txt | 7 +- arch/loongarch/Kconfig | 22 ++ arch/loongarch/kernel/paravirt.c | 286 +++++++++++++++++- 3 files changed, 311 insertions(+), 4 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 4c28ffed07a1..d2629f0c8288 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3952,9 +3952,10 @@ vulnerability. System may allow data leaks with this option. - no-steal-acc [X86,PV_OPS,ARM64,PPC/PSERIES] Disable paravirtualized - steal time accounting. steal time is computed, but - won't influence scheduler behaviour + no-steal-acc [X86,PV_OPS,ARM64,PPC/PSERIES,RISCV,LOONGARCH,EARLY] + Disable paravirtualized steal time accounting. steal time + is computed, but won't influence scheduler behaviour + nosync [HW,M68K] Disables sync negotiation for all devices. 
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index b29a0943cbd6..8c1854ef9561 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -628,6 +628,17 @@ config PARAVIRT over full virtualization. However, when run without a hypervisor the kernel is theoretically slower and slightly larger. +config PARAVIRT_TIME_ACCOUNTING + bool "Paravirtual steal time accounting" + depends on PARAVIRT + help + Select this option to enable fine granularity task steal time + accounting. Time spent executing other tasks in parallel with + the current vCPU is discounted from the vCPU power. To account for + that, there can be a small performance impact. + + If in doubt, say N here. + endmenu config ARCH_SELECT_MEMORY_MODEL @@ -680,6 +691,17 @@ source "drivers/cpufreq/Kconfig" source "kernel/power/Kconfig" source "drivers/acpi/Kconfig" +config PARAVIRT_TIME_ACCOUNTING + bool "Paravirtual steal time accounting" + depends on PARAVIRT + help + Select this option to enable fine granularity task steal time + accounting. Time spent executing other tasks in parallel with + the current vCPU is discounted from the vCPU power. To account for + that, there can be a small performance impact. + + If in doubt, say N here. 
+ endmenu source "arch/loongarch/kvm/Kconfig" diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index 56182c64ab38..f55b7e027d46 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -8,10 +8,10 @@ #include #include +static int has_steal_clock; struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64); -static int has_steal_clock; static u64 native_steal_clock(int cpu) { @@ -71,6 +71,62 @@ static int pv_register_steal_time(void) return 0; } +static bool steal_acc = true; + +static int __init parse_no_stealacc(char *arg) +{ + steal_acc = false; + return 0; +} +early_param("no-steal-acc", parse_no_stealacc); + +static u64 paravt_steal_clock(int cpu) +{ + int version; + u64 steal; + struct kvm_steal_time *src; + + src = &per_cpu(steal_time, cpu); + do { + + version = src->version; + virt_rmb(); /* Make sure that the version is read before the steal */ + steal = src->steal; + virt_rmb(); /* Make sure that the steal is read before the next version */ + + } while ((version & 1) || (version != src->version)); + + return steal; +} + +static bool steal_acc = true; + +static int __init parse_no_stealacc(char *arg) +{ + steal_acc = false; + return 0; +} +early_param("no-steal-acc", parse_no_stealacc); + +static u64 paravt_steal_clock(int cpu) +{ + int version; + u64 steal; + struct kvm_steal_time *src; + + src = &per_cpu(steal_time, cpu); + do { + + version = src->version; + virt_rmb(); /* Make sure that the version is read before the steal */ + steal = src->steal; + virt_rmb(); /* Make sure that the steal is read before the next version */ + + } while ((version & 1) || (version != src->version)); + + return steal; +} + #ifdef CONFIG_SMP static void pv_send_ipi_single(int cpu, unsigned int action) { @@ -279,3 +335,231 @@ int __init pv_time_init(void) pr_info("Using stolen time PV\n"); return 0; } + +static int 
pv_enable_steal_time(void) +{ + int cpu = smp_processor_id(); + unsigned long addr; + struct kvm_steal_time *st; + + if (!has_steal_clock) + return -EPERM; + + st = &per_cpu(steal_time, cpu); + addr = per_cpu_ptr_to_phys(st); + + /* The whole structure kvm_steal_time should be in one page */ + if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) { + pr_warn("Illegal PV steal time addr %lx\n", addr); + return -EFAULT; + } + + addr |= KVM_STEAL_PHYS_VALID; + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr); + + return 0; +} + +static void pv_disable_steal_time(void) +{ + if (has_steal_clock) + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0); +} + +#ifdef CONFIG_SMP +static int pv_time_cpu_online(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + pv_enable_steal_time(); + local_irq_restore(flags); + + return 0; +} + +static int pv_time_cpu_down_prepare(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + pv_disable_steal_time(); + local_irq_restore(flags); + + return 0; +} +#endif + +static void pv_cpu_reboot(void *unused) +{ + pv_disable_steal_time(); +} + +static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused) +{ + on_each_cpu(pv_cpu_reboot, NULL, 1); + return NOTIFY_DONE; +} + +static struct notifier_block pv_reboot_nb = { + .notifier_call = pv_reboot_notify, +}; + +int __init pv_time_init(void) +{ + int r, feature; + + if (!cpu_has_hypervisor) + return 0; + if (!kvm_para_available()) + return 0; + + feature = read_cpucfg(CPUCFG_KVM_FEATURE); + if (!(feature & KVM_FEATURE_STEAL_TIME)) + return 0; + + has_steal_clock = 1; + r = pv_enable_steal_time(); + if (r < 0) { + has_steal_clock = 0; + return 0; + } + register_reboot_notifier(&pv_reboot_nb); + +#ifdef CONFIG_SMP + r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "loongarch/pv_time:online", + pv_time_cpu_online, pv_time_cpu_down_prepare); + if (r < 0) { + has_steal_clock = 0; + pr_err("Failed to 
install cpu hotplug callbacks\n"); + return r; + } +#endif + + static_call_update(pv_steal_clock, paravt_steal_clock); + + static_key_slow_inc(¶virt_steal_enabled); +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING + if (steal_acc) + static_key_slow_inc(¶virt_steal_rq_enabled); +#endif + + pr_info("Using paravirt steal-time\n"); + + return 0; +} + +static int pv_enable_steal_time(void) +{ + int cpu = smp_processor_id(); + unsigned long addr; + struct kvm_steal_time *st; + + if (!has_steal_clock) + return -EPERM; + + st = &per_cpu(steal_time, cpu); + addr = per_cpu_ptr_to_phys(st); + + /* The whole structure kvm_steal_time should be in one page */ + if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) { + pr_warn("Illegal PV steal time addr %lx\n", addr); + return -EFAULT; + } + + addr |= KVM_STEAL_PHYS_VALID; + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr); + + return 0; +} + +static void pv_disable_steal_time(void) +{ + if (has_steal_clock) + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0); +} + +#ifdef CONFIG_SMP +static int pv_time_cpu_online(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + pv_enable_steal_time(); + local_irq_restore(flags); + + return 0; +} + +static int pv_time_cpu_down_prepare(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + pv_disable_steal_time(); + local_irq_restore(flags); + + return 0; +} +#endif + +static void pv_cpu_reboot(void *unused) +{ + pv_disable_steal_time(); +} + +static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused) +{ + on_each_cpu(pv_cpu_reboot, NULL, 1); + return NOTIFY_DONE; +} + +static struct notifier_block pv_reboot_nb = { + .notifier_call = pv_reboot_notify, +}; + +int __init pv_time_init(void) +{ + int r, feature; + + if (!cpu_has_hypervisor) + return 0; + if (!kvm_para_available()) + return 0; + + feature = read_cpucfg(CPUCFG_KVM_FEATURE); + if (!(feature & KVM_FEATURE_STEAL_TIME)) + return 0; + + 
has_steal_clock = 1; + r = pv_enable_steal_time(); + if (r < 0) { + has_steal_clock = 0; + return 0; + } + register_reboot_notifier(&pv_reboot_nb); + +#ifdef CONFIG_SMP + r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "loongarch/pv_time:online", + pv_time_cpu_online, pv_time_cpu_down_prepare); + if (r < 0) { + has_steal_clock = 0; + pr_err("Failed to install cpu hotplug callbacks\n"); + return r; + } +#endif + + static_call_update(pv_steal_clock, paravt_steal_clock); + + static_key_slow_inc(¶virt_steal_enabled); +#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING + if (steal_acc) + static_key_slow_inc(¶virt_steal_rq_enabled); +#endif + + pr_info("Using paravirt steal-time\n"); + + return 0; +} -- Gitee From 1cb248108bbf969c6034848dab9e17a756243905 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 10 Jul 2024 16:50:27 +0800 Subject: [PATCH 1574/2138] perf kvm: Add kvm-stat for loongarch64 ANBZ: #11464 commit 492ac37fa38faf520b5beae44c930063265ee183 upstream. Add support for 'perf kvm stat' on loongarch64 platform, now only kvm exit event is supported. 
Here is example output about "perf kvm --host stat report" command Event name Samples Sample% Time (ns) Time% Mean Time (ns) Mem Store 83969 51.00% 625697070 8.00% 7451 Mem Read 37641 22.00% 112485730 1.00% 2988 Interrupt 15542 9.00% 20620190 0.00% 1326 IOCSR 15207 9.00% 94296190 1.00% 6200 Hypercall 4873 2.00% 12265280 0.00% 2516 Idle 3713 2.00% 6322055860 87.00% 1702681 FPU 1819 1.00% 2750300 0.00% 1511 Inst Fetch 502 0.00% 1341740 0.00% 2672 Mem Modify 324 0.00% 602240 0.00% 1858 CPUCFG 55 0.00% 77610 0.00% 1411 CSR 12 0.00% 19690 0.00% 1640 LASX 3 0.00% 4870 0.00% 1623 LSX 2 0.00% 2100 0.00% 1050 Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- tools/perf/arch/loongarch/Makefile | 1 + tools/perf/arch/loongarch/util/Build | 2 + tools/perf/arch/loongarch/util/header.c | 96 +++++++++++++++ tools/perf/arch/loongarch/util/kvm-stat.c | 139 ++++++++++++++++++++++ 4 files changed, 238 insertions(+) create mode 100644 tools/perf/arch/loongarch/util/header.c create mode 100644 tools/perf/arch/loongarch/util/kvm-stat.c diff --git a/tools/perf/arch/loongarch/Makefile b/tools/perf/arch/loongarch/Makefile index c392e7af4743..c8be64c5cdb4 100644 --- a/tools/perf/arch/loongarch/Makefile +++ b/tools/perf/arch/loongarch/Makefile @@ -4,6 +4,7 @@ PERF_HAVE_DWARF_REGS := 1 endif PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1 PERF_HAVE_JITDUMP := 1 +HAVE_KVM_STAT_SUPPORT := 1 # # Syscall table generation for perf diff --git a/tools/perf/arch/loongarch/util/Build b/tools/perf/arch/loongarch/util/Build index d776125a2d06..b12d374d7096 100644 --- a/tools/perf/arch/loongarch/util/Build +++ b/tools/perf/arch/loongarch/util/Build @@ -1,5 +1,7 @@ +perf-y += header.o perf-y += perf_regs.o perf-$(CONFIG_DWARF) += dwarf-regs.o perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o +perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o diff --git 
a/tools/perf/arch/loongarch/util/header.c b/tools/perf/arch/loongarch/util/header.c new file mode 100644 index 000000000000..d962dff55512 --- /dev/null +++ b/tools/perf/arch/loongarch/util/header.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Implementation of get_cpuid(). + * + * Author: Nikita Shubin + * Bibo Mao + * Huacai Chen + */ + +#include +#include +#include +#include +#include "util/debug.h" +#include "util/header.h" + +/* + * Output example from /proc/cpuinfo + * CPU Family : Loongson-64bit + * Model Name : Loongson-3C5000 + * CPU Revision : 0x10 + * FPU Revision : 0x01 + */ +#define CPUINFO_MODEL "Model Name" +#define CPUINFO "/proc/cpuinfo" + +static char *_get_field(const char *line) +{ + char *line2, *nl; + + line2 = strrchr(line, ' '); + if (!line2) + return NULL; + + line2++; + nl = strrchr(line, '\n'); + if (!nl) + return NULL; + + return strndup(line2, nl - line2); +} + +static char *_get_cpuid(void) +{ + unsigned long line_sz; + char *line, *model, *cpuid; + FILE *file; + + file = fopen(CPUINFO, "r"); + if (file == NULL) + return NULL; + + line = model = cpuid = NULL; + while (getline(&line, &line_sz, file) != -1) { + if (strncmp(line, CPUINFO_MODEL, strlen(CPUINFO_MODEL))) + continue; + + model = _get_field(line); + if (!model) + goto out_free; + break; + } + + if (model && (asprintf(&cpuid, "%s", model) < 0)) + cpuid = NULL; + +out_free: + fclose(file); + free(model); + return cpuid; +} + +int get_cpuid(char *buffer, size_t sz) +{ + int ret = 0; + char *cpuid = _get_cpuid(); + + if (!cpuid) + return EINVAL; + + if (sz < strlen(cpuid)) { + ret = ENOBUFS; + goto out_free; + } + + scnprintf(buffer, sz, "%s", cpuid); + +out_free: + free(cpuid); + return ret; +} + +char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused) +{ + return _get_cpuid(); +} diff --git a/tools/perf/arch/loongarch/util/kvm-stat.c b/tools/perf/arch/loongarch/util/kvm-stat.c new file mode 100644 index 000000000000..a7859a3a9a51 --- /dev/null +++ 
b/tools/perf/arch/loongarch/util/kvm-stat.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include "util/kvm-stat.h" +#include "util/parse-events.h" +#include "util/debug.h" +#include "util/evsel.h" +#include "util/evlist.h" +#include "util/pmus.h" + +#define LOONGARCH_EXCEPTION_INT 0 +#define LOONGARCH_EXCEPTION_PIL 1 +#define LOONGARCH_EXCEPTION_PIS 2 +#define LOONGARCH_EXCEPTION_PIF 3 +#define LOONGARCH_EXCEPTION_PME 4 +#define LOONGARCH_EXCEPTION_FPD 15 +#define LOONGARCH_EXCEPTION_SXD 16 +#define LOONGARCH_EXCEPTION_ASXD 17 +#define LOONGARCH_EXCEPTION_GSPR 22 +#define LOONGARCH_EXCEPTION_CPUCFG 100 +#define LOONGARCH_EXCEPTION_CSR 101 +#define LOONGARCH_EXCEPTION_IOCSR 102 +#define LOONGARCH_EXCEPTION_IDLE 103 +#define LOONGARCH_EXCEPTION_OTHERS 104 +#define LOONGARCH_EXCEPTION_HVC 23 + +#define loongarch_exception_type \ + {LOONGARCH_EXCEPTION_INT, "Interrupt" }, \ + {LOONGARCH_EXCEPTION_PIL, "Mem Read" }, \ + {LOONGARCH_EXCEPTION_PIS, "Mem Store" }, \ + {LOONGARCH_EXCEPTION_PIF, "Inst Fetch" }, \ + {LOONGARCH_EXCEPTION_PME, "Mem Modify" }, \ + {LOONGARCH_EXCEPTION_FPD, "FPU" }, \ + {LOONGARCH_EXCEPTION_SXD, "LSX" }, \ + {LOONGARCH_EXCEPTION_ASXD, "LASX" }, \ + {LOONGARCH_EXCEPTION_GSPR, "Privilege Error" }, \ + {LOONGARCH_EXCEPTION_HVC, "Hypercall" }, \ + {LOONGARCH_EXCEPTION_CPUCFG, "CPUCFG" }, \ + {LOONGARCH_EXCEPTION_CSR, "CSR" }, \ + {LOONGARCH_EXCEPTION_IOCSR, "IOCSR" }, \ + {LOONGARCH_EXCEPTION_IDLE, "Idle" }, \ + {LOONGARCH_EXCEPTION_OTHERS, "Others" } + +define_exit_reasons_table(loongarch_exit_reasons, loongarch_exception_type); + +const char *vcpu_id_str = "vcpu_id"; +const char *kvm_exit_reason = "reason"; +const char *kvm_entry_trace = "kvm:kvm_enter"; +const char *kvm_reenter_trace = "kvm:kvm_reenter"; +const char *kvm_exit_trace = "kvm:kvm_exit"; +const char *kvm_events_tp[] = { + "kvm:kvm_enter", + "kvm:kvm_reenter", + "kvm:kvm_exit", + "kvm:kvm_exit_gspr", + NULL, +}; + +static bool event_begin(struct evsel 
*evsel, + struct perf_sample *sample, struct event_key *key) +{ + return exit_event_begin(evsel, sample, key); +} + +static bool event_end(struct evsel *evsel, + struct perf_sample *sample __maybe_unused, + struct event_key *key __maybe_unused) +{ + /* + * LoongArch kvm is different with other architectures + * + * There is kvm:kvm_reenter or kvm:kvm_enter event adjacent with + * kvm:kvm_exit event. + * kvm:kvm_enter means returning to vmm and then to guest + * kvm:kvm_reenter means returning to guest immediately + */ + return evsel__name_is(evsel, kvm_entry_trace) || evsel__name_is(evsel, kvm_reenter_trace); +} + +static void event_gspr_get_key(struct evsel *evsel, + struct perf_sample *sample, struct event_key *key) +{ + unsigned int insn; + + key->key = LOONGARCH_EXCEPTION_OTHERS; + insn = evsel__intval(evsel, sample, "inst_word"); + + switch (insn >> 24) { + case 0: + /* CPUCFG inst trap */ + if ((insn >> 10) == 0x1b) + key->key = LOONGARCH_EXCEPTION_CPUCFG; + break; + case 4: + /* CSR inst trap */ + key->key = LOONGARCH_EXCEPTION_CSR; + break; + case 6: + /* IOCSR inst trap */ + if ((insn >> 15) == 0xc90) + key->key = LOONGARCH_EXCEPTION_IOCSR; + else if ((insn >> 15) == 0xc91) + /* Idle inst trap */ + key->key = LOONGARCH_EXCEPTION_IDLE; + break; + default: + key->key = LOONGARCH_EXCEPTION_OTHERS; + break; + } +} + +static struct child_event_ops child_events[] = { + { .name = "kvm:kvm_exit_gspr", .get_key = event_gspr_get_key }, + { NULL, NULL }, +}; + +static struct kvm_events_ops exit_events = { + .is_begin_event = event_begin, + .is_end_event = event_end, + .child_ops = child_events, + .decode_key = exit_event_decode_key, + .name = "VM-EXIT" +}; + +struct kvm_reg_events_ops kvm_reg_events_ops[] = { + { .name = "vmexit", .ops = &exit_events, }, + { NULL, NULL }, +}; + +const char * const kvm_skip_events[] = { + NULL, +}; + +int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused) +{ + kvm->exit_reasons_isa = "loongarch64"; + 
kvm->exit_reasons = loongarch_exit_reasons; + return 0; +} -- Gitee From 24fdbbab6c59407d322b52f120697398c887e7c3 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Thu, 13 Jun 2024 20:28:03 +0800 Subject: [PATCH 1575/2138] KVM: Discard zero mask with function kvm_dirty_ring_reset ANBZ: #11464 commit 676f819c3e982db3695a371f336a05086585ea4f upstream. Function kvm_reset_dirty_gfn may be called with parameters cur_slot / cur_offset / mask are all zero, it does not represent real dirty page. It is not necessary to clear dirty page in this condition. Also return value of macro __fls() is undefined if mask is zero which is called in funciton kvm_reset_dirty_gfn(). Here just return. Signed-off-by: Bibo Mao Message-ID: <20240613122803.1031511-1-maobibo@loongson.cn> [Move the conditional inside kvm_reset_dirty_gfn; suggested by Sean Christopherson. - Paolo] Signed-off-by: Paolo Bonzini Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- virt/kvm/dirty_ring.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/virt/kvm/dirty_ring.c b/virt/kvm/dirty_ring.c index c1cd7dfe4a90..27e50190d419 100644 --- a/virt/kvm/dirty_ring.c +++ b/virt/kvm/dirty_ring.c @@ -55,6 +55,9 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask) struct kvm_memory_slot *memslot; int as_id, id; + if (!mask) + return; + as_id = slot >> 16; id = (u16)slot; -- Gitee From 5dbe0b87d30902a57d0b59f7702acf2d71553bce Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Mon, 26 Aug 2024 23:11:32 +0800 Subject: [PATCH 1576/2138] LoongArch: KVM: Invalidate guest steal time address on vCPU reset ANBZ: #11464 commit 4956e07f05e239b274d042618a250c9fa3e92629 upstream. If ParaVirt steal time feature is enabled, there is a percpu gpa address passed from guest vCPU and host modifies guest memory space with this gpa address. When vCPU is reset normally, it will notify host and invalidate gpa address. 
However if VM is crashed and VMM reboots VM forcely, the vCPU reboot notification callback will not be called in VM. Host needs invalidate the gpa address, else host will modify guest memory during VM reboots. Here it is invalidated from the vCPU KVM_REG_LOONGARCH_VCPU_RESET ioctl interface. Also funciton kvm_reset_timer() is removed at vCPU reset stage, since SW emulated timer is only used in vCPU block state. When a vCPU is removed from the block waiting queue, kvm_restore_timer() is called and SW timer is cancelled. And the timer register is also cleared at VMM when a vCPU is reset. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/include/asm/kvm_vcpu.h | 1 - arch/loongarch/kvm/timer.c | 7 ------- arch/loongarch/kvm/vcpu.c | 2 +- 3 files changed, 1 insertion(+), 9 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index c416cb7125c0..86570084e05a 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -76,7 +76,6 @@ static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { } #endif void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); -void kvm_reset_timer(struct kvm_vcpu *vcpu); void kvm_save_timer(struct kvm_vcpu *vcpu); void kvm_restore_timer(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c index bcc6b6d063d9..74a4b5c272d6 100644 --- a/arch/loongarch/kvm/timer.c +++ b/arch/loongarch/kvm/timer.c @@ -188,10 +188,3 @@ void kvm_save_timer(struct kvm_vcpu *vcpu) kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT); preempt_enable(); } - -void kvm_reset_timer(struct kvm_vcpu *vcpu) -{ - write_gcsr_timercfg(0); - kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0); - hrtimer_cancel(&vcpu->arch.swtimer); -} diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 
ddfbdd117704..e733d15cc015 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -688,7 +688,7 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu, vcpu->kvm->arch.time_offset = (signed long)(v - drdtime()); break; case KVM_REG_LOONGARCH_VCPU_RESET: - kvm_reset_timer(vcpu); + vcpu->arch.st.guest_addr = 0; memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); break; -- Gitee From b8f00e478b7cc20a8326c319bbe051572c4cf2ef Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 11 Sep 2024 23:26:32 +0800 Subject: [PATCH 1577/2138] LoongArch: Revert qspinlock to test-and-set simple lock on VM ANBZ: #11464 commit e5ba90abb2ebdfd3c19481319b349d4885312bef upstream. Similar with x86, when VM is detected, revert to a simple test-and-set lock to avoid the horrors of queue preemption. Tested on 3C5000 Dual-way machine with 32 cores and 2 numa nodes, test case is kcbench on kernel mainline 6.10, the detailed command is "kcbench --src /root/src/linux" Performance on host machine kernel compile time performance impact Original 150.29 seconds With patch 150.19 seconds almost no impact Performance on virtual machine: 1. 1 VM with 32 vCPUs and 2 numa node, numa node pinned kernel compile time performance impact Original 170.87 seconds With patch 171.73 seconds almost no impact 2. 
2 VMs, each VM with 32 vCPUs and 2 numa node, numa node pinned kernel compile time performance impact Original 2362.04 seconds With patch 354.73 seconds +565% Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/include/asm/Kbuild | 1 - arch/loongarch/include/asm/paravirt.h | 7 +++++ arch/loongarch/include/asm/qspinlock.h | 41 ++++++++++++++++++++++++++ arch/loongarch/kernel/paravirt.c | 11 +++++++ arch/loongarch/kernel/setup.c | 2 ++ arch/loongarch/kernel/smp.c | 4 ++- 6 files changed, 64 insertions(+), 2 deletions(-) create mode 100644 arch/loongarch/include/asm/qspinlock.h diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild index 27f66930ab6a..22991a6f0e2b 100644 --- a/arch/loongarch/include/asm/Kbuild +++ b/arch/loongarch/include/asm/Kbuild @@ -4,7 +4,6 @@ generic-y += mcs_spinlock.h generic-y += parport.h generic-y += early_ioremap.h generic-y += qrwlock.h -generic-y += qspinlock.h generic-y += rwsem.h generic-y += segment.h generic-y += user.h diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h index fe27fb5e82b8..dabc5aec179c 100644 --- a/arch/loongarch/include/asm/paravirt.h +++ b/arch/loongarch/include/asm/paravirt.h @@ -18,6 +18,7 @@ static inline u64 paravirt_steal_clock(int cpu) int pv_ipi_init(void); int __init pv_time_init(void); +int __init pv_spinlock_init(void); #else static inline int pv_ipi_init(void) { @@ -28,5 +29,11 @@ static inline int pv_time_init(void) { return 0; } + +static inline int pv_spinlock_init(void) +{ + return 0; +} + #endif // CONFIG_PARAVIRT #endif diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h new file mode 100644 index 000000000000..e76d3aa1e1eb --- /dev/null +++ b/arch/loongarch/include/asm/qspinlock.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef 
_ASM_LOONGARCH_QSPINLOCK_H +#define _ASM_LOONGARCH_QSPINLOCK_H + +#include + +#ifdef CONFIG_PARAVIRT + +DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key); + +#define virt_spin_lock virt_spin_lock + +static inline bool virt_spin_lock(struct qspinlock *lock) +{ + int val; + + if (!static_branch_unlikely(&virt_spin_lock_key)) + return false; + + /* + * On hypervisors without PARAVIRT_SPINLOCKS support we fall + * back to a Test-and-Set spinlock, because fair locks have + * horrible lock 'holder' preemption issues. + */ + +__retry: + val = atomic_read(&lock->val); + + if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) { + cpu_relax(); + goto __retry; + } + + return true; +} + +#endif /* CONFIG_PARAVIRT */ + +#include + +#endif // _ASM_LOONGARCH_QSPINLOCK_H diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index f55b7e027d46..658333b889b8 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -12,6 +12,7 @@ static int has_steal_clock; struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64); +DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key); static u64 native_steal_clock(int cpu) { @@ -563,3 +564,13 @@ int __init pv_time_init(void) return 0; } + +int __init pv_spinlock_init(void) +{ + if (!cpu_has_hypervisor) + return 0; + + static_branch_enable(&virt_spin_lock_key); + + return 0; +} diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 721f89e00e2b..7bdeed9fbc26 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -663,6 +663,8 @@ void __init setup_arch(char **cmdline_p) arch_mem_init(cmdline_p); resource_init(); + jump_label_init(); /* Initialise the static keys for paravirtualization */ + #ifdef CONFIG_SMP plat_smp_setup(); prefill_possible_map(); diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 
ed3b539c786b..35b2fc29f125 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -477,7 +477,7 @@ core_initcall(ipi_pm_init); #endif /* Preload SMP state for boot cpu */ -void smp_prepare_boot_cpu(void) +void __init smp_prepare_boot_cpu(void) { unsigned int cpu, node, rr_node; @@ -510,6 +510,8 @@ void smp_prepare_boot_cpu(void) rr_node = next_node_in(rr_node, node_online_map); } } + + pv_spinlock_init(); } /* called from main before smp_init() */ -- Gitee From 92663078742af24bc84fb0316dbce292a878455c Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 11 Sep 2024 23:26:32 +0800 Subject: [PATCH 1578/2138] LoongArch: KVM: Add VM feature detection function ANBZ: #11464 commit a53f48b6327c12437c9f429da2283e526eda2362 upstream. Loongson SIMD Extension (LSX), Loongson Advanced SIMD Extension (LASX) and Loongson Binary Translation (LBT) features are defined in register CPUCFG2. Two kinds of LSX/LASX/LBT feature detection are added here, one is VCPU feature, and the other is VM feature. VCPU feature dection can only work with VCPU thread itself, and requires VCPU thread is created already. So LSX/LASX/LBT feature detection for VM is added also, it can be done even if VM is not created, and also can be done by any threads besides VCPU threads. Here ioctl command KVM_HAS_DEVICE_ATTR is added for VM, and macro KVM_LOONGARCH_VM_FEAT_CTRL is added to check supported feature. 
And five sub-features relative with LSX/LASX/LBT are added as following: KVM_LOONGARCH_VM_FEAT_LSX KVM_LOONGARCH_VM_FEAT_LASX KVM_LOONGARCH_VM_FEAT_X86BT KVM_LOONGARCH_VM_FEAT_ARMBT KVM_LOONGARCH_VM_FEAT_MIPSBT Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/include/uapi/asm/kvm.h | 8 ++++++++ arch/loongarch/kvm/vcpu.c | 6 ++++++ arch/loongarch/kvm/vm.c | 23 +++++++++++++++++++---- 3 files changed, 33 insertions(+), 4 deletions(-) diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index 9e20a8071216..feb948b2d1a8 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -86,6 +86,14 @@ struct kvm_fpu { #define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) #define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) +/* Device Control API on vm fd */ +#define KVM_LOONGARCH_VM_FEAT_CTRL 0 +#define KVM_LOONGARCH_VM_FEAT_LSX 0 +#define KVM_LOONGARCH_VM_FEAT_LASX 1 +#define KVM_LOONGARCH_VM_FEAT_X86BT 2 +#define KVM_LOONGARCH_VM_FEAT_ARMBT 3 +#define KVM_LOONGARCH_VM_FEAT_MIPSBT 4 + /* Device Control API on vcpu fd */ #define KVM_LOONGARCH_VCPU_CPUCFG 0 #define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1 diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index e733d15cc015..01d7a42489de 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -516,6 +516,12 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) *v |= CPUCFG2_LSX; if (cpu_has_lasx) *v |= CPUCFG2_LASX; + if (cpu_has_lbt_x86) + *v |= CPUCFG2_X86BT; + if (cpu_has_lbt_arm) + *v |= CPUCFG2_ARMBT; + if (cpu_has_lbt_mips) + *v |= CPUCFG2_MIPSBT; return 0; case LOONGARCH_CPUCFG3: diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 01e35a841027..486b48510414 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -107,11 +107,26 
@@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) { switch (attr->attr) { - case KVM_LOONGARCH_VM_FEAT_PMU: - if (cpu_has_pmp) + case KVM_LOONGARCH_VM_FEAT_LSX: + if (cpu_has_lsx) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_LASX: + if (cpu_has_lasx) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_X86BT: + if (cpu_has_lbt_x86) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_ARMBT: + if (cpu_has_lbt_arm) + return 0; + return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_MIPSBT: + if (cpu_has_lbt_mips) return 0; return -ENXIO; - default: return -ENXIO; } @@ -132,8 +147,8 @@ static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { int r; - struct kvm *kvm = filp->private_data; void __user *argp = (void __user *)arg; + struct kvm *kvm = filp->private_data; struct kvm_device_attr attr; switch (ioctl) { -- Gitee From cf2daf613a8e61bfb8753d184d27cc7db8db146d Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 11 Sep 2024 23:26:32 +0800 Subject: [PATCH 1579/2138] LoongArch: KVM: Add Binary Translation extension support ANBZ: #11464 commit b67ee19a907ddb7dab8b1bb4b35659d8372bfc46 upstream. Loongson Binary Translation (LBT) is used to accelerate binary translation, which contains 4 scratch registers (scr0 to scr3), x86/ARM eflags (eflags) and x87 fpu stack pointer (ftop). Like FPU extension, here a lazy enabling method is used for LBT. the LBT context is saved/restored on the vcpu context switch path. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/include/asm/kvm_host.h | 13 ++++-- arch/loongarch/include/asm/kvm_vcpu.h | 6 +++ arch/loongarch/kvm/exit.c | 9 ++++ arch/loongarch/kvm/vcpu.c | 61 ++++++++++++++++++++++++++- 4 files changed, 84 insertions(+), 5 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 5d92e0a11f35..ff93a3e6882b 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -170,10 +170,9 @@ enum emulation_result { #define KVM_LARCH_FPU (0x1 << 0) #define KVM_LARCH_LSX (0x1 << 1) #define KVM_LARCH_LASX (0x1 << 2) -#define KVM_LARCH_SWCSR_LATEST (0x1 << 3) -#define KVM_LARCH_HWCSR_USABLE (0x1 << 4) -#define KVM_GUEST_PMU_ENABLE (0x1 << 5) -#define KVM_GUEST_PMU_ACTIVE (0x1 << 6) +#define KVM_LARCH_LBT (0x1 << 3) +#define KVM_LARCH_SWCSR_LATEST (0x1 << 4) +#define KVM_LARCH_HWCSR_USABLE (0x1 << 5) struct kvm_vcpu_arch { /* @@ -207,6 +206,7 @@ struct kvm_vcpu_arch { /* FPU state */ struct loongarch_fpu fpu FPU_ALIGN; + struct loongarch_lbt lbt; /* CSR state */ struct loongarch_csrs *csr; @@ -295,6 +295,11 @@ static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch) return (arch->cpucfg[LOONGARCH_CPUCFG6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT; } +static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT); +} + /* Debug: dump vcpu state */ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h index 86570084e05a..ca067bf6dd0a 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -75,6 +75,12 @@ static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { } static inline void kvm_restore_lasx(struct 
loongarch_fpu *fpu) { } #endif +#ifdef CONFIG_CPU_HAS_LBT +int kvm_own_lbt(struct kvm_vcpu *vcpu); +#else +static inline int kvm_own_lbt(struct kvm_vcpu *vcpu) { return -EINVAL; } +#endif + void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); void kvm_save_timer(struct kvm_vcpu *vcpu); void kvm_restore_timer(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 8e415503e8e8..47ab50253f82 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -794,6 +794,14 @@ static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) return RESUME_GUEST; } +static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu) +{ + if (kvm_own_lbt(vcpu)) + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return RESUME_GUEST; +} + static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu) { unsigned int min, cpu, i; @@ -908,6 +916,7 @@ static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { [EXCCODE_FPDIS] = kvm_handle_fpu_disabled, [EXCCODE_LSXDIS] = kvm_handle_lsx_disabled, [EXCCODE_LASXDIS] = kvm_handle_lasx_disabled, + [EXCCODE_BTDIS] = kvm_handle_lbt_disabled, [EXCCODE_GSPR] = kvm_handle_gspr, [EXCCODE_HVC] = kvm_handle_hypercall, }; diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 01d7a42489de..3eb230674b7d 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -1025,12 +1026,66 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) return 0; } +#ifdef CONFIG_CPU_HAS_LBT +int kvm_own_lbt(struct kvm_vcpu *vcpu) +{ + if (!kvm_guest_has_lbt(&vcpu->arch)) + return -EINVAL; + + preempt_disable(); + set_csr_euen(CSR_EUEN_LBTEN); + _restore_lbt(&vcpu->arch.lbt); + vcpu->arch.aux_inuse |= KVM_LARCH_LBT; + preempt_enable(); + + return 0; +} + +static void kvm_lose_lbt(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) { + _save_lbt(&vcpu->arch.lbt); 
+ clear_csr_euen(CSR_EUEN_LBTEN); + vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT; + } + preempt_enable(); +} + +static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) +{ + /* + * If TM is enabled, top register save/restore will + * cause lbt exception, here enable lbt in advance + */ + if (fcsr & FPU_CSR_TM) + kvm_own_lbt(vcpu); +} + +static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { + if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) + return; + kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0)); + } +} +#else +static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { } +static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { } +static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { } +#endif + /* Enable FPU and restore context */ void kvm_own_fpu(struct kvm_vcpu *vcpu) { preempt_disable(); - /* Enable FPU */ + /* + * Enable FPU for guest + * Set FR and FRE according to guest context + */ + kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); set_csr_euen(CSR_EUEN_FPEN); kvm_restore_fpu(&vcpu->arch.fpu); @@ -1050,6 +1105,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu) preempt_disable(); /* Enable LSX for guest */ + kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN); switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { case KVM_LARCH_FPU: @@ -1084,6 +1140,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu) preempt_disable(); + kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { case KVM_LARCH_LSX: @@ -1115,6 +1172,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) { preempt_disable(); + kvm_check_fcsr_alive(vcpu); if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { kvm_save_lasx(&vcpu->arch.fpu); vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); @@ -1137,6 +1195,7 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu) /* Disable FPU */ 
clear_csr_euen(CSR_EUEN_FPEN); } + kvm_lose_lbt(vcpu); preempt_enable(); } -- Gitee From 4e5f5d263138cd994dc4ffa1fd17ac55da686a28 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 11 Sep 2024 23:26:32 +0800 Subject: [PATCH 1580/2138] LoongArch: KVM: Add vm migration support for LBT registers ANBZ: #11464 commit acc7f20d54a3eeceec7602b11d6e3462e7fba862 upstream. Every vcpu has separate LBT registers. And there are four scr registers, one flags and ftop register for LBT extension. When VM migrates, VMM needs to get LBT registers for every vcpu. Here macro KVM_REG_LOONGARCH_LBT is added for new vcpu lbt register type, the following macro is added to get/put LBT registers. KVM_REG_LOONGARCH_LBT_SCR0 KVM_REG_LOONGARCH_LBT_SCR1 KVM_REG_LOONGARCH_LBT_SCR2 KVM_REG_LOONGARCH_LBT_SCR3 KVM_REG_LOONGARCH_LBT_EFLAGS KVM_REG_LOONGARCH_LBT_FTOP Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/include/uapi/asm/kvm.h | 9 +++++ arch/loongarch/kvm/vcpu.c | 56 +++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index feb948b2d1a8..6ca6700c5c51 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -68,6 +68,7 @@ struct kvm_fpu { #define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL) #define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL) #define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL) +#define KVM_REG_LOONGARCH_LBT (KVM_REG_LOONGARCH | 0x50000ULL) #define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL) #define KVM_CSR_IDX_MASK 0x7fff #define KVM_CPUCFG_IDX_MASK 0x7fff @@ -81,6 +82,14 @@ struct kvm_fpu { /* Debugging: Special instruction for software breakpoint */ #define KVM_REG_LOONGARCH_DEBUG_INST (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3) +/* LBT registers */ +#define 
KVM_REG_LOONGARCH_LBT_SCR0 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_LOONGARCH_LBT_SCR1 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 2) +#define KVM_REG_LOONGARCH_LBT_SCR2 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 3) +#define KVM_REG_LOONGARCH_LBT_SCR3 (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 4) +#define KVM_REG_LOONGARCH_LBT_EFLAGS (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 5) +#define KVM_REG_LOONGARCH_LBT_FTOP (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 6) + #define LOONGARCH_REG_SHIFT 3 #define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) #define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 3eb230674b7d..e3dcfd3ae3e2 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -621,6 +621,34 @@ static int kvm_get_one_reg(struct kvm_vcpu *vcpu, else ret = -EINVAL; break; + case KVM_REG_LOONGARCH_LBT: + if (!kvm_guest_has_lbt(&vcpu->arch)) + return -ENXIO; + + switch (reg->id) { + case KVM_REG_LOONGARCH_LBT_SCR0: + *v = vcpu->arch.lbt.scr0; + break; + case KVM_REG_LOONGARCH_LBT_SCR1: + *v = vcpu->arch.lbt.scr1; + break; + case KVM_REG_LOONGARCH_LBT_SCR2: + *v = vcpu->arch.lbt.scr2; + break; + case KVM_REG_LOONGARCH_LBT_SCR3: + *v = vcpu->arch.lbt.scr3; + break; + case KVM_REG_LOONGARCH_LBT_EFLAGS: + *v = vcpu->arch.lbt.eflags; + break; + case KVM_REG_LOONGARCH_LBT_FTOP: + *v = vcpu->arch.fpu.ftop; + break; + default: + ret = -EINVAL; + break; + } + break; case KVM_REG_LOONGARCH_KVM: switch (reg->id) { case KVM_REG_LOONGARCH_COUNTER: @@ -684,6 +712,34 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu, 2 * kvm_get_pmu_num(&vcpu->arch) + 1; } break; + case KVM_REG_LOONGARCH_LBT: + if (!kvm_guest_has_lbt(&vcpu->arch)) + return -ENXIO; + + switch (reg->id) { + case KVM_REG_LOONGARCH_LBT_SCR0: + vcpu->arch.lbt.scr0 = v; + break; + case KVM_REG_LOONGARCH_LBT_SCR1: + vcpu->arch.lbt.scr1 = v; + break; 
+ case KVM_REG_LOONGARCH_LBT_SCR2: + vcpu->arch.lbt.scr2 = v; + break; + case KVM_REG_LOONGARCH_LBT_SCR3: + vcpu->arch.lbt.scr3 = v; + break; + case KVM_REG_LOONGARCH_LBT_EFLAGS: + vcpu->arch.lbt.eflags = v; + break; + case KVM_REG_LOONGARCH_LBT_FTOP: + vcpu->arch.fpu.ftop = v; + break; + default: + ret = -EINVAL; + break; + } + break; case KVM_REG_LOONGARCH_KVM: switch (reg->id) { case KVM_REG_LOONGARCH_COUNTER: -- Gitee From cc5bda3344a64bdbeb31c3b3736ccee717c051c4 Mon Sep 17 00:00:00 2001 From: Song Gao Date: Thu, 12 Sep 2024 20:53:40 +0800 Subject: [PATCH 1581/2138] LoongArch: KVM: Add PMU support for guest ANBZ: #11464 commit f4e40ea9f78fed585e953bf38575e47d24922e1a upstream. On LoongArch, the host and guest have their own PMU CSRs registers and they share PMU hardware resources. A set of PMU CSRs consists of a CTRL register and a CNTR register. We can set which PMU CSRs are used by the guest by writing to the GCFG register [24:26] bits. On KVM side: - Save the host PMU CSRs into structure kvm_context. - If the host supports the PMU feature. - When entering guest mode, save the host PMU CSRs and restore the guest PMU CSRs. - When exiting guest mode, save the guest PMU CSRs and restore the host PMU CSRs. 
Reviewed-by: Bibo Mao Signed-off-by: Song Gao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/include/asm/kvm_csr.h | 7 +- arch/loongarch/include/asm/kvm_host.h | 24 ++--- arch/loongarch/include/uapi/asm/kvm.h | 5 +- arch/loongarch/kvm/vcpu.c | 141 ++++++++++++++++++++++++-- arch/loongarch/kvm/vm.c | 4 + 5 files changed, 149 insertions(+), 32 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h index 0a52f115a87e..4a76ce796f1f 100644 --- a/arch/loongarch/include/asm/kvm_csr.h +++ b/arch/loongarch/include/asm/kvm_csr.h @@ -181,6 +181,7 @@ __BUILD_GCSR_OP(tlbidx) #define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid)) #define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid)) + #define kvm_read_clear_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_write(0, gid)) int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu); @@ -210,9 +211,7 @@ static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr, csr->csrs[gid] |= val & _mask; } -#define KVM_PMU_EVENT_ENABLED (CSR_PERFCTRL_PLV0 | \ - CSR_PERFCTRL_PLV1 | \ - CSR_PERFCTRL_PLV2 | \ - CSR_PERFCTRL_PLV3) +#define KVM_PMU_EVENT_ENABLED (CSR_PERFCTRL_PLV0 | CSR_PERFCTRL_PLV1 | \ + CSR_PERFCTRL_PLV2 | CSR_PERFCTRL_PLV3) #endif /* __ASM_LOONGARCH_KVM_CSR_H__ */ diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index ff93a3e6882b..31884b3d4e17 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -35,6 +35,7 @@ #define KVM_HALT_POLL_NS_DEFAULT 500000 #define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0) #define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1) +#define KVM_REQ_PMU KVM_ARCH_REQ(2) #define KVM_GUESTDBG_SW_BP_MASK \ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP) @@ -90,12 +91,11 @@ struct kvm_arch_memory_slot { unsigned long flags; }; -#define 
KVM_REQ_PMU KVM_ARCH_REQ(0) #define HOST_MAX_PMNUM 16 struct kvm_context { unsigned long vpid_cache; struct kvm_vcpu *last_vcpu; - /* Save host pmu csr */ + /* Host PMU CSR */ u64 perf_ctrl[HOST_MAX_PMNUM]; u64 perf_cntr[HOST_MAX_PMNUM]; }; @@ -171,8 +171,9 @@ enum emulation_result { #define KVM_LARCH_LSX (0x1 << 1) #define KVM_LARCH_LASX (0x1 << 2) #define KVM_LARCH_LBT (0x1 << 3) -#define KVM_LARCH_SWCSR_LATEST (0x1 << 4) -#define KVM_LARCH_HWCSR_USABLE (0x1 << 5) +#define KVM_LARCH_PMU (0x1 << 4) +#define KVM_LARCH_SWCSR_LATEST (0x1 << 5) +#define KVM_LARCH_HWCSR_USABLE (0x1 << 6) struct kvm_vcpu_arch { /* @@ -255,9 +256,6 @@ struct kvm_vcpu_arch { u64 last_steal; struct gfn_to_hva_cache cache; } st; - /* Save host pmu csr */ - u64 perf_ctrl[4]; - u64 perf_cntr[4]; }; static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg) @@ -285,19 +283,19 @@ static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch) return arch->cpucfg[2] & CPUCFG2_LASX; } -static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch) +static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch) { - return arch->cpucfg[LOONGARCH_CPUCFG6] & CPUCFG6_PMP; + return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT); } -static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch) +static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch) { - return (arch->cpucfg[LOONGARCH_CPUCFG6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT; + return arch->cpucfg[6] & CPUCFG6_PMP; } -static inline bool kvm_guest_has_lbt(struct kvm_vcpu_arch *arch) +static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch) { - return arch->cpucfg[2] & (CPUCFG2_X86BT | CPUCFG2_ARMBT | CPUCFG2_MIPSBT); + return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT; } /* Debug: dump vcpu state */ diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index 6ca6700c5c51..cd9f46f30fc0 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ 
b/arch/loongarch/include/uapi/asm/kvm.h @@ -102,16 +102,13 @@ struct kvm_fpu { #define KVM_LOONGARCH_VM_FEAT_X86BT 2 #define KVM_LOONGARCH_VM_FEAT_ARMBT 3 #define KVM_LOONGARCH_VM_FEAT_MIPSBT 4 +#define KVM_LOONGARCH_VM_FEAT_PMU 5 /* Device Control API on vcpu fd */ #define KVM_LOONGARCH_VCPU_CPUCFG 0 #define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1 #define KVM_LOONGARCH_VCPU_PVTIME_GPA 0 -/* Device Control API on vm fd */ -#define KVM_LOONGARCH_VM_FEAT_CTRL 1000 -#define KVM_LOONGARCH_VM_FEAT_PMU 1000 - struct kvm_debug_exit_arch { }; diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index e3dcfd3ae3e2..6124eefdc8a9 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -32,6 +32,126 @@ const struct kvm_stats_header kvm_vcpu_stats_header = { sizeof(kvm_vcpu_stats_desc), }; +static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu) +{ + struct kvm_context *context; + + context = this_cpu_ptr(vcpu->kvm->arch.vmcs); + context->perf_cntr[0] = read_csr_perfcntr0(); + context->perf_cntr[1] = read_csr_perfcntr1(); + context->perf_cntr[2] = read_csr_perfcntr2(); + context->perf_cntr[3] = read_csr_perfcntr3(); + context->perf_ctrl[0] = write_csr_perfctrl0(0); + context->perf_ctrl[1] = write_csr_perfctrl1(0); + context->perf_ctrl[2] = write_csr_perfctrl2(0); + context->perf_ctrl[3] = write_csr_perfctrl3(0); +} + +static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu) +{ + struct kvm_context *context; + + context = this_cpu_ptr(vcpu->kvm->arch.vmcs); + write_csr_perfcntr0(context->perf_cntr[0]); + write_csr_perfcntr1(context->perf_cntr[1]); + write_csr_perfcntr2(context->perf_cntr[2]); + write_csr_perfcntr3(context->perf_cntr[3]); + write_csr_perfctrl0(context->perf_ctrl[0]); + write_csr_perfctrl1(context->perf_ctrl[1]); + write_csr_perfctrl2(context->perf_ctrl[2]); + write_csr_perfctrl3(context->perf_ctrl[3]); +} + + +static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + 
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); +} + +static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); +} + +static int kvm_own_pmu(struct kvm_vcpu *vcpu) +{ + unsigned long val; + + if (!kvm_guest_has_pmu(&vcpu->arch)) + return -EINVAL; + + kvm_save_host_pmu(vcpu); + + /* Set PM0-PM(num) to guest */ + val = read_csr_gcfg() & ~CSR_GCFG_GPERF; + val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; + write_csr_gcfg(val); + + kvm_restore_guest_pmu(vcpu); + + return 0; +} + +static void kvm_lose_pmu(struct kvm_vcpu *vcpu) +{ + unsigned long val; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU)) + return; + + kvm_save_guest_pmu(vcpu); + + /* Disable pmu access from guest */ + write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF); + + /* + * Clear KVM_LARCH_PMU if the guest is not using PMU CSRs when + * exiting the guest, so that the next time trap into the guest. + * We don't need to deal with PMU CSRs contexts. 
+ */ + val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + if (!(val & KVM_PMU_EVENT_ENABLED)) + vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU; + + kvm_restore_host_pmu(vcpu); +} + +static void kvm_restore_pmu(struct kvm_vcpu *vcpu) +{ + if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU)) + kvm_make_request(KVM_REQ_PMU, vcpu); +} + +static void kvm_check_pmu(struct kvm_vcpu *vcpu) +{ + if (kvm_check_request(KVM_REQ_PMU, vcpu)) { + kvm_own_pmu(vcpu); + vcpu->arch.aux_inuse |= KVM_LARCH_PMU; + } +} + static void kvm_update_stolen_time(struct kvm_vcpu *vcpu) { u32 version; @@ -480,10 +600,11 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) { unsigned long val; - val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); - val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); - val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); - val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + if (val & KVM_PMU_EVENT_ENABLED) kvm_make_request(KVM_REQ_PMU, vcpu); } @@ -556,7 +677,7 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v) static int kvm_check_cpucfg(int id, u64 val) { - int ret, host; + int ret; u64 mask = 0; ret = _kvm_get_cpucfg_mask(id, &mask); @@ -584,9 +705,8 @@ static int kvm_check_cpucfg(int id, u64 val) return 0; case LOONGARCH_CPUCFG6: if (val & CPUCFG6_PMP) { - host = read_cpucfg(LOONGARCH_CPUCFG6); + u32 host = read_cpucfg(LOONGARCH_CPUCFG6); if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS)) - /* Guest pmbits must be the same with host */ return -EINVAL; if ((val & CPUCFG6_PMNUM) > (host & 
CPUCFG6_PMNUM)) return -EINVAL; @@ -707,10 +827,9 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu, if (ret) break; vcpu->arch.cpucfg[id] = (u32)v; - if (id == LOONGARCH_CPUCFG6) { - vcpu->arch.max_pmu_csrid = LOONGARCH_CSR_PERFCTRL0 + - 2 * kvm_get_pmu_num(&vcpu->arch) + 1; - } + if (id == LOONGARCH_CPUCFG6) + vcpu->arch.max_pmu_csrid = + LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1; break; case KVM_REG_LOONGARCH_LBT: if (!kvm_guest_has_lbt(&vcpu->arch)) diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 486b48510414..7a5a9c2bec80 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -127,6 +127,10 @@ static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr if (cpu_has_lbt_mips) return 0; return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_PMU: + if (cpu_has_pmp) + return 0; + return -ENXIO; default: return -ENXIO; } -- Gitee From f2f29fc5a197309a47a2c352fcb7d2bb4b89441c Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Thu, 12 Sep 2024 20:53:40 +0800 Subject: [PATCH 1582/2138] LoongArch: KVM: Enable paravirt feature control from VMM ANBZ: #11464 commit cdc118f802410525cca872e0861a14d76d12c574 upstream. Export kernel paravirt features to user space, so that VMM can control each single paravirt feature. By default paravirt features will be the same with kvm supported features if VMM does not set it. Also a new feature KVM_FEATURE_VIRT_EXTIOI is added which can be set from user space. This feature indicates that the virt EIOINTC can route interrupts to 256 vCPUs, rather than 4 vCPUs like with real HW. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/include/asm/kvm_host.h | 7 +++ arch/loongarch/include/asm/kvm_para.h | 2 + arch/loongarch/include/asm/kvm_vcpu.h | 5 +++ arch/loongarch/include/asm/loongarch.h | 9 +--- arch/loongarch/include/uapi/asm/kvm.h | 16 ++++--- arch/loongarch/include/uapi/asm/kvm_para.h | 21 +++++++++ arch/loongarch/kernel/paravirt.c | 8 ++-- arch/loongarch/kvm/exit.c | 19 ++++---- arch/loongarch/kvm/vcpu.c | 52 +++++++++++++++++----- arch/loongarch/kvm/vm.c | 13 ++++++ 10 files changed, 115 insertions(+), 37 deletions(-) create mode 100644 arch/loongarch/include/uapi/asm/kvm_para.h diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 31884b3d4e17..a053e574ba42 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -141,6 +141,8 @@ struct kvm_arch { unsigned int root_level; spinlock_t phyid_map_lock; struct kvm_phyid_map *phyid_map; + /* Enabled PV features */ + unsigned long pv_features; s64 time_offset; struct kvm_context __percpu *vmcs; @@ -175,6 +177,11 @@ enum emulation_result { #define KVM_LARCH_SWCSR_LATEST (0x1 << 5) #define KVM_LARCH_HWCSR_USABLE (0x1 << 6) +#define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63) +#define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) | \ + BIT(KVM_FEATURE_STEAL_TIME) | \ + BIT(KVM_FEATURE_VIRT_EXTIOI)) + struct kvm_vcpu_arch { /* * Switch pointer-to-function type to unsigned long diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index a0ed1aacb8b4..4866924b92f3 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -2,6 +2,8 @@ #ifndef _ASM_LOONGARCH_KVM_PARA_H #define _ASM_LOONGARCH_KVM_PARA_H +#include + /* * Hypercall code field */ diff --git a/arch/loongarch/include/asm/kvm_vcpu.h 
b/arch/loongarch/include/asm/kvm_vcpu.h index ca067bf6dd0a..d7e8f7d50ee0 100644 --- a/arch/loongarch/include/asm/kvm_vcpu.h +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -130,4 +130,9 @@ static inline bool kvm_pvtime_supported(void) return !!sched_info_on(); } +static inline bool kvm_guest_has_pv_feature(struct kvm_vcpu *vcpu, unsigned int feature) +{ + return vcpu->kvm->arch.pv_features & BIT(feature); +} + #endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */ diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h index f1fecf9eea8d..7578a10a32eb 100644 --- a/arch/loongarch/include/asm/loongarch.h +++ b/arch/loongarch/include/asm/loongarch.h @@ -161,15 +161,8 @@ /* * cpucfg index area: 0x40000000 -- 0x400000ff - * SW emulation for KVM hypervirsor + * SW emulation for KVM hypervirsor, see arch/loongarch/include/uapi/asm/kvm_para.h */ -#define CPUCFG_KVM_BASE 0x40000000UL -#define CPUCFG_KVM_SIZE 0x100 -#define CPUCFG_KVM_SIG CPUCFG_KVM_BASE -#define KVM_SIGNATURE "KVM\0" -#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4) -#define KVM_FEATURE_PV_IPI BIT(1) -#define KVM_FEATURE_STEAL_TIME BIT(2) #ifndef __ASSEMBLY__ diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index cd9f46f30fc0..d619b943d20d 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -96,13 +96,15 @@ struct kvm_fpu { #define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) /* Device Control API on vm fd */ -#define KVM_LOONGARCH_VM_FEAT_CTRL 0 -#define KVM_LOONGARCH_VM_FEAT_LSX 0 -#define KVM_LOONGARCH_VM_FEAT_LASX 1 -#define KVM_LOONGARCH_VM_FEAT_X86BT 2 -#define KVM_LOONGARCH_VM_FEAT_ARMBT 3 -#define KVM_LOONGARCH_VM_FEAT_MIPSBT 4 -#define KVM_LOONGARCH_VM_FEAT_PMU 5 +#define KVM_LOONGARCH_VM_FEAT_CTRL 0 +#define KVM_LOONGARCH_VM_FEAT_LSX 0 +#define KVM_LOONGARCH_VM_FEAT_LASX 1 +#define KVM_LOONGARCH_VM_FEAT_X86BT 2 +#define KVM_LOONGARCH_VM_FEAT_ARMBT 3 +#define 
KVM_LOONGARCH_VM_FEAT_MIPSBT 4 +#define KVM_LOONGARCH_VM_FEAT_PMU 5 +#define KVM_LOONGARCH_VM_FEAT_PV_IPI 6 +#define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7 /* Device Control API on vcpu fd */ #define KVM_LOONGARCH_VCPU_CPUCFG 0 diff --git a/arch/loongarch/include/uapi/asm/kvm_para.h b/arch/loongarch/include/uapi/asm/kvm_para.h new file mode 100644 index 000000000000..b0604aa9b4bb --- /dev/null +++ b/arch/loongarch/include/uapi/asm/kvm_para.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_KVM_PARA_H +#define _UAPI_ASM_KVM_PARA_H + +#include + +/* + * CPUCFG index area: 0x40000000 -- 0x400000ff + * SW emulation for KVM hypervirsor + */ +#define CPUCFG_KVM_BASE 0x40000000 +#define CPUCFG_KVM_SIZE 0x100 +#define CPUCFG_KVM_SIG (CPUCFG_KVM_BASE + 0) +#define KVM_SIGNATURE "KVM\0" +#define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4) +#define KVM_FEATURE_IPI 1 +#define KVM_FEATURE_STEAL_TIME 2 +/* BIT 24 - 31 are features configurable by user space vmm */ +#define KVM_FEATURE_VIRT_EXTIOI 24 + +#endif /* _UAPI_ASM_KVM_PARA_H */ diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index 658333b889b8..6d2dbc0b8feb 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -269,7 +269,7 @@ int __init pv_ipi_init(void) if (!cpu_has_hypervisor) return 0; - if (!kvm_para_available()) + if (!(feature & BIT(KVM_FEATURE_IPI))) return 0; /* @@ -356,7 +356,7 @@ static int pv_enable_steal_time(void) } addr |= KVM_STEAL_PHYS_VALID; - kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr); + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), addr); return 0; } @@ -364,7 +364,7 @@ static int pv_enable_steal_time(void) static void pv_disable_steal_time(void) { if (has_steal_clock) - kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0); + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), 0); } #ifdef CONFIG_SMP @@ -416,7 +416,7 
@@ int __init pv_time_init(void) return 0; feature = read_cpucfg(CPUCFG_KVM_FEATURE); - if (!(feature & KVM_FEATURE_STEAL_TIME)) + if (!(feature & BIT(KVM_FEATURE_STEAL_TIME))) return 0; has_steal_clock = 1; diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 47ab50253f82..27b55f5b015d 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -259,9 +259,7 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) vcpu->arch.gprs[rd] = 0; break; case CPUCFG_KVM_FEATURE: - ret = KVM_FEATURE_IPI; - if (kvm_pvtime_supported()) - ret |= KVM_FEATURE_STEAL_TIME; + ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; vcpu->arch.gprs[rd] = ret; break; default: @@ -835,18 +833,23 @@ static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu) */ static void kvm_handle_service(struct kvm_vcpu *vcpu) { + long ret = KVM_HCALL_INVALID_CODE; unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0); - long ret; switch (func) { case KVM_HCALL_FUNC_IPI: - kvm_send_pv_ipi(vcpu); - ret = KVM_HCALL_SUCCESS; + if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) { + kvm_send_pv_ipi(vcpu); + ret = KVM_HCALL_SUCCESS; + } + break; + case KVM_HCALL_FUNC_NOTIFY: + if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)) + ret = kvm_save_notify(vcpu); break; default: - ret = KVM_HCALL_INVALID_CODE; break; - }; + } kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret); } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 6124eefdc8a9..b8d0bc516167 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -954,6 +954,8 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, case LOONGARCH_CPUCFG2: case LOONGARCH_CPUCFG6: return 0; + case CPUCFG_KVM_FEATURE: + return 0; default: return -ENXIO; } @@ -964,8 +966,8 @@ static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu, static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { - if (!kvm_pvtime_supported() || - 
attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME) + || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) return -ENXIO; return 0; @@ -997,9 +999,18 @@ static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu, uint64_t val; uint64_t __user *uaddr = (uint64_t __user *)attr->addr; - ret = _kvm_get_cpucfg_mask(attr->attr, &val); - if (ret) - return ret; + switch (attr->attr) { + case 0 ... (KVM_MAX_CPUCFG_REGS - 1): + ret = _kvm_get_cpucfg_mask(attr->attr, &val); + if (ret) + return ret; + break; + case CPUCFG_KVM_FEATURE: + val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; + break; + default: + return -ENXIO; + } put_user(val, uaddr); @@ -1012,8 +1023,8 @@ static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu, u64 gpa; u64 __user *user = (u64 __user *)attr->addr; - if (!kvm_pvtime_supported() || - attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME) + || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) return -ENXIO; gpa = vcpu->arch.st.guest_addr; @@ -1045,7 +1056,28 @@ static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu, static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) { - return -ENXIO; + u64 val, valid; + u64 __user *user = (u64 __user *)attr->addr; + struct kvm *kvm = vcpu->kvm; + + switch (attr->attr) { + case CPUCFG_KVM_FEATURE: + if (get_user(val, user)) + return -EFAULT; + + valid = LOONGARCH_PV_FEAT_MASK; + if (val & ~valid) + return -EINVAL; + + /* All vCPUs need set the same PV features */ + if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED) + && ((kvm->arch.pv_features & valid) != val)) + return -EINVAL; + kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED; + return 0; + default: + return -ENXIO; + } } static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu, @@ -1055,8 +1087,8 @@ static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu, u64 gpa, __user 
*user = (u64 __user *)attr->addr; struct kvm *kvm = vcpu->kvm; - if (!kvm_pvtime_supported() || - attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME) + || attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) return -ENXIO; if (get_user(gpa, user)) diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 7a5a9c2bec80..6995a36bd36e 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -5,6 +5,7 @@ #include #include +#include #include #include @@ -41,6 +42,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) } kvm_init_vmcs(kvm); + + /* Enable all PV features by default */ + kvm->arch.pv_features = BIT(KVM_FEATURE_IPI); + if (kvm_pvtime_supported()) + kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME); + kvm->arch.gpa_size = BIT(cpu_vabits - 1); kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1; kvm->arch.invalid_ptes[0] = 0; @@ -131,6 +138,12 @@ static int kvm_vm_feature_has_attr(struct kvm *kvm, struct kvm_device_attr *attr if (cpu_has_pmp) return 0; return -ENXIO; + case KVM_LOONGARCH_VM_FEAT_PV_IPI: + return 0; + case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME: + if (kvm_pvtime_supported()) + return 0; + return -ENXIO; default: return -ENXIO; } -- Gitee From 0e7110b6d979d8195a85424ad4cac54278998134 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Thu, 12 Sep 2024 22:56:14 +0800 Subject: [PATCH 1583/2138] LoongArch: KVM: Implement function kvm_para_has_feature() ANBZ: #11464 commit 3abb708ec0be25da16a1ee9f1ab5cbc93f3256f3 upstream. Implement function kvm_para_has_feature() to detect supported paravirt features. It can be used by device driver to detect and enable paravirt features, such as the EIOINTC irqchip driver is able to detect feature KVM_FEATURE_VIRT_EXTIOI and do some optimization. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4025 --- arch/loongarch/include/asm/kvm_para.h | 10 ++++++ arch/loongarch/kernel/paravirt.c | 46 +++++++++++++-------------- 2 files changed, 33 insertions(+), 23 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index 4866924b92f3..dbea66d7eaa6 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -157,10 +157,20 @@ static __always_inline long kvm_hypercall5(u64 fid, return ret; } +#ifdef CONFIG_PARAVIRT +bool kvm_para_available(void); +unsigned int kvm_arch_para_features(void); +#else +static inline bool kvm_para_available(void) +{ + return false; +} + static inline unsigned int kvm_arch_para_features(void) { return 0; } +#endif static inline unsigned int kvm_arch_para_hints(void) { diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index 6d2dbc0b8feb..92e37d0e6b22 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -249,11 +249,14 @@ static int pv_cpu_down_prepare(unsigned int cpu) } #endif -static bool kvm_para_available(void) +bool kvm_para_available(void) { static int hypervisor_type; int config; + if (!cpu_has_hypervisor) + return false; + if (!hypervisor_type) { config = read_cpucfg(CPUCFG_KVM_SIG); if (!memcmp(&config, KVM_SIGNATURE, 4)) @@ -263,28 +266,31 @@ static bool kvm_para_available(void) return hypervisor_type == HYPERVISOR_KVM; } -int __init pv_ipi_init(void) +unsigned int kvm_arch_para_features(void) { - int feature; + static unsigned int feature; - if (!cpu_has_hypervisor) + if (!kvm_para_available()) return 0; - if (!(feature & BIT(KVM_FEATURE_IPI))) + + if (!feature) + feature = read_cpucfg(CPUCFG_KVM_FEATURE); + + return feature; +} + +int __init pv_ipi_init(void) +{ + if (!kvm_para_has_feature(KVM_FEATURE_IPI)) return 0; - /* - * 
check whether KVM hypervisor supports pv_ipi or not - */ - feature = read_cpucfg(CPUCFG_KVM_FEATURE); #ifdef CONFIG_SMP - if (feature & KVM_FEATURE_PV_IPI) { - smp_ops.init_ipi = pv_init_ipi; - smp_ops.send_ipi_single = pv_send_ipi_single; - smp_ops.send_ipi_mask = pv_send_ipi_mask; - } + smp_ops.init_ipi = pv_init_ipi; + smp_ops.send_ipi_single = pv_send_ipi_single; + smp_ops.send_ipi_mask = pv_send_ipi_mask; #endif - return 1; + return 0; } static void pv_cpu_reboot(void *unused) @@ -408,15 +414,9 @@ static struct notifier_block pv_reboot_nb = { int __init pv_time_init(void) { - int r, feature; + int r; - if (!cpu_has_hypervisor) - return 0; - if (!kvm_para_available()) - return 0; - - feature = read_cpucfg(CPUCFG_KVM_FEATURE); - if (!(feature & BIT(KVM_FEATURE_STEAL_TIME))) + if (!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) return 0; has_steal_clock = 1; -- Gitee From 82fd1d9089dff629347bbb74c7ac82657cf59ace Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 28 Aug 2024 12:59:50 +0800 Subject: [PATCH 1584/2138] Loongarch: KVM: Add KVM hypercalls documentation for LoongArch ANBZ: #11464 commit bc6cb62007fd6e321385d2df6fae3c38b53c0a82 upstream. Add documentation topic for using pv_virt when running as a guest on KVM hypervisor. 
Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Co-developed-by: Mingcong Bai Signed-off-by: Mingcong Bai Link: https://lore.kernel.org/all/5c338084b1bcccc1d57dce9ddb1e7081@aosc.io/ Signed-off-by: Dandan Zhang [jc: fixed htmldocs build error] Signed-off-by: Jonathan Corbet Link: https://lore.kernel.org/r/4769C036576F8816+20240828045950.3484113-1-zhangdandan@uniontech.com Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4030 --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index be976c69839c..0166f496d16b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11546,6 +11546,7 @@ L: kvm@vger.kernel.org L: loongarch@lists.linux.dev S: Maintained T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git +F: Documentation/virt/kvm/loongarch/ F: arch/loongarch/include/asm/kvm* F: arch/loongarch/include/uapi/asm/kvm* F: arch/loongarch/kvm/ -- Gitee From 53fdd93c0e55e5e52767651a57dc676b6ff5c8a5 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Mon, 6 May 2024 22:00:47 +0800 Subject: [PATCH 1585/2138] LoongArch: KVM: Add cpucfg area for kvm hypervisor ANBZ: #11464 commit 9753d3037964fffa5c57de8c57168dc1a4832dd4 upstream. Instruction cpucfg can be used to get processor features. And there is a trap exception when it is executed in VM mode, and also it can be used to provide cpu features to VM. On real hardware cpucfg area 0 - 20 is used by now. Here one specified area 0x40000000 -- 0x400000ff is used for KVM hypervisor to provide PV features, and the area can be extended for other hypervisors in future. This area will never be used for real HW, it is only used by software. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4030 --- arch/loongarch/kvm/exit.c | 90 ++++++++++++++++++--------------------- 1 file changed, 42 insertions(+), 48 deletions(-) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 27b55f5b015d..19cb22da35de 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -21,6 +21,47 @@ #include #include "trace.h" +static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) +{ + int rd, rj; + unsigned int index, ret; + + if (inst.reg2_format.opcode != cpucfg_op) + return EMULATE_FAIL; + + rd = inst.reg2_format.rd; + rj = inst.reg2_format.rj; + ++vcpu->stat.cpucfg_exits; + index = vcpu->arch.gprs[rj]; + + /* + * By LoongArch Reference Manual 2.2.10.5 + * Return value is 0 for undefined CPUCFG index + * + * Disable preemption since hw gcsr is accessed + */ + preempt_disable(); + switch (index) { + case 0 ... 
(KVM_MAX_CPUCFG_REGS - 1): + vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index]; + break; + case CPUCFG_KVM_SIG: + /* CPUCFG emulation between 0x40000000 -- 0x400000ff */ + vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE; + break; + case CPUCFG_KVM_FEATURE: + ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; + vcpu->arch.gprs[rd] = ret; + break; + default: + vcpu->arch.gprs[rd] = 0; + break; + } + preempt_enable(); + + return EMULATE_DONE; +} + static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid) { unsigned long val = 0; @@ -225,52 +266,6 @@ int kvm_emu_idle(struct kvm_vcpu *vcpu) return EMULATE_DONE; } -static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) -{ - int rd, rj; - unsigned int index, ret; - unsigned long plv; - - rd = inst.reg2_format.rd; - rj = inst.reg2_format.rj; - ++vcpu->stat.cpucfg_exits; - index = vcpu->arch.gprs[rj]; - - /* - * By LoongArch Reference Manual 2.2.10.5 - * Return value is 0 for undefined cpucfg index - * - * Disable preemption since hw gcsr is accessed - */ - preempt_disable(); - plv = kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD) >> CSR_CRMD_PLV_SHIFT; - switch (index) { - case 0 ... 
(KVM_MAX_CPUCFG_REGS - 1): - vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index]; - break; - case CPUCFG_KVM_SIG: - /* - * Cpucfg emulation between 0x40000000 -- 0x400000ff - * Return value with 0 if executed in user mode - */ - if ((plv & CSR_CRMD_PLV) == PLV_KERN) - vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE; - else - vcpu->arch.gprs[rd] = 0; - break; - case CPUCFG_KVM_FEATURE: - ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; - vcpu->arch.gprs[rd] = ret; - break; - default: - vcpu->arch.gprs[rd] = 0; - break; - } - - preempt_enable(); - return EMULATE_DONE; -} - static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) { unsigned long curr_pc; @@ -287,8 +282,7 @@ static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) er = EMULATE_FAIL; switch (((inst.word >> 24) & 0xff)) { case 0x0: /* CPUCFG GSPR */ - if (inst.reg2_format.opcode == cpucfg_op) - er = kvm_emu_cpucfg(vcpu, inst); + er = kvm_emu_cpucfg(vcpu, inst); break; case 0x4: /* CSR{RD,WR,XCHG} GSPR */ er = kvm_handle_csr(vcpu, inst); -- Gitee From b1ee3f37b495ae190508b65d6972c62619b3e561 Mon Sep 17 00:00:00 2001 From: David Matlack Date: Fri, 3 May 2024 11:17:32 -0700 Subject: [PATCH 1586/2138] KVM: Introduce vcpu->wants_to_run ANBZ: #11464 commit a6816314af5749cd88944bfdceb270c627cdf348 upstream. Introduce vcpu->wants_to_run to indicate when a vCPU is in its core run loop, i.e. when the vCPU is running the KVM_RUN ioctl and immediate_exit was not set. Replace all references to vcpu->run->immediate_exit with !vcpu->wants_to_run to avoid TOCTOU races with userspace. For example, a malicious userspace could invoked KVM_RUN with immediate_exit=true and then after KVM reads it to set wants_to_run=false, flip it to false. This would result in the vCPU running in KVM_RUN with wants_to_run=false. This wouldn't cause any real bugs today but is a dangerous landmine. 
Signed-off-by: David Matlack Link: https://lore.kernel.org/r/20240503181734.1467938-2-dmatlack@google.com Signed-off-by: Sean Christopherson Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4030 --- arch/arm64/kvm/arm.c | 2 +- arch/loongarch/kvm/vcpu.c | 2 +- arch/mips/kvm/mips.c | 2 +- arch/powerpc/kvm/powerpc.c | 2 +- arch/riscv/kvm/vcpu.c | 2 +- arch/s390/kvm/kvm-s390.c | 2 +- arch/x86/kvm/x86.c | 4 ++-- include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 3 +++ 9 files changed, 12 insertions(+), 8 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index ffdc2c4d07ee..decc5f2af4d4 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -906,7 +906,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) vcpu_load(vcpu); - if (run->immediate_exit) { + if (!vcpu->wants_to_run) { ret = -EINTR; goto out; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index b8d0bc516167..af0922339f5b 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -1735,7 +1735,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_complete_iocsr_read(vcpu, run); } - if (run->immediate_exit) + if (!vcpu->wants_to_run) return r; /* Clear exit_reason */ diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 231ac052b506..f1a99962027a 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -436,7 +436,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) vcpu->mmio_needed = 0; } - if (vcpu->run->immediate_exit) + if (!vcpu->wants_to_run) goto out; lose_fpu(1); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 6cef200c2404..0e35668b2830 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -1858,7 +1858,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_sigset_activate(vcpu); - if (run->immediate_exit) + if (!vcpu->wants_to_run) r = -EINTR; else r = kvmppc_vcpu_run(vcpu); diff --git 
a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 82229db1ce73..4870e4658466 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -654,7 +654,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) return ret; } - if (run->immediate_exit) { + if (!vcpu->wants_to_run) { kvm_vcpu_srcu_read_unlock(vcpu); return -EINTR; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 348d030d2660..7c2308bae00c 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5048,7 +5048,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) if (vcpu->kvm->arch.pv.dumping) return -EINVAL; - if (kvm_run->immediate_exit) + if (!vcpu->wants_to_run) return -EINTR; if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS || diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7a044c4427f3..d8e5d5392c32 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11176,7 +11176,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_vcpu_srcu_read_lock(vcpu); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { - if (kvm_run->immediate_exit) { + if (!vcpu->wants_to_run) { r = -EINTR; goto out; } @@ -11254,7 +11254,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) WARN_ON_ONCE(vcpu->mmio_needed); } - if (kvm_run->immediate_exit) { + if (!vcpu->wants_to_run) { r = -EINTR; goto out; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f3f8e6112a7e..b97f9880af18 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -378,6 +378,7 @@ struct kvm_vcpu { bool dy_eligible; } spin_loop; #endif + bool wants_to_run; bool preempted; bool ready; struct kvm_vcpu_arch arch; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 3a14fe491050..4cd23e014e18 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4167,7 +4167,10 @@ static long kvm_vcpu_ioctl(struct file *filp, vcpu->stat.pid = current->pid; #endif } + vcpu->wants_to_run = !READ_ONCE(vcpu->run->immediate_exit); r = 
kvm_arch_vcpu_ioctl_run(vcpu); + vcpu->wants_to_run = false; + trace_kvm_userspace_exit(vcpu->run->exit_reason, r); break; } -- Gitee From 25fe881776d186164ca2a12ab9185ab9bfcea18f Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 21 May 2024 18:40:11 -0700 Subject: [PATCH 1587/2138] KVM: Delete the now unused kvm_arch_sched_in() ANBZ: #11464 commit 2a27c431400797e0044872283d1971aa372fcd3a upstream. Delete kvm_arch_sched_in() now that all implementations are nops. Reviewed-by: Bibo Mao Acked-by: Kai Huang Link: https://lore.kernel.org/r/20240522014013.1672962-5-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4030 --- arch/arm64/include/asm/kvm_host.h | 1 - arch/loongarch/include/asm/kvm_host.h | 2 +- arch/mips/include/asm/kvm_host.h | 1 - arch/powerpc/include/asm/kvm_host.h | 1 - arch/riscv/include/asm/kvm_host.h | 1 - arch/s390/include/asm/kvm_host.h | 1 - arch/x86/kvm/pmu.c | 6 +++--- arch/x86/kvm/x86.c | 12 ------------ include/linux/kvm_host.h | 2 -- virt/kvm/kvm_main.c | 1 - 10 files changed, 4 insertions(+), 24 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index a89b35070a35..b681c0b9f7d9 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1056,7 +1056,6 @@ static inline bool kvm_system_needs_idmapped_vectors(void) } static inline void kvm_arch_sync_events(struct kvm *kvm) {} -static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} void kvm_arm_init_debug(void); void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index a053e574ba42..cee0c39514fe 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -257,6 +257,7 @@ struct kvm_vcpu_arch { struct ipi_state ipi_state; /* cpucfg */ u32 
cpucfg[KVM_MAX_CPUCFG_REGS]; + /* paravirt steal time */ struct { u64 guest_addr; @@ -340,7 +341,6 @@ static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch) static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} -static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 54a85f1d4f2c..30d42657ac1b 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -892,7 +892,6 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {} static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} -static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 14ee0dece853..f9bd6b018cb0 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -879,7 +879,6 @@ struct kvm_vcpu_arch { static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} -static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} diff --git 
a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 1ebf20dfbaa6..098a63bfb3d9 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -247,7 +247,6 @@ struct kvm_vcpu_arch { }; static inline void kvm_arch_sync_events(struct kvm *kvm) {} -static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} #define KVM_ARCH_WANT_MMU_NOTIFIER diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index b039881c277a..b49bbcac635d 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -1045,7 +1045,6 @@ extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc); extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc); static inline void kvm_arch_sync_events(struct kvm *kvm) {} -static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {} static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index da2d82e3a873..a36be3f5812f 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -468,9 +468,9 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu) } /* - * Unused perf_events are only released if the corresponding MSRs - * weren't accessed during the last vCPU time slice. kvm_arch_sched_in - * triggers KVM_REQ_PMU if cleanup is needed. + * Release unused perf_events if the corresponding guest MSRs weren't + * accessed during the last vCPU time slice (need_cleanup is set when + * the vCPU is scheduled back in). 
*/ if (unlikely(pmu->need_cleanup)) kvm_pmu_cleanup(vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d8e5d5392c32..97ce30712c66 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -12358,18 +12358,6 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu); -void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) -{ - struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); - - vcpu->arch.l1tf_flush_l1d = true; - if (pmu->version && unlikely(pmu->event_count)) { - pmu->need_cleanup = true; - kvm_make_request(KVM_REQ_PMU, vcpu); - } - static_call(kvm_x86_sched_in)(vcpu, cpu); -} - void kvm_arch_free_vm(struct kvm *kvm) { kfree(to_kvm_hv(kvm)->hv_pa_pg); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index b97f9880af18..646d3d851486 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1453,8 +1453,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg); int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu); -void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); - void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 4cd23e014e18..7e8879de4db4 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -6090,7 +6090,6 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu) WRITE_ONCE(vcpu->ready, false); __this_cpu_write(kvm_running_vcpu, vcpu); - kvm_arch_sched_in(vcpu, cpu); kvm_arch_vcpu_load(vcpu, cpu); } -- Gitee From 1ff851a2d7a4bc3f5fbe2f2951dd1762134d3dec Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 27 Oct 2023 11:21:49 -0700 Subject: [PATCH 1588/2138] KVM: Convert KVM_ARCH_WANT_MMU_NOTIFIER to CONFIG_KVM_GENERIC_MMU_NOTIFIER ANBZ: #11464 commit f128cf8cfbecccf95e891ae90d9c917df5117c7a 
upstream. Convert KVM_ARCH_WANT_MMU_NOTIFIER into a Kconfig and select it where appropriate to effectively maintain existing behavior. Using a proper Kconfig will simplify building more functionality on top of KVM's mmu_notifier infrastructure. Add a forward declaration of kvm_gfn_range to kvm_types.h so that including arch/powerpc/include/asm/kvm_ppc.h's with CONFIG_KVM=n doesn't generate warnings due to kvm_gfn_range being undeclared. PPC defines hooks for PR vs. HV without guarding them via #ifdeffery, e.g. bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range); bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); Alternatively, PPC could forward declare kvm_gfn_range, but there's no good reason not to define it in common KVM. Acked-by: Anup Patel Signed-off-by: Sean Christopherson Reviewed-by: Paolo Bonzini Reviewed-by: Fuad Tabba Tested-by: Fuad Tabba Message-Id: <20231027182217.3615211-8-seanjc@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4030 --- arch/arm64/include/asm/kvm_host.h | 2 -- arch/arm64/kvm/Kconfig | 2 +- arch/loongarch/include/asm/kvm_host.h | 1 - arch/loongarch/kvm/Kconfig | 2 +- arch/mips/include/asm/kvm_host.h | 2 -- arch/mips/kvm/Kconfig | 2 +- arch/powerpc/include/asm/kvm_host.h | 2 -- arch/powerpc/kvm/Kconfig | 8 ++++---- arch/powerpc/kvm/powerpc.c | 5 +---- arch/riscv/include/asm/kvm_host.h | 2 -- arch/riscv/kvm/Kconfig | 2 +- arch/x86/include/asm/kvm_host.h | 2 -- arch/x86/kvm/Kconfig | 2 +- include/linux/kvm_host.h | 6 +++--- include/linux/kvm_types.h | 1 + virt/kvm/Kconfig | 4 ++++ virt/kvm/kvm_main.c | 10 +++++----- 17 files changed, 23 insertions(+), 32 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index b681c0b9f7d9..9d32ef2f7b90 
100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -921,8 +921,6 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events); -#define KVM_ARCH_WANT_MMU_NOTIFIER - void kvm_arm_halt_guest(struct kvm *kvm); void kvm_arm_resume_guest(struct kvm *kvm); diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 83c1e09be42e..1a777715199f 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -22,7 +22,7 @@ menuconfig KVM bool "Kernel-based Virtual Machine (KVM) support" depends on HAVE_KVM select KVM_GENERIC_HARDWARE_ENABLING - select MMU_NOTIFIER + select KVM_GENERIC_MMU_NOTIFIER select PREEMPT_NOTIFIERS select HAVE_KVM_CPU_RELAX_INTERCEPT select KVM_MMIO diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index cee0c39514fe..fb6fa6f01aec 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -314,7 +314,6 @@ void kvm_flush_tlb_all(void); void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa); int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write); -#define KVM_ARCH_WANT_MMU_NOTIFIER void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig index 461a465e49fd..3a03121ecfc0 100644 --- a/arch/loongarch/kvm/Kconfig +++ b/arch/loongarch/kvm/Kconfig @@ -30,10 +30,10 @@ config KVM select HAVE_KVM_MSI select KVM_GENERIC_DIRTYLOG_READ_PROTECT select KVM_GENERIC_HARDWARE_ENABLING + select KVM_GENERIC_MMU_NOTIFIER select KVM_MMIO select KVM_XFER_TO_GUEST_WORK select SCHED_INFO - select MMU_NOTIFIER select PREEMPT_NOTIFIERS help Support hosting virtualized guest machines using 
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 30d42657ac1b..6743a57c1ab4 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -810,8 +810,6 @@ int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn); pgd_t *kvm_pgd_alloc(void); void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); -#define KVM_ARCH_WANT_MMU_NOTIFIER - /* Emulation */ enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause); int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index a8cdba75f98d..c04987d2ed2e 100644 --- a/arch/mips/kvm/Kconfig +++ b/arch/mips/kvm/Kconfig @@ -25,7 +25,7 @@ config KVM select HAVE_KVM_EVENTFD select HAVE_KVM_VCPU_ASYNC_IOCTL select KVM_MMIO - select MMU_NOTIFIER + select KVM_GENERIC_MMU_NOTIFIER select INTERVAL_TREE select KVM_GENERIC_HARDWARE_ENABLING help diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index f9bd6b018cb0..4c509a65bb59 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -62,8 +62,6 @@ #include -#define KVM_ARCH_WANT_MMU_NOTIFIER - #define HPTEG_CACHE_NUM (1 << 15) #define HPTEG_HASH_BITS_PTE 13 #define HPTEG_HASH_BITS_PTE_LONG 12 diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 902611954200..b33358ee6424 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -42,7 +42,7 @@ config KVM_BOOK3S_64_HANDLER config KVM_BOOK3S_PR_POSSIBLE bool select KVM_MMIO - select MMU_NOTIFIER + select KVM_GENERIC_MMU_NOTIFIER config KVM_BOOK3S_HV_POSSIBLE bool @@ -85,7 +85,7 @@ config KVM_BOOK3S_64_HV tristate "KVM for POWER7 and later using hypervisor mode in host" depends on KVM_BOOK3S_64 && PPC_POWERNV select KVM_BOOK3S_HV_POSSIBLE - select MMU_NOTIFIER + select KVM_GENERIC_MMU_NOTIFIER select CMA help Support running unmodified book3s_64 guest kernels in @@ -194,7 +194,7 @@ 
config KVM_E500V2 depends on !CONTEXT_TRACKING_USER select KVM select KVM_MMIO - select MMU_NOTIFIER + select KVM_GENERIC_MMU_NOTIFIER help Support running unmodified E500 guest kernels in virtual machines on E500v2 host processors. @@ -211,7 +211,7 @@ config KVM_E500MC select KVM select KVM_MMIO select KVM_BOOKE_HV - select MMU_NOTIFIER + select KVM_GENERIC_MMU_NOTIFIER help Support running unmodified E500MC/E5500/E6500 guest kernels in virtual machines on E500MC/E5500/E6500 host processors. diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 0e35668b2830..5780a4b21839 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -634,11 +634,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_SYNC_MMU: #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE r = hv_enabled; -#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER) - r = 1; -#else - r = 0; #endif + BUILD_BUG_ON(!IS_ENABLED(CONFIG_KVM_GENERIC_MMU_NOTIFIER)); break; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE case KVM_CAP_PPC_HTAB_FD: diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 098a63bfb3d9..77971843dd02 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -248,8 +248,6 @@ struct kvm_vcpu_arch { static inline void kvm_arch_sync_events(struct kvm *kvm) {} -#define KVM_ARCH_WANT_MMU_NOTIFIER - #define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12 void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid, diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig index dfc237d7875b..ae2e05f050ec 100644 --- a/arch/riscv/kvm/Kconfig +++ b/arch/riscv/kvm/Kconfig @@ -30,7 +30,7 @@ config KVM select KVM_GENERIC_HARDWARE_ENABLING select KVM_MMIO select KVM_XFER_TO_GUEST_WORK - select MMU_NOTIFIER + select KVM_GENERIC_MMU_NOTIFIER select PREEMPT_NOTIFIERS help Support hosting virtualized guest machines. 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b96fe390a9c4..264c0393c837 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2141,8 +2141,6 @@ enum { # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, 0) #endif -#define KVM_ARCH_WANT_MMU_NOTIFIER - int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); int kvm_cpu_has_extint(struct kvm_vcpu *v); diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 463732963a15..d42e832ffb4a 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -24,7 +24,7 @@ config KVM depends on HIGH_RES_TIMERS depends on X86_LOCAL_APIC select PREEMPT_NOTIFIERS - select MMU_NOTIFIER + select KVM_GENERIC_MMU_NOTIFIER select HAVE_KVM_IRQCHIP select HAVE_KVM_PFNCACHE select HAVE_KVM_IRQFD diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 646d3d851486..91464161c800 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -254,7 +254,7 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif -#ifdef KVM_ARCH_WANT_MMU_NOTIFIER +#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER union kvm_mmu_notifier_arg { pte_t pte; }; @@ -789,7 +789,7 @@ struct kvm { struct hlist_head irq_ack_notifier_list; #endif -#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) +#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER struct mmu_notifier mmu_notifier; unsigned long mmu_invalidate_seq; long mmu_invalidate_in_progress; @@ -1971,7 +1971,7 @@ extern const struct _kvm_stats_desc kvm_vm_stats_desc[]; extern const struct kvm_stats_header kvm_vcpu_stats_header; extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[]; -#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) +#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq) { if 
(unlikely(kvm->mmu_invalidate_in_progress)) diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 6f4737d5046a..9d1f7835d8c1 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -6,6 +6,7 @@ struct kvm; struct kvm_async_pf; struct kvm_device_ops; +struct kvm_gfn_range; struct kvm_interrupt; struct kvm_irq_routing_table; struct kvm_memory_slot; diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index 484d0873061c..ecae2914c97e 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -92,3 +92,7 @@ config HAVE_KVM_PM_NOTIFIER config KVM_GENERIC_HARDWARE_ENABLING bool + +config KVM_GENERIC_MMU_NOTIFIER + select MMU_NOTIFIER + bool diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 7e8879de4db4..a04c31d60001 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -540,7 +540,7 @@ void kvm_destroy_vcpus(struct kvm *kvm) } EXPORT_SYMBOL_GPL(kvm_destroy_vcpus); -#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) +#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) { return container_of(mn, struct kvm, mmu_notifier); @@ -939,14 +939,14 @@ static int kvm_init_mmu_notifier(struct kvm *kvm) return mmu_notifier_register(&kvm->mmu_notifier, current->mm); } -#else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ +#else /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */ static int kvm_init_mmu_notifier(struct kvm *kvm) { return 0; } -#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ +#endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */ #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER static int kvm_pm_notifier_call(struct notifier_block *bl, @@ -1266,7 +1266,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname) out_err_no_debugfs: kvm_coalesced_mmio_free(kvm); out_no_coalesced_mmio: -#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) +#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER if (kvm->mmu_notifier.ops) 
mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); #endif @@ -1326,7 +1326,7 @@ static void kvm_destroy_vm(struct kvm *kvm) kvm->buses[i] = NULL; } kvm_coalesced_mmio_free(kvm); -#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) +#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); /* * At this point, pending calls to invalidate_range_start() -- Gitee From 31b240970b93c828ea1b1f816a3373b948eabbe7 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 11 Jan 2024 03:12:59 -0500 Subject: [PATCH 1589/2138] KVM: define __KVM_HAVE_GUEST_DEBUG unconditionally ANBZ: #11464 commit 6bda055d625860736f7ea5b4eda816f276899d8b upstream. Since all architectures (for historical reasons) have to define struct kvm_guest_debug_arch, and since userspace has to check KVM_CHECK_EXTENSION(KVM_CAP_SET_GUEST_DEBUG) anyway, there is no advantage in masking the capability #define itself. Remove the #define __KVM_HAVE_GUEST_DEBUG from architecture-specific headers. 
Signed-off-by: Paolo Bonzini Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4030 --- arch/arm64/include/uapi/asm/kvm.h | 1 - arch/loongarch/include/uapi/asm/kvm.h | 1 - arch/powerpc/include/uapi/asm/kvm.h | 1 - arch/s390/include/uapi/asm/kvm.h | 1 - arch/x86/include/uapi/asm/kvm.h | 1 - include/uapi/linux/kvm.h | 7 +++++-- 6 files changed, 5 insertions(+), 7 deletions(-) diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index f7ddd73a8c0f..57f8df4aaf28 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -37,7 +37,6 @@ #include #include -#define __KVM_HAVE_GUEST_DEBUG #define __KVM_HAVE_IRQ_LINE #define __KVM_HAVE_READONLY_MEM #define __KVM_HAVE_VCPU_EVENTS diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index d619b943d20d..d65a2993a406 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -15,7 +15,6 @@ */ #define __KVM_HAVE_READONLY_MEM -#define __KVM_HAVE_GUEST_DEBUG #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_DIRTY_LOG_PAGE_OFFSET 64 diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 9f18fa090f1f..0572d5238b13 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -28,7 +28,6 @@ #define __KVM_HAVE_PPC_SMT #define __KVM_HAVE_IRQCHIP #define __KVM_HAVE_IRQ_LINE -#define __KVM_HAVE_GUEST_DEBUG /* Not always available, but if it is, this is the correct offset. 
*/ #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index abe926d43cbe..b3ec6ddd6a13 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -12,7 +12,6 @@ #include #define __KVM_S390 -#define __KVM_HAVE_GUEST_DEBUG /* Device control API: s390-specific devices */ #define KVM_DEV_FLIC_GET_ALL_IRQS 1 diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index 1a6a1f987949..d60c65c377c5 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -40,7 +40,6 @@ #define __KVM_HAVE_IRQ_LINE #define __KVM_HAVE_MSI #define __KVM_HAVE_USER_NMI -#define __KVM_HAVE_GUEST_DEBUG #define __KVM_HAVE_MSIX #define __KVM_HAVE_MCE #define __KVM_HAVE_PIT_STATE2 diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index ec5d3be77663..44010332ab25 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -86,6 +86,11 @@ struct kvm_debug_guest { /* *** End of deprecated interfaces *** */ +/* + * Backwards-compatible definitions. + */ +#define __KVM_HAVE_GUEST_DEBUG + /* for KVM_SET_USER_MEMORY_REGION */ struct kvm_userspace_memory_region { __u32 slot; @@ -974,9 +979,7 @@ struct kvm_ppc_resize_hpt { /* Bug in KVM_SET_USER_MEMORY_REGION fixed: */ #define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21 #define KVM_CAP_USER_NMI 22 -#ifdef __KVM_HAVE_GUEST_DEBUG #define KVM_CAP_SET_GUEST_DEBUG 23 -#endif #ifdef __KVM_HAVE_PIT #define KVM_CAP_REINJECT_CONTROL 24 #endif -- Gitee From a6fc2509c63c5b1a5034e94c34d35778912ecb49 Mon Sep 17 00:00:00 2001 From: Yuli Wang Date: Wed, 7 Aug 2024 17:37:14 +0800 Subject: [PATCH 1590/2138] LoongArch: KVM: Remove unnecessary definition of KVM_PRIVATE_MEM_SLOTS ANBZ: #11464 commit 296b03ce389b4f7b3d7ea5664e53d432fb17e745 upstream. 1. "KVM_PRIVATE_MEM_SLOTS" is renamed as "KVM_INTERNAL_MEM_SLOTS". 2. 
"KVM_INTERNAL_MEM_SLOTS" defaults to zero, so it is not necessary to define it in LoongArch's asm/kvm_host.h. Link: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=bdd1c37a315bc50ab14066c4852bc8dcf070451e Link: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=b075450868dbc0950f0942617f222eeb989cad10 Reviewed-by: Bibo Mao Signed-off-by: Wentao Guan Signed-off-by: Yuli Wang Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4030 --- arch/loongarch/include/asm/kvm_host.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index fb6fa6f01aec..3d27f3860946 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -29,8 +29,6 @@ #define KVM_MAX_VCPUS 256 #define KVM_MAX_CPUCFG_REGS 21 -/* memory slots that does not exposed to userspace */ -#define KVM_PRIVATE_MEM_SLOTS 0 #define KVM_HALT_POLL_NS_DEFAULT 500000 #define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0) -- Gitee From 72d5f4e64d4de0ba314adba43ccc563313c4dee6 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Mon, 6 May 2024 22:00:47 +0800 Subject: [PATCH 1591/2138] LoongArch: KVM: Add vcpu mapping from physical cpuid ANBZ: #11464 commit 73516e9da512adc63ba3859fbd82a21f6257348f upstream. Physical CPUID is used for interrupt routing for irqchips such as ipi, msgint and eiointc interrupt controllers. Physical CPUID is stored at the CSR register LOONGARCH_CSR_CPUID, it can not be changed once vcpu is created and the physical CPUIDs of two vcpus cannot be the same. Different irqchips have different size declaration about physical CPUID, the max CPUID value for CSR LOONGARCH_CSR_CPUID on Loongson-3A5000 is 512, the max CPUID supported by IPI hardware is 1024, while for eiointc irqchip is 256, and for msgint irqchip is 65536. 
The smallest value from all interrupt controllers is selected now, and the max cpuid size is defines as 256 by KVM which comes from the eiointc irqchip. Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4030 --- arch/loongarch/include/asm/kvm_host.h | 11 +- arch/loongarch/kvm/vcpu.c | 162 +++++++++++++------------- arch/loongarch/kvm/vm.c | 7 +- 3 files changed, 90 insertions(+), 90 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 3d27f3860946..f23d99fac993 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -107,12 +107,13 @@ struct kvm_world_switch { #define MAX_PGTABLE_LEVELS 4 /* - * Physical cpu id is used for interrupt routing, there are different + * Physical CPUID is used for interrupt routing, there are different * definitions about physical cpuid on different hardwares. - * For LOONGARCH_CSR_CPUID register, max cpuid size if 512 - * For IPI HW, max dest CPUID size 1024 - * For extioi interrupt controller, max dest CPUID size is 256 - * For MSI interrupt controller, max supported CPUID size is 65536 + * + * For LOONGARCH_CSR_CPUID register, max CPUID size if 512 + * For IPI hardware, max destination CPUID size 1024 + * For extioi interrupt controller, max destination CPUID size is 256 + * For msgint interrupt controller, max supported CPUID size is 65536 * * Currently max CPUID is defined as 256 for KVM hypervisor, in future * it will be expanded to 4096, including 16 packages at most. 
And every diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index af0922339f5b..2c5505ffebb4 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -447,128 +447,125 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, return 0; } -static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) -{ - unsigned long gintc; - struct loongarch_csrs *csr = vcpu->arch.csr; - - if (get_gcsr_flag(id) & INVALID_GCSR) - return -EINVAL; - - if (id == LOONGARCH_CSR_ESTAT) { - preempt_disable(); - vcpu_load(vcpu); - /* - * Sync pending interrupts into ESTAT so that interrupt - * remains during VM migration stage - */ - kvm_deliver_intr(vcpu); - vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; - vcpu_put(vcpu); - preempt_enable(); - - /* ESTAT IP0~IP7 get from GINTC */ - gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff; - *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2); - return 0; - } - - /* - * Get software CSR state since software state is consistent - * with hardware for synchronous ioctl - */ - *val = kvm_read_sw_gcsr(csr, id); - - return 0; -} - static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val) { int cpuid; + struct kvm_phyid_map *map; struct loongarch_csrs *csr = vcpu->arch.csr; - struct kvm_phyid_map *map; if (val >= KVM_MAX_PHYID) return -EINVAL; - cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT); map = vcpu->kvm->arch.phyid_map; + cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID); + spin_lock(&vcpu->kvm->arch.phyid_map_lock); - if (map->phys_map[cpuid].enabled) { - /* - * Cpuid is already set before - * Forbid changing different cpuid at runtime - */ - if (cpuid != val) { - /* - * Cpuid 0 is initial value for vcpu, maybe invalid - * unset value for vcpu - */ - if (cpuid) { - spin_unlock(&vcpu->kvm->arch.phyid_map_lock); - return -EINVAL; - } - } else { - /* Discard duplicated cpuid set */ + if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) { + /* Discard 
duplicated CPUID set operation */ + if (cpuid == val) { spin_unlock(&vcpu->kvm->arch.phyid_map_lock); return 0; } - } - if (map->phys_map[val].enabled) { /* - * New cpuid is already set with other vcpu - * Forbid sharing the same cpuid between different vcpus + * CPUID is already set before + * Forbid changing to a different CPUID at runtime */ - if (map->phys_map[val].vcpu != vcpu) { + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return -EINVAL; + } + + if (map->phys_map[val].enabled) { + /* Discard duplicated CPUID set operation */ + if (vcpu == map->phys_map[val].vcpu) { spin_unlock(&vcpu->kvm->arch.phyid_map_lock); - return -EINVAL; + return 0; } - /* Discard duplicated cpuid set operation*/ + /* + * New CPUID is already set with other vcpu + * Forbid sharing the same CPUID between different vcpus + */ spin_unlock(&vcpu->kvm->arch.phyid_map_lock); - return 0; + return -EINVAL; } kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val); map->phys_map[val].enabled = true; map->phys_map[val].vcpu = vcpu; - if (map->max_phyid < val) - map->max_phyid = val; spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return 0; } +static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu) +{ + int cpuid; + struct kvm_phyid_map *map; + struct loongarch_csrs *csr = vcpu->arch.csr; + + map = vcpu->kvm->arch.phyid_map; + cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID); + + if (cpuid >= KVM_MAX_PHYID) + return; + + spin_lock(&vcpu->kvm->arch.phyid_map_lock); + if (map->phys_map[cpuid].enabled) { + map->phys_map[cpuid].vcpu = NULL; + map->phys_map[cpuid].enabled = false; + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID); + } + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); +} + struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid) { - struct kvm_phyid_map *map; + struct kvm_phyid_map *map; if (cpuid >= KVM_MAX_PHYID) return NULL; map = kvm->arch.phyid_map; - if (map->phys_map[cpuid].enabled) - return map->phys_map[cpuid].vcpu; + if (!map->phys_map[cpuid].enabled) + 
return NULL; - return NULL; + return map->phys_map[cpuid].vcpu; } -static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu) +static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) { - int cpuid; + unsigned long gintc; struct loongarch_csrs *csr = vcpu->arch.csr; - struct kvm_phyid_map *map; - map = vcpu->kvm->arch.phyid_map; - cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT); - if (cpuid >= KVM_MAX_PHYID) - return; + if (get_gcsr_flag(id) & INVALID_GCSR) + return -EINVAL; - if (map->phys_map[cpuid].enabled) { - map->phys_map[cpuid].vcpu = NULL; - map->phys_map[cpuid].enabled = false; - kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, 0); + if (id == LOONGARCH_CSR_ESTAT) { + preempt_disable(); + vcpu_load(vcpu); + /* + * Sync pending interrupts into ESTAT so that interrupt + * remains during VM migration stage + */ + kvm_deliver_intr(vcpu); + vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; + vcpu_put(vcpu); + preempt_enable(); + + /* ESTAT IP0~IP7 get from GINTC */ + gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff; + *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2); + return 0; } + + /* + * Get software CSR state since software state is consistent + * with hardware for synchronous ioctl + */ + *val = kvm_read_sw_gcsr(csr, id); + + return 0; } static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) @@ -579,6 +576,9 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) if (get_gcsr_flag(id) & INVALID_GCSR) return -EINVAL; + if (id == LOONGARCH_CSR_CPUID) + return kvm_set_cpuid(vcpu, val); + if (id == LOONGARCH_CSR_ESTAT) { /* ESTAT IP0~IP7 inject through GINTC */ gintc = (val >> 2) & 0xff; @@ -588,8 +588,7 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc); return ret; - } else if (id == LOONGARCH_CSR_CPUID) - return kvm_set_cpuid(vcpu, val); + } kvm_write_sw_gcsr(csr, id, val); @@ -1491,6 +1490,7 @@ int 
kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* Set cpuid */ kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id); + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID); /* Start with no pending virtual guest interrupts */ csr->csrs[LOONGARCH_CSR_GINTC] = 0; @@ -1509,8 +1509,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) hrtimer_cancel(&vcpu->arch.swtimer); kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); - kfree(vcpu->arch.csr); kvm_drop_cpuid(vcpu); + kfree(vcpu->arch.csr); /* * If the vCPU is freed and reused as another vCPU, we don't want the diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index 6995a36bd36e..5f65610aa9fc 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -33,13 +33,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (!kvm->arch.pgd) return -ENOMEM; - kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map), - GFP_KERNEL_ACCOUNT); + kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map), GFP_KERNEL_ACCOUNT); if (!kvm->arch.phyid_map) { free_page((unsigned long)kvm->arch.pgd); kvm->arch.pgd = NULL; return -ENOMEM; } + spin_lock_init(&kvm->arch.phyid_map_lock); kvm_init_vmcs(kvm); @@ -61,7 +61,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) for (i = 0; i <= kvm->arch.root_level; i++) kvm->arch.pte_shifts[i] = PAGE_SHIFT + i * (PAGE_SHIFT - 3); - spin_lock_init(&kvm->arch.phyid_map_lock); return 0; } @@ -69,8 +68,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_destroy_vcpus(kvm); free_page((unsigned long)kvm->arch.pgd); - kvfree(kvm->arch.phyid_map); kvm->arch.pgd = NULL; + kvfree(kvm->arch.phyid_map); kvm->arch.phyid_map = NULL; } -- Gitee From 57d2f40fa5ce3dd1b5d6b4ec601d21c2a8315015 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 5 Apr 2024 07:58:12 -0400 Subject: [PATCH 1592/2138] KVM: delete .change_pte MMU notifier callback ANBZ: #11464 commit f3b65bbaed7c43d10989380d4b95e2a3e9fe5a6b upstream. 
The .change_pte() MMU notifier callback was intended as an optimization. The original point of it was that KSM could tell KVM to flip its secondary PTE to a new location without having to first zap it. At the time there was also an .invalidate_page() callback; both of them were *not* bracketed by calls to mmu_notifier_invalidate_range_{start,end}(), and .invalidate_page() also doubled as a fallback implementation of .change_pte(). Later on, however, both callbacks were changed to occur within an invalidate_range_start/end() block. In the case of .change_pte(), commit 6bdb913f0a70 ("mm: wrap calls to set_pte_at_notify with invalidate_range_start and invalidate_range_end", 2012-10-09) did so to remove the fallback from .invalidate_page() to .change_pte() and allow sleepable .invalidate_page() hooks. This however made KVM's usage of the .change_pte() callback completely moot, because KVM unmaps the sPTEs during .invalidate_range_start() and therefore .change_pte() has no hope of finding a sPTE to change. Drop the generic KVM code that dispatches to kvm_set_spte_gfn(), as well as all the architecture specific implementations. 
Signed-off-by: Paolo Bonzini Acked-by: Anup Patel Acked-by: Michael Ellerman (powerpc) Reviewed-by: Bibo Mao Message-ID: <20240405115815.3226315-2-pbonzini@redhat.com> Signed-off-by: Paolo Bonzini Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4030 --- arch/arm64/kvm/mmu.c | 34 -------------- arch/loongarch/include/asm/kvm_host.h | 1 - arch/loongarch/kvm/mmu.c | 32 ------------- arch/mips/kvm/mmu.c | 30 ------------ arch/powerpc/include/asm/kvm_ppc.h | 1 - arch/powerpc/kvm/book3s.c | 5 -- arch/powerpc/kvm/book3s.h | 1 - arch/powerpc/kvm/book3s_64_mmu_hv.c | 12 ----- arch/powerpc/kvm/book3s_hv.c | 1 - arch/powerpc/kvm/book3s_pr.c | 7 --- arch/powerpc/kvm/e500_mmu_host.c | 6 --- arch/riscv/kvm/mmu.c | 20 -------- arch/x86/kvm/mmu/mmu.c | 67 +++------------------------ arch/x86/kvm/mmu/spte.c | 16 ------- arch/x86/kvm/mmu/spte.h | 2 - arch/x86/kvm/mmu/tdp_mmu.c | 46 ------------------ arch/x86/kvm/mmu/tdp_mmu.h | 1 - include/linux/kvm_host.h | 2 - include/trace/events/kvm.h | 15 ------ virt/kvm/kvm_main.c | 43 ----------------- 20 files changed, 7 insertions(+), 335 deletions(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 482280fe22d7..6b32eaf9e731 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1780,40 +1780,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) return false; } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - kvm_pfn_t pfn = pte_pfn(range->arg.pte); - - if (!kvm->arch.mmu.pgt) - return false; - - WARN_ON(range->end - range->start != 1); - - /* - * If the page isn't tagged, defer to user_mem_abort() for sanitising - * the MTE tags. The S2 pte should have been unmapped by - * mmu_notifier_invalidate_range_end(). 
- */ - if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn))) - return false; - - /* - * We've moved a page around, probably through CoW, so let's treat - * it just like a translation fault and the map handler will clean - * the cache to the PoC. - * - * The MMU notifiers will have unmapped a huge PMD before calling - * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and - * therefore we never need to clear out a huge PMD through this - * calling path and a memcache is not required. - */ - kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT, - PAGE_SIZE, __pfn_to_phys(pfn), - KVM_PGTABLE_PROT_R, NULL, 0); - - return false; -} - bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { u64 size = (range->end - range->start) << PAGE_SHIFT; diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index f23d99fac993..2e5ae8a41781 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -313,7 +313,6 @@ void kvm_flush_tlb_all(void); void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa); int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write); -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index d312921f3ab9..ddc3b1f607e7 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -511,38 +511,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) range->end << PAGE_SHIFT, &ctx); } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - unsigned long prot_bits; - kvm_pte_t *ptep; - kvm_pfn_t pfn = pte_pfn(range->arg.pte); - gpa_t gpa = range->start << PAGE_SHIFT; - 
- ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); - if (!ptep) - return false; - - /* Replacing an absent or old page doesn't need flushes */ - if (!kvm_pte_present(NULL, ptep) || !kvm_pte_young(*ptep)) { - kvm_set_pte(ptep, 0); - return false; - } - - /* Fill new pte if write protected or page migrated */ - prot_bits = _PAGE_PRESENT | __READABLE; - prot_bits |= _CACHE_MASK & pte_val(range->arg.pte); - - /* - * Set _PAGE_WRITE or _PAGE_DIRTY iff old and new pte both support - * _PAGE_WRITE for map_page_fast if next page write fault - * _PAGE_DIRTY since gpa has already recorded as dirty page - */ - prot_bits |= __WRITEABLE & *ptep & pte_val(range->arg.pte); - kvm_set_pte(ptep, kvm_pfn_pte(pfn, __pgprot(prot_bits))); - - return true; -} - bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { kvm_ptw_ctx ctx; diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 467ee6b95ae1..c17157e700c0 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -444,36 +444,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) return true; } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - gpa_t gpa = range->start << PAGE_SHIFT; - pte_t hva_pte = range->arg.pte; - pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa); - pte_t old_pte; - - if (!gpa_pte) - return false; - - /* Mapping may need adjusting depending on memslot flags */ - old_pte = *gpa_pte; - if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte)) - hva_pte = pte_mkclean(hva_pte); - else if (range->slot->flags & KVM_MEM_READONLY) - hva_pte = pte_wrprotect(hva_pte); - - set_pte(gpa_pte, hva_pte); - - /* Replacing an absent or old page doesn't need flushes */ - if (!pte_present(old_pte) || !pte_young(old_pte)) - return false; - - /* Pages swapped, aged, moved, or cleaned require flushes */ - return !pte_present(hva_pte) || - !pte_young(hva_pte) || - pte_pfn(old_pte) != pte_pfn(hva_pte) || - (pte_dirty(old_pte) && !pte_dirty(hva_pte)); -} - 
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end); diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index b4da8514af43..1f7a28320944 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -287,7 +287,6 @@ struct kvmppc_ops { bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range); bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); - bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); void (*free_memslot)(struct kvm_memory_slot *slot); int (*init_vm)(struct kvm *kvm); void (*destroy_vm)(struct kvm *kvm); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 686d8d9eda3e..a98b4ec06541 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -899,11 +899,6 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) return kvm->arch.kvm_ops->test_age_gfn(kvm, range); } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - return kvm->arch.kvm_ops->set_spte_gfn(kvm, range); -} - int kvmppc_core_init_vm(struct kvm *kvm) { diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h index 58391b4b32ed..4aa2ab89afbc 100644 --- a/arch/powerpc/kvm/book3s.h +++ b/arch/powerpc/kvm/book3s.h @@ -12,7 +12,6 @@ extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm, extern bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range); extern bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); extern bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); -extern bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu); extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c 
b/arch/powerpc/kvm/book3s_64_mmu_hv.c index fdfc2a62dd67..aa6c121ed22d 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -1010,18 +1010,6 @@ bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) return kvm_test_age_rmapp(kvm, range->slot, range->start); } -bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) -{ - WARN_ON(range->start + 1 != range->end); - - if (kvm_is_radix(kvm)) - kvm_unmap_radix(kvm, range->slot, range->start); - else - kvm_unmap_rmapp(kvm, range->slot, range->start); - - return false; -} - static int vcpus_running(struct kvm *kvm) { return atomic_read(&kvm->arch.vcpus_running) != 0; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 924689fa5efa..8058a1854c3a 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -6207,7 +6207,6 @@ static struct kvmppc_ops kvm_ops_hv = { .unmap_gfn_range = kvm_unmap_gfn_range_hv, .age_gfn = kvm_age_gfn_hv, .test_age_gfn = kvm_test_age_gfn_hv, - .set_spte_gfn = kvm_set_spte_gfn_hv, .free_memslot = kvmppc_core_free_memslot_hv, .init_vm = kvmppc_core_init_vm_hv, .destroy_vm = kvmppc_core_destroy_vm_hv, diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 9118242063fb..d7238dc451e1 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -461,12 +461,6 @@ static bool kvm_test_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) return false; } -static bool kvm_set_spte_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) -{ - /* The page will get remapped properly on its next fault */ - return do_kvm_unmap_gfn(kvm, range); -} - /*****************************************/ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) @@ -2070,7 +2064,6 @@ static struct kvmppc_ops kvm_ops_pr = { .unmap_gfn_range = kvm_unmap_gfn_range_pr, .age_gfn = kvm_age_gfn_pr, .test_age_gfn = kvm_test_age_gfn_pr, - .set_spte_gfn = 
kvm_set_spte_gfn_pr, .free_memslot = kvmppc_core_free_memslot_pr, .init_vm = kvmppc_core_init_vm_pr, .destroy_vm = kvmppc_core_destroy_vm_pr, diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index ccb8f16ffe41..c664fdec75b1 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -747,12 +747,6 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) return false; } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - /* The page will get remapped properly on its next fault */ - return kvm_e500_mmu_unmap_gfn(kvm, range); -} - /*****************************************/ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index a9e2fd7245e1..b63650f9b966 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -550,26 +550,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) return false; } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - int ret; - kvm_pfn_t pfn = pte_pfn(range->arg.pte); - - if (!kvm->arch.pgd) - return false; - - WARN_ON(range->end - range->start != 1); - - ret = gstage_map_page(kvm, NULL, range->start << PAGE_SHIFT, - __pfn_to_phys(pfn), PAGE_SIZE, true, true); - if (ret) { - kvm_debug("Failed to map G-stage page (error %d)\n", ret); - return true; - } - - return false; -} - bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { pte_t *ptep; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 31c5562f51b2..f582ce29de27 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -436,8 +436,8 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) * The idea using the light way get the spte on x86_32 guest is from * gup_get_pte (mm/gup.c). * - * An spte tlb flush may be pending, because kvm_set_pte_rmap - * coalesces them and we are running out of the MMU lock. 
Therefore + * An spte tlb flush may be pending, because they are coalesced and + * we are running out of the MMU lock. Therefore * we need to protect against in-progress updates of the spte. * * Reading the spte while an update is in progress may get the old value @@ -1442,49 +1442,11 @@ static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, } static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - pte_t unused) + struct kvm_memory_slot *slot, gfn_t gfn, int level) { return __kvm_zap_rmap(kvm, rmap_head, slot); } -static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - pte_t pte) -{ - u64 *sptep; - struct rmap_iterator iter; - bool need_flush = false; - u64 new_spte; - kvm_pfn_t new_pfn; - - WARN_ON_ONCE(pte_huge(pte)); - new_pfn = pte_pfn(pte); - -restart: - for_each_rmap_spte(rmap_head, &iter, sptep) { - need_flush = true; - - if (pte_write(pte)) { - kvm_zap_one_rmap_spte(kvm, rmap_head, sptep); - goto restart; - } else { - new_spte = kvm_mmu_changed_pte_notifier_make_spte( - *sptep, new_pfn); - - mmu_spte_clear_track_bits(kvm, sptep); - mmu_spte_set(sptep, new_spte); - } - } - - if (need_flush && kvm_available_flush_remote_tlbs_range()) { - kvm_flush_remote_tlbs_gfn(kvm, gfn, level); - return false; - } - - return need_flush; -} - struct slot_rmap_walk_iterator { /* input fields. 
*/ const struct kvm_memory_slot *slot; @@ -1556,7 +1518,7 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, - int level, pte_t pte); + int level); static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, @@ -1568,7 +1530,7 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm, for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, range->start, range->end - 1, &iterator) ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn, - iterator.level, range->arg.pte); + iterator.level); return ret; } @@ -1590,22 +1552,8 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) return flush; } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - bool flush = false; - - if (kvm_memslots_have_rmaps(kvm)) - flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmap); - - if (tdp_mmu_enabled) - flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range); - - return flush; -} - static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - pte_t unused) + struct kvm_memory_slot *slot, gfn_t gfn, int level) { u64 *sptep; struct rmap_iterator iter; @@ -1618,8 +1566,7 @@ static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, } static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, - int level, pte_t unused) + struct kvm_memory_slot *slot, gfn_t gfn, int level) { u64 *sptep; struct rmap_iterator iter; diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index b4c1119cc48b..f499e79b3a15 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -330,22 +330,6 @@ u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled) return spte; } -u64 
kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn) -{ - u64 new_spte; - - new_spte = old_spte & ~SPTE_BASE_ADDR_MASK; - new_spte |= (u64)new_pfn << PAGE_SHIFT; - - new_spte &= ~PT_WRITABLE_MASK; - new_spte &= ~shadow_host_writable_mask; - new_spte &= ~shadow_mmu_writable_mask; - - new_spte = mark_spte_for_access_track(new_spte); - - return new_spte; -} - u64 mark_spte_for_access_track(u64 spte) { if (spte_ad_enabled(spte)) diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index a129951c9a88..f5c600c52f83 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -496,8 +496,6 @@ static inline u64 restore_acc_track_spte(u64 spte) return spte; } -u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn); - void __init kvm_mmu_spte_module_init(void); void kvm_mmu_reset_all_pte_masks(void); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 8eef3ed5fe04..b57095cee455 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1217,52 +1217,6 @@ bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn); } -static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, - struct kvm_gfn_range *range) -{ - u64 new_spte; - - /* Huge pages aren't expected to be modified without first being zapped. */ - WARN_ON_ONCE(pte_huge(range->arg.pte) || range->start + 1 != range->end); - - if (iter->level != PG_LEVEL_4K || - !is_shadow_present_pte(iter->old_spte)) - return false; - - /* - * Note, when changing a read-only SPTE, it's not strictly necessary to - * zero the SPTE before setting the new PFN, but doing so preserves the - * invariant that the PFN of a present * leaf SPTE can never change. - * See handle_changed_spte(). 
- */ - tdp_mmu_iter_set_spte(kvm, iter, 0); - - if (!pte_write(range->arg.pte)) { - new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte, - pte_pfn(range->arg.pte)); - - tdp_mmu_iter_set_spte(kvm, iter, new_spte); - } - - return true; -} - -/* - * Handle the changed_pte MMU notifier for the TDP MMU. - * data is a pointer to the new pte_t mapping the HVA specified by the MMU - * notifier. - * Returns non-zero if a flush is needed before releasing the MMU lock. - */ -bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - /* - * No need to handle the remote TLB flush under RCU protection, the - * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a - * shadow page. See the WARN on pfn_changed in handle_changed_spte(). - */ - return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn); -} - /* * Remove write access from all SPTEs at or above min_level that map GFNs * [start, end). Returns true if an SPTE has been changed and the TLBs need to diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index 733a3aef3a96..6a2742078961 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -32,7 +32,6 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, bool flush); bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); -bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, const struct kvm_memory_slot *slot, int min_level); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 91464161c800..31f6a6965066 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -256,7 +256,6 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER union kvm_mmu_notifier_arg { - pte_t pte; }; struct kvm_gfn_range { @@ -269,7 +268,6 @@ struct 
kvm_gfn_range { bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range); #endif enum { diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 3bd31ea23fee..faaf45ac716b 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -456,21 +456,6 @@ TRACE_EVENT(kvm_unmap_hva_range, __entry->start, __entry->end) ); -TRACE_EVENT(kvm_set_spte_hva, - TP_PROTO(unsigned long hva), - TP_ARGS(hva), - - TP_STRUCT__entry( - __field( unsigned long, hva ) - ), - - TP_fast_assign( - __entry->hva = hva; - ), - - TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva) -); - TRACE_EVENT(kvm_age_hva, TP_PROTO(unsigned long start, unsigned long end), TP_ARGS(start, end), diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a04c31d60001..1d6872475c0e 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -701,48 +701,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn return __kvm_handle_hva_range(kvm, &range); } -static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - /* - * Skipping invalid memslots is correct if and only change_pte() is - * surrounded by invalidate_range_{start,end}(), which is currently - * guaranteed by the primary MMU. If that ever changes, KVM needs to - * unmap the memslot instead of skipping the memslot to ensure that KVM - * doesn't hold references to the old PFN. 
- */ - WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count)); - - if (range->slot->flags & KVM_MEMSLOT_INVALID) - return false; - - return kvm_set_spte_gfn(kvm, range); -} - -static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long address, - pte_t pte) -{ - struct kvm *kvm = mmu_notifier_to_kvm(mn); - const union kvm_mmu_notifier_arg arg = { .pte = pte }; - - trace_kvm_set_spte_hva(address); - - /* - * .change_pte() must be surrounded by .invalidate_range_{start,end}(). - * If mmu_invalidate_in_progress is zero, then no in-progress - * invalidations, including this one, found a relevant memslot at - * start(); rechecking memslots here is unnecessary. Note, a false - * positive (count elevated by a different invalidation) is sub-optimal - * but functionally ok. - */ - WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count)); - if (!READ_ONCE(kvm->mmu_invalidate_in_progress)) - return; - - kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn); -} - void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, unsigned long end) { @@ -929,7 +887,6 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { .clear_flush_young = kvm_mmu_notifier_clear_flush_young, .clear_young = kvm_mmu_notifier_clear_young, .test_young = kvm_mmu_notifier_test_young, - .change_pte = kvm_mmu_notifier_change_pte, .release = kvm_mmu_notifier_release, }; -- Gitee From 1a5573661cea48c6c43d01c816c74b869d8bca5b Mon Sep 17 00:00:00 2001 From: Dandan Zhang Date: Wed, 7 Aug 2024 17:37:14 +0800 Subject: [PATCH 1593/2138] LoongArch: KVM: Remove undefined a6 argument comment for kvm_hypercall() ANBZ: #11464 commit 494b0792d962e8efac72b3a5b6d9bcd4e6fa8cf0 upstream. The kvm_hypercall() set for LoongArch is limited to a1-a5. So the mention of a6 in the comment is undefined that needs to be rectified. 
Reviewed-by: Bibo Mao Signed-off-by: Wentao Guan Signed-off-by: Dandan Zhang Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4030 --- arch/loongarch/include/asm/kvm_para.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h index dbea66d7eaa6..710ca8c4b61d 100644 --- a/arch/loongarch/include/asm/kvm_para.h +++ b/arch/loongarch/include/asm/kvm_para.h @@ -42,9 +42,9 @@ struct kvm_steal_time { * Hypercall interface for KVM hypervisor * * a0: function identifier - * a1-a6: args + * a1-a5: args * Return value will be placed in a0. - * Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6. + * Up to 5 arguments are passed in a1, a2, a3, a4, a5. */ static __always_inline long kvm_hypercall0(u64 fid) { -- Gitee From 1095bde01b483b0db6094a13ab18d278f1c85d15 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 11 Jan 2024 03:00:34 -0500 Subject: [PATCH 1594/2138] kvm: replace __KVM_HAVE_READONLY_MEM with Kconfig symbol ANBZ: #11464 commit 8886640dade4ae2595fcdce511c8bcc716aa47d3 upstream. KVM uses __KVM_HAVE_* symbols in the architecture-dependent uapi/asm/kvm.h to mask unused definitions in include/uapi/linux/kvm.h. __KVM_HAVE_READONLY_MEM however was nothing but a misguided attempt to define KVM_CAP_READONLY_MEM only on architectures where KVM_CHECK_EXTENSION(KVM_CAP_READONLY_MEM) could possibly return nonzero. This however does not make sense, and it prevented userspace from supporting this architecture-independent feature without recompilation. Therefore, these days __KVM_HAVE_READONLY_MEM does not mask anything and is only used in virt/kvm/kvm_main.c. Userspace does not need to test it and there should be no need for it to exist. Remove it and replace it with a Kconfig symbol within Linux source code. 
Signed-off-by: Paolo Bonzini Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4030 --- arch/arm64/include/uapi/asm/kvm.h | 1 - arch/arm64/kvm/Kconfig | 1 + arch/loongarch/include/uapi/asm/kvm.h | 2 -- arch/loongarch/kvm/Kconfig | 1 + arch/mips/include/uapi/asm/kvm.h | 2 -- arch/mips/kvm/Kconfig | 1 + arch/riscv/include/uapi/asm/kvm.h | 1 - arch/riscv/kvm/Kconfig | 1 + arch/x86/include/uapi/asm/kvm.h | 1 - arch/x86/kvm/Kconfig | 1 + virt/kvm/Kconfig | 3 +++ virt/kvm/kvm_main.c | 2 +- 12 files changed, 9 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 57f8df4aaf28..0f3cbb903805 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -38,7 +38,6 @@ #include #define __KVM_HAVE_IRQ_LINE -#define __KVM_HAVE_READONLY_MEM #define __KVM_HAVE_VCPU_EVENTS #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 1a777715199f..126d6322891a 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -38,6 +38,7 @@ menuconfig KVM select HAVE_KVM_IRQ_ROUTING select IRQ_BYPASS_MANAGER select HAVE_KVM_IRQ_BYPASS + select HAVE_KVM_READONLY_MEM select HAVE_KVM_VCPU_RUN_PID_CHANGE select SCHED_INFO select GUEST_PERF_EVENTS if PERF_EVENTS diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index d65a2993a406..f07bc0de2809 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -14,8 +14,6 @@ * Some parts derived from the x86 version of this file. 
*/ -#define __KVM_HAVE_READONLY_MEM - #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_DIRTY_LOG_PAGE_OFFSET 64 #define __KVM_HAVE_IRQ_LINE diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig index 3a03121ecfc0..c6da55bc9143 100644 --- a/arch/loongarch/kvm/Kconfig +++ b/arch/loongarch/kvm/Kconfig @@ -32,6 +32,7 @@ config KVM select KVM_GENERIC_HARDWARE_ENABLING select KVM_GENERIC_MMU_NOTIFIER select KVM_MMIO + select HAVE_KVM_READONLY_MEM select KVM_XFER_TO_GUEST_WORK select SCHED_INFO select PREEMPT_NOTIFIERS diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h index edcf717c4327..9673dc9cb315 100644 --- a/arch/mips/include/uapi/asm/kvm.h +++ b/arch/mips/include/uapi/asm/kvm.h @@ -20,8 +20,6 @@ * Some parts derived from the x86 version of this file. */ -#define __KVM_HAVE_READONLY_MEM - #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 /* diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index c04987d2ed2e..30c7ff4c2ca8 100644 --- a/arch/mips/kvm/Kconfig +++ b/arch/mips/kvm/Kconfig @@ -28,6 +28,7 @@ config KVM select KVM_GENERIC_MMU_NOTIFIER select INTERVAL_TREE select KVM_GENERIC_HARDWARE_ENABLING + select HAVE_KVM_READONLY_MEM help Support for hosting Guest kernels. 
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 992c5e407104..be0bf81cc309 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -16,7 +16,6 @@ #include #define __KVM_HAVE_IRQ_LINE -#define __KVM_HAVE_READONLY_MEM #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig index ae2e05f050ec..c47047d2d332 100644 --- a/arch/riscv/kvm/Kconfig +++ b/arch/riscv/kvm/Kconfig @@ -26,6 +26,7 @@ config KVM select HAVE_KVM_IRQ_ROUTING select HAVE_KVM_MSI select HAVE_KVM_VCPU_ASYNC_IOCTL + select HAVE_KVM_READONLY_MEM select KVM_GENERIC_DIRTYLOG_READ_PROTECT select KVM_GENERIC_HARDWARE_ENABLING select KVM_MMIO diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index d60c65c377c5..c12fb4467e44 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -48,7 +48,6 @@ #define __KVM_HAVE_DEBUGREGS #define __KVM_HAVE_XSAVE #define __KVM_HAVE_XCRS -#define __KVM_HAVE_READONLY_MEM /* Architectural interrupt line count. 
*/ #define KVM_NR_INTERRUPTS 256 diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index d42e832ffb4a..28749b2df8e3 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -33,6 +33,7 @@ config KVM select IRQ_BYPASS_MANAGER select HAVE_KVM_IRQ_BYPASS select HAVE_KVM_IRQ_ROUTING + select HAVE_KVM_READONLY_MEM select HAVE_KVM_EVENTFD select KVM_ASYNC_PF select USER_RETURN_NOTIFIER diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index ecae2914c97e..492d91639a23 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -56,6 +56,9 @@ config KVM_ASYNC_PF_SYNC config HAVE_KVM_MSI bool +config HAVE_KVM_READONLY_MEM + bool + config HAVE_KVM_CPU_RELAX_INTERCEPT bool diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1d6872475c0e..ee7b6e988876 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1511,7 +1511,7 @@ static int check_memory_region_flags(const struct kvm_userspace_memory_region *m { u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; -#ifdef __KVM_HAVE_READONLY_MEM +#ifdef CONFIG_HAVE_KVM_READONLY_MEM valid_flags |= KVM_MEM_READONLY; #endif -- Gitee From fb22503c4c91b76f8b4b82d4c6bf6044edd4775e Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Wed, 6 Mar 2024 09:12:13 +0800 Subject: [PATCH 1595/2138] LoongArch: KVM: Remove unnecessary CSR register saving during enter guest ANBZ: #11464 commit b99f783106ea5b2f8c9d74f4d3b1e2f77af9ec6e upstream. Some CSR registers like CRMD/PRMD are saved during enter VM mode now. However they are not restored for actual use, so saving for these CSR registers can be removed. 
Reviewed-by: Tianrui Zhao Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Xianglai Li Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4030 --- arch/loongarch/kvm/switch.S | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S index ba976509bfe8..3634431db18a 100644 --- a/arch/loongarch/kvm/switch.S +++ b/arch/loongarch/kvm/switch.S @@ -213,12 +213,6 @@ SYM_FUNC_START(kvm_enter_guest) /* Save host GPRs */ kvm_save_host_gpr a2 - /* Save host CRMD, PRMD to stack */ - csrrd a3, LOONGARCH_CSR_CRMD - st.d a3, a2, PT_CRMD - csrrd a3, LOONGARCH_CSR_PRMD - st.d a3, a2, PT_PRMD - addi.d a2, a1, KVM_VCPU_ARCH st.d sp, a2, KVM_ARCH_HSP st.d tp, a2, KVM_ARCH_HTP -- Gitee From a4182dc0f97a7abfb36aff629869e5436dfc361d Mon Sep 17 00:00:00 2001 From: gaojuxin Date: Mon, 28 Oct 2024 09:40:33 +0800 Subject: [PATCH 1596/2138] anolis: Loongarch: Update config for loongson3_defconfig ANBZ: #11535 Use the make savedefconfig command to update the loongson3_defconfig file Signed-off-by: gaojuxin Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4032 --- arch/loongarch/configs/loongson3_defconfig | 24 ++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 2b544d4b76d0..5359ede7101e 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -4,6 +4,7 @@ CONFIG_POSIX_MQUEUE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y # CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set CONFIG_PREEMPT_VOLUNTARY=y CONFIG_IRQ_TIME_ACCOUNTING=y @@ -13,6 +14,7 @@ CONFIG_TASKSTATS=y CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y CONFIG_LOG_BUF_SHIFT=18 CONFIG_NUMA_BALANCING=y CONFIG_MEMCG=y @@ -32,7 +34,6 @@ CONFIG_NAMESPACES=y CONFIG_USER_NS=y 
CONFIG_CHECKPOINT_RESTORE=y CONFIG_SCHED_AUTOGROUP=y -CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_KALLSYMS_ALL=y @@ -41,9 +42,10 @@ CONFIG_KEXEC=y CONFIG_CRASH_DUMP=y CONFIG_NR_CPUS=256 CONFIG_NUMA=y +CONFIG_ARCH_IOREMAP=y CONFIG_CPU_HAS_LSX=y CONFIG_CPU_HAS_LASX=y -CONFIG_RANDOMIZE_BASE=y +CONFIG_CPU_HAS_LBT=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y @@ -56,7 +58,8 @@ CONFIG_ACPI_IPMI=m CONFIG_ACPI_PCI_SLOT=y CONFIG_ACPI_HOTPLUG_MEMORY=y CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m +CONFIG_KVM=y +CONFIG_KPROBES=y CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y @@ -78,6 +81,7 @@ CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y CONFIG_ZBUD=y CONFIG_ZSMALLOC=m CONFIG_Z3FOLD=y +CONFIG_ZSMALLOC=y CONFIG_ZSMALLOC_STAT=y CONFIG_SLAB_FREELIST_RANDOM=y # CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set @@ -569,7 +573,6 @@ CONFIG_BT_BNEP_MC_FILTER=y CONFIG_BT_BNEP_PROTO_FILTER=y CONFIG_BT_CMTP=m CONFIG_BT_HIDP=m -CONFIG_BT_HS=y CONFIG_BT_HCIBTUSB=m CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y # CONFIG_BT_HCIBTUSB_BCM is not set @@ -691,6 +694,7 @@ CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_ATA=y CONFIG_ISCSI_TCP=m CONFIG_SCSI_CXGB4_ISCSI=m CONFIG_SCSI_BNX2_ISCSI=m @@ -881,6 +885,7 @@ CONFIG_R8169=m # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_SOCIONEXT is not set CONFIG_STMMAC_ETH=y +CONFIG_DWMAC_LOONGSON=m # CONFIG_NET_VENDOR_SUN is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_TEHUTI is not set @@ -1479,6 +1484,7 @@ CONFIG_DRM_VIRTIO_GPU=m CONFIG_DRM_LOONGSON=y CONFIG_DRM_BOCHS=m CONFIG_DRM_CIRRUS_QEMU=m +CONFIG_DRM_INSPUR=m CONFIG_FB=y CONFIG_FB_EFI=y CONFIG_FB_RADEON=y @@ -1839,7 +1845,6 @@ CONFIG_INFINIBAND_SRPT=m CONFIG_INFINIBAND_ISER=m CONFIG_INFINIBAND_ISERT=m CONFIG_RTC_CLASS=y -# CONFIG_RTC_SYSTOHC is not set CONFIG_RTC_DRV_DS1307=m CONFIG_RTC_DRV_DS1374=m CONFIG_RTC_DRV_DS1672=m @@ -2143,6 +2148,7 @@ CONFIG_CRYPTO_FCRYPT=m 
CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4_GENERIC=y CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m CONFIG_CRYPTO_ARC4=m @@ -2157,6 +2163,7 @@ CONFIG_CRYPTO_SEQIV=y CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SM3_GENERIC=y CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_XCBC=m @@ -2178,7 +2185,6 @@ CONFIG_SIGNED_PE_FILE_VERIFICATION=y CONFIG_SECONDARY_TRUSTED_KEYRING=y CONFIG_SYSTEM_BLACKLIST_KEYRING=y CONFIG_SYSTEM_REVOCATION_LIST=y -CONFIG_CRC_T10DIF=y CONFIG_CRC_ITU_T=y CONFIG_CRC7=m CONFIG_DMA_CMA=y @@ -2186,16 +2192,22 @@ CONFIG_PRINTK_TIME=y CONFIG_PRINTK_CALLER=y CONFIG_BOOT_PRINTK_DELAY=y CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +CONFIG_DEBUG_INFO_BTF=y CONFIG_FRAME_WARN=4096 CONFIG_STRIP_ASM_SYMS=y CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_SHIRQ=y CONFIG_PANIC_ON_OOPS=y +CONFIG_HARDLOCKUP_DETECTOR=y # CONFIG_SCHED_DEBUG is not set CONFIG_SCHEDSTATS=y CONFIG_DEBUG_LIST=y CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_RCU_TRACE is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_BLK_DEV_IO_TRACE=y # CONFIG_STRICT_DEVMEM is not set # CONFIG_RUNTIME_TESTING_MENU is not set -- Gitee From 44d4ff929389ee9a16b0fd664e81157a3ae55d2d Mon Sep 17 00:00:00 2001 From: gaojuxin Date: Mon, 28 Oct 2024 10:19:32 +0800 Subject: [PATCH 1597/2138] anolis: irqchip: Add forgotten irq-loongson.h ANBZ: #11537 Signed-off-by: gaojuxin Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4033 --- drivers/irqchip/irq-loongson.h | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 drivers/irqchip/irq-loongson.h diff --git a/drivers/irqchip/irq-loongson.h b/drivers/irqchip/irq-loongson.h new file mode 100644 index 000000000000..11fa138d1f44 --- /dev/null +++ b/drivers/irqchip/irq-loongson.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Loongson 
Technology Corporation Limited + */ + +#ifndef _DRIVERS_IRQCHIP_IRQ_LOONGSON_H +#define _DRIVERS_IRQCHIP_IRQ_LOONGSON_H + +int find_pch_pic(u32 gsi); + +int liointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lio_pic *acpi_liointc); +int eiointc_acpi_init(struct irq_domain *parent, + struct acpi_madt_eio_pic *acpi_eiointc); +int avecintc_acpi_init(struct irq_domain *parent); + +int htvec_acpi_init(struct irq_domain *parent, + struct acpi_madt_ht_pic *acpi_htvec); +int pch_lpc_acpi_init(struct irq_domain *parent, + struct acpi_madt_lpc_pic *acpi_pchlpc); +int pch_pic_acpi_init(struct irq_domain *parent, + struct acpi_madt_bio_pic *acpi_pchpic); +int pch_msi_acpi_init(struct irq_domain *parent, + struct acpi_madt_msi_pic *acpi_pchmsi); +int pch_msi_acpi_init_avec(struct irq_domain *parent); + +#endif /* _DRIVERS_IRQCHIP_IRQ_LOONGSON_H */ -- Gitee From 14ade2a9c0549c458e840a372e0efa9aeea3a3b2 Mon Sep 17 00:00:00 2001 From: Shawn Wang Date: Mon, 21 Oct 2024 16:39:23 +0800 Subject: [PATCH 1598/2138] anolis: KVM: arm64: Make MPAM mask in ID_AA64DFR0_EL1 writable ANBZ: #11429 After commit 26d74871db58 ("KVM: arm64: Disable MPAM visibility by default, and handle traps"), the MPAM field in AA64PFR0_EL1 is disabled by default, but can also be written by used-space. So the MPAM field should be set as writable, which is missed when backporting. This bug can cause arm64_check_features() fail with errno -E2BIG. Fix it by making MPAM_MASK in ID_AA64PFR0_EL1 writable explicitly. 
Fixes: 26d74871db58 ("KVM: arm64: Disable MPAM visibility by default, and handle traps") Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4011 --- arch/arm64/kvm/sys_regs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 16b8cb1a1590..b542cb9c5d1d 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2120,7 +2120,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, .reset = read_sanitised_id_aa64pfr0_el1, - .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, }, + .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK | + ID_AA64PFR0_EL1_MPAM_MASK, }, { SYS_DESC(SYS_ID_AA64PFR1_EL1), .access = access_id_reg, .get_user = get_id_reg, -- Gitee From 5bf4a1c84df3b3e17fb2cc1db712ee16edc36ca0 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 26 Oct 2024 16:42:41 +0800 Subject: [PATCH 1599/2138] anolis: revert "kvm: replace __KVM_HAVE_READONLY_MEM with Kconfig symbol" ANBZ: #11534 Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4031 Reviewed-by: Juxin Gao --- arch/arm64/include/uapi/asm/kvm.h | 1 + arch/arm64/kvm/Kconfig | 1 - arch/loongarch/include/uapi/asm/kvm.h | 2 ++ arch/loongarch/kvm/Kconfig | 1 - arch/mips/include/uapi/asm/kvm.h | 2 ++ arch/mips/kvm/Kconfig | 1 - arch/riscv/include/uapi/asm/kvm.h | 1 + arch/riscv/kvm/Kconfig | 1 - arch/x86/include/uapi/asm/kvm.h | 1 + arch/x86/kvm/Kconfig | 1 - virt/kvm/Kconfig | 3 --- virt/kvm/kvm_main.c | 2 +- 12 files changed, 8 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 0f3cbb903805..57f8df4aaf28 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -38,6 +38,7 @@ #include #define __KVM_HAVE_IRQ_LINE +#define __KVM_HAVE_READONLY_MEM #define 
__KVM_HAVE_VCPU_EVENTS #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 126d6322891a..1a777715199f 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -38,7 +38,6 @@ menuconfig KVM select HAVE_KVM_IRQ_ROUTING select IRQ_BYPASS_MANAGER select HAVE_KVM_IRQ_BYPASS - select HAVE_KVM_READONLY_MEM select HAVE_KVM_VCPU_RUN_PID_CHANGE select SCHED_INFO select GUEST_PERF_EVENTS if PERF_EVENTS diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index f07bc0de2809..d65a2993a406 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -14,6 +14,8 @@ * Some parts derived from the x86 version of this file. */ +#define __KVM_HAVE_READONLY_MEM + #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_DIRTY_LOG_PAGE_OFFSET 64 #define __KVM_HAVE_IRQ_LINE diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig index c6da55bc9143..3a03121ecfc0 100644 --- a/arch/loongarch/kvm/Kconfig +++ b/arch/loongarch/kvm/Kconfig @@ -32,7 +32,6 @@ config KVM select KVM_GENERIC_HARDWARE_ENABLING select KVM_GENERIC_MMU_NOTIFIER select KVM_MMIO - select HAVE_KVM_READONLY_MEM select KVM_XFER_TO_GUEST_WORK select SCHED_INFO select PREEMPT_NOTIFIERS diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h index 9673dc9cb315..edcf717c4327 100644 --- a/arch/mips/include/uapi/asm/kvm.h +++ b/arch/mips/include/uapi/asm/kvm.h @@ -20,6 +20,8 @@ * Some parts derived from the x86 version of this file. */ +#define __KVM_HAVE_READONLY_MEM + #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 /* diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index 30c7ff4c2ca8..c04987d2ed2e 100644 --- a/arch/mips/kvm/Kconfig +++ b/arch/mips/kvm/Kconfig @@ -28,7 +28,6 @@ config KVM select KVM_GENERIC_MMU_NOTIFIER select INTERVAL_TREE select KVM_GENERIC_HARDWARE_ENABLING - select HAVE_KVM_READONLY_MEM help Support for hosting Guest kernels. 
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index be0bf81cc309..992c5e407104 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -16,6 +16,7 @@ #include #define __KVM_HAVE_IRQ_LINE +#define __KVM_HAVE_READONLY_MEM #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig index c47047d2d332..ae2e05f050ec 100644 --- a/arch/riscv/kvm/Kconfig +++ b/arch/riscv/kvm/Kconfig @@ -26,7 +26,6 @@ config KVM select HAVE_KVM_IRQ_ROUTING select HAVE_KVM_MSI select HAVE_KVM_VCPU_ASYNC_IOCTL - select HAVE_KVM_READONLY_MEM select KVM_GENERIC_DIRTYLOG_READ_PROTECT select KVM_GENERIC_HARDWARE_ENABLING select KVM_MMIO diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index c12fb4467e44..d60c65c377c5 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -48,6 +48,7 @@ #define __KVM_HAVE_DEBUGREGS #define __KVM_HAVE_XSAVE #define __KVM_HAVE_XCRS +#define __KVM_HAVE_READONLY_MEM /* Architectural interrupt line count. 
*/ #define KVM_NR_INTERRUPTS 256 diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 28749b2df8e3..d42e832ffb4a 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -33,7 +33,6 @@ config KVM select IRQ_BYPASS_MANAGER select HAVE_KVM_IRQ_BYPASS select HAVE_KVM_IRQ_ROUTING - select HAVE_KVM_READONLY_MEM select HAVE_KVM_EVENTFD select KVM_ASYNC_PF select USER_RETURN_NOTIFIER diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index 492d91639a23..ecae2914c97e 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -56,9 +56,6 @@ config KVM_ASYNC_PF_SYNC config HAVE_KVM_MSI bool -config HAVE_KVM_READONLY_MEM - bool - config HAVE_KVM_CPU_RELAX_INTERCEPT bool diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index ee7b6e988876..1d6872475c0e 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1511,7 +1511,7 @@ static int check_memory_region_flags(const struct kvm_userspace_memory_region *m { u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES; -#ifdef CONFIG_HAVE_KVM_READONLY_MEM +#ifdef __KVM_HAVE_READONLY_MEM valid_flags |= KVM_MEM_READONLY; #endif -- Gitee From 584925f56943f14b19e9d698169cd05027eb2d39 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 26 Oct 2024 16:45:11 +0800 Subject: [PATCH 1600/2138] anolis: revert "KVM: delete .change_pte MMU notifier callback" ANBZ: #11534 Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4031 Reviewed-by: Juxin Gao --- arch/arm64/kvm/mmu.c | 34 ++++++++++++++ arch/loongarch/include/asm/kvm_host.h | 1 + arch/loongarch/kvm/mmu.c | 32 +++++++++++++ arch/mips/kvm/mmu.c | 30 ++++++++++++ arch/powerpc/include/asm/kvm_ppc.h | 1 + arch/powerpc/kvm/book3s.c | 5 ++ arch/powerpc/kvm/book3s.h | 1 + arch/powerpc/kvm/book3s_64_mmu_hv.c | 12 +++++ arch/powerpc/kvm/book3s_hv.c | 1 + arch/powerpc/kvm/book3s_pr.c | 7 +++ arch/powerpc/kvm/e500_mmu_host.c | 6 +++ arch/riscv/kvm/mmu.c | 20 ++++++++ arch/x86/kvm/mmu/mmu.c | 67 ++++++++++++++++++++++++--- arch/x86/kvm/mmu/spte.c | 16 +++++++ 
arch/x86/kvm/mmu/spte.h | 2 + arch/x86/kvm/mmu/tdp_mmu.c | 46 ++++++++++++++++++ arch/x86/kvm/mmu/tdp_mmu.h | 1 + include/linux/kvm_host.h | 2 + include/trace/events/kvm.h | 15 ++++++ virt/kvm/kvm_main.c | 43 +++++++++++++++++ 20 files changed, 335 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 6b32eaf9e731..482280fe22d7 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1780,6 +1780,40 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) return false; } +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + kvm_pfn_t pfn = pte_pfn(range->arg.pte); + + if (!kvm->arch.mmu.pgt) + return false; + + WARN_ON(range->end - range->start != 1); + + /* + * If the page isn't tagged, defer to user_mem_abort() for sanitising + * the MTE tags. The S2 pte should have been unmapped by + * mmu_notifier_invalidate_range_end(). + */ + if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn))) + return false; + + /* + * We've moved a page around, probably through CoW, so let's treat + * it just like a translation fault and the map handler will clean + * the cache to the PoC. + * + * The MMU notifiers will have unmapped a huge PMD before calling + * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and + * therefore we never need to clear out a huge PMD through this + * calling path and a memcache is not required. 
+ */ + kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT, + PAGE_SIZE, __pfn_to_phys(pfn), + KVM_PGTABLE_PROT_R, NULL, 0); + + return false; +} + bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { u64 size = (range->end - range->start) << PAGE_SHIFT; diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 2e5ae8a41781..f23d99fac993 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -313,6 +313,7 @@ void kvm_flush_tlb_all(void); void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa); int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write); +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index ddc3b1f607e7..d312921f3ab9 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -511,6 +511,38 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) range->end << PAGE_SHIFT, &ctx); } +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + unsigned long prot_bits; + kvm_pte_t *ptep; + kvm_pfn_t pfn = pte_pfn(range->arg.pte); + gpa_t gpa = range->start << PAGE_SHIFT; + + ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + if (!ptep) + return false; + + /* Replacing an absent or old page doesn't need flushes */ + if (!kvm_pte_present(NULL, ptep) || !kvm_pte_young(*ptep)) { + kvm_set_pte(ptep, 0); + return false; + } + + /* Fill new pte if write protected or page migrated */ + prot_bits = _PAGE_PRESENT | __READABLE; + prot_bits |= _CACHE_MASK & pte_val(range->arg.pte); + + /* + * Set _PAGE_WRITE or _PAGE_DIRTY iff old and new pte both support + * _PAGE_WRITE for 
map_page_fast if next page write fault + * _PAGE_DIRTY since gpa has already recorded as dirty page + */ + prot_bits |= __WRITEABLE & *ptep & pte_val(range->arg.pte); + kvm_set_pte(ptep, kvm_pfn_pte(pfn, __pgprot(prot_bits))); + + return true; +} + bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { kvm_ptw_ctx ctx; diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index c17157e700c0..467ee6b95ae1 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -444,6 +444,36 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) return true; } +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + gpa_t gpa = range->start << PAGE_SHIFT; + pte_t hva_pte = range->arg.pte; + pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa); + pte_t old_pte; + + if (!gpa_pte) + return false; + + /* Mapping may need adjusting depending on memslot flags */ + old_pte = *gpa_pte; + if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte)) + hva_pte = pte_mkclean(hva_pte); + else if (range->slot->flags & KVM_MEM_READONLY) + hva_pte = pte_wrprotect(hva_pte); + + set_pte(gpa_pte, hva_pte); + + /* Replacing an absent or old page doesn't need flushes */ + if (!pte_present(old_pte) || !pte_young(old_pte)) + return false; + + /* Pages swapped, aged, moved, or cleaned require flushes */ + return !pte_present(hva_pte) || + !pte_young(hva_pte) || + pte_pfn(old_pte) != pte_pfn(hva_pte) || + (pte_dirty(old_pte) && !pte_dirty(hva_pte)); +} + bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end); diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 1f7a28320944..b4da8514af43 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -287,6 +287,7 @@ struct kvmppc_ops { bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range); bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range 
*range); bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); + bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); void (*free_memslot)(struct kvm_memory_slot *slot); int (*init_vm)(struct kvm *kvm); void (*destroy_vm)(struct kvm *kvm); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index a98b4ec06541..686d8d9eda3e 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -899,6 +899,11 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) return kvm->arch.kvm_ops->test_age_gfn(kvm, range); } +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + return kvm->arch.kvm_ops->set_spte_gfn(kvm, range); +} + int kvmppc_core_init_vm(struct kvm *kvm) { diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h index 4aa2ab89afbc..58391b4b32ed 100644 --- a/arch/powerpc/kvm/book3s.h +++ b/arch/powerpc/kvm/book3s.h @@ -12,6 +12,7 @@ extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm, extern bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range); extern bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); extern bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); +extern bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu); extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index aa6c121ed22d..fdfc2a62dd67 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -1010,6 +1010,18 @@ bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) return kvm_test_age_rmapp(kvm, range->slot, range->start); } +bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) +{ + WARN_ON(range->start + 1 != range->end); + + if (kvm_is_radix(kvm)) + kvm_unmap_radix(kvm, range->slot, range->start); + else 
+ kvm_unmap_rmapp(kvm, range->slot, range->start); + + return false; +} + static int vcpus_running(struct kvm *kvm) { return atomic_read(&kvm->arch.vcpus_running) != 0; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8058a1854c3a..924689fa5efa 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -6207,6 +6207,7 @@ static struct kvmppc_ops kvm_ops_hv = { .unmap_gfn_range = kvm_unmap_gfn_range_hv, .age_gfn = kvm_age_gfn_hv, .test_age_gfn = kvm_test_age_gfn_hv, + .set_spte_gfn = kvm_set_spte_gfn_hv, .free_memslot = kvmppc_core_free_memslot_hv, .init_vm = kvmppc_core_init_vm_hv, .destroy_vm = kvmppc_core_destroy_vm_hv, diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index d7238dc451e1..9118242063fb 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -461,6 +461,12 @@ static bool kvm_test_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) return false; } +static bool kvm_set_spte_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) +{ + /* The page will get remapped properly on its next fault */ + return do_kvm_unmap_gfn(kvm, range); +} + /*****************************************/ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) @@ -2064,6 +2070,7 @@ static struct kvmppc_ops kvm_ops_pr = { .unmap_gfn_range = kvm_unmap_gfn_range_pr, .age_gfn = kvm_age_gfn_pr, .test_age_gfn = kvm_test_age_gfn_pr, + .set_spte_gfn = kvm_set_spte_gfn_pr, .free_memslot = kvmppc_core_free_memslot_pr, .init_vm = kvmppc_core_init_vm_pr, .destroy_vm = kvmppc_core_destroy_vm_pr, diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index c664fdec75b1..ccb8f16ffe41 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -747,6 +747,12 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) return false; } +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + /* The page will 
get remapped properly on its next fault */ + return kvm_e500_mmu_unmap_gfn(kvm, range); +} + /*****************************************/ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index b63650f9b966..a9e2fd7245e1 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -550,6 +550,26 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) return false; } +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + int ret; + kvm_pfn_t pfn = pte_pfn(range->arg.pte); + + if (!kvm->arch.pgd) + return false; + + WARN_ON(range->end - range->start != 1); + + ret = gstage_map_page(kvm, NULL, range->start << PAGE_SHIFT, + __pfn_to_phys(pfn), PAGE_SIZE, true, true); + if (ret) { + kvm_debug("Failed to map G-stage page (error %d)\n", ret); + return true; + } + + return false; +} + bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { pte_t *ptep; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index f582ce29de27..31c5562f51b2 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -436,8 +436,8 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) * The idea using the light way get the spte on x86_32 guest is from * gup_get_pte (mm/gup.c). * - * An spte tlb flush may be pending, because they are coalesced and - * we are running out of the MMU lock. Therefore + * An spte tlb flush may be pending, because kvm_set_pte_rmap + * coalesces them and we are running out of the MMU lock. Therefore * we need to protect against in-progress updates of the spte. 
* * Reading the spte while an update is in progress may get the old value @@ -1442,11 +1442,49 @@ static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, } static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level) + struct kvm_memory_slot *slot, gfn_t gfn, int level, + pte_t unused) { return __kvm_zap_rmap(kvm, rmap_head, slot); } +static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, + struct kvm_memory_slot *slot, gfn_t gfn, int level, + pte_t pte) +{ + u64 *sptep; + struct rmap_iterator iter; + bool need_flush = false; + u64 new_spte; + kvm_pfn_t new_pfn; + + WARN_ON_ONCE(pte_huge(pte)); + new_pfn = pte_pfn(pte); + +restart: + for_each_rmap_spte(rmap_head, &iter, sptep) { + need_flush = true; + + if (pte_write(pte)) { + kvm_zap_one_rmap_spte(kvm, rmap_head, sptep); + goto restart; + } else { + new_spte = kvm_mmu_changed_pte_notifier_make_spte( + *sptep, new_pfn); + + mmu_spte_clear_track_bits(kvm, sptep); + mmu_spte_set(sptep, new_spte); + } + } + + if (need_flush && kvm_available_flush_remote_tlbs_range()) { + kvm_flush_remote_tlbs_gfn(kvm, gfn, level); + return false; + } + + return need_flush; +} + struct slot_rmap_walk_iterator { /* input fields. 
*/ const struct kvm_memory_slot *slot; @@ -1518,7 +1556,7 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, - int level); + int level, pte_t pte); static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, @@ -1530,7 +1568,7 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm, for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, range->start, range->end - 1, &iterator) ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn, - iterator.level); + iterator.level, range->arg.pte); return ret; } @@ -1552,8 +1590,22 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) return flush; } +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + bool flush = false; + + if (kvm_memslots_have_rmaps(kvm)) + flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmap); + + if (tdp_mmu_enabled) + flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range); + + return flush; +} + static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level) + struct kvm_memory_slot *slot, gfn_t gfn, int level, + pte_t unused) { u64 *sptep; struct rmap_iterator iter; @@ -1566,7 +1618,8 @@ static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, } static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level) + struct kvm_memory_slot *slot, gfn_t gfn, + int level, pte_t unused) { u64 *sptep; struct rmap_iterator iter; diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index f499e79b3a15..b4c1119cc48b 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -330,6 +330,22 @@ u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled) return spte; } +u64 
kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn) +{ + u64 new_spte; + + new_spte = old_spte & ~SPTE_BASE_ADDR_MASK; + new_spte |= (u64)new_pfn << PAGE_SHIFT; + + new_spte &= ~PT_WRITABLE_MASK; + new_spte &= ~shadow_host_writable_mask; + new_spte &= ~shadow_mmu_writable_mask; + + new_spte = mark_spte_for_access_track(new_spte); + + return new_spte; +} + u64 mark_spte_for_access_track(u64 spte) { if (spte_ad_enabled(spte)) diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index f5c600c52f83..a129951c9a88 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -496,6 +496,8 @@ static inline u64 restore_acc_track_spte(u64 spte) return spte; } +u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn); + void __init kvm_mmu_spte_module_init(void); void kvm_mmu_reset_all_pte_masks(void); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index b57095cee455..8eef3ed5fe04 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1217,6 +1217,52 @@ bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn); } +static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, + struct kvm_gfn_range *range) +{ + u64 new_spte; + + /* Huge pages aren't expected to be modified without first being zapped. */ + WARN_ON_ONCE(pte_huge(range->arg.pte) || range->start + 1 != range->end); + + if (iter->level != PG_LEVEL_4K || + !is_shadow_present_pte(iter->old_spte)) + return false; + + /* + * Note, when changing a read-only SPTE, it's not strictly necessary to + * zero the SPTE before setting the new PFN, but doing so preserves the + * invariant that the PFN of a present * leaf SPTE can never change. + * See handle_changed_spte(). 
+ */ + tdp_mmu_iter_set_spte(kvm, iter, 0); + + if (!pte_write(range->arg.pte)) { + new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte, + pte_pfn(range->arg.pte)); + + tdp_mmu_iter_set_spte(kvm, iter, new_spte); + } + + return true; +} + +/* + * Handle the changed_pte MMU notifier for the TDP MMU. + * data is a pointer to the new pte_t mapping the HVA specified by the MMU + * notifier. + * Returns non-zero if a flush is needed before releasing the MMU lock. + */ +bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + /* + * No need to handle the remote TLB flush under RCU protection, the + * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a + * shadow page. See the WARN on pfn_changed in handle_changed_spte(). + */ + return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn); +} + /* * Remove write access from all SPTEs at or above min_level that map GFNs * [start, end). Returns true if an SPTE has been changed and the TLBs need to diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index 6a2742078961..733a3aef3a96 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -32,6 +32,7 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, bool flush); bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, const struct kvm_memory_slot *slot, int min_level); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 31f6a6965066..91464161c800 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -256,6 +256,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER union kvm_mmu_notifier_arg { + pte_t pte; }; struct kvm_gfn_range { @@ -268,6 +269,7 @@ struct 
kvm_gfn_range { bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range); #endif enum { diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index faaf45ac716b..0dc9bb18c7ba 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -456,6 +456,21 @@ TRACE_EVENT(kvm_unmap_hva_range, __entry->start, __entry->end) ); +TRACE_EVENT(kvm_set_spte_hva, + TP_PROTO(unsigned long hva), + TP_ARGS(hva), + + TP_STRUCT__entry( + __field(unsigned long, hva) + ), + + TP_fast_assign( + __entry->hva = hva; + ), + + TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva) +); + TRACE_EVENT(kvm_age_hva, TP_PROTO(unsigned long start, unsigned long end), TP_ARGS(start, end), diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 1d6872475c0e..a04c31d60001 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -701,6 +701,48 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn return __kvm_handle_hva_range(kvm, &range); } +static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + /* + * Skipping invalid memslots is correct if and only change_pte() is + * surrounded by invalidate_range_{start,end}(), which is currently + * guaranteed by the primary MMU. If that ever changes, KVM needs to + * unmap the memslot instead of skipping the memslot to ensure that KVM + * doesn't hold references to the old PFN. 
+ */ + WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count)); + + if (range->slot->flags & KVM_MEMSLOT_INVALID) + return false; + + return kvm_set_spte_gfn(kvm, range); +} + +static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long address, + pte_t pte) +{ + struct kvm *kvm = mmu_notifier_to_kvm(mn); + const union kvm_mmu_notifier_arg arg = { .pte = pte }; + + trace_kvm_set_spte_hva(address); + + /* + * .change_pte() must be surrounded by .invalidate_range_{start,end}(). + * If mmu_invalidate_in_progress is zero, then no in-progress + * invalidations, including this one, found a relevant memslot at + * start(); rechecking memslots here is unnecessary. Note, a false + * positive (count elevated by a different invalidation) is sub-optimal + * but functionally ok. + */ + WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count)); + if (!READ_ONCE(kvm->mmu_invalidate_in_progress)) + return; + + kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn); +} + void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, unsigned long end) { @@ -887,6 +929,7 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { .clear_flush_young = kvm_mmu_notifier_clear_flush_young, .clear_young = kvm_mmu_notifier_clear_young, .test_young = kvm_mmu_notifier_test_young, + .change_pte = kvm_mmu_notifier_change_pte, .release = kvm_mmu_notifier_release, }; -- Gitee From 235b4b2390ed3c9cc91fc19a313663be4bdc7434 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 26 Oct 2024 16:46:29 +0800 Subject: [PATCH 1601/2138] anolis: revert "KVM: define __KVM_HAVE_GUEST_DEBUG unconditionally" ANBZ: #11534 Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4031 Reviewed-by: Juxin Gao --- arch/arm64/include/uapi/asm/kvm.h | 1 + arch/loongarch/include/uapi/asm/kvm.h | 1 + arch/powerpc/include/uapi/asm/kvm.h | 1 + arch/s390/include/uapi/asm/kvm.h | 1 + arch/x86/include/uapi/asm/kvm.h | 1 + 
include/uapi/linux/kvm.h | 7 ++----- 6 files changed, 7 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 57f8df4aaf28..f7ddd73a8c0f 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -37,6 +37,7 @@ #include #include +#define __KVM_HAVE_GUEST_DEBUG #define __KVM_HAVE_IRQ_LINE #define __KVM_HAVE_READONLY_MEM #define __KVM_HAVE_VCPU_EVENTS diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h index d65a2993a406..d619b943d20d 100644 --- a/arch/loongarch/include/uapi/asm/kvm.h +++ b/arch/loongarch/include/uapi/asm/kvm.h @@ -15,6 +15,7 @@ */ #define __KVM_HAVE_READONLY_MEM +#define __KVM_HAVE_GUEST_DEBUG #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_DIRTY_LOG_PAGE_OFFSET 64 diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 0572d5238b13..9f18fa090f1f 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -28,6 +28,7 @@ #define __KVM_HAVE_PPC_SMT #define __KVM_HAVE_IRQCHIP #define __KVM_HAVE_IRQ_LINE +#define __KVM_HAVE_GUEST_DEBUG /* Not always available, but if it is, this is the correct offset. 
*/ #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index b3ec6ddd6a13..abe926d43cbe 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -12,6 +12,7 @@ #include #define __KVM_S390 +#define __KVM_HAVE_GUEST_DEBUG /* Device control API: s390-specific devices */ #define KVM_DEV_FLIC_GET_ALL_IRQS 1 diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index d60c65c377c5..1a6a1f987949 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -40,6 +40,7 @@ #define __KVM_HAVE_IRQ_LINE #define __KVM_HAVE_MSI #define __KVM_HAVE_USER_NMI +#define __KVM_HAVE_GUEST_DEBUG #define __KVM_HAVE_MSIX #define __KVM_HAVE_MCE #define __KVM_HAVE_PIT_STATE2 diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 44010332ab25..ec5d3be77663 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -86,11 +86,6 @@ struct kvm_debug_guest { /* *** End of deprecated interfaces *** */ -/* - * Backwards-compatible definitions. 
- */ -#define __KVM_HAVE_GUEST_DEBUG - /* for KVM_SET_USER_MEMORY_REGION */ struct kvm_userspace_memory_region { __u32 slot; @@ -979,7 +974,9 @@ struct kvm_ppc_resize_hpt { /* Bug in KVM_SET_USER_MEMORY_REGION fixed: */ #define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21 #define KVM_CAP_USER_NMI 22 +#ifdef __KVM_HAVE_GUEST_DEBUG #define KVM_CAP_SET_GUEST_DEBUG 23 +#endif #ifdef __KVM_HAVE_PIT #define KVM_CAP_REINJECT_CONTROL 24 #endif -- Gitee From c55c24facffc9e19c89923b1f65e955e3a70c2cd Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 26 Oct 2024 16:47:37 +0800 Subject: [PATCH 1602/2138] anolis: revert "KVM: Convert KVM_ARCH_WANT_MMU_NOTIFIER to CONFIG_KVM_GENERIC_MMU_NOTIFIER" ANBZ: #11534 Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4031 Reviewed-by: Juxin Gao --- arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/kvm/Kconfig | 2 +- arch/loongarch/include/asm/kvm_host.h | 1 + arch/loongarch/kvm/Kconfig | 2 +- arch/mips/include/asm/kvm_host.h | 2 ++ arch/mips/kvm/Kconfig | 2 +- arch/powerpc/include/asm/kvm_host.h | 2 ++ arch/powerpc/kvm/Kconfig | 8 ++++---- arch/powerpc/kvm/powerpc.c | 5 ++++- arch/riscv/include/asm/kvm_host.h | 2 ++ arch/riscv/kvm/Kconfig | 2 +- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/Kconfig | 2 +- include/linux/kvm_host.h | 6 +++--- include/linux/kvm_types.h | 1 - virt/kvm/Kconfig | 4 ---- virt/kvm/kvm_main.c | 10 +++++----- 17 files changed, 32 insertions(+), 23 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 9d32ef2f7b90..b681c0b9f7d9 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -921,6 +921,8 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events); +#define KVM_ARCH_WANT_MMU_NOTIFIER + void kvm_arm_halt_guest(struct kvm *kvm); void kvm_arm_resume_guest(struct kvm *kvm); diff --git a/arch/arm64/kvm/Kconfig 
b/arch/arm64/kvm/Kconfig index 1a777715199f..83c1e09be42e 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -22,7 +22,7 @@ menuconfig KVM bool "Kernel-based Virtual Machine (KVM) support" depends on HAVE_KVM select KVM_GENERIC_HARDWARE_ENABLING - select KVM_GENERIC_MMU_NOTIFIER + select MMU_NOTIFIER select PREEMPT_NOTIFIERS select HAVE_KVM_CPU_RELAX_INTERCEPT select KVM_MMIO diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index f23d99fac993..27ca9b6b23a8 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -313,6 +313,7 @@ void kvm_flush_tlb_all(void); void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa); int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write); +#define KVM_ARCH_WANT_MMU_NOTIFIER void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig index 3a03121ecfc0..461a465e49fd 100644 --- a/arch/loongarch/kvm/Kconfig +++ b/arch/loongarch/kvm/Kconfig @@ -30,10 +30,10 @@ config KVM select HAVE_KVM_MSI select KVM_GENERIC_DIRTYLOG_READ_PROTECT select KVM_GENERIC_HARDWARE_ENABLING - select KVM_GENERIC_MMU_NOTIFIER select KVM_MMIO select KVM_XFER_TO_GUEST_WORK select SCHED_INFO + select MMU_NOTIFIER select PREEMPT_NOTIFIERS help Support hosting virtualized guest machines using diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 6743a57c1ab4..30d42657ac1b 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -810,6 +810,8 @@ int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn); pgd_t *kvm_pgd_alloc(void); void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); +#define 
KVM_ARCH_WANT_MMU_NOTIFIER + /* Emulation */ enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause); int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out); diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig index c04987d2ed2e..a8cdba75f98d 100644 --- a/arch/mips/kvm/Kconfig +++ b/arch/mips/kvm/Kconfig @@ -25,7 +25,7 @@ config KVM select HAVE_KVM_EVENTFD select HAVE_KVM_VCPU_ASYNC_IOCTL select KVM_MMIO - select KVM_GENERIC_MMU_NOTIFIER + select MMU_NOTIFIER select INTERVAL_TREE select KVM_GENERIC_HARDWARE_ENABLING help diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 4c509a65bb59..f9bd6b018cb0 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -62,6 +62,8 @@ #include +#define KVM_ARCH_WANT_MMU_NOTIFIER + #define HPTEG_CACHE_NUM (1 << 15) #define HPTEG_HASH_BITS_PTE 13 #define HPTEG_HASH_BITS_PTE_LONG 12 diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index b33358ee6424..902611954200 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -42,7 +42,7 @@ config KVM_BOOK3S_64_HANDLER config KVM_BOOK3S_PR_POSSIBLE bool select KVM_MMIO - select KVM_GENERIC_MMU_NOTIFIER + select MMU_NOTIFIER config KVM_BOOK3S_HV_POSSIBLE bool @@ -85,7 +85,7 @@ config KVM_BOOK3S_64_HV tristate "KVM for POWER7 and later using hypervisor mode in host" depends on KVM_BOOK3S_64 && PPC_POWERNV select KVM_BOOK3S_HV_POSSIBLE - select KVM_GENERIC_MMU_NOTIFIER + select MMU_NOTIFIER select CMA help Support running unmodified book3s_64 guest kernels in @@ -194,7 +194,7 @@ config KVM_E500V2 depends on !CONTEXT_TRACKING_USER select KVM select KVM_MMIO - select KVM_GENERIC_MMU_NOTIFIER + select MMU_NOTIFIER help Support running unmodified E500 guest kernels in virtual machines on E500v2 host processors. 
@@ -211,7 +211,7 @@ config KVM_E500MC select KVM select KVM_MMIO select KVM_BOOKE_HV - select KVM_GENERIC_MMU_NOTIFIER + select MMU_NOTIFIER help Support running unmodified E500MC/E5500/E6500 guest kernels in virtual machines on E500MC/E5500/E6500 host processors. diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 5780a4b21839..0e35668b2830 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -634,8 +634,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_SYNC_MMU: #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE r = hv_enabled; +#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER) + r = 1; +#else + r = 0; #endif - BUILD_BUG_ON(!IS_ENABLED(CONFIG_KVM_GENERIC_MMU_NOTIFIER)); break; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE case KVM_CAP_PPC_HTAB_FD: diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 77971843dd02..098a63bfb3d9 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -248,6 +248,8 @@ struct kvm_vcpu_arch { static inline void kvm_arch_sync_events(struct kvm *kvm) {} +#define KVM_ARCH_WANT_MMU_NOTIFIER + #define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12 void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid, diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig index ae2e05f050ec..dfc237d7875b 100644 --- a/arch/riscv/kvm/Kconfig +++ b/arch/riscv/kvm/Kconfig @@ -30,7 +30,7 @@ config KVM select KVM_GENERIC_HARDWARE_ENABLING select KVM_MMIO select KVM_XFER_TO_GUEST_WORK - select KVM_GENERIC_MMU_NOTIFIER + select MMU_NOTIFIER select PREEMPT_NOTIFIERS help Support hosting virtualized guest machines. 
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 264c0393c837..b96fe390a9c4 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2141,6 +2141,8 @@ enum { # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, 0) #endif +#define KVM_ARCH_WANT_MMU_NOTIFIER + int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v); int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); int kvm_cpu_has_extint(struct kvm_vcpu *v); diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index d42e832ffb4a..463732963a15 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -24,7 +24,7 @@ config KVM depends on HIGH_RES_TIMERS depends on X86_LOCAL_APIC select PREEMPT_NOTIFIERS - select KVM_GENERIC_MMU_NOTIFIER + select MMU_NOTIFIER select HAVE_KVM_IRQCHIP select HAVE_KVM_PFNCACHE select HAVE_KVM_IRQFD diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 91464161c800..646d3d851486 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -254,7 +254,7 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif -#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER +#ifdef KVM_ARCH_WANT_MMU_NOTIFIER union kvm_mmu_notifier_arg { pte_t pte; }; @@ -789,7 +789,7 @@ struct kvm { struct hlist_head irq_ack_notifier_list; #endif -#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER +#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) struct mmu_notifier mmu_notifier; unsigned long mmu_invalidate_seq; long mmu_invalidate_in_progress; @@ -1971,7 +1971,7 @@ extern const struct _kvm_stats_desc kvm_vm_stats_desc[]; extern const struct kvm_stats_header kvm_vcpu_stats_header; extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[]; -#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER +#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq) { if 
(unlikely(kvm->mmu_invalidate_in_progress)) diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 9d1f7835d8c1..6f4737d5046a 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -6,7 +6,6 @@ struct kvm; struct kvm_async_pf; struct kvm_device_ops; -struct kvm_gfn_range; struct kvm_interrupt; struct kvm_irq_routing_table; struct kvm_memory_slot; diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig index ecae2914c97e..484d0873061c 100644 --- a/virt/kvm/Kconfig +++ b/virt/kvm/Kconfig @@ -92,7 +92,3 @@ config HAVE_KVM_PM_NOTIFIER config KVM_GENERIC_HARDWARE_ENABLING bool - -config KVM_GENERIC_MMU_NOTIFIER - select MMU_NOTIFIER - bool diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a04c31d60001..7e8879de4db4 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -540,7 +540,7 @@ void kvm_destroy_vcpus(struct kvm *kvm) } EXPORT_SYMBOL_GPL(kvm_destroy_vcpus); -#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER +#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) { return container_of(mn, struct kvm, mmu_notifier); @@ -939,14 +939,14 @@ static int kvm_init_mmu_notifier(struct kvm *kvm) return mmu_notifier_register(&kvm->mmu_notifier, current->mm); } -#else /* !CONFIG_KVM_GENERIC_MMU_NOTIFIER */ +#else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */ static int kvm_init_mmu_notifier(struct kvm *kvm) { return 0; } -#endif /* CONFIG_KVM_GENERIC_MMU_NOTIFIER */ +#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */ #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER static int kvm_pm_notifier_call(struct notifier_block *bl, @@ -1266,7 +1266,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname) out_err_no_debugfs: kvm_coalesced_mmio_free(kvm); out_no_coalesced_mmio: -#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER +#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) if (kvm->mmu_notifier.ops) 
mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); #endif @@ -1326,7 +1326,7 @@ static void kvm_destroy_vm(struct kvm *kvm) kvm->buses[i] = NULL; } kvm_coalesced_mmio_free(kvm); -#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER +#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); /* * At this point, pending calls to invalidate_range_start() -- Gitee From 29ac922f9ae7044806af136724d2b6d29b72ece3 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 26 Oct 2024 16:48:37 +0800 Subject: [PATCH 1603/2138] anolis: revert "KVM: Delete the now unused kvm_arch_sched_in()" ANBZ: #11534 Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4031 Reviewed-by: Juxin Gao --- arch/arm64/include/asm/kvm_host.h | 1 + arch/loongarch/include/asm/kvm_host.h | 2 +- arch/mips/include/asm/kvm_host.h | 1 + arch/powerpc/include/asm/kvm_host.h | 1 + arch/riscv/include/asm/kvm_host.h | 1 + arch/s390/include/asm/kvm_host.h | 1 + arch/x86/kvm/pmu.c | 6 +++--- arch/x86/kvm/x86.c | 12 ++++++++++++ include/linux/kvm_host.h | 2 ++ virt/kvm/kvm_main.c | 1 + 10 files changed, 24 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index b681c0b9f7d9..a89b35070a35 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1056,6 +1056,7 @@ static inline bool kvm_system_needs_idmapped_vectors(void) } static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} void kvm_arm_init_debug(void); void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu); diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 27ca9b6b23a8..d28d70fea012 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -256,7 +256,6 @@ struct kvm_vcpu_arch { struct ipi_state ipi_state; /* cpucfg */ u32 
cpucfg[KVM_MAX_CPUCFG_REGS]; - /* paravirt steal time */ struct { u64 guest_addr; @@ -340,6 +339,7 @@ static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch) static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 30d42657ac1b..54a85f1d4f2c 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h @@ -892,6 +892,7 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {} static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index f9bd6b018cb0..14ee0dece853 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -879,6 +879,7 @@ struct kvm_vcpu_arch { static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} diff --git 
a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 098a63bfb3d9..1ebf20dfbaa6 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -247,6 +247,7 @@ struct kvm_vcpu_arch { }; static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} #define KVM_ARCH_WANT_MMU_NOTIFIER diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index b49bbcac635d..b039881c277a 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -1045,6 +1045,7 @@ extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc); extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc); static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {} static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index a36be3f5812f..da2d82e3a873 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -468,9 +468,9 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu) } /* - * Release unused perf_events if the corresponding guest MSRs weren't - * accessed during the last vCPU time slice (need_cleanup is set when - * the vCPU is scheduled back in). + * Unused perf_events are only released if the corresponding MSRs + * weren't accessed during the last vCPU time slice. kvm_arch_sched_in + * triggers KVM_REQ_PMU if cleanup is needed. 
*/ if (unlikely(pmu->need_cleanup)) kvm_pmu_cleanup(vcpu); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 97ce30712c66..d8e5d5392c32 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -12358,6 +12358,18 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu); EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu); +void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) +{ + struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); + + vcpu->arch.l1tf_flush_l1d = true; + if (pmu->version && unlikely(pmu->event_count)) { + pmu->need_cleanup = true; + kvm_make_request(KVM_REQ_PMU, vcpu); + } + static_call(kvm_x86_sched_in)(vcpu, cpu); +} + void kvm_arch_free_vm(struct kvm *kvm) { kfree(to_kvm_hv(kvm)->hv_pa_pg); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 646d3d851486..b97f9880af18 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1453,6 +1453,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg); int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu); +void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu); + void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 7e8879de4db4..4cd23e014e18 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -6090,6 +6090,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu) WRITE_ONCE(vcpu->ready, false); __this_cpu_write(kvm_running_vcpu, vcpu); + kvm_arch_sched_in(vcpu, cpu); kvm_arch_vcpu_load(vcpu, cpu); } -- Gitee From bcebdafc0a4937d67929809e330b4537e9aed441 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 26 Oct 2024 16:50:20 +0800 Subject: [PATCH 1604/2138] anolis: revert "KVM: Introduce vcpu->wants_to_run" ANBZ: #11534 Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4031 
Reviewed-by: Juxin Gao --- arch/arm64/kvm/arm.c | 2 +- arch/loongarch/kvm/vcpu.c | 2 +- arch/mips/kvm/mips.c | 2 +- arch/powerpc/kvm/powerpc.c | 2 +- arch/riscv/kvm/vcpu.c | 2 +- arch/s390/kvm/kvm-s390.c | 2 +- arch/x86/kvm/x86.c | 4 ++-- include/linux/kvm_host.h | 1 - virt/kvm/kvm_main.c | 3 --- 9 files changed, 8 insertions(+), 12 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index decc5f2af4d4..ffdc2c4d07ee 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -906,7 +906,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) vcpu_load(vcpu); - if (!vcpu->wants_to_run) { + if (run->immediate_exit) { ret = -EINTR; goto out; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 2c5505ffebb4..e9b397543fdf 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -1735,7 +1735,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_complete_iocsr_read(vcpu, run); } - if (!vcpu->wants_to_run) + if (run->immediate_exit) return r; /* Clear exit_reason */ diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index f1a99962027a..231ac052b506 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -436,7 +436,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) vcpu->mmio_needed = 0; } - if (!vcpu->wants_to_run) + if (vcpu->run->immediate_exit) goto out; lose_fpu(1); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 0e35668b2830..6cef200c2404 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -1858,7 +1858,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_sigset_activate(vcpu); - if (!vcpu->wants_to_run) + if (run->immediate_exit) r = -EINTR; else r = kvmppc_vcpu_run(vcpu); diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 4870e4658466..82229db1ce73 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -654,7 +654,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) return ret; } - if 
(!vcpu->wants_to_run) { + if (run->immediate_exit) { kvm_vcpu_srcu_read_unlock(vcpu); return -EINTR; } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 7c2308bae00c..348d030d2660 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -5048,7 +5048,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) if (vcpu->kvm->arch.pv.dumping) return -EINVAL; - if (!vcpu->wants_to_run) + if (kvm_run->immediate_exit) return -EINTR; if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS || diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d8e5d5392c32..7a044c4427f3 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -11176,7 +11176,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_vcpu_srcu_read_lock(vcpu); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { - if (!vcpu->wants_to_run) { + if (kvm_run->immediate_exit) { r = -EINTR; goto out; } @@ -11254,7 +11254,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) WARN_ON_ONCE(vcpu->mmio_needed); } - if (!vcpu->wants_to_run) { + if (kvm_run->immediate_exit) { r = -EINTR; goto out; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index b97f9880af18..f3f8e6112a7e 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -378,7 +378,6 @@ struct kvm_vcpu { bool dy_eligible; } spin_loop; #endif - bool wants_to_run; bool preempted; bool ready; struct kvm_vcpu_arch arch; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 4cd23e014e18..3a14fe491050 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4167,10 +4167,7 @@ static long kvm_vcpu_ioctl(struct file *filp, vcpu->stat.pid = current->pid; #endif } - vcpu->wants_to_run = !READ_ONCE(vcpu->run->immediate_exit); r = kvm_arch_vcpu_ioctl_run(vcpu); - vcpu->wants_to_run = false; - trace_kvm_userspace_exit(vcpu->run->exit_reason, r); break; } -- Gitee From 9407206877f180153e7278c75d40e6f98147e006 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Mon, 21 
Oct 2024 15:19:09 +0800 Subject: [PATCH 1605/2138] anolis: LoongArch: Fix cpu hotplug issue ANBZ: #11534 On LoongArch systems, there are two places that set the cpu numa node. One is the arch-specific function smp_prepare_boot_cpu(), the other is the generic function early_numa_node_init(). The latter will overwrite the numa node information. For a hot-added cpu without numa information, cpu_logical_map() fails to get its physical cpuid at the beginning since it is not enabled in the ACPI MADT table. So early_cpu_to_node() also fails to get the numa node for the hot-added cpu, and the generic function early_numa_node_init() will overwrite it with an incorrect numa node. APIs topo_get_cpu() and topo_add_cpu() are added here; as on other architectures, a logical cpu is allocated when parsing the MADT table. When parsing the SRAT table or hot-adding a cpu, the logical cpu is acquired by searching all allocated logical cpus for a matching physical id. It solves problems such as: 1. The boot cpu is not the first entry in the MADT table; previously the first entry would be overwritten by the boot cpu later. 2. A physical cpu id not present in the MADT table is invalid; previously, in later SRAT/hot-add cpu parsing, such an invalid physical cpu was added when detected. 3. For a hot-added cpu, its logical cpu is allocated during MADT table parsing, so early_cpu_to_node() can be used for the hot-added cpu and cpu_to_node() is correct for it.
Signed-off-by: Bibo Mao Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4031 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/smp.h | 3 ++ arch/loongarch/kernel/acpi.c | 24 ++++++++++------ arch/loongarch/kernel/setup.c | 47 ++++++++++++++++++++++++++++++++ arch/loongarch/kernel/smp.c | 8 ++---- 4 files changed, 69 insertions(+), 13 deletions(-) diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h index 630e5ebec21c..cc232901e4dd 100644 --- a/arch/loongarch/include/asm/smp.h +++ b/arch/loongarch/include/asm/smp.h @@ -102,4 +102,7 @@ static inline void __cpu_die(unsigned int cpu) } #endif +int topo_add_cpu(int physid); +int topo_get_cpu(int physid); + #endif /* __ASM_SMP_H */ diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 58819b017ba8..17dc28821a9d 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -71,10 +71,10 @@ int set_processor_mask(u32 id, u32 flags) return -ENODEV; } - if (cpuid == loongson_sysconf.boot_cpu_id) - cpu = 0; - else - cpu = cpumask_next_zero(-1, cpu_present_mask); + + cpu = topo_add_cpu(cpuid); + if (cpu < 0) + return -EEXIST; if (flags & ACPI_MADT_ENABLED) { num_processors++; @@ -197,8 +197,6 @@ void __init acpi_boot_table_init(void) goto fdt_earlycon; } - loongson_sysconf.boot_cpu_id = read_csr_cpuid(); - /* * Process the Multiple APIC Description Table (MADT), if present */ @@ -248,7 +246,7 @@ void __init numa_set_distance(int from, int to, int distance) void __init acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { - int pxm, node; + int pxm, node, cpu; if (srat_disabled()) return; @@ -277,6 +275,11 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) return; } + cpu = topo_get_cpu(pa->apic_id); + /* Check whether apic_id exists in MADT table */ + if (cpu < 0) + return; + early_numa_add_cpu(pa->apic_id, node); set_cpuid_to_node(pa->apic_id, node); @@ -315,12 +318,17 @@ int 
acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu { int cpu; - cpu = set_processor_mask(physid, ACPI_MADT_ENABLED); + cpu = topo_get_cpu(physid); + /* Check whether apic_id exists in MADT table */ if (cpu < 0) { pr_info(PREFIX "Unable to map lapic to logical cpu number\n"); return cpu; } + num_processors++; + set_cpu_present(cpu, true); + __cpu_number_map[physid] = cpu; + __cpu_logical_map[cpu] = physid; acpi_map_cpu2node(handle, cpu, physid); *pcpu = cpu; diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 7bdeed9fbc26..77077dd324ff 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -71,6 +71,8 @@ EXPORT_SYMBOL(cpu_data); struct loongson_board_info b_info; static const char dmi_empty_string[] = " "; +static int possible_cpus; +static bool bsp_added; /* * Setup information @@ -374,6 +376,50 @@ static void __init bootcmdline_init(char **cmdline_p) *cmdline_p = boot_command_line; } +int topo_get_cpu(int physid) +{ + int i; + + for (i = 0; i < possible_cpus; i++) + if (cpu_logical_map(i) == physid) + break; + + if (i == possible_cpus) + return -ENOENT; + + return i; +} + +int topo_add_cpu(int physid) +{ + int cpu; + + if (!bsp_added && (physid == loongson_sysconf.boot_cpu_id)) { + bsp_added = true; + return 0; + } + + cpu = topo_get_cpu(physid); + if (cpu >= 0) { + pr_warn("Adding duplicated physical cpuid 0x%x\n", physid); + return -EEXIST; + } + + if (possible_cpus >= nr_cpu_ids) + return -ERANGE; + + __cpu_logical_map[possible_cpus] = physid; + cpu = possible_cpus++; + return cpu; +} + +static void __init topo_init(void) +{ + loongson_sysconf.boot_cpu_id = read_csr_cpuid(); + __cpu_logical_map[0] = loongson_sysconf.boot_cpu_id; + possible_cpus++; +} + static void __init writecombine_detect(void) { u64 cpuname; @@ -398,6 +444,7 @@ void __init platform_init(void) { arch_reserve_vmcore(); arch_parse_crashkernel(); + topo_init(); #ifdef CONFIG_ACPI_TABLE_UPGRADE acpi_table_upgrade(); 
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index 35b2fc29f125..d4717fc9e1db 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -288,11 +288,9 @@ static void __init fdt_smp_setup(void) if (cpuid >= nr_cpu_ids) continue; - if (cpuid == loongson_sysconf.boot_cpu_id) { - cpu = 0; - } else { - cpu = cpumask_next_zero(-1, cpu_present_mask); - } + cpu = topo_add_cpu(cpuid); + if (cpu < 0) + continue; num_processors++; set_cpu_possible(cpu, true); -- Gitee From 8be37539d18e5ec00c8970da216f55860e4eeb34 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 28 Jun 2024 16:33:56 +0800 Subject: [PATCH 1606/2138] anolis: x86/mce: Add NMIs setup in machine_check func ANBZ: #9448 This will lead to console_owner_lock issue and HPET dead loop issue. For example, The HPET dead loop issue: CPU x CPU x ---- ---- read_hpet() arch_spin_trylock(&hpet.lock) [CPU x got the hpet.lock] #MCE happened do_machine_check() mce_panic() panic() kmsg_dump() pstore_dump() pstore_record_init() ktime_get_real_fast_ns() read_hpet() [dead loops] This may lead to read_hpet dead loops. The console_owner_lock issue is similar. CPU x CPU x ---- ---- vprintk vprintk_default vprintk_emit console_trylock_spinning ...(&console_owner_lock) #MCE happened do_machine_check mce_panic panic console_flush_on_panic console_emit_next_record console_lock_spinning_enable ...(&console_owner_lock) To avoid these issues, add NMIs setup When Handling #MC Exceptions. 
Signed-off-by: leoliu-oc Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/3447 --- arch/x86/kernel/cpu/mce/core.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index c9c9ebbb3268..1efb2a807981 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -2122,11 +2122,17 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs) static __always_inline void exc_machine_check_user(struct pt_regs *regs) { + irqentry_state_t irq_state; + + irq_state = irqentry_nmi_enter(regs); + irqentry_enter_from_user_mode(regs); do_machine_check(regs); irqentry_exit_to_user_mode(regs); + + irqentry_nmi_exit(regs, irq_state); } #ifdef CONFIG_X86_64 -- Gitee From 922c36fd2c22ccaa32198c2d15cd16f110441612 Mon Sep 17 00:00:00 2001 From: Reinette Chatre Date: Mon, 1 Apr 2024 11:16:39 -0700 Subject: [PATCH 1607/2138] x86/resctrl: Fix uninitialized memory read when last CPU of domain goes offline ANBZ: #11576 commit c3eeb1ffc6a88af9b002e22be0f70851759be03a upstream. [ shawnwang: move the modification from arch/x86/kernel/cpu/resctrl to fs/resctrl since we introduced mpam support ] Tony encountered this OOPS when the last CPU of a domain goes offline while running a kernel built with CONFIG_NO_HZ_FULL: BUG: kernel NULL pointer dereference, address: 0000000000000000 #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page PGD 0 Oops: 0000 [#1] PREEMPT SMP NOPTI ... RIP: 0010:__find_nth_andnot_bit+0x66/0x110 ... Call Trace: ? __die() ? page_fault_oops() ? exc_page_fault() ? 
asm_exc_page_fault() cpumask_any_housekeeping() mbm_setup_overflow_handler() resctrl_offline_cpu() resctrl_arch_offline_cpu() cpuhp_invoke_callback() cpuhp_thread_fun() smpboot_thread_fn() kthread() ret_from_fork() ret_from_fork_asm() The NULL pointer dereference is encountered while searching for another online CPU in the domain (of which there are none) that can be used to run the MBM overflow handler. Because the kernel is configured with CONFIG_NO_HZ_FULL the search for another CPU (in its effort to prefer those CPUs that aren't marked nohz_full) consults the mask representing the nohz_full CPUs, tick_nohz_full_mask. On a kernel with CONFIG_CPUMASK_OFFSTACK=y tick_nohz_full_mask is not allocated unless the kernel is booted with the "nohz_full=" parameter and because of that any access to tick_nohz_full_mask needs to be guarded with tick_nohz_full_enabled(). Replace the IS_ENABLED(CONFIG_NO_HZ_FULL) with tick_nohz_full_enabled(). The latter ensures tick_nohz_full_mask can be accessed safely and can be used whether kernel is built with CONFIG_NO_HZ_FULL enabled or not. [ Use Ingo's suggestion that combines the two NO_HZ checks into one. 
] Fixes: a4846aaf3945 ("x86/resctrl: Add cpumask_any_housekeeping() for limbo/overflow") Reported-by: Tony Luck Signed-off-by: Reinette Chatre Signed-off-by: Ingo Molnar Reviewed-by: Babu Moger Link: https://lore.kernel.org/r/ff8dfc8d3dcb04b236d523d1e0de13d2ef585223.1711993956.git.reinette.chatre@intel.com Closes: https://lore.kernel.org/lkml/ZgIFT5gZgIQ9A9G7@agluck-desk3/ Signed-off-by: Shawn Wang Reviewed-by: Zelin Deng Reviewed-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4047 --- fs/resctrl/internal.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h index f73267762a87..b8afeee36fff 100644 --- a/fs/resctrl/internal.h +++ b/fs/resctrl/internal.h @@ -33,7 +33,8 @@ cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu) else cpu = cpumask_any_but(mask, exclude_cpu); - if (!IS_ENABLED(CONFIG_NO_HZ_FULL)) + /* Only continue if tick_nohz_full_mask has been initialized. */ + if (!tick_nohz_full_enabled()) return cpu; /* If the CPU picked isn't marked nohz_full nothing more needs doing. */ -- Gitee From 6b573947df917b1903d474b0ad55431f426527d0 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 18 Jun 2024 15:01:52 +0100 Subject: [PATCH 1608/2138] x86/resctrl: Don't try to free nonexistent RMIDs ANBZ: #11576 commit 739c9765793e5794578a64aab293c58607f1826a upstream. [ shawnwang: move the modification from arch/x86/kernel/cpu/resctrl to fs/resctrl since we introduced mpam support ] Commit 6791e0ea3071 ("x86/resctrl: Access per-rmid structures by index") adds logic to map individual monitoring groups into a global index space used for tracking allocated RMIDs. Attempts to free the default RMID are ignored in free_rmid(), and this works fine on x86. 
With arm64 MPAM, there is a latent bug here however: on platforms with no monitors exposed through resctrl, each control group still gets a different monitoring group ID as seen by the hardware, since the CLOSID always forms part of the monitoring group ID. This means that when removing a control group, the code may try to free this group's default monitoring group RMID for real. If there are no monitors however, the RMID tracking table rmid_ptrs[] would be a waste of memory and is never allocated, leading to a splat when free_rmid() tries to dereference the table. One option would be to treat RMID 0 as special for every CLOSID, but this would be ugly since bookkeeping still needs to be done for these monitoring group IDs when there are monitors present in the hardware. Instead, add a gating check of resctrl_arch_mon_capable() in free_rmid(), and just do nothing if the hardware doesn't have monitors. This fix mirrors the gating checks already present in mkdir_rdt_prepare_rmid_alloc() and elsewhere. No functional change on x86. [ bp: Massage commit message. ] Fixes: 6791e0ea3071 ("x86/resctrl: Access per-rmid structures by index") Signed-off-by: Dave Martin Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Reinette Chatre Tested-by: Reinette Chatre Link: https://lore.kernel.org/r/20240618140152.83154-1-Dave.Martin@arm.com Signed-off-by: Shawn Wang Reviewed-by: Zelin Deng Reviewed-by: Shuai Xue Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4047 --- fs/resctrl/monitor.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c index 51baa0f71b65..ea4183cc48ad 100644 --- a/fs/resctrl/monitor.c +++ b/fs/resctrl/monitor.c @@ -312,7 +312,8 @@ void free_rmid(u32 closid, u32 rmid) * allows architectures that ignore the closid parameter to avoid an * unnecessary check. 
*/ - if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, + if (!resctrl_arch_mon_capable() || + idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, RESCTRL_RESERVED_RMID)) return; -- Gitee From 1404bbd8ec4569f4f98d606981a14a06ab93c26c Mon Sep 17 00:00:00 2001 From: gaojuxin Date: Wed, 30 Oct 2024 17:32:06 +0800 Subject: [PATCH 1609/2138] anolis: drm/ast_loongson: override drm/ast driver ANBZ: #11571 For remove patch: drm/ast: Convert ast to SHMEM Rebase ast driver to d95dcfc4e3e747b7cee9077bfd18f6e5ccab1d12 Continue to revert to vram solution Signed-off-by: Dongyan Qian Signed-off-by: gaojuxin Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4045 --- arch/loongarch/configs/anolis_defconfig | 2 +- arch/loongarch/configs/loongson3_defconfig | 2 +- drivers/gpu/drm/Kconfig | 2 + drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/ast_loongson/Kconfig | 14 + drivers/gpu/drm/ast_loongson/Makefile | 8 + drivers/gpu/drm/ast_loongson/ast_dp.c | 299 +++ drivers/gpu/drm/ast_loongson/ast_dp501.c | 429 ++++ .../gpu/drm/ast_loongson/ast_dram_tables.h | 125 + drivers/gpu/drm/ast_loongson/ast_drv.c | 231 ++ drivers/gpu/drm/ast_loongson/ast_drv.h | 528 +++++ drivers/gpu/drm/ast_loongson/ast_i2c.c | 170 ++ drivers/gpu/drm/ast_loongson/ast_main.c | 486 ++++ drivers/gpu/drm/ast_loongson/ast_mm.c | 101 + drivers/gpu/drm/ast_loongson/ast_mode.c | 1881 +++++++++++++++ drivers/gpu/drm/ast_loongson/ast_post.c | 2090 +++++++++++++++++ drivers/gpu/drm/ast_loongson/ast_tables.h | 342 +++ 17 files changed, 6709 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/ast_loongson/Kconfig create mode 100644 drivers/gpu/drm/ast_loongson/Makefile create mode 100644 drivers/gpu/drm/ast_loongson/ast_dp.c create mode 100644 drivers/gpu/drm/ast_loongson/ast_dp501.c create mode 100644 drivers/gpu/drm/ast_loongson/ast_dram_tables.h create mode 100644 drivers/gpu/drm/ast_loongson/ast_drv.c create mode 100644 drivers/gpu/drm/ast_loongson/ast_drv.h create 
mode 100644 drivers/gpu/drm/ast_loongson/ast_i2c.c create mode 100644 drivers/gpu/drm/ast_loongson/ast_main.c create mode 100644 drivers/gpu/drm/ast_loongson/ast_mm.c create mode 100644 drivers/gpu/drm/ast_loongson/ast_mode.c create mode 100644 drivers/gpu/drm/ast_loongson/ast_post.c create mode 100644 drivers/gpu/drm/ast_loongson/ast_tables.h diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig index c94c2e59b383..1ab6f0003830 100644 --- a/arch/loongarch/configs/anolis_defconfig +++ b/arch/loongarch/configs/anolis_defconfig @@ -5232,7 +5232,7 @@ CONFIG_DRM_NOUVEAU_BACKLIGHT=y # CONFIG_DRM_VGEM is not set CONFIG_DRM_VKMS=m CONFIG_DRM_UDL=m -CONFIG_DRM_AST=y +CONFIG_DRM_AST_LOONGSON=y CONFIG_DRM_MGAG200=m CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 5359ede7101e..0b27346f6140 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -1477,7 +1477,7 @@ CONFIG_DRM_AMDGPU_USERPTR=y CONFIG_DRM_NOUVEAU=m CONFIG_DRM_VKMS=m CONFIG_DRM_UDL=m -CONFIG_DRM_AST=y +CONFIG_DRM_AST_LOONGSON=y CONFIG_DRM_MGAG200=m CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 1c17d051c98f..ffb759c0bd36 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -306,6 +306,8 @@ source "drivers/gpu/drm/udl/Kconfig" source "drivers/gpu/drm/ast/Kconfig" +source "drivers/gpu/drm/ast_loongson/Kconfig" + source "drivers/gpu/drm/mgag200/Kconfig" source "drivers/gpu/drm/armada/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 68d2eefe2c25..ff0e5faff9d9 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -158,6 +158,7 @@ obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/ obj-$(CONFIG_DRM_GMA500) += gma500/ obj-$(CONFIG_DRM_UDL) += udl/ obj-$(CONFIG_DRM_AST) += ast/ +obj-$(CONFIG_DRM_AST_LOONGSON) += 
ast_loongson/ obj-$(CONFIG_DRM_ARMADA) += armada/ obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc/ obj-y += renesas/ diff --git a/drivers/gpu/drm/ast_loongson/Kconfig b/drivers/gpu/drm/ast_loongson/Kconfig new file mode 100644 index 000000000000..40af6934ac36 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +config DRM_AST_LOONGSON + tristate "AST server chips for Loongson Platform" + depends on DRM && PCI && MMU && LOONGARCH + select DRM_KMS_HELPER + select DRM_VRAM_HELPER + select DRM_TTM + select DRM_TTM_HELPER + help + Say yes for experimental AST GPU driver. Do not enable + this driver without having a working -modesetting, + and a version of AST that knows to fail if KMS + is bound to the driver. These GPUs are commonly found + in server chipsets. diff --git a/drivers/gpu/drm/ast_loongson/Makefile b/drivers/gpu/drm/ast_loongson/Makefile new file mode 100644 index 000000000000..02d40f992f5a --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the drm device driver. This driver provides support for the +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. + +ast-y := ast_drv.o ast_i2c.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o ast_dp.o + +obj-$(CONFIG_DRM_AST_LOONGSON) := ast.o diff --git a/drivers/gpu/drm/ast_loongson/ast_dp.c b/drivers/gpu/drm/ast_loongson/ast_dp.c new file mode 100644 index 000000000000..b7e1f51d558b --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_dp.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2021, ASPEED Technology Inc. 
+// Authors: KuoHsiang Chou + +#include +#include +#include +#include "ast_drv.h" + +int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata) +{ + struct ast_private *ast = to_ast_private(dev); + u8 i = 0, j = 0; + + /* + * CRD1[b5]: DP MCU FW is executing + * CRDC[b0]: DP link success + * CRDF[b0]: DP HPD + * CRE5[b0]: Host reading EDID process is done + */ + if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, + ASTDP_MCU_FW_EXECUTING) && + ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, + ASTDP_LINK_SUCCESS) && + ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD) && + ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, + ASTDP_HOST_EDID_READ_DONE_MASK))) { + goto err_astdp_edid_not_ready; + } + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, + (u8)~ASTDP_HOST_EDID_READ_DONE_MASK, 0x00); + + for (i = 0; i < 32; i++) { + /* + * CRE4[7:0]: Read-Pointer for EDID (Unit: 4bytes); valid range: 0~64 + */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE4, + ASTDP_AND_CLEAR_MASK, (u8)i); + j = 0; + + /* + * CRD7[b0]: valid flag for EDID + * CRD6[b0]: mirror read pointer for EDID + */ + while ((ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD7, + ASTDP_EDID_VALID_FLAG_MASK) != + 0x01) || + (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD6, + ASTDP_EDID_READ_POINTER_MASK) != + i)) { + /* + * Delay are getting longer with each retry. + * 1. The Delays are often 2 loops when users request "Display Settings" + * of right-click of mouse. + * 2. The Delays are often longer a lot when system resume from S3/S4. 
+ */ + mdelay(j + 1); + + if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, + 0xD1, + ASTDP_MCU_FW_EXECUTING) && + ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, + 0xDC, + ASTDP_LINK_SUCCESS) && + ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, + 0xDF, ASTDP_HPD))) { + goto err_astdp_jump_out_loop_of_edid; + } + + j++; + if (j > 200) + goto err_astdp_jump_out_loop_of_edid; + } + + *(ediddata) = ast_get_index_reg_mask( + ast, AST_IO_CRTC_PORT, 0xD8, ASTDP_EDID_READ_DATA_MASK); + *(ediddata + 1) = ast_get_index_reg_mask( + ast, AST_IO_CRTC_PORT, 0xD9, ASTDP_EDID_READ_DATA_MASK); + *(ediddata + 2) = ast_get_index_reg_mask( + ast, AST_IO_CRTC_PORT, 0xDA, ASTDP_EDID_READ_DATA_MASK); + *(ediddata + 3) = ast_get_index_reg_mask( + ast, AST_IO_CRTC_PORT, 0xDB, ASTDP_EDID_READ_DATA_MASK); + + if (i == 31) { + /* + * For 128-bytes EDID_1.3, + * 1. Add the value of Bytes-126 to Bytes-127. + * The Bytes-127 is Checksum. Sum of all 128bytes should + * equal 0 (mod 256). + * 2. Modify Bytes-126 to be 0. + * The Bytes-126 indicates the Number of extensions to + * follow. 0 represents noextensions. 
+ */ + *(ediddata + 3) = *(ediddata + 3) + *(ediddata + 2); + *(ediddata + 2) = 0; + } + + ediddata += 4; + } + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, + (u8)~ASTDP_HOST_EDID_READ_DONE_MASK, + ASTDP_HOST_EDID_READ_DONE); + + return 0; + +err_astdp_jump_out_loop_of_edid: + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, + (u8)~ASTDP_HOST_EDID_READ_DONE_MASK, + ASTDP_HOST_EDID_READ_DONE); + return (~(j + 256) + 1); + +err_astdp_edid_not_ready: + if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, + ASTDP_MCU_FW_EXECUTING))) + return (~0xD1 + 1); + if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, + ASTDP_LINK_SUCCESS))) + return (~0xDC + 1); + if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD))) + return (~0xDF + 1); + if (!(ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, + ASTDP_HOST_EDID_READ_DONE_MASK))) + return (~0xE5 + 1); + + return 0; +} + +/* + * Launch Aspeed DP + */ +void ast_dp_launch(struct drm_device *dev, u8 bPower) +{ + u32 i = 0, j = 0, WaitCount = 1; + u8 bDPTX = 0; + u8 bDPExecute = 1; + + struct ast_private *ast = to_ast_private(dev); + // S3 come back, need more time to wait BMC ready. + if (bPower) + WaitCount = 300; + + // Wait total count by different condition. + for (j = 0; j < WaitCount; j++) { + bDPTX = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, + TX_TYPE_MASK); + + if (bDPTX) + break; + + msleep(100); + } + + // 0xE : ASTDP with DPMCU FW handling + if (bDPTX == ASTDP_DPMCU_TX) { + // Wait one second then timeout. + i = 0; + + while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, + COPROCESSOR_LAUNCH) != + COPROCESSOR_LAUNCH) { + i++; + // wait 100 ms + msleep(100); + + if (i >= 10) { + // DP would not be ready. 
+ bDPExecute = 0; + break; + } + } + + if (bDPExecute) + ast->tx_chip_types |= BIT(AST_TX_ASTDP); + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, + (u8)~ASTDP_HOST_EDID_READ_DONE_MASK, + ASTDP_HOST_EDID_READ_DONE); + } +} + +void ast_dp_power_on_off(struct drm_device *dev, bool on) +{ + struct ast_private *ast = to_ast_private(dev); + // Read and Turn off DP PHY sleep + u8 bE3 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, + AST_DP_VIDEO_ENABLE); + + // Turn on DP PHY sleep + if (!on) + bE3 |= AST_DP_PHY_SLEEP; + + // DP Power on/off + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, + (u8)~AST_DP_PHY_SLEEP, bE3); +} + +void ast_dp_set_on_off(struct drm_device *dev, bool on) +{ + struct ast_private *ast = to_ast_private(dev); + u8 video_on_off = on; + + // Video On/Off + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, + (u8)~AST_DP_VIDEO_ENABLE, on); + + // If DP plug in and link successful then check video on / off status + if (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDC, + ASTDP_LINK_SUCCESS) && + ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, ASTDP_HPD)) { + video_on_off <<= 4; + while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xDF, + ASTDP_MIRROR_VIDEO_ENABLE) != + video_on_off) { + // wait 1 ms + mdelay(1); + } + } +} + +void ast_dp_set_mode(struct drm_crtc *crtc, + struct ast_vbios_mode_info *vbios_mode) +{ + struct ast_private *ast = to_ast_private(crtc->dev); + + u32 ulRefreshRateIndex; + u8 ModeIdx; + + ulRefreshRateIndex = vbios_mode->enh_table->refresh_rate_index - 1; + + switch (crtc->mode.crtc_hdisplay) { + case 320: + ModeIdx = ASTDP_320x240_60; + break; + case 400: + ModeIdx = ASTDP_400x300_60; + break; + case 512: + ModeIdx = ASTDP_512x384_60; + break; + case 640: + ModeIdx = (ASTDP_640x480_60 + (u8)ulRefreshRateIndex); + break; + case 800: + ModeIdx = (ASTDP_800x600_56 + (u8)ulRefreshRateIndex); + break; + case 1024: + ModeIdx = (ASTDP_1024x768_60 + (u8)ulRefreshRateIndex); + break; + case 1152: + ModeIdx = 
ASTDP_1152x864_75; + break; + case 1280: + if (crtc->mode.crtc_vdisplay == 800) + ModeIdx = + (ASTDP_1280x800_60_RB - (u8)ulRefreshRateIndex); + else // 1024 + ModeIdx = (ASTDP_1280x1024_60 + (u8)ulRefreshRateIndex); + break; + case 1360: + case 1366: + ModeIdx = ASTDP_1366x768_60; + break; + case 1440: + ModeIdx = (ASTDP_1440x900_60_RB - (u8)ulRefreshRateIndex); + break; + case 1600: + if (crtc->mode.crtc_vdisplay == 900) + ModeIdx = + (ASTDP_1600x900_60_RB - (u8)ulRefreshRateIndex); + else //1200 + ModeIdx = ASTDP_1600x1200_60; + break; + case 1680: + ModeIdx = (ASTDP_1680x1050_60_RB - (u8)ulRefreshRateIndex); + break; + case 1920: + if (crtc->mode.crtc_vdisplay == 1080) + ModeIdx = ASTDP_1920x1080_60; + else //1200 + ModeIdx = ASTDP_1920x1200_60; + break; + default: + return; + } + + /* + * CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp) + * CRE1[7:0]: MISC1 (default: 0x00) + * CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50) + */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE0, + ASTDP_AND_CLEAR_MASK, ASTDP_MISC0_24bpp); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE1, + ASTDP_AND_CLEAR_MASK, ASTDP_MISC1); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE2, + ASTDP_AND_CLEAR_MASK, ModeIdx); +} diff --git a/drivers/gpu/drm/ast_loongson/ast_dp501.c b/drivers/gpu/drm/ast_loongson/ast_dp501.c new file mode 100644 index 000000000000..39474bff1aea --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_dp501.c @@ -0,0 +1,429 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +#include "ast_drv.h" + +MODULE_FIRMWARE("ast_dp501_fw.bin"); + +static void ast_release_firmware(void *data) +{ + struct ast_private *ast = data; + + release_firmware(ast->dp501_fw); + ast->dp501_fw = NULL; +} + +static int ast_load_dp501_microcode(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + int ret; + + ret = request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev); + if (ret) + return ret; + + return 
devm_add_action_or_reset(dev->dev, ast_release_firmware, ast); +} + +static void send_ack(struct ast_private *ast) +{ + u8 sendack; + + sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff); + sendack |= 0x80; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack); +} + +static void send_nack(struct ast_private *ast) +{ + u8 sendack; + + sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff); + sendack &= ~0x80; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack); +} + +static bool wait_ack(struct ast_private *ast) +{ + u8 waitack; + u32 retry = 0; + + do { + waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, + 0xff); + waitack &= 0x80; + udelay(100); + } while ((!waitack) && (retry++ < 1000)); + + if (retry < 1000) + return true; + else + return false; +} + +static bool wait_nack(struct ast_private *ast) +{ + u8 waitack; + u32 retry = 0; + + do { + waitack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd2, + 0xff); + waitack &= 0x80; + udelay(100); + } while ((waitack) && (retry++ < 1000)); + + if (retry < 1000) + return true; + else + return false; +} + +static void set_cmd_trigger(struct ast_private *ast) +{ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40); +} + +static void clear_cmd_trigger(struct ast_private *ast) +{ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00); +} + +static bool ast_write_cmd(struct drm_device *dev, u8 data) +{ + struct ast_private *ast = to_ast_private(dev); + int retry = 0; + + if (wait_nack(ast)) { + send_nack(ast); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data); + send_ack(ast); + set_cmd_trigger(ast); + do { + if (wait_ack(ast)) { + clear_cmd_trigger(ast); + send_nack(ast); + return true; + } + } while (retry++ < 100); + } + clear_cmd_trigger(ast); + send_nack(ast); + return false; +} + +static bool ast_write_data(struct drm_device *dev, u8 data) +{ + struct ast_private *ast = to_ast_private(dev); + + if 
(wait_nack(ast)) { + send_nack(ast); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, data); + send_ack(ast); + if (wait_ack(ast)) { + send_nack(ast); + return true; + } + } + send_nack(ast); + return false; +} + +void ast_set_dp501_video_output(struct drm_device *dev, u8 mode) +{ + ast_write_cmd(dev, 0x40); + ast_write_data(dev, mode); + + /* + * msleep < 20ms can sleep for up to 20ms; + * see Documentation/timers/timers-howto.rst + */ + msleep(20); +} + +static u32 get_fw_base(struct ast_private *ast) +{ + return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff; +} + +bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size) +{ + struct ast_private *ast = to_ast_private(dev); + u32 i, data; + u32 boot_address; + + if (ast->config_mode != ast_use_p2a) + return false; + + data = ast_mindwm(ast, 0x1e6e2100) & 0x01; + if (data) { + boot_address = get_fw_base(ast); + for (i = 0; i < size; i += 4) + *(u32 *)(addr + i) = ast_mindwm(ast, boot_address + i); + return true; + } + return false; +} + +static bool ast_launch_m68k(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + u32 i, data, len = 0; + u32 boot_address; + u8 *fw_addr = NULL; + u8 jreg; + + if (ast->config_mode != ast_use_p2a) + return false; + + data = ast_mindwm(ast, 0x1e6e2100) & 0x01; + if (!data) { + if (ast->dp501_fw_addr) { + fw_addr = ast->dp501_fw_addr; + len = 32 * 1024; + } else { + if (!ast->dp501_fw && ast_load_dp501_microcode(dev) < 0) + return false; + + fw_addr = (u8 *)ast->dp501_fw->data; + len = ast->dp501_fw->size; + } + /* Get BootAddress */ + ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8); + data = ast_mindwm(ast, 0x1e6e0004); + switch (data & 0x03) { + case 0: + boot_address = 0x44000000; + break; + default: + case 1: + boot_address = 0x48000000; + break; + case 2: + boot_address = 0x50000000; + break; + case 3: + boot_address = 0x60000000; + break; + } + boot_address -= 0x200000; /* -2MB */ + + /* copy image to buffer */ + for (i = 0; i < len; i += 4) { + 
data = *(u32 *)(fw_addr + i); + ast_moutdwm(ast, boot_address + i, data); + } + + /* Init SCU */ + ast_moutdwm(ast, 0x1e6e2000, 0x1688a8a8); + + /* Launch FW */ + ast_moutdwm(ast, 0x1e6e2104, 0x80000000 + boot_address); + ast_moutdwm(ast, 0x1e6e2100, 1); + + /* Update Scratch */ + data = ast_mindwm(ast, 0x1e6e2040) & + 0xfffff1ff; /* D[11:9] = 100b: UEFI handling */ + data |= 0x800; + ast_moutdwm(ast, 0x1e6e2040, data); + + jreg = ast_get_index_reg_mask( + ast, AST_IO_CRTC_PORT, 0x99, + 0xfc); /* D[1:0]: Reserved Video Buffer */ + jreg |= 0x02; + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x99, jreg); + } + return true; +} + +bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata) +{ + struct ast_private *ast = to_ast_private(dev); + u32 i, boot_address, offset, data; + u32 *pEDIDidx; + + if (ast->config_mode == ast_use_p2a) { + boot_address = get_fw_base(ast); + + /* validate FW version */ + offset = AST_DP501_GBL_VERSION; + data = ast_mindwm(ast, boot_address + offset); + if ((data & AST_DP501_FW_VERSION_MASK) != + AST_DP501_FW_VERSION_1) + return false; + + /* validate PnP Monitor */ + offset = AST_DP501_PNPMONITOR; + data = ast_mindwm(ast, boot_address + offset); + if (!(data & AST_DP501_PNP_CONNECTED)) + return false; + + /* Read EDID */ + offset = AST_DP501_EDID_DATA; + for (i = 0; i < 128; i += 4) { + data = ast_mindwm(ast, boot_address + offset + i); + pEDIDidx = (u32 *)(ediddata + i); + *pEDIDidx = data; + } + } else { + if (!ast->dp501_fw_buf) + return false; + + /* dummy read */ + offset = 0x0000; + data = readl(ast->dp501_fw_buf + offset); + + /* validate FW version */ + offset = AST_DP501_GBL_VERSION; + data = readl(ast->dp501_fw_buf + offset); + if ((data & AST_DP501_FW_VERSION_MASK) != + AST_DP501_FW_VERSION_1) + return false; + + /* validate PnP Monitor */ + offset = AST_DP501_PNPMONITOR; + data = readl(ast->dp501_fw_buf + offset); + if (!(data & AST_DP501_PNP_CONNECTED)) + return false; + + /* Read EDID */ + offset = AST_DP501_EDID_DATA; + for 
(i = 0; i < 128; i += 4) { + data = readl(ast->dp501_fw_buf + offset + i); + pEDIDidx = (u32 *)(ediddata + i); + *pEDIDidx = data; + } + } + + return true; +} + +static bool ast_init_dvo(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + u8 jreg; + u32 data; + + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x12000, 0x1688a8a8); + + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + if (!(jreg & 0x80)) { + /* Init SCU DVO Settings */ + data = ast_read32(ast, 0x12008); + /* delay phase */ + data &= 0xfffff8ff; + data |= 0x00000500; + ast_write32(ast, 0x12008, data); + + if (ast->chip == AST2300) { + data = ast_read32(ast, 0x12084); + /* multi-pins for DVO single-edge */ + data |= 0xfffe0000; + ast_write32(ast, 0x12084, data); + + data = ast_read32(ast, 0x12088); + /* multi-pins for DVO single-edge */ + data |= 0x000fffff; + ast_write32(ast, 0x12088, data); + + data = ast_read32(ast, 0x12090); + /* multi-pins for DVO single-edge */ + data &= 0xffffffcf; + data |= 0x00000020; + ast_write32(ast, 0x12090, data); + } else { /* AST2400 */ + data = ast_read32(ast, 0x12088); + /* multi-pins for DVO single-edge */ + data |= 0x30000000; + ast_write32(ast, 0x12088, data); + + data = ast_read32(ast, 0x1208c); + /* multi-pins for DVO single-edge */ + data |= 0x000000cf; + ast_write32(ast, 0x1208c, data); + + data = ast_read32(ast, 0x120a4); + /* multi-pins for DVO single-edge */ + data |= 0xffff0000; + ast_write32(ast, 0x120a4, data); + + data = ast_read32(ast, 0x120a8); + /* multi-pins for DVO single-edge */ + data |= 0x0000000f; + ast_write32(ast, 0x120a8, data); + + data = ast_read32(ast, 0x12094); + /* multi-pins for DVO single-edge */ + data |= 0x00000002; + ast_write32(ast, 0x12094, data); + } + } + + /* Force to DVO */ + data = ast_read32(ast, 0x1202c); + data &= 0xfffbffff; + ast_write32(ast, 0x1202c, data); + + /* Init VGA DVO Settings */ + ast_set_index_reg_mask(ast, 
AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); + return true; +} + +static void ast_init_analog(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + u32 data; + + /* + * Set DAC source to VGA mode in SCU2C via the P2A + * bridge. First configure the P2U to target the SCU + * in case it isn't at this stage. + */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + + /* Then unlock the SCU with the magic password */ + ast_write32(ast, 0x12000, 0x1688a8a8); + ast_write32(ast, 0x12000, 0x1688a8a8); + ast_write32(ast, 0x12000, 0x1688a8a8); + + /* Finally, clear bits [17:16] of SCU2c */ + data = ast_read32(ast, 0x1202c); + data &= 0xfffcffff; + ast_write32(ast, 0, data); + + /* Disable DVO */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x00); +} + +void ast_init_3rdtx(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + u8 jreg; + + if (ast->chip == AST2300 || ast->chip == AST2400) { + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, + 0xff); + switch (jreg & 0x0e) { + case 0x04: + ast_init_dvo(dev); + break; + case 0x08: + ast_launch_m68k(dev); + break; + case 0x0c: + ast_init_dvo(dev); + break; + default: + if (ast->tx_chip_types & BIT(AST_TX_SIL164)) + ast_init_dvo(dev); + else + ast_init_analog(dev); + } + } +} diff --git a/drivers/gpu/drm/ast_loongson/ast_dram_tables.h b/drivers/gpu/drm/ast_loongson/ast_dram_tables.h new file mode 100644 index 000000000000..114b1de15c1e --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_dram_tables.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef AST_DRAM_TABLES_H +#define AST_DRAM_TABLES_H + +/* DRAM timing tables */ +struct ast_dramstruct { + u16 index; + u32 data; +}; + +static const struct ast_dramstruct ast2000_dram_table_data[] = { + { 0x0108, 0x00000000 }, { 0x0120, 0x00004a21 }, { 0xFF00, 0x00000043 }, + { 0x0000, 0xFFFFFFFF }, { 0x0004, 0x00000089 }, { 0x0008, 0x22331353 }, + { 0x000C, 0x0d07000b }, { 0x0010, 
0x11113333 }, { 0x0020, 0x00110350 }, + { 0x0028, 0x1e0828f0 }, { 0x0024, 0x00000001 }, { 0x001C, 0x00000000 }, + { 0x0014, 0x00000003 }, { 0xFF00, 0x00000043 }, { 0x0018, 0x00000131 }, + { 0x0014, 0x00000001 }, { 0xFF00, 0x00000043 }, { 0x0018, 0x00000031 }, + { 0x0014, 0x00000001 }, { 0xFF00, 0x00000043 }, { 0x0028, 0x1e0828f1 }, + { 0x0024, 0x00000003 }, { 0x002C, 0x1f0f28fb }, { 0x0030, 0xFFFFFE01 }, + { 0xFFFF, 0xFFFFFFFF } +}; + +static const struct ast_dramstruct ast1100_dram_table_data[] = { + { 0x2000, 0x1688a8a8 }, { 0x2020, 0x000041f0 }, { 0xFF00, 0x00000043 }, + { 0x0000, 0xfc600309 }, { 0x006C, 0x00909090 }, { 0x0064, 0x00050000 }, + { 0x0004, 0x00000585 }, { 0x0008, 0x0011030f }, { 0x0010, 0x22201724 }, + { 0x0018, 0x1e29011a }, { 0x0020, 0x00c82222 }, { 0x0014, 0x01001523 }, + { 0x001C, 0x1024010d }, { 0x0024, 0x00cb2522 }, { 0x0038, 0xffffff82 }, + { 0x003C, 0x00000000 }, { 0x0040, 0x00000000 }, { 0x0044, 0x00000000 }, + { 0x0048, 0x00000000 }, { 0x004C, 0x00000000 }, { 0x0050, 0x00000000 }, + { 0x0054, 0x00000000 }, { 0x0058, 0x00000000 }, { 0x005C, 0x00000000 }, + { 0x0060, 0x032aa02a }, { 0x0064, 0x002d3000 }, { 0x0068, 0x00000000 }, + { 0x0070, 0x00000000 }, { 0x0074, 0x00000000 }, { 0x0078, 0x00000000 }, + { 0x007C, 0x00000000 }, { 0x0034, 0x00000001 }, { 0xFF00, 0x00000043 }, + { 0x002C, 0x00000732 }, { 0x0030, 0x00000040 }, { 0x0028, 0x00000005 }, + { 0x0028, 0x00000007 }, { 0x0028, 0x00000003 }, { 0x0028, 0x00000001 }, + { 0x000C, 0x00005a08 }, { 0x002C, 0x00000632 }, { 0x0028, 0x00000001 }, + { 0x0030, 0x000003c0 }, { 0x0028, 0x00000003 }, { 0x0030, 0x00000040 }, + { 0x0028, 0x00000003 }, { 0x000C, 0x00005a21 }, { 0x0034, 0x00007c03 }, + { 0x0120, 0x00004c41 }, { 0xffff, 0xffffffff }, +}; + +static const struct ast_dramstruct ast2100_dram_table_data[] = { + { 0x2000, 0x1688a8a8 }, { 0x2020, 0x00004120 }, { 0xFF00, 0x00000043 }, + { 0x0000, 0xfc600309 }, { 0x006C, 0x00909090 }, { 0x0064, 0x00070000 }, + { 0x0004, 0x00000489 }, { 0x0008, 
0x0011030f }, { 0x0010, 0x32302926 }, + { 0x0018, 0x274c0122 }, { 0x0020, 0x00ce2222 }, { 0x0014, 0x01001523 }, + { 0x001C, 0x1024010d }, { 0x0024, 0x00cb2522 }, { 0x0038, 0xffffff82 }, + { 0x003C, 0x00000000 }, { 0x0040, 0x00000000 }, { 0x0044, 0x00000000 }, + { 0x0048, 0x00000000 }, { 0x004C, 0x00000000 }, { 0x0050, 0x00000000 }, + { 0x0054, 0x00000000 }, { 0x0058, 0x00000000 }, { 0x005C, 0x00000000 }, + { 0x0060, 0x0f2aa02a }, { 0x0064, 0x003f3005 }, { 0x0068, 0x02020202 }, + { 0x0070, 0x00000000 }, { 0x0074, 0x00000000 }, { 0x0078, 0x00000000 }, + { 0x007C, 0x00000000 }, { 0x0034, 0x00000001 }, { 0xFF00, 0x00000043 }, + { 0x002C, 0x00000942 }, { 0x0030, 0x00000040 }, { 0x0028, 0x00000005 }, + { 0x0028, 0x00000007 }, { 0x0028, 0x00000003 }, { 0x0028, 0x00000001 }, + { 0x000C, 0x00005a08 }, { 0x002C, 0x00000842 }, { 0x0028, 0x00000001 }, + { 0x0030, 0x000003c0 }, { 0x0028, 0x00000003 }, { 0x0030, 0x00000040 }, + { 0x0028, 0x00000003 }, { 0x000C, 0x00005a21 }, { 0x0034, 0x00007c03 }, + { 0x0120, 0x00005061 }, { 0xffff, 0xffffffff }, +}; + +/* + * AST2500 DRAM settings modules + */ +#define REGTBL_NUM 17 +#define REGIDX_010 0 +#define REGIDX_014 1 +#define REGIDX_018 2 +#define REGIDX_020 3 +#define REGIDX_024 4 +#define REGIDX_02C 5 +#define REGIDX_030 6 +#define REGIDX_214 7 +#define REGIDX_2E0 8 +#define REGIDX_2E4 9 +#define REGIDX_2E8 10 +#define REGIDX_2EC 11 +#define REGIDX_2F0 12 +#define REGIDX_2F4 13 +#define REGIDX_2F8 14 +#define REGIDX_RFC 15 +#define REGIDX_PLL 16 + +static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = { + 0x64604D38, /* 0x010 */ + 0x29690599, /* 0x014 */ + 0x00000300, /* 0x018 */ + 0x00000000, /* 0x020 */ + 0x00000000, /* 0x024 */ + 0x02181E70, /* 0x02C */ + 0x00000040, /* 0x030 */ + 0x00000024, /* 0x214 */ + 0x02001300, /* 0x2E0 */ + 0x0E0000A0, /* 0x2E4 */ + 0x000E001B, /* 0x2E8 */ + 0x35B8C105, /* 0x2EC */ + 0x08090408, /* 0x2F0 */ + 0x9B000800, /* 0x2F4 */ + 0x0E400A00, /* 0x2F8 */ + 0x9971452F, /* tRFC */ + 0x000071C1 
/* PLL */ +}; + +static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = { + 0x63604E37, /* 0x010 */ + 0xE97AFA99, /* 0x014 */ + 0x00019000, /* 0x018 */ + 0x08000000, /* 0x020 */ + 0x00000400, /* 0x024 */ + 0x00000410, /* 0x02C */ + 0x00000101, /* 0x030 */ + 0x00000024, /* 0x214 */ + 0x03002900, /* 0x2E0 */ + 0x0E0000A0, /* 0x2E4 */ + 0x000E001C, /* 0x2E8 */ + 0x35B8C106, /* 0x2EC */ + 0x08080607, /* 0x2F0 */ + 0x9B000900, /* 0x2F4 */ + 0x0E400A00, /* 0x2F8 */ + 0x99714545, /* tRFC */ + 0x000071C1 /* PLL */ +}; + +#endif diff --git a/drivers/gpu/drm/ast_loongson/ast_drv.c b/drivers/gpu/drm/ast_loongson/ast_drv.c new file mode 100644 index 000000000000..2e069fe97939 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_drv.c @@ -0,0 +1,231 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ast_drv.h" + +static int ast_modeset = -1; + +MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); +module_param_named(modeset, ast_modeset, int, 0400); + +/* + * DRM driver + */ + +DEFINE_DRM_GEM_FOPS(ast_fops); + +static const struct drm_driver ast_driver = { .driver_features = DRIVER_ATOMIC | + DRIVER_GEM | + DRIVER_MODESET, + + .fops = &ast_fops, + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, + + DRM_GEM_VRAM_DRIVER }; + +/* + * PCI driver + */ + +#define PCI_VENDOR_ASPEED 0x1a03 + +#define AST_VGA_DEVICE(id, info) \ + { .class = PCI_BASE_CLASS_DISPLAY << 16, \ + .class_mask = 0xff0000, \ + .vendor = PCI_VENDOR_ASPEED, \ + .device = id, \ + .subvendor = PCI_ANY_ID, \ + .subdevice = PCI_ANY_ID, \ + .driver_data = (unsigned long)info } + +static const struct pci_device_id ast_pciidlist[] = { + AST_VGA_DEVICE(PCI_CHIP_AST2000, NULL), + AST_VGA_DEVICE(PCI_CHIP_AST2100, NULL), + { 0, 0, 0 }, +}; + +MODULE_DEVICE_TABLE(pci, ast_pciidlist); + +static int ast_remove_conflicting_framebuffers(struct pci_dev *pdev) +{ + resource_size_t base, size; + + base = pci_resource_start(pdev, 0); + size = pci_resource_len(pdev, 0); + + return drm_aperture_remove_conflicting_framebuffers(base, size, + &ast_driver); +} + +static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct ast_private *ast; + struct drm_device *dev; + int ret; + + ret = ast_remove_conflicting_framebuffers(pdev); + if (ret) + return ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ast = ast_device_create(&ast_driver, pdev, ent->driver_data); + if (IS_ERR(ast)) + return PTR_ERR(ast); + dev = &ast->base; + + ret = drm_dev_register(dev, ent->driver_data); + if (ret) + return ret; + + 
drm_fbdev_generic_setup(dev, 32); + + return 0; +} + +static void ast_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + drm_dev_unregister(dev); + drm_atomic_helper_shutdown(dev); +} + +static int ast_drm_freeze(struct drm_device *dev) +{ + int error; + + error = drm_mode_config_helper_suspend(dev); + if (error) + return error; + pci_save_state(to_pci_dev(dev->dev)); + return 0; +} + +static int ast_drm_thaw(struct drm_device *dev) +{ + ast_post_gpu(dev); + + return drm_mode_config_helper_resume(dev); +} + +static int ast_drm_resume(struct drm_device *dev) +{ + if (pci_enable_device(to_pci_dev(dev->dev))) + return -EIO; + + return ast_drm_thaw(dev); +} + +static int ast_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + int error; + + error = ast_drm_freeze(ddev); + if (error) + return error; + + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + return 0; +} + +static int ast_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + + return ast_drm_resume(ddev); +} + +static int ast_pm_freeze(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + + return ast_drm_freeze(ddev); +} + +static int ast_pm_thaw(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + + return ast_drm_thaw(ddev); +} + +static int ast_pm_poweroff(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + + return ast_drm_freeze(ddev); +} + +static const struct dev_pm_ops ast_pm_ops = { + .suspend = ast_pm_suspend, + .resume = ast_pm_resume, + .freeze = ast_pm_freeze, + .thaw = ast_pm_thaw, + .poweroff = ast_pm_poweroff, + .restore = ast_pm_resume, +}; + +static struct pci_driver ast_pci_driver = { + .name = 
DRIVER_NAME, + .id_table = ast_pciidlist, + .probe = ast_pci_probe, + .remove = ast_pci_remove, + .driver.pm = &ast_pm_ops, +}; + +drm_module_pci_driver_if_modeset(ast_pci_driver, ast_modeset); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/ast_loongson/ast_drv.h b/drivers/gpu/drm/ast_loongson/ast_drv.h new file mode 100644 index 000000000000..29a2965080ef --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_drv.h @@ -0,0 +1,528 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie + */ +#ifndef __AST_DRV_H__ +#define __AST_DRV_H__ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#define DRIVER_AUTHOR "Dave Airlie" + +#define DRIVER_NAME "ast" +#define DRIVER_DESC "AST" +#define DRIVER_DATE "20120228" + +#define DRIVER_MAJOR 0 +#define DRIVER_MINOR 1 +#define DRIVER_PATCHLEVEL 0 + +#define PCI_CHIP_AST2000 0x2000 +#define PCI_CHIP_AST2100 0x2010 + +enum ast_chip { + AST2000, + AST2100, + AST1100, + AST2200, + AST2150, + AST2300, + AST2400, + AST2500, + AST2600, +}; + +enum ast_tx_chip { + AST_TX_NONE, + AST_TX_SIL164, + AST_TX_DP501, + AST_TX_ASTDP, +}; + +#define AST_TX_NONE_BIT BIT(AST_TX_NONE) +#define AST_TX_SIL164_BIT BIT(AST_TX_SIL164) +#define AST_TX_DP501_BIT BIT(AST_TX_DP501) +#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP) + +#define AST_DRAM_512Mx16 0 +#define AST_DRAM_1Gx16 1 +#define AST_DRAM_512Mx32 2 +#define AST_DRAM_1Gx32 3 +#define AST_DRAM_2Gx16 6 +#define AST_DRAM_4Gx16 7 +#define AST_DRAM_8Gx16 8 + +/* + * Hardware cursor + */ + +#define AST_MAX_HWC_WIDTH 64 +#define AST_MAX_HWC_HEIGHT 64 + +#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH * AST_MAX_HWC_HEIGHT * 2) +#define AST_HWC_SIGNATURE_SIZE 32 + +/* define for signature structure */ +#define AST_HWC_SIGNATURE_CHECKSUM 0x00 +#define AST_HWC_SIGNATURE_SizeX 0x04 +#define AST_HWC_SIGNATURE_SizeY 0x08 +#define AST_HWC_SIGNATURE_X 0x0C +#define AST_HWC_SIGNATURE_Y 0x10 +#define AST_HWC_SIGNATURE_HOTSPOTX 0x14 +#define AST_HWC_SIGNATURE_HOTSPOTY 0x18 + +/* + * Planes + */ + +struct ast_plane { + struct drm_plane base; + + struct drm_gem_vram_object *gbo; + struct iosys_map map; + u64 off; +}; + +static inline struct ast_plane *to_ast_plane(struct drm_plane *plane) +{ + return container_of(plane, struct ast_plane, base); +} + +/* + * Connector with i2c channel + */ + +struct ast_i2c_chan { + struct i2c_adapter adapter; + struct drm_device *dev; + struct i2c_algo_bit_data bit; +}; + +struct 
ast_vga_connector { + struct drm_connector base; + struct ast_i2c_chan *i2c; +}; + +static inline struct ast_vga_connector * +to_ast_vga_connector(struct drm_connector *connector) +{ + return container_of(connector, struct ast_vga_connector, base); +} + +struct ast_sil164_connector { + struct drm_connector base; + struct ast_i2c_chan *i2c; +}; + +static inline struct ast_sil164_connector * +to_ast_sil164_connector(struct drm_connector *connector) +{ + return container_of(connector, struct ast_sil164_connector, base); +} + +/* + * Device + */ + +struct ast_private { + struct drm_device base; + + struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */ + void __iomem *regs; + void __iomem *ioregs; + void __iomem *dp501_fw_buf; + + enum ast_chip chip; + bool vga2_clone; + uint32_t dram_bus_width; + uint32_t dram_type; + uint32_t mclk; + + struct drm_plane primary_plane; + struct ast_plane cursor_plane; + struct drm_crtc crtc; + struct { + struct { + struct drm_encoder encoder; + struct ast_vga_connector vga_connector; + } vga; + struct { + struct drm_encoder encoder; + struct ast_sil164_connector sil164_connector; + } sil164; + struct { + struct drm_encoder encoder; + struct drm_connector connector; + } dp501; + struct { + struct drm_encoder encoder; + struct drm_connector connector; + } astdp; + } output; + + bool support_wide_screen; + enum { ast_use_p2a, ast_use_dt, ast_use_defaults } config_mode; + + unsigned long tx_chip_types; /* bitfield of enum ast_chip_type */ + u8 *dp501_fw_addr; + const struct firmware *dp501_fw; /* dp501 fw */ +}; + +static inline struct ast_private *to_ast_private(struct drm_device *dev) +{ + return container_of(dev, struct ast_private, base); +} + +struct ast_private *ast_device_create(const struct drm_driver *drv, + struct pci_dev *pdev, + unsigned long flags); + +#define AST_IO_AR_PORT_WRITE (0x40) +#define AST_IO_MISC_PORT_WRITE (0x42) +#define AST_IO_VGA_ENABLE_PORT (0x43) +#define AST_IO_SEQ_PORT (0x44) +#define 
AST_IO_DAC_INDEX_READ (0x47) +#define AST_IO_DAC_INDEX_WRITE (0x48) +#define AST_IO_DAC_DATA (0x49) +#define AST_IO_GR_PORT (0x4E) +#define AST_IO_CRTC_PORT (0x54) +#define AST_IO_INPUT_STATUS1_READ (0x5A) +#define AST_IO_MISC_PORT_READ (0x4C) + +#define AST_IO_MM_OFFSET (0x380) + +#define AST_IO_VGAIR1_VREFRESH BIT(3) + +#define AST_IO_VGACRCB_HWC_ENABLED BIT(1) +#define AST_IO_VGACRCB_HWC_16BPP \ + BIT(0) /* set: ARGB4444, cleared: 2bpp palette */ + +static inline u8 ast_read8(struct ast_private *ast, u32 reg) +{ + u8 val = 0; + + val = ioread8(ast->regs + reg); + return val; +} + +static inline u16 ast_read16(struct ast_private *ast, u32 reg) +{ + u16 val = 0; + + val = ioread16(ast->regs + reg); + return val; +} + +static inline u32 ast_read32(struct ast_private *ast, u32 reg) +{ + u32 val = 0; + + val = ioread32(ast->regs + reg); + return val; +} + +static inline u8 ast_io_read8(struct ast_private *ast, u32 reg) +{ + u8 val = 0; + + val = ioread8(ast->ioregs + reg); + return val; +} + +static inline u16 ast_io_read16(struct ast_private *ast, u32 reg) +{ + u16 val = 0; + + val = ioread16(ast->ioregs + reg); + return val; +} + +static inline u32 ast_io_read32(struct ast_private *ast, u32 reg) +{ + u32 val = 0; + + val = ioread32(ast->ioregs + reg); + return val; +} + +#define __ast_write(x) \ + static inline void ast_write##x(struct ast_private *ast, u32 reg, \ + u##x val) \ + { \ + iowrite##x(val, ast->regs + reg); \ + } + +__ast_write(8); +__ast_write(16); +__ast_write(32); + +#define __ast_io_write(x) \ + static inline void ast_io_write##x(struct ast_private *ast, u32 reg, \ + u##x val) \ + { \ + iowrite##x(val, ast->ioregs + reg); \ + } + +__ast_io_write(8); +__ast_io_write(16); +#undef __ast_io_write + +static inline void ast_set_index_reg(struct ast_private *ast, uint32_t base, + uint8_t index, uint8_t val) +{ + ast_io_write16(ast, base, ((u16)val << 8) | index); +} + +void ast_set_index_reg_mask(struct ast_private *ast, uint32_t base, + uint8_t index, 
uint8_t mask, uint8_t val); +uint8_t ast_get_index_reg(struct ast_private *ast, uint32_t base, + uint8_t index); +uint8_t ast_get_index_reg_mask(struct ast_private *ast, uint32_t base, + uint8_t index, uint8_t mask); + +static inline void ast_open_key(struct ast_private *ast) +{ + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8); +} + +#define AST_VIDMEM_SIZE_8M 0x00800000 +#define AST_VIDMEM_SIZE_16M 0x01000000 +#define AST_VIDMEM_SIZE_32M 0x02000000 +#define AST_VIDMEM_SIZE_64M 0x04000000 +#define AST_VIDMEM_SIZE_128M 0x08000000 + +#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M + +struct ast_vbios_stdtable { + u8 misc; + u8 seq[4]; + u8 crtc[25]; + u8 ar[20]; + u8 gr[9]; +}; + +struct ast_vbios_enhtable { + u32 ht; + u32 hde; + u32 hfp; + u32 hsync; + u32 vt; + u32 vde; + u32 vfp; + u32 vsync; + u32 dclk_index; + u32 flags; + u32 refresh_rate; + u32 refresh_rate_index; + u32 mode_id; +}; + +struct ast_vbios_dclk_info { + u8 param1; + u8 param2; + u8 param3; +}; + +struct ast_vbios_mode_info { + const struct ast_vbios_stdtable *std_table; + const struct ast_vbios_enhtable *enh_table; +}; + +struct ast_crtc_state { + struct drm_crtc_state base; + + /* Last known format of primary plane */ + const struct drm_format_info *format; + + struct ast_vbios_mode_info vbios_mode_info; +}; + +#define to_ast_crtc_state(state) \ + container_of(state, struct ast_crtc_state, base) + +int ast_mode_config_init(struct ast_private *ast); + +#define AST_MM_ALIGN_SHIFT 4 +#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1) + +#define AST_DP501_FW_VERSION_MASK GENMASK(7, 4) +#define AST_DP501_FW_VERSION_1 BIT(4) +#define AST_DP501_PNP_CONNECTED BIT(1) + +#define AST_DP501_DEFAULT_DCLK 65 + +#define AST_DP501_GBL_VERSION 0xf000 +#define AST_DP501_PNPMONITOR 0xf010 +#define AST_DP501_LINKRATE 0xf014 +#define AST_DP501_EDID_DATA 0xf020 + +/* Define for Soc scratched reg */ +#define COPROCESSOR_LAUNCH BIT(5) + +/* + * Display Transmitter Type: + */ +#define TX_TYPE_MASK 
GENMASK(3, 1) +#define NO_TX (0 << 1) +#define ITE66121_VBIOS_TX (1 << 1) +#define SI164_VBIOS_TX (2 << 1) +#define CH7003_VBIOS_TX (3 << 1) +#define DP501_VBIOS_TX (4 << 1) +#define ANX9807_VBIOS_TX (5 << 1) +#define TX_FW_EMBEDDED_FW_TX (6 << 1) +#define ASTDP_DPMCU_TX (7 << 1) + +#define AST_VRAM_INIT_STATUS_MASK GENMASK(7, 6) +//#define AST_VRAM_INIT_BY_BMC BIT(7) +//#define AST_VRAM_INIT_READY BIT(6) + +/* Define for Soc scratched reg used on ASTDP */ +#define AST_DP_PHY_SLEEP BIT(4) +#define AST_DP_VIDEO_ENABLE BIT(0) + +#define AST_DP_POWER_ON true +#define AST_DP_POWER_OFF false + +/* + * CRD1[b5]: DP MCU FW is executing + * CRDC[b0]: DP link success + * CRDF[b0]: DP HPD + * CRE5[b0]: Host reading EDID process is done + */ +#define ASTDP_MCU_FW_EXECUTING BIT(5) +#define ASTDP_LINK_SUCCESS BIT(0) +#define ASTDP_HPD BIT(0) +#define ASTDP_HOST_EDID_READ_DONE BIT(0) +#define ASTDP_HOST_EDID_READ_DONE_MASK GENMASK(0, 0) + +/* + * CRB8[b1]: Enable VSYNC off + * CRB8[b0]: Enable HSYNC off + */ +#define AST_DPMS_VSYNC_OFF BIT(1) +#define AST_DPMS_HSYNC_OFF BIT(0) + +/* + * CRDF[b4]: Mirror of AST_DP_VIDEO_ENABLE + * Precondition: A. ~AST_DP_PHY_SLEEP && + * B. DP_HPD && + * C. 
DP_LINK_SUCCESS + */ +#define ASTDP_MIRROR_VIDEO_ENABLE BIT(4) + +#define ASTDP_EDID_READ_POINTER_MASK GENMASK(7, 0) +#define ASTDP_EDID_VALID_FLAG_MASK GENMASK(0, 0) +#define ASTDP_EDID_READ_DATA_MASK GENMASK(7, 0) + +/* + * ASTDP setmode registers: + * CRE0[7:0]: MISC0 ((0x00: 18-bpp) or (0x20: 24-bpp) + * CRE1[7:0]: MISC1 (default: 0x00) + * CRE2[7:0]: video format index (0x00 ~ 0x20 or 0x40 ~ 0x50) + */ +#define ASTDP_MISC0_24bpp BIT(5) +#define ASTDP_MISC1 0 +#define ASTDP_AND_CLEAR_MASK 0x00 + +/* + * ASTDP resoultion table: + * EX: ASTDP_A_B_C: + * A: Resolution + * B: Refresh Rate + * C: Misc information, such as CVT, Reduce Blanked + */ +#define ASTDP_640x480_60 0x00 +#define ASTDP_640x480_72 0x01 +#define ASTDP_640x480_75 0x02 +#define ASTDP_640x480_85 0x03 +#define ASTDP_800x600_56 0x04 +#define ASTDP_800x600_60 0x05 +#define ASTDP_800x600_72 0x06 +#define ASTDP_800x600_75 0x07 +#define ASTDP_800x600_85 0x08 +#define ASTDP_1024x768_60 0x09 +#define ASTDP_1024x768_70 0x0A +#define ASTDP_1024x768_75 0x0B +#define ASTDP_1024x768_85 0x0C +#define ASTDP_1280x1024_60 0x0D +#define ASTDP_1280x1024_75 0x0E +#define ASTDP_1280x1024_85 0x0F +#define ASTDP_1600x1200_60 0x10 +#define ASTDP_320x240_60 0x11 +#define ASTDP_400x300_60 0x12 +#define ASTDP_512x384_60 0x13 +#define ASTDP_1920x1200_60 0x14 +#define ASTDP_1920x1080_60 0x15 +#define ASTDP_1280x800_60 0x16 +#define ASTDP_1280x800_60_RB 0x17 +#define ASTDP_1440x900_60 0x18 +#define ASTDP_1440x900_60_RB 0x19 +#define ASTDP_1680x1050_60 0x1A +#define ASTDP_1680x1050_60_RB 0x1B +#define ASTDP_1600x900_60 0x1C +#define ASTDP_1600x900_60_RB 0x1D +#define ASTDP_1366x768_60 0x1E +#define ASTDP_1152x864_75 0x1F + +int ast_mm_init(struct ast_private *ast); + +/* ast post */ +void ast_enable_vga(struct drm_device *dev); +void ast_enable_mmio(struct drm_device *dev); +bool ast_is_vga_enabled(struct drm_device *dev); +void ast_post_gpu(struct drm_device *dev); +u32 ast_mindwm(struct ast_private *ast, u32 r); +void 
ast_moutdwm(struct ast_private *ast, u32 r, u32 v); +void ast_patch_ahb_2500(struct ast_private *ast); +/* ast dp501 */ +void ast_set_dp501_video_output(struct drm_device *dev, u8 mode); +bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size); +bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata); +u8 ast_get_dp501_max_clk(struct drm_device *dev); +void ast_init_3rdtx(struct drm_device *dev); + +/* ast_i2c.c */ +struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev); + +/* aspeed DP */ +int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata); +void ast_dp_launch(struct drm_device *dev, u8 bPower); +void ast_dp_power_on_off(struct drm_device *dev, bool no); +void ast_dp_set_on_off(struct drm_device *dev, bool no); +void ast_dp_set_mode(struct drm_crtc *crtc, + struct ast_vbios_mode_info *vbios_mode); + +#endif diff --git a/drivers/gpu/drm/ast_loongson/ast_i2c.c b/drivers/gpu/drm/ast_loongson/ast_i2c.c new file mode 100644 index 000000000000..a3daabe3b6a6 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_i2c.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: MIT +/* + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + */ + +#include +#include + +#include "ast_drv.h" + +static void ast_i2c_setsda(void *i2c_priv, int data) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + int i; + u8 ujcrb7, jtemp; + + for (i = 0; i < 0x10000; i++) { + ujcrb7 = ((data & 0x01) ? 0 : 1) << 2; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, + ujcrb7); + jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, + 0x04); + if (ujcrb7 == jtemp) + break; + } +} + +static void ast_i2c_setscl(void *i2c_priv, int clock) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + int i; + u8 ujcrb7, jtemp; + + for (i = 0; i < 0x10000; i++) { + ujcrb7 = ((clock & 0x01) ? 0 : 1); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, + ujcrb7); + jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, + 0x01); + if (ujcrb7 == jtemp) + break; + } +} + +static int ast_i2c_getsda(void *i2c_priv) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + uint32_t val, val2, count, pass; + + count = 0; + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & + 0x01; + do { + val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, + 0x20) >> + 5) & + 0x01; + if (val == val2) { + pass++; + } else { + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, + 0xb7, 0x20) >> + 5) & + 0x01; + } + } while ((pass < 5) && (count++ < 0x10000)); + + return val & 1 ? 
1 : 0; +} + +static int ast_i2c_getscl(void *i2c_priv) +{ + struct ast_i2c_chan *i2c = i2c_priv; + struct ast_private *ast = to_ast_private(i2c->dev); + uint32_t val, val2, count, pass; + + count = 0; + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & + 0x01; + do { + val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, + 0x10) >> + 4) & + 0x01; + if (val == val2) { + pass++; + } else { + pass = 0; + val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, + 0xb7, 0x10) >> + 4) & + 0x01; + } + } while ((pass < 5) && (count++ < 0x10000)); + + return val & 1 ? 1 : 0; +} + +static void ast_i2c_release(struct drm_device *dev, void *res) +{ + struct ast_i2c_chan *i2c = res; + + i2c_del_adapter(&i2c->adapter); + kfree(i2c); +} + +struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev) +{ + struct ast_i2c_chan *i2c; + int ret; + + i2c = kzalloc(sizeof(struct ast_i2c_chan), GFP_KERNEL); + if (!i2c) + return NULL; + + i2c->adapter.owner = THIS_MODULE; + i2c->adapter.class = I2C_CLASS_DDC; + i2c->adapter.dev.parent = dev->dev; + i2c->dev = dev; + i2c_set_adapdata(&i2c->adapter, i2c); + snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), + "AST i2c bit bus"); + i2c->adapter.algo_data = &i2c->bit; + + i2c->bit.udelay = 20; + i2c->bit.timeout = 2; + i2c->bit.data = i2c; + i2c->bit.setsda = ast_i2c_setsda; + i2c->bit.setscl = ast_i2c_setscl; + i2c->bit.getsda = ast_i2c_getsda; + i2c->bit.getscl = ast_i2c_getscl; + ret = i2c_bit_add_bus(&i2c->adapter); + if (ret) { + drm_err(dev, "Failed to register bit i2c\n"); + goto out_kfree; + } + + ret = drmm_add_action_or_reset(dev, ast_i2c_release, i2c); + if (ret) + return NULL; + return i2c; + +out_kfree: + kfree(i2c); + return NULL; +} diff --git a/drivers/gpu/drm/ast_loongson/ast_main.c b/drivers/gpu/drm/ast_loongson/ast_main.c new file mode 100644 index 000000000000..ab6195b61b95 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_main.c @@ -0,0 +1,486 @@ +/* + * Copyright 2012 Red Hat 
Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include "ast_drv.h" + +void ast_set_index_reg_mask(struct ast_private *ast, uint32_t base, + uint8_t index, uint8_t mask, uint8_t val) +{ + u8 tmp; + + ast_io_write8(ast, base, index); + tmp = (ast_io_read8(ast, base + 1) & mask) | val; + ast_set_index_reg(ast, base, index, tmp); +} + +uint8_t ast_get_index_reg(struct ast_private *ast, uint32_t base, uint8_t index) +{ + uint8_t ret; + + ast_io_write8(ast, base, index); + ret = ast_io_read8(ast, base + 1); + return ret; +} + +uint8_t ast_get_index_reg_mask(struct ast_private *ast, uint32_t base, + uint8_t index, uint8_t mask) +{ + uint8_t ret; + + ast_io_write8(ast, base, index); + ret = ast_io_read8(ast, base + 1) & mask; + return ret; +} + +static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) +{ + struct device_node *np = dev->dev->of_node; + struct ast_private *ast = to_ast_private(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); + uint32_t data, jregd0, jregd1; + + /* Defaults */ + ast->config_mode = ast_use_defaults; + *scu_rev = 0xffffffff; + + /* Check if we have device-tree properties */ + if (np && + !of_property_read_u32(np, "aspeed,scu-revision-id", scu_rev)) { + /* We do, disable P2A access */ + ast->config_mode = ast_use_dt; + drm_info(dev, "Using device-tree for configuration\n"); + return; + } + + /* Not all families have a P2A bridge */ + if (pdev->device != PCI_CHIP_AST2000) + return; + + /* + * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge + * is disabled. 
We force using P2A if VGA only mode bit + * is set D[7] + */ + jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); + if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { + /* Patch AST2500 */ + if (((pdev->revision & 0xF0) == 0x40) && + ((jregd0 & AST_VRAM_INIT_STATUS_MASK) == 0)) + ast_patch_ahb_2500(ast); + + /* Double check it's actually working */ + data = ast_read32(ast, 0xf004); + if ((data != 0xFFFFFFFF) && (data != 0x00)) { + /* P2A works, grab silicon revision */ + ast->config_mode = ast_use_p2a; + + drm_info(dev, "Using P2A bridge for configuration\n"); + + /* Read SCU7c (silicon revision register) */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + *scu_rev = ast_read32(ast, 0x1207c); + return; + } + } + + /* We have a P2A bridge but it's disabled */ + drm_info(dev, "P2A bridge disabled, using default configuration\n"); +} + +static int ast_detect_chip(struct drm_device *dev, bool *need_post) +{ + struct ast_private *ast = to_ast_private(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); + uint32_t jreg, scu_rev; + + /* + * If VGA isn't enabled, we need to enable now or subsequent + * access to the scratch registers will fail. 
We also inform + * our caller that it needs to POST the chip + * (Assumption: VGA not enabled -> need to POST) + */ + if (!ast_is_vga_enabled(dev)) { + ast_enable_vga(dev); + drm_info(dev, + "VGA not enabled on entry, requesting chip POST\n"); + *need_post = true; + } else + *need_post = false; + + /* Enable extended register access */ + ast_open_key(ast); + ast_enable_mmio(dev); + + /* Find out whether P2A works or whether to use device-tree */ + ast_detect_config_mode(dev, &scu_rev); + + /* Identify chipset */ + if (pdev->revision >= 0x50) { + ast->chip = AST2600; + drm_info(dev, "AST 2600 detected\n"); + } else if (pdev->revision >= 0x40) { + ast->chip = AST2500; + drm_info(dev, "AST 2500 detected\n"); + } else if (pdev->revision >= 0x30) { + ast->chip = AST2400; + drm_info(dev, "AST 2400 detected\n"); + } else if (pdev->revision >= 0x20) { + ast->chip = AST2300; + drm_info(dev, "AST 2300 detected\n"); + } else if (pdev->revision >= 0x10) { + switch (scu_rev & 0x0300) { + case 0x0200: + ast->chip = AST1100; + drm_info(dev, "AST 1100 detected\n"); + break; + case 0x0100: + ast->chip = AST2200; + drm_info(dev, "AST 2200 detected\n"); + break; + case 0x0000: + ast->chip = AST2150; + drm_info(dev, "AST 2150 detected\n"); + break; + default: + ast->chip = AST2100; + drm_info(dev, "AST 2100 detected\n"); + break; + } + ast->vga2_clone = false; + } else { + ast->chip = AST2000; + drm_info(dev, "AST 2000 detected\n"); + } + + /* Check if we support wide screen */ + switch (ast->chip) { + case AST2000: + ast->support_wide_screen = false; + break; + default: + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, + 0xff); + if (!(jreg & 0x80)) + ast->support_wide_screen = true; + else if (jreg & 0x01) + ast->support_wide_screen = true; + else { + ast->support_wide_screen = false; + if (ast->chip == AST2300 && + (scu_rev & 0x300) == 0x0) /* ast1300 */ + ast->support_wide_screen = true; + if (ast->chip == AST2400 && + (scu_rev & 0x300) == 0x100) /* ast1400 */ + 
ast->support_wide_screen = true; + if (ast->chip == AST2500 && + scu_rev == 0x100) /* ast2510 */ + ast->support_wide_screen = true; + if (ast->chip == AST2600) /* ast2600 */ + ast->support_wide_screen = true; + } + break; + } + + /* Check 3rd Tx option (digital output afaik) */ + ast->tx_chip_types |= AST_TX_NONE_BIT; + + /* + * VGACRA3 Enhanced Color Mode Register, check if DVO is already + * enabled, in that case, assume we have a SIL164 TMDS transmitter + * + * Don't make that assumption if we the chip wasn't enabled and + * is at power-on reset, otherwise we'll incorrectly "detect" a + * SIL164 when there is none. + */ + if (!*need_post) { + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, + 0xff); + if (jreg & 0x80) + ast->tx_chip_types = AST_TX_SIL164_BIT; + } + + if ((ast->chip == AST2300) || (ast->chip == AST2400) || + (ast->chip == AST2500)) { + /* + * On AST2300 and 2400, look the configuration set by the SoC in + * the SOC scratch register #1 bits 11:8 (interestingly marked + * as "reserved" in the spec) + */ + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, + 0xff); + switch (jreg) { + case 0x04: + ast->tx_chip_types = AST_TX_SIL164_BIT; + break; + case 0x08: + ast->dp501_fw_addr = + drmm_kzalloc(dev, 32 * 1024, GFP_KERNEL); + if (ast->dp501_fw_addr) { + /* backup firmware */ + if (ast_backup_fw(dev, ast->dp501_fw_addr, + 32 * 1024)) { + drmm_kfree(dev, ast->dp501_fw_addr); + ast->dp501_fw_addr = NULL; + } + } + fallthrough; + case 0x0c: + ast->tx_chip_types = AST_TX_DP501_BIT; + } + } else if (ast->chip == AST2600) + ast_dp_launch(&ast->base, 0); + + /* Print stuff for diagnostic purposes */ + if (ast->tx_chip_types & AST_TX_NONE_BIT) + drm_info(dev, "Using analog VGA\n"); + if (ast->tx_chip_types & AST_TX_SIL164_BIT) + drm_info(dev, "Using Sil164 TMDS transmitter\n"); + if (ast->tx_chip_types & AST_TX_DP501_BIT) + drm_info(dev, "Using DP501 DisplayPort transmitter\n"); + + return 0; +} + +static int ast_get_dram_info(struct 
drm_device *dev) +{ + struct device_node *np = dev->dev->of_node; + struct ast_private *ast = to_ast_private(dev); + uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap; + uint32_t denum, num, div, ref_pll, dsel; + + switch (ast->config_mode) { + case ast_use_dt: + /* + * If some properties are missing, use reasonable + * defaults for AST2400 + */ + if (of_property_read_u32(np, "aspeed,mcr-configuration", + &mcr_cfg)) + mcr_cfg = 0x00000577; + if (of_property_read_u32(np, "aspeed,mcr-scu-mpll", + &mcr_scu_mpll)) + mcr_scu_mpll = 0x000050C0; + if (of_property_read_u32(np, "aspeed,mcr-scu-strap", + &mcr_scu_strap)) + mcr_scu_strap = 0; + break; + case ast_use_p2a: + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + mcr_cfg = ast_read32(ast, 0x10004); + mcr_scu_mpll = ast_read32(ast, 0x10120); + mcr_scu_strap = ast_read32(ast, 0x10170); + break; + case ast_use_defaults: + default: + ast->dram_bus_width = 16; + ast->dram_type = AST_DRAM_1Gx16; + if (ast->chip == AST2500) + ast->mclk = 800; + else + ast->mclk = 396; + return 0; + } + + if (mcr_cfg & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; + + if (ast->chip == AST2500) { + switch (mcr_cfg & 0x03) { + case 0: + ast->dram_type = AST_DRAM_1Gx16; + break; + default: + case 1: + ast->dram_type = AST_DRAM_2Gx16; + break; + case 2: + ast->dram_type = AST_DRAM_4Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_8Gx16; + break; + } + } else if (ast->chip == AST2300 || ast->chip == AST2400) { + switch (mcr_cfg & 0x03) { + case 0: + ast->dram_type = AST_DRAM_512Mx16; + break; + default: + case 1: + ast->dram_type = AST_DRAM_1Gx16; + break; + case 2: + ast->dram_type = AST_DRAM_2Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_4Gx16; + break; + } + } else { + switch (mcr_cfg & 0x0c) { + case 0: + case 4: + ast->dram_type = AST_DRAM_512Mx16; + break; + case 8: + if (mcr_cfg & 0x40) + ast->dram_type = AST_DRAM_1Gx16; + else + ast->dram_type = AST_DRAM_512Mx32; + break; + case 0xc: + 
ast->dram_type = AST_DRAM_1Gx32; + break; + } + } + + if (mcr_scu_strap & 0x2000) + ref_pll = 14318; + else + ref_pll = 12000; + + denum = mcr_scu_mpll & 0x1f; + num = (mcr_scu_mpll & 0x3fe0) >> 5; + dsel = (mcr_scu_mpll & 0xc000) >> 14; + switch (dsel) { + case 3: + div = 0x4; + break; + case 2: + case 1: + div = 0x2; + break; + default: + div = 0x1; + break; + } + ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000)); + return 0; +} + +/* + * Run this function as part of the HW device cleanup; not + * when the DRM device gets released. + */ +static void ast_device_release(void *data) +{ + struct ast_private *ast = data; + + /* enable standard VGA decode */ + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04); +} + +struct ast_private *ast_device_create(const struct drm_driver *drv, + struct pci_dev *pdev, unsigned long flags) +{ + struct drm_device *dev; + struct ast_private *ast; + bool need_post; + int ret = 0; + + ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_private, base); + if (IS_ERR(ast)) + return ast; + dev = &ast->base; + + pci_set_drvdata(pdev, dev); + + ret = drmm_mutex_init(dev, &ast->ioregs_lock); + if (ret) + return ERR_PTR(ret); + + ast->regs = pcim_iomap(pdev, 1, 0); + if (!ast->regs) + return ERR_PTR(-EIO); + + /* + * If we don't have IO space at all, use MMIO now and + * assume the chip has MMIO enabled by default (rev 0x20 + * and higher). 
+ */ + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) { + drm_info(dev, "platform has no IO space, trying MMIO\n"); + ast->ioregs = ast->regs + AST_IO_MM_OFFSET; + } + + /* "map" IO regs if the above hasn't done so already */ + if (!ast->ioregs) { + ast->ioregs = pcim_iomap(pdev, 2, 0); + if (!ast->ioregs) + return ERR_PTR(-EIO); + } + + ast_detect_chip(dev, &need_post); + + ret = ast_get_dram_info(dev); + if (ret) + return ERR_PTR(ret); + + drm_info(dev, "dram MCLK=%u Mhz type=%d bus_width=%d\n", ast->mclk, + ast->dram_type, ast->dram_bus_width); + + if (need_post) + ast_post_gpu(dev); + + ret = ast_mm_init(ast); + if (ret) + return ERR_PTR(ret); + + /* map reserved buffer */ + ast->dp501_fw_buf = NULL; + if (dev->vram_mm->vram_size < pci_resource_len(pdev, 0)) { + ast->dp501_fw_buf = + pci_iomap_range(pdev, 0, dev->vram_mm->vram_size, 0); + if (!ast->dp501_fw_buf) + drm_info(dev, "failed to map reserved buffer!\n"); + } + + ret = ast_mode_config_init(ast); + if (ret) + return ERR_PTR(ret); + + ret = devm_add_action_or_reset(dev->dev, ast_device_release, ast); + if (ret) + return ERR_PTR(ret); + + return ast; +} diff --git a/drivers/gpu/drm/ast_loongson/ast_mm.c b/drivers/gpu/drm/ast_loongson/ast_mm.c new file mode 100644 index 000000000000..6e999408dda9 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_mm.c @@ -0,0 +1,101 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie + */ + +#include + +#include +#include +#include + +#include "ast_drv.h" + +static u32 ast_get_vram_size(struct ast_private *ast) +{ + u8 jreg; + u32 vram_size; + + ast_open_key(ast); + + vram_size = AST_VIDMEM_DEFAULT_SIZE; + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xaa, 0xff); + switch (jreg & 3) { + case 0: + vram_size = AST_VIDMEM_SIZE_8M; + break; + case 1: + vram_size = AST_VIDMEM_SIZE_16M; + break; + case 2: + vram_size = AST_VIDMEM_SIZE_32M; + break; + case 3: + vram_size = AST_VIDMEM_SIZE_64M; + break; + } + + jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x99, 0xff); + switch (jreg & 0x03) { + case 1: + vram_size -= 0x100000; + break; + case 2: + vram_size -= 0x200000; + break; + case 3: + vram_size -= 0x400000; + break; + } + + return vram_size; +} + +int ast_mm_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct pci_dev *pdev = to_pci_dev(dev->dev); + resource_size_t base, size; + u32 vram_size; + int ret; + + base = pci_resource_start(pdev, 0); + size = pci_resource_len(pdev, 0); + + /* Don't fail on errors, but performance might be reduced. */ + devm_arch_io_reserve_memtype_wc(dev->dev, base, size); + devm_arch_phys_wc_add(dev->dev, base, size); + + vram_size = ast_get_vram_size(ast); + + ret = drmm_vram_helper_init(dev, base, vram_size); + if (ret) { + drm_err(dev, "Error initializing VRAM MM; %d\n", ret); + return ret; + } + + return 0; +} diff --git a/drivers/gpu/drm/ast_loongson/ast_mode.c b/drivers/gpu/drm/ast_loongson/ast_mode.c new file mode 100644 index 000000000000..5374fc38757f --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_mode.c @@ -0,0 +1,1881 @@ +/* + * Copyright 2012 Red Hat Inc. + * Parts based on xf86-video-ast + * Copyright (c) 2005 ASPEED Technology Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ast_drv.h" +#include "ast_tables.h" + +#define AST_LUT_SIZE 256 + +static inline void ast_load_palette_index(struct ast_private *ast, u8 index, + u8 red, u8 green, u8 blue) +{ + ast_io_write8(ast, AST_IO_DAC_INDEX_WRITE, index); + ast_io_read8(ast, AST_IO_SEQ_PORT); + ast_io_write8(ast, AST_IO_DAC_DATA, red); + ast_io_read8(ast, AST_IO_SEQ_PORT); + ast_io_write8(ast, AST_IO_DAC_DATA, green); + ast_io_read8(ast, AST_IO_SEQ_PORT); + ast_io_write8(ast, AST_IO_DAC_DATA, blue); + ast_io_read8(ast, AST_IO_SEQ_PORT); +} + +static void ast_crtc_set_gamma_linear(struct ast_private *ast, + const struct drm_format_info *format) +{ + int i; + + switch (format->format) { + case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */ + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB8888: + for (i = 0; i < AST_LUT_SIZE; i++) + ast_load_palette_index(ast, i, i, i, i); + break; + default: + drm_warn_once(&ast->base, + "Unsupported format %p4cc for gamma correction\n", + &format->format); + break; + } +} + +static void ast_crtc_set_gamma(struct ast_private *ast, + const struct drm_format_info *format, + struct drm_color_lut *lut) +{ + int i; + + switch (format->format) { + case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */ + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB8888: + for (i = 0; i < AST_LUT_SIZE; i++) + ast_load_palette_index(ast, i, lut[i].red >> 8, + lut[i].green >> 8, + lut[i].blue >> 8); + break; + default: + drm_warn_once(&ast->base, + "Unsupported format %p4cc for gamma correction\n", + &format->format); + break; + } +} + +static bool ast_get_vbios_mode_info(const struct drm_format_info *format, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + struct ast_vbios_mode_info *vbios_mode) +{ 
+ u32 refresh_rate_index = 0, refresh_rate; + const struct ast_vbios_enhtable *best = NULL; + u32 hborder, vborder; + bool check_sync; + + switch (format->cpp[0] * 8) { + case 8: + vbios_mode->std_table = &vbios_stdtable[VGAModeIndex]; + break; + case 16: + vbios_mode->std_table = &vbios_stdtable[HiCModeIndex]; + break; + case 24: + case 32: + vbios_mode->std_table = &vbios_stdtable[TrueCModeIndex]; + break; + default: + return false; + } + + switch (mode->crtc_hdisplay) { + case 640: + vbios_mode->enh_table = &res_640x480[refresh_rate_index]; + break; + case 800: + vbios_mode->enh_table = &res_800x600[refresh_rate_index]; + break; + case 1024: + vbios_mode->enh_table = &res_1024x768[refresh_rate_index]; + break; + case 1152: + vbios_mode->enh_table = &res_1152x864[refresh_rate_index]; + break; + case 1280: + if (mode->crtc_vdisplay == 800) + vbios_mode->enh_table = + &res_1280x800[refresh_rate_index]; + else + vbios_mode->enh_table = + &res_1280x1024[refresh_rate_index]; + break; + case 1360: + vbios_mode->enh_table = &res_1360x768[refresh_rate_index]; + break; + case 1440: + vbios_mode->enh_table = &res_1440x900[refresh_rate_index]; + break; + case 1600: + if (mode->crtc_vdisplay == 900) + vbios_mode->enh_table = + &res_1600x900[refresh_rate_index]; + else + vbios_mode->enh_table = + &res_1600x1200[refresh_rate_index]; + break; + case 1680: + vbios_mode->enh_table = &res_1680x1050[refresh_rate_index]; + break; + case 1920: + if (mode->crtc_vdisplay == 1080) + vbios_mode->enh_table = + &res_1920x1080[refresh_rate_index]; + else + vbios_mode->enh_table = + &res_1920x1200[refresh_rate_index]; + break; + default: + return false; + } + + refresh_rate = drm_mode_vrefresh(mode); + check_sync = vbios_mode->enh_table->flags & WideScreenMode; + + while (1) { + const struct ast_vbios_enhtable *loop = vbios_mode->enh_table; + + while (loop->refresh_rate != 0xff) { + if ((check_sync) && + (((mode->flags & DRM_MODE_FLAG_NVSYNC) && + (loop->flags & PVSync)) || + ((mode->flags & 
DRM_MODE_FLAG_PVSYNC) && + (loop->flags & NVSync)) || + ((mode->flags & DRM_MODE_FLAG_NHSYNC) && + (loop->flags & PHSync)) || + ((mode->flags & DRM_MODE_FLAG_PHSYNC) && + (loop->flags & NHSync)))) { + loop++; + continue; + } + if (loop->refresh_rate <= refresh_rate && + (!best || loop->refresh_rate > best->refresh_rate)) + best = loop; + loop++; + } + if (best || !check_sync) + break; + check_sync = 0; + } + + if (best) + vbios_mode->enh_table = best; + + hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0; + vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0; + + adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht; + adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder; + adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder; + adjusted_mode->crtc_hsync_start = vbios_mode->enh_table->hde + hborder + + vbios_mode->enh_table->hfp; + adjusted_mode->crtc_hsync_end = + (vbios_mode->enh_table->hde + hborder + + vbios_mode->enh_table->hfp + vbios_mode->enh_table->hsync); + + adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt; + adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder; + adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder; + adjusted_mode->crtc_vsync_start = vbios_mode->enh_table->vde + vborder + + vbios_mode->enh_table->vfp; + adjusted_mode->crtc_vsync_end = + (vbios_mode->enh_table->vde + vborder + + vbios_mode->enh_table->vfp + vbios_mode->enh_table->vsync); + + return true; +} + +static void +ast_set_vbios_color_reg(struct ast_private *ast, + const struct drm_format_info *format, + const struct ast_vbios_mode_info *vbios_mode) +{ + u32 color_index; + + switch (format->cpp[0]) { + case 1: + color_index = VGAModeIndex - 1; + break; + case 2: + color_index = HiCModeIndex; + break; + case 3: + case 4: + color_index = TrueCModeIndex; + break; + default: + return; + } + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8c, + (u8)((color_index & 0x0f) << 4)); + + 
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00); + + if (vbios_mode->enh_table->flags & NewModeInfo) { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, + format->cpp[0] * 8); + } +} + +static void ast_set_vbios_mode_reg(struct ast_private *ast, + const struct drm_display_mode *adjusted_mode, + const struct ast_vbios_mode_info *vbios_mode) +{ + u32 refresh_rate_index, mode_id; + + refresh_rate_index = vbios_mode->enh_table->refresh_rate_index; + mode_id = vbios_mode->enh_table->mode_id; + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8d, + refresh_rate_index & 0xff); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x8e, mode_id & 0xff); + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00); + + if (vbios_mode->enh_table->flags & NewModeInfo) { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, + adjusted_mode->clock / 1000); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, + adjusted_mode->crtc_hdisplay); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, + adjusted_mode->crtc_hdisplay >> 8); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x96, + adjusted_mode->crtc_vdisplay); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x97, + adjusted_mode->crtc_vdisplay >> 8); + } +} + +static void ast_set_std_reg(struct ast_private *ast, + struct drm_display_mode *mode, + struct ast_vbios_mode_info *vbios_mode) +{ + const struct ast_vbios_stdtable *stdtable; + u32 i; + u8 jreg; + + stdtable = vbios_mode->std_table; + + jreg = stdtable->misc; + ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg); + + /* Set SEQ; except Screen Disable field */ + ast_set_index_reg(ast, AST_IO_SEQ_PORT, 0x00, 0x03); + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, + stdtable->seq[0]); + for (i = 1; i < 4; i++) { + jreg = stdtable->seq[i]; + ast_set_index_reg(ast, AST_IO_SEQ_PORT, (i + 1), jreg); + } + + /* Set CRTC; except base address and offset */ + ast_set_index_reg_mask(ast, 
AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00); + for (i = 0; i < 12; i++) + ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]); + for (i = 14; i < 19; i++) + ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]); + for (i = 20; i < 25; i++) + ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, stdtable->crtc[i]); + + /* set AR */ + jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ); + for (i = 0; i < 20; i++) { + jreg = stdtable->ar[i]; + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, (u8)i); + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, jreg); + } + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x14); + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x00); + + jreg = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ); + ast_io_write8(ast, AST_IO_AR_PORT_WRITE, 0x20); + + /* Set GR */ + for (i = 0; i < 9; i++) + ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]); +} + +static void ast_set_crtc_reg(struct ast_private *ast, + struct drm_display_mode *mode, + struct ast_vbios_mode_info *vbios_mode) +{ + u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, + jregAE = 0; + u16 temp, precache = 0; + + if ((ast->chip == AST2500 || ast->chip == AST2600) && + (vbios_mode->enh_table->flags & AST2500PreCatchCRT)) + precache = 40; + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x00); + + temp = (mode->crtc_htotal >> 3) - 5; + if (temp & 0x100) + jregAC |= 0x01; /* HT D[8] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x00, 0x00, temp); + + temp = (mode->crtc_hdisplay >> 3) - 1; + if (temp & 0x100) + jregAC |= 0x04; /* HDE D[8] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x01, 0x00, temp); + + temp = (mode->crtc_hblank_start >> 3) - 1; + if (temp & 0x100) + jregAC |= 0x10; /* HBS D[8] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x02, 0x00, temp); + + temp = ((mode->crtc_hblank_end >> 3) - 1) & 0x7f; + if (temp & 0x20) + jreg05 |= 0x80; /* HBE D[5] */ + if (temp & 0x40) + jregAD |= 0x01; /* HBE D[5] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 
0x03, 0xE0, + (temp & 0x1f)); + + temp = ((mode->crtc_hsync_start - precache) >> 3) - 1; + if (temp & 0x100) + jregAC |= 0x40; /* HRS D[5] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x04, 0x00, temp); + + temp = (((mode->crtc_hsync_end - precache) >> 3) - 1) & 0x3f; + if (temp & 0x20) + jregAD |= 0x04; /* HRE D[5] */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x05, 0x60, + (u8)((temp & 0x1f) | jreg05)); + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAC, 0x00, jregAC); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAD, 0x00, jregAD); + + // Workaround for HSync Time non octave pixels (1920x1080@60Hz HSync 44 pixels); + if ((ast->chip == AST2600) && (mode->crtc_vdisplay == 1080)) + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x02); + else + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xFC, 0xFD, 0x00); + + /* vert timings */ + temp = (mode->crtc_vtotal) - 2; + if (temp & 0x100) + jreg07 |= 0x01; + if (temp & 0x200) + jreg07 |= 0x20; + if (temp & 0x400) + jregAE |= 0x01; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x06, 0x00, temp); + + temp = (mode->crtc_vsync_start) - 1; + if (temp & 0x100) + jreg07 |= 0x04; + if (temp & 0x200) + jreg07 |= 0x80; + if (temp & 0x400) + jregAE |= 0x08; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x10, 0x00, temp); + + temp = (mode->crtc_vsync_end - 1) & 0x3f; + if (temp & 0x10) + jregAE |= 0x20; + if (temp & 0x20) + jregAE |= 0x40; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x70, temp & 0xf); + + temp = mode->crtc_vdisplay - 1; + if (temp & 0x100) + jreg07 |= 0x02; + if (temp & 0x200) + jreg07 |= 0x40; + if (temp & 0x400) + jregAE |= 0x02; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x12, 0x00, temp); + + temp = mode->crtc_vblank_start - 1; + if (temp & 0x100) + jreg07 |= 0x08; + if (temp & 0x200) + jreg09 |= 0x20; + if (temp & 0x400) + jregAE |= 0x04; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x15, 0x00, temp); + + temp = mode->crtc_vblank_end - 1; + if (temp & 0x100) + 
jregAE |= 0x10; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x16, 0x00, temp); + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x07, 0x00, jreg07); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x09, 0xdf, jreg09); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xAE, 0x00, + (jregAE | 0x80)); + + if (precache) + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0x3f, 0x80); + else + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0x3f, 0x00); + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80); +} + +static void ast_set_offset_reg(struct ast_private *ast, + struct drm_framebuffer *fb) +{ + u16 offset; + + offset = fb->pitches[0] >> 3; + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff)); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f); +} + +static void ast_set_dclk_reg(struct ast_private *ast, + struct drm_display_mode *mode, + struct ast_vbios_mode_info *vbios_mode) +{ + const struct ast_vbios_dclk_info *clk_info; + + if ((ast->chip == AST2500) || (ast->chip == AST2600)) + clk_info = + &dclk_table_ast2500[vbios_mode->enh_table->dclk_index]; + else + clk_info = &dclk_table[vbios_mode->enh_table->dclk_index]; + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc0, 0x00, + clk_info->param1); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xc1, 0x00, + clk_info->param2); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xbb, 0x0f, + (clk_info->param3 & 0xc0) | + ((clk_info->param3 & 0x3) << 4)); +} + +static void ast_set_color_reg(struct ast_private *ast, + const struct drm_format_info *format) +{ + u8 jregA0 = 0, jregA3 = 0, jregA8 = 0; + + switch (format->cpp[0] * 8) { + case 8: + jregA0 = 0x70; + jregA3 = 0x01; + jregA8 = 0x00; + break; + case 15: + case 16: + jregA0 = 0x70; + jregA3 = 0x04; + jregA8 = 0x02; + break; + case 32: + jregA0 = 0x70; + jregA3 = 0x08; + jregA8 = 0x02; + break; + } + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa0, 0x8f, jregA0); + ast_set_index_reg_mask(ast, 
AST_IO_CRTC_PORT, 0xa3, 0xf0, jregA3); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8); +} + +static void ast_set_crtthd_reg(struct ast_private *ast) +{ + /* Set Threshold */ + if (ast->chip == AST2600) { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0xe0); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0xa0); + } else if (ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500) { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60); + } else if (ast->chip == AST2100 || ast->chip == AST1100 || + ast->chip == AST2200 || ast->chip == AST2150) { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x3f); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x2f); + } else { + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x2f); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x1f); + } +} + +static void ast_set_sync_reg(struct ast_private *ast, + struct drm_display_mode *mode, + struct ast_vbios_mode_info *vbios_mode) +{ + u8 jreg; + + jreg = ast_io_read8(ast, AST_IO_MISC_PORT_READ); + jreg &= ~0xC0; + if (vbios_mode->enh_table->flags & NVSync) + jreg |= 0x80; + if (vbios_mode->enh_table->flags & NHSync) + jreg |= 0x40; + ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg); +} + +static void ast_set_start_address_crt1(struct ast_private *ast, + unsigned int offset) +{ + u32 addr; + + addr = offset >> 2; + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0d, (u8)(addr & 0xff)); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x0c, + (u8)((addr >> 8) & 0xff)); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xaf, + (u8)((addr >> 16) & 0xff)); +} + +static void ast_wait_for_vretrace(struct ast_private *ast) +{ + unsigned long timeout = jiffies + HZ; + u8 vgair1; + + do { + vgair1 = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ); + } while (!(vgair1 & AST_IO_VGAIR1_VREFRESH) && + time_before(jiffies, timeout)); +} + +/* + * Primary plane + */ + +static const uint32_t ast_primary_plane_formats[] = { + 
DRM_FORMAT_XRGB8888, + DRM_FORMAT_RGB565, + DRM_FORMAT_C8, +}; + +static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_device *dev = plane->dev; + struct drm_plane_state *new_plane_state = + drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc_state *new_crtc_state = NULL; + struct ast_crtc_state *new_ast_crtc_state; + int ret; + + if (new_plane_state->crtc) + new_crtc_state = drm_atomic_get_new_crtc_state( + state, new_plane_state->crtc); + + ret = drm_atomic_helper_check_plane_state( + new_plane_state, new_crtc_state, DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, false, true); + if (ret) { + return ret; + } else if (!new_plane_state->visible) { + if (drm_WARN_ON( + dev, + new_plane_state->crtc)) /* cannot legally happen */ + return -EINVAL; + else + return 0; + } + + new_ast_crtc_state = to_ast_crtc_state(new_crtc_state); + + new_ast_crtc_state->format = new_plane_state->fb->format; + + return 0; +} + +static void +ast_primary_plane_helper_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_device *dev = plane->dev; + struct ast_private *ast = to_ast_private(dev); + struct drm_plane_state *plane_state = + drm_atomic_get_new_plane_state(state, plane); + struct drm_framebuffer *fb = plane_state->fb; + struct drm_plane_state *old_plane_state = + drm_atomic_get_old_plane_state(state, plane); + struct drm_framebuffer *old_fb = old_plane_state->fb; + struct drm_gem_vram_object *gbo; + s64 gpu_addr; + + if (!old_fb || (fb->format != old_fb->format)) { + struct drm_crtc *crtc = plane_state->crtc; + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, crtc); + struct ast_crtc_state *ast_crtc_state = + to_ast_crtc_state(crtc_state); + struct ast_vbios_mode_info *vbios_mode_info = + &ast_crtc_state->vbios_mode_info; + + ast_set_color_reg(ast, fb->format); + ast_set_vbios_color_reg(ast, fb->format, vbios_mode_info); + } + + gbo = 
drm_gem_vram_of_gem(fb->obj[0]); + gpu_addr = drm_gem_vram_offset(gbo); + if (drm_WARN_ON_ONCE(dev, gpu_addr < 0)) + return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */ + + ast_set_offset_reg(ast, fb); + ast_set_start_address_crt1(ast, (u32)gpu_addr); + + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00); +} + +static void +ast_primary_plane_helper_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct ast_private *ast = to_ast_private(plane->dev); + + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20); +} + +static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = { + DRM_GEM_VRAM_PLANE_HELPER_FUNCS, + .atomic_check = ast_primary_plane_helper_atomic_check, + .atomic_update = ast_primary_plane_helper_atomic_update, + .atomic_disable = ast_primary_plane_helper_atomic_disable, +}; + +static const struct drm_plane_funcs ast_primary_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static int ast_primary_plane_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_plane *primary_plane = &ast->primary_plane; + int ret; + + ret = drm_universal_plane_init(dev, primary_plane, 0x01, + &ast_primary_plane_funcs, + ast_primary_plane_formats, + ARRAY_SIZE(ast_primary_plane_formats), + NULL, DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) { + drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret); + return ret; + } + drm_plane_helper_add(primary_plane, &ast_primary_plane_helper_funcs); + + return 0; +} + +/* + * Cursor plane + */ + +static void ast_update_cursor_image(u8 __iomem *dst, const u8 *src, int width, + int height) +{ + union { + u32 ul; + u8 b[4]; + } srcdata32[2], data32; + 
union { + u16 us; + u8 b[2]; + } data16; + u32 csum = 0; + s32 alpha_dst_delta, last_alpha_dst_delta; + u8 __iomem *dstxor; + const u8 *srcxor; + int i, j; + u32 per_pixel_copy, two_pixel_copy; + + alpha_dst_delta = AST_MAX_HWC_WIDTH << 1; + last_alpha_dst_delta = alpha_dst_delta - (width << 1); + + srcxor = src; + dstxor = (u8 *)dst + last_alpha_dst_delta + + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta; + per_pixel_copy = width & 1; + two_pixel_copy = width >> 1; + + for (j = 0; j < height; j++) { + for (i = 0; i < two_pixel_copy; i++) { + srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0; + srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0; + data32.b[0] = srcdata32[0].b[1] | + (srcdata32[0].b[0] >> 4); + data32.b[1] = srcdata32[0].b[3] | + (srcdata32[0].b[2] >> 4); + data32.b[2] = srcdata32[1].b[1] | + (srcdata32[1].b[0] >> 4); + data32.b[3] = srcdata32[1].b[3] | + (srcdata32[1].b[2] >> 4); + + writel(data32.ul, dstxor); + csum += data32.ul; + + dstxor += 4; + srcxor += 8; + } + + for (i = 0; i < per_pixel_copy; i++) { + srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0; + data16.b[0] = srcdata32[0].b[1] | + (srcdata32[0].b[0] >> 4); + data16.b[1] = srcdata32[0].b[3] | + (srcdata32[0].b[2] >> 4); + writew(data16.us, dstxor); + csum += (u32)data16.us; + + dstxor += 2; + srcxor += 4; + } + dstxor += last_alpha_dst_delta; + } + + /* write checksum + signature */ + dst += AST_HWC_SIZE; + writel(csum, dst); + writel(width, dst + AST_HWC_SIGNATURE_SizeX); + writel(height, dst + AST_HWC_SIGNATURE_SizeY); + writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX); + writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY); +} + +static void ast_set_cursor_base(struct ast_private *ast, u64 address) +{ + u8 addr0 = (address >> 3) & 0xff; + u8 addr1 = (address >> 11) & 0xff; + u8 addr2 = (address >> 19) & 0xff; + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2); +} + +static 
void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y, + u8 x_offset, u8 y_offset) +{ + u8 x0 = (x & 0x00ff); + u8 x1 = (x & 0x0f00) >> 8; + u8 y0 = (y & 0x00ff); + u8 y1 = (y & 0x0700) >> 8; + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, x0); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, x1); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, y0); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1); +} + +static void ast_set_cursor_enabled(struct ast_private *ast, bool enabled) +{ + static const u8 mask = + (u8) ~(AST_IO_VGACRCB_HWC_16BPP | AST_IO_VGACRCB_HWC_ENABLED); + + u8 vgacrcb = AST_IO_VGACRCB_HWC_16BPP; + + if (enabled) + vgacrcb |= AST_IO_VGACRCB_HWC_ENABLED; + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, mask, vgacrcb); +} + +static const uint32_t ast_cursor_plane_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = + drm_atomic_get_new_plane_state(state, plane); + struct drm_framebuffer *new_fb = new_plane_state->fb; + struct drm_crtc_state *new_crtc_state = NULL; + int ret; + + if (new_plane_state->crtc) + new_crtc_state = drm_atomic_get_new_crtc_state( + state, new_plane_state->crtc); + + ret = drm_atomic_helper_check_plane_state( + new_plane_state, new_crtc_state, DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, true, true); + if (ret || !new_plane_state->visible) + return ret; + + if (new_fb->width > AST_MAX_HWC_WIDTH || + new_fb->height > AST_MAX_HWC_HEIGHT) + return -EINVAL; + + return 0; +} + +static void +ast_cursor_plane_helper_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct ast_plane *ast_plane = to_ast_plane(plane); + struct drm_plane_state *plane_state = + drm_atomic_get_new_plane_state(state, plane); + struct 
drm_shadow_plane_state *shadow_plane_state = + to_drm_shadow_plane_state(plane_state); + struct drm_framebuffer *fb = plane_state->fb; + struct drm_plane_state *old_plane_state = + drm_atomic_get_old_plane_state(state, plane); + struct drm_framebuffer *old_fb = old_plane_state->fb; + struct ast_private *ast = to_ast_private(plane->dev); + struct iosys_map dst_map = ast_plane->map; + u64 dst_off = ast_plane->off; + struct iosys_map src_map = shadow_plane_state->data[0]; + unsigned int offset_x, offset_y; + u16 x, y; + u8 x_offset, y_offset; + u8 __iomem *dst; + u8 __iomem *sig; + const u8 *src; + + src = src_map.vaddr; /* TODO: Use mapping abstraction properly */ + dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */ + sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */ + + /* + * Do data transfer to HW cursor BO. If a new cursor image was installed, + * point the scanout engine to dst_gbo's offset and page-flip the HWC buffers. + */ + + ast_update_cursor_image(dst, src, fb->width, fb->height); + + if (fb != old_fb) + ast_set_cursor_base(ast, dst_off); + + /* + * Update location in HWC signature and registers. + */ + + writel(plane_state->crtc_x, sig + AST_HWC_SIGNATURE_X); + writel(plane_state->crtc_y, sig + AST_HWC_SIGNATURE_Y); + + offset_x = AST_MAX_HWC_WIDTH - fb->width; + offset_y = AST_MAX_HWC_HEIGHT - fb->height; + + if (plane_state->crtc_x < 0) { + x_offset = (-plane_state->crtc_x) + offset_x; + x = 0; + } else { + x_offset = offset_x; + x = plane_state->crtc_x; + } + if (plane_state->crtc_y < 0) { + y_offset = (-plane_state->crtc_y) + offset_y; + y = 0; + } else { + y_offset = offset_y; + y = plane_state->crtc_y; + } + + ast_set_cursor_location(ast, x, y, x_offset, y_offset); + + /* Dummy write to enable HWC and make the HW pick-up the changes. 
*/ + ast_set_cursor_enabled(ast, true); +} + +static void +ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct ast_private *ast = to_ast_private(plane->dev); + + ast_set_cursor_enabled(ast, false); +} + +static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = { + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, + .atomic_check = ast_cursor_plane_helper_atomic_check, + .atomic_update = ast_cursor_plane_helper_atomic_update, + .atomic_disable = ast_cursor_plane_helper_atomic_disable, +}; + +static void ast_cursor_plane_destroy(struct drm_plane *plane) +{ + struct ast_plane *ast_plane = to_ast_plane(plane); + struct drm_gem_vram_object *gbo = ast_plane->gbo; + struct iosys_map map = ast_plane->map; + + drm_gem_vram_vunmap(gbo, &map); + drm_gem_vram_unpin(gbo); + drm_gem_vram_put(gbo); + + drm_plane_cleanup(plane); +} + +static const struct drm_plane_funcs ast_cursor_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = ast_cursor_plane_destroy, + DRM_GEM_SHADOW_PLANE_FUNCS, +}; + +static int ast_cursor_plane_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct ast_plane *ast_plane = &ast->cursor_plane; + struct drm_plane *cursor_plane = &ast_plane->base; + size_t size; + struct drm_gem_vram_object *gbo; + struct iosys_map map; + int ret; + s64 off; + + /* + * Allocate backing storage for cursors. The BOs are permanently + * pinned to the top end of the VRAM. 
+ */ + + size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); + + gbo = drm_gem_vram_create(dev, size, 0); + if (IS_ERR(gbo)) + return PTR_ERR(gbo); + + ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM | + DRM_GEM_VRAM_PL_FLAG_TOPDOWN); + if (ret) + goto err_drm_gem_vram_put; + ret = drm_gem_vram_vmap(gbo, &map); + if (ret) + goto err_drm_gem_vram_unpin; + off = drm_gem_vram_offset(gbo); + if (off < 0) { + ret = off; + goto err_drm_gem_vram_vunmap; + } + + ast_plane->gbo = gbo; + ast_plane->map = map; + ast_plane->off = off; + + /* + * Create the cursor plane. The plane's destroy callback will release + * the backing storages' BO memory. + */ + + ret = drm_universal_plane_init(dev, cursor_plane, 0x01, + &ast_cursor_plane_funcs, + ast_cursor_plane_formats, + ARRAY_SIZE(ast_cursor_plane_formats), + NULL, DRM_PLANE_TYPE_CURSOR, NULL); + if (ret) { + drm_err(dev, "drm_universal_plane failed(): %d\n", ret); + goto err_drm_gem_vram_vunmap; + } + drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs); + + return 0; + +err_drm_gem_vram_vunmap: + drm_gem_vram_vunmap(gbo, &map); +err_drm_gem_vram_unpin: + drm_gem_vram_unpin(gbo); +err_drm_gem_vram_put: + drm_gem_vram_put(gbo); + return ret; +} + +/* + * CRTC + */ + +static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct ast_private *ast = to_ast_private(crtc->dev); + u8 ch = AST_DPMS_VSYNC_OFF | AST_DPMS_HSYNC_OFF; + struct ast_crtc_state *ast_state; + const struct drm_format_info *format; + struct ast_vbios_mode_info *vbios_mode_info; + + /* TODO: Maybe control display signal generation with + * Sync Enable (bit CR17.7). 
+ */ + switch (mode) { + case DRM_MODE_DPMS_ON: + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, 0); + if (ast->tx_chip_types & AST_TX_DP501_BIT) + ast_set_dp501_video_output(crtc->dev, 1); + + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { + ast_dp_power_on_off(crtc->dev, AST_DP_POWER_ON); + ast_wait_for_vretrace(ast); + ast_dp_set_on_off(crtc->dev, 1); + } + + ast_state = to_ast_crtc_state(crtc->state); + format = ast_state->format; + + if (format) { + vbios_mode_info = &ast_state->vbios_mode_info; + + ast_set_color_reg(ast, format); + ast_set_vbios_color_reg(ast, format, vbios_mode_info); + if (crtc->state->gamma_lut) + ast_crtc_set_gamma( + ast, format, + crtc->state->gamma_lut->data); + else + ast_crtc_set_gamma_linear(ast, format); + } + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + ch = mode; + if (ast->tx_chip_types & AST_TX_DP501_BIT) + ast_set_dp501_video_output(crtc->dev, 0); + + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { + ast_dp_set_on_off(crtc->dev, 0); + ast_dp_power_on_off(crtc->dev, AST_DP_POWER_OFF); + } + + ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0x20); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, ch); + break; + } +} + +static enum drm_mode_status +ast_crtc_helper_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct ast_private *ast = to_ast_private(crtc->dev); + enum drm_mode_status status; + uint32_t jtemp; + + if (ast->support_wide_screen) { + if ((mode->hdisplay == 1680) && (mode->vdisplay == 1050)) + return MODE_OK; + if ((mode->hdisplay == 1280) && (mode->vdisplay == 800)) + return MODE_OK; + if ((mode->hdisplay == 1440) && (mode->vdisplay == 900)) + return MODE_OK; + if ((mode->hdisplay == 1360) && (mode->vdisplay == 768)) + return MODE_OK; + if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) + return MODE_OK; + if ((mode->hdisplay == 1152) && 
(mode->vdisplay == 864)) + return MODE_OK; + + if ((ast->chip == AST2100) || (ast->chip == AST2200) || + (ast->chip == AST2300) || (ast->chip == AST2400) || + (ast->chip == AST2500) || (ast->chip == AST2600)) { + if ((mode->hdisplay == 1920) && + (mode->vdisplay == 1080)) + return MODE_OK; + + if ((mode->hdisplay == 1920) && + (mode->vdisplay == 1200)) { + jtemp = ast_get_index_reg_mask( + ast, AST_IO_CRTC_PORT, 0xd1, 0xff); + if (jtemp & 0x01) + return MODE_NOMODE; + else + return MODE_OK; + } + } + } + + status = MODE_NOMODE; + + switch (mode->hdisplay) { + case 640: + if (mode->vdisplay == 480) + status = MODE_OK; + break; + case 800: + if (mode->vdisplay == 600) + status = MODE_OK; + break; + case 1024: + if (mode->vdisplay == 768) + status = MODE_OK; + break; + case 1152: + if (mode->vdisplay == 864) + status = MODE_OK; + break; + case 1280: + if (mode->vdisplay == 1024) + status = MODE_OK; + break; + case 1600: + if (mode->vdisplay == 1200) + status = MODE_OK; + break; + default: + break; + } + + return status; +} + +static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, crtc); + struct drm_crtc_state *old_crtc_state = + drm_atomic_get_old_crtc_state(state, crtc); + struct ast_crtc_state *old_ast_crtc_state = + to_ast_crtc_state(old_crtc_state); + struct drm_device *dev = crtc->dev; + struct ast_crtc_state *ast_state; + const struct drm_format_info *format; + bool succ; + int ret; + + if (!crtc_state->enable) + return 0; + + ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state); + if (ret) + return ret; + + ast_state = to_ast_crtc_state(crtc_state); + + format = ast_state->format; + if (drm_WARN_ON_ONCE(dev, !format)) + return -EINVAL; /* BUG: We didn't set format in primary check(). */ + + /* + * The gamma LUT has to be reloaded after changing the primary + * plane's color format. 
+ */ + if (old_ast_crtc_state->format != format) + crtc_state->color_mgmt_changed = true; + + if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) { + if (crtc_state->gamma_lut->length != + AST_LUT_SIZE * sizeof(struct drm_color_lut)) { + drm_err(dev, "Wrong size for gamma_lut %zu\n", + crtc_state->gamma_lut->length); + return -EINVAL; + } + } + + succ = ast_get_vbios_mode_info(format, &crtc_state->mode, + &crtc_state->adjusted_mode, + &ast_state->vbios_mode_info); + if (!succ) + return -EINVAL; + + return 0; +} + +static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, crtc); + struct drm_device *dev = crtc->dev; + struct ast_private *ast = to_ast_private(dev); + struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); + struct ast_vbios_mode_info *vbios_mode_info = + &ast_crtc_state->vbios_mode_info; + + /* + * The gamma LUT has to be reloaded after changing the primary + * plane's color format. 
+ */ + if (crtc_state->enable && crtc_state->color_mgmt_changed) { + if (crtc_state->gamma_lut) + ast_crtc_set_gamma(ast, ast_crtc_state->format, + crtc_state->gamma_lut->data); + else + ast_crtc_set_gamma_linear(ast, ast_crtc_state->format); + } + + //Set Aspeed Display-Port + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) + ast_dp_set_mode(crtc, vbios_mode_info); +} + +static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct ast_private *ast = to_ast_private(dev); + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, crtc); + struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state); + struct ast_vbios_mode_info *vbios_mode_info = + &ast_crtc_state->vbios_mode_info; + struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; + + ast_set_vbios_mode_reg(ast, adjusted_mode, vbios_mode_info); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); + ast_set_std_reg(ast, adjusted_mode, vbios_mode_info); + ast_set_crtc_reg(ast, adjusted_mode, vbios_mode_info); + ast_set_dclk_reg(ast, adjusted_mode, vbios_mode_info); + ast_set_crtthd_reg(ast); + ast_set_sync_reg(ast, adjusted_mode, vbios_mode_info); + + ast_crtc_dpms(crtc, DRM_MODE_DPMS_ON); +} + +static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *old_crtc_state = + drm_atomic_get_old_crtc_state(state, crtc); + struct drm_device *dev = crtc->dev; + struct ast_private *ast = to_ast_private(dev); + + ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + + /* + * HW cursors require the underlying primary plane and CRTC to + * display a valid mode and image. This is not the case during + * full modeset operations. So we temporarily disable any active + * plane, including the HW cursor. Each plane's atomic_update() + * helper will re-enable it if necessary. + * + * We only do this during *full* modesets. 
It does not affect + * simple pageflips on the planes. + */ + drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false); + + /* + * Ensure that no scanout takes place before reprogramming mode + * and format registers. + */ + ast_wait_for_vretrace(ast); +} + +static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = { + .mode_valid = ast_crtc_helper_mode_valid, + .atomic_check = ast_crtc_helper_atomic_check, + .atomic_flush = ast_crtc_helper_atomic_flush, + .atomic_enable = ast_crtc_helper_atomic_enable, + .atomic_disable = ast_crtc_helper_atomic_disable, +}; + +static void ast_crtc_reset(struct drm_crtc *crtc) +{ + struct ast_crtc_state *ast_state = + kzalloc(sizeof(*ast_state), GFP_KERNEL); + + if (crtc->state) + crtc->funcs->atomic_destroy_state(crtc, crtc->state); + + if (ast_state) + __drm_atomic_helper_crtc_reset(crtc, &ast_state->base); + else + __drm_atomic_helper_crtc_reset(crtc, NULL); +} + +static struct drm_crtc_state * +ast_crtc_atomic_duplicate_state(struct drm_crtc *crtc) +{ + struct ast_crtc_state *new_ast_state, *ast_state; + struct drm_device *dev = crtc->dev; + + if (drm_WARN_ON(dev, !crtc->state)) + return NULL; + + new_ast_state = kmalloc(sizeof(*new_ast_state), GFP_KERNEL); + if (!new_ast_state) + return NULL; + __drm_atomic_helper_crtc_duplicate_state(crtc, &new_ast_state->base); + + ast_state = to_ast_crtc_state(crtc->state); + + new_ast_state->format = ast_state->format; + memcpy(&new_ast_state->vbios_mode_info, &ast_state->vbios_mode_info, + sizeof(new_ast_state->vbios_mode_info)); + + return &new_ast_state->base; +} + +static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct ast_crtc_state *ast_state = to_ast_crtc_state(state); + + __drm_atomic_helper_crtc_destroy_state(&ast_state->base); + kfree(ast_state); +} + +static const struct drm_crtc_funcs ast_crtc_funcs = { + .reset = ast_crtc_reset, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + 
.page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = ast_crtc_atomic_duplicate_state, + .atomic_destroy_state = ast_crtc_atomic_destroy_state, +}; + +static int ast_crtc_init(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + struct drm_crtc *crtc = &ast->crtc; + int ret; + + ret = drm_crtc_init_with_planes(dev, crtc, &ast->primary_plane, + &ast->cursor_plane.base, + &ast_crtc_funcs, NULL); + if (ret) + return ret; + + drm_mode_crtc_set_gamma_size(crtc, AST_LUT_SIZE); + drm_crtc_enable_color_mgmt(crtc, 0, false, AST_LUT_SIZE); + + drm_crtc_helper_add(crtc, &ast_crtc_helper_funcs); + + return 0; +} + +/* + * VGA Connector + */ + +static int ast_vga_connector_helper_get_modes(struct drm_connector *connector) +{ + struct ast_vga_connector *ast_vga_connector = + to_ast_vga_connector(connector); + struct drm_device *dev = connector->dev; + struct ast_private *ast = to_ast_private(dev); + struct edid *edid; + int count; + + if (!ast_vga_connector->i2c) + goto err_drm_connector_update_edid_property; + + /* + * Protect access to I/O registers from concurrent modesetting + * by acquiring the I/O-register lock. 
+ */ + mutex_lock(&ast->ioregs_lock); + + edid = drm_get_edid(connector, &ast_vga_connector->i2c->adapter); + if (!edid) + goto err_mutex_unlock; + + mutex_unlock(&ast->ioregs_lock); + + count = drm_add_edid_modes(connector, edid); + kfree(edid); + + return count; + +err_mutex_unlock: + mutex_unlock(&ast->ioregs_lock); +err_drm_connector_update_edid_property: + drm_connector_update_edid_property(connector, NULL); + return 0; +} + +static const struct drm_connector_helper_funcs ast_vga_connector_helper_funcs = { + .get_modes = ast_vga_connector_helper_get_modes, +}; + +static const struct drm_connector_funcs ast_vga_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ast_vga_connector_init(struct drm_device *dev, + struct ast_vga_connector *ast_vga_connector) +{ + struct drm_connector *connector = &ast_vga_connector->base; + int ret; + + ast_vga_connector->i2c = ast_i2c_create(dev); + if (!ast_vga_connector->i2c) + drm_err(dev, "failed to add ddc bus for connector\n"); + + if (ast_vga_connector->i2c) + ret = drm_connector_init_with_ddc( + dev, connector, &ast_vga_connector_funcs, + DRM_MODE_CONNECTOR_VGA, + &ast_vga_connector->i2c->adapter); + else + ret = drm_connector_init(dev, connector, + &ast_vga_connector_funcs, + DRM_MODE_CONNECTOR_VGA); + if (ret) + return ret; + + drm_connector_helper_add(connector, &ast_vga_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + return 0; +} + +static int ast_vga_output_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.vga.encoder; + struct ast_vga_connector 
*ast_vga_connector = + &ast->output.vga.vga_connector; + struct drm_connector *connector = &ast_vga_connector->base; + int ret; + + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = ast_vga_connector_init(dev, ast_vga_connector); + if (ret) + return ret; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; + + return 0; +} + +/* + * SIL164 Connector + */ + +static int +ast_sil164_connector_helper_get_modes(struct drm_connector *connector) +{ + struct ast_sil164_connector *ast_sil164_connector = + to_ast_sil164_connector(connector); + struct drm_device *dev = connector->dev; + struct ast_private *ast = to_ast_private(dev); + struct edid *edid; + int count; + + if (!ast_sil164_connector->i2c) + goto err_drm_connector_update_edid_property; + + /* + * Protect access to I/O registers from concurrent modesetting + * by acquiring the I/O-register lock. + */ + mutex_lock(&ast->ioregs_lock); + + edid = drm_get_edid(connector, &ast_sil164_connector->i2c->adapter); + if (!edid) + goto err_mutex_unlock; + + mutex_unlock(&ast->ioregs_lock); + + count = drm_add_edid_modes(connector, edid); + kfree(edid); + + return count; + +err_mutex_unlock: + mutex_unlock(&ast->ioregs_lock); +err_drm_connector_update_edid_property: + drm_connector_update_edid_property(connector, NULL); + return 0; +} + +static const struct drm_connector_helper_funcs + ast_sil164_connector_helper_funcs = { + .get_modes = ast_sil164_connector_helper_get_modes, + }; + +static const struct drm_connector_funcs ast_sil164_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int +ast_sil164_connector_init(struct drm_device *dev, 
+ struct ast_sil164_connector *ast_sil164_connector) +{ + struct drm_connector *connector = &ast_sil164_connector->base; + int ret; + + ast_sil164_connector->i2c = ast_i2c_create(dev); + if (!ast_sil164_connector->i2c) + drm_err(dev, "failed to add ddc bus for connector\n"); + + if (ast_sil164_connector->i2c) + ret = drm_connector_init_with_ddc( + dev, connector, &ast_sil164_connector_funcs, + DRM_MODE_CONNECTOR_DVII, + &ast_sil164_connector->i2c->adapter); + else + ret = drm_connector_init(dev, connector, + &ast_sil164_connector_funcs, + DRM_MODE_CONNECTOR_DVII); + if (ret) + return ret; + + drm_connector_helper_add(connector, &ast_sil164_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + return 0; +} + +static int ast_sil164_output_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.sil164.encoder; + struct ast_sil164_connector *ast_sil164_connector = + &ast->output.sil164.sil164_connector; + struct drm_connector *connector = &ast_sil164_connector->base; + int ret; + + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = ast_sil164_connector_init(dev, ast_sil164_connector); + if (ret) + return ret; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; + + return 0; +} + +/* + * DP501 Connector + */ + +static int ast_dp501_connector_helper_get_modes(struct drm_connector *connector) +{ + void *edid; + bool succ; + int count; + + edid = kmalloc(EDID_LENGTH, GFP_KERNEL); + if (!edid) + goto err_drm_connector_update_edid_property; + + succ = ast_dp501_read_edid(connector->dev, edid); + if (!succ) + goto err_kfree; + + drm_connector_update_edid_property(connector, edid); + count = drm_add_edid_modes(connector, edid); + kfree(edid); + + 
return count; + +err_kfree: + kfree(edid); +err_drm_connector_update_edid_property: + drm_connector_update_edid_property(connector, NULL); + return 0; +} + +static const struct drm_connector_helper_funcs + ast_dp501_connector_helper_funcs = { + .get_modes = ast_dp501_connector_helper_get_modes, + }; + +static const struct drm_connector_funcs ast_dp501_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ast_dp501_connector_init(struct drm_device *dev, + struct drm_connector *connector) +{ + int ret; + + ret = drm_connector_init(dev, connector, &ast_dp501_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + if (ret) + return ret; + + drm_connector_helper_add(connector, &ast_dp501_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + return 0; +} + +static int ast_dp501_output_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.dp501.encoder; + struct drm_connector *connector = &ast->output.dp501.connector; + int ret; + + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = ast_dp501_connector_init(dev, connector); + if (ret) + return ret; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; + + return 0; +} + +/* + * ASPEED Display-Port Connector + */ + +static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector) +{ + void *edid; + + int succ; + int count; + + edid = kmalloc(EDID_LENGTH, GFP_KERNEL); + if (!edid) + goto 
err_drm_connector_update_edid_property; + + succ = ast_astdp_read_edid(connector->dev, edid); + if (succ < 0) + goto err_kfree; + + drm_connector_update_edid_property(connector, edid); + count = drm_add_edid_modes(connector, edid); + kfree(edid); + + return count; + +err_kfree: + kfree(edid); +err_drm_connector_update_edid_property: + drm_connector_update_edid_property(connector, NULL); + return 0; +} + +static const struct drm_connector_helper_funcs + ast_astdp_connector_helper_funcs = { + .get_modes = ast_astdp_connector_helper_get_modes, + }; + +static const struct drm_connector_funcs ast_astdp_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int ast_astdp_connector_init(struct drm_device *dev, + struct drm_connector *connector) +{ + int ret; + + ret = drm_connector_init(dev, connector, &ast_astdp_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + if (ret) + return ret; + + drm_connector_helper_add(connector, &ast_astdp_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + + return 0; +} + +static int ast_astdp_output_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + struct drm_crtc *crtc = &ast->crtc; + struct drm_encoder *encoder = &ast->output.astdp.encoder; + struct drm_connector *connector = &ast->output.astdp.connector; + int ret; + + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); + + ret = ast_astdp_connector_init(dev, connector); + if (ret) + return ret; + + ret = drm_connector_attach_encoder(connector, encoder); + if (ret) + return ret; + + return 0; +} + +/* + * Mode config 
+ */ + +static void +ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct ast_private *ast = to_ast_private(state->dev); + + /* + * Concurrent operations could possibly trigger a call to + * drm_connector_helper_funcs.get_modes by trying to read the + * display modes. Protect access to I/O registers by acquiring + * the I/O-register lock. Released in atomic_flush(). + */ + mutex_lock(&ast->ioregs_lock); + drm_atomic_helper_commit_tail_rpm(state); + mutex_unlock(&ast->ioregs_lock); +} + +static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = { + .atomic_commit_tail = ast_mode_config_helper_atomic_commit_tail, +}; + +static const struct drm_mode_config_funcs ast_mode_config_funcs = { + .fb_create = drm_gem_fb_create, + .mode_valid = drm_vram_helper_mode_valid, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +int ast_mode_config_init(struct ast_private *ast) +{ + struct drm_device *dev = &ast->base; + int ret; + + ret = drmm_mode_config_init(dev); + if (ret) + return ret; + + dev->mode_config.funcs = &ast_mode_config_funcs; + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.preferred_depth = 24; + dev->mode_config.prefer_shadow = 1; + + if (ast->chip == AST2100 || ast->chip == AST2200 || + ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500 || ast->chip == AST2600) { + dev->mode_config.max_width = 1920; + dev->mode_config.max_height = 2048; + } else { + dev->mode_config.max_width = 1600; + dev->mode_config.max_height = 1200; + } + + dev->mode_config.helper_private = &ast_mode_config_helper_funcs; + + ret = ast_primary_plane_init(ast); + if (ret) + return ret; + + ret = ast_cursor_plane_init(ast); + if (ret) + return ret; + + ast_crtc_init(dev); + + if (ast->tx_chip_types & AST_TX_NONE_BIT) { + ret = ast_vga_output_init(ast); + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_SIL164_BIT) { + ret = 
ast_sil164_output_init(ast); + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_DP501_BIT) { + ret = ast_dp501_output_init(ast); + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { + ret = ast_astdp_output_init(ast); + if (ret) + return ret; + } + + drm_mode_config_reset(dev); + + return 0; +} diff --git a/drivers/gpu/drm/ast_loongson/ast_post.c b/drivers/gpu/drm/ast_loongson/ast_post.c new file mode 100644 index 000000000000..a7a9c37dfeee --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_post.c @@ -0,0 +1,2090 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + */ +/* + * Authors: Dave Airlie + */ + +#include +#include + +#include + +#include "ast_dram_tables.h" +#include "ast_drv.h" + +static void ast_post_chip_2300(struct drm_device *dev); +static void ast_post_chip_2500(struct drm_device *dev); + +void ast_enable_vga(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + + ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01); + ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01); +} + +void ast_enable_mmio(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06); +} + +bool ast_is_vga_enabled(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + u8 ch; + + ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT); + + return !!(ch & 0x01); +} + +static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff }; +static const u8 extreginfo_ast2300a0[] = { 0x0f, 0x04, 0x1c, 0xff }; +static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff }; + +static void ast_set_def_ext_reg(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); + u8 i, index, reg; + const u8 *ext_reg_info; + + /* reset scratch */ + for (i = 0x81; i <= 0x9f; i++) + ast_set_index_reg(ast, AST_IO_CRTC_PORT, i, 0x00); + + if (ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500) { + if (pdev->revision >= 0x20) + ext_reg_info = extreginfo_ast2300; + else + ext_reg_info = extreginfo_ast2300a0; + } else + ext_reg_info = extreginfo; + + index = 0xa0; + while (*ext_reg_info != 0xff) { + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, index, 0x00, + *ext_reg_info); + index++; + ext_reg_info++; + } + + /* disable standard IO/MEM decode if secondary */ + /* ast_set_index_reg-mask(ast, AST_IO_CRTC_PORT, 0xa1, 0xff, 0x3); */ + + /* Set Ext. 
Default */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x8c, 0x00, 0x01); + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x00, 0x00); + + /* Enable RAMDAC for A1 */ + reg = 0x04; + if (ast->chip == AST2300 || ast->chip == AST2400 || + ast->chip == AST2500) + reg |= 0x20; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg); +} + +u32 ast_mindwm(struct ast_private *ast, u32 r) +{ + uint32_t data; + + ast_write32(ast, 0xf004, r & 0xffff0000); + ast_write32(ast, 0xf000, 0x1); + + do { + data = ast_read32(ast, 0xf004) & 0xffff0000; + } while (data != (r & 0xffff0000)); + return ast_read32(ast, 0x10000 + (r & 0x0000ffff)); +} + +void ast_moutdwm(struct ast_private *ast, u32 r, u32 v) +{ + uint32_t data; + + ast_write32(ast, 0xf004, r & 0xffff0000); + ast_write32(ast, 0xf000, 0x1); + do { + data = ast_read32(ast, 0xf004) & 0xffff0000; + } while (data != (r & 0xffff0000)); + ast_write32(ast, 0x10000 + (r & 0x0000ffff), v); +} + +/* + * AST2100/2150 DLL CBR Setting + */ +#define CBR_SIZE_AST2150 ((16 << 10) - 1) +#define CBR_PASSNUM_AST2150 5 +#define CBR_THRESHOLD_AST2150 10 +#define CBR_THRESHOLD2_AST2150 10 +#define TIMEOUT_AST2150 5000000 + +#define CBR_PATNUM_AST2150 8 + +static const u32 pattern_AST2150[14] = { 0xFF00FF00, 0xCC33CC33, 0xAA55AA55, + 0xFFFE0001, 0x683501FE, 0x0F1929B0, + 0x2D0B4346, 0x60767F02, 0x6FBE36A6, + 0x3A253035, 0x3019686D, 0x41C6167E, + 0x620152BF, 0x20F050E0 }; + +static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen) +{ + u32 data, timeout; + + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3)); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x40; + if (++timeout > TIMEOUT_AST2150) { + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return 0xffffffff; + } + } while (!data); + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3)); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x40; 
+ if (++timeout > TIMEOUT_AST2150) { + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return 0xffffffff; + } + } while (!data); + data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7; + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return data; +} + +static int cbrtest_ast2150(struct ast_private *ast) +{ + int i; + + for (i = 0; i < 8; i++) + if (mmctestburst2_ast2150(ast, i)) + return 0; + return 1; +} + +static int cbrscan_ast2150(struct ast_private *ast, int busw) +{ + u32 patcnt, loop; + + for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]); + for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) { + if (cbrtest_ast2150(ast)) + break; + } + if (loop == CBR_PASSNUM_AST2150) + return 0; + } + return 1; +} + +static void cbrdlli_ast2150(struct ast_private *ast, int busw) +{ + u32 dll_min[4], dll_max[4], dlli, data, passcnt; + +cbr_start: + dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff; + dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0; + passcnt = 0; + + for (dlli = 0; dlli < 100; dlli++) { + ast_moutdwm(ast, 0x1e6e0068, + dlli | (dlli << 8) | (dlli << 16) | (dlli << 24)); + data = cbrscan_ast2150(ast, busw); + if (data != 0) { + if (data & 0x1) { + if (dll_min[0] > dlli) + dll_min[0] = dlli; + if (dll_max[0] < dlli) + dll_max[0] = dlli; + } + passcnt++; + } else if (passcnt >= CBR_THRESHOLD_AST2150) + goto cbr_start; + } + if (dll_max[0] == 0 || + (dll_max[0] - dll_min[0]) < CBR_THRESHOLD_AST2150) + goto cbr_start; + + dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4); + ast_moutdwm(ast, 0x1e6e0068, + dlli | (dlli << 8) | (dlli << 16) | (dlli << 24)); +} + +static void ast_init_dram_reg(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + u8 j; + u32 data, temp, i; + const struct ast_dramstruct *dram_reg_info; + + j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + + if ((j & 0x80) == 0) { /* VGA only */ + if (ast->chip == AST2000) { + dram_reg_info 
= ast2000_dram_table_data; + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x10100, 0xa8); + + do { + ; + } while (ast_read32(ast, 0x10100) != 0xa8); + } else { /* AST2100/1100 */ + if (ast->chip == AST2100 || ast->chip == 2200) + dram_reg_info = ast2100_dram_table_data; + else + dram_reg_info = ast1100_dram_table_data; + + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x12000, 0x1688A8A8); + do { + ; + } while (ast_read32(ast, 0x12000) != 0x01); + + ast_write32(ast, 0x10000, 0xfc600309); + do { + ; + } while (ast_read32(ast, 0x10000) != 0x01); + } + + while (dram_reg_info->index != 0xffff) { + if (dram_reg_info->index == 0xff00) { /* delay fn */ + for (i = 0; i < 15; i++) + udelay(dram_reg_info->data); + } else if (dram_reg_info->index == 0x4 && + ast->chip != AST2000) { + data = dram_reg_info->data; + if (ast->dram_type == AST_DRAM_1Gx16) + data = 0x00000d89; + else if (ast->dram_type == AST_DRAM_1Gx32) + data = 0x00000c8d; + + temp = ast_read32(ast, 0x12070); + temp &= 0xc; + temp <<= 2; + ast_write32(ast, 0x10000 + dram_reg_info->index, + data | temp); + } else + ast_write32(ast, 0x10000 + dram_reg_info->index, + dram_reg_info->data); + dram_reg_info++; + } + + /* AST 2100/2150 DRAM calibration */ + data = ast_read32(ast, 0x10120); + if (data == 0x5061) { /* 266Mhz */ + data = ast_read32(ast, 0x10004); + if (data & 0x40) + cbrdlli_ast2150(ast, 16); /* 16 bits */ + else + cbrdlli_ast2150(ast, 32); /* 32 bits */ + } + + switch (ast->chip) { + case AST2000: + temp = ast_read32(ast, 0x10140); + ast_write32(ast, 0x10140, temp | 0x40); + break; + case AST1100: + case AST2100: + case AST2200: + case AST2150: + temp = ast_read32(ast, 0x1200c); + ast_write32(ast, 0x1200c, temp & 0xfffffffd); + temp = ast_read32(ast, 0x12040); + ast_write32(ast, 0x12040, temp | 0x40); + break; + default: + break; + } + } + + /* wait ready */ + do { + j = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 
0xd0, 0xff); + } while ((j & 0x40) == 0); +} + +void ast_post_gpu(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); + u32 reg; + + pci_read_config_dword(pdev, 0x04, ®); + reg |= 0x3; + pci_write_config_dword(pdev, 0x04, reg); + + ast_enable_vga(dev); + ast_open_key(ast); + ast_enable_mmio(dev); + ast_set_def_ext_reg(dev); + + if (ast->chip == AST2600) { + ast_dp_launch(dev, 1); + } else if (ast->config_mode == ast_use_p2a) { + if (ast->chip == AST2500) + ast_post_chip_2500(dev); + else if (ast->chip == AST2300 || ast->chip == AST2400) + ast_post_chip_2300(dev); + else + ast_init_dram_reg(dev); + + ast_init_3rdtx(dev); + } else { + if (ast->tx_chip_types & AST_TX_SIL164_BIT) + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, + 0xcf, 0x80); /* Enable DVO */ + } +} + +/* AST 2300 DRAM settings */ +#define AST_DDR3 0 +#define AST_DDR2 1 + +struct ast2300_dram_param { + u32 dram_type; + u32 dram_chipid; + u32 dram_freq; + u32 vram_size; + u32 odt; + u32 wodt; + u32 rodt; + u32 dram_config; + u32 reg_PERIOD; + u32 reg_MADJ; + u32 reg_SADJ; + u32 reg_MRS; + u32 reg_EMRS; + u32 reg_AC1; + u32 reg_AC2; + u32 reg_DQSIC; + u32 reg_DRV; + u32 reg_IOZ; + u32 reg_DQIDLY; + u32 reg_FREQ; + u32 madj_max; + u32 dll2_finetune_step; +}; + +/* + * DQSI DLL CBR Setting + */ +#define CBR_SIZE0 ((1 << 10) - 1) +#define CBR_SIZE1 ((4 << 10) - 1) +#define CBR_SIZE2 ((64 << 10) - 1) +#define CBR_PASSNUM 5 +#define CBR_PASSNUM2 5 +#define CBR_THRESHOLD 10 +#define CBR_THRESHOLD2 10 +#define TIMEOUT 5000000 +#define CBR_PATNUM 8 + +static const u32 pattern[8] = { 0xFF00FF00, 0xCC33CC33, 0xAA55AA55, 0x88778877, + 0x92CC4D6E, 0x543D3CDE, 0xF1E843C7, 0x7C61D253 }; + +static bool mmc_test(struct ast_private *ast, u32 datagen, u8 test_ctl) +{ + u32 data, timeout; + + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl); + timeout = 0; + do { + data = ast_mindwm(ast, 
0x1e6e0070) & 0x3000; + if (data & 0x2000) + return false; + if (++timeout > TIMEOUT) { + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return false; + } + } while (!data); + ast_moutdwm(ast, 0x1e6e0070, 0x0); + return true; +} + +static u32 mmc_test2(struct ast_private *ast, u32 datagen, u8 test_ctl) +{ + u32 data, timeout; + + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x1000; + if (++timeout > TIMEOUT) { + ast_moutdwm(ast, 0x1e6e0070, 0x0); + return 0xffffffff; + } + } while (!data); + data = ast_mindwm(ast, 0x1e6e0078); + data = (data | (data >> 16)) & 0xffff; + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return data; +} + +static bool mmc_test_burst(struct ast_private *ast, u32 datagen) +{ + return mmc_test(ast, datagen, 0xc1); +} + +static u32 mmc_test_burst2(struct ast_private *ast, u32 datagen) +{ + return mmc_test2(ast, datagen, 0x41); +} + +static bool mmc_test_single(struct ast_private *ast, u32 datagen) +{ + return mmc_test(ast, datagen, 0xc5); +} + +static u32 mmc_test_single2(struct ast_private *ast, u32 datagen) +{ + return mmc_test2(ast, datagen, 0x05); +} + +static bool mmc_test_single_2500(struct ast_private *ast, u32 datagen) +{ + return mmc_test(ast, datagen, 0x85); +} + +static int cbr_test(struct ast_private *ast) +{ + u32 data; + int i; + + data = mmc_test_single2(ast, 0); + if ((data & 0xff) && (data & 0xff00)) + return 0; + for (i = 0; i < 8; i++) { + data = mmc_test_burst2(ast, i); + if ((data & 0xff) && (data & 0xff00)) + return 0; + } + if (!data) + return 3; + else if (data & 0xff) + return 2; + return 1; +} + +static int cbr_scan(struct ast_private *ast) +{ + u32 data, data2, patcnt, loop; + + data2 = 3; + for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); + for (loop = 0; loop < CBR_PASSNUM2; loop++) { + data = cbr_test(ast); + if (data != 0) { + data2 &= data; + if 
(!data2) + return 0; + break; + } + } + if (loop == CBR_PASSNUM2) + return 0; + } + return data2; +} + +static u32 cbr_test2(struct ast_private *ast) +{ + u32 data; + + data = mmc_test_burst2(ast, 0); + if (data == 0xffff) + return 0; + data |= mmc_test_single2(ast, 0); + if (data == 0xffff) + return 0; + + return ~data & 0xffff; +} + +static u32 cbr_scan2(struct ast_private *ast) +{ + u32 data, data2, patcnt, loop; + + data2 = 0xffff; + for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); + for (loop = 0; loop < CBR_PASSNUM2; loop++) { + data = cbr_test2(ast); + if (data != 0) { + data2 &= data; + if (!data2) + return 0; + break; + } + } + if (loop == CBR_PASSNUM2) + return 0; + } + return data2; +} + +static bool cbr_test3(struct ast_private *ast) +{ + if (!mmc_test_burst(ast, 0)) + return false; + if (!mmc_test_single(ast, 0)) + return false; + return true; +} + +static bool cbr_scan3(struct ast_private *ast) +{ + u32 patcnt, loop; + + for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); + for (loop = 0; loop < 2; loop++) { + if (cbr_test3(ast)) + break; + } + if (loop == 2) + return false; + } + return true; +} + +static bool finetuneDQI_L(struct ast_private *ast, + struct ast2300_dram_param *param) +{ + u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, + passcnt, retry = 0; + bool status = false; +FINETUNE_START: + for (cnt = 0; cnt < 16; cnt++) { + dllmin[cnt] = 0xff; + dllmax[cnt] = 0x0; + } + passcnt = 0; + for (dlli = 0; dlli < 76; dlli++) { + ast_moutdwm(ast, 0x1E6E0068, + 0x00001400 | (dlli << 16) | (dlli << 24)); + ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1); + data = cbr_scan2(ast); + if (data != 0) { + mask = 0x00010001; + for (cnt = 0; cnt < 16; cnt++) { + if (data & mask) { + if (dllmin[cnt] > dlli) + dllmin[cnt] = dlli; + if (dllmax[cnt] < dlli) + dllmax[cnt] = dlli; + } + mask <<= 1; + } + passcnt++; + } else if (passcnt >= CBR_THRESHOLD2) { + 
break; + } + } + gold_sadj[0] = 0x0; + passcnt = 0; + for (cnt = 0; cnt < 16; cnt++) { + if ((dllmax[cnt] > dllmin[cnt]) && + ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { + gold_sadj[0] += dllmin[cnt]; + passcnt++; + } + } + if (retry++ > 10) + goto FINETUNE_DONE; + if (passcnt != 16) + goto FINETUNE_START; + status = true; +FINETUNE_DONE: + gold_sadj[0] = gold_sadj[0] >> 4; + gold_sadj[1] = gold_sadj[0]; + + data = 0; + for (cnt = 0; cnt < 8; cnt++) { + data >>= 3; + if ((dllmax[cnt] > dllmin[cnt]) && + ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { + dlli = dllmin[cnt]; + if (gold_sadj[0] >= dlli) { + dlli = ((gold_sadj[0] - dlli) * 19) >> 5; + if (dlli > 3) + dlli = 3; + } else { + dlli = ((dlli - gold_sadj[0]) * 19) >> 5; + if (dlli > 4) + dlli = 4; + dlli = (8 - dlli) & 0x7; + } + data |= dlli << 21; + } + } + ast_moutdwm(ast, 0x1E6E0080, data); + + data = 0; + for (cnt = 8; cnt < 16; cnt++) { + data >>= 3; + if ((dllmax[cnt] > dllmin[cnt]) && + ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { + dlli = dllmin[cnt]; + if (gold_sadj[1] >= dlli) { + dlli = ((gold_sadj[1] - dlli) * 19) >> 5; + if (dlli > 3) + dlli = 3; + else + dlli = (dlli - 1) & 0x7; + } else { + dlli = ((dlli - gold_sadj[1]) * 19) >> 5; + dlli += 1; + if (dlli > 4) + dlli = 4; + dlli = (8 - dlli) & 0x7; + } + data |= dlli << 21; + } + } + ast_moutdwm(ast, 0x1E6E0084, data); + return status; +} /* finetuneDQI_L */ + +static void finetuneDQSI(struct ast_private *ast) +{ + u32 dlli, dqsip, dqidly; + u32 reg_mcr18, reg_mcr0c, passcnt[2], diff; + u32 g_dqidly, g_dqsip, g_margin, g_side; + u16 pass[32][2][2]; + char tag[2][76]; + + /* Disable DQI CBR */ + reg_mcr0c = ast_mindwm(ast, 0x1E6E000C); + reg_mcr18 = ast_mindwm(ast, 0x1E6E0018); + reg_mcr18 &= 0x0000ffff; + ast_moutdwm(ast, 0x1E6E0018, reg_mcr18); + + for (dlli = 0; dlli < 76; dlli++) { + tag[0][dlli] = 0x0; + tag[1][dlli] = 0x0; + } + for (dqidly = 0; dqidly < 32; dqidly++) { + pass[dqidly][0][0] = 0xff; + pass[dqidly][0][1] = 
0x0; + pass[dqidly][1][0] = 0xff; + pass[dqidly][1][1] = 0x0; + } + for (dqidly = 0; dqidly < 32; dqidly++) { + passcnt[0] = passcnt[1] = 0; + for (dqsip = 0; dqsip < 2; dqsip++) { + ast_moutdwm(ast, 0x1E6E000C, 0); + ast_moutdwm(ast, 0x1E6E0018, + reg_mcr18 | (dqidly << 16) | (dqsip << 23)); + ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c); + for (dlli = 0; dlli < 76; dlli++) { + ast_moutdwm(ast, 0x1E6E0068, + 0x00001300 | (dlli << 16) | + (dlli << 24)); + ast_moutdwm(ast, 0x1E6E0070, 0); + ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0); + if (cbr_scan3(ast)) { + if (dlli == 0) + break; + passcnt[dqsip]++; + tag[dqsip][dlli] = 'P'; + if (dlli < pass[dqidly][dqsip][0]) + pass[dqidly][dqsip][0] = + (u16)dlli; + if (dlli > pass[dqidly][dqsip][1]) + pass[dqidly][dqsip][1] = + (u16)dlli; + } + if (passcnt[dqsip] >= 5) + break; + if (!cbr_scan3(ast)) { + pass[dqidly][dqsip][0] = 0xff; + pass[dqidly][dqsip][1] = 0x0; + } + } + } + if (passcnt[0] == 0 && passcnt[1] == 0) + dqidly++; + } + /* Search margin */ + g_dqidly = g_dqsip = g_margin = g_side = 0; + + for (dqidly = 0; dqidly < 32; dqidly++) { + for (dqsip = 0; dqsip < 2; dqsip++) { + if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1]) + continue; + diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0]; + if ((diff + 2) < g_margin) + continue; + passcnt[0] = passcnt[1] = 0; + for (dlli = pass[dqidly][dqsip][0]; + dlli > 0 && tag[dqsip][dlli] != 0; + dlli--, passcnt[0]++) + ; + for (dlli = pass[dqidly][dqsip][1]; + dlli < 76 && tag[dqsip][dlli] != 0; + dlli++, passcnt[1]++) + ; + if (passcnt[0] > passcnt[1]) + passcnt[0] = passcnt[1]; + passcnt[1] = 0; + if (passcnt[0] > g_side) + passcnt[1] = passcnt[0] - g_side; + if (diff > (g_margin + 1) && + (passcnt[1] > 0 || passcnt[0] > 8)) { + g_margin = diff; + g_dqidly = dqidly; + g_dqsip = dqsip; + g_side = passcnt[0]; + } else if (passcnt[1] > 1 && g_side < 8) { + if (diff > g_margin) + g_margin = diff; + g_dqidly = dqidly; + g_dqsip = dqsip; + g_side = passcnt[0]; + } + } + } + 
reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23); + ast_moutdwm(ast, 0x1E6E0018, reg_mcr18); +} +static bool cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param) +{ + u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0; + bool status = false; + + finetuneDQSI(ast); + if (finetuneDQI_L(ast, param) == false) + return status; + +CBR_START2: + dllmin[0] = dllmin[1] = 0xff; + dllmax[0] = dllmax[1] = 0x0; + passcnt = 0; + for (dlli = 0; dlli < 76; dlli++) { + ast_moutdwm(ast, 0x1E6E0068, + 0x00001300 | (dlli << 16) | (dlli << 24)); + ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2); + data = cbr_scan(ast); + if (data != 0) { + if (data & 0x1) { + if (dllmin[0] > dlli) + dllmin[0] = dlli; + if (dllmax[0] < dlli) + dllmax[0] = dlli; + } + if (data & 0x2) { + if (dllmin[1] > dlli) + dllmin[1] = dlli; + if (dllmax[1] < dlli) + dllmax[1] = dlli; + } + passcnt++; + } else if (passcnt >= CBR_THRESHOLD) { + break; + } + } + if (retry++ > 10) + goto CBR_DONE2; + if (dllmax[0] == 0 || (dllmax[0] - dllmin[0]) < CBR_THRESHOLD) + goto CBR_START2; + if (dllmax[1] == 0 || (dllmax[1] - dllmin[1]) < CBR_THRESHOLD) + goto CBR_START2; + status = true; +CBR_DONE2: + dlli = (dllmin[1] + dllmax[1]) >> 1; + dlli <<= 8; + dlli += (dllmin[0] + dllmax[0]) >> 1; + ast_moutdwm(ast, 0x1E6E0068, + ast_mindwm(ast, 0x1E720058) | (dlli << 16)); + return status; +} /* CBRDLL2 */ + +static void get_ddr3_info(struct ast_private *ast, + struct ast2300_dram_param *param) +{ + u32 trap, trap_AC2, trap_MRS; + + ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); + + /* Ger trap info */ + trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3; + trap_AC2 = 0x00020000 + (trap << 16); + trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19); + trap_MRS = 0x00000010 + (trap << 4); + trap_MRS |= ((trap & 0x2) << 18); + + param->reg_MADJ = 0x00034C4C; + param->reg_SADJ = 0x00001800; + param->reg_DRV = 0x000000F0; + param->reg_PERIOD = param->dram_freq; + param->rodt = 0; + + switch (param->dram_freq) { + case 336: + 
ast_moutdwm(ast, 0x1E6E2020, 0x0190); + param->wodt = 0; + param->reg_AC1 = 0x22202725; + param->reg_AC2 = 0xAA007613 | trap_AC2; + param->reg_DQSIC = 0x000000BA; + param->reg_MRS = 0x04001400 | trap_MRS; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000074; + param->reg_FREQ = 0x00004DC0; + param->madj_max = 96; + param->dll2_finetune_step = 3; + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xAA007613 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xAA00761C | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xAA007636 | trap_AC2; + break; + } + break; + default: + case 396: + ast_moutdwm(ast, 0x1E6E2020, 0x03F1); + param->wodt = 1; + param->reg_AC1 = 0x33302825; + param->reg_AC2 = 0xCC009617 | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x04001600 | trap_MRS; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000034; + param->reg_DRV = 0x000000FA; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x00005040; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC009617 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC009622 | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00963F | trap_AC2; + break; + } + break; + + case 408: + ast_moutdwm(ast, 0x1E6E2020, 0x01F0); + param->wodt = 1; + param->reg_AC1 = 0x33302825; + param->reg_AC2 = 0xCC009617 | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x04001600 | trap_MRS; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000023; + param->reg_DRV = 0x000000FA; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x000050C0; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + case AST_DRAM_1Gx16: + param->reg_AC2 = 
0xCC009617 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC009622 | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00963F | trap_AC2; + break; + } + + break; + case 456: + ast_moutdwm(ast, 0x1E6E2020, 0x0230); + param->wodt = 0; + param->reg_AC1 = 0x33302926; + param->reg_AC2 = 0xCD44961A; + param->reg_DQSIC = 0x000000FC; + param->reg_MRS = 0x00081830; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x00000097; + param->reg_FREQ = 0x000052C0; + param->madj_max = 88; + param->dll2_finetune_step = 4; + break; + case 504: + ast_moutdwm(ast, 0x1E6E2020, 0x0270); + param->wodt = 1; + param->reg_AC1 = 0x33302926; + param->reg_AC2 = 0xDE44A61D; + param->reg_DQSIC = 0x00000117; + param->reg_MRS = 0x00081A30; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x070000BB; + param->reg_DQIDLY = 0x000000A0; + param->reg_FREQ = 0x000054C0; + param->madj_max = 79; + param->dll2_finetune_step = 4; + break; + case 528: + ast_moutdwm(ast, 0x1E6E2020, 0x0290); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302926; + param->reg_AC2 = 0xEF44B61E; + param->reg_DQSIC = 0x00000125; + param->reg_MRS = 0x00081A30; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000088; + param->reg_FREQ = 0x000055C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + case 576: + ast_moutdwm(ast, 0x1E6E2020, 0x0140); + param->reg_MADJ = 0x00136868; + param->reg_SADJ = 0x00004534; + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302A37; + param->reg_AC2 = 0xEF56B61E; + param->reg_DQSIC = 0x0000013F; + param->reg_MRS = 0x00101A50; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000078; + param->reg_FREQ = 0x000057C0; + param->madj_max = 136; + param->dll2_finetune_step = 3; + break; + case 600: + ast_moutdwm(ast, 0x1E6E2020, 0x02E1); + param->reg_MADJ = 
0x00136868; + param->reg_SADJ = 0x00004534; + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x32302A37; + param->reg_AC2 = 0xDF56B61F; + param->reg_DQSIC = 0x0000014D; + param->reg_MRS = 0x00101A50; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000078; + param->reg_FREQ = 0x000058C0; + param->madj_max = 132; + param->dll2_finetune_step = 3; + break; + case 624: + ast_moutdwm(ast, 0x1E6E2020, 0x0160); + param->reg_MADJ = 0x00136868; + param->reg_SADJ = 0x00004534; + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x32302A37; + param->reg_AC2 = 0xEF56B621; + param->reg_DQSIC = 0x0000015A; + param->reg_MRS = 0x02101A50; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000078; + param->reg_FREQ = 0x000059C0; + param->madj_max = 128; + param->dll2_finetune_step = 3; + break; + } /* switch freq */ + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->dram_config = 0x130; + break; + default: + case AST_DRAM_1Gx16: + param->dram_config = 0x131; + break; + case AST_DRAM_2Gx16: + param->dram_config = 0x132; + break; + case AST_DRAM_4Gx16: + param->dram_config = 0x133; + break; + } /* switch size */ + + switch (param->vram_size) { + default: + case AST_VIDMEM_SIZE_8M: + param->dram_config |= 0x00; + break; + case AST_VIDMEM_SIZE_16M: + param->dram_config |= 0x04; + break; + case AST_VIDMEM_SIZE_32M: + param->dram_config |= 0x08; + break; + case AST_VIDMEM_SIZE_64M: + param->dram_config |= 0x0c; + break; + } +} + +static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param) +{ + u32 data, data2, retry = 0; + +ddr3_init_start: + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); + ast_moutdwm(ast, 0x1E6E0018, 0x00000100); + ast_moutdwm(ast, 0x1E6E0024, 0x00000000); + ast_moutdwm(ast, 0x1E6E0034, 0x00000000); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ); + ast_moutdwm(ast, 
0x1E6E0068, param->reg_SADJ); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000); + udelay(10); + + ast_moutdwm(ast, 0x1E6E0004, param->dram_config); + ast_moutdwm(ast, 0x1E6E0008, 0x90040f); + ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1); + ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2); + ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC); + ast_moutdwm(ast, 0x1E6E0080, 0x00000000); + ast_moutdwm(ast, 0x1E6E0084, 0x00000000); + ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY); + ast_moutdwm(ast, 0x1E6E0018, 0x4000A170); + ast_moutdwm(ast, 0x1E6E0018, 0x00002370); + ast_moutdwm(ast, 0x1E6E0038, 0x00000000); + ast_moutdwm(ast, 0x1E6E0040, 0xFF444444); + ast_moutdwm(ast, 0x1E6E0044, 0x22222222); + ast_moutdwm(ast, 0x1E6E0048, 0x22222222); + ast_moutdwm(ast, 0x1E6E004C, 0x00000002); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); + ast_moutdwm(ast, 0x1E6E0054, 0); + ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV); + ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0074, 0x00000000); + ast_moutdwm(ast, 0x1E6E0078, 0x00000000); + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + /* Wait MCLK2X lock to MCLK */ + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) { + data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4; + if ((data2 & 0xff) > param->madj_max) + break; + ast_moutdwm(ast, 0x1E6E0064, data2); + if (data2 & 0x00100000) + data2 = ((data2 & 0xff) >> 3) + 3; + else + data2 = ((data2 & 0xff) >> 2) + 5; + data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff; + data2 += data & 0xff; + data = data | (data2 << 8); + ast_moutdwm(ast, 0x1E6E0068, data); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, + ast_mindwm(ast, 0x1E6E0064) | 0xC0000); + udelay(10); + data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff; + 
ast_moutdwm(ast, 0x1E6E0018, data); + data = data | 0x200; + ast_moutdwm(ast, 0x1E6E0018, data); + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + } + ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff); + data = ast_mindwm(ast, 0x1E6E0018) | 0xC00; + ast_moutdwm(ast, 0x1E6E0018, data); + + ast_moutdwm(ast, 0x1E6E0034, 0x00000001); + ast_moutdwm(ast, 0x1E6E000C, 0x00000040); + udelay(50); + /* Mode Register Setting */ + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000005); + ast_moutdwm(ast, 0x1E6E0028, 0x00000007); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS); + ast_moutdwm(ast, 0x1E6E000C, 0x00005C08); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); + data = 0; + if (param->wodt) + data = 0x300; + if (param->rodt) + data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3); + ast_moutdwm(ast, 0x1E6E0034, data | 0x3); + + /* Calibrate the DQSI delay */ + if ((cbr_dll2(ast, param) == false) && (retry++ < 10)) + goto ddr3_init_start; + + ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ); + /* ECC Memory Initialization */ +#ifdef ECC + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0070, 0x221); + do { + data = ast_mindwm(ast, 0x1E6E0070); + } while (!(data & 0x00001000)); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); +#endif +} + +static void get_ddr2_info(struct ast_private *ast, + struct ast2300_dram_param *param) +{ + u32 trap, trap_AC2, trap_MRS; + + ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); + + /* Ger trap info */ + trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3; + trap_AC2 = (trap << 20) | (trap << 16); + trap_AC2 += 
0x00110000; + trap_MRS = 0x00000040 | (trap << 4); + + param->reg_MADJ = 0x00034C4C; + param->reg_SADJ = 0x00001800; + param->reg_DRV = 0x000000F0; + param->reg_PERIOD = param->dram_freq; + param->rodt = 0; + + switch (param->dram_freq) { + case 264: + ast_moutdwm(ast, 0x1E6E2020, 0x0130); + param->wodt = 0; + param->reg_AC1 = 0x11101513; + param->reg_AC2 = 0x78117011; + param->reg_DQSIC = 0x00000092; + param->reg_MRS = 0x00000842; + param->reg_EMRS = 0x00000000; + param->reg_DRV = 0x000000F0; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x0000005A; + param->reg_FREQ = 0x00004AC0; + param->madj_max = 138; + param->dll2_finetune_step = 3; + break; + case 336: + ast_moutdwm(ast, 0x1E6E2020, 0x0190); + param->wodt = 1; + param->reg_AC1 = 0x22202613; + param->reg_AC2 = 0xAA009016 | trap_AC2; + param->reg_DQSIC = 0x000000BA; + param->reg_MRS = 0x00000A02 | trap_MRS; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000074; + param->reg_FREQ = 0x00004DC0; + param->madj_max = 96; + param->dll2_finetune_step = 3; + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + param->reg_AC2 = 0xAA009012 | trap_AC2; + break; + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xAA009016 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xAA009023 | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xAA00903B | trap_AC2; + break; + } + break; + default: + case 396: + ast_moutdwm(ast, 0x1E6E2020, 0x03F1); + param->wodt = 1; + param->rodt = 0; + param->reg_AC1 = 0x33302714; + param->reg_AC2 = 0xCC00B01B | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x00000C02 | trap_MRS; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x00005040; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->reg_AC2 = 
0xCC00B016 | trap_AC2; + break; + default: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC00B01B | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC00B02B | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00B03F | trap_AC2; + break; + } + + break; + + case 408: + ast_moutdwm(ast, 0x1E6E2020, 0x01F0); + param->wodt = 1; + param->rodt = 0; + param->reg_AC1 = 0x33302714; + param->reg_AC2 = 0xCC00B01B | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x00000C02 | trap_MRS; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x000050C0; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->reg_AC2 = 0xCC00B016 | trap_AC2; + break; + default: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC00B01B | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC00B02B | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00B03F | trap_AC2; + break; + } + + break; + case 456: + ast_moutdwm(ast, 0x1E6E2020, 0x0230); + param->wodt = 0; + param->reg_AC1 = 0x33302815; + param->reg_AC2 = 0xCD44B01E; + param->reg_DQSIC = 0x000000FC; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000000; + param->reg_DRV = 0x00000000; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000097; + param->reg_FREQ = 0x000052C0; + param->madj_max = 88; + param->dll2_finetune_step = 3; + break; + case 504: + ast_moutdwm(ast, 0x1E6E2020, 0x0261); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302815; + param->reg_AC2 = 0xDE44C022; + param->reg_DQSIC = 0x00000117; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x0000000A; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000A0; + param->reg_FREQ = 0x000054C0; + param->madj_max = 79; + param->dll2_finetune_step = 3; + break; + case 528: + ast_moutdwm(ast, 
0x1E6E2020, 0x0120); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302815; + param->reg_AC2 = 0xEF44D024; + param->reg_DQSIC = 0x00000125; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F9; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000A7; + param->reg_FREQ = 0x000055C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + case 552: + ast_moutdwm(ast, 0x1E6E2020, 0x02A1); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x43402915; + param->reg_AC2 = 0xFF44E025; + param->reg_DQSIC = 0x00000132; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x0000000A; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000AD; + param->reg_FREQ = 0x000056C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + case 576: + ast_moutdwm(ast, 0x1E6E2020, 0x0140); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x43402915; + param->reg_AC2 = 0xFF44E027; + param->reg_DQSIC = 0x0000013F; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000B3; + param->reg_FREQ = 0x000057C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + } + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->dram_config = 0x100; + break; + default: + case AST_DRAM_1Gx16: + param->dram_config = 0x121; + break; + case AST_DRAM_2Gx16: + param->dram_config = 0x122; + break; + case AST_DRAM_4Gx16: + param->dram_config = 0x123; + break; + } /* switch size */ + + switch (param->vram_size) { + default: + case AST_VIDMEM_SIZE_8M: + param->dram_config |= 0x00; + break; + case AST_VIDMEM_SIZE_16M: + param->dram_config |= 0x04; + break; + case AST_VIDMEM_SIZE_32M: + param->dram_config |= 0x08; + break; + case AST_VIDMEM_SIZE_64M: + param->dram_config |= 0x0c; + break; + } +} + +static void ddr2_init(struct ast_private *ast, struct 
ast2300_dram_param *param) +{ + u32 data, data2, retry = 0; + +ddr2_init_start: + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); + ast_moutdwm(ast, 0x1E6E0018, 0x00000100); + ast_moutdwm(ast, 0x1E6E0024, 0x00000000); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ); + ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000); + udelay(10); + + ast_moutdwm(ast, 0x1E6E0004, param->dram_config); + ast_moutdwm(ast, 0x1E6E0008, 0x90040f); + ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1); + ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2); + ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC); + ast_moutdwm(ast, 0x1E6E0080, 0x00000000); + ast_moutdwm(ast, 0x1E6E0084, 0x00000000); + ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY); + ast_moutdwm(ast, 0x1E6E0018, 0x4000A130); + ast_moutdwm(ast, 0x1E6E0018, 0x00002330); + ast_moutdwm(ast, 0x1E6E0038, 0x00000000); + ast_moutdwm(ast, 0x1E6E0040, 0xFF808000); + ast_moutdwm(ast, 0x1E6E0044, 0x88848466); + ast_moutdwm(ast, 0x1E6E0048, 0x44440008); + ast_moutdwm(ast, 0x1E6E004C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); + ast_moutdwm(ast, 0x1E6E0054, 0); + ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV); + ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0074, 0x00000000); + ast_moutdwm(ast, 0x1E6E0078, 0x00000000); + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + + /* Wait MCLK2X lock to MCLK */ + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) { + data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4; + if ((data2 & 0xff) > param->madj_max) + break; + ast_moutdwm(ast, 0x1E6E0064, data2); + if (data2 & 0x00100000) + data2 = ((data2 & 0xff) >> 3) + 3; + else + data2 = ((data2 & 0xff) >> 2) + 5; + data = 
ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff; + data2 += data & 0xff; + data = data | (data2 << 8); + ast_moutdwm(ast, 0x1E6E0068, data); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, + ast_mindwm(ast, 0x1E6E0064) | 0xC0000); + udelay(10); + data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff; + ast_moutdwm(ast, 0x1E6E0018, data); + data = data | 0x200; + ast_moutdwm(ast, 0x1E6E0018, data); + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + } + ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff); + data = ast_mindwm(ast, 0x1E6E0018) | 0xC00; + ast_moutdwm(ast, 0x1E6E0018, data); + + ast_moutdwm(ast, 0x1E6E0034, 0x00000001); + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + udelay(50); + /* Mode Register Setting */ + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000005); + ast_moutdwm(ast, 0x1E6E0028, 0x00000007); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + + ast_moutdwm(ast, 0x1E6E000C, 0x00005C08); + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + + ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01); + data = 0; + if (param->wodt) + data = 0x500; + if (param->rodt) + data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3); + ast_moutdwm(ast, 0x1E6E0034, data | 0x3); + ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ); + + /* Calibrate the DQSI delay */ + if ((cbr_dll2(ast, param) == false) && (retry++ < 10)) + goto ddr2_init_start; + + /* ECC Memory Initialization */ +#ifdef ECC + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0070, 0x221); + do { + data = ast_mindwm(ast, 0x1E6E0070); + } 
while (!(data & 0x00001000)); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); +#endif +} + +static void ast_post_chip_2300(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + struct ast2300_dram_param param; + u32 temp; + u8 reg; + + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + if ((reg & 0x80) == 0) { /* vga only */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x12000, 0x1688a8a8); + do { + ; + } while (ast_read32(ast, 0x12000) != 0x1); + + ast_write32(ast, 0x10000, 0xfc600309); + do { + ; + } while (ast_read32(ast, 0x10000) != 0x1); + + /* Slow down CPU/AHB CLK in VGA only mode */ + temp = ast_read32(ast, 0x12008); + temp |= 0x73; + ast_write32(ast, 0x12008, temp); + + param.dram_freq = 396; + param.dram_type = AST_DDR3; + temp = ast_mindwm(ast, 0x1e6e2070); + if (temp & 0x01000000) + param.dram_type = AST_DDR2; + switch (temp & 0x18000000) { + case 0: + param.dram_chipid = AST_DRAM_512Mx16; + break; + default: + case 0x08000000: + param.dram_chipid = AST_DRAM_1Gx16; + break; + case 0x10000000: + param.dram_chipid = AST_DRAM_2Gx16; + break; + case 0x18000000: + param.dram_chipid = AST_DRAM_4Gx16; + break; + } + switch (temp & 0x0c) { + default: + case 0x00: + param.vram_size = AST_VIDMEM_SIZE_8M; + break; + + case 0x04: + param.vram_size = AST_VIDMEM_SIZE_16M; + break; + + case 0x08: + param.vram_size = AST_VIDMEM_SIZE_32M; + break; + + case 0x0c: + param.vram_size = AST_VIDMEM_SIZE_64M; + break; + } + + if (param.dram_type == AST_DDR3) { + get_ddr3_info(ast, ¶m); + ddr3_init(ast, ¶m); + } else { + get_ddr2_info(ast, ¶m); + ddr2_init(ast, ¶m); + } + + temp = ast_mindwm(ast, 0x1e6e2040); + ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); + } + + /* wait ready */ + do { + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + } while ((reg & 0x40) == 0); +} + +static bool 
cbr_test_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); + ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); + if (!mmc_test_burst(ast, 0)) + return false; + if (!mmc_test_single_2500(ast, 0)) + return false; + return true; +} + +static bool ddr_test_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); + ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); + if (!mmc_test_burst(ast, 0)) + return false; + if (!mmc_test_burst(ast, 1)) + return false; + if (!mmc_test_burst(ast, 2)) + return false; + if (!mmc_test_burst(ast, 3)) + return false; + if (!mmc_test_single_2500(ast, 0)) + return false; + return true; +} + +static void ddr_init_common_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E6E0034, 0x00020080); + ast_moutdwm(ast, 0x1E6E0008, 0x2003000F); + ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF); + ast_moutdwm(ast, 0x1E6E0040, 0x88448844); + ast_moutdwm(ast, 0x1E6E0044, 0x24422288); + ast_moutdwm(ast, 0x1E6E0048, 0x22222222); + ast_moutdwm(ast, 0x1E6E004C, 0x22222222); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0208, 0x00000000); + ast_moutdwm(ast, 0x1E6E0218, 0x00000000); + ast_moutdwm(ast, 0x1E6E0220, 0x00000000); + ast_moutdwm(ast, 0x1E6E0228, 0x00000000); + ast_moutdwm(ast, 0x1E6E0230, 0x00000000); + ast_moutdwm(ast, 0x1E6E02A8, 0x00000000); + ast_moutdwm(ast, 0x1E6E02B0, 0x00000000); + ast_moutdwm(ast, 0x1E6E0240, 0x86000000); + ast_moutdwm(ast, 0x1E6E0244, 0x00008600); + ast_moutdwm(ast, 0x1E6E0248, 0x80000000); + ast_moutdwm(ast, 0x1E6E024C, 0x80808080); +} + +static void ddr_phy_init_2500(struct ast_private *ast) +{ + u32 data, pass, timecnt; + + pass = 0; + ast_moutdwm(ast, 0x1E6E0060, 0x00000005); + while (!pass) { + for (timecnt = 0; timecnt < TIMEOUT; timecnt++) { + data = ast_mindwm(ast, 0x1E6E0060) & 0x1; + if (!data) + break; + } + if (timecnt != TIMEOUT) { + data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000; + if (!data) + pass = 1; + } + if (!pass) { + ast_moutdwm(ast, 0x1E6E0060, 
0x00000000); + udelay(10); /* delay 10 us */ + ast_moutdwm(ast, 0x1E6E0060, 0x00000005); + } + } + + ast_moutdwm(ast, 0x1E6E0060, 0x00000006); +} + +/* + * Check DRAM Size + * 1Gb : 0x80000000 ~ 0x87FFFFFF + * 2Gb : 0x80000000 ~ 0x8FFFFFFF + * 4Gb : 0x80000000 ~ 0x9FFFFFFF + * 8Gb : 0x80000000 ~ 0xBFFFFFFF + */ +static void check_dram_size_2500(struct ast_private *ast, u32 tRFC) +{ + u32 reg_04, reg_14; + + reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc; + reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00; + + ast_moutdwm(ast, 0xA0100000, 0x41424344); + ast_moutdwm(ast, 0x90100000, 0x35363738); + ast_moutdwm(ast, 0x88100000, 0x292A2B2C); + ast_moutdwm(ast, 0x80100000, 0x1D1E1F10); + + /* Check 8Gbit */ + if (ast_mindwm(ast, 0xA0100000) == 0x41424344) { + reg_04 |= 0x03; + reg_14 |= (tRFC >> 24) & 0xFF; + /* Check 4Gbit */ + } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) { + reg_04 |= 0x02; + reg_14 |= (tRFC >> 16) & 0xFF; + /* Check 2Gbit */ + } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) { + reg_04 |= 0x01; + reg_14 |= (tRFC >> 8) & 0xFF; + } else { + reg_14 |= tRFC & 0xFF; + } + ast_moutdwm(ast, 0x1E6E0004, reg_04); + ast_moutdwm(ast, 0x1E6E0014, reg_14); +} + +static void enable_cache_2500(struct ast_private *ast) +{ + u32 reg_04, data; + + reg_04 = ast_mindwm(ast, 0x1E6E0004); + ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000); + + do + data = ast_mindwm(ast, 0x1E6E0004); + while (!(data & 0x80000)); + ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400); +} + +static void set_mpll_2500(struct ast_private *ast) +{ + u32 addr, data, param; + + /* Reset MMC */ + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); + ast_moutdwm(ast, 0x1E6E0034, 0x00020080); + for (addr = 0x1e6e0004; addr < 0x1e6e0090;) { + ast_moutdwm(ast, addr, 0x0); + addr += 4; + } + ast_moutdwm(ast, 0x1E6E0034, 0x00020000); + + ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); + data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000; + if (data) { + /* CLKIN = 25MHz */ + param = 0x930023E0; + ast_moutdwm(ast, 
0x1E6E2160, 0x00011320); + } else { + /* CLKIN = 24MHz */ + param = 0x93002400; + } + ast_moutdwm(ast, 0x1E6E2020, param); + udelay(100); +} + +static void reset_mmc_2500(struct ast_private *ast) +{ + ast_moutdwm(ast, 0x1E78505C, 0x00000004); + ast_moutdwm(ast, 0x1E785044, 0x00000001); + ast_moutdwm(ast, 0x1E785048, 0x00004755); + ast_moutdwm(ast, 0x1E78504C, 0x00000013); + mdelay(100); + ast_moutdwm(ast, 0x1E785054, 0x00000077); + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); +} + +static void ddr3_init_2500(struct ast_private *ast, const u32 *ddr_table) +{ + ast_moutdwm(ast, 0x1E6E0004, 0x00000303); + ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); + ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); + ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); + ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ + ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ + ast_moutdwm(ast, 0x1E6E002C, + ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ + ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ + + /* DDR PHY Setting */ + ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE); + ast_moutdwm(ast, 0x1E6E0204, 0x00001001); + ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); + ast_moutdwm(ast, 0x1E6E0210, 0x20000000); + ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); + ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); + ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); + ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); + ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); + ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); + ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); + ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); + ast_moutdwm(ast, 0x1E6E0290, 0x00100008); + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006); + + /* Controller Setting */ + ast_moutdwm(ast, 0x1E6E0034, 0x00020091); + + /* Wait DDR PHY init done */ + ddr_phy_init_2500(ast); + + ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); + 
ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); + ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); + + check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); + enable_cache_2500(ast); + ast_moutdwm(ast, 0x1E6E001C, 0x00000008); + ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); +} + +static void ddr4_init_2500(struct ast_private *ast, const u32 *ddr_table) +{ + u32 data, data2, pass, retrycnt; + u32 ddr_vref, phy_vref; + u32 min_ddr_vref = 0, min_phy_vref = 0; + u32 max_ddr_vref = 0, max_phy_vref = 0; + + ast_moutdwm(ast, 0x1E6E0004, 0x00000313); + ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); + ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); + ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); + ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ + ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ + ast_moutdwm(ast, 0x1E6E002C, + ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ + ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ + + /* DDR PHY Setting */ + ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE); + ast_moutdwm(ast, 0x1E6E0204, 0x09002000); + ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); + ast_moutdwm(ast, 0x1E6E0210, 0x20000000); + ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); + ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); + ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); + ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); + ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); + ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); + ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); + ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); + ast_moutdwm(ast, 0x1E6E0290, 0x00100008); + ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C); + ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E); + + /* Controller Setting */ + ast_moutdwm(ast, 0x1E6E0034, 0x0001A991); + + /* Train PHY Vref first */ + pass = 0; + + for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { + max_phy_vref = 0x0; + pass = 0; + ast_moutdwm(ast, 
0x1E6E02C0, 0x00001C06); + for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) { + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ast_moutdwm(ast, 0x1E6E02CC, + phy_vref | (phy_vref << 8)); + /* Fire DFI Init */ + ddr_phy_init_2500(ast); + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); + if (cbr_test_2500(ast)) { + pass++; + data = ast_mindwm(ast, 0x1E6E03D0); + data2 = data >> 8; + data = data & 0xff; + if (data > data2) + data = data2; + if (max_phy_vref < data) { + max_phy_vref = data; + min_phy_vref = phy_vref; + } + } else if (pass > 0) + break; + } + } + ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8)); + + /* Train DDR Vref next */ + pass = 0; + + for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { + min_ddr_vref = 0xFF; + max_ddr_vref = 0x0; + pass = 0; + for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) { + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ast_moutdwm(ast, 0x1E6E02C0, + 0x00000006 | (ddr_vref << 8)); + /* Fire DFI Init */ + ddr_phy_init_2500(ast); + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); + if (cbr_test_2500(ast)) { + pass++; + if (min_ddr_vref > ddr_vref) + min_ddr_vref = ddr_vref; + if (max_ddr_vref < ddr_vref) + max_ddr_vref = ddr_vref; + } else if (pass != 0) + break; + } + } + + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1; + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8)); + + /* Wait DDR PHY init done */ + ddr_phy_init_2500(ast); + + ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); + ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); + ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); + + check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); + enable_cache_2500(ast); + ast_moutdwm(ast, 0x1E6E001C, 0x00000008); + ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); +} + +static bool ast_dram_init_2500(struct ast_private *ast) +{ + u32 data; + u32 max_tries = 5; 
+ + do { + if (max_tries-- == 0) + return false; + set_mpll_2500(ast); + reset_mmc_2500(ast); + ddr_init_common_2500(ast); + + data = ast_mindwm(ast, 0x1E6E2070); + if (data & 0x01000000) + ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table); + else + ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table); + } while (!ddr_test_2500(ast)); + + ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41); + + /* Patch code */ + data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF; + ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000); + + return true; +} + +void ast_patch_ahb_2500(struct ast_private *ast) +{ + u32 data; + + /* Clear bus lock condition */ + ast_moutdwm(ast, 0x1e600000, 0xAEED1A03); + ast_moutdwm(ast, 0x1e600084, 0x00010000); + ast_moutdwm(ast, 0x1e600088, 0x00000000); + ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); + data = ast_mindwm(ast, 0x1e6e2070); + if (data & 0x08000000) { /* check fast reset */ + /* + * If "Fast reset" is enabled for ARM-ICE debugger, + * then WDT needs to enable, that + * WDT04 is WDT#1 Reload reg. 
+ + WDT08 is WDT#1 counter restart reg to avoid system deadlock + WDT0C is WDT#1 control reg + [6:5]:= 01:Full chip + [4]:= 1:1MHz clock source + [1]:= 1:WDT will be cleared and disabled after timeout occurs + [0]:= 1:WDT enable + */ + ast_moutdwm(ast, 0x1E785004, 0x00000010); + ast_moutdwm(ast, 0x1E785008, 0x00004755); + ast_moutdwm(ast, 0x1E78500c, 0x00000033); + udelay(1000); + } + do { + ast_moutdwm(ast, 0x1e6e2000, 0x1688A8A8); + data = ast_mindwm(ast, 0x1e6e2000); + } while (data != 1); + ast_moutdwm(ast, 0x1e6e207c, 0x08000000); /* clear fast reset */ +} + +void ast_post_chip_2500(struct drm_device *dev) +{ + struct ast_private *ast = to_ast_private(dev); + u32 temp; + u8 reg; + + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + if ((reg & AST_VRAM_INIT_STATUS_MASK) == 0) { /* vga only */ + /* Clear bus lock condition */ + ast_patch_ahb_2500(ast); + + /* Disable watchdog */ + ast_moutdwm(ast, 0x1E78502C, 0x00000000); + ast_moutdwm(ast, 0x1E78504C, 0x00000000); + + /* + * Reset USB port to patch USB unknown device issue + * SCU90 is Multi-function Pin Control #5 + * [29]:= 1:Enable USB2.0 Host port#1 (that the mutually shared USB2.0 Hub + * port). + * SCU94 is Multi-function Pin Control #6 + * [14:13]:= 1x:USB2.0 Host2 controller + * SCU70 is Hardware Strap reg + * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by + * [18]: 0(24)/1(48) MHz) + * SCU7C is Write clear reg to SCU70 + * [23]:= write 1 and then SCU70[23] will be clear as 0b. 
+ */ + ast_moutdwm(ast, 0x1E6E2090, 0x20000000); + ast_moutdwm(ast, 0x1E6E2094, 0x00004000); + if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) { + ast_moutdwm(ast, 0x1E6E207C, 0x00800000); + mdelay(100); + ast_moutdwm(ast, 0x1E6E2070, 0x00800000); + } + /* Modify eSPI reset pin */ + temp = ast_mindwm(ast, 0x1E6E2070); + if (temp & 0x02000000) + ast_moutdwm(ast, 0x1E6E207C, 0x00004000); + + /* Slow down CPU/AHB CLK in VGA only mode */ + temp = ast_read32(ast, 0x12008); + temp |= 0x73; + ast_write32(ast, 0x12008, temp); + + if (!ast_dram_init_2500(ast)) + drm_err(dev, "DRAM init failed !\n"); + + temp = ast_mindwm(ast, 0x1e6e2040); + ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); + } + + /* wait ready */ + do { + reg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + } while ((reg & 0x40) == 0); +} diff --git a/drivers/gpu/drm/ast_loongson/ast_tables.h b/drivers/gpu/drm/ast_loongson/ast_tables.h new file mode 100644 index 000000000000..e92a17a5cf27 --- /dev/null +++ b/drivers/gpu/drm/ast_loongson/ast_tables.h @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2005 ASPEED Technology Inc. + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that + * copyright notice and this permission notice appear in supporting + * documentation, and that the name of the authors not be used in + * advertising or publicity pertaining to distribution of the software without + * specific, written prior permission. The authors makes no representations + * about the suitability of this software for any purpose. It is provided + * "as is" without express or implied warranty. 
+ * + * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. + */ +/* Ported from xf86-video-ast driver */ + +#ifndef AST_TABLES_H +#define AST_TABLES_H + +/* Std. Table Index Definition */ +#define TextModeIndex 0 +#define EGAModeIndex 1 +#define VGAModeIndex 2 +#define HiCModeIndex 3 +#define TrueCModeIndex 4 + +#define Charx8Dot 0x00000001 +#define HalfDCLK 0x00000002 +#define DoubleScanMode 0x00000004 +#define LineCompareOff 0x00000008 +#define HBorder 0x00000020 +#define VBorder 0x00000010 +#define WideScreenMode 0x00000100 +#define NewModeInfo 0x00000200 +#define NHSync 0x00000400 +#define PHSync 0x00000800 +#define NVSync 0x00001000 +#define PVSync 0x00002000 +#define SyncPP (PVSync | PHSync) +#define SyncPN (PVSync | NHSync) +#define SyncNP (NVSync | PHSync) +#define SyncNN (NVSync | NHSync) +#define AST2500PreCatchCRT 0x00004000 + +/* DCLK Index */ +#define VCLK25_175 0x00 +#define VCLK28_322 0x01 +#define VCLK31_5 0x02 +#define VCLK36 0x03 +#define VCLK40 0x04 +#define VCLK49_5 0x05 +#define VCLK50 0x06 +#define VCLK56_25 0x07 +#define VCLK65 0x08 +#define VCLK75 0x09 +#define VCLK78_75 0x0A +#define VCLK94_5 0x0B +#define VCLK108 0x0C +#define VCLK135 0x0D +#define VCLK157_5 0x0E +#define VCLK162 0x0F +#define VCLK154 0x10 +#define VCLK83_5 0x11 +#define VCLK106_5 0x12 +#define VCLK146_25 0x13 +#define VCLK148_5 0x14 +#define VCLK71 0x15 +#define VCLK88_75 0x16 +#define VCLK119 0x17 +#define VCLK85_5 0x18 +#define VCLK97_75 0x19 +#define VCLK118_25 0x1A + +static const struct ast_vbios_dclk_info dclk_table[] = { + { 0x2C, 0xE7, 0x03 }, /* 00: 
VCLK25_175 */ + { 0x95, 0x62, 0x03 }, /* 01: VCLK28_322 */ + { 0x67, 0x63, 0x01 }, /* 02: VCLK31_5 */ + { 0x76, 0x63, 0x01 }, /* 03: VCLK36 */ + { 0xEE, 0x67, 0x01 }, /* 04: VCLK40 */ + { 0x82, 0x62, 0x01 }, /* 05: VCLK49_5 */ + { 0xC6, 0x64, 0x01 }, /* 06: VCLK50 */ + { 0x94, 0x62, 0x01 }, /* 07: VCLK56_25 */ + { 0x80, 0x64, 0x00 }, /* 08: VCLK65 */ + { 0x7B, 0x63, 0x00 }, /* 09: VCLK75 */ + { 0x67, 0x62, 0x00 }, /* 0A: VCLK78_75 */ + { 0x7C, 0x62, 0x00 }, /* 0B: VCLK94_5 */ + { 0x8E, 0x62, 0x00 }, /* 0C: VCLK108 */ + { 0x85, 0x24, 0x00 }, /* 0D: VCLK135 */ + { 0x67, 0x22, 0x00 }, /* 0E: VCLK157_5 */ + { 0x6A, 0x22, 0x00 }, /* 0F: VCLK162 */ + { 0x4d, 0x4c, 0x80 }, /* 10: VCLK154 */ + { 0x68, 0x6f, 0x80 }, /* 11: VCLK83.5 */ + { 0x28, 0x49, 0x80 }, /* 12: VCLK106.5 */ + { 0x37, 0x49, 0x80 }, /* 13: VCLK146.25 */ + { 0x1f, 0x45, 0x80 }, /* 14: VCLK148.5 */ + { 0x47, 0x6c, 0x80 }, /* 15: VCLK71 */ + { 0x25, 0x65, 0x80 }, /* 16: VCLK88.75 */ + { 0x77, 0x58, 0x80 }, /* 17: VCLK119 */ + { 0x32, 0x67, 0x80 }, /* 18: VCLK85_5 */ + { 0x6a, 0x6d, 0x80 }, /* 19: VCLK97_75 */ + { 0x3b, 0x2c, 0x81 }, /* 1A: VCLK118_25 */ +}; + +static const struct ast_vbios_dclk_info dclk_table_ast2500[] = { + { 0x2C, 0xE7, 0x03 }, /* 00: VCLK25_175 */ + { 0x95, 0x62, 0x03 }, /* 01: VCLK28_322 */ + { 0x67, 0x63, 0x01 }, /* 02: VCLK31_5 */ + { 0x76, 0x63, 0x01 }, /* 03: VCLK36 */ + { 0xEE, 0x67, 0x01 }, /* 04: VCLK40 */ + { 0x82, 0x62, 0x01 }, /* 05: VCLK49_5 */ + { 0xC6, 0x64, 0x01 }, /* 06: VCLK50 */ + { 0x94, 0x62, 0x01 }, /* 07: VCLK56_25 */ + { 0x80, 0x64, 0x00 }, /* 08: VCLK65 */ + { 0x7B, 0x63, 0x00 }, /* 09: VCLK75 */ + { 0x67, 0x62, 0x00 }, /* 0A: VCLK78_75 */ + { 0x7C, 0x62, 0x00 }, /* 0B: VCLK94_5 */ + { 0x8E, 0x62, 0x00 }, /* 0C: VCLK108 */ + { 0x85, 0x24, 0x00 }, /* 0D: VCLK135 */ + { 0x67, 0x22, 0x00 }, /* 0E: VCLK157_5 */ + { 0x6A, 0x22, 0x00 }, /* 0F: VCLK162 */ + { 0x4d, 0x4c, 0x80 }, /* 10: VCLK154 */ + { 0x68, 0x6f, 0x80 }, /* 11: VCLK83.5 */ + { 0x28, 0x49, 0x80 }, /* 12: 
VCLK106.5 */ + { 0x37, 0x49, 0x80 }, /* 13: VCLK146.25 */ + { 0x1f, 0x45, 0x80 }, /* 14: VCLK148.5 */ + { 0x47, 0x6c, 0x80 }, /* 15: VCLK71 */ + { 0x25, 0x65, 0x80 }, /* 16: VCLK88.75 */ + { 0x58, 0x01, 0x42 }, /* 17: VCLK119 */ + { 0x32, 0x67, 0x80 }, /* 18: VCLK85_5 */ + { 0x6a, 0x6d, 0x80 }, /* 19: VCLK97_75 */ + { 0x44, 0x20, 0x43 }, /* 1A: VCLK118_25 */ +}; + +static const struct ast_vbios_stdtable vbios_stdtable[] = { + /* MD_2_3_400 */ + { 0x67, + { 0x00, 0x03, 0x00, 0x02 }, + { 0x5f, 0x4f, 0x50, 0x82, 0x55, 0x81, 0xbf, 0x1f, 0x00, + 0x4f, 0x0d, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x8e, + 0x8f, 0x28, 0x1f, 0x96, 0xb9, 0xa3, 0xff }, + { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x14, 0x07, 0x38, 0x39, + 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x0c, 0x00, 0x0f, 0x08 }, + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x0e, 0x00, 0xff } }, + /* Mode12/ExtEGATable */ + { 0xe3, + { 0x01, 0x0f, 0x00, 0x06 }, + { 0x5f, 0x4f, 0x50, 0x82, 0x55, 0x81, 0x0b, 0x3e, 0x00, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0x8b, + 0xdf, 0x28, 0x00, 0xe7, 0x04, 0xe3, 0xff }, + { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x14, 0x07, 0x38, 0x39, + 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x01, 0x00, 0x0f, 0x00 }, + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x0f, 0xff } }, + /* ExtVGATable */ + { 0x2f, + { 0x01, 0x0f, 0x00, 0x0e }, + { 0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e, 0x00, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x8c, + 0xdf, 0x28, 0x40, 0xe7, 0x04, 0xa3, 0xff }, + { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, + 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x01, 0x00, 0x00, 0x00 }, + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0f, 0xff } }, + /* ExtHiCTable */ + { 0x2f, + { 0x01, 0x0f, 0x00, 0x0e }, + { 0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e, 0x00, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x8c, + 0xdf, 0x28, 0x40, 0xe7, 0x04, 0xa3, 0xff }, + { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, + 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x01, 0x00, 
0x00, 0x00 }, + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x0f, 0xff } }, + /* ExtTrueCTable */ + { 0x2f, + { 0x01, 0x0f, 0x00, 0x0e }, + { 0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e, 0x00, + 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x8c, + 0xdf, 0x28, 0x40, 0xe7, 0x04, 0xa3, 0xff }, + { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, + 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x01, 0x00, 0x00, 0x00 }, + { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x0f, 0xff } }, +}; + +static const struct ast_vbios_enhtable res_640x480[] = { + { 800, 640, 8, 96, 525, 480, 2, 2, VCLK25_175, /* 60Hz */ + (SyncNN | HBorder | VBorder | Charx8Dot), 60, 1, 0x2E }, + { 832, 640, 16, 40, 520, 480, 1, 3, VCLK31_5, /* 72Hz */ + (SyncNN | HBorder | VBorder | Charx8Dot), 72, 2, 0x2E }, + { 840, 640, 16, 64, 500, 480, 1, 3, VCLK31_5, /* 75Hz */ + (SyncNN | Charx8Dot), 75, 3, 0x2E }, + { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* 85Hz */ + (SyncNN | Charx8Dot), 85, 4, 0x2E }, + { 832, 640, 56, 56, 509, 480, 1, 3, VCLK36, /* end */ + (SyncNN | Charx8Dot), 0xFF, 4, 0x2E }, +}; + +static const struct ast_vbios_enhtable res_800x600[] = { + { 1024, 800, 24, 72, 625, 600, 1, 2, VCLK36, /* 56Hz */ + (SyncPP | Charx8Dot), 56, 1, 0x30 }, + { 1056, 800, 40, 128, 628, 600, 1, 4, VCLK40, /* 60Hz */ + (SyncPP | Charx8Dot), 60, 2, 0x30 }, + { 1040, 800, 56, 120, 666, 600, 37, 6, VCLK50, /* 72Hz */ + (SyncPP | Charx8Dot), 72, 3, 0x30 }, + { 1056, 800, 16, 80, 625, 600, 1, 3, VCLK49_5, /* 75Hz */ + (SyncPP | Charx8Dot), 75, 4, 0x30 }, + { 1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* 85Hz */ + (SyncPP | Charx8Dot), 84, 5, 0x30 }, + { 1048, 800, 32, 64, 631, 600, 1, 3, VCLK56_25, /* end */ + (SyncPP | Charx8Dot), 0xFF, 5, 0x30 }, +}; + +static const struct ast_vbios_enhtable res_1024x768[] = { + { 1344, 1024, 24, 136, 806, 768, 3, 6, VCLK65, /* 60Hz */ + (SyncNN | Charx8Dot), 60, 1, 0x31 }, + { 1328, 1024, 24, 136, 806, 768, 3, 6, VCLK75, /* 70Hz */ + (SyncNN | Charx8Dot), 70, 2, 0x31 
}, + { 1312, 1024, 16, 96, 800, 768, 1, 3, VCLK78_75, /* 75Hz */ + (SyncPP | Charx8Dot), 75, 3, 0x31 }, + { 1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* 85Hz */ + (SyncPP | Charx8Dot), 84, 4, 0x31 }, + { 1376, 1024, 48, 96, 808, 768, 1, 3, VCLK94_5, /* end */ + (SyncPP | Charx8Dot), 0xFF, 4, 0x31 }, +}; + +static const struct ast_vbios_enhtable res_1280x1024[] = { + { 1688, 1280, 48, 112, 1066, 1024, 1, 3, VCLK108, /* 60Hz */ + (SyncPP | Charx8Dot), 60, 1, 0x32 }, + { 1688, 1280, 16, 144, 1066, 1024, 1, 3, VCLK135, /* 75Hz */ + (SyncPP | Charx8Dot), 75, 2, 0x32 }, + { 1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* 85Hz */ + (SyncPP | Charx8Dot), 85, 3, 0x32 }, + { 1728, 1280, 64, 160, 1072, 1024, 1, 3, VCLK157_5, /* end */ + (SyncPP | Charx8Dot), 0xFF, 3, 0x32 }, +}; + +static const struct ast_vbios_enhtable res_1600x1200[] = { + { 2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* 60Hz */ + (SyncPP | Charx8Dot), 60, 1, 0x33 }, + { 2160, 1600, 64, 192, 1250, 1200, 1, 3, VCLK162, /* end */ + (SyncPP | Charx8Dot), 0xFF, 1, 0x33 }, +}; + +static const struct ast_vbios_enhtable res_1152x864[] = { + { 1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* 75Hz */ + (SyncPP | Charx8Dot | NewModeInfo), 75, 1, 0x3B }, + { 1600, 1152, 64, 128, 900, 864, 1, 3, VCLK108, /* end */ + (SyncPP | Charx8Dot | NewModeInfo), 0xFF, 1, 0x3B }, +}; + +/* 16:9 */ +static const struct ast_vbios_enhtable res_1360x768[] = { + { 1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* 60Hz */ + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 60, 1, 0x39 }, + { 1792, 1360, 64, 112, 795, 768, 3, 6, VCLK85_5, /* end */ + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 0xFF, 1, 0x39 }, +}; + +static const struct ast_vbios_enhtable res_1600x900[] = { + { 1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 60, 1, 0x3A }, + 
{ 2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 60, 2, 0x3A }, + { 2112, 1600, 88, 168, 934, 900, 3, 5, VCLK118_25, /* 60Hz CVT */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 0xFF, 2, 0x3A }, +}; + +static const struct ast_vbios_enhtable res_1920x1080[] = { + { 2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 60, 1, 0x38 }, + { 2200, 1920, 88, 44, 1125, 1080, 4, 5, VCLK148_5, /* 60Hz */ + (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 0xFF, 1, 0x38 }, +}; + +/* 16:10 */ +static const struct ast_vbios_enhtable res_1280x800[] = { + { 1440, 1280, 48, 32, 823, 800, 3, 6, VCLK71, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 60, 1, 0x35 }, + { 1680, 1280, 72, 128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 60, 2, 0x35 }, + { 1680, 1280, 72, 128, 831, 800, 3, 6, VCLK83_5, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 0xFF, 2, 0x35 }, + +}; + +static const struct ast_vbios_enhtable res_1440x900[] = { + { 1600, 1440, 48, 32, 926, 900, 3, 6, VCLK88_75, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 60, 1, 0x36 }, + { 1904, 1440, 80, 152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 60, 2, 0x36 }, + { 1904, 1440, 80, 152, 934, 900, 3, 6, VCLK106_5, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 0xFF, 2, 0x36 }, +}; + +static const struct ast_vbios_enhtable res_1680x1050[] = { + { 1840, 1680, 48, 32, 1080, 1050, 3, 6, VCLK119, /* 60Hz RB */ + (SyncNP | Charx8Dot | 
LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 60, 1, 0x37 }, + { 2240, 1680, 104, 176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 60, 2, 0x37 }, + { 2240, 1680, 104, 176, 1089, 1050, 3, 6, VCLK146_25, /* 60Hz */ + (SyncPN | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), + 0xFF, 2, 0x37 }, +}; + +static const struct ast_vbios_enhtable res_1920x1200[] = { + { 2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB*/ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 60, 1, 0x34 }, + { 2080, 1920, 48, 32, 1235, 1200, 3, 6, VCLK154, /* 60Hz RB */ + (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo | + AST2500PreCatchCRT), + 0xFF, 1, 0x34 }, +}; + +#endif -- Gitee From 191f681a2172bb047feb92e3f3a186a82ad3ef6f Mon Sep 17 00:00:00 2001 From: "YiLin.Li" Date: Thu, 31 Oct 2024 20:43:10 +0800 Subject: [PATCH 1610/2138] anolis: configs: set both CONFIG_CRYPTO_DEV_CCP_DD and CONFIG_TCG_HYGON y ANBZ: #11593 Signed-off-by: YiLin.Li Reviewed-by: Tianjia Zhang Link: https://gitee.com/anolis/cloud-kernel/pulls/4054 --- anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DD | 2 +- anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_HYGON | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DD index 18c7b900eb3c..371645117adf 100644 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DD +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DD @@ -1 +1 @@ -CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_CCP_DD=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_HYGON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_HYGON index aaa0cb94d759..bf7d85b6af8f 100644 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_HYGON +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_HYGON @@ -1 +1 
@@ -CONFIG_TCG_HYGON=m +CONFIG_TCG_HYGON=y -- Gitee From fa49e8e366b3b7f3bf720a3d8d88a828e040c4fd Mon Sep 17 00:00:00 2001 From: "YiLin.Li" Date: Thu, 31 Oct 2024 20:40:35 +0800 Subject: [PATCH 1611/2138] anolis:ima/Kconfig: add the select relationship about IMA and TCG_HYGON ANBZ: #11593 Signed-off-by: YiLin.Li Reviewed-by: Tianjia Zhang Link: https://gitee.com/anolis/cloud-kernel/pulls/4054 --- security/integrity/ima/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig index a6bd817efc1a..3a28beee0720 100644 --- a/security/integrity/ima/Kconfig +++ b/security/integrity/ima/Kconfig @@ -11,6 +11,7 @@ config IMA select TCG_TPM if HAS_IOMEM select TCG_TIS if TCG_TPM && X86 select TCG_CRB if TCG_TPM && ACPI + select TCG_HYGON if TCG_TPM && CPU_SUP_HYGON select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES select INTEGRITY_AUDIT if AUDIT help -- Gitee From ea3e792595f6983368bbb5dfbb7fddc497364aed Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 21 Oct 2024 17:08:31 +0800 Subject: [PATCH 1612/2138] anolis: x86: Provide helper is_x86_vendor_hygon() and use it in Hygon confidential computing code ANBZ: #11454 Provide helper is_x86_vendor_hygon(), use it in the Hygon confidential computing code. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4015 --- arch/x86/include/asm/processor-hygon.h | 23 +++++++++++++++ arch/x86/kernel/sev.c | 5 ++-- arch/x86/kvm/svm/csv.h | 4 ++- arch/x86/kvm/svm/sev.c | 39 +++++++++++++------------- arch/x86/kvm/svm/svm.c | 14 ++++----- arch/x86/kvm/x86.c | 11 ++++---- arch/x86/mm/csv.c | 3 +- arch/x86/mm/mem_encrypt.c | 4 ++- 8 files changed, 66 insertions(+), 37 deletions(-) create mode 100644 arch/x86/include/asm/processor-hygon.h diff --git a/arch/x86/include/asm/processor-hygon.h b/arch/x86/include/asm/processor-hygon.h new file mode 100644 index 000000000000..a19bda3ed005 --- /dev/null +++ b/arch/x86/include/asm/processor-hygon.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * The helpers to support Hygon CPU specific code path. + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef _ASM_X86_PROCESSOR_HYGON_H +#define _ASM_X86_PROCESSOR_HYGON_H + +#include + +/* + * helper to determine HYGON CPU + */ +static inline bool is_x86_vendor_hygon(void) +{ + return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON; +} + +#endif /* _ASM_X86_PROCESSOR_HYGON_H */ diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c index 62eef7824ab1..614335589d52 100644 --- a/arch/x86/kernel/sev.c +++ b/arch/x86/kernel/sev.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -1858,7 +1859,7 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_co * codes here are in atomic context. If #VC comes from user mode, then * it's necessary to switch to atomic context manually. 
*/ - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && !in_nmi()) + if (is_x86_vendor_hygon() && !in_nmi()) __preempt_count_add(HARDIRQ_OFFSET); ghcb = __sev_get_ghcb(&state); @@ -1871,7 +1872,7 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_co __sev_put_ghcb(&state); - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && !in_nmi()) + if (is_x86_vendor_hygon() && !in_nmi()) __preempt_count_sub(HARDIRQ_OFFSET); /* Done - now check the result */ diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index df5cf9ea9422..d8db8423cd92 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -10,6 +10,8 @@ #ifndef __SVM_CSV_H #define __SVM_CSV_H +#include + #ifdef CONFIG_HYGON_CSV void __init csv_init(struct kvm_x86_ops *ops); @@ -20,4 +22,4 @@ static inline void __init csv_init(struct kvm_x86_ops *ops) { } #endif /* CONFIG_HYGON_CSV */ -#endif +#endif /* __SVM_CSV_H */ diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index b8740bca7312..c3f22fa97739 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "mmu.h" #include "x86.h" @@ -195,7 +196,7 @@ static int sev_asid_new(struct kvm_sev_info *sev) #ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID /* For Hygon CPU, check whether the userid exists */ - if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + if ((is_x86_vendor_hygon()) && userid && userid_len) { int i = !min_sev_asid ? 1 : min_sev_asid; @@ -225,7 +226,7 @@ static int sev_asid_new(struct kvm_sev_info *sev) * No matter what the min_sev_asid is, all asids in range * [1, max_sev_asid] can be used for CSV2 guest on Hygon CPUs. 
*/ - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + if (is_x86_vendor_hygon()) max_asid = max_sev_asid; again: asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid); @@ -243,7 +244,7 @@ static int sev_asid_new(struct kvm_sev_info *sev) #ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID /* For Hygon CPU, initialize the new userid */ - if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + if ((is_x86_vendor_hygon()) && userid && userid_len) { memcpy(csv_asid_userid_array[asid].userid, userid, userid_len); csv_asid_userid_array[asid].userid_len = userid_len; @@ -277,7 +278,7 @@ static void sev_asid_free(struct kvm_sev_info *sev) #ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID /* For Hygon CPU, decrease the reference count if userid exist */ - if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + if ((is_x86_vendor_hygon()) && csv_asid_userid_array[sev->asid].userid_len) { /* If reach here, reference count should large than 0. */ WARN_ON(csv_asid_userid_array[sev->asid].refcnt <= 0); @@ -356,7 +357,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) sev->es_active = argp->id == KVM_SEV_ES_INIT; #ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (is_x86_vendor_hygon()) { memset(¶ms, 0, sizeof(params)); if (argp->data && @@ -833,7 +834,7 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu, * memory area pointed by svm->sev_es.vmsa so that we can read * fresh memory updated by PSP. 
*/ - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (is_x86_vendor_hygon()) { clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); memcpy(svm->sev_es.reset_vmsa, svm->sev_es.vmsa, PAGE_SIZE); } @@ -2289,7 +2290,7 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) r = sev_send_update_data(kvm, &sev_cmd); break; case KVM_SEV_SEND_UPDATE_VMSA: - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + if (is_x86_vendor_hygon()) r = sev_send_update_vmsa(kvm, &sev_cmd); else r = -EINVAL; @@ -2307,7 +2308,7 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) r = sev_receive_update_data(kvm, &sev_cmd); break; case KVM_SEV_RECEIVE_UPDATE_VMSA: - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + if (is_x86_vendor_hygon()) r = sev_receive_update_vmsa(kvm, &sev_cmd); else r = -EINVAL; @@ -2316,7 +2317,7 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) r = sev_receive_finish(kvm, &sev_cmd); break; case KVM_CSV_COMMAND_BATCH: - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (is_x86_vendor_hygon()) { mutex_lock(&csv_cmd_batch_mutex); r = csv_command_batch(kvm, &sev_cmd); mutex_unlock(&csv_cmd_batch_mutex); @@ -2621,7 +2622,7 @@ void __init sev_hardware_setup(void) goto out; } - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (is_x86_vendor_hygon()) { #ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID /* Initialize CSV ASID reuse array */ csv_asid_userid_array = kcalloc(nr_asids, @@ -2678,7 +2679,7 @@ void __init sev_hardware_setup(void) goto out; } - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (is_x86_vendor_hygon()) { /* * Ths ASIDs from 1 to max_sev_asid are available for hygon * CSV2 guest. @@ -2697,19 +2698,17 @@ void __init sev_hardware_setup(void) out: if (boot_cpu_has(X86_FEATURE_SEV)) pr_info("%s %s (ASIDs %u - %u)\n", - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "CSV" : "SEV", + is_x86_vendor_hygon() ? "CSV" : "SEV", sev_supported ? min_sev_asid <= max_sev_asid ? 
"enabled" : "unusable" : "disabled", min_sev_asid, max_sev_asid); if (boot_cpu_has(X86_FEATURE_SEV_ES)) pr_info("%s %s (ASIDs %u - %u)\n", - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "CSV2" : "SEV-ES", + is_x86_vendor_hygon() ? "CSV2" : "SEV-ES", sev_es_supported ? "enabled" : "disabled", - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? - 1 : (min_sev_asid > 1 ? 1 : 0), - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? - max_sev_asid : min_sev_asid - 1); + is_x86_vendor_hygon() ? 1 : (min_sev_asid > 1 ? 1 : 0), + is_x86_vendor_hygon() ? max_sev_asid : min_sev_asid - 1); sev_enabled = sev_supported; sev_es_enabled = sev_es_supported; @@ -2727,7 +2726,7 @@ void sev_hardware_unsetup(void) /* No need to take sev_bitmap_lock, all VMs have been destroyed. */ sev_flush_asids(1, max_sev_asid); - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (is_x86_vendor_hygon()) { free_trans_mempool(); #ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID kfree(csv_asid_userid_array); @@ -3089,7 +3088,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu) #ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID /* If ASID is shared with other guests, then flush TLB before VMRUN */ - if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + if ((is_x86_vendor_hygon()) && csv_asid_userid_array[asid].userid_len) svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; #endif @@ -3623,7 +3622,7 @@ int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) unsigned long guest_uaddr, n; int ret = 0, offset, error; - if (!sev_guest(kvm) || (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)) + if (!sev_guest(kvm) || !is_x86_vendor_hygon()) return -ENOTTY; /* diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 484a04089234..20a5d3513832 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -42,6 +42,7 @@ #include #include #include +#include #include @@ -549,7 +550,7 @@ static bool __kvm_is_svm_supported(void) } if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { - if 
(boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + if (is_x86_vendor_hygon()) pr_info("KVM is unsupported when running as an CSV guest\n"); else pr_info("KVM is unsupported when running as an SEV guest\n"); @@ -2972,7 +2973,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = svm->msr_decfg; break; case MSR_AMD64_SEV_ES_GHCB: - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (is_x86_vendor_hygon()) { /* * Only support userspace get/set from/to * vmcb.control.ghcb_gpa @@ -3243,7 +3244,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) break; } case MSR_AMD64_SEV_ES_GHCB: - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (is_x86_vendor_hygon()) { /* * Only support userspace get/set from/to * vmcb.control.ghcb_gpa @@ -4261,7 +4262,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) * the necessary GHCB page. When handling the exit code * afterwards, it can exit to userspace and stop the guest. */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + if (is_x86_vendor_hygon() && sev_es_guest(vcpu->kvm) && svm->sev_es.receiver_ghcb_map_fail) { svm->vmcb->control.exit_code = SVM_EXIT_ERR; @@ -4447,8 +4448,7 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) * Only CSV2 guests support to export this MSR, this should * be determined after KVM_CREATE_VM. 
*/ - if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON || - (kvm && !sev_es_guest(kvm))) + if (!is_x86_vendor_hygon() || (kvm && !sev_es_guest(kvm))) return false; break; default: @@ -5520,7 +5520,7 @@ static int __init svm_init(void) if (!kvm_is_svm_supported()) return -EOPNOTSUPP; - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + if (is_x86_vendor_hygon()) csv_init(&svm_x86_ops); r = kvm_x86_vendor_init(&svm_init_ops); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7a044c4427f3..2b21096fc18b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -86,6 +86,8 @@ #include #include +#include + #define CREATE_TRACE_POINTS #include "trace.h" @@ -4640,7 +4642,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) * but only CSV2 guest support export to emulate * MSR_AMD64_SEV_ES_GHCB. */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + if (is_x86_vendor_hygon()) r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_AMD64_SEV_ES_GHCB); break; @@ -7116,14 +7118,14 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) break; } case KVM_CONTROL_PRE_SYSTEM_RESET: - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + if (is_x86_vendor_hygon() && kvm_x86_ops.control_pre_system_reset) r = static_call(kvm_x86_control_pre_system_reset)(kvm); else r = -ENOTTY; break; case KVM_CONTROL_POST_SYSTEM_RESET: - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + if (is_x86_vendor_hygon() && kvm_x86_ops.control_post_system_reset) r = static_call(kvm_x86_control_post_system_reset)(kvm); else @@ -11573,8 +11575,7 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, if (kvm_set_apic_base(vcpu, &apic_base_msr)) return -EINVAL; - if (vcpu->arch.guest_state_protected && - boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + if (vcpu->arch.guest_state_protected && !is_x86_vendor_hygon()) return 0; if (!vcpu->arch.guest_state_protected) { diff --git a/arch/x86/mm/csv.c b/arch/x86/mm/csv.c index 
09f2cb7b358a..9757fc40f5bf 100644 --- a/arch/x86/mm/csv.c +++ b/arch/x86/mm/csv.c @@ -17,6 +17,7 @@ #include #include #include +#include #undef pr_fmt #define pr_fmt(fmt) "CSV-CMA: " fmt @@ -210,7 +211,7 @@ static bool __init csv3_check_cpu_support(void) u64 msr; bool csv3_enabled; - if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + if (!is_x86_vendor_hygon()) return false; if (sev_status) diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 054f6113be67..fde0c763a156 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -14,6 +14,8 @@ #include #include +#include + /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */ bool force_dma_unencrypted(struct device *dev) { @@ -73,7 +75,7 @@ static void print_mem_encrypt_feature_info(void) return; } - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (is_x86_vendor_hygon()) { print_hygon_cc_feature_info(); return; } -- Gitee From 4a84dbf83d862c414ab46697a2f878816ce8a681 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 22 Oct 2024 10:41:54 +0800 Subject: [PATCH 1613/2138] anolis: virt/csv-guest: CONFIG_CSV_GUEST should depends on CONFIG_HYGON_CSV ANBZ: #11454 In addition, remove test code in the module and fix the SPDX comment in csv-guest.h. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4015 --- drivers/virt/coco/csv-guest/Kconfig | 2 +- drivers/virt/coco/csv-guest/csv-guest.c | 16 ---------------- drivers/virt/coco/csv-guest/csv-guest.h | 2 +- 3 files changed, 2 insertions(+), 18 deletions(-) diff --git a/drivers/virt/coco/csv-guest/Kconfig b/drivers/virt/coco/csv-guest/Kconfig index 4cbde598e665..f14f6766e5ae 100644 --- a/drivers/virt/coco/csv-guest/Kconfig +++ b/drivers/virt/coco/csv-guest/Kconfig @@ -1,7 +1,7 @@ config CSV_GUEST tristate "HYGON CSV Guest driver" default m - depends on AMD_MEM_ENCRYPT + depends on HYGON_CSV help CSV firmware provides the guest a mechanism to communicate with the PSP without risk from a malicious hypervisor who wishes to read, diff --git a/drivers/virt/coco/csv-guest/csv-guest.c b/drivers/virt/coco/csv-guest/csv-guest.c index 7bd9abe7d8b6..7db8177637ce 100644 --- a/drivers/virt/coco/csv-guest/csv-guest.c +++ b/drivers/virt/coco/csv-guest/csv-guest.c @@ -64,19 +64,6 @@ static long csv_guest_ioctl(struct file *file, unsigned int cmd, unsigned long a } } -static void mem_test_init(void) -{ - char head_str[] = "test mem encrypt"; - u64 *va_addr = __va(0x0); - - if (va_addr) { - memset(va_addr, 0x66, PAGE_SIZE); - memcpy(va_addr, head_str, sizeof(head_str)); - clflush_cache_range(va_addr, PAGE_SIZE); - } else - pr_err("Initialize 1 page for csv memory test failed!\n"); -} - static const struct file_operations csv_guest_fops = { .owner = THIS_MODULE, .unlocked_ioctl = csv_guest_ioctl, @@ -96,9 +83,6 @@ static int __init csv_guest_init(void) if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) return -ENODEV; - // Initialize 1 page for csv memory test - mem_test_init(); - return misc_register(&csv_guest_dev); } diff --git a/drivers/virt/coco/csv-guest/csv-guest.h b/drivers/virt/coco/csv-guest/csv-guest.h index 0342d5f16cb3..337211b928db 100644 --- a/drivers/virt/coco/csv-guest/csv-guest.h +++ 
b/drivers/virt/coco/csv-guest/csv-guest.h @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0-only +/* SPDX-License-Identifier: GPL-2.0-only */ /* * * Userspace interface for CSV guest driver -- Gitee From a4f3fec9264abf3b581084f482a977154e7cf0f9 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 21 Oct 2024 18:09:55 +0800 Subject: [PATCH 1614/2138] anolis: x86/mm: Rename csv.c to mem_encrypt_hygon.c ANBZ: #11454 The name csv.c is limited, rename it to mem_encrypt_hygon.c and aggregate basic functions about Hygon memory encryption into the file mem_encrypt_hygon.c. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4015 --- arch/x86/boot/compressed/csv.c | 4 + arch/x86/include/asm/csv.h | 37 ++-- arch/x86/include/asm/mem_encrypt.h | 6 + arch/x86/kernel/csv-shared.c | 7 - arch/x86/kernel/csv.c | 21 -- arch/x86/mm/Makefile | 2 +- arch/x86/mm/mem_encrypt.c | 25 --- arch/x86/mm/{csv.c => mem_encrypt_hygon.c} | 212 +++++++++++++-------- 8 files changed, 169 insertions(+), 145 deletions(-) rename arch/x86/mm/{csv.c => mem_encrypt_hygon.c} (77%) diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c index ae6ca1484048..042340abdaea 100644 --- a/arch/x86/boot/compressed/csv.c +++ b/arch/x86/boot/compressed/csv.c @@ -14,6 +14,10 @@ #define __initdata #define __pa(x) ((unsigned long)(x)) +#include +#include + +/* Include code for early secure calls */ #include "../../kernel/csv-shared.c" static unsigned int csv3_enabled __section(".data"); diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index 30e8a89ce8c0..e2fcaf4ded5f 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -12,13 +12,13 @@ #ifndef __ASSEMBLY__ +#ifdef CONFIG_HYGON_CSV + struct csv_mem { uint64_t start; uint64_t size; }; -#ifdef CONFIG_HYGON_CSV - #define CSV_MR_ALIGN_BITS (28) extern struct csv_mem *csv_smr; @@ -32,16 +32,6 @@ void csv_release_to_contiguous(phys_addr_t pa, 
size_t size); uint32_t csv_get_smr_entry_shift(void); -bool csv3_active(void); - -void __init csv_early_reset_memory(struct boot_params *bp); -void __init csv_early_update_memory_enc(u64 vaddr, u64 pages); -void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); - -void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc); - -void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc); - #else /* !CONFIG_HYGON_CSV */ #define csv_smr NULL @@ -56,6 +46,29 @@ static inline void csv_release_to_contiguous(phys_addr_t pa, size_t size) { } static inline uint32_t csv_get_smr_entry_shift(void) { return 0; } +#endif /* CONFIG_HYGON_CSV */ + +#define CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 +#define CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 +#define CPUID_VENDOR_HygonGenuine_edx 0x6e65476e + +#define MSR_CSV3_ENABLED_BIT 30 +#define MSR_CSV3_ENABLED BIT_ULL(MSR_CSV3_ENABLED_BIT) + +#ifdef CONFIG_HYGON_CSV + +bool csv3_active(void); + +void __init csv_early_reset_memory(struct boot_params *bp); +void __init csv_early_update_memory_enc(u64 vaddr, u64 pages); +void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); + +void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc); + +void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc); + +#else /* !CONFIG_HYGON_CSV */ + static inline bool csv3_active(void) { return false; } static inline void __init csv_early_reset_memory(struct boot_params *bp) { } diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 76081a34fc23..9816db501ea4 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -114,6 +114,12 @@ void add_encrypt_protection_map(void); extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[]; +#ifdef CONFIG_HYGON_CSV +extern void print_hygon_cc_feature_info(void); +#else /* !CONFIG_HYGON_CSV */ +static inline void print_hygon_cc_feature_info(void) { } +#endif /* CONFIG_HYGON_CSV */ + #endif 
/* __ASSEMBLY__ */ #endif /* __X86_MEM_ENCRYPT_H__ */ diff --git a/arch/x86/kernel/csv-shared.c b/arch/x86/kernel/csv-shared.c index fd55e570bbbb..e8c482898802 100644 --- a/arch/x86/kernel/csv-shared.c +++ b/arch/x86/kernel/csv-shared.c @@ -10,13 +10,6 @@ #include -#define CPUID_VENDOR_HygonGenuine_ebx 0x6f677948 -#define CPUID_VENDOR_HygonGenuine_ecx 0x656e6975 -#define CPUID_VENDOR_HygonGenuine_edx 0x6e65476e - -#define MSR_CSV3_ENABLED_BIT 30 -#define MSR_CSV3_ENABLED BIT_ULL(MSR_CSV3_ENABLED_BIT) - /* ****************************** CSV3 secure call ******************************* * diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c index c0ad12aa94f3..6f2106ac3bf4 100644 --- a/arch/x86/kernel/csv.c +++ b/arch/x86/kernel/csv.c @@ -14,10 +14,6 @@ #include "../mm/mm_internal.h" #include "csv-shared.c" -u32 vendor_ebx __section(".data") = 0; -u32 vendor_ecx __section(".data") = 0; -u32 vendor_edx __section(".data") = 0; - struct secure_call_pages { struct csv3_secure_call_cmd page_a; struct csv3_secure_call_cmd page_b; @@ -32,23 +28,6 @@ static DEFINE_PER_CPU(int, secure_call_page_idx); typedef void (*csv3_secure_call_func)(u64 base_address, u64 num_pages, enum csv3_secure_command_type cmd_type); -bool noinstr csv3_active(void) -{ - if (vendor_ebx == 0 || vendor_ecx == 0 || vendor_edx == 0) { - u32 eax = 0; - - native_cpuid(&eax, &vendor_ebx, &vendor_ecx, &vendor_edx); - } - - /* HygonGenuine */ - if (vendor_ebx == CPUID_VENDOR_HygonGenuine_ebx && - vendor_ecx == CPUID_VENDOR_HygonGenuine_ecx && - vendor_edx == CPUID_VENDOR_HygonGenuine_edx) - return !!(sev_status & MSR_CSV3_ENABLED); - else - return false; -} - void __init csv_early_reset_memory(struct boot_params *bp) { if (!csv3_active()) diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 166a0934d3e4..699cd989f6af 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -68,4 +68,4 @@ obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += 
mem_encrypt_identity.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o -obj-$(CONFIG_HYGON_CSV) += csv.o +obj-$(CONFIG_HYGON_CSV) += mem_encrypt_hygon.o diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index fde0c763a156..050f77087d8f 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -12,7 +12,6 @@ #include #include #include -#include #include @@ -42,30 +41,6 @@ bool force_dma_unencrypted(struct device *dev) return false; } -static void print_hygon_cc_feature_info(void) -{ - /* Secure Memory Encryption */ - if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { - /* - * HYGON SME is mutually exclusive with any of the - * HYGON CSV features below. - */ - pr_info(" HYGON SME"); - return; - } - - /* Secure Encrypted Virtualization */ - if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) - pr_info(" HYGON CSV"); - - /* Encrypted Register State */ - if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) - pr_info(" HYGON CSV2"); - - if (csv3_active()) - pr_info(" HYGON CSV3"); -} - static void print_mem_encrypt_feature_info(void) { pr_info("Memory Encryption Features active:"); diff --git a/arch/x86/mm/csv.c b/arch/x86/mm/mem_encrypt_hygon.c similarity index 77% rename from arch/x86/mm/csv.c rename to arch/x86/mm/mem_encrypt_hygon.c index 9757fc40f5bf..1871850cbb60 100644 --- a/arch/x86/mm/csv.c +++ b/arch/x86/mm/mem_encrypt_hygon.c @@ -1,16 +1,23 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Hygon China Secure Virtualization (CSV) + * HYGON Memory Encryption Support * - * Copyright (C) Hygon Info Technologies Ltd. + * Copyright (C) 2024 Hygon Info Technologies Ltd. * - * Author: Jiang Xin + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
*/ +#define DISABLE_BRANCH_PROFILING + +#include +#include +#include #include #include -#include -#include #include #include #include @@ -19,14 +26,114 @@ #include #include -#undef pr_fmt -#define pr_fmt(fmt) "CSV-CMA: " fmt +u32 vendor_ebx __section(".data") = 0; +u32 vendor_ecx __section(".data") = 0; +u32 vendor_edx __section(".data") = 0; -#define NUM_SMR_ENTRIES (8 * 1024) -#define CSV_CMA_SHIFT PUD_SHIFT -#define CSV_CMA_SIZE (1 << CSV_CMA_SHIFT) -#define MIN_SMR_ENTRY_SHIFT 23 -#define CSV_SMR_INFO_SIZE (nr_node_ids * sizeof(struct csv_mem)) +void print_hygon_cc_feature_info(void) +{ + /* Secure Memory Encryption */ + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { + /* + * HYGON SME is mutually exclusive with any of the + * HYGON CSV features below. + */ + pr_info(" HYGON SME"); + return; + } + + /* Secure Encrypted Virtualization */ + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) + pr_info(" HYGON CSV"); + + /* Encrypted Register State */ + if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) + pr_info(" HYGON CSV2"); + + if (csv3_active()) + pr_info(" HYGON CSV3"); +} + +/* + * Check whether host supports CSV3 in hygon platform. + * Called in the guest, it always returns false. 
+ */ +static bool __init __maybe_unused csv3_check_cpu_support(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned long me_mask; + u64 msr; + bool csv3_enabled; + + if (!is_x86_vendor_hygon()) + return false; + + if (sev_status) + return false; + + /* Check for the SME/CSV support leaf */ + eax = 0x80000000; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (eax < 0x8000001f) + return false; + +#define HYGON_SME_BIT BIT(0) +#define HYGON_CSV3_BIT BIT(30) + /* + * Check for the CSV feature: + * CPUID Fn8000_001F[EAX] + * - Bit 0 - SME support + * - Bit 1 - CSV support + * - Bit 3 - CSV2 support + * - Bit 30 - CSV3 support + */ + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (!(eax & HYGON_SME_BIT)) + return false; + + csv3_enabled = !!(eax & HYGON_CSV3_BIT); + + me_mask = 1UL << (ebx & 0x3f); + + /* No SME if Hypervisor bit is set */ + eax = 1; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (ecx & BIT(31)) + return false; + + /* For SME, check the SYSCFG MSR */ + msr = __rdmsr(MSR_AMD64_SYSCFG); + if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) + return false; + + return !!me_mask && csv3_enabled; +} + +/* csv3_active() indicate whether the guest is protected by CSV3 */ +bool noinstr csv3_active(void) +{ + if (vendor_ebx == 0 || vendor_ecx == 0 || vendor_edx == 0) { + u32 eax = 0; + + native_cpuid(&eax, &vendor_ebx, &vendor_ecx, &vendor_edx); + } + + /* HygonGenuine */ + if (vendor_ebx == CPUID_VENDOR_HygonGenuine_ebx && + vendor_ecx == CPUID_VENDOR_HygonGenuine_ecx && + vendor_edx == CPUID_VENDOR_HygonGenuine_edx) + return !!(sev_status & MSR_CSV3_ENABLED); + else + return false; +} + +/******************************************************************************/ +/**************************** CSV3 CMA interfaces *****************************/ +/******************************************************************************/ /* 0 percent of total memory by default*/ static unsigned char csv_mem_percentage; @@ -71,6 +178,12 
@@ static int __init cmdline_parse_csv_mem_percentage(char *str) } early_param("csv_mem_percentage", cmdline_parse_csv_mem_percentage); +#define NUM_SMR_ENTRIES (8 * 1024) +#define CSV_CMA_SHIFT PUD_SHIFT +#define CSV_CMA_SIZE (1 << CSV_CMA_SHIFT) +#define MIN_SMR_ENTRY_SHIFT 23 +#define CSV_SMR_INFO_SIZE (nr_node_ids * sizeof(struct csv_mem)) + struct csv_mem *csv_smr; EXPORT_SYMBOL_GPL(csv_smr); @@ -93,7 +206,7 @@ static struct cma_array *csv_contiguous_pernuma_area[MAX_NUMNODES]; static void csv_set_smr_entry_shift(unsigned int shift) { smr_entry_shift = max_t(unsigned int, shift, MIN_SMR_ENTRY_SHIFT); - pr_info("SMR entry size is 0x%x\n", 1 << smr_entry_shift); + pr_info("CSV-CMA: SMR entry size is 0x%x\n", 1 << smr_entry_shift); } unsigned int csv_get_smr_entry_shift(void) @@ -130,7 +243,7 @@ static void __init csv_cma_reserve_mem(void) csv_smr = memblock_alloc_node(CSV_SMR_INFO_SIZE, SMP_CACHE_BYTES, NUMA_NO_NODE); if (!csv_smr) { - pr_err("Fail to allocate csv_smr\n"); + pr_err("CSV-CMA: Fail to allocate csv_smr\n"); return; } @@ -150,7 +263,7 @@ static void __init csv_cma_reserve_mem(void) cma_array_size = count * sizeof(*csv_cma) + sizeof(*array); array = memblock_alloc_node(cma_array_size, SMP_CACHE_BYTES, NUMA_NO_NODE); if (!array) { - pr_err("Fail to allocate cma_array\n"); + pr_err("CSV-CMA: Fail to allocate cma_array\n"); continue; } @@ -165,7 +278,7 @@ static void __init csv_cma_reserve_mem(void) 1 << CSV_MR_ALIGN_BITS, PMD_SHIFT - PAGE_SHIFT, false, name, &(csv_cma->cma), node); if (ret) { - pr_warn("Fail to reserve memory size 0x%x node %d\n", + pr_warn("CSV-CMA: Fail to reserve memory size 0x%x node %d\n", 1 << CSV_CMA_SHIFT, node); break; } @@ -190,7 +303,7 @@ static void __init csv_cma_reserve_mem(void) csv_smr[idx].size = end - start; idx++; - pr_info("Node %d - reserve size 0x%016lx, (expected size 0x%016lx)\n", + pr_info("CSV-CMA: Node %d - reserve size 0x%016lx, (expected size 0x%016lx)\n", node, (unsigned long)i * CSV_CMA_SIZE, size); } @@ 
-200,65 +313,6 @@ static void __init csv_cma_reserve_mem(void) csv_set_smr_entry_shift(ilog2(max_spanned_size / NUM_SMR_ENTRIES - 1) + 1); } -/* - * Check whether host supports CSV3 in hygon platform. - * Called in the guest, it always returns false. - */ -static bool __init csv3_check_cpu_support(void) -{ - unsigned int eax, ebx, ecx, edx; - unsigned long me_mask; - u64 msr; - bool csv3_enabled; - - if (!is_x86_vendor_hygon()) - return false; - - if (sev_status) - return false; - - /* Check for the SME/CSV support leaf */ - eax = 0x80000000; - ecx = 0; - native_cpuid(&eax, &ebx, &ecx, &edx); - if (eax < 0x8000001f) - return false; - -#define HYGON_SME_BIT BIT(0) -#define HYGON_CSV3_BIT BIT(30) - /* - * Check for the CSV feature: - * CPUID Fn8000_001F[EAX] - * - Bit 0 - SME support - * - Bit 1 - CSV support - * - Bit 3 - CSV2 support - * - Bit 30 - CSV3 support - */ - eax = 0x8000001f; - ecx = 0; - native_cpuid(&eax, &ebx, &ecx, &edx); - if (!(eax & HYGON_SME_BIT)) - return false; - - csv3_enabled = !!(eax & HYGON_CSV3_BIT); - - me_mask = 1UL << (ebx & 0x3f); - - /* No SME if Hypervisor bit is set */ - eax = 1; - ecx = 0; - native_cpuid(&eax, &ebx, &ecx, &edx); - if (ecx & BIT(31)) - return false; - - /* For SME, check the SYSCFG MSR */ - msr = __rdmsr(MSR_AMD64_SYSCFG); - if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) - return false; - - return !!me_mask && csv3_enabled; -} - #define CSV_CMA_AREAS 2458 void __init early_csv_reserve_mem(void) @@ -283,7 +337,7 @@ void __init early_csv_reserve_mem(void) } if (!csv_mem_percentage) { - pr_warn("Don't reserve any memory\n"); + pr_warn("CSV-CMA: Don't reserve any memory\n"); return; } @@ -302,7 +356,7 @@ phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, int fast = 1; if (!nodes_allowed || size > CSV_CMA_SIZE) { - pr_err("Invalid params, size = 0x%lx, nodes_allowed = %p\n", + pr_err("CSV-CMA: Invalid params, size = 0x%lx, nodes_allowed = %p\n", size, nodes_allowed); return 0; } @@ -353,7 +407,7 @@ 
phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, fast = 0; goto retry; } else { - pr_err("Fail to alloc secure memory(size = 0x%lx)\n", size); + pr_err("CSV-CMA: Fail to alloc secure memory(size = 0x%lx)\n", size); return 0; } -- Gitee From 3ff6c59d215d912537cd185257ae44851f30c6d8 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 21 Oct 2024 19:58:07 +0800 Subject: [PATCH 1615/2138] anolis: x86: Rename csv3_early_secure_call() to csv3_early_secure_call_ident_map() ANBZ: #11454 The function csv3_early_secure_call() access secure call pages based on the identity mapping. In order to distinguish with other secure call helpers, rename csv3_early_secure_call() to csv3_early_secure_call_ident_map() here. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4015 --- arch/x86/boot/compressed/csv.c | 8 +++++--- arch/x86/kernel/csv-shared.c | 8 ++++---- arch/x86/kernel/csv.c | 8 +++++--- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c index 042340abdaea..18e0bde5bca2 100644 --- a/arch/x86/boot/compressed/csv.c +++ b/arch/x86/boot/compressed/csv.c @@ -30,10 +30,12 @@ void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr) if ((set | clr) & _PAGE_ENC) { if (set & _PAGE_ENC) - csv3_early_secure_call(__pa(address), 1, CSV3_SECURE_CMD_ENC); + csv3_early_secure_call_ident_map(__pa(address), 1, + CSV3_SECURE_CMD_ENC); if (clr & _PAGE_ENC) - csv3_early_secure_call(__pa(address), 1, CSV3_SECURE_CMD_DEC); + csv3_early_secure_call_ident_map(__pa(address), 1, + CSV3_SECURE_CMD_DEC); } } @@ -55,7 +57,7 @@ void csv_init_secure_call_pages(void *boot_params) * field. 
*/ csv3_scan_secure_call_pages(boot_params); - csv3_early_secure_call(0, 0, CSV3_SECURE_CMD_RESET); + csv3_early_secure_call_ident_map(0, 0, CSV3_SECURE_CMD_RESET); csv3_secure_call_init = 1; } diff --git a/arch/x86/kernel/csv-shared.c b/arch/x86/kernel/csv-shared.c index e8c482898802..0763195764da 100644 --- a/arch/x86/kernel/csv-shared.c +++ b/arch/x86/kernel/csv-shared.c @@ -168,14 +168,14 @@ void __init csv3_scan_secure_call_pages(struct boot_params *boot_params) } /** - * csv3_early_secure_call - issue early secure call command at the stage where - * identity page table is created. + * csv3_early_secure_call_ident_map - issue early secure call command at the + * stage where identity page table is created. * @base_address: Start address of the specified memory range. * @num_pages: number of the specific pages. * @cmd_type: Secure call cmd type. */ -void __init csv3_early_secure_call(u64 base_address, u64 num_pages, - enum csv3_secure_command_type cmd_type) +void __init csv3_early_secure_call_ident_map(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) { struct csv3_secure_call_cmd *page_rd; struct csv3_secure_call_cmd *page_wr; diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c index 6f2106ac3bf4..4f80c97798de 100644 --- a/arch/x86/kernel/csv.c +++ b/arch/x86/kernel/csv.c @@ -34,7 +34,7 @@ void __init csv_early_reset_memory(struct boot_params *bp) return; csv3_scan_secure_call_pages(bp); - csv3_early_secure_call(0, 0, CSV3_SECURE_CMD_RESET); + csv3_early_secure_call_ident_map(0, 0, CSV3_SECURE_CMD_RESET); } void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) @@ -43,7 +43,8 @@ void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) return; if (pages) - csv3_early_secure_call(__pa(vaddr), pages, CSV3_SECURE_CMD_DEC); + csv3_early_secure_call_ident_map(__pa(vaddr), pages, + CSV3_SECURE_CMD_DEC); } void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) @@ -52,7 +53,8 @@ void __init 
csv_early_update_memory_enc(u64 vaddr, u64 pages) return; if (pages) - csv3_early_secure_call(__pa(vaddr), pages, CSV3_SECURE_CMD_ENC); + csv3_early_secure_call_ident_map(__pa(vaddr), pages, + CSV3_SECURE_CMD_ENC); } static void __init csv3_alloc_secure_call_data(int cpu) -- Gitee From 17996776362ceac53bebed9b6fdb58b8af9678c8 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 21 Oct 2024 20:12:01 +0800 Subject: [PATCH 1616/2138] anolis: x86/Kconfig: Select CMA if CONFIG_HYGON_CSV=y ANBZ: #11454 This will enable CMA automatically when CONFIG_HYGON_CSV=y. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4015 --- arch/x86/Kconfig | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d8d988e96e9a..1936bbf7d2d1 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2048,7 +2048,9 @@ config EFI_RUNTIME_MAP config HYGON_CSV bool "Hygon secure virtualization CSV support" default y - depends on CPU_SUP_HYGON && AMD_MEM_ENCRYPT && CMA + depends on CPU_SUP_HYGON && AMD_MEM_ENCRYPT + select MMU + select CMA help Hygon CSV integrates secure processor, memory encryption and memory isolation to provide the ability to protect guest's private -- Gitee From 3caa58f9cf6353eb0d72cc19b2a723279f8b9570 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 22 Oct 2024 11:10:02 +0800 Subject: [PATCH 1617/2138] anolis: KVM: SVM: CSV: Skip initialization if CSV is unsupported ANBZ: #11454 Also keep csv_init() and csv_exit() in pair. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4015 --- arch/x86/kvm/svm/csv.c | 20 ++++++++++++++++---- arch/x86/kvm/svm/csv.h | 2 ++ arch/x86/kvm/svm/svm.c | 15 ++++++++++++++- 3 files changed, 32 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 07dc910edb79..260bd45759f2 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -1442,7 +1442,7 @@ static void csv_guest_memory_reclaimed(struct kvm *kvm) } } -static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) +static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; int r = -EINVAL; @@ -1493,12 +1493,24 @@ static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) return r; } +void csv_exit(void) +{ +} + void __init csv_init(struct kvm_x86_ops *ops) { - if (boot_cpu_has(X86_FEATURE_CSV3)) { - memcpy(&csv_x86_ops, ops, sizeof(struct kvm_x86_ops)); + /* + * Hygon CSV is indicated by X86_FEATURE_SEV, return directly if CSV + * is unsupported. 
+ */ + if (!boot_cpu_has(X86_FEATURE_SEV)) + return; + + memcpy(&csv_x86_ops, ops, sizeof(struct kvm_x86_ops)); + + ops->mem_enc_ioctl = csv_mem_enc_ioctl; - ops->mem_enc_ioctl = csv_mem_enc_op; + if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) { ops->vm_destroy = csv_vm_destroy; ops->vm_size = sizeof(struct kvm_svm_csv); ops->handle_exit = csv_handle_exit; diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index d8db8423cd92..71557ec7cc3a 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -15,10 +15,12 @@ #ifdef CONFIG_HYGON_CSV void __init csv_init(struct kvm_x86_ops *ops); +void csv_exit(void); #else /* !CONFIG_HYGON_CSV */ static inline void __init csv_init(struct kvm_x86_ops *ops) { } +static inline void csv_exit(void) { } #endif /* CONFIG_HYGON_CSV */ diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 20a5d3513832..111f977cde6c 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5506,6 +5506,10 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = { static void __svm_exit(void) { + /* Unregister CSV specific interface for Hygon CPUs */ + if (is_x86_vendor_hygon()) + csv_exit(); + kvm_x86_vendor_exit(); cpu_emergency_unregister_virt_callback(svm_emergency_disable); @@ -5520,12 +5524,21 @@ static int __init svm_init(void) if (!kvm_is_svm_supported()) return -EOPNOTSUPP; + /* Register CSV specific interface for Hygon CPUs */ if (is_x86_vendor_hygon()) csv_init(&svm_x86_ops); r = kvm_x86_vendor_init(&svm_init_ops); - if (r) + if (r) { + /* + * Unregister CSV specific interface for Hygon CPUs + * if error occurs. 
+ */ + if (is_x86_vendor_hygon()) + csv_exit(); + return r; + } cpu_emergency_register_virt_callback(svm_emergency_disable); -- Gitee From f526511740b3b3ff7a74d685831acc0fc8fe4115 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 22 Oct 2024 11:33:42 +0800 Subject: [PATCH 1618/2138] anolis: KVM: SVM: CSV: Introduce hygon_kvm_hooks to save local functions and variables ANBZ: #11454 We'll attempt to reduce code intrusion in native files in KVM by move most of the Hygon CSV code to Hygon specific files. But many functions and variables in native code are not visible for Hygon specific files, introduce hygon_kvm_hooks to save these functions and variables so that to access them in Hygon specific code. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4015 --- arch/x86/kvm/svm/csv.c | 3 +++ arch/x86/kvm/svm/csv.h | 8 ++++++++ arch/x86/kvm/svm/sev.c | 22 ++++++++++++++++++++++ 3 files changed, 33 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 260bd45759f2..0113f38b35ff 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -25,6 +25,9 @@ #undef pr_fmt #define pr_fmt(fmt) "CSV: " fmt +/* Function and variable pointers for hooks */ +struct hygon_kvm_hooks_table hygon_kvm_hooks; + struct encrypt_data_block { struct { u64 npages: 12; diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 71557ec7cc3a..0542f62f340a 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -14,6 +14,14 @@ #ifdef CONFIG_HYGON_CSV +/* + * Hooks table: a table of function and variable pointers filled in + * when module init. 
+ */ +extern struct hygon_kvm_hooks_table { + bool sev_hooks_installed; +} hygon_kvm_hooks; + void __init csv_init(struct kvm_x86_ops *ops); void csv_exit(void); diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index c3f22fa97739..a9f2a0979942 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -34,6 +34,8 @@ #include "cpuid.h" #include "trace.h" +#include "csv.h" + #ifndef CONFIG_KVM_AMD_SEV /* * When this config is not defined, SEV feature is not supported and APIs in @@ -2571,6 +2573,14 @@ void __init sev_set_cpu_caps(void) kvm_cpu_cap_clear(X86_FEATURE_SEV_ES); } +#ifdef CONFIG_HYGON_CSV +/* Code to set all of the function and vaiable pointers */ +static void sev_install_hooks(void) +{ + hygon_kvm_hooks.sev_hooks_installed = true; +} +#endif + void __init sev_hardware_setup(void) { #ifdef CONFIG_KVM_AMD_SEV @@ -2715,6 +2725,18 @@ void __init sev_hardware_setup(void) if (!sev_es_enabled || !cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP) || !cpu_feature_enabled(X86_FEATURE_NO_NESTED_DATA_BP)) sev_es_debug_swap_enabled = false; + +#ifdef CONFIG_HYGON_CSV + /* Setup resources which are necessary for HYGON CSV */ + if (is_x86_vendor_hygon()) { + /* + * Install sev related function and variable pointers hooks + * no matter @sev_enabled is false. + */ + sev_install_hooks(); + } +#endif + #endif } -- Gitee From 24ed1c09b99a0f8ecde9c9c213aed8e2d14cec26 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 22 Oct 2024 12:06:07 +0800 Subject: [PATCH 1619/2138] anolis: KVM: SVM: CSV: Move Hygon CSV attestation handler to csv.c ANBZ: #11454 Rename the handler name from sev_vm_attestation() to csv_vm_attestation(). 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4015 --- arch/x86/include/asm/kvm_host.h | 4 ++ arch/x86/kvm/svm/csv.c | 72 ++++++++++++++++++++++++++++++++- arch/x86/kvm/svm/csv.h | 9 +++++ arch/x86/kvm/svm/sev.c | 72 +++------------------------------ arch/x86/kvm/svm/svm.c | 1 - arch/x86/kvm/svm/svm.h | 1 - arch/x86/kvm/x86.c | 4 +- 7 files changed, 90 insertions(+), 73 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index b96fe390a9c4..97fc08a49c91 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1757,9 +1757,13 @@ struct kvm_x86_ops { gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags); + /* + * Interfaces for HYGON CSV guest + */ int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len); int (*control_pre_system_reset)(struct kvm *kvm); int (*control_post_system_reset)(struct kvm *kvm); + int (*arch_hypercall)(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 a2, u64 a3); }; diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 0113f38b35ff..318723c9ab7f 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -28,6 +28,75 @@ /* Function and variable pointers for hooks */ struct hygon_kvm_hooks_table hygon_kvm_hooks; +static struct kvm_x86_ops csv_x86_ops; +static const char csv_vm_mnonce[] = "VM_ATTESTATION"; + +int csv_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_attestation_report *data = NULL; + struct page **pages; + unsigned long guest_uaddr, n; + int ret = 0, offset, error; + + if (!sev_guest(kvm) || !hygon_kvm_hooks.sev_hooks_installed) + return -ENOTTY; + + /* + * The physical address of guest must valid and page aligned, and + * the length of guest memory region must be page size aligned. 
+ */ + if (!gpa || (gpa & ~PAGE_MASK) || (len & ~PAGE_MASK)) { + pr_err("invalid guest address or length\n"); + return -EFAULT; + } + + guest_uaddr = gfn_to_hva(kvm, gpa_to_gfn(gpa)); + pages = hygon_kvm_hooks.sev_pin_memory(kvm, guest_uaddr, len, &n, 1); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + /* + * The attestation report must be copied into contiguous memory region, + * lets verify that userspace memory pages are contiguous before we + * issue commmand. + */ + if (hygon_kvm_hooks.get_num_contig_pages(0, pages, n) != n) { + ret = -EINVAL; + goto e_unpin_memory; + } + + ret = -ENOMEM; + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_unpin_memory; + + /* csv_vm_mnonce indicates attestation request from guest */ + if (sizeof(csv_vm_mnonce) >= sizeof(data->mnonce)) { + ret = -EINVAL; + goto e_free; + } + + memcpy(data->mnonce, csv_vm_mnonce, sizeof(csv_vm_mnonce)); + + offset = guest_uaddr & (PAGE_SIZE - 1); + data->address = __sme_page_pa(pages[0]) + offset; + data->len = len; + + data->handle = sev->handle; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, + data, &error); + + if (ret) + pr_err("vm attestation ret %#x, error %#x\n", ret, error); + +e_free: + kfree(data); +e_unpin_memory: + hygon_kvm_hooks.sev_unpin_memory(kvm, pages, n); + return ret; +} + struct encrypt_data_block { struct { u64 npages: 12; @@ -101,8 +170,6 @@ struct secure_memory_region { u64 hpa; }; -static struct kvm_x86_ops csv_x86_ops; - static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm) { return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm); @@ -1512,6 +1579,7 @@ void __init csv_init(struct kvm_x86_ops *ops) memcpy(&csv_x86_ops, ops, sizeof(struct kvm_x86_ops)); ops->mem_enc_ioctl = csv_mem_enc_ioctl; + ops->vm_attestation = csv_vm_attestation; if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) { ops->vm_destroy = csv_vm_destroy; diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h 
index 0542f62f340a..655fe457b27f 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -20,6 +20,15 @@ */ extern struct hygon_kvm_hooks_table { bool sev_hooks_installed; + int (*sev_issue_cmd)(struct kvm *kvm, int id, void *data, int *error); + unsigned long (*get_num_contig_pages)(unsigned long idx, + struct page **inpages, + unsigned long npages); + struct page **(*sev_pin_memory)(struct kvm *kvm, unsigned long uaddr, + unsigned long ulen, unsigned long *n, + int write); + void (*sev_unpin_memory)(struct kvm *kvm, struct page **pages, + unsigned long npages); } hygon_kvm_hooks; void __init csv_init(struct kvm_x86_ops *ops); diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index a9f2a0979942..dfdcb10483d2 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -81,8 +81,6 @@ static unsigned long *sev_reclaim_asid_bitmap; static DEFINE_MUTEX(csv_cmd_batch_mutex); -static const char sev_vm_mnonce[] = "VM_ATTESTATION"; - static int alloc_trans_mempool(void); static void free_trans_mempool(void); @@ -2577,6 +2575,11 @@ void __init sev_set_cpu_caps(void) /* Code to set all of the function and vaiable pointers */ static void sev_install_hooks(void) { + hygon_kvm_hooks.sev_issue_cmd = sev_issue_cmd; + hygon_kvm_hooks.get_num_contig_pages = get_num_contig_pages; + hygon_kvm_hooks.sev_pin_memory = sev_pin_memory; + hygon_kvm_hooks.sev_unpin_memory = sev_unpin_memory; + hygon_kvm_hooks.sev_hooks_installed = true; } #endif @@ -3636,71 +3639,6 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1); } -int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) -{ - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - struct sev_data_attestation_report *data = NULL; - struct page **pages; - unsigned long guest_uaddr, n; - int ret = 0, offset, error; - - if (!sev_guest(kvm) || !is_x86_vendor_hygon()) - return -ENOTTY; - - /* - * The physical address of guest 
must valid and page aligned, and - * the length of guest memory region must be page size aligned. - */ - if (!gpa || (gpa & ~PAGE_MASK) || (len & ~PAGE_MASK)) { - pr_err("invalid guest address or length\n"); - return -EFAULT; - } - - guest_uaddr = gfn_to_hva(kvm, gpa_to_gfn(gpa)); - pages = sev_pin_memory(kvm, guest_uaddr, len, &n, 1); - if (IS_ERR(pages)) - return PTR_ERR(pages); - - /* - * The attestation report must be copied into contiguous memory region, - * lets verify that userspace memory pages are contiguous before we - * issue commmand. - */ - if (get_num_contig_pages(0, pages, n) != n) { - ret = -EINVAL; - goto e_unpin_memory; - } - - ret = -ENOMEM; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - goto e_unpin_memory; - - /* sev_vm_mnonce indicates attestation request from guest */ - if (sizeof(sev_vm_mnonce) >= sizeof(data->mnonce)) { - ret = -EINVAL; - goto e_free; - } - - memcpy(data->mnonce, sev_vm_mnonce, sizeof(sev_vm_mnonce)); - - offset = guest_uaddr & (PAGE_SIZE - 1); - data->address = __sme_page_pa(pages[0]) + offset; - data->len = len; - - data->handle = sev->handle; - ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, data, &error); - - if (ret) - pr_err("vm attestation ret %#x, error %#x\n", ret, error); - -e_free: - kfree(data); -e_unpin_memory: - sev_unpin_memory(kvm, pages, n); - return ret; -} - /*--1024--1023--1024--1023--*/ #define TRANS_MEMPOOL_1ST_BLOCK_OFFSET 0 #define TRANS_MEMPOOL_2ND_BLOCK_OFFSET (1024 << PAGE_SHIFT) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 111f977cde6c..866f5ada1cfa 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5205,7 +5205,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector, .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons, - .vm_attestation = sev_vm_attestation, .control_pre_system_reset = csv_control_pre_system_reset, .control_post_system_reset = 
csv_control_post_system_reset, .arch_hypercall = kvm_hygon_arch_hypercall, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 0c50689a018a..b4346ea313ab 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -738,7 +738,6 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector); void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa); void sev_es_unmap_ghcb(struct vcpu_svm *svm); -int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len); int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa); int csv_control_pre_system_reset(struct kvm *kvm); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2b21096fc18b..3fc7cd1ce849 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9899,7 +9899,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) } if (static_call(kvm_x86_get_cpl)(vcpu) != 0 && - !(nr == KVM_HC_VM_ATTESTATION || nr == KVM_HC_PSP_OP)) { + !(is_x86_vendor_hygon() && (nr == KVM_HC_VM_ATTESTATION || nr == KVM_HC_PSP_OP))) { ret = -KVM_EPERM; goto out; } @@ -9964,7 +9964,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) } case KVM_HC_VM_ATTESTATION: ret = -KVM_ENOSYS; - if (kvm_x86_ops.vm_attestation) + if (is_x86_vendor_hygon() && kvm_x86_ops.vm_attestation) ret = static_call(kvm_x86_vm_attestation)(vcpu->kvm, a0, a1); break; case KVM_HC_PSP_OP: -- Gitee From 7c2aaef1b9fc7cda56804e91cf2ce88b0487f2f2 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 22 Oct 2024 14:17:45 +0800 Subject: [PATCH 1620/2138] anolis: KVM: SVM: CSV: Move Hygon CSV RING_BUFFER handler to csv.c ANBZ: #11454 This will reduce code intrusion in sev.c. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4015 --- arch/x86/include/asm/svm.h | 20 -- arch/x86/kvm/svm/csv.c | 536 ++++++++++++++++++++++++++++++++++++- arch/x86/kvm/svm/csv.h | 27 ++ arch/x86/kvm/svm/sev.c | 536 +------------------------------------ include/uapi/linux/kvm.h | 32 ++- 5 files changed, 580 insertions(+), 571 deletions(-) diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 24b6a7e60f33..3ac0ffc4f3e2 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -680,24 +680,4 @@ DEFINE_GHCB_ACCESSORS(sw_exit_info_2) DEFINE_GHCB_ACCESSORS(sw_scratch) DEFINE_GHCB_ACCESSORS(xcr0) -/* same to the ring buffer max num */ -#define SVM_RING_BUFFER_MAX 4094 - -struct csv_ringbuf_info_item { - struct page **pages; - uintptr_t hdr_vaddr; - uintptr_t trans_vaddr; - uintptr_t data_vaddr; - uintptr_t trans_uaddr; - uintptr_t hdr_uaddr; - unsigned long trans_len; - unsigned long hdr_len; - unsigned long n; -}; - -struct csv_ringbuf_infos { - struct csv_ringbuf_info_item *item[SVM_RING_BUFFER_MAX]; - int num; -}; - #endif diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 318723c9ab7f..04dab13cc06d 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -30,6 +30,29 @@ struct hygon_kvm_hooks_table hygon_kvm_hooks; static struct kvm_x86_ops csv_x86_ops; static const char csv_vm_mnonce[] = "VM_ATTESTATION"; +static DEFINE_MUTEX(csv_cmd_batch_mutex); + +static int __csv_issue_ringbuf_cmds(int fd, int *psp_ret) +{ + struct fd f; + int ret; + + f = fdget(fd); + if (!f.file) + return -EBADF; + + ret = csv_issue_ringbuf_cmds_external_user(f.file, psp_ret); + + fdput(f); + return ret; +} + +static int csv_issue_ringbuf_cmds(struct kvm *kvm, int *psp_ret) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + + return __csv_issue_ringbuf_cmds(sev->fd, psp_ret); +} int csv_vm_attestation(struct kvm *kvm, unsigned long gpa, 
unsigned long len) { @@ -97,6 +120,495 @@ int csv_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) return ret; } +/*--1024--1023--1024--1023--*/ +#define TRANS_MEMPOOL_1ST_BLOCK_OFFSET 0 +#define TRANS_MEMPOOL_2ND_BLOCK_OFFSET (1024 << PAGE_SHIFT) +#define TRANS_MEMPOOL_3RD_BLOCK_OFFSET (2047 << PAGE_SHIFT) +#define TRANS_MEMPOOL_4TH_BLOCK_OFFSET (3071 << PAGE_SHIFT) +#define TRANS_MEMPOOL_BLOCKS_MAX_OFFSET (4094 << PAGE_SHIFT) +#define TRANS_MEMPOOL_BLOCK_NUM 4 +#define TRANS_MEMPOOL_BLOCK_SIZE (1024 * PAGE_SIZE) + +static size_t g_mempool_offset; +void *g_trans_mempool[TRANS_MEMPOOL_BLOCK_NUM] = { 0, }; + +static void csv_reset_mempool_offset(void) +{ + g_mempool_offset = 0; +} + +void csv_free_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + kfree(g_trans_mempool[i]); + g_trans_mempool[i] = NULL; + } + + csv_reset_mempool_offset(); +} + +int csv_alloc_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + WARN_ONCE(g_trans_mempool[i], + "g_trans_mempool[%d] was tainted\n", i); + + g_trans_mempool[i] = kzalloc(TRANS_MEMPOOL_BLOCK_SIZE, GFP_KERNEL); + if (!g_trans_mempool[i]) + goto free_trans_mempool; + } + + csv_reset_mempool_offset(); + return 0; + +free_trans_mempool: + csv_free_trans_mempool(); + pr_warn("Fail to allocate mem pool, CSV(2) live migration will very slow\n"); + + return -ENOMEM; +} + +static void __maybe_unused *get_trans_data_from_mempool(size_t size) +{ + void *trans = NULL; + char *trans_data = NULL; + int i; + size_t offset; + + if (g_mempool_offset < TRANS_MEMPOOL_2ND_BLOCK_OFFSET) { + i = 0; + offset = g_mempool_offset - TRANS_MEMPOOL_1ST_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_3RD_BLOCK_OFFSET) { + i = 1; + offset = g_mempool_offset - TRANS_MEMPOOL_2ND_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_4TH_BLOCK_OFFSET) { + i = 2; + offset = g_mempool_offset - TRANS_MEMPOOL_3RD_BLOCK_OFFSET; + } else if (g_mempool_offset < 
TRANS_MEMPOOL_BLOCKS_MAX_OFFSET) { + i = 3; + offset = g_mempool_offset - TRANS_MEMPOOL_4TH_BLOCK_OFFSET; + } else { + pr_err("mempool is full (offset: %lu)\n", g_mempool_offset); + return NULL; + } + + trans_data = (char *)g_trans_mempool[i]; + if (!trans_data) + return NULL; + + trans = &trans_data[offset]; + g_mempool_offset += size; + + return trans; +} + +static int +csv_send_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_data *data; + struct kvm_sev_send_update_data params; + struct csv_ringbuf_info_item *item; + void *hdr, *trans_data; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)data_ptr, + sizeof(struct kvm_sev_send_update_data))) + return -EFAULT; + + /* + * userspace shouldn't query either header or trans length in ringbuf + * mode. 
+ */ + if (!params.trans_len || !params.hdr_len) + return -EINVAL; + + if (!params.trans_uaddr || !params.guest_uaddr || + !params.guest_len || !params.hdr_uaddr) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + /* Pin guest memory */ + guest_page = hygon_kvm_hooks.sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 0); + if (IS_ERR(guest_page)) + return PTR_ERR(guest_page); + + /* Allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kzalloc(params.hdr_len, GFP_KERNEL); + if (!hdr) + goto e_unpin; + + trans_data = get_trans_data_from_mempool(params.trans_len); + if (!trans_data) + goto e_free_hdr; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans_data); + data->trans_len = params.trans_len; + + /* The SEND_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= *hygon_kvm_hooks.sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = csv_fill_cmd_queue(prio, SEV_CMD_SEND_UPDATE_DATA, data, 0); + if (ret) + goto e_free; + + /* + * Create item to save page info and pointer, which will be freed + * in function csv_command_batch because it will be used after PSP + * return for copy_to_user. 
+ */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto e_free; + } + + item->pages = guest_page; + item->n = n; + item->hdr_vaddr = (uintptr_t)hdr; + item->hdr_uaddr = params.hdr_uaddr; + item->hdr_len = params.hdr_len; + item->trans_vaddr = (uintptr_t)trans_data; + item->trans_uaddr = params.trans_uaddr; + item->trans_len = params.trans_len; + item->data_vaddr = (uintptr_t)data; + + ringbuf_infos->item[ringbuf_infos->num++] = item; + + /* copy to ring buffer success, data freed after commands completed */ + return 0; + +e_free: + kfree(data); +e_free_hdr: + kfree(hdr); +e_unpin: + hygon_kvm_hooks.sev_unpin_memory(kvm, guest_page, n); + return ret; +} + +static int +csv_send_update_data_copy_to_user(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i, ret = 0; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)item->trans_uaddr, + (void *)item->trans_vaddr, item->trans_len)) { + ret = -EFAULT; + break; + } + + /* Copy packet header to userspace. 
*/ + if (copy_to_user((void __user *)item->hdr_uaddr, + (void *)item->hdr_vaddr, item->hdr_len)) { + ret = -EFAULT; + break; + } + } + + return ret; +} + +static int +csv_receive_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_receive_update_data params; + struct sev_data_receive_update_data *data; + struct csv_ringbuf_info_item *item; + void *hdr = NULL, *trans = NULL; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -EINVAL; + + if (copy_from_user(¶ms, (void __user *)data_ptr, + sizeof(struct kvm_sev_receive_update_data))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_uaddr || !params.guest_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + ret = -ENOMEM; + trans = get_trans_data_from_mempool(params.trans_len); + if (!trans) + goto e_free_hdr; + + if (copy_from_user(trans, (void __user *)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans); + data->trans_len = params.trans_len; + + /* Pin guest memory */ + guest_page = hygon_kvm_hooks.sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 1); + if (IS_ERR(guest_page)) { + ret = PTR_ERR(guest_page); + goto e_free; + } + + /* + * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP + * encrypts the written data with the 
guest's key, and the cache may + * contain dirty, unencrypted data. + */ + hygon_kvm_hooks.sev_clflush_pages(guest_page, n); + + /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= *hygon_kvm_hooks.sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = csv_fill_cmd_queue(prio, SEV_CMD_RECEIVE_UPDATE_DATA, data, 0); + + if (ret) + goto e_unpin; + + /* + * Create item to save page info and pointer, whitch will be freed + * in function csv_command_batch because it will be used after PSP + * return for copy_to_user. + */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto e_unpin; + } + + item->pages = guest_page; + item->n = n; + item->hdr_vaddr = (uintptr_t)hdr; + item->trans_vaddr = (uintptr_t)trans; + item->data_vaddr = (uintptr_t)data; + + ringbuf_infos->item[ringbuf_infos->num++] = item; + + /* copy to ring buffer success, data freed after commands completed */ + return 0; + +e_unpin: + hygon_kvm_hooks.sev_unpin_memory(kvm, guest_page, n); +e_free: + kfree(data); +e_free_hdr: + kfree(hdr); + + return ret; +} + +static int csv_ringbuf_infos_free(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + if (item) { + if (item->data_vaddr) + kfree((void *)item->data_vaddr); + + if (item->hdr_vaddr) + kfree((void *)item->hdr_vaddr); + + if (item->pages) + hygon_kvm_hooks.sev_unpin_memory(kvm, item->pages, + item->n); + + kfree(item); + + ringbuf_infos->item[i] = NULL; + } + } + + return 0; +} + +typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos); +typedef int (*csv_ringbuf_output_fn)(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos); + +static int get_cmd_helpers(__u32 
cmd, + csv_ringbuf_input_fn *to_ringbuf_fn, + csv_ringbuf_output_fn *to_user_fn) +{ + int ret = 0; + + /* copy commands to ring buffer*/ + switch (cmd) { + case KVM_SEV_SEND_UPDATE_DATA: + *to_ringbuf_fn = csv_send_update_data_to_ringbuf; + *to_user_fn = csv_send_update_data_copy_to_user; + break; + case KVM_SEV_RECEIVE_UPDATE_DATA: + *to_ringbuf_fn = csv_receive_update_data_to_ringbuf; + *to_user_fn = NULL; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + int ret; + struct kvm_csv_command_batch params; + uintptr_t node_addr; + struct csv_ringbuf_infos *ringbuf_infos; + csv_ringbuf_input_fn csv_cmd_to_ringbuf_fn = NULL; + csv_ringbuf_output_fn csv_copy_to_user_fn = NULL; + int prio = CSV_COMMAND_PRIORITY_HIGH; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_csv_command_batch))) + return -EFAULT; + + /* return directly if node list is NULL */ + if (!params.csv_batch_list_uaddr) + return 0; + + /* ring buffer init */ + if (csv_ring_buffer_queue_init()) + return -EINVAL; + + if (get_cmd_helpers(params.command_id, + &csv_cmd_to_ringbuf_fn, &csv_copy_to_user_fn)) { + ret = -EINVAL; + goto err_free_ring_buffer; + } + + ringbuf_infos = kzalloc(sizeof(*ringbuf_infos), GFP_KERNEL); + if (!ringbuf_infos) { + ret = -ENOMEM; + goto err_free_ring_buffer; + } + + node_addr = (uintptr_t)params.csv_batch_list_uaddr; + while (node_addr) { + struct kvm_csv_batch_list_node node; + + if (copy_from_user(&node, (void __user *)node_addr, + sizeof(struct kvm_csv_batch_list_node))) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + if (ringbuf_infos->num > SVM_RING_BUFFER_MAX) { + pr_err("%s: ring num is too large:%d, cmd:0x%x\n", + __func__, ringbuf_infos->num, params.command_id); + + ret = -EINVAL; + goto err_free_ring_buffer_infos_items; + } + + if (csv_cmd_to_ringbuf_fn(kvm, prio, + 
(uintptr_t)node.cmd_data_addr, + ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + /* 1st half set to HIGH queue, 2nd half set to LOW queue */ + if (ringbuf_infos->num == SVM_RING_BUFFER_MAX / 2) + prio = CSV_COMMAND_PRIORITY_LOW; + + node_addr = node.next_cmd_addr; + } + + /* ring buffer process */ + ret = csv_issue_ringbuf_cmds(kvm, &argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + ret = csv_check_stat_queue_status(&argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + if (csv_copy_to_user_fn && csv_copy_to_user_fn(kvm, ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + +err_free_ring_buffer_infos_items: + csv_ringbuf_infos_free(kvm, ringbuf_infos); + kfree(ringbuf_infos); + csv_reset_mempool_offset(); + +err_free_ring_buffer: + csv_ring_buffer_queue_free(); + + return ret; +} + struct encrypt_data_block { struct { u64 npages: 12; @@ -1515,7 +2027,11 @@ static void csv_guest_memory_reclaimed(struct kvm *kvm) static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; - int r = -EINVAL; + int r; + + if (!hygon_kvm_hooks.sev_hooks_installed || + !(*hygon_kvm_hooks.sev_enabled)) + return -ENOTTY; if (!argp) return 0; @@ -1526,6 +2042,11 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) mutex_lock(&kvm->lock); switch (sev_cmd.id) { + case KVM_CSV_COMMAND_BATCH: + mutex_lock(&csv_cmd_batch_mutex); + r = csv_command_batch(kvm, &sev_cmd); + mutex_unlock(&csv_cmd_batch_mutex); + break; case KVM_CSV3_INIT: r = csv3_guest_init(kvm, &sev_cmd); break; @@ -1548,18 +2069,21 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) r = csv3_receive_encrypt_context(kvm, &sev_cmd); break; default: + /* + * If the command is compatible between CSV and SEV, the + * native implementation of the driver is invoked. + * Release the mutex before calling the native ioctl function + * because it will acquires the mutex. 
+ */ mutex_unlock(&kvm->lock); if (likely(csv_x86_ops.mem_enc_ioctl)) - r = csv_x86_ops.mem_enc_ioctl(kvm, argp); - goto out; + return csv_x86_ops.mem_enc_ioctl(kvm, argp); } - mutex_unlock(&kvm->lock); - if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd))) r = -EFAULT; -out: + mutex_unlock(&kvm->lock); return r; } diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 655fe457b27f..5c62887cbdc1 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -12,6 +12,26 @@ #include +/* same to the ring buffer max num */ +#define SVM_RING_BUFFER_MAX 4094 + +struct csv_ringbuf_info_item { + struct page **pages; + uintptr_t hdr_vaddr; + uintptr_t trans_vaddr; + uintptr_t data_vaddr; + uintptr_t trans_uaddr; + uintptr_t hdr_uaddr; + unsigned long trans_len; + unsigned long hdr_len; + unsigned long n; +}; + +struct csv_ringbuf_infos { + struct csv_ringbuf_info_item *item[SVM_RING_BUFFER_MAX]; + int num; +}; + #ifdef CONFIG_HYGON_CSV /* @@ -20,6 +40,8 @@ */ extern struct hygon_kvm_hooks_table { bool sev_hooks_installed; + bool *sev_enabled; + unsigned long *sev_me_mask; int (*sev_issue_cmd)(struct kvm *kvm, int id, void *data, int *error); unsigned long (*get_num_contig_pages)(unsigned long idx, struct page **inpages, @@ -29,15 +51,20 @@ extern struct hygon_kvm_hooks_table { int write); void (*sev_unpin_memory)(struct kvm *kvm, struct page **pages, unsigned long npages); + void (*sev_clflush_pages)(struct page *pages[], unsigned long npages); } hygon_kvm_hooks; void __init csv_init(struct kvm_x86_ops *ops); void csv_exit(void); +int csv_alloc_trans_mempool(void); +void csv_free_trans_mempool(void); #else /* !CONFIG_HYGON_CSV */ static inline void __init csv_init(struct kvm_x86_ops *ops) { } static inline void csv_exit(void) { } +static inline int csv_alloc_trans_mempool(void) { return -ENOMEM; } +static inline void csv_free_trans_mempool(void) { } #endif /* CONFIG_HYGON_CSV */ diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 
dfdcb10483d2..528d4906837a 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -79,11 +79,6 @@ static unsigned int nr_asids; static unsigned long *sev_asid_bitmap; static unsigned long *sev_reclaim_asid_bitmap; -static DEFINE_MUTEX(csv_cmd_batch_mutex); - -static int alloc_trans_mempool(void); -static void free_trans_mempool(void); - struct enc_region { struct list_head list; unsigned long npages; @@ -455,28 +450,6 @@ static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error) return __sev_issue_cmd(sev->fd, id, data, error); } -static int __csv_issue_ringbuf_cmds(int fd, int *psp_ret) -{ - struct fd f; - int ret; - - f = fdget(fd); - if (!f.file) - return -EBADF; - - ret = csv_issue_ringbuf_cmds_external_user(f.file, psp_ret); - - fdput(f); - return ret; -} - -static int csv_issue_ringbuf_cmds(struct kvm *kvm, int *psp_ret) -{ - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - - return __csv_issue_ringbuf_cmds(sev->fd, psp_ret); -} - static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -2218,8 +2191,6 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd) return ret; } -static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp); - int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -2316,14 +2287,6 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_SEV_RECEIVE_FINISH: r = sev_receive_finish(kvm, &sev_cmd); break; - case KVM_CSV_COMMAND_BATCH: - if (is_x86_vendor_hygon()) { - mutex_lock(&csv_cmd_batch_mutex); - r = csv_command_batch(kvm, &sev_cmd); - mutex_unlock(&csv_cmd_batch_mutex); - break; - } - fallthrough; default: r = -EINVAL; goto out; @@ -2575,10 +2538,13 @@ void __init sev_set_cpu_caps(void) /* Code to set all of the function and vaiable pointers */ static void sev_install_hooks(void) { + hygon_kvm_hooks.sev_enabled = &sev_enabled; + 
hygon_kvm_hooks.sev_me_mask = &sev_me_mask; hygon_kvm_hooks.sev_issue_cmd = sev_issue_cmd; hygon_kvm_hooks.get_num_contig_pages = get_num_contig_pages; hygon_kvm_hooks.sev_pin_memory = sev_pin_memory; hygon_kvm_hooks.sev_unpin_memory = sev_unpin_memory; + hygon_kvm_hooks.sev_clflush_pages = sev_clflush_pages; hygon_kvm_hooks.sev_hooks_installed = true; } @@ -2650,7 +2616,7 @@ void __init sev_hardware_setup(void) #endif /* Initialize buffer to accelerate migration of CSV/CSV2 guest */ - if (alloc_trans_mempool()) { + if (csv_alloc_trans_mempool()) { #ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID kfree(csv_asid_userid_array); csv_asid_userid_array = NULL; @@ -2752,7 +2718,7 @@ void sev_hardware_unsetup(void) sev_flush_asids(1, max_sev_asid); if (is_x86_vendor_hygon()) { - free_trans_mempool(); + csv_free_trans_mempool(); #ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID kfree(csv_asid_userid_array); #endif @@ -3639,498 +3605,6 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1); } -/*--1024--1023--1024--1023--*/ -#define TRANS_MEMPOOL_1ST_BLOCK_OFFSET 0 -#define TRANS_MEMPOOL_2ND_BLOCK_OFFSET (1024 << PAGE_SHIFT) -#define TRANS_MEMPOOL_3RD_BLOCK_OFFSET (2047 << PAGE_SHIFT) -#define TRANS_MEMPOOL_4TH_BLOCK_OFFSET (3071 << PAGE_SHIFT) -#define TRANS_MEMPOOL_BLOCKS_MAX_OFFSET (4094 << PAGE_SHIFT) -#define TRANS_MEMPOOL_BLOCK_NUM 4 -#define TRANS_MEMPOOL_BLOCK_SIZE (1024 * PAGE_SIZE) - -static size_t g_mempool_offset; -void *g_trans_mempool[TRANS_MEMPOOL_BLOCK_NUM] = { 0, }; - -static void reset_mempool_offset(void) -{ - g_mempool_offset = 0; -} - -static int alloc_trans_mempool(void) -{ - int i; - - for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { - WARN_ONCE(g_trans_mempool[i], - "CSV: g_trans_mempool[%d] was tainted\n", i); - - g_trans_mempool[i] = kzalloc(TRANS_MEMPOOL_BLOCK_SIZE, GFP_KERNEL); - if (!g_trans_mempool[i]) - goto free_trans_mempool; - } - - g_mempool_offset = 0; - return 0; - -free_trans_mempool: - for 
(i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { - kfree(g_trans_mempool[i]); - g_trans_mempool[i] = NULL; - } - - return -ENOMEM; -} - -static void free_trans_mempool(void) -{ - int i; - - for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { - kfree(g_trans_mempool[i]); - g_trans_mempool[i] = NULL; - } - - g_mempool_offset = 0; -} - -static void __maybe_unused *get_trans_data_from_mempool(size_t size) -{ - void *trans = NULL; - char *trans_data = NULL; - int i; - size_t offset; - - if (g_mempool_offset < TRANS_MEMPOOL_2ND_BLOCK_OFFSET) { - i = 0; - offset = g_mempool_offset - TRANS_MEMPOOL_1ST_BLOCK_OFFSET; - } else if (g_mempool_offset < TRANS_MEMPOOL_3RD_BLOCK_OFFSET) { - i = 1; - offset = g_mempool_offset - TRANS_MEMPOOL_2ND_BLOCK_OFFSET; - } else if (g_mempool_offset < TRANS_MEMPOOL_4TH_BLOCK_OFFSET) { - i = 2; - offset = g_mempool_offset - TRANS_MEMPOOL_3RD_BLOCK_OFFSET; - } else if (g_mempool_offset < TRANS_MEMPOOL_BLOCKS_MAX_OFFSET) { - i = 3; - offset = g_mempool_offset - TRANS_MEMPOOL_4TH_BLOCK_OFFSET; - } else { - pr_err("CSV: mempool is full (offset: %lu)\n", g_mempool_offset); - return NULL; - } - - trans_data = (char *)g_trans_mempool[i]; - trans = &trans_data[offset]; - g_mempool_offset += size; - - return trans; -} - -static int csv_ringbuf_infos_free(struct kvm *kvm, - struct csv_ringbuf_infos *ringbuf_infos) -{ - int i; - - for (i = 0; i < ringbuf_infos->num; i++) { - struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; - - if (item) { - if (item->data_vaddr) - kfree((void *)item->data_vaddr); - - if (item->hdr_vaddr) - kfree((void *)item->hdr_vaddr); - - if (item->pages) - sev_unpin_memory(kvm, item->pages, item->n); - - kfree(item); - - ringbuf_infos->item[i] = NULL; - } - } - - return 0; -} - -static int -sev_send_update_data_to_ringbuf(struct kvm *kvm, - int prio, - uintptr_t data_ptr, - struct csv_ringbuf_infos *ringbuf_infos) -{ - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - struct sev_data_send_update_data *data; - struct 
kvm_sev_send_update_data params; - struct csv_ringbuf_info_item *item; - void *hdr, *trans_data; - struct page **guest_page; - unsigned long n; - int ret, offset; - - if (!sev_guest(kvm)) - return -ENOTTY; - - if (copy_from_user(¶ms, (void __user *)data_ptr, - sizeof(struct kvm_sev_send_update_data))) - return -EFAULT; - - /* - * userspace shouldn't query either header or trans length in ringbuf - * mode. - */ - if (!params.trans_len || !params.hdr_len) - return -EINVAL; - - if (!params.trans_uaddr || !params.guest_uaddr || - !params.guest_len || !params.hdr_uaddr) - return -EINVAL; - - /* Check if we are crossing the page boundary */ - offset = params.guest_uaddr & (PAGE_SIZE - 1); - if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) - return -EINVAL; - - /* Pin guest memory */ - guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, - PAGE_SIZE, &n, 0); - if (IS_ERR(guest_page)) - return PTR_ERR(guest_page); - - /* Allocate memory for header and transport buffer */ - ret = -ENOMEM; - hdr = kmalloc(params.hdr_len, GFP_KERNEL); - if (!hdr) - goto e_unpin; - - trans_data = get_trans_data_from_mempool(params.trans_len); - if (!trans_data) - goto e_free_hdr; - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - goto e_free_hdr; - - data->hdr_address = __psp_pa(hdr); - data->hdr_len = params.hdr_len; - data->trans_address = __psp_pa(trans_data); - data->trans_len = params.trans_len; - - /* The SEND_UPDATE_DATA command requires C-bit to be always set. */ - data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + - offset; - data->guest_address |= sev_me_mask; - data->guest_len = params.guest_len; - data->handle = sev->handle; - - ret = csv_fill_cmd_queue(prio, SEV_CMD_SEND_UPDATE_DATA, data, 0); - if (ret) - goto e_free; - - /* - * Create item to save page info and pointer, which will be freed - * in function csv_command_batch because it will be used after PSP - * return for copy_to_user. 
- */ - item = kzalloc(sizeof(*item), GFP_KERNEL); - if (!item) { - ret = -ENOMEM; - goto e_free; - } - - item->pages = guest_page; - item->n = n; - item->hdr_vaddr = (uintptr_t)hdr; - item->trans_vaddr = (uintptr_t)trans_data; - item->data_vaddr = (uintptr_t)data; - item->hdr_uaddr = params.hdr_uaddr; - item->trans_uaddr = params.trans_uaddr; - item->hdr_len = params.hdr_len; - item->trans_len = params.trans_len; - - ringbuf_infos->item[ringbuf_infos->num] = item; - ringbuf_infos->num++; - - /* copy to ring buffer success, data freed after commands completed */ - goto finish; - -e_free: - kfree(data); -e_free_hdr: - kfree(hdr); -e_unpin: - sev_unpin_memory(kvm, guest_page, n); - -finish: - return ret; -} - -static int -sev_send_update_data_copy_to_user(struct kvm *kvm, - struct csv_ringbuf_infos *ringbuf_infos) -{ - int i, ret = 0; - - for (i = 0; i < ringbuf_infos->num; i++) { - struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; - - /* copy transport buffer to user space */ - if (copy_to_user((void __user *)item->trans_uaddr, - (void *)item->trans_vaddr, item->trans_len)) { - ret = -EFAULT; - break; - } - - /* Copy packet header to userspace. 
*/ - if (copy_to_user((void __user *)item->hdr_uaddr, - (void *)item->hdr_vaddr, item->hdr_len)) { - ret = -EFAULT; - break; - } - } - - return ret; -} - -static int -sev_receive_update_data_to_ringbuf(struct kvm *kvm, - int prio, - uintptr_t data_ptr, - struct csv_ringbuf_infos *ringbuf_infos) -{ - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - struct kvm_sev_receive_update_data params; - struct sev_data_receive_update_data *data; - struct csv_ringbuf_info_item *item; - void *hdr = NULL, *trans = NULL; - struct page **guest_page; - unsigned long n; - int ret, offset; - - if (!sev_guest(kvm)) - return -EINVAL; - - if (copy_from_user(¶ms, (void __user *)data_ptr, - sizeof(struct kvm_sev_receive_update_data))) - return -EFAULT; - - if (!params.hdr_uaddr || !params.hdr_len || - !params.guest_uaddr || !params.guest_len || - !params.trans_uaddr || !params.trans_len) - return -EINVAL; - - /* Check if we are crossing the page boundary */ - offset = params.guest_uaddr & (PAGE_SIZE - 1); - if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) - return -EINVAL; - - hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); - if (IS_ERR(hdr)) - return PTR_ERR(hdr); - - ret = -ENOMEM; - trans = get_trans_data_from_mempool(params.trans_len); - if (!trans) - goto e_free_hdr; - - if (copy_from_user(trans, (void __user *)params.trans_uaddr, - params.trans_len)) { - ret = -EFAULT; - goto e_free_hdr; - } - - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - goto e_free_hdr; - - data->hdr_address = __psp_pa(hdr); - data->hdr_len = params.hdr_len; - data->trans_address = __psp_pa(trans); - data->trans_len = params.trans_len; - - /* Pin guest memory */ - guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, - PAGE_SIZE, &n, 1); - if (IS_ERR(guest_page)) { - ret = PTR_ERR(guest_page); - goto e_free; - } - - /* - * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP - * encrypts the written data with the guest's key, and 
the cache may - * contain dirty, unencrypted data. - */ - sev_clflush_pages(guest_page, n); - - /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ - data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + - offset; - data->guest_address |= sev_me_mask; - data->guest_len = params.guest_len; - data->handle = sev->handle; - - ret = csv_fill_cmd_queue(prio, SEV_CMD_RECEIVE_UPDATE_DATA, data, 0); - - if (ret) - goto e_unpin; - - /* - * Create item to save page info and pointer, whitch will be freed - * in function csv_command_batch because it will be used after PSP - * return for copy_to_user. - */ - item = kzalloc(sizeof(*item), GFP_KERNEL); - if (!item) { - ret = -ENOMEM; - goto e_unpin; - } - - item->pages = guest_page; - item->n = n; - item->hdr_vaddr = (uintptr_t)hdr; - item->trans_vaddr = (uintptr_t)trans; - item->data_vaddr = (uintptr_t)data; - - ringbuf_infos->item[ringbuf_infos->num] = item; - ringbuf_infos->num++; - - /* copy to ring buffer success, data freed after commands completed */ - goto finish; - -e_unpin: - sev_unpin_memory(kvm, guest_page, n); -e_free: - kfree(data); -e_free_hdr: - kfree(hdr); - -finish: - return ret; -} - -typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio, - uintptr_t data_ptr, - struct csv_ringbuf_infos *ringbuf_infos); -typedef int (*csv_ringbuf_output_fn)(struct kvm *kvm, - struct csv_ringbuf_infos *ringbuf_infos); - -static int get_cmd_helpers(__u32 cmd, - csv_ringbuf_input_fn *to_ringbuf_fn, - csv_ringbuf_output_fn *to_user_fn) -{ - int ret = 0; - - /* copy commands to ring buffer*/ - switch (cmd) { - case KVM_SEV_SEND_UPDATE_DATA: - *to_ringbuf_fn = sev_send_update_data_to_ringbuf; - *to_user_fn = sev_send_update_data_copy_to_user; - break; - case KVM_SEV_RECEIVE_UPDATE_DATA: - *to_ringbuf_fn = sev_receive_update_data_to_ringbuf; - *to_user_fn = NULL; - break; - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static int csv_command_batch(struct kvm *kvm, struct 
kvm_sev_cmd *argp) -{ - int ret; - struct kvm_csv_command_batch params; - uintptr_t node_addr; - struct csv_ringbuf_infos *ringbuf_infos; - csv_ringbuf_input_fn csv_cmd_to_ringbuf_fn = NULL; - csv_ringbuf_output_fn csv_copy_to_user_fn = NULL; - int prio = CSV_COMMAND_PRIORITY_HIGH; - - if (!sev_guest(kvm)) - return -ENOTTY; - - if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, - sizeof(struct kvm_csv_command_batch))) - return -EFAULT; - - /* return directly if node list is NULL */ - if (!params.csv_batch_list_uaddr) - return 0; - - /* ring buffer init */ - if (csv_ring_buffer_queue_init()) - return -EINVAL; - - if (get_cmd_helpers(params.command_id, - &csv_cmd_to_ringbuf_fn, &csv_copy_to_user_fn)) { - ret = -EINVAL; - goto err_free_ring_buffer; - } - - ringbuf_infos = kzalloc(sizeof(*ringbuf_infos), GFP_KERNEL); - if (!ringbuf_infos) { - ret = -ENOMEM; - goto err_free_ring_buffer; - } - - node_addr = (uintptr_t)params.csv_batch_list_uaddr; - while (node_addr) { - struct kvm_csv_batch_list_node node; - - if (copy_from_user(&node, (void __user *)node_addr, - sizeof(struct kvm_csv_batch_list_node))) { - ret = -EFAULT; - goto err_free_ring_buffer_infos_items; - } - - if (ringbuf_infos->num > SVM_RING_BUFFER_MAX) { - pr_err("%s: ring num is too large:%d, cmd:0x%x\n", - __func__, ringbuf_infos->num, params.command_id); - - ret = -EINVAL; - goto err_free_ring_buffer_infos_items; - } - - if (csv_cmd_to_ringbuf_fn(kvm, prio, - (uintptr_t)node.cmd_data_addr, - ringbuf_infos)) { - ret = -EFAULT; - goto err_free_ring_buffer_infos_items; - } - - /* 1st half set to HIGH queue, 2nd half set to LOW queue */ - if (ringbuf_infos->num == SVM_RING_BUFFER_MAX / 2) - prio = CSV_COMMAND_PRIORITY_LOW; - - node_addr = node.next_cmd_addr; - } - - /* ring buffer process */ - ret = csv_issue_ringbuf_cmds(kvm, &argp->error); - if (ret) - goto err_free_ring_buffer_infos_items; - - ret = csv_check_stat_queue_status(&argp->error); - if (ret) - goto err_free_ring_buffer_infos_items; - 
- if (csv_copy_to_user_fn && csv_copy_to_user_fn(kvm, ringbuf_infos)) { - ret = -EFAULT; - goto err_free_ring_buffer_infos_items; - } - -err_free_ring_buffer_infos_items: - csv_ringbuf_infos_free(kvm, ringbuf_infos); - kfree(ringbuf_infos); - reset_mempool_offset(); - -err_free_ring_buffer: - csv_ring_buffer_queue_free(); - - return ret; -} - int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa) { if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index ec5d3be77663..45121f043349 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1946,9 +1946,6 @@ enum sev_cmd_id { /* Guest Migration Extension */ KVM_SEV_SEND_CANCEL, - /* Hygon CSV batch command */ - KVM_CSV_COMMAND_BATCH = 0x18, - KVM_SEV_NR_MAX, }; @@ -2061,17 +2058,6 @@ struct kvm_sev_receive_update_vmsa { __u32 trans_len; }; -struct kvm_csv_batch_list_node { - __u64 cmd_data_addr; - __u64 addr; - __u64 next_cmd_addr; -}; - -struct kvm_csv_command_batch { - __u32 command_id; - __u64 csv_batch_list_uaddr; -}; - struct kvm_csv_init { __u64 userid_addr; __u32 len; @@ -2319,6 +2305,24 @@ struct kvm_s390_zpci_op { /* flags for kvm_s390_zpci_op->u.reg_aen.flags */ #define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0) +enum csv_cmd_id { + /* HYGON CSV batch command */ + KVM_CSV_COMMAND_BATCH = 0x18, + + KVM_CSV_NR_MAX, +}; + +struct kvm_csv_batch_list_node { + __u64 cmd_data_addr; + __u64 addr; + __u64 next_cmd_addr; +}; + +struct kvm_csv_command_batch { + __u32 command_id; + __u64 csv_batch_list_uaddr; +}; + /* CSV3 command */ enum csv3_cmd_id { KVM_CSV3_NR_MIN = 0xc0, -- Gitee From b7f10c6493a208f2fa69ddf47e80085451764400 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 22 Oct 2024 15:16:16 +0800 Subject: [PATCH 1621/2138] anolis: KVM: SVM: CSV: Move {SEND,RECEIVE}_UPDATE_VMSA handlers to csv.c ANBZ: #11454 Currently, the KVM_SEV_{SEND,RECEIVE}_UPDATE_VMSA handlers are used for Hygon CSV2 guest, move these 
handlers to csv.c. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4015 --- arch/x86/kvm/svm/csv.c | 207 +++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/sev.c | 196 ------------------------------------ include/uapi/linux/kvm.h | 32 +++--- 3 files changed, 223 insertions(+), 212 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 04dab13cc06d..7a8c52c75e6a 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -609,6 +609,195 @@ static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +/* Userspace wants to query either header or trans length. */ +static int +__csv_send_update_vmsa_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv_send_update_vmsa *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_vmsa *vmsa; + int ret; + + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL_ACCOUNT); + if (!vmsa) + return -ENOMEM; + + vmsa->handle = sev->handle; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, + vmsa, &argp->error); + + params->hdr_len = vmsa->hdr_len; + params->trans_len = vmsa->trans_len; + + if (copy_to_user((void __user *)argp->data, params, + sizeof(struct kvm_csv_send_update_vmsa))) + ret = -EFAULT; + + kfree(vmsa); + return ret; +} + +static int csv_send_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_vmsa *vmsa; + struct kvm_csv_send_update_vmsa params; + struct kvm_vcpu *vcpu; + void *hdr, *trans_data; + int ret; + + if (!sev_es_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_csv_send_update_vmsa))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return 
__csv_send_update_vmsa_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.hdr_uaddr) + return -EINVAL; + + /* Get the target vcpu */ + vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); + if (!vcpu) { + pr_err("%s: invalid vcpu\n", __func__); + return -EINVAL; + } + + pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); + + /* allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) + return ret; + + trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT); + if (!trans_data) + goto e_free_hdr; + + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL_ACCOUNT); + if (!vmsa) + goto e_free_trans_data; + + vmsa->hdr_address = __psp_pa(hdr); + vmsa->hdr_len = params.hdr_len; + vmsa->trans_address = __psp_pa(trans_data); + vmsa->trans_len = params.trans_len; + + /* The SEND_UPDATE_VMSA command requires C-bit to be always set. */ + vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | + *hygon_kvm_hooks.sev_me_mask; + vmsa->guest_len = PAGE_SIZE; + vmsa->handle = sev->handle; + + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, + vmsa, &argp->error); + + if (ret) + goto e_free; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + goto e_free; + } + + /* Copy packet header to userspace. 
*/ + ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len); + +e_free: + kfree(vmsa); +e_free_trans_data: + kfree(trans_data); +e_free_hdr: + kfree(hdr); + + return ret; +} + +static int csv_receive_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_receive_update_vmsa params; + struct sev_data_receive_update_vmsa *vmsa; + struct kvm_vcpu *vcpu; + void *hdr = NULL, *trans = NULL; + int ret; + + if (!sev_es_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_csv_receive_update_vmsa))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Get the target vcpu */ + vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); + if (!vcpu) { + pr_err("%s: invalid vcpu\n", __func__); + return -EINVAL; + } + + pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto e_free_hdr; + } + + ret = -ENOMEM; + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL_ACCOUNT); + if (!vmsa) + goto e_free_trans; + + vmsa->hdr_address = __psp_pa(hdr); + vmsa->hdr_len = params.hdr_len; + vmsa->trans_address = __psp_pa(trans); + vmsa->trans_len = params.trans_len; + + /* + * Flush before RECEIVE_UPDATE_VMSA, the PSP encrypts the + * written VMSA memory content with the guest's key), and + * the cache may contain dirty, unencrypted data. + */ + clflush_cache_range(to_svm(vcpu)->sev_es.vmsa, PAGE_SIZE); + + /* The RECEIVE_UPDATE_VMSA command requires C-bit to be always set. 
*/ + vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | + *hygon_kvm_hooks.sev_me_mask; + vmsa->guest_len = PAGE_SIZE; + vmsa->handle = sev->handle; + + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_VMSA, + vmsa, &argp->error); + + if (!ret) + vcpu->arch.guest_state_protected = true; + + kfree(vmsa); +e_free_trans: + kfree(trans); +e_free_hdr: + kfree(hdr); + + return ret; +} + struct encrypt_data_block { struct { u64 npages: 12; @@ -2047,6 +2236,24 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) r = csv_command_batch(kvm, &sev_cmd); mutex_unlock(&csv_cmd_batch_mutex); break; + case KVM_SEV_SEND_UPDATE_VMSA: + /* + * Hygon implement the specific interface, although + * KVM_SEV_SEND_UPDATE_VMSA is the command shared by CSV and + * SEV. The struct sev_data_send_update_vmsa is also shared + * by CSV and SEV, we'll use this structure in the code. + */ + r = csv_send_update_vmsa(kvm, &sev_cmd); + break; + case KVM_SEV_RECEIVE_UPDATE_VMSA: + /* + * Hygon implement the specific interface, although + * KVM_SEV_RECEIVE_UPDATE_VMSA is the command shared by CSV and + * SEV. The struct sev_data_receive_update_vmsa is also shared + * by CSV and SEV, we'll use this structure in the code. + */ + r = csv_receive_update_vmsa(kvm, &sev_cmd); + break; case KVM_CSV3_INIT: r = csv3_guest_init(kvm, &sev_cmd); break; diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 528d4906837a..1fb3c36f38fc 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1525,115 +1525,6 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } -/* Userspace wants to query either header or trans length. 
*/ -static int -__sev_send_update_vmsa_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, - struct kvm_sev_send_update_vmsa *params) -{ - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - struct sev_data_send_update_vmsa *vmsa; - int ret; - - vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL_ACCOUNT); - if (!vmsa) - return -ENOMEM; - - vmsa->handle = sev->handle; - ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, vmsa, &argp->error); - - params->hdr_len = vmsa->hdr_len; - params->trans_len = vmsa->trans_len; - - if (copy_to_user((void __user *)argp->data, params, - sizeof(struct kvm_sev_send_update_vmsa))) - ret = -EFAULT; - - kfree(vmsa); - return ret; -} - -static int sev_send_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) -{ - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - struct sev_data_send_update_vmsa *vmsa; - struct kvm_sev_send_update_vmsa params; - struct kvm_vcpu *vcpu; - void *hdr, *trans_data; - int ret; - - if (!sev_es_guest(kvm)) - return -ENOTTY; - - if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, - sizeof(struct kvm_sev_send_update_vmsa))) - return -EFAULT; - - /* userspace wants to query either header or trans length */ - if (!params.trans_len || !params.hdr_len) - return __sev_send_update_vmsa_query_lengths(kvm, argp, ¶ms); - - if (!params.trans_uaddr || !params.hdr_uaddr) - return -EINVAL; - - /* Get the target vcpu */ - vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); - if (!vcpu) { - pr_err("%s: invalid vcpu\n", __func__); - return -EINVAL; - } - - pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); - - /* allocate memory for header and transport buffer */ - ret = -ENOMEM; - hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); - if (!hdr) - return ret; - - trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT); - if (!trans_data) - goto e_free_hdr; - - vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL); - if (!vmsa) - goto e_free_trans_data; - - vmsa->hdr_address = __psp_pa(hdr); - vmsa->hdr_len = 
params.hdr_len; - vmsa->trans_address = __psp_pa(trans_data); - vmsa->trans_len = params.trans_len; - - /* The SEND_UPDATE_VMSA command requires C-bit to be always set. */ - vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | sev_me_mask; - vmsa->guest_len = PAGE_SIZE; - vmsa->handle = sev->handle; - - ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, vmsa, &argp->error); - - if (ret) - goto e_free; - - /* copy transport buffer to user space */ - if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, - trans_data, params.trans_len)) { - ret = -EFAULT; - goto e_free; - } - - /* Copy packet header to userspace. */ - ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, - params.hdr_len); - -e_free: - kfree(vmsa); -e_free_trans_data: - kfree(trans_data); -e_free_hdr: - kfree(hdr); - - return ret; -} - static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -1809,81 +1700,6 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } -static int sev_receive_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) -{ - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - struct kvm_sev_receive_update_vmsa params; - struct sev_data_receive_update_vmsa *vmsa; - struct kvm_vcpu *vcpu; - void *hdr = NULL, *trans = NULL; - int ret; - - if (!sev_es_guest(kvm)) - return -ENOTTY; - - if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, - sizeof(struct kvm_sev_receive_update_vmsa))) - return -EFAULT; - - if (!params.hdr_uaddr || !params.hdr_len || - !params.trans_uaddr || !params.trans_len) - return -EINVAL; - - /* Get the target vcpu */ - vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); - if (!vcpu) { - pr_err("%s: invalid vcpu\n", __func__); - return -EINVAL; - } - - pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); - - hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); - if (IS_ERR(hdr)) - return PTR_ERR(hdr); - - trans 
= psp_copy_user_blob(params.trans_uaddr, params.trans_len); - if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - goto e_free_hdr; - } - - ret = -ENOMEM; - vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL); - if (!vmsa) - goto e_free_trans; - - vmsa->hdr_address = __psp_pa(hdr); - vmsa->hdr_len = params.hdr_len; - vmsa->trans_address = __psp_pa(trans); - vmsa->trans_len = params.trans_len; - - /* - * Flush before RECEIVE_UPDATE_VMSA, the PSP encrypts the - * written VMSA memory content with the guest's key), and - * the cache may contain dirty, unencrypted data. - */ - clflush_cache_range(to_svm(vcpu)->sev_es.vmsa, PAGE_SIZE); - - /* The RECEIVE_UPDATE_VMSA command requires C-bit to be always set. */ - vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | sev_me_mask; - vmsa->guest_len = PAGE_SIZE; - vmsa->handle = sev->handle; - - ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_VMSA, vmsa, &argp->error); - - if (!ret) - vcpu->arch.guest_state_protected = true; - - kfree(vmsa); -e_free_trans: - kfree(trans); -e_free_hdr: - kfree(hdr); - - return ret; -} - static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -2260,12 +2076,6 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_SEV_SEND_UPDATE_DATA: r = sev_send_update_data(kvm, &sev_cmd); break; - case KVM_SEV_SEND_UPDATE_VMSA: - if (is_x86_vendor_hygon()) - r = sev_send_update_vmsa(kvm, &sev_cmd); - else - r = -EINVAL; - break; case KVM_SEV_SEND_FINISH: r = sev_send_finish(kvm, &sev_cmd); break; @@ -2278,12 +2088,6 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_SEV_RECEIVE_UPDATE_DATA: r = sev_receive_update_data(kvm, &sev_cmd); break; - case KVM_SEV_RECEIVE_UPDATE_VMSA: - if (is_x86_vendor_hygon()) - r = sev_receive_update_vmsa(kvm, &sev_cmd); - else - r = -EINVAL; - break; case KVM_SEV_RECEIVE_FINISH: r = sev_receive_finish(kvm, &sev_cmd); break; diff --git a/include/uapi/linux/kvm.h 
b/include/uapi/linux/kvm.h index 45121f043349..4f2f8fd11681 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2024,14 +2024,6 @@ struct kvm_sev_send_update_data { __u32 trans_len; }; -struct kvm_sev_send_update_vmsa { - __u32 vcpu_id; - __u64 hdr_uaddr; - __u32 hdr_len; - __u64 trans_uaddr; - __u32 trans_len; -}; - struct kvm_sev_receive_start { __u32 handle; __u32 policy; @@ -2050,14 +2042,6 @@ struct kvm_sev_receive_update_data { __u32 trans_len; }; -struct kvm_sev_receive_update_vmsa { - __u32 vcpu_id; - __u64 hdr_uaddr; - __u32 hdr_len; - __u64 trans_uaddr; - __u32 trans_len; -}; - struct kvm_csv_init { __u64 userid_addr; __u32 len; @@ -2323,6 +2307,22 @@ struct kvm_csv_command_batch { __u64 csv_batch_list_uaddr; }; +struct kvm_csv_send_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +struct kvm_csv_receive_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + /* CSV3 command */ enum csv3_cmd_id { KVM_CSV3_NR_MIN = 0xc0, -- Gitee From c2422ed5db3e21439b944fcc06696c68833df5b4 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 22 Oct 2024 15:47:26 +0800 Subject: [PATCH 1622/2138] anolis: KVM: SVM: CSV: Move the code to support reboot Hygon CSV guest to csv.c ANBZ: #11454 This will reduce code intrusion in svm.c and sev.c. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4015 --- arch/x86/kvm/svm/csv.c | 87 ++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 8 ++++ arch/x86/kvm/svm/sev.c | 60 ++------------------------- arch/x86/kvm/svm/svm.c | 18 ++++----- arch/x86/kvm/svm/svm.h | 5 +-- arch/x86/kvm/x86.c | 6 +-- include/uapi/linux/kvm.h | 8 ++-- 7 files changed, 113 insertions(+), 79 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 7a8c52c75e6a..57044046e0f3 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -2294,6 +2294,91 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) return r; } +/* The caller must flush the stale caches about svm->sev_es.vmsa */ +void csv2_sync_reset_vmsa(struct vcpu_svm *svm) +{ + if (svm->sev_es.reset_vmsa) + memcpy(svm->sev_es.reset_vmsa, svm->sev_es.vmsa, PAGE_SIZE); +} + +void csv2_free_reset_vmsa(struct vcpu_svm *svm) +{ + if (svm->sev_es.reset_vmsa) { + __free_page(virt_to_page(svm->sev_es.reset_vmsa)); + svm->sev_es.reset_vmsa = NULL; + } +} + +int csv2_setup_reset_vmsa(struct vcpu_svm *svm) +{ + struct page *reset_vmsa_page = NULL; + + reset_vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (!reset_vmsa_page) + return -ENOMEM; + + svm->sev_es.reset_vmsa = page_address(reset_vmsa_page); + return 0; +} + +static int csv_control_pre_system_reset(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + unsigned long i; + int ret; + + if (!sev_es_guest(kvm)) + return 0; + + kvm_for_each_vcpu(i, vcpu, kvm) { + ret = mutex_lock_killable(&vcpu->mutex); + if (ret) + return ret; + + vcpu->arch.guest_state_protected = false; + + mutex_unlock(&vcpu->mutex); + } + + return 0; +} + +static int csv_control_post_system_reset(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + unsigned long i; + int ret; + + if (!sev_guest(kvm)) + return 0; + + /* Flush both host and guest caches before next boot flow */ + 
wbinvd_on_all_cpus(); + + if (!sev_es_guest(kvm)) + return 0; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + ret = mutex_lock_killable(&vcpu->mutex); + if (ret) + return ret; + + memcpy(svm->sev_es.vmsa, svm->sev_es.reset_vmsa, PAGE_SIZE); + + /* Flush encrypted vmsa to memory */ + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + + svm->vcpu.arch.guest_state_protected = true; + svm->sev_es.received_first_sipi = false; + + mutex_unlock(&vcpu->mutex); + } + + return 0; +} + void csv_exit(void) { } @@ -2311,6 +2396,8 @@ void __init csv_init(struct kvm_x86_ops *ops) ops->mem_enc_ioctl = csv_mem_enc_ioctl; ops->vm_attestation = csv_vm_attestation; + ops->control_pre_system_reset = csv_control_pre_system_reset; + ops->control_post_system_reset = csv_control_post_system_reset; if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) { ops->vm_destroy = csv_vm_destroy; diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 5c62887cbdc1..1231e9f610e4 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -59,6 +59,10 @@ void csv_exit(void); int csv_alloc_trans_mempool(void); void csv_free_trans_mempool(void); +void csv2_sync_reset_vmsa(struct vcpu_svm *svm); +void csv2_free_reset_vmsa(struct vcpu_svm *svm); +int csv2_setup_reset_vmsa(struct vcpu_svm *svm); + #else /* !CONFIG_HYGON_CSV */ static inline void __init csv_init(struct kvm_x86_ops *ops) { } @@ -66,6 +70,10 @@ static inline void csv_exit(void) { } static inline int csv_alloc_trans_mempool(void) { return -ENOMEM; } static inline void csv_free_trans_mempool(void) { } +static inline void csv2_sync_reset_vmsa(struct vcpu_svm *svm) { } +static inline void csv2_free_reset_vmsa(struct vcpu_svm *svm) { } +static inline int csv2_setup_reset_vmsa(struct vcpu_svm *svm) { return 0; } + #endif /* CONFIG_HYGON_CSV */ #endif /* __SVM_CSV_H */ diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 1fb3c36f38fc..edb94068737a 100644 --- 
a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -809,7 +809,7 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu, */ if (is_x86_vendor_hygon()) { clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); - memcpy(svm->sev_es.reset_vmsa, svm->sev_es.vmsa, PAGE_SIZE); + csv2_sync_reset_vmsa(svm); } return 0; @@ -2615,7 +2615,8 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu) if (svm->sev_es.ghcb_sa_free) kvfree(svm->sev_es.ghcb_sa); - __free_page(virt_to_page(svm->sev_es.reset_vmsa)); + if (is_x86_vendor_hygon()) + csv2_free_reset_vmsa(svm); } static void dump_ghcb(struct vcpu_svm *svm) @@ -3427,58 +3428,3 @@ int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa) return 0; } - -int csv_control_pre_system_reset(struct kvm *kvm) -{ - struct kvm_vcpu *vcpu; - unsigned long i; - int ret; - - if (!sev_es_guest(kvm)) - return 0; - - kvm_for_each_vcpu(i, vcpu, kvm) { - ret = mutex_lock_killable(&vcpu->mutex); - if (ret) - return ret; - - vcpu->arch.guest_state_protected = false; - - mutex_unlock(&vcpu->mutex); - } - - return 0; -} - -int csv_control_post_system_reset(struct kvm *kvm) -{ - struct kvm_vcpu *vcpu; - unsigned long i; - int ret; - - /* Flush both host and guest caches before next boot flow */ - wbinvd_on_all_cpus(); - - if (!sev_es_guest(kvm)) - return 0; - - kvm_for_each_vcpu(i, vcpu, kvm) { - struct vcpu_svm *svm = to_svm(vcpu); - - ret = mutex_lock_killable(&vcpu->mutex); - if (ret) - return ret; - - memcpy(svm->sev_es.vmsa, svm->sev_es.reset_vmsa, PAGE_SIZE); - - /* Flush encrypted vmsa to memory */ - clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); - - svm->vcpu.arch.guest_state_protected = true; - svm->sev_es.received_first_sipi = false; - - mutex_unlock(&vcpu->mutex); - } - - return 0; -} diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 866f5ada1cfa..57d090adf64f 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1445,7 +1445,6 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) struct 
vcpu_svm *svm; struct page *vmcb01_page; struct page *vmsa_page = NULL; - struct page *reset_vmsa_page = NULL; int err; BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0); @@ -1465,9 +1464,10 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) if (!vmsa_page) goto error_free_vmcb_page; - reset_vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); - if (!reset_vmsa_page) - goto error_free_vmsa_page; + if (is_x86_vendor_hygon()) { + if (csv2_setup_reset_vmsa(svm)) + goto error_free_vmsa_page; + } /* * SEV-ES guests maintain an encrypted version of their FPU @@ -1497,9 +1497,6 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) if (vmsa_page) svm->sev_es.vmsa = page_address(vmsa_page); - if (reset_vmsa_page) - svm->sev_es.reset_vmsa = page_address(reset_vmsa_page); - svm->guest_state_loaded = false; return 0; @@ -1507,8 +1504,9 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) error_free_vmsa_page: if (vmsa_page) __free_page(vmsa_page); - if (reset_vmsa_page) - __free_page(reset_vmsa_page); + + if (is_x86_vendor_hygon()) + csv2_free_reset_vmsa(svm); error_free_vmcb_page: __free_page(vmcb01_page); out: @@ -5205,8 +5203,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector, .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons, - .control_pre_system_reset = csv_control_pre_system_reset, - .control_post_system_reset = csv_control_post_system_reset, .arch_hypercall = kvm_hygon_arch_hypercall, }; diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index b4346ea313ab..c702ad4a4444 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -206,8 +206,10 @@ struct vcpu_sev_es_state { /* CSV2 migrated ghcb mapping state support */ bool receiver_ghcb_map_fail; +#ifdef CONFIG_HYGON_CSV /* CSV2 reboot vmsa */ struct vmcb_save_area *reset_vmsa; +#endif }; struct vcpu_svm { @@ -740,9 +742,6 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm); int sev_es_ghcb_map(struct vcpu_svm *svm, 
u64 ghcb_gpa); -int csv_control_pre_system_reset(struct kvm *kvm); -int csv_control_post_system_reset(struct kvm *kvm); - /* vmenter.S */ void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3fc7cd1ce849..b46accdbf59e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7118,15 +7118,13 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) break; } case KVM_CONTROL_PRE_SYSTEM_RESET: - if (is_x86_vendor_hygon() && - kvm_x86_ops.control_pre_system_reset) + if (kvm_x86_ops.control_pre_system_reset) r = static_call(kvm_x86_control_pre_system_reset)(kvm); else r = -ENOTTY; break; case KVM_CONTROL_POST_SYSTEM_RESET: - if (is_x86_vendor_hygon() && - kvm_x86_ops.control_post_system_reset) + if (kvm_x86_ops.control_post_system_reset) r = static_call(kvm_x86_control_post_system_reset)(kvm); else r = -ENOTTY; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 4f2f8fd11681..2f513c091dba 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1595,10 +1595,6 @@ struct kvm_s390_ucas_mapping { #define KVM_GET_DEVICE_ATTR _IOW(KVMIO, 0xe2, struct kvm_device_attr) #define KVM_HAS_DEVICE_ATTR _IOW(KVMIO, 0xe3, struct kvm_device_attr) -/* ioctls for control vm during system reset */ -#define KVM_CONTROL_PRE_SYSTEM_RESET _IO(KVMIO, 0xe8) -#define KVM_CONTROL_POST_SYSTEM_RESET _IO(KVMIO, 0xe9) - /* * ioctls for vcpu fds */ @@ -2323,6 +2319,10 @@ struct kvm_csv_receive_update_vmsa { __u32 trans_len; }; +/* ioctls for control vm during system reset, currently only for CSV */ +#define KVM_CONTROL_PRE_SYSTEM_RESET _IO(KVMIO, 0xe8) +#define KVM_CONTROL_POST_SYSTEM_RESET _IO(KVMIO, 0xe9) + /* CSV3 command */ enum csv3_cmd_id { KVM_CSV3_NR_MIN = 0xc0, -- Gitee From 8d58ee4f9766bdd5ff4dbf98ac015623df29d1a0 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 22 Oct 2024 16:28:40 +0800 Subject: [PATCH 1623/2138] anolis: KVM: SVM: CSV: Move 
MSR_AMD64_SEV_ES_GHCB emulation to csv.c ANBZ: #11454 The emulation on MSR_AMD64_SEV_ES_GHCB only support for Hygon CSV2 and CSV3 guest. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4015 --- arch/x86/kvm/svm/csv.c | 128 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 49 ++++++++++++++++ arch/x86/kvm/svm/sev.c | 19 ------ arch/x86/kvm/svm/svm.c | 113 ++++++++---------------------------- arch/x86/kvm/svm/svm.h | 44 +------------- 5 files changed, 202 insertions(+), 151 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 57044046e0f3..cdce01b1bf6e 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -2321,6 +2321,134 @@ int csv2_setup_reset_vmsa(struct vcpu_svm *svm) return 0; } +static int csv2_map_ghcb_gpa(struct vcpu_svm *svm, u64 ghcb_gpa) +{ + if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { + /* Unable to map GHCB from guest */ + vcpu_unimpl(&svm->vcpu, "Missing GHCB [%#llx] from guest\n", + ghcb_gpa); + + svm->sev_es.receiver_ghcb_map_fail = true; + return -EINVAL; + } + + svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; + svm->sev_es.receiver_ghcb_map_fail = false; + + pr_info("Mapping GHCB [%#llx] from guest at recipient\n", ghcb_gpa); + + return 0; +} + +static bool is_ghcb_msr_protocol(u64 ghcb_val) +{ + return !!(ghcb_val & GHCB_MSR_INFO_MASK); +} + +/* + * csv_get_msr return msr data to the userspace. + * + * Return 0 if get msr success. 
+ */ +int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + switch (msr_info->index) { + case MSR_AMD64_SEV_ES_GHCB: + /* Only support userspace get from vmcb.control.ghcb_gpa */ + if (!msr_info->host_initiated) + return 1; + + /* Filling the data as 0 if it's not a Hygon CSV2 guest */ + if (!sev_es_guest(svm->vcpu.kvm)) { + msr_info->data = 0; + return 0; + } + + msr_info->data = svm->vmcb->control.ghcb_gpa; + + /* Only set status bits when using GHCB page protocol */ + if (msr_info->data && + !is_ghcb_msr_protocol(msr_info->data)) { + if (svm->sev_es.ghcb) + msr_info->data |= GHCB_MSR_MAPPED_MASK; + + if (svm->sev_es.received_first_sipi) + msr_info->data |= + GHCB_MSR_RECEIVED_FIRST_SIPI_MASK; + } + break; + default: + return 1; + } + return 0; +} + +/* + * csv_set_msr set msr data from the userspace. + * + * Return 0 if set msr success. + */ +int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ + struct vcpu_svm *svm = to_svm(vcpu); + u32 ecx = msr_info->index; + u64 data = msr_info->data; + + switch (ecx) { + case MSR_AMD64_SEV_ES_GHCB: + /* Only support userspace set to vmcb.control.ghcb_gpa */ + if (!msr_info->host_initiated) + return 1; + + /* Ignore write to this MSR if it's not a Hygon CSV2 guest. */ + if (!sev_es_guest(svm->vcpu.kvm)) + return 0; + + /* + * Value 0 means uninitialized userspace MSR data, userspace + * need get the initial MSR data afterwards. + */ + if (!data) + return 0; + + /* Extract status info when using GHCB page protocol */ + if (!is_ghcb_msr_protocol(data)) { + if (!svm->sev_es.ghcb && (data & GHCB_MSR_MAPPED_MASK)) { + /* + * This happened on the recipient of migration, + * should return error if cannot map the ghcb + * page. 
+ */ + if (csv2_map_ghcb_gpa(to_svm(vcpu), + data & ~GHCB_MSR_KVM_STATUS_MASK)) + return 1; + } + + if (data & GHCB_MSR_RECEIVED_FIRST_SIPI_MASK) + svm->sev_es.received_first_sipi = true; + + data &= ~GHCB_MSR_KVM_STATUS_MASK; + } + + svm->vmcb->control.ghcb_gpa = data; + break; + default: + return 1; + } + return 0; +} + +bool csv_has_emulated_ghcb_msr(struct kvm *kvm) +{ + /* this should be determined after KVM_CREATE_VM. */ + if (kvm && !sev_es_guest(kvm)) + return false; + + return true; +} + static int csv_control_pre_system_reset(struct kvm *kvm) { struct kvm_vcpu *vcpu; diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 1231e9f610e4..b15a5660fb18 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -59,10 +59,18 @@ void csv_exit(void); int csv_alloc_trans_mempool(void); void csv_free_trans_mempool(void); +int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); +int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); +bool csv_has_emulated_ghcb_msr(struct kvm *kvm); void csv2_sync_reset_vmsa(struct vcpu_svm *svm); void csv2_free_reset_vmsa(struct vcpu_svm *svm); int csv2_setup_reset_vmsa(struct vcpu_svm *svm); +static inline bool csv2_state_unstable(struct vcpu_svm *svm) +{ + return svm->sev_es.receiver_ghcb_map_fail; +} + #else /* !CONFIG_HYGON_CSV */ static inline void __init csv_init(struct kvm_x86_ops *ops) { } @@ -70,10 +78,51 @@ static inline void csv_exit(void) { } static inline int csv_alloc_trans_mempool(void) { return -ENOMEM; } static inline void csv_free_trans_mempool(void) { } +static inline +int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { return 1; } +static inline +int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { return 1; } +static inline bool csv_has_emulated_ghcb_msr(struct kvm *kvm) { return false; } +static inline bool csv2_state_unstable(struct vcpu_svm *svm) { return false; } static inline void csv2_sync_reset_vmsa(struct vcpu_svm *svm) { } static 
inline void csv2_free_reset_vmsa(struct vcpu_svm *svm) { } static inline int csv2_setup_reset_vmsa(struct vcpu_svm *svm) { return 0; } #endif /* CONFIG_HYGON_CSV */ +#include + +/* + * CSV2 live migration support: + * If MSR_AMD64_SEV_ES_GHCB in migration didn't apply GHCB MSR protocol, + * reuse bits [52-63] to indicate vcpu status. The following status are + * currently included: + * * ghcb_map: indicate whether GHCB page was mapped. The mapped GHCB + * page may be filled with GPRs before VMRUN, so we must + * remap GHCB page on the recipient's side. + * * received_first_sipi: indicate AP's INIT-SIPI-SIPI stage. Reuse + * these bits for received_first_sipi is acceptable cause + * runtime stage of guest's linux only applies GHCB page + * protocol. + * It's unlikely that the migration encounter other stages + * of guest's linux. Once encountered, AP bringup may fail + * which will not impact user payload. + * Otherbits keep their's original meaning. (See GHCB Spec 2.3.1 for detail) + */ +#define GHCB_MSR_KVM_STATUS_POS 52 +#define GHCB_MSR_KVM_STATUS_BITS 12 +#define GHCB_MSR_KVM_STATUS_MASK \ + ((BIT_ULL(GHCB_MSR_KVM_STATUS_BITS) - 1) \ + << GHCB_MSR_KVM_STATUS_POS) +#define GHCB_MSR_MAPPED_POS 63 +#define GHCB_MSR_MAPPED_BITS 1 +#define GHCB_MSR_MAPPED_MASK \ + ((BIT_ULL(GHCB_MSR_MAPPED_BITS) - 1) \ + << GHCB_MSR_MAPPED_POS) +#define GHCB_MSR_RECEIVED_FIRST_SIPI_POS 62 +#define GHCB_MSR_RECEIVED_FIRST_SIPI_BITS 1 +#define GHCB_MSR_RECEIVED_FIRST_SIPI_MASK \ + ((BIT_ULL(GHCB_MSR_RECEIVED_FIRST_SIPI_BITS) - 1) \ + << GHCB_MSR_RECEIVED_FIRST_SIPI_POS) + #endif /* __SVM_CSV_H */ diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index edb94068737a..df9d3f2b8046 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3409,22 +3409,3 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1); } - -int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa) -{ - if (kvm_vcpu_map(&svm->vcpu, 
ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { - /* Unable to map GHCB from guest */ - vcpu_unimpl(&svm->vcpu, "Missing GHCB [%#llx] from guest\n", - ghcb_gpa); - - svm->sev_es.receiver_ghcb_map_fail = true; - return -EINVAL; - } - - svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; - svm->sev_es.receiver_ghcb_map_fail = false; - - pr_info("Mapping GHCB [%#llx] from guest at recipient\n", ghcb_gpa); - - return 0; -} diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 57d090adf64f..1ba907ebaa3e 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2971,35 +2971,11 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = svm->msr_decfg; break; case MSR_AMD64_SEV_ES_GHCB: - if (is_x86_vendor_hygon()) { - /* - * Only support userspace get/set from/to - * vmcb.control.ghcb_gpa - */ - if (!msr_info->host_initiated) - return 1; - - /* Filling the data as 0 if it's not a Hygon CSV2 guest */ - if (!sev_es_guest(svm->vcpu.kvm)) { - msr_info->data = 0; - return 0; - } - - msr_info->data = svm->vmcb->control.ghcb_gpa; - - /* Only set status bits when using GHCB page protocol */ - if (msr_info->data && - !is_ghcb_msr_protocol(msr_info->data)) { - if (svm->sev_es.ghcb) - msr_info->data |= GHCB_MSR_MAPPED_MASK; - - if (svm->sev_es.received_first_sipi) - msr_info->data |= - GHCB_MSR_RECEIVED_FIRST_SIPI_MASK; - } - break; - } - return 1; + /* HYGON CSV2 support export this MSR to userspace */ + if (is_x86_vendor_hygon()) + return csv_get_msr(vcpu, msr_info); + else + return 1; default: return kvm_get_msr_common(vcpu, msr_info); } @@ -3242,52 +3218,11 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) break; } case MSR_AMD64_SEV_ES_GHCB: - if (is_x86_vendor_hygon()) { - /* - * Only support userspace get/set from/to - * vmcb.control.ghcb_gpa - */ - if (!msr->host_initiated) - return 1; - - /* - * Ignore write to this MSR if it's not a Hygon CSV2 - * guest. 
- */ - if (!sev_es_guest(svm->vcpu.kvm)) - return 0; - - /* - * Value 0 means uninitialized userspace MSR data, - * userspace need get the initial MSR data afterwards. - */ - if (!data) - return 0; - - /* Extract status info when using GHCB page protocol */ - if (!is_ghcb_msr_protocol(data)) { - if (!svm->sev_es.ghcb && - (data & GHCB_MSR_MAPPED_MASK)) { - /* - * This happened on recipient of migration, - * should return error if cannot map the - * ghcb page. - */ - if (sev_es_ghcb_map(to_svm(vcpu), - data & ~GHCB_MSR_KVM_STATUS_MASK)) - return 1; - } - - if (data & GHCB_MSR_RECEIVED_FIRST_SIPI_MASK) - svm->sev_es.received_first_sipi = true; - - data &= ~GHCB_MSR_KVM_STATUS_MASK; - } - - svm->vmcb->control.ghcb_gpa = data; - break; - } - return 1; + /* HYGON CSV2 support update this MSR from userspace */ + if (is_x86_vendor_hygon()) + return csv_set_msr(vcpu, msr); + else + return 1; default: return kvm_set_msr_common(vcpu, msr); } @@ -4255,16 +4190,16 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) trace_kvm_entry(vcpu); /* - * For receipient side of CSV2 guest, fake the exit code as - * SVM_EXIT_ERR and return directly if failed to mapping - * the necessary GHCB page. When handling the exit code - * afterwards, it can exit to userspace and stop the guest. + * For receipient side of CSV2 guest, fake the exit code as SVM_EXIT_ERR + * and return directly if failed to mapping the necessary GHCB page. + * When handling the exit code afterwards, it can exit to userspace and + * stop the guest. 
*/ - if (is_x86_vendor_hygon() && - sev_es_guest(vcpu->kvm) && - svm->sev_es.receiver_ghcb_map_fail) { - svm->vmcb->control.exit_code = SVM_EXIT_ERR; - return EXIT_FASTPATH_NONE; + if (is_x86_vendor_hygon() && sev_es_guest(vcpu->kvm)) { + if (csv2_state_unstable(svm)) { + svm->vmcb->control.exit_code = SVM_EXIT_ERR; + return EXIT_FASTPATH_NONE; + } } svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; @@ -4442,13 +4377,11 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) return false; break; case MSR_AMD64_SEV_ES_GHCB: - /* - * Only CSV2 guests support to export this MSR, this should - * be determined after KVM_CREATE_VM. - */ - if (!is_x86_vendor_hygon() || (kvm && !sev_es_guest(kvm))) + /* HYGON CSV2 support emulate this MSR */ + if (is_x86_vendor_hygon()) + return csv_has_emulated_ghcb_msr(kvm); + else return false; - break; default: break; } diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index c702ad4a4444..aaf945935ff7 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -204,9 +204,9 @@ struct vcpu_sev_es_state { bool ghcb_sa_sync; bool ghcb_sa_free; - /* CSV2 migrated ghcb mapping state support */ - bool receiver_ghcb_map_fail; #ifdef CONFIG_HYGON_CSV + /* migrated ghcb mapping state for HYGON CSV2 */ + bool receiver_ghcb_map_fail; /* CSV2 reboot vmsa */ struct vmcb_save_area *reset_vmsa; #endif @@ -674,44 +674,6 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu); #define GHCB_VERSION_MAX 1ULL #define GHCB_VERSION_MIN 1ULL -/* - * CSV2 live migration support: - * If MSR_AMD64_SEV_ES_GHCB in migration didn't apply GHCB MSR protocol, - * reuse bits [52-63] to indicate vcpu status. The following status are - * currently included: - * * ghcb_map: indicate whether GHCB page was mapped. The mapped GHCB - * page may be filled with GPRs before VMRUN, so we must - * remap GHCB page on the recipient's side. - * * received_first_sipi: indicate AP's INIT-SIPI-SIPI stage. 
Reuse - * these bits for received_first_sipi is acceptable cause - * runtime stage of guest's linux only applies GHCB page - * protocol. - * It's unlikely that the migration encounter other stages - * of guest's linux. Once encountered, AP bringup may fail - * which will not impact user payload. - * Otherbits keep their's original meaning. (See GHCB Spec 2.3.1 for detail) - */ -#define GHCB_MSR_KVM_STATUS_POS 52 -#define GHCB_MSR_KVM_STATUS_BITS 12 -#define GHCB_MSR_KVM_STATUS_MASK \ - ((BIT_ULL(GHCB_MSR_KVM_STATUS_BITS) - 1) \ - << GHCB_MSR_KVM_STATUS_POS) -#define GHCB_MSR_MAPPED_POS 63 -#define GHCB_MSR_MAPPED_BITS 1 -#define GHCB_MSR_MAPPED_MASK \ - ((BIT_ULL(GHCB_MSR_MAPPED_BITS) - 1) \ - << GHCB_MSR_MAPPED_POS) -#define GHCB_MSR_RECEIVED_FIRST_SIPI_POS 62 -#define GHCB_MSR_RECEIVED_FIRST_SIPI_BITS 1 -#define GHCB_MSR_RECEIVED_FIRST_SIPI_MASK \ - ((BIT_ULL(GHCB_MSR_RECEIVED_FIRST_SIPI_BITS) - 1) \ - << GHCB_MSR_RECEIVED_FIRST_SIPI_POS) - - -static inline bool is_ghcb_msr_protocol(u64 ghcb_val) -{ - return ghcb_val & GHCB_MSR_INFO_MASK; -} extern unsigned int max_sev_asid; @@ -740,8 +702,6 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector); void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa); void sev_es_unmap_ghcb(struct vcpu_svm *svm); -int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa); - /* vmenter.S */ void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted); -- Gitee From c822a5841a16bd883e19d180440f150eb195f473 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 22 Oct 2024 17:30:03 +0800 Subject: [PATCH 1624/2138] anolis: KVM: SVM: CSV: Move the code to support Hygon CSV reuse ASID to csv.c ANBZ: #11454 This make the code for Hygon CSV reuse ASID cleaner. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4015 --- arch/x86/kvm/Kconfig | 2 +- arch/x86/kvm/svm/csv.c | 67 ++++++++++++++++++++++++++++++- arch/x86/kvm/svm/csv.h | 23 +++++++++-- arch/x86/kvm/svm/sev.c | 87 ++++++++++------------------------------ include/uapi/linux/kvm.h | 10 ++--- 5 files changed, 111 insertions(+), 78 deletions(-) diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 463732963a15..3e23d32e655a 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -157,7 +157,7 @@ config KVM_EXTERNAL_WRITE_TRACKING config KVM_SUPPORTS_CSV_REUSE_ASID def_bool y bool "Reuse the same ASID for different HYGON CSV guests" - depends on KVM_AMD_SEV && CPU_SUP_HYGON + depends on KVM_AMD_SEV && CPU_SUP_HYGON && HYGON_CSV depends on !CGROUP_MISC help Provide support for reuse the same ASID for difference HYGON diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index cdce01b1bf6e..ba7a580908d0 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -137,7 +137,7 @@ static void csv_reset_mempool_offset(void) g_mempool_offset = 0; } -void csv_free_trans_mempool(void) +static void csv_free_trans_mempool(void) { int i; @@ -149,7 +149,7 @@ void csv_free_trans_mempool(void) csv_reset_mempool_offset(); } -int csv_alloc_trans_mempool(void) +static int csv_alloc_trans_mempool(void) { int i; @@ -2507,6 +2507,69 @@ static int csv_control_post_system_reset(struct kvm *kvm) return 0; } +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + +struct csv_asid_userid *csv_asid_userid_array; + +static int csv_alloc_asid_userid_array(unsigned int nr_asids) +{ + int ret = 0; + + csv_asid_userid_array = kcalloc(nr_asids, sizeof(struct csv_asid_userid), + GFP_KERNEL_ACCOUNT); + if (!csv_asid_userid_array) + ret = -ENOMEM; + + if (ret) + pr_warn("Fail to allocate array, reuse ASID is unavailable\n"); + + return ret; +} + +static void csv_free_asid_userid_array(void) +{ + 
kfree(csv_asid_userid_array); + csv_asid_userid_array = NULL; +} + +#else /* !CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ + +static int csv_alloc_asid_userid_array(unsigned int nr_asids) +{ + pr_warn("reuse ASID is unavailable\n"); + return -EFAULT; +} + +static void csv_free_asid_userid_array(void) +{ +} + +#endif /* CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ + +void __init csv_hardware_setup(unsigned int max_csv_asid) +{ + unsigned int nr_asids = max_csv_asid + 1; + + /* + * Allocate a memory pool to speed up live migration of + * the CSV/CSV2 guests. If the allocation fails, no + * acceleration is performed at live migration. + */ + csv_alloc_trans_mempool(); + /* + * Allocate a buffer to support reuse ASID, reuse ASID + * will not work if the allocation fails. + */ + csv_alloc_asid_userid_array(nr_asids); +} + +void csv_hardware_unsetup(void) +{ + /* Free the memory that allocated in csv_hardware_setup(). */ + csv_free_trans_mempool(); + csv_free_asid_userid_array(); +} + void csv_exit(void) { } diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index b15a5660fb18..9b0563062a94 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -32,6 +32,19 @@ struct csv_ringbuf_infos { int num; }; +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + +#define ASID_USERID_LENGTH 20 + +struct csv_asid_userid { + int refcnt; // reference count of the ASID + u32 userid_len; + char userid[ASID_USERID_LENGTH]; +}; +extern struct csv_asid_userid *csv_asid_userid_array; + +#endif /* CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ + #ifdef CONFIG_HYGON_CSV /* @@ -56,8 +69,9 @@ extern struct hygon_kvm_hooks_table { void __init csv_init(struct kvm_x86_ops *ops); void csv_exit(void); -int csv_alloc_trans_mempool(void); -void csv_free_trans_mempool(void); + +void __init csv_hardware_setup(unsigned int max_csv_asid); +void csv_hardware_unsetup(void); int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); @@ -75,8 
+89,9 @@ static inline bool csv2_state_unstable(struct vcpu_svm *svm) static inline void __init csv_init(struct kvm_x86_ops *ops) { } static inline void csv_exit(void) { } -static inline int csv_alloc_trans_mempool(void) { return -ENOMEM; } -static inline void csv_free_trans_mempool(void) { } + +static inline void __init csv_hardware_setup(unsigned int max_csv_asid) { } +static inline void csv_hardware_unsetup(void) { } static inline int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { return 1; } diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index df9d3f2b8046..2e3463f1ff0d 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -87,17 +87,6 @@ struct enc_region { unsigned long size; }; -#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID -#define ASID_USERID_LENGTH 20 -struct csv_asid_userid { - int refcnt; // reference count of the ASID - u32 userid_len; - char userid[ASID_USERID_LENGTH]; -}; - -static struct csv_asid_userid *csv_asid_userid_array; -#endif - /* Called with the sev_bitmap_lock held, or on shutdown */ static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid) { @@ -191,8 +180,8 @@ static int sev_asid_new(struct kvm_sev_info *sev) #ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID /* For Hygon CPU, check whether the userid exists */ - if ((is_x86_vendor_hygon()) && - userid && userid_len) { + if (is_x86_vendor_hygon() && userid && userid_len && + !WARN_ON_ONCE(!csv_asid_userid_array)) { int i = !min_sev_asid ? 
1 : min_sev_asid; for (; i <= max_sev_asid; i++) { @@ -239,8 +228,8 @@ static int sev_asid_new(struct kvm_sev_info *sev) #ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID /* For Hygon CPU, initialize the new userid */ - if ((is_x86_vendor_hygon()) && - userid && userid_len) { + if (is_x86_vendor_hygon() && userid && userid_len && + !WARN_ON_ONCE(!csv_asid_userid_array)) { memcpy(csv_asid_userid_array[asid].userid, userid, userid_len); csv_asid_userid_array[asid].userid_len = userid_len; csv_asid_userid_array[asid].refcnt = 1; @@ -273,8 +262,10 @@ static void sev_asid_free(struct kvm_sev_info *sev) #ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID /* For Hygon CPU, decrease the reference count if userid exist */ - if ((is_x86_vendor_hygon()) && - csv_asid_userid_array[sev->asid].userid_len) { + if (!is_x86_vendor_hygon() || !csv_asid_userid_array || + !csv_asid_userid_array[sev->asid].userid_len) { + __set_bit(sev->asid, sev_reclaim_asid_bitmap); + } else { /* If reach here, reference count should large than 0. 
*/ WARN_ON(csv_asid_userid_array[sev->asid].refcnt <= 0); @@ -284,8 +275,6 @@ static void sev_asid_free(struct kvm_sev_info *sev) memset(&csv_asid_userid_array[sev->asid], 0, sizeof(struct csv_asid_userid)); } - } else { - __set_bit(sev->asid, sev_reclaim_asid_bitmap); } #else __set_bit(sev->asid, sev_reclaim_asid_bitmap); @@ -336,11 +325,6 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; int asid, ret; -#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID - struct kvm_csv_init params; - void *csv_blob = NULL; -#endif - if (kvm->created_vcpus) return -EINVAL; @@ -352,7 +336,11 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) sev->es_active = argp->id == KVM_SEV_ES_INIT; #ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID - if (is_x86_vendor_hygon()) { + /* Try reuse ASID iff userid array is available for HYGON CSV guests */ + if (is_x86_vendor_hygon() && csv_asid_userid_array) { + struct kvm_csv_init params; + void *csv_blob = NULL; + memset(¶ms, 0, sizeof(params)); if (argp->data && @@ -377,9 +365,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) asid = sev_asid_new(sev, (const char *)csv_blob, params.len); - /* Free the @csv_blob to prevent memory leak */ + /* The buffer @csv_blob is no longer used, free it. 
*/ kfree(csv_blob); - csv_blob = NULL; } else { asid = sev_asid_new(sev, NULL, 0); } @@ -406,9 +393,6 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) sev_asid_free(sev); sev->asid = 0; e_no_asid: -#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID - kfree(csv_blob); -#endif sev->es_active = false; sev->active = false; return ret; @@ -2405,34 +2389,6 @@ void __init sev_hardware_setup(void) goto out; } - if (is_x86_vendor_hygon()) { -#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID - /* Initialize CSV ASID reuse array */ - csv_asid_userid_array = kcalloc(nr_asids, - sizeof(struct csv_asid_userid), GFP_KERNEL); - if (!csv_asid_userid_array) { - bitmap_free(sev_asid_bitmap); - sev_asid_bitmap = NULL; - bitmap_free(sev_reclaim_asid_bitmap); - sev_reclaim_asid_bitmap = NULL; - goto out; - } -#endif - - /* Initialize buffer to accelerate migration of CSV/CSV2 guest */ - if (csv_alloc_trans_mempool()) { -#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID - kfree(csv_asid_userid_array); - csv_asid_userid_array = NULL; -#endif - bitmap_free(sev_asid_bitmap); - sev_asid_bitmap = NULL; - bitmap_free(sev_reclaim_asid_bitmap); - sev_reclaim_asid_bitmap = NULL; - goto out; - } - } - if (min_sev_asid <= max_sev_asid) { sev_asid_count = max_sev_asid - min_sev_asid + 1; WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count)); @@ -2507,6 +2463,9 @@ void __init sev_hardware_setup(void) * no matter @sev_enabled is false. */ sev_install_hooks(); + + if (sev_enabled) + csv_hardware_setup(max_sev_asid); } #endif @@ -2518,16 +2477,12 @@ void sev_hardware_unsetup(void) if (!sev_enabled) return; + if (is_x86_vendor_hygon()) + csv_hardware_unsetup(); + /* No need to take sev_bitmap_lock, all VMs have been destroyed. 
*/ sev_flush_asids(1, max_sev_asid); - if (is_x86_vendor_hygon()) { - csv_free_trans_mempool(); -#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID - kfree(csv_asid_userid_array); -#endif - } - bitmap_free(sev_asid_bitmap); bitmap_free(sev_reclaim_asid_bitmap); @@ -2884,7 +2839,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu) #ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID /* If ASID is shared with other guests, then flush TLB before VMRUN */ - if ((is_x86_vendor_hygon()) && + if (is_x86_vendor_hygon() && csv_asid_userid_array && csv_asid_userid_array[asid].userid_len) svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; #endif diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 2f513c091dba..b4b5a6982086 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2038,11 +2038,6 @@ struct kvm_sev_receive_update_data { __u32 trans_len; }; -struct kvm_csv_init { - __u64 userid_addr; - __u32 len; -}; - #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) @@ -2319,6 +2314,11 @@ struct kvm_csv_receive_update_vmsa { __u32 trans_len; }; +struct kvm_csv_init { + __u64 userid_addr; + __u32 len; +}; + /* ioctls for control vm during system reset, currently only for CSV */ #define KVM_CONTROL_PRE_SYSTEM_RESET _IO(KVMIO, 0xe8) #define KVM_CONTROL_POST_SYSTEM_RESET _IO(KVMIO, 0xe9) -- Gitee From a6c256026c461481471016056655b44609cb3f5e Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 22 Oct 2024 21:38:19 +0800 Subject: [PATCH 1625/2138] anolis: KVM: SVM: CSV: Print CSV3 support info ANBZ: #11454 Currently, the function for issue CSV3 API command is available from hygon_kvm_hooks, use it and remove the duplicate function. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4015 --- arch/x86/kvm/svm/csv.c | 130 ++++++++++++++++++++++------------------- arch/x86/kvm/svm/sev.c | 1 - 2 files changed, 70 insertions(+), 61 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index ba7a580908d0..f9d675aaa917 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -28,6 +28,9 @@ /* Function and variable pointers for hooks */ struct hygon_kvm_hooks_table hygon_kvm_hooks; +/* enable/disable CSV3 support */ +static bool csv3_enabled = true; + static struct kvm_x86_ops csv_x86_ops; static const char csv_vm_mnonce[] = "VM_ATTESTATION"; static DEFINE_MUTEX(csv_cmd_batch_mutex); @@ -901,48 +904,6 @@ static bool csv3_guest(struct kvm *kvm) return sev_es_guest(kvm) && csv->csv3_active; } -static int csv_sync_vmsa(struct vcpu_svm *svm) -{ - struct sev_es_save_area *save = svm->sev_es.vmsa; - - /* Check some debug related fields before encrypting the VMSA */ - if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) - return -EINVAL; - - memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); - - /* Sync registgers per spec. 
*/ - save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; - save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; - save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; - save->xcr0 = svm->vcpu.arch.xcr0; - save->xss = svm->vcpu.arch.ia32_xss; - - return 0; -} - -static int __csv_issue_cmd(int fd, int id, void *data, int *error) -{ - struct fd f; - int ret; - - f = fdget(fd); - if (!f.file) - return -EBADF; - - ret = sev_issue_cmd_external_user(f.file, id, data, error); - - fdput(f); - return ret; -} - -static int csv_issue_cmd(struct kvm *kvm, int id, void *data, int *error) -{ - struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; - - return __csv_issue_cmd(sev->fd, id, data, error); -} - static inline void csv3_init_update_npt(struct csv3_data_update_npt *update_npt, gpa_t gpa, u32 error, u32 handle) { @@ -1094,8 +1055,9 @@ static int csv3_set_guest_private_memory(struct kvm *kvm) set_guest_private_memory->regions_paddr = __sme_pa(regions); /* set secury memory region for launch enrypt data */ - ret = csv_issue_cmd(kvm, CSV3_CMD_SET_GUEST_PRIVATE_MEMORY, - set_guest_private_memory, &error); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, + CSV3_CMD_SET_GUEST_PRIVATE_MEMORY, + set_guest_private_memory, &error); if (ret) goto e_free_smr; @@ -1216,8 +1178,8 @@ static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) } clflush_cache_range(data, params.len); - ret = csv_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_DATA, - encrypt_data, &argp->error); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_DATA, + encrypt_data, &argp->error); kfree(encrypt_data); block_free: @@ -1228,6 +1190,26 @@ static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int csv3_sync_vmsa(struct vcpu_svm *svm) +{ + struct sev_es_save_area *save = svm->sev_es.vmsa; + + /* Check some debug related fields before encrypting the VMSA */ + if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) + return -EINVAL; + + memcpy(save, 
&svm->vmcb->save, sizeof(svm->vmcb->save)); + + /* Sync registgers per spec. */ + save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; + save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; + save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; + save->xcr0 = svm->vcpu.arch.xcr0; + save->xss = svm->vcpu.arch.ia32_xss; + + return 0; +} + static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; @@ -1248,7 +1230,7 @@ static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) kvm_for_each_vcpu(i, vcpu, kvm) { struct vcpu_svm *svm = to_svm(vcpu); - ret = csv_sync_vmsa(svm); + ret = csv3_sync_vmsa(svm); if (ret) goto e_free; clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); @@ -1259,8 +1241,9 @@ static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) encrypt_vmcb->vmsa_len = PAGE_SIZE; encrypt_vmcb->shadow_vmcb_addr = __sme_pa(svm->vmcb); encrypt_vmcb->shadow_vmcb_len = PAGE_SIZE; - ret = csv_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_VMCB, - encrypt_vmcb, &argp->error); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, + CSV3_CMD_LAUNCH_ENCRYPT_VMCB, + encrypt_vmcb, &argp->error); if (ret) goto e_free; @@ -1285,7 +1268,8 @@ csv3_send_encrypt_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, memset(&data, 0, sizeof(data)); data.handle = sev->handle; - ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, &data, &argp->error); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, + &data, &argp->error); params->hdr_len = data.hdr_len; params->trans_len = data.trans_len; @@ -1389,14 +1373,16 @@ static int csv3_send_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) clflush_cache_range(guest_block, PAGE_SIZE); data.flag = CSV3_SEND_ENCRYPT_DATA_SET_READONLY; - ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, &data, &argp->error); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, + &data, &argp->error); if (ret) goto 
e_free_trans_data; kvm_flush_remote_tlbs(kvm); data.flag = CSV3_SEND_ENCRYPT_DATA_MIGRATE_PAGE; - ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, &data, &argp->error); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, + &data, &argp->error); if (ret) goto e_free_trans_data; @@ -1440,7 +1426,8 @@ csv3_send_encrypt_context_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *arg memset(&data, 0, sizeof(data)); data.handle = sev->handle; - ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, &data, &argp->error); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, + &data, &argp->error); params->hdr_len = data.hdr_len; params->trans_len = data.trans_len; @@ -1515,7 +1502,8 @@ static int csv3_send_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) /* flush hdr, trans data, trans block, secure VMSAs */ wbinvd_on_all_cpus(); - ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, &data, &argp->error); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, + &data, &argp->error); if (ret) goto e_free_trans_data; @@ -1648,8 +1636,8 @@ static int csv3_receive_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) clflush_cache_range(trans_data, params.trans_len); clflush_cache_range(trans_block, PAGE_SIZE); clflush_cache_range(guest_block, PAGE_SIZE); - ret = csv_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_DATA, &data, - &argp->error); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_DATA, + &data, &argp->error); e_free_trans_data: vfree(trans_data); @@ -1772,8 +1760,8 @@ static int csv3_receive_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *arg clflush_cache_range(shadow_vmcb_block, PAGE_SIZE); clflush_cache_range(secure_vmcb_block, PAGE_SIZE); - ret = csv_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT, &data, - &argp->error); + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT, + &data, &argp->error); if (ret) goto e_free_shadow_vmcb_block; @@ 
-1834,6 +1822,9 @@ static int csv3_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code struct csv3_data_update_npt *update_npt; int psp_ret; + if (!hygon_kvm_hooks.sev_hooks_installed) + return -EFAULT; + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); if (!update_npt) { r = -ENOMEM; @@ -1846,7 +1837,8 @@ static int csv3_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code update_npt->page_attr_mask = page_attr_mask.val; update_npt->level = CSV3_PG_LEVEL_4K; - r = csv_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, update_npt, &psp_ret); + r = hygon_kvm_hooks.sev_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, + update_npt, &psp_ret); if (psp_ret != SEV_RET_SUCCESS) r = -EFAULT; @@ -1865,6 +1857,9 @@ static int __csv3_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); int psp_ret = 0; + if (!hygon_kvm_hooks.sev_hooks_installed) + return -EFAULT; + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); if (!update_npt) { r = -ENOMEM; @@ -1880,7 +1875,8 @@ static int __csv3_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, if (!csv3_is_mmio_pfn(pfn)) update_npt->spa |= sme_me_mask; - r = csv_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, update_npt, &psp_ret); + r = hygon_kvm_hooks.sev_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, + update_npt, &psp_ret); kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); kvm_flush_remote_tlbs(vcpu->kvm); @@ -2255,6 +2251,10 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) r = csv_receive_update_vmsa(kvm, &sev_cmd); break; case KVM_CSV3_INIT: + if (!csv3_enabled) { + r = -ENOTTY; + goto out; + } r = csv3_guest_init(kvm, &sev_cmd); break; case KVM_CSV3_LAUNCH_ENCRYPT_DATA: @@ -2290,6 +2290,7 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd))) r = -EFAULT; +out: mutex_unlock(&kvm->lock); return r; } @@ -2561,6 +2562,15 @@ void __init csv_hardware_setup(unsigned int max_csv_asid) * will not work if 
the allocation fails. */ csv_alloc_asid_userid_array(nr_asids); + + /* CSV3 depends on X86_FEATURE_CSV3 */ + if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) + csv3_enabled = true; + else + csv3_enabled = false; + + pr_info("CSV3 %s (ASIDs 1 - %u)\n", + csv3_enabled ? "enabled" : "disabled", max_csv_asid); } void csv_hardware_unsetup(void) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 2e3463f1ff0d..f2710d7ed7ed 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include -- Gitee From cc8b3cc960796c417c95b0a9f8e336d8dd94c1ae Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Mon, 4 Nov 2024 10:58:37 +0800 Subject: [PATCH 1626/2138] anolis: x86/mce: Avoid triggering a schedule call in the NMI context MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #11627 In the original patch solution, when a UCR-type DRAM error occurs, the flow does not enter do_machine_check->mce_panic; instead, it exits after executing do_machine_check and continues with the irqentry_exit_to_user_mode function. This flow triggers a schedule call. Since irqentry_nmi_enter calls __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET), if a schedule occurs without executing irqentry_nmi_exit, the system will call the preempt_disable() function. Then, __schedule will call schedule_debug or determine in_atomic_preempt_off() to be true, leading to __schedule_bug and reporting the following error: BUG: scheduling while atomic:…… Therefore, it is necessary to adjust the position of irqentry_nmi_enter. 
Signed-off-by: LeoLiu-oc Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4064 --- arch/x86/kernel/cpu/mce/core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 1efb2a807981..7730f006715a 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -2124,15 +2124,15 @@ static __always_inline void exc_machine_check_user(struct pt_regs *regs) { irqentry_state_t irq_state; - irq_state = irqentry_nmi_enter(regs); - irqentry_enter_from_user_mode(regs); - do_machine_check(regs); + irq_state = irqentry_nmi_enter(regs); - irqentry_exit_to_user_mode(regs); + do_machine_check(regs); irqentry_nmi_exit(regs, irq_state); + + irqentry_exit_to_user_mode(regs); } #ifdef CONFIG_X86_64 -- Gitee From fb3d627531a5c3a9b85e3e899423f2fe4d1abb4e Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Mon, 4 Nov 2024 09:46:21 +0800 Subject: [PATCH 1627/2138] anolis: LoongArch: config: Disable CONFIG_LOONGSON3_ACPI_CPUFREQ by default ANBZ: #11609 The CONFIG_LOONGSON3_ACPI_CPUFREQ option attempts to use ACPI for CPU frequency management on Loongson3 platforms.However, this approach is known to be unstable and can lead to unexpected behavior. 
Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4063 --- arch/loongarch/configs/loongson3_defconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 0b27346f6140..fe266926e134 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -49,7 +49,6 @@ CONFIG_CPU_HAS_LBT=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_LOONGSON3_ACPI_CPUFREQ=y CONFIG_HIBERNATION=y CONFIG_ACPI_SPCR_TABLE=y CONFIG_ACPI_TAD=y -- Gitee From c0f2108e740c4552fe2a258a30f400cb1798d9cb Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Mon, 4 Nov 2024 14:03:26 +0800 Subject: [PATCH 1628/2138] anolis: sched: remove the limit about cpu burst setting ANBZ: #8586 When porting anolis own cpu burst to ANCK 6.6, we missed to remove the limited that burst must under quota which is in upstream version. Signed-off-by: Tianchen Ding Reviewed-by: Cruz Zhao Link: https://gitee.com/anolis/cloud-kernel/pulls/4065 --- kernel/sched/core.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 71c397cba58b..72f9d0f8a14d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -11118,10 +11118,6 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, if (quota != RUNTIME_INF && quota > max_cfs_runtime) return -EINVAL; - if (quota != RUNTIME_INF && (burst > quota || - burst + quota > max_cfs_runtime)) - return -EINVAL; - /* * Bound burst to defend burst against overflow during bandwidth shift. 
*/ -- Gitee From 71681a211fcbb594f749b88fb124e47a56ef2125 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Fri, 29 Apr 2022 14:35:44 +0800 Subject: [PATCH 1629/2138] anolis: mm: disable proactive compaction by default ANBZ: #11647 This disables proactive compaction by default, i.e., changes the default value of sysctl_compaction_proactiveness to 0 from 20. Users who need this feature can manually enable it, as shown below. $ echo 20 > /proc/sys/vm/compaction_proactiveness Signed-off-by: Xu Yu Reviewed-by: zhongjiang-ali Reviewed-by: Gang Deng Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4068 --- mm/compaction.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/compaction.c b/mm/compaction.c index 1b583b5cb1dd..219e6b48f5fc 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1917,7 +1917,7 @@ static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNE * aggressively the kernel should compact memory in the * background. It takes values in the range [0, 100]. */ -static unsigned int __read_mostly sysctl_compaction_proactiveness = 20; +static unsigned int __read_mostly sysctl_compaction_proactiveness; static int sysctl_extfrag_threshold = 500; static int __read_mostly sysctl_compact_memory; -- Gitee From 1353fd79c951ae4dc7b1c101c437aba5bb361804 Mon Sep 17 00:00:00 2001 From: "YiLin.Li" Date: Mon, 4 Nov 2024 18:07:07 +0800 Subject: [PATCH 1630/2138] anolis: ima/Kconfig: fix TCG_HYGON select error ANBZ: #11651 TCG_HYGON should depend on ACPI && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP, which is same as the definition in drivers/char/tpm/Kconfig. 
Signed-off-by: YiLin.Li Reviewed-by: Tianjia Zhang Link: https://gitee.com/anolis/cloud-kernel/pulls/4074 --- security/integrity/ima/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig index 3a28beee0720..cebba0ab51bc 100644 --- a/security/integrity/ima/Kconfig +++ b/security/integrity/ima/Kconfig @@ -11,7 +11,7 @@ config IMA select TCG_TPM if HAS_IOMEM select TCG_TIS if TCG_TPM && X86 select TCG_CRB if TCG_TPM && ACPI - select TCG_HYGON if TCG_TPM && CPU_SUP_HYGON + select TCG_HYGON if TCG_TPM && ACPI && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP select TCG_IBMVTPM if TCG_TPM && PPC_PSERIES select INTEGRITY_AUDIT if AUDIT help -- Gitee From 064174abd561fd936d15cc74b65ca2c30dfa1729 Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Mon, 4 Nov 2024 14:40:48 +0800 Subject: [PATCH 1631/2138] anolis: config: loongarch: sync anolis_defconfig from loongson3_defconfig ANBZ: #11645 Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4066 --- arch/loongarch/configs/anolis_defconfig | 6638 ----------------------- 1 file changed, 6638 deletions(-) diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig index 1ab6f0003830..abd3ea974a00 100644 --- a/arch/loongarch/configs/anolis_defconfig +++ b/arch/loongarch/configs/anolis_defconfig @@ -1,126 +1,13 @@ -# -# Automatically generated file; DO NOT EDIT. 
-# Linux/loongarch 6.6.7 Kernel Configuration -# -CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" -CONFIG_CC_IS_GCC=y -CONFIG_GCC_VERSION=200000 -CONFIG_CLANG_VERSION=0 -CONFIG_AS_IS_GNU=y -CONFIG_AS_VERSION=25000 -CONFIG_LD_IS_BFD=y -CONFIG_LD_VERSION=25000 -CONFIG_LLD_VERSION=0 -CONFIG_CC_CAN_LINK=y -CONFIG_CC_CAN_LINK_STATIC=y -CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y -CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y -CONFIG_TOOLS_SUPPORT_RELR=y -CONFIG_CC_HAS_ASM_INLINE=y -CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y -CONFIG_PAHOLE_VERSION=117 -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_TABLE_SORT=y - -# -# General setup -# -CONFIG_INIT_ENV_ARG_LIMIT=32 -# CONFIG_COMPILE_TEST is not set -# CONFIG_WERROR is not set -CONFIG_LOCALVERSION="" # CONFIG_LOCALVERSION_AUTO is not set -CONFIG_BUILD_SALT="" -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_XZ=y -CONFIG_HAVE_KERNEL_LZO=y -CONFIG_HAVE_KERNEL_LZ4=y -CONFIG_HAVE_KERNEL_ZSTD=y -CONFIG_KERNEL_GZIP=y -# CONFIG_KERNEL_LZMA is not set -# CONFIG_KERNEL_XZ is not set -# CONFIG_KERNEL_LZO is not set -# CONFIG_KERNEL_LZ4 is not set -# CONFIG_KERNEL_ZSTD is not set -CONFIG_DEFAULT_INIT="" -CONFIG_DEFAULT_HOSTNAME="(none)" CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -# CONFIG_WATCH_QUEUE is not set -CONFIG_CROSS_MEMORY_ATTACH=y -# CONFIG_USELIB is not set -CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_AUDITSYSCALL=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y -CONFIG_GENERIC_IRQ_INJECTION=y -CONFIG_GENERIC_IRQ_CHIP=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y -# CONFIG_GENERIC_IRQ_DEBUGFS is not set -# end of IRQ subsystem - -CONFIG_GENERIC_IRQ_MULTI_HANDLER=y -CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y 
-CONFIG_GENERIC_CMOS_UPDATE=y -CONFIG_CONTEXT_TRACKING=y -CONFIG_CONTEXT_TRACKING_IDLE=y - -# -# Timers subsystem -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set -CONFIG_NO_HZ_IDLE=y -# CONFIG_NO_HZ_FULL is not set CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y -# end of Timers subsystem - -CONFIG_BPF=y -CONFIG_HAVE_EBPF_JIT=y - -# -# BPF subsystem -# CONFIG_BPF_SYSCALL=y CONFIG_BPF_JIT=y -# CONFIG_BPF_JIT_ALWAYS_ON is not set # CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set -CONFIG_USERMODE_DRIVER=y -# CONFIG_BPF_PRELOAD is not set -# CONFIG_BPF_LSM is not set -# end of BPF subsystem - -CONFIG_PREEMPT_VOLUNTARY_BUILD=y -# CONFIG_PREEMPT_NONE is not set CONFIG_PREEMPT_VOLUNTARY=y -# CONFIG_PREEMPT is not set -# CONFIG_SCHED_CORE is not set - -# -# CPU/Task time and stats accounting -# -CONFIG_TICK_CPU_ACCOUNTING=y -# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_SCHED_AVG_IRQ=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -128,774 +15,133 @@ CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y CONFIG_PSI=y -# CONFIG_PSI_DEFAULT_DISABLED is not set -# end of CPU/Task time and stats accounting - -CONFIG_CPU_ISOLATION=y - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -# CONFIG_RCU_EXPERT is not set -CONFIG_TREE_SRCU=y -CONFIG_TASKS_RCU_GENERIC=y -CONFIG_TASKS_RUDE_RCU=y -CONFIG_TASKS_TRACE_RCU=y -CONFIG_RCU_STALL_COMMON=y -CONFIG_RCU_NEED_SEGCBLIST=y -# end of RCU Subsystem - -# CONFIG_IKCONFIG is not set -# CONFIG_IKHEADERS is not set CONFIG_LOG_BUF_SHIFT=18 -CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 -# CONFIG_PRINTK_INDEX is not set -CONFIG_GENERIC_SCHED_CLOCK=y - -# -# Scheduler features -# -# end of Scheduler features - -CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y -CONFIG_CC_HAS_INT128=y -CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" -CONFIG_GCC11_NO_ARRAY_BOUNDS=y -CONFIG_CC_NO_ARRAY_BOUNDS=y CONFIG_NUMA_BALANCING=y -CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y 
-CONFIG_CGROUPS=y -CONFIG_PAGE_COUNTER=y -# CONFIG_CGROUP_FAVOR_DYNMODS is not set CONFIG_MEMCG=y -CONFIG_MEMCG_KMEM=y CONFIG_BLK_CGROUP=y -CONFIG_CGROUP_WRITEBACK=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y -CONFIG_SCHED_MM_CID=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y CONFIG_CGROUP_BPF=y -# CONFIG_CGROUP_MISC is not set -# CONFIG_CGROUP_DEBUG is not set -CONFIG_SOCK_CGROUP_DATA=y CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_TIME_NS=y -CONFIG_IPC_NS=y CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y CONFIG_CHECKPOINT_RESTORE=y CONFIG_SCHED_AUTOGROUP=y -CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -CONFIG_RD_LZ4=y -CONFIG_RD_ZSTD=y -# CONFIG_BOOT_CONFIG is not set -CONFIG_INITRAMFS_PRESERVE_MTIME=y -CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_LD_ORPHAN_WARN=y -CONFIG_LD_ORPHAN_WARN_LEVEL="warn" -CONFIG_SYSCTL=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN=y -CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW=y CONFIG_EXPERT=y -CONFIG_MULTIUSER=y -# CONFIG_SGETMASK_SYSCALL is not set -CONFIG_SYSFS_SYSCALL=y -CONFIG_FHANDLE=y -CONFIG_POSIX_TIMERS=y -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_FUTEX_PI=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_IO_URING=y -CONFIG_ADVISE_SYSCALLS=y -CONFIG_MEMBARRIER=y -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_SELFTEST is not set CONFIG_KALLSYMS_ALL=y -CONFIG_KALLSYMS_BASE_RELATIVE=y -CONFIG_KCMP=y -CONFIG_RSEQ=y -CONFIG_CACHESTAT_SYSCALL=y -# CONFIG_DEBUG_RSEQ is not set -CONFIG_HAVE_PERF_EVENTS=y -CONFIG_PERF_USE_VMALLOC=y -# CONFIG_PC104 is not set - 
-# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -# end of Kernel Performance Events And Counters - -CONFIG_SYSTEM_DATA_VERIFICATION=y CONFIG_PROFILING=y -CONFIG_TRACEPOINTS=y - -# -# Kexec and crash features -# -CONFIG_CRASH_CORE=y -CONFIG_KEXEC_CORE=y CONFIG_KEXEC=y CONFIG_CRASH_DUMP=y -# end of Kexec and crash features -# end of General setup - -CONFIG_LOONGARCH=y -CONFIG_64BIT=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_GENERIC_CSUM=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_L1_CACHE_SHIFT=6 -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_MACH_LOONGSON64=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_PAGE_SIZE_16KB=y -CONFIG_PGTABLE_3LEVEL=y -CONFIG_PGTABLE_LEVELS=3 -CONFIG_SCHED_OMIT_FRAME_POINTER=y -CONFIG_AS_HAS_EXPLICIT_RELOCS=y -CONFIG_AS_HAS_FCSR_CLASS=y -CONFIG_AS_HAS_LSX_EXTENSION=y -CONFIG_AS_HAS_LASX_EXTENSION=y -CONFIG_AS_HAS_LBT_EXTENSION=y -CONFIG_AS_HAS_LVZ_EXTENSION=y - -# -# Kernel type and options -# -# CONFIG_HZ_100 is not set -CONFIG_HZ_250=y -# CONFIG_HZ_300 is not set -# CONFIG_HZ_1000 is not set -CONFIG_HZ=250 -CONFIG_SCHED_HRTICK=y -# CONFIG_4KB_3LEVEL is not set -# CONFIG_4KB_4LEVEL is not set -# CONFIG_16KB_2LEVEL is not set -CONFIG_16KB_3LEVEL=y -# CONFIG_64KB_2LEVEL is not set -# CONFIG_64KB_3LEVEL is not set -CONFIG_CMDLINE="" -CONFIG_CMDLINE_BOOTLOADER=y -# CONFIG_CMDLINE_EXTEND is not set -# CONFIG_CMDLINE_FORCE is not set -CONFIG_DMI=y -CONFIG_EFI=y -CONFIG_EFI_STUB=y -CONFIG_SCHED_SMT=y -CONFIG_SMP=y -CONFIG_HOTPLUG_CPU=y CONFIG_NR_CPUS=256 CONFIG_NUMA=y -CONFIG_NODES_SHIFT=6 -CONFIG_ARCH_FORCE_MAX_ORDER=11 CONFIG_ARCH_IOREMAP=y -CONFIG_ARCH_STRICT_ALIGN=y -CONFIG_CPU_HAS_FPU=y CONFIG_CPU_HAS_LSX=y CONFIG_CPU_HAS_LASX=y CONFIG_CPU_HAS_LBT=y -CONFIG_CPU_HAS_PREFETCH=y -CONFIG_ARCH_SUPPORTS_KEXEC=y -CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y -CONFIG_ARCH_SELECTS_CRASH_DUMP=y -CONFIG_RELOCATABLE=y -CONFIG_SECCOMP=y -# 
end of Kernel type and options - -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_ENABLE_THP_MIGRATION=y -CONFIG_ARCH_MEMORY_PROBE=y -CONFIG_MMU=y -CONFIG_ARCH_MMAP_RND_BITS_MIN=12 -CONFIG_ARCH_MMAP_RND_BITS_MAX=18 -CONFIG_ARCH_SUPPORTS_UPROBES=y - -# -# Power management options -# -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y - -# -# CPU Frequency scaling -# CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y -# CONFIG_CPU_FREQ_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set -# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set - -# -# CPU frequency scaling drivers -# -# CONFIG_CPUFREQ_DT is not set -# CONFIG_CPUFREQ_DT_PLATDEV is not set CONFIG_LOONGSON3_ACPI_CPUFREQ=y -# end of CPU Frequency scaling - -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -# CONFIG_SUSPEND_SKIP_SYNC is not set -CONFIG_HIBERNATE_CALLBACKS=y CONFIG_HIBERNATION=y -CONFIG_HIBERNATION_SNAPSHOT_DEV=y -CONFIG_PM_STD_PARTITION="" -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -# CONFIG_PM_AUTOSLEEP is not set -# CONFIG_PM_USERSPACE_AUTOSLEEP is not set -# CONFIG_PM_WAKELOCKS is not set -CONFIG_PM=y -# CONFIG_PM_DEBUG is not set -CONFIG_PM_CLK=y -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -CONFIG_CPU_PM=y -# CONFIG_ENERGY_MODEL is not set -CONFIG_ARCH_SUPPORTS_ACPI=y -CONFIG_ACPI=y -CONFIG_ACPI_GENERIC_GSI=y -CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y -# CONFIG_ACPI_DEBUGGER is not set CONFIG_ACPI_SPCR_TABLE=y -CONFIG_ACPI_SLEEP=y -# CONFIG_ACPI_EC_DEBUGFS is not set -CONFIG_ACPI_AC=y -CONFIG_ACPI_BATTERY=y -CONFIG_ACPI_BUTTON=y 
-CONFIG_ACPI_VIDEO=y -CONFIG_ACPI_FAN=y CONFIG_ACPI_TAD=y CONFIG_ACPI_DOCK=y -CONFIG_ACPI_CPU_FREQ_PSS=y -CONFIG_ACPI_PROCESSOR_IDLE=y -CONFIG_ACPI_MCFG=y -CONFIG_ACPI_PROCESSOR=y CONFIG_ACPI_IPMI=m -CONFIG_ACPI_HOTPLUG_CPU=y -CONFIG_ACPI_THERMAL=y -CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y -CONFIG_ACPI_TABLE_UPGRADE=y -# CONFIG_ACPI_DEBUG is not set CONFIG_ACPI_PCI_SLOT=y -CONFIG_ACPI_CONTAINER=y CONFIG_ACPI_HOTPLUG_MEMORY=y -# CONFIG_ACPI_HED is not set -# CONFIG_ACPI_CUSTOM_METHOD is not set -# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set -CONFIG_ACPI_NUMA=y -# CONFIG_ACPI_HMAT is not set -CONFIG_ACPI_WATCHDOG=y -# CONFIG_ACPI_CONFIGFS is not set -# CONFIG_ACPI_PFRUT is not set -CONFIG_ACPI_PPTT=y -# CONFIG_ACPI_FFH is not set -# CONFIG_PMIC_OPREGION is not set -# end of Power management options - -CONFIG_HAVE_KVM=y -CONFIG_HAVE_KVM_DIRTY_RING=y -CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y -CONFIG_HAVE_KVM_EVENTFD=y -CONFIG_KVM_MMIO=y -CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y -CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y -CONFIG_KVM_XFER_TO_GUEST_WORK=y -CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y CONFIG_VIRTUALIZATION=y CONFIG_KVM=y - -# -# General architecture-dependent options -# -CONFIG_GENERIC_ENTRY=y CONFIG_KPROBES=y CONFIG_JUMP_LABEL=y -# CONFIG_STATIC_KEYS_SELFTEST is not set -CONFIG_KPROBES_ON_FTRACE=y -CONFIG_UPROBES=y -CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y -CONFIG_ARCH_USE_BUILTIN_BSWAP=y -CONFIG_KRETPROBES=y -CONFIG_KRETPROBE_ON_RETHOOK=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_KPROBES_ON_FTRACE=y -CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y -CONFIG_HAVE_NMI=y -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y -CONFIG_ARCH_WANTS_NO_INSTR=y -CONFIG_HAVE_ASM_MODVERSIONS=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y -CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y 
-CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y -CONFIG_MMU_GATHER_MERGE_VMAS=y -CONFIG_MMU_LAZY_TLB_REFCOUNT=y -CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y -CONFIG_HAVE_ARCH_SECCOMP=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP_FILTER=y -# CONFIG_SECCOMP_CACHE_DEBUG is not set -CONFIG_HAVE_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR=y -CONFIG_STACKPROTECTOR_STRONG=y -CONFIG_ARCH_SUPPORTS_LTO_CLANG=y -CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y -CONFIG_LTO_NONE=y -CONFIG_HAVE_CONTEXT_TRACKING_USER=y -CONFIG_HAVE_TIF_NOHZ=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_ARCH_WANT_PMD_MKWRITE=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_HAVE_ARCH_MMAP_RND_BITS=y -CONFIG_HAVE_EXIT_THREAD=y -CONFIG_ARCH_MMAP_RND_BITS=12 -CONFIG_PAGE_SIZE_LESS_THAN_64KB=y -CONFIG_PAGE_SIZE_LESS_THAN_256KB=y -CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y -# CONFIG_COMPAT_32BIT_TIME is not set -CONFIG_ARCH_HAS_PHYS_TO_DMA=y -CONFIG_ARCH_USE_MEMREMAP_PROT=y -# CONFIG_LOCK_EVENT_COUNTS is not set -CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -# end of GCOV-based kernel profiling - -CONFIG_HAVE_GCC_PLUGINS=y -CONFIG_GCC_PLUGINS=y -# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set -CONFIG_FUNCTION_ALIGNMENT=0 -# end of General architecture-dependent options - -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULE_SIG_FORMAT=y CONFIG_MODULES=y -# CONFIG_MODULE_DEBUG is not set CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y -# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set CONFIG_MODVERSIONS=y -CONFIG_ASM_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_MODULE_SIG=y -# CONFIG_MODULE_SIG_FORCE is not set 
-CONFIG_MODULE_SIG_ALL=y -# CONFIG_MODULE_SIG_SHA1 is not set -# CONFIG_MODULE_SIG_SHA224 is not set CONFIG_MODULE_SIG_SHA256=y -# CONFIG_MODULE_SIG_SHA384 is not set -# CONFIG_MODULE_SIG_SHA512 is not set -CONFIG_MODULE_SIG_HASH="sha256" -CONFIG_MODULE_COMPRESS_NONE=y -# CONFIG_MODULE_COMPRESS_GZIP is not set -# CONFIG_MODULE_COMPRESS_XZ is not set -# CONFIG_MODULE_COMPRESS_ZSTD is not set -# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set -CONFIG_MODPROBE_PATH="/sbin/modprobe" -# CONFIG_TRIM_UNUSED_KSYMS is not set -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_BLOCK=y -CONFIG_BLOCK_LEGACY_AUTOLOAD=y -CONFIG_BLK_CGROUP_RWSTAT=y -CONFIG_BLK_CGROUP_PUNT_BIO=y -CONFIG_BLK_DEV_BSG_COMMON=y -CONFIG_BLK_ICQ=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -CONFIG_BLK_DEV_INTEGRITY_T10=m CONFIG_BLK_DEV_ZONED=y CONFIG_BLK_DEV_THROTTLING=y -# CONFIG_BLK_DEV_THROTTLING_LOW is not set CONFIG_BLK_WBT=y -CONFIG_BLK_WBT_MQ=y -# CONFIG_BLK_CGROUP_IOLATENCY is not set -# CONFIG_BLK_CGROUP_FC_APPID is not set -# CONFIG_BLK_CGROUP_IOCOST is not set -# CONFIG_BLK_CGROUP_IOPRIO is not set -CONFIG_BLK_DEBUG_FS=y -CONFIG_BLK_DEBUG_FS_ZONED=y -# CONFIG_BLK_SED_OPAL is not set -# CONFIG_BLK_INLINE_ENCRYPTION is not set - -# -# Partition Types -# CONFIG_PARTITION_ADVANCED=y -# CONFIG_ACORN_PARTITION is not set -# CONFIG_AIX_PARTITION is not set -# CONFIG_OSF_PARTITION is not set -# CONFIG_AMIGA_PARTITION is not set -# CONFIG_ATARI_PARTITION is not set -# CONFIG_MAC_PARTITION is not set -CONFIG_MSDOS_PARTITION=y CONFIG_BSD_DISKLABEL=y -# CONFIG_MINIX_SUBPARTITION is not set -# CONFIG_SOLARIS_X86_PARTITION is not set CONFIG_UNIXWARE_DISKLABEL=y -# CONFIG_LDM_PARTITION is not set -# CONFIG_SGI_PARTITION is not set -# CONFIG_ULTRIX_PARTITION is not set -# CONFIG_SUN_PARTITION is not set -# CONFIG_KARMA_PARTITION is not set -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -# CONFIG_CMDLINE_PARTITION is not set -# end of Partition Types - -CONFIG_BLK_MQ_PCI=y 
-CONFIG_BLK_MQ_VIRTIO=y -CONFIG_BLK_PM=y -CONFIG_BLOCK_HOLDER_DEPRECATED=y -CONFIG_BLK_MQ_STACKING=y - -# -# IO Schedulers -# -CONFIG_MQ_IOSCHED_DEADLINE=y -CONFIG_MQ_IOSCHED_KYBER=y CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y -# CONFIG_BFQ_CGROUP_DEBUG is not set -# end of IO Schedulers - -CONFIG_PREEMPT_NOTIFIERS=y -CONFIG_PADATA=y -CONFIG_ASN1=y -CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y -CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_LOCK=y -CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y -CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y -CONFIG_ARCH_INLINE_READ_LOCK=y -CONFIG_ARCH_INLINE_READ_LOCK_BH=y -CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y -CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_READ_UNLOCK=y -CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y -CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y -CONFIG_ARCH_INLINE_WRITE_LOCK=y -CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y -CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y -CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y -CONFIG_INLINE_SPIN_TRYLOCK=y -CONFIG_INLINE_SPIN_TRYLOCK_BH=y -CONFIG_INLINE_SPIN_LOCK=y -CONFIG_INLINE_SPIN_LOCK_BH=y -CONFIG_INLINE_SPIN_LOCK_IRQ=y -CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y -CONFIG_INLINE_SPIN_UNLOCK_BH=y -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y -CONFIG_INLINE_READ_LOCK=y -CONFIG_INLINE_READ_LOCK_BH=y -CONFIG_INLINE_READ_LOCK_IRQ=y -CONFIG_INLINE_READ_LOCK_IRQSAVE=y -CONFIG_INLINE_READ_UNLOCK=y -CONFIG_INLINE_READ_UNLOCK_BH=y -CONFIG_INLINE_READ_UNLOCK_IRQ=y -CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y -CONFIG_INLINE_WRITE_LOCK=y -CONFIG_INLINE_WRITE_LOCK_BH=y -CONFIG_INLINE_WRITE_LOCK_IRQ=y 
-CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y -CONFIG_INLINE_WRITE_UNLOCK=y -CONFIG_INLINE_WRITE_UNLOCK_BH=y -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y -CONFIG_QUEUED_SPINLOCKS=y -CONFIG_ARCH_USE_QUEUED_RWLOCKS=y -CONFIG_QUEUED_RWLOCKS=y -CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y -CONFIG_CK_KABI_RESERVE=y -CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y -CONFIG_FREEZER=y - -# -# Executable file formats -# -CONFIG_BINFMT_ELF=y -CONFIG_ARCH_BINFMT_ELF_STATE=y -CONFIG_ELFCORE=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_BINFMT_SCRIPT=y CONFIG_BINFMT_MISC=m -CONFIG_COREDUMP=y -# end of Executable file formats - -# -# Memory Management options -# -CONFIG_ZPOOL=y -CONFIG_SWAP=y CONFIG_ZSWAP=y -# CONFIG_ZSWAP_DEFAULT_ON is not set -# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set -# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y -CONFIG_ZSWAP_COMPRESSOR_DEFAULT="zstd" -CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y -# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set -# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set -CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" -CONFIG_ZBUD=y CONFIG_Z3FOLD=y CONFIG_ZSMALLOC=y CONFIG_ZSMALLOC_STAT=y -CONFIG_ZSMALLOC_CHAIN_SIZE=8 - -# -# SLAB allocator options -# -# CONFIG_SLAB_DEPRECATED is not set -CONFIG_SLUB=y -# CONFIG_SLUB_TINY is not set -CONFIG_SLAB_MERGE_DEFAULT=y CONFIG_SLAB_FREELIST_RANDOM=y -# CONFIG_SLAB_FREELIST_HARDENED is not set -# CONFIG_SLUB_STATS is not set -CONFIG_SLUB_CPU_PARTIAL=y -# CONFIG_RANDOM_KMALLOC_CACHES is not set -# end of SLAB allocator options - # CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set # CONFIG_COMPAT_BRK is not 
set -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_SPARSEMEM_MANUAL=y -CONFIG_SPARSEMEM=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y -CONFIG_HAVE_FAST_GUP=y -CONFIG_ARCH_KEEP_MEMBLOCK=y -CONFIG_NUMA_KEEP_MEMINFO=y -CONFIG_MEMORY_ISOLATION=y -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y CONFIG_MEMORY_HOTREMOVE=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_MEMORY_BALLOON=y -CONFIG_BALLOON_COMPACTION=y -CONFIG_COMPACTION=y -CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 -CONFIG_PAGE_REPORTING=y -CONFIG_MIGRATION=y -CONFIG_CONTIG_ALLOC=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_MMU_NOTIFIER=y CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set -# CONFIG_READ_ONLY_THP_FOR_FS is not set -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -CONFIG_USE_PERCPU_NUMA_NODE_ID=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y CONFIG_CMA=y -# CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set -# CONFIG_CMA_SYSFS is not set -CONFIG_CMA_AREAS=19 -# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set -CONFIG_PAGE_IDLE_FLAG=y CONFIG_IDLE_PAGE_TRACKING=y -CONFIG_ZONE_DMA32=y -CONFIG_HMM_MIRROR=y -CONFIG_VM_EVENT_COUNTERS=y -# CONFIG_PERCPU_STATS is not set -# CONFIG_GUP_TEST is not set -# CONFIG_DMAPOOL_TEST is not set -CONFIG_ARCH_HAS_PTE_SPECIAL=y -CONFIG_MEMFD_CREATE=y -# CONFIG_ANON_VMA_NAME is not set CONFIG_USERFAULTFD=y -# CONFIG_LRU_GEN is not set -CONFIG_LOCK_MM_AND_FIND_VMA=y - -# -# Data Access Monitoring -# -# CONFIG_DAMON is not set -# end of Data Access Monitoring -# end of Memory Management options - CONFIG_NET=y -CONFIG_NET_INGRESS=y -CONFIG_NET_EGRESS=y -CONFIG_NET_XGRESS=y -CONFIG_NET_REDIRECT=y -CONFIG_SKB_EXTENSIONS=y - -# -# Networking options -# CONFIG_PACKET=y CONFIG_PACKET_DIAG=m 
-CONFIG_UNIX=y -CONFIG_UNIX_SCM=y -CONFIG_AF_UNIX_OOB=y CONFIG_UNIX_DIAG=m CONFIG_TLS=m CONFIG_TLS_DEVICE=y CONFIG_TLS_TOE=y -CONFIG_XFRM=y -CONFIG_XFRM_OFFLOAD=y -CONFIG_XFRM_ALGO=y CONFIG_XFRM_USER=y CONFIG_XFRM_INTERFACE=m CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y CONFIG_XFRM_STATISTICS=y -CONFIG_XFRM_AH=m -CONFIG_XFRM_ESP=m -CONFIG_XFRM_IPCOMP=m CONFIG_NET_KEY=m CONFIG_NET_KEY_MIGRATE=y -CONFIG_XFRM_ESPINTCP=y CONFIG_SMC=m CONFIG_SMC_DIAG=m CONFIG_XDP_SOCKETS=y CONFIG_XDP_SOCKETS_DIAG=m -CONFIG_NET_HANDSHAKE=y -CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_FIB_TRIE_STATS=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_MULTIPATH=y CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_IP_PNP_RARP=y CONFIG_NET_IPIP=m CONFIG_NET_IPGRE_DEMUX=m -CONFIG_NET_IP_TUNNEL=m CONFIG_NET_IPGRE=m CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE_COMMON=y CONFIG_IP_MROUTE=y CONFIG_IP_MROUTE_MULTIPLE_TABLES=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y CONFIG_NET_IPVTI=m -CONFIG_NET_UDP_TUNNEL=m -CONFIG_NET_FOU=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_ESPINTCP=y CONFIG_INET_IPCOMP=m -CONFIG_INET_TABLE_PERTURB_ORDER=16 -CONFIG_INET_XFRM_TUNNEL=m -CONFIG_INET_TUNNEL=m CONFIG_INET_DIAG=m -CONFIG_INET_TCP_DIAG=m CONFIG_INET_UDP_DIAG=m CONFIG_INET_RAW_DIAG=m CONFIG_INET_DIAG_DESTROY=y CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=m CONFIG_TCP_CONG_CUBIC=m -CONFIG_TCP_CONG_WESTWOOD=m -CONFIG_TCP_CONG_HTCP=m CONFIG_TCP_CONG_HSTCP=m CONFIG_TCP_CONG_HYBLA=m -CONFIG_TCP_CONG_VEGAS=m CONFIG_TCP_CONG_NV=m CONFIG_TCP_CONG_SCALABLE=m CONFIG_TCP_CONG_LP=m @@ -905,8 +151,6 @@ CONFIG_TCP_CONG_ILLINOIS=m CONFIG_TCP_CONG_DCTCP=m CONFIG_TCP_CONG_CDG=m CONFIG_TCP_CONG_BBR=m -CONFIG_DEFAULT_RENO=y -CONFIG_DEFAULT_TCP_CONG="reno" CONFIG_TCP_MD5SIG=y CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y @@ -919,17 +163,9 @@ 
CONFIG_INET6_ESPINTCP=y CONFIG_INET6_IPCOMP=m CONFIG_IPV6_MIP6=m CONFIG_IPV6_ILA=m -CONFIG_INET6_XFRM_TUNNEL=m -CONFIG_INET6_TUNNEL=m CONFIG_IPV6_VTI=m -CONFIG_IPV6_SIT=m CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=m CONFIG_IPV6_GRE=m -CONFIG_IPV6_FOU=m -CONFIG_IPV6_FOU_TUNNEL=m -CONFIG_IPV6_MULTIPLE_TABLES=y CONFIG_IPV6_SUBTREES=y CONFIG_IPV6_MROUTE=y CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y @@ -937,53 +173,21 @@ CONFIG_IPV6_PIMSM_V2=y CONFIG_IPV6_SEG6_LWTUNNEL=y CONFIG_IPV6_SEG6_HMAC=y CONFIG_IPV6_RPL_LWTUNNEL=y -# CONFIG_IPV6_IOAM6_LWTUNNEL is not set CONFIG_NETLABEL=y CONFIG_MPTCP=y -CONFIG_INET_MPTCP_DIAG=m -CONFIG_NETWORK_SECMARK=y -CONFIG_NET_PTP_CLASSIFY=y CONFIG_NETWORK_PHY_TIMESTAMPING=y CONFIG_NETFILTER=y -CONFIG_NETFILTER_ADVANCED=y CONFIG_BRIDGE_NETFILTER=m - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_INGRESS=y -CONFIG_NETFILTER_EGRESS=y -CONFIG_NETFILTER_SKIP_EGRESS=y -CONFIG_NETFILTER_NETLINK=m -CONFIG_NETFILTER_FAMILY_BRIDGE=y -CONFIG_NETFILTER_FAMILY_ARP=y -CONFIG_NETFILTER_BPF_LINK=y -# CONFIG_NETFILTER_NETLINK_HOOK is not set -CONFIG_NETFILTER_NETLINK_ACCT=m -CONFIG_NETFILTER_NETLINK_QUEUE=m -CONFIG_NETFILTER_NETLINK_LOG=m -CONFIG_NETFILTER_NETLINK_OSF=m CONFIG_NF_CONNTRACK=m -CONFIG_NF_LOG_SYSLOG=m -CONFIG_NETFILTER_CONNCOUNT=m -CONFIG_NF_CONNTRACK_MARK=y CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_ZONES=y -# CONFIG_NF_CONNTRACK_PROCFS is not set CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CONNTRACK_TIMEOUT=y CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_LABELS=y -CONFIG_NF_CONNTRACK_OVS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=y -CONFIG_NF_CT_PROTO_SCTP=y -CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_BROADCAST=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m @@ -994,16 +198,6 @@ CONFIG_NF_CT_NETLINK=m CONFIG_NF_CT_NETLINK_TIMEOUT=m 
CONFIG_NF_CT_NETLINK_HELPER=m CONFIG_NETFILTER_NETLINK_GLUE_CT=y -CONFIG_NF_NAT=m -CONFIG_NF_NAT_AMANDA=m -CONFIG_NF_NAT_FTP=m -CONFIG_NF_NAT_IRC=m -CONFIG_NF_NAT_SIP=m -CONFIG_NF_NAT_TFTP=m -CONFIG_NF_NAT_REDIRECT=y -CONFIG_NF_NAT_MASQUERADE=y -CONFIG_NF_NAT_OVS=y -CONFIG_NETFILTER_SYNPROXY=m CONFIG_NF_TABLES=m CONFIG_NF_TABLES_INET=y CONFIG_NF_TABLES_NETDEV=y @@ -1020,67 +214,41 @@ CONFIG_NFT_TUNNEL=m CONFIG_NFT_QUEUE=m CONFIG_NFT_QUOTA=m CONFIG_NFT_REJECT=m -CONFIG_NFT_REJECT_INET=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m -CONFIG_NFT_FIB=m CONFIG_NFT_FIB_INET=m CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m CONFIG_NFT_OSF=m CONFIG_NFT_TPROXY=m CONFIG_NFT_SYNPROXY=m -CONFIG_NF_DUP_NETDEV=m CONFIG_NFT_DUP_NETDEV=m CONFIG_NFT_FWD_NETDEV=m CONFIG_NFT_FIB_NETDEV=m -# CONFIG_NFT_REJECT_NETDEV is not set CONFIG_NF_FLOW_TABLE_INET=m CONFIG_NF_FLOW_TABLE=m -# CONFIG_NF_FLOW_TABLE_PROCFS is not set CONFIG_NETFILTER_XTABLES=y - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=m -CONFIG_NETFILTER_XT_CONNMARK=m CONFIG_NETFILTER_XT_SET=m - -# -# Xtables targets -# CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m CONFIG_NETFILTER_XT_TARGET_DSCP=m -CONFIG_NETFILTER_XT_TARGET_HL=m CONFIG_NETFILTER_XT_TARGET_HMARK=m CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m CONFIG_NETFILTER_XT_TARGET_LED=m CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_NAT=m -CONFIG_NETFILTER_XT_TARGET_NETMAP=m CONFIG_NETFILTER_XT_TARGET_NFLOG=m CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m CONFIG_NETFILTER_XT_TARGET_NOTRACK=m -CONFIG_NETFILTER_XT_TARGET_RATEEST=m -CONFIG_NETFILTER_XT_TARGET_REDIRECT=m -CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m CONFIG_NETFILTER_XT_TARGET_TEE=m CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_SECMARK=m 
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m - -# -# Xtables matches -# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m CONFIG_NETFILTER_XT_MATCH_BPF=m CONFIG_NETFILTER_XT_MATCH_CGROUP=m @@ -1092,14 +260,11 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ECN=m CONFIG_NETFILTER_XT_MATCH_ESP=m CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_HL=m CONFIG_NETFILTER_XT_MATCH_IPCOMP=m CONFIG_NETFILTER_XT_MATCH_IPRANGE=m CONFIG_NETFILTER_XT_MATCH_IPVS=m @@ -1119,7 +284,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SCTP=m CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m @@ -1127,10 +291,7 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m -# end of Core Netfilter Configuration - CONFIG_IP_SET=m -CONFIG_IP_SET_MAX=256 CONFIG_IP_SET_BITMAP_IP=m CONFIG_IP_SET_BITMAP_IPMAC=m CONFIG_IP_SET_BITMAP_PORT=m @@ -1150,21 +311,11 @@ CONFIG_IP_SET_LIST_SET=m CONFIG_IP_VS=m CONFIG_IP_VS_IPV6=y CONFIG_IP_VS_DEBUG=y -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# CONFIG_IP_VS_PROTO_TCP=y CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y CONFIG_IP_VS_PROTO_ESP=y CONFIG_IP_VS_PROTO_AH=y CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# CONFIG_IP_VS_RR=m CONFIG_IP_VS_WRR=m CONFIG_IP_VS_LC=m @@ -1178,43 +329,13 @@ CONFIG_IP_VS_SH=m CONFIG_IP_VS_MH=m CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m -# CONFIG_IP_VS_TWOS is not set - -# -# IPVS SH scheduler -# -CONFIG_IP_VS_SH_TAB_BITS=8 - -# -# IPVS MH 
scheduler -# -CONFIG_IP_VS_MH_TAB_INDEX=12 - -# -# IPVS application helper -# CONFIG_IP_VS_FTP=m -CONFIG_IP_VS_NFCT=y CONFIG_IP_VS_PE_SIP=m - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=m -CONFIG_NF_SOCKET_IPV4=m -CONFIG_NF_TPROXY_IPV4=m -CONFIG_NF_TABLES_IPV4=y -CONFIG_NFT_REJECT_IPV4=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m CONFIG_NF_TABLES_ARP=y -CONFIG_NF_DUP_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_NF_LOG_IPV4=m -CONFIG_NF_REJECT_IPV4=m -CONFIG_NF_NAT_SNMP_BASIC=m -CONFIG_NF_NAT_PPTP=m -CONFIG_NF_NAT_H323=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -1235,20 +356,8 @@ CONFIG_IP_NF_SECURITY=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -# end of IP: Netfilter Configuration - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_SOCKET_IPV6=m -CONFIG_NF_TPROXY_IPV6=m -CONFIG_NF_TABLES_IPV6=y -CONFIG_NFT_REJECT_IPV6=m CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_FIB_IPV6=m -CONFIG_NF_DUP_IPV6=m -CONFIG_NF_REJECT_IPV6=m -CONFIG_NF_LOG_IPV6=m CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m @@ -1270,9 +379,6 @@ CONFIG_IP6_NF_SECURITY=m CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m -# end of IPv6: Netfilter Configuration - -CONFIG_NF_DEFRAG_IPV6=m CONFIG_NF_TABLES_BRIDGE=m CONFIG_NFT_BRIDGE_META=m CONFIG_NFT_BRIDGE_REJECT=m @@ -1299,43 +405,19 @@ CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_BPFILTER=y -CONFIG_BPFILTER_UMH=m CONFIG_IP_DCCP=m -CONFIG_INET_DCCP_DIAG=m - -# -# DCCP CCIDs Configuration -# CONFIG_IP_DCCP_CCID2_DEBUG=y -CONFIG_IP_DCCP_CCID3=y CONFIG_IP_DCCP_CCID3_DEBUG=y -CONFIG_IP_DCCP_TFRC_LIB=y -CONFIG_IP_DCCP_TFRC_DEBUG=y -# end of DCCP CCIDs Configuration - -# -# DCCP Kernel Hacking -# CONFIG_IP_DCCP_DEBUG=y -# end of DCCP Kernel Hacking - -CONFIG_IP_SCTP=m CONFIG_SCTP_DBG_OBJCNT=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y -# 
CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set CONFIG_SCTP_COOKIE_HMAC_MD5=y -CONFIG_SCTP_COOKIE_HMAC_SHA1=y -CONFIG_INET_SCTP_DIAG=m CONFIG_RDS=m CONFIG_RDS_RDMA=m CONFIG_RDS_TCP=m CONFIG_RDS_DEBUG=y CONFIG_TIPC=m CONFIG_TIPC_MEDIA_IB=y -CONFIG_TIPC_MEDIA_UDP=y -CONFIG_TIPC_CRYPTO=y -CONFIG_TIPC_DIAG=m CONFIG_ATM=m CONFIG_ATM_CLIP=m CONFIG_ATM_CLIP_NO_ICMP=y @@ -1344,46 +426,30 @@ CONFIG_ATM_MPOA=m CONFIG_ATM_BR2684=m CONFIG_ATM_BR2684_IPFILTER=y CONFIG_L2TP=m -# CONFIG_L2TP_DEBUGFS is not set CONFIG_L2TP_V3=y CONFIG_L2TP_IP=m CONFIG_L2TP_ETH=m -CONFIG_STP=m -CONFIG_GARP=m -CONFIG_MRP=m CONFIG_BRIDGE=m -CONFIG_BRIDGE_IGMP_SNOOPING=y CONFIG_BRIDGE_VLAN_FILTERING=y CONFIG_BRIDGE_MRP=y -# CONFIG_BRIDGE_CFM is not set CONFIG_NET_DSA=m -# CONFIG_NET_DSA_TAG_NONE is not set CONFIG_NET_DSA_TAG_AR9331=m -CONFIG_NET_DSA_TAG_BRCM_COMMON=m CONFIG_NET_DSA_TAG_BRCM=m -# CONFIG_NET_DSA_TAG_BRCM_LEGACY is not set CONFIG_NET_DSA_TAG_BRCM_PREPEND=m -# CONFIG_NET_DSA_TAG_HELLCREEK is not set CONFIG_NET_DSA_TAG_GSWIP=m -CONFIG_NET_DSA_TAG_DSA_COMMON=m CONFIG_NET_DSA_TAG_DSA=m CONFIG_NET_DSA_TAG_EDSA=m CONFIG_NET_DSA_TAG_MTK=m CONFIG_NET_DSA_TAG_KSZ=m CONFIG_NET_DSA_TAG_OCELOT=m -# CONFIG_NET_DSA_TAG_OCELOT_8021Q is not set CONFIG_NET_DSA_TAG_QCA=m CONFIG_NET_DSA_TAG_RTL4_A=m -# CONFIG_NET_DSA_TAG_RTL8_4 is not set -# CONFIG_NET_DSA_TAG_RZN1_A5PSW is not set CONFIG_NET_DSA_TAG_LAN9303=m CONFIG_NET_DSA_TAG_SJA1105=m CONFIG_NET_DSA_TAG_TRAILER=m -# CONFIG_NET_DSA_TAG_XRS700X is not set CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y -CONFIG_LLC=m CONFIG_LLC2=m CONFIG_ATALK=m CONFIG_DEV_APPLETALK=m @@ -1393,18 +459,12 @@ CONFIG_X25=m CONFIG_LAPB=m CONFIG_PHONET=m CONFIG_6LOWPAN=m -# CONFIG_6LOWPAN_DEBUGFS is not set # CONFIG_6LOWPAN_NHC is not set CONFIG_IEEE802154=m CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y -CONFIG_IEEE802154_SOCKET=m CONFIG_IEEE802154_6LOWPAN=m CONFIG_MAC802154=m CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# CONFIG_NET_SCH_HTB=m 
CONFIG_NET_SCH_HFSC=m CONFIG_NET_SCH_PRIO=m @@ -1416,7 +476,6 @@ CONFIG_NET_SCH_TEQL=m CONFIG_NET_SCH_TBF=m CONFIG_NET_SCH_CBS=m CONFIG_NET_SCH_ETF=m -CONFIG_NET_SCH_MQPRIO_LIB=m CONFIG_NET_SCH_TAPRIO=m CONFIG_NET_SCH_GRED=m CONFIG_NET_SCH_NETEM=m @@ -1436,18 +495,7 @@ CONFIG_NET_SCH_INGRESS=m CONFIG_NET_SCH_PLUG=m CONFIG_NET_SCH_ETS=m CONFIG_NET_SCH_DEFAULT=y -# CONFIG_DEFAULT_FQ is not set -# CONFIG_DEFAULT_CODEL is not set CONFIG_DEFAULT_FQ_CODEL=y -# CONFIG_DEFAULT_FQ_PIE is not set -# CONFIG_DEFAULT_SFQ is not set -# CONFIG_DEFAULT_PFIFO_FAST is not set -CONFIG_DEFAULT_NET_SCH="fq_codel" - -# -# Classification -# -CONFIG_NET_CLS=y CONFIG_NET_CLS_BASIC=m CONFIG_NET_CLS_ROUTE4=m CONFIG_NET_CLS_FW=m @@ -1460,13 +508,11 @@ CONFIG_NET_CLS_BPF=m CONFIG_NET_CLS_FLOWER=m CONFIG_NET_CLS_MATCHALL=m CONFIG_NET_EMATCH=y -CONFIG_NET_EMATCH_STACK=32 CONFIG_NET_EMATCH_CMP=m CONFIG_NET_EMATCH_NBYTE=m CONFIG_NET_EMATCH_U32=m CONFIG_NET_EMATCH_META=m CONFIG_NET_EMATCH_TEXT=m -# CONFIG_NET_EMATCH_CANID is not set CONFIG_NET_EMATCH_IPSET=m CONFIG_NET_EMATCH_IPT=m CONFIG_NET_CLS_ACT=y @@ -1495,70 +541,29 @@ CONFIG_NET_IFE_SKBMARK=m CONFIG_NET_IFE_SKBPRIO=m CONFIG_NET_IFE_SKBTCINDEX=m CONFIG_NET_TC_SKB_EXT=y -CONFIG_NET_SCH_FIFO=y CONFIG_DCB=y CONFIG_DNS_RESOLVER=y CONFIG_BATMAN_ADV=m -CONFIG_BATMAN_ADV_BATMAN_V=y -CONFIG_BATMAN_ADV_BLA=y -CONFIG_BATMAN_ADV_DAT=y CONFIG_BATMAN_ADV_NC=y -CONFIG_BATMAN_ADV_MCAST=y CONFIG_BATMAN_ADV_DEBUG=y -# CONFIG_BATMAN_ADV_TRACING is not set CONFIG_OPENVSWITCH=m -CONFIG_OPENVSWITCH_GRE=m -CONFIG_OPENVSWITCH_VXLAN=m -CONFIG_OPENVSWITCH_GENEVE=m CONFIG_VSOCKETS=m -CONFIG_VSOCKETS_DIAG=m -CONFIG_VSOCKETS_LOOPBACK=m CONFIG_VIRTIO_VSOCKETS=m -CONFIG_VIRTIO_VSOCKETS_COMMON=m CONFIG_NETLINK_DIAG=m -CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=y CONFIG_MPLS_ROUTING=m CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_NSH=y CONFIG_HSR=m -CONFIG_NET_SWITCHDEV=y -CONFIG_NET_L3_MASTER_DEV=y CONFIG_QRTR=m CONFIG_QRTR_TUN=m CONFIG_NET_NCSI=y CONFIG_NCSI_OEM_CMD_GET_MAC=y -# 
CONFIG_NCSI_OEM_CMD_KEEP_PHY is not set -CONFIG_PCPU_DEV_REFCNT=y -CONFIG_MAX_SKB_FRAGS=17 -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_SOCK_RX_QUEUE_MAPPING=y -CONFIG_XPS=y CONFIG_CGROUP_NET_PRIO=y -CONFIG_CGROUP_NET_CLASSID=y -CONFIG_NET_RX_BUSY_POLL=y -CONFIG_BQL=y CONFIG_BPF_STREAM_PARSER=y -CONFIG_NET_FLOW_LIMIT=y - -# -# Network testing -# CONFIG_NET_PKTGEN=m -# CONFIG_NET_DROP_MONITOR is not set -# end of Network testing -# end of Networking options - -# CONFIG_HAMRADIO is not set CONFIG_CAN=m -CONFIG_CAN_RAW=m -CONFIG_CAN_BCM=m -CONFIG_CAN_GW=m -# CONFIG_CAN_J1939 is not set -# CONFIG_CAN_ISOTP is not set CONFIG_BT=m -CONFIG_BT_BREDR=y CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m @@ -1566,515 +571,95 @@ CONFIG_BT_BNEP_MC_FILTER=y CONFIG_BT_BNEP_PROTO_FILTER=y CONFIG_BT_CMTP=m CONFIG_BT_HIDP=m -CONFIG_BT_HS=y -CONFIG_BT_LE=y -CONFIG_BT_LE_L2CAP_ECRED=y -# CONFIG_BT_6LOWPAN is not set -# CONFIG_BT_LEDS is not set -# CONFIG_BT_MSFTEXT is not set -# CONFIG_BT_AOSPEXT is not set -CONFIG_BT_DEBUGFS=y -# CONFIG_BT_SELFTEST is not set - -# -# Bluetooth device drivers -# -CONFIG_BT_INTEL=m -CONFIG_BT_RTL=m CONFIG_BT_HCIBTUSB=m CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y -CONFIG_BT_HCIBTUSB_POLL_SYNC=y # CONFIG_BT_HCIBTUSB_BCM is not set -# CONFIG_BT_HCIBTUSB_MTK is not set -CONFIG_BT_HCIBTUSB_RTL=y CONFIG_BT_HCIBTSDIO=m CONFIG_BT_HCIUART=m -CONFIG_BT_HCIUART_H4=y CONFIG_BT_HCIUART_BCSP=y CONFIG_BT_HCIUART_ATH3K=y -# CONFIG_BT_HCIUART_INTEL is not set -# CONFIG_BT_HCIUART_AG6XX is not set CONFIG_BT_HCIBCM203X=m -# CONFIG_BT_HCIBCM4377 is not set CONFIG_BT_HCIBPA10X=m CONFIG_BT_HCIBFUSB=m CONFIG_BT_HCIVHCI=m CONFIG_BT_MRVL=m CONFIG_BT_MRVL_SDIO=m CONFIG_BT_ATH3K=m -# CONFIG_BT_MTKSDIO is not set -# CONFIG_BT_VIRTIO is not set -# end of Bluetooth device drivers - -# CONFIG_AF_RXRPC is not set -# CONFIG_AF_KCM is not set -CONFIG_STREAM_PARSER=y -# CONFIG_MCTP is not set -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y -CONFIG_WEXT_CORE=y -CONFIG_WEXT_PROC=y CONFIG_CFG80211=m -# 
CONFIG_NL80211_TESTMODE is not set -# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set -# CONFIG_CFG80211_CERTIFICATION_ONUS is not set -CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y -CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y -CONFIG_CFG80211_DEFAULT_PS=y -# CONFIG_CFG80211_DEBUGFS is not set -CONFIG_CFG80211_CRDA_SUPPORT=y CONFIG_CFG80211_WEXT=y CONFIG_MAC80211=m -CONFIG_MAC80211_HAS_RC=y -CONFIG_MAC80211_RC_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" -# CONFIG_MAC80211_MESH is not set -CONFIG_MAC80211_LEDS=y -# CONFIG_MAC80211_DEBUGFS is not set -# CONFIG_MAC80211_MESSAGE_TRACING is not set -# CONFIG_MAC80211_DEBUG_MENU is not set -CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 CONFIG_RFKILL=m -CONFIG_RFKILL_LEDS=y CONFIG_RFKILL_INPUT=y -# CONFIG_RFKILL_GPIO is not set CONFIG_NET_9P=y -CONFIG_NET_9P_FD=y CONFIG_NET_9P_VIRTIO=y -# CONFIG_NET_9P_RDMA is not set -# CONFIG_NET_9P_DEBUG is not set -# CONFIG_CAIF is not set -CONFIG_CEPH_LIB=m -# CONFIG_CEPH_LIB_PRETTYDEBUG is not set CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y -# CONFIG_NFC is not set -CONFIG_PSAMPLE=m -CONFIG_NET_IFE=m -CONFIG_LWTUNNEL=y -CONFIG_LWTUNNEL_BPF=y -CONFIG_DST_CACHE=y -CONFIG_GRO_CELLS=y -CONFIG_SOCK_VALIDATE_XMIT=y -CONFIG_NET_SELFTESTS=y -CONFIG_NET_SOCK_MSG=y -CONFIG_NET_DEVLINK=y -CONFIG_PAGE_POOL=y -# CONFIG_PAGE_POOL_STATS is not set -CONFIG_FAILOVER=m -CONFIG_ETHTOOL_NETLINK=y - -# -# Device Drivers -# -CONFIG_HAVE_PCI=y -CONFIG_PCI=y -CONFIG_PCI_DOMAINS=y -CONFIG_PCI_DOMAINS_GENERIC=y -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y CONFIG_PCIEAER=y CONFIG_PCIEAER_INJECT=m CONFIG_PCIE_ECRC=y -CONFIG_PCIEASPM=y -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCIE_PME=y CONFIG_PCIE_DPC=y -# CONFIG_PCIE_PTM is not set -# CONFIG_PCIE_EDR is not set -CONFIG_PCI_MSI=y -CONFIG_PCI_MSI_ARCH_FALLBACKS=y -CONFIG_PCI_QUIRKS=y -# CONFIG_PCI_DEBUG is not set -# 
CONFIG_PCI_REALLOC_ENABLE_AUTO is not set CONFIG_PCI_STUB=y CONFIG_PCI_PF_STUB=m -CONFIG_PCI_ATS=y -CONFIG_PCI_ECAM=y CONFIG_PCI_IOV=y -# CONFIG_PCI_PRI is not set -# CONFIG_PCI_PASID is not set -CONFIG_PCI_LABEL=y -# CONFIG_PCI_DYNAMIC_OF_NODES is not set -# CONFIG_PCIE_BUS_TUNE_OFF is not set -CONFIG_PCIE_BUS_DEFAULT=y -# CONFIG_PCIE_BUS_SAFE is not set -# CONFIG_PCIE_BUS_PERFORMANCE is not set -# CONFIG_PCIE_BUS_PEER2PEER is not set -CONFIG_VGA_ARB=y CONFIG_VGA_ARB_MAX_GPUS=64 -CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_ACPI=y -# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set -# CONFIG_HOTPLUG_PCI_CPCI is not set CONFIG_HOTPLUG_PCI_SHPC=y - -# -# PCI controller drivers -# -# CONFIG_PCI_FTPCI100 is not set -# CONFIG_PCI_HOST_GENERIC is not set -CONFIG_PCI_LOONGSON=y -# CONFIG_PCIE_MICROCHIP_HOST is not set -# CONFIG_PCIE_XILINX is not set - -# -# Cadence-based PCIe controllers -# -# CONFIG_PCIE_CADENCE_PLAT_HOST is not set -# CONFIG_PCI_J721E_HOST is not set -# end of Cadence-based PCIe controllers - -# -# DesignWare-based PCIe controllers -# -# CONFIG_PCI_MESON is not set -# CONFIG_PCIE_DW_PLAT_HOST is not set -# end of DesignWare-based PCIe controllers - -# -# Mobiveil-based PCIe controllers -# -# end of Mobiveil-based PCIe controllers -# end of PCI controller drivers - -# -# PCI Endpoint -# -# CONFIG_PCI_ENDPOINT is not set -# end of PCI Endpoint - -# -# PCI switch controller drivers -# -# CONFIG_PCI_SW_SWITCHTEC is not set -# end of PCI switch controller drivers - -# CONFIG_CXL_BUS is not set CONFIG_PCCARD=m # CONFIG_PCMCIA is not set -CONFIG_CARDBUS=y - -# -# PC-card bridges -# CONFIG_YENTA=m -CONFIG_YENTA_O2=y -CONFIG_YENTA_RICOH=y -CONFIG_YENTA_TI=y -CONFIG_YENTA_ENE_TUNE=y -CONFIG_YENTA_TOSHIBA=y CONFIG_RAPIDIO=y CONFIG_RAPIDIO_TSI721=y -CONFIG_RAPIDIO_DISC_TIMEOUT=30 CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y -# CONFIG_RAPIDIO_DMA_ENGINE is not set -# CONFIG_RAPIDIO_DEBUG is not set CONFIG_RAPIDIO_ENUM_BASIC=m CONFIG_RAPIDIO_CHMAN=m CONFIG_RAPIDIO_MPORT_CDEV=m - -# -# 
RapidIO Switch drivers -# -# CONFIG_RAPIDIO_CPS_XX is not set -# CONFIG_RAPIDIO_CPS_GEN2 is not set -# CONFIG_RAPIDIO_RXS_GEN3 is not set -# end of RapidIO Switch drivers - -# -# Generic Driver Options -# -CONFIG_AUXILIARY_BUS=y CONFIG_UEVENT_HELPER=y -CONFIG_UEVENT_HELPER_PATH="" CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y -# CONFIG_DEVTMPFS_SAFE is not set -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y - -# -# Firmware loader -# -CONFIG_FW_LOADER=y -CONFIG_FW_LOADER_DEBUG=y -CONFIG_FW_LOADER_PAGED_BUF=y -CONFIG_EXTRA_FIRMWARE="" -# CONFIG_FW_LOADER_USER_HELPER is not set CONFIG_FW_LOADER_COMPRESS=y -CONFIG_FW_LOADER_COMPRESS_XZ=y -# CONFIG_FW_LOADER_COMPRESS_ZSTD is not set -CONFIG_FW_CACHE=y -# CONFIG_FW_UPLOAD is not set -# end of Firmware loader - -CONFIG_WANT_DEV_COREDUMP=y -CONFIG_ALLOW_DEV_COREDUMP=y -CONFIG_DEV_COREDUMP=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set -# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set -CONFIG_GENERIC_CPU_AUTOPROBE=y -CONFIG_SOC_BUS=y -CONFIG_REGMAP=y -CONFIG_REGMAP_I2C=m -CONFIG_REGMAP_SPI=m -CONFIG_REGMAP_MMIO=y -CONFIG_DMA_SHARED_BUFFER=y -# CONFIG_DMA_FENCE_TRACE is not set -# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set -# end of Generic Driver Options - -# -# Bus devices -# -# CONFIG_MOXTET is not set -# CONFIG_MHI_BUS is not set -# CONFIG_MHI_BUS_EP is not set -# end of Bus devices - -# -# Cache Drivers -# -# end of Cache Drivers - CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y - -# -# Firmware Drivers -# - -# -# ARM System Control and Management Interface Protocol -# -# end of ARM System Control and Management Interface Protocol - -# CONFIG_FIRMWARE_MEMMAP is not set -CONFIG_DMIID=y CONFIG_DMI_SYSFS=y -CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y CONFIG_ISCSI_IBFT=m -CONFIG_SYSFB=y -# CONFIG_SYSFB_SIMPLEFB is not set -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# EFI (Extensible Firmware Interface) Support -# -CONFIG_EFI_ESRT=y -CONFIG_EFI_VARS_PSTORE=m -# 
CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set -CONFIG_EFI_RUNTIME_WRAPPERS=y -CONFIG_EFI_GENERIC_STUB=y CONFIG_EFI_ZBOOT=y -# CONFIG_EFI_BOOTLOADER_CONTROL is not set CONFIG_EFI_CAPSULE_LOADER=m CONFIG_EFI_TEST=m -# CONFIG_RESET_ATTACK_MITIGATION is not set -# CONFIG_EFI_DISABLE_PCI_DMA is not set -CONFIG_EFI_EARLYCON=y -CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y -# CONFIG_EFI_DISABLE_RUNTIME is not set -# CONFIG_EFI_COCO_SECRET is not set -# end of EFI (Extensible Firmware Interface) Support - -# -# Tegra firmware driver -# -# end of Tegra firmware driver -# end of Firmware Drivers - -# CONFIG_GNSS is not set CONFIG_MTD=m -# CONFIG_MTD_TESTS is not set - -# -# Partition parsers -# -# CONFIG_MTD_AR7_PARTS is not set -# CONFIG_MTD_CMDLINE_PARTS is not set -CONFIG_MTD_OF_PARTS=m -# CONFIG_MTD_REDBOOT_PARTS is not set -# end of Partition parsers - -# -# User Modules And Translation Layers -# -CONFIG_MTD_BLKDEVS=m CONFIG_MTD_BLOCK=m -# CONFIG_MTD_BLOCK_RO is not set - -# -# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
-# -# CONFIG_FTL is not set -# CONFIG_NFTL is not set -# CONFIG_INFTL is not set -# CONFIG_RFD_FTL is not set -# CONFIG_SSFDC is not set -# CONFIG_SM_FTL is not set -# CONFIG_MTD_OOPS is not set -# CONFIG_MTD_SWAP is not set -# CONFIG_MTD_PARTITIONED_MASTER is not set - -# -# RAM/ROM/Flash chip drivers -# CONFIG_MTD_CFI=m CONFIG_MTD_JEDECPROBE=m -CONFIG_MTD_GEN_PROBE=m -# CONFIG_MTD_CFI_ADV_OPTIONS is not set -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y CONFIG_MTD_CFI_INTELEXT=m CONFIG_MTD_CFI_AMDSTD=m CONFIG_MTD_CFI_STAA=m -CONFIG_MTD_CFI_UTIL=m CONFIG_MTD_RAM=m CONFIG_MTD_ROM=m -# CONFIG_MTD_ABSENT is not set -# end of RAM/ROM/Flash chip drivers - -# -# Mapping drivers for chip access -# -# CONFIG_MTD_COMPLEX_MAPPINGS is not set -# CONFIG_MTD_PHYSMAP is not set -# CONFIG_MTD_INTEL_VR_NOR is not set -# CONFIG_MTD_PLATRAM is not set -# end of Mapping drivers for chip access - -# -# Self-contained MTD device drivers -# -# CONFIG_MTD_PMC551 is not set -# CONFIG_MTD_DATAFLASH is not set -# CONFIG_MTD_MCHP23K256 is not set -# CONFIG_MTD_MCHP48L640 is not set -# CONFIG_MTD_SST25L is not set -# CONFIG_MTD_SLRAM is not set -# CONFIG_MTD_PHRAM is not set -# CONFIG_MTD_MTDRAM is not set CONFIG_MTD_BLOCK2MTD=m - -# -# Disk-On-Chip Device Drivers -# -# CONFIG_MTD_DOCG3 is not set -# end of Self-contained MTD device drivers - -# -# NAND -# -# CONFIG_MTD_ONENAND is not set -# CONFIG_MTD_RAW_NAND is not set -# CONFIG_MTD_SPI_NAND is not set - -# -# ECC engine support -# -# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set -# CONFIG_MTD_NAND_ECC_SW_BCH is not set -# CONFIG_MTD_NAND_ECC_MXIC is not set -# end of ECC engine support -# end of NAND - -# -# LPDDR & LPDDR2 PCM memory drivers -# -# CONFIG_MTD_LPDDR is not set -# end of LPDDR & LPDDR2 PCM memory drivers - CONFIG_MTD_SPI_NOR=m -CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y -# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set 
-CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y -# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set CONFIG_MTD_UBI=m -CONFIG_MTD_UBI_WL_THRESHOLD=4096 -CONFIG_MTD_UBI_BEB_LIMIT=20 -# CONFIG_MTD_UBI_FASTMAP is not set CONFIG_MTD_UBI_GLUEBI=m CONFIG_MTD_UBI_BLOCK=y -# CONFIG_MTD_HYPERBUS is not set -CONFIG_DTC=y -CONFIG_OF=y -# CONFIG_OF_UNITTEST is not set -CONFIG_OF_FLATTREE=y -CONFIG_OF_EARLY_FLATTREE=y -CONFIG_OF_KOBJ=y -CONFIG_OF_ADDRESS=y -CONFIG_OF_IRQ=y -CONFIG_OF_RESERVED_MEM=y -# CONFIG_OF_OVERLAY is not set -CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y CONFIG_PARPORT=m CONFIG_PARPORT_PC=m CONFIG_PARPORT_SERIAL=m CONFIG_PARPORT_PC_FIFO=y -# CONFIG_PARPORT_PC_SUPERIO is not set CONFIG_PARPORT_1284=y -CONFIG_PARPORT_NOT_PC=y -CONFIG_PNP=y # CONFIG_PNP_DEBUG_MESSAGES is not set - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y CONFIG_BLK_DEV_NULL_BLK=m -CONFIG_CDROM=m -# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set CONFIG_ZRAM=m -# CONFIG_ZRAM_DEF_COMP_LZORLE is not set CONFIG_ZRAM_DEF_COMP_ZSTD=y -# CONFIG_ZRAM_DEF_COMP_LZ4 is not set -# CONFIG_ZRAM_DEF_COMP_LZO is not set -# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set -# CONFIG_ZRAM_DEF_COMP_842 is not set -CONFIG_ZRAM_DEF_COMP="zstd" CONFIG_ZRAM_WRITEBACK=y -# CONFIG_ZRAM_MEMORY_TRACKING is not set -# CONFIG_ZRAM_MULTI_COMP is not set CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 CONFIG_BLK_DEV_DRBD=m -# CONFIG_DRBD_FAULT_INJECTION is not set CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=m -CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_CDROM_PKTCDVD=m -CONFIG_CDROM_PKTCDVD_BUFFERS=8 -# CONFIG_CDROM_PKTCDVD_WCACHE is not set -# CONFIG_ATA_OVER_ETH is not set CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_RBD=m -# CONFIG_BLK_DEV_UBLK is not set - -# -# NVME Support -# -CONFIG_NVME_CORE=m CONFIG_BLK_DEV_NVME=y CONFIG_NVME_MULTIPATH=y -# CONFIG_NVME_VERBOSE_ERRORS is not set -# CONFIG_NVME_HWMON is not set -CONFIG_NVME_FABRICS=m CONFIG_NVME_RDMA=m CONFIG_NVME_FC=m CONFIG_NVME_TCP=m -# CONFIG_NVME_AUTH is not set 
CONFIG_NVME_TARGET=m CONFIG_NVME_TARGET_PASSTHRU=y CONFIG_NVME_TARGET_LOOP=m @@ -2082,183 +667,58 @@ CONFIG_NVME_TARGET_RDMA=m CONFIG_NVME_TARGET_FC=m CONFIG_NVME_TARGET_FCLOOP=m CONFIG_NVME_TARGET_TCP=m -# CONFIG_NVME_TARGET_AUTH is not set -# end of NVME Support - -# -# Misc devices -# -CONFIG_SENSORS_LIS3LV02D=m -# CONFIG_AD525X_DPOT is not set -# CONFIG_DUMMY_IRQ is not set -# CONFIG_PHANTOM is not set -CONFIG_TIFM_CORE=m -CONFIG_TIFM_7XX1=m -# CONFIG_ICS932S401 is not set CONFIG_ENCLOSURE_SERVICES=m -# CONFIG_HP_ILO is not set CONFIG_APDS9802ALS=m CONFIG_ISL29003=m CONFIG_ISL29020=m CONFIG_SENSORS_TSL2550=m CONFIG_SENSORS_BH1770=m CONFIG_SENSORS_APDS990X=m -# CONFIG_HMC6352 is not set -# CONFIG_DS1682 is not set -# CONFIG_LATTICE_ECP3_CONFIG is not set -# CONFIG_SRAM is not set -# CONFIG_DW_XDATA_PCIE is not set -# CONFIG_PCI_ENDPOINT_TEST is not set -# CONFIG_XILINX_SDFEC is not set -CONFIG_MISC_RTSX=m -# CONFIG_HISI_HIKEY_USB is not set -# CONFIG_OPEN_DICE is not set -# CONFIG_VCPU_STALL_DETECTOR is not set -# CONFIG_C2PORT is not set - -# -# EEPROM support -# CONFIG_EEPROM_AT24=m -# CONFIG_EEPROM_AT25 is not set CONFIG_EEPROM_LEGACY=m CONFIG_EEPROM_MAX6875=m -CONFIG_EEPROM_93CX6=m -# CONFIG_EEPROM_93XX46 is not set -# CONFIG_EEPROM_IDT_89HPESX is not set -# CONFIG_EEPROM_EE1004 is not set -# end of EEPROM support - -CONFIG_CB710_CORE=m -# CONFIG_CB710_DEBUG is not set -CONFIG_CB710_DEBUG_ASSUMPTIONS=y - -# -# Texas Instruments shared transport line discipline -# -# CONFIG_TI_ST is not set -# end of Texas Instruments shared transport line discipline - CONFIG_SENSORS_LIS3_I2C=m -CONFIG_ALTERA_STAPL=m -# CONFIG_GENWQE is not set -# CONFIG_ECHO is not set -# CONFIG_BCM_VK is not set -# CONFIG_MISC_ALCOR_PCI is not set CONFIG_MISC_RTSX_PCI=m CONFIG_MISC_RTSX_USB=m CONFIG_UACCE=m CONFIG_PVPANIC=y -# CONFIG_PVPANIC_MMIO is not set -# CONFIG_PVPANIC_PCI is not set -# CONFIG_GP_PCI1XXXX is not set -# end of Misc devices - -# -# SCSI device support -# 
-CONFIG_SCSI_MOD=y -CONFIG_RAID_ATTRS=y -CONFIG_SCSI_COMMON=y -CONFIG_SCSI=y -CONFIG_SCSI_DMA=y -CONFIG_SCSI_NETLINK=y -CONFIG_SCSI_PROC_FS=y - -# -# SCSI support type (disk, tape, CD-ROM) -# CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=m CONFIG_BLK_DEV_SR=m CONFIG_CHR_DEV_SG=m -CONFIG_BLK_DEV_BSG=y CONFIG_CHR_DEV_SCH=m CONFIG_SCSI_ENCLOSURE=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y CONFIG_SCSI_SCAN_ASYNC=y - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=m CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=y -CONFIG_SCSI_SAS_LIBSAS=y CONFIG_SCSI_SAS_ATA=y -CONFIG_SCSI_SAS_HOST_SMP=y -CONFIG_SCSI_SRP_ATTRS=m -# end of SCSI Transports - -CONFIG_SCSI_LOWLEVEL=y CONFIG_ISCSI_TCP=m -CONFIG_ISCSI_BOOT_SYSFS=m -# CONFIG_SCSI_CXGB3_ISCSI is not set CONFIG_SCSI_CXGB4_ISCSI=m CONFIG_SCSI_BNX2_ISCSI=m CONFIG_SCSI_BNX2X_FCOE=m CONFIG_BE2ISCSI=m -# CONFIG_BLK_DEV_3W_XXXX_RAID is not set CONFIG_SCSI_HPSA=m -# CONFIG_SCSI_3W_9XXX is not set -# CONFIG_SCSI_3W_SAS is not set -# CONFIG_SCSI_ACARD is not set CONFIG_SCSI_AACRAID=m -# CONFIG_SCSI_AIC7XXX is not set -# CONFIG_SCSI_AIC79XX is not set -# CONFIG_SCSI_AIC94XX is not set CONFIG_SCSI_MVSAS=y # CONFIG_SCSI_MVSAS_DEBUG is not set CONFIG_SCSI_MVSAS_TASKLET=y CONFIG_SCSI_MVUMI=y -# CONFIG_SCSI_ADVANSYS is not set -# CONFIG_SCSI_ARCMSR is not set -# CONFIG_SCSI_ESAS2R is not set CONFIG_MEGARAID_NEWGEN=y CONFIG_MEGARAID_MM=y CONFIG_MEGARAID_MAILBOX=y CONFIG_MEGARAID_LEGACY=y CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT3SAS=y -CONFIG_SCSI_MPT2SAS_MAX_SGE=128 -CONFIG_SCSI_MPT3SAS_MAX_SGE=128 CONFIG_SCSI_MPT2SAS=m -# CONFIG_SCSI_MPI3MR is not set CONFIG_SCSI_SMARTPQI=m -# CONFIG_SCSI_HPTIOP is not set -# CONFIG_SCSI_BUSLOGIC is not set -# CONFIG_SCSI_MYRB is not set -# CONFIG_SCSI_MYRS is not set CONFIG_LIBFC=m CONFIG_LIBFCOE=m CONFIG_FCOE=m -# CONFIG_SCSI_SNIC is not set -# CONFIG_SCSI_DMX3191D is not set -# CONFIG_SCSI_FDOMAIN_PCI is not set -# CONFIG_SCSI_IPS is not set -# CONFIG_SCSI_INITIO is not set -# 
CONFIG_SCSI_INIA100 is not set -# CONFIG_SCSI_PPA is not set -# CONFIG_SCSI_IMM is not set -# CONFIG_SCSI_STEX is not set -# CONFIG_SCSI_SYM53C8XX_2 is not set -# CONFIG_SCSI_IPR is not set CONFIG_SCSI_QLOGIC_1280=m CONFIG_SCSI_QLA_FC=m CONFIG_TCM_QLA2XXX=m -# CONFIG_TCM_QLA2XXX_DEBUG is not set CONFIG_SCSI_QLA_ISCSI=m -# CONFIG_SCSI_LPFC is not set -# CONFIG_SCSI_EFCT is not set -# CONFIG_SCSI_DC395x is not set -# CONFIG_SCSI_AM53C974 is not set -# CONFIG_SCSI_WD719X is not set -# CONFIG_SCSI_DEBUG is not set -# CONFIG_SCSI_PMCRAID is not set -# CONFIG_SCSI_PM8001 is not set -# CONFIG_SCSI_BFA_FC is not set CONFIG_SCSI_VIRTIO=m CONFIG_SCSI_CHELSIO_FCOE=m CONFIG_SCSI_DH=y @@ -2266,141 +726,25 @@ CONFIG_SCSI_DH_RDAC=y CONFIG_SCSI_DH_HP_SW=y CONFIG_SCSI_DH_EMC=y CONFIG_SCSI_DH_ALUA=y -# end of SCSI device support - CONFIG_ATA=y -CONFIG_SATA_HOST=y -CONFIG_PATA_TIMINGS=y -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_FORCE=y -CONFIG_ATA_ACPI=y -# CONFIG_SATA_ZPODD is not set -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# CONFIG_SATA_AHCI=y -CONFIG_SATA_MOBILE_LPM_POLICY=0 CONFIG_SATA_AHCI_PLATFORM=y -# CONFIG_AHCI_DWC is not set -# CONFIG_AHCI_CEVA is not set -# CONFIG_SATA_INIC162X is not set -# CONFIG_SATA_ACARD_AHCI is not set -# CONFIG_SATA_SIL24 is not set -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -# CONFIG_PDC_ADMA is not set -# CONFIG_SATA_QSTOR is not set -# CONFIG_SATA_SX4 is not set -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# CONFIG_ATA_PIIX=m -# CONFIG_SATA_DWC is not set -# CONFIG_SATA_MV is not set -# CONFIG_SATA_NV is not set -# CONFIG_SATA_PROMISE is not set -# CONFIG_SATA_SIL is not set -# CONFIG_SATA_SIS is not set -# CONFIG_SATA_SVW is not set -# CONFIG_SATA_ULI is not set -# CONFIG_SATA_VIA is not set -# CONFIG_SATA_VITESSE is not set -# CONFIG_SATA_ZHAOXIN is not set - -# -# PATA SFF controllers with BMDMA -# -# CONFIG_PATA_ALI is not set -# CONFIG_PATA_AMD is not set -# 
CONFIG_PATA_ARTOP is not set CONFIG_PATA_ATIIXP=y -# CONFIG_PATA_ATP867X is not set -# CONFIG_PATA_CMD64X is not set -# CONFIG_PATA_CYPRESS is not set -# CONFIG_PATA_EFAR is not set -# CONFIG_PATA_HPT366 is not set -# CONFIG_PATA_HPT37X is not set -# CONFIG_PATA_HPT3X2N is not set -# CONFIG_PATA_HPT3X3 is not set -# CONFIG_PATA_IT8213 is not set -# CONFIG_PATA_IT821X is not set -# CONFIG_PATA_JMICRON is not set -# CONFIG_PATA_MARVELL is not set -# CONFIG_PATA_NETCELL is not set -# CONFIG_PATA_NINJA32 is not set -# CONFIG_PATA_NS87415 is not set -# CONFIG_PATA_OLDPIIX is not set -# CONFIG_PATA_OPTIDMA is not set -# CONFIG_PATA_PDC2027X is not set -# CONFIG_PATA_PDC_OLD is not set -# CONFIG_PATA_RADISYS is not set -# CONFIG_PATA_RDC is not set -# CONFIG_PATA_SCH is not set -# CONFIG_PATA_SERVERWORKS is not set -# CONFIG_PATA_SIL680 is not set -# CONFIG_PATA_SIS is not set -# CONFIG_PATA_TOSHIBA is not set -# CONFIG_PATA_TRIFLEX is not set -# CONFIG_PATA_VIA is not set -# CONFIG_PATA_WINBOND is not set - -# -# PIO-only SFF controllers -# -# CONFIG_PATA_CMD640_PCI is not set -# CONFIG_PATA_MPIIX is not set -# CONFIG_PATA_NS87410 is not set -# CONFIG_PATA_OPTI is not set -# CONFIG_PATA_OF_PLATFORM is not set -# CONFIG_PATA_RZ1000 is not set -# CONFIG_PATA_PARPORT is not set - -# -# Generic fallback / legacy drivers -# -# CONFIG_PATA_ACPI is not set CONFIG_ATA_GENERIC=m -# CONFIG_PATA_LEGACY is not set CONFIG_MD=y CONFIG_BLK_DEV_MD=y -CONFIG_MD_AUTODETECT=y -CONFIG_MD_BITMAP_FILE=y CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m CONFIG_MD_MULTIPATH=m CONFIG_MD_FAULTY=m -# CONFIG_MD_CLUSTER is not set CONFIG_BCACHE=m -# CONFIG_BCACHE_DEBUG is not set -# CONFIG_BCACHE_CLOSURES_DEBUG is not set -# CONFIG_BCACHE_ASYNC_REGISTRATION is not set -CONFIG_BLK_DEV_DM_BUILTIN=y CONFIG_BLK_DEV_DM=m -# CONFIG_DM_DEBUG is not set -CONFIG_DM_BUFIO=m -# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set -CONFIG_DM_BIO_PRISON=m 
-CONFIG_DM_PERSISTENT_DATA=m -# CONFIG_DM_UNSTRIPED is not set CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m -CONFIG_DM_CACHE_SMQ=m CONFIG_DM_WRITECACHE=m -# CONFIG_DM_EBS is not set CONFIG_DM_ERA=m -# CONFIG_DM_CLONE is not set CONFIG_DM_MIRROR=m CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_RAID=m @@ -2408,57 +752,33 @@ CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_ST=m -# CONFIG_DM_MULTIPATH_HST is not set -# CONFIG_DM_MULTIPATH_IOA is not set CONFIG_DM_DELAY=m -# CONFIG_DM_DUST is not set CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m CONFIG_DM_VERITY=m -# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set -# CONFIG_DM_VERITY_FEC is not set CONFIG_DM_SWITCH=m CONFIG_DM_LOG_WRITES=m CONFIG_DM_INTEGRITY=m -# CONFIG_DM_ZONED is not set -CONFIG_DM_AUDIT=y CONFIG_TARGET_CORE=m CONFIG_TCM_IBLOCK=m CONFIG_TCM_FILEIO=m CONFIG_TCM_PSCSI=m CONFIG_TCM_USER2=m CONFIG_LOOPBACK_TARGET=m -# CONFIG_TCM_FC is not set CONFIG_ISCSI_TARGET=m CONFIG_ISCSI_TARGET_CXGB4=m -# CONFIG_SBP_TARGET is not set -# CONFIG_REMOTE_TARGET is not set CONFIG_FUSION=y CONFIG_FUSION_SPI=m -# CONFIG_FUSION_FC is not set CONFIG_FUSION_SAS=m -CONFIG_FUSION_MAX_SGE=128 CONFIG_FUSION_CTL=m CONFIG_FUSION_LOGGING=y - -# -# IEEE 1394 (FireWire) support -# CONFIG_FIREWIRE=m CONFIG_FIREWIRE_OHCI=m CONFIG_FIREWIRE_SBP2=m CONFIG_FIREWIRE_NET=m -# CONFIG_FIREWIRE_NOSY is not set -# end of IEEE 1394 (FireWire) support - -CONFIG_NETDEVICES=y -CONFIG_MII=y -CONFIG_NET_CORE=y CONFIG_BONDING=m CONFIG_DUMMY=m CONFIG_WIREGUARD=m -# CONFIG_WIREGUARD_DEBUG is not set -# CONFIG_EQUALIZER is not set CONFIG_NET_FC=y CONFIG_IFB=m CONFIG_NET_TEAM=m @@ -2469,210 +789,89 @@ CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m -CONFIG_IPVLAN_L3S=y CONFIG_IPVLAN=m CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m -# CONFIG_BAREUDP is not set -# CONFIG_GTP is not set -# CONFIG_AMT is not set CONFIG_MACSEC=m CONFIG_NETCONSOLE=m 
CONFIG_NETCONSOLE_DYNAMIC=y -# CONFIG_NETCONSOLE_EXTENDED_LOG is not set -CONFIG_NETPOLL=y -CONFIG_NET_POLL_CONTROLLER=y CONFIG_NTB_NETDEV=m CONFIG_RIONET=m -CONFIG_RIONET_TX_SIZE=128 -CONFIG_RIONET_RX_SIZE=128 CONFIG_TUN=m -CONFIG_TAP=m -# CONFIG_TUN_VNET_CROSS_LE is not set CONFIG_VETH=m CONFIG_VIRTIO_NET=m CONFIG_NLMON=m CONFIG_NET_VRF=m CONFIG_VSOCKMON=m -# CONFIG_ARCNET is not set # CONFIG_ATM_DRIVERS is not set - -# -# Distributed Switch Architecture drivers -# -# CONFIG_B53 is not set -# CONFIG_NET_DSA_BCM_SF2 is not set -# CONFIG_NET_DSA_LOOP is not set -# CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK is not set -# CONFIG_NET_DSA_LANTIQ_GSWIP is not set -# CONFIG_NET_DSA_MT7530 is not set -# CONFIG_NET_DSA_MV88E6060 is not set -# CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON is not set -# CONFIG_NET_DSA_MV88E6XXX is not set -# CONFIG_NET_DSA_AR9331 is not set -# CONFIG_NET_DSA_QCA8K is not set -# CONFIG_NET_DSA_SJA1105 is not set -# CONFIG_NET_DSA_XRS700X_I2C is not set -# CONFIG_NET_DSA_XRS700X_MDIO is not set -# CONFIG_NET_DSA_REALTEK is not set -# CONFIG_NET_DSA_SMSC_LAN9303_I2C is not set -# CONFIG_NET_DSA_SMSC_LAN9303_MDIO is not set -# CONFIG_NET_DSA_VITESSE_VSC73XX_SPI is not set -# CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM is not set -# end of Distributed Switch Architecture drivers - -CONFIG_ETHERNET=y -CONFIG_MDIO=m # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_ADAPTEC is not set # CONFIG_NET_VENDOR_AGERE is not set # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_ALTEON is not set -# CONFIG_ALTERA_TSE is not set # CONFIG_NET_VENDOR_AMAZON is not set # CONFIG_NET_VENDOR_AMD is not set # CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set -CONFIG_NET_VENDOR_ASIX=y -# CONFIG_SPI_AX88796C is not set # CONFIG_NET_VENDOR_ATHEROS is not set -CONFIG_NET_VENDOR_BROADCOM=y -# CONFIG_B44 is not set -# CONFIG_BCMGENET is not set CONFIG_BNX2=y -CONFIG_CNIC=m CONFIG_TIGON3=m -CONFIG_TIGON3_HWMON=y CONFIG_BNX2X=m -CONFIG_BNX2X_SRIOV=y -# 
CONFIG_SYSTEMPORT is not set CONFIG_BNXT=m -CONFIG_BNXT_SRIOV=y -CONFIG_BNXT_FLOWER_OFFLOAD=y CONFIG_BNXT_DCB=y -CONFIG_BNXT_HWMON=y -CONFIG_NET_VENDOR_CADENCE=y -# CONFIG_MACB is not set # CONFIG_NET_VENDOR_CAVIUM is not set -CONFIG_NET_VENDOR_CHELSIO=y CONFIG_CHELSIO_T1=m CONFIG_CHELSIO_T1_1G=y CONFIG_CHELSIO_T3=m -CONFIG_CHELSIO_T4=m -# CONFIG_CHELSIO_T4_DCB is not set CONFIG_CHELSIO_T4VF=m -CONFIG_CHELSIO_LIB=m -CONFIG_CHELSIO_INLINE_CRYPTO=y -# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set CONFIG_CHELSIO_IPSEC_INLINE=m -# CONFIG_CHELSIO_TLS_DEVICE is not set # CONFIG_NET_VENDOR_CISCO is not set # CONFIG_NET_VENDOR_CORTINA is not set -CONFIG_NET_VENDOR_DAVICOM=y -# CONFIG_DM9051 is not set CONFIG_DNET=m # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set -CONFIG_NET_VENDOR_ENGLEDER=y -# CONFIG_TSNEP is not set # CONFIG_NET_VENDOR_EZCHIP is not set -CONFIG_NET_VENDOR_FUNGIBLE=y -# CONFIG_FUN_ETH is not set -CONFIG_NET_VENDOR_GOOGLE=y -CONFIG_NET_VENDOR_HUAWEI=y # CONFIG_NET_VENDOR_I825XX is not set -CONFIG_NET_VENDOR_INTEL=y -# CONFIG_E100 is not set CONFIG_E1000=m CONFIG_E1000E=m CONFIG_IGB=m -CONFIG_IGB_HWMON=y CONFIG_IGBVF=m CONFIG_IXGBE=m -CONFIG_IXGBE_HWMON=y CONFIG_IXGBE_DCB=y -CONFIG_IXGBE_IPSEC=y CONFIG_IXGBEVF=m -CONFIG_IXGBEVF_IPSEC=y CONFIG_I40E=m CONFIG_I40E_DCB=y -CONFIG_IAVF=m CONFIG_I40EVF=m CONFIG_ICE=m -CONFIG_ICE_SWITCHDEV=y CONFIG_FM10K=m -# CONFIG_IGC is not set -# CONFIG_JME is not set -CONFIG_NET_VENDOR_ADI=y -# CONFIG_ADIN1110 is not set -CONFIG_NET_VENDOR_LITEX=y -# CONFIG_LITEX_LITEETH is not set # CONFIG_NET_VENDOR_MARVELL is not set -CONFIG_NET_VENDOR_MELLANOX=y CONFIG_MLX4_EN=m -CONFIG_MLX4_EN_DCB=y -CONFIG_MLX4_CORE=m -CONFIG_MLX4_DEBUG=y # CONFIG_MLX4_CORE_GEN2 is not set CONFIG_MLX5_CORE=m CONFIG_MLX5_FPGA=y CONFIG_MLX5_CORE_EN=y -CONFIG_MLX5_EN_ARFS=y -CONFIG_MLX5_EN_RXNFC=y -CONFIG_MLX5_MPFS=y -CONFIG_MLX5_ESWITCH=y -CONFIG_MLX5_BRIDGE=y -CONFIG_MLX5_CLS_ACT=y 
-CONFIG_MLX5_TC_CT=y -CONFIG_MLX5_TC_SAMPLE=y -CONFIG_MLX5_CORE_EN_DCB=y CONFIG_MLX5_CORE_IPOIB=y -# CONFIG_MLX5_MACSEC is not set -# CONFIG_MLX5_EN_IPSEC is not set -# CONFIG_MLX5_EN_TLS is not set -CONFIG_MLX5_SW_STEERING=y -# CONFIG_MLX5_SF is not set CONFIG_MLXSW_CORE=m -CONFIG_MLXSW_CORE_HWMON=y -CONFIG_MLXSW_CORE_THERMAL=y -CONFIG_MLXSW_PCI=m -CONFIG_MLXSW_I2C=m -CONFIG_MLXSW_SPECTRUM=m -CONFIG_MLXSW_SPECTRUM_DCB=y -CONFIG_MLXSW_MINIMAL=m -CONFIG_MLXFW=m # CONFIG_NET_VENDOR_MICREL is not set # CONFIG_NET_VENDOR_MICROCHIP is not set # CONFIG_NET_VENDOR_MICROSEMI is not set -CONFIG_NET_VENDOR_MICROSOFT=y # CONFIG_NET_VENDOR_MYRI is not set -# CONFIG_FEALNX is not set # CONFIG_NET_VENDOR_NI is not set # CONFIG_NET_VENDOR_NATSEMI is not set -CONFIG_NET_VENDOR_NETERION=y -# CONFIG_S2IO is not set # CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set CONFIG_ETHOC=m -CONFIG_NET_VENDOR_PACKET_ENGINES=y -# CONFIG_HAMACHI is not set -# CONFIG_YELLOWFIN is not set -CONFIG_NET_VENDOR_PENSANDO=y -# CONFIG_IONIC is not set # CONFIG_NET_VENDOR_QLOGIC is not set # CONFIG_NET_VENDOR_BROCADE is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RDC is not set -CONFIG_NET_VENDOR_REALTEK=y CONFIG_8139CP=m CONFIG_8139TOO=m # CONFIG_8139TOO_PIO is not set -# CONFIG_8139TOO_TUNE_TWISTER is not set CONFIG_8139TOO_8129=y -# CONFIG_8139_OLD_RX_RESET is not set CONFIG_R8169=m # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set @@ -2683,54 +882,24 @@ CONFIG_R8169=m # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_SOCIONEXT is not set -CONFIG_NET_VENDOR_STMICRO=y CONFIG_STMMAC_ETH=y -# CONFIG_STMMAC_SELFTESTS is not set -CONFIG_STMMAC_PLATFORM=y -# CONFIG_DWMAC_DWC_QOS_ETH is not set -CONFIG_DWMAC_GENERIC=y -# CONFIG_DWMAC_INTEL_PLAT is not set CONFIG_DWMAC_LOONGSON=m -# CONFIG_STMMAC_PCI is not set # CONFIG_NET_VENDOR_SUN is not set # 
CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TI is not set -CONFIG_NET_VENDOR_VERTEXCOM=y -# CONFIG_MSE102X is not set # CONFIG_NET_VENDOR_VIA is not set -CONFIG_NET_VENDOR_WANGXUN=y -CONFIG_LIBWX=m CONFIG_NGBE=m CONFIG_TXGBE=m # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_XILINX is not set -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set -# CONFIG_NET_SB1000 is not set -CONFIG_PHYLINK=y -CONFIG_PHYLIB=y -CONFIG_SWPHY=y CONFIG_LED_TRIGGER_PHY=y -CONFIG_PHYLIB_LEDS=y -CONFIG_FIXED_PHY=y CONFIG_SFP=y - -# -# MII PHY device drivers -# CONFIG_AMD_PHY=m -# CONFIG_ADIN_PHY is not set -# CONFIG_ADIN1100_PHY is not set CONFIG_AQUANTIA_PHY=m -# CONFIG_AX88796B_PHY is not set CONFIG_BROADCOM_PHY=m -# CONFIG_BCM54140_PHY is not set CONFIG_BCM7XXX_PHY=m -# CONFIG_BCM84881_PHY is not set CONFIG_BCM87XX_PHY=m -CONFIG_BCM_NET_PHYLIB=m -CONFIG_BCM_NET_PHYPTP=m CONFIG_CICADA_PHY=m CONFIG_CORTINA_PHY=m CONFIG_DAVICOM_PHY=m @@ -2740,127 +909,43 @@ CONFIG_INTEL_XWAY_PHY=m CONFIG_LSI_ET1011C_PHY=m CONFIG_MARVELL_PHY=m CONFIG_MARVELL_10G_PHY=y -# CONFIG_MARVELL_88Q2XXX_PHY is not set -# CONFIG_MARVELL_88X2222_PHY is not set -# CONFIG_MAXLINEAR_GPHY is not set -# CONFIG_MEDIATEK_GE_PHY is not set CONFIG_MICREL_PHY=m -# CONFIG_MICROCHIP_T1S_PHY is not set -CONFIG_MICROCHIP_PHY=m CONFIG_MICROCHIP_T1_PHY=m CONFIG_MICROSEMI_PHY=m -# CONFIG_MOTORCOMM_PHY is not set CONFIG_NATIONAL_PHY=m -# CONFIG_NXP_CBTX_PHY is not set -# CONFIG_NXP_C45_TJA11XX_PHY is not set -# CONFIG_NXP_TJA11XX_PHY is not set -# CONFIG_NCN26000_PHY is not set CONFIG_QSEMI_PHY=m -CONFIG_REALTEK_PHY=m CONFIG_RENESAS_PHY=m CONFIG_ROCKCHIP_PHY=m -CONFIG_SMSC_PHY=m CONFIG_STE10XP=m CONFIG_TERANETICS_PHY=m CONFIG_DP83822_PHY=m CONFIG_DP83TC811_PHY=m CONFIG_DP83848_PHY=m CONFIG_DP83867_PHY=m -# CONFIG_DP83869_PHY is not set -# CONFIG_DP83TD510_PHY is not set CONFIG_VITESSE_PHY=m CONFIG_XILINX_GMII2RGMII=m CONFIG_MICREL_KS8995MA=m -# CONFIG_PSE_CONTROLLER is not 
set -CONFIG_CAN_DEV=m CONFIG_CAN_VCAN=m -# CONFIG_CAN_VXCAN is not set -CONFIG_CAN_NETLINK=y -CONFIG_CAN_CALC_BITTIMING=y -# CONFIG_CAN_CAN327 is not set -# CONFIG_CAN_FLEXCAN is not set -# CONFIG_CAN_GRCAN is not set -# CONFIG_CAN_KVASER_PCIEFD is not set CONFIG_CAN_SLCAN=m CONFIG_CAN_C_CAN=m CONFIG_CAN_C_CAN_PLATFORM=m CONFIG_CAN_C_CAN_PCI=m CONFIG_CAN_CC770=m -# CONFIG_CAN_CC770_ISA is not set CONFIG_CAN_CC770_PLATFORM=m -# CONFIG_CAN_CTUCANFD_PCI is not set -# CONFIG_CAN_CTUCANFD_PLATFORM is not set -# CONFIG_CAN_IFI_CANFD is not set -# CONFIG_CAN_M_CAN is not set -# CONFIG_CAN_PEAK_PCIEFD is not set CONFIG_CAN_SJA1000=m CONFIG_CAN_EMS_PCI=m -# CONFIG_CAN_F81601 is not set CONFIG_CAN_KVASER_PCI=m CONFIG_CAN_PEAK_PCI=m -CONFIG_CAN_PEAK_PCIEC=y CONFIG_CAN_PLX_PCI=m -# CONFIG_CAN_SJA1000_ISA is not set CONFIG_CAN_SJA1000_PLATFORM=m CONFIG_CAN_SOFTING=m - -# -# CAN SPI interfaces -# -# CONFIG_CAN_HI311X is not set -# CONFIG_CAN_MCP251X is not set -# CONFIG_CAN_MCP251XFD is not set -# end of CAN SPI interfaces - -# -# CAN USB interfaces -# CONFIG_CAN_8DEV_USB=m CONFIG_CAN_EMS_USB=m -# CONFIG_CAN_ESD_USB is not set -# CONFIG_CAN_ETAS_ES58X is not set -# CONFIG_CAN_F81604 is not set -# CONFIG_CAN_GS_USB is not set CONFIG_CAN_KVASER_USB=m -# CONFIG_CAN_MCBA_USB is not set CONFIG_CAN_PEAK_USB=m -# CONFIG_CAN_UCAN is not set -# end of CAN USB interfaces - -# CONFIG_CAN_DEBUG_DEVICES is not set -CONFIG_MDIO_DEVICE=y -CONFIG_MDIO_BUS=y -CONFIG_FWNODE_MDIO=y -CONFIG_OF_MDIO=y -CONFIG_ACPI_MDIO=y -CONFIG_MDIO_DEVRES=y CONFIG_MDIO_BITBANG=m -# CONFIG_MDIO_BCM_UNIMAC is not set -CONFIG_MDIO_CAVIUM=m -# CONFIG_MDIO_GPIO is not set -# CONFIG_MDIO_HISI_FEMAC is not set -CONFIG_MDIO_I2C=y -# CONFIG_MDIO_MVUSB is not set CONFIG_MDIO_MSCC_MIIM=m -# CONFIG_MDIO_OCTEON is not set -# CONFIG_MDIO_IPQ4019 is not set -# CONFIG_MDIO_IPQ8064 is not set CONFIG_MDIO_THUNDER=m - -# -# MDIO Multiplexers -# -# CONFIG_MDIO_BUS_MUX_GPIO is not set -# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set -# 
CONFIG_MDIO_BUS_MUX_MMIOREG is not set - -# -# PCS device drivers -# -CONFIG_PCS_XPCS=y -# end of PCS device drivers - -# CONFIG_PLIP is not set CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m @@ -2869,47 +954,31 @@ CONFIG_PPP_MPPE=m CONFIG_PPP_MULTILINK=y CONFIG_PPPOATM=m CONFIG_PPPOE=m -# CONFIG_PPPOE_HASH_BITS_1 is not set -# CONFIG_PPPOE_HASH_BITS_2 is not set -CONFIG_PPPOE_HASH_BITS_4=y -# CONFIG_PPPOE_HASH_BITS_8 is not set -CONFIG_PPPOE_HASH_BITS=4 CONFIG_PPTP=m CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m CONFIG_SLIP=m -CONFIG_SLHC=m CONFIG_SLIP_COMPRESSED=y CONFIG_SLIP_SMART=y -# CONFIG_SLIP_MODE_SLIP6 is not set -CONFIG_USB_NET_DRIVERS=y CONFIG_USB_CATC=m CONFIG_USB_KAWETH=m CONFIG_USB_PEGASUS=m CONFIG_USB_RTL8150=m CONFIG_USB_RTL8152=m CONFIG_USB_LAN78XX=m -CONFIG_USB_USBNET=m # CONFIG_USB_NET_AX8817X is not set # CONFIG_USB_NET_AX88179_178A is not set -CONFIG_USB_NET_CDCETHER=m CONFIG_USB_NET_CDC_EEM=m -CONFIG_USB_NET_CDC_NCM=m CONFIG_USB_NET_HUAWEI_CDC_NCM=m CONFIG_USB_NET_CDC_MBIM=m CONFIG_USB_NET_DM9601=m -# CONFIG_USB_NET_SR9700 is not set -# CONFIG_USB_NET_SR9800 is not set CONFIG_USB_NET_SMSC75XX=m CONFIG_USB_NET_SMSC95XX=m CONFIG_USB_NET_GL620A=m # CONFIG_USB_NET_NET1080 is not set CONFIG_USB_NET_PLUSB=m CONFIG_USB_NET_MCS7830=m -CONFIG_USB_NET_RNDIS_HOST=m -CONFIG_USB_NET_CDC_SUBSET_ENABLE=m -CONFIG_USB_NET_CDC_SUBSET=m CONFIG_USB_ALI_M5632=y CONFIG_USB_AN2720=y # CONFIG_USB_BELKIN is not set @@ -2922,155 +991,41 @@ CONFIG_USB_NET_KALMIA=m CONFIG_USB_NET_QMI_WWAN=m CONFIG_USB_HSO=m CONFIG_USB_NET_INT51X1=m -# CONFIG_USB_CDC_PHONET is not set CONFIG_USB_IPHETH=m CONFIG_USB_SIERRA_NET=m CONFIG_USB_VL600=m CONFIG_USB_NET_CH9200=m -# CONFIG_USB_NET_AQC111 is not set -CONFIG_USB_RTL8153_ECM=m -CONFIG_WLAN=y # CONFIG_WLAN_VENDOR_ADMTEK is not set -CONFIG_ATH_COMMON=m -CONFIG_WLAN_VENDOR_ATH=y -# CONFIG_ATH_DEBUG is not set -# CONFIG_ATH5K is not set -# CONFIG_ATH5K_PCI is not set -CONFIG_ATH9K_HW=m -CONFIG_ATH9K_COMMON=m 
-CONFIG_ATH9K_BTCOEX_SUPPORT=y CONFIG_ATH9K=m -CONFIG_ATH9K_PCI=y CONFIG_ATH9K_AHB=y -# CONFIG_ATH9K_DEBUGFS is not set -# CONFIG_ATH9K_DYNACK is not set CONFIG_ATH9K_WOW=y -CONFIG_ATH9K_RFKILL=y -# CONFIG_ATH9K_CHANNEL_CONTEXT is not set -CONFIG_ATH9K_PCOEM=y -# CONFIG_ATH9K_PCI_NO_EEPROM is not set CONFIG_ATH9K_HTC=m -# CONFIG_ATH9K_HTC_DEBUGFS is not set -# CONFIG_ATH9K_HWRNG is not set -# CONFIG_CARL9170 is not set -# CONFIG_ATH6KL is not set -# CONFIG_AR5523 is not set -# CONFIG_WIL6210 is not set CONFIG_ATH10K=m -CONFIG_ATH10K_CE=y CONFIG_ATH10K_PCI=m -# CONFIG_ATH10K_AHB is not set -# CONFIG_ATH10K_SDIO is not set -# CONFIG_ATH10K_USB is not set -# CONFIG_ATH10K_DEBUG is not set -# CONFIG_ATH10K_DEBUGFS is not set -# CONFIG_ATH10K_TRACING is not set -# CONFIG_WCN36XX is not set -# CONFIG_ATH11K is not set -# CONFIG_ATH12K is not set # CONFIG_WLAN_VENDOR_ATMEL is not set -CONFIG_WLAN_VENDOR_BROADCOM=y -# CONFIG_B43 is not set -# CONFIG_B43LEGACY is not set -CONFIG_BRCMUTIL=m CONFIG_BRCMSMAC=m -CONFIG_BRCMSMAC_LEDS=y CONFIG_BRCMFMAC=m -CONFIG_BRCMFMAC_PROTO_BCDC=y -CONFIG_BRCMFMAC_PROTO_MSGBUF=y -CONFIG_BRCMFMAC_SDIO=y CONFIG_BRCMFMAC_USB=y CONFIG_BRCMFMAC_PCIE=y -# CONFIG_BRCM_TRACING is not set -# CONFIG_BRCMDBG is not set # CONFIG_WLAN_VENDOR_CISCO is not set -CONFIG_WLAN_VENDOR_INTEL=y -# CONFIG_IPW2100 is not set -# CONFIG_IPW2200 is not set -# CONFIG_IWL4965 is not set -# CONFIG_IWL3945 is not set CONFIG_IWLWIFI=m -CONFIG_IWLWIFI_LEDS=y CONFIG_IWLDVM=m CONFIG_IWLMVM=m -CONFIG_IWLWIFI_OPMODE_MODULAR=y - -# -# Debugging Options -# -# CONFIG_IWLWIFI_DEBUG is not set -CONFIG_IWLWIFI_DEVICE_TRACING=y -# end of Debugging Options - # CONFIG_WLAN_VENDOR_INTERSIL is not set -CONFIG_WLAN_VENDOR_MARVELL=y -# CONFIG_LIBERTAS is not set -# CONFIG_LIBERTAS_THINFIRM is not set CONFIG_MWIFIEX=m CONFIG_MWIFIEX_SDIO=m CONFIG_MWIFIEX_PCIE=m CONFIG_MWIFIEX_USB=m -# CONFIG_MWL8K is not set -CONFIG_WLAN_VENDOR_MEDIATEK=y CONFIG_MT7601U=m -CONFIG_MT76_CORE=m 
-CONFIG_MT76_LEDS=y -CONFIG_MT76_USB=m -CONFIG_MT76x02_LIB=m -CONFIG_MT76x02_USB=m -CONFIG_MT76x0_COMMON=m CONFIG_MT76x0U=m -# CONFIG_MT76x0E is not set -CONFIG_MT76x2_COMMON=m -# CONFIG_MT76x2E is not set CONFIG_MT76x2U=m -# CONFIG_MT7603E is not set -# CONFIG_MT7615E is not set -# CONFIG_MT7663U is not set -# CONFIG_MT7663S is not set -# CONFIG_MT7915E is not set -# CONFIG_MT7921E is not set -# CONFIG_MT7921S is not set -# CONFIG_MT7921U is not set -# CONFIG_MT7996E is not set -CONFIG_WLAN_VENDOR_MICROCHIP=y -# CONFIG_WILC1000_SDIO is not set -# CONFIG_WILC1000_SPI is not set -CONFIG_WLAN_VENDOR_PURELIFI=y -# CONFIG_PLFXLC is not set -CONFIG_WLAN_VENDOR_RALINK=y CONFIG_RT2X00=m -# CONFIG_RT2400PCI is not set -# CONFIG_RT2500PCI is not set -# CONFIG_RT61PCI is not set CONFIG_RT2800PCI=m -CONFIG_RT2800PCI_RT33XX=y -CONFIG_RT2800PCI_RT35XX=y -CONFIG_RT2800PCI_RT53XX=y -CONFIG_RT2800PCI_RT3290=y -# CONFIG_RT2500USB is not set -# CONFIG_RT73USB is not set CONFIG_RT2800USB=m -CONFIG_RT2800USB_RT33XX=y -CONFIG_RT2800USB_RT35XX=y CONFIG_RT2800USB_RT3573=y CONFIG_RT2800USB_RT53XX=y CONFIG_RT2800USB_RT55XX=y CONFIG_RT2800USB_UNKNOWN=y -CONFIG_RT2800_LIB=m -CONFIG_RT2800_LIB_MMIO=m -CONFIG_RT2X00_LIB_MMIO=m -CONFIG_RT2X00_LIB_PCI=m -CONFIG_RT2X00_LIB_USB=m -CONFIG_RT2X00_LIB=m -CONFIG_RT2X00_LIB_FIRMWARE=y -CONFIG_RT2X00_LIB_CRYPTO=y -CONFIG_RT2X00_LIB_LEDS=y -# CONFIG_RT2X00_DEBUG is not set -CONFIG_WLAN_VENDOR_REALTEK=y -# CONFIG_RTL8180 is not set -# CONFIG_RTL8187 is not set -CONFIG_RTL_CARDS=m CONFIG_RTL8192CE=m CONFIG_RTL8192SE=m CONFIG_RTL8192DE=m @@ -3080,77 +1035,29 @@ CONFIG_RTL8188EE=m CONFIG_RTL8192EE=m CONFIG_RTL8821AE=m CONFIG_RTL8192CU=m -CONFIG_RTLWIFI=m -CONFIG_RTLWIFI_PCI=m -CONFIG_RTLWIFI_USB=m # CONFIG_RTLWIFI_DEBUG is not set -CONFIG_RTL8192C_COMMON=m -CONFIG_RTL8723_COMMON=m -CONFIG_RTLBTCOEXIST=m CONFIG_RTL8XXXU=m -# CONFIG_RTL8XXXU_UNTESTED is not set -# CONFIG_RTW88 is not set -# CONFIG_RTW89 is not set # CONFIG_WLAN_VENDOR_RSI is not set 
-CONFIG_WLAN_VENDOR_SILABS=y -# CONFIG_WFX is not set # CONFIG_WLAN_VENDOR_ST is not set # CONFIG_WLAN_VENDOR_TI is not set -CONFIG_WLAN_VENDOR_ZYDAS=y -# CONFIG_USB_ZD1201 is not set CONFIG_ZD1211RW=m -# CONFIG_ZD1211RW_DEBUG is not set -CONFIG_WLAN_VENDOR_QUANTENNA=y -# CONFIG_QTNFMAC_PCIE is not set CONFIG_USB_NET_RNDIS_WLAN=m CONFIG_MAC80211_HWSIM=m -# CONFIG_VIRT_WIFI is not set CONFIG_WAN=y CONFIG_HDLC=m CONFIG_HDLC_RAW=m -# CONFIG_HDLC_RAW_ETH is not set CONFIG_HDLC_CISCO=m CONFIG_HDLC_FR=m CONFIG_HDLC_PPP=m -# CONFIG_HDLC_X25 is not set -# CONFIG_PCI200SYN is not set -# CONFIG_WANXL is not set -# CONFIG_PC300TOO is not set -# CONFIG_FARSYNC is not set -# CONFIG_LAPBETHER is not set -CONFIG_IEEE802154_DRIVERS=m CONFIG_IEEE802154_FAKELB=m -# CONFIG_IEEE802154_AT86RF230 is not set -# CONFIG_IEEE802154_MRF24J40 is not set -# CONFIG_IEEE802154_CC2520 is not set -# CONFIG_IEEE802154_ATUSB is not set -# CONFIG_IEEE802154_ADF7242 is not set -# CONFIG_IEEE802154_CA8210 is not set -# CONFIG_IEEE802154_MCR20A is not set -# CONFIG_IEEE802154_HWSIM is not set - -# -# Wireless WAN -# -# CONFIG_WWAN is not set -# end of Wireless WAN - CONFIG_VMXNET3=m CONFIG_FUJITSU_ES=m CONFIG_USB4_NET=m CONFIG_NETDEVSIM=m -CONFIG_NET_FAILOVER=m CONFIG_ISDN=y -CONFIG_ISDN_CAPI=y -CONFIG_CAPI_TRACE=y -CONFIG_ISDN_CAPI_MIDDLEWARE=y CONFIG_MISDN=m CONFIG_MISDN_DSP=m CONFIG_MISDN_L1OIP=m - -# -# mISDN hardware drivers -# CONFIG_MISDN_HFCPCI=m CONFIG_MISDN_HFCMULTI=m CONFIG_MISDN_HFCUSB=m @@ -3159,777 +1066,161 @@ CONFIG_MISDN_SPEEDFAX=m CONFIG_MISDN_INFINEON=m CONFIG_MISDN_W6692=m CONFIG_MISDN_NETJET=m -CONFIG_MISDN_HDLC=m -CONFIG_MISDN_IPAC=m -CONFIG_MISDN_ISAR=m - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_LEDS=y -CONFIG_INPUT_FF_MEMLESS=m -CONFIG_INPUT_SPARSEKMAP=y -# CONFIG_INPUT_MATRIXKMAP is not set -CONFIG_INPUT_VIVALDIFMAP=y - -# -# Userland interfaces -# CONFIG_INPUT_MOUSEDEV=y CONFIG_INPUT_MOUSEDEV_PSAUX=y -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -CONFIG_INPUT_KEYBOARD=y -# CONFIG_KEYBOARD_ADC is not set -# CONFIG_KEYBOARD_ADP5588 is not set -# CONFIG_KEYBOARD_ADP5589 is not set -CONFIG_KEYBOARD_ATKBD=y -# CONFIG_KEYBOARD_QT1050 is not set -# CONFIG_KEYBOARD_QT1070 is not set -# CONFIG_KEYBOARD_QT2160 is not set -# CONFIG_KEYBOARD_DLINK_DIR685 is not set -# CONFIG_KEYBOARD_LKKBD is not set -# CONFIG_KEYBOARD_GPIO is not set -# CONFIG_KEYBOARD_GPIO_POLLED is not set -# CONFIG_KEYBOARD_TCA6416 is not set -# CONFIG_KEYBOARD_TCA8418 is not set -# CONFIG_KEYBOARD_MATRIX is not set -# CONFIG_KEYBOARD_LM8323 is not set -# CONFIG_KEYBOARD_LM8333 is not set -# CONFIG_KEYBOARD_MAX7359 is not set -# CONFIG_KEYBOARD_MCS is not set -# CONFIG_KEYBOARD_MPR121 is not set -# CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# CONFIG_KEYBOARD_SAMSUNG is not set -# CONFIG_KEYBOARD_STOWAWAY is not set -# CONFIG_KEYBOARD_SUNKBD is not set -# CONFIG_KEYBOARD_OMAP4 is not set -# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set CONFIG_KEYBOARD_XTKBD=m -# CONFIG_KEYBOARD_CAP11XX is not set -# CONFIG_KEYBOARD_BCM is not set -# CONFIG_KEYBOARD_CYPRESS_SF is not set -CONFIG_INPUT_MOUSE=y -CONFIG_MOUSE_PS2=y -CONFIG_MOUSE_PS2_ALPS=y -CONFIG_MOUSE_PS2_BYD=y -CONFIG_MOUSE_PS2_LOGIPS2PP=y -CONFIG_MOUSE_PS2_SYNAPTICS=y -CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y -CONFIG_MOUSE_PS2_CYPRESS=y -CONFIG_MOUSE_PS2_TRACKPOINT=y CONFIG_MOUSE_PS2_ELANTECH=y -CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y CONFIG_MOUSE_PS2_SENTELIC=y -# CONFIG_MOUSE_PS2_TOUCHKIT is not set -CONFIG_MOUSE_PS2_FOCALTECH=y -CONFIG_MOUSE_PS2_SMBUS=y CONFIG_MOUSE_SERIAL=m CONFIG_MOUSE_APPLETOUCH=m CONFIG_MOUSE_BCM5974=m CONFIG_MOUSE_CYAPA=m CONFIG_MOUSE_ELAN_I2C=m -CONFIG_MOUSE_ELAN_I2C_I2C=y CONFIG_MOUSE_ELAN_I2C_SMBUS=y CONFIG_MOUSE_VSXXXAA=m -# CONFIG_MOUSE_GPIO is not set CONFIG_MOUSE_SYNAPTICS_I2C=m CONFIG_MOUSE_SYNAPTICS_USB=m -# 
CONFIG_INPUT_JOYSTICK is not set CONFIG_INPUT_TABLET=y CONFIG_TABLET_USB_ACECAD=m CONFIG_TABLET_USB_AIPTEK=m -# CONFIG_TABLET_USB_HANWANG is not set CONFIG_TABLET_USB_KBTAB=m -# CONFIG_TABLET_USB_PEGASUS is not set CONFIG_TABLET_SERIAL_WACOM4=m CONFIG_INPUT_TOUCHSCREEN=y -# CONFIG_TOUCHSCREEN_ADS7846 is not set -# CONFIG_TOUCHSCREEN_AD7877 is not set -# CONFIG_TOUCHSCREEN_AD7879 is not set -# CONFIG_TOUCHSCREEN_ADC is not set -# CONFIG_TOUCHSCREEN_AR1021_I2C is not set -# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set -# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set -# CONFIG_TOUCHSCREEN_BU21013 is not set -# CONFIG_TOUCHSCREEN_BU21029 is not set -# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set -# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set -# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set -# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set -# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set -# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set -# CONFIG_TOUCHSCREEN_CYTTSP5 is not set -# CONFIG_TOUCHSCREEN_DYNAPRO is not set -# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set -# CONFIG_TOUCHSCREEN_EETI is not set -# CONFIG_TOUCHSCREEN_EGALAX is not set -# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set -# CONFIG_TOUCHSCREEN_EXC3000 is not set -# CONFIG_TOUCHSCREEN_FUJITSU is not set -# CONFIG_TOUCHSCREEN_GOODIX is not set -# CONFIG_TOUCHSCREEN_HIDEEP is not set -# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set -# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set -# CONFIG_TOUCHSCREEN_ILI210X is not set -# CONFIG_TOUCHSCREEN_ILITEK is not set -# CONFIG_TOUCHSCREEN_S6SY761 is not set -# CONFIG_TOUCHSCREEN_GUNZE is not set -# CONFIG_TOUCHSCREEN_EKTF2127 is not set -# CONFIG_TOUCHSCREEN_ELAN is not set CONFIG_TOUCHSCREEN_ELO=m CONFIG_TOUCHSCREEN_WACOM_W8001=m CONFIG_TOUCHSCREEN_WACOM_I2C=m -# CONFIG_TOUCHSCREEN_MAX11801 is not set -# CONFIG_TOUCHSCREEN_MCS5000 is not set -# CONFIG_TOUCHSCREEN_MMS114 is not set -# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set -# CONFIG_TOUCHSCREEN_MSG2638 is not set -# CONFIG_TOUCHSCREEN_MTOUCH is 
not set -# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set -# CONFIG_TOUCHSCREEN_IMAGIS is not set -# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set -# CONFIG_TOUCHSCREEN_INEXIO is not set -# CONFIG_TOUCHSCREEN_PENMOUNT is not set -# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set -# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set -# CONFIG_TOUCHSCREEN_TOUCHWIN is not set -# CONFIG_TOUCHSCREEN_PIXCIR is not set -# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set -# CONFIG_TOUCHSCREEN_WM97XX is not set -# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set -# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set -# CONFIG_TOUCHSCREEN_TSC_SERIO is not set -# CONFIG_TOUCHSCREEN_TSC2004 is not set -# CONFIG_TOUCHSCREEN_TSC2005 is not set -# CONFIG_TOUCHSCREEN_TSC2007 is not set -# CONFIG_TOUCHSCREEN_RM_TS is not set -# CONFIG_TOUCHSCREEN_SILEAD is not set -# CONFIG_TOUCHSCREEN_SIS_I2C is not set -# CONFIG_TOUCHSCREEN_ST1232 is not set -# CONFIG_TOUCHSCREEN_STMFTS is not set -# CONFIG_TOUCHSCREEN_SUR40 is not set -# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set -# CONFIG_TOUCHSCREEN_SX8654 is not set -# CONFIG_TOUCHSCREEN_TPS6507X is not set -# CONFIG_TOUCHSCREEN_ZET6223 is not set -# CONFIG_TOUCHSCREEN_ZFORCE is not set -# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set -# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set -# CONFIG_TOUCHSCREEN_IQS5XX is not set -# CONFIG_TOUCHSCREEN_IQS7211 is not set -# CONFIG_TOUCHSCREEN_ZINITIX is not set -# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set CONFIG_INPUT_MISC=y -# CONFIG_INPUT_AD714X is not set -# CONFIG_INPUT_ATMEL_CAPTOUCH is not set -# CONFIG_INPUT_BMA150 is not set -# CONFIG_INPUT_E3X0_BUTTON is not set -# CONFIG_INPUT_MMA8450 is not set -# CONFIG_INPUT_GPIO_BEEPER is not set -# CONFIG_INPUT_GPIO_DECODER is not set -# CONFIG_INPUT_GPIO_VIBRA is not set CONFIG_INPUT_ATI_REMOTE2=m CONFIG_INPUT_KEYSPAN_REMOTE=m -# CONFIG_INPUT_KXTJ9 is not set CONFIG_INPUT_POWERMATE=m CONFIG_INPUT_YEALINK=m CONFIG_INPUT_CM109=m CONFIG_INPUT_UINPUT=m -# CONFIG_INPUT_PCF8574 is not set -# 
CONFIG_INPUT_PWM_BEEPER is not set -# CONFIG_INPUT_PWM_VIBRA is not set CONFIG_INPUT_GPIO_ROTARY_ENCODER=m -# CONFIG_INPUT_DA7280_HAPTICS is not set -# CONFIG_INPUT_ADXL34X is not set -# CONFIG_INPUT_IMS_PCU is not set -# CONFIG_INPUT_IQS269A is not set -# CONFIG_INPUT_IQS626A is not set -# CONFIG_INPUT_IQS7222 is not set -# CONFIG_INPUT_CMA3000 is not set -# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set -# CONFIG_INPUT_DRV260X_HAPTICS is not set -# CONFIG_INPUT_DRV2665_HAPTICS is not set -# CONFIG_INPUT_DRV2667_HAPTICS is not set -CONFIG_RMI4_CORE=m CONFIG_RMI4_I2C=m CONFIG_RMI4_SPI=m CONFIG_RMI4_SMB=m -CONFIG_RMI4_F03=y -CONFIG_RMI4_F03_SERIO=m -CONFIG_RMI4_2D_SENSOR=y -CONFIG_RMI4_F11=y -CONFIG_RMI4_F12=y -CONFIG_RMI4_F30=y CONFIG_RMI4_F34=y -# CONFIG_RMI4_F3A is not set -# CONFIG_RMI4_F54 is not set CONFIG_RMI4_F55=y - -# -# Hardware I/O ports -# -CONFIG_SERIO=y -CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y -CONFIG_SERIO_I8042=y CONFIG_SERIO_SERPORT=m -# CONFIG_SERIO_PARKBD is not set -# CONFIG_SERIO_PCIPS2 is not set -CONFIG_SERIO_LIBPS2=y CONFIG_SERIO_RAW=m CONFIG_SERIO_ALTERA_PS2=m -# CONFIG_SERIO_PS2MULT is not set CONFIG_SERIO_ARC_PS2=m -# CONFIG_SERIO_APBPS2 is not set -# CONFIG_SERIO_GPIO_PS2 is not set -# CONFIG_USERIO is not set -# CONFIG_GAMEPORT is not set -# end of Hardware I/O ports -# end of Input device support - -# -# Character devices -# -CONFIG_TTY=y -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_VT_CONSOLE_SLEEP=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=16 -CONFIG_LEGACY_TIOCSTI=y -CONFIG_LDISC_AUTOLOAD=y - -# -# Serial drivers -# -CONFIG_SERIAL_EARLYCON=y CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set -CONFIG_SERIAL_8250_PNP=y -CONFIG_SERIAL_8250_16550A_VARIANTS=y -# CONFIG_SERIAL_8250_FINTEK is not set CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_DMA=y -CONFIG_SERIAL_8250_PCILIB=y -CONFIG_SERIAL_8250_PCI=y 
-CONFIG_SERIAL_8250_EXAR=y CONFIG_SERIAL_8250_NR_UARTS=16 CONFIG_SERIAL_8250_RUNTIME_UARTS=16 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y -# CONFIG_SERIAL_8250_PCI1XXXX is not set CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_SERIAL_8250_DETECT_IRQ is not set CONFIG_SERIAL_8250_RSA=y -CONFIG_SERIAL_8250_DWLIB=y CONFIG_SERIAL_8250_DW=y -# CONFIG_SERIAL_8250_RT288X is not set -CONFIG_SERIAL_8250_PERICOM=y -# CONFIG_SERIAL_OF_PLATFORM is not set - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_MAX3100 is not set -# CONFIG_SERIAL_MAX310X is not set -# CONFIG_SERIAL_UARTLITE is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_SERIAL_JSM=m -# CONFIG_SERIAL_SIFIVE is not set -# CONFIG_SERIAL_SCCNXP is not set -# CONFIG_SERIAL_SC16IS7XX is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -# CONFIG_SERIAL_XILINX_PS_UART is not set CONFIG_SERIAL_ARC=m -CONFIG_SERIAL_ARC_NR_PORTS=1 -# CONFIG_SERIAL_RP2 is not set -# CONFIG_SERIAL_FSL_LPUART is not set -# CONFIG_SERIAL_FSL_LINFLEXUART is not set -# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set -# CONFIG_SERIAL_SPRD is not set -# end of Serial drivers - -CONFIG_SERIAL_MCTRL_GPIO=y CONFIG_SERIAL_NONSTANDARD=y -# CONFIG_MOXA_INTELLIO is not set -# CONFIG_MOXA_SMARTIO is not set CONFIG_N_HDLC=m CONFIG_N_GSM=m CONFIG_NOZOMI=m -# CONFIG_NULL_TTY is not set -CONFIG_HVC_DRIVER=y -# CONFIG_SERIAL_DEV_BUS is not set -# CONFIG_TTY_PRINTK is not set CONFIG_PRINTER=m -# CONFIG_LP_CONSOLE is not set CONFIG_PPDEV=m CONFIG_VIRTIO_CONSOLE=y CONFIG_IPMI_HANDLER=m -CONFIG_IPMI_DMI_DECODE=y -CONFIG_IPMI_PLAT_DATA=y CONFIG_IPMI_PANIC_EVENT=y CONFIG_IPMI_PANIC_STRING=y CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m CONFIG_IPMI_SSIF=m CONFIG_IPMI_WATCHDOG=m CONFIG_IPMI_POWEROFF=m CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_TIMERIOMEM=m -# CONFIG_HW_RANDOM_BA431 is not set CONFIG_HW_RANDOM_VIRTIO=m -# CONFIG_HW_RANDOM_CCTRNG is not set -# CONFIG_HW_RANDOM_XIPHERA is not set -# 
CONFIG_APPLICOM is not set -CONFIG_DEVMEM=y -CONFIG_DEVPORT=y -CONFIG_TCG_TPM=y -CONFIG_HW_RANDOM_TPM=y -CONFIG_TCG_TIS_CORE=m -# CONFIG_TCG_TIS is not set CONFIG_TCG_TIS_SPI=m -# CONFIG_TCG_TIS_SPI_CR50 is not set -# CONFIG_TCG_TIS_I2C is not set -# CONFIG_TCG_TIS_I2C_CR50 is not set CONFIG_TCG_TIS_I2C_ATMEL=m CONFIG_TCG_TIS_I2C_INFINEON=m CONFIG_TCG_TIS_I2C_NUVOTON=m CONFIG_TCG_ATMEL=m CONFIG_TCG_INFINEON=m -CONFIG_TCG_CRB=y -# CONFIG_TCG_VTPM_PROXY is not set -CONFIG_TCG_TIS_ST33ZP24=m CONFIG_TCG_TIS_ST33ZP24_I2C=m CONFIG_TCG_TIS_ST33ZP24_SPI=m -# CONFIG_XILLYBUS is not set -# CONFIG_XILLYUSB is not set -# end of Character devices - -# -# I2C support -# -CONFIG_I2C=y -CONFIG_ACPI_I2C_OPREGION=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=y -# CONFIG_I2C_MUX is not set -CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_SMBUS=m -CONFIG_I2C_ALGOBIT=y -CONFIG_I2C_ALGOPCA=m - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set CONFIG_I2C_AMD756=m CONFIG_I2C_AMD8111=m -# CONFIG_I2C_AMD_MP2 is not set -# CONFIG_I2C_I801 is not set CONFIG_I2C_ISCH=m CONFIG_I2C_PIIX4=y CONFIG_I2C_NFORCE2=m -# CONFIG_I2C_NVIDIA_GPU is not set -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set CONFIG_I2C_SIS96X=m CONFIG_I2C_VIA=m CONFIG_I2C_VIAPRO=m -# CONFIG_I2C_ZHAOXIN is not set - -# -# ACPI drivers -# CONFIG_I2C_SCMI=m -# CONFIG_I2C_ZHAOXIN_SMBUS is not set - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_CBUS_GPIO is not set -CONFIG_I2C_DESIGNWARE_CORE=y -# CONFIG_I2C_DESIGNWARE_SLAVE is not set CONFIG_I2C_DESIGNWARE_PLATFORM=y -# CONFIG_I2C_DESIGNWARE_PCI is not set -# CONFIG_I2C_EMEV2 is not set CONFIG_I2C_GPIO=y -# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set CONFIG_I2C_LS2X=m -# CONFIG_I2C_OCORES is not set CONFIG_I2C_PCA_PLATFORM=m -# CONFIG_I2C_RK3X is not set CONFIG_I2C_SIMTEC=m -# CONFIG_I2C_XILINX is 
not set - -# -# External I2C/SMBus adapter drivers -# CONFIG_I2C_DIOLAN_U2C=m -# CONFIG_I2C_CP2615 is not set CONFIG_I2C_PARPORT=m -# CONFIG_I2C_PCI1XXXX is not set -# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -# CONFIG_I2C_TAOS_EVM is not set CONFIG_I2C_TINY_USB=m CONFIG_I2C_VIPERBOARD=m - -# -# Other I2C/SMBus bus drivers -# -# CONFIG_I2C_VIRTIO is not set -# end of I2C Hardware Bus support - CONFIG_I2C_STUB=m -# CONFIG_I2C_SLAVE is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -# end of I2C support - -# CONFIG_I3C is not set CONFIG_SPI=y -# CONFIG_SPI_DEBUG is not set -CONFIG_SPI_MASTER=y -CONFIG_SPI_MEM=y - -# -# SPI Master Controller Drivers -# -# CONFIG_SPI_ALTERA is not set -# CONFIG_SPI_AXI_SPI_ENGINE is not set -# CONFIG_SPI_BITBANG is not set -# CONFIG_SPI_BUTTERFLY is not set -# CONFIG_SPI_CADENCE is not set -# CONFIG_SPI_CADENCE_XSPI is not set -# CONFIG_SPI_DESIGNWARE is not set -# CONFIG_SPI_GPIO is not set -# CONFIG_SPI_LM70_LLP is not set -CONFIG_SPI_LOONGSON_CORE=y CONFIG_SPI_LOONGSON_PCI=y CONFIG_SPI_LOONGSON_PLATFORM=m -# CONFIG_SPI_FSL_SPI is not set -# CONFIG_SPI_MICROCHIP_CORE is not set -# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set -# CONFIG_SPI_OC_TINY is not set -# CONFIG_SPI_PCI1XXXX is not set -# CONFIG_SPI_PXA2XX is not set -# CONFIG_SPI_SC18IS602 is not set -# CONFIG_SPI_SIFIVE is not set -# CONFIG_SPI_SN_F_OSPI is not set -# CONFIG_SPI_MXIC is not set -# CONFIG_SPI_XCOMM is not set -# CONFIG_SPI_XILINX is not set -# CONFIG_SPI_ZYNQMP_GQSPI is not set -# CONFIG_SPI_AMD is not set - -# -# SPI Multiplexer support -# -# CONFIG_SPI_MUX is not set - -# -# SPI Protocol Masters -# -# CONFIG_SPI_SPIDEV is not set -# CONFIG_SPI_LOOPBACK_TEST is not set -# CONFIG_SPI_TLE62X0 is not set -# CONFIG_SPI_SLAVE is not set -CONFIG_SPI_DYNAMIC=y -# CONFIG_SPMI is not set -# CONFIG_HSI is not set -CONFIG_PPS=y -# CONFIG_PPS_DEBUG is not set - -# -# PPS clients support -# -# 
CONFIG_PPS_CLIENT_KTIMER is not set CONFIG_PPS_CLIENT_LDISC=m CONFIG_PPS_CLIENT_PARPORT=m CONFIG_PPS_CLIENT_GPIO=m - -# -# PPS generators support -# - -# -# PTP clock support -# -CONFIG_PTP_1588_CLOCK=y -CONFIG_PTP_1588_CLOCK_OPTIONAL=y CONFIG_DP83640_PHY=m -# CONFIG_PTP_1588_CLOCK_INES is not set -# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set -# CONFIG_PTP_1588_CLOCK_IDTCM is not set -# CONFIG_PTP_1588_CLOCK_MOCK is not set -# CONFIG_PTP_1588_CLOCK_OCP is not set -# end of PTP clock support - CONFIG_PINCTRL=y -CONFIG_PINMUX=y -CONFIG_PINCONF=y -CONFIG_GENERIC_PINCONF=y -# CONFIG_DEBUG_PINCTRL is not set -# CONFIG_PINCTRL_AMD is not set -# CONFIG_PINCTRL_CY8C95X0 is not set CONFIG_PINCTRL_LOONGSON2=y -# CONFIG_PINCTRL_MCP23S08 is not set -# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set -# CONFIG_PINCTRL_OCELOT is not set -# CONFIG_PINCTRL_SINGLE is not set -# CONFIG_PINCTRL_STMFX is not set -# CONFIG_PINCTRL_SX150X is not set - -# -# Renesas pinctrl drivers -# -# end of Renesas pinctrl drivers - -CONFIG_GPIOLIB=y -CONFIG_GPIOLIB_FASTPATH_LIMIT=512 -CONFIG_OF_GPIO=y -CONFIG_GPIO_ACPI=y -CONFIG_GPIOLIB_IRQCHIP=y -# CONFIG_DEBUG_GPIO is not set CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_CDEV=y -CONFIG_GPIO_CDEV_V1=y -CONFIG_GPIO_GENERIC=y - -# -# Memory mapped GPIO drivers -# -# CONFIG_GPIO_74XX_MMIO is not set -# CONFIG_GPIO_ALTERA is not set CONFIG_GPIO_AMDPT=m -# CONFIG_GPIO_CADENCE is not set -# CONFIG_GPIO_DWAPB is not set -# CONFIG_GPIO_EXAR is not set -# CONFIG_GPIO_FTGPIO010 is not set -# CONFIG_GPIO_GENERIC_PLATFORM is not set -# CONFIG_GPIO_GRGPIO is not set -# CONFIG_GPIO_HLWD is not set -# CONFIG_GPIO_LOGICVC is not set CONFIG_GPIO_LOONGSON_64BIT=y -# CONFIG_GPIO_MB86S7X is not set -# CONFIG_GPIO_SIFIVE is not set -# CONFIG_GPIO_SYSCON is not set -# CONFIG_GPIO_XILINX is not set -# CONFIG_GPIO_AMD_FCH is not set -# end of Memory mapped GPIO drivers - -# -# I2C GPIO expanders -# -# CONFIG_GPIO_ADNP is not set -# CONFIG_GPIO_FXL6408 is not set -# CONFIG_GPIO_DS4520 is not 
set -# CONFIG_GPIO_GW_PLD is not set -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not set -# CONFIG_GPIO_PCA9570 is not set -# CONFIG_GPIO_PCF857X is not set -# CONFIG_GPIO_TPIC2810 is not set -# end of I2C GPIO expanders - -# -# MFD GPIO expanders -# -# end of MFD GPIO expanders - -# -# PCI GPIO expanders -# -# CONFIG_GPIO_PCI_IDIO_16 is not set -# CONFIG_GPIO_PCIE_IDIO_24 is not set -# CONFIG_GPIO_RDC321X is not set -# end of PCI GPIO expanders - -# -# SPI GPIO expanders -# -# CONFIG_GPIO_74X164 is not set -# CONFIG_GPIO_MAX3191X is not set -# CONFIG_GPIO_MAX7301 is not set -# CONFIG_GPIO_MC33880 is not set -# CONFIG_GPIO_PISOSR is not set -# CONFIG_GPIO_XRA1403 is not set -# end of SPI GPIO expanders - -# -# USB GPIO expanders -# CONFIG_GPIO_VIPERBOARD=m -# end of USB GPIO expanders - -# -# Virtual GPIO drivers -# -# CONFIG_GPIO_AGGREGATOR is not set -# CONFIG_GPIO_LATCH is not set -# CONFIG_GPIO_MOCKUP is not set -# CONFIG_GPIO_VIRTIO is not set -# CONFIG_GPIO_SIM is not set -# end of Virtual GPIO drivers - -# CONFIG_W1 is not set CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_GPIO is not set -# CONFIG_POWER_RESET_GPIO_RESTART is not set -# CONFIG_POWER_RESET_LTC2952 is not set -# CONFIG_POWER_RESET_RESTART is not set -# CONFIG_POWER_RESET_SYSCON is not set -# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set -# CONFIG_SYSCON_REBOOT_MODE is not set -# CONFIG_NVMEM_REBOOT_MODE is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -CONFIG_POWER_SUPPLY_HWMON=y -# CONFIG_GENERIC_ADC_BATTERY is not set -# CONFIG_IP5XXX_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_CHARGER_ADP5061 is not set -# CONFIG_BATTERY_CW2015 is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2781 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_SAMSUNG_SDI is not set -# CONFIG_BATTERY_SBS is not set -# CONFIG_CHARGER_SBS is not set -# CONFIG_BATTERY_BQ27XXX is not set -# 
CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_GPIO is not set -# CONFIG_CHARGER_LT3651 is not set -# CONFIG_CHARGER_LTC4162L is not set -# CONFIG_CHARGER_DETECTOR_MAX14656 is not set -# CONFIG_CHARGER_MAX77976 is not set -# CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_BQ24257 is not set -# CONFIG_CHARGER_BQ24735 is not set -# CONFIG_CHARGER_BQ2515X is not set -# CONFIG_CHARGER_BQ25890 is not set -# CONFIG_CHARGER_BQ25980 is not set -# CONFIG_CHARGER_BQ256XX is not set -# CONFIG_BATTERY_GAUGE_LTC2941 is not set -# CONFIG_BATTERY_GOLDFISH is not set -# CONFIG_BATTERY_RT5033 is not set -# CONFIG_CHARGER_RT9455 is not set -# CONFIG_CHARGER_BD99954 is not set -# CONFIG_BATTERY_UG3105 is not set -CONFIG_HWMON=y -CONFIG_HWMON_VID=m -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -# CONFIG_SENSORS_AD7314 is not set CONFIG_SENSORS_AD7414=m CONFIG_SENSORS_AD7418=m CONFIG_SENSORS_ADM1025=m CONFIG_SENSORS_ADM1026=m CONFIG_SENSORS_ADM1029=m CONFIG_SENSORS_ADM1031=m -# CONFIG_SENSORS_ADM1177 is not set CONFIG_SENSORS_ADM9240=m -CONFIG_SENSORS_ADT7X10=m -# CONFIG_SENSORS_ADT7310 is not set CONFIG_SENSORS_ADT7410=m CONFIG_SENSORS_ADT7411=m CONFIG_SENSORS_ADT7462=m CONFIG_SENSORS_ADT7470=m CONFIG_SENSORS_ADT7475=m -# CONFIG_SENSORS_AHT10 is not set -# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set -# CONFIG_SENSORS_AS370 is not set CONFIG_SENSORS_ASC7621=m -# CONFIG_SENSORS_AXI_FAN_CONTROL is not set CONFIG_SENSORS_ATXP1=m -# CONFIG_SENSORS_CORSAIR_CPRO is not set -# CONFIG_SENSORS_CORSAIR_PSU is not set -# CONFIG_SENSORS_DRIVETEMP is not set CONFIG_SENSORS_DS620=m CONFIG_SENSORS_DS1621=m CONFIG_SENSORS_I5K_AMB=m CONFIG_SENSORS_F71805F=m CONFIG_SENSORS_F71882FG=m CONFIG_SENSORS_F75375S=m -# CONFIG_SENSORS_FTSTEUTATES is not set CONFIG_SENSORS_GL518SM=m CONFIG_SENSORS_GL520SM=m CONFIG_SENSORS_G760A=m -# CONFIG_SENSORS_G762 is not set -# 
CONFIG_SENSORS_GPIO_FAN is not set -# CONFIG_SENSORS_HIH6130 is not set -# CONFIG_SENSORS_HS3001 is not set CONFIG_SENSORS_IBMAEM=m CONFIG_SENSORS_IBMPEX=m -# CONFIG_SENSORS_IIO_HWMON is not set CONFIG_SENSORS_IT87=m CONFIG_SENSORS_JC42=m -# CONFIG_SENSORS_POWR1220 is not set CONFIG_SENSORS_LINEAGE=m -# CONFIG_SENSORS_LTC2945 is not set -# CONFIG_SENSORS_LTC2947_I2C is not set -# CONFIG_SENSORS_LTC2947_SPI is not set -# CONFIG_SENSORS_LTC2990 is not set -# CONFIG_SENSORS_LTC2992 is not set CONFIG_SENSORS_LTC4151=m CONFIG_SENSORS_LTC4215=m -# CONFIG_SENSORS_LTC4222 is not set CONFIG_SENSORS_LTC4245=m -# CONFIG_SENSORS_LTC4260 is not set CONFIG_SENSORS_LTC4261=m -# CONFIG_SENSORS_MAX1111 is not set -# CONFIG_SENSORS_MAX127 is not set CONFIG_SENSORS_MAX16065=m CONFIG_SENSORS_MAX1619=m CONFIG_SENSORS_MAX1668=m CONFIG_SENSORS_MAX197=m -# CONFIG_SENSORS_MAX31722 is not set -# CONFIG_SENSORS_MAX31730 is not set -# CONFIG_SENSORS_MAX31760 is not set -# CONFIG_MAX31827 is not set -# CONFIG_SENSORS_MAX6620 is not set -# CONFIG_SENSORS_MAX6621 is not set CONFIG_SENSORS_MAX6639=m CONFIG_SENSORS_MAX6650=m CONFIG_SENSORS_MAX6697=m -# CONFIG_SENSORS_MAX31790 is not set -# CONFIG_SENSORS_MC34VR500 is not set CONFIG_SENSORS_MCP3021=m -# CONFIG_SENSORS_TC654 is not set -# CONFIG_SENSORS_TPS23861 is not set -# CONFIG_SENSORS_MR75203 is not set -# CONFIG_SENSORS_ADCXX is not set CONFIG_SENSORS_LM63=m -# CONFIG_SENSORS_LM70 is not set CONFIG_SENSORS_LM73=m CONFIG_SENSORS_LM75=m CONFIG_SENSORS_LM77=m @@ -3947,322 +1238,72 @@ CONFIG_SENSORS_LM95245=m CONFIG_SENSORS_PC87360=m CONFIG_SENSORS_PC87427=m CONFIG_SENSORS_NTC_THERMISTOR=m -# CONFIG_SENSORS_NCT6683 is not set -CONFIG_SENSORS_NCT6775_CORE=m CONFIG_SENSORS_NCT6775=m -# CONFIG_SENSORS_NCT6775_I2C is not set -# CONFIG_SENSORS_NCT7802 is not set -# CONFIG_SENSORS_NCT7904 is not set -# CONFIG_SENSORS_NPCM7XX is not set -# CONFIG_SENSORS_NZXT_KRAKEN2 is not set -# CONFIG_SENSORS_NZXT_SMART2 is not set -# CONFIG_SENSORS_OCC_P8_I2C is not 
set CONFIG_SENSORS_PCF8591=m CONFIG_PMBUS=m -CONFIG_SENSORS_PMBUS=m -# CONFIG_SENSORS_ACBEL_FSG032 is not set -# CONFIG_SENSORS_ADM1266 is not set CONFIG_SENSORS_ADM1275=m -# CONFIG_SENSORS_BEL_PFE is not set -# CONFIG_SENSORS_BPA_RS600 is not set -# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set -# CONFIG_SENSORS_FSP_3Y is not set -# CONFIG_SENSORS_IBM_CFFPS is not set -# CONFIG_SENSORS_DPS920AB is not set -# CONFIG_SENSORS_INSPUR_IPSPS is not set -# CONFIG_SENSORS_IR35221 is not set -# CONFIG_SENSORS_IR36021 is not set -# CONFIG_SENSORS_IR38064 is not set -# CONFIG_SENSORS_IRPS5401 is not set -# CONFIG_SENSORS_ISL68137 is not set CONFIG_SENSORS_LM25066=m -# CONFIG_SENSORS_LT7182S is not set CONFIG_SENSORS_LTC2978=m -# CONFIG_SENSORS_LTC3815 is not set -# CONFIG_SENSORS_MAX15301 is not set CONFIG_SENSORS_MAX16064=m -# CONFIG_SENSORS_MAX16601 is not set -# CONFIG_SENSORS_MAX20730 is not set -# CONFIG_SENSORS_MAX20751 is not set -# CONFIG_SENSORS_MAX31785 is not set CONFIG_SENSORS_MAX34440=m CONFIG_SENSORS_MAX8688=m -# CONFIG_SENSORS_MP2888 is not set -# CONFIG_SENSORS_MP2975 is not set -# CONFIG_SENSORS_MP5023 is not set -# CONFIG_SENSORS_MPQ7932 is not set -# CONFIG_SENSORS_PIM4328 is not set -# CONFIG_SENSORS_PLI1209BC is not set -# CONFIG_SENSORS_PM6764TR is not set -# CONFIG_SENSORS_PXE1610 is not set -# CONFIG_SENSORS_Q54SJ108A2 is not set -# CONFIG_SENSORS_STPDDC60 is not set -# CONFIG_SENSORS_TDA38640 is not set -# CONFIG_SENSORS_TPS40422 is not set -# CONFIG_SENSORS_TPS53679 is not set -# CONFIG_SENSORS_TPS546D24 is not set CONFIG_SENSORS_UCD9000=m CONFIG_SENSORS_UCD9200=m -# CONFIG_SENSORS_XDPE152 is not set -# CONFIG_SENSORS_XDPE122 is not set CONFIG_SENSORS_ZL6100=m -# CONFIG_SENSORS_PWM_FAN is not set -# CONFIG_SENSORS_SBTSI is not set -# CONFIG_SENSORS_SBRMI is not set CONFIG_SENSORS_SHT15=m CONFIG_SENSORS_SHT21=m -# CONFIG_SENSORS_SHT3x is not set -# CONFIG_SENSORS_SHT4x is not set -# CONFIG_SENSORS_SHTC1 is not set CONFIG_SENSORS_SIS5595=m 
CONFIG_SENSORS_DME1737=m CONFIG_SENSORS_EMC1403=m -# CONFIG_SENSORS_EMC2103 is not set -# CONFIG_SENSORS_EMC2305 is not set CONFIG_SENSORS_EMC6W201=m CONFIG_SENSORS_SMSC47M1=m CONFIG_SENSORS_SMSC47M192=m CONFIG_SENSORS_SMSC47B397=m -CONFIG_SENSORS_SCH56XX_COMMON=m CONFIG_SENSORS_SCH5627=m CONFIG_SENSORS_SCH5636=m -# CONFIG_SENSORS_STTS751 is not set -# CONFIG_SENSORS_ADC128D818 is not set CONFIG_SENSORS_ADS7828=m -# CONFIG_SENSORS_ADS7871 is not set CONFIG_SENSORS_AMC6821=m CONFIG_SENSORS_INA209=m CONFIG_SENSORS_INA2XX=m -# CONFIG_SENSORS_INA238 is not set -# CONFIG_SENSORS_INA3221 is not set -# CONFIG_SENSORS_TC74 is not set CONFIG_SENSORS_THMC50=m CONFIG_SENSORS_TMP102=m -# CONFIG_SENSORS_TMP103 is not set -# CONFIG_SENSORS_TMP108 is not set CONFIG_SENSORS_TMP401=m CONFIG_SENSORS_TMP421=m -# CONFIG_SENSORS_TMP464 is not set -# CONFIG_SENSORS_TMP513 is not set CONFIG_SENSORS_VIA686A=m CONFIG_SENSORS_VT1211=m CONFIG_SENSORS_VT8231=m -# CONFIG_SENSORS_W83773G is not set CONFIG_SENSORS_W83781D=m CONFIG_SENSORS_W83791D=m CONFIG_SENSORS_W83792D=m CONFIG_SENSORS_W83793=m CONFIG_SENSORS_W83795=m -# CONFIG_SENSORS_W83795_FANCTRL is not set CONFIG_SENSORS_W83L785TS=m CONFIG_SENSORS_W83L786NG=m CONFIG_SENSORS_W83627HF=m CONFIG_SENSORS_W83627EHF=m - -# -# ACPI drivers -# CONFIG_SENSORS_ACPI_POWER=m -CONFIG_THERMAL=y -# CONFIG_THERMAL_NETLINK is not set -# CONFIG_THERMAL_STATISTICS is not set -CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_OF=y -# CONFIG_THERMAL_WRITABLE_TRIPS is not set -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set -# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set CONFIG_THERMAL_GOV_FAIR_SHARE=y -CONFIG_THERMAL_GOV_STEP_WISE=y -# CONFIG_THERMAL_GOV_BANG_BANG is not set -# CONFIG_THERMAL_GOV_USER_SPACE is not set -# CONFIG_CPU_THERMAL is not set -# CONFIG_DEVFREQ_THERMAL is not set CONFIG_THERMAL_EMULATION=y -# CONFIG_THERMAL_MMIO is not set -# CONFIG_GENERIC_ADC_THERMAL is 
not set CONFIG_LOONGSON2_THERMAL=m CONFIG_WATCHDOG=y CONFIG_WATCHDOG_CORE=y -# CONFIG_WATCHDOG_NOWAYOUT is not set -CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y -CONFIG_WATCHDOG_OPEN_TIMEOUT=0 CONFIG_WATCHDOG_SYSFS=y -# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set - -# -# Watchdog Pretimeout Governors -# -# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set - -# -# Watchdog Device Drivers -# CONFIG_SOFT_WATCHDOG=m CONFIG_GPIO_WATCHDOG=m CONFIG_WDAT_WDT=m -# CONFIG_XILINX_WATCHDOG is not set -# CONFIG_ZIIRAVE_WATCHDOG is not set -# CONFIG_CADENCE_WATCHDOG is not set -# CONFIG_DW_WATCHDOG is not set -# CONFIG_MAX63XX_WATCHDOG is not set CONFIG_ALIM7101_WDT=m CONFIG_I6300ESB_WDT=m -# CONFIG_MEN_A21_WDT is not set - -# -# PCI-based Watchdog Cards -# CONFIG_PCIPCWATCHDOG=m CONFIG_WDTPCI=m - -# -# USB-based Watchdog Cards -# CONFIG_USBPCWATCHDOG=m -CONFIG_SSB_POSSIBLE=y -# CONFIG_SSB is not set -CONFIG_BCMA_POSSIBLE=y -CONFIG_BCMA=m -CONFIG_BCMA_HOST_PCI_POSSIBLE=y -CONFIG_BCMA_HOST_PCI=y -# CONFIG_BCMA_HOST_SOC is not set -CONFIG_BCMA_DRIVER_PCI=y CONFIG_BCMA_DRIVER_GMAC_CMN=y CONFIG_BCMA_DRIVER_GPIO=y -# CONFIG_BCMA_DEBUG is not set - -# -# Multifunction device drivers -# -CONFIG_MFD_CORE=y -# CONFIG_MFD_ACT8945A is not set -# CONFIG_MFD_AS3711 is not set -# CONFIG_MFD_SMPRO is not set -# CONFIG_MFD_AS3722 is not set -# CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_AAT2870_CORE is not set -# CONFIG_MFD_ATMEL_FLEXCOM is not set -# CONFIG_MFD_ATMEL_HLCDC is not set -# CONFIG_MFD_BCM590XX is not set -# CONFIG_MFD_BD9571MWV is not set -# CONFIG_MFD_AXP20X_I2C is not set -# CONFIG_MFD_CS42L43_I2C is not set -# CONFIG_MFD_MADERA is not set -# CONFIG_MFD_MAX5970 is not set -# CONFIG_PMIC_DA903X is not set -# CONFIG_MFD_DA9052_SPI is not set -# CONFIG_MFD_DA9052_I2C is not set -# CONFIG_MFD_DA9055 is not set -# CONFIG_MFD_DA9062 is not set -# CONFIG_MFD_DA9063 is not set -# CONFIG_MFD_DA9150 is not set -# CONFIG_MFD_DLN2 is not set -# CONFIG_MFD_GATEWORKS_GSC is not set -# 
CONFIG_MFD_MC13XXX_SPI is not set -# CONFIG_MFD_MC13XXX_I2C is not set -# CONFIG_MFD_MP2629 is not set -# CONFIG_MFD_HI6421_PMIC is not set -# CONFIG_LPC_ICH is not set -CONFIG_LPC_SCH=m -# CONFIG_MFD_IQS62X is not set -# CONFIG_MFD_JANZ_CMODIO is not set -# CONFIG_MFD_KEMPLD is not set -# CONFIG_MFD_88PM800 is not set -# CONFIG_MFD_88PM805 is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_MFD_MAX14577 is not set -# CONFIG_MFD_MAX77541 is not set -# CONFIG_MFD_MAX77620 is not set -# CONFIG_MFD_MAX77650 is not set -# CONFIG_MFD_MAX77686 is not set -# CONFIG_MFD_MAX77693 is not set -# CONFIG_MFD_MAX77714 is not set -# CONFIG_MFD_MAX77843 is not set -# CONFIG_MFD_MAX8907 is not set -# CONFIG_MFD_MAX8925 is not set -# CONFIG_MFD_MAX8997 is not set -# CONFIG_MFD_MAX8998 is not set -# CONFIG_MFD_MT6360 is not set -# CONFIG_MFD_MT6370 is not set -# CONFIG_MFD_MT6397 is not set -# CONFIG_MFD_MENF21BMC is not set -# CONFIG_MFD_OCELOT is not set -# CONFIG_EZX_PCAP is not set -# CONFIG_MFD_CPCAP is not set CONFIG_MFD_VIPERBOARD=m -# CONFIG_MFD_NTXEC is not set -# CONFIG_MFD_RETU is not set -# CONFIG_MFD_PCF50633 is not set -# CONFIG_MFD_SY7636A is not set -# CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_RT4831 is not set -# CONFIG_MFD_RT5033 is not set -# CONFIG_MFD_RT5120 is not set -# CONFIG_MFD_RC5T583 is not set -# CONFIG_MFD_RK8XX_I2C is not set -# CONFIG_MFD_RK8XX_SPI is not set -# CONFIG_MFD_RN5T618 is not set -# CONFIG_MFD_SEC_CORE is not set -# CONFIG_MFD_SI476X_CORE is not set CONFIG_MFD_SM501=m CONFIG_MFD_SM501_GPIO=y -# CONFIG_MFD_SKY81452 is not set -# CONFIG_MFD_STMPE is not set -CONFIG_MFD_SYSCON=y -# CONFIG_MFD_TI_AM335X_TSCADC is not set -# CONFIG_MFD_LP3943 is not set -# CONFIG_MFD_LP8788 is not set -# CONFIG_MFD_TI_LMU is not set -# CONFIG_MFD_PALMAS is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TPS65086 is not set -# CONFIG_MFD_TPS65090 is not set -# CONFIG_MFD_TPS65217 is not set -# 
CONFIG_MFD_TI_LP873X is not set -# CONFIG_MFD_TI_LP87565 is not set -# CONFIG_MFD_TPS65218 is not set -# CONFIG_MFD_TPS65219 is not set -# CONFIG_MFD_TPS6586X is not set -# CONFIG_MFD_TPS65910 is not set -# CONFIG_MFD_TPS65912_I2C is not set -# CONFIG_MFD_TPS65912_SPI is not set -# CONFIG_MFD_TPS6594_I2C is not set -# CONFIG_MFD_TPS6594_SPI is not set -# CONFIG_TWL4030_CORE is not set -# CONFIG_TWL6040_CORE is not set -# CONFIG_MFD_WL1273_CORE is not set -# CONFIG_MFD_LM3533 is not set -# CONFIG_MFD_TC3589X is not set -# CONFIG_MFD_TQMX86 is not set CONFIG_MFD_VX855=m -# CONFIG_MFD_LOCHNAGAR is not set -# CONFIG_MFD_ARIZONA_I2C is not set -# CONFIG_MFD_ARIZONA_SPI is not set -# CONFIG_MFD_WM8400 is not set -# CONFIG_MFD_WM831X_I2C is not set -# CONFIG_MFD_WM831X_SPI is not set -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_MFD_ROHM_BD718XX is not set -# CONFIG_MFD_ROHM_BD71828 is not set -# CONFIG_MFD_ROHM_BD957XMUF is not set -# CONFIG_MFD_STPMIC1 is not set -# CONFIG_MFD_STMFX is not set -# CONFIG_MFD_ATC260X_I2C is not set -# CONFIG_MFD_QCOM_PM8008 is not set -# CONFIG_MFD_INTEL_M10_BMC_SPI is not set -# CONFIG_MFD_RSMU_I2C is not set -# CONFIG_MFD_RSMU_SPI is not set -# end of Multifunction device drivers - -# CONFIG_REGULATOR is not set CONFIG_RC_CORE=m CONFIG_LIRC=y -CONFIG_RC_MAP=m CONFIG_RC_DECODERS=y CONFIG_IR_IMON_DECODER=m CONFIG_IR_JVC_DECODER=m @@ -4270,7 +1311,6 @@ CONFIG_IR_MCE_KBD_DECODER=m CONFIG_IR_NEC_DECODER=m CONFIG_IR_RC5_DECODER=m CONFIG_IR_RC6_DECODER=m -# CONFIG_IR_RCMM_DECODER is not set CONFIG_IR_SANYO_DECODER=m CONFIG_IR_SHARP_DECODER=m CONFIG_IR_SONY_DECODER=m @@ -4278,114 +1318,31 @@ CONFIG_IR_XMP_DECODER=m CONFIG_RC_DEVICES=y CONFIG_IR_ENE=m CONFIG_IR_FINTEK=m -# CONFIG_IR_GPIO_CIR is not set -# CONFIG_IR_GPIO_TX is not set -# CONFIG_IR_HIX5HD2 is not set -# CONFIG_IR_IGORPLUGUSB is not set CONFIG_IR_IGUANA=m CONFIG_IR_IMON=m CONFIG_IR_IMON_RAW=m CONFIG_IR_ITE_CIR=m CONFIG_IR_MCEUSB=m CONFIG_IR_NUVOTON=m 
-# CONFIG_IR_PWM_TX is not set CONFIG_IR_REDRAT3=m CONFIG_IR_SERIAL=m CONFIG_IR_SERIAL_TRANSMITTER=y -# CONFIG_IR_SPI is not set CONFIG_IR_STREAMZAP=m -# CONFIG_IR_TOY is not set CONFIG_IR_TTUSBIR=m CONFIG_RC_ATI_REMOTE=m -# CONFIG_RC_LOOPBACK is not set -# CONFIG_RC_XBOX_DVD is not set -CONFIG_CEC_CORE=m - -# -# CEC support -# -# CONFIG_MEDIA_CEC_RC is not set -CONFIG_MEDIA_CEC_SUPPORT=y -# CONFIG_CEC_CH7322 is not set CONFIG_USB_PULSE8_CEC=m CONFIG_USB_RAINSHADOW_CEC=m -# end of CEC support - CONFIG_MEDIA_SUPPORT=m -# CONFIG_MEDIA_SUPPORT_FILTER is not set -# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set - -# -# Media device types -# -CONFIG_MEDIA_CAMERA_SUPPORT=y -CONFIG_MEDIA_ANALOG_TV_SUPPORT=y -CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y -CONFIG_MEDIA_RADIO_SUPPORT=y -CONFIG_MEDIA_SDR_SUPPORT=y -CONFIG_MEDIA_PLATFORM_SUPPORT=y -CONFIG_MEDIA_TEST_SUPPORT=y -# end of Media device types - -# -# Media core support -# -CONFIG_VIDEO_DEV=m -CONFIG_MEDIA_CONTROLLER=y -CONFIG_DVB_CORE=m -# end of Media core support - -# -# Video4Linux options -# -CONFIG_VIDEO_V4L2_I2C=y -CONFIG_VIDEO_V4L2_SUBDEV_API=y -# CONFIG_VIDEO_ADV_DEBUG is not set -# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set -CONFIG_VIDEO_TUNER=m -CONFIG_V4L2_FWNODE=m -CONFIG_V4L2_ASYNC=m -# end of Video4Linux options - -# -# Media controller options -# -CONFIG_MEDIA_CONTROLLER_DVB=y -# end of Media controller options - -# -# Digital TV options -# -# CONFIG_DVB_MMAP is not set -CONFIG_DVB_NET=y CONFIG_DVB_MAX_ADAPTERS=8 -CONFIG_DVB_DYNAMIC_MINORS=y -# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set -# CONFIG_DVB_ULE_DEBUG is not set -# end of Digital TV options - -# -# Media drivers -# - -# -# Media drivers -# CONFIG_MEDIA_USB_SUPPORT=y - -# -# Webcam devices -# CONFIG_USB_GSPCA=m CONFIG_USB_GSPCA_BENQ=m CONFIG_USB_GSPCA_CONEX=m CONFIG_USB_GSPCA_CPIA1=m -# CONFIG_USB_GSPCA_DTCS033 is not set CONFIG_USB_GSPCA_ETOMS=m CONFIG_USB_GSPCA_FINEPIX=m CONFIG_USB_GSPCA_JEILINJ=m CONFIG_USB_GSPCA_JL2005BCD=m -# CONFIG_USB_GSPCA_KINECT is 
not set CONFIG_USB_GSPCA_KONICA=m CONFIG_USB_GSPCA_MARS=m CONFIG_USB_GSPCA_MR97310A=m @@ -4412,12 +1369,10 @@ CONFIG_USB_GSPCA_SQ905=m CONFIG_USB_GSPCA_SQ905C=m CONFIG_USB_GSPCA_SQ930X=m CONFIG_USB_GSPCA_STK014=m -# CONFIG_USB_GSPCA_STK1135 is not set CONFIG_USB_GSPCA_STV0680=m CONFIG_USB_GSPCA_SUNPLUS=m CONFIG_USB_GSPCA_T613=m CONFIG_USB_GSPCA_TOPRO=m -# CONFIG_USB_GSPCA_TOUPTEK is not set CONFIG_USB_GSPCA_TV8532=m CONFIG_USB_GSPCA_VC032X=m CONFIG_USB_GSPCA_VICAM=m @@ -4427,62 +1382,31 @@ CONFIG_USB_GL860=m CONFIG_USB_M5602=m CONFIG_USB_STV06XX=m CONFIG_USB_PWC=m -# CONFIG_USB_PWC_DEBUG is not set -CONFIG_USB_PWC_INPUT_EVDEV=y CONFIG_USB_S2255=m -# CONFIG_VIDEO_USBTV is not set CONFIG_USB_VIDEO_CLASS=m -CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y - -# -# Analog TV USB devices -# -# CONFIG_VIDEO_GO7007 is not set CONFIG_VIDEO_HDPVR=m CONFIG_VIDEO_PVRUSB2=m -CONFIG_VIDEO_PVRUSB2_SYSFS=y -CONFIG_VIDEO_PVRUSB2_DVB=y -# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set -# CONFIG_VIDEO_STK1160 is not set - -# -# Analog/digital TV USB devices -# CONFIG_VIDEO_AU0828=m -CONFIG_VIDEO_AU0828_V4L2=y -# CONFIG_VIDEO_AU0828_RC is not set - -# -# Digital TV USB devices -# -# CONFIG_DVB_AS102 is not set CONFIG_DVB_B2C2_FLEXCOP_USB=m -# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set CONFIG_DVB_USB_V2=m CONFIG_DVB_USB_AF9035=m CONFIG_DVB_USB_ANYSEE=m CONFIG_DVB_USB_AU6610=m CONFIG_DVB_USB_AZ6007=m CONFIG_DVB_USB_CE6230=m -# CONFIG_DVB_USB_DVBSKY is not set CONFIG_DVB_USB_EC168=m CONFIG_DVB_USB_GL861=m CONFIG_DVB_USB_LME2510=m CONFIG_DVB_USB_MXL111SF=m -# CONFIG_DVB_USB_ZD1301 is not set CONFIG_DVB_USB=m -# CONFIG_DVB_USB_DEBUG is not set CONFIG_DVB_USB_A800=m CONFIG_DVB_USB_AF9005=m CONFIG_DVB_USB_AF9005_REMOTE=m CONFIG_DVB_USB_AZ6027=m CONFIG_DVB_USB_CINERGY_T2=m CONFIG_DVB_USB_CXUSB=m -# CONFIG_DVB_USB_CXUSB_ANALOG is not set CONFIG_DVB_USB_DIB0700=m -CONFIG_DVB_USB_DIB3000MC=m CONFIG_DVB_USB_DIBUSB_MB=m -# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set CONFIG_DVB_USB_DIBUSB_MC=m 
CONFIG_DVB_USB_DIGITV=m CONFIG_DVB_USB_DTT200U=m @@ -4501,1043 +1425,104 @@ CONFIG_DVB_USB_VP7045=m CONFIG_SMS_USB_DRV=m CONFIG_DVB_TTUSB_BUDGET=m CONFIG_DVB_TTUSB_DEC=m - -# -# Webcam, TV (analog/digital) USB devices -# CONFIG_VIDEO_EM28XX=m -# CONFIG_VIDEO_EM28XX_V4L2 is not set CONFIG_VIDEO_EM28XX_ALSA=m CONFIG_VIDEO_EM28XX_DVB=m -CONFIG_VIDEO_EM28XX_RC=m - -# -# Software defined radio USB devices -# -# CONFIG_USB_AIRSPY is not set -# CONFIG_USB_HACKRF is not set -# CONFIG_USB_MSI2500 is not set CONFIG_MEDIA_PCI_SUPPORT=y - -# -# Media capture support -# -# CONFIG_VIDEO_SOLO6X10 is not set -# CONFIG_VIDEO_TW5864 is not set -# CONFIG_VIDEO_TW68 is not set -# CONFIG_VIDEO_TW686X is not set -# CONFIG_VIDEO_ZORAN is not set - -# -# Media capture/analog TV support -# -# CONFIG_VIDEO_DT3155 is not set CONFIG_VIDEO_IVTV=m -# CONFIG_VIDEO_IVTV_ALSA is not set CONFIG_VIDEO_FB_IVTV=m -# CONFIG_VIDEO_HEXIUM_GEMINI is not set -# CONFIG_VIDEO_HEXIUM_ORION is not set -# CONFIG_VIDEO_MXB is not set - -# -# Media capture/analog/hybrid TV support -# CONFIG_VIDEO_BT848=m CONFIG_DVB_BT8XX=m CONFIG_VIDEO_CX18=m -# CONFIG_VIDEO_CX18_ALSA is not set CONFIG_VIDEO_CX23885=m CONFIG_MEDIA_ALTERA_CI=m -# CONFIG_VIDEO_CX25821 is not set CONFIG_VIDEO_CX88=m CONFIG_VIDEO_CX88_ALSA=m CONFIG_VIDEO_CX88_BLACKBIRD=m CONFIG_VIDEO_CX88_DVB=m # CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set -CONFIG_VIDEO_CX88_MPEG=m CONFIG_VIDEO_SAA7134=m CONFIG_VIDEO_SAA7134_ALSA=m -CONFIG_VIDEO_SAA7134_RC=y CONFIG_VIDEO_SAA7134_DVB=m CONFIG_VIDEO_SAA7164=m - -# -# Media digital TV PCI Adapters -# CONFIG_DVB_B2C2_FLEXCOP_PCI=m -# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set CONFIG_DVB_DDBRIDGE=m -# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set CONFIG_DVB_DM1105=m CONFIG_MANTIS_CORE=m CONFIG_DVB_MANTIS=m CONFIG_DVB_HOPPER=m -# CONFIG_DVB_NETUP_UNIDVB is not set CONFIG_DVB_NGENE=m CONFIG_DVB_PLUTO2=m CONFIG_DVB_PT1=m -# CONFIG_DVB_PT3 is not set -# CONFIG_DVB_SMIPCIE is not set CONFIG_DVB_BUDGET_CORE=m CONFIG_DVB_BUDGET=m 
CONFIG_DVB_BUDGET_CI=m CONFIG_DVB_BUDGET_AV=m -# CONFIG_IPU_BRIDGE is not set -CONFIG_RADIO_ADAPTERS=m -# CONFIG_RADIO_MAXIRADIO is not set -# CONFIG_RADIO_SAA7706H is not set -# CONFIG_RADIO_SHARK is not set -# CONFIG_RADIO_SHARK2 is not set -# CONFIG_RADIO_SI4713 is not set -CONFIG_RADIO_TEA575X=m -# CONFIG_RADIO_TEA5764 is not set -# CONFIG_RADIO_TEF6862 is not set -# CONFIG_RADIO_WL1273 is not set -# CONFIG_USB_DSBR is not set -# CONFIG_USB_KEENE is not set -# CONFIG_USB_MA901 is not set -# CONFIG_USB_MR800 is not set -# CONFIG_USB_RAREMONO is not set -# CONFIG_RADIO_SI470X is not set -CONFIG_MEDIA_PLATFORM_DRIVERS=y -# CONFIG_V4L_PLATFORM_DRIVERS is not set -# CONFIG_SDR_PLATFORM_DRIVERS is not set -# CONFIG_DVB_PLATFORM_DRIVERS is not set -# CONFIG_V4L_MEM2MEM_DRIVERS is not set - -# -# Allegro DVT media platform drivers -# - -# -# Amlogic media platform drivers -# - -# -# Amphion drivers -# - -# -# Aspeed media platform drivers -# - -# -# Atmel media platform drivers -# - -# -# Cadence media platform drivers -# -# CONFIG_VIDEO_CADENCE_CSI2RX is not set -# CONFIG_VIDEO_CADENCE_CSI2TX is not set - -# -# Chips&Media media platform drivers -# - -# -# Intel media platform drivers -# - -# -# Marvell media platform drivers -# - -# -# Mediatek media platform drivers -# - -# -# Microchip Technology, Inc. 
media platform drivers -# - -# -# NVidia media platform drivers -# - -# -# NXP media platform drivers -# - -# -# Qualcomm media platform drivers -# - -# -# Renesas media platform drivers -# - -# -# Rockchip media platform drivers -# - -# -# Samsung media platform drivers -# - -# -# STMicroelectronics media platform drivers -# - -# -# Sunxi media platform drivers -# - -# -# Texas Instruments drivers -# - -# -# Verisilicon media platform drivers -# - -# -# VIA media platform drivers -# - -# -# Xilinx media platform drivers -# - -# -# MMC/SDIO DVB adapters -# CONFIG_SMS_SDIO_DRV=m -# CONFIG_V4L_TEST_DRIVERS is not set -# CONFIG_DVB_TEST_DRIVERS is not set - -# -# FireWire (IEEE 1394) Adapters -# CONFIG_DVB_FIREDTV=m -CONFIG_DVB_FIREDTV_INPUT=y -CONFIG_MEDIA_COMMON_OPTIONS=y - -# -# common driver options -# -CONFIG_CYPRESS_FIRMWARE=m -CONFIG_TTPCI_EEPROM=m -CONFIG_UVC_COMMON=m -CONFIG_VIDEO_CX2341X=m -CONFIG_VIDEO_TVEEPROM=m -CONFIG_DVB_B2C2_FLEXCOP=m -CONFIG_VIDEO_SAA7146=m -CONFIG_VIDEO_SAA7146_VV=m -CONFIG_SMS_SIANO_MDTV=m -CONFIG_SMS_SIANO_RC=y -# CONFIG_SMS_SIANO_DEBUGFS is not set -CONFIG_VIDEOBUF2_CORE=m -CONFIG_VIDEOBUF2_V4L2=m -CONFIG_VIDEOBUF2_MEMOPS=m -CONFIG_VIDEOBUF2_VMALLOC=m -CONFIG_VIDEOBUF2_DMA_SG=m -CONFIG_VIDEOBUF2_DVB=m -# end of Media drivers - -# -# Media ancillary drivers -# -CONFIG_MEDIA_ATTACH=y -CONFIG_VIDEO_IR_I2C=m -CONFIG_VIDEO_CAMERA_SENSOR=y -# CONFIG_VIDEO_AR0521 is not set -# CONFIG_VIDEO_HI556 is not set -# CONFIG_VIDEO_HI846 is not set -# CONFIG_VIDEO_HI847 is not set -# CONFIG_VIDEO_IMX208 is not set -# CONFIG_VIDEO_IMX214 is not set -# CONFIG_VIDEO_IMX219 is not set -# CONFIG_VIDEO_IMX258 is not set -# CONFIG_VIDEO_IMX274 is not set -# CONFIG_VIDEO_IMX290 is not set -# CONFIG_VIDEO_IMX296 is not set -# CONFIG_VIDEO_IMX319 is not set -# CONFIG_VIDEO_IMX334 is not set -# CONFIG_VIDEO_IMX335 is not set -# CONFIG_VIDEO_IMX355 is not set -# CONFIG_VIDEO_IMX412 is not set -# CONFIG_VIDEO_IMX415 is not set -# CONFIG_VIDEO_MT9M001 is not 
set -# CONFIG_VIDEO_MT9M111 is not set -# CONFIG_VIDEO_MT9P031 is not set -# CONFIG_VIDEO_MT9T112 is not set -# CONFIG_VIDEO_MT9V011 is not set -# CONFIG_VIDEO_MT9V032 is not set -# CONFIG_VIDEO_MT9V111 is not set -# CONFIG_VIDEO_OG01A1B is not set -# CONFIG_VIDEO_OV01A10 is not set -# CONFIG_VIDEO_OV02A10 is not set -# CONFIG_VIDEO_OV08D10 is not set -# CONFIG_VIDEO_OV08X40 is not set -# CONFIG_VIDEO_OV13858 is not set -# CONFIG_VIDEO_OV13B10 is not set -# CONFIG_VIDEO_OV2640 is not set -# CONFIG_VIDEO_OV2659 is not set -# CONFIG_VIDEO_OV2680 is not set -# CONFIG_VIDEO_OV2685 is not set -# CONFIG_VIDEO_OV2740 is not set -# CONFIG_VIDEO_OV4689 is not set -# CONFIG_VIDEO_OV5640 is not set -# CONFIG_VIDEO_OV5645 is not set -# CONFIG_VIDEO_OV5647 is not set -# CONFIG_VIDEO_OV5648 is not set -# CONFIG_VIDEO_OV5670 is not set -# CONFIG_VIDEO_OV5675 is not set -# CONFIG_VIDEO_OV5693 is not set -# CONFIG_VIDEO_OV5695 is not set -# CONFIG_VIDEO_OV6650 is not set -# CONFIG_VIDEO_OV7251 is not set -# CONFIG_VIDEO_OV7640 is not set -# CONFIG_VIDEO_OV7670 is not set -# CONFIG_VIDEO_OV772X is not set -# CONFIG_VIDEO_OV7740 is not set -# CONFIG_VIDEO_OV8856 is not set -# CONFIG_VIDEO_OV8858 is not set -# CONFIG_VIDEO_OV8865 is not set -# CONFIG_VIDEO_OV9282 is not set -# CONFIG_VIDEO_OV9640 is not set -# CONFIG_VIDEO_OV9650 is not set -# CONFIG_VIDEO_OV9734 is not set -# CONFIG_VIDEO_RDACM20 is not set -# CONFIG_VIDEO_RDACM21 is not set -# CONFIG_VIDEO_RJ54N1 is not set -# CONFIG_VIDEO_S5C73M3 is not set -# CONFIG_VIDEO_S5K5BAF is not set -# CONFIG_VIDEO_S5K6A3 is not set -# CONFIG_VIDEO_ST_VGXY61 is not set -# CONFIG_VIDEO_CCS is not set -# CONFIG_VIDEO_ET8EK8 is not set - -# -# Lens drivers -# -# CONFIG_VIDEO_AD5820 is not set -# CONFIG_VIDEO_AK7375 is not set -# CONFIG_VIDEO_DW9714 is not set -# CONFIG_VIDEO_DW9719 is not set -# CONFIG_VIDEO_DW9768 is not set -# CONFIG_VIDEO_DW9807_VCM is not set -# end of Lens drivers - -# -# Flash devices -# -# CONFIG_VIDEO_ADP1653 is not 
set -# CONFIG_VIDEO_LM3560 is not set -# CONFIG_VIDEO_LM3646 is not set -# end of Flash devices - -# -# Audio decoders, processors and mixers -# -CONFIG_VIDEO_CS3308=m -CONFIG_VIDEO_CS5345=m -CONFIG_VIDEO_CS53L32A=m -CONFIG_VIDEO_MSP3400=m -# CONFIG_VIDEO_SONY_BTF_MPX is not set -# CONFIG_VIDEO_TDA1997X is not set -# CONFIG_VIDEO_TDA7432 is not set -# CONFIG_VIDEO_TDA9840 is not set -# CONFIG_VIDEO_TEA6415C is not set -# CONFIG_VIDEO_TEA6420 is not set -# CONFIG_VIDEO_TLV320AIC23B is not set -# CONFIG_VIDEO_TVAUDIO is not set -# CONFIG_VIDEO_UDA1342 is not set -CONFIG_VIDEO_VP27SMPX=m -CONFIG_VIDEO_WM8739=m -CONFIG_VIDEO_WM8775=m -# end of Audio decoders, processors and mixers - -# -# RDS decoders -# -# CONFIG_VIDEO_SAA6588 is not set -# end of RDS decoders - -# -# Video decoders -# -# CONFIG_VIDEO_ADV7180 is not set -# CONFIG_VIDEO_ADV7183 is not set -# CONFIG_VIDEO_ADV748X is not set -# CONFIG_VIDEO_ADV7604 is not set -# CONFIG_VIDEO_ADV7842 is not set -# CONFIG_VIDEO_BT819 is not set -# CONFIG_VIDEO_BT856 is not set -# CONFIG_VIDEO_BT866 is not set -# CONFIG_VIDEO_ISL7998X is not set -# CONFIG_VIDEO_KS0127 is not set -# CONFIG_VIDEO_ML86V7667 is not set -# CONFIG_VIDEO_SAA7110 is not set -CONFIG_VIDEO_SAA711X=m -# CONFIG_VIDEO_TC358743 is not set -# CONFIG_VIDEO_TC358746 is not set -# CONFIG_VIDEO_TVP514X is not set -# CONFIG_VIDEO_TVP5150 is not set -# CONFIG_VIDEO_TVP7002 is not set -# CONFIG_VIDEO_TW2804 is not set -# CONFIG_VIDEO_TW9903 is not set -# CONFIG_VIDEO_TW9906 is not set -# CONFIG_VIDEO_TW9910 is not set -# CONFIG_VIDEO_VPX3220 is not set - -# -# Video and audio decoders -# -CONFIG_VIDEO_SAA717X=m -CONFIG_VIDEO_CX25840=m -# end of Video decoders - -# -# Video encoders -# -# CONFIG_VIDEO_ADV7170 is not set -# CONFIG_VIDEO_ADV7175 is not set -# CONFIG_VIDEO_ADV7343 is not set -# CONFIG_VIDEO_ADV7393 is not set -# CONFIG_VIDEO_ADV7511 is not set -# CONFIG_VIDEO_AK881X is not set -CONFIG_VIDEO_SAA7127=m -# CONFIG_VIDEO_SAA7185 is not set -# 
CONFIG_VIDEO_THS8200 is not set -# end of Video encoders - -# -# Video improvement chips -# -CONFIG_VIDEO_UPD64031A=m -CONFIG_VIDEO_UPD64083=m -# end of Video improvement chips - -# -# Audio/Video compression chips -# -# CONFIG_VIDEO_SAA6752HS is not set -# end of Audio/Video compression chips - -# -# SDR tuner chips -# -# CONFIG_SDR_MAX2175 is not set -# end of SDR tuner chips - -# -# Miscellaneous helper chips -# -# CONFIG_VIDEO_I2C is not set -CONFIG_VIDEO_M52790=m -# CONFIG_VIDEO_ST_MIPID02 is not set -# CONFIG_VIDEO_THS7303 is not set -# end of Miscellaneous helper chips - -# -# Video serializers and deserializers -# -# CONFIG_VIDEO_DS90UB913 is not set -# CONFIG_VIDEO_DS90UB953 is not set -# CONFIG_VIDEO_DS90UB960 is not set -# end of Video serializers and deserializers - -# -# Media SPI Adapters -# -CONFIG_CXD2880_SPI_DRV=m -# CONFIG_VIDEO_GS1662 is not set -# end of Media SPI Adapters - -CONFIG_MEDIA_TUNER=m - -# -# Customize TV tuners -# -CONFIG_MEDIA_TUNER_E4000=m -CONFIG_MEDIA_TUNER_FC0011=m -CONFIG_MEDIA_TUNER_FC0012=m -CONFIG_MEDIA_TUNER_FC0013=m -CONFIG_MEDIA_TUNER_FC2580=m -CONFIG_MEDIA_TUNER_IT913X=m -CONFIG_MEDIA_TUNER_M88RS6000T=m -CONFIG_MEDIA_TUNER_MAX2165=m -CONFIG_MEDIA_TUNER_MC44S803=m -CONFIG_MEDIA_TUNER_MSI001=m -CONFIG_MEDIA_TUNER_MT2060=m -CONFIG_MEDIA_TUNER_MT2063=m -CONFIG_MEDIA_TUNER_MT20XX=m -CONFIG_MEDIA_TUNER_MT2131=m -CONFIG_MEDIA_TUNER_MT2266=m -CONFIG_MEDIA_TUNER_MXL301RF=m -CONFIG_MEDIA_TUNER_MXL5005S=m -CONFIG_MEDIA_TUNER_MXL5007T=m -CONFIG_MEDIA_TUNER_QM1D1B0004=m -CONFIG_MEDIA_TUNER_QM1D1C0042=m -CONFIG_MEDIA_TUNER_QT1010=m -CONFIG_MEDIA_TUNER_R820T=m -CONFIG_MEDIA_TUNER_SI2157=m -CONFIG_MEDIA_TUNER_SIMPLE=m -CONFIG_MEDIA_TUNER_TDA18212=m -CONFIG_MEDIA_TUNER_TDA18218=m -CONFIG_MEDIA_TUNER_TDA18250=m -CONFIG_MEDIA_TUNER_TDA18271=m -CONFIG_MEDIA_TUNER_TDA827X=m -CONFIG_MEDIA_TUNER_TDA8290=m -CONFIG_MEDIA_TUNER_TDA9887=m -CONFIG_MEDIA_TUNER_TEA5761=m -CONFIG_MEDIA_TUNER_TEA5767=m -CONFIG_MEDIA_TUNER_TUA9001=m 
-CONFIG_MEDIA_TUNER_XC2028=m -CONFIG_MEDIA_TUNER_XC4000=m -CONFIG_MEDIA_TUNER_XC5000=m -# end of Customize TV tuners - -# -# Customise DVB Frontends -# - -# -# Multistandard (satellite) frontends -# -CONFIG_DVB_MXL5XX=m -CONFIG_DVB_STB0899=m -CONFIG_DVB_STB6100=m -CONFIG_DVB_STV090x=m -CONFIG_DVB_STV0910=m -CONFIG_DVB_STV6110x=m -CONFIG_DVB_STV6111=m - -# -# Multistandard (cable + terrestrial) frontends -# -CONFIG_DVB_DRXK=m -CONFIG_DVB_MN88472=m -CONFIG_DVB_MN88473=m -CONFIG_DVB_SI2165=m -CONFIG_DVB_TDA18271C2DD=m - -# -# DVB-S (satellite) frontends -# -CONFIG_DVB_CX24110=m -CONFIG_DVB_CX24116=m -CONFIG_DVB_CX24117=m -CONFIG_DVB_CX24120=m -CONFIG_DVB_CX24123=m -CONFIG_DVB_DS3000=m -CONFIG_DVB_MB86A16=m -CONFIG_DVB_MT312=m -CONFIG_DVB_S5H1420=m -CONFIG_DVB_SI21XX=m -CONFIG_DVB_STB6000=m -CONFIG_DVB_STV0288=m -CONFIG_DVB_STV0299=m -CONFIG_DVB_STV0900=m -CONFIG_DVB_STV6110=m -CONFIG_DVB_TDA10071=m -CONFIG_DVB_TDA10086=m -CONFIG_DVB_TDA8083=m -CONFIG_DVB_TDA8261=m -CONFIG_DVB_TDA826X=m -CONFIG_DVB_TS2020=m -CONFIG_DVB_TUA6100=m -CONFIG_DVB_TUNER_CX24113=m -CONFIG_DVB_TUNER_ITD1000=m -CONFIG_DVB_VES1X93=m -CONFIG_DVB_ZL10036=m -CONFIG_DVB_ZL10039=m - -# -# DVB-T (terrestrial) frontends -# -CONFIG_DVB_CX22700=m -CONFIG_DVB_CX22702=m -CONFIG_DVB_CXD2820R=m -CONFIG_DVB_CXD2841ER=m -CONFIG_DVB_DIB3000MB=m -CONFIG_DVB_DIB3000MC=m -CONFIG_DVB_DIB7000M=m -CONFIG_DVB_DIB7000P=m -CONFIG_DVB_DIB9000=m -CONFIG_DVB_DRXD=m -CONFIG_DVB_EC100=m -CONFIG_DVB_GP8PSK_FE=m -CONFIG_DVB_L64781=m -CONFIG_DVB_MT352=m -CONFIG_DVB_NXT6000=m -CONFIG_DVB_S5H1432=m -CONFIG_DVB_SP887X=m -CONFIG_DVB_STV0367=m -CONFIG_DVB_TDA10048=m -CONFIG_DVB_TDA1004X=m -CONFIG_DVB_ZD1301_DEMOD=m -CONFIG_DVB_ZL10353=m -CONFIG_DVB_CXD2880=m - -# -# DVB-C (cable) frontends -# -CONFIG_DVB_STV0297=m -CONFIG_DVB_TDA10021=m -CONFIG_DVB_TDA10023=m -CONFIG_DVB_VES1820=m - -# -# ATSC (North American/Korean Terrestrial/Cable DTV) frontends -# -CONFIG_DVB_AU8522=m -CONFIG_DVB_AU8522_DTV=m -CONFIG_DVB_AU8522_V4L=m 
-CONFIG_DVB_BCM3510=m -CONFIG_DVB_LG2160=m -CONFIG_DVB_LGDT3305=m -CONFIG_DVB_LGDT330X=m -CONFIG_DVB_MXL692=m -CONFIG_DVB_NXT200X=m -CONFIG_DVB_OR51132=m -CONFIG_DVB_OR51211=m -CONFIG_DVB_S5H1409=m -CONFIG_DVB_S5H1411=m - -# -# ISDB-T (terrestrial) frontends -# -CONFIG_DVB_DIB8000=m -CONFIG_DVB_MB86A20S=m -CONFIG_DVB_S921=m - -# -# ISDB-S (satellite) & ISDB-T (terrestrial) frontends -# -CONFIG_DVB_MN88443X=m -CONFIG_DVB_TC90522=m - -# -# Digital terrestrial only tuners/PLL -# -CONFIG_DVB_PLL=m -CONFIG_DVB_TUNER_DIB0070=m -CONFIG_DVB_TUNER_DIB0090=m - -# -# SEC control devices for DVB-S -# -CONFIG_DVB_A8293=m -CONFIG_DVB_AF9033=m -CONFIG_DVB_ASCOT2E=m -CONFIG_DVB_ATBM8830=m -CONFIG_DVB_HELENE=m -CONFIG_DVB_HORUS3A=m -CONFIG_DVB_ISL6405=m -CONFIG_DVB_ISL6421=m -CONFIG_DVB_ISL6423=m -CONFIG_DVB_IX2505V=m -CONFIG_DVB_LGS8GL5=m -CONFIG_DVB_LGS8GXX=m -CONFIG_DVB_LNBH25=m -CONFIG_DVB_LNBH29=m -CONFIG_DVB_LNBP21=m -CONFIG_DVB_LNBP22=m -CONFIG_DVB_M88RS2000=m -CONFIG_DVB_TDA665x=m -CONFIG_DVB_DRX39XYJ=m - -# -# Common Interface (EN50221) controller drivers -# -CONFIG_DVB_CXD2099=m -CONFIG_DVB_SP2=m -# end of Customise DVB Frontends - -# -# Tools to develop new frontends -# -# CONFIG_DVB_DUMMY_FE is not set -# end of Media ancillary drivers - -# -# Graphics support -# -CONFIG_APERTURE_HELPERS=y -CONFIG_VIDEO_CMDLINE=y -CONFIG_VIDEO_NOMODESET=y -# CONFIG_AUXDISPLAY is not set -# CONFIG_PANEL is not set CONFIG_DRM=y -# CONFIG_DRM_DEBUG_MM is not set -CONFIG_DRM_KMS_HELPER=y -# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set -# CONFIG_DRM_DEBUG_MODESET_LOCK is not set -CONFIG_DRM_FBDEV_EMULATION=y -CONFIG_DRM_FBDEV_OVERALLOC=100 -# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set CONFIG_DRM_LOAD_EDID_FIRMWARE=y -CONFIG_DRM_DISPLAY_HELPER=m -CONFIG_DRM_DISPLAY_DP_HELPER=y -CONFIG_DRM_DISPLAY_HDCP_HELPER=y -CONFIG_DRM_DISPLAY_HDMI_HELPER=y CONFIG_DRM_DP_AUX_CHARDEV=y CONFIG_DRM_DP_CEC=y -CONFIG_DRM_TTM=y -CONFIG_DRM_EXEC=m -CONFIG_DRM_BUDDY=m -CONFIG_DRM_VRAM_HELPER=m 
-CONFIG_DRM_TTM_HELPER=m -CONFIG_DRM_GEM_SHMEM_HELPER=y -CONFIG_DRM_SUBALLOC_HELPER=m -CONFIG_DRM_SCHED=m - -# -# I2C encoder or helper chips -# # CONFIG_DRM_I2C_CH7006 is not set # CONFIG_DRM_I2C_SIL164 is not set -# CONFIG_DRM_I2C_NXP_TDA998X is not set -# CONFIG_DRM_I2C_NXP_TDA9950 is not set -# end of I2C encoder or helper chips - -# -# ARM devices -# -# CONFIG_DRM_KOMEDA is not set -# end of ARM devices - CONFIG_DRM_RADEON=m CONFIG_DRM_RADEON_USERPTR=y CONFIG_DRM_AMDGPU=m CONFIG_DRM_AMDGPU_SI=y CONFIG_DRM_AMDGPU_CIK=y CONFIG_DRM_AMDGPU_USERPTR=y -# CONFIG_DRM_AMDGPU_WERROR is not set - -# -# ACP (Audio CoProcessor) Configuration -# -# CONFIG_DRM_AMD_ACP is not set -# end of ACP (Audio CoProcessor) Configuration - -# -# Display Engine Configuration -# -CONFIG_DRM_AMD_DC=y -CONFIG_DRM_AMD_DC_FP=y -# CONFIG_DRM_AMD_DC_SI is not set -# CONFIG_DRM_AMD_SECURE_DISPLAY is not set -# end of Display Engine Configuration - CONFIG_DRM_NOUVEAU=m -CONFIG_NOUVEAU_DEBUG=5 -CONFIG_NOUVEAU_DEBUG_DEFAULT=3 -# CONFIG_NOUVEAU_DEBUG_MMU is not set -# CONFIG_NOUVEAU_DEBUG_PUSH is not set -CONFIG_DRM_NOUVEAU_BACKLIGHT=y -# CONFIG_DRM_VGEM is not set CONFIG_DRM_VKMS=m CONFIG_DRM_UDL=m CONFIG_DRM_AST_LOONGSON=y CONFIG_DRM_MGAG200=m CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m -CONFIG_DRM_VIRTIO_GPU_KMS=y -CONFIG_DRM_PANEL=y - -# -# Display Panels -# -# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set -# CONFIG_DRM_PANEL_ARM_VERSATILE is not set -# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set -# CONFIG_DRM_PANEL_LVDS is not set -# CONFIG_DRM_PANEL_SIMPLE is not set -# CONFIG_DRM_PANEL_EDP is not set -# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set -# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set -# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set -# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set -# CONFIG_DRM_PANEL_LG_LB035Q02 is not set -# CONFIG_DRM_PANEL_LG_LG4573 is not set -# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set -# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set -# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not 
set -# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set -# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set -# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set -# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set -# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set -# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set -# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set -# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set -# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set -# CONFIG_DRM_PANEL_TPO_TPG110 is not set -# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set -# end of Display Panels - -CONFIG_DRM_BRIDGE=y -CONFIG_DRM_PANEL_BRIDGE=y - -# -# Display Interface Bridges -# -# CONFIG_DRM_CHIPONE_ICN6211 is not set -# CONFIG_DRM_CHRONTEL_CH7033 is not set -# CONFIG_DRM_DISPLAY_CONNECTOR is not set -# CONFIG_DRM_ITE_IT6505 is not set -# CONFIG_DRM_LONTIUM_LT8912B is not set -# CONFIG_DRM_LONTIUM_LT9211 is not set -# CONFIG_DRM_LONTIUM_LT9611 is not set -# CONFIG_DRM_LONTIUM_LT9611UXC is not set -# CONFIG_DRM_ITE_IT66121 is not set -# CONFIG_DRM_LVDS_CODEC is not set -# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set -# CONFIG_DRM_NWL_MIPI_DSI is not set -# CONFIG_DRM_NXP_PTN3460 is not set -# CONFIG_DRM_PARADE_PS8622 is not set -# CONFIG_DRM_PARADE_PS8640 is not set -# CONFIG_DRM_SAMSUNG_DSIM is not set -# CONFIG_DRM_SIL_SII8620 is not set -# CONFIG_DRM_SII902X is not set -# CONFIG_DRM_SII9234 is not set -# CONFIG_DRM_SIMPLE_BRIDGE is not set -# CONFIG_DRM_THINE_THC63LVD1024 is not set -# CONFIG_DRM_TOSHIBA_TC358762 is not set -# CONFIG_DRM_TOSHIBA_TC358764 is not set -# CONFIG_DRM_TOSHIBA_TC358767 is not set -# CONFIG_DRM_TOSHIBA_TC358768 is not set -# CONFIG_DRM_TOSHIBA_TC358775 is not set -# CONFIG_DRM_TI_DLPC3433 is not set -# CONFIG_DRM_TI_TFP410 is not set -# CONFIG_DRM_TI_SN65DSI83 is not set -# 
CONFIG_DRM_TI_SN65DSI86 is not set -# CONFIG_DRM_TI_TPD12S015 is not set -# CONFIG_DRM_ANALOGIX_ANX6345 is not set -# CONFIG_DRM_ANALOGIX_ANX78XX is not set -# CONFIG_DRM_ANALOGIX_ANX7625 is not set -# CONFIG_DRM_I2C_ADV7511 is not set -# CONFIG_DRM_CDNS_DSI is not set -# CONFIG_DRM_CDNS_MHDP8546 is not set -# end of Display Interface Bridges - CONFIG_DRM_LOONGSON=y -# CONFIG_DRM_ETNAVIV is not set -# CONFIG_DRM_LOGICVC is not set -# CONFIG_DRM_ARCPGU is not set CONFIG_DRM_BOCHS=m CONFIG_DRM_CIRRUS_QEMU=m -# CONFIG_DRM_GM12U320 is not set -# CONFIG_DRM_PANEL_MIPI_DBI is not set -# CONFIG_DRM_SIMPLEDRM is not set -# CONFIG_TINYDRM_HX8357D is not set -# CONFIG_TINYDRM_ILI9163 is not set -# CONFIG_TINYDRM_ILI9225 is not set -# CONFIG_TINYDRM_ILI9341 is not set -# CONFIG_TINYDRM_ILI9486 is not set -# CONFIG_TINYDRM_MI0283QT is not set -# CONFIG_TINYDRM_REPAPER is not set -# CONFIG_TINYDRM_ST7586 is not set -# CONFIG_TINYDRM_ST7735R is not set -# CONFIG_DRM_GUD is not set -# CONFIG_DRM_SSD130X is not set -# CONFIG_DRM_LEGACY is not set -CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y -# CONFIG_HYDCU_FIXUP_HEADER is not set CONFIG_DRM_INSPUR=m - -# -# Frame buffer Devices -# CONFIG_FB=y -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -# CONFIG_FB_UVESA is not set CONFIG_FB_EFI=y -# CONFIG_FB_OPENCORES is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_I740 is not set -# CONFIG_FB_MATROX is not set CONFIG_FB_RADEON=y -CONFIG_FB_RADEON_I2C=y -CONFIG_FB_RADEON_BACKLIGHT=y -# CONFIG_FB_RADEON_DEBUG is not set -# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_VT8623 is not set -# 
CONFIG_FB_TRIDENT is not set -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -# CONFIG_FB_SM501 is not set -# CONFIG_FB_SMSCUFX is not set -# CONFIG_FB_UDL is not set -# CONFIG_FB_IBM_GXT4500 is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set -# CONFIG_FB_SIMPLE is not set -# CONFIG_FB_SSD1307 is not set -# CONFIG_FB_SM712 is not set CONFIG_FB_LS2K500=m -CONFIG_FB_CORE=y -CONFIG_FB_NOTIFY=y -# CONFIG_FIRMWARE_EDID is not set -CONFIG_FB_DEVICE=y -CONFIG_FB_DDC=y -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -CONFIG_FB_SYS_FILLRECT=y -CONFIG_FB_SYS_COPYAREA=y -CONFIG_FB_SYS_IMAGEBLIT=y -# CONFIG_FB_FOREIGN_ENDIAN is not set -CONFIG_FB_SYS_FOPS=y -CONFIG_FB_DEFERRED_IO=y -CONFIG_FB_IOMEM_HELPERS=y -CONFIG_FB_SYSMEM_HELPERS=y -CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y -CONFIG_FB_BACKLIGHT=y -CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y -# end of Frame buffer Devices - -# -# Backlight & LCD device support -# CONFIG_LCD_CLASS_DEVICE=m -# CONFIG_LCD_L4F00242T03 is not set -# CONFIG_LCD_LMS283GF05 is not set -# CONFIG_LCD_LTV350QV is not set -# CONFIG_LCD_ILI922X is not set -# CONFIG_LCD_ILI9320 is not set -# CONFIG_LCD_TDO24M is not set -# CONFIG_LCD_VGG2432A4 is not set CONFIG_LCD_PLATFORM=m -# CONFIG_LCD_AMS369FG06 is not set -# CONFIG_LCD_LMS501KF03 is not set -# CONFIG_LCD_HX8357 is not set -# CONFIG_LCD_OTM3225A is not set -CONFIG_BACKLIGHT_CLASS_DEVICE=y -# CONFIG_BACKLIGHT_KTD253 is not set -# CONFIG_BACKLIGHT_KTZ8866 is not set -# CONFIG_BACKLIGHT_PWM is not set -# CONFIG_BACKLIGHT_QCOM_WLED is not set -# CONFIG_BACKLIGHT_ADP8860 is not set -# CONFIG_BACKLIGHT_ADP8870 is not set -# CONFIG_BACKLIGHT_LM3630A is not set -# CONFIG_BACKLIGHT_LM3639 is not set CONFIG_BACKLIGHT_LP855X=m -# CONFIG_BACKLIGHT_GPIO is not set -# CONFIG_BACKLIGHT_LV5207LP is not set -# CONFIG_BACKLIGHT_BD6107 is not set -# CONFIG_BACKLIGHT_ARCXCNN is not set -# 
CONFIG_BACKLIGHT_LED is not set -# end of Backlight & LCD device support - -CONFIG_HDMI=y - -# -# Console display driver support -# # CONFIG_VGA_CONSOLE is not set -CONFIG_DUMMY_CONSOLE=y -CONFIG_DUMMY_CONSOLE_COLUMNS=80 -CONFIG_DUMMY_CONSOLE_ROWS=25 -CONFIG_FRAMEBUFFER_CONSOLE=y -# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y -# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set -# end of Console display driver support - CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_LOGO_LINUX_CLUT224=y -# end of Graphics support - -# CONFIG_DRM_ACCEL is not set CONFIG_SOUND=y -CONFIG_SOUND_OSS_CORE=y -CONFIG_SOUND_OSS_CORE_PRECLAIM=y CONFIG_SND=y -CONFIG_SND_TIMER=m -CONFIG_SND_PCM=m -CONFIG_SND_HWDEP=m -CONFIG_SND_SEQ_DEVICE=m -CONFIG_SND_RAWMIDI=m -CONFIG_SND_JACK=y -CONFIG_SND_JACK_INPUT_DEV=y CONFIG_SND_OSSEMUL=y -# CONFIG_SND_MIXER_OSS is not set -# CONFIG_SND_PCM_OSS is not set -CONFIG_SND_PCM_TIMER=y CONFIG_SND_HRTIMER=m -CONFIG_SND_DYNAMIC_MINORS=y -CONFIG_SND_MAX_CARDS=32 # CONFIG_SND_SUPPORT_OLD_API is not set -CONFIG_SND_PROC_FS=y -CONFIG_SND_VERBOSE_PROCFS=y -# CONFIG_SND_VERBOSE_PRINTK is not set -CONFIG_SND_CTL_FAST_LOOKUP=y -# CONFIG_SND_DEBUG is not set -# CONFIG_SND_CTL_INPUT_VALIDATION is not set -CONFIG_SND_VMASTER=y -CONFIG_SND_CTL_LED=m CONFIG_SND_SEQUENCER=m CONFIG_SND_SEQ_DUMMY=m CONFIG_SND_SEQUENCER_OSS=m -CONFIG_SND_SEQ_HRTIMER_DEFAULT=y -CONFIG_SND_SEQ_MIDI_EVENT=m -CONFIG_SND_SEQ_MIDI=m -CONFIG_SND_SEQ_MIDI_EMUL=m -CONFIG_SND_SEQ_VIRMIDI=m -# CONFIG_SND_SEQ_UMP is not set -CONFIG_SND_MPU401_UART=m -CONFIG_SND_OPL3_LIB=m -CONFIG_SND_OPL3_LIB_SEQ=m -CONFIG_SND_VX_LIB=m -CONFIG_SND_AC97_CODEC=m -CONFIG_SND_DRIVERS=y CONFIG_SND_DUMMY=m CONFIG_SND_ALOOP=m -# CONFIG_SND_PCMTEST is not set CONFIG_SND_VIRMIDI=m CONFIG_SND_MTPAV=m -# CONFIG_SND_MTS64 is not set -# CONFIG_SND_SERIAL_U16550 is not set 
CONFIG_SND_MPU401=m -# CONFIG_SND_PORTMAN2X4 is not set CONFIG_SND_AC97_POWER_SAVE=y CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 -CONFIG_SND_PCI=y CONFIG_SND_AD1889=m CONFIG_SND_ATIIXP=m CONFIG_SND_ATIIXP_MODEM=m CONFIG_SND_AU8810=m CONFIG_SND_AU8820=m CONFIG_SND_AU8830=m -# CONFIG_SND_AW2 is not set CONFIG_SND_BT87X=m CONFIG_SND_BT87X_OVERCLOCK=y CONFIG_SND_CA0106=m CONFIG_SND_CMIPCI=m -CONFIG_SND_OXYGEN_LIB=m CONFIG_SND_OXYGEN=m -# CONFIG_SND_CS4281 is not set CONFIG_SND_CS46XX=m -CONFIG_SND_CS46XX_NEW_DSP=y CONFIG_SND_CTXFI=m CONFIG_SND_DARLA20=m CONFIG_SND_GINA20=m @@ -5555,7 +1540,6 @@ CONFIG_SND_INDIGOIOX=m CONFIG_SND_INDIGODJX=m CONFIG_SND_ENS1370=m CONFIG_SND_ENS1371=m -# CONFIG_SND_FM801 is not set CONFIG_SND_HDSP=m CONFIG_SND_HDSPM=m CONFIG_SND_ICE1724=m @@ -5565,9 +1549,7 @@ CONFIG_SND_KORG1212=m CONFIG_SND_LOLA=m CONFIG_SND_LX6464ES=m CONFIG_SND_MIXART=m -# CONFIG_SND_NM256 is not set CONFIG_SND_PCXHR=m -# CONFIG_SND_RIPTIDE is not set CONFIG_SND_RME32=m CONFIG_SND_RME96=m CONFIG_SND_RME9652=m @@ -5575,68 +1557,35 @@ CONFIG_SND_VIA82XX=m CONFIG_SND_VIA82XX_MODEM=m CONFIG_SND_VIRTUOSO=m CONFIG_SND_VX222=m -# CONFIG_SND_YMFPCI is not set - -# -# HD-Audio -# -CONFIG_SND_HDA=m -CONFIG_SND_HDA_GENERIC_LEDS=y CONFIG_SND_HDA_INTEL=m CONFIG_SND_HDA_HWDEP=y -CONFIG_SND_HDA_RECONFIG=y CONFIG_SND_HDA_INPUT_BEEP=y CONFIG_SND_HDA_INPUT_BEEP_MODE=0 CONFIG_SND_HDA_PATCH_LOADER=y -# CONFIG_SND_HDA_SCODEC_CS35L41_I2C is not set -# CONFIG_SND_HDA_SCODEC_CS35L41_SPI is not set -# CONFIG_SND_HDA_SCODEC_CS35L56_I2C is not set -# CONFIG_SND_HDA_SCODEC_CS35L56_SPI is not set -# CONFIG_SND_HDA_SCODEC_TAS2781_I2C is not set CONFIG_SND_HDA_CODEC_REALTEK=m CONFIG_SND_HDA_CODEC_ANALOG=m CONFIG_SND_HDA_CODEC_SIGMATEL=m CONFIG_SND_HDA_CODEC_VIA=m CONFIG_SND_HDA_CODEC_HDMI=m CONFIG_SND_HDA_CODEC_CIRRUS=m -# CONFIG_SND_HDA_CODEC_CS8409 is not set CONFIG_SND_HDA_CODEC_CONEXANT=m CONFIG_SND_HDA_CODEC_CA0110=m CONFIG_SND_HDA_CODEC_CA0132=m -CONFIG_SND_HDA_CODEC_CA0132_DSP=y 
CONFIG_SND_HDA_CODEC_CMEDIA=m CONFIG_SND_HDA_CODEC_SI3054=m -CONFIG_SND_HDA_GENERIC=m -CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 -# CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM is not set -# CONFIG_SND_HDA_CTL_DEV_ID is not set -# end of HD-Audio - -CONFIG_SND_HDA_CORE=m -CONFIG_SND_HDA_DSP_LOADER=y -CONFIG_SND_HDA_COMPONENT=y CONFIG_SND_HDA_PREALLOC_SIZE=512 -CONFIG_SND_INTEL_NHLT=y -CONFIG_SND_INTEL_DSP_CONFIG=m -CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m # CONFIG_SND_SPI is not set -CONFIG_SND_USB=y CONFIG_SND_USB_AUDIO=m -# CONFIG_SND_USB_AUDIO_MIDI_V2 is not set -CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y CONFIG_SND_USB_UA101=m CONFIG_SND_USB_CAIAQ=m CONFIG_SND_USB_CAIAQ_INPUT=y CONFIG_SND_USB_6FIRE=m CONFIG_SND_USB_HIFACE=m CONFIG_SND_BCD2000=m -CONFIG_SND_USB_LINE6=m CONFIG_SND_USB_POD=m CONFIG_SND_USB_PODHD=m CONFIG_SND_USB_TONEPORT=m CONFIG_SND_USB_VARIAX=m -CONFIG_SND_FIREWIRE=y -CONFIG_SND_FIREWIRE_LIB=m CONFIG_SND_DICE=m CONFIG_SND_OXFW=m CONFIG_SND_ISIGHT=m @@ -5647,306 +1596,36 @@ CONFIG_SND_FIREWIRE_TASCAM=m CONFIG_SND_FIREWIRE_MOTU=m CONFIG_SND_FIREFACE=m CONFIG_SND_SOC=m -# CONFIG_SND_SOC_ADI is not set -# CONFIG_SND_SOC_AMD_ACP is not set -# CONFIG_SND_AMD_ACP_CONFIG is not set -# CONFIG_SND_ATMEL_SOC is not set -# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set -# CONFIG_SND_DESIGNWARE_I2S is not set - -# -# SoC Audio for Freescale CPUs -# - -# -# Common SoC Audio options for Freescale CPUs: -# -# CONFIG_SND_SOC_FSL_ASRC is not set -# CONFIG_SND_SOC_FSL_SAI is not set -# CONFIG_SND_SOC_FSL_AUDMIX is not set -# CONFIG_SND_SOC_FSL_SSI is not set -# CONFIG_SND_SOC_FSL_SPDIF is not set -# CONFIG_SND_SOC_FSL_ESAI is not set -# CONFIG_SND_SOC_FSL_MICFIL is not set -# CONFIG_SND_SOC_FSL_XCVR is not set -# CONFIG_SND_SOC_IMX_AUDMUX is not set -# end of SoC Audio for Freescale CPUs - -# CONFIG_SND_SOC_CHV3_I2S is not set -# CONFIG_SND_I2S_HI6210_I2S is not set - -# -# SoC Audio for Loongson CPUs -# -# CONFIG_SND_SOC_LOONGSON_I2S_PCI is not set -# CONFIG_SND_SOC_LOONGSON_CARD is 
not set -# end of SoC Audio for Loongson CPUs - -# CONFIG_SND_SOC_IMG is not set -# CONFIG_SND_SOC_MTK_BTCVSD is not set -# CONFIG_SND_SOC_SOF_TOPLEVEL is not set - -# -# STMicroelectronics STM32 SOC audio support -# -# end of STMicroelectronics STM32 SOC audio support - -# CONFIG_SND_SOC_XILINX_I2S is not set -# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set -# CONFIG_SND_SOC_XILINX_SPDIF is not set -# CONFIG_SND_SOC_XTFPGA_I2S is not set -CONFIG_SND_SOC_I2C_AND_SPI=m - -# -# CODEC drivers -# -# CONFIG_SND_SOC_AC97_CODEC is not set -# CONFIG_SND_SOC_ADAU1372_I2C is not set -# CONFIG_SND_SOC_ADAU1372_SPI is not set -# CONFIG_SND_SOC_ADAU1701 is not set -# CONFIG_SND_SOC_ADAU1761_I2C is not set -# CONFIG_SND_SOC_ADAU1761_SPI is not set -# CONFIG_SND_SOC_ADAU7002 is not set -# CONFIG_SND_SOC_ADAU7118_HW is not set -# CONFIG_SND_SOC_ADAU7118_I2C is not set -# CONFIG_SND_SOC_AK4104 is not set -# CONFIG_SND_SOC_AK4118 is not set -# CONFIG_SND_SOC_AK4375 is not set -# CONFIG_SND_SOC_AK4458 is not set -# CONFIG_SND_SOC_AK4554 is not set -# CONFIG_SND_SOC_AK4613 is not set -# CONFIG_SND_SOC_AK4642 is not set -# CONFIG_SND_SOC_AK5386 is not set -# CONFIG_SND_SOC_AK5558 is not set -# CONFIG_SND_SOC_ALC5623 is not set -# CONFIG_SND_SOC_AUDIO_IIO_AUX is not set -# CONFIG_SND_SOC_AW8738 is not set -# CONFIG_SND_SOC_AW88395 is not set -# CONFIG_SND_SOC_AW88261 is not set -# CONFIG_SND_SOC_BD28623 is not set -# CONFIG_SND_SOC_BT_SCO is not set -# CONFIG_SND_SOC_CHV3_CODEC is not set -# CONFIG_SND_SOC_CS35L32 is not set -# CONFIG_SND_SOC_CS35L33 is not set -# CONFIG_SND_SOC_CS35L34 is not set -# CONFIG_SND_SOC_CS35L35 is not set -# CONFIG_SND_SOC_CS35L36 is not set -# CONFIG_SND_SOC_CS35L41_SPI is not set -# CONFIG_SND_SOC_CS35L41_I2C is not set -# CONFIG_SND_SOC_CS35L45_SPI is not set -# CONFIG_SND_SOC_CS35L45_I2C is not set -# CONFIG_SND_SOC_CS35L56_I2C is not set -# CONFIG_SND_SOC_CS35L56_SPI is not set -# CONFIG_SND_SOC_CS42L42 is not set -# CONFIG_SND_SOC_CS42L51_I2C is not 
set -# CONFIG_SND_SOC_CS42L52 is not set -# CONFIG_SND_SOC_CS42L56 is not set -# CONFIG_SND_SOC_CS42L73 is not set -# CONFIG_SND_SOC_CS42L83 is not set -# CONFIG_SND_SOC_CS4234 is not set -# CONFIG_SND_SOC_CS4265 is not set -# CONFIG_SND_SOC_CS4270 is not set -# CONFIG_SND_SOC_CS4271_I2C is not set -# CONFIG_SND_SOC_CS4271_SPI is not set -# CONFIG_SND_SOC_CS42XX8_I2C is not set -# CONFIG_SND_SOC_CS43130 is not set -# CONFIG_SND_SOC_CS4341 is not set -# CONFIG_SND_SOC_CS4349 is not set -# CONFIG_SND_SOC_CS53L30 is not set -# CONFIG_SND_SOC_CX2072X is not set -# CONFIG_SND_SOC_DA7213 is not set -# CONFIG_SND_SOC_DMIC is not set -# CONFIG_SND_SOC_ES7134 is not set -# CONFIG_SND_SOC_ES7241 is not set -# CONFIG_SND_SOC_ES8316 is not set -# CONFIG_SND_SOC_ES8326 is not set -# CONFIG_SND_SOC_ES8328_I2C is not set -# CONFIG_SND_SOC_ES8328_SPI is not set -# CONFIG_SND_SOC_GTM601 is not set -# CONFIG_SND_SOC_HDA is not set -# CONFIG_SND_SOC_ICS43432 is not set -# CONFIG_SND_SOC_IDT821034 is not set -# CONFIG_SND_SOC_INNO_RK3036 is not set -# CONFIG_SND_SOC_MAX98088 is not set -# CONFIG_SND_SOC_MAX98090 is not set -# CONFIG_SND_SOC_MAX98357A is not set -# CONFIG_SND_SOC_MAX98504 is not set -# CONFIG_SND_SOC_MAX9867 is not set -# CONFIG_SND_SOC_MAX98927 is not set -# CONFIG_SND_SOC_MAX98520 is not set -# CONFIG_SND_SOC_MAX98373_I2C is not set -# CONFIG_SND_SOC_MAX98388 is not set -# CONFIG_SND_SOC_MAX98390 is not set -# CONFIG_SND_SOC_MAX98396 is not set -# CONFIG_SND_SOC_MAX9860 is not set -# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set -# CONFIG_SND_SOC_PCM1681 is not set -# CONFIG_SND_SOC_PCM1789_I2C is not set -# CONFIG_SND_SOC_PCM179X_I2C is not set -# CONFIG_SND_SOC_PCM179X_SPI is not set -# CONFIG_SND_SOC_PCM186X_I2C is not set -# CONFIG_SND_SOC_PCM186X_SPI is not set -# CONFIG_SND_SOC_PCM3060_I2C is not set -# CONFIG_SND_SOC_PCM3060_SPI is not set -# CONFIG_SND_SOC_PCM3168A_I2C is not set -# CONFIG_SND_SOC_PCM3168A_SPI is not set -# CONFIG_SND_SOC_PCM5102A is not set 
-# CONFIG_SND_SOC_PCM512x_I2C is not set -# CONFIG_SND_SOC_PCM512x_SPI is not set -# CONFIG_SND_SOC_PEB2466 is not set -# CONFIG_SND_SOC_RK3328 is not set -# CONFIG_SND_SOC_RT5616 is not set -# CONFIG_SND_SOC_RT5631 is not set -# CONFIG_SND_SOC_RT5640 is not set -# CONFIG_SND_SOC_RT5659 is not set -# CONFIG_SND_SOC_RT9120 is not set -# CONFIG_SND_SOC_SGTL5000 is not set -# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set -# CONFIG_SND_SOC_SIMPLE_MUX is not set -# CONFIG_SND_SOC_SMA1303 is not set -# CONFIG_SND_SOC_SPDIF is not set -# CONFIG_SND_SOC_SRC4XXX_I2C is not set -# CONFIG_SND_SOC_SSM2305 is not set -# CONFIG_SND_SOC_SSM2518 is not set -# CONFIG_SND_SOC_SSM2602_SPI is not set -# CONFIG_SND_SOC_SSM2602_I2C is not set -# CONFIG_SND_SOC_SSM3515 is not set -# CONFIG_SND_SOC_SSM4567 is not set -# CONFIG_SND_SOC_STA32X is not set -# CONFIG_SND_SOC_STA350 is not set -# CONFIG_SND_SOC_STI_SAS is not set -# CONFIG_SND_SOC_TAS2552 is not set -# CONFIG_SND_SOC_TAS2562 is not set -# CONFIG_SND_SOC_TAS2764 is not set -# CONFIG_SND_SOC_TAS2770 is not set -# CONFIG_SND_SOC_TAS2780 is not set -# CONFIG_SND_SOC_TAS2781_I2C is not set -# CONFIG_SND_SOC_TAS5086 is not set -# CONFIG_SND_SOC_TAS571X is not set -# CONFIG_SND_SOC_TAS5720 is not set -# CONFIG_SND_SOC_TAS5805M is not set -# CONFIG_SND_SOC_TAS6424 is not set -# CONFIG_SND_SOC_TDA7419 is not set -# CONFIG_SND_SOC_TFA9879 is not set -# CONFIG_SND_SOC_TFA989X is not set -# CONFIG_SND_SOC_TLV320ADC3XXX is not set -# CONFIG_SND_SOC_TLV320AIC23_I2C is not set -# CONFIG_SND_SOC_TLV320AIC23_SPI is not set -# CONFIG_SND_SOC_TLV320AIC31XX is not set -# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set -# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set -# CONFIG_SND_SOC_TLV320AIC3X_I2C is not set -# CONFIG_SND_SOC_TLV320AIC3X_SPI is not set -# CONFIG_SND_SOC_TLV320ADCX140 is not set -# CONFIG_SND_SOC_TS3A227E is not set -# CONFIG_SND_SOC_TSCS42XX is not set -# CONFIG_SND_SOC_TSCS454 is not set -# CONFIG_SND_SOC_UDA1334 is not set -# 
CONFIG_SND_SOC_WM8510 is not set -# CONFIG_SND_SOC_WM8523 is not set -# CONFIG_SND_SOC_WM8524 is not set -# CONFIG_SND_SOC_WM8580 is not set -# CONFIG_SND_SOC_WM8711 is not set -# CONFIG_SND_SOC_WM8728 is not set -# CONFIG_SND_SOC_WM8731_I2C is not set -# CONFIG_SND_SOC_WM8731_SPI is not set -# CONFIG_SND_SOC_WM8737 is not set -# CONFIG_SND_SOC_WM8741 is not set -# CONFIG_SND_SOC_WM8750 is not set -# CONFIG_SND_SOC_WM8753 is not set -# CONFIG_SND_SOC_WM8770 is not set -# CONFIG_SND_SOC_WM8776 is not set -# CONFIG_SND_SOC_WM8782 is not set -# CONFIG_SND_SOC_WM8804_I2C is not set -# CONFIG_SND_SOC_WM8804_SPI is not set -# CONFIG_SND_SOC_WM8903 is not set -# CONFIG_SND_SOC_WM8904 is not set -# CONFIG_SND_SOC_WM8940 is not set -# CONFIG_SND_SOC_WM8960 is not set -# CONFIG_SND_SOC_WM8961 is not set -# CONFIG_SND_SOC_WM8962 is not set -# CONFIG_SND_SOC_WM8974 is not set -# CONFIG_SND_SOC_WM8978 is not set -# CONFIG_SND_SOC_WM8985 is not set -# CONFIG_SND_SOC_ZL38060 is not set -# CONFIG_SND_SOC_MAX9759 is not set -# CONFIG_SND_SOC_MT6351 is not set -# CONFIG_SND_SOC_MT6358 is not set -# CONFIG_SND_SOC_MT6660 is not set -# CONFIG_SND_SOC_NAU8315 is not set -# CONFIG_SND_SOC_NAU8540 is not set -# CONFIG_SND_SOC_NAU8810 is not set -# CONFIG_SND_SOC_NAU8821 is not set -# CONFIG_SND_SOC_NAU8822 is not set -# CONFIG_SND_SOC_NAU8824 is not set -# CONFIG_SND_SOC_TPA6130A2 is not set -# CONFIG_SND_SOC_LPASS_WSA_MACRO is not set -# CONFIG_SND_SOC_LPASS_VA_MACRO is not set -# CONFIG_SND_SOC_LPASS_RX_MACRO is not set -# CONFIG_SND_SOC_LPASS_TX_MACRO is not set -# end of CODEC drivers - -# CONFIG_SND_SIMPLE_CARD is not set -# CONFIG_SND_AUDIO_GRAPH_CARD is not set -# CONFIG_SND_AUDIO_GRAPH_CARD2 is not set -# CONFIG_SND_TEST_COMPONENT is not set -# CONFIG_SND_VIRTIO is not set -CONFIG_AC97_BUS=m -CONFIG_HID_SUPPORT=y -CONFIG_HID=y CONFIG_HID_BATTERY_STRENGTH=y CONFIG_HIDRAW=y CONFIG_UHID=m -CONFIG_HID_GENERIC=y - -# -# Special HID drivers -# CONFIG_HID_A4TECH=m -# 
CONFIG_HID_ACCUTOUCH is not set CONFIG_HID_ACRUX=m -# CONFIG_HID_ACRUX_FF is not set CONFIG_HID_APPLE=m CONFIG_HID_APPLEIR=m CONFIG_HID_ASUS=m CONFIG_HID_AUREAL=m CONFIG_HID_BELKIN=m CONFIG_HID_BETOP_FF=m -# CONFIG_HID_BIGBEN_FF is not set CONFIG_HID_CHERRY=m CONFIG_HID_CHICONY=m CONFIG_HID_CORSAIR=m -# CONFIG_HID_COUGAR is not set -# CONFIG_HID_MACALLY is not set CONFIG_HID_PRODIKEYS=m CONFIG_HID_CMEDIA=m -# CONFIG_HID_CP2112 is not set -# CONFIG_HID_CREATIVE_SB0540 is not set CONFIG_HID_CYPRESS=m CONFIG_HID_DRAGONRISE=m -# CONFIG_DRAGONRISE_FF is not set -# CONFIG_HID_EMS_FF is not set CONFIG_HID_ELAN=m CONFIG_HID_ELECOM=m CONFIG_HID_ELO=m -# CONFIG_HID_EVISION is not set CONFIG_HID_EZKEY=m -# CONFIG_HID_FT260 is not set CONFIG_HID_GEMBIRD=m CONFIG_HID_GFRM=m -# CONFIG_HID_GLORIOUS is not set CONFIG_HID_HOLTEK=m -# CONFIG_HOLTEK_FF is not set -# CONFIG_HID_GOOGLE_STADIA_FF is not set -# CONFIG_HID_VIVALDI is not set CONFIG_HID_GT683R=m CONFIG_HID_KEYTOUCH=m CONFIG_HID_KYE=m CONFIG_HID_UCLOGIC=m CONFIG_HID_WALTOP=m -# CONFIG_HID_VIEWSONIC is not set -# CONFIG_HID_VRC2 is not set -# CONFIG_HID_XIAOMI is not set CONFIG_HID_GYRATION=m CONFIG_HID_ICADE=m CONFIG_HID_ITE=m @@ -5954,169 +1633,69 @@ CONFIG_HID_JABRA=m CONFIG_HID_TWINHAN=m CONFIG_HID_KENSINGTON=m CONFIG_HID_LCPOWER=m -CONFIG_HID_LED=m CONFIG_HID_LENOVO=m -# CONFIG_HID_LETSKETCH is not set CONFIG_HID_LOGITECH=m CONFIG_HID_LOGITECH_DJ=m -CONFIG_HID_LOGITECH_HIDPP=m CONFIG_LOGITECH_FF=y CONFIG_LOGIRUMBLEPAD2_FF=y CONFIG_LOGIG940_FF=y -CONFIG_LOGIWHEELS_FF=y CONFIG_HID_MAGICMOUSE=y -# CONFIG_HID_MALTRON is not set -# CONFIG_HID_MAYFLASH is not set -# CONFIG_HID_MEGAWORLD_FF is not set -# CONFIG_HID_REDRAGON is not set CONFIG_HID_MICROSOFT=m CONFIG_HID_MONTEREY=m CONFIG_HID_MULTITOUCH=m -# CONFIG_HID_NINTENDO is not set CONFIG_HID_NTI=m CONFIG_HID_NTRIG=y -# CONFIG_HID_NVIDIA_SHIELD is not set CONFIG_HID_ORTEK=m CONFIG_HID_PANTHERLORD=m -# CONFIG_PANTHERLORD_FF is not set CONFIG_HID_PENMOUNT=m 
CONFIG_HID_PETALYNX=m CONFIG_HID_PICOLCD=m -# CONFIG_HID_PICOLCD_FB is not set -# CONFIG_HID_PICOLCD_BACKLIGHT is not set -# CONFIG_HID_PICOLCD_LCD is not set -# CONFIG_HID_PICOLCD_LEDS is not set -# CONFIG_HID_PICOLCD_CIR is not set CONFIG_HID_PLANTRONICS=m -# CONFIG_HID_PXRC is not set -# CONFIG_HID_RAZER is not set CONFIG_HID_PRIMAX=m -# CONFIG_HID_RETRODE is not set CONFIG_HID_ROCCAT=m CONFIG_HID_SAITEK=m CONFIG_HID_SAMSUNG=m -# CONFIG_HID_SEMITEK is not set -# CONFIG_HID_SIGMAMICRO is not set CONFIG_HID_SONY=m CONFIG_SONY_FF=y CONFIG_HID_SPEEDLINK=m -# CONFIG_HID_STEAM is not set CONFIG_HID_STEELSERIES=m CONFIG_HID_SUNPLUS=m CONFIG_HID_RMI=m CONFIG_HID_GREENASIA=m -# CONFIG_GREENASIA_FF is not set CONFIG_HID_SMARTJOYPLUS=m -# CONFIG_SMARTJOYPLUS_FF is not set CONFIG_HID_TIVO=m CONFIG_HID_TOPSEED=m -# CONFIG_HID_TOPRE is not set CONFIG_HID_THINGM=m CONFIG_HID_THRUSTMASTER=m -# CONFIG_THRUSTMASTER_FF is not set -# CONFIG_HID_UDRAW_PS3 is not set -# CONFIG_HID_U2FZERO is not set CONFIG_HID_WACOM=m CONFIG_HID_WIIMOTE=m CONFIG_HID_XINMO=m CONFIG_HID_ZEROPLUS=m -# CONFIG_ZEROPLUS_FF is not set CONFIG_HID_ZYDACRON=m CONFIG_HID_SENSOR_HUB=y CONFIG_HID_SENSOR_CUSTOM_SENSOR=m CONFIG_HID_ALPS=m -# CONFIG_HID_MCP2221 is not set -# end of Special HID drivers - -# -# HID-BPF support -# -# CONFIG_HID_BPF is not set -# end of HID-BPF support - -# -# USB HID support -# -CONFIG_USB_HID=y CONFIG_HID_PID=y CONFIG_USB_HIDDEV=y -# end of USB HID support - CONFIG_I2C_HID=m -# CONFIG_I2C_HID_ACPI is not set -# CONFIG_I2C_HID_OF is not set -# CONFIG_I2C_HID_OF_ELAN is not set -# CONFIG_I2C_HID_OF_GOODIX is not set -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=y CONFIG_USB_LED_TRIG=y -# CONFIG_USB_ULPI_BUS is not set -# CONFIG_USB_CONN_GPIO is not set -CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB=y -CONFIG_USB_PCI=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEFAULT_PERSIST=y -# CONFIG_USB_FEW_INIT_RETRIES is not set -# 
CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_OTG is not set -# CONFIG_USB_OTG_PRODUCTLIST is not set -# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set CONFIG_USB_LEDS_TRIGGER_USBPORT=m -CONFIG_USB_AUTOSUSPEND_DELAY=2 CONFIG_USB_MON=y - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_DBGCAP=y -CONFIG_USB_XHCI_PCI=y -# CONFIG_USB_XHCI_PCI_RENESAS is not set CONFIG_USB_XHCI_PLATFORM=m CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=y -# CONFIG_USB_EHCI_FSL is not set CONFIG_USB_EHCI_HCD_PLATFORM=y -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_MAX3421_HCD is not set CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PCI=y CONFIG_USB_OHCI_HCD_PLATFORM=y CONFIG_USB_UHCI_HCD=y -# CONFIG_USB_SL811_HCD is not set -# CONFIG_USB_R8A66597_HCD is not set -# CONFIG_USB_HCD_BCMA is not set -# CONFIG_USB_HCD_TEST_MODE is not set - -# -# USB Device Class drivers -# -CONFIG_USB_ACM=m CONFIG_USB_PRINTER=m -CONFIG_USB_WDM=m CONFIG_USB_TMC=m - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# CONFIG_USB_STORAGE=m -# CONFIG_USB_STORAGE_DEBUG is not set CONFIG_USB_STORAGE_REALTEK=m -CONFIG_REALTEK_AUTOPM=y CONFIG_USB_STORAGE_DATAFAB=m CONFIG_USB_STORAGE_FREECOM=m CONFIG_USB_STORAGE_ISD200=m @@ -6130,40 +1709,12 @@ CONFIG_USB_STORAGE_KARMA=m CONFIG_USB_STORAGE_CYPRESS_ATACB=m CONFIG_USB_STORAGE_ENE_UB6250=m CONFIG_USB_UAS=m - -# -# USB Imaging devices -# CONFIG_USB_MDC800=m CONFIG_USB_MICROTEK=m -# CONFIG_USBIP_CORE is not set - -# -# USB dual-mode controller drivers -# -# CONFIG_USB_CDNS_SUPPORT is not set -# CONFIG_USB_MUSB_HDRC is not set -# CONFIG_USB_DWC3 is not set CONFIG_USB_DWC2=y CONFIG_USB_DWC2_HOST=y - -# -# Gadget/Dual-role mode requires USB Gadget support to be enabled -# -# CONFIG_USB_DWC2_PERIPHERAL is not set -# CONFIG_USB_DWC2_DUAL_ROLE is 
not set -# CONFIG_USB_DWC2_PCI is not set -# CONFIG_USB_DWC2_DEBUG is not set -# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set -# CONFIG_USB_CHIPIDEA is not set -# CONFIG_USB_ISP1760 is not set - -# -# USB port drivers -# CONFIG_USB_SERIAL=m CONFIG_USB_SERIAL_GENERIC=y -# CONFIG_USB_SERIAL_SIMPLE is not set CONFIG_USB_SERIAL_AIRCABLE=m CONFIG_USB_SERIAL_ARK3116=m CONFIG_USB_SERIAL_BELKIN=m @@ -6179,7 +1730,6 @@ CONFIG_USB_SERIAL_IPAQ=m CONFIG_USB_SERIAL_IR=m CONFIG_USB_SERIAL_EDGEPORT=m CONFIG_USB_SERIAL_EDGEPORT_TI=m -# CONFIG_USB_SERIAL_F81232 is not set CONFIG_USB_SERIAL_F8153X=m CONFIG_USB_SERIAL_GARMIN=m CONFIG_USB_SERIAL_IPW=m @@ -6189,7 +1739,6 @@ CONFIG_USB_SERIAL_KEYSPAN=m CONFIG_USB_SERIAL_KLSI=m CONFIG_USB_SERIAL_KOBIL_SCT=m CONFIG_USB_SERIAL_MCT_U232=m -# CONFIG_USB_SERIAL_METRO is not set CONFIG_USB_SERIAL_MOS7720=m CONFIG_USB_SERIAL_MOS7715_PARPORT=y CONFIG_USB_SERIAL_MOS7840=m @@ -6206,21 +1755,14 @@ CONFIG_USB_SERIAL_SIERRAWIRELESS=m CONFIG_USB_SERIAL_SYMBOL=m CONFIG_USB_SERIAL_TI=m CONFIG_USB_SERIAL_CYBERJACK=m -CONFIG_USB_SERIAL_WWAN=m CONFIG_USB_SERIAL_OPTION=m CONFIG_USB_SERIAL_OMNINET=m CONFIG_USB_SERIAL_OPTICON=m CONFIG_USB_SERIAL_XSENS_MT=m -# CONFIG_USB_SERIAL_WISHBONE is not set CONFIG_USB_SERIAL_SSU100=m CONFIG_USB_SERIAL_QT2=m CONFIG_USB_SERIAL_UPD78F0730=m -# CONFIG_USB_SERIAL_XR is not set CONFIG_USB_SERIAL_DEBUG=m - -# -# USB Miscellaneous drivers -# CONFIG_USB_USS720=m CONFIG_USB_EMI62=m CONFIG_USB_EMI26=m @@ -6228,541 +1770,137 @@ CONFIG_USB_ADUTUX=m CONFIG_USB_SEVSEG=m CONFIG_USB_LEGOTOWER=m CONFIG_USB_LCD=m -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set CONFIG_USB_IDMOUSE=m CONFIG_USB_APPLEDISPLAY=m -# CONFIG_APPLE_MFI_FASTCHARGE is not set CONFIG_USB_SISUSBVGA=m CONFIG_USB_LD=m -# CONFIG_USB_TRANCEVIBRATOR is not set CONFIG_USB_IOWARRIOR=m -# CONFIG_USB_TEST is not set -# CONFIG_USB_EHSET_TEST_FIXTURE is not set CONFIG_USB_ISIGHTFW=m -# CONFIG_USB_YUREX is not set -CONFIG_USB_EZUSB_FX2=m -# 
CONFIG_USB_HUB_USB251XB is not set CONFIG_USB_HSIC_USB3503=m -# CONFIG_USB_HSIC_USB4604 is not set -# CONFIG_USB_LINK_LAYER_TEST is not set -# CONFIG_USB_CHAOSKEY is not set -# CONFIG_USB_ONBOARD_HUB is not set CONFIG_USB_ATM=m CONFIG_USB_SPEEDTOUCH=m CONFIG_USB_CXACRU=m CONFIG_USB_UEAGLEATM=m CONFIG_USB_XUSBATM=m - -# -# USB Physical Layer drivers -# -# CONFIG_NOP_USB_XCEIV is not set -# CONFIG_USB_GPIO_VBUS is not set -# CONFIG_USB_ISP1301 is not set -# end of USB Physical Layer drivers - CONFIG_USB_GADGET=y -# CONFIG_USB_GADGET_DEBUG is not set -# CONFIG_USB_GADGET_DEBUG_FILES is not set -# CONFIG_USB_GADGET_DEBUG_FS is not set -CONFIG_USB_GADGET_VBUS_DRAW=2 -CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 - -# -# USB Peripheral Controller -# -# CONFIG_USB_GR_UDC is not set -# CONFIG_USB_R8A66597 is not set -# CONFIG_USB_PXA27X is not set -# CONFIG_USB_MV_UDC is not set -# CONFIG_USB_MV_U3D is not set -# CONFIG_USB_SNP_UDC_PLAT is not set -# CONFIG_USB_M66592 is not set -# CONFIG_USB_BDC_UDC is not set -# CONFIG_USB_AMD5536UDC is not set -# CONFIG_USB_NET2272 is not set -# CONFIG_USB_NET2280 is not set -# CONFIG_USB_GOKU is not set -# CONFIG_USB_EG20T is not set -# CONFIG_USB_GADGET_XILINX is not set -# CONFIG_USB_MAX3420_UDC is not set -# CONFIG_USB_CDNS2_UDC is not set -# CONFIG_USB_DUMMY_HCD is not set -# end of USB Peripheral Controller - -# CONFIG_USB_CONFIGFS is not set - -# -# USB Gadget precomposed configurations -# -# CONFIG_USB_ZERO is not set -# CONFIG_USB_AUDIO is not set -# CONFIG_USB_ETH is not set -# CONFIG_USB_G_NCM is not set -# CONFIG_USB_GADGETFS is not set -# CONFIG_USB_FUNCTIONFS is not set -# CONFIG_USB_MASS_STORAGE is not set -# CONFIG_USB_GADGET_TARGET is not set -# CONFIG_USB_G_SERIAL is not set -# CONFIG_USB_MIDI_GADGET is not set -# CONFIG_USB_G_PRINTER is not set -# CONFIG_USB_CDC_COMPOSITE is not set -# CONFIG_USB_G_NOKIA is not set -# CONFIG_USB_G_ACM_MS is not set -# CONFIG_USB_G_MULTI is not set -# CONFIG_USB_G_HID is not set -# 
CONFIG_USB_G_DBGP is not set -# CONFIG_USB_G_WEBCAM is not set -# CONFIG_USB_RAW_GADGET is not set -# end of USB Gadget precomposed configurations - CONFIG_TYPEC=m CONFIG_TYPEC_TCPM=m CONFIG_TYPEC_TCPCI=m CONFIG_TYPEC_RT1711H=m -# CONFIG_TYPEC_TCPCI_MAXIM is not set CONFIG_TYPEC_FUSB302=m CONFIG_TYPEC_UCSI=m -# CONFIG_UCSI_CCG is not set CONFIG_UCSI_ACPI=m -# CONFIG_UCSI_STM32G0 is not set CONFIG_TYPEC_TPS6598X=m -# CONFIG_TYPEC_ANX7411 is not set -# CONFIG_TYPEC_RT1719 is not set -# CONFIG_TYPEC_HD3SS3220 is not set -# CONFIG_TYPEC_STUSB160X is not set -# CONFIG_TYPEC_WUSB3801 is not set - -# -# USB Type-C Multiplexer/DeMultiplexer Switch support -# -# CONFIG_TYPEC_MUX_FSA4480 is not set -# CONFIG_TYPEC_MUX_GPIO_SBU is not set CONFIG_TYPEC_MUX_PI3USB30532=m -# CONFIG_TYPEC_MUX_NB7VPQ904M is not set -# end of USB Type-C Multiplexer/DeMultiplexer Switch support - -# -# USB Type-C Alternate Mode drivers -# CONFIG_TYPEC_DP_ALTMODE=m -# CONFIG_TYPEC_NVIDIA_ALTMODE is not set -# end of USB Type-C Alternate Mode drivers - -CONFIG_USB_ROLE_SWITCH=y CONFIG_MMC=m -CONFIG_PWRSEQ_EMMC=m -# CONFIG_PWRSEQ_SD8787 is not set -CONFIG_PWRSEQ_SIMPLE=m -CONFIG_MMC_BLOCK=m -CONFIG_MMC_BLOCK_MINORS=8 CONFIG_SDIO_UART=m -# CONFIG_MMC_TEST is not set - -# -# MMC/SD/SDIO Host Controller Drivers -# -# CONFIG_MMC_DEBUG is not set CONFIG_MMC_SDHCI=m -CONFIG_MMC_SDHCI_IO_ACCESSORS=y CONFIG_MMC_SDHCI_PCI=m -CONFIG_MMC_RICOH_MMC=y CONFIG_MMC_SDHCI_ACPI=m CONFIG_MMC_SDHCI_PLTFM=m -# CONFIG_MMC_SDHCI_OF_ARASAN is not set -# CONFIG_MMC_SDHCI_OF_AT91 is not set -# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set -# CONFIG_MMC_SDHCI_CADENCE is not set -# CONFIG_MMC_SDHCI_F_SDH30 is not set -# CONFIG_MMC_SDHCI_MILBEAUT is not set CONFIG_MMC_TIFM_SD=m -# CONFIG_MMC_SPI is not set CONFIG_MMC_CB710=m CONFIG_MMC_VIA_SDMMC=m CONFIG_MMC_VUB300=m CONFIG_MMC_USHC=m -# CONFIG_MMC_USDHI6ROL0 is not set CONFIG_MMC_REALTEK_PCI=m CONFIG_MMC_REALTEK_USB=m -CONFIG_MMC_CQHCI=m -# CONFIG_MMC_HSQ is not set -# 
CONFIG_MMC_TOSHIBA_PCI is not set -# CONFIG_MMC_MTK is not set CONFIG_MMC_SDHCI_XENON=m -# CONFIG_MMC_SDHCI_OMAP is not set -# CONFIG_MMC_SDHCI_AM654 is not set -# CONFIG_SCSI_UFSHCD is not set CONFIG_MEMSTICK=m -# CONFIG_MEMSTICK_DEBUG is not set - -# -# MemoryStick drivers -# -# CONFIG_MEMSTICK_UNSAFE_RESUME is not set CONFIG_MSPRO_BLOCK=m -# CONFIG_MS_BLOCK is not set - -# -# MemoryStick Host Controller Drivers -# CONFIG_MEMSTICK_TIFM_MS=m CONFIG_MEMSTICK_JMICRON_38X=m CONFIG_MEMSTICK_R592=m CONFIG_MEMSTICK_REALTEK_PCI=m CONFIG_MEMSTICK_REALTEK_USB=m -CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y -# CONFIG_LEDS_CLASS_FLASH is not set -# CONFIG_LEDS_CLASS_MULTICOLOR is not set -# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set - -# -# LED drivers -# -# CONFIG_LEDS_AN30259A is not set -# CONFIG_LEDS_AW200XX is not set -# CONFIG_LEDS_AW2013 is not set -# CONFIG_LEDS_BCM6328 is not set -# CONFIG_LEDS_BCM6358 is not set -# CONFIG_LEDS_CR0014114 is not set -# CONFIG_LEDS_EL15203000 is not set CONFIG_LEDS_LM3530=m -# CONFIG_LEDS_LM3532 is not set -# CONFIG_LEDS_LM3642 is not set -# CONFIG_LEDS_LM3692X is not set -# CONFIG_LEDS_PCA9532 is not set -# CONFIG_LEDS_GPIO is not set CONFIG_LEDS_LP3944=m -# CONFIG_LEDS_LP3952 is not set -# CONFIG_LEDS_LP50XX is not set -# CONFIG_LEDS_LP55XX_COMMON is not set -# CONFIG_LEDS_LP8860 is not set -# CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_PCA963X is not set -# CONFIG_LEDS_PCA995X is not set -# CONFIG_LEDS_DAC124S085 is not set -# CONFIG_LEDS_PWM is not set -# CONFIG_LEDS_BD2606MVV is not set -# CONFIG_LEDS_BD2802 is not set -# CONFIG_LEDS_LT3593 is not set -# CONFIG_LEDS_TCA6507 is not set -# CONFIG_LEDS_TLC591XX is not set -# CONFIG_LEDS_LM355x is not set -# CONFIG_LEDS_IS31FL319X is not set -# CONFIG_LEDS_IS31FL32XX is not set - -# -# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) -# CONFIG_LEDS_BLINKM=m -# CONFIG_LEDS_SYSCON is not set -# CONFIG_LEDS_MLXREG is not set -# CONFIG_LEDS_USER is not set -# 
CONFIG_LEDS_SPI_BYTE is not set -# CONFIG_LEDS_LM3697 is not set - -# -# Flash and Torch LED drivers -# - -# -# RGB LED drivers -# - -# -# LED Triggers -# -CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=m CONFIG_LEDS_TRIGGER_ONESHOT=m CONFIG_LEDS_TRIGGER_DISK=y -# CONFIG_LEDS_TRIGGER_MTD is not set CONFIG_LEDS_TRIGGER_HEARTBEAT=m CONFIG_LEDS_TRIGGER_BACKLIGHT=m -# CONFIG_LEDS_TRIGGER_CPU is not set -# CONFIG_LEDS_TRIGGER_ACTIVITY is not set CONFIG_LEDS_TRIGGER_DEFAULT_ON=m - -# -# iptables trigger is under Netfilter config (LED target) -# CONFIG_LEDS_TRIGGER_TRANSIENT=m CONFIG_LEDS_TRIGGER_CAMERA=m -# CONFIG_LEDS_TRIGGER_PANIC is not set -# CONFIG_LEDS_TRIGGER_NETDEV is not set -# CONFIG_LEDS_TRIGGER_PATTERN is not set CONFIG_LEDS_TRIGGER_AUDIO=y -# CONFIG_LEDS_TRIGGER_TTY is not set - -# -# Simple LED drivers -# -# CONFIG_ACCESSIBILITY is not set CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_MAD=m CONFIG_INFINIBAND_USER_ACCESS=m -CONFIG_INFINIBAND_USER_MEM=y -CONFIG_INFINIBAND_ON_DEMAND_PAGING=y -CONFIG_INFINIBAND_ADDR_TRANS=y -CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y -CONFIG_INFINIBAND_VIRT_DMA=y CONFIG_INFINIBAND_BNXT_RE=m CONFIG_INFINIBAND_CXGB4=m -# CONFIG_INFINIBAND_EFA is not set -# CONFIG_INFINIBAND_ERDMA is not set -# CONFIG_INFINIBAND_IRDMA is not set CONFIG_MLX4_INFINIBAND=m CONFIG_MLX5_INFINIBAND=m -# CONFIG_INFINIBAND_MTHCA is not set -# CONFIG_INFINIBAND_OCRDMA is not set CONFIG_INFINIBAND_VMWARE_PVRDMA=m CONFIG_RDMA_RXE=m -# CONFIG_RDMA_SIW is not set CONFIG_INFINIBAND_IPOIB=m CONFIG_INFINIBAND_IPOIB_CM=y -CONFIG_INFINIBAND_IPOIB_DEBUG=y -# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set CONFIG_INFINIBAND_SRP=m CONFIG_INFINIBAND_SRPT=m CONFIG_INFINIBAND_ISER=m CONFIG_INFINIBAND_ISERT=m -# CONFIG_INFINIBAND_RTRS_CLIENT is not set -# CONFIG_INFINIBAND_RTRS_SERVER is not set -CONFIG_RTC_LIB=y CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -CONFIG_RTC_SYSTOHC=y -CONFIG_RTC_SYSTOHC_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set 
-CONFIG_RTC_NVMEM=y - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -# CONFIG_RTC_DRV_ABB5ZES3 is not set -# CONFIG_RTC_DRV_ABEOZ9 is not set -# CONFIG_RTC_DRV_ABX80X is not set CONFIG_RTC_DRV_DS1307=m -# CONFIG_RTC_DRV_DS1307_CENTURY is not set CONFIG_RTC_DRV_DS1374=m -# CONFIG_RTC_DRV_DS1374_WDT is not set CONFIG_RTC_DRV_DS1672=m -# CONFIG_RTC_DRV_HYM8563 is not set CONFIG_RTC_DRV_MAX6900=m -# CONFIG_RTC_DRV_NCT3018Y is not set CONFIG_RTC_DRV_RS5C372=m CONFIG_RTC_DRV_ISL1208=m CONFIG_RTC_DRV_ISL12022=m -# CONFIG_RTC_DRV_ISL12026 is not set CONFIG_RTC_DRV_X1205=m CONFIG_RTC_DRV_PCF8523=m -# CONFIG_RTC_DRV_PCF85063 is not set -# CONFIG_RTC_DRV_PCF85363 is not set CONFIG_RTC_DRV_PCF8563=m CONFIG_RTC_DRV_PCF8583=m CONFIG_RTC_DRV_M41T80=m CONFIG_RTC_DRV_M41T80_WDT=y CONFIG_RTC_DRV_BQ32K=m -# CONFIG_RTC_DRV_S35390A is not set CONFIG_RTC_DRV_FM3130=m -# CONFIG_RTC_DRV_RX8010 is not set CONFIG_RTC_DRV_RX8581=m CONFIG_RTC_DRV_RX8025=m CONFIG_RTC_DRV_EM3027=m -# CONFIG_RTC_DRV_RV3028 is not set -# CONFIG_RTC_DRV_RV3032 is not set CONFIG_RTC_DRV_RV8803=m -# CONFIG_RTC_DRV_SD3078 is not set - -# -# SPI RTC drivers -# -# CONFIG_RTC_DRV_M41T93 is not set -# CONFIG_RTC_DRV_M41T94 is not set -# CONFIG_RTC_DRV_DS1302 is not set -# CONFIG_RTC_DRV_DS1305 is not set -# CONFIG_RTC_DRV_DS1343 is not set -# CONFIG_RTC_DRV_DS1347 is not set -# CONFIG_RTC_DRV_DS1390 is not set -# CONFIG_RTC_DRV_MAX6916 is not set -# CONFIG_RTC_DRV_R9701 is not set CONFIG_RTC_DRV_RX4581=m -# CONFIG_RTC_DRV_RS5C348 is not set -# CONFIG_RTC_DRV_MAX6902 is not set -# CONFIG_RTC_DRV_PCF2123 is not set -# CONFIG_RTC_DRV_MCP795 is not set -CONFIG_RTC_I2C_AND_SPI=y - -# -# SPI and I2C RTC drivers -# CONFIG_RTC_DRV_DS3232=m -CONFIG_RTC_DRV_DS3232_HWMON=y -# CONFIG_RTC_DRV_PCF2127 is not set CONFIG_RTC_DRV_RV3029C2=m # CONFIG_RTC_DRV_RV3029_HWMON is not set -# 
CONFIG_RTC_DRV_RX6110 is not set - -# -# Platform RTC drivers -# CONFIG_RTC_DRV_DS1286=m CONFIG_RTC_DRV_DS1511=m CONFIG_RTC_DRV_DS1553=m -# CONFIG_RTC_DRV_DS1685_FAMILY is not set CONFIG_RTC_DRV_DS1742=m CONFIG_RTC_DRV_DS2404=m CONFIG_RTC_DRV_EFI=m CONFIG_RTC_DRV_STK17TA8=m -# CONFIG_RTC_DRV_M48T86 is not set CONFIG_RTC_DRV_M48T35=m CONFIG_RTC_DRV_M48T59=m CONFIG_RTC_DRV_MSM6242=m CONFIG_RTC_DRV_RP5C01=m -# CONFIG_RTC_DRV_ZYNQMP is not set - -# -# on-CPU RTC drivers -# -# CONFIG_RTC_DRV_CADENCE is not set -# CONFIG_RTC_DRV_FTRTC010 is not set CONFIG_RTC_DRV_LOONGSON=y -# CONFIG_RTC_DRV_R7301 is not set - -# -# HID Sensor RTC drivers -# -# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set -# CONFIG_RTC_DRV_GOLDFISH is not set CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set - -# -# DMA Devices -# -CONFIG_DMA_ENGINE=y -CONFIG_DMA_ACPI=y -CONFIG_DMA_OF=y -# CONFIG_ALTERA_MSGDMA is not set -# CONFIG_DW_AXI_DMAC is not set -# CONFIG_FSL_EDMA is not set -# CONFIG_INTEL_IDMA64 is not set -# CONFIG_PLX_DMA is not set -# CONFIG_XILINX_DMA is not set -# CONFIG_XILINX_XDMA is not set -# CONFIG_XILINX_ZYNQMP_DPDMA is not set -# CONFIG_QCOM_HIDMA_MGMT is not set -# CONFIG_QCOM_HIDMA is not set -CONFIG_DW_DMAC_CORE=m CONFIG_DW_DMAC=m -# CONFIG_DW_DMAC_PCI is not set -# CONFIG_DW_EDMA is not set -# CONFIG_SF_PDMA is not set - -# -# DMA Clients -# CONFIG_ASYNC_TX_DMA=y -# CONFIG_DMATEST is not set - -# -# DMABUF options -# -CONFIG_SYNC_FILE=y -# CONFIG_SW_SYNC is not set -# CONFIG_UDMABUF is not set -# CONFIG_DMABUF_MOVE_NOTIFY is not set -# CONFIG_DMABUF_DEBUG is not set -# CONFIG_DMABUF_SELFTESTS is not set -# CONFIG_DMABUF_HEAPS is not set -# CONFIG_DMABUF_SYSFS_STATS is not set -# end of DMABUF options - -CONFIG_UIO=m CONFIG_UIO_CIF=m CONFIG_UIO_PDRV_GENIRQ=m CONFIG_UIO_DMEM_GENIRQ=m CONFIG_UIO_AEC=m CONFIG_UIO_SERCOS3=m CONFIG_UIO_PCI_GENERIC=m -# CONFIG_UIO_NETX is not set -# CONFIG_UIO_PRUSS is not set -# CONFIG_UIO_MF624 is not set CONFIG_VFIO=m -CONFIG_VFIO_GROUP=y 
-CONFIG_VFIO_CONTAINER=y CONFIG_VFIO_NOIOMMU=y -CONFIG_VFIO_VIRQFD=y - -# -# VFIO support for PCI devices -# -CONFIG_VFIO_PCI_CORE=m -CONFIG_VFIO_PCI_MMAP=y -CONFIG_VFIO_PCI_INTX=y CONFIG_VFIO_PCI=m -# CONFIG_MLX5_VFIO_PCI is not set -# end of VFIO support for PCI devices - -CONFIG_IRQ_BYPASS_MANAGER=m -# CONFIG_VIRT_DRIVERS is not set -CONFIG_VIRTIO_ANCHOR=y -CONFIG_VIRTIO=y -CONFIG_VIRTIO_PCI_LIB=y -CONFIG_VIRTIO_PCI_LIB_LEGACY=y -CONFIG_VIRTIO_MENU=y CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_PCI_LEGACY=y CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_INPUT=m CONFIG_VIRTIO_MMIO=m CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y -CONFIG_VIRTIO_DMA_SHARED_BUFFER=m -# CONFIG_VDPA is not set -CONFIG_VHOST_IOTLB=m -CONFIG_VHOST_TASK=y -CONFIG_VHOST=m -CONFIG_VHOST_MENU=y CONFIG_VHOST_NET=m CONFIG_VHOST_SCSI=m CONFIG_VHOST_VSOCK=m -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set - -# -# Microsoft Hyper-V guest support -# -# end of Microsoft Hyper-V guest support - -# CONFIG_GREYBUS is not set CONFIG_COMEDI=m -# CONFIG_COMEDI_DEBUG is not set -CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048 -CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480 -# CONFIG_COMEDI_MISC_DRIVERS is not set -# CONFIG_COMEDI_ISA_DRIVERS is not set CONFIG_COMEDI_PCI_DRIVERS=m CONFIG_COMEDI_8255_PCI=m -# CONFIG_COMEDI_ADDI_APCI_1032 is not set -# CONFIG_COMEDI_ADDI_APCI_1500 is not set -# CONFIG_COMEDI_ADDI_APCI_1516 is not set -# CONFIG_COMEDI_ADDI_APCI_1564 is not set -# CONFIG_COMEDI_ADDI_APCI_16XX is not set -# CONFIG_COMEDI_ADDI_APCI_2032 is not set -# CONFIG_COMEDI_ADDI_APCI_2200 is not set -# CONFIG_COMEDI_ADDI_APCI_3120 is not set -# CONFIG_COMEDI_ADDI_APCI_3501 is not set -# CONFIG_COMEDI_ADDI_APCI_3XXX is not set CONFIG_COMEDI_ADL_PCI6208=m CONFIG_COMEDI_ADL_PCI7X3X=m CONFIG_COMEDI_ADL_PCI8164=m @@ -6774,916 +1912,39 @@ CONFIG_COMEDI_ADV_PCI1723=m CONFIG_COMEDI_ADV_PCI1724=m CONFIG_COMEDI_ADV_PCI1760=m CONFIG_COMEDI_ADV_PCI_DIO=m -# CONFIG_COMEDI_AMPLC_DIO200_PCI is not set -# CONFIG_COMEDI_AMPLC_PC236_PCI is not set -# 
CONFIG_COMEDI_AMPLC_PC263_PCI is not set -# CONFIG_COMEDI_AMPLC_PCI224 is not set -# CONFIG_COMEDI_AMPLC_PCI230 is not set -# CONFIG_COMEDI_CONTEC_PCI_DIO is not set -# CONFIG_COMEDI_DAS08_PCI is not set -# CONFIG_COMEDI_DT3000 is not set -# CONFIG_COMEDI_DYNA_PCI10XX is not set -# CONFIG_COMEDI_GSC_HPDI is not set -# CONFIG_COMEDI_MF6X4 is not set -# CONFIG_COMEDI_ICP_MULTI is not set -# CONFIG_COMEDI_DAQBOARD2000 is not set -# CONFIG_COMEDI_JR3_PCI is not set -# CONFIG_COMEDI_KE_COUNTER is not set -# CONFIG_COMEDI_CB_PCIDAS64 is not set -# CONFIG_COMEDI_CB_PCIDAS is not set -# CONFIG_COMEDI_CB_PCIDDA is not set -# CONFIG_COMEDI_CB_PCIMDAS is not set -# CONFIG_COMEDI_CB_PCIMDDA is not set -# CONFIG_COMEDI_ME4000 is not set -# CONFIG_COMEDI_ME_DAQ is not set -# CONFIG_COMEDI_NI_6527 is not set -# CONFIG_COMEDI_NI_65XX is not set -# CONFIG_COMEDI_NI_660X is not set -# CONFIG_COMEDI_NI_670X is not set CONFIG_COMEDI_NI_LABPC_PCI=m CONFIG_COMEDI_NI_PCIDIO=m CONFIG_COMEDI_NI_PCIMIO=m -# CONFIG_COMEDI_RTD520 is not set -# CONFIG_COMEDI_S626 is not set -CONFIG_COMEDI_MITE=m -CONFIG_COMEDI_NI_TIOCMD=m -# CONFIG_COMEDI_USB_DRIVERS is not set -CONFIG_COMEDI_8254=m -CONFIG_COMEDI_8255=m -# CONFIG_COMEDI_8255_SA is not set -# CONFIG_COMEDI_KCOMEDILIB is not set -CONFIG_COMEDI_NI_LABPC=m -CONFIG_COMEDI_NI_TIO=m -CONFIG_COMEDI_NI_ROUTING=m -# CONFIG_COMEDI_TESTS is not set CONFIG_STAGING=y -# CONFIG_PRISM2_USB is not set -# CONFIG_RTL8192U is not set -# CONFIG_RTLLIB is not set -# CONFIG_RTL8723BS is not set -# CONFIG_R8712U is not set -# CONFIG_RTS5208 is not set -# CONFIG_VT6655 is not set -# CONFIG_VT6656 is not set - -# -# IIO staging drivers -# - -# -# Accelerometers -# -# CONFIG_ADIS16203 is not set -# CONFIG_ADIS16240 is not set -# end of Accelerometers - -# -# Analog to digital converters -# -# CONFIG_AD7816 is not set -# end of Analog to digital converters - -# -# Analog digital bi-direction converters -# -# CONFIG_ADT7316 is not set -# end of Analog digital 
bi-direction converters - -# -# Direct Digital Synthesis -# -# CONFIG_AD9832 is not set -# CONFIG_AD9834 is not set -# end of Direct Digital Synthesis - -# -# Network Analyzer, Impedance Converters -# -# CONFIG_AD5933 is not set -# end of Network Analyzer, Impedance Converters - -# -# Resolver to digital converters -# -# CONFIG_AD2S1210 is not set -# end of Resolver to digital converters -# end of IIO staging drivers - -# CONFIG_FB_SM750 is not set -# CONFIG_STAGING_MEDIA is not set -# CONFIG_STAGING_BOARD is not set -# CONFIG_LTE_GDM724X is not set -# CONFIG_FB_TFT is not set -# CONFIG_KS7010 is not set -# CONFIG_PI433 is not set -# CONFIG_XIL_AXIS_FIFO is not set -# CONFIG_FIELDBUS_DEV is not set -# CONFIG_QLGE is not set -# CONFIG_VME_BUS is not set -CONFIG_LOONGARCH_PLATFORM_DEVICES=y -CONFIG_LOONGSON_LAPTOP=y -# CONFIG_GOLDFISH is not set -CONFIG_HAVE_CLK=y -CONFIG_HAVE_CLK_PREPARE=y -CONFIG_COMMON_CLK=y -# CONFIG_LMK04832 is not set -# CONFIG_COMMON_CLK_MAX9485 is not set -# CONFIG_COMMON_CLK_SI5341 is not set -# CONFIG_COMMON_CLK_SI5351 is not set -# CONFIG_COMMON_CLK_SI514 is not set -# CONFIG_COMMON_CLK_SI544 is not set -# CONFIG_COMMON_CLK_SI570 is not set -# CONFIG_COMMON_CLK_CDCE706 is not set -# CONFIG_COMMON_CLK_CDCE925 is not set -# CONFIG_COMMON_CLK_CS2000_CP is not set -# CONFIG_COMMON_CLK_AXI_CLKGEN is not set CONFIG_COMMON_CLK_LOONGSON2=y -# CONFIG_COMMON_CLK_PWM is not set -# CONFIG_COMMON_CLK_RS9_PCIE is not set -# CONFIG_COMMON_CLK_SI521XX is not set -# CONFIG_COMMON_CLK_VC3 is not set -# CONFIG_COMMON_CLK_VC5 is not set -# CONFIG_COMMON_CLK_VC7 is not set -# CONFIG_COMMON_CLK_FIXED_MMIO is not set -# CONFIG_XILINX_VCU is not set -# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set -# CONFIG_HWSPINLOCK is not set - -# -# Clock Source drivers -# -# end of Clock Source drivers - -# CONFIG_MAILBOX is not set -CONFIG_IOMMU_API=y -CONFIG_IOMMU_SUPPORT=y - -# -# Generic IOMMU Pagetable Support -# -# end of Generic IOMMU Pagetable Support - -# 
CONFIG_IOMMU_DEBUGFS is not set -CONFIG_IOMMU_DEFAULT_DMA_STRICT=y -# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set -# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set -CONFIG_OF_IOMMU=y -# CONFIG_IOMMUFD is not set - -# -# Remoteproc drivers -# -# CONFIG_REMOTEPROC is not set -# end of Remoteproc drivers - -# -# Rpmsg drivers -# -# CONFIG_RPMSG_VIRTIO is not set -# end of Rpmsg drivers - -# CONFIG_SOUNDWIRE is not set - -# -# SOC (System On Chip) specific Drivers -# - -# -# Amlogic SoC drivers -# -# end of Amlogic SoC drivers - -# -# Broadcom SoC drivers -# -# end of Broadcom SoC drivers - -# -# NXP/Freescale QorIQ SoC drivers -# -# end of NXP/Freescale QorIQ SoC drivers - -# -# fujitsu SoC drivers -# -# end of fujitsu SoC drivers - -# -# i.MX SoC drivers -# -# end of i.MX SoC drivers - -# -# Enable LiteX SoC Builder specific drivers -# -# CONFIG_LITEX_SOC_CONTROLLER is not set -# end of Enable LiteX SoC Builder specific drivers - CONFIG_LOONGSON2_GUTS=y CONFIG_LOONGSON2_PM=y -# CONFIG_WPCM450_SOC is not set - -# -# Qualcomm SoC drivers -# -# end of Qualcomm SoC drivers - -# CONFIG_SOC_TI is not set - -# -# Xilinx SoC drivers -# -# end of Xilinx SoC drivers -# end of SOC (System On Chip) specific Drivers - CONFIG_PM_DEVFREQ=y - -# -# DEVFREQ Governors -# CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_PERFORMANCE=y CONFIG_DEVFREQ_GOV_POWERSAVE=y CONFIG_DEVFREQ_GOV_USERSPACE=y -# CONFIG_DEVFREQ_GOV_PASSIVE is not set - -# -# DEVFREQ Drivers -# -# CONFIG_PM_DEVFREQ_EVENT is not set -# CONFIG_EXTCON is not set -# CONFIG_MEMORY is not set CONFIG_IIO=m -CONFIG_IIO_BUFFER=y -# CONFIG_IIO_BUFFER_CB is not set -# CONFIG_IIO_BUFFER_DMA is not set -# CONFIG_IIO_BUFFER_DMAENGINE is not set -# CONFIG_IIO_BUFFER_HW_CONSUMER is not set -CONFIG_IIO_KFIFO_BUF=m -CONFIG_IIO_TRIGGERED_BUFFER=m -# CONFIG_IIO_CONFIGFS is not set -CONFIG_IIO_TRIGGER=y -CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 -# CONFIG_IIO_SW_DEVICE is not set -# CONFIG_IIO_SW_TRIGGER is not set -# CONFIG_IIO_TRIGGERED_EVENT 
is not set - -# -# Accelerometers -# -# CONFIG_ADIS16201 is not set -# CONFIG_ADIS16209 is not set -# CONFIG_ADXL313_I2C is not set -# CONFIG_ADXL313_SPI is not set -# CONFIG_ADXL345_I2C is not set -# CONFIG_ADXL345_SPI is not set -# CONFIG_ADXL355_I2C is not set -# CONFIG_ADXL355_SPI is not set -# CONFIG_ADXL367_SPI is not set -# CONFIG_ADXL367_I2C is not set -# CONFIG_ADXL372_SPI is not set -# CONFIG_ADXL372_I2C is not set -# CONFIG_BMA180 is not set -# CONFIG_BMA220 is not set -# CONFIG_BMA400 is not set -# CONFIG_BMC150_ACCEL is not set -# CONFIG_BMI088_ACCEL is not set -# CONFIG_DA280 is not set -# CONFIG_DA311 is not set -# CONFIG_DMARD06 is not set -# CONFIG_DMARD09 is not set -# CONFIG_DMARD10 is not set -# CONFIG_FXLS8962AF_I2C is not set -# CONFIG_FXLS8962AF_SPI is not set CONFIG_HID_SENSOR_ACCEL_3D=m -# CONFIG_IIO_ST_ACCEL_3AXIS is not set -# CONFIG_IIO_KX022A_SPI is not set -# CONFIG_IIO_KX022A_I2C is not set -# CONFIG_KXSD9 is not set -# CONFIG_KXCJK1013 is not set -# CONFIG_MC3230 is not set -# CONFIG_MMA7455_I2C is not set -# CONFIG_MMA7455_SPI is not set -# CONFIG_MMA7660 is not set -# CONFIG_MMA8452 is not set -# CONFIG_MMA9551 is not set -# CONFIG_MMA9553 is not set -# CONFIG_MSA311 is not set -# CONFIG_MXC4005 is not set -# CONFIG_MXC6255 is not set -# CONFIG_SCA3000 is not set -# CONFIG_SCA3300 is not set -# CONFIG_STK8312 is not set -# CONFIG_STK8BA50 is not set -# end of Accelerometers - -# -# Analog to digital converters -# -# CONFIG_AD4130 is not set -# CONFIG_AD7091R5 is not set -# CONFIG_AD7124 is not set -# CONFIG_AD7192 is not set -# CONFIG_AD7266 is not set -# CONFIG_AD7280 is not set -# CONFIG_AD7291 is not set -# CONFIG_AD7292 is not set -# CONFIG_AD7298 is not set -# CONFIG_AD7476 is not set -# CONFIG_AD7606_IFACE_PARALLEL is not set -# CONFIG_AD7606_IFACE_SPI is not set -# CONFIG_AD7766 is not set -# CONFIG_AD7768_1 is not set -# CONFIG_AD7780 is not set -# CONFIG_AD7791 is not set -# CONFIG_AD7793 is not set -# CONFIG_AD7887 is not 
set -# CONFIG_AD7923 is not set -# CONFIG_AD7949 is not set -# CONFIG_AD799X is not set -# CONFIG_ADI_AXI_ADC is not set -# CONFIG_ENVELOPE_DETECTOR is not set -# CONFIG_HI8435 is not set -# CONFIG_HX711 is not set -# CONFIG_INA2XX_ADC is not set -# CONFIG_LTC2471 is not set -# CONFIG_LTC2485 is not set -# CONFIG_LTC2496 is not set -# CONFIG_LTC2497 is not set -# CONFIG_MAX1027 is not set -# CONFIG_MAX11100 is not set -# CONFIG_MAX1118 is not set -# CONFIG_MAX11205 is not set -# CONFIG_MAX11410 is not set -# CONFIG_MAX1241 is not set -# CONFIG_MAX1363 is not set -# CONFIG_MAX9611 is not set -# CONFIG_MCP320X is not set -# CONFIG_MCP3422 is not set -# CONFIG_MCP3911 is not set -# CONFIG_NAU7802 is not set -# CONFIG_RICHTEK_RTQ6056 is not set -# CONFIG_SD_ADC_MODULATOR is not set -# CONFIG_TI_ADC081C is not set -# CONFIG_TI_ADC0832 is not set -# CONFIG_TI_ADC084S021 is not set -# CONFIG_TI_ADC12138 is not set -# CONFIG_TI_ADC108S102 is not set -# CONFIG_TI_ADC128S052 is not set -# CONFIG_TI_ADC161S626 is not set -# CONFIG_TI_ADS1015 is not set -# CONFIG_TI_ADS7924 is not set -# CONFIG_TI_ADS1100 is not set -# CONFIG_TI_ADS7950 is not set -# CONFIG_TI_ADS8344 is not set -# CONFIG_TI_ADS8688 is not set -# CONFIG_TI_ADS124S08 is not set -# CONFIG_TI_ADS131E08 is not set -# CONFIG_TI_LMP92064 is not set -# CONFIG_TI_TLC4541 is not set -# CONFIG_TI_TSC2046 is not set -# CONFIG_VF610_ADC is not set -# CONFIG_VIPERBOARD_ADC is not set -# CONFIG_XILINX_XADC is not set -# end of Analog to digital converters - -# -# Analog to digital and digital to analog converters -# -# CONFIG_AD74115 is not set -# CONFIG_AD74413R is not set -# end of Analog to digital and digital to analog converters - -# -# Analog Front Ends -# -# CONFIG_IIO_RESCALE is not set -# end of Analog Front Ends - -# -# Amplifiers -# -# CONFIG_AD8366 is not set -# CONFIG_ADA4250 is not set -# CONFIG_HMC425 is not set -# end of Amplifiers - -# -# Capacitance to digital converters -# -# CONFIG_AD7150 is not set -# 
CONFIG_AD7746 is not set -# end of Capacitance to digital converters - -# -# Chemical Sensors -# -# CONFIG_ATLAS_PH_SENSOR is not set -# CONFIG_ATLAS_EZO_SENSOR is not set -# CONFIG_BME680 is not set -# CONFIG_CCS811 is not set -# CONFIG_IAQCORE is not set -# CONFIG_SCD30_CORE is not set -# CONFIG_SCD4X is not set -# CONFIG_SENSIRION_SGP30 is not set -# CONFIG_SENSIRION_SGP40 is not set -# CONFIG_SPS30_I2C is not set -# CONFIG_SENSEAIR_SUNRISE_CO2 is not set -# CONFIG_VZ89X is not set -# end of Chemical Sensors - -# -# Hid Sensor IIO Common -# -CONFIG_HID_SENSOR_IIO_COMMON=m -CONFIG_HID_SENSOR_IIO_TRIGGER=m -# end of Hid Sensor IIO Common - -# -# IIO SCMI Sensors -# -# end of IIO SCMI Sensors - -# -# SSP Sensor Common -# -# CONFIG_IIO_SSP_SENSORHUB is not set -# end of SSP Sensor Common - -# -# Digital to analog converters -# -# CONFIG_AD3552R is not set -# CONFIG_AD5064 is not set -# CONFIG_AD5360 is not set -# CONFIG_AD5380 is not set -# CONFIG_AD5421 is not set -# CONFIG_AD5446 is not set -# CONFIG_AD5449 is not set -# CONFIG_AD5592R is not set -# CONFIG_AD5593R is not set -# CONFIG_AD5504 is not set -# CONFIG_AD5624R_SPI is not set -# CONFIG_LTC2688 is not set -# CONFIG_AD5686_SPI is not set -# CONFIG_AD5696_I2C is not set -# CONFIG_AD5755 is not set -# CONFIG_AD5758 is not set -# CONFIG_AD5761 is not set -# CONFIG_AD5764 is not set -# CONFIG_AD5766 is not set -# CONFIG_AD5770R is not set -# CONFIG_AD5791 is not set -# CONFIG_AD7293 is not set -# CONFIG_AD7303 is not set -# CONFIG_AD8801 is not set -# CONFIG_DPOT_DAC is not set -# CONFIG_DS4424 is not set -# CONFIG_LTC1660 is not set -# CONFIG_LTC2632 is not set -# CONFIG_M62332 is not set -# CONFIG_MAX517 is not set -# CONFIG_MAX5522 is not set -# CONFIG_MAX5821 is not set -# CONFIG_MCP4725 is not set -# CONFIG_MCP4728 is not set -# CONFIG_MCP4922 is not set -# CONFIG_TI_DAC082S085 is not set -# CONFIG_TI_DAC5571 is not set -# CONFIG_TI_DAC7311 is not set -# CONFIG_TI_DAC7612 is not set -# CONFIG_VF610_DAC is 
not set -# end of Digital to analog converters - -# -# IIO dummy driver -# -# end of IIO dummy driver - -# -# Filters -# -# CONFIG_ADMV8818 is not set -# end of Filters - -# -# Frequency Synthesizers DDS/PLL -# - -# -# Clock Generator/Distribution -# -# CONFIG_AD9523 is not set -# end of Clock Generator/Distribution - -# -# Phase-Locked Loop (PLL) frequency synthesizers -# -# CONFIG_ADF4350 is not set -# CONFIG_ADF4371 is not set -# CONFIG_ADF4377 is not set -# CONFIG_ADMV1013 is not set -# CONFIG_ADMV1014 is not set -# CONFIG_ADMV4420 is not set -# CONFIG_ADRF6780 is not set -# end of Phase-Locked Loop (PLL) frequency synthesizers -# end of Frequency Synthesizers DDS/PLL - -# -# Digital gyroscope sensors -# -# CONFIG_ADIS16080 is not set -# CONFIG_ADIS16130 is not set -# CONFIG_ADIS16136 is not set -# CONFIG_ADIS16260 is not set -# CONFIG_ADXRS290 is not set -# CONFIG_ADXRS450 is not set -# CONFIG_BMG160 is not set -# CONFIG_FXAS21002C is not set CONFIG_HID_SENSOR_GYRO_3D=m -# CONFIG_MPU3050_I2C is not set -# CONFIG_IIO_ST_GYRO_3AXIS is not set -# CONFIG_ITG3200 is not set -# end of Digital gyroscope sensors - -# -# Health Sensors -# - -# -# Heart Rate Monitors -# -# CONFIG_AFE4403 is not set -# CONFIG_AFE4404 is not set -# CONFIG_MAX30100 is not set -# CONFIG_MAX30102 is not set -# end of Heart Rate Monitors -# end of Health Sensors - -# -# Humidity sensors -# -# CONFIG_AM2315 is not set -# CONFIG_DHT11 is not set -# CONFIG_HDC100X is not set -# CONFIG_HDC2010 is not set CONFIG_HID_SENSOR_HUMIDITY=m -# CONFIG_HTS221 is not set -# CONFIG_HTU21 is not set -# CONFIG_SI7005 is not set -# CONFIG_SI7020 is not set -# end of Humidity sensors - -# -# Inertial measurement units -# -# CONFIG_ADIS16400 is not set -# CONFIG_ADIS16460 is not set -# CONFIG_ADIS16475 is not set -# CONFIG_ADIS16480 is not set -# CONFIG_BMI160_I2C is not set -# CONFIG_BMI160_SPI is not set -# CONFIG_BOSCH_BNO055_I2C is not set -# CONFIG_FXOS8700_I2C is not set -# CONFIG_FXOS8700_SPI is not set -# 
CONFIG_KMX61 is not set -# CONFIG_INV_ICM42600_I2C is not set -# CONFIG_INV_ICM42600_SPI is not set -# CONFIG_INV_MPU6050_I2C is not set -# CONFIG_INV_MPU6050_SPI is not set -# CONFIG_IIO_ST_LSM6DSX is not set -# CONFIG_IIO_ST_LSM9DS0 is not set -# end of Inertial measurement units - -# -# Light sensors -# -# CONFIG_ACPI_ALS is not set -# CONFIG_ADJD_S311 is not set -# CONFIG_ADUX1020 is not set -# CONFIG_AL3010 is not set -# CONFIG_AL3320A is not set -# CONFIG_APDS9300 is not set -# CONFIG_APDS9960 is not set -# CONFIG_AS73211 is not set -# CONFIG_BH1750 is not set -# CONFIG_BH1780 is not set -# CONFIG_CM32181 is not set -# CONFIG_CM3232 is not set -# CONFIG_CM3323 is not set -# CONFIG_CM3605 is not set -# CONFIG_CM36651 is not set -# CONFIG_GP2AP002 is not set -# CONFIG_GP2AP020A00F is not set -# CONFIG_SENSORS_ISL29018 is not set -# CONFIG_SENSORS_ISL29028 is not set -# CONFIG_ISL29125 is not set CONFIG_HID_SENSOR_ALS=m CONFIG_HID_SENSOR_PROX=m -# CONFIG_JSA1212 is not set -# CONFIG_ROHM_BU27008 is not set -# CONFIG_ROHM_BU27034 is not set -# CONFIG_RPR0521 is not set -# CONFIG_LTR501 is not set -# CONFIG_LTRF216A is not set -# CONFIG_LV0104CS is not set -# CONFIG_MAX44000 is not set -# CONFIG_MAX44009 is not set -# CONFIG_NOA1305 is not set -# CONFIG_OPT3001 is not set -# CONFIG_OPT4001 is not set -# CONFIG_PA12203001 is not set -# CONFIG_SI1133 is not set -# CONFIG_SI1145 is not set -# CONFIG_STK3310 is not set -# CONFIG_ST_UVIS25 is not set -# CONFIG_TCS3414 is not set -# CONFIG_TCS3472 is not set -# CONFIG_SENSORS_TSL2563 is not set -# CONFIG_TSL2583 is not set -# CONFIG_TSL2591 is not set -# CONFIG_TSL2772 is not set -# CONFIG_TSL4531 is not set -# CONFIG_US5182D is not set -# CONFIG_VCNL4000 is not set -# CONFIG_VCNL4035 is not set -# CONFIG_VEML6030 is not set -# CONFIG_VEML6070 is not set -# CONFIG_VL6180 is not set -# CONFIG_ZOPT2201 is not set -# end of Light sensors - -# -# Magnetometer sensors -# -# CONFIG_AK8974 is not set -# CONFIG_AK8975 is not 
set -# CONFIG_AK09911 is not set -# CONFIG_BMC150_MAGN_I2C is not set -# CONFIG_BMC150_MAGN_SPI is not set -# CONFIG_MAG3110 is not set CONFIG_HID_SENSOR_MAGNETOMETER_3D=m -# CONFIG_MMC35240 is not set -# CONFIG_IIO_ST_MAGN_3AXIS is not set -# CONFIG_SENSORS_HMC5843_I2C is not set -# CONFIG_SENSORS_HMC5843_SPI is not set -# CONFIG_SENSORS_RM3100_I2C is not set -# CONFIG_SENSORS_RM3100_SPI is not set -# CONFIG_TI_TMAG5273 is not set -# CONFIG_YAMAHA_YAS530 is not set -# end of Magnetometer sensors - -# -# Multiplexers -# -# CONFIG_IIO_MUX is not set -# end of Multiplexers - -# -# Inclinometer sensors -# CONFIG_HID_SENSOR_INCLINOMETER_3D=m CONFIG_HID_SENSOR_DEVICE_ROTATION=m -# end of Inclinometer sensors - -# -# Triggers - standalone -# -# CONFIG_IIO_INTERRUPT_TRIGGER is not set -# CONFIG_IIO_SYSFS_TRIGGER is not set -# end of Triggers - standalone - -# -# Linear and angular position sensors -# -# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set -# end of Linear and angular position sensors - -# -# Digital potentiometers -# -# CONFIG_AD5110 is not set -# CONFIG_AD5272 is not set -# CONFIG_DS1803 is not set -# CONFIG_MAX5432 is not set -# CONFIG_MAX5481 is not set -# CONFIG_MAX5487 is not set -# CONFIG_MCP4018 is not set -# CONFIG_MCP4131 is not set -# CONFIG_MCP4531 is not set -# CONFIG_MCP41010 is not set -# CONFIG_TPL0102 is not set -# CONFIG_X9250 is not set -# end of Digital potentiometers - -# -# Digital potentiostats -# -# CONFIG_LMP91000 is not set -# end of Digital potentiostats - -# -# Pressure sensors -# -# CONFIG_ABP060MG is not set -# CONFIG_BMP280 is not set -# CONFIG_DLHL60D is not set -# CONFIG_DPS310 is not set CONFIG_HID_SENSOR_PRESS=m -# CONFIG_HP03 is not set -# CONFIG_ICP10100 is not set -# CONFIG_MPL115_I2C is not set -# CONFIG_MPL115_SPI is not set -# CONFIG_MPL3115 is not set -# CONFIG_MPRLS0025PA is not set -# CONFIG_MS5611 is not set -# CONFIG_MS5637 is not set -# CONFIG_IIO_ST_PRESS is not set -# CONFIG_T5403 is not set -# CONFIG_HP206C is 
not set -# CONFIG_ZPA2326 is not set -# end of Pressure sensors - -# -# Lightning sensors -# -# CONFIG_AS3935 is not set -# end of Lightning sensors - -# -# Proximity and distance sensors -# -# CONFIG_IRSD200 is not set -# CONFIG_ISL29501 is not set -# CONFIG_LIDAR_LITE_V2 is not set -# CONFIG_MB1232 is not set -# CONFIG_PING is not set -# CONFIG_RFD77402 is not set -# CONFIG_SRF04 is not set -# CONFIG_SX9310 is not set -# CONFIG_SX9324 is not set -# CONFIG_SX9360 is not set -# CONFIG_SX9500 is not set -# CONFIG_SRF08 is not set -# CONFIG_VCNL3020 is not set -# CONFIG_VL53L0X_I2C is not set -# end of Proximity and distance sensors - -# -# Resolver to digital converters -# -# CONFIG_AD2S90 is not set -# CONFIG_AD2S1200 is not set -# end of Resolver to digital converters - -# -# Temperature sensors -# -# CONFIG_LTC2983 is not set -# CONFIG_MAXIM_THERMOCOUPLE is not set CONFIG_HID_SENSOR_TEMP=m -# CONFIG_MLX90614 is not set -# CONFIG_MLX90632 is not set -# CONFIG_TMP006 is not set -# CONFIG_TMP007 is not set -# CONFIG_TMP117 is not set -# CONFIG_TSYS01 is not set -# CONFIG_TSYS02D is not set -# CONFIG_MAX30208 is not set -# CONFIG_MAX31856 is not set -# CONFIG_MAX31865 is not set -# end of Temperature sensors - CONFIG_NTB=m -# CONFIG_NTB_MSI is not set -# CONFIG_NTB_IDT is not set -# CONFIG_NTB_EPF is not set -# CONFIG_NTB_SWITCHTEC is not set CONFIG_NTB_PINGPONG=m CONFIG_NTB_TOOL=m CONFIG_NTB_PERF=m CONFIG_NTB_TRANSPORT=m CONFIG_PWM=y -CONFIG_PWM_SYSFS=y -# CONFIG_PWM_DEBUG is not set -# CONFIG_PWM_ATMEL_TCB is not set -# CONFIG_PWM_CLK is not set -# CONFIG_PWM_DWC is not set -# CONFIG_PWM_FSL_FTM is not set -# CONFIG_PWM_PCA9685 is not set -# CONFIG_PWM_XILINX is not set - -# -# IRQ chip support -# -CONFIG_IRQCHIP=y -# CONFIG_AL_FIC is not set -# CONFIG_XILINX_INTC is not set -CONFIG_IRQ_LOONGARCH_CPU=y -CONFIG_LOONGSON_LIOINTC=y -CONFIG_LOONGSON_EIOINTC=y -CONFIG_LOONGSON_HTVEC=y -CONFIG_LOONGSON_PCH_PIC=y -CONFIG_LOONGSON_PCH_MSI=y -CONFIG_LOONGSON_PCH_LPC=y -# 
end of IRQ chip support - -# CONFIG_IPACK_BUS is not set -CONFIG_RESET_CONTROLLER=y -# CONFIG_RESET_SIMPLE is not set -# CONFIG_RESET_TI_SYSCON is not set -# CONFIG_RESET_TI_TPS380X is not set - -# -# PHY Subsystem -# -# CONFIG_GENERIC_PHY is not set -# CONFIG_PHY_CAN_TRANSCEIVER is not set - -# -# PHY drivers for Broadcom platforms -# -# CONFIG_BCM_KONA_USB2_PHY is not set -# end of PHY drivers for Broadcom platforms - -# CONFIG_PHY_CADENCE_TORRENT is not set -# CONFIG_PHY_CADENCE_DPHY is not set -# CONFIG_PHY_CADENCE_DPHY_RX is not set -# CONFIG_PHY_CADENCE_SIERRA is not set -# CONFIG_PHY_CADENCE_SALVO is not set -# CONFIG_PHY_PXA_28NM_HSIC is not set -# CONFIG_PHY_PXA_28NM_USB2 is not set -# CONFIG_PHY_LAN966X_SERDES is not set -# CONFIG_PHY_CPCAP_USB is not set -# CONFIG_PHY_MAPPHONE_MDM6600 is not set -# CONFIG_PHY_OCELOT_SERDES is not set -# CONFIG_PHY_SAMSUNG_USB2 is not set -# end of PHY Subsystem - CONFIG_POWERCAP=y -# CONFIG_DTPM is not set -# CONFIG_MCB is not set - -# -# Performance monitor support -# -# CONFIG_DWC_PCIE_PMU is not set -# end of Performance monitor support - -CONFIG_RAS=y CONFIG_USB4=m -# CONFIG_USB4_DEBUGFS_WRITE is not set -# CONFIG_USB4_DMA_TEST is not set - -# -# Android -# -# CONFIG_ANDROID_BINDER_IPC is not set -# end of Android - -# CONFIG_LIBNVDIMM is not set CONFIG_DAX=y CONFIG_DEV_DAX=m -CONFIG_DEV_DAX_KMEM=m -CONFIG_NVMEM=y -CONFIG_NVMEM_SYSFS=y - -# -# Layout Types -# -# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set -# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set -# end of Layout Types - -# CONFIG_NVMEM_RMEM is not set -# CONFIG_NVMEM_U_BOOT_ENV is not set - -# -# HW tracing support -# -# CONFIG_STM is not set -# CONFIG_INTEL_TH is not set -# end of HW tracing support - -# CONFIG_FPGA is not set -# CONFIG_FSI is not set -CONFIG_PM_OPP=y -# CONFIG_SIOX is not set -# CONFIG_SLIMBUS is not set -# CONFIG_INTERCONNECT is not set -# CONFIG_COUNTER is not set -# CONFIG_MOST is not set -# CONFIG_PECI is not set -# CONFIG_HTE is not set -# end 
of Device Drivers - -# -# File systems -# -# CONFIG_VALIDATE_FS_PARSER is not set -CONFIG_FS_IOMAP=y -CONFIG_BUFFER_HEAD=y -CONFIG_LEGACY_DIRECT_IO=y CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -7691,289 +1952,105 @@ CONFIG_EXT2_FS_SECURITY=y CONFIG_EXT3_FS=y CONFIG_EXT3_FS_POSIX_ACL=y CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -# CONFIG_EXT4_DEBUG is not set -CONFIG_JBD2=y -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=y -# CONFIG_REISERFS_FS is not set CONFIG_JFS_FS=m CONFIG_JFS_POSIX_ACL=y CONFIG_JFS_SECURITY=y -# CONFIG_JFS_DEBUG is not set -# CONFIG_JFS_STATISTICS is not set CONFIG_XFS_FS=y -CONFIG_XFS_SUPPORT_V4=y -CONFIG_XFS_SUPPORT_ASCII_CI=y CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y -# CONFIG_XFS_RT is not set -# CONFIG_XFS_ONLINE_SCRUB is not set -# CONFIG_XFS_WARN is not set -# CONFIG_XFS_DEBUG is not set CONFIG_GFS2_FS=m CONFIG_GFS2_FS_LOCKING_DLM=y CONFIG_OCFS2_FS=m -CONFIG_OCFS2_FS_O2CB=m -CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m -CONFIG_OCFS2_FS_STATS=y -CONFIG_OCFS2_DEBUG_MASKLOG=y -# CONFIG_OCFS2_DEBUG_FS is not set CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y -# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set -# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set -# CONFIG_BTRFS_DEBUG is not set -# CONFIG_BTRFS_ASSERT is not set -# CONFIG_BTRFS_FS_REF_VERIFY is not set -# CONFIG_NILFS2_FS is not set -# CONFIG_F2FS_FS is not set -# CONFIG_ZONEFS_FS is not set -CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=y -CONFIG_EXPORTFS_BLOCK_OPS=y -CONFIG_FILE_LOCKING=y -# CONFIG_FS_ENCRYPTION is not set -# CONFIG_FS_VERITY is not set -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y CONFIG_FANOTIFY=y CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y -# CONFIG_QUOTA_DEBUG is not set -CONFIG_QUOTA_TREE=y CONFIG_QFMT_V1=m CONFIG_QFMT_V2=y -CONFIG_QUOTACTL=y CONFIG_AUTOFS_FS=y CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m -# CONFIG_VIRT_FUSE is not set 
CONFIG_OVERLAY_FS=y -CONFIG_OVERLAY_FS_REDIRECT_DIR=y # CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set CONFIG_OVERLAY_FS_INDEX=y CONFIG_OVERLAY_FS_XINO_AUTO=y CONFIG_OVERLAY_FS_METACOPY=y -# CONFIG_OVERLAY_FS_DEBUG is not set - -# -# Caches -# -CONFIG_NETFS_SUPPORT=y -CONFIG_NETFS_STATS=y CONFIG_FSCACHE=m CONFIG_FSCACHE_STATS=y -# CONFIG_FSCACHE_DEBUG is not set CONFIG_CACHEFILES=m -# CONFIG_CACHEFILES_DEBUG is not set -# CONFIG_CACHEFILES_ERROR_INJECTION is not set -# CONFIG_CACHEFILES_ONDEMAND is not set -# end of Caches - -# -# CD-ROM/DVD Filesystems -# CONFIG_ISO9660_FS=m CONFIG_JOLIET=y CONFIG_ZISOFS=y CONFIG_UDF_FS=m -# end of CD-ROM/DVD Filesystems - -# -# DOS/FAT/EXFAT/NT Filesystems -# -CONFIG_FAT_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_FAT_DEFAULT_CODEPAGE=936 CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" -# CONFIG_FAT_DEFAULT_UTF8 is not set CONFIG_EXFAT_FS=m -CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8" CONFIG_NTFS_FS=m -# CONFIG_NTFS_DEBUG is not set -# CONFIG_NTFS_RW is not set CONFIG_NTFS3_FS=m CONFIG_NTFS3_64BIT_CLUSTER=y CONFIG_NTFS3_LZX_XPRESS=y -# CONFIG_NTFS3_FS_POSIX_ACL is not set -# end of DOS/FAT/EXFAT/NT Filesystems - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y -CONFIG_PROC_VMCORE=y CONFIG_PROC_VMCORE_DEVICE_DUMP=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_PROC_CHILDREN=y -CONFIG_KERNFS=y -CONFIG_SYSFS=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -# CONFIG_TMPFS_INODE64 is not set -# CONFIG_TMPFS_QUOTA is not set -CONFIG_ARCH_SUPPORTS_HUGETLBFS=y CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y -# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set CONFIG_CONFIGFS_FS=y CONFIG_EFIVAR_FS=y -# end of Pseudo filesystems - -CONFIG_MISC_FILESYSTEMS=y CONFIG_ORANGEFS_FS=m -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set CONFIG_ECRYPT_FS=m CONFIG_ECRYPT_FS_MESSAGING=y CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m -# CONFIG_BEFS_FS is not set -# 
CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_JFFS2_FS is not set CONFIG_UBIFS_FS=m CONFIG_UBIFS_FS_ADVANCED_COMPR=y -CONFIG_UBIFS_FS_LZO=y -CONFIG_UBIFS_FS_ZLIB=y -CONFIG_UBIFS_FS_ZSTD=y -# CONFIG_UBIFS_ATIME_SUPPORT is not set -CONFIG_UBIFS_FS_XATTR=y -CONFIG_UBIFS_FS_SECURITY=y -# CONFIG_UBIFS_FS_AUTHENTICATION is not set CONFIG_CRAMFS=m -CONFIG_CRAMFS_BLOCKDEV=y -# CONFIG_CRAMFS_MTD is not set CONFIG_SQUASHFS=m -# CONFIG_SQUASHFS_FILE_CACHE is not set CONFIG_SQUASHFS_FILE_DIRECT=y -CONFIG_SQUASHFS_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set -CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set -# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y CONFIG_SQUASHFS_LZ4=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y -# CONFIG_SQUASHFS_ZSTD is not set -# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set -# CONFIG_SQUASHFS_EMBEDDED is not set -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -# CONFIG_VXFS_FS is not set CONFIG_MINIX_FS=m -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_QNX6FS_FS is not set CONFIG_ROMFS_FS=m -CONFIG_ROMFS_BACKED_BY_BLOCK=y -# CONFIG_ROMFS_BACKED_BY_MTD is not set -# CONFIG_ROMFS_BACKED_BY_BOTH is not set -CONFIG_ROMFS_ON_BLOCK=y CONFIG_PSTORE=m -CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 -CONFIG_PSTORE_COMPRESS=y -# CONFIG_PSTORE_CONSOLE is not set -# CONFIG_PSTORE_PMSG is not set -# CONFIG_PSTORE_FTRACE is not set -# CONFIG_PSTORE_RAM is not set -# CONFIG_PSTORE_BLK is not set CONFIG_SYSV_FS=m CONFIG_UFS_FS=m -# CONFIG_UFS_FS_WRITE is not set -# CONFIG_UFS_DEBUG is not set CONFIG_EROFS_FS=m -# CONFIG_EROFS_FS_DEBUG is not set -CONFIG_EROFS_FS_XATTR=y -CONFIG_EROFS_FS_POSIX_ACL=y -CONFIG_EROFS_FS_SECURITY=y -CONFIG_EROFS_FS_ZIP=y CONFIG_EROFS_FS_ZIP_LZMA=y -# CONFIG_EROFS_FS_ZIP_DEFLATE is not set CONFIG_EROFS_FS_PCPU_KTHREAD=y -CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI=y 
-CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y # CONFIG_NFS_V2 is not set CONFIG_NFS_V3=m CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=m -# CONFIG_NFS_SWAP is not set CONFIG_NFS_V4_1=y CONFIG_NFS_V4_2=y -CONFIG_PNFS_FILE_LAYOUT=m -CONFIG_PNFS_BLOCK=m -CONFIG_PNFS_FLEXFILE_LAYOUT=m -CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" -# CONFIG_NFS_V4_1_MIGRATION is not set -CONFIG_NFS_V4_SECURITY_LABEL=y -# CONFIG_ROOT_NFS is not set -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -CONFIG_NFS_DEBUG=y # CONFIG_NFS_DISABLE_UDP_SUPPORT is not set -CONFIG_NFS_V4_2_READ_PLUS=y CONFIG_NFSD=y -# CONFIG_NFSD_V2 is not set CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y -CONFIG_NFSD_PNFS=y CONFIG_NFSD_BLOCKLAYOUT=y CONFIG_NFSD_SCSILAYOUT=y CONFIG_NFSD_FLEXFILELAYOUT=y CONFIG_NFSD_V4_2_INTER_SSC=y CONFIG_NFSD_V4_SECURITY_LABEL=y -CONFIG_GRACE_PERIOD=y -CONFIG_LOCKD=y -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=y -CONFIG_NFS_COMMON=y -CONFIG_NFS_V4_2_SSC_HELPER=y -CONFIG_SUNRPC=y -CONFIG_SUNRPC_GSS=y -CONFIG_SUNRPC_BACKCHANNEL=y -CONFIG_RPCSEC_GSS_KRB5=y -CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y -# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set -# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set CONFIG_SUNRPC_DEBUG=y -CONFIG_SUNRPC_XPRT_RDMA=m CONFIG_CEPH_FS=m CONFIG_CEPH_FSCACHE=y CONFIG_CEPH_FS_POSIX_ACL=y CONFIG_CEPH_FS_SECURITY_LABEL=y CONFIG_CIFS=m # CONFIG_CIFS_STATS2 is not set -CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y CONFIG_CIFS_UPCALL=y CONFIG_CIFS_XATTR=y CONFIG_CIFS_POSIX=y # CONFIG_CIFS_DEBUG is not set CONFIG_CIFS_DFS_UPCALL=y -# CONFIG_CIFS_SWN_UPCALL is not set -# CONFIG_CIFS_SMB_DIRECT is not set -# CONFIG_CIFS_FSCACHE is not set -# CONFIG_SMB_SERVER is not set -CONFIG_SMBFS=m -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set CONFIG_9P_FS=y -# CONFIG_9P_FS_POSIX_ACL is not set -# CONFIG_9P_FS_SECURITY is not set -CONFIG_NLS=y CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_737=m @@ -8024,207 +2101,44 @@ 
CONFIG_NLS_MAC_INUIT=m CONFIG_NLS_MAC_ROMANIAN=m CONFIG_NLS_MAC_TURKISH=m CONFIG_NLS_UTF8=y -CONFIG_NLS_UCS2_UTILS=m CONFIG_DLM=m CONFIG_DLM_DEBUG=y -# CONFIG_UNICODE is not set -CONFIG_IO_WQ=y -# end of File systems - -# -# Security options -# -CONFIG_KEYS=y -# CONFIG_KEYS_REQUEST_CACHE is not set CONFIG_PERSISTENT_KEYRINGS=y CONFIG_TRUSTED_KEYS=y -CONFIG_TRUSTED_KEYS_TPM=y -CONFIG_ENCRYPTED_KEYS=y -# CONFIG_USER_DECRYPTED_DATA is not set CONFIG_KEY_DH_OPERATIONS=y -# CONFIG_SECURITY_DMESG_RESTRICT is not set CONFIG_SECURITY=y -CONFIG_SECURITYFS=y -CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_INFINIBAND=y CONFIG_SECURITY_NETWORK_XFRM=y -CONFIG_SECURITY_PATH=y CONFIG_LSM_MMAP_MIN_ADDR=65535 CONFIG_HARDENED_USERCOPY=y -# CONFIG_FORTIFY_SOURCE is not set -# CONFIG_STATIC_USERMODEHELPER is not set CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_DEVELOP=y -CONFIG_SECURITY_SELINUX_AVC_STATS=y -CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 -CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 -# CONFIG_SECURITY_SELINUX_DEBUG is not set -# CONFIG_SECURITY_SMACK is not set -# CONFIG_SECURITY_TOMOYO is not set CONFIG_SECURITY_APPARMOR=y -# CONFIG_SECURITY_APPARMOR_DEBUG is not set -CONFIG_SECURITY_APPARMOR_INTROSPECT_POLICY=y -CONFIG_SECURITY_APPARMOR_HASH=y -CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y -CONFIG_SECURITY_APPARMOR_EXPORT_BINARY=y -CONFIG_SECURITY_APPARMOR_PARANOID_LOAD=y -# CONFIG_SECURITY_LOADPIN is not set CONFIG_SECURITY_YAMA=y -# CONFIG_SECURITY_SAFESETID is not set CONFIG_SECURITY_LOCKDOWN_LSM=y CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y -CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y -# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set -# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set -# CONFIG_SECURITY_LANDLOCK is not set -CONFIG_INTEGRITY=y CONFIG_INTEGRITY_SIGNATURE=y CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y -CONFIG_INTEGRITY_TRUSTED_KEYRING=y CONFIG_INTEGRITY_PLATFORM_KEYRING=y -# CONFIG_INTEGRITY_MACHINE_KEYRING is not set 
-CONFIG_LOAD_UEFI_KEYS=y -CONFIG_INTEGRITY_AUDIT=y CONFIG_IMA=y -CONFIG_IMA_MEASURE_PCR_IDX=10 -CONFIG_IMA_LSM_RULES=y -CONFIG_IMA_NG_TEMPLATE=y -# CONFIG_IMA_SIG_TEMPLATE is not set -CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" -# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set CONFIG_IMA_DEFAULT_HASH_SHA256=y -# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set -# CONFIG_IMA_DEFAULT_HASH_SM3 is not set -CONFIG_IMA_DEFAULT_HASH="sha256" -# CONFIG_IMA_WRITE_POLICY is not set CONFIG_IMA_READ_POLICY=y CONFIG_IMA_APPRAISE=y -# CONFIG_IMA_ARCH_POLICY is not set -# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set -CONFIG_IMA_APPRAISE_BOOTPARAM=y -# CONFIG_IMA_APPRAISE_MODSIG is not set -# CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY is not set -# CONFIG_IMA_BLACKLIST_KEYRING is not set CONFIG_IMA_LOAD_X509=y -CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" -# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set -CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y -CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y -# CONFIG_IMA_DISABLE_HTABLE is not set CONFIG_EVM=y -CONFIG_EVM_ATTR_FSUUID=y -# CONFIG_EVM_ADD_XATTRS is not set CONFIG_EVM_LOAD_X509=y -CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" -# CONFIG_DEFAULT_SECURITY_SELINUX is not set -# CONFIG_DEFAULT_SECURITY_APPARMOR is not set CONFIG_DEFAULT_SECURITY_DAC=y CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" - -# -# Kernel hardening options -# - -# -# Memory initialization -# -CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y -CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y -CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y -# CONFIG_INIT_STACK_NONE is not set -# CONFIG_INIT_STACK_ALL_PATTERN is not set -CONFIG_INIT_STACK_ALL_ZERO=y -# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set -# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set -CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y -# CONFIG_ZERO_CALL_USED_REGS is not set -# end of Memory initialization - -# -# Hardening of kernel data structures -# -CONFIG_LIST_HARDENED=y -# CONFIG_BUG_ON_DATA_CORRUPTION is not set -# end of Hardening of kernel data structures 
- -CONFIG_CC_HAS_RANDSTRUCT=y -CONFIG_RANDSTRUCT_NONE=y -# CONFIG_RANDSTRUCT_FULL is not set -# CONFIG_RANDSTRUCT_PERFORMANCE is not set -# end of Kernel hardening options -# end of Security options - -CONFIG_XOR_BLOCKS=y -CONFIG_ASYNC_CORE=m -CONFIG_ASYNC_MEMCPY=m -CONFIG_ASYNC_XOR=m -CONFIG_ASYNC_PQ=m -CONFIG_ASYNC_RAID6_RECOV=m -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# CONFIG_CRYPTO_FIPS=y -CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" -# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_SIG2=y -CONFIG_CRYPTO_SKCIPHER=y -CONFIG_CRYPTO_SKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=y -CONFIG_CRYPTO_AKCIPHER2=y -CONFIG_CRYPTO_AKCIPHER=y -CONFIG_CRYPTO_KPP2=y -CONFIG_CRYPTO_KPP=y -CONFIG_CRYPTO_ACOMP2=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_NULL2=y CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_AUTHENC=m CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_ENGINE=m -# end of Crypto core or helper - -# -# Public-key cryptography -# -CONFIG_CRYPTO_RSA=y -CONFIG_CRYPTO_DH=y -# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set -CONFIG_CRYPTO_ECC=m -CONFIG_CRYPTO_ECDH=m -# CONFIG_CRYPTO_ECDSA is not set -# CONFIG_CRYPTO_ECRDSA is not set CONFIG_CRYPTO_SM2=y -# CONFIG_CRYPTO_CURVE25519 is not set -# end of Public-key cryptography - -# -# Block ciphers -# -CONFIG_CRYPTO_AES=y -# CONFIG_CRYPTO_AES_TI is not set CONFIG_CRYPTO_ANUBIS=m -# CONFIG_CRYPTO_ARIA is not set CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_BLOWFISH_COMMON=m CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST_COMMON=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m CONFIG_CRYPTO_DES=m @@ -8232,618 +2146,66 @@ CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m 
CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_SM4=y CONFIG_CRYPTO_SM4_GENERIC=y CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_TWOFISH_COMMON=m -# end of Block ciphers - -# -# Length-preserving ciphers and modes -# -# CONFIG_CRYPTO_ADIANTUM is not set CONFIG_CRYPTO_ARC4=m -CONFIG_CRYPTO_CHACHA20=m -CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_CFB=y -CONFIG_CRYPTO_CTR=y CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_ECB=y -# CONFIG_CRYPTO_HCTR2 is not set -# CONFIG_CRYPTO_KEYWRAP is not set CONFIG_CRYPTO_LRW=m -# CONFIG_CRYPTO_OFB is not set CONFIG_CRYPTO_PCBC=m -# CONFIG_CRYPTO_XTS is not set -# end of Length-preserving ciphers and modes - -# -# AEAD (authenticated encryption with associated data) ciphers -# -# CONFIG_CRYPTO_AEGIS128 is not set CONFIG_CRYPTO_CHACHA20POLY1305=m -CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=y -CONFIG_CRYPTO_GENIV=y CONFIG_CRYPTO_SEQIV=y -CONFIG_CRYPTO_ECHAINIV=m -CONFIG_CRYPTO_ESSIV=m -# end of AEAD (authenticated encryption with associated data) ciphers - -# -# Hashes, digests, and MACs -# -CONFIG_CRYPTO_BLAKE2B=y -CONFIG_CRYPTO_CMAC=m -CONFIG_CRYPTO_GHASH=y -CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_POLY1305=m CONFIG_CRYPTO_RMD160=m -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_SHA3=y -CONFIG_CRYPTO_SM3=y CONFIG_CRYPTO_SM3_GENERIC=y -# CONFIG_CRYPTO_STREEBOG is not set CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_XXHASH=y -# end of Hashes, digests, and MACs - -# -# CRCs (cyclic redundancy checks) -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32=m -CONFIG_CRYPTO_CRCT10DIF=y -CONFIG_CRYPTO_CRC64_ROCKSOFT=m -# end of CRCs (cyclic redundancy checks) - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_LZO=m CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -CONFIG_CRYPTO_ZSTD=y -# end of Compression - -# -# Random number generation -# CONFIG_CRYPTO_ANSI_CPRNG=m 
-CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_DRBG_HMAC=y CONFIG_CRYPTO_DRBG_HASH=y CONFIG_CRYPTO_DRBG_CTR=y -CONFIG_CRYPTO_DRBG=y -CONFIG_CRYPTO_JITTERENTROPY=y -# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set -CONFIG_CRYPTO_KDF800108_CTR=y -# end of Random number generation - -# -# Userspace interface -# -CONFIG_CRYPTO_USER_API=y CONFIG_CRYPTO_USER_API_HASH=y CONFIG_CRYPTO_USER_API_SKCIPHER=y CONFIG_CRYPTO_USER_API_RNG=y -# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set CONFIG_CRYPTO_USER_API_AEAD=y -CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y -# CONFIG_CRYPTO_STATS is not set -# end of Userspace interface - -CONFIG_CRYPTO_HASH_INFO=y - -# -# Accelerated Cryptographic Algorithms for CPU (loongarch) -# CONFIG_CRYPTO_CRC32_LOONGARCH=m -# end of Accelerated Cryptographic Algorithms for CPU (loongarch) - -CONFIG_CRYPTO_HW=y -# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set -# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set -CONFIG_CRYPTO_DEV_NITROX=m CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m -# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set -# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set -# CONFIG_CRYPTO_DEV_QAT_C62X is not set -# CONFIG_CRYPTO_DEV_QAT_4XXX is not set -# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set -# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set -# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set CONFIG_CRYPTO_DEV_CHELSIO=m CONFIG_CRYPTO_DEV_VIRTIO=m -# CONFIG_CRYPTO_DEV_SAFEXCEL is not set -# CONFIG_CRYPTO_DEV_CCREE is not set -# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_X509_CERTIFICATE_PARSER=y -# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set -CONFIG_PKCS7_MESSAGE_PARSER=y -# CONFIG_PKCS7_TEST_KEY is not set CONFIG_SIGNED_PE_FILE_VERIFICATION=y -# CONFIG_FIPS_SIGNATURE_SELFTEST is not set - -# -# Certificates for signature checking -# -CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" -CONFIG_MODULE_SIG_KEY_TYPE_RSA=y -# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set -CONFIG_SYSTEM_TRUSTED_KEYRING=y -CONFIG_SYSTEM_TRUSTED_KEYS="" 
-# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set CONFIG_SECONDARY_TRUSTED_KEYRING=y CONFIG_SYSTEM_BLACKLIST_KEYRING=y -CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" CONFIG_SYSTEM_REVOCATION_LIST=y -CONFIG_SYSTEM_REVOCATION_KEYS="" -# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set -# end of Certificates for signature checking - -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=y -CONFIG_RAID6_PQ_BENCHMARK=y -CONFIG_PACKING=y -CONFIG_BITREVERSE=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_NET_UTILS=y -CONFIG_CORDIC=m -# CONFIG_PRIME_NUMBERS is not set -CONFIG_RATIONAL=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y - -# -# Crypto library routines -# -CONFIG_CRYPTO_LIB_UTILS=y -CONFIG_CRYPTO_LIB_AES=y -CONFIG_CRYPTO_LIB_ARC4=m -CONFIG_CRYPTO_LIB_GF128MUL=y -CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y -CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m -CONFIG_CRYPTO_LIB_CHACHA=m -CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m -CONFIG_CRYPTO_LIB_CURVE25519=m -CONFIG_CRYPTO_LIB_DES=m -CONFIG_CRYPTO_LIB_POLY1305_RSIZE=1 -CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m -CONFIG_CRYPTO_LIB_POLY1305=m -CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m -CONFIG_CRYPTO_LIB_SHA1=y -CONFIG_CRYPTO_LIB_SHA256=y -# end of Crypto library routines - -CONFIG_CRC_CCITT=m -CONFIG_CRC16=y -CONFIG_CRC_T10DIF=y -CONFIG_CRC64_ROCKSOFT=m CONFIG_CRC_ITU_T=y -CONFIG_CRC32=y -# CONFIG_CRC32_SELFTEST is not set -CONFIG_CRC32_SLICEBY8=y -# CONFIG_CRC32_SLICEBY4 is not set -# CONFIG_CRC32_SARWATE is not set -# CONFIG_CRC32_BIT is not set -CONFIG_CRC64=m -# CONFIG_CRC4 is not set CONFIG_CRC7=m -CONFIG_LIBCRC32C=y -# CONFIG_CRC8 is not set -CONFIG_XXHASH=y -CONFIG_AUDIT_GENERIC=y -# CONFIG_RANDOM32_SELFTEST is not set -CONFIG_842_COMPRESS=m -CONFIG_842_DECOMPRESS=m -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_LZ4_COMPRESS=m -CONFIG_LZ4HC_COMPRESS=m -CONFIG_LZ4_DECOMPRESS=y -CONFIG_ZSTD_COMMON=y -CONFIG_ZSTD_COMPRESS=y 
-CONFIG_ZSTD_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_MICROLZMA=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_DECOMPRESS_LZ4=y -CONFIG_DECOMPRESS_ZSTD=y -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=m -CONFIG_TEXTSEARCH_BM=m -CONFIG_TEXTSEARCH_FSM=m -CONFIG_BTREE=y -CONFIG_INTERVAL_TREE=y -CONFIG_XARRAY_MULTI=y -CONFIG_ASSOCIATIVE_ARRAY=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT=y -CONFIG_HAS_IOPORT_MAP=y -CONFIG_HAS_DMA=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_DMA_DECLARE_COHERENT=y -CONFIG_SWIOTLB=y -# CONFIG_SWIOTLB_DYNAMIC is not set -# CONFIG_DMA_RESTRICTED_POOL is not set CONFIG_DMA_CMA=y -# CONFIG_DMA_NUMA_CMA is not set - -# -# Default contiguous memory area size: -# -CONFIG_CMA_SIZE_MBYTES=16 -CONFIG_CMA_SIZE_SEL_MBYTES=y -# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set -# CONFIG_CMA_SIZE_SEL_MIN is not set -# CONFIG_CMA_SIZE_SEL_MAX is not set -CONFIG_CMA_ALIGNMENT=8 -# CONFIG_DMA_API_DEBUG is not set -# CONFIG_DMA_MAP_BENCHMARK is not set -CONFIG_SGL_ALLOC=y -CONFIG_CHECK_SIGNATURE=y -# CONFIG_CPUMASK_OFFSTACK is not set -# CONFIG_FORCE_NR_CPUS is not set -CONFIG_CPU_RMAP=y -CONFIG_DQL=y -CONFIG_GLOB=y -# CONFIG_GLOB_SELFTEST is not set -CONFIG_NLATTR=y -CONFIG_LRU_CACHE=m -CONFIG_CLZ_TAB=y -CONFIG_IRQ_POLL=y -CONFIG_MPILIB=y -CONFIG_SIGNATURE=y -CONFIG_DIMLIB=y -CONFIG_LIBFDT=y -CONFIG_OID_REGISTRY=y -CONFIG_UCS2_STRING=y -CONFIG_HAVE_GENERIC_VDSO=y -CONFIG_GENERIC_GETTIMEOFDAY=y -CONFIG_GENERIC_VDSO_TIME_NS=y -CONFIG_FONT_SUPPORT=y -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y -CONFIG_SG_POOL=y -CONFIG_ARCH_STACKWALK=y -CONFIG_STACKDEPOT=y -CONFIG_SBITMAP=y -CONFIG_PARMAN=m -CONFIG_OBJAGG=m -# end of 
Library routines - -CONFIG_GENERIC_LIB_ASHLDI3=y -CONFIG_GENERIC_LIB_ASHRDI3=y -CONFIG_GENERIC_LIB_LSHRDI3=y -CONFIG_GENERIC_LIB_CMPDI2=y -CONFIG_GENERIC_LIB_UCMPDI2=y -CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y -CONFIG_PLDMFW=y -CONFIG_ASN1_ENCODER=y - -# -# Kernel hacking -# - -# -# printk and dmesg options -# CONFIG_PRINTK_TIME=y CONFIG_PRINTK_CALLER=y -# CONFIG_STACKTRACE_BUILD_ID is not set -CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 -CONFIG_CONSOLE_LOGLEVEL_QUIET=4 -CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 CONFIG_BOOT_PRINTK_DELAY=y CONFIG_DYNAMIC_DEBUG=y -CONFIG_DYNAMIC_DEBUG_CORE=y -CONFIG_SYMBOLIC_ERRNAME=y -CONFIG_DEBUG_BUGVERBOSE=y -# end of printk and dmesg options - -CONFIG_DEBUG_KERNEL=y -CONFIG_DEBUG_MISC=y - -# -# Compile-time checks and compiler options -# -CONFIG_DEBUG_INFO=y -CONFIG_AS_HAS_NON_CONST_LEB128=y -# CONFIG_DEBUG_INFO_NONE is not set CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y -# CONFIG_DEBUG_INFO_DWARF4 is not set -# CONFIG_DEBUG_INFO_DWARF5 is not set -# CONFIG_DEBUG_INFO_REDUCED is not set -CONFIG_DEBUG_INFO_COMPRESSED_NONE=y -# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set -# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set -# CONFIG_DEBUG_INFO_SPLIT is not set CONFIG_DEBUG_INFO_BTF=y -# CONFIG_GDB_SCRIPTS is not set CONFIG_FRAME_WARN=4096 CONFIG_STRIP_ASM_SYMS=y -# CONFIG_READABLE_ASM is not set -# CONFIG_HEADERS_INSTALL is not set CONFIG_DEBUG_SECTION_MISMATCH=y -CONFIG_SECTION_MISMATCH_WARN_ONLY=y -# CONFIG_VMLINUX_MAP is not set -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -# end of Compile-time checks and compiler options - -# -# Generic Kernel Debugging Instruments -# CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_MAGIC_SYSRQ_SERIAL=y -CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" -CONFIG_DEBUG_FS=y -CONFIG_DEBUG_FS_ALLOW_ALL=y -# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set -# CONFIG_DEBUG_FS_ALLOW_NONE is not set -CONFIG_HAVE_ARCH_KGDB=y -# CONFIG_KGDB is not set -# CONFIG_UBSAN is not set -CONFIG_HAVE_KCSAN_COMPILER=y -# end of Generic 
Kernel Debugging Instruments - -# -# Networking Debugging -# -# CONFIG_NET_DEV_REFCNT_TRACKER is not set -# CONFIG_NET_NS_REFCNT_TRACKER is not set -# CONFIG_DEBUG_NET is not set -# end of Networking Debugging - -# -# Memory Debugging -# -# CONFIG_PAGE_EXTENSION is not set -CONFIG_SLUB_DEBUG=y -# CONFIG_SLUB_DEBUG_ON is not set -# CONFIG_PAGE_OWNER is not set -# CONFIG_PAGE_POISONING is not set -# CONFIG_DEBUG_PAGE_REF is not set -CONFIG_HAVE_DEBUG_KMEMLEAK=y -# CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_SHRINKER_DEBUG is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_SCHED_STACK_END_CHECK is not set -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_MEMORY_INIT is not set -# CONFIG_DEBUG_PER_CPU_MAPS is not set -CONFIG_HAVE_DEBUG_STACKOVERFLOW=y -# CONFIG_DEBUG_STACKOVERFLOW is not set -CONFIG_HAVE_ARCH_KASAN=y -CONFIG_ARCH_DISABLE_KASAN_INLINE=y -CONFIG_CC_HAS_KASAN_GENERIC=y -CONFIG_CC_HAS_KASAN_SW_TAGS=y -CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y -# CONFIG_KASAN is not set -CONFIG_HAVE_ARCH_KFENCE=y -# CONFIG_KFENCE is not set -# end of Memory Debugging - CONFIG_DEBUG_SHIRQ=y - -# -# Debug Oops, Lockups and Hangs -# CONFIG_PANIC_ON_OOPS=y -CONFIG_PANIC_ON_OOPS_VALUE=1 -CONFIG_PANIC_TIMEOUT=0 -CONFIG_LOCKUP_DETECTOR=y -CONFIG_SOFTLOCKUP_DETECTOR=y -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y -# CONFIG_SDEI_WATCHDOG is not set CONFIG_HARDLOCKUP_DETECTOR=y -# CONFIG_HARDLOCKUP_DETECTOR_PERF is not set -CONFIG_HARDLOCKUP_DETECTOR_BUDDY=y -# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set -CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y -# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -# CONFIG_WQ_WATCHDOG is not set -# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set -# CONFIG_TEST_LOCKUP is not set -# end of Debug Oops, Lockups and Hangs - -# -# Scheduler Debugging -# # 
CONFIG_SCHED_DEBUG is not set -CONFIG_SCHED_INFO=y CONFIG_SCHEDSTATS=y -CONFIG_SCHED_ACPU=y -# end of Scheduler Debugging - -# CONFIG_DEBUG_TIMEKEEPING is not set - -# -# Lock Debugging (spinlocks, mutexes, etc...) -# -CONFIG_LOCK_DEBUGGING_SUPPORT=y -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -# CONFIG_DEBUG_RWSEMS is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_LOCK_TORTURE_TEST is not set -# CONFIG_WW_MUTEX_SELFTEST is not set -# CONFIG_SCF_TORTURE_TEST is not set -# CONFIG_CSD_LOCK_WAIT_DEBUG is not set -# end of Lock Debugging (spinlocks, mutexes, etc...) - -# CONFIG_DEBUG_IRQFLAGS is not set -CONFIG_STACKTRACE=y -# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set -# CONFIG_DEBUG_KOBJECT is not set - -# -# Debug kernel data structures -# CONFIG_DEBUG_LIST=y -# CONFIG_DEBUG_PLIST is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_MAPLE_TREE is not set -# end of Debug kernel data structures - -# CONFIG_DEBUG_CREDENTIALS is not set - -# -# RCU Debugging -# -# CONFIG_RCU_SCALE_TEST is not set -# CONFIG_RCU_TORTURE_TEST is not set -# CONFIG_RCU_REF_SCALE_TEST is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 -CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 -# CONFIG_RCU_CPU_STALL_CPUTIME is not set # CONFIG_RCU_TRACE is not set -# CONFIG_RCU_EQS_DEBUG is not set -# end of RCU Debugging - -# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -# CONFIG_DEBUG_CGROUP_REF is not set -CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y -CONFIG_HAVE_RETHOOK=y -CONFIG_RETHOOK=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y -CONFIG_HAVE_DYNAMIC_FTRACE=y 
-CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -# CONFIG_BOOTTIME_TRACING is not set CONFIG_FUNCTION_TRACER=y -CONFIG_FUNCTION_GRAPH_TRACER=y -# CONFIG_FUNCTION_GRAPH_RETVAL is not set -CONFIG_DYNAMIC_FTRACE=y -CONFIG_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y -CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y -# CONFIG_FPROBE is not set -# CONFIG_FUNCTION_PROFILER is not set -# CONFIG_STACK_TRACER is not set -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_SCHED_TRACER is not set -# CONFIG_HWLAT_TRACER is not set -# CONFIG_OSNOISE_TRACER is not set -# CONFIG_TIMERLAT_TRACER is not set CONFIG_FTRACE_SYSCALLS=y -# CONFIG_TRACER_SNAPSHOT is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -# CONFIG_PROFILE_ALL_BRANCHES is not set CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_PROBE_EVENTS_BTF_ARGS=y -CONFIG_KPROBE_EVENTS=y -# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set -CONFIG_UPROBE_EVENTS=y -CONFIG_BPF_EVENTS=y -CONFIG_DYNAMIC_EVENTS=y -CONFIG_PROBE_EVENTS=y -CONFIG_FTRACE_MCOUNT_RECORD=y -CONFIG_FTRACE_MCOUNT_USE_CC=y -# CONFIG_SYNTH_EVENTS is not set -# CONFIG_USER_EVENTS is not set -# CONFIG_TRACE_EVENT_INJECT is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -# CONFIG_RING_BUFFER_BENCHMARK is not set -# CONFIG_TRACE_EVAL_MAP_FILE is not set -# CONFIG_FTRACE_RECORD_RECURSION is not set -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set -# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set -# CONFIG_PREEMPTIRQ_DELAY_TEST is not set -# CONFIG_KPROBE_EVENT_GEN_TEST is not set -# CONFIG_RV is not set -# CONFIG_SAMPLES is not set 
-CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y -CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y # CONFIG_STRICT_DEVMEM is not set - -# -# loongarch Debugging -# -# CONFIG_UNWINDER_GUESS is not set -CONFIG_UNWINDER_PROLOGUE=y -# end of loongarch Debugging - -# -# Kernel Testing and Coverage -# -# CONFIG_KUNIT is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -# CONFIG_FUNCTION_ERROR_INJECTION is not set -# CONFIG_FAULT_INJECTION is not set -CONFIG_ARCH_HAS_KCOV=y -CONFIG_CC_HAS_SANCOV_TRACE_PC=y -# CONFIG_KCOV is not set # CONFIG_RUNTIME_TESTING_MENU is not set -# end of Kernel Testing and Coverage - -# -# Rust hacking -# -# end of Rust hacking -# end of Kernel hacking -- Gitee From eaef3146b8ea318bff336059e00a5b12f8d6ff85 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Mon, 21 Oct 2024 10:02:51 +0200 Subject: [PATCH 1632/2138] Revert "fuse: move initialization of fuse_file to fuse_writepages() instead of in callback" ANBZ: #11538 commit 184429a17f8ffc9e188dee9a4459165014025e71 upstream. This reverts commit 672c3b7457fcee9656c36a29a4b21ec4a652433e. fuse_writepages() might be called with no dirty pages after all writable opens were closed. In this case __fuse_write_file_get() will return NULL which will trigger the WARNING. The exact conditions under which this is triggered is unclear and syzbot didn't find a reproducer yet. 
Reported-by: syzbot+217a976dc26ef2fa8711@syzkaller.appspotmail.com Link: https://lore.kernel.org/all/CAJnrk1aQwfvb51wQ5rUSf9N8j1hArTFeSkHqC_3T-mU6_BCD=A@mail.gmail.com/ Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4034 --- fs/fuse/file.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index adb8afcecf67..913129b7d8e0 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2260,6 +2260,13 @@ static int fuse_writepages_fill(struct folio *folio, struct folio *tmp_folio; int err; + if (!data->ff) { + err = -EIO; + data->ff = fuse_write_file_get(fi); + if (!data->ff) + goto out_unlock; + } + if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) { fuse_writepages_send(data); data->wpa = NULL; @@ -2323,13 +2330,13 @@ static int fuse_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; - struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_fill_wb_data data; int err; + err = -EIO; if (fuse_is_bad(inode)) - return -EIO; + goto out; if (wbc->sync_mode == WB_SYNC_NONE && fc->num_background >= fc->congestion_threshold) @@ -2337,9 +2344,7 @@ static int fuse_writepages(struct address_space *mapping, data.inode = inode; data.wpa = NULL; - data.ff = fuse_write_file_get(fi); - if (!data.ff) - return -EIO; + data.ff = NULL; err = -ENOMEM; data.orig_pages = kcalloc(fc->max_pages, @@ -2353,10 +2358,11 @@ static int fuse_writepages(struct address_space *mapping, WARN_ON(!data.wpa->ia.ap.num_pages); fuse_writepages_send(&data); } + if (data.ff) + fuse_file_put(data.ff, false); kfree(data.orig_pages); out: - fuse_file_put(data.ff, false); return err; } -- Gitee From cc8a52699407cefebe82ba9c922b9acde588a7ea Mon Sep 17 00:00:00 2001 From: Ferry Meng Date: Mon, 4 Nov 2024 19:51:02 +0800 Subject: [PATCH 1633/2138] anolis: 
io_uring: add cpu check in io_get_sq_data ANBZ: #11652 For IORING_SETUP_SQPOLL_PERCPU, we should ensure "p->sq_thread_cpu" should be a legal value. Thus we need to add a check for this. Fixes: 18793bb23d2f ("anolis: io_uring: re-add sqthread percpu polling support") Signed-off-by: Ferry Meng Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4076 --- io_uring/sqpoll.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index 1e1096f4858d..8314d57564f4 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -191,8 +191,12 @@ static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, if ((p->flags & IORING_SETUP_SQ_AFF) && (p->flags & IORING_SETUP_SQPOLL_PERCPU)) { + int cpu = p->sq_thread_cpu; + + if (cpu >= nr_cpu_ids || !cpu_online(cpu)) + return ERR_PTR(-EINVAL); mutex_lock(&percpu_sqd_lock); - sqd = *per_cpu_ptr(percpu_sqd, p->sq_thread_cpu); + sqd = *per_cpu_ptr(percpu_sqd, cpu); if (sqd) { if (sqd->task_tgid != current->tgid) { mutex_unlock(&percpu_sqd_lock); -- Gitee From d5f908cfa9ecff1724f263d004c6ef42cf25c34f Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Mon, 4 Nov 2024 15:28:28 +0800 Subject: [PATCH 1634/2138] anolis: loongarch/kvm: fix pch pic spinlock dead lock ANBZ: #11650 Fix pch pic spinlock deadlock Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4071 Reviewed-by: Juxin Gao --- arch/loongarch/kvm/intc/pch_pic.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/kvm/intc/pch_pic.c b/arch/loongarch/kvm/intc/pch_pic.c index e18e27992978..7d053dbcd5c0 100644 --- a/arch/loongarch/kvm/intc/pch_pic.c +++ b/arch/loongarch/kvm/intc/pch_pic.c @@ -52,8 +52,10 @@ void pch_pic_set_irq(struct loongarch_pch_pic *s, int irq, int level) * The irr register variable is cleared when the cpu writes to the * PCH_PIC_CLEAR_START address area */ - if (s->edge & mask) + if (s->edge & mask) { + spin_unlock(&s->lock); 
return; + } s->irr &= ~mask; } pch_pic_update_irq(s, irq, level); -- Gitee From abb4eb59c88279ee588f50f751b980a77fcab6b1 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Thu, 8 Aug 2024 17:13:07 +0800 Subject: [PATCH 1635/2138] anolis: drivers/iommu: add iommu support ANBZ: #11650 Added iommu support for loongarch Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4071 Reviewed-by: Juxin Gao --- arch/loongarch/configs/anolis_defconfig | 3 + arch/loongarch/configs/loongson3_defconfig | 3 + arch/loongarch/include/asm/device.h | 36 + arch/loongarch/kvm/Kconfig | 1 + drivers/iommu/Kconfig | 15 +- drivers/iommu/Makefile | 1 + drivers/iommu/dma-iommu.c | 4 +- drivers/iommu/loongarch_iommu.c | 1728 ++++++++++++++++++++ drivers/iommu/loongarch_iommu.h | 184 +++ drivers/pci/quirks.c | 4 + drivers/vfio/Kconfig | 2 +- 11 files changed, 1977 insertions(+), 4 deletions(-) create mode 100644 arch/loongarch/include/asm/device.h create mode 100644 drivers/iommu/loongarch_iommu.c create mode 100644 drivers/iommu/loongarch_iommu.h diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig index abd3ea974a00..fc088eacce5b 100644 --- a/arch/loongarch/configs/anolis_defconfig +++ b/arch/loongarch/configs/anolis_defconfig @@ -2209,3 +2209,6 @@ CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y # CONFIG_STRICT_DEVMEM is not set # CONFIG_RUNTIME_TESTING_MENU is not set +CONFIG_LOONGARCH_IOMMU=m +CONFIG_CMDLINE_EXTEND=y +CONFIG_CMDLINE="vfio_iommu_type1.allow_unsafe_interrupts=1 nokaslr" diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index fe266926e134..abf7c951affc 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -2210,3 +2210,6 @@ CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y # CONFIG_STRICT_DEVMEM is not set # CONFIG_RUNTIME_TESTING_MENU is not set +CONFIG_LOONGARCH_IOMMU=m +CONFIG_CMDLINE_EXTEND=y 
+CONFIG_CMDLINE="vfio_iommu_type1.allow_unsafe_interrupts=1 nokaslr" diff --git a/arch/loongarch/include/asm/device.h b/arch/loongarch/include/asm/device.h new file mode 100644 index 000000000000..30cc6b610335 --- /dev/null +++ b/arch/loongarch/include/asm/device.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Arch specific extensions to struct device + * + * This file is released under the GPLv2 + * Copyright (C) 2020 Loongson Technology Corporation Limited + */ +#ifndef _ASM_LOONGARCH_DEVICE_H +#define _ASM_LOONGARCH_DEVICE_H + +struct dev_archdata { + /* hook for IOMMU specific extension */ + void *iommu; + struct bus_dma_region *dma_range_map; + /* + * On some old 7A chipset, dma address is different from physical + * address, the main difference is that node id. For dma address + * node id starts from bit 36, physical node id starts from + * bit 44. The remaining address below node id is the same. + */ + unsigned long dma_node_mask; + unsigned int dma_node_off; +}; + +struct pdev_archdata { +}; + +struct dma_domain { + struct list_head node; + const struct dma_map_ops *dma_ops; + int domain_nr; +}; +void add_dma_domain(struct dma_domain *domain); +void del_dma_domain(struct dma_domain *domain); + +#endif /* _ASM_LOONGARCH_DEVICE_H*/ diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig index 461a465e49fd..e899d96f4da6 100644 --- a/arch/loongarch/kvm/Kconfig +++ b/arch/loongarch/kvm/Kconfig @@ -35,6 +35,7 @@ config KVM select SCHED_INFO select MMU_NOTIFIER select PREEMPT_NOTIFIERS + select KVM_VFIO help Support hosting virtualized guest machines using hardware virtualization extensions. 
You will need diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index f0ba61f8a49d..d44df703e96e 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -150,7 +150,7 @@ config OF_IOMMU # IOMMU-agnostic DMA-mapping layer config IOMMU_DMA - def_bool ARM64 || IA64 || X86 + def_bool ARM64 || IA64 || X86 || LOONGARCH select DMA_OPS select IOMMU_API select IOMMU_IOVA @@ -499,4 +499,17 @@ config SPRD_IOMMU Say Y here if you want to use the multimedia devices listed above. +# LOONGARCH IOMMU support +config LOONGARCH_IOMMU + tristate "LOONGARCH IOMMU support" + select IOMMU_API + select IOMMU_DEFAULT_PASSTHROUGH + depends on LOONGARCH + help + With this option you can enable support for LOONGARCH IOMMU hardware in + your system. An IOMMU is a hardware component which provides + remapping of DMA memory accesses from devices. With an LOONGARCH IOMMU you + can isolate the DMA memory of different devices and protect the + system from misbehaving device drivers or hardware. + endif # IOMMU_SUPPORT diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index f74b08c2fb00..724a56c2976a 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -30,3 +30,4 @@ obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o io-pgfault.o obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o obj-$(CONFIG_APPLE_DART) += apple-dart.o +obj-$(CONFIG_LOONGARCH_IOMMU) += loongarch_iommu.o diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 9a33d5435ea5..2eabbe24e897 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -1630,7 +1630,7 @@ static size_t iommu_dma_max_mapping_size(struct device *dev) return SIZE_MAX; } -static const struct dma_map_ops iommu_dma_ops = { +static const struct dma_map_ops iommu_dmafops = { .flags = DMA_F_PCI_P2PDMA_SUPPORTED, .alloc = iommu_dma_alloc, .free = iommu_dma_free, @@ -1673,7 +1673,7 @@ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit) if 
(iommu_is_dma_domain(domain)) { if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev)) goto out_err; - dev->dma_ops = &iommu_dma_ops; + dev->dma_ops = &iommu_dmafops; } return; diff --git a/drivers/iommu/loongarch_iommu.c b/drivers/iommu/loongarch_iommu.c new file mode 100644 index 000000000000..7dfc6459045b --- /dev/null +++ b/drivers/iommu/loongarch_iommu.c @@ -0,0 +1,1728 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Loongson IOMMU Driver + * + * Copyright (C) 2024 Loongson Technology Ltd. + * Author: Lv Chen + * Wang Yang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "loongarch_iommu.h" + +MODULE_LICENSE("GPL"); + +#define LOOP_TIMEOUT 100000 + +#define IVRS_HEADER_LENGTH 48 +#define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40 +#define IVHD_DEV_ALL 0x01 +#define IVHD_DEV_SELECT 0x02 +#define IVHD_DEV_SELECT_RANGE_START 0x03 +#define IVHD_DEV_RANGE_END 0x04 +#define IVHD_DEV_ALIAS 0x42 +#define IVHD_DEV_EXT_SELECT 0x46 +#define IVHD_DEV_ACPI_HID 0xf0 + +#define IVHD_HEAD_TYPE10 0x10 +#define IVHD_HEAD_TYPE11 0x11 +#define IVHD_HEAD_TYPE40 0x40 + +#define MAX_BDF_NUM 0xffff + +#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *)) + +/* + * structure describing one IOMMU in the ACPI table. Typically followed by one + * or more ivhd_entrys. 
+ */ +struct ivhd_header { + u8 type; + u8 flags; + u16 length; + u16 devid; + u16 cap_ptr; + u64 mmio_phys; + u16 pci_seg; + u16 info; + u32 efr_attr; + + /* Following only valid on IVHD type 11h and 40h */ + u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */ + u64 res; +} __packed; + +/* + * A device entry describing which devices a specific IOMMU translates and + * which requestor ids they use. + */ +struct ivhd_entry { + u8 type; + u16 devid; + u8 flags; + u32 ext; + u32 hidh; + u64 cid; + u8 uidf; + u8 uidl; + u8 uid; +} __packed; + +struct iommu_callback_data { + const struct iommu_ops *ops; +}; + +LIST_HEAD(la_rlookup_iommu_list); +LIST_HEAD(la_iommu_list); /* list of all loongarch + * IOMMUs in the system + */ + +static u32 rlookup_table_size; /* size if the rlookup table */ +static int la_iommu_target_ivhd_type; +u16 la_iommu_last_bdf; /* largest PCI device id + * we have to handle + */ + +int loongarch_iommu_disable; + +#define iommu_write_regl(iommu, off, val) \ + writel(val, iommu->confbase + off) +#define iommu_read_regl(iommu, off) readl(iommu->confbase + off) + +static void iommu_translate_disable(struct loongarch_iommu *iommu) +{ + u32 val; + + if (iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return; + } + + /* Disable */ + val = iommu_read_regl(iommu, LA_IOMMU_PFM_CNT_EN); + val &= ~(1 << 31); + iommu_write_regl(iommu, LA_IOMMU_PFM_CNT_EN, val); + + /* Write cmd */ + val = iommu_read_regl(iommu, LA_IOMMU_CMD); + val &= 0xfffffffc; + iommu_write_regl(iommu, LA_IOMMU_CMD, val); +} + +static void iommu_translate_enable(struct loongarch_iommu *iommu) +{ + u32 val = 0; + + if (iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return; + } + + /* Enable use mem */ + val = iommu_read_regl(iommu, LA_IOMMU_PFM_CNT_EN); + val |= (1 << 29); + iommu_write_regl(iommu, LA_IOMMU_PFM_CNT_EN, val); + + /* Enable */ + val = iommu_read_regl(iommu, LA_IOMMU_PFM_CNT_EN); + val |= (1 << 31); + iommu_write_regl(iommu, LA_IOMMU_PFM_CNT_EN, val); + + 
/* Write cmd */ + val = iommu_read_regl(iommu, LA_IOMMU_CMD); + val &= 0xfffffffc; + iommu_write_regl(iommu, LA_IOMMU_CMD, val); +} + +static bool la_iommu_capable(struct device *dev, enum iommu_cap cap) +{ + switch (cap) { + case IOMMU_CAP_CACHE_COHERENCY: + return true; + default: + return false; + } +} + +static struct dom_info *to_dom_info(struct iommu_domain *dom) +{ + return container_of(dom, struct dom_info, domain); +} + +static int update_dev_table(struct la_iommu_dev_data *dev_data, int flag) +{ + u32 val = 0; + int index; + unsigned short bdf; + struct loongarch_iommu *iommu; + u16 domain_id; + + if (dev_data == NULL) { + pr_err("%s dev_data is NULL", __func__); + return 0; + } + + if (dev_data->iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return 0; + } + + if (dev_data->iommu_entry == NULL) { + pr_err("%s iommu_entry is NULL", __func__); + return 0; + } + + iommu = dev_data->iommu; + domain_id = dev_data->iommu_entry->id; + bdf = dev_data->bdf; + + /* Set device table */ + if (flag) { + index = find_first_zero_bit(iommu->devtable_bitmap, + MAX_ATTACHED_DEV_ID); + if (index < MAX_ATTACHED_DEV_ID) { + __set_bit(index, iommu->devtable_bitmap); + dev_data->index = index; + } else { + pr_err("%s get id from dev table failed\n", __func__); + return 0; + } + + pr_info("%s bdf %x domain_id %d iommu devid %x iommu segment %d flag %x\n", + __func__, bdf, domain_id, iommu->devid, + iommu->segment, flag); + + val = bdf & 0xffff; + val |= ((domain_id & 0xf) << 16); /* domain id */ + val |= ((index & 0xf) << 24); /* index */ + val |= (0x1 << 20); /* valid */ + iommu_write_regl(iommu, LA_IOMMU_EIVDB, val); + + val = (0x1 << 31) | (0xf << 0); + val |= (0x1 << 29); /* 1: use main memory */ + iommu_write_regl(iommu, LA_IOMMU_PFM_CNT_EN, val); + + val = iommu_read_regl(iommu, LA_IOMMU_CMD); + val &= 0xfffffffc; + iommu_write_regl(iommu, LA_IOMMU_CMD, val); + } else { + /* Flush device table */ + index = dev_data->index; + pr_info("%s bdf %x domain_id %d 
iommu devid %x iommu segment %d flag %x\n", + __func__, bdf, domain_id, iommu->devid, + iommu->segment, flag); + + val = iommu_read_regl(iommu, LA_IOMMU_EIVDB); + val &= ~(0xffffffff); + val |= ((index & 0xf) << 24); /* index */ + iommu_write_regl(iommu, LA_IOMMU_EIVDB, val); + + val = iommu_read_regl(iommu, LA_IOMMU_PFM_CNT_EN); + val |= (0x1 << 29); /* 1: use main memory */ + iommu_write_regl(iommu, LA_IOMMU_PFM_CNT_EN, val); + + if (index < MAX_ATTACHED_DEV_ID) + __clear_bit(index, iommu->devtable_bitmap); + } + return 0; +} + +static void flush_iotlb(struct loongarch_iommu *iommu) +{ + u32 val; + + if (iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return; + } + + /* Flush all tlb */ + val = iommu_read_regl(iommu, LA_IOMMU_VBTC); + val &= ~0x1f; + val |= 0x5; + iommu_write_regl(iommu, LA_IOMMU_VBTC, val); +} + +static int flush_pgtable_is_busy(struct loongarch_iommu *iommu) +{ + u32 val; + + val = iommu_read_regl(iommu, LA_IOMMU_VBTC); + return val & IOMMU_PGTABLE_BUSY; +} + +static int iommu_flush_iotlb(struct loongarch_iommu *iommu) +{ + u32 retry = 0; + + if (iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return 0; + } + + flush_iotlb(iommu); + while (flush_pgtable_is_busy(iommu)) { + if (retry == LOOP_TIMEOUT) { + pr_err("LA-IOMMU: iotlb flush busy\n"); + return -EIO; + } + retry++; + udelay(1); + } + iommu_translate_enable(iommu); + return 0; +} + +static void la_iommu_flush_iotlb_all(struct iommu_domain *domain) +{ + struct dom_info *priv = to_dom_info(domain); + struct iommu_info *info; + + spin_lock(&priv->lock); + list_for_each_entry(info, &priv->iommu_devlist, list) + iommu_flush_iotlb(info->iommu); + spin_unlock(&priv->lock); +} + +static void do_attach(struct iommu_info *info, struct la_iommu_dev_data *dev_data) +{ + if (dev_data->count) + return; + + dev_data->count++; + dev_data->iommu_entry = info; + + spin_lock(&info->devlock); + list_add(&dev_data->list, &info->dev_list); + info->dev_cnt += 1; + 
spin_unlock(&info->devlock); + + update_dev_table(dev_data, 1); + if (info->dev_cnt > 0) + iommu_flush_iotlb(dev_data->iommu); +} + +static void do_detach(struct la_iommu_dev_data *dev_data) +{ + struct iommu_info *info; + + if (!dev_data || !dev_data->iommu_entry || (dev_data->count == 0)) { + pr_err("%s dev_data or iommu_entry is NULL", __func__); + return; + } + dev_data->count--; + info = dev_data->iommu_entry; + list_del(&dev_data->list); + info->dev_cnt -= 1; + update_dev_table(dev_data, 0); + dev_data->iommu_entry = NULL; +} + +static void detach_all_dev_by_domain(struct iommu_info *info) +{ + struct la_iommu_dev_data *dev_data = NULL; + + spin_lock(&info->devlock); + while (!list_empty(&info->dev_list)) { + dev_data = list_first_entry(&info->dev_list, + struct la_iommu_dev_data, list); + do_detach(dev_data); + } + spin_unlock(&info->devlock); +} + +static int domain_id_alloc(struct loongarch_iommu *iommu) +{ + int id = -1; + + if (iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return id; + } + spin_lock(&iommu->domain_bitmap_lock); + id = find_first_zero_bit(iommu->domain_bitmap, MAX_DOMAIN_ID); + if (id < MAX_DOMAIN_ID) + __set_bit(id, iommu->domain_bitmap); + spin_unlock(&iommu->domain_bitmap_lock); + if (id >= MAX_DOMAIN_ID) + pr_err("LA-IOMMU: Alloc domain id over max domain id\n"); + return id; +} + +static void domain_id_free(struct loongarch_iommu *iommu, int id) +{ + if (iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return; + } + if ((id >= 0) && (id < MAX_DOMAIN_ID)) { + spin_lock(&iommu->domain_bitmap_lock); + __clear_bit(id, iommu->domain_bitmap); + spin_unlock(&iommu->domain_bitmap_lock); + } +} + +/* + * Check whether the system has a priv. 
+ * If yes, it returns 1 and if not, it returns 0 + */ +static int has_dom(struct loongarch_iommu *iommu) +{ + int ret = 0; + + spin_lock(&iommu->dom_info_lock); + while (!list_empty(&iommu->dom_list)) { + ret = 1; + break; + } + spin_unlock(&iommu->dom_info_lock); + return ret; +} + +/* + * This function adds a private domain to the global domain list + */ +static struct dom_entry *find_domain_in_list(struct loongarch_iommu *iommu, struct dom_info *priv) +{ + struct dom_entry *entry, *found = NULL; + + if (priv == NULL) + return found; + spin_lock(&iommu->dom_info_lock); + list_for_each_entry(entry, &iommu->dom_list, list) { + if (entry->domain_info == priv) { + found = entry; + break; + } + } + spin_unlock(&iommu->dom_info_lock); + return found; +} + +static void add_domain_to_list(struct loongarch_iommu *iommu, struct dom_info *priv) +{ + struct dom_entry *entry; + + if (priv == NULL) + return; + entry = find_domain_in_list(iommu, priv); + if (entry != NULL) + return; + entry = kzalloc(sizeof(struct dom_entry), GFP_KERNEL); + entry->domain_info = priv; + spin_lock(&iommu->dom_info_lock); + list_add(&entry->list, &iommu->dom_list); + spin_unlock(&iommu->dom_info_lock); +} + +static void del_domain_from_list(struct loongarch_iommu *iommu, struct dom_info *priv) +{ + struct dom_entry *entry; + + entry = find_domain_in_list(iommu, priv); + if (entry == NULL) + return; + spin_lock(&iommu->dom_info_lock); + list_del(&entry->list); + spin_unlock(&iommu->dom_info_lock); + kfree(entry); +} + +static void free_pagetable(void *pt_base, int level) +{ + int i; + unsigned long *ptep, *pgtable; + + ptep = pt_base; + if (level == IOMMU_PT_LEVEL1) { + kfree(pt_base); + return; + } + for (i = 0; i < IOMMU_PTRS_PER_LEVEL; i++, ptep++) { + if (!iommu_pte_present(ptep)) + continue; + + if (((level - 1) == IOMMU_PT_LEVEL1) && iommu_pte_huge(ptep)) { + *ptep = 0; + continue; + } + + pgtable = phys_to_virt(*ptep & IOMMU_PAGE_MASK); + free_pagetable(pgtable, level - 1); + } + 
kfree(pt_base); +} + +static void iommu_free_pagetable(struct dom_info *info) +{ + free_pagetable(info->pgd, IOMMU_LEVEL_MAX); + info->pgd = NULL; +} + +static struct dom_info *alloc_dom_info(void) +{ + struct dom_info *info; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (info == NULL) + return NULL; + + info->pgd = kzalloc(IOMMU_PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (info->pgd == NULL) { + kfree(info); + return NULL; + } + INIT_LIST_HEAD(&info->iommu_devlist); + spin_lock_init(&info->lock); + mutex_init(&info->ptl_lock); + info->domain.geometry.aperture_start = 0; + info->domain.geometry.aperture_end = ~0ULL; + info->domain.geometry.force_aperture = true; + + return info; +} + +static void dom_info_free(struct dom_info *info) +{ + if (info->pgd != NULL) { + kfree(info->pgd); + info->pgd = NULL; + } + kfree(info); +} + +static struct iommu_domain *la_iommu_domain_alloc(unsigned int type) +{ + struct dom_info *info; + + switch (type) { + case IOMMU_DOMAIN_UNMANAGED: + info = alloc_dom_info(); + if (info == NULL) + return NULL; + break; + default: + return NULL; + } + return &info->domain; +} + +void domain_deattach_iommu(struct dom_info *priv, struct iommu_info *info) +{ + if ((priv == NULL) || (info == NULL) || + (info->dev_cnt != 0) || (info->iommu == NULL)) { + pr_err("%s invalid parameter", __func__); + return; + } + del_domain_from_list(info->iommu, priv); + domain_id_free(info->iommu, info->id); + spin_lock(&priv->lock); + list_del(&info->list); + spin_unlock(&priv->lock); + kfree(info); +} + +static void la_iommu_domain_free(struct iommu_domain *domain) +{ + struct dom_info *priv; + struct loongarch_iommu *iommu = NULL; + struct iommu_info *info, *tmp; + + priv = to_dom_info(domain); + spin_lock(&priv->lock); + list_for_each_entry_safe(info, tmp, &priv->iommu_devlist, list) { + if (info->dev_cnt > 0) + detach_all_dev_by_domain(info); + iommu = info->iommu; + spin_unlock(&priv->lock); + domain_deattach_iommu(priv, info); + spin_lock(&priv->lock); + 
iommu_flush_iotlb(iommu); + if (!has_dom(iommu)) + iommu_translate_disable(iommu); + } + spin_unlock(&priv->lock); + mutex_lock(&priv->ptl_lock); + iommu_free_pagetable(priv); + mutex_unlock(&priv->ptl_lock); + dom_info_free(priv); +} + +struct iommu_rlookup_entry *lookup_rlooptable(int pcisegment) +{ + struct iommu_rlookup_entry *rlookupentry = NULL; + + list_for_each_entry(rlookupentry, &la_rlookup_iommu_list, list) { + if (rlookupentry->pcisegment == pcisegment) + return rlookupentry; + } + return NULL; +} + +struct loongarch_iommu *find_iommu_by_dev(struct pci_dev *pdev) +{ + int pcisegment; + unsigned short devid; + struct iommu_rlookup_entry *rlookupentry = NULL; + struct loongarch_iommu *iommu = NULL; + struct pci_bus *bus = pdev->bus; + + devid = PCI_DEVID(bus->number, pdev->devfn); + pcisegment = pci_domain_nr(bus); + rlookupentry = lookup_rlooptable(pcisegment); + if (rlookupentry == NULL) { + pr_info("%s find segment %d rlookupentry failed\n", __func__, + pcisegment); + return iommu; + } + iommu = rlookupentry->rlookup_table[devid]; + if (iommu && (!iommu->confbase)) + iommu = NULL; + return iommu; +} + +struct iommu_device *iommu_init_device(struct device *dev) +{ + struct la_iommu_dev_data *dev_data; + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_bus *bus = pdev->bus; + unsigned short devid; + struct loongarch_iommu *iommu = NULL; + struct iommu_device *iommu_dev = ERR_PTR(-ENODEV); + + if (!dev_is_pci(dev)) + return iommu_dev; + + if (dev->archdata.iommu != NULL || bus == NULL) { + pr_info("LA-IOMMU: bdf:0x%x has added\n", pdev->devfn); + return iommu_dev; + } + iommu = find_iommu_by_dev(pdev); + if (iommu == NULL) { + pci_info(pdev, "%s find iommu failed by dev\n", __func__); + return iommu_dev; + } + dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); + if (!dev_data) + return iommu_dev; + devid = PCI_DEVID(bus->number, pdev->devfn); + dev_data->bdf = devid; + + pci_info(pdev, "%s bdf %#x iommu dev id %#x\n", __func__, dev_data->bdf, 
iommu->devid); + /* The initial state is 0, and 1 is added only when attach dev */ + dev_data->count = 0; + dev_data->iommu = iommu; + dev_data->dev = dev; + dev->archdata.iommu = dev_data; + iommu_dev = &iommu->iommu_dev; + return iommu_dev; +} + +struct iommu_device *la_iommu_probe_device(struct device *dev) +{ + return iommu_init_device(dev); +} + +static struct iommu_group *la_iommu_device_group(struct device *dev) +{ + struct iommu_group *group; + + /* + * We don't support devices sharing stream IDs other than PCI RID + * aliases, since the necessary ID-to-device lookup becomes rather + * impractical given a potential sparse 32-bit stream ID space. + */ + if (dev_is_pci(dev)) + group = pci_device_group(dev); + else + group = generic_device_group(dev); + return group; +} + +static void la_iommu_remove_device(struct device *dev) +{ + struct la_iommu_dev_data *dev_data; + + iommu_group_remove_device(dev); + dev_data = dev->archdata.iommu; + dev->archdata.iommu = NULL; + kfree(dev_data); +} + +struct iommu_info *get_iommu_info_from_dom(struct dom_info *priv, struct loongarch_iommu *iommu) +{ + struct iommu_info *info; + + spin_lock(&priv->lock); + list_for_each_entry(info, &priv->iommu_devlist, list) { + if (info->iommu == iommu) { + spin_unlock(&priv->lock); + return info; + } + } + spin_unlock(&priv->lock); + return NULL; +} + +struct iommu_info *domain_attach_iommu(struct dom_info *priv, struct loongarch_iommu *iommu) +{ + u32 dir_ctrl; + struct iommu_info *info; + unsigned long phys; + + info = get_iommu_info_from_dom(priv, iommu); + if (info) + return info; + + info = kzalloc(sizeof(struct iommu_info), GFP_KERNEL_ACCOUNT); + if (!info) + return NULL; + + INIT_LIST_HEAD(&info->dev_list); + info->iommu = iommu; + info->id = domain_id_alloc(iommu); + if (info->id == -1) { + pr_info("%s alloc id for domain failed\n", __func__); + kfree(info); + return NULL; + } + + phys = virt_to_phys(priv->pgd); + dir_ctrl = (IOMMU_LEVEL_STRIDE << 26) | (IOMMU_LEVEL_SHIFT(2) << 
20); + dir_ctrl |= (IOMMU_LEVEL_STRIDE << 16) | (IOMMU_LEVEL_SHIFT(1) << 10); + dir_ctrl |= (IOMMU_LEVEL_STRIDE << 6) | IOMMU_LEVEL_SHIFT(0); + iommu_write_regl(iommu, LA_IOMMU_DIR_CTRL(info->id), dir_ctrl); + iommu_write_regl(iommu, LA_IOMMU_PGD_HI(info->id), phys >> 32); + iommu_write_regl(iommu, LA_IOMMU_PGD_LO(info->id), phys & UINT_MAX); + + spin_lock(&priv->lock); + list_add(&info->list, &priv->iommu_devlist); + spin_unlock(&priv->lock); + add_domain_to_list(iommu, priv); + return info; +} + +static struct la_iommu_dev_data *get_devdata_from_iommu_info(struct dom_info *info, + struct loongarch_iommu *iommu, unsigned long bdf) +{ + struct iommu_info *entry; + struct la_iommu_dev_data *dev_data, *found = NULL; + + entry = get_iommu_info_from_dom(info, iommu); + if (!entry) + return found; + spin_lock(&entry->devlock); + list_for_each_entry(dev_data, &entry->dev_list, list) { + if (dev_data->bdf == bdf) { + found = dev_data; + break; + } + } + spin_unlock(&entry->devlock); + return found; +} +static void la_iommu_detach_dev(struct device *dev); + +static int la_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) +{ + struct dom_info *priv = to_dom_info(domain); + struct pci_dev *pdev = to_pci_dev(dev); + unsigned char busnum = pdev->bus->number; + struct la_iommu_dev_data *dev_data; + struct loongarch_iommu *iommu; + struct iommu_info *info; + unsigned short bdf; + + la_iommu_detach_dev(dev); + + if (domain == NULL) + return 0; + + bdf = PCI_DEVID(busnum, pdev->devfn); + dev_data = (struct la_iommu_dev_data *)dev->archdata.iommu; + if (dev_data == NULL) { + pci_info(pdev, "%s dev_data is Invalid\n", __func__); + return 0; + } + + iommu = dev_data->iommu; + if (iommu == NULL) { + pci_info(pdev, "%s iommu is Invalid\n", __func__); + return 0; + } + + pci_info(pdev, "%s bdf %#x priv %lx iommu devid %#x\n", __func__, + bdf, (unsigned long)priv, iommu->devid); + dev_data = get_devdata_from_iommu_info(priv, iommu, bdf); + if (dev_data) { + pci_info(pdev, 
"LA-IOMMU: bdf 0x%x devfn %x has attached, count:0x%x\n", + bdf, pdev->devfn, dev_data->count); + return 0; + } + dev_data = (struct la_iommu_dev_data *)dev->archdata.iommu; + + info = domain_attach_iommu(priv, iommu); + if (!info) { + pci_info(pdev, "domain attach iommu failed\n"); + return 0; + } + dev_data->domain = domain; + do_attach(info, dev_data); + return 0; +} + +static void la_iommu_detach_dev(struct device *dev) +{ + struct iommu_domain *domain; + struct dom_info *priv; + struct pci_dev *pdev = to_pci_dev(dev); + unsigned char busnum = pdev->bus->number; + struct la_iommu_dev_data *dev_data; + struct loongarch_iommu *iommu; + struct iommu_info *iommu_entry = NULL; + unsigned short bdf; + + bdf = PCI_DEVID(busnum, pdev->devfn); + dev_data = (struct la_iommu_dev_data *)dev->archdata.iommu; + if (dev_data == NULL) { + pci_info(pdev, "%s dev_data is Invalid\n", __func__); + return; + } + + domain = dev_data->domain; + if (domain == NULL) + return; + + priv = to_dom_info(domain); + iommu = dev_data->iommu; + if (iommu == NULL) { + pci_info(pdev, "%s iommu is Invalid\n", __func__); + return; + } + dev_data = get_devdata_from_iommu_info(priv, iommu, bdf); + if (dev_data == NULL) { + pci_info(pdev, "%s bdf %#x hasn't attached\n", + __func__, bdf); + return; + } + + iommu = dev_data->iommu; + dev_data->dev = NULL; + iommu_entry = get_iommu_info_from_dom(priv, iommu); + if (iommu_entry == NULL) { + pci_info(pdev, "%s get iommu_entry failed\n", __func__); + return; + } + + spin_lock(&iommu_entry->devlock); + do_detach(dev_data); + spin_unlock(&iommu_entry->devlock); + + pci_info(pdev, "%s iommu devid %x sigment %x\n", __func__, + iommu->devid, iommu->segment); +} + +static unsigned long *iommu_get_pte(void *pt_base, unsigned long vaddr, int level) +{ + int i; + unsigned long *ptep, *pgtable; + + if (level > (IOMMU_LEVEL_MAX - 1)) + return NULL; + pgtable = pt_base; + for (i = IOMMU_LEVEL_MAX - 1; i >= level; i--) { + ptep = iommu_pte_offset(pgtable, vaddr, i); + 
if (!iommu_pte_present(ptep)) + break; + if (iommu_pte_huge(ptep)) + break; + pgtable = phys_to_virt(*ptep & IOMMU_PAGE_MASK); + } + return ptep; +} + +static int iommu_get_page_table(unsigned long *ptep) +{ + void *addr; + unsigned long pte; + + if (!iommu_pte_present(ptep)) { + addr = kzalloc(IOMMU_PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!addr) + return -ENOMEM; + pte = virt_to_phys(addr) & IOMMU_PAGE_MASK; + pte |= IOMMU_PTE_RW; + *ptep = pte; + } + return 0; +} + +static size_t iommu_page_map(void *pt_base, + unsigned long start, unsigned long end, + phys_addr_t paddr, int level) +{ + unsigned long next, old, step; + unsigned long pte, *ptep, *pgtable; + int ret, huge; + + old = start; + ptep = iommu_pte_offset(pt_base, start, level); + if (level == IOMMU_PT_LEVEL0) { + paddr = paddr & IOMMU_PAGE_MASK; + do { + pte = paddr | IOMMU_PTE_RW; + *ptep = pte; + ptep++; + start += IOMMU_PAGE_SIZE; + paddr += IOMMU_PAGE_SIZE; + } while (start < end); + + return start - old; + } + + do { + next = iommu_ptable_end(start, end, level); + step = next - start; + huge = 0; + if ((level == IOMMU_PT_LEVEL1) && (step == IOMMU_HPAGE_SIZE)) + if (!iommu_pte_present(ptep) || iommu_pte_huge(ptep)) + huge = 1; + + if (huge) { + pte = (paddr & IOMMU_HPAGE_MASK) | + IOMMU_PTE_RW | IOMMU_PTE_HP; + *ptep = pte; + } else { + ret = iommu_get_page_table(ptep); + if (ret != 0) + break; + pgtable = phys_to_virt(*ptep & IOMMU_PAGE_MASK); + iommu_page_map(pgtable, start, next, paddr, level - 1); + } + + ptep++; + paddr += step; + start = next; + } while (start < end); + return start - old; +} + +static int domain_map_page(struct dom_info *priv, unsigned long start, + phys_addr_t paddr, size_t size) +{ + int ret = 0; + phys_addr_t end; + size_t map_size; + + end = start + size; + mutex_lock(&priv->ptl_lock); + map_size = iommu_page_map(priv->pgd, start, + end, paddr, IOMMU_LEVEL_MAX - 1); + if (map_size != size) + ret = -EFAULT; + mutex_unlock(&priv->ptl_lock); + 
la_iommu_flush_iotlb_all(&priv->domain); + return ret; +} + +static size_t iommu_page_unmap(void *pt_base, + unsigned long start, unsigned long end, int level) +{ + unsigned long next, old; + unsigned long *ptep, *pgtable; + + old = start; + ptep = iommu_pte_offset(pt_base, start, level); + if (level == IOMMU_PT_LEVEL0) { + do { + *ptep++ = 0; + start += IOMMU_PAGE_SIZE; + } while (start < end); + } else { + do { + next = iommu_ptable_end(start, end, level); + if (!iommu_pte_present(ptep)) + continue; + + if (iommu_pte_huge(ptep)) { + if ((next - start) != IOMMU_HPAGE_SIZE) + pr_err( + "Map pte on hugepage not supported now\n"); + *ptep = 0; + } else { + pgtable = phys_to_virt(*ptep & IOMMU_PAGE_MASK); + iommu_page_unmap(pgtable, start, + next, level - 1); + } + } while (ptep++, start = next, start < end); + } + return start - old; +} + +static size_t domain_unmap_page(struct dom_info *priv, + unsigned long start, size_t size) +{ + size_t unmap_len; + unsigned long end; + + end = start + size; + mutex_lock(&priv->ptl_lock); + unmap_len = iommu_page_unmap(priv->pgd, start, + end, (IOMMU_LEVEL_MAX - 1)); + mutex_unlock(&priv->ptl_lock); + la_iommu_flush_iotlb_all(&priv->domain); + return unmap_len; +} + +static int la_iommu_map(struct iommu_domain *domain, unsigned long vaddr, + phys_addr_t paddr, size_t len, int prot, gfp_t gfp) +{ + int ret; + struct dom_info *priv = to_dom_info(domain); + + ret = domain_map_page(priv, vaddr, paddr, len); + return ret; +} + +static size_t la_iommu_unmap(struct iommu_domain *domain, unsigned long vaddr, + size_t len, struct iommu_iotlb_gather *iotlb_gather) +{ + struct dom_info *priv = to_dom_info(domain); + + return domain_unmap_page(priv, vaddr, len); +} + +static phys_addr_t _iommu_iova_to_phys(struct dom_info *info, dma_addr_t vaddr) +{ + unsigned long *ptep; + unsigned long page_size, page_mask; + phys_addr_t paddr; + + mutex_lock(&info->ptl_lock); + ptep = iommu_get_pte(info->pgd, vaddr, IOMMU_PT_LEVEL0); + 
mutex_unlock(&info->ptl_lock); + + if (!ptep || !iommu_pte_present(ptep)) { + pr_warn_once( + "LA-IOMMU: shadow pte is null or not present with vaddr %llx\n", + vaddr); + paddr = 0; + return paddr; + } + + if (iommu_pte_huge(ptep)) { + page_size = IOMMU_HPAGE_SIZE; + page_mask = IOMMU_HPAGE_MASK; + } else { + page_size = IOMMU_PAGE_SIZE; + page_mask = IOMMU_PAGE_MASK; + } + paddr = *ptep & page_mask; + paddr |= vaddr & (page_size - 1); + return paddr; +} + +static phys_addr_t la_iommu_iova_to_phys(struct iommu_domain *domain, + dma_addr_t vaddr) +{ + struct dom_info *priv = to_dom_info(domain); + phys_addr_t phys; + + spin_lock(&priv->lock); + phys = _iommu_iova_to_phys(priv, vaddr); + spin_unlock(&priv->lock); + return phys; +} + +static void la_domain_set_plaform_dma_ops(struct device *dev) +{ + /* + * loongarch doesn't setup default domains because we can't hook into the + * normal probe path + */ +} + +const struct iommu_ops la_iommu_ops = { + .capable = la_iommu_capable, + .domain_alloc = la_iommu_domain_alloc, + .probe_device = la_iommu_probe_device, + .release_device = la_iommu_remove_device, + .device_group = la_iommu_device_group, + .pgsize_bitmap = LA_IOMMU_PGSIZE, + .owner = THIS_MODULE, + .set_platform_dma_ops = la_domain_set_plaform_dma_ops, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = la_iommu_attach_dev, + .map = la_iommu_map, + .unmap = la_iommu_unmap, + .iova_to_phys = la_iommu_iova_to_phys, + .flush_iotlb_all = la_iommu_flush_iotlb_all, + .free = la_iommu_domain_free, + } +}; + + +struct loongarch_iommu *loongarch_get_iommu_by_devid(struct pci_dev *pdev) +{ + int pcisegment; + unsigned short devid; + struct loongarch_iommu *iommu = NULL; + struct pci_bus *bus = pdev->bus; + + devid = PCI_DEVID(bus->number, pdev->devfn); + pcisegment = pci_domain_nr(pdev->bus); + list_for_each_entry(iommu, &la_iommu_list, list) { + if ((iommu->segment == pcisegment) && + (iommu->devid == devid)) { + return iommu; + } + } + return NULL; 
+} + +bool check_device_compat(struct pci_dev *pdev) +{ + bool compat = true; + + if ((pdev->revision == 0) && (pdev->device == 0x7a1f)) + compat = false; + return compat; +} + +static int loongarch_iommu_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int ret = 1; + int bitmap_sz = 0; + int tmp; + bool compat = false; + struct loongarch_iommu *iommu = NULL; + resource_size_t base, size; + + iommu = loongarch_get_iommu_by_devid(pdev); + if (iommu == NULL) { + pci_info(pdev, "%s can't find iommu\n", __func__); + return -ENODEV; + } + + compat = check_device_compat(pdev); + if (!compat) { + pci_info(pdev, + "%s The iommu driver is not compatible with this device\n", + __func__); + return -ENODEV; + } + + iommu->pdev = pdev; + base = pci_resource_start(pdev, 0); + size = pci_resource_len(pdev, 0); + if (!request_mem_region(base, size, "loongarch_iommu")) { + pci_err(pdev, + "%d can't reserve mmio registers base %llx size %llx\n", + __LINE__, base, size); + return -ENOMEM; + } + iommu->confbase_phy = base; + iommu->conf_size = size; + iommu->confbase = ioremap(base, size); + if (iommu->confbase == NULL) { + pci_info(pdev, "%s iommu pci dev bar0 is NULL\n", __func__); + return ret; + } + + pr_info("iommu confbase %llx pgtsize %llx\n", + (u64)iommu->confbase, size); + tmp = MAX_DOMAIN_ID / 8; + bitmap_sz = (MAX_DOMAIN_ID % 8) ? (tmp + 1) : tmp; + iommu->domain_bitmap = bitmap_zalloc(bitmap_sz, GFP_KERNEL); + if (iommu->domain_bitmap == NULL) { + pr_err("LA-IOMMU: domain bitmap alloc err bitmap_sz:%d\n", + bitmap_sz); + goto out_err; + } + + tmp = MAX_ATTACHED_DEV_ID / 8; + bitmap_sz = (MAX_ATTACHED_DEV_ID % 8) ? 
(tmp + 1) : tmp; + iommu->devtable_bitmap = bitmap_zalloc(bitmap_sz, GFP_KERNEL); + if (iommu->devtable_bitmap == NULL) { + pr_err("LA-IOMMU: devtable bitmap alloc err bitmap_sz:%d\n", + bitmap_sz); + goto out_err_1; + } + + ret = iommu_device_sysfs_add(&iommu->iommu_dev, &pdev->dev, + NULL, "ivhd-%#x", iommu->devid); + iommu_device_register(&iommu->iommu_dev, &la_iommu_ops, NULL); + return 0; + +out_err_1: + iommu->pdev = NULL; + iounmap(iommu->confbase); + iommu->confbase = NULL; + release_mem_region(iommu->confbase_phy, iommu->conf_size); + iommu->confbase_phy = 0; + iommu->conf_size = 0; + kfree(iommu->domain_bitmap); + iommu->domain_bitmap = NULL; +out_err: + return ret; +} + +static void loongarch_iommu_remove(struct pci_dev *pdev) +{ + struct loongarch_iommu *iommu = NULL; + + iommu = loongarch_get_iommu_by_devid(pdev); + if (iommu == NULL) + return; + if (iommu->domain_bitmap != NULL) { + kfree(iommu->domain_bitmap); + iommu->domain_bitmap = NULL; + } + if (iommu->devtable_bitmap != NULL) { + kfree(iommu->devtable_bitmap); + iommu->devtable_bitmap = NULL; + } + if (iommu->confbase != NULL) { + iounmap(iommu->confbase); + iommu->confbase = NULL; + } + if (iommu->confbase_phy != 0) { + release_mem_region(iommu->confbase_phy, iommu->conf_size); + iommu->confbase_phy = 0; + iommu->conf_size = 0; + } +} + +static int __init check_ivrs_checksum(struct acpi_table_header *table) +{ + int i; + u8 checksum = 0, *p = (u8 *)table; + + for (i = 0; i < table->length; ++i) + checksum += p[i]; + if (checksum != 0) { + /* ACPI table corrupt */ + pr_err("IVRS invalid checksum\n"); + return -ENODEV; + } + return 0; +} + +struct iommu_rlookup_entry *create_rlookup_entry(int pcisegment) +{ + struct iommu_rlookup_entry *rlookupentry = NULL; + + rlookupentry = kzalloc(sizeof(struct iommu_rlookup_entry), + GFP_KERNEL); + if (rlookupentry == NULL) + return rlookupentry; + + rlookupentry->pcisegment = pcisegment; + /* IOMMU rlookup table - find the IOMMU for a specific device */ + 
rlookupentry->rlookup_table = (void *)__get_free_pages( + GFP_KERNEL | __GFP_ZERO, + get_order(rlookup_table_size)); + if (rlookupentry->rlookup_table == NULL) { + kfree(rlookupentry); + rlookupentry = NULL; + } else { + list_add(&rlookupentry->list, &la_rlookup_iommu_list); + } + return rlookupentry; +} + +/* Writes the specific IOMMU for a device into the rlookup table */ +static void __init set_iommu_for_device(struct loongarch_iommu *iommu, + u16 devid) +{ + struct iommu_rlookup_entry *rlookupentry = NULL; + + rlookupentry = lookup_rlooptable(iommu->segment); + if (rlookupentry == NULL) + rlookupentry = create_rlookup_entry(iommu->segment); + if (rlookupentry != NULL) + rlookupentry->rlookup_table[devid] = iommu; +} + +static inline u32 get_ivhd_header_size(struct ivhd_header *h) +{ + u32 size = 0; + + switch (h->type) { + case IVHD_HEAD_TYPE10: + size = 24; + break; + case IVHD_HEAD_TYPE11: + case IVHD_HEAD_TYPE40: + size = 40; + break; + } + return size; +} + +static inline void update_last_devid(u16 devid) +{ + if (devid > la_iommu_last_bdf) + la_iommu_last_bdf = devid; +} + +/* + * This function calculates the length of a given IVHD entry + */ +static inline int ivhd_entry_length(u8 *ivhd) +{ + u32 type = ((struct ivhd_entry *)ivhd)->type; + + if (type < 0x80) { + return 0x04 << (*ivhd >> 6); + } else if (type == IVHD_DEV_ACPI_HID) { + /* For ACPI_HID, offset 21 is uid len */ + return *((u8 *)ivhd + 21) + 22; + } + return 0; +} + +/* + * After reading the highest device id from the IOMMU PCI capability header + * this function looks if there is a higher device id defined in the ACPI table + */ +static int __init find_last_devid_from_ivhd(struct ivhd_header *h) +{ + u8 *p = (void *)h, *end = (void *)h; + struct ivhd_entry *dev; + + u32 ivhd_size = get_ivhd_header_size(h); + + if (!ivhd_size) { + pr_err("la-iommu: Unsupported IVHD type %#x\n", h->type); + return -EINVAL; + } + + p += ivhd_size; + end += h->length; + + while (p < end) { + dev = (struct 
ivhd_entry *)p; + switch (dev->type) { + case IVHD_DEV_ALL: + /* Use maximum BDF value for DEV_ALL */ + update_last_devid(MAX_BDF_NUM); + break; + case IVHD_DEV_SELECT: + case IVHD_DEV_RANGE_END: + case IVHD_DEV_ALIAS: + case IVHD_DEV_EXT_SELECT: + /* all the above subfield types refer to device ids */ + update_last_devid(dev->devid); + break; + default: + break; + } + p += ivhd_entry_length(p); + } + + WARN_ON(p != end); + + return 0; +} + +/* + * Iterate over all IVHD entries in the ACPI table and find the highest device + * id which we need to handle. This is the first of three functions which parse + * the ACPI table. So we check the checksum here. + */ +static int __init find_last_devid_acpi(struct acpi_table_header *table) +{ + u8 *p = (u8 *)table, *end = (u8 *)table; + struct ivhd_header *h; + + p += IVRS_HEADER_LENGTH; + + end += table->length; + while (p < end) { + h = (struct ivhd_header *)p; + if (h->type == la_iommu_target_ivhd_type) { + int ret = find_last_devid_from_ivhd(h); + + if (ret) + return ret; + } + + if (h->length == 0) + break; + + p += h->length; + } + + if (p != end) + return -EINVAL; + return 0; +} + +/* + * Takes a pointer to an loongarch IOMMU entry in the ACPI table and + * initializes the hardware and our data structures with it. + */ +static int __init init_iommu_from_acpi(struct loongarch_iommu *iommu, + struct ivhd_header *h) +{ + u8 *p = (u8 *)h; + u8 *end = p; + u16 devid = 0, devid_start = 0; + u32 dev_i; + struct ivhd_entry *e; + u32 ivhd_size; + + /* + * Done. 
Now parse the device entries + */ + ivhd_size = get_ivhd_header_size(h); + if (!ivhd_size) { + pr_err("loongarch iommu: Unsupported IVHD type %#x\n", h->type); + return -EINVAL; + } + + if (h->length == 0) + return -EINVAL; + + p += ivhd_size; + end += h->length; + + while (p < end) { + e = (struct ivhd_entry *)p; + switch (e->type) { + case IVHD_DEV_ALL: + for (dev_i = 0; dev_i <= la_iommu_last_bdf; ++dev_i) + set_iommu_for_device(iommu, dev_i); + break; + case IVHD_DEV_SELECT: + + pr_info(" DEV_SELECT\t\t\t devid: %02x:%02x.%x\n", + PCI_BUS_NUM(e->devid), + PCI_SLOT(e->devid), + PCI_FUNC(e->devid)); + + devid = e->devid; + set_iommu_for_device(iommu, devid); + break; + case IVHD_DEV_SELECT_RANGE_START: + + pr_info(" DEV_SELECT_RANGE_START\t devid: %02x:%02x.%x\n", + PCI_BUS_NUM(e->devid), + PCI_SLOT(e->devid), + PCI_FUNC(e->devid)); + + devid_start = e->devid; + break; + case IVHD_DEV_RANGE_END: + + pr_info(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", + PCI_BUS_NUM(e->devid), + PCI_SLOT(e->devid), + PCI_FUNC(e->devid)); + + devid = e->devid; + for (dev_i = devid_start; dev_i <= devid; ++dev_i) + set_iommu_for_device(iommu, dev_i); + break; + default: + break; + } + + p += ivhd_entry_length(p); + } + + return 0; +} + +/* + * This function clues the initialization function for one IOMMU + * together and also allocates the command buffer and programs the + * hardware. It does NOT enable the IOMMU. This is done afterwards. 
+ */ +static int __init init_iommu_one(struct loongarch_iommu *iommu, + struct ivhd_header *h) +{ + int ret; + struct iommu_rlookup_entry *rlookupentry = NULL; + + spin_lock_init(&iommu->domain_bitmap_lock); + spin_lock_init(&iommu->dom_info_lock); + + /* Add IOMMU to internal data structures */ + INIT_LIST_HEAD(&iommu->dom_list); + + list_add_tail(&iommu->list, &la_iommu_list); + + /* + * Copy data from ACPI table entry to the iommu struct + */ + iommu->devid = h->devid; + iommu->segment = h->pci_seg; + ret = init_iommu_from_acpi(iommu, h); + if (ret) { + pr_err("%s init iommu from acpi failed\n", __func__); + return ret; + } + rlookupentry = lookup_rlooptable(iommu->segment); + if (rlookupentry != NULL) { + /* + * Make sure IOMMU is not considered to translate itself. + * The IVRS table tells us so, but this is a lie! + */ + rlookupentry->rlookup_table[iommu->devid] = NULL; + } + return 0; +} + +/* + * Iterates over all IOMMU entries in the ACPI table, allocates the + * IOMMU structure and initializes it with init_iommu_one() + */ +static int __init init_iommu_all(struct acpi_table_header *table) +{ + u8 *p = (u8 *)table, *end = (u8 *)table; + struct ivhd_header *h; + struct loongarch_iommu *iommu; + int ret; + + end += table->length; + p += IVRS_HEADER_LENGTH; + + while (p < end) { + h = (struct ivhd_header *)p; + + if (h->length == 0) + break; + + if (*p == la_iommu_target_ivhd_type) { + + pr_info("device: %02x:%02x.%01x seg: %d\n", + PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid), + PCI_FUNC(h->devid), h->pci_seg); + + iommu = kzalloc(sizeof(struct loongarch_iommu), + GFP_KERNEL); + if (iommu == NULL) + return -ENOMEM; + + ret = init_iommu_one(iommu, h); + if (ret) { + kfree(iommu); + pr_info("%s init iommu failed\n", __func__); + return ret; + } + } + p += h->length; + } + if (p != end) + return -EINVAL; + return 0; +} + +/** + * get_highest_supported_ivhd_type - Look up the appropriate IVHD type + * @ivrs Pointer to the IVRS header + * + * This function search 
through all IVDB of the maximum supported IVHD + */ +static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs) +{ + u8 *base = (u8 *)ivrs; + struct ivhd_header *ivhd = (struct ivhd_header *) + (base + IVRS_HEADER_LENGTH); + u8 last_type = ivhd->type; + u16 devid = ivhd->devid; + + while (((u8 *)ivhd - base < ivrs->length) && + (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED) && + (ivhd->length > 0)) { + u8 *p = (u8 *) ivhd; + + if (ivhd->devid == devid) + last_type = ivhd->type; + ivhd = (struct ivhd_header *)(p + ivhd->length); + } + return last_type; +} + +static inline unsigned long tbl_size(int entry_size) +{ + unsigned int shift = PAGE_SHIFT + + get_order(((int)la_iommu_last_bdf + 1) * entry_size); + + return 1UL << shift; +} + +static int __init loongarch_iommu_ivrs_init(void) +{ + struct acpi_table_header *ivrs_base; + acpi_status status; + int ret = 0; + + status = acpi_get_table("IVRS", 0, &ivrs_base); + if (status == AE_NOT_FOUND) { + pr_info("%s get ivrs table failed\n", __func__); + return -ENODEV; + } + + /* + * Validate checksum here so we don't need to do it when + * we actually parse the table + */ + ret = check_ivrs_checksum(ivrs_base); + if (ret) + goto out; + + la_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base); + pr_info("Using IVHD type %#x\n", la_iommu_target_ivhd_type); + + /* + * First parse ACPI tables to find the largest Bus/Dev/Func + * we need to handle. 
Upon this information the shared data + * structures for the IOMMUs in the system will be allocated + */ + ret = find_last_devid_acpi(ivrs_base); + if (ret) { + pr_err("%s find last devid failed\n", __func__); + goto out; + } + + rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); + + /* + * now the data structures are allocated and basically initialized + * start the real acpi table scan + */ + ret = init_iommu_all(ivrs_base); +out: + /* Don't leak any ACPI memory */ + acpi_put_table(ivrs_base); + ivrs_base = NULL; + return ret; +} + +static void free_iommu_rlookup_entry(void) +{ + struct loongarch_iommu *iommu = NULL; + struct iommu_rlookup_entry *rlookupentry = NULL; + + while (!list_empty(&la_iommu_list)) { + iommu = list_first_entry(&la_iommu_list, struct loongarch_iommu, list); + list_del(&iommu->list); + kfree(iommu); + } + + while (!list_empty(&la_rlookup_iommu_list)) { + rlookupentry = list_first_entry(&la_rlookup_iommu_list, + struct iommu_rlookup_entry, list); + + list_del(&rlookupentry->list); + if (rlookupentry->rlookup_table != NULL) { + free_pages( + (unsigned long)rlookupentry->rlookup_table, + get_order(rlookup_table_size)); + + rlookupentry->rlookup_table = NULL; + } + kfree(rlookupentry); + } +} + +static int __init la_iommu_setup(char *str) +{ + if (!str) + return -EINVAL; + while (*str) { + if (!strncmp(str, "on", 2)) { + loongarch_iommu_disable = 0; + pr_info("IOMMU enabled\n"); + } else if (!strncmp(str, "off", 3)) { + loongarch_iommu_disable = 1; + pr_info("IOMMU disabled\n"); + } + str += strcspn(str, ","); + while (*str == ',') + str++; + } + return 0; +} +__setup("loongarch_iommu=", la_iommu_setup); + +static const struct pci_device_id loongson_iommu_pci_tbl[] = { + { PCI_DEVICE(0x14, 0x3c0f) }, + { PCI_DEVICE(0x14, 0x7a1f) }, + { 0, } +}; + +static struct pci_driver loongarch_iommu_driver = { + .name = "loongarch-iommu", + .id_table = loongson_iommu_pci_tbl, + .probe = loongarch_iommu_probe, + .remove = loongarch_iommu_remove, +}; + 
+static int __init loongarch_iommu_driver_init(void) +{ + int ret = 0; + + if (loongarch_iommu_disable == 0) { + ret = loongarch_iommu_ivrs_init(); + if (ret != 0) { + free_iommu_rlookup_entry(); + pr_err("Failed to init iommu by ivrs\n"); + } + + ret = pci_register_driver(&loongarch_iommu_driver); + if (ret != 0) { + pr_err("Failed to register IOMMU driver\n"); + return ret; + } + } + return ret; +} + +static void __exit loongarch_iommu_driver_exit(void) +{ + struct loongarch_iommu *iommu = NULL; + + if (loongarch_iommu_disable == 0) { + list_for_each_entry(iommu, &la_iommu_list, list) { + iommu_device_sysfs_remove(&iommu->iommu_dev); + iommu_device_unregister(&iommu->iommu_dev); + loongarch_iommu_remove(iommu->pdev); + } + free_iommu_rlookup_entry(); + pci_unregister_driver(&loongarch_iommu_driver); + } +} + +module_init(loongarch_iommu_driver_init); +module_exit(loongarch_iommu_driver_exit); diff --git a/drivers/iommu/loongarch_iommu.h b/drivers/iommu/loongarch_iommu.h new file mode 100644 index 000000000000..cf5640d95900 --- /dev/null +++ b/drivers/iommu/loongarch_iommu.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Loongson IOMMU Driver + * + * Copyright (C) 2020-2021 Loongson Technology Ltd. + * Author: Lv Chen + * Wang Yang + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef LOONGARCH_IOMMU_H +#define LOONGARCH_IOMMU_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#define IOVA_WIDTH 47 + +/* Bit value definition for I/O PTE fields */ +#define IOMMU_PTE_PR (1ULL << 0) /* Present */ +#define IOMMU_PTE_HP (1ULL << 1) /* HugePage */ +#define IOMMU_PTE_IR (1ULL << 2) /* Readable */ +#define IOMMU_PTE_IW (1ULL << 3) /* Writeable */ +#define IOMMU_PTE_RW (IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW) + +#define iommu_pte_present(ptep) ((*ptep != 0)) +#define iommu_pte_huge(ptep) ((*ptep) & IOMMU_PTE_HP) + +#define LA_IOMMU_PGSIZE (SZ_16K | SZ_32M) + +#define IOMMU_PT_LEVEL0 0x00 +#define IOMMU_PT_LEVEL1 0x01 + +/* IOMMU page table */ +#define IOMMU_PAGE_SHIFT PAGE_SHIFT +#define IOMMU_PAGE_SIZE (_AC(1, UL) << IOMMU_PAGE_SHIFT) +#define IOMMU_LEVEL_STRIDE (IOMMU_PAGE_SHIFT - 3) +#define IOMMU_PTRS_PER_LEVEL (IOMMU_PAGE_SIZE >> 3) +#define IOMMU_LEVEL_SHIFT(n) (((n) * IOMMU_LEVEL_STRIDE) + IOMMU_PAGE_SHIFT) +#define IOMMU_LEVEL_SIZE(n) (_AC(1, UL) << (((n) * IOMMU_LEVEL_STRIDE) + IOMMU_PAGE_SHIFT)) +#define IOMMU_LEVEL_MASK(n) (~(IOMMU_LEVEL_SIZE(n) - 1)) +#define IOMMU_LEVEL_MAX DIV_ROUND_UP((IOVA_WIDTH - IOMMU_PAGE_SHIFT), IOMMU_LEVEL_STRIDE) +#define IOMMU_PAGE_MASK (~(IOMMU_PAGE_SIZE - 1)) + +#define IOMMU_HPAGE_SIZE (1UL << IOMMU_LEVEL_SHIFT(IOMMU_PT_LEVEL1)) +#define IOMMU_HPAGE_MASK (~(IOMMU_HPAGE_SIZE - 1)) + +/* wired | index | domain | shift */ +#define LA_IOMMU_WIDS 0x10 +/* valid | busy | tlbar/aw | cmd */ +#define LA_IOMMU_VBTC 0x14 +#define IOMMU_PGTABLE_BUSY (1 << 16) +/* enable |index | valid | domain | bdf */ +#define LA_IOMMU_EIVDB 0x18 +/* enable | valid | cmd */ +#define LA_IOMMU_CMD 0x1C +#define LA_IOMMU_PGD0_LO 0x20 +#define LA_IOMMU_PGD0_HI 0x24 +#define STEP_PGD 0x8 +#define STEP_PGD_SHIFT 3 +#define LA_IOMMU_PGD_LO(domain_id) \ + (LA_IOMMU_PGD0_LO + ((domain_id) << STEP_PGD_SHIFT)) +#define LA_IOMMU_PGD_HI(domain_id) \ + (LA_IOMMU_PGD0_HI + ((domain_id) << 
STEP_PGD_SHIFT)) + +#define LA_IOMMU_DIR_CTRL0 0xA0 +#define LA_IOMMU_DIR_CTRL1 0xA4 +#define LA_IOMMU_DIR_CTRL(x) (LA_IOMMU_DIR_CTRL0 + ((x) << 2)) + +#define LA_IOMMU_SAFE_BASE_HI 0xE0 +#define LA_IOMMU_SAFE_BASE_LO 0xE4 +#define LA_IOMMU_EX_ADDR_LO 0xE8 +#define LA_IOMMU_EX_ADDR_HI 0xEC + +#define LA_IOMMU_PFM_CNT_EN 0x100 + +#define LA_IOMMU_RD_HIT_CNT_0 0x110 +#define LA_IOMMU_RD_MISS_CNT_O 0x114 +#define LA_IOMMU_WR_HIT_CNT_0 0x118 +#define LA_IOMMU_WR_MISS_CNT_0 0x11C +#define LA_IOMMU_RD_HIT_CNT_1 0x120 +#define LA_IOMMU_RD_MISS_CNT_1 0x124 +#define LA_IOMMU_WR_HIT_CNT_1 0x128 +#define LA_IOMMU_WR_MISS_CNT_1 0x12C +#define LA_IOMMU_RD_HIT_CNT_2 0x130 +#define LA_IOMMU_RD_MISS_CNT_2 0x134 +#define LA_IOMMU_WR_HIT_CNT_2 0x138 +#define LA_IOMMU_WR_MISS_CNT_2 0x13C + +#define MAX_DOMAIN_ID 16 +#define MAX_ATTACHED_DEV_ID 16 + +#define iommu_ptable_end(addr, end, level) \ +({ unsigned long __boundary = ((addr) + IOMMU_LEVEL_SIZE(level)) & \ + IOMMU_LEVEL_MASK(level); \ + (__boundary - 1 < (end) - 1) ? 
__boundary : (end); \ +}) + +/* To find an entry in an iommu page table directory */ +#define iommu_page_index(addr, level) \ + (((addr) >> ((level * IOMMU_LEVEL_STRIDE) + IOMMU_PAGE_SHIFT)) \ + & (IOMMU_PTRS_PER_LEVEL - 1)) + +struct loongarch_iommu { + struct list_head list; /* for la_iommu_list */ + spinlock_t domain_bitmap_lock; /* Lock for domain allocing */ + spinlock_t dom_info_lock; /* Lock for dom_list */ + void *domain_bitmap; /* Bitmap of global domains */ + void *devtable_bitmap; /* Bitmap of devtable */ + struct list_head dom_list; /* List of all domain privates */ + /* PCI device id of the IOMMU device */ + u16 devid; + int segment; /* PCI segment# */ + /* iommu configures the register space base address */ + void *confbase; + /* iommu configures the register space physical base address */ + resource_size_t confbase_phy; + /* iommu configures the register space size */ + resource_size_t conf_size; + struct pci_dev *pdev; + /* Handle for IOMMU core code */ + struct iommu_device iommu_dev; +} loongarch_iommu; + +struct iommu_rlookup_entry { + struct list_head list; + struct loongarch_iommu **rlookup_table; + int pcisegment; +}; + +struct iommu_info { + struct list_head list; /* for dom_info->iommu_devlist */ + struct loongarch_iommu *iommu; + spinlock_t devlock; /* priv dev list lock */ + struct list_head dev_list; /* List of all devices in this domain iommu */ + unsigned int dev_cnt; /* devices assigned to this domain iommu */ + short id; +} iommu_info; + +/* One vm is equal to a domain,one domain has a priv */ +struct dom_info { + struct list_head iommu_devlist; + struct iommu_domain domain; + struct mutex ptl_lock; /* Lock for page table */ + void *pgd; + spinlock_t lock; /* Lock for dom_info->iommu_devlist */ +} dom_info; + +struct dom_entry { + struct list_head list; /* for loongarch_iommu->dom_list */ + struct dom_info *domain_info; +} dom_entry; + +/* A device for passthrough */ +struct la_iommu_dev_data { + struct list_head list; /* for 
iommu_entry->dev_list */ + struct loongarch_iommu *iommu; + struct iommu_info *iommu_entry; + struct iommu_domain *domain; + struct device *dev; + unsigned short bdf; + int count; + int index; /* index in device table */ +}; + +static inline unsigned long *iommu_pte_offset(unsigned long *ptep, unsigned long addr, int level) +{ + return ptep + iommu_page_index(addr, level); +} +#endif /* LOONGARCH_IOMMU_H */ diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 5b1dbdca3253..fe5555a9011e 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4444,6 +4444,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000, quirk_bridge_cavm_thrx2_pcie_root); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084, quirk_bridge_cavm_thrx2_pcie_root); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LOONGSON, 0x3c09, + quirk_bridge_cavm_thrx2_pcie_root); /* * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero) @@ -5173,6 +5175,8 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs }, /* Wangxun nics */ { PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs }, + { PCI_VENDOR_ID_LOONGSON, 0x3c09, pci_quirk_xgene_acs}, + { PCI_VENDOR_ID_LOONGSON, 0x3c19, pci_quirk_xgene_acs}, { 0 } }; diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index d80b6ffefd9d..b9b459d9b073 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -39,7 +39,7 @@ config VFIO_GROUP config VFIO_CONTAINER bool "Support for the VFIO container /dev/vfio/vfio" - select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64 || SW64) + select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64 || SW64 || LOONGARCH) depends on VFIO_GROUP default y help -- Gitee From fb093420f0ca38b458d7ada270641f5e90f078a6 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Mon, 15 Jan 2024 11:07:31 +0100 Subject: [PATCH 1636/2138] uprobes: use pagesize-aligned virtual address when replacing pages ANBZ: 
#11732 commit 4dca82d14174fe53f656a6bc32398db1bdd8f481 upstream uprobes passes an unaligned page mapping address to folio_add_new_anon_rmap(), which ends up triggering a VM_BUG_ON() we recently extended in commit 372cbd4d5a066 ("mm: non-pmd-mappable, large folios for folio_add_new_anon_rmap()"). Arguably, this is uprobes code doing something wrong; however, for the time being it would have likely worked in rmap code because __folio_set_anon() would set folio->index to the same value. Looking at __replace_page(), we'd also pass slightly wrong values to mmu_notifier_range_init(), page_vma_mapped_walk(), flush_cache_page(), ptep_clear_flush() and set_pte_at_notify(). I suspect most of them are fine, but let's just mark the introducing commit as the one needed fixing. I don't think CC stable is warranted. We'll add more sanity checks in rmap code separately, to make sure that we always get properly aligned addresses. Link: https://lkml.kernel.org/r/20240115100731.91007-1-david@redhat.com Fixes: c517ee744b96 ("uprobes: __replace_page() should not use page_address_in_vma()") Signed-off-by: David Hildenbrand Reported-by: Jiri Olsa Closes: https://lkml.kernel.org/r/ZaMR2EWN-HvlCfUl@krava Tested-by: Jiri Olsa Reviewed-by: Ryan Roberts Acked-by: Oleg Nesterov Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: Mark Rutland Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Namhyung Kim Cc: Ian Rogers Cc: Adrian Hunter Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4081 --- kernel/events/uprobes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 9b870747abb0..e71c35cf15ff 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -537,7 +537,7 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, } } - ret = __replace_page(vma, vaddr, old_page, new_page); + ret = 
__replace_page(vma, vaddr & PAGE_MASK, old_page, new_page); if (new_page) put_page(new_page); put_old: -- Gitee From f0b51aa402b6ea8f6963fa1fabbb384dd2e57826 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Thu, 6 Jun 2024 13:57:02 +0800 Subject: [PATCH 1637/2138] anolis: crypto: ccp: remove multi-level pointers processing for vpsp ANBZ: #11635 Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4077 --- arch/x86/kvm/svm/svm.c | 4 +- arch/x86/kvm/x86.c | 7 +- drivers/crypto/ccp/hygon/csv-dev.c | 93 +++++- drivers/crypto/ccp/hygon/psp-dev.c | 94 ------ drivers/crypto/ccp/hygon/vpsp.c | 471 +++-------------------------- include/linux/psp-hygon.h | 17 +- include/uapi/linux/kvm_para.h | 3 +- 7 files changed, 150 insertions(+), 539 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 1ba907ebaa3e..69218e009811 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4993,8 +4993,8 @@ static int kvm_hygon_arch_hypercall(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 }; switch (nr) { - case KVM_HC_PSP_OP: - ret = kvm_pv_psp_op(&vpsp, a0, a1, a2, a3); + case KVM_HC_PSP_COPY_FORWARD_OP: + ret = kvm_pv_psp_copy_forward_op(&vpsp, a0, a1, a2); break; default: diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index b46accdbf59e..5649abfd751c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9897,7 +9897,9 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) } if (static_call(kvm_x86_get_cpl)(vcpu) != 0 && - !(is_x86_vendor_hygon() && (nr == KVM_HC_VM_ATTESTATION || nr == KVM_HC_PSP_OP))) { + !(is_x86_vendor_hygon() && (nr == KVM_HC_VM_ATTESTATION + || nr == KVM_HC_PSP_OP_OBSOLETE + || nr == KVM_HC_PSP_COPY_FORWARD_OP))) { ret = -KVM_EPERM; goto out; } @@ -9965,7 +9967,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) if (is_x86_vendor_hygon() && kvm_x86_ops.vm_attestation) ret = static_call(kvm_x86_vm_attestation)(vcpu->kvm, a0, a1); break; - case 
KVM_HC_PSP_OP: + case KVM_HC_PSP_OP_OBSOLETE: + case KVM_HC_PSP_COPY_FORWARD_OP: ret = -KVM_ENOSYS; if (kvm_x86_ops.arch_hypercall) ret = static_call(kvm_x86_arch_hypercall)(vcpu->kvm, nr, a0, a1, a2, a3); diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index f49a0ecebb3c..d611db54cbdf 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -15,6 +15,7 @@ #include #include #include +#include #include @@ -1064,7 +1065,97 @@ static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, return rb_supported; } -int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret); +int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + phys_addr_t phys_addr; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + if (data && WARN_ON_ONCE(!virt_addr_valid(data))) + return -EINVAL; + + /* Get the physical address of the command buffer */ + phys_addr = PUT_PSP_VID(__psp_pa(data), vid); + phys_lsb = data ? lower_32_bits(phys_addr) : 0; + phys_msb = data ? 
upper_32_bits(phys_addr) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); + + print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, + hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, ®, *hygon_psp_hooks.psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + *hygon_psp_hooks.psp_dead = true; + + return ret; + } + + *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, + hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); + + return ret; +} + +int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) { + return -EBUSY; + } + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } + + rc = __vpsp_do_cmd_locked(vid, cmd, data, psp_ret); + + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return rc; +} + /* * Try to obtain the result again by the command index, this * 
interface is used in ringbuffer mode diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 124d305c956b..59380d1a65b0 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -496,100 +496,6 @@ static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) return ret; } -int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) -{ - struct psp_device *psp = psp_master; - struct sev_device *sev; - phys_addr_t phys_addr; - unsigned int phys_lsb, phys_msb; - unsigned int reg, ret = 0; - - if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - if (*hygon_psp_hooks.psp_dead) - return -EBUSY; - - sev = psp->sev_data; - - if (data && WARN_ON_ONCE(!virt_addr_valid(data))) - return -EINVAL; - - /* Get the physical address of the command buffer */ - phys_addr = PUT_PSP_VID(__psp_pa(data), vid); - phys_lsb = data ? lower_32_bits(phys_addr) : 0; - phys_msb = data ? upper_32_bits(phys_addr) : 0; - - dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", - cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); - - print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, - hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); - - iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); - iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); - - sev->int_rcvd = 0; - - reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; - iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); - - /* wait for command completion */ - ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, ®, *hygon_psp_hooks.psp_timeout); - if (ret) { - if (psp_ret) - *psp_ret = 0; - - dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); - *hygon_psp_hooks.psp_dead = true; - - return ret; - } - - *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout; - - if (psp_ret) - *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); - - if 
(FIELD_GET(PSP_CMDRESP_STS, reg)) { - dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", - cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); - ret = -EIO; - } - - print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, - hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); - - return ret; -} - -int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) -{ - int rc; - int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - - if (!hygon_psp_hooks.sev_dev_hooks_installed) - return -ENODEV; - - if (mutex_enabled) { - if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, - PSP_MUTEX_TIMEOUT) != 1) { - return -EBUSY; - } - } else { - mutex_lock(hygon_psp_hooks.sev_cmd_mutex); - } - - rc = __vpsp_do_cmd_locked(vid, cmd, data, psp_ret); - - if (mutex_enabled) - psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); - else - mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); - - return rc; -} - int psp_do_cmd(int cmd, void *data, int *psp_ret) { int rc; diff --git a/drivers/crypto/ccp/hygon/vpsp.c b/drivers/crypto/ccp/hygon/vpsp.c index 183355b1c222..bcdad2bff848 100644 --- a/drivers/crypto/ccp/hygon/vpsp.c +++ b/drivers/crypto/ccp/hygon/vpsp.c @@ -10,8 +10,9 @@ #include #include #include -#include +#include #include +#include #ifdef pr_fmt #undef pr_fmt @@ -32,381 +33,53 @@ * The primary implementation logic of virtual PSP in kernel mode * call trace: * guest command(vmmcall) - * | - * | |-> kvm_pv_psp_cmd_pre_op - * | | - * | | -> guest_addr_map_table_op - * | | - * | | -> guest_multiple_level_gpa_replace - * | - * kvm_pv_psp_op->|-> vpsp_try_do_cmd/vpsp_try_get_result <====> psp device driver + * |-> kvm_pv_psp_cmd_pre_op * | + * kvm_pv_psp_copy_forward_op->|-> vpsp_try_do_cmd/vpsp_try_get_result <====> psp device driver * | * |-> kvm_pv_psp_cmd_post_op - * | - * | -> guest_addr_map_table_op - * | - * | -> guest_multiple_level_gpa_restore */ -#define TKM_CMD_ID_MIN 0x120 -#define TKM_CMD_ID_MAX 0x12f - struct psp_cmdresp_head { uint32_t buf_size; 
uint32_t cmdresp_size; uint32_t cmdresp_code; } __packed; -/** - * struct map_tbl - multilevel pointer address mapping table - * - * @parent_pa: parent address block's physics address - * @offset: offset in parent address block - * @size: submemory size - * @align: submemory align size, hva need to keep size alignment in kernel - * @hva: submemory copy block in kernel virtual address - */ -struct map_tbl { - uint64_t parent_pa; - uint32_t offset; - uint32_t size; - uint32_t align; - uint64_t hva; -} __packed; - -struct addr_map_tbls { - uint32_t tbl_nums; - struct map_tbl tbl[]; -} __packed; - -/* gpa and hva conversion maintenance table for internal use */ -struct gpa2hva_t { - void *hva; - gpa_t gpa; -}; - -struct gpa2hva_tbls { - uint32_t max_nums; - uint32_t tbl_nums; - struct gpa2hva_t tbl[]; -}; - /* save command data for restoring later */ struct vpsp_hbuf_wrapper { void *data; uint32_t data_size; - struct addr_map_tbls *map_tbls; - struct gpa2hva_tbls *g2h_tbls; }; -/* - * Virtual PSP host memory information maintenance, used in ringbuffer mode - */ +/* Virtual PSP host memory information maintenance, used in ringbuffer mode */ struct vpsp_hbuf_wrapper g_hbuf_wrap[CSV_COMMAND_PRIORITY_NUM][CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE] = {0}; -void __maybe_unused map_tbl_dump(const char *title, struct addr_map_tbls *tbls) -{ - int i; - - pr_info("[%s]-> map_tbl_nums: %d", title, tbls->tbl_nums); - for (i = 0; i < tbls->tbl_nums; i++) { - pr_info("\t[%d]: parent_pa: 0x%llx, offset: 0x%x, size: 0x%x, align: 0x%x hva: 0x%llx", - i, tbls->tbl[i].parent_pa, tbls->tbl[i].offset, - tbls->tbl[i].size, tbls->tbl[i].align, tbls->tbl[i].hva); - } - pr_info("\n"); -} - -void __maybe_unused g2h_tbl_dump(const char *title, struct gpa2hva_tbls *tbls) -{ - int i; - - pr_info("[%s]-> g2h_tbl_nums: %d, max_nums: %d", title, tbls->tbl_nums, - tbls->max_nums); - for (i = 0; i < tbls->tbl_nums; i++) - pr_info("\t[%d]: hva: 0x%llx, gpa: 0x%llx", i, - (uint64_t)tbls->tbl[i].hva, 
tbls->tbl[i].gpa); - pr_info("\n"); -} - -static int gpa2hva_tbl_fill(struct gpa2hva_tbls *tbls, void *hva, gpa_t gpa) -{ - uint32_t fill_idx = tbls->tbl_nums; - - if (fill_idx >= tbls->max_nums) - return -EFAULT; - - tbls->tbl[fill_idx].hva = hva; - tbls->tbl[fill_idx].gpa = gpa; - tbls->tbl_nums = fill_idx + 1; - - return 0; -} - -static void clear_hva_in_g2h_tbls(struct gpa2hva_tbls *g2h, void *hva) -{ - int i; - - for (i = 0; i < g2h->tbl_nums; i++) { - if (g2h->tbl[i].hva == hva) - g2h->tbl[i].hva = NULL; - } -} - -static void *get_hva_from_gpa(struct gpa2hva_tbls *g2h, gpa_t gpa) -{ - int i; - - for (i = 0; i < g2h->tbl_nums; i++) { - if (g2h->tbl[i].gpa == gpa) - return (void *)g2h->tbl[i].hva; - } - - return NULL; -} - -static gpa_t get_gpa_from_hva(struct gpa2hva_tbls *g2h, void *hva) -{ - int i; - - for (i = 0; i < g2h->tbl_nums; i++) { - if (g2h->tbl[i].hva == hva) - return g2h->tbl[i].gpa; - } - - return 0; -} - -/* - * The virtual machine multilevel pointer command buffer handles the - * execution entity, synchronizes the data in the original gpa to the - * newly allocated hva(host virtual address) and updates the mapping - * relationship in the parent memory - */ -static int guest_multiple_level_gpa_replace(struct kvm_vpsp *vpsp, - struct map_tbl *tbl, struct gpa2hva_tbls *g2h) -{ - int ret = 0; - uint32_t sub_block_size; - uint64_t sub_paddr; - void *parent_kva = NULL; - - /* kmalloc memory for child block */ - sub_block_size = max(tbl->size, tbl->align); - tbl->hva = (uint64_t)kzalloc(sub_block_size, GFP_KERNEL); - if (!tbl->hva) - return -ENOMEM; - - /* get child gpa from parent gpa */ - if (unlikely(vpsp->read_guest(vpsp->kvm, tbl->parent_pa + tbl->offset, - &sub_paddr, sizeof(sub_paddr)))) { - pr_err("[%s]: kvm_read_guest for parent gpa failed\n", - __func__); - ret = -EFAULT; - goto e_free; - } - - /* copy child block data from gpa to hva */ - if (unlikely(vpsp->read_guest(vpsp->kvm, sub_paddr, (void *)tbl->hva, - tbl->size))) { - pr_err("[%s]: 
kvm_read_guest for sub_data failed\n", - __func__); - ret = -EFAULT; - goto e_free; - } - - /* get hva from gpa */ - parent_kva = get_hva_from_gpa(g2h, tbl->parent_pa); - if (unlikely(!parent_kva)) { - pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n", - __func__); - ret = -EFAULT; - goto e_free; - } - - /* replace pa of hva from gpa */ - *(uint64_t *)((uint8_t *)parent_kva + tbl->offset) = __psp_pa(tbl->hva); - - /* fill in gpa and hva to map table for restoring later */ - if (unlikely(gpa2hva_tbl_fill(g2h, (void *)tbl->hva, sub_paddr))) { - pr_err("[%s]: gpa2hva_tbl_fill for sub_addr failed\n", - __func__); - ret = -EFAULT; - goto e_free; - } - - return ret; - -e_free: - kfree((const void *)tbl->hva); - return ret; -} - -/* The virtual machine multi-level pointer command memory handles the - * execution entity, synchronizes the data in the hva(host virtual - * address) back to the memory corresponding to the gpa, and restores - * the mapping relationship in the original parent memory - */ -static int guest_multiple_level_gpa_restore(struct kvm_vpsp *vpsp, - struct map_tbl *tbl, struct gpa2hva_tbls *g2h) -{ - int ret = 0; - gpa_t sub_gpa; - void *parent_hva = NULL; - - /* get gpa from hva */ - sub_gpa = get_gpa_from_hva(g2h, (void *)tbl->hva); - if (unlikely(!sub_gpa)) { - pr_err("[%s]: get_gpa_from_hva for sub_gpa failed\n", - __func__); - ret = -EFAULT; - goto end; - } - - /* copy child block data from hva to gpa */ - if (unlikely(vpsp->write_guest(vpsp->kvm, sub_gpa, (void *)tbl->hva, - tbl->size))) { - pr_err("[%s]: kvm_write_guest for sub_gpa failed\n", - __func__); - ret = -EFAULT; - goto end; - } - - /* get parent hva from parent gpa */ - parent_hva = get_hva_from_gpa(g2h, tbl->parent_pa); - if (unlikely(!parent_hva)) { - pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n", - __func__); - ret = -EFAULT; - goto end; - } - - /* restore gpa from pa of hva in parent block */ - *(uint64_t *)((uint8_t *)parent_hva + tbl->offset) = sub_gpa; - - /* free 
child block memory */ - clear_hva_in_g2h_tbls(g2h, (void *)tbl->hva); - kfree((const void *)tbl->hva); - tbl->hva = 0; - -end: - return ret; -} - -/* - * The virtual machine multilevel pointer command memory processing - * executes upper-layer abstract interfaces, including replacing and - * restoring two sub-processing functions - */ -static int guest_addr_map_table_op(struct kvm_vpsp *vpsp, struct gpa2hva_tbls *g2h, - struct addr_map_tbls *map_tbls, int op) -{ - int ret = 0; - int i; - uint64_t *sub_paddr_ptr; - - if (op) { - for (i = map_tbls->tbl_nums - 1; i >= 0; i--) { - /* check if the gpa of root points to itself */ - if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) { - sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva - + map_tbls->tbl[i].offset); - /* if the child paddr is equal to the parent paddr */ - if ((uint64_t)g2h->tbl[0].hva == map_tbls->tbl[i].hva) { - *sub_paddr_ptr = g2h->tbl[0].gpa; - continue; - } - } - - /* restore new pa of kva with the gpa from guest */ - if (unlikely(guest_multiple_level_gpa_restore(vpsp, - &map_tbls->tbl[i], g2h))) { - pr_err("[%s]: guest_multiple_level_gpa_restore failed\n", - __func__); - ret = -EFAULT; - goto end; - } - } - } else { - for (i = 0; i < map_tbls->tbl_nums; i++) { - /* check if the gpa of root points to itself */ - if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) { - sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva - + map_tbls->tbl[i].offset); - /* if the child paddr is equal to the parent paddr */ - if (*sub_paddr_ptr == map_tbls->tbl[i].parent_pa) { - *sub_paddr_ptr = __psp_pa(g2h->tbl[0].hva); - map_tbls->tbl[i].hva = (uint64_t)g2h->tbl[0].hva; - continue; - } - } - - /* check if parent_pa is valid */ - if (unlikely(!get_hva_from_gpa(g2h, map_tbls->tbl[i].parent_pa))) { - pr_err("[%s]: g2h->tbl[%d].parent_pa: 0x%llx is invalid\n", - __func__, i, map_tbls->tbl[i].parent_pa); - ret = -EFAULT; - goto end; - } - - /* replace the gpa from guest with the new pa of kva */ - if 
(unlikely(guest_multiple_level_gpa_replace(vpsp, - &map_tbls->tbl[i], g2h))) { - pr_err("[%s]: guest_multiple_level_gpa_replace failed\n", - __func__); - ret = -EFAULT; - goto end; - } - } - } - -end: - return ret; -} - -static void kvm_pv_psp_mem_free(struct gpa2hva_tbls *g2h, struct addr_map_tbls - *map_tbl, void *data) -{ - int i; - - if (g2h) { - for (i = 0; i < g2h->tbl_nums; i++) { - if (g2h->tbl[i].hva && (g2h->tbl[i].hva != data)) { - kfree(g2h->tbl[i].hva); - g2h->tbl[i].hva = NULL; - } - } - kfree(g2h); - } - - kfree(map_tbl); - kfree(data); -} - /* * Obtain the VM command and preprocess the pointer mapping table * information in the command buffer, the processed data will be * used to interact with the psp device */ static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, - gpa_t table_gpa, struct vpsp_hbuf_wrapper *hbuf) + struct vpsp_hbuf_wrapper *hbuf) { int ret = 0; void *data = NULL; struct psp_cmdresp_head psp_head; uint32_t data_size; - struct addr_map_tbls map_head, *map_tbls = NULL; - uint32_t map_tbl_size; - struct gpa2hva_tbls *g2h = NULL; - uint32_t g2h_tbl_size; if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, &psp_head, sizeof(struct psp_cmdresp_head)))) return -EFAULT; data_size = psp_head.buf_size; + if ((((uintptr_t)data_gpa + data_size - 1) & ~PSP_2MB_MASK) + != ((uintptr_t)data_gpa & ~PSP_2MB_MASK)) { + pr_err("data_gpa %llx, data_size %d crossing 2MB\n", (u64)data_gpa, data_size); + return -EFAULT; + } + data = kzalloc(data_size, GFP_KERNEL); if (!data) return -ENOMEM; @@ -416,89 +89,18 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, goto end; } - if (table_gpa) { - /* parse address map table from guest */ - if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, &map_head, - sizeof(struct addr_map_tbls)))) { - pr_err("[%s]: kvm_read_guest for map_head failed\n", - __func__); - ret = -EFAULT; - goto end; - } - - map_tbl_size = sizeof(struct addr_map_tbls) + map_head.tbl_nums - * sizeof(struct 
map_tbl); - map_tbls = kzalloc(map_tbl_size, GFP_KERNEL); - if (!map_tbls) { - ret = -ENOMEM; - goto end; - } - - if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, map_tbls, - map_tbl_size))) { - pr_err("[%s]: kvm_read_guest for map_tbls failed\n", - __func__); - ret = -EFAULT; - goto end; - } - - /* init for gpa2hva table*/ - g2h_tbl_size = sizeof(struct gpa2hva_tbls) + (map_head.tbl_nums - + 1) * sizeof(struct gpa2hva_t); - g2h = kzalloc(g2h_tbl_size, GFP_KERNEL); - if (!g2h) { - ret = -ENOMEM; - goto end; - } - g2h->max_nums = map_head.tbl_nums + 1; - - /* fill the root parent address */ - if (gpa2hva_tbl_fill(g2h, data, data_gpa)) { - pr_err("[%s]: gpa2hva_tbl_fill for root data address failed\n", - __func__); - ret = -EFAULT; - goto end; - } - - if (guest_addr_map_table_op(vpsp, g2h, map_tbls, 0)) { - pr_err("[%s]: guest_addr_map_table_op for replacing failed\n", - __func__); - ret = -EFAULT; - goto end; - } - } - hbuf->data = data; hbuf->data_size = data_size; - hbuf->map_tbls = map_tbls; - hbuf->g2h_tbls = g2h; end: - if (ret && data) - kfree(data); return ret; } -/* - * The executed command data is recovered according to the multilevel - * pointer of the mapping table when the command has finished - * interacting with the psp device - */ static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, - struct vpsp_hbuf_wrapper *hbuf) + struct vpsp_hbuf_wrapper *hbuf) { int ret = 0; - if (hbuf->map_tbls) { - if (guest_addr_map_table_op(vpsp, hbuf->g2h_tbls, - hbuf->map_tbls, 1)) { - pr_err("[%s]: guest_addr_map_table_op for restoring failed\n", - __func__); - ret = -EFAULT; - goto end; - } - } - /* restore cmdresp's buffer from context */ if (unlikely(vpsp->write_guest(vpsp->kvm, data_gpa, hbuf->data, hbuf->data_size))) { @@ -507,12 +109,9 @@ static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, ret = -EFAULT; goto end; } - end: - /* release memory and clear hbuf */ - kvm_pv_psp_mem_free(hbuf->g2h_tbls, hbuf->map_tbls, 
hbuf->data); + kfree(hbuf->data); memset(hbuf, 0, sizeof(*hbuf)); - return ret; } @@ -523,11 +122,16 @@ static int cmd_type_is_tkm(int cmd) return 0; } -/* - * The primary implementation interface of virtual PSP in kernel mode +/** + * @brief kvm_pv_psp_copy_forward_op is used for ordinary virtual machines to copy data + * in gpa to host memory and send it to psp for processing. + * + * @param vpsp points to kvm related data + * @param cmd psp cmd id, bit 31 indicates queue priority + * @param data_gpa guest physical address of input data + * @param psp_ret_gpa guest physical address of psp_ret */ -int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, - gpa_t table_gpa) +int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa) { int ret = 0; struct vpsp_ret psp_ret = {0}; @@ -537,13 +141,18 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_ uint32_t index = 0; uint32_t vid = 0; + if (vcmd->cmd_id != TKM_PSP_CMDID_OFFSET) { + pr_err("[%s]: unsupported cmd id %x\n", __func__, vcmd->cmd_id); + return -EINVAL; + } + // only tkm cmd need vid if (cmd_type_is_tkm(vcmd->cmd_id)) { // check the permission to use the default vid when no vid is set ret = vpsp_get_vid(&vid, vpsp->kvm->userspace_pid); if (ret && !vpsp_get_default_vid_permission()) { pr_err("[%s]: not allowed tkm command without vid\n", __func__); - return -EFAULT; + return -EPERM; } } @@ -553,8 +162,8 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_ switch (psp_ret.status) { case VPSP_INIT: - /* multilevel pointer replace*/ - ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, table_gpa, &hbuf); + /* copy data from guest */ + ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, &hbuf); if (unlikely(ret)) { psp_ret.status = VPSP_FINISH; pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n", @@ -567,20 +176,18 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_ ret = 
vpsp_try_do_cmd(vid, cmd, (void *)hbuf.data, (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { - pr_err("[%s]: vpsp_do_cmd failed\n", __func__); + pr_err("[%s]: vpsp_try_do_cmd failed\n", __func__); ret = -EFAULT; goto end; } - ret = -EFAULT; if (psp_ret.status == VPSP_RUNNING) { - /* backup host memory message for restoring later*/ prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : CSV_COMMAND_PRIORITY_LOW; g_hbuf_wrap[prio][psp_ret.index] = hbuf; - ret = 0; + break; + } else if (psp_ret.status == VPSP_FINISH) { - /* restore multilevel pointer data */ ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &hbuf); if (unlikely(ret)) { pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", @@ -604,20 +211,21 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_ goto end; } - ret = -EFAULT; if (psp_ret.status == VPSP_RUNNING) { ret = 0; + goto end; } else if (psp_ret.status == VPSP_FINISH) { - /* restore multilevel pointer data */ + /* copy data to guest */ ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &g_hbuf_wrap[prio][index]); if (unlikely(ret)) { pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", __func__); ret = -EFAULT; - goto end; } + goto end; } + ret = -EFAULT; break; default: @@ -629,4 +237,5 @@ int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_ /* return psp_ret to guest */ vpsp->write_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)); return ret; -} EXPORT_SYMBOL_GPL(kvm_pv_psp_op); +} +EXPORT_SYMBOL_GPL(kvm_pv_psp_copy_forward_op); diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 7d95340c40ff..fd2b2f677c2d 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -448,6 +448,11 @@ struct kvm_vpsp { int (*read_guest)(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); }; +#define PSP_2MB_MASK (2*1024*1024 - 1) +#define TKM_CMD_ID_MIN 0x120 +#define TKM_CMD_ID_MAX 0x12f +#define TKM_PSP_CMDID TKM_CMD_ID_MIN +#define TKM_PSP_CMDID_OFFSET 0x128 #define 
PSP_VID_MASK 0xff #define PSP_VID_SHIFT 56 #define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) @@ -456,8 +461,6 @@ struct kvm_vpsp { #ifdef CONFIG_CRYPTO_DEV_SP_PSP -int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret); - int psp_do_cmd(int cmd, void *data, int *psp_ret); int csv_ring_buffer_queue_init(void); @@ -480,11 +483,9 @@ int vpsp_get_vid(uint32_t *vid, pid_t pid); int vpsp_get_default_vid_permission(void); -int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, - gpa_t table_gpa); -#else /* !CONFIG_CRYPTO_DEV_SP_PSP */ +int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa); -static inline int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) { return -ENODEV; } +#else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; } @@ -508,8 +509,8 @@ static inline int vpsp_get_default_vid_permission(void) { return -ENODEV; } static inline int -kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, - gpa_t psp_ret_gpa, gpa_t table_gpa) { return -ENODEV; } +kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, + gpa_t psp_ret_gpa) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index 86369b7a5733..f2fc642db945 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -31,7 +31,8 @@ #define KVM_HC_SCHED_YIELD 11 #define KVM_HC_MAP_GPA_RANGE 12 #define KVM_HC_VM_ATTESTATION 100 /* Specific to Hygon CPU */ -#define KVM_HC_PSP_OP 101 /* Specific to Hygon platform */ +#define KVM_HC_PSP_OP_OBSOLETE 101 /* Specific to Hygon platform */ +#define KVM_HC_PSP_COPY_FORWARD_OP 102 /* Specific to Hygon platform */ /* * hypercalls use architecture specific -- Gitee From 
cd8fb6f85c5e547db1545e90658992ba95326bcd Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Wed, 29 May 2024 11:00:23 +0800 Subject: [PATCH 1638/2138] anolis: crypto: ccp: support TKM run on CSV ANBZ: #11635 Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4077 --- arch/x86/kvm/svm/svm.c | 11 +- arch/x86/kvm/x86.c | 4 +- drivers/crypto/ccp/hygon/csv-dev.c | 40 ++-- drivers/crypto/ccp/hygon/psp-dev.c | 86 +++++--- drivers/crypto/ccp/hygon/vpsp.c | 331 +++++++++++++++++++++++++---- include/linux/psp-hygon.h | 47 +++- include/uapi/linux/kvm_para.h | 1 + 7 files changed, 415 insertions(+), 105 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 69218e009811..1b1d332216a4 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4989,14 +4989,23 @@ static int kvm_hygon_arch_hypercall(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 struct kvm_vpsp vpsp = { .kvm = kvm, .write_guest = kvm_write_guest, - .read_guest = kvm_read_guest + .read_guest = kvm_read_guest, + .gfn_to_pfn = gfn_to_pfn, }; + if (sev_guest(kvm)) { + vpsp.vm_handle = to_kvm_svm(kvm)->sev_info.handle; + vpsp.is_csv_guest = 1; + } + switch (nr) { case KVM_HC_PSP_COPY_FORWARD_OP: ret = kvm_pv_psp_copy_forward_op(&vpsp, a0, a1, a2); break; + case KVM_HC_PSP_FORWARD_OP: + ret = kvm_pv_psp_forward_op(&vpsp, a0, a1, a2); + break; default: ret = -KVM_ENOSYS; break; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5649abfd751c..d65a54c97924 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9899,7 +9899,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) if (static_call(kvm_x86_get_cpl)(vcpu) != 0 && !(is_x86_vendor_hygon() && (nr == KVM_HC_VM_ATTESTATION || nr == KVM_HC_PSP_OP_OBSOLETE - || nr == KVM_HC_PSP_COPY_FORWARD_OP))) { + || nr == KVM_HC_PSP_COPY_FORWARD_OP + || nr == KVM_HC_PSP_FORWARD_OP))) { ret = -KVM_EPERM; goto out; } @@ -9969,6 +9970,7 @@ int 
kvm_emulate_hypercall(struct kvm_vcpu *vcpu) break; case KVM_HC_PSP_OP_OBSOLETE: case KVM_HC_PSP_COPY_FORWARD_OP: + case KVM_HC_PSP_FORWARD_OP: ret = -KVM_ENOSYS; if (kvm_x86_ops.arch_hypercall) ret = static_call(kvm_x86_arch_hypercall)(vcpu->kvm, nr, a0, a1, a2, a3); diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index d611db54cbdf..4e6b9403f9a6 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -762,12 +762,12 @@ static int vpsp_dequeue_cmd(int prio, int index, * Populate the command from the virtual machine to the queue to * support execution in ringbuffer mode */ -static int vpsp_fill_cmd_queue(uint32_t vid, int prio, int cmd, void *data, uint16_t flags) +static int vpsp_fill_cmd_queue(int prio, int cmd, phys_addr_t phy_addr, uint16_t flags) { struct csv_cmdptr_entry cmdptr = { }; int index = -1; - cmdptr.cmd_buf_ptr = PUT_PSP_VID(__psp_pa(data), vid); + cmdptr.cmd_buf_ptr = phy_addr; cmdptr.cmd_id = cmd; cmdptr.cmd_flags = flags; @@ -1065,11 +1065,10 @@ static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, return rb_supported; } -int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) +static int __vpsp_do_cmd_locked(int cmd, phys_addr_t phy_addr, int *psp_ret) { struct psp_device *psp = psp_master; struct sev_device *sev; - phys_addr_t phys_addr; unsigned int phys_lsb, phys_msb; unsigned int reg, ret = 0; @@ -1081,20 +1080,13 @@ int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) sev = psp->sev_data; - if (data && WARN_ON_ONCE(!virt_addr_valid(data))) - return -EINVAL; - /* Get the physical address of the command buffer */ - phys_addr = PUT_PSP_VID(__psp_pa(data), vid); - phys_lsb = data ? lower_32_bits(phys_addr) : 0; - phys_msb = data ? upper_32_bits(phys_addr) : 0; + phys_lsb = phy_addr ? lower_32_bits(phy_addr) : 0; + phys_msb = phy_addr ? 
upper_32_bits(phy_addr) : 0; dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); - print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, - hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); - iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); @@ -1126,13 +1118,10 @@ int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) ret = -EIO; } - print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, - hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); - return ret; } -int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) +int vpsp_do_cmd(int cmd, phys_addr_t phy_addr, int *psp_ret) { int rc; int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); @@ -1146,7 +1135,7 @@ int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) mutex_lock(hygon_psp_hooks.sev_cmd_mutex); } - rc = __vpsp_do_cmd_locked(vid, cmd, data, psp_ret); + rc = __vpsp_do_cmd_locked(cmd, phy_addr, psp_ret); if (is_vendor_hygon() && mutex_enabled) psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); @@ -1160,7 +1149,7 @@ int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) * Try to obtain the result again by the command index, this * interface is used in ringbuffer mode */ -int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data, +int vpsp_try_get_result(uint8_t prio, uint32_t index, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) { int ret = 0; @@ -1183,8 +1172,7 @@ int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data, /* dequeue command from queue*/ vpsp_dequeue_cmd(prio, index, &cmd); - ret = __vpsp_do_cmd_locked(vid, cmd.cmd_id, data, - (int *)psp_ret); + ret = __vpsp_do_cmd_locked(cmd.cmd_id, phy_addr, (int *)psp_ret); psp_ret->status = VPSP_FINISH; vpsp_psp_mutex_unlock(); if (unlikely(ret)) { @@ 
-1227,7 +1215,7 @@ EXPORT_SYMBOL_GPL(vpsp_try_get_result); * vpsp_try_get_result interface will be used to obtain the result * later again */ -int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) +int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) { int ret = 0; int rb_supported; @@ -1242,10 +1230,10 @@ int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) (struct vpsp_cmd *)&cmd); if (rb_supported) { /* fill command in ringbuffer's queue and get index */ - index = vpsp_fill_cmd_queue(vid, prio, cmd, data, 0); + index = vpsp_fill_cmd_queue(prio, cmd, phy_addr, 0); if (unlikely(index < 0)) { /* do mailbox command if queuing failed*/ - ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); + ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret); if (unlikely(ret)) { if (ret == -EIO) { ret = 0; @@ -1261,14 +1249,14 @@ int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) } /* try to get result from the ringbuffer command */ - ret = vpsp_try_get_result(vid, prio, index, data, psp_ret); + ret = vpsp_try_get_result(prio, index, phy_addr, psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret); goto end; } } else { /* mailbox mode */ - ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); + ret = vpsp_do_cmd(cmd, phy_addr, (int *)psp_ret); if (unlikely(ret)) { if (ret == -EIO) { ret = 0; diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 59380d1a65b0..93745cce08f7 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -38,16 +38,26 @@ enum VPSP_DEV_CTRL_OPCODE { VPSP_OP_VID_DEL, VPSP_OP_SET_DEFAULT_VID_PERMISSION, VPSP_OP_GET_DEFAULT_VID_PERMISSION, + VPSP_OP_SET_GPA, }; struct vpsp_dev_ctrl { unsigned char op; + /** + * To be compatible with old user mode, + * struct vpsp_dev_ctrl must be kept at 132 bytes. 
+ */ + unsigned char resv[3]; union { unsigned int vid; // Set or check the permissions for the default VID unsigned int def_vid_perm; + struct { + u64 gpa_start; + u64 gpa_end; + } gpa; unsigned char reserved[128]; - } data; + } __packed data; }; uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) @@ -160,19 +170,15 @@ DEFINE_RWLOCK(vpsp_rwlock); #define VPSP_VID_MAX_ENTRIES 2048 #define VPSP_VID_NUM_MAX 64 -struct vpsp_vid_entry { - uint32_t vid; - pid_t pid; -}; -static struct vpsp_vid_entry g_vpsp_vid_array[VPSP_VID_MAX_ENTRIES]; +static struct vpsp_context g_vpsp_context_array[VPSP_VID_MAX_ENTRIES]; static uint32_t g_vpsp_vid_num; static int compare_vid_entries(const void *a, const void *b) { - return ((struct vpsp_vid_entry *)a)->pid - ((struct vpsp_vid_entry *)b)->pid; + return ((struct vpsp_context *)a)->pid - ((struct vpsp_context *)b)->pid; } static void swap_vid_entries(void *a, void *b, int size) { - struct vpsp_vid_entry entry; + struct vpsp_context entry; memcpy(&entry, a, size); memcpy(a, b, size); @@ -197,43 +203,41 @@ int vpsp_get_default_vid_permission(void) EXPORT_SYMBOL_GPL(vpsp_get_default_vid_permission); /** - * When the virtual machine executes the 'tkm' command, - * it needs to retrieve the corresponding 'vid' - * by performing a binary search using 'kvm->userspace_pid'. 
+ * get a vpsp context from pid */ -int vpsp_get_vid(uint32_t *vid, pid_t pid) +int vpsp_get_context(struct vpsp_context **ctx, pid_t pid) { - struct vpsp_vid_entry new_entry = {.pid = pid}; - struct vpsp_vid_entry *existing_entry = NULL; + struct vpsp_context new_entry = {.pid = pid}; + struct vpsp_context *existing_entry = NULL; read_lock(&vpsp_rwlock); - existing_entry = bsearch(&new_entry, g_vpsp_vid_array, g_vpsp_vid_num, - sizeof(struct vpsp_vid_entry), compare_vid_entries); + existing_entry = bsearch(&new_entry, g_vpsp_context_array, g_vpsp_vid_num, + sizeof(struct vpsp_context), compare_vid_entries); read_unlock(&vpsp_rwlock); if (!existing_entry) return -ENOENT; - if (vid) { - *vid = existing_entry->vid; - pr_debug("PSP: %s %d, by pid %d\n", __func__, *vid, pid); - } + + if (ctx) + *ctx = existing_entry; + return 0; } -EXPORT_SYMBOL_GPL(vpsp_get_vid); +EXPORT_SYMBOL_GPL(vpsp_get_context); /** * Upon qemu startup, this section checks whether * the '-device psp,vid' parameter is specified. * If set, it utilizes the 'vpsp_add_vid' function - * to insert the 'vid' and 'pid' values into the 'g_vpsp_vid_array'. + * to insert the 'vid' and 'pid' values into the 'g_vpsp_context_array'. * The insertion is done in ascending order of 'pid'. 
*/ static int vpsp_add_vid(uint32_t vid) { pid_t cur_pid = task_pid_nr(current); - struct vpsp_vid_entry new_entry = {.vid = vid, .pid = cur_pid}; + struct vpsp_context new_entry = {.vid = vid, .pid = cur_pid}; - if (vpsp_get_vid(NULL, cur_pid) == 0) + if (vpsp_get_context(NULL, cur_pid) == 0) return -EEXIST; if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) return -ENOMEM; @@ -241,8 +245,8 @@ static int vpsp_add_vid(uint32_t vid) return -EINVAL; write_lock(&vpsp_rwlock); - memcpy(&g_vpsp_vid_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_vid_entry)); - sort(g_vpsp_vid_array, g_vpsp_vid_num, sizeof(struct vpsp_vid_entry), + memcpy(&g_vpsp_context_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_context)); + sort(g_vpsp_context_array, g_vpsp_vid_num, sizeof(struct vpsp_context), compare_vid_entries, swap_vid_entries); pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num); write_unlock(&vpsp_rwlock); @@ -261,12 +265,12 @@ static int vpsp_del_vid(void) write_lock(&vpsp_rwlock); for (i = 0; i < g_vpsp_vid_num; ++i) { - if (g_vpsp_vid_array[i].pid == cur_pid) { + if (g_vpsp_context_array[i].pid == cur_pid) { --g_vpsp_vid_num; pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", - g_vpsp_vid_array[i].vid, cur_pid, g_vpsp_vid_num); - memcpy(&g_vpsp_vid_array[i], &g_vpsp_vid_array[i + 1], - sizeof(struct vpsp_vid_entry) * (g_vpsp_vid_num - i)); + g_vpsp_context_array[i].vid, cur_pid, g_vpsp_vid_num); + memcpy(&g_vpsp_context_array[i], &g_vpsp_context_array[i + 1], + sizeof(struct vpsp_context) * (g_vpsp_vid_num - i)); ret = 0; goto end; } @@ -277,6 +281,24 @@ static int vpsp_del_vid(void) return ret; } +static int vpsp_set_gpa_range(u64 gpa_start, u64 gpa_end) +{ + pid_t cur_pid = task_pid_nr(current); + struct vpsp_context *ctx = NULL; + + vpsp_get_context(&ctx, cur_pid); + if (!ctx) { + pr_err("PSP: %s get vpsp_context failed from pid %d\n", __func__, cur_pid); + return -ENOENT; + } + + ctx->gpa_start = gpa_start; 
+ ctx->gpa_end = gpa_end; + pr_info("PSP: set gpa range (start 0x%llx, end 0x%llx), by pid %d\n", + gpa_start, gpa_end, cur_pid); + return 0; +} + static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) { int ret = 0; @@ -299,6 +321,10 @@ static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) ctrl->data.def_vid_perm = vpsp_get_default_vid_permission(); break; + case VPSP_OP_SET_GPA: + ret = vpsp_set_gpa_range(ctrl->data.gpa.gpa_start, ctrl->data.gpa.gpa_end); + break; + default: ret = -EINVAL; break; diff --git a/drivers/crypto/ccp/hygon/vpsp.c b/drivers/crypto/ccp/hygon/vpsp.c index bcdad2bff848..b94da0d2df43 100644 --- a/drivers/crypto/ccp/hygon/vpsp.c +++ b/drivers/crypto/ccp/hygon/vpsp.c @@ -18,26 +18,32 @@ #undef pr_fmt #endif #define pr_fmt(fmt) "vpsp: " fmt +#define VTKM_VM_BIND 0x904 /* - * The file mainly implements the base execution - * logic of virtual PSP in kernel mode, which mainly includes: - * (1) Obtain the VM command and preprocess the pointer - * mapping table information in the command buffer - * (2) The command that has been converted will interact - * with the channel of the psp through the driver and - * try to obtain the execution result - * (3) The executed command data is recovered according to - * the multilevel pointer of the mapping table, and then returned to the VM + * The file mainly implements the base execution logic of virtual PSP in kernel mode, + * which mainly includes: + * (1) Preprocess the guest data in the host kernel + * (2) The command that has been converted will interact with the channel of the + * psp through the driver and try to obtain the execution result + * (3) The executed command data is recovered, and then returned to the VM * * The primary implementation logic of virtual PSP in kernel mode * call trace: - * guest command(vmmcall) - * |-> kvm_pv_psp_cmd_pre_op + * guest command(vmmcall, KVM_HC_PSP_COPY_FORWARD_OP) * | - * kvm_pv_psp_copy_forward_op->|-> vpsp_try_do_cmd/vpsp_try_get_result <====> psp device 
driver + * kvm_pv_psp_copy_op----> | -> kvm_pv_psp_cmd_pre_op + * | + * | -> vpsp_try_do_cmd/vpsp_try_get_result + * | |<=> psp device driver + * | + * | + * |-> kvm_pv_psp_cmd_post_op + * + * guest command(vmmcall, KVM_HC_PSP_FORWARD_OP) * | - * |-> kvm_pv_psp_cmd_post_op + * kvm_pv_psp_forward_op-> |-> vpsp_try_do_cmd/vpsp_try_get_result + * |<=> psp device driver */ struct psp_cmdresp_head { @@ -56,10 +62,36 @@ struct vpsp_hbuf_wrapper { struct vpsp_hbuf_wrapper g_hbuf_wrap[CSV_COMMAND_PRIORITY_NUM][CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE] = {0}; -/* - * Obtain the VM command and preprocess the pointer mapping table - * information in the command buffer, the processed data will be - * used to interact with the psp device +static int check_gpa_range(struct vpsp_context *vpsp_ctx, gpa_t addr, uint32_t size) +{ + if (!vpsp_ctx || !addr) + return -EFAULT; + + if (addr >= vpsp_ctx->gpa_start && (addr + size) <= vpsp_ctx->gpa_end) + return 0; + return -EFAULT; +} + +static int check_psp_mem_range(struct vpsp_context *vpsp_ctx, + void *data, uint32_t size) +{ + if ((((uintptr_t)data + size - 1) & ~PSP_2MB_MASK) != + ((uintptr_t)data & ~PSP_2MB_MASK)) { + pr_err("data %llx, size %d crossing 2MB\n", (u64)data, size); + return -EFAULT; + } + + if (vpsp_ctx) + return check_gpa_range(vpsp_ctx, (gpa_t)data, size); + + return 0; +} + +/** + * Copy the guest data to the host kernel buffer + * and record the host buffer address in 'hbuf'. + * This 'hbuf' is used to restore context information + * during asynchronous processing. 
*/ static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, struct vpsp_hbuf_wrapper *hbuf) @@ -74,11 +106,8 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, return -EFAULT; data_size = psp_head.buf_size; - if ((((uintptr_t)data_gpa + data_size - 1) & ~PSP_2MB_MASK) - != ((uintptr_t)data_gpa & ~PSP_2MB_MASK)) { - pr_err("data_gpa %llx, data_size %d crossing 2MB\n", (u64)data_gpa, data_size); + if (check_psp_mem_range(NULL, (void *)data_gpa, data_size)) return -EFAULT; - } data = kzalloc(data_size, GFP_KERNEL); if (!data) @@ -122,9 +151,234 @@ static int cmd_type_is_tkm(int cmd) return 0; } +static int cmd_type_is_allowed(int cmd) +{ + if (cmd >= TKM_PSP_CMDID_OFFSET && cmd <= TKM_CMD_ID_MAX) + return 1; + return 0; +} + +struct psp_cmdresp_vtkm_vm_bind { + struct psp_cmdresp_head head; + uint16_t vid; + uint32_t vm_handle; + uint8_t reserved[46]; +} __packed; + +static int kvm_bind_vtkm(uint32_t vm_handle, uint32_t cmd_id, uint32_t vid, uint32_t *pret) +{ + int ret = 0; + struct psp_cmdresp_vtkm_vm_bind *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->head.buf_size = sizeof(*data); + data->head.cmdresp_size = sizeof(*data); + data->head.cmdresp_code = VTKM_VM_BIND; + data->vid = vid; + data->vm_handle = vm_handle; + + ret = psp_do_cmd(cmd_id, data, pret); + if (ret == -EIO) + ret = 0; + + kfree(data); + return ret; +} + +static phys_addr_t gpa_to_hpa(struct kvm_vpsp *vpsp, unsigned long data_gpa) +{ + phys_addr_t hpa = 0; + unsigned long pfn = vpsp->gfn_to_pfn(vpsp->kvm, data_gpa >> PAGE_SHIFT); + + if (!is_error_pfn(pfn)) + hpa = ((pfn << PAGE_SHIFT) + offset_in_page(data_gpa)) | sme_get_me_mask(); + + pr_debug("gpa %lx, hpa %llx\n", data_gpa, hpa); + return hpa; + +} + +static int check_cmd_forward_op_permission(struct kvm_vpsp *vpsp, struct vpsp_context *vpsp_ctx, + uint64_t data, uint32_t cmd) +{ + int ret; + struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; + struct 
psp_cmdresp_head psp_head; + + if (!cmd_type_is_allowed(vcmd->cmd_id)) { + pr_err("[%s]: unsupported cmd id %x\n", __func__, vcmd->cmd_id); + return -EINVAL; + } + + if (vpsp->is_csv_guest) { + /** + * If the gpa address range exists, + * it means there must be a legal vid + */ + if (!vpsp_ctx || !vpsp_ctx->gpa_start || !vpsp_ctx->gpa_end) { + pr_err("[%s]: No set gpa range or vid in csv guest\n", __func__); + return -EPERM; + } + + ret = check_psp_mem_range(vpsp_ctx, (void *)data, 0); + if (ret) + return -EFAULT; + } else { + if (!vpsp_ctx && cmd_type_is_tkm(vcmd->cmd_id) + && !vpsp_get_default_vid_permission()) { + pr_err("[%s]: not allowed tkm command without vid\n", __func__); + return -EPERM; + } + + // the 'data' is gpa address + if (unlikely(vpsp->read_guest(vpsp->kvm, data, &psp_head, + sizeof(struct psp_cmdresp_head)))) + return -EFAULT; + + ret = check_psp_mem_range(vpsp_ctx, (void *)data, psp_head.buf_size); + if (ret) + return -EFAULT; + } + return 0; +} + +static int +check_cmd_copy_forward_op_permission(struct kvm_vpsp *vpsp, + struct vpsp_context *vpsp_ctx, + uint64_t data, uint32_t cmd) +{ + int ret = 0; + struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; + + if (!cmd_type_is_allowed(vcmd->cmd_id)) { + pr_err("[%s]: unsupported cmd id %x\n", __func__, vcmd->cmd_id); + return -EINVAL; + } + + if (vpsp->is_csv_guest) { + pr_err("[%s]: unsupported run on csv guest\n", __func__); + ret = -EPERM; + } else { + if (!vpsp_ctx && cmd_type_is_tkm(vcmd->cmd_id) + && !vpsp_get_default_vid_permission()) { + pr_err("[%s]: not allowed tkm command without vid\n", __func__); + ret = -EPERM; + } + } + return ret; +} + +static int vpsp_try_bind_vtkm(struct kvm_vpsp *vpsp, struct vpsp_context *vpsp_ctx, + uint32_t cmd, uint32_t *psp_ret) +{ + int ret; + struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; + + if (vpsp_ctx && !vpsp_ctx->vm_is_bound && vpsp->is_csv_guest) { + ret = kvm_bind_vtkm(vpsp->vm_handle, vcmd->cmd_id, + vpsp_ctx->vid, psp_ret); + if (ret || *psp_ret) 
{ + pr_err("[%s] kvm bind vtkm failed with ret: %d, pspret: %d\n", + __func__, ret, *psp_ret); + return ret; + } + vpsp_ctx->vm_is_bound = 1; + } + return 0; +} + +/** + * @brief Directly convert the gpa address into hpa and forward it to PSP, + * It is another form of kvm_pv_psp_copy_op, mainly used for csv VMs. + * + * @param vpsp points to kvm related data + * @param cmd psp cmd id, bit 31 indicates queue priority + * @param data_gpa guest physical address of input data + * @param psp_ret indicates Asynchronous context information + * + * Since the csv guest memory cannot be read or written directly, + * the shared asynchronous context information is shared through psp_ret and return value. + */ +int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, + gpa_t data_gpa, uint32_t psp_ret) +{ + int ret; + uint64_t data_hpa; + uint32_t index = 0, vid = 0; + struct vpsp_ret psp_async = {0}; + struct vpsp_context *vpsp_ctx = NULL; + struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + + vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid); + + ret = check_cmd_forward_op_permission(vpsp, vpsp_ctx, data_gpa, cmd); + if (unlikely(ret)) { + pr_err("directly operation not allowed\n"); + goto end; + } + + ret = vpsp_try_bind_vtkm(vpsp, vpsp_ctx, cmd, (uint32_t *)&psp_async); + if (unlikely(ret || *(uint32_t *)&psp_async)) { + pr_err("try to bind vtkm failed (ret %x, psp_async %x)\n", + ret, *(uint32_t *)&psp_async); + goto end; + } + + if (vpsp_ctx) + vid = vpsp_ctx->vid; + + *((uint32_t *)&psp_async) = psp_ret; + data_hpa = PUT_PSP_VID(gpa_to_hpa(vpsp, data_gpa), vid); + + switch (psp_async.status) { + case VPSP_INIT: + /* try to send command to the device for execution*/ + ret = vpsp_try_do_cmd(cmd, data_hpa, &psp_async); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_do_cmd failed\n", __func__); + goto end; + } + break; + + case VPSP_RUNNING: + prio = vcmd->is_high_rb ? 
CSV_COMMAND_PRIORITY_HIGH : + CSV_COMMAND_PRIORITY_LOW; + index = psp_async.index; + /* try to get the execution result from ringbuffer*/ + ret = vpsp_try_get_result(prio, index, data_hpa, &psp_async); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed\n", __func__); + goto end; + } + break; + + default: + pr_err("[%s]: invalid command status\n", __func__); + break; + } + +end: + /** + * In order to indicate both system errors and PSP errors, + * the psp_async.pret field needs to be reused. + */ + psp_async.format = VPSP_RET_PSP_FORMAT; + if (ret) { + psp_async.format = VPSP_RET_SYS_FORMAT; + if (ret > 0) + ret = -ret; + psp_async.pret = (uint16_t)ret; + } + return *((int *)&psp_async); +} +EXPORT_SYMBOL_GPL(kvm_pv_psp_forward_op); + /** - * @brief kvm_pv_psp_copy_forward_op is used for ordinary virtual machines to copy data - * in gpa to host memory and send it to psp for processing. + * @brief copy data in gpa to host memory and send it to psp for processing. * * @param vpsp points to kvm related data * @param cmd psp cmd id, bit 31 indicates queue priority @@ -137,25 +391,23 @@ int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, g struct vpsp_ret psp_ret = {0}; struct vpsp_hbuf_wrapper hbuf = {0}; struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; + struct vpsp_context *vpsp_ctx = NULL; + phys_addr_t data_paddr = 0; uint8_t prio = CSV_COMMAND_PRIORITY_LOW; uint32_t index = 0; uint32_t vid = 0; - if (vcmd->cmd_id != TKM_PSP_CMDID_OFFSET) { - pr_err("[%s]: unsupported cmd id %x\n", __func__, vcmd->cmd_id); - return -EINVAL; - } + vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid); - // only tkm cmd need vid - if (cmd_type_is_tkm(vcmd->cmd_id)) { - // check the permission to use the default vid when no vid is set - ret = vpsp_get_vid(&vid, vpsp->kvm->userspace_pid); - if (ret && !vpsp_get_default_vid_permission()) { - pr_err("[%s]: not allowed tkm command without vid\n", __func__); - return -EPERM; - } + ret = 
check_cmd_copy_forward_op_permission(vpsp, vpsp_ctx, data_gpa, cmd); + if (unlikely(ret)) { + pr_err("copy operation not allowed\n"); + return -EPERM; } + if (vpsp_ctx) + vid = vpsp_ctx->vid; + if (unlikely(vpsp->read_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)))) return -EFAULT; @@ -172,9 +424,9 @@ int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, g goto end; } + data_paddr = PUT_PSP_VID(__psp_pa(hbuf.data), vid); /* try to send command to the device for execution*/ - ret = vpsp_try_do_cmd(vid, cmd, (void *)hbuf.data, - (struct vpsp_ret *)&psp_ret); + ret = vpsp_try_do_cmd(cmd, data_paddr, (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_try_do_cmd failed\n", __func__); ret = -EFAULT; @@ -202,9 +454,10 @@ int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, g prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : CSV_COMMAND_PRIORITY_LOW; index = psp_ret.index; + data_paddr = PUT_PSP_VID(__psp_pa(g_hbuf_wrap[prio][index].data), vid); /* try to get the execution result from ringbuffer*/ - ret = vpsp_try_get_result(vid, prio, index, g_hbuf_wrap[prio][index].data, - (struct vpsp_ret *)&psp_ret); + ret = vpsp_try_get_result(prio, index, data_paddr, + (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_try_get_result failed\n", __func__); ret = -EFAULT; diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index fd2b2f677c2d..39c1a149e658 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -432,23 +432,32 @@ struct vpsp_cmd { * * @pret: the return code from device * @resv: reserved bits + * @format: indicates that the error is a unix error code(is 0) or a psp error(is 1) * @index: used to distinguish the position of command in the ringbuffer * @status: indicates the current status of the related command */ struct vpsp_ret { u32 pret : 16; - u32 resv : 2; + u32 resv : 1; + u32 format : 1; u32 index : 12; u32 status : 2; }; +#define 
VPSP_RET_SYS_FORMAT 1 +#define VPSP_RET_PSP_FORMAT 0 struct kvm_vpsp { struct kvm *kvm; int (*write_guest)(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); int (*read_guest)(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); + kvm_pfn_t (*gfn_to_pfn)(struct kvm *kvm, gfn_t gfn); + u32 vm_handle; + u8 is_csv_guest; }; #define PSP_2MB_MASK (2*1024*1024 - 1) +#define PSP_HUGEPAGE_2MB (2*1024*1024) +#define PSP_HUGEPAGE_NUM_MAX 128 #define TKM_CMD_ID_MIN 0x120 #define TKM_CMD_ID_MAX 0x12f #define TKM_PSP_CMDID TKM_CMD_ID_MIN @@ -459,6 +468,17 @@ struct kvm_vpsp { #define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) #define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) +struct vpsp_context { + u32 vid; + pid_t pid; + u64 gpa_start; + u64 gpa_end; + + // `vm_is_bound` indicates whether the binding operation has been performed + u32 vm_is_bound; + u32 vm_handle; // only for csv +}; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP int psp_do_cmd(int cmd, void *data, int *psp_ret); @@ -474,17 +494,19 @@ int csv_check_stat_queue_status(int *psp_ret); */ int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); -int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, - void *data, struct vpsp_ret *psp_ret); +int vpsp_try_get_result(uint8_t prio, uint32_t index, + phys_addr_t phy_addr, struct vpsp_ret *psp_ret); -int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret); +int vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, struct vpsp_ret *psp_ret); -int vpsp_get_vid(uint32_t *vid, pid_t pid); +int vpsp_get_context(struct vpsp_context **ctx, pid_t pid); int vpsp_get_default_vid_permission(void); int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa); +int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, + gpa_t data_gpa, uint32_t psp_ret); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int 
psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; } @@ -499,11 +521,15 @@ static inline int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; } static inline int -vpsp_try_get_result(uint8_t prio, uint32_t index, - void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } +vpsp_try_get_result(uint8_t prio, + uint32_t index, phys_addr_t phy_addr, struct vpsp_ret *psp_ret) { return -ENODEV; } + +static inline int +vpsp_try_do_cmd(int cmd, phys_addr_t phy_addr, + struct vpsp_ret *psp_ret) { return -ENODEV; } static inline int -vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } +vpsp_get_context(struct vpsp_context **ctx, pid_t pid) { return -ENODEV; } static inline int vpsp_get_default_vid_permission(void) { return -ENODEV; } @@ -511,6 +537,11 @@ vpsp_get_default_vid_permission(void) { return -ENODEV; } static inline int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa) { return -ENODEV; } + +static inline int +kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, + gpa_t data_gpa, uint32_t psp_ret) { return -ENODEV; } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index f2fc642db945..944fe133ae3c 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -33,6 +33,7 @@ #define KVM_HC_VM_ATTESTATION 100 /* Specific to Hygon CPU */ #define KVM_HC_PSP_OP_OBSOLETE 101 /* Specific to Hygon platform */ #define KVM_HC_PSP_COPY_FORWARD_OP 102 /* Specific to Hygon platform */ +#define KVM_HC_PSP_FORWARD_OP 103 /* Specific to Hygon platform */ /* * hypercalls use architecture specific -- Gitee From cd4f094e5903d1c53edee91bc20e73c3eae389ec Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Tue, 9 Jul 2024 14:31:54 +0800 Subject: [PATCH 1639/2138] anolis: driver/crypto/ccp: fix vtkm 
without C-bit when host SME deactivate ANBZ: #11635 CSV guests can run without SME enabled. Regardless of the host's SME status, the C-bit must be set for the physical address. Memory will be encrypted with a different key than SME. Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4077 --- drivers/crypto/ccp/hygon/vpsp.c | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/hygon/vpsp.c b/drivers/crypto/ccp/hygon/vpsp.c index b94da0d2df43..4ece51f1e654 100644 --- a/drivers/crypto/ccp/hygon/vpsp.c +++ b/drivers/crypto/ccp/hygon/vpsp.c @@ -13,6 +13,7 @@ #include #include #include +#include #ifdef pr_fmt #undef pr_fmt @@ -188,13 +189,43 @@ static int kvm_bind_vtkm(uint32_t vm_handle, uint32_t cmd_id, uint32_t vid, uint return ret; } +static unsigned long vpsp_get_me_mask(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned long me_mask; + +#define AMD_SME_BIT BIT(0) +#define AMD_SEV_BIT BIT(1) + /* + * Check for the SME/SEV feature: + * CPUID Fn8000_001F[EAX] + * - Bit 0 - Secure Memory Encryption support + * - Bit 1 - Secure Encrypted Virtualization support + * CPUID Fn8000_001F[EBX] + * - Bits 5:0 - Pagetable bit position used to indicate encryption + */ + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + /* Check whether SEV or SME is supported */ + if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT))) + return 0; + + me_mask = 1UL << (ebx & 0x3f); + return me_mask; +} + static phys_addr_t gpa_to_hpa(struct kvm_vpsp *vpsp, unsigned long data_gpa) { phys_addr_t hpa = 0; unsigned long pfn = vpsp->gfn_to_pfn(vpsp->kvm, data_gpa >> PAGE_SHIFT); + unsigned long me_mask = sme_get_me_mask(); + + if (me_mask == 0 && vpsp->is_csv_guest) + me_mask = vpsp_get_me_mask(); if (!is_error_pfn(pfn)) - hpa = ((pfn << PAGE_SHIFT) + offset_in_page(data_gpa)) | sme_get_me_mask(); + hpa = ((pfn << PAGE_SHIFT) + 
offset_in_page(data_gpa)) | me_mask; pr_debug("gpa %lx, hpa %llx\n", data_gpa, hpa); return hpa; -- Gitee From 57ca0c59d97861b21bd6760e9b045837526e1be4 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Thu, 4 Jul 2024 10:57:27 +0800 Subject: [PATCH 1640/2138] anolis: drivers/crypto/ccp: add ioctl API to pin TKM hugepage ANBZ: #11635 Signed-off-by: xiongmengbiao Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4077 --- drivers/crypto/ccp/hygon/psp-dev.c | 71 ++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 93745cce08f7..f641262ef2ec 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -30,6 +30,8 @@ enum HYGON_PSP_OPCODE { HYGON_PSP_MUTEX_ENABLE = 1, HYGON_PSP_MUTEX_DISABLE, HYGON_VPSP_CTRL_OPT, + HYGON_PSP_OP_PIN_USER_PAGE, + HYGON_PSP_OP_UNPIN_USER_PAGE, HYGON_PSP_OPCODE_MAX_NR, }; @@ -299,6 +301,67 @@ static int vpsp_set_gpa_range(u64 gpa_start, u64 gpa_end) return 0; } +/** + * Try to pin a page + * + * @vaddr: the userspace virtual address, must be aligned to PAGE_SIZE + */ +static int psp_pin_user_page(u64 vaddr) +{ + struct page *page; + long npinned = 0; + int ref_count = 0; + + // check must be aligned to PAGE_SIZE + if (vaddr & (PAGE_SIZE - 1)) { + pr_err("vaddr %llx not aligned to 0x%lx\n", vaddr, PAGE_SIZE); + return -EFAULT; + } + + npinned = pin_user_pages_fast(vaddr, 1, FOLL_WRITE, &page); + if (npinned != 1) { + pr_err("PSP: pin_user_pages_fast fail\n"); + return -ENOMEM; + } + + ref_count = page_ref_count(page); + pr_debug("pin user page with address %llx, page ref_count %d\n", vaddr, ref_count); + return 0; +} + +/** + * Try to unpin a page + * + * @vaddr: the userspace virtual address, must be aligned to PAGE_SIZE + */ +static int psp_unpin_user_page(u64 vaddr) +{ + struct page *page; + long npinned = 0; + int ref_count = 0; + + // check must be aligned to 
PAGE_SIZE + if (vaddr & (PAGE_SIZE - 1)) { + pr_err("vaddr %llx not aligned to 0x%lx\n", vaddr, PAGE_SIZE); + return -EFAULT; + } + + // page reference count increment by 1 + npinned = get_user_pages_fast(vaddr, 1, FOLL_WRITE, &page); + if (npinned != 1) { + pr_err("PSP: pin_user_pages_fast fail\n"); + return -ENOMEM; + } + + // page reference count decrement by 2 + put_page(page); + put_page(page); + + ref_count = page_ref_count(page); + pr_debug("unpin user page with address %llx, page ref_count %d\n", vaddr, ref_count); + return 0; +} + static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) { int ret = 0; @@ -381,6 +444,14 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) return -EFAULT; break; + case HYGON_PSP_OP_PIN_USER_PAGE: + ret = psp_pin_user_page((u64)arg); + break; + + case HYGON_PSP_OP_UNPIN_USER_PAGE: + ret = psp_unpin_user_page((u64)arg); + break; + default: printk(KERN_INFO "%s: invalid ioctl number: %d\n", __func__, opcode); return -EINVAL; -- Gitee From 46feb4cfa46d1c62e7cde69a37f46085a18599cc Mon Sep 17 00:00:00 2001 From: niuyongwen Date: Fri, 2 Aug 2024 13:04:08 +0800 Subject: [PATCH 1641/2138] anolis: drivers/crypto/ccp: memmove is used instead of memcpy in overlapped memory for tkm ANBZ: #11635 Signed-off-by: niuyongwen Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4077 --- drivers/crypto/ccp/hygon/psp-dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index f641262ef2ec..26d4e87836ac 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -271,7 +271,7 @@ static int vpsp_del_vid(void) --g_vpsp_vid_num; pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", g_vpsp_context_array[i].vid, cur_pid, g_vpsp_vid_num); - memcpy(&g_vpsp_context_array[i], &g_vpsp_context_array[i + 1], + memmove(&g_vpsp_context_array[i], 
&g_vpsp_context_array[i + 1], sizeof(struct vpsp_context) * (g_vpsp_vid_num - i)); ret = 0; goto end; -- Gitee From 7e0308aa0b34b106aca4c8e78a12771f5ce07816 Mon Sep 17 00:00:00 2001 From: niuyongwen Date: Mon, 30 Sep 2024 11:23:59 +0800 Subject: [PATCH 1642/2138] anolis: drivers/crypto/ccp: fix the increase in page references caused by gfn_to_pfn ANBZ: #11635 gfn_to_pfn causes the refcount to increment atomically by one, which needs to be released. Signed-off-by: niuyongwen Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4077 --- drivers/crypto/ccp/hygon/vpsp.c | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/hygon/vpsp.c b/drivers/crypto/ccp/hygon/vpsp.c index 4ece51f1e654..df62dab035b8 100644 --- a/drivers/crypto/ccp/hygon/vpsp.c +++ b/drivers/crypto/ccp/hygon/vpsp.c @@ -220,12 +220,28 @@ static phys_addr_t gpa_to_hpa(struct kvm_vpsp *vpsp, unsigned long data_gpa) phys_addr_t hpa = 0; unsigned long pfn = vpsp->gfn_to_pfn(vpsp->kvm, data_gpa >> PAGE_SHIFT); unsigned long me_mask = sme_get_me_mask(); + struct page *page; if (me_mask == 0 && vpsp->is_csv_guest) me_mask = vpsp_get_me_mask(); if (!is_error_pfn(pfn)) hpa = ((pfn << PAGE_SHIFT) + offset_in_page(data_gpa)) | me_mask; + else { + pr_err("[%s] pfn: %lx is invalid, gpa %lx", + __func__, pfn, data_gpa); + return 0; + } + + /* + * Using gfn_to_pfn causes the refcount to increment + * atomically by one, which needs to be released. 
+ */ + page = pfn_to_page(pfn); + if (PageCompound(page)) + page = compound_head(page); + + put_page(page); pr_debug("gpa %lx, hpa %llx\n", data_gpa, hpa); return hpa; @@ -343,6 +359,7 @@ int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, struct vpsp_context *vpsp_ctx = NULL; struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + phys_addr_t hpa; vpsp_get_context(&vpsp_ctx, vpsp->kvm->userspace_pid); @@ -363,7 +380,14 @@ int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, vid = vpsp_ctx->vid; *((uint32_t *)&psp_async) = psp_ret; - data_hpa = PUT_PSP_VID(gpa_to_hpa(vpsp, data_gpa), vid); + + hpa = gpa_to_hpa(vpsp, data_gpa); + if (unlikely(!hpa)) { + ret = -EFAULT; + goto end; + } + + data_hpa = PUT_PSP_VID(hpa, vid); switch (psp_async.status) { case VPSP_INIT: -- Gitee From aa698ffdee8457e7cf7efc7dffde74c4bebcf799 Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Mon, 4 Nov 2024 09:30:48 +0800 Subject: [PATCH 1643/2138] anolis: cpufreq: Initialize scaling_cur_freq correctly ANBZ: #11607 The policy->cur was not being initialized properly during CPU initialization, leading to it always reporting 0. This commit addresses this issue by setting the initial frequency to the normal maximum frequency. This ensures that the current frequency reflects the actual CPU operating frequency. 
Signed-off-by: Juxin Gao Reviewed-by: MinLi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/4062 --- drivers/cpufreq/loongson3-acpi-cpufreq.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/cpufreq/loongson3-acpi-cpufreq.c b/drivers/cpufreq/loongson3-acpi-cpufreq.c index 018b529a0cf9..67e48763e3f1 100644 --- a/drivers/cpufreq/loongson3-acpi-cpufreq.c +++ b/drivers/cpufreq/loongson3-acpi-cpufreq.c @@ -1241,6 +1241,8 @@ static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy) if (has_boost_freq() && boost_supported()) loongson3_cpufreq_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; + policy->cur = core->normal_max_freq * 1000; + pr_info("CPU%u - ACPI performance management activated.\n", cpu); for (i = 0; i < perf->state_count; i++) pr_debug(" %cP%d: %d MHz, %d mW, %d uS %d level\n", -- Gitee From ef83248a52743e838b91f074a2a682e5427fcd84 Mon Sep 17 00:00:00 2001 From: Ming Wang Date: Thu, 17 Oct 2024 10:52:38 +0800 Subject: [PATCH 1644/2138] anolis: PCI: Fixup kexec failed on loongson platform ANBZ: #11467 This is similar to commit 62b6dee1b44a ("PCI/portdrv: Prevent LS7A Bus Master clearing on shutdown"), which prevents LS7A Bus Master clearing on kexec. The key point of this is to work around the LS7A defect that clearing PCI_COMMAND_MASTER prevents MMIO requests from going downstream, and we may need to do that even after .shutdown(), e.g., to print console messages. And in this case we rely on .shutdown() for the downstream devices to disable interrupts and DMA. Only skip Bus Master clearing on bridges because endpoint devices still need it. 
Signed-off-by: Huacai Chen Signed-off-by: Ming Wang Reviewed-by: MinLi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/4023 --- drivers/pci/pci-driver.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 9c59bf03d657..b699839a7d4f 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -526,7 +526,11 @@ static void pci_device_shutdown(struct device *dev) * If it is not a kexec reboot, firmware will hit the PCI * devices with big hammer and stop their DMA any way. */ +#ifdef CONFIG_LOONGARCH + if (kexec_in_progress && !pci_is_bridge(pci_dev) && (pci_dev->current_state <= PCI_D3hot)) +#else if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot)) +#endif pci_clear_master(pci_dev); } -- Gitee From 2702d2c4f26f4778c3325b20fc73adedc2cc68e4 Mon Sep 17 00:00:00 2001 From: wusheng Date: Sat, 20 Apr 2024 11:48:53 +0800 Subject: [PATCH 1645/2138] anolis: PCI: LS7A2000 enable msi ANBZ: #11458 For the 7A2000 of loongson pcie port H, it is necessary to enable MSI (Message Signaled Interrupts) when the device is a host bridge. This patch adds a quirk for the Loongson 7A2000 chipset to ensure that MSI is enabled properly. 
Signed-off-by: wusheng Signed-off-by: Juxin Gao Reviewed-by: MinLi Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/4016 --- drivers/pci/quirks.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index fe5555a9011e..82cc68844f30 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -396,6 +396,20 @@ static void quirk_tigerpoint_bm_sts(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts); #endif +static void loongson_pcie_msi_quirk(struct pci_dev *dev) +{ + u16 val; + u16 class; + + class = dev->class >> 8; + if (class == PCI_CLASS_BRIDGE_HOST) { + pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &val); + val |= PCI_MSI_FLAGS_ENABLE; + pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, val); + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, 0x7a59, loongson_pcie_msi_quirk); + /* Chipsets where PCI->PCI transfers vanish or hang */ static void quirk_nopcipci(struct pci_dev *dev) { -- Gitee From 23aa765de3a67840fddb5f0bdd363f6be3442df8 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 13 Sep 2024 19:51:43 +0300 Subject: [PATCH 1646/2138] intel_idle: fix ACPI _CST matching for newer Xeon platforms ANBZ: #11598 commit 4c411cca33cf1c21946b710b2eb59aca9f646703 upstream. Background ~~~~~~~~~~ The driver uses 'use_acpi = true' in C-state custom table for all Xeon platforms. The meaning of this flag is as follows. 1. If a C-state from the custom table is defined in ACPI _CST (matched by the mwait hint), then enable this C-state. 2. Otherwise, disable this C-state, unless the C-sate definition in the custom table has the 'CPUIDLE_FLAG_ALWAYS_ENABLE' flag set, in which case enabled it. The goal is to honor BIOS C6 settings - If BIOS disables C6, disable it by default in the OS too (but it can be enabled via sysfs). This works well on Xeons that expose only one flavor of C6. 
These are all Xeons except for the newest Granite Rapids (GNR) and Sierra Forest (SRF). The problem ~~~~~~~~~~~ GNR and SRF have 2 flavors of C6: C6/C6P on GNR, C6S/C6SP on SRF. The "P" flavor allows for the package C6, while the "non-P" flavor allows only for core/module C6. As far as this patch is concerned, both GNR and SRF platforms are handled the same way. Therefore, further discussion is focused on GNR, but it applies to SRF as well. On Intel Xeon platforms, BIOS exposes only 2 ACPI C-states: C1 and C2. Well, depending on BIOS settings, C2 may be named as C3. But there still will be only 2 states - C1 and C3. But this is a non-essential detail, so further discussion is focused on the ACPI C1 and C2 case. On pre-GNR/SRF Xeon platforms, ACPI C1 is mapped to C1 or C1E, and ACPI C2 is mapped to C6. The 'use_acpi' flag works just fine: * If ACPI C2 enabled, enable C6. * Otherwise, disable C6. However, on GNR there are 2 flavors of C6, so BIOS maps ACPI C2 to either C6 or C6P, depending on the user settings. As a result, due to the 'use_acpi' flag, 'intel_idle' disables at least one of the C6 flavors. BIOS | OS | Verdict ----------------------------------------------------|--------- ACPI C2 disabled | C6 disabled, C6P disabled | OK ACPI C2 mapped to C6 | C6 enabled, C6P disabled | Not OK ACPI C2 mapped to C6P | C6 disabled, C6P enabled | Not OK The goal of 'use_acpi' is to honor BIOS ACPI C2 disabled case, which works fine. But if ACPI C2 is enabled, the goal is to enable all flavors of C6, not just one of the flavors. This was overlooked when enabling GNR/SRF platforms. In other words, before GNR/SRF, the ACPI C2 status was binary - enabled or disabled. But it is not binary on GNR/SRF, however the goal is to continue to treat it as binary. The fix ~~~~~~~ Notice that the current algorithm matches ACPI and custom table C-states by the mwait hint. 
However, mwait hint consists of the 'state' and 'sub-state' parts, and all C6 flavors have the same state value of 0x20, but different sub-state values. Introduce new C-state table flag - CPUIDLE_FLAG_PARTIAL_HINT_MATCH and add it to both C6 flavors of the GNR/SRF platforms. When matching ACPI _CST and custom table C-states, match only the state part if the C-state has CPUIDLE_FLAG_PARTIAL_HINT_MATCH, otherwise match both state and sub-state parts (as before). With this fix, GNR C-states enabled/disabled status looks like this. BIOS | OS ---------------------------------------------------- ACPI C2 disabled | C6 disabled, C6P disabled ACPI C2 mapped to C6 | C6 enabled, C6P enabled ACPI C2 mapped to C6P | C6 enabled, C6P enabled Possible alternative ~~~~~~~~~~~~~~~~~~~~ The alternative would be to remove 'use_acpi' flag for GNR and SRF. This would be a simpler solution, but it would violate the principle of least surprise - users of Xeon platforms are used to the fact that intel_idle honors C6 enabled/disabled flag. It is more consistent user experience if GNR/SRF continue doing so. How tested ~~~~~~~~~~ Tested on GNR and SRF platform with all the 3 BIOS configurations: ACPI C2 disabled, mapped to C6/C6S, mapped to C6P/C6SP. Tested on Ice lake Xeon and Sapphire Rapids Xeon platforms with ACPI C2 enabled and disabled, just to verify that the patch does not break older Xeons. Intel-SIG: commit 4c411cca33cf intel_idle: fix ACPI _CST matching for newer Xeon platforms. Backport intel_idle GNR and SRF fix Fixes: 92813fd5b156 ("intel_idle: add Sierra Forest SoC support") Fixes: 370406bf5738 ("intel_idle: add Granite Rapids Xeon support") Cc: 6.8+ # 6.8+ Signed-off-by: Artem Bityutskiy Link: https://patch.msgid.link/20240913165143.4140073-1-dedekind1@gmail.com [ rjw: Changelog edits ] Signed-off-by: Rafael J. 
Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4058 --- drivers/idle/intel_idle.c | 37 +++++++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index cd6100f10581..f86b6ede17e2 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -120,6 +120,12 @@ static unsigned int mwait_substates __initdata; */ #define CPUIDLE_FLAG_INIT_XSTATE BIT(17) +/* + * Ignore the sub-state when matching mwait hints between the ACPI _CST and + * custom tables. + */ +#define CPUIDLE_FLAG_PARTIAL_HINT_MATCH BIT(18) + /* * MWAIT takes an 8-bit "hint" in EAX "suggesting" * the C-state (top nibble) and sub-state (bottom nibble) @@ -1014,7 +1020,8 @@ static struct cpuidle_state gnr_cstates[] __initdata = { .name = "C6", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | - CPUIDLE_FLAG_INIT_XSTATE, + CPUIDLE_FLAG_INIT_XSTATE | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, .exit_latency = 170, .target_residency = 650, .enter = &intel_idle, @@ -1023,7 +1030,8 @@ static struct cpuidle_state gnr_cstates[] __initdata = { .name = "C6P", .desc = "MWAIT 0x21", .flags = MWAIT2flg(0x21) | CPUIDLE_FLAG_TLB_FLUSHED | - CPUIDLE_FLAG_INIT_XSTATE, + CPUIDLE_FLAG_INIT_XSTATE | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, .exit_latency = 210, .target_residency = 1000, .enter = &intel_idle, @@ -1296,7 +1304,8 @@ static struct cpuidle_state srf_cstates[] __initdata = { { .name = "C6S", .desc = "MWAIT 0x22", - .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, .exit_latency = 270, .target_residency = 700, .enter = &intel_idle, @@ -1304,7 +1313,8 @@ static struct cpuidle_state srf_cstates[] __initdata = { { .name = "C6SP", .desc = "MWAIT 0x23", - .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED, + .flags = 
MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED | + CPUIDLE_FLAG_PARTIAL_HINT_MATCH, .exit_latency = 310, .target_residency = 900, .enter = &intel_idle, @@ -1668,7 +1678,7 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) } } -static bool __init intel_idle_off_by_default(u32 mwait_hint) +static bool __init intel_idle_off_by_default(unsigned int flags, u32 mwait_hint) { int cstate, limit; @@ -1685,7 +1695,15 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint) * the interesting states are ACPI_CSTATE_FFH. */ for (cstate = 1; cstate < limit; cstate++) { - if (acpi_state_table.states[cstate].address == mwait_hint) + u32 acpi_hint = acpi_state_table.states[cstate].address; + u32 table_hint = mwait_hint; + + if (flags & CPUIDLE_FLAG_PARTIAL_HINT_MATCH) { + acpi_hint &= ~MWAIT_SUBSTATE_MASK; + table_hint &= ~MWAIT_SUBSTATE_MASK; + } + + if (acpi_hint == table_hint) return false; } return true; @@ -1695,7 +1713,10 @@ static bool __init intel_idle_off_by_default(u32 mwait_hint) static inline bool intel_idle_acpi_cst_extract(void) { return false; } static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { } -static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; } +static inline bool intel_idle_off_by_default(unsigned int flags, u32 mwait_hint) +{ + return false; +} #endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */ /** @@ -2019,7 +2040,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) if ((disabled_states_mask & BIT(drv->state_count)) || ((icpu->use_acpi || force_use_acpi) && - intel_idle_off_by_default(mwait_hint) && + intel_idle_off_by_default(state->flags, mwait_hint) && !(state->flags & CPUIDLE_FLAG_ALWAYS_ENABLE))) state->flags |= CPUIDLE_FLAG_OFF; -- Gitee From e4c1ed8d0231ed59aa8539849a6eba914bfba46e Mon Sep 17 00:00:00 2001 From: Zelin Deng Date: Fri, 8 Nov 2024 14:03:52 +0800 Subject: [PATCH 1647/2138] anolis: uprobes: take care of ref count and mm counter for zero 
page ANBZ: #11781 An unexpected "Bad page state" is reported in function uprobe_write_opcode() when doing fuzzy test, see call trace: BUG: Bad page state in process syz.0.354 pfn:08021 page:00000000564dcf15 refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x8021 flags: 0xffffe000004004(referenced|reserved|node=0|zone=1|lastcpupid=0x3ffff) page_type: 0xfffffffe() raw: 00ffffe000004004 ffffea0000200848 ffffea0000200848 0000000000000000 raw: 0000000000000000 0000000000000000 00000000fffffffe 0000000000000000 page dumped because: PAGE_FLAGS_CHECK_AT_FREE flag(s) set Modules linked in: CPU: 0 PID: 4909 Comm: syz.0.354 Tainted: G W 6.6.52+ #1 Hardware name: Red Hat KVM, BIOS 1.16.0-4.al8 04/01/2014 Call Trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0x125/0x1b0 lib/dump_stack.c:106 bad_page+0x76/0x1a0 mm/page_alloc.c:514 free_page_is_bad_report mm/page_alloc.c:955 [inline] free_page_is_bad mm/page_alloc.c:971 [inline] free_pages_prepare mm/page_alloc.c:1140 [inline] free_unref_page_prepare+0x6f2/0xb70 mm/page_alloc.c:2333 free_unref_page+0x38/0x270 mm/page_alloc.c:2430 __folio_put_small mm/swap.c:106 [inline] __folio_put+0xba/0x160 mm/swap.c:129 folio_put include/linux/mm.h:1489 [inline] put_page include/linux/mm.h:1558 [inline] uprobe_write_opcode+0x913/0x1170 kernel/events/uprobes.c:544 install_breakpoint.part.0+0xfd/0x140 kernel/events/uprobes.c:908 install_breakpoint kernel/events/uprobes.c:897 [inline] register_for_each_vma+0x57c/0x880 kernel/events/uprobes.c:1064 uprobe_apply+0x11b/0x170 kernel/events/uprobes.c:1225 uprobe_perf_open+0x361/0x730 kernel/trace/trace_uprobe.c:1309 trace_uprobe_register+0xc2/0x110 kernel/trace/trace_uprobe.c:1462 perf_trace_event_open kernel/trace/trace_event_perf.c:184 [inline] perf_trace_event_init kernel/trace/trace_event_perf.c:206 [inline] perf_trace_event_init+0x143/0x580 kernel/trace/trace_event_perf.c:193 perf_uprobe_init+0x174/0x210 kernel/trace/trace_event_perf.c:332 
perf_uprobe_event_init+0x106/0x1d0 kernel/events/core.c:10436 perf_try_init_event+0x132/0x530 kernel/events/core.c:11705 perf_init_event+0x2cc/0x6f0 kernel/events/core.c:11775 perf_event_alloc+0xd0d/0x2860 kernel/events/core.c:12056 __do_sys_perf_event_open+0x478/0x2910 kernel/events/core.c:12563 do_syscall_x64 arch/x86/entry/common.c:51 [inline] do_syscall_64+0x38/0x80 arch/x86/entry/common.c:81 entry_SYSCALL_64_after_hwframe+0x78/0xe2 RIP: 0033:0x7fd07b6154dd Code: 00 c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 6b 89 0c 00 f7 d8 64 89 01 48 RSP: 002b:00007fd07b512cb8 EFLAGS: 00000246 ORIG_RAX: 000000000000012a RAX: ffffffffffffffda RBX: 0000000000525f80 RCX: 00007fd07b6154dd RDX: 0000000000000000 RSI: ffffffffffffffff RDI: 0000000020000640 RBP: 0000000000525f80 R08: 0000000000000000 R09: 0000000000000000 R10: ffffffffffffffff R11: 0000000000000246 R12: 0000000000000000 R13: 0000000000000000 R14: 0000000000000000 R15: 00007fd07b513640 BUG: Bad rss-counter state mm:00000000bbc90bb3 type:MM_FILEPAGES val:-1 For COW, zero page could be returned in uprobe_write_opcode() by gup. 
We must take care of its ref count and mm counter: - gup page fault returns zero folio without ref it, so do not call folio_put() in __replace_page() - zero page could be file pages or shmem pages, its corresponding rss_stat won't be increased, so don't decrease it either Signed-off-by: Zelin Deng Reviewed-by: Baolin Wang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4089 --- kernel/events/uprobes.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index e71c35cf15ff..b58356719fa6 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -188,7 +188,8 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, dec_mm_counter(mm, MM_ANONPAGES); if (!folio_test_anon(old_folio)) { - dec_mm_counter(mm, mm_counter_file(old_page)); + if (!is_zero_page(old_page)) + dec_mm_counter(mm, mm_counter_file(old_page)); inc_mm_counter(mm, MM_ANONPAGES); } @@ -198,11 +199,15 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, set_pte_at_notify(mm, addr, pvmw.pte, mk_pte(new_page, vma->vm_page_prot)); - folio_remove_rmap_pte(old_folio, old_page, vma); - if (!folio_mapped(old_folio)) - folio_free_swap(old_folio); + /* zero page won't be added to rmap, skip, see do_anonymous_page() */ + if (!is_zero_page(old_page)) { + folio_remove_rmap_pte(old_folio, old_page, vma); + if (!folio_mapped(old_folio)) + folio_free_swap(old_folio); + } page_vma_mapped_walk_done(&pvmw); - folio_put(old_folio); + if (!is_zero_page(old_page)) + folio_put(old_folio); err = 0; unlock: -- Gitee From 7f1ff9241986ef00abdef70f515e0f03a4dd5d3f Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 26 Mar 2024 15:32:09 +0100 Subject: [PATCH 1648/2138] selftests/memfd_secret: add vmsplice() test ANBZ: #11800 commit c139ca42f5740f0c94a001eee82dafada72c19ee upstream. 
Let's add a simple reproducer for a scenario where GUP-fast could succeed on secretmem folios, making vmsplice() succeed instead of failing. The reproducer is based on a reproducer [1] by Miklos Szeredi. We want to perform two tests: vmsplice() when a fresh page was just faulted in, and vmsplice() on an existing page after munmap() that would drain certain LRU caches/batches in the kernel. In an ideal world, we could use fallocate(FALLOC_FL_PUNCH_HOLE) / MADV_REMOVE to remove any existing page. As that is currently not possible, run the test before any other tests that would allocate memory in the secretmem fd. Perform the ftruncate() only once, and check the return value. [1] https://lkml.kernel.org/r/CAJfpegt3UCsMmxd0taOY11Uaw5U=eS1fE5dn0wZX3HF0oy8-oQ@mail.gmail.com Link: https://lkml.kernel.org/r/20240326143210.291116-3-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Mike Rapoport (IBM) Cc: Lorenzo Stoakes Cc: Miklos Szeredi Cc: xingwei lee Cc: yue sun Signed-off-by: Andrew Morton Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4097 --- tools/testing/selftests/mm/memfd_secret.c | 51 ++++++++++++++++++++++- 1 file changed, 49 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/mm/memfd_secret.c b/tools/testing/selftests/mm/memfd_secret.c index 9b298f6a04b3..9a0597310a76 100644 --- a/tools/testing/selftests/mm/memfd_secret.c +++ b/tools/testing/selftests/mm/memfd_secret.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "../kselftest.h" @@ -83,6 +84,45 @@ static void test_mlock_limit(int fd) pass("mlock limit is respected\n"); } +static void test_vmsplice(int fd, const char *desc) +{ + ssize_t transferred; + struct iovec iov; + int pipefd[2]; + char *mem; + + if (pipe(pipefd)) { + fail("pipe failed: %s\n", strerror(errno)); + return; + } + + mem = mmap(NULL, page_size, prot, mode, fd, 0); + if (mem == MAP_FAILED) { + fail("Unable to mmap secret 
memory\n"); + goto close_pipe; + } + + /* + * vmsplice() may use GUP-fast, which must also fail. Prefault the + * page table, so GUP-fast could find it. + */ + memset(mem, PATTERN, page_size); + + iov.iov_base = mem; + iov.iov_len = page_size; + transferred = vmsplice(pipefd[1], &iov, 1, 0); + + if (transferred < 0 && errno == EFAULT) + pass("vmsplice is blocked as expected with %s\n", desc); + else + fail("vmsplice: unexpected memory access with %s\n", desc); + + munmap(mem, page_size); +close_pipe: + close(pipefd[0]); + close(pipefd[1]); +} + static void try_process_vm_read(int fd, int pipefd[2]) { struct iovec liov, riov; @@ -187,7 +227,6 @@ static void test_remote_access(int fd, const char *name, return; } - ftruncate(fd, page_size); memset(mem, PATTERN, page_size); if (write(pipefd[1], &mem, sizeof(mem)) < 0) { @@ -258,7 +297,7 @@ static void prepare(void) strerror(errno)); } -#define NUM_TESTS 4 +#define NUM_TESTS 6 int main(int argc, char *argv[]) { @@ -277,9 +316,17 @@ int main(int argc, char *argv[]) ksft_exit_fail_msg("memfd_secret failed: %s\n", strerror(errno)); } + if (ftruncate(fd, page_size)) + ksft_exit_fail_msg("ftruncate failed: %s\n", strerror(errno)); test_mlock_limit(fd); test_file_apis(fd); + /* + * We have to run the first vmsplice test before any secretmem page was + * allocated for this fd. + */ + test_vmsplice(fd, "fresh page"); + test_vmsplice(fd, "existing page"); test_process_vm_read(fd); test_ptrace(fd); -- Gitee From 6a527f238fdfaf21e8e3158b8cbc2f1d4170ce13 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 26 Mar 2024 15:32:10 +0100 Subject: [PATCH 1649/2138] mm: merge folio_is_secretmem() and folio_fast_pin_allowed() into gup_fast_folio_allowed() ANBZ: #11800 commit f002882ca369aba3eece5006f3346ccf75ede7c5 upstream. folio_is_secretmem() is currently only used during GUP-fast. 
Nowadays, folio_fast_pin_allowed() performs similar checks during GUP-fast and contains a lot of careful handling -- READ_ONCE() -- , sanity checks -- lockdep_assert_irqs_disabled() -- and helpful comments on how this handling is safe and correct. So let's merge folio_is_secretmem() into folio_fast_pin_allowed(). Rename folio_fast_pin_allowed() to gup_fast_folio_allowed(), to better match the new semantics. Link: https://lkml.kernel.org/r/20240326143210.291116-4-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Mike Rapoport (IBM) Cc: David Hildenbrand Cc: Lorenzo Stoakes Cc: Miklos Szeredi Cc: xingwei lee Cc: yue sun Signed-off-by: Andrew Morton Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4097 --- include/linux/secretmem.h | 21 ++--------------- mm/gup.c | 48 +++++++++++++++++++++++---------------- 2 files changed, 30 insertions(+), 39 deletions(-) diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h index acf7e1a3f3de..e918f96881f5 100644 --- a/include/linux/secretmem.h +++ b/include/linux/secretmem.h @@ -6,25 +6,8 @@ extern const struct address_space_operations secretmem_aops; -static inline bool folio_is_secretmem(struct folio *folio) +static inline bool secretmem_mapping(struct address_space *mapping) { - struct address_space *mapping; - - /* - * Using folio_mapping() is quite slow because of the actual call - * instruction. - * We know that secretmem pages are not compound, so we can - * save a couple of cycles here. 
- */ - if (folio_test_large(folio)) - return false; - - mapping = (struct address_space *) - ((unsigned long)folio->mapping & ~PAGE_MAPPING_FLAGS); - - if (!mapping || mapping != folio->mapping) - return false; - return mapping->a_ops == &secretmem_aops; } @@ -38,7 +21,7 @@ static inline bool vma_is_secretmem(struct vm_area_struct *vma) return false; } -static inline bool folio_is_secretmem(struct folio *folio) +static inline bool secretmem_mapping(struct address_space *mapping) { return false; } diff --git a/mm/gup.c b/mm/gup.c index 32249c7f94da..ce217ce2a584 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2368,12 +2368,14 @@ EXPORT_SYMBOL(get_user_pages_unlocked); #ifdef CONFIG_HAVE_FAST_GUP /* - * Used in the GUP-fast path to determine whether a pin is permitted for a - * specific folio. + * Used in the GUP-fast path to determine whether GUP is permitted to work on + * a specific folio. * * This call assumes the caller has pinned the folio, that the lowest page table * level still points to this folio, and that interrupts have been disabled. * + * GUP-fast must reject all secretmem folios. + * * Writing to pinned file-backed dirty tracked folios is inherently problematic * (see comment describing the writable_file_mapping_allowed() function). We * therefore try to avoid the most egregious case of a long-term mapping doing @@ -2383,25 +2385,34 @@ EXPORT_SYMBOL(get_user_pages_unlocked); * in the fast path, so instead we whitelist known good cases and if in doubt, * fall back to the slow path. */ -static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags) +static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags) { + bool reject_file_backed = false; struct address_space *mapping; + bool check_secretmem = false; unsigned long mapping_flags; /* * If we aren't pinning then no problematic write can occur. A long term * pin is the most egregious case so this is the one we disallow. 
*/ - if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) != + if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) == (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) - return true; + reject_file_backed = true; + + /* We hold a folio reference, so we can safely access folio fields. */ - /* The folio is pinned, so we can safely access folio fields. */ + /* secretmem folios are always order-0 folios. */ + if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio)) + check_secretmem = true; + + if (!reject_file_backed && !check_secretmem) + return true; if (WARN_ON_ONCE(folio_test_slab(folio))) return false; - /* hugetlb mappings do not require dirty-tracking. */ + /* hugetlb neither requires dirty-tracking nor can be secretmem. */ if (folio_test_hugetlb(folio)) return true; @@ -2437,10 +2448,12 @@ static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags) /* * At this point, we know the mapping is non-null and points to an - * address_space object. The only remaining whitelisted file system is - * shmem. + * address_space object. */ - return shmem_mapping(mapping); + if (check_secretmem && secretmem_mapping(mapping)) + return false; + /* The only remaining allowed file system is shmem. 
*/ + return !reject_file_backed || shmem_mapping(mapping); } static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, @@ -2625,18 +2638,13 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, if (!folio) goto pte_unmap; - if (unlikely(folio_is_secretmem(folio))) { - gup_put_folio(folio, 1, flags); - goto pte_unmap; - } - if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { gup_put_folio(folio, 1, flags); goto pte_unmap; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, 1, flags); goto pte_unmap; } @@ -2833,7 +2841,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, return 0; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } @@ -2904,7 +2912,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, return 0; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } @@ -2948,7 +2956,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, return 0; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } @@ -2993,7 +3001,7 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, return 0; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } -- Gitee From 8029634d08bf4aca8e6cc7795a6fe4940a2fc04b Mon Sep 17 00:00:00 2001 From: Peng Hao Date: Wed, 6 Mar 2024 11:48:04 +0800 Subject: [PATCH 1650/2138] buildid: use kmap_local_page() ANBZ: #11800 commit c44f063e740ed580574b9012751e641e749bbe0e upstream. Use kmap_local_page() instead of kmap_atomic() which has been deprecated. 
Link: https://lkml.kernel.org/r/20240306034804.62087-1-flyingpeng@tencent.com Signed-off-by: Peng Hao Signed-off-by: Andrew Morton Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4097 --- lib/buildid.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/buildid.c b/lib/buildid.c index 9fc46366597e..f2d6e19735bf 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -166,7 +166,7 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, } ret = -EINVAL; - page_addr = kmap_atomic(page); + page_addr = kmap_local_page(page); ehdr = (Elf32_Ehdr *)page_addr; /* compare magic x7f "ELF" */ @@ -182,7 +182,7 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ret = get_build_id_64(page_addr, build_id, size); out: - kunmap_atomic(page_addr); + kunmap_local(page_addr); put_page(page); return ret; } -- Gitee From df650fcebd194d98e3b5efd55f7fae455e4cf89b Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 29 Aug 2024 10:42:23 -0700 Subject: [PATCH 1651/2138] lib/buildid: harden build ID parsing logic ANBZ: #11800 commit 905415ff3ffb1d7e5afa62bacabd79776bd24606 upstream. Harden build ID parsing logic, adding explicit READ_ONCE() where it's important to have a consistent value read and validated just once. Also, as pointed out by Andi Kleen, we need to make sure that entire ELF note is within a page bounds, so move the overflow check up and add an extra note_size boundaries validation. Fixes tag below points to the code that moved this code into lib/buildid.c, and then subsequently was used in perf subsystem, making this code exposed to perf_event_open() users in v5.12+. 
Cc: stable@vger.kernel.org Reviewed-by: Eduard Zingerman Reviewed-by: Jann Horn Suggested-by: Andi Kleen Fixes: bd7525dacd7e ("bpf: Move stack_map_get_build_id into lib") Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20240829174232.3133883-2-andrii@kernel.org Signed-off-by: Alexei Starovoitov Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4097 --- lib/buildid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/buildid.c b/lib/buildid.c index f2d6e19735bf..f592ab9f8e09 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -40,7 +40,7 @@ static int parse_build_id_buf(unsigned char *build_id, name_sz == note_name_sz && memcmp(nhdr + 1, note_name, note_name_sz) == 0 && desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) { - data = note_start + note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4); + data = note_start + note_off + ALIGN(note_name_sz, 4); memcpy(build_id, data, desc_sz); memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz); if (size) -- Gitee From 0f8252578e24320ce4a6b56ff940da4823b802ac Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 29 Aug 2024 10:42:24 -0700 Subject: [PATCH 1652/2138] lib/buildid: add single folio-based file reader abstraction ANBZ: #11800 commit de3ec364c3c37971dbba1e37a55ae5b646c6f24e upstream. Add freader abstraction that transparently manages fetching and local mapping of the underlying file page(s) and provides a simple direct data access interface. freader_fetch() is the only and single interface necessary. It accepts file offset and desired number of bytes that should be accessed, and will return a kernel mapped pointer that caller can use to dereference data up to requested size. 
Requested size can't be bigger than the size of the extra buffer provided during initialization (because, worst case, all requested data has to be copied into it, so it's better to flag wrongly sized buffer unconditionally, regardless if requested data range is crossing page boundaries or not). If folio is not paged in, or some of the conditions are not satisfied, NULL is returned and more detailed error code can be accessed through freader->err field. This approach makes the usage of freader_fetch() cleaner. To accommodate accessing file data that crosses folio boundaries, user has to provide an extra buffer that will be used to make a local copy, if necessary. This is done to maintain a simple linear pointer data access interface. We switch existing build ID parsing logic to it, without changing or lifting any of the existing constraints, yet. This will be done separately. Given existing code was written with the assumption that it's always working with a single (first) page of the underlying ELF file, logic passes direct pointers around, which doesn't really work well with freader approach and would be limiting when removing the single page (folio) limitation. So we adjust all the logic to work in terms of file offsets. There is also a memory buffer-based version (freader_init_from_mem()) for cases when desired data is already available in kernel memory. This is used for parsing vmlinux's own build ID note. In this mode assumption is that provided data starts at "file offset" zero, which works great when parsing ELF notes sections, as all the parsing logic is relative to note section's start. 
Reviewed-by: Eduard Zingerman Reviewed-by: Shakeel Butt Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20240829174232.3133883-3-andrii@kernel.org Signed-off-by: Alexei Starovoitov Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4097 --- lib/buildid.c | 263 ++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 210 insertions(+), 53 deletions(-) diff --git a/lib/buildid.c b/lib/buildid.c index f592ab9f8e09..6b71c0681ded 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -8,24 +8,155 @@ #define BUILD_ID 3 +struct freader { + void *buf; + u32 buf_sz; + int err; + union { + struct { + struct address_space *mapping; + struct folio *folio; + void *addr; + loff_t folio_off; + }; + struct { + const char *data; + u64 data_sz; + }; + }; +}; + +static void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz, + struct address_space *mapping) +{ + memset(r, 0, sizeof(*r)); + r->buf = buf; + r->buf_sz = buf_sz; + r->mapping = mapping; +} + +static void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz) +{ + memset(r, 0, sizeof(*r)); + r->data = data; + r->data_sz = data_sz; +} + +static void freader_put_folio(struct freader *r) +{ + if (!r->folio) + return; + kunmap_local(r->addr); + folio_put(r->folio); + r->folio = NULL; +} + +static int freader_get_folio(struct freader *r, loff_t file_off) +{ + /* check if we can just reuse current folio */ + if (r->folio && file_off >= r->folio_off && + file_off < r->folio_off + folio_size(r->folio)) + return 0; + + freader_put_folio(r); + + r->folio = filemap_get_folio(r->mapping, file_off >> PAGE_SHIFT); + if (IS_ERR(r->folio) || !folio_test_uptodate(r->folio)) { + if (!IS_ERR(r->folio)) + folio_put(r->folio); + r->folio = NULL; + return -EFAULT; + } + + r->folio_off = folio_pos(r->folio); + r->addr = kmap_local_folio(r->folio, 0); + + return 0; +} + +static const void *freader_fetch(struct freader *r, loff_t 
file_off, size_t sz) +{ + size_t folio_sz; + + /* provided internal temporary buffer should be sized correctly */ + if (WARN_ON(r->buf && sz > r->buf_sz)) { + r->err = -E2BIG; + return NULL; + } + + if (unlikely(file_off + sz < file_off)) { + r->err = -EOVERFLOW; + return NULL; + } + + /* working with memory buffer is much more straightforward */ + if (!r->buf) { + if (file_off + sz > r->data_sz) { + r->err = -ERANGE; + return NULL; + } + return r->data + file_off; + } + + /* fetch or reuse folio for given file offset */ + r->err = freader_get_folio(r, file_off); + if (r->err) + return NULL; + + /* if requested data is crossing folio boundaries, we have to copy + * everything into our local buffer to keep a simple linear memory + * access interface + */ + folio_sz = folio_size(r->folio); + if (file_off + sz > r->folio_off + folio_sz) { + int part_sz = r->folio_off + folio_sz - file_off; + + /* copy the part that resides in the current folio */ + memcpy(r->buf, r->addr + (file_off - r->folio_off), part_sz); + + /* fetch next folio */ + r->err = freader_get_folio(r, r->folio_off + folio_sz); + if (r->err) + return NULL; + + /* copy the rest of requested data */ + memcpy(r->buf + part_sz, r->addr, sz - part_sz); + + return r->buf; + } + + /* if data fits in a single folio, just return direct pointer */ + return r->addr + (file_off - r->folio_off); +} + +static void freader_cleanup(struct freader *r) +{ + if (!r->buf) + return; /* non-file-backed mode */ + + freader_put_folio(r); +} + /* * Parse build id from the note segment. This logic can be shared between * 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are * identical. 
*/ -static int parse_build_id_buf(unsigned char *build_id, - __u32 *size, - const void *note_start, - Elf32_Word note_size) +static int parse_build_id_buf(struct freader *r, + unsigned char *build_id, __u32 *size, + loff_t note_off, Elf32_Word note_size) { const char note_name[] = "GNU"; const size_t note_name_sz = sizeof(note_name); - u64 note_off = 0, new_off, name_sz, desc_sz; + u32 build_id_off, new_off, note_end, name_sz, desc_sz; + const Elf32_Nhdr *nhdr; const char *data; - while (note_off + sizeof(Elf32_Nhdr) < note_size && - note_off + sizeof(Elf32_Nhdr) > note_off /* overflow */) { - Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_off); + note_end = note_off + note_size; + while (note_end - note_off > sizeof(Elf32_Nhdr) + note_name_sz) { + nhdr = freader_fetch(r, note_off, sizeof(Elf32_Nhdr) + note_name_sz); + if (!nhdr) + return r->err; name_sz = READ_ONCE(nhdr->n_namesz); desc_sz = READ_ONCE(nhdr->n_descsz); @@ -33,14 +164,20 @@ static int parse_build_id_buf(unsigned char *build_id, new_off = note_off + sizeof(Elf32_Nhdr); if (check_add_overflow(new_off, ALIGN(name_sz, 4), &new_off) || check_add_overflow(new_off, ALIGN(desc_sz, 4), &new_off) || - new_off > note_size) + new_off > note_end) break; if (nhdr->n_type == BUILD_ID && name_sz == note_name_sz && memcmp(nhdr + 1, note_name, note_name_sz) == 0 && desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) { - data = note_start + note_off + ALIGN(note_name_sz, 4); + build_id_off = note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4); + + /* freader_fetch() will invalidate nhdr pointer */ + data = freader_fetch(r, build_id_off, desc_sz); + if (!data) + return r->err; + memcpy(build_id, data, desc_sz); memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz); if (size) @@ -54,30 +191,33 @@ static int parse_build_id_buf(unsigned char *build_id, return -EINVAL; } -static inline int parse_build_id(const void *page_addr, +static inline int parse_build_id(struct freader *r, unsigned char *build_id, __u32 *size, 
- const void *note_start, + loff_t note_start_off, Elf32_Word note_size) { /* check for overflow */ - if (note_start < page_addr || note_start + note_size < note_start) + if (note_start_off + note_size < note_start_off) return -EINVAL; /* only supports note that fits in the first page */ - if (note_start + note_size > page_addr + PAGE_SIZE) + if (note_start_off + note_size > PAGE_SIZE) return -EINVAL; - return parse_build_id_buf(build_id, size, note_start, note_size); + return parse_build_id_buf(r, build_id, size, note_start_off, note_size); } /* Parse build ID from 32-bit ELF */ -static int get_build_id_32(const void *page_addr, unsigned char *build_id, - __u32 *size) +static int get_build_id_32(struct freader *r, unsigned char *build_id, __u32 *size) { - Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr; - Elf32_Phdr *phdr; - __u32 i, phnum; + const Elf32_Ehdr *ehdr; + const Elf32_Phdr *phdr; + __u32 phnum, i; + + ehdr = freader_fetch(r, 0, sizeof(Elf32_Ehdr)); + if (!ehdr) + return r->err; /* * FIXME @@ -87,30 +227,35 @@ static int get_build_id_32(const void *page_addr, unsigned char *build_id, if (ehdr->e_phoff != sizeof(Elf32_Ehdr)) return -EINVAL; + /* subsequent freader_fetch() calls invalidate pointers, so remember locally */ phnum = READ_ONCE(ehdr->e_phnum); /* only supports phdr that fits in one page */ if (phnum > (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr)) return -EINVAL; - phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr)); - for (i = 0; i < phnum; ++i) { - if (phdr[i].p_type == PT_NOTE && - !parse_build_id(page_addr, build_id, size, - page_addr + READ_ONCE(phdr[i].p_offset), - READ_ONCE(phdr[i].p_filesz))) + phdr = freader_fetch(r, i * sizeof(Elf32_Phdr), sizeof(Elf32_Phdr)); + if (!phdr) + return r->err; + + if (phdr->p_type == PT_NOTE && + !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset), + READ_ONCE(phdr->p_filesz))) return 0; } return -EINVAL; } /* Parse build ID from 64-bit ELF */ -static int get_build_id_64(const void 
*page_addr, unsigned char *build_id, - __u32 *size) +static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *size) { - Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr; - Elf64_Phdr *phdr; - __u32 i, phnum; + const Elf64_Ehdr *ehdr; + const Elf64_Phdr *phdr; + __u32 phnum, i; + + ehdr = freader_fetch(r, 0, sizeof(Elf64_Ehdr)); + if (!ehdr) + return r->err; /* * FIXME @@ -120,23 +265,29 @@ static int get_build_id_64(const void *page_addr, unsigned char *build_id, if (ehdr->e_phoff != sizeof(Elf64_Ehdr)) return -EINVAL; + /* subsequent freader_fetch() calls invalidate pointers, so remember locally */ phnum = READ_ONCE(ehdr->e_phnum); /* only supports phdr that fits in one page */ if (phnum > (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr)) return -EINVAL; - phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr)); - for (i = 0; i < phnum; ++i) { - if (phdr[i].p_type == PT_NOTE && - !parse_build_id(page_addr, build_id, size, - page_addr + READ_ONCE(phdr[i].p_offset), - READ_ONCE(phdr[i].p_filesz))) + phdr = freader_fetch(r, i * sizeof(Elf64_Phdr), sizeof(Elf64_Phdr)); + if (!phdr) + return r->err; + + if (phdr->p_type == PT_NOTE && + !parse_build_id(r, build_id, size, READ_ONCE(phdr->p_offset), + READ_ONCE(phdr->p_filesz))) return 0; } + return -EINVAL; } +/* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */ +#define MAX_FREADER_BUF_SZ 64 + /* * Parse build ID of ELF file mapped to vma * @vma: vma object @@ -148,26 +299,25 @@ static int get_build_id_64(const void *page_addr, unsigned char *build_id, int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size) { - Elf32_Ehdr *ehdr; - struct page *page; - void *page_addr; + const Elf32_Ehdr *ehdr; + struct freader r; + char buf[MAX_FREADER_BUF_SZ]; int ret; /* only works for page backed storage */ if (!vma->vm_file) return -EINVAL; - page = find_get_page(vma->vm_file->f_mapping, 0); - if (!page) - return -EFAULT; /* page not mapped */ - if (!PageUptodate(page)) 
{ - put_page(page); - return -EFAULT; + freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file->f_mapping); + + /* fetch first 18 bytes of ELF header for checks */ + ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type)); + if (!ehdr) { + ret = r.err; + goto out; } ret = -EINVAL; - page_addr = kmap_local_page(page); - ehdr = (Elf32_Ehdr *)page_addr; /* compare magic x7f "ELF" */ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) @@ -178,12 +328,11 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, goto out; if (ehdr->e_ident[EI_CLASS] == ELFCLASS32) - ret = get_build_id_32(page_addr, build_id, size); + ret = get_build_id_32(&r, build_id, size); else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) - ret = get_build_id_64(page_addr, build_id, size); + ret = get_build_id_64(&r, build_id, size); out: - kunmap_local(page_addr); - put_page(page); + freader_cleanup(&r); return ret; } @@ -197,7 +346,15 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, */ int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size) { - return parse_build_id_buf(build_id, NULL, buf, buf_size); + struct freader r; + int err; + + freader_init_from_mem(&r, buf, buf_size); + + err = parse_build_id(&r, build_id, NULL, 0, buf_size); + + freader_cleanup(&r); + return err; } #if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE) -- Gitee From 32945c3d0d01ed399a4c25dfe3227c71088e23b4 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 29 Aug 2024 10:42:25 -0700 Subject: [PATCH 1653/2138] lib/buildid: take into account e_phoff when fetching program headers ANBZ: #11800 commit d4deb82423416e3ace7889816eea630af81fe702 upstream. Current code assumption is that program (segment) headers are following ELF header immediately. This is a common case, but is not guaranteed. So take into account e_phoff field of the ELF header when accessing program headers. 
Reviewed-by: Eduard Zingerman Reported-by: Alexey Dobriyan Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20240829174232.3133883-4-andrii@kernel.org Signed-off-by: Alexei Starovoitov Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4097 --- lib/buildid.c | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/lib/buildid.c b/lib/buildid.c index 6b71c0681ded..a8eb2263af67 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -213,28 +213,26 @@ static int get_build_id_32(struct freader *r, unsigned char *build_id, __u32 *si { const Elf32_Ehdr *ehdr; const Elf32_Phdr *phdr; - __u32 phnum, i; + __u32 phnum, phoff, i; ehdr = freader_fetch(r, 0, sizeof(Elf32_Ehdr)); if (!ehdr) return r->err; - /* - * FIXME - * Neither ELF spec nor ELF loader require that program headers - * start immediately after ELF header. - */ - if (ehdr->e_phoff != sizeof(Elf32_Ehdr)) - return -EINVAL; - /* subsequent freader_fetch() calls invalidate pointers, so remember locally */ phnum = READ_ONCE(ehdr->e_phnum); + phoff = READ_ONCE(ehdr->e_phoff); + /* only supports phdr that fits in one page */ if (phnum > (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr)) return -EINVAL; + /* check that phoff is not large enough to cause an overflow */ + if (phoff + phnum * sizeof(Elf32_Phdr) < phoff) + return -EINVAL; + for (i = 0; i < phnum; ++i) { - phdr = freader_fetch(r, i * sizeof(Elf32_Phdr), sizeof(Elf32_Phdr)); + phdr = freader_fetch(r, phoff + i * sizeof(Elf32_Phdr), sizeof(Elf32_Phdr)); if (!phdr) return r->err; @@ -252,27 +250,26 @@ static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *si const Elf64_Ehdr *ehdr; const Elf64_Phdr *phdr; __u32 phnum, i; + __u64 phoff; ehdr = freader_fetch(r, 0, sizeof(Elf64_Ehdr)); if (!ehdr) return r->err; - /* - * FIXME - * Neither ELF spec nor ELF loader require that program headers - * start immediately 
after ELF header. - */ - if (ehdr->e_phoff != sizeof(Elf64_Ehdr)) - return -EINVAL; - /* subsequent freader_fetch() calls invalidate pointers, so remember locally */ phnum = READ_ONCE(ehdr->e_phnum); + phoff = READ_ONCE(ehdr->e_phoff); + /* only supports phdr that fits in one page */ if (phnum > (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr)) return -EINVAL; + /* check that phoff is not large enough to cause an overflow */ + if (phoff + phnum * sizeof(Elf64_Phdr) < phoff) + return -EINVAL; + for (i = 0; i < phnum; ++i) { - phdr = freader_fetch(r, i * sizeof(Elf64_Phdr), sizeof(Elf64_Phdr)); + phdr = freader_fetch(r, phoff + i * sizeof(Elf64_Phdr), sizeof(Elf64_Phdr)); if (!phdr) return r->err; -- Gitee From d38a19b0ef1e8acaeeed6a4ef1cdef91599ffe50 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 29 Aug 2024 10:42:26 -0700 Subject: [PATCH 1654/2138] lib/buildid: remove single-page limit for PHDR search ANBZ: #11800 commit 4e9d360c4cdf2dc11a30fd5caf39e8c31f0896cb upstream. Now that freader allows to access multiple pages transparently, there is no need to limit program headers to the very first ELF file page. Remove this limitation, but still put some sane limit on amount of program headers that we are willing to iterate over (set arbitrarily to 256). 
Reviewed-by: Eduard Zingerman Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20240829174232.3133883-5-andrii@kernel.org Signed-off-by: Alexei Starovoitov Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4097 --- lib/buildid.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/buildid.c b/lib/buildid.c index a8eb2263af67..0c980c388b45 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -8,6 +8,8 @@ #define BUILD_ID 3 +#define MAX_PHDR_CNT 256 + struct freader { void *buf; u32 buf_sz; @@ -223,9 +225,9 @@ static int get_build_id_32(struct freader *r, unsigned char *build_id, __u32 *si phnum = READ_ONCE(ehdr->e_phnum); phoff = READ_ONCE(ehdr->e_phoff); - /* only supports phdr that fits in one page */ - if (phnum > (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr)) - return -EINVAL; + /* set upper bound on amount of segments (phdrs) we iterate */ + if (phnum > MAX_PHDR_CNT) + phnum = MAX_PHDR_CNT; /* check that phoff is not large enough to cause an overflow */ if (phoff + phnum * sizeof(Elf32_Phdr) < phoff) @@ -260,9 +262,9 @@ static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *si phnum = READ_ONCE(ehdr->e_phnum); phoff = READ_ONCE(ehdr->e_phoff); - /* only supports phdr that fits in one page */ - if (phnum > (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr)) - return -EINVAL; + /* set upper bound on amount of segments (phdrs) we iterate */ + if (phnum > MAX_PHDR_CNT) + phnum = MAX_PHDR_CNT; /* check that phoff is not large enough to cause an overflow */ if (phoff + phnum * sizeof(Elf64_Phdr) < phoff) -- Gitee From 5b04c561e3f2a1cf47447e2a88f3a5669b3d48d3 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 29 Aug 2024 10:42:27 -0700 Subject: [PATCH 1655/2138] lib/buildid: rename build_id_parse() into build_id_parse_nofault() ANBZ: #11800 commit 45b8fc3096542a53bfd245a9ad8ef870384b4897 upstream. 
Make it clear that build_id_parse() assumes that it can take no page fault by renaming it and current few users to build_id_parse_nofault(). Also add build_id_parse() stub which for now falls back to non-sleepable implementation, but will be changed in subsequent patches to take advantage of sleepable context. PROCMAP_QUERY ioctl() on /proc//maps file is using build_id_parse() and will automatically take advantage of more reliable sleepable context implementation. Reviewed-by: Eduard Zingerman Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20240829174232.3133883-6-andrii@kernel.org Signed-off-by: Alexei Starovoitov Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4097 --- include/linux/buildid.h | 4 ++-- kernel/bpf/stackmap.c | 2 +- kernel/events/core.c | 2 +- lib/buildid.c | 25 ++++++++++++++++++++++--- 4 files changed, 26 insertions(+), 7 deletions(-) diff --git a/include/linux/buildid.h b/include/linux/buildid.h index 3b7a0ff4642f..d093f52d73e0 100644 --- a/include/linux/buildid.h +++ b/include/linux/buildid.h @@ -6,8 +6,8 @@ #define BUILD_ID_SIZE_MAX 20 -int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, - __u32 *size); +int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size); +int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size); int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size); #if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE) diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index a330f38ae733..0ed48217b208 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -156,7 +156,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, goto build_id_valid; } vma = find_vma(current->mm, ips[i]); - if (!vma || build_id_parse(vma, id_offs[i].build_id, NULL)) { + if (!vma || 
build_id_parse_nofault(vma, id_offs[i].build_id, NULL)) { /* per entry fall back to ips */ id_offs[i].status = BPF_STACK_BUILD_ID_IP; id_offs[i].ip = ips[i]; diff --git a/kernel/events/core.c b/kernel/events/core.c index 264e3bcda783..b6d70669c918 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -8844,7 +8844,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; if (atomic_read(&nr_build_id_events)) - build_id_parse(vma, mmap_event->build_id, &mmap_event->build_id_size); + build_id_parse_nofault(vma, mmap_event->build_id, &mmap_event->build_id_size); perf_iterate_sb(perf_event_mmap_output, mmap_event, diff --git a/lib/buildid.c b/lib/buildid.c index 0c980c388b45..15184f9e817b 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -293,10 +293,12 @@ static int get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *si * @build_id: buffer to store build id, at least BUILD_ID_SIZE long * @size: returns actual build id size in case of success * - * Return: 0 on success, -EINVAL otherwise + * Assumes no page fault can be taken, so if relevant portions of ELF file are + * not already paged in, fetching of build ID fails. + * + * Return: 0 on success; negative error, otherwise */ -int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, - __u32 *size) +int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size) { const Elf32_Ehdr *ehdr; struct freader r; @@ -335,6 +337,23 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, return ret; } +/* + * Parse build ID of ELF file mapped to VMA + * @vma: vma object + * @build_id: buffer to store build id, at least BUILD_ID_SIZE long + * @size: returns actual build id size in case of success + * + * Assumes faultable context and can cause page faults to bring in file data + * into page cache. 
+ * + * Return: 0 on success; negative error, otherwise + */ +int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size) +{ + /* fallback to non-faultable version for now */ + return build_id_parse_nofault(vma, build_id, size); +} + /** * build_id_parse_buf - Get build ID from a buffer * @buf: ELF note section(s) to parse -- Gitee From 2d9cde598267776c26f95749ecaae13a2c8f623c Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 29 Aug 2024 10:42:28 -0700 Subject: [PATCH 1656/2138] lib/buildid: implement sleepable build_id_parse() API ANBZ: #11800 commit ad41251c290dfe3c01472c94d2439a59de23fe97 upstream. Extend freader with a flag specifying whether it's OK to cause page fault to fetch file data that is not already physically present in memory. With this, it's now easy to wait for data if the caller is running in sleepable (faultable) context. We utilize read_cache_folio() to bring the desired folio into page cache, after which the rest of the logic works just the same at folio level. 
Suggested-by: Omar Sandoval Cc: Shakeel Butt Cc: Johannes Weiner Reviewed-by: Eduard Zingerman Reviewed-by: Shakeel Butt Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20240829174232.3133883-7-andrii@kernel.org Signed-off-by: Alexei Starovoitov Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4097 --- lib/buildid.c | 54 +++++++++++++++++++++++++++++++++------------------ 1 file changed, 35 insertions(+), 19 deletions(-) diff --git a/lib/buildid.c b/lib/buildid.c index 15184f9e817b..d680165fc1a1 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -16,10 +16,11 @@ struct freader { int err; union { struct { - struct address_space *mapping; + struct file *file; struct folio *folio; void *addr; loff_t folio_off; + bool may_fault; }; struct { const char *data; @@ -29,12 +30,13 @@ struct freader { }; static void freader_init_from_file(struct freader *r, void *buf, u32 buf_sz, - struct address_space *mapping) + struct file *file, bool may_fault) { memset(r, 0, sizeof(*r)); r->buf = buf; r->buf_sz = buf_sz; - r->mapping = mapping; + r->file = file; + r->may_fault = may_fault; } static void freader_init_from_mem(struct freader *r, const char *data, u64 data_sz) @@ -62,7 +64,16 @@ static int freader_get_folio(struct freader *r, loff_t file_off) freader_put_folio(r); - r->folio = filemap_get_folio(r->mapping, file_off >> PAGE_SHIFT); + r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT); + + /* if sleeping is allowed, wait for the page, if necessary */ + if (r->may_fault && (IS_ERR(r->folio) || !folio_test_uptodate(r->folio))) { + filemap_invalidate_lock_shared(r->file->f_mapping); + r->folio = read_cache_folio(r->file->f_mapping, file_off >> PAGE_SHIFT, + NULL, r->file); + filemap_invalidate_unlock_shared(r->file->f_mapping); + } + if (IS_ERR(r->folio) || !folio_test_uptodate(r->folio)) { if (!IS_ERR(r->folio)) folio_put(r->folio); @@ -287,18 +298,8 @@ static int 
get_build_id_64(struct freader *r, unsigned char *build_id, __u32 *si /* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */ #define MAX_FREADER_BUF_SZ 64 -/* - * Parse build ID of ELF file mapped to vma - * @vma: vma object - * @build_id: buffer to store build id, at least BUILD_ID_SIZE long - * @size: returns actual build id size in case of success - * - * Assumes no page fault can be taken, so if relevant portions of ELF file are - * not already paged in, fetching of build ID fails. - * - * Return: 0 on success; negative error, otherwise - */ -int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size) +static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, + __u32 *size, bool may_fault) { const Elf32_Ehdr *ehdr; struct freader r; @@ -309,7 +310,7 @@ int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, if (!vma->vm_file) return -EINVAL; - freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file->f_mapping); + freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file, may_fault); /* fetch first 18 bytes of ELF header for checks */ ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type)); @@ -337,6 +338,22 @@ int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, return ret; } +/* + * Parse build ID of ELF file mapped to vma + * @vma: vma object + * @build_id: buffer to store build id, at least BUILD_ID_SIZE long + * @size: returns actual build id size in case of success + * + * Assumes no page fault can be taken, so if relevant portions of ELF file are + * not already paged in, fetching of build ID fails. 
+ * + * Return: 0 on success; negative error, otherwise + */ +int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size) +{ + return __build_id_parse(vma, build_id, size, false /* !may_fault */); +} + /* * Parse build ID of ELF file mapped to VMA * @vma: vma object @@ -350,8 +367,7 @@ int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, */ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size) { - /* fallback to non-faultable version for now */ - return build_id_parse_nofault(vma, build_id, size); + return __build_id_parse(vma, build_id, size, true /* may_fault */); } /** -- Gitee From dac00ccb4579162a9b7d38aca7b70b23cc02ed90 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 29 Aug 2024 10:42:29 -0700 Subject: [PATCH 1657/2138] lib/buildid: don't limit .note.gnu.build-id to the first page in ELF ANBZ: #11800 commit cdbb44f9a74fe7d01090ae492672e89cf7d83ce5 upstream. With freader we don't need to restrict ourselves to a single page, so let's allow ELF notes to be at any valid position with the file. We also merge parse_build_id() and parse_build_id_buf() as now the only difference between them is note offset overflow, which makes sense to check in all situations. Reviewed-by: Eduard Zingerman Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20240829174232.3133883-8-andrii@kernel.org Signed-off-by: Alexei Starovoitov Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4097 --- lib/buildid.c | 26 +++++--------------------- 1 file changed, 5 insertions(+), 21 deletions(-) diff --git a/lib/buildid.c b/lib/buildid.c index d680165fc1a1..4bc3380145ea 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -155,9 +155,8 @@ static void freader_cleanup(struct freader *r) * 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are * identical. 
*/ -static int parse_build_id_buf(struct freader *r, - unsigned char *build_id, __u32 *size, - loff_t note_off, Elf32_Word note_size) +static int parse_build_id(struct freader *r, unsigned char *build_id, __u32 *size, + loff_t note_off, Elf32_Word note_size) { const char note_name[] = "GNU"; const size_t note_name_sz = sizeof(note_name); @@ -165,7 +164,9 @@ static int parse_build_id_buf(struct freader *r, const Elf32_Nhdr *nhdr; const char *data; - note_end = note_off + note_size; + if (check_add_overflow(note_off, note_size, ¬e_end)) + return -EINVAL; + while (note_end - note_off > sizeof(Elf32_Nhdr) + note_name_sz) { nhdr = freader_fetch(r, note_off, sizeof(Elf32_Nhdr) + note_name_sz); if (!nhdr) @@ -204,23 +205,6 @@ static int parse_build_id_buf(struct freader *r, return -EINVAL; } -static inline int parse_build_id(struct freader *r, - unsigned char *build_id, - __u32 *size, - loff_t note_start_off, - Elf32_Word note_size) -{ - /* check for overflow */ - if (note_start_off + note_size < note_start_off) - return -EINVAL; - - /* only supports note that fits in the first page */ - if (note_start_off + note_size > PAGE_SIZE) - return -EINVAL; - - return parse_build_id_buf(r, build_id, size, note_start_off, note_size); -} - /* Parse build ID from 32-bit ELF */ static int get_build_id_32(struct freader *r, unsigned char *build_id, __u32 *size) { -- Gitee From 1aae191896d8c2ac1d90a845d0f8ee45c1173d15 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 17 Oct 2024 10:47:13 -0700 Subject: [PATCH 1658/2138] lib/buildid: Handle memfd_secret() files in build_id_parse() ANBZ: #11800 commit 5ac9b4e935dfc6af41eee2ddc21deb5c36507a9f upstream. >From memfd_secret(2) manpage: The memory areas backing the file created with memfd_secret(2) are visible only to the processes that have access to the file descriptor. The memory region is removed from the kernel page tables and only the page tables of the processes holding the file descriptor map the corresponding physical memory. 
(Thus, the pages in the region can't be accessed by the kernel itself, so that, for example, pointers to the region can't be passed to system calls.) We need to handle this special case gracefully in build ID fetching code. Return -EFAULT whenever secretmem file is passed to build_id_parse() family of APIs. Original report and repro can be found in [0]. [0] https://lore.kernel.org/bpf/ZwyG8Uro%2FSyTXAni@ly-workstation/ Fixes: de3ec364c3c3 ("lib/buildid: add single folio-based file reader abstraction") Reported-by: Yi Lai Suggested-by: Shakeel Butt Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Shakeel Butt Link: https://lore.kernel.org/bpf/20241017175431.6183-A-hca@linux.ibm.com Link: https://lore.kernel.org/bpf/20241017174713.2157873-1-andrii@kernel.org Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4097 --- lib/buildid.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/buildid.c b/lib/buildid.c index 4bc3380145ea..853b8388df80 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -5,6 +5,7 @@ #include #include #include +#include #define BUILD_ID 3 @@ -64,6 +65,10 @@ static int freader_get_folio(struct freader *r, loff_t file_off) freader_put_folio(r); + /* reject secretmem folios created with memfd_secret() */ + if (secretmem_mapping(r->file->f_mapping)) + return -EFAULT; + r->folio = filemap_get_folio(r->file->f_mapping, file_off >> PAGE_SHIFT); /* if sleeping is allowed, wait for the page, if necessary */ -- Gitee From f9df3fe1d5b73125609c6b75f7767feaf4681565 Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Tue, 12 Nov 2024 09:45:29 +0800 Subject: [PATCH 1659/2138] anolis: loongarch: Fix build fail when make dist-rpm ANBZ: #11807 Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4099 --- arch/loongarch/configs/anolis_defconfig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/loongarch/configs/anolis_defconfig 
b/arch/loongarch/configs/anolis_defconfig index fc088eacce5b..e10a12e5bc56 100644 --- a/arch/loongarch/configs/anolis_defconfig +++ b/arch/loongarch/configs/anolis_defconfig @@ -1,3 +1,7 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/loongarch 6.6.52 Kernel Configuration +# # CONFIG_LOCALVERSION_AUTO is not set CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -- Gitee From 169e116393692aa6052b1b46f0db1d2bd267b081 Mon Sep 17 00:00:00 2001 From: Hongchen Zhang Date: Mon, 4 Nov 2024 15:10:04 +0800 Subject: [PATCH 1660/2138] anolis: spec: use cgroup v1 for LoongArch platform ANBZ: #11574 There are some problems when use cgroup v2, so change to use cgroup v1. Signed-off-by: Hongchen Zhang Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4083 --- anolis/rpm/kernel.spec.template | 2 ++ 1 file changed, 2 insertions(+) diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template index f8eb18ee9952..453a38931a53 100644 --- a/anolis/rpm/kernel.spec.template +++ b/anolis/rpm/kernel.spec.template @@ -1375,6 +1375,8 @@ fi\ /bin/kernel-install add %{KVERREL}%{?1:+%{1}} /lib/modules/%{KVERREL}%{?1:+%{1}}/vmlinuz || exit $?\ %ifarch aarch64 \ grubby --update-kernel /boot/vmlinuz-%{KVERREL}%{?1:+%{1}} --args="cgroup.memory=nokmem crashkernel=0M-2G:0M,2G-64G:256M,64G-:384M iommu.passthrough=1 iommu.strict=0 nospectre_bhb ssbd=force-off"\ +%elifarch loongarch64 \ +grubby --update-kernel /boot/vmlinuz-%{KVERREL}%{?1:+%{1}} --args="systemd.unified_cgroup_hierarchy=0 cgroup.memory=nokmem crashkernel=0M-2G:0M,2G-8G:192M,8G-:256M"\ %else \ grubby --update-kernel /boot/vmlinuz-%{KVERREL}%{?1:+%{1}} --args="cgroup.memory=nokmem crashkernel=0M-2G:0M,2G-8G:192M,8G-:256M"\ %endif \ -- Gitee From 986d5dbf1b3b25d61f7842860619b460a1fc908b Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Fri, 8 Nov 2024 10:37:49 +0800 Subject: [PATCH 1661/2138] anolis: configs: refresh kconfigs ANBZ: #11822 No Functional Change. 
Refresh kconfigs by follow command: > make -C anolis/ dist-configs-update Signed-off-by: Qiao Ma Reviewed-by: Qinyun Tan Link: https://gitee.com/anolis/cloud-kernel/pulls/4102 --- .../arm64/CONFIG_UNWINDER_FRAME_POINTER | 0 anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS | 1 + anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_LIVEPATCH | 0 .../configs/L0-MANDATORY/{arm64 => default}/CONFIG_PCI_PF_STUB | 0 anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_UNWINDER_ORC | 0 anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB | 1 - anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 | 1 + .../L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC | 1 - anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO | 1 + .../configs/L1-RECOMMEND/{arm64 => default}/CONFIG_GENERIC_PHY | 0 .../L1-RECOMMEND/{arm64 => default}/CONFIG_HAVE_LIVEPATCH | 0 .../{arm64 => default}/CONFIG_HAVE_RELIABLE_STACKTRACE | 0 .../L1-RECOMMEND/{arm64 => default}/CONFIG_HAVE_STACK_VALIDATION | 0 anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_OBJTOOL | 0 anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX | 1 + .../L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT | 1 - .../configs/L1-RECOMMEND/{x86 => default}/CONFIG_TEST_LIVEPATCH | 0 anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY | 1 - .../x86}/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET | 0 anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI | 1 + .../L1-RECOMMEND/{default => x86}/CONFIG_RANDOMIZE_KSTACK_OFFSET | 0 .../{arm64 => x86}/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT | 0 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH | 1 - 
.../configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG | 1 + .../{arm64 => default}/CONFIG_DRM_PANEL_ILITEK_ILI9341 | 0 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC | 1 + .../L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH | 1 - anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE | 1 - anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION | 1 - anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL | 1 - 45 files changed, 18 insertions(+), 13 deletions(-) rename anolis/configs/{L1-RECOMMEND => L0-MANDATORY}/arm64/CONFIG_UNWINDER_FRAME_POINTER (100%) create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS rename anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_LIVEPATCH (100%) rename anolis/configs/L0-MANDATORY/{arm64 => default}/CONFIG_PCI_PF_STUB (100%) rename anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_UNWINDER_ORC (100%) delete mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB delete mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH delete 
mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO rename anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_GENERIC_PHY (100%) rename anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_HAVE_LIVEPATCH (100%) rename anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_HAVE_RELIABLE_STACKTRACE (100%) rename anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_HAVE_STACK_VALIDATION (100%) rename anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_OBJTOOL (100%) create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX delete mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_TEST_LIVEPATCH (100%) delete mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY rename anolis/configs/{L2-OPTIONAL/default => L1-RECOMMEND/x86}/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET (100%) create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI rename anolis/configs/L1-RECOMMEND/{default => x86}/CONFIG_RANDOMIZE_KSTACK_OFFSET (100%) rename anolis/configs/L1-RECOMMEND/{arm64 => x86}/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT (100%) delete mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG rename anolis/configs/L2-OPTIONAL/{arm64 => default}/CONFIG_DRM_PANEL_ILITEK_ILI9341 (100%) create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA create 
mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS delete mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH delete mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE delete mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION delete mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER b/anolis/configs/L0-MANDATORY/arm64/CONFIG_UNWINDER_FRAME_POINTER similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER rename to anolis/configs/L0-MANDATORY/arm64/CONFIG_UNWINDER_FRAME_POINTER diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS new file mode 100644 index 000000000000..3d6f96778a81 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS @@ -0,0 +1 @@ +CONFIG_CPU_MITIGATIONS=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_LIVEPATCH b/anolis/configs/L0-MANDATORY/default/CONFIG_LIVEPATCH similarity index 100% rename from anolis/configs/L0-MANDATORY/x86/CONFIG_LIVEPATCH rename to anolis/configs/L0-MANDATORY/default/CONFIG_LIVEPATCH diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_PF_STUB similarity index 100% rename from anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB rename to anolis/configs/L0-MANDATORY/default/CONFIG_PCI_PF_STUB diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_ORC 
b/anolis/configs/L0-MANDATORY/default/CONFIG_UNWINDER_ORC similarity index 100% rename from anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_ORC rename to anolis/configs/L0-MANDATORY/default/CONFIG_UNWINDER_ORC diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB b/anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB deleted file mode 100644 index 46eee76194b0..000000000000 --- a/anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB +++ /dev/null @@ -1 +0,0 @@ -CONFIG_PCI_PF_STUB=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS b/anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS deleted file mode 100644 index 37f78a6f2368..000000000000 --- a/anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS +++ /dev/null @@ -1 +0,0 @@ -CONFIG_SPECULATION_MITIGATIONS=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE new file mode 100644 index 000000000000..23a09e20f027 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE @@ -0,0 +1 @@ +CONFIG_ARM64_CONTPTE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 new file mode 100644 index 000000000000..f6f6f286638d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_3194386=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET deleted file mode 100644 index c7daa4f60d5d..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH deleted file mode 100644 index 1b05d0d1a109..000000000000 
--- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH +++ /dev/null @@ -1 +0,0 @@ -CONFIG_LIVEPATCH=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET deleted file mode 100644 index 759cb13e424c..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_RANDOMIZE_KSTACK_OFFSET is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC deleted file mode 100644 index 6b6908419acb..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC +++ /dev/null @@ -1 +0,0 @@ -CONFIG_UNWINDER_ORC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO new file mode 100644 index 000000000000..6a7ffe559b94 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO @@ -0,0 +1 @@ +CONFIG_AHCI_ZHAOXIN_SGPIO=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_PHY b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_PHY similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_PHY rename to anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_PHY diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_LIVEPATCH similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH rename to anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_LIVEPATCH diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_RELIABLE_STACKTRACE similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE rename to anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_RELIABLE_STACKTRACE diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_STACK_VALIDATION similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION rename to anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_STACK_VALIDATION diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL b/anolis/configs/L1-RECOMMEND/default/CONFIG_OBJTOOL similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL rename to anolis/configs/L1-RECOMMEND/default/CONFIG_OBJTOOL diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX new file mode 100644 index 000000000000..8c42e3567daa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX @@ -0,0 +1 @@ +CONFIG_PCP_BATCH_SCALE_MAX=5 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT deleted file mode 100644 index d680659c1703..000000000000 --- a/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TEST_LIVEPATCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_LIVEPATCH similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_TEST_LIVEPATCH rename to anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_LIVEPATCH diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY deleted file mode 100644 index 582e87c3b9f5..000000000000 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_GENERIC_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET similarity index 100% rename from anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET rename to anolis/configs/L1-RECOMMEND/x86/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI new file mode 100644 index 000000000000..71b428227384 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI @@ -0,0 +1 @@ +CONFIG_MITIGATION_SPECTRE_BHI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET rename to anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT rename to anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH deleted file mode 100644 index 0dd7700464a8..000000000000 --- a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH +++ /dev/null @@ -1 +0,0 @@ -CONFIG_TEST_LIVEPATCH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION new file mode 100644 index 000000000000..d966c9744d3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION @@ -0,0 +1 @@ +# CONFIG_BCACHE_ASYNC_REGISTRATION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG new file mode 100644 index 000000000000..eb3f1af90e6c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG @@ -0,0 +1 @@ +# CONFIG_BCACHE_CLOSURES_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG new file mode 100644 index 000000000000..36426027b2d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG @@ -0,0 +1 @@ +# CONFIG_BCACHE_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG new file mode 100644 index 000000000000..01ee2034fbdc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG @@ -0,0 +1 @@ +CONFIG_CRYPTO_SIG=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_ILI9341 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ILITEK_ILI9341 similarity index 100% rename from anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_ILI9341 rename to anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ILITEK_ILI9341 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS new file mode 100644 index 000000000000..485cf9b71de5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS @@ -0,0 +1 @@ +CONFIG_FB_IOMEM_FOPS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA new file mode 100644 index 000000000000..723cb8bb73ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA @@ -0,0 +1 @@ +# CONFIG_PATA_HPT3X3_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO new file mode 100644 index 000000000000..e5a12d9c60db --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO @@ -0,0 +1 @@ +CONFIG_SCREEN_INFO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG new file mode 100644 index 000000000000..aa295ebbb545 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG @@ -0,0 +1 @@ +CONFIG_SCSI_MVSAS_DEBUG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET new file mode 100644 index 000000000000..028f7d8e3d25 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET @@ -0,0 +1 @@ +# CONFIG_SCSI_MVSAS_TASKLET is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 new file mode 100644 index 000000000000..421ac1f25eec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 @@ -0,0 +1 @@ +# CONFIG_AD9467 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC new file mode 100644 index 000000000000..e98b407ac85f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC @@ -0,0 +1 @@ +# CONFIG_ADI_AXI_ADC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS new file mode 100644 index 000000000000..a7a95432397c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS @@ -0,0 +1 @@ +CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH deleted file mode 100644 index 7ebdb924703e..000000000000 --- a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH +++ /dev/null @@ -1 +0,0 @@ -CONFIG_HAVE_LIVEPATCH=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE deleted file mode 100644 index 2ce8faabc4cf..000000000000 --- a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE +++ /dev/null @@ -1 +0,0 @@ -CONFIG_HAVE_RELIABLE_STACKTRACE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION deleted file mode 100644 index 6f36a32d84ae..000000000000 --- a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION +++ /dev/null @@ -1 +0,0 @@ -CONFIG_HAVE_STACK_VALIDATION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL deleted file mode 100644 index cf3a9f20f93d..000000000000 --- a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL +++ /dev/null @@ -1 +0,0 @@ -CONFIG_OBJTOOL=y -- Gitee From b96d33c7ca1e2d941eaab483ef114790beeb33e0 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Wed, 20 Mar 2024 20:18:35 +0800 Subject: [PATCH 1662/2138] anolis: spec: extract cmdline from rpm spec ANBZ: #11862 Extract cmdline from rpm spec to anolis/cmdline/ directory for 2 reasons: 1. highlighting cmdline to make it easier for developers to find out 2. developers need to modify 3 positions due to historical reasons, but some positions may be missed. Extract it to a file could avoid such problem. 
Signed-off-by: Qinyun Tan Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4119 --- anolis/cmdline/arm64 | 4 ++++ anolis/cmdline/loongarch64 | 3 +++ anolis/cmdline/x86 | 2 ++ anolis/genspec.sh | 19 ++++++++++++++++++- anolis/rpm/kernel.spec.template | 6 +++--- 5 files changed, 30 insertions(+), 4 deletions(-) create mode 100644 anolis/cmdline/arm64 create mode 100644 anolis/cmdline/loongarch64 create mode 100644 anolis/cmdline/x86 diff --git a/anolis/cmdline/arm64 b/anolis/cmdline/arm64 new file mode 100644 index 000000000000..0afaaf2e3a68 --- /dev/null +++ b/anolis/cmdline/arm64 @@ -0,0 +1,4 @@ +cgroup.memory=nokmem +crashkernel=0M-2G:0M,2G-64G:256M,64G-:384M +iommu.passthrough=1 iommu.strict=0 +nospectre_bhb ssbd=force-off diff --git a/anolis/cmdline/loongarch64 b/anolis/cmdline/loongarch64 new file mode 100644 index 000000000000..1125741dc591 --- /dev/null +++ b/anolis/cmdline/loongarch64 @@ -0,0 +1,3 @@ +systemd.unified_cgroup_hierarchy=0 +cgroup.memory=nokmem +crashkernel=0M-2G:0M,2G-8G:192M,8G-:256M diff --git a/anolis/cmdline/x86 b/anolis/cmdline/x86 new file mode 100644 index 000000000000..10623511d2c3 --- /dev/null +++ b/anolis/cmdline/x86 @@ -0,0 +1,2 @@ +cgroup.memory=nokmem +crashkernel=0M-2G:0M,2G-8G:192M,8G-:256M diff --git a/anolis/genspec.sh b/anolis/genspec.sh index cfd9f58c2a4e..96c84d74d216 100644 --- a/anolis/genspec.sh +++ b/anolis/genspec.sh @@ -13,4 +13,21 @@ done sed -i -e " s/%%DIST%%/$DIST/ s/%%DIST_KERNELVERSION%%/$DIST_KERNELVERSION/ - s/%%DIST_PKGRELEASEVERION%%/$DIST_PKGRELEASEVERION/" ${DIST_OUTPUT}/${DIST_SPEC_FILE} \ No newline at end of file + s/%%DIST_PKGRELEASEVERION%%/$DIST_PKGRELEASEVERION/" ${DIST_OUTPUT}/${DIST_SPEC_FILE} + +function generate_cmdline() { + local arch=$1 + local cmdline="" + for cmd in $(awk '!/^#/ && !/^[[:space:]]*$/' ${DIST_SOURCES}cmdline/${arch}) + do + cmdline="${cmdline} ${cmd}" + done + echo "${cmdline}" +} + +x86_cmdline=$(generate_cmdline x86) +arm_cmdline=$(generate_cmdline 
arm64) +loongarch_cmdline=$(generate_cmdline loongarch64) +sed -i -e "s/%%X86_CMDLINE%%/$x86_cmdline/" ${DIST_OUTPUT}/${DIST_SPEC_FILE} +sed -i -e "s/%%ARM_CMDLINE%%/$arm_cmdline/" ${DIST_OUTPUT}/${DIST_SPEC_FILE} +sed -i -e "s/%%LOONGARCH_CMDLINE%%/$loongarch_cmdline/" ${DIST_OUTPUT}/${DIST_SPEC_FILE} diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template index 453a38931a53..ea0675613d0d 100644 --- a/anolis/rpm/kernel.spec.template +++ b/anolis/rpm/kernel.spec.template @@ -1374,11 +1374,11 @@ then\ fi\ /bin/kernel-install add %{KVERREL}%{?1:+%{1}} /lib/modules/%{KVERREL}%{?1:+%{1}}/vmlinuz || exit $?\ %ifarch aarch64 \ -grubby --update-kernel /boot/vmlinuz-%{KVERREL}%{?1:+%{1}} --args="cgroup.memory=nokmem crashkernel=0M-2G:0M,2G-64G:256M,64G-:384M iommu.passthrough=1 iommu.strict=0 nospectre_bhb ssbd=force-off"\ +grubby --update-kernel /boot/vmlinuz-%{KVERREL}%{?1:+%{1}} --args="%%ARM_CMDLINE%%"\ %elifarch loongarch64 \ -grubby --update-kernel /boot/vmlinuz-%{KVERREL}%{?1:+%{1}} --args="systemd.unified_cgroup_hierarchy=0 cgroup.memory=nokmem crashkernel=0M-2G:0M,2G-8G:192M,8G-:256M"\ +grubby --update-kernel /boot/vmlinuz-%{KVERREL}%{?1:+%{1}} --args="%%LOONGARCH_CMDLINE%%"\ %else \ -grubby --update-kernel /boot/vmlinuz-%{KVERREL}%{?1:+%{1}} --args="cgroup.memory=nokmem crashkernel=0M-2G:0M,2G-8G:192M,8G-:256M"\ +grubby --update-kernel /boot/vmlinuz-%{KVERREL}%{?1:+%{1}} --args="%%X86_CMDLINE%%"\ %endif \ %{nil} -- Gitee From 3b08edf5dbdc0e0051df3a20183f48ce19161483 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Wed, 13 Nov 2024 15:38:46 +0800 Subject: [PATCH 1663/2138] NFSD: Force all NFSv4.2 COPY requests to be synchronous ANBZ: #11801 commit 8d915bbf39266bb66082c1e4980e123883f19830 upstream. We've discovered that delivering a CB_OFFLOAD operation can be unreliable in some pretty unremarkable situations. 
Examples include: - The server dropped the connection because it lost a forechannel NFSv4 request and wishes to force the client to retransmit - The GSS sequence number window under-flowed - A network partition occurred When that happens, all pending callback operations, including CB_OFFLOAD, are lost. NFSD does not retransmit them. Moreover, the Linux NFS client does not yet support sending an OFFLOAD_STATUS operation to probe whether an asynchronous COPY operation has finished. Thus, on Linux NFS clients, when a CB_OFFLOAD is lost, asynchronous COPY can hang until manually interrupted. I've tried a couple of remedies, but so far the side-effects are worse than the disease and they have had to be reverted. So temporarily force COPY operations to be synchronous so that the use of CB_OFFLOAD is avoided entirely. This is a fix that can easily be backported to LTS kernels. I am working on client patches that introduce an implementation of OFFLOAD_STATUS. Note that NFSD arbitrarily limits the size of a copy_file_range to 4MB to avoid indefinitely blocking an nfsd thread. A short COPY result is returned in that case, and the client can present a fresh COPY request for the remainder. Signed-off-by: Chuck Lever Signed-off-by: Zhao Qiang Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4098 --- fs/nfsd/nfs4proc.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index b3eca08f15b1..4147f18dfe86 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1794,6 +1794,13 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd42_write_res *result; __be32 status; + /* + * Currently, async COPY is not reliable. Force all COPY + * requests to be synchronous to avoid client application + * hangs waiting for COPY completion. 
+ */ + nfsd4_copy_set_sync(copy, true); + result = ©->cp_res; nfsd_copy_write_verifier((__be32 *)&result->wr_verifier.data, nn); -- Gitee From 7181a66418113a391db8bc3d728fb9b584322611 Mon Sep 17 00:00:00 2001 From: Sidhartha Kumar Date: Tue, 26 Sep 2023 12:20:17 -0700 Subject: [PATCH 1664/2138] mm/filemap: remove hugetlb special casing in filemap.c ANBZ: #9728 commit a08c7193e4f18dc8508f2d07d0de2c5b94cb39a3 upstream Remove special cased hugetlb handling code within the page cache by changing the granularity of ->index to the base page size rather than the huge page size. The motivation of this patch is to reduce complexity within the filemap code while also increasing performance by removing branches that are evaluated on every page cache lookup. To support the change in index, new wrappers for hugetlb page cache interactions are added. These wrappers perform the conversion to a linear index which is now expected by the page cache for huge pages. ========================= PERFORMANCE ====================================== Perf was used to check the performance differences after the patch. Overall the performance is similar to mainline with a very small larger overhead that occurs in __filemap_add_folio() and hugetlb_add_to_page_cache(). This is because of the larger overhead that occurs in xa_load() and xa_store() as the xarray is now using more entries to store hugetlb folios in the page cache. 
Timing aarch64 2MB Page Size 6.5-rc3 + this patch: [root@sidhakum-ol9-1 hugepages]# time fallocate -l 700GB test.txt real 1m49.568s user 0m0.000s sys 1m49.461s 6.5-rc3: [root]# time fallocate -l 700GB test.txt real 1m47.495s user 0m0.000s sys 1m47.370s 1GB Page Size 6.5-rc3 + this patch: [root@sidhakum-ol9-1 hugepages1G]# time fallocate -l 700GB test.txt real 1m47.024s user 0m0.000s sys 1m46.921s 6.5-rc3: [root@sidhakum-ol9-1 hugepages1G]# time fallocate -l 700GB test.txt real 1m44.551s user 0m0.000s sys 1m44.438s x86 2MB Page Size 6.5-rc3 + this patch: [root@sidhakum-ol9-2 hugepages]# time fallocate -l 100GB test.txt real 0m22.383s user 0m0.000s sys 0m22.255s 6.5-rc3: [opc@sidhakum-ol9-2 hugepages]$ time sudo fallocate -l 100GB /dev/hugepages/test.txt real 0m22.735s user 0m0.038s sys 0m22.567s 1GB Page Size 6.5-rc3 + this patch: [root@sidhakum-ol9-2 hugepages1GB]# time fallocate -l 100GB test.txt real 0m25.786s user 0m0.001s sys 0m25.589s 6.5-rc3: [root@sidhakum-ol9-2 hugepages1G]# time fallocate -l 100GB test.txt real 0m33.454s user 0m0.001s sys 0m33.193s aarch64: workload - fallocate a 700GB file backed by huge pages 6.5-rc3 + this patch: 2MB Page Size: --100.00%--__arm64_sys_fallocate ksys_fallocate vfs_fallocate hugetlbfs_fallocate | |--95.04%--__pi_clear_page | |--3.57%--clear_huge_page | | | |--2.63%--rcu_all_qs | | | --0.91%--__cond_resched | --0.67%--__cond_resched 0.17% 0.00% 0 fallocate [kernel.vmlinux] [k] hugetlb_add_to_page_cache 0.14% 0.10% 11 fallocate [kernel.vmlinux] [k] __filemap_add_folio 6.5-rc3 2MB Page Size: --100.00%--__arm64_sys_fallocate ksys_fallocate vfs_fallocate hugetlbfs_fallocate | |--94.91%--__pi_clear_page | |--4.11%--clear_huge_page | | | |--3.00%--rcu_all_qs | | | --1.10%--__cond_resched | --0.59%--__cond_resched 0.08% 0.01% 1 fallocate [kernel.kallsyms] [k] hugetlb_add_to_page_cache 0.05% 0.03% 3 fallocate [kernel.kallsyms] [k] __filemap_add_folio x86 workload - fallocate a 100GB file backed by huge pages 6.5-rc3 + this patch: 
2MB Page Size: hugetlbfs_fallocate | --99.57%--clear_huge_page | --98.47%--clear_page_erms | --0.53%--asm_sysvec_apic_timer_interrupt 0.04% 0.04% 1 fallocate [kernel.kallsyms] [k] xa_load 0.04% 0.00% 0 fallocate [kernel.kallsyms] [k] hugetlb_add_to_page_cache 0.04% 0.00% 0 fallocate [kernel.kallsyms] [k] __filemap_add_folio 0.04% 0.00% 0 fallocate [kernel.kallsyms] [k] xas_store 6.5-rc3 2MB Page Size: --99.93%--__x64_sys_fallocate vfs_fallocate hugetlbfs_fallocate | --99.38%--clear_huge_page | |--98.40%--clear_page_erms | --0.59%--__cond_resched 0.03% 0.03% 1 fallocate [kernel.kallsyms] [k] __filemap_add_folio ========================= TESTING ====================================== This patch passes libhugetlbfs tests and LTP hugetlb tests ********** TEST SUMMARY * 2M * 32-bit 64-bit * Total testcases: 110 113 * Skipped: 0 0 * PASS: 107 113 * FAIL: 0 0 * Killed by signal: 3 0 * Bad configuration: 0 0 * Expected FAIL: 0 0 * Unexpected PASS: 0 0 * Test not present: 0 0 * Strange test result: 0 0 ********** Done executing testcases. 
LTP Version: 20220527-178-g2761a81c4 page migration was also tested using Mike Kravetz's test program.[8] [dan.carpenter@linaro.org: fix an NULL vs IS_ERR() bug] Link: https://lkml.kernel.org/r/1772c296-1417-486f-8eef-171af2192681@moroto.mountain Link: https://lkml.kernel.org/r/20230926192017.98183-1-sidhartha.kumar@oracle.com Signed-off-by: Sidhartha Kumar Signed-off-by: Dan Carpenter Reported-and-tested-by: syzbot+c225dea486da4d5592bd@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=c225dea486da4d5592bd Cc: Matthew Wilcox (Oracle) Cc: Mike Kravetz Cc: Muchun Song Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- fs/hugetlbfs/inode.c | 37 +++++++++++++++++++------------------ include/linux/hugetlb.h | 12 ++++++++++++ include/linux/pagemap.h | 32 ++------------------------------ mm/filemap.c | 34 ++++++++++------------------------ mm/hugetlb.c | 32 ++++++-------------------------- mm/migrate.c | 6 +++--- 6 files changed, 52 insertions(+), 101 deletions(-) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index ac519515ef6c..cd3500d0166e 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -345,7 +345,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) ssize_t retval = 0; while (iov_iter_count(to)) { - struct page *page; + struct folio *folio; size_t nr, copied, want; /* nr is the maximum number of bytes to copy from this page */ @@ -363,18 +363,18 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) } nr = nr - offset; - /* Find the page */ - page = find_lock_page(mapping, index); - if (unlikely(page == NULL)) { + /* Find the folio */ + folio = filemap_lock_hugetlb_folio(h, mapping, index); + if (IS_ERR(folio)) { /* * We have a HOLE, zero out the user-buffer for the * length of the hole or request. 
*/ copied = iov_iter_zero(nr, to); } else { - unlock_page(page); + folio_unlock(folio); - if (!PageHWPoison(page)) + if (!folio_test_has_hwpoisoned(folio)) want = nr; else { /* @@ -382,19 +382,19 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) * touching the 1st raw HWPOISON subpage after * offset. */ - want = adjust_range_hwpoison(page, offset, nr); + want = adjust_range_hwpoison(&folio->page, offset, nr); if (want == 0) { - put_page(page); + folio_put(folio); retval = -EIO; break; } } /* - * We have the page, copy it to user space buffer. + * We have the folio, copy it to user space buffer. */ - copied = copy_page_to_iter(page, offset, want, to); - put_page(page); + copied = copy_folio_to_iter(folio, offset, want, to); + folio_put(folio); } offset += copied; retval += copied; @@ -672,21 +672,20 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, { struct hstate *h = hstate_inode(inode); struct address_space *mapping = &inode->i_data; - const pgoff_t start = lstart >> huge_page_shift(h); - const pgoff_t end = lend >> huge_page_shift(h); + const pgoff_t end = lend >> PAGE_SHIFT; struct folio_batch fbatch; pgoff_t next, index; int i, freed = 0; bool truncate_op = (lend == LLONG_MAX); folio_batch_init(&fbatch); - next = start; + next = lstart >> PAGE_SHIFT; while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) { for (i = 0; i < folio_batch_count(&fbatch); ++i) { struct folio *folio = fbatch.folios[i]; u32 hash = 0; - index = folio->index; + index = folio->index >> huge_page_order(h); hash = hugetlb_fault_mutex_hash(mapping, index); mutex_lock(&hugetlb_fault_mutex_table[hash]); @@ -704,7 +703,9 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, } if (truncate_op) - (void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed); + (void)hugetlb_unreserve_pages(inode, + lstart >> huge_page_shift(h), + LONG_MAX, freed); } static void hugetlbfs_evict_inode(struct inode *inode) @@ -752,7 
+753,7 @@ static void hugetlbfs_zero_partial_page(struct hstate *h, pgoff_t idx = start >> huge_page_shift(h); struct folio *folio; - folio = filemap_lock_folio(mapping, idx); + folio = filemap_lock_hugetlb_folio(h, mapping, idx); if (IS_ERR(folio)) return; @@ -897,7 +898,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, mutex_lock(&hugetlb_fault_mutex_table[hash]); /* See if already present in mapping to avoid alloc/free */ - folio = filemap_get_folio(mapping, index); + folio = filemap_get_folio(mapping, index << huge_page_order(h)); if (!IS_ERR(folio)) { folio_put(folio); mutex_unlock(&hugetlb_fault_mutex_table[hash]); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 0c50c4fceb95..6f811747d3ee 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -845,6 +845,12 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h) return huge_page_size(h) / 512; } +static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h, + struct address_space *mapping, pgoff_t idx) +{ + return filemap_lock_folio(mapping, idx << huge_page_order(h)); +} + #include #ifndef is_hugepage_only_range @@ -1041,6 +1047,12 @@ static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio return NULL; } +static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h, + struct address_space *mapping, pgoff_t idx) +{ + return NULL; +} + static inline int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) { diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index c50f811fbf4a..33edd591ce1a 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -823,9 +823,6 @@ static inline pgoff_t folio_next_index(struct folio *folio) */ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index) { - /* HugeTLBfs indexes the page cache in units of hpage_size */ - if (folio_test_hugetlb(folio)) - return &folio->page; return 
folio_page(folio, index & (folio_nr_pages(folio) - 1)); } @@ -841,9 +838,6 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index) */ static inline bool folio_contains(struct folio *folio, pgoff_t index) { - /* HugeTLBfs indexes the page cache in units of hpage_size */ - if (folio_test_hugetlb(folio)) - return folio->index == index; return index - folio_index(folio) < folio_nr_pages(folio); } @@ -901,10 +895,9 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping, } /* - * Get index of the page within radix-tree (but not for hugetlb pages). - * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE) + * Get the offset in PAGE_SIZE (even for hugetlb pages). */ -static inline pgoff_t page_to_index(struct page *page) +static inline pgoff_t page_to_pgoff(struct page *page) { struct page *head; @@ -919,19 +912,6 @@ static inline pgoff_t page_to_index(struct page *page) return head->index + page - head; } -extern pgoff_t hugetlb_basepage_index(struct page *page); - -/* - * Get the offset in PAGE_SIZE (even for hugetlb pages). - * (TODO: hugetlb pages should have ->index in PAGE_SIZE) - */ -static inline pgoff_t page_to_pgoff(struct page *page) -{ - if (unlikely(PageHuge(page))) - return hugetlb_basepage_index(page); - return page_to_index(page); -} - /* * Return byte-offset into filesystem object for page. */ @@ -968,24 +948,16 @@ static inline loff_t folio_file_pos(struct folio *folio) /* * Get the offset in PAGE_SIZE (even for hugetlb folios). 
- * (TODO: hugetlb folios should have ->index in PAGE_SIZE) */ static inline pgoff_t folio_pgoff(struct folio *folio) { - if (unlikely(folio_test_hugetlb(folio))) - return hugetlb_basepage_index(&folio->page); return folio->index; } -extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, - unsigned long address); - static inline pgoff_t linear_page_index(struct vm_area_struct *vma, unsigned long address) { pgoff_t pgoff; - if (unlikely(is_vm_hugetlb_page(vma))) - return linear_hugepage_index(vma, address); pgoff = (address - vma->vm_start) >> PAGE_SHIFT; pgoff += vma->vm_pgoff; return pgoff; diff --git a/mm/filemap.c b/mm/filemap.c index 7e6ca5ebba6e..f0e9c9493410 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -131,11 +131,8 @@ static void page_cache_delete(struct address_space *mapping, mapping_set_update(&xas, mapping); - /* hugetlb pages are represented by a single entry in the xarray */ - if (!folio_test_hugetlb(folio)) { - xas_set_order(&xas, folio->index, folio_order(folio)); - nr = folio_nr_pages(folio); - } + xas_set_order(&xas, folio->index, folio_order(folio)); + nr = folio_nr_pages(folio); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); @@ -234,7 +231,7 @@ void filemap_free_folio(struct address_space *mapping, struct folio *folio) if (free_folio) free_folio(folio); - if (folio_test_large(folio) && !folio_test_hugetlb(folio)) + if (folio_test_large(folio)) refs = folio_nr_pages(folio); folio_put_refs(folio, refs); } @@ -857,14 +854,15 @@ noinline int __filemap_add_folio(struct address_space *mapping, if (!huge) { int error = mem_cgroup_charge(folio, NULL, gfp); - VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); if (error) return error; charged = true; - xas_set_order(&xas, index, folio_order(folio)); - nr = folio_nr_pages(folio); } + VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); + xas_set_order(&xas, index, folio_order(folio)); + nr = folio_nr_pages(folio); + gfp &= GFP_RECLAIM_MASK; folio_ref_add(folio, nr); 
folio->mapping = mapping; @@ -2068,7 +2066,7 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t *start, int idx = folio_batch_count(fbatch) - 1; folio = fbatch->folios[idx]; - if (!xa_is_value(folio) && !folio_test_hugetlb(folio)) + if (!xa_is_value(folio)) nr = folio_nr_pages(folio); *start = indices[idx] + nr; } @@ -2132,7 +2130,7 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start, int idx = folio_batch_count(fbatch) - 1; folio = fbatch->folios[idx]; - if (!xa_is_value(folio) && !folio_test_hugetlb(folio)) + if (!xa_is_value(folio)) nr = folio_nr_pages(folio); *start = indices[idx] + nr; } @@ -2173,9 +2171,6 @@ unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, continue; if (!folio_batch_add(fbatch, folio)) { unsigned long nr = folio_nr_pages(folio); - - if (folio_test_hugetlb(folio)) - nr = 1; *start = folio->index + nr; goto out; } @@ -2241,9 +2236,6 @@ unsigned filemap_get_folios_contig(struct address_space *mapping, if (!folio_batch_add(fbatch, folio)) { nr = folio_nr_pages(folio); - - if (folio_test_hugetlb(folio)) - nr = 1; *start = folio->index + nr; goto out; } @@ -2260,10 +2252,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping, if (nr) { folio = fbatch->folios[nr - 1]; - if (folio_test_hugetlb(folio)) - *start = folio->index + 1; - else - *start = folio_next_index(folio); + *start = folio->index + folio_nr_pages(folio); } out: rcu_read_unlock(); @@ -2301,9 +2290,6 @@ unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start, continue; if (!folio_batch_add(fbatch, folio)) { unsigned long nr = folio_nr_pages(folio); - - if (folio_test_hugetlb(folio)) - nr = 1; *start = folio->index + nr; goto out; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index c5a1db8db593..d55fd1203ccb 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -984,7 +984,7 @@ static long region_count(struct resv_map *resv, long f, long t) /* * Convert the address within this vma to the page 
offset within - * the mapping, in pagecache page units; huge pages here. + * the mapping, huge page units here. */ static pgoff_t vma_hugecache_offset(struct hstate *h, struct vm_area_struct *vma, unsigned long address) @@ -993,13 +993,6 @@ static pgoff_t vma_hugecache_offset(struct hstate *h, (vma->vm_pgoff >> huge_page_order(h)); } -pgoff_t linear_hugepage_index(struct vm_area_struct *vma, - unsigned long address) -{ - return vma_hugecache_offset(hstate_vma(vma), vma, address); -} -EXPORT_SYMBOL_GPL(linear_hugepage_index); - /** * vma_kernel_pagesize - Page size granularity for this VMA. * @vma: The user mapping. @@ -2093,20 +2086,6 @@ struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage) return NULL; } -pgoff_t hugetlb_basepage_index(struct page *page) -{ - struct page *page_head = compound_head(page); - pgoff_t index = page_index(page_head); - unsigned long compound_idx; - - if (compound_order(page_head) > MAX_ORDER) - compound_idx = page_to_pfn(page) - page_to_pfn(page_head); - else - compound_idx = page - page_head; - - return (index << compound_order(page_head)) + compound_idx; -} - static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry) @@ -5755,7 +5734,7 @@ static bool hugetlbfs_pagecache_present(struct hstate *h, struct vm_area_struct *vma, unsigned long address) { struct address_space *mapping = vma->vm_file->f_mapping; - pgoff_t idx = vma_hugecache_offset(h, vma, address); + pgoff_t idx = linear_page_index(vma, address); struct folio *folio; folio = filemap_get_folio(mapping, idx); @@ -5772,6 +5751,7 @@ int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping struct hstate *h = hstate_inode(inode); int err; + idx <<= huge_page_order(h); __folio_set_locked(folio); err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); @@ -5879,7 +5859,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm, * before we get 
page_table_lock. */ new_folio = false; - folio = filemap_lock_folio(mapping, idx); + folio = filemap_lock_hugetlb_folio(h, mapping, idx); if (IS_ERR(folio)) { size = i_size_read(mapping->host) >> huge_page_shift(h); if (idx >= size) @@ -6188,7 +6168,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, /* Just decrements count, does not deallocate */ vma_end_reservation(h, vma, haddr); - pagecache_folio = filemap_lock_folio(mapping, idx); + pagecache_folio = filemap_lock_hugetlb_folio(h, mapping, idx); if (IS_ERR(pagecache_folio)) pagecache_folio = NULL; } @@ -6321,7 +6301,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, if (is_continue) { ret = -EFAULT; - folio = filemap_lock_folio(mapping, idx); + folio = filemap_lock_hugetlb_folio(h, mapping, idx); if (IS_ERR(folio)) goto out; folio_in_pagecache = true; diff --git a/mm/migrate.c b/mm/migrate.c index 6fa2da281aed..b341d0517e98 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -531,7 +531,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping, int expected_count; xas_lock_irq(&xas); - expected_count = 2 + folio_has_private(src); + expected_count = folio_expected_refs(mapping, src); if (!folio_ref_freeze(src, expected_count)) { xas_unlock_irq(&xas); return -EAGAIN; @@ -540,11 +540,11 @@ int migrate_huge_page_move_mapping(struct address_space *mapping, dst->index = src->index; dst->mapping = src->mapping; - folio_get(dst); + folio_ref_add(dst, folio_nr_pages(dst)); xas_store(&xas, dst); - folio_ref_unfreeze(src, expected_count - 1); + folio_ref_unfreeze(src, expected_count - folio_nr_pages(src)); xas_unlock_irq(&xas); -- Gitee From 4a55f1880c92a243c0fd6f6145c10ad96548a70b Mon Sep 17 00:00:00 2001 From: Sidhartha Kumar Date: Fri, 12 Jan 2024 10:08:40 -0800 Subject: [PATCH 1665/2138] fs/hugetlbfs/inode.c: mm/memory-failure.c: fix hugetlbfs hwpoison handling ANBZ: #9728 commit 19d3e221807772f8443e565234a6fdc5a2b09d26 upstream has_extra_refcount() makes the assumption that the 
page cache adds a ref count of 1 and subtracts this in the extra_pins case. Commit a08c7193e4f1 (mm/filemap: remove hugetlb special casing in filemap.c) modifies __filemap_add_folio() by calling folio_ref_add(folio, nr); for all cases (including hugtetlb) where nr is the number of pages in the folio. We should adjust the number of references coming from the page cache by subtracing the number of pages rather than 1. In hugetlbfs_read_iter(), folio_test_has_hwpoisoned() is testing the wrong flag as, in the hugetlb case, memory-failure code calls folio_test_set_hwpoison() to indicate poison. folio_test_hwpoison() is the correct function to test for that flag. After these fixes, the hugetlb hwpoison read selftest passes all cases. Link: https://lkml.kernel.org/r/20240112180840.367006-1-sidhartha.kumar@oracle.com Fixes: a08c7193e4f1 ("mm/filemap: remove hugetlb special casing in filemap.c") Signed-off-by: Sidhartha Kumar Closes: https://lore.kernel.org/linux-mm/20230713001833.3778937-1-jiaqiyan@google.com/T/#m8e1469119e5b831bbd05d495f96b842e4a1c5519 Reported-by: Muhammad Usama Anjum Tested-by: Muhammad Usama Anjum Acked-by: Miaohe Lin Acked-by: Muchun Song Cc: James Houghton Cc: Jiaqi Yan Cc: Matthew Wilcox (Oracle) Cc: Naoya Horiguchi Cc: [6.7+] Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- fs/hugetlbfs/inode.c | 2 +- mm/memory-failure.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index cd3500d0166e..36ac4e536bcb 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -374,7 +374,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) } else { folio_unlock(folio); - if (!folio_test_has_hwpoisoned(folio)) + if (!folio_test_hwpoison(folio)) want = nr; else { /* diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 75a94ca5fa5a..84a0b0cf7c92 100644 --- a/mm/memory-failure.c +++ 
b/mm/memory-failure.c @@ -989,7 +989,7 @@ static bool has_extra_refcount(struct page_state *ps, struct page *p, int count = page_count(p) - 1; if (extra_pins) - count -= 1; + count -= folio_nr_pages(page_folio(p)); if (count > 0) { pr_err("%#lx: %s still referenced by %d users\n", -- Gitee From 6c54326e178a276b88dae886d5d039f500f6750c Mon Sep 17 00:00:00 2001 From: Sidhartha Kumar Date: Mon, 4 Dec 2023 10:32:34 -0800 Subject: [PATCH 1666/2138] mm/hugetlb: have CONFIG_HUGETLB_PAGE select CONFIG_XARRAY_MULTI ANBZ: #9728 commit 4a3ef6be03e6700037fc20e63aa5ffd972e435ca upstream After commit a08c7193e4f1 "mm/filemap: remove hugetlb special casing in filemap.c", hugetlb pages are stored in the page cache in base page sized indexes. This leads to multi index stores in the xarray which is only supporting through CONFIG_XARRAY_MULTI. The other page cache user of multi index stores ,THP, selects XARRAY_MULTI. Have CONFIG_HUGETLB_PAGE follow this behavior as well to avoid the BUG() with a CONFIG_HUGETLB_PAGE && !CONFIG_XARRAY_MULTI config. 
Link: https://lkml.kernel.org/r/20231204183234.348697-1-sidhartha.kumar@oracle.com Fixes: a08c7193e4f1 ("mm/filemap: remove hugetlb special casing in filemap.c") Signed-off-by: Sidhartha Kumar Reported-by: Al Viro Cc: Mike Kravetz Cc: Muchun Song Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- fs/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/Kconfig b/fs/Kconfig index 1e3ed753b9fe..845e18c97ad5 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -267,6 +267,7 @@ config HUGETLBFS config HUGETLB_PAGE def_bool HUGETLBFS + select XARRAY_MULTI config HUGETLB_PAGE_OPTIMIZE_VMEMMAP def_bool HUGETLB_PAGE -- Gitee From 02a5cfa378285f417f9f92c76f5a0561d360af42 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 22 Jul 2024 13:43:17 +0800 Subject: [PATCH 1667/2138] mm: shmem: simplify the suitable huge orders validation for tmpfs ANBZ: #9728 commit 0bedf001e359368badbdefe722162ed4dd33e296 upstream Patch series "Some cleanups for shmem", v3. This series does some cleanups to reuse code, rename functions and simplify logic to make code more clear. No functional changes are expected. This patch (of 3): Move the suitable huge orders validation into shmem_suitable_orders() for tmpfs, which can reuse some code to simplify the logic. In addition, we don't have special handling for the error code -E2BIG when checking for conflicts with PMD sized THP in the pagecache for tmpfs, instead, it will just fallback to order-0 allocations like this patch does, so this simplification will not add functional changes. 
Link: https://lkml.kernel.org/r/cover.1721626645.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/965985dd6d322929d78a0beee0dafa1c2a1b81e2.1721626645.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: Ryan Roberts Acked-by: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Hugh Dickins Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- mm/shmem.c | 39 +++++++++++++++------------------------ 1 file changed, 15 insertions(+), 24 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index ba6f639170e6..08e36bb36226 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1694,20 +1694,30 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault struct address_space *mapping, pgoff_t index, unsigned long orders) { - struct vm_area_struct *vma = vmf->vma; + struct vm_area_struct *vma = vmf ? vmf->vma : NULL; pgoff_t aligned_index; unsigned long pages; int order; - orders = thp_vma_suitable_orders(vma, vmf->address, orders); - if (!orders) - return 0; + if (vma) { + orders = thp_vma_suitable_orders(vma, vmf->address, orders); + if (!orders) + return 0; + } /* Find the highest order that can add into the page cache */ order = highest_order(orders); while (orders) { pages = 1UL << order; aligned_index = round_down(index, pages); + /* + * Check for conflict before waiting on a huge allocation. + * Conflict might be that a huge page has just been allocated + * and added to page cache by a racing thread, or that there + * is already at least one small page in the huge extent. + * Be careful to retry when appropriate, but not forever! + * Elsewhere -EEXIST would be the right code, but not here. 
+ */ if (!xa_find(&mapping->i_pages, &aligned_index, aligned_index + pages - 1, XA_PRESENT)) break; @@ -1744,7 +1754,6 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf, { struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); - struct vm_area_struct *vma = vmf ? vmf->vma : NULL; unsigned long suitable_orders = 0; struct folio *folio = NULL; long pages; @@ -1754,26 +1763,8 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf, orders = 0; if (orders > 0) { - if (vma && vma_is_anon_shmem(vma)) { - suitable_orders = shmem_suitable_orders(inode, vmf, + suitable_orders = shmem_suitable_orders(inode, vmf, mapping, index, orders); - } else if (orders & BIT(HPAGE_PMD_ORDER)) { - pages = HPAGE_PMD_NR; - suitable_orders = BIT(HPAGE_PMD_ORDER); - index = round_down(index, HPAGE_PMD_NR); - - /* - * Check for conflict before waiting on a huge allocation. - * Conflict might be that a huge page has just been allocated - * and added to page cache by a racing thread, or that there - * is already at least one small page in the huge extent. - * Be careful to retry when appropriate, but not forever! - * Elsewhere -EEXIST would be the right code, but not here. - */ - if (xa_find(&mapping->i_pages, &index, - index + HPAGE_PMD_NR - 1, XA_PRESENT)) - return ERR_PTR(-E2BIG); - } order = highest_order(suitable_orders); while (suitable_orders) { -- Gitee From 1ef58bad6efbe26fd36c10173b0cc9166428f4e2 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 22 Jul 2024 13:43:18 +0800 Subject: [PATCH 1668/2138] mm: shmem: rename shmem_is_huge() to shmem_huge_global_enabled() ANBZ: #9728 commit d58a2a581f132529eefac5377676011562b631b8 upstream shmem_is_huge() is now used to check if the top-level huge page is enabled, thus rename it to reflect its usage. 
Link: https://lkml.kernel.org/r/da53296e0ab6359aa083561d9dc01e4223d60fbe.1721626645.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: Ryan Roberts Acked-by: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Hugh Dickins Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- include/linux/shmem_fs.h | 9 +++++---- mm/huge_memory.c | 5 +++-- mm/shmem.c | 15 ++++++++------- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 41aa4e0d6dbc..83a4fd53df8c 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -115,14 +115,15 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); int shmem_unuse(unsigned int type); #ifdef CONFIG_TRANSPARENT_HUGEPAGE -extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, - struct mm_struct *mm, unsigned long vm_flags); +extern bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, bool shmem_huge_force, + struct mm_struct *mm, unsigned long vm_flags); unsigned long shmem_allowable_huge_orders(struct inode *inode, struct vm_area_struct *vma, pgoff_t index, bool global_huge); #else -static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, - struct mm_struct *mm, unsigned long vm_flags) +static __always_inline bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, + bool shmem_huge_force, struct mm_struct *mm, + unsigned long vm_flags) { return false; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 2a8ae9f5522c..dca31c9b493a 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -141,8 +141,9 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, * own flags. 
*/ if (!in_pf && shmem_file(vma->vm_file)) { - bool global_huge = shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff, - !enforce_sysfs, vma->vm_mm, vm_flags); + bool global_huge = shmem_huge_global_enabled(file_inode(vma->vm_file), + vma->vm_pgoff, !enforce_sysfs, + vma->vm_mm, vm_flags); if (!vma_is_anon_shmem(vma)) return global_huge ? orders : 0; diff --git a/mm/shmem.c b/mm/shmem.c index 08e36bb36226..35c826f88e56 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -541,9 +541,9 @@ static bool shmem_confirm_swap(struct address_space *mapping, static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER; -static bool __shmem_is_huge(struct inode *inode, pgoff_t index, - bool shmem_huge_force, struct mm_struct *mm, - unsigned long vm_flags) +static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index, + bool shmem_huge_force, struct mm_struct *mm, + unsigned long vm_flags) { loff_t i_size; @@ -574,14 +574,15 @@ static bool __shmem_is_huge(struct inode *inode, pgoff_t index, } } -bool shmem_is_huge(struct inode *inode, pgoff_t index, +bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, bool shmem_huge_force, struct mm_struct *mm, unsigned long vm_flags) { if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER) return false; - return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags); + return __shmem_huge_global_enabled(inode, index, shmem_huge_force, + mm, vm_flags); } #if defined(CONFIG_SYSFS) @@ -1149,7 +1150,7 @@ static int shmem_getattr(struct mnt_idmap *idmap, STATX_ATTR_NODUMP); generic_fillattr(idmap, request_mask, inode, stat); - if (shmem_is_huge(inode, 0, false, NULL, 0)) + if (shmem_huge_global_enabled(inode, 0, false, NULL, 0)) stat->blksize = HPAGE_PMD_SIZE; if (request_mask & STATX_BTIME) { @@ -2155,7 +2156,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, return 0; } - huge = shmem_is_huge(inode, index, false, fault_mm, + huge = shmem_huge_global_enabled(inode, index, false, fault_mm, vma ? 
vma->vm_flags : 0); /* Find hugepage orders that are allowed for anonymous shmem. */ if (vma && vma_is_anon_shmem(vma)) -- Gitee From ac8496c0c969698c6890b99bc7de8ff6b3c4ef1b Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 22 Jul 2024 13:43:19 +0800 Subject: [PATCH 1669/2138] mm: shmem: move shmem_huge_global_enabled() into shmem_allowable_huge_orders() ANBZ: #9728 commit 6beeab870e70b2d4f49baf6c6be9da1b61c169f8 upstream Move shmem_huge_global_enabled() into shmem_allowable_huge_orders(), so that shmem_allowable_huge_orders() can also help to find the allowable huge orders for tmpfs. Moreover the shmem_huge_global_enabled() can become static. While we are at it, passing the vma instead of mm for shmem_huge_global_enabled() makes code cleaner. No functional changes. Link: https://lkml.kernel.org/r/8e825146bb29ee1a1c7bd64d2968ff3e19be7815.1721626645.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: Ryan Roberts Acked-by: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Hugh Dickins Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- include/linux/shmem_fs.h | 12 ++-------- mm/huge_memory.c | 12 +++------- mm/shmem.c | 47 +++++++++++++++++++++++++--------------- 3 files changed, 35 insertions(+), 36 deletions(-) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 83a4fd53df8c..57e8a6689439 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -115,21 +115,13 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); int shmem_unuse(unsigned int type); #ifdef CONFIG_TRANSPARENT_HUGEPAGE -extern bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, bool shmem_huge_force, - struct mm_struct *mm, unsigned long vm_flags); unsigned long shmem_allowable_huge_orders(struct inode *inode, struct vm_area_struct *vma, pgoff_t index, - 
bool global_huge); + bool shmem_huge_force); #else -static __always_inline bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, - bool shmem_huge_force, struct mm_struct *mm, - unsigned long vm_flags) -{ - return false; -} static inline unsigned long shmem_allowable_huge_orders(struct inode *inode, struct vm_area_struct *vma, pgoff_t index, - bool global_huge) + bool shmem_huge_force) { return 0; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index dca31c9b493a..836869205813 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -140,16 +140,10 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, * Must be done before hugepage flags check since shmem has its * own flags. */ - if (!in_pf && shmem_file(vma->vm_file)) { - bool global_huge = shmem_huge_global_enabled(file_inode(vma->vm_file), - vma->vm_pgoff, !enforce_sysfs, - vma->vm_mm, vm_flags); - - if (!vma_is_anon_shmem(vma)) - return global_huge ? orders : 0; + if (!in_pf && shmem_file(vma->vm_file)) return shmem_allowable_huge_orders(file_inode(vma->vm_file), - vma, vma->vm_pgoff, global_huge); - } + vma, vma->vm_pgoff, + !enforce_sysfs); if (!vma_is_anonymous(vma)) { /* diff --git a/mm/shmem.c b/mm/shmem.c index 35c826f88e56..fa08e99e96a7 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -542,9 +542,10 @@ static bool shmem_confirm_swap(struct address_space *mapping, static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER; static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index, - bool shmem_huge_force, struct mm_struct *mm, + bool shmem_huge_force, struct vm_area_struct *vma, unsigned long vm_flags) { + struct mm_struct *mm = vma ? 
vma->vm_mm : NULL; loff_t i_size; if (!S_ISREG(inode->i_mode)) @@ -574,15 +575,15 @@ static bool __shmem_huge_global_enabled(struct inode *inode, pgoff_t index, } } -bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, - bool shmem_huge_force, struct mm_struct *mm, +static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, + bool shmem_huge_force, struct vm_area_struct *vma, unsigned long vm_flags) { if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER) return false; return __shmem_huge_global_enabled(inode, index, shmem_huge_force, - mm, vm_flags); + vma, vm_flags); } #if defined(CONFIG_SYSFS) @@ -765,6 +766,13 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, { return 0; } + +static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, + bool shmem_huge_force, struct vm_area_struct *vma, + unsigned long vm_flags) +{ + return false; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* @@ -1639,22 +1647,33 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) #ifdef CONFIG_TRANSPARENT_HUGEPAGE unsigned long shmem_allowable_huge_orders(struct inode *inode, struct vm_area_struct *vma, pgoff_t index, - bool global_huge) + bool shmem_huge_force) { unsigned long mask = READ_ONCE(huge_shmem_orders_always); unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size); - unsigned long vm_flags = vma->vm_flags; + unsigned long vm_flags = vma ? vma->vm_flags : 0; + bool global_huge; loff_t i_size; int order; - if ((vm_flags & VM_NOHUGEPAGE) || - test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) + if (vma && ((vm_flags & VM_NOHUGEPAGE) || + test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))) return 0; /* If the hardware/firmware marked hugepage support disabled. 
*/ if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED)) return 0; + global_huge = shmem_huge_global_enabled(inode, index, shmem_huge_force, + vma, vm_flags); + if (!vma || !vma_is_anon_shmem(vma)) { + /* + * For tmpfs, we now only support PMD sized THP if huge page + * is enabled, otherwise fallback to order 0. + */ + return global_huge ? BIT(HPAGE_PMD_ORDER) : 0; + } + /* * Following the 'deny' semantics of the top level, force the huge * option off from all mounts. @@ -2086,7 +2105,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, struct mm_struct *fault_mm; struct folio *folio; int error; - bool alloced, huge; + bool alloced; unsigned long orders = 0; if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) @@ -2156,14 +2175,8 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, return 0; } - huge = shmem_huge_global_enabled(inode, index, false, fault_mm, - vma ? vma->vm_flags : 0); - /* Find hugepage orders that are allowed for anonymous shmem. */ - if (vma && vma_is_anon_shmem(vma)) - orders = shmem_allowable_huge_orders(inode, vma, index, huge); - else if (huge) - orders = BIT(HPAGE_PMD_ORDER); - + /* Find hugepage orders that are allowed for anonymous shmem and tmpfs. */ + orders = shmem_allowable_huge_orders(inode, vma, index, false); if (orders > 0) { gfp_t huge_gfp; -- Gitee From 342cf1df3ea3e3e8a3b397e9e18a9873da7ab55f Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 12 Aug 2024 15:42:02 +0800 Subject: [PATCH 1670/2138] mm: swap: extend swap_shmem_alloc() to support batch SWAP_MAP_SHMEM flag setting ANBZ: #9728 commit 650180760be6bb448609d4d155eef3b728ace641 upstream Patch series "support large folio swap-out and swap-in for shmem", v5. 
Shmem will support large folio allocation [1] [2] to get a better performance, however, the memory reclaim still splits the precious large folios when trying to swap-out shmem, which may lead to the memory fragmentation issue and can not take advantage of the large folio for shmeme. Moreover, the swap code already supports for swapping out large folio without split, and large folio swap-in[3] series is queued into mm-unstable branch. Hence this patch set also supports the large folio swap-out and swap-in for shmem. This patch (of 9): To support shmem large folio swap operations, add a new parameter to swap_shmem_alloc() that allows batch SWAP_MAP_SHMEM flag setting for shmem swap entries. While we are at it, using folio_nr_pages() to get the number of pages of the folio as a preparation. Link: https://lkml.kernel.org/r/cover.1723434324.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/99f64115d04b285e009580eb177352c57119ffd0.1723434324.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: Barry Song Cc: Chris Li Cc: Daniel Gomez Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- include/linux/swap.h | 4 ++-- mm/shmem.c | 6 ++++-- mm/swapfile.c | 4 ++-- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index a5fe71b945e4..5762d794ebab 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -496,7 +496,7 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry); extern swp_entry_t get_swap_page_of_type(int); extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order); extern int add_swap_count_continuation(swp_entry_t, gfp_t); -extern void swap_shmem_alloc(swp_entry_t); +extern void 
swap_shmem_alloc(swp_entry_t, int); extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t entry, int nr); extern void swap_free_nr(swp_entry_t entry, int nr_pages); @@ -564,7 +564,7 @@ static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) return 0; } -static inline void swap_shmem_alloc(swp_entry_t swp) +static inline void swap_shmem_alloc(swp_entry_t swp, int nr) { } diff --git a/mm/shmem.c b/mm/shmem.c index fa08e99e96a7..8d8d9a11017c 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1445,6 +1445,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); swp_entry_t swap; pgoff_t index; + int nr_pages; /* * Our capabilities prevent regular writeback or sync from ever calling @@ -1477,6 +1478,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) } index = folio->index; + nr_pages = folio_nr_pages(folio); /* * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC @@ -1529,8 +1531,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) if (add_to_swap_cache(folio, swap, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, NULL) == 0) { - shmem_recalc_inode(inode, 0, 1); - swap_shmem_alloc(swap); + shmem_recalc_inode(inode, 0, nr_pages); + swap_shmem_alloc(swap, nr_pages); shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap)); mutex_unlock(&shmem_swaplist_mutex); diff --git a/mm/swapfile.c b/mm/swapfile.c index b7f851ad0126..7ec4cfe8002b 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3608,9 +3608,9 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr) * Help swapoff by noting that swap entry belongs to shmem/tmpfs * (in which case its reference count is never incremented). 
*/ -void swap_shmem_alloc(swp_entry_t entry) +void swap_shmem_alloc(swp_entry_t entry, int nr) { - __swap_duplicate(entry, SWAP_MAP_SHMEM, 1); + __swap_duplicate(entry, SWAP_MAP_SHMEM, nr); } /* -- Gitee From f16d09230573060e9cc1604e41b8c6e4bb76a462 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 12 Aug 2024 15:42:03 +0800 Subject: [PATCH 1671/2138] mm: shmem: extend shmem_partial_swap_usage() to support large folio swap ANBZ: #9728 commit 50f381eccefda0cdaf7aa617587dc04cb6652085 upstream To support shmem large folio swapout in the following patches, using xa_get_order() to get the order of the swap entry to calculate the swap usage of shmem. Link: https://lkml.kernel.org/r/60b130b9fc3e422bb91293a172c2113c85e9233a.1723434324.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: Barry Song Cc: Chris Li Cc: Daniel Gomez Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- mm/shmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/shmem.c b/mm/shmem.c index 8d8d9a11017c..f4a36d3a3c86 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -883,7 +883,7 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping, if (xas_retry(&xas, page)) continue; if (xa_is_value(page)) - swapped++; + swapped += 1 << xa_get_order(xas.xa, xas.xa_index); if (xas.xa_index == max) break; if (need_resched()) { -- Gitee From ee615bf9163691136844431293f016c96363b94f Mon Sep 17 00:00:00 2001 From: Daniel Gomez Date: Mon, 12 Aug 2024 15:42:04 +0800 Subject: [PATCH 1672/2138] mm: shmem: return number of pages beeing freed in shmem_free_swap ANBZ: #9728 commit 6ea0d1ccb110387244e04637f28a1d2eda54e3fb upstream Both shmem_free_swap callers expect the number of pages being freed. 
In the large folios context, this needs to support larger values other than 0 (used as 1 page being freed) and -ENOENT (used as 0 pages being freed). In preparation for large folios adoption, make shmem_free_swap routine return the number of pages being freed. So, returning 0 in this context, means 0 pages being freed. While we are at it, changing to use free_swap_and_cache_nr() to free large order swap entry by Baolin Wang. Link: https://lkml.kernel.org/r/9623e863c83d749d5ab407f6fdf0a8e5a3bdf052.1723434324.git.baolin.wang@linux.alibaba.com Signed-off-by: Daniel Gomez Signed-off-by: Baolin Wang Suggested-by: Matthew Wilcox Cc: Barry Song Cc: Chris Li Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- mm/shmem.c | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index f4a36d3a3c86..8342e6208ed9 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -849,18 +849,22 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap) } /* - * Remove swap entry from page cache, free the swap and its page cache. + * Remove swap entry from page cache, free the swap and its page cache. Returns + * the number of pages being freed. 0 means entry not found in XArray (0 pages + * being freed). 
*/ -static int shmem_free_swap(struct address_space *mapping, - pgoff_t index, void *radswap) +static long shmem_free_swap(struct address_space *mapping, + pgoff_t index, void *radswap) { + int order = xa_get_order(&mapping->i_pages, index); void *old; old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); if (old != radswap) - return -ENOENT; - free_swap_and_cache(radix_to_swp_entry(radswap)); - return 0; + return 0; + free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order); + + return 1 << order; } /* @@ -1012,7 +1016,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, if (xa_is_value(folio)) { if (unfalloc) continue; - nr_swaps_freed += !shmem_free_swap(mapping, + nr_swaps_freed += shmem_free_swap(mapping, indices[i], folio); continue; } @@ -1079,14 +1083,17 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, folio = fbatch.folios[i]; if (xa_is_value(folio)) { + long swaps_freed; + if (unfalloc) continue; - if (shmem_free_swap(mapping, indices[i], folio)) { + swaps_freed = shmem_free_swap(mapping, indices[i], folio); + if (!swaps_freed) { /* Swap was replaced by page: retry */ index = indices[i]; break; } - nr_swaps_freed++; + nr_swaps_freed += swaps_freed; continue; } -- Gitee From 06bb4a3c03d7e9ffbef7aa548652450c3ac5ad38 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 12 Aug 2024 15:42:05 +0800 Subject: [PATCH 1673/2138] mm: filemap: use xa_get_order() to get the swap entry order ANBZ: #9728 commit fb72415938d109fe8cca339a4f1423f76ba213c5 upstream In the following patches, shmem will support the swap out of large folios, which means the shmem mappings may contain large order swap entries, so using xa_get_order() to get the folio order of the shmem swap entry to update the '*start' correctly. 
[hughd@google.com: use xa_get_order() to get the swap entry order] Link: https://lkml.kernel.org/r/c336e6e4-da7f-b714-c0f1-12df715f2611@google.com Link: https://lkml.kernel.org/r/6876d55145c1cc80e79df7884aa3a62e397b101d.1723434324.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Signed-off-by: Hugh Dickins Cc: Barry Song Cc: Chris Li Cc: Daniel Gomez Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- mm/filemap.c | 41 +++++++++++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index f0e9c9493410..4a66b50f9430 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2059,17 +2059,20 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t *start, if (!folio_batch_add(fbatch, folio)) break; } - rcu_read_unlock(); if (folio_batch_count(fbatch)) { - unsigned long nr = 1; + unsigned long nr; int idx = folio_batch_count(fbatch) - 1; folio = fbatch->folios[idx]; if (!xa_is_value(folio)) nr = folio_nr_pages(folio); - *start = indices[idx] + nr; + else + nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]); + *start = round_down(indices[idx] + nr, nr); } + rcu_read_unlock(); + return folio_batch_count(fbatch); } @@ -2101,10 +2104,17 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start, rcu_read_lock(); while ((folio = find_get_entry(&xas, end, XA_PRESENT))) { + unsigned long base; + unsigned long nr; + if (!xa_is_value(folio)) { - if (folio->index < *start) + nr = folio_nr_pages(folio); + base = folio->index; + /* Omit large folio which begins before the start */ + if (base < *start) goto put; - if (folio_next_index(folio) - 1 > end) + /* Omit large folio which extends beyond the end */ + if (base + nr - 1 > end) goto put; if 
(!folio_trylock(folio)) goto put; @@ -2113,7 +2123,19 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start, goto unlock; VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index), folio); + } else { + nr = 1 << xa_get_order(&mapping->i_pages, xas.xa_index); + base = xas.xa_index & ~(nr - 1); + /* Omit order>0 value which begins before the start */ + if (base < *start) + continue; + /* Omit order>0 value which extends beyond the end */ + if (base + nr - 1 > end) + break; } + + /* Update start now so that last update is correct on return */ + *start = base + nr; indices[fbatch->nr] = xas.xa_index; if (!folio_batch_add(fbatch, folio)) break; @@ -2125,15 +2147,6 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start, } rcu_read_unlock(); - if (folio_batch_count(fbatch)) { - unsigned long nr = 1; - int idx = folio_batch_count(fbatch) - 1; - - folio = fbatch->folios[idx]; - if (!xa_is_value(folio)) - nr = folio_nr_pages(folio); - *start = indices[idx] + nr; - } return folio_batch_count(fbatch); } -- Gitee From 6f641513cad5840b4763c6908bd7c1881e22f9ed Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 12 Aug 2024 15:42:06 +0800 Subject: [PATCH 1674/2138] mm: shmem: use swap_free_nr() to free shmem swap entries ANBZ: #9728 commit 40ff2d11bd58a3908897ccc689ed5d8ac498f173 upstream As a preparation for supporting shmem large folio swapout, use swap_free_nr() to free some continuous swap entries of the shmem large folio when the large folio was swapped in from the swap cache. In addition, the index should also be round down to the number of pages when adding the swapin folio into the pagecache. 
Link: https://lkml.kernel.org/r/342207fa679fc88a447dac2e101ad79e6050fe79.1723434324.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: Barry Song Cc: Chris Li Cc: Daniel Gomez Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- mm/shmem.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 8342e6208ed9..afea90ba4cf0 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1970,6 +1970,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, struct address_space *mapping = inode->i_mapping; swp_entry_t swapin_error; void *old; + int nr_pages; swapin_error = make_poisoned_swp_entry(); old = xa_cmpxchg_irq(&mapping->i_pages, index, @@ -1978,6 +1979,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, if (old != swp_to_radix_entry(swap)) return; + nr_pages = folio_nr_pages(folio); folio_wait_writeback(folio); delete_from_swap_cache(folio); /* @@ -1985,8 +1987,8 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks) * in shmem_evict_inode(). 
*/ - shmem_recalc_inode(inode, -1, -1); - swap_free(swap); + shmem_recalc_inode(inode, -nr_pages, -nr_pages); + swap_free_nr(swap, nr_pages); } /* @@ -2005,7 +2007,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, struct swap_info_struct *si; struct folio *folio = NULL; swp_entry_t swap; - int error; + int error, nr_pages; VM_BUG_ON(!*foliop || !xa_is_value(*foliop)); swap = radix_to_swp_entry(*foliop); @@ -2052,6 +2054,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, goto failed; } folio_wait_writeback(folio); + nr_pages = folio_nr_pages(folio); /* * Some architectures may have to restore extra metadata to the @@ -2065,19 +2068,20 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, goto failed; } - error = shmem_add_to_page_cache(folio, mapping, index, + error = shmem_add_to_page_cache(folio, mapping, + round_down(index, nr_pages), swp_to_radix_entry(swap), gfp); if (error) goto failed; - shmem_recalc_inode(inode, 0, -1); + shmem_recalc_inode(inode, 0, -nr_pages); if (sgp == SGP_WRITE) folio_mark_accessed(folio); delete_from_swap_cache(folio); folio_mark_dirty(folio); - swap_free(swap); + swap_free_nr(swap, nr_pages); put_swap_device(si); *foliop = folio; -- Gitee From 1eb77e984da2dc2388a05334c6fc081a6e8f8337 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 12 Aug 2024 15:42:07 +0800 Subject: [PATCH 1675/2138] mm: shmem: support large folio allocation for shmem_replace_folio() ANBZ: #9728 commit 736f0e03564729a5ba609c2c4fcb37ff1e92ede4 upstream To support large folio swapin for shmem in the following patches, add large folio allocation for the new replacement folio in shmem_replace_folio(). Moreover large folios occupy N consecutive entries in the swap cache instead of using multi-index entries like the page cache, therefore we should replace each consecutive entries in the swap cache instead of using the shmem_replace_entry(). 
As well as updating statistics and folio reference count using the number of pages in the folio. [baolin.wang@linux.alibaba.com: fix the gfp flag for large folio allocation] Link: https://lkml.kernel.org/r/5b1e9c5a-7f61-4d97-a8d7-41767ca04c77@linux.alibaba.com [baolin.wang@linux.alibaba.com: fix build without CONFIG_TRANSPARENT_HUGEPAGE] Link: https://lkml.kernel.org/r/8c03467c-63b2-43b4-9851-222d4188725c@linux.alibaba.com Link: https://lkml.kernel.org/r/a41138ecc857ef13e7c5ffa0174321e9e2c9970a.1723434324.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: Barry Song Cc: Chris Li Cc: Daniel Gomez Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- mm/shmem.c | 74 +++++++++++++++++++++++++++++++++--------------------- 1 file changed, 46 insertions(+), 28 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index afea90ba4cf0..a3540080b8ee 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -154,7 +154,7 @@ static unsigned long shmem_default_max_inodes(void) static int shmem_swapin_folio(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp, gfp_t gfp, - struct mm_struct *fault_mm, vm_fault_t *fault_type); + struct vm_area_struct *vma, vm_fault_t *fault_type); static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb) { @@ -1896,30 +1896,35 @@ static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp) } static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, - struct shmem_inode_info *info, pgoff_t index) + struct shmem_inode_info *info, pgoff_t index, + struct vm_area_struct *vma) { - struct folio *old, *new; - struct address_space *swap_mapping; - swp_entry_t entry; - pgoff_t swap_index; - int error; - - old = *foliop; - entry = old->swap; - swap_index = 
swp_offset(entry); - swap_mapping = swap_address_space(entry); + struct folio *new, *old = *foliop; + swp_entry_t entry = old->swap; + struct address_space *swap_mapping = swap_address_space(entry); + pgoff_t swap_index = swp_offset(entry); + XA_STATE(xas, &swap_mapping->i_pages, swap_index); + int nr_pages = folio_nr_pages(old); + int error = 0, i; /* * We have arrived here because our zones are constrained, so don't * limit chance of success by further cpuset and node constraints. */ gfp &= ~GFP_CONSTRAINT_MASK; - VM_BUG_ON_FOLIO(folio_test_large(old), old); - new = shmem_alloc_folio(gfp, 0, info, index); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (nr_pages > 1) { + gfp_t huge_gfp = vma_thp_gfp_mask(vma); + + gfp = limit_gfp_mask(huge_gfp, gfp); + } +#endif + + new = shmem_alloc_folio(gfp, folio_order(old), info, index); if (!new) return -ENOMEM; - folio_get(new); + folio_ref_add(new, nr_pages); folio_copy(new, old); flush_dcache_folio(new); @@ -1929,18 +1934,25 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, new->swap = entry; folio_set_swapcache(new); - /* - * Our caller will very soon move newpage out of swapcache, but it's - * a nice clean interface for us to replace oldpage by newpage there. 
- */ + /* Swap cache still stores N entries instead of a high-order entry */ xa_lock_irq(&swap_mapping->i_pages); - error = shmem_replace_entry(swap_mapping, swap_index, old, new); + for (i = 0; i < nr_pages; i++) { + void *item = xas_load(&xas); + + if (item != old) { + error = -ENOENT; + break; + } + + xas_store(&xas, new); + xas_next(&xas); + } if (!error) { mem_cgroup_migrate(old, new); - __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1); - __lruvec_stat_mod_folio(new, NR_SHMEM, 1); - __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1); - __lruvec_stat_mod_folio(old, NR_SHMEM, -1); + __lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages); + __lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages); + __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages); + __lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages); } xa_unlock_irq(&swap_mapping->i_pages); @@ -1960,7 +1972,12 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, old->private = NULL; folio_unlock(old); - folio_put_refs(old, 2); + /* + * The old folio are removed from swap cache, drop the 'nr_pages' + * reference, as well as one temporary reference getting from swap + * cache. + */ + folio_put_refs(old, nr_pages + 1); return error; } @@ -1999,10 +2016,11 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, */ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, struct folio **foliop, enum sgp_type sgp, - gfp_t gfp, struct mm_struct *fault_mm, + gfp_t gfp, struct vm_area_struct *vma, vm_fault_t *fault_type) { struct address_space *mapping = inode->i_mapping; + struct mm_struct *fault_mm = vma ? 
vma->vm_mm : NULL; struct shmem_inode_info *info = SHMEM_I(inode); struct swap_info_struct *si; struct folio *folio = NULL; @@ -2063,7 +2081,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, arch_swap_restore(swap, folio); if (shmem_should_replace_folio(folio, gfp)) { - error = shmem_replace_folio(&folio, gfp, info, index); + error = shmem_replace_folio(&folio, gfp, info, index, vma); if (error) goto failed; } @@ -2141,7 +2159,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, if (xa_is_value(folio)) { error = shmem_swapin_folio(inode, index, &folio, - sgp, gfp, fault_mm, fault_type); + sgp, gfp, vma, fault_type); if (error == -EEXIST) goto repeat; -- Gitee From 955e15f05315a2812ed30f3cb4447abcd004b716 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 12 Aug 2024 15:42:08 +0800 Subject: [PATCH 1676/2138] mm: shmem: drop folio reference count using 'nr_pages' in shmem_delete_from_page_cache() ANBZ: #9728 commit 872339c31f3b2dd466320a5bed54abeccf0db47b upstream To support large folio swapin/swapout for shmem in the following patches, drop the folio's reference count by the number of pages contained in the folio when a shmem folio is deleted from shmem pagecache after adding into swap cache. 
Link: https://lkml.kernel.org/r/b371eadb27f42fc51261c51008fbb9a334985b4c.1723434324.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: Barry Song Cc: Chris Li Cc: Daniel Gomez Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- mm/shmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/shmem.c b/mm/shmem.c index a3540080b8ee..3aeb79f44e56 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -844,7 +844,7 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap) __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr); __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr); xa_unlock_irq(&mapping->i_pages); - folio_put(folio); + folio_put_refs(folio, nr); BUG_ON(error); } -- Gitee From 7361f129ad7f54eb0ef51713f452cdfc7473925e Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 12 Aug 2024 15:42:09 +0800 Subject: [PATCH 1677/2138] mm: shmem: split large entry if the swapin folio is not large ANBZ: #9728 commit 12885cbe88ddf6c5fc3306193a6449ac310f2331 upstream Now the swap device can only swap-in order 0 folio, even though a large folio is swapped out. This requires us to split the large entry previously saved in the shmem pagecache to support the swap in of small folios. 
[hughd@google.com: fix warnings from kmalloc_fix_flags()] Link: https://lkml.kernel.org/r/e2a2ba5d-864c-50aa-7579-97cba1c7dd0c@google.com [baolin.wang@linux.alibaba.com: drop the 'new_order' parameter] Link: https://lkml.kernel.org/r/39c71ccf-669b-4d9f-923c-f6b9c4ceb8df@linux.alibaba.com Link: https://lkml.kernel.org/r/4a0f12f27c54a62eb4d9ca1265fed3a62531a63e.1723434324.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Signed-off-by: Hugh Dickins Cc: Barry Song Cc: Chris Li Cc: Daniel Gomez Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- mm/shmem.c | 103 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) diff --git a/mm/shmem.c b/mm/shmem.c index 3aeb79f44e56..f1d8ba40b520 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2008,6 +2008,84 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, swap_free_nr(swap, nr_pages); } +static int shmem_split_large_entry(struct inode *inode, pgoff_t index, + swp_entry_t swap, gfp_t gfp) +{ + struct address_space *mapping = inode->i_mapping; + XA_STATE_ORDER(xas, &mapping->i_pages, index, 0); + void *alloced_shadow = NULL; + int alloced_order = 0, i; + + /* Convert user data gfp flags to xarray node gfp flags */ + gfp &= GFP_RECLAIM_MASK; + + for (;;) { + int order = -1, split_order = 0; + void *old = NULL; + + xas_lock_irq(&xas); + old = xas_load(&xas); + if (!xa_is_value(old) || swp_to_radix_entry(swap) != old) { + xas_set_err(&xas, -EEXIST); + goto unlock; + } + + order = xas_get_order(&xas); + + /* Swap entry may have changed before we re-acquire the lock */ + if (alloced_order && + (old != alloced_shadow || order != alloced_order)) { + xas_destroy(&xas); + alloced_order = 0; + } + + /* Try to split large swap entry in 
pagecache */ + if (order > 0) { + if (!alloced_order) { + split_order = order; + goto unlock; + } + xas_split(&xas, old, order); + + /* + * Re-set the swap entry after splitting, and the swap + * offset of the original large entry must be continuous. + */ + for (i = 0; i < 1 << order; i++) { + pgoff_t aligned_index = round_down(index, 1 << order); + swp_entry_t tmp; + + tmp = swp_entry(swp_type(swap), swp_offset(swap) + i); + __xa_store(&mapping->i_pages, aligned_index + i, + swp_to_radix_entry(tmp), 0); + } + } + +unlock: + xas_unlock_irq(&xas); + + /* split needed, alloc here and retry. */ + if (split_order) { + xas_split_alloc(&xas, old, split_order, gfp); + if (xas_error(&xas)) + goto error; + alloced_shadow = old; + alloced_order = split_order; + xas_reset(&xas); + continue; + } + + if (!xas_nomem(&xas, gfp)) + break; + } + +error: + if (xas_error(&xas)) + return xas_error(&xas); + + return alloced_order; +} + /* * Swap in the folio pointed to by *foliop. * Caller has to make sure that *foliop contains a valid swapped folio. @@ -2045,12 +2123,37 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, /* Look it up and read it in.. */ folio = swap_cache_get_folio(swap, NULL, 0); if (!folio) { + int split_order; + /* Or update major stats only when swapin succeeds?? */ if (fault_type) { *fault_type |= VM_FAULT_MAJOR; count_vm_event(PGMAJFAULT); count_memcg_event_mm(fault_mm, PGMAJFAULT); } + + /* + * Now swap device can only swap in order 0 folio, then we + * should split the large swap entry stored in the pagecache + * if necessary. + */ + split_order = shmem_split_large_entry(inode, index, swap, gfp); + if (split_order < 0) { + error = split_order; + goto failed; + } + + /* + * If the large swap entry has already been split, it is + * necessary to recalculate the new swap entry based on + * the old order alignment. 
+ */ + if (split_order > 0) { + pgoff_t offset = index - round_down(index, 1 << split_order); + + swap = swp_entry(swp_type(swap), swp_offset(swap) + offset); + } + /* Here we actually start the io */ folio = shmem_swapin(swap, gfp, info, index); if (!folio) { -- Gitee From 3f6a2b55e3b866dea232c7e8fe7ebde49b399ba8 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 12 Aug 2024 15:42:10 +0800 Subject: [PATCH 1678/2138] mm: shmem: support large folio swap out ANBZ: #9728 commit 809bc86517cc408b5b8cb8e08e69096639432bc8 upstream Shmem will support large folio allocation [1] [2] to get a better performance, however, the memory reclaim still splits the precious large folios when trying to swap out shmem, which may lead to the memory fragmentation issue and can not take advantage of the large folio for shmeme. Moreover, the swap code already supports for swapping out large folio without split, hence this patch set supports the large folio swap out for shmem. Note the i915_gem_shmem driver still need to be split when swapping, thus add a new flag 'split_large_folio' for writeback_control to indicate spliting the large folio. 
[1] https://lore.kernel.org/all/cover.1717495894.git.baolin.wang@linux.alibaba.com/ [2] https://lore.kernel.org/all/20240515055719.32577-1-da.gomez@samsung.com/ [hughd@google.com: shmem_writepage() split folio at EOF before swapout] Link: https://lkml.kernel.org/r/aef55f8d-6040-692d-65e3-16150cce4440@google.com [baolin.wang@linux.alibaba.com: remove the wbc->split_large_folio per Hugh] Link: https://lkml.kernel.org/r/1236a002daa301b3b9ba73d6c0fab348427cf295.1724833399.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/d80c21abd20e1b0f5ca66b330f074060fb2f082d.1723434324.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Signed-off-by: Hugh Dickins Cc: Barry Song Cc: Chris Li Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox Cc: Pankaj Raghav Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4122 --- include/linux/writeback.h | 3 +++ mm/shmem.c | 28 ++++++++++++++++++++++------ mm/vmscan.c | 30 +++++++++++++++++++++++------- 3 files changed, 48 insertions(+), 13 deletions(-) diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 6cc3a8bcb533..eba6d0760259 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -77,6 +77,9 @@ struct writeback_control { */ struct swap_iocb **swap_plug; + /* Target list for splitting a large folio */ + struct list_head *list; + #ifdef CONFIG_CGROUP_WRITEBACK struct bdi_writeback *wb; /* wb this writeback is issued under */ struct inode *inode; /* inode being written out */ diff --git a/mm/shmem.c b/mm/shmem.c index f1d8ba40b520..9d7df156f56f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -788,7 +788,6 @@ static int shmem_add_to_page_cache(struct folio *folio, VM_BUG_ON_FOLIO(index != round_down(index, nr), folio); VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio); - 
VM_BUG_ON(expected && folio_test_large(folio)); folio_ref_add(folio, nr); folio->mapping = mapping; @@ -1453,6 +1452,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) swp_entry_t swap; pgoff_t index; int nr_pages; + bool split = false; /* * Our capabilities prevent regular writeback or sync from ever calling @@ -1471,14 +1471,26 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) goto redirty; /* - * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or - * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages, - * and its shmem_writeback() needs them to be split when swapping. + * If CONFIG_THP_SWAP is not enabled, the large folio should be + * split when swapping. + * + * And shrinkage of pages beyond i_size does not split swap, so + * swapout of a large folio crossing i_size needs to split too + * (unless fallocate has been used to preallocate beyond EOF). */ if (folio_test_large(folio)) { + index = shmem_fallocend(inode, + DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE)); + if ((index > folio->index && index < folio_next_index(folio)) || + !IS_ENABLED(CONFIG_THP_SWAP)) + split = true; + } + + if (split) { +try_split: /* Ensure the subpages are still dirty */ folio_test_set_dirty(folio); - if (split_huge_page(page) < 0) + if (split_huge_page_to_list(page, wbc->list)) goto redirty; folio = page_folio(page); folio_clear_dirty(folio); @@ -1520,8 +1532,12 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) } swap = folio_alloc_swap(folio); - if (!swap.val) + if (!swap.val) { + if (nr_pages > 1) + goto try_split; + goto redirty; + } /* * Add inode to shmem_unuse()'s list of swapped-out inodes, diff --git a/mm/vmscan.c b/mm/vmscan.c index f47ee6f06381..97d4511eb762 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1310,7 +1310,7 @@ typedef enum { * Calls ->writepage(). 
*/ static pageout_t pageout(struct folio *folio, struct address_space *mapping, - struct swap_iocb **plug) + struct swap_iocb **plug, struct list_head *folio_list) { /* * If the folio is dirty, only perform writeback if that write @@ -1358,6 +1358,14 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping, .swap_plug = plug, }; + /* + * The large shmem folio can be split if CONFIG_THP_SWAP is + * not enabled or contiguous swap entries are failed to + * allocate. + */ + if (shmem_mapping(mapping) && folio_test_large(folio)) + wbc.list = folio_list; + folio_set_reclaim(folio); res = mapping->a_ops->writepage(&folio->page, &wbc); if (res < 0) @@ -1931,11 +1939,6 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, goto activate_locked_split; } } - } else if (folio_test_swapbacked(folio) && - folio_test_large(folio)) { - /* Split shmem folio */ - if (split_folio_to_list(folio, folio_list)) - goto keep_locked; } /* @@ -2022,12 +2025,25 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, * starts and then write it out here. */ try_to_unmap_flush_dirty(); - switch (pageout(folio, mapping, &plug)) { + switch (pageout(folio, mapping, &plug, folio_list)) { case PAGE_KEEP: goto keep_locked; case PAGE_ACTIVATE: + /* + * If shmem folio is split when writeback to swap, + * the tail pages will make their own pass through + * this function and be accounted then. 
+ */ + if (nr_pages > 1 && !folio_test_large(folio)) { + sc->nr_scanned -= (nr_pages - 1); + nr_pages = 1; + } goto activate_locked; case PAGE_SUCCESS: + if (nr_pages > 1 && !folio_test_large(folio)) { + sc->nr_scanned -= (nr_pages - 1); + nr_pages = 1; + } stat->nr_pageout += nr_pages; if (folio_test_writeback(folio)) -- Gitee From f50a169f3a2aa2576dff0d53a2939feec2772938 Mon Sep 17 00:00:00 2001 From: zhangtianyang Date: Fri, 1 Nov 2024 09:22:44 +0800 Subject: [PATCH 1679/2138] anolis: LoongArch: Adjust dynamic writecombine policy ANBZ: #11908 Signed-off-by: zhangtianyang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4129 --- arch/loongarch/kernel/setup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 77077dd324ff..a8de2f809403 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -430,10 +430,10 @@ static void __init writecombine_detect(void) } cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME); - + cpuname &= 0x0000ffffffffffff; switch (cpuname) { case 0x0000303030364333: - wc_enabled = true; + wc_enabled = false; break; default: break; -- Gitee From cfe948e316634758fe2c4661180fad13279709e4 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 3 Apr 2024 18:18:32 +0100 Subject: [PATCH 1680/2138] khugepaged: remove hpage from collapse_huge_page() ANBZ: #9728 commit 0234779276e56fb17677f3cf64d7cd501f8abe69 upstream Work purely in terms of the folio. Removes a call to compound_head() in put_page(). 
Link: https://lkml.kernel.org/r/20240403171838.1445826-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Vishal Moola (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4128 --- mm/khugepaged.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 996087bc0fad..b3736774921a 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1097,7 +1097,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, pte_t *pte; pgtable_t pgtable; struct folio *folio; - struct page *hpage; spinlock_t *pmd_ptl, *pte_ptl; int result = SCAN_FAIL; struct vm_area_struct *vma; @@ -1114,7 +1113,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, mmap_read_unlock(mm); result = alloc_charge_folio(&folio, mm, cc); - hpage = &folio->page; if (result != SCAN_SUCCEED) goto out_nolock; @@ -1210,7 +1208,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, */ anon_vma_unlock_write(vma->anon_vma); - result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd, + result = __collapse_huge_page_copy(pte, &folio->page, pmd, _pmd, vma, address, pte_ptl, &compound_pagelist); pte_unmap(pte); @@ -1225,7 +1223,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, __folio_mark_uptodate(folio); pgtable = pmd_pgtable(_pmd); - _pmd = mk_huge_pmd(hpage, vma->vm_page_prot); + _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot); _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); spin_lock(pmd_ptl); @@ -1237,14 +1235,14 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, update_mmu_cache_pmd(vma, address, pmd); spin_unlock(pmd_ptl); - hpage = NULL; + folio = NULL; result = SCAN_SUCCEED; out_up_write: mmap_write_unlock(mm); out_nolock: - if (hpage) - put_page(hpage); + if (folio) + folio_put(folio); 
trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result); return result; } -- Gitee From ffe1b8091e77d6842b5f87eb7954d3a284112765 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 3 Apr 2024 18:18:33 +0100 Subject: [PATCH 1681/2138] khugepaged: pass a folio to __collapse_huge_page_copy() ANBZ: #9728 commit 8eca68e2cfdf863e98dc3c2cc8b2be9cac46b9d6 upstream Simplify the body of __collapse_huge_page_copy() while I'm looking at it. Link: https://lkml.kernel.org/r/20240403171838.1445826-5-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Vishal Moola (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4128 --- mm/khugepaged.c | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index b3736774921a..c7bfcc158dd7 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -782,7 +782,7 @@ static void __collapse_huge_page_copy_failed(pte_t *pte, * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC. 
* * @pte: starting of the PTEs to copy from - * @page: the new hugepage to copy contents to + * @folio: the new hugepage to copy contents to * @pmd: pointer to the new hugepage's PMD * @orig_pmd: the original raw pages' PMD * @vma: the original raw pages' virtual memory area @@ -790,33 +790,29 @@ static void __collapse_huge_page_copy_failed(pte_t *pte, * @ptl: lock on raw pages' PTEs * @compound_pagelist: list that stores compound pages */ -static int __collapse_huge_page_copy(pte_t *pte, - struct page *page, - pmd_t *pmd, - pmd_t orig_pmd, - struct vm_area_struct *vma, - unsigned long address, - spinlock_t *ptl, - struct list_head *compound_pagelist) +static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio, + pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma, + unsigned long address, spinlock_t *ptl, + struct list_head *compound_pagelist) { - struct page *src_page; - pte_t *_pte; - pte_t pteval; - unsigned long _address; + unsigned int i; int result = SCAN_SUCCEED; /* * Copying pages' contents is subject to memory poison at any iteration. 
*/ - for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR; - _pte++, page++, _address += PAGE_SIZE) { - pteval = ptep_get(_pte); + for (i = 0; i < HPAGE_PMD_NR; i++) { + pte_t pteval = ptep_get(pte + i); + struct page *page = folio_page(folio, i); + unsigned long src_addr = address + i * PAGE_SIZE; + struct page *src_page; + if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { - clear_user_highpage(page, _address); + clear_user_highpage(page, src_addr); continue; } src_page = pte_page(pteval); - if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) { + if (copy_mc_user_highpage(page, src_page, src_addr, vma) > 0) { result = SCAN_COPY_MC; break; } @@ -1208,7 +1204,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, */ anon_vma_unlock_write(vma->anon_vma); - result = __collapse_huge_page_copy(pte, &folio->page, pmd, _pmd, + result = __collapse_huge_page_copy(pte, folio, pmd, _pmd, vma, address, pte_ptl, &compound_pagelist); pte_unmap(pte); -- Gitee From 6083ff59afde15833748d09d94353124d856cb77 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 3 Apr 2024 18:18:35 +0100 Subject: [PATCH 1682/2138] khugepaged: use a folio throughout collapse_file() ANBZ: #9728 commit 8d1e24c0b82d9730d05ee85eb7f4195df8cdf6a6 upstream Pull folios from the page cache instead of pages. Half of this work had been done already, but we were still operating on pages for a large chunk of this function. There is no attempt in this patch to handle large folios that are smaller than a THP; that will have to wait for a future patch. 
[willy@infradead.org: the unlikely() is embedded in IS_ERR()] Link: https://lkml.kernel.org/r/ZhIWX8K0E2tSyMSr@casper.infradead.org Link: https://lkml.kernel.org/r/20240403171838.1445826-7-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4128 --- mm/khugepaged.c | 113 +++++++++++++++++++++++------------------------- 1 file changed, 54 insertions(+), 59 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index c7bfcc158dd7..2a7d353dd80d 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1792,9 +1792,8 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, struct collapse_control *cc) { struct address_space *mapping = file->f_mapping; - struct page *page; - struct page *tmp, *dst; - struct folio *folio, *new_folio; + struct page *dst; + struct folio *folio, *tmp, *new_folio; pgoff_t index = 0, end = start + HPAGE_PMD_NR; LIST_HEAD(pagelist); XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); @@ -1832,11 +1831,11 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, for (index = start; index < end; index++) { xas_set(&xas, index); - page = xas_load(&xas); + folio = xas_load(&xas); VM_BUG_ON(index != xas.xa_index); if (is_shmem) { - if (!page) { + if (!folio) { /* * Stop if extent has been truncated or * hole-punched, and is now completely @@ -1852,7 +1851,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, continue; } - if (xa_is_value(page) || !PageUptodate(page)) { + if (xa_is_value(folio) || !folio_test_uptodate(folio)) { xas_unlock_irq(&xas); /* swap in or instantiate fallocated page */ if (shmem_get_folio(mapping->host, index, @@ -1862,28 +1861,27 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, } /* drain lru cache to help isolate_lru_page() */ lru_add_drain(); - page = folio_file_page(folio, index); - } else if (trylock_page(page)) { 
- get_page(page); + } else if (folio_trylock(folio)) { + folio_get(folio); xas_unlock_irq(&xas); } else { result = SCAN_PAGE_LOCK; goto xa_locked; } } else { /* !is_shmem */ - if (!page || xa_is_value(page)) { + if (!folio || xa_is_value(folio)) { xas_unlock_irq(&xas); page_cache_sync_readahead(mapping, &file->f_ra, file, index, end - index); /* drain lru cache to help isolate_lru_page() */ lru_add_drain(); - page = find_lock_page(mapping, index); - if (unlikely(page == NULL)) { + folio = filemap_lock_folio(mapping, index); + if (IS_ERR(folio)) { result = SCAN_FAIL; goto xa_unlocked; } - } else if (PageDirty(page)) { + } else if (folio_test_dirty(folio)) { /* * khugepaged only works on read-only fd, * so this page is dirty because it hasn't @@ -1901,12 +1899,12 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, filemap_flush(mapping); result = SCAN_FAIL; goto xa_unlocked; - } else if (PageWriteback(page)) { + } else if (folio_test_writeback(folio)) { xas_unlock_irq(&xas); result = SCAN_FAIL; goto xa_unlocked; - } else if (trylock_page(page)) { - get_page(page); + } else if (folio_trylock(folio)) { + folio_get(folio); xas_unlock_irq(&xas); } else { result = SCAN_PAGE_LOCK; @@ -1915,35 +1913,31 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, } /* - * The page must be locked, so we can drop the i_pages lock + * The folio must be locked, so we can drop the i_pages lock * without racing with truncate. */ - VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); - /* make sure the page is up to date */ - if (unlikely(!PageUptodate(page))) { + /* make sure the folio is up to date */ + if (unlikely(!folio_test_uptodate(folio))) { result = SCAN_FAIL; goto out_unlock; } /* * If file was truncated then extended, or hole-punched, before - * we locked the first page, then a THP might be there already. + * we locked the first folio, then a THP might be there already. 
* This will be discovered on the first iteration. */ - if (PageTransCompound(page)) { - struct page *head = compound_head(page); - - result = compound_order(head) == HPAGE_PMD_ORDER && - head->index == start + if (folio_test_large(folio)) { + result = folio_order(folio) == HPAGE_PMD_ORDER && + folio->index == start /* Maybe PMD-mapped */ ? SCAN_PTE_MAPPED_HUGEPAGE : SCAN_PAGE_COMPOUND; goto out_unlock; } - folio = page_folio(page); - if (folio_mapping(folio) != mapping) { result = SCAN_TRUNCATED; goto out_unlock; @@ -1953,7 +1947,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, folio_test_writeback(folio))) { /* * khugepaged only works on read-only fd, so this - * page is dirty because it hasn't been flushed + * folio is dirty because it hasn't been flushed * since first write. */ result = SCAN_FAIL; @@ -1977,33 +1971,34 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, xas_lock_irq(&xas); - VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page); + VM_BUG_ON_FOLIO(folio != xa_load(xas.xa, index), folio); /* - * We control three references to the page: + * We control three references to the folio: * - we hold a pin on it; * - one reference from page cache; - * - one from isolate_lru_page; - * If those are the only references, then any new usage of the - * page will have to fetch it from the page cache. That requires - * locking the page to handle truncate, so any new usage will be - * blocked until we unlock page after collapse/during rollback. + * - one from lru_isolate_folio; + * If those are the only references, then any new usage + * of the folio will have to fetch it from the page + * cache. That requires locking the folio to handle + * truncate, so any new usage will be blocked until we + * unlock folio after collapse/during rollback. 
*/ - if (page_count(page) != 3) { + if (folio_ref_count(folio) != 3) { result = SCAN_PAGE_COUNT; xas_unlock_irq(&xas); - putback_lru_page(page); + folio_putback_lru(folio); goto out_unlock; } /* - * Accumulate the pages that are being collapsed. + * Accumulate the folios that are being collapsed. */ - list_add_tail(&page->lru, &pagelist); + list_add_tail(&folio->lru, &pagelist); continue; out_unlock: - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); goto xa_unlocked; } @@ -2042,17 +2037,17 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, } /* - * The old pages are locked, so they won't change anymore. + * The old folios are locked, so they won't change anymore. */ index = start; dst = folio_page(new_folio, 0); - list_for_each_entry(page, &pagelist, lru) { - while (index < page->index) { + list_for_each_entry(folio, &pagelist, lru) { + while (index < folio->index) { clear_highpage(dst); index++; dst++; } - if (copy_mc_highpage(dst, page) > 0) { + if (copy_mc_highpage(dst, folio_page(folio, 0)) > 0) { result = SCAN_COPY_MC; goto rollback; } @@ -2164,15 +2159,15 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, folio_unlock(new_folio); /* - * The collapse has succeeded, so free the old pages. + * The collapse has succeeded, so free the old folios. 
*/ - list_for_each_entry_safe(page, tmp, &pagelist, lru) { - list_del(&page->lru); - page->mapping = NULL; - ClearPageActive(page); - ClearPageUnevictable(page); - unlock_page(page); - folio_put_refs(page_folio(page), 3); + list_for_each_entry_safe(folio, tmp, &pagelist, lru) { + list_del(&folio->lru); + folio->mapping = NULL; + folio_clear_active(folio); + folio_clear_unevictable(folio); + folio_unlock(folio); + folio_put_refs(folio, 3); } goto out; @@ -2186,11 +2181,11 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, shmem_uncharge(mapping->host, nr_none); } - list_for_each_entry_safe(page, tmp, &pagelist, lru) { - list_del(&page->lru); - unlock_page(page); - putback_lru_page(page); - put_page(page); + list_for_each_entry_safe(folio, tmp, &pagelist, lru) { + list_del(&folio->lru); + folio_unlock(folio); + folio_putback_lru(folio); + folio_put(folio); } /* * Undo the updates of filemap_nr_thps_inc for non-SHMEM -- Gitee From 23f4ea038d08569647d318cfa0435f66d7b529f4 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 3 Apr 2024 18:18:36 +0100 Subject: [PATCH 1683/2138] khugepaged: use a folio throughout hpage_collapse_scan_file() ANBZ: #9728 commit 43849758fdc976a6d6108ed6dfccdb136fdeec39 upstream Replace the use of pages with folios. Saves a few calls to compound_head() and removes some uses of obsolete functions. 
Link: https://lkml.kernel.org/r/20240403171838.1445826-8-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Reviewed-by: Vishal Moola (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4128 --- include/trace/events/huge_memory.h | 6 +++--- mm/khugepaged.c | 33 +++++++++++++++--------------- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index 37f2443b3cdb..9277524e84eb 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -174,10 +174,10 @@ TRACE_EVENT(mm_collapse_huge_page_swapin, TRACE_EVENT(mm_khugepaged_scan_file, - TP_PROTO(struct mm_struct *mm, struct page *page, struct file *file, + TP_PROTO(struct mm_struct *mm, struct folio *folio, struct file *file, int present, int swap, int result), - TP_ARGS(mm, page, file, present, swap, result), + TP_ARGS(mm, folio, file, present, swap, result), TP_STRUCT__entry( __field(struct mm_struct *, mm) @@ -190,7 +190,7 @@ TRACE_EVENT(mm_khugepaged_scan_file, TP_fast_assign( __entry->mm = mm; - __entry->pfn = page ? page_to_pfn(page) : -1; + __entry->pfn = folio ? 
folio_pfn(folio) : -1; __assign_str(filename, file->f_path.dentry->d_iname); __entry->present = present; __entry->swap = swap; diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 2a7d353dd80d..f969c9b8f86b 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2215,7 +2215,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, struct file *file, pgoff_t start, struct collapse_control *cc) { - struct page *page = NULL; + struct folio *folio = NULL; struct address_space *mapping = file->f_mapping; XA_STATE(xas, &mapping->i_pages, start); int present, swap; @@ -2227,11 +2227,11 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, memset(cc->node_load, 0, sizeof(cc->node_load)); nodes_clear(cc->alloc_nmask); rcu_read_lock(); - xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { - if (xas_retry(&xas, page)) + xas_for_each(&xas, folio, start + HPAGE_PMD_NR - 1) { + if (xas_retry(&xas, folio)) continue; - if (xa_is_value(page)) { + if (xa_is_value(folio)) { ++swap; if (cc->is_khugepaged && swap > khugepaged_max_ptes_swap) { @@ -2246,11 +2246,9 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, * TODO: khugepaged should compact smaller compound pages * into a PMD sized page */ - if (PageTransCompound(page)) { - struct page *head = compound_head(page); - - result = compound_order(head) == HPAGE_PMD_ORDER && - head->index == start + if (folio_test_large(folio)) { + result = folio_order(folio) == HPAGE_PMD_ORDER && + folio->index == start /* Maybe PMD-mapped */ ? 
SCAN_PTE_MAPPED_HUGEPAGE : SCAN_PAGE_COMPOUND; @@ -2263,28 +2261,29 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, break; } - node = page_to_nid(page); + node = folio_nid(folio); if (hpage_collapse_scan_abort(node, cc)) { result = SCAN_SCAN_ABORT; break; } cc->node_load[node]++; - if (!PageLRU(page)) { + if (!folio_test_lru(folio)) { result = SCAN_PAGE_LRU; break; } - if (page_count(page) != - 1 + page_mapcount(page) + page_has_private(page)) { + if (folio_ref_count(folio) != + 1 + folio_mapcount(folio) + folio_test_private(folio)) { result = SCAN_PAGE_COUNT; break; } /* - * We probably should check if the page is referenced here, but - * nobody would transfer pte_young() to PageReferenced() for us. - * And rmap walk here is just too costly... + * We probably should check if the folio is referenced + * here, but nobody would transfer pte_young() to + * folio_test_referenced() for us. And rmap walk here + * is just too costly... */ present++; @@ -2306,7 +2305,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, } } - trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result); + trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result); return result; } #else -- Gitee From 05e624ae2caec121430adea85b051ae9951dc1b7 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 20 Aug 2024 17:49:13 +0800 Subject: [PATCH 1684/2138] mm: khugepaged: expand the is_refcount_suitable() to support file folios ANBZ: #9728 commit fda6d4de064a9d37414df36d45836898fff5e165 upstream Patch series "support shmem mTHP collapse", v2. Shmem already supports mTHP allocation[1], and this patchset adds support for shmem mTHP collapse, as well as adding relevant test cases. This patch (of 5): Expand the is_refcount_suitable() to support reference checks for file folios, as preparation for supporting shmem mTHP collapse. 
Link: https://lkml.kernel.org/r/cover.1724140601.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/eae4cb3195ebbb654bfb7967cb7261d4e4e7c7fa.1724140601.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4128 --- mm/khugepaged.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index f969c9b8f86b..97e8460077b1 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -542,12 +542,14 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte, static bool is_refcount_suitable(struct folio *folio) { - int expected_refcount; + int expected_refcount = folio_mapcount(folio); - expected_refcount = folio_mapcount(folio); - if (folio_test_swapcache(folio)) + if (!folio_test_anon(folio) || folio_test_swapcache(folio)) expected_refcount += folio_nr_pages(folio); + if (folio_test_private(folio)) + expected_refcount++; + return folio_ref_count(folio) == expected_refcount; } @@ -2273,8 +2275,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, break; } - if (folio_ref_count(folio) != - 1 + folio_mapcount(folio) + folio_test_private(folio)) { + if (!is_refcount_suitable(folio)) { result = SCAN_PAGE_COUNT; break; } -- Gitee From 1a58e5cc9dea1ae959382fea091ea89e3d854ba7 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 20 Aug 2024 17:49:14 +0800 Subject: [PATCH 1685/2138] mm: khugepaged: use the number of pages in the folio to check the reference count ANBZ: #9728 commit d6b8f296e8d74d685627fd746558745c13b8bd32 upstream Use the number of pages in the folio to check the reference count as preparation for supporting shmem mTHP collapse. 
Link: https://lkml.kernel.org/r/9ea49262308de28957596cc6e8edc2d3a4f54659.1724140601.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4128 --- mm/khugepaged.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 97e8460077b1..ff1a6d3c3cf7 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1976,9 +1976,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, VM_BUG_ON_FOLIO(folio != xa_load(xas.xa, index), folio); /* - * We control three references to the folio: + * We control 2 + nr_pages references to the folio: * - we hold a pin on it; - * - one reference from page cache; + * - nr_pages reference from page cache; * - one from lru_isolate_folio; * If those are the only references, then any new usage * of the folio will have to fetch it from the page @@ -1986,7 +1986,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, * truncate, so any new usage will be blocked until we * unlock folio after collapse/during rollback. 
*/ - if (folio_ref_count(folio) != 3) { + if (folio_ref_count(folio) != 2 + folio_nr_pages(folio)) { result = SCAN_PAGE_COUNT; xas_unlock_irq(&xas); folio_putback_lru(folio); @@ -2169,7 +2169,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, folio_clear_active(folio); folio_clear_unevictable(folio); folio_unlock(folio); - folio_put_refs(folio, 3); + folio_put_refs(folio, 2 + folio_nr_pages(folio)); } goto out; -- Gitee From 603acf236369fad3dcc4d4412ef3822c0bf79829 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 20 Aug 2024 17:49:15 +0800 Subject: [PATCH 1686/2138] mm: khugepaged: support shmem mTHP copy ANBZ: #9728 commit dfa98f56d932fca3eaadaed8c17393fdfa00574d upstream Iterate each subpage in the large folio to copy, as preparation for supporting shmem mTHP collapse. Link: https://lkml.kernel.org/r/222d615b7c837eabb47a238126c5fdeff8aa5283.1724140601.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: Barry Song <21cnbao@gmail.com> Cc: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4128 --- mm/khugepaged.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index ff1a6d3c3cf7..4c8148587c96 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2044,17 +2044,22 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, index = start; dst = folio_page(new_folio, 0); list_for_each_entry(folio, &pagelist, lru) { + int i, nr_pages = folio_nr_pages(folio); + while (index < folio->index) { clear_highpage(dst); index++; dst++; } - if (copy_mc_highpage(dst, folio_page(folio, 0)) > 0) { - result = SCAN_COPY_MC; - goto rollback; + + for (i = 0; i < nr_pages; i++) { + if (copy_mc_highpage(dst, folio_page(folio, i)) > 0) { + result = SCAN_COPY_MC; + goto rollback; + } + index++; + dst++; } - index++; - dst++; } while 
(index < end) { clear_highpage(dst); -- Gitee From 3e29e0fec9241ba8097c7c6e0f0aa1b81acfd40d Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 20 Aug 2024 17:49:16 +0800 Subject: [PATCH 1687/2138] mm: khugepaged: support shmem mTHP collapse ANBZ: #9728 commit 7de856ffd007f132fd4a0474b289a622a3f88cd7 upstream Shmem already supports the allocation of mTHP, but khugepaged does not yet support collapsing mTHP folios. Now khugepaged is ready to support mTHP, and this patch enables the collapse of shmem mTHP. Link: https://lkml.kernel.org/r/b9da76aab4276eb6e5d12c479af2b5eea5b4575d.1724140601.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: Barry Song <21cnbao@gmail.com> Cc: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4128 --- mm/khugepaged.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 4c8148587c96..255f92503310 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1831,7 +1831,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, } } while (1); - for (index = start; index < end; index++) { + for (index = start; index < end;) { xas_set(&xas, index); folio = xas_load(&xas); @@ -1850,6 +1850,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, } } nr_none++; + index++; continue; } @@ -1931,12 +1932,10 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, * we locked the first folio, then a THP might be there already. * This will be discovered on the first iteration. */ - if (folio_test_large(folio)) { - result = folio_order(folio) == HPAGE_PMD_ORDER && - folio->index == start - /* Maybe PMD-mapped */ - ? 
SCAN_PTE_MAPPED_HUGEPAGE - : SCAN_PAGE_COMPOUND; + if (folio_order(folio) == HPAGE_PMD_ORDER && + folio->index == start) { + /* Maybe PMD-mapped */ + result = SCAN_PTE_MAPPED_HUGEPAGE; goto out_unlock; } @@ -1997,6 +1996,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, * Accumulate the folios that are being collapsed. */ list_add_tail(&folio->lru, &pagelist); + index += folio_nr_pages(folio); continue; out_unlock: folio_unlock(folio); @@ -2249,16 +2249,10 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, continue; } - /* - * TODO: khugepaged should compact smaller compound pages - * into a PMD sized page - */ - if (folio_test_large(folio)) { - result = folio_order(folio) == HPAGE_PMD_ORDER && - folio->index == start - /* Maybe PMD-mapped */ - ? SCAN_PTE_MAPPED_HUGEPAGE - : SCAN_PAGE_COMPOUND; + if (folio_order(folio) == HPAGE_PMD_ORDER && + folio->index == start) { + /* Maybe PMD-mapped */ + result = SCAN_PTE_MAPPED_HUGEPAGE; /* * For SCAN_PTE_MAPPED_HUGEPAGE, further processing * by the caller won't touch the page cache, and so -- Gitee From 8d6be7d8dae43f8247e1830ecb87a7552f682e7f Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 20 Aug 2024 17:49:17 +0800 Subject: [PATCH 1688/2138] selftests: mm: support shmem mTHP collapse testing ANBZ: #9728 commit 2e6d88e9d455fae9c4ac95c893362258f9540dfa upstream Add shmem mTHP collpase testing. Similar to the anonymous page, users can use the '-s' parameter to specify the shmem mTHP size for testing. 
Link: https://lkml.kernel.org/r/fa44bfa20ca5b9fd6f9163a048f3d3c1e53cd0a8.1724140601.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: Barry Song <21cnbao@gmail.com> Cc: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Ryan Roberts Cc: Yang Shi Cc: Zi Yan Signed-off-by: Andrew Morton Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4128 --- tools/testing/selftests/mm/khugepaged.c | 4 +- tools/testing/selftests/mm/thp_settings.c | 46 ++++++++++++++++++++--- tools/testing/selftests/mm/thp_settings.h | 9 ++++- 3 files changed, 51 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/mm/khugepaged.c b/tools/testing/selftests/mm/khugepaged.c index 829320a519e7..56d4480e8d3c 100644 --- a/tools/testing/selftests/mm/khugepaged.c +++ b/tools/testing/selftests/mm/khugepaged.c @@ -1095,7 +1095,7 @@ static void usage(void) fprintf(stderr, "\n\tSupported Options:\n"); fprintf(stderr, "\t\t-h: This help message.\n"); fprintf(stderr, "\t\t-s: mTHP size, expressed as page order.\n"); - fprintf(stderr, "\t\t Defaults to 0. Use this size for anon allocations.\n"); + fprintf(stderr, "\t\t Defaults to 0. 
Use this size for anon or shmem allocations.\n"); exit(1); } @@ -1209,6 +1209,8 @@ int main(int argc, char **argv) default_settings.khugepaged.pages_to_scan = hpage_pmd_nr * 8; default_settings.hugepages[hpage_pmd_order].enabled = THP_INHERIT; default_settings.hugepages[anon_order].enabled = THP_ALWAYS; + default_settings.shmem_hugepages[hpage_pmd_order].enabled = SHMEM_INHERIT; + default_settings.shmem_hugepages[anon_order].enabled = SHMEM_ALWAYS; save_settings(); thp_push_settings(&default_settings); diff --git a/tools/testing/selftests/mm/thp_settings.c b/tools/testing/selftests/mm/thp_settings.c index a4163438108e..577eaab6266f 100644 --- a/tools/testing/selftests/mm/thp_settings.c +++ b/tools/testing/selftests/mm/thp_settings.c @@ -33,10 +33,11 @@ static const char * const thp_defrag_strings[] = { }; static const char * const shmem_enabled_strings[] = { + "never", "always", "within_size", "advise", - "never", + "inherit", "deny", "force", NULL @@ -200,6 +201,7 @@ void thp_write_num(const char *name, unsigned long num) void thp_read_settings(struct thp_settings *settings) { unsigned long orders = thp_supported_orders(); + unsigned long shmem_orders = thp_shmem_supported_orders(); char path[PATH_MAX]; int i; @@ -234,12 +236,24 @@ void thp_read_settings(struct thp_settings *settings) settings->hugepages[i].enabled = thp_read_string(path, thp_enabled_strings); } + + for (i = 0; i < NR_ORDERS; i++) { + if (!((1 << i) & shmem_orders)) { + settings->shmem_hugepages[i].enabled = SHMEM_NEVER; + continue; + } + snprintf(path, PATH_MAX, "hugepages-%ukB/shmem_enabled", + (getpagesize() >> 10) << i); + settings->shmem_hugepages[i].enabled = + thp_read_string(path, shmem_enabled_strings); + } } void thp_write_settings(struct thp_settings *settings) { struct khugepaged_settings *khugepaged = &settings->khugepaged; unsigned long orders = thp_supported_orders(); + unsigned long shmem_orders = thp_shmem_supported_orders(); char path[PATH_MAX]; int enabled; int i; @@ -271,6 
+285,15 @@ void thp_write_settings(struct thp_settings *settings) enabled = settings->hugepages[i].enabled; thp_write_string(path, thp_enabled_strings[enabled]); } + + for (i = 0; i < NR_ORDERS; i++) { + if (!((1 << i) & shmem_orders)) + continue; + snprintf(path, PATH_MAX, "hugepages-%ukB/shmem_enabled", + (getpagesize() >> 10) << i); + enabled = settings->shmem_hugepages[i].enabled; + thp_write_string(path, shmem_enabled_strings[enabled]); + } } struct thp_settings *thp_current_settings(void) @@ -324,17 +347,18 @@ void thp_set_read_ahead_path(char *path) dev_queue_read_ahead_path[sizeof(dev_queue_read_ahead_path) - 1] = '\0'; } -unsigned long thp_supported_orders(void) +static unsigned long __thp_supported_orders(bool is_shmem) { unsigned long orders = 0; char path[PATH_MAX]; char buf[256]; - int ret; - int i; + int ret, i; + char anon_dir[] = "enabled"; + char shmem_dir[] = "shmem_enabled"; for (i = 0; i < NR_ORDERS; i++) { - ret = snprintf(path, PATH_MAX, THP_SYSFS "hugepages-%ukB/enabled", - (getpagesize() >> 10) << i); + ret = snprintf(path, PATH_MAX, THP_SYSFS "hugepages-%ukB/%s", + (getpagesize() >> 10) << i, is_shmem ? 
shmem_dir : anon_dir); if (ret >= PATH_MAX) { printf("%s: Pathname is too long\n", __func__); exit(EXIT_FAILURE); @@ -347,3 +371,13 @@ unsigned long thp_supported_orders(void) return orders; } + +unsigned long thp_supported_orders(void) +{ + return __thp_supported_orders(false); +} + +unsigned long thp_shmem_supported_orders(void) +{ + return __thp_supported_orders(true); +} diff --git a/tools/testing/selftests/mm/thp_settings.h b/tools/testing/selftests/mm/thp_settings.h index 71cbff05f4c7..876235a23460 100644 --- a/tools/testing/selftests/mm/thp_settings.h +++ b/tools/testing/selftests/mm/thp_settings.h @@ -22,10 +22,11 @@ enum thp_defrag { }; enum shmem_enabled { + SHMEM_NEVER, SHMEM_ALWAYS, SHMEM_WITHIN_SIZE, SHMEM_ADVISE, - SHMEM_NEVER, + SHMEM_INHERIT, SHMEM_DENY, SHMEM_FORCE, }; @@ -46,6 +47,10 @@ struct khugepaged_settings { unsigned long pages_to_scan; }; +struct shmem_hugepages_settings { + enum shmem_enabled enabled; +}; + struct thp_settings { enum thp_enabled thp_enabled; enum thp_defrag thp_defrag; @@ -54,6 +59,7 @@ struct thp_settings { struct khugepaged_settings khugepaged; unsigned long read_ahead_kb; struct hugepages_settings hugepages[NR_ORDERS]; + struct shmem_hugepages_settings shmem_hugepages[NR_ORDERS]; }; int read_file(const char *path, char *buf, size_t buflen); @@ -76,5 +82,6 @@ void thp_save_settings(void); void thp_set_read_ahead_path(char *path); unsigned long thp_supported_orders(void); +unsigned long thp_shmem_supported_orders(void); #endif /* __THP_SETTINGS_H__ */ -- Gitee From bc570460587b01fb63f196861364b4c89a7d8a99 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 14 Oct 2024 18:24:44 +0800 Subject: [PATCH 1689/2138] mm: khugepaged: fix the incorrect statistics when collapsing large file folios ANBZ: #9728 commit d60fcaf00d752c52fdf566d4184e6d04d9d08879 upstream Khugepaged already supports collapsing file large folios (including shmem mTHP) by commit 7de856ffd007 ("mm: khugepaged: support shmem mTHP collapse"), and the control 
parameters in khugepaged: 'khugepaged_max_ptes_swap' and 'khugepaged_max_ptes_none', still compare based on PTE granularity to determine whether a file collapse is needed. However, the statistics for 'present' and 'swap' in hpage_collapse_scan_file() do not take into account the large folios, which may lead to incorrect judgments regarding the khugepaged_max_ptes_swap/none parameters, resulting in unnecessary file collapses. To fix this issue, take into account the large folios' statistics for 'present' and 'swap' variables in the hpage_collapse_scan_file(). Link: https://lkml.kernel.org/r/c76305d96d12d030a1a346b50503d148364246d2.1728901391.git.baolin.wang@linux.alibaba.com Fixes: 7de856ffd007 ("mm: khugepaged: support shmem mTHP collapse") Signed-off-by: Baolin Wang Acked-by: David Hildenbrand Reviewed-by: Barry Song Reviewed-by: Zi Yan Reviewed-by: Yang Shi Cc: Hugh Dickins Cc: Matthew Wilcox Cc: Ryan Roberts Signed-off-by: Andrew Morton Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4128 --- mm/khugepaged.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 255f92503310..5ef0182d1c88 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -2239,7 +2239,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, continue; if (xa_is_value(folio)) { - ++swap; + swap += 1 << xas_get_order(&xas); if (cc->is_khugepaged && swap > khugepaged_max_ptes_swap) { result = SCAN_EXCEED_SWAP_PTE; @@ -2286,7 +2286,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr, * is just too costly... 
*/ - present++; + present += folio_nr_pages(folio); if (need_resched()) { xas_pause(&xas); -- Gitee From 8be67ed133667d489ba027a83088278958fa84c4 Mon Sep 17 00:00:00 2001 From: Jia He Date: Thu, 14 Nov 2024 05:34:28 +0000 Subject: [PATCH 1690/2138] anolis: Revert "anolis: configs: refresh kconfigs" ANBZ: #11595 This reverts commit 1b7e94c4231c44f82686f9381b81c58d36bc597f. This conflicts the reorganization of arm64 LIVEPATCH support, hence revert it firstly and refresh kconfigs after reorganization. Signed-off-by: Jia He Acked-by: Wardenjohn Reviewed-by: Qiao Ma Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4069 --- .../configs/L0-MANDATORY/{default => arm64}/CONFIG_PCI_PF_STUB | 0 anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS | 1 - anolis/configs/L0-MANDATORY/{default => x86}/CONFIG_LIVEPATCH | 0 anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB | 1 + anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS | 1 + anolis/configs/L0-MANDATORY/{default => x86}/CONFIG_UNWINDER_ORC | 0 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 | 1 - .../configs/L1-RECOMMEND/{default => arm64}/CONFIG_GENERIC_PHY | 0 .../L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET | 1 + .../L1-RECOMMEND/{default => arm64}/CONFIG_HAVE_LIVEPATCH | 0 .../{default => arm64}/CONFIG_HAVE_RELIABLE_STACKTRACE | 0 .../L1-RECOMMEND/{default => arm64}/CONFIG_HAVE_STACK_VALIDATION | 0 anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH | 1 + anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_OBJTOOL | 0 anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET | 1 + .../{x86 => arm64}/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT | 0 .../arm64/CONFIG_UNWINDER_FRAME_POINTER | 0 anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC | 1 + anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO | 1 - anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX | 1 - 
.../L1-RECOMMEND/{x86 => default}/CONFIG_RANDOMIZE_KSTACK_OFFSET | 0 .../L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI | 1 - .../configs/L1-RECOMMEND/{default => x86}/CONFIG_TEST_LIVEPATCH | 0 .../{default => arm64}/CONFIG_DRM_PANEL_ILITEK_ILI9341 | 0 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH | 1 + .../configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION | 1 - anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG | 1 - anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG | 1 - anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG | 1 - anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS | 1 - .../default}/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET | 0 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA | 1 - anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO | 1 - anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG | 1 - anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET | 1 - anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 | 1 - anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC | 1 - .../L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS | 1 - anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL | 1 + 45 files changed, 13 insertions(+), 18 deletions(-) rename anolis/configs/L0-MANDATORY/{default => arm64}/CONFIG_PCI_PF_STUB (100%) delete mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS rename anolis/configs/L0-MANDATORY/{default => x86}/CONFIG_LIVEPATCH (100%) create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS rename anolis/configs/L0-MANDATORY/{default => 
x86}/CONFIG_UNWINDER_ORC (100%) delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_GENERIC_PHY (100%) create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_HAVE_LIVEPATCH (100%) rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_HAVE_RELIABLE_STACKTRACE (100%) rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_HAVE_STACK_VALIDATION (100%) create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_OBJTOOL (100%) create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET rename anolis/configs/L1-RECOMMEND/{x86 => arm64}/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT (100%) rename anolis/configs/{L0-MANDATORY => L1-RECOMMEND}/arm64/CONFIG_UNWINDER_FRAME_POINTER (100%) create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC delete mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO delete mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_RANDOMIZE_KSTACK_OFFSET (100%) create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY delete mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI rename anolis/configs/L1-RECOMMEND/{default => x86}/CONFIG_TEST_LIVEPATCH (100%) rename anolis/configs/L2-OPTIONAL/{default => arm64}/CONFIG_DRM_PANEL_ILITEK_ILI9341 (100%) create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH delete mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION delete mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG delete mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG delete mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG delete mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS rename anolis/configs/{L1-RECOMMEND/x86 => L2-OPTIONAL/default}/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET (100%) delete mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA delete mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO delete mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG delete mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET delete mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 delete mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC delete mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_PF_STUB b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB similarity index 100% rename from anolis/configs/L0-MANDATORY/default/CONFIG_PCI_PF_STUB rename to anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS deleted file mode 100644 index 3d6f96778a81..000000000000 --- a/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS +++ /dev/null @@ -1 +0,0 @@ -CONFIG_CPU_MITIGATIONS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_LIVEPATCH b/anolis/configs/L0-MANDATORY/x86/CONFIG_LIVEPATCH similarity index 100% rename from 
anolis/configs/L0-MANDATORY/default/CONFIG_LIVEPATCH rename to anolis/configs/L0-MANDATORY/x86/CONFIG_LIVEPATCH diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB b/anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB new file mode 100644 index 000000000000..46eee76194b0 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB @@ -0,0 +1 @@ +CONFIG_PCI_PF_STUB=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS b/anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS new file mode 100644 index 000000000000..37f78a6f2368 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS @@ -0,0 +1 @@ +CONFIG_SPECULATION_MITIGATIONS=y diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_UNWINDER_ORC b/anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_ORC similarity index 100% rename from anolis/configs/L0-MANDATORY/default/CONFIG_UNWINDER_ORC rename to anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_ORC diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE deleted file mode 100644 index 23a09e20f027..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE +++ /dev/null @@ -1 +0,0 @@ -CONFIG_ARM64_CONTPTE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 deleted file mode 100644 index f6f6f286638d..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 +++ /dev/null @@ -1 +0,0 @@ -CONFIG_ARM64_ERRATUM_3194386=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_PHY b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_PHY similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_PHY rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_PHY diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET new file mode 100644 index 000000000000..c7daa4f60d5d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET @@ -0,0 +1 @@ +# CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_LIVEPATCH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_LIVEPATCH rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_RELIABLE_STACKTRACE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_RELIABLE_STACKTRACE rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_STACK_VALIDATION b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_STACK_VALIDATION rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH new file mode 100644 index 000000000000..1b05d0d1a109 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH @@ -0,0 +1 @@ +CONFIG_LIVEPATCH=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_OBJTOOL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_OBJTOOL rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET new file mode 100644 index 000000000000..759cb13e424c --- /dev/null +++ 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET @@ -0,0 +1 @@ +# CONFIG_RANDOMIZE_KSTACK_OFFSET is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_UNWINDER_FRAME_POINTER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER similarity index 100% rename from anolis/configs/L0-MANDATORY/arm64/CONFIG_UNWINDER_FRAME_POINTER rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC new file mode 100644 index 000000000000..6b6908419acb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC @@ -0,0 +1 @@ +CONFIG_UNWINDER_ORC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO deleted file mode 100644 index 6a7ffe559b94..000000000000 --- a/anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO +++ /dev/null @@ -1 +0,0 @@ -CONFIG_AHCI_ZHAOXIN_SGPIO=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX deleted file mode 100644 index 8c42e3567daa..000000000000 --- a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX +++ /dev/null @@ -1 +0,0 @@ -CONFIG_PCP_BATCH_SCALE_MAX=5 diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET rename to 
anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT new file mode 100644 index 000000000000..d680659c1703 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT @@ -0,0 +1 @@ +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY new file mode 100644 index 000000000000..582e87c3b9f5 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY @@ -0,0 +1 @@ +# CONFIG_GENERIC_PHY is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI deleted file mode 100644 index 71b428227384..000000000000 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI +++ /dev/null @@ -1 +0,0 @@ -CONFIG_MITIGATION_SPECTRE_BHI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_LIVEPATCH b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TEST_LIVEPATCH similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_LIVEPATCH rename to anolis/configs/L1-RECOMMEND/x86/CONFIG_TEST_LIVEPATCH diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ILITEK_ILI9341 b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_ILI9341 similarity index 100% rename from anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ILITEK_ILI9341 rename to anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_ILI9341 diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH new file mode 100644 index 000000000000..0dd7700464a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH @@ -0,0 +1 @@ +CONFIG_TEST_LIVEPATCH=m diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION deleted file mode 100644 index d966c9744d3d..000000000000 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_BCACHE_ASYNC_REGISTRATION is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG deleted file mode 100644 index eb3f1af90e6c..000000000000 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_BCACHE_CLOSURES_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG deleted file mode 100644 index 36426027b2d1..000000000000 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_BCACHE_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG deleted file mode 100644 index 01ee2034fbdc..000000000000 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG +++ /dev/null @@ -1 +0,0 @@ -CONFIG_CRYPTO_SIG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS deleted file mode 100644 index 485cf9b71de5..000000000000 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS +++ /dev/null @@ -1 +0,0 @@ -CONFIG_FB_IOMEM_FOPS=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET rename to anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET diff --git 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA deleted file mode 100644 index 723cb8bb73ac..000000000000 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_PATA_HPT3X3_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO deleted file mode 100644 index e5a12d9c60db..000000000000 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO +++ /dev/null @@ -1 +0,0 @@ -CONFIG_SCREEN_INFO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG deleted file mode 100644 index aa295ebbb545..000000000000 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG +++ /dev/null @@ -1 +0,0 @@ -CONFIG_SCSI_MVSAS_DEBUG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET deleted file mode 100644 index 028f7d8e3d25..000000000000 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_SCSI_MVSAS_TASKLET is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 deleted file mode 100644 index 421ac1f25eec..000000000000 --- a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_AD9467 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC deleted file mode 100644 index e98b407ac85f..000000000000 --- a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_ADI_AXI_ADC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS deleted file mode 100644 index 
a7a95432397c..000000000000 --- a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS +++ /dev/null @@ -1 +0,0 @@ -CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH new file mode 100644 index 000000000000..7ebdb924703e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH @@ -0,0 +1 @@ +CONFIG_HAVE_LIVEPATCH=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE new file mode 100644 index 000000000000..2ce8faabc4cf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE @@ -0,0 +1 @@ +CONFIG_HAVE_RELIABLE_STACKTRACE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION new file mode 100644 index 000000000000..6f36a32d84ae --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION @@ -0,0 +1 @@ +CONFIG_HAVE_STACK_VALIDATION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL new file mode 100644 index 000000000000..cf3a9f20f93d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL @@ -0,0 +1 @@ +CONFIG_OBJTOOL=y -- Gitee From 5c99eb7ac653276a278d28f5fb7cf7db27d15c77 Mon Sep 17 00:00:00 2001 From: Jia He Date: Mon, 4 Nov 2024 06:00:56 +0000 Subject: [PATCH 1691/2138] Revert "anolis: configs: Enable CONFIG_LIVEPATCH for arm64" ANBZ: #11595 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ This reverts commit ce8c54e48ecebb6979504596749f410fefcc2a3b. To support livepatch features on other architectures, it's preferable to keep the Anolis livepatch code layout closely aligned with the upstream kernel. 
Therefore, we should revert the commits that reorganized the code specifically for arm64 livepatch support. Hence temporarily disable the livepatch support on arm64 when reverting some reorganization commits to avoid conflicts or compilation errors. Signed-off-by: Jia He Acked-by: Wardenjohn Reviewed-by: Qiao Ma Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4069 --- .../configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION | 1 - .../L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH | 1 - .../configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET | 1 - .../L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC | 1 - anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH | 1 - 13 files changed, 13 deletions(-) delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET delete mode 100644 
anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC delete mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION deleted file mode 100644 index cc041e559182..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION +++ /dev/null @@ -1 +0,0 @@ -CONFIG_FRAME_POINTER_VALIDATION=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET deleted file mode 100644 index c7daa4f60d5d..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH deleted file mode 100644 index 7ebdb924703e..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH +++ /dev/null @@ -1 +0,0 @@ -CONFIG_HAVE_LIVEPATCH=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE deleted file mode 100644 index 2ce8faabc4cf..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE +++ /dev/null @@ -1 +0,0 @@ -CONFIG_HAVE_RELIABLE_STACKTRACE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION deleted file mode 100644 index 6f36a32d84ae..000000000000 --- 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION +++ /dev/null @@ -1 +0,0 @@ -CONFIG_HAVE_STACK_VALIDATION=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH deleted file mode 100644 index 1b05d0d1a109..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH +++ /dev/null @@ -1 +0,0 @@ -CONFIG_LIVEPATCH=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL deleted file mode 100644 index cf3a9f20f93d..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL +++ /dev/null @@ -1 +0,0 @@ -CONFIG_OBJTOOL=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET deleted file mode 100644 index 759cb13e424c..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_RANDOMIZE_KSTACK_OFFSET is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT deleted file mode 100644 index d680659c1703..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION deleted file mode 100644 index e335fefdd9be..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION +++ /dev/null @@ -1 +0,0 @@ -CONFIG_STACK_VALIDATION=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER deleted file mode 100644 index 0938fde11ffe..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER +++ /dev/null @@ 
-1 +0,0 @@ -CONFIG_UNWINDER_FRAME_POINTER=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC deleted file mode 100644 index 6b6908419acb..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC +++ /dev/null @@ -1 +0,0 @@ -CONFIG_UNWINDER_ORC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH deleted file mode 100644 index 0dd7700464a8..000000000000 --- a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH +++ /dev/null @@ -1 +0,0 @@ -CONFIG_TEST_LIVEPATCH=m -- Gitee From 2486c02fcac9bd278a65d78b1b364c806fc534dc Mon Sep 17 00:00:00 2001 From: Jia He Date: Mon, 4 Nov 2024 06:01:09 +0000 Subject: [PATCH 1692/2138] Revert "arm64: Enable livepatch for ARM64" ANBZ: #11595 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ This reverts commit 01cc06c19187d6d4df515842add3156e68013cc0. To support livepatch features on other architectures, it's preferable to keep the Anolis livepatch code layout closely aligned with the upstream kernel. Therefore, we should revert the commits that reorganized the code specifically for arm64 livepatch support. Hence temporarily disable the livepatch support on arm64 when reverting some reorganization commits to avoid conflicts or compilation errors. 
Signed-off-by: Jia He Acked-by: Wardenjohn Reviewed-by: Qiao Ma Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4069 --- arch/arm64/Kconfig | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index c0db32c45f57..ea02335e54dd 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -174,7 +174,7 @@ config ARM64 select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT select HAVE_ARCH_PREL32_RELOCATIONS - select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET if !HAVE_LIVEPATCH + select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_STACKLEAK select HAVE_ARCH_THREAD_STRUCT_WHITELIST @@ -259,8 +259,6 @@ config ARM64 select HAVE_SOFTIRQ_ON_OWN_STACK select HAVE_STACK_VALIDATION if FRAME_POINTER_VALIDATION select STACK_VALIDATION if HAVE_STACK_VALIDATION - select HAVE_RELIABLE_STACKTRACE if STACK_VALIDATION - select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_ARGS && HAVE_RELIABLE_STACKTRACE help ARM 64-bit (AArch64) Linux support. @@ -2408,4 +2406,3 @@ source "drivers/acpi/Kconfig" source "arch/arm64/kvm/Kconfig" -source "kernel/livepatch/Kconfig" -- Gitee From 0b61dd9d3a6ed238d9922677f4ba53c577874add Mon Sep 17 00:00:00 2001 From: Jia He Date: Tue, 29 Oct 2024 11:03:00 +0000 Subject: [PATCH 1693/2138] Revert "objtool: Reorganize ORC kernel code" ANBZ: #11595 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ This reverts commit e46e240a9dafb543e4ff824cfd86632af6f9f799. To support livepatch features on other architectures, it's preferable to keep the Anolis livepatch code layout closely aligned with the upstream kernel. Therefore, we should revert the commits that reorganized the code specifically for arm64 livepatch support. 
Signed-off-by: Jia He Acked-by: Wardenjohn Reviewed-by: Qiao Ma Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4069 --- .../arm64/include/asm}/orc_lookup.h | 0 arch/arm64/kernel/Makefile | 1 + arch/arm64/kernel/module.c | 2 +- {kernel => arch/arm64/kernel}/orc_lookup.c | 2 +- arch/arm64/kernel/setup.c | 2 +- arch/arm64/kernel/stacktrace.c | 2 +- arch/arm64/kernel/vmlinux.lds.S | 2 +- arch/x86/include/asm/orc_lookup.h | 34 +++ arch/x86/include/asm/unwind.h | 5 + arch/x86/kernel/module.c | 7 +- arch/x86/kernel/unwind_orc.c | 257 +++++++++++++++++- arch/x86/kernel/vmlinux.lds.S | 2 +- kernel/Makefile | 2 - tools/objtool/arch/x86/orc.c | 1 - 14 files changed, 299 insertions(+), 20 deletions(-) rename {include/asm-generic => arch/arm64/include/asm}/orc_lookup.h (100%) rename {kernel => arch/arm64/kernel}/orc_lookup.c (99%) create mode 100644 arch/x86/include/asm/orc_lookup.h diff --git a/include/asm-generic/orc_lookup.h b/arch/arm64/include/asm/orc_lookup.h similarity index 100% rename from include/asm-generic/orc_lookup.h rename to arch/arm64/include/asm/orc_lookup.h diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 7c67e2f29206..f9439f96f0ec 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -76,6 +76,7 @@ obj-$(CONFIG_ARM64_MTE) += mte.o obj-y += vdso-wrap.o obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS) += patch-scs.o +obj-$(CONFIG_UNWINDER_ORC) += orc_lookup.o CFLAGS_patch-scs.o += -mbranch-protection=none # Force dependency (vdso*-wrap.S includes vdso.so through incbin) diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index 09251ad0ff4e..8f19c7a7d65d 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include static u64 module_direct_base __ro_after_init = 0; static u64 module_plt_base __ro_after_init = 0; diff --git a/kernel/orc_lookup.c 
b/arch/arm64/kernel/orc_lookup.c similarity index 99% rename from kernel/orc_lookup.c rename to arch/arm64/kernel/orc_lookup.c index ad845da546b4..9c062c054dcb 100644 --- a/kernel/orc_lookup.c +++ b/arch/arm64/kernel/orc_lookup.c @@ -3,7 +3,7 @@ #include #include #include -#include +#include bool orc_init __ro_after_init; static unsigned int lookup_num_blocks __ro_after_init; diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 90a2e3aceb00..7b8aed3de946 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -53,7 +53,7 @@ #include #include #include -#include +#include static int num_standard_resources; static struct resource *standard_resources; diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 71157c0eb77b..ab9605aa721d 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -6,7 +6,7 @@ */ #include #include -#include +#include #include #include #include diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 42af43ce8d1b..e49aba6e5d23 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -61,7 +61,7 @@ #define RUNTIME_DISCARD_EXIT #include -#include +#include #include #include #include diff --git a/arch/x86/include/asm/orc_lookup.h b/arch/x86/include/asm/orc_lookup.h new file mode 100644 index 000000000000..241631282e43 --- /dev/null +++ b/arch/x86/include/asm/orc_lookup.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2017 Josh Poimboeuf + */ +#ifndef _ORC_LOOKUP_H +#define _ORC_LOOKUP_H + +/* + * This is a lookup table for speeding up access to the .orc_unwind table. + * Given an input address offset, the corresponding lookup table entry + * specifies a subset of the .orc_unwind table to search. + * + * Each block represents the end of the previous range and the start of the + * next range. An extra block is added to give the last range an end. 
+ * + * The block size should be a power of 2 to avoid a costly 'div' instruction. + * + * A block size of 256 was chosen because it roughly doubles unwinder + * performance while only adding ~5% to the ORC data footprint. + */ +#define LOOKUP_BLOCK_ORDER 8 +#define LOOKUP_BLOCK_SIZE (1 << LOOKUP_BLOCK_ORDER) + +#ifndef LINKER_SCRIPT + +extern unsigned int orc_lookup[]; +extern unsigned int orc_lookup_end[]; + +#define LOOKUP_START_IP (unsigned long)_stext +#define LOOKUP_STOP_IP (unsigned long)_etext + +#endif /* LINKER_SCRIPT */ + +#endif /* _ORC_LOOKUP_H */ diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index 71af8246c69e..7cede4dc21f0 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -94,8 +94,13 @@ static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state, #ifdef CONFIG_UNWINDER_ORC void unwind_init(void); +void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, + void *orc, size_t orc_size); #else static inline void unwind_init(void) {} +static inline +void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, + void *orc, size_t orc_size) {} #endif static inline diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 2fc4411a22d9..5f71a0cf4399 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #if 0 #define DEBUGP(fmt, ...) 
\ @@ -370,9 +370,8 @@ int module_finalize(const Elf_Ehdr *hdr, } if (orc && orc_ip) - orc_lookup_module_init(me, - (void *)orc_ip->sh_addr, orc_ip->sh_size, - (void *)orc->sh_addr, orc->sh_size); + unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size, + (void *)orc->sh_addr, orc->sh_size); return 0; } diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 0f2f51dbc7e3..a75d9a827594 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -7,12 +7,34 @@ #include #include #include +#include #include -#include ORC_HEADER; +#define orc_warn(fmt, ...) \ + printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__) + +#define orc_warn_current(args...) \ +({ \ + static bool dumped_before; \ + if (state->task == current && !state->error) { \ + orc_warn(args); \ + if (unwind_debug && !dumped_before) { \ + dumped_before = true; \ + unwind_dump(state); \ + } \ + } \ +}) + +extern int __start_orc_unwind_ip[]; +extern int __stop_orc_unwind_ip[]; +extern struct orc_entry __start_orc_unwind[]; +extern struct orc_entry __stop_orc_unwind[]; + +static bool orc_init __ro_after_init; static bool unwind_debug __ro_after_init; +static unsigned int lookup_num_blocks __ro_after_init; static int __init unwind_debug_cmdline(char *str) { @@ -54,9 +76,60 @@ static void unwind_dump(struct unwind_state *state) } } -#include +static inline unsigned long orc_ip(const int *ip) +{ + return (unsigned long)ip + *ip; +} + +static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table, + unsigned int num_entries, unsigned long ip) +{ + int *first = ip_table; + int *last = ip_table + num_entries - 1; + int *mid = first, *found = first; + + if (!num_entries) + return NULL; + + /* + * Do a binary range search to find the rightmost duplicate of a given + * starting address. Some entries are section terminators which are + * "weak" entries for ensuring there are no gaps. 
They should be + * ignored when they conflict with a real entry. + */ + while (first <= last) { + mid = first + ((last - first) / 2); + + if (orc_ip(mid) <= ip) { + found = mid; + first = mid + 1; + } else + last = mid - 1; + } + + return u_table + (found - ip_table); +} + +#ifdef CONFIG_MODULES +static struct orc_entry *orc_module_find(unsigned long ip) +{ + struct module *mod; + + mod = __module_address(ip); + if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip) + return NULL; + return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind, + mod->arch.num_orcs, ip); +} +#else +static struct orc_entry *orc_module_find(unsigned long ip) +{ + return NULL; +} +#endif #ifdef CONFIG_DYNAMIC_FTRACE +static struct orc_entry *orc_find(unsigned long ip); /* * Ftrace dynamic trampolines do not have orc entries of their own. @@ -100,10 +173,19 @@ static struct orc_entry *orc_ftrace_find(unsigned long ip) } #endif -struct orc_entry *arch_orc_find(unsigned long ip) -{ - return orc_ftrace_find(ip); -} +/* + * If we crash with IP==0, the last successfully executed instruction + * was probably an indirect function call with a NULL function pointer, + * and we don't have unwind information for NULL. + * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function + * pointer into its parent and then continue normally from there. 
+ */ +static struct orc_entry null_orc_entry = { + .sp_offset = sizeof(long), + .sp_reg = ORC_REG_SP, + .bp_reg = ORC_REG_UNDEFINED, + .type = ORC_TYPE_CALL +}; /* Fake frame pointer entry -- used as a fallback for generated code */ static struct orc_entry orc_fp_entry = { @@ -114,9 +196,170 @@ static struct orc_entry orc_fp_entry = { .bp_offset = -16, }; +static struct orc_entry *orc_find(unsigned long ip) +{ + static struct orc_entry *orc; + + if (ip == 0) + return &null_orc_entry; + + /* For non-init vmlinux addresses, use the fast lookup table: */ + if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) { + unsigned int idx, start, stop; + + idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE; + + if (unlikely((idx >= lookup_num_blocks-1))) { + orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n", + idx, lookup_num_blocks, (void *)ip); + return NULL; + } + + start = orc_lookup[idx]; + stop = orc_lookup[idx + 1] + 1; + + if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) || + (__start_orc_unwind + stop > __stop_orc_unwind))) { + orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n", + idx, lookup_num_blocks, start, stop, (void *)ip); + return NULL; + } + + return __orc_find(__start_orc_unwind_ip + start, + __start_orc_unwind + start, stop - start, ip); + } + + /* vmlinux .init slow lookup: */ + if (is_kernel_inittext(ip)) + return __orc_find(__start_orc_unwind_ip, __start_orc_unwind, + __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); + + /* Module lookup: */ + orc = orc_module_find(ip); + if (orc) + return orc; + + return orc_ftrace_find(ip); +} + +#ifdef CONFIG_MODULES + +static DEFINE_MUTEX(sort_mutex); +static int *cur_orc_ip_table = __start_orc_unwind_ip; +static struct orc_entry *cur_orc_table = __start_orc_unwind; + +static void orc_sort_swap(void *_a, void *_b, int size) +{ + struct orc_entry *orc_a, *orc_b; + int *a = _a, *b = _b, tmp; + int delta = _b - _a; + + /* Swap the .orc_unwind_ip entries: */ + tmp = *a; + *a = 
*b + delta; + *b = tmp - delta; + + /* Swap the corresponding .orc_unwind entries: */ + orc_a = cur_orc_table + (a - cur_orc_ip_table); + orc_b = cur_orc_table + (b - cur_orc_ip_table); + swap(*orc_a, *orc_b); +} + +static int orc_sort_cmp(const void *_a, const void *_b) +{ + struct orc_entry *orc_a; + const int *a = _a, *b = _b; + unsigned long a_val = orc_ip(a); + unsigned long b_val = orc_ip(b); + + if (a_val > b_val) + return 1; + if (a_val < b_val) + return -1; + + /* + * The "weak" section terminator entries need to always be first + * to ensure the lookup code skips them in favor of real entries. + * These terminator entries exist to handle any gaps created by + * whitelisted .o files which didn't get objtool generation. + */ + orc_a = cur_orc_table + (a - cur_orc_ip_table); + return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1; +} + +void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size, + void *_orc, size_t orc_size) +{ + int *orc_ip = _orc_ip; + struct orc_entry *orc = _orc; + unsigned int num_entries = orc_ip_size / sizeof(int); + + WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 || + orc_size % sizeof(*orc) != 0 || + num_entries != orc_size / sizeof(*orc)); + + /* + * The 'cur_orc_*' globals allow the orc_sort_swap() callback to + * associate an .orc_unwind_ip table entry with its corresponding + * .orc_unwind entry so they can both be swapped. 
+ */ + mutex_lock(&sort_mutex); + cur_orc_ip_table = orc_ip; + cur_orc_table = orc; + sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap); + mutex_unlock(&sort_mutex); + + mod->arch.orc_unwind_ip = orc_ip; + mod->arch.orc_unwind = orc; + mod->arch.num_orcs = num_entries; +} +#endif + void __init unwind_init(void) { - orc_lookup_init(); + size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip; + size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind; + size_t num_entries = orc_ip_size / sizeof(int); + struct orc_entry *orc; + int i; + + if (!num_entries || orc_ip_size % sizeof(int) != 0 || + orc_size % sizeof(struct orc_entry) != 0 || + num_entries != orc_size / sizeof(struct orc_entry)) { + orc_warn("WARNING: Bad or missing .orc_unwind table. Disabling unwinder.\n"); + return; + } + + /* + * Note, the orc_unwind and orc_unwind_ip tables were already + * sorted at build time via the 'sorttable' tool. + * It's ready for binary search straight away, no need to sort it. + */ + + /* Initialize the fast lookup table: */ + lookup_num_blocks = orc_lookup_end - orc_lookup; + for (i = 0; i < lookup_num_blocks-1; i++) { + orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, + num_entries, + LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i)); + if (!orc) { + orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n"); + return; + } + + orc_lookup[i] = orc - __start_orc_unwind; + } + + /* Initialize the ending block: */ + orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries, + LOOKUP_STOP_IP); + if (!orc) { + orc_warn("WARNING: Corrupt .orc_unwind table. 
Disabling unwinder.\n"); + return; + } + orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind; + + orc_init = true; } unsigned long unwind_get_return_address(struct unwind_state *state) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index cb6c9527fdde..60eb8baa44d7 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include #include diff --git a/kernel/Makefile b/kernel/Makefile index 680be35dffc6..ce105a5558fc 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -132,8 +132,6 @@ obj-$(CONFIG_WATCH_QUEUE) += watch_queue.o obj-$(CONFIG_RESOURCE_KUNIT_TEST) += resource_kunit.o obj-$(CONFIG_SYSCTL_KUNIT_TEST) += sysctl-test.o -obj-$(CONFIG_UNWINDER_ORC) += orc_lookup.o - CFLAGS_stackleak.o += $(DISABLE_STACKLEAK_PLUGIN) obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o KASAN_SANITIZE_stackleak.o := n diff --git a/tools/objtool/arch/x86/orc.c b/tools/objtool/arch/x86/orc.c index 3526dfd9749d..891b85ae314e 100644 --- a/tools/objtool/arch/x86/orc.c +++ b/tools/objtool/arch/x86/orc.c @@ -9,7 +9,6 @@ #include #include -#include #include #include #include -- Gitee From ac46a0931fce9b70e8a50700384b74e2dfd753d3 Mon Sep 17 00:00:00 2001 From: Jia He Date: Tue, 29 Oct 2024 11:24:31 +0000 Subject: [PATCH 1694/2138] =?UTF-8?q?Revert=20=E2=80=9Cobjtool:=20Reorgani?= =?UTF-8?q?ze=20ORC=20code=E2=80=9D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #11595 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ This reverts commit eb97f05779918730493086e79b2a3faeb3112691. To support livepatch features on other architectures, it's preferable to keep the Anolis livepatch code layout closely aligned with the upstream kernel. Therefore, we should revert the commits that reorganized the code specifically for arm64 livepatch support. 
Signed-off-by: Jia He Acked-by: Wardenjohn Reviewed-by: Qiao Ma Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4069 --- tools/objtool/arch/x86/Build | 1 - tools/objtool/arch/x86/include/arch/elf.h | 1 - tools/objtool/arch/x86/orc.c | 161 ---------------------- tools/objtool/include/objtool/orc.h | 18 --- tools/objtool/orc_dump.c | 65 ++++++++- tools/objtool/orc_gen.c | 94 ++++++++++++- 6 files changed, 152 insertions(+), 188 deletions(-) delete mode 100644 tools/objtool/arch/x86/orc.c delete mode 100644 tools/objtool/include/objtool/orc.h diff --git a/tools/objtool/arch/x86/Build b/tools/objtool/arch/x86/Build index 77b9a66cd6da..9f7869b5c5e0 100644 --- a/tools/objtool/arch/x86/Build +++ b/tools/objtool/arch/x86/Build @@ -1,6 +1,5 @@ objtool-y += special.o objtool-y += decode.o -objtool-$(BUILD_ORC) += orc.o inat_tables_script = ../arch/x86/tools/gen-insn-attr-x86.awk inat_tables_maps = ../arch/x86/lib/x86-opcode-map.txt diff --git a/tools/objtool/arch/x86/include/arch/elf.h b/tools/objtool/arch/x86/include/arch/elf.h index 39f23cb55352..7131f7f51a4e 100644 --- a/tools/objtool/arch/x86/include/arch/elf.h +++ b/tools/objtool/arch/x86/include/arch/elf.h @@ -9,6 +9,5 @@ #define R_DATA64 R_X86_64_PC32 #define R_TEXT32 R_X86_64_PC32 #define R_TEXT64 R_X86_64_PC32 -#define R_PCREL R_X86_64_PC32 #endif /* _OBJTOOL_ARCH_ELF */ diff --git a/tools/objtool/arch/x86/orc.c b/tools/objtool/arch/x86/orc.c deleted file mode 100644 index 891b85ae314e..000000000000 --- a/tools/objtool/arch/x86/orc.c +++ /dev/null @@ -1,161 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2017 Josh Poimboeuf - */ - -#include -#include - -#include - -#include -#include -#include -#include - -int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, - struct instruction *insn) -{ - struct cfi_reg *bp = &cfi->regs[CFI_BP]; - - memset(orc, 0, sizeof(*orc)); - - if (!cfi) { - /* - * This is usually either unreachable nops/traps (which don't - * 
trigger unreachable instruction warnings), or - * STACK_FRAME_NON_STANDARD functions. - */ - orc->type = ORC_TYPE_UNDEFINED; - return 0; - } - - switch (cfi->type) { - case UNWIND_HINT_TYPE_UNDEFINED: - orc->type = ORC_TYPE_UNDEFINED; - return 0; - case UNWIND_HINT_TYPE_END_OF_STACK: - orc->type = ORC_TYPE_END_OF_STACK; - return 0; - case UNWIND_HINT_TYPE_CALL: - orc->type = ORC_TYPE_CALL; - break; - case UNWIND_HINT_TYPE_REGS: - orc->type = ORC_TYPE_REGS; - break; - case UNWIND_HINT_TYPE_REGS_PARTIAL: - orc->type = ORC_TYPE_REGS_PARTIAL; - break; - default: - WARN_INSN(insn, "unknown unwind hint type %d", cfi->type); - return -1; - } - - orc->signal = cfi->signal; - - switch (cfi->cfa.base) { - case CFI_SP: - orc->sp_reg = ORC_REG_SP; - break; - case CFI_SP_INDIRECT: - orc->sp_reg = ORC_REG_SP_INDIRECT; - break; - case CFI_BP: - orc->sp_reg = ORC_REG_BP; - break; - case CFI_BP_INDIRECT: - orc->sp_reg = ORC_REG_BP_INDIRECT; - break; - case CFI_R10: - orc->sp_reg = ORC_REG_R10; - break; - case CFI_R13: - orc->sp_reg = ORC_REG_R13; - break; - case CFI_DI: - orc->sp_reg = ORC_REG_DI; - break; - case CFI_DX: - orc->sp_reg = ORC_REG_DX; - break; - default: - WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base); - return -1; - } - - switch (bp->base) { - case CFI_UNDEFINED: - orc->bp_reg = ORC_REG_UNDEFINED; - break; - case CFI_CFA: - orc->bp_reg = ORC_REG_PREV_SP; - break; - case CFI_BP: - orc->bp_reg = ORC_REG_BP; - break; - default: - WARN_INSN(insn, "unknown BP base reg %d", bp->base); - return -1; - } - - orc->sp_offset = cfi->cfa.offset; - orc->bp_offset = bp->offset; - - return 0; -} - -static const char *reg_name(unsigned int reg) -{ - switch (reg) { - case ORC_REG_PREV_SP: - return "prevsp"; - case ORC_REG_DX: - return "dx"; - case ORC_REG_DI: - return "di"; - case ORC_REG_BP: - return "bp"; - case ORC_REG_SP: - return "sp"; - case ORC_REG_R10: - return "r10"; - case ORC_REG_R13: - return "r13"; - case ORC_REG_BP_INDIRECT: - return "bp(ind)"; - case 
ORC_REG_SP_INDIRECT: - return "sp(ind)"; - default: - return "?"; - } -} - -const char *orc_type_name(unsigned int type) -{ - switch (type) { - case ORC_TYPE_UNDEFINED: - return "(und)"; - case ORC_TYPE_END_OF_STACK: - return "end"; - case ORC_TYPE_CALL: - return "call"; - case ORC_TYPE_REGS: - return "regs"; - case ORC_TYPE_REGS_PARTIAL: - return "regs (partial)"; - default: - return "?"; - } -} - -void orc_print_reg(unsigned int reg, int offset) -{ - if (reg == ORC_REG_BP_INDIRECT) - printf("(bp%+d)", offset); - else if (reg == ORC_REG_SP_INDIRECT) - printf("(sp)%+d", offset); - else if (reg == ORC_REG_UNDEFINED) - printf("(und)"); - else - printf("%s%+d", reg_name(reg), offset); -} diff --git a/tools/objtool/include/objtool/orc.h b/tools/objtool/include/objtool/orc.h deleted file mode 100644 index 11e746786fb4..000000000000 --- a/tools/objtool/include/objtool/orc.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2015-2017 Josh Poimboeuf - */ - -#ifndef _OBJTOOL_ORC_H -#define _OBJTOOL_ORC_H - -#include - -int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, - struct instruction *insn); -const char *orc_type_name(unsigned int type); -void orc_print_reg(unsigned int reg, int offset); -void orc_print_sp(void); -void orc_print_fp(void); - -#endif /* _OBJTOOL_ORC_H */ diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c index 84545d5e694e..0e183bb1c720 100644 --- a/tools/objtool/orc_dump.c +++ b/tools/objtool/orc_dump.c @@ -4,12 +4,67 @@ */ #include +#include #include -#include -#include #include #include +static const char *reg_name(unsigned int reg) +{ + switch (reg) { + case ORC_REG_PREV_SP: + return "prevsp"; + case ORC_REG_DX: + return "dx"; + case ORC_REG_DI: + return "di"; + case ORC_REG_BP: + return "bp"; + case ORC_REG_SP: + return "sp"; + case ORC_REG_R10: + return "r10"; + case ORC_REG_R13: + return "r13"; + case ORC_REG_BP_INDIRECT: + return "bp(ind)"; + case ORC_REG_SP_INDIRECT: + 
return "sp(ind)"; + default: + return "?"; + } +} + +static const char *orc_type_name(unsigned int type) +{ + switch (type) { + case ORC_TYPE_UNDEFINED: + return "(und)"; + case ORC_TYPE_END_OF_STACK: + return "end"; + case ORC_TYPE_CALL: + return "call"; + case ORC_TYPE_REGS: + return "regs"; + case ORC_TYPE_REGS_PARTIAL: + return "regs (partial)"; + default: + return "?"; + } +} + +static void print_reg(unsigned int reg, int offset) +{ + if (reg == ORC_REG_BP_INDIRECT) + printf("(bp%+d)", offset); + else if (reg == ORC_REG_SP_INDIRECT) + printf("(sp)%+d", offset); + else if (reg == ORC_REG_UNDEFINED) + printf("(und)"); + else + printf("%s%+d", reg_name(reg), offset); +} + int orc_dump(const char *_objname) { int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0; @@ -154,11 +209,11 @@ int orc_dump(const char *_objname) printf(" sp:"); - orc_print_reg(orc[i].sp_reg, bswap_if_needed(&dummy_elf, orc[i].sp_offset)); + print_reg(orc[i].sp_reg, bswap_if_needed(&dummy_elf, orc[i].sp_offset)); - printf(" fp:"); + printf(" bp:"); - orc_print_reg(orc[i].fp_reg, bswap_if_needed(&dummy_elf, orc[i].fp_offset)); + print_reg(orc[i].bp_reg, bswap_if_needed(&dummy_elf, orc[i].bp_offset)); printf(" signal:%d\n", orc[i].signal); } diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index a146666ea9d1..cfa01f43edd6 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -7,10 +7,9 @@ #include #include -#include +#include #include -#include #include #include @@ -19,6 +18,97 @@ bool __weak orc_ignore_section(struct section *sec) return false; } +static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, + struct instruction *insn) +{ + struct cfi_reg *bp = &cfi->regs[CFI_BP]; + + memset(orc, 0, sizeof(*orc)); + + if (!cfi) { + /* + * This is usually either unreachable nops/traps (which don't + * trigger unreachable instruction warnings), or + * STACK_FRAME_NON_STANDARD functions. 
+ */ + orc->type = ORC_TYPE_UNDEFINED; + return 0; + } + + switch (cfi->type) { + case UNWIND_HINT_TYPE_UNDEFINED: + orc->type = ORC_TYPE_UNDEFINED; + return 0; + case UNWIND_HINT_TYPE_END_OF_STACK: + orc->type = ORC_TYPE_END_OF_STACK; + return 0; + case UNWIND_HINT_TYPE_CALL: + orc->type = ORC_TYPE_CALL; + break; + case UNWIND_HINT_TYPE_REGS: + orc->type = ORC_TYPE_REGS; + break; + case UNWIND_HINT_TYPE_REGS_PARTIAL: + orc->type = ORC_TYPE_REGS_PARTIAL; + break; + default: + WARN_INSN(insn, "unknown unwind hint type %d", cfi->type); + return -1; + } + + orc->signal = cfi->signal; + + switch (cfi->cfa.base) { + case CFI_SP: + orc->sp_reg = ORC_REG_SP; + break; + case CFI_SP_INDIRECT: + orc->sp_reg = ORC_REG_SP_INDIRECT; + break; + case CFI_BP: + orc->sp_reg = ORC_REG_BP; + break; + case CFI_BP_INDIRECT: + orc->sp_reg = ORC_REG_BP_INDIRECT; + break; + case CFI_R10: + orc->sp_reg = ORC_REG_R10; + break; + case CFI_R13: + orc->sp_reg = ORC_REG_R13; + break; + case CFI_DI: + orc->sp_reg = ORC_REG_DI; + break; + case CFI_DX: + orc->sp_reg = ORC_REG_DX; + break; + default: + WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base); + return -1; + } + + switch (bp->base) { + case CFI_UNDEFINED: + orc->bp_reg = ORC_REG_UNDEFINED; + break; + case CFI_CFA: + orc->bp_reg = ORC_REG_PREV_SP; + break; + case CFI_BP: + orc->bp_reg = ORC_REG_BP; + break; + default: + WARN_INSN(insn, "unknown BP base reg %d", bp->base); + return -1; + } + + orc->sp_offset = cfi->cfa.offset; + orc->bp_offset = bp->offset; + + return 0; +} + static int write_orc_entry(struct elf *elf, struct section *orc_sec, struct section *ip_sec, unsigned int idx, struct section *insn_sec, unsigned long insn_off, -- Gitee From 22031067368a6e32e5c2b7ca67ea3c0134446f8b Mon Sep 17 00:00:00 2001 From: Jia He Date: Wed, 30 Oct 2024 00:24:06 +0000 Subject: [PATCH 1695/2138] objtool: Seperate arch-specific and generic parts ANBZ: #11595 commit b8e85e6f3a09fc56b0ff574887798962ef8a8f80 upstream. 
This reverts commit 9ec25e3e74653d228e00dfb7d816919ec5003d27. In addition, it cherry-pick commit b8e85e6f3a09 ("objtool/x86: Separate arch-specific and generic parts") And it also implements the arm64 specific orc.c Co-developed-by: Jinyang He Signed-off-by: Jinyang He Co-developed-by: Youling Tang Signed-off-by: Youling Tang Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen Signed-off-by: Jia He Acked-by: Wardenjohn Reviewed-by: Qiao Ma Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4069 --- tools/objtool/arch/arm64/include/arch/orc.h | 23 +++ tools/objtool/arch/arm64/orc.c | 39 +++- tools/objtool/arch/x86/Build | 1 + tools/objtool/arch/x86/orc.c | 188 ++++++++++++++++++++ tools/objtool/include/objtool/orc.h | 20 +++ tools/objtool/orc_dump.c | 69 +------ tools/objtool/orc_gen.c | 113 +----------- 7 files changed, 273 insertions(+), 180 deletions(-) create mode 100644 tools/objtool/arch/arm64/include/arch/orc.h create mode 100644 tools/objtool/arch/x86/orc.c create mode 100644 tools/objtool/include/objtool/orc.h diff --git a/tools/objtool/arch/arm64/include/arch/orc.h b/tools/objtool/arch/arm64/include/arch/orc.h new file mode 100644 index 000000000000..24fc9cf4de97 --- /dev/null +++ b/tools/objtool/arch/arm64/include/arch/orc.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ + +#ifndef _OBJTOOL_ORC_H +#define _OBJTOOL_ORC_H + +#include + +int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, + struct instruction *insn); +void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i); +int write_orc_entry(struct elf *elf, struct section *orc_sec, + struct section *ip_sec, unsigned int idx, + struct section *insn_sec, unsigned long insn_off, + struct orc_entry *o); +const char *orc_type_name(unsigned int type); +void orc_print_reg(unsigned int reg, int offset); +void orc_print_sp(void); +void orc_print_fp(void); + +#endif /* _OBJTOOL_ORC_H */ diff 
--git a/tools/objtool/arch/arm64/orc.c b/tools/objtool/arch/arm64/orc.c index 98e930991ef1..25fba97534a1 100644 --- a/tools/objtool/arch/arm64/orc.c +++ b/tools/objtool/arch/arm64/orc.c @@ -9,7 +9,8 @@ #include #include -#include +#include +#include int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn) @@ -39,6 +40,27 @@ int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, return 0; } +int write_orc_entry(struct elf *elf, struct section *orc_sec, + struct section *ip_sec, unsigned int idx, + struct section *insn_sec, unsigned long insn_off, + struct orc_entry *o) +{ + struct orc_entry *orc; + + /* populate ORC data */ + orc = (struct orc_entry *)orc_sec->data->d_buf + idx; + memcpy(orc, o, sizeof(*orc)); + orc->sp_offset = bswap_if_needed(elf, orc->sp_offset); + orc->fp_offset = bswap_if_needed(elf, orc->fp_offset); + + /* populate reloc for ip */ + if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx, + insn_sec, insn_off)) + return -1; + + return 0; +} + static const char *reg_name(unsigned int reg) { switch (reg) { @@ -67,6 +89,21 @@ const char *orc_type_name(unsigned int type) } } +void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i) +{ + printf("type:%s", orc_type_name(orc[i].type)); + + printf(" sp:"); + + orc_print_reg(orc[i].sp_reg, bswap_if_needed(dummy_elf, orc[i].sp_offset)); + + printf(" fp:"); + + orc_print_reg(orc[i].fp_reg, bswap_if_needed(dummy_elf, orc[i].fp_offset)); + + printf(" signal:%d\n", orc[i].signal); +} + void orc_print_reg(unsigned int reg, int offset) { if (reg == ORC_REG_UNDEFINED) diff --git a/tools/objtool/arch/x86/Build b/tools/objtool/arch/x86/Build index 9f7869b5c5e0..3dedb2fd8f3a 100644 --- a/tools/objtool/arch/x86/Build +++ b/tools/objtool/arch/x86/Build @@ -1,5 +1,6 @@ objtool-y += special.o objtool-y += decode.o +objtool-y += orc.o inat_tables_script = ../arch/x86/tools/gen-insn-attr-x86.awk inat_tables_maps = ../arch/x86/lib/x86-opcode-map.txt 
diff --git a/tools/objtool/arch/x86/orc.c b/tools/objtool/arch/x86/orc.c new file mode 100644 index 000000000000..b6cd943e87f9 --- /dev/null +++ b/tools/objtool/arch/x86/orc.c @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include +#include + +#include +#include +#include +#include + +int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn) +{ + struct cfi_reg *bp = &cfi->regs[CFI_BP]; + + memset(orc, 0, sizeof(*orc)); + + if (!cfi) { + /* + * This is usually either unreachable nops/traps (which don't + * trigger unreachable instruction warnings), or + * STACK_FRAME_NON_STANDARD functions. + */ + orc->type = ORC_TYPE_UNDEFINED; + return 0; + } + + switch (cfi->type) { + case UNWIND_HINT_TYPE_UNDEFINED: + orc->type = ORC_TYPE_UNDEFINED; + return 0; + case UNWIND_HINT_TYPE_END_OF_STACK: + orc->type = ORC_TYPE_END_OF_STACK; + return 0; + case UNWIND_HINT_TYPE_CALL: + orc->type = ORC_TYPE_CALL; + break; + case UNWIND_HINT_TYPE_REGS: + orc->type = ORC_TYPE_REGS; + break; + case UNWIND_HINT_TYPE_REGS_PARTIAL: + orc->type = ORC_TYPE_REGS_PARTIAL; + break; + default: + WARN_INSN(insn, "unknown unwind hint type %d", cfi->type); + return -1; + } + + orc->signal = cfi->signal; + + switch (cfi->cfa.base) { + case CFI_SP: + orc->sp_reg = ORC_REG_SP; + break; + case CFI_SP_INDIRECT: + orc->sp_reg = ORC_REG_SP_INDIRECT; + break; + case CFI_BP: + orc->sp_reg = ORC_REG_BP; + break; + case CFI_BP_INDIRECT: + orc->sp_reg = ORC_REG_BP_INDIRECT; + break; + case CFI_R10: + orc->sp_reg = ORC_REG_R10; + break; + case CFI_R13: + orc->sp_reg = ORC_REG_R13; + break; + case CFI_DI: + orc->sp_reg = ORC_REG_DI; + break; + case CFI_DX: + orc->sp_reg = ORC_REG_DX; + break; + default: + WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base); + return -1; + } + + switch (bp->base) { + case CFI_UNDEFINED: + orc->bp_reg = ORC_REG_UNDEFINED; + break; + case CFI_CFA: + orc->bp_reg = ORC_REG_PREV_SP; + break; + case CFI_BP: + orc->bp_reg = 
ORC_REG_BP; + break; + default: + WARN_INSN(insn, "unknown BP base reg %d", bp->base); + return -1; + } + + orc->sp_offset = cfi->cfa.offset; + orc->bp_offset = bp->offset; + + return 0; +} + +int write_orc_entry(struct elf *elf, struct section *orc_sec, + struct section *ip_sec, unsigned int idx, + struct section *insn_sec, unsigned long insn_off, + struct orc_entry *o) +{ + struct orc_entry *orc; + + /* populate ORC data */ + orc = (struct orc_entry *)orc_sec->data->d_buf + idx; + memcpy(orc, o, sizeof(*orc)); + orc->sp_offset = bswap_if_needed(elf, orc->sp_offset); + orc->bp_offset = bswap_if_needed(elf, orc->bp_offset); + + /* populate reloc for ip */ + if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx, + insn_sec, insn_off)) + return -1; + + return 0; +} + +static const char *reg_name(unsigned int reg) +{ + switch (reg) { + case ORC_REG_PREV_SP: + return "prevsp"; + case ORC_REG_DX: + return "dx"; + case ORC_REG_DI: + return "di"; + case ORC_REG_BP: + return "bp"; + case ORC_REG_SP: + return "sp"; + case ORC_REG_R10: + return "r10"; + case ORC_REG_R13: + return "r13"; + case ORC_REG_BP_INDIRECT: + return "bp(ind)"; + case ORC_REG_SP_INDIRECT: + return "sp(ind)"; + default: + return "?"; + } +} + +static const char *orc_type_name(unsigned int type) +{ + switch (type) { + case ORC_TYPE_UNDEFINED: + return "(und)"; + case ORC_TYPE_END_OF_STACK: + return "end"; + case ORC_TYPE_CALL: + return "call"; + case ORC_TYPE_REGS: + return "regs"; + case ORC_TYPE_REGS_PARTIAL: + return "regs (partial)"; + default: + return "?"; + } +} + +static void print_reg(unsigned int reg, int offset) +{ + if (reg == ORC_REG_BP_INDIRECT) + printf("(bp%+d)", offset); + else if (reg == ORC_REG_SP_INDIRECT) + printf("(sp)%+d", offset); + else if (reg == ORC_REG_UNDEFINED) + printf("(und)"); + else + printf("%s%+d", reg_name(reg), offset); +} + +void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i) +{ + printf("type:%s", orc_type_name(orc[i].type)); + + 
printf(" sp:"); + print_reg(orc[i].sp_reg, bswap_if_needed(dummy_elf, orc[i].sp_offset)); + + printf(" bp:"); + print_reg(orc[i].bp_reg, bswap_if_needed(dummy_elf, orc[i].bp_offset)); + + printf(" signal:%d\n", orc[i].signal); +} diff --git a/tools/objtool/include/objtool/orc.h b/tools/objtool/include/objtool/orc.h new file mode 100644 index 000000000000..88dc98a2b8a4 --- /dev/null +++ b/tools/objtool/include/objtool/orc.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ + +#ifndef _OBJTOOL_ORC_H +#define _OBJTOOL_ORC_H + +#include +#include + +int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, + struct instruction *insn); +void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i); +int write_orc_entry(struct elf *elf, struct section *orc_sec, + struct section *ip_sec, unsigned int idx, + struct section *insn_sec, unsigned long insn_off, + struct orc_entry *o); + +#endif /* _OBJTOOL_ORC_H */ diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c index 0e183bb1c720..a62247efb64f 100644 --- a/tools/objtool/orc_dump.c +++ b/tools/objtool/orc_dump.c @@ -6,65 +6,10 @@ #include #include #include +#include #include #include -static const char *reg_name(unsigned int reg) -{ - switch (reg) { - case ORC_REG_PREV_SP: - return "prevsp"; - case ORC_REG_DX: - return "dx"; - case ORC_REG_DI: - return "di"; - case ORC_REG_BP: - return "bp"; - case ORC_REG_SP: - return "sp"; - case ORC_REG_R10: - return "r10"; - case ORC_REG_R13: - return "r13"; - case ORC_REG_BP_INDIRECT: - return "bp(ind)"; - case ORC_REG_SP_INDIRECT: - return "sp(ind)"; - default: - return "?"; - } -} - -static const char *orc_type_name(unsigned int type) -{ - switch (type) { - case ORC_TYPE_UNDEFINED: - return "(und)"; - case ORC_TYPE_END_OF_STACK: - return "end"; - case ORC_TYPE_CALL: - return "call"; - case ORC_TYPE_REGS: - return "regs"; - case ORC_TYPE_REGS_PARTIAL: - return "regs (partial)"; - default: - 
return "?"; - } -} - -static void print_reg(unsigned int reg, int offset) -{ - if (reg == ORC_REG_BP_INDIRECT) - printf("(bp%+d)", offset); - else if (reg == ORC_REG_SP_INDIRECT) - printf("(sp)%+d", offset); - else if (reg == ORC_REG_UNDEFINED) - printf("(und)"); - else - printf("%s%+d", reg_name(reg), offset); -} - int orc_dump(const char *_objname) { int fd, nr_entries, i, *orc_ip = NULL, orc_size = 0; @@ -205,17 +150,7 @@ int orc_dump(const char *_objname) printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i])); } - printf("type:%s", orc_type_name(orc[i].type)); - - printf(" sp:"); - - print_reg(orc[i].sp_reg, bswap_if_needed(&dummy_elf, orc[i].sp_offset)); - - printf(" bp:"); - - print_reg(orc[i].bp_reg, bswap_if_needed(&dummy_elf, orc[i].bp_offset)); - - printf(" signal:%d\n", orc[i].signal); + orc_print_dump(&dummy_elf, orc, i); } elf_end(elf); diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index cfa01f43edd6..71cdbb5fe138 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -18,118 +19,6 @@ bool __weak orc_ignore_section(struct section *sec) return false; } -static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, - struct instruction *insn) -{ - struct cfi_reg *bp = &cfi->regs[CFI_BP]; - - memset(orc, 0, sizeof(*orc)); - - if (!cfi) { - /* - * This is usually either unreachable nops/traps (which don't - * trigger unreachable instruction warnings), or - * STACK_FRAME_NON_STANDARD functions. 
- */ - orc->type = ORC_TYPE_UNDEFINED; - return 0; - } - - switch (cfi->type) { - case UNWIND_HINT_TYPE_UNDEFINED: - orc->type = ORC_TYPE_UNDEFINED; - return 0; - case UNWIND_HINT_TYPE_END_OF_STACK: - orc->type = ORC_TYPE_END_OF_STACK; - return 0; - case UNWIND_HINT_TYPE_CALL: - orc->type = ORC_TYPE_CALL; - break; - case UNWIND_HINT_TYPE_REGS: - orc->type = ORC_TYPE_REGS; - break; - case UNWIND_HINT_TYPE_REGS_PARTIAL: - orc->type = ORC_TYPE_REGS_PARTIAL; - break; - default: - WARN_INSN(insn, "unknown unwind hint type %d", cfi->type); - return -1; - } - - orc->signal = cfi->signal; - - switch (cfi->cfa.base) { - case CFI_SP: - orc->sp_reg = ORC_REG_SP; - break; - case CFI_SP_INDIRECT: - orc->sp_reg = ORC_REG_SP_INDIRECT; - break; - case CFI_BP: - orc->sp_reg = ORC_REG_BP; - break; - case CFI_BP_INDIRECT: - orc->sp_reg = ORC_REG_BP_INDIRECT; - break; - case CFI_R10: - orc->sp_reg = ORC_REG_R10; - break; - case CFI_R13: - orc->sp_reg = ORC_REG_R13; - break; - case CFI_DI: - orc->sp_reg = ORC_REG_DI; - break; - case CFI_DX: - orc->sp_reg = ORC_REG_DX; - break; - default: - WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base); - return -1; - } - - switch (bp->base) { - case CFI_UNDEFINED: - orc->bp_reg = ORC_REG_UNDEFINED; - break; - case CFI_CFA: - orc->bp_reg = ORC_REG_PREV_SP; - break; - case CFI_BP: - orc->bp_reg = ORC_REG_BP; - break; - default: - WARN_INSN(insn, "unknown BP base reg %d", bp->base); - return -1; - } - - orc->sp_offset = cfi->cfa.offset; - orc->bp_offset = bp->offset; - - return 0; -} - -static int write_orc_entry(struct elf *elf, struct section *orc_sec, - struct section *ip_sec, unsigned int idx, - struct section *insn_sec, unsigned long insn_off, - struct orc_entry *o) -{ - struct orc_entry *orc; - - /* populate ORC data */ - orc = (struct orc_entry *)orc_sec->data->d_buf + idx; - memcpy(orc, o, sizeof(*orc)); - orc->sp_offset = bswap_if_needed(elf, orc->sp_offset); - orc->fp_offset = bswap_if_needed(elf, orc->fp_offset); - - /* populate 
reloc for ip */ - if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx, - insn_sec, insn_off)) - return -1; - - return 0; -} - struct orc_list_entry { struct list_head list; struct orc_entry orc; -- Gitee From 710d9098b49701075fc6d1dc039c9762f1a0b236 Mon Sep 17 00:00:00 2001 From: Jia He Date: Wed, 30 Oct 2024 01:21:08 +0000 Subject: [PATCH 1696/2138] Revert "objtool: Reorganize ORC types" ANBZ: #11595 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ This reverts commit 4c76014d8319006bf2c1a27896f4aa262cc19f25. To support livepatch features on other architectures, it's preferable to keep the Anolis livepatch code layout closely aligned with the upstream kernel. Therefore, we should revert the commits that reorganized the code specifically for arm64 livepatch support. Signed-off-by: Jia He Acked-by: Wardenjohn Reviewed-by: Qiao Ma Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4069 --- arch/x86/include/asm/orc_types.h | 37 +++++++++++++++++++++----- tools/arch/x86/include/asm/orc_types.h | 37 +++++++++++++++++++++----- tools/objtool/orc_gen.c | 6 ++++- tools/objtool/sync-check.sh | 1 - 4 files changed, 65 insertions(+), 16 deletions(-) diff --git a/arch/x86/include/asm/orc_types.h b/arch/x86/include/asm/orc_types.h index 45f21662ac21..46d7e06763c9 100644 --- a/arch/x86/include/asm/orc_types.h +++ b/arch/x86/include/asm/orc_types.h @@ -8,13 +8,6 @@ #include #include -#include - -/* - * For x86, use the appripriate name for the frame pointer in orc_entry. - */ -#define bp_offset fp_offset -#define bp_reg fp_reg /* * The ORC_REG_* registers are base registers which are used to find other @@ -52,4 +45,34 @@ #define ORC_TYPE_REGS 3 #define ORC_TYPE_REGS_PARTIAL 4 +#ifndef __ASSEMBLY__ +#include + +/* + * This struct is more or less a vastly simplified version of the DWARF Call + * Frame Information standard. 
It contains only the necessary parts of DWARF + * CFI, simplified for ease of access by the in-kernel unwinder. It tells the + * unwinder how to find the previous SP and BP (and sometimes entry regs) on + * the stack for a given code address. Each instance of the struct corresponds + * to one or more code locations. + */ +struct orc_entry { + s16 sp_offset; + s16 bp_offset; +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned sp_reg:4; + unsigned bp_reg:4; + unsigned type:3; + unsigned signal:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned bp_reg:4; + unsigned sp_reg:4; + unsigned unused:4; + unsigned signal:1; + unsigned type:3; +#endif +} __packed; + +#endif /* __ASSEMBLY__ */ + #endif /* _ORC_TYPES_H */ diff --git a/tools/arch/x86/include/asm/orc_types.h b/tools/arch/x86/include/asm/orc_types.h index 45f21662ac21..46d7e06763c9 100644 --- a/tools/arch/x86/include/asm/orc_types.h +++ b/tools/arch/x86/include/asm/orc_types.h @@ -8,13 +8,6 @@ #include #include -#include - -/* - * For x86, use the appripriate name for the frame pointer in orc_entry. - */ -#define bp_offset fp_offset -#define bp_reg fp_reg /* * The ORC_REG_* registers are base registers which are used to find other @@ -52,4 +45,34 @@ #define ORC_TYPE_REGS 3 #define ORC_TYPE_REGS_PARTIAL 4 +#ifndef __ASSEMBLY__ +#include + +/* + * This struct is more or less a vastly simplified version of the DWARF Call + * Frame Information standard. It contains only the necessary parts of DWARF + * CFI, simplified for ease of access by the in-kernel unwinder. It tells the + * unwinder how to find the previous SP and BP (and sometimes entry regs) on + * the stack for a given code address. Each instance of the struct corresponds + * to one or more code locations. 
+ */ +struct orc_entry { + s16 sp_offset; + s16 bp_offset; +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned sp_reg:4; + unsigned bp_reg:4; + unsigned type:3; + unsigned signal:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned bp_reg:4; + unsigned sp_reg:4; + unsigned unused:4; + unsigned signal:1; + unsigned type:3; +#endif +} __packed; + +#endif /* __ASSEMBLY__ */ + #endif /* _ORC_TYPES_H */ diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index 71cdbb5fe138..217a4e7d5617 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -14,6 +14,10 @@ #include #include +#ifdef __aarch64__ +#define bp_reg fp_reg +#endif + bool __weak orc_ignore_section(struct section *sec) { return false; @@ -59,7 +63,7 @@ int orc_create(struct objtool_file *file) struct list_head orc_list; struct orc_entry null = { - .fp_reg = ORC_REG_UNDEFINED, + .bp_reg = ORC_REG_UNDEFINED, .type = UNWIND_HINT_TYPE_CALL, }; diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh index 9bacf219bfe0..d75b494e010d 100755 --- a/tools/objtool/sync-check.sh +++ b/tools/objtool/sync-check.sh @@ -18,7 +18,6 @@ arch/x86/include/asm/unwind_hints.h arch/x86/lib/x86-opcode-map.txt arch/x86/tools/gen-insn-attr-x86.awk include/linux/static_call_types.h -include/linux/orc_entry.h " SYNC_CHECK_FILES=' -- Gitee From cf2e0d5910d2f960dbc2f0fdd025d0b9815be5bc Mon Sep 17 00:00:00 2001 From: Jia He Date: Wed, 30 Oct 2024 10:30:18 +0000 Subject: [PATCH 1697/2138] Revert "objtool: Reorganize Unwind hint code" ANBZ: #11595 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ This reverts commit b3416886ab7d17d03b7e6501cb816daa760b0b89. To support livepatch features on other architectures, it's preferable to keep the Anolis livepatch code layout closely aligned with the upstream kernel. Therefore, we should revert the commits that reorganized the code specifically for arm64 livepatch support. 
Signed-off-by: Jia He Acked-by: Wardenjohn Reviewed-by: Qiao Ma Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4069 --- arch/x86/entry/entry.S | 1 - arch/x86/include/asm/unwind_hints.h | 67 +------- arch/x86/kernel/unwind_orc.c | 3 +- include/linux/objtool.h | 49 ++++++ tools/arch/x86/include/asm/unwind_hints.h | 158 ------------------ tools/objtool/Build | 1 - tools/objtool/arch/arm64/Build | 1 + tools/objtool/{ => arch/arm64}/unwind_hints.c | 0 tools/objtool/check.c | 91 ++++++++++ tools/objtool/include/objtool/insn.h | 3 + tools/objtool/sync-check.sh | 1 - 11 files changed, 146 insertions(+), 229 deletions(-) delete mode 100644 tools/arch/x86/include/asm/unwind_hints.h rename tools/objtool/{ => arch/arm64}/unwind_hints.c (100%) diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S index 75cf576c7c4c..2143358d0c4c 100644 --- a/arch/x86/entry/entry.S +++ b/arch/x86/entry/entry.S @@ -4,7 +4,6 @@ */ #include -#include #include #include #include diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h index 196b82f806e4..85cc57cb6539 100644 --- a/arch/x86/include/asm/unwind_hints.h +++ b/arch/x86/include/asm/unwind_hints.h @@ -1,75 +1,10 @@ #ifndef _ASM_X86_UNWIND_HINTS_H #define _ASM_X86_UNWIND_HINTS_H -#include +#include #include "orc_types.h" -#ifdef CONFIG_OBJTOOL - -#ifndef __ASSEMBLY__ - -#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \ - "987: \n\t" \ - ".pushsection .discard.unwind_hints\n\t" \ - /* struct unwind_hint */ \ - ".long 987b - .\n\t" \ - ".short " __stringify(sp_offset) "\n\t" \ - ".byte " __stringify(sp_reg) "\n\t" \ - ".byte " __stringify(type) "\n\t" \ - ".byte " __stringify(signal) "\n\t" \ - ".balign 4 \n\t" \ - ".popsection\n\t" - -#else /* __ASSEMBLY__ */ - -/* - * In asm, there are two kinds of code: normal C-type callable functions and - * the rest. The normal callable functions can be called by other code, and - * don't do anything unusual with the stack. 
Such normal callable functions - * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this - * category. In this case, no special debugging annotations are needed because - * objtool can automatically generate the ORC data for the ORC unwinder to read - * at runtime. - * - * Anything which doesn't fall into the above category, such as syscall and - * interrupt handlers, tends to not be called directly by other functions, and - * often does unusual non-C-function-type things with the stack pointer. Such - * code needs to be annotated such that objtool can understand it. The - * following CFI hint macros are for this type of code. - * - * These macros provide hints to objtool about the state of the stack at each - * instruction. Objtool starts from the hints and follows the code flow, - * making automatic CFI adjustments when it sees pushes and pops, filling out - * the debuginfo as necessary. It will also warn if it sees any - * inconsistencies. - */ -.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 -.Lhere_\@: - .pushsection .discard.unwind_hints - /* struct unwind_hint */ - .long .Lhere_\@ - . 
- .short \sp_offset - .byte \sp_reg - .byte \type - .byte \signal - .balign 4 - .popsection -.endm - -#endif /* __ASSEMBLY__ */ - -#else /* !CONFIG_OBJTOOL */ - -#ifndef __ASSEMBLY__ -#define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t" -#else -.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 -.endm -#endif - -#endif /* CONFIG_OBJTOOL */ - #ifdef __ASSEMBLY__ .macro UNWIND_HINT_END_OF_STACK diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index a75d9a827594..7784076819de 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c @@ -1,11 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-only -#include #include +#include #include #include #include #include -#include #include #include #include diff --git a/include/linux/objtool.h b/include/linux/objtool.h index 865caa2d1232..ac959bbbffd5 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -12,6 +12,18 @@ #ifndef __ASSEMBLY__ +#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \ + "987: \n\t" \ + ".pushsection .discard.unwind_hints\n\t" \ + /* struct unwind_hint */ \ + ".long 987b - .\n\t" \ + ".short " __stringify(sp_offset) "\n\t" \ + ".byte " __stringify(sp_reg) "\n\t" \ + ".byte " __stringify(type) "\n\t" \ + ".byte " __stringify(signal) "\n\t" \ + ".balign 4 \n\t" \ + ".popsection\n\t" + /* * This macro marks the given function's stack frame as "non-standard", which * tells objtool to ignore the function when doing stack metadata validation. @@ -59,6 +71,40 @@ .long 999b; \ .popsection; +/* + * In asm, there are two kinds of code: normal C-type callable functions and + * the rest. The normal callable functions can be called by other code, and + * don't do anything unusual with the stack. Such normal callable functions + * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this + * category. 
In this case, no special debugging annotations are needed because + * objtool can automatically generate the ORC data for the ORC unwinder to read + * at runtime. + * + * Anything which doesn't fall into the above category, such as syscall and + * interrupt handlers, tends to not be called directly by other functions, and + * often does unusual non-C-function-type things with the stack pointer. Such + * code needs to be annotated such that objtool can understand it. The + * following CFI hint macros are for this type of code. + * + * These macros provide hints to objtool about the state of the stack at each + * instruction. Objtool starts from the hints and follows the code flow, + * making automatic CFI adjustments when it sees pushes and pops, filling out + * the debuginfo as necessary. It will also warn if it sees any + * inconsistencies. + */ +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 +.Lhere_\@: + .pushsection .discard.unwind_hints + /* struct unwind_hint */ + .long .Lhere_\@ - . + .short \sp_offset + .byte \sp_reg + .byte \type + .byte \signal + .balign 4 + .popsection +.endm + .macro STACK_FRAME_NON_STANDARD func:req .pushsection .discard.func_stack_frame_non_standard, "aw" .long \func - . 
@@ -108,12 +154,15 @@ #ifndef __ASSEMBLY__ +#define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t" #define STACK_FRAME_NON_STANDARD(func) #define STACK_FRAME_NON_STANDARD_FP(func) #define ANNOTATE_NOENDBR #define ASM_REACHABLE #else #define ANNOTATE_INTRA_FUNCTION_CALL +.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 +.endm .macro STACK_FRAME_NON_STANDARD func:req .endm .macro ANNOTATE_NOENDBR diff --git a/tools/arch/x86/include/asm/unwind_hints.h b/tools/arch/x86/include/asm/unwind_hints.h deleted file mode 100644 index 196b82f806e4..000000000000 --- a/tools/arch/x86/include/asm/unwind_hints.h +++ /dev/null @@ -1,158 +0,0 @@ -#ifndef _ASM_X86_UNWIND_HINTS_H -#define _ASM_X86_UNWIND_HINTS_H - -#include - -#include "orc_types.h" - -#ifdef CONFIG_OBJTOOL - -#ifndef __ASSEMBLY__ - -#define UNWIND_HINT(type, sp_reg, sp_offset, signal) \ - "987: \n\t" \ - ".pushsection .discard.unwind_hints\n\t" \ - /* struct unwind_hint */ \ - ".long 987b - .\n\t" \ - ".short " __stringify(sp_offset) "\n\t" \ - ".byte " __stringify(sp_reg) "\n\t" \ - ".byte " __stringify(type) "\n\t" \ - ".byte " __stringify(signal) "\n\t" \ - ".balign 4 \n\t" \ - ".popsection\n\t" - -#else /* __ASSEMBLY__ */ - -/* - * In asm, there are two kinds of code: normal C-type callable functions and - * the rest. The normal callable functions can be called by other code, and - * don't do anything unusual with the stack. Such normal callable functions - * are annotated with the ENTRY/ENDPROC macros. Most asm code falls in this - * category. In this case, no special debugging annotations are needed because - * objtool can automatically generate the ORC data for the ORC unwinder to read - * at runtime. - * - * Anything which doesn't fall into the above category, such as syscall and - * interrupt handlers, tends to not be called directly by other functions, and - * often does unusual non-C-function-type things with the stack pointer. 
Such - * code needs to be annotated such that objtool can understand it. The - * following CFI hint macros are for this type of code. - * - * These macros provide hints to objtool about the state of the stack at each - * instruction. Objtool starts from the hints and follows the code flow, - * making automatic CFI adjustments when it sees pushes and pops, filling out - * the debuginfo as necessary. It will also warn if it sees any - * inconsistencies. - */ -.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 -.Lhere_\@: - .pushsection .discard.unwind_hints - /* struct unwind_hint */ - .long .Lhere_\@ - . - .short \sp_offset - .byte \sp_reg - .byte \type - .byte \signal - .balign 4 - .popsection -.endm - -#endif /* __ASSEMBLY__ */ - -#else /* !CONFIG_OBJTOOL */ - -#ifndef __ASSEMBLY__ -#define UNWIND_HINT(type, sp_reg, sp_offset, signal) "\n\t" -#else -.macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 signal=0 -.endm -#endif - -#endif /* CONFIG_OBJTOOL */ - -#ifdef __ASSEMBLY__ - -.macro UNWIND_HINT_END_OF_STACK - UNWIND_HINT type=UNWIND_HINT_TYPE_END_OF_STACK -.endm - -.macro UNWIND_HINT_UNDEFINED - UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED -.endm - -.macro UNWIND_HINT_ENTRY - VALIDATE_UNRET_BEGIN - UNWIND_HINT_END_OF_STACK -.endm - -.macro UNWIND_HINT_REGS base=%rsp offset=0 indirect=0 extra=1 partial=0 signal=1 - .if \base == %rsp - .if \indirect - .set sp_reg, ORC_REG_SP_INDIRECT - .else - .set sp_reg, ORC_REG_SP - .endif - .elseif \base == %rbp - .set sp_reg, ORC_REG_BP - .elseif \base == %rdi - .set sp_reg, ORC_REG_DI - .elseif \base == %rdx - .set sp_reg, ORC_REG_DX - .elseif \base == %r10 - .set sp_reg, ORC_REG_R10 - .else - .error "UNWIND_HINT_REGS: bad base register" - .endif - - .set sp_offset, \offset - - .if \partial - .set type, UNWIND_HINT_TYPE_REGS_PARTIAL - .elseif \extra == 0 - .set type, UNWIND_HINT_TYPE_REGS_PARTIAL - .set sp_offset, \offset + (16*8) - .else - .set type, UNWIND_HINT_TYPE_REGS - .endif - - UNWIND_HINT sp_reg=sp_reg 
sp_offset=sp_offset type=type signal=\signal -.endm - -.macro UNWIND_HINT_IRET_REGS base=%rsp offset=0 signal=1 - UNWIND_HINT_REGS base=\base offset=\offset partial=1 signal=\signal -.endm - -.macro UNWIND_HINT_IRET_ENTRY base=%rsp offset=0 signal=1 - VALIDATE_UNRET_BEGIN - UNWIND_HINT_IRET_REGS base=\base offset=\offset signal=\signal -.endm - -.macro UNWIND_HINT_FUNC - UNWIND_HINT sp_reg=ORC_REG_SP sp_offset=8 type=UNWIND_HINT_TYPE_FUNC -.endm - -.macro UNWIND_HINT_SAVE - UNWIND_HINT type=UNWIND_HINT_TYPE_SAVE -.endm - -.macro UNWIND_HINT_RESTORE - UNWIND_HINT type=UNWIND_HINT_TYPE_RESTORE -.endm - -#else - -#define UNWIND_HINT_UNDEFINED \ - UNWIND_HINT(UNWIND_HINT_TYPE_UNDEFINED, 0, 0, 0) - -#define UNWIND_HINT_FUNC \ - UNWIND_HINT(UNWIND_HINT_TYPE_FUNC, ORC_REG_SP, 8, 0) - -#define UNWIND_HINT_SAVE \ - UNWIND_HINT(UNWIND_HINT_TYPE_SAVE, 0, 0, 0) - -#define UNWIND_HINT_RESTORE \ - UNWIND_HINT(UNWIND_HINT_TYPE_RESTORE, 0, 0, 0) - -#endif /* __ASSEMBLY__ */ - -#endif /* _ASM_X86_UNWIND_HINTS_H */ diff --git a/tools/objtool/Build b/tools/objtool/Build index 9da7ebdae86c..e0a4685ac7a3 100644 --- a/tools/objtool/Build +++ b/tools/objtool/Build @@ -9,7 +9,6 @@ objtool-y += builtin-check.o objtool-y += cfi.o objtool-y += insn.o objtool-y += decode.o -objtool-y += unwind_hints.o objtool-y += elf.o objtool-y += objtool.o diff --git a/tools/objtool/arch/arm64/Build b/tools/objtool/arch/arm64/Build index 8615abfb12cf..77619e7ef476 100644 --- a/tools/objtool/arch/arm64/Build +++ b/tools/objtool/arch/arm64/Build @@ -1,2 +1,3 @@ objtool-y += decode.o objtool-y += orc.o +objtool-y += unwind_hints.o diff --git a/tools/objtool/unwind_hints.c b/tools/objtool/arch/arm64/unwind_hints.c similarity index 100% rename from tools/objtool/unwind_hints.c rename to tools/objtool/arch/arm64/unwind_hints.c diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 1322c6f0dc0b..9bef3cbf6508 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -1814,6 +1814,97 @@ static int 
add_jump_table_alts(struct objtool_file *file) return 0; } +static int read_unwind_hints(struct objtool_file *file) +{ + struct cfi_state cfi = init_cfi; + struct section *sec; + struct unwind_hint *hint; + struct instruction *insn; + struct reloc *reloc; + int i; + + sec = find_section_by_name(file->elf, ".discard.unwind_hints"); + if (!sec) + return 0; + + if (!sec->rsec) { + WARN("missing .rela.discard.unwind_hints section"); + return -1; + } + + if (sec->sh.sh_size % sizeof(struct unwind_hint)) { + WARN("struct unwind_hint size mismatch"); + return -1; + } + + file->hints = true; + + for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) { + hint = (struct unwind_hint *)sec->data->d_buf + i; + + reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); + if (!reloc) { + WARN("can't find reloc for unwind_hints[%d]", i); + return -1; + } + + insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc)); + if (!insn) { + WARN("can't find insn for unwind_hints[%d]", i); + return -1; + } + + insn->hint = true; + + if (hint->type == UNWIND_HINT_TYPE_UNDEFINED) { + insn->cfi = &force_undefined_cfi; + continue; + } + + if (hint->type == UNWIND_HINT_TYPE_SAVE) { + insn->hint = false; + insn->save = true; + continue; + } + + if (hint->type == UNWIND_HINT_TYPE_RESTORE) { + insn->restore = true; + continue; + } + + if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) { + struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); + + if (sym && sym->bind == STB_GLOBAL) { + if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) { + WARN_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR"); + } + } + } + + if (hint->type == UNWIND_HINT_TYPE_FUNC) { + insn->cfi = &func_cfi; + continue; + } + + if (insn->cfi) + cfi = *(insn->cfi); + + if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { + WARN_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg); + return -1; + } + + cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset); + 
cfi.type = hint->type; + cfi.signal = hint->signal; + + insn->cfi = cfi_hash_find_or_add(&cfi); + } + + return 0; +} + static int read_noendbr_hints(struct objtool_file *file) { struct instruction *insn; diff --git a/tools/objtool/include/objtool/insn.h b/tools/objtool/include/objtool/insn.h index d19cd2c12f06..d1e877095541 100644 --- a/tools/objtool/include/objtool/insn.h +++ b/tools/objtool/include/objtool/insn.h @@ -111,7 +111,10 @@ bool is_first_func_insn(struct objtool_file *file, struct instruction *insn, struct symbol *sym); int decode_instructions(struct objtool_file *file); +/* temporily add for revert */ +#ifdef __aarch64__ int read_unwind_hints(struct objtool_file *file); +#endif #define sec_for_each_insn(file, _sec, insn) \ for (insn = find_insn(file, _sec, 0); \ diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh index d75b494e010d..b06b5b881121 100755 --- a/tools/objtool/sync-check.sh +++ b/tools/objtool/sync-check.sh @@ -14,7 +14,6 @@ arch/x86/include/asm/nops.h arch/x86/include/asm/inat_types.h arch/x86/include/asm/orc_types.h arch/x86/include/asm/emulate_prefix.h -arch/x86/include/asm/unwind_hints.h arch/x86/lib/x86-opcode-map.txt arch/x86/tools/gen-insn-attr-x86.awk include/linux/static_call_types.h -- Gitee From fa6b23f3c70147a10a7e263baf5858237c96c964 Mon Sep 17 00:00:00 2001 From: Jia He Date: Wed, 30 Oct 2024 10:55:15 +0000 Subject: [PATCH 1698/2138] Revert "objtool: Move decode_instructions() to a separate file" ANBZ: #11595 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ This reverts commit 9ec25e3e74653d228e00dfb7d816919ec5003d27. To support livepatch features on other architectures, it's preferable to keep the Anolis livepatch code layout closely aligned with the upstream kernel. Therefore, we should revert the commits that reorganized the code specifically for arm64 livepatch support. 
Signed-off-by: Jia He Acked-by: Wardenjohn Reviewed-by: Qiao Ma Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4069 --- arch/x86/coco/tdx/tdcall.S | 1 - tools/objtool/Build | 1 - tools/objtool/arch/arm64/decode.c | 129 +++++++++++++++++++++++++ tools/objtool/check.c | 126 +++++++++++++++++++++++++ tools/objtool/decode.c | 136 --------------------------- tools/objtool/include/objtool/insn.h | 3 +- 6 files changed, 256 insertions(+), 140 deletions(-) delete mode 100644 tools/objtool/decode.c diff --git a/arch/x86/coco/tdx/tdcall.S b/arch/x86/coco/tdx/tdcall.S index 0658012149a2..56b9cd32895e 100644 --- a/arch/x86/coco/tdx/tdcall.S +++ b/arch/x86/coco/tdx/tdcall.S @@ -7,7 +7,6 @@ #include #include #include -#include #include "../../virt/vmx/tdx/tdxcall.S" diff --git a/tools/objtool/Build b/tools/objtool/Build index e0a4685ac7a3..9d934bf1ceee 100644 --- a/tools/objtool/Build +++ b/tools/objtool/Build @@ -8,7 +8,6 @@ objtool-$(DYNAMIC_CHECK) += dcheck.o objtool-y += builtin-check.o objtool-y += cfi.o objtool-y += insn.o -objtool-y += decode.o objtool-y += elf.o objtool-y += objtool.o diff --git a/tools/objtool/arch/arm64/decode.c b/tools/objtool/arch/arm64/decode.c index 19ef9b22a734..28a972eb9976 100644 --- a/tools/objtool/arch/arm64/decode.c +++ b/tools/objtool/arch/arm64/decode.c @@ -13,12 +13,16 @@ #include #include +#include #include #include +#include #include #include +unsigned long nr_insns; + /* ARM64 instructions are all 4 bytes wide. */ #define INSN_SIZE 4 @@ -579,3 +583,128 @@ int arch_decode_instruction(struct objtool_file *file, insn->type = var.type; return 0; } + +/* + * Call the arch-specific instruction decoder for all the instructions and add + * them to the global instruction list. 
+ */ +int decode_instructions(struct objtool_file *file) +{ + struct section *sec; + struct symbol *func; + unsigned long offset; + struct instruction *insn; + int ret; + + for_each_sec(file, sec) { + struct instruction *insns = NULL; + u8 prev_len = 0; + u8 idx = 0; + + if (!(sec->sh.sh_flags & SHF_EXECINSTR)) + continue; + + if (strcmp(sec->name, ".altinstr_replacement") && + strcmp(sec->name, ".altinstr_aux") && + strncmp(sec->name, ".discard.", 9)) + sec->text = true; + + if (!strcmp(sec->name, ".noinstr.text") || + !strcmp(sec->name, ".entry.text") || + !strcmp(sec->name, ".cpuidle.text") || + !strncmp(sec->name, ".text..__x86.", 13)) + sec->noinstr = true; + + /* + * .init.text code is ran before userspace and thus doesn't + * strictly need retpolines, except for modules which are + * loaded late, they very much do need retpoline in their + * .init.text + */ + if (!strcmp(sec->name, ".init.text") && !opts.module) + sec->init = true; + + for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { + if (!insns || idx == INSN_CHUNK_MAX) { + insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE); + if (!insns) { + WARN("malloc failed"); + return -1; + } + idx = 0; + } else { + idx++; + } + insn = &insns[idx]; + insn->idx = idx; + + INIT_LIST_HEAD(&insn->call_node); + insn->sec = sec; + insn->offset = offset; + insn->prev_len = prev_len; + + ret = arch_decode_instruction(file, sec, offset, + sec->sh.sh_size - offset, + insn); + if (ret) + return ret; + + prev_len = insn->len; + + /* + * By default, "ud2" is a dead end unless otherwise + * annotated, because GCC 7 inserts it for certain + * divide-by-zero cases. 
+ */ + if (insn->type == INSN_BUG) + insn->dead_end = true; + + hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); + nr_insns++; + } + +// printf("%s: last chunk used: %d\n", sec->name, (int)idx); + + sec_for_each_sym(sec, func) { + if (func->type != STT_NOTYPE && func->type != STT_FUNC) + continue; + + if (func->offset == sec->sh.sh_size) { + /* Heuristic: likely an "end" symbol */ + if (func->type == STT_NOTYPE) + continue; + WARN("%s(): STT_FUNC at end of section", + func->name); + return -1; + } + + if (func->embedded_insn || func->alias != func) + continue; + + if (!find_insn(file, sec, func->offset)) { + WARN("%s(): can't find starting instruction", + func->name); + return -1; + } + + sym_for_each_insn(file, func, insn) { + insn->sym = func; + if (func->type == STT_FUNC && + insn->type == INSN_ENDBR && + list_empty(&insn->call_node)) { + if (insn->offset == func->offset) { + list_add_tail(&insn->call_node, &file->endbr_list); + file->nr_endbr++; + } else { + file->nr_endbr_int++; + } + } + } + } + } + + if (opts.stats) + printf("nr_insns: %lu\n", nr_insns); + + return 0; +} diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 9bef3cbf6508..66a3eebb3dfd 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -157,8 +157,134 @@ static bool dead_end_function(struct objtool_file *file, struct symbol *func) return __dead_end_function(file, func, 0); } +static unsigned long nr_insns; static unsigned long nr_insns_visited; +/* + * Call the arch-specific instruction decoder for all the instructions and add + * them to the global instruction list. 
+ */ +static int decode_instructions(struct objtool_file *file) +{ + struct section *sec; + struct symbol *func; + unsigned long offset; + struct instruction *insn; + int ret; + + for_each_sec(file, sec) { + struct instruction *insns = NULL; + u8 prev_len = 0; + u8 idx = 0; + + if (!(sec->sh.sh_flags & SHF_EXECINSTR)) + continue; + + if (strcmp(sec->name, ".altinstr_replacement") && + strcmp(sec->name, ".altinstr_aux") && + strncmp(sec->name, ".discard.", 9)) + sec->text = true; + + if (!strcmp(sec->name, ".noinstr.text") || + !strcmp(sec->name, ".entry.text") || + !strcmp(sec->name, ".cpuidle.text") || + !strncmp(sec->name, ".text..__x86.", 13)) + sec->noinstr = true; + + /* + * .init.text code is ran before userspace and thus doesn't + * strictly need retpolines, except for modules which are + * loaded late, they very much do need retpoline in their + * .init.text + */ + if (!strcmp(sec->name, ".init.text") && !opts.module) + sec->init = true; + + for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { + if (!insns || idx == INSN_CHUNK_MAX) { + insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE); + if (!insns) { + WARN("malloc failed"); + return -1; + } + idx = 0; + } else { + idx++; + } + insn = &insns[idx]; + insn->idx = idx; + + INIT_LIST_HEAD(&insn->call_node); + insn->sec = sec; + insn->offset = offset; + insn->prev_len = prev_len; + + ret = arch_decode_instruction(file, sec, offset, + sec->sh.sh_size - offset, + insn); + if (ret) + return ret; + + prev_len = insn->len; + + /* + * By default, "ud2" is a dead end unless otherwise + * annotated, because GCC 7 inserts it for certain + * divide-by-zero cases. 
+ */ + if (insn->type == INSN_BUG) + insn->dead_end = true; + + hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); + nr_insns++; + } + +// printf("%s: last chunk used: %d\n", sec->name, (int)idx); + + sec_for_each_sym(sec, func) { + if (func->type != STT_NOTYPE && func->type != STT_FUNC) + continue; + + if (func->offset == sec->sh.sh_size) { + /* Heuristic: likely an "end" symbol */ + if (func->type == STT_NOTYPE) + continue; + WARN("%s(): STT_FUNC at end of section", + func->name); + return -1; + } + + if (func->embedded_insn || func->alias != func) + continue; + + if (!find_insn(file, sec, func->offset)) { + WARN("%s(): can't find starting instruction", + func->name); + return -1; + } + + sym_for_each_insn(file, func, insn) { + insn->sym = func; + if (func->type == STT_FUNC && + insn->type == INSN_ENDBR && + list_empty(&insn->call_node)) { + if (insn->offset == func->offset) { + list_add_tail(&insn->call_node, &file->endbr_list); + file->nr_endbr++; + } else { + file->nr_endbr_int++; + } + } + } + } + } + + if (opts.stats) + printf("nr_insns: %lu\n", nr_insns); + + return 0; +} + /* * Read the pv_ops[] .data table to find the static initialized values. */ diff --git a/tools/objtool/decode.c b/tools/objtool/decode.c deleted file mode 100644 index 59fea7e1d35b..000000000000 --- a/tools/objtool/decode.c +++ /dev/null @@ -1,136 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2015-2017 Josh Poimboeuf - */ -#include - -#include -#include -#include - -unsigned long nr_insns; - -/* - * Call the arch-specific instruction decoder for all the instructions and add - * them to the global instruction list. 
- */ -int decode_instructions(struct objtool_file *file) -{ - struct section *sec; - struct symbol *func; - unsigned long offset; - struct instruction *insn; - int ret; - - for_each_sec(file, sec) { - struct instruction *insns = NULL; - u8 prev_len = 0; - u8 idx = 0; - - if (!(sec->sh.sh_flags & SHF_EXECINSTR)) - continue; - - if (strcmp(sec->name, ".altinstr_replacement") && - strcmp(sec->name, ".altinstr_aux") && - strncmp(sec->name, ".discard.", 9)) - sec->text = true; - - if (!strcmp(sec->name, ".noinstr.text") || - !strcmp(sec->name, ".entry.text") || - !strcmp(sec->name, ".cpuidle.text") || - !strncmp(sec->name, ".text..__x86.", 13)) - sec->noinstr = true; - - /* - * .init.text code is ran before userspace and thus doesn't - * strictly need retpolines, except for modules which are - * loaded late, they very much do need retpoline in their - * .init.text - */ - if (!strcmp(sec->name, ".init.text") && !opts.module) - sec->init = true; - - for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { - if (!insns || idx == INSN_CHUNK_MAX) { - insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE); - if (!insns) { - WARN("malloc failed"); - return -1; - } - idx = 0; - } else { - idx++; - } - insn = &insns[idx]; - insn->idx = idx; - - INIT_LIST_HEAD(&insn->call_node); - insn->sec = sec; - insn->offset = offset; - insn->prev_len = prev_len; - - ret = arch_decode_instruction(file, sec, offset, - sec->sh.sh_size - offset, - insn); - if (ret) - return ret; - - prev_len = insn->len; - - /* - * By default, "ud2" is a dead end unless otherwise - * annotated, because GCC 7 inserts it for certain - * divide-by-zero cases. 
- */ - if (insn->type == INSN_BUG) - insn->dead_end = true; - - hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); - nr_insns++; - } - -// printf("%s: last chunk used: %d\n", sec->name, (int)idx); - - sec_for_each_sym(sec, func) { - if (func->type != STT_NOTYPE && func->type != STT_FUNC) - continue; - - if (func->offset == sec->sh.sh_size) { - /* Heuristic: likely an "end" symbol */ - if (func->type == STT_NOTYPE) - continue; - WARN("%s(): STT_FUNC at end of section", - func->name); - return -1; - } - - if (func->embedded_insn || func->alias != func) - continue; - - if (!find_insn(file, sec, func->offset)) { - WARN("%s(): can't find starting instruction", - func->name); - return -1; - } - - sym_for_each_insn(file, func, insn) { - insn->sym = func; - if (func->type == STT_FUNC && - insn->type == INSN_ENDBR && - list_empty(&insn->call_node)) { - if (insn->offset == func->offset) { - list_add_tail(&insn->call_node, &file->endbr_list); - file->nr_endbr++; - } else { - file->nr_endbr_int++; - } - } - } - } - } - - if (opts.stats) - printf("nr_insns: %lu\n", nr_insns); - - return 0; -} diff --git a/tools/objtool/include/objtool/insn.h b/tools/objtool/include/objtool/insn.h index d1e877095541..9bfb1fa93e3e 100644 --- a/tools/objtool/include/objtool/insn.h +++ b/tools/objtool/include/objtool/insn.h @@ -110,10 +110,10 @@ bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2, bool is_first_func_insn(struct objtool_file *file, struct instruction *insn, struct symbol *sym); -int decode_instructions(struct objtool_file *file); /* temporily add for revert */ #ifdef __aarch64__ int read_unwind_hints(struct objtool_file *file); +int decode_instructions(struct objtool_file *file); #endif #define sec_for_each_insn(file, _sec, insn) \ @@ -149,5 +149,4 @@ int read_unwind_hints(struct objtool_file *file); for (insn = next_insn_same_sec(file, insn); insn; \ insn = next_insn_same_sec(file, insn)) -extern unsigned long nr_insns; #endif /* _INSN_H */ 
-- Gitee From e529fc89ad07238acda8725738ac6a6ce5135031 Mon Sep 17 00:00:00 2001 From: Jia He Date: Wed, 30 Oct 2024 11:14:50 +0000 Subject: [PATCH 1699/2138] Revert "objtool: Reorganize instruction-related code" ANBZ: #11595 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ This reverts commit 9eacf42afe83bca4cba24403d0481dd957c8f2ed. To support livepatch features on other architectures, it's preferable to keep the Anolis livepatch code layout closely aligned with the upstream kernel. Therefore, we should revert the commits that reorganized the code specifically for arm64 livepatch support. Signed-off-by: Jia He Acked-by: Wardenjohn Reviewed-by: Qiao Ma Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4069 --- tools/objtool/Build | 1 - tools/objtool/arch/arm64/Build | 1 + tools/objtool/arch/arm64/decode.c | 1 - tools/objtool/{ => arch/arm64}/insn.c | 0 tools/objtool/check.c | 212 +++++++++++++++++++++++++- tools/objtool/include/objtool/check.h | 101 +++++++++++- tools/objtool/include/objtool/insn.h | 21 +++ 7 files changed, 333 insertions(+), 4 deletions(-) rename tools/objtool/{ => arch/arm64}/insn.c (100%) diff --git a/tools/objtool/Build b/tools/objtool/Build index 9d934bf1ceee..1b2cdac753ef 100644 --- a/tools/objtool/Build +++ b/tools/objtool/Build @@ -7,7 +7,6 @@ objtool-$(STATIC_CHECK) += special.o objtool-$(DYNAMIC_CHECK) += dcheck.o objtool-y += builtin-check.o objtool-y += cfi.o -objtool-y += insn.o objtool-y += elf.o objtool-y += objtool.o diff --git a/tools/objtool/arch/arm64/Build b/tools/objtool/arch/arm64/Build index 77619e7ef476..00e9a5566b71 100644 --- a/tools/objtool/arch/arm64/Build +++ b/tools/objtool/arch/arm64/Build @@ -1,3 +1,4 @@ objtool-y += decode.o objtool-y += orc.o objtool-y += unwind_hints.o +objtool-y += insn.o diff --git a/tools/objtool/arch/arm64/decode.c b/tools/objtool/arch/arm64/decode.c index 28a972eb9976..49d46feb6a8a 100644 --- 
a/tools/objtool/arch/arm64/decode.c +++ b/tools/objtool/arch/arm64/decode.c @@ -14,7 +14,6 @@ #include #include -#include #include #include #include diff --git a/tools/objtool/insn.c b/tools/objtool/arch/arm64/insn.c similarity index 100% rename from tools/objtool/insn.c rename to tools/objtool/arch/arm64/insn.c diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 66a3eebb3dfd..77099ea9e2aa 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -27,6 +27,103 @@ struct alternative { bool skip_orig; }; +struct instruction *find_insn(struct objtool_file *file, + struct section *sec, unsigned long offset) +{ + struct instruction *insn; + + hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) { + if (insn->sec == sec && insn->offset == offset) + return insn; + } + + return NULL; +} + +struct instruction *next_insn_same_sec(struct objtool_file *file, + struct instruction *insn) +{ + if (insn->idx == INSN_CHUNK_MAX) + return find_insn(file, insn->sec, insn->offset + insn->len); + + insn++; + if (!insn->len) + return NULL; + + return insn; +} + +static struct instruction *next_insn_same_func(struct objtool_file *file, + struct instruction *insn) +{ + struct instruction *next = next_insn_same_sec(file, insn); + struct symbol *func = insn_func(insn); + + if (!func) + return NULL; + + if (next && insn_func(next) == func) + return next; + + /* Check if we're already in the subfunction: */ + if (func == func->cfunc) + return NULL; + + /* Move to the subfunction: */ + return find_insn(file, func->cfunc->sec, func->cfunc->offset); +} + +static struct instruction *prev_insn_same_sec(struct objtool_file *file, + struct instruction *insn) +{ + if (insn->idx == 0) { + if (insn->prev_len) + return find_insn(file, insn->sec, insn->offset - insn->prev_len); + return NULL; + } + + return insn - 1; +} + +static struct instruction *prev_insn_same_sym(struct objtool_file *file, + struct instruction *insn) +{ + struct instruction *prev = 
prev_insn_same_sec(file, insn); + + if (prev && insn_func(prev) == insn_func(insn)) + return prev; + + return NULL; +} + +#define for_each_insn(file, insn) \ + for (struct section *__sec, *__fake = (struct section *)1; \ + __fake; __fake = NULL) \ + for_each_sec(file, __sec) \ + sec_for_each_insn(file, __sec, insn) + +#define func_for_each_insn(file, func, insn) \ + for (insn = find_insn(file, func->sec, func->offset); \ + insn; \ + insn = next_insn_same_func(file, insn)) + +#define sym_for_each_insn(file, sym, insn) \ + for (insn = find_insn(file, sym->sec, sym->offset); \ + insn && insn->offset < sym->offset + sym->len; \ + insn = next_insn_same_sec(file, insn)) + +#define sym_for_each_insn_continue_reverse(file, sym, insn) \ + for (insn = prev_insn_same_sec(file, insn); \ + insn && insn->offset >= sym->offset; \ + insn = prev_insn_same_sec(file, insn)) + +#define sec_for_each_insn_from(file, insn) \ + for (; insn; insn = next_insn_same_sec(file, insn)) + +#define sec_for_each_insn_continue(file, insn) \ + for (insn = next_insn_same_sec(file, insn); insn; \ + insn = next_insn_same_sec(file, insn)) + static inline struct symbol *insn_call_dest(struct instruction *insn) { if (insn->type == INSN_JUMP_DYNAMIC || @@ -157,6 +254,21 @@ static bool dead_end_function(struct objtool_file *file, struct symbol *func) return __dead_end_function(file, func, 0); } +static void init_insn_state(struct objtool_file *file, struct insn_state *state, + struct section *sec) +{ + memset(state, 0, sizeof(*state)); + init_cfi_state(&state->cfi); + + /* + * We need the full vmlinux for noinstr validation, otherwise we can + * not correctly determine insn_call_dest(insn)->sec (external symbols + * do not have a section). 
+ */ + if (opts.link && opts.noinstr && sec) + state->noinstr = sec->noinstr; +} + static unsigned long nr_insns; static unsigned long nr_insns_visited; @@ -362,6 +474,19 @@ static int init_pv_ops(struct objtool_file *file) return 0; } +static struct instruction *find_last_insn(struct objtool_file *file, + struct section *sec) +{ + struct instruction *insn = NULL; + unsigned int offset; + unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0; + + for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--) + insn = find_insn(file, sec, offset); + + return insn; +} + /* * Mark "ud2" instructions and manually annotated dead ends. */ @@ -1101,6 +1226,26 @@ __weak bool arch_is_embedded_insn(struct symbol *sym) return false; } +static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) +{ + struct reloc *reloc; + + if (insn->no_reloc) + return NULL; + + if (!file) + return NULL; + + reloc = find_reloc_by_dest_range(file->elf, insn->sec, + insn->offset, insn->len); + if (!reloc) { + insn->no_reloc = 1; + return NULL; + } + + return reloc; +} + static void remove_insn_ops(struct instruction *insn) { struct stack_op *op, *next; @@ -1260,6 +1405,24 @@ static void add_return_call(struct objtool_file *file, struct instruction *insn, list_add_tail(&insn->call_node, &file->return_thunk_list); } +static bool is_first_func_insn(struct objtool_file *file, + struct instruction *insn, struct symbol *sym) +{ + if (insn->offset == sym->offset) + return true; + + /* Allow direct CALL/JMP past ENDBR */ + if (opts.ibt) { + struct instruction *prev = prev_insn_same_sym(file, insn); + + if (prev && prev->type == INSN_ENDBR && + insn->offset == sym->offset + prev->len) + return true; + } + + return false; +} + /* * A sibling call is a tail-call to another symbol -- to differentiate from a * recursive tail-call which is to the same symbol. 
@@ -3031,6 +3194,53 @@ static int handle_insn_ops(struct instruction *insn, return 0; } +static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2) +{ + struct cfi_state *cfi1 = insn->cfi; + int i; + + if (!cfi1) { + WARN("CFI missing"); + return false; + } + + if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) { + + WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d", + cfi1->cfa.base, cfi1->cfa.offset, + cfi2->cfa.base, cfi2->cfa.offset); + + } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) { + for (i = 0; i < CFI_NUM_REGS; i++) { + if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], + sizeof(struct cfi_reg))) + continue; + + WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d", + i, cfi1->regs[i].base, cfi1->regs[i].offset, + i, cfi2->regs[i].base, cfi2->regs[i].offset); + break; + } + + } else if (cfi1->type != cfi2->type) { + + WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d", + cfi1->type, cfi2->type); + + } else if (cfi1->drap != cfi2->drap || + (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) || + (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) { + + WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)", + cfi1->drap, cfi1->drap_reg, cfi1->drap_offset, + cfi2->drap, cfi2->drap_reg, cfi2->drap_offset); + + } else + return true; + + return false; +} + static inline bool func_uaccess_safe(struct symbol *func) { if (func) @@ -3264,7 +3474,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, visited = VISITED_BRANCH << state.uaccess; if (insn->visited & VISITED_BRANCH_MASK) { - if (!insn->hint && !insn_cfi_match(insn, &state.cfi, true)) + if (!insn->hint && !insn_cfi_match(insn, &state.cfi)) return 1; if (insn->visited & visited) diff --git a/tools/objtool/include/objtool/check.h b/tools/objtool/include/objtool/check.h index 1f63eca11ddd..f389816e3fa2 100644 --- a/tools/objtool/include/objtool/check.h +++ b/tools/objtool/include/objtool/check.h 
@@ -6,8 +6,21 @@ #ifndef _CHECK_H #define _CHECK_H -#include +#ifdef __aarch64__ #include +#else +#include +#include +#include + +struct insn_state { + struct cfi_state cfi; + unsigned int uaccess_stack; + bool uaccess; + bool df; + bool noinstr; + s8 instr; +}; struct alt_group { /* @@ -26,9 +39,95 @@ struct alt_group { struct cfi_state **cfi; }; +#define INSN_CHUNK_BITS 8 +#define INSN_CHUNK_SIZE (1 << INSN_CHUNK_BITS) +#define INSN_CHUNK_MAX (INSN_CHUNK_SIZE - 1) + +struct instruction { + struct hlist_node hash; + struct list_head call_node; + struct section *sec; + unsigned long offset; + unsigned long immediate; + + u8 len; + u8 prev_len; + u8 type; + s8 instr; + + u32 idx : INSN_CHUNK_BITS, + dead_end : 1, + ignore : 1, + ignore_alts : 1, + hint : 1, + save : 1, + restore : 1, + retpoline_safe : 1, + noendbr : 1, + unret : 1, + visited : 4, + no_reloc : 1; + /* 10 bit hole */ + + struct alt_group *alt_group; + struct instruction *jump_dest; + struct instruction *first_jump_src; + union { + struct symbol *_call_dest; + struct reloc *_jump_table; + }; + struct alternative *alts; + struct symbol *sym; + struct stack_op *stack_ops; + struct cfi_state *cfi; +}; + +static inline struct symbol *insn_func(struct instruction *insn) +{ + struct symbol *sym = insn->sym; + + if (sym && sym->type != STT_FUNC) + sym = NULL; + + return sym; +} + #define VISITED_BRANCH 0x01 #define VISITED_BRANCH_UACCESS 0x02 #define VISITED_BRANCH_MASK 0x03 #define VISITED_UNRET 0x04 +static inline bool is_static_jump(struct instruction *insn) +{ + return insn->type == INSN_JUMP_CONDITIONAL || + insn->type == INSN_JUMP_UNCONDITIONAL; +} + +static inline bool is_dynamic_jump(struct instruction *insn) +{ + return insn->type == INSN_JUMP_DYNAMIC || + insn->type == INSN_JUMP_DYNAMIC_CONDITIONAL; +} + +static inline bool is_jump(struct instruction *insn) +{ + return is_static_jump(insn) || is_dynamic_jump(insn); +} + +struct instruction *find_insn(struct objtool_file *file, + struct section 
*sec, unsigned long offset); + +struct instruction *next_insn_same_sec(struct objtool_file *file, struct instruction *insn); + +#define sec_for_each_insn(file, _sec, insn) \ + for (insn = find_insn(file, _sec, 0); \ + insn && insn->sec == _sec; \ + insn = next_insn_same_sec(file, insn)) + +static inline bool insn_can_reloc(struct instruction *insn) +{ + return true; +} + +#endif /* endof !__aarch64__ */ #endif /* _CHECK_H */ diff --git a/tools/objtool/include/objtool/insn.h b/tools/objtool/include/objtool/insn.h index 9bfb1fa93e3e..b650cffaf7b1 100644 --- a/tools/objtool/include/objtool/insn.h +++ b/tools/objtool/include/objtool/insn.h @@ -6,6 +6,8 @@ #ifndef _INSN_H #define _INSN_H +/* This is an arm64 specific version for check.h */ +#ifdef __aarch64__ #include #include @@ -61,6 +63,23 @@ struct instruction { struct cfi_state *cfi; }; +struct alt_group { + /* + * Pointer from a replacement group to the original group. NULL if it + * *is* the original group. + */ + struct alt_group *orig_group; + + /* First and last instructions in the group */ + struct instruction *first_insn, *last_insn, *nop; + + /* + * Byte-offset-addressed len-sized array of pointers to CFI structs. + * This is shared with the other alt_groups in the same alternative. 
+ */ + struct cfi_state **cfi; +}; + static inline struct symbol *insn_func(struct instruction *insn) { struct symbol *sym = insn->sym; @@ -149,4 +168,6 @@ int decode_instructions(struct objtool_file *file); for (insn = next_insn_same_sec(file, insn); insn; \ insn = next_insn_same_sec(file, insn)) +extern unsigned long nr_insns; +#endif /* __aarch64__ */ #endif /* _INSN_H */ -- Gitee From f3b4dc8afb853e8b540957d8d09d41dc7bce9cf3 Mon Sep 17 00:00:00 2001 From: Jia He Date: Thu, 31 Oct 2024 04:10:10 +0000 Subject: [PATCH 1700/2138] Revert "objtool: Reorganize CFI code" ANBZ: #11595 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ This reverts commit 15dd22635b4a253a60cfa3f219293175740516aa. To support livepatch features on other architectures, it's preferable to keep the Anolis livepatch code layout closely aligned with the upstream kernel. Therefore, we should revert the commits that reorganized the code specifically for arm64 livepatch support. 
Signed-off-by: Jia He Acked-by: Wardenjohn Reviewed-by: Qiao Ma Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4069 --- tools/objtool/Build | 1 - tools/objtool/arch/arm64/Build | 1 + tools/objtool/{ => arch/arm64}/cfi.c | 1 + tools/objtool/arch/arm64/include/arch/cfi.h | 26 ++++++ tools/objtool/arch/arm64/insn.c | 1 + tools/objtool/arch/arm64/unwind_hints.c | 1 + tools/objtool/check.c | 98 +++++++++++++++++++++ tools/objtool/dcheck.c | 1 + tools/objtool/include/objtool/cfi.h | 13 --- tools/objtool/include/objtool/insn.h | 3 - 10 files changed, 129 insertions(+), 17 deletions(-) rename tools/objtool/{ => arch/arm64}/cfi.c (99%) create mode 100644 tools/objtool/arch/arm64/include/arch/cfi.h diff --git a/tools/objtool/Build b/tools/objtool/Build index 1b2cdac753ef..b71547b660ce 100644 --- a/tools/objtool/Build +++ b/tools/objtool/Build @@ -6,7 +6,6 @@ objtool-$(STATIC_CHECK) += check.o objtool-$(STATIC_CHECK) += special.o objtool-$(DYNAMIC_CHECK) += dcheck.o objtool-y += builtin-check.o -objtool-y += cfi.o objtool-y += elf.o objtool-y += objtool.o diff --git a/tools/objtool/arch/arm64/Build b/tools/objtool/arch/arm64/Build index 00e9a5566b71..8d2f99a5b1ab 100644 --- a/tools/objtool/arch/arm64/Build +++ b/tools/objtool/arch/arm64/Build @@ -2,3 +2,4 @@ objtool-y += decode.o objtool-y += orc.o objtool-y += unwind_hints.o objtool-y += insn.o +objtool-y += cfi.o diff --git a/tools/objtool/cfi.c b/tools/objtool/arch/arm64/cfi.c similarity index 99% rename from tools/objtool/cfi.c rename to tools/objtool/arch/arm64/cfi.c index bc3e216f1a94..753aa82d20c8 100644 --- a/tools/objtool/cfi.c +++ b/tools/objtool/arch/arm64/cfi.c @@ -8,6 +8,7 @@ #include #include +#include #include #include diff --git a/tools/objtool/arch/arm64/include/arch/cfi.h b/tools/objtool/arch/arm64/include/arch/cfi.h new file mode 100644 index 000000000000..55f7a988d824 --- /dev/null +++ b/tools/objtool/arch/arm64/include/arch/cfi.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: 
GPL-2.0-or-later */ +/* + * Copyright (C) 2015-2017 Josh Poimboeuf + */ + +#ifndef _OBJTOOL_ARCH_CFI_H +#define _OBJTOOL_ARCH_CFI_H + +#include + +#include + +void init_cfi_state(struct cfi_state *cfi); +bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2); +struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi); +void cfi_hash_add(struct cfi_state *cfi); +void *cfi_hash_alloc(unsigned long size); +void set_func_state(struct cfi_state *state); + +extern unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; +extern struct cfi_init_state initial_func_cfi; +extern struct cfi_state init_cfi; +extern struct cfi_state func_cfi; +extern struct cfi_state force_undefined_cfi; + +#endif /* _OBJTOOL_ARCH_CFI_H */ diff --git a/tools/objtool/arch/arm64/insn.c b/tools/objtool/arch/arm64/insn.c index b63ec049696a..b205868690c2 100644 --- a/tools/objtool/arch/arm64/insn.c +++ b/tools/objtool/arch/arm64/insn.c @@ -8,6 +8,7 @@ #include #include #include +#include struct instruction *find_insn(struct objtool_file *file, struct section *sec, unsigned long offset) diff --git a/tools/objtool/arch/arm64/unwind_hints.c b/tools/objtool/arch/arm64/unwind_hints.c index c59d259d0392..3e5364623def 100644 --- a/tools/objtool/arch/arm64/unwind_hints.c +++ b/tools/objtool/arch/arm64/unwind_hints.c @@ -8,6 +8,7 @@ #include #include #include +#include int read_unwind_hints(struct objtool_file *file) { diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 77099ea9e2aa..1b242c3c2d45 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -27,6 +27,13 @@ struct alternative { bool skip_orig; }; +static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; + +static struct cfi_init_state initial_func_cfi; +static struct cfi_state init_cfi; +static struct cfi_state func_cfi; +static struct cfi_state force_undefined_cfi; + struct instruction *find_insn(struct objtool_file *file, struct section *sec, unsigned long offset) { @@ -254,6 +261,19 @@ static bool 
dead_end_function(struct objtool_file *file, struct symbol *func) return __dead_end_function(file, func, 0); } +static void init_cfi_state(struct cfi_state *cfi) +{ + int i; + + for (i = 0; i < CFI_NUM_REGS; i++) { + cfi->regs[i].base = CFI_UNDEFINED; + cfi->vals[i].base = CFI_UNDEFINED; + } + cfi->cfa.base = CFI_UNDEFINED; + cfi->drap_reg = CFI_UNDEFINED; + cfi->drap_offset = -1; +} + static void init_insn_state(struct objtool_file *file, struct insn_state *state, struct section *sec) { @@ -269,6 +289,75 @@ static void init_insn_state(struct objtool_file *file, struct insn_state *state, state->noinstr = sec->noinstr; } +static struct cfi_state *cfi_alloc(void) +{ + struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1); + if (!cfi) { + WARN("calloc failed"); + exit(1); + } + nr_cfi++; + return cfi; +} + +static int cfi_bits; +static struct hlist_head *cfi_hash; + +static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2) +{ + return memcmp((void *)cfi1 + sizeof(cfi1->hash), + (void *)cfi2 + sizeof(cfi2->hash), + sizeof(struct cfi_state) - sizeof(struct hlist_node)); +} + +static inline u32 cfi_key(struct cfi_state *cfi) +{ + return jhash((void *)cfi + sizeof(cfi->hash), + sizeof(*cfi) - sizeof(cfi->hash), 0); +} + +static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi) +{ + struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; + struct cfi_state *obj; + + hlist_for_each_entry(obj, head, hash) { + if (!cficmp(cfi, obj)) { + nr_cfi_cache++; + return obj; + } + } + + obj = cfi_alloc(); + *obj = *cfi; + hlist_add_head(&obj->hash, head); + + return obj; +} + +static void cfi_hash_add(struct cfi_state *cfi) +{ + struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; + + hlist_add_head(&cfi->hash, head); +} + +static void *cfi_hash_alloc(unsigned long size) +{ + cfi_bits = max(10, ilog2(size)); + cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits, + PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANON, 
-1, 0); + if (cfi_hash == (void *)-1L) { + WARN("mmap fail cfi_hash"); + cfi_hash = NULL; + } else if (opts.stats) { + printf("cfi_bits: %d\n", cfi_bits); + } + + return cfi_hash; +} + static unsigned long nr_insns; static unsigned long nr_insns_visited; @@ -2103,6 +2192,15 @@ static int add_jump_table_alts(struct objtool_file *file) return 0; } +static void set_func_state(struct cfi_state *state) +{ + state->cfa = initial_func_cfi.cfa; + memcpy(&state->regs, &initial_func_cfi.regs, + CFI_NUM_REGS * sizeof(struct cfi_reg)); + state->stack_size = initial_func_cfi.cfa.offset; + state->type = UNWIND_HINT_TYPE_CALL; +} + static int read_unwind_hints(struct objtool_file *file) { struct cfi_state cfi = init_cfi; diff --git a/tools/objtool/dcheck.c b/tools/objtool/dcheck.c index 39dcd0a30f46..a4c342bf697d 100644 --- a/tools/objtool/dcheck.c +++ b/tools/objtool/dcheck.c @@ -11,6 +11,7 @@ #include #include #include +#include /* * Find the destination instructions for all jumps. diff --git a/tools/objtool/include/objtool/cfi.h b/tools/objtool/include/objtool/cfi.h index 557366799315..c8a6bec4f6b9 100644 --- a/tools/objtool/include/objtool/cfi.h +++ b/tools/objtool/include/objtool/cfi.h @@ -39,17 +39,4 @@ struct cfi_state { bool force_undefined; }; -void init_cfi_state(struct cfi_state *cfi); -bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2); -struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi); -void cfi_hash_add(struct cfi_state *cfi); -void *cfi_hash_alloc(unsigned long size); -void set_func_state(struct cfi_state *state); - -extern unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; -extern struct cfi_init_state initial_func_cfi; -extern struct cfi_state init_cfi; -extern struct cfi_state func_cfi; -extern struct cfi_state force_undefined_cfi; - #endif /* _OBJTOOL_CFI_H */ diff --git a/tools/objtool/include/objtool/insn.h b/tools/objtool/include/objtool/insn.h index b650cffaf7b1..36922da5ccc7 100644 --- a/tools/objtool/include/objtool/insn.h +++ 
b/tools/objtool/include/objtool/insn.h @@ -129,11 +129,8 @@ bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2, bool is_first_func_insn(struct objtool_file *file, struct instruction *insn, struct symbol *sym); -/* temporily add for revert */ -#ifdef __aarch64__ int read_unwind_hints(struct objtool_file *file); int decode_instructions(struct objtool_file *file); -#endif #define sec_for_each_insn(file, _sec, insn) \ for (insn = find_insn(file, _sec, 0); \ -- Gitee From 6bce792fb7d155c09be7b222748177bf7e12c204 Mon Sep 17 00:00:00 2001 From: "Madhavan T. Venkataraman" Date: Sun, 29 Jan 2023 15:42:10 -0600 Subject: [PATCH 1701/2138] arm64: Enable livepatch for ARM64 ANBZ: #11595 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Enable livepatch again in arch/arm64/Kconfig since the code reorganization has been completed. Signed-off-by: Jia He Acked-by: Wardenjohn Reviewed-by: Qiao Ma Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4069 --- arch/arm64/Kconfig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index ea02335e54dd..c0db32c45f57 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -174,7 +174,7 @@ config ARM64 select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT select HAVE_ARCH_PREL32_RELOCATIONS - select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET + select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET if !HAVE_LIVEPATCH select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_STACKLEAK select HAVE_ARCH_THREAD_STRUCT_WHITELIST @@ -259,6 +259,8 @@ config ARM64 select HAVE_SOFTIRQ_ON_OWN_STACK select HAVE_STACK_VALIDATION if FRAME_POINTER_VALIDATION select STACK_VALIDATION if HAVE_STACK_VALIDATION + select HAVE_RELIABLE_STACKTRACE if STACK_VALIDATION + select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_ARGS && HAVE_RELIABLE_STACKTRACE help ARM 64-bit (AArch64) Linux support. 
@@ -2406,3 +2408,4 @@ source "drivers/acpi/Kconfig" source "arch/arm64/kvm/Kconfig" +source "kernel/livepatch/Kconfig" -- Gitee From 2242b6510bc8fa10f34a974a8597bf8752d4966f Mon Sep 17 00:00:00 2001 From: Jia He Date: Fri, 13 Sep 2024 11:50:30 +0000 Subject: [PATCH 1702/2138] anolis: configs: Enable CONFIG_LIVEPATCH for arm64 ANBZ: #11595 cherry-picked from https://lore.kernel.org/lkml/20230202074036.507249-1-madvenka@linux.microsoft.com/ Enable CONFIG_LIVEPATCH and its dependency configs again for anolis_defconfig and anolis-debug_defconfig on arm64 since the code reorganization has been completed. Signed-off-by: Jia He Acked-by: Wardenjohn Reviewed-by: Qiao Ma Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4069 --- .../configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION | 1 + .../L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH | 1 + .../configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET | 1 + .../L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH | 1 + 13 files changed, 13 insertions(+) create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE create mode 100644 
anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION new file mode 100644 index 000000000000..cc041e559182 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FRAME_POINTER_VALIDATION @@ -0,0 +1 @@ +CONFIG_FRAME_POINTER_VALIDATION=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET new file mode 100644 index 000000000000..c7daa4f60d5d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET @@ -0,0 +1 @@ +# CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH new file mode 100644 index 000000000000..7ebdb924703e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH @@ -0,0 +1 @@ +CONFIG_HAVE_LIVEPATCH=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE new file mode 100644 index 000000000000..2ce8faabc4cf --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE @@ -0,0 +1 @@ 
+CONFIG_HAVE_RELIABLE_STACKTRACE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION new file mode 100644 index 000000000000..6f36a32d84ae --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION @@ -0,0 +1 @@ +CONFIG_HAVE_STACK_VALIDATION=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH new file mode 100644 index 000000000000..1b05d0d1a109 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH @@ -0,0 +1 @@ +CONFIG_LIVEPATCH=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL new file mode 100644 index 000000000000..cf3a9f20f93d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL @@ -0,0 +1 @@ +CONFIG_OBJTOOL=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET new file mode 100644 index 000000000000..759cb13e424c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET @@ -0,0 +1 @@ +# CONFIG_RANDOMIZE_KSTACK_OFFSET is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT new file mode 100644 index 000000000000..d680659c1703 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT @@ -0,0 +1 @@ +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION new file mode 100644 index 000000000000..e335fefdd9be --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_STACK_VALIDATION @@ -0,0 +1 @@ +CONFIG_STACK_VALIDATION=y diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER new file mode 100644 index 000000000000..0938fde11ffe --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER @@ -0,0 +1 @@ +CONFIG_UNWINDER_FRAME_POINTER=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC new file mode 100644 index 000000000000..6b6908419acb --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC @@ -0,0 +1 @@ +CONFIG_UNWINDER_ORC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH new file mode 100644 index 000000000000..0dd7700464a8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH @@ -0,0 +1 @@ +CONFIG_TEST_LIVEPATCH=m -- Gitee From 83b655c2cb4f9ef8e250f4b45fb34be5bfd2653f Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Mon, 18 Nov 2024 11:32:34 +0800 Subject: [PATCH 1703/2138] anolis: configs: ensure CONFIG_RETPOLINE is enabled ANBZ: #11914 CONFIG_RETPOLINE must be enabled to avoid speculative indirect branches triggers kernel crash. 
Signed-off-by: Qiao Ma Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4134 --- anolis/configs/examination/EXTRA/x86.config | 3 +++ 1 file changed, 3 insertions(+) diff --git a/anolis/configs/examination/EXTRA/x86.config b/anolis/configs/examination/EXTRA/x86.config index 66f96bae3a6c..9a309ef17a84 100644 --- a/anolis/configs/examination/EXTRA/x86.config +++ b/anolis/configs/examination/EXTRA/x86.config @@ -16,3 +16,6 @@ CONFIG_DEV_DAX_KMEM=m ## (ANBZ#10820) CONFIG_FCOE=m + +## (ANBZ#11914) +CONFIG_RETPOLINE=y -- Gitee From a59cc0362b0035c9d2abf41c1e3a76e06e2f4015 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Wed, 24 Jan 2024 16:40:14 +0800 Subject: [PATCH 1704/2138] mm/memory-failure: fix crash in split_huge_page_to_list from soft_offline_page ANBZ: #11573 commit 2fde9e7f9e6dc38e1d7091b9705c22be945c8697 upstream. When I did soft offline stress test, a machine was observed to crash with the following message: kernel BUG at include/linux/memcontrol.h:554! invalid opcode: 0000 [#1] PREEMPT SMP NOPTI CPU: 5 PID: 3837 Comm: hwpoison.sh Not tainted 6.7.0-next-20240112-00001-g8ecf3e7fb7c8-dirty #97 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014 RIP: 0010:folio_memcg+0xaf/0xd0 Code: 10 5b 5d c3 cc cc cc cc 48 c7 c6 08 b1 f2 b2 48 89 ef e8 b4 c5 f8 ff 90 0f 0b 48 c7 c6 d0 b0 f2 b2 48 89 ef e8 a2 c5 f8 ff 90 <0f> 0b 48 c7 c6 08 b1 f2 b2 48 89 ef e8 90 c5 f8 ff 90 0f 0b 66 66 RSP: 0018:ffffb6c043657c98 EFLAGS: 00000296 RAX: 000000000000004b RBX: ffff932bc1d1e401 RCX: ffff933abfb5c908 RDX: 0000000000000000 RSI: 0000000000000027 RDI: ffff933abfb5c900 RBP: ffffea6f04019080 R08: ffffffffb3338ce8 R09: 0000000000009ffb R10: 00000000000004dd R11: ffffffffb3308d00 R12: ffffea6f04019080 R13: ffffea6f04019080 R14: 0000000000000001 R15: ffffb6c043657da0 FS: 00007f6c60f6b740(0000) GS:ffff933abfb40000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 
0000559c3bc8b980 CR3: 0000000107f1c000 CR4: 00000000000006f0 Call Trace: split_huge_page_to_list+0x4d/0x1380 try_to_split_thp_page+0x3a/0xf0 soft_offline_page+0x1ea/0x8a0 soft_offline_page_store+0x52/0x90 kernfs_fop_write_iter+0x118/0x1b0 vfs_write+0x30b/0x430 ksys_write+0x5e/0xe0 do_syscall_64+0xb0/0x1b0 entry_SYSCALL_64_after_hwframe+0x6d/0x75 RIP: 0033:0x7f6c60d14697 Code: 10 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 01 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 48 89 54 24 18 48 89 74 24 RSP: 002b:00007ffe9b72b8d8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 RAX: ffffffffffffffda RBX: 000000000000000c RCX: 00007f6c60d14697 RDX: 000000000000000c RSI: 0000559c3bc8b980 RDI: 0000000000000001 RBP: 0000559c3bc8b980 R08: 00007f6c60dd1460 R09: 000000007fffffff R10: 0000000000000000 R11: 0000000000000246 R12: 000000000000000c R13: 00007f6c60e1a780 R14: 00007f6c60e16600 R15: 00007f6c60e15a00 The problem is that page->mapping is overloaded with slab->slab_list or slabs fields now, so slab pages could be taken as non-LRU movable pages if field slabs contains PAGE_MAPPING_MOVABLE or slab_list->prev is set to LIST_POISON2. These slab pages will be treated as thp later leading to crash in split_huge_page_to_list(). 
Link: https://lkml.kernel.org/r/20240126065837.2100184-1-linmiaohe@huawei.com Link: https://lkml.kernel.org/r/20240124084014.1772906-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Fixes: 130d4df57390 ("mm/sl[au]b: rearrange struct slab fields to allow larger rcu_head") Reviewed-by: Matthew Wilcox (Oracle) Cc: Miaohe Lin Cc: Naoya Horiguchi Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Shawn Wang Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4141 --- mm/memory-failure.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 84a0b0cf7c92..60b9b55821a3 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1384,6 +1384,9 @@ void ClearPageHWPoisonTakenOff(struct page *page) */ static inline bool HWPoisonHandlable(struct page *page, unsigned long flags) { + if (PageSlab(page)) + return false; + /* Soft offline could migrate non-LRU movable pages */ if ((flags & MF_SOFT_OFFLINE) && __PageMovable(page)) return true; -- Gitee From c08ecc6aa7ad2849e1159848ae5339dedfed5657 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sun, 20 Oct 2024 16:42:50 +0800 Subject: [PATCH 1705/2138] anolis: KVM: SVM: Correct asid range for Hygon CSV2,CSV3 guests when validate it ANBZ: #11597 The commit ab7a6fe9c1b5 ("KVM: SVM: Add support for allowing zero SEV ASIDs") change the code in sev_asid_new(), this commit first check if the asid range to search is valid, this can work because CPUID_Fn8000_001F_EDX limit the asid that can be used for SEV-ES. For Hygon CPUs, the CPUID_Fn8000_001F_EDX only means the minimum value of the ASID that can be used for CSV, the asid in [1, CPUID_Fn8000_001F_ECX] can be used for CSV2 and CSV3. 
Fixes: ab7a6fe9c1b5 ("KVM: SVM: Add support for allowing zero SEV ASIDs") Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4056 --- arch/x86/kvm/svm/sev.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index f2710d7ed7ed..36ea5d0c0a65 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -163,6 +163,13 @@ static int sev_asid_new(struct kvm_sev_info *sev) bool retry = true; int ret; + /* + * No matter what the min_sev_asid is, all asids in range + * [1, max_sev_asid] can be used for CSV2 guest on Hygon CPUs. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + max_asid = max_sev_asid; + if (min_asid > max_asid) return -ENOTTY; @@ -205,12 +212,6 @@ static int sev_asid_new(struct kvm_sev_info *sev) } #endif - /* - * No matter what the min_sev_asid is, all asids in range - * [1, max_sev_asid] can be used for CSV2 guest on Hygon CPUs. - */ - if (is_x86_vendor_hygon()) - max_asid = max_sev_asid; again: asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid); if (asid > max_asid) { -- Gitee From 495c999d2c658db0c4b924ea941c75084bdd59b1 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Wed, 7 Aug 2024 16:06:34 +0800 Subject: [PATCH 1706/2138] anolis: KVM: SVM: CSV: Explicitly enable LBR Virtualization after succeed to RECEIVE_UPDATE_VMSA ANBZ: #11597 Before the commit 834aa2c34b8f ("KVM: SEV-ES: Delegate LBR virtualization to the processor"), the LBR Virtualization is enabled during init VMCB: init_vmcb() -> sev_init_vmcb() -> sev_es_init_vmcb() While the commit 834aa2c34b8f ("KVM: SEV-ES: Delegate LBR virtualization to the processor") enable LBR Virtualization after succeed to LAUNCH_UPDATE_VMSA for each vCPUs. The process to enable LBR Virtualization will not be executed in common code path. 
To ensure the CSV2 guest to work properly after migrated to target machine, we should explicitly to enable LBR Virtualization after succeed to RECEIVE_UPDATE_VMSA for each vCPUs. Fixes: 834aa2c34b8f ("KVM: SEV-ES: Delegate LBR virtualization to the processor") Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4056 --- arch/x86/kvm/svm/csv.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index f9d675aaa917..de6814c7cc7c 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -789,9 +789,19 @@ static int csv_receive_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_VMSA, vmsa, &argp->error); - if (!ret) + if (!ret) { vcpu->arch.guest_state_protected = true; + /* + * CSV2 guest mandates LBR Virtualization to be _always_ ON. + * Enable it only after setting guest_state_protected because + * KVM_SET_MSRS allows dynamic toggling of LBRV (for performance + * reason) on write access to MSR_IA32_DEBUGCTLMSR when + * guest_state_protected is not set. + */ + svm_enable_lbrv(vcpu); + } + kfree(vmsa); e_free_trans: kfree(trans); -- Gitee From fecf2919d25e2b72c339ed4705559f4a61d8010a Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 2 Sep 2024 16:20:05 +0800 Subject: [PATCH 1707/2138] anolis: KVM: SVM: CSV: Explicitly enable LBR Virtualization after succeed to LAUNCH_ENCRYPT_VMCB ANBZ: #11597 Before the commit 834aa2c34b8f ("KVM: SEV-ES: Delegate LBR virtualization to the processor"), the LBR Virtualization is enabled during init VMCB: init_vmcb() -> sev_init_vmcb() -> sev_es_init_vmcb() While the commit 834aa2c34b8f ("KVM: SEV-ES: Delegate LBR virtualization to the processor") enable LBR Virtualization after succeed to LAUNCH_UPDATE_VMSA for each vCPUs. The process to enable LBR Virtualization will not be executed in common code path. 
To ensure the CSV3 guest works properly, we should explicitly enable LBR Virtualization after LAUNCH_ENCRYPT_VMCB succeeds for each vCPU. Fixes: 834aa2c34b8f ("KVM: SEV-ES: Delegate LBR virtualization to the processor") Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4056 --- arch/x86/kvm/svm/csv.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index de6814c7cc7c..f838355a58eb 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -1259,6 +1259,15 @@ static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) svm->current_vmcb->pa = encrypt_vmcb->secure_vmcb_addr; svm->vcpu.arch.guest_state_protected = true; + + /* + * CSV3 guest mandates LBR Virtualization to be _always_ ON. + * Enable it only after setting guest_state_protected because + * KVM_SET_MSRS allows dynamic toggling of LBRV (for performance + * reason) on write access to MSR_IA32_DEBUGCTLMSR when + * guest_state_protected is not set. + */ + svm_enable_lbrv(vcpu); } e_free: -- Gitee From 5673163c79757e6baf805e48bb369cae2ac41f5a Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 3 Sep 2024 15:10:20 +0800 Subject: [PATCH 1708/2138] anolis: KVM: SVM: CSV: Explicitly enable LBR Virtualization after succeed to RECEIVE_ENCRYPT_CONTEXT ANBZ: #11597 Before the commit 834aa2c34b8f ("KVM: SEV-ES: Delegate LBR virtualization to the processor"), the LBR Virtualization is enabled during init VMCB: init_vmcb() -> sev_init_vmcb() -> sev_es_init_vmcb() While the commit 834aa2c34b8f ("KVM: SEV-ES: Delegate LBR virtualization to the processor") enables LBR Virtualization after LAUNCH_UPDATE_VMSA succeeds for each vCPU. The process to enable LBR Virtualization will not be executed in the common code path. 
To ensure the CSV3 guest works properly after being migrated to the target machine, we should explicitly enable LBR Virtualization after RECEIVE_ENCRYPT_CONTEXT succeeds for each vCPU. Fixes: 834aa2c34b8f ("KVM: SEV-ES: Delegate LBR virtualization to the processor") Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4056 --- arch/x86/kvm/svm/csv.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index f838355a58eb..f6e8a97678af 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -1794,6 +1794,15 @@ static int csv3_receive_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *arg svm->current_vmcb->pa = secure_vmcb_block->vmcb_paddr[i]; svm->vcpu.arch.guest_state_protected = true; + + /* + * CSV3 guest mandates LBR Virtualization to be _always_ ON. + * Enable it only after setting guest_state_protected because + * KVM_SET_MSRS allows dynamic toggling of LBRV (for performance + * reason) on write access to MSR_IA32_DEBUGCTLMSR when + * guest_state_protected is not set. + */ + svm_enable_lbrv(vcpu); } e_free_shadow_vmcb_block: -- Gitee From 2fa1ae227f98b2cc0e7f9555ac947f6f7b9c8f70 Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Thu, 8 Feb 2024 18:37:48 -0800 Subject: [PATCH 1709/2138] bpf: Move __kfunc_param_match_suffix() to btf.c. ANBZ: #11946 commit 6115a0aeef01aef152ad7738393aad11422bfb82 upstream. Move __kfunc_param_match_suffix() to btf.c and rename it as btf_param_match_suffix(). It can be reused by bpf_struct_ops later. 
Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240209023750.1153905-3-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4145 --- include/linux/btf.h | 4 ++++ kernel/bpf/btf.c | 18 ++++++++++++++++++ kernel/bpf/verifier.c | 34 ++++++++-------------------------- 3 files changed, 30 insertions(+), 26 deletions(-) diff --git a/include/linux/btf.h b/include/linux/btf.h index 928113a80a95..4c3b97b3136b 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -482,6 +482,10 @@ static inline void *btf_id_set8_contains(const struct btf_id_set8 *set, u32 id) return bsearch(&id, set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func); } +bool btf_param_match_suffix(const struct btf *btf, + const struct btf_param *arg, + const char *suffix); + struct bpf_verifier_log; #ifdef CONFIG_BPF_SYSCALL diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 14361b3b9edd..e713320c7c10 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -8599,3 +8599,21 @@ bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log, return !strncmp(reg_name, arg_name, cmp_len); } + +bool btf_param_match_suffix(const struct btf *btf, + const struct btf_param *arg, + const char *suffix) +{ + int suffix_len = strlen(suffix), len; + const char *param_name; + + /* In the future, this can be ported to use BTF tagging */ + param_name = btf_name_by_offset(btf, arg->name_off); + if (str_is_empty(param_name)) + return false; + len = strlen(param_name); + if (len <= suffix_len) + return false; + param_name += len - suffix_len; + return !strncmp(param_name, suffix, suffix_len); +} diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d6a4102312fa..7c07568d5e35 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -10456,24 +10456,6 @@ static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta) return meta->kfunc_flags & KF_RCU; } -static bool __kfunc_param_match_suffix(const struct 
btf *btf, - const struct btf_param *arg, - const char *suffix) -{ - int suffix_len = strlen(suffix), len; - const char *param_name; - - /* In the future, this can be ported to use BTF tagging */ - param_name = btf_name_by_offset(btf, arg->name_off); - if (str_is_empty(param_name)) - return false; - len = strlen(param_name); - if (len < suffix_len) - return false; - param_name += len - suffix_len; - return !strncmp(param_name, suffix, suffix_len); -} - static bool is_kfunc_arg_mem_size(const struct btf *btf, const struct btf_param *arg, const struct bpf_reg_state *reg) @@ -10484,7 +10466,7 @@ static bool is_kfunc_arg_mem_size(const struct btf *btf, if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) return false; - return __kfunc_param_match_suffix(btf, arg, "__sz"); + return btf_param_match_suffix(btf, arg, "__sz"); } static bool is_kfunc_arg_const_mem_size(const struct btf *btf, @@ -10497,37 +10479,37 @@ static bool is_kfunc_arg_const_mem_size(const struct btf *btf, if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) return false; - return __kfunc_param_match_suffix(btf, arg, "__szk"); + return btf_param_match_suffix(btf, arg, "__szk"); } static bool is_kfunc_arg_optional(const struct btf *btf, const struct btf_param *arg) { - return __kfunc_param_match_suffix(btf, arg, "__opt"); + return btf_param_match_suffix(btf, arg, "__opt"); } static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg) { - return __kfunc_param_match_suffix(btf, arg, "__k"); + return btf_param_match_suffix(btf, arg, "__k"); } static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg) { - return __kfunc_param_match_suffix(btf, arg, "__ign"); + return btf_param_match_suffix(btf, arg, "__ign"); } static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg) { - return __kfunc_param_match_suffix(btf, arg, "__alloc"); + return btf_param_match_suffix(btf, arg, "__alloc"); } static bool is_kfunc_arg_uninit(const 
struct btf *btf, const struct btf_param *arg) { - return __kfunc_param_match_suffix(btf, arg, "__uninit"); + return btf_param_match_suffix(btf, arg, "__uninit"); } static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg) { - return __kfunc_param_match_suffix(btf, arg, "__refcounted_kptr"); + return btf_param_match_suffix(btf, arg, "__refcounted_kptr"); } static bool is_kfunc_arg_scalar_with_name(const struct btf *btf, -- Gitee From 8fd54e55808a8342033489cdfc66cf27678d4639 Mon Sep 17 00:00:00 2001 From: Philo Lu Date: Wed, 11 Sep 2024 11:37:15 +0800 Subject: [PATCH 1710/2138] bpf: Support __nullable argument suffix for tp_btf ANBZ: #11946 commit 8aeaed21befc90f27f4fca6dd190850d97d2e9e3 upstream. Pointers passed to tp_btf were trusted to be valid, but some tracepoints do take NULL pointer as input, such as trace_tcp_send_reset(). Then the invalid memory access cannot be detected by verifier. This patch fix it by add a suffix "__nullable" to the unreliable argument. The suffix is shown in btf, and PTR_MAYBE_NULL will be added to nullable arguments. Then users must check the pointer before use it. A problem here is that we use "btf_trace_##call" to search func_proto. As it is a typedef, argument names as well as the suffix are not recorded. To solve this, I use bpf_raw_event_map to find "__bpf_trace##template" from "btf_trace_##call", and then we can see the suffix. 
Suggested-by: Alexei Starovoitov Signed-off-by: Philo Lu Link: https://lore.kernel.org/r/20240911033719.91468-2-lulie@linux.alibaba.com Signed-off-by: Martin KaFai Lau Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4145 --- kernel/bpf/btf.c | 3 +++ kernel/bpf/verifier.c | 36 ++++++++++++++++++++++++++++++++---- 2 files changed, 35 insertions(+), 4 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index e713320c7c10..eb473839d721 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6075,6 +6075,9 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, if (prog_args_trusted(prog)) info->reg_type |= PTR_TRUSTED; + if (btf_param_match_suffix(btf, &args[arg], "__nullable")) + info->reg_type |= PTR_MAYBE_NULL; + if (tgt_prog) { enum bpf_prog_type tgt_type; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7c07568d5e35..9a2cdddb5beb 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include "disasm.h" @@ -19807,11 +19809,13 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, struct bpf_attach_target_info *tgt_info) { bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; + char trace_symbol[KSYM_SYMBOL_LEN]; const char prefix[] = "btf_trace_"; + struct bpf_raw_event_map *btp; int ret = 0, subprog = -1, i; const struct btf_type *t; bool conservative = true; - const char *tname; + const char *tname, *fname; struct btf *btf; long addr = 0; struct module *mod = NULL; @@ -19926,10 +19930,34 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, return -EINVAL; } tname += sizeof(prefix) - 1; - t = btf_type_by_id(btf, t->type); - if (!btf_type_is_ptr(t)) - /* should never happen in valid vmlinux build */ + + /* The func_proto of "btf_trace_##tname" is generated from typedef without argument + * names. Thus using bpf_raw_event_map to get argument names. 
+ */ + btp = bpf_get_raw_tracepoint(tname); + if (!btp) return -EINVAL; + fname = kallsyms_lookup((unsigned long)btp->bpf_func, NULL, NULL, NULL, + trace_symbol); + bpf_put_raw_tracepoint(btp); + + if (fname) + ret = btf_find_by_name_kind(btf, fname, BTF_KIND_FUNC); + + if (!fname || ret < 0) { + bpf_log(log, "Cannot find btf of tracepoint template, fall back to %s%s.\n", + prefix, tname); + t = btf_type_by_id(btf, t->type); + if (!btf_type_is_ptr(t)) + /* should never happen in valid vmlinux build */ + return -EINVAL; + } else { + t = btf_type_by_id(btf, ret); + if (!btf_type_is_func(t)) + /* should never happen in valid vmlinux build */ + return -EINVAL; + } + t = btf_type_by_id(btf, t->type); if (!btf_type_is_func_proto(t)) /* should never happen in valid vmlinux build */ -- Gitee From e03f325e1e74e9c6c71c685a5f50e45fb4229063 Mon Sep 17 00:00:00 2001 From: Philo Lu Date: Wed, 11 Sep 2024 11:37:16 +0800 Subject: [PATCH 1711/2138] selftests/bpf: Add test for __nullable suffix in tp_btf ANBZ: #11946 commit 2060f07f861a237345922023e9347a204c0795af upstream. 
Add a tracepoint with __nullable suffix in bpf_testmod, and add cases for it: $ ./test_progs -t "tp_btf_nullable" #406/1 tp_btf_nullable/handle_tp_btf_nullable_bare1:OK #406/2 tp_btf_nullable/handle_tp_btf_nullable_bare2:OK #406 tp_btf_nullable:OK Summary: 1/2 PASSED, 0 SKIPPED, 0 FAILED Signed-off-by: Philo Lu Link: https://lore.kernel.org/r/20240911033719.91468-3-lulie@linux.alibaba.com Signed-off-by: Martin KaFai Lau Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4145 --- .../bpf/bpf_testmod/bpf_testmod-events.h | 6 +++++ .../selftests/bpf/bpf_testmod/bpf_testmod.c | 2 ++ .../bpf/prog_tests/tp_btf_nullable.c | 14 +++++++++++ .../bpf/progs/test_tp_btf_nullable.c | 24 +++++++++++++++++++ 4 files changed, 46 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c create mode 100644 tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h index 11ee801e75e7..6c3b4d4f173a 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h @@ -34,6 +34,12 @@ DECLARE_TRACE(bpf_testmod_test_write_bare, TP_ARGS(task, ctx) ); +/* Used in bpf_testmod_test_read() to test __nullable suffix */ +DECLARE_TRACE(bpf_testmod_test_nullable_bare, + TP_PROTO(struct bpf_testmod_test_read_ctx *ctx__nullable), + TP_ARGS(ctx__nullable) +); + #undef BPF_TESTMOD_DECLARE_TRACE #ifdef DECLARE_TRACE_WRITABLE #define BPF_TESTMOD_DECLARE_TRACE(call, proto, args, size) \ diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 2e8adf059fa3..3c5b5669bceb 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -282,6 +282,8 @@ bpf_testmod_test_read(struct file *file, struct kobject 
*kobj, if (bpf_testmod_loop_test(101) > 100) trace_bpf_testmod_test_read(current, &ctx); + trace_bpf_testmod_test_nullable_bare(NULL); + /* Magic number to enable writable tp */ if (len == 64) { struct bpf_testmod_test_writable_ctx writable = { diff --git a/tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c b/tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c new file mode 100644 index 000000000000..accc42e01f8a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tp_btf_nullable.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include "test_tp_btf_nullable.skel.h" + +void test_tp_btf_nullable(void) +{ + if (!env.has_testmod) { + test__skip(); + return; + } + + RUN_TESTS(test_tp_btf_nullable); +} diff --git a/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c new file mode 100644 index 000000000000..bba3e37f749b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_tp_btf_nullable.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include +#include +#include "../bpf_testmod/bpf_testmod.h" +#include "bpf_misc.h" + +SEC("tp_btf/bpf_testmod_test_nullable_bare") +__failure __msg("R1 invalid mem access 'trusted_ptr_or_null_'") +int BPF_PROG(handle_tp_btf_nullable_bare1, struct bpf_testmod_test_read_ctx *nullable_ctx) +{ + return nullable_ctx->len; +} + +SEC("tp_btf/bpf_testmod_test_nullable_bare") +int BPF_PROG(handle_tp_btf_nullable_bare2, struct bpf_testmod_test_read_ctx *nullable_ctx) +{ + if (nullable_ctx) + return nullable_ctx->len; + return 0; +} + +char _license[] SEC("license") = "GPL"; -- Gitee From da513423c84a8def36628c5004824d0fb9d94604 Mon Sep 17 00:00:00 2001 From: Philo Lu Date: Wed, 11 Sep 2024 11:37:17 +0800 Subject: [PATCH 1712/2138] tcp: Use skb__nullable in trace_tcp_send_reset ANBZ: #11946 commit edd3f6f7588c713477e1299c38c84dcd91a7f148 upstream. Replace skb with skb__nullable as the argument name. 
The suffix tells bpf verifier through btf that the arg could be NULL and should be checked in tp_btf prog. For now, this is the only nullable argument in tcp tracepoints. Signed-off-by: Philo Lu Acked-by: Jakub Kicinski Link: https://lore.kernel.org/r/20240911033719.91468-4-lulie@linux.alibaba.com Signed-off-by: Martin KaFai Lau [backport: trace_tcp_send_reset implementation different from upstream] Signed-off-by: Philo Lu Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4145 --- include/trace/events/tcp.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index 7b1ddffa3dfc..1bd27cdb4b87 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -108,9 +108,9 @@ DEFINE_EVENT(tcp_event_sk_skb, tcp_retransmit_skb, */ DEFINE_EVENT(tcp_event_sk_skb, tcp_send_reset, - TP_PROTO(const struct sock *sk, const struct sk_buff *skb), + TP_PROTO(const struct sock *sk, const struct sk_buff *skb__nullable), - TP_ARGS(sk, skb) + TP_ARGS(sk, skb__nullable) ); /* -- Gitee From 208471302bea626ff899432f4c71027ad40f8468 Mon Sep 17 00:00:00 2001 From: Philo Lu Date: Wed, 11 Sep 2024 11:37:18 +0800 Subject: [PATCH 1713/2138] bpf: Allow bpf_dynptr_from_skb() for tp_btf ANBZ: #11946 commit ffc83860d8c09705d8e83474b8c6ec4d1d3dca41 upstream. Making tp_btf able to use bpf_dynptr_from_skb(), which is useful for skb parsing, especially for non-linear paged skb data. This is achieved by adding KF_TRUSTED_ARGS flag to bpf_dynptr_from_skb and registering it for TRACING progs. With KF_TRUSTED_ARGS, args from fentry/fexit are excluded, so that unsafe progs like fexit/__kfree_skb are not allowed. We also need the skb dynptr to be read-only in tp_btf. Because may_access_direct_pkt_data() returns false by default when checking bpf_dynptr_from_skb, there is no need to add BPF_PROG_TYPE_TRACING to it explicitly. 
Suggested-by: Martin KaFai Lau Signed-off-by: Philo Lu Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20240911033719.91468-5-lulie@linux.alibaba.com Signed-off-by: Martin KaFai Lau Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4145 --- net/core/filter.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/filter.c b/net/core/filter.c index 34320ce70096..6eb5d32ac5d2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -11910,7 +11910,7 @@ int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, } BTF_SET8_START(bpf_kfunc_check_set_skb) -BTF_ID_FLAGS(func, bpf_dynptr_from_skb) +BTF_ID_FLAGS(func, bpf_dynptr_from_skb, KF_TRUSTED_ARGS) BTF_SET8_END(bpf_kfunc_check_set_skb) BTF_SET8_START(bpf_kfunc_check_set_xdp) @@ -11950,6 +11950,7 @@ static int __init bpf_kfunc_init(void) ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_XMIT, &bpf_kfunc_set_skb); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_LWT_SEG6LOCAL, &bpf_kfunc_set_skb); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_NETFILTER, &bpf_kfunc_set_skb); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_kfunc_set_skb); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp); return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, &bpf_kfunc_set_sock_addr); -- Gitee From e8a5a79df89626e84be16334fc620905869d7f97 Mon Sep 17 00:00:00 2001 From: Philo Lu Date: Wed, 11 Sep 2024 11:37:19 +0800 Subject: [PATCH 1714/2138] selftests/bpf: Expand skb dynptr selftests for tp_btf ANBZ: #11946 commit 83dff601715bdc086dc1fc470ee3aaff42215e65 upstream. Add 3 test cases for skb dynptr used in tp_btf: - test_dynptr_skb_tp_btf: use skb dynptr in tp_btf and make sure it is read-only. - skb_invalid_ctx_fentry/skb_invalid_ctx_fexit: bpf_dynptr_from_skb should fail in fentry/fexit. 
In test_dynptr_skb_tp_btf, to trigger the tracepoint in kfree_skb, test_pkt_access is used for its test_run, as in kfree_skb.c. Because the test process is different from others, a new setup type is defined, i.e., SETUP_SKB_PROG_TP. The result is like: $ ./test_progs -t 'dynptr/test_dynptr_skb_tp_btf' #84/14 dynptr/test_dynptr_skb_tp_btf:OK #84 dynptr:OK #127 kfunc_dynptr_param:OK Summary: 2/1 PASSED, 0 SKIPPED, 0 FAILED $ ./test_progs -t 'dynptr/skb_invalid_ctx_f' #84/85 dynptr/skb_invalid_ctx_fentry:OK #84/86 dynptr/skb_invalid_ctx_fexit:OK #84 dynptr:OK #127 kfunc_dynptr_param:OK Summary: 2/2 PASSED, 0 SKIPPED, 0 FAILED Also fix two coding style nits (change spaces to tabs). Signed-off-by: Philo Lu Link: https://lore.kernel.org/r/20240911033719.91468-6-lulie@linux.alibaba.com Signed-off-by: Martin KaFai Lau Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4145 --- .../testing/selftests/bpf/prog_tests/dynptr.c | 37 ++++++++++++++++++- .../testing/selftests/bpf/progs/dynptr_fail.c | 25 +++++++++++++ .../selftests/bpf/progs/dynptr_success.c | 23 ++++++++++++ 3 files changed, 83 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c index 7cfac53c0d58..b614a5272dfd 100644 --- a/tools/testing/selftests/bpf/prog_tests/dynptr.c +++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c @@ -9,6 +9,7 @@ enum test_setup_type { SETUP_SYSCALL_SLEEP, SETUP_SKB_PROG, + SETUP_SKB_PROG_TP, }; static struct { @@ -28,6 +29,7 @@ static struct { {"test_dynptr_clone", SETUP_SKB_PROG}, {"test_dynptr_skb_no_buff", SETUP_SKB_PROG}, {"test_dynptr_skb_strcmp", SETUP_SKB_PROG}, + {"test_dynptr_skb_tp_btf", SETUP_SKB_PROG_TP}, }; static void verify_success(const char *prog_name, enum test_setup_type setup_type) @@ -35,7 +37,7 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ struct dynptr_success *skel; struct bpf_program *prog; struct bpf_link 
*link; - int err; + int err; skel = dynptr_success__open(); if (!ASSERT_OK_PTR(skel, "dynptr_success__open")) @@ -47,7 +49,7 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) goto cleanup; - bpf_program__set_autoload(prog, true); + bpf_program__set_autoload(prog, true); err = dynptr_success__load(skel); if (!ASSERT_OK(err, "dynptr_success__load")) @@ -87,6 +89,37 @@ static void verify_success(const char *prog_name, enum test_setup_type setup_typ break; } + case SETUP_SKB_PROG_TP: + { + struct __sk_buff skb = {}; + struct bpf_object *obj; + int aux_prog_fd; + + /* Just use its test_run to trigger kfree_skb tracepoint */ + err = bpf_prog_test_load("./test_pkt_access.bpf.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, &aux_prog_fd); + if (!ASSERT_OK(err, "prog_load sched cls")) + goto cleanup; + + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + ); + + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "bpf_program__attach")) + goto cleanup; + + err = bpf_prog_test_run_opts(aux_prog_fd, &topts); + bpf_link__destroy(link); + + if (!ASSERT_OK(err, "test_run")) + goto cleanup; + + break; + } } ASSERT_EQ(skel->bss->err, 0, "err"); diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 66a60bfb5867..9791cd8d48a4 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include "bpf_misc.h" #include "bpf_kfuncs.h" @@ -1254,6 +1255,30 @@ int skb_invalid_ctx(void *ctx) return 0; } +SEC("fentry/skb_tx_error") +__failure __msg("must be referenced or trusted") +int BPF_PROG(skb_invalid_ctx_fentry, void *skb) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_from_skb(skb, 0, &ptr); + + return 0; 
+} + +SEC("fexit/skb_tx_error") +__failure __msg("must be referenced or trusted") +int BPF_PROG(skb_invalid_ctx_fexit, void *skb) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_from_skb(skb, 0, &ptr); + + return 0; +} + /* Reject writes to dynptr slot for uninit arg */ SEC("?raw_tp") __failure __msg("potential write to dynptr at off=-16") diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c index 5985920d162e..bfcc85686cf0 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_success.c +++ b/tools/testing/selftests/bpf/progs/dynptr_success.c @@ -5,6 +5,7 @@ #include #include #include +#include #include "bpf_misc.h" #include "bpf_kfuncs.h" #include "errno.h" @@ -544,3 +545,25 @@ int test_dynptr_skb_strcmp(struct __sk_buff *skb) return 1; } + +SEC("tp_btf/kfree_skb") +int BPF_PROG(test_dynptr_skb_tp_btf, void *skb, void *location) +{ + __u8 write_data[2] = {1, 2}; + struct bpf_dynptr ptr; + int ret; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) { + err = 1; + return 1; + } + + /* since tp_btf skbs are read only, writes should fail */ + ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0); + if (ret != -EINVAL) { + err = 2; + return 1; + } + + return 1; +} -- Gitee From 84a96b857e409ea4f41eed2320f536c07b28435f Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Tue, 31 Oct 2023 14:56:24 -0700 Subject: [PATCH 1715/2138] bpf: Add __bpf_kfunc_{start,end}_defs macros ANBZ: #11946 commit 391145ba2accc48b596f3d438af1a6255b62a555 upstream. BPF kfuncs are meant to be called from BPF programs. Accordingly, most kfuncs are not called from anywhere in the kernel, which the -Wmissing-prototypes warning is unhappy about. We've peppered __diag_ignore_all("-Wmissing-prototypes", ... everywhere kfuncs are defined in the codebase to suppress this warning. This patch adds two macros meant to bound one or many kfunc definitions. 
All existing kfunc definitions which use these __diag calls to suppress -Wmissing-prototypes are migrated to use the newly-introduced macros. A new __diag_ignore_all - for "-Wmissing-declarations" - is added to the __bpf_kfunc_start_defs macro based on feedback from Andrii on an earlier version of this patch [0] and another recent mailing list thread [1]. In the future we might need to ignore different warnings or do other kfunc-specific things. This change will make it easier to make such modifications for all kfunc defs. [0]: https://lore.kernel.org/bpf/CAEf4BzaE5dRWtK6RPLnjTW-MW9sx9K3Fn6uwqCTChK2Dcb1Xig@mail.gmail.com/ [1]: https://lore.kernel.org/bpf/ZT+2qCc%2FaXep0%2FLf@krava/ Signed-off-by: Dave Marchevsky Suggested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Cc: Jiri Olsa Acked-by: Jiri Olsa Acked-by: David Vernet Acked-by: Yafang Shao Link: https://lore.kernel.org/r/20231031215625.2343848-1-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov Signed-off-by: Philo Lu Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4151 --- Documentation/bpf/kfuncs.rst | 6 ++---- include/linux/btf.h | 9 +++++++++ kernel/bpf/bpf_iter.c | 6 ++---- kernel/bpf/cpumask.c | 6 ++---- kernel/bpf/helpers.c | 6 ++---- kernel/bpf/map_iter.c | 6 ++---- kernel/trace/bpf_trace.c | 6 ++---- net/bpf/test_run.c | 7 +++---- net/core/filter.c | 13 ++++--------- net/core/xdp.c | 6 ++---- net/ipv4/fou_bpf.c | 6 ++---- net/netfilter/nf_conntrack_bpf.c | 6 ++---- net/netfilter/nf_nat_bpf.c | 6 ++---- net/xfrm/xfrm_interface_bpf.c | 6 ++---- 14 files changed, 38 insertions(+), 57 deletions(-) diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst index 0d2647fb358d..723408e399ab 100644 --- a/Documentation/bpf/kfuncs.rst +++ b/Documentation/bpf/kfuncs.rst @@ -37,16 +37,14 @@ prototype in a header for the wrapper kfunc. 
An example is given below:: /* Disables missing prototype warnings */ - __diag_push(); - __diag_ignore_all("-Wmissing-prototypes", - "Global kfuncs as their definitions will be in BTF"); + __bpf_kfunc_start_defs(); __bpf_kfunc struct task_struct *bpf_find_get_task_by_vpid(pid_t nr) { return find_get_task_by_vpid(nr); } - __diag_pop(); + __bpf_kfunc_end_defs(); A wrapper kfunc is often needed when we need to annotate parameters of the kfunc. Otherwise one may directly make the kfunc visible to the BPF program by diff --git a/include/linux/btf.h b/include/linux/btf.h index 4c3b97b3136b..3ba7b1b1b657 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -83,6 +83,15 @@ */ #define __bpf_kfunc __used noinline +#define __bpf_kfunc_start_defs() \ + __diag_push(); \ + __diag_ignore_all("-Wmissing-declarations", \ + "Global kfuncs as their definitions will be in BTF");\ + __diag_ignore_all("-Wmissing-prototypes", \ + "Global kfuncs as their definitions will be in BTF") + +#define __bpf_kfunc_end_defs() __diag_pop() + /* * Return the name of the passed struct, if exists, or halt the build if for * example the structure gets renamed. 
In this way, developers have to revisit diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 96856f130cbf..3ea907fa71e1 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -782,9 +782,7 @@ struct bpf_iter_num_kern { int end; /* final value, exclusive */ } __aligned(8); -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) { @@ -845,4 +843,4 @@ __bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it) s->cur = s->end = 0; } -__diag_pop(); +__bpf_kfunc_end_defs(); diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c index 09cb2a71e850..6acecc8ebd61 100644 --- a/kernel/bpf/cpumask.c +++ b/kernel/bpf/cpumask.c @@ -34,9 +34,7 @@ static bool cpu_valid(u32 cpu) return cpu < nr_cpu_ids; } -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global kfuncs as their definitions will be in BTF"); +__bpf_kfunc_start_defs(); /** * bpf_cpumask_create() - Create a mutable BPF cpumask. 
@@ -413,7 +411,7 @@ __bpf_kfunc u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, return cpumask_any_and_distribute(src1, src2); } -__diag_pop(); +__bpf_kfunc_end_defs(); BTF_SET8_START(cpumask_kfunc_btf_ids) BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 4e77fa1bf41e..e4b00ac1abce 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1973,9 +1973,7 @@ void bpf_rb_root_free(const struct btf_field *field, void *rb_root, } } -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) { @@ -2541,7 +2539,7 @@ __bpf_kfunc void bpf_rcu_read_unlock(void) rcu_read_unlock(); } -__diag_pop(); +__bpf_kfunc_end_defs(); BTF_SET8_START(generic_btf_ids) #ifdef CONFIG_KEXEC_CORE diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index 6fc9dae9edc8..6abd7c5df4b3 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -193,9 +193,7 @@ static int __init bpf_map_iter_init(void) late_initcall(bpf_map_iter_init); -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map) { @@ -213,7 +211,7 @@ __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map) return ret; } -__diag_pop(); +__bpf_kfunc_end_defs(); BTF_SET8_START(bpf_map_iter_kfunc_ids) BTF_ID_FLAGS(func, bpf_map_sum_elem_count, KF_TRUSTED_ARGS) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index aab43ba3daeb..087bc60963db 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1254,9 +1254,7 @@ static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = { }; #ifdef CONFIG_KEYS -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - 
"kfuncs which will be used in BPF programs"); +__bpf_kfunc_start_defs(); /** * bpf_lookup_user_key - lookup a key by its serial @@ -1406,7 +1404,7 @@ __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr, } #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ -__diag_pop(); +__bpf_kfunc_end_defs(); BTF_SET8_START(key_sig_kfunc_set) BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 835efb0246c9..ca397f454eec 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -504,9 +504,8 @@ static int bpf_test_finish(const union bpf_attr *kattr, * architecture dependent calling conventions. 7+ can be supported in the * future. */ -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); + __bpf_kfunc int bpf_fentry_test1(int a) { return a + 1; @@ -617,7 +616,7 @@ __bpf_kfunc void bpf_kfunc_call_memb_release_dtor(void *p) } CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor); -__diag_pop(); +__bpf_kfunc_end_defs(); BTF_SET8_START(bpf_test_modify_return_ids) BTF_ID_FLAGS(func, bpf_modify_return_test) diff --git a/net/core/filter.c b/net/core/filter.c index 6eb5d32ac5d2..efa1e93c3b2c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -11844,9 +11844,7 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id) return func; } -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); __bpf_kfunc int bpf_dynptr_from_skb(struct sk_buff *skb, u64 flags, struct bpf_dynptr_kern *ptr__uninit) { @@ -11893,7 +11891,7 @@ __bpf_kfunc int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern, return 0; } -__diag_pop(); +__bpf_kfunc_end_defs(); int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, struct bpf_dynptr_kern *ptr__uninit) @@ -11957,10 +11955,7 @@ static int __init 
bpf_kfunc_init(void) } late_initcall(bpf_kfunc_init); -/* Disables missing prototype warnings */ -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); /* bpf_sock_destroy: Destroy the given socket with ECONNABORTED error code. * @@ -11994,7 +11989,7 @@ __bpf_kfunc int bpf_sock_destroy(struct sock_common *sock) return sk->sk_prot->diag_destroy(sk, ECONNABORTED); } -__diag_pop() +__bpf_kfunc_end_defs(); BTF_SET8_START(bpf_sk_iter_kfunc_ids) BTF_ID_FLAGS(func, bpf_sock_destroy, KF_TRUSTED_ARGS) diff --git a/net/core/xdp.c b/net/core/xdp.c index 5ee3f8f165e5..1642222e350b 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -692,9 +692,7 @@ struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf) return nxdpf; } -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in vmlinux BTF"); +__bpf_kfunc_start_defs(); /** * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp. 
@@ -734,7 +732,7 @@ __bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash, return -EOPNOTSUPP; } -__diag_pop(); +__bpf_kfunc_end_defs(); BTF_SET8_START(xdp_metadata_kfunc_ids) #define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS) diff --git a/net/ipv4/fou_bpf.c b/net/ipv4/fou_bpf.c index 3760a14b6b57..4da03bf45c9b 100644 --- a/net/ipv4/fou_bpf.c +++ b/net/ipv4/fou_bpf.c @@ -22,9 +22,7 @@ enum bpf_fou_encap_type { FOU_BPF_ENCAP_GUE, }; -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in BTF"); +__bpf_kfunc_start_defs(); /* bpf_skb_set_fou_encap - Set FOU encap parameters * @@ -100,7 +98,7 @@ __bpf_kfunc int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx, return 0; } -__diag_pop() +__bpf_kfunc_end_defs(); BTF_SET8_START(fou_kfunc_set) BTF_ID_FLAGS(func, bpf_skb_set_fou_encap) diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c index b21799d468d2..475358ec8212 100644 --- a/net/netfilter/nf_conntrack_bpf.c +++ b/net/netfilter/nf_conntrack_bpf.c @@ -230,9 +230,7 @@ static int _nf_conntrack_btf_struct_access(struct bpf_verifier_log *log, return 0; } -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in nf_conntrack BTF"); +__bpf_kfunc_start_defs(); /* bpf_xdp_ct_alloc - Allocate a new CT entry * @@ -467,7 +465,7 @@ __bpf_kfunc int bpf_ct_change_status(struct nf_conn *nfct, u32 status) return nf_ct_change_status_common(nfct, status); } -__diag_pop() +__bpf_kfunc_end_defs(); BTF_SET8_START(nf_ct_kfunc_set) BTF_ID_FLAGS(func, bpf_xdp_ct_alloc, KF_ACQUIRE | KF_RET_NULL) diff --git a/net/netfilter/nf_nat_bpf.c b/net/netfilter/nf_nat_bpf.c index 141ee7783223..6e3b2f58855f 100644 --- a/net/netfilter/nf_nat_bpf.c +++ b/net/netfilter/nf_nat_bpf.c @@ -12,9 +12,7 @@ #include #include -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in 
nf_nat BTF"); +__bpf_kfunc_start_defs(); /* bpf_ct_set_nat_info - Set source or destination nat address * @@ -54,7 +52,7 @@ __bpf_kfunc int bpf_ct_set_nat_info(struct nf_conn___init *nfct, return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0; } -__diag_pop() +__bpf_kfunc_end_defs(); BTF_SET8_START(nf_nat_kfunc_set) BTF_ID_FLAGS(func, bpf_ct_set_nat_info, KF_TRUSTED_ARGS) diff --git a/net/xfrm/xfrm_interface_bpf.c b/net/xfrm/xfrm_interface_bpf.c index d74f3fd20f2b..7d5e920141e9 100644 --- a/net/xfrm/xfrm_interface_bpf.c +++ b/net/xfrm/xfrm_interface_bpf.c @@ -27,9 +27,7 @@ struct bpf_xfrm_info { int link; }; -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in xfrm_interface BTF"); +__bpf_kfunc_start_defs(); /* bpf_skb_get_xfrm_info - Get XFRM metadata * @@ -93,7 +91,7 @@ __bpf_kfunc int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, const struct bp return 0; } -__diag_pop() +__bpf_kfunc_end_defs(); BTF_SET8_START(xfrm_ifc_kfunc_set) BTF_ID_FLAGS(func, bpf_skb_get_xfrm_info) -- Gitee From 9bcc402b5c499c46874f6c52b37f8c268d6419c9 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Tue, 31 Oct 2023 14:56:25 -0700 Subject: [PATCH 1716/2138] bpf: Add __bpf_hook_{start,end} macros ANBZ: #11946 commit 15fb6f2b6c4c3c129adc2412ae12ec15e60a6adb upstream. Not all uses of __diag_ignore_all(...) in BPF-related code in order to suppress warnings are wrapping kfunc definitions. Some "hook point" definitions - small functions meant to be used as attach points for fentry and similar BPF progs - need to suppress -Wmissing-declarations. We could use __bpf_kfunc_{start,end}_defs added in the previous patch in such cases, but this might be confusing to someone unfamiliar with BPF internals. 
Instead, this patch adds __bpf_hook_{start,end} macros, currently having the same effect as __bpf_kfunc_{start,end}_defs, then uses them to suppress warnings for two hook points in the kernel itself and some bpf_testmod hook points as well. Signed-off-by: Dave Marchevsky Cc: Yafang Shao Acked-by: Jiri Olsa Acked-by: Yafang Shao Link: https://lore.kernel.org/r/20231031215625.2343848-2-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov Signed-off-by: Philo Lu Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4151 --- include/linux/btf.h | 2 ++ kernel/cgroup/rstat.c | 9 +++------ net/socket.c | 8 ++------ tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 6 ++---- 4 files changed, 9 insertions(+), 16 deletions(-) diff --git a/include/linux/btf.h b/include/linux/btf.h index 3ba7b1b1b657..f6180d83c266 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -91,6 +91,8 @@ "Global kfuncs as their definitions will be in BTF") #define __bpf_kfunc_end_defs() __diag_pop() +#define __bpf_hook_start() __bpf_kfunc_start_defs() +#define __bpf_hook_end() __bpf_kfunc_end_defs() /* * Return the name of the passed struct, if exists, or halt the build if for diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index 2ac57d3760cf..3f5c19916951 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -156,19 +156,16 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos, * optimize away the callsite. Therefore, __weak is needed to ensure that the * call is still emitted, by telling the compiler that we don't know what the * function might eventually be. - * - * __diag_* below are needed to dismiss the missing prototype warning. 
*/ -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "kfuncs which will be used in BPF programs"); + +__bpf_hook_start(); __weak noinline void bpf_rstat_flush(struct cgroup *cgrp, struct cgroup *parent, int cpu) { } -__diag_pop(); +__bpf_hook_end(); /* see cgroup_rstat_flush() */ static void cgroup_rstat_flush_locked(struct cgroup *cgrp) diff --git a/net/socket.c b/net/socket.c index bad58f23f307..0c3e8a815347 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1692,20 +1692,16 @@ struct file *__sys_socket_file(int family, int type, int protocol) * Therefore, __weak is needed to ensure that the call is still * emitted, by telling the compiler that we don't know what the * function might eventually be. - * - * __diag_* below are needed to dismiss the missing prototype warning. */ -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "A fmod_ret entry point for BPF programs"); +__bpf_hook_start(); __weak noinline int update_socket_protocol(int family, int type, int protocol) { return protocol; } -__diag_pop(); +__bpf_hook_end(); int __sys_socket(int family, int type, int protocol) { diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 3c5b5669bceb..139c36fa3635 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -40,9 +40,7 @@ struct bpf_testmod_struct_arg_4 { int b; }; -__diag_push(); -__diag_ignore_all("-Wmissing-prototypes", - "Global functions as their definitions will be in bpf_testmod.ko BTF"); +__bpf_hook_start(); noinline int bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) { @@ -334,7 +332,7 @@ noinline int bpf_fentry_shadow_test(int a) } EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test); -__diag_pop(); +__bpf_hook_end(); static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { .attr = { .name = "bpf_testmod", .mode = 0666, }, -- Gitee From 
b51411cf7fe15d4f46d244e83dd3be84efc29242 Mon Sep 17 00:00:00 2001 From: Philo Lu Date: Thu, 25 Jan 2024 18:56:26 +0800 Subject: [PATCH 1717/2138] anolis: tcp: add tracepoints for tcp data send/recv/ack used by tcprt ANBZ: #11946 3 tcp tracepoints are added: (1) tcp_data_send: when data sent (2) tcp_pkt_recv: when data received (3) tcp_data_acked: when data acked These tracepoints are used by tcprt in bpf. Signed-off-by: Philo Lu Acked-by: Tianchen Ding Reviewed-by: Xuan Zhuo Link: https://gitee.com/anolis/cloud-kernel/pulls/4147 --- include/trace/events/tcp.h | 21 +++++++++++++++++++++ net/ipv4/tcp_input.c | 4 ++++ net/ipv4/tcp_output.c | 2 ++ 3 files changed, 27 insertions(+) diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index 1bd27cdb4b87..5c0da8061151 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -113,6 +113,13 @@ DEFINE_EVENT(tcp_event_sk_skb, tcp_send_reset, TP_ARGS(sk, skb__nullable) ); +DEFINE_EVENT(tcp_event_sk_skb, tcp_pkt_recv, + + TP_PROTO(const struct sock *sk, const struct sk_buff *skb), + + TP_ARGS(sk, skb) +); + /* * tcp event with arguments sk * @@ -187,6 +194,20 @@ DEFINE_EVENT(tcp_event_sk, tcp_rcv_space_adjust, TP_ARGS(sk) ); +DEFINE_EVENT(tcp_event_sk, tcp_data_send, + + TP_PROTO(struct sock *sk), + + TP_ARGS(sk) +); + +DEFINE_EVENT(tcp_event_sk, tcp_data_acked, + + TP_PROTO(struct sock *sk), + + TP_ARGS(sk) +); + TRACE_EVENT(tcp_retransmit_synack, TP_PROTO(const struct sock *sk, const struct request_sock *req), diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index f6a213bae5cc..70b67ea8017f 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -809,6 +809,8 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) now = tcp_jiffies32; + trace_tcp_pkt_recv(sk, skb); + if (!icsk->icsk_ack.ato) { /* The _first_ data packet received, initialize * delayed ACK engine. 
@@ -3498,6 +3500,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb, flag |= FLAG_SET_XMIT_TIMER; /* set TLP or RTO timer */ } + trace_tcp_data_acked(sk); + if (icsk->icsk_ca_ops->pkts_acked) { struct ack_sample sample = { .pkts_acked = pkts_acked, .rtt_us = sack->rate->rtt_us }; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index cfddc94508f0..fc6902b14e23 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2793,6 +2793,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, /* Send one loss probe per tail loss episode. */ if (push_one != 2) tcp_schedule_loss_probe(sk, false); + + trace_tcp_data_send(sk); return false; } return !tp->packets_out && !tcp_write_queue_empty(sk); -- Gitee From 015b483340aef2da1a70f8ca5292273bff2aa9f0 Mon Sep 17 00:00:00 2001 From: Philo Lu Date: Thu, 25 Jan 2024 08:57:46 +0800 Subject: [PATCH 1718/2138] anolis: bpf: Add relayfs support for bpf ANBZ: #11946 Implement private bpf-relay for anolis. Create a configuration file named relay_ebpf in debugfs, which can be used through echo to relay_ebpf. Following cmds are supported: (1) create/remove "create bufnum bufsize percpu " "remove " (2) show "cat /sys/kernel/debug/relay_ebpf" => id dir_name file_name bufnum bufsize percpu The field "id" is a unique identifier for each relay channel, which is needed by bpf. For bpf side, a kfunc bpf_anolis_relay_write() is provided to write into the targeted channel. The number of concurrent relay channels is bounded by RCHAN_NUM_MAX. Note that two ids, 0 and 1, are preserved for tcprt. And RELAY_INDEX_BEGIN is defined as 4 so that general relay channel ids are at least 4. 
Signed-off-by: Philo Lu Acked-by: Tianchen Ding Reviewed-by: Xuan Zhuo Link: https://gitee.com/anolis/cloud-kernel/pulls/4147 --- kernel/bpf/Makefile | 3 + kernel/bpf/bpf_relay.c | 489 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 492 insertions(+) create mode 100644 kernel/bpf/bpf_relay.c diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index f526b7573e97..a2d0e84e725b 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -11,6 +11,9 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_iter.o map_iter.o task_iter.o prog_iter.o link_ obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o +ifeq ($(CONFIG_RELAY),y) +obj-$(CONFIG_BPF_SYSCALL) += bpf_relay.o +endif obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o obj-$(CONFIG_BPF_SYSCALL) += disasm.o mprog.o obj-$(CONFIG_BPF_JIT) += trampoline.o diff --git a/kernel/bpf/bpf_relay.c b/kernel/bpf/bpf_relay.c new file mode 100644 index 000000000000..f041e6e479d2 --- /dev/null +++ b/kernel/bpf/bpf_relay.c @@ -0,0 +1,489 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Alibaba Cloud + * + * relay interface only used by bpf. To use it, please + * echo following cmds to /sys/kernel/debug/relay_ebpf: + * - Create: + * create bufnum bufsize percpu + * - Remove: + * remove + * + * Also `cat` can be used to show the current relay files, one entry each line + * - Show: cat /sys/kernel/debug/relay_ebpf + * => id dir_name file_name bufnum bufsize percpu + * + * The field "id" is a unique identifier for each relay channel, which is + * needed by bpf helper to write into the channel. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* preserved index used for tcprt */ +enum reserve_relay_idex { + RELAY_INDEX_TCPRT_LOG = 0, + RELAY_INDEX_TCPRT_STAT, + RELAY_INDEX_BEGIN = 4, +}; + +#define DIR_NAME_TCPRT "tcp-rt" +#define FILE_NAME_TCPRT_LOG "rt-network-log" +#define FILE_NAME_TCPRT_STAT "rt-network-stats" + +/* dynamic array to maintain relay channels, with number limit RCHAN_NUM_MAX */ +static struct rchan **rchan_array; +static size_t array_capacity; +#define RCHAN_NUM_MAX 32 + +/* use to protect relay_ebpf, which makes sure that relay_ebpf process one + * command at a time. + */ +static DEFINE_MUTEX(relay_file_lock); + +/* handle the extension of relay array */ +static int relay_array_extend(size_t new_size) +{ + struct rchan **new_array, **old; + size_t new_capacity; + + /* Calculate new capacity with a simple growth strategy */ + new_capacity = (new_size > array_capacity * 2) ? new_size : (array_capacity * 2); + + /* Compare with RCHAN_NUM_MAX, the max capacity */ + if (new_capacity > RCHAN_NUM_MAX) + new_capacity = RCHAN_NUM_MAX; + + /* Do nothing if new capacity is not larger than old */ + if (new_capacity <= array_capacity) + return -EINVAL; + + /* Allocate and init new array with new capacity */ + new_array = kcalloc(new_capacity, sizeof(*rchan_array), GFP_KERNEL); + if (!new_array) + return -ENOMEM; + + if (rchan_array) + memcpy(new_array, rchan_array, + array_capacity * sizeof(*rchan_array)); + + /* update rchan_array with rcu */ + old = rcu_dereference_protected(rchan_array, + lockdep_is_held(&relay_file_lock)); + rcu_assign_pointer(rchan_array, new_array); + synchronize_rcu(); + + array_capacity = new_capacity; + kfree(old); + + pr_info("bpf-relay: rchan_array extend to size %zu\n", new_capacity); + return 0; +} + +/* return the idx of target relay channel if exists, return -1 if not */ +static int relay_array_lookup(const char *dirname, const char *filename) +{ + const char *fname, 
*dname; + int i; + + for (i = 0; i < array_capacity; ++i) { + if (!rchan_array[i]) + continue; + + fname = rchan_array[i]->base_filename; + dname = rchan_array[i]->parent->d_name.name; + + if (strcmp(dname, dirname) == 0 && + strcmp(fname, filename) == 0) + return i; + } + + return -1; +} + +/* return first index if found, else return -1 */ +static int relay_array_lookup_dir(struct dentry *dir) +{ + int i; + + for (i = 0; i < array_capacity; ++i) { + if (!rchan_array[i]) + continue; + + if (rchan_array[i]->parent == dir) + return i; + } + + return -1; +} + +/* must ensure the index is valid */ +static void relay_array_delete(int index) +{ + struct dentry *dir = rchan_array[index]->parent; + struct rchan *rch = rchan_array[index]; + + /* remove targe relay channel */ + rcu_assign_pointer(rchan_array[index], NULL); + synchronize_rcu(); + + relay_close(rch); + + /* check if the parent dir is still in use */ + if (relay_array_lookup_dir(dir) == -1) { + debugfs_remove_recursive(dir); + pr_info("bpf-relay: directory deleted\n"); + } +} + +/* get the next usable id, return -1 if there is no id left */ +static int relay_array_usable_id(const char *dir_name, const char *file_name) +{ + int i; + + /* firstly check preserved special ids */ + if (strcmp(dir_name, DIR_NAME_TCPRT) == 0 && + strcmp(file_name, FILE_NAME_TCPRT_LOG) == 0) { + pr_info("bpf-relay: prepare to create tcprt log\n"); + i = RELAY_INDEX_TCPRT_LOG; + goto check; + } + if (strcmp(dir_name, DIR_NAME_TCPRT) == 0 && + strcmp(file_name, FILE_NAME_TCPRT_STAT) == 0) { + pr_info("bpf-relay: prepare to create stats\n"); + i = RELAY_INDEX_TCPRT_STAT; + goto check; + } + + /* not special relay, find the minimal usable id */ + for (i = RELAY_INDEX_BEGIN; i < array_capacity; ++i) { + if (!rchan_array[i]) + return i; + } + +check: + /* if extend needed but fails, return -1 */ + if (i >= array_capacity) { + if (relay_array_extend(i + 1)) + return -1; + } + + return i; +} + +/* relay callbacks used by all relay files */ 
+static struct dentry *create_buf_file_handler(const char *filename, + struct dentry *parent, + umode_t mode, + struct rchan_buf *buf, + int *is_global) +{ + char final_fname[NAME_MAX]; + + strscpy(final_fname, filename, sizeof(final_fname)); + if (buf->chan->private_data) { + *is_global = 1; + + /* if it is global, remove the last cpu_id 0 */ + final_fname[strlen(filename) - 1] = '\0'; + } + + return debugfs_create_file(final_fname, mode, parent, buf, + &relay_file_operations); +} + +static int remove_buf_file_handler(struct dentry *dentry) +{ + debugfs_remove(dentry); + return 0; +} + +static int subbuf_start(struct rchan_buf *buf, + void *subbuf, + void *prev_subbuf, + size_t prev_padding) +{ + return 1; +} + +static struct rchan_callbacks relay_callbacks = { + .create_buf_file = create_buf_file_handler, + .remove_buf_file = remove_buf_file_handler, + .subbuf_start = subbuf_start, +}; + +/* each time print a line */ +static ssize_t relay_ebpf_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + /* use relay_array index as ppos, each read() process an element and + * *ppos increases by 1, until it reaches array_capacity. 
+ */ + struct rchan *rch; + char buf[128]; + int size, ret; + loff_t index; + + if (count < sizeof(buf)) + return -EACCES; + + mutex_lock(&relay_file_lock); + index = *ppos; + if (index < 0) { + ret = -EINVAL; + goto out; + } + + /* find the first non-null rchan from ppos */ + while (index < array_capacity && !rchan_array[index]) + index++; + + if (index >= array_capacity) { + ret = 0; + goto out; + } + + /* find a valid entry, rch->is_global==0 means percpu is on */ + rch = rchan_array[index]; + size = snprintf(buf, sizeof(buf), "%lld %s %s %lu %lu %d\n", index, + rch->parent->d_name.name, rch->base_filename, + rch->n_subbufs, rch->subbuf_size, !rch->is_global); + + ret = copy_to_user(user_buf, buf, size); + if (ret) { + ret = -EFAULT; + goto out; + } + + ret = size; + /* current element is processed, increase index */ + *ppos = index + 1; +out: + mutex_unlock(&relay_file_lock); + return ret; +} + +static int bpf_relay_create(const char *dir_name, const char *file_name, + unsigned long bufnum, unsigned long bufsize, + void *is_global, int index) +{ + struct dentry *dir; + struct rchan *rch; + int dir_create = 0; + + if (index >= array_capacity || index < 0) { + pr_info("bpf-relay: create fail, index %d out of range\n", + index); + return -EINVAL; + } + + /* check if this relay channel already exists */ + if (relay_array_lookup(dir_name, file_name) != -1) { + pr_info("bpf-relay: create fail, channel already exists\n"); + return -EEXIST; + } + + /* find if the dir already exists, if not, create it */ + dir = debugfs_lookup(dir_name, NULL); + if (!dir) { + dir = debugfs_create_dir(dir_name, NULL); + if (IS_ERR(dir)) + return PTR_ERR(dir); + dir_create = 1; + + } else if (!S_ISDIR(dir->d_inode->i_mode)) { + pr_info("bpf-relay: create fail, %s is not a directory\n", + dir_name); + return -EINVAL; + } + + rch = relay_open(file_name, dir, bufsize, bufnum, + &relay_callbacks, is_global); + if (!rch) { + if (dir_create) + debugfs_remove_recursive(dir); + 
pr_info("bpf-relay: create fail, relay_open fail\n"); + return -ENOMEM; + } + + rcu_assign_pointer(rchan_array[index], rch); + pr_info("bpf-relay: create finished, id=%d\n", index); + return 0; +} + +static int handle_create(const char *buf) +{ + char dir_name[NAME_MAX], file_name[NAME_MAX], bsize_str[20], percpu[4]; + unsigned long bufnum, bufsize; + static unsigned char global_flag; + unsigned char *is_global; + int ret; + + ret = sscanf(buf, " create %s %s bufnum %lu bufsize %s percpu %4s", + dir_name, file_name, &bufnum, bsize_str, percpu); + if (ret != 5) { + pr_info("bpf-relay: create fail, get args failed\n"); + return -EINVAL; + } + + /* parse arguments */ + bufsize = (unsigned long)memparse(bsize_str, NULL); + + /* by passing a valid pointer as private_data for relay channel, + * we mark the channel as global, see create_buf_file_handler() + */ + is_global = NULL; + if (strcmp(percpu, "off") == 0) + is_global = &global_flag; + + ret = relay_array_usable_id(dir_name, file_name); + if (ret < 0) { + pr_info("bpf-relay: create fail, no id left\n"); + return -ENOMEM; + } + + /* create common relay chan according to args */ + return bpf_relay_create(dir_name, file_name, bufnum, bufsize, + is_global, ret); +} + +static int handle_remove(const char *buf) +{ + char dir_name[NAME_MAX], file_name[NAME_MAX]; + int ret; + + ret = sscanf(buf, " remove %s %s", dir_name, file_name); + if (ret != 2) { + pr_info("bpf-relay: remove fail, get args failed\n"); + return -EINVAL; + } + + ret = relay_array_lookup(dir_name, file_name); + if (ret >= 0) { + relay_array_delete(ret); + pr_info("bpf-relay: remove finished, id=%d\n", ret); + } else { + pr_info("bpf-relay: remove finished, not exists\n"); + } + + return 0; +} + +static ssize_t relay_ebpf_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + char cmd[10], buf[128]; + int ret; + + if (!count || count >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(&buf, user_buf, count)) + 
return -EFAULT; + + /* parse cmd */ + buf[count] = '\0'; + ret = sscanf(buf, " %s", cmd); + if (ret != 1) { + pr_info("bpf-relay: write fail, get cmd failed\n"); + return -EINVAL; + } + + mutex_lock(&relay_file_lock); + if (strcmp(cmd, "create") == 0) { + ret = handle_create(buf); + } else if (strcmp(cmd, "remove") == 0) { + ret = handle_remove(buf); + } else { + pr_info("bpf-relay: write fail, invalid cmd\n"); + ret = -EINVAL; + } + + /* create or remove succ */ + if (!ret) + ret = count; + + mutex_unlock(&relay_file_lock); + return ret; +} + +static const struct file_operations relay_ebpf_fops = { + .write = relay_ebpf_write, + .read = relay_ebpf_read, +}; + +__bpf_kfunc_start_defs(); + +/* Write data of size size__sz to relay channel of index. + * WARNING: This kfunc can be deprecated at ANY time in the future. + */ +__bpf_kfunc int bpf_anolis_relay_write(void *data, size_t size__sz, int index) +{ + struct rchan *rch, **rch_arr; + int ret = 0; + + /* capacity does not need to be protected because it is always updated + * after rchan_array and will not decrease. It is safe to use a newer + * rchan_array is with older (as well as smaller) capacity. + */ + if (index >= array_capacity) + return -EINVAL; + + rcu_read_lock(); + + /* rch_arr will not be NULL, because if it is NULL, array_capacity must + * be 0 and then the above index checking would not pass. 
+ */ + rch_arr = rcu_dereference(rchan_array); + rch = rcu_dereference(rch_arr[index]); + if (!rch) { + ret = -ENOENT; + goto out; + } + + relay_write(rch, data, size__sz); +out: + rcu_read_unlock(); + return ret; +} + +__bpf_kfunc_end_defs(); + +BTF_SET8_START(bpf_relay_kfunc_ids) +BTF_ID_FLAGS(func, bpf_anolis_relay_write, KF_TRUSTED_ARGS) +BTF_SET8_END(bpf_relay_kfunc_ids) + +static const struct btf_kfunc_id_set bpf_relay_kfunc_set = { + .owner = THIS_MODULE, + .set = &bpf_relay_kfunc_ids, +}; + +/* create relay-ebpf file, rchan_array is created with "create" cmd */ +static int __init bpf_relay_init(void) +{ + int ret; + + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, + &bpf_relay_kfunc_set); + if (ret) { + pr_err("bpf-relay: register kfunc fail\n"); + return ret; + } + + if (!debugfs_create_file("relay_ebpf", 0644, NULL, NULL, + &relay_ebpf_fops)) { + pr_err("bpf-relay: debugfs create relay_ebpf fail\n"); + return -ENOMEM; + } + + return 0; +} +late_initcall(bpf_relay_init); -- Gitee From 66503814b4a0cd34450176749c1794c70079eece Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Mon, 11 Mar 2024 22:23:46 +0800 Subject: [PATCH 1719/2138] objtool/LoongArch: Enable objtool to be built ANBZ: #11459 commit e8aff71ca93026209dd0eab9b285e6808cd87d05 upstream. Add the minimal changes to enable objtool build on LoongArch, most of the functions are stubs to only fix the build errors when make -C tools/objtool. This is similar with commit e52ec98c5ab1 ("objtool/powerpc: Enable objtool to be built on ppc"). 
Co-developed-by: Jinyang He Signed-off-by: Jinyang He Co-developed-by: Youling Tang Signed-off-by: Youling Tang Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- tools/objtool/arch/loongarch/Build | 2 + tools/objtool/arch/loongarch/decode.c | 71 +++++++++++++++++++ .../arch/loongarch/include/arch/cfi_regs.h | 22 ++++++ .../objtool/arch/loongarch/include/arch/elf.h | 30 ++++++++ .../arch/loongarch/include/arch/special.h | 33 +++++++++ tools/objtool/arch/loongarch/special.c | 15 ++++ 6 files changed, 173 insertions(+) create mode 100644 tools/objtool/arch/loongarch/Build create mode 100644 tools/objtool/arch/loongarch/decode.c create mode 100644 tools/objtool/arch/loongarch/include/arch/cfi_regs.h create mode 100644 tools/objtool/arch/loongarch/include/arch/elf.h create mode 100644 tools/objtool/arch/loongarch/include/arch/special.h create mode 100644 tools/objtool/arch/loongarch/special.c diff --git a/tools/objtool/arch/loongarch/Build b/tools/objtool/arch/loongarch/Build new file mode 100644 index 000000000000..d24d5636a5b8 --- /dev/null +++ b/tools/objtool/arch/loongarch/Build @@ -0,0 +1,2 @@ +objtool-y += decode.o +objtool-y += special.o diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c new file mode 100644 index 000000000000..cc74ba4e0f54 --- /dev/null +++ b/tools/objtool/arch/loongarch/decode.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include +#include + +int arch_ftrace_match(char *name) +{ + return !strcmp(name, "_mcount"); +} + +unsigned long arch_jump_destination(struct instruction *insn) +{ + return insn->offset + (insn->immediate << 2); +} + +unsigned long arch_dest_reloc_offset(int addend) +{ + return addend; +} + +bool arch_pc_relative_reloc(struct reloc *reloc) +{ + return false; +} + +bool arch_callee_saved_reg(unsigned char reg) +{ + switch (reg) { + case CFI_RA: + case CFI_FP: + case CFI_S0 ... 
CFI_S8: + return true; + default: + return false; + } +} + +int arch_decode_hint_reg(u8 sp_reg, int *base) +{ + return 0; +} + +int arch_decode_instruction(struct objtool_file *file, const struct section *sec, + unsigned long offset, unsigned int maxlen, + struct instruction *insn) +{ + return 0; +} + +const char *arch_nop_insn(int len) +{ + return NULL; +} + +const char *arch_ret_insn(int len) +{ + return NULL; +} + +void arch_initial_func_cfi_state(struct cfi_init_state *state) +{ + int i; + + for (i = 0; i < CFI_NUM_REGS; i++) { + state->regs[i].base = CFI_UNDEFINED; + state->regs[i].offset = 0; + } + + /* initial CFA (call frame address) */ + state->cfa.base = CFI_SP; + state->cfa.offset = 0; +} diff --git a/tools/objtool/arch/loongarch/include/arch/cfi_regs.h b/tools/objtool/arch/loongarch/include/arch/cfi_regs.h new file mode 100644 index 000000000000..d183cc8f43bf --- /dev/null +++ b/tools/objtool/arch/loongarch/include/arch/cfi_regs.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _OBJTOOL_ARCH_CFI_REGS_H +#define _OBJTOOL_ARCH_CFI_REGS_H + +#define CFI_RA 1 +#define CFI_SP 3 +#define CFI_A0 4 +#define CFI_FP 22 +#define CFI_S0 23 +#define CFI_S1 24 +#define CFI_S2 25 +#define CFI_S3 26 +#define CFI_S4 27 +#define CFI_S5 28 +#define CFI_S6 29 +#define CFI_S7 30 +#define CFI_S8 31 +#define CFI_NUM_REGS 32 + +#define CFI_BP CFI_FP + +#endif /* _OBJTOOL_ARCH_CFI_REGS_H */ diff --git a/tools/objtool/arch/loongarch/include/arch/elf.h b/tools/objtool/arch/loongarch/include/arch/elf.h new file mode 100644 index 000000000000..9623d663220e --- /dev/null +++ b/tools/objtool/arch/loongarch/include/arch/elf.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _OBJTOOL_ARCH_ELF_H +#define _OBJTOOL_ARCH_ELF_H + +/* + * See the following link for more info about ELF Relocation types: + * https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html#_relocations + */ +#ifndef R_LARCH_NONE +#define 
R_LARCH_NONE 0 +#endif +#ifndef R_LARCH_32 +#define R_LARCH_32 1 +#endif +#ifndef R_LARCH_64 +#define R_LARCH_64 2 +#endif +#ifndef R_LARCH_32_PCREL +#define R_LARCH_32_PCREL 99 +#endif + +#define R_NONE R_LARCH_NONE +#define R_ABS32 R_LARCH_32 +#define R_ABS64 R_LARCH_64 +#define R_DATA32 R_LARCH_32_PCREL +#define R_DATA64 R_LARCH_32_PCREL +#define R_TEXT32 R_LARCH_32_PCREL +#define R_TEXT64 R_LARCH_32_PCREL + +#endif /* _OBJTOOL_ARCH_ELF_H */ diff --git a/tools/objtool/arch/loongarch/include/arch/special.h b/tools/objtool/arch/loongarch/include/arch/special.h new file mode 100644 index 000000000000..35fc979b550a --- /dev/null +++ b/tools/objtool/arch/loongarch/include/arch/special.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _OBJTOOL_ARCH_SPECIAL_H +#define _OBJTOOL_ARCH_SPECIAL_H + +/* + * See more info about struct exception_table_entry + * in arch/loongarch/include/asm/extable.h + */ +#define EX_ENTRY_SIZE 12 +#define EX_ORIG_OFFSET 0 +#define EX_NEW_OFFSET 4 + +/* + * See more info about struct jump_entry + * in include/linux/jump_label.h + */ +#define JUMP_ENTRY_SIZE 16 +#define JUMP_ORIG_OFFSET 0 +#define JUMP_NEW_OFFSET 4 +#define JUMP_KEY_OFFSET 8 + +/* + * See more info about struct alt_instr + * in arch/loongarch/include/asm/alternative.h + */ +#define ALT_ENTRY_SIZE 12 +#define ALT_ORIG_OFFSET 0 +#define ALT_NEW_OFFSET 4 +#define ALT_FEATURE_OFFSET 8 +#define ALT_ORIG_LEN_OFFSET 10 +#define ALT_NEW_LEN_OFFSET 11 + +#endif /* _OBJTOOL_ARCH_SPECIAL_H */ diff --git a/tools/objtool/arch/loongarch/special.c b/tools/objtool/arch/loongarch/special.c new file mode 100644 index 000000000000..9bba1e9318e0 --- /dev/null +++ b/tools/objtool/arch/loongarch/special.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include + +bool arch_support_alt_relocation(struct special_alt *special_alt, + struct instruction *insn, + struct reloc *reloc) +{ + return false; +} + +struct reloc *arch_find_switch_table(struct 
objtool_file *file, + struct instruction *insn) +{ + return NULL; +} -- Gitee From 2123c36e84ac0e6185a109af80dac6620847f633 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Mon, 11 Mar 2024 22:23:47 +0800 Subject: [PATCH 1720/2138] objtool/LoongArch: Implement instruction decoder ANBZ: #11459 commit b2d23158e6c881326321c2351b92568be4e57030 upstream. Only copy the minimal definitions of instruction opcodes and formats in inst.h from arch/loongarch to tools/arch/loongarch, and also copy the definition of sign_extend64() to tools/include/linux/bitops.h to decode the following kinds of instructions: (1) stack pointer related instructions addi.d, ld.d, st.d, ldptr.d and stptr.d (2) branch and jump related instructions beq, bne, blt, bge, bltu, bgeu, beqz, bnez, bceqz, bcnez, b, bl and jirl (3) other instructions break, nop and ertn See more info about instructions in LoongArch Reference Manual: https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html Co-developed-by: Jinyang He Signed-off-by: Jinyang He Co-developed-by: Youling Tang Signed-off-by: Youling Tang Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- tools/arch/loongarch/include/asm/inst.h | 161 ++++++++++++++ tools/include/linux/bitops.h | 11 + tools/objtool/arch/loongarch/decode.c | 273 +++++++++++++++++++++++- 3 files changed, 443 insertions(+), 2 deletions(-) create mode 100644 tools/arch/loongarch/include/asm/inst.h diff --git a/tools/arch/loongarch/include/asm/inst.h b/tools/arch/loongarch/include/asm/inst.h new file mode 100644 index 000000000000..c25b5853181d --- /dev/null +++ b/tools/arch/loongarch/include/asm/inst.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_INST_H +#define _ASM_INST_H + +#include + +#define LOONGARCH_INSN_NOP 0x03400000 + +enum reg0i15_op { + break_op = 0x54, +}; + +enum 
reg0i26_op { + b_op = 0x14, + bl_op = 0x15, +}; + +enum reg1i21_op { + beqz_op = 0x10, + bnez_op = 0x11, + bceqz_op = 0x12, /* bits[9:8] = 0x00 */ + bcnez_op = 0x12, /* bits[9:8] = 0x01 */ +}; + +enum reg2_op { + ertn_op = 0x1920e, +}; + +enum reg2i12_op { + addid_op = 0x0b, + andi_op = 0x0d, + ldd_op = 0xa3, + std_op = 0xa7, +}; + +enum reg2i14_op { + ldptrd_op = 0x26, + stptrd_op = 0x27, +}; + +enum reg2i16_op { + jirl_op = 0x13, + beq_op = 0x16, + bne_op = 0x17, + blt_op = 0x18, + bge_op = 0x19, + bltu_op = 0x1a, + bgeu_op = 0x1b, +}; + +struct reg0i15_format { + unsigned int immediate : 15; + unsigned int opcode : 17; +}; + +struct reg0i26_format { + unsigned int immediate_h : 10; + unsigned int immediate_l : 16; + unsigned int opcode : 6; +}; + +struct reg1i21_format { + unsigned int immediate_h : 5; + unsigned int rj : 5; + unsigned int immediate_l : 16; + unsigned int opcode : 6; +}; + +struct reg2_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int opcode : 22; +}; + +struct reg2i12_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 12; + unsigned int opcode : 10; +}; + +struct reg2i14_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 14; + unsigned int opcode : 8; +}; + +struct reg2i16_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int immediate : 16; + unsigned int opcode : 6; +}; + +union loongarch_instruction { + unsigned int word; + struct reg0i15_format reg0i15_format; + struct reg0i26_format reg0i26_format; + struct reg1i21_format reg1i21_format; + struct reg2_format reg2_format; + struct reg2i12_format reg2i12_format; + struct reg2i14_format reg2i14_format; + struct reg2i16_format reg2i16_format; +}; + +#define LOONGARCH_INSN_SIZE sizeof(union loongarch_instruction) + +enum loongarch_gpr { + LOONGARCH_GPR_ZERO = 0, + LOONGARCH_GPR_RA = 1, + LOONGARCH_GPR_TP = 2, + LOONGARCH_GPR_SP = 3, + LOONGARCH_GPR_A0 = 4, /* Reused as V0 for return value */ + 
LOONGARCH_GPR_A1, /* Reused as V1 for return value */ + LOONGARCH_GPR_A2, + LOONGARCH_GPR_A3, + LOONGARCH_GPR_A4, + LOONGARCH_GPR_A5, + LOONGARCH_GPR_A6, + LOONGARCH_GPR_A7, + LOONGARCH_GPR_T0 = 12, + LOONGARCH_GPR_T1, + LOONGARCH_GPR_T2, + LOONGARCH_GPR_T3, + LOONGARCH_GPR_T4, + LOONGARCH_GPR_T5, + LOONGARCH_GPR_T6, + LOONGARCH_GPR_T7, + LOONGARCH_GPR_T8, + LOONGARCH_GPR_FP = 22, + LOONGARCH_GPR_S0 = 23, + LOONGARCH_GPR_S1, + LOONGARCH_GPR_S2, + LOONGARCH_GPR_S3, + LOONGARCH_GPR_S4, + LOONGARCH_GPR_S5, + LOONGARCH_GPR_S6, + LOONGARCH_GPR_S7, + LOONGARCH_GPR_S8, + LOONGARCH_GPR_MAX +}; + +#define DEF_EMIT_REG2I16_FORMAT(NAME, OP) \ +static inline void emit_##NAME(union loongarch_instruction *insn, \ + enum loongarch_gpr rj, \ + enum loongarch_gpr rd, \ + int offset) \ +{ \ + insn->reg2i16_format.opcode = OP; \ + insn->reg2i16_format.immediate = offset; \ + insn->reg2i16_format.rj = rj; \ + insn->reg2i16_format.rd = rd; \ +} + +DEF_EMIT_REG2I16_FORMAT(jirl, jirl_op) + +#endif /* _ASM_INST_H */ diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h index f18683b95ea6..7319f6ced108 100644 --- a/tools/include/linux/bitops.h +++ b/tools/include/linux/bitops.h @@ -87,4 +87,15 @@ static inline __u32 rol32(__u32 word, unsigned int shift) return (word << shift) | (word >> ((-shift) & 31)); } +/** + * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit + * @value: value to sign extend + * @index: 0 based bit index (0<=index<64) to sign bit + */ +static __always_inline __s64 sign_extend64(__u64 value, int index) +{ + __u8 shift = 63 - index; + return (__s64)(value << shift) >> shift; +} + #endif diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c index cc74ba4e0f54..ff0b53144d12 100644 --- a/tools/objtool/arch/loongarch/decode.c +++ b/tools/objtool/arch/loongarch/decode.c @@ -1,6 +1,12 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include #include +#include +#include + +#ifndef EM_LOONGARCH 
+#define EM_LOONGARCH 258 +#endif int arch_ftrace_match(char *name) { @@ -39,21 +45,284 @@ int arch_decode_hint_reg(u8 sp_reg, int *base) return 0; } +static bool is_loongarch(const struct elf *elf) +{ + if (elf->ehdr.e_machine == EM_LOONGARCH) + return true; + + WARN("unexpected ELF machine type %d", elf->ehdr.e_machine); + return false; +} + +#define ADD_OP(op) \ + if (!(op = calloc(1, sizeof(*op)))) \ + return -1; \ + else for (*ops_list = op, ops_list = &op->next; op; op = NULL) + +static bool decode_insn_reg0i26_fomat(union loongarch_instruction inst, + struct instruction *insn) +{ + switch (inst.reg0i26_format.opcode) { + case b_op: + insn->type = INSN_JUMP_UNCONDITIONAL; + insn->immediate = sign_extend64(inst.reg0i26_format.immediate_h << 16 | + inst.reg0i26_format.immediate_l, 25); + break; + case bl_op: + insn->type = INSN_CALL; + insn->immediate = sign_extend64(inst.reg0i26_format.immediate_h << 16 | + inst.reg0i26_format.immediate_l, 25); + break; + default: + return false; + } + + return true; +} + +static bool decode_insn_reg1i21_fomat(union loongarch_instruction inst, + struct instruction *insn) +{ + switch (inst.reg1i21_format.opcode) { + case beqz_op: + case bnez_op: + case bceqz_op: + insn->type = INSN_JUMP_CONDITIONAL; + insn->immediate = sign_extend64(inst.reg1i21_format.immediate_h << 16 | + inst.reg1i21_format.immediate_l, 20); + break; + default: + return false; + } + + return true; +} + +static bool decode_insn_reg2i12_fomat(union loongarch_instruction inst, + struct instruction *insn, + struct stack_op **ops_list, + struct stack_op *op) +{ + switch (inst.reg2i12_format.opcode) { + case addid_op: + if ((inst.reg2i12_format.rd == CFI_SP) || (inst.reg2i12_format.rj == CFI_SP)) { + /* addi.d sp,sp,si12 or addi.d fp,sp,si12 */ + insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11); + ADD_OP(op) { + op->src.type = OP_SRC_ADD; + op->src.reg = inst.reg2i12_format.rj; + op->src.offset = insn->immediate; + op->dest.type = OP_DEST_REG; + 
op->dest.reg = inst.reg2i12_format.rd; + } + } + break; + case ldd_op: + if (inst.reg2i12_format.rj == CFI_SP) { + /* ld.d rd,sp,si12 */ + insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11); + ADD_OP(op) { + op->src.type = OP_SRC_REG_INDIRECT; + op->src.reg = CFI_SP; + op->src.offset = insn->immediate; + op->dest.type = OP_DEST_REG; + op->dest.reg = inst.reg2i12_format.rd; + } + } + break; + case std_op: + if (inst.reg2i12_format.rj == CFI_SP) { + /* st.d rd,sp,si12 */ + insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11); + ADD_OP(op) { + op->src.type = OP_SRC_REG; + op->src.reg = inst.reg2i12_format.rd; + op->dest.type = OP_DEST_REG_INDIRECT; + op->dest.reg = CFI_SP; + op->dest.offset = insn->immediate; + } + } + break; + case andi_op: + if (inst.reg2i12_format.rd == 0 && + inst.reg2i12_format.rj == 0 && + inst.reg2i12_format.immediate == 0) + /* andi r0,r0,0 */ + insn->type = INSN_NOP; + break; + default: + return false; + } + + return true; +} + +static bool decode_insn_reg2i14_fomat(union loongarch_instruction inst, + struct instruction *insn, + struct stack_op **ops_list, + struct stack_op *op) +{ + switch (inst.reg2i14_format.opcode) { + case ldptrd_op: + if (inst.reg2i14_format.rj == CFI_SP) { + /* ldptr.d rd,sp,si14 */ + insn->immediate = sign_extend64(inst.reg2i14_format.immediate, 13); + ADD_OP(op) { + op->src.type = OP_SRC_REG_INDIRECT; + op->src.reg = CFI_SP; + op->src.offset = insn->immediate; + op->dest.type = OP_DEST_REG; + op->dest.reg = inst.reg2i14_format.rd; + } + } + break; + case stptrd_op: + if (inst.reg2i14_format.rj == CFI_SP) { + /* stptr.d ra,sp,0 */ + if (inst.reg2i14_format.rd == LOONGARCH_GPR_RA && + inst.reg2i14_format.immediate == 0) + break; + + /* stptr.d rd,sp,si14 */ + insn->immediate = sign_extend64(inst.reg2i14_format.immediate, 13); + ADD_OP(op) { + op->src.type = OP_SRC_REG; + op->src.reg = inst.reg2i14_format.rd; + op->dest.type = OP_DEST_REG_INDIRECT; + op->dest.reg = CFI_SP; + op->dest.offset 
= insn->immediate; + } + } + break; + default: + return false; + } + + return true; +} + +static bool decode_insn_reg2i16_fomat(union loongarch_instruction inst, + struct instruction *insn) +{ + switch (inst.reg2i16_format.opcode) { + case jirl_op: + if (inst.reg2i16_format.rd == 0 && + inst.reg2i16_format.rj == CFI_RA && + inst.reg2i16_format.immediate == 0) { + /* jirl r0,ra,0 */ + insn->type = INSN_RETURN; + } else if (inst.reg2i16_format.rd == CFI_RA) { + /* jirl ra,rj,offs16 */ + insn->type = INSN_CALL_DYNAMIC; + } else if (inst.reg2i16_format.rd == CFI_A0 && + inst.reg2i16_format.immediate == 0) { + /* + * jirl a0,t0,0 + * this is a special case in loongarch_suspend_enter, + * just treat it as a call instruction. + */ + insn->type = INSN_CALL_DYNAMIC; + } else if (inst.reg2i16_format.rd == 0 && + inst.reg2i16_format.immediate == 0) { + /* jirl r0,rj,0 */ + insn->type = INSN_JUMP_DYNAMIC; + } else if (inst.reg2i16_format.rd == 0 && + inst.reg2i16_format.immediate != 0) { + /* + * jirl r0,t0,12 + * this is a rare case in JUMP_VIRT_ADDR, + * just ignore it due to it is harmless for tracing. 
+ */ + break; + } else { + /* jirl rd,rj,offs16 */ + insn->type = INSN_JUMP_UNCONDITIONAL; + insn->immediate = sign_extend64(inst.reg2i16_format.immediate, 15); + } + break; + case beq_op: + case bne_op: + case blt_op: + case bge_op: + case bltu_op: + case bgeu_op: + insn->type = INSN_JUMP_CONDITIONAL; + insn->immediate = sign_extend64(inst.reg2i16_format.immediate, 15); + break; + default: + return false; + } + + return true; +} + int arch_decode_instruction(struct objtool_file *file, const struct section *sec, unsigned long offset, unsigned int maxlen, struct instruction *insn) { + struct stack_op **ops_list = &insn->stack_ops; + const struct elf *elf = file->elf; + struct stack_op *op = NULL; + union loongarch_instruction inst; + + if (!is_loongarch(elf)) + return -1; + + if (maxlen < LOONGARCH_INSN_SIZE) + return 0; + + insn->len = LOONGARCH_INSN_SIZE; + insn->type = INSN_OTHER; + insn->immediate = 0; + + inst = *(union loongarch_instruction *)(sec->data->d_buf + offset); + + if (decode_insn_reg0i26_fomat(inst, insn)) + return 0; + if (decode_insn_reg1i21_fomat(inst, insn)) + return 0; + if (decode_insn_reg2i12_fomat(inst, insn, ops_list, op)) + return 0; + if (decode_insn_reg2i14_fomat(inst, insn, ops_list, op)) + return 0; + if (decode_insn_reg2i16_fomat(inst, insn)) + return 0; + + if (inst.word == 0) + insn->type = INSN_NOP; + else if (inst.reg0i15_format.opcode == break_op) { + /* break */ + insn->type = INSN_BUG; + } else if (inst.reg2_format.opcode == ertn_op) { + /* ertn */ + insn->type = INSN_RETURN; + } + return 0; } const char *arch_nop_insn(int len) { - return NULL; + static u32 nop; + + if (len != LOONGARCH_INSN_SIZE) + WARN("invalid NOP size: %d\n", len); + + nop = LOONGARCH_INSN_NOP; + + return (const char *)&nop; } const char *arch_ret_insn(int len) { - return NULL; + static u32 ret; + + if (len != LOONGARCH_INSN_SIZE) + WARN("invalid RET size: %d\n", len); + + emit_jirl((union loongarch_instruction *)&ret, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 
0); + + return (const char *)&ret; } void arch_initial_func_cfi_state(struct cfi_init_state *state) -- Gitee From 141df43972b8b0919a36cc428317c1135aac8219 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Mon, 11 Mar 2024 22:23:47 +0800 Subject: [PATCH 1721/2138] objtool/LoongArch: Enable orc to be built ANBZ: #11459 commit 3c7266cd7bc5e7843b631fea73cb0e82111e3158 upstream. Implement arch-specific init_orc_entry(), write_orc_entry(), reg_name(), orc_type_name(), print_reg() and orc_print_dump(), then set BUILD_ORC as y to build the orc related files. Co-developed-by: Jinyang He Signed-off-by: Jinyang He Co-developed-by: Youling Tang Signed-off-by: Youling Tang Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- tools/arch/loongarch/include/asm/orc_types.h | 58 +++++++ tools/objtool/Makefile | 5 + tools/objtool/arch/loongarch/Build | 1 + tools/objtool/arch/loongarch/decode.c | 16 ++ tools/objtool/arch/loongarch/orc.c | 171 +++++++++++++++++++ tools/objtool/orc_gen.c | 4 + 6 files changed, 255 insertions(+) create mode 100644 tools/arch/loongarch/include/asm/orc_types.h create mode 100644 tools/objtool/arch/loongarch/orc.c diff --git a/tools/arch/loongarch/include/asm/orc_types.h b/tools/arch/loongarch/include/asm/orc_types.h new file mode 100644 index 000000000000..caf1f71a1057 --- /dev/null +++ b/tools/arch/loongarch/include/asm/orc_types.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ORC_TYPES_H +#define _ORC_TYPES_H + +#include + +/* + * The ORC_REG_* registers are base registers which are used to find other + * registers on the stack. + * + * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the + * address of the previous frame: the caller's SP before it called the current + * function. + * + * ORC_REG_UNDEFINED means the corresponding register's value didn't change in + * the current frame. 
+ * + * The most commonly used base registers are SP and FP -- which the previous SP + * is usually based on -- and PREV_SP and UNDEFINED -- which the previous FP is + * usually based on. + * + * The rest of the base registers are needed for special cases like entry code + * and GCC realigned stacks. + */ +#define ORC_REG_UNDEFINED 0 +#define ORC_REG_PREV_SP 1 +#define ORC_REG_SP 2 +#define ORC_REG_FP 3 +#define ORC_REG_MAX 4 + +#define ORC_TYPE_UNDEFINED 0 +#define ORC_TYPE_END_OF_STACK 1 +#define ORC_TYPE_CALL 2 +#define ORC_TYPE_REGS 3 +#define ORC_TYPE_REGS_PARTIAL 4 + +#ifndef __ASSEMBLY__ +/* + * This struct is more or less a vastly simplified version of the DWARF Call + * Frame Information standard. It contains only the necessary parts of DWARF + * CFI, simplified for ease of access by the in-kernel unwinder. It tells the + * unwinder how to find the previous SP and FP (and sometimes entry regs) on + * the stack for a given code address. Each instance of the struct corresponds + * to one or more code locations. 
+ */ +struct orc_entry { + s16 sp_offset; + s16 fp_offset; + s16 ra_offset; + unsigned int sp_reg:4; + unsigned int fp_reg:4; + unsigned int ra_reg:4; + unsigned int type:3; + unsigned int signal:1; +}; +#endif /* __ASSEMBLY__ */ + +#endif /* _ORC_TYPES_H */ diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index fcedbad726c0..90f0826cc3ef 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -63,6 +63,11 @@ ifeq ($(SRCARCH),arm64) DYNAMIC_CHECK := y endif +ifeq ($(SRCARCH),loongarch) + BUILD_ORC := y + STATIC_CHECK := y +endif + export BUILD_ORC STATIC_CHECK DYNAMIC_CHECK export srctree OUTPUT CFLAGS SRCARCH AWK include $(srctree)/tools/build/Makefile.include diff --git a/tools/objtool/arch/loongarch/Build b/tools/objtool/arch/loongarch/Build index d24d5636a5b8..1d4b784b6887 100644 --- a/tools/objtool/arch/loongarch/Build +++ b/tools/objtool/arch/loongarch/Build @@ -1,2 +1,3 @@ objtool-y += decode.o objtool-y += special.o +objtool-y += orc.o diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c index ff0b53144d12..aee479d2191c 100644 --- a/tools/objtool/arch/loongarch/decode.c +++ b/tools/objtool/arch/loongarch/decode.c @@ -3,6 +3,8 @@ #include #include #include +#include +#include #ifndef EM_LOONGARCH #define EM_LOONGARCH 258 @@ -42,6 +44,20 @@ bool arch_callee_saved_reg(unsigned char reg) int arch_decode_hint_reg(u8 sp_reg, int *base) { + switch (sp_reg) { + case ORC_REG_UNDEFINED: + *base = CFI_UNDEFINED; + break; + case ORC_REG_SP: + *base = CFI_SP; + break; + case ORC_REG_FP: + *base = CFI_FP; + break; + default: + return -1; + } + return 0; } diff --git a/tools/objtool/arch/loongarch/orc.c b/tools/objtool/arch/loongarch/orc.c new file mode 100644 index 000000000000..873536d009d9 --- /dev/null +++ b/tools/objtool/arch/loongarch/orc.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include +#include + +#include +#include +#include +#include + +int init_orc_entry(struct orc_entry 
*orc, struct cfi_state *cfi, struct instruction *insn) +{ + struct cfi_reg *fp = &cfi->regs[CFI_FP]; + struct cfi_reg *ra = &cfi->regs[CFI_RA]; + + memset(orc, 0, sizeof(*orc)); + + if (!cfi) { + /* + * This is usually either unreachable nops/traps (which don't + * trigger unreachable instruction warnings), or + * STACK_FRAME_NON_STANDARD functions. + */ + orc->type = ORC_TYPE_UNDEFINED; + return 0; + } + + switch (cfi->type) { + case UNWIND_HINT_TYPE_UNDEFINED: + orc->type = ORC_TYPE_UNDEFINED; + return 0; + case UNWIND_HINT_TYPE_END_OF_STACK: + orc->type = ORC_TYPE_END_OF_STACK; + return 0; + case UNWIND_HINT_TYPE_CALL: + orc->type = ORC_TYPE_CALL; + break; + case UNWIND_HINT_TYPE_REGS: + orc->type = ORC_TYPE_REGS; + break; + case UNWIND_HINT_TYPE_REGS_PARTIAL: + orc->type = ORC_TYPE_REGS_PARTIAL; + break; + default: + WARN_INSN(insn, "unknown unwind hint type %d", cfi->type); + return -1; + } + + orc->signal = cfi->signal; + + switch (cfi->cfa.base) { + case CFI_SP: + orc->sp_reg = ORC_REG_SP; + break; + case CFI_FP: + orc->sp_reg = ORC_REG_FP; + break; + default: + WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base); + return -1; + } + + switch (fp->base) { + case CFI_UNDEFINED: + orc->fp_reg = ORC_REG_UNDEFINED; + orc->fp_offset = 0; + break; + case CFI_CFA: + orc->fp_reg = ORC_REG_PREV_SP; + orc->fp_offset = fp->offset; + break; + case CFI_FP: + orc->fp_reg = ORC_REG_FP; + break; + default: + WARN_INSN(insn, "unknown FP base reg %d", fp->base); + return -1; + } + + switch (ra->base) { + case CFI_UNDEFINED: + orc->ra_reg = ORC_REG_UNDEFINED; + orc->ra_offset = 0; + break; + case CFI_CFA: + orc->ra_reg = ORC_REG_PREV_SP; + orc->ra_offset = ra->offset; + break; + case CFI_FP: + orc->ra_reg = ORC_REG_FP; + break; + default: + WARN_INSN(insn, "unknown RA base reg %d", ra->base); + return -1; + } + + orc->sp_offset = cfi->cfa.offset; + + return 0; +} + +int write_orc_entry(struct elf *elf, struct section *orc_sec, + struct section *ip_sec, unsigned int idx, + 
struct section *insn_sec, unsigned long insn_off, + struct orc_entry *o) +{ + struct orc_entry *orc; + + /* populate ORC data */ + orc = (struct orc_entry *)orc_sec->data->d_buf + idx; + memcpy(orc, o, sizeof(*orc)); + + /* populate reloc for ip */ + if (!elf_init_reloc_text_sym(elf, ip_sec, idx * sizeof(int), idx, + insn_sec, insn_off)) + return -1; + + return 0; +} + +static const char *reg_name(unsigned int reg) +{ + switch (reg) { + case ORC_REG_SP: + return "sp"; + case ORC_REG_FP: + return "fp"; + case ORC_REG_PREV_SP: + return "prevsp"; + default: + return "?"; + } +} + +static const char *orc_type_name(unsigned int type) +{ + switch (type) { + case UNWIND_HINT_TYPE_CALL: + return "call"; + case UNWIND_HINT_TYPE_REGS: + return "regs"; + case UNWIND_HINT_TYPE_REGS_PARTIAL: + return "regs (partial)"; + default: + return "?"; + } +} + +static void print_reg(unsigned int reg, int offset) +{ + if (reg == ORC_REG_UNDEFINED) + printf(" (und) "); + else + printf("%s + %3d", reg_name(reg), offset); + +} + +void orc_print_dump(struct elf *dummy_elf, struct orc_entry *orc, int i) +{ + printf("type:%s", orc_type_name(orc[i].type)); + + printf(" sp:"); + print_reg(orc[i].sp_reg, orc[i].sp_offset); + + printf(" fp:"); + print_reg(orc[i].fp_reg, orc[i].fp_offset); + + printf(" ra:"); + print_reg(orc[i].ra_reg, orc[i].ra_offset); + + printf(" signal:%d\n", orc[i].signal); +} diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index 217a4e7d5617..b99ed15030c0 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -18,6 +18,10 @@ #define bp_reg fp_reg #endif +#ifdef __loongarch__ +#define bp_reg fp_reg +#endif + bool __weak orc_ignore_section(struct section *sec) { return false; -- Gitee From 87c136aaceb61bf8175c5742781705690797556a Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Mon, 11 Mar 2024 22:23:47 +0800 Subject: [PATCH 1722/2138] objtool: Check local label in add_dead_ends() ANBZ: #11459 commit d5ab2bc36c6b0ce2f3409f934ff9cdf6d6768fa2 
upstream. When update the latest upstream gcc and binutils, it generates more objtool warnings on LoongArch, like this: init/main.o: warning: objtool: unexpected relocation symbol type in .rela.discard.unreachable We can see that the reloc sym name is local label instead of section in relocation section '.rela.discard.unreachable', in this case, the reloc sym type is STT_NOTYPE instead of STT_SECTION. As suggested by Peter Zijlstra, we add a "local_label" member in struct symbol, then set it as true if symbol type is STT_NOTYPE and symbol name starts with ".L" string in classify_symbols(). Let's check reloc->sym->local_label to not return -1 in add_dead_ends(), and also use reloc->sym->offset instead of reloc addend which is 0 to find the corresponding instruction. At the same time, let's replace the variable "addend" with "offset" to reflect the reality. Here are some detailed info: [fedora@linux 6.8.test]$ gcc --version gcc (GCC) 14.0.1 20240129 (experimental) [fedora@linux 6.8.test]$ as --version GNU assembler (GNU Binutils) 2.42.50.20240129 [fedora@linux 6.8.test]$ readelf -r init/main.o | grep -A 2 "rela.discard.unreachable" Relocation section '.rela.discard.unreachable' at offset 0x6028 contains 1 entry: Offset Info Type Sym. Value Sym. 
Name + Addend 000000000000 00d900000063 R_LARCH_32_PCREL 00000000000002c4 .L500^B1 + 0 Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- tools/objtool/check.c | 40 +++++++++++++++++------------ tools/objtool/include/objtool/elf.h | 1 + 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 1b242c3c2d45..d7115b141917 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -20,6 +20,7 @@ #include #include #include +#include struct alternative { struct alternative *next; @@ -584,7 +585,7 @@ static int add_dead_ends(struct objtool_file *file) struct section *rsec; struct reloc *reloc; struct instruction *insn; - s64 addend; + unsigned long offset; /* * Check for manually annotated dead ends. @@ -594,27 +595,28 @@ static int add_dead_ends(struct objtool_file *file) goto reachable; for_each_reloc(rsec, reloc) { - - if (reloc->sym->type != STT_SECTION) { + if (reloc->sym->type == STT_SECTION) { + offset = reloc_addend(reloc); + } else if (reloc->sym->local_label) { + offset = reloc->sym->offset; + } else { WARN("unexpected relocation symbol type in %s", rsec->name); return -1; } - addend = reloc_addend(reloc); - - insn = find_insn(file, reloc->sym->sec, addend); + insn = find_insn(file, reloc->sym->sec, offset); if (insn) insn = prev_insn_same_sec(file, insn); - else if (addend == reloc->sym->sec->sh.sh_size) { + else if (offset == reloc->sym->sec->sh.sh_size) { insn = find_last_insn(file, reloc->sym->sec); if (!insn) { WARN("can't find unreachable insn at %s+0x%" PRIx64, - reloc->sym->sec->name, addend); + reloc->sym->sec->name, offset); return -1; } } else { WARN("can't find unreachable insn at %s+0x%" PRIx64, - reloc->sym->sec->name, addend); + reloc->sym->sec->name, offset); return -1; } @@ -633,27 +635,28 @@ static int add_dead_ends(struct objtool_file *file) return 0; for_each_reloc(rsec, reloc) { - - if 
(reloc->sym->type != STT_SECTION) { + if (reloc->sym->type == STT_SECTION) { + offset = reloc_addend(reloc); + } else if (reloc->sym->local_label) { + offset = reloc->sym->offset; + } else { WARN("unexpected relocation symbol type in %s", rsec->name); return -1; } - addend = reloc_addend(reloc); - - insn = find_insn(file, reloc->sym->sec, addend); + insn = find_insn(file, reloc->sym->sec, offset); if (insn) insn = prev_insn_same_sec(file, insn); - else if (addend == reloc->sym->sec->sh.sh_size) { + else if (offset == reloc->sym->sec->sh.sh_size) { insn = find_last_insn(file, reloc->sym->sec); if (!insn) { WARN("can't find reachable insn at %s+0x%" PRIx64, - reloc->sym->sec->name, addend); + reloc->sym->sec->name, offset); return -1; } } else { WARN("can't find reachable insn at %s+0x%" PRIx64, - reloc->sym->sec->name, addend); + reloc->sym->sec->name, offset); return -1; } @@ -2506,6 +2509,9 @@ static int classify_symbols(struct objtool_file *file) struct symbol *func; for_each_sym(file, func) { + if (func->type == STT_NOTYPE && strstarts(func->name, ".L")) + func->local_label = true; + if (func->bind != STB_GLOBAL) continue; diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h index 9f71e988eca4..2b8a69de4db8 100644 --- a/tools/objtool/include/objtool/elf.h +++ b/tools/objtool/include/objtool/elf.h @@ -67,6 +67,7 @@ struct symbol { u8 profiling_func : 1; u8 warned : 1; u8 embedded_insn : 1; + u8 local_label : 1; struct list_head pv_target; struct reloc *relocs; }; -- Gitee From ad375e5fedaa4c2b813d6ec2f4ae1383384e0131 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Mon, 11 Mar 2024 22:23:47 +0800 Subject: [PATCH 1723/2138] objtool: Check local label in read_unwind_hints() ANBZ: #11459 commit e91c5e4c21b0339376ee124cda5c9b27d41f2cbc upstream. 
When update the latest upstream gcc and binutils, it generates some objtool warnings on LoongArch, like this: arch/loongarch/kernel/entry.o: warning: objtool: ret_from_fork+0x0: unreachable instruction We can see that the reloc sym name is local label instead of section in relocation section '.rela.discard.unwind_hints', in this case, the reloc sym type is STT_NOTYPE instead of STT_SECTION. Let us check it to not return -1, then use reloc->sym->offset instead of reloc addend which is 0 to find the corresponding instruction. Here are some detailed info: [fedora@linux 6.8.test]$ gcc --version gcc (GCC) 14.0.1 20240129 (experimental) [fedora@linux 6.8.test]$ as --version GNU assembler (GNU Binutils) 2.42.50.20240129 [fedora@linux 6.8.test]$ readelf -r arch/loongarch/kernel/entry.o | grep -A 3 "rela.discard.unwind_hints" Relocation section '.rela.discard.unwind_hints' at offset 0x3a8 contains 7 entries: Offset Info Type Sym. Value Sym. Name + Addend 000000000000 000a00000063 R_LARCH_32_PCREL 0000000000000000 .Lhere_1 + 0 00000000000c 000b00000063 R_LARCH_32_PCREL 00000000000000a8 .Lhere_50 + 0 Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- tools/objtool/check.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index d7115b141917..02090727e9b1 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -2211,6 +2211,7 @@ static int read_unwind_hints(struct objtool_file *file) struct unwind_hint *hint; struct instruction *insn; struct reloc *reloc; + unsigned long offset; int i; sec = find_section_by_name(file->elf, ".discard.unwind_hints"); @@ -2238,7 +2239,16 @@ static int read_unwind_hints(struct objtool_file *file) return -1; } - insn = find_insn(file, reloc->sym->sec, reloc_addend(reloc)); + if (reloc->sym->type == STT_SECTION) { + offset = reloc_addend(reloc); + } else if (reloc->sym->local_label) 
{ + offset = reloc->sym->offset; + } else { + WARN("unexpected relocation symbol type in %s", sec->rsec->name); + return -1; + } + + insn = find_insn(file, reloc->sym->sec, offset); if (!insn) { WARN("can't find insn for unwind_hints[%d]", i); return -1; -- Gitee From fc9ebb52ca8766b110ff623c8e742db385a79a37 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Mon, 11 Mar 2024 22:23:47 +0800 Subject: [PATCH 1724/2138] LoongArch: Add ORC stack unwinder support ANBZ: #11459 commit cb8a2ef0848ca80d67d6d56e2df757cfdf6b3355 upstream. The kernel CONFIG_UNWINDER_ORC option enables the ORC unwinder, which is similar in concept to a DWARF unwinder. The difference is that the format of the ORC data is much simpler than DWARF, which in turn allows the ORC unwinder to be much simpler and faster. The ORC data consists of unwind tables which are generated by objtool. After analyzing all the code paths of a .o file, it determines information about the stack state at each instruction address in the file and outputs that information to the .orc_unwind and .orc_unwind_ip sections. The per-object ORC sections are combined at link time and are sorted and post-processed at boot time. The unwinder uses the resulting data to correlate instruction addresses with their stack states at run time. Most of the logic are similar with x86, in order to get ra info before ra is saved into stack, add ra_reg and ra_offset into orc_entry. At the same time, modify some arch-specific code to silence the objtool warnings. 
Co-developed-by: Jinyang He Signed-off-by: Jinyang He Co-developed-by: Youling Tang Signed-off-by: Youling Tang Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- arch/loongarch/Kconfig | 2 + arch/loongarch/Kconfig.debug | 11 + arch/loongarch/Makefile | 23 +- arch/loongarch/include/asm/Kbuild | 2 + arch/loongarch/include/asm/bug.h | 1 + arch/loongarch/include/asm/exception.h | 2 + arch/loongarch/include/asm/module.h | 7 + arch/loongarch/include/asm/orc_header.h | 18 + arch/loongarch/include/asm/orc_lookup.h | 31 ++ arch/loongarch/include/asm/orc_types.h | 58 +++ arch/loongarch/include/asm/stackframe.h | 3 + arch/loongarch/include/asm/unwind.h | 20 +- arch/loongarch/include/asm/unwind_hints.h | 28 ++ arch/loongarch/kernel/Makefile | 4 + arch/loongarch/kernel/entry.S | 5 + arch/loongarch/kernel/fpu.S | 7 + arch/loongarch/kernel/genex.S | 6 + arch/loongarch/kernel/lbt.S | 3 + arch/loongarch/kernel/mcount_dyn.S | 6 + arch/loongarch/kernel/module.c | 22 +- arch/loongarch/kernel/relocate_kernel.S | 7 +- arch/loongarch/kernel/rethook_trampoline.S | 1 + arch/loongarch/kernel/setup.c | 2 + arch/loongarch/kernel/stacktrace.c | 1 + arch/loongarch/kernel/traps.c | 42 +- arch/loongarch/kernel/unwind_orc.c | 528 +++++++++++++++++++++ arch/loongarch/kernel/vmlinux.lds.S | 3 + arch/loongarch/kvm/switch.S | 9 +- arch/loongarch/lib/clear_user.S | 3 + arch/loongarch/lib/copy_user.S | 3 + arch/loongarch/lib/memcpy.S | 3 + arch/loongarch/lib/memset.S | 3 + arch/loongarch/mm/tlb.c | 27 +- arch/loongarch/mm/tlbex.S | 9 + arch/loongarch/vdso/Makefile | 1 + include/linux/compiler.h | 9 + 36 files changed, 870 insertions(+), 40 deletions(-) create mode 100644 arch/loongarch/include/asm/orc_header.h create mode 100644 arch/loongarch/include/asm/orc_lookup.h create mode 100644 arch/loongarch/include/asm/orc_types.h create mode 100644 arch/loongarch/include/asm/unwind_hints.h create mode 100644 
arch/loongarch/kernel/unwind_orc.c diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 8c1854ef9561..2a25bf82b769 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -135,6 +135,7 @@ config LOONGARCH select HAVE_KVM select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI + select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS select HAVE_PCI select HAVE_PERF_EVENTS select HAVE_PERF_REGS @@ -145,6 +146,7 @@ config LOONGARCH select HAVE_SAMPLE_FTRACE_DIRECT select HAVE_SAMPLE_FTRACE_DIRECT_MULTI select HAVE_SETUP_PER_CPU_AREA if NUMA + select HAVE_STACK_VALIDATION if HAVE_OBJTOOL select HAVE_STACKPROTECTOR select ARCH_HAS_PHYS_TO_DMA select HAVE_SYSCALL_TRACEPOINTS diff --git a/arch/loongarch/Kconfig.debug b/arch/loongarch/Kconfig.debug index 8d36aab53008..98d60630c3d4 100644 --- a/arch/loongarch/Kconfig.debug +++ b/arch/loongarch/Kconfig.debug @@ -26,4 +26,15 @@ config UNWINDER_PROLOGUE Some of the addresses it reports may be incorrect (but better than the Guess unwinder). +config UNWINDER_ORC + bool "ORC unwinder" + select OBJTOOL + help + This option enables the ORC (Oops Rewind Capability) unwinder for + unwinding kernel stack traces. It uses a custom data format which is + a simplified version of the DWARF Call Frame Information standard. + + Enabling this option will increase the kernel's runtime memory usage + by roughly 2-4MB, depending on your kernel config. 
+ endchoice diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index 81e8089c9c4f..e05c10b04456 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -25,6 +25,18 @@ endif 32bit-emul = elf32loongarch 64bit-emul = elf64loongarch +ifdef CONFIG_UNWINDER_ORC +orc_hash_h := arch/$(SRCARCH)/include/generated/asm/orc_hash.h +orc_hash_sh := $(srctree)/scripts/orc_hash.sh +targets += $(orc_hash_h) +quiet_cmd_orc_hash = GEN $@ + cmd_orc_hash = mkdir -p $(dir $@); \ + $(CONFIG_SHELL) $(orc_hash_sh) < $< > $@ +$(orc_hash_h): $(srctree)/arch/loongarch/include/asm/orc_types.h $(orc_hash_sh) FORCE + $(call if_changed,orc_hash) +archprepare: $(orc_hash_h) +endif + ifdef CONFIG_DYNAMIC_FTRACE KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY CC_FLAGS_FTRACE := -fpatchable-function-entry=2 @@ -68,8 +80,6 @@ LDFLAGS_vmlinux += -static -n -nostdlib ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS cflags-y += $(call cc-option,-mexplicit-relocs) KBUILD_CFLAGS_KERNEL += $(call cc-option,-mdirect-extern-access) -KBUILD_AFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax) -KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax) else cflags-y += $(call cc-option,-mno-explicit-relocs) KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel @@ -78,6 +88,15 @@ KBUILD_AFLAGS_MODULE += -Wa,-mla-global-with-abs KBUILD_CFLAGS_MODULE += -fplt -Wa,-mla-global-with-abs,-mla-local-with-abs endif +KBUILD_AFLAGS += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax) +KBUILD_CFLAGS += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax) +KBUILD_AFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)-mthin-add-sub) +KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)-mthin-add-sub) + +ifdef CONFIG_OBJTOOL +KBUILD_CFLAGS += -fno-jump-tables +endif + ifeq ($(CONFIG_RELOCATABLE),y) KBUILD_CFLAGS_KERNEL += -fPIE LDFLAGS_vmlinux += -static -pie 
--no-dynamic-linker -z notext $(call ld-option, --apply-dynamic-relocs) diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild index 22991a6f0e2b..aa4ab6ccc0c0 100644 --- a/arch/loongarch/include/asm/Kbuild +++ b/arch/loongarch/include/asm/Kbuild @@ -1,4 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 +generated-y += orc_hash.h + generic-y += dma-contiguous.h generic-y += mcs_spinlock.h generic-y += parport.h diff --git a/arch/loongarch/include/asm/bug.h b/arch/loongarch/include/asm/bug.h index d4ca3ba25418..08388876ade4 100644 --- a/arch/loongarch/include/asm/bug.h +++ b/arch/loongarch/include/asm/bug.h @@ -44,6 +44,7 @@ do { \ instrumentation_begin(); \ __BUG_FLAGS(BUGFLAG_WARNING|(flags)); \ + annotate_reachable(); \ instrumentation_end(); \ } while (0) diff --git a/arch/loongarch/include/asm/exception.h b/arch/loongarch/include/asm/exception.h index af74a3fdcad1..c6d20736fd92 100644 --- a/arch/loongarch/include/asm/exception.h +++ b/arch/loongarch/include/asm/exception.h @@ -6,6 +6,8 @@ #include #include +extern void *exception_table[]; + void show_registers(struct pt_regs *regs); asmlinkage void cache_parity_error(void); diff --git a/arch/loongarch/include/asm/module.h b/arch/loongarch/include/asm/module.h index 2ecd82bb64e1..f33f3fd32ecc 100644 --- a/arch/loongarch/include/asm/module.h +++ b/arch/loongarch/include/asm/module.h @@ -6,6 +6,7 @@ #define _ASM_MODULE_H #include +#include #include #define RELA_STACK_DEPTH 16 @@ -21,6 +22,12 @@ struct mod_arch_specific { struct mod_section plt; struct mod_section plt_idx; +#ifdef CONFIG_UNWINDER_ORC + unsigned int num_orcs; + int *orc_unwind_ip; + struct orc_entry *orc_unwind; +#endif + /* For CONFIG_DYNAMIC_FTRACE */ struct plt_entry *ftrace_trampolines; }; diff --git a/arch/loongarch/include/asm/orc_header.h b/arch/loongarch/include/asm/orc_header.h new file mode 100644 index 000000000000..f9d509c3fd70 --- /dev/null +++ b/arch/loongarch/include/asm/orc_header.h @@ -0,0 +1,18 @@ +/* 
SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef _ORC_HEADER_H +#define _ORC_HEADER_H + +#include +#include +#include + +/* + * The header is currently a 20-byte hash of the ORC entry definition; see + * scripts/orc_hash.sh. + */ +#define ORC_HEADER \ + __used __section(".orc_header") __aligned(4) \ + static const u8 orc_header[] = { ORC_HASH } + +#endif /* _ORC_HEADER_H */ diff --git a/arch/loongarch/include/asm/orc_lookup.h b/arch/loongarch/include/asm/orc_lookup.h new file mode 100644 index 000000000000..b02e6357def4 --- /dev/null +++ b/arch/loongarch/include/asm/orc_lookup.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ORC_LOOKUP_H +#define _ORC_LOOKUP_H + +/* + * This is a lookup table for speeding up access to the .orc_unwind table. + * Given an input address offset, the corresponding lookup table entry + * specifies a subset of the .orc_unwind table to search. + * + * Each block represents the end of the previous range and the start of the + * next range. An extra block is added to give the last range an end. + * + * The block size should be a power of 2 to avoid a costly 'div' instruction. + * + * A block size of 256 was chosen because it roughly doubles unwinder + * performance while only adding ~5% to the ORC data footprint. 
+ */ +#define LOOKUP_BLOCK_ORDER 8 +#define LOOKUP_BLOCK_SIZE (1 << LOOKUP_BLOCK_ORDER) + +#ifndef LINKER_SCRIPT + +extern unsigned int orc_lookup[]; +extern unsigned int orc_lookup_end[]; + +#define LOOKUP_START_IP (unsigned long)_stext +#define LOOKUP_STOP_IP (unsigned long)_etext + +#endif /* LINKER_SCRIPT */ + +#endif /* _ORC_LOOKUP_H */ diff --git a/arch/loongarch/include/asm/orc_types.h b/arch/loongarch/include/asm/orc_types.h new file mode 100644 index 000000000000..caf1f71a1057 --- /dev/null +++ b/arch/loongarch/include/asm/orc_types.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ORC_TYPES_H +#define _ORC_TYPES_H + +#include + +/* + * The ORC_REG_* registers are base registers which are used to find other + * registers on the stack. + * + * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the + * address of the previous frame: the caller's SP before it called the current + * function. + * + * ORC_REG_UNDEFINED means the corresponding register's value didn't change in + * the current frame. + * + * The most commonly used base registers are SP and FP -- which the previous SP + * is usually based on -- and PREV_SP and UNDEFINED -- which the previous FP is + * usually based on. + * + * The rest of the base registers are needed for special cases like entry code + * and GCC realigned stacks. + */ +#define ORC_REG_UNDEFINED 0 +#define ORC_REG_PREV_SP 1 +#define ORC_REG_SP 2 +#define ORC_REG_FP 3 +#define ORC_REG_MAX 4 + +#define ORC_TYPE_UNDEFINED 0 +#define ORC_TYPE_END_OF_STACK 1 +#define ORC_TYPE_CALL 2 +#define ORC_TYPE_REGS 3 +#define ORC_TYPE_REGS_PARTIAL 4 + +#ifndef __ASSEMBLY__ +/* + * This struct is more or less a vastly simplified version of the DWARF Call + * Frame Information standard. It contains only the necessary parts of DWARF + * CFI, simplified for ease of access by the in-kernel unwinder. 
It tells the + * unwinder how to find the previous SP and FP (and sometimes entry regs) on + * the stack for a given code address. Each instance of the struct corresponds + * to one or more code locations. + */ +struct orc_entry { + s16 sp_offset; + s16 fp_offset; + s16 ra_offset; + unsigned int sp_reg:4; + unsigned int fp_reg:4; + unsigned int ra_reg:4; + unsigned int type:3; + unsigned int signal:1; +}; +#endif /* __ASSEMBLY__ */ + +#endif /* _ORC_TYPES_H */ diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h index 35ba862f2025..66736837085b 100644 --- a/arch/loongarch/include/asm/stackframe.h +++ b/arch/loongarch/include/asm/stackframe.h @@ -13,6 +13,7 @@ #include #include #include +#include /* Make the addition of cfi info a little easier. */ .macro cfi_rel_offset reg offset=0 docfi=0 @@ -173,6 +174,7 @@ li.w t0, CSR_CRMD_WE csrxchg t0, t0, LOONGARCH_CSR_CRMD #endif + UNWIND_HINT_REGS .endm .macro SAVE_ALL docfi=0 @@ -230,6 +232,7 @@ .macro RESTORE_SP_AND_RET docfi=0 cfi_ld sp, PT_R3, \docfi + UNWIND_HINT_FUNC ertn .endm diff --git a/arch/loongarch/include/asm/unwind.h b/arch/loongarch/include/asm/unwind.h index b9dce87afd2e..40a6763c5aec 100644 --- a/arch/loongarch/include/asm/unwind.h +++ b/arch/loongarch/include/asm/unwind.h @@ -16,6 +16,7 @@ enum unwinder_type { UNWINDER_GUESS, UNWINDER_PROLOGUE, + UNWINDER_ORC, }; struct unwind_state { @@ -24,7 +25,7 @@ struct unwind_state { struct task_struct *task; bool first, error, reset; int graph_idx; - unsigned long sp, pc, ra; + unsigned long sp, fp, pc, ra; }; bool default_next_frame(struct unwind_state *state); @@ -61,14 +62,17 @@ static __always_inline void __unwind_start(struct unwind_state *state, state->sp = regs->regs[3]; state->pc = regs->csr_era; state->ra = regs->regs[1]; + state->fp = regs->regs[22]; } else if (task && task != current) { state->sp = thread_saved_fp(task); state->pc = thread_saved_ra(task); state->ra = 0; + state->fp = 0; } else { state->sp = 
(unsigned long)__builtin_frame_address(0); state->pc = (unsigned long)__builtin_return_address(0); state->ra = 0; + state->fp = 0; } state->task = task; get_stack_info(state->sp, state->task, &state->stack_info); @@ -77,6 +81,18 @@ static __always_inline void __unwind_start(struct unwind_state *state, static __always_inline unsigned long __unwind_get_return_address(struct unwind_state *state) { - return unwind_done(state) ? 0 : state->pc; + if (unwind_done(state)) + return 0; + + return __kernel_text_address(state->pc) ? state->pc : 0; } + +#ifdef CONFIG_UNWINDER_ORC +void unwind_init(void); +void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size); +#else +static inline void unwind_init(void) {} +static inline void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size, void *orc, size_t orc_size) {} +#endif + #endif /* _ASM_UNWIND_H */ diff --git a/arch/loongarch/include/asm/unwind_hints.h b/arch/loongarch/include/asm/unwind_hints.h new file mode 100644 index 000000000000..a01086ad9dde --- /dev/null +++ b/arch/loongarch/include/asm/unwind_hints.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_UNWIND_HINTS_H +#define _ASM_LOONGARCH_UNWIND_HINTS_H + +#include +#include + +#ifdef __ASSEMBLY__ + +.macro UNWIND_HINT_UNDEFINED + UNWIND_HINT type=UNWIND_HINT_TYPE_UNDEFINED +.endm + +.macro UNWIND_HINT_END_OF_STACK + UNWIND_HINT type=UNWIND_HINT_TYPE_END_OF_STACK +.endm + +.macro UNWIND_HINT_REGS + UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_REGS +.endm + +.macro UNWIND_HINT_FUNC + UNWIND_HINT sp_reg=ORC_REG_SP type=UNWIND_HINT_TYPE_CALL +.endm + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_LOONGARCH_UNWIND_HINTS_H */ diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index 6c148ccea674..caf9a0b5e62d 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -3,6 +3,8 @@ # Makefile for the Linux/LoongArch kernel. 
# +OBJECT_FILES_NON_STANDARD_head.o := y + extra-y := vmlinux.lds obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \ @@ -22,6 +24,7 @@ obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o CFLAGS_module.o += $(call cc-option,-Wno-override-init,) CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,) +CFLAGS_traps.o += $(call cc-option,-Wno-override-init,) CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,) ifdef CONFIG_FUNCTION_TRACER @@ -64,6 +67,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o +obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S index 1ec8e4c4cc2b..48e7e34e355e 100644 --- a/arch/loongarch/kernel/entry.S +++ b/arch/loongarch/kernel/entry.S @@ -14,11 +14,13 @@ #include #include #include +#include .text .cfi_sections .debug_frame .align 5 SYM_CODE_START(handle_syscall) + UNWIND_HINT_UNDEFINED csrrd t0, PERCPU_BASE_KS la.pcrel t1, kernelsp add.d t1, t1, t0 @@ -57,6 +59,7 @@ SYM_CODE_START(handle_syscall) cfi_st fp, PT_R22 SAVE_STATIC + UNWIND_HINT_REGS #ifdef CONFIG_KGDB li.w t1, CSR_CRMD_WE @@ -75,6 +78,7 @@ SYM_CODE_END(handle_syscall) _ASM_NOKPROBE(handle_syscall) SYM_CODE_START(ret_from_fork) + UNWIND_HINT_REGS bl schedule_tail # a0 = struct task_struct *prev move a0, sp bl syscall_exit_to_user_mode @@ -84,6 +88,7 @@ SYM_CODE_START(ret_from_fork) SYM_CODE_END(ret_from_fork) SYM_CODE_START(ret_from_kernel_thread) + UNWIND_HINT_REGS bl schedule_tail # a0 = struct task_struct *prev move a0, s1 jirl ra, s0, 0 diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index 4382e36ae3d4..69a85f2479fb 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -15,6 +15,7 @@ #include #include #include +#include #define FPU_REG_WIDTH 
8 #define LSX_REG_WIDTH 16 @@ -526,3 +527,9 @@ SYM_FUNC_END(_restore_lasx_context) .L_fpu_fault: li.w a0, -EFAULT # failure jr ra + +#ifdef CONFIG_CPU_HAS_LBT +STACK_FRAME_NON_STANDARD _restore_fp +STACK_FRAME_NON_STANDARD _restore_lsx +STACK_FRAME_NON_STANDARD _restore_lasx +#endif diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S index 2bb3aa2dcfcb..86d5d90ebefe 100644 --- a/arch/loongarch/kernel/genex.S +++ b/arch/loongarch/kernel/genex.S @@ -32,6 +32,7 @@ SYM_FUNC_START(__arch_cpu_idle) SYM_FUNC_END(__arch_cpu_idle) SYM_CODE_START(handle_vint) + UNWIND_HINT_UNDEFINED BACKUP_T0T1 SAVE_ALL la_abs t1, __arch_cpu_idle @@ -49,6 +50,7 @@ SYM_CODE_START(handle_vint) SYM_CODE_END(handle_vint) SYM_CODE_START(except_vec_cex) + UNWIND_HINT_UNDEFINED b cache_parity_error SYM_CODE_END(except_vec_cex) @@ -67,6 +69,7 @@ SYM_CODE_END(except_vec_cex) .macro BUILD_HANDLER exception handler prep .align 5 SYM_CODE_START(handle_\exception) + UNWIND_HINT_UNDEFINED 666: BACKUP_T0T1 SAVE_ALL @@ -77,7 +80,9 @@ SYM_CODE_END(except_vec_cex) 668: RESTORE_ALL_AND_RET SYM_CODE_END(handle_\exception) + .pushsection ".data", "aw", %progbits SYM_DATA(unwind_hint_\exception, .word 668b - 666b) + .popsection .endm BUILD_HANDLER ade ade badv @@ -94,6 +99,7 @@ SYM_CODE_END(except_vec_cex) BUILD_HANDLER reserved reserved none /* others */ SYM_CODE_START(handle_sys) + UNWIND_HINT_UNDEFINED la_abs t0, handle_syscall jr t0 SYM_CODE_END(handle_sys) diff --git a/arch/loongarch/kernel/lbt.S b/arch/loongarch/kernel/lbt.S index 9c75120a26d8..001f061d226a 100644 --- a/arch/loongarch/kernel/lbt.S +++ b/arch/loongarch/kernel/lbt.S @@ -11,6 +11,7 @@ #include #include #include +#include #define SCR_REG_WIDTH 8 @@ -153,3 +154,5 @@ SYM_FUNC_END(_restore_ftop_context) .L_lbt_fault: li.w a0, -EFAULT # failure jr ra + +STACK_FRAME_NON_STANDARD _restore_ftop_context diff --git a/arch/loongarch/kernel/mcount_dyn.S b/arch/loongarch/kernel/mcount_dyn.S index 482aa553aa2d..0c65cf09110c 100644 --- 
a/arch/loongarch/kernel/mcount_dyn.S +++ b/arch/loongarch/kernel/mcount_dyn.S @@ -73,6 +73,7 @@ SYM_FUNC_START(ftrace_stub) SYM_FUNC_END(ftrace_stub) SYM_CODE_START(ftrace_common) + UNWIND_HINT_UNDEFINED PTR_ADDI a0, ra, -8 /* arg0: ip */ move a1, t0 /* arg1: parent_ip */ la.pcrel t1, function_trace_op @@ -113,12 +114,14 @@ ftrace_common_return: SYM_CODE_END(ftrace_common) SYM_CODE_START(ftrace_caller) + UNWIND_HINT_UNDEFINED ftrace_regs_entry allregs=0 b ftrace_common SYM_CODE_END(ftrace_caller) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS SYM_CODE_START(ftrace_regs_caller) + UNWIND_HINT_UNDEFINED ftrace_regs_entry allregs=1 b ftrace_common SYM_CODE_END(ftrace_regs_caller) @@ -126,6 +129,7 @@ SYM_CODE_END(ftrace_regs_caller) #ifdef CONFIG_FUNCTION_GRAPH_TRACER SYM_CODE_START(ftrace_graph_caller) + UNWIND_HINT_UNDEFINED PTR_L a0, sp, PT_ERA PTR_ADDI a0, a0, -8 /* arg0: self_addr */ PTR_ADDI a1, sp, PT_R1 /* arg1: parent */ @@ -134,6 +138,7 @@ SYM_CODE_START(ftrace_graph_caller) SYM_CODE_END(ftrace_graph_caller) SYM_CODE_START(return_to_handler) + UNWIND_HINT_UNDEFINED /* Save return value regs */ PTR_ADDI sp, sp, -FGRET_REGS_SIZE PTR_S a0, sp, FGRET_REGS_A0 @@ -155,6 +160,7 @@ SYM_CODE_END(return_to_handler) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS SYM_CODE_START(ftrace_stub_direct_tramp) + UNWIND_HINT_UNDEFINED jr t0 SYM_CODE_END(ftrace_stub_direct_tramp) #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c index b13b2858fe39..c7d0338d12c1 100644 --- a/arch/loongarch/kernel/module.c +++ b/arch/loongarch/kernel/module.c @@ -20,6 +20,7 @@ #include #include #include +#include static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top) { @@ -515,15 +516,28 @@ static void module_init_ftrace_plt(const Elf_Ehdr *hdr, int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) { - const Elf_Shdr *s, *se; const char *secstrs = (void *)hdr + 
sechdrs[hdr->e_shstrndx].sh_offset; + const Elf_Shdr *s, *alt = NULL, *orc = NULL, *orc_ip = NULL, *ftrace = NULL; - for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) { + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { if (!strcmp(".altinstructions", secstrs + s->sh_name)) - apply_alternatives((void *)s->sh_addr, (void *)s->sh_addr + s->sh_size); + alt = s; + if (!strcmp(".orc_unwind", secstrs + s->sh_name)) + orc = s; + if (!strcmp(".orc_unwind_ip", secstrs + s->sh_name)) + orc_ip = s; if (!strcmp(".ftrace_trampoline", secstrs + s->sh_name)) - module_init_ftrace_plt(hdr, s, mod); + ftrace = s; } + if (alt) + apply_alternatives((void *)alt->sh_addr, (void *)alt->sh_addr + alt->sh_size); + + if (orc && orc_ip) + unwind_module_init(mod, (void *)orc_ip->sh_addr, orc_ip->sh_size, (void *)orc->sh_addr, orc->sh_size); + + if (ftrace) + module_init_ftrace_plt(hdr, ftrace, mod); + return 0; } diff --git a/arch/loongarch/kernel/relocate_kernel.S b/arch/loongarch/kernel/relocate_kernel.S index f49f6b053763..84e6de2fd973 100644 --- a/arch/loongarch/kernel/relocate_kernel.S +++ b/arch/loongarch/kernel/relocate_kernel.S @@ -15,6 +15,7 @@ #include SYM_CODE_START(relocate_new_kernel) + UNWIND_HINT_UNDEFINED /* * a0: EFI boot flag for the new kernel * a1: Command line pointer for the new kernel @@ -90,6 +91,7 @@ SYM_CODE_END(relocate_new_kernel) * then start at the entry point from LOONGARCH_IOCSR_MBUF0. 
*/ SYM_CODE_START(kexec_smp_wait) + UNWIND_HINT_UNDEFINED 1: li.w t0, 0x100 /* wait for init loop */ 2: addi.w t0, t0, -1 /* limit mailbox access */ bnez t0, 2b @@ -106,6 +108,5 @@ SYM_CODE_END(kexec_smp_wait) relocate_new_kernel_end: -SYM_DATA_START(relocate_new_kernel_size) - PTR relocate_new_kernel_end - relocate_new_kernel -SYM_DATA_END(relocate_new_kernel_size) + .section ".data" +SYM_DATA(relocate_new_kernel_size, .long relocate_new_kernel_end - relocate_new_kernel) diff --git a/arch/loongarch/kernel/rethook_trampoline.S b/arch/loongarch/kernel/rethook_trampoline.S index bd5772c96338..d4ceb2fa2a5c 100644 --- a/arch/loongarch/kernel/rethook_trampoline.S +++ b/arch/loongarch/kernel/rethook_trampoline.S @@ -76,6 +76,7 @@ .endm SYM_CODE_START(arch_rethook_trampoline) + UNWIND_HINT_UNDEFINED addi.d sp, sp, -PT_SIZE save_all_base_regs diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index a8de2f809403..37d61e240201 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -48,6 +48,7 @@ #include #include #include +#include #include "legacy_boot.h" #define SMBIOS_BIOSSIZE_OFFSET 0x09 @@ -694,6 +695,7 @@ static void __init prefill_possible_map(void) void __init setup_arch(char **cmdline_p) { cpu_probe(); + unwind_init(); init_environ(); efi_init(); diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c index f623feb2129f..eaec82e02c92 100644 --- a/arch/loongarch/kernel/stacktrace.c +++ b/arch/loongarch/kernel/stacktrace.c @@ -29,6 +29,7 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, regs->csr_era = thread_saved_ra(task); } regs->regs[1] = 0; + regs->regs[22] = 0; } for (unwind_start(&state, task, regs); diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c index d59052c03d9b..c57b4134f3e8 100644 --- a/arch/loongarch/kernel/traps.c +++ b/arch/loongarch/kernel/traps.c @@ -53,6 +53,32 @@ #include "access-helper.h" +void 
*exception_table[EXCCODE_INT_START] = { + [0 ... EXCCODE_INT_START - 1] = handle_reserved, + + [EXCCODE_TLBI] = handle_tlb_load, + [EXCCODE_TLBL] = handle_tlb_load, + [EXCCODE_TLBS] = handle_tlb_store, + [EXCCODE_TLBM] = handle_tlb_modify, + [EXCCODE_TLBNR] = handle_tlb_protect, + [EXCCODE_TLBNX] = handle_tlb_protect, + [EXCCODE_TLBPE] = handle_tlb_protect, + [EXCCODE_ADE] = handle_ade, + [EXCCODE_ALE] = handle_ale, + [EXCCODE_BCE] = handle_bce, + [EXCCODE_SYS] = handle_sys, + [EXCCODE_BP] = handle_bp, + [EXCCODE_INE] = handle_ri, + [EXCCODE_IPE] = handle_ri, + [EXCCODE_FPDIS] = handle_fpu, + [EXCCODE_LSXDIS] = handle_lsx, + [EXCCODE_LASXDIS] = handle_lasx, + [EXCCODE_FPE] = handle_fpe, + [EXCCODE_WATCH] = handle_watch, + [EXCCODE_BTDIS] = handle_lbt, +}; +EXPORT_SYMBOL_GPL(exception_table); + static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, const char *loglvl, bool user) { @@ -1155,19 +1181,9 @@ void __init trap_init(void) for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++) set_handler(i * VECSIZE, handle_vint, VECSIZE); - set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE); - set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE); - set_handler(EXCCODE_BCE * VECSIZE, handle_bce, VECSIZE); - set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE); - set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE); - set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE); - set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE); - set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE); - set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE); - set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE); - set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE); - set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE); - set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE); + /* Set exception vector handler */ + for (i = EXCCODE_ADE; i <= EXCCODE_BTDIS; i++) + set_handler(i * VECSIZE, exception_table[i], VECSIZE); 
cache_error_setup(); diff --git a/arch/loongarch/kernel/unwind_orc.c b/arch/loongarch/kernel/unwind_orc.c new file mode 100644 index 000000000000..b25722876331 --- /dev/null +++ b/arch/loongarch/kernel/unwind_orc.c @@ -0,0 +1,528 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +ORC_HEADER; + +#define orc_warn(fmt, ...) \ + printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__) + +extern int __start_orc_unwind_ip[]; +extern int __stop_orc_unwind_ip[]; +extern struct orc_entry __start_orc_unwind[]; +extern struct orc_entry __stop_orc_unwind[]; + +static bool orc_init __ro_after_init; +static unsigned int lookup_num_blocks __ro_after_init; + +/* Fake frame pointer entry -- used as a fallback for generated code */ +static struct orc_entry orc_fp_entry = { + .sp_reg = ORC_REG_FP, + .sp_offset = 16, + .fp_reg = ORC_REG_PREV_SP, + .fp_offset = -16, + .ra_reg = ORC_REG_PREV_SP, + .ra_offset = -8, + .type = ORC_TYPE_CALL +}; + +/* + * If we crash with IP==0, the last successfully executed instruction + * was probably an indirect function call with a NULL function pointer, + * and we don't have unwind information for NULL. + * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function + * pointer into its parent and then continue normally from there. 
+ */ +static struct orc_entry orc_null_entry = { + .sp_reg = ORC_REG_SP, + .sp_offset = sizeof(long), + .fp_reg = ORC_REG_UNDEFINED, + .type = ORC_TYPE_CALL +}; + +static inline unsigned long orc_ip(const int *ip) +{ + return (unsigned long)ip + *ip; +} + +static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table, + unsigned int num_entries, unsigned long ip) +{ + int *first = ip_table; + int *mid = first, *found = first; + int *last = ip_table + num_entries - 1; + + if (!num_entries) + return NULL; + + /* + * Do a binary range search to find the rightmost duplicate of a given + * starting address. Some entries are section terminators which are + * "weak" entries for ensuring there are no gaps. They should be + * ignored when they conflict with a real entry. + */ + while (first <= last) { + mid = first + ((last - first) / 2); + + if (orc_ip(mid) <= ip) { + found = mid; + first = mid + 1; + } else + last = mid - 1; + } + + return u_table + (found - ip_table); +} + +#ifdef CONFIG_MODULES +static struct orc_entry *orc_module_find(unsigned long ip) +{ + struct module *mod; + + mod = __module_address(ip); + if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip) + return NULL; + + return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind, mod->arch.num_orcs, ip); +} +#else +static struct orc_entry *orc_module_find(unsigned long ip) +{ + return NULL; +} +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE +static struct orc_entry *orc_find(unsigned long ip); + +/* + * Ftrace dynamic trampolines do not have orc entries of their own. + * But they are copies of the ftrace entries that are static and + * defined in ftrace_*.S, which do have orc entries. + * + * If the unwinder comes across a ftrace trampoline, then find the + * ftrace function that was used to create it, and use that ftrace + * function's orc entry, as the placement of the return code in + * the stack will be identical. 
+ */ +static struct orc_entry *orc_ftrace_find(unsigned long ip) +{ + struct ftrace_ops *ops; + unsigned long tramp_addr, offset; + + ops = ftrace_ops_trampoline(ip); + if (!ops) + return NULL; + + /* Set tramp_addr to the start of the code copied by the trampoline */ + if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) + tramp_addr = (unsigned long)ftrace_regs_caller; + else + tramp_addr = (unsigned long)ftrace_caller; + + /* Now place tramp_addr to the location within the trampoline ip is at */ + offset = ip - ops->trampoline; + tramp_addr += offset; + + /* Prevent unlikely recursion */ + if (ip == tramp_addr) + return NULL; + + return orc_find(tramp_addr); +} +#else +static struct orc_entry *orc_ftrace_find(unsigned long ip) +{ + return NULL; +} +#endif + +static struct orc_entry *orc_find(unsigned long ip) +{ + static struct orc_entry *orc; + + if (ip == 0) + return &orc_null_entry; + + /* For non-init vmlinux addresses, use the fast lookup table: */ + if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) { + unsigned int idx, start, stop; + + idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE; + + if (unlikely((idx >= lookup_num_blocks-1))) { + orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n", + idx, lookup_num_blocks, (void *)ip); + return NULL; + } + + start = orc_lookup[idx]; + stop = orc_lookup[idx + 1] + 1; + + if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) || + (__start_orc_unwind + stop > __stop_orc_unwind))) { + orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n", + idx, lookup_num_blocks, start, stop, (void *)ip); + return NULL; + } + + return __orc_find(__start_orc_unwind_ip + start, + __start_orc_unwind + start, stop - start, ip); + } + + /* vmlinux .init slow lookup: */ + if (is_kernel_inittext(ip)) + return __orc_find(__start_orc_unwind_ip, __start_orc_unwind, + __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); + + /* Module lookup: */ + orc = orc_module_find(ip); + if (orc) + return orc; + + return 
orc_ftrace_find(ip); +} + +#ifdef CONFIG_MODULES + +static DEFINE_MUTEX(sort_mutex); +static int *cur_orc_ip_table = __start_orc_unwind_ip; +static struct orc_entry *cur_orc_table = __start_orc_unwind; + +static void orc_sort_swap(void *_a, void *_b, int size) +{ + int delta = _b - _a; + int *a = _a, *b = _b, tmp; + struct orc_entry *orc_a, *orc_b; + + /* Swap the .orc_unwind_ip entries: */ + tmp = *a; + *a = *b + delta; + *b = tmp - delta; + + /* Swap the corresponding .orc_unwind entries: */ + orc_a = cur_orc_table + (a - cur_orc_ip_table); + orc_b = cur_orc_table + (b - cur_orc_ip_table); + swap(*orc_a, *orc_b); +} + +static int orc_sort_cmp(const void *_a, const void *_b) +{ + const int *a = _a, *b = _b; + unsigned long a_val = orc_ip(a); + unsigned long b_val = orc_ip(b); + struct orc_entry *orc_a; + + if (a_val > b_val) + return 1; + if (a_val < b_val) + return -1; + + /* + * The "weak" section terminator entries need to always be first + * to ensure the lookup code skips them in favor of real entries. + * These terminator entries exist to handle any gaps created by + * whitelisted .o files which didn't get objtool generation. + */ + orc_a = cur_orc_table + (a - cur_orc_ip_table); + + return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1; +} + +void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size, + void *_orc, size_t orc_size) +{ + int *orc_ip = _orc_ip; + struct orc_entry *orc = _orc; + unsigned int num_entries = orc_ip_size / sizeof(int); + + WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 || + orc_size % sizeof(*orc) != 0 || + num_entries != orc_size / sizeof(*orc)); + + /* + * The 'cur_orc_*' globals allow the orc_sort_swap() callback to + * associate an .orc_unwind_ip table entry with its corresponding + * .orc_unwind entry so they can both be swapped. 
+ */ + mutex_lock(&sort_mutex); + cur_orc_ip_table = orc_ip; + cur_orc_table = orc; + sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap); + mutex_unlock(&sort_mutex); + + mod->arch.orc_unwind_ip = orc_ip; + mod->arch.orc_unwind = orc; + mod->arch.num_orcs = num_entries; +} +#endif + +void __init unwind_init(void) +{ + int i; + size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind; + size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip; + size_t num_entries = orc_ip_size / sizeof(int); + struct orc_entry *orc; + + if (!num_entries || orc_ip_size % sizeof(int) != 0 || + orc_size % sizeof(struct orc_entry) != 0 || + num_entries != orc_size / sizeof(struct orc_entry)) { + orc_warn("WARNING: Bad or missing .orc_unwind table. Disabling unwinder.\n"); + return; + } + + /* + * Note, the orc_unwind and orc_unwind_ip tables were already + * sorted at build time via the 'sorttable' tool. + * It's ready for binary search straight away, no need to sort it. + */ + + /* Initialize the fast lookup table: */ + lookup_num_blocks = orc_lookup_end - orc_lookup; + for (i = 0; i < lookup_num_blocks-1; i++) { + orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, + num_entries, LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i)); + if (!orc) { + orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n"); + return; + } + + orc_lookup[i] = orc - __start_orc_unwind; + } + + /* Initialize the ending block: */ + orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries, LOOKUP_STOP_IP); + if (!orc) { + orc_warn("WARNING: Corrupt .orc_unwind table. 
Disabling unwinder.\n"); + return; + } + orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind; + + orc_init = true; +} + +static inline bool on_stack(struct stack_info *info, unsigned long addr, size_t len) +{ + unsigned long begin = info->begin; + unsigned long end = info->end; + + return (info->type != STACK_TYPE_UNKNOWN && + addr >= begin && addr < end && addr + len > begin && addr + len <= end); +} + +static bool stack_access_ok(struct unwind_state *state, unsigned long addr, size_t len) +{ + struct stack_info *info = &state->stack_info; + + if (on_stack(info, addr, len)) + return true; + + return !get_stack_info(addr, state->task, info) && on_stack(info, addr, len); +} + +unsigned long unwind_get_return_address(struct unwind_state *state) +{ + return __unwind_get_return_address(state); +} +EXPORT_SYMBOL_GPL(unwind_get_return_address); + +void unwind_start(struct unwind_state *state, struct task_struct *task, + struct pt_regs *regs) +{ + __unwind_start(state, task, regs); + state->type = UNWINDER_ORC; + if (!unwind_done(state) && !__kernel_text_address(state->pc)) + unwind_next_frame(state); +} +EXPORT_SYMBOL_GPL(unwind_start); + +static bool is_entry_func(unsigned long addr) +{ + extern u32 kernel_entry; + extern u32 kernel_entry_end; + + return addr >= (unsigned long)&kernel_entry && addr < (unsigned long)&kernel_entry_end; +} + +static inline unsigned long bt_address(unsigned long ra) +{ + extern unsigned long eentry; + + if (__kernel_text_address(ra)) + return ra; + + if (__module_text_address(ra)) + return ra; + + if (ra >= eentry && ra < eentry + EXCCODE_INT_END * VECSIZE) { + unsigned long func; + unsigned long type = (ra - eentry) / VECSIZE; + unsigned long offset = (ra - eentry) % VECSIZE; + + switch (type) { + case 0 ... EXCCODE_INT_START - 1: + func = (unsigned long)exception_table[type]; + break; + case EXCCODE_INT_START ... 
EXCCODE_INT_END: + func = (unsigned long)handle_vint; + break; + default: + func = (unsigned long)handle_reserved; + break; + } + + return func + offset; + } + + return ra; +} + +bool unwind_next_frame(struct unwind_state *state) +{ + unsigned long *p, pc; + struct pt_regs *regs; + struct orc_entry *orc; + struct stack_info *info = &state->stack_info; + + if (unwind_done(state)) + return false; + + /* Don't let modules unload while we're reading their ORC data. */ + preempt_disable(); + + if (is_entry_func(state->pc)) + goto end; + + orc = orc_find(state->pc); + if (!orc) { + /* + * As a fallback, try to assume this code uses a frame pointer. + * This is useful for generated code, like BPF, which ORC + * doesn't know about. This is just a guess, so the rest of + * the unwind is no longer considered reliable. + */ + orc = &orc_fp_entry; + state->error = true; + } else { + if (orc->type == ORC_TYPE_UNDEFINED) + goto err; + + if (orc->type == ORC_TYPE_END_OF_STACK) + goto end; + } + + switch (orc->sp_reg) { + case ORC_REG_SP: + if (info->type == STACK_TYPE_IRQ && state->sp == info->end) + orc->type = ORC_TYPE_REGS; + else + state->sp = state->sp + orc->sp_offset; + break; + case ORC_REG_FP: + state->sp = state->fp; + break; + default: + orc_warn("unknown SP base reg %d at %pB\n", orc->sp_reg, (void *)state->pc); + goto err; + } + + switch (orc->fp_reg) { + case ORC_REG_PREV_SP: + p = (unsigned long *)(state->sp + orc->fp_offset); + if (!stack_access_ok(state, (unsigned long)p, sizeof(unsigned long))) + goto err; + + state->fp = *p; + break; + case ORC_REG_UNDEFINED: + /* Nothing. 
*/ + break; + default: + orc_warn("unknown FP base reg %d at %pB\n", orc->fp_reg, (void *)state->pc); + goto err; + } + + switch (orc->type) { + case ORC_TYPE_CALL: + if (orc->ra_reg == ORC_REG_PREV_SP) { + p = (unsigned long *)(state->sp + orc->ra_offset); + if (!stack_access_ok(state, (unsigned long)p, sizeof(unsigned long))) + goto err; + + pc = unwind_graph_addr(state, *p, state->sp); + pc -= LOONGARCH_INSN_SIZE; + } else if (orc->ra_reg == ORC_REG_UNDEFINED) { + if (!state->ra || state->ra == state->pc) + goto err; + + pc = unwind_graph_addr(state, state->ra, state->sp); + pc -= LOONGARCH_INSN_SIZE; + state->ra = 0; + } else { + orc_warn("unknown ra base reg %d at %pB\n", orc->ra_reg, (void *)state->pc); + goto err; + } + break; + case ORC_TYPE_REGS: + if (info->type == STACK_TYPE_IRQ && state->sp == info->end) + regs = (struct pt_regs *)info->next_sp; + else + regs = (struct pt_regs *)state->sp; + + if (!stack_access_ok(state, (unsigned long)regs, sizeof(*regs))) + goto err; + + if ((info->end == (unsigned long)regs + sizeof(*regs)) && + !regs->regs[3] && !regs->regs[1]) + goto end; + + if (user_mode(regs)) + goto end; + + pc = regs->csr_era; + if (!__kernel_text_address(pc)) + goto err; + + state->sp = regs->regs[3]; + state->ra = regs->regs[1]; + state->fp = regs->regs[22]; + get_stack_info(state->sp, state->task, info); + + break; + default: + orc_warn("unknown .orc_unwind entry type %d at %pB\n", orc->type, (void *)state->pc); + goto err; + } + + state->pc = bt_address(pc); + if (!state->pc) { + pr_err("cannot find unwind pc at %pK\n", (void *)pc); + goto err; + } + + if (!__kernel_text_address(state->pc)) + goto err; + + preempt_enable(); + return true; + +err: + state->error = true; + +end: + preempt_enable(); + state->stack_info.type = STACK_TYPE_UNKNOWN; + return false; +} +EXPORT_SYMBOL_GPL(unwind_next_frame); diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S index d5afd0c80a49..3c7595342730 100644 --- 
a/arch/loongarch/kernel/vmlinux.lds.S +++ b/arch/loongarch/kernel/vmlinux.lds.S @@ -2,6 +2,7 @@ #include #include #include +#include #define PAGE_SIZE _PAGE_SIZE #define RO_EXCEPTION_TABLE_ALIGN 4 @@ -123,6 +124,8 @@ SECTIONS } #endif + ORC_UNWIND_TABLE + .sdata : { *(.sdata) } diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S index 3634431db18a..80e988985a6a 100644 --- a/arch/loongarch/kvm/switch.S +++ b/arch/loongarch/kvm/switch.S @@ -8,7 +8,7 @@ #include #include #include -#include +#include #define HGPR_OFFSET(x) (PT_R0 + 8*x) #define GGPR_OFFSET(x) (KVM_ARCH_GGPR + 8*x) @@ -112,6 +112,7 @@ .text .cfi_sections .debug_frame SYM_CODE_START(kvm_exc_entry) + UNWIND_HINT_UNDEFINED csrwr a2, KVM_TEMP_KS csrrd a2, KVM_VCPU_KS addi.d a2, a2, KVM_VCPU_ARCH @@ -273,3 +274,9 @@ SYM_FUNC_END(kvm_restore_lasx) .section ".rodata" SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry) SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest) + +#ifdef CONFIG_CPU_HAS_LBT +STACK_FRAME_NON_STANDARD kvm_restore_fpu +STACK_FRAME_NON_STANDARD kvm_restore_lsx +STACK_FRAME_NON_STANDARD kvm_restore_lasx +#endif diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S index be741544e62b..7a0db643b286 100644 --- a/arch/loongarch/lib/clear_user.S +++ b/arch/loongarch/lib/clear_user.S @@ -10,6 +10,7 @@ #include #include #include +#include SYM_FUNC_START(__clear_user) /* @@ -204,3 +205,5 @@ SYM_FUNC_START(__clear_user_fast) _asm_extable 28b, .Lsmall_fixup _asm_extable 29b, .Lexit SYM_FUNC_END(__clear_user_fast) + +STACK_FRAME_NON_STANDARD __clear_user_fast diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S index feec3d362803..095ce9181c6c 100644 --- a/arch/loongarch/lib/copy_user.S +++ b/arch/loongarch/lib/copy_user.S @@ -10,6 +10,7 @@ #include #include #include +#include SYM_FUNC_START(__copy_user) /* @@ -278,3 +279,5 @@ SYM_FUNC_START(__copy_user_fast) _asm_extable 58b, .Lexit 
_asm_extable 59b, .Lexit SYM_FUNC_END(__copy_user_fast) + +STACK_FRAME_NON_STANDARD __copy_user_fast diff --git a/arch/loongarch/lib/memcpy.S b/arch/loongarch/lib/memcpy.S index fa1148878d2b..9517a2f961af 100644 --- a/arch/loongarch/lib/memcpy.S +++ b/arch/loongarch/lib/memcpy.S @@ -9,6 +9,7 @@ #include #include #include +#include .section .noinstr.text, "ax" @@ -197,3 +198,5 @@ SYM_FUNC_START(__memcpy_fast) jr ra SYM_FUNC_END(__memcpy_fast) _ASM_NOKPROBE(__memcpy_fast) + +STACK_FRAME_NON_STANDARD __memcpy_small diff --git a/arch/loongarch/lib/memset.S b/arch/loongarch/lib/memset.S index 06d3ca54cbfe..df3846620553 100644 --- a/arch/loongarch/lib/memset.S +++ b/arch/loongarch/lib/memset.S @@ -9,6 +9,7 @@ #include #include #include +#include .macro fill_to_64 r0 bstrins.d \r0, \r0, 15, 8 @@ -166,3 +167,5 @@ SYM_FUNC_START(__memset_fast) jr ra SYM_FUNC_END(__memset_fast) _ASM_NOKPROBE(__memset_fast) + +STACK_FRAME_NON_STANDARD __memset_fast diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c index 526310ec73c7..5503d4e4b096 100644 --- a/arch/loongarch/mm/tlb.c +++ b/arch/loongarch/mm/tlb.c @@ -9,8 +9,9 @@ #include #include -#include #include +#include +#include #include #include #include @@ -266,24 +267,20 @@ static void setup_tlb_handler(int cpu) setup_ptwalker(); local_flush_tlb_all(); + if (cpu_has_ptw) { + exception_table[EXCCODE_TLBI] = handle_tlb_load_ptw; + exception_table[EXCCODE_TLBL] = handle_tlb_load_ptw; + exception_table[EXCCODE_TLBS] = handle_tlb_store_ptw; + exception_table[EXCCODE_TLBM] = handle_tlb_modify_ptw; + } + /* The tlb handlers are generated only once */ if (cpu == 0) { memcpy((void *)tlbrentry, handle_tlb_refill, 0x80); local_flush_icache_range(tlbrentry, tlbrentry + 0x80); - if (!cpu_has_ptw) { - set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load, VECSIZE); - set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load, VECSIZE); - set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store, VECSIZE); - set_handler(EXCCODE_TLBM * VECSIZE, 
handle_tlb_modify, VECSIZE); - } else { - set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load_ptw, VECSIZE); - set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load_ptw, VECSIZE); - set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store_ptw, VECSIZE); - set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify_ptw, VECSIZE); - } - set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE); - set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE); - set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE); + + for (int i = EXCCODE_TLBL; i <= EXCCODE_TLBPE; i++) + set_handler(i * VECSIZE, exception_table[i], VECSIZE); } else { int vec_sz __maybe_unused; void *addr __maybe_unused; diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S index d5d682f3d29f..a44387b838af 100644 --- a/arch/loongarch/mm/tlbex.S +++ b/arch/loongarch/mm/tlbex.S @@ -18,6 +18,7 @@ .macro tlb_do_page_fault, write SYM_CODE_START(tlb_do_page_fault_\write) + UNWIND_HINT_UNDEFINED SAVE_ALL csrrd a2, LOONGARCH_CSR_BADV move a0, sp @@ -32,6 +33,7 @@ tlb_do_page_fault 1 SYM_CODE_START(handle_tlb_protect) + UNWIND_HINT_UNDEFINED BACKUP_T0T1 SAVE_ALL move a0, sp @@ -44,6 +46,7 @@ SYM_CODE_START(handle_tlb_protect) SYM_CODE_END(handle_tlb_protect) SYM_CODE_START(handle_tlb_load) + UNWIND_HINT_UNDEFINED csrwr t0, EXCEPTION_KS0 csrwr t1, EXCEPTION_KS1 csrwr ra, EXCEPTION_KS2 @@ -190,6 +193,7 @@ nopage_tlb_load: SYM_CODE_END(handle_tlb_load) SYM_CODE_START(handle_tlb_load_ptw) + UNWIND_HINT_UNDEFINED csrwr t0, LOONGARCH_CSR_KS0 csrwr t1, LOONGARCH_CSR_KS1 la_abs t0, tlb_do_page_fault_0 @@ -197,6 +201,7 @@ SYM_CODE_START(handle_tlb_load_ptw) SYM_CODE_END(handle_tlb_load_ptw) SYM_CODE_START(handle_tlb_store) + UNWIND_HINT_UNDEFINED csrwr t0, EXCEPTION_KS0 csrwr t1, EXCEPTION_KS1 csrwr ra, EXCEPTION_KS2 @@ -346,6 +351,7 @@ nopage_tlb_store: SYM_CODE_END(handle_tlb_store) SYM_CODE_START(handle_tlb_store_ptw) + UNWIND_HINT_UNDEFINED csrwr t0, LOONGARCH_CSR_KS0 csrwr t1, 
LOONGARCH_CSR_KS1 la_abs t0, tlb_do_page_fault_1 @@ -353,6 +359,7 @@ SYM_CODE_START(handle_tlb_store_ptw) SYM_CODE_END(handle_tlb_store_ptw) SYM_CODE_START(handle_tlb_modify) + UNWIND_HINT_UNDEFINED csrwr t0, EXCEPTION_KS0 csrwr t1, EXCEPTION_KS1 csrwr ra, EXCEPTION_KS2 @@ -500,6 +507,7 @@ nopage_tlb_modify: SYM_CODE_END(handle_tlb_modify) SYM_CODE_START(handle_tlb_modify_ptw) + UNWIND_HINT_UNDEFINED csrwr t0, LOONGARCH_CSR_KS0 csrwr t1, LOONGARCH_CSR_KS1 la_abs t0, tlb_do_page_fault_1 @@ -507,6 +515,7 @@ SYM_CODE_START(handle_tlb_modify_ptw) SYM_CODE_END(handle_tlb_modify_ptw) SYM_CODE_START(handle_tlb_refill) + UNWIND_HINT_UNDEFINED csrwr t0, LOONGARCH_CSR_TLBRSAVE csrrd t0, LOONGARCH_CSR_PGD lddir t0, t0, 3 diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile index 1a0f6ca0247b..9cdb53f2e3b0 100644 --- a/arch/loongarch/vdso/Makefile +++ b/arch/loongarch/vdso/Makefile @@ -4,6 +4,7 @@ KASAN_SANITIZE := n UBSAN_SANITIZE := n KCOV_INSTRUMENT := n +OBJECT_FILES_NON_STANDARD := y # Include the generic Makefile to check the built vdso. 
include $(srctree)/lib/vdso/Makefile diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 5a4054f17cbc..0517d344baa0 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -116,6 +116,14 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, */ #define __stringify_label(n) #n +#define __annotate_reachable(c) ({ \ + asm volatile(__stringify_label(c) ":\n\t" \ + ".pushsection .discard.reachable\n\t" \ + ".long " __stringify_label(c) "b - .\n\t" \ + ".popsection\n\t"); \ +}) +#define annotate_reachable() __annotate_reachable(__COUNTER__) + #define __annotate_unreachable(c) ({ \ asm volatile(__stringify_label(c) ":\n\t" \ ".pushsection .discard.unreachable\n\t" \ @@ -128,6 +136,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #define __annotate_jump_table __section(".rodata..c_jump_table") #else /* !CONFIG_OBJTOOL */ +#define annotate_reachable() #define annotate_unreachable() #define __annotate_jump_table #endif /* CONFIG_OBJTOOL */ -- Gitee From f028d236dacc447c6966bbdfe3cd1663068f51ec Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Wed, 17 Jan 2024 12:43:08 +0800 Subject: [PATCH 1725/2138] LoongArch: Fix definition of ftrace_regs_set_instruction_pointer() ANBZ: #11459 commit 91af17cd7d03db8836554c91ba7c38b0817aa980 upstream. The current definition of ftrace_regs_set_instruction_pointer() is not correct. Obviously, this function is used to set instruction pointer but not return value, so it should call instruction_pointer_set() instead of regs_set_return_value(). There is no side effect by now because it is only used for kernel live- patching which is not supported, so fix it to avoid failure when testing livepatch in the future. 
Fixes: 6fbff14a6382 ("LoongArch: ftrace: Abstract DYNAMIC_FTRACE_WITH_ARGS accesses") Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- arch/loongarch/include/asm/ftrace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/loongarch/include/asm/ftrace.h b/arch/loongarch/include/asm/ftrace.h index a11996eb5892..de891c2c83d4 100644 --- a/arch/loongarch/include/asm/ftrace.h +++ b/arch/loongarch/include/asm/ftrace.h @@ -63,7 +63,7 @@ ftrace_regs_get_instruction_pointer(struct ftrace_regs *fregs) static __always_inline void ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs, unsigned long ip) { - regs_set_return_value(&fregs->regs, ip); + instruction_pointer_set(&fregs->regs, ip); } #define ftrace_regs_get_argument(fregs, n) \ -- Gitee From 0cc787abccc8d10adb33c9280b57d39663927a5a Mon Sep 17 00:00:00 2001 From: Jinyang He Date: Mon, 11 Mar 2024 22:23:47 +0800 Subject: [PATCH 1726/2138] LoongArch: Add kernel livepatching support ANBZ: #11459 commit 199cc14cb4f1cb8668be45f67af41755ed5f0175 upstream. The arch-specified function ftrace_regs_set_instruction_pointer() has been implemented in arch/loongarch/include/asm/ftrace.h, so here only implement arch_stack_walk_reliable() function. 
Here are the test logs: [root@linux fedora]# cat /proc/cmdline BOOT_IMAGE=/vmlinuz-6.8.0-rc2 root=/dev/sda3 [root@linux fedora]# modprobe livepatch-sample [root@linux fedora]# cat /proc/cmdline this has been live patched [root@linux fedora]# echo 0 > /sys/kernel/livepatch/livepatch_sample/enabled [root@linux fedora]# rmmod livepatch_sample [root@linux fedora]# cat /proc/cmdline BOOT_IMAGE=/vmlinuz-6.8.0-rc2 root=/dev/sda3 [root@linux fedora]# dmesg -t | tail -5 livepatch: enabling patch 'livepatch_sample' livepatch: 'livepatch_sample': starting patching transition livepatch: 'livepatch_sample': patching complete livepatch: 'livepatch_sample': starting unpatching transition livepatch: 'livepatch_sample': unpatching complete Signed-off-by: Jinyang He Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- arch/loongarch/Kconfig | 4 +++ arch/loongarch/include/asm/thread_info.h | 2 ++ arch/loongarch/kernel/stacktrace.c | 40 ++++++++++++++++++++++++ 3 files changed, 46 insertions(+) diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 2a25bf82b769..0dab7540a633 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -133,6 +133,7 @@ config LOONGARCH select HAVE_KPROBES_ON_FTRACE select HAVE_KRETPROBES select HAVE_KVM + select HAVE_LIVEPATCH select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS @@ -141,6 +142,7 @@ config LOONGARCH select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RELIABLE_STACKTRACE if UNWINDER_ORC select HAVE_RETHOOK select HAVE_RSEQ select HAVE_SAMPLE_FTRACE_DIRECT @@ -641,6 +643,8 @@ config PARAVIRT_TIME_ACCOUNTING If in doubt, say N here. 
+source "kernel/livepatch/Kconfig" + endmenu config ARCH_SELECT_MEMORY_MODEL diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h index 8cb653d49a54..8bf0e6f51546 100644 --- a/arch/loongarch/include/asm/thread_info.h +++ b/arch/loongarch/include/asm/thread_info.h @@ -86,6 +86,7 @@ register unsigned long current_stack_pointer __asm__("$sp"); #define TIF_LASX_CTX_LIVE 18 /* LASX context must be preserved */ #define TIF_USEDLBT 19 /* LBT was used by this task this quantum (SMP) */ #define TIF_LBT_CTX_LIVE 20 /* LBT context must be preserved */ +#define TIF_PATCH_PENDING 21 /* pending live patching update */ #define _TIF_SIGPENDING (1<regs[3] = (unsigned long)__builtin_frame_address(0); + regs->csr_era = (unsigned long)__builtin_return_address(0); + } else { + regs->regs[3] = thread_saved_fp(task); + regs->csr_era = thread_saved_ra(task); + } + regs->regs[1] = 0; + regs->regs[22] = 0; + + for (unwind_start(&state, task, regs); + !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) { + addr = unwind_get_return_address(&state); + + /* + * A NULL or invalid return address probably means there's some + * generated code which __kernel_text_address() doesn't know about. + */ + if (!addr) + return -EINVAL; + + if (!consume_entry(cookie, addr)) + return -EINVAL; + } + + /* Check for stack corruption */ + if (unwind_error(&state)) + return -EINVAL; + + return 0; +} + static int copy_stack_frame(unsigned long fp, struct stack_frame *frame) { -- Gitee From 3d288202a2b97c2449fd8fa6231a127d7870f6e2 Mon Sep 17 00:00:00 2001 From: Xi Ruoyao Date: Fri, 21 Jun 2024 10:18:40 +0800 Subject: [PATCH 1727/2138] LoongArch: Only allow OBJTOOL & ORC unwinder if toolchain supports -mthin-add-sub ANBZ: #11459 commit 120dd4118e58dbda2ddb1dcf55f3c56cdfe8cee0 upstream. GAS <= 2.41 does not support generating R_LARCH_{32,64}_PCREL for "label - ." and it generates R_LARCH_{ADD,SUB}{32,64} pairs instead. 
Objtool cannot handle R_LARCH_{ADD,SUB}{32,64} pair in __jump_table (static key implementation) and etc. so it will produce some warnings. This is causing the kernel CI systems to complain everywhere. For GAS we can check if -mthin-add-sub option is available to know if R_LARCH_{32,64}_PCREL are supported. For Clang, we require Clang >= 18 and Clang >= 17 already supports R_LARCH_{32,64}_PCREL. But unfortunately Clang has some other issues, so we disable objtool for Clang at present. Note that __jump_table here is not generated by the compiler, so -fno-jump-table is completely irrelevant for this issue. Fixes: cb8a2ef0848c ("LoongArch: Add ORC stack unwinder support") Closes: https://lore.kernel.org/loongarch/Zl5m1ZlVmGKitAof@yujie-X299/ Closes: https://lore.kernel.org/loongarch/ZlY1gDDPi_mNrwJ1@slm.duckdns.org/ Closes: https://lore.kernel.org/loongarch/1717478006.038663-1-hengqi@linux.alibaba.com/ Link: https://sourceware.org/git/?p=binutils-gdb.git;a=commitdiff;h=816029e06768 Link: https://github.com/llvm/llvm-project/commit/42cb3c6346fc Signed-off-by: Xi Ruoyao Signed-off-by: Huacai Chen Signed-off-by: Tiezhu Yang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- arch/loongarch/Kconfig | 5 ++++- arch/loongarch/Kconfig.debug | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 0dab7540a633..cf36ce211d2b 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -136,7 +136,7 @@ config LOONGARCH select HAVE_LIVEPATCH select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI - select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS + select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG select HAVE_PCI select HAVE_PERF_EVENTS select HAVE_PERF_REGS @@ -263,6 +263,9 @@ config AS_HAS_EXPLICIT_RELOCS config AS_HAS_FCSR_CLASS def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0) +config AS_HAS_THIN_ADD_SUB + def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) 
+ config AS_HAS_LSX_EXTENSION def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0) diff --git a/arch/loongarch/Kconfig.debug b/arch/loongarch/Kconfig.debug index 98d60630c3d4..8b2ce5b5d43e 100644 --- a/arch/loongarch/Kconfig.debug +++ b/arch/loongarch/Kconfig.debug @@ -28,6 +28,7 @@ config UNWINDER_PROLOGUE config UNWINDER_ORC bool "ORC unwinder" + depends on HAVE_OBJTOOL select OBJTOOL help This option enables the ORC (Oops Rewind Capability) unwinder for -- Gitee From 4c3bd2bd437cfa45e9b7a7c37987c90b062237a6 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Mon, 26 Aug 2024 23:11:32 +0800 Subject: [PATCH 1728/2138] LoongArch: Add ifdefs to fix LSX and LASX related warnings ANBZ: #11459 commit 80376323e2b6a4559f86b2b4d864848ac25cb054 upstream. There exist some warnings when building kernel if CONFIG_CPU_HAS_LBT is set but CONFIG_CPU_HAS_LSX and CONFIG_CPU_HAS_LASX are not set. In this case, there are no definitions of _restore_lsx & _restore_lasx and there are also no definitions of kvm_restore_lsx & kvm_restore_lasx in fpu.S and switch.S respectively, just add some ifdefs to fix these warnings. AS arch/loongarch/kernel/fpu.o arch/loongarch/kernel/fpu.o: warning: objtool: unexpected relocation symbol type in .rela.discard.func_stack_frame_non_standard: 0 arch/loongarch/kernel/fpu.o: warning: objtool: unexpected relocation symbol type in .rela.discard.func_stack_frame_non_standard: 0 AS [M] arch/loongarch/kvm/switch.o arch/loongarch/kvm/switch.o: warning: objtool: unexpected relocation symbol type in .rela.discard.func_stack_frame_non_standard: 0 arch/loongarch/kvm/switch.o: warning: objtool: unexpected relocation symbol type in .rela.discard.func_stack_frame_non_standard: 0 MODPOST Module.symvers ERROR: modpost: "kvm_restore_lsx" [arch/loongarch/kvm/kvm.ko] undefined! ERROR: modpost: "kvm_restore_lasx" [arch/loongarch/kvm/kvm.ko] undefined! 
Cc: stable@vger.kernel.org # 6.9+ Fixes: cb8a2ef0848c ("LoongArch: Add ORC stack unwinder support") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202408120955.qls5oNQY-lkp@intel.com/ Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- arch/loongarch/kernel/fpu.S | 4 ++++ arch/loongarch/kvm/switch.S | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index 69a85f2479fb..6ab640101457 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -530,6 +530,10 @@ SYM_FUNC_END(_restore_lasx_context) #ifdef CONFIG_CPU_HAS_LBT STACK_FRAME_NON_STANDARD _restore_fp +#ifdef CONFIG_CPU_HAS_LSX STACK_FRAME_NON_STANDARD _restore_lsx +#endif +#ifdef CONFIG_CPU_HAS_LASX STACK_FRAME_NON_STANDARD _restore_lasx #endif +#endif diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S index 80e988985a6a..0c292f818492 100644 --- a/arch/loongarch/kvm/switch.S +++ b/arch/loongarch/kvm/switch.S @@ -277,6 +277,10 @@ SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest) #ifdef CONFIG_CPU_HAS_LBT STACK_FRAME_NON_STANDARD kvm_restore_fpu +#ifdef CONFIG_CPU_HAS_LSX STACK_FRAME_NON_STANDARD kvm_restore_lsx +#endif +#ifdef CONFIG_CPU_HAS_LASX STACK_FRAME_NON_STANDARD kvm_restore_lasx #endif +#endif -- Gitee From b1bf8ef1d4ba98f230426f2b03893064693b08b3 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Tue, 17 Sep 2024 22:23:09 +0800 Subject: [PATCH 1729/2138] objtool: Handle frame pointer related instructions ANBZ: #11459 commit da5b2ad1c2f18834cb1ce429e2e5a5cf5cbdf21b upstream. 
After commit a0f7085f6a63 ("LoongArch: Add RANDOMIZE_KSTACK_OFFSET support"), there are three new instructions "addi.d $fp, $sp, 32", "sub.d $sp, $sp, $t0" and "addi.d $sp, $fp, -32" for the secondary stack in do_syscall(), then there is a objtool warning "return with modified stack frame" and no handle_syscall() which is the previous frame of do_syscall() in the call trace when executing the command "echo l > /proc/sysrq-trigger". objdump shows something like this: 0000000000000000 : 0: 02ff8063 addi.d $sp, $sp, -32 4: 29c04076 st.d $fp, $sp, 16 8: 29c02077 st.d $s0, $sp, 8 c: 29c06061 st.d $ra, $sp, 24 10: 02c08076 addi.d $fp, $sp, 32 ... 74: 0011b063 sub.d $sp, $sp, $t0 ... a8: 4c000181 jirl $ra, $t0, 0 ... dc: 02ff82c3 addi.d $sp, $fp, -32 e0: 28c06061 ld.d $ra, $sp, 24 e4: 28c04076 ld.d $fp, $sp, 16 e8: 28c02077 ld.d $s0, $sp, 8 ec: 02c08063 addi.d $sp, $sp, 32 f0: 4c000020 jirl $zero, $ra, 0 The instruction "sub.d $sp, $sp, $t0" changes the stack bottom and the new stack size is a random value, in order to find the return address of do_syscall() which is stored in the original stack frame after executing "jirl $ra, $t0, 0", it should use fp which points to the original stack top. At the beginning, the thought is tended to decode the secondary stack instruction "sub.d $sp, $sp, $t0" and set it as a label, then check this label for the two frame pointer instructions to change the cfa base and cfa offset during the period of secondary stack in update_cfi_state(). This is valid for GCC but invalid for Clang due to there are different secondary stack instructions for ClangBuiltLinux on LoongArch, something like this: 0000000000000000 : ... 88: 00119064 sub.d $a0, $sp, $a0 8c: 00150083 or $sp, $a0, $zero ... Actually, it equals to a single instruction "sub.d $sp, $sp, $a0", but there is no proper condition to check it as a label like GCC, and so the beginning thought is not a good way. 
Essentially, there are two special frame pointer instructions which are "addi.d $fp, $sp, imm" and "addi.d $sp, $fp, imm", the first one points fp to the original stack top and the second one restores the original stack bottom from fp. Based on the above analysis, in order to avoid adding an arch-specific update_cfi_state(), we just add a member "frame_pointer" in the "struct symbol" as a label to avoid affecting the current normal case, then set it as true only if there is "addi.d $sp, $fp, imm". The last is to check this label for the two frame pointer instructions to change the cfa base and cfa offset in update_cfi_state(). Tested with the following two configs: (1) CONFIG_RANDOMIZE_KSTACK_OFFSET=y && CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=n (2) CONFIG_RANDOMIZE_KSTACK_OFFSET=y && CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y By the way, there is no effect for x86 with this patch, tested on the x86 machine with Fedora 40 system. Cc: stable@vger.kernel.org # 6.9+ Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- tools/objtool/arch/loongarch/decode.c | 11 ++++++++++- tools/objtool/check.c | 23 ++++++++++++++++++++--- tools/objtool/include/objtool/elf.h | 1 + 3 files changed, 31 insertions(+), 4 deletions(-) diff --git a/tools/objtool/arch/loongarch/decode.c b/tools/objtool/arch/loongarch/decode.c index aee479d2191c..69b66994f2a1 100644 --- a/tools/objtool/arch/loongarch/decode.c +++ b/tools/objtool/arch/loongarch/decode.c @@ -122,7 +122,7 @@ static bool decode_insn_reg2i12_fomat(union loongarch_instruction inst, switch (inst.reg2i12_format.opcode) { case addid_op: if ((inst.reg2i12_format.rd == CFI_SP) || (inst.reg2i12_format.rj == CFI_SP)) { - /* addi.d sp,sp,si12 or addi.d fp,sp,si12 */ + /* addi.d sp,sp,si12 or addi.d fp,sp,si12 or addi.d sp,fp,si12 */ insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11); ADD_OP(op) { op->src.type = OP_SRC_ADD; @@ -132,6 +132,15 @@ 
static bool decode_insn_reg2i12_fomat(union loongarch_instruction inst, op->dest.reg = inst.reg2i12_format.rd; } } + if ((inst.reg2i12_format.rd == CFI_SP) && (inst.reg2i12_format.rj == CFI_FP)) { + /* addi.d sp,fp,si12 */ + struct symbol *func = find_func_containing(insn->sec, insn->offset); + + if (!func) + return false; + + func->frame_pointer = true; + } break; case ldd_op: if (inst.reg2i12_format.rj == CFI_SP) { diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 02090727e9b1..6e9dd6d6db37 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -2975,10 +2975,27 @@ static int update_cfi_state(struct instruction *insn, break; } - if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { + if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP && + insn->sym->frame_pointer) { + /* addi.d fp,sp,imm on LoongArch */ + if (cfa->base == CFI_SP && cfa->offset == op->src.offset) { + cfa->base = CFI_BP; + cfa->offset = 0; + } + break; + } - /* lea disp(%rbp), %rsp */ - cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); + if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { + /* addi.d sp,fp,imm on LoongArch */ + if (cfa->base == CFI_BP && cfa->offset == 0) { + if (insn->sym->frame_pointer) { + cfa->base = CFI_SP; + cfa->offset = -op->src.offset; + } + } else { + /* lea disp(%rbp), %rsp */ + cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); + } break; } diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h index 2b8a69de4db8..d7e815c2fd15 100644 --- a/tools/objtool/include/objtool/elf.h +++ b/tools/objtool/include/objtool/elf.h @@ -68,6 +68,7 @@ struct symbol { u8 warned : 1; u8 embedded_insn : 1; u8 local_label : 1; + u8 frame_pointer : 1; struct list_head pv_target; struct reloc *relocs; }; -- Gitee From 99c17c17d633e8a72d34a636bfd10d3624ae3d22 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Tue, 17 Sep 2024 22:23:09 +0800 Subject: [PATCH 1730/2138] LoongArch: Enable objtool for Clang ANBZ: #11459 
commit b8468bd92ae19939d4844899fa05147888732519 upstream. For now, it can enable objtool for Clang, just remove !CC_IS_CLANG for HAVE_OBJTOOL in arch/loongarch/Kconfig. Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- arch/loongarch/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index cf36ce211d2b..7fd6b9168f81 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -136,7 +136,7 @@ config LOONGARCH select HAVE_LIVEPATCH select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI - select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG + select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB select HAVE_PCI select HAVE_PERF_EVENTS select HAVE_PERF_REGS -- Gitee From dee787a19bdc86a1d90513e8fe80adbeb4f1c609 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Tue, 17 Sep 2024 22:23:09 +0800 Subject: [PATCH 1731/2138] LoongArch: Set AS_HAS_THIN_ADD_SUB as y if AS_IS_LLVM ANBZ: #11459 commit a7e0837724562ea8c1d869dd1a5cb1119ef651c3 upstream. When building kernel with "make CC=clang defconfig", LLVM Assembler is used due to LLVM_IAS=0 is not specified, then AS_HAS_THIN_ADD_SUB is not set, thus objtool can not be built after enable it for Clang. config AS_HAS_THIN_ADD_SUB is to check whether -mthin-add-sub option is available to know R_LARCH_{32,64}_PCREL are supported for GNU Assembler, there is no such an option for LLVM Assembler. The minimal version of Clang is 18 for building LoongArch kernel, and Clang >= 17 has already supported R_LARCH_{32,64}_PCREL, that is to say, there is no need to depend on AS_HAS_THIN_ADD_SUB for Clang, so just set AS_HAS_THIN_ADD_SUB as y if AS_IS_LLVM. 
Fixes: 120dd4118e58 ("LoongArch: Only allow OBJTOOL & ORC unwinder if toolchain supports -mthin-add-sub") Signed-off-by: Tiezhu Yang Signed-off-by: Huacai Chen Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- arch/loongarch/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 7fd6b9168f81..d31c89c87210 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -264,7 +264,7 @@ config AS_HAS_FCSR_CLASS def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0) config AS_HAS_THIN_ADD_SUB - def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) + def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) || AS_IS_LLVM config AS_HAS_LSX_EXTENSION def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0) -- Gitee From cfb095d0c1d302059f0c4b9b61e2abcb54b97f3d Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Tue, 24 Sep 2024 14:27:10 +0800 Subject: [PATCH 1732/2138] compiler.h: specify correct attribute for .rodata..c_jump_table ANBZ: #11459 commit c5b1184decc819756ae549ba54c63b6790c4ddfd upstream. Currently, there is an assembler message when generating kernel/bpf/core.o under CONFIG_OBJTOOL with LoongArch compiler toolchain: Warning: setting incorrect section attributes for .rodata..c_jump_table This is because the section ".rodata..c_jump_table" should be readonly, but there is a "W" (writable) part of the flags: $ readelf -S kernel/bpf/core.o | grep -A 1 "rodata..c" [34] .rodata..c_j[...] PROGBITS 0000000000000000 0000d2e0 0000000000000800 0000000000000000 WA 0 0 8 There is no above issue on x86 due to the generated section flag is only "A" (allocatable). In order to silence the warning on LoongArch, specify the attribute like ".rodata..c_jump_table,\"a\",@progbits #" explicitly, then the section attribute of ".rodata..c_jump_table" must be readonly in the kernel/bpf/core.o file. 
Before: $ objdump -h kernel/bpf/core.o | grep -A 1 "rodata..c" 21 .rodata..c_jump_table 00000800 0000000000000000 0000000000000000 0000d2e0 2**3 CONTENTS, ALLOC, LOAD, RELOC, DATA After: $ objdump -h kernel/bpf/core.o | grep -A 1 "rodata..c" 21 .rodata..c_jump_table 00000800 0000000000000000 0000000000000000 0000d2e0 2**3 CONTENTS, ALLOC, LOAD, RELOC, READONLY, DATA By the way, AFAICT, maybe the root cause is related with the different compiler behavior of various archs, so to some extent this change is a workaround for LoongArch, and also there is no effect for x86 which is the only port supported by objtool before LoongArch with this patch. Link: https://lkml.kernel.org/r/20240924062710.1243-1-yangtiezhu@loongson.cn Signed-off-by: Tiezhu Yang Cc: Josh Poimboeuf Cc: Peter Zijlstra Cc: [6.9+] Signed-off-by: Andrew Morton Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- include/linux/compiler.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 0517d344baa0..db140f106f3d 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -133,7 +133,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #define annotate_unreachable() __annotate_unreachable(__COUNTER__) /* Annotate a C jump table to allow objtool to follow the code flow */ -#define __annotate_jump_table __section(".rodata..c_jump_table") +#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #") #else /* !CONFIG_OBJTOOL */ #define annotate_reachable() -- Gitee From 4394852a514a9d915e0f72fe7d2b65cef8474a88 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Sun, 19 May 2024 22:18:56 +0800 Subject: [PATCH 1733/2138] LoongArch: Update Loongson-3 default config file ANBZ: #11459 commit 9cc1df421f00453afdcaf78b105d8e7fd03cce78 upstream. Enable ORC stack unwinder. Enable livepatch. 
While at it, remove CONFIG_LOONGSON3_ACPI_CPUFREQ=y to disable cpufreq driver in anolis_defconfig to make it consistent with loongson3_defconfig. Signed-off-by: Huacai Chen Signed-off-by: Tiezhu Yang Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4150 --- arch/loongarch/configs/anolis_defconfig | 3 ++- arch/loongarch/configs/loongson3_defconfig | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig index e10a12e5bc56..48ca88722fda 100644 --- a/arch/loongarch/configs/anolis_defconfig +++ b/arch/loongarch/configs/anolis_defconfig @@ -50,10 +50,10 @@ CONFIG_ARCH_IOREMAP=y CONFIG_CPU_HAS_LSX=y CONFIG_CPU_HAS_LASX=y CONFIG_CPU_HAS_LBT=y +CONFIG_LIVEPATCH=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_LOONGSON3_ACPI_CPUFREQ=y CONFIG_HIBERNATION=y CONFIG_ACPI_SPCR_TABLE=y CONFIG_ACPI_TAD=y @@ -2212,6 +2212,7 @@ CONFIG_FUNCTION_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y # CONFIG_STRICT_DEVMEM is not set +CONFIG_UNWINDER_ORC=y # CONFIG_RUNTIME_TESTING_MENU is not set CONFIG_LOONGARCH_IOMMU=m CONFIG_CMDLINE_EXTEND=y diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index abf7c951affc..e33401d67726 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -46,6 +46,7 @@ CONFIG_ARCH_IOREMAP=y CONFIG_CPU_HAS_LSX=y CONFIG_CPU_HAS_LASX=y CONFIG_CPU_HAS_LBT=y +CONFIG_LIVEPATCH=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_STAT=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y @@ -2209,6 +2210,7 @@ CONFIG_FUNCTION_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_BLK_DEV_IO_TRACE=y # CONFIG_STRICT_DEVMEM is not set +CONFIG_UNWINDER_ORC=y # CONFIG_RUNTIME_TESTING_MENU is not set CONFIG_LOONGARCH_IOMMU=m CONFIG_CMDLINE_EXTEND=y -- Gitee From c9f58f4a272a0f47c824daf74c8958badba3087a Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Mon, 6 May 2019 
10:34:19 +0800 Subject: [PATCH 1734/2138] anolis: mm, memcg: Provide users the ability to reap zombie memcgs ANBZ: #11915 to #34609742 After memcg was deleted, page caches still reference to this memcg causing large number of dead(zombie) memcgs in the system. Then it slows down access to "/sys/fs/cgroup/cpu/memory.stat", etc due to tons of iterations, further causing various latencies. This patch introduces two ways to reclaim these zombie memcgs. 1) Background kthread reaper Introduce a kernel thread "memcg_zombie_reaper" to reclaim zombie memcgs at background periodically. Several knobs are also added to control the reaper scan frequency: - /sys/kernel/mm/memcg_reaper/scan_interval The scan period in second. Default 5s. - /sys/kernel/mm/memcg_reaper/pages_scan The scan rate of pages per scan. Default 1310720(5GiB for 4KiB page). - /sys/kernel/mm/memcg_reaper/verbose Output some zombie memcg information for debug purpose. Default off. - /sys/kernel/mm/memcg_reaper/reap_background "on/off" switch. Default "0" means off. Write "1" to switch it on. 2) One-shot trigger by users - /sys/kernel/mm/memcg_reaper/reap Write "1" to trigger one round of zombie memcg reaping, but without any guarantee, you may need to launch multiple rounds as needed. 
Reviewed-by: Gavin Shan Signed-off-by: Xunlei Pang Signed-off-by: Xu Yu Reviewed-by: Xunlei Pang Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4133 --- Documentation/vm/memcg_zombie_reaper.rst | 41 ++++ include/linux/memcontrol.h | 4 + mm/Makefile | 2 +- mm/memcg_zombie_reaper.c | 300 +++++++++++++++++++++++ mm/memcontrol.c | 4 +- 5 files changed, 349 insertions(+), 2 deletions(-) create mode 100644 Documentation/vm/memcg_zombie_reaper.rst create mode 100644 mm/memcg_zombie_reaper.c diff --git a/Documentation/vm/memcg_zombie_reaper.rst b/Documentation/vm/memcg_zombie_reaper.rst new file mode 100644 index 000000000000..9542ec11d9d3 --- /dev/null +++ b/Documentation/vm/memcg_zombie_reaper.rst @@ -0,0 +1,41 @@ +.. _memcg_zombie_reaper: + +=================== +Memcg Zombie Reaper +=================== + +After memcg was deleted, page caches still reference to this memcg +causing large number of dead (zombie) memcgs in the system. Then it +slows down access to "/sys/fs/cgroup/cpu/memory.stat", etc due to +tons of iterations, further causing various latencies. "zombie memcgs +reaper" is a tool to reclaim these dead memcgs. It has two modes: + +"Background kthread reaper" mode +-------------------------------- +In this mode, a kthread reaper keeps reclaiming at background, +some knobs are provided to control the reaper scan behaviour: + +- /sys/kernel/mm/memcg_reaper/scan_interval + +the scan period in second. Default is 5s. + +- /sys/kernel/mm/memcg_reaper/pages_scan + +the scan rate of pages per scan. Default 1310720(5GiB for 4KiB page). + +- /sys/kernel/mm/memcg_reaper/verbose + +output some zombie memcg information for debug purpose. Default off. + +- /sys/kernel/mm/memcg_reaper/reap_background + +on/off switch. Default "0" means off. Write "1" to switch it on. 
+ +"One-shot trigger" mode +----------------------- +In this mode, there is no guarantee to finish the reclaim, you may need +to check and launch multiple rounds as needed. + +- /sys/kernel/mm/memcg_reaper/reap + +users write "1" to trigger one round of zombie memcg reaping. diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 5bfa630a7635..c833e2864594 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -344,6 +344,8 @@ struct mem_cgroup { unsigned long async_fork; #endif + unsigned long offline_jiffies; + CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) CK_KABI_RESERVE(3) @@ -1174,6 +1176,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, void memcg_meminfo(struct mem_cgroup *memcg, struct sysinfo *info, struct sysinfo_ext *ext); +void drain_all_stock(struct mem_cgroup *root_memcg); + #ifdef CONFIG_RICH_CONTAINER struct mem_cgroup *rich_container_get_memcg(void); #else diff --git a/mm/Makefile b/mm/Makefile index 08b22aa58959..555e4d70bfc9 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -95,7 +95,7 @@ obj-$(CONFIG_NUMA) += memory-tiers.o obj-$(CONFIG_DEVICE_MIGRATION) += migrate_device.o obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o obj-$(CONFIG_PAGE_COUNTER) += page_counter.o -obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o +obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o memcg_zombie_reaper.o ifdef CONFIG_SWAP obj-$(CONFIG_MEMCG) += swap_cgroup.o endif diff --git a/mm/memcg_zombie_reaper.c b/mm/memcg_zombie_reaper.c new file mode 100644 index 000000000000..818167f93aa3 --- /dev/null +++ b/mm/memcg_zombie_reaper.c @@ -0,0 +1,300 @@ +/* + * Reap zombie memcgs: + * - reap at background periodically + * echo 1 > /sys/kernel/mm/memcg_reaper/reap_background + * - one-shot reap triggerred by users + * echo 1 > /sys/kernel/mm/memcg_reaper/reap + * + * Copyright (C) 2019 Alibaba + * Author: Xunlei Pang + * + * This program is free software; you can redistribute it and/or modify + * it under 
the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include /* try_to_free_mem_cgroup_pages */ + +#define for_each_mem_cgroup_tree(iter, root) \ + for (iter = mem_cgroup_iter(root, NULL, NULL); \ + iter != NULL; \ + iter = mem_cgroup_iter(root, iter, NULL)) + +/* Reap by kthread at background, off by default */ +static unsigned int reaper_kthread_on; +static unsigned int reaper_verbose; +static unsigned int reaper_scan_interval = 5; /* in seconds */ +/* pages one scan, 5GiB for 4KiB page size */ +static unsigned int reaper_pages_scan = 1310720; + +static DECLARE_WAIT_QUEUE_HEAD(reaper_waitq); + +#ifdef CONFIG_SYSFS +static void reap_zombie_memcgs(bool background); + +#define REAPER_ATTR(_name) \ + static struct kobj_attribute _name##_attr = \ + __ATTR(_name, 0644, _name##_show, _name##_store) + +static ssize_t pages_scan_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", reaper_pages_scan); +} + +static ssize_t pages_scan_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + unsigned long pages; + int err; + + err = kstrtoul(buf, 10, &pages); + if (err || pages > UINT_MAX) + return -EINVAL; + + reaper_pages_scan = pages; + + return count; +} +REAPER_ATTR(pages_scan); + +static ssize_t scan_interval_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", reaper_scan_interval); +} + +static ssize_t scan_interval_store(struct kobject *kobj, + struct kobj_attribute *attr, + const 
char *buf, size_t count) +{ + int err; + unsigned long interval; + + err = kstrtoul(buf, 10, &interval); + if (err || interval > UINT_MAX) + return -EINVAL; + + reaper_scan_interval = interval; + + return count; +} +REAPER_ATTR(scan_interval); + +static ssize_t verbose_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", reaper_verbose); +} + +static ssize_t verbose_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long verbose; + + err = kstrtoul(buf, 10, &verbose); + if (err || (verbose != 0 && verbose != 1)) + return -EINVAL; + + reaper_verbose = verbose; + + return count; +} +REAPER_ATTR(verbose); + +static ssize_t reap_background_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", reaper_kthread_on); +} + +static ssize_t reap_background_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long enable; + + err = kstrtoul(buf, 10, &enable); + if (err || (enable != 0 && enable != 1)) + return -EINVAL; + + reaper_kthread_on = enable; + if (reaper_kthread_on) + wake_up_interruptible(&reaper_waitq); + + return count; +} +REAPER_ATTR(reap_background); + +static ssize_t reap_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", 0); +} + +static ssize_t reap_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long enable; + + err = kstrtoul(buf, 10, &enable); + if (err || enable != 1) + return -EINVAL; + + reap_zombie_memcgs(false); + + return count; +} +REAPER_ATTR(reap); + +static struct attribute *reaper_attrs[] = { + &pages_scan_attr.attr, + &scan_interval_attr.attr, + &verbose_attr.attr, + &reap_background_attr.attr, + &reap_attr.attr, + NULL, +}; + +static struct attribute_group reaper_attr_group = { + .attrs = reaper_attrs, + 
.name = "memcg_reaper", +}; +#endif + +static char name_buf[1024]; +static unsigned long +do_reap_zombie_memcg(struct mem_cgroup *memcg, bool background) +{ + unsigned long did_some = 0; + bool drained = false; + unsigned int jiffies_thresh = dirty_expire_interval * HZ / 100; + + /* Let dirty dying memcgs be controlled a while by writeback */ + if (background && + time_before(jiffies, memcg->offline_jiffies + jiffies_thresh) && + (memcg_page_state(memcg, NR_FILE_DIRTY) + + memcg_page_state(memcg, NR_WRITEBACK))) + return 0; + + /* try to free all pages in this cgroup */ + while (page_counter_read(&memcg->memory)) { + unsigned int ret; + + ret = try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true); + did_some += ret; + if (ret) + continue; + + if (drained == false) { + drain_all_stock(memcg); + drained = true; + } else { + break; + } + } + + if (reaper_verbose) { + cgroup_name(memcg->css.cgroup, name_buf, sizeof(name_buf)); + if (page_counter_read(&memcg->memory) == 0) { + printk_ratelimited("empty zombie memcg: 0x%lx: %s\n", + (unsigned long)memcg, name_buf); + } else { + printk_ratelimited("non-empty zombie memcg: 0x%lx, counter %ld, %s\n", + (unsigned long)memcg, + page_counter_read(&memcg->memory), + name_buf); + } + } + + return did_some; +} + +static void reap_zombie_memcgs(bool background) +{ + unsigned long reclaimed; + unsigned long reclaimed_threshold; + struct mem_cgroup *iter; + + reclaimed = 0; + reclaimed_threshold = reaper_pages_scan; + for_each_mem_cgroup_tree(iter, NULL) { + if (background && reclaimed >= reclaimed_threshold) { + mem_cgroup_iter_break(NULL, iter); + break; + } + if (mem_cgroup_online(iter)) + continue; + reclaimed += do_reap_zombie_memcg(iter, background); + cond_resched(); + } + + if (background && reaper_scan_interval) + msleep_interruptible(reaper_scan_interval*1000); +} + +static int zombie_reaper_thread(void *unused) +{ + set_freezable(); + + /* Lower its priority to avoid hogging too much cpu */ + set_user_nice(current, 
19); + + while (!kthread_should_stop()) { + if (reaper_kthread_on) { + reap_zombie_memcgs(true); + } else { + wait_event_freezable(reaper_waitq, + kthread_should_stop() || reaper_kthread_on); + } + + try_to_freeze(); + } + + return 0; +} + +static int __init memcg_zombie_reaper_init(void) +{ + static struct task_struct *zombie_reaper; + int err; + + zombie_reaper = kthread_run(zombie_reaper_thread, + NULL, "zombie_memcg_reaper"); + if (IS_ERR(zombie_reaper)) { + pr_err("%s: Unable to start reaper kthread\n", __func__); + return PTR_ERR(zombie_reaper); + } + +#ifdef CONFIG_SYSFS + err = sysfs_create_group(mm_kobj, &reaper_attr_group); + if (err) { + kthread_stop(zombie_reaper); + pr_err("%s: Unable to populate sysfs files\n", __func__); + return err; + } +#endif + + return 0; +} + +module_init(memcg_zombie_reaper_init); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index cfc65fabc6f8..29356e947b27 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2340,7 +2340,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) * Drains all per-CPU charge caches for given root_memcg resp. subtree * of the hierarchy under it. */ -static void drain_all_stock(struct mem_cgroup *root_memcg) +void drain_all_stock(struct mem_cgroup *root_memcg) { int cpu, curcpu; @@ -5506,6 +5506,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup_event *event, *tmp; + memcg->offline_jiffies = jiffies; + /* * Unregister events and notify userspace. 
* Notify userspace about cgroup removing only after rmdir of cgroup -- Gitee From 55fb2c3ad5886c828fdcc636c8239d8371f5bfb9 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Wed, 2 Jun 2021 19:46:31 +0800 Subject: [PATCH 1735/2138] anolis: mm, memcg: introduce per memcg switch to reap zombie background ANBZ: #11915 to #34609742 This introduces a new memcg cftype file named reap_background, with default value as 0, to support local zombie memcg reaper. Only memcgs with this value set to 1 are participated when reaping local zombie memcgs in the background. This value is ignored when reaping global zombie memcgs through the sysfs reap_background interface. The local zombie memcg reaper is stopped when the reap_background of root memcg is written to 0. Note that when the reap_background of one memcg is written, all of the child memcgs are also updated with the new value. Signed-off-by: Xu Yu Reviewed-by: Xunlei Pang Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4133 --- include/linux/memcontrol.h | 1 + mm/memcg_zombie_reaper.c | 23 ++++++++++++++++++++-- mm/memcontrol.c | 39 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 61 insertions(+), 2 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index c833e2864594..ed764df798b2 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -345,6 +345,7 @@ struct mem_cgroup { #endif unsigned long offline_jiffies; + unsigned long reap_background; CK_KABI_RESERVE(1) CK_KABI_RESERVE(2) diff --git a/mm/memcg_zombie_reaper.c b/mm/memcg_zombie_reaper.c index 818167f93aa3..28e3ffe52887 100644 --- a/mm/memcg_zombie_reaper.c +++ b/mm/memcg_zombie_reaper.c @@ -34,6 +34,8 @@ iter = mem_cgroup_iter(root, iter, NULL)) /* Reap by kthread at background, off by default */ +#define REAP_BACKGROUND_GLOBAL (1 << 0) +#define REAP_BACKGROUND_MEMCG (1 << 1) static unsigned int reaper_kthread_on; static unsigned int reaper_verbose; static 
unsigned int reaper_scan_interval = 5; /* in seconds */ @@ -42,6 +44,17 @@ static unsigned int reaper_pages_scan = 1310720; static DECLARE_WAIT_QUEUE_HEAD(reaper_waitq); +void memcg_reap_background_set(void) +{ + reaper_kthread_on |= REAP_BACKGROUND_MEMCG; + wake_up_interruptible(&reaper_waitq); +} + +void memcg_reap_background_clear(void) +{ + reaper_kthread_on &= ~REAP_BACKGROUND_MEMCG; +} + #ifdef CONFIG_SYSFS static void reap_zombie_memcgs(bool background); @@ -135,9 +148,12 @@ static ssize_t reap_background_store(struct kobject *kobj, if (err || (enable != 0 && enable != 1)) return -EINVAL; - reaper_kthread_on = enable; - if (reaper_kthread_on) + reaper_kthread_on &= ~REAP_BACKGROUND_GLOBAL; + + if (enable) { + reaper_kthread_on |= REAP_BACKGROUND_GLOBAL; wake_up_interruptible(&reaper_waitq); + } return count; } @@ -244,6 +260,9 @@ static void reap_zombie_memcgs(bool background) } if (mem_cgroup_online(iter)) continue; + if (!(reaper_kthread_on & REAP_BACKGROUND_GLOBAL) && + !iter->reap_background) + continue; reclaimed += do_reap_zombie_memcg(iter, background); cond_resched(); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 29356e947b27..95d35d948745 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5032,6 +5032,34 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, return ret; } +static u64 memcg_reap_background_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return mem_cgroup_from_css(css)->reap_background; +} + +extern void memcg_reap_background_set(void); +extern void memcg_reap_background_clear(void); +static int memcg_reap_background_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *iter, *memcg = mem_cgroup_from_css(css); + + /* Only 0 and 1 are allowed */ + if (val > 1) + return -EINVAL; + + for_each_mem_cgroup_tree(iter, memcg) + iter->reap_background = val; + + if (val) + memcg_reap_background_set(); + else if (mem_cgroup_is_root(memcg)) + 
memcg_reap_background_clear(); + + return 0; +} + #if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)) static int mem_cgroup_slab_show(struct seq_file *m, void *p) { @@ -5178,6 +5206,11 @@ static struct cftype mem_cgroup_legacy_files[] = { .write_u64 = mem_cgroup_async_fork_write, }, #endif + { + .name = "reap_background", + .read_u64 = memcg_reap_background_read, + .write_u64 = memcg_reap_background_write, + }, { }, /* terminate */ }; @@ -5425,6 +5458,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) if (parent) { WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); + memcg->reap_background = parent->reap_background; #ifdef CONFIG_ASYNC_FORK memcg->async_fork = parent->async_fork; #endif @@ -6894,6 +6928,11 @@ static struct cftype memory_files[] = { .write_u64 = mem_cgroup_async_fork_write, }, #endif + { + .name = "reap_background", + .read_u64 = memcg_reap_background_read, + .write_u64 = memcg_reap_background_write, + }, { } /* terminate */ }; -- Gitee From 954ec2f49579e8355139dd691d7c43b49df4c9e8 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Thu, 24 Jun 2021 17:32:41 +0800 Subject: [PATCH 1736/2138] anolis: memcg: Restrict memcg zombie scan interval ANBZ: #11915 fix #35026402 Don't allow zero interval which can result in endless loops. 
Acked-by: Xu Yu Signed-off-by: Xunlei Pang Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4133 --- mm/memcg_zombie_reaper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memcg_zombie_reaper.c b/mm/memcg_zombie_reaper.c index 28e3ffe52887..ca4f5762c982 100644 --- a/mm/memcg_zombie_reaper.c +++ b/mm/memcg_zombie_reaper.c @@ -99,7 +99,7 @@ static ssize_t scan_interval_store(struct kobject *kobj, unsigned long interval; err = kstrtoul(buf, 10, &interval); - if (err || interval > UINT_MAX) + if (err || interval > UINT_MAX || interval == 0) return -EINVAL; reaper_scan_interval = interval; -- Gitee From b3c6ab22ef447d44e062fa2e5de2577b9b051ed3 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Sun, 9 Apr 2023 16:35:03 +0800 Subject: [PATCH 1737/2138] anolis: mm: do not skip zombie memcgs in one-shot mode ANBZ: #11915 ANBZ: #4743 Zombie memcgs are skipped when reaper is in "one-shot trigger" mode, which is not expected. This leads to a result that "one-shot trigger" mode can only take effect when "background kthread reaper" is enabled. This makes zombie memcgs not skipped in one-shot mode, thus makes one-shot mode reaper standalone. 
Fixes: 33bbe281782b ("ck: mm, memcg: introduce per memcg switch to reap zombie background") Signed-off-by: Xu Yu Reviewed-by: Gang Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/1551 Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4133 --- mm/memcg_zombie_reaper.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/mm/memcg_zombie_reaper.c b/mm/memcg_zombie_reaper.c index ca4f5762c982..995b819d028c 100644 --- a/mm/memcg_zombie_reaper.c +++ b/mm/memcg_zombie_reaper.c @@ -254,14 +254,17 @@ static void reap_zombie_memcgs(bool background) reclaimed = 0; reclaimed_threshold = reaper_pages_scan; for_each_mem_cgroup_tree(iter, NULL) { - if (background && reclaimed >= reclaimed_threshold) { + if (background && + (reclaimed >= reclaimed_threshold)) { mem_cgroup_iter_break(NULL, iter); break; } if (mem_cgroup_online(iter)) continue; - if (!(reaper_kthread_on & REAP_BACKGROUND_GLOBAL) && - !iter->reap_background) + if (background && + !(reaper_kthread_on & REAP_BACKGROUND_GLOBAL) && + !((reaper_kthread_on & REAP_BACKGROUND_MEMCG) && + (iter->reap_background))) continue; reclaimed += do_reap_zombie_memcg(iter, background); cond_resched(); -- Gitee From b87375531a8e538a2ed6d8158eb112bd2cd0030b Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Tue, 19 Nov 2024 16:08:32 +0800 Subject: [PATCH 1738/2138] anolis: configs: refresh kconfigs ANBZ: #11822 No Functional Change. 
Refresh kconfigs by follow command: > make -C anolis/ dist-configs-update Signed-off-by: Qiao Ma Reviewed-by: Baolin Wang Reviewed-by: Xuan Zhuo Reviewed-by: Tianjia Zhang Acked-by: Joseph Qi Reviewed-by: Zelin Deng Reviewed-by: Guixin Liu Acked-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4118 --- .../arm64/CONFIG_UNWINDER_FRAME_POINTER | 0 anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS | 1 + anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_LIVEPATCH | 0 .../configs/L0-MANDATORY/{arm64 => default}/CONFIG_PCI_PF_STUB | 0 anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_UNWINDER_ORC | 0 anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB | 1 - anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 | 1 + .../L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC | 1 - anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO | 1 + .../configs/L1-RECOMMEND/{arm64 => default}/CONFIG_GENERIC_PHY | 0 .../L1-RECOMMEND/{arm64 => default}/CONFIG_HAVE_LIVEPATCH | 0 .../{arm64 => default}/CONFIG_HAVE_RELIABLE_STACKTRACE | 0 .../L1-RECOMMEND/{arm64 => default}/CONFIG_HAVE_STACK_VALIDATION | 0 anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_OBJTOOL | 0 anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX | 1 + .../L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT | 1 - .../configs/L1-RECOMMEND/{x86 => default}/CONFIG_TEST_LIVEPATCH | 0 anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY | 1 - .../x86}/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET | 0 anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI | 1 + .../L1-RECOMMEND/{default => x86}/CONFIG_RANDOMIZE_KSTACK_OFFSET | 0 
.../{arm64 => x86}/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT | 0 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH | 1 - .../configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG | 1 + .../{arm64 => default}/CONFIG_DRM_PANEL_ILITEK_ILI9341 | 0 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC | 1 + .../L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH | 1 - anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE | 1 - anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION | 1 - anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL | 1 - 45 files changed, 18 insertions(+), 13 deletions(-) rename anolis/configs/{L1-RECOMMEND => L0-MANDATORY}/arm64/CONFIG_UNWINDER_FRAME_POINTER (100%) create mode 100644 anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS rename anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_LIVEPATCH (100%) rename anolis/configs/L0-MANDATORY/{arm64 => default}/CONFIG_PCI_PF_STUB (100%) rename anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_UNWINDER_ORC (100%) delete mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB delete mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 delete mode 100644 
anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO rename anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_GENERIC_PHY (100%) rename anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_HAVE_LIVEPATCH (100%) rename anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_HAVE_RELIABLE_STACKTRACE (100%) rename anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_HAVE_STACK_VALIDATION (100%) rename anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_OBJTOOL (100%) create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX delete mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_TEST_LIVEPATCH (100%) delete mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY rename anolis/configs/{L2-OPTIONAL/default => L1-RECOMMEND/x86}/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET (100%) create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI rename anolis/configs/L1-RECOMMEND/{default => x86}/CONFIG_RANDOMIZE_KSTACK_OFFSET (100%) rename anolis/configs/L1-RECOMMEND/{arm64 => x86}/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT (100%) delete mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG rename anolis/configs/L2-OPTIONAL/{arm64 => default}/CONFIG_DRM_PANEL_ILITEK_ILI9341 (100%) create 
mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS delete mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH delete mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE delete mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION delete mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER b/anolis/configs/L0-MANDATORY/arm64/CONFIG_UNWINDER_FRAME_POINTER similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_FRAME_POINTER rename to anolis/configs/L0-MANDATORY/arm64/CONFIG_UNWINDER_FRAME_POINTER diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS new file mode 100644 index 000000000000..3d6f96778a81 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/default/CONFIG_CPU_MITIGATIONS @@ -0,0 +1 @@ +CONFIG_CPU_MITIGATIONS=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_LIVEPATCH b/anolis/configs/L0-MANDATORY/default/CONFIG_LIVEPATCH similarity index 100% rename from anolis/configs/L0-MANDATORY/x86/CONFIG_LIVEPATCH rename to anolis/configs/L0-MANDATORY/default/CONFIG_LIVEPATCH diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB b/anolis/configs/L0-MANDATORY/default/CONFIG_PCI_PF_STUB similarity index 100% rename from anolis/configs/L0-MANDATORY/arm64/CONFIG_PCI_PF_STUB rename to 
anolis/configs/L0-MANDATORY/default/CONFIG_PCI_PF_STUB diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_ORC b/anolis/configs/L0-MANDATORY/default/CONFIG_UNWINDER_ORC similarity index 100% rename from anolis/configs/L0-MANDATORY/x86/CONFIG_UNWINDER_ORC rename to anolis/configs/L0-MANDATORY/default/CONFIG_UNWINDER_ORC diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB b/anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB deleted file mode 100644 index 46eee76194b0..000000000000 --- a/anolis/configs/L0-MANDATORY/x86/CONFIG_PCI_PF_STUB +++ /dev/null @@ -1 +0,0 @@ -CONFIG_PCI_PF_STUB=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS b/anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS deleted file mode 100644 index 37f78a6f2368..000000000000 --- a/anolis/configs/L0-MANDATORY/x86/CONFIG_SPECULATION_MITIGATIONS +++ /dev/null @@ -1 +0,0 @@ -CONFIG_SPECULATION_MITIGATIONS=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE new file mode 100644 index 000000000000..23a09e20f027 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_CONTPTE @@ -0,0 +1 @@ +CONFIG_ARM64_CONTPTE=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 new file mode 100644 index 000000000000..f6f6f286638d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_ERRATUM_3194386 @@ -0,0 +1 @@ +CONFIG_ARM64_ERRATUM_3194386=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET deleted file mode 100644 index c7daa4f60d5d..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET is not set diff --git 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH deleted file mode 100644 index 1b05d0d1a109..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_LIVEPATCH +++ /dev/null @@ -1 +0,0 @@ -CONFIG_LIVEPATCH=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET deleted file mode 100644 index 759cb13e424c..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_RANDOMIZE_KSTACK_OFFSET is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC deleted file mode 100644 index 6b6908419acb..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UNWINDER_ORC +++ /dev/null @@ -1 +0,0 @@ -CONFIG_UNWINDER_ORC=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO new file mode 100644 index 000000000000..6a7ffe559b94 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_AHCI_ZHAOXIN_SGPIO @@ -0,0 +1 @@ +CONFIG_AHCI_ZHAOXIN_SGPIO=m diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_PHY b/anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_PHY similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_GENERIC_PHY rename to anolis/configs/L1-RECOMMEND/default/CONFIG_GENERIC_PHY diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_LIVEPATCH similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_LIVEPATCH rename to anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_LIVEPATCH diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE b/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_RELIABLE_STACKTRACE similarity index 100% rename from 
anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_RELIABLE_STACKTRACE rename to anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_RELIABLE_STACKTRACE diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION b/anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_STACK_VALIDATION similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_HAVE_STACK_VALIDATION rename to anolis/configs/L1-RECOMMEND/default/CONFIG_HAVE_STACK_VALIDATION diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL b/anolis/configs/L1-RECOMMEND/default/CONFIG_OBJTOOL similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_OBJTOOL rename to anolis/configs/L1-RECOMMEND/default/CONFIG_OBJTOOL diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX new file mode 100644 index 000000000000..8c42e3567daa --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PCP_BATCH_SCALE_MAX @@ -0,0 +1 @@ +CONFIG_PCP_BATCH_SCALE_MAX=5 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT deleted file mode 100644 index d680659c1703..000000000000 --- a/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_TEST_LIVEPATCH b/anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_LIVEPATCH similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_TEST_LIVEPATCH rename to anolis/configs/L1-RECOMMEND/default/CONFIG_TEST_LIVEPATCH diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY b/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY deleted file mode 100644 index 582e87c3b9f5..000000000000 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_GENERIC_PHY +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_GENERIC_PHY is not 
set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/x86/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET similarity index 100% rename from anolis/configs/L2-OPTIONAL/default/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET rename to anolis/configs/L1-RECOMMEND/x86/CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI new file mode 100644 index 000000000000..71b428227384 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MITIGATION_SPECTRE_BHI @@ -0,0 +1 @@ +CONFIG_MITIGATION_SPECTRE_BHI=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_RANDOMIZE_KSTACK_OFFSET rename to anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT b/anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT rename to anolis/configs/L1-RECOMMEND/x86/CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH deleted file mode 100644 index 0dd7700464a8..000000000000 --- a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEST_LIVEPATCH +++ /dev/null @@ -1 +0,0 @@ -CONFIG_TEST_LIVEPATCH=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION new file mode 100644 index 000000000000..d966c9744d3d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION @@ -0,0 +1 @@ +# CONFIG_BCACHE_ASYNC_REGISTRATION is not set 
diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG new file mode 100644 index 000000000000..eb3f1af90e6c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_CLOSURES_DEBUG @@ -0,0 +1 @@ +# CONFIG_BCACHE_CLOSURES_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG new file mode 100644 index 000000000000..36426027b2d1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_DEBUG @@ -0,0 +1 @@ +# CONFIG_BCACHE_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG new file mode 100644 index 000000000000..01ee2034fbdc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_SIG @@ -0,0 +1 @@ +CONFIG_CRYPTO_SIG=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_ILI9341 b/anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ILITEK_ILI9341 similarity index 100% rename from anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_PANEL_ILITEK_ILI9341 rename to anolis/configs/L2-OPTIONAL/default/CONFIG_DRM_PANEL_ILITEK_ILI9341 diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS new file mode 100644 index 000000000000..485cf9b71de5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_FB_IOMEM_FOPS @@ -0,0 +1 @@ +CONFIG_FB_IOMEM_FOPS=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA new file mode 100644 index 000000000000..723cb8bb73ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PATA_HPT3X3_DMA @@ -0,0 +1 @@ +# CONFIG_PATA_HPT3X3_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO new file mode 100644 
index 000000000000..e5a12d9c60db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCREEN_INFO @@ -0,0 +1 @@ +CONFIG_SCREEN_INFO=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG new file mode 100644 index 000000000000..aa295ebbb545 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_DEBUG @@ -0,0 +1 @@ +CONFIG_SCSI_MVSAS_DEBUG=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET new file mode 100644 index 000000000000..028f7d8e3d25 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SCSI_MVSAS_TASKLET @@ -0,0 +1 @@ +# CONFIG_SCSI_MVSAS_TASKLET is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 new file mode 100644 index 000000000000..421ac1f25eec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_AD9467 @@ -0,0 +1 @@ +# CONFIG_AD9467 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC new file mode 100644 index 000000000000..e98b407ac85f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADI_AXI_ADC @@ -0,0 +1 @@ +# CONFIG_ADI_AXI_ADC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS new file mode 100644 index 000000000000..a7a95432397c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS @@ -0,0 +1 @@ +CONFIG_ARCH_CONFIGURES_CPU_MITIGATIONS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH deleted file mode 100644 index 7ebdb924703e..000000000000 --- a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_LIVEPATCH +++ /dev/null @@ -1 +0,0 @@ -CONFIG_HAVE_LIVEPATCH=y diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE deleted file mode 100644 index 2ce8faabc4cf..000000000000 --- a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_RELIABLE_STACKTRACE +++ /dev/null @@ -1 +0,0 @@ -CONFIG_HAVE_RELIABLE_STACKTRACE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION deleted file mode 100644 index 6f36a32d84ae..000000000000 --- a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HAVE_STACK_VALIDATION +++ /dev/null @@ -1 +0,0 @@ -CONFIG_HAVE_STACK_VALIDATION=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL deleted file mode 100644 index cf3a9f20f93d..000000000000 --- a/anolis/configs/L2-OPTIONAL/x86/CONFIG_OBJTOOL +++ /dev/null @@ -1 +0,0 @@ -CONFIG_OBJTOOL=y -- Gitee From 01826a16a038bf6f4d607f27abb485e56692b721 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Thu, 14 Nov 2024 16:07:44 +0800 Subject: [PATCH 1739/2138] anolis: configs: align L0 kconfigs to devel-5.10 ANBZ: #11822 x86: CONFIG_VIRTIO_MMIO=y CONFIG_CRYPTO_SM3_AVX_X86_64=y arm64: CONFIG_IRQ_TIME_ACCOUNTING=y CONFIG_EXT4_FS=y CONFIG_JBD2=y Signed-off-by: Qiao Ma Reviewed-by: Baolin Wang Reviewed-by: Xuan Zhuo Reviewed-by: Tianjia Zhang Acked-by: Joseph Qi Reviewed-by: Zelin Deng Reviewed-by: Guixin Liu Acked-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4118 --- anolis/configs/L0-MANDATORY/arm64/CONFIG_EXT4_FS | 1 - anolis/configs/L0-MANDATORY/arm64/CONFIG_IRQ_TIME_ACCOUNTING | 1 - anolis/configs/L0-MANDATORY/arm64/CONFIG_JBD2 | 1 - .../configs/L0-MANDATORY/{default => arm64}/CONFIG_VIRTIO_MMIO | 0 anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_EXT4_FS | 0 .../L0-MANDATORY/{x86 => default}/CONFIG_IRQ_TIME_ACCOUNTING | 0 anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_JBD2 | 0 anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM3_AVX_X86_64 | 2 +- 
anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_MMIO | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_FS_MBCACHE | 1 - anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_FS_MBCACHE | 0 11 files changed, 2 insertions(+), 5 deletions(-) delete mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_EXT4_FS delete mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_IRQ_TIME_ACCOUNTING delete mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_JBD2 rename anolis/configs/L0-MANDATORY/{default => arm64}/CONFIG_VIRTIO_MMIO (100%) rename anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_EXT4_FS (100%) rename anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_IRQ_TIME_ACCOUNTING (100%) rename anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_JBD2 (100%) create mode 100644 anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_MMIO delete mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_FS_MBCACHE rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_FS_MBCACHE (100%) diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_EXT4_FS b/anolis/configs/L0-MANDATORY/arm64/CONFIG_EXT4_FS deleted file mode 100644 index 6ead740de57a..000000000000 --- a/anolis/configs/L0-MANDATORY/arm64/CONFIG_EXT4_FS +++ /dev/null @@ -1 +0,0 @@ -CONFIG_EXT4_FS=m diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_IRQ_TIME_ACCOUNTING b/anolis/configs/L0-MANDATORY/arm64/CONFIG_IRQ_TIME_ACCOUNTING deleted file mode 100644 index 50707d66e988..000000000000 --- a/anolis/configs/L0-MANDATORY/arm64/CONFIG_IRQ_TIME_ACCOUNTING +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_IRQ_TIME_ACCOUNTING is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_JBD2 b/anolis/configs/L0-MANDATORY/arm64/CONFIG_JBD2 deleted file mode 100644 index 72298fc42df1..000000000000 --- a/anolis/configs/L0-MANDATORY/arm64/CONFIG_JBD2 +++ /dev/null @@ -1 +0,0 @@ -CONFIG_JBD2=m diff --git a/anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MMIO b/anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_MMIO similarity index 100% rename from 
anolis/configs/L0-MANDATORY/default/CONFIG_VIRTIO_MMIO rename to anolis/configs/L0-MANDATORY/arm64/CONFIG_VIRTIO_MMIO diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_EXT4_FS b/anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS similarity index 100% rename from anolis/configs/L0-MANDATORY/x86/CONFIG_EXT4_FS rename to anolis/configs/L0-MANDATORY/default/CONFIG_EXT4_FS diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_IRQ_TIME_ACCOUNTING b/anolis/configs/L0-MANDATORY/default/CONFIG_IRQ_TIME_ACCOUNTING similarity index 100% rename from anolis/configs/L0-MANDATORY/x86/CONFIG_IRQ_TIME_ACCOUNTING rename to anolis/configs/L0-MANDATORY/default/CONFIG_IRQ_TIME_ACCOUNTING diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_JBD2 b/anolis/configs/L0-MANDATORY/default/CONFIG_JBD2 similarity index 100% rename from anolis/configs/L0-MANDATORY/x86/CONFIG_JBD2 rename to anolis/configs/L0-MANDATORY/default/CONFIG_JBD2 diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM3_AVX_X86_64 b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM3_AVX_X86_64 index 3ff5948232ff..179f5f1fa240 100644 --- a/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM3_AVX_X86_64 +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_CRYPTO_SM3_AVX_X86_64 @@ -1 +1 @@ -CONFIG_CRYPTO_SM3_AVX_X86_64=m +CONFIG_CRYPTO_SM3_AVX_X86_64=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_MMIO b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_MMIO new file mode 100644 index 000000000000..8d3f6df40a38 --- /dev/null +++ b/anolis/configs/L0-MANDATORY/x86/CONFIG_VIRTIO_MMIO @@ -0,0 +1 @@ +CONFIG_VIRTIO_MMIO=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FS_MBCACHE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FS_MBCACHE deleted file mode 100644 index daee2e23d02f..000000000000 --- a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_FS_MBCACHE +++ /dev/null @@ -1 +0,0 @@ -CONFIG_FS_MBCACHE=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_FS_MBCACHE b/anolis/configs/L2-OPTIONAL/default/CONFIG_FS_MBCACHE similarity index 
100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_FS_MBCACHE rename to anolis/configs/L2-OPTIONAL/default/CONFIG_FS_MBCACHE -- Gitee From 159ded0c05bb99afa6f9f6f44557c1d565f0dfbb Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Mon, 11 Nov 2024 14:14:34 +0800 Subject: [PATCH 1740/2138] anolis: configs: align L1 driver related configs to devel-5.10 ANBZ: #11822 x86: CONFIG_ACPI_CUSTOM_METHOD=m CONFIG_UEVENT_HELPER=y CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m CONFIG_ATA_OVER_ETH=m CONFIG_SCSI_UFSHCD=m CONFIG_FUSION_CTL=m CONFIG_NET_TULIP=y CONFIG_INPUT_MOUSEDEV=y CONFIG_MOUSE_PS2=y CONFIG_THERMAL_NETLINK=y CONFIG_IR_SHARP_DECODER=m CONFIG_IR_XMP_DECODER=m CONFIG_SND=m CONFIG_INFINIBAND_MTHCA=m CONFIG_VFIO_PCI_IGD=y arm64: CONFIG_ACPI_CUSTOM_METHOD=m CONFIG_MOUSE_PS2=y CONFIG_AHCI_ZHAOXIN_SGPIO=y CONFIG_ATA_PIIX=y CONFIG_CNIC=m CONFIG_KEYBOARD_ATKBD=y CONFIG_SPI_DESIGNWARE=y CONFIG_HISI_THERMAL=y CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y Signed-off-by: Qiao Ma Reviewed-by: Baolin Wang Reviewed-by: Xuan Zhuo Reviewed-by: Tianjia Zhang Acked-by: Joseph Qi Reviewed-by: Zelin Deng Reviewed-by: Guixin Liu Acked-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4118 --- .../configs/L1-RECOMMEND/{default => arm64}/CONFIG_ATA_OVER_ETH | 0 .../{default => arm64}/CONFIG_BLK_DEV_PCIESSD_MTIP32XX | 0 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CNIC | 1 - .../configs/L1-RECOMMEND/{default => arm64}/CONFIG_FUSION_CTL | 0 anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_THERMAL | 2 +- .../L1-RECOMMEND/{default => arm64}/CONFIG_INFINIBAND_MTHCA | 0 anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEYBOARD_ATKBD | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_MOUSE_PS2 | 1 - .../configs/L1-RECOMMEND/{default => arm64}/CONFIG_SCSI_UFSHCD | 0 anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_SND | 0 anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DESIGNWARE | 2 +- .../L1-RECOMMEND/{default => arm64}/CONFIG_THERMAL_NETLINK | 0 .../L1-RECOMMEND/{default => arm64}/CONFIG_UEVENT_HELPER | 0 
.../L1-RECOMMEND/arm64/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES | 1 - anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CUSTOM_METHOD | 2 +- anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_CNIC | 0 .../L1-RECOMMEND/{arm64 => default}/CONFIG_INPUT_MOUSEDEV | 0 .../configs/L1-RECOMMEND/{x86 => default}/CONFIG_KEYBOARD_ATKBD | 0 anolis/configs/L1-RECOMMEND/default/CONFIG_MOUSE_PS2 | 1 + .../{x86 => default}/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES | 0 anolis/configs/L1-RECOMMEND/x86/CONFIG_ATA_OVER_ETH | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_BLK_DEV_PCIESSD_MTIP32XX | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_FUSION_CTL | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_MTHCA | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_INPUT_MOUSEDEV | 1 - anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SHARP_DECODER | 2 +- anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_XMP_DECODER | 2 +- anolis/configs/L1-RECOMMEND/x86/CONFIG_MOUSE_PS2 | 1 - anolis/configs/L1-RECOMMEND/x86/CONFIG_NET_TULIP | 2 +- anolis/configs/L1-RECOMMEND/x86/CONFIG_SCSI_UFSHCD | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_SND | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_NETLINK | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_UEVENT_HELPER | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_PCI_IGD | 2 +- anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_ELANTECH | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_SENTELIC | 1 + anolis/configs/L2-OPTIONAL/{default => arm64}/CONFIG_PM_DEVFREQ | 0 .../L2-OPTIONAL/{x86 => default}/CONFIG_INPUT_VIVALDIFMAP | 0 .../configs/L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_ALPS | 0 .../configs/L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_BYD | 0 .../L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_CYPRESS | 0 .../L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_FOCALTECH | 0 .../L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_LOGIPS2PP | 0 .../configs/L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_SMBUS | 0 .../L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_SYNAPTICS | 0 .../{x86 => 
default}/CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS | 0 .../L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_TOUCHKIT | 0 .../L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_TRACKPOINT | 0 anolis/configs/L2-OPTIONAL/x86/CONFIG_DE2104X | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PASSIVE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PERFORMANCE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_POWERSAVE | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_USERSPACE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_THERMAL | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_DM9102 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HDMI_LPE_AUDIO | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PRODIKEYS | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_MTHCA_DEBUG | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_PCMCIA_XIRCOM | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ_EVENT | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_OPP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PCI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PLATFORM | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_BSG | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_HWMON | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AD1889 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALI5451 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALOOP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS300 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS4000 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ASIHPI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP_MODEM | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8810 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8820 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8830 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AW2 | 1 + 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AZT3328 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BCD2000 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BEBOB | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BT87X | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CA0106 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CMIPCI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS4281 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS46XX | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_FAST_LOOKUP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_INPUT_VALIDATION | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTXFI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA20 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA24 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DEBUG | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DICE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DMA_SGBUF | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DRIVERS | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DUMMY | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DYNAMIC_MINORS | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ECHO3G | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1X | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1370 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1371 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1938 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1968 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREFACE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_DIGI00X | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_MOTU | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_TASCAM | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWORKS | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FM801 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA20 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA24 | 1 + 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_INTEL | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_PREALLOC_SIZE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSPM | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HRTIMER | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1712 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1724 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGO | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJ | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJX | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIO | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIOX | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0M | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ISIGHT | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_KORG1212 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA20 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA24 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LOLA | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LX6464ES | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MAESTRO3 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIA | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIXART | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MONA | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MPU401 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTPAV | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTS64 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_NM256 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OSSEMUL | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXFW | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXYGEN | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCMTEST | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCM_TIMER | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCSP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCXHR | 
1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PORTMAN2X4 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PROC_FS | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RIPTIDE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME32 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME96 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME9652 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SE6X | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SEQUENCER | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SERIAL_U16550 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SOC | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SONICVIBES | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SPI | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SUPPORT_OLD_API | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_TRIDENT | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_6FIRE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_AUDIO | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_CAIAQ | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_HIFACE | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_POD | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_PODHD | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_TONEPORT | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_UA101 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_US122L | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_USX2Y | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_VARIAX | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PRINTK | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PROCFS | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX_MODEM | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTIO | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTUOSO | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VX222 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_X86 | 1 + 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_XEN_FRONTEND | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_YMFPCI | 1 + .../configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_ALSA_SUPPORT | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_TULIP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_UEVENT_HELPER_PATH | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ULI526X | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_WINBOND_840 | 1 + 191 files changed, 161 insertions(+), 13 deletions(-) rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_ATA_OVER_ETH (100%) rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_BLK_DEV_PCIESSD_MTIP32XX (100%) delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CNIC rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_FUSION_CTL (100%) rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_INFINIBAND_MTHCA (100%) delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEYBOARD_ATKBD delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_MOUSE_PS2 rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_SCSI_UFSHCD (100%) rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_SND (100%) rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_THERMAL_NETLINK (100%) rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_UEVENT_HELPER (100%) delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_CNIC (100%) rename anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_INPUT_MOUSEDEV (100%) rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_KEYBOARD_ATKBD (100%) create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MOUSE_PS2 rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES (100%) create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ATA_OVER_ETH create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_BLK_DEV_PCIESSD_MTIP32XX create mode 100644 
anolis/configs/L1-RECOMMEND/x86/CONFIG_FUSION_CTL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_MTHCA delete mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INPUT_MOUSEDEV delete mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_MOUSE_PS2 create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_SCSI_UFSHCD create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_SND create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_NETLINK create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_UEVENT_HELPER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_ELANTECH create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_SENTELIC rename anolis/configs/L2-OPTIONAL/{default => arm64}/CONFIG_PM_DEVFREQ (100%) rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_INPUT_VIVALDIFMAP (100%) rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_ALPS (100%) rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_BYD (100%) rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_CYPRESS (100%) rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_FOCALTECH (100%) rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_LOGIPS2PP (100%) rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_SMBUS (100%) rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_SYNAPTICS (100%) rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS (100%) rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_TOUCHKIT (100%) rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_MOUSE_PS2_TRACKPOINT (100%) create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DE2104X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PASSIVE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PERFORMANCE create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_POWERSAVE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_USERSPACE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_THERMAL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_DM9102 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HDMI_LPE_AUDIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PRODIKEYS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_MTHCA_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PCMCIA_XIRCOM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ_EVENT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_OPP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PLATFORM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_BSG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_HWMON create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AD1889 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALI5451 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALOOP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS300 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS4000 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ASIHPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP_MODEM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8810 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8820 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8830 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AW2 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AZT3328 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BCD2000 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BEBOB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BT87X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CA0106 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CMIPCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS4281 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS46XX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_FAST_LOOKUP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_INPUT_VALIDATION create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTXFI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA20 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA24 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DICE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DMA_SGBUF create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DRIVERS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DUMMY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DYNAMIC_MINORS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ECHO3G create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1370 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1371 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1938 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1968 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREFACE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_DIGI00X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_MOTU create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_TASCAM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWORKS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FM801 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA20 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA24 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_INTEL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_PREALLOC_SIZE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSPM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HRTIMER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1712 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1724 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJ create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIOX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0M create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ISIGHT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_KORG1212 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA20 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA24 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LOLA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LX6464ES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MAESTRO3 create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIXART create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MONA create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MPU401 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTPAV create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTS64 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_NM256 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OSSEMUL create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXFW create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXYGEN create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCMTEST create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCM_TIMER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCSP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCXHR create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PORTMAN2X4 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PROC_FS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RIPTIDE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME32 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME96 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME9652 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SE6X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SEQUENCER create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SERIAL_U16550 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SOC create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SONICVIBES create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SPI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SUPPORT_OLD_API create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_TRIDENT create mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_6FIRE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_AUDIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_CAIAQ create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_HIFACE create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_POD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_PODHD create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_TONEPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_UA101 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_US122L create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_USX2Y create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_VARIAX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PRINTK create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PROCFS create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX_MODEM create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTIO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTUOSO create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VX222 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_X86 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_XEN_FRONTEND create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_YMFPCI create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_ALSA_SUPPORT create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_TULIP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_UEVENT_HELPER_PATH create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ULI526X create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_WINBOND_840 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_OVER_ETH 
b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ATA_OVER_ETH similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_ATA_OVER_ETH rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_ATA_OVER_ETH diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_PCIESSD_MTIP32XX b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_BLK_DEV_PCIESSD_MTIP32XX similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_BLK_DEV_PCIESSD_MTIP32XX rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_BLK_DEV_PCIESSD_MTIP32XX diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CNIC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CNIC deleted file mode 100644 index 6f1f30e41a52..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CNIC +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_CNIC is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_CTL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUSION_CTL similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_FUSION_CTL rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_FUSION_CTL diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_THERMAL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_THERMAL index 05fc96b89e1c..fe367d4ff6b5 100644 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_THERMAL +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_HISI_THERMAL @@ -1 +1 @@ -CONFIG_HISI_THERMAL=m +CONFIG_HISI_THERMAL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_MTHCA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INFINIBAND_MTHCA similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_MTHCA rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_INFINIBAND_MTHCA diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEYBOARD_ATKBD b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEYBOARD_ATKBD deleted file mode 100644 index 99703e4ab082..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KEYBOARD_ATKBD +++ /dev/null @@ -1 +0,0 @@ -# 
CONFIG_KEYBOARD_ATKBD is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MOUSE_PS2 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MOUSE_PS2 deleted file mode 100644 index bd4390f41ebb..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MOUSE_PS2 +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_MOUSE_PS2 is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_UFSHCD b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SCSI_UFSHCD similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_UFSHCD rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_SCSI_UFSHCD diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SND b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SND similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_SND rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_SND diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DESIGNWARE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DESIGNWARE index 9f92cba69b5c..990578fbfa5d 100644 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DESIGNWARE +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_DESIGNWARE @@ -1 +1 @@ -CONFIG_SPI_DESIGNWARE=m +CONFIG_SPI_DESIGNWARE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_NETLINK b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_NETLINK similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_THERMAL_NETLINK rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_THERMAL_NETLINK diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_UEVENT_HELPER b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_UEVENT_HELPER similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_UEVENT_HELPER rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_UEVENT_HELPER diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES deleted file mode 100644 index 1abf97c1a25a..000000000000 --- 
a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CUSTOM_METHOD b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CUSTOM_METHOD index e7b797d0235e..76853b6d8224 100644 --- a/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CUSTOM_METHOD +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_ACPI_CUSTOM_METHOD @@ -1 +1 @@ -# CONFIG_ACPI_CUSTOM_METHOD is not set +CONFIG_ACPI_CUSTOM_METHOD=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CNIC b/anolis/configs/L1-RECOMMEND/default/CONFIG_CNIC similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_CNIC rename to anolis/configs/L1-RECOMMEND/default/CONFIG_CNIC diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_INPUT_MOUSEDEV b/anolis/configs/L1-RECOMMEND/default/CONFIG_INPUT_MOUSEDEV similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_INPUT_MOUSEDEV rename to anolis/configs/L1-RECOMMEND/default/CONFIG_INPUT_MOUSEDEV diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KEYBOARD_ATKBD b/anolis/configs/L1-RECOMMEND/default/CONFIG_KEYBOARD_ATKBD similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_KEYBOARD_ATKBD rename to anolis/configs/L1-RECOMMEND/default/CONFIG_KEYBOARD_ATKBD diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MOUSE_PS2 b/anolis/configs/L1-RECOMMEND/default/CONFIG_MOUSE_PS2 new file mode 100644 index 000000000000..ed68613bee8b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MOUSE_PS2 @@ -0,0 +1 @@ +CONFIG_MOUSE_PS2=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES b/anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES rename to anolis/configs/L1-RECOMMEND/default/CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES diff --git 
a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ATA_OVER_ETH b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ATA_OVER_ETH new file mode 100644 index 000000000000..f9a30b59a373 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ATA_OVER_ETH @@ -0,0 +1 @@ +CONFIG_ATA_OVER_ETH=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_BLK_DEV_PCIESSD_MTIP32XX b/anolis/configs/L1-RECOMMEND/x86/CONFIG_BLK_DEV_PCIESSD_MTIP32XX new file mode 100644 index 000000000000..4efb94f64d5c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_BLK_DEV_PCIESSD_MTIP32XX @@ -0,0 +1 @@ +CONFIG_BLK_DEV_PCIESSD_MTIP32XX=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_FUSION_CTL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FUSION_CTL new file mode 100644 index 000000000000..82bf50128277 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_FUSION_CTL @@ -0,0 +1 @@ +CONFIG_FUSION_CTL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_MTHCA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_MTHCA new file mode 100644 index 000000000000..b5f09c38c0b7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INFINIBAND_MTHCA @@ -0,0 +1 @@ +CONFIG_INFINIBAND_MTHCA=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INPUT_MOUSEDEV b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INPUT_MOUSEDEV deleted file mode 100644 index cc573920bb78..000000000000 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INPUT_MOUSEDEV +++ /dev/null @@ -1 +0,0 @@ -CONFIG_INPUT_MOUSEDEV=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SHARP_DECODER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SHARP_DECODER index 9ea076845465..ecbacc50aee6 100644 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SHARP_DECODER +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_SHARP_DECODER @@ -1 +1 @@ -# CONFIG_IR_SHARP_DECODER is not set +CONFIG_IR_SHARP_DECODER=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_XMP_DECODER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_XMP_DECODER index 6c9e03537430..e5368826d93a 100644 
--- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_XMP_DECODER +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IR_XMP_DECODER @@ -1 +1 @@ -# CONFIG_IR_XMP_DECODER is not set +CONFIG_IR_XMP_DECODER=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MOUSE_PS2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MOUSE_PS2 deleted file mode 100644 index 5902f25a65e4..000000000000 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_MOUSE_PS2 +++ /dev/null @@ -1 +0,0 @@ -CONFIG_MOUSE_PS2=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NET_TULIP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NET_TULIP index 3a54ce6fffc6..1191c38531ee 100644 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NET_TULIP +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NET_TULIP @@ -1 +1 @@ -# CONFIG_NET_TULIP is not set +CONFIG_NET_TULIP=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCSI_UFSHCD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCSI_UFSHCD new file mode 100644 index 000000000000..041b8209b69c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SCSI_UFSHCD @@ -0,0 +1 @@ +CONFIG_SCSI_UFSHCD=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_SND b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SND new file mode 100644 index 000000000000..1f2dde914637 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_SND @@ -0,0 +1 @@ +CONFIG_SND=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_NETLINK b/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_NETLINK new file mode 100644 index 000000000000..a7a857579e1a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_THERMAL_NETLINK @@ -0,0 +1 @@ +CONFIG_THERMAL_NETLINK=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_UEVENT_HELPER b/anolis/configs/L1-RECOMMEND/x86/CONFIG_UEVENT_HELPER new file mode 100644 index 000000000000..6091d5635e43 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_UEVENT_HELPER @@ -0,0 +1 @@ +CONFIG_UEVENT_HELPER=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_PCI_IGD 
b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_PCI_IGD index 566c032ec0f4..88a6f32c19be 100644 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_PCI_IGD +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_VFIO_PCI_IGD @@ -1 +1 @@ -# CONFIG_VFIO_PCI_IGD is not set +CONFIG_VFIO_PCI_IGD=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_ELANTECH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_ELANTECH new file mode 100644 index 000000000000..b84ff05dc82b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_ELANTECH @@ -0,0 +1 @@ +# CONFIG_MOUSE_PS2_ELANTECH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_SENTELIC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_SENTELIC new file mode 100644 index 000000000000..1ecdbe98e5ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MOUSE_PS2_SENTELIC @@ -0,0 +1 @@ +# CONFIG_MOUSE_PS2_SENTELIC is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_DEVFREQ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_DEVFREQ similarity index 100% rename from anolis/configs/L2-OPTIONAL/default/CONFIG_PM_DEVFREQ rename to anolis/configs/L2-OPTIONAL/arm64/CONFIG_PM_DEVFREQ diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_VIVALDIFMAP b/anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_VIVALDIFMAP similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_INPUT_VIVALDIFMAP rename to anolis/configs/L2-OPTIONAL/default/CONFIG_INPUT_VIVALDIFMAP diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ALPS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_ALPS similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_ALPS rename to anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_ALPS diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_BYD b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_BYD similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_BYD rename to 
anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_BYD diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_CYPRESS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_CYPRESS similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_CYPRESS rename to anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_CYPRESS diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_FOCALTECH b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_FOCALTECH similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_FOCALTECH rename to anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_FOCALTECH diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_LOGIPS2PP b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_LOGIPS2PP similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_LOGIPS2PP rename to anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_LOGIPS2PP diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SMBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SMBUS similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SMBUS rename to anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SMBUS diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SYNAPTICS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SYNAPTICS similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SYNAPTICS rename to anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SYNAPTICS diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS rename to anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_TOUCHKIT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_TOUCHKIT similarity index 
100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_TOUCHKIT rename to anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_TOUCHKIT diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_TRACKPOINT b/anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_TRACKPOINT similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_MOUSE_PS2_TRACKPOINT rename to anolis/configs/L2-OPTIONAL/default/CONFIG_MOUSE_PS2_TRACKPOINT diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DE2104X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DE2104X new file mode 100644 index 000000000000..e43c05e6a897 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DE2104X @@ -0,0 +1 @@ +# CONFIG_DE2104X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PASSIVE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PASSIVE new file mode 100644 index 000000000000..5f80876fcda4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PASSIVE @@ -0,0 +1 @@ +# CONFIG_DEVFREQ_GOV_PASSIVE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PERFORMANCE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PERFORMANCE new file mode 100644 index 000000000000..b79a05cf1306 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_PERFORMANCE @@ -0,0 +1 @@ +# CONFIG_DEVFREQ_GOV_PERFORMANCE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_POWERSAVE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_POWERSAVE new file mode 100644 index 000000000000..5b1218239c63 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_POWERSAVE @@ -0,0 +1 @@ +# CONFIG_DEVFREQ_GOV_POWERSAVE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND new file mode 100644 index 000000000000..ca7663b8301e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND @@ -0,0 +1 @@ 
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_USERSPACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_USERSPACE new file mode 100644 index 000000000000..001f636a572d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_GOV_USERSPACE @@ -0,0 +1 @@ +# CONFIG_DEVFREQ_GOV_USERSPACE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_THERMAL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_THERMAL new file mode 100644 index 000000000000..5b90f90828f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DEVFREQ_THERMAL @@ -0,0 +1 @@ +# CONFIG_DEVFREQ_THERMAL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_DM9102 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DM9102 new file mode 100644 index 000000000000..e9c58e76dff0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_DM9102 @@ -0,0 +1 @@ +# CONFIG_DM9102 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDMI_LPE_AUDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDMI_LPE_AUDIO new file mode 100644 index 000000000000..a0da57f4088c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HDMI_LPE_AUDIO @@ -0,0 +1 @@ +# CONFIG_HDMI_LPE_AUDIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PRODIKEYS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PRODIKEYS new file mode 100644 index 000000000000..16e17caf6190 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_HID_PRODIKEYS @@ -0,0 +1 @@ +# CONFIG_HID_PRODIKEYS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_MTHCA_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_MTHCA_DEBUG new file mode 100644 index 000000000000..2eb8392f5c84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INFINIBAND_MTHCA_DEBUG @@ -0,0 +1 @@ +CONFIG_INFINIBAND_MTHCA_DEBUG=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCMCIA_XIRCOM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCMCIA_XIRCOM new file mode 100644 index 
000000000000..957ce8cf727e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PCMCIA_XIRCOM @@ -0,0 +1 @@ +# CONFIG_PCMCIA_XIRCOM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ new file mode 100644 index 000000000000..89637109cad4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ @@ -0,0 +1 @@ +CONFIG_PM_DEVFREQ=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ_EVENT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ_EVENT new file mode 100644 index 000000000000..e8ed02e7e5c7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_DEVFREQ_EVENT @@ -0,0 +1 @@ +# CONFIG_PM_DEVFREQ_EVENT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_OPP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_OPP new file mode 100644 index 000000000000..bbe2b56ba5ff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_OPP @@ -0,0 +1 @@ +CONFIG_PM_OPP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PCI new file mode 100644 index 000000000000..99d4f262cf70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PCI @@ -0,0 +1 @@ +# CONFIG_SCSI_UFSHCD_PCI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PLATFORM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PLATFORM new file mode 100644 index 000000000000..16e9439f5f2f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFSHCD_PLATFORM @@ -0,0 +1 @@ +# CONFIG_SCSI_UFSHCD_PLATFORM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_BSG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_BSG new file mode 100644 index 000000000000..2b0c98548c08 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_BSG @@ -0,0 +1 @@ +# CONFIG_SCSI_UFS_BSG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_HWMON 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_HWMON new file mode 100644 index 000000000000..ba9edd4cd6da --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SCSI_UFS_HWMON @@ -0,0 +1 @@ +# CONFIG_SCSI_UFS_HWMON is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AD1889 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AD1889 new file mode 100644 index 000000000000..09102d9cfc64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AD1889 @@ -0,0 +1 @@ +# CONFIG_SND_AD1889 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALI5451 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALI5451 new file mode 100644 index 000000000000..293781cd636a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALI5451 @@ -0,0 +1 @@ +# CONFIG_SND_ALI5451 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALOOP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALOOP new file mode 100644 index 000000000000..a7eccb6f0a12 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALOOP @@ -0,0 +1 @@ +# CONFIG_SND_ALOOP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS300 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS300 new file mode 100644 index 000000000000..68cb96e053ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS300 @@ -0,0 +1 @@ +# CONFIG_SND_ALS300 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS4000 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS4000 new file mode 100644 index 000000000000..0ede559b708c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ALS4000 @@ -0,0 +1 @@ +# CONFIG_SND_ALS4000 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ASIHPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ASIHPI new file mode 100644 index 000000000000..d58e532055fc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ASIHPI @@ -0,0 +1 @@ +# CONFIG_SND_ASIHPI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP new file mode 100644 index 000000000000..22c792ae0bfe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP @@ -0,0 +1 @@ +# CONFIG_SND_ATIIXP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP_MODEM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP_MODEM new file mode 100644 index 000000000000..4833d7971286 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ATIIXP_MODEM @@ -0,0 +1 @@ +# CONFIG_SND_ATIIXP_MODEM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8810 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8810 new file mode 100644 index 000000000000..8f3dbf45d927 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8810 @@ -0,0 +1 @@ +# CONFIG_SND_AU8810 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8820 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8820 new file mode 100644 index 000000000000..7dbfee28b701 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8820 @@ -0,0 +1 @@ +# CONFIG_SND_AU8820 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8830 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8830 new file mode 100644 index 000000000000..e8a91203c5c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AU8830 @@ -0,0 +1 @@ +# CONFIG_SND_AU8830 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AW2 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AW2 new file mode 100644 index 000000000000..0065d112f5ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AW2 @@ -0,0 +1 @@ +# CONFIG_SND_AW2 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AZT3328 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AZT3328 new file mode 100644 index 000000000000..6375bd6ed5a6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_AZT3328 @@ -0,0 +1 @@ +# CONFIG_SND_AZT3328 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BCD2000 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BCD2000 new file mode 100644 index 000000000000..0a60c490b2bb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BCD2000 @@ -0,0 +1 @@ +# CONFIG_SND_BCD2000 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BEBOB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BEBOB new file mode 100644 index 000000000000..5600866c1208 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BEBOB @@ -0,0 +1 @@ +# CONFIG_SND_BEBOB is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BT87X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BT87X new file mode 100644 index 000000000000..05dbf16d52c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_BT87X @@ -0,0 +1 @@ +# CONFIG_SND_BT87X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CA0106 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CA0106 new file mode 100644 index 000000000000..8ccecad9bd70 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CA0106 @@ -0,0 +1 @@ +# CONFIG_SND_CA0106 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CMIPCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CMIPCI new file mode 100644 index 000000000000..15b6957c0201 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CMIPCI @@ -0,0 +1 @@ +# CONFIG_SND_CMIPCI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS4281 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS4281 new file mode 100644 index 000000000000..06b17f0f7999 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS4281 @@ -0,0 +1 @@ +# CONFIG_SND_CS4281 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS46XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS46XX new file mode 100644 index 000000000000..bdf735f0b183 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CS46XX @@ -0,0 +1 @@ +# CONFIG_SND_CS46XX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_FAST_LOOKUP 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_FAST_LOOKUP new file mode 100644 index 000000000000..23547bc33781 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_FAST_LOOKUP @@ -0,0 +1 @@ +CONFIG_SND_CTL_FAST_LOOKUP=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_INPUT_VALIDATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_INPUT_VALIDATION new file mode 100644 index 000000000000..55768f31aee4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTL_INPUT_VALIDATION @@ -0,0 +1 @@ +# CONFIG_SND_CTL_INPUT_VALIDATION is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTXFI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTXFI new file mode 100644 index 000000000000..4210ca96de18 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_CTXFI @@ -0,0 +1 @@ +# CONFIG_SND_CTXFI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA20 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA20 new file mode 100644 index 000000000000..94cfd295f5a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA20 @@ -0,0 +1 @@ +# CONFIG_SND_DARLA20 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA24 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA24 new file mode 100644 index 000000000000..7dad111a18ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DARLA24 @@ -0,0 +1 @@ +# CONFIG_SND_DARLA24 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DEBUG new file mode 100644 index 000000000000..21d131de6679 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DEBUG @@ -0,0 +1 @@ +# CONFIG_SND_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DICE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DICE new file mode 100644 index 000000000000..e9bc8c386010 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DICE @@ -0,0 +1 @@ +# CONFIG_SND_DICE is not set diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DMA_SGBUF b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DMA_SGBUF new file mode 100644 index 000000000000..d95631971004 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DMA_SGBUF @@ -0,0 +1 @@ +CONFIG_SND_DMA_SGBUF=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DRIVERS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DRIVERS new file mode 100644 index 000000000000..83723e9d48b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DRIVERS @@ -0,0 +1 @@ +CONFIG_SND_DRIVERS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DUMMY b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DUMMY new file mode 100644 index 000000000000..2e2d9940a64e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DUMMY @@ -0,0 +1 @@ +# CONFIG_SND_DUMMY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DYNAMIC_MINORS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DYNAMIC_MINORS new file mode 100644 index 000000000000..6c2911b74a73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_DYNAMIC_MINORS @@ -0,0 +1 @@ +# CONFIG_SND_DYNAMIC_MINORS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ECHO3G b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ECHO3G new file mode 100644 index 000000000000..dbc71572d39b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ECHO3G @@ -0,0 +1 @@ +# CONFIG_SND_ECHO3G is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1 new file mode 100644 index 000000000000..5b3614bfc977 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1 @@ -0,0 +1 @@ +# CONFIG_SND_EMU10K1 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1X new file mode 100644 index 000000000000..b44b14a86ebc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_EMU10K1X @@ -0,0 +1 @@ +# CONFIG_SND_EMU10K1X is not set 
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1370 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1370 new file mode 100644 index 000000000000..8ed9cccd199d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1370 @@ -0,0 +1 @@ +# CONFIG_SND_ENS1370 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1371 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1371 new file mode 100644 index 000000000000..635ec92367bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ENS1371 @@ -0,0 +1 @@ +# CONFIG_SND_ENS1371 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1938 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1938 new file mode 100644 index 000000000000..cd9e4ba6c1d5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1938 @@ -0,0 +1 @@ +# CONFIG_SND_ES1938 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1968 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1968 new file mode 100644 index 000000000000..22231b0583fa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ES1968 @@ -0,0 +1 @@ +# CONFIG_SND_ES1968 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREFACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREFACE new file mode 100644 index 000000000000..e175acb0a098 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREFACE @@ -0,0 +1 @@ +# CONFIG_SND_FIREFACE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE new file mode 100644 index 000000000000..6e0712765bdf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE @@ -0,0 +1 @@ +CONFIG_SND_FIREWIRE=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_DIGI00X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_DIGI00X new file mode 100644 index 000000000000..8cd2fa8174ef --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_DIGI00X @@ -0,0 +1 @@ +# 
CONFIG_SND_FIREWIRE_DIGI00X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_MOTU b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_MOTU new file mode 100644 index 000000000000..a9c67879cbd8 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_MOTU @@ -0,0 +1 @@ +# CONFIG_SND_FIREWIRE_MOTU is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_TASCAM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_TASCAM new file mode 100644 index 000000000000..07d6b9dbd0df --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWIRE_TASCAM @@ -0,0 +1 @@ +# CONFIG_SND_FIREWIRE_TASCAM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWORKS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWORKS new file mode 100644 index 000000000000..b9e704c9ae81 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FIREWORKS @@ -0,0 +1 @@ +# CONFIG_SND_FIREWORKS is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FM801 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FM801 new file mode 100644 index 000000000000..2f820d8f49ea --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_FM801 @@ -0,0 +1 @@ +# CONFIG_SND_FM801 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA20 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA20 new file mode 100644 index 000000000000..237adaa67cc2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA20 @@ -0,0 +1 @@ +# CONFIG_SND_GINA20 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA24 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA24 new file mode 100644 index 000000000000..c248e36cef3f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_GINA24 @@ -0,0 +1 @@ +# CONFIG_SND_GINA24 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_INTEL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_INTEL new file mode 100644 index 000000000000..6f057ecfeaaa --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_INTEL @@ -0,0 +1 @@ +# CONFIG_SND_HDA_INTEL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_PREALLOC_SIZE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_PREALLOC_SIZE new file mode 100644 index 000000000000..c7493fadbec2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDA_PREALLOC_SIZE @@ -0,0 +1 @@ +CONFIG_SND_HDA_PREALLOC_SIZE=0 diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSP new file mode 100644 index 000000000000..488843333c19 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSP @@ -0,0 +1 @@ +# CONFIG_SND_HDSP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSPM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSPM new file mode 100644 index 000000000000..591d39285e3b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HDSPM @@ -0,0 +1 @@ +# CONFIG_SND_HDSPM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HRTIMER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HRTIMER new file mode 100644 index 000000000000..c05a9e7cd9bc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_HRTIMER @@ -0,0 +1 @@ +# CONFIG_SND_HRTIMER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1712 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1712 new file mode 100644 index 000000000000..4bc037df56d6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1712 @@ -0,0 +1 @@ +# CONFIG_SND_ICE1712 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1724 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1724 new file mode 100644 index 000000000000..a61f1f14b42c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ICE1724 @@ -0,0 +1 @@ +# CONFIG_SND_ICE1724 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGO new file mode 100644 index 000000000000..018458b13ddd --- 
/dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGO @@ -0,0 +1 @@ +# CONFIG_SND_INDIGO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJ new file mode 100644 index 000000000000..1b50efe2806e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJ @@ -0,0 +1 @@ +# CONFIG_SND_INDIGODJ is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJX new file mode 100644 index 000000000000..55d1e51b95d2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGODJX @@ -0,0 +1 @@ +# CONFIG_SND_INDIGODJX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIO new file mode 100644 index 000000000000..f08d6c10ec49 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIO @@ -0,0 +1 @@ +# CONFIG_SND_INDIGOIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIOX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIOX new file mode 100644 index 000000000000..5d2cf897abb4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INDIGOIOX @@ -0,0 +1 @@ +# CONFIG_SND_INDIGOIOX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0 new file mode 100644 index 000000000000..d97191a98f68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0 @@ -0,0 +1 @@ +# CONFIG_SND_INTEL8X0 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0M b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0M new file mode 100644 index 000000000000..4e04bb51cf1e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_INTEL8X0M @@ -0,0 +1 @@ +# CONFIG_SND_INTEL8X0M is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ISIGHT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ISIGHT new file mode 100644 
index 000000000000..a2b5bdd76f4a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_ISIGHT @@ -0,0 +1 @@ +# CONFIG_SND_ISIGHT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_KORG1212 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_KORG1212 new file mode 100644 index 000000000000..4b6bb9cfd200 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_KORG1212 @@ -0,0 +1 @@ +# CONFIG_SND_KORG1212 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA20 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA20 new file mode 100644 index 000000000000..d49f5af1aa1a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA20 @@ -0,0 +1 @@ +# CONFIG_SND_LAYLA20 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA24 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA24 new file mode 100644 index 000000000000..7a1198db1825 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LAYLA24 @@ -0,0 +1 @@ +# CONFIG_SND_LAYLA24 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LOLA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LOLA new file mode 100644 index 000000000000..4687c9628fdc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LOLA @@ -0,0 +1 @@ +# CONFIG_SND_LOLA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LX6464ES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LX6464ES new file mode 100644 index 000000000000..f5ca1aa35389 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_LX6464ES @@ -0,0 +1 @@ +# CONFIG_SND_LX6464ES is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MAESTRO3 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MAESTRO3 new file mode 100644 index 000000000000..d93d1ac5fbfc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MAESTRO3 @@ -0,0 +1 @@ +# CONFIG_SND_MAESTRO3 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIA new file mode 100644 index 
000000000000..663a8cff6c91 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIA @@ -0,0 +1 @@ +# CONFIG_SND_MIA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIXART b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIXART new file mode 100644 index 000000000000..b1a96442ea0b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MIXART @@ -0,0 +1 @@ +# CONFIG_SND_MIXART is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MONA b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MONA new file mode 100644 index 000000000000..369b59f3b522 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MONA @@ -0,0 +1 @@ +# CONFIG_SND_MONA is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MPU401 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MPU401 new file mode 100644 index 000000000000..5ef99d5ba1f2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MPU401 @@ -0,0 +1 @@ +# CONFIG_SND_MPU401 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTPAV b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTPAV new file mode 100644 index 000000000000..150befdf8732 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTPAV @@ -0,0 +1 @@ +# CONFIG_SND_MTPAV is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTS64 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTS64 new file mode 100644 index 000000000000..80edee474d8a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_MTS64 @@ -0,0 +1 @@ +# CONFIG_SND_MTS64 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_NM256 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_NM256 new file mode 100644 index 000000000000..84971d4fd34f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_NM256 @@ -0,0 +1 @@ +# CONFIG_SND_NM256 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OSSEMUL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OSSEMUL new file mode 100644 index 000000000000..998d310620a1 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OSSEMUL @@ -0,0 +1 @@ +# CONFIG_SND_OSSEMUL is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXFW b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXFW new file mode 100644 index 000000000000..31da39fc0faa --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXFW @@ -0,0 +1 @@ +# CONFIG_SND_OXFW is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXYGEN b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXYGEN new file mode 100644 index 000000000000..a2f71fa5c914 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_OXYGEN @@ -0,0 +1 @@ +# CONFIG_SND_OXYGEN is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCI new file mode 100644 index 000000000000..667778acc79a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCI @@ -0,0 +1 @@ +CONFIG_SND_PCI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCMTEST b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCMTEST new file mode 100644 index 000000000000..07232a49cf04 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCMTEST @@ -0,0 +1 @@ +# CONFIG_SND_PCMTEST is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCM_TIMER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCM_TIMER new file mode 100644 index 000000000000..504115d70591 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCM_TIMER @@ -0,0 +1 @@ +CONFIG_SND_PCM_TIMER=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCSP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCSP new file mode 100644 index 000000000000..8a97d6baae09 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCSP @@ -0,0 +1 @@ +# CONFIG_SND_PCSP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCXHR b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCXHR new file mode 100644 index 000000000000..fa7e3199ebb4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PCXHR @@ -0,0 
+1 @@ +# CONFIG_SND_PCXHR is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PORTMAN2X4 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PORTMAN2X4 new file mode 100644 index 000000000000..7f4615b982e1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PORTMAN2X4 @@ -0,0 +1 @@ +# CONFIG_SND_PORTMAN2X4 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PROC_FS b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PROC_FS new file mode 100644 index 000000000000..506a28358360 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_PROC_FS @@ -0,0 +1 @@ +CONFIG_SND_PROC_FS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RIPTIDE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RIPTIDE new file mode 100644 index 000000000000..da50510fa0fd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RIPTIDE @@ -0,0 +1 @@ +# CONFIG_SND_RIPTIDE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME32 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME32 new file mode 100644 index 000000000000..7def0b277782 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME32 @@ -0,0 +1 @@ +# CONFIG_SND_RME32 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME96 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME96 new file mode 100644 index 000000000000..8b3f743ec0ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME96 @@ -0,0 +1 @@ +# CONFIG_SND_RME96 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME9652 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME9652 new file mode 100644 index 000000000000..3c5e88f77896 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_RME9652 @@ -0,0 +1 @@ +# CONFIG_SND_RME9652 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SE6X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SE6X new file mode 100644 index 000000000000..3da4e9ce72ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SE6X @@ -0,0 +1 @@ +# 
CONFIG_SND_SE6X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SEQUENCER b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SEQUENCER new file mode 100644 index 000000000000..d3055ec3bd7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SEQUENCER @@ -0,0 +1 @@ +# CONFIG_SND_SEQUENCER is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SERIAL_U16550 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SERIAL_U16550 new file mode 100644 index 000000000000..4b802a5283e5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SERIAL_U16550 @@ -0,0 +1 @@ +# CONFIG_SND_SERIAL_U16550 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SOC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SOC new file mode 100644 index 000000000000..a161b8e18362 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SOC @@ -0,0 +1 @@ +# CONFIG_SND_SOC is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SONICVIBES b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SONICVIBES new file mode 100644 index 000000000000..d9a4a21668c1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SONICVIBES @@ -0,0 +1 @@ +# CONFIG_SND_SONICVIBES is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SPI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SPI new file mode 100644 index 000000000000..05a828ae2203 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SPI @@ -0,0 +1 @@ +CONFIG_SND_SPI=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SUPPORT_OLD_API b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SUPPORT_OLD_API new file mode 100644 index 000000000000..dd74570bec64 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_SUPPORT_OLD_API @@ -0,0 +1 @@ +CONFIG_SND_SUPPORT_OLD_API=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_TRIDENT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_TRIDENT new file mode 100644 index 000000000000..abc485d574a6 --- /dev/null +++ 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_TRIDENT @@ -0,0 +1 @@ +# CONFIG_SND_TRIDENT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB new file mode 100644 index 000000000000..fb681b173d67 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB @@ -0,0 +1 @@ +CONFIG_SND_USB=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_6FIRE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_6FIRE new file mode 100644 index 000000000000..d48ebbd6c1f4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_6FIRE @@ -0,0 +1 @@ +# CONFIG_SND_USB_6FIRE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_AUDIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_AUDIO new file mode 100644 index 000000000000..232911a82531 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_AUDIO @@ -0,0 +1 @@ +# CONFIG_SND_USB_AUDIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_CAIAQ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_CAIAQ new file mode 100644 index 000000000000..2352b346814b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_CAIAQ @@ -0,0 +1 @@ +# CONFIG_SND_USB_CAIAQ is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_HIFACE b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_HIFACE new file mode 100644 index 000000000000..bd9be87194f5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_HIFACE @@ -0,0 +1 @@ +# CONFIG_SND_USB_HIFACE is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_POD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_POD new file mode 100644 index 000000000000..86f53fbe75ab --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_POD @@ -0,0 +1 @@ +# CONFIG_SND_USB_POD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_PODHD b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_PODHD new file mode 100644 index 000000000000..c96cf752eae5 
--- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_PODHD @@ -0,0 +1 @@ +# CONFIG_SND_USB_PODHD is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_TONEPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_TONEPORT new file mode 100644 index 000000000000..15ecf5ac46ec --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_TONEPORT @@ -0,0 +1 @@ +# CONFIG_SND_USB_TONEPORT is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_UA101 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_UA101 new file mode 100644 index 000000000000..4805309714ac --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_UA101 @@ -0,0 +1 @@ +# CONFIG_SND_USB_UA101 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_US122L b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_US122L new file mode 100644 index 000000000000..b7a20b76efc5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_US122L @@ -0,0 +1 @@ +# CONFIG_SND_USB_US122L is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_USX2Y b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_USX2Y new file mode 100644 index 000000000000..1e222656395d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_USX2Y @@ -0,0 +1 @@ +# CONFIG_SND_USB_USX2Y is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_VARIAX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_VARIAX new file mode 100644 index 000000000000..68675ae6abbe --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_USB_VARIAX @@ -0,0 +1 @@ +# CONFIG_SND_USB_VARIAX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PRINTK b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PRINTK new file mode 100644 index 000000000000..b119c633de98 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PRINTK @@ -0,0 +1 @@ +# CONFIG_SND_VERBOSE_PRINTK is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PROCFS 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PROCFS new file mode 100644 index 000000000000..4ae50d741fcc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VERBOSE_PROCFS @@ -0,0 +1 @@ +CONFIG_SND_VERBOSE_PROCFS=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX new file mode 100644 index 000000000000..2c26735789e0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX @@ -0,0 +1 @@ +# CONFIG_SND_VIA82XX is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX_MODEM b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX_MODEM new file mode 100644 index 000000000000..53055c69427b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIA82XX_MODEM @@ -0,0 +1 @@ +# CONFIG_SND_VIA82XX_MODEM is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTIO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTIO new file mode 100644 index 000000000000..506b74e9d78a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTIO @@ -0,0 +1 @@ +# CONFIG_SND_VIRTIO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTUOSO b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTUOSO new file mode 100644 index 000000000000..0d204f85d909 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VIRTUOSO @@ -0,0 +1 @@ +# CONFIG_SND_VIRTUOSO is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VX222 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VX222 new file mode 100644 index 000000000000..8cd1e3bcdb07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_VX222 @@ -0,0 +1 @@ +# CONFIG_SND_VX222 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_X86 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_X86 new file mode 100644 index 000000000000..310c61afc0ba --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_X86 @@ -0,0 +1 @@ +CONFIG_SND_X86=y diff --git 
a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_XEN_FRONTEND b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_XEN_FRONTEND new file mode 100644 index 000000000000..064c6b2fa668 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_XEN_FRONTEND @@ -0,0 +1 @@ +# CONFIG_SND_XEN_FRONTEND is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_YMFPCI b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_YMFPCI new file mode 100644 index 000000000000..7bc69034cfad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SND_YMFPCI @@ -0,0 +1 @@ +# CONFIG_SND_YMFPCI is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_ALSA_SUPPORT b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_ALSA_SUPPORT new file mode 100644 index 000000000000..5b406b9baa4f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_THINKPAD_ACPI_ALSA_SUPPORT @@ -0,0 +1 @@ +CONFIG_THINKPAD_ACPI_ALSA_SUPPORT=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TULIP b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TULIP new file mode 100644 index 000000000000..ddcccdbdfe8f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_TULIP @@ -0,0 +1 @@ +# CONFIG_TULIP is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_UEVENT_HELPER_PATH b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UEVENT_HELPER_PATH new file mode 100644 index 000000000000..6f689df15b11 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_UEVENT_HELPER_PATH @@ -0,0 +1 @@ +CONFIG_UEVENT_HELPER_PATH="" diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ULI526X b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ULI526X new file mode 100644 index 000000000000..9afaec20ff50 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ULI526X @@ -0,0 +1 @@ +# CONFIG_ULI526X is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_WINBOND_840 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WINBOND_840 new file mode 100644 index 000000000000..61e16e73f6fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_WINBOND_840 @@ -0,0 +1 
@@ +# CONFIG_WINBOND_840 is not set -- Gitee From 290985373f2350f9a13ff8290dc26a72e2d2ea8e Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Mon, 11 Nov 2024 15:19:13 +0800 Subject: [PATCH 1741/2138] anolis: configs: align L1 fs related configs to devel-5.10 ANBZ: #11822 x86: CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU=y CONFIG_NFSD_V2_ACL=y CONFIG_NFSD_V2=y CONFIG_NFS_V2=m arm64: CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU=y CONFIG_NFSD_V2=y CONFIG_NFSD_V2_ACL=y CONFIG_BCACHE_ASYNC_REGISTRATION=y Signed-off-by: Qiao Ma Reviewed-by: Baolin Wang Reviewed-by: Xuan Zhuo Reviewed-by: Tianjia Zhang Acked-by: Joseph Qi Reviewed-by: Zelin Deng Reviewed-by: Guixin Liu Acked-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4118 --- anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_NFS_V2 | 0 anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V2_ACL | 1 + .../L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU | 1 + .../configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_SINGLE | 1 - anolis/configs/L1-RECOMMEND/x86/CONFIG_NFS_V2 | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_BCACHE_ASYNC_REGISTRATION | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_NFSD_V2 | 2 +- .../default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU | 2 +- .../L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE | 2 +- .../{default => x86}/CONFIG_BCACHE_ASYNC_REGISTRATION | 0 10 files changed, 7 insertions(+), 4 deletions(-) rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_NFS_V2 (100%) create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V2_ACL create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU delete mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_SINGLE create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_NFS_V2 create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_BCACHE_ASYNC_REGISTRATION rename anolis/configs/L2-OPTIONAL/{default => x86}/CONFIG_BCACHE_ASYNC_REGISTRATION (100%) diff --git 
a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V2 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NFS_V2 similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_NFS_V2 rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_NFS_V2 diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V2_ACL b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V2_ACL new file mode 100644 index 000000000000..e48b468c7754 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NFSD_V2_ACL @@ -0,0 +1 @@ +CONFIG_NFSD_V2_ACL=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU new file mode 100644 index 000000000000..79f750f7e14e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU @@ -0,0 +1 @@ +CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_SINGLE b/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_SINGLE deleted file mode 100644 index 5978a504eaec..000000000000 --- a/anolis/configs/L1-RECOMMEND/default/CONFIG_SQUASHFS_DECOMP_SINGLE +++ /dev/null @@ -1 +0,0 @@ -CONFIG_SQUASHFS_DECOMP_SINGLE=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NFS_V2 b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NFS_V2 new file mode 100644 index 000000000000..3bad5613f6e3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NFS_V2 @@ -0,0 +1 @@ +CONFIG_NFS_V2=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BCACHE_ASYNC_REGISTRATION b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BCACHE_ASYNC_REGISTRATION new file mode 100644 index 000000000000..3f41639fd68f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_BCACHE_ASYNC_REGISTRATION @@ -0,0 +1 @@ +CONFIG_BCACHE_ASYNC_REGISTRATION=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFSD_V2 b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFSD_V2 index bcba20a2d853..22cf77e2874b 100644 --- 
a/anolis/configs/L2-OPTIONAL/default/CONFIG_NFSD_V2 +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NFSD_V2 @@ -1 +1 @@ -# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V2=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU index 2963ff2cb1b1..fc9de7069126 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU @@ -1 +1 @@ -# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE index 638c3630aecf..6e030a19b321 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE @@ -1 +1 @@ -CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION b/anolis/configs/L2-OPTIONAL/x86/CONFIG_BCACHE_ASYNC_REGISTRATION similarity index 100% rename from anolis/configs/L2-OPTIONAL/default/CONFIG_BCACHE_ASYNC_REGISTRATION rename to anolis/configs/L2-OPTIONAL/x86/CONFIG_BCACHE_ASYNC_REGISTRATION -- Gitee From e7f62aa573fc0ea0c52a8cbd8ad709430eb7a004 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Mon, 11 Nov 2024 15:24:20 +0800 Subject: [PATCH 1742/2138] anolis: configs: align L1 net related configs to devel-5.10 ANBZ: #11822 x86: CONFIG_NETFILTER_XT_MATCH_IPCOMP=m CONFIG_IP6_NF_TARGET_HL=m CONFIG_IP_DCCP=m arm64: CONFIG_NETFILTER_XT_MATCH_IPCOMP=m Signed-off-by: Qiao Ma Reviewed-by: Baolin Wang Reviewed-by: Xuan Zhuo Reviewed-by: Tianjia Zhang Acked-by: Joseph Qi Reviewed-by: Zelin Deng Reviewed-by: Guixin Liu Acked-by: Shuai Xue 
Link: https://gitee.com/anolis/cloud-kernel/pulls/4118 --- .../L1-RECOMMEND/{default => arm64}/CONFIG_IP6_NF_TARGET_HL | 0 anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_IP_DCCP | 0 .../L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPCOMP | 2 +- anolis/configs/L1-RECOMMEND/x86/CONFIG_IP6_NF_TARGET_HL | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_IP_DCCP | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_INET_DCCP_DIAG | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID2_DEBUG | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3 | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3_DEBUG | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_DEBUG | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_TFRC_LIB | 1 + 11 files changed, 9 insertions(+), 1 deletion(-) rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_IP6_NF_TARGET_HL (100%) rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_IP_DCCP (100%) create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_IP6_NF_TARGET_HL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_IP_DCCP create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_INET_DCCP_DIAG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID2_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3 create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_DEBUG create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_TFRC_LIB diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_HL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IP6_NF_TARGET_HL similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_IP6_NF_TARGET_HL rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_IP6_NF_TARGET_HL diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_IP_DCCP b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_IP_DCCP similarity index 100% rename from 
anolis/configs/L1-RECOMMEND/default/CONFIG_IP_DCCP rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_IP_DCCP diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPCOMP b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPCOMP index 9e114c643133..5df60f906719 100644 --- a/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPCOMP +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NETFILTER_XT_MATCH_IPCOMP @@ -1 +1 @@ -# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IP6_NF_TARGET_HL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IP6_NF_TARGET_HL new file mode 100644 index 000000000000..96b2c947041c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IP6_NF_TARGET_HL @@ -0,0 +1 @@ +CONFIG_IP6_NF_TARGET_HL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_IP_DCCP b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IP_DCCP new file mode 100644 index 000000000000..26ba41376ee8 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_IP_DCCP @@ -0,0 +1 @@ +CONFIG_IP_DCCP=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INET_DCCP_DIAG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INET_DCCP_DIAG new file mode 100644 index 000000000000..6876f3fd266b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INET_DCCP_DIAG @@ -0,0 +1 @@ +CONFIG_INET_DCCP_DIAG=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID2_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID2_DEBUG new file mode 100644 index 000000000000..cddfe2944489 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID2_DEBUG @@ -0,0 +1 @@ +# CONFIG_IP_DCCP_CCID2_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3 b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3 new file mode 100644 index 000000000000..b7427d311976 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3 @@ -0,0 +1 @@ +CONFIG_IP_DCCP_CCID3=y 
diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3_DEBUG new file mode 100644 index 000000000000..b7d849decc4b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_CCID3_DEBUG @@ -0,0 +1 @@ +# CONFIG_IP_DCCP_CCID3_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_DEBUG b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_DEBUG new file mode 100644 index 000000000000..d62cd2f780d0 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_DEBUG @@ -0,0 +1 @@ +# CONFIG_IP_DCCP_DEBUG is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_TFRC_LIB b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_TFRC_LIB new file mode 100644 index 000000000000..b0c1d75c7704 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_IP_DCCP_TFRC_LIB @@ -0,0 +1 @@ +CONFIG_IP_DCCP_TFRC_LIB=y -- Gitee From 22cbaab5f3c4c6384b4696fbb144613b1b1bc0f8 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Thu, 14 Nov 2024 17:46:08 +0800 Subject: [PATCH 1743/2138] anolis: configs: align mm related configs to devel-5.10 ANBZ: #11822 Only one kconfig for x86: CONFIG_ZRAM_MEMORY_TRACKING=y Signed-off-by: Qiao Ma Reviewed-by: Baolin Wang Reviewed-by: Xuan Zhuo Reviewed-by: Tianjia Zhang Acked-by: Joseph Qi Reviewed-by: Zelin Deng Reviewed-by: Guixin Liu Acked-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4118 --- .../L1-RECOMMEND/{default => arm64}/CONFIG_ZRAM_MEMORY_TRACKING | 0 anolis/configs/L1-RECOMMEND/x86/CONFIG_ZRAM_MEMORY_TRACKING | 1 + 2 files changed, 1 insertion(+) rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_ZRAM_MEMORY_TRACKING (100%) create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_ZRAM_MEMORY_TRACKING diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_MEMORY_TRACKING b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ZRAM_MEMORY_TRACKING similarity index 100% rename from 
anolis/configs/L1-RECOMMEND/default/CONFIG_ZRAM_MEMORY_TRACKING rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_ZRAM_MEMORY_TRACKING diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_ZRAM_MEMORY_TRACKING b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ZRAM_MEMORY_TRACKING new file mode 100644 index 000000000000..fb0483ea5dd2 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_ZRAM_MEMORY_TRACKING @@ -0,0 +1 @@ +CONFIG_ZRAM_MEMORY_TRACKING=y -- Gitee From 7013619dd2562656c3566a4c72d31220213ad316 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Mon, 11 Nov 2024 16:35:24 +0800 Subject: [PATCH 1744/2138] anolis: configs: align L1 x86 arch related configs to devel-5.10 ANBZ: #11822 CONFIG_X86_PCC_CPUFREQ=m CONFIG_HW_RANDOM_ZHAOXIN=m CONFIG_INTEL_HFI_THERMAL=y CONFIG_INTEL_IDXD_PERFMON=y CONFIG_INTEL_UNCORE_FREQ_CONTROL_TPMI=m CONFIG_INTEL_VSEC=m CONFIG_INTEL_RAPL_TPMI=m This also adjusted CONFIG_INTEL_UNCORE_FREQ_CONTROL_TPMI to L1. Signed-off-by: Qiao Ma Reviewed-by: Baolin Wang Reviewed-by: Xuan Zhuo Reviewed-by: Tianjia Zhang Acked-by: Joseph Qi Reviewed-by: Zelin Deng Reviewed-by: Guixin Liu Acked-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4118 --- anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_HFI_THERMAL | 2 +- anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_PERFMON | 2 +- anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_TPMI | 2 +- .../configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL | 1 + .../L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL_TPMI | 1 + anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_VSEC | 2 +- anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PCC_CPUFREQ | 2 +- anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL | 1 - 8 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL_TPMI delete mode 100644 
anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_HFI_THERMAL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_HFI_THERMAL index d918b09fe21c..e410d3f983d3 100644 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_HFI_THERMAL +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_HFI_THERMAL @@ -1 +1 @@ -# CONFIG_INTEL_HFI_THERMAL is not set +CONFIG_INTEL_HFI_THERMAL=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_PERFMON b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_PERFMON index 238078a4b727..f21c240492b1 100644 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_PERFMON +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_IDXD_PERFMON @@ -1 +1 @@ -# CONFIG_INTEL_IDXD_PERFMON is not set +CONFIG_INTEL_IDXD_PERFMON=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_TPMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_TPMI index e809fe80e84e..9acd69083d77 100644 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_TPMI +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_RAPL_TPMI @@ -1 +1 @@ -# CONFIG_INTEL_RAPL_TPMI is not set +CONFIG_INTEL_RAPL_TPMI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL new file mode 100644 index 000000000000..f8e5172cfb5d --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL @@ -0,0 +1 @@ +CONFIG_INTEL_UNCORE_FREQ_CONTROL=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL_TPMI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL_TPMI new file mode 100644 index 000000000000..786925e1f883 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL_TPMI @@ -0,0 +1 @@ +CONFIG_INTEL_UNCORE_FREQ_CONTROL_TPMI=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_VSEC b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_VSEC index 
4c2846bdb979..e399ee6fe520 100644 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_VSEC +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_INTEL_VSEC @@ -1 +1 @@ -CONFIG_INTEL_VSEC=y +CONFIG_INTEL_VSEC=m diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PCC_CPUFREQ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PCC_CPUFREQ index 533a2352e2cd..10c283d7dc48 100644 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PCC_CPUFREQ +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_X86_PCC_CPUFREQ @@ -1 +1 @@ -# CONFIG_X86_PCC_CPUFREQ is not set +CONFIG_X86_PCC_CPUFREQ=m diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL b/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL deleted file mode 100644 index 671fe025d6f1..000000000000 --- a/anolis/configs/L2-OPTIONAL/x86/CONFIG_INTEL_UNCORE_FREQ_CONTROL +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_INTEL_UNCORE_FREQ_CONTROL is not set -- Gitee From 94b62ff726cacde4ac72ce29c97cbc66695d25df Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Mon, 11 Nov 2024 17:08:22 +0800 Subject: [PATCH 1745/2138] anolis: configs: align L1 arm64 arch related configs to devel-5.10 ANBZ: #11822 CONFIG_ACPI_CPPC_CPUFREQ=y CONFIG_YITIAN_CPER_RAWDATA=y Signed-off-by: Qiao Ma Reviewed-by: Baolin Wang Reviewed-by: Xuan Zhuo Reviewed-by: Tianjia Zhang Acked-by: Joseph Qi Reviewed-by: Zelin Deng Reviewed-by: Guixin Liu Acked-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4118 --- anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_CPPC_CPUFREQ | 2 +- anolis/configs/L1-RECOMMEND/arm64/CONFIG_YITIAN_CPER_RAWDATA | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_CPPC_CPUFREQ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_CPPC_CPUFREQ index 5cc88132a921..701191bf6ecf 100644 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_CPPC_CPUFREQ +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ACPI_CPPC_CPUFREQ @@ -1 +1 @@ -CONFIG_ACPI_CPPC_CPUFREQ=m 
+CONFIG_ACPI_CPPC_CPUFREQ=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_YITIAN_CPER_RAWDATA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_YITIAN_CPER_RAWDATA index 4b6ac4a5df60..09fe8d346af3 100644 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_YITIAN_CPER_RAWDATA +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_YITIAN_CPER_RAWDATA @@ -1 +1 @@ -# CONFIG_YITIAN_CPER_RAWDATA is not set +CONFIG_YITIAN_CPER_RAWDATA=y -- Gitee From 71df04318efad9be19b61e5f701dcfb23bcea928 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Mon, 11 Nov 2024 17:37:50 +0800 Subject: [PATCH 1746/2138] anolis: configs: align L1 security & crypto related configs to devel-5.10 ANBZ: #11822 x86: CONFIG_CRYPTO_RNG_DEFAULT=y CONFIG_CRYPTO_ECDSA=m CONFIG_CRYPTO_SEQIV=y CONFIG_CRYPTO_DEV_VIRTIO=m arm64: CONFIG_CRYPTO_RNG_DEFAULT=y CONFIG_CRYPTO_SEQIV=y CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_CRYPTO_DEV_CCP=y CONFIG_CRYPTO_DEV_CCP_DD=y CONFIG_CRYPTO_DEV_SP_CCP=y CONFIG_CRYPTO_DEV_CCP_CRYPTO=m Signed-off-by: Qiao Ma Reviewed-by: Baolin Wang Reviewed-by: Xuan Zhuo Reviewed-by: Tianjia Zhang Acked-by: Joseph Qi Reviewed-by: Zelin Deng Reviewed-by: Guixin Liu Acked-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4118 --- anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_CCP | 1 - .../configs/L1-RECOMMEND/{default => arm64}/CONFIG_CRYPTO_ECDSA | 0 .../configs/L1-RECOMMEND/{x86 => default}/CONFIG_CRYPTO_DEV_CCP | 0 .../L1-RECOMMEND/{x86 => default}/CONFIG_CRYPTO_DEV_CCP_CRYPTO | 0 .../L1-RECOMMEND/{x86 => default}/CONFIG_CRYPTO_DEV_CCP_DD | 0 .../L1-RECOMMEND/{x86 => default}/CONFIG_CRYPTO_DEV_CCP_DEBUGFS | 0 .../L1-RECOMMEND/{x86 => default}/CONFIG_CRYPTO_DEV_SP_CCP | 0 anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_VIRTIO | 2 +- anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RNG_DEFAULT | 2 +- anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEQIV | 2 +- anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_ECDSA | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ENGINE | 1 + 
anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_GENIV | 2 +- 13 files changed, 6 insertions(+), 5 deletions(-) delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_CCP rename anolis/configs/L1-RECOMMEND/{default => arm64}/CONFIG_CRYPTO_ECDSA (100%) rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_CRYPTO_DEV_CCP (100%) rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_CRYPTO_DEV_CCP_CRYPTO (100%) rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_CRYPTO_DEV_CCP_DD (100%) rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_CRYPTO_DEV_CCP_DEBUGFS (100%) rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_CRYPTO_DEV_SP_CCP (100%) create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_ECDSA create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ENGINE diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_CCP b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_CCP deleted file mode 100644 index db6cdd873726..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_DEV_CCP +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_CRYPTO_DEV_CCP is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECDSA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_ECDSA similarity index 100% rename from anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_ECDSA rename to anolis/configs/L1-RECOMMEND/arm64/CONFIG_CRYPTO_ECDSA diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP rename to anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_CRYPTO b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_CRYPTO similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_CRYPTO rename to 
anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_CRYPTO diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DD b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_DD similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DD rename to anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_DD diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DEBUGFS b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_DEBUGFS similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_CCP_DEBUGFS rename to anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_CCP_DEBUGFS diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_CCP b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_SP_CCP similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_DEV_SP_CCP rename to anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_SP_CCP diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_VIRTIO b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_VIRTIO index 8acad1aac397..2997f10e2174 100644 --- a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_VIRTIO +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_DEV_VIRTIO @@ -1 +1 @@ -# CONFIG_CRYPTO_DEV_VIRTIO is not set +CONFIG_CRYPTO_DEV_VIRTIO=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RNG_DEFAULT b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RNG_DEFAULT index e111e0254a92..fb0dd802353e 100644 --- a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RNG_DEFAULT +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_RNG_DEFAULT @@ -1 +1 @@ -CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_RNG_DEFAULT=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEQIV b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEQIV index 01c950cb6f50..7be12018f24f 100644 --- a/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEQIV +++ 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_CRYPTO_SEQIV @@ -1 +1 @@ -CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_SEQIV=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_ECDSA b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_ECDSA new file mode 100644 index 000000000000..efbe82ad8c8b --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_CRYPTO_ECDSA @@ -0,0 +1 @@ +CONFIG_CRYPTO_ECDSA=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ENGINE b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ENGINE new file mode 100644 index 000000000000..947c8fcc13ed --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_ENGINE @@ -0,0 +1 @@ +CONFIG_CRYPTO_ENGINE=m diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_GENIV b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_GENIV index f805fe3c8a30..86cb18c3b821 100644 --- a/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_GENIV +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CRYPTO_GENIV @@ -1 +1 @@ -CONFIG_CRYPTO_GENIV=m +CONFIG_CRYPTO_GENIV=y -- Gitee From 972b04e2cc8e098d45708ae042bc16af5c028726 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Mon, 11 Nov 2024 17:41:13 +0800 Subject: [PATCH 1747/2138] anolis: configs: align L1 x86 misc configs to devel-5.10 ANBZ: #11822 CONFIG_KDB_DEFAULT_ENABLE=0x0 Signed-off-by: Qiao Ma Reviewed-by: Baolin Wang Reviewed-by: Xuan Zhuo Reviewed-by: Tianjia Zhang Acked-by: Joseph Qi Reviewed-by: Zelin Deng Reviewed-by: Guixin Liu Acked-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4118 --- .../L1-RECOMMEND/{arm64 => default}/CONFIG_KDB_DEFAULT_ENABLE | 0 anolis/configs/L1-RECOMMEND/x86/CONFIG_KDB_DEFAULT_ENABLE | 1 - 2 files changed, 1 deletion(-) rename anolis/configs/L1-RECOMMEND/{arm64 => default}/CONFIG_KDB_DEFAULT_ENABLE (100%) delete mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_KDB_DEFAULT_ENABLE diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_KDB_DEFAULT_ENABLE 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_DEFAULT_ENABLE similarity index 100% rename from anolis/configs/L1-RECOMMEND/arm64/CONFIG_KDB_DEFAULT_ENABLE rename to anolis/configs/L1-RECOMMEND/default/CONFIG_KDB_DEFAULT_ENABLE diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KDB_DEFAULT_ENABLE b/anolis/configs/L1-RECOMMEND/x86/CONFIG_KDB_DEFAULT_ENABLE deleted file mode 100644 index 25fa262830ea..000000000000 --- a/anolis/configs/L1-RECOMMEND/x86/CONFIG_KDB_DEFAULT_ENABLE +++ /dev/null @@ -1 +0,0 @@ -CONFIG_KDB_DEFAULT_ENABLE=0x1 -- Gitee From 5018102ae58b5b74967b592309da6fb6354f1544 Mon Sep 17 00:00:00 2001 From: Wenwei Tao Date: Mon, 26 Aug 2019 15:18:30 +0800 Subject: [PATCH 1748/2138] anolis: kernel: cgroup: account number of tasks in the css and its descendants ANBZ: #11918 to #34609731 Account number of the tasks in the css and its descendants, this is prepared for the incoming memcg priority patch. In memcg priority oom, we will select victim cgroup which has victim tasks in it. We need to know whether the memcg and its descendants have tasks before the selection can move on. 
Signed-off-by: Wenwei Tao Reviewed-by: Xunlei Pang Signed-off-by: Xu Yu Signed-off-by: zhongjiang-ali Acked-by: Xu Yu Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4142 --- include/linux/cgroup-defs.h | 3 +++ kernel/cgroup/cgroup.c | 26 ++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index a2e401bed012..e8d59e792b3a 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -178,6 +178,9 @@ struct cgroup_subsys_state { */ int id; + /* number of procs under this css and its descendants */ + int nr_procs; + unsigned int flags; /* diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 97f2bd6dc314..28420b1d6c9b 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -2411,6 +2411,28 @@ void cgroup_attach_unlock(bool lock_threadgroup) cpus_read_unlock(); } +static void css_account_procs(struct task_struct *task, + struct css_set *cset, int num) +{ + struct cgroup_subsys *ss; + int ssid; + + if (!thread_group_leader(task)) + return; + + for_each_subsys(ss, ssid) { + struct cgroup_subsys_state *css = cset->subsys[ssid]; + + if (!css) + continue; + css->nr_procs += num; + while (css->parent) { + css = css->parent; + css->nr_procs += num; + } + } +} + /** * cgroup_migrate_add_task - add a migration target task to a migration context * @task: target task @@ -2563,8 +2585,10 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) get_css_set(to_cset); to_cset->nr_tasks++; + css_account_procs(task, to_cset, 1); css_set_move_task(task, from_cset, to_cset, true); from_cset->nr_tasks--; + css_account_procs(task, from_cset, -1); /* * If the source or destination cgroup is frozen, * the task might require to change its state. 
@@ -6622,6 +6646,7 @@ void cgroup_post_fork(struct task_struct *child, WARN_ON_ONCE(!list_empty(&child->cg_list)); cset->nr_tasks++; + css_account_procs(child, cset, 1); css_set_move_task(child, NULL, cset, false); } else { put_css_set(cset); @@ -6703,6 +6728,7 @@ void cgroup_exit(struct task_struct *tsk) css_set_move_task(tsk, cset, NULL, false); list_add_tail(&tsk->cg_list, &cset->dying_tasks); cset->nr_tasks--; + css_account_procs(tsk, cset, -1); if (dl_task(tsk)) dec_dl_tasks_cs(tsk); -- Gitee From 64dccbed6ec239db7a93d4a226c169957a168522 Mon Sep 17 00:00:00 2001 From: Wenwei Tao Date: Fri, 23 Aug 2019 18:32:13 +0800 Subject: [PATCH 1749/2138] anolis: mm: memcontrol: introduce memcg priority oom ANBZ: #11918 to #34609731 Under memory pressure reclaim and oom would happen, with multiple cgroups exist in one system, we might want some of their memory or tasks survived the reclaim and oom while there are other candidates. The @memory.low and @memory.min have make that happen during reclaim, this patch introduces memcg priority oom to meet above requirement in the oom. The priority is from 0 to 12, the higher number the higher priority. When oom happens it always choose victim from low priority memcg. And it works both for memcg oom and global oom, it can be enabled/disabled through @memory.use_priority_oom, for global oom through the root memcg's @memory.use_priority_oom, it is disabled by default. 
Signed-off-by: Wenwei Tao Reviewed-by: Xunlei Pang Signed-off-by: Xu Yu Signed-off-by: zhongjiang-ali Acked-by: Xu Yu Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4142 --- .../admin-guide/cgroup-v1/memory.rst | 45 +++- include/linux/memcontrol.h | 38 +++ include/linux/oom.h | 7 + mm/memcontrol.c | 235 +++++++++++++++++- mm/oom_kill.c | 25 +- 5 files changed, 339 insertions(+), 11 deletions(-) diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst index ff456871bf4b..9312cef008aa 100644 --- a/Documentation/admin-guide/cgroup-v1/memory.rst +++ b/Documentation/admin-guide/cgroup-v1/memory.rst @@ -971,7 +971,50 @@ Test: (Expect a bunch of notifications, and eventually, the oom-killer will trigger.) -12. TODO +12. Cgroup oom priority +======================= +Under memory pressure, reclaim and oom would happen, with multiple +cgroups exist in one system, we might want some of the cgroups's memory +or tasks survived the reclaim and oom while there are other candidates. + +The "memory.low" and "memory.min" make that happen during reclaim, this +"memory.priority" introduces a priority oom to meet above requirement +in oom. + +The priority value is from 0 to 12, the higher number the higher priority. +The priority is among siblings, it is not global priority, by this we can +map these 13 priorities to the tens of thousands of memcgs. + +When oom happens it first chooses the lowest priority memcg as victim then +uses the kernel default algorithm(see function oom_evaluate_task()) to select +bad process from the victim memcg. + +For example:: + + The following hierarchy: + root + / \ + A B + / \ / \ + C D E F + + priority: + A: 10, B: 8 + C: 5, D: 6, E: 7, F: 8 + +When oom happens in root, it first iterates its two children A and B, and selects +B as next iteration root since B's priority is lower than A, subsequent victim +selection is limit in the B's subtree. 
E is selected as victim memcg finally, since +its priority is lower than its sibling. + +This priority oom works both for memcg and global oom. For global oom the root is +root memcg. + +Meanwhile, we provide the interface memory.use_priority_oom to decide whether to +enable/disable the feature in each memcg. Write "1" to enable the priority oom and +"0" to disable it. + +13. TODO ======== 1. Make per-cgroup scanner reclaim not-shared pages first diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ed764df798b2..7139432b51a1 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -27,6 +27,9 @@ struct obj_cgroup; struct page; struct mm_struct; struct kmem_cache; +struct oom_control; + +#define MEMCG_OOM_PRIORITY 12 /* Cgroup-specific page state, on top of universal node page state */ enum memcg_stat_item { @@ -251,6 +254,11 @@ struct mem_cgroup { /* protected by memcg_oom_lock */ bool oom_lock; int under_oom; + /* memcg priority */ + bool use_priority_oom; + int priority; + int num_oom_skip; + struct mem_cgroup *next_reset; int swappiness; /* OOM-Killer disable */ @@ -931,6 +939,21 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg) return !!(memcg->css.flags & CSS_ONLINE); } +/* memcg priority*/ +void mem_cgroup_account_oom_skip(struct task_struct *task, + struct oom_control *oc); + +void mem_cgroup_select_bad_process(struct oom_control *oc); + +static inline bool root_memcg_use_priority_oom(void) +{ + if (mem_cgroup_disabled()) + return false; + if (root_mem_cgroup->use_priority_oom) + return true; + return false; +} + void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, int zid, int nr_pages); @@ -1440,6 +1463,21 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg) return true; } +/* memcg priority */ +static inline void mem_cgroup_account_oom_skip(struct task_struct *task, + struct oom_control *oc) +{ +} + +static inline void mem_cgroup_select_bad_process(struct 
oom_control *oc) +{ +} + +static inline bool root_memcg_use_priority_oom(void) +{ + return false; +} + static inline unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx) diff --git a/include/linux/oom.h b/include/linux/oom.h index 7d0c9c48a0c5..3d7ab770308e 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -50,6 +50,11 @@ struct oom_control { struct task_struct *chosen; long chosen_points; + /* Memcg priority */ + struct mem_cgroup *reset_list; + int num_skip; + bool use_priority_oom; + /* Used to print the constraint info. */ enum oom_constraint constraint; }; @@ -110,6 +115,8 @@ extern int unregister_oom_notifier(struct notifier_block *nb); extern bool oom_killer_disable(signed long timeout); extern void oom_killer_enable(void); +extern int oom_evaluate_task(struct task_struct *task, void *arg); + extern struct task_struct *find_lock_task_mm(struct task_struct *p); #endif /* _INCLUDE_LINUX_OOM_H */ diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 95d35d948745..a4e56ec01140 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1259,6 +1259,131 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) dead_memcg); } +/* memcg oom priority */ +/* + * do_mem_cgroup_account_oom_skip - account the memcg with OOM-unkillable task + * @memcg: mem_cgroup struct with OOM-unkillable task + * @oc: oom_control struct + * + * Account OOM-unkillable task to its cgroup and up to the OOMing cgroup's + * @num_oom_skip, if all the tasks of one cgroup hierarchy are OOM-unkillable + * we skip this cgroup hierarchy when select the victim cgroup. + * + * The @num_oom_skip must be reset when bad process selection has finished, + * since before the next round bad process selection, these OOM-unkillable + * tasks might become killable. 
+ * + */ +static void do_mem_cgroup_account_oom_skip(struct mem_cgroup *memcg, + struct oom_control *oc) +{ + struct mem_cgroup *root; + struct cgroup_subsys_state *css; + + if (!oc->use_priority_oom) + return; + if (unlikely(!memcg)) + return; + root = oc->memcg; + if (!root) + root = root_mem_cgroup; + + css = &memcg->css; + while (css) { + struct mem_cgroup *tmp; + + tmp = mem_cgroup_from_css(css); + tmp->num_oom_skip++; + /* + * Put these cgroups into a list to + * reduce the iteration time when reset + * the @num_oom_skip. + */ + if (!tmp->next_reset) { + css_get(&tmp->css); + tmp->next_reset = oc->reset_list; + oc->reset_list = tmp; + } + + if (mem_cgroup_from_css(css) == root) + break; + + css = css->parent; + } +} + +void mem_cgroup_account_oom_skip(struct task_struct *task, + struct oom_control *oc) +{ + do_mem_cgroup_account_oom_skip(mem_cgroup_from_task(task), oc); +} + +static struct mem_cgroup * +mem_cgroup_select_victim_cgroup(struct mem_cgroup *memcg) +{ + struct cgroup_subsys_state *chosen, *parent; + struct cgroup_subsys_state *victim; + int chosen_priority; + +again: + victim = NULL; + parent = &memcg->css; + rcu_read_lock(); + while (parent) { + struct cgroup_subsys_state *pos; + struct mem_cgroup *parent_mem; + + parent_mem = mem_cgroup_from_css(parent); + + if (parent->nr_procs <= parent_mem->num_oom_skip) + break; + victim = parent; + chosen = NULL; + chosen_priority = DEF_PRIORITY + 1; + list_for_each_entry_rcu(pos, &parent->children, sibling) { + struct mem_cgroup *tmp, *chosen_mem; + + tmp = mem_cgroup_from_css(pos); + + if (pos->nr_procs <= tmp->num_oom_skip) + continue; + if (tmp->priority > chosen_priority) + continue; + if (tmp->priority < chosen_priority) { + chosen_priority = tmp->priority; + chosen = pos; + continue; + } + + chosen_mem = mem_cgroup_from_css(chosen); + + if (do_memsw_account()) { + if (page_counter_read(&tmp->memsw) > + page_counter_read(&chosen_mem->memsw)) + chosen = pos; + } else if (page_counter_read(&tmp->memory) 
> + page_counter_read(&chosen_mem->memory)) { + chosen = pos; + } + } + parent = chosen; + } + + if (likely(victim)) { + if (!css_tryget(victim)) { + rcu_read_unlock(); + goto again; + } + } + + rcu_read_unlock(); + + if (likely(victim)) + return mem_cgroup_from_css(victim); + + return NULL; +} + /** * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy * @memcg: hierarchy root @@ -1270,7 +1395,6 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) * value, the function breaks the iteration loop. Otherwise, it will iterate * over all tasks and return 0. * - * This function must not be called for the root memory cgroup. */ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, int (*fn)(struct task_struct *, void *), void *arg) @@ -1278,8 +1402,6 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, struct mem_cgroup *iter; int ret = 0; - BUG_ON(mem_cgroup_is_root(memcg)); - for_each_mem_cgroup_tree(iter, memcg) { struct css_task_iter it; struct task_struct *task; @@ -1295,6 +1417,49 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, } } +void mem_cgroup_select_bad_process(struct oom_control *oc) +{ + struct mem_cgroup *memcg, *victim, *iter; + + memcg = oc->memcg; + + if (!memcg) + memcg = root_mem_cgroup; + + oc->use_priority_oom = memcg->use_priority_oom; + victim = memcg; + +retry: + if (oc->use_priority_oom) { + victim = mem_cgroup_select_victim_cgroup(memcg); + if (!victim) { + if (mem_cgroup_is_root(memcg) && oc->num_skip) + oc->chosen = (void *)-1UL; + goto out; + } + } + + mem_cgroup_scan_tasks(victim, oom_evaluate_task, oc); + if (oc->use_priority_oom) { + css_put(&victim->css); + if (oc->chosen == (void *)-1UL) + goto out; + if (!oc->chosen && victim != memcg) { + do_mem_cgroup_account_oom_skip(victim, oc); + goto retry; + } + } +out: + /* See comments in mem_cgroup_account_oom_skip() */ + while (oc->reset_list) { + iter = oc->reset_list; + iter->num_oom_skip = 0; + oc->reset_list = iter->next_reset; + 
iter->next_reset = NULL; + css_put(&iter->css); + } +} + #ifdef CONFIG_DEBUG_VM void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio) { @@ -3673,6 +3838,27 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css, return -EINVAL; } +static u64 mem_cgroup_priority_oom_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return memcg->use_priority_oom; +} + +static int mem_cgroup_priority_oom_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + if (val > 1) + return -EINVAL; + + memcg->use_priority_oom = val; + + return 0; +} + static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) { unsigned long val; @@ -4208,6 +4394,27 @@ static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, return 0; } +static u64 mem_cgroup_priority_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return memcg->priority; +} + +static int mem_cgroup_priority_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + if (val > MEMCG_OOM_PRIORITY) + return -EINVAL; + + memcg->priority = val; + + return 0; +} + #ifdef CONFIG_ASYNC_FORK static u64 mem_cgroup_async_fork_read(struct cgroup_subsys_state *css, struct cftype *cft) @@ -5116,6 +5323,11 @@ static struct cftype mem_cgroup_legacy_files[] = { .write_u64 = mem_cgroup_hierarchy_write, .read_u64 = mem_cgroup_hierarchy_read, }, + { + .name = "use_priority_oom", + .write_u64 = mem_cgroup_priority_oom_write, + .read_u64 = mem_cgroup_priority_oom_read, + }, { .name = "cgroup.event_control", /* XXX: for compat */ .write = memcg_write_event_control, @@ -5126,6 +5338,12 @@ static struct cftype mem_cgroup_legacy_files[] = { .read_u64 = mem_cgroup_swappiness_read, .write_u64 = 
mem_cgroup_swappiness_write, }, + { + .name = "priority", + .read_u64 = mem_cgroup_priority_read, + .write_u64 = mem_cgroup_priority_write, + .flags = CFTYPE_NOT_ON_ROOT, + }, { .name = "move_charge_at_immigrate", .read_u64 = mem_cgroup_move_charge_read, @@ -6888,6 +7106,17 @@ static struct cftype memory_files[] = { .seq_show = memory_max_show, .write = memory_max_write, }, + { + .name = "priority", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = mem_cgroup_priority_read, + .write_u64 = mem_cgroup_priority_write, + }, + { + .name = "use_priority_oom", + .write_u64 = mem_cgroup_priority_oom_write, + .read_u64 = mem_cgroup_priority_oom_read, + }, { .name = "events", .flags = CFTYPE_NOT_ON_ROOT, diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 44bde56ecd02..ca42526c4573 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -305,17 +305,21 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc) return CONSTRAINT_NONE; } -static int oom_evaluate_task(struct task_struct *task, void *arg) +int oom_evaluate_task(struct task_struct *task, void *arg) { struct oom_control *oc = arg; long points; - if (oom_unkillable_task(task)) + if (oom_unkillable_task(task)) { + mem_cgroup_account_oom_skip(task, oc); goto next; + } /* p may not have freeable memory in nodemask */ - if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc)) + if (!is_memcg_oom(oc) && !oom_cpuset_eligible(task, oc)) { + mem_cgroup_account_oom_skip(task, oc); goto next; + } /* * This task already has access to memory reserves and is being killed. @@ -324,8 +328,11 @@ static int oom_evaluate_task(struct task_struct *task, void *arg) * any memory is quite low. 
*/ if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) { - if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags)) + if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags)) { + mem_cgroup_account_oom_skip(task, oc); + oc->num_skip++; goto next; + } goto abort; } @@ -339,7 +346,11 @@ static int oom_evaluate_task(struct task_struct *task, void *arg) } points = oom_badness(task, oc->totalpages); - if (points == LONG_MIN || points < oc->chosen_points) + if (points == LONG_MIN) { + mem_cgroup_account_oom_skip(task, oc); + goto next; + } + if (points < oc->chosen_points) goto next; select: @@ -365,8 +376,8 @@ static void select_bad_process(struct oom_control *oc) { oc->chosen_points = LONG_MIN; - if (is_memcg_oom(oc)) - mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc); + if (is_memcg_oom(oc) || root_memcg_use_priority_oom()) + mem_cgroup_select_bad_process(oc); else { struct task_struct *p; -- Gitee From 9ee89c48dfacca4d792c5549d2eb1c6ef250df55 Mon Sep 17 00:00:00 2001 From: Gu Mi Date: Thu, 31 Mar 2022 16:26:07 +0800 Subject: [PATCH 1750/2138] anolis: mm: fix suspicious RCU usage in mem_cgroup_account_oom_skip ANBZ: #11918 ANBZ: #732 mem_cgroup_account_oom_skip() call rcu_dereference_check(), but rcu_dereference_check() is not protected, need to add rcu lock to protect. 
Reviewed-by: Xu Yu Reviewed-by: Xunlei Pang Signed-off-by: Gu Mi Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4142 --- mm/memcontrol.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a4e56ec01140..0163848bd0b4 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1315,7 +1315,9 @@ static void do_mem_cgroup_account_oom_skip(struct mem_cgroup *memcg, void mem_cgroup_account_oom_skip(struct task_struct *task, struct oom_control *oc) { + rcu_read_lock(); do_mem_cgroup_account_oom_skip(mem_cgroup_from_task(task), oc); + rcu_read_unlock(); } static struct mem_cgroup * -- Gitee From 25d65b297e375f24ed0ec538d07daab1a75e4752 Mon Sep 17 00:00:00 2001 From: Wenwei Tao Date: Tue, 10 Sep 2019 14:42:56 +0800 Subject: [PATCH 1751/2138] anolis: mm: memcontrol: enable oom.group on cgroup-v1 ANBZ: #11918 to #34609731 Enable oom.group on cgroup-v1. Signed-off-by: Wenwei Tao Reviewed-by: Yang Shi Reviewed-by: Xunlei Pang Signed-off-by: zhongjiang-ali Acked-by: Xu Yu Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4142 --- mm/memcontrol.c | 71 ++++++++++++++++++++++++++----------------------- 1 file changed, 37 insertions(+), 34 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0163848bd0b4..9f6aa465bae7 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2225,9 +2225,6 @@ struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, struct mem_cgroup *oom_group = NULL; struct mem_cgroup *memcg; - if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) - return NULL; - if (!oom_domain) oom_domain = root_mem_cgroup; @@ -4788,6 +4785,37 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, return 0; } +static int memory_oom_group_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + + seq_printf(m, "%d\n", memcg->oom_group); + + return 0; +} + 
+static ssize_t memory_oom_group_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); + int ret, oom_group; + + buf = strstrip(buf); + if (!buf) + return -EINVAL; + + ret = kstrtoint(buf, 0, &oom_group); + if (ret) + return ret; + + if (oom_group != 0 && oom_group != 1) + return -EINVAL; + + memcg->oom_group = oom_group; + + return nbytes; +} + #ifdef CONFIG_CGROUP_WRITEBACK #include @@ -5356,6 +5384,12 @@ static struct cftype mem_cgroup_legacy_files[] = { .seq_show = mem_cgroup_oom_control_read, .write_u64 = mem_cgroup_oom_control_write, }, + { + .name = "oom.group", + .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE, + .seq_show = memory_oom_group_show, + .write = memory_oom_group_write, + }, { .name = "pressure_level", .seq_show = mem_cgroup_dummy_seq_show, @@ -7000,37 +7034,6 @@ static int memory_numa_stat_show(struct seq_file *m, void *v) } #endif -static int memory_oom_group_show(struct seq_file *m, void *v) -{ - struct mem_cgroup *memcg = mem_cgroup_from_seq(m); - - seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group)); - - return 0; -} - -static ssize_t memory_oom_group_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); - int ret, oom_group; - - buf = strstrip(buf); - if (!buf) - return -EINVAL; - - ret = kstrtoint(buf, 0, &oom_group); - if (ret) - return ret; - - if (oom_group != 0 && oom_group != 1) - return -EINVAL; - - WRITE_ONCE(memcg->oom_group, oom_group); - - return nbytes; -} - static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { -- Gitee From 571125260d3ca773d6f850ab7f6212437d1a8836 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Sat, 16 Nov 2024 16:28:16 +0800 Subject: [PATCH 1752/2138] anolis: drivers/iommu: Repair the iommu pass-through TLB refresh and page table mapping problem ANBZ: #11925 The iommu forcibly refreshes the 
tlb table after updating the device table. Memory map added the ability to convert huge page to page. Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4139 Reviewed-by: Juxin Gao Reviewed-by: Guixin Liu --- drivers/iommu/loongarch_iommu.c | 125 ++++++++++++++++++++++++++++---- 1 file changed, 112 insertions(+), 13 deletions(-) diff --git a/drivers/iommu/loongarch_iommu.c b/drivers/iommu/loongarch_iommu.c index 7dfc6459045b..b158467918ba 100644 --- a/drivers/iommu/loongarch_iommu.c +++ b/drivers/iommu/loongarch_iommu.c @@ -115,6 +115,8 @@ int loongarch_iommu_disable; writel(val, iommu->confbase + off) #define iommu_read_regl(iommu, off) readl(iommu->confbase + off) +static void switch_huge_to_page(unsigned long *ptep, unsigned long start); + static void iommu_translate_disable(struct loongarch_iommu *iommu) { u32 val; @@ -175,6 +177,83 @@ static struct dom_info *to_dom_info(struct iommu_domain *dom) return container_of(dom, struct dom_info, domain); } +static void flush_iotlb_by_domain_id(struct loongarch_iommu *iommu, u16 domain_id, bool read) +{ + u32 val; + u32 flush_read_tlb = read ? 
1 : 0; + + if (iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return; + } + + val = iommu_read_regl(iommu, LA_IOMMU_EIVDB); + val &= ~0xf0000; + val |= ((u32)domain_id) << 16; + iommu_write_regl(iommu, LA_IOMMU_EIVDB, val); + + /* Flush all */ + val = iommu_read_regl(iommu, LA_IOMMU_VBTC); + val &= ~0x10f; + val |= (flush_read_tlb << 8) | 4; + iommu_write_regl(iommu, LA_IOMMU_VBTC, val); +} + +static int flush_pgtable_is_busy(struct loongarch_iommu *iommu) +{ + u32 val; + + val = iommu_read_regl(iommu, LA_IOMMU_VBTC); + return val & IOMMU_PGTABLE_BUSY; +} + +static int iommu_flush_iotlb_by_domain(struct la_iommu_dev_data *dev_data) +{ + u32 retry = 0; + struct loongarch_iommu *iommu; + u16 domain_id; + + if (dev_data == NULL) { + pr_err("%s dev_data is NULL", __func__); + return 0; + } + + if (dev_data->iommu == NULL) { + pr_err("%s iommu is NULL", __func__); + return 0; + } + + if (dev_data->iommu_entry == NULL) { + pr_err("%s iommu_entry is NULL", __func__); + return 0; + } + + iommu = dev_data->iommu; + domain_id = dev_data->iommu_entry->id; + + flush_iotlb_by_domain_id(iommu, domain_id, 0); + while (flush_pgtable_is_busy(iommu)) { + if (retry == LOOP_TIMEOUT) { + pr_err("LA-IOMMU: %s %d iotlb flush busy\n", __func__, __LINE__); + return -EIO; + } + retry++; + udelay(1); + } + + flush_iotlb_by_domain_id(iommu, domain_id, 1); + while (flush_pgtable_is_busy(iommu)) { + if (retry == LOOP_TIMEOUT) { + pr_err("LA-IOMMU: %s %d iotlb flush busy\n", __func__, __LINE__); + return -EIO; + } + retry++; + udelay(1); + } + iommu_translate_enable(iommu); + return 0; +} + static int update_dev_table(struct la_iommu_dev_data *dev_data, int flag) { u32 val = 0; @@ -250,6 +329,8 @@ static int update_dev_table(struct la_iommu_dev_data *dev_data, int flag) if (index < MAX_ATTACHED_DEV_ID) __clear_bit(index, iommu->devtable_bitmap); } + + iommu_flush_iotlb_by_domain(dev_data); return 0; } @@ -269,14 +350,6 @@ static void flush_iotlb(struct loongarch_iommu *iommu) 
iommu_write_regl(iommu, LA_IOMMU_VBTC, val); } -static int flush_pgtable_is_busy(struct loongarch_iommu *iommu) -{ - u32 val; - - val = iommu_read_regl(iommu, LA_IOMMU_VBTC); - return val & IOMMU_PGTABLE_BUSY; -} - static int iommu_flush_iotlb(struct loongarch_iommu *iommu) { u32 retry = 0; @@ -324,8 +397,6 @@ static void do_attach(struct iommu_info *info, struct la_iommu_dev_data *dev_dat spin_unlock(&info->devlock); update_dev_table(dev_data, 1); - if (info->dev_cnt > 0) - iommu_flush_iotlb(dev_data->iommu); } static void do_detach(struct la_iommu_dev_data *dev_data) @@ -889,7 +960,7 @@ static size_t iommu_page_map(void *pt_base, { unsigned long next, old, step; unsigned long pte, *ptep, *pgtable; - int ret, huge; + int ret, huge, switch_page; old = start; ptep = iommu_pte_offset(pt_base, start, level); @@ -910,9 +981,17 @@ static size_t iommu_page_map(void *pt_base, next = iommu_ptable_end(start, end, level); step = next - start; huge = 0; - if ((level == IOMMU_PT_LEVEL1) && (step == IOMMU_HPAGE_SIZE)) - if (!iommu_pte_present(ptep) || iommu_pte_huge(ptep)) + switch_page = 0; + if (level == IOMMU_PT_LEVEL1) { + if ((step == IOMMU_HPAGE_SIZE) && + (!iommu_pte_present(ptep) || iommu_pte_huge(ptep))) huge = 1; + else if (iommu_pte_present(ptep) && iommu_pte_huge(ptep)) + switch_page = 1; + } + + if (switch_page) + switch_huge_to_page(ptep, start); if (huge) { pte = (paddr & IOMMU_HPAGE_MASK) | @@ -933,6 +1012,21 @@ static size_t iommu_page_map(void *pt_base, return start - old; } +static void switch_huge_to_page(unsigned long *ptep, unsigned long start) +{ + phys_addr_t paddr = *ptep & IOMMU_HPAGE_MASK; + unsigned long next = start + IOMMU_HPAGE_SIZE; + unsigned long *pgtable; + int ret; + + *ptep = 0; + ret = iommu_get_page_table(ptep); + if (ret == 0) { + pgtable = phys_to_virt(*ptep & IOMMU_PAGE_MASK); + iommu_page_map(pgtable, start, next, paddr, 0); + } +} + static int domain_map_page(struct dom_info *priv, unsigned long start, phys_addr_t paddr, size_t size) 
{ @@ -970,6 +1064,11 @@ static size_t iommu_page_unmap(void *pt_base, if (!iommu_pte_present(ptep)) continue; + if ((level == IOMMU_PT_LEVEL1) && + iommu_pte_huge(ptep) && + ((next - start) < IOMMU_HPAGE_SIZE)) + switch_huge_to_page(ptep, start); + if (iommu_pte_huge(ptep)) { if ((next - start) != IOMMU_HPAGE_SIZE) pr_err( -- Gitee From 87c7f4ed059634c3c05f7b3483c86c3cd740643e Mon Sep 17 00:00:00 2001 From: Philo Lu Date: Thu, 14 Nov 2024 18:52:04 +0800 Subject: [PATCH 1753/2138] net/udp: Add a new struct for hash2 slot ANBZ: #11971 commit accdd51dc74ff65b7b7be1961b11723d228fbbbd upstream. Preparing for udp 4-tuple hash (uhash4 for short). To implement uhash4 without cache line missing when lookup, hslot2 is used to record the number of hashed sockets in hslot4. Thus adding a new struct udp_hslot_main with field hash4_cnt, which is used by hash2. The new struct is used to avoid doubling the size of udp_hslot. Before uhash4 lookup, firstly checking hash4_cnt to see if there are hashed sks in hslot4. Because hslot2 is always used in lookup, there is no cache line miss. Related helpers are updated, and use the helpers as possible. uhash4 is implemented in following patches. Signed-off-by: Philo Lu Acked-by: Willem de Bruijn Acked-by: Paolo Abeni Signed-off-by: David S. 
Miller Signed-off-by: Philo Lu Reviewed-by: Xuan Zhuo Link: https://gitee.com/anolis/cloud-kernel/pulls/4153 --- include/net/udp.h | 38 ++++++++++++++++++++++++++++++++++---- net/ipv4/udp.c | 44 +++++++++++++++++++++++--------------------- net/ipv6/udp.c | 15 ++++++--------- 3 files changed, 63 insertions(+), 34 deletions(-) diff --git a/include/net/udp.h b/include/net/udp.h index 488a6d2babcc..6f8e8df7a358 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -50,7 +50,7 @@ struct udp_skb_cb { #define UDP_SKB_CB(__skb) ((struct udp_skb_cb *)((__skb)->cb)) /** - * struct udp_hslot - UDP hash slot + * struct udp_hslot - UDP hash slot used by udp_table.hash * * @head: head of list of sockets * @count: number of sockets in 'head' list @@ -60,7 +60,22 @@ struct udp_hslot { struct hlist_head head; int count; spinlock_t lock; -} __attribute__((aligned(2 * sizeof(long)))); +} __aligned(2 * sizeof(long)); + +/** + * struct udp_hslot_main - UDP hash slot used by udp_table.hash2 + * + * @hslot: basic hash slot + * @hash4_cnt: number of sockets in hslot4 of the same + * (local port, local address) + */ +struct udp_hslot_main { + struct udp_hslot hslot; /* must be the first member */ +#if !IS_ENABLED(CONFIG_BASE_SMALL) + u32 hash4_cnt; +#endif +} __aligned(2 * sizeof(long)); +#define UDP_HSLOT_MAIN(__hslot) ((struct udp_hslot_main *)(__hslot)) /** * struct udp_table - UDP table @@ -72,7 +87,7 @@ struct udp_hslot { */ struct udp_table { struct udp_hslot *hash; - struct udp_hslot *hash2; + struct udp_hslot_main *hash2; unsigned int mask; unsigned int log; }; @@ -83,6 +98,7 @@ static inline struct udp_hslot *udp_hashslot(struct udp_table *table, { return &table->hash[udp_hashfn(net, num, table->mask)]; } + /* * For secondary hash, net_hash_mix() is performed before calling * udp_hashslot2(), this explains difference with udp_hashslot() @@ -90,8 +106,22 @@ static inline struct udp_hslot *udp_hashslot(struct udp_table *table, static inline struct udp_hslot *udp_hashslot2(struct 
udp_table *table, unsigned int hash) { - return &table->hash2[hash & table->mask]; + return &table->hash2[hash & table->mask].hslot; +} + +#if IS_ENABLED(CONFIG_BASE_SMALL) +static inline void udp_table_hash4_init(struct udp_table *table) +{ +} +#else /* !CONFIG_BASE_SMALL */ + +/* Must be called with table->hash2 initialized */ +static inline void udp_table_hash4_init(struct udp_table *table) +{ + for (int i = 0; i <= table->mask; i++) + table->hash2[i].hash4_cnt = 0; } +#endif /* CONFIG_BASE_SMALL */ extern struct proto udp_prot; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 2e4e53560394..52195055d56d 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -487,13 +487,12 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, int sdif, struct udp_table *udptable, struct sk_buff *skb) { unsigned short hnum = ntohs(dport); - unsigned int hash2, slot2; struct udp_hslot *hslot2; struct sock *result, *sk; + unsigned int hash2; hash2 = ipv4_portaddr_hash(net, daddr, hnum); - slot2 = hash2 & udptable->mask; - hslot2 = &udptable->hash2[slot2]; + hslot2 = udp_hashslot2(udptable, hash2); /* Lookup connected or non-wildcard socket */ result = udp4_lib_lookup2(net, saddr, sport, @@ -520,8 +519,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, /* Lookup wildcard sockets */ hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum); - slot2 = hash2 & udptable->mask; - hslot2 = &udptable->hash2[slot2]; + hslot2 = udp_hashslot2(udptable, hash2); result = udp4_lib_lookup2(net, saddr, sport, htonl(INADDR_ANY), hnum, dif, sdif, @@ -2266,7 +2264,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, udptable->mask; hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask; start_lookup: - hslot = &udptable->hash2[hash2]; + hslot = &udptable->hash2[hash2].hslot; offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); } @@ -2537,14 +2535,13 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net, struct udp_table 
*udptable = net->ipv4.udp_table; INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr); unsigned short hnum = ntohs(loc_port); - unsigned int hash2, slot2; struct udp_hslot *hslot2; + unsigned int hash2; __portpair ports; struct sock *sk; hash2 = ipv4_portaddr_hash(net, loc_addr, hnum); - slot2 = hash2 & udptable->mask; - hslot2 = &udptable->hash2[slot2]; + hslot2 = udp_hashslot2(udptable, hash2); ports = INET_COMBINED_PORTS(rmt_port, hnum); udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { @@ -3170,7 +3167,7 @@ static struct sock *bpf_iter_udp_batch(struct seq_file *seq) batch_sks = 0; for (; state->bucket <= udptable->mask; state->bucket++) { - struct udp_hslot *hslot2 = &udptable->hash2[state->bucket]; + struct udp_hslot *hslot2 = &udptable->hash2[state->bucket].hslot; if (hlist_empty(&hslot2->head)) continue; @@ -3411,10 +3408,11 @@ __setup("uhash_entries=", set_uhash_entries); void __init udp_table_init(struct udp_table *table, const char *name) { - unsigned int i; + unsigned int i, slot_size; + slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main); table->hash = alloc_large_system_hash(name, - 2 * sizeof(struct udp_hslot), + slot_size, uhash_entries, 21, /* one slot per 2 MB */ 0, @@ -3423,17 +3421,18 @@ void __init udp_table_init(struct udp_table *table, const char *name) UDP_HTABLE_SIZE_MIN, UDP_HTABLE_SIZE_MAX); - table->hash2 = table->hash + (table->mask + 1); + table->hash2 = (void *)(table->hash + (table->mask + 1)); for (i = 0; i <= table->mask; i++) { INIT_HLIST_HEAD(&table->hash[i].head); table->hash[i].count = 0; spin_lock_init(&table->hash[i].lock); } for (i = 0; i <= table->mask; i++) { - INIT_HLIST_HEAD(&table->hash2[i].head); - table->hash2[i].count = 0; - spin_lock_init(&table->hash2[i].lock); + INIT_HLIST_HEAD(&table->hash2[i].hslot.head); + table->hash2[i].hslot.count = 0; + spin_lock_init(&table->hash2[i].hslot.lock); } + udp_table_hash4_init(table); } u32 udp_flow_hashrnd(void) @@ -3459,18 +3458,20 @@ static void __net_init 
udp_sysctl_init(struct net *net) static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_entries) { struct udp_table *udptable; + unsigned int slot_size; int i; udptable = kmalloc(sizeof(*udptable), GFP_KERNEL); if (!udptable) goto out; - udptable->hash = vmalloc_huge(hash_entries * 2 * sizeof(struct udp_hslot), + slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main); + udptable->hash = vmalloc_huge(hash_entries * slot_size, GFP_KERNEL_ACCOUNT); if (!udptable->hash) goto free_table; - udptable->hash2 = udptable->hash + hash_entries; + udptable->hash2 = (void *)(udptable->hash + hash_entries); udptable->mask = hash_entries - 1; udptable->log = ilog2(hash_entries); @@ -3479,10 +3480,11 @@ static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_ent udptable->hash[i].count = 0; spin_lock_init(&udptable->hash[i].lock); - INIT_HLIST_HEAD(&udptable->hash2[i].head); - udptable->hash2[i].count = 0; - spin_lock_init(&udptable->hash2[i].lock); + INIT_HLIST_HEAD(&udptable->hash2[i].hslot.head); + udptable->hash2[i].hslot.count = 0; + spin_lock_init(&udptable->hash2[i].hslot.lock); } + udp_table_hash4_init(udptable); return udptable; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 954afe6ba883..bfbf7b68a28c 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -227,13 +227,12 @@ struct sock *__udp6_lib_lookup(struct net *net, struct sk_buff *skb) { unsigned short hnum = ntohs(dport); - unsigned int hash2, slot2; struct udp_hslot *hslot2; struct sock *result, *sk; + unsigned int hash2; hash2 = ipv6_portaddr_hash(net, daddr, hnum); - slot2 = hash2 & udptable->mask; - hslot2 = &udptable->hash2[slot2]; + hslot2 = udp_hashslot2(udptable, hash2); /* Lookup connected or non-wildcard sockets */ result = udp6_lib_lookup2(net, saddr, sport, @@ -260,8 +259,7 @@ struct sock *__udp6_lib_lookup(struct net *net, /* Lookup wildcard sockets */ hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum); - slot2 = hash2 & udptable->mask; - hslot2 
= &udptable->hash2[slot2]; + hslot2 = udp_hashslot2(udptable, hash2); result = udp6_lib_lookup2(net, saddr, sport, &in6addr_any, hnum, dif, sdif, @@ -862,7 +860,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, udptable->mask; hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask; start_lookup: - hslot = &udptable->hash2[hash2]; + hslot = &udptable->hash2[hash2].hslot; offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); } @@ -1068,14 +1066,13 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net, { struct udp_table *udptable = net->ipv4.udp_table; unsigned short hnum = ntohs(loc_port); - unsigned int hash2, slot2; struct udp_hslot *hslot2; + unsigned int hash2; __portpair ports; struct sock *sk; hash2 = ipv6_portaddr_hash(net, loc_addr, hnum); - slot2 = hash2 & udptable->mask; - hslot2 = &udptable->hash2[slot2]; + hslot2 = udp_hashslot2(udptable, hash2); ports = INET_COMBINED_PORTS(rmt_port, hnum); udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { -- Gitee From 00797745e3592ece2b7d4b30a52ddad79ee043b5 Mon Sep 17 00:00:00 2001 From: Philo Lu Date: Thu, 14 Nov 2024 18:52:05 +0800 Subject: [PATCH 1754/2138] net/udp: Add 4-tuple hash list basis ANBZ: #11971 commit dab78a1745ab3c6001e1e4d50a9d09efef8e260d upstream. Add a new hash list, hash4, in udp table. It will be used to implement 4-tuple hash for connected udp sockets. This patch adds the hlist to table, and implements helpers and the initialization. 4-tuple hash is implemented in the following patch. hash4 uses hlist_nulls to avoid moving wrongly onto another hlist due to concurrent rehash, because rehash() can happen with lookup(). Co-developed-by: Cambda Zhu Signed-off-by: Cambda Zhu Co-developed-by: Fred Chen Signed-off-by: Fred Chen Co-developed-by: Yubing Qiu Signed-off-by: Yubing Qiu Signed-off-by: Philo Lu Acked-by: Willem de Bruijn Acked-by: Paolo Abeni Signed-off-by: David S. 
Miller Signed-off-by: Philo Lu Reviewed-by: Xuan Zhuo Link: https://gitee.com/anolis/cloud-kernel/pulls/4153 --- include/linux/udp.h | 11 ++++++ include/net/udp.h | 85 +++++++++++++++++++++++++++++++++++++++++++-- net/ipv4/udp.c | 6 ++-- 3 files changed, 97 insertions(+), 5 deletions(-) diff --git a/include/linux/udp.h b/include/linux/udp.h index 00790bb5cbde..91b5b63ce667 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -56,6 +56,12 @@ struct udp_sock { int pending; /* Any pending frames ? */ __u8 encap_type; /* Is this an Encapsulation socket? */ +#if !IS_ENABLED(CONFIG_BASE_SMALL) + /* For UDP 4-tuple hash */ + __u16 udp_lrpa_hash; + struct hlist_nulls_node udp_lrpa_node; +#endif + /* * Following member retains the information to create a UDP header * when the socket is uncorked. @@ -196,6 +202,11 @@ static inline void udp_allow_gso(struct sock *sk) #define udp_portaddr_for_each_entry_rcu(__sk, list) \ hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node) +#if !IS_ENABLED(CONFIG_BASE_SMALL) +#define udp_lrpa_for_each_entry_rcu(__up, node, list) \ + hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node) +#endif + #define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE) #endif /* _LINUX_UDP_H */ diff --git a/include/net/udp.h b/include/net/udp.h index 6f8e8df7a358..22ec3ec7ccf6 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -50,14 +50,21 @@ struct udp_skb_cb { #define UDP_SKB_CB(__skb) ((struct udp_skb_cb *)((__skb)->cb)) /** - * struct udp_hslot - UDP hash slot used by udp_table.hash + * struct udp_hslot - UDP hash slot used by udp_table.hash/hash4 * * @head: head of list of sockets + * @nulls_head: head of list of sockets, only used by hash4 * @count: number of sockets in 'head' list * @lock: spinlock protecting changes to head/count */ struct udp_hslot { - struct hlist_head head; + union { + struct hlist_head head; + /* hash4 uses hlist_nulls to avoid moving wrongly onto another + * hlist, because rehash() 
can happen with lookup(). + */ + struct hlist_nulls_head nulls_head; + }; int count; spinlock_t lock; } __aligned(2 * sizeof(long)); @@ -82,12 +89,17 @@ struct udp_hslot_main { * * @hash: hash table, sockets are hashed on (local port) * @hash2: hash table, sockets are hashed on (local port, local address) + * @hash4: hash table, connected sockets are hashed on + * (local port, local address, remote port, remote address) * @mask: number of slots in hash tables, minus 1 * @log: log2(number of slots in hash table) */ struct udp_table { struct udp_hslot *hash; struct udp_hslot_main *hash2; +#if !IS_ENABLED(CONFIG_BASE_SMALL) + struct udp_hslot *hash4; +#endif unsigned int mask; unsigned int log; }; @@ -113,13 +125,80 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table, static inline void udp_table_hash4_init(struct udp_table *table) { } + +static inline struct udp_hslot *udp_hashslot4(struct udp_table *table, + unsigned int hash) +{ + BUILD_BUG(); + return NULL; +} + +static inline bool udp_hashed4(const struct sock *sk) +{ + return false; +} + +static inline unsigned int udp_hash4_slot_size(void) +{ + return 0; +} + +static inline bool udp_has_hash4(const struct udp_hslot *hslot2) +{ + return false; +} + +static inline void udp_hash4_inc(struct udp_hslot *hslot2) +{ +} + +static inline void udp_hash4_dec(struct udp_hslot *hslot2) +{ +} #else /* !CONFIG_BASE_SMALL */ /* Must be called with table->hash2 initialized */ static inline void udp_table_hash4_init(struct udp_table *table) { - for (int i = 0; i <= table->mask; i++) + table->hash4 = (void *)(table->hash2 + (table->mask + 1)); + for (int i = 0; i <= table->mask; i++) { table->hash2[i].hash4_cnt = 0; + + INIT_HLIST_NULLS_HEAD(&table->hash4[i].nulls_head, i); + table->hash4[i].count = 0; + spin_lock_init(&table->hash4[i].lock); + } +} + +static inline struct udp_hslot *udp_hashslot4(struct udp_table *table, + unsigned int hash) +{ + return &table->hash4[hash & table->mask]; +} + +static inline 
bool udp_hashed4(const struct sock *sk) +{ + return !hlist_nulls_unhashed(&udp_sk(sk)->udp_lrpa_node); +} + +static inline unsigned int udp_hash4_slot_size(void) +{ + return sizeof(struct udp_hslot); +} + +static inline bool udp_has_hash4(const struct udp_hslot *hslot2) +{ + return UDP_HSLOT_MAIN(hslot2)->hash4_cnt; +} + +static inline void udp_hash4_inc(struct udp_hslot *hslot2) +{ + UDP_HSLOT_MAIN(hslot2)->hash4_cnt++; +} + +static inline void udp_hash4_dec(struct udp_hslot *hslot2) +{ + UDP_HSLOT_MAIN(hslot2)->hash4_cnt--; } #endif /* CONFIG_BASE_SMALL */ diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 52195055d56d..2dfe94a0b9da 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -3410,7 +3410,8 @@ void __init udp_table_init(struct udp_table *table, const char *name) { unsigned int i, slot_size; - slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main); + slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main) + + udp_hash4_slot_size(); table->hash = alloc_large_system_hash(name, slot_size, uhash_entries, @@ -3465,7 +3466,8 @@ static struct udp_table __net_init *udp_pernet_table_alloc(unsigned int hash_ent if (!udptable) goto out; - slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main); + slot_size = sizeof(struct udp_hslot) + sizeof(struct udp_hslot_main) + + udp_hash4_slot_size(); udptable->hash = vmalloc_huge(hash_entries * slot_size, GFP_KERNEL_ACCOUNT); if (!udptable->hash) -- Gitee From 1d349ad30646bbab68cad76e545001c1bf3099ff Mon Sep 17 00:00:00 2001 From: Philo Lu Date: Thu, 14 Nov 2024 18:52:06 +0800 Subject: [PATCH 1755/2138] ipv4/udp: Add 4-tuple hash for connected socket ANBZ: #11971 commit 78c91ae2c6deb5d236a5a93ff2995cdd05514380 upstream. Currently, the udp_table has two hash table, the port hash and portaddr hash. Usually for UDP servers, all sockets have the same local port and addr, so they are all on the same hash slot within a reuseport group. 
In some applications, UDP servers use connect() to manage clients. In particular, when firstly receiving from an unseen 4 tuple, a new socket is created and connect()ed to the remote addr:port, and then the fd is used exclusively by the client. Once there are connected sks in a reuseport group, udp has to score all sks in the same hash2 slot to find the best match. This could be inefficient with a large number of connections, resulting in high softirq overhead. To solve the problem, this patch implement 4-tuple hash for connected udp sockets. During connect(), hash4 slot is updated, as well as a corresponding counter, hash4_cnt, in hslot2. In __udp4_lib_lookup(), hslot4 will be searched firstly if the counter is non-zero. Otherwise, hslot2 is used like before. Note that only connected sockets enter this hash4 path, while un-connected ones are not affected. hlist_nulls is used for hash4, because we probably move to another hslot wrongly when lookup with concurrent rehash. Then we check nulls at the list end to see if we should restart lookup. Because udp does not use SLAB_TYPESAFE_BY_RCU, we don't need to touch sk_refcnt when lookup. Stress test results (with 1 cpu fully used) are shown below, in pps: (1) _un-connected_ socket as server [a] w/o hash4: 1,825176 [b] w/ hash4: 1,831750 (+0.36%) (2) 500 _connected_ sockets as server [c] w/o hash4: 290860 (only 16% of [a]) [d] w/ hash4: 1,889658 (+3.1% compared with [b]) With hash4, compute_score is skipped when lookup, so [d] is slightly better than [b]. Co-developed-by: Cambda Zhu Signed-off-by: Cambda Zhu Co-developed-by: Fred Chen Signed-off-by: Fred Chen Co-developed-by: Yubing Qiu Signed-off-by: Yubing Qiu Signed-off-by: Philo Lu Acked-by: Willem de Bruijn Acked-by: Paolo Abeni Signed-off-by: David S. 
Miller Signed-off-by: Philo Lu Reviewed-by: Xuan Zhuo Link: https://gitee.com/anolis/cloud-kernel/pulls/4153 --- include/net/udp.h | 16 +++- net/ipv4/udp.c | 197 +++++++++++++++++++++++++++++++++++++++++++++- net/ipv6/udp.c | 2 +- 3 files changed, 210 insertions(+), 5 deletions(-) diff --git a/include/net/udp.h b/include/net/udp.h index 22ec3ec7ccf6..bcff77053645 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -301,13 +301,27 @@ static inline int udp_lib_hash(struct sock *sk) } void udp_lib_unhash(struct sock *sk); -void udp_lib_rehash(struct sock *sk, u16 new_hash); +void udp_lib_rehash(struct sock *sk, u16 new_hash, u16 new_hash4); static inline void udp_lib_close(struct sock *sk, long timeout) { sk_common_release(sk); } +/* hash4 routines shared between UDPv4/6 */ +#if IS_ENABLED(CONFIG_BASE_SMALL) +static inline void udp_lib_hash4(struct sock *sk, u16 hash) +{ +} + +static inline void udp4_hash4(struct sock *sk) +{ +} +#else /* !CONFIG_BASE_SMALL */ +void udp_lib_hash4(struct sock *sk, u16 hash); +void udp4_hash4(struct sock *sk); +#endif /* CONFIG_BASE_SMALL */ + int udp_lib_get_port(struct sock *sk, unsigned short snum, unsigned int hash2_nulladdr); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 2dfe94a0b9da..4115fbcfdec4 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -479,6 +479,159 @@ static struct sock *udp4_lib_lookup2(struct net *net, return result; } +#if IS_ENABLED(CONFIG_BASE_SMALL) +static struct sock *udp4_lib_lookup4(struct net *net, + __be32 saddr, __be16 sport, + __be32 daddr, unsigned int hnum, + int dif, int sdif, + struct udp_table *udptable) +{ + return NULL; +} + +static void udp_rehash4(struct udp_table *udptable, struct sock *sk, + u16 newhash4) +{ +} + +static void udp_unhash4(struct udp_table *udptable, struct sock *sk) +{ +} +#else /* !CONFIG_BASE_SMALL */ +static struct sock *udp4_lib_lookup4(struct net *net, + __be32 saddr, __be16 sport, + __be32 daddr, unsigned int hnum, + int dif, int sdif, + struct udp_table 
*udptable) +{ + const __portpair ports = INET_COMBINED_PORTS(sport, hnum); + const struct hlist_nulls_node *node; + struct udp_hslot *hslot4; + unsigned int hash4, slot; + struct udp_sock *up; + struct sock *sk; + + hash4 = udp_ehashfn(net, daddr, hnum, saddr, sport); + slot = hash4 & udptable->mask; + hslot4 = &udptable->hash4[slot]; + INET_ADDR_COOKIE(acookie, saddr, daddr); + +begin: + /* SLAB_TYPESAFE_BY_RCU not used, so we don't need to touch sk_refcnt */ + udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) { + sk = (struct sock *)up; + if (inet_match(net, sk, acookie, ports, dif, sdif)) + return sk; + } + + /* if the nulls value we got at the end of this lookup is not the + * expected one, we must restart lookup. We probably met an item that + * was moved to another chain due to rehash. + */ + if (get_nulls_value(node) != slot) + goto begin; + + return NULL; +} + +/* In hash4, rehash can happen in connect(), where hash4_cnt keeps unchanged. */ +static void udp_rehash4(struct udp_table *udptable, struct sock *sk, + u16 newhash4) +{ + struct udp_hslot *hslot4, *nhslot4; + + hslot4 = udp_hashslot4(udptable, udp_sk(sk)->udp_lrpa_hash); + nhslot4 = udp_hashslot4(udptable, newhash4); + udp_sk(sk)->udp_lrpa_hash = newhash4; + + if (hslot4 != nhslot4) { + spin_lock_bh(&hslot4->lock); + hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_lrpa_node); + hslot4->count--; + spin_unlock_bh(&hslot4->lock); + + spin_lock_bh(&nhslot4->lock); + hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node, + &nhslot4->nulls_head); + nhslot4->count++; + spin_unlock_bh(&nhslot4->lock); + } +} + +static void udp_unhash4(struct udp_table *udptable, struct sock *sk) +{ + struct udp_hslot *hslot2, *hslot4; + + if (udp_hashed4(sk)) { + hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); + hslot4 = udp_hashslot4(udptable, udp_sk(sk)->udp_lrpa_hash); + + spin_lock(&hslot4->lock); + hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_lrpa_node); + hslot4->count--; + spin_unlock(&hslot4->lock); + 
+ spin_lock(&hslot2->lock); + udp_hash4_dec(hslot2); + spin_unlock(&hslot2->lock); + } +} + +void udp_lib_hash4(struct sock *sk, u16 hash) +{ + struct udp_hslot *hslot, *hslot2, *hslot4; + struct net *net = sock_net(sk); + struct udp_table *udptable; + + /* Connected udp socket can re-connect to another remote address, + * so rehash4 is needed. + */ + udptable = net->ipv4.udp_table; + if (udp_hashed4(sk)) { + udp_rehash4(udptable, sk, hash); + return; + } + + hslot = udp_hashslot(udptable, net, udp_sk(sk)->udp_port_hash); + hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); + hslot4 = udp_hashslot4(udptable, hash); + udp_sk(sk)->udp_lrpa_hash = hash; + + spin_lock_bh(&hslot->lock); + if (rcu_access_pointer(sk->sk_reuseport_cb)) + reuseport_detach_sock(sk); + + spin_lock(&hslot4->lock); + hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_lrpa_node, + &hslot4->nulls_head); + hslot4->count++; + spin_unlock(&hslot4->lock); + + spin_lock(&hslot2->lock); + udp_hash4_inc(hslot2); + spin_unlock(&hslot2->lock); + + spin_unlock_bh(&hslot->lock); +} +EXPORT_SYMBOL(udp_lib_hash4); + +/* call with sock lock */ +void udp4_hash4(struct sock *sk) +{ + struct net *net = sock_net(sk); + unsigned int hash; + + if (sk_unhashed(sk) || sk->sk_rcv_saddr == htonl(INADDR_ANY)) + return; + + hash = udp_ehashfn(net, sk->sk_rcv_saddr, sk->sk_num, + sk->sk_daddr, sk->sk_dport); + + udp_lib_hash4(sk, hash); +} +EXPORT_SYMBOL(udp4_hash4); +#endif /* CONFIG_BASE_SMALL */ + /* UDP is nearly always wildcards out the wazoo, it makes no sense to try * harder than this. 
-DaveM */ @@ -494,6 +647,13 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, hash2 = ipv4_portaddr_hash(net, daddr, hnum); hslot2 = udp_hashslot2(udptable, hash2); + if (udp_has_hash4(hslot2)) { + result = udp4_lib_lookup4(net, saddr, sport, daddr, hnum, + dif, sdif, udptable); + if (result) /* udp4_lib_lookup4 return sk or NULL */ + return result; + } + /* Lookup connected or non-wildcard socket */ result = udp4_lib_lookup2(net, saddr, sport, daddr, hnum, dif, sdif, @@ -1931,6 +2091,18 @@ int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) } EXPORT_SYMBOL(udp_pre_connect); +static int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +{ + int res; + + lock_sock(sk); + res = __ip4_datagram_connect(sk, uaddr, addr_len); + if (!res) + udp4_hash4(sk); + release_sock(sk); + return res; +} + int __udp_disconnect(struct sock *sk, int flags) { struct inet_sock *inet = inet_sk(sk); @@ -1990,6 +2162,8 @@ void udp_lib_unhash(struct sock *sk) hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); hslot2->count--; spin_unlock(&hslot2->lock); + + udp_unhash4(udptable, sk); } spin_unlock_bh(&hslot->lock); } @@ -1999,7 +2173,7 @@ EXPORT_SYMBOL(udp_lib_unhash); /* * inet_rcv_saddr was changed, we must rehash secondary hash */ -void udp_lib_rehash(struct sock *sk, u16 newhash) +void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4) { if (sk_hashed(sk)) { struct udp_table *udptable = udp_get_table_prot(sk); @@ -2031,6 +2205,19 @@ void udp_lib_rehash(struct sock *sk, u16 newhash) spin_unlock(&nhslot2->lock); } + if (udp_hashed4(sk)) { + udp_rehash4(udptable, sk, newhash4); + + if (hslot2 != nhslot2) { + spin_lock(&hslot2->lock); + udp_hash4_dec(hslot2); + spin_unlock(&hslot2->lock); + + spin_lock(&nhslot2->lock); + udp_hash4_inc(nhslot2); + spin_unlock(&nhslot2->lock); + } + } spin_unlock_bh(&hslot->lock); } } @@ -2042,7 +2229,11 @@ void udp_v4_rehash(struct sock *sk) u16 new_hash = ipv4_portaddr_hash(sock_net(sk), 
inet_sk(sk)->inet_rcv_saddr, inet_sk(sk)->inet_num); - udp_lib_rehash(sk, new_hash); + u16 new_hash4 = udp_ehashfn(sock_net(sk), + sk->sk_rcv_saddr, sk->sk_num, + sk->sk_daddr, sk->sk_dport); + + udp_lib_rehash(sk, new_hash, new_hash4); } static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) @@ -2920,7 +3111,7 @@ struct proto udp_prot = { .owner = THIS_MODULE, .close = udp_lib_close, .pre_connect = udp_pre_connect, - .connect = ip4_datagram_connect, + .connect = udp_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, .init = udp_init_sock, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index bfbf7b68a28c..82585aafcf63 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -114,7 +114,7 @@ void udp_v6_rehash(struct sock *sk) &sk->sk_v6_rcv_saddr, inet_sk(sk)->inet_num); - udp_lib_rehash(sk, new_hash); + udp_lib_rehash(sk, new_hash, 0); /* 4-tuple hash not implemented */ } static int compute_score(struct sock *sk, struct net *net, -- Gitee From 985395bcb751c64a178fc5881150df5d0786992c Mon Sep 17 00:00:00 2001 From: Philo Lu Date: Thu, 14 Nov 2024 18:52:07 +0800 Subject: [PATCH 1756/2138] ipv6/udp: Add 4-tuple hash for connected socket ANBZ: #11971 commit 1b29a730ef8b6fd3aa3e11c2f6d409cf201cd913 upstream. Implement ipv6 udp hash4 like that in ipv4. The major difference is that the hash value should be calculated with udp6_ehashfn(). Besides, ipv4-mapped ipv6 address is handled before hash() and rehash(). Export udp_ehashfn because now we use it in udpv6 rehash. Core procedures of hash/unhash/rehash are same as ipv4, and udpv4 and udpv6 share the same udptable, so some functions in ipv4 hash4 can also be shared. Co-developed-by: Cambda Zhu Signed-off-by: Cambda Zhu Co-developed-by: Fred Chen Signed-off-by: Fred Chen Co-developed-by: Yubing Qiu Signed-off-by: Yubing Qiu Signed-off-by: Philo Lu Acked-by: Willem de Bruijn Acked-by: Paolo Abeni Signed-off-by: David S. 
Miller Signed-off-by: Philo Lu Reviewed-by: Xuan Zhuo Link: https://gitee.com/anolis/cloud-kernel/pulls/4153 --- include/net/udp.h | 2 + net/ipv4/udp.c | 2 +- net/ipv6/udp.c | 102 +++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 103 insertions(+), 3 deletions(-) diff --git a/include/net/udp.h b/include/net/udp.h index bcff77053645..f2be158ad07a 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -302,6 +302,8 @@ static inline int udp_lib_hash(struct sock *sk) void udp_lib_unhash(struct sock *sk); void udp_lib_rehash(struct sock *sk, u16 new_hash, u16 new_hash4); +u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport, + const __be32 faddr, const __be16 fport); static inline void udp_lib_close(struct sock *sk, long timeout) { diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 4115fbcfdec4..61b70ebaa170 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -409,7 +409,6 @@ static int compute_score(struct sock *sk, struct net *net, return score; } -INDIRECT_CALLABLE_SCOPE u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport, const __be32 faddr, const __be16 fport) { @@ -420,6 +419,7 @@ u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport, return __inet_ehashfn(laddr, lport, faddr, fport, udp_ehash_secret + net_hash_mix(net)); } +EXPORT_SYMBOL(udp_ehashfn); /* called with rcu_read_lock() */ static struct sock *udp4_lib_lookup2(struct net *net, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 82585aafcf63..2590ef443733 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -113,8 +113,19 @@ void udp_v6_rehash(struct sock *sk) u16 new_hash = ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, inet_sk(sk)->inet_num); + u16 new_hash4; - udp_lib_rehash(sk, new_hash, 0); /* 4-tuple hash not implemented */ + if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) { + new_hash4 = udp_ehashfn(sock_net(sk), + sk->sk_rcv_saddr, sk->sk_num, + sk->sk_daddr, sk->sk_dport); + } else { + new_hash4 = 
udp6_ehashfn(sock_net(sk), + &sk->sk_v6_rcv_saddr, sk->sk_num, + &sk->sk_v6_daddr, sk->sk_dport); + } + + udp_lib_rehash(sk, new_hash, new_hash4); } static int compute_score(struct sock *sk, struct net *net, @@ -219,6 +230,74 @@ static struct sock *udp6_lib_lookup2(struct net *net, return result; } +#if IS_ENABLED(CONFIG_BASE_SMALL) +static struct sock *udp6_lib_lookup4(struct net *net, + const struct in6_addr *saddr, __be16 sport, + const struct in6_addr *daddr, + unsigned int hnum, int dif, int sdif, + struct udp_table *udptable) +{ + return NULL; +} + +static void udp6_hash4(struct sock *sk) +{ +} +#else /* !CONFIG_BASE_SMALL */ +static struct sock *udp6_lib_lookup4(struct net *net, + const struct in6_addr *saddr, __be16 sport, + const struct in6_addr *daddr, + unsigned int hnum, int dif, int sdif, + struct udp_table *udptable) +{ + const __portpair ports = INET_COMBINED_PORTS(sport, hnum); + const struct hlist_nulls_node *node; + struct udp_hslot *hslot4; + unsigned int hash4, slot; + struct udp_sock *up; + struct sock *sk; + + hash4 = udp6_ehashfn(net, daddr, hnum, saddr, sport); + slot = hash4 & udptable->mask; + hslot4 = &udptable->hash4[slot]; + +begin: + udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) { + sk = (struct sock *)up; + if (inet6_match(net, sk, saddr, daddr, ports, dif, sdif)) + return sk; + } + + /* if the nulls value we got at the end of this lookup is not the + * expected one, we must restart lookup. We probably met an item that + * was moved to another chain due to rehash. 
+ */ + if (get_nulls_value(node) != slot) + goto begin; + + return NULL; +} + +static void udp6_hash4(struct sock *sk) +{ + struct net *net = sock_net(sk); + unsigned int hash; + + if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) { + udp4_hash4(sk); + return; + } + + if (sk_unhashed(sk) || ipv6_addr_any(&sk->sk_v6_rcv_saddr)) + return; + + hash = udp6_ehashfn(net, &sk->sk_v6_rcv_saddr, sk->sk_num, + &sk->sk_v6_daddr, sk->sk_dport); + + udp_lib_hash4(sk, hash); +} +#endif /* CONFIG_BASE_SMALL */ + /* rcu_read_lock() must be held */ struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, @@ -234,6 +313,13 @@ struct sock *__udp6_lib_lookup(struct net *net, hash2 = ipv6_portaddr_hash(net, daddr, hnum); hslot2 = udp_hashslot2(udptable, hash2); + if (udp_has_hash4(hslot2)) { + result = udp6_lib_lookup4(net, saddr, sport, daddr, hnum, + dif, sdif, udptable); + if (result) /* udp6_lib_lookup4 return sk or NULL */ + return result; + } + /* Lookup connected or non-wildcard sockets */ result = udp6_lib_lookup2(net, saddr, sport, daddr, hnum, dif, sdif, @@ -1168,6 +1254,18 @@ static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr, return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len); } +static int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +{ + int res; + + lock_sock(sk); + res = __ip6_datagram_connect(sk, uaddr, addr_len); + if (!res) + udp6_hash4(sk); + release_sock(sk); + return res; +} + /** * udp6_hwcsum_outgoing - handle outgoing HW checksumming * @sk: socket we are sending on @@ -1767,7 +1865,7 @@ struct proto udpv6_prot = { .owner = THIS_MODULE, .close = udp_lib_close, .pre_connect = udpv6_pre_connect, - .connect = ip6_datagram_connect, + .connect = udpv6_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, .init = udpv6_init_sock, -- Gitee From de903f81b944319ff3d90bb44dd67e8b59f0c16a Mon Sep 17 00:00:00 2001 From: Jia He Date: Tue, 26 Nov 2024 08:02:40 +0000 Subject: [PATCH 
1757/2138] anolis: objtool: arm64: Move structure orc_entry to arm64 asm/orc_types.h ANBZ: #12121 It is a good idea to abstract the similar struct orc_entry into a generic header file orc_entry.h. However, to maintain alignment between the Anolis livepatch code layout and the upstream kernel, we must retain arch specific definitions of struct orc_entry. Signed-off-by: Jia He Reviewed-by: Tiezhu Yang Reviewed-by: Shuai Xue Reviewed-by: Jay Chen Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4161 --- arch/arm64/include/asm/orc_lookup.h | 2 -- arch/arm64/include/asm/orc_types.h | 31 ++++++++++++++++++- tools/arch/arm64/include/asm/orc_types.h | 31 ++++++++++++++++++- tools/include/linux/orc_entry.h | 39 ------------------------ tools/objtool/sync-check.sh | 1 - 5 files changed, 60 insertions(+), 44 deletions(-) delete mode 100644 tools/include/linux/orc_entry.h diff --git a/arch/arm64/include/asm/orc_lookup.h b/arch/arm64/include/asm/orc_lookup.h index b883758e4c85..b9f9763d6e58 100644 --- a/arch/arm64/include/asm/orc_lookup.h +++ b/arch/arm64/include/asm/orc_lookup.h @@ -35,8 +35,6 @@ extern unsigned int orc_lookup_end[]; #ifndef __ASSEMBLY__ -#include - #ifdef CONFIG_UNWINDER_ORC void orc_lookup_init(void); void orc_lookup_module_init(struct module *mod, diff --git a/arch/arm64/include/asm/orc_types.h b/arch/arm64/include/asm/orc_types.h index d7e8089f80da..e18971fdf867 100644 --- a/arch/arm64/include/asm/orc_types.h +++ b/arch/arm64/include/asm/orc_types.h @@ -10,7 +10,6 @@ #include #include -#include /* * The ORC_REG_* registers are base registers which are used to find other @@ -38,4 +37,34 @@ #define ORC_TYPE_REGS 3 #define ORC_TYPE_REGS_PARTIAL 4 +#ifndef __ASSEMBLY__ +#include + +/* + * This struct is more or less a vastly simplified version of the DWARF Call + * Frame Information standard. It contains only the necessary parts of DWARF + * CFI, simplified for ease of access by the in-kernel unwinder. 
It tells the + * unwinder how to find the previous SP and BP (and sometimes entry regs) on + * the stack for a given code address. Each instance of the struct corresponds + * to one or more code locations. + */ +struct orc_entry { + s16 sp_offset; + s16 fp_offset; +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned sp_reg:4; + unsigned fp_reg:4; + unsigned type:4; + unsigned signal:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned fp_reg:4; + unsigned sp_reg:4; + unsigned unused:3; + unsigned signal:1; + unsigned type:4; +#endif +} __packed; + +#endif /* __ASSEMBLY__ */ + #endif /* _ORC_TYPES_H */ diff --git a/tools/arch/arm64/include/asm/orc_types.h b/tools/arch/arm64/include/asm/orc_types.h index d7e8089f80da..e18971fdf867 100644 --- a/tools/arch/arm64/include/asm/orc_types.h +++ b/tools/arch/arm64/include/asm/orc_types.h @@ -10,7 +10,6 @@ #include #include -#include /* * The ORC_REG_* registers are base registers which are used to find other @@ -38,4 +37,34 @@ #define ORC_TYPE_REGS 3 #define ORC_TYPE_REGS_PARTIAL 4 +#ifndef __ASSEMBLY__ +#include + +/* + * This struct is more or less a vastly simplified version of the DWARF Call + * Frame Information standard. It contains only the necessary parts of DWARF + * CFI, simplified for ease of access by the in-kernel unwinder. It tells the + * unwinder how to find the previous SP and BP (and sometimes entry regs) on + * the stack for a given code address. Each instance of the struct corresponds + * to one or more code locations. 
+ */ +struct orc_entry { + s16 sp_offset; + s16 fp_offset; +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned sp_reg:4; + unsigned fp_reg:4; + unsigned type:4; + unsigned signal:1; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned fp_reg:4; + unsigned sp_reg:4; + unsigned unused:3; + unsigned signal:1; + unsigned type:4; +#endif +} __packed; + +#endif /* __ASSEMBLY__ */ + #endif /* _ORC_TYPES_H */ diff --git a/tools/include/linux/orc_entry.h b/tools/include/linux/orc_entry.h deleted file mode 100644 index 194a6c41476e..000000000000 --- a/tools/include/linux/orc_entry.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2017 Josh Poimboeuf - */ - -#ifndef _ORC_ENTRY_H -#define _ORC_ENTRY_H - -#ifndef __ASSEMBLY__ -#include - -/* - * This struct is more or less a vastly simplified version of the DWARF Call - * Frame Information standard. It contains only the necessary parts of DWARF - * CFI, simplified for ease of access by the in-kernel unwinder. It tells the - * unwinder how to find the previous SP and BP (and sometimes entry regs) on - * the stack for a given code address. Each instance of the struct corresponds - * to one or more code locations. 
- */ -struct orc_entry { - s16 sp_offset; - s16 fp_offset; -#if defined(__LITTLE_ENDIAN_BITFIELD) - unsigned sp_reg:4; - unsigned fp_reg:4; - unsigned type:4; - unsigned signal:1; -#elif defined(__BIG_ENDIAN_BITFIELD) - unsigned fp_reg:4; - unsigned sp_reg:4; - unsigned unused:3; - unsigned signal:1; - unsigned type:4; -#endif -} __packed; - -#endif /* __ASSEMBLY__ */ - -#endif /* _ORC_ENTRY_H */ diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh index b06b5b881121..53a12a0f9d8c 100755 --- a/tools/objtool/sync-check.sh +++ b/tools/objtool/sync-check.sh @@ -31,7 +31,6 @@ if [ "$SRCARCH" = "arm64" ]; then FILES="$FILES arch/arm64/include/asm/unwind_hints.h arch/arm64/include/asm/orc_types.h -include/linux/orc_entry.h " fi -- Gitee From 2290e0862acee734b323d2b6eae5e9d1aeefdfbb Mon Sep 17 00:00:00 2001 From: Jia He Date: Tue, 26 Nov 2024 08:14:44 +0000 Subject: [PATCH 1758/2138] anolis: objtool: Split struct orc_entry initialization for arch specific definitions ANBZ: #12121 Simplify cross-building (e.g., LoongArch on x86) by avoiding complex macros for architecture-specific orc_entry definitions. Introduce a helper function, arch_init_orc_entry(), to manage arch specific orc_entry initialization more cleanly. 
Signed-off-by: Jia He Reviewed-by: Tiezhu Yang Reviewed-by: Shuai Xue Reviewed-by: Jay Chen Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4161 --- tools/objtool/arch/arm64/orc.c | 9 ++++++++- tools/objtool/include/objtool/objtool.h | 3 +++ tools/objtool/orc_gen.c | 20 ++++++++------------ 3 files changed, 19 insertions(+), 13 deletions(-) diff --git a/tools/objtool/arch/arm64/orc.c b/tools/objtool/arch/arm64/orc.c index 25fba97534a1..4febbca42ddc 100644 --- a/tools/objtool/arch/arm64/orc.c +++ b/tools/objtool/arch/arm64/orc.c @@ -9,8 +9,15 @@ #include #include -#include #include +#include +#include + +void arch_init_orc_entry(struct orc_entry *entry) +{ + entry->fp_reg = ORC_REG_UNDEFINED; + entry->type = UNWIND_HINT_TYPE_CALL; +} int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi, struct instruction *insn) diff --git a/tools/objtool/include/objtool/objtool.h b/tools/objtool/include/objtool/objtool.h index e5f93745aa26..059abeff5066 100644 --- a/tools/objtool/include/objtool/objtool.h +++ b/tools/objtool/include/objtool/objtool.h @@ -12,6 +12,8 @@ #include +#include + #define __weak __attribute__((weak)) struct pv_state { @@ -47,5 +49,6 @@ int check(struct objtool_file *file); int orc_dump(const char *objname); int orc_create(struct objtool_file *file); bool orc_ignore_section(struct section *sec); +void arch_init_orc_entry(struct orc_entry *entry); #endif /* _OBJTOOL_H */ diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index b99ed15030c0..6674f816d025 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -14,19 +14,15 @@ #include #include -#ifdef __aarch64__ -#define bp_reg fp_reg -#endif - -#ifdef __loongarch__ -#define bp_reg fp_reg -#endif - bool __weak orc_ignore_section(struct section *sec) { return false; } +void __weak arch_init_orc_entry(struct orc_entry *entry) +{ +} + struct orc_list_entry { struct list_head list; struct orc_entry orc; @@ -66,10 +62,10 @@ int orc_create(struct 
objtool_file *file) struct orc_list_entry *entry; struct list_head orc_list; - struct orc_entry null = { - .bp_reg = ORC_REG_UNDEFINED, - .type = UNWIND_HINT_TYPE_CALL, - }; + struct orc_entry null = { .type = ORC_TYPE_UNDEFINED }; + + /* Override orc_entry initialization for arch specific definition*/ + arch_init_orc_entry(&null); /* Build a deduplicated list of ORC entries: */ INIT_LIST_HEAD(&orc_list); -- Gitee From cef9393e45579842c4010d26c6ae934d7439a6ad Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Fri, 15 Nov 2024 19:20:57 +0800 Subject: [PATCH 1759/2138] anolis: drivers/irqchip: Disable pci_irq_limit when using avec interrupt controller ANBZ: #12161 In the new interrupt model, the avec interrupt controller no longer has a limit on the number of interrupts like the extended interrupt controller, so when using the avec interrupt model, pci_irq_limit is disabled by default. Signed-off-by: Juxin Gao Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4168 --- arch/loongarch/include/asm/setup.h | 1 + drivers/irqchip/irq-loongarch-avec.c | 2 ++ drivers/pci/msi/msi.c | 18 ++++++++++++------ 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h index ee52fb1e9963..eefb30c33ba3 100644 --- a/arch/loongarch/include/asm/setup.h +++ b/arch/loongarch/include/asm/setup.h @@ -12,6 +12,7 @@ #define VECSIZE 0x200 +extern bool disable_pci_irq_limit; extern unsigned long eentry; extern unsigned long tlbrentry; extern char init_command_line[COMMAND_LINE_SIZE]; diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c index 5c9dcc488e21..638516164d2f 100644 --- a/drivers/irqchip/irq-loongarch-avec.c +++ b/drivers/irqchip/irq-loongarch-avec.c @@ -30,6 +30,7 @@ struct pending_list { struct list_head head; }; +bool disable_pci_irq_limit; static struct cpumask intersect_mask; static DEFINE_PER_CPU(struct pending_list, pending_list); #endif @@ -365,6 
+366,7 @@ static int __init avecintc_init(struct irq_domain *parent) int ret, parent_irq; unsigned long value; + disable_pci_irq_limit = true; raw_spin_lock_init(&loongarch_avec.lock); loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("AVECINTC"); diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c index eeb6c5d0299a..205e6aaaa032 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -409,6 +409,8 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, } #ifdef CONFIG_LOONGARCH +#include + static unsigned int pci_irq_numbers = 32; static int __init pci_irq_limit(char *str) @@ -430,9 +432,11 @@ int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, int rc; #ifdef CONFIG_LOONGARCH - if (maxvec > 32) { - maxvec = pci_irq_numbers; - minvec = min_t(int, pci_irq_numbers, minvec); + if (!disable_pci_irq_limit) { + if (maxvec > 32) { + maxvec = pci_irq_numbers; + minvec = min_t(int, pci_irq_numbers, minvec); + } } #endif @@ -811,9 +815,11 @@ int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int int hwsize, rc, nvec = maxvec; #ifdef CONFIG_LOONGARCH - if (maxvec > 32) { - nvec = pci_irq_numbers; - minvec = min_t(int, pci_irq_numbers, minvec); + if (!disable_pci_irq_limit) { + if (maxvec > 32) { + nvec = pci_irq_numbers; + minvec = min_t(int, pci_irq_numbers, minvec); + } } #endif -- Gitee From 99b2ff581d1e78852de95cf388f0e2f1195339ba Mon Sep 17 00:00:00 2001 From: Wedson Almeida Filho Date: Sat, 30 Sep 2023 02:00:12 -0300 Subject: [PATCH 1760/2138] erofs: move erofs_xattr_handlers and xattr_handler_map to .rodata ANBZ: #11101 commit 3591f40e223c66d4a3f152390b6db56421011854 upstream. This makes it harder for accidental or malicious changes to erofs_xattr_handlers or xattr_handler_map at runtime. 
Cc: Gao Xiang Cc: Chao Yu Cc: Yue Hu Cc: Jeffle Xu Cc: linux-erofs@lists.ozlabs.org Signed-off-by: Wedson Almeida Filho Link: https://lore.kernel.org/r/20230930050033.41174-9-wedsonaf@gmail.com Acked-by: Gao Xiang Signed-off-by: Christian Brauner Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/xattr.c | 2 +- fs/erofs/xattr.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c index 09d341675e89..b58316b49a43 100644 --- a/fs/erofs/xattr.c +++ b/fs/erofs/xattr.c @@ -168,7 +168,7 @@ const struct xattr_handler __maybe_unused erofs_xattr_security_handler = { }; #endif -const struct xattr_handler *erofs_xattr_handlers[] = { +const struct xattr_handler * const erofs_xattr_handlers[] = { &erofs_xattr_user_handler, &erofs_xattr_trusted_handler, #ifdef CONFIG_EROFS_FS_SECURITY diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h index f16283cb8c93..b246cd0e135e 100644 --- a/fs/erofs/xattr.h +++ b/fs/erofs/xattr.h @@ -23,7 +23,7 @@ static inline const char *erofs_xattr_prefix(unsigned int idx, { const struct xattr_handler *handler = NULL; - static const struct xattr_handler *xattr_handler_map[] = { + static const struct xattr_handler * const xattr_handler_map[] = { [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler, #ifdef CONFIG_EROFS_FS_POSIX_ACL [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access, @@ -44,7 +44,7 @@ static inline const char *erofs_xattr_prefix(unsigned int idx, return xattr_prefix(handler); } -extern const struct xattr_handler *erofs_xattr_handlers[]; +extern const struct xattr_handler * const erofs_xattr_handlers[]; int erofs_xattr_prefixes_init(struct super_block *sb); void erofs_xattr_prefixes_cleanup(struct super_block *sb); -- Gitee From 428bb9d970180f298c38d0fe568dd8ddfa8c0ff9 Mon Sep 17 00:00:00 2001 From: Tiwei Bie Date: Tue, 10 Oct 2023 19:39:15 +0800 Subject: [PATCH 1761/2138] erofs: fix inode metadata space layout 
description in documentation ANBZ: #11101 commit 78a50b6a41665efeabeec5edbae245d8be93278c upstream. Xattrs, extents, data inline are _placed after_, not _followed by_ the corresponding inode. This patch fixes it. Signed-off-by: Tiwei Bie Reviewed-by: Gao Xiang Reviewed-by: Jingbo Xu Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20231010113915.436591-1-tiwei.btw@antgroup.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- Documentation/filesystems/erofs.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/filesystems/erofs.rst b/Documentation/filesystems/erofs.rst index 445224817823..cc4626d6ee4f 100644 --- a/Documentation/filesystems/erofs.rst +++ b/Documentation/filesystems/erofs.rst @@ -203,7 +203,7 @@ may not. All metadatas can be now observed in two different spaces (views): | | |__________________| 64 bytes - Xattrs, extents, data inline are followed by the corresponding inode with + Xattrs, extents, data inline are placed after the corresponding inode with proper alignment, and they could be optional for different data mappings. _currently_ total 5 data layouts are supported: -- Gitee From 883b893de92eba78416e7548914d589d040d662e Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 4 Oct 2023 17:53:03 +0100 Subject: [PATCH 1762/2138] mm: add folio_end_read() ANBZ: #11101 commit 0b237047d5a72ffe06c0bdf2f4536f669dcd31c9 upstream. Provide a function for filesystems to call when they have finished reading an entire folio. 
Link: https://lkml.kernel.org/r/20231004165317.1061855-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Cc: Albert Ou Cc: Alexander Gordeev Cc: Andreas Dilger Cc: Christian Borntraeger Cc: Christophe Leroy Cc: Geert Uytterhoeven Cc: Heiko Carstens Cc: Ivan Kokshaysky Cc: Matt Turner Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Richard Henderson Cc: Sven Schnelle Cc: "Theodore Ts'o" Cc: Thomas Bogendoerfer Cc: Vasily Gorbik Signed-off-by: Andrew Morton Signed-off-by: Hongzhen Luo Acked-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- include/linux/pagemap.h | 1 + mm/filemap.c | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 33edd591ce1a..f9ca9c252dca 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1135,6 +1135,7 @@ static inline void wait_on_page_locked(struct page *page) folio_wait_locked(page_folio(page)); } +void folio_end_read(struct folio *folio, bool success); void wait_on_page_writeback(struct page *page); void folio_wait_writeback(struct folio *folio); int folio_wait_writeback_killable(struct folio *folio); diff --git a/mm/filemap.c b/mm/filemap.c index 4a66b50f9430..7c571d0d5443 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1553,6 +1553,28 @@ void folio_unlock(struct folio *folio) } EXPORT_SYMBOL(folio_unlock); +/** + * folio_end_read - End read on a folio. + * @folio: The folio. + * @success: True if all reads completed successfully. + * + * When all reads against a folio have completed, filesystems should + * call this function to let the pagecache know that no more reads + * are outstanding. This will unlock the folio and wake up any thread + * sleeping on the lock. The folio will also be marked uptodate if all + * reads succeeded. + * + * Context: May be called from interrupt or process context. May not be + * called from NMI context. 
+ */ +void folio_end_read(struct folio *folio, bool success) +{ + if (likely(success)) + folio_mark_uptodate(folio); + folio_unlock(folio); +} +EXPORT_SYMBOL(folio_end_read); + /** * folio_end_private_2 - Clear PG_private_2 and wake any waiters. * @folio: The folio. -- Gitee From a06e89ab0084dd92cab6ad670e656287dfafe010 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 5 Mar 2024 17:14:43 +0800 Subject: [PATCH 1763/2138] erofs: convert z_erofs_onlinepage_.* to folios ANBZ: #11101 commit d136d335861613a3d92e76385a71225d8e9084b2 upstream. Online folios are locked file-backed folios which will eventually keep decoded (e.g. decompressed) data of each inode for end users to utilize. It may belong to a few pclusters and contain other data (e.g. compressed data for inplace I/Os) temporarily in a time-sharing manner to reduce memory footprints for low-ended storage devices with high latencies under heavy I/O pressure. Apart from folio_end_read() usage, it's a straight-forward conversion. Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240305091448.1384242-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/zdata.c | 50 +++++++++++++++++++++--------------------------- 1 file changed, 22 insertions(+), 28 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index ff0aa72b0db3..5013fcd4965a 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -117,46 +117,39 @@ static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl) } /* - * bit 30: I/O error occurred on this page - * bit 0 - 29: remaining parts to complete this page + * bit 30: I/O error occurred on this folio + * bit 0 - 29: remaining parts to complete this folio */ -#define Z_EROFS_PAGE_EIO (1 << 30) +#define Z_EROFS_FOLIO_EIO (1 << 30) -static inline void z_erofs_onlinepage_init(struct page *page) +static void z_erofs_onlinefolio_init(struct folio *folio) { 
union { atomic_t o; - unsigned long v; + void *v; } u = { .o = ATOMIC_INIT(1) }; - set_page_private(page, u.v); - smp_wmb(); - SetPagePrivate(page); + folio->private = u.v; /* valid only if file-backed folio is locked */ } -static inline void z_erofs_onlinepage_split(struct page *page) +static void z_erofs_onlinefolio_split(struct folio *folio) { - atomic_inc((atomic_t *)&page->private); + atomic_inc((atomic_t *)&folio->private); } -static void z_erofs_onlinepage_endio(struct page *page, int err) +static void z_erofs_onlinefolio_end(struct folio *folio, int err) { int orig, v; - DBG_BUGON(!PagePrivate(page)); - do { - orig = atomic_read((atomic_t *)&page->private); - v = (orig - 1) | (err ? Z_EROFS_PAGE_EIO : 0); - } while (atomic_cmpxchg((atomic_t *)&page->private, orig, v) != orig); + orig = atomic_read((atomic_t *)&folio->private); + v = (orig - 1) | (err ? Z_EROFS_FOLIO_EIO : 0); + } while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig); - if (!(v & ~Z_EROFS_PAGE_EIO)) { - set_page_private(page, 0); - ClearPagePrivate(page); - if (!(v & Z_EROFS_PAGE_EIO)) - SetPageUptodate(page); - unlock_page(page); - } + if (v & ~Z_EROFS_FOLIO_EIO) + return; + folio->private = 0; + folio_end_read(folio, !(v & Z_EROFS_FOLIO_EIO)); } #define Z_EROFS_ONSTACK_PAGES 32 @@ -965,6 +958,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page, static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, struct page *page, bool ra) { + struct folio *folio = page_folio(page); struct inode *const inode = fe->inode; struct erofs_map_blocks *const map = &fe->map; const loff_t offset = page_offset(page); @@ -973,7 +967,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, unsigned int cur, end, len, split; int err = 0; - z_erofs_onlinepage_init(page); + z_erofs_onlinefolio_init(folio); split = 0; end = PAGE_SIZE; repeat: @@ -1035,7 +1029,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, if 
(err) goto out; - z_erofs_onlinepage_split(page); + z_erofs_onlinefolio_split(folio); if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) fe->pcl->multibases = true; if (fe->pcl->length < offset + end - map->m_la) { @@ -1056,7 +1050,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, goto repeat; out: - z_erofs_onlinepage_endio(page, err); + z_erofs_onlinefolio_end(folio, err); return err; } @@ -1159,7 +1153,7 @@ static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be, cur += len; } kunmap_local(dst); - z_erofs_onlinepage_endio(bvi->bvec.page, err); + z_erofs_onlinefolio_end(page_folio(bvi->bvec.page), err); list_del(p); kfree(bvi); } @@ -1316,7 +1310,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, /* recycle all individual short-lived pages */ if (z_erofs_put_shortlivedpage(be->pagepool, page)) continue; - z_erofs_onlinepage_endio(page, err); + z_erofs_onlinefolio_end(page_folio(page), err); } if (be->decompressed_pages != be->onstack_pages) -- Gitee From adf5bec345951613bd6296d1a4a4bb7dc9e5425c Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 5 Mar 2024 17:14:44 +0800 Subject: [PATCH 1764/2138] erofs: convert z_erofs_do_read_page() to folios ANBZ: #11101 commit 0e25a788ea2c4a6f5f971279396fcf79f4fecd7b upstream. It is a straight-forward conversion. Besides, it's renamed as z_erofs_scan_folio(). 
Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240305091448.1384242-2-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/zdata.c | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 5013fcd4965a..c25074657708 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -955,21 +955,20 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page, return 0; } -static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, - struct page *page, bool ra) +static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *fe, + struct folio *folio, bool ra) { - struct folio *folio = page_folio(page); struct inode *const inode = fe->inode; struct erofs_map_blocks *const map = &fe->map; - const loff_t offset = page_offset(page); - const unsigned int bs = i_blocksize(inode); + const loff_t offset = folio_pos(folio); + const unsigned int bs = i_blocksize(inode), fs = folio_size(folio); bool tight = true, exclusive; unsigned int cur, end, len, split; int err = 0; z_erofs_onlinefolio_init(folio); split = 0; - end = PAGE_SIZE; + end = fs; repeat: if (offset + end - 1 < map->m_la || offset + end - 1 >= map->m_la + map->m_llen) { @@ -986,7 +985,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, ++split; if (!(map->m_flags & EROFS_MAP_MAPPED)) { - zero_user_segment(page, cur, end); + folio_zero_segment(folio, cur, end); tight = false; goto next_part; } @@ -995,8 +994,8 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, erofs_off_t fpos = offset + cur - map->m_la; len = min_t(unsigned int, map->m_llen - fpos, end - cur); - err = z_erofs_read_fragment(inode->i_sb, page, cur, cur + len, - EROFS_I(inode)->z_fragmentoff + fpos); + err = z_erofs_read_fragment(inode->i_sb, &folio->page, cur, + cur + len, 
EROFS_I(inode)->z_fragmentoff + fpos); if (err) goto out; tight = false; @@ -1011,18 +1010,18 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, } /* - * Ensure the current partial page belongs to this submit chain rather + * Ensure the current partial folio belongs to this submit chain rather * than other concurrent submit chains or the noio(bypass) chain since - * those chains are handled asynchronously thus the page cannot be used + * those chains are handled asynchronously thus the folio cannot be used * for inplace I/O or bvpage (should be processed in a strict order.) */ tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE); - exclusive = (!cur && ((split <= 1) || (tight && bs == PAGE_SIZE))); + exclusive = (!cur && ((split <= 1) || (tight && bs == fs))); if (cur) tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED); err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) { - .page = page, + .page = &folio->page, .offset = offset - map->m_la, .end = end, }), exclusive); @@ -1789,7 +1788,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, if (PageUptodate(page)) unlock_page(page); else - (void)z_erofs_do_read_page(f, page, !!rac); + z_erofs_scan_folio(f, page_folio(page), !!rac); put_page(page); } @@ -1810,7 +1809,7 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio) f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT; z_erofs_pcluster_readmore(&f, NULL, true); - err = z_erofs_do_read_page(&f, &folio->page, false); + err = z_erofs_scan_folio(&f, folio, false); z_erofs_pcluster_readmore(&f, NULL, false); z_erofs_pcluster_end(&f); @@ -1851,7 +1850,7 @@ static void z_erofs_readahead(struct readahead_control *rac) folio = head; head = folio_get_private(folio); - err = z_erofs_do_read_page(&f, &folio->page, true); + err = z_erofs_scan_folio(&f, folio, true); if (err && err != -EINTR) erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu", folio->index, EROFS_I(inode)->nid); -- 
Gitee From a3084b010b2bec6da46fed6bc92c53f23a6fefa2 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 5 Mar 2024 17:14:45 +0800 Subject: [PATCH 1765/2138] erofs: get rid of `justfound` debugging tag ANBZ: #11101 commit 19fb9070c2cd9aa6d4bd368985918d7200ec1722 upstream. `justfound` is introduced to identify cached folios that are just added to compressed bvecs so that more checks can be applied in the I/O submission path. EROFS is quite now stable compared to the codebase at that stage. `justfound` becomes a burden for upcoming features. Drop it. Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240305091448.1384242-3-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/zdata.c | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index c25074657708..75b05990b571 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -565,17 +565,13 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) for (i = 0; i < pclusterpages; ++i) { struct page *page, *newpage; - void *t; /* mark pages just found for debugging */ /* Inaccurate check w/o locking to avoid unneeded lookups */ if (READ_ONCE(pcl->compressed_bvecs[i].page)) continue; page = find_get_page(mc, pcl->obj.index + i); - if (page) { - t = (void *)((unsigned long)page | 1); - newpage = NULL; - } else { + if (!page) { /* I/O is needed, no possible to decompress directly */ standalone = false; if (!shouldalloc) @@ -589,11 +585,10 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) if (!newpage) continue; set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE); - t = (void *)((unsigned long)newpage | 1); } spin_lock(&pcl->obj.lockref.lock); if (!pcl->compressed_bvecs[i].page) { - pcl->compressed_bvecs[i].page = t; + pcl->compressed_bvecs[i].page = page ? 
page : newpage; spin_unlock(&pcl->obj.lockref.lock); continue; } @@ -1423,7 +1418,7 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, struct z_erofs_bvec zbv; struct address_space *mapping; struct page *page; - int justfound, bs = i_blocksize(f->inode); + int bs = i_blocksize(f->inode); /* Except for inplace pages, the entire page can be used for I/Os */ bvec->bv_offset = 0; @@ -1432,9 +1427,6 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, spin_lock(&pcl->obj.lockref.lock); zbv = pcl->compressed_bvecs[nr]; page = zbv.page; - justfound = (unsigned long)page & 1UL; - page = (struct page *)((unsigned long)page & ~1UL); - pcl->compressed_bvecs[nr].page = page; spin_unlock(&pcl->obj.lockref.lock); if (!page) goto out_allocpage; @@ -1465,9 +1457,6 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, } lock_page(page); - /* only true if page reclaim goes wrong, should never happen */ - DBG_BUGON(justfound && PagePrivate(page)); - /* the cached page is still in managed cache */ if (page->mapping == mc) { /* @@ -1475,7 +1464,6 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, * `->private` pcluster hint. Let's reconnect them. */ if (!PagePrivate(page)) { - DBG_BUGON(!justfound); /* compressed_bvecs[] already takes a ref */ attach_page_private(page, pcl); put_page(page); @@ -1494,8 +1482,6 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, * allocate a new page for compressed data. */ DBG_BUGON(page->mapping); - DBG_BUGON(!justfound); - tocache = true; unlock_page(page); put_page(page); -- Gitee From b6ba9d6e49a182c72a4ecdf09ad85171c81ed0a9 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 5 Mar 2024 17:14:46 +0800 Subject: [PATCH 1766/2138] erofs: convert z_erofs_fill_bio_vec() to folios ANBZ: #11101 commit 92cc38e02a0e89621b90d039769bfab434745d16 upstream. Introduce a folio member to `struct z_erofs_bvec` and convert most of z_erofs_fill_bio_vec() to folios, which is still straight-forward. 
Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240305091448.1384242-4-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/zdata.c | 71 ++++++++++++++++++++++++------------------------ 1 file changed, 36 insertions(+), 35 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 75b05990b571..d78cc54a96f5 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -19,7 +19,10 @@ typedef void *z_erofs_next_pcluster_t; struct z_erofs_bvec { - struct page *page; + union { + struct page *page; + struct folio *folio; + }; int offset; unsigned int end; }; @@ -1420,33 +1423,32 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, struct page *page; int bs = i_blocksize(f->inode); - /* Except for inplace pages, the entire page can be used for I/Os */ + /* Except for inplace folios, the entire folio can be used for I/Os */ bvec->bv_offset = 0; bvec->bv_len = PAGE_SIZE; repeat: spin_lock(&pcl->obj.lockref.lock); zbv = pcl->compressed_bvecs[nr]; - page = zbv.page; spin_unlock(&pcl->obj.lockref.lock); - if (!page) - goto out_allocpage; + if (!zbv.folio) + goto out_allocfolio; - bvec->bv_page = page; - DBG_BUGON(z_erofs_is_shortlived_page(page)); + bvec->bv_page = &zbv.folio->page; + DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page)); /* - * Handle preallocated cached pages. We tried to allocate such pages + * Handle preallocated cached folios. We tried to allocate such folios * without triggering direct reclaim. If allocation failed, inplace - * file-backed pages will be used instead. + * file-backed folios will be used instead. 
*/ - if (page->private == Z_EROFS_PREALLOCATED_PAGE) { - set_page_private(page, 0); + if (zbv.folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) { + zbv.folio->private = 0; tocache = true; goto out_tocache; } - mapping = READ_ONCE(page->mapping); + mapping = READ_ONCE(zbv.folio->mapping); /* - * File-backed pages for inplace I/Os are all locked steady, + * File-backed folios for inplace I/Os are all locked steady, * therefore it is impossible for `mapping` to be NULL. */ if (mapping && mapping != mc) { @@ -1456,22 +1458,21 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, return; } - lock_page(page); - /* the cached page is still in managed cache */ - if (page->mapping == mc) { + folio_lock(zbv.folio); + if (zbv.folio->mapping == mc) { /* - * The cached page is still available but without a valid - * `->private` pcluster hint. Let's reconnect them. + * The cached folio is still in managed cache but without + * a valid `->private` pcluster hint. Let's reconnect them. */ - if (!PagePrivate(page)) { - /* compressed_bvecs[] already takes a ref */ - attach_page_private(page, pcl); - put_page(page); + if (!folio_test_private(zbv.folio)) { + folio_attach_private(zbv.folio, pcl); + /* compressed_bvecs[] already takes a ref before */ + folio_put(zbv.folio); } /* no need to submit if it is already up-to-date */ - if (PageUptodate(page)) { - unlock_page(page); + if (folio_test_uptodate(zbv.folio)) { + folio_unlock(zbv.folio); bvec->bv_page = NULL; } return; @@ -1481,32 +1482,32 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, * It has been truncated, so it's unsafe to reuse this one. Let's * allocate a new page for compressed data. 
*/ - DBG_BUGON(page->mapping); + DBG_BUGON(zbv.folio->mapping); tocache = true; - unlock_page(page); - put_page(page); -out_allocpage: + folio_unlock(zbv.folio); + folio_put(zbv.folio); +out_allocfolio: page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); spin_lock(&pcl->obj.lockref.lock); - if (pcl->compressed_bvecs[nr].page) { + if (pcl->compressed_bvecs[nr].folio) { erofs_pagepool_add(&f->pagepool, page); spin_unlock(&pcl->obj.lockref.lock); cond_resched(); goto repeat; } - pcl->compressed_bvecs[nr].page = page; + pcl->compressed_bvecs[nr].folio = zbv.folio = page_folio(page); spin_unlock(&pcl->obj.lockref.lock); bvec->bv_page = page; out_tocache: if (!tocache || bs != PAGE_SIZE || - add_to_page_cache_lru(page, mc, pcl->obj.index + nr, gfp)) { - /* turn into a temporary shortlived page (1 ref) */ - set_page_private(page, Z_EROFS_SHORTLIVED_PAGE); + filemap_add_folio(mc, zbv.folio, pcl->obj.index + nr, gfp)) { + /* turn into a temporary shortlived folio (1 ref) */ + zbv.folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE; return; } - attach_page_private(page, pcl); + folio_attach_private(zbv.folio, pcl); /* drop a refcount added by allocpage (then 2 refs in total here) */ - put_page(page); + folio_put(zbv.folio); } static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb, -- Gitee From c7bad8a9e7a587b7374ed6a58c48bfe98c800229 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 5 Mar 2024 17:14:47 +0800 Subject: [PATCH 1767/2138] erofs: convert z_erofs_submissionqueue_endio() to folios ANBZ: #11101 commit 9266f2dc5e1158e7466e9db48b4e9a750ee4e3a5 upstream. Use bio_for_each_folio() to iterate over each folio in the bio and there is no large folios for now. 
Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240305091448.1384242-5-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/zdata.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index d78cc54a96f5..63990c8192f2 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1566,19 +1566,19 @@ static void z_erofs_submissionqueue_endio(struct bio *bio) { struct z_erofs_decompressqueue *q = bio->bi_private; blk_status_t err = bio->bi_status; - struct bio_vec *bvec; - struct bvec_iter_all iter_all; + struct folio_iter fi; - bio_for_each_segment_all(bvec, bio, iter_all) { - struct page *page = bvec->bv_page; + bio_for_each_folio_all(fi, bio) { + struct folio *folio = fi.folio; - DBG_BUGON(PageUptodate(page)); - DBG_BUGON(z_erofs_page_is_invalidated(page)); - if (erofs_page_is_managed(EROFS_SB(q->sb), page)) { - if (!err) - SetPageUptodate(page); - unlock_page(page); - } + DBG_BUGON(folio_test_uptodate(folio)); + DBG_BUGON(z_erofs_page_is_invalidated(&folio->page)); + if (!erofs_page_is_managed(EROFS_SB(q->sb), &folio->page)) + continue; + + if (!err) + folio_mark_uptodate(folio); + folio_unlock(folio); } if (err) q->eio = true; -- Gitee From a462841dad23a3f3301ec8faf6f7d99eeffb4223 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 5 Mar 2024 17:14:48 +0800 Subject: [PATCH 1768/2138] erofs: refine managed cache operations to folios ANBZ: #11101 commit 706fd68fce3a5737286afd3e6422ab9258bd3e94 upstream. Convert erofs_try_to_free_all_cached_pages() and z_erofs_cache_release_folio(). Besides, erofs_page_is_managed() is moved to zdata.c and renamed as erofs_folio_is_managed(). 
Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240305091448.1384242-6-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/compress.h | 7 ---- fs/erofs/decompressor_deflate.c | 3 -- fs/erofs/decompressor_lzma.c | 3 -- fs/erofs/internal.h | 4 +-- fs/erofs/utils.c | 2 +- fs/erofs/zdata.c | 63 ++++++++++++++++----------------- 6 files changed, 34 insertions(+), 48 deletions(-) diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h index 7cc5841577b2..333587ba6183 100644 --- a/fs/erofs/compress.h +++ b/fs/erofs/compress.h @@ -81,13 +81,6 @@ static inline bool z_erofs_put_shortlivedpage(struct page **pagepool, return true; } -#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping) -static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi, - struct page *page) -{ - return page->mapping == MNGD_MAPPING(sbi); -} - int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf, unsigned int padbufsize); extern const struct z_erofs_decompressor erofs_decompressors[]; diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c index 26350c5b040e..3a3461561a3c 100644 --- a/fs/erofs/decompressor_deflate.c +++ b/fs/erofs/decompressor_deflate.c @@ -215,9 +215,6 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, if (rq->out[no] != rq->in[j]) continue; - - DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb), - rq->in[j])); tmppage = erofs_allocpage(pgpl, rq->gfp); if (!tmppage) { err = -ENOMEM; diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c index 6ca357d83cfa..4b28dc130c9f 100644 --- a/fs/erofs/decompressor_lzma.c +++ b/fs/erofs/decompressor_lzma.c @@ -258,9 +258,6 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, if (rq->out[no] != rq->in[j]) continue; - - DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb), - rq->in[j])); tmppage = erofs_allocpage(pgpl, 
rq->gfp); if (!tmppage) { err = -ENOMEM; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index c69174675caf..5f5444a225df 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -459,8 +459,8 @@ int __init erofs_init_shrinker(void); void erofs_exit_shrinker(void); int __init z_erofs_init_zip_subsystem(void); void z_erofs_exit_zip_subsystem(void); -int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, - struct erofs_workgroup *egrp); +int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi, + struct erofs_workgroup *egrp); int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, int flags); void *erofs_get_pcpubuf(unsigned int requiredpages); diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c index 603ded4db58e..5ba75c963fec 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/utils.c @@ -129,7 +129,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, * the XArray. Otherwise some cached pages could be still attached to * the orphan old workgroup when the new one is available in the tree. 
*/ - if (erofs_try_to_free_all_cached_pages(sbi, grp)) + if (erofs_try_to_free_all_cached_folios(sbi, grp)) goto out; /* diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 63990c8192f2..c1bd4d8392eb 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -119,6 +119,12 @@ static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl) return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT; } +#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping) +static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo) +{ + return fo->mapping == MNGD_MAPPING(sbi); +} + /* * bit 30: I/O error occurred on this folio * bit 0 - 29: remaining parts to complete this folio @@ -611,9 +617,9 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE; } -/* called by erofs_shrinker to get rid of all compressed_pages */ -int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, - struct erofs_workgroup *grp) +/* called by erofs_shrinker to get rid of all cached compressed bvecs */ +int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi, + struct erofs_workgroup *grp) { struct z_erofs_pcluster *const pcl = container_of(grp, struct z_erofs_pcluster, obj); @@ -621,27 +627,22 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, int i; DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); - /* - * refcount of workgroup is now freezed as 0, - * therefore no need to worry about available decompression users. 
- */ + /* There is no actice user since the pcluster is now freezed */ for (i = 0; i < pclusterpages; ++i) { - struct page *page = pcl->compressed_bvecs[i].page; + struct folio *folio = pcl->compressed_bvecs[i].folio; - if (!page) + if (!folio) continue; - /* block other users from reclaiming or migrating the page */ - if (!trylock_page(page)) + /* Avoid reclaiming or migrating this folio */ + if (!folio_trylock(folio)) return -EBUSY; - if (!erofs_page_is_managed(sbi, page)) + if (!erofs_folio_is_managed(sbi, folio)) continue; - - /* barrier is implied in the following 'unlock_page' */ - WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); - detach_page_private(page); - unlock_page(page); + pcl->compressed_bvecs[i].folio = NULL; + folio_detach_private(folio); + folio_unlock(folio); } return 0; } @@ -658,20 +659,17 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp) ret = false; spin_lock(&pcl->obj.lockref.lock); - if (pcl->obj.lockref.count > 0) - goto out; - - DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); - for (i = 0; i < pclusterpages; ++i) { - if (pcl->compressed_bvecs[i].page == &folio->page) { - WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); - ret = true; - break; + if (pcl->obj.lockref.count <= 0) { + DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); + for (i = 0; i < pclusterpages; ++i) { + if (pcl->compressed_bvecs[i].folio == folio) { + pcl->compressed_bvecs[i].folio = NULL; + folio_detach_private(folio); + ret = true; + break; + } } } - if (ret) - folio_detach_private(folio); -out: spin_unlock(&pcl->obj.lockref.lock); return ret; } @@ -1201,7 +1199,7 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be, be->compressed_pages[i] = page; if (z_erofs_is_inline_pcluster(pcl) || - erofs_page_is_managed(EROFS_SB(be->sb), page)) { + erofs_folio_is_managed(EROFS_SB(be->sb), page_folio(page))) { if (!PageUptodate(page)) err = -EIO; continue; @@ -1286,7 +1284,8 @@ static int z_erofs_decompress_pcluster(struct 
z_erofs_decompress_backend *be, /* consider shortlived pages added when decompressing */ page = be->compressed_pages[i]; - if (!page || erofs_page_is_managed(sbi, page)) + if (!page || + erofs_folio_is_managed(sbi, page_folio(page))) continue; (void)z_erofs_put_shortlivedpage(be->pagepool, page); WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); @@ -1573,7 +1572,7 @@ static void z_erofs_submissionqueue_endio(struct bio *bio) DBG_BUGON(folio_test_uptodate(folio)); DBG_BUGON(z_erofs_page_is_invalidated(&folio->page)); - if (!erofs_page_is_managed(EROFS_SB(q->sb), &folio->page)) + if (!erofs_folio_is_managed(EROFS_SB(q->sb), folio)) continue; if (!err) -- Gitee From 0da838ffb7c55c609f23419c4e9cf5987ee53f16 Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Fri, 8 Mar 2024 17:41:58 +0800 Subject: [PATCH 1769/2138] erofs: make iov_iter describe target buffers over fscache ANBZ: #11101 commit f2151df5743536e0b98a2094bd58b52d4e060016 upstream. So far the fscache mode supports uncompressed data only, and the data read from fscache is put directly into the target page cache. As the support for compressed data in fscache mode is going to be introduced, rework the fscache internals so that the following compressed part could make the raw data read from fscache be directed to the target buffer it wants, decompress the raw data, and finally fill the page cache with the decompressed data. As the first step, a new structure, i.e. erofs_fscache_io (io), is introduced to describe a generic read request from the fscache, while the caller can specify the target buffer it wants in the iov_iter structure (io->iter). Besides, the caller can also specify its completion callback and private data through erofs_fscache_io, which will be called to make further handling, e.g. unlocking the page cache for uncompressed data or decompressing the read raw data, when the read request from the fscache completes. 
Now erofs_fscache_read_io_async() serves as a generic interface for reading raw data from fscache for both compressed and uncompressed data. The erofs_fscache_rq structure is kept to describe a request to fill the page cache in the specified range. Signed-off-by: Jingbo Xu Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20240308094159.40547-1-jefflexu@linux.alibaba.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/fscache.c | 235 ++++++++++++++++++++++++--------------------- 1 file changed, 123 insertions(+), 112 deletions(-) diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index 122a4753ecea..b9fb4c4da39c 100644 --- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -25,9 +25,15 @@ static struct file_system_type erofs_anon_fs_type = { .kill_sb = kill_anon_super, }; -struct erofs_fscache_request { - struct erofs_fscache_request *primary; - struct netfs_cache_resources cache_resources; +struct erofs_fscache_io { + struct netfs_cache_resources cres; + struct iov_iter iter; + netfs_io_terminated_t end_io; + void *private; + refcount_t ref; +}; + +struct erofs_fscache_rq { struct address_space *mapping; /* The mapping being accessed */ loff_t start; /* Start position */ size_t len; /* Length of the request */ @@ -36,44 +42,17 @@ struct erofs_fscache_request { refcount_t ref; }; -static struct erofs_fscache_request *erofs_fscache_req_alloc(struct address_space *mapping, - loff_t start, size_t len) -{ - struct erofs_fscache_request *req; - - req = kzalloc(sizeof(struct erofs_fscache_request), GFP_KERNEL); - if (!req) - return ERR_PTR(-ENOMEM); - - req->mapping = mapping; - req->start = start; - req->len = len; - refcount_set(&req->ref, 1); - - return req; -} - -static struct erofs_fscache_request *erofs_fscache_req_chain(struct erofs_fscache_request *primary, - size_t len) +static bool erofs_fscache_io_put(struct erofs_fscache_io *io) { - struct erofs_fscache_request 
*req; - - /* use primary request for the first submission */ - if (!primary->submitted) { - refcount_inc(&primary->ref); - return primary; - } - - req = erofs_fscache_req_alloc(primary->mapping, - primary->start + primary->submitted, len); - if (!IS_ERR(req)) { - req->primary = primary; - refcount_inc(&primary->ref); - } - return req; + if (!refcount_dec_and_test(&io->ref)) + return false; + if (io->cres.ops) + io->cres.ops->end_operation(&io->cres); + kfree(io); + return true; } -static void erofs_fscache_req_complete(struct erofs_fscache_request *req) +static void erofs_fscache_req_complete(struct erofs_fscache_rq *req) { struct folio *folio; bool failed = req->error; @@ -93,120 +72,149 @@ static void erofs_fscache_req_complete(struct erofs_fscache_request *req) rcu_read_unlock(); } -static void erofs_fscache_req_put(struct erofs_fscache_request *req) +static void erofs_fscache_req_put(struct erofs_fscache_rq *req) { - if (refcount_dec_and_test(&req->ref)) { - if (req->cache_resources.ops) - req->cache_resources.ops->end_operation(&req->cache_resources); - if (!req->primary) - erofs_fscache_req_complete(req); - else - erofs_fscache_req_put(req->primary); - kfree(req); - } + if (!refcount_dec_and_test(&req->ref)) + return; + erofs_fscache_req_complete(req); + kfree(req); +} + +static struct erofs_fscache_rq *erofs_fscache_req_alloc(struct address_space *mapping, + loff_t start, size_t len) +{ + struct erofs_fscache_rq *req = kzalloc(sizeof(*req), GFP_KERNEL); + + if (!req) + return NULL; + req->mapping = mapping; + req->start = start; + req->len = len; + refcount_set(&req->ref, 1); + return req; } -static void erofs_fscache_subreq_complete(void *priv, +static void erofs_fscache_req_io_put(struct erofs_fscache_io *io) +{ + struct erofs_fscache_rq *req = io->private; + + if (erofs_fscache_io_put(io)) + erofs_fscache_req_put(req); +} + +static void erofs_fscache_req_end_io(void *priv, ssize_t transferred_or_error, bool was_async) { - struct erofs_fscache_request *req 
= priv; + struct erofs_fscache_io *io = priv; + struct erofs_fscache_rq *req = io->private; - if (IS_ERR_VALUE(transferred_or_error)) { - if (req->primary) - req->primary->error = transferred_or_error; - else - req->error = transferred_or_error; - } - erofs_fscache_req_put(req); + if (IS_ERR_VALUE(transferred_or_error)) + req->error = transferred_or_error; + erofs_fscache_req_io_put(io); +} + +static struct erofs_fscache_io *erofs_fscache_req_io_alloc(struct erofs_fscache_rq *req) +{ + struct erofs_fscache_io *io = kzalloc(sizeof(*io), GFP_KERNEL); + + if (!io) + return NULL; + io->end_io = erofs_fscache_req_end_io; + io->private = req; + refcount_inc(&req->ref); + refcount_set(&io->ref, 1); + return io; } /* - * Read data from fscache (cookie, pstart, len), and fill the read data into - * page cache described by (req->mapping, lstart, len). @pstart describeis the - * start physical address in the cache file. + * Read data from fscache described by cookie at pstart physical address + * offset, and fill the read data into buffer described by io->iter. 
*/ -static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie, - struct erofs_fscache_request *req, loff_t pstart, size_t len) +static int erofs_fscache_read_io_async(struct fscache_cookie *cookie, + loff_t pstart, struct erofs_fscache_io *io) { enum netfs_io_source source; - struct super_block *sb = req->mapping->host->i_sb; - struct netfs_cache_resources *cres = &req->cache_resources; - struct iov_iter iter; - loff_t lstart = req->start + req->submitted; - size_t done = 0; + struct netfs_cache_resources *cres = &io->cres; + struct iov_iter *iter = &io->iter; int ret; - DBG_BUGON(len > req->len - req->submitted); - ret = fscache_begin_read_operation(cres, cookie); if (ret) return ret; - while (done < len) { - loff_t sstart = pstart + done; - size_t slen = len - done; + while (iov_iter_count(iter)) { + size_t orig_count = iov_iter_count(iter), len = orig_count; unsigned long flags = 1 << NETFS_SREQ_ONDEMAND; source = cres->ops->prepare_ondemand_read(cres, - sstart, &slen, LLONG_MAX, &flags, 0); - if (WARN_ON(slen == 0)) + pstart, &len, LLONG_MAX, &flags, 0); + if (WARN_ON(len == 0)) source = NETFS_INVALID_READ; if (source != NETFS_READ_FROM_CACHE) { - erofs_err(sb, "failed to fscache prepare_read (source %d)", source); + erofs_err(NULL, "prepare_read failed (source %d)", source); return -EIO; } - refcount_inc(&req->ref); - iov_iter_xarray(&iter, ITER_DEST, &req->mapping->i_pages, - lstart + done, slen); - - ret = fscache_read(cres, sstart, &iter, NETFS_READ_HOLE_FAIL, - erofs_fscache_subreq_complete, req); + iov_iter_truncate(iter, len); + refcount_inc(&io->ref); + ret = fscache_read(cres, pstart, iter, NETFS_READ_HOLE_FAIL, + io->end_io, io); if (ret == -EIOCBQUEUED) ret = 0; if (ret) { - erofs_err(sb, "failed to fscache_read (ret %d)", ret); + erofs_err(NULL, "fscache_read failed (ret %d)", ret); return ret; } + if (WARN_ON(iov_iter_count(iter))) + return -EIO; - done += slen; + iov_iter_reexpand(iter, orig_count - len); + pstart += len; } - 
DBG_BUGON(done != len); return 0; } static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio) { - int ret; struct erofs_fscache *ctx = folio->mapping->host->i_private; - struct erofs_fscache_request *req; + int ret = -ENOMEM; + struct erofs_fscache_rq *req; + struct erofs_fscache_io *io; req = erofs_fscache_req_alloc(folio->mapping, folio_pos(folio), folio_size(folio)); - if (IS_ERR(req)) { + if (!req) { folio_unlock(folio); - return PTR_ERR(req); + return ret; } - ret = erofs_fscache_read_folios_async(ctx->cookie, req, - folio_pos(folio), folio_size(folio)); + io = erofs_fscache_req_io_alloc(req); + if (!io) { + req->error = ret; + goto out; + } + iov_iter_xarray(&io->iter, ITER_DEST, &folio->mapping->i_pages, + folio_pos(folio), folio_size(folio)); + + ret = erofs_fscache_read_io_async(ctx->cookie, folio_pos(folio), io); if (ret) req->error = ret; + erofs_fscache_req_io_put(io); +out: erofs_fscache_req_put(req); return ret; } -static int erofs_fscache_data_read_slice(struct erofs_fscache_request *primary) +static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req) { - struct address_space *mapping = primary->mapping; + struct address_space *mapping = req->mapping; struct inode *inode = mapping->host; struct super_block *sb = inode->i_sb; - struct erofs_fscache_request *req; + struct erofs_fscache_io *io; struct erofs_map_blocks map; struct erofs_map_dev mdev; - struct iov_iter iter; - loff_t pos = primary->start + primary->submitted; + loff_t pos = req->start + req->submitted; size_t count; int ret; @@ -217,6 +225,7 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_request *primary) if (map.m_flags & EROFS_MAP_META) { struct erofs_buf buf = __EROFS_BUF_INITIALIZER; + struct iov_iter iter; erofs_blk_t blknr; size_t offset, size; void *src; @@ -237,15 +246,17 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_request *primary) } iov_iter_zero(PAGE_SIZE - size, &iter); erofs_put_metabuf(&buf); - 
primary->submitted += PAGE_SIZE; + req->submitted += PAGE_SIZE; return 0; } - count = primary->len - primary->submitted; + count = req->len - req->submitted; if (!(map.m_flags & EROFS_MAP_MAPPED)) { + struct iov_iter iter; + iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, count); iov_iter_zero(count, &iter); - primary->submitted += count; + req->submitted += count; return 0; } @@ -260,18 +271,19 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_request *primary) if (ret) return ret; - req = erofs_fscache_req_chain(primary, count); - if (IS_ERR(req)) - return PTR_ERR(req); + io = erofs_fscache_req_io_alloc(req); + if (!io) + return -ENOMEM; + iov_iter_xarray(&io->iter, ITER_DEST, &mapping->i_pages, pos, count); + ret = erofs_fscache_read_io_async(mdev.m_fscache->cookie, + mdev.m_pa + (pos - map.m_la), io); + erofs_fscache_req_io_put(io); - ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie, - req, mdev.m_pa + (pos - map.m_la), count); - erofs_fscache_req_put(req); - primary->submitted += count; + req->submitted += count; return ret; } -static int erofs_fscache_data_read(struct erofs_fscache_request *req) +static int erofs_fscache_data_read(struct erofs_fscache_rq *req) { int ret; @@ -280,20 +292,19 @@ static int erofs_fscache_data_read(struct erofs_fscache_request *req) if (ret) req->error = ret; } while (!ret && req->submitted < req->len); - return ret; } static int erofs_fscache_read_folio(struct file *file, struct folio *folio) { - struct erofs_fscache_request *req; + struct erofs_fscache_rq *req; int ret; req = erofs_fscache_req_alloc(folio->mapping, folio_pos(folio), folio_size(folio)); - if (IS_ERR(req)) { + if (!req) { folio_unlock(folio); - return PTR_ERR(req); + return -ENOMEM; } ret = erofs_fscache_data_read(req); @@ -303,14 +314,14 @@ static int erofs_fscache_read_folio(struct file *file, struct folio *folio) static void erofs_fscache_readahead(struct readahead_control *rac) { - struct erofs_fscache_request *req; + 
struct erofs_fscache_rq *req; if (!readahead_count(rac)) return; req = erofs_fscache_req_alloc(rac->mapping, readahead_pos(rac), readahead_length(rac)); - if (IS_ERR(req)) + if (!req) return; /* The request completion will drop refs on the folios. */ -- Gitee From b0850f9385d5ada9da3aa8d3e9339972413a0d8b Mon Sep 17 00:00:00 2001 From: Jingbo Xu Date: Fri, 8 Mar 2024 17:41:59 +0800 Subject: [PATCH 1770/2138] erofs: support compressed inodes over fscache ANBZ: #11101 commit a1bafc3109d713ed83f73d61ba5cb1e6fd80fdbc upstream. Since fscache can utilize iov_iter to write dest buffers, bio_vec can be used in this way too. To simplify this, pseudo bios are prepared and bio_vec will be filled with bio_add_page(). And a common .bi_end_io will be called directly to handle I/O completions. Signed-off-by: Jingbo Xu Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20240308094159.40547-2-jefflexu@linux.alibaba.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/fscache.c | 47 +++++++++++++++++++++++++++++++++++++++++++++ fs/erofs/inode.c | 14 ++++++-------- fs/erofs/internal.h | 4 ++++ fs/erofs/zdata.c | 32 ++++++++++++++++++------------ 4 files changed, 77 insertions(+), 20 deletions(-) diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index b9fb4c4da39c..8aff1a724805 100644 --- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -174,6 +174,53 @@ static int erofs_fscache_read_io_async(struct fscache_cookie *cookie, return 0; } +struct erofs_fscache_bio { + struct erofs_fscache_io io; + struct bio bio; /* w/o bdev to share bio_add_page/endio() */ + struct bio_vec bvecs[BIO_MAX_VECS]; +}; + +static void erofs_fscache_bio_endio(void *priv, + ssize_t transferred_or_error, bool was_async) +{ + struct erofs_fscache_bio *io = priv; + + if (IS_ERR_VALUE(transferred_or_error)) + io->bio.bi_status = errno_to_blk_status(transferred_or_error); + io->bio.bi_end_io(&io->bio); + 
BUILD_BUG_ON(offsetof(struct erofs_fscache_bio, io) != 0); + erofs_fscache_io_put(&io->io); +} + +struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev) +{ + struct erofs_fscache_bio *io; + + io = kmalloc(sizeof(*io), GFP_KERNEL | __GFP_NOFAIL); + bio_init(&io->bio, NULL, io->bvecs, BIO_MAX_VECS, REQ_OP_READ); + io->io.private = mdev->m_fscache->cookie; + io->io.end_io = erofs_fscache_bio_endio; + refcount_set(&io->io.ref, 1); + return &io->bio; +} + +void erofs_fscache_submit_bio(struct bio *bio) +{ + struct erofs_fscache_bio *io = container_of(bio, + struct erofs_fscache_bio, bio); + int ret; + + iov_iter_bvec(&io->io.iter, ITER_DEST, io->bvecs, bio->bi_vcnt, + bio->bi_iter.bi_size); + ret = erofs_fscache_read_io_async(io->io.private, + bio->bi_iter.bi_sector << 9, &io->io); + erofs_fscache_io_put(&io->io); + if (!ret) + return; + bio->bi_status = errno_to_blk_status(ret); + bio->bi_end_io(bio); +} + static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio) { struct erofs_fscache *ctx = folio->mapping->host->i_private; diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 5372dbc27e2c..09f74a70d3d7 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -250,14 +250,12 @@ static int erofs_fill_inode(struct inode *inode) if (erofs_inode_is_data_compressed(vi->datalayout)) { #ifdef CONFIG_EROFS_FS_ZIP - if (!erofs_is_fscache_mode(inode->i_sb)) { - DO_ONCE_LITE_IF(inode->i_sb->s_blocksize != PAGE_SIZE, - erofs_info, inode->i_sb, - "EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!"); - inode->i_mapping->a_ops = &z_erofs_aops; - err = 0; - goto out_unlock; - } + DO_ONCE_LITE_IF(inode->i_blkbits != PAGE_SHIFT, + erofs_info, inode->i_sb, + "EXPERIMENTAL EROFS subpage compressed block support in use. 
Use at your own risk!"); + inode->i_mapping->a_ops = &z_erofs_aops; + err = 0; + goto out_unlock; #endif err = -EOPNOTSUPP; goto out_unlock; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 5f5444a225df..b3f571f43071 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -505,6 +505,8 @@ void erofs_fscache_unregister_fs(struct super_block *sb); struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb, char *name, unsigned int flags); void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache); +struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev); +void erofs_fscache_submit_bio(struct bio *bio); #else static inline int erofs_fscache_register_fs(struct super_block *sb) { @@ -522,6 +524,8 @@ struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb, static inline void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache) { } +static inline struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev) { return NULL; } +static inline void erofs_fscache_submit_bio(struct bio *bio) {} #endif #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index c1bd4d8392eb..3216b920d369 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1561,7 +1561,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, qtail[JQ_BYPASS] = &pcl->next; } -static void z_erofs_submissionqueue_endio(struct bio *bio) +static void z_erofs_endio(struct bio *bio) { struct z_erofs_decompressqueue *q = bio->bi_private; blk_status_t err = bio->bi_status; @@ -1582,7 +1582,8 @@ static void z_erofs_submissionqueue_endio(struct bio *bio) if (err) q->eio = true; z_erofs_decompress_kickoff(q, -1); - bio_put(bio); + if (bio->bi_bdev) + bio_put(bio); } static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, @@ -1596,7 +1597,6 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, z_erofs_next_pcluster_t owned_head = 
f->owned_head; /* bio is NULL initially, so no need to initialize last_{index,bdev} */ erofs_off_t last_pa; - struct block_device *last_bdev; unsigned int nr_bios = 0; struct bio *bio = NULL; unsigned long pflags; @@ -1643,9 +1643,13 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, continue; if (bio && (cur != last_pa || - last_bdev != mdev.m_bdev)) { -submit_bio_retry: - submit_bio(bio); + bio->bi_bdev != mdev.m_bdev)) { +io_retry: + if (!erofs_is_fscache_mode(sb)) + submit_bio(bio); + else + erofs_fscache_submit_bio(bio); + if (memstall) { psi_memstall_leave(&pflags); memstall = 0; @@ -1660,15 +1664,16 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, } if (!bio) { - bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS, - REQ_OP_READ, GFP_NOIO); - bio->bi_end_io = z_erofs_submissionqueue_endio; + bio = erofs_is_fscache_mode(sb) ? + erofs_fscache_bio_alloc(&mdev) : + bio_alloc(mdev.m_bdev, BIO_MAX_VECS, + REQ_OP_READ, GFP_NOIO); + bio->bi_end_io = z_erofs_endio; bio->bi_iter.bi_sector = cur >> 9; bio->bi_private = q[JQ_SUBMIT]; if (readahead) bio->bi_opf |= REQ_RAHEAD; ++nr_bios; - last_bdev = mdev.m_bdev; } if (cur + bvec.bv_len > end) @@ -1676,7 +1681,7 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, DBG_BUGON(bvec.bv_len < sb->s_blocksize); if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len, bvec.bv_offset)) - goto submit_bio_retry; + goto io_retry; last_pa = cur + bvec.bv_len; bypass = false; @@ -1689,7 +1694,10 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, } while (owned_head != Z_EROFS_PCLUSTER_TAIL); if (bio) { - submit_bio(bio); + if (!erofs_is_fscache_mode(sb)) + submit_bio(bio); + else + erofs_fscache_submit_bio(bio); if (memstall) psi_memstall_leave(&pflags); } -- Gitee From d81e32f42b88d227d25c7a8de5833b9633ce9fb4 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 7 Apr 2024 03:04:50 -0400 Subject: [PATCH 1771/2138] erofs: switch erofs_bread() to passing offset 
instead of block number ANBZ: #11101 commit 469ad583c1293f5d9f45183050b3beeb4a8c3475 upstream. Callers are happier that way, especially since we no longer need to play with splitting offset into block number and offset within block, passing the former to erofs_bread(), then adding the latter... erofs_bread() always reads entire pages, anyway. Signed-off-by: Al Viro Signed-off-by: Hongzhen Luo Acked-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/data.c | 5 ++--- fs/erofs/dir.c | 2 +- fs/erofs/internal.h | 2 +- fs/erofs/namei.c | 2 +- fs/erofs/super.c | 8 ++++---- fs/erofs/xattr.c | 35 +++++++++++++---------------------- fs/erofs/zdata.c | 4 ++-- 7 files changed, 24 insertions(+), 34 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 3d9721b3faa8..36465561edfe 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -29,11 +29,10 @@ void erofs_put_metabuf(struct erofs_buf *buf) * Derive the block size from inode->i_blkbits to make compatible with * anonymous inode in fscache mode. 
*/ -void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr, +void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, enum erofs_kmap_type type) { struct inode *inode = buf->inode; - erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits; pgoff_t index = offset >> PAGE_SHIFT; struct page *page = buf->page; struct folio *folio; @@ -77,7 +76,7 @@ void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb, erofs_blk_t blkaddr, enum erofs_kmap_type type) { erofs_init_metabuf(buf, sb); - return erofs_bread(buf, blkaddr, type); + return erofs_bread(buf, erofs_pos(sb, blkaddr), type); } static int erofs_map_blocks_flatmode(struct inode *inode, diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c index b80abec0531a..9d38f39bb4f7 100644 --- a/fs/erofs/dir.c +++ b/fs/erofs/dir.c @@ -63,7 +63,7 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) struct erofs_dirent *de; unsigned int nameoff, maxsize; - de = erofs_bread(&buf, i, EROFS_KMAP); + de = erofs_bread(&buf, erofs_pos(sb, i), EROFS_KMAP); if (IS_ERR(de)) { erofs_err(sb, "fail to readdir of logical block %u of nid %llu", i, EROFS_I(dir)->nid); diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index b3f571f43071..c055fec82662 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -402,7 +402,7 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf, erofs_off_t *offset, int *lengthp); void erofs_unmap_metabuf(struct erofs_buf *buf); void erofs_put_metabuf(struct erofs_buf *buf); -void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr, +void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, enum erofs_kmap_type type); void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb); void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb, diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c index f0110a78acb2..11afa48996a3 100644 --- a/fs/erofs/namei.c +++ b/fs/erofs/namei.c @@ -100,7 +100,7 @@ static void 
*erofs_find_target_block(struct erofs_buf *target, struct erofs_dirent *de; buf.inode = dir; - de = erofs_bread(&buf, mid, EROFS_KMAP); + de = erofs_bread(&buf, erofs_pos(dir->i_sb, mid), EROFS_KMAP); if (!IS_ERR(de)) { const int nameoff = nameoff_from_disk(de->nameoff, bsz); const int ndirents = nameoff / sizeof(*de); diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 7eb7efb577e8..5dab57f3f63e 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -132,11 +132,11 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf, int len, i, cnt; *offset = round_up(*offset, 4); - ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP); + ptr = erofs_bread(buf, *offset, EROFS_KMAP); if (IS_ERR(ptr)) return ptr; - len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]); + len = le16_to_cpu(*(__le16 *)ptr); if (!len) len = U16_MAX + 1; buffer = kmalloc(len, GFP_KERNEL); @@ -148,12 +148,12 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf, for (i = 0; i < len; i += cnt) { cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset), len - i); - ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP); + ptr = erofs_bread(buf, *offset, EROFS_KMAP); if (IS_ERR(ptr)) { kfree(buffer); return ptr; } - memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt); + memcpy(buffer + i, ptr, cnt); *offset += cnt; } return buffer; diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c index b58316b49a43..ec233917830a 100644 --- a/fs/erofs/xattr.c +++ b/fs/erofs/xattr.c @@ -81,13 +81,13 @@ static int erofs_init_inode_xattrs(struct inode *inode) it.pos = erofs_iloc(inode) + vi->inode_isize; /* read in shared xattr array (non-atomic, see kmalloc below) */ - it.kaddr = erofs_bread(&it.buf, erofs_blknr(sb, it.pos), EROFS_KMAP); + it.kaddr = erofs_bread(&it.buf, it.pos, EROFS_KMAP); if (IS_ERR(it.kaddr)) { ret = PTR_ERR(it.kaddr); goto out_unlock; } - ih = it.kaddr + erofs_blkoff(sb, it.pos); + ih = it.kaddr; vi->xattr_name_filter = 
le32_to_cpu(ih->h_name_filter); vi->xattr_shared_count = ih->h_shared_count; vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count, @@ -102,16 +102,14 @@ static int erofs_init_inode_xattrs(struct inode *inode) it.pos += sizeof(struct erofs_xattr_ibody_header); for (i = 0; i < vi->xattr_shared_count; ++i) { - it.kaddr = erofs_bread(&it.buf, erofs_blknr(sb, it.pos), - EROFS_KMAP); + it.kaddr = erofs_bread(&it.buf, it.pos, EROFS_KMAP); if (IS_ERR(it.kaddr)) { kfree(vi->xattr_shared_xattrs); vi->xattr_shared_xattrs = NULL; ret = PTR_ERR(it.kaddr); goto out_unlock; } - vi->xattr_shared_xattrs[i] = le32_to_cpu(*(__le32 *) - (it.kaddr + erofs_blkoff(sb, it.pos))); + vi->xattr_shared_xattrs[i] = le32_to_cpu(*(__le32 *)it.kaddr); it.pos += sizeof(__le32); } erofs_put_metabuf(&it.buf); @@ -185,12 +183,11 @@ static int erofs_xattr_copy_to_buffer(struct erofs_xattr_iter *it, void *src; for (processed = 0; processed < len; processed += slice) { - it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos), - EROFS_KMAP); + it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP); if (IS_ERR(it->kaddr)) return PTR_ERR(it->kaddr); - src = it->kaddr + erofs_blkoff(sb, it->pos); + src = it->kaddr; slice = min_t(unsigned int, sb->s_blocksize - erofs_blkoff(sb, it->pos), len - processed); memcpy(it->buffer + it->buffer_ofs, src, slice); @@ -208,8 +205,7 @@ static int erofs_listxattr_foreach(struct erofs_xattr_iter *it) int err; /* 1. handle xattr entry */ - entry = *(struct erofs_xattr_entry *) - (it->kaddr + erofs_blkoff(it->sb, it->pos)); + entry = *(struct erofs_xattr_entry *)it->kaddr; it->pos += sizeof(struct erofs_xattr_entry); base_index = entry.e_name_index; @@ -259,8 +255,7 @@ static int erofs_getxattr_foreach(struct erofs_xattr_iter *it) unsigned int slice, processed, value_sz; /* 1. 
handle xattr entry */ - entry = *(struct erofs_xattr_entry *) - (it->kaddr + erofs_blkoff(sb, it->pos)); + entry = *(struct erofs_xattr_entry *)it->kaddr; it->pos += sizeof(struct erofs_xattr_entry); value_sz = le16_to_cpu(entry.e_value_size); @@ -291,8 +286,7 @@ static int erofs_getxattr_foreach(struct erofs_xattr_iter *it) /* 2. handle xattr name */ for (processed = 0; processed < entry.e_name_len; processed += slice) { - it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos), - EROFS_KMAP); + it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP); if (IS_ERR(it->kaddr)) return PTR_ERR(it->kaddr); @@ -300,7 +294,7 @@ static int erofs_getxattr_foreach(struct erofs_xattr_iter *it) sb->s_blocksize - erofs_blkoff(sb, it->pos), entry.e_name_len - processed); if (memcmp(it->name.name + it->infix_len + processed, - it->kaddr + erofs_blkoff(sb, it->pos), slice)) + it->kaddr, slice)) return -ENOATTR; it->pos += slice; } @@ -336,13 +330,11 @@ static int erofs_xattr_iter_inline(struct erofs_xattr_iter *it, it->pos = erofs_iloc(inode) + vi->inode_isize + xattr_header_sz; while (remaining) { - it->kaddr = erofs_bread(&it->buf, erofs_blknr(it->sb, it->pos), - EROFS_KMAP); + it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP); if (IS_ERR(it->kaddr)) return PTR_ERR(it->kaddr); - entry_sz = erofs_xattr_entry_size(it->kaddr + - erofs_blkoff(it->sb, it->pos)); + entry_sz = erofs_xattr_entry_size(it->kaddr); /* xattr on-disk corruption: xattr entry beyond xattr_isize */ if (remaining < entry_sz) { DBG_BUGON(1); @@ -375,8 +367,7 @@ static int erofs_xattr_iter_shared(struct erofs_xattr_iter *it, for (i = 0; i < vi->xattr_shared_count; ++i) { it->pos = erofs_pos(sb, sbi->xattr_blkaddr) + vi->xattr_shared_xattrs[i] * sizeof(__le32); - it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos), - EROFS_KMAP); + it->kaddr = erofs_bread(&it->buf, it->pos, EROFS_KMAP); if (IS_ERR(it->kaddr)) return PTR_ERR(it->kaddr); diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 
3216b920d369..9ffdae7fcd5b 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -940,12 +940,12 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page, for (; cur < end; cur += cnt, pos += cnt) { cnt = min_t(unsigned int, end - cur, sb->s_blocksize - erofs_blkoff(sb, pos)); - src = erofs_bread(&buf, erofs_blknr(sb, pos), EROFS_KMAP); + src = erofs_bread(&buf, pos, EROFS_KMAP); if (IS_ERR(src)) { erofs_put_metabuf(&buf); return PTR_ERR(src); } - memcpy_to_page(page, cur, src + erofs_blkoff(sb, pos), cnt); + memcpy_to_page(page, cur, src, cnt); } erofs_put_metabuf(&buf); return 0; -- Gitee From c882d4937046f6d4da06016351f46ef0b07c1abd Mon Sep 17 00:00:00 2001 From: Hongbo Li Date: Wed, 24 Apr 2024 16:42:47 +0800 Subject: [PATCH 1772/2138] erofs: modify the error message when prepare_ondemand_read failed ANBZ: #11101 commit 17597b1e18d2fafef2230c987479eccaeddb4628 upstream. When prepare_ondemand_read failed, wrong error message is printed. The prepare_read is also implemented in cachefiles, so we amend it. 
Reviewed-by: Gao Xiang Signed-off-by: Hongbo Li Reviewed-by: Jingbo Xu Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20240424084247.759432-1-lihongbo22@huawei.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/fscache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index 8aff1a724805..62da538d91cb 100644 --- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -151,7 +151,7 @@ static int erofs_fscache_read_io_async(struct fscache_cookie *cookie, if (WARN_ON(len == 0)) source = NETFS_INVALID_READ; if (source != NETFS_READ_FROM_CACHE) { - erofs_err(NULL, "prepare_read failed (source %d)", source); + erofs_err(NULL, "prepare_ondemand_read failed (source %d)", source); return -EIO; } -- Gitee From 6c98bb0125a3059fb7eec35e248d36f7b9ed60fa Mon Sep 17 00:00:00 2001 From: Chunhai Guo Date: Mon, 1 Apr 2024 07:55:50 -0600 Subject: [PATCH 1773/2138] erofs: rename utils.c to zutil.c ANBZ: #11101 commit cacd5b04e24c74a813c694ec7b26a1a370b5d666 upstream. Currently, utils.c is only useful if CONFIG_EROFS_FS_ZIP is on. So let's rename it to zutil.c as well as avoid its inclusion if CONFIG_EROFS_FS_ZIP is explicitly disabled. Signed-off-by: Chunhai Guo Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20240401135550.2550043-1-guochunhai@vivo.com Signed-off-by: Gao Xiang Conflicts: 1. fs/erofs/zutil.c: Conflicts with the dynamic erofs_shrinker_info. Resolution: 1. Keep the original static erofs_shrinker_info. 
Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/Makefile | 4 ++-- fs/erofs/{utils.c => zutil.c} | 25 ++++++++++--------------- 2 files changed, 12 insertions(+), 17 deletions(-) rename fs/erofs/{utils.c => zutil.c} (96%) diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile index 994d0b9deddf..845eafdcee4a 100644 --- a/fs/erofs/Makefile +++ b/fs/erofs/Makefile @@ -1,9 +1,9 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_EROFS_FS) += erofs.o -erofs-objs := super.o inode.o data.o namei.o dir.o utils.o sysfs.o +erofs-objs := super.o inode.o data.o namei.o dir.o sysfs.o erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o -erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o pcpubuf.o +erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o pcpubuf.o zutil.o erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o erofs-$(CONFIG_EROFS_FS_ZIP_DEFLATE) += decompressor_deflate.o erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o diff --git a/fs/erofs/utils.c b/fs/erofs/zutil.c similarity index 96% rename from fs/erofs/utils.c rename to fs/erofs/zutil.c index 5ba75c963fec..e040ecd778f2 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/zutil.c @@ -5,6 +5,14 @@ */ #include "internal.h" +static atomic_long_t erofs_global_shrink_cnt; /* for all mounted instances */ +/* protected by 'erofs_sb_list_lock' */ +static unsigned int shrinker_run_no; + +/* protects the mounted 'erofs_sb_list' */ +static DEFINE_SPINLOCK(erofs_sb_list_lock); +static LIST_HEAD(erofs_sb_list); + struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp) { struct page *page = *pagepool; @@ -12,10 +20,9 @@ struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp) if (page) { DBG_BUGON(page_ref_count(page) != 1); *pagepool = (struct page *)page_private(page); - } else { - page = alloc_page(gfp); + return page; } - return page; + return alloc_page(gfp); } void erofs_release_pages(struct page **pagepool) @@ -28,10 +35,6 @@ void 
erofs_release_pages(struct page **pagepool) } } -#ifdef CONFIG_EROFS_FS_ZIP -/* global shrink count (for all mounted EROFS instances) */ -static atomic_long_t erofs_global_shrink_cnt; - static bool erofs_workgroup_get(struct erofs_workgroup *grp) { if (lockref_get_not_zero(&grp->lockref)) @@ -171,13 +174,6 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, return freed; } -/* protected by 'erofs_sb_list_lock' */ -static unsigned int shrinker_run_no; - -/* protects the mounted 'erofs_sb_list' */ -static DEFINE_SPINLOCK(erofs_sb_list_lock); -static LIST_HEAD(erofs_sb_list); - void erofs_shrinker_register(struct super_block *sb) { struct erofs_sb_info *sbi = EROFS_SB(sb); @@ -279,4 +275,3 @@ void erofs_exit_shrinker(void) { unregister_shrinker(&erofs_shrinker_info); } -#endif /* !CONFIG_EROFS_FS_ZIP */ -- Gitee From c8d05c523f3b222e11e8f6a46d26af6470e2f67d Mon Sep 17 00:00:00 2001 From: Chunhai Guo Date: Tue, 2 Apr 2024 04:00:36 -0600 Subject: [PATCH 1774/2138] erofs: rename per-CPU buffers to global buffer pool and make it configurable ANBZ: #11101 commit f36f3010f67611a45d66e773bc91e4c66a9abab5 upstream. It will cost more time if compressed buffers are allocated on demand for low-latency algorithms (like lz4) so EROFS uses per-CPU buffers to keep compressed data if in-place decompression is unfulfilled. While it is kind of wasteful of memory for a device with hundreds of CPUs, and only a small number of CPUs concurrently decompress most of the time. This patch renames it as 'global buffer pool' and makes it configurable. This allows two or more CPUs to share a common buffer to reduce memory occupation. 
Suggested-by: Gao Xiang Reviewed-by: Gao Xiang Signed-off-by: Chunhai Guo Link: https://lore.kernel.org/r/20240402100036.2673604-1-guochunhai@vivo.com Signed-off-by: Sandeep Dhavale Link: https://lore.kernel.org/r/20240408215231.3376659-1-dhavale@google.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/Makefile | 2 +- fs/erofs/decompressor.c | 6 +- fs/erofs/internal.h | 14 ++-- fs/erofs/pcpubuf.c | 148 ---------------------------------------- fs/erofs/super.c | 9 ++- fs/erofs/zutil.c | 148 ++++++++++++++++++++++++++++++++++++++++ 6 files changed, 166 insertions(+), 161 deletions(-) delete mode 100644 fs/erofs/pcpubuf.c diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile index 845eafdcee4a..20d1ec422443 100644 --- a/fs/erofs/Makefile +++ b/fs/erofs/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_EROFS_FS) += erofs.o erofs-objs := super.o inode.o data.o namei.o dir.o sysfs.o erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o -erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o pcpubuf.o zutil.o +erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o zutil.o erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o erofs-$(CONFIG_EROFS_FS_ZIP_DEFLATE) += decompressor_deflate.o erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index fce41d4875bf..2ce15ef75040 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -54,7 +54,7 @@ static int z_erofs_load_lz4_config(struct super_block *sb, sbi->lz4.max_distance_pages = distance ? 
DIV_ROUND_UP(distance, PAGE_SIZE) + 1 : LZ4_MAX_DISTANCE_PAGES; - return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks); + return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks); } /* @@ -159,7 +159,7 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx, docopy: /* Or copy compressed data which can be overlapped to per-CPU buffer */ in = rq->in; - src = erofs_get_pcpubuf(ctx->inpages); + src = z_erofs_get_gbuf(ctx->inpages); if (!src) { DBG_BUGON(1); kunmap_local(inpage); @@ -260,7 +260,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx, } else if (maptype == 1) { vm_unmap_ram(src, ctx->inpages); } else if (maptype == 2) { - erofs_put_pcpubuf(src); + z_erofs_put_gbuf(src); } else if (maptype != 3) { DBG_BUGON(1); return -EFAULT; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index c055fec82662..21a4fa70cd0b 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -463,11 +463,11 @@ int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi, struct erofs_workgroup *egrp); int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, int flags); -void *erofs_get_pcpubuf(unsigned int requiredpages); -void erofs_put_pcpubuf(void *ptr); -int erofs_pcpubuf_growsize(unsigned int nrpages); -void __init erofs_pcpubuf_init(void); -void erofs_pcpubuf_exit(void); +void *z_erofs_get_gbuf(unsigned int requiredpages); +void z_erofs_put_gbuf(void *ptr); +int z_erofs_gbuf_growsize(unsigned int nrpages); +int __init z_erofs_gbuf_init(void); +void z_erofs_gbuf_exit(void); int erofs_init_managed_cache(struct super_block *sb); int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb); #else @@ -477,8 +477,8 @@ static inline int erofs_init_shrinker(void) { return 0; } static inline void erofs_exit_shrinker(void) {} static inline int z_erofs_init_zip_subsystem(void) { return 0; } static inline void z_erofs_exit_zip_subsystem(void) {} -static inline void 
erofs_pcpubuf_init(void) {} -static inline void erofs_pcpubuf_exit(void) {} +static inline int z_erofs_gbuf_init(void) { return 0; } +static inline void z_erofs_gbuf_exit(void) {} static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; } #endif /* !CONFIG_EROFS_FS_ZIP */ diff --git a/fs/erofs/pcpubuf.c b/fs/erofs/pcpubuf.c deleted file mode 100644 index c7a4b1d77069..000000000000 --- a/fs/erofs/pcpubuf.c +++ /dev/null @@ -1,148 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) Gao Xiang - * - * For low-latency decompression algorithms (e.g. lz4), reserve consecutive - * per-CPU virtual memory (in pages) in advance to store such inplace I/O - * data if inplace decompression is failed (due to unmet inplace margin for - * example). - */ -#include "internal.h" - -struct erofs_pcpubuf { - raw_spinlock_t lock; - void *ptr; - struct page **pages; - unsigned int nrpages; -}; - -static DEFINE_PER_CPU(struct erofs_pcpubuf, erofs_pcb); - -void *erofs_get_pcpubuf(unsigned int requiredpages) - __acquires(pcb->lock) -{ - struct erofs_pcpubuf *pcb = &get_cpu_var(erofs_pcb); - - raw_spin_lock(&pcb->lock); - /* check if the per-CPU buffer is too small */ - if (requiredpages > pcb->nrpages) { - raw_spin_unlock(&pcb->lock); - put_cpu_var(erofs_pcb); - /* (for sparse checker) pretend pcb->lock is still taken */ - __acquire(pcb->lock); - return NULL; - } - return pcb->ptr; -} - -void erofs_put_pcpubuf(void *ptr) __releases(pcb->lock) -{ - struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, smp_processor_id()); - - DBG_BUGON(pcb->ptr != ptr); - raw_spin_unlock(&pcb->lock); - put_cpu_var(erofs_pcb); -} - -/* the next step: support per-CPU page buffers hotplug */ -int erofs_pcpubuf_growsize(unsigned int nrpages) -{ - static DEFINE_MUTEX(pcb_resize_mutex); - static unsigned int pcb_nrpages; - struct page *pagepool = NULL; - int delta, cpu, ret, i; - - mutex_lock(&pcb_resize_mutex); - delta = nrpages - pcb_nrpages; - ret = 0; - /* avoid shrinking 
pcpubuf, since no idea how many fses rely on */ - if (delta <= 0) - goto out; - - for_each_possible_cpu(cpu) { - struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu); - struct page **pages, **oldpages; - void *ptr, *old_ptr; - - pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL); - if (!pages) { - ret = -ENOMEM; - break; - } - - for (i = 0; i < nrpages; ++i) { - pages[i] = erofs_allocpage(&pagepool, GFP_KERNEL); - if (!pages[i]) { - ret = -ENOMEM; - oldpages = pages; - goto free_pagearray; - } - } - ptr = vmap(pages, nrpages, VM_MAP, PAGE_KERNEL); - if (!ptr) { - ret = -ENOMEM; - oldpages = pages; - goto free_pagearray; - } - raw_spin_lock(&pcb->lock); - old_ptr = pcb->ptr; - pcb->ptr = ptr; - oldpages = pcb->pages; - pcb->pages = pages; - i = pcb->nrpages; - pcb->nrpages = nrpages; - raw_spin_unlock(&pcb->lock); - - if (!oldpages) { - DBG_BUGON(old_ptr); - continue; - } - - if (old_ptr) - vunmap(old_ptr); -free_pagearray: - while (i) - erofs_pagepool_add(&pagepool, oldpages[--i]); - kfree(oldpages); - if (ret) - break; - } - pcb_nrpages = nrpages; - erofs_release_pages(&pagepool); -out: - mutex_unlock(&pcb_resize_mutex); - return ret; -} - -void __init erofs_pcpubuf_init(void) -{ - int cpu; - - for_each_possible_cpu(cpu) { - struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu); - - raw_spin_lock_init(&pcb->lock); - } -} - -void erofs_pcpubuf_exit(void) -{ - int cpu, i; - - for_each_possible_cpu(cpu) { - struct erofs_pcpubuf *pcb = &per_cpu(erofs_pcb, cpu); - - if (pcb->ptr) { - vunmap(pcb->ptr); - pcb->ptr = NULL; - } - if (!pcb->pages) - continue; - - for (i = 0; i < pcb->nrpages; ++i) - if (pcb->pages[i]) - put_page(pcb->pages[i]); - kfree(pcb->pages); - pcb->pages = NULL; - } -} diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 5dab57f3f63e..f6ecb88eaeae 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -859,7 +859,10 @@ static int __init erofs_module_init(void) if (err) goto deflate_err; - erofs_pcpubuf_init(); + err = z_erofs_gbuf_init(); + 
if (err) + goto gbuf_err; + err = z_erofs_init_zip_subsystem(); if (err) goto zip_err; @@ -879,6 +882,8 @@ static int __init erofs_module_init(void) sysfs_err: z_erofs_exit_zip_subsystem(); zip_err: + z_erofs_gbuf_exit(); +gbuf_err: z_erofs_deflate_exit(); deflate_err: z_erofs_lzma_exit(); @@ -902,7 +907,7 @@ static void __exit erofs_module_exit(void) z_erofs_lzma_exit(); erofs_exit_shrinker(); kmem_cache_destroy(erofs_inode_cachep); - erofs_pcpubuf_exit(); + z_erofs_gbuf_exit(); } static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c index e040ecd778f2..e73267817bbc 100644 --- a/fs/erofs/zutil.c +++ b/fs/erofs/zutil.c @@ -5,6 +5,18 @@ */ #include "internal.h" +struct z_erofs_gbuf { + spinlock_t lock; + void *ptr; + struct page **pages; + unsigned int nrpages; +}; + +static struct z_erofs_gbuf *z_erofs_gbufpool; +static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages; + +module_param_named(global_buffers, z_erofs_gbuf_count, uint, 0444); + static atomic_long_t erofs_global_shrink_cnt; /* for all mounted instances */ /* protected by 'erofs_sb_list_lock' */ static unsigned int shrinker_run_no; @@ -13,6 +25,142 @@ static unsigned int shrinker_run_no; static DEFINE_SPINLOCK(erofs_sb_list_lock); static LIST_HEAD(erofs_sb_list); +static unsigned int z_erofs_gbuf_id(void) +{ + return raw_smp_processor_id() % z_erofs_gbuf_count; +} + +void *z_erofs_get_gbuf(unsigned int requiredpages) + __acquires(gbuf->lock) +{ + struct z_erofs_gbuf *gbuf; + + gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()]; + spin_lock(&gbuf->lock); + /* check if the buffer is too small */ + if (requiredpages > gbuf->nrpages) { + spin_unlock(&gbuf->lock); + /* (for sparse checker) pretend gbuf->lock is still taken */ + __acquire(gbuf->lock); + return NULL; + } + return gbuf->ptr; +} + +void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock) +{ + struct z_erofs_gbuf *gbuf; + + gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()]; + 
DBG_BUGON(gbuf->ptr != ptr); + spin_unlock(&gbuf->lock); +} + +int z_erofs_gbuf_growsize(unsigned int nrpages) +{ + static DEFINE_MUTEX(gbuf_resize_mutex); + struct page *pagepool = NULL; + int delta, ret, i, j; + + mutex_lock(&gbuf_resize_mutex); + delta = nrpages - z_erofs_gbuf_nrpages; + ret = 0; + /* avoid shrinking gbufs, since no idea how many fses rely on */ + if (delta <= 0) + goto out; + + for (i = 0; i < z_erofs_gbuf_count; ++i) { + struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i]; + struct page **pages, **tmp_pages; + void *ptr, *old_ptr = NULL; + + ret = -ENOMEM; + tmp_pages = kcalloc(nrpages, sizeof(*tmp_pages), GFP_KERNEL); + if (!tmp_pages) + break; + for (j = 0; j < nrpages; ++j) { + tmp_pages[j] = erofs_allocpage(&pagepool, GFP_KERNEL); + if (!tmp_pages[j]) + goto free_pagearray; + } + ptr = vmap(tmp_pages, nrpages, VM_MAP, PAGE_KERNEL); + if (!ptr) + goto free_pagearray; + + pages = tmp_pages; + spin_lock(&gbuf->lock); + old_ptr = gbuf->ptr; + gbuf->ptr = ptr; + tmp_pages = gbuf->pages; + gbuf->pages = pages; + j = gbuf->nrpages; + gbuf->nrpages = nrpages; + spin_unlock(&gbuf->lock); + ret = 0; + if (!tmp_pages) { + DBG_BUGON(old_ptr); + continue; + } + + if (old_ptr) + vunmap(old_ptr); +free_pagearray: + while (j) + erofs_pagepool_add(&pagepool, tmp_pages[--j]); + kfree(tmp_pages); + if (ret) + break; + } + z_erofs_gbuf_nrpages = nrpages; + erofs_release_pages(&pagepool); +out: + mutex_unlock(&gbuf_resize_mutex); + return ret; +} + +int __init z_erofs_gbuf_init(void) +{ + unsigned int i = num_possible_cpus(); + + if (!z_erofs_gbuf_count) + z_erofs_gbuf_count = i; + else + z_erofs_gbuf_count = min(z_erofs_gbuf_count, i); + + z_erofs_gbufpool = kcalloc(z_erofs_gbuf_count, + sizeof(*z_erofs_gbufpool), GFP_KERNEL); + if (!z_erofs_gbufpool) + return -ENOMEM; + + for (i = 0; i < z_erofs_gbuf_count; ++i) + spin_lock_init(&z_erofs_gbufpool[i].lock); + return 0; +} + +void z_erofs_gbuf_exit(void) +{ + int i; + + for (i = 0; i < z_erofs_gbuf_count; ++i) { 
+ struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i]; + + if (gbuf->ptr) { + vunmap(gbuf->ptr); + gbuf->ptr = NULL; + } + + if (!gbuf->pages) + continue; + + for (i = 0; i < gbuf->nrpages; ++i) + if (gbuf->pages[i]) + put_page(gbuf->pages[i]); + kfree(gbuf->pages); + gbuf->pages = NULL; + } + kfree(z_erofs_gbufpool); +} + struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp) { struct page *page = *pagepool; -- Gitee From fcfb6188f08f090caea7405f45432980db9254ba Mon Sep 17 00:00:00 2001 From: Chunhai Guo Date: Tue, 2 Apr 2024 03:27:57 -0600 Subject: [PATCH 1775/2138] erofs: do not use pagepool in z_erofs_gbuf_growsize() ANBZ: #11101 commit d6db47e571dcaecaeaafa8840d00ae849ae3907b upstream. Let's use alloc_pages_bulk_array() for simplicity and get rid of unnecessary pagepool. Signed-off-by: Chunhai Guo Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20240402092757.2635257-1-guochunhai@vivo.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/zutil.c | 67 ++++++++++++++++++++++-------------------------- 1 file changed, 31 insertions(+), 36 deletions(-) diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c index e73267817bbc..17c64c1e58e8 100644 --- a/fs/erofs/zutil.c +++ b/fs/erofs/zutil.c @@ -59,63 +59,58 @@ void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock) int z_erofs_gbuf_growsize(unsigned int nrpages) { static DEFINE_MUTEX(gbuf_resize_mutex); - struct page *pagepool = NULL; - int delta, ret, i, j; + struct page **tmp_pages = NULL; + struct z_erofs_gbuf *gbuf; + void *ptr, *old_ptr; + int last, i, j; mutex_lock(&gbuf_resize_mutex); - delta = nrpages - z_erofs_gbuf_nrpages; - ret = 0; /* avoid shrinking gbufs, since no idea how many fses rely on */ - if (delta <= 0) - goto out; + if (nrpages <= z_erofs_gbuf_nrpages) { + mutex_unlock(&gbuf_resize_mutex); + return 0; + } for (i = 0; i < z_erofs_gbuf_count; ++i) { - struct z_erofs_gbuf *gbuf = 
&z_erofs_gbufpool[i]; - struct page **pages, **tmp_pages; - void *ptr, *old_ptr = NULL; - - ret = -ENOMEM; + gbuf = &z_erofs_gbufpool[i]; tmp_pages = kcalloc(nrpages, sizeof(*tmp_pages), GFP_KERNEL); if (!tmp_pages) - break; - for (j = 0; j < nrpages; ++j) { - tmp_pages[j] = erofs_allocpage(&pagepool, GFP_KERNEL); - if (!tmp_pages[j]) - goto free_pagearray; - } + goto out; + + for (j = 0; j < gbuf->nrpages; ++j) + tmp_pages[j] = gbuf->pages[j]; + do { + last = j; + j = alloc_pages_bulk_array(GFP_KERNEL, nrpages, + tmp_pages); + if (last == j) + goto out; + } while (j != nrpages); + ptr = vmap(tmp_pages, nrpages, VM_MAP, PAGE_KERNEL); if (!ptr) - goto free_pagearray; + goto out; - pages = tmp_pages; spin_lock(&gbuf->lock); + kfree(gbuf->pages); + gbuf->pages = tmp_pages; old_ptr = gbuf->ptr; gbuf->ptr = ptr; - tmp_pages = gbuf->pages; - gbuf->pages = pages; - j = gbuf->nrpages; gbuf->nrpages = nrpages; spin_unlock(&gbuf->lock); - ret = 0; - if (!tmp_pages) { - DBG_BUGON(old_ptr); - continue; - } - if (old_ptr) vunmap(old_ptr); -free_pagearray: - while (j) - erofs_pagepool_add(&pagepool, tmp_pages[--j]); - kfree(tmp_pages); - if (ret) - break; } z_erofs_gbuf_nrpages = nrpages; - erofs_release_pages(&pagepool); out: + if (i < z_erofs_gbuf_count && tmp_pages) { + for (j = 0; j < nrpages; ++j) + if (tmp_pages[j] && tmp_pages[j] != gbuf->pages[j]) + __free_page(tmp_pages[j]); + kfree(tmp_pages); + } mutex_unlock(&gbuf_resize_mutex); - return ret; + return i < z_erofs_gbuf_count ? -ENOMEM : 0; } int __init z_erofs_gbuf_init(void) -- Gitee From 626fe5fe1706e591a9535f9e2c674d9daa555c02 Mon Sep 17 00:00:00 2001 From: Chunhai Guo Date: Tue, 2 Apr 2024 07:15:23 -0600 Subject: [PATCH 1776/2138] erofs: add a reserved buffer pool for lz4 decompression ANBZ: #11101 commit 0f6273ab46375b62c8dd5c987ce7c15877602831 upstream. This adds a special global buffer pool (in the end) for reserved pages. 
Using a reserved pool for LZ4 decompression significantly reduces the time spent on extra temporary page allocation for the extreme cases in low memory scenarios. The table below shows the reduction in time spent on page allocation for LZ4 decompression when using a reserved pool. The results were obtained from multi-app launch benchmarks on ARM64 Android devices running the 5.15 kernel with an 8-core CPU and 8GB of memory. In the benchmark, we launched 16 frequently-used apps, and the camera app was the last one in each round. The data in the table is the average time of camera app for each round. After using the reserved pool, there was an average improvement of 150ms in the overall launch time of our camera app, which was obtained from the systrace log. +--------------+---------------+--------------+---------+ | | w/o page pool | w/ page pool | diff | +--------------+---------------+--------------+---------+ | Average (ms) | 3434 | 21 | -99.38% | +--------------+---------------+--------------+---------+ Based on the benchmark logs, 64 pages are sufficient for 95% of scenarios. This value can be adjusted with a module parameter `reserved_pages`. The default value is 0. This pool is currently only used for the LZ4 decompressor, but it can be applied to more decompressors if needed. 
Signed-off-by: Chunhai Guo Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20240402131523.2703948-1-guochunhai@vivo.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/decompressor.c | 2 +- fs/erofs/internal.h | 6 +++- fs/erofs/zutil.c | 61 +++++++++++++++++++++++++++++++---------- 3 files changed, 52 insertions(+), 17 deletions(-) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 2ce15ef75040..ec46fc778727 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -111,7 +111,7 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx, victim = availables[--top]; get_page(victim); } else { - victim = erofs_allocpage(pagepool, rq->gfp); + victim = __erofs_allocpage(pagepool, rq->gfp, true); if (!victim) return -ENOMEM; set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE); diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 21a4fa70cd0b..566a8fd6f733 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -438,7 +438,11 @@ void erofs_unregister_sysfs(struct super_block *sb); int __init erofs_init_sysfs(void); void erofs_exit_sysfs(void); -struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp); +struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv); +static inline struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp) +{ + return __erofs_allocpage(pagepool, gfp, false); +} static inline void erofs_pagepool_add(struct page **pagepool, struct page *page) { set_page_private(page, (unsigned long)*pagepool); diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c index 17c64c1e58e8..2e7391e79e11 100644 --- a/fs/erofs/zutil.c +++ b/fs/erofs/zutil.c @@ -12,10 +12,12 @@ struct z_erofs_gbuf { unsigned int nrpages; }; -static struct z_erofs_gbuf *z_erofs_gbufpool; -static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages; +static struct z_erofs_gbuf *z_erofs_gbufpool, 
*z_erofs_rsvbuf; +static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages, + z_erofs_rsv_nrpages; module_param_named(global_buffers, z_erofs_gbuf_count, uint, 0444); +module_param_named(reserved_pages, z_erofs_rsv_nrpages, uint, 0444); static atomic_long_t erofs_global_shrink_cnt; /* for all mounted instances */ /* protected by 'erofs_sb_list_lock' */ @@ -115,19 +117,30 @@ int z_erofs_gbuf_growsize(unsigned int nrpages) int __init z_erofs_gbuf_init(void) { - unsigned int i = num_possible_cpus(); + unsigned int i, total = num_possible_cpus(); - if (!z_erofs_gbuf_count) - z_erofs_gbuf_count = i; - else - z_erofs_gbuf_count = min(z_erofs_gbuf_count, i); + if (z_erofs_gbuf_count) + total = min(z_erofs_gbuf_count, total); + z_erofs_gbuf_count = total; - z_erofs_gbufpool = kcalloc(z_erofs_gbuf_count, - sizeof(*z_erofs_gbufpool), GFP_KERNEL); + /* The last (special) global buffer is the reserved buffer */ + total += !!z_erofs_rsv_nrpages; + + z_erofs_gbufpool = kcalloc(total, sizeof(*z_erofs_gbufpool), + GFP_KERNEL); if (!z_erofs_gbufpool) return -ENOMEM; - for (i = 0; i < z_erofs_gbuf_count; ++i) + if (z_erofs_rsv_nrpages) { + z_erofs_rsvbuf = &z_erofs_gbufpool[total - 1]; + z_erofs_rsvbuf->pages = kcalloc(z_erofs_rsv_nrpages, + sizeof(*z_erofs_rsvbuf->pages), GFP_KERNEL); + if (!z_erofs_rsvbuf->pages) { + z_erofs_rsvbuf = NULL; + z_erofs_rsv_nrpages = 0; + } + } + for (i = 0; i < total; ++i) spin_lock_init(&z_erofs_gbufpool[i].lock); return 0; } @@ -136,7 +149,7 @@ void z_erofs_gbuf_exit(void) { int i; - for (i = 0; i < z_erofs_gbuf_count; ++i) { + for (i = 0; i < z_erofs_gbuf_count + (!!z_erofs_rsvbuf); ++i) { struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i]; if (gbuf->ptr) { @@ -156,16 +169,22 @@ void z_erofs_gbuf_exit(void) kfree(z_erofs_gbufpool); } -struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp) +struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv) { struct page *page = *pagepool; if (page) { - 
DBG_BUGON(page_ref_count(page) != 1); *pagepool = (struct page *)page_private(page); - return page; + } else if (tryrsv && z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages) { + spin_lock(&z_erofs_rsvbuf->lock); + if (z_erofs_rsvbuf->nrpages) + page = z_erofs_rsvbuf->pages[--z_erofs_rsvbuf->nrpages]; + spin_unlock(&z_erofs_rsvbuf->lock); } - return alloc_page(gfp); + if (!page) + page = alloc_page(gfp); + DBG_BUGON(page && page_ref_count(page) != 1); + return page; } void erofs_release_pages(struct page **pagepool) @@ -174,6 +193,18 @@ void erofs_release_pages(struct page **pagepool) struct page *page = *pagepool; *pagepool = (struct page *)page_private(page); + /* try to fill reserved global pool first */ + if (z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages < + z_erofs_rsv_nrpages) { + spin_lock(&z_erofs_rsvbuf->lock); + if (z_erofs_rsvbuf->nrpages < z_erofs_rsv_nrpages) { + z_erofs_rsvbuf->pages[z_erofs_rsvbuf->nrpages++] + = page; + spin_unlock(&z_erofs_rsvbuf->lock); + continue; + } + spin_unlock(&z_erofs_rsvbuf->lock); + } put_page(page); } } -- Gitee From 6d690505d03a39c5b58ce2c2b01e3c8c7d62e6de Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 8 May 2024 20:33:57 +0800 Subject: [PATCH 1777/2138] erofs: clean up z_erofs_load_full_lcluster() ANBZ: #11101 commit d69189428d50c9a8de6475f3c8c241f86f4e764a upstream. Only four lcluster types here, remove redundant code. No real logic changes. 
Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240508123357.3266173-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/erofs_fs.h | 5 +---- fs/erofs/zmap.c | 21 +++++---------------- 2 files changed, 6 insertions(+), 20 deletions(-) diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index a03ec70ba6f2..550baf1729d4 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -396,8 +396,7 @@ enum { Z_EROFS_LCLUSTER_TYPE_MAX }; -#define Z_EROFS_LI_LCLUSTER_TYPE_BITS 2 -#define Z_EROFS_LI_LCLUSTER_TYPE_BIT 0 +#define Z_EROFS_LI_LCLUSTER_TYPE_MASK (Z_EROFS_LCLUSTER_TYPE_MAX - 1) /* (noncompact only, HEAD) This pcluster refers to partial decompressed data */ #define Z_EROFS_LI_PARTIAL_REF (1 << 15) @@ -451,8 +450,6 @@ static inline void erofs_check_ondisk_layout_definitions(void) sizeof(struct z_erofs_lcluster_index)); BUILD_BUG_ON(sizeof(struct erofs_deviceslot) != 128); - BUILD_BUG_ON(BIT(Z_EROFS_LI_LCLUSTER_TYPE_BITS) < - Z_EROFS_LCLUSTER_TYPE_MAX - 1); /* exclude old compiler versions like gcc 7.5.0 */ BUILD_BUG_ON(__builtin_constant_p(fmh) ? 
fmh != cpu_to_le64(1ULL << 63) : 0); diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 76566c2cbf63..94e297de98d1 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -31,7 +31,7 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m, vi->inode_isize + vi->xattr_isize) + lcn * sizeof(struct z_erofs_lcluster_index); struct z_erofs_lcluster_index *di; - unsigned int advise, type; + unsigned int advise; m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb, erofs_blknr(inode->i_sb, pos), EROFS_KMAP); @@ -43,10 +43,8 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m, di = m->kaddr + erofs_blkoff(inode->i_sb, pos); advise = le16_to_cpu(di->di_advise); - type = (advise >> Z_EROFS_LI_LCLUSTER_TYPE_BIT) & - ((1 << Z_EROFS_LI_LCLUSTER_TYPE_BITS) - 1); - switch (type) { - case Z_EROFS_LCLUSTER_TYPE_NONHEAD: + m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK; + if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { m->clusterofs = 1 << vi->z_logical_clusterbits; m->delta[0] = le16_to_cpu(di->di_u.delta[0]); if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) { @@ -60,24 +58,15 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m, m->delta[0] = 1; } m->delta[1] = le16_to_cpu(di->di_u.delta[1]); - break; - case Z_EROFS_LCLUSTER_TYPE_PLAIN: - case Z_EROFS_LCLUSTER_TYPE_HEAD1: - case Z_EROFS_LCLUSTER_TYPE_HEAD2: - if (advise & Z_EROFS_LI_PARTIAL_REF) - m->partialref = true; + } else { + m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF); m->clusterofs = le16_to_cpu(di->di_clusterofs); if (m->clusterofs >= 1 << vi->z_logical_clusterbits) { DBG_BUGON(1); return -EFSCORRUPTED; } m->pblk = le32_to_cpu(di->di_u.blkaddr); - break; - default: - DBG_BUGON(1); - return -EOPNOTSUPP; } - m->type = type; return 0; } -- Gitee From 081b969f7d9c7d8621244cfdb4387e7b54581010 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 9 May 2024 07:44:53 +0800 Subject: [PATCH 1778/2138] erofs: Zstandard compression support ANBZ: #11101 commit 
7c35de4df1056a5a1fb4de042197b8f5b1033b61 upstream. Add Zstandard compression as the 4th supported algorithm since it becomes more popular now and some end users have asked this for quite a while [1][2]. Each EROFS physical cluster contains only one valid standard Zstandard frame as described in [3] so that decompression can be performed on a per-pcluster basis independently. Currently, it just leverages multi-call stream decompression APIs with internal sliding window buffers. One-shot or bufferless decompression could be implemented later for even better performance if needed. [1] https://github.com/erofs/erofs-utils/issues/6 [2] https://lore.kernel.org/r/Y08h+z6CZdnS1XBm@B-P7TQMD6M-0146.lan [3] https://www.rfc-editor.org/rfc/rfc8478.txt Acked-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240508234453.17896-1-xiang@kernel.org Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/Kconfig | 15 ++ fs/erofs/Makefile | 1 + fs/erofs/compress.h | 4 + fs/erofs/decompressor.c | 7 + fs/erofs/decompressor_zstd.c | 279 +++++++++++++++++++++++++++++++++++ fs/erofs/erofs_fs.h | 10 ++ fs/erofs/internal.h | 8 + fs/erofs/super.c | 7 + fs/erofs/zmap.c | 3 +- 9 files changed, 333 insertions(+), 1 deletion(-) create mode 100644 fs/erofs/decompressor_zstd.c diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig index 1d318f85232d..5cedeb5b957a 100644 --- a/fs/erofs/Kconfig +++ b/fs/erofs/Kconfig @@ -112,6 +112,21 @@ config EROFS_FS_ZIP_DEFLATE If unsure, say N. +config EROFS_FS_ZIP_ZSTD + bool "EROFS Zstandard compressed data support" + depends on EROFS_FS_ZIP + select ZSTD_DECOMPRESS + help + Saying Y here includes support for reading EROFS file systems + containing Zstandard compressed data. It gives better compression + ratios than the default LZ4 format, while it costs more CPU + overhead. 
+ + Zstandard support is an experimental feature for now and so most + file systems will be readable without selecting this option. + + If unsure, say N. + config EROFS_FS_ONDEMAND bool "EROFS fscache-based on-demand read support" depends on CACHEFILES_ONDEMAND && (EROFS_FS=m && FSCACHE || EROFS_FS=y && FSCACHE=y) diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile index 20d1ec422443..097d672e6b14 100644 --- a/fs/erofs/Makefile +++ b/fs/erofs/Makefile @@ -6,4 +6,5 @@ erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o zutil.o erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o erofs-$(CONFIG_EROFS_FS_ZIP_DEFLATE) += decompressor_deflate.o +erofs-$(CONFIG_EROFS_FS_ZIP_ZSTD) += decompressor_zstd.o erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h index 333587ba6183..19d53c30c8af 100644 --- a/fs/erofs/compress.h +++ b/fs/erofs/compress.h @@ -90,8 +90,12 @@ int z_erofs_load_lzma_config(struct super_block *sb, struct erofs_super_block *dsb, void *data, int size); int z_erofs_load_deflate_config(struct super_block *sb, struct erofs_super_block *dsb, void *data, int size); +int z_erofs_load_zstd_config(struct super_block *sb, + struct erofs_super_block *dsb, void *data, int size); int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, struct page **pagepool); int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, struct page **pagepool); +int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, + struct page **pgpl); #endif diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index ec46fc778727..696a3516d75a 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -399,6 +399,13 @@ const struct z_erofs_decompressor erofs_decompressors[] = { .name = "deflate" }, #endif +#ifdef CONFIG_EROFS_FS_ZIP_ZSTD + [Z_EROFS_COMPRESSION_ZSTD] = { + .config = z_erofs_load_zstd_config, + .decompress = z_erofs_zstd_decompress, + .name 
= "zstd" + }, +#endif }; int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb) diff --git a/fs/erofs/decompressor_zstd.c b/fs/erofs/decompressor_zstd.c new file mode 100644 index 000000000000..63a23cac3af4 --- /dev/null +++ b/fs/erofs/decompressor_zstd.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#include +#include "compress.h" + +struct z_erofs_zstd { + struct z_erofs_zstd *next; + u8 bounce[PAGE_SIZE]; + void *wksp; + unsigned int wkspsz; +}; + +static DEFINE_SPINLOCK(z_erofs_zstd_lock); +static unsigned int z_erofs_zstd_max_dictsize; +static unsigned int z_erofs_zstd_nstrms, z_erofs_zstd_avail_strms; +static struct z_erofs_zstd *z_erofs_zstd_head; +static DECLARE_WAIT_QUEUE_HEAD(z_erofs_zstd_wq); + +module_param_named(zstd_streams, z_erofs_zstd_nstrms, uint, 0444); + +static struct z_erofs_zstd *z_erofs_isolate_strms(bool all) +{ + struct z_erofs_zstd *strm; + +again: + spin_lock(&z_erofs_zstd_lock); + strm = z_erofs_zstd_head; + if (!strm) { + spin_unlock(&z_erofs_zstd_lock); + wait_event(z_erofs_zstd_wq, READ_ONCE(z_erofs_zstd_head)); + goto again; + } + z_erofs_zstd_head = all ? 
NULL : strm->next; + spin_unlock(&z_erofs_zstd_lock); + return strm; +} + +void z_erofs_zstd_exit(void) +{ + while (z_erofs_zstd_avail_strms) { + struct z_erofs_zstd *strm, *n; + + for (strm = z_erofs_isolate_strms(true); strm; strm = n) { + n = strm->next; + + kvfree(strm->wksp); + kfree(strm); + --z_erofs_zstd_avail_strms; + } + } +} + +int __init z_erofs_zstd_init(void) +{ + /* by default, use # of possible CPUs instead */ + if (!z_erofs_zstd_nstrms) + z_erofs_zstd_nstrms = num_possible_cpus(); + + for (; z_erofs_zstd_avail_strms < z_erofs_zstd_nstrms; + ++z_erofs_zstd_avail_strms) { + struct z_erofs_zstd *strm; + + strm = kzalloc(sizeof(*strm), GFP_KERNEL); + if (!strm) { + z_erofs_zstd_exit(); + return -ENOMEM; + } + spin_lock(&z_erofs_zstd_lock); + strm->next = z_erofs_zstd_head; + z_erofs_zstd_head = strm; + spin_unlock(&z_erofs_zstd_lock); + } + return 0; +} + +int z_erofs_load_zstd_config(struct super_block *sb, + struct erofs_super_block *dsb, void *data, int size) +{ + static DEFINE_MUTEX(zstd_resize_mutex); + struct z_erofs_zstd_cfgs *zstd = data; + unsigned int dict_size, wkspsz; + struct z_erofs_zstd *strm, *head = NULL; + void *wksp; + + if (!zstd || size < sizeof(struct z_erofs_zstd_cfgs) || zstd->format) { + erofs_err(sb, "unsupported zstd format, size=%u", size); + return -EINVAL; + } + + if (zstd->windowlog > ilog2(Z_EROFS_ZSTD_MAX_DICT_SIZE) - 10) { + erofs_err(sb, "unsupported zstd window log %u", zstd->windowlog); + return -EINVAL; + } + dict_size = 1U << (zstd->windowlog + 10); + + /* in case 2 z_erofs_load_zstd_config() race to avoid deadlock */ + mutex_lock(&zstd_resize_mutex); + if (z_erofs_zstd_max_dictsize >= dict_size) { + mutex_unlock(&zstd_resize_mutex); + return 0; + } + + /* 1. 
collect/isolate all streams for the following check */ + while (z_erofs_zstd_avail_strms) { + struct z_erofs_zstd *n; + + for (strm = z_erofs_isolate_strms(true); strm; strm = n) { + n = strm->next; + strm->next = head; + head = strm; + --z_erofs_zstd_avail_strms; + } + } + + /* 2. walk each isolated stream and grow max dict_size if needed */ + wkspsz = zstd_dstream_workspace_bound(dict_size); + for (strm = head; strm; strm = strm->next) { + wksp = kvmalloc(wkspsz, GFP_KERNEL); + if (!wksp) + break; + kvfree(strm->wksp); + strm->wksp = wksp; + strm->wkspsz = wkspsz; + } + + /* 3. push back all to the global list and update max dict_size */ + spin_lock(&z_erofs_zstd_lock); + DBG_BUGON(z_erofs_zstd_head); + z_erofs_zstd_head = head; + spin_unlock(&z_erofs_zstd_lock); + z_erofs_zstd_avail_strms = z_erofs_zstd_nstrms; + wake_up_all(&z_erofs_zstd_wq); + if (!strm) + z_erofs_zstd_max_dictsize = dict_size; + mutex_unlock(&zstd_resize_mutex); + return strm ? -ENOMEM : 0; +} + +int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, + struct page **pgpl) +{ + const unsigned int nrpages_out = + PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; + const unsigned int nrpages_in = + PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; + zstd_dstream *stream; + struct super_block *sb = rq->sb; + unsigned int insz, outsz, pofs; + struct z_erofs_zstd *strm; + zstd_in_buffer in_buf = { NULL, 0, 0 }; + zstd_out_buffer out_buf = { NULL, 0, 0 }; + u8 *kin, *kout = NULL; + bool bounced = false; + int no = -1, ni = 0, j = 0, zerr, err; + + /* 1. get the exact compressed size */ + kin = kmap_local_page(*rq->in); + err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in, + min_t(unsigned int, rq->inputsize, + sb->s_blocksize - rq->pageofs_in)); + if (err) { + kunmap_local(kin); + return err; + } + + /* 2. get an available ZSTD context */ + strm = z_erofs_isolate_strms(false); + + /* 3. 
multi-call decompress */ + insz = rq->inputsize; + outsz = rq->outputsize; + stream = zstd_init_dstream(z_erofs_zstd_max_dictsize, strm->wksp, strm->wkspsz); + if (!stream) { + err = -EIO; + goto failed_zinit; + } + + pofs = rq->pageofs_out; + in_buf.size = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in); + insz -= in_buf.size; + in_buf.src = kin + rq->pageofs_in; + do { + if (out_buf.size == out_buf.pos) { + if (++no >= nrpages_out || !outsz) { + erofs_err(sb, "insufficient space for decompressed data"); + err = -EFSCORRUPTED; + break; + } + + if (kout) + kunmap_local(kout); + out_buf.size = min_t(u32, outsz, PAGE_SIZE - pofs); + outsz -= out_buf.size; + if (!rq->out[no]) { + rq->out[no] = erofs_allocpage(pgpl, rq->gfp); + if (!rq->out[no]) { + kout = NULL; + err = -ENOMEM; + break; + } + set_page_private(rq->out[no], + Z_EROFS_SHORTLIVED_PAGE); + } + kout = kmap_local_page(rq->out[no]); + out_buf.dst = kout + pofs; + out_buf.pos = 0; + pofs = 0; + } + + if (in_buf.size == in_buf.pos && insz) { + if (++ni >= nrpages_in) { + erofs_err(sb, "invalid compressed data"); + err = -EFSCORRUPTED; + break; + } + + if (kout) /* unlike kmap(), take care of the orders */ + kunmap_local(kout); + kunmap_local(kin); + in_buf.size = min_t(u32, insz, PAGE_SIZE); + insz -= in_buf.size; + kin = kmap_local_page(rq->in[ni]); + in_buf.src = kin; + in_buf.pos = 0; + bounced = false; + if (kout) { + j = (u8 *)out_buf.dst - kout; + kout = kmap_local_page(rq->out[no]); + out_buf.dst = kout + j; + } + } + + /* + * Handle overlapping: Use bounced buffer if the compressed + * data is under processing; Or use short-lived pages from the + * on-stack pagepool where pages share among the same request + * and not _all_ inplace I/O pages are needed to be doubled. 
+ */ + if (!bounced && rq->out[no] == rq->in[ni]) { + memcpy(strm->bounce, in_buf.src, in_buf.size); + in_buf.src = strm->bounce; + bounced = true; + } + + for (j = ni + 1; j < nrpages_in; ++j) { + struct page *tmppage; + + if (rq->out[no] != rq->in[j]) + continue; + tmppage = erofs_allocpage(pgpl, rq->gfp); + if (!tmppage) { + err = -ENOMEM; + goto failed; + } + set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); + copy_highpage(tmppage, rq->in[j]); + rq->in[j] = tmppage; + } + zerr = zstd_decompress_stream(stream, &out_buf, &in_buf); + if (zstd_is_error(zerr) || (!zerr && outsz)) { + erofs_err(sb, "failed to decompress in[%u] out[%u]: %s", + rq->inputsize, rq->outputsize, + zerr ? zstd_get_error_name(zerr) : "unexpected end of stream"); + err = -EFSCORRUPTED; + break; + } + } while (outsz || out_buf.pos < out_buf.size); +failed: + if (kout) + kunmap_local(kout); +failed_zinit: + kunmap_local(kin); + /* 4. push back ZSTD stream context to the global list */ + spin_lock(&z_erofs_zstd_lock); + strm->next = z_erofs_zstd_head; + z_erofs_zstd_head = strm; + spin_unlock(&z_erofs_zstd_lock); + wake_up(&z_erofs_zstd_wq); + return err; +} diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index 550baf1729d4..6c0c270c42e1 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -296,6 +296,7 @@ enum { Z_EROFS_COMPRESSION_LZ4 = 0, Z_EROFS_COMPRESSION_LZMA = 1, Z_EROFS_COMPRESSION_DEFLATE = 2, + Z_EROFS_COMPRESSION_ZSTD = 3, Z_EROFS_COMPRESSION_MAX }; #define Z_EROFS_ALL_COMPR_ALGS ((1 << Z_EROFS_COMPRESSION_MAX) - 1) @@ -322,6 +323,15 @@ struct z_erofs_deflate_cfgs { u8 reserved[5]; } __packed; +/* 6 bytes (+ length field = 8 bytes) */ +struct z_erofs_zstd_cfgs { + u8 format; + u8 windowlog; /* windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN(10) */ + u8 reserved[4]; +} __packed; + +#define Z_EROFS_ZSTD_MAX_DICT_SIZE Z_EROFS_PCLUSTER_MAX_SIZE + /* * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on) * e.g. 
for 4k logical cluster size, 4B if compacted 2B is off; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 566a8fd6f733..b89dd170a529 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -502,6 +502,14 @@ static inline int z_erofs_deflate_init(void) { return 0; } static inline int z_erofs_deflate_exit(void) { return 0; } #endif /* !CONFIG_EROFS_FS_ZIP_DEFLATE */ +#ifdef CONFIG_EROFS_FS_ZIP_ZSTD +int __init z_erofs_zstd_init(void); +void z_erofs_zstd_exit(void); +#else +static inline int z_erofs_zstd_init(void) { return 0; } +static inline int z_erofs_zstd_exit(void) { return 0; } +#endif /* !CONFIG_EROFS_FS_ZIP_ZSTD */ + #ifdef CONFIG_EROFS_FS_ONDEMAND int erofs_fscache_register_fs(struct super_block *sb); void erofs_fscache_unregister_fs(struct super_block *sb); diff --git a/fs/erofs/super.c b/fs/erofs/super.c index f6ecb88eaeae..7ffc65c7f141 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -859,6 +859,10 @@ static int __init erofs_module_init(void) if (err) goto deflate_err; + err = z_erofs_zstd_init(); + if (err) + goto zstd_err; + err = z_erofs_gbuf_init(); if (err) goto gbuf_err; @@ -884,6 +888,8 @@ static int __init erofs_module_init(void) zip_err: z_erofs_gbuf_exit(); gbuf_err: + z_erofs_zstd_exit(); +zstd_err: z_erofs_deflate_exit(); deflate_err: z_erofs_lzma_exit(); @@ -903,6 +909,7 @@ static void __exit erofs_module_exit(void) erofs_exit_sysfs(); z_erofs_exit_zip_subsystem(); + z_erofs_zstd_exit(); z_erofs_deflate_exit(); z_erofs_lzma_exit(); erofs_exit_shrinker(); diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 94e297de98d1..7302c17d4ca8 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -551,7 +551,8 @@ static int z_erofs_do_map_blocks(struct inode *inode, if ((flags & EROFS_GET_BLOCKS_FIEMAP) || ((flags & EROFS_GET_BLOCKS_READMORE) && (map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA || - map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE) && + map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE || + 
map->m_algorithmformat == Z_EROFS_COMPRESSION_ZSTD) && map->m_llen >= i_blocksize(inode))) { err = z_erofs_get_extent_decompressedlen(&m); if (!err) -- Gitee From e3fb61e3be514aa82e4de5b38f59b09d43d467d2 Mon Sep 17 00:00:00 2001 From: Hongzhen Luo Date: Fri, 17 May 2024 17:56:52 +0800 Subject: [PATCH 1779/2138] erofs: clean up erofs_show_options() ANBZ: #11101 commit c34110e0fdfddc22b7fd606ca81303d20330bacb upstream. Avoid unnecessary #ifdefs and simplify the code a bit. Signed-off-by: Hongzhen Luo Link: https://lore.kernel.org/r/20240517095652.2282972-1-hongzhen@linux.alibaba.com Reviewed-by: Gao Xiang Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/internal.h | 3 --- fs/erofs/super.c | 28 ++++++++-------------------- 2 files changed, 8 insertions(+), 23 deletions(-) diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index b89dd170a529..46632306f185 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -64,15 +64,12 @@ enum { }; struct erofs_mount_opts { -#ifdef CONFIG_EROFS_FS_ZIP /* current strategy of how to use managed cache */ unsigned char cache_strategy; /* strategy of sync decompression (0 - auto, 1 - force on, 2 - force off) */ unsigned int sync_decompress; - /* threshold for decompression synchronously */ unsigned int max_sync_decompress_pages; -#endif unsigned int mount_opt; }; diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 7ffc65c7f141..03ab5607f41e 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -943,26 +943,14 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root) struct erofs_sb_info *sbi = EROFS_SB(root->d_sb); struct erofs_mount_opts *opt = &sbi->opt; -#ifdef CONFIG_EROFS_FS_XATTR - if (test_opt(opt, XATTR_USER)) - seq_puts(seq, ",user_xattr"); - else - seq_puts(seq, ",nouser_xattr"); -#endif -#ifdef CONFIG_EROFS_FS_POSIX_ACL - if (test_opt(opt, POSIX_ACL)) - seq_puts(seq, ",acl"); - else - seq_puts(seq, 
",noacl"); -#endif -#ifdef CONFIG_EROFS_FS_ZIP - if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED) - seq_puts(seq, ",cache_strategy=disabled"); - else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) - seq_puts(seq, ",cache_strategy=readahead"); - else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND) - seq_puts(seq, ",cache_strategy=readaround"); -#endif + if (IS_ENABLED(CONFIG_EROFS_FS_XATTR)) + seq_puts(seq, test_opt(opt, XATTR_USER) ? + ",user_xattr" : ",nouser_xattr"); + if (IS_ENABLED(CONFIG_EROFS_FS_POSIX_ACL)) + seq_puts(seq, test_opt(opt, POSIX_ACL) ? ",acl" : ",noacl"); + if (IS_ENABLED(CONFIG_EROFS_FS_ZIP)) + seq_printf(seq, ",cache_strategy=%s", + erofs_param_cache_strategy[opt->cache_strategy].name); if (test_opt(opt, DAX_ALWAYS)) seq_puts(seq, ",dax=always"); if (test_opt(opt, DAX_NEVER)) -- Gitee From 4b7cc568d043412207695a4814bc056f6b94632d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 25 Apr 2024 20:58:46 +0100 Subject: [PATCH 1780/2138] erofs: mechanically convert erofs_read_metabuf() to offsets ANBZ: #11101 commit e09815446d6944fc5590a6e5f15dd51697202441 upstream. just lift the call of erofs_pos() into the callers; it will collapse in most of them, but that's better done caller-by-caller. 
Signed-off-by: Al Viro Link: https://lore.kernel.org/r/20240425195846.GC1031757@ZenIV Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/data.c | 8 ++++---- fs/erofs/fscache.c | 2 +- fs/erofs/inode.c | 4 ++-- fs/erofs/internal.h | 2 +- fs/erofs/super.c | 2 +- fs/erofs/zdata.c | 2 +- fs/erofs/zmap.c | 6 +++--- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 36465561edfe..9395f6574089 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -73,10 +73,10 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb) } void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb, - erofs_blk_t blkaddr, enum erofs_kmap_type type) + erofs_off_t offset, enum erofs_kmap_type type) { erofs_init_metabuf(buf, sb); - return erofs_bread(buf, erofs_pos(sb, blkaddr), type); + return erofs_bread(buf, offset, type); } static int erofs_map_blocks_flatmode(struct inode *inode, @@ -153,7 +153,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map) pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, unit) + unit * chunknr; - kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP); + kaddr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, erofs_blknr(sb, pos)), EROFS_KMAP); if (IS_ERR(kaddr)) { err = PTR_ERR(kaddr); goto out; @@ -296,7 +296,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, iomap->type = IOMAP_INLINE; ptr = erofs_read_metabuf(&buf, sb, - erofs_blknr(sb, mdev.m_pa), EROFS_KMAP); + erofs_pos(sb, erofs_blknr(sb, mdev.m_pa)), EROFS_KMAP); if (IS_ERR(ptr)) return PTR_ERR(ptr); iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa); diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index 62da538d91cb..ac618b3484f1 100644 --- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -282,7 +282,7 @@ static int 
erofs_fscache_data_read_slice(struct erofs_fscache_rq *req) blknr = erofs_blknr(sb, map.m_pa); size = map.m_llen; - src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP); + src = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blknr), EROFS_KMAP); if (IS_ERR(src)) return PTR_ERR(src); diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 09f74a70d3d7..64b18fbe8a77 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -26,7 +26,7 @@ static void *erofs_read_inode(struct erofs_buf *buf, blkaddr = erofs_blknr(sb, inode_loc); *ofs = erofs_blkoff(sb, inode_loc); - kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP); + kaddr = erofs_read_metabuf(buf, sb, erofs_pos(sb, blkaddr), EROFS_KMAP); if (IS_ERR(kaddr)) { erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld", vi->nid, PTR_ERR(kaddr)); @@ -66,7 +66,7 @@ static void *erofs_read_inode(struct erofs_buf *buf, goto err_out; } memcpy(copied, dic, gotten); - kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1, + kaddr = erofs_read_metabuf(buf, sb, erofs_pos(sb, blkaddr + 1), EROFS_KMAP); if (IS_ERR(kaddr)) { erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld", diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 46632306f185..34746bb0eb1c 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -403,7 +403,7 @@ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, enum erofs_kmap_type type); void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb); void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb, - erofs_blk_t blkaddr, enum erofs_kmap_type type); + erofs_off_t offset, enum erofs_kmap_type type); int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev); int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len); diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 03ab5607f41e..69c66ee745fe 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -180,7 +180,7 @@ static int 
erofs_init_device(struct erofs_buf *buf, struct super_block *sb, struct bdev_handle *bdev_handle; void *ptr; - ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP); + ptr = erofs_read_metabuf(buf, sb, erofs_pos(sb, erofs_blknr(sb, *pos)), EROFS_KMAP); if (IS_ERR(ptr)) return PTR_ERR(ptr); dis = ptr + erofs_blkoff(sb, *pos); diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 9ffdae7fcd5b..ec68494cedef 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -868,7 +868,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) } else { void *mptr; - mptr = erofs_read_metabuf(&map->buf, sb, blknr, EROFS_NO_KMAP); + mptr = erofs_read_metabuf(&map->buf, sb, erofs_pos(sb, blknr), EROFS_NO_KMAP); if (IS_ERR(mptr)) { ret = PTR_ERR(mptr); erofs_err(sb, "failed to get inline data %d", ret); diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 7302c17d4ca8..230215afae4d 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -34,7 +34,7 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m, unsigned int advise; m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb, - erofs_blknr(inode->i_sb, pos), EROFS_KMAP); + erofs_pos(inode->i_sb, erofs_blknr(inode->i_sb, pos)), EROFS_KMAP); if (IS_ERR(m->kaddr)) return PTR_ERR(m->kaddr); @@ -256,7 +256,7 @@ static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m, out: pos += lcn * (1 << amortizedshift); m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb, - erofs_blknr(inode->i_sb, pos), EROFS_KMAP); + erofs_pos(inode->i_sb, erofs_blknr(inode->i_sb, pos)), EROFS_KMAP); if (IS_ERR(m->kaddr)) return PTR_ERR(m->kaddr); return unpack_compacted_index(m, amortizedshift, pos, lookahead); @@ -591,7 +591,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) goto out_unlock; pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8); - kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP); + kaddr = erofs_read_metabuf(&buf, sb, 
erofs_pos(sb, erofs_blknr(sb, pos)), EROFS_KMAP); if (IS_ERR(kaddr)) { err = PTR_ERR(kaddr); goto out_unlock; -- Gitee From 1025d231a79498a9e7a472910be1a4f2ba1c29e3 Mon Sep 17 00:00:00 2001 From: Hongzhen Luo Date: Tue, 26 Nov 2024 15:28:51 +0800 Subject: [PATCH 1781/2138] anolis: erofs: fix the warning in erofs_fc_fill_super ANBZ: #11101 Fixes the following warning: assignment discards 'const' qualifier from pointer target type [-Werror=discarded-qualifiers] 641 | sb->s_xattr = erofs_xattr_handlers; | ^ Fixes: 708f46fb1e62 ("erofs: move erofs_xattr_handlers and xattr_handler_map to .rodata") Signed-off-by: Hongzhen Luo Acked-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4159 --- fs/erofs/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 69c66ee745fe..d82a2fc80271 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -638,7 +638,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) } sb->s_time_gran = 1; - sb->s_xattr = erofs_xattr_handlers; + sb->s_xattr = (const struct xattr_handler **)erofs_xattr_handlers; sb->s_export_op = &erofs_export_ops; if (test_opt(&sbi->opt, POSIX_ACL)) -- Gitee From 0c1e2cd6f54f33639b45e808bda1e75de0035405 Mon Sep 17 00:00:00 2001 From: "David E. Box" Date: Wed, 29 Nov 2023 14:21:14 -0800 Subject: [PATCH 1782/2138] platform/x86/intel/vsec: Remove unnecessary return MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit ace7b6f00870cea56460df335606e35ace3c07ac upstream. In intel_vsec_add_aux(), just return from the last call to devm_add_action_or_reset() instead of checking its return value. Intel-SIG: commit ace7b6f00870 platform/x86/intel/vsec: Remove unnecessary return. Backport intel tpmi base driver update for 6.6 from 6.10 Suggested-by: Ilpo Järvinen Signed-off-by: David E. 
Box Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231129222132.2331261-3-david.e.box@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/vsec.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 343ab6a82c01..25017227a0a6 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -175,12 +175,8 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, return ret; } - ret = devm_add_action_or_reset(parent, intel_vsec_remove_aux, + return devm_add_action_or_reset(parent, intel_vsec_remove_aux, auxdev); - if (ret < 0) - return ret; - - return 0; } EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, INTEL_VSEC); -- Gitee From 702be0e2f213584a133a01cf1c47b8f642d24d91 Mon Sep 17 00:00:00 2001 From: "David E. Box" Date: Wed, 29 Nov 2023 14:21:15 -0800 Subject: [PATCH 1783/2138] platform/x86/intel/vsec: Move structures to header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit dbc01b0c86a7b23ffd06e14a84591500b04591ed upstream. In preparation for exporting an API to register Intel Vendor Specific Extended Capabilities (VSEC) from other drivers, move needed structures to the header file. Intel-SIG: commit dbc01b0c86a7 platform/x86/intel/vsec: Move structures to header. Backport intel tpmi base driver update for 6.6 from 6.10 Signed-off-by: David E. 
Box Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231129222132.2331261-4-david.e.box@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/vsec.c | 35 ------------------------------ drivers/platform/x86/intel/vsec.h | 36 +++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 35 deletions(-) diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 25017227a0a6..4dc490fd4a5b 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -24,13 +24,6 @@ #include "vsec.h" -/* Intel DVSEC offsets */ -#define INTEL_DVSEC_ENTRIES 0xA -#define INTEL_DVSEC_SIZE 0xB -#define INTEL_DVSEC_TABLE 0xC -#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0)) -#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3)) -#define TABLE_OFFSET_SHIFT 3 #define PMT_XA_START 0 #define PMT_XA_MAX INT_MAX #define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX) @@ -39,34 +32,6 @@ static DEFINE_IDA(intel_vsec_ida); static DEFINE_IDA(intel_vsec_sdsi_ida); static DEFINE_XARRAY_ALLOC(auxdev_array); -/** - * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers. 
- * @rev: Revision ID of the VSEC/DVSEC register space - * @length: Length of the VSEC/DVSEC register space - * @id: ID of the feature - * @num_entries: Number of instances of the feature - * @entry_size: Size of the discovery table for each feature - * @tbir: BAR containing the discovery tables - * @offset: BAR offset of start of the first discovery table - */ -struct intel_vsec_header { - u8 rev; - u16 length; - u16 id; - u8 num_entries; - u8 entry_size; - u8 tbir; - u32 offset; -}; - -enum intel_vsec_id { - VSEC_ID_TELEMETRY = 2, - VSEC_ID_WATCHER = 3, - VSEC_ID_CRASHLOG = 4, - VSEC_ID_SDSI = 65, - VSEC_ID_TPMI = 66, -}; - static const char *intel_vsec_name(enum intel_vsec_id id) { switch (id) { diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h index 0a6201b4a0e9..c242c07ea69c 100644 --- a/drivers/platform/x86/intel/vsec.h +++ b/drivers/platform/x86/intel/vsec.h @@ -11,9 +11,45 @@ #define VSEC_CAP_SDSI BIT(3) #define VSEC_CAP_TPMI BIT(4) +/* Intel DVSEC offsets */ +#define INTEL_DVSEC_ENTRIES 0xA +#define INTEL_DVSEC_SIZE 0xB +#define INTEL_DVSEC_TABLE 0xC +#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0)) +#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3)) +#define TABLE_OFFSET_SHIFT 3 + struct pci_dev; struct resource; +enum intel_vsec_id { + VSEC_ID_TELEMETRY = 2, + VSEC_ID_WATCHER = 3, + VSEC_ID_CRASHLOG = 4, + VSEC_ID_SDSI = 65, + VSEC_ID_TPMI = 66, +}; + +/** + * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers. 
+ * @rev: Revision ID of the VSEC/DVSEC register space + * @length: Length of the VSEC/DVSEC register space + * @id: ID of the feature + * @num_entries: Number of instances of the feature + * @entry_size: Size of the discovery table for each feature + * @tbir: BAR containing the discovery tables + * @offset: BAR offset of start of the first discovery table + */ +struct intel_vsec_header { + u8 rev; + u16 length; + u16 id; + u8 num_entries; + u8 entry_size; + u8 tbir; + u32 offset; +}; + enum intel_vsec_quirks { /* Watcher feature not supported */ VSEC_QUIRK_NO_WATCHER = BIT(0), -- Gitee From d48776b5b57362efa1e1c0cbb131037b6f0d0e98 Mon Sep 17 00:00:00 2001 From: "David E. Box" Date: Wed, 29 Nov 2023 14:21:16 -0800 Subject: [PATCH 1784/2138] platform/x86/intel/vsec: remove platform_info from vsec device structure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit 0a0a52abaa65b844afde3d7229c209a8cddc5a07 upstream. In preparation for exporting an API to register Intel Vendor Specific Extended Capabilities (VSEC) from other drivers, remove the pointer to platform_info from intel_vsec_device. This prevents a potential page fault when auxiliary drivers probe and attempt to dereference this pointer to access the needed quirks field. Instead, just add the quirks to intel_vsec_device. Intel-SIG: commit 0a0a52abaa65 platform/x86/intel/vsec: remove platform_info from vsec device structure. Backport intel tpmi base driver update for 6.6 from 6.10 Signed-off-by: David E. 
Box Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231129222132.2331261-5-david.e.box@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/pmt/class.c | 2 +- drivers/platform/x86/intel/vsec.c | 2 +- drivers/platform/x86/intel/vsec.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c index f32a233470de..2ad91d2fd954 100644 --- a/drivers/platform/x86/intel/pmt/class.c +++ b/drivers/platform/x86/intel/pmt/class.c @@ -31,7 +31,7 @@ bool intel_pmt_is_early_client_hw(struct device *dev) * differences from the server platforms (which use the Out Of Band * Management Services Module OOBMSM). */ - return !!(ivdev->info->quirks & VSEC_QUIRK_EARLY_HW); + return !!(ivdev->quirks & VSEC_QUIRK_EARLY_HW); } EXPORT_SYMBOL_NS_GPL(intel_pmt_is_early_client_hw, INTEL_PMT); diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 4dc490fd4a5b..bcdc727c4cc3 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -194,7 +194,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he intel_vsec_dev->pcidev = pdev; intel_vsec_dev->resource = res; intel_vsec_dev->num_resources = header->num_entries; - intel_vsec_dev->info = info; + intel_vsec_dev->quirks = info->quirks; if (header->id == VSEC_ID_SDSI) intel_vsec_dev->ida = &intel_vsec_sdsi_ida; diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h index c242c07ea69c..8b9fad170503 100644 --- a/drivers/platform/x86/intel/vsec.h +++ b/drivers/platform/x86/intel/vsec.h @@ -79,11 +79,11 @@ struct intel_vsec_device { struct pci_dev *pcidev; struct resource *resource; struct ida *ida; - struct intel_vsec_platform_info *info; int num_resources; int id; /* xa 
*/ void *priv_data; size_t priv_data_size; + unsigned long quirks; }; int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, -- Gitee From bdfa1ec193b82c1d18b08879c74bfbd9eb1e3391 Mon Sep 17 00:00:00 2001 From: "David E. Box" Date: Wed, 29 Nov 2023 14:21:17 -0800 Subject: [PATCH 1785/2138] platform/x86/intel/vsec: Use cleanup.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit 1d1b4770d4b661ecdf899c314ce406b9840c0c22 upstream. Use cleanup.h helpers to handle cleanup of resources in intel_vsec_add_dev() after failures. Intel-SIG: commit 1d1b4770d4b6 platform/x86/intel/vsec: Use cleanup.h. Backport Intel_tpmi base driver for 6.6 from 6.10 Signed-off-by: David E. Box Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231129222132.2331261-6-david.e.box@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/vsec.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index bcdc727c4cc3..6b0e7363397a 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -148,8 +149,9 @@ EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, INTEL_VSEC); static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header, struct intel_vsec_platform_info *info) { - struct intel_vsec_device *intel_vsec_dev; - struct resource *res, *tmp; + struct intel_vsec_device __free(kfree) *intel_vsec_dev = NULL; + struct resource __free(kfree) *res = NULL; + struct resource *tmp; unsigned long quirks = info->quirks; int i; @@ -171,10 +173,8 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he return -ENOMEM; res = 
kcalloc(header->num_entries, sizeof(*res), GFP_KERNEL); - if (!res) { - kfree(intel_vsec_dev); + if (!res) return -ENOMEM; - } if (quirks & VSEC_QUIRK_TABLE_SHIFT) header->offset >>= TABLE_OFFSET_SHIFT; @@ -192,7 +192,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he } intel_vsec_dev->pcidev = pdev; - intel_vsec_dev->resource = res; + intel_vsec_dev->resource = no_free_ptr(res); intel_vsec_dev->num_resources = header->num_entries; intel_vsec_dev->quirks = info->quirks; @@ -201,7 +201,11 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he else intel_vsec_dev->ida = &intel_vsec_ida; - return intel_vsec_add_aux(pdev, NULL, intel_vsec_dev, + /* + * Pass the ownership of intel_vsec_dev and resource within it to + * intel_vsec_add_aux() + */ + return intel_vsec_add_aux(pdev, NULL, no_free_ptr(intel_vsec_dev), intel_vsec_name(header->id)); } -- Gitee From e03682a27c013b3bf730774e64f887057a600c80 Mon Sep 17 00:00:00 2001 From: "David E. Box" Date: Wed, 29 Nov 2023 14:21:18 -0800 Subject: [PATCH 1786/2138] platform/x86/intel/vsec: Assign auxdev parent by argument MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit 6dfc2514acee37e30ce59f1f25b1f8f6aa7c1b08 upstream. Instead of checking for a NULL parent argument in intel_vsec_add_aux() and then assigning it to the probed device, remove this check and just pass the device in the call. Since this function is exported, return -EINVAL if the parent is not specified. Intel-SIG: commit 6dfc2514acee platform/x86/intel/vsec: Assign auxdev parent by argument. Backport intel tpmi base driver update for 6.6 from 6.10 Signed-off-by: David E. 
Box Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231129222132.2331261-7-david.e.box@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/vsec.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 6b0e7363397a..bcfb5d480ebd 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -103,6 +103,9 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev; int ret, id; + if (!parent) + return -EINVAL; + ret = xa_alloc(&auxdev_array, &intel_vsec_dev->id, intel_vsec_dev, PMT_XA_LIMIT, GFP_KERNEL); if (ret < 0) { @@ -121,9 +124,6 @@ int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, return id; } - if (!parent) - parent = &pdev->dev; - auxdev->id = id; auxdev->name = name; auxdev->dev.parent = parent; @@ -205,7 +205,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he * Pass the ownership of intel_vsec_dev and resource within it to * intel_vsec_add_aux() */ - return intel_vsec_add_aux(pdev, NULL, no_free_ptr(intel_vsec_dev), + return intel_vsec_add_aux(pdev, &pdev->dev, no_free_ptr(intel_vsec_dev), intel_vsec_name(header->id)); } -- Gitee From cbdb889444385855b42ceb47abcb9131f9aad9e4 Mon Sep 17 00:00:00 2001 From: Gayatri Kammela Date: Wed, 29 Nov 2023 14:21:19 -0800 Subject: [PATCH 1787/2138] platform/x86/intel/vsec: Add intel_vsec_register MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit 4edbd117ba3f7beacfb439aad60e8a5de77114b4 upstream. Add and export intel_vsec_register() to allow the registration of Intel extended capabilities from other drivers. 
Add check to look for memory conflicts before registering a new capability. Since the vsec provider may not be a PCI device, add a parent field to intel_vsec_platform_info() to allow specifying the parent device for device managed cleanup. Intel-SIG: commit 4edbd117ba3f platform/x86/intel/vsec: Add intel_vsec_register. Backport intel tpmi base driver update for 6.6 from 6.10 Signed-off-by: Gayatri Kammela Signed-off-by: David E. Box Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231129222132.2331261-8-david.e.box@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/vsec.c | 24 +++++++++++++++++++++++- drivers/platform/x86/intel/vsec.h | 4 ++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index bcfb5d480ebd..5568d6236bd6 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -152,9 +152,15 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he struct intel_vsec_device __free(kfree) *intel_vsec_dev = NULL; struct resource __free(kfree) *res = NULL; struct resource *tmp; + struct device *parent; unsigned long quirks = info->quirks; int i; + if (info->parent) + parent = info->parent; + else + parent = &pdev->dev; + if (!intel_vsec_supported(header->id, info->caps)) return -EINVAL; @@ -189,6 +195,12 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he header->offset + i * (header->entry_size * sizeof(u32)); tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1; tmp->flags = IORESOURCE_MEM; + + /* Check resource is not in use */ + if (!request_mem_region(tmp->start, resource_size(tmp), "")) + return -EBUSY; + + release_mem_region(tmp->start, resource_size(tmp)); } intel_vsec_dev->pcidev = pdev; @@ -205,7 
+217,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he * Pass the ownership of intel_vsec_dev and resource within it to * intel_vsec_add_aux() */ - return intel_vsec_add_aux(pdev, &pdev->dev, no_free_ptr(intel_vsec_dev), + return intel_vsec_add_aux(pdev, parent, no_free_ptr(intel_vsec_dev), intel_vsec_name(header->id)); } @@ -323,6 +335,16 @@ static bool intel_vsec_walk_vsec(struct pci_dev *pdev, return have_devices; } +void intel_vsec_register(struct pci_dev *pdev, + struct intel_vsec_platform_info *info) +{ + if (!pdev || !info) + return; + + intel_vsec_walk_header(pdev, info); +} +EXPORT_SYMBOL_NS_GPL(intel_vsec_register, INTEL_VSEC); + static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct intel_vsec_platform_info *info; diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h index 8b9fad170503..bb8b6452df70 100644 --- a/drivers/platform/x86/intel/vsec.h +++ b/drivers/platform/x86/intel/vsec.h @@ -69,6 +69,7 @@ enum intel_vsec_quirks { /* Platform specific data */ struct intel_vsec_platform_info { + struct device *parent; struct intel_vsec_header **headers; unsigned long caps; unsigned long quirks; @@ -99,4 +100,7 @@ static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device { return container_of(auxdev, struct intel_vsec_device, auxdev); } + +void intel_vsec_register(struct pci_dev *pdev, + struct intel_vsec_platform_info *info); #endif -- Gitee From a22523aa0f4ba7f865b17288af90efcc73a76d9f Mon Sep 17 00:00:00 2001 From: "David E. Box" Date: Wed, 29 Nov 2023 14:21:20 -0800 Subject: [PATCH 1788/2138] platform/x86/intel/vsec: Add base address field MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit e97ec7f621fbfdce07bf1b98a26883ee19281747 upstream. Some devices may emulate PCI VSEC capabilities in MMIO. In such cases the BAR is not readable from a config space. 
Provide a field for drivers to indicate the base address to be used. Intel-SIG: commit e97ec7f621fb platform/x86/intel/vsec: Add base address field. Backport intel tpmi base driver update for 6.6 from 6.10 Signed-off-by: David E. Box Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231129222132.2331261-9-david.e.box@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/pmt/class.c | 14 +++++++++++--- drivers/platform/x86/intel/vsec.c | 10 ++++++++-- drivers/platform/x86/intel/vsec.h | 2 ++ 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c index 2ad91d2fd954..32608baaa56c 100644 --- a/drivers/platform/x86/intel/pmt/class.c +++ b/drivers/platform/x86/intel/pmt/class.c @@ -160,10 +160,11 @@ static struct class intel_pmt_class = { static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, struct intel_pmt_header *header, - struct device *dev, + struct intel_vsec_device *ivdev, struct resource *disc_res) { - struct pci_dev *pci_dev = to_pci_dev(dev->parent); + struct pci_dev *pci_dev = ivdev->pcidev; + struct device *dev = &ivdev->auxdev.dev; u8 bir; /* @@ -215,6 +216,13 @@ static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, break; case ACCESS_BARID: + /* Use the provided base address if it exists */ + if (ivdev->base_addr) { + entry->base_addr = ivdev->base_addr + + GET_ADDRESS(header->base_offset); + break; + } + /* * If another BAR was specified then the base offset * represents the offset within that BAR. 
SO retrieve the @@ -319,7 +327,7 @@ int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespa if (ret) return ret; - ret = intel_pmt_populate_entry(entry, &header, dev, disc_res); + ret = intel_pmt_populate_entry(entry, &header, intel_vsec_dev, disc_res); if (ret) return ret; diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 5568d6236bd6..b68586731e45 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -154,6 +154,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he struct resource *tmp; struct device *parent; unsigned long quirks = info->quirks; + u64 base_addr; int i; if (info->parent) @@ -185,14 +186,18 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he if (quirks & VSEC_QUIRK_TABLE_SHIFT) header->offset >>= TABLE_OFFSET_SHIFT; + if (info->base_addr) + base_addr = info->base_addr; + else + base_addr = pdev->resource[header->tbir].start; + /* * The DVSEC/VSEC contains the starting offset and count for a block of * discovery tables. Create a resource array of these tables to the * auxiliary device driver. 
*/ for (i = 0, tmp = res; i < header->num_entries; i++, tmp++) { - tmp->start = pdev->resource[header->tbir].start + - header->offset + i * (header->entry_size * sizeof(u32)); + tmp->start = base_addr + header->offset + i * (header->entry_size * sizeof(u32)); tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1; tmp->flags = IORESOURCE_MEM; @@ -207,6 +212,7 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he intel_vsec_dev->resource = no_free_ptr(res); intel_vsec_dev->num_resources = header->num_entries; intel_vsec_dev->quirks = info->quirks; + intel_vsec_dev->base_addr = info->base_addr; if (header->id == VSEC_ID_SDSI) intel_vsec_dev->ida = &intel_vsec_sdsi_ida; diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h index bb8b6452df70..e23e76129691 100644 --- a/drivers/platform/x86/intel/vsec.h +++ b/drivers/platform/x86/intel/vsec.h @@ -73,6 +73,7 @@ struct intel_vsec_platform_info { struct intel_vsec_header **headers; unsigned long caps; unsigned long quirks; + u64 base_addr; }; struct intel_vsec_device { @@ -85,6 +86,7 @@ struct intel_vsec_device { void *priv_data; size_t priv_data_size; unsigned long quirks; + u64 base_addr; }; int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, -- Gitee From 63ce1e46149424bbb76044298d5b48249355b89f Mon Sep 17 00:00:00 2001 From: "David E. Box" Date: Tue, 27 Feb 2024 11:01:32 -0800 Subject: [PATCH 1789/2138] platform/x86/intel/vsec: Remove nuisance message MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit 701d40af59373ac3a60c620cbd0ceff7b2b8e565 upstream. intel_vsec_walk_header() is used to configure features from devices that don't provide a PCI VSEC or DVSEC structure. Some of these features may be unsupported and fail to load. Ignore them silently as we do for unsupported features described by VSEC/DVSEC. 
Intel-SIG: commit 701d40af5937 platform/x86/intel/vsec: Remove nuisance message. Backport intel tpmi base driver update for 6.6 from 6.10 Signed-off-by: "David E. Box" Reviewed-by: Kuppuswamy Sathyanarayanan Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240227190134.1592072-1-david.e.box@linux.intel.com Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/vsec.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index b68586731e45..47aea5f14ce1 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -236,10 +236,7 @@ static bool intel_vsec_walk_header(struct pci_dev *pdev, for ( ; *header; header++) { ret = intel_vsec_add_dev(pdev, *header, info); - if (ret) - dev_info(&pdev->dev, "Could not add device for VSEC id %d\n", - (*header)->id); - else + if (!ret) have_devices = true; } -- Gitee From f5effed28170d7c98847de9664b6e0a333c00bef Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 25 Sep 2023 12:42:19 -0700 Subject: [PATCH 1790/2138] platform/x86/intel/tpmi: Add debugfs support for read/write blocked MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit 8df012a7f513141412b3c35af204ccdb810fcc81 upstream. Display read and write blocked status of each TPMI feature in addition to disabled and locked status. This will require reading of read/write blocked state from the hardware. Currently tpmi_read_feature_status(), doesn't provide this state. Define TPMI feature state as defined in the TPMI spec. Modify the function tpmi_read_feature_status() to update full feature state instead of just disabled and locked state. Intel-SIG: commit 8df012a7f513 platform/x86/intel/tpmi: Add debugfs support for read/write blocked. 
Backport Intel_tpmi base driver for 6.6 from 6.10 Signed-off-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20230925194219.966602-1-srinivas.pandruvada@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/tpmi.c | 80 +++++++++++++++++++++---------- 1 file changed, 55 insertions(+), 25 deletions(-) diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c index 4c42c28bdd3d..086647fe9c71 100644 --- a/drivers/platform/x86/intel/tpmi.c +++ b/drivers/platform/x86/intel/tpmi.c @@ -143,6 +143,33 @@ struct tpmi_info_header { u64 lock:1; } __packed; +/** + * struct tpmi_feature_state - Structure to read hardware state of a feature + * @enabled: Enable state of a feature, 1: enabled, 0: disabled + * @reserved_1: Reserved for future use + * @write_blocked: Writes are blocked means all write operations are ignored + * @read_blocked: Reads are blocked means will read 0xFFs + * @pcs_select: Interface used by out of band software, not used in OS + * @reserved_2: Reserved for future use + * @id: TPMI ID of the feature + * @reserved_3: Reserved for future use + * @locked: When set to 1, OS can't change this register. + * + * The structure is used to read hardware state of a TPMI feature. This + * information is used for debug and restricting operations for this feature. + */ +struct tpmi_feature_state { + u32 enabled:1; + u32 reserved_1:3; + u32 write_blocked:1; + u32 read_blocked:1; + u32 pcs_select:1; + u32 reserved_2:1; + u32 id:8; + u32 reserved_3:15; + u32 locked:1; +} __packed; + /* * List of supported TMPI IDs. * Some TMPI IDs are not used by Linux, so the numbers are not consecutive. 
@@ -202,6 +229,7 @@ EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, INTEL_TPMI); #define TPMI_CONTROL_STATUS_OFFSET 0x00 #define TPMI_COMMAND_OFFSET 0x08 +#define TMPI_CONTROL_DATA_VAL_OFFSET 0x0c /* * Spec is calling for max 1 seconds to get ownership at the worst @@ -230,7 +258,6 @@ EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, INTEL_TPMI); /* TPMI command data registers */ #define TMPI_CONTROL_DATA_CMD GENMASK_ULL(7, 0) -#define TMPI_CONTROL_DATA_VAL GENMASK_ULL(63, 32) #define TPMI_CONTROL_DATA_VAL_FEATURE GENMASK_ULL(48, 40) /* Command to send via control interface */ @@ -240,9 +267,6 @@ EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, INTEL_TPMI); #define TPMI_CMD_LEN_MASK GENMASK_ULL(18, 16) -#define TPMI_STATE_DISABLED BIT_ULL(0) -#define TPMI_STATE_LOCKED BIT_ULL(31) - /* Mutex to complete get feature status without interruption */ static DEFINE_MUTEX(tpmi_dev_lock); @@ -256,7 +280,7 @@ static int tpmi_wait_for_owner(struct intel_tpmi_info *tpmi_info, u8 owner) } static int tpmi_read_feature_status(struct intel_tpmi_info *tpmi_info, int feature_id, - int *locked, int *disabled) + struct tpmi_feature_state *feature_state) { u64 control, data; int ret; @@ -306,17 +330,8 @@ static int tpmi_read_feature_status(struct intel_tpmi_info *tpmi_info, int featu } /* Response is ready */ - data = readq(tpmi_info->tpmi_control_mem + TPMI_COMMAND_OFFSET); - data = FIELD_GET(TMPI_CONTROL_DATA_VAL, data); - - *disabled = 0; - *locked = 0; - - if (!(data & TPMI_STATE_DISABLED)) - *disabled = 1; - - if (data & TPMI_STATE_LOCKED) - *locked = 1; + memcpy_fromio(feature_state, tpmi_info->tpmi_control_mem + TMPI_CONTROL_DATA_VAL_OFFSET, + sizeof(*feature_state)); ret = 0; @@ -335,34 +350,49 @@ int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, { struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent); struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev); + struct tpmi_feature_state feature_state; + 
int ret; + + ret = tpmi_read_feature_status(tpmi_info, feature_id, &feature_state); + if (ret) + return ret; + + *locked = feature_state.locked; + *disabled = !feature_state.enabled; - return tpmi_read_feature_status(tpmi_info, feature_id, locked, disabled); + return 0; } EXPORT_SYMBOL_NS_GPL(tpmi_get_feature_status, INTEL_TPMI); static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused) { struct intel_tpmi_info *tpmi_info = s->private; + int locked, disabled, read_blocked, write_blocked; + struct tpmi_feature_state feature_state; struct intel_tpmi_pm_feature *pfs; - int locked, disabled, ret, i; + int ret, i; seq_printf(s, "tpmi PFS start offset 0x:%llx\n", tpmi_info->pfs_start); - seq_puts(s, "tpmi_id\t\tentries\t\tsize\t\tcap_offset\tattribute\tvsec_offset\tlocked\tdisabled\n"); + seq_puts(s, "tpmi_id\t\tentries\t\tsize\t\tcap_offset\tattribute\tvsec_offset\tlocked\tdisabled\tread_blocked\twrite_blocked\n"); for (i = 0; i < tpmi_info->feature_count; ++i) { pfs = &tpmi_info->tpmi_features[i]; - ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &locked, - &disabled); + ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &feature_state); if (ret) { locked = 'U'; disabled = 'U'; + read_blocked = 'U'; + write_blocked = 'U'; } else { - disabled = disabled ? 'Y' : 'N'; - locked = locked ? 'Y' : 'N'; + disabled = feature_state.enabled ? 'N' : 'Y'; + locked = feature_state.locked ? 'Y' : 'N'; + read_blocked = feature_state.read_blocked ? 'Y' : 'N'; + write_blocked = feature_state.write_blocked ? 
'Y' : 'N'; } - seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\n", + seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\t\t%c\t\t%c\n", pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries, pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset, - pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled); + pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled, + read_blocked, write_blocked); } return 0; -- Gitee From 8e4b88cf90ff9f2f8d1926206876c3f3592134ca Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 4 Dec 2023 14:17:36 -0800 Subject: [PATCH 1791/2138] platform/x86/intel/tpmi: Don't create devices for disabled features MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit b87434f2e6fe81362d2ac57f3aba45ba89a11399 upstream. If some TPMI features are disabled, don't create auxiliary devices. In this way feature drivers will not load. While creating auxiliary devices, call tpmi_read_feature_status() to check feature state and return if the feature is disabled without creating a device. Intel-SIG: commit b87434f2e6fe platform/x86/intel/tpmi: Don't create devices for disabled features. 
Backport intel tpmi base driver update for 6.6 from 6.10 Signed-off-by: Srinivas Pandruvada Reviewed-by: Hans de Goede Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231204221740.3645130-2-srinivas.pandruvada@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/tpmi.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c index 086647fe9c71..8dd8f5e1a51f 100644 --- a/drivers/platform/x86/intel/tpmi.c +++ b/drivers/platform/x86/intel/tpmi.c @@ -598,9 +598,21 @@ static int tpmi_create_device(struct intel_tpmi_info *tpmi_info, struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev; char feature_id_name[TPMI_FEATURE_NAME_LEN]; struct intel_vsec_device *feature_vsec_dev; + struct tpmi_feature_state feature_state; struct resource *res, *tmp; const char *name; - int i; + int i, ret; + + ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &feature_state); + if (ret) + return ret; + + /* + * If not enabled, continue to look at other features in the PFS, so return -EOPNOTSUPP. + * This will not cause failure of loading of this driver. + */ + if (!feature_state.enabled) + return -EOPNOTSUPP; name = intel_tpmi_name(pfs->pfs_header.tpmi_id); if (!name) -- Gitee From 89c304f8432ebdcc01c9ccef8f3dbc682da76252 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 4 Dec 2023 14:17:37 -0800 Subject: [PATCH 1792/2138] platform/x86/intel/tpmi: Modify external interface to get read/write state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit 72dd14d241e1c6e241fc5b265746c59f306c6aa3 upstream. Modify the external interface tpmi_get_feature_status() to get read and write blocked instead of locked and disabled. 
Since auxiliary device is not created when disabled, no use of returning disabled state. Also locked state is not useful as feature driver can't use locked state in a meaningful way. Using read and write state, feature driver can decide which operations to restrict for that feature. Intel-SIG: commit 72dd14d241e1 platform/x86/intel/tpmi: Modify external interface to get read/write state. Backport intel tpmi base driver update for 6.6 from 6.10 Signed-off-by: Srinivas Pandruvada Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231204221740.3645130-3-srinivas.pandruvada@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/tpmi.c | 8 ++++---- include/linux/intel_tpmi.h | 5 ++--- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c index 8dd8f5e1a51f..93649f1da380 100644 --- a/drivers/platform/x86/intel/tpmi.c +++ b/drivers/platform/x86/intel/tpmi.c @@ -345,8 +345,8 @@ static int tpmi_read_feature_status(struct intel_tpmi_info *tpmi_info, int featu return ret; } -int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, - int *locked, int *disabled) +int tpmi_get_feature_status(struct auxiliary_device *auxdev, + int feature_id, bool *read_blocked, bool *write_blocked) { struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent); struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev); @@ -357,8 +357,8 @@ int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, if (ret) return ret; - *locked = feature_state.locked; - *disabled = !feature_state.enabled; + *read_blocked = feature_state.read_blocked; + *write_blocked = feature_state.write_blocked; return 0; } diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h index 
ee07393445f9..4f89c5bd8663 100644 --- a/include/linux/intel_tpmi.h +++ b/include/linux/intel_tpmi.h @@ -32,7 +32,6 @@ struct intel_tpmi_plat_info { struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev); struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index); int tpmi_get_resource_count(struct auxiliary_device *auxdev); - -int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, int *locked, - int *disabled); +int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, bool *read_blocked, + bool *write_blocked); #endif -- Gitee From 02d23fd3c873304bdf4194d965355c1f8cf4bd5f Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 4 Dec 2023 14:17:38 -0800 Subject: [PATCH 1793/2138] platform/x86/intel/tpmi: Move TPMI ID definition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit 046d7be6210e7f870e53eb38fd410237e9d1d88f upstream. Move TPMI ID definitions to common include file. In this way other feature drivers don't have to redefine. Intel-SIG: commit 046d7be6210e platform/x86/intel/tpmi: Move TPMI ID definition. 
Backport intel tpmi base driver update for 6.6 from 6.10 Signed-off-by: Srinivas Pandruvada Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231204221740.3645130-4-srinivas.pandruvada@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/tpmi.c | 13 ------------- include/linux/intel_tpmi.h | 13 +++++++++++++ 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c index 93649f1da380..9ba6e8a47c8e 100644 --- a/drivers/platform/x86/intel/tpmi.c +++ b/drivers/platform/x86/intel/tpmi.c @@ -170,19 +170,6 @@ struct tpmi_feature_state { u32 locked:1; } __packed; -/* - * List of supported TMPI IDs. - * Some TMPI IDs are not used by Linux, so the numbers are not consecutive. - */ -enum intel_tpmi_id { - TPMI_ID_RAPL = 0, /* Running Average Power Limit */ - TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */ - TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */ - TPMI_ID_SST = 5, /* Speed Select Technology */ - TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */ - TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */ -}; - /* * The size from hardware is in u32 units. This size is from a trusted hardware, * but better to verify for pre silicon platforms. Set size to 0, when invalid. diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h index 4f89c5bd8663..a3529b962be6 100644 --- a/include/linux/intel_tpmi.h +++ b/include/linux/intel_tpmi.h @@ -12,6 +12,19 @@ #define TPMI_MINOR_VERSION(val) FIELD_GET(GENMASK(4, 0), val) #define TPMI_MAJOR_VERSION(val) FIELD_GET(GENMASK(7, 5), val) +/* + * List of supported TMPI IDs. + * Some TMPI IDs are not used by Linux, so the numbers are not consecutive. 
+ */ +enum intel_tpmi_id { + TPMI_ID_RAPL = 0, /* Running Average Power Limit */ + TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */ + TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */ + TPMI_ID_SST = 5, /* Speed Select Technology */ + TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */ + TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */ +}; + /** * struct intel_tpmi_plat_info - Platform information for a TPMI device instance * @package_id: CPU Package id -- Gitee From 6dc10ecf180da354a4743f4466dbe4eda0b41bd9 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 23 Apr 2024 13:46:11 -0700 Subject: [PATCH 1794/2138] platform/x86/intel/tpmi: Check major version change for TPMI Information MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit 59eb0814d6a3541f55b1d6e3d52df1226de41f3e upstream. Check the major version from TPMI information header and fail to load driver if the version is not supported. Intel-SIG: commit 59eb0814d6a3 platform/x86/intel/tpmi: Check major version change for TPMI Information. 
Backport intel tpmi base driver update for 6.6 from 6.10 Signed-off-by: Srinivas Pandruvada Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240423204619.3946901-3-srinivas.pandruvada@linux.intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/tpmi.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c index 9ba6e8a47c8e..cb1563ab8673 100644 --- a/drivers/platform/x86/intel/tpmi.c +++ b/drivers/platform/x86/intel/tpmi.c @@ -665,28 +665,37 @@ static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info) } #define TPMI_INFO_BUS_INFO_OFFSET 0x08 +#define TPMI_INFO_MAJOR_VERSION 0x00 static int tpmi_process_info(struct intel_tpmi_info *tpmi_info, struct intel_tpmi_pm_feature *pfs) { struct tpmi_info_header header; void __iomem *info_mem; + u64 feature_header; + int ret = 0; - info_mem = ioremap(pfs->vsec_offset + TPMI_INFO_BUS_INFO_OFFSET, - pfs->pfs_header.entry_size * sizeof(u32) - TPMI_INFO_BUS_INFO_OFFSET); + info_mem = ioremap(pfs->vsec_offset, pfs->pfs_header.entry_size * sizeof(u32)); if (!info_mem) return -ENOMEM; - memcpy_fromio(&header, info_mem, sizeof(header)); + feature_header = readq(info_mem); + if (TPMI_MAJOR_VERSION(feature_header) != TPMI_INFO_MAJOR_VERSION) { + ret = -ENODEV; + goto error_info_header; + } + + memcpy_fromio(&header, info_mem + TPMI_INFO_BUS_INFO_OFFSET, sizeof(header)); tpmi_info->plat_info.package_id = header.pkg; tpmi_info->plat_info.bus_number = header.bus; tpmi_info->plat_info.device_number = header.dev; tpmi_info->plat_info.function_number = header.fn; +error_info_header: iounmap(info_mem); - return 0; + return ret; } static int tpmi_fetch_pfs_header(struct intel_tpmi_pm_feature *pfs, u64 start, int size) -- Gitee From 
96372ad93e666ca0039f80472a8b7a0e2777336e Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 23 Apr 2024 13:46:12 -0700 Subject: [PATCH 1795/2138] platform/x86/intel/tpmi: Align comments in kernel-doc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit 1192534407d0f8ab9a0503052777260ae74968c3 upstream. Align comments in kernel-doc for the struct intel_tpmi_plat_info. Intel-SIG: commit 1192534407d0 platform/x86/intel/tpmi: Align comments in kernel-doc. Backport intel tpmi base driver update for 6.6 from 6.10 Signed-off-by: Srinivas Pandruvada Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240423204619.3946901-4-srinivas.pandruvada@linux.intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- include/linux/intel_tpmi.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h index a3529b962be6..685a41dddf82 100644 --- a/include/linux/intel_tpmi.h +++ b/include/linux/intel_tpmi.h @@ -27,9 +27,9 @@ enum intel_tpmi_id { /** * struct intel_tpmi_plat_info - Platform information for a TPMI device instance - * @package_id: CPU Package id - * @bus_number: PCI bus number - * @device_number: PCI device number + * @package_id: CPU Package id + * @bus_number: PCI bus number + * @device_number: PCI device number * @function_number: PCI function number * * Structure to store platform data for a TPMI device instance. 
This -- Gitee From 334152486389734296913f684b2a0e6b7c23a256 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 23 Apr 2024 13:46:13 -0700 Subject: [PATCH 1796/2138] platform/x86/intel/tpmi: Add additional TPMI header fields MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit c8405cc815151a8b2fa6f7510ede8256228e45da upstream. TPMI information header added additional fields in version 2. Some of the reserved fields in version 1 are used to define new fields. Parse new fields and export as part of platform data. These fields include: - PCI segment ID - Partition ID of the package: If a package is represented by more than one PCI device, then partition ID along with cdie_mask, describes the scope. For example to update get/set properties for a compute die, one of the PCI MMIO region is selected from the partition ID. - cdie_mask: Mask of all compute dies in this partition. Intel-SIG: commit c8405cc81515 platform/x86/intel/tpmi: Add additional TPMI header fields. 
Backport intel tpmi base driver update for 6.6 from 6.10 Signed-off-by: Srinivas Pandruvada Reviewed-by: Andy Shevchenko Reviewed-by: Zhang Rui Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240423204619.3946901-5-srinivas.pandruvada@linux.intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/tpmi.c | 15 ++++++++++++++- include/linux/intel_tpmi.h | 6 ++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c index cb1563ab8673..060f9b86bc03 100644 --- a/drivers/platform/x86/intel/tpmi.c +++ b/drivers/platform/x86/intel/tpmi.c @@ -128,6 +128,9 @@ struct intel_tpmi_info { * @dev: PCI device number * @bus: PCI bus number * @pkg: CPU Package id + * @segment: PCI segment id + * @partition: Package Partition id + * @cdie_mask: Bitmap of compute dies in the current partition * @reserved: Reserved for future use * @lock: When set to 1 the register is locked and becomes read-only * until next reset. Not for use by the OS driver. 
@@ -139,7 +142,10 @@ struct tpmi_info_header { u64 dev:5; u64 bus:8; u64 pkg:8; - u64 reserved:39; + u64 segment:8; + u64 partition:2; + u64 cdie_mask:16; + u64 reserved:13; u64 lock:1; } __packed; @@ -666,6 +672,7 @@ static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info) #define TPMI_INFO_BUS_INFO_OFFSET 0x08 #define TPMI_INFO_MAJOR_VERSION 0x00 +#define TPMI_INFO_MINOR_VERSION 0x02 static int tpmi_process_info(struct intel_tpmi_info *tpmi_info, struct intel_tpmi_pm_feature *pfs) @@ -692,6 +699,12 @@ static int tpmi_process_info(struct intel_tpmi_info *tpmi_info, tpmi_info->plat_info.device_number = header.dev; tpmi_info->plat_info.function_number = header.fn; + if (TPMI_MINOR_VERSION(feature_header) >= TPMI_INFO_MINOR_VERSION) { + tpmi_info->plat_info.cdie_mask = header.cdie_mask; + tpmi_info->plat_info.partition = header.partition; + tpmi_info->plat_info.segment = header.segment; + } + error_info_header: iounmap(info_mem); diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h index 685a41dddf82..1e880cb0f454 100644 --- a/include/linux/intel_tpmi.h +++ b/include/linux/intel_tpmi.h @@ -27,7 +27,10 @@ enum intel_tpmi_id { /** * struct intel_tpmi_plat_info - Platform information for a TPMI device instance + * @cdie_mask: Mask of all compute dies in the partition * @package_id: CPU Package id + * @partition: Package partition id when multiple VSEC PCI devices per package + * @segment: PCI segment ID * @bus_number: PCI bus number * @device_number: PCI device number * @function_number: PCI function number @@ -36,7 +39,10 @@ enum intel_tpmi_id { * struct is used to return data via tpmi_get_platform_data(). */ struct intel_tpmi_plat_info { + u16 cdie_mask; u8 package_id; + u8 partition; + u8 segment; u8 bus_number; u8 device_number; u8 function_number; -- Gitee From cbf4a44659285087d474f83bad3949ec55719c59 Mon Sep 17 00:00:00 2001 From: "David E. 
Box" Date: Wed, 29 Nov 2023 14:21:21 -0800 Subject: [PATCH 1797/2138] platform/x86/intel/pmt: Add header to struct intel_pmt_entry MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit 4d1b7efee3fc703c64bacc37c4824888c5f26e8b upstream. The PMT header is passed to several functions. Instead, store the header in struct intel_pmt_entry which is also passed to these functions and shorten the argument list. This simplifies the calls in preparation for later changes. While here also perform a newline cleanup. Intel-SIG: commit 4d1b7efee3fc platform/x86/intel/pmt: Add header to struct intel_pmt_entry. Backport intel tpmi base driver update for 6.6 from 6.10 Signed-off-by: David E. Box Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231129222132.2331261-10-david.e.box@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/pmt/class.c | 8 +++----- drivers/platform/x86/intel/pmt/class.h | 16 ++++++++-------- drivers/platform/x86/intel/pmt/crashlog.c | 2 +- drivers/platform/x86/intel/pmt/telemetry.c | 2 +- 4 files changed, 13 insertions(+), 15 deletions(-) diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c index 32608baaa56c..142a24e3727d 100644 --- a/drivers/platform/x86/intel/pmt/class.c +++ b/drivers/platform/x86/intel/pmt/class.c @@ -159,12 +159,12 @@ static struct class intel_pmt_class = { }; static int intel_pmt_populate_entry(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, struct intel_vsec_device *ivdev, struct resource *disc_res) { struct pci_dev *pci_dev = ivdev->pcidev; struct device *dev = &ivdev->auxdev.dev; + struct intel_pmt_header *header = &entry->header; u8 bir; /* @@ -313,7 +313,6 @@ int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespa struct 
intel_vsec_device *intel_vsec_dev, int idx) { struct device *dev = &intel_vsec_dev->auxdev.dev; - struct intel_pmt_header header; struct resource *disc_res; int ret; @@ -323,16 +322,15 @@ int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespa if (IS_ERR(entry->disc_table)) return PTR_ERR(entry->disc_table); - ret = ns->pmt_header_decode(entry, &header, dev); + ret = ns->pmt_header_decode(entry, dev); if (ret) return ret; - ret = intel_pmt_populate_entry(entry, &header, intel_vsec_dev, disc_res); + ret = intel_pmt_populate_entry(entry, intel_vsec_dev, disc_res); if (ret) return ret; return intel_pmt_dev_register(entry, ns, dev); - } EXPORT_SYMBOL_NS_GPL(intel_pmt_dev_create, INTEL_PMT); diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h index db11d58867ce..e477a19f6700 100644 --- a/drivers/platform/x86/intel/pmt/class.h +++ b/drivers/platform/x86/intel/pmt/class.h @@ -18,7 +18,15 @@ #define GET_BIR(v) ((v) & GENMASK(2, 0)) #define GET_ADDRESS(v) ((v) & GENMASK(31, 3)) +struct intel_pmt_header { + u32 base_offset; + u32 size; + u32 guid; + u8 access_type; +}; + struct intel_pmt_entry { + struct intel_pmt_header header; struct bin_attribute pmt_bin_attr; struct kobject *kobj; void __iomem *disc_table; @@ -29,19 +37,11 @@ struct intel_pmt_entry { int devid; }; -struct intel_pmt_header { - u32 base_offset; - u32 size; - u32 guid; - u8 access_type; -}; - struct intel_pmt_namespace { const char *name; struct xarray *xa; const struct attribute_group *attr_grp; int (*pmt_header_decode)(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, struct device *dev); }; diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c index bbb3d61d09f4..4014c02cafdb 100644 --- a/drivers/platform/x86/intel/pmt/crashlog.c +++ b/drivers/platform/x86/intel/pmt/crashlog.c @@ -223,10 +223,10 @@ static const struct attribute_group pmt_crashlog_group = { }; static int 
pmt_crashlog_header_decode(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, struct device *dev) { void __iomem *disc_table = entry->disc_table; + struct intel_pmt_header *header = &entry->header; struct crashlog_entry *crashlog; if (!pmt_crashlog_supported(entry)) diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c index 39cbc87cc28a..f86080e8bebd 100644 --- a/drivers/platform/x86/intel/pmt/telemetry.c +++ b/drivers/platform/x86/intel/pmt/telemetry.c @@ -58,10 +58,10 @@ static bool pmt_telem_region_overlaps(struct intel_pmt_entry *entry, } static int pmt_telem_header_decode(struct intel_pmt_entry *entry, - struct intel_pmt_header *header, struct device *dev) { void __iomem *disc_table = entry->disc_table; + struct intel_pmt_header *header = &entry->header; if (pmt_telem_region_overlaps(entry, dev)) return 1; -- Gitee From 7ebae0e7787124459c2f5c4dd3f130f1dcdaf7d0 Mon Sep 17 00:00:00 2001 From: "David E. Box" Date: Wed, 29 Nov 2023 14:21:22 -0800 Subject: [PATCH 1798/2138] platform/x86/intel/pmt: telemetry: Export API to read telemetry MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12075 commit 416eeb2e1fc7b60ab0c7ced26ab966dd7733357d upstream. Export symbols to allow access to Intel PMT Telemetry data on available devices. Provides APIs to search, register, and read telemetry using a kref managed pointer that serves as a handle to a telemetry endpoint. To simplify searching for present devices, have the IDA start at 1 instead of 0 so that 0 can be used to indicate end of search. Intel-SIG: commit 416eeb2e1fc7 platform/x86/intel/pmt: telemetry: Export API to read telemetry. Backport intel tpmi base driver update for 6.6 from 6.10 Signed-off-by: David E. 
Box Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231129222132.2331261-11-david.e.box@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4163 --- drivers/platform/x86/intel/pmt/class.c | 21 ++- drivers/platform/x86/intel/pmt/class.h | 14 ++ drivers/platform/x86/intel/pmt/telemetry.c | 191 ++++++++++++++++++++- drivers/platform/x86/intel/pmt/telemetry.h | 126 ++++++++++++++ 4 files changed, 344 insertions(+), 8 deletions(-) create mode 100644 drivers/platform/x86/intel/pmt/telemetry.h diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c index 142a24e3727d..4b53940a64e2 100644 --- a/drivers/platform/x86/intel/pmt/class.c +++ b/drivers/platform/x86/intel/pmt/class.c @@ -17,7 +17,7 @@ #include "../vsec.h" #include "class.h" -#define PMT_XA_START 0 +#define PMT_XA_START 1 #define PMT_XA_MAX INT_MAX #define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX) #define GUID_SPR_PUNIT 0x9956f43f @@ -247,6 +247,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns, struct device *parent) { + struct intel_vsec_device *ivdev = dev_to_ivdev(parent); struct resource res = {0}; struct device *dev; int ret; @@ -270,7 +271,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, if (ns->attr_grp) { ret = sysfs_create_group(entry->kobj, ns->attr_grp); if (ret) - goto fail_sysfs; + goto fail_sysfs_create_group; } /* if size is 0 assume no data buffer, so no file needed */ @@ -295,13 +296,23 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry, entry->pmt_bin_attr.size = entry->size; ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr); - if (!ret) - return 0; + if (ret) + goto fail_ioremap; + + if (ns->pmt_add_endpoint) { + ret = ns->pmt_add_endpoint(entry, ivdev->pcidev); + if (ret) + goto fail_add_endpoint; + } + + return 0; 
+fail_add_endpoint: + sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr); fail_ioremap: if (ns->attr_grp) sysfs_remove_group(entry->kobj, ns->attr_grp); -fail_sysfs: +fail_sysfs_create_group: device_unregister(dev); fail_dev_create: xa_erase(ns->xa, entry->devid); diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h index e477a19f6700..d23c63b73ab7 100644 --- a/drivers/platform/x86/intel/pmt/class.h +++ b/drivers/platform/x86/intel/pmt/class.h @@ -9,6 +9,7 @@ #include #include "../vsec.h" +#include "telemetry.h" /* PMT access types */ #define ACCESS_BARID 2 @@ -18,6 +19,16 @@ #define GET_BIR(v) ((v) & GENMASK(2, 0)) #define GET_ADDRESS(v) ((v) & GENMASK(31, 3)) +struct pci_dev; + +struct telem_endpoint { + struct pci_dev *pcidev; + struct telem_header header; + void __iomem *base; + bool present; + struct kref kref; +}; + struct intel_pmt_header { u32 base_offset; u32 size; @@ -26,6 +37,7 @@ struct intel_pmt_header { }; struct intel_pmt_entry { + struct telem_endpoint *ep; struct intel_pmt_header header; struct bin_attribute pmt_bin_attr; struct kobject *kobj; @@ -43,6 +55,8 @@ struct intel_pmt_namespace { const struct attribute_group *attr_grp; int (*pmt_header_decode)(struct intel_pmt_entry *entry, struct device *dev); + int (*pmt_add_endpoint)(struct intel_pmt_entry *entry, + struct pci_dev *pdev); }; bool intel_pmt_is_early_client_hw(struct device *dev); diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c index f86080e8bebd..09258564dfc4 100644 --- a/drivers/platform/x86/intel/pmt/telemetry.c +++ b/drivers/platform/x86/intel/pmt/telemetry.c @@ -30,6 +30,15 @@ /* Used by client hardware to identify a fixed telemetry entry*/ #define TELEM_CLIENT_FIXED_BLOCK_GUID 0x10000000 +#define NUM_BYTES_QWORD(v) ((v) << 3) +#define SAMPLE_ID_OFFSET(v) ((v) << 3) + +#define NUM_BYTES_DWORD(v) ((v) << 2) +#define SAMPLE_ID_OFFSET32(v) ((v) << 2) + +/* Protects access to the xarray of 
telemetry endpoint handles */ +static DEFINE_MUTEX(ep_lock); + enum telem_type { TELEM_TYPE_PUNIT = 0, TELEM_TYPE_CRASHLOG, @@ -84,21 +93,195 @@ static int pmt_telem_header_decode(struct intel_pmt_entry *entry, return 0; } +static int pmt_telem_add_endpoint(struct intel_pmt_entry *entry, + struct pci_dev *pdev) +{ + struct telem_endpoint *ep; + + /* Endpoint lifetimes are managed by kref, not devres */ + entry->ep = kzalloc(sizeof(*(entry->ep)), GFP_KERNEL); + if (!entry->ep) + return -ENOMEM; + + ep = entry->ep; + ep->pcidev = pdev; + ep->header.access_type = entry->header.access_type; + ep->header.guid = entry->header.guid; + ep->header.base_offset = entry->header.base_offset; + ep->header.size = entry->header.size; + ep->base = entry->base; + ep->present = true; + + kref_init(&ep->kref); + + return 0; +} + static DEFINE_XARRAY_ALLOC(telem_array); static struct intel_pmt_namespace pmt_telem_ns = { .name = "telem", .xa = &telem_array, .pmt_header_decode = pmt_telem_header_decode, + .pmt_add_endpoint = pmt_telem_add_endpoint, }; +/* Called when all users unregister and the device is removed */ +static void pmt_telem_ep_release(struct kref *kref) +{ + struct telem_endpoint *ep; + + ep = container_of(kref, struct telem_endpoint, kref); + kfree(ep); +} + +unsigned long pmt_telem_get_next_endpoint(unsigned long start) +{ + struct intel_pmt_entry *entry; + unsigned long found_idx; + + mutex_lock(&ep_lock); + xa_for_each_start(&telem_array, found_idx, entry, start) { + /* + * Return first found index after start. + * 0 is not valid id. + */ + if (found_idx > start) + break; + } + mutex_unlock(&ep_lock); + + return found_idx == start ? 
0 : found_idx; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_get_next_endpoint, INTEL_PMT_TELEMETRY); + +struct telem_endpoint *pmt_telem_register_endpoint(int devid) +{ + struct intel_pmt_entry *entry; + unsigned long index = devid; + + mutex_lock(&ep_lock); + entry = xa_find(&telem_array, &index, index, XA_PRESENT); + if (!entry) { + mutex_unlock(&ep_lock); + return ERR_PTR(-ENXIO); + } + + kref_get(&entry->ep->kref); + mutex_unlock(&ep_lock); + + return entry->ep; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_register_endpoint, INTEL_PMT_TELEMETRY); + +void pmt_telem_unregister_endpoint(struct telem_endpoint *ep) +{ + kref_put(&ep->kref, pmt_telem_ep_release); +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_unregister_endpoint, INTEL_PMT_TELEMETRY); + +int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info) +{ + struct intel_pmt_entry *entry; + unsigned long index = devid; + int err = 0; + + if (!info) + return -EINVAL; + + mutex_lock(&ep_lock); + entry = xa_find(&telem_array, &index, index, XA_PRESENT); + if (!entry) { + err = -ENXIO; + goto unlock; + } + + info->pdev = entry->ep->pcidev; + info->header = entry->ep->header; + +unlock: + mutex_unlock(&ep_lock); + return err; + +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_get_endpoint_info, INTEL_PMT_TELEMETRY); + +int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count) +{ + u32 offset, size; + + if (!ep->present) + return -ENODEV; + + offset = SAMPLE_ID_OFFSET(id); + size = ep->header.size; + + if (offset + NUM_BYTES_QWORD(count) > size) + return -EINVAL; + + memcpy_fromio(data, ep->base + offset, NUM_BYTES_QWORD(count)); + + return ep->present ? 
0 : -EPIPE; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_read, INTEL_PMT_TELEMETRY); + +int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count) +{ + u32 offset, size; + + if (!ep->present) + return -ENODEV; + + offset = SAMPLE_ID_OFFSET32(id); + size = ep->header.size; + + if (offset + NUM_BYTES_DWORD(count) > size) + return -EINVAL; + + memcpy_fromio(data, ep->base + offset, NUM_BYTES_DWORD(count)); + + return ep->present ? 0 : -EPIPE; +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_read32, INTEL_PMT_TELEMETRY); + +struct telem_endpoint * +pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev, u32 guid, u16 pos) +{ + int devid = 0; + int inst = 0; + int err = 0; + + while ((devid = pmt_telem_get_next_endpoint(devid))) { + struct telem_endpoint_info ep_info; + + err = pmt_telem_get_endpoint_info(devid, &ep_info); + if (err) + return ERR_PTR(err); + + if (ep_info.header.guid == guid && ep_info.pdev == pcidev) { + if (inst == pos) + return pmt_telem_register_endpoint(devid); + ++inst; + } + } + + return ERR_PTR(-ENXIO); +} +EXPORT_SYMBOL_NS_GPL(pmt_telem_find_and_register_endpoint, INTEL_PMT_TELEMETRY); + static void pmt_telem_remove(struct auxiliary_device *auxdev) { struct pmt_telem_priv *priv = auxiliary_get_drvdata(auxdev); int i; - for (i = 0; i < priv->num_entries; i++) - intel_pmt_dev_destroy(&priv->entry[i], &pmt_telem_ns); -} + mutex_lock(&ep_lock); + for (i = 0; i < priv->num_entries; i++) { + struct intel_pmt_entry *entry = &priv->entry[i]; + + kref_put(&entry->ep->kref, pmt_telem_ep_release); + intel_pmt_dev_destroy(entry, &pmt_telem_ns); + } + mutex_unlock(&ep_lock); +}; static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) { @@ -117,7 +300,9 @@ static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxilia for (i = 0; i < intel_vsec_dev->num_resources; i++) { struct intel_pmt_entry *entry = &priv->entry[priv->num_entries]; + mutex_lock(&ep_lock); ret = intel_pmt_dev_create(entry, 
&pmt_telem_ns, intel_vsec_dev, i); + mutex_unlock(&ep_lock); if (ret < 0) goto abort_probe; if (ret) diff --git a/drivers/platform/x86/intel/pmt/telemetry.h b/drivers/platform/x86/intel/pmt/telemetry.h new file mode 100644 index 000000000000..d45af5512b4e --- /dev/null +++ b/drivers/platform/x86/intel/pmt/telemetry.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _TELEMETRY_H +#define _TELEMETRY_H + +/* Telemetry types */ +#define PMT_TELEM_TELEMETRY 0 +#define PMT_TELEM_CRASHLOG 1 + +struct telem_endpoint; +struct pci_dev; + +struct telem_header { + u8 access_type; + u16 size; + u32 guid; + u32 base_offset; +}; + +struct telem_endpoint_info { + struct pci_dev *pdev; + struct telem_header header; +}; + +/** + * pmt_telem_get_next_endpoint() - Get next device id for a telemetry endpoint + * @start: starting devid to look from + * + * This functions can be used in a while loop predicate to retrieve the devid + * of all available telemetry endpoints. Functions pmt_telem_get_next_endpoint() + * and pmt_telem_register_endpoint() can be used inside of the loop to examine + * endpoint info and register to receive a pointer to the endpoint. The pointer + * is then usable in the telemetry read calls to access the telemetry data. + * + * Return: + * * devid - devid of the next present endpoint from start + * * 0 - when no more endpoints are present after start + */ +unsigned long pmt_telem_get_next_endpoint(unsigned long start); + +/** + * pmt_telem_register_endpoint() - Register a telemetry endpoint + * @devid: device id/handle of the telemetry endpoint + * + * Increments the kref usage counter for the endpoint. + * + * Return: + * * endpoint - On success returns pointer to the telemetry endpoint + * * -ENXIO - telemetry endpoint not found + */ +struct telem_endpoint *pmt_telem_register_endpoint(int devid); + +/** + * pmt_telem_unregister_endpoint() - Unregister a telemetry endpoint + * @ep: ep structure to populate. 
+ * + * Decrements the kref usage counter for the endpoint. + */ +void pmt_telem_unregister_endpoint(struct telem_endpoint *ep); + +/** + * pmt_telem_get_endpoint_info() - Get info for an endpoint from its devid + * @devid: device id/handle of the telemetry endpoint + * @info: Endpoint info structure to be populated + * + * Return: + * * 0 - Success + * * -ENXIO - telemetry endpoint not found for the devid + * * -EINVAL - @info is NULL + */ +int pmt_telem_get_endpoint_info(int devid, struct telem_endpoint_info *info); + +/** + * pmt_telem_find_and_register_endpoint() - Get a telemetry endpoint from + * pci_dev device, guid and pos + * @pdev: PCI device inside the Intel vsec + * @guid: GUID of the telemetry space + * @pos: Instance of the guid + * + * Return: + * * endpoint - On success returns pointer to the telemetry endpoint + * * -ENXIO - telemetry endpoint not found + */ +struct telem_endpoint *pmt_telem_find_and_register_endpoint(struct pci_dev *pcidev, + u32 guid, u16 pos); + +/** + * pmt_telem_read() - Read qwords from counter sram using sample id + * @ep: Telemetry endpoint to be read + * @id: The beginning sample id of the metric(s) to be read + * @data: Allocated qword buffer + * @count: Number of qwords requested + * + * Callers must ensure reads are aligned. When the call returns -ENODEV, + * the device has been removed and callers should unregister the telemetry + * endpoint. + * + * Return: + * * 0 - Success + * * -ENODEV - The device is not present. + * * -EINVAL - The offset is out bounds + * * -EPIPE - The device was removed during the read. Data written + * but should be considered invalid. 
+ */ +int pmt_telem_read(struct telem_endpoint *ep, u32 id, u64 *data, u32 count); + +/** + * pmt_telem_read32() - Read qwords from counter sram using sample id + * @ep: Telemetry endpoint to be read + * @id: The beginning sample id of the metric(s) to be read + * @data: Allocated dword buffer + * @count: Number of dwords requested + * + * Callers must ensure reads are aligned. When the call returns -ENODEV, + * the device has been removed and callers should unregister the telemetry + * endpoint. + * + * Return: + * * 0 - Success + * * -ENODEV - The device is not present. + * * -EINVAL - The offset is out bounds + * * -EPIPE - The device was removed during the read. Data written + * but should be considered invalid. + */ +int pmt_telem_read32(struct telem_endpoint *ep, u32 id, u32 *data, u32 count); + +#endif -- Gitee From d0a30408904ef4524555a21398b380738d77f8a5 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Thu, 7 Sep 2023 11:02:07 -0700 Subject: [PATCH 1799/2138] cpufreq: intel_pstate: Revise global turbo disable check ANBZ: #12074 commit 37b6ddba967c601479bea418a7ac6ff16b6232b7 upstream. Setting global turbo flag based on CPU 0 P-state limits is problematic as it limits max P-state request on every CPU on the system just based on its P-state limits. There are two cases in which global.turbo_disabled flag is set: - When the MSR_IA32_MISC_ENABLE_TURBO_DISABLE bit is set to 1 in the MSR MSR_IA32_MISC_ENABLE. This bit can be only changed by the system BIOS before power up. - When the max non turbo P-state is same as max turbo P-state for CPU 0. The second check is not a valid to decide global turbo state based on the CPU 0. CPU 0 max turbo P-state can be same as max non turbo P-state, but for other CPUs this may not be true. There is no guarantee that max P-state limits are same for every CPU. This is possible that during fusing max P-state for a CPU is constrained. 
Also with the Intel Speed Select performance profile, CPU 0 may not be present in all profiles. In this case the max non turbo and turbo P-state can be set to the lowest possible P-state by the hardware when switched to such profile. Since max non turbo and turbo P-state is same, global.turbo_disabled flag will be set. Once global.turbo_disabled is set, any scaling max and min frequency update for any CPU will result in its max P-state constrained to the max non turbo P-state. Hence remove the check of max non turbo P-state equal to max turbo P-state of CPU 0 to set global turbo disabled flag. Intel-SIG: commit 37b6ddba967c cpufreq: intel_pstate: Revise global turbo disable check. Backport intel_pstate driver update for 6.6 Signed-off-by: Srinivas Pandruvada [ rjw: Subject edit ] Signed-off-by: Rafael J. Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 8a4fdf212ce0..7cb35842f68a 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -595,13 +595,9 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) static inline void update_turbo_state(void) { u64 misc_en; - struct cpudata *cpu; - cpu = all_cpu_data[0]; rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); - global.turbo_disabled = - (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || - cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); + global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE; } static int min_perf_pct_min(void) -- Gitee From 729fa2bc1872f483ab91a8fcd090e073b22fdfb0 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 20 Nov 2023 10:59:42 -0800 Subject: [PATCH 1800/2138] cpufreq: intel_pstate: Prioritize firmware-provided balance performance EPP ANBZ: #12074 commit 
2719675fa8111a8d7a060133e1dd4797d20c9754 upstream. The platform firmware can provide a balance performance EPP value by enabling HWP and programming the EPP to the desired value. However, currently this only takes effect for processors listed in intel_epp_balance_perf[], so in order to enable a new processor model to utilize this mechanism, that table needs to be updated. It arguably should not be necessary to modify the kernel to work properly with every new generation of processors, though, and distributions that don't always ship the most recent kernels should be able to run reasonably well on new hardware without code changes. For this reason, move the check to avoid updating the EPP when the balance performance EPP is unmodified from the power-up default of 0x80 after the check that allows the firmware-provided balance performance EPP value to be retrieved. This will cause the code to always look for the firmware- provided value before consulting intel_epp_balance_perf[] and the handling of new hardware will not depend on whether or not that thable has been updated yet. Intel-SIG: commit 2719675fa811 cpufreq: intel_pstate: Prioritize firmware-provided balance performance EPP. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada [ rjw: Subject and changelog edits ] Signed-off-by: Rafael J. 
Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 7cb35842f68a..df231d7f65a1 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1715,13 +1715,6 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata) { cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); - /* - * If this CPU gen doesn't call for change in balance_perf - * EPP return. - */ - if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) - return; - /* * If the EPP is set by firmware, which means that firmware enabled HWP * - Is equal or less than 0x80 (default balance_perf EPP) @@ -1734,6 +1727,13 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata) return; } + /* + * If this CPU gen doesn't call for change in balance_perf + * EPP return. + */ + if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) + return; + /* * Use hard coded value per gen to update the balance_perf * and default EPP. -- Gitee From dedf2d24e27dddb5f14000eb7f7f8bf8712c4822 Mon Sep 17 00:00:00 2001 From: Zhenguo Yao Date: Wed, 13 Dec 2023 18:28:08 +0800 Subject: [PATCH 1801/2138] cpufreq: intel_pstate: Add Emerald Rapids support in no-HWP mode ANBZ: #12074 commit e95013156ad88e6a1e1db6545881f49183e2ee0a upstream. Users may disable HWP in firmware, in which case intel_pstate will give up unless the CPU model is explicitly supported. 
See also the following past commits: - commit df51f287b5de ("cpufreq: intel_pstate: Add Sapphire Rapids support in no-HWP mode") - commit d8de7a44e11f ("cpufreq: intel_pstate: Add Skylake servers support") - commit 706c5328851d ("cpufreq: intel_pstate: Add Cometlake support in no-HWP mode") - commit fbdc21e9b038 ("cpufreq: intel_pstate: Add Icelake servers support in no-HWP mode") - commit 71bb5c82aaae ("cpufreq: intel_pstate: Add Tigerlake support in no-HWP mode") Intel-SIG: commit e95013156ad8 cpufreq: intel_pstate: Add Emerald Rapids support in no-HWP mode. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Zhenguo Yao Acked-by: Srinivas Pandruvada [ rjw: Changelog edits ] Signed-off-by: Rafael J. Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index df231d7f65a1..bd621dda767b 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2430,6 +2430,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { X86_MATCH(ICELAKE_X, core_funcs), X86_MATCH(TIGERLAKE, core_funcs), X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), + X86_MATCH(EMERALDRAPIDS_X, core_funcs), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); -- Gitee From 1cc07a35ea34f72072fd446044bababbcf2877ae Mon Sep 17 00:00:00 2001 From: "Jiri Slaby (SUSE)" Date: Tue, 13 Feb 2024 12:16:00 +0100 Subject: [PATCH 1802/2138] cpufreq: intel_pstate: remove cpudata::prev_cummulative_iowait ANBZ: #12074 commit 4615ac9010be84d676f9e893e5a7ea4b5febd1e8 upstream. Commit 09c448d3c61f ("cpufreq: intel_pstate: Use IOWAIT flag in Atom algorithm") removed the last user of cpudata::prev_cummulative_iowait. Remove the member too. Found by https://github.com/jirislaby/clang-struct. 
Intel-SIG: commit 4615ac9010be cpufreq: intel_pstate: remove cpudata. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Jiri Slaby (SUSE) Signed-off-by: Rafael J. Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index bd621dda767b..f9f801e2e088 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -201,8 +201,6 @@ struct global_params { * @prev_aperf: Last APERF value read from APERF MSR * @prev_mperf: Last MPERF value read from MPERF MSR * @prev_tsc: Last timestamp counter (TSC) value - * @prev_cummulative_iowait: IO Wait time difference from last and - * current sample * @sample: Storage for storing last Sample data * @min_perf_ratio: Minimum capacity in terms of PERF or HWP ratios * @max_perf_ratio: Maximum capacity in terms of PERF or HWP ratios @@ -241,7 +239,6 @@ struct cpudata { u64 prev_aperf; u64 prev_mperf; u64 prev_tsc; - u64 prev_cummulative_iowait; struct sample sample; int32_t min_perf_ratio; int32_t max_perf_ratio; -- Gitee From 199d06302df46b18029752827473944bdea9c24f Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 19 Feb 2024 18:26:06 -0800 Subject: [PATCH 1803/2138] cpufreq: intel_pstate: Allow model specific EPPs ANBZ: #12074 commit 240a8da623008eb9f4e32c7a19ce16a6605911dc upstream. The current implementation allows model specific EPP override for balanced_performance. Add feature to allow model specific EPP for all predefined EPP strings. For example for some CPU models, even changing performance EPP has benefits Use a mask of EPPs as driver_data instead of just balanced_performance. Intel-SIG: commit 240a8da62300 cpufreq: intel_pstate: Allow model specific EPPs. 
Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Signed-off-by: Rafael J. Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 41 +++++++++++++++++++++++++++++----- 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index f9f801e2e088..be3db0003e2e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -3400,14 +3401,29 @@ static bool intel_pstate_hwp_is_enabled(void) return !!(value & 0x1); } -static const struct x86_cpu_id intel_epp_balance_perf[] = { +#define POWERSAVE_MASK GENMASK(7, 0) +#define BALANCE_POWER_MASK GENMASK(15, 8) +#define BALANCE_PERFORMANCE_MASK GENMASK(23, 16) +#define PERFORMANCE_MASK GENMASK(31, 24) + +#define HWP_SET_EPP_VALUES(powersave, balance_power, balance_perf, performance) \ + (FIELD_PREP_CONST(POWERSAVE_MASK, powersave) |\ + FIELD_PREP_CONST(BALANCE_POWER_MASK, balance_power) |\ + FIELD_PREP_CONST(BALANCE_PERFORMANCE_MASK, balance_perf) |\ + FIELD_PREP_CONST(PERFORMANCE_MASK, performance)) + +#define HWP_SET_DEF_BALANCE_PERF_EPP(balance_perf) \ + (HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, HWP_EPP_BALANCE_POWERSAVE,\ + balance_perf, HWP_EPP_PERFORMANCE)) + +static const struct x86_cpu_id intel_epp_default[] = { /* * Set EPP value as 102, this is the max suggested EPP * which can result in one core turbo frequency for * AlderLake Mobile CPUs. 
*/ - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102), - X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 32), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)), + X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), {} }; @@ -3500,10 +3516,23 @@ static int __init intel_pstate_init(void) intel_pstate_sysfs_expose_params(); if (hwp_active) { - const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf); + const struct x86_cpu_id *id = x86_match_cpu(intel_epp_default); - if (id) - epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data; + if (id) { + epp_values[EPP_INDEX_POWERSAVE] = + FIELD_GET(POWERSAVE_MASK, id->driver_data); + epp_values[EPP_INDEX_BALANCE_POWERSAVE] = + FIELD_GET(BALANCE_POWER_MASK, id->driver_data); + epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = + FIELD_GET(BALANCE_PERFORMANCE_MASK, id->driver_data); + epp_values[EPP_INDEX_PERFORMANCE] = + FIELD_GET(PERFORMANCE_MASK, id->driver_data); + pr_debug("Updated EPPs powersave:%x balanced power:%x balanced perf:%x performance:%x\n", + epp_values[EPP_INDEX_POWERSAVE], + epp_values[EPP_INDEX_BALANCE_POWERSAVE], + epp_values[EPP_INDEX_BALANCE_PERFORMANCE], + epp_values[EPP_INDEX_PERFORMANCE]); + } } mutex_lock(&intel_pstate_driver_lock); -- Gitee From cc310b8369a1069ac920b1e3345fb7b79d408560 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 21 Mar 2024 20:29:43 +0100 Subject: [PATCH 1804/2138] cpufreq: intel_pstate: Drop redundant locking from intel_pstate_driver_cleanup() ANBZ: #12074 commit f186b2dace86f36cc08872b693185eaf71128898 upstream. Remove the spinlock locking from intel_pstate_driver_cleanup() as it is not necessary because no other code accessing all_cpu_data[] can run in parallel with that function. Had the locking been necessary, though, it would have been incorrect because the lock in question is acquired from a hardirq handler and it cannot be acquired from thread context without disabling interrupts. 
Intel-SIG: commit f186b2dace86 cpufreq: intel_pstate: Drop redundant locking from. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Rafael J. Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index be3db0003e2e..f49df43b0d04 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -3131,10 +3131,8 @@ static void intel_pstate_driver_cleanup(void) if (intel_pstate_driver == &intel_pstate) intel_pstate_clear_update_util_hook(cpu); - raw_spin_lock(&hwp_notify_lock); kfree(all_cpu_data[cpu]); WRITE_ONCE(all_cpu_data[cpu], NULL); - raw_spin_unlock(&hwp_notify_lock); } } cpus_read_unlock(); -- Gitee From 207e98e8b1297f0d4e2483f666d8aeb628ade66b Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 21 Mar 2024 20:30:42 +0100 Subject: [PATCH 1805/2138] cpufreq: intel_pstate: Simplify spinlock locking ANBZ: #12074 commit 12ebba42d2f1eadc0f897ffeb6dbcfaf2449e107 upstream. Because intel_pstate_enable/disable_hwp_interrupt() are only called from thread context, they need not save the IRQ flags when using a spinlock as interrupts are guaranteed to be enabled when they run, so make them use spin_lock/unlock_irq(). Intel-SIG: commit 12ebba42d2f1 cpufreq: intel_pstate: Simplify spinlock locking. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Rafael J. 
Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index f49df43b0d04..3c27f249e4ef 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1678,30 +1678,27 @@ void notify_hwp_interrupt(void) static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) { - unsigned long flags; - if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) return; /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); - raw_spin_lock_irqsave(&hwp_notify_lock, flags); + raw_spin_lock_irq(&hwp_notify_lock); if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask)) cancel_delayed_work(&cpudata->hwp_notify_work); - raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); + raw_spin_unlock_irq(&hwp_notify_lock); } static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) { /* Enable HWP notification interrupt for guaranteed performance change */ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) { - unsigned long flags; - raw_spin_lock_irqsave(&hwp_notify_lock, flags); + raw_spin_lock_irq(&hwp_notify_lock); INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work); cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask); - raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); + raw_spin_unlock_irq(&hwp_notify_lock); /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01); -- Gitee From 0652e8f93b826912861552bf2cf12c053ee4e14c Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Thu, 21 Mar 2024 20:32:02 +0100 Subject: [PATCH 1806/2138] cpufreq: intel_pstate: Wait for canceled delayed work to complete ANBZ: #12074 commit 432acb219af4edecdd11d360f30b7cc643524db8 upstream. Make intel_pstate_disable_hwp_interrupt() wait for canceled delayed work to complete to avoid leftover work items running when it returns which may be during driver unregistration and may confuse things going forward. Intel-SIG: commit 432acb219af4 cpufreq: intel_pstate: Wait for canceled delayed work to complete. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Rafael J. Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 3c27f249e4ef..0b5b5ccef343 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1678,6 +1678,8 @@ void notify_hwp_interrupt(void) static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) { + bool cancel_work; + if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) return; @@ -1685,9 +1687,11 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00); raw_spin_lock_irq(&hwp_notify_lock); - if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask)) - cancel_delayed_work(&cpudata->hwp_notify_work); + cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask); raw_spin_unlock_irq(&hwp_notify_lock); + + if (cancel_work) + cancel_delayed_work_sync(&cpudata->hwp_notify_work); } static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) -- Gitee From c836fe35e4de91a57a329b1dab2e9481681f4182 Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Thu, 28 Mar 2024 19:52:06 +0100 Subject: [PATCH 1807/2138] cpufreq: intel_pstate: Get rid of unnecessary READ_ONCE() annotations ANBZ: #12074 commit 0f2828e17b6f41b8b345f0031e3fe58529991748 upstream. Drop two redundant checks involving READ_ONCE() from notify_hwp_interrupt() and make it check hwp_active without READ_ONCE() which is not necessary, because that variable is only set once during the early initialization of the driver. In order to make that clear, annotate hwp_active with __ro_after_init. Intel-SIG: commit 0f2828e17b6f cpufreq: intel_pstate: Get rid of unnecessary READ_ONCE() annotations. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Rafael J. Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 27 +++++---------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 0b5b5ccef343..069ae03cb617 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -292,7 +292,7 @@ struct pstate_funcs { static struct pstate_funcs pstate_funcs __read_mostly; -static int hwp_active __read_mostly; +static bool hwp_active __ro_after_init; static int hwp_mode_bdw __read_mostly; static bool per_cpu_limits __read_mostly; static bool hwp_boost __read_mostly; @@ -1632,11 +1632,10 @@ static cpumask_t hwp_intr_enable_mask; void notify_hwp_interrupt(void) { unsigned int this_cpu = smp_processor_id(); - struct cpudata *cpudata; unsigned long flags; u64 value; - if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) + if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) return; rdmsrl_safe(MSR_HWP_STATUS, &value); @@ -1648,24 +1647,8 @@ void notify_hwp_interrupt(void) if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask)) goto ack_intr; - /* - * Currently we never free 
all_cpu_data. And we can't reach here - * without this allocated. But for safety for future changes, added - * check. - */ - if (unlikely(!READ_ONCE(all_cpu_data))) - goto ack_intr; - - /* - * The free is done during cleanup, when cpufreq registry is failed. - * We wouldn't be here if it fails on init or switch status. But for - * future changes, added check. - */ - cpudata = READ_ONCE(all_cpu_data[this_cpu]); - if (unlikely(!cpudata)) - goto ack_intr; - - schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10)); + schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work, + msecs_to_jiffies(10)); raw_spin_unlock_irqrestore(&hwp_notify_lock, flags); @@ -3454,7 +3437,7 @@ static int __init intel_pstate_init(void) * deal with it. */ if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) { - WRITE_ONCE(hwp_active, 1); + hwp_active = true; hwp_mode_bdw = id->driver_data; intel_pstate.attr = hwp_cpufreq_attrs; intel_cpufreq.attr = hwp_cpufreq_attrs; -- Gitee From 063531dee8285e7c3f273b5b9a64253362e7fee7 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 21 Mar 2024 20:34:06 +0100 Subject: [PATCH 1808/2138] cpufreq: intel_pstate: Use __ro_after_init for three variables ANBZ: #12074 commit e97a98238da68aea4a0be0b2cc40e39527c880b1 upstream. There are at least 3 variables in intel_pstate that do not get updated after they have been initialized, so annotate them with __ro_after_init. Intel-SIG: commit e97a98238da6 cpufreq: intel_pstate: Use __ro_after_init for three variables. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Rafael J. 
Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 069ae03cb617..88387ad9630d 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -293,10 +293,10 @@ struct pstate_funcs { static struct pstate_funcs pstate_funcs __read_mostly; static bool hwp_active __ro_after_init; -static int hwp_mode_bdw __read_mostly; -static bool per_cpu_limits __read_mostly; +static int hwp_mode_bdw __ro_after_init; +static bool per_cpu_limits __ro_after_init; +static bool hwp_forced __ro_after_init; static bool hwp_boost __read_mostly; -static bool hwp_forced __read_mostly; static struct cpufreq_driver *intel_pstate_driver __read_mostly; -- Gitee From 894fa315cb76f40f22eba76e2a59edecb64ffccf Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Mar 2024 18:01:58 +0100 Subject: [PATCH 1809/2138] cpufreq: intel_pstate: Fold intel_pstate_max_within_limits() into caller ANBZ: #12074 commit 032c5565eb80edb6f2faeb31939540c897987119 upstream. Fold intel_pstate_max_within_limits() into its only caller. No functional impact. Intel-SIG: commit 032c5565eb80 cpufreq: intel_pstate: Fold intel_pstate_max_within_limits() into caller. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Rafael J. 
Wysocki Acked-by: Srinivas Pandruvada [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 88387ad9630d..867502a7ce02 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2009,14 +2009,6 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu) intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); } -static void intel_pstate_max_within_limits(struct cpudata *cpu) -{ - int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); - - update_turbo_state(); - intel_pstate_set_pstate(cpu, pstate); -} - static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu); @@ -2591,12 +2583,15 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) intel_pstate_update_perf_limits(cpu, policy->min, policy->max); if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { + int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio); + /* * NOHZ_FULL CPUs need this as the governor callback may not * be invoked on them. */ intel_pstate_clear_update_util_hook(policy->cpu); - intel_pstate_max_within_limits(cpu); + update_turbo_state(); + intel_pstate_set_pstate(cpu, pstate); } else { intel_pstate_set_update_util_hook(policy->cpu); } -- Gitee From bf403f21157d164c314ef07633b2b4d28f6e7676 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Mar 2024 18:02:42 +0100 Subject: [PATCH 1810/2138] cpufreq: intel_pstate: Do not update global.turbo_disabled after initialization ANBZ: #12074 commit 0940f1a8011fd69be5082015068e0dc31c800c20 upstream. The global.turbo_disabled is updated quite often, especially in the passive mode in which case it is updated every time the scheduler calls into the driver. 
However, this is generally not necessary and it adds MSR read overhead to scheduler code paths (and that particular MSR is slow to read). For this reason, make the driver read MSR_IA32_MISC_ENABLE_TURBO_DISABLE just once at the cpufreq driver registration time and remove all of the in-flight updates of global.turbo_disabled. Intel-SIG: commit 0940f1a8011f cpufreq: intel_pstate: Do not update global.turbo_disabled. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Rafael J. Wysocki Acked-by: Srinivas Pandruvada [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 51 ++++++---------------------------- 1 file changed, 8 insertions(+), 43 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 867502a7ce02..810b4a56dbd3 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -173,7 +173,6 @@ struct vid_data { * based on the MSR_IA32_MISC_ENABLE value and whether or * not the maximum reported turbo P-state is different from * the maximum reported non-turbo one. - * @turbo_disabled_mf: The @turbo_disabled value reflected by cpuinfo.max_freq. * @min_perf_pct: Minimum capacity limit in percent of the maximum turbo * P-state capacity. 
* @max_perf_pct: Maximum capacity limit in percent of the maximum turbo @@ -182,7 +181,6 @@ struct vid_data { struct global_params { bool no_turbo; bool turbo_disabled; - bool turbo_disabled_mf; int max_perf_pct; int min_perf_pct; }; @@ -590,12 +588,13 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq); } -static inline void update_turbo_state(void) +static bool turbo_is_disabled(void) { u64 misc_en; rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); - global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE; + + return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); } static int min_perf_pct_min(void) @@ -1150,40 +1149,16 @@ static void intel_pstate_update_policies(void) static void __intel_pstate_update_max_freq(struct cpudata *cpudata, struct cpufreq_policy *policy) { - policy->cpuinfo.max_freq = global.turbo_disabled_mf ? + policy->cpuinfo.max_freq = global.turbo_disabled ? cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; refresh_frequency_limits(policy); } -static void intel_pstate_update_max_freq(unsigned int cpu) -{ - struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); - - if (!policy) - return; - - __intel_pstate_update_max_freq(all_cpu_data[cpu], policy); - - cpufreq_cpu_release(policy); -} - static void intel_pstate_update_limits(unsigned int cpu) { mutex_lock(&intel_pstate_driver_lock); - update_turbo_state(); - /* - * If turbo has been turned on or off globally, policy limits for - * all CPUs need to be updated to reflect that. 
- */ - if (global.turbo_disabled_mf != global.turbo_disabled) { - global.turbo_disabled_mf = global.turbo_disabled; - arch_set_max_freq_ratio(global.turbo_disabled); - for_each_possible_cpu(cpu) - intel_pstate_update_max_freq(cpu); - } else { - cpufreq_update_policy(cpu); - } + cpufreq_update_policy(cpu); mutex_unlock(&intel_pstate_driver_lock); } @@ -1283,7 +1258,6 @@ static ssize_t show_no_turbo(struct kobject *kobj, return -EAGAIN; } - update_turbo_state(); if (global.turbo_disabled) ret = sprintf(buf, "%u\n", global.turbo_disabled); else @@ -1313,7 +1287,6 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, mutex_lock(&intel_pstate_limits_lock); - update_turbo_state(); if (global.turbo_disabled) { pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); mutex_unlock(&intel_pstate_limits_lock); @@ -2278,8 +2251,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu) struct sample *sample; int target_pstate; - update_turbo_state(); - target_pstate = get_target_pstate(cpu); target_pstate = intel_pstate_prepare_request(cpu, target_pstate); trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu); @@ -2590,7 +2561,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) * be invoked on them. */ intel_pstate_clear_update_util_hook(policy->cpu); - update_turbo_state(); intel_pstate_set_pstate(cpu, pstate); } else { intel_pstate_set_update_util_hook(policy->cpu); @@ -2634,7 +2604,6 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, { int max_freq; - update_turbo_state(); if (hwp_active) { intel_pstate_get_hwp_cap(cpu); max_freq = global.no_turbo || global.turbo_disabled ? @@ -2731,8 +2700,6 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = cpu->pstate.min_freq; - update_turbo_state(); - global.turbo_disabled_mf = global.turbo_disabled; policy->cpuinfo.max_freq = global.turbo_disabled ? 
cpu->pstate.max_freq : cpu->pstate.turbo_freq; @@ -2898,8 +2865,6 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, struct cpufreq_freqs freqs; int target_pstate; - update_turbo_state(); - freqs.old = policy->cur; freqs.new = target_freq; @@ -2921,8 +2886,6 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, struct cpudata *cpu = all_cpu_data[policy->cpu]; int target_pstate; - update_turbo_state(); - target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq); target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true); @@ -2940,7 +2903,6 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum, int old_pstate = cpu->pstate.current_pstate; int cap_pstate, min_pstate, max_pstate, target_pstate; - update_turbo_state(); cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) : HWP_HIGHEST_PERF(hwp_cap); @@ -3128,6 +3090,9 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) memset(&global, 0, sizeof(global)); global.max_perf_pct = 100; + global.turbo_disabled = turbo_is_disabled(); + + arch_set_max_freq_ratio(global.turbo_disabled); intel_pstate_driver = driver; ret = cpufreq_register_driver(intel_pstate_driver); -- Gitee From feafbf59dcaf732040f3bdfc4cd3d5722c53ab01 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Mar 2024 18:03:25 +0100 Subject: [PATCH 1811/2138] cpufreq: intel_pstate: Rearrange show_no_turbo() and store_no_turbo() ANBZ: #12074 commit c626a438452079824139f97137f17af47b1a8989 upstream. Now that global.turbo_disabled can only change at the cpufreq driver registration time, initialize global.no_turbo at that time too so they are in sync to start with (if the former is set, the latter cannot be updated later anyway). 
That allows show_no_turbo() to be simplified
unlock_driver; if (global.turbo_disabled) { pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); - mutex_unlock(&intel_pstate_limits_lock); - mutex_unlock(&intel_pstate_driver_lock); - return -EPERM; + count = -EPERM; + goto unlock_driver; } - global.no_turbo = clamp_t(int, input, 0, 1); + global.no_turbo = no_turbo; + + mutex_lock(&intel_pstate_limits_lock); - if (global.no_turbo) { + if (no_turbo) { struct cpudata *cpu = all_cpu_data[0]; int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate; @@ -1308,8 +1308,9 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, mutex_unlock(&intel_pstate_limits_lock); intel_pstate_update_policies(); - arch_set_max_freq_ratio(global.no_turbo); + arch_set_max_freq_ratio(no_turbo); +unlock_driver: mutex_unlock(&intel_pstate_driver_lock); return count; @@ -3091,6 +3092,7 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver) memset(&global, 0, sizeof(global)); global.max_perf_pct = 100; global.turbo_disabled = turbo_is_disabled(); + global.no_turbo = global.turbo_disabled; arch_set_max_freq_ratio(global.turbo_disabled); -- Gitee From eef3be26ea775087de40baf733dc549441834884 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Mar 2024 18:04:24 +0100 Subject: [PATCH 1812/2138] cpufreq: intel_pstate: Read global.no_turbo under READ_ONCE() ANBZ: #12074 commit 9558fae8ce97b3b320b387dd7c88309df2c36d4d upstream. Because global.no_turbo is generally not read under intel_pstate_driver_lock make store_no_turbo() use WRITE_ONCE() for updating it (this is the only place at which it is updated except for the initialization) and make the majority of places reading it use READ_ONCE(). Also remove redundant global.turbo_disabled checks from places that depend on the 'true' value of global.no_turbo because it can only be 'true' if global.turbo_disabled is also 'true'. Intel-SIG: commit 9558fae8ce97 cpufreq: intel_pstate: Read global.no_turbo under READ_ONCE(). 
Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Rafael J. Wysocki Acked-by: Srinivas Pandruvada [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 140df2f68b72..7ab1728f3174 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1292,7 +1292,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, goto unlock_driver; } - global.no_turbo = no_turbo; + WRITE_ONCE(global.no_turbo, no_turbo); mutex_lock(&intel_pstate_limits_lock); @@ -1745,7 +1745,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate) u32 vid; val = (u64)pstate << 8; - if (global.no_turbo && !global.turbo_disabled) + if (READ_ONCE(global.no_turbo) && !global.turbo_disabled) val |= (u64)1 << 32; vid_fp = cpudata->vid.min + mul_fp( @@ -1910,7 +1910,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate) u64 val; val = (u64)pstate << 8; - if (global.no_turbo && !global.turbo_disabled) + if (READ_ONCE(global.no_turbo) && !global.turbo_disabled) val |= (u64)1 << 32; return val; @@ -2208,7 +2208,7 @@ static inline int32_t get_target_pstate(struct cpudata *cpu) sample->busy_scaled = busy_frac * 100; - target = global.no_turbo || global.turbo_disabled ? + target = READ_ONCE(global.no_turbo) ? cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; target += target >> 2; target = mul_fp(target, busy_frac); @@ -2470,7 +2470,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu) static int intel_pstate_get_max_freq(struct cpudata *cpu) { - return global.turbo_disabled || global.no_turbo ? + return READ_ONCE(global.no_turbo) ? 
cpu->pstate.max_freq : cpu->pstate.turbo_freq; } @@ -2607,7 +2607,7 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu, if (hwp_active) { intel_pstate_get_hwp_cap(cpu); - max_freq = global.no_turbo || global.turbo_disabled ? + max_freq = READ_ONCE(global.no_turbo) ? cpu->pstate.max_freq : cpu->pstate.turbo_freq; } else { max_freq = intel_pstate_get_max_freq(cpu); -- Gitee From 2bc9ef04984c2d6237501204bece214f90d53c6b Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Mon, 25 Mar 2024 18:05:06 +0100 Subject: [PATCH 1813/2138] cpufreq: intel_pstate: Replace three global.turbo_disabled checks ANBZ: #12074 commit f32587dcbe5f40e160d8de262add6abab79356a7 upstream. Replace the global.turbo_disabled in __intel_pstate_update_max_freq() with a global.no_turbo one to make store_no_turbo() actually update the maximum CPU frequency on the trubo preference changes, which needs to be consistent with arch_set_max_freq_ratio() called from there. For more consistency, replace the global.turbo_disabled checks in __intel_pstate_cpu_init() and intel_cpufreq_adjust_perf() with global.no_turbo checks either. Intel-SIG: commit f32587dcbe5f cpufreq: intel_pstate: Replace three global.turbo_disabled checks. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Rafael J. Wysocki Acked-by: Srinivas Pandruvada [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 7ab1728f3174..2641c7b91791 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1149,7 +1149,7 @@ static void intel_pstate_update_policies(void) static void __intel_pstate_update_max_freq(struct cpudata *cpudata, struct cpufreq_policy *policy) { - policy->cpuinfo.max_freq = global.turbo_disabled ? 
+ policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ? cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; refresh_frequency_limits(policy); } @@ -2701,7 +2701,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) /* cpuinfo and default policy values */ policy->cpuinfo.min_freq = cpu->pstate.min_freq; - policy->cpuinfo.max_freq = global.turbo_disabled ? + policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ? cpu->pstate.max_freq : cpu->pstate.turbo_freq; policy->min = policy->cpuinfo.min_freq; @@ -2904,8 +2904,9 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum, int old_pstate = cpu->pstate.current_pstate; int cap_pstate, min_pstate, max_pstate, target_pstate; - cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) : - HWP_HIGHEST_PERF(hwp_cap); + cap_pstate = READ_ONCE(global.no_turbo) ? + HWP_GUARANTEED_PERF(hwp_cap) : + HWP_HIGHEST_PERF(hwp_cap); /* Optimization: Avoid unnecessary divisions. */ -- Gitee From bedb45b85d71424de28b6cddce5ab62a76b409a8 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 28 Mar 2024 19:52:45 +0100 Subject: [PATCH 1814/2138] cpufreq: intel_pstate: Update the maximum CPU frequency consistently ANBZ: #12074 commit e8217b4bece379e66d43ab5070431712f07bf625 upstream. There are 3 places at which the maximum CPU frequency may change, store_no_turbo(), intel_pstate_update_limits() (when called by the cpufreq core) and intel_pstate_notify_work() (when handling a HWP change notification). Currently, cpuinfo.max_freq is only updated by store_no_turbo() and intel_pstate_notify_work(), although it principle it may be necessary to update it in intel_pstate_update_limits() either. Make all of them mutually consistent. Intel-SIG: commit e8217b4bece3 cpufreq: intel_pstate: Update the maximum CPU frequency consistently. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Rafael J. 
Wysocki Acked-by: Srinivas Pandruvada [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 2641c7b91791..6d8ea6117da9 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1149,18 +1149,32 @@ static void intel_pstate_update_policies(void) static void __intel_pstate_update_max_freq(struct cpudata *cpudata, struct cpufreq_policy *policy) { + intel_pstate_get_hwp_cap(cpudata); + policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ? cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; + refresh_frequency_limits(policy); } static void intel_pstate_update_limits(unsigned int cpu) { - mutex_lock(&intel_pstate_driver_lock); + struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu); - cpufreq_update_policy(cpu); + if (!policy) + return; - mutex_unlock(&intel_pstate_driver_lock); + __intel_pstate_update_max_freq(all_cpu_data[cpu], policy); + + cpufreq_cpu_release(policy); +} + +static void intel_pstate_update_limits_for_all(void) +{ + int cpu; + + for_each_possible_cpu(cpu) + intel_pstate_update_limits(cpu); } /************************** sysfs begin ************************/ @@ -1307,7 +1321,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, mutex_unlock(&intel_pstate_limits_lock); - intel_pstate_update_policies(); + intel_pstate_update_limits_for_all(); arch_set_max_freq_ratio(no_turbo); unlock_driver: @@ -1591,7 +1605,6 @@ static void intel_pstate_notify_work(struct work_struct *work) struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu); if (policy) { - intel_pstate_get_hwp_cap(cpudata); __intel_pstate_update_max_freq(cpudata, policy); cpufreq_cpu_release(policy); -- Gitee From 47da40b887ff0e6170ea844e3ce1367ea8168c62 
Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 3 Apr 2024 10:06:45 +0200 Subject: [PATCH 1815/2138] cpufreq: intel_pstate: hide unused intel_pstate_cpu_oob_ids[] ANBZ: #12074 commit 8c556541a53848d6611ff8b5f9bf52e96c56f48e upstream. The reference to this variable is hidden in an #ifdef: drivers/cpufreq/intel_pstate.c:2440:32: error: 'intel_pstate_cpu_oob_ids' defined but not used [-Werror=unused-const-variable=] Use the same check around the definition. Intel-SIG: commit 8c556541a538 cpufreq: intel_pstate: hide unused intel_pstate_cpu_oob_ids[]. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Arnd Bergmann Signed-off-by: Rafael J. Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 6d8ea6117da9..ef37d509715d 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -2394,6 +2394,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); +#ifdef CONFIG_ACPI static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { X86_MATCH(BROADWELL_D, core_funcs), X86_MATCH(BROADWELL_X, core_funcs), @@ -2402,6 +2403,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = { X86_MATCH(SAPPHIRERAPIDS_X, core_funcs), {} }; +#endif static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = { X86_MATCH(KABYLAKE, core_funcs), -- Gitee From 875bfb2d34fc6b75f84f001b1862085d42520140 Mon Sep 17 00:00:00 2001 From: Jeff Johnson Date: Sun, 5 May 2024 12:07:12 -0700 Subject: [PATCH 1816/2138] cpufreq: intel_pstate: fix struct cpudata::epp_cached kernel-doc ANBZ: #12074 commit 0a206fe35d360a9ec1c8b1609ca394c2759a8962 upstream. 
make C=1 currently gives the following warning: drivers/cpufreq/intel_pstate.c:262: warning: Function parameter or struct member 'epp_cached' not described in 'cpudata' Add the missing ":" to fix the trivial kernel-doc syntax error. Intel-SIG: commit 0a206fe35d36 cpufreq: intel_pstate: fix struct cpudata::epp_cached kernel-doc. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Jeff Johnson Signed-off-by: Rafael J. Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index ef37d509715d..23a4539acec5 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -211,7 +211,7 @@ struct global_params { * @epp_policy: Last saved policy used to set EPP/EPB * @epp_default: Power on default HWP energy performance * preference/bias - * @epp_cached Cached HWP energy-performance preference value + * @epp_cached: Cached HWP energy-performance preference value * @hwp_req_cached: Cached value of the last HWP Request MSR * @hwp_cap_cached: Cached value of the last HWP Capabilities MSR * @last_io_update: Last time when IO wake flag was set -- Gitee From 8b61394bd3199c1d73c11d191958aabd55a710f5 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Fri, 31 May 2024 16:00:04 -0700 Subject: [PATCH 1817/2138] cpufreq: intel_pstate: Fix unchecked HWP MSR access ANBZ: #12074 commit 1e24c31351787e24b7eebe84866bd55fd62a0aef upstream. Fix unchecked MSR access error for processors with no HWP support. On such processors, maximum frequency can be changed by the system firmware using ACPI event ACPI_PROCESSOR_NOTIFY_HIGEST_PERF_CHANGED. This results in accessing HWP MSR 0x771. 
Call Trace: generic_exec_single+0x58/0x120 smp_call_function_single+0xbf/0x110 rdmsrl_on_cpu+0x46/0x60 intel_pstate_get_hwp_cap+0x1b/0x70 intel_pstate_update_limits+0x2a/0x60 acpi_processor_notify+0xb7/0x140 acpi_ev_notify_dispatch+0x3b/0x60 HWP MSR 0x771 can be only read on a CPU which supports HWP and enabled. Hence intel_pstate_get_hwp_cap() can only be called when hwp_active is true. Intel-SIG: commit 1e24c3135178 cpufreq: intel_pstate: Fix unchecked HWP MSR access. Backport intel_pstate driver update for 6.6 from 6.11 Reported-by: Sebastian Andrzej Siewior Closes: https://lore.kernel.org/linux-pm/20240529155740.Hq2Hw7be@linutronix.de/ Fixes: e8217b4bece3 ("cpufreq: intel_pstate: Update the maximum CPU frequency consistently") Tested-by: Sebastian Andrzej Siewior Signed-off-by: Srinivas Pandruvada Signed-off-by: Rafael J. Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 23a4539acec5..dbef23938fc6 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1149,7 +1149,8 @@ static void intel_pstate_update_policies(void) static void __intel_pstate_update_max_freq(struct cpudata *cpudata, struct cpufreq_policy *policy) { - intel_pstate_get_hwp_cap(cpudata); + if (hwp_active) + intel_pstate_get_hwp_cap(cpudata); policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ? cpudata->pstate.max_freq : cpudata->pstate.turbo_freq; -- Gitee From 1ebda378973cdc160a0934252941129a0c8075c9 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 11 Jun 2024 16:53:06 +0200 Subject: [PATCH 1818/2138] cpufreq: intel_pstate: Check turbo_is_disabled() in store_no_turbo() ANBZ: #12074 commit 350cbb5d2f676bff22c49e5e81764c3b8da342a9 upstream. 
After recent changes in intel_pstate, global.turbo_disabled is only set at the initialization time and never changed. However, it turns out that on some systems the "turbo disabled" bit in MSR_IA32_MISC_ENABLE, the initial state of which is reflected by global.turbo_disabled, can be flipped later and there should be a way to take that into account (other than checking that MSR every time the driver runs which is costly and useless overhead on the vast majority of systems). For this purpose, notice that before the changes in question, store_no_turbo() contained a turbo_is_disabled() check that was used for updating global.turbo_disabled if the "turbo disabled" bit in MSR_IA32_MISC_ENABLE had been flipped and that functionality can be restored. Then, users will be able to reset global.turbo_disabled by writing 0 to no_turbo which used to work before on systems with flipping "turbo disabled" bit. This guarantees the driver state to remain in sync, but READ_ONCE() annotations need to be added in two places where global.turbo_disabled is accessed locklessly, so modify the driver to make that happen. Intel-SIG: commit 350cbb5d2f67 cpufreq: intel_pstate: Check turbo_is_disabled() in store_no_turbo(). Backport intel_pstate driver update for 6.6 from 6.11 Fixes: 0940f1a8011f ("cpufreq: intel_pstate: Do not update global.turbo_disabled after initialization") Closes: https://lore.kernel.org/linux-pm/bf3ebf1571a4788e97daf861eb493c12d42639a3.camel@xry111.site Suggested-by: Srinivas Pandruvada Reported-by: Xi Ruoyao Tested-by: Xi Ruoyao Signed-off-by: Rafael J. 
Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index dbef23938fc6..c81719ab298e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1298,12 +1298,17 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b, no_turbo = !!clamp_t(int, input, 0, 1); - if (no_turbo == global.no_turbo) - goto unlock_driver; - - if (global.turbo_disabled) { - pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n"); + WRITE_ONCE(global.turbo_disabled, turbo_is_disabled()); + if (global.turbo_disabled && !no_turbo) { + pr_notice("Turbo disabled by BIOS or unavailable on processor\n"); count = -EPERM; + if (global.no_turbo) + goto unlock_driver; + else + no_turbo = 1; + } + + if (no_turbo == global.no_turbo) { goto unlock_driver; } @@ -1759,7 +1764,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate) u32 vid; val = (u64)pstate << 8; - if (READ_ONCE(global.no_turbo) && !global.turbo_disabled) + if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled)) val |= (u64)1 << 32; vid_fp = cpudata->vid.min + mul_fp( @@ -1924,7 +1929,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate) u64 val; val = (u64)pstate << 8; - if (READ_ONCE(global.no_turbo) && !global.turbo_disabled) + if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled)) val |= (u64)1 << 32; return val; -- Gitee From b22a0ec98030a7ecf14f9d98d187e59d1a435fd8 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 24 Jun 2024 09:27:14 -0700 Subject: [PATCH 1819/2138] cpufreq: intel_pstate: Replace boot_cpu_has() ANBZ: #12074 commit acfc429e42f09524653af52998548cd9317892a6 upstream. Replace boot_cpu_has() with cpu_feature_enabled(). 
Intel-SIG: commit acfc429e42f0 cpufreq: intel_pstate: Replace boot_cpu_has(). Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Link: https://patch.msgid.link/20240624162714.1431182-1-srinivas.pandruvada@linux.intel.com Signed-off-by: Rafael J. Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index c81719ab298e..feb97b3bf30c 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1628,7 +1628,7 @@ void notify_hwp_interrupt(void) unsigned long flags; u64 value; - if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) + if (!hwp_active || !cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY)) return; rdmsrl_safe(MSR_HWP_STATUS, &value); @@ -1656,7 +1656,7 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) { bool cancel_work; - if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) + if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY)) return; /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ -- Gitee From d84ef10af77ca4be25c1b22b67399b18f7582380 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 24 Jun 2024 09:11:08 -0700 Subject: [PATCH 1820/2138] x86/cpufeatures: Add HWP highest perf change feature flag ANBZ: #12074 commit 7ea81936b85317aee8a73cd35d7f9cd6ce654dee upstream. When CPUID[6].EAX[15] is set to 1, this CPU supports notification for HWP (Hardware P-states) highest performance change. Add a feature flag to check if the CPU supports HWP highest performance change. Intel-SIG: commit 7ea81936b853 x86/cpufeatures: Add HWP highest perf change feature flag. 
Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Link: https://patch.msgid.link/20240624161109.1427640-2-srinivas.pandruvada@linux.intel.com Signed-off-by: Rafael J. Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- arch/x86/include/asm/cpufeatures.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index f6d3ab0bed37..0f174149a845 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -381,6 +381,7 @@ #define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */ #define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */ #define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */ +#define X86_FEATURE_HWP_HIGHEST_PERF_CHANGE (14*32+15) /* "" HWP Highest perf change */ #define X86_FEATURE_HFI (14*32+19) /* Hardware Feedback Interface */ /* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */ -- Gitee From e94ed7c0ea73a3c8569207471f974ccd5eac9b34 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 24 Jun 2024 09:11:09 -0700 Subject: [PATCH 1821/2138] cpufreq: intel_pstate: Support highest performance change interrupt ANBZ: #12074 commit d845cd901b28f1b6c02a208b864fc3fc46d14536 upstream. On some systems, the HWP (Hardware P-states) highest performance level can change from the value set at boot-up. This behavior can lead to two issues: - The 'cpuinfo_max_freq' within the 'cpufreq' sysfs will not reflect the CPU's highest achievable performance. - Even if the CPU's highest performance level is increased after booting, the CPU may not reach the full expected performance. The availability of this feature is indicated by the CPUID instruction: if CPUID[6].EAX[15] is set to 1, the feature is supported. 
When supported, setting bit 2 of the MSR_HWP_INTERRUPT register enables notifications of the highest performance level changes. Therefore, as part of enabling the HWP interrupt, bit 2 of the MSR_HWP_INTERRUPT should also be set when this feature is supported. Upon a change in the highest performance level, a new HWP interrupt is generated, with bit 3 of the MSR_HWP_STATUS register set, and the MSR_HWP_CAPABILITIES register is updated with the new highest performance limit. The processing of the interrupt is the same as the guaranteed performance change. Notify change to cpufreq core and update MSR_HWP_REQUEST with new performance limits. The current driver implementation already takes care of the highest performance change as part of: commit dfeeedc1bf57 ("cpufreq: intel_pstate: Update cpuinfo.max_freq on HWP_CAP changes") For example: Before highest performance change interrupt: cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq 3700000 cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq 3700000 After highest performance changes interrupt: cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq 3900000 cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq 3900000 Intel-SIG: commit d845cd901b28 cpufreq: intel_pstate: Support highest performance change interrupt. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Link: https://patch.msgid.link/20240624161109.1427640-3-srinivas.pandruvada@linux.intel.com Signed-off-by: Rafael J. 
Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index feb97b3bf30c..471725fe0211 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1622,17 +1622,24 @@ static void intel_pstate_notify_work(struct work_struct *work) static DEFINE_RAW_SPINLOCK(hwp_notify_lock); static cpumask_t hwp_intr_enable_mask; +#define HWP_GUARANTEED_PERF_CHANGE_STATUS BIT(0) +#define HWP_HIGHEST_PERF_CHANGE_STATUS BIT(3) + void notify_hwp_interrupt(void) { unsigned int this_cpu = smp_processor_id(); + u64 value, status_mask; unsigned long flags; - u64 value; if (!hwp_active || !cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY)) return; + status_mask = HWP_GUARANTEED_PERF_CHANGE_STATUS; + if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE)) + status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS; + rdmsrl_safe(MSR_HWP_STATUS, &value); - if (!(value & 0x01)) + if (!(value & status_mask)) return; raw_spin_lock_irqsave(&hwp_notify_lock, flags); @@ -1670,18 +1677,25 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata) cancel_delayed_work_sync(&cpudata->hwp_notify_work); } +#define HWP_GUARANTEED_PERF_CHANGE_REQ BIT(0) +#define HWP_HIGHEST_PERF_CHANGE_REQ BIT(2) + static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata) { - /* Enable HWP notification interrupt for guaranteed performance change */ + /* Enable HWP notification interrupt for performance change */ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) { + u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ; raw_spin_lock_irq(&hwp_notify_lock); INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work); cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask); 
raw_spin_unlock_irq(&hwp_notify_lock); + if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE)) + interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ; + /* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */ - wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01); + wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask); wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0); } } -- Gitee From 859162a86ef6d12fe543650b9d177d7be04e6749 Mon Sep 17 00:00:00 2001 From: Pedro Henrique Kopper Date: Thu, 1 Aug 2024 13:41:50 -0300 Subject: [PATCH 1822/2138] cpufreq: intel_pstate: Update Balance performance EPP for Emerald Rapids ANBZ: #12074 commit 64a66f4a3c89b4602ee1e6cd23b28729fc4562b3 upstream. On Intel Emerald Rapids machines, we ship the Energy Performance Preference (EPP) default for balance_performance as 128. However, during an internal investigation together with Intel, we have determined that 32 is a more suitable value. This leads to significant improvements in both performance and energy: POV-Ray: 32% faster | 12% less energy OpenSSL: 12% faster | energy within 1% Build Linux Kernel: 29% faster | 18% less energy Therefore, we should move the default EPP for balance_performance to 32. This is in line with what has already been done for Sapphire Rapids. Intel-SIG: commit 64a66f4a3c89 cpufreq: intel_pstate: Update Balance performance EPP for Emerald Rapids. Backport intel_pstate driver update for 6.6 from 6.11 Signed-off-by: Pedro Henrique Kopper Acked-by: Srinivas Pandruvada Link: https://patch.msgid.link/Zqu6zjVMoiXwROBI@capivara Signed-off-by: Rafael J. 
Wysocki [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4164 --- drivers/cpufreq/intel_pstate.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 471725fe0211..08689264b54a 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -3404,6 +3404,7 @@ static const struct x86_cpu_id intel_epp_default[] = { */ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), + X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)), {} }; -- Gitee From a098e5a6c6acd0354ace1d930b4058d82a3ebf48 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Sat, 31 Aug 2024 17:37:49 +0800 Subject: [PATCH 1823/2138] virtiofs: use pages instead of pointer for kernel direct IO ANBZ: #12158 commit 41748675c0bf252b3c5f600a95830f0936d366c1 upstream. When trying to insert a 10MB kernel module kept in a virtio-fs with cache disabled, the following warning was reported: ------------[ cut here ]------------ WARNING: CPU: 1 PID: 404 at mm/page_alloc.c:4551 ...... Modules linked in: CPU: 1 PID: 404 Comm: insmod Not tainted 6.9.0-rc5+ #123 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996) ...... RIP: 0010:__alloc_pages+0x2bf/0x380 ...... Call Trace: ? __warn+0x8e/0x150 ? 
__alloc_pages+0x2bf/0x380 __kmalloc_large_node+0x86/0x160 __kmalloc+0x33c/0x480 virtio_fs_enqueue_req+0x240/0x6d0 virtio_fs_wake_pending_and_unlock+0x7f/0x190 queue_request_and_unlock+0x55/0x60 fuse_simple_request+0x152/0x2b0 fuse_direct_io+0x5d2/0x8c0 fuse_file_read_iter+0x121/0x160 __kernel_read+0x151/0x2d0 kernel_read+0x45/0x50 kernel_read_file+0x1a9/0x2a0 init_module_from_file+0x6a/0xe0 idempotent_init_module+0x175/0x230 __x64_sys_finit_module+0x5d/0xb0 x64_sys_call+0x1c3/0x9e0 do_syscall_64+0x3d/0xc0 entry_SYSCALL_64_after_hwframe+0x4b/0x53 ...... ---[ end trace 0000000000000000 ]--- The warning is triggered as follows: 1) syscall finit_module() handles the module insertion and it invokes kernel_read_file() to read the content of the module first. 2) kernel_read_file() allocates a 10MB buffer by using vmalloc() and passes it to kernel_read(). kernel_read() constructs a kvec iter by using iov_iter_kvec() and passes it to fuse_file_read_iter(). 3) virtio-fs disables the cache, so fuse_file_read_iter() invokes fuse_direct_io(). As for now, the maximal read size for kvec iter is only limited by fc->max_read. For virtio-fs, max_read is UINT_MAX, so fuse_direct_io() doesn't split the 10MB buffer. It saves the address and the size of the 10MB-sized buffer in out_args[0] of a fuse request and passes the fuse request to virtio_fs_wake_pending_and_unlock(). 4) virtio_fs_wake_pending_and_unlock() uses virtio_fs_enqueue_req() to queue the request. Because virtiofs need DMA-able address, so virtio_fs_enqueue_req() uses kmalloc() to allocate a bounce buffer for all fuse args, copies these args into the bounce buffer and passed the physical address of the bounce buffer to virtiofsd. 
The total length of these fuse args for the passed fuse request is about 10MB, so copy_args_to_argbuf() invokes kmalloc() with a 10MB size parameter and it triggers the warning in __alloc_pages(): if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp)) return NULL; 5) virtio_fs_enqueue_req() will retry the memory allocation in a kworker, but it won't help, because kmalloc() will always return NULL due to the abnormal size and finit_module() will hang forever. A feasible solution is to limit the value of max_read for virtio-fs, so the length passed to kmalloc() will be limited. However it will affect the maximal read size for normal read. And for virtio-fs write initiated from kernel, it has the similar problem but now there is no way to limit fc->max_write in kernel. So instead of limiting both the values of max_read and max_write in kernel, introducing use_pages_for_kvec_io in fuse_conn and setting it as true in virtiofs. When use_pages_for_kvec_io is enabled, fuse will use pages instead of pointer to pass the KVEC_IO data. After switching to pages for KVEC_IO data, these pages will be used for DMA through virtio-fs. If these pages are backed by vmalloc(), {flush|invalidate}_kernel_vmap_range() are necessary to flush or invalidate the cache before the DMA operation. So add two new fields in fuse_args_pages to record the base address of vmalloc area and the condition indicating whether invalidation is needed. Perform the flush in fuse_get_user_pages() for write operations and the invalidation in fuse_release_user_pages() for read operations. It may seem necessary to introduce another field in fuse_conn to indicate that these KVEC_IO pages are used for DMA, However, considering that virtio-fs is currently the only user of use_pages_for_kvec_io, just reuse use_pages_for_kvec_io to indicate that these pages will be used for DMA. 
Fixes: a62a8ef9d97d ("virtio-fs: add virtiofs filesystem") Signed-off-by: Hou Tao Tested-by: Jingbo Xu Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4166 --- fs/fuse/file.c | 62 +++++++++++++++++++++++++++++++-------------- fs/fuse/fuse_i.h | 6 +++++ fs/fuse/virtio_fs.c | 1 + 3 files changed, 50 insertions(+), 19 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 913129b7d8e0..8b9d6e10645d 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -645,7 +645,7 @@ void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos, args->out_args[0].size = count; } -static void fuse_release_user_pages(struct fuse_args_pages *ap, +static void fuse_release_user_pages(struct fuse_args_pages *ap, ssize_t nres, bool should_dirty) { unsigned int i; @@ -656,6 +656,9 @@ static void fuse_release_user_pages(struct fuse_args_pages *ap, if (ap->args.is_pinned) unpin_user_page(ap->pages[i]); } + + if (nres > 0 && ap->args.invalidate_vmap) + invalidate_kernel_vmap_range(ap->args.vmap_base, nres); } static void fuse_io_release(struct kref *kref) @@ -754,25 +757,29 @@ static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args, struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args); struct fuse_io_priv *io = ia->io; ssize_t pos = -1; - - fuse_release_user_pages(&ia->ap, io->should_dirty); + size_t nres; if (err) { /* Nothing */ } else if (io->write) { if (ia->write.out.size > ia->write.in.size) { err = -EIO; - } else if (ia->write.in.size != ia->write.out.size) { - pos = ia->write.in.offset - io->offset + - ia->write.out.size; + } else { + nres = ia->write.out.size; + if (ia->write.in.size != ia->write.out.size) + pos = ia->write.in.offset - io->offset + + ia->write.out.size; } } else { u32 outsize = args->out_args[0].size; + nres = outsize; if (ia->read.in.size != outsize) pos = ia->read.in.offset - io->offset + outsize; } + 
fuse_release_user_pages(&ia->ap, err ?: nres, io->should_dirty); + fuse_aio_complete(io, err, pos); fuse_io_free(ia); } @@ -1471,24 +1478,37 @@ static inline size_t fuse_get_frag_size(const struct iov_iter *ii, static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, size_t *nbytesp, int write, - unsigned int max_pages) + unsigned int max_pages, + bool use_pages_for_kvec_io) { + bool flush_or_invalidate = false; size_t nbytes = 0; /* # bytes already packed in req */ ssize_t ret = 0; - /* Special case for kernel I/O: can copy directly into the buffer */ + /* Special case for kernel I/O: can copy directly into the buffer. + * However if the implementation of fuse_conn requires pages instead of + * pointer (e.g., virtio-fs), use iov_iter_extract_pages() instead. + */ if (iov_iter_is_kvec(ii)) { - unsigned long user_addr = fuse_get_user_addr(ii); - size_t frag_size = fuse_get_frag_size(ii, *nbytesp); + void *user_addr = (void *)fuse_get_user_addr(ii); - if (write) - ap->args.in_args[1].value = (void *) user_addr; - else - ap->args.out_args[0].value = (void *) user_addr; + if (!use_pages_for_kvec_io) { + size_t frag_size = fuse_get_frag_size(ii, *nbytesp); - iov_iter_advance(ii, frag_size); - *nbytesp = frag_size; - return 0; + if (write) + ap->args.in_args[1].value = user_addr; + else + ap->args.out_args[0].value = user_addr; + + iov_iter_advance(ii, frag_size); + *nbytesp = frag_size; + return 0; + } + + if (is_vmalloc_addr(user_addr)) { + ap->args.vmap_base = user_addr; + flush_or_invalidate = true; + } } while (nbytes < *nbytesp && ap->num_pages < max_pages) { @@ -1517,6 +1537,10 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, (PAGE_SIZE - ret) & (PAGE_SIZE - 1); } + if (write && flush_or_invalidate) + flush_kernel_vmap_range(ap->args.vmap_base, nbytes); + + ap->args.invalidate_vmap = !write && flush_or_invalidate; ap->args.is_pinned = iov_iter_extract_will_pin(ii); ap->args.user_pages = true; if (write) @@ 
-1585,7 +1609,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, size_t nbytes = min(count, nmax); err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write, - max_pages); + max_pages, fc->use_pages_for_kvec_io); if (err && !nbytes) break; @@ -1599,7 +1623,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, } if (!io->async || nres < 0) { - fuse_release_user_pages(&ia->ap, io->should_dirty); + fuse_release_user_pages(&ia->ap, nres, io->should_dirty); fuse_io_free(ia); } ia = NULL; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index c33e34c04b81..ab4d3a383c52 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -290,9 +290,12 @@ struct fuse_args { bool may_block:1; bool is_ext:1; bool is_pinned:1; + bool invalidate_vmap:1; struct fuse_in_arg in_args[3]; struct fuse_arg out_args[2]; void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error); + /* Used for kvec iter backed by vmalloc address */ + void *vmap_base; }; struct fuse_args_pages { @@ -822,6 +825,9 @@ struct fuse_conn { /* Is statx not implemented by fs? 
*/ unsigned int no_statx:1; + /* Use pages instead of pointer for kernel I/O */ + unsigned int use_pages_for_kvec_io:1; + /** The number of requests waiting for completion */ atomic_t num_waiting; diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 4916ef656b49..2c639e528494 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -1689,6 +1689,7 @@ static int virtio_fs_get_tree(struct fs_context *fsc) fc->delete_stale = true; fc->auto_submounts = true; fc->sync_fs = true; + fc->use_pages_for_kvec_io = true; /* Tell FUSE to split requests that exceed the virtqueue's size */ fc->max_pages_limit = min_t(unsigned int, fc->max_pages_limit, -- Gitee From 8abaa6137bdd82bbe69291a2d4d169f447a6065d Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Sat, 31 Aug 2024 17:37:50 +0800 Subject: [PATCH 1824/2138] virtiofs: use GFP_NOFS when enqueuing request through kworker ANBZ: #12158 commit 86b74eb5a11e878151eb429c3810f1dcda090b8c upstream. When invoking virtio_fs_enqueue_req() through kworker, both the allocation of the sg array and the bounce buffer still use GFP_ATOMIC. Considering the size of the sg array may be greater than PAGE_SIZE, use GFP_NOFS instead of GFP_ATOMIC to lower the possibility of memory allocation failure and to avoid unnecessarily depleting the atomic reserves. GFP_NOFS is not passed to virtio_fs_enqueue_req() directly, GFP_KERNEL and memalloc_nofs_{save|restore} helpers are used instead. It may seem OK to pass GFP_NOFS to virtio_fs_enqueue_req() as well when queuing the request for the first time, but this is not the case. The reason is that fuse_request_queue_background() may call ->queue_request_and_unlock() while holding fc->bg_lock, which is a spin-lock. Therefore, still use GFP_ATOMIC for it. 
Signed-off-by: Hou Tao Reviewed-by: Jingbo Xu Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4166 --- fs/fuse/virtio_fs.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 2c639e528494..da036795b5fb 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -96,7 +96,8 @@ struct virtio_fs_req_work { }; static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, - struct fuse_req *req, bool in_flight); + struct fuse_req *req, bool in_flight, + gfp_t gfp); static const struct constant_table dax_param_enums[] = { {"always", FUSE_DAX_ALWAYS }, @@ -574,6 +575,8 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work) /* Dispatch pending requests */ while (1) { + unsigned int flags; + spin_lock(&fsvq->lock); req = list_first_entry_or_null(&fsvq->queued_reqs, struct fuse_req, list); @@ -584,7 +587,9 @@ static void virtio_fs_request_dispatch_work(struct work_struct *work) list_del_init(&req->list); spin_unlock(&fsvq->lock); - ret = virtio_fs_enqueue_req(fsvq, req, true); + flags = memalloc_nofs_save(); + ret = virtio_fs_enqueue_req(fsvq, req, true, GFP_KERNEL); + memalloc_nofs_restore(flags); if (ret < 0) { if (ret == -ENOSPC) { spin_lock(&fsvq->lock); @@ -685,7 +690,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work) } /* Allocate and copy args into req->argbuf */ -static int copy_args_to_argbuf(struct fuse_req *req) +static int copy_args_to_argbuf(struct fuse_req *req, gfp_t gfp) { struct fuse_args *args = req->args; unsigned int offset = 0; @@ -699,7 +704,7 @@ static int copy_args_to_argbuf(struct fuse_req *req) len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) + fuse_len_args(num_out, args->out_args); - req->argbuf = kmalloc(len, GFP_ATOMIC); + req->argbuf = kmalloc(len, gfp); if (!req->argbuf) return -ENOMEM; @@ -1364,7 +1369,8 @@ static unsigned 
int sg_init_fuse_args(struct scatterlist *sg, /* Add a request to a virtqueue and kick the device */ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, - struct fuse_req *req, bool in_flight) + struct fuse_req *req, bool in_flight, + gfp_t gfp) { /* requests need at least 4 elements */ struct scatterlist *stack_sgs[6]; @@ -1385,8 +1391,8 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, /* Does the sglist fit on the stack? */ total_sgs = sg_count_fuse_req(req); if (total_sgs > ARRAY_SIZE(stack_sgs)) { - sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC); - sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC); + sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), gfp); + sg = kmalloc_array(total_sgs, sizeof(sg[0]), gfp); if (!sgs || !sg) { ret = -ENOMEM; goto out; @@ -1394,7 +1400,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, } /* Use a bounce buffer since stack args cannot be mapped */ - ret = copy_args_to_argbuf(req); + ret = copy_args_to_argbuf(req, gfp); if (ret < 0) goto out; @@ -1488,7 +1494,7 @@ static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req) queue_id); fsvq = &fs->vqs[queue_id]; - ret = virtio_fs_enqueue_req(fsvq, req, false); + ret = virtio_fs_enqueue_req(fsvq, req, false, GFP_ATOMIC); if (ret < 0) { if (ret == -ENOSPC) { /* -- Gitee From c8e5fb6bc60076f8844d08699f6f54a061b5e1f0 Mon Sep 17 00:00:00 2001 From: yangyun Date: Mon, 26 Aug 2024 21:06:12 +0800 Subject: [PATCH 1825/2138] fuse: remove useless IOCB_DIRECT in fuse_direct_read/write_iter ANBZ: #12158 commit cc23d537e56153560bb2f88fd826675a5a8c6af6 upstream. Commit 23c94e1cdcbf ("fuse: Switch to using async direct IO for FOPEN_DIRECT_IO") gave the async direct IO code path in the fuse_direct_read_iter() and fuse_direct_write_iter(). 
But since these two functions are only called under FOPEN_DIRECT_IO is set, it seems that we can also use the async direct IO even the flag IOCB_DIRECT is not set to enjoy the async direct IO method. Also move the definition of fuse_io_priv to where it is used in fuse_ direct_write_iter. Signed-off-by: yangyun Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4166 --- fs/fuse/file.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 8b9d6e10645d..7ba75b6e6ad9 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1677,7 +1677,7 @@ static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to) { ssize_t res; - if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) { + if (!is_sync_kiocb(iocb)) { res = fuse_direct_IO(iocb, to); } else { struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); @@ -1691,7 +1691,6 @@ static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to) static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); - struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); ssize_t res; bool exclusive; @@ -1699,9 +1698,11 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) res = generic_write_checks(iocb, from); if (res > 0) { task_io_account_write(res); - if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) { + if (!is_sync_kiocb(iocb)) { res = fuse_direct_IO(iocb, from); } else { + struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); + res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE); fuse_write_update_attr(inode, iocb->ki_pos, res); -- Gitee From 615e970c4a920539590096103a84bbe09cfc83e4 Mon Sep 17 00:00:00 2001 From: Asahi Lina Date: Wed, 13 Nov 2024 04:55:32 +0900 Subject: [PATCH 1826/2138] virtiofs: dax: remove ->writepages() callback ANBZ: #12158 commit 
d1dfb5f52ffc4a142d88da5c0ed0514f3602c4b8 upstream. When using FUSE DAX with virtiofs, cache coherency is managed by the host. Disk persistence is handled via fsync() and friends, which are passed directly via the FUSE layer to the host. Therefore, there's no need to do dax_writeback_mapping_range(). All that ends up doing is a cache flush operation, which is not caught by KVM and doesn't do much, since the host and guest are already cache-coherent. Since dax_writeback_mapping_range() checks that the inode block size is equal to PAGE_SIZE, this fixes a spurious WARN when virtiofs is used with a mismatched guest PAGE_SIZE and virtiofs backing FS block size (this happens, for example, when it's a tmpfs and the host and guest have a different PAGE_SIZE). FUSE DAX does not require any particular FS block size, since it always performs DAX mappings in aligned 2MiB blocks. See discussion in [1]. [1] https://lore.kernel.org/lkml/20241101-dax-page-size-v1-1-eedbd0c6b08f@asahilina.net/T/#u [SzM: remove the empty callback] Suggested-by: Dan Williams Signed-off-by: Asahi Lina Acked-by: Dan Williams Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4166 --- fs/fuse/dax.c | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index 12ef91d170bb..9abbc2f2894f 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -774,16 +774,6 @@ ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from) return ret; } -static int fuse_dax_writepages(struct address_space *mapping, - struct writeback_control *wbc) -{ - - struct inode *inode = mapping->host; - struct fuse_conn *fc = get_fuse_conn(inode); - - return dax_writeback_mapping_range(mapping, fc->dax->dev, wbc); -} - static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order, bool write) { @@ -1323,7 +1313,6 @@ bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi) } static const 
struct address_space_operations fuse_dax_file_aops = { - .writepages = fuse_dax_writepages, .direct_IO = noop_direct_IO, .dirty_folio = noop_dirty_folio, }; -- Gitee From 731ee07cdee138e2e37dde76b860ed578d32a723 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Mon, 23 Sep 2024 10:13:11 -0700 Subject: [PATCH 1827/2138] fuse: enable dynamic configuration of fuse max pages limit (FUSE_MAX_MAX_PAGES) ANBZ: #12158 commit 2b3933b1e0a0a4b758fbc164bb31db0c113a7e2c upstream. Introduce the capability to dynamically configure the max pages limit (FUSE_MAX_MAX_PAGES) through a sysctl. This allows system administrators to dynamically set the maximum number of pages that can be used for servicing requests in fuse. Previously, this is gated by FUSE_MAX_MAX_PAGES which is statically set to 256 pages. One result of this is that the buffer size for a write request is limited to 1 MiB on a 4k-page system. The default value for this sysctl is the original limit (256 pages). $ sysctl -a | grep max_pages_limit fs.fuse.max_pages_limit = 256 $ sysctl -n fs.fuse.max_pages_limit 256 $ echo 1024 | sudo tee /proc/sys/fs/fuse/max_pages_limit 1024 $ sysctl -n fs.fuse.max_pages_limit 1024 $ echo 65536 | sudo tee /proc/sys/fs/fuse/max_pages_limit tee: /proc/sys/fs/fuse/max_pages_limit: Invalid argument $ echo 0 | sudo tee /proc/sys/fs/fuse/max_pages_limit tee: /proc/sys/fs/fuse/max_pages_limit: Invalid argument $ echo 65535 | sudo tee /proc/sys/fs/fuse/max_pages_limit 65535 $ sysctl -n fs.fuse.max_pages_limit 65535 Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Reviewed-by: Sweet Tea Dorminy Signed-off-by: Miklos Szeredi [jingbo: set default fuse_max_pages_limit to 1024] Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4166 --- Documentation/admin-guide/sysctl/fs.rst | 10 +++++++ fs/fuse/Makefile | 1 + fs/fuse/fuse_i.h | 14 +++++++-- fs/fuse/inode.c | 11 ++++++- fs/fuse/sysctl.c | 40 +++++++++++++++++++++++++ 5 files changed, 72 
insertions(+), 4 deletions(-) create mode 100644 fs/fuse/sysctl.c diff --git a/Documentation/admin-guide/sysctl/fs.rst b/Documentation/admin-guide/sysctl/fs.rst index 11b2dd4ef5ae..59174739319c 100644 --- a/Documentation/admin-guide/sysctl/fs.rst +++ b/Documentation/admin-guide/sysctl/fs.rst @@ -341,3 +341,13 @@ Each "watch" costs roughly 90 bytes on a 32-bit kernel, and roughly 160 bytes on a 64-bit one. The current default value for ``max_user_watches`` is 4% of the available low memory, divided by the "watch" cost in bytes. + +5. /proc/sys/fs/fuse - Configuration options for FUSE filesystems +===================================================================== + +This directory contains the following configuration options for FUSE +filesystems: + +``/proc/sys/fs/fuse/max_pages_limit`` is a read/write file for +setting/getting the maximum number of pages that can be used for servicing +requests in FUSE. diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile index 2b5cf7bc5b58..0afc54209c49 100644 --- a/fs/fuse/Makefile +++ b/fs/fuse/Makefile @@ -14,5 +14,6 @@ obj-$(CONFIG_VIRT_FUSE) += virtfuse.o fuse-y := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o ioctl.o fuse-y += iomode.o fuse-$(CONFIG_FUSE_DAX) += dax.o +fuse-$(CONFIG_SYSCTL) += sysctl.o virtiofs-y := virtio_fs.o diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index ab4d3a383c52..a46d69ba0110 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -36,9 +36,6 @@ /** Default max number of pages that can be used in a single read request */ #define FUSE_DEFAULT_MAX_PAGES_PER_REQ 32 -/** Maximum of max_pages received in init_out */ -#define FUSE_MAX_MAX_PAGES 1024 - /** Bias for fi->writectr, meaning new writepages must not be sent */ #define FUSE_NOWRITE INT_MIN @@ -48,6 +45,9 @@ /** Number of dentries for each connection in the control filesystem */ #define FUSE_CTL_NUM_DENTRIES 5 +/** Maximum of max_pages received in init_out */ +extern unsigned int fuse_max_pages_limit; + /** List of active 
connections */ extern struct list_head fuse_conn_list; @@ -1375,4 +1375,12 @@ static inline bool is_virtfuse_device(struct file *file) static inline bool is_virtfuse_device(struct file *file) { return false; } #endif +#ifdef CONFIG_SYSCTL +extern int fuse_sysctl_register(void); +extern void fuse_sysctl_unregister(void); +#else +#define fuse_sysctl_register() (0) +#define fuse_sysctl_unregister() do { } while (0) +#endif /* CONFIG_SYSCTL */ + #endif /* _FS_FUSE_I_H */ diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index d33b04e6f616..093b7070406e 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -35,6 +35,8 @@ DEFINE_MUTEX(fuse_mutex); static int set_global_limit(const char *val, const struct kernel_param *kp); +unsigned int fuse_max_pages_limit = 1024; + unsigned max_user_bgreq; module_param_call(max_user_bgreq, set_global_limit, param_get_uint, &max_user_bgreq, 0644); @@ -947,7 +949,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm, fc->pid_ns = get_pid_ns(task_active_pid_ns(current)); fc->user_ns = get_user_ns(user_ns); fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; - fc->max_pages_limit = FUSE_MAX_MAX_PAGES; + fc->max_pages_limit = fuse_max_pages_limit; INIT_LIST_HEAD(&fc->mounts); list_add(&fm->fc_entry, &fc->mounts); @@ -2045,8 +2047,14 @@ static int __init fuse_fs_init(void) if (err) goto out3; + err = fuse_sysctl_register(); + if (err) + goto out4; + return 0; + out4: + unregister_filesystem(&fuse_fs_type); out3: unregister_fuseblk(); out2: @@ -2057,6 +2065,7 @@ static int __init fuse_fs_init(void) static void fuse_fs_cleanup(void) { + fuse_sysctl_unregister(); unregister_filesystem(&fuse_fs_type); unregister_fuseblk(); diff --git a/fs/fuse/sysctl.c b/fs/fuse/sysctl.c new file mode 100644 index 000000000000..b272bb333005 --- /dev/null +++ b/fs/fuse/sysctl.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/fs/fuse/fuse_sysctl.c + * + * Sysctl interface to fuse parameters + */ +#include + +#include "fuse_i.h" + 
+static struct ctl_table_header *fuse_table_header; + +/* Bound by fuse_init_out max_pages, which is a u16 */ +static unsigned int sysctl_fuse_max_pages_limit = 65535; + +static struct ctl_table fuse_sysctl_table[] = { + { + .procname = "max_pages_limit", + .data = &fuse_max_pages_limit, + .maxlen = sizeof(fuse_max_pages_limit), + .mode = 0644, + .proc_handler = proc_douintvec_minmax, + .extra1 = SYSCTL_ONE, + .extra2 = &sysctl_fuse_max_pages_limit, + }, +}; + +int fuse_sysctl_register(void) +{ + fuse_table_header = register_sysctl("fs/fuse", fuse_sysctl_table); + if (!fuse_table_header) + return -ENOMEM; + return 0; +} + +void fuse_sysctl_unregister(void) +{ + unregister_sysctl_table(fuse_table_header); + fuse_table_header = NULL; +} -- Gitee From 984c312a35c358f2a3f23c3331e4b1f3bd72cd05 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Sat, 24 Aug 2024 13:04:40 +1200 Subject: [PATCH 1828/2138] mm: count the number of anonymous THPs per size ANBZ: #9728 commit 5d65c8d758f2596c008009e39bb2614deed2c730 upstream Patch series "mm: count the number of anonymous THPs per size", v4. Knowing the number of transparent anon THPs in the system is crucial for performance analysis. It helps in understanding the ratio and distribution of THPs versus small folios throughout the system. Additionally, partial unmapping by userspace can lead to significant waste of THPs over time and increase memory reclamation pressure. We need this information for comprehensive system tuning. This patch (of 2): Let's track for each anonymous THP size, how many of them are currently allocated. We'll track the complete lifespan of an anon THP, starting when it becomes an anon THP ("large anon folio") (->mapping gets set), until it gets freed (->mapping gets cleared). Introduce a new "nr_anon" counter per THP size and adjust the corresponding counter in the following cases: * We allocate a new THP and call folio_add_new_anon_rmap() to map it the first time and turn it into an anon THP. 
* We split an anon THP into multiple smaller ones. * We migrate an anon THP, when we prepare the destination. * We free an anon THP back to the buddy. Note that AnonPages in /proc/meminfo currently tracks the total number of *mapped* anonymous *pages*, and therefore has slightly different semantics. In the future, we might also want to track "nr_anon_mapped" for each THP size, which might be helpful when comparing it to the number of allocated anon THPs (long-term pinning, stuck in swapcache, memory leaks, ...). Further note that for now, we only track anon THPs after they got their ->mapping set, for example via folio_add_new_anon_rmap(). If we would allocate some in the swapcache, they will only show up in the statistics for now after they have been mapped to user space the first time, where we call folio_add_new_anon_rmap(). [akpm@linux-foundation.org: documentation fixups, per David] Link: https://lkml.kernel.org/r/3e8add35-e26b-443b-8a04-1078f4bc78f6@redhat.com Link: https://lkml.kernel.org/r/20240824010441.21308-1-21cnbao@gmail.com Link: https://lkml.kernel.org/r/20240824010441.21308-2-21cnbao@gmail.com Signed-off-by: Barry Song Acked-by: David Hildenbrand Cc: Baolin Wang Cc: Chris Li Cc: Chuanhua Han Cc: Kairui Song Cc: Kalesh Singh Cc: Lance Yang Cc: Ryan Roberts Cc: Shuai Yuan Cc: Usama Arif Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4175 --- Documentation/admin-guide/mm/transhuge.rst | 5 +++++ include/linux/huge_mm.h | 15 +++++++++++++-- mm/huge_memory.c | 10 ++++++++-- mm/migrate.c | 4 ++++ mm/page_alloc.c | 5 ++++- mm/rmap.c | 1 + 6 files changed, 35 insertions(+), 5 deletions(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index a71aa21a6874..47d89977c1c0 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -526,6 +526,11 @@ split_deferred it would 
free up some memory. Pages on split queue are going to be split under memory pressure, if splitting is possible. +nr_anon + the number of anonymous THP we have in the whole system. These THPs + might be currently entirely mapped or have partially unmapped/unused + subpages. + As the system ages, allocating huge pages may be expensive as the system uses memory compaction to copy data around memory to free a huge page for use. There are some counters in ``/proc/vmstat`` to help diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 95fe4fa24c24..edda04fd6ac5 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -107,6 +107,7 @@ enum mthp_stat_item { MTHP_STAT_SPLIT, MTHP_STAT_SPLIT_FAILED, MTHP_STAT_SPLIT_DEFERRED, + MTHP_STAT_NR_ANON, __MTHP_STAT_COUNT }; @@ -117,14 +118,24 @@ struct mthp_stat { DECLARE_PER_CPU(struct mthp_stat, mthp_stats); -static inline void count_mthp_stat(int order, enum mthp_stat_item item) +static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta) { if (order <= 0 || order > PMD_ORDER) return; - this_cpu_inc(mthp_stats.stats[order][item]); + this_cpu_add(mthp_stats.stats[order][item], delta); +} + +static inline void count_mthp_stat(int order, enum mthp_stat_item item) +{ + mod_mthp_stat(order, item, 1); } + #else +static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta) +{ +} + static inline void count_mthp_stat(int order, enum mthp_stat_item item) { } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 836869205813..1d262f0caa17 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -580,6 +580,7 @@ DEFINE_MTHP_STAT_ATTR(shmem_fallback_charge, MTHP_STAT_SHMEM_FALLBACK_CHARGE); DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT); DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED); DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED); +DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON); static struct attribute *anon_stats_attrs[] = { 
&anon_fault_alloc_attr.attr, @@ -590,6 +591,7 @@ static struct attribute *anon_stats_attrs[] = { &swpout_fallback_attr.attr, #endif &split_deferred_attr.attr, + &nr_anon_attr.attr, NULL, }; @@ -2986,8 +2988,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) struct folio *folio = page_folio(page); struct deferred_split *ds_queue = get_deferred_split_queue(folio); XA_STATE(xas, &folio->mapping->i_pages, folio->index); - struct anon_vma *anon_vma = NULL; + bool is_anon = folio_test_anon(folio); struct address_space *mapping = NULL; + struct anon_vma *anon_vma = NULL; int order = folio_order(folio); int extra_pins, ret; pgoff_t end; @@ -3005,7 +3008,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) if (folio_test_writeback(folio)) return -EBUSY; - if (folio_test_anon(folio)) { + if (is_anon) { /* * The caller does not necessarily hold an mmap_lock that would * prevent the anon_vma disappearing so we first we take a @@ -3111,6 +3114,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) } } + if (is_anon) + mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); + __split_huge_page(page, list, end); ret = 0; } else { diff --git a/mm/migrate.c b/mm/migrate.c index b341d0517e98..33bdbe29ba40 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -415,6 +415,8 @@ int folio_migrate_mapping(struct address_space *mapping, /* No turning back from here */ newfolio->index = folio->index; newfolio->mapping = folio->mapping; + if (folio_test_anon(folio) && folio_test_large(folio)) + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); if (folio_test_swapbacked(folio)) __folio_set_swapbacked(newfolio); @@ -436,6 +438,8 @@ int folio_migrate_mapping(struct address_space *mapping, */ newfolio->index = folio->index; newfolio->mapping = folio->mapping; + if (folio_test_anon(folio) && folio_test_large(folio)) + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); folio_ref_add(newfolio, nr); /* add cache reference */ if 
(folio_test_swapbacked(folio)) { __folio_set_swapbacked(newfolio); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 88d16b47badb..dd0cf7492de6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1146,8 +1146,11 @@ __always_inline bool free_pages_prepare(struct page *page, (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; } } - if (PageMappingFlags(page)) + if (PageMappingFlags(page)) { + if (PageAnon(page)) + mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); page->mapping = NULL; + } if (memcg_kmem_online() && PageMemcgKmem(page)) __memcg_kmem_uncharge_page(page, order); if (is_check_pages_enabled()) { diff --git a/mm/rmap.c b/mm/rmap.c index cea4983acbe0..4bb0ef91d69a 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1407,6 +1407,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, } __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr); + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); } static __always_inline void __folio_add_file_rmap(struct folio *folio, -- Gitee From 496b4a7d8b36bb4f89920e7be0b9c3717b27862d Mon Sep 17 00:00:00 2001 From: Barry Song Date: Sat, 24 Aug 2024 13:04:41 +1200 Subject: [PATCH 1829/2138] mm: count the number of partially mapped anonymous THPs per size ANBZ: #9728 commit 8175ebfd302abe6fbdca9037f763ecbfdb8db572 upstream When a THP is added to the deferred_list due to partially mapped, its partial pages are unused, leading to wasted memory and potentially increasing memory reclamation pressure. Detailing the specifics of how unmapping occurs is quite difficult and not that useful, so we adopt a simple approach: each time a THP enters the deferred_list, we increment the count by 1; whenever it leaves for any reason, we decrement the count by 1. 
Link: https://lkml.kernel.org/r/20240824010441.21308-3-21cnbao@gmail.com Signed-off-by: Barry Song Acked-by: David Hildenbrand Cc: Baolin Wang Cc: Chris Li Cc: Chuanhua Han Cc: Kairui Song Cc: Kalesh Singh Cc: Lance Yang Cc: Ryan Roberts Cc: Shuai Yuan Cc: Usama Arif Cc: Zi Yan Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4175 --- Documentation/admin-guide/mm/transhuge.rst | 7 +++++++ include/linux/huge_mm.h | 1 + mm/huge_memory.c | 6 ++++++ 3 files changed, 14 insertions(+) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 47d89977c1c0..4ece123bc5e6 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -531,6 +531,13 @@ nr_anon might be currently entirely mapped or have partially unmapped/unused subpages. +nr_anon_partially_mapped + the number of anonymous THP which are likely partially mapped, possibly + wasting memory, and have been queued for deferred memory reclamation. + Note that in some corner cases (e.g., failed migration), we might detect + an anonymous THP as "partially mapped" and count it here, even though it + is not actually partially mapped anymore. + As the system ages, allocating huge pages may be expensive as the system uses memory compaction to copy data around memory to free a huge page for use. 
There are some counters in ``/proc/vmstat`` to help diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index edda04fd6ac5..f6c139d2edf9 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -108,6 +108,7 @@ enum mthp_stat_item { MTHP_STAT_SPLIT_FAILED, MTHP_STAT_SPLIT_DEFERRED, MTHP_STAT_NR_ANON, + MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, __MTHP_STAT_COUNT }; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1d262f0caa17..bf29e70bb5e4 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -581,6 +581,7 @@ DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT); DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED); DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED); DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON); +DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED); static struct attribute *anon_stats_attrs[] = { &anon_fault_alloc_attr.attr, @@ -592,6 +593,7 @@ static struct attribute *anon_stats_attrs[] = { #endif &split_deferred_attr.attr, &nr_anon_attr.attr, + &nr_anon_partially_mapped_attr.attr, NULL, }; @@ -3095,6 +3097,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) if (folio_order(folio) > 1 && !list_empty(&folio->_deferred_list)) { ds_queue->split_queue_len--; + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); list_del_init(&folio->_deferred_list); } spin_unlock(&ds_queue->split_queue_lock); @@ -3170,6 +3173,7 @@ bool __folio_unqueue_deferred_split(struct folio *folio) spin_lock_irqsave(&ds_queue->split_queue_lock, flags); if (!list_empty(&folio->_deferred_list)) { ds_queue->split_queue_len--; + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); list_del_init(&folio->_deferred_list); unqueued = true; } @@ -3211,6 +3215,7 @@ void deferred_split_folio(struct folio *folio) if (folio_test_pmd_mappable(folio)) count_vm_event(THP_DEFERRED_SPLIT_PAGE); count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED); + 
mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1); list_add_tail(&folio->_deferred_list, &ds_queue->split_queue); ds_queue->split_queue_len++; #ifdef CONFIG_MEMCG @@ -3258,6 +3263,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink, list_move(&folio->_deferred_list, &list); } else { /* We lost race with folio_put() */ + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1); list_del_init(&folio->_deferred_list); ds_queue->split_queue_len--; } -- Gitee From 33e28aa22135eb88c22a1398479bcfb4f84bcb2b Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Sun, 22 Sep 2024 12:32:13 +0800 Subject: [PATCH 1830/2138] mm: shmem: fix khugepaged activation policy for shmem ANBZ: #9728 commit d2d243df445a88c26e91eac02b041213c7a32e9e upstream Shmem has a separate interface (different from anonymous pages) to control huge page allocation, that means shmem THP can be enabled while anonymous THP is disabled. However, in this case, khugepaged will not start to collapse shmem THP, which is unreasonable. To fix this issue, we should call start_stop_khugepaged() to activate or deactivate the khugepaged thread when setting shmem mTHP interfaces. Moreover, add a new helper shmem_hpage_pmd_enabled() to help to check whether shmem THP is enabled, which will determine if khugepaged should be activated. 
Link: https://lkml.kernel.org/r/9b9c6cbc4499bf44c6455367fd9e0f6036525680.1726978977.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reported-by: Ryan Roberts Reviewed-by: Ryan Roberts Cc: David Hildenbrand Cc: Hugh Dickins Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4175 --- include/linux/shmem_fs.h | 6 ++++++ mm/internal.h | 1 + mm/khugepaged.c | 6 +++++- mm/shmem.c | 29 +++++++++++++++++++++++++++-- 4 files changed, 39 insertions(+), 3 deletions(-) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 57e8a6689439..9add4f6f8c57 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -118,6 +118,7 @@ int shmem_unuse(unsigned int type); unsigned long shmem_allowable_huge_orders(struct inode *inode, struct vm_area_struct *vma, pgoff_t index, bool shmem_huge_force); +bool shmem_hpage_pmd_enabled(void); #else static inline unsigned long shmem_allowable_huge_orders(struct inode *inode, struct vm_area_struct *vma, pgoff_t index, @@ -125,6 +126,11 @@ static inline unsigned long shmem_allowable_huge_orders(struct inode *inode, { return 0; } + +static inline bool shmem_hpage_pmd_enabled(void) +{ + return false; +} #endif #ifdef CONFIG_SHMEM diff --git a/mm/internal.h b/mm/internal.h index 3c90a44ac7b3..5bc0370185cd 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -8,6 +8,7 @@ #define __MM_INTERNAL_H #include +#include #include #include #include diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 5ef0182d1c88..11cb513add0c 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -412,9 +412,11 @@ static inline int hpage_collapse_test_exit(struct mm_struct *mm) static bool hugepage_pmd_enabled(void) { /* - * We cover both the anon and the file-backed case here; file-backed + * We cover the anon, shmem and the file-backed case here; file-backed * hugepages, when configured in, are determined by the global control. 
* Anon pmd-sized hugepages are determined by the pmd-size control. + * Shmem pmd-sized hugepages are also determined by its pmd-size control, + * except when the global shmem_huge is set to SHMEM_HUGE_DENY. */ if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && hugepage_global_enabled()) @@ -426,6 +428,8 @@ static bool hugepage_pmd_enabled(void) if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) && hugepage_global_enabled()) return true; + if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_pmd_enabled()) + return true; return false; } diff --git a/mm/shmem.c b/mm/shmem.c index 9d7df156f56f..4854cec9c4e4 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1670,6 +1670,23 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) } #ifdef CONFIG_TRANSPARENT_HUGEPAGE +bool shmem_hpage_pmd_enabled(void) +{ + if (shmem_huge == SHMEM_HUGE_DENY) + return false; + if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_always)) + return true; + if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_madvise)) + return true; + if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_within_size)) + return true; + if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_inherit) && + shmem_huge != SHMEM_HUGE_NEVER) + return true; + + return false; +} + unsigned long shmem_allowable_huge_orders(struct inode *inode, struct vm_area_struct *vma, pgoff_t index, bool shmem_huge_force) @@ -4982,7 +4999,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { char tmp[16]; - int huge; + int huge, err; if (count + 1 > sizeof(tmp)) return -EINVAL; @@ -5006,7 +5023,9 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, shmem_huge = huge; if (shmem_huge > SHMEM_HUGE_DENY) SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; - return count; + + err = start_stop_khugepaged(); + return err ? 
err : count; } struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled); @@ -5083,6 +5102,12 @@ static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj, ret = -EINVAL; } + if (ret > 0) { + int err = start_stop_khugepaged(); + + if (err) + ret = err; + } return ret; } -- Gitee From 5aec18476bf96fba589c92f0c5c92b21096788a0 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 18 Oct 2024 11:00:27 +0800 Subject: [PATCH 1831/2138] mm: shmem: update iocb->ki_pos directly to simplify tmpfs read logic ANBZ: #9728 commit f3650ef89b879d63c63f04e98481f7ed4df1119a upstream Patch series "Improve the tmpfs large folio read performance", v2. tmpfs already supports PMD-sized large folios, but the tmpfs read operation still performs copying at PAGE_SIZE granularity, which is not perfect. This patchset changes tmpfs to copy data at the folio granularity, which can improve the read performance. Use 'fio bs=64k' to read a 1G tmpfs file populated with 2M THPs, and I can see about 20% performance improvement, and no regression with bs=4k. I also did some functional testing with the xfstests suite, and I did not find any regressions with the following xfstests config: FSTYP=tmpfs export TEST_DIR=/mnt/tempfs_mnt export TEST_DEV=/mnt/tempfs_mnt export SCRATCH_MNT=/mnt/scratchdir export SCRATCH_DEV=/mnt/scratchdir This patch (of 2): Using iocb->ki_pos to check if the read bytes exceeds the file size and to calculate the bytes to be read can help simplify the code logic. Meanwhile, this is also a preparation for improving tmpfs large folios read performance in the following patch. 
Link: https://lkml.kernel.org/r/cover.1729218573.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/e8863e289577e0dc1e365b5419bf2d1c9a24ae3d.1729218573.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: Yang Shi Cc: David Hildenbrand Cc: Hugh Dickins Cc: Kefeng Wang Cc: Matthew Wilcox Signed-off-by: Andrew Morton Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4175 --- mm/shmem.c | 35 +++++++++++------------------------ 1 file changed, 11 insertions(+), 24 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 4854cec9c4e4..72fe3c7cfb63 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3069,27 +3069,19 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) unsigned long offset; int error = 0; ssize_t retval = 0; - loff_t *ppos = &iocb->ki_pos; - index = *ppos >> PAGE_SHIFT; - offset = *ppos & ~PAGE_MASK; + offset = iocb->ki_pos & ~PAGE_MASK; for (;;) { struct folio *folio = NULL; struct page *page = NULL; - pgoff_t end_index; unsigned long nr, ret; - loff_t i_size = i_size_read(inode); + loff_t end_offset, i_size = i_size_read(inode); - end_index = i_size >> PAGE_SHIFT; - if (index > end_index) + if (unlikely(iocb->ki_pos >= i_size)) break; - if (index == end_index) { - nr = i_size & ~PAGE_MASK; - if (nr <= offset) - break; - } + index = iocb->ki_pos >> PAGE_SHIFT; error = shmem_get_folio(inode, index, &folio, SGP_READ); if (error) { if (error == -EINVAL) @@ -3111,18 +3103,14 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) * We must evaluate after, since reads (unlike writes) * are called without i_rwsem protection against truncate */ - nr = PAGE_SIZE; i_size = i_size_read(inode); - end_index = i_size >> PAGE_SHIFT; - if (index == end_index) { - nr = i_size & ~PAGE_MASK; - if (nr <= offset) { - if (folio) - folio_put(folio); - break; - } + if (unlikely(iocb->ki_pos >= i_size)) { + if (folio) + folio_put(folio); + break; } - nr -= offset; + end_offset = 
min_t(loff_t, i_size, iocb->ki_pos + to->count); + nr = min_t(loff_t, end_offset - iocb->ki_pos, PAGE_SIZE - offset); if (folio) { /* @@ -3162,8 +3150,8 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) retval += ret; offset += ret; - index += offset >> PAGE_SHIFT; offset &= ~PAGE_MASK; + iocb->ki_pos += ret; if (!iov_iter_count(to)) break; @@ -3174,7 +3162,6 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) cond_resched(); } - *ppos = ((loff_t) index << PAGE_SHIFT) + offset; file_accessed(file); return retval ? retval : error; } -- Gitee From 976218af2b8d766b6e6d828f2d53a3d7a70b32ee Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 18 Oct 2024 11:00:28 +0800 Subject: [PATCH 1832/2138] mm: shmem: improve the tmpfs large folio read performance ANBZ: #9728 commit a284cb8472ec6bb027ebf3b936385601d8a8f414 upstream tmpfs already supports PMD-sized large folios, but the tmpfs read operation still performs copying at PAGE_SIZE granularity, which is unreasonable. This patch changes tmpfs to copy data at folio granularity, which can improve the read performance, as well as changing to use folio related functions. Moreover, if a large folio has a subpage that is hwpoisoned, it will still fall back to page granularity copying. Use 'fio bs=64k' to read a 1G tmpfs file populated with 2M THPs, and I can see about 20% performance improvement, and no regression with bs=4k. 
Before the patch: READ: bw=10.0GiB/s After the patch: READ: bw=12.0GiB/s Link: https://lkml.kernel.org/r/2129a21a5b9f77d3bb7ddec152c009ce7c5653c4.1729218573.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: Yang Shi Cc: David Hildenbrand Cc: Hugh Dickins Cc: Kefeng Wang Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4175 --- mm/shmem.c | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 72fe3c7cfb63..a5413999b52e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3070,13 +3070,13 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) int error = 0; ssize_t retval = 0; - offset = iocb->ki_pos & ~PAGE_MASK; - for (;;) { struct folio *folio = NULL; struct page *page = NULL; unsigned long nr, ret; loff_t end_offset, i_size = i_size_read(inode); + bool fallback_page_copy = false; + size_t fsize; if (unlikely(iocb->ki_pos >= i_size)) break; @@ -3097,6 +3097,10 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) error = -EIO; break; } + + if (folio_test_large(folio) && + folio_test_has_hwpoisoned(folio)) + fallback_page_copy = true; } /* @@ -3110,7 +3114,12 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) break; } end_offset = min_t(loff_t, i_size, iocb->ki_pos + to->count); - nr = min_t(loff_t, end_offset - iocb->ki_pos, PAGE_SIZE - offset); + if (folio && likely(!fallback_page_copy)) + fsize = folio_size(folio); + else + fsize = PAGE_SIZE; + offset = iocb->ki_pos & (fsize - 1); + nr = min_t(loff_t, end_offset - iocb->ki_pos, fsize - offset); if (folio) { /* @@ -3118,10 +3127,15 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) * virtual addresses, take care about potential aliasing * before reading the page on the kernel side. 
*/ - if (mapping_writably_mapped(mapping)) - flush_dcache_page(page); + if (mapping_writably_mapped(mapping)) { + if (likely(!fallback_page_copy)) + flush_dcache_folio(folio); + else + flush_dcache_page(page); + } + /* - * Mark the page accessed if we read the beginning. + * Mark the folio accessed if we read the beginning. */ if (!offset) folio_mark_accessed(folio); @@ -3129,9 +3143,11 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) * Ok, we have the page, and it's up-to-date, so * now we can copy it to user space... */ - ret = copy_page_to_iter(page, offset, nr, to); + if (likely(!fallback_page_copy)) + ret = copy_folio_to_iter(folio, offset, nr, to); + else + ret = copy_page_to_iter(page, offset, nr, to); folio_put(folio); - } else if (user_backed_iter(to)) { /* * Copy to user tends to be so well optimized, but @@ -3149,8 +3165,6 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) } retval += ret; - offset += ret; - offset &= ~PAGE_MASK; iocb->ki_pos += ret; if (!iov_iter_count(to)) -- Gitee From 9510a16c64448cd7462f28c8560e9bf9a3b2d44b Mon Sep 17 00:00:00 2001 From: LeoLiu-oc Date: Tue, 3 Dec 2024 11:19:11 +0800 Subject: [PATCH 1833/2138] anolis: perf/x86/zhaoxin/uncore: fix pci_driver conflict issue ANBZ: #12192 Some PCI drivers in the PMU uncore conflict with the inbox pcieport driver, which may lead to pcieport failure. 
Signed-off-by: LeoLiu-oc Reviewed-by: Xingrui Yi Reviewed-by: Zelin Deng Link: https://gitee.com/anolis/cloud-kernel/pulls/4184 --- arch/x86/events/zhaoxin/uncore.c | 135 ------------------------------- 1 file changed, 135 deletions(-) diff --git a/arch/x86/events/zhaoxin/uncore.c b/arch/x86/events/zhaoxin/uncore.c index 12f331334c40..30a51324f41a 100644 --- a/arch/x86/events/zhaoxin/uncore.c +++ b/arch/x86/events/zhaoxin/uncore.c @@ -933,56 +933,6 @@ static const struct pci_device_id kh40000_uncore_pci_ids[] = { .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_MC0, 0), }, - { /* PCIE D2F0 */ - PCI_DEVICE(0x1D17, 0x0717), - .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 0), - }, - - { /* PCIE D2F1 */ - PCI_DEVICE(0x1D17, 0x0718), - .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 1), - }, - - { /* PCIE D3F0 */ - PCI_DEVICE(0x1D17, 0x0719), - .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 2), - }, - - { /* PCIE D3F1 */ - PCI_DEVICE(0x1D17, 0x071A), - .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 3), - }, - - { /* PCIE D3F2 */ - PCI_DEVICE(0x1D17, 0x071B), - .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 4), - }, - - { /* PCIE D4F0 */ - PCI_DEVICE(0x1D17, 0x071C), - .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 5), - }, - - { /* PCIE D4F1 */ - PCI_DEVICE(0x1D17, 0x071D), - .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 6), - }, - - { /* PCIE D5F0 */ - PCI_DEVICE(0x1D17, 0x071E), - .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 7), - }, - - { /* PCIE D5F1 */ - PCI_DEVICE(0x1D17, 0x0731), - .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 8), - }, - - { /* PCIE D5F2 */ - PCI_DEVICE(0x1D17, 0x0732), - .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 9), - }, - { /* ZPI_DLL */ PCI_DEVICE(0x1D17, 0x91c1), .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_ZPI_DLL, 0), @@ -1274,91 +1224,6 @@ static const struct pci_device_id kx7000_uncore_pci_ids[] = 
{ .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_MC_A0, 0), }, - { /* PCIE D2F0 */ - PCI_DEVICE(0x1D17, 0x0717), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 0), - }, - - { /* PCIE D2F1 */ - PCI_DEVICE(0x1D17, 0x0718), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 1), - }, - - { /* PCIE D2F2 */ - PCI_DEVICE(0x1D17, 0x0733), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 2), - }, - - { /* PCIE D2F3 */ - PCI_DEVICE(0x1D17, 0x0734), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 3), - }, - - { /* PCIE D3F0 */ - PCI_DEVICE(0x1D17, 0x0719), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 4), - }, - - { /* PCIE D3F1 */ - PCI_DEVICE(0x1D17, 0x0735), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 5), - }, - - { /* PCIE D3F2 */ - PCI_DEVICE(0x1D17, 0x0739), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 6), - }, - - { /* PCIE D3F3 */ - PCI_DEVICE(0x1D17, 0x073A), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 7), - }, - - { /* PCIE D4F0 */ - PCI_DEVICE(0x1D17, 0x071B), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 8), - }, - - { /* PCIE D4F1 */ - PCI_DEVICE(0x1D17, 0x071C), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 9), - }, - - { /* PCIE D4F2 */ - PCI_DEVICE(0x1D17, 0x0736), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 10), - }, - - { /* PCIE D4F3 */ - PCI_DEVICE(0x1D17, 0x0737), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 11), - }, - - { /* PCIE D4F4 */ - PCI_DEVICE(0x1D17, 0x0738), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 12), - }, - - { /* PCIE D5F0 */ - PCI_DEVICE(0x1D17, 0x071D), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 13), - }, - - { /* PCIE D5F1 */ - PCI_DEVICE(0x1D17, 0x071E), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 14), - }, - - { /* PCIE D5F2 */ - PCI_DEVICE(0x1D17, 0x0732), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 
15), - }, - - { /* PCIE D5F3 */ - PCI_DEVICE(0x1D17, 0x073B), - .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PCI, 16), - }, - { /* PXPTRF */ PCI_DEVICE(0x1D17, 0x31B4), .driver_data = UNCORE_PCI_DEV_DATA(KX7000_PCI_UNCORE_PXPTRF, 0), -- Gitee From cefc6d453fded1029300edd8364b20385f2a68a0 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Wed, 14 Aug 2019 03:11:42 +0800 Subject: [PATCH 1834/2138] anolis: mm: memcontrol: support background async page reclaim ANBZ: #11973 to #32655467 Currently when memory usage exceeds memory cgroup limit, memory cgroup just can do sync direct reclaim. This may incur unexpected stall on some applications which are sensitive to latency. Introduce background async page reclaim mechanism, like what kswapd does. Define memcg memory usage water mark by introducing wmark_ratio interface, which is from 0 to 100 and represents percentage of max limit. The wmark_high is calculated by (max * wmark_ratio / 100), the wmark_low is (wmark_high - wmark_high >> 8), which is an empirical value. If wmark_ratio is 0, it means water mark is disabled, both wmark_low and wmark_high is max, which is the default value. If wmark_ratio is setup, when charging page, if usage is greater than wmark_high, which means the available memory of memcg is low, a work would be scheduled to do background page reclaim until memory usage is reduced to wmark_low if possible. Define a dedicated unbound workqueue for scheduling water mark reclaim works. [ kun: addjust memcg->wmark_ratio into {READ,WRITE}_ONCE. ] [ kun: remove PF_SWAPWRITE according to b698f0a1773f7 ("mm/fs: delete PF_SWAPWRITE"). ] [ kun: Add setup_memcg_wmark(memcg) in mem_cgroup_css_alloc() !parent branch. 
] Reviewed-by: Gavin Shan Reviewed-by: Xunlei Pang Signed-off-by: Yang Shi Signed-off-by: zhongjiang-ali Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4154 --- .../admin-guide/cgroup-v1/memory.rst | 21 ++- include/linux/memcontrol.h | 16 ++ include/linux/page_counter.h | 8 + mm/memcontrol.c | 145 +++++++++++++++++- mm/page_counter.c | 12 ++ 5 files changed, 199 insertions(+), 3 deletions(-) diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst index 9312cef008aa..72b8a5ce13f7 100644 --- a/Documentation/admin-guide/cgroup-v1/memory.rst +++ b/Documentation/admin-guide/cgroup-v1/memory.rst @@ -109,6 +109,11 @@ Brief summary of control files. memory.kmem.tcp.failcnt show the number of tcp buf memory usage hits limits memory.kmem.tcp.max_usage_in_bytes show max tcp buf memory usage recorded + memory.wmark_ratio set/show water mark ratio + memory.wmark_low low limit (memory usage low water mark, + read-only) + memory.wmark_high high limit (memory usge high water mark, + read-only) ==================================== ========================================== 1. History @@ -1014,7 +1019,21 @@ Meanwhile, we provide the interface memory.use_prioprity_oom to decide whether t enable/disable the feature in each memcg. Write "1" to enable the priority oom and "0" to disable it. -13. TODO +13. Background reclaim +====================== + +The user could setup memory usage water mark by echoing a value to +memory.wmark_ratio. Valid value is from 0 to 100, which represents percentage +of max limit. The wmark_low and wmark_high would be calculated by max limit +and wmark_ratio. 0 means water mark is disabled, both wmark_low and wmark_high +would be max, which is the default value. 
+ +Once water mark is setup correctly, when charging pages to memcg, if the usage +exceeds wmark_high, which means available memory is low, a work would be +scheduled to reclaim pages in background to try to reduce memory usage to +wmark_low if possible. + +14. TODO ======== 1. Make per-cgroup scanner reclaim not-shared pages first diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 7139432b51a1..547eed5b2829 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -312,6 +312,9 @@ struct mem_cgroup { bool tcpmem_active; int tcpmem_pressure; + unsigned int wmark_ratio; + struct work_struct wmark_work; + #ifdef CONFIG_MEMCG_KMEM int kmemcg_id; struct obj_cgroup __rcu *objcg; @@ -1211,6 +1214,14 @@ static inline struct mem_cgroup *rich_container_get_memcg(void) } #endif +static inline bool is_wmark_ok(struct mem_cgroup *memcg, bool high) +{ + if (high) + return page_counter_read(&memcg->memory) < memcg->memory.wmark_high; + + return page_counter_read(&memcg->memory) < memcg->memory.wmark_low; +} + #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 @@ -1659,6 +1670,11 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, { return 0; } + +static inline bool is_wmark_ok(struct mem_cgroup *memcg, bool low) +{ + return false; +} #endif /* CONFIG_MEMCG */ #ifdef CONFIG_ASYNC_FORK diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index c141ea9a95ef..192326822f46 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -25,6 +25,10 @@ struct page_counter { atomic_long_t low_usage; atomic_long_t children_low_usage; + /* water mark low and high */ + unsigned long wmark_low; + unsigned long wmark_high; + unsigned long watermark; unsigned long failcnt; @@ -65,6 +69,10 @@ bool page_counter_try_charge(struct page_counter *counter, void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); void page_counter_set_min(struct page_counter *counter, 
unsigned long nr_pages); void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages); +void page_counter_set_wmark_high(struct page_counter *counter, + unsigned long nr_pages); +void page_counter_set_wmark_low(struct page_counter *counter, + unsigned long nr_pages); static inline void page_counter_set_high(struct page_counter *counter, unsigned long nr_pages) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9f6aa465bae7..a33bc0fb1f45 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -97,6 +97,8 @@ static bool cgroup_memory_nobpf __ro_after_init; static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq); #endif +static struct workqueue_struct *memcg_wmark_wq; + /* Whether legacy memory+swap accounting is active */ static bool do_memsw_account(void) { @@ -2555,6 +2557,34 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu) return 0; } +static void reclaim_wmark(struct mem_cgroup *memcg) +{ + long nr_pages; + + if (is_wmark_ok(memcg, false)) + return; + + nr_pages = page_counter_read(&memcg->memory) - + memcg->memory.wmark_low; + if (nr_pages <= 0) + return; + + nr_pages = max_t(unsigned long, SWAP_CLUSTER_MAX, nr_pages); + + try_to_free_mem_cgroup_pages(memcg, nr_pages, GFP_KERNEL, true); +} + +static void wmark_work_func(struct work_struct *work) +{ + struct mem_cgroup *memcg; + + memcg = container_of(work, struct mem_cgroup, wmark_work); + + current->flags |= PF_MEMALLOC; + reclaim_wmark(memcg); + current->flags &= ~PF_MEMALLOC; +} + static unsigned long reclaim_high(struct mem_cgroup *memcg, unsigned int nr_pages, gfp_t gfp_mask) @@ -2961,6 +2991,11 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, do { bool mem_high, swap_high; + if (!is_wmark_ok(memcg, true)) { + queue_work(memcg_wmark_wq, &memcg->wmark_work); + break; + } + mem_high = page_counter_read(&memcg->memory) > READ_ONCE(memcg->memory.high); swap_high = page_counter_read(&memcg->swap) > @@ -3642,6 +3677,25 @@ static inline int 
mem_cgroup_move_swap_account(swp_entry_t entry, } #endif +static void setup_memcg_wmark(struct mem_cgroup *memcg) +{ + unsigned long high_wmark; + unsigned long low_wmark; + unsigned long max = memcg->memory.max; + unsigned int wmark_ratio = memcg->wmark_ratio; + + if (wmark_ratio) { + high_wmark = (max * wmark_ratio) / 100; + low_wmark = high_wmark - (high_wmark >> 8); + + page_counter_set_wmark_low(&memcg->memory, low_wmark); + page_counter_set_wmark_high(&memcg->memory, high_wmark); + } else { + page_counter_set_wmark_low(&memcg->memory, PAGE_COUNTER_MAX); + page_counter_set_wmark_high(&memcg->memory, PAGE_COUNTER_MAX); + } +} + static DEFINE_MUTEX(memcg_max_mutex); static int mem_cgroup_resize_max(struct mem_cgroup *memcg, @@ -3692,8 +3746,15 @@ static int mem_cgroup_resize_max(struct mem_cgroup *memcg, } } while (true); - if (!ret && enlarge) - memcg_oom_recover(memcg); + if (!ret) { + setup_memcg_wmark(memcg); + + if (!is_wmark_ok(memcg, true)) + queue_work(memcg_wmark_wq, &memcg->wmark_work); + + if (enlarge) + memcg_oom_recover(memcg); + } return ret; } @@ -3886,6 +3947,8 @@ enum { RES_MAX_USAGE, RES_FAILCNT, RES_SOFT_LIMIT, + WMARK_HIGH_LIMIT, + WMARK_LOW_LIMIT, }; static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, @@ -3926,6 +3989,10 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, return counter->failcnt; case RES_SOFT_LIMIT: return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE; + case WMARK_HIGH_LIMIT: + return (u64)counter->wmark_high * PAGE_SIZE; + case WMARK_LOW_LIMIT: + return (u64)counter->wmark_low * PAGE_SIZE; default: BUG(); } @@ -4433,6 +4500,43 @@ static int mem_cgroup_async_fork_write(struct cgroup_subsys_state *css, } #endif +static int memory_wmark_ratio_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + unsigned int wmark_ratio = READ_ONCE(memcg->wmark_ratio); + + seq_printf(m, "%d\n", wmark_ratio); + + return 0; +} + +static ssize_t 
memory_wmark_ratio_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); + int ret, wmark_ratio; + + buf = strstrip(buf); + if (!buf) + return -EINVAL; + + ret = kstrtouint(buf, 0, &wmark_ratio); + if (ret) + return ret; + + if (wmark_ratio > 100) + return -EINVAL; + + xchg(&memcg->wmark_ratio, wmark_ratio); + + setup_memcg_wmark(memcg); + + if (!is_wmark_ok(memcg, true)) + queue_work(memcg_wmark_wq, &memcg->wmark_work); + + return nbytes; +} + static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) { struct mem_cgroup_threshold_ary *t; @@ -5344,6 +5448,24 @@ static struct cftype mem_cgroup_legacy_files[] = { .name = "stat", .seq_show = memory_stat_show, }, + { + .name = "wmark_ratio", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_ratio_show, + .write = memory_wmark_ratio_write, + }, + { + .name = "wmark_high", + .flags = CFTYPE_NOT_ON_ROOT, + .private = MEMFILE_PRIVATE(_MEM, WMARK_HIGH_LIMIT), + .read_u64 = mem_cgroup_read_u64, + }, + { + .name = "wmark_low", + .flags = CFTYPE_NOT_ON_ROOT, + .private = MEMFILE_PRIVATE(_MEM, WMARK_LOW_LIMIT), + .read_u64 = mem_cgroup_read_u64, + }, { .name = "force_empty", .write = mem_cgroup_force_empty_write, @@ -5661,6 +5783,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) goto fail; INIT_WORK(&memcg->high_work, high_work_func); + INIT_WORK(&memcg->wmark_work, wmark_work_func); INIT_LIST_HEAD(&memcg->oom_notify); mutex_init(&memcg->thresholds_lock); spin_lock_init(&memcg->move_lock); @@ -5712,6 +5835,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) if (parent) { WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); + WRITE_ONCE(memcg->wmark_ratio, READ_ONCE(parent->wmark_ratio)); memcg->reap_background = parent->reap_background; #ifdef CONFIG_ASYNC_FORK memcg->async_fork = parent->async_fork; @@ -5728,9 +5852,13 @@ 
mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) page_counter_init(&memcg->kmem, NULL); page_counter_init(&memcg->tcpmem, NULL); + /* initializing memcg wmark */ + setup_memcg_wmark(memcg); + root_mem_cgroup = memcg; return &memcg->css; } + setup_memcg_wmark(memcg); if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket) static_branch_inc(&memcg_sockets_enabled_key); @@ -5811,6 +5939,9 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) page_counter_set_min(&memcg->memory, 0); page_counter_set_low(&memcg->memory, 0); + page_counter_set_wmark_low(&memcg->memory, PAGE_COUNTER_MAX); + page_counter_set_wmark_high(&memcg->memory, PAGE_COUNTER_MAX); + memcg_offline_kmem(memcg); reparent_shrinker_deferred(memcg); wb_memcg_offline(memcg); @@ -5851,6 +5982,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css) vmpressure_cleanup(&memcg->vmpressure); cancel_work_sync(&memcg->high_work); + cancel_work_sync(&memcg->wmark_work); mem_cgroup_remove_from_trees(memcg); free_shrinker_info(memcg); mem_cgroup_free(memcg); @@ -5879,6 +6011,8 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); page_counter_set_min(&memcg->memory, 0); page_counter_set_low(&memcg->memory, 0); + page_counter_set_wmark_low(&memcg->memory, PAGE_COUNTER_MAX); + page_counter_set_wmark_high(&memcg->memory, PAGE_COUNTER_MAX); page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX); page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); @@ -7771,6 +7905,13 @@ static int __init mem_cgroup_init(void) */ BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE); + memcg_wmark_wq = alloc_workqueue("memcg_wmark", WQ_MEM_RECLAIM | + WQ_UNBOUND | WQ_FREEZABLE, + WQ_UNBOUND_MAX_ACTIVE); + + if (!memcg_wmark_wq) + return -ENOMEM; + cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, memcg_hotplug_cpu_dead); diff --git 
a/mm/page_counter.c b/mm/page_counter.c index db20d6452b71..ce6f551ca7d2 100644 --- a/mm/page_counter.c +++ b/mm/page_counter.c @@ -234,6 +234,18 @@ void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages) propagate_protected_usage(c, atomic_long_read(&c->usage)); } +void page_counter_set_wmark_high(struct page_counter *counter, + unsigned long nr_pages) +{ + xchg(&counter->wmark_high, nr_pages); +} + +void page_counter_set_wmark_low(struct page_counter *counter, + unsigned long nr_pages) +{ + xchg(&counter->wmark_low, nr_pages); +} + /** * page_counter_memparse - memparse() for page counter limits * @buf: string to parse -- Gitee From 84cf556b44f3b85a9377d7c10b09b23c9777116c Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Wed, 14 Aug 2019 05:45:18 +0800 Subject: [PATCH 1835/2138] anolis: mm: memcontrol: add background reclaim support for cgroupv2 ANBZ: #11973 to #32655467 Like v1, add background reclaim support for cgroup v2. The interfaces are exactly same with v1. However, if high limit is setup for v2, the water mark would be calculated by high limit instead of max limit. Reviewed-by: Gavin Shan Reviewed-by: Xunlei Pang Signed-off-by: Yang Shi Signed-off-by: zhongjiang-ali Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4154 --- Documentation/admin-guide/cgroup-v2.rst | 32 ++++++++++++++ mm/memcontrol.c | 55 ++++++++++++++++++++++++- 2 files changed, 86 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 6f53cb81067a..a50b87c528d8 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1276,6 +1276,38 @@ PAGE_SIZE multiple when read back. The max memory usage recorded for the cgroup and its descendants since the creation of the cgroup. + memory.wmark_ratio + A read-write single value file which exists on non-root + cgroups. The default is 0. 
+ + Memory usage water mark. Valid value is from 0 to 100, which + represents percentage of max limit or high limit if high is setup. + The wmark_low and wmark_high would be calculated by max limit and + wmark_ratio. 0 means water mark is disabled, both wmark_low and + wmark_high would be max, which is the default value. + + Once water mark is setup correctly, when charging pages to memcg, + if the usage exceeds wmark_high, which means available memory is low, + a work would be scheduled to reclaim pages in background to try to + reduce memory usage to wmark_low if possible. + + If memory.low is greater than memory.wmark_high, back ground reclaim + may not take effect at all due to low protection. + + memory.wmark_high + A read-only single value file which exists on non-root cgroups. + The default is max. + + Memory usage high water mark, which means the available memory is low. + For details, please refer to the above wmark_ratio section. + + memory.wmark_low + A read-only single value file which exists on non-root cgroups. + The default is max. + + Memory usage low water mark, which means the available memory is ok. + For details, please refer to the above wmark_ratio section. + memory.oom.group A read-write single value file which exists on non-root cgroups. The default value is "0". diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a33bc0fb1f45..538a0d631d17 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3681,7 +3681,8 @@ static void setup_memcg_wmark(struct mem_cgroup *memcg) { unsigned long high_wmark; unsigned long low_wmark; - unsigned long max = memcg->memory.max; + unsigned long max = memcg->memory.high > memcg->memory.max ? 
+ memcg->memory.max : memcg->memory.high; unsigned int wmark_ratio = memcg->wmark_ratio; if (wmark_ratio) { @@ -7031,10 +7032,41 @@ static ssize_t memory_high_write(struct kernfs_open_file *of, break; } + setup_memcg_wmark(memcg); + + if (!is_wmark_ok(memcg, true)) + queue_work(memcg_wmark_wq, &memcg->wmark_work); + memcg_wb_domain_size_changed(memcg); return nbytes; } +static int memory_wmark_low_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + unsigned long wmark_low = READ_ONCE(memcg->memory.wmark_low); + + if (wmark_low == PAGE_COUNTER_MAX) + seq_puts(m, "max\n"); + else + seq_printf(m, "%llu\n", (u64)wmark_low * PAGE_SIZE); + + return 0; +} + +static int memory_wmark_high_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + unsigned long wmark_high = READ_ONCE(memcg->memory.wmark_high); + + if (wmark_high == PAGE_COUNTER_MAX) + seq_puts(m, "max\n"); + else + seq_printf(m, "%llu\n", (u64)wmark_high * PAGE_SIZE); + + return 0; +} + static int memory_max_show(struct seq_file *m, void *v) { return seq_puts_memcg_tunable(m, @@ -7084,6 +7116,11 @@ static ssize_t memory_max_write(struct kernfs_open_file *of, break; } + setup_memcg_wmark(memcg); + + if (!is_wmark_ok(memcg, true)) + queue_work(memcg_wmark_wq, &memcg->wmark_work); + memcg_wb_domain_size_changed(memcg); return nbytes; } @@ -7256,6 +7293,22 @@ static struct cftype memory_files[] = { .write_u64 = mem_cgroup_priority_oom_write, .read_u64 = mem_cgroup_priority_oom_read, }, + { + .name = "wmark_ratio", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_ratio_show, + .write = memory_wmark_ratio_write, + }, + { + .name = "wmark_high", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_high_show, + }, + { + .name = "wmark_low", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_low_show, + }, { .name = "events", .flags = CFTYPE_NOT_ON_ROOT, -- Gitee From 
0c675558860e00cdb42192b45a7b5b931d7658f3 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Fri, 2 Aug 2019 02:01:40 +0800 Subject: [PATCH 1836/2138] anolis: mm: memcontrol: treat memcg wmark reclaim work as kswapd ANBZ: #11973 to #32655467 Since background water mark reclaim is scheduled by workqueue, it could do more work than direct reclaim, i.e. write out dirty page, etc. So, add PF_KSWAPD flag, so that current_is_kswapd() would return true for memcg background reclaim. The condition "current_is_kswapd() && !global_reclaim(sc)" is good enough to tell current is global kswapd or memcg background reclaim. And, kswapd is not allowed to break memory.low protection for now, memcg kswapd should not break it either. Reviewed-by: Gavin Shan Reviewed-by: Xunlei Pang Signed-off-by: Yang Shi Signed-off-by: zhongjiang-ali Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4154 --- mm/memcontrol.c | 4 ++-- mm/vmscan.c | 23 ++++++++++++++++++++--- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 538a0d631d17..15a5f1e419eb 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2580,9 +2580,9 @@ static void wmark_work_func(struct work_struct *work) memcg = container_of(work, struct mem_cgroup, wmark_work); - current->flags |= PF_MEMALLOC; + current->flags |= PF_MEMALLOC | PF_KSWAPD; reclaim_wmark(memcg); - current->flags &= ~PF_MEMALLOC; + current->flags &= ~(PF_MEMALLOC | PF_KSWAPD); } static unsigned long reclaim_high(struct mem_cgroup *memcg, diff --git a/mm/vmscan.c b/mm/vmscan.c index 97d4511eb762..7ac9d08f4f26 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -6550,6 +6550,15 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) sc->nr_scanned - scanned, sc->nr_reclaimed - reclaimed); + /* + * Memcg background reclaim would break iter once water + * mark is satisfied. 
+ */ + if (cgroup_reclaim(sc) && current_is_kswapd() && + is_wmark_ok(target_memcg, false)) { + mem_cgroup_iter_break(target_memcg, memcg); + break; + } } while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL))); } @@ -6588,7 +6597,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) if (nr_node_reclaimed) reclaimable = true; - if (current_is_kswapd()) { + if (current_is_kswapd() && !cgroup_reclaim(sc)) { /* * If reclaim is isolating dirty pages under writeback, * it implies that the long-lived page allocation rate @@ -6874,6 +6883,10 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); do { + if (current_is_kswapd() && cgroup_reclaim(sc) && + is_wmark_ok(sc->target_mem_cgroup, false)) + break; + if (!sc->proactive) vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, sc->priority); @@ -6937,8 +6950,12 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, goto retry; } - /* Untapped cgroup reserves? Don't OOM, retry. */ - if (sc->memcg_low_skipped) { + /* + * Untapped cgroup reserves? Don't OOM, retry. + * + * Memcg kswapd should not break low protection. + */ + if (sc->memcg_low_skipped && !current_is_kswapd()) { sc->priority = initial_priority; sc->force_deactivate = 0; sc->memcg_low_reclaim = 1; -- Gitee From 4337b38608067473c3b6b0faae0cdb777cdc7bc3 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Fri, 2 Aug 2019 11:47:44 +0800 Subject: [PATCH 1837/2138] anolis: mm: vmscan: make memcg kswapd set memcg state to dirty or writeback ANBZ: #11973 to #32655467 The global kswapd could set memory node to dirty or writeback if current scan find all pages are unqueued dirty or writeback. Then kswapd would write out dirty pages or wait for writeback done. The memcg kswapd behaves like global kswapd, and it should set dirty or writeback state to memcg too if the same condition is met. 
Since direct reclaim can't write out page caches, the system depends on kswapd to write out dirty pages if scan finds too many dirty pages in order to avoid pre-mature OOM. But, if page cache is dirtied too fast, writing out pages definitely can't catch up with dirtying pages. It is the responsibility of dirty page balance to throttle dirtying pages. Reviewed-by: Gavin Shan Reviewed-by: Xunlei Pang Signed-off-by: Yang Shi Signed-off-by: zhongjiang-ali Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4154 --- include/linux/mmzone.h | 14 +++++++------- mm/vmscan.c | 23 +++++++++++++++-------- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index d797575c68c2..1a6fdfc16b8f 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -313,6 +313,13 @@ enum lruvec_flags { */ LRUVEC_CGROUP_CONGESTED, LRUVEC_NODE_CONGESTED, + LRUVEC_DIRTY, /* reclaim scanning has recently found + * many dirty file pages at the tail + * of the LRU. + */ + LRUVEC_WRITEBACK, /* reclaim scanning has recently found + * many pages under writeback + */ }; #endif /* !__GENERATING_BOUNDS_H */ @@ -996,13 +1003,6 @@ struct zone { } ____cacheline_internodealigned_in_smp; enum pgdat_flags { - PGDAT_DIRTY, /* reclaim scanning has recently found - * many dirty file pages at the tail - * of the LRU. 
- */ - PGDAT_WRITEBACK, /* reclaim scanning has recently found - * many pages under writeback - */ PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ }; diff --git a/mm/vmscan.c b/mm/vmscan.c index 7ac9d08f4f26..577649ccf468 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1728,6 +1728,9 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, unsigned int pgactivate = 0; bool do_demote_pass; struct swap_iocb *plug = NULL; + struct lruvec *target_lruvec; + + target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); memset(stat, 0, sizeof(*stat)); cond_resched(); @@ -1836,7 +1839,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, /* Case 1 above */ if (current_is_kswapd() && folio_test_reclaim(folio) && - test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { + test_bit(LRUVEC_WRITEBACK, &pgdat->flags)) { stat->nr_immediate += nr_pages; goto activate_locked; @@ -1998,7 +2001,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, if (folio_is_file_lru(folio) && (!current_is_kswapd() || !folio_test_reclaim(folio) || - !test_bit(PGDAT_DIRTY, &pgdat->flags))) { + !test_bit(LRUVEC_DIRTY, &pgdat->flags))) { /* * Immediately reclaim when written back. * Similar in principle to folio_deactivate() @@ -6597,7 +6600,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) if (nr_node_reclaimed) reclaimable = true; - if (current_is_kswapd() && !cgroup_reclaim(sc)) { + if (current_is_kswapd()) { /* * If reclaim is isolating dirty pages under writeback, * it implies that the long-lived page allocation rate @@ -6616,11 +6619,11 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) * in the nr_immediate check below. 
*/ if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) - set_bit(PGDAT_WRITEBACK, &pgdat->flags); + set_bit(LRUVEC_WRITEBACK, &target_lruvec->flags); /* Allow kswapd to start writing pages during reclaim.*/ if (sc->nr.unqueued_dirty == sc->nr.file_taken) - set_bit(PGDAT_DIRTY, &pgdat->flags); + set_bit(LRUVEC_DIRTY, &target_lruvec->flags); /* * If kswapd scans pages marked for immediate @@ -6644,7 +6647,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) if (cgroup_reclaim(sc) && writeback_throttling_sane(sc)) set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags); - if (current_is_kswapd()) + if (current_is_kswapd() && !cgroup_reclaim(sc)) set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); } @@ -6922,6 +6925,10 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, zone->zone_pgdat); clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); + if (current_is_kswapd()) { + clear_bit(LRUVEC_DIRTY, &lruvec->flags); + clear_bit(LRUVEC_WRITEBACK, &lruvec->flags); + } } } @@ -7316,8 +7323,8 @@ static void clear_pgdat_congested(pg_data_t *pgdat) clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); - clear_bit(PGDAT_DIRTY, &pgdat->flags); - clear_bit(PGDAT_WRITEBACK, &pgdat->flags); + clear_bit(LRUVEC_DIRTY, &lruvec->flags); + clear_bit(LRUVEC_WRITEBACK, &lruvec->flags); } /* -- Gitee From 6a029edb3bbf5cd2c1308ca766665d9e5b73fc71 Mon Sep 17 00:00:00 2001 From: Yang Shi Date: Sat, 17 Aug 2019 08:04:03 +0800 Subject: [PATCH 1838/2138] anolis: mm: memcontrol: make distance between wmark_low and wmark_high configurable ANBZ: #11973 to #32655467 Introduce a new interface, wmark_scale_factor, which defines the distance between wmark_high and wmark_low. The unit is in fractions of 10,000. The default value of 50 means the distance between wmark_high and wmark_low is 0.5% of the max limit of the cgroup. 
The maximum value is 1000, or 10% of the max limit. The distance between wmark_low and wmark_high have impact on how hard memcg kswapd would reclaim. Reviewed-by: Gavin Shan Reviewed-by: Xunlei Pang Signed-off-by: Yang Shi Signed-off-by: zhongjiang-ali Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4154 --- .../admin-guide/cgroup-v1/memory.rst | 3 + Documentation/admin-guide/cgroup-v2.rst | 9 +++ include/linux/memcontrol.h | 1 + mm/memcontrol.c | 61 ++++++++++++++++++- 4 files changed, 73 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst index 72b8a5ce13f7..90ce42fd70c3 100644 --- a/Documentation/admin-guide/cgroup-v1/memory.rst +++ b/Documentation/admin-guide/cgroup-v1/memory.rst @@ -114,6 +114,9 @@ Brief summary of control files. read-only) memory.wmark_high high limit (memory usge high water mark, read-only) + memory.wmark_scale_factor the gap between wmark_low and wmark_high, + percentage of max limit, default is 50 or 0.5% of max limit. + The max value is 1000 or 10% of max limit. ==================================== ========================================== 1. History diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index a50b87c528d8..4c485887e1ac 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1308,6 +1308,15 @@ PAGE_SIZE multiple when read back. Memory usage low water mark, which means the available memory is ok. For details, please refer to the above wmark_ratio section. + memory.wmark_scale_factor + A read-write single value file which exists on non-root cgroups. + The default is 50. + + The gap between wmark_low and wmark_high. The unit is in fractions + of 10,000. The default value of 50 means the distance between wmark_high + and wmark_low is 0.5% of the max limit of the cgroup. 
The maximum value + is 1000, or 10% of max limit. + memory.oom.group A read-write single value file which exists on non-root cgroups. The default value is "0". diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 547eed5b2829..93c65d1e3c33 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -314,6 +314,7 @@ struct mem_cgroup { unsigned int wmark_ratio; struct work_struct wmark_work; + unsigned int wmark_scale_factor; #ifdef CONFIG_MEMCG_KMEM int kmemcg_id; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 15a5f1e419eb..2262494a2c0c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3684,10 +3684,18 @@ static void setup_memcg_wmark(struct mem_cgroup *memcg) unsigned long max = memcg->memory.high > memcg->memory.max ? memcg->memory.max : memcg->memory.high; unsigned int wmark_ratio = memcg->wmark_ratio; + unsigned int wmark_scale_factor = memcg->wmark_scale_factor; + unsigned long gap; if (wmark_ratio) { high_wmark = (max * wmark_ratio) / 100; - low_wmark = high_wmark - (high_wmark >> 8); + + /* + * Set the memcg watermark distance according to the + * scale factor in proportion to max limit. 
+ */ + gap = mult_frac(max, wmark_scale_factor, 10000); + low_wmark = high_wmark - gap; page_counter_set_wmark_low(&memcg->memory, low_wmark); page_counter_set_wmark_high(&memcg->memory, high_wmark); @@ -4538,6 +4546,42 @@ static ssize_t memory_wmark_ratio_write(struct kernfs_open_file *of, return nbytes; } +static int memory_wmark_scale_factor_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + unsigned int wmark_scale_factor; + + wmark_scale_factor = READ_ONCE(memcg->wmark_scale_factor); + + seq_printf(m, "%d\n", wmark_scale_factor); + + return 0; +} + +static ssize_t memory_wmark_scale_factor_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); + int ret, wmark_scale_factor; + + buf = strstrip(buf); + if (!buf) + return -EINVAL; + + ret = kstrtouint(buf, 0, &wmark_scale_factor); + if (ret) + return ret; + + if (wmark_scale_factor > 1000 || wmark_scale_factor < 1) + return -EINVAL; + + xchg(&memcg->wmark_scale_factor, wmark_scale_factor); + + setup_memcg_wmark(memcg); + + return nbytes; +} + static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) { struct mem_cgroup_threshold_ary *t; @@ -5467,6 +5511,12 @@ static struct cftype mem_cgroup_legacy_files[] = { .private = MEMFILE_PRIVATE(_MEM, WMARK_LOW_LIMIT), .read_u64 = mem_cgroup_read_u64, }, + { + .name = "wmark_scale_factor", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_scale_factor_show, + .write = memory_wmark_scale_factor_write, + }, { .name = "force_empty", .write = mem_cgroup_force_empty_write, @@ -5838,6 +5888,9 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); WRITE_ONCE(memcg->wmark_ratio, READ_ONCE(parent->wmark_ratio)); memcg->reap_background = parent->reap_background; + /* Default gap is 0.5% max limit */ + memcg->wmark_scale_factor = 
parent->wmark_scale_factor ? + : 50; #ifdef CONFIG_ASYNC_FORK memcg->async_fork = parent->async_fork; #endif @@ -7309,6 +7362,12 @@ static struct cftype memory_files[] = { .flags = CFTYPE_NOT_ON_ROOT, .seq_show = memory_wmark_low_show, }, + { + .name = "wmark_scale_factor", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_scale_factor_show, + .write = memory_wmark_scale_factor_write, + }, { .name = "events", .flags = CFTYPE_NOT_ON_ROOT, -- Gitee From 3a8bd7e8ef228474daa9ddacb68c182c5cb9f5f7 Mon Sep 17 00:00:00 2001 From: zhongjiang-ali Date: Mon, 1 Feb 2021 12:00:23 +0800 Subject: [PATCH 1839/2138] anolis: mm: count the memory pressure when wmark meets. ANBZ: #11973 to #32655467 It will reclaim the memory since we introduce the memcg kswapd, hence the memory pressure should be counted. Reviewed-by: Xunlei Pang Signed-off-by: zhongjiang-ali Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4154 --- mm/memcontrol.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2262494a2c0c..89b56d0af77c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2560,6 +2560,7 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu) static void reclaim_wmark(struct mem_cgroup *memcg) { long nr_pages; + unsigned long pflags; if (is_wmark_ok(memcg, false)) return; @@ -2571,7 +2572,9 @@ static void reclaim_wmark(struct mem_cgroup *memcg) nr_pages = max_t(unsigned long, SWAP_CLUSTER_MAX, nr_pages); + psi_memstall_enter(&pflags); try_to_free_mem_cgroup_pages(memcg, nr_pages, GFP_KERNEL, true); + psi_memstall_leave(&pflags); } static void wmark_work_func(struct work_struct *work) -- Gitee From 01e58a1e13869ccd6b91f05fa71dd6b3caf32811 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Sun, 1 Sep 2019 19:10:49 +0800 Subject: [PATCH 1840/2138] anolis: mm,memcg: record latency of memcg wmark reclaim ANBZ: #11973 to #32655467 The memcg background async page reclaim, a.k.a, memcg kswapd, 
is implemented with a dedicated unbound workqueue in 4.19, eliminating original kthreads. However, memcg kswapd will run too frequently, resulting in high overhead, page cache thrashing, frequent dirty page writeback, etc., due to improper memcg memory.wmark_ratio, unreasonable memcg memory capacity, or even abnormal memcg memory usage. We need to find out the problematic memcg(s) where memcg kswapd introduces significant overhead. This records the latency of each run of memcg kswapd work, and then aggregates into the exstat of per memcg. Signed-off-by: Xu Yu Reviewed-by: Xunlei Pang Signed-off-by: zhongjiang-ali Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4154 --- include/linux/memcontrol.h | 13 +++++++++++ mm/memcontrol.c | 47 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 93c65d1e3c33..21a898238c1a 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -43,6 +43,16 @@ enum memcg_stat_item { MEMCG_NR_STAT, }; +enum memcg_exstat_item { + MEMCG_WMARK_RECLAIM, + MEMCG_NR_EXSTAT, +}; + +/* Only care about 64bit using "long" */ +struct mem_cgroup_exstat_cpu { + unsigned long item[MEMCG_NR_EXSTAT]; +}; + enum memcg_memory_event { MEMCG_LOW, MEMCG_HIGH, @@ -312,6 +322,9 @@ struct mem_cgroup { bool tcpmem_active; int tcpmem_pressure; + /* memory.exstat */ + struct mem_cgroup_exstat_cpu __percpu *exstat_cpu; + unsigned int wmark_ratio; struct work_struct wmark_work; unsigned int wmark_scale_factor; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 89b56d0af77c..13d1e667e027 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2561,6 +2561,8 @@ static void reclaim_wmark(struct mem_cgroup *memcg) { long nr_pages; unsigned long pflags; + struct mem_cgroup *iter; + u64 start, duration; if (is_wmark_ok(memcg, false)) return; @@ -2572,9 +2574,23 @@ static void reclaim_wmark(struct mem_cgroup *memcg) 
nr_pages = max_t(unsigned long, SWAP_CLUSTER_MAX, nr_pages); + /* + * Typically, we would like to record the actual cpu% of reclaim_wmark + * work, excluding any sleep/resched time. However, currently we just + * simply record the whole duration of reclaim_wmark work for the + * overhead-accuracy trade-off. + */ + start = ktime_get_ns(); psi_memstall_enter(&pflags); try_to_free_mem_cgroup_pages(memcg, nr_pages, GFP_KERNEL, true); psi_memstall_leave(&pflags); + duration = ktime_get_ns() - start; + + css_get(&memcg->css); + for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) + this_cpu_add(iter->exstat_cpu->item[MEMCG_WMARK_RECLAIM], + duration); + css_put(&memcg->css); } static void wmark_work_func(struct work_struct *work) @@ -4448,6 +4464,28 @@ static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) #endif } +static u64 memcg_exstat_gather(struct mem_cgroup *memcg, + enum memcg_exstat_item idx) +{ + u64 sum = 0; + int cpu; + + for_each_online_cpu(cpu) + sum += per_cpu_ptr(memcg->exstat_cpu, cpu)->item[idx]; + + return sum; +} + +static int memcg_exstat_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + + seq_printf(m, "wmark_reclaim_work_ms %llu\n", + memcg_exstat_gather(memcg, MEMCG_WMARK_RECLAIM) >> 20); + + return 0; +} + static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, struct cftype *cft) { @@ -5496,6 +5534,10 @@ static struct cftype mem_cgroup_legacy_files[] = { .name = "stat", .seq_show = memory_stat_show, }, + { + .name = "exstat", + .seq_show = memcg_exstat_show, + }, { .name = "wmark_ratio", .flags = CFTYPE_NOT_ON_ROOT, @@ -5793,6 +5835,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) free_mem_cgroup_per_node_info(memcg, node); kfree(memcg->vmstats); free_percpu(memcg->vmstats_percpu); + free_percpu(memcg->exstat_cpu); kfree(memcg); } @@ -5829,6 +5872,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void) if (!memcg->vmstats_percpu) goto fail; + 
memcg->exstat_cpu = alloc_percpu(struct mem_cgroup_exstat_cpu); + if (!memcg->exstat_cpu) + goto fail; + for_each_node(node) if (alloc_mem_cgroup_per_node_info(memcg, node)) goto fail; -- Gitee From 6ecf42d2b6dcd2c779020a0fcb5aad68aae6fcfe Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Tue, 3 Dec 2024 16:54:52 +0800 Subject: [PATCH 1841/2138] brd: implement discard support ANBZ: #12193 commit 9ead7efc6f3f2b46c4ec68209bca4888cfbd4c19 upstream. The ramdisk memory utilization can only go up when data is written to new pages. Implement discard to provide the possibility to reduce memory usage for pages no longer in use. Aligned discards will free the associated pages, if any, and determinisitically return zeroed data until written again. Signed-off-by: Keith Busch Link: https://lore.kernel.org/r/20240429102308.147627-1-kbusch@meta.com Signed-off-by: Jens Axboe Signed-off-by: Zhang Xianwei Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4190 --- drivers/block/brd.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/drivers/block/brd.c b/drivers/block/brd.c index e7be237c90d3..995968456bf0 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -243,6 +243,23 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page, return err; } +static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size) +{ + sector_t aligned_sector = (sector + PAGE_SECTORS) & ~PAGE_SECTORS; + struct page *page; + + size -= (aligned_sector - sector) * SECTOR_SIZE; + xa_lock(&brd->brd_pages); + while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) { + page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT); + if (page) + __free_page(page); + aligned_sector += PAGE_SECTORS; + size -= PAGE_SIZE; + } + xa_unlock(&brd->brd_pages); +} + static void brd_submit_bio(struct bio *bio) { struct brd_device *brd = bio->bi_bdev->bd_disk->private_data; @@ -250,6 +267,12 @@ static void 
brd_submit_bio(struct bio *bio) struct bio_vec bvec; struct bvec_iter iter; + if (unlikely(op_is_discard(bio->bi_opf))) { + brd_do_discard(brd, sector, bio->bi_iter.bi_size); + bio_endio(bio); + return; + } + bio_for_each_segment(bvec, bio, iter) { unsigned int len = bvec.bv_len; int err; @@ -376,7 +399,7 @@ static int brd_alloc(int i) disk->private_data = brd; strscpy(disk->disk_name, buf, DISK_NAME_LEN); set_capacity(disk, rd_size * 2); - + /* * This is so fdisk will align partitions on 4k, because of * direct_access API needing 4k alignment, returning a PFN @@ -385,6 +408,9 @@ static int brd_alloc(int i) * is harmless) */ blk_queue_physical_block_size(disk->queue, PAGE_SIZE); + blk_queue_max_discard_sectors(disk->queue, UINT_MAX); + disk->queue->limits.discard_granularity = PAGE_SIZE; + blk_queue_max_discard_segments(disk->queue, 1); /* Tell the block layer that this is not a rotational device */ blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); -- Gitee From 6c4203996ab1e03b85e3639e9ecf624621d842a9 Mon Sep 17 00:00:00 2001 From: Zhang Xianwei Date: Wed, 4 Dec 2024 10:39:47 +0800 Subject: [PATCH 1842/2138] brd: decrease the number of allocated pages which discarded ANBZ: #12191 commit 82734209bedd65a8b508844bab652b464379bfdd upstream. The number of allocated pages which discarded will not decrease. Fix it. 
Fixes: 9ead7efc6f3f ("brd: implement discard support") Signed-off-by: Zhang Xianwei Reviewed-by: Ming Lei Link: https://lore.kernel.org/r/20241128170056565nPKSz2vsP8K8X2uk2iaDG@zte.com.cn Signed-off-by: Jens Axboe Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4193 --- drivers/block/brd.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 995968456bf0..d69b96d01316 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -252,8 +252,10 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size) xa_lock(&brd->brd_pages); while (size >= PAGE_SIZE && aligned_sector < rd_size * 2) { page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT); - if (page) + if (page) { __free_page(page); + brd->brd_nr_pages--; + } aligned_sector += PAGE_SECTORS; size -= PAGE_SIZE; } -- Gitee From 1f4839a4d84b324f0241c5580154f51a581d2716 Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Fri, 6 Dec 2024 10:16:44 +0800 Subject: [PATCH 1843/2138] anolis: Revert "anolis: arm64: replace can_set_direct_map by splitting linear mapping" ANBZ: #12240 This reverts commit 28ced0af1ad5b4e971ad86badfdffe63ac4bf18f. Commit '3d5121bfa7f0 (" anolis: arm64: replace can_set_direct_map by splitting linear mapping")' replaced the function "can_set_direct_map" with "can_set_block_and_cont_map". However, the function can_set_block_and_cont_map was only defined and implemented for ARM64 architecture, not in the general path. Meanwhile, commit '7caf966390e6 ("secretmem: disable memfd_secret() if arch cannot set direct map")' utilized "can_set_direct_map" within the syscall "memfd_secret", which resulted in a compilation failure. Just fix it. 
[Fixes conflicts] Signed-off-by: Kaihao Bai Signed-off-by: Qinyun Tan Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4197 --- arch/arm64/include/asm/kfence.h | 11 ++++++++--- arch/arm64/include/asm/set_memory.h | 3 +++ arch/arm64/mm/pageattr.c | 28 +++++++++++++++++++++------- include/linux/set_memory.h | 12 ++++++++++++ 4 files changed, 44 insertions(+), 10 deletions(-) diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h index 44994e2a6d88..786d62839961 100644 --- a/arch/arm64/include/asm/kfence.h +++ b/arch/arm64/include/asm/kfence.h @@ -8,13 +8,11 @@ #ifndef __ASM_KFENCE_H #define __ASM_KFENCE_H -#ifdef CONFIG_KFENCE #include #include -extern bool kfence_early_init; - +#ifdef CONFIG_KFENCE static inline bool arch_kfence_init_pool(struct kfence_pool_area *kpa) { unsigned long addr = (unsigned long)kpa->addr; @@ -40,6 +38,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) static inline bool arch_kfence_free_pool(unsigned long addr) { return false; } +extern bool kfence_early_init; +static inline bool arm64_kfence_can_set_direct_map(void) +{ + return !kfence_early_init; +} +#else /* CONFIG_KFENCE */ +static inline bool arm64_kfence_can_set_direct_map(void) { return false; } #endif /* CONFIG_KFENCE */ #endif /* __ASM_KFENCE_H */ diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h index 3f5d866b98d0..20fb7b1d5423 100644 --- a/arch/arm64/include/asm/set_memory.h +++ b/arch/arm64/include/asm/set_memory.h @@ -5,6 +5,9 @@ #include +bool can_set_direct_map(void); +#define can_set_direct_map can_set_direct_map + bool can_set_block_and_cont_map(void); int set_memory_valid(unsigned long addr, int numpages, int enable); diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 32ed0e6bb611..51fb46822583 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -20,6 +20,19 @@ struct page_change_data { bool rodata_full __ro_after_init 
= IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED); +bool can_set_direct_map(void) +{ + /* + * rodata_full and DEBUG_PAGEALLOC require linear map to be + * mapped at page granularity, so that it is possible to + * protect/unprotect single pages. + * + * KFENCE pool requires page-granular mapping if initialized late. + */ + return rodata_full || debug_pagealloc_enabled() || + arm64_kfence_can_set_direct_map(); +} + /* * If rodata_full is enabled, the mapping of linear mapping range can also be * block & cont mapping, here decouples the rodata_full and debug_pagealloc. @@ -187,9 +200,8 @@ int set_direct_map_invalid_noflush(struct page *page) .clear_mask = __pgprot(PTE_VALID), }; - if (can_set_block_and_cont_map()) - split_linear_mapping_after_init((unsigned long)page_address(page), - PAGE_SIZE, PAGE_KERNEL); + if (!can_set_direct_map()) + return 0; return apply_to_page_range(&init_mm, (unsigned long)page_address(page), @@ -203,9 +215,8 @@ int set_direct_map_default_noflush(struct page *page) .clear_mask = __pgprot(PTE_RDONLY), }; - if (can_set_block_and_cont_map()) - split_linear_mapping_after_init((unsigned long)page_address(page), - PAGE_SIZE, PAGE_KERNEL); + if (!can_set_direct_map()) + return 0; return apply_to_page_range(&init_mm, (unsigned long)page_address(page), @@ -215,7 +226,7 @@ int set_direct_map_default_noflush(struct page *page) #ifdef CONFIG_DEBUG_PAGEALLOC void __kernel_map_pages(struct page *page, int numpages, int enable) { - if (can_set_block_and_cont_map()) + if (!can_set_direct_map()) return; set_memory_valid((unsigned long)page_address(page), numpages, enable); @@ -239,6 +250,9 @@ bool kernel_page_present(struct page *page) pte_t *ptep; unsigned long addr = (unsigned long)page_address(page); + if (!can_set_direct_map()) + return true; + pgdp = pgd_offset_k(addr); if (pgd_none(READ_ONCE(*pgdp))) return false; diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index e77a3345d20b..ed93371c944c 100644 --- a/include/linux/set_memory.h 
+++ b/include/linux/set_memory.h @@ -39,6 +39,18 @@ static inline bool kernel_page_present(struct page *page) { return true; } +#else /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */ +/* + * Some architectures, e.g. ARM64 can disable direct map modifications at + * boot time. Let them overrive this query. + */ +#ifndef can_set_direct_map +static inline bool can_set_direct_map(void) +{ + return true; +} +#define can_set_direct_map can_set_direct_map +#endif #endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */ #ifdef CONFIG_X86_64 -- Gitee From 27fee2c9a88b6375360bb8ae816e415cf41fcc3e Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Fri, 6 Dec 2024 10:36:41 +0800 Subject: [PATCH 1844/2138] anolis: arm64: avoid rodata_full splitting if secretmem used ANBZ: #12240 If secretmem used, the possible splitting behavior might infect the performance. Thus avoid to maintain the block and cont mapping if rodota_full is enabled. Signed-off-by: Kaihao Bai Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4197 --- arch/arm64/mm/pageattr.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 51fb46822583..a756056bbd0a 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -26,20 +26,17 @@ bool can_set_direct_map(void) * rodata_full and DEBUG_PAGEALLOC require linear map to be * mapped at page granularity, so that it is possible to * protect/unprotect single pages. - * - * KFENCE pool requires page-granular mapping if initialized late. */ - return rodata_full || debug_pagealloc_enabled() || - arm64_kfence_can_set_direct_map(); + return rodata_full || debug_pagealloc_enabled(); } /* - * If rodata_full is enabled, the mapping of linear mapping range can also be - * block & cont mapping, here decouples the rodata_full and debug_pagealloc. 
+ * If rodata_full is enabled, the mapping of linear mapping range can not be + * block & cont mapping, here combines the rodata_full and debug_pagealloc. */ bool can_set_block_and_cont_map(void) { - return !debug_pagealloc_enabled(); + return !rodata_full && !debug_pagealloc_enabled(); } static int change_page_range(pte_t *ptep, unsigned long addr, void *data) -- Gitee From da2a2cea50c32d9be5f9e074bfedda60a4299f7d Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Fri, 6 Dec 2024 10:41:52 +0800 Subject: [PATCH 1845/2138] anolis: crypto: sm4-zhaoxin-gmi: fix a compilation error ANBZ: #12240 The commit 'b60d2bc676e4 ("crypto: simd - Avoid calling crypto_alloc_tfm during registration")' modified the definition of the function simd_skcipher_create_compat, which caused a compilation failure. This commit addresses and fixes the issue. Signed-off-by: Qinyun Tan Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4197 --- arch/x86/crypto/sm4-zhaoxin-gmi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/crypto/sm4-zhaoxin-gmi.c b/arch/x86/crypto/sm4-zhaoxin-gmi.c index 5a3695fcf647..288bae293607 100644 --- a/arch/x86/crypto/sm4-zhaoxin-gmi.c +++ b/arch/x86/crypto/sm4-zhaoxin-gmi.c @@ -784,7 +784,8 @@ static int __init gmi_sm4_init(void) algname = sm4_algs[i].base.cra_name + 2; drvname = sm4_algs[i].base.cra_driver_name + 2; basename = sm4_algs[i].base.cra_driver_name; - simd = simd_skcipher_create_compat(algname, drvname, basename); + simd = simd_skcipher_create_compat(sm4_algs + i, algname, + drvname, basename); err = PTR_ERR(simd); if (IS_ERR(simd)) goto unregister_simds; -- Gitee From 09e8309be845b22f41c043f034b01427613a4b34 Mon Sep 17 00:00:00 2001 From: Qinyun Tan Date: Fri, 6 Dec 2024 16:51:35 +0800 Subject: [PATCH 1846/2138] anolis: kconfig: temporarily disable checks for CONFIG_ARM64_SME. ANBZ: #12240 In commit '22aaaa7a1a73 ("arm64: Kconfig: Make SME depend on BROKEN for now")'. 
Due to the issues identified with the implementation of SME, the upstream has currently marked the SME Kconfig as BROKEN. This action is taken with the intention of re-enabling the Kconfig once these issues are resolved in the future. Despite this temporary change, the SME Kconfig remains a Level 0 configuration that we need to monitor. For now, we will disregard checks related to this Kconfig. Once the relevant patches have been merged and the issues are resolved, we will resume the checks and re-enable the Kconfig accordingly. Signed-off-by: Qinyun Tan Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4203 --- anolis/configs/examination/L0-MANDATORY/arm64.config | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/anolis/configs/examination/L0-MANDATORY/arm64.config b/anolis/configs/examination/L0-MANDATORY/arm64.config index eab0cbcff378..58414684fe3d 100644 --- a/anolis/configs/examination/L0-MANDATORY/arm64.config +++ b/anolis/configs/examination/L0-MANDATORY/arm64.config @@ -22,7 +22,15 @@ CONFIG_ARM64_HW_AFDBM=y CONFIG_ARM64_PAN=y CONFIG_ARM64_PSEUDO_NMI=y CONFIG_ARM64_RAS_EXTN=y -CONFIG_ARM64_SME=y +# UNLIMITED CONFIG_ARM64_SME +## (https://lore.kernel.org/all/20241106164220.2789279-1-mark.rutland@arm.com/) +## Due to the issues identified with the implementation of CONFIG_ARM64_SME, the upstream has +## currently marked the SME Kconfig as BROKEN. This action is taken with the intention of +## re-enabling the Kconfig once these issues are resolved in the future. Despite this temporary +## change, the SME Kconfig remains a Level 0 (L0) configuration that we need to monitor. +## For now, we will disregard checks related to this Kconfig. Once the relevant patches have +## been merged and the issues are resolved, we will resume the checks and re-enable the +## Kconfig accordingly. 
CONFIG_ARM64_SVE=y CONFIG_ARM_CCN=y CONFIG_ARM_GIC=y -- Gitee From c95d09ec4bad62356b0ad3c149c287e6433bc0b1 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 20 Apr 2024 03:50:06 +0100 Subject: [PATCH 1847/2138] fuse: Convert fuse_readpages_end() to use folio_end_read() ANBZ: #12255 commit 413e8f014c8b848e4ce939156f210df59fbd1c24 upstream. Nobody checks the error flag on fuse folios, so stop setting it. Optimise the (optional) setting of the uptodate flag and clearing of the lock flag by using folio_end_read(). Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/file.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 7ba75b6e6ad9..4ed4d9683c70 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -942,14 +942,10 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, } for (i = 0; i < ap->num_pages; i++) { - struct page *page = ap->pages[i]; + struct folio *folio = page_folio(ap->pages[i]); - if (!err) - SetPageUptodate(page); - else - SetPageError(page); - unlock_page(page); - put_page(page); + folio_end_read(folio, !err); + folio_put(folio); } if (ia->ff) fuse_file_put(ia->ff, false); -- Gitee From ab1123325fe6910608380507e3a8c31ad3c538b0 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Wed, 28 Aug 2024 15:55:17 +0200 Subject: [PATCH 1848/2138] fuse: clear PG_uptodate when using a stolen page MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12255 commit 76a51ac00ca2a72fe3e168b7fb0e70f75ba6f512 upstream. Originally when a stolen page was inserted into fuse's page cache by fuse_try_move_page(), it would be marked uptodate. Then fuse_readpages_end() would call SetPageUptodate() again on the already uptodate page. 
Commit 413e8f014c8b ("fuse: Convert fuse_readpages_end() to use folio_end_read()") changed that by replacing the SetPageUptodate() + unlock_page() combination with folio_end_read(), which does mostly the same, except it sets the uptodate flag with an xor operation, which in the above scenario resulted in the uptodate flag being cleared, which in turn resulted in EIO being returned on the read. Fix by clearing PG_uptodate instead of setting it in fuse_try_move_page(), conforming to the expectation of folio_end_read(). Reported-by: Jürg Billeter Debugged-by: Matthew Wilcox Fixes: 413e8f014c8b ("fuse: Convert fuse_readpages_end() to use folio_end_read()") Cc: # v6.10 Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/dev.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 42d167ceceac..cedc56d37a89 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -797,7 +797,6 @@ static int fuse_check_folio(struct folio *folio) (folio->flags & PAGE_FLAGS_CHECK_AT_PREP & ~(1 << PG_locked | 1 << PG_referenced | - 1 << PG_uptodate | 1 << PG_lru | 1 << PG_active | 1 << PG_workingset | @@ -842,9 +841,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) newfolio = page_folio(buf->page); - if (!folio_test_uptodate(newfolio)) - folio_mark_uptodate(newfolio); - + folio_clear_uptodate(newfolio); folio_clear_mappedtodisk(newfolio); if (fuse_check_folio(newfolio) != 0) -- Gitee From f9b765a9b03732931d77b04dfbc5e788315aa102 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 10 Jul 2024 16:42:35 -0400 Subject: [PATCH 1849/2138] fuse: Convert fuse_write_end() to use a folio ANBZ: #12255 commit 556d0ac068d71b78c309d7444357df4fa55f594e upstream. Convert the passed page to a folio and operate on that. Replaces five calls to compound_head() with one. 
Reviewed-by: Josef Bacik Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Christian Brauner Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/file.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 4ed4d9683c70..f53030bf5c23 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2439,29 +2439,30 @@ static int fuse_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { - struct inode *inode = page->mapping->host; + struct folio *folio = page_folio(page); + struct inode *inode = folio->mapping->host; /* Haven't copied anything? Skip zeroing, size extending, dirtying. */ if (!copied) goto unlock; pos += copied; - if (!PageUptodate(page)) { + if (!folio_test_uptodate(folio)) { /* Zero any unwritten bytes at the end of the page */ size_t endoff = pos & ~PAGE_MASK; if (endoff) - zero_user_segment(page, endoff, PAGE_SIZE); - SetPageUptodate(page); + folio_zero_segment(folio, endoff, PAGE_SIZE); + folio_mark_uptodate(folio); } if (pos > inode->i_size) i_size_write(inode, pos); - set_page_dirty(page); + folio_mark_dirty(folio); unlock: - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); return copied; } -- Gitee From 1d1ec33ada7607373009f649f2f60c7956d4436f Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 11 Jul 2024 16:58:06 -0400 Subject: [PATCH 1850/2138] fuse: Convert fuse_write_begin() to use a folio ANBZ: #12255 commit a060d835cf76605fbb784f1285a6d40e9239c436 upstream. Fetch a folio from the page cache instead of a page and use it throughout removing several calls to compound_head() and supporting large folios (in this function). We still have to convert back to a page for calling internal fuse functions, but hopefully they will be converted soon. 
Reviewed-by: Josef Bacik Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Christian Brauner Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/file.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index f53030bf5c23..ae77a260e841 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2396,41 +2396,42 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping, { pgoff_t index = pos >> PAGE_SHIFT; struct fuse_conn *fc = get_fuse_conn(file_inode(file)); - struct page *page; + struct folio *folio; loff_t fsize; int err = -ENOMEM; WARN_ON(!fc->writeback_cache); - page = grab_cache_page_write_begin(mapping, index); - if (!page) + folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, + mapping_gfp_mask(mapping)); + if (IS_ERR(folio)) goto error; - fuse_wait_on_page_writeback(mapping->host, page->index); + fuse_wait_on_page_writeback(mapping->host, folio->index); - if (PageUptodate(page) || len == PAGE_SIZE) + if (folio_test_uptodate(folio) || len >= folio_size(folio)) goto success; /* - * Check if the start this page comes after the end of file, in which - * case the readpage can be optimized away. + * Check if the start of this folio comes after the end of file, + * in which case the readpage can be optimized away. 
*/ fsize = i_size_read(mapping->host); - if (fsize <= (pos & PAGE_MASK)) { - size_t off = pos & ~PAGE_MASK; + if (fsize <= folio_pos(folio)) { + size_t off = offset_in_folio(folio, pos); if (off) - zero_user_segment(page, 0, off); + folio_zero_segment(folio, 0, off); goto success; } - err = fuse_do_readpage(file, page); + err = fuse_do_readpage(file, &folio->page); if (err) goto cleanup; success: - *pagep = page; + *pagep = &folio->page; return 0; cleanup: - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); error: return err; } -- Gitee From b4fd5075631db507360e3c192229c99f868fa779 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 30 Sep 2024 09:45:09 -0400 Subject: [PATCH 1851/2138] fuse: use fuse_range_is_writeback() instead of iterating pages ANBZ: #12255 commit aaa32429da09a9afa0f54a197733d757334ed169 upstream. fuse_send_readpages() waits for writeback on each page. This can be replaced by a single call to fuse_range_is_writeback(). [SzM: split this off from "fuse: convert readahead to use folios"] Signed-off-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/file.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index ae77a260e841..24d33fac45f7 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -992,12 +992,17 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) static void fuse_readahead(struct readahead_control *rac) { struct inode *inode = rac->mapping->host; + struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = get_fuse_conn(inode); unsigned int i, max_pages, nr_pages = 0; + pgoff_t first = readahead_index(rac); + pgoff_t last = first + readahead_count(rac) - 1; if (fuse_is_bad(inode)) return; + wait_event(fi->page_waitq, !fuse_range_is_writeback(inode, first, last)); + max_pages = min_t(unsigned int, 
fc->max_pages, fc->max_read / PAGE_SIZE); @@ -1024,8 +1029,6 @@ static void fuse_readahead(struct readahead_control *rac) ap = &ia->ap; nr_pages = __readahead_batch(rac, ap->pages, nr_pages); for (i = 0; i < nr_pages; i++) { - fuse_wait_on_page_writeback(inode, - readahead_index(rac) + i); ap->descs[i].length = PAGE_SIZE; } ap->num_pages = nr_pages; -- Gitee From 6fe2fa5301ac929b6f58508cbe5c14771821b2cd Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 30 Sep 2024 09:45:09 -0400 Subject: [PATCH 1852/2138] fuse: convert readahead to use folios ANBZ: #12255 commit 3eab9d7bc2f4ae7f3f9c9c7852ff61600df79856 upstream. Currently we're using the __readahead_batch() helper which populates our fuse_args_pages->pages array with pages. Convert this to use the newer folio based pattern which is to call readahead_folio() to get the next folio in the read ahead batch. I've updated the code to use things like folio_size() and to take into account larger folio sizes, but this is purely to make that eventual work easier to do, we currently will not get large folios so this is more future proofing than actual support. [SzM: remove check for readahead_folio() won't return NULL (at least for now) so remove ugly assign in conditional.] 
Signed-off-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/file.c | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 24d33fac45f7..ff21f259c008 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -945,7 +945,6 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, struct folio *folio = page_folio(ap->pages[i]); folio_end_read(folio, !err); - folio_put(folio); } if (ia->ff) fuse_file_put(ia->ff, false); @@ -994,7 +993,7 @@ static void fuse_readahead(struct readahead_control *rac) struct inode *inode = rac->mapping->host; struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = get_fuse_conn(inode); - unsigned int i, max_pages, nr_pages = 0; + unsigned int max_pages, nr_pages; pgoff_t first = readahead_index(rac); pgoff_t last = first + readahead_count(rac) - 1; @@ -1006,9 +1005,22 @@ static void fuse_readahead(struct readahead_control *rac) max_pages = min_t(unsigned int, fc->max_pages, fc->max_read / PAGE_SIZE); - for (;;) { + /* + * This is only accurate the first time through, since readahead_folio() + * doesn't update readahead_count() from the previous folio until the + * next call. Grab nr_pages here so we know how many pages we're going + * to have to process. This means that we will exit here with + * readahead_count() == folio_nr_pages(last_folio), but we will have + * consumed all of the folios, and read_pages() will call + * readahead_folio() again which will clean up the rac. 
+ */ + nr_pages = readahead_count(rac); + + while (nr_pages) { struct fuse_io_args *ia; struct fuse_args_pages *ap; + struct folio *folio; + unsigned cur_pages = min(max_pages, nr_pages); if (fc->num_background >= fc->congestion_threshold && rac->ra->async_size >= readahead_count(rac)) @@ -1018,21 +1030,19 @@ static void fuse_readahead(struct readahead_control *rac) */ break; - nr_pages = readahead_count(rac) - nr_pages; - if (nr_pages > max_pages) - nr_pages = max_pages; - if (nr_pages == 0) - break; - ia = fuse_io_alloc(NULL, nr_pages); + ia = fuse_io_alloc(NULL, cur_pages); if (!ia) return; ap = &ia->ap; - nr_pages = __readahead_batch(rac, ap->pages, nr_pages); - for (i = 0; i < nr_pages; i++) { - ap->descs[i].length = PAGE_SIZE; + + while (ap->num_pages < cur_pages) { + folio = readahead_folio(rac); + ap->pages[ap->num_pages] = &folio->page; + ap->descs[ap->num_pages].length = folio_size(folio); + ap->num_pages++; } - ap->num_pages = nr_pages; fuse_send_readpages(ia, rac->file); + nr_pages -= cur_pages; } } -- Gitee From 90acdb3c9bc57210c9b6fb9e972f9dc8c36e3416 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 30 Sep 2024 09:45:10 -0400 Subject: [PATCH 1853/2138] fuse: convert fuse_send_write_pages to use folios ANBZ: #12255 commit 785d06afc840922cced0c4e90f99209210dd6bd9 upstream. Convert this to grab the folio from the fuse_args_pages and use the appropriate folio related functions. 
Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Joanne Koong Signed-off-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/file.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index ff21f259c008..7f90cc81e5a8 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1175,23 +1175,23 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, offset = ap->descs[0].offset; count = ia->write.out.size; for (i = 0; i < ap->num_pages; i++) { - struct page *page = ap->pages[i]; + struct folio *folio = page_folio(ap->pages[i]); if (err) { - ClearPageUptodate(page); + folio_clear_uptodate(folio); } else { - if (count >= PAGE_SIZE - offset) - count -= PAGE_SIZE - offset; + if (count >= folio_size(folio) - offset) + count -= folio_size(folio) - offset; else { if (short_write) - ClearPageUptodate(page); + folio_clear_uptodate(folio); count = 0; } offset = 0; } if (ia->write.page_locked && (i == ap->num_pages - 1)) - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); } return err; -- Gitee From b9f16e49841835a99d8cc33f7af23d61f225d9fc Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 30 Sep 2024 09:45:11 -0400 Subject: [PATCH 1854/2138] fuse: convert fuse_fill_write_pages to use folios ANBZ: #12255 commit 9bafbe7ae01321eb1345daf0975355f890c975cf upstream. Convert this to grab the folio directly, and update all the helpers to use the folio related functions. 
Reviewed-by: Joanne Koong Signed-off-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/file.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 7f90cc81e5a8..b7a3eedcadc6 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1213,7 +1213,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, do { size_t tmp; - struct page *page; + struct folio *folio; pgoff_t index = pos >> PAGE_SHIFT; size_t bytes = min_t(size_t, PAGE_SIZE - offset, iov_iter_count(ii)); @@ -1225,25 +1225,27 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, if (fault_in_iov_iter_readable(ii, bytes)) break; - err = -ENOMEM; - page = grab_cache_page_write_begin(mapping, index); - if (!page) + folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN, + mapping_gfp_mask(mapping)); + if (IS_ERR(folio)) { + err = PTR_ERR(folio); break; + } if (mapping_writably_mapped(mapping)) - flush_dcache_page(page); + flush_dcache_folio(folio); - tmp = copy_page_from_iter_atomic(page, offset, bytes, ii); - flush_dcache_page(page); + tmp = copy_folio_from_iter_atomic(folio, offset, bytes, ii); + flush_dcache_folio(folio); if (!tmp) { - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); goto again; } err = 0; - ap->pages[ap->num_pages] = page; + ap->pages[ap->num_pages] = &folio->page; ap->descs[ap->num_pages].length = tmp; ap->num_pages++; @@ -1255,10 +1257,10 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, /* If we copied full page, mark it uptodate */ if (tmp == PAGE_SIZE) - SetPageUptodate(page); + folio_mark_uptodate(folio); - if (PageUptodate(page)) { - unlock_page(page); + if (folio_test_uptodate(folio)) { + folio_unlock(folio); } else { ia->write.page_locked = true; break; -- Gitee From 8b398c5bbd4e27cbae6a566e0dbee664f6649590 Mon Sep 17 00:00:00 
2001 From: Josef Bacik Date: Mon, 30 Sep 2024 09:45:12 -0400 Subject: [PATCH 1855/2138] fuse: convert fuse_page_mkwrite to use folios ANBZ: #12255 commit 184b6eb3645ad9e0e5ea8a1ac9e6a4fd501a4b45 upstream. Convert this to grab the folio directly, and update all the helpers to use the folio related functions. Reviewed-by: Joanne Koong Signed-off-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/file.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index b7a3eedcadc6..c5053f106825 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -483,6 +483,16 @@ static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index) wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index)); } +static void fuse_wait_on_folio_writeback(struct inode *inode, + struct folio *folio) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + pgoff_t last = folio_next_index(folio) - 1; + + wait_event(fi->page_waitq, + !fuse_range_is_writeback(inode, folio_index(folio), last)); +} + /* * Wait for all pending writepages on the inode to finish. 
* @@ -2527,17 +2537,17 @@ static void fuse_vma_close(struct vm_area_struct *vma) */ static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf) { - struct page *page = vmf->page; + struct folio *folio = page_folio(vmf->page); struct inode *inode = file_inode(vmf->vma->vm_file); file_update_time(vmf->vma->vm_file); - lock_page(page); - if (page->mapping != inode->i_mapping) { - unlock_page(page); + folio_lock(folio); + if (folio->mapping != inode->i_mapping) { + folio_unlock(folio); return VM_FAULT_NOPAGE; } - fuse_wait_on_page_writeback(inode, page->index); + fuse_wait_on_folio_writeback(inode, folio); return VM_FAULT_LOCKED; } -- Gitee From bd2357bc241954ec124345b3179dfaeecae78c8d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 30 Sep 2024 09:45:13 -0400 Subject: [PATCH 1856/2138] fuse: use kiocb_modified in buffered write path ANBZ: #12255 commit e6befec5e901e06dd6c7c456a4e20d2529efb014 upstream. This combines the file_remove_privs() and file_update_time() call into one call. Signed-off-by: Josef Bacik Reviewed-by: Bernd Schubert Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/file.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index c5053f106825..3caa24641c2b 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1461,11 +1461,7 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) task_io_account_write(count); - err = file_remove_privs(file); - if (err) - goto out; - - err = file_update_time(file); + err = kiocb_modified(iocb); if (err) goto out; -- Gitee From b8759fb56611bb620158fdccd1ae8e3b8f141adf Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 30 Sep 2024 09:45:14 -0400 Subject: [PATCH 1857/2138] fuse: convert fuse_do_readpage to use folios ANBZ: #12255 commit 65fe891d9005a41de2fccfd5ced3c0bf6f1e3bcd upstream. 
Now that the buffered write path is using folios, convert fuse_do_readpage() to take a folio instead of a page, update it to use the appropriate folio helpers, and update the callers to pass in the folio directly instead of a page. Signed-off-by: Josef Bacik Reviewed-by: Joanne Koong Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/file.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 3caa24641c2b..3986fa2a869f 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -865,12 +865,13 @@ static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read, } } -static int fuse_do_readpage(struct file *file, struct page *page) +static int fuse_do_readfolio(struct file *file, struct folio *folio) { - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; struct fuse_mount *fm = get_fuse_mount(inode); - loff_t pos = page_offset(page); + loff_t pos = folio_pos(folio); struct fuse_page_desc desc = { .length = PAGE_SIZE }; + struct page *page = &folio->page; struct fuse_io_args ia = { .ap.args.page_zeroing = true, .ap.args.out_pages = true, @@ -882,11 +883,11 @@ static int fuse_do_readpage(struct file *file, struct page *page) u64 attr_ver; /* - * Page writeback can extend beyond the lifetime of the - * page-cache page, so make sure we read a properly synced - * page. + * With the temporary pages that are used to complete writeback, we can + * have writeback that extends beyond the lifetime of the folio. So + * make sure we read a properly synced folio. 
*/ - fuse_wait_on_page_writeback(inode, page->index); + fuse_wait_on_folio_writeback(inode, folio); attr_ver = fuse_get_attr_version(fm->fc); @@ -904,25 +905,24 @@ static int fuse_do_readpage(struct file *file, struct page *page) if (res < desc.length) fuse_short_read(inode, attr_ver, res, &ia.ap); - SetPageUptodate(page); + folio_mark_uptodate(folio); return 0; } static int fuse_read_folio(struct file *file, struct folio *folio) { - struct page *page = &folio->page; - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; int err; err = -EIO; if (fuse_is_bad(inode)) goto out; - err = fuse_do_readpage(file, page); + err = fuse_do_readfolio(file, folio); fuse_invalidate_atime(inode); out: - unlock_page(page); + folio_unlock(folio); return err; } @@ -2443,7 +2443,7 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping, folio_zero_segment(folio, 0, off); goto success; } - err = fuse_do_readpage(file, &folio->page); + err = fuse_do_readfolio(file, folio); if (err) goto cleanup; success: -- Gitee From 5087a51b2542104c31ee38e45677c3574d6fc551 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 30 Sep 2024 09:45:15 -0400 Subject: [PATCH 1858/2138] fuse: convert fuse_writepage_need_send to take a folio ANBZ: #12255 commit 6930b8dac19ee86282222ea1cb559ee0602e4877 upstream. fuse_writepage_need_send is called by fuse_writepages_fill() which already has a folio. Change fuse_writepage_need_send() to take a folio instead, add a helper to check if the folio range is under writeback and use this, as well as the appropriate folio helpers in the rest of the function. Update fuse_writepage_need_send() to pass in the folio directly. 
Reviewed-by: Joanne Koong Signed-off-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/file.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 3986fa2a869f..54c5b09db88f 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -483,14 +483,19 @@ static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index) wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index)); } +static inline bool fuse_folio_is_writeback(struct inode *inode, + struct folio *folio) +{ + pgoff_t last = folio_next_index(folio) - 1; + return fuse_range_is_writeback(inode, folio_index(folio), last); +} + static void fuse_wait_on_folio_writeback(struct inode *inode, struct folio *folio) { struct fuse_inode *fi = get_fuse_inode(inode); - pgoff_t last = folio_next_index(folio) - 1; - wait_event(fi->page_waitq, - !fuse_range_is_writeback(inode, folio_index(folio), last)); + wait_event(fi->page_waitq, !fuse_folio_is_writeback(inode, folio)); } /* @@ -2256,7 +2261,7 @@ static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa, return false; } -static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page, +static bool fuse_writepage_need_send(struct fuse_conn *fc, struct folio *folio, struct fuse_args_pages *ap, struct fuse_fill_wb_data *data) { @@ -2268,7 +2273,7 @@ static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page, * the pages are faulted with get_user_pages(), and then after the read * completed. 
*/ - if (fuse_page_is_writeback(data->inode, page->index)) + if (fuse_folio_is_writeback(data->inode, folio)) return true; /* Reached max pages */ @@ -2280,7 +2285,7 @@ static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page, return true; /* Discontinuity */ - if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index) + if (data->orig_pages[ap->num_pages - 1]->index + 1 != folio_index(folio)) return true; /* Need to grow the pages array? If so, did the expansion fail? */ @@ -2309,7 +2314,7 @@ static int fuse_writepages_fill(struct folio *folio, goto out_unlock; } - if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) { + if (wpa && fuse_writepage_need_send(fc, folio, ap, data)) { fuse_writepages_send(data); data->wpa = NULL; } -- Gitee From f55877cd09967d66429928eede8ce48fdea57d83 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 30 Sep 2024 09:45:16 -0400 Subject: [PATCH 1859/2138] fuse: use the folio based vmstat helpers ANBZ: #12255 commit 949d67ac2eff129f1dbe2d6dc69f51f4f64281f2 upstream. In order to make it easier to switch to folios in the fuse_args_pages update the places where we update the vmstat counters for writeback to use the folio related helpers. On the inc side this is easy as we already have the folio, on the dec side we have to page_folio() the pages for now. 
Reviewed-by: Joanne Koong Signed-off-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/file.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 54c5b09db88f..97f7b3254360 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1788,12 +1788,12 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa) kfree(wpa); } -static void fuse_writepage_finish_stat(struct inode *inode, struct page *page) +static void fuse_writepage_finish_stat(struct inode *inode, struct folio *folio) { struct backing_dev_info *bdi = inode_to_bdi(inode); dec_wb_stat(&bdi->wb, WB_WRITEBACK); - dec_node_page_state(page, NR_WRITEBACK_TEMP); + node_stat_sub_folio(folio, NR_WRITEBACK_TEMP); wb_writeout_inc(&bdi->wb); } @@ -1805,7 +1805,7 @@ static void fuse_writepage_finish(struct fuse_writepage_args *wpa) int i; for (i = 0; i < ap->num_pages; i++) - fuse_writepage_finish_stat(inode, ap->pages[i]); + fuse_writepage_finish_stat(inode, page_folio(ap->pages[i])); wake_up(&fi->page_waitq); } @@ -1860,7 +1860,8 @@ __acquires(fi->lock) for (aux = wpa->next; aux; aux = next) { next = aux->next; aux->next = NULL; - fuse_writepage_finish_stat(aux->inode, aux->ia.ap.pages[0]); + fuse_writepage_finish_stat(aux->inode, + page_folio(aux->ia.ap.pages[0])); fuse_writepage_free(aux); } @@ -2080,7 +2081,7 @@ static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struc ap->descs[page_index].length = PAGE_SIZE; inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); - inc_node_page_state(&tmp_folio->page, NR_WRITEBACK_TEMP); + node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP); } static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio, @@ -2254,7 +2255,8 @@ static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa, spin_unlock(&fi->lock); if (tmp) { - 
fuse_writepage_finish_stat(new_wpa->inode, new_ap->pages[0]); + fuse_writepage_finish_stat(new_wpa->inode, + page_folio(new_ap->pages[0])); fuse_writepage_free(new_wpa); } -- Gitee From 931efd8db272268c43c90288958b3f0d38cfbf1e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 30 Sep 2024 09:45:17 -0400 Subject: [PATCH 1860/2138] fuse: convert fuse_retrieve to use folios ANBZ: #12255 commit 71e10dc2f561b1f7cef5152a865813339e96d575 upstream. We're just looking for pages in a mapping, use a folio and the folio lookup function directly instead of using the page helper. Reviewed-by: Joanne Koong Signed-off-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/dev.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index cedc56d37a89..d7ad0d59bea2 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1729,15 +1729,15 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, index = outarg->offset >> PAGE_SHIFT; while (num && ap->num_pages < num_pages) { - struct page *page; + struct folio *folio; unsigned int this_num; - page = find_get_page(mapping, index); - if (!page) + folio = filemap_get_folio(mapping, index); + if (IS_ERR(folio)) break; this_num = min_t(unsigned, num, PAGE_SIZE - offset); - ap->pages[ap->num_pages] = page; + ap->pages[ap->num_pages] = &folio->page; ap->descs[ap->num_pages].offset = offset; ap->descs[ap->num_pages].length = this_num; ap->num_pages++; -- Gitee From 28951e70137632278083d2b7aa8e817d824ab19b Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 30 Sep 2024 09:45:18 -0400 Subject: [PATCH 1861/2138] fuse: convert fuse_notify_store to use folios ANBZ: #12255 commit 8807f117be9d15088003e63bfaf0533355371ee8 upstream. 
This function creates pages in an inode and copies data into them, update the function to use a folio instead of a page, and use the appropriate folio helpers. [SzM: use filemap_grab_folio()] [Hau Tao: The third argument of folio_zero_range() should be the length to be zeroed, not the total length. Fix it by using folio_zero_segment() instead in fuse_notify_store()] Reviewed-by: Joanne Koong Signed-off-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4202 --- fs/fuse/dev.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index d7ad0d59bea2..0edce7ecfce4 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1627,24 +1627,25 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, num = outarg.size; while (num) { + struct folio *folio; struct page *page; unsigned int this_num; - err = -ENOMEM; - page = find_or_create_page(mapping, index, - mapping_gfp_mask(mapping)); - if (!page) + folio = filemap_grab_folio(mapping, index); + err = PTR_ERR(folio); + if (IS_ERR(folio)) goto out_iput; - this_num = min_t(unsigned, num, PAGE_SIZE - offset); + page = &folio->page; + this_num = min_t(unsigned, num, folio_size(folio) - offset); err = fuse_copy_page(cs, &page, offset, this_num, 0); - if (!PageUptodate(page) && !err && offset == 0 && - (this_num == PAGE_SIZE || file_size == end)) { - zero_user_segment(page, this_num, PAGE_SIZE); - SetPageUptodate(page); + if (!folio_test_uptodate(folio) && !err && offset == 0 && + (this_num == folio_size(folio) || file_size == end)) { + folio_zero_segment(folio, this_num, folio_size(folio)); + folio_mark_uptodate(folio); } - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); if (err) goto out_iput; -- Gitee From c4de125dd1a130e49d684d438ac7eabc6f9e3809 Mon Sep 17 00:00:00 2001 From: Tianyang Zhang Date: Wed, 27 Nov 2024 14:28:18 +0800 
Subject: [PATCH 1862/2138] anolis: irq-loongarch-avec.c:Support Multi-node ANBZ: #12189 This patch enables the advanced interrupt controller function under multiple-node of 3C6000. The topology of the advanced interrupt controller is consistent with NUMA node. We check the enable status of the node where each CPU is located once when it goes online, which may cause some additional operations, but it can ensure that the advanced interrupt controller can still be used in situations where some CPUs cannot start. In addition, there is a bug in the code that uses the ipi method to clean expired affinity on multiple nodes. Let's fix it together Signed-off-by: Tianyang Zhang Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4183 --- drivers/irqchip/irq-loongarch-avec.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c index 638516164d2f..b9202fdb6d92 100644 --- a/drivers/irqchip/irq-loongarch-avec.c +++ b/drivers/irqchip/irq-loongarch-avec.c @@ -84,7 +84,7 @@ static void avecintc_sync(struct avecintc_data *adata) plist = per_cpu_ptr(&pending_list, adata->prev_cpu); list_add_tail(&adata->entry, &plist->head); adata->moving = 1; - smp_ops.send_ipi_single(adata->prev_cpu, SMP_CLEAR_VECTOR); + smp_ops.send_ipi_single(adata->prev_cpu, ACTION_CLEAR_VECTOR); } } @@ -133,6 +133,7 @@ static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *de static int avecintc_cpu_online(unsigned int cpu) { + long value; if (!loongarch_avec.vector_matrix) return 0; @@ -142,6 +143,10 @@ static int avecintc_cpu_online(unsigned int cpu) pending_list_init(cpu); + value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC); + value |= IOCSR_MISC_FUNC_AVEC_EN; + iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC); + raw_spin_unlock(&loongarch_avec.lock); return 0; @@ -194,7 +199,7 @@ void complete_irq_moving(void) } if (isr & (1UL << (vector % VECTORS_PER_REG))) { - 
smp_ops.send_ipi_single(cpu, SMP_CLEAR_VECTOR); + smp_ops.send_ipi_single(cpu, ACTION_CLEAR_VECTOR); continue; } list_del(&adata->entry); -- Gitee From 516bbf303db7f0861b636e959a876c3e6b9a98bc Mon Sep 17 00:00:00 2001 From: gaojuxin Date: Thu, 5 Dec 2024 11:09:39 +0800 Subject: [PATCH 1863/2138] anolis: LoongArch: Add CPU HWMon platform driver ANBZ: #12293 This add CPU HWMon (temperature sensor) platform driver for Loongson-3. Tested-by: Xi Ruoyao Signed-off-by: Huacai Chen Signed-off-by: Kexy Biscuit Signed-off-by: gaojuxin Reviewed-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4209 --- drivers/platform/loongarch/Kconfig | 8 + drivers/platform/loongarch/Makefile | 1 + drivers/platform/loongarch/cpu_hwmon.c | 196 +++++++++++++++++++++++++ 3 files changed, 205 insertions(+) create mode 100644 drivers/platform/loongarch/cpu_hwmon.c diff --git a/drivers/platform/loongarch/Kconfig b/drivers/platform/loongarch/Kconfig index 5633e4d73991..9ec1a86ef7fa 100644 --- a/drivers/platform/loongarch/Kconfig +++ b/drivers/platform/loongarch/Kconfig @@ -16,6 +16,14 @@ menuconfig LOONGARCH_PLATFORM_DEVICES if LOONGARCH_PLATFORM_DEVICES +config CPU_HWMON + bool "Loongson CPU HWMon Driver" + depends on MACH_LOONGSON64 + select HWMON + default y + help + Loongson-3A/3B/3C CPU HWMon (temperature sensor) driver. 
+ config LOONGSON_LAPTOP tristate "Generic Loongson-3 Laptop Driver" depends on ACPI diff --git a/drivers/platform/loongarch/Makefile b/drivers/platform/loongarch/Makefile index f43ab03db1a2..695688bed423 100644 --- a/drivers/platform/loongarch/Makefile +++ b/drivers/platform/loongarch/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_LOONGSON_LAPTOP) += loongson-laptop.o +obj-$(CONFIG_CPU_HWMON) += cpu_hwmon.o diff --git a/drivers/platform/loongarch/cpu_hwmon.c b/drivers/platform/loongarch/cpu_hwmon.c new file mode 100644 index 000000000000..c705d088c44a --- /dev/null +++ b/drivers/platform/loongarch/cpu_hwmon.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Loongson Technology Corporation Limited + */ +#include +#include +#include +#include +#include + +#include + +static int nr_packages; +static struct device *cpu_hwmon_dev; + +static int loongson3_cpu_temp(int cpu) +{ + u32 reg; + + reg = iocsr_read32(LOONGARCH_IOCSR_CPUTEMP) & 0xff; + + return (int)((s8)reg) * 1000; +} + +static ssize_t cpu_temp_label(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int id = (to_sensor_dev_attr(attr))->index - 1; + + return sprintf(buf, "CPU %d Temperature\n", id); +} + +static ssize_t get_cpu_temp(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int id = (to_sensor_dev_attr(attr))->index - 1; + int value = loongson3_cpu_temp(id); + + return sprintf(buf, "%d\n", value); +} + +static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1); +static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1); +static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2); +static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2); +static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3); +static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3); +static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp4_label, 0444, 
cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp5_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp5_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp6_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp7_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp7_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp8_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp8_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp9_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp9_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp10_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp10_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp11_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp11_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp12_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp12_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp13_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp13_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp14_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp14_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp15_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp15_label, 0444, cpu_temp_label, NULL, 4); +static SENSOR_DEVICE_ATTR(temp16_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp16_label, 0444, cpu_temp_label, NULL, 4); + +static struct attribute *cpu_hwmon_attributes[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp2_label.dev_attr.attr, + 
&sensor_dev_attr_temp3_input.dev_attr.attr, + &sensor_dev_attr_temp3_label.dev_attr.attr, + &sensor_dev_attr_temp4_input.dev_attr.attr, + &sensor_dev_attr_temp4_label.dev_attr.attr, + &sensor_dev_attr_temp5_input.dev_attr.attr, + &sensor_dev_attr_temp5_label.dev_attr.attr, + &sensor_dev_attr_temp6_input.dev_attr.attr, + &sensor_dev_attr_temp6_label.dev_attr.attr, + &sensor_dev_attr_temp7_input.dev_attr.attr, + &sensor_dev_attr_temp7_label.dev_attr.attr, + &sensor_dev_attr_temp8_input.dev_attr.attr, + &sensor_dev_attr_temp8_label.dev_attr.attr, + &sensor_dev_attr_temp9_input.dev_attr.attr, + &sensor_dev_attr_temp9_label.dev_attr.attr, + &sensor_dev_attr_temp10_input.dev_attr.attr, + &sensor_dev_attr_temp10_label.dev_attr.attr, + &sensor_dev_attr_temp11_input.dev_attr.attr, + &sensor_dev_attr_temp11_label.dev_attr.attr, + &sensor_dev_attr_temp12_input.dev_attr.attr, + &sensor_dev_attr_temp12_label.dev_attr.attr, + &sensor_dev_attr_temp13_input.dev_attr.attr, + &sensor_dev_attr_temp13_label.dev_attr.attr, + &sensor_dev_attr_temp14_input.dev_attr.attr, + &sensor_dev_attr_temp14_label.dev_attr.attr, + &sensor_dev_attr_temp15_input.dev_attr.attr, + &sensor_dev_attr_temp15_label.dev_attr.attr, + &sensor_dev_attr_temp16_input.dev_attr.attr, + &sensor_dev_attr_temp16_label.dev_attr.attr, + NULL +}; +static umode_t cpu_hwmon_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + int id = i / 2; + + if (id < nr_packages) + return attr->mode; + return 0; +} + +static struct attribute_group cpu_hwmon_group = { + .attrs = cpu_hwmon_attributes, + .is_visible = cpu_hwmon_is_visible, +}; + +static const struct attribute_group *cpu_hwmon_groups[] = { + &cpu_hwmon_group, + NULL +}; + +static int cpu_initial_threshold = 72000; +static int cpu_thermal_threshold = 96000; +module_param(cpu_thermal_threshold, int, 0644); +MODULE_PARM_DESC(cpu_thermal_threshold, "cpu thermal threshold (96000 (default))"); + +static struct delayed_work thermal_work; + +static void 
do_thermal_timer(struct work_struct *work) +{ + int i, value, temp_max = 0; + + for (i = 0; i < nr_packages; i++) { + value = loongson3_cpu_temp(i); + if (value > temp_max) + temp_max = value; + } + + if (temp_max <= cpu_thermal_threshold) + schedule_delayed_work(&thermal_work, msecs_to_jiffies(5000)); + else + orderly_poweroff(true); +} + +static int __init loongson_hwmon_init(void) +{ + int i, value, temp_max = 0; + + pr_info("Loongson Hwmon Enter...\n"); + + nr_packages = loongson_sysconf.nr_cpus / + loongson_sysconf.cores_per_package; + + cpu_hwmon_dev = hwmon_device_register_with_groups(NULL, "cpu_hwmon", + NULL, cpu_hwmon_groups); + if (IS_ERR(cpu_hwmon_dev)) { + pr_err("Hwmon register fail with %ld!\n", PTR_ERR(cpu_hwmon_dev)); + return PTR_ERR(cpu_hwmon_dev); + } + + for (i = 0; i < nr_packages; i++) { + value = loongson3_cpu_temp(i); + if (value > temp_max) + temp_max = value; + } + + pr_info("Initial CPU temperature is %d (highest).\n", temp_max); + if (temp_max > cpu_initial_threshold) + cpu_thermal_threshold += temp_max - cpu_initial_threshold; + + INIT_DEFERRABLE_WORK(&thermal_work, do_thermal_timer); + schedule_delayed_work(&thermal_work, msecs_to_jiffies(20000)); + + return 0; +} + +static void __exit loongson_hwmon_exit(void) +{ + cancel_delayed_work_sync(&thermal_work); + hwmon_device_unregister(cpu_hwmon_dev); +} + +module_init(loongson_hwmon_init); +module_exit(loongson_hwmon_exit); + +MODULE_AUTHOR("Huacai Chen "); +MODULE_DESCRIPTION("Loongson CPU Hwmon driver"); +MODULE_LICENSE("GPL"); -- Gitee From 3905f26267037b174ab21ecc901653658ba955ec Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 3 Oct 2023 11:49:16 -0700 Subject: [PATCH 1864/2138] platform/x86/intel-uncore-freq: Ignore minor version change MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12077 commit 4b0d9c8f8f55562ed5346723e66eec64f22887f4 upstream. 
The hardware definition of every TPMI feature contains a major and minor version. When there is a change in the MMIO offset or change in the definition of a field, hardware will change major version. For addition of new fields without modifying existing MMIO offsets or fields, only the minor version is changed. Driver is developed to support uncore frequency control (UFS) for a major and minor version. If the hardware changes major version, since offsets and definitions are changed, driver cannot continue to provide UFS interface to users. Driver can still function with minor version change as it will just miss the new functionality added by the hardware. The current implementation logs information message and skips adding uncore sysfs entry for a resource for any version mismatch. Check major and minor version mismatch for every valid resource and fail on any major version mismatch by logging an error message. A valid resource has a version which is not 0xFF. If there is mismatch with the minor version, continue with a log message. Intel-SIG: commit 4b0d9c8f8f55 platform/x86/intel-uncore-freq: Ignore minor version change. Backport Intel-uncore-freq driver support for 6.6 from 6.11. 
Signed-off-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20231003184916.1860084-4-srinivas.pandruvada@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4182 --- .../uncore-frequency/uncore-frequency-tpmi.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index 105022164548..43f45fa7999a 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -28,7 +28,8 @@ #include "uncore-frequency-common.h" -#define UNCORE_HEADER_VERSION 1 +#define UNCORE_MAJOR_VERSION 0 +#define UNCORE_MINOR_VERSION 1 #define UNCORE_HEADER_INDEX 0 #define UNCORE_FABRIC_CLUSTER_OFFSET 8 @@ -303,12 +304,21 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ /* Check for version and skip this resource if there is mismatch */ header = readq(pd_info->uncore_base); pd_info->ufs_header_ver = header & UNCORE_VERSION_MASK; - if (pd_info->ufs_header_ver != UNCORE_HEADER_VERSION) { - dev_info(&auxdev->dev, "Uncore: Unsupported version:%d\n", - pd_info->ufs_header_ver); + + if (pd_info->ufs_header_ver == TPMI_VERSION_INVALID) continue; + + if (TPMI_MAJOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MAJOR_VERSION) { + dev_err(&auxdev->dev, "Uncore: Unsupported major version:%lx\n", + TPMI_MAJOR_VERSION(pd_info->ufs_header_ver)); + ret = -ENODEV; + goto remove_clusters; } + if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MINOR_VERSION) + dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n", + TPMI_MINOR_VERSION(pd_info->ufs_header_ver)); + /* Get Cluster ID Mask */ cluster_mask = 
FIELD_GET(UNCORE_LOCAL_FABRIC_CLUSTER_ID_MASK, header); if (!cluster_mask) { -- Gitee From 1e46701cedbc12938234be297178a6f21ae8be27 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 4 Dec 2023 14:17:40 -0800 Subject: [PATCH 1865/2138] platform/x86/intel-uncore-freq: Process read/write blocked feature status MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12077 commit b06458d1b1cbb99635c7bb4f9a4f4c4cef2ed984 upstream. When a feature is read blocked, don't continue to read uncore information and register with uncore core. When the feature is write blocked, continue to offer read interface but block setting uncore limits. Intel-SIG: commit b06458d1b1cb platform/x86/intel-uncore-freq: Process read/write blocked feature status. Backport intel uncore-freq driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231204221740.3645130-6-srinivas.pandruvada@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4182 --- .../uncore-frequency/uncore-frequency-tpmi.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index 43f45fa7999a..587437211d72 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -66,6 +66,7 @@ struct tpmi_uncore_struct { int min_ratio; struct tpmi_uncore_power_domain_info *pd_info; struct tpmi_uncore_cluster_info root_cluster; + bool write_blocked; }; #define UNCORE_GENMASK_MIN_RATIO GENMASK_ULL(21, 15) @@ -157,6 +158,9 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu cluster_info = container_of(data, struct 
tpmi_uncore_cluster_info, uncore_data); uncore_root = cluster_info->uncore_root; + if (uncore_root->write_blocked) + return -EPERM; + /* Update each cluster in a package */ if (cluster_info->root_domain) { struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root; @@ -233,12 +237,22 @@ static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore) static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id) { + bool read_blocked = 0, write_blocked = 0; struct intel_tpmi_plat_info *plat_info; struct tpmi_uncore_struct *tpmi_uncore; bool uncore_sysfs_added = false; int ret, i, pkg = 0; int num_resources; + ret = tpmi_get_feature_status(auxdev, TPMI_ID_UNCORE, &read_blocked, &write_blocked); + if (ret) + dev_info(&auxdev->dev, "Can't read feature status: ignoring blocked status\n"); + + if (read_blocked) { + dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n"); + return -ENODEV; + } + /* Get number of power domains, which is equal to number of resources */ num_resources = tpmi_get_resource_count(auxdev); if (!num_resources) @@ -267,6 +281,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ } tpmi_uncore->power_domain_count = num_resources; + tpmi_uncore->write_blocked = write_blocked; /* Get the package ID from the TPMI core */ plat_info = tpmi_get_platform_data(auxdev); -- Gitee From 09eb9cc549ba4c4402cce3be914abaefbd77ce0b Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 15 Apr 2024 15:06:25 -0700 Subject: [PATCH 1866/2138] platform/x86/intel-uncore-freq: Increase minor number support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12077 commit bc774d46b41482534c7ba92f6342ca0a355c13af upstream. No new changes will be added for minor version 2. Change the minor version number to 2 and stop displaying log message for unsupported minor version 2. 
Intel-SIG: commit bc774d46b414 platform/x86/intel-uncore-freq: Increase minor number support. Backport intel uncore-freq driver update for 6.6 from 6.10 Signed-off-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20240415220625.2828339-1-srinivas.pandruvada@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4182 --- .../x86/intel/uncore-frequency/uncore-frequency-tpmi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index 587437211d72..bb8e72deb354 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -29,7 +29,7 @@ #include "uncore-frequency-common.h" #define UNCORE_MAJOR_VERSION 0 -#define UNCORE_MINOR_VERSION 1 +#define UNCORE_MINOR_VERSION 2 #define UNCORE_HEADER_INDEX 0 #define UNCORE_FABRIC_CLUSTER_OFFSET 8 @@ -330,7 +330,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ goto remove_clusters; } - if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MINOR_VERSION) + if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) > UNCORE_MINOR_VERSION) dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n", TPMI_MINOR_VERSION(pd_info->ufs_header_ver)); -- Gitee From 768cdc3c1adc2676070bcf2b8ac40c4800945a89 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 17 Jun 2024 09:04:34 +0300 Subject: [PATCH 1867/2138] platform/x86/intel-uncore-freq: Re-arrange bit masks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12077 commit 36f70045528f5639bb50b3b74c7925943cf6983a upstream. 
Rename the various bitmasks from the 'UNCORE_GENMASK_*' to 'UNCORE_*_MASK', and re-order them based on the register they reside in. No functional change intended. Intel-SIG: commit 36f70045528f platform/x86/intel-uncore-freq: Re-arrange bit masks. Backport intel uncore-freq driver update from 6.11 Signed-off-by: Tero Kristo Reviewed-by: Ilpo Järvinen Acked-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20240617060708.892981-2-tero.kristo@linux.intel.com Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4182 --- .../uncore-frequency/uncore-frequency-tpmi.c | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index bb8e72deb354..b58294498921 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -69,9 +69,12 @@ struct tpmi_uncore_struct { bool write_blocked; }; -#define UNCORE_GENMASK_MIN_RATIO GENMASK_ULL(21, 15) -#define UNCORE_GENMASK_MAX_RATIO GENMASK_ULL(14, 8) -#define UNCORE_GENMASK_CURRENT_RATIO GENMASK_ULL(6, 0) +/* Bit definitions for STATUS register */ +#define UNCORE_CURRENT_RATIO_MASK GENMASK_ULL(6, 0) + +/* Bit definitions for CONTROL register */ +#define UNCORE_MAX_RATIO_MASK GENMASK_ULL(14, 8) +#define UNCORE_MIN_RATIO_MASK GENMASK_ULL(21, 15) /* Helper function to read MMIO offset for max/min control frequency */ static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info, @@ -80,11 +83,11 @@ static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info, u64 control; control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); - *max = FIELD_GET(UNCORE_GENMASK_MAX_RATIO, control) * UNCORE_FREQ_KHZ_MULTIPLIER; - *min = 
FIELD_GET(UNCORE_GENMASK_MIN_RATIO, control) * UNCORE_FREQ_KHZ_MULTIPLIER; + *max = FIELD_GET(UNCORE_MAX_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER; + *min = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER; } -#define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_GENMASK_MAX_RATIO) +#define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_MAX_RATIO_MASK) /* Callback for sysfs read for max/min frequencies. Called under mutex locks */ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, @@ -134,11 +137,11 @@ static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, un control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); if (min_max) { - control &= ~UNCORE_GENMASK_MAX_RATIO; - control |= FIELD_PREP(UNCORE_GENMASK_MAX_RATIO, input); + control &= ~UNCORE_MAX_RATIO_MASK; + control |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input); } else { - control &= ~UNCORE_GENMASK_MIN_RATIO; - control |= FIELD_PREP(UNCORE_GENMASK_MIN_RATIO, input); + control &= ~UNCORE_MIN_RATIO_MASK; + control |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input); } writeq(control, (cluster_info->cluster_base + UNCORE_CONTROL_INDEX)); @@ -204,7 +207,7 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq) return -ENODATA; status = readq((u8 __iomem *)cluster_info->cluster_base + UNCORE_STATUS_INDEX); - *freq = FIELD_GET(UNCORE_GENMASK_CURRENT_RATIO, status) * UNCORE_FREQ_KHZ_MULTIPLIER; + *freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, status) * UNCORE_FREQ_KHZ_MULTIPLIER; return 0; } -- Gitee From b476740479bc5e0a3cb85c396382f8b07e538e6a Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 17 Jun 2024 09:04:35 +0300 Subject: [PATCH 1868/2138] platform/x86/intel-uncore-freq: Get rid of magic values MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12077 commit 4babdbdce6a05ae6f228c1ae1e61a4bc91a91d33 upstream. Get rid of any magic bitmasks from the code. 
Define proper macros for these, and use the bitfield operations to access them. No functional change intended. Intel-SIG: commit 4babdbdce6a0 platform/x86/intel-uncore-freq: Get rid of magic values. Backport intel uncore-freq driver update from 6.11 Signed-off-by: Tero Kristo Acked-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20240617060708.892981-3-tero.kristo@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4182 --- .../intel/uncore-frequency/uncore-frequency.c | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c index a3b25253b6fd..b7d3c71ea1c4 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c @@ -14,6 +14,7 @@ * Author: Srinivas Pandruvada */ +#include #include #include #include @@ -36,6 +37,11 @@ static enum cpuhp_state uncore_hp_state __read_mostly; #define MSR_UNCORE_PERF_STATUS 0x621 #define UNCORE_FREQ_KHZ_MULTIPLIER 100000 +#define UNCORE_MAX_RATIO_MASK GENMASK_ULL(6, 0) +#define UNCORE_MIN_RATIO_MASK GENMASK_ULL(14, 8) + +#define UNCORE_CURRENT_RATIO_MASK GENMASK_ULL(6, 0) + static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, unsigned int *max) { @@ -49,8 +55,8 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, if (ret) return ret; - *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER; - *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER; + *max = FIELD_GET(UNCORE_MAX_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER; + *min = FIELD_GET(UNCORE_MIN_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER; return 0; } @@ -62,7 +68,7 @@ static int uncore_write_control_freq(struct 
uncore_data *data, unsigned int inpu u64 cap; input /= UNCORE_FREQ_KHZ_MULTIPLIER; - if (!input || input > 0x7F) + if (!input || input > FIELD_MAX(UNCORE_MAX_RATIO_MASK)) return -EINVAL; if (data->control_cpu < 0) @@ -73,11 +79,11 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu return ret; if (min_max) { - cap &= ~0x7F; - cap |= input; + cap &= ~UNCORE_MAX_RATIO_MASK; + cap |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input); } else { - cap &= ~GENMASK(14, 8); - cap |= (input << 8); + cap &= ~UNCORE_MIN_RATIO_MASK; + cap |= FIELD_PREP(UNCORE_MIN_RATIO_MASK, input); } ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap); @@ -101,7 +107,7 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq) if (ret) return ret; - *freq = (ratio & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER; + *freq = FIELD_GET(UNCORE_CURRENT_RATIO_MASK, ratio) * UNCORE_FREQ_KHZ_MULTIPLIER; return 0; } -- Gitee From abf8500b5cb6d980881354372672d99ab9325120 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 17 Jun 2024 09:04:36 +0300 Subject: [PATCH 1869/2138] platform/x86/intel-uncore-freq: Get rid of magic min_max argument MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12077 commit 90583374f1a0951d563de50aeac6357b724d08f6 upstream. Get rid of the hardcoded / magic min_max argument from internal APIs. Instead, use an enumerated index value for it. No functional change intended. Intel-SIG: commit 90583374f1a0 platform/x86/intel-uncore-freq: Get rid of magic min_max argument. 
Backport intel uncore-freq driver update from 6.11 Signed-off-by: Tero Kristo Acked-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20240617060708.892981-4-tero.kristo@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4182 --- .../uncore-frequency-common.c | 21 ++++++++++--------- .../uncore-frequency-common.h | 8 ++++++- .../uncore-frequency/uncore-frequency-tpmi.c | 18 +++++++++------- .../intel/uncore-frequency/uncore-frequency.c | 4 ++-- 4 files changed, 30 insertions(+), 21 deletions(-) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c index 33bb58dc3f78..7daca46f9235 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c @@ -20,7 +20,7 @@ static DEFINE_IDA(intel_uncore_ida); /* callbacks for actual HW read/write */ static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned int *max); -static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max); +static int (*uncore_write)(struct uncore_data *data, unsigned int input, enum uncore_index index); static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq); static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -45,7 +45,7 @@ static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr } static ssize_t show_min_max_freq_khz(struct uncore_data *data, - char *buf, int min_max) + char *buf, enum uncore_index index) { unsigned int min, max; int ret; @@ -56,7 +56,7 @@ static ssize_t show_min_max_freq_khz(struct uncore_data *data, if (ret) return ret; - if (min_max) + if (index == UNCORE_INDEX_MAX_FREQ) return sprintf(buf, 
"%u\n", max); return sprintf(buf, "%u\n", min); @@ -64,7 +64,7 @@ static ssize_t show_min_max_freq_khz(struct uncore_data *data, static ssize_t store_min_max_freq_khz(struct uncore_data *data, const char *buf, ssize_t count, - int min_max) + enum uncore_index index) { unsigned int input; int ret; @@ -73,7 +73,7 @@ static ssize_t store_min_max_freq_khz(struct uncore_data *data, return -EINVAL; mutex_lock(&uncore_lock); - ret = uncore_write(data, input, min_max); + ret = uncore_write(data, input, index); mutex_unlock(&uncore_lock); if (ret) @@ -125,11 +125,11 @@ static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf) return show_perf_status_freq_khz(data, buf); \ } -store_uncore_min_max(min_freq_khz, 0); -store_uncore_min_max(max_freq_khz, 1); +store_uncore_min_max(min_freq_khz, UNCORE_INDEX_MIN_FREQ); +store_uncore_min_max(max_freq_khz, UNCORE_INDEX_MAX_FREQ); -show_uncore_min_max(min_freq_khz, 0); -show_uncore_min_max(max_freq_khz, 1); +show_uncore_min_max(min_freq_khz, UNCORE_INDEX_MIN_FREQ); +show_uncore_min_max(max_freq_khz, UNCORE_INDEX_MAX_FREQ); show_uncore_perf_status(current_freq_khz); @@ -270,7 +270,8 @@ void uncore_freq_remove_die_entry(struct uncore_data *data) EXPORT_SYMBOL_NS_GPL(uncore_freq_remove_die_entry, INTEL_UNCORE_FREQUENCY); int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max), - int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int set_max), + int (*write_control_freq)(struct uncore_data *data, unsigned int input, + enum uncore_index index), int (*read_freq)(struct uncore_data *data, unsigned int *freq)) { mutex_lock(&uncore_lock); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h index 0e5bf507e555..c6ceeda3f2bf 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h +++ 
b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h @@ -66,8 +66,14 @@ struct uncore_data { #define UNCORE_DOMAIN_ID_INVALID -1 +enum uncore_index { + UNCORE_INDEX_MIN_FREQ, + UNCORE_INDEX_MAX_FREQ, +}; + int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max), - int (*write_control_freq)(struct uncore_data *data, unsigned int input, unsigned int min_max), + int (*write_control_freq)(struct uncore_data *data, unsigned int input, + enum uncore_index index), int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq)); void uncore_freq_common_exit(void); int uncore_freq_add_entry(struct uncore_data *data, int cpu); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index b58294498921..c8e8c8087812 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -130,13 +130,13 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, /* Helper function to write MMIO offset for max/min control frequency */ static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, unsigned int input, - unsigned int min_max) + unsigned int index) { u64 control; control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); - if (min_max) { + if (index == UNCORE_INDEX_MAX_FREQ) { control &= ~UNCORE_MAX_RATIO_MASK; control |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input); } else { @@ -149,7 +149,7 @@ static void write_control_freq(struct tpmi_uncore_cluster_info *cluster_info, un /* Callback for sysfs write for max/min frequencies. 
Called under mutex locks */ static int uncore_write_control_freq(struct uncore_data *data, unsigned int input, - unsigned int min_max) + enum uncore_index index) { struct tpmi_uncore_cluster_info *cluster_info; struct tpmi_uncore_struct *uncore_root; @@ -174,10 +174,10 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j) write_control_freq(&uncore_root->pd_info[i].cluster_infos[j], - input, min_max); + input, index); } - if (min_max) + if (index == UNCORE_INDEX_MAX_FREQ) uncore_root->max_ratio = input; else uncore_root->min_ratio = input; @@ -185,13 +185,15 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu return 0; } - if (min_max && uncore_root->max_ratio && uncore_root->max_ratio < input) + if (index == UNCORE_INDEX_MAX_FREQ && uncore_root->max_ratio && + uncore_root->max_ratio < input) return -EINVAL; - if (!min_max && uncore_root->min_ratio && uncore_root->min_ratio > input) + if (index == UNCORE_INDEX_MIN_FREQ && uncore_root->min_ratio && + uncore_root->min_ratio > input) return -EINVAL; - write_control_freq(cluster_info, input, min_max); + write_control_freq(cluster_info, input, index); return 0; } diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c index b7d3c71ea1c4..bfa667051526 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c @@ -62,7 +62,7 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, } static int uncore_write_control_freq(struct uncore_data *data, unsigned int input, - unsigned int min_max) + enum uncore_index index) { int ret; u64 cap; @@ -78,7 +78,7 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu if (ret) return ret; - if (min_max) { + if (index == UNCORE_INDEX_MAX_FREQ) { 
cap &= ~UNCORE_MAX_RATIO_MASK; cap |= FIELD_PREP(UNCORE_MAX_RATIO_MASK, input); } else { -- Gitee From 4d6c70ca73dd6acab8e83ab135c4ca55257705f9 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 17 Jun 2024 09:04:37 +0300 Subject: [PATCH 1870/2138] platform/x86/intel-uncore-freq: Use uncore_index with read_control_freq MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12077 commit 69207a0f17d4e5a9bdd9feeece26a84add305157 upstream. Use the enumerated index for selecting the uncore driver parameter to read, instead of reading everything. This is done in preparation to expand the API to access more parameters later. No functional change intended. Intel-SIG: commit 69207a0f17d4 platform/x86/intel-uncore-freq: Use uncore_index with read_control_freq. Backport intel uncore-freq driver update from 6.11 Signed-off-by: Tero Kristo Acked-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20240617060708.892981-5-tero.kristo@linux.intel.com [ij: Removed underscores from variable names] Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4182 --- .../uncore-frequency-common.c | 23 ++++++------ .../uncore-frequency-common.h | 9 +++-- .../uncore-frequency/uncore-frequency-tpmi.c | 37 ++++++++++++------- .../intel/uncore-frequency/uncore-frequency.c | 10 +++-- 4 files changed, 45 insertions(+), 34 deletions(-) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c index 7daca46f9235..8d9817d70e27 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c @@ -19,7 +19,7 @@ static int uncore_instance_count; static DEFINE_IDA(intel_uncore_ida); /* callbacks for actual HW read/write */ -static int 
(*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned int *max); +static int (*uncore_read)(struct uncore_data *data, unsigned int *value, enum uncore_index index); static int (*uncore_write)(struct uncore_data *data, unsigned int input, enum uncore_index index); static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq); @@ -47,19 +47,16 @@ static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr static ssize_t show_min_max_freq_khz(struct uncore_data *data, char *buf, enum uncore_index index) { - unsigned int min, max; + unsigned int value; int ret; mutex_lock(&uncore_lock); - ret = uncore_read(data, &min, &max); + ret = uncore_read(data, &value, index); mutex_unlock(&uncore_lock); if (ret) return ret; - if (index == UNCORE_INDEX_MAX_FREQ) - return sprintf(buf, "%u\n", max); - - return sprintf(buf, "%u\n", min); + return sprintf(buf, "%u\n", value); } static ssize_t store_min_max_freq_khz(struct uncore_data *data, @@ -238,7 +235,8 @@ int uncore_freq_add_entry(struct uncore_data *data, int cpu) sprintf(data->name, "package_%02d_die_%02d", data->package_id, data->die_id); } - uncore_read(data, &data->initial_min_freq_khz, &data->initial_max_freq_khz); + uncore_read(data, &data->initial_min_freq_khz, UNCORE_INDEX_MIN_FREQ); + uncore_read(data, &data->initial_max_freq_khz, UNCORE_INDEX_MAX_FREQ); ret = create_attr_group(data, data->name); if (ret) { @@ -269,10 +267,11 @@ void uncore_freq_remove_die_entry(struct uncore_data *data) } EXPORT_SYMBOL_NS_GPL(uncore_freq_remove_die_entry, INTEL_UNCORE_FREQUENCY); -int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max), - int (*write_control_freq)(struct uncore_data *data, unsigned int input, - enum uncore_index index), - int (*read_freq)(struct uncore_data *data, unsigned int *freq)) +int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *value, + enum uncore_index 
index), + int (*write_control_freq)(struct uncore_data *data, unsigned int input, + enum uncore_index index), + int (*read_freq)(struct uncore_data *data, unsigned int *freq)) { mutex_lock(&uncore_lock); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h index c6ceeda3f2bf..2d98931cd8e2 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h @@ -71,10 +71,11 @@ enum uncore_index { UNCORE_INDEX_MAX_FREQ, }; -int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *min, unsigned int *max), - int (*write_control_freq)(struct uncore_data *data, unsigned int input, - enum uncore_index index), - int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq)); +int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *value, + enum uncore_index index), + int (*write_control_freq)(struct uncore_data *data, unsigned int input, + enum uncore_index index), + int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq)); void uncore_freq_common_exit(void); int uncore_freq_add_entry(struct uncore_data *data, int cpu); void uncore_freq_remove_die_entry(struct uncore_data *data); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index c8e8c8087812..0d6803e9dc15 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -78,20 +78,22 @@ struct tpmi_uncore_struct { /* Helper function to read MMIO offset for max/min control frequency */ static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info, - unsigned int *min, unsigned int *max) + unsigned int *value, enum uncore_index index) 
{ u64 control; control = readq(cluster_info->cluster_base + UNCORE_CONTROL_INDEX); - *max = FIELD_GET(UNCORE_MAX_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER; - *min = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER; + if (index == UNCORE_INDEX_MAX_FREQ) + *value = FIELD_GET(UNCORE_MAX_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER; + else + *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, control) * UNCORE_FREQ_KHZ_MULTIPLIER; } #define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_MAX_RATIO_MASK) /* Callback for sysfs read for max/min frequencies. Called under mutex locks */ -static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, - unsigned int *max) +static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value, + enum uncore_index index) { struct tpmi_uncore_cluster_info *cluster_info; @@ -99,10 +101,11 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, if (cluster_info->root_domain) { struct tpmi_uncore_struct *uncore_root = cluster_info->uncore_root; - int i, _min = 0, _max = 0; + unsigned int min, max, v; + int i; - *min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER; - *max = 0; + min = UNCORE_MAX_RATIO * UNCORE_FREQ_KHZ_MULTIPLIER; + max = 0; /* * Get the max/min by looking at each cluster. 
Get the lowest @@ -113,17 +116,23 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, for (j = 0; j < uncore_root->pd_info[i].cluster_count; ++j) { read_control_freq(&uncore_root->pd_info[i].cluster_infos[j], - &_min, &_max); - if (*min > _min) - *min = _min; - if (*max < _max) - *max = _max; + &v, index); + if (v < min) + min = v; + if (v > max) + max = v; } } + + if (index == UNCORE_INDEX_MIN_FREQ) + *value = min; + else + *value = max; + return 0; } - read_control_freq(cluster_info, min, max); + read_control_freq(cluster_info, value, index); return 0; } diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c index bfa667051526..1ec04db5ad69 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c @@ -42,8 +42,8 @@ static enum cpuhp_state uncore_hp_state __read_mostly; #define UNCORE_CURRENT_RATIO_MASK GENMASK_ULL(6, 0) -static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, - unsigned int *max) +static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value, + enum uncore_index index) { u64 cap; int ret; @@ -55,8 +55,10 @@ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min, if (ret) return ret; - *max = FIELD_GET(UNCORE_MAX_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER; - *min = FIELD_GET(UNCORE_MIN_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER; + if (index == UNCORE_INDEX_MAX_FREQ) + *value = FIELD_GET(UNCORE_MAX_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER; + else + *value = FIELD_GET(UNCORE_MIN_RATIO_MASK, cap) * UNCORE_FREQ_KHZ_MULTIPLIER; return 0; } -- Gitee From dd4c5cb3e9981f80ef68534b9c8011034d9951cb Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 17 Jun 2024 09:04:38 +0300 Subject: [PATCH 1871/2138] platform/x86/intel-uncore-freq: Get rid of uncore_read_freq driver API 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12077 commit d766abfbea9c8f2e7c87c59a572a1ae7f7ee5909 upstream. Get rid of uncore_read_freq driver API. Instead, add a new entry to the enumerated read interface and use this. No functional change intended. Intel-SIG: commit d766abfbea9c platform/x86/intel-uncore-freq: Get rid of uncore_read_freq driver API. Backport intel uncore-freq driver update from 6.11 Signed-off-by: Tero Kristo Acked-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20240617060708.892981-6-tero.kristo@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4182 --- .../uncore-frequency-common.c | 19 ++++++-------- .../uncore-frequency-common.h | 10 ++++---- .../uncore-frequency/uncore-frequency-tpmi.c | 25 ++++++++++++++++--- .../intel/uncore-frequency/uncore-frequency.c | 20 +++++++++++++-- 4 files changed, 52 insertions(+), 22 deletions(-) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c index 8d9817d70e27..cfbff057458d 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c @@ -21,7 +21,6 @@ static DEFINE_IDA(intel_uncore_ida); /* callbacks for actual HW read/write */ static int (*uncore_read)(struct uncore_data *data, unsigned int *value, enum uncore_index index); static int (*uncore_write)(struct uncore_data *data, unsigned int input, enum uncore_index index); -static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq); static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -85,7 +84,7 @@ static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf) int ret; 
mutex_lock(&uncore_lock); - ret = uncore_read_freq(data, &freq); + ret = uncore_read(data, &freq, UNCORE_INDEX_CURRENT_FREQ); mutex_unlock(&uncore_lock); if (ret) return ret; @@ -195,7 +194,7 @@ static int create_attr_group(struct uncore_data *data, char *name) data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr; data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr; - ret = uncore_read_freq(data, &freq); + ret = uncore_read(data, &freq, UNCORE_INDEX_CURRENT_FREQ); if (!ret) data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr; @@ -267,17 +266,15 @@ void uncore_freq_remove_die_entry(struct uncore_data *data) } EXPORT_SYMBOL_NS_GPL(uncore_freq_remove_die_entry, INTEL_UNCORE_FREQUENCY); -int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *value, - enum uncore_index index), - int (*write_control_freq)(struct uncore_data *data, unsigned int input, - enum uncore_index index), - int (*read_freq)(struct uncore_data *data, unsigned int *freq)) +int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value, + enum uncore_index index), + int (*write)(struct uncore_data *data, unsigned int input, + enum uncore_index index)) { mutex_lock(&uncore_lock); - uncore_read = read_control_freq; - uncore_write = write_control_freq; - uncore_read_freq = read_freq; + uncore_read = read; + uncore_write = write; if (!uncore_root_kobj) { struct device *dev_root = bus_get_dev_root(&cpu_subsys); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h index 2d98931cd8e2..4c245b945e4e 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h @@ -69,13 +69,13 @@ struct uncore_data { enum uncore_index { UNCORE_INDEX_MIN_FREQ, UNCORE_INDEX_MAX_FREQ, + UNCORE_INDEX_CURRENT_FREQ, }; 
-int uncore_freq_common_init(int (*read_control_freq)(struct uncore_data *data, unsigned int *value, - enum uncore_index index), - int (*write_control_freq)(struct uncore_data *data, unsigned int input, - enum uncore_index index), - int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq)); +int uncore_freq_common_init(int (*read)(struct uncore_data *data, unsigned int *value, + enum uncore_index index), + int (*write)(struct uncore_data *data, unsigned int input, + enum uncore_index index)); void uncore_freq_common_exit(void); int uncore_freq_add_entry(struct uncore_data *data, int cpu); void uncore_freq_remove_die_entry(struct uncore_data *data); diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c index 0d6803e9dc15..9fa3037c03d1 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c @@ -91,7 +91,7 @@ static void read_control_freq(struct tpmi_uncore_cluster_info *cluster_info, #define UNCORE_MAX_RATIO FIELD_MAX(UNCORE_MAX_RATIO_MASK) -/* Callback for sysfs read for max/min frequencies. Called under mutex locks */ +/* Helper for sysfs read for max/min frequencies. Called under mutex locks */ static int uncore_read_control_freq(struct uncore_data *data, unsigned int *value, enum uncore_index index) { @@ -207,7 +207,7 @@ static int uncore_write_control_freq(struct uncore_data *data, unsigned int inpu return 0; } -/* Callback for sysfs read for the current uncore frequency. Called under mutex locks */ +/* Helper for sysfs read for the current uncore frequency. 
Called under mutex locks */ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq) { struct tpmi_uncore_cluster_info *cluster_info; @@ -223,6 +223,24 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq) return 0; } +/* Callback for sysfs read for TPMI uncore values. Called under mutex locks. */ +static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index) +{ + switch (index) { + case UNCORE_INDEX_MIN_FREQ: + case UNCORE_INDEX_MAX_FREQ: + return uncore_read_control_freq(data, value, index); + + case UNCORE_INDEX_CURRENT_FREQ: + return uncore_read_freq(data, value); + + default: + break; + } + + return -EOPNOTSUPP; +} + static void remove_cluster_entries(struct tpmi_uncore_struct *tpmi_uncore) { int i; @@ -273,8 +291,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_ return -EINVAL; /* Register callbacks to uncore core */ - ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq, - uncore_read_freq); + ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq); if (ret) return ret; diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c index 1ec04db5ad69..c68e69d7b242 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c @@ -114,6 +114,23 @@ static int uncore_read_freq(struct uncore_data *data, unsigned int *freq) return 0; } +static int uncore_read(struct uncore_data *data, unsigned int *value, enum uncore_index index) +{ + switch (index) { + case UNCORE_INDEX_MIN_FREQ: + case UNCORE_INDEX_MAX_FREQ: + return uncore_read_control_freq(data, value, index); + + case UNCORE_INDEX_CURRENT_FREQ: + return uncore_read_freq(data, value); + + default: + break; + } + + return -EOPNOTSUPP; +} + /* Caller provides protection */ static struct uncore_data 
*uncore_get_instance(unsigned int cpu) { @@ -243,8 +260,7 @@ static int __init intel_uncore_init(void) if (!uncore_instances) return -ENOMEM; - ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq, - uncore_read_freq); + ret = uncore_freq_common_init(uncore_read, uncore_write_control_freq); if (ret) goto err_free; -- Gitee From f1270e5a4b652feba9b631aa03305358984ca814 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 17 Jun 2024 09:04:39 +0300 Subject: [PATCH 1872/2138] platform/x86/intel-uncore-freq: Rename the sysfs helper macro names MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12077 commit b2cc9f908af19dd851dd4642325daf0e86614885 upstream. The macros to create sysfs entries are going to be used for other attributes in addition to current min/max frequencies only, so rename these to be more generic. No functional change intended. Intel-SIG: commit b2cc9f908af1 platform/x86/intel-uncore-freq: Rename the sysfs helper macro names. 
Backport intel uncore-freq driver update from 6.11 Signed-off-by: Tero Kristo Acked-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20240617060708.892981-7-tero.kristo@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4182 --- .../uncore-frequency-common.c | 25 ++++++++----------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c index cfbff057458d..9356a0f322e6 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c @@ -43,8 +43,7 @@ static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr return sprintf(buf, "%u\n", data->package_id); } -static ssize_t show_min_max_freq_khz(struct uncore_data *data, - char *buf, enum uncore_index index) +static ssize_t show_attr(struct uncore_data *data, char *buf, enum uncore_index index) { unsigned int value; int ret; @@ -58,9 +57,8 @@ static ssize_t show_min_max_freq_khz(struct uncore_data *data, return sprintf(buf, "%u\n", value); } -static ssize_t store_min_max_freq_khz(struct uncore_data *data, - const char *buf, ssize_t count, - enum uncore_index index) +static ssize_t store_attr(struct uncore_data *data, const char *buf, ssize_t count, + enum uncore_index index) { unsigned int input; int ret; @@ -92,24 +90,23 @@ static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf) return sprintf(buf, "%u\n", freq); } -#define store_uncore_min_max(name, min_max) \ +#define store_uncore_attr(name, index) \ static ssize_t store_##name(struct kobject *kobj, \ struct kobj_attribute *attr, \ const char *buf, size_t count) \ { \ struct uncore_data *data = container_of(attr, struct 
uncore_data, name##_kobj_attr);\ \ - return store_min_max_freq_khz(data, buf, count, \ - min_max); \ + return store_attr(data, buf, count, index); \ } -#define show_uncore_min_max(name, min_max) \ +#define show_uncore_attr(name, index) \ static ssize_t show_##name(struct kobject *kobj, \ struct kobj_attribute *attr, char *buf)\ { \ struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\ \ - return show_min_max_freq_khz(data, buf, min_max); \ + return show_attr(data, buf, index); \ } #define show_uncore_perf_status(name) \ @@ -121,11 +118,11 @@ static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf) return show_perf_status_freq_khz(data, buf); \ } -store_uncore_min_max(min_freq_khz, UNCORE_INDEX_MIN_FREQ); -store_uncore_min_max(max_freq_khz, UNCORE_INDEX_MAX_FREQ); +store_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ); +store_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ); -show_uncore_min_max(min_freq_khz, UNCORE_INDEX_MIN_FREQ); -show_uncore_min_max(max_freq_khz, UNCORE_INDEX_MAX_FREQ); +show_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ); +show_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ); show_uncore_perf_status(current_freq_khz); -- Gitee From 40c8631a6b3ac8de766cd65b680ad19646976951 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 17 Jun 2024 09:04:40 +0300 Subject: [PATCH 1873/2138] platform/x86/intel-uncore-freq: Use generic helpers for current frequency MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12077 commit 7b3ffe0d815d15310545f77d444ada92c5d26903 upstream. Use the generic sysfs helpers for reading the current frequency also, and remove the custom ones. No functional change intended. Intel-SIG: commit 7b3ffe0d815d platform/x86/intel-uncore-freq: Use generic helpers for current frequency. 
Backport intel uncore-freq driver update from 6.11 Signed-off-by: Tero Kristo Acked-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20240617060708.892981-8-tero.kristo@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4182 --- .../uncore-frequency-common.c | 25 +------------------ 1 file changed, 1 insertion(+), 24 deletions(-) diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c index 9356a0f322e6..4e880585cbe4 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c @@ -76,20 +76,6 @@ static ssize_t store_attr(struct uncore_data *data, const char *buf, ssize_t cou return count; } -static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf) -{ - unsigned int freq; - int ret; - - mutex_lock(&uncore_lock); - ret = uncore_read(data, &freq, UNCORE_INDEX_CURRENT_FREQ); - mutex_unlock(&uncore_lock); - if (ret) - return ret; - - return sprintf(buf, "%u\n", freq); -} - #define store_uncore_attr(name, index) \ static ssize_t store_##name(struct kobject *kobj, \ struct kobj_attribute *attr, \ @@ -109,22 +95,13 @@ static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf) return show_attr(data, buf, index); \ } -#define show_uncore_perf_status(name) \ - static ssize_t show_##name(struct kobject *kobj, \ - struct kobj_attribute *attr, char *buf)\ - { \ - struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\ - \ - return show_perf_status_freq_khz(data, buf); \ - } - store_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ); store_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ); show_uncore_attr(min_freq_khz, UNCORE_INDEX_MIN_FREQ); 
show_uncore_attr(max_freq_khz, UNCORE_INDEX_MAX_FREQ); -show_uncore_perf_status(current_freq_khz); +show_uncore_attr(current_freq_khz, UNCORE_INDEX_CURRENT_FREQ); #define show_uncore_data(member_name) \ static ssize_t show_##member_name(struct kobject *kobj, \ -- Gitee From 40263eb76365373935f5e1b2fe97b270ff9d36c3 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 25 Apr 2024 20:59:15 +0100 Subject: [PATCH 1874/2138] erofs: don't align offset for erofs_read_metabuf() (simple cases) ANBZ: #11101 commit 076d965eb812f2ad88daf693d745ea1f28bf8f80 upstream. Most of the callers of erofs_read_metabuf() have the following form: block = erofs_blknr(sb, offset); off = erofs_blkoff(sb, offset); p = erofs_read_metabuf(...., erofs_pos(sb, block), ...); if (IS_ERR(p)) return PTR_ERR(p); q = p + off; // no further uses of p, block or off. The value passed to erofs_read_metabuf() is offset rounded down to block size, i.e. offset - off. Passing offset as-is would increase the return value by off in case of success and keep the return value unchanged in in case of error. In other words, the same could be achieved by q = erofs_read_metabuf(...., offset, ...); if (IS_ERR(q)) return PTR_ERR(q); This commit convert these simple cases. Signed-off-by: Al Viro Link: https://lore.kernel.org/r/20240425195915.GD1031757@ZenIV Signed-off-by: Gao Xiang Conflicts: 1. fs/erofs/super.c: In erofs_init_device(), there is a conflict between `bdev_handle` in the original file and `bdev_file` in the patch. Resolution: 1. Use the original `bdev_handle` because using `bdev_file` involves third-party dependencies. 
Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/data.c | 11 +++++------ fs/erofs/fscache.c | 12 +++--------- fs/erofs/super.c | 8 +++----- fs/erofs/zmap.c | 8 +++----- 4 files changed, 14 insertions(+), 25 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 9395f6574089..4d1371c329d6 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -153,7 +153,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map) pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, unit) + unit * chunknr; - kaddr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, erofs_blknr(sb, pos)), EROFS_KMAP); + kaddr = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP); if (IS_ERR(kaddr)) { err = PTR_ERR(kaddr); goto out; @@ -164,7 +164,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map) /* handle block map */ if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) { - __le32 *blkaddr = kaddr + erofs_blkoff(sb, pos); + __le32 *blkaddr = kaddr; if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) { map->m_flags = 0; @@ -175,7 +175,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map) goto out_unlock; } /* parse chunk indexes */ - idx = kaddr + erofs_blkoff(sb, pos); + idx = kaddr; switch (le32_to_cpu(idx->blkaddr)) { case EROFS_NULL_ADDR: map->m_flags = 0; @@ -295,11 +295,10 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, struct erofs_buf buf = __EROFS_BUF_INITIALIZER; iomap->type = IOMAP_INLINE; - ptr = erofs_read_metabuf(&buf, sb, - erofs_pos(sb, erofs_blknr(sb, mdev.m_pa)), EROFS_KMAP); + ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, EROFS_KMAP); if (IS_ERR(ptr)) return PTR_ERR(ptr); - iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa); + iomap->inline_data = ptr; iomap->private = buf.base; } else { iomap->type = IOMAP_MAPPED; diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index ac618b3484f1..fda16eedafb5 100644 
--- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -273,21 +273,15 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req) if (map.m_flags & EROFS_MAP_META) { struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct iov_iter iter; - erofs_blk_t blknr; - size_t offset, size; + size_t size = map.m_llen; void *src; - /* For tail packing layout, the offset may be non-zero. */ - offset = erofs_blkoff(sb, map.m_pa); - blknr = erofs_blknr(sb, map.m_pa); - size = map.m_llen; - - src = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blknr), EROFS_KMAP); + src = erofs_read_metabuf(&buf, sb, map.m_pa, EROFS_KMAP); if (IS_ERR(src)) return PTR_ERR(src); iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE); - if (copy_to_iter(src + offset, size, &iter) != size) { + if (copy_to_iter(src, size, &iter) != size) { erofs_put_metabuf(&buf); return -EFAULT; } diff --git a/fs/erofs/super.c b/fs/erofs/super.c index d82a2fc80271..886a74784802 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -178,12 +178,10 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, struct erofs_fscache *fscache; struct erofs_deviceslot *dis; struct bdev_handle *bdev_handle; - void *ptr; - ptr = erofs_read_metabuf(buf, sb, erofs_pos(sb, erofs_blknr(sb, *pos)), EROFS_KMAP); - if (IS_ERR(ptr)) - return PTR_ERR(ptr); - dis = ptr + erofs_blkoff(sb, *pos); + dis = erofs_read_metabuf(buf, sb, *pos, EROFS_KMAP); + if (IS_ERR(dis)) + return PTR_ERR(dis); if (!sbi->devs->flatdev && !dif->path) { if (!dis->tag[0]) { diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 230215afae4d..f006c06bc813 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -571,7 +571,6 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) int err, headnr; erofs_off_t pos; struct erofs_buf buf = __EROFS_BUF_INITIALIZER; - void *kaddr; struct z_erofs_map_header *h; if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) { @@ -591,13 +590,12 @@ static int z_erofs_fill_inode_lazy(struct inode 
*inode) goto out_unlock; pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8); - kaddr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, erofs_blknr(sb, pos)), EROFS_KMAP); - if (IS_ERR(kaddr)) { - err = PTR_ERR(kaddr); + h = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP); + if (IS_ERR(h)) { + err = PTR_ERR(h); goto out_unlock; } - h = kaddr + erofs_blkoff(sb, pos); /* * if the highest bit of the 8-byte map header is set, the whole file * is stored in the packed inode. The rest bits keeps z_fragmentoff. -- Gitee From 2a5163fa5158c2411cf254caa3ffbb32ee16fddf Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 25 Apr 2024 20:59:44 +0100 Subject: [PATCH 1875/2138] erofs: don't round offset down for erofs_read_metabuf() ANBZ: #11101 commit 4afe6b8d21e5ff644fedd7db5673fe5a48b177b7 upstream. There's only one place where struct z_erofs_maprecorder ->kaddr is used not in the same function that has assigned it - the value read in unpack_compacted_index() gets calculated in z_erofs_load_compact_lcluster(). With minor massage we can switch to storing it with offset in block already added. 
Signed-off-by: Al Viro Link: https://lore.kernel.org/r/20240425195944.GE1031757@ZenIV Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/zmap.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index f006c06bc813..34d8acdca83f 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -34,13 +34,13 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m, unsigned int advise; m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb, - erofs_pos(inode->i_sb, erofs_blknr(inode->i_sb, pos)), EROFS_KMAP); + pos, EROFS_KMAP); if (IS_ERR(m->kaddr)) return PTR_ERR(m->kaddr); m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index); m->lcn = lcn; - di = m->kaddr + erofs_blkoff(inode->i_sb, pos); + di = m->kaddr; advise = le16_to_cpu(di->di_advise); m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK; @@ -109,7 +109,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, { struct erofs_inode *const vi = EROFS_I(m->inode); const unsigned int lclusterbits = vi->z_logical_clusterbits; - unsigned int vcnt, base, lo, lobits, encodebits, nblk, eofs; + unsigned int vcnt, lo, lobits, encodebits, nblk, bytes; int i; u8 *in, type; bool big_pcluster; @@ -127,11 +127,11 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1; lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U); encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt; - eofs = erofs_blkoff(m->inode->i_sb, pos); - base = round_down(eofs, vcnt << amortizedshift); - in = m->kaddr + base; + bytes = pos & ((vcnt << amortizedshift) - 1); - i = (eofs - base) >> amortizedshift; + in = m->kaddr - bytes; + + i = bytes >> amortizedshift; lo = decode_compactedbits(lobits, in, encodebits * i, &type); m->type = type; @@ -256,7 +256,7 @@ static int 
z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m, out: pos += lcn * (1 << amortizedshift); m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb, - erofs_pos(inode->i_sb, erofs_blknr(inode->i_sb, pos)), EROFS_KMAP); + pos, EROFS_KMAP); if (IS_ERR(m->kaddr)) return PTR_ERR(m->kaddr); return unpack_compacted_index(m, amortizedshift, pos, lookahead); -- Gitee From d05259253fadac0f6263103fca7c6a1039ea9583 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 25 Apr 2024 21:00:17 +0100 Subject: [PATCH 1876/2138] z_erofs_pcluster_begin(): don't bother with rounding position down ANBZ: #11101 commit 5587a8172eb6040e388c3fc9fa6553b99510da9e upstream. ... and be more idiomatic when calculating ->pageofs_in. Signed-off-by: Al Viro Link: https://lore.kernel.org/r/20240425200017.GF1031757@ZenIV [ Gao Xiang: don't use `offset_in_page(mptr)` due to EROFS_NO_KMAP. ] Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/zdata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index ec68494cedef..5670596bd6d7 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -868,7 +868,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) } else { void *mptr; - mptr = erofs_read_metabuf(&map->buf, sb, erofs_pos(sb, blknr), EROFS_NO_KMAP); + mptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, EROFS_NO_KMAP); if (IS_ERR(mptr)) { ret = PTR_ERR(mptr); erofs_err(sb, "failed to get inline data %d", ret); -- Gitee From 1721d9f262854e98de6b345bee4211577b8ba8ae Mon Sep 17 00:00:00 2001 From: Sandeep Dhavale Date: Mon, 24 Jun 2024 15:02:05 -0700 Subject: [PATCH 1877/2138] erofs: fix possible memory leak in z_erofs_gbuf_exit() ANBZ: #11101 commit 9d01f6f6d8b57131c74810739b9d65141062e4c0 upstream. 
Because we incorrectly reused of variable `i` in `z_erofs_gbuf_exit()` for inner loop, we may exit early from outer loop resulting in memory leak. Fix this by using separate variable for iterating through inner loop. Fixes: f36f3010f676 ("erofs: rename per-CPU buffers to global buffer pool and make it configurable") Signed-off-by: Sandeep Dhavale Reviewed-by: Gao Xiang Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20240624220206.3373197-1-dhavale@google.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/zutil.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c index 2e7391e79e11..b219a6a255b5 100644 --- a/fs/erofs/zutil.c +++ b/fs/erofs/zutil.c @@ -147,7 +147,7 @@ int __init z_erofs_gbuf_init(void) void z_erofs_gbuf_exit(void) { - int i; + int i, j; for (i = 0; i < z_erofs_gbuf_count + (!!z_erofs_rsvbuf); ++i) { struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i]; @@ -160,9 +160,9 @@ void z_erofs_gbuf_exit(void) if (!gbuf->pages) continue; - for (i = 0; i < gbuf->nrpages; ++i) - if (gbuf->pages[i]) - put_page(gbuf->pages[i]); + for (j = 0; j < gbuf->nrpages; ++j) + if (gbuf->pages[j]) + put_page(gbuf->pages[j]); kfree(gbuf->pages); gbuf->pages = NULL; } -- Gitee From 5503b512eb55b5630023c2ecac6882bdeb90b6e9 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 3 Jul 2024 20:00:48 +0800 Subject: [PATCH 1878/2138] erofs: convert z_erofs_pcluster_readmore() to folios ANBZ: #11101 commit 1a4821a0a037f6bedd796a589d07d44547763da4 upstream. Unlike `pagecache_get_page()`, `__filemap_get_folio()` returns error pointers instead of NULL, thus switching to `IS_ERR_OR_NULL`. Apart from that, it's just a straightforward conversion. 
Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240703120051.3653452-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/internal.h | 14 +++++--------- fs/erofs/zdata.c | 15 +++++++-------- 2 files changed, 12 insertions(+), 17 deletions(-) diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 34746bb0eb1c..a6a031712678 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -312,17 +312,13 @@ static inline unsigned int erofs_inode_datalayout(unsigned int ifmt) return (ifmt >> EROFS_I_DATALAYOUT_BIT) & EROFS_I_DATALAYOUT_MASK; } -/* - * Different from grab_cache_page_nowait(), reclaiming is never triggered - * when allocating new pages. - */ -static inline -struct page *erofs_grab_cache_page_nowait(struct address_space *mapping, - pgoff_t index) +/* reclaiming is never triggered when allocating new folios. */ +static inline struct folio *erofs_grab_folio_nowait(struct address_space *as, + pgoff_t index) { - return pagecache_get_page(mapping, index, + return __filemap_get_folio(as, index, FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, - readahead_gfp_mask(mapping) & ~__GFP_RECLAIM); + readahead_gfp_mask(as) & ~__GFP_RECLAIM); } /* Has a disk mapping */ diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 5670596bd6d7..86fe9ba51c14 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1767,7 +1767,6 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, end = round_up(end, PAGE_SIZE); } else { end = round_up(map->m_la, PAGE_SIZE); - if (!map->m_llen) return; } @@ -1775,15 +1774,15 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, cur = map->m_la + map->m_llen - 1; while ((cur >= end) && (cur < i_size_read(inode))) { pgoff_t index = cur >> PAGE_SHIFT; - struct page *page; + struct folio *folio; - page = erofs_grab_cache_page_nowait(inode->i_mapping, index); - if (page) { - if (PageUptodate(page)) - 
unlock_page(page); + folio = erofs_grab_folio_nowait(inode->i_mapping, index); + if (!IS_ERR_OR_NULL(folio)) { + if (folio_test_uptodate(folio)) + folio_unlock(folio); else - z_erofs_scan_folio(f, page_folio(page), !!rac); - put_page(page); + z_erofs_scan_folio(f, folio, !!rac); + folio_put(folio); } if (cur < PAGE_SIZE) -- Gitee From fb988e2d86f1ae66d46ac91d664cbc568461d83a Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 3 Jul 2024 20:00:49 +0800 Subject: [PATCH 1879/2138] erofs: convert z_erofs_read_fragment() to folios ANBZ: #11101 commit 90cd33d79338b9df75ae91d1452be10e40443527 upstream. Just a straight-forward conversion. No logic changes. Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240703120051.3653452-2-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/zdata.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 86fe9ba51c14..b80f4a656396 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -925,7 +925,7 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe) fe->pcl = NULL; } -static int z_erofs_read_fragment(struct super_block *sb, struct page *page, +static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio, unsigned int cur, unsigned int end, erofs_off_t pos) { struct inode *packed_inode = EROFS_SB(sb)->packed_inode; @@ -938,14 +938,13 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page, buf.inode = packed_inode; for (; cur < end; cur += cnt, pos += cnt) { - cnt = min_t(unsigned int, end - cur, - sb->s_blocksize - erofs_blkoff(sb, pos)); + cnt = min(end - cur, sb->s_blocksize - erofs_blkoff(sb, pos)); src = erofs_bread(&buf, pos, EROFS_KMAP); if (IS_ERR(src)) { erofs_put_metabuf(&buf); return PTR_ERR(src); } - memcpy_to_page(page, cur, src, cnt); + memcpy_to_folio(folio, cur, src, cnt); } 
erofs_put_metabuf(&buf); return 0; @@ -959,7 +958,7 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *fe, const loff_t offset = folio_pos(folio); const unsigned int bs = i_blocksize(inode), fs = folio_size(folio); bool tight = true, exclusive; - unsigned int cur, end, len, split; + unsigned int cur, end, split; int err = 0; z_erofs_onlinefolio_init(folio); @@ -989,9 +988,9 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *fe, if (map->m_flags & EROFS_MAP_FRAGMENT) { erofs_off_t fpos = offset + cur - map->m_la; - len = min_t(unsigned int, map->m_llen - fpos, end - cur); - err = z_erofs_read_fragment(inode->i_sb, &folio->page, cur, - cur + len, EROFS_I(inode)->z_fragmentoff + fpos); + err = z_erofs_read_fragment(inode->i_sb, folio, cur, + cur + min(map->m_llen - fpos, end - cur), + EROFS_I(inode)->z_fragmentoff + fpos); if (err) goto out; tight = false; -- Gitee From bf49fdadd0035848f203d03e16db9c789748930c Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 3 Jul 2024 20:00:50 +0800 Subject: [PATCH 1880/2138] erofs: teach z_erofs_scan_folios() to handle multi-page folios ANBZ: #11101 commit 5b9654efb60423284dd0f8845812ac7216f60858 upstream. Previously, a folio just contains one page. In order to enable large folios, z_erofs_scan_folios() needs to handle multi-page folios. First, this patch eliminates all gotos. Instead, the new loop deal with multiple parts in each folio. It's simple to handle the parts which belong to unmapped extents or fragment extents; but for encoded extents, the page boundaries needs to be considered for `tight` and `split` to keep inplace I/Os work correctly: when a part crosses the page boundary, they needs to be reseted properly. Besides, simplify `tight` derivation since Z_EROFS_PCLUSTER_HOOKED has been removed for quite a while. 
Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240703120051.3653452-3-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/zdata.c | 167 +++++++++++++++++++++++------------------------ 1 file changed, 82 insertions(+), 85 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index b80f4a656396..0b1e1d2b8bdb 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -950,100 +950,97 @@ static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio, return 0; } -static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *fe, +static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f, struct folio *folio, bool ra) { - struct inode *const inode = fe->inode; - struct erofs_map_blocks *const map = &fe->map; + struct inode *const inode = f->inode; + struct erofs_map_blocks *const map = &f->map; const loff_t offset = folio_pos(folio); - const unsigned int bs = i_blocksize(inode), fs = folio_size(folio); - bool tight = true, exclusive; - unsigned int cur, end, split; - int err = 0; + const unsigned int bs = i_blocksize(inode); + unsigned int end = folio_size(folio), split = 0, cur, pgs; + bool tight, excl; + int err; + tight = (bs == PAGE_SIZE); z_erofs_onlinefolio_init(folio); - split = 0; - end = fs; -repeat: - if (offset + end - 1 < map->m_la || - offset + end - 1 >= map->m_la + map->m_llen) { - z_erofs_pcluster_end(fe); - map->m_la = offset + end - 1; - map->m_llen = 0; - err = z_erofs_map_blocks_iter(inode, map, 0); - if (err) - goto out; - } - - cur = offset > map->m_la ? 
0 : map->m_la - offset; - /* bump split parts first to avoid several separate cases */ - ++split; - - if (!(map->m_flags & EROFS_MAP_MAPPED)) { - folio_zero_segment(folio, cur, end); - tight = false; - goto next_part; - } - - if (map->m_flags & EROFS_MAP_FRAGMENT) { - erofs_off_t fpos = offset + cur - map->m_la; + do { + if (offset + end - 1 < map->m_la || + offset + end - 1 >= map->m_la + map->m_llen) { + z_erofs_pcluster_end(f); + map->m_la = offset + end - 1; + map->m_llen = 0; + err = z_erofs_map_blocks_iter(inode, map, 0); + if (err) + break; + } - err = z_erofs_read_fragment(inode->i_sb, folio, cur, - cur + min(map->m_llen - fpos, end - cur), - EROFS_I(inode)->z_fragmentoff + fpos); - if (err) - goto out; - tight = false; - goto next_part; - } + cur = offset > map->m_la ? 0 : map->m_la - offset; + pgs = round_down(cur, PAGE_SIZE); + /* bump split parts first to avoid several separate cases */ + ++split; + + if (!(map->m_flags & EROFS_MAP_MAPPED)) { + folio_zero_segment(folio, cur, end); + tight = false; + } else if (map->m_flags & EROFS_MAP_FRAGMENT) { + erofs_off_t fpos = offset + cur - map->m_la; + + err = z_erofs_read_fragment(inode->i_sb, folio, cur, + cur + min(map->m_llen - fpos, end - cur), + EROFS_I(inode)->z_fragmentoff + fpos); + if (err) + break; + tight = false; + } else { + if (!f->pcl) { + err = z_erofs_pcluster_begin(f); + if (err) + break; + f->pcl->besteffort |= !ra; + } - if (!fe->pcl) { - err = z_erofs_pcluster_begin(fe); - if (err) - goto out; - fe->pcl->besteffort |= !ra; - } + pgs = round_down(end - 1, PAGE_SIZE); + /* + * Ensure this partial page belongs to this submit chain + * rather than other concurrent submit chains or + * noio(bypass) chains since those chains are handled + * asynchronously thus it cannot be used for inplace I/O + * or bvpage (should be processed in the strict order.) 
+ */ + tight &= (f->mode >= Z_EROFS_PCLUSTER_FOLLOWED); + excl = false; + if (cur <= pgs) { + excl = (split <= 1) || tight; + cur = pgs; + } - /* - * Ensure the current partial folio belongs to this submit chain rather - * than other concurrent submit chains or the noio(bypass) chain since - * those chains are handled asynchronously thus the folio cannot be used - * for inplace I/O or bvpage (should be processed in a strict order.) - */ - tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE); - exclusive = (!cur && ((split <= 1) || (tight && bs == fs))); - if (cur) - tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED); - - err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) { - .page = &folio->page, - .offset = offset - map->m_la, - .end = end, - }), exclusive); - if (err) - goto out; - - z_erofs_onlinefolio_split(folio); - if (fe->pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) - fe->pcl->multibases = true; - if (fe->pcl->length < offset + end - map->m_la) { - fe->pcl->length = offset + end - map->m_la; - fe->pcl->pageofs_out = map->m_la & ~PAGE_MASK; - } - if ((map->m_flags & EROFS_MAP_FULL_MAPPED) && - !(map->m_flags & EROFS_MAP_PARTIAL_REF) && - fe->pcl->length == map->m_llen) - fe->pcl->partial = false; -next_part: - /* shorten the remaining extent to update progress */ - map->m_llen = offset + cur - map->m_la; - map->m_flags &= ~EROFS_MAP_FULL_MAPPED; - - end = cur; - if (end > 0) - goto repeat; + err = z_erofs_attach_page(f, &((struct z_erofs_bvec) { + .page = folio_page(folio, pgs >> PAGE_SHIFT), + .offset = offset + pgs - map->m_la, + .end = end - pgs, }), excl); + if (err) + break; -out: + z_erofs_onlinefolio_split(folio); + if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) + f->pcl->multibases = true; + if (f->pcl->length < offset + end - map->m_la) { + f->pcl->length = offset + end - map->m_la; + f->pcl->pageofs_out = map->m_la & ~PAGE_MASK; + } + if ((map->m_flags & EROFS_MAP_FULL_MAPPED) && + !(map->m_flags & EROFS_MAP_PARTIAL_REF) && + 
f->pcl->length == map->m_llen) + f->pcl->partial = false; + } + /* shorten the remaining extent to update progress */ + map->m_llen = offset + cur - map->m_la; + map->m_flags &= ~EROFS_MAP_FULL_MAPPED; + if (cur <= pgs) { + split = cur < pgs; + tight = (bs == PAGE_SIZE); + } + } while ((end = cur) > 0); z_erofs_onlinefolio_end(folio, err); return err; } -- Gitee From 12a619e25a99a381e3171297b323adcb6b4a9ae5 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 3 Jul 2024 20:00:51 +0800 Subject: [PATCH 1881/2138] erofs: tidy up `struct z_erofs_bvec` ANBZ: #11101 commit 2080ca1ed3e43233c4e8480c0b9d2840886de01e upstream. After revisiting the design, I believe `struct z_erofs_bvec` should be page-based instead of folio-based due to the reasons below: - The minimized memory mapping block is a page; - Under the certain circumstances, only temporary pages needs to be used instead of folios since refcount, mapcount for such pages are unnecessary; - Decompressors handle all types of pages including temporary pages, not only folios. When handling `struct z_erofs_bvec`, all folio-related information is now accessed using the page_folio() helper. The final goal of this round adaptation is to eliminate direct accesses to `struct page` in the EROFS codebase, except for some exceptions like `z_erofs_is_shortlived_page()` and `z_erofs_page_is_invalidated()`, which require a new helper to determine the memdesc type of an arbitrary page. Actually large folios of compressed files seem to work now, yet I tend to conduct more tests before officially enabling this for all scenarios. 
Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240703120051.3653452-4-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/zdata.c | 101 +++++++++++++++++++++++------------------------ 1 file changed, 49 insertions(+), 52 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 0b1e1d2b8bdb..afd6f89487aa 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -19,10 +19,7 @@ typedef void *z_erofs_next_pcluster_t; struct z_erofs_bvec { - union { - struct page *page; - struct folio *folio; - }; + struct page *page; int offset; unsigned int end; }; @@ -617,32 +614,31 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE; } -/* called by erofs_shrinker to get rid of all cached compressed bvecs */ +/* (erofs_shrinker) disconnect cached encoded data with pclusters */ int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi, struct erofs_workgroup *grp) { struct z_erofs_pcluster *const pcl = container_of(grp, struct z_erofs_pcluster, obj); unsigned int pclusterpages = z_erofs_pclusterpages(pcl); + struct folio *folio; int i; DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); - /* There is no actice user since the pcluster is now freezed */ + /* Each cached folio contains one page unless bs > ps is supported */ for (i = 0; i < pclusterpages; ++i) { - struct folio *folio = pcl->compressed_bvecs[i].folio; + if (pcl->compressed_bvecs[i].page) { + folio = page_folio(pcl->compressed_bvecs[i].page); + /* Avoid reclaiming or migrating this folio */ + if (!folio_trylock(folio)) + return -EBUSY; - if (!folio) - continue; - - /* Avoid reclaiming or migrating this folio */ - if (!folio_trylock(folio)) - return -EBUSY; - - if (!erofs_folio_is_managed(sbi, folio)) - continue; - pcl->compressed_bvecs[i].folio = NULL; - folio_detach_private(folio); - folio_unlock(folio); + if (!erofs_folio_is_managed(sbi, 
folio)) + continue; + pcl->compressed_bvecs[i].page = NULL; + folio_detach_private(folio); + folio_unlock(folio); + } } return 0; } @@ -650,9 +646,9 @@ int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi, static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp) { struct z_erofs_pcluster *pcl = folio_get_private(folio); - unsigned int pclusterpages = z_erofs_pclusterpages(pcl); + struct z_erofs_bvec *bvec = pcl->compressed_bvecs; + struct z_erofs_bvec *end = bvec + z_erofs_pclusterpages(pcl); bool ret; - int i; if (!folio_test_private(folio)) return true; @@ -661,9 +657,9 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp) spin_lock(&pcl->obj.lockref.lock); if (pcl->obj.lockref.count <= 0) { DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); - for (i = 0; i < pclusterpages; ++i) { - if (pcl->compressed_bvecs[i].folio == folio) { - pcl->compressed_bvecs[i].folio = NULL; + for (; bvec < end; ++bvec) { + if (bvec->page && page_folio(bvec->page) == folio) { + bvec->page = NULL; folio_detach_private(folio); ret = true; break; @@ -1062,7 +1058,7 @@ static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi, static bool z_erofs_page_is_invalidated(struct page *page) { - return !page->mapping && !z_erofs_is_shortlived_page(page); + return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page); } struct z_erofs_decompress_backend { @@ -1415,7 +1411,7 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, bool tocache = false; struct z_erofs_bvec zbv; struct address_space *mapping; - struct page *page; + struct folio *folio; int bs = i_blocksize(f->inode); /* Except for inplace folios, the entire folio can be used for I/Os */ @@ -1425,23 +1421,25 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, spin_lock(&pcl->obj.lockref.lock); zbv = pcl->compressed_bvecs[nr]; spin_unlock(&pcl->obj.lockref.lock); - if (!zbv.folio) + if (!zbv.page) goto out_allocfolio; - bvec->bv_page = &zbv.folio->page; + bvec->bv_page = 
zbv.page; DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page)); + + folio = page_folio(zbv.page); /* * Handle preallocated cached folios. We tried to allocate such folios * without triggering direct reclaim. If allocation failed, inplace * file-backed folios will be used instead. */ - if (zbv.folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) { - zbv.folio->private = 0; + if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) { + folio->private = 0; tocache = true; goto out_tocache; } - mapping = READ_ONCE(zbv.folio->mapping); + mapping = READ_ONCE(folio->mapping); /* * File-backed folios for inplace I/Os are all locked steady, * therefore it is impossible for `mapping` to be NULL. @@ -1453,21 +1451,21 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, return; } - folio_lock(zbv.folio); - if (zbv.folio->mapping == mc) { + folio_lock(folio); + if (folio->mapping == mc) { /* * The cached folio is still in managed cache but without * a valid `->private` pcluster hint. Let's reconnect them. */ - if (!folio_test_private(zbv.folio)) { - folio_attach_private(zbv.folio, pcl); + if (!folio_test_private(folio)) { + folio_attach_private(folio, pcl); /* compressed_bvecs[] already takes a ref before */ - folio_put(zbv.folio); + folio_put(folio); } /* no need to submit if it is already up-to-date */ - if (folio_test_uptodate(zbv.folio)) { - folio_unlock(zbv.folio); + if (folio_test_uptodate(folio)) { + folio_unlock(folio); bvec->bv_page = NULL; } return; @@ -1477,32 +1475,31 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, * It has been truncated, so it's unsafe to reuse this one. Let's * allocate a new page for compressed data. 
*/ - DBG_BUGON(zbv.folio->mapping); + DBG_BUGON(folio->mapping); tocache = true; - folio_unlock(zbv.folio); - folio_put(zbv.folio); + folio_unlock(folio); + folio_put(folio); out_allocfolio: - page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); + zbv.page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); spin_lock(&pcl->obj.lockref.lock); - if (pcl->compressed_bvecs[nr].folio) { - erofs_pagepool_add(&f->pagepool, page); + if (pcl->compressed_bvecs[nr].page) { + erofs_pagepool_add(&f->pagepool, zbv.page); spin_unlock(&pcl->obj.lockref.lock); cond_resched(); goto repeat; } - pcl->compressed_bvecs[nr].folio = zbv.folio = page_folio(page); + bvec->bv_page = pcl->compressed_bvecs[nr].page = zbv.page; + folio = page_folio(zbv.page); + /* first mark it as a temporary shortlived folio (now 1 ref) */ + folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE; spin_unlock(&pcl->obj.lockref.lock); - bvec->bv_page = page; out_tocache: if (!tocache || bs != PAGE_SIZE || - filemap_add_folio(mc, zbv.folio, pcl->obj.index + nr, gfp)) { - /* turn into a temporary shortlived folio (1 ref) */ - zbv.folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE; + filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) return; - } - folio_attach_private(zbv.folio, pcl); + folio_attach_private(folio, pcl); /* drop a refcount added by allocpage (then 2 refs in total here) */ - folio_put(zbv.folio); + folio_put(folio); } static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb, -- Gitee From 0d957794db2837d93630ca591d9451eff51603a8 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 9 Jul 2024 17:41:04 +0800 Subject: [PATCH 1882/2138] erofs: move each decompressor to its own source file ANBZ: #11101 commit 392d20ccef22cb471856f41860737e6306bee0b9 upstream. Thus *_config() function declarations can be avoided. 
Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240709094106.3018109-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/compress.h | 20 +++++------------- fs/erofs/decompressor.c | 36 ++++++++++----------------------- fs/erofs/decompressor_deflate.c | 12 ++++++++--- fs/erofs/decompressor_lzma.c | 12 ++++++++--- fs/erofs/decompressor_zstd.c | 12 ++++++++--- fs/erofs/zdata.c | 2 +- 6 files changed, 44 insertions(+), 50 deletions(-) diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h index 19d53c30c8af..c68d5739932f 100644 --- a/fs/erofs/compress.h +++ b/fs/erofs/compress.h @@ -81,21 +81,11 @@ static inline bool z_erofs_put_shortlivedpage(struct page **pagepool, return true; } +extern const struct z_erofs_decompressor z_erofs_lzma_decomp; +extern const struct z_erofs_decompressor z_erofs_deflate_decomp; +extern const struct z_erofs_decompressor z_erofs_zstd_decomp; +extern const struct z_erofs_decompressor *z_erofs_decomp[]; + int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf, unsigned int padbufsize); -extern const struct z_erofs_decompressor erofs_decompressors[]; - -/* prototypes for specific algorithms */ -int z_erofs_load_lzma_config(struct super_block *sb, - struct erofs_super_block *dsb, void *data, int size); -int z_erofs_load_deflate_config(struct super_block *sb, - struct erofs_super_block *dsb, void *data, int size); -int z_erofs_load_zstd_config(struct super_block *sb, - struct erofs_super_block *dsb, void *data, int size); -int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, - struct page **pagepool); -int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, - struct page **pagepool); -int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, - struct page **pgpl); #endif diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 696a3516d75a..f7a152c36080 100644 --- 
a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -371,40 +371,28 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq, return 0; } -const struct z_erofs_decompressor erofs_decompressors[] = { - [Z_EROFS_COMPRESSION_SHIFTED] = { +const struct z_erofs_decompressor *z_erofs_decomp[] = { + [Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) { .decompress = z_erofs_transform_plain, .name = "shifted" }, - [Z_EROFS_COMPRESSION_INTERLACED] = { + [Z_EROFS_COMPRESSION_INTERLACED] = &(const struct z_erofs_decompressor) { .decompress = z_erofs_transform_plain, .name = "interlaced" }, - [Z_EROFS_COMPRESSION_LZ4] = { + [Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) { .config = z_erofs_load_lz4_config, .decompress = z_erofs_lz4_decompress, .name = "lz4" }, #ifdef CONFIG_EROFS_FS_ZIP_LZMA - [Z_EROFS_COMPRESSION_LZMA] = { - .config = z_erofs_load_lzma_config, - .decompress = z_erofs_lzma_decompress, - .name = "lzma" - }, + [Z_EROFS_COMPRESSION_LZMA] = &z_erofs_lzma_decomp, #endif #ifdef CONFIG_EROFS_FS_ZIP_DEFLATE - [Z_EROFS_COMPRESSION_DEFLATE] = { - .config = z_erofs_load_deflate_config, - .decompress = z_erofs_deflate_decompress, - .name = "deflate" - }, + [Z_EROFS_COMPRESSION_DEFLATE] = &z_erofs_deflate_decomp, #endif #ifdef CONFIG_EROFS_FS_ZIP_ZSTD - [Z_EROFS_COMPRESSION_ZSTD] = { - .config = z_erofs_load_zstd_config, - .decompress = z_erofs_zstd_decompress, - .name = "zstd" - }, + [Z_EROFS_COMPRESSION_ZSTD] = &z_erofs_zstd_decomp, #endif }; @@ -432,6 +420,7 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb) offset = EROFS_SUPER_OFFSET + sbi->sb_size; alg = 0; for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) { + const struct z_erofs_decompressor *dec = z_erofs_decomp[alg]; void *data; if (!(algs & 1)) @@ -443,16 +432,13 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb) break; } - if (alg >= ARRAY_SIZE(erofs_decompressors) || - 
!erofs_decompressors[alg].config) { + if (alg < Z_EROFS_COMPRESSION_MAX && dec && dec->config) { + ret = dec->config(sb, dsb, data, size); + } else { erofs_err(sb, "algorithm %d isn't enabled on this kernel", alg); ret = -EOPNOTSUPP; - } else { - ret = erofs_decompressors[alg].config(sb, - dsb, data, size); } - kfree(data); if (ret) break; diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c index 3a3461561a3c..1c0ed77dcdb2 100644 --- a/fs/erofs/decompressor_deflate.c +++ b/fs/erofs/decompressor_deflate.c @@ -49,7 +49,7 @@ int __init z_erofs_deflate_init(void) return 0; } -int z_erofs_load_deflate_config(struct super_block *sb, +static int z_erofs_load_deflate_config(struct super_block *sb, struct erofs_super_block *dsb, void *data, int size) { struct z_erofs_deflate_cfgs *dfl = data; @@ -97,8 +97,8 @@ int z_erofs_load_deflate_config(struct super_block *sb, return -ENOMEM; } -int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, - struct page **pgpl) +static int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, + struct page **pgpl) { const unsigned int nrpages_out = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; @@ -252,3 +252,9 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, wake_up(&z_erofs_deflate_wq); return err; } + +const struct z_erofs_decompressor z_erofs_deflate_decomp = { + .config = z_erofs_load_deflate_config, + .decompress = z_erofs_deflate_decompress, + .name = "deflate", +}; diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c index 4b28dc130c9f..9cab3a2f7558 100644 --- a/fs/erofs/decompressor_lzma.c +++ b/fs/erofs/decompressor_lzma.c @@ -70,7 +70,7 @@ int __init z_erofs_lzma_init(void) return 0; } -int z_erofs_load_lzma_config(struct super_block *sb, +static int z_erofs_load_lzma_config(struct super_block *sb, struct erofs_super_block *dsb, void *data, int size) { static DEFINE_MUTEX(lzma_resize_mutex); @@ -147,8 +147,8 @@ int 
z_erofs_load_lzma_config(struct super_block *sb, return err; } -int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, - struct page **pgpl) +static int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, + struct page **pgpl) { const unsigned int nrpages_out = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; @@ -293,3 +293,9 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, wake_up(&z_erofs_lzma_wq); return err; } + +const struct z_erofs_decompressor z_erofs_lzma_decomp = { + .config = z_erofs_load_lzma_config, + .decompress = z_erofs_lzma_decompress, + .name = "lzma" +}; diff --git a/fs/erofs/decompressor_zstd.c b/fs/erofs/decompressor_zstd.c index 63a23cac3af4..e8f931d41e60 100644 --- a/fs/erofs/decompressor_zstd.c +++ b/fs/erofs/decompressor_zstd.c @@ -72,7 +72,7 @@ int __init z_erofs_zstd_init(void) return 0; } -int z_erofs_load_zstd_config(struct super_block *sb, +static int z_erofs_load_zstd_config(struct super_block *sb, struct erofs_super_block *dsb, void *data, int size) { static DEFINE_MUTEX(zstd_resize_mutex); @@ -135,8 +135,8 @@ int z_erofs_load_zstd_config(struct super_block *sb, return strm ? 
-ENOMEM : 0; } -int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, - struct page **pgpl) +static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, + struct page **pgpl) { const unsigned int nrpages_out = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; @@ -277,3 +277,9 @@ int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, wake_up(&z_erofs_zstd_wq); return err; } + +const struct z_erofs_decompressor z_erofs_zstd_decomp = { + .config = z_erofs_load_zstd_config, + .decompress = z_erofs_zstd_decompress, + .name = "zstd", +}; diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index afd6f89487aa..bb938e79e5e0 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1213,7 +1213,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, struct z_erofs_pcluster *pcl = be->pcl; unsigned int pclusterpages = z_erofs_pclusterpages(pcl); const struct z_erofs_decompressor *decomp = - &erofs_decompressors[pcl->algorithmformat]; + z_erofs_decomp[pcl->algorithmformat]; int i, err2; struct page *page; bool overlapped; -- Gitee From ae4649b42a15cd634afd305bf99bcf3071cfef2a Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 9 Jul 2024 17:41:05 +0800 Subject: [PATCH 1883/2138] erofs: refine z_erofs_{init,exit}_subsystem() ANBZ: #11101 commit 5a7cce827ee9e2c56fcecf5cda0ad39d9568283d upstream. Introduce z_erofs_{init,exit}_decompressor() to unexport z_erofs_{deflate,lzma,zstd}_{init,exit}(). Besides, call them in z_erofs_{init,exit}_subsystem() for simplicity. 
Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240709094106.3018109-2-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/compress.h | 4 ++++ fs/erofs/decompressor.c | 28 +++++++++++++++++++++++++++ fs/erofs/decompressor_deflate.c | 6 ++++-- fs/erofs/decompressor_lzma.c | 6 ++++-- fs/erofs/decompressor_zstd.c | 6 ++++-- fs/erofs/internal.h | 34 ++++----------------------------- fs/erofs/super.c | 34 +++------------------------------ fs/erofs/zdata.c | 29 +++++++++++++++++----------- 8 files changed, 69 insertions(+), 78 deletions(-) diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h index c68d5739932f..601f533c9649 100644 --- a/fs/erofs/compress.h +++ b/fs/erofs/compress.h @@ -24,6 +24,8 @@ struct z_erofs_decompressor { void *data, int size); int (*decompress)(struct z_erofs_decompress_req *rq, struct page **pagepool); + int (*init)(void); + void (*exit)(void); char *name; }; @@ -88,4 +90,6 @@ extern const struct z_erofs_decompressor *z_erofs_decomp[]; int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf, unsigned int padbufsize); +int __init z_erofs_init_decompressor(void); +void z_erofs_exit_decompressor(void); #endif diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index f7a152c36080..612245c8ad5e 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -2,6 +2,7 @@ /* * Copyright (C) 2019 HUAWEI, Inc. 
* https://www.huawei.com/ + * Copyright (C) 2024 Alibaba Cloud */ #include "compress.h" #include @@ -383,6 +384,8 @@ const struct z_erofs_decompressor *z_erofs_decomp[] = { [Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) { .config = z_erofs_load_lz4_config, .decompress = z_erofs_lz4_decompress, + .init = z_erofs_gbuf_init, + .exit = z_erofs_gbuf_exit, .name = "lz4" }, #ifdef CONFIG_EROFS_FS_ZIP_LZMA @@ -446,3 +449,28 @@ int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb) erofs_put_metabuf(&buf); return ret; } + +int __init z_erofs_init_decompressor(void) +{ + int i, err; + + for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) { + err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0; + if (err) { + while (--i) + if (z_erofs_decomp[i]) + z_erofs_decomp[i]->exit(); + return err; + } + } + return 0; +} + +void z_erofs_exit_decompressor(void) +{ + int i; + + for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) + if (z_erofs_decomp[i]) + z_erofs_decomp[i]->exit(); +} diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c index 1c0ed77dcdb2..79232ef15654 100644 --- a/fs/erofs/decompressor_deflate.c +++ b/fs/erofs/decompressor_deflate.c @@ -15,7 +15,7 @@ static DECLARE_WAIT_QUEUE_HEAD(z_erofs_deflate_wq); module_param_named(deflate_streams, z_erofs_deflate_nstrms, uint, 0444); -void z_erofs_deflate_exit(void) +static void z_erofs_deflate_exit(void) { /* there should be no running fs instance */ while (z_erofs_deflate_avail_strms) { @@ -41,7 +41,7 @@ void z_erofs_deflate_exit(void) } } -int __init z_erofs_deflate_init(void) +static int __init z_erofs_deflate_init(void) { /* by default, use # of possible CPUs instead */ if (!z_erofs_deflate_nstrms) @@ -256,5 +256,7 @@ static int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, const struct z_erofs_decompressor z_erofs_deflate_decomp = { .config = z_erofs_load_deflate_config, .decompress = z_erofs_deflate_decompress, + .init = z_erofs_deflate_init, + .exit 
= z_erofs_deflate_exit, .name = "deflate", }; diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c index 9cab3a2f7558..80e735dc8406 100644 --- a/fs/erofs/decompressor_lzma.c +++ b/fs/erofs/decompressor_lzma.c @@ -18,7 +18,7 @@ static DECLARE_WAIT_QUEUE_HEAD(z_erofs_lzma_wq); module_param_named(lzma_streams, z_erofs_lzma_nstrms, uint, 0444); -void z_erofs_lzma_exit(void) +static void z_erofs_lzma_exit(void) { /* there should be no running fs instance */ while (z_erofs_lzma_avail_strms) { @@ -46,7 +46,7 @@ void z_erofs_lzma_exit(void) } } -int __init z_erofs_lzma_init(void) +static int __init z_erofs_lzma_init(void) { unsigned int i; @@ -297,5 +297,7 @@ static int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, const struct z_erofs_decompressor z_erofs_lzma_decomp = { .config = z_erofs_load_lzma_config, .decompress = z_erofs_lzma_decompress, + .init = z_erofs_lzma_init, + .exit = z_erofs_lzma_exit, .name = "lzma" }; diff --git a/fs/erofs/decompressor_zstd.c b/fs/erofs/decompressor_zstd.c index e8f931d41e60..49415bc40d7c 100644 --- a/fs/erofs/decompressor_zstd.c +++ b/fs/erofs/decompressor_zstd.c @@ -34,7 +34,7 @@ static struct z_erofs_zstd *z_erofs_isolate_strms(bool all) return strm; } -void z_erofs_zstd_exit(void) +static void z_erofs_zstd_exit(void) { while (z_erofs_zstd_avail_strms) { struct z_erofs_zstd *strm, *n; @@ -49,7 +49,7 @@ void z_erofs_zstd_exit(void) } } -int __init z_erofs_zstd_init(void) +static int __init z_erofs_zstd_init(void) { /* by default, use # of possible CPUs instead */ if (!z_erofs_zstd_nstrms) @@ -281,5 +281,7 @@ static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, const struct z_erofs_decompressor z_erofs_zstd_decomp = { .config = z_erofs_load_zstd_config, .decompress = z_erofs_zstd_decompress, + .init = z_erofs_zstd_init, + .exit = z_erofs_zstd_exit, .name = "zstd", }; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index a6a031712678..5062551c2b7f 100644 --- a/fs/erofs/internal.h 
+++ b/fs/erofs/internal.h @@ -454,8 +454,8 @@ void erofs_shrinker_register(struct super_block *sb); void erofs_shrinker_unregister(struct super_block *sb); int __init erofs_init_shrinker(void); void erofs_exit_shrinker(void); -int __init z_erofs_init_zip_subsystem(void); -void z_erofs_exit_zip_subsystem(void); +int __init z_erofs_init_subsystem(void); +void z_erofs_exit_subsystem(void); int erofs_try_to_free_all_cached_folios(struct erofs_sb_info *sbi, struct erofs_workgroup *egrp); int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, @@ -472,37 +472,11 @@ static inline void erofs_shrinker_register(struct super_block *sb) {} static inline void erofs_shrinker_unregister(struct super_block *sb) {} static inline int erofs_init_shrinker(void) { return 0; } static inline void erofs_exit_shrinker(void) {} -static inline int z_erofs_init_zip_subsystem(void) { return 0; } -static inline void z_erofs_exit_zip_subsystem(void) {} -static inline int z_erofs_gbuf_init(void) { return 0; } -static inline void z_erofs_gbuf_exit(void) {} +static inline int z_erofs_init_subsystem(void) { return 0; } +static inline void z_erofs_exit_subsystem(void) {} static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; } #endif /* !CONFIG_EROFS_FS_ZIP */ -#ifdef CONFIG_EROFS_FS_ZIP_LZMA -int __init z_erofs_lzma_init(void); -void z_erofs_lzma_exit(void); -#else -static inline int z_erofs_lzma_init(void) { return 0; } -static inline int z_erofs_lzma_exit(void) { return 0; } -#endif /* !CONFIG_EROFS_FS_ZIP_LZMA */ - -#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE -int __init z_erofs_deflate_init(void); -void z_erofs_deflate_exit(void); -#else -static inline int z_erofs_deflate_init(void) { return 0; } -static inline int z_erofs_deflate_exit(void) { return 0; } -#endif /* !CONFIG_EROFS_FS_ZIP_DEFLATE */ - -#ifdef CONFIG_EROFS_FS_ZIP_ZSTD -int __init z_erofs_zstd_init(void); -void z_erofs_zstd_exit(void); -#else -static inline int z_erofs_zstd_init(void) { return 
0; } -static inline int z_erofs_zstd_exit(void) { return 0; } -#endif /* !CONFIG_EROFS_FS_ZIP_ZSTD */ - #ifdef CONFIG_EROFS_FS_ONDEMAND int erofs_fscache_register_fs(struct super_block *sb); void erofs_fscache_unregister_fs(struct super_block *sb); diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 886a74784802..81d3b9e5d313 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -849,23 +849,7 @@ static int __init erofs_module_init(void) if (err) goto shrinker_err; - err = z_erofs_lzma_init(); - if (err) - goto lzma_err; - - err = z_erofs_deflate_init(); - if (err) - goto deflate_err; - - err = z_erofs_zstd_init(); - if (err) - goto zstd_err; - - err = z_erofs_gbuf_init(); - if (err) - goto gbuf_err; - - err = z_erofs_init_zip_subsystem(); + err = z_erofs_init_subsystem(); if (err) goto zip_err; @@ -882,16 +866,8 @@ static int __init erofs_module_init(void) fs_err: erofs_exit_sysfs(); sysfs_err: - z_erofs_exit_zip_subsystem(); + z_erofs_exit_subsystem(); zip_err: - z_erofs_gbuf_exit(); -gbuf_err: - z_erofs_zstd_exit(); -zstd_err: - z_erofs_deflate_exit(); -deflate_err: - z_erofs_lzma_exit(); -lzma_err: erofs_exit_shrinker(); shrinker_err: kmem_cache_destroy(erofs_inode_cachep); @@ -906,13 +882,9 @@ static void __exit erofs_module_exit(void) rcu_barrier(); erofs_exit_sysfs(); - z_erofs_exit_zip_subsystem(); - z_erofs_zstd_exit(); - z_erofs_deflate_exit(); - z_erofs_lzma_exit(); + z_erofs_exit_subsystem(); erofs_exit_shrinker(); kmem_cache_destroy(erofs_inode_cachep); - z_erofs_gbuf_exit(); } static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index bb938e79e5e0..153d686e76f4 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -446,44 +446,51 @@ static inline int erofs_cpu_hotplug_init(void) { return 0; } static inline void erofs_cpu_hotplug_destroy(void) {} #endif -void z_erofs_exit_zip_subsystem(void) +void z_erofs_exit_subsystem(void) { erofs_cpu_hotplug_destroy(); 
erofs_destroy_percpu_workers(); destroy_workqueue(z_erofs_workqueue); z_erofs_destroy_pcluster_pool(); + z_erofs_exit_decompressor(); } -int __init z_erofs_init_zip_subsystem(void) +int __init z_erofs_init_subsystem(void) { - int err = z_erofs_create_pcluster_pool(); + int err = z_erofs_init_decompressor(); if (err) - goto out_error_pcluster_pool; + goto err_decompressor; + + err = z_erofs_create_pcluster_pool(); + if (err) + goto err_pcluster_pool; z_erofs_workqueue = alloc_workqueue("erofs_worker", WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus()); if (!z_erofs_workqueue) { err = -ENOMEM; - goto out_error_workqueue_init; + goto err_workqueue_init; } err = erofs_init_percpu_workers(); if (err) - goto out_error_pcpu_worker; + goto err_pcpu_worker; err = erofs_cpu_hotplug_init(); if (err < 0) - goto out_error_cpuhp_init; + goto err_cpuhp_init; return err; -out_error_cpuhp_init: +err_cpuhp_init: erofs_destroy_percpu_workers(); -out_error_pcpu_worker: +err_pcpu_worker: destroy_workqueue(z_erofs_workqueue); -out_error_workqueue_init: +err_workqueue_init: z_erofs_destroy_pcluster_pool(); -out_error_pcluster_pool: +err_pcluster_pool: + z_erofs_exit_decompressor(); +err_decompressor: return err; } -- Gitee From 392ac682bb556461d151c080a1d48155ec4efd38 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 9 Jul 2024 17:41:06 +0800 Subject: [PATCH 1884/2138] erofs: tidy up stream decompressors ANBZ: #11101 commit 84a2ceefff99633d8f88c7c1f9bbd2c139b8f805 upstream. Just use a generic helper to prepare buffers for all supported stream decompressors, eliminating similar logic. 
Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240709094106.3018109-3-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/compress.h | 15 ++++ fs/erofs/decompressor.c | 83 ++++++++++++++++++ fs/erofs/decompressor_deflate.c | 131 +++++++--------------------- fs/erofs/decompressor_lzma.c | 148 ++++++++++---------------------- fs/erofs/decompressor_zstd.c | 136 ++++++++--------------------- 5 files changed, 209 insertions(+), 304 deletions(-) diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h index 601f533c9649..526edc0a7d2d 100644 --- a/fs/erofs/compress.h +++ b/fs/erofs/compress.h @@ -88,6 +88,21 @@ extern const struct z_erofs_decompressor z_erofs_deflate_decomp; extern const struct z_erofs_decompressor z_erofs_zstd_decomp; extern const struct z_erofs_decompressor *z_erofs_decomp[]; +struct z_erofs_stream_dctx { + struct z_erofs_decompress_req *rq; + unsigned int inpages, outpages; /* # of {en,de}coded pages */ + int no, ni; /* the current {en,de}coded page # */ + + unsigned int avail_out; /* remaining bytes in the decoded buffer */ + unsigned int inbuf_pos, inbuf_sz; + /* current status of the encoded buffer */ + u8 *kin, *kout; /* buffer mapped pointers */ + void *bounce; /* bounce buffer for inplace I/Os */ + bool bounced; /* is the bounce buffer used now? 
*/ +}; + +int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst, + void **src, struct page **pgpl); int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf, unsigned int padbufsize); int __init z_erofs_init_decompressor(void); diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 612245c8ad5e..8b6949149067 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -372,6 +372,89 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq, return 0; } +int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst, + void **src, struct page **pgpl) +{ + struct z_erofs_decompress_req *rq = dctx->rq; + struct super_block *sb = rq->sb; + struct page **pgo, *tmppage; + unsigned int j; + + if (!dctx->avail_out) { + if (++dctx->no >= dctx->outpages || !rq->outputsize) { + erofs_err(sb, "insufficient space for decompressed data"); + return -EFSCORRUPTED; + } + + if (dctx->kout) + kunmap_local(dctx->kout); + dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out); + rq->outputsize -= dctx->avail_out; + pgo = &rq->out[dctx->no]; + if (!*pgo && rq->fillgaps) { /* deduped */ + *pgo = erofs_allocpage(pgpl, rq->gfp); + if (!*pgo) { + dctx->kout = NULL; + return -ENOMEM; + } + set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE); + } + if (*pgo) { + dctx->kout = kmap_local_page(*pgo); + *dst = dctx->kout + rq->pageofs_out; + } else { + *dst = dctx->kout = NULL; + } + rq->pageofs_out = 0; + } + + if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) { + if (++dctx->ni >= dctx->inpages) { + erofs_err(sb, "invalid compressed data"); + return -EFSCORRUPTED; + } + if (dctx->kout) /* unlike kmap(), take care of the orders */ + kunmap_local(dctx->kout); + kunmap_local(dctx->kin); + + dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE); + rq->inputsize -= dctx->inbuf_sz; + dctx->kin = kmap_local_page(rq->in[dctx->ni]); + *src = dctx->kin; + dctx->bounced = false; + if (dctx->kout) { 
+ j = (u8 *)*dst - dctx->kout; + dctx->kout = kmap_local_page(rq->out[dctx->no]); + *dst = dctx->kout + j; + } + dctx->inbuf_pos = 0; + } + + /* + * Handle overlapping: Use the given bounce buffer if the input data is + * under processing; Or utilize short-lived pages from the on-stack page + * pool, where pages are shared among the same request. Note that only + * a few inplace I/O pages need to be doubled. + */ + if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) { + memcpy(dctx->bounce, *src, dctx->inbuf_sz); + *src = dctx->bounce; + dctx->bounced = true; + } + + for (j = dctx->ni + 1; j < dctx->inpages; ++j) { + if (rq->out[dctx->no] != rq->in[j]) + continue; + tmppage = erofs_allocpage(pgpl, rq->gfp); + if (!tmppage) + return -ENOMEM; + set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); + copy_highpage(tmppage, rq->in[j]); + rq->in[j] = tmppage; + } + return 0; +} + const struct z_erofs_decompressor *z_erofs_decomp[] = { [Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) { .decompress = z_erofs_transform_plain, diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c index 79232ef15654..5070d2fcc737 100644 --- a/fs/erofs/decompressor_deflate.c +++ b/fs/erofs/decompressor_deflate.c @@ -100,24 +100,23 @@ static int z_erofs_load_deflate_config(struct super_block *sb, static int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, struct page **pgpl) { - const unsigned int nrpages_out = - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const unsigned int nrpages_in = - PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; struct super_block *sb = rq->sb; - unsigned int insz, outsz, pofs; + struct z_erofs_stream_dctx dctx = { + .rq = rq, + .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT, + .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) + >> PAGE_SHIFT, + .no = -1, .ni = 0, + }; struct z_erofs_deflate *strm; - u8 *kin, *kout = NULL; - bool bounced = false; - int no = -1, ni = 0, j = 0, zerr, 
err; + int zerr, err; /* 1. get the exact DEFLATE compressed size */ - kin = kmap_local_page(*rq->in); - err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in, - min_t(unsigned int, rq->inputsize, - sb->s_blocksize - rq->pageofs_in)); + dctx.kin = kmap_local_page(*rq->in); + err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in, + min(rq->inputsize, sb->s_blocksize - rq->pageofs_in)); if (err) { - kunmap_local(kin); + kunmap_local(dctx.kin); return err; } @@ -134,102 +133,35 @@ static int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, spin_unlock(&z_erofs_deflate_lock); /* 3. multi-call decompress */ - insz = rq->inputsize; - outsz = rq->outputsize; zerr = zlib_inflateInit2(&strm->z, -MAX_WBITS); if (zerr != Z_OK) { err = -EIO; goto failed_zinit; } - pofs = rq->pageofs_out; - strm->z.avail_in = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in); - insz -= strm->z.avail_in; - strm->z.next_in = kin + rq->pageofs_in; + rq->fillgaps = true; /* DEFLATE doesn't support NULL output buffer */ + strm->z.avail_in = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in); + rq->inputsize -= strm->z.avail_in; + strm->z.next_in = dctx.kin + rq->pageofs_in; strm->z.avail_out = 0; + dctx.bounce = strm->bounce; while (1) { - if (!strm->z.avail_out) { - if (++no >= nrpages_out || !outsz) { - erofs_err(sb, "insufficient space for decompressed data"); - err = -EFSCORRUPTED; - break; - } - - if (kout) - kunmap_local(kout); - strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs); - outsz -= strm->z.avail_out; - if (!rq->out[no]) { - rq->out[no] = erofs_allocpage(pgpl, rq->gfp); - if (!rq->out[no]) { - kout = NULL; - err = -ENOMEM; - break; - } - set_page_private(rq->out[no], - Z_EROFS_SHORTLIVED_PAGE); - } - kout = kmap_local_page(rq->out[no]); - strm->z.next_out = kout + pofs; - pofs = 0; - } - - if (!strm->z.avail_in && insz) { - if (++ni >= nrpages_in) { - erofs_err(sb, "invalid compressed data"); - err = -EFSCORRUPTED; - break; - } - - if (kout) { /* unlike kmap(), take care of 
the orders */ - j = strm->z.next_out - kout; - kunmap_local(kout); - } - kunmap_local(kin); - strm->z.avail_in = min_t(u32, insz, PAGE_SIZE); - insz -= strm->z.avail_in; - kin = kmap_local_page(rq->in[ni]); - strm->z.next_in = kin; - bounced = false; - if (kout) { - kout = kmap_local_page(rq->out[no]); - strm->z.next_out = kout + j; - } - } - - /* - * Handle overlapping: Use bounced buffer if the compressed - * data is under processing; Or use short-lived pages from the - * on-stack pagepool where pages share among the same request - * and not _all_ inplace I/O pages are needed to be doubled. - */ - if (!bounced && rq->out[no] == rq->in[ni]) { - memcpy(strm->bounce, strm->z.next_in, strm->z.avail_in); - strm->z.next_in = strm->bounce; - bounced = true; - } - - for (j = ni + 1; j < nrpages_in; ++j) { - struct page *tmppage; - - if (rq->out[no] != rq->in[j]) - continue; - tmppage = erofs_allocpage(pgpl, rq->gfp); - if (!tmppage) { - err = -ENOMEM; - goto failed; - } - set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); - copy_highpage(tmppage, rq->in[j]); - rq->in[j] = tmppage; - } + dctx.avail_out = strm->z.avail_out; + dctx.inbuf_sz = strm->z.avail_in; + err = z_erofs_stream_switch_bufs(&dctx, + (void **)&strm->z.next_out, + (void **)&strm->z.next_in, pgpl); + if (err) + break; + strm->z.avail_out = dctx.avail_out; + strm->z.avail_in = dctx.inbuf_sz; zerr = zlib_inflate(&strm->z, Z_SYNC_FLUSH); - if (zerr != Z_OK || !(outsz + strm->z.avail_out)) { + if (zerr != Z_OK || !(rq->outputsize + strm->z.avail_out)) { if (zerr == Z_OK && rq->partial_decoding) break; - if (zerr == Z_STREAM_END && !outsz) + if (zerr == Z_STREAM_END && !rq->outputsize) break; erofs_err(sb, "failed to decompress %d in[%u] out[%u]", zerr, rq->inputsize, rq->outputsize); @@ -237,13 +169,12 @@ static int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, break; } } -failed: if (zlib_inflateEnd(&strm->z) != Z_OK && !err) err = -EIO; - if (kout) - kunmap_local(kout); + if (dctx.kout) + 
kunmap_local(dctx.kout); failed_zinit: - kunmap_local(kin); + kunmap_local(dctx.kin); /* 4. push back DEFLATE stream context to the global list */ spin_lock(&z_erofs_deflate_lock); strm->next = z_erofs_deflate_head; diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c index 80e735dc8406..06a722b85a45 100644 --- a/fs/erofs/decompressor_lzma.c +++ b/fs/erofs/decompressor_lzma.c @@ -5,7 +5,6 @@ struct z_erofs_lzma { struct z_erofs_lzma *next; struct xz_dec_microlzma *state; - struct xz_buf buf; u8 bounce[PAGE_SIZE]; }; @@ -150,23 +149,25 @@ static int z_erofs_load_lzma_config(struct super_block *sb, static int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, struct page **pgpl) { - const unsigned int nrpages_out = - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const unsigned int nrpages_in = - PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; - unsigned int inlen, outlen, pageofs; + struct super_block *sb = rq->sb; + struct z_erofs_stream_dctx dctx = { + .rq = rq, + .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT, + .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) + >> PAGE_SHIFT, + .no = -1, .ni = 0, + }; + struct xz_buf buf = {}; struct z_erofs_lzma *strm; - u8 *kin; - bool bounced = false; - int no, ni, j, err = 0; + enum xz_ret xz_err; + int err; /* 1. get the exact LZMA compressed size */ - kin = kmap(*rq->in); - err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in, - min_t(unsigned int, rq->inputsize, - rq->sb->s_blocksize - rq->pageofs_in)); + dctx.kin = kmap_local_page(*rq->in); + err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in, + min(rq->inputsize, sb->s_blocksize - rq->pageofs_in)); if (err) { - kunmap(*rq->in); + kunmap_local(dctx.kin); return err; } @@ -183,108 +184,45 @@ static int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, spin_unlock(&z_erofs_lzma_lock); /* 3. 
multi-call decompress */ - inlen = rq->inputsize; - outlen = rq->outputsize; - xz_dec_microlzma_reset(strm->state, inlen, outlen, + xz_dec_microlzma_reset(strm->state, rq->inputsize, rq->outputsize, !rq->partial_decoding); - pageofs = rq->pageofs_out; - strm->buf.in = kin + rq->pageofs_in; - strm->buf.in_pos = 0; - strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE - rq->pageofs_in); - inlen -= strm->buf.in_size; - strm->buf.out = NULL; - strm->buf.out_pos = 0; - strm->buf.out_size = 0; - - for (ni = 0, no = -1;;) { - enum xz_ret xz_err; - - if (strm->buf.out_pos == strm->buf.out_size) { - if (strm->buf.out) { - kunmap(rq->out[no]); - strm->buf.out = NULL; - } - - if (++no >= nrpages_out || !outlen) { - erofs_err(rq->sb, "decompressed buf out of bound"); - err = -EFSCORRUPTED; - break; - } - strm->buf.out_pos = 0; - strm->buf.out_size = min_t(u32, outlen, - PAGE_SIZE - pageofs); - outlen -= strm->buf.out_size; - if (!rq->out[no] && rq->fillgaps) { /* deduped */ - rq->out[no] = erofs_allocpage(pgpl, rq->gfp); - if (!rq->out[no]) { - err = -ENOMEM; - break; - } - set_page_private(rq->out[no], - Z_EROFS_SHORTLIVED_PAGE); - } - if (rq->out[no]) - strm->buf.out = kmap(rq->out[no]) + pageofs; - pageofs = 0; - } else if (strm->buf.in_pos == strm->buf.in_size) { - kunmap(rq->in[ni]); - - if (++ni >= nrpages_in || !inlen) { - erofs_err(rq->sb, "compressed buf out of bound"); - err = -EFSCORRUPTED; - break; - } - strm->buf.in_pos = 0; - strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE); - inlen -= strm->buf.in_size; - kin = kmap(rq->in[ni]); - strm->buf.in = kin; - bounced = false; - } + buf.in_size = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in); + rq->inputsize -= buf.in_size; + buf.in = dctx.kin + rq->pageofs_in, + dctx.bounce = strm->bounce; + do { + dctx.avail_out = buf.out_size - buf.out_pos; + dctx.inbuf_sz = buf.in_size; + dctx.inbuf_pos = buf.in_pos; + err = z_erofs_stream_switch_bufs(&dctx, (void **)&buf.out, + (void **)&buf.in, pgpl); + if (err) + break; - /* - * 
Handle overlapping: Use bounced buffer if the compressed - * data is under processing; Otherwise, Use short-lived pages - * from the on-stack pagepool where pages share with the same - * request. - */ - if (!bounced && rq->out[no] == rq->in[ni]) { - memcpy(strm->bounce, strm->buf.in, strm->buf.in_size); - strm->buf.in = strm->bounce; - bounced = true; + if (buf.out_size == buf.out_pos) { + buf.out_size = dctx.avail_out; + buf.out_pos = 0; } - for (j = ni + 1; j < nrpages_in; ++j) { - struct page *tmppage; + buf.in_size = dctx.inbuf_sz; + buf.in_pos = dctx.inbuf_pos; - if (rq->out[no] != rq->in[j]) - continue; - tmppage = erofs_allocpage(pgpl, rq->gfp); - if (!tmppage) { - err = -ENOMEM; - goto failed; - } - set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); - copy_highpage(tmppage, rq->in[j]); - rq->in[j] = tmppage; - } - xz_err = xz_dec_microlzma_run(strm->state, &strm->buf); - DBG_BUGON(strm->buf.out_pos > strm->buf.out_size); - DBG_BUGON(strm->buf.in_pos > strm->buf.in_size); + xz_err = xz_dec_microlzma_run(strm->state, &buf); + DBG_BUGON(buf.out_pos > buf.out_size); + DBG_BUGON(buf.in_pos > buf.in_size); if (xz_err != XZ_OK) { - if (xz_err == XZ_STREAM_END && !outlen) + if (xz_err == XZ_STREAM_END && !rq->outputsize) break; - erofs_err(rq->sb, "failed to decompress %d in[%u] out[%u]", + erofs_err(sb, "failed to decompress %d in[%u] out[%u]", xz_err, rq->inputsize, rq->outputsize); err = -EFSCORRUPTED; break; } - } -failed: - if (no < nrpages_out && strm->buf.out) - kunmap(rq->out[no]); - if (ni < nrpages_in) - kunmap(rq->in[ni]); + } while (1); + + if (dctx.kout) + kunmap_local(dctx.kout); + kunmap_local(dctx.kin); /* 4. 
push back LZMA stream context to the global list */ spin_lock(&z_erofs_lzma_lock); strm->next = z_erofs_lzma_head; diff --git a/fs/erofs/decompressor_zstd.c b/fs/erofs/decompressor_zstd.c index 49415bc40d7c..7e177304967e 100644 --- a/fs/erofs/decompressor_zstd.c +++ b/fs/erofs/decompressor_zstd.c @@ -138,27 +138,26 @@ static int z_erofs_load_zstd_config(struct super_block *sb, static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, struct page **pgpl) { - const unsigned int nrpages_out = - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const unsigned int nrpages_in = - PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; - zstd_dstream *stream; struct super_block *sb = rq->sb; - unsigned int insz, outsz, pofs; - struct z_erofs_zstd *strm; + struct z_erofs_stream_dctx dctx = { + .rq = rq, + .inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT, + .outpages = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) + >> PAGE_SHIFT, + .no = -1, .ni = 0, + }; zstd_in_buffer in_buf = { NULL, 0, 0 }; zstd_out_buffer out_buf = { NULL, 0, 0 }; - u8 *kin, *kout = NULL; - bool bounced = false; - int no = -1, ni = 0, j = 0, zerr, err; + struct z_erofs_zstd *strm; + zstd_dstream *stream; + int zerr, err; /* 1. get the exact compressed size */ - kin = kmap_local_page(*rq->in); - err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in, - min_t(unsigned int, rq->inputsize, - sb->s_blocksize - rq->pageofs_in)); + dctx.kin = kmap_local_page(*rq->in); + err = z_erofs_fixup_insize(rq, dctx.kin + rq->pageofs_in, + min(rq->inputsize, sb->s_blocksize - rq->pageofs_in)); if (err) { - kunmap_local(kin); + kunmap_local(dctx.kin); return err; } @@ -166,109 +165,48 @@ static int z_erofs_zstd_decompress(struct z_erofs_decompress_req *rq, strm = z_erofs_isolate_strms(false); /* 3. 
multi-call decompress */ - insz = rq->inputsize; - outsz = rq->outputsize; stream = zstd_init_dstream(z_erofs_zstd_max_dictsize, strm->wksp, strm->wkspsz); if (!stream) { err = -EIO; goto failed_zinit; } - pofs = rq->pageofs_out; - in_buf.size = min_t(u32, insz, PAGE_SIZE - rq->pageofs_in); - insz -= in_buf.size; - in_buf.src = kin + rq->pageofs_in; + rq->fillgaps = true; /* ZSTD doesn't support NULL output buffer */ + in_buf.size = min_t(u32, rq->inputsize, PAGE_SIZE - rq->pageofs_in); + rq->inputsize -= in_buf.size; + in_buf.src = dctx.kin + rq->pageofs_in; + dctx.bounce = strm->bounce; + do { - if (out_buf.size == out_buf.pos) { - if (++no >= nrpages_out || !outsz) { - erofs_err(sb, "insufficient space for decompressed data"); - err = -EFSCORRUPTED; - break; - } + dctx.avail_out = out_buf.size - out_buf.pos; + dctx.inbuf_sz = in_buf.size; + dctx.inbuf_pos = in_buf.pos; + err = z_erofs_stream_switch_bufs(&dctx, &out_buf.dst, + (void **)&in_buf.src, pgpl); + if (err) + break; - if (kout) - kunmap_local(kout); - out_buf.size = min_t(u32, outsz, PAGE_SIZE - pofs); - outsz -= out_buf.size; - if (!rq->out[no]) { - rq->out[no] = erofs_allocpage(pgpl, rq->gfp); - if (!rq->out[no]) { - kout = NULL; - err = -ENOMEM; - break; - } - set_page_private(rq->out[no], - Z_EROFS_SHORTLIVED_PAGE); - } - kout = kmap_local_page(rq->out[no]); - out_buf.dst = kout + pofs; + if (out_buf.size == out_buf.pos) { + out_buf.size = dctx.avail_out; out_buf.pos = 0; - pofs = 0; } + in_buf.size = dctx.inbuf_sz; + in_buf.pos = dctx.inbuf_pos; - if (in_buf.size == in_buf.pos && insz) { - if (++ni >= nrpages_in) { - erofs_err(sb, "invalid compressed data"); - err = -EFSCORRUPTED; - break; - } - - if (kout) /* unlike kmap(), take care of the orders */ - kunmap_local(kout); - kunmap_local(kin); - in_buf.size = min_t(u32, insz, PAGE_SIZE); - insz -= in_buf.size; - kin = kmap_local_page(rq->in[ni]); - in_buf.src = kin; - in_buf.pos = 0; - bounced = false; - if (kout) { - j = (u8 *)out_buf.dst - kout; - 
kout = kmap_local_page(rq->out[no]); - out_buf.dst = kout + j; - } - } - - /* - * Handle overlapping: Use bounced buffer if the compressed - * data is under processing; Or use short-lived pages from the - * on-stack pagepool where pages share among the same request - * and not _all_ inplace I/O pages are needed to be doubled. - */ - if (!bounced && rq->out[no] == rq->in[ni]) { - memcpy(strm->bounce, in_buf.src, in_buf.size); - in_buf.src = strm->bounce; - bounced = true; - } - - for (j = ni + 1; j < nrpages_in; ++j) { - struct page *tmppage; - - if (rq->out[no] != rq->in[j]) - continue; - tmppage = erofs_allocpage(pgpl, rq->gfp); - if (!tmppage) { - err = -ENOMEM; - goto failed; - } - set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); - copy_highpage(tmppage, rq->in[j]); - rq->in[j] = tmppage; - } zerr = zstd_decompress_stream(stream, &out_buf, &in_buf); - if (zstd_is_error(zerr) || (!zerr && outsz)) { + if (zstd_is_error(zerr) || (!zerr && rq->outputsize)) { erofs_err(sb, "failed to decompress in[%u] out[%u]: %s", rq->inputsize, rq->outputsize, zerr ? zstd_get_error_name(zerr) : "unexpected end of stream"); err = -EFSCORRUPTED; break; } - } while (outsz || out_buf.pos < out_buf.size); -failed: - if (kout) - kunmap_local(kout); + } while (rq->outputsize || out_buf.pos < out_buf.size); + + if (dctx.kout) + kunmap_local(dctx.kout); failed_zinit: - kunmap_local(kin); + kunmap_local(dctx.kin); /* 4. push back ZSTD stream context to the global list */ spin_lock(&z_erofs_zstd_lock); strm->next = z_erofs_zstd_head; -- Gitee From e28fd2b8ffdb423be815d6260fe6459145f0fcc4 Mon Sep 17 00:00:00 2001 From: Hongzhen Luo Date: Wed, 10 Jul 2024 16:34:59 +0800 Subject: [PATCH 1885/2138] erofs: get rid of z_erofs_map_blocks_iter_* tracepoints ANBZ: #11101 commit 1c076f1f4d7fc7cfb45dba10b3b49d574b4c4c28 upstream. Consolidate them under erofs_map_blocks_* for simplicity since we have many other ways to know if a given inode is compressed or not. 
Signed-off-by: Hongzhen Luo Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20240710083459.208362-1-hongzhen@linux.alibaba.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/zmap.c | 4 ++-- include/trace/events/erofs.h | 32 +++----------------------------- 2 files changed, 5 insertions(+), 31 deletions(-) diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 34d8acdca83f..8d28cfc6a4b8 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -687,7 +687,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, struct erofs_inode *const vi = EROFS_I(inode); int err = 0; - trace_z_erofs_map_blocks_iter_enter(inode, map, flags); + trace_erofs_map_blocks_enter(inode, map, flags); /* when trying to read beyond EOF, leave it unmapped */ if (map->m_la >= inode->i_size) { @@ -714,7 +714,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, out: if (err) map->m_llen = 0; - trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err); + trace_erofs_map_blocks_exit(inode, map, flags, err); return err; } diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h index e18684b02c3d..455ab94a511c 100644 --- a/include/trace/events/erofs.h +++ b/include/trace/events/erofs.h @@ -143,7 +143,8 @@ TRACE_EVENT(erofs_readpages, __entry->raw) ); -DECLARE_EVENT_CLASS(erofs__map_blocks_enter, +TRACE_EVENT(erofs_map_blocks_enter, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, unsigned int flags), @@ -171,21 +172,8 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_enter, __entry->flags ? 
show_map_flags(__entry->flags) : "NULL") ); -DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_enter, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned flags), - - TP_ARGS(inode, map, flags) -); - -DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags), - - TP_ARGS(inode, map, flags) -); +TRACE_EVENT(erofs_map_blocks_exit, -DECLARE_EVENT_CLASS(erofs__map_blocks_exit, TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, unsigned int flags, int ret), @@ -223,20 +211,6 @@ DECLARE_EVENT_CLASS(erofs__map_blocks_exit, show_mflags(__entry->mflags), __entry->ret) ); -DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_exit, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned flags, int ret), - - TP_ARGS(inode, map, flags, ret) -); - -DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags, int ret), - - TP_ARGS(inode, map, flags, ret) -); - TRACE_EVENT(erofs_destroy_inode, TP_PROTO(struct inode *inode), -- Gitee From c8dce320b44882eddf5e0ebbcbe99ef4fc1632b5 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 11 Jul 2024 13:36:59 +0800 Subject: [PATCH 1886/2138] erofs: avoid refcounting short-lived pages ANBZ: #11101 commit 1001042e54ef324c0c665b60a012519be05ae022 upstream. LZ4 always reuses the decompressed buffer as its LZ77 sliding window (dynamic dictionary) for optimal performance. However, in specific cases, the output buffer may not fully contain valid page cache pages, resulting in the use of short-lived pages for temporary purposes. Due to the limited sliding window size, LZ4 shortlived bounce pages can also be reused in a sliding manner, so each bounce page can be vmapped multiple times in different relative positions by design. 
In order to avoid double frees, currently, reuse counts are recorded via page refcount, but it will no longer be used as-is in the future world of Memdescs. Just maintain a lookup table to check if a shortlived page is reused. Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240711053659.1364989-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/compress.h | 22 ++++++---------------- fs/erofs/decompressor.c | 1 - fs/erofs/zdata.c | 27 ++++++++++++++++++--------- 3 files changed, 24 insertions(+), 26 deletions(-) diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h index 526edc0a7d2d..7bfe251680ec 100644 --- a/fs/erofs/compress.h +++ b/fs/erofs/compress.h @@ -54,17 +54,14 @@ struct z_erofs_decompressor { */ /* - * short-lived pages are pages directly from buddy system with specific - * page->private (no need to set PagePrivate since these are non-LRU / - * non-movable pages and bypass reclaim / migration code). + * Currently, short-lived pages are pages directly from buddy system + * with specific page->private (Z_EROFS_SHORTLIVED_PAGE). + * In the future world of Memdescs, it should be type 0 (Misc) memory + * which type can be checked with a new helper. */ static inline bool z_erofs_is_shortlived_page(struct page *page) { - if (page->private != Z_EROFS_SHORTLIVED_PAGE) - return false; - - DBG_BUGON(page->mapping); - return true; + return page->private == Z_EROFS_SHORTLIVED_PAGE; } static inline bool z_erofs_put_shortlivedpage(struct page **pagepool, @@ -72,14 +69,7 @@ static inline bool z_erofs_put_shortlivedpage(struct page **pagepool, { if (!z_erofs_is_shortlived_page(page)) return false; - - /* short-lived pages should not be used by others at the same time */ - if (page_ref_count(page) > 1) { - put_page(page); - } else { - /* follow the pcluster rule above. 
*/ - erofs_pagepool_add(pagepool, page); - } + erofs_pagepool_add(pagepool, page); return true; } diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 8b6949149067..d09436f39da7 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -110,7 +110,6 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx, if (top) { victim = availables[--top]; - get_page(victim); } else { victim = __erofs_allocpage(pagepool, rq->gfp, true); if (!victim) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 153d686e76f4..92f366ca7a8d 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1221,7 +1221,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, unsigned int pclusterpages = z_erofs_pclusterpages(pcl); const struct z_erofs_decompressor *decomp = z_erofs_decomp[pcl->algorithmformat]; - int i, err2; + int i, j, jtop, err2; struct page *page; bool overlapped; @@ -1279,10 +1279,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL); put_page(page); } else { + /* managed folios are still left in compressed_bvecs[] */ for (i = 0; i < pclusterpages; ++i) { - /* consider shortlived pages added when decompressing */ page = be->compressed_pages[i]; - if (!page || erofs_folio_is_managed(sbi, page_folio(page))) continue; @@ -1293,21 +1292,31 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, if (be->compressed_pages < be->onstack_pages || be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES) kvfree(be->compressed_pages); - z_erofs_fill_other_copies(be, err); + jtop = 0; + z_erofs_fill_other_copies(be, err); for (i = 0; i < be->nr_pages; ++i) { page = be->decompressed_pages[i]; if (!page) continue; DBG_BUGON(z_erofs_page_is_invalidated(page)); - - /* recycle all individual short-lived pages */ - if (z_erofs_put_shortlivedpage(be->pagepool, page)) + if (!z_erofs_is_shortlived_page(page)) 
{ + z_erofs_onlinefolio_end(page_folio(page), err); continue; - z_erofs_onlinefolio_end(page_folio(page), err); + } + if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) { + erofs_pagepool_add(be->pagepool, page); + continue; + } + for (j = 0; j < jtop && be->decompressed_pages[j] != page; ++j) + ; + if (j >= jtop) /* this bounce page is newly detected */ + be->decompressed_pages[jtop++] = page; } - + while (jtop) + erofs_pagepool_add(be->pagepool, + be->decompressed_pages[--jtop]); if (be->decompressed_pages != be->onstack_pages) kvfree(be->decompressed_pages); -- Gitee From d53154970234571be3fe5694d704245f00fe10b1 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 12 Jul 2024 20:04:16 -0500 Subject: [PATCH 1887/2138] erofs: silence uninitialized variable warning in z_erofs_scan_folio() ANBZ: #11101 commit a3c10bed330b7ab401254a0c91098a03b04f1448 upstream. Smatch complains that: fs/erofs/zdata.c:1047 z_erofs_scan_folio() error: uninitialized symbol 'err'. The issue is if we hit this (!(map->m_flags & EROFS_MAP_MAPPED)) { condition then "err" isn't set. It's inside a loop so we would have to hit that condition on every iteration. Initialize "err" to zero to solve this. 
Fixes: 5b9654efb604 ("erofs: teach z_erofs_scan_folios() to handle multi-page folios") Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/f78ab50e-ed6d-4275-8dd4-a4159fa565a2@stanley.mountain Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/zdata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 92f366ca7a8d..073d08f05e7d 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -962,7 +962,7 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f, const unsigned int bs = i_blocksize(inode); unsigned int end = folio_size(folio), split = 0, cur, pgs; bool tight, excl; - int err; + int err = 0; tight = (bs == PAGE_SIZE); z_erofs_onlinefolio_init(folio); -- Gitee From 75b246dd244f881cbc9f82e4d487afe3c1517d12 Mon Sep 17 00:00:00 2001 From: Hongbo Li Date: Thu, 18 Jul 2024 16:32:43 +0800 Subject: [PATCH 1888/2138] erofs: support STATX_DIOALIGN ANBZ: #11101 commit 9c421ef3f6b30ab912eaaa3c3d20cfb921fd8c8f upstream. Add support for STATX_DIOALIGN to EROFS, so that direct I/O alignment restrictions are exposed to userspace in a generic way. 
[Before] ``` ./statx_test /mnt/erofs/testfile statx(/mnt/erofs/testfile) = 0 dio mem align:0 dio offset align:0 ``` [After] ``` ./statx_test /mnt/erofs/testfile statx(/mnt/erofs/testfile) = 0 dio mem align:512 dio offset align:512 ``` Signed-off-by: Hongbo Li Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240718083243.2485437-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/inode.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 64b18fbe8a77..bbb7cb61f340 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -325,14 +325,29 @@ int erofs_getattr(struct mnt_idmap *idmap, const struct path *path, unsigned int query_flags) { struct inode *const inode = d_inode(path->dentry); + bool compressed = + erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout); - if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) + if (compressed) stat->attributes |= STATX_ATTR_COMPRESSED; - stat->attributes |= STATX_ATTR_IMMUTABLE; stat->attributes_mask |= (STATX_ATTR_COMPRESSED | STATX_ATTR_IMMUTABLE); + /* + * Return the DIO alignment restrictions if requested. + * + * In EROFS, STATX_DIOALIGN is not supported in ondemand mode and + * compressed files, so in these cases we report no DIO support. 
+ */ + if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) { + stat->result_mask |= STATX_DIOALIGN; + if (!erofs_is_fscache_mode(inode->i_sb) && !compressed) { + stat->dio_mem_align = + bdev_logical_block_size(inode->i_sb->s_bdev); + stat->dio_offset_align = stat->dio_mem_align; + } + } generic_fillattr(idmap, request_mask, inode, stat); return 0; } -- Gitee From 594af230f0edfb80054e1fcd2acea2f4397ec9ef Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Mon, 22 Jul 2024 11:51:10 +0800 Subject: [PATCH 1889/2138] erofs: fix race in z_erofs_get_gbuf() ANBZ: #11101 commit 7dc5537c3f8be87e005f0844a7626c987914f8fd upstream. In z_erofs_get_gbuf(), the current task may be migrated to another CPU between `z_erofs_gbuf_id()` and `spin_lock(&gbuf->lock)`. Therefore, z_erofs_put_gbuf() will trigger the following issue which was found by stress test: <2>[772156.434168] kernel BUG at fs/erofs/zutil.c:58! .. <4>[772156.435007] <4>[772156.439237] CPU: 0 PID: 3078 Comm: stress Kdump: loaded Tainted: G E 6.10.0-rc7+ #2 <4>[772156.439239] Hardware name: Alibaba Cloud Alibaba Cloud ECS, BIOS 1.0.0 01/01/2017 <4>[772156.439241] pstate: 83400005 (Nzcv daif +PAN -UAO +TCO +DIT -SSBS BTYPE=--) <4>[772156.439243] pc : z_erofs_put_gbuf+0x64/0x70 [erofs] <4>[772156.439252] lr : z_erofs_lz4_decompress+0x600/0x6a0 [erofs] .. <6>[772156.445958] stress (3127): drop_caches: 1 <4>[772156.446120] Call trace: <4>[772156.446121] z_erofs_put_gbuf+0x64/0x70 [erofs] <4>[772156.446761] z_erofs_lz4_decompress+0x600/0x6a0 [erofs] <4>[772156.446897] z_erofs_decompress_queue+0x740/0xa10 [erofs] <4>[772156.447036] z_erofs_runqueue+0x428/0x8c0 [erofs] <4>[772156.447160] z_erofs_readahead+0x224/0x390 [erofs] .. 
Fixes: f36f3010f676 ("erofs: rename per-CPU buffers to global buffer pool and make it configurable") Cc: # 6.10+ Reviewed-by: Chunhai Guo Reviewed-by: Sandeep Dhavale Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240722035110.3456740-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/zutil.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c index b219a6a255b5..4ab804769fed 100644 --- a/fs/erofs/zutil.c +++ b/fs/erofs/zutil.c @@ -37,11 +37,13 @@ void *z_erofs_get_gbuf(unsigned int requiredpages) { struct z_erofs_gbuf *gbuf; + migrate_disable(); gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()]; spin_lock(&gbuf->lock); /* check if the buffer is too small */ if (requiredpages > gbuf->nrpages) { spin_unlock(&gbuf->lock); + migrate_enable(); /* (for sparse checker) pretend gbuf->lock is still taken */ __acquire(gbuf->lock); return NULL; @@ -56,6 +58,7 @@ void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock) gbuf = &z_erofs_gbufpool[z_erofs_gbuf_id()]; DBG_BUGON(gbuf->ptr != ptr); spin_unlock(&gbuf->lock); + migrate_enable(); } int z_erofs_gbuf_growsize(unsigned int nrpages) -- Gitee From bb298ee56f54122eb59f88e4feb25e8232512d43 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 25 Apr 2024 00:15:46 -0400 Subject: [PATCH 1890/2138] erofs_buf: store address_space instead of inode ANBZ: #11101 commit 958b9f85f8d9d884045ed4b93b2082090e617f97 upstream. ... seeing that ->i_mapping is the only thing we want from the inode. 
Signed-off-by: Al Viro Signed-off-by: Hongzhen Luo Acked-by: Gao Xiang Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/data.c | 7 +++---- fs/erofs/dir.c | 2 +- fs/erofs/internal.h | 2 +- fs/erofs/namei.c | 4 ++-- fs/erofs/xattr.c | 2 +- fs/erofs/zdata.c | 2 +- 6 files changed, 9 insertions(+), 10 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 4d1371c329d6..5d5460355bac 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -32,7 +32,6 @@ void erofs_put_metabuf(struct erofs_buf *buf) void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, enum erofs_kmap_type type) { - struct inode *inode = buf->inode; pgoff_t index = offset >> PAGE_SHIFT; struct page *page = buf->page; struct folio *folio; @@ -42,7 +41,7 @@ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, erofs_put_metabuf(buf); nofs_flag = memalloc_nofs_save(); - folio = read_cache_folio(inode->i_mapping, index, NULL, NULL); + folio = read_cache_folio(buf->mapping, index, NULL, NULL); memalloc_nofs_restore(nofs_flag); if (IS_ERR(folio)) return folio; @@ -67,9 +66,9 @@ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb) { if (erofs_is_fscache_mode(sb)) - buf->inode = EROFS_SB(sb)->s_fscache->inode; + buf->mapping = EROFS_SB(sb)->s_fscache->inode->i_mapping; else - buf->inode = sb->s_bdev->bd_inode; + buf->mapping = sb->s_bdev->bd_inode->i_mapping; } void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb, diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c index 9d38f39bb4f7..2193a6710c8f 100644 --- a/fs/erofs/dir.c +++ b/fs/erofs/dir.c @@ -58,7 +58,7 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) int err = 0; bool initial = true; - buf.inode = dir; + buf.mapping = dir->i_mapping; while (ctx->pos < dirsize) { struct erofs_dirent *de; unsigned int nameoff, maxsize; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 
5062551c2b7f..e7d6aaccd5db 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -213,7 +213,7 @@ enum erofs_kmap_type { }; struct erofs_buf { - struct inode *inode; + struct address_space *mapping; struct page *page; void *base; enum erofs_kmap_type kmap_type; diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c index 11afa48996a3..c94d0c1608a8 100644 --- a/fs/erofs/namei.c +++ b/fs/erofs/namei.c @@ -99,7 +99,7 @@ static void *erofs_find_target_block(struct erofs_buf *target, struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct erofs_dirent *de; - buf.inode = dir; + buf.mapping = dir->i_mapping; de = erofs_bread(&buf, erofs_pos(dir->i_sb, mid), EROFS_KMAP); if (!IS_ERR(de)) { const int nameoff = nameoff_from_disk(de->nameoff, bsz); @@ -171,7 +171,7 @@ int erofs_namei(struct inode *dir, const struct qstr *name, erofs_nid_t *nid, qn.name = name->name; qn.end = name->name + name->len; - buf.inode = dir; + buf.mapping = dir->i_mapping; ndirents = 0; de = erofs_find_target_block(&buf, dir, &qn, &ndirents); diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c index ec233917830a..a90d7d649739 100644 --- a/fs/erofs/xattr.c +++ b/fs/erofs/xattr.c @@ -483,7 +483,7 @@ int erofs_xattr_prefixes_init(struct super_block *sb) return -ENOMEM; if (sbi->packed_inode) - buf.inode = sbi->packed_inode; + buf.mapping = sbi->packed_inode->i_mapping; else erofs_init_metabuf(&buf, sb); diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 073d08f05e7d..424f656cd765 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -939,7 +939,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio, if (!packed_inode) return -EFSCORRUPTED; - buf.inode = packed_inode; + buf.mapping = packed_inode->i_mapping; for (; cur < end; cur += cnt, pos += cnt) { cnt = min(end - cur, sb->s_blocksize - erofs_blkoff(sb, pos)); src = erofs_bread(&buf, pos, EROFS_KMAP); -- Gitee From 09a929a2a4166d668546e0a0c685631464a2666b Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 23 Jul 2024 
15:30:24 +0800 Subject: [PATCH 1891/2138] erofs: support multi-page folios for erofs_bread() ANBZ: #11101 commit 5d3bb77e5fce1d224b94da31abae0a7afed54735 upstream. If the requested page is part of the previous multi-page folio, there is no need to call read_mapping_folio() again. Also, get rid of the remaining one of page->index [1] in our codebase. [1] https://lore.kernel.org/r/Zp8fgUSIBGQ1TN0D@casper.infradead.org Cc: Matthew Wilcox Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240723073024.875290-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/data.c | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 5d5460355bac..b2fe7744949d 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -21,38 +21,32 @@ void erofs_put_metabuf(struct erofs_buf *buf) if (!buf->page) return; erofs_unmap_metabuf(buf); - put_page(buf->page); + folio_put(page_folio(buf->page)); buf->page = NULL; } -/* - * Derive the block size from inode->i_blkbits to make compatible with - * anonymous inode in fscache mode. 
- */ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, enum erofs_kmap_type type) { pgoff_t index = offset >> PAGE_SHIFT; - struct page *page = buf->page; - struct folio *folio; - unsigned int nofs_flag; + struct folio *folio = NULL; - if (!page || page->index != index) { + if (buf->page) { + folio = page_folio(buf->page); + if (folio_file_page(folio, index) != buf->page) + erofs_unmap_metabuf(buf); + } + if (!folio || !folio_contains(folio, index)) { erofs_put_metabuf(buf); - - nofs_flag = memalloc_nofs_save(); - folio = read_cache_folio(buf->mapping, index, NULL, NULL); - memalloc_nofs_restore(nofs_flag); + folio = read_mapping_folio(buf->mapping, index, NULL); if (IS_ERR(folio)) return folio; - - /* should already be PageUptodate, no need to lock page */ - page = folio_file_page(folio, index); - buf->page = page; } + buf->page = folio_file_page(folio, index); + if (buf->kmap_type == EROFS_NO_KMAP) { if (type == EROFS_KMAP) - buf->base = kmap_local_page(page); + buf->base = kmap_local_page(buf->page); buf->kmap_type = type; } else if (buf->kmap_type != type) { DBG_BUGON(1); -- Gitee From 70199d1c70ae1348f754b1570cc45ec827e2bb61 Mon Sep 17 00:00:00 2001 From: Chen Ni Date: Wed, 24 Jul 2024 10:07:21 +0800 Subject: [PATCH 1892/2138] erofs: convert comma to semicolon ANBZ: #11101 commit 14e9283fb22d0d259820a5f05c6059678bab9ac5 upstream. Replace a comma between expression statements by a semicolon. 
Signed-off-by: Chen Ni Link: https://lore.kernel.org/r/20240724020721.2389738-1-nichen@iscas.ac.cn Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/decompressor_lzma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c index 06a722b85a45..40666815046f 100644 --- a/fs/erofs/decompressor_lzma.c +++ b/fs/erofs/decompressor_lzma.c @@ -188,7 +188,7 @@ static int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, !rq->partial_decoding); buf.in_size = min(rq->inputsize, PAGE_SIZE - rq->pageofs_in); rq->inputsize -= buf.in_size; - buf.in = dctx.kin + rq->pageofs_in, + buf.in = dctx.kin + rq->pageofs_in; dctx.bounce = strm->bounce; do { dctx.avail_out = buf.out_size - buf.out_pos; -- Gitee From a039fa432cbdfe443bed4da6298b9e1684a9c30a Mon Sep 17 00:00:00 2001 From: Hongzhen Luo Date: Thu, 1 Aug 2024 19:26:22 +0800 Subject: [PATCH 1893/2138] erofs: simplify readdir operation ANBZ: #11101 commit 5b5c96c63d5b6e91c622611e04b2b156bbae53f5 upstream. - Use i_size instead of i_size_read() due to immutable fses; - Get rid of an unneeded goto since erofs_fill_dentries() also works; - Remove unnecessary lines. 
Signed-off-by: Hongzhen Luo Link: https://lore.kernel.org/r/20240801112622.2164029-1-hongzhen@linux.alibaba.com Reviewed-by: Gao Xiang Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/dir.c | 35 ++++++++++++----------------------- fs/erofs/internal.h | 2 +- 2 files changed, 13 insertions(+), 24 deletions(-) diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c index 2193a6710c8f..c3b90abdee37 100644 --- a/fs/erofs/dir.c +++ b/fs/erofs/dir.c @@ -8,19 +8,15 @@ static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx, void *dentry_blk, struct erofs_dirent *de, - unsigned int nameoff, unsigned int maxsize) + unsigned int nameoff0, unsigned int maxsize) { - const struct erofs_dirent *end = dentry_blk + nameoff; + const struct erofs_dirent *end = dentry_blk + nameoff0; while (de < end) { - const char *de_name; + unsigned char d_type = fs_ftype_to_dtype(de->file_type); + unsigned int nameoff = le16_to_cpu(de->nameoff); + const char *de_name = (char *)dentry_blk + nameoff; unsigned int de_namelen; - unsigned char d_type; - - d_type = fs_ftype_to_dtype(de->file_type); - - nameoff = le16_to_cpu(de->nameoff); - de_name = (char *)dentry_blk + nameoff; /* the last dirent in the block? 
*/ if (de + 1 >= end) @@ -52,21 +48,20 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct super_block *sb = dir->i_sb; unsigned long bsz = sb->s_blocksize; - const size_t dirsize = i_size_read(dir); - unsigned int i = erofs_blknr(sb, ctx->pos); unsigned int ofs = erofs_blkoff(sb, ctx->pos); int err = 0; bool initial = true; buf.mapping = dir->i_mapping; - while (ctx->pos < dirsize) { + while (ctx->pos < dir->i_size) { + erofs_off_t dbstart = ctx->pos - ofs; struct erofs_dirent *de; unsigned int nameoff, maxsize; - de = erofs_bread(&buf, erofs_pos(sb, i), EROFS_KMAP); + de = erofs_bread(&buf, dbstart, EROFS_KMAP); if (IS_ERR(de)) { erofs_err(sb, "fail to readdir of logical block %u of nid %llu", - i, EROFS_I(dir)->nid); + erofs_blknr(sb, dbstart), EROFS_I(dir)->nid); err = PTR_ERR(de); break; } @@ -79,25 +74,19 @@ static int erofs_readdir(struct file *f, struct dir_context *ctx) break; } - maxsize = min_t(unsigned int, dirsize - ctx->pos + ofs, bsz); - + maxsize = min_t(unsigned int, dir->i_size - dbstart, bsz); /* search dirents at the arbitrary position */ if (initial) { initial = false; - ofs = roundup(ofs, sizeof(struct erofs_dirent)); - ctx->pos = erofs_pos(sb, i) + ofs; - if (ofs >= nameoff) - goto skip_this; + ctx->pos = dbstart + ofs; } err = erofs_fill_dentries(dir, ctx, de, (void *)de + ofs, nameoff, maxsize); if (err) break; -skip_this: - ctx->pos = erofs_pos(sb, i) + maxsize; - ++i; + ctx->pos = dbstart + maxsize; ofs = 0; } erofs_put_metabuf(&buf); diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index e7d6aaccd5db..5bd27052fd73 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -220,7 +220,7 @@ struct erofs_buf { }; #define __EROFS_BUF_INITIALIZER ((struct erofs_buf){ .page = NULL }) -#define erofs_blknr(sb, addr) ((addr) >> (sb)->s_blocksize_bits) +#define erofs_blknr(sb, addr) ((erofs_blk_t)((addr) >> (sb)->s_blocksize_bits)) #define erofs_blkoff(sb, addr) 
((addr) & ((sb)->s_blocksize - 1)) #define erofs_pos(sb, blk) ((erofs_off_t)(blk) << (sb)->s_blocksize_bits) #define erofs_iblks(i) (round_up((i)->i_size, i_blocksize(i)) >> (i)->i_blkbits) -- Gitee From 9bccb3e5ea9db657d1a26f57825756c13d1436ff Mon Sep 17 00:00:00 2001 From: Hongzhen Luo Date: Tue, 6 Aug 2024 19:22:08 +0800 Subject: [PATCH 1894/2138] erofs: get rid of check_layout_compatibility() ANBZ: #11101 commit 2c534624ae70100aeea0b5800b0f3768b2fd3cf0 upstream. Simple enough to just open-code it. Signed-off-by: Hongzhen Luo Reviewed-by: Sandeep Dhavale Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20240806112208.150323-1-hongzhen@linux.alibaba.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/super.c | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 81d3b9e5d313..23d049ddeefe 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -108,22 +108,6 @@ static void erofs_free_inode(struct inode *inode) kmem_cache_free(erofs_inode_cachep, vi); } -static bool check_layout_compatibility(struct super_block *sb, - struct erofs_super_block *dsb) -{ - const unsigned int feature = le32_to_cpu(dsb->feature_incompat); - - EROFS_SB(sb)->feature_incompat = feature; - - /* check if current kernel meets all mandatory requirements */ - if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) { - erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel", - feature & ~EROFS_ALL_FEATURE_INCOMPAT); - return false; - } - return true; -} - /* read variable-sized metadata, offset will be aligned by 4-byte */ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf, erofs_off_t *offset, int *lengthp) @@ -279,7 +263,7 @@ static int erofs_scan_devices(struct super_block *sb, static int erofs_read_superblock(struct super_block *sb) { - struct erofs_sb_info *sbi; + struct 
erofs_sb_info *sbi = EROFS_SB(sb); struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct erofs_super_block *dsb; void *data; @@ -291,9 +275,7 @@ static int erofs_read_superblock(struct super_block *sb) return PTR_ERR(data); } - sbi = EROFS_SB(sb); dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET); - ret = -EINVAL; if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) { erofs_err(sb, "cannot find valid erofs superblock"); @@ -318,8 +300,12 @@ static int erofs_read_superblock(struct super_block *sb) } ret = -EINVAL; - if (!check_layout_compatibility(sb, dsb)) + sbi->feature_incompat = le32_to_cpu(dsb->feature_incompat); + if (sbi->feature_incompat & ~EROFS_ALL_FEATURE_INCOMPAT) { + erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel", + sbi->feature_incompat & ~EROFS_ALL_FEATURE_INCOMPAT); goto out; + } sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE; if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) { -- Gitee From 58ac66410a1c20fb58a0255c1d620da49a33d8fc Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Mon, 19 Aug 2024 10:52:07 +0800 Subject: [PATCH 1895/2138] erofs: allow large folios for compressed files ANBZ: #11101 commit e080a26725fb36f535f22ea42694c60ab005fb2e upstream. As commit 2e6506e1c4ee ("mm/migrate: fix deadlock in migrate_pages_batch() on large folios") has landed upstream, large folios can be safely enabled for compressed inodes since all prerequisites have already landed in 6.11-rc1. Stress tests have been running on my fleet for over 20 days without any regression. Additionally, users [1] have requested it for months. Let's allow large folios for EROFS full cases upstream now for wider testing. [1] https://lore.kernel.org/r/CAGsJ_4wtE8OcpinuqVwG4jtdx6Qh5f+TON6wz+4HMCq=A2qFcA@mail.gmail.com Cc: Barry Song <21cnbao@gmail.com> Cc: Matthew Wilcox (Oracle) [ Gao Xiang: minor commit typo fixes. 
] Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240819025207.3808649-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- Documentation/filesystems/erofs.rst | 2 +- fs/erofs/inode.c | 18 ++++++++---------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/Documentation/filesystems/erofs.rst b/Documentation/filesystems/erofs.rst index cc4626d6ee4f..c293f8e37468 100644 --- a/Documentation/filesystems/erofs.rst +++ b/Documentation/filesystems/erofs.rst @@ -75,7 +75,7 @@ Here are the main features of EROFS: - Support merging tail-end data into a special inode as fragments. - - Support large folios for uncompressed files. + - Support large folios to make use of THPs (Transparent Hugepages); - Support direct I/O on uncompressed files to avoid double caching for loop devices; diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index bbb7cb61f340..7a63af980c10 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -248,25 +248,23 @@ static int erofs_fill_inode(struct inode *inode) goto out_unlock; } + mapping_set_large_folios(inode->i_mapping); if (erofs_inode_is_data_compressed(vi->datalayout)) { #ifdef CONFIG_EROFS_FS_ZIP DO_ONCE_LITE_IF(inode->i_blkbits != PAGE_SHIFT, erofs_info, inode->i_sb, "EXPERIMENTAL EROFS subpage compressed block support in use. 
Use at your own risk!"); inode->i_mapping->a_ops = &z_erofs_aops; - err = 0; - goto out_unlock; -#endif +#else err = -EOPNOTSUPP; - goto out_unlock; - } - inode->i_mapping->a_ops = &erofs_raw_access_aops; - mapping_set_large_folios(inode->i_mapping); +#endif + } else { + inode->i_mapping->a_ops = &erofs_raw_access_aops; #ifdef CONFIG_EROFS_FS_ONDEMAND - if (erofs_is_fscache_mode(inode->i_sb)) - inode->i_mapping->a_ops = &erofs_fscache_access_aops; + if (erofs_is_fscache_mode(inode->i_sb)) + inode->i_mapping->a_ops = &erofs_fscache_access_aops; #endif - + } out_unlock: erofs_put_metabuf(&buf); return err; -- Gitee From ead6bfe189c6e4da9f2e045a80c85c80a1aef623 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 20 Aug 2024 16:56:19 +0800 Subject: [PATCH 1896/2138] erofs: fix out-of-bound access when z_erofs_gbuf_growsize() partially fails ANBZ: #11101 commit 0005e01e1e875c5e27130c5e2ed0189749d1e08a upstream. If z_erofs_gbuf_growsize() partially fails on a global buffer due to memory allocation failure or fault injection (as reported by syzbot [1]), new pages need to be freed by comparing to the existing pages to avoid memory leaks. However, the old gbuf->pages[] array may not be large enough, which can lead to null-ptr-deref or out-of-bound access. Fix this by checking against gbuf->nrpages in advance. 
[1] https://lore.kernel.org/r/000000000000f7b96e062018c6e3@google.com Reported-by: syzbot+242ee56aaa9585553766@syzkaller.appspotmail.com Fixes: d6db47e571dc ("erofs: do not use pagepool in z_erofs_gbuf_growsize()") Cc: # 6.10+ Reviewed-by: Chunhai Guo Reviewed-by: Sandeep Dhavale Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240820085619.1375963-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/zutil.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c index 4ab804769fed..1262809b4104 100644 --- a/fs/erofs/zutil.c +++ b/fs/erofs/zutil.c @@ -110,7 +110,8 @@ int z_erofs_gbuf_growsize(unsigned int nrpages) out: if (i < z_erofs_gbuf_count && tmp_pages) { for (j = 0; j < nrpages; ++j) - if (tmp_pages[j] && tmp_pages[j] != gbuf->pages[j]) + if (tmp_pages[j] && (j >= gbuf->nrpages || + tmp_pages[j] != gbuf->pages[j])) __free_page(tmp_pages[j]); kfree(tmp_pages); } -- Gitee From 5ef209f32538117325e2a0bb8de7f78b908aa26a Mon Sep 17 00:00:00 2001 From: Sandeep Dhavale Date: Wed, 4 Sep 2024 23:00:25 -0700 Subject: [PATCH 1897/2138] erofs: fix error handling in z_erofs_init_decompressor ANBZ: #11101 commit 3fc3e45fcdeaad4b7660b560fcbc827eb733f58e upstream. If we get a failure at the first decompressor init (i = 0), the clean up while loop could enter infinite loop due to wrong while check. Check the value of i now to see if we need any clean up at all. 
Fixes: 5a7cce827ee9 ("erofs: refine z_erofs_{init,exit}_subsystem()") Reported-by: liujinbao1 Signed-off-by: Sandeep Dhavale Reviewed-by: Gao Xiang Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20240905060027.2388893-1-dhavale@google.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4180 --- fs/erofs/decompressor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index d09436f39da7..3e76908b8ea7 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -539,7 +539,7 @@ int __init z_erofs_init_decompressor(void) for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) { err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0; if (err) { - while (--i) + while (i--) if (z_erofs_decomp[i]) z_erofs_decomp[i]->exit(); return err; -- Gitee From 544058f6b08311a3cc5d09d9add04f5953c5ae91 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 3 Oct 2023 09:32:31 -0700 Subject: [PATCH 1898/2138] platform/x86: ISST: Use fuse enabled mask instead of allowed levels MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit da4082841ccf022beae73e63d3f476f59777172b upstream. Allowed level mask is a mask of levels, which are currently allowed to dynamically switch by the OS. Fused mask is a mask of all levels even if OS is not allowed to switch. Even if OS is not allowed to dynamically switch, it is still possible for user to boot to a level by using BIOS option. To decide which level to boot next time, user wants to check parameters (power, performance or thermal) of that level to decide. So, when passing the level mask for display to user space, use fuse enabled mask, which has all levels. Intel-SIG: commit da4082841ccf platform/x86: ISST: Use fuse enabled mask instead of allowed levels. Backport Intel speed select ISST driver support on TPMI. 
Signed-off-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20231003163234.1856669-2-srinivas.pandruvada@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 63faa2ea8327..a672a1c814af 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -704,7 +704,7 @@ static int isst_if_get_perf_level(void __user *argp) return -EINVAL; perf_level.max_level = power_domain_info->max_level; - perf_level.level_mask = power_domain_info->pp_header.allowed_level_mask; + perf_level.level_mask = power_domain_info->pp_header.level_en_mask; perf_level.feature_rev = power_domain_info->pp_header.feature_rev; _read_pp_info("current_level", perf_level.current_level, SST_PP_STATUS_OFFSET, SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE) -- Gitee From 1f133f9d51fb64b7dcaca51254e266cca58b5487 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 3 Oct 2023 09:32:32 -0700 Subject: [PATCH 1899/2138] platform/x86: ISST: Allow level 0 to be not present MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit a22d36eb5b150913325640cb793e13e08d1bd715 upstream. It is possible that SST level 0 or base level is not present in some configurations. So don't set level 0 mask in level_en_mask by default. Intel-SIG: commit a22d36eb5b15 platform/x86: ISST: Allow level 0 to be not present. Backport Intel speed select ISST driver support on TPMI. 
Signed-off-by: Srinivas Pandruvada Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231003163234.1856669-3-srinivas.pandruvada@linux.intel.com Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index a672a1c814af..ac5c6a812592 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -364,9 +364,6 @@ static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domai /* Read PP header */ *((u64 *)&pd_info->pp_header) = readq(pd_info->sst_base + pd_info->sst_header.pp_offset); - /* Force level_en_mask level 0 */ - pd_info->pp_header.level_en_mask |= 0x01; - mask = 0x01; levels = 0; for (i = 0; i < 8; ++i) { -- Gitee From 5b22684a54c49b84663d58f45ede0da1c34d4ec8 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 3 Oct 2023 09:32:33 -0700 Subject: [PATCH 1900/2138] platform/x86: intel_speed_select_if: Remove hardcoded map size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit 7525cea3ef9384054a30f25ebb501234befecdcb upstream. The driver is using 256 as the size while calling devm_ioremap(). The maximum offset can be obtained from isst_mmio_range. Add a field "size" to the isst_mmio_range and use it instead of hardcoding. No functional impact is expected. Intel-SIG: commit 7525cea3ef93 platform/x86: intel_speed_select_if: Remove hardcoded map size. Backport Intel speed select ISST driver support on TPMI. 
Signed-off-by: Srinivas Pandruvada Reviewed-by: Andy Shevchenko Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231003163234.1856669-4-srinivas.pandruvada@linux.intel.com Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- .../x86/intel/speed_select_if/isst_if_mmio.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c index ff49025ec085..13e068c77d50 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c @@ -18,16 +18,17 @@ struct isst_mmio_range { int beg; int end; + int size; }; static struct isst_mmio_range mmio_range_devid_0[] = { - {0x04, 0x14}, - {0x20, 0xD0}, + {0x04, 0x14, 0x18}, + {0x20, 0xD0, 0xD4}, }; static struct isst_mmio_range mmio_range_devid_1[] = { - {0x04, 0x14}, - {0x20, 0x11C}, + {0x04, 0x14, 0x18}, + {0x20, 0x11C, 0x120}, }; struct isst_if_device { @@ -114,13 +115,16 @@ static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pcu_base &= GENMASK(10, 0); base_addr = (u64)mmio_base << 23 | (u64) pcu_base << 12; - punit_dev->punit_mmio = devm_ioremap(&pdev->dev, base_addr, 256); + + punit_dev->mmio_range = (struct isst_mmio_range *) ent->driver_data; + + punit_dev->punit_mmio = devm_ioremap(&pdev->dev, base_addr, + punit_dev->mmio_range[1].size); if (!punit_dev->punit_mmio) return -ENOMEM; mutex_init(&punit_dev->mutex); pci_set_drvdata(pdev, punit_dev); - punit_dev->mmio_range = (struct isst_mmio_range *) ent->driver_data; memset(&cb, 0, sizeof(cb)); cb.cmd_size = sizeof(struct isst_if_io_reg); -- Gitee From 4d4da72e7d712263f27893f012d56ec8d3c3f51f Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 3 Oct 2023 09:32:34 -0700 Subject: [PATCH 1901/2138] 
platform/x86: intel_speed_select_if: Use devm_ioremap_resource MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit 23f392ea6d1916f68be8067e2a038ef9a746a94b upstream. Replace devm_ioremap() with devm_ioremap_resource() by defining a resource. Intel-SIG: commit 23f392ea6d19 platform/x86: intel_speed_select_if: Use devm_ioremap_resource. Backport Intel SST driver for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Suggested-by: Andy Shevchenko Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20231003163234.1856669-5-srinivas.pandruvada@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- .../platform/x86/intel/speed_select_if/isst_if_mmio.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c index 13e068c77d50..3f4343147dad 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c @@ -94,6 +94,7 @@ static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct isst_if_device *punit_dev; struct isst_if_cmd_cb cb; u32 mmio_base, pcu_base; + struct resource r; u64 base_addr; int ret; @@ -118,10 +119,10 @@ static int isst_if_probe(struct pci_dev *pdev, const struct pci_device_id *ent) punit_dev->mmio_range = (struct isst_mmio_range *) ent->driver_data; - punit_dev->punit_mmio = devm_ioremap(&pdev->dev, base_addr, - punit_dev->mmio_range[1].size); - if (!punit_dev->punit_mmio) - return -ENOMEM; + r = DEFINE_RES_MEM(base_addr, punit_dev->mmio_range[1].size); + punit_dev->punit_mmio = devm_ioremap_resource(&pdev->dev, &r); + if (IS_ERR(punit_dev->punit_mmio)) + return PTR_ERR(punit_dev->punit_mmio); 
mutex_init(&punit_dev->mutex); pci_set_drvdata(pdev, punit_dev); -- Gitee From a7fbe65ea6b2d30cb4d5e91701d5735950ee2d62 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 3 Oct 2023 11:49:15 -0700 Subject: [PATCH 1902/2138] platform/x86: ISST: Ignore minor version change MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit 07510a59b316445311775ee53adf10448140607a upstream. The hardware definition of every TPMI feature contains a major and minor version. When there is a change in the MMIO offset or change in the definition of a field, hardware will change major version. For addition of new fields without modifying existing MMIO offsets or fields, only the minor version is changed. Driver is developed to support SST functionality for a major and minor version. If the hardware changes major version, since offsets and definitions are changed, driver cannot continue to provide SST interface to users. Driver can still function with a minor version change as it will just miss the new functionality added by the hardware. The current implementation doesn't ignore any version change. If there is mismatch with the minor version, continue with an information log message. If there is mismatch with the major version, log error and exit. Intel-SIG: commit 07510a59b316 platform/x86: ISST: Ignore minor version change. 
Backport Intel SST driver for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20231003184916.1860084-3-srinivas.pandruvada@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- .../x86/intel/speed_select_if/isst_tpmi_core.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index ac5c6a812592..0b6d2c864437 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -30,7 +30,8 @@ #include "isst_if_common.h" /* Supported SST hardware version by this driver */ -#define ISST_HEADER_VERSION 1 +#define ISST_MAJOR_VERSION 0 +#define ISST_MINOR_VERSION 1 /* * Used to indicate if value read from MMIO needs to get multiplied @@ -352,12 +353,19 @@ static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domai pd_info->sst_header.cp_offset *= 8; pd_info->sst_header.pp_offset *= 8; - if (pd_info->sst_header.interface_version != ISST_HEADER_VERSION) { - dev_err(&auxdev->dev, "SST: Unsupported version:%x\n", - pd_info->sst_header.interface_version); + if (pd_info->sst_header.interface_version == TPMI_VERSION_INVALID) + return -ENODEV; + + if (TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version) != ISST_MAJOR_VERSION) { + dev_err(&auxdev->dev, "SST: Unsupported major version:%lx\n", + TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version)); return -ENODEV; } + if (TPMI_MINOR_VERSION(pd_info->sst_header.interface_version) != ISST_MINOR_VERSION) + dev_info(&auxdev->dev, "SST: Ignore: Unsupported minor version:%lx\n", + TPMI_MINOR_VERSION(pd_info->sst_header.interface_version)); + /* Read SST CP Header */ *((u64 
*)&pd_info->cp_header) = readq(pd_info->sst_base + pd_info->sst_header.cp_offset); -- Gitee From 97b84c32d5a1fbda961b3ce7832b8693b821e822 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Mon, 4 Dec 2023 14:17:39 -0800 Subject: [PATCH 1903/2138] platform/x86: ISST: Process read/write blocked feature status MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit 8bed9ff7dbcce4d1a436f7839be48c6fd5fac0ce upstream. When a feature is read blocked, don't continue to read SST information and register with SST core. When the feature is write blocked, continue to offer read interface for SST parameters, but don't allow any operation to change state. A state change results from SST level change, feature change or class of service change. Intel-SIG: commit 8bed9ff7dbcc platform/x86: ISST: Process read/write blocked feature status. Backport intel SST driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Reviewed-by: Hans de Goede Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20231204221740.3645130-5-srinivas.pandruvada@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- .../intel/speed_select_if/isst_tpmi_core.c | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 0b6d2c864437..2662fbbddf0c 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -234,6 +234,7 @@ struct perf_level { * @saved_clos_configs: Save SST-CP CLOS configuration to store restore for suspend/resume * @saved_clos_assocs: Save SST-CP CLOS association to store restore for suspend/resume * @saved_pp_control: Save SST-PP control information to store restore for 
suspend/resume + * @write_blocked: Write operation is blocked, so can't change SST state * * This structure is used store complete SST information for a power_domain. This information * is used to read/write request for any SST IOCTL. Each physical CPU package can have multiple @@ -259,6 +260,7 @@ struct tpmi_per_power_domain_info { u64 saved_clos_configs[4]; u64 saved_clos_assocs[4]; u64 saved_pp_control; + bool write_blocked; }; /** @@ -515,6 +517,9 @@ static long isst_if_clos_param(void __user *argp) return -EINVAL; if (clos_param.get_set) { + if (power_domain_info->write_blocked) + return -EPERM; + _write_cp_info("clos.min_freq", clos_param.min_freq_mhz, (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE), SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH, @@ -602,6 +607,9 @@ static long isst_if_clos_assoc(void __user *argp) power_domain_info = &sst_inst->power_domain_info[punit_id]; + if (assoc_cmds.get_set && power_domain_info->write_blocked) + return -EPERM; + offset = SST_CLOS_ASSOC_0_OFFSET + (punit_cpu_no / SST_CLOS_ASSOC_CPUS_PER_REG) * SST_REG_SIZE; shift = punit_cpu_no % SST_CLOS_ASSOC_CPUS_PER_REG; @@ -752,6 +760,9 @@ static int isst_if_set_perf_level(void __user *argp) if (!power_domain_info) return -EINVAL; + if (power_domain_info->write_blocked) + return -EPERM; + if (!(power_domain_info->pp_header.allowed_level_mask & BIT(perf_level.level))) return -EINVAL; @@ -809,6 +820,9 @@ static int isst_if_set_perf_feature(void __user *argp) if (!power_domain_info) return -EINVAL; + if (power_domain_info->write_blocked) + return -EPERM; + _write_pp_info("perf_feature", perf_feature.feature, SST_PP_CONTROL_OFFSET, SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE) @@ -1257,11 +1271,21 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, int tpmi_sst_dev_add(struct auxiliary_device *auxdev) { + bool read_blocked = 0, write_blocked = 0; struct intel_tpmi_plat_info *plat_info; struct tpmi_sst_struct 
*tpmi_sst; int i, ret, pkg = 0, inst = 0; int num_resources; + ret = tpmi_get_feature_status(auxdev, TPMI_ID_SST, &read_blocked, &write_blocked); + if (ret) + dev_info(&auxdev->dev, "Can't read feature status: ignoring read/write blocked status\n"); + + if (read_blocked) { + dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n"); + return -ENODEV; + } + plat_info = tpmi_get_platform_data(auxdev); if (!plat_info) { dev_err(&auxdev->dev, "No platform info\n"); @@ -1306,6 +1330,7 @@ int tpmi_sst_dev_add(struct auxiliary_device *auxdev) tpmi_sst->power_domain_info[i].package_id = pkg; tpmi_sst->power_domain_info[i].power_domain_id = i; tpmi_sst->power_domain_info[i].auxdev = auxdev; + tpmi_sst->power_domain_info[i].write_blocked = write_blocked; tpmi_sst->power_domain_info[i].sst_base = devm_ioremap_resource(&auxdev->dev, res); if (IS_ERR(tpmi_sst->power_domain_info[i].sst_base)) return PTR_ERR(tpmi_sst->power_domain_info[i].sst_base); -- Gitee From 5695aa64a967175635c691406e8a50a237584a7e Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Wed, 28 Feb 2024 16:26:59 -0800 Subject: [PATCH 1904/2138] platform/x86: ISST: Allow reading core-power state on HWP disabled systems MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit e8b4223dbf12cb6b722e1b8c48a9386cb096d4fb upstream. When HWP (Hardware P-states) is disabled, dynamic SST features are disabled. But user should still be able to read the current core-power state, with legacy P-states. This will allow users to read current configuration with static SST enabled from BIOS. To address this, do not call disable_dynamic_sst_features() when the request is for reading the state. Intel-SIG: commit e8b4223dbf12 platform/x86: ISST: Allow reading core-power state on HWP disabled systems. 
Backport intel SST driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Reviewed-by: Kuppuswamy Sathyanarayanan Link: https://lore.kernel.org/r/20240229002659.1416623-1-srinivas.pandruvada@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 2662fbbddf0c..1d918000d72b 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -462,10 +462,10 @@ static long isst_if_core_power_state(void __user *argp) struct tpmi_per_power_domain_info *power_domain_info; struct isst_core_power core_power; - if (disable_dynamic_sst_features()) + if (copy_from_user(&core_power, argp, sizeof(core_power))) return -EFAULT; - if (copy_from_user(&core_power, argp, sizeof(core_power))) + if (core_power.get_set && disable_dynamic_sst_features()) return -EFAULT; power_domain_info = get_instance(core_power.socket_id, core_power.power_domain_id); -- Gitee From 9031e61ecdaea86d973b20db9aba0e0e8599baf4 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 23 Apr 2024 13:46:14 -0700 Subject: [PATCH 1905/2138] platform/x86: ISST: Use local variable for auxdev->dev MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit 8c5a689eef5b1c1eaddd17b0b1f2609d3f66c5b0 upstream. Define a local variable for &auxdev->dev and use to shorten length of lines. No functional change is done. Intel-SIG: commit 8c5a689eef5b platform/x86: ISST: Use local variable for auxdev->dev. 
Backport intel SST driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Suggested-by: Andy Shevchenko Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240423204619.3946901-6-srinivas.pandruvada@linux.intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- .../intel/speed_select_if/isst_tpmi_core.c | 27 ++++++++++--------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 1d918000d72b..4e09a5611aca 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -313,12 +313,11 @@ static int sst_add_perf_profiles(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info, int levels) { + struct device *dev = &auxdev->dev; u64 perf_level_offsets; int i; - pd_info->perf_levels = devm_kcalloc(&auxdev->dev, levels, - sizeof(struct perf_level), - GFP_KERNEL); + pd_info->perf_levels = devm_kcalloc(dev, levels, sizeof(struct perf_level), GFP_KERNEL); if (!pd_info->perf_levels) return 0; @@ -349,6 +348,7 @@ static int sst_add_perf_profiles(struct auxiliary_device *auxdev, static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info) { + struct device *dev = &auxdev->dev; int i, mask, levels; *((u64 *)&pd_info->sst_header) = readq(pd_info->sst_base); @@ -359,13 +359,13 @@ static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domai return -ENODEV; if (TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version) != ISST_MAJOR_VERSION) { - dev_err(&auxdev->dev, "SST: Unsupported major version:%lx\n", + dev_err(dev, "SST: Unsupported major version:%lx\n", TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version)); 
return -ENODEV; } if (TPMI_MINOR_VERSION(pd_info->sst_header.interface_version) != ISST_MINOR_VERSION) - dev_info(&auxdev->dev, "SST: Ignore: Unsupported minor version:%lx\n", + dev_info(dev, "SST: Ignore: Unsupported minor version:%lx\n", TPMI_MINOR_VERSION(pd_info->sst_header.interface_version)); /* Read SST CP Header */ @@ -1273,28 +1273,29 @@ int tpmi_sst_dev_add(struct auxiliary_device *auxdev) { bool read_blocked = 0, write_blocked = 0; struct intel_tpmi_plat_info *plat_info; + struct device *dev = &auxdev->dev; struct tpmi_sst_struct *tpmi_sst; int i, ret, pkg = 0, inst = 0; int num_resources; ret = tpmi_get_feature_status(auxdev, TPMI_ID_SST, &read_blocked, &write_blocked); if (ret) - dev_info(&auxdev->dev, "Can't read feature status: ignoring read/write blocked status\n"); + dev_info(dev, "Can't read feature status: ignoring read/write blocked status\n"); if (read_blocked) { - dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n"); + dev_info(dev, "Firmware has blocked reads, exiting\n"); return -ENODEV; } plat_info = tpmi_get_platform_data(auxdev); if (!plat_info) { - dev_err(&auxdev->dev, "No platform info\n"); + dev_err(dev, "No platform info\n"); return -EINVAL; } pkg = plat_info->package_id; if (pkg >= topology_max_packages()) { - dev_err(&auxdev->dev, "Invalid package id :%x\n", pkg); + dev_err(dev, "Invalid package id :%x\n", pkg); return -EINVAL; } @@ -1306,11 +1307,11 @@ int tpmi_sst_dev_add(struct auxiliary_device *auxdev) if (!num_resources) return -EINVAL; - tpmi_sst = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_sst), GFP_KERNEL); + tpmi_sst = devm_kzalloc(dev, sizeof(*tpmi_sst), GFP_KERNEL); if (!tpmi_sst) return -ENOMEM; - tpmi_sst->power_domain_info = devm_kcalloc(&auxdev->dev, num_resources, + tpmi_sst->power_domain_info = devm_kcalloc(dev, num_resources, sizeof(*tpmi_sst->power_domain_info), GFP_KERNEL); if (!tpmi_sst->power_domain_info) @@ -1331,13 +1332,13 @@ int tpmi_sst_dev_add(struct auxiliary_device *auxdev) 
tpmi_sst->power_domain_info[i].power_domain_id = i; tpmi_sst->power_domain_info[i].auxdev = auxdev; tpmi_sst->power_domain_info[i].write_blocked = write_blocked; - tpmi_sst->power_domain_info[i].sst_base = devm_ioremap_resource(&auxdev->dev, res); + tpmi_sst->power_domain_info[i].sst_base = devm_ioremap_resource(dev, res); if (IS_ERR(tpmi_sst->power_domain_info[i].sst_base)) return PTR_ERR(tpmi_sst->power_domain_info[i].sst_base); ret = sst_main(auxdev, &tpmi_sst->power_domain_info[i]); if (ret) { - devm_iounmap(&auxdev->dev, tpmi_sst->power_domain_info[i].sst_base); + devm_iounmap(dev, tpmi_sst->power_domain_info[i].sst_base); tpmi_sst->power_domain_info[i].sst_base = NULL; continue; } -- Gitee From fcac14bc87cdd78da61faf07f60745e44f7d3234 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 23 Apr 2024 13:46:15 -0700 Subject: [PATCH 1906/2138] platform/x86: ISST: Shorten the assignments for power_domain_info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit fe4211d21fee3672b251f9a535eaf0a0cf0b51e1 upstream. Instead of long lines for assignment to tpmi_sst->power_domain_info, use a local variable pd_info and assign later. Also move the assignment of number of resources after the assignment of pd_info. No functional change is expected. Intel-SIG: commit fe4211d21fee platform/x86: ISST: Shorten the assignments for power_domain_info. 
Backport intel SST driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240423204619.3946901-7-srinivas.pandruvada@linux.intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- .../intel/speed_select_if/isst_tpmi_core.c | 33 +++++++++---------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 4e09a5611aca..49d573fcbd72 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -1271,6 +1271,7 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, int tpmi_sst_dev_add(struct auxiliary_device *auxdev) { + struct tpmi_per_power_domain_info *pd_info; bool read_blocked = 0, write_blocked = 0; struct intel_tpmi_plat_info *plat_info; struct device *dev = &auxdev->dev; @@ -1311,35 +1312,31 @@ int tpmi_sst_dev_add(struct auxiliary_device *auxdev) if (!tpmi_sst) return -ENOMEM; - tpmi_sst->power_domain_info = devm_kcalloc(dev, num_resources, - sizeof(*tpmi_sst->power_domain_info), - GFP_KERNEL); - if (!tpmi_sst->power_domain_info) + pd_info = devm_kcalloc(dev, num_resources, sizeof(*pd_info), GFP_KERNEL); + if (!pd_info) return -ENOMEM; - tpmi_sst->number_of_power_domains = num_resources; - for (i = 0; i < num_resources; ++i) { struct resource *res; res = tpmi_get_resource_at_index(auxdev, i); if (!res) { - tpmi_sst->power_domain_info[i].sst_base = NULL; + pd_info[i].sst_base = NULL; continue; } - tpmi_sst->power_domain_info[i].package_id = pkg; - tpmi_sst->power_domain_info[i].power_domain_id = i; - tpmi_sst->power_domain_info[i].auxdev = auxdev; - tpmi_sst->power_domain_info[i].write_blocked = write_blocked; - 
tpmi_sst->power_domain_info[i].sst_base = devm_ioremap_resource(dev, res); - if (IS_ERR(tpmi_sst->power_domain_info[i].sst_base)) - return PTR_ERR(tpmi_sst->power_domain_info[i].sst_base); + pd_info[i].package_id = pkg; + pd_info[i].power_domain_id = i; + pd_info[i].auxdev = auxdev; + pd_info[i].write_blocked = write_blocked; + pd_info[i].sst_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pd_info[i].sst_base)) + return PTR_ERR(pd_info[i].sst_base); - ret = sst_main(auxdev, &tpmi_sst->power_domain_info[i]); + ret = sst_main(auxdev, &pd_info[i]); if (ret) { - devm_iounmap(dev, tpmi_sst->power_domain_info[i].sst_base); - tpmi_sst->power_domain_info[i].sst_base = NULL; + devm_iounmap(dev, pd_info[i].sst_base); + pd_info[i].sst_base = NULL; continue; } @@ -1350,6 +1347,8 @@ int tpmi_sst_dev_add(struct auxiliary_device *auxdev) return -ENODEV; tpmi_sst->package_id = pkg; + tpmi_sst->power_domain_info = pd_info; + tpmi_sst->number_of_power_domains = num_resources; auxiliary_set_drvdata(auxdev, tpmi_sst); mutex_lock(&isst_tpmi_dev_lock); -- Gitee From 72fc02f5f0390c642a18394b3730f848ed419f16 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 23 Apr 2024 13:46:16 -0700 Subject: [PATCH 1907/2138] platform/x86: ISST: Support partitioned systems MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit 9d1d36268f3d8276aefd1fad4e0a415dc8c36edd upstream. A partitioned system has two different PCI VSEC devices per package. A non-partitioned device has only one PCI VSEC device per package. The current implementation only supports non partitioned systems. Each partition maps a set of power domains. Other than reading from different MMIO regions, there is no change in the SST functionality. The scope of SST control is still per power domain. Hence user space does not need to be aware of existence of partitions. 
With partitions, existing per package information defined using struct tpmi_sst_struct is enhanced to store information for both partitions. A mapping function map_partition_power_domain_id() is introduced, which maps to correct partition and index. This mapping function is called in get_instance() and isst_if_clos_assoc(), before indexing into tpmi_sst_struct->power_domain_info[]. The TPMI core platform info provides partition id and compute die ID mask for each partition. Use this information to order power domains, so that compute dies are presented before IO dies to match hardware defined compute die ID for each CPU. Intel-SIG: commit 9d1d36268f3d platform/x86: ISST: Support partitioned systems. Backport intel SST driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Reviewed-by: Zhang Rui Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240423204619.3946901-8-srinivas.pandruvada@linux.intel.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- .../intel/speed_select_if/isst_tpmi_core.c | 299 ++++++++++++++++-- 1 file changed, 267 insertions(+), 32 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 49d573fcbd72..b8da6847622b 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -263,20 +264,33 @@ struct tpmi_per_power_domain_info { bool write_blocked; }; +/* Supported maximum partitions */ +#define SST_MAX_PARTITIONS 2 + /** * struct tpmi_sst_struct - Store sst info for a package * @package_id: Package id for this aux device instance * @number_of_power_domains: Number of power_domains pointed by power_domain_info pointer * @power_domain_info: Pointer to 
power domains information + * @cdie_mask: Mask of compute dies present in a partition from hardware. + * This mask is not present in the version 1 information header. + * @io_dies: Number of IO dies in a partition. This will be 0 for TPMI + * version 1 information header. + * @partition_mask: Mask of all partitions. + * @partition_mask_current: Current partition mask as some may have been unbound. * * This structure is used store full SST information for a package. - * Each package has a unique OOB PCI device, which enumerates TPMI. - * Each Package will have multiple power_domains. + * Each package has one or multiple OOB PCI devices. Each package can contain multiple + * power domains. */ struct tpmi_sst_struct { int package_id; - int number_of_power_domains; - struct tpmi_per_power_domain_info *power_domain_info; + struct tpmi_per_power_domain_info *power_domain_info[SST_MAX_PARTITIONS]; + u16 cdie_mask[SST_MAX_PARTITIONS]; + u8 number_of_power_domains[SST_MAX_PARTITIONS]; + u8 io_dies[SST_MAX_PARTITIONS]; + u8 partition_mask; + u8 partition_mask_current; }; /** @@ -387,6 +401,126 @@ static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domai return 0; } +static u8 isst_instance_count(struct tpmi_sst_struct *sst_inst) +{ + u8 i, max_part, count = 0; + + /* Partition mask starts from bit 0 and contains 1s only */ + max_part = hweight8(sst_inst->partition_mask); + for (i = 0; i < max_part; i++) + count += sst_inst->number_of_power_domains[i]; + + return count; +} + +/** + * map_cdies() - Map user domain ID to compute domain ID + * @sst_inst: TPMI Instance + * @id: User domain ID + * @partition: Resolved partition + * + * Helper function to map_partition_power_domain_id() to resolve compute + * domain ID and partition. Use hardware provided cdie_mask for a partition + * as is to resolve a compute domain ID. + * + * Return: %-EINVAL on error, otherwise mapped domain ID >= 0. 
+ */ +static int map_cdies(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition) +{ + u8 i, max_part; + + max_part = hweight8(sst_inst->partition_mask); + for (i = 0; i < max_part; i++) { + if (!(sst_inst->cdie_mask[i] & BIT(id))) + continue; + + *partition = i; + return id - ffs(sst_inst->cdie_mask[i]) + 1; + } + + return -EINVAL; +} + +/** + * map_partition_power_domain_id() - Map user domain ID to partition domain ID + * @sst_inst: TPMI Instance + * @id: User domain ID + * @partition: Resolved partition + * + * In a partitioned system a CPU package has two separate MMIO ranges (Under + * two PCI devices). But the CPU package compute die/power domain IDs are + * unique in a package. User space can get compute die/power domain ID from + * CPUID and MSR 0x54 for a CPU. So, those IDs need to be preserved even if + * they are present in two different partitions with its own order. + * + * For example for command ISST_IF_COUNT_TPMI_INSTANCES, the valid_mask + * is 111111b for a 4 compute and 2 IO dies system. This is presented as + * provided by the hardware in a non-partitioned system with the following + * order: + * I1-I0-C3-C2-C1-C0 + * Here: "C": for compute and "I" for IO die. + * Compute dies are always present first in TPMI instances, as they have + * to map to the real power domain/die ID of a system. In a non-partitioned + * system there is no way to identify compute and IO die boundaries from + * this driver without reading each CPU's mapping. + * + * The same order needs to be preserved, even if those compute dies are + * distributed among multiple partitions. For example: + * Partition 1 can contain: I1-C1-C0 + * Partition 2 can contain: I2-C3-C2 + * + * This will require a conversion of user space IDs to the actual index into + * array of stored power domains for each partition. 
For the above example + * this function will return partition and index as follows: + * + * ============= ========= ===== ======== + * User space ID Partition Index Die type + * ============= ========= ===== ======== + * 0 0 0 Compute + * 1 0 1 Compute + * 2 1 0 Compute + * 3 1 1 Compute + * 4 0 2 IO + * 5 1 2 IO + * ============= ========= ===== ======== + * + * Return: %-EINVAL on error, otherwise mapped domain ID >= 0. + */ +static int map_partition_power_domain_id(struct tpmi_sst_struct *sst_inst, u8 id, u8 *partition) +{ + u8 i, io_start_id, max_part; + + *partition = 0; + + /* If any PCI device for partition is unbound, treat this as failure */ + if (sst_inst->partition_mask != sst_inst->partition_mask_current) + return -EINVAL; + + max_part = hweight8(sst_inst->partition_mask); + + /* IO Index begin here */ + io_start_id = fls(sst_inst->cdie_mask[max_part - 1]); + + if (id < io_start_id) + return map_cdies(sst_inst, id, partition); + + for (i = 0; i < max_part; i++) { + u8 io_id; + + io_id = id - io_start_id; + if (io_id < sst_inst->io_dies[i]) { + u8 cdie_range; + + cdie_range = fls(sst_inst->cdie_mask[i]) - ffs(sst_inst->cdie_mask[i]) + 1; + *partition = i; + return cdie_range + io_id; + } + io_start_id += sst_inst->io_dies[i]; + } + + return -EINVAL; +} + /* * Map a package and power_domain id to SST information structure unique for a power_domain. * The caller should call under isst_tpmi_dev_lock. 
@@ -395,6 +529,7 @@ static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_dom { struct tpmi_per_power_domain_info *power_domain_info; struct tpmi_sst_struct *sst_inst; + u8 part; if (pkg_id < 0 || pkg_id > isst_common.max_index || pkg_id >= topology_max_packages()) @@ -404,10 +539,11 @@ static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_dom if (!sst_inst) return NULL; - if (power_domain_id < 0 || power_domain_id >= sst_inst->number_of_power_domains) + power_domain_id = map_partition_power_domain_id(sst_inst, power_domain_id, &part); + if (power_domain_id < 0) return NULL; - power_domain_info = &sst_inst->power_domain_info[power_domain_id]; + power_domain_info = &sst_inst->power_domain_info[part][power_domain_id]; if (power_domain_info && !power_domain_info->sst_base) return NULL; @@ -579,6 +715,7 @@ static long isst_if_clos_assoc(void __user *argp) struct tpmi_sst_struct *sst_inst; int offset, shift, cpu; u64 val, mask, clos; + u8 part; if (copy_from_user(&clos_assoc, ptr, sizeof(clos_assoc))) return -EFAULT; @@ -602,10 +739,11 @@ static long isst_if_clos_assoc(void __user *argp) sst_inst = isst_common.sst_inst[pkg_id]; - if (clos_assoc.power_domain_id > sst_inst->number_of_power_domains) + punit_id = map_partition_power_domain_id(sst_inst, punit_id, &part); + if (punit_id < 0) return -EINVAL; - power_domain_info = &sst_inst->power_domain_info[punit_id]; + power_domain_info = &sst_inst->power_domain_info[part][punit_id]; if (assoc_cmds.get_set && power_domain_info->write_blocked) return -EPERM; @@ -1134,18 +1272,28 @@ static int isst_if_get_tpmi_instance_count(void __user *argp) if (tpmi_inst.socket_id >= topology_max_packages()) return -EINVAL; - tpmi_inst.count = isst_common.sst_inst[tpmi_inst.socket_id]->number_of_power_domains; - sst_inst = isst_common.sst_inst[tpmi_inst.socket_id]; + + tpmi_inst.count = isst_instance_count(sst_inst); + tpmi_inst.valid_mask = 0; - for (i = 0; i < 
sst_inst->number_of_power_domains; ++i) { + for (i = 0; i < tpmi_inst.count; i++) { struct tpmi_per_power_domain_info *pd_info; + u8 part; + int pd; + + pd = map_partition_power_domain_id(sst_inst, i, &part); + if (pd < 0) + continue; - pd_info = &sst_inst->power_domain_info[i]; + pd_info = &sst_inst->power_domain_info[part][pd]; if (pd_info->sst_base) tpmi_inst.valid_mask |= BIT(i); } + if (!tpmi_inst.valid_mask) + tpmi_inst.count = 0; + if (copy_to_user(argp, &tpmi_inst, sizeof(tpmi_inst))) return -EFAULT; @@ -1276,8 +1424,11 @@ int tpmi_sst_dev_add(struct auxiliary_device *auxdev) struct intel_tpmi_plat_info *plat_info; struct device *dev = &auxdev->dev; struct tpmi_sst_struct *tpmi_sst; - int i, ret, pkg = 0, inst = 0; - int num_resources; + u8 i, num_resources, io_die_cnt; + int ret, pkg = 0, inst = 0; + bool first_enum = false; + u16 cdie_mask; + u8 partition; ret = tpmi_get_feature_status(auxdev, TPMI_ID_SST, &read_blocked, &write_blocked); if (ret) @@ -1300,21 +1451,59 @@ int tpmi_sst_dev_add(struct auxiliary_device *auxdev) return -EINVAL; } - if (isst_common.sst_inst[pkg]) - return -EEXIST; + partition = plat_info->partition; + if (partition >= SST_MAX_PARTITIONS) { + dev_err(&auxdev->dev, "Invalid partition :%x\n", partition); + return -EINVAL; + } num_resources = tpmi_get_resource_count(auxdev); if (!num_resources) return -EINVAL; - tpmi_sst = devm_kzalloc(dev, sizeof(*tpmi_sst), GFP_KERNEL); - if (!tpmi_sst) - return -ENOMEM; + mutex_lock(&isst_tpmi_dev_lock); + + if (isst_common.sst_inst[pkg]) { + tpmi_sst = isst_common.sst_inst[pkg]; + } else { + /* + * tpmi_sst instance is for a package. So needs to be + * allocated only once for both partitions. We can't use + * devm_* allocation here as each partition is a + * different device, which can be unbound. 
+ */ + tpmi_sst = kzalloc(sizeof(*tpmi_sst), GFP_KERNEL); + if (!tpmi_sst) { + ret = -ENOMEM; + goto unlock_exit; + } + first_enum = true; + } + + ret = 0; pd_info = devm_kcalloc(dev, num_resources, sizeof(*pd_info), GFP_KERNEL); - if (!pd_info) - return -ENOMEM; + if (!pd_info) { + ret = -ENOMEM; + goto unlock_free; + } + + /* Get the IO die count, if cdie_mask is present */ + if (plat_info->cdie_mask) { + u8 cdie_range; + + cdie_mask = plat_info->cdie_mask; + cdie_range = fls(cdie_mask) - ffs(cdie_mask) + 1; + io_die_cnt = num_resources - cdie_range; + } else { + /* + * This is a synthetic mask, careful when assuming that + * they are compute dies only. + */ + cdie_mask = (1 << num_resources) - 1; + io_die_cnt = 0; + } for (i = 0; i < num_resources; ++i) { struct resource *res; @@ -1330,11 +1519,20 @@ int tpmi_sst_dev_add(struct auxiliary_device *auxdev) pd_info[i].auxdev = auxdev; pd_info[i].write_blocked = write_blocked; pd_info[i].sst_base = devm_ioremap_resource(dev, res); - if (IS_ERR(pd_info[i].sst_base)) - return PTR_ERR(pd_info[i].sst_base); + if (IS_ERR(pd_info[i].sst_base)) { + ret = PTR_ERR(pd_info[i].sst_base); + goto unlock_free; + } ret = sst_main(auxdev, &pd_info[i]); if (ret) { + /* + * This entry is not valid, hardware can partially + * populate dies. In this case MMIO will have 0xFFs. + * Also possible some pre-production hardware has + * invalid data. But don't fail and continue to use + * other dies with valid data. 
+ */ devm_iounmap(dev, pd_info[i].sst_base); pd_info[i].sst_base = NULL; continue; @@ -1343,30 +1541,53 @@ int tpmi_sst_dev_add(struct auxiliary_device *auxdev) ++inst; } - if (!inst) - return -ENODEV; + if (!inst) { + ret = -ENODEV; + goto unlock_free; + } tpmi_sst->package_id = pkg; - tpmi_sst->power_domain_info = pd_info; - tpmi_sst->number_of_power_domains = num_resources; + + tpmi_sst->power_domain_info[partition] = pd_info; + tpmi_sst->number_of_power_domains[partition] = num_resources; + tpmi_sst->cdie_mask[partition] = cdie_mask; + tpmi_sst->io_dies[partition] = io_die_cnt; + tpmi_sst->partition_mask |= BIT(partition); + tpmi_sst->partition_mask_current |= BIT(partition); + auxiliary_set_drvdata(auxdev, tpmi_sst); - mutex_lock(&isst_tpmi_dev_lock); if (isst_common.max_index < pkg) isst_common.max_index = pkg; isst_common.sst_inst[pkg] = tpmi_sst; + +unlock_free: + if (ret && first_enum) + kfree(tpmi_sst); +unlock_exit: mutex_unlock(&isst_tpmi_dev_lock); - return 0; + return ret; } EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, INTEL_TPMI_SST); void tpmi_sst_dev_remove(struct auxiliary_device *auxdev) { struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev); + struct intel_tpmi_plat_info *plat_info; + + plat_info = tpmi_get_platform_data(auxdev); + if (!plat_info) + return; mutex_lock(&isst_tpmi_dev_lock); - isst_common.sst_inst[tpmi_sst->package_id] = NULL; + tpmi_sst->power_domain_info[plat_info->partition] = NULL; + tpmi_sst->partition_mask_current &= ~BIT(plat_info->partition); + /* Free the package instance when the all partitions are removed */ + if (!tpmi_sst->partition_mask_current) { + kfree(tpmi_sst); + isst_common.sst_inst[tpmi_sst->package_id] = NULL; + } mutex_unlock(&isst_tpmi_dev_lock); } EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, INTEL_TPMI_SST); @@ -1374,9 +1595,16 @@ EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, INTEL_TPMI_SST); void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev) { struct tpmi_sst_struct *tpmi_sst = 
auxiliary_get_drvdata(auxdev); - struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info; + struct tpmi_per_power_domain_info *power_domain_info; + struct intel_tpmi_plat_info *plat_info; void __iomem *cp_base; + plat_info = tpmi_get_platform_data(auxdev); + if (!plat_info) + return; + + power_domain_info = tpmi_sst->power_domain_info[plat_info->partition]; + cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset; power_domain_info->saved_sst_cp_control = readq(cp_base + SST_CP_CONTROL_OFFSET); @@ -1395,9 +1623,16 @@ EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_suspend, INTEL_TPMI_SST); void tpmi_sst_dev_resume(struct auxiliary_device *auxdev) { struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev); - struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info; + struct tpmi_per_power_domain_info *power_domain_info; + struct intel_tpmi_plat_info *plat_info; void __iomem *cp_base; + plat_info = tpmi_get_platform_data(auxdev); + if (!plat_info) + return; + + power_domain_info = tpmi_sst->power_domain_info[plat_info->partition]; + cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset; writeq(power_domain_info->saved_sst_cp_control, cp_base + SST_CP_CONTROL_OFFSET); -- Gitee From f1339b6a3c4c3277a51933c3cd871e5013e41970 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 23 Apr 2024 13:46:17 -0700 Subject: [PATCH 1908/2138] platform/x86: ISST: Use in_range() to check package ID validity MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit e4e365b43460f9b2421164b6b661d138f87edad3 upstream. Use in_range() macro to simplify range check. No functional impact is expected. Intel-SIG: commit e4e365b43460 platform/x86: ISST: Use in_range() to check package ID validity. 
Backport Intel SST driver for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Suggested-by: Andy Shevchenko Reviewed-by: Andy Shevchenko Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240423204619.3946901-9-srinivas.pandruvada@linux.intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index b8da6847622b..e75fb9eba598 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -531,8 +531,7 @@ static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_dom struct tpmi_sst_struct *sst_inst; u8 part; - if (pkg_id < 0 || pkg_id > isst_common.max_index || - pkg_id >= topology_max_packages()) + if (!in_range(pkg_id, 0, topology_max_packages()) || pkg_id > isst_common.max_index) return NULL; sst_inst = isst_common.sst_inst[pkg_id]; -- Gitee From 2910616742adecf9a57fe8986346c8b1bebc7fdc Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 23 Apr 2024 13:46:18 -0700 Subject: [PATCH 1909/2138] platform/x86: ISST: Add dev_fmt ANBZ: #12076 commit afad97495836774e882b475d6569df5b9e95e73e upstream. Add dev_fmt for formatting log messages. No functional impact is expected. Intel-SIG: commit afad97495836 platform/x86: ISST: Add dev_fmt. 
Backport intel SST driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Suggested-by: Andy Shevchenko Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20240423204619.3946901-10-srinivas.pandruvada@linux.intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index e75fb9eba598..039333eac71a 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -17,6 +17,8 @@ * the hardware mapping. */ +#define dev_fmt(fmt) "tpmi_sst: " fmt + #include #include #include -- Gitee From 024de9a44063ae53ce1db5abfee9e12fe2a2699c Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 23 Apr 2024 13:46:19 -0700 Subject: [PATCH 1910/2138] platform/x86: ISST: Add missing MODULE_DESCRIPTION ANBZ: #12076 commit 05857e1f119e8f5300d0ae997594dacf5693a05c upstream. Add missing MODULE_DESCRIPTION() to ISST modules. Intel-SIG: commit 05857e1f119e platform/x86: ISST: Add missing MODULE_DESCRIPTION. 
Backport intel SST driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20240423204619.3946901-11-srinivas.pandruvada@linux.intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 1 + drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 9040a3d39924..857cc8697942 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -841,4 +841,5 @@ void isst_if_cdev_unregister(int device_type) } EXPORT_SYMBOL_GPL(isst_if_cdev_unregister); +MODULE_DESCRIPTION("ISST common interface module"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 039333eac71a..6bcbb97b0101 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -1705,4 +1705,5 @@ EXPORT_SYMBOL_NS_GPL(tpmi_sst_exit, INTEL_TPMI_SST); MODULE_IMPORT_NS(INTEL_TPMI); MODULE_IMPORT_NS(INTEL_TPMI_POWER_DOMAIN); +MODULE_DESCRIPTION("ISST TPMI interface module"); MODULE_LICENSE("GPL"); -- Gitee From f8d6b7702362abca473e524ca4c12b4c6ae3b9d9 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 30 Apr 2024 15:10:52 -0700 Subject: [PATCH 1911/2138] platform/x86: ISST: Support SST-BF and SST-TF per level ANBZ: #12076 commit 76f09e22027fc0dbec1e9c82898d9059b4455df6 upstream. SST SST-BF and SST-TF can be enabled/disabled per SST-PP level. 
So return a mask of all levels, where the feature is supported, instead of just for level 0. Since the return value returns all levels mask, not just level 0, update API version. Intel-SIG: commit 76f09e22027f platform/x86: ISST: Support SST-BF and SST-TF per level. Backport intel SST driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Reviewed-by: Zhang Rui Link: https://lore.kernel.org/r/20240430221052.15825-1-srinivas.pandruvada@linux.intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- .../intel/speed_select_if/isst_tpmi_core.c | 38 +++++++++++++++---- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 6bcbb97b0101..7bac7841ff0a 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -847,6 +847,8 @@ static int isst_if_get_perf_level(void __user *argp) { struct isst_perf_level_info perf_level; struct tpmi_per_power_domain_info *power_domain_info; + unsigned long level_mask; + u8 level, support; if (copy_from_user(&perf_level, argp, sizeof(perf_level))) return -EFAULT; @@ -866,12 +868,34 @@ static int isst_if_get_perf_level(void __user *argp) SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE) perf_level.enabled = !!(power_domain_info->sst_header.cap_mask & BIT(1)); - _read_bf_level_info("bf_support", perf_level.sst_bf_support, 0, 0, - SST_BF_FEATURE_SUPPORTED_START, SST_BF_FEATURE_SUPPORTED_WIDTH, - SST_MUL_FACTOR_NONE); - _read_tf_level_info("tf_support", perf_level.sst_tf_support, 0, 0, - SST_TF_FEATURE_SUPPORTED_START, SST_TF_FEATURE_SUPPORTED_WIDTH, - SST_MUL_FACTOR_NONE); + level_mask = perf_level.level_mask; + perf_level.sst_bf_support = 0; 
+ for_each_set_bit(level, &level_mask, BITS_PER_BYTE) { + /* + * Read BF support for a level. Read output is updated + * to "support" variable by the below macro. + */ + _read_bf_level_info("bf_support", support, level, 0, SST_BF_FEATURE_SUPPORTED_START, + SST_BF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE); + + /* If supported set the bit for the level */ + if (support) + perf_level.sst_bf_support |= BIT(level); + } + + perf_level.sst_tf_support = 0; + for_each_set_bit(level, &level_mask, BITS_PER_BYTE) { + /* + * Read TF support for a level. Read output is updated + * to "support" variable by the below macro. + */ + _read_tf_level_info("tf_support", support, level, 0, SST_TF_FEATURE_SUPPORTED_START, + SST_TF_FEATURE_SUPPORTED_WIDTH, SST_MUL_FACTOR_NONE); + + /* If supported set the bit for the level */ + if (support) + perf_level.sst_tf_support |= BIT(level); + } if (copy_to_user(argp, &perf_level, sizeof(perf_level))) return -EFAULT; @@ -1648,7 +1672,7 @@ void tpmi_sst_dev_resume(struct auxiliary_device *auxdev) } EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_resume, INTEL_TPMI_SST); -#define ISST_TPMI_API_VERSION 0x02 +#define ISST_TPMI_API_VERSION 0x03 int tpmi_sst_init(void) { -- Gitee From 8a8dd0bfefd855534addda21492c2d71d3ec9d2a Mon Sep 17 00:00:00 2001 From: Harshit Mogalapalli Date: Fri, 17 May 2024 07:49:46 -0700 Subject: [PATCH 1912/2138] platform/x86: ISST: fix use-after-free in tpmi_sst_dev_remove() ANBZ: #12076 commit a4edf675ba3357f60e2ee310acc15eb9cd5a8ae0 upstream. In tpmi_sst_dev_remove(), tpmi_sst is dereferenced after being freed. Fix this by reordering the kfree() post the dereference. Intel-SIG: commit a4edf675ba33 platform/x86: ISST: fix use-after-free in tpmi_sst_dev_remove(). 
Backport intel SST driver update for 6.6 from 6.11 Fixes: 9d1d36268f3d ("platform/x86: ISST: Support partitioned systems") Signed-off-by: Harshit Mogalapalli Reviewed-by: Hans de Goede Acked-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20240517144946.289615-1-harshit.m.mogalapalli@oracle.com Signed-off-by: Hans de Goede [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 7bac7841ff0a..7fa360073f6e 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -1610,8 +1610,8 @@ void tpmi_sst_dev_remove(struct auxiliary_device *auxdev) tpmi_sst->partition_mask_current &= ~BIT(plat_info->partition); /* Free the package instance when the all partitions are removed */ if (!tpmi_sst->partition_mask_current) { - kfree(tpmi_sst); isst_common.sst_inst[tpmi_sst->package_id] = NULL; + kfree(tpmi_sst); } mutex_unlock(&isst_tpmi_dev_lock); } -- Gitee From c38c468b628fa22bc605428db9e7202dc9aa0203 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Fri, 31 May 2024 01:35:46 -0700 Subject: [PATCH 1913/2138] platform/x86: ISST: Add model specific loading for common module MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit 1630dc626c87b300627fe7591f4f63f8f136f935 upstream. SST common module is loaded when model specific or TPMI SST driver registers for services. There are model specific features used in SST common modules which are checked with a CPU model list. So, this module is model specific. 
There are some use cases where loading the common module independently only on the supported CPU models helps. The first use case is for preventing SST TPMI module loading if the model specific features are not implemented. The second use case for presenting information to user space when SST is used in OOB (Out of Band) mode. 1. With TPMI, SST interface is architectural. This means that no need to add new PCI device IDs for new CPU models. This means that there can be lag in adding CPU models for the model specific features in the common module. For example, before adding CPU model to GRANITERAPIDS_D to hpm_cpu_ids[], SST is still functional for some features and but will get/set wrong data for features like SST-CP. This is because IOCTL ISST_IF_GET_PHY_ID, will not give correct mapping for newer CPU models. So adding explicit model check during load time will prevent such cases. For unsupported CPU models, common driver will fail to load and hence dependent modules will not be loaded. 2. When the SST TPMI features are controlled by some OOB agent (not from OS interface), even if the CPU model is supported, there will be no user space interface available for tools as SST TPMI modules will not be loaded. User space interface is registered when TPMI modules call isst_if_cdev_register(). Even in this case user space orchestrator software needs to get power domain information to schedule workload and get/set turbo ratio limits. This information is exposed by the common module using IOCTLs ISST_IF_GET_PHY_ID and ISST_IF_MSR_COMMAND respectively. Since the user space MSR access can be locked, direct MSR access from the user space is not an option using /dev/cpu/*/msr. Converge all the existing model checks to one common place and use driver data to differentiate. On successful model check call isst_misc_reg(). Intel-SIG: commit 1630dc626c87 platform/x86: ISST: Add model specific loading for common module. 
Backport intel SST driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Reviewed-by: Zhang Rui Link: https://lore.kernel.org/r/20240531083554.1313110-2-srinivas.pandruvada@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- .../intel/speed_select_if/isst_if_common.c | 63 +++++++++++-------- 1 file changed, 37 insertions(+), 26 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 857cc8697942..4ae2921f5c45 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -720,14 +720,6 @@ static struct miscdevice isst_if_char_driver = { .fops = &isst_if_char_driver_ops, }; -static const struct x86_cpu_id hpm_cpu_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, NULL), - X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, NULL), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL), - {} -}; - static int isst_misc_reg(void) { mutex_lock(&punit_misc_dev_reg_lock); @@ -735,12 +727,6 @@ static int isst_misc_reg(void) goto unlock_exit; if (!misc_usage_count) { - const struct x86_cpu_id *id; - - id = x86_match_cpu(hpm_cpu_ids); - if (id) - isst_hpm_support = true; - misc_device_ret = isst_if_cpu_info_init(); if (misc_device_ret) goto unlock_exit; @@ -788,8 +774,6 @@ static void isst_misc_unreg(void) */ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb) { - int ret; - if (device_type >= ISST_IF_DEV_MAX) return -EINVAL; @@ -807,15 +791,6 @@ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb) punit_callbacks[device_type].registered = 1; mutex_unlock(&punit_misc_dev_open_lock); - ret = isst_misc_reg(); - if (ret) { - /* - * No need of mutex as 
the misc device register failed - * as no one can open device yet. Hence no contention. - */ - punit_callbacks[device_type].registered = 0; - return ret; - } return 0; } EXPORT_SYMBOL_GPL(isst_if_cdev_register); @@ -831,7 +806,6 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register); */ void isst_if_cdev_unregister(int device_type) { - isst_misc_unreg(); mutex_lock(&punit_misc_dev_open_lock); punit_callbacks[device_type].def_ioctl = NULL; punit_callbacks[device_type].registered = 0; @@ -841,5 +815,42 @@ void isst_if_cdev_unregister(int device_type) } EXPORT_SYMBOL_GPL(isst_if_cdev_unregister); +#define SST_HPM_SUPPORTED 0x01 + +static const struct x86_cpu_id isst_cpu_ids[] = { + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, SST_HPM_SUPPORTED), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, SST_HPM_SUPPORTED), + X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, 0), + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, SST_HPM_SUPPORTED), + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, SST_HPM_SUPPORTED), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0), + X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0), + X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 0), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, 0), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, isst_cpu_ids); + +static int __init isst_if_common_init(void) +{ + const struct x86_cpu_id *id; + + id = x86_match_cpu(isst_cpu_ids); + if (!id) + return -ENODEV; + + if (id->driver_data == SST_HPM_SUPPORTED) + isst_hpm_support = true; + + return isst_misc_reg(); +} +module_init(isst_if_common_init) + +static void __exit isst_if_common_exit(void) +{ + isst_misc_unreg(); +} +module_exit(isst_if_common_exit) + MODULE_DESCRIPTION("ISST common interface module"); MODULE_LICENSE("GPL v2"); -- Gitee From 9b8a505244635276e9bc83252722ab72b8878545 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Fri, 31 May 2024 01:35:47 -0700 Subject: [PATCH 1914/2138] platform/x86: ISST: Avoid some SkyLake server models MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit ANBZ: #12076 commit 3ea025fb4b5f1a0b66df25eba50b2a1071f01080 upstream. Some SkyLake server variants don't support any SST functionality. No use of providing any SST related interfaces on them. All supported SkyLake servers provide mailbox interface via MSR. So check for the presence of MSR 0xB0 and 0xB1. If not present don't load common module. Move defines for MSR_OS_MAILBOX_INTERFACE and MSR_OS_MAILBOX_DATA to common header file to avoid duplicating them. Intel-SIG: commit 3ea025fb4b5f platform/x86: ISST: Avoid some SkyLake server models. Backport intel SST driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20240531083554.1313110-3-srinivas.pandruvada@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- .../x86/intel/speed_select_if/isst_if_common.c | 13 +++++++++++-- .../x86/intel/speed_select_if/isst_if_common.h | 3 +++ .../x86/intel/speed_select_if/isst_if_mbox_msr.c | 2 -- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 4ae2921f5c45..4e4929c66e80 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -816,6 +816,7 @@ void isst_if_cdev_unregister(int device_type) EXPORT_SYMBOL_GPL(isst_if_cdev_unregister); #define SST_HPM_SUPPORTED 0x01 +#define SST_MBOX_SUPPORTED 0x02 static const struct x86_cpu_id isst_cpu_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, SST_HPM_SUPPORTED), @@ -826,7 +827,7 @@ static const struct x86_cpu_id isst_cpu_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 0), - 
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, 0), + X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, SST_MBOX_SUPPORTED), {} }; MODULE_DEVICE_TABLE(x86cpu, isst_cpu_ids); @@ -839,8 +840,16 @@ static int __init isst_if_common_init(void) if (!id) return -ENODEV; - if (id->driver_data == SST_HPM_SUPPORTED) + if (id->driver_data == SST_HPM_SUPPORTED) { isst_hpm_support = true; + } else if (id->driver_data == SST_MBOX_SUPPORTED) { + u64 data; + + /* Can fail only on some Skylake-X generations */ + if (rdmsrl_safe(MSR_OS_MAILBOX_INTERFACE, &data) || + rdmsrl_safe(MSR_OS_MAILBOX_DATA, &data)) + return -ENODEV; + } return isst_misc_reg(); } diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h index 1004f2c9cca8..378055fe1d16 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.h +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.h @@ -16,6 +16,9 @@ #define PCI_DEVICE_ID_INTEL_RAPL_PRIO_DEVID_1 0x3251 #define PCI_DEVICE_ID_INTEL_CFG_MBOX_DEVID_1 0x3259 +#define MSR_OS_MAILBOX_INTERFACE 0xB0 +#define MSR_OS_MAILBOX_DATA 0xB1 + /* * Validate maximum commands in a single request. 
* This is enough to handle command to every core in one ioctl, or all diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c index 1b6eab071068..48b608eaca5f 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_mbox_msr.c @@ -21,8 +21,6 @@ #include "isst_if_common.h" -#define MSR_OS_MAILBOX_INTERFACE 0xB0 -#define MSR_OS_MAILBOX_DATA 0xB1 #define MSR_OS_MAILBOX_BUSY_BIT 31 /* -- Gitee From 229620e43a8654e2415ea9a3f2185bb714aff6a0 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Fri, 31 May 2024 01:35:48 -0700 Subject: [PATCH 1915/2138] platform/x86: ISST: Use only TPMI interface when present MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit 2f9514f005530502452c34295e77bdfb395b5bc6 upstream. When the TPMI interface is present, use this interface instead of legacy. On some systems legacy IO device is also present. Using both interfaces together is confusing and may set the hardware in inconsistent state. When TPMI interface is present, don't load legacy drivers. Intel-SIG: commit 2f9514f00553 platform/x86: ISST: Use only TPMI interface when present. 
Backport intel SST driver update for 6.6 from 6.11 Signed-off-by: Srinivas Pandruvada Reviewed-by: Andy Shevchenko Reviewed-by: Zhang Rui Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240531083554.1313110-4-srinivas.pandruvada@linux.intel.com Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 4e4929c66e80..8a691595a6fb 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -777,6 +777,9 @@ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb) if (device_type >= ISST_IF_DEV_MAX) return -EINVAL; + if (device_type < ISST_IF_DEV_TPMI && isst_hpm_support) + return -ENODEV; + mutex_lock(&punit_misc_dev_open_lock); /* Device is already open, we don't want to add new callbacks */ if (misc_device_open) { -- Gitee From 8862c7c84f0dfebca53ca017661277c6619cc04b Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Fri, 16 Aug 2024 09:36:26 -0700 Subject: [PATCH 1916/2138] platform/x86: ISST: Fix return value on last invalid resource MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit 46ee21e9f59205e54943dfe51b2dc8a9352ca37d upstream. When only the last resource is invalid, tpmi_sst_dev_add() is returing error even if there are other valid resources before. This function should return error when there are no valid resources. Here tpmi_sst_dev_add() is returning "ret" variable. But this "ret" variable contains the failure status of last call to sst_main(), which failed for the invalid resource. 
But there may be other valid resources before the last entry. To address this, do not update "ret" variable for sst_main() return status. If there are no valid resources, it is already checked for by !inst below the loop and -ENODEV is returned. Intel-SIG: commit 46ee21e9f592 platform/x86: ISST: Fix return value on last invalid resource. Backport intel SST driver update for 6.6 from 6.11 Fixes: 9d1d36268f3d ("platform/x86: ISST: Support partitioned systems") Signed-off-by: Srinivas Pandruvada Cc: stable@vger.kernel.org # 6.10+ Link: https://lore.kernel.org/r/20240816163626.415762-1-srinivas.pandruvada@linux.intel.com Reviewed-by: Ilpo Järvinen Signed-off-by: Ilpo Järvinen [ Yingbao Jia: amend commit log ] Signed-off-by: Yingbao Jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c index 7fa360073f6e..404582307109 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_tpmi_core.c @@ -1549,8 +1549,7 @@ int tpmi_sst_dev_add(struct auxiliary_device *auxdev) goto unlock_free; } - ret = sst_main(auxdev, &pd_info[i]); - if (ret) { + if (sst_main(auxdev, &pd_info[i])) { /* * This entry is not valid, hardware can partially * populate dies. In this case MMIO will have 0xFFs. -- Gitee From 90d8fa86071e6b995950cf1a7169248f1c0f98ec Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Wed, 31 Jul 2024 11:42:56 -0700 Subject: [PATCH 1917/2138] platform/x86: ISST: Simplify isst_misc_reg() and isst_misc_unreg() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #12076 commit 440814caedb0e33c56f0478d7fa5b54479013904 upstream. 
After commit '1630dc626c87 ("platform/x86: ISST: Add model specific loading for common module")' isst_misc_reg() and isst_misc_unreg() can be simplified. Since these functions are only called during module_init() and module_exit() respectively, there is no contention while calling misc_register()/misc_deregister or isst_if_cpu_info_init()/ isst_if_cpu_info_exit(). Hence remove mutex and reference counting. Intel-SIG: commit 440814caedb0 platform/x86: ISST: Simplify isst_misc_reg() and isst_misc_unreg(). Backport Intel speed select ISST driver support on TPMI. Signed-off-by: Srinivas Pandruvada Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240731184256.1852840-1-srinivas.pandruvada@linux.intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede [ yingbao jia: amend commit log ] Signed-off-by: yingbao jia Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4181 --- .../intel/speed_select_if/isst_if_common.c | 42 +++++-------------- 1 file changed, 11 insertions(+), 31 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 8a691595a6fb..bd51dee9418f 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -653,10 +653,6 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, /* Lock to prevent module registration when already opened by user space */ static DEFINE_MUTEX(punit_misc_dev_open_lock); -/* Lock to allow one shared misc device for all ISST interfaces */ -static DEFINE_MUTEX(punit_misc_dev_reg_lock); -static int misc_usage_count; -static int misc_device_ret; static int misc_device_open; static int isst_if_open(struct inode *inode, struct file *file) @@ -722,39 +718,23 @@ static struct miscdevice isst_if_char_driver = { static int isst_misc_reg(void) { - mutex_lock(&punit_misc_dev_reg_lock); - if (misc_device_ret) - goto 
unlock_exit; - - if (!misc_usage_count) { - misc_device_ret = isst_if_cpu_info_init(); - if (misc_device_ret) - goto unlock_exit; - - misc_device_ret = misc_register(&isst_if_char_driver); - if (misc_device_ret) { - isst_if_cpu_info_exit(); - goto unlock_exit; - } - } - misc_usage_count++; + int ret; -unlock_exit: - mutex_unlock(&punit_misc_dev_reg_lock); + ret = isst_if_cpu_info_init(); + if (ret) + return ret; - return misc_device_ret; + ret = misc_register(&isst_if_char_driver); + if (ret) + isst_if_cpu_info_exit(); + + return ret; } static void isst_misc_unreg(void) { - mutex_lock(&punit_misc_dev_reg_lock); - if (misc_usage_count) - misc_usage_count--; - if (!misc_usage_count && !misc_device_ret) { - misc_deregister(&isst_if_char_driver); - isst_if_cpu_info_exit(); - } - mutex_unlock(&punit_misc_dev_reg_lock); + misc_deregister(&isst_if_char_driver); + isst_if_cpu_info_exit(); } /** -- Gitee From d89b7ca1e6fee4dc7abacba8b2b05e30f1d272d9 Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Thu, 22 Dec 2022 17:00:08 +0800 Subject: [PATCH 1918/2138] anolis: mm: pagecache_limit: add this feature support ANBZ: #12235 ANBZ: #3907 The "pagecache limit" feature is used to solve the problem that the system has large number memory of pagecache, which may cause insufficient remaining memory in the system, and, service performance and usage are affected, therefore, in some scenarios, you need to limit the pagecache usage to ensure that the remaining memory can meet service requirements. There we just add some basic interfaces for us to optionally enable or disable the "pagecache_limit" function. 
Signed-off-by: Xin Hao Reviewed-by: Xu Yu Reviewed-by: Rongwei Wang Reviewed-by: Kaihao Bai Link: https://gitee.com/anolis/cloud-kernel/pulls/1157 Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4207 --- include/linux/pagecache_limit.h | 27 +++++++++ mm/Kconfig | 12 ++++ mm/Makefile | 1 + mm/pagecache_limit.c | 100 ++++++++++++++++++++++++++++++++ 4 files changed, 140 insertions(+) create mode 100644 include/linux/pagecache_limit.h create mode 100644 mm/pagecache_limit.c diff --git a/include/linux/pagecache_limit.h b/include/linux/pagecache_limit.h new file mode 100644 index 000000000000..427620d9950c --- /dev/null +++ b/include/linux/pagecache_limit.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _PAGECACHE_LIMIT_H +#define _PAGECACHE_LIMIT_H + +#ifdef CONFIG_PAGECACHE_LIMIT + +DECLARE_STATIC_KEY_FALSE(pagecache_limit_enabled_key); + +enum pgcache_limit_reclaim_type { + /* per-memcg or global pagecaeche reclaim defaut way is async */ + PGCACHE_RECLAIM_ASYNC = 0, + PGCACHE_RECLAIM_DIRECT +}; + +static inline bool pagecache_limit_enabled(void) +{ + return static_branch_unlikely(&pagecache_limit_enabled_key); +} + +#else +static inline bool pagecache_limit_enabled(void) +{ + return false; +} +#endif +#endif diff --git a/mm/Kconfig b/mm/Kconfig index 1f5ad4ec7f1d..b6338333d792 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -1296,4 +1296,16 @@ config ASYNC_FORK source "mm/damon/Kconfig" +config PAGECACHE_LIMIT + bool "Enable pagecache limit function" + depends on MMU && MEMCG + default n + help + This feature is used to solve the problem that the system has large number memory of + pagecache, which may cause insufficient remaining memory in the system, and, service + performance and usage are affected. Therefore, in some scenarios, you need to limit + the page cache usage to ensure that the remaining memory can meet service requirements. + + If unsure, say N. 
+ endmenu diff --git a/mm/Makefile b/mm/Makefile index 555e4d70bfc9..8ecb638c9cc4 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -139,3 +139,4 @@ obj-$(CONFIG_HAVE_BOOTMEM_INFO_NODE) += bootmem_info.o obj-$(CONFIG_GENERIC_IOREMAP) += ioremap.o obj-$(CONFIG_SHRINKER_DEBUG) += shrinker_debug.o obj-$(CONFIG_ASYNC_FORK) += async_fork.o +obj-$(CONFIG_PAGECACHE_LIMIT) += pagecache_limit.o diff --git a/mm/pagecache_limit.c b/mm/pagecache_limit.c new file mode 100644 index 000000000000..23ec3a00dab1 --- /dev/null +++ b/mm/pagecache_limit.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define pr_fmt(fmt) "pagecache_limit: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +DEFINE_STATIC_KEY_FALSE(pagecache_limit_enabled_key); + +static int __init setup_pagecache_limit(char *s) +{ + if (!strcmp(s, "1")) + static_branch_enable(&pagecache_limit_enabled_key); + else if (!strcmp(s, "0")) + static_branch_disable(&pagecache_limit_enabled_key); + return 1; +} +__setup("pagecache_limit=", setup_pagecache_limit); + +#ifdef CONFIG_SYSFS +static ssize_t pagecache_limit_enabled_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", !!static_branch_unlikely(&pagecache_limit_enabled_key)); +} + +static ssize_t pagecache_limit_enabled_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + static DEFINE_MUTEX(mutex); + ssize_t ret = count; + + mutex_lock(&mutex); + + if (!strncmp(buf, "1", 1)) + static_branch_enable(&pagecache_limit_enabled_key); + else if (!strncmp(buf, "0", 1)) + static_branch_disable(&pagecache_limit_enabled_key); + else + ret = -EINVAL; + + mutex_unlock(&mutex); + return ret; +} + +static struct kobj_attribute pagecache_limit_enabled_attr = + __ATTR(enabled, 0644, pagecache_limit_enabled_show, + pagecache_limit_enabled_store); + +static struct attribute *pagecache_limit_attrs[] = { + &pagecache_limit_enabled_attr.attr, + NULL, +}; + 
+static struct attribute_group pagecache_limit_attr_group = { + .attrs = pagecache_limit_attrs, +}; + +static int __init pagecache_limit_init_sysfs(void) +{ + int err; + struct kobject *pagecache_limit_kobj; + + pagecache_limit_kobj = kobject_create_and_add("pagecache_limit", mm_kobj); + if (!pagecache_limit_kobj) { + pr_err("failed to create pagecache_limit kobject\n"); + return -ENOMEM; + } + err = sysfs_create_group(pagecache_limit_kobj, &pagecache_limit_attr_group); + if (err) { + pr_err("failed to register pagecache_limit group\n"); + goto delete_obj; + } + + return 0; + +delete_obj: + kobject_put(pagecache_limit_kobj); + return err; +} +#endif /* CONFIG_SYSFS */ + +static int __init pagecache_limit_init(void) +{ + int ret = -EINVAL; + +#ifdef CONFIG_SYSFS + ret = pagecache_limit_init_sysfs(); +#endif + + return ret; +} +module_init(pagecache_limit_init); -- Gitee From f4e4857c348ca2104615e62c129aad4ffe95ecfd Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Tue, 10 Jan 2023 04:14:05 +0800 Subject: [PATCH 1919/2138] anolis: mm: pagecache_limit: add memcg granularity related inferface ANBZ: #12235 ANBZ: #3907 From this patch, the "pagecache_limit" feature begin to support memcg granularity pagecache reclaim function, this patch is aimed to add some key interfaces to configure this function. 
Signed-off-by: Xin Hao Reviewed-by: Xu Yu Reviewed-by: Rongwei Wang Reviewed-by: Kaihao Bai Link: https://gitee.com/anolis/cloud-kernel/pulls/1157 Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4207 --- include/linux/memcontrol.h | 5 +++ mm/memcontrol.c | 67 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 21a898238c1a..f4568c0f2407 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -360,6 +360,11 @@ struct mem_cgroup { struct deferred_split deferred_split_queue; #endif +#ifdef CONFIG_PAGECACHE_LIMIT + bool allow_pgcache_limit; + unsigned long pgcache_limit_size; +#endif + #ifdef CONFIG_LRU_GEN /* per-memcg mm_struct list */ struct lru_gen_mm_list mm_list; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 13d1e667e027..102d7c2cdecd 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -72,6 +72,9 @@ #include "swap.h" #include +#ifdef CONFIG_PAGECACHE_LIMIT +#include +#endif #include @@ -5500,6 +5503,58 @@ static int mem_cgroup_slab_show(struct seq_file *m, void *p) static int memory_stat_show(struct seq_file *m, void *v); +#ifdef CONFIG_PAGECACHE_LIMIT +static u64 mem_cgroup_allow_pgcache_limit_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return READ_ONCE(memcg->allow_pgcache_limit); +} + +static int mem_cgroup_allow_pgcache_limit_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + if (val > 1) + return -EINVAL; + + memcg->allow_pgcache_limit = val; + + return 0; +} + +static u64 mem_cgroup_pgcache_limit_size_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + unsigned long size; + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + size = READ_ONCE(memcg->pgcache_limit_size); + + return size; +} + +static ssize_t 
mem_cgroup_pgcache_limit_size_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, + loff_t off) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); + struct page_counter *counter = &memcg->memory; + unsigned long size, max = counter->max * PAGE_SIZE; + + buf = strstrip(buf); + size = (unsigned long)memparse(buf, NULL); + if (size > max) + memcg->pgcache_limit_size = max; + else + memcg->pgcache_limit_size = size; + + return nbytes; +} +#endif /* CONFIG_PAGECACHE_LIMIT */ + static struct cftype mem_cgroup_legacy_files[] = { { .name = "usage_in_bytes", @@ -5683,6 +5738,18 @@ static struct cftype mem_cgroup_legacy_files[] = { .read_u64 = memcg_reap_background_read, .write_u64 = memcg_reap_background_write, }, +#ifdef CONFIG_PAGECACHE_LIMIT + { + .name = "pagecache_limit.enable", + .read_u64 = mem_cgroup_allow_pgcache_limit_read, + .write_u64 = mem_cgroup_allow_pgcache_limit_write, + }, + { + .name = "pagecache_limit.size", + .read_u64 = mem_cgroup_pgcache_limit_size_read, + .write = mem_cgroup_pgcache_limit_size_write, + }, +#endif { }, /* terminate */ }; -- Gitee From 03e9d60e1d504c9886bae966d677df43edfd1f92 Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Wed, 18 Jan 2023 03:24:32 +0800 Subject: [PATCH 1920/2138] anolis: mm: pagecache_limit: add memcg synchronous reclaim support ANBZ: #12235 ANBZ: #3907 This patch is used to support memcg synchronous reclaim, it includes hierarchical attributes, it will also reclaim its sublevels memcg pagecache also, we do some optimizations to avoid large fluctuations in the direct reclaim path, for example, we only allow the mapped pages to be reclaimed when priority value is smaller than DEF_PRIORITY - 4. 
Signed-off-by: Xin Hao Reviewed-by: Xu Yu Reviewed-by: Rongwei Wang Reviewed-by: Kaihao Bai Link: https://gitee.com/anolis/cloud-kernel/pulls/1157 Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4207 --- include/linux/memcontrol.h | 3 + include/linux/pagecache_limit.h | 27 ++++++++ mm/filemap.c | 6 ++ mm/memcontrol.c | 4 ++ mm/pagecache_limit.c | 70 +++++++++++++++++++++ mm/vmscan.c | 105 ++++++++++++++++++++++++++++++++ 6 files changed, 215 insertions(+) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index f4568c0f2407..ee45dd81c045 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -45,6 +45,9 @@ enum memcg_stat_item { enum memcg_exstat_item { MEMCG_WMARK_RECLAIM, +#ifdef CONFIG_PAGECACHE_LIMIT + MEMCG_PGCACHE_RECLAIM, +#endif MEMCG_NR_EXSTAT, }; diff --git a/include/linux/pagecache_limit.h b/include/linux/pagecache_limit.h index 427620d9950c..cfb143973264 100644 --- a/include/linux/pagecache_limit.h +++ b/include/linux/pagecache_limit.h @@ -17,11 +17,38 @@ static inline bool pagecache_limit_enabled(void) { return static_branch_unlikely(&pagecache_limit_enabled_key); } +bool is_memcg_pgcache_limit_enabled(struct mem_cgroup *memcg); +void memcg_add_pgcache_limit_reclaimed(struct mem_cgroup *memcg, + unsigned long nr); +unsigned long memcg_get_pgcache_overflow_size(struct mem_cgroup *memcg); +void __memcg_pagecache_shrink(struct mem_cgroup *memcg, + bool may_unmap, gfp_t gfp_mask); +void memcg_pagecache_shrink(struct mem_cgroup *memcg, gfp_t gfp_mask); #else static inline bool pagecache_limit_enabled(void) { return false; } +static inline bool is_memcg_pgcache_limit_enabled(struct mem_cgroup *memcg) +{ + return false; +} +static inline void memcg_add_pgcache_limit_reclaimed(struct mem_cgroup *memcg, + unsigned long nr) +{ +} +static inline unsigned long memcg_get_pgcache_overflow_size(struct mem_cgroup *memcg) +{ + return 0; +} +static inline void 
__memcg_pagecache_shrink(struct mem_cgroup *memcg, + bool may_unmap, gfp_t gfp_mask) +{ +} +static inline void memcg_pagecache_shrink(struct mem_cgroup *memcg, + gfp_t gfp_mask) +{ +} #endif #endif diff --git a/mm/filemap.c b/mm/filemap.c index 7c571d0d5443..6c05a33f2cad 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -47,6 +47,9 @@ #include #include #include +#ifdef CONFIG_PAGECACHE_LIMIT +#include +#endif #include "internal.h" #define CREATE_TRACE_POINTS @@ -857,6 +860,9 @@ noinline int __filemap_add_folio(struct address_space *mapping, if (error) return error; charged = true; +#ifdef CONFIG_PAGECACHE_LIMIT + memcg_pagecache_shrink(folio_memcg(folio), gfp); +#endif } VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 102d7c2cdecd..ee80828149a5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4486,6 +4486,10 @@ static int memcg_exstat_show(struct seq_file *m, void *v) seq_printf(m, "wmark_reclaim_work_ms %llu\n", memcg_exstat_gather(memcg, MEMCG_WMARK_RECLAIM) >> 20); +#ifdef CONFIG_PAGECACHE_LIMIT + seq_printf(m, "pagecache_limit_reclaimed_kb %llu\n", + memcg_exstat_gather(memcg, MEMCG_PGCACHE_RECLAIM) * PAGE_SIZE >> 10); +#endif return 0; } diff --git a/mm/pagecache_limit.c b/mm/pagecache_limit.c index 23ec3a00dab1..548f443d1954 100644 --- a/mm/pagecache_limit.c +++ b/mm/pagecache_limit.c @@ -23,6 +23,76 @@ static int __init setup_pagecache_limit(char *s) } __setup("pagecache_limit=", setup_pagecache_limit); +bool is_memcg_pgcache_limit_enabled(struct mem_cgroup *memcg) +{ + if (!pagecache_limit_enabled()) + return false; + + return READ_ONCE(memcg->allow_pgcache_limit); +} + +static inline unsigned long memcg_get_pgcache_nr_pages(struct mem_cgroup *memcg) +{ + /* + * There use 'NR_INACTIVE_FILE' + 'NR_ACTIVE_FILE' + * to represent pagecache. 
+ */ + return memcg_page_state(memcg, NR_INACTIVE_FILE) + + memcg_page_state(memcg, NR_ACTIVE_FILE); +} + +unsigned long memcg_get_pgcache_overflow_size(struct mem_cgroup *memcg) +{ + unsigned long limit_pgcache, total_pgcache; + + limit_pgcache = READ_ONCE(memcg->pgcache_limit_size) / PAGE_SIZE; + if (!limit_pgcache) + return 0; + + total_pgcache = memcg_get_pgcache_nr_pages(memcg); + if (total_pgcache > limit_pgcache) + return total_pgcache - limit_pgcache; + + return 0; +} + +void memcg_add_pgcache_limit_reclaimed(struct mem_cgroup *memcg, + unsigned long nr) +{ + struct mem_cgroup *iter; + + for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) + __this_cpu_add(iter->exstat_cpu->item[MEMCG_PGCACHE_RECLAIM], + nr); +} + +void memcg_pagecache_shrink(struct mem_cgroup *memcg, gfp_t gfp_mask) +{ + struct mem_cgroup *tmp_memcg = memcg; + + if (!memcg || !is_memcg_pgcache_limit_enabled(memcg)) + return; + + /* + * We support pagecache to check not only current memcg, but also + * there parent memcg, to prevent the parent group which has large + * number of pagecache but not release it in time. + */ + do { + if (!memcg_get_pgcache_overflow_size(tmp_memcg)) + continue; + /* + * In direct memory reclaim path, we default support file pagecache + * which is unmapped, but we also concern most of pagecache are mapped, + * it would lead to "pagecache limit" has no effect, so in "sc.priority" + * traverses, we select the appropriate time to enable mapped pagecache + * to be reclaimed. 
+ */ + __memcg_pagecache_shrink(tmp_memcg, false, gfp_mask); + } while ((tmp_memcg = parent_mem_cgroup(tmp_memcg)) && + is_memcg_pgcache_limit_enabled(tmp_memcg)); +} + #ifdef CONFIG_SYSFS static ssize_t pagecache_limit_enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) diff --git a/mm/vmscan.c b/mm/vmscan.c index 577649ccf468..00db58798f54 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -57,6 +57,9 @@ #include #include #include +#ifdef CONFIG_PAGECACHE_LIMIT +#include +#endif #include #include @@ -8212,3 +8215,105 @@ void check_move_unevictable_folios(struct folio_batch *fbatch) } } EXPORT_SYMBOL_GPL(check_move_unevictable_folios); + +#ifdef CONFIG_PAGECACHE_LIMIT +static int __pagecache_shrink(struct mem_cgroup *memcg, + struct scan_control *sc) +{ + unsigned long has_reclaimed = sc->nr_reclaimed; + struct mem_cgroup *new = memcg, *tmp; + struct lruvec *lruvec; + pg_data_t *pgdat; + int ret = 0, nid, reserved_nid = -1, current_nid = numa_node_id(); + + for_each_online_node(nid) { + /* there we first select local numa node */ + if (reserved_nid < 0) { + reserved_nid = nid; + pgdat = NODE_DATA(current_nid); + } else if (nid == current_nid) { + pgdat = NODE_DATA(reserved_nid); + } else { + pgdat = NODE_DATA(nid); + } + + tmp = mem_cgroup_iter(new, NULL, NULL); + do { + + /* + * This loop can become CPU-bound when target memcgs + * aren't eligible for reclaim - either because they + * don't have any reclaimable pages, or because their + * memory is explicitly protected. Avoid soft lockups. + */ + cond_resched(); + + /* + * In case pagecache limit is suddenly disabled, but + * the reclaim operation is still being performed.
+ */ + if (!is_memcg_pgcache_limit_enabled(memcg)) { + mem_cgroup_iter_break(new, tmp); + ret = -1; + goto out; + } + + lruvec = mem_cgroup_lruvec(tmp, pgdat); + shrink_lruvec(lruvec, sc); + if (sc->nr_reclaimed >= sc->nr_to_reclaim) { + mem_cgroup_iter_break(new, tmp); + goto out; + } + } while ((tmp = mem_cgroup_iter(new, tmp, NULL))); + } + +out: + memcg_add_pgcache_limit_reclaimed(memcg, + sc->nr_reclaimed - has_reclaimed); + return ret; +} + +void __memcg_pagecache_shrink(struct mem_cgroup *memcg, + bool may_unmap, gfp_t gfp_mask) +{ + unsigned long nr_should_reclaim; + struct scan_control sc = { + .gfp_mask = (current_gfp_context(gfp_mask) & GFP_RECLAIM_MASK) | + (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), + .reclaim_idx = ZONE_MOVABLE, + .may_swap = 0, + .may_unmap = may_unmap, + .may_writepage = 0, + .priority = DEF_PRIORITY, + .target_mem_cgroup = memcg, + }; + + /* + * We recheck here mainly in case the pagecache is already satisfied, + * especially in asynchronous scenarios. + */ + nr_should_reclaim = memcg_get_pgcache_overflow_size(memcg); + if (!nr_should_reclaim) + return; + + sc.nr_to_reclaim = max(nr_should_reclaim, SWAP_CLUSTER_MAX); + do { + if (!is_memcg_pgcache_limit_enabled(memcg)) + break; + + if (sc.nr_reclaimed >= sc.nr_to_reclaim) + break; + /* + * In case there no enough pagecache to be reclaimed during + * driect reclaim, we only enable mapped pages to be reclaimed + * when priority value is smaller than DEF_PRIORITY - 4. 
+ */ + if (memcg->pgcache_limit_sync && + (sc.priority < DEF_PRIORITY - 4)) + sc.may_unmap = 1; + + if (__pagecache_shrink(memcg, &sc) < 0) + break; + } while (--sc.priority >= 0); +} +#endif -- Gitee From 9ca42581d30d9e8b1edaeef1736e6e0566da5f28 Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Tue, 10 Jan 2023 21:56:18 +0800 Subject: [PATCH 1921/2138] anolis: mm: pagecache_limit: add memcg asynchronous reclaim support ANBZ: #12235 ANBZ: #3907 This patch add memcg pagecache limit asynchronous reclaim support, since pagecache limit asynchronous reclaim is scheduled by workqueue, it could do more work than synchronous reclaim, i.e. write out dirty page, etc, and in order to minimize the performance jitter when dirty pages to be reclaimed, we only enable dirty pages to be reclaimed when priority value is smaller than DEF_PRIORITY - 2, and the reclaim must be in asynchronous scenario. Signed-off-by: Xin Hao Reviewed-by: Xu Yu Reviewed-by: Rongwei Wang Reviewed-by: Kaihao Bai Link: https://gitee.com/anolis/cloud-kernel/pulls/1157 Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4207 --- include/linux/memcontrol.h | 2 ++ include/linux/pagecache_limit.h | 5 ++++ mm/memcontrol.c | 46 +++++++++++++++++++++++++++++++++ mm/pagecache_limit.c | 20 +++++++++++++- mm/vmscan.c | 10 +++++++ 5 files changed, 82 insertions(+), 1 deletion(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ee45dd81c045..34a205b1b776 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -366,6 +366,8 @@ struct mem_cgroup { #ifdef CONFIG_PAGECACHE_LIMIT bool allow_pgcache_limit; unsigned long pgcache_limit_size; + bool pgcache_limit_sync; + struct work_struct pgcache_limit_work; #endif #ifdef CONFIG_LRU_GEN diff --git a/include/linux/pagecache_limit.h b/include/linux/pagecache_limit.h index cfb143973264..c85122603ca3 100644 --- a/include/linux/pagecache_limit.h +++ b/include/linux/pagecache_limit.h @@ -6,6 
+6,7 @@ #ifdef CONFIG_PAGECACHE_LIMIT DECLARE_STATIC_KEY_FALSE(pagecache_limit_enabled_key); +extern struct workqueue_struct *memcg_pgcache_limit_wq; enum pgcache_limit_reclaim_type { /* per-memcg or global pagecaeche reclaim defaut way is async */ @@ -24,6 +25,7 @@ unsigned long memcg_get_pgcache_overflow_size(struct mem_cgroup *memcg); void __memcg_pagecache_shrink(struct mem_cgroup *memcg, bool may_unmap, gfp_t gfp_mask); void memcg_pagecache_shrink(struct mem_cgroup *memcg, gfp_t gfp_mask); +void memcg_pgcache_limit_work_func(struct work_struct *work); #else static inline bool pagecache_limit_enabled(void) @@ -50,5 +52,8 @@ static inline void memcg_pagecache_shrink(struct mem_cgroup *memcg, gfp_t gfp_mask) { } +static inline void memcg_pgcache_limit_work_func(struct work_struct *work) +{ +} #endif #endif diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ee80828149a5..6430259386ed 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5557,6 +5557,32 @@ static ssize_t mem_cgroup_pgcache_limit_size_write(struct kernfs_open_file *of, return nbytes; } + +static u64 mem_cgroup_allow_pgcache_sync_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return READ_ONCE(memcg->pgcache_limit_sync); +} + +static int mem_cgroup_allow_pgcache_sync_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + if (val > 1) + return -EINVAL; + if (memcg->pgcache_limit_sync == val) + return 0; + + if (val) + memcg->pgcache_limit_sync = PGCACHE_RECLAIM_DIRECT; + else + memcg->pgcache_limit_sync = PGCACHE_RECLAIM_ASYNC; + + return 0; +} #endif /* CONFIG_PAGECACHE_LIMIT */ static struct cftype mem_cgroup_legacy_files[] = { @@ -5753,6 +5779,11 @@ static struct cftype mem_cgroup_legacy_files[] = { .read_u64 = mem_cgroup_pgcache_limit_size_read, .write = mem_cgroup_pgcache_limit_size_write, }, + { + .name = "pagecache_limit.sync", + .read_u64 = 
mem_cgroup_allow_pgcache_sync_read, + .write_u64 = mem_cgroup_allow_pgcache_sync_write, + }, #endif { }, /* terminate */ }; @@ -5956,6 +5987,9 @@ static struct mem_cgroup *mem_cgroup_alloc(void) INIT_WORK(&memcg->high_work, high_work_func); INIT_WORK(&memcg->wmark_work, wmark_work_func); +#ifdef CONFIG_PAGECACHE_LIMIT + INIT_WORK(&memcg->pgcache_limit_work, memcg_pgcache_limit_work_func); +#endif INIT_LIST_HEAD(&memcg->oom_notify); mutex_init(&memcg->thresholds_lock); spin_lock_init(&memcg->move_lock); @@ -6158,6 +6192,9 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css) vmpressure_cleanup(&memcg->vmpressure); cancel_work_sync(&memcg->high_work); cancel_work_sync(&memcg->wmark_work); +#ifdef CONFIG_PAGECACHE_LIMIT + cancel_work_sync(&memcg->pgcache_limit_work); +#endif mem_cgroup_remove_from_trees(memcg); free_shrinker_info(memcg); mem_cgroup_free(memcg); @@ -8144,6 +8181,15 @@ static int __init mem_cgroup_init(void) if (!memcg_wmark_wq) return -ENOMEM; +#ifdef CONFIG_PAGECACHE_LIMIT + memcg_pgcache_limit_wq = alloc_workqueue("memcg_pgcache_limit", + WQ_FREEZABLE | + WQ_UNBOUND | WQ_MEM_RECLAIM, + WQ_UNBOUND_MAX_ACTIVE); + + if (!memcg_pgcache_limit_wq) + return -ENOMEM; +#endif cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL, memcg_hotplug_cpu_dead); diff --git a/mm/pagecache_limit.c b/mm/pagecache_limit.c index 548f443d1954..8b8f39bbdec8 100644 --- a/mm/pagecache_limit.c +++ b/mm/pagecache_limit.c @@ -12,6 +12,7 @@ #include DEFINE_STATIC_KEY_FALSE(pagecache_limit_enabled_key); +struct workqueue_struct *memcg_pgcache_limit_wq; static int __init setup_pagecache_limit(char *s) { @@ -66,6 +67,19 @@ void memcg_add_pgcache_limit_reclaimed(struct mem_cgroup *memcg, nr); } +void memcg_pgcache_limit_work_func(struct work_struct *work) +{ + struct mem_cgroup *memcg; + + memcg = container_of(work, struct mem_cgroup, pgcache_limit_work); + if (!is_memcg_pgcache_limit_enabled(memcg)) + return; + + current->flags |= PF_MEMALLOC | 
PF_KSWAPD; + __memcg_pagecache_shrink(memcg, true, GFP_KERNEL); + current->flags &= ~(PF_MEMALLOC | PF_KSWAPD); +} + void memcg_pagecache_shrink(struct mem_cgroup *memcg, gfp_t gfp_mask) { struct mem_cgroup *tmp_memcg = memcg; @@ -88,7 +102,11 @@ void memcg_pagecache_shrink(struct mem_cgroup *memcg, gfp_t gfp_mask) * traverses, we select the appropriate time to enable mapped pagecache * to be reclaimed. */ - __memcg_pagecache_shrink(tmp_memcg, false, gfp_mask); + if (tmp_memcg->pgcache_limit_sync == PGCACHE_RECLAIM_DIRECT) + __memcg_pagecache_shrink(tmp_memcg, false, gfp_mask); + else + queue_work(memcg_pgcache_limit_wq, + &tmp_memcg->pgcache_limit_work); } while ((tmp_memcg = parent_mem_cgroup(tmp_memcg)) && is_memcg_pgcache_limit_enabled(tmp_memcg)); } diff --git a/mm/vmscan.c b/mm/vmscan.c index 00db58798f54..4fb25dda7b24 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -8312,6 +8312,16 @@ void __memcg_pagecache_shrink(struct mem_cgroup *memcg, (sc.priority < DEF_PRIORITY - 4)) sc.may_unmap = 1; + /* + * We only enable dirty pages to be reclaimed when priority + * value is smaller than DEF_PRIORITY - 2, and the reclaim + * must be in asynchronous scenario, in order to minimize the + * performance jitter when dirty pages to be reclaimed. + */ + if (current_is_kswapd() && !memcg->pgcache_limit_sync && + (sc.priority < DEF_PRIORITY - 2)) + sc.may_writepage = 1; + if (__pagecache_shrink(memcg, &sc) < 0) break; } while (--sc.priority >= 0); -- Gitee From ef07d895b421ea15013670a9f40be8fa69882ace Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Thu, 21 Sep 2023 16:56:11 +0800 Subject: [PATCH 1922/2138] anolis: mm: pagecache_limit: inherit settings of parent memcg ANBZ: #12235 ANBZ: #6666 This makes the child memcg inherit pagecache_limit settings of parent memcg, i.e., memory.pagecache_limit.enable and memory.pagecache_limit.sync. The memory.pagecache_limit.size does not participate in inheritance.
Signed-off-by: Xu Yu Reviewed-by: Kaihao Bai Link: https://gitee.com/anolis/cloud-kernel/pulls/2224 Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4207 --- mm/memcontrol.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6430259386ed..373047f89a52 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6049,7 +6049,10 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) #ifdef CONFIG_ASYNC_FORK memcg->async_fork = parent->async_fork; #endif - +#ifdef CONFIG_PAGECACHE_LIMIT + memcg->allow_pgcache_limit = parent->allow_pgcache_limit; + memcg->pgcache_limit_sync = parent->pgcache_limit_sync; +#endif page_counter_init(&memcg->memory, &parent->memory); page_counter_init(&memcg->swap, &parent->swap); page_counter_init(&memcg->kmem, &parent->kmem); -- Gitee From eceea8a24cc182e56a979fb24f0f154e08e8d5c8 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Tue, 2 Jan 2024 10:09:20 +0800 Subject: [PATCH 1923/2138] anolis: mm: pagecache_limit: handle sc->may_deactivate ANBZ: #12235 ANBZ: #7815 This handles sc->may_deactivate through prepare_scan_count(), in case active files dominate. Reported-by: Run Xin Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=7815 Signed-off-by: Xu Yu Reviewed-by: Kaihao Bai Link: https://gitee.com/anolis/cloud-kernel/pulls/2566 Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4207 --- mm/vmscan.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/vmscan.c b/mm/vmscan.c index 4fb25dda7b24..72d78c595597 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -8237,6 +8237,9 @@ static int __pagecache_shrink(struct mem_cgroup *memcg, pgdat = NODE_DATA(nid); } + /* handle sc->may_deactivate etc. 
*/ + prepare_scan_count(pgdat, sc); + tmp = mem_cgroup_iter(new, NULL, NULL); do { -- Gitee From d9ab015db249a5f1c2222e50eead19e293f669b2 Mon Sep 17 00:00:00 2001 From: tianyu Date: Wed, 28 Feb 2024 10:16:29 +0800 Subject: [PATCH 1924/2138] anolis: mm: pagecache_limit: fix unprotected percpu variables ANBZ: #12235 ANBZ: #8321 The reclaimed statistic is a percpu variable whose access should be protected by turning off preemption. Signed-off-by: tianyu Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2780 Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4207 --- mm/pagecache_limit.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mm/pagecache_limit.c b/mm/pagecache_limit.c index 8b8f39bbdec8..01b711f9bee7 100644 --- a/mm/pagecache_limit.c +++ b/mm/pagecache_limit.c @@ -62,9 +62,13 @@ void memcg_add_pgcache_limit_reclaimed(struct mem_cgroup *memcg, { struct mem_cgroup *iter; + preempt_disable(); + for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) __this_cpu_add(iter->exstat_cpu->item[MEMCG_PGCACHE_RECLAIM], nr); + + preempt_enable(); } void memcg_pgcache_limit_work_func(struct work_struct *work) -- Gitee From fd583c80ae5f205b59f94ddd5e93280573cc785c Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Tue, 31 Jan 2023 19:36:00 +0800 Subject: [PATCH 1925/2138] anolis: configs: enable config of PAGECACHE_LIMIT ANBZ: #12235 ANBZ: #3907 This enables CONFIG_PAGECACHE_LIMIT on x86 and arm64 by default Signed-off-by: Xin Hao Reviewed-by: Xu Yu Reviewed-by: Rongwei Wang Reviewed-by: Kaihao Bai Link: https://gitee.com/anolis/cloud-kernel/pulls/1157 Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4207 --- anolis/configs/L1-RECOMMEND/default/CONFIG_PAGECACHE_LIMIT | 1 + 1 file changed, 1 insertion(+) create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PAGECACHE_LIMIT diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGECACHE_LIMIT 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGECACHE_LIMIT new file mode 100644 index 000000000000..5727f56a0b3e --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PAGECACHE_LIMIT @@ -0,0 +1 @@ +CONFIG_PAGECACHE_LIMIT=y -- Gitee From e363ab9451278a56f173034009a2f892b1450e29 Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Wed, 21 Feb 2024 17:47:39 +0800 Subject: [PATCH 1926/2138] anolis: mm: pagecache_limit: support page cache limit with cgroup v2 ANBZ: #12235 ANBZ: #8281 Pagecache limit of per cgroup has been supported in cgroup v1, extend it to cgroup v2. Signed-off-by: Kaihao Bai Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2777 Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4207 --- mm/memcontrol.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 373047f89a52..5e6c25f9ed38 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -7574,6 +7574,23 @@ static struct cftype memory_files[] = { .read_u64 = memcg_reap_background_read, .write_u64 = memcg_reap_background_write, }, +#ifdef CONFIG_PAGECACHE_LIMIT + { + .name = "pagecache_limit.enable", + .read_u64 = mem_cgroup_allow_pgcache_limit_read, + .write_u64 = mem_cgroup_allow_pgcache_limit_write, + }, + { + .name = "pagecache_limit.size", + .read_u64 = mem_cgroup_pgcache_limit_size_read, + .write = mem_cgroup_pgcache_limit_size_write, + }, + { + .name = "pagecache_limit.sync", + .read_u64 = mem_cgroup_allow_pgcache_sync_read, + .write_u64 = mem_cgroup_allow_pgcache_sync_write, + }, +#endif { } /* terminate */ }; -- Gitee From 9473f7ceeaceeaae672e853b5bc32fb33f38ced6 Mon Sep 17 00:00:00 2001 From: Weilin Tong Date: Tue, 10 Dec 2024 13:55:52 +0800 Subject: [PATCH 1927/2138] anolis: mm: pagecache_limit: proactive refresh memcgroup stats ANBZ: #12235 In the 6.6 kernel, it is necessary to proactively refresh memcgroup statistics to obtain more accurate values which are 
crucial for correctly triggering the pagecache shrink process. In the synchronous path, this design may lead to some performance loss. Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4207 --- mm/pagecache_limit.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/mm/pagecache_limit.c b/mm/pagecache_limit.c index 01b711f9bee7..bf5b75b34381 100644 --- a/mm/pagecache_limit.c +++ b/mm/pagecache_limit.c @@ -37,7 +37,12 @@ static inline unsigned long memcg_get_pgcache_nr_pages(struct mem_cgroup *memcg) /* * There use 'NR_INACTIVE_FILE' + 'NR_ACTIVE_FILE' * to represent pagecache. + * Due to changes in the memcg state update strategy, + * we need to proactively perform a refresh so that + * we could read accurate per-memcg lruvec stats. */ + cgroup_rstat_flush(memcg->css.cgroup); + return memcg_page_state(memcg, NR_INACTIVE_FILE) + memcg_page_state(memcg, NR_ACTIVE_FILE); } -- Gitee From f5ffceb9902013d1d3287887f912147c48f692d0 Mon Sep 17 00:00:00 2001 From: Zhao Qunqin Date: Wed, 4 Dec 2024 18:42:01 +0800 Subject: [PATCH 1928/2138] anolis: nvme: Add memory barrier before nvme_irq_handler ANBZ: #12294 Currently, there are certain models of NVMe SSDs that cannot guarantee data availability before the interrupt handling process, which can lead to NVMe timeout errors. 
Signed-off-by: Zhao Qunqin Signed-off-by: Juxin Gao Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/4210 --- drivers/nvme/host/pci.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 52c8fd3d5c47..9f4d9fbc2fa7 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1079,6 +1079,10 @@ static irqreturn_t nvme_irq(int irq, void *data) struct nvme_queue *nvmeq = data; DEFINE_IO_COMP_BATCH(iob); +#ifdef CONFIG_LOONGARCH + /* Ensure that the data is completely in place */ + mb(); +#endif if (nvme_poll_cq(nvmeq, &iob)) { if (!rq_list_empty(iob.req_list)) nvme_pci_complete_batch(&iob); -- Gitee From e179120d06e458bdae76b939d3e4323fd0426719 Mon Sep 17 00:00:00 2001 From: gaojuxin Date: Thu, 17 Oct 2024 14:02:32 +0800 Subject: [PATCH 1929/2138] anolis: ethernet: Add motorcomm yt6801 support ANBZ: #11468 - The Asus XC-LS3A6M motherboard(Loongson 3A6000),CE720Z2,CE720Z... uses a yt6801 controller. - This patch introduces an out of tree module to provide support for this NIC. [^1] - Refactor module tree to make it work with Kconfig. [^2] - Drop unneeded installation script (yt_nic_install.sh). - Drop README. some day,we can write a yt6801.rst in kenenl doc. - enable YT6801 for x86 arm64 and loongarch as module - Refactor according to the kernel code style [^1]: Ref: https://www.motor-comm.com/Public/Uploads/uploadfile/files/20240104/yt6801-linux-driver-1.0.27.zip [^2]: CONFIG_NET_VENDOR_MOTORCOMM =(y)=> CONFIG_YT8601 (tristate). 
Co-authored-by: Mingcong Bai Co-authored-by: Xiaotian Wu Signed-off-by: Yanteng Si Signed-off-by: gaojuxin Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/4079 --- arch/loongarch/configs/anolis_defconfig | 1 + arch/loongarch/configs/loongson3_defconfig | 1 + drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/motorcomm/Kconfig | 27 + drivers/net/ethernet/motorcomm/Makefile | 4 + .../net/ethernet/motorcomm/yt6801/Makefile | 15 + .../net/ethernet/motorcomm/yt6801/fuxi-dbg.h | 15 + .../ethernet/motorcomm/yt6801/fuxi-efuse.c | 1344 ++++ .../ethernet/motorcomm/yt6801/fuxi-efuse.h | 25 + .../motorcomm/yt6801/fuxi-gmac-common.c | 939 +++ .../motorcomm/yt6801/fuxi-gmac-debugfs.c | 787 +++ .../motorcomm/yt6801/fuxi-gmac-desc.c | 601 ++ .../motorcomm/yt6801/fuxi-gmac-ethtool.c | 1114 +++ .../ethernet/motorcomm/yt6801/fuxi-gmac-hw.c | 6256 +++++++++++++++++ .../ethernet/motorcomm/yt6801/fuxi-gmac-net.c | 2329 ++++++ .../ethernet/motorcomm/yt6801/fuxi-gmac-pci.c | 250 + .../ethernet/motorcomm/yt6801/fuxi-gmac-phy.c | 256 + .../ethernet/motorcomm/yt6801/fuxi-gmac-reg.h | 1894 +++++ .../net/ethernet/motorcomm/yt6801/fuxi-gmac.h | 934 +++ .../net/ethernet/motorcomm/yt6801/fuxi-os.h | 515 ++ 21 files changed, 17309 insertions(+) create mode 100644 drivers/net/ethernet/motorcomm/Kconfig create mode 100644 drivers/net/ethernet/motorcomm/Makefile create mode 100644 drivers/net/ethernet/motorcomm/yt6801/Makefile create mode 100644 drivers/net/ethernet/motorcomm/yt6801/fuxi-dbg.h create mode 100644 drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c create mode 100644 drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.h create mode 100644 drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c create mode 100644 drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-debugfs.c create mode 100644 drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c create mode 100644 drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c 
create mode 100644 drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c create mode 100644 drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c create mode 100644 drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c create mode 100644 drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c create mode 100644 drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h create mode 100644 drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h create mode 100644 drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig index 48ca88722fda..f2311d31442b 100644 --- a/arch/loongarch/configs/anolis_defconfig +++ b/arch/loongarch/configs/anolis_defconfig @@ -877,6 +877,7 @@ CONFIG_8139TOO=m # CONFIG_8139TOO_PIO is not set CONFIG_8139TOO_8129=y CONFIG_R8169=m +CONFIG_YT6801=m # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index e33401d67726..bbf18ed31761 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -875,6 +875,7 @@ CONFIG_8139TOO=m # CONFIG_8139TOO_PIO is not set CONFIG_8139TOO_8129=y CONFIG_R8169=m +CONFIG_YT6801=m # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index ceca838cb86a..d6753a9ba00f 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -128,6 +128,7 @@ source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" source "drivers/net/ethernet/micrel/Kconfig" source "drivers/net/ethernet/microchip/Kconfig" +source "drivers/net/ethernet/motorcomm/Kconfig" source "drivers/net/ethernet/mscc/Kconfig" source "drivers/net/ethernet/microsoft/Kconfig" source 
"drivers/net/ethernet/moxa/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index d24786d26214..5d715f4aff6b 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -61,6 +61,7 @@ obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ +obj-$(CONFIG_NET_VENDOR_MOTORCOMM) += motorcomm/ obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ diff --git a/drivers/net/ethernet/motorcomm/Kconfig b/drivers/net/ethernet/motorcomm/Kconfig new file mode 100644 index 000000000000..2d058928936f --- /dev/null +++ b/drivers/net/ethernet/motorcomm/Kconfig @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2023 Motorcomm, Inc. + +config NET_VENDOR_MOTORCOMM + bool "Motorcomm devices" + default y + depends on PCI + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Motorcomm cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_MOTORCOMM + +config YT6801 + tristate "Motorcomm YT6801 Ethernet support" + depends on PCI + help + If you have a network (Ethernet) controller of this type, say Y here. + + To compile this driver as a module, choose M here. The module + will be called forcedeth. + +endif # NET_VENDOR_MOTORCOMM diff --git a/drivers/net/ethernet/motorcomm/Makefile b/drivers/net/ethernet/motorcomm/Makefile new file mode 100644 index 000000000000..af0a439d54a1 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2023 Motorcomm, Inc. 
+ +obj-$(CONFIG_YT6801) += yt6801/ diff --git a/drivers/net/ethernet/motorcomm/yt6801/Makefile b/drivers/net/ethernet/motorcomm/yt6801/Makefile new file mode 100644 index 000000000000..93b5c4510eb0 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (c) 2023 Motorcomm, Inc. + + +obj-$(CONFIG_YT6801) += yt6801.o + +yt6801-objs := fuxi-gmac-common.o \ + fuxi-gmac-desc.o \ + fuxi-gmac-ethtool.o \ + fuxi-gmac-hw.o \ + fuxi-gmac-net.o \ + fuxi-gmac-pci.o \ + fuxi-gmac-phy.o \ + fuxi-efuse.o \ + fuxi-gmac-debugfs.o diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-dbg.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-dbg.h new file mode 100644 index 000000000000..24282f8e2230 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-dbg.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. */ + +#ifndef _MP_DBG_H +#define _MP_DBG_H + +/* Message verbosity: lower values indicate higher urgency */ +#define MP_OFF 0 +#define MP_ERROR 1 +#define MP_WARN 2 +#define MP_TRACE 3 +#define MP_INFO 4 +#define MP_LOUD 5 + +#endif /* _MP_DBG_H */ \ No newline at end of file diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c new file mode 100644 index 000000000000..ae4ca3d59ac4 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c @@ -0,0 +1,1344 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. */ + +#include "fuxi-gmac.h" +#include "fuxi-gmac-reg.h" +#include "fuxi-efuse.h" + +/* read patch per index. 
*/ +bool fxgmac_read_patch_from_efuse_per_index(struct fxgmac_pdata *pdata, + u8 index, u32 *offset, u32 *value) +{ + unsigned int wait, i; + u32 regval = 0; + bool succeed = false; + + if (index >= FUXI_EFUSE_MAX_ENTRY) { + FXGMAC_PR("Reading efuse out of range, index %d\n", index); + return false; + } + + if (offset) { + *offset = 0; + } + for (i = EFUSE_PATCH_ADDR_START_BYTE; i < EFUSE_PATCH_DATA_START_BYTE; + i++) { + regval = 0; + regval = FXGMAC_SET_REG_BITS( + regval, EFUSE_OP_ADDR_POS, EFUSE_OP_ADDR_LEN, + EFUSE_REGION_A_B_LENGTH + index * EFUSE_EACH_PATH_SIZE + + i); + regval = FXGMAC_SET_REG_BITS(regval, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_READ); + writereg(pdata->pAdapter, regval, + pdata->base_mem + EFUSE_OP_CTRL_0); + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + regval = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(regval, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + if (succeed) { + if (offset) { + *offset |= + (FXGMAC_GET_REG_BITS( + regval, EFUSE_OP_RD_DATA_POS, + EFUSE_OP_RD_DATA_LEN) + << (i << 3)); + } + } else { + FXGMAC_PR("Fail to reading efuse Byte%d\n", + index * EFUSE_EACH_PATH_SIZE + i); + return succeed; + } + } + + if (value) { + *value = 0; + } + for (i = EFUSE_PATCH_DATA_START_BYTE; i < EFUSE_EACH_PATH_SIZE; i++) { + regval = 0; + regval = FXGMAC_SET_REG_BITS( + regval, EFUSE_OP_ADDR_POS, EFUSE_OP_ADDR_LEN, + EFUSE_REGION_A_B_LENGTH + index * EFUSE_EACH_PATH_SIZE + + i); + regval = FXGMAC_SET_REG_BITS(regval, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_READ); + writereg(pdata->pAdapter, regval, + pdata->base_mem + EFUSE_OP_CTRL_0); + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + regval = 
readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(regval, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + if (succeed) { + if (value) { + *value |= (FXGMAC_GET_REG_BITS( + regval, EFUSE_OP_RD_DATA_POS, + EFUSE_OP_RD_DATA_LEN) + << ((i - 2) << 3)); + } + } else { + FXGMAC_PR("Fail to reading efuse Byte%d\n", + index * EFUSE_EACH_PATH_SIZE + i); + return succeed; + } + } + + return succeed; +} + +bool fxgmac_read_patch_from_efuse(struct fxgmac_pdata *pdata, u32 offset, + u32 *value) /* read patch per index. */ +{ + u32 reg_offset, reg_val; + u32 cur_val = 0; + bool succeed = true; + u8 index = 0; + + if (offset >> 16) { + FXGMAC_PR( + "Reading efuse out of range, reg %d. reg must be 2bytes.\n", + index); + return false; + } + + for (index = 0; index < FUXI_EFUSE_MAX_ENTRY; index++) { + if (!fxgmac_read_patch_from_efuse_per_index( + pdata, index, ®_offset, ®_val)) { + succeed = false; + break; + } else if (reg_offset == offset) { + cur_val = reg_val; + } else if (0 == reg_offset && 0 == reg_val) { + break; /* first blank. We should write here. */ + } + } + + if (value) { + *value = cur_val; + } + + return succeed; +} + +bool fxgmac_write_patch_to_efuse_per_index(struct fxgmac_pdata *pdata, u8 index, + u32 offset, u32 value) +{ + unsigned int wait, i; + u32 reg_val; + bool succeed = false; + u32 cur_reg, cur_val; + u8 max_index = FUXI_EFUSE_MAX_ENTRY; + + if (offset >> 16) { + FXGMAC_PR( + "Reading efuse out of range, reg %d. 
reg must be 2bytes.\n", + index); + return false; + } + + fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, ®_val); + if (EFUSE_LED_COMMON_SOLUTION == reg_val) { + max_index = FUXI_EFUSE_MAX_ENTRY_UNDER_LED_COMMON; + } + + if (index >= max_index) { + FXGMAC_PR("Writing efuse out of range, index %d max index %d\n", + index, max_index); + return false; + } + + if (fxgmac_read_patch_from_efuse_per_index(pdata, index, &cur_reg, + &cur_val)) { + if (cur_reg != 0 || cur_val != 0) { + FXGMAC_PR( + " The index %d has writed value, cannot rewrite it.\n", + index); + return false; + } + } else { + FXGMAC_PR("Cannot read index %d.\n", index); + return false; + } + + for (i = EFUSE_PATCH_ADDR_START_BYTE; i < EFUSE_PATCH_DATA_START_BYTE; + i++) { + reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS( + reg_val, EFUSE_OP_ADDR_POS, EFUSE_OP_ADDR_LEN, + EFUSE_REGION_A_B_LENGTH + index * EFUSE_EACH_PATH_SIZE + + i); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_WR_DATA_POS, + EFUSE_OP_WR_DATA_LEN, + (offset >> (i << 3)) & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_WRITE); + writereg(pdata->pAdapter, reg_val, + pdata->base_mem + EFUSE_OP_CTRL_0); + + succeed = false; + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + if (!succeed) { + FXGMAC_PR("Fail to writing efuse Byte%d\n", + index * EFUSE_EACH_PATH_SIZE + i); + return succeed; + } + } + + for (i = 2; i < 6; i++) { + reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_ADDR_POS, + EFUSE_OP_ADDR_LEN, + 18 + index * 6 + i); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_WR_DATA_POS, + EFUSE_OP_WR_DATA_LEN, + (value >> ((i - 2) << 3)) & 0xFF); + reg_val = 
FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_WRITE); + writereg(pdata->pAdapter, reg_val, + pdata->base_mem + EFUSE_OP_CTRL_0); + + succeed = false; + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + if (!succeed) { + FXGMAC_PR("Fail to writing efuse Byte%d\n", + index * EFUSE_EACH_PATH_SIZE + i); + return succeed; + } + } + + return succeed; +} + +bool fxgmac_write_patch_to_efuse(struct fxgmac_pdata *pdata, u32 offset, + u32 value) +{ + unsigned int wait, i; + u32 reg_offset, reg_val; + u32 cur_offset = 0, cur_val = 0; + bool succeed = false; + u8 index = 0; + + if (offset >> 16) { + FXGMAC_PR( + "Reading efuse out of range, reg %d. reg must be 2bytes.\n", + index); + return false; + } + + for (index = 0;; index++) { + if (!fxgmac_read_patch_from_efuse_per_index( + pdata, index, ®_offset, ®_val)) { + return false; + } else if (reg_offset == offset) { + cur_offset = reg_offset; + cur_val = reg_val; + } else if (0 == reg_offset && 0 == reg_val) { + break; /* first blank. We should write here. 
*/ + } + } + + if (cur_offset == offset) { + if (cur_val == value) { + FXGMAC_PR("0x%x -> Reg0x%x already exists, ignore.\n", + value, offset); + return true; + } else { + FXGMAC_PR( + "Reg0x%x entry current value 0x%x, reprogram.\n", + offset, value); + } + } + + for (i = EFUSE_PATCH_ADDR_START_BYTE; i < EFUSE_PATCH_DATA_START_BYTE; + i++) { + reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS( + reg_val, EFUSE_OP_ADDR_POS, EFUSE_OP_ADDR_LEN, + EFUSE_REGION_A_B_LENGTH + index * EFUSE_EACH_PATH_SIZE + + i); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_WR_DATA_POS, + EFUSE_OP_WR_DATA_LEN, + (offset >> (i << 3)) & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_WRITE); + writereg(pdata->pAdapter, reg_val, + pdata->base_mem + EFUSE_OP_CTRL_0); + + succeed = false; + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + if (!succeed) { + FXGMAC_PR("Fail to writing efuse Byte%d\n", + index * EFUSE_EACH_PATH_SIZE + i); + return succeed; + } + } + + for (i = EFUSE_PATCH_DATA_START_BYTE; i < EFUSE_EACH_PATH_SIZE; i++) { + reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS( + reg_val, EFUSE_OP_ADDR_POS, EFUSE_OP_ADDR_LEN, + EFUSE_REGION_A_B_LENGTH + index * EFUSE_EACH_PATH_SIZE + + i); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_WR_DATA_POS, + EFUSE_OP_WR_DATA_LEN, + (value >> ((i - 2) << 3)) & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_WRITE); + writereg(pdata->pAdapter, reg_val, + pdata->base_mem + EFUSE_OP_CTRL_0); + + succeed = false; + wait = 1000; + while (wait--) { + 
usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + if (!succeed) { + FXGMAC_PR("Fail to writing efuse Byte%d\n", + index * EFUSE_EACH_PATH_SIZE + i); + return succeed; + } + } + + return succeed; +} + +bool fxgmac_read_mac_subsys_from_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, + u32 *subsys, u32 *revid) +{ + u32 offset = 0, value = 0; + u32 machr = 0, maclr = 0; + bool succeed = true; + u8 index = 0; + + for (index = 0;; index++) { + if (!fxgmac_read_patch_from_efuse_per_index(pdata, index, + &offset, &value)) { + succeed = false; + break; /* reach the last item. */ + } + if (0x00 == offset) { + break; /* reach the blank. */ + } + if (MACA0LR_FROM_EFUSE == offset) { + maclr = value; + } + if (MACA0HR_FROM_EFUSE == offset) { + machr = value; + } + + if ((0x08 == offset) && revid) { + *revid = value; + } + if ((0x2C == offset) && subsys) { + *subsys = value; + } + } + if (mac_addr) { + mac_addr[5] = (u8)(maclr & 0xFF); + mac_addr[4] = (u8)((maclr >> 8) & 0xFF); + mac_addr[3] = (u8)((maclr >> 16) & 0xFF); + mac_addr[2] = (u8)((maclr >> 24) & 0xFF); + mac_addr[1] = (u8)(machr & 0xFF); + mac_addr[0] = (u8)((machr >> 8) & 0xFF); + } + + return succeed; +} + +bool fxgmac_write_mac_subsys_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, + u32 *subsys, u32 *revid) +{ + u32 machr = 0, maclr = 0, pcie_cfg_ctrl = PCIE_CFG_CTRL_DEFAULT_VAL; + bool succeed = true; + if (mac_addr) { + machr = readreg(pdata->pAdapter, + pdata->base_mem + MACA0HR_FROM_EFUSE); + maclr = readreg(pdata->pAdapter, + pdata->base_mem + MACA0LR_FROM_EFUSE); + DPRINTK("Current mac address from efuse is %02x-%02x-%02x-%02x-%02x-%02x.\n", + (machr >> 8) & 0xFF, machr & 0xFF, (maclr >> 24) & 0xFF, + (maclr >> 16) & 0xFF, (maclr >> 8) & 0xFF, + maclr & 0xFF); + + if (!fxgmac_write_patch_to_efuse(pdata, MACA0HR_FROM_EFUSE, + 
(((u32)mac_addr[0]) << 8) | + mac_addr[1])) { + succeed = false; + } + if (!fxgmac_write_patch_to_efuse( + pdata, MACA0LR_FROM_EFUSE, + (((u32)mac_addr[2]) << 24) | + (((u32)mac_addr[3]) << 16) | + (((u32)mac_addr[4]) << 8) | mac_addr[5])) { + succeed = false; + } + } + + if (revid) { + if (!fxgmac_write_patch_to_efuse(pdata, EFUSE_REVID_REGISTER, + *revid)) { + succeed = false; + } + } + if (subsys) { + pcie_cfg_ctrl = FXGMAC_SET_REG_BITS( + pcie_cfg_ctrl, MGMT_PCIE_CFG_CTRL_CS_EN_POS, + MGMT_PCIE_CFG_CTRL_CS_EN_LEN, 1); + if (!fxgmac_write_patch_to_efuse(pdata, MGMT_PCIE_CFG_CTRL, + pcie_cfg_ctrl)) { + succeed = false; + } + if (!fxgmac_write_patch_to_efuse(pdata, EFUSE_SUBSYS_REGISTER, + *subsys)) { + succeed = false; + } + pcie_cfg_ctrl = FXGMAC_SET_REG_BITS( + pcie_cfg_ctrl, MGMT_PCIE_CFG_CTRL_CS_EN_POS, + MGMT_PCIE_CFG_CTRL_CS_EN_LEN, 0); + if (!fxgmac_write_patch_to_efuse(pdata, MGMT_PCIE_CFG_CTRL, + pcie_cfg_ctrl)) { + succeed = false; + } + } + return succeed; +} + +bool fxgmac_write_mac_addr_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr) +{ + u32 machr = 0, maclr = 0; + bool succeed = true; + + if (mac_addr) { + machr = readreg(pdata->pAdapter, + pdata->base_mem + MACA0HR_FROM_EFUSE); + maclr = readreg(pdata->pAdapter, + pdata->base_mem + MACA0LR_FROM_EFUSE); + DPRINTK("Current mac address from efuse is %02x-%02x-%02x-%02x-%02x-%02x.\n", + (machr >> 8) & 0xFF, machr & 0xFF, (maclr >> 24) & 0xFF, + (maclr >> 16) & 0xFF, (maclr >> 8) & 0xFF, + maclr & 0xFF); + + if (!fxgmac_write_patch_to_efuse(pdata, MACA0HR_FROM_EFUSE, + (((u32)mac_addr[0]) << 8) | + mac_addr[1])) { + succeed = false; + } + if (!fxgmac_write_patch_to_efuse( + pdata, MACA0LR_FROM_EFUSE, + (((u32)mac_addr[2]) << 24) | + (((u32)mac_addr[3]) << 16) | + (((u32)mac_addr[4]) << 8) | mac_addr[5])) { + succeed = false; + } + } + + return succeed; +} + +bool fxgmac_read_subsys_from_efuse(struct fxgmac_pdata *pdata, u32 *subsys, + u32 *revid) +{ + u32 offset = 0, value = 0; + u8 index; + bool 
succeed = true; + + for (index = 0;; index++) { + if (!fxgmac_read_patch_from_efuse_per_index(pdata, index, + &offset, &value)) { + succeed = false; + break; /* reach the last item. */ + } + if (0x00 == offset) { + break; /* reach the blank. */ + } + + if ((EFUSE_REVID_REGISTER == offset) && revid) { + *revid = value; + } else { + succeed = false; + } + if ((EFUSE_SUBSYS_REGISTER == offset) && subsys) { + *subsys = value; + } else { + succeed = false; + } + } + + return succeed; +} + +bool fxgmac_write_subsys_to_efuse(struct fxgmac_pdata *pdata, u32 *subsys, + u32 *revid) +{ + bool succeed = true; + + /* write subsys info */ + if (revid) { + if (!fxgmac_write_patch_to_efuse(pdata, EFUSE_REVID_REGISTER, + *revid)) { + succeed = false; + } + } + if (subsys) { + if (!fxgmac_write_patch_to_efuse(pdata, EFUSE_SUBSYS_REGISTER, + *subsys)) { + succeed = false; + } + } + return succeed; +} + +bool fxgmac_efuse_load(struct fxgmac_pdata *pdata) +{ + bool succeed = false; + unsigned int wait; + u32 reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_AUTO_LOAD); + writereg(pdata->pAdapter, reg_val, pdata->base_mem + EFUSE_OP_CTRL_0); + + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + if (!succeed) { + FXGMAC_PR("Fail to loading efuse, ctrl_1 0x%08x\n", reg_val); + } + return succeed; +} + +bool fxgmac_efuse_read_data(struct fxgmac_pdata *pdata, u32 offset, u32 *value) +{ + bool succeed = false; + unsigned int wait; + u32 reg_val = 0; + + if (value) { + *value = 0; + } + + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_ADDR_POS, + EFUSE_OP_ADDR_LEN, offset); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + 
EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_READ); + writereg(pdata->pAdapter, reg_val, pdata->base_mem + EFUSE_OP_CTRL_0); + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + + if (succeed) { + if (value) { + *value = FXGMAC_GET_REG_BITS(reg_val, + EFUSE_OP_RD_DATA_POS, + EFUSE_OP_RD_DATA_LEN); + } + } else { + FXGMAC_PR("Fail to reading efuse Byte%d\n", offset); + } + + return succeed; +} + +bool fxgmac_efuse_write_oob(struct fxgmac_pdata *pdata) +{ + bool succeed = false; + unsigned int wait; + u32 reg_val, value; + + if (!fxgmac_efuse_read_data(pdata, EFUSE_OOB_ADDR, ®_val)) { + return succeed; + } + + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OOB_POS, EFUSE_OOB_LEN)) { + FXGMAC_PR("OOB Ctrl bit already exists"); + return true; + } + + value = 0; + value = FXGMAC_SET_REG_BITS(value, EFUSE_OOB_POS, EFUSE_OOB_LEN, 1); + + reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_ADDR_POS, + EFUSE_OP_ADDR_LEN, EFUSE_OOB_ADDR); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_WR_DATA_POS, + EFUSE_OP_WR_DATA_LEN, value & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_WRITE); + writereg(pdata->pAdapter, reg_val, pdata->base_mem + EFUSE_OP_CTRL_0); + + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + + if (!succeed) { + FXGMAC_PR("Fail to writing efuse Byte OOB"); + } + + return succeed; +} + +bool 
fxgmac_efuse_write_led(struct fxgmac_pdata *pdata, u32 value) +{ + bool succeed = false; + unsigned int wait; + u32 reg_val; + + if (!fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, ®_val)) { + return succeed; + } + + if (reg_val == value) { + FXGMAC_PR("Led Ctrl option already exists"); + return true; + } + + reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_ADDR_POS, + EFUSE_OP_ADDR_LEN, EFUSE_LED_ADDR); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_WR_DATA_POS, + EFUSE_OP_WR_DATA_LEN, value & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_WRITE); + writereg(pdata->pAdapter, reg_val, pdata->base_mem + EFUSE_OP_CTRL_0); + + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + + if (!succeed) { + FXGMAC_PR("Fail to writing efuse Byte LED"); + } + + return succeed; +} + +bool fxgmac_efuse_write_data(struct fxgmac_pdata *pdata, u32 offset, u32 value) +{ + bool succeed = false; + unsigned int wait; + u32 reg_val; + + if (!fxgmac_efuse_read_data(pdata, offset, ®_val)) { + return succeed; + } + + if (reg_val == value) { + FXGMAC_PR("offset 0x%x already exists", offset); + return true; + } + + reg_val = 0; + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_ADDR_POS, + EFUSE_OP_ADDR_LEN, offset & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_WR_DATA_POS, + EFUSE_OP_WR_DATA_LEN, value & 0xFF); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_WRITE); + writereg(pdata->pAdapter, reg_val, pdata->base_mem + EFUSE_OP_CTRL_0); + + wait = 1000; + while (wait--) { + 
usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + + if (!succeed) { + FXGMAC_PR("Fail to writing efuse 0x%x Byte LED", offset); + } + + return succeed; +} + +static void fxgmac_read_led_efuse_config(struct fxgmac_pdata *pdata, + struct led_setting *pfirst, + struct led_setting *psecond) +{ + u32 val_high = 0, val_low = 0; + + /* read first area */ + fxgmac_efuse_read_data(pdata, EFUSE_FISRT_UPDATE_ADDR, &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 1), &val_low); + pfirst->disable_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 2), &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 3), &val_low); + pfirst->disable_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 4), &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 5), &val_low); + pfirst->disable_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 6), &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 7), &val_low); + pfirst->disable_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 8), &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 9), &val_low); + pfirst->disable_led_setting[0] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 10), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 11), &val_low); + pfirst->s5_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 12), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 13), &val_low); + pfirst->s5_led_setting[3] = 
((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 14), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 15), &val_low); + pfirst->s5_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 16), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 17), &val_low); + pfirst->s5_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 18), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 19), &val_low); + pfirst->s5_led_setting[0] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 20), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 21), &val_low); + pfirst->s3_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 22), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 23), &val_low); + pfirst->s3_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 24), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 25), &val_low); + pfirst->s3_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 26), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 27), &val_low); + pfirst->s3_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 28), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 29), &val_low); + pfirst->s3_led_setting[0] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 30), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 31), &val_low); + pfirst->s0_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, 
(EFUSE_FISRT_UPDATE_ADDR - 32), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 33), &val_low); + pfirst->s0_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 34), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 35), &val_low); + pfirst->s0_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 36), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 37), &val_low); + pfirst->s0_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 38), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 39), &val_low); + pfirst->s0_led_setting[0] = ((val_high << 8) + val_low); + + /* read second area */ + fxgmac_efuse_read_data(pdata, EFUSE_SECOND_UPDATE_ADDR, &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 1), &val_low); + psecond->disable_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 2), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 3), &val_low); + psecond->disable_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 4), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 5), &val_low); + psecond->disable_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 6), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 7), &val_low); + psecond->disable_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 8), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 9), &val_low); + psecond->disable_led_setting[0] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR 
- 10), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 11), + &val_low); + psecond->s5_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 12), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 13), + &val_low); + psecond->s5_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 14), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 15), + &val_low); + psecond->s5_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 16), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 17), + &val_low); + psecond->s5_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 18), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 19), + &val_low); + psecond->s5_led_setting[0] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 20), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 21), + &val_low); + psecond->s3_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 22), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 23), + &val_low); + psecond->s3_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 24), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 25), + &val_low); + psecond->s3_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 26), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 27), + &val_low); + psecond->s3_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 28), + &val_high); + 
fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 29), + &val_low); + psecond->s3_led_setting[0] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 30), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 31), + &val_low); + psecond->s0_led_setting[4] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 32), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 33), + &val_low); + psecond->s0_led_setting[3] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 34), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 35), + &val_low); + psecond->s0_led_setting[2] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 36), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 37), + &val_low); + psecond->s0_led_setting[1] = ((val_high << 8) + val_low); + + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 38), + &val_high); + fxgmac_efuse_read_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 39), + &val_low); + psecond->s0_led_setting[0] = ((val_high << 8) + val_low); +} + +bool fxgmac_write_led_setting_to_efuse(struct fxgmac_pdata *pdata) +{ + struct led_setting led_config_first; + struct led_setting led_config_second; + bool bfirstflag = false, bsecondflag = false; + bool bsucceed = false; + + fxgmac_read_led_efuse_config(pdata, &led_config_first, + &led_config_second); + + if (0x00 == led_config_first.s0_led_setting[0] && + 0x00 == led_config_first.s0_led_setting[1] && + 0x00 == led_config_first.s0_led_setting[2] && + 0x00 == led_config_first.s0_led_setting[3] && + 0x00 == led_config_first.s0_led_setting[4] && + 0x00 == led_config_first.s3_led_setting[0] && + 0x00 == led_config_first.s3_led_setting[1] && + 0x00 == led_config_first.s3_led_setting[2] && + 0x00 == led_config_first.s3_led_setting[3] && + 0x00 == 
led_config_first.s3_led_setting[4] && + 0x00 == led_config_first.s5_led_setting[0] && + 0x00 == led_config_first.s5_led_setting[1] && + 0x00 == led_config_first.s5_led_setting[2] && + 0x00 == led_config_first.s5_led_setting[3] && + 0x00 == led_config_first.s5_led_setting[4] && + 0x00 == led_config_first.disable_led_setting[0] && + 0x00 == led_config_first.disable_led_setting[1] && + 0x00 == led_config_first.disable_led_setting[2] && + 0x00 == led_config_first.disable_led_setting[3] && + 0x00 == led_config_first.disable_led_setting[4]) { + bfirstflag = true; + } + + if (0x00 == led_config_second.s0_led_setting[0] && + 0x00 == led_config_second.s0_led_setting[1] && + 0x00 == led_config_second.s0_led_setting[2] && + 0x00 == led_config_second.s0_led_setting[3] && + 0x00 == led_config_second.s0_led_setting[4] && + 0x00 == led_config_second.s3_led_setting[0] && + 0x00 == led_config_second.s3_led_setting[1] && + 0x00 == led_config_second.s3_led_setting[2] && + 0x00 == led_config_second.s3_led_setting[3] && + 0x00 == led_config_second.s3_led_setting[4] && + 0x00 == led_config_second.s5_led_setting[0] && + 0x00 == led_config_second.s5_led_setting[1] && + 0x00 == led_config_second.s5_led_setting[2] && + 0x00 == led_config_second.s5_led_setting[3] && + 0x00 == led_config_second.s5_led_setting[4] && + 0x00 == led_config_second.disable_led_setting[0] && + 0x00 == led_config_second.disable_led_setting[1] && + 0x00 == led_config_second.disable_led_setting[2] && + 0x00 == led_config_second.disable_led_setting[3] && + 0x00 == led_config_second.disable_led_setting[4]) { + bsecondflag = true; + } + + if (bfirstflag && bsecondflag) { + /* update first area */ + fxgmac_efuse_write_data( + pdata, EFUSE_FISRT_UPDATE_ADDR, + (pdata->ledconfig.disable_led_setting[4] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 1), + pdata->ledconfig.disable_led_setting[4]); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 2), + 
(pdata->ledconfig.disable_led_setting[3] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 3), + pdata->ledconfig.disable_led_setting[3]); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 4), + (pdata->ledconfig.disable_led_setting[2] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 5), + pdata->ledconfig.disable_led_setting[2]); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 6), + (pdata->ledconfig.disable_led_setting[1] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 7), + pdata->ledconfig.disable_led_setting[1]); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 8), + (pdata->ledconfig.disable_led_setting[0] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_FISRT_UPDATE_ADDR - 9), + pdata->ledconfig.disable_led_setting[0]); + + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 10), + (pdata->ledconfig.s5_led_setting[4] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 11), + pdata->ledconfig.s5_led_setting[4]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 12), + (pdata->ledconfig.s5_led_setting[3] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 13), + pdata->ledconfig.s5_led_setting[3]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 14), + (pdata->ledconfig.s5_led_setting[2] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 15), + pdata->ledconfig.s5_led_setting[2]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 16), + (pdata->ledconfig.s5_led_setting[1] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 17), + pdata->ledconfig.s5_led_setting[1]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 18), + (pdata->ledconfig.s5_led_setting[0] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 19), + pdata->ledconfig.s5_led_setting[0]); + 
+ fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 20), + (pdata->ledconfig.s3_led_setting[4] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 21), + pdata->ledconfig.s3_led_setting[4]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 22), + (pdata->ledconfig.s3_led_setting[3] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 23), + pdata->ledconfig.s3_led_setting[3]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 24), + (pdata->ledconfig.s3_led_setting[2] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 25), + pdata->ledconfig.s3_led_setting[2]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 26), + (pdata->ledconfig.s3_led_setting[1] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 27), + pdata->ledconfig.s3_led_setting[1]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 28), + (pdata->ledconfig.s3_led_setting[0] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 29), + pdata->ledconfig.s3_led_setting[0]); + + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 30), + (pdata->ledconfig.s0_led_setting[4] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 31), + pdata->ledconfig.s0_led_setting[4]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 32), + (pdata->ledconfig.s0_led_setting[3] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 33), + pdata->ledconfig.s0_led_setting[3]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 34), + (pdata->ledconfig.s0_led_setting[2] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 35), + pdata->ledconfig.s0_led_setting[2]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 36), + (pdata->ledconfig.s0_led_setting[1] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 37), + 
pdata->ledconfig.s0_led_setting[1]); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 38), + (pdata->ledconfig.s0_led_setting[0] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_FISRT_UPDATE_ADDR - 39), + pdata->ledconfig.s0_led_setting[0]); + + bsucceed = true; + } else if (!bfirstflag && bsecondflag) { + /* update second area */ + fxgmac_efuse_write_data( + pdata, EFUSE_SECOND_UPDATE_ADDR, + (pdata->ledconfig.disable_led_setting[4] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 1), + pdata->ledconfig.disable_led_setting[4]); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 2), + (pdata->ledconfig.disable_led_setting[3] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 3), + pdata->ledconfig.disable_led_setting[3]); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 4), + (pdata->ledconfig.disable_led_setting[2] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 5), + pdata->ledconfig.disable_led_setting[2]); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 6), + (pdata->ledconfig.disable_led_setting[1] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 7), + pdata->ledconfig.disable_led_setting[1]); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 8), + (pdata->ledconfig.disable_led_setting[0] >> 8) & 0xFF); + fxgmac_efuse_write_data( + pdata, (EFUSE_SECOND_UPDATE_ADDR - 9), + pdata->ledconfig.disable_led_setting[0]); + + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 10), + (pdata->ledconfig.s5_led_setting[4] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 11), + pdata->ledconfig.s5_led_setting[4]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 12), + (pdata->ledconfig.s5_led_setting[3] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 13), + pdata->ledconfig.s5_led_setting[3]); + 
fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 14), + (pdata->ledconfig.s5_led_setting[2] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 15), + pdata->ledconfig.s5_led_setting[2]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 16), + (pdata->ledconfig.s5_led_setting[1] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 17), + pdata->ledconfig.s5_led_setting[1]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 18), + (pdata->ledconfig.s5_led_setting[0] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 19), + pdata->ledconfig.s5_led_setting[0]); + + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 20), + (pdata->ledconfig.s3_led_setting[4] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 21), + pdata->ledconfig.s3_led_setting[4]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 22), + (pdata->ledconfig.s3_led_setting[3] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 23), + pdata->ledconfig.s3_led_setting[3]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 24), + (pdata->ledconfig.s3_led_setting[2] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 25), + pdata->ledconfig.s3_led_setting[2]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 26), + (pdata->ledconfig.s3_led_setting[1] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 27), + pdata->ledconfig.s3_led_setting[1]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 28), + (pdata->ledconfig.s3_led_setting[0] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 29), + pdata->ledconfig.s3_led_setting[0]); + + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 30), + (pdata->ledconfig.s0_led_setting[4] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 31), + 
pdata->ledconfig.s0_led_setting[4]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 32), + (pdata->ledconfig.s0_led_setting[3] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 33), + pdata->ledconfig.s0_led_setting[3]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 34), + (pdata->ledconfig.s0_led_setting[2] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 35), + pdata->ledconfig.s0_led_setting[2]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 36), + (pdata->ledconfig.s0_led_setting[1] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 37), + pdata->ledconfig.s0_led_setting[1]); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 38), + (pdata->ledconfig.s0_led_setting[0] >> + 8) & 0xFF); + fxgmac_efuse_write_data(pdata, (EFUSE_SECOND_UPDATE_ADDR - 39), + pdata->ledconfig.s0_led_setting[0]); + + bsucceed = true; + } + + return bsucceed; +} + +bool fxgmac_read_led_setting_from_efuse(struct fxgmac_pdata *pdata) +{ + struct led_setting led_config_first; + struct led_setting led_config_second; + bool bfirstflag = false, bsecondflag = false; + bool bsucceed = false; + + fxgmac_read_led_efuse_config(pdata, &led_config_first, + &led_config_second); + + if (0x00 == led_config_first.s0_led_setting[0] && + 0x00 == led_config_first.s0_led_setting[1] && + 0x00 == led_config_first.s0_led_setting[2] && + 0x00 == led_config_first.s0_led_setting[3] && + 0x00 == led_config_first.s0_led_setting[4] && + 0x00 == led_config_first.s3_led_setting[0] && + 0x00 == led_config_first.s3_led_setting[1] && + 0x00 == led_config_first.s3_led_setting[2] && + 0x00 == led_config_first.s3_led_setting[3] && + 0x00 == led_config_first.s3_led_setting[4] && + 0x00 == led_config_first.s5_led_setting[0] && + 0x00 == led_config_first.s5_led_setting[1] && + 0x00 == led_config_first.s5_led_setting[2] && + 0x00 == led_config_first.s5_led_setting[3] && + 0x00 == 
led_config_first.s5_led_setting[4] && + 0x00 == led_config_first.disable_led_setting[0] && + 0x00 == led_config_first.disable_led_setting[1] && + 0x00 == led_config_first.disable_led_setting[2] && + 0x00 == led_config_first.disable_led_setting[3] && + 0x00 == led_config_first.disable_led_setting[4]) { + bfirstflag = true; + } + + if (0x00 == led_config_second.s0_led_setting[0] && + 0x00 == led_config_second.s0_led_setting[1] && + 0x00 == led_config_second.s0_led_setting[2] && + 0x00 == led_config_second.s0_led_setting[3] && + 0x00 == led_config_second.s0_led_setting[4] && + 0x00 == led_config_second.s3_led_setting[0] && + 0x00 == led_config_second.s3_led_setting[1] && + 0x00 == led_config_second.s3_led_setting[2] && + 0x00 == led_config_second.s3_led_setting[3] && + 0x00 == led_config_second.s3_led_setting[4] && + 0x00 == led_config_second.s5_led_setting[0] && + 0x00 == led_config_second.s5_led_setting[1] && + 0x00 == led_config_second.s5_led_setting[2] && + 0x00 == led_config_second.s5_led_setting[3] && + 0x00 == led_config_second.s5_led_setting[4] && + 0x00 == led_config_second.disable_led_setting[0] && + 0x00 == led_config_second.disable_led_setting[1] && + 0x00 == led_config_second.disable_led_setting[2] && + 0x00 == led_config_second.disable_led_setting[3] && + 0x00 == led_config_second.disable_led_setting[4]) { + bsecondflag = true; + } + + if (!bfirstflag && bsecondflag) { + /* read first area */ + memcpy(&pdata->led, &led_config_first, + sizeof(struct led_setting)); + bsucceed = true; + } else if (!bfirstflag && !bsecondflag) { + /* read second area */ + memcpy(&pdata->led, &led_config_second, + sizeof(struct led_setting)); + bsucceed = true; + } + + return bsucceed; +} \ No newline at end of file diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.h new file mode 100644 index 000000000000..fa0446958719 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.h @@ -0,0 +1,25 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. */ + +#ifndef __FUXI_EFUSE_H__ +#define __FUXI_EFUSE_H__ + + +bool fxgmac_read_patch_from_efuse(struct fxgmac_pdata *pdata, u32 offset, u32 *value); /* read patch per register offset. */ +bool fxgmac_read_patch_from_efuse_per_index(struct fxgmac_pdata *pdata, u8 index, u32 *offset, u32 *value); /* read patch per 0-based index. */ +bool fxgmac_write_patch_to_efuse(struct fxgmac_pdata *pdata, u32 offset, u32 value); +bool fxgmac_write_patch_to_efuse_per_index(struct fxgmac_pdata *pdata, u8 index, u32 offset, u32 value); +bool fxgmac_read_mac_subsys_from_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, u32 *subsys, u32 *revid); +bool fxgmac_write_mac_subsys_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, u32 *subsys, u32 *revid); +bool fxgmac_write_mac_addr_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr); +bool fxgmac_read_subsys_from_efuse(struct fxgmac_pdata *pdata, u32 *subsys, u32 *revid); +bool fxgmac_write_subsys_to_efuse(struct fxgmac_pdata *pdata, u32 *subsys, u32 *revid); +bool fxgmac_efuse_load(struct fxgmac_pdata *pdata); +bool fxgmac_efuse_read_data(struct fxgmac_pdata *pdata, u32 offset, u32 *value); +bool fxgmac_efuse_write_data(struct fxgmac_pdata *pdata, u32 offset, u32 value); +bool fxgmac_efuse_write_oob(struct fxgmac_pdata *pdata); +bool fxgmac_efuse_write_led(struct fxgmac_pdata *pdata, u32 value); +bool fxgmac_read_led_setting_from_efuse(struct fxgmac_pdata *pdata); +bool fxgmac_write_led_setting_to_efuse(struct fxgmac_pdata *pdata); + +#endif /* __FUXI_EFUSE_H__ */ \ No newline at end of file diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c new file mode 100644 index 000000000000..63cbf948cbfa --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c @@ -0,0 +1,939 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. 
*/ + +#include +#include + +#include "fuxi-os.h" +#include "fuxi-gmac.h" +#include "fuxi-gmac-reg.h" + +MODULE_LICENSE("Dual BSD/GPL"); + +static int debug = 16; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "FUXI ethernet debug level (0=none,...,16=all)"); + +static unsigned char dev_addr[6] = { 0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7 }; + +static void fxgmac_read_mac_addr(struct fxgmac_pdata *pdata) +{ + struct net_device *netdev = pdata->netdev; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + DPRINTK("read mac from eFuse\n"); + + /* if efuse have mac addr, use it.if not, use static mac address. */ + hw_ops->read_mac_subsys_from_efuse(pdata, pdata->mac_addr, NULL, NULL); + if (ETH_IS_ZEROADDRESS(pdata->mac_addr)) { + /* Currently it uses a static mac address for test */ + memcpy(pdata->mac_addr, dev_addr, netdev->addr_len); + } +} + +static void fxgmac_default_config(struct fxgmac_pdata *pdata) +{ + pdata->tx_osp_mode = DMA_OSP_ENABLE; + pdata->tx_sf_mode = MTL_TSF_ENABLE; + pdata->rx_sf_mode = MTL_RSF_DISABLE; /* MTL_RSF_DISABLE 20210514 */ + pdata->pblx8 = DMA_PBL_X8_ENABLE; /* DMA_PBL_X8_ENABLE 20210514 */ + pdata->tx_pbl = DMA_PBL_32; + pdata->rx_pbl = DMA_PBL_32; /* DMA_PBL_32 20210514 */ + pdata->tx_threshold = MTL_TX_THRESHOLD_128; + pdata->rx_threshold = MTL_RX_THRESHOLD_128; + pdata->tx_pause = 1; + pdata->rx_pause = 1; + +#if FXGMAC_RSS_FEATURE_ENABLED + pdata->rss = 1; +#else + pdata->rss = 0; +#endif + /* open interrupt moderation default */ + pdata->intr_mod = 1; + pdata->crc_check = 1; + + /* set based on phy status. 
pdata->phy_speed = SPEED_1000; */ + pdata->sysclk_rate = FXGMAC_SYSCLOCK; + pdata->phy_autoeng = AUTONEG_ENABLE; /* default to autoneg */ + pdata->phy_duplex = DUPLEX_FULL; + pdata->expansion.phy_link = false; + pdata->phy_speed = SPEED_1000; + + /* default to magic */ + pdata->expansion.wol = WAKE_MAGIC; + + strscpy(pdata->drv_name, FXGMAC_DRV_NAME, sizeof(pdata->drv_name)); + strscpy(pdata->drv_ver, FXGMAC_DRV_VERSION, sizeof(pdata->drv_ver)); + + printk("FXGMAC_DRV_NAME:%s, FXGMAC_DRV_VERSION:%s\n", FXGMAC_DRV_NAME, + FXGMAC_DRV_VERSION); +} + +static void fxgmac_init_all_ops(struct fxgmac_pdata *pdata) +{ + fxgmac_init_desc_ops(&pdata->desc_ops); + fxgmac_init_hw_ops(&pdata->hw_ops); + + DPRINTK("register desc_ops and hw ops\n"); +} + +int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct net_device *netdev = pdata->netdev; + unsigned int i, dma_width; + int ret; + + /* Set all the function pointers */ + fxgmac_init_all_ops(pdata); + + /* Set default configuration data */ + fxgmac_default_config(pdata); + + /* Set irq, base_addr, MAC address, */ + netdev->irq = pdata->dev_irq; + netdev->base_addr = (unsigned long)pdata->base_mem; + fxgmac_read_mac_addr(pdata); + eth_hw_addr_set(netdev, pdata->mac_addr); + + if (save_private_reg) { + hw_ops->save_nonstick_reg(pdata); + } + + /* reset here to get hw features correctly */ + hw_ops->exit(pdata); + + /* Populate the hardware features */ + fxgmac_get_all_hw_features(pdata); + fxgmac_print_all_hw_features(pdata); + + /* TODO: Set the PHY mode to XLGMII */ + + /* Set the DMA mask */ +#ifdef CONFIG_ARM64 + dma_width = FUXI_DMA_BIT_MASK; +#else + dma_width = pdata->hw_feat.dma_width; +#endif + ret = dma_set_mask_and_coherent(pdata->dev, DMA_BIT_MASK(dma_width)); + if (ret) { + dev_err(pdata->dev, "dma_set_mask_and_coherent failed\n"); + return ret; + } + + /* Channel and ring params initializtion + * pdata->channel_count; + * pdata->tx_ring_count; 
+ * pdata->rx_ring_count; + * pdata->tx_desc_count; + * pdata->rx_desc_count; + */ + BUILD_BUG_ON_NOT_POWER_OF_2(FXGMAC_TX_DESC_CNT); + pdata->tx_desc_count = FXGMAC_TX_DESC_CNT; + if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) { + dev_err(pdata->dev, "tx descriptor count (%d) is not valid\n", + pdata->tx_desc_count); + ret = -EINVAL; + return ret; + } + BUILD_BUG_ON_NOT_POWER_OF_2(FXGMAC_RX_DESC_CNT); + pdata->rx_desc_count = FXGMAC_RX_DESC_CNT; + if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) { + dev_err(pdata->dev, "rx descriptor count (%d) is not valid\n", + pdata->rx_desc_count); + ret = -EINVAL; + return ret; + } + + pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(), + pdata->hw_feat.tx_ch_cnt); + pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count, + pdata->hw_feat.tx_q_cnt); + pdata->tx_q_count = pdata->tx_ring_count; + +#if !(FXGMAC_NUM_OF_TX_Q_USED) + ret = netif_set_real_num_tx_queues(netdev, pdata->tx_q_count); +#else + ret = netif_set_real_num_tx_queues( + netdev, FXGMAC_NUM_OF_TX_Q_USED /*pdata->tx_q_count*/); +#endif + + DPRINTK("num_online_cpus:%u, tx_ch_cnt:%u, tx_q_cnt:%u, tx_ring_count:%u\n", + num_online_cpus(), pdata->hw_feat.tx_ch_cnt, + pdata->hw_feat.tx_q_cnt, pdata->tx_ring_count); + + if (ret) { + dev_err(pdata->dev, "error setting real tx queue count\n"); + return ret; + } + + pdata->rx_ring_count = min_t(unsigned int, + netif_get_num_default_rss_queues(), + pdata->hw_feat.rx_ch_cnt); +#ifdef FXGMAC_ONE_CHANNEL + pdata->rx_ring_count = 1; + pdata->hw_feat.rx_q_cnt = pdata->rx_ring_count; +#else + pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count, + pdata->hw_feat.rx_q_cnt); +#endif + pdata->rx_q_count = pdata->rx_ring_count; + ret = netif_set_real_num_rx_queues(netdev, pdata->rx_q_count); + if (ret) { + dev_err(pdata->dev, "error setting real rx queue count\n"); + return ret; + } + + pdata->channel_count = + max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); + + 
DPRINTK("default rss queues:%u, rx_ch_cnt:%u, rx_q_cnt:%u, rx_ring_count:%u\n", + netif_get_num_default_rss_queues(), pdata->hw_feat.rx_ch_cnt, + pdata->hw_feat.rx_q_cnt, pdata->rx_ring_count); + DPRINTK("channel_count:%u, netdev tx channel_num=%u\n", + pdata->channel_count, netdev->num_tx_queues); + + /* Initialize RSS hash key and lookup table */ +#if FXGMAC_RSS_HASH_KEY_LINUX + netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key)); +#else + /* this is for test only. HW does not want to change Hash key */ + hw_ops->get_rss_hash_key(pdata, (u8 *)pdata->rss_key); +#endif + +#if FXGMAC_MSIX_CH0RXDIS_EN + for (i = 0; i < FXGMAC_RSS_MAX_TABLE_SIZE; i++) { + pdata->rss_table[i] = FXGMAC_SET_REG_BITS( + pdata->rss_table[i], MAC_RSSDR_DMCH_POS, + MAC_RSSDR_DMCH_LEN, (i % 3) + 1); /* eliminate ch0 */ + } +#else + for (i = 0; i < FXGMAC_RSS_MAX_TABLE_SIZE; i++) { + pdata->rss_table[i] = FXGMAC_SET_REG_BITS( + pdata->rss_table[i], MAC_RSSDR_DMCH_POS, + MAC_RSSDR_DMCH_LEN, + /* note, rx_ring_count should be equal to IRQ requsted + * for MSIx, 4 + */ + i % pdata->rx_ring_count); + } +#endif + + pdata->rss_options = FXGMAC_SET_REG_BITS(pdata->rss_options, + MAC_RSSCR_IP4TE_POS, + MAC_RSSCR_IP4TE_LEN, 1); + pdata->rss_options = FXGMAC_SET_REG_BITS(pdata->rss_options, + MAC_RSSCR_TCP4TE_POS, + MAC_RSSCR_TCP4TE_LEN, 1); + pdata->rss_options = FXGMAC_SET_REG_BITS(pdata->rss_options, + MAC_RSSCR_UDP4TE_POS, + MAC_RSSCR_UDP4TE_LEN, 1); + + /* config MTU supported, 20210726 */ + netdev->min_mtu = ETH_MIN_MTU; + netdev->max_mtu = + FXGMAC_JUMBO_PACKET_MTU + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + /* + * netdev->extended->min_mtu = netdev->min_mtu; + * netdev->extended->max_mtu = netdev->max_mtu; + */ + + DPRINTK("rss_options:0x%x\n", pdata->rss_options); + + /* Set device operations */ + netdev->netdev_ops = fxgmac_get_netdev_ops(); + netdev->ethtool_ops = fxgmac_get_ethtool_ops(); + + /* Set device features */ + if (pdata->hw_feat.tso) { + netdev->hw_features = NETIF_F_TSO; + 
netdev->hw_features |= NETIF_F_TSO6; + netdev->hw_features |= NETIF_F_SG; + netdev->hw_features |= NETIF_F_IP_CSUM; + netdev->hw_features |= NETIF_F_IPV6_CSUM; + } else if (pdata->hw_feat.tx_coe) { + netdev->hw_features = NETIF_F_IP_CSUM; + netdev->hw_features |= NETIF_F_IPV6_CSUM; + } + + if (pdata->hw_feat.rx_coe) { + netdev->hw_features |= NETIF_F_RXCSUM; + netdev->hw_features |= NETIF_F_GRO; + } + + if (pdata->hw_feat.rss) { + netdev->hw_features |= + NETIF_F_RXHASH; /* it is NETIF_F_RXHASH_BIT finally */ + } + + netdev->vlan_features |= netdev->hw_features; + + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + pdata->vlan_strip = 1; + if (pdata->hw_feat.sa_vlan_ins) { + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + } +#if FXGMAC_FILTER_SINGLE_VLAN_ENABLED + /* only can filter one vlan id */ + pdata->hw_feat.vlhash = 1; +#else + pdata->hw_feat.vlhash = 0; +#endif + + if (pdata->hw_feat.vlhash) { + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + pdata->vlan_filter = 1; + } + + netdev->features |= netdev->hw_features; + pdata->expansion.netdev_features = netdev->features; + + netdev->priv_flags |= IFF_UNICAST_FLT; + + /* Use default watchdog timeout */ + netdev->watchdog_timeo = + msecs_to_jiffies(5000); /* refer to sunxi-gmac, 5s */ + netdev->gso_max_size = NIC_MAX_TCP_OFFLOAD_SIZE; + + /* Tx coalesce parameters initialization */ + pdata->tx_usecs = FXGMAC_INIT_DMA_TX_USECS; + pdata->tx_frames = FXGMAC_INIT_DMA_TX_FRAMES; + + /* Rx coalesce parameters initialization */ + pdata->rx_riwt = hw_ops->usec_to_riwt(pdata, FXGMAC_INIT_DMA_RX_USECS); + + pdata->rx_usecs = FXGMAC_INIT_DMA_RX_USECS; + pdata->rx_frames = FXGMAC_INIT_DMA_RX_FRAMES; + + DPRINTK("fxgmac_init callout, ok.\n"); + + return 0; +} + +static void fxgmac_init_interrupt_scheme(struct fxgmac_pdata *pdata) +{ +#ifdef CONFIG_PCI_MSI + int vectors, rc, i, req_vectors; + /* check cpu core number. 
+ * since we have 4 channels, we must ensure the number of cpu core > 4 + * otherwise, just roll back to legacy + */ + vectors = num_online_cpus(); + DPRINTK("num of cpu=%d\n", vectors); + if (vectors >= FXGMAC_MAX_DMA_CHANNELS) { + /* 0-3 for rx, 4 for tx, 5 for phy */ + req_vectors = FXGMAC_MSIX_INT_NUMS; + pdata->expansion.msix_entries = kcalloc( + req_vectors, sizeof(struct msix_entry), GFP_KERNEL); + if (!pdata->expansion.msix_entries) { + DPRINTK("MSIx, kcalloc err for msix entries, rollback to MSI..\n"); + goto enable_msi_interrupt; + } else { + for (i = 0; i < req_vectors; i++) + pdata->expansion.msix_entries[i].entry = i; + + rc = pci_enable_msix_range( + pdata->pdev, pdata->expansion.msix_entries, + req_vectors, req_vectors); + if (rc < 0) { + DPRINTK("enable MSIx failed,%d.\n", rc); + req_vectors = 0; /* indicate failure */ + } else { + req_vectors = rc; + } + + if (req_vectors >= FXGMAC_MAX_DMA_CHANNELS_PLUS_1TX) { + DPRINTK("enable MSIx ok, cpu=%d, vectors=%d.\n", + vectors, req_vectors); + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_INTERRUPT_POS, + FXGMAC_FLAG_INTERRUPT_LEN, + FXGMAC_FLAG_MSIX_ENABLED); + pdata->per_channel_irq = 1; + pdata->expansion.phy_irq = + pdata->expansion + .msix_entries[MSI_ID_PHY_OTHER] + .vector; + return; + } else if (req_vectors) { + DPRINTK("enable MSIx with only %d vector, while we need %d, rollback to MSI.\n", + req_vectors, vectors); + /* roll back to msi */ + pci_disable_msix(pdata->pdev); + kfree(pdata->expansion.msix_entries); + pdata->expansion.msix_entries = NULL; + req_vectors = 0; + } else { + DPRINTK("enable MSIx failure and clear msix entries.\n"); + /* roll back to msi */ + kfree(pdata->expansion.msix_entries); + pdata->expansion.msix_entries = NULL; + req_vectors = 0; + } + } + } + +enable_msi_interrupt: + rc = pci_enable_msi(pdata->pdev); + if (rc < 0) { + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, 
FXGMAC_FLAG_INTERRUPT_POS, + FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_LEGACY_ENABLED); + DPRINTK("enable MSI failure, rollback to LEGACY.\n"); + } else { + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, FXGMAC_FLAG_INTERRUPT_POS, + FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_MSI_ENABLED); + pdata->dev_irq = pdata->pdev->irq; + DPRINTK("enable MSI ok, irq=%d.\n", pdata->pdev->irq); + } +#else + pdata = pdata; +#endif +} + +int fxgmac_drv_probe(struct device *dev, struct fxgmac_resources *res) +{ + struct fxgmac_pdata *pdata; + struct net_device *netdev; + int ret; + + netdev = alloc_etherdev_mq(sizeof(struct fxgmac_pdata), + FXGMAC_MAX_DMA_CHANNELS); + + if (!netdev) { + dev_err(dev, "alloc_etherdev failed\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(netdev, dev); + dev_set_drvdata(dev, netdev); + pdata = netdev_priv(netdev); + pdata->dev = dev; + pdata->pdev = to_pci_dev(dev); + pdata->netdev = netdev; + + pdata->dev_irq = res->irq; + + /* default to legacy interrupt */ + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, FXGMAC_FLAG_INTERRUPT_POS, + FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_LEGACY_ENABLED); + pdata->expansion.phy_irq = pdata->dev_irq; + + fxgmac_init_interrupt_scheme(pdata); + + pdata->expansion.current_state = CURRENT_STATE_INIT; + + pdata->msg_enable = NETIF_MSG_DRV; + DPRINTK("netif msg_enable init to %08x\n", pdata->msg_enable); + + pdata->mac_regs = res->addr; + pdata->base_mem = res->addr; + pdata->mac_regs = pdata->mac_regs + FUXI_MAC_REGS_OFFSET; + + ret = fxgmac_init(pdata, true); + if (ret) { + dev_err(dev, "fxgmac init failed\n"); + goto err_free_netdev; + } + + pdata->hw_ops.read_led_config(pdata); + + netif_carrier_off(netdev); + ret = register_netdev(netdev); + if (ret) { + dev_err(dev, "net device registration failed\n"); + goto err_free_netdev; + } + if (netif_msg_drv(pdata)) + DPRINTK("fxgamc_drv_prob callout, netdev num_tx_q=%u\n", + netdev->num_tx_queues); + +#ifdef 
HAVE_FXGMAC_DEBUG_FS + fxgmac_dbg_init(pdata); + fxgmac_dbg_adapter_init(pdata); +#endif /* HAVE_FXGMAC_DEBUG_FS */ + + return 0; + +err_free_netdev: + free_netdev(netdev); + DPRINTK("fxgamc_drv_prob callout with err \n"); + + return ret; +} + +int fxgmac_drv_remove(struct device *dev) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + +#ifdef HAVE_FXGMAC_DEBUG_FS + fxgmac_dbg_adapter_exit(pdata); +#endif /*HAVE_FXGMAC_DEBUG_FS */ + hw_ops->led_under_shutdown(pdata); + + unregister_netdev(netdev); + free_netdev(netdev); + + return 0; +} + +void fxgmac_dump_tx_desc(struct fxgmac_pdata *pdata, struct fxgmac_ring *ring, + unsigned int idx, unsigned int count, + unsigned int flag) +{ + struct fxgmac_desc_data *desc_data; + struct fxgmac_dma_desc *dma_desc; + + while (count--) { + desc_data = FXGMAC_GET_DESC_DATA(ring, idx); + dma_desc = desc_data->dma_desc; + + netdev_dbg(pdata->netdev, + "TX: dma_desc=%p, dma_desc_addr=%pad\n", + desc_data->dma_desc, &desc_data->dma_desc_addr); + netdev_dbg(pdata->netdev, + "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx, + (flag == 1) ? 
"QUEUED FOR TX" : "TX BY DEVICE", + le32_to_cpu(dma_desc->desc0), + le32_to_cpu(dma_desc->desc1), + le32_to_cpu(dma_desc->desc2), + le32_to_cpu(dma_desc->desc3)); + + idx++; + } +} + +void fxgmac_dump_rx_desc(struct fxgmac_pdata *pdata, struct fxgmac_ring *ring, + unsigned int idx) +{ + struct fxgmac_desc_data *desc_data; + struct fxgmac_dma_desc *dma_desc; + + desc_data = FXGMAC_GET_DESC_DATA(ring, idx); + dma_desc = desc_data->dma_desc; + + netdev_dbg(pdata->netdev, "RX: dma_desc=%p, dma_desc_addr=%pad\n", + desc_data->dma_desc, &desc_data->dma_desc_addr); + netdev_dbg(pdata->netdev, + "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", + idx, le32_to_cpu(dma_desc->desc0), + le32_to_cpu(dma_desc->desc1), le32_to_cpu(dma_desc->desc2), + le32_to_cpu(dma_desc->desc3)); +} + +void fxgmac_dbg_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) +{ + struct ethhdr *eth = (struct ethhdr *)skb->data; + unsigned char buffer[128]; + unsigned int i; + + netdev_dbg(netdev, "\n************** SKB dump ****************\n"); + + netdev_dbg(netdev, "%s packet of %d bytes\n", (tx_rx ? 
"TX" : "RX"), + skb->len); + + netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest); + netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source); + netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto)); + + for (i = 0; i < skb->len; i += 32) { + unsigned int len = min(skb->len - i, 32U); + + hex_dump_to_buffer(&skb->data[i], len, 32, 1, buffer, + sizeof(buffer), false); + netdev_dbg(netdev, " %#06x: %s\n", i, buffer); + } + + netdev_dbg(netdev, "\n************** SKB dump ****************\n"); +} + +void fxgmac_print_pkt(struct net_device *netdev, struct sk_buff *skb, + bool tx_rx) +{ + unsigned char buffer[128]; + unsigned int i; + + for (i = 0; i < skb->len; i += 32) { + unsigned int len = min(skb->len - i, 32U); + + hex_dump_to_buffer(&skb->data[i], len, 32, 1, buffer, + sizeof(buffer), false); + DPRINTK(" %#06x: %s\n", i, buffer); + } +} + +void fxgmac_get_all_hw_features(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_features *hw_feat = &pdata->hw_feat; + unsigned int mac_hfr0, mac_hfr1, mac_hfr2, mac_hfr3; + + mac_hfr0 = readl(pdata->mac_regs + MAC_HWF0R); + mac_hfr1 = readl(pdata->mac_regs + MAC_HWF1R); + mac_hfr2 = readl(pdata->mac_regs + MAC_HWF2R); + mac_hfr3 = readl(pdata->mac_regs + MAC_HWF3R); + + memset(hw_feat, 0, sizeof(*hw_feat)); + + hw_feat->version = readl(pdata->mac_regs + MAC_VR); + if (netif_msg_drv(pdata)) + DPRINTK("get offset 0x110, ver=%#x\n", + readl(pdata->mac_regs + 0x110)); + + /* Hardware feature register 0 */ + hw_feat->phyifsel = FXGMAC_GET_REG_BITS( + mac_hfr0, MAC_HWF0R_ACTPHYIFSEL_POS, MAC_HWF0R_ACTPHYIFSEL_LEN); + hw_feat->vlhash = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_VLHASH_POS, + MAC_HWF0R_VLHASH_LEN); + hw_feat->sma = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_SMASEL_POS, + MAC_HWF0R_SMASEL_LEN); + hw_feat->rwk = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_RWKSEL_POS, + MAC_HWF0R_RWKSEL_LEN); + hw_feat->mgk = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_MGKSEL_POS, + MAC_HWF0R_MGKSEL_LEN); + hw_feat->mmc = 
FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_MMCSEL_POS, + MAC_HWF0R_MMCSEL_LEN); + hw_feat->aoe = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_ARPOFFSEL_POS, + MAC_HWF0R_ARPOFFSEL_LEN); + hw_feat->ts = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_TSSEL_POS, + MAC_HWF0R_TSSEL_LEN); + hw_feat->eee = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_EEESEL_POS, + MAC_HWF0R_EEESEL_LEN); + hw_feat->tx_coe = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_TXCOESEL_POS, + MAC_HWF0R_TXCOESEL_LEN); + hw_feat->rx_coe = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_RXCOESEL_POS, + MAC_HWF0R_RXCOESEL_LEN); + hw_feat->addn_mac = FXGMAC_GET_REG_BITS(mac_hfr0, + MAC_HWF0R_ADDMACADRSEL_POS, + MAC_HWF0R_ADDMACADRSEL_LEN); + hw_feat->ts_src = FXGMAC_GET_REG_BITS(mac_hfr0, MAC_HWF0R_TSSTSSEL_POS, + MAC_HWF0R_TSSTSSEL_LEN); + hw_feat->sa_vlan_ins = FXGMAC_GET_REG_BITS( + mac_hfr0, MAC_HWF0R_SAVLANINS_POS, MAC_HWF0R_SAVLANINS_LEN); + + /* Hardware feature register 1 */ + hw_feat->rx_fifo_size = FXGMAC_GET_REG_BITS( + mac_hfr1, MAC_HWF1R_RXFIFOSIZE_POS, MAC_HWF1R_RXFIFOSIZE_LEN); + hw_feat->tx_fifo_size = FXGMAC_GET_REG_BITS( + mac_hfr1, MAC_HWF1R_TXFIFOSIZE_POS, MAC_HWF1R_TXFIFOSIZE_LEN); + hw_feat->adv_ts_hi = FXGMAC_GET_REG_BITS( + mac_hfr1, MAC_HWF1R_ADVTHWORD_POS, MAC_HWF1R_ADVTHWORD_LEN); + hw_feat->dma_width = FXGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_ADDR64_POS, + MAC_HWF1R_ADDR64_LEN); + hw_feat->dcb = FXGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_DCBEN_POS, + MAC_HWF1R_DCBEN_LEN); + hw_feat->sph = FXGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_SPHEN_POS, + MAC_HWF1R_SPHEN_LEN); + hw_feat->tso = FXGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_TSOEN_POS, + MAC_HWF1R_TSOEN_LEN); + hw_feat->dma_debug = FXGMAC_GET_REG_BITS( + mac_hfr1, MAC_HWF1R_DBGMEMA_POS, MAC_HWF1R_DBGMEMA_LEN); +#if (FXGMAC_RSS_FEATURE_ENABLED) + hw_feat->rss = 1; +#else + /* = FXGMAC_GET_REG_BITS(mac_hfr1, + * MAC_HWF1R_RSSEN_POS, + * MAC_HWF1R_RSSEN_LEN); + */ + hw_feat->rss = 0; +#endif + /* FXGMAC_GET_REG_BITS(mac_hfr1, + * MAC_HWF1R_NUMTC_POS, + * 
MAC_HWF1R_NUMTC_LEN); + */ + hw_feat->tc_cnt = 3; + hw_feat->avsel = FXGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_AVSEL_POS, + MAC_HWF1R_AVSEL_LEN); + hw_feat->ravsel = FXGMAC_GET_REG_BITS(mac_hfr1, MAC_HWF1R_RAVSEL_POS, + MAC_HWF1R_RAVSEL_LEN); + hw_feat->hash_table_size = FXGMAC_GET_REG_BITS( + mac_hfr1, MAC_HWF1R_HASHTBLSZ_POS, MAC_HWF1R_HASHTBLSZ_LEN); + hw_feat->l3l4_filter_num = FXGMAC_GET_REG_BITS( + mac_hfr1, MAC_HWF1R_L3L4FNUM_POS, MAC_HWF1R_L3L4FNUM_LEN); + + + /* Hardware feature register 2 + * FXGMAC_GET_REG_BITS(mac_hfr2, + * MAC_HWF2R_RXQCNT_POS, + * MAC_HWF2R_RXQCNT_LEN) + */ + hw_feat->rx_q_cnt = 3; + hw_feat->tx_q_cnt = FXGMAC_GET_REG_BITS(mac_hfr2, MAC_HWF2R_TXQCNT_POS, + MAC_HWF2R_TXQCNT_LEN); + hw_feat->rx_ch_cnt = FXGMAC_GET_REG_BITS( + mac_hfr2, MAC_HWF2R_RXCHCNT_POS, MAC_HWF2R_RXCHCNT_LEN); + hw_feat->tx_ch_cnt = FXGMAC_GET_REG_BITS( + mac_hfr2, MAC_HWF2R_TXCHCNT_POS, MAC_HWF2R_TXCHCNT_LEN); + hw_feat->pps_out_num = FXGMAC_GET_REG_BITS( + mac_hfr2, MAC_HWF2R_PPSOUTNUM_POS, MAC_HWF2R_PPSOUTNUM_LEN); + hw_feat->aux_snap_num = FXGMAC_GET_REG_BITS( + mac_hfr2, MAC_HWF2R_AUXSNAPNUM_POS, MAC_HWF2R_AUXSNAPNUM_LEN); + + /* Translate the Hash Table size into actual number */ + switch (hw_feat->hash_table_size) { + case 0: + break; + case 1: + hw_feat->hash_table_size = 64; + break; + case 2: + hw_feat->hash_table_size = 128; + break; + case 3: + hw_feat->hash_table_size = 256; + break; + } + + /* Translate the address width setting into actual number */ + switch (hw_feat->dma_width) { + case 0: + hw_feat->dma_width = 32; + break; + case 1: + hw_feat->dma_width = 40; + break; + case 2: + hw_feat->dma_width = 48; + break; + default: + hw_feat->dma_width = 32; + } + + /* The Queue, Channel and TC counts are zero based so increment them + * to get the actual number + */ + hw_feat->rx_q_cnt++; + hw_feat->tx_q_cnt++; + hw_feat->rx_ch_cnt++; + hw_feat->tx_ch_cnt++; + hw_feat->tc_cnt++; + + hw_feat->hwfr3 = mac_hfr3; + DPRINTK("HWFR3: %u\n", mac_hfr3); +} + +void 
fxgmac_print_all_hw_features(struct fxgmac_pdata *pdata) +{ + char *str = NULL; + + DPRINTK("\n"); + DPRINTK("=====================================================\n"); + DPRINTK("\n"); + DPRINTK("HW support following features, ver=%#x\n", + pdata->hw_feat.version); + DPRINTK("\n"); + /* HW Feature Register0 */ + DPRINTK("VLAN Hash Filter Selected : %s\n", + pdata->hw_feat.vlhash ? "YES" : "NO"); + DPRINTK("SMA (MDIO) Interface : %s\n", + pdata->hw_feat.sma ? "YES" : "NO"); + DPRINTK("PMT Remote Wake-up Packet Enable : %s\n", + pdata->hw_feat.rwk ? "YES" : "NO"); + DPRINTK("PMT Magic Packet Enable : %s\n", + pdata->hw_feat.mgk ? "YES" : "NO"); + DPRINTK("RMON/MMC Module Enable : %s\n", + pdata->hw_feat.mmc ? "YES" : "NO"); + DPRINTK("ARP Offload Enabled : %s\n", + pdata->hw_feat.aoe ? "YES" : "NO"); + DPRINTK("IEEE 1588-2008 Timestamp Enabled : %s\n", + pdata->hw_feat.ts ? "YES" : "NO"); + DPRINTK("Energy Efficient Ethernet Enabled : %s\n", + pdata->hw_feat.eee ? "YES" : "NO"); + DPRINTK("Transmit Checksum Offload Enabled : %s\n", + pdata->hw_feat.tx_coe ? "YES" : "NO"); + DPRINTK("Receive Checksum Offload Enabled : %s\n", + pdata->hw_feat.rx_coe ? "YES" : "NO"); + DPRINTK("Additional MAC Addresses 1-31 Selected : %s\n", + pdata->hw_feat.addn_mac ? "YES" : "NO"); + + switch (pdata->hw_feat.ts_src) { + case 0: + str = "RESERVED"; + break; + case 1: + str = "INTERNAL"; + break; + case 2: + str = "EXTERNAL"; + break; + case 3: + str = "BOTH"; + break; + } + DPRINTK("Timestamp System Time Source : %s\n", str); + + DPRINTK("Source Address or VLAN Insertion Enable : %s\n", + pdata->hw_feat.sa_vlan_ins ? 
"YES" : "NO"); + + /* HW Feature Register1 */ + switch (pdata->hw_feat.rx_fifo_size) { + case 0: + str = "128 bytes"; + break; + case 1: + str = "256 bytes"; + break; + case 2: + str = "512 bytes"; + break; + case 3: + str = "1 KBytes"; + break; + case 4: + str = "2 KBytes"; + break; + case 5: + str = "4 KBytes"; + break; + case 6: + str = "8 KBytes"; + break; + case 7: + str = "16 KBytes"; + break; + case 8: + str = "32 kBytes"; + break; + case 9: + str = "64 KBytes"; + break; + case 10: + str = "128 KBytes"; + break; + case 11: + str = "256 KBytes"; + break; + default: + str = "RESERVED"; + } + DPRINTK("MTL Receive FIFO Size : %s\n", str); + + switch (pdata->hw_feat.tx_fifo_size) { + case 0: + str = "128 bytes"; + break; + case 1: + str = "256 bytes"; + break; + case 2: + str = "512 bytes"; + break; + case 3: + str = "1 KBytes"; + break; + case 4: + str = "2 KBytes"; + break; + case 5: + str = "4 KBytes"; + break; + case 6: + str = "8 KBytes"; + break; + case 7: + str = "16 KBytes"; + break; + case 8: + str = "32 kBytes"; + break; + case 9: + str = "64 KBytes"; + break; + case 10: + str = "128 KBytes"; + break; + case 11: + str = "256 KBytes"; + break; + default: + str = "RESERVED"; + } + DPRINTK("MTL Transmit FIFO Size : %s\n", str); + + DPRINTK("IEEE 1588 High Word Register Enable : %s\n", + pdata->hw_feat.adv_ts_hi ? "YES" : "NO"); + DPRINTK("Address width : %u\n", + pdata->hw_feat.dma_width); + DPRINTK("DCB Feature Enable : %s\n", + pdata->hw_feat.dcb ? "YES" : "NO"); + DPRINTK("Split Header Feature Enable : %s\n", + pdata->hw_feat.sph ? "YES" : "NO"); + DPRINTK("TCP Segmentation Offload Enable : %s\n", + pdata->hw_feat.tso ? "YES" : "NO"); + DPRINTK("DMA Debug Registers Enabled : %s\n", + pdata->hw_feat.dma_debug ? "YES" : "NO"); + DPRINTK("RSS Feature Enabled : %s\n", + pdata->hw_feat.rss ? "YES" : "NO"); + DPRINTK("*TODO*Number of Traffic classes : %u\n", + (pdata->hw_feat.tc_cnt)); + DPRINTK("AV Feature Enabled : %s\n", + pdata->hw_feat.avsel ? 
"YES" : "NO"); + DPRINTK("Rx Side Only AV Feature Enabled : %s\n", + (pdata->hw_feat.ravsel ? "YES" : "NO")); + DPRINTK("Hash Table Size : %u\n", + pdata->hw_feat.hash_table_size); + DPRINTK("Total number of L3 or L4 Filters : %u\n", + pdata->hw_feat.l3l4_filter_num); + + /* HW Feature Register2 */ + DPRINTK("Number of MTL Receive Queues : %u\n", + pdata->hw_feat.rx_q_cnt); + DPRINTK("Number of MTL Transmit Queues : %u\n", + pdata->hw_feat.tx_q_cnt); + DPRINTK("Number of DMA Receive Channels : %u\n", + pdata->hw_feat.rx_ch_cnt); + DPRINTK("Number of DMA Transmit Channels : %u\n", + pdata->hw_feat.tx_ch_cnt); + + switch (pdata->hw_feat.pps_out_num) { + case 0: + str = "No PPS output"; + break; + case 1: + str = "1 PPS output"; + break; + case 2: + str = "2 PPS output"; + break; + case 3: + str = "3 PPS output"; + break; + case 4: + str = "4 PPS output"; + break; + default: + str = "RESERVED"; + } + DPRINTK("Number of PPS Outputs : %s\n", str); + + switch (pdata->hw_feat.aux_snap_num) { + case 0: + str = "No auxiliary input"; + break; + case 1: + str = "1 auxiliary input"; + break; + case 2: + str = "2 auxiliary input"; + break; + case 3: + str = "3 auxiliary input"; + break; + case 4: + str = "4 auxiliary input"; + break; + default: + str = "RESERVED"; + } + DPRINTK("Number of Auxiliary Snapshot Inputs : %s", str); + + DPRINTK("\n"); + DPRINTK("=====================================================\n"); + DPRINTK("\n"); +} diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-debugfs.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-debugfs.c new file mode 100644 index 000000000000..4596d91b6e28 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-debugfs.c @@ -0,0 +1,787 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. 
*/ + +#include "fuxi-gmac.h" +#include "fuxi-gmac-reg.h" +#ifdef HAVE_FXGMAC_DEBUG_FS +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TEST_MAC_HEAD 14 +#define TEST_TCP_HEAD_LEN_OFFSET 12 +#define TEST_TCP_OFFLOAD_LEN_OFFSET 48 +#define TEST_TCP_FIX_HEAD_LEN 24 +#define TEST_TCP_MSS_OFFSET 56 + +#define DF_MAX_NIC_NUM 16 + +#ifdef HAVE_FXGMAC_DEBUG_FS + +/** + * fxgmac_dbg_netdev_ops_read - read for netdev_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t fxgmac_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, size_t count, + loff_t *ppos) +{ + struct fxgmac_pdata *pdata = filp->private_data; + char *buf; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", pdata->netdev->name, + pdata->expansion.fxgmac_dbg_netdev_ops_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + return len; +} + +/** + * fxgmac_dbg_netdev_ops_write - write into netdev_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t fxgmac_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct fxgmac_pdata *pdata = filp->private_data; + int len; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(pdata->expansion.fxgmac_dbg_netdev_ops_buf)) + return -ENOSPC; + + len = simple_write_to_buffer( + pdata->expansion.fxgmac_dbg_netdev_ops_buf, + 
sizeof(pdata->expansion.fxgmac_dbg_netdev_ops_buf) - 1, ppos, + buffer, count); + if (len < 0) + return len; + + pdata->expansion.fxgmac_dbg_netdev_ops_buf[len] = '\0'; + + if (strncmp(pdata->expansion.fxgmac_dbg_netdev_ops_buf, "tx_timeout", + 10) == 0) { + DPRINTK("tx_timeout called\n"); + } else { + FXGMAC_PR("Unknown command: %s\n", + pdata->expansion.fxgmac_dbg_netdev_ops_buf); + FXGMAC_PR("Available commands:\n"); + FXGMAC_PR(" tx_timeout\n"); + } + return count; +} +#endif + +static void fxgmac_dbg_tx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data) +{ + unsigned int pktLen = 0; + struct sk_buff *skb; + pfxgmac_test_packet pPkt; + u8 *pTx_data = NULL; + u8 *pSkb_data = NULL; + u32 offload_len = 0; + u8 ipHeadLen, tcpHeadLen, headTotalLen; + static u32 lastGsoSize = 806; /* initial default value */ + + /* get fxgmac_test_packet */ + pPkt = (pfxgmac_test_packet)(pcmd_data + sizeof(struct ext_ioctl_data)); + pktLen = pPkt->length; + + /* get pkt data */ + pTx_data = (u8 *)pPkt + sizeof(fxgmac_test_packet); + + /* alloc sk_buff */ + skb = alloc_skb(pktLen, GFP_ATOMIC); + if (!skb) { + DPRINTK("alloc skb fail\n"); + return; + } + + /* copy data to skb */ + pSkb_data = skb_put(skb, pktLen); + memset(pSkb_data, 0, pktLen); + memcpy(pSkb_data, pTx_data, pktLen); + + /* set skb parameters */ + skb->dev = pdata->netdev; + skb->pkt_type = PACKET_OUTGOING; + skb->protocol = ntohs(ETH_P_IP); + skb->no_fcs = 1; + skb->ip_summed = CHECKSUM_PARTIAL; + if (skb->len > 1514) { + /* TSO packet */ + /* set tso test flag */ + pdata->expansion.fxgmac_test_tso_flag = true; + + /* get protocol head length */ + ipHeadLen = (pSkb_data[TEST_MAC_HEAD] & 0xF) * 4; + tcpHeadLen = (pSkb_data[TEST_MAC_HEAD + ipHeadLen + + TEST_TCP_HEAD_LEN_OFFSET] >> + 4 & + 0xF) * + 4; + headTotalLen = TEST_MAC_HEAD + ipHeadLen + tcpHeadLen; + offload_len = (pSkb_data[TEST_TCP_OFFLOAD_LEN_OFFSET] << 8 | + pSkb_data[TEST_TCP_OFFLOAD_LEN_OFFSET + 1]) & + 0xFFFF; + /* set tso skb parameters */ + 
skb->transport_header = ipHeadLen + TEST_MAC_HEAD; + skb->network_header = TEST_MAC_HEAD; + skb->inner_network_header = TEST_MAC_HEAD; + skb->mac_len = TEST_MAC_HEAD; + + /* set skb_shinfo parameters */ + if (tcpHeadLen > TEST_TCP_FIX_HEAD_LEN) { + skb_shinfo(skb)->gso_size = + (pSkb_data[TEST_TCP_MSS_OFFSET] << 8 | + pSkb_data[TEST_TCP_MSS_OFFSET + 1]) & + 0xFFFF; + } else { + skb_shinfo(skb)->gso_size = 0; + } + if (skb_shinfo(skb)->gso_size != 0) { + lastGsoSize = skb_shinfo(skb)->gso_size; + } else { + skb_shinfo(skb)->gso_size = lastGsoSize; + } + /* get segment size */ + if (offload_len % skb_shinfo(skb)->gso_size == 0) { + skb_shinfo(skb)->gso_segs = + offload_len / skb_shinfo(skb)->gso_size; + pdata->expansion.fxgmac_test_last_tso_len = + skb_shinfo(skb)->gso_size + headTotalLen; + } else { + skb_shinfo(skb)->gso_segs = + offload_len / skb_shinfo(skb)->gso_size + 1; + pdata->expansion.fxgmac_test_last_tso_len = + offload_len % skb_shinfo(skb)->gso_size + + headTotalLen; + } + pdata->expansion.fxgmac_test_tso_seg_num = + skb_shinfo(skb)->gso_segs; + + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + skb_shinfo(skb)->frag_list = NULL; + skb->csum_start = skb_headroom(skb) + TEST_MAC_HEAD + ipHeadLen; + skb->csum_offset = skb->len - TEST_MAC_HEAD - ipHeadLen; + + pdata->expansion.fxgmac_test_packet_len = + skb_shinfo(skb)->gso_size + headTotalLen; + } else { + /* set non-TSO packet parameters */ + pdata->expansion.fxgmac_test_packet_len = skb->len; + } + + /* send data */ + if (dev_queue_xmit(skb) != NET_XMIT_SUCCESS) { + DPRINTK("xmit data fail \n"); + } +} + +static void fxgmac_dbg_rx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data) +{ + unsigned int totalLen = 0; + struct sk_buff *rx_skb; + struct ext_ioctl_data *pcmd; + fxgmac_test_packet pkt; + void *addr = 0; + u8 *rx_data = (u8 *)kzalloc(FXGMAC_MAX_DBG_RX_DATA, GFP_KERNEL); + if (!rx_data) + return; + + /* initial dest data region */ + pcmd = (struct ext_ioctl_data *)pcmd_data; + addr = pcmd->cmd_buf.buf; + 
while (pdata->expansion.fxgmac_test_skb_arr_in_index != + pdata->expansion.fxgmac_test_skb_arr_out_index) { + /* get received skb data */ + rx_skb = + pdata->expansion.fxgmac_test_skb_array + [pdata->expansion.fxgmac_test_skb_arr_out_index]; + + if (rx_skb->len + sizeof(fxgmac_test_packet) + totalLen < + 64000) { + pkt.length = rx_skb->len; + pkt.type = 0x80; + pkt.buf[0].offset = + totalLen + sizeof(fxgmac_test_packet); + pkt.buf[0].length = rx_skb->len; + + /* get data from skb */ + memcpy(rx_data, rx_skb->data, rx_skb->len); + + /* update next pointer */ + if ((pdata->expansion.fxgmac_test_skb_arr_out_index + + 1) % FXGMAC_MAX_DBG_TEST_PKT == + pdata->expansion.fxgmac_test_skb_arr_in_index) { + pkt.next = NULL; + } else { + pkt.next = + (pfxgmac_test_packet)(addr + totalLen + + sizeof(fxgmac_test_packet) + + pkt.length); + } + + /* copy data to user space */ + if (copy_to_user((void *)(addr + totalLen), + (void *)(&pkt), + sizeof(fxgmac_test_packet))) { + DPRINTK("cppy pkt data to user fail..."); + } + if (copy_to_user((void *)(addr + totalLen + + sizeof(fxgmac_test_packet)), + (void *)rx_data, rx_skb->len)) { + DPRINTK("cppy data to user fail..."); + } + + /* update total length */ + totalLen += (sizeof(fxgmac_test_packet) + rx_skb->len); + + /* free skb */ + kfree_skb(rx_skb); + pdata->expansion.fxgmac_test_skb_array + [pdata->expansion.fxgmac_test_skb_arr_out_index] = + NULL; + + /* update gCurSkbOutIndex */ + pdata->expansion.fxgmac_test_skb_arr_out_index = + (pdata->expansion.fxgmac_test_skb_arr_out_index + + 1) % + FXGMAC_MAX_DBG_TEST_PKT; + } else { + DPRINTK("receive data more receive buffer... \n"); + break; + } + } + + if (rx_data) + kfree(rx_data); +} + +/* Based on the current application scenario, we only use CMD_DATA for data. 
+ * if you use other struct, you should recalculate in_total_size + */ +long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + bool ret = true; + int regval = 0; + struct fxgmac_pdata *pdata = file->private_data; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + FXGMAC_PDATA_OF_PLATFORM *ex = &pdata->expansion; + CMD_DATA ex_data; + struct ext_ioctl_data pcmd; + u8 *data = NULL; + u8 *buf = NULL; + int in_total_size, in_data_size, out_total_size; + int ioctl_cmd_size = sizeof(struct ext_ioctl_data); + u8 mac[ETH_ALEN] = { 0 }; + struct sk_buff *tmpskb; + + if (!arg) { + DPRINTK("[%s] command arg is %lx !\n", __func__, arg); + goto err; + } + + /* check device type */ + if (_IOC_TYPE(cmd) != IOC_MAGIC) { + DPRINTK("[%s] command type [%c] error!\n", __func__, + _IOC_TYPE(cmd)); + goto err; + } + + /* check command number*/ + if (_IOC_NR(cmd) > IOC_MAXNR) { + DPRINTK("[%s] command numer [%d] exceeded!\n", __func__, + _IOC_NR(cmd)); + goto err; + } + + if (copy_from_user(&pcmd, (void *)arg, ioctl_cmd_size)) { + DPRINTK("copy data from user fail... \n"); + goto err; + } + + in_total_size = pcmd.cmd_buf.size_in; + in_data_size = in_total_size - ioctl_cmd_size; + out_total_size = pcmd.cmd_buf.size_out; + + buf = (u8 *)kzalloc(in_total_size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, (void *)arg, in_total_size)) { + DPRINTK("copy data from user fail... 
\n"); + goto err; + } + data = buf + ioctl_cmd_size; + + if (arg != 0) { + switch (pcmd.cmd_type) { + /* ioctl diag begin */ + case FUXI_DFS_IOCTL_DIAG_BEGIN: + DPRINTK("Debugfs received diag begin command.\n"); + if (netif_running(pdata->netdev)) { + fxgmac_restart_dev(pdata); + } + + /* release last loopback test abnormal exit buffer */ + while (ex->fxgmac_test_skb_arr_in_index != + ex->fxgmac_test_skb_arr_out_index) { + tmpskb = + ex->fxgmac_test_skb_array + [ex->fxgmac_test_skb_arr_out_index]; + if (tmpskb) { + kfree_skb(tmpskb); + ex->fxgmac_test_skb_array + [ex->fxgmac_test_skb_arr_out_index] = + NULL; + } + + ex->fxgmac_test_skb_arr_out_index = + (ex->fxgmac_test_skb_arr_out_index + + 1) % + FXGMAC_MAX_DBG_TEST_PKT; + } + + /* init loopback test parameters */ + ex->fxgmac_test_skb_arr_in_index = 0; + ex->fxgmac_test_skb_arr_out_index = 0; + ex->fxgmac_test_tso_flag = false; + ex->fxgmac_test_tso_seg_num = 0; + ex->fxgmac_test_last_tso_len = 0; + ex->fxgmac_test_packet_len = 0; + break; + + /* ioctl diag end */ + case FUXI_DFS_IOCTL_DIAG_END: + DPRINTK("Debugfs received diag end command.\n"); + if (netif_running(pdata->netdev)) { + fxgmac_restart_dev(pdata); + } + break; + + /* ioctl diag tx pkt */ + case FUXI_DFS_IOCTL_DIAG_TX_PKT: + fxgmac_dbg_tx_pkt(pdata, buf); + break; + + /* ioctl diag rx pkt */ + case FUXI_DFS_IOCTL_DIAG_RX_PKT: + fxgmac_dbg_rx_pkt(pdata, buf); + break; + + /* ioctl device reset */ + case FUXI_DFS_IOCTL_DEVICE_RESET: + DPRINTK("Debugfs received device reset command.\n"); + if (netif_running(pdata->netdev)) { + fxgmac_restart_dev(pdata); + } + break; + + case FXGMAC_EFUSE_LED_TEST: + DPRINTK("Debugfs received device led test command.\n"); + memcpy(&pdata->led, data, sizeof(struct led_setting)); + fxgmac_restart_dev(pdata); + break; + + case FXGMAC_EFUSE_UPDATE_LED_CFG: + DPRINTK("Debugfs received device led update command.\n"); + memcpy(&pdata->ledconfig, data, + sizeof(struct led_setting)); + ret = hw_ops->write_led_config(pdata); + 
hw_ops->read_led_config(pdata); + hw_ops->led_under_active(pdata); + break; + + case FXGMAC_EFUSE_WRITE_LED: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + DPRINTK("FXGMAC_EFUSE_WRITE_LED, val = 0x%x\n", + ex_data.val0); + ret = hw_ops->write_led(pdata, ex_data.val0); + break; + + case FXGMAC_EFUSE_WRITE_OOB: + DPRINTK("FXGMAC_EFUSE_WRITE_OOB.\n"); + ret = hw_ops->write_oob(pdata); + break; + + case FXGMAC_EFUSE_READ_REGIONABC: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->read_efuse_data(pdata, ex_data.val0, + &ex_data.val1); + DPRINTK("FXGMAC_EFUSE_READ_REGIONABC, address = 0x%x, val = 0x%x\n", + ex_data.val0, ex_data.val1); + if (ret) { + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = + ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + } + break; + + case FXGMAC_EFUSE_WRITE_PATCH_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + DPRINTK("FXGMAC_EFUSE_WRITE_PATCH_REG, address = 0x%x, val = 0x%x\n", + ex_data.val0, ex_data.val1); + ret = hw_ops->write_patch_to_efuse(pdata, ex_data.val0, + ex_data.val1); + break; + + case FXGMAC_EFUSE_READ_PATCH_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->read_patch_from_efuse(pdata, ex_data.val0, + &ex_data.val1); + DPRINTK("FXGMAC_EFUSE_READ_PATCH_REG, address = 0x%x, val = 0x%x\n", + ex_data.val0, ex_data.val1); + if (ret) { + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = + ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + } + break; + + case FXGMAC_EFUSE_WRITE_PATCH_PER_INDEX: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->write_patch_to_efuse_per_index( + pdata, ex_data.val0, ex_data.val1, + ex_data.val2); + DPRINTK("FXGMAC_EFUSE_WRITE_PATCH_PER_INDEX, index = %d, address = 0x%x, val = 0x%x\n", + ex_data.val0, ex_data.val1, ex_data.val2); + break; + + case FXGMAC_EFUSE_READ_PATCH_PER_INDEX: + memcpy(&ex_data, data, 
sizeof(CMD_DATA)); + ret = hw_ops->read_patch_from_efuse_per_index( + pdata, ex_data.val0, &ex_data.val1, + &ex_data.val2); + DPRINTK("FXGMAC_EFUSE_READ_PATCH_PER_INDEX, address = 0x%x, val = 0x%x\n", + ex_data.val1, ex_data.val2); + if (ret) { + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = + ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + } + break; + + case FXGMAC_EFUSE_LOAD: + DPRINTK("FXGMAC_EFUSE_LOAD.\n"); + ret = hw_ops->efuse_load(pdata); + break; + + case FXGMAC_GET_MAC_DATA: + ret = hw_ops->read_mac_subsys_from_efuse(pdata, mac, + NULL, NULL); + if (ret) { + memcpy(data, mac, ETH_ALEN); + out_total_size = ioctl_cmd_size + ETH_ALEN; + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + } + break; + + case FXGMAC_SET_MAC_DATA: + if (in_data_size != ETH_ALEN) + goto err; + memcpy(mac, data, ETH_ALEN); + ret = hw_ops->write_mac_subsys_to_efuse(pdata, mac, + NULL, NULL); + if (ret) { + eth_hw_addr_set(pdata->netdev, mac); + memcpy(pdata->mac_addr, mac, ETH_ALEN); + hw_ops->set_mac_address(pdata, mac); + hw_ops->set_mac_hash(pdata); + } + break; + + case FXGMAC_GET_SUBSYS_ID: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->read_mac_subsys_from_efuse( + pdata, NULL, &ex_data.val0, NULL); + if (ret) { + ex_data.val1 = 0xFFFF; /* invalid value */ + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = + ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + } + break; + + case FXGMAC_SET_SUBSYS_ID: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ret = hw_ops->write_mac_subsys_to_efuse( + pdata, NULL, &ex_data.val0, NULL); + break; + + case FXGMAC_GET_GMAC_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + ex_data.val1 = hw_ops->get_gmac_register( + pdata, (u8 *)(pdata->mac_regs + ex_data.val0)); + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + 
sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + break; + + case FXGMAC_SET_GMAC_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = hw_ops->set_gmac_register( + pdata, (u8 *)(pdata->mac_regs + ex_data.val0), + ex_data.val1); + ret = (regval == 0 ? true : false); + break; + + case FXGMAC_GET_PHY_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = hw_ops->read_ephy_reg(pdata, ex_data.val0, + &ex_data.val1); + if (regval != -1) { + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = + ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + } + ret = (regval == -1 ? false : true); + break; + + case FXGMAC_SET_PHY_REG: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = hw_ops->write_ephy_reg(pdata, ex_data.val0, + ex_data.val1); + ret = (regval == 0 ? true : false); + break; + + case FXGMAC_GET_PCIE_LOCATION: + ex_data.val0 = pdata->pdev->bus->number; + ex_data.val1 = PCI_SLOT(pdata->pdev->devfn); + ex_data.val2 = PCI_FUNC(pdata->pdev->devfn); + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + break; + + case FXGMAC_GET_GSO_SIZE: + ex_data.val0 = pdata->netdev->gso_max_size; + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + break; + + case FXGMAC_SET_GSO_SIZE: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + pdata->netdev->gso_max_size = ex_data.val0; + break; + + case FXGMAC_SET_RX_MODERATION: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = readreg(pdata->pAdapter, + pdata->base_mem + INT_MOD); + regval = FXGMAC_SET_REG_BITS(regval, INT_MOD_RX_POS, + INT_MOD_RX_LEN, + ex_data.val0); + writereg(pdata->pAdapter, regval, + pdata->base_mem + INT_MOD); + break; + + case 
FXGMAC_SET_TX_MODERATION: + memcpy(&ex_data, data, sizeof(CMD_DATA)); + regval = readreg(pdata->pAdapter, + pdata->base_mem + INT_MOD); + regval = FXGMAC_SET_REG_BITS(regval, INT_MOD_TX_POS, + INT_MOD_TX_LEN, + ex_data.val0); + writereg(pdata->pAdapter, regval, + pdata->base_mem + INT_MOD); + break; + + case FXGMAC_GET_TXRX_MODERATION: + regval = readreg(pdata->pAdapter, + pdata->base_mem + INT_MOD); + ex_data.val0 = FXGMAC_GET_REG_BITS( + regval, INT_MOD_RX_POS, INT_MOD_RX_LEN); + ex_data.val1 = FXGMAC_GET_REG_BITS( + regval, INT_MOD_TX_POS, INT_MOD_TX_LEN); + memcpy(data, &ex_data, sizeof(CMD_DATA)); + out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); + if (copy_to_user((void *)arg, (void *)buf, + out_total_size)) + goto err; + break; + + default: + DPRINTK("Debugfs received invalid command: %x.\n", + pcmd.cmd_type); + ret = false; + break; + } + } + + if (buf) + kfree(buf); + return ret ? FXGMAC_SUCCESS : FXGMAC_FAIL; + +err: + if (buf) + kfree(buf); + return FXGMAC_FAIL; +} + +#ifdef HAVE_FXGMAC_DEBUG_FS + +static const struct file_operations fxgmac_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = fxgmac_dbg_netdev_ops_read, + .write = fxgmac_dbg_netdev_ops_write, + .unlocked_ioctl = fxgmac_dbg_netdev_ops_ioctl, +}; + +/** + * fxgmac_dbg_adapter_init - setup the debugfs directory for the adapter + * @pdata: the adapter that is starting up + **/ +void fxgmac_dbg_adapter_init(struct fxgmac_pdata *pdata) +{ + const char *name = pdata->drv_name; + struct dentry *pfile; + + pdata->expansion.dbg_adapter = + debugfs_create_dir(name, pdata->expansion.fxgmac_dbg_root); + if (pdata->expansion.dbg_adapter) { + pfile = debugfs_create_file("netdev_ops", 0600, + pdata->expansion.dbg_adapter, pdata, + &fxgmac_dbg_netdev_ops_fops); + if (!pfile) + DPRINTK("debugfs netdev_ops for %s failed\n", name); + } else { + DPRINTK("debugfs entry for %s failed\n", name); + } +} + +/** + * fxgmac_dbg_adapter_exit - clear out the adapter's debugfs entries + * 
@pdata: board private structure + **/ +void fxgmac_dbg_adapter_exit(struct fxgmac_pdata *pdata) +{ + if (pdata->expansion.dbg_adapter) + debugfs_remove_recursive(pdata->expansion.dbg_adapter); + pdata->expansion.dbg_adapter = NULL; +} + +/** + * fxgmac_dbg_init - start up debugfs for the driver + **/ +void fxgmac_dbg_init(struct fxgmac_pdata *pdata) +{ + unsigned int i; + char num[3]; + const char debug_path[] = "/sys/kernel/debug/"; + const char file_prefix[] = "fuxi_"; + char file_path[50]; + char file_name[8]; + + /* init file_path */ + memset(file_path, '\0', sizeof(file_path)); + memcpy(file_path, debug_path, sizeof(debug_path)); + + for (i = 0; i < DF_MAX_NIC_NUM; i++) { + /* init num and filename */ + memset(num, '\0', sizeof(num)); + memset(file_name, '\0', sizeof(file_name)); + + /* int to string */ + sprintf(num, "%d", i); + + /* file name */ + memcpy(file_name, file_prefix, sizeof(file_prefix)); + memcpy(file_name + strlen(file_prefix), num, sizeof(num)); + + /* file path */ + memcpy(file_path + sizeof(debug_path) - 1, file_name, + sizeof(file_name)); + + /* whether file exist */ + pdata->expansion.fxgmac_dbg_root = + debugfs_lookup(file_name, NULL); + if (!pdata->expansion.fxgmac_dbg_root) { + /* create file */ + pdata->expansion.fxgmac_dbg_root = + debugfs_create_dir(file_name, NULL); + if (IS_ERR(pdata->expansion.fxgmac_dbg_root)) + DPRINTK("fxgmac init of debugfs failed\n"); + + break; + } + } +} + +/** + * fxgmac_dbg_exit - clean out the driver's debugfs entries + **/ +void fxgmac_dbg_exit(struct fxgmac_pdata *pdata) +{ + if (pdata->expansion.fxgmac_dbg_root) + debugfs_remove_recursive(pdata->expansion.fxgmac_dbg_root); +} + +#endif /* HAVE_FXGMAC_DEBUG_FS */ diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c new file mode 100644 index 000000000000..969d84eb44e2 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c @@ -0,0 +1,601 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. */ + +#include "fuxi-gmac.h" +#include "fuxi-gmac-reg.h" + +static void fxgmac_unmap_desc_data(struct fxgmac_pdata *pdata, + struct fxgmac_desc_data *desc_data) +{ + if (desc_data->skb_dma) { + if (desc_data->mapped_as_page) { + dma_unmap_page(pdata->dev, desc_data->skb_dma, + desc_data->skb_dma_len, DMA_TO_DEVICE); + } else { + dma_unmap_single(pdata->dev, desc_data->skb_dma, + desc_data->skb_dma_len, DMA_TO_DEVICE); + } + desc_data->skb_dma = 0; + desc_data->skb_dma_len = 0; + } + + if (desc_data->rx.buf.dma_base) { + dma_unmap_single(pdata->dev, desc_data->rx.buf.dma_base, + pdata->rx_buf_size, DMA_FROM_DEVICE); + desc_data->rx.buf.dma_base = 0; + } + + if (desc_data->skb) { + dev_kfree_skb_any(desc_data->skb); + desc_data->skb = NULL; + } + + memset(&desc_data->tx, 0, sizeof(desc_data->tx)); + memset(&desc_data->rx, 0, sizeof(desc_data->rx)); + + desc_data->mapped_as_page = 0; + + if (desc_data->state_saved) { + desc_data->state_saved = 0; + desc_data->state.skb = NULL; + desc_data->state.len = 0; + desc_data->state.error = 0; + } +} + +static void fxgmac_free_ring(struct fxgmac_pdata *pdata, + struct fxgmac_ring *ring) +{ + struct fxgmac_desc_data *desc_data; + unsigned int i; + + if (!ring) + return; + + if (ring->desc_data_head) { + for (i = 0; i < ring->dma_desc_count; i++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, i); + fxgmac_unmap_desc_data(pdata, desc_data); + } + + kfree(ring->desc_data_head); + ring->desc_data_head = NULL; + } + + if (ring->dma_desc_head) { + dma_free_coherent( + pdata->dev, + (sizeof(struct fxgmac_dma_desc) * ring->dma_desc_count), + ring->dma_desc_head, ring->dma_desc_head_addr); + ring->dma_desc_head = NULL; + } +} + +static int fxgmac_init_ring(struct fxgmac_pdata *pdata, + struct fxgmac_ring *ring, + unsigned int dma_desc_count) +{ + if (!ring) + return 0; + /* Descriptors */ + ring->dma_desc_count = dma_desc_count; + ring->dma_desc_head = 
dma_alloc_coherent( + pdata->dev, (sizeof(struct fxgmac_dma_desc) * dma_desc_count), + &ring->dma_desc_head_addr, GFP_KERNEL); + if (!ring->dma_desc_head) + return -ENOMEM; + + /* Array of descriptor data */ + ring->desc_data_head = kcalloc( + dma_desc_count, sizeof(struct fxgmac_desc_data), GFP_KERNEL); + if (!ring->desc_data_head) + return -ENOMEM; + + netif_dbg( + pdata, drv, pdata->netdev, + "dma_desc_head=%p, dma_desc_head_addr=%pad, desc_data_head=%p\n", + ring->dma_desc_head, &ring->dma_desc_head_addr, + ring->desc_data_head); + + return 0; +} + +static void fxgmac_free_rings(struct fxgmac_pdata *pdata) +{ + struct fxgmac_channel *channel; + unsigned int i; + + if (!pdata->channel_head) + return; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + fxgmac_free_ring(pdata, channel->tx_ring); + fxgmac_free_ring(pdata, channel->rx_ring); + } +} + +static int fxgmac_alloc_rings(struct fxgmac_pdata *pdata) +{ + struct fxgmac_channel *channel; + unsigned int i; + int ret; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n", + channel->name); + + if (i < pdata->tx_ring_count) { + ret = fxgmac_init_ring(pdata, channel->tx_ring, + pdata->tx_desc_count); + + if (ret) { + netdev_alert(pdata->netdev, + "error initializing Tx ring"); + goto err_init_ring; + } + } + + netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n", + channel->name); + + ret = fxgmac_init_ring(pdata, channel->rx_ring, + pdata->rx_desc_count); + if (ret) { + netdev_alert(pdata->netdev, + "error initializing Rx ring\n"); + goto err_init_ring; + } + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_alloc_ring..ch=%u, tx_desc_cnt=%u, rx_desc_cnt=%u\n", + i, pdata->tx_desc_count, pdata->rx_desc_count); + } + if (netif_msg_drv(pdata)) + DPRINTK("alloc_rings callout ok\n"); + + return 0; + +err_init_ring: + fxgmac_free_rings(pdata); + + DPRINTK("alloc_rings callout 
err,%d\n", ret); + return ret; +} + +static void fxgmac_free_channels(struct fxgmac_pdata *pdata) +{ + if (!pdata->channel_head) + return; + if (netif_msg_drv(pdata)) + DPRINTK("free_channels, tx_ring=%p\n", + pdata->channel_head->tx_ring); + kfree(pdata->channel_head->tx_ring); + pdata->channel_head->tx_ring = NULL; + + if (netif_msg_drv(pdata)) + DPRINTK("free_channels, rx_ring=%p\n", + pdata->channel_head->rx_ring); + kfree(pdata->channel_head->rx_ring); + pdata->channel_head->rx_ring = NULL; + + if (netif_msg_drv(pdata)) + DPRINTK("free_channels, channel=%p\n", pdata->channel_head); + kfree(pdata->channel_head); + + pdata->channel_head = NULL; +} + +static int fxgmac_alloc_channels(struct fxgmac_pdata *pdata) +{ + struct fxgmac_channel *channel_head, *channel; + struct fxgmac_ring *tx_ring, *rx_ring; + int ret = -ENOMEM; + unsigned int i; + +#ifdef CONFIG_PCI_MSI + u32 msix = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MSIX_POS, + FXGMAC_FLAG_MSIX_LEN); +#endif + + channel_head = kcalloc(pdata->channel_count, + sizeof(struct fxgmac_channel), GFP_KERNEL); + if (netif_msg_drv(pdata)) + DPRINTK("alloc_channels, channel_head=%p, size=%d*%ld\n", + channel_head, pdata->channel_count, + sizeof(struct fxgmac_channel)); + + if (!channel_head) + return ret; + + netif_dbg(pdata, drv, pdata->netdev, "channel_head=%p\n", channel_head); + + tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct fxgmac_ring), + GFP_KERNEL); + if (!tx_ring) + goto err_tx_ring; + + if (netif_msg_drv(pdata)) + DPRINTK("alloc_channels, tx_ring=%p, size=%d*%ld\n", tx_ring, + pdata->tx_ring_count, sizeof(struct fxgmac_ring)); + rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct fxgmac_ring), + GFP_KERNEL); + if (!rx_ring) + goto err_rx_ring; + + if (netif_msg_drv(pdata)) + DPRINTK("alloc_channels, rx_ring=%p, size=%d*%ld\n", rx_ring, + pdata->rx_ring_count, sizeof(struct fxgmac_ring)); + + for (i = 0, channel = channel_head; i < pdata->channel_count; + i++, channel++) { + 
snprintf(channel->name, sizeof(channel->name), "channel-%u", i); + channel->pdata = pdata; + channel->queue_index = i; + channel->dma_regs = + pdata->mac_regs + DMA_CH_BASE + (DMA_CH_INC * i); + + if (pdata->per_channel_irq) { + /* Get the per DMA interrupt */ +#ifdef CONFIG_PCI_MSI + if (msix) { + pdata->channel_irq[i] = + pdata->expansion.msix_entries[i].vector; + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + pdata->channel_irq + [FXGMAC_MAX_DMA_CHANNELS] = + pdata->expansion + .msix_entries + [FXGMAC_MAX_DMA_CHANNELS] + .vector; + + if (pdata->channel_irq + [FXGMAC_MAX_DMA_CHANNELS] < + 0) { + netdev_err( + pdata->netdev, + "get_irq %u for tx failed\n", + i + 1); + goto err_irq; + } + + channel->expansion.dma_irq_tx = + pdata->channel_irq + [FXGMAC_MAX_DMA_CHANNELS]; + DPRINTK("fxgmac_alloc_channels, for MSIx, channel %d dma_irq_tx=%u\n", + i, + channel->expansion.dma_irq_tx); + } + } +#endif + ret = pdata->channel_irq[i]; + if (ret < 0) { + netdev_err(pdata->netdev, "get_irq %u failed\n", + i + 1); + goto err_irq; + } + channel->dma_irq = ret; + DPRINTK("fxgmac_alloc_channels, for MSIx, channel %d dma_irq=%u\n", + i, channel->dma_irq); + } + + if (i < pdata->tx_ring_count) + channel->tx_ring = tx_ring++; + + if (i < pdata->rx_ring_count) + channel->rx_ring = rx_ring++; + + netif_dbg(pdata, drv, pdata->netdev, + "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n", + channel->name, channel->dma_regs, channel->tx_ring, + channel->rx_ring); + } + + pdata->channel_head = channel_head; + + if (netif_msg_drv(pdata)) + DPRINTK("alloc_channels callout ok\n"); + return 0; + +err_irq: + kfree(rx_ring); + +err_rx_ring: + kfree(tx_ring); + +err_tx_ring: + kfree(channel_head); + + DPRINTK("fxgmac alloc_channels callout err,%d\n", ret); + return ret; +} + +static void fxgmac_free_channels_and_rings(struct fxgmac_pdata *pdata) +{ + fxgmac_free_rings(pdata); + + fxgmac_free_channels(pdata); +} + +static int fxgmac_alloc_channels_and_rings(struct fxgmac_pdata *pdata) +{ + int ret; + + ret = 
fxgmac_alloc_channels(pdata); + if (ret) + goto err_alloc; + + ret = fxgmac_alloc_rings(pdata); + if (ret) + goto err_alloc; + + return 0; + +err_alloc: + fxgmac_free_channels_and_rings(pdata); + + return ret; +} + +static int fxgmac_map_rx_buffer(struct fxgmac_pdata *pdata, + struct fxgmac_ring *ring, + struct fxgmac_desc_data *desc_data) +{ + struct sk_buff *skb; + skb = __netdev_alloc_skb_ip_align(pdata->netdev, pdata->rx_buf_size, + GFP_ATOMIC); + if (!skb) { + netdev_err(pdata->netdev, "%s: Rx init fails; skb is NULL\n", + __func__); + return -ENOMEM; + } + + desc_data->skb = skb; + desc_data->rx.buf.dma_base = dma_map_single( + pdata->dev, skb->data, pdata->rx_buf_size, DMA_FROM_DEVICE); + if (dma_mapping_error(pdata->dev, desc_data->rx.buf.dma_base)) { + netdev_err(pdata->netdev, "%s: DMA mapping error\n", __func__); + dev_kfree_skb_any(skb); + return -EINVAL; + } + + return 0; +} + +static void fxgmac_tx_desc_init(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct fxgmac_desc_data *desc_data; + struct fxgmac_dma_desc *dma_desc; + struct fxgmac_channel *channel; + struct fxgmac_ring *ring; + dma_addr_t dma_desc_addr; + unsigned int i, j; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->tx_ring; + if (!ring) + break; + + /* reset the tx timer status. 
20220104 */ + channel->tx_timer_active = 0; + + dma_desc = ring->dma_desc_head; + dma_desc_addr = ring->dma_desc_head_addr; + + for (j = 0; j < ring->dma_desc_count; j++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, j); + + desc_data->dma_desc = dma_desc; + desc_data->dma_desc_addr = dma_desc_addr; + + dma_desc++; + dma_desc_addr += sizeof(struct fxgmac_dma_desc); + } + + ring->cur = 0; + ring->dirty = 0; + memset(&ring->tx, 0, sizeof(ring->tx)); + + hw_ops->tx_desc_init(channel); + } +} + +static void fxgmac_rx_desc_init(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct fxgmac_desc_data *desc_data; + struct fxgmac_dma_desc *dma_desc; + struct fxgmac_channel *channel; + struct fxgmac_ring *ring; + dma_addr_t dma_desc_addr; + unsigned int i, j; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->rx_ring; + if (!ring) + break; + + dma_desc = ring->dma_desc_head; + dma_desc_addr = ring->dma_desc_head_addr; + + for (j = 0; j < ring->dma_desc_count; j++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, j); + + desc_data->dma_desc = dma_desc; + desc_data->dma_desc_addr = dma_desc_addr; + + if (fxgmac_map_rx_buffer(pdata, ring, desc_data)) + break; + + dma_desc++; + dma_desc_addr += sizeof(struct fxgmac_dma_desc); + } + + ring->cur = 0; + ring->dirty = 0; + + hw_ops->rx_desc_init(channel); + } +} + +static int fxgmac_map_tx_skb(struct fxgmac_channel *channel, + struct sk_buff *skb) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->tx_ring; + unsigned int start_index, cur_index; + struct fxgmac_desc_data *desc_data; + unsigned int offset, datalen, len; + struct fxgmac_pkt_info *pkt_info; + skb_frag_t *frag; + unsigned int tso, vlan; + dma_addr_t skb_dma; + unsigned int i; + + offset = 0; + start_index = ring->cur; + cur_index = ring->cur; + + pkt_info = &ring->pkt_info; + pkt_info->desc_count = 0; + pkt_info->length = 0; + + tso = 
FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); + vlan = FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); + + /* Save space for a context descriptor if needed */ + if ((tso && (pkt_info->mss != ring->tx.cur_mss)) || + (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag))) { + cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); + } + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + + if (tso) { + /* Map the TSO header */ + skb_dma = dma_map_single(pdata->dev, skb->data, + pkt_info->header_len, DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, skb_dma)) { + netdev_alert(pdata->netdev, "dma_map_single failed\n"); + goto err_out; + } + desc_data->skb_dma = skb_dma; + desc_data->skb_dma_len = pkt_info->header_len; + netif_dbg(pdata, tx_queued, pdata->netdev, + "skb header: index=%u, dma=%pad, len=%u\n", cur_index, + &skb_dma, pkt_info->header_len); + + offset = pkt_info->header_len; + + pkt_info->length += pkt_info->header_len; + + cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + } + + /* Map the (remainder of the) packet */ + for (datalen = skb_headlen(skb) - offset; datalen;) { + len = min_t(unsigned int, datalen, FXGMAC_TX_MAX_BUF_SIZE); + + skb_dma = dma_map_single(pdata->dev, skb->data + offset, len, + DMA_TO_DEVICE); + if (dma_mapping_error(pdata->dev, skb_dma)) { + netdev_alert(pdata->netdev, "dma_map_single failed\n"); + goto err_out; + } + desc_data->skb_dma = skb_dma; + desc_data->skb_dma_len = len; + netif_dbg(pdata, tx_queued, pdata->netdev, + "skb data: index=%u, dma=%pad, len=%u\n", cur_index, + &skb_dma, len); + + datalen -= len; + offset += len; + + pkt_info->length += len; + + cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + } + + for (i = 0; i < 
skb_shinfo(skb)->nr_frags; i++) { + netif_dbg(pdata, tx_queued, pdata->netdev, "mapping frag %u\n", + i); + frag = &skb_shinfo(skb)->frags[i]; + offset = 0; + + for (datalen = skb_frag_size(frag); datalen;) { + len = min_t(unsigned int, datalen, + FXGMAC_TX_MAX_BUF_SIZE); + + skb_dma = skb_frag_dma_map(pdata->dev, frag, offset, + len, DMA_TO_DEVICE); + + if (dma_mapping_error(pdata->dev, skb_dma)) { + netdev_alert(pdata->netdev, + "skb_frag_dma_map failed\n"); + goto err_out; + } + desc_data->skb_dma = skb_dma; + desc_data->skb_dma_len = len; + desc_data->mapped_as_page = 1; + netif_dbg(pdata, tx_queued, pdata->netdev, + "skb frag: index=%u, dma=%pad, len=%u\n", + cur_index, &skb_dma, len); + + datalen -= len; + offset += len; + + pkt_info->length += len; + + cur_index = FXGMAC_GET_ENTRY(cur_index, + ring->dma_desc_count); + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + } + } + + /* Save the skb address in the last entry. We always have some data + * that has been mapped so desc_data is always advanced past the last + * piece of mapped data - use the entry pointed to by cur_index - 1. 
+ */ + desc_data = FXGMAC_GET_DESC_DATA( + ring, (cur_index - 1) & (ring->dma_desc_count - 1)); + desc_data->skb = skb; + + /* Save the number of descriptor entries used */ + if (start_index <= cur_index) + pkt_info->desc_count = cur_index - start_index; + else + pkt_info->desc_count = + ring->dma_desc_count - start_index + cur_index; + + return pkt_info->desc_count; + +err_out: + while (start_index < cur_index) { + desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); + start_index = + FXGMAC_GET_ENTRY(start_index, ring->dma_desc_count); + fxgmac_unmap_desc_data(pdata, desc_data); + } + + return 0; +} + +void fxgmac_init_desc_ops(struct fxgmac_desc_ops *desc_ops) +{ + desc_ops->alloc_channles_and_rings = fxgmac_alloc_channels_and_rings; + desc_ops->free_channels_and_rings = fxgmac_free_channels_and_rings; + desc_ops->map_tx_skb = fxgmac_map_tx_skb; + desc_ops->map_rx_buffer = fxgmac_map_rx_buffer; + desc_ops->unmap_desc_data = fxgmac_unmap_desc_data; + desc_ops->tx_desc_init = fxgmac_tx_desc_init; + desc_ops->rx_desc_init = fxgmac_rx_desc_init; +} diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c new file mode 100644 index 000000000000..05aa42f90ad8 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c @@ -0,0 +1,1114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. 
*/ + +#include +#include +#include + +#include "fuxi-gmac.h" +#include "fuxi-gmac-reg.h" + +struct fxgmac_stats_desc { + char stat_string[ETH_GSTRING_LEN]; + int stat_offset; +}; + +#define FXGMAC_STAT(str, var) \ + { \ + str, offsetof(struct fxgmac_pdata, stats.var), \ + } + +static const struct fxgmac_stats_desc fxgmac_gstring_stats[] = { + /* MMC TX counters */ + FXGMAC_STAT("tx_bytes", txoctetcount_gb), + FXGMAC_STAT("tx_bytes_good", txoctetcount_g), + FXGMAC_STAT("tx_packets", txframecount_gb), + FXGMAC_STAT("tx_packets_good", txframecount_g), + FXGMAC_STAT("tx_unicast_packets", txunicastframes_gb), + FXGMAC_STAT("tx_broadcast_packets", txbroadcastframes_gb), + FXGMAC_STAT("tx_broadcast_packets_good", txbroadcastframes_g), + FXGMAC_STAT("tx_multicast_packets", txmulticastframes_gb), + FXGMAC_STAT("tx_multicast_packets_good", txmulticastframes_g), + FXGMAC_STAT("tx_vlan_packets_good", txvlanframes_g), + FXGMAC_STAT("tx_64_byte_packets", tx64octets_gb), + FXGMAC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb), + FXGMAC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb), + FXGMAC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb), + FXGMAC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb), + FXGMAC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb), + FXGMAC_STAT("tx_underflow_errors", txunderflowerror), + FXGMAC_STAT("tx_pause_frames", txpauseframes), + FXGMAC_STAT("tx_single_collision", txsinglecollision_g), + FXGMAC_STAT("tx_multiple_collision", txmultiplecollision_g), + FXGMAC_STAT("tx_deferred_frames", txdeferredframes), + FXGMAC_STAT("tx_late_collision_frames", txlatecollisionframes), + FXGMAC_STAT("tx_excessive_collision_frames", + txexcessivecollisionframes), + FXGMAC_STAT("tx_carrier_error_frames", txcarriererrorframes), + FXGMAC_STAT("tx_excessive_deferral_error", txexcessivedeferralerror), + FXGMAC_STAT("tx_oversize_frames_good", txoversize_g), + + /* MMC RX counters */ + FXGMAC_STAT("rx_bytes", rxoctetcount_gb), + 
FXGMAC_STAT("rx_bytes_good", rxoctetcount_g), + FXGMAC_STAT("rx_packets", rxframecount_gb), + FXGMAC_STAT("rx_unicast_packets_good", rxunicastframes_g), + FXGMAC_STAT("rx_broadcast_packets_good", rxbroadcastframes_g), + FXGMAC_STAT("rx_multicast_packets_good", rxmulticastframes_g), + FXGMAC_STAT("rx_vlan_packets_mac", rxvlanframes_gb), + FXGMAC_STAT("rx_64_byte_packets", rx64octets_gb), + FXGMAC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb), + FXGMAC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb), + FXGMAC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb), + FXGMAC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb), + FXGMAC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb), + FXGMAC_STAT("rx_undersize_packets_good", rxundersize_g), + FXGMAC_STAT("rx_oversize_packets_good", rxoversize_g), + FXGMAC_STAT("rx_crc_errors", rxcrcerror), + FXGMAC_STAT("rx_align_error", rxalignerror), + FXGMAC_STAT("rx_crc_errors_small_packets", rxrunterror), + FXGMAC_STAT("rx_crc_errors_giant_packets", rxjabbererror), + FXGMAC_STAT("rx_length_errors", rxlengtherror), + FXGMAC_STAT("rx_out_of_range_errors", rxoutofrangetype), + FXGMAC_STAT("rx_fifo_overflow_errors", rxfifooverflow), + FXGMAC_STAT("rx_watchdog_errors", rxwatchdogerror), + FXGMAC_STAT("rx_pause_frames", rxpauseframes), + FXGMAC_STAT("rx_receive_error_frames", rxreceiveerrorframe), + FXGMAC_STAT("rx_control_frames_good", rxcontrolframe_g), + + /* Extra counters */ + FXGMAC_STAT("tx_tso_packets", tx_tso_packets), + FXGMAC_STAT("rx_split_header_packets", rx_split_header_packets), + FXGMAC_STAT("tx_process_stopped", tx_process_stopped), + FXGMAC_STAT("rx_process_stopped", rx_process_stopped), + FXGMAC_STAT("tx_buffer_unavailable", tx_buffer_unavailable), + FXGMAC_STAT("rx_buffer_unavailable", rx_buffer_unavailable), + FXGMAC_STAT("fatal_bus_error", fatal_bus_error), + FXGMAC_STAT("tx_vlan_packets_net", tx_vlan_packets), + FXGMAC_STAT("rx_vlan_packets_net", rx_vlan_packets), + 
FXGMAC_STAT("napi_poll_isr", napi_poll_isr), + FXGMAC_STAT("napi_poll_txtimer", napi_poll_txtimer), + FXGMAC_STAT("alive_cnt_txtimer", cnt_alive_txtimer), + + FXGMAC_STAT("ephy_poll_timer", ephy_poll_timer_cnt), + FXGMAC_STAT("mgmt_int_isr", mgmt_int_isr), +}; + +#define FXGMAC_STATS_COUNT ARRAY_SIZE(fxgmac_gstring_stats) + +static void fxgmac_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + u32 ver = pdata->hw_feat.version; + u32 sver, devid, userver; + + strscpy(drvinfo->driver, pdata->drv_name, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, pdata->drv_ver, sizeof(drvinfo->version)); + strscpy(drvinfo->bus_info, dev_name(pdata->dev), + sizeof(drvinfo->bus_info)); + /* + * D|DEVID: Indicates the Device family + * U|USERVER: User-defined Version + */ + sver = FXGMAC_GET_REG_BITS(ver, MAC_VR_SVER_POS, MAC_VR_SVER_LEN); + devid = FXGMAC_GET_REG_BITS(ver, MAC_VR_DEVID_POS, MAC_VR_DEVID_LEN); + userver = FXGMAC_GET_REG_BITS(ver, MAC_VR_USERVER_POS, + MAC_VR_USERVER_LEN); + /*DPRINTK("xlgma: No userver (%x) here, sver (%x) should be 0x51\n", userver, sver);*/ + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "S.D.U: %x.%x.%x", sver, devid, userver); +} + +static u32 fxgmac_ethtool_get_msglevel(struct net_device *netdev) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + + return pdata->msg_enable; +} + +static void fxgmac_ethtool_set_msglevel(struct net_device *netdev, u32 msglevel) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + + DPRINTK("fxmac, set msglvl from %08x to %08x\n", pdata->msg_enable, + msglevel); + pdata->msg_enable = msglevel; +} + +static void fxgmac_ethtool_get_channels(struct net_device *netdev, + struct ethtool_channels *channel) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); +#if (FXGMAC_RSS_FEATURE_ENABLED) + /* report maximum channels */ + channel->max_combined = FXGMAC_MAX_DMA_CHANNELS; + channel->max_other = 0; + 
channel->other_count = 0; + + /* record RSS queues */ + channel->combined_count = FXGMAC_MAX_DMA_CHANNELS; + + /* nothing else to report if RSS is disabled */ + if (channel->combined_count == 1) + return; + DPRINTK("fxmac rss, get channels max=(combined %d, other %d), count(combined %d, other %d)\n", + channel->max_combined, channel->max_other, + channel->combined_count, channel->other_count); +#endif + + channel->max_rx = FXGMAC_MAX_DMA_CHANNELS; + channel->max_tx = FXGMAC_MAX_DMA_CHANNELS; + channel->rx_count = pdata->rx_q_count; + channel->tx_count = pdata->tx_q_count; + DPRINTK("fxmac, get channels max=(rx %d, tx %d), count(%d,%d)\n", + channel->max_rx, channel->max_tx, channel->rx_count, + channel->tx_count); +} + +static int +fxgmac_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + + memset(ec, 0, sizeof(struct ethtool_coalesce)); + ec->rx_coalesce_usecs = pdata->rx_usecs; + ec->tx_coalesce_usecs = pdata->tx_usecs; + /*If we need to assign values to other members, + * we need to modify the supported_coalesce_params of fxgmac_ethtool_ops synchronously + */ + DPRINTK("fxmac, get coalesce\n"); + return 0; +} + +static int +fxgmac_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + unsigned int rx_frames, rx_riwt, rx_usecs; + unsigned int tx_frames; + + /* Check for not supported parameters */ + if ((ec->rx_coalesce_usecs_irq) || (ec->rx_max_coalesced_frames_irq) || + (ec->tx_coalesce_usecs_high) || (ec->tx_max_coalesced_frames_irq) || + (ec->tx_coalesce_usecs_irq) || (ec->stats_block_coalesce_usecs) || + (ec->pkt_rate_low) || (ec->use_adaptive_rx_coalesce) || + 
(ec->use_adaptive_tx_coalesce) || + (ec->rx_max_coalesced_frames_low) || (ec->rx_coalesce_usecs_low) || + (ec->tx_coalesce_usecs_low) || (ec->tx_max_coalesced_frames_low) || + (ec->pkt_rate_high) || (ec->rx_coalesce_usecs_high) || + (ec->rx_max_coalesced_frames_high) || + (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval)) + return -EOPNOTSUPP; + + rx_usecs = ec->rx_coalesce_usecs; + rx_riwt = hw_ops->usec_to_riwt(pdata, rx_usecs); + rx_frames = ec->rx_max_coalesced_frames; + tx_frames = ec->tx_max_coalesced_frames; + + if ((rx_riwt > FXGMAC_MAX_DMA_RIWT) || + (rx_riwt < FXGMAC_MIN_DMA_RIWT) || + (rx_frames > pdata->rx_desc_count)) + return -EINVAL; + + if (tx_frames > pdata->tx_desc_count) + return -EINVAL; + + pdata->rx_riwt = rx_riwt; + pdata->rx_usecs = rx_usecs; + pdata->rx_frames = rx_frames; + hw_ops->config_rx_coalesce(pdata); + + pdata->tx_frames = tx_frames; + hw_ops->config_tx_coalesce(pdata); + + pdata->tx_usecs = ec->tx_coalesce_usecs; + hw_ops->set_interrupt_moderation(pdata); + + DPRINTK("fxmac, set coalesce\n"); + return 0; +} + +#if (FXGMAC_RSS_FEATURE_ENABLED) +static u32 fxgmac_get_rxfh_key_size(struct net_device *netdev) +{ + return FXGMAC_RSS_HASH_KEY_SIZE; +} + +static u32 fxgmac_rss_indir_size(struct net_device *netdev) +{ + return FXGMAC_RSS_MAX_TABLE_SIZE; +} + +static void fxgmac_get_reta(struct fxgmac_pdata *pdata, u32 *indir) +{ + int i, reta_size = FXGMAC_RSS_MAX_TABLE_SIZE; + u16 rss_m; +#ifdef FXGMAC_ONE_CHANNLE + rss_m = FXGMAC_MAX_DMA_CHANNELS; +#else + rss_m = FXGMAC_MAX_DMA_CHANNELS - + 1; /* mask for index of channel, 0-3 */ +#endif + + for (i = 0; i < reta_size; i++) + indir[i] = pdata->rss_table[i] & rss_m; +} + +static int fxgmac_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + + /* ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP) + * ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR) + * ETH_RSS_HASH_CRC32 __ETH_RSS_HASH(CRC32) + */ + if (hfunc) { + *hfunc = 
ETH_RSS_HASH_TOP; + DPRINTK("fxmac, get_rxfh for hash function\n"); + } + + if (indir) { + fxgmac_get_reta(pdata, indir); + DPRINTK("fxmac, get_rxfh for indirection tab\n"); + } + + if (key) { + memcpy(key, pdata->rss_key, fxgmac_get_rxfh_key_size(netdev)); + DPRINTK("fxmac, get_rxfh for hash key\n"); + } + + return 0; +} + +static int fxgmac_set_rxfh(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + int i; + u32 reta_entries = fxgmac_rss_indir_size(netdev); + int max_queues = FXGMAC_MAX_DMA_CHANNELS; + + DPRINTK("fxmac, set_rxfh callin, indir=%lx, key=%lx, func=%02x\n", + (unsigned long)indir, (unsigned long)key, hfunc); + + if (hfunc) + return -EINVAL; + + /* Fill out the redirection table */ + if (indir) { +#if FXGMAC_MSIX_CH0RXDIS_EN + max_queues = max_queues; + reta_entries = reta_entries; + i = i; + DPRINTK("fxmac, set_rxfh, change of indirect talbe is not supported.\n"); + return -EINVAL; +#else + /* double check user input. 
*/ + for (i = 0; i < reta_entries; i++) + if (indir[i] >= max_queues) + return -EINVAL; + + for (i = 0; i < reta_entries; i++) + pdata->rss_table[i] = indir[i]; + + hw_ops->write_rss_lookup_table(pdata); +#endif + } + + /* Fill out the rss hash key */ + if (FXGMAC_RSS_HASH_KEY_LINUX && key) + hw_ops->set_rss_hash_key(pdata, key); + + return 0; +} + +static int fxgmac_get_rss_hash_opts(struct fxgmac_pdata *pdata, + struct ethtool_rxnfc *cmd) +{ + u32 reg_opt; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + cmd->data = 0; + + reg_opt = hw_ops->get_rss_options(pdata); + DPRINTK("fxgmac_get_rss_hash_opts, hw=%02x, %02x\n", reg_opt, + pdata->rss_options); + + if (reg_opt != pdata->rss_options) { + DPRINTK("fxgmac_get_rss_hash_opts, warning, options are not consistent\n"); + } + + /* Report default options for RSS */ + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + if (((TCP_V4_FLOW == (cmd->flow_type)) && + (FXGMAC_GET_REG_BITS(pdata->rss_options, + MAC_RSSCR_TCP4TE_POS, + MAC_RSSCR_TCP4TE_LEN))) || + ((UDP_V4_FLOW == (cmd->flow_type)) && + (FXGMAC_GET_REG_BITS(pdata->rss_options, + MAC_RSSCR_UDP4TE_POS, + MAC_RSSCR_UDP4TE_LEN)))) { + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + } + fallthrough; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + if (((TCP_V4_FLOW == (cmd->flow_type)) && + (FXGMAC_GET_REG_BITS(pdata->rss_options, + MAC_RSSCR_TCP4TE_POS, + MAC_RSSCR_TCP4TE_LEN))) || + ((UDP_V4_FLOW == (cmd->flow_type)) && + (FXGMAC_GET_REG_BITS(pdata->rss_options, + MAC_RSSCR_UDP4TE_POS, + MAC_RSSCR_UDP4TE_LEN))) || + (FXGMAC_GET_REG_BITS(pdata->rss_options, + MAC_RSSCR_IP4TE_POS, + MAC_RSSCR_IP4TE_LEN))) { + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + } + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + if (((TCP_V6_FLOW == (cmd->flow_type)) && + (FXGMAC_GET_REG_BITS(pdata->rss_options, + MAC_RSSCR_TCP6TE_POS, + MAC_RSSCR_TCP6TE_LEN))) || + ((UDP_V6_FLOW == (cmd->flow_type)) && + 
(FXGMAC_GET_REG_BITS(pdata->rss_options, + MAC_RSSCR_UDP6TE_POS, + MAC_RSSCR_UDP6TE_LEN)))) { + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + } + fallthrough; + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + if (((TCP_V6_FLOW == (cmd->flow_type)) && + (FXGMAC_GET_REG_BITS(pdata->rss_options, + MAC_RSSCR_TCP6TE_POS, + MAC_RSSCR_TCP6TE_LEN))) || + ((UDP_V6_FLOW == (cmd->flow_type)) && + (FXGMAC_GET_REG_BITS(pdata->rss_options, + MAC_RSSCR_UDP6TE_POS, + MAC_RSSCR_UDP6TE_LEN))) || + (FXGMAC_GET_REG_BITS(pdata->rss_options, + MAC_RSSCR_IP6TE_POS, + MAC_RSSCR_IP6TE_LEN))) { + cmd->data |= RXH_IP_SRC | RXH_IP_DST; + } + break; + default: + return -EINVAL; + } + + return 0; +} + +static int fxgmac_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct fxgmac_pdata *pdata = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = pdata->rx_q_count; + ret = 0; + DPRINTK("fxmac, get_rxnfc for rx ring cnt\n"); + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = 0; + ret = 0; + DPRINTK("fxmac, get_rxnfc for classify rule cnt\n"); + break; + case ETHTOOL_GRXCLSRULE: + DPRINTK("fxmac, get_rxnfc for classify rules\n"); + ret = 0; /* ixgbe_get_ethtool_fdir_entry(adapter, cmd); */ + break; + case ETHTOOL_GRXCLSRLALL: + cmd->rule_cnt = 0; + ret = 0; + /*ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, + (u32 *)rule_locs); + */ + DPRINTK("fxmac, get_rxnfc for classify both cnt and rules\n"); + break; + case ETHTOOL_GRXFH: + ret = fxgmac_get_rss_hash_opts(pdata, cmd); + DPRINTK("fxmac, get_rxnfc for hash options\n"); + break; + default: + break; + } + + return ret; +} + +#define UDP_RSS_FLAGS (BIT(MAC_RSSCR_UDP4TE_POS) | BIT(MAC_RSSCR_UDP6TE_POS)) +static int fxgmac_set_rss_hash_opt(struct fxgmac_pdata *pdata, + struct ethtool_rxnfc *nfc) +{ + u32 rssopt = 0; /* pdata->rss_options; */ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + 
DPRINTK("fxgmac_set_rss_hash_opt call in, nfc_data=%llx, cur opt=%x\n", + nfc->data, pdata->rss_options); + + /* For RSS, it does not support anything other than hashing + * to queues on src, dst IPs and L4 ports + */ + if (nfc->data & + ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + /* default to TCP flow and do nothting */ + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) || + !(nfc->data & RXH_L4_B_0_1) || !(nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + if (TCP_V4_FLOW == (nfc->flow_type)) { + rssopt = FXGMAC_SET_REG_BITS(rssopt, + MAC_RSSCR_IP4TE_POS, + MAC_RSSCR_IP4TE_LEN, 1); + rssopt = FXGMAC_SET_REG_BITS(rssopt, + MAC_RSSCR_TCP4TE_POS, + MAC_RSSCR_TCP4TE_LEN, 1); + } + + if (TCP_V6_FLOW == (nfc->flow_type)) { + rssopt = FXGMAC_SET_REG_BITS(rssopt, + MAC_RSSCR_IP6TE_POS, + MAC_RSSCR_IP6TE_LEN, 1); + rssopt = FXGMAC_SET_REG_BITS(rssopt, + MAC_RSSCR_TCP6TE_POS, + MAC_RSSCR_TCP6TE_LEN, 1); + } + break; + + case UDP_V4_FLOW: + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST)) + return -EINVAL; + rssopt = FXGMAC_SET_REG_BITS(rssopt, MAC_RSSCR_IP4TE_POS, + MAC_RSSCR_IP4TE_LEN, 1); + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rssopt = FXGMAC_SET_REG_BITS(rssopt, + MAC_RSSCR_UDP4TE_POS, + MAC_RSSCR_UDP4TE_LEN, 1); + break; + default: + return -EINVAL; + } + break; + case UDP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST)) + return -EINVAL; + rssopt = FXGMAC_SET_REG_BITS(rssopt, MAC_RSSCR_IP6TE_POS, + MAC_RSSCR_IP6TE_LEN, 1); + + switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rssopt = FXGMAC_SET_REG_BITS(rssopt, + MAC_RSSCR_UDP6TE_POS, + MAC_RSSCR_UDP6TE_LEN, 1); + break; + default: + return -EINVAL; + } + break; + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case 
SCTP_V4_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case SCTP_V6_FLOW: + if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST) || + (nfc->data & RXH_L4_B_0_1) || (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* if options are changed, then update to hw */ + if (rssopt != pdata->rss_options) { + if ((rssopt & UDP_RSS_FLAGS) && + !(pdata->rss_options & UDP_RSS_FLAGS)) + DPRINTK("enabling UDP RSS: fragmented packets" + " may arrive out of order to the stack above\n"); + + DPRINTK("rss option changed from %x to %x\n", + pdata->rss_options, rssopt); + pdata->rss_options = rssopt; + hw_ops->set_rss_options(pdata); + } + + return 0; +} + +static int fxgmac_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct fxgmac_pdata *pdata = netdev_priv(dev); + + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + /* no support. rx classifier rule insert */ + DPRINTK("set_rxnfc for rx cls rule insert-n\\a\n"); + break; + case ETHTOOL_SRXCLSRLDEL: + /* no support. 
rx classifier rule delete */ + DPRINTK("set_rxnfc for rx cls rule del-n\\a\n"); + break; + case ETHTOOL_SRXFH: + DPRINTK("set_rxnfc for rx rss option\n"); + ret = fxgmac_set_rss_hash_opt(pdata, cmd); + break; + default: + break; + } + + return ret; +} +#endif /* FXGMAC_RSS_FEATURE_ENABLED */ + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) +static void fxgmac_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *exact) + +#else +static void fxgmac_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + + DPRINTK("fxmac, get_ringparam callin\n"); + + ring->rx_max_pending = FXGMAC_RX_DESC_CNT; + ring->tx_max_pending = FXGMAC_TX_DESC_CNT; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = pdata->rx_desc_count; + ring->tx_pending = pdata->tx_desc_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) +static int fxgmac_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *exact) + +#else +static int fxgmac_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +#endif +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops; + + DPRINTK("fxmac, set_ringparam callin\n"); + + pdata->tx_desc_count = ring->tx_pending; + pdata->rx_desc_count = ring->rx_pending; + + fxgmac_stop(pdata); + fxgmac_free_tx_data(pdata); + fxgmac_free_rx_data(pdata); + desc_ops->alloc_channles_and_rings(pdata); + fxgmac_start(pdata); + + return 0; +} + +#if FXGMAC_WOL_FEATURE_ENABLED +static void fxgmac_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + + /* for further 
feature implementation + * wol->supported = WAKE_PHY | WAKE_UCAST | WAKE_MCAST | + * WAKE_BCAST | WAKE_MAGIC; + */ + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC | + WAKE_ARP; +#if FXGMAC_WOL_UPON_EPHY_LINK + wol->supported |= WAKE_PHY; +#endif + + wol->wolopts = 0; + if (!(pdata->hw_feat.rwk) || + !device_can_wakeup(/*pci_dev_to_dev*/ (pdata->dev))) { + DPRINTK("fxgmac get_wol, pci does not support wakeup\n"); + return; + } + wol->wolopts = pdata->expansion.wol; + DPRINTK("fxmac, get_wol, 0x%x, 0x%x\n", wol->wolopts, + pdata->expansion.wol); +} + +static int fxgmac_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + int ret; + + /* currently, we do not support these options */ +#if FXGMAC_WOL_UPON_EPHY_LINK +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) + if (wol->wolopts & (WAKE_MAGICSECURE | WAKE_FILTER)) { +#else + if (wol->wolopts & WAKE_MAGICSECURE) { +#endif +#else +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) + if (wol->wolopts & (WAKE_PHY | WAKE_MAGICSECURE | WAKE_FILTER)) { +#else + if (wol->wolopts & (WAKE_PHY | WAKE_MAGICSECURE)) { +#endif +#endif + DPRINTK("fxmac, set_wol, not supported wol options, 0x%x\n", + wol->wolopts); + return -EOPNOTSUPP; + } + + if (!(pdata->hw_feat.rwk)) { + DPRINTK("fxmac, set_wol, hw wol feature is n/a\n"); + ret = (wol->wolopts ? 
-EOPNOTSUPP : 0); + return ret; + } + + pdata->expansion.wol = 0; + if (wol->wolopts & WAKE_UCAST) + pdata->expansion.wol |= WAKE_UCAST; + + if (wol->wolopts & WAKE_MCAST) + pdata->expansion.wol |= WAKE_MCAST; + + if (wol->wolopts & WAKE_BCAST) + pdata->expansion.wol |= WAKE_BCAST; + + if (wol->wolopts & WAKE_MAGIC) + pdata->expansion.wol |= WAKE_MAGIC; + + if (wol->wolopts & WAKE_PHY) + pdata->expansion.wol |= WAKE_PHY; + + if (wol->wolopts & WAKE_ARP) + pdata->expansion.wol |= WAKE_ARP; + + hw_ops->set_pattern_data(pdata); + + hw_ops->config_wol(pdata, (!!(pdata->expansion.wol))); + + DPRINTK("fxmac, set_wol, opt=0x%x, 0x%x\n", wol->wolopts, + pdata->expansion.wol); + + return 0; +} +#endif /*FXGMAC_WOL_FEATURE_ENABLED*/ + +static int fxgmac_get_regs_len(struct net_device __always_unused *netdev) +{ + return FXGMAC_EPHY_REGS_LEN * sizeof(u32); +} + +static void fxgmac_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + u32 *regs_buff = p; + u8 i; + + memset(p, 0, FXGMAC_EPHY_REGS_LEN * sizeof(u32)); + for (i = REG_MII_BMCR; i < FXGMAC_EPHY_REGS_LEN; i++) { + hw_ops->read_ephy_reg(pdata, i, (unsigned int *)®s_buff[i]); + } + regs->version = regs_buff[REG_MII_PHYSID1] << 16 | + regs_buff[REG_MII_PHYSID2]; +} + +#if FXGMAC_PAUSE_FEATURE_ENABLED +static int fxgmac_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 duplex, regval, link_status; + u32 adv = 0xFFFFFFFF; + + regval = fxgmac_ephy_autoneg_ability_get(pdata, &adv); + if (regval) + return -ETIMEDOUT; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + /* set the supported link speeds */ + ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + 
ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half); + + /* Indicate pause support */ + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Asym_Pause); + + ethtool_link_ksettings_add_link_mode(cmd, supported, MII); + cmd->base.port = PORT_MII; + + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_GET_REG_BITS(regval, PHY_CR_AUTOENG_POS, + PHY_CR_AUTOENG_LEN); + if (regval) { + if (pdata->phy_autoeng) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Autoneg); + else + clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + cmd->link_modes.advertising); + + if (adv & FXGMAC_ADVERTISE_10HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Half); + if (adv & FXGMAC_ADVERTISE_10FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + if (adv & FXGMAC_ADVERTISE_100HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Half); + if (adv & FXGMAC_ADVERTISE_100FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + if (adv & FXGMAC_ADVERTISE_1000FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + } else { + clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + cmd->link_modes.advertising); + switch (pdata->phy_speed) { + case SPEED_1000M: + if (pdata->phy_duplex) + ethtool_link_ksettings_add_link_mode( + cmd, advertising, 1000baseT_Full); + else + ethtool_link_ksettings_add_link_mode( + cmd, advertising, 1000baseT_Half); + break; + case SPEED_100M: + if 
(pdata->phy_duplex) + ethtool_link_ksettings_add_link_mode( + cmd, advertising, 100baseT_Full); + else + ethtool_link_ksettings_add_link_mode( + cmd, advertising, 100baseT_Half); + break; + case SPEED_10M: + if (pdata->phy_duplex) + ethtool_link_ksettings_add_link_mode( + cmd, advertising, 10baseT_Full); + else + ethtool_link_ksettings_add_link_mode( + cmd, advertising, 10baseT_Half); + break; + default: + break; + } + } + cmd->base.autoneg = pdata->phy_autoeng ? regval : 0; + + hw_ops->read_ephy_reg(pdata, REG_MII_SPEC_STATUS, ®val); + link_status = regval & (BIT(FUXI_EPHY_LINK_STATUS_BIT)); + if (link_status) { + duplex = FXGMAC_GET_REG_BITS(regval, PHY_MII_SPEC_DUPLEX_POS, + PHY_MII_SPEC_DUPLEX_LEN); + cmd->base.duplex = duplex; + cmd->base.speed = pdata->phy_speed; + } else { + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = SPEED_UNKNOWN; + } + + return 0; +} + +static int fxgmac_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + u32 advertising, support, adv; + int ret; + struct fxphy_ag_adv; + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF) + return -EINVAL; + + pdata->phy_autoeng = cmd->base.autoneg; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); + ethtool_convert_link_mode_to_legacy_u32(&support, + cmd->link_modes.supported); + advertising &= support; + + if (pdata->phy_autoeng || + (!pdata->phy_autoeng && cmd->base.speed == SPEED_1000)) { + ret = hw_ops->read_ephy_reg(pdata, REG_MII_ADVERTISE, &adv); + if (ret < 0) + return -ETIMEDOUT; + adv &= ~REG_BIT_ADVERTISE_100_10_CAP; + adv |= ethtool_adv_to_mii_adv_t(advertising); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_ADVERTISE, adv); + if (ret < 0) + return -ETIMEDOUT; + ret = hw_ops->read_ephy_reg(pdata, REG_MII_CTRL1000, &adv); + if (ret < 0) + return -ETIMEDOUT; + adv &= 
~REG_BIT_ADVERTISE_1000_CAP; + adv |= ethtool_adv_to_mii_ctrl1000_t(advertising); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_CTRL1000, adv); + if (ret < 0) + return -ETIMEDOUT; + + ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &adv); + if (ret < 0) + return -ETIMEDOUT; + adv = FXGMAC_SET_REG_BITS(adv, PHY_CR_AUTOENG_POS, + PHY_CR_AUTOENG_LEN, 1); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, adv); + if (ret < 0) + return -ETIMEDOUT; + + ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &adv); + if (ret < 0) + return -ETIMEDOUT; + adv = FXGMAC_SET_REG_BITS(adv, PHY_CR_RE_AUTOENG_POS, + PHY_CR_RE_AUTOENG_LEN, 1); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, adv); + if (ret < 0) + return -ETIMEDOUT; + } else { + pdata->phy_duplex = cmd->base.duplex; + pdata->phy_speed = cmd->base.speed; + fxgmac_phy_force_speed(pdata, pdata->phy_speed); + fxgmac_phy_force_duplex(pdata, pdata->phy_duplex); + fxgmac_phy_force_autoneg(pdata, pdata->phy_autoeng); + } + + ret = fxgmac_ephy_soft_reset(pdata); + if (ret) { + printk("%s: ephy soft reset timeout.\n", __func__); + return -ETIMEDOUT; + } + + return 0; +} + +static void fxgmac_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + + pause->autoneg = 1; + pause->rx_pause = pdata->rx_pause; + pause->tx_pause = pdata->tx_pause; + + DPRINTK("fxmac get_pauseparam done, rx=%d, tx=%d\n", pdata->rx_pause, + pdata->tx_pause); +} + +static int fxgmac_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + unsigned int pre_rx_pause = pdata->rx_pause; + unsigned int pre_tx_pause = pdata->tx_pause; + + pdata->rx_pause = pause->rx_pause; + pdata->tx_pause = pause->tx_pause; + + if (pre_rx_pause != pdata->rx_pause) { + hw_ops->config_rx_flow_control(pdata); + DPRINTK("fxgmac set pause parameter, rx from %d to %d\n", + 
pre_rx_pause, pdata->rx_pause); + } + if (pre_tx_pause != pdata->tx_pause) { + hw_ops->config_tx_flow_control(pdata); + DPRINTK("fxgmac set pause parameter, tx from %d to %d\n", + pre_tx_pause, pdata->tx_pause); + } + + DPRINTK("fxgmac set pause parameter, autoneg=%d, rx=%d, tx=%d\n", + pause->autoneg, pause->rx_pause, pause->tx_pause); + + return 0; +} +#endif /*FXGMAC_PAUSE_FEATURE_ENABLED*/ + +/* yzhang added for debug sake. descriptors status checking + * 2021.03.29 + */ +#define FXGMAC_ETH_GSTRING_LEN 32 + +#define FXGMAC_TEST_LEN (sizeof(fxgmac_gstrings_test) / FXGMAC_ETH_GSTRING_LEN) +#define DBG_ETHTOOL_CHECK_NUM_OF_DESC 5 + +static void fxgmac_ethtool_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < FXGMAC_STATS_COUNT; i++) { + memcpy(data, fxgmac_gstring_stats[i].stat_string, + strlen(fxgmac_gstring_stats[i].stat_string)); + data += ETH_GSTRING_LEN; + } + break; + default: + WARN_ON(1); + break; + } +} + +static int fxgmac_ethtool_get_sset_count(struct net_device *netdev, + int stringset) +{ + int ret; + + switch (stringset) { + case ETH_SS_STATS: + ret = FXGMAC_STATS_COUNT; + break; + + default: + ret = -EOPNOTSUPP; + } + + return ret; +} + +static void fxgmac_ethtool_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + u8 *stat; + int i; + +#if FXGMAC_PM_FEATURE_ENABLED + /* 20210709 for net power down */ + if (!test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) +#endif + { + pdata->hw_ops.read_mmc_stats(pdata); + } + + for (i = 0; i < FXGMAC_STATS_COUNT; i++) { + stat = (u8 *)pdata + fxgmac_gstring_stats[i].stat_offset; + *data++ = *(u64 *)stat; + } +} + +static inline bool fxgmac_removed(void __iomem *addr) +{ + return unlikely(!addr); +} +#define FXGMAC_REMOVED(a) fxgmac_removed(a) + +static const struct ethtool_ops fxgmac_ethtool_ops = { + .get_drvinfo 
= fxgmac_ethtool_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_msglevel = fxgmac_ethtool_get_msglevel, + .set_msglevel = fxgmac_ethtool_set_msglevel, + .get_channels = fxgmac_ethtool_get_channels, + .get_coalesce = fxgmac_ethtool_get_coalesce, + .set_coalesce = fxgmac_ethtool_set_coalesce, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) + +/* The process of set is to get first and then set, + * and the result of get is preserved for values that have not been modified. + * + * Therefore, when using, it is necessary to ensure that this macro and the + * assignment operation in the get_coalesce are one-to-one correspondence, + * otherwise the macro and parameters will be verified when set, and the error + * of "Operation not supported " will be reported if the verification fails + */ +#ifdef ETHTOOL_COALESCE_USECS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, +#endif +#endif + .get_strings = fxgmac_ethtool_get_strings, + .get_sset_count = fxgmac_ethtool_get_sset_count, + .get_ethtool_stats = fxgmac_ethtool_get_ethtool_stats, + .get_regs_len = fxgmac_get_regs_len, + .get_regs = fxgmac_get_regs, + .get_ringparam = fxgmac_get_ringparam, + .set_ringparam = fxgmac_set_ringparam, +#if (FXGMAC_RSS_FEATURE_ENABLED) + .get_rxnfc = fxgmac_get_rxnfc, + .set_rxnfc = fxgmac_set_rxnfc, + .get_rxfh_indir_size = fxgmac_rss_indir_size, + .get_rxfh_key_size = fxgmac_get_rxfh_key_size, + .get_rxfh = fxgmac_get_rxfh, + .set_rxfh = fxgmac_set_rxfh, +#endif +#if (FXGMAC_WOL_FEATURE_ENABLED) + .get_wol = fxgmac_get_wol, + .set_wol = fxgmac_set_wol, +#endif +#if (FXGMAC_PAUSE_FEATURE_ENABLED) +#ifdef ETHTOOL_GLINKSETTINGS + .get_link_ksettings = fxgmac_get_link_ksettings, + .set_link_ksettings = fxgmac_set_link_ksettings, +#endif /* ETHTOOL_GLINKSETTINGS */ + .get_pauseparam = fxgmac_get_pauseparam, + .set_pauseparam = fxgmac_set_pauseparam, +#endif +}; + +const struct ethtool_ops *fxgmac_get_ethtool_ops(void) +{ + return &fxgmac_ethtool_ops; +} diff --git 
a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c new file mode 100644 index 000000000000..0517968365d7 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c @@ -0,0 +1,6256 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. */ + +#include "fuxi-os.h" +#include "fuxi-gmac.h" +#include "fuxi-gmac-reg.h" +#include "fuxi-efuse.h" + +void fxgmac_release_phy(struct fxgmac_pdata *pdata); +static void fxgmac_pwr_clock_ungate(struct fxgmac_pdata *pdata); +static void fxgmac_pwr_clock_gate(struct fxgmac_pdata *pdata); + +static int fxgmac_tx_complete(struct fxgmac_dma_desc *dma_desc) +{ +#if (FXGMAC_DUMMY_TX_DEBUG) + return 1; +#endif + return !FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN); +} + +static int fxgmac_disable_rx_csum(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_IPC_POS, MAC_CR_IPC_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + DPRINTK("fxgmac disable rx checksum.\n"); + return 0; +} + +static int fxgmac_enable_rx_csum(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_IPC_POS, MAC_CR_IPC_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + DPRINTK("fxgmac enable rx checksum.\n"); + return 0; +} + +static int fxgmac_set_mac_address(struct fxgmac_pdata *pdata, u8 *addr) +{ + unsigned int mac_addr_hi, mac_addr_lo; + + mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); + mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | + (addr[0] << 0); + + writereg(pdata->pAdapter, mac_addr_hi, pdata->mac_regs + MAC_MACA0HR); + writereg(pdata->pAdapter, mac_addr_lo, pdata->mac_regs + MAC_MACA0LR); + + return 0; +} + +#if !defined(DPDK) +static 
void fxgmac_set_mac_reg(struct fxgmac_pdata *pdata, + struct netdev_hw_addr *ha, unsigned int *mac_reg) +{ + unsigned int mac_addr_hi, mac_addr_lo; + u8 *mac_addr; + + mac_addr_lo = 0; + mac_addr_hi = 0; + + if (ha) { + mac_addr = (u8 *)&mac_addr_lo; + mac_addr[0] = ha->addr[0]; + mac_addr[1] = ha->addr[1]; + mac_addr[2] = ha->addr[2]; + mac_addr[3] = ha->addr[3]; + mac_addr = (u8 *)&mac_addr_hi; + mac_addr[0] = ha->addr[4]; + mac_addr[1] = ha->addr[5]; + + netif_dbg(pdata, drv, pdata->netdev, + "adding mac address %pM at %#x\n", ha->addr, + *mac_reg); + + mac_addr_hi = FXGMAC_SET_REG_BITS( + mac_addr_hi, MAC_MACA1HR_AE_POS, MAC_MACA1HR_AE_LEN, 1); + } + + writereg(pdata->pAdapter, mac_addr_hi, pdata->mac_regs + *mac_reg); + *mac_reg += MAC_MACA_INC; + writereg(pdata->pAdapter, mac_addr_lo, pdata->mac_regs + *mac_reg); + *mac_reg += MAC_MACA_INC; +} +#endif + +static int fxgmac_enable_tx_vlan(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANIR); + /* Indicate that VLAN Tx CTAGs come from mac_vlan_incl register */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS, + MAC_VLANIR_VLTI_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS, + MAC_VLANIR_CSVL_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLP_POS, + MAC_VLANIR_VLP_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLC_POS, + MAC_VLANIR_VLC_LEN, 2); + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLT_POS, + MAC_VLANIR_VLT_LEN, pdata->vlan); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANIR); + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANTR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS, + MAC_VLANTR_VL_LEN, pdata->vlan); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR); + + return 0; +} + +static int fxgmac_disable_tx_vlan(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANIR); + + /* 
Indicate that VLAN Tx CTAGs come from mac_vlan_incl register
	 * Set VLAN Tag input enable (VLTI = 1)
	 */
	regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_CSVL_POS,
				     MAC_VLANIR_CSVL_LEN, 0);
	regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLTI_POS,
				     MAC_VLANIR_VLTI_LEN, /*0*/ 1);
	/* Set VLAN priority control disable */
	regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLP_POS,
				     MAC_VLANIR_VLP_LEN, /*1*/ 0);
	/* VLC = 0: no TX VLAN tag operation (enable path uses VLC = 2,
	 * insertion) — NOTE(review): encoding per enable_tx_vlan above;
	 * confirm against the MAC datasheet.
	 */
	regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANIR_VLC_POS,
				     MAC_VLANIR_VLC_LEN, 0);
	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANIR);

	return 0;
}

/* Enable stripping of the VLAN C-TAG from received frames.  The stripped
 * tag is made available in the RX descriptor (EVLRXS = 1); stripping is
 * unconditional (EVLS = 0x3) and S-TAG (0x88A8) frames are not treated
 * as VLAN.  Always returns 0.
 */
static int fxgmac_enable_rx_vlan_stripping(struct fxgmac_pdata *pdata)
{
	u32 regval;

	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANTR);
	/* Put the VLAN tag in the Rx descriptor */
	regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLRXS_POS,
				     MAC_VLANTR_EVLRXS_LEN, 1);
	/* Don't check the VLAN type */
	regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_DOVLTC_POS,
				     MAC_VLANTR_DOVLTC_LEN, 1);
	/* Check only C-TAG (0x8100) packets */
	regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_ERSVLM_POS,
				     MAC_VLANTR_ERSVLM_LEN, 0);
	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_ESVL_POS,
				     MAC_VLANTR_ESVL_LEN, 0);
	/* Enable VLAN tag stripping */
	regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
				     MAC_VLANTR_EVLS_LEN, 0x3);
	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR);
	DPRINTK("fxgmac enable MAC rx vlan stripping.\n");

	return 0;
}

/* Disable RX VLAN tag stripping (EVLS = 0); tags stay in the packet.
 * Always returns 0.
 */
static int fxgmac_disable_rx_vlan_stripping(struct fxgmac_pdata *pdata)
{
	u32 regval;

	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANTR);
	regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS,
				     MAC_VLANTR_EVLS_LEN, 0);
	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR);
	DPRINTK("fxgmac disable MAC rx vlan stripping.\n");

	return 0;
}

/* Turn on RX VLAN filtering in the packet filter register (VTFE) */
static int fxgmac_enable_rx_vlan_filtering(struct fxgmac_pdata
*pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PFR); + /* Enable VLAN filtering */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS, MAC_PFR_VTFE_LEN, + 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR); + +#if FXGMAC_FILTER_SINGLE_VLAN_ENABLED + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANTR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS, + MAC_VLANTR_VL_LEN, pdata->vlan); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR); +#else + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANTR); + /* Enable VLAN Hash Table filtering */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTHM_POS, + MAC_VLANTR_VTHM_LEN, 1); + /* Disable VLAN tag inverse matching */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_VTIM_POS, + MAC_VLANTR_VTIM_LEN, 0); + /* Only filter on the lower 12-bits of the VLAN tag */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_ETV_POS, + MAC_VLANTR_ETV_LEN, 1); +#endif + + return 0; +} + +static int fxgmac_disable_rx_vlan_filtering(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PFR); + /* Disable VLAN filtering */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_VTFE_POS, MAC_PFR_VTFE_LEN, + 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR); + +#if FXGMAC_FILTER_SINGLE_VLAN_ENABLED + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANTR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_VL_POS, + MAC_VLANTR_VL_LEN, pdata->vlan); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR); +#endif + + return 0; +} + +#if FXGMAC_FILTER_MULTIPLE_VLAN_ENABLED +static u32 fxgmac_vid_crc32_le(__le16 vid_le) +{ + unsigned char *data = (unsigned char *)&vid_le; + unsigned char data_byte = 0; + u32 crc = ~0; + u32 temp = 0; + int i, bits; + + bits = get_bitmask_order(VLAN_VID_MASK); + for (i = 0; i < bits; i++) { + if ((i % 8) == 0) + data_byte = data[i / 
8]; + + temp = ((crc & 1) ^ data_byte) & 1; + crc >>= 1; + data_byte >>= 1; + + if (temp) + crc ^= CRC32_POLY_LE; + } + + return crc; +} +#endif + +static int fxgmac_update_vlan_hash_table(struct fxgmac_pdata *pdata) +{ + u16 vlan_hash_table = 0; + u32 regval; +#if FXGMAC_FILTER_MULTIPLE_VLAN_ENABLED + __le16 vid_le; + u32 crc; + u16 vid; + /* Generate the VLAN Hash Table value */ + for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { + /* Get the CRC32 value of the VLAN ID */ + vid_le = cpu_to_le16(vid); + crc = bitrev32(~fxgmac_vid_crc32_le(vid_le)) >> 28; + + vlan_hash_table |= (1 << crc); + } +#endif + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_VLANHTR); + /* Set the VLAN Hash Table filtering register */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANHTR_VLHT_POS, + MAC_VLANHTR_VLHT_LEN, vlan_hash_table); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANHTR); + + DPRINTK("fxgmac_update_vlan_hash_tabl done, hash tbl=%08x.\n", + vlan_hash_table); + return 0; +} + +static int fxgmac_set_promiscuous_mode(struct fxgmac_pdata *pdata, + unsigned int enable) +{ + unsigned int val = enable ? 1 : 0; + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PFR); + + if (FXGMAC_GET_REG_BITS(regval, MAC_PFR_PR_POS, MAC_PFR_PR_LEN) == + val) { + return 0; + } + netif_dbg(pdata, drv, pdata->netdev, + "" STR_FORMAT " promiscuous mode\n", + enable ? 
"entering" : "leaving"); + + regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_PR_POS, MAC_PFR_PR_LEN, + val); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR); + + DbgPrintF(MP_TRACE, "" STR_FORMAT " - promiscuous mode=%d, reg=%x.", + __FUNCTION__, enable, regval); + DbgPrintF( + MP_TRACE, + "" STR_FORMAT + " - note, vlan filter is called when set promiscuous mode=%d.", + __FUNCTION__, enable); + + /* Hardware will still perform VLAN filtering in promiscuous mode */ + if (enable) { + fxgmac_disable_rx_vlan_filtering(pdata); + } else { + if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + fxgmac_enable_rx_vlan_filtering(pdata); + } + } + + DPRINTK("fxgmac set promisc mode=%d\n", enable); + return 0; +} + +static int fxgmac_enable_rx_broadcast(struct fxgmac_pdata *pdata, + unsigned int enable) +{ + /* mac reg bit is disable, so invert the val. */ + unsigned int val = enable ? 0 : 1; + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PFR); + + if (FXGMAC_GET_REG_BITS(regval, MAC_PFR_DBF_POS, MAC_PFR_DBF_LEN) == + val) { + return 0; + } + + regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_DBF_POS, MAC_PFR_DBF_LEN, + val); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR); + + DbgPrintF(MP_TRACE, "%s - bcast en=%d, bit-val=%d, reg=%x.", + __FUNCTION__, enable, val, regval); + return 0; +} + +static int fxgmac_set_all_multicast_mode(struct fxgmac_pdata *pdata, + unsigned int enable) +{ + unsigned int val = enable ? 1 : 0; + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PFR); + if (FXGMAC_GET_REG_BITS(regval, MAC_PFR_PM_POS, MAC_PFR_PM_LEN) == + val) { + return 0; + } + netif_dbg(pdata, drv, pdata->netdev, "" STR_FORMAT " allmulti mode\n", + enable ? 
"entering" : "leaving"); + + regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_PM_POS, MAC_PFR_PM_LEN, + val); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR); + + DbgPrintF(MP_TRACE, + "" STR_FORMAT " - Enable all Multicast=%d, regval=%#x.", + __FUNCTION__, enable, regval); + + return 0; +} + +static void fxgmac_set_mac_addn_addrs(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK +#if FXGMAC_FILTER_MULTIPLE_MAC_ADDR_ENABLED + struct net_device *netdev = pdata->netdev; + struct netdev_hw_addr *ha; +#endif + unsigned int addn_macs; + unsigned int mac_reg; + + mac_reg = MAC_MACA1HR; + addn_macs = pdata->hw_feat.addn_mac; +#if FXGMAC_FILTER_MULTIPLE_MAC_ADDR_ENABLED + DPRINTK("xlgamc add mac addr callin\n"); + if (netdev_uc_count(netdev) > addn_macs) { + fxgmac_set_promiscuous_mode(pdata, 1); + } else { + netdev_for_each_uc_addr(ha, netdev) { + fxgmac_set_mac_reg(pdata, ha, &mac_reg); + addn_macs--; + } + + if (netdev_mc_count(netdev) > addn_macs) { + fxgmac_set_all_multicast_mode(pdata, 1); + } else { + netdev_for_each_mc_addr(ha, netdev) { + fxgmac_set_mac_reg(pdata, ha, &mac_reg); + addn_macs--; + } + } + } +#endif + /* Clear remaining additional MAC address entries */ + while (addn_macs--) { + fxgmac_set_mac_reg(pdata, NULL, &mac_reg); + } +#else + (void)pdata; +#endif +} + +#define GET_REG_AND_BIT_POS(reversalval, regOut, bitOut) \ + do { \ + regOut = (((reversalval) >> 5) & 0x7); \ + bitOut = ((reversalval) & 0x1f); \ + } while (0) + +static u32 fxgmac_crc32(unsigned char *Data, int Length) +{ + u32 Crc = (u32)~0; /* Initial value. 0xFFFFFFFF */ + + while (--Length >= 0) { + unsigned char Byte = *Data++; + int Bit; + + for (Bit = 8; --Bit >= 0; Byte >>= 1) { + if ((Crc ^ Byte) & 1) { + Crc >>= 1; + Crc ^= 0xedb88320; + } else { + Crc >>= 1; + } + } + } + + return ~Crc; +} + +/* + * configure multicast hash table, reg 0x2010~202c + * input: pmc_mac, pointer to mcast MAC. if it is null, then clean all registers. 
+ * b_add, 1 to set the bit; 0 to clear the bit. + */ +static void fxgmac_config_multicast_mac_hash_table(struct fxgmac_pdata *pdata, + unsigned char *pmc_mac, + int b_add) +{ + unsigned int hash_reg, reg_bit; + unsigned int j; + u32 crc, reversal_crc, regval; + + if (!pmc_mac) { + for (j = 0; j < FXGMAC_MAC_HASH_TABLE_SIZE; j++) { + hash_reg = j; + hash_reg = (MAC_HTR0 + hash_reg * MAC_HTR_INC); + writereg(pdata->pAdapter, 0, + pdata->mac_regs + hash_reg); + } + DBGPRINT( + MP_TRACE, + ("> 24), hash_reg, reg_bit); + /* Set the MAC Hash Table registers */ + hash_reg = (MAC_HTR0 + hash_reg * MAC_HTR_INC); + regval = readreg(pdata->pAdapter, pdata->mac_regs + hash_reg); + + regval = FXGMAC_SET_REG_BITS(regval, reg_bit, 1, (b_add ? 1 : 0)); + + writereg(pdata->pAdapter, regval, pdata->mac_regs + hash_reg); +} + +static void fxgmac_set_mac_hash_table(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK +#if FUXI_MAC_HASH_TABLE + struct net_device *netdev = pdata->netdev; + struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, netdev) { + fxgmac_config_multicast_mac_hash_table(pdata, ha->addr, 1); + } +#endif + pdata = pdata; + +#else + (void)pdata; +#endif +} + +static int fxgmac_add_mac_addresses(struct fxgmac_pdata *pdata) +{ + if (pdata->hw_feat.hash_table_size) + fxgmac_set_mac_hash_table(pdata); + else + fxgmac_set_mac_addn_addrs(pdata); + + return 0; +} + +static void fxgmac_config_mac_address(struct fxgmac_pdata *pdata) +{ + u32 regval; + fxgmac_set_mac_address(pdata, pdata->mac_addr); + + /* Filtering is done using perfect filtering and hash filtering */ + if (pdata->hw_feat.hash_table_size) { + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PFR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS, + MAC_PFR_HPF_LEN, 1); +#if FUXI_MAC_HASH_TABLE + regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS, + MAC_PFR_HUC_LEN, 1); +#endif + regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_HMC_POS, + MAC_PFR_HMC_LEN, 1); + writereg(pdata->pAdapter, regval, 
pdata->mac_regs + MAC_PFR); + } +} + +static int fxgmac_config_crc_check(struct fxgmac_pdata *pdata) +{ + u32 regval, value; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_ECR); + value = (pdata->crc_check) ? 0 : 1; + regval = FXGMAC_SET_REG_BITS(regval, MAC_ECR_DCRCC_POS, + MAC_ECR_DCRCC_LEN, value); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_ECR); + + return 0; +} + +static int fxgmac_config_jumbo(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_JE_POS, MAC_CR_JE_LEN, + pdata->jumbo); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + return 0; +} + +static void fxgmac_config_checksum_offload(struct fxgmac_pdata *pdata) +{ + if (pdata->netdev->features & NETIF_F_RXCSUM) + fxgmac_enable_rx_csum(pdata); + else + fxgmac_disable_rx_csum(pdata); +} + +static void fxgmac_config_vlan_support(struct fxgmac_pdata *pdata) +{ + fxgmac_disable_tx_vlan( + pdata); /* configure dynamical vlanID from TX Context. 
*/ + + /* Set the current VLAN Hash Table register value */ + fxgmac_update_vlan_hash_table(pdata); + + if (pdata->vlan_filter) /* disable vlan rx filter by default */ + fxgmac_enable_rx_vlan_filtering(pdata); + else + fxgmac_disable_rx_vlan_filtering(pdata); + + if (pdata->vlan_strip) /* enable vlan rx strip by default */ + fxgmac_enable_rx_vlan_stripping(pdata); + else + fxgmac_disable_rx_vlan_stripping(pdata); +} + +static int fxgmac_config_rx_mode(struct fxgmac_pdata *pdata) +{ + struct net_device *netdev = pdata->netdev; + unsigned int pr_mode, am_mode; + + pr_mode = ((netdev->flags & IFF_PROMISC) != 0); + am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); + + fxgmac_set_promiscuous_mode(pdata, pr_mode); + fxgmac_set_all_multicast_mode(pdata, am_mode); + + fxgmac_add_mac_addresses(pdata); + + return 0; +} + +static void fxgmac_prepare_tx_stop(struct fxgmac_pdata *pdata, + struct fxgmac_channel *channel) +{ + unsigned int tx_dsr, tx_pos, tx_qidx; + unsigned long tx_timeout; + unsigned int tx_status; + + pdata = pdata; + + /* Calculate the status register to read and the position within */ + if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) { + tx_dsr = DMA_DSR0; + tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) + + DMA_DSR0_TPS_START; + } else { + tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE; + + tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); + tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) + + DMA_DSRX_TPS_START; + } + +#if FXGMAC_TX_HANG_TIMER_EN + tx_timeout = jiffies + msecs_to_jiffies(100); /* 100ms */ +#else + tx_timeout = jiffies + (FXGMAC_DMA_STOP_TIMEOUT * HZ); +#endif + while (time_before(jiffies, tx_timeout)) { + tx_status = readreg(pdata->pAdapter, pdata->mac_regs + tx_dsr); + tx_status = + FXGMAC_GET_REG_BITS(tx_status, tx_pos, DMA_DSR_TPS_LEN); + if ((tx_status == DMA_TPS_STOPPED) || + (tx_status == DMA_TPS_SUSPENDED)) + break; + + usleep_range_ex(pdata->pAdapter, 500, 1000); + } + + if (!time_before(jiffies, 
tx_timeout)) + netdev_info(pdata->netdev, + "timed out waiting for Tx DMA channel %u to stop\n", + channel->queue_index); +} + +static void fxgmac_enable_tx(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; +#endif + unsigned int i; + u32 regval; + +#if FXGMAC_TX_HANG_TIMER_EN + pdata->tx_hang_restart_queuing = 0; +#endif + + /* Enable each Tx DMA channel */ +#ifndef DPDK + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS, + DMA_CH_TCR_ST_LEN, 1); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + } +#else + PMD_INIT_FUNC_TRACE(); + struct fxgmac_tx_queue *txq; + struct rte_eth_dev *dev = pdata->expansion.eth_dev; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + /* Enable Tx DMA channel */ + FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1); + } +#endif + + /* Enable each Tx queue */ + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS, + MTL_Q_TQOMR_TXQEN_LEN, + MTL_Q_ENABLED); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + } + + /* Enable MAC Tx */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_TE_POS, MAC_CR_TE_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); +} + +static void fxgmac_disable_tx(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; +#endif + unsigned int i; + u32 regval; + + /* Prepare for Tx DMA channel stop */ +#ifndef DPDK + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + fxgmac_prepare_tx_stop(pdata, channel); + 
+#if FXGMAC_TX_HANG_TIMER_EN + pdata->tx_hang_restart_queuing = 0; +#endif + } + } + +#else + PMD_INIT_FUNC_TRACE(); + struct fxgmac_tx_queue *txq; + struct rte_eth_dev *dev = pdata->expansion.eth_dev; + + for (i = 0; i < pdata->tx_q_count; i++) { + txq = dev->data->tx_queues[i]; + fxgmac_txq_prepare_tx_stop(pdata, i); + } +#endif + + /* Disable MAC Tx */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_TE_POS, MAC_CR_TE_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + /* Disable each Tx queue */ + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS, + MTL_Q_TQOMR_TXQEN_LEN, 0); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + } + + /* Disable each Tx DMA channel */ +#ifndef DPDK + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS, + DMA_CH_TCR_ST_LEN, 0); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + } + } +#else + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0); + } +#endif +} + +static void fxgmac_prepare_rx_stop(struct fxgmac_pdata *pdata, + unsigned int queue) +{ + unsigned int rx_status, prxq; + unsigned int rxqsts; + unsigned long rx_timeout; + /* The Rx engine cannot be stopped if it is actively processing + * packets. Wait for the Rx queue to empty the Rx fifo. Don't + * wait forever though... 
+ */ +#if FXGMAC_TX_HANG_TIMER_EN + rx_timeout = + jiffies + msecs_to_jiffies(500); /* 500ms, larger is better */ +#else + rx_timeout = jiffies + (FXGMAC_DMA_STOP_TIMEOUT * HZ); +#endif + while (time_before(jiffies, rx_timeout)) { + rx_status = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR)); + prxq = FXGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS, + MTL_Q_RQDR_PRXQ_LEN); + rxqsts = FXGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS, + MTL_Q_RQDR_RXQSTS_LEN); + if ((prxq == 0) && (rxqsts == 0)) + break; + + usleep_range_ex(pdata->pAdapter, 500, 1000); + } + + if (!time_before(jiffies, rx_timeout)) + netdev_info(pdata->netdev, + "timed out waiting for Rx queue %u to empty\n", + queue); +} + +static void fxgmac_enable_rx(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; +#endif + unsigned int regval, i; + + /* Enable each Rx DMA channel */ +#ifndef DPDK + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, + DMA_CH_RCR_SR_LEN, 1); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + } + +#else + PMD_INIT_FUNC_TRACE(); + struct fxgmac_rx_queue *rxq; + struct rte_eth_dev *dev = pdata->expansion.eth_dev; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + /* Enable Rx DMA channel */ + FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1); + } +#endif + + /* Enable each Rx queue */ + regval = 0; + for (i = 0; i < pdata->rx_q_count; i++) + regval |= (0x02 << (i << 1)); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_RQC0R); + +#ifndef DPDK + /* Enable MAC Rx */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_CST_POS, MAC_CR_CST_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_ACS_POS, MAC_CR_ACS_LEN, 1); + regval = 
FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, MAC_CR_RE_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); +#else + /* Enable MAC Rx */ + FXGMAC_IOWRITE_BITS(pdata, MAC_ECR, DCRCC, 1); + + /* Frame is forwarded after stripping CRC to application*/ + if (pdata->expansion.crc_strip_enable) { + FXGMAC_IOWRITE_BITS(pdata, MAC_CR, CST, 1); + FXGMAC_IOWRITE_BITS(pdata, MAC_CR, ACS, 1); + } + FXGMAC_IOWRITE_BITS(pdata, MAC_CR, RE, 1); +#endif +} +static void fxgmac_enable_channel_rx(struct fxgmac_pdata *pdata, + unsigned int queue) +{ + struct fxgmac_channel *channel; + unsigned int regval; + + /* Enable Rx DMA channel */ + channel = pdata->channel_head + queue; + + if (!channel->rx_ring) + return; + regval = readreg(pdata->pAdapter, FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, + DMA_CH_RCR_SR_LEN, 1); + writereg(pdata->pAdapter, regval, FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + /* Enable Rx queue */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_RQC0R); + regval |= (0x02 << (queue << 1)); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_RQC0R); + + /* Enable MAC Rx */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + if (!(regval & ((0x01 << MAC_CR_CST_POS) | (0x01 << MAC_CR_ACS_POS) | + (0x01 << MAC_CR_RE_POS)))) { + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_CST_POS, + MAC_CR_CST_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_ACS_POS, + MAC_CR_ACS_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, + MAC_CR_RE_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + } +} + +static void fxgmac_disable_rx(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; +#endif + unsigned int i; + u32 regval; + + /* Disable MAC Rx */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_CST_POS, MAC_CR_CST_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, 
MAC_CR_ACS_POS, MAC_CR_ACS_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, MAC_CR_RE_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + /* Prepare for Rx DMA channel stop */ +#ifndef DPDK + for (i = 0; i < pdata->rx_q_count; i++) + fxgmac_prepare_rx_stop(pdata, i); +#else + PMD_INIT_FUNC_TRACE(); + struct fxgmac_rx_queue *rxq; + struct rte_eth_dev *dev = pdata->expansion.eth_dev; + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + fxgmac_prepare_rx_stop(pdata, i); + } +#endif + + /* Disable each Rx queue */ + writereg(pdata->pAdapter, 0, pdata->mac_regs + MAC_RQC0R); + + /* Disable each Rx DMA channel */ +#ifndef DPDK + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, + DMA_CH_RCR_SR_LEN, 0); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + } + } +#else + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0); + } +#endif +} + +static void fxgmac_tx_start_xmit(struct fxgmac_channel *channel, + struct fxgmac_ring *ring) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_desc_data *desc_data; + + /* Make sure everything is written before the register write */ + wmb(); + + /* Issue a poll command to Tx DMA by writing address + * of next immediate free descriptor + */ + desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur); + +#if !(FXGMAC_DUMMY_TX_DEBUG) + writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)); +#else + DPRINTK("dummy tx, fxgmac_tx_start_xmit, tail reg=0x%lx, val=%08x\n", + FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO) - pdata->mac_regs, + (u32)lower_32_bits(desc_data->dma_desc_addr)); 
+#endif + if (netif_msg_tx_done(pdata)) + DPRINTK("tx_start_xmit: dump before wr reg, dma base=0x%016llx, reg=0x%08x, tx timer usecs=%u, tx_timer_active=%u\n", + desc_data->dma_desc_addr, + readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)), + pdata->tx_usecs, channel->tx_timer_active); + + ring->tx.xmit_more = 0; +} + +static void fxgmac_dev_xmit(struct fxgmac_channel *channel) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->tx_ring; + unsigned int tso_context, vlan_context; + struct fxgmac_desc_data *desc_data; + struct fxgmac_dma_desc *dma_desc; + struct fxgmac_pkt_info *pkt_info; + unsigned int csum, tso, vlan; + int start_index = ring->cur; + int cur_index = ring->cur; + int i; + + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit callin, desc cur=%d\n", cur_index); + + pkt_info = &ring->pkt_info; + csum = FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN); + tso = FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); + vlan = FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); + + if (tso && (pkt_info->mss != ring->tx.cur_mss)) + tso_context = 1; + else + tso_context = 0; + + if ((tso_context) && (netif_msg_tx_done(pdata))) { + /* tso is initialized to start... */ + DPRINTK("fxgmac_dev_xmit, tso_%s tso=0x%x, pkt_mss=%d, cur_mss=%d\n", + (pkt_info->mss) ? 
"start" : "stop", tso, pkt_info->mss, + ring->tx.cur_mss); + } + + if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)) + vlan_context = 1; + else + vlan_context = 0; + + if (vlan && (netif_msg_tx_done(pdata))) + DPRINTK("fxgmac_dev_xmi:pkt vlan=%d, ring vlan=%d, vlan_context=%d\n", + pkt_info->vlan_ctag, ring->tx.cur_vlan_ctag, + vlan_context); + + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + dma_desc = desc_data->dma_desc; + + /* Create a context descriptor if this is a TSO pkt_info */ + if (tso_context || vlan_context) { + if (tso_context) { + if (netif_msg_tx_done(pdata)) + DPRINTK("xlgamc dev xmit, construct tso context descriptor, mss=%u\n", + pkt_info->mss); + + /* Set the MSS size */ + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc2, TX_CONTEXT_DESC2_MSS_POS, + TX_CONTEXT_DESC2_MSS_LEN, pkt_info->mss); + + /* Mark it as a CONTEXT descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS, + TX_CONTEXT_DESC3_CTXT_LEN, 1); + + /* Indicate this descriptor contains the MSS */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_TCMSSV_POS, + TX_CONTEXT_DESC3_TCMSSV_LEN, 1); + + ring->tx.cur_mss = pkt_info->mss; + } + + if (vlan_context) { + netif_dbg(pdata, tx_queued, pdata->netdev, + "VLAN context descriptor, ctag=%u\n", + pkt_info->vlan_ctag); + + /* Mark it as a CONTEXT descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS, + TX_CONTEXT_DESC3_CTXT_LEN, 1); + + /* Set the VLAN tag */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_VT_POS, + TX_CONTEXT_DESC3_VT_LEN, pkt_info->vlan_ctag); + + /* Indicate this descriptor contains the VLAN tag */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_VLTV_POS, + TX_CONTEXT_DESC3_VLTV_LEN, 1); + + ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag; + } + + cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); 
+ desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + dma_desc = desc_data->dma_desc; + } + + /* Update buffer address (for TSO this is the header) */ + dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma)); + dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma)); + + /* Update the buffer length */ + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc2, + TX_NORMAL_DESC2_HL_B1L_POS, + TX_NORMAL_DESC2_HL_B1L_LEN, + desc_data->skb_dma_len); + + /* VLAN tag insertion check */ + if (vlan) { + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc2, TX_NORMAL_DESC2_VTIR_POS, + TX_NORMAL_DESC2_VTIR_LEN, TX_NORMAL_DESC2_VLAN_INSERT); + pdata->stats.tx_vlan_packets++; + } + + /* Timestamp enablement check */ + if (FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_PTP_POS, + TX_PACKET_ATTRIBUTES_PTP_LEN)) + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc2, TX_NORMAL_DESC2_TTSE_POS, + TX_NORMAL_DESC2_TTSE_LEN, 1); + + /* Mark it as First Descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_FD_POS, + TX_NORMAL_DESC3_FD_LEN, 1); + + /* Mark it as a NORMAL descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_CTXT_POS, + TX_NORMAL_DESC3_CTXT_LEN, 0); + + /* Set OWN bit if not the first descriptor */ + if (cur_index != start_index) + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN, 1); + + if (tso) { + /* Enable TSO */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_TSE_POS, + TX_NORMAL_DESC3_TSE_LEN, 1); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_TCPPL_POS, + TX_NORMAL_DESC3_TCPPL_LEN, pkt_info->tcp_payload_len); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_TCPHDRLEN_POS, + TX_NORMAL_DESC3_TCPHDRLEN_LEN, + pkt_info->tcp_header_len / 4); + + pdata->stats.tx_tso_packets++; + } else { + 
/* Enable CRC and Pad Insertion */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CPC_POS, + TX_NORMAL_DESC3_CPC_LEN, 0); + + /* Enable HW CSUM */ + if (csum) + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, + TX_NORMAL_DESC3_CIC_LEN, 0x3); + + /* Set the total length to be transmitted */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_FL_POS, + TX_NORMAL_DESC3_FL_LEN, + pkt_info->length); + } + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit before more descs, desc cur=%d, start=%d, desc=%#x,%#x,%#x,%#x\n", + cur_index, start_index, dma_desc->desc0, + dma_desc->desc1, dma_desc->desc2, dma_desc->desc3); + + if (start_index <= cur_index) + i = cur_index - start_index + 1; + else + i = ring->dma_desc_count - start_index + cur_index; + + for (; i < pkt_info->desc_count; i++) { + cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); + + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + dma_desc = desc_data->dma_desc; + + /* Update buffer address */ + dma_desc->desc0 = + cpu_to_le32(lower_32_bits(desc_data->skb_dma)); + dma_desc->desc1 = + cpu_to_le32(upper_32_bits(desc_data->skb_dma)); + + /* Update the buffer length */ + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc2, TX_NORMAL_DESC2_HL_B1L_POS, + TX_NORMAL_DESC2_HL_B1L_LEN, desc_data->skb_dma_len); + + /* Set OWN bit */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN, 1); + + /* Mark it as NORMAL descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS, + TX_NORMAL_DESC3_CTXT_LEN, 0); + + /* Enable HW CSUM */ + if (csum) + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, + TX_NORMAL_DESC3_CIC_LEN, 0x3); + } + + /* Set LAST bit for the last descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_LD_POS, + 
TX_NORMAL_DESC3_LD_LEN, 1); + + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc2, + TX_NORMAL_DESC2_IC_POS, + TX_NORMAL_DESC2_IC_LEN, 1); + + /* Save the Tx info to report back during cleanup */ + desc_data->tx.packets = pkt_info->tx_packets; + desc_data->tx.bytes = pkt_info->tx_bytes; + + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit last descs, desc cur=%d, desc=%#x,%#x,%#x,%#x\n", + cur_index, dma_desc->desc0, dma_desc->desc1, + dma_desc->desc2, dma_desc->desc3); + + /* In case the Tx DMA engine is running, make sure everything + * is written to the descriptor(s) before setting the OWN bit + * for the first descriptor + */ + dma_wmb(); + + /* Set OWN bit for the first descriptor */ + desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); + dma_desc = desc_data->dma_desc; + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN, 1); + + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit first descs, start=%d, desc=%#x,%#x,%#x,%#x\n", + start_index, dma_desc->desc0, dma_desc->desc1, + dma_desc->desc2, dma_desc->desc3); + + if (netif_msg_tx_queued(pdata)) + fxgmac_dump_tx_desc(pdata, ring, start_index, + pkt_info->desc_count, 1); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit about to call tx_start_xmit, ring xmit_more=%d, txq_stopped=%x\n", + ring->tx.xmit_more, + netif_xmit_stopped(netdev_get_tx_queue( + pdata->netdev, channel->queue_index))); +#else /* ( LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,165))*/ + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit about to call tx_start_xmit, pkt xmit_more=%d, txq_stopped=%x\n", + pkt_info->skb->xmit_more, + netif_xmit_stopped(netdev_get_tx_queue( + pdata->netdev, channel->queue_index))); +#endif + + /* Make sure ownership is written to the descriptor */ + smp_wmb(); + + ring->cur = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); + + fxgmac_tx_start_xmit(channel, ring); + + /* yzhang for reduce 
debug output */ + if (netif_msg_tx_done(pdata)) { + DPRINTK("dev_xmit callout %s: descriptors %u to %u written\n", + channel->name, start_index & (ring->dma_desc_count - 1), + (ring->cur - 1) & (ring->dma_desc_count - 1)); + } +} + +static void fxgmac_get_rx_tstamp(struct fxgmac_pkt_info *pkt_info, + struct fxgmac_dma_desc *dma_desc) +{ + u64 nsec; + + nsec = le32_to_cpu(dma_desc->desc1); + nsec <<= 32; + nsec |= le32_to_cpu(dma_desc->desc0); + if (nsec != 0xffffffffffffffffULL) { + pkt_info->rx_tstamp = nsec; + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS, + RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN, 1); + } +} + +static void fxgmac_tx_desc_reset(struct fxgmac_desc_data *desc_data) +{ + struct fxgmac_dma_desc *dma_desc = desc_data->dma_desc; + + /* Reset the Tx descriptor + * Set buffer 1 (lo) address to zero + * Set buffer 1 (hi) address to zero + * Reset all other control bits (IC, TTSE, B2L & B1L) + * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) + */ + dma_desc->desc0 = 0; + dma_desc->desc1 = 0; + dma_desc->desc2 = 0; + dma_desc->desc3 = 0; + + /* Make sure ownership is written to the descriptor */ + dma_wmb(); +} + +static void fxgmac_tx_desc_init(struct fxgmac_channel *channel) +{ + struct fxgmac_ring *ring = channel->tx_ring; + struct fxgmac_desc_data *desc_data; + int start_index = ring->cur; + unsigned int i; + start_index = start_index; + + /* Initialize all descriptors */ + for (i = 0; i < ring->dma_desc_count; i++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, i); + + /* Initialize Tx descriptor */ + fxgmac_tx_desc_reset(desc_data); + } + + writereg(channel->pdata->pAdapter, channel->pdata->tx_desc_count - 1, + FXGMAC_DMA_REG(channel, DMA_CH_TDRLR)); + + /* Update the starting address of descriptor ring */ + desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); + writereg(channel->pdata->pAdapter, + upper_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_TDLR_HI)); + 
writereg(channel->pdata->pAdapter, + lower_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_TDLR_LO)); +} + +static void fxgmac_rx_desc_reset(struct fxgmac_pdata *pdata, + struct fxgmac_desc_data *desc_data, + unsigned int index) +{ + struct fxgmac_dma_desc *dma_desc = desc_data->dma_desc; + + /* Reset the Rx descriptor + * Set buffer 1 (lo) address to header dma address (lo) + * Set buffer 1 (hi) address to header dma address (hi) + * Set buffer 2 (lo) address to buffer dma address (lo) + * Set buffer 2 (hi) address to buffer dma address (hi) and + * set control bits OWN and INTE + */ + dma_desc->desc0 = + cpu_to_le32(lower_32_bits(desc_data->rx.buf.dma_base)); + dma_desc->desc1 = + cpu_to_le32(upper_32_bits(desc_data->rx.buf.dma_base)); + dma_desc->desc2 = 0; + dma_desc->desc3 = 0; + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_INTE_POS, + RX_NORMAL_DESC3_INTE_LEN, 1); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_BUF2V_POS, + RX_NORMAL_DESC3_BUF2V_LEN, 0); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_BUF1V_POS, + RX_NORMAL_DESC3_BUF1V_LEN, 1); + + /* Since the Rx DMA engine is likely running, make sure everything + * is written to the descriptor(s) before setting the OWN bit + * for the descriptor + */ + dma_wmb(); + + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_OWN_POS, + RX_NORMAL_DESC3_OWN_LEN, 1); + + /* Make sure ownership is written to the descriptor */ + dma_wmb(); +} + +static void fxgmac_rx_desc_init(struct fxgmac_channel *channel) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->rx_ring; + unsigned int start_index = ring->cur; + struct fxgmac_desc_data *desc_data; + unsigned int i; + + /* Initialize all descriptors */ + for (i = 0; i < ring->dma_desc_count; i++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, i); + + /* Initialize Rx descriptor */ + 
fxgmac_rx_desc_reset(pdata, desc_data, i); + } + + /* Update the total number of Rx descriptors */ + writereg(pdata->pAdapter, ring->dma_desc_count - 1, + FXGMAC_DMA_REG(channel, DMA_CH_RDRLR)); + + /* Update the starting address of descriptor ring */ + desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); + writereg(pdata->pAdapter, upper_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_RDLR_HI)); + writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_RDLR_LO)); + + /* Update the Rx Descriptor Tail Pointer */ + desc_data = FXGMAC_GET_DESC_DATA( + ring, start_index + ring->dma_desc_count - 1); + writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_RDTR_LO)); +} + +static int fxgmac_is_context_desc(struct fxgmac_dma_desc *dma_desc) +{ + /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ + return FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS, + TX_NORMAL_DESC3_CTXT_LEN); +} + +static int fxgmac_is_last_desc(struct fxgmac_dma_desc *dma_desc) +{ + /* Rx and Tx share LD bit, so check TDES3.LD bit */ + return FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_LD_POS, + TX_NORMAL_DESC3_LD_LEN); +} + +static int fxgmac_disable_tx_flow_control(struct fxgmac_pdata *pdata) +{ + unsigned int max_q_count, q_count; + unsigned int reg, regval; + unsigned int i; + + /* Clear MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS, + MTL_Q_RQOMR_EHFC_LEN, 0); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } + + /* Clear MAC flow control */ + max_q_count = FXGMAC_MAX_FLOW_CONTROL_QUEUES; + q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + regval = readreg(pdata->pAdapter, pdata->mac_regs + reg); + 
regval = FXGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS, + MAC_Q0TFCR_TFE_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + reg); + + reg += MAC_QTFCR_INC; + } + + return 0; +} + +static int fxgmac_enable_tx_flow_control(struct fxgmac_pdata *pdata) +{ + unsigned int max_q_count, q_count; + unsigned int reg, regval; + unsigned int i; + + /* Set MTL flow control */ + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_EHFC_POS, + MTL_Q_RQOMR_EHFC_LEN, 1); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } + + /* Set MAC flow control */ + max_q_count = FXGMAC_MAX_FLOW_CONTROL_QUEUES; + q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); + reg = MAC_Q0TFCR; + for (i = 0; i < q_count; i++) { + regval = readreg(pdata->pAdapter, pdata->mac_regs + reg); + + /* Enable transmit flow control */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_TFE_POS, + MAC_Q0TFCR_TFE_LEN, 1); + /* Set pause time */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_Q0TFCR_PT_POS, + MAC_Q0TFCR_PT_LEN, 0xffff); + + writereg(pdata->pAdapter, regval, pdata->mac_regs + reg); + + reg += MAC_QTFCR_INC; + } + + return 0; +} + +static int fxgmac_disable_rx_flow_control(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_RFCR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS, MAC_RFCR_RFE_LEN, + 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_RFCR); + + return 0; +} + +static int fxgmac_enable_rx_flow_control(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_RFCR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_RFCR_RFE_POS, MAC_RFCR_RFE_LEN, + 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_RFCR); + + return 0; +} + +static int fxgmac_config_tx_flow_control(struct fxgmac_pdata *pdata) +{ + if 
(pdata->tx_pause) + fxgmac_enable_tx_flow_control(pdata); + else + fxgmac_disable_tx_flow_control(pdata); + + return 0; +} + +static int fxgmac_config_rx_flow_control(struct fxgmac_pdata *pdata) +{ + if (pdata->rx_pause) + fxgmac_enable_rx_flow_control(pdata); + else + fxgmac_disable_rx_flow_control(pdata); + + return 0; +} + +static int fxgmac_config_rx_coalesce(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_RIWT)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_RIWT_RWT_POS, + DMA_CH_RIWT_RWT_LEN, + pdata->rx_riwt); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_RIWT)); + } +#else + struct fxgmac_rx_queue *rxq; + unsigned int i; + + for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) { + rxq = pdata->expansion.eth_dev->data->rx_queues[i]; + FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RIWT, RWT, pdata->rx_riwt); + } +#endif + + return 0; +} + +static void fxgmac_config_rx_fep_disable(struct fxgmac_pdata *pdata) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + /* 1:enable the rx queue forward packet with error + * status(crc error, gmii_er, watch dog timeout.or overflow) + */ + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FEP_POS, + MTL_Q_RQOMR_FEP_LEN, + MTL_FEP_ENABLE); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } +} + +static void fxgmac_config_rx_fup_enable(struct fxgmac_pdata *pdata) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_FUP_POS, + MTL_Q_RQOMR_FUP_LEN, 1); 
+ writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } +} + +static int fxgmac_config_tx_coalesce(struct fxgmac_pdata *pdata) +{ + pdata = pdata; + return 0; +} + +static void fxgmac_config_rx_buffer_size(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_RCR_RBSZ_POS, + DMA_CH_RCR_RBSZ_LEN, + pdata->rx_buf_size); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + } +#else + struct fxgmac_rx_queue *rxq; + unsigned int i; + + for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) { + rxq = pdata->expansion.eth_dev->data->rx_queues[i]; + + rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) - + RTE_PKTMBUF_HEADROOM; + rxq->buf_size = (rxq->buf_size + FXGMAC_RX_BUF_ALIGN - 1) & + ~(FXGMAC_RX_BUF_ALIGN - 1); + + if (rxq->buf_size > pdata->rx_buf_size) + pdata->rx_buf_size = rxq->buf_size; + + FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ, rxq->buf_size); + } +#endif +} + +static void fxgmac_config_tso_mode(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + if (pdata->hw_feat.tso) { + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS, + DMA_CH_TCR_TSE_LEN, 1); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + } + } +#else + struct fxgmac_tx_queue *txq; + unsigned int i; + for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { + txq = pdata->expansion.eth_dev->data->tx_queues[i]; + 
FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, TSE, pdata->tx_pbl);
+		/* NOTE(review): TSE is the TSO-enable field, yet the DPDK
+		 * path writes pdata->tx_pbl here while the non-DPDK path
+		 * above writes 1 - looks like a copy/paste slip; confirm
+		 * against the DMA_CH_TCR register layout.
+		 */
+	}
+#endif
+}
+
+/* Configure split-header (SPH) mode: the native path clears SPH on every
+ * Rx channel, then the header-split maximum size (HDSMS) is programmed
+ * in MAC_ECR for both paths.
+ */
+static void fxgmac_config_sph_mode(struct fxgmac_pdata *pdata)
+{
+	unsigned int i;
+	u32 regval;
+
+#ifndef DPDK
+	struct fxgmac_channel *channel;
+	channel = pdata->channel_head;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		if (!channel->rx_ring)
+			break;
+
+		regval = readreg(pdata->pAdapter,
+				 FXGMAC_DMA_REG(channel, DMA_CH_CR));
+		/* SPH is cleared (0): split-header disabled on this path */
+		regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_CR_SPH_POS,
+					     DMA_CH_CR_SPH_LEN, 0);
+		writereg(pdata->pAdapter, regval,
+			 FXGMAC_DMA_REG(channel, DMA_CH_CR));
+	}
+#else
+	struct fxgmac_rx_queue *rxq;
+
+	for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) {
+		rxq = pdata->expansion.eth_dev->data->rx_queues[i];
+		/* NOTE(review): SPH (a 1-bit enable) is written with
+		 * pdata->rx_pbl while the non-DPDK path writes 0 -
+		 * verify the intended value for the DPDK path.
+		 */
+		FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_CR, SPH, pdata->rx_pbl);
+	}
+#endif
+
+	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_ECR);
+	regval = FXGMAC_SET_REG_BITS(regval, MAC_ECR_HDSMS_POS,
+				     MAC_ECR_HDSMS_LEN, FXGMAC_SPH_HDSMS_SIZE);
+	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_ECR);
+}
+
+/* Convert a microsecond interval to the Rx interrupt watchdog (RIWT)
+ * register value; one RIWT unit equals 256 system-clock cycles.
+ * Note: truncates toward zero, and yields 0 for sysclk < 1 MHz.
+ */
+static unsigned int fxgmac_usec_to_riwt(struct fxgmac_pdata *pdata,
+					unsigned int usec)
+{
+	unsigned long rate;
+	unsigned int ret;
+
+	rate = pdata->sysclk_rate;
+
+	/* Convert the input usec value to the watchdog timer value. Each
+	 * watchdog timer value is equivalent to 256 clock cycles.
+	 * Calculate the required value as:
+	 *   ( usec * ( system_clock_mhz / 10^6) / 256
+	 */
+	ret = (usec * (rate / 1000000)) / 256;
+
+	return ret;
+}
+
+/* Inverse of fxgmac_usec_to_riwt(): convert a RIWT register value back
+ * to microseconds.
+ */
+static unsigned int fxgmac_riwt_to_usec(struct fxgmac_pdata *pdata,
+					unsigned int riwt)
+{
+	unsigned long rate;
+	unsigned int ret;
+
+	rate = pdata->sysclk_rate;
+
+	/* Convert the input watchdog timer value to the usec value. Each
+	 * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as: + * ( riwt * 256) / ( system_clock_mhz / 10^6) + */ + ret = (riwt * 256) / (rate / 1000000); + + return ret; +} + +static int fxgmac_config_rx_threshold(struct fxgmac_pdata *pdata, + unsigned int val) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RTC_POS, + MTL_Q_RQOMR_RTC_LEN, val); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } + + return 0; +} + +static void fxgmac_config_mtl_mode(struct fxgmac_pdata *pdata) +{ + unsigned int i; + u32 regval; + + /* Set Tx to weighted round robin scheduling algorithm */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MTL_OMR); + regval = FXGMAC_SET_REG_BITS(regval, MTL_OMR_ETSALG_POS, + MTL_OMR_ETSALG_LEN, MTL_ETSALG_WRR); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MTL_OMR); + + /* Set Tx traffic classes to use WRR algorithm with equal weights */ + for (i = 0; i < pdata->tx_q_count /*hw_feat.tc_cnt*/; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_TC_QWR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_TC_QWR_QW_POS, + MTL_TC_QWR_QW_LEN, 1); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_TC_QWR)); + } + + /* Set Rx to strict priority algorithm */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MTL_OMR); + regval = FXGMAC_SET_REG_BITS(regval, MTL_OMR_RAA_POS, MTL_OMR_RAA_LEN, + MTL_RAA_SP); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MTL_OMR); +} + +static void fxgmac_config_queue_mapping(struct fxgmac_pdata *pdata) +{ + unsigned int ppq, ppq_extra, prio, prio_queues; + unsigned int queue; + unsigned int reg, regval; + unsigned int mask; + unsigned int i, j; + + /* Map the MTL Tx Queues to Traffic Classes + * Note: Tx Queues >= Traffic Classes + */ + queue = 0; + DPRINTK("need to map TXq(%u) to TC\n", queue); + + /* Map 
the 8 VLAN priority values to available MTL Rx queues */ + prio_queues = + min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, pdata->rx_q_count); + ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; + ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; + + reg = MAC_RQC2R; + regval = 0; + for (i = 0, prio = 0; i < prio_queues;) { + mask = 0; + for (j = 0; j < ppq; j++) { + netif_dbg(pdata, drv, pdata->netdev, + "PRIO%u mapped to RXq%u\n", prio, i); + mask |= (1 << prio); + prio++; + } + + if (i < ppq_extra) { + netif_dbg(pdata, drv, pdata->netdev, + "PRIO%u mapped to RXq%u\n", prio, i); + mask |= (1 << prio); + prio++; + } + + regval |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); + + if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) + continue; + + writereg(pdata->pAdapter, regval, pdata->mac_regs + reg); + reg += MAC_RQC2_INC; + regval = 0; + } + + /* Configure one to one, MTL Rx queue to DMA Rx channel mapping + * ie Q0 <--> CH0, Q1 <--> CH1 ... Q11 <--> CH11 + */ + reg = MTL_RQDCM0R; + regval = readreg(pdata->pAdapter, pdata->mac_regs + reg); + regval |= (MTL_RQDCM0R_Q0MDMACH | MTL_RQDCM0R_Q1MDMACH | + MTL_RQDCM0R_Q2MDMACH | MTL_RQDCM0R_Q3MDMACH); + + if (pdata->rss) { + /* in version later 0617, need to enable DA-based DMA Channel Selection to let RSS work, + * ie, bit4,12,20,28 for Q0,1,2,3 individual + */ + regval |= (MTL_RQDCM0R_Q0DDMACH | MTL_RQDCM0R_Q1DDMACH | + MTL_RQDCM0R_Q2DDMACH | MTL_RQDCM0R_Q3DDMACH); + } + + writereg(pdata->pAdapter, regval, pdata->mac_regs + reg); + + reg += MTL_RQDCM_INC; + regval = readreg(pdata->pAdapter, pdata->mac_regs + reg); + regval |= (MTL_RQDCM1R_Q4MDMACH | MTL_RQDCM1R_Q5MDMACH | + MTL_RQDCM1R_Q6MDMACH | MTL_RQDCM1R_Q7MDMACH); + writereg(pdata->pAdapter, regval, pdata->mac_regs + reg); +} + +static unsigned int fxgmac_calculate_per_queue_fifo(unsigned int fifo_size, + unsigned int queue_count) +{ + unsigned int q_fifo_size; + unsigned int p_fifo; + + /* Calculate the configured fifo size */ + q_fifo_size = 1 << (fifo_size + 7); + + /* The 
configured value may not be the actual amount of fifo RAM */ + q_fifo_size = min_t(unsigned int, FXGMAC_MAX_FIFO, q_fifo_size); + + q_fifo_size = q_fifo_size / queue_count; + + /* Each increment in the queue fifo size represents 256 bytes of + * fifo, with 0 representing 256 bytes. Distribute the fifo equally + * between the queues. + */ + p_fifo = q_fifo_size / 256; + if (p_fifo) + p_fifo--; + + return p_fifo; +} + +static void fxgmac_config_tx_fifo_size(struct fxgmac_pdata *pdata) +{ + unsigned int fifo_size; + unsigned int i; + u32 regval; + + fifo_size = fxgmac_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size, + pdata->tx_q_count); + + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS, + MTL_Q_TQOMR_TQS_LEN, fifo_size); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + } + + netif_info(pdata, drv, pdata->netdev, + "%d Tx hardware queues, %d byte fifo per queue\n", + pdata->tx_q_count, ((fifo_size + 1) * 256)); +} + +static void fxgmac_config_rx_fifo_size(struct fxgmac_pdata *pdata) +{ + unsigned int fifo_size; + unsigned int i; + u32 regval; + + fifo_size = fxgmac_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size, + pdata->rx_q_count); + + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RQS_POS, + MTL_Q_RQOMR_RQS_LEN, fifo_size); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } + + netif_info(pdata, drv, pdata->netdev, + "%d Rx hardware queues, %d byte fifo per queue\n", + pdata->rx_q_count, ((fifo_size + 1) * 256)); +} + +static void fxgmac_config_flow_control_threshold(struct fxgmac_pdata *pdata) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, 
MTL_Q_RQOMR)); + /* Activate flow control when less than 6k left in fifo */ + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RFA_POS, + MTL_Q_RQOMR_RFA_LEN, 6); + /* De-activate flow control when more than 10k left in fifo */ + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RFD_POS, + MTL_Q_RQOMR_RFD_LEN, 10); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } +} + +static int fxgmac_config_tx_threshold(struct fxgmac_pdata *pdata, + unsigned int val) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TTC_POS, + MTL_Q_TQOMR_TTC_LEN, val); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + } + + return 0; +} + +static int fxgmac_config_rsf_mode(struct fxgmac_pdata *pdata, unsigned int val) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->rx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RSF_POS, + MTL_Q_RQOMR_RSF_LEN, val); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); + } + + return 0; +} + +static int fxgmac_config_tsf_mode(struct fxgmac_pdata *pdata, unsigned int val) +{ + unsigned int i; + u32 regval; + + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TSF_POS, + MTL_Q_TQOMR_TSF_LEN, val); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + } + + return 0; +} + +static int fxgmac_config_osp_mode(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + regval = 
readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_OSP_POS, + DMA_CH_TCR_OSP_LEN, + pdata->tx_osp_mode); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + } +#else + /* Force DMA to operate on second packet before closing descriptors + * of first packet + */ + struct fxgmac_tx_queue *txq; + unsigned int i; + + for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { + txq = pdata->expansion.eth_dev->data->tx_queues[i]; + FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP, + pdata->tx_osp_mode); + } +#endif + return 0; +} + +static int fxgmac_config_pblx8(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_CR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_CR_PBLX8_POS, + DMA_CH_CR_PBLX8_LEN, pdata->pblx8); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_CR)); + } +#else + struct fxgmac_tx_queue *txq; + unsigned int i; + + for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { + txq = pdata->expansion.eth_dev->data->tx_queues[i]; + FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8, pdata->pblx8); + } +#endif + + return 0; +} + +static int fxgmac_get_tx_pbl_val(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(pdata->channel_head, DMA_CH_TCR)); + regval = FXGMAC_GET_REG_BITS(regval, DMA_CH_TCR_PBL_POS, + DMA_CH_TCR_PBL_LEN); + return regval; +} + +static int fxgmac_config_tx_pbl_val(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; + + regval = readreg(pdata->pAdapter, + 
FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_PBL_POS, + DMA_CH_TCR_PBL_LEN, pdata->tx_pbl); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); + } +#else + struct fxgmac_tx_queue *txq; + unsigned int i; + + for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { + txq = pdata->expansion.eth_dev->data->tx_queues[i]; + FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL, pdata->tx_pbl); + } +#endif + + return 0; +} + +static int fxgmac_get_rx_pbl_val(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(pdata->channel_head, DMA_CH_RCR)); + regval = FXGMAC_GET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, + DMA_CH_RCR_PBL_LEN); + return regval; +} + +static int fxgmac_config_rx_pbl_val(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + struct fxgmac_channel *channel; + unsigned int i; + u32 regval; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; + + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_RCR_PBL_POS, + DMA_CH_RCR_PBL_LEN, pdata->rx_pbl); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_RCR)); + } +#else + struct fxgmac_rx_queue *rxq; + unsigned int i; + + for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) { + rxq = pdata->expansion.eth_dev->data->rx_queues[i]; + FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL, pdata->rx_pbl); + } +#endif + + return 0; +} + +static u64 fxgmac_mmc_read(struct fxgmac_pdata *pdata, unsigned int reg_lo) +{ + /* bool read_hi; */ + u64 val; + val = (u64)readreg(pdata->pAdapter, pdata->mac_regs + reg_lo); + + return val; +} + +static void fxgmac_tx_mmc_int(struct fxgmac_pdata *pdata) +{ + unsigned int mmc_isr = + readreg(pdata->pAdapter, pdata->mac_regs + MMC_TISR); + struct fxgmac_stats *stats = &pdata->stats; + + if (FXGMAC_GET_REG_BITS(mmc_isr, 
MMC_TISR_TXOCTETCOUNT_GB_POS, + MMC_TISR_TXOCTETCOUNT_GB_LEN)) + stats->txoctetcount_gb += + fxgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXFRAMECOUNT_GB_POS, + MMC_TISR_TXFRAMECOUNT_GB_LEN)) + stats->txframecount_gb += + fxgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXBROADCASTFRAMES_G_POS, + MMC_TISR_TXBROADCASTFRAMES_G_LEN)) + stats->txbroadcastframes_g += + fxgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXMULTICASTFRAMES_G_POS, + MMC_TISR_TXMULTICASTFRAMES_G_LEN)) + stats->txmulticastframes_g += + fxgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX64OCTETS_GB_POS, + MMC_TISR_TX64OCTETS_GB_LEN)) + stats->tx64octets_gb += + fxgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX65TO127OCTETS_GB_POS, + MMC_TISR_TX65TO127OCTETS_GB_LEN)) + stats->tx65to127octets_gb += + fxgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX128TO255OCTETS_GB_POS, + MMC_TISR_TX128TO255OCTETS_GB_LEN)) + stats->tx128to255octets_gb += + fxgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX256TO511OCTETS_GB_POS, + MMC_TISR_TX256TO511OCTETS_GB_LEN)) + stats->tx256to511octets_gb += + fxgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX512TO1023OCTETS_GB_POS, + MMC_TISR_TX512TO1023OCTETS_GB_LEN)) + stats->tx512to1023octets_gb += + fxgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TX1024TOMAXOCTETS_GB_POS, + MMC_TISR_TX1024TOMAXOCTETS_GB_LEN)) + stats->tx1024tomaxoctets_gb += + fxgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXUNICASTFRAMES_GB_POS, + MMC_TISR_TXUNICASTFRAMES_GB_LEN)) + stats->txunicastframes_gb += + 
fxgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+	if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXMULTICASTFRAMES_GB_POS,
+				MMC_TISR_TXMULTICASTFRAMES_GB_LEN))
+		stats->txmulticastframes_gb +=
+			fxgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+	if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXBROADCASTFRAMES_GB_POS,
+				MMC_TISR_TXBROADCASTFRAMES_GB_LEN))
+		/* NOTE(review): the good-or-bad (GB) broadcast counter is
+		 * accumulated into the good-only txbroadcastframes_g stat
+		 * (compare the multicast case above, which has a _gb field).
+		 * Looks like a copy/paste slip; confirm whether struct
+		 * fxgmac_stats has a txbroadcastframes_gb member.
+		 */
+		stats->txbroadcastframes_g +=
+			fxgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+	if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXUNDERFLOWERROR_POS,
+				MMC_TISR_TXUNDERFLOWERROR_LEN))
+		stats->txunderflowerror +=
+			fxgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+	if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXSINGLECOLLISION_G_POS,
+				MMC_TISR_TXSINGLECOLLISION_G_LEN))
+		stats->txsinglecollision_g +=
+			fxgmac_mmc_read(pdata, MMC_TXSINGLECOLLISION_G);
+
+	if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXMULTIPLECOLLISION_G_POS,
+				MMC_TISR_TXMULTIPLECOLLISION_G_LEN))
+		stats->txmultiplecollision_g +=
+			fxgmac_mmc_read(pdata, MMC_TXMULTIPLECOLLISION_G);
+
+	if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXDEFERREDFRAMES_POS,
+				MMC_TISR_TXDEFERREDFRAMES_LEN))
+		stats->txdeferredframes +=
+			fxgmac_mmc_read(pdata, MMC_TXDEFERREDFRAMES);
+
+	if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXLATECOLLISIONFRAMES_POS,
+				MMC_TISR_TXLATECOLLISIONFRAMES_LEN))
+		stats->txlatecollisionframes +=
+			fxgmac_mmc_read(pdata, MMC_TXLATECOLLISIONFRAMES);
+
+	if (FXGMAC_GET_REG_BITS(mmc_isr,
+				MMC_TISR_TXEXCESSIVECOLLISIONFRAMES_POS,
+				MMC_TISR_TXEXCESSIVECOLLISIONFRAMES_LEN))
+		stats->txexcessivecollisionframes +=
+			fxgmac_mmc_read(pdata, MMC_TXEXCESSIVECOLLSIONFRAMES);
+
+	if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXCARRIERERRORFRAMES_POS,
+				MMC_TISR_TXCARRIERERRORFRAMES_LEN))
+		stats->txcarriererrorframes +=
+			fxgmac_mmc_read(pdata, MMC_TXCARRIERERRORFRAMES);
+
+	if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXOCTETCOUNT_G_POS,
+				MMC_TISR_TXOCTETCOUNT_G_LEN))
+		stats->txoctetcount_g +=
+			fxgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+	if
(FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXFRAMECOUNT_G_POS, + MMC_TISR_TXFRAMECOUNT_G_LEN)) + stats->txframecount_g += + fxgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXEXCESSIVEDEFERRALFRAMES_POS, + MMC_TISR_TXEXCESSIVEDEFERRALFRAMES_LEN)) + stats->txexcessivedeferralerror += + fxgmac_mmc_read(pdata, MMC_TXEXCESSIVEDEFERRALERROR); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXPAUSEFRAMES_POS, + MMC_TISR_TXPAUSEFRAMES_LEN)) + stats->txpauseframes += + fxgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXVLANFRAMES_G_POS, + MMC_TISR_TXVLANFRAMES_G_LEN)) + stats->txvlanframes_g += + fxgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXOVERSIZE_G_POS, + MMC_TISR_TXOVERSIZE_G_LEN)) + stats->txoversize_g += + fxgmac_mmc_read(pdata, MMC_TXOVERSIZEFRAMES); +} + +static void fxgmac_rx_mmc_int(struct fxgmac_pdata *pdata) +{ + unsigned int mmc_isr = + readreg(pdata->pAdapter, pdata->mac_regs + MMC_RISR); + struct fxgmac_stats *stats = &pdata->stats; + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXFRAMECOUNT_GB_POS, + MMC_RISR_RXFRAMECOUNT_GB_LEN)) + stats->rxframecount_gb += + fxgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOCTETCOUNT_GB_POS, + MMC_RISR_RXOCTETCOUNT_GB_LEN)) + stats->rxoctetcount_gb += + fxgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOCTETCOUNT_G_POS, + MMC_RISR_RXOCTETCOUNT_G_LEN)) + stats->rxoctetcount_g += + fxgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXBROADCASTFRAMES_G_POS, + MMC_RISR_RXBROADCASTFRAMES_G_LEN)) + stats->rxbroadcastframes_g += + fxgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXMULTICASTFRAMES_G_POS, + MMC_RISR_RXMULTICASTFRAMES_G_LEN)) + stats->rxmulticastframes_g += + fxgmac_mmc_read(pdata, 
MMC_RXMULTICASTFRAMES_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXCRCERROR_POS, + MMC_RISR_RXCRCERROR_LEN)) + stats->rxcrcerror += fxgmac_mmc_read(pdata, MMC_RXCRCERROR_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXALIGNERROR_POS, + MMC_RISR_RXALIGNERROR_LEN)) + stats->rxalignerror += fxgmac_mmc_read(pdata, MMC_RXALIGNERROR); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXRUNTERROR_POS, + MMC_RISR_RXRUNTERROR_LEN)) + stats->rxrunterror += fxgmac_mmc_read(pdata, MMC_RXRUNTERROR); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXJABBERERROR_POS, + MMC_RISR_RXJABBERERROR_LEN)) + stats->rxjabbererror += + fxgmac_mmc_read(pdata, MMC_RXJABBERERROR); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXUNDERSIZE_G_POS, + MMC_RISR_RXUNDERSIZE_G_LEN)) + stats->rxundersize_g += + fxgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOVERSIZE_G_POS, + MMC_RISR_RXOVERSIZE_G_LEN)) + stats->rxoversize_g += fxgmac_mmc_read(pdata, MMC_RXOVERSIZE_G); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX64OCTETS_GB_POS, + MMC_RISR_RX64OCTETS_GB_LEN)) + stats->rx64octets_gb += + fxgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX65TO127OCTETS_GB_POS, + MMC_RISR_RX65TO127OCTETS_GB_LEN)) + stats->rx65to127octets_gb += + fxgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX128TO255OCTETS_GB_POS, + MMC_RISR_RX128TO255OCTETS_GB_LEN)) + stats->rx128to255octets_gb += + fxgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX256TO511OCTETS_GB_POS, + MMC_RISR_RX256TO511OCTETS_GB_LEN)) + stats->rx256to511octets_gb += + fxgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RX512TO1023OCTETS_GB_POS, + MMC_RISR_RX512TO1023OCTETS_GB_LEN)) + stats->rx512to1023octets_gb += + fxgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, 
MMC_RISR_RX1024TOMAXOCTETS_GB_POS, + MMC_RISR_RX1024TOMAXOCTETS_GB_LEN)) + stats->rx1024tomaxoctets_gb += + fxgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXUNICASTFRAMES_G_POS, + MMC_RISR_RXUNICASTFRAMES_G_LEN)) + stats->rxunicastframes_g += + fxgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXLENGTHERROR_POS, + MMC_RISR_RXLENGTHERROR_LEN)) + stats->rxlengtherror += + fxgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXOUTOFRANGETYPE_POS, + MMC_RISR_RXOUTOFRANGETYPE_LEN)) + stats->rxoutofrangetype += + fxgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXPAUSEFRAMES_POS, + MMC_RISR_RXPAUSEFRAMES_LEN)) + stats->rxpauseframes += + fxgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXFIFOOVERFLOW_POS, + MMC_RISR_RXFIFOOVERFLOW_LEN)) + stats->rxfifooverflow += + fxgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXVLANFRAMES_GB_POS, + MMC_RISR_RXVLANFRAMES_GB_LEN)) + stats->rxvlanframes_gb += + fxgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXWATCHDOGERROR_POS, + MMC_RISR_RXWATCHDOGERROR_LEN)) + stats->rxwatchdogerror += + fxgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXERRORFRAMES_POS, + MMC_RISR_RXERRORFRAMES_LEN)) + stats->rxreceiveerrorframe += + fxgmac_mmc_read(pdata, MMC_RXRECEIVEERRORFRAME); + + if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXERRORCONTROLFRAMES_POS, + MMC_RISR_RXERRORCONTROLFRAMES_LEN)) + stats->rxcontrolframe_g += + fxgmac_mmc_read(pdata, MMC_RXCONTROLFRAME_G); +} + +static void fxgmac_read_mmc_stats(struct fxgmac_pdata *pdata) +{ + struct fxgmac_stats *stats = &pdata->stats; + u32 regval; + + /* Freeze counters */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_CR); + regval = 
FXGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, MMC_CR_MCF_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_CR); + + stats->txoctetcount_gb += + fxgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); + + stats->txframecount_gb += + fxgmac_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); + + stats->txbroadcastframes_g += + fxgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); + + stats->txmulticastframes_g += + fxgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); + + stats->tx64octets_gb += fxgmac_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); + + stats->tx65to127octets_gb += + fxgmac_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); + + stats->tx128to255octets_gb += + fxgmac_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); + + stats->tx256to511octets_gb += + fxgmac_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); + + stats->tx512to1023octets_gb += + fxgmac_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); + + stats->tx1024tomaxoctets_gb += + fxgmac_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); + + stats->txunicastframes_gb += + fxgmac_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); + + stats->txmulticastframes_gb += + fxgmac_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); + + stats->txbroadcastframes_g += + fxgmac_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); + + stats->txunderflowerror += + fxgmac_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); + + stats->txsinglecollision_g += + fxgmac_mmc_read(pdata, MMC_TXSINGLECOLLISION_G); + + stats->txmultiplecollision_g += + fxgmac_mmc_read(pdata, MMC_TXMULTIPLECOLLISION_G); + + stats->txdeferredframes += fxgmac_mmc_read(pdata, MMC_TXDEFERREDFRAMES); + + stats->txlatecollisionframes += + fxgmac_mmc_read(pdata, MMC_TXLATECOLLISIONFRAMES); + + stats->txexcessivecollisionframes += + fxgmac_mmc_read(pdata, MMC_TXEXCESSIVECOLLSIONFRAMES); + + stats->txcarriererrorframes += + fxgmac_mmc_read(pdata, MMC_TXCARRIERERRORFRAMES); + + stats->txoctetcount_g += fxgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); + + stats->txframecount_g += fxgmac_mmc_read(pdata, 
MMC_TXFRAMECOUNT_G_LO); + + stats->txexcessivedeferralerror += + fxgmac_mmc_read(pdata, MMC_TXEXCESSIVEDEFERRALERROR); + + stats->txpauseframes += fxgmac_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); + + stats->txvlanframes_g += fxgmac_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); + + stats->txoversize_g += fxgmac_mmc_read(pdata, MMC_TXOVERSIZEFRAMES); + + stats->rxframecount_gb += + fxgmac_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); + + stats->rxoctetcount_gb += + fxgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); + + stats->rxoctetcount_g += fxgmac_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); + + stats->rxbroadcastframes_g += + fxgmac_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); + + stats->rxmulticastframes_g += + fxgmac_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); + + stats->rxcrcerror += fxgmac_mmc_read(pdata, MMC_RXCRCERROR_LO); + + stats->rxalignerror += fxgmac_mmc_read(pdata, MMC_RXALIGNERROR); + + stats->rxrunterror += fxgmac_mmc_read(pdata, MMC_RXRUNTERROR); + + stats->rxjabbererror += fxgmac_mmc_read(pdata, MMC_RXJABBERERROR); + + stats->rxundersize_g += fxgmac_mmc_read(pdata, MMC_RXUNDERSIZE_G); + + stats->rxoversize_g += fxgmac_mmc_read(pdata, MMC_RXOVERSIZE_G); + + stats->rx64octets_gb += fxgmac_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); + + stats->rx65to127octets_gb += + fxgmac_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); + + stats->rx128to255octets_gb += + fxgmac_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); + + stats->rx256to511octets_gb += + fxgmac_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); + + stats->rx512to1023octets_gb += + fxgmac_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); + + stats->rx1024tomaxoctets_gb += + fxgmac_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); + + stats->rxunicastframes_g += + fxgmac_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); + + stats->rxlengtherror += fxgmac_mmc_read(pdata, MMC_RXLENGTHERROR_LO); + + stats->rxoutofrangetype += + fxgmac_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); + + stats->rxpauseframes += fxgmac_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); 
+ + stats->rxfifooverflow += fxgmac_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); + + stats->rxvlanframes_gb += + fxgmac_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); + + stats->rxwatchdogerror += fxgmac_mmc_read(pdata, MMC_RXWATCHDOGERROR); + + stats->rxreceiveerrorframe += + fxgmac_mmc_read(pdata, MMC_RXRECEIVEERRORFRAME); + + stats->rxcontrolframe_g += fxgmac_mmc_read(pdata, MMC_RXCONTROLFRAME_G); + + /* Un-freeze counters */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, MMC_CR_MCF_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_CR); +} + +static void fxgmac_config_mmc(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_CR); + /* Set counters to reset on read */ + regval = FXGMAC_SET_REG_BITS(regval, MMC_CR_ROR_POS, MMC_CR_ROR_LEN, 1); + /* Reset the counters */ + regval = FXGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS, MMC_CR_CR_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_CR); + +#if defined(FUXI_MISC_INT_HANDLE_FEATURE_EN) && FUXI_MISC_INT_HANDLE_FEATURE_EN + writereg(pdata->pAdapter, 0xffffffff, + pdata->mac_regs + MMC_IPCRXINTMASK); +#endif +} + +static int fxgmac_write_rss_reg(struct fxgmac_pdata *pdata, unsigned int type, + unsigned int index, unsigned int val) +{ + int ret = 0; + type = type; + + writereg(pdata->pAdapter, val, (pdata->base_mem + index)); + + return ret; +} + +static u32 fxgmac_read_rss_options(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_RSS_CTRL); + + /* Get the RSS options bits */ + regval = FXGMAC_GET_REG_BITS(regval, MGMT_RSS_CTRL_OPT_POS, + MGMT_RSS_CTRL_OPT_LEN); + + return regval; +} + +static int fxgmac_write_rss_options(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_RSS_CTRL); + + /* Set the RSS options */ + regval = FXGMAC_SET_REG_BITS(regval, 
MGMT_RSS_CTRL_OPT_POS, + MGMT_RSS_CTRL_OPT_LEN, pdata->rss_options); + + writereg(pdata->pAdapter, regval, (pdata->base_mem + MGMT_RSS_CTRL)); + + return 0; +} + +#if !defined(DPDK) +static int fxgmac_read_rss_hash_key(struct fxgmac_pdata *pdata, u8 *key_buf) +{ + unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); + u32 *key = (u32 *)key_buf; + + while (key_regs--) { + (*key) = cpu_to_be32(readreg( + pdata->pAdapter, + pdata->base_mem + (MGMT_RSS_KEY0 + + key_regs * MGMT_RSS_KEY_REG_INC))); + + DBGPRINT( + MP_LOUD, + ("fxgmac_read_rss_hash_key: idx=%d, reg=%x, key=0x%08x\n", + key_regs, + MGMT_RSS_KEY0 + key_regs * MGMT_RSS_KEY_REG_INC, + (u32)(*key))); + key++; + } + + return 0; +} +#endif + +static int fxgmac_write_rss_hash_key(struct fxgmac_pdata *pdata) +{ + unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); + u32 *key = (u32 *)&pdata->rss_key; + int ret; + + while (key_regs--) { + ret = fxgmac_write_rss_reg( + pdata, FXGMAC_RSS_HASH_KEY_TYPE, + MGMT_RSS_KEY0 + key_regs * MGMT_RSS_KEY_REG_INC, + cpu_to_be32(*key)); + if (ret) + return ret; + key++; + } + + return 0; +} + +static int fxgmac_write_rss_lookup_table(struct fxgmac_pdata *pdata) +{ + unsigned int i, j; + u32 regval = 0; + int ret; + + for (i = 0, j = 0; i < ARRAY_SIZE(pdata->rss_table); i++, j++) { + if (j < MGMT_RSS_IDT_ENTRY_PER_REG) { + regval |= + ((pdata->rss_table[i] & MGMT_RSS_IDT_ENTRY_MASK) + << (j * 2)); + } else { + ret = fxgmac_write_rss_reg( + pdata, FXGMAC_RSS_LOOKUP_TABLE_TYPE, + MGMT_RSS_IDT + (i / MGMT_RSS_IDT_ENTRY_PER_REG - + 1) * MGMT_RSS_IDT_REG_INC, + regval); + if (ret) + return ret; + + regval = pdata->rss_table[i]; + j = 0; + } + } + + if (j == MGMT_RSS_IDT_ENTRY_PER_REG) { + /* last IDT */ + fxgmac_write_rss_reg( + pdata, FXGMAC_RSS_LOOKUP_TABLE_TYPE, + MGMT_RSS_IDT + (i / MGMT_RSS_IDT_ENTRY_PER_REG - 1) * + MGMT_RSS_IDT_REG_INC, + regval); + } + + return 0; +} + +static int fxgmac_set_rss_hash_key(struct fxgmac_pdata *pdata, const u8 *key) +{ + 
memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); + + return fxgmac_write_rss_hash_key(pdata); +} + +static int fxgmac_set_rss_lookup_table(struct fxgmac_pdata *pdata, + const u32 *table) +{ + unsigned int i; + u32 tval; + +#if FXGMAC_MSIX_CH0RXDIS_EN + DPRINTK("Set_rss_table, rss ctrl eth=0x%08x\n", 0); + + return 0; +#endif + + for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { + tval = table[i]; + pdata->rss_table[i] = FXGMAC_SET_REG_BITS(pdata->rss_table[i], + MAC_RSSDR_DMCH_POS, + MAC_RSSDR_DMCH_LEN, + tval); + } + + return fxgmac_write_rss_lookup_table(pdata); +} + +static u32 log2ex(u32 value) +{ + u32 i = 31; + while (i > 0) { + if (value & 0x80000000) { + break; + } + value <<= 1; + i--; + } + return i; +} + +static int fxgmac_enable_rss(struct fxgmac_pdata *pdata) +{ + u32 regval; + u32 size = 0; + + int ret; + + if (!pdata->hw_feat.rss) { + return -EOPNOTSUPP; + } + + /* Program the hash key */ + ret = fxgmac_write_rss_hash_key(pdata); + if (ret) { + return ret; + } + + /* Program the lookup table */ + ret = fxgmac_write_rss_lookup_table(pdata); + if (ret) { + return ret; + } + + regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_RSS_CTRL); + + /* Set RSS IDT table size */ + size = log2ex(FXGMAC_RSS_MAX_TABLE_SIZE) - 1; + regval = FXGMAC_SET_REG_BITS(regval, MGMT_RSS_CTRL_TBL_SIZE_POS, + MGMT_RSS_CTRL_TBL_SIZE_LEN, size); + +#if FXGMAC_MSIX_CH0RXDIS_EN + /* set default cpu id to 1 */ + regval = FXGMAC_SET_REG_BITS(regval, 8, 2, 1); +#endif + /* Enable RSS */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS, + MAC_RSSCR_RSSE_LEN, 1); + + /* Set the RSS options */ + regval = FXGMAC_SET_REG_BITS(regval, MGMT_RSS_CTRL_OPT_POS, + MGMT_RSS_CTRL_OPT_LEN, pdata->rss_options); + + writereg(pdata->pAdapter, regval, (pdata->base_mem + MGMT_RSS_CTRL)); + DPRINTK("enable_rss callout, rss ctrl reg=0x%08x\n", regval); + + return 0; +} + +static int fxgmac_disable_rss(struct fxgmac_pdata *pdata) +{ + u32 regval; + + if (!pdata->hw_feat.rss) + return 
-EOPNOTSUPP; + +#if FXGMAC_MSIX_CH0RXDIS_EN + DPRINTK("Disable_rss, rss ctrl eth=0x%08x\n", 0); + + return 0; +#endif + + regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_RSS_CTRL); + regval = FXGMAC_SET_REG_BITS(regval, MAC_RSSCR_RSSE_POS, + MAC_RSSCR_RSSE_LEN, 0); + + writereg(pdata->pAdapter, regval, (pdata->base_mem + MGMT_RSS_CTRL)); + DPRINTK("disable_rss, rss ctrl reg=0x%08x\n", regval); + + return 0; +} + +static void fxgmac_config_rss(struct fxgmac_pdata *pdata) +{ + int ret; + + if (!pdata->hw_feat.rss) + return; + + if (pdata->rss) + ret = fxgmac_enable_rss(pdata); + else + ret = fxgmac_disable_rss(pdata); + + if (ret) { + DBGPRINT(MP_ERROR, + ("fxgmac_config_rss: error configuring RSS\n")); + } +} + +static void fxgmac_update_aoe_ipv4addr(struct fxgmac_pdata *pdata, u8 *ip_addr) +{ + unsigned int regval, ipval = 0; + + /* enable or disable ARP offload engine. */ + if (!pdata->hw_feat.aoe) { + netdev_err( + pdata->netdev, + "error update ip addr - arp offload not supported.\n"); + return; + } + + if (ip_addr) { + ipval = (ip_addr[0] << 24) | (ip_addr[1] << 16) | + (ip_addr[2] << 8) | (ip_addr[3] << 0); + DPRINTK("%s, covert IP dotted-addr %s to binary 0x%08x ok.\n", + __FUNCTION__, ip_addr, cpu_to_be32(ipval)); + } else { + /* get ipv4 addr from net device */ + ipval = fxgmac_get_netdev_ip4addr(pdata); + DPRINTK("%s, Get net device binary IP ok, 0x%08x\n", + __FUNCTION__, cpu_to_be32(ipval)); + + ipval = cpu_to_be32(ipval); + } + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_ARP_PROTO_ADDR); + if (regval != /*cpu_to_be32*/ (ipval)) { + writereg(pdata->pAdapter, /*cpu_to_be32*/ (ipval), + pdata->mac_regs + MAC_ARP_PROTO_ADDR); + DPRINTK("%s, update arp ipaddr reg from 0x%08x to 0x%08x\n", + __FUNCTION__, regval, /*cpu_to_be32*/ (ipval)); + } +} + +static int fxgmac_enable_arp_offload(struct fxgmac_pdata *pdata) +{ + u32 regval; + + if (!pdata->hw_feat.aoe) + return -EOPNOTSUPP; + + /* Enable arpoffload */ + regval = 
readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_ARPEN_POS, MAC_CR_ARPEN_LEN, + 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + return 0; +} + +static int fxgmac_disable_arp_offload(struct fxgmac_pdata *pdata) +{ + u32 regval; + + if (!pdata->hw_feat.aoe) + return -EOPNOTSUPP; + /* disable arpoffload */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_ARPEN_POS, MAC_CR_ARPEN_LEN, + 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + return 0; +} + +/* this function config register for NS offload function + * parameters: + * index - 0~1, index to NS look up table. one entry of the lut is like this |remote|solicited|target0|target1| + * remote_addr - ipv6 addr where fuxi gets the NS solicitation pkt(request). in common, it is 0 to match any remote machine. + * solicited_addr - the solicited node multicast group address which fuxi computes and joins. + * target_addr1 - it is the target address in NS solicitation pkt. + * target_addr2 - second target address, any address (with last 6B same with target address?). 
+ */ +static int fxgmac_set_ns_offload(struct fxgmac_pdata *pdata, unsigned int index, + unsigned char *remote_addr, + unsigned char *solicited_addr, + unsigned char *target_addr1, + unsigned char *target_addr2, + unsigned char *mac_addr) +{ + u32 regval; + u32 Address[4], mac_addr_hi, mac_addr_lo; + u8 i, remote_not_zero = 0; + + regval = readreg(pdata->pAdapter, pdata->base_mem + NS_TPID_PRO); + regval = FXGMAC_SET_REG_BITS(regval, NS_TPID_PRO_STPID_POS, + NS_TPID_PRO_STPID_LEN, 0X8100); + regval = FXGMAC_SET_REG_BITS(regval, NS_TPID_PRO_CTPID_POS, + NS_TPID_PRO_CTPID_LEN, 0X9100); + writereg(pdata->pAdapter, regval, pdata->base_mem + NS_TPID_PRO); + regval = readreg(pdata->pAdapter, + pdata->base_mem + 0X38 * index + NS_LUT_MAC_ADDR_CTL); + regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_DST_CMP_TYPE_POS, + NS_LUT_DST_CMP_TYPE_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_DST_IGNORED_POS, + NS_LUT_DST_IGNORED_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_REMOTE_AWARED_POS, + NS_LUT_REMOTE_AWARED_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_TARGET_ISANY_POS, + NS_LUT_TARGET_ISANY_LEN, 0); + writereg(pdata->pAdapter, regval, + pdata->base_mem + 0X38 * index + NS_LUT_MAC_ADDR_CTL); + + /* AR */ + for (i = 0; i < 16 / 4; i++) { + Address[i] = (remote_addr[i * 4 + 0] << 24) | + (remote_addr[i * 4 + 1] << 16) | + (remote_addr[i * 4 + 2] << 8) | + (remote_addr[i * 4 + 3] << 0); + writereg(pdata->pAdapter, Address[i], + pdata->base_mem + 0X38 * index + NS_LUT_ROMOTE0 + + 4 * i); + if (Address[i]) { + remote_not_zero = 1; + } + Address[i] = (target_addr1[i * 4 + 0] << 24) | + (target_addr1[i * 4 + 1] << 16) | + (target_addr1[i * 4 + 2] << 8) | + (target_addr1[i * 4 + 3] << 0); + writereg(pdata->pAdapter, Address[i], + pdata->base_mem + 0X38 * index + NS_LUT_TARGET0 + + 4 * i); + Address[i] = (solicited_addr[i * 4 + 0] << 24) | + (solicited_addr[i * 4 + 1] << 16) | + (solicited_addr[i * 4 + 2] << 8) | + (solicited_addr[i * 4 + 3] << 0); + 
writereg(pdata->pAdapter, Address[i], + pdata->base_mem + 0X38 * index + NS_LUT_SOLICITED0 + + 4 * i); + Address[i] = (target_addr2[i * 4 + 0] << 24) | + (target_addr2[i * 4 + 1] << 16) | + (target_addr2[i * 4 + 2] << 8) | + (target_addr2[i * 4 + 3] << 0); + writereg(pdata->pAdapter, Address[i], + pdata->base_mem + 0X10 * index + NS_LUT_TARGET4 + + 4 * i); + } + mac_addr_hi = (mac_addr[0] << 24) | (mac_addr[1] << 16) | + (mac_addr[2] << 8) | (mac_addr[3] << 0); + mac_addr_lo = (mac_addr[4] << 8) | (mac_addr[5] << 0); + + writereg(pdata->pAdapter, mac_addr_hi, + pdata->base_mem + 0X38 * index + NS_LUT_MAC_ADDR); + if (remote_not_zero == 0) { + regval = readreg(pdata->pAdapter, pdata->base_mem + + 0X38 * index + + NS_LUT_MAC_ADDR_CTL); + regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_REMOTE_AWARED_POS, + NS_LUT_REMOTE_AWARED_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_MAC_ADDR_LOW_POS, + NS_LUT_MAC_ADDR_LOW_LEN, + mac_addr_lo); + writereg(pdata->pAdapter, regval, + pdata->base_mem + 0X38 * index + NS_LUT_MAC_ADDR_CTL); + } else { + regval = readreg(pdata->pAdapter, pdata->base_mem + + 0X38 * index + + NS_LUT_MAC_ADDR_CTL); + regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_REMOTE_AWARED_POS, + NS_LUT_REMOTE_AWARED_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, NS_LUT_MAC_ADDR_LOW_POS, + NS_LUT_MAC_ADDR_LOW_LEN, + mac_addr_lo); + writereg(pdata->pAdapter, regval, + pdata->base_mem + 0X38 * index + NS_LUT_MAC_ADDR_CTL); + } + return 0; +} + +static void fxgmac_update_ns_offload_ipv6addr(struct fxgmac_pdata *pdata, + unsigned int param) +{ + struct net_device *netdev = pdata->netdev; + unsigned char addr_buf[5][16]; + + unsigned char *remote_addr = (unsigned char *)&addr_buf[0][0]; + unsigned char *solicited_addr = (unsigned char *)&addr_buf[1][0]; + unsigned char *target_addr1 = (unsigned char *)&addr_buf[2][0]; + unsigned char *mac_addr = (unsigned char *)&addr_buf[4][0]; + + /* get ipv6 addr from net device */ + if (NULL == fxgmac_get_netdev_ip6addr(pdata, 
target_addr1, + solicited_addr, + (FXGMAC_NS_IFA_LOCAL_LINK | + FXGMAC_NS_IFA_GLOBAL_UNICAST) & + param)) { + DPRINTK("%s, get net device ipv6 addr with err and ignore NS offload.\n", + __FUNCTION__); + + return; + } + + DPRINTK("%s, Get net device binary IPv6 ok, local-link=%pI6\n", + __FUNCTION__, target_addr1); + DPRINTK("%s, Get net device binary IPv6 ok, solicited =%pI6\n", + __FUNCTION__, solicited_addr); + + memcpy(mac_addr, netdev->dev_addr, netdev->addr_len); + DPRINTK("%s, Get net device MAC addr ok, ns_tab idx=%d, %02x:%02x:%02x:%02x:%02x:%02x\n", + __FUNCTION__, pdata->expansion.ns_offload_tab_idx, mac_addr[0], + mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4], + mac_addr[5]); + + memset(remote_addr, 0, 16); + fxgmac_set_ns_offload(pdata, pdata->expansion.ns_offload_tab_idx++, + remote_addr, solicited_addr, target_addr1, + target_addr1, mac_addr); + if (pdata->expansion.ns_offload_tab_idx >= 2) + pdata->expansion.ns_offload_tab_idx = 0; +} + +static int fxgmac_enable_ns_offload(struct fxgmac_pdata *pdata) +{ + writereg(pdata->pAdapter, 0X00000011, pdata->base_mem + NS_OF_GLB_CTL); + return 0; +} + +static int fxgmac_disable_ns_offload(struct fxgmac_pdata *pdata) +{ + writereg(pdata->pAdapter, 0X00000000, pdata->base_mem + NS_OF_GLB_CTL); + return 0; +} + +static int fxgmac_check_wake_pattern_fifo_pointer(struct fxgmac_pdata *pdata) +{ + u32 regval; + int ret = 0; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA); + regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_RWKFILTERST_POS, + MAC_PMT_STA_RWKFILTERST_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA); + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA); + regval = FXGMAC_GET_REG_BITS(regval, MAC_PMT_STA_RWKPTR_POS, + MAC_PMT_STA_RWKPTR_LEN); + if (regval != 0) { + DPRINTK("Remote fifo pointer is not 0\n"); + ret = -EINVAL; + } + return ret; +} + +static int fxgmac_set_wake_pattern_mask(struct fxgmac_pdata *pdata, + u32 filter_index, u8 
register_index, + u32 Data) +{ + const u16 address_offset[16][3] = { + { 0x1020, 0x1024, 0x1028 }, { 0x102c, 0x1030, 0x1034 }, + { 0x1038, 0x103c, 0x1040 }, { 0x1044, 0x1050, 0x1054 }, + { 0x1058, 0x105c, 0x1060 }, { 0x1064, 0x1068, 0x106c }, + { 0x1070, 0x1074, 0x1078 }, { 0x107c, 0x1080, 0x1084 }, + { 0x1088, 0x108c, 0x1090 }, { 0x1134, 0x113c, 0x1140 }, + { 0x1208, 0x1200, 0x1204 }, { 0x1218, 0x1210, 0x1214 }, + { 0x1228, 0x1220, 0x1224 }, { 0x1238, 0x1230, 0x1234 }, + { 0x1248, 0x1240, 0x1244 }, { 0x1258, 0x1250, 0x1254 }, + }; + if (filter_index > 15 || register_index > 2) { + DbgPrintF( + MP_TRACE, + "%s - Remote mask pointer is over range, filter_index:%d, register_index:0x%x\n", + __FUNCTION__, filter_index, register_index); + return -1; + } + writereg(pdata->pAdapter, Data, + pdata->base_mem + + address_offset[filter_index][register_index]); + return 0; +} + +static u16 wol_crc16(u8 *pucframe, u16 uslen) +{ + int i; + + union type16 { + u16 raw; + struct { + u16 bit_0 : 1; + u16 bit_1 : 1; + u16 bit_2 : 1; + u16 bit_3 : 1; + u16 bit_4 : 1; + u16 bit_5 : 1; + u16 bit_6 : 1; + u16 bit_7 : 1; + u16 bit_8 : 1; + u16 bit_9 : 1; + u16 bit_10 : 1; + u16 bit_11 : 1; + u16 bit_12 : 1; + u16 bit_13 : 1; + u16 bit_14 : 1; + u16 bit_15 : 1; + } bits; + }; + + union type8 { + u16 raw; + + struct { + u16 bit_0 : 1; + u16 bit_1 : 1; + u16 bit_2 : 1; + u16 bit_3 : 1; + u16 bit_4 : 1; + u16 bit_5 : 1; + u16 bit_6 : 1; + u16 bit_7 : 1; + } bits; + }; + + union type16 crc, crc_comb; + union type8 next_crc, rrpe_data; + next_crc.raw = 0; + crc.raw = 0xffff; + for (i = 0; i < uslen; i++) { + rrpe_data.raw = pucframe[i]; + next_crc.bits.bit_0 = crc.bits.bit_15 ^ rrpe_data.bits.bit_0; + next_crc.bits.bit_1 = crc.bits.bit_14 ^ next_crc.bits.bit_0 ^ + rrpe_data.bits.bit_1; + next_crc.bits.bit_2 = crc.bits.bit_13 ^ next_crc.bits.bit_1 ^ + rrpe_data.bits.bit_2; + next_crc.bits.bit_3 = crc.bits.bit_12 ^ next_crc.bits.bit_2 ^ + rrpe_data.bits.bit_3; + next_crc.bits.bit_4 = 
crc.bits.bit_11 ^ next_crc.bits.bit_3 ^ + rrpe_data.bits.bit_4; + next_crc.bits.bit_5 = crc.bits.bit_10 ^ next_crc.bits.bit_4 ^ + rrpe_data.bits.bit_5; + next_crc.bits.bit_6 = crc.bits.bit_9 ^ next_crc.bits.bit_5 ^ + rrpe_data.bits.bit_6; + next_crc.bits.bit_7 = crc.bits.bit_8 ^ next_crc.bits.bit_6 ^ + rrpe_data.bits.bit_7; + + crc_comb.bits.bit_15 = crc.bits.bit_7 ^ next_crc.bits.bit_7; + crc_comb.bits.bit_14 = crc.bits.bit_6; + crc_comb.bits.bit_13 = crc.bits.bit_5; + crc_comb.bits.bit_12 = crc.bits.bit_4; + crc_comb.bits.bit_11 = crc.bits.bit_3; + crc_comb.bits.bit_10 = crc.bits.bit_2; + crc_comb.bits.bit_9 = crc.bits.bit_1 ^ next_crc.bits.bit_0; + crc_comb.bits.bit_8 = crc.bits.bit_0 ^ next_crc.bits.bit_1; + crc_comb.bits.bit_7 = next_crc.bits.bit_0 ^ next_crc.bits.bit_2; + crc_comb.bits.bit_6 = next_crc.bits.bit_1 ^ next_crc.bits.bit_3; + crc_comb.bits.bit_5 = next_crc.bits.bit_2 ^ next_crc.bits.bit_4; + crc_comb.bits.bit_4 = next_crc.bits.bit_3 ^ next_crc.bits.bit_5; + crc_comb.bits.bit_3 = next_crc.bits.bit_4 ^ next_crc.bits.bit_6; + crc_comb.bits.bit_2 = next_crc.bits.bit_5 ^ next_crc.bits.bit_7; + crc_comb.bits.bit_1 = next_crc.bits.bit_6; + crc_comb.bits.bit_0 = next_crc.bits.bit_7; + crc.raw = crc_comb.raw; + } + return crc.raw; +} + +static int fxgmac_set_wake_pattern(struct fxgmac_pdata *pdata, + struct wol_bitmap_pattern *wol_pattern, + u32 pattern_cnt) +{ + u32 i, j, kp, km, mask_index; + int z; + u16 map_index; + u8 mask[MAX_PATTERN_SIZE]; + u32 regval = 0; + u32 total_cnt = 0, pattern_inherited_cnt = 0; + u8 *ptdata, *ptmask; + + if (pattern_cnt > MAX_PATTERN_COUNT) { + DbgPrintF( + MP_TRACE, + "%s - Error: %d patterns, exceed %d, not supported!\n", + __FUNCTION__, pattern_cnt, MAX_PATTERN_COUNT); + return -1; + } + + /* Reset the FIFO head pointer. 
*/ + if (fxgmac_check_wake_pattern_fifo_pointer(pdata)) { + DbgPrintF( + MP_TRACE, + "%s - Warning: the remote pattern array pointer is not be 0\n", + __FUNCTION__); + return -1; + } + + for (i = 0; i < pattern_cnt; i++) { + memcpy(&pdata->pattern[i], wol_pattern + i, + sizeof(wol_pattern[0])); + if (pattern_cnt + pattern_inherited_cnt < MAX_PATTERN_COUNT) { + if (wol_pattern[i].pattern_offset || + !(wol_pattern[i].mask_info[0] & 0x01)) { + memcpy(&pdata->pattern[pattern_cnt + + pattern_inherited_cnt], + wol_pattern + i, sizeof(wol_pattern[0])); + pattern_inherited_cnt++; + } + } + } + total_cnt = pattern_cnt + pattern_inherited_cnt; + + /* + * calculate the crc-16 of the mask pattern + * print the pattern and mask for debug purpose. + */ + for (i = 0; i < total_cnt; i++) { + /* Please program pattern[i] to NIC for pattern match wakeup. + * pattern_size, pattern_info, mask_info + */ + /* save the mask pattern */ + mask_index = 0; + map_index = 0; + for (j = 0; j < pdata->pattern[i].mask_size; j++) { + for (z = 0; + z < ((j == (MAX_PATTERN_SIZE / 8 - 1)) ? 7 : 8); + z++) { + if (pdata->pattern[i].mask_info[j] & + (0x01 << z)) { + mask[map_index] = + pdata->pattern[i].pattern_info + [pdata->pattern[i] + .pattern_offset + + mask_index]; + map_index++; + } + mask_index++; + } + } + /* calculate the crc-16 of the mask pattern */ + pdata->pattern[i].pattern_crc = wol_crc16(mask, map_index); + + /* Print pattern match, for debug purpose. 
*/ + DbgPrintF(MP_LOUD, "%s - Pattern[%d]:", __FUNCTION__, i); + for (kp = 0, km = 0; + kp < sizeof(pdata->pattern[i].pattern_info); + kp += 16, km += 2) { + ptdata = &pdata->pattern[i].pattern_info[kp]; + ptmask = &pdata->pattern[i].mask_info[km]; + DBGPRINT( + MP_LOUD, + ("\n %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x Mask %02x-%02x", + ptdata[0], ptdata[1], ptdata[2], ptdata[3], + ptdata[4], ptdata[5], ptdata[6], ptdata[7], + ptdata[8], ptdata[9], ptdata[10], ptdata[11], + ptdata[12], ptdata[13], ptdata[14], ptdata[15], + ptmask[0], ptmask[1])); + } + DbgPrintF( + MP_LOUD, + "WritePatternToNic62 the %d patterns crc = %x mask length = %d, mask_offset=%x.\n", + i, pdata->pattern[i].pattern_crc, map_index, + pdata->pattern[i].pattern_offset); + memset(mask, 0, sizeof(mask)); + } + + /* Write patterns by FIFO block. */ + for (i = 0; i < (total_cnt + 3) / 4; i++) { + /* 1. Write the first 4Bytes of Filter. */ + writereg(pdata->pAdapter, + ((pdata->pattern[i * 4 + 0].mask_info[3] & 0x7f) + << 24) | + (pdata->pattern[i * 4 + 0].mask_info[2] + << 16) | + (pdata->pattern[i * 4 + 0].mask_info[1] << 8) | + (pdata->pattern[i * 4 + 0].mask_info[0] << 0), + pdata->mac_regs + MAC_RWK_PAC); + + writereg(pdata->pAdapter, + ((pdata->pattern[i * 4 + 1].mask_info[3] & 0x7f) + << 24) | + (pdata->pattern[i * 4 + 1].mask_info[2] + << 16) | + (pdata->pattern[i * 4 + 1].mask_info[1] << 8) | + (pdata->pattern[i * 4 + 1].mask_info[0] << 0), + pdata->mac_regs + MAC_RWK_PAC); + + writereg(pdata->pAdapter, + ((pdata->pattern[i * 4 + 2].mask_info[3] & 0x7f) + << 24) | + (pdata->pattern[i * 4 + 2].mask_info[2] + << 16) | + (pdata->pattern[i * 4 + 2].mask_info[1] << 8) | + (pdata->pattern[i * 4 + 2].mask_info[0] << 0), + pdata->mac_regs + MAC_RWK_PAC); + + writereg(pdata->pAdapter, + ((pdata->pattern[i * 4 + 3].mask_info[3] & 0x7f) + << 24) | + (pdata->pattern[i * 4 + 3].mask_info[2] + << 16) | + (pdata->pattern[i * 4 + 3].mask_info[1] << 8) | + 
(pdata->pattern[i * 4 + 3].mask_info[0] << 0), + pdata->mac_regs + MAC_RWK_PAC); + + /* 2. Write the Filter Command. */ + regval = 0; + /* Set filter enable bit. */ + regval |= ((i * 4 + 0) < total_cnt) ? (0x1 << 0) : 0x0; + regval |= ((i * 4 + 1) < total_cnt) ? (0x1 << 8) : 0x0; + regval |= ((i * 4 + 2) < total_cnt) ? (0x1 << 16) : 0x0; + regval |= ((i * 4 + 3) < total_cnt) ? (0x1 << 24) : 0x0; + /* Set filter address type, 0- unicast, 1 - multicast. */ + regval |= (i * 4 + 0 >= total_cnt) ? 0x0 : + (i * 4 + 0 >= pattern_cnt) ? (0x1 << (3 + 0)) : + pdata->pattern[i * 4 + 0].pattern_offset ? + 0x0 : + !(pdata->pattern[i * 4 + 0].mask_info[0] & 0x01) ? + 0x0 : + (pdata->pattern[i * 4 + 0].pattern_info[0] & 0x01) ? + (0x1 << (3 + 0)) : + 0x0; + regval |= (i * 4 + 1 >= total_cnt) ? 0x0 : + (i * 4 + 1 >= pattern_cnt) ? (0x1 << (3 + 8)) : + pdata->pattern[i * 4 + 1].pattern_offset ? + 0x0 : + !(pdata->pattern[i * 4 + 1].mask_info[0] & 0x01) ? + 0x0 : + (pdata->pattern[i * 4 + 1].pattern_info[0] & 0x01) ? + (0x1 << (3 + 8)) : + 0x0; + regval |= (i * 4 + 2 >= total_cnt) ? 0x0 : + (i * 4 + 2 >= pattern_cnt) ? (0x1 << (3 + 16)) : + pdata->pattern[i * 4 + 2].pattern_offset ? + 0x0 : + !(pdata->pattern[i * 4 + 2].mask_info[0] & 0x01) ? + 0x0 : + (pdata->pattern[i * 4 + 2].pattern_info[0] & 0x01) ? + (0x1 << (3 + 16)) : + 0x0; + regval |= (i * 4 + 3 >= total_cnt) ? 0x0 : + (i * 4 + 3 >= pattern_cnt) ? (0x1 << (3 + 24)) : + pdata->pattern[i * 4 + 3].pattern_offset ? + 0x0 : + !(pdata->pattern[i * 4 + 3].mask_info[0] & 0x01) ? + 0x0 : + (pdata->pattern[i * 4 + 3].pattern_info[0] & 0x01) ? + (0x1 << (3 + 24)) : + 0x0; + writereg(pdata->pAdapter, regval, + pdata->mac_regs + MAC_RWK_PAC); + + /* 3. Write the mask offset. 
*/ + writereg(pdata->pAdapter, + (pdata->pattern[i * 4 + 3].pattern_offset << 24) | + (pdata->pattern[i * 4 + 2].pattern_offset + << 16) | + (pdata->pattern[i * 4 + 1].pattern_offset + << 8) | + (pdata->pattern[i * 4 + 0].pattern_offset + << 0), + pdata->mac_regs + MAC_RWK_PAC); + + /* 4. Write the masked data CRC. */ + writereg(pdata->pAdapter, + (pdata->pattern[i * 4 + 1].pattern_crc << 16) | + (pdata->pattern[i * 4 + 0].pattern_crc << 0), + pdata->mac_regs + MAC_RWK_PAC); + writereg(pdata->pAdapter, + (pdata->pattern[i * 4 + 3].pattern_crc << 16) | + (pdata->pattern[i * 4 + 2].pattern_crc << 0), + pdata->mac_regs + MAC_RWK_PAC); + } + + for (i = 0; i < total_cnt; i++) { + fxgmac_set_wake_pattern_mask( + pdata, i, 0, + ((pdata->pattern[i].mask_info[7] & 0x7f) << (24 + 1)) | + (pdata->pattern[i].mask_info[6] << (16 + 1)) | + (pdata->pattern[i].mask_info[5] << (8 + 1)) | + (pdata->pattern[i].mask_info[4] << (0 + 1)) | + ((pdata->pattern[i].mask_info[3] & 0x80) >> + 7)); /* global manager regitster mask bit 31~62 */ + fxgmac_set_wake_pattern_mask( + pdata, i, 1, + ((pdata->pattern[i].mask_info[11] & 0x7f) << (24 + 1)) | + (pdata->pattern[i].mask_info[10] << (16 + 1)) | + (pdata->pattern[i].mask_info[9] << (8 + 1)) | + (pdata->pattern[i].mask_info[8] << (0 + 1)) | + ((pdata->pattern[i].mask_info[7] & 0x80) >> + 7)); /* global manager regitster mask bit 63~94 */ + fxgmac_set_wake_pattern_mask( + pdata, i, 2, + ((pdata->pattern[i].mask_info[15] & 0x7f) << (24 + 1)) | + (pdata->pattern[i].mask_info[14] << (16 + 1)) | + (pdata->pattern[i].mask_info[13] << (8 + 1)) | + (pdata->pattern[i].mask_info[12] << (0 + 1)) | + ((pdata->pattern[i].mask_info[11] & 0x80) >> + 7)); /* global manager regitster mask bit 95~126 */ + } + + return 0; +} + +static int fxgmac_enable_wake_pattern(struct fxgmac_pdata *pdata) +{ + u32 regval; + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA); + regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_RWKFILTERST_POS, + 
MAC_PMT_STA_RWKFILTERST_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_RWKPKTEN_POS, + MAC_PMT_STA_RWKPKTEN_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA); + regval = readreg(pdata->pAdapter, pdata->base_mem + WOL_CTL); + regval = FXGMAC_SET_REG_BITS(regval, WOL_PKT_EN_POS, WOL_PKT_EN_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL); + return 0; +} + +static int fxgmac_disable_wake_pattern(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA); + regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_RWKFILTERST_POS, + MAC_PMT_STA_RWKFILTERST_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_RWKPKTEN_POS, + MAC_PMT_STA_RWKPKTEN_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA); + regval = readreg(pdata->pAdapter, pdata->base_mem + WOL_CTL); + regval = FXGMAC_SET_REG_BITS(regval, WOL_PKT_EN_POS, WOL_PKT_EN_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL); + return 0; +} + +static int fxgmac_enable_wake_magic_pattern(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA); + regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_MGKPKTEN_POS, + MAC_PMT_STA_MGKPKTEN_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA); + regval = readreg(pdata->pAdapter, pdata->base_mem + WOL_CTL); + regval = FXGMAC_SET_REG_BITS(regval, WOL_PKT_EN_POS, WOL_PKT_EN_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL); + + /* Enable PME Enable Bit. 
*/ + cfg_r32(pdata, REG_PM_STATCTRL, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PM_CTRLSTAT_PME_EN_POS, + PM_CTRLSTAT_PME_EN_LEN, 1); + cfg_w32(pdata, REG_PM_STATCTRL, regval); + + return 0; +} + +static int fxgmac_disable_wake_magic_pattern(struct fxgmac_pdata *pdata) +{ + u32 regval; + regval = readreg(pdata->pAdapter, pdata->base_mem + WOL_CTL); + regval = FXGMAC_SET_REG_BITS(regval, WOL_PKT_EN_POS, WOL_PKT_EN_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL); + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA); + regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_MGKPKTEN_POS, + MAC_PMT_STA_MGKPKTEN_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA); + return 0; +} + +#if defined(FUXI_PM_WPI_READ_FEATURE_EN) && FUXI_PM_WPI_READ_FEATURE_EN +/* + * enable Wake packet indication. called to enable before sleep/hibernation + * and no needed to call disable for that, fxgmac_get_wake_packet_indication will clear to normal once done. + */ +static void fxgmac_enable_wake_packet_indication(struct fxgmac_pdata *pdata, + int en) +{ + u32 val_wpi_crtl0; + + /* read-clear WoL event. */ + readreg(pdata->pAdapter, pdata->base_mem + MGMT_WOL_CTRL); + + /* get wake packet information */ + val_wpi_crtl0 = + (u32)readreg(pdata->pAdapter, pdata->base_mem + MGMT_WPI_CTRL0); + + /* prepare to write packet data by write wpi_mode to 1 */ + val_wpi_crtl0 = + FXGMAC_SET_REG_BITS(val_wpi_crtl0, MGMT_WPI_CTRL0_WPI_MODE_POS, + MGMT_WPI_CTRL0_WPI_MODE_LEN, + (en ? MGMT_WPI_CTRL0_WPI_MODE_WR : + MGMT_WPI_CTRL0_WPI_MODE_NORMAL)); + writereg(pdata->pAdapter, val_wpi_crtl0, + pdata->base_mem + MGMT_WPI_CTRL0); + + DbgPrintF(MP_TRACE, "%s - WPI pkt enable=%d, reg=%08x.\n", __FUNCTION__, + en, val_wpi_crtl0); + + return; +} + +/* + * this function read Wake up packet after MDIS resume + * input: + * pdata + * wpi_buf container of a packet. + * buf_size size of the packet container. since HW limit to 14bits, ie 16KB all together. 
+ * output: + * wake_reason from HW, we can indentify 1)magic packet, or 2)pattern(remote wake packet) or WAKE_REASON_HW_ERR indicates err + * packet_size length of the wake packet. 0 indicates exception. + * + */ +static void fxgmac_get_wake_packet_indication(struct fxgmac_pdata *pdata, + int *wake_reason, + u32 *wake_pattern_number, + u8 *wpi_buf, u32 buf_size, + u32 *packet_size) +{ + u32 i, regval, val_wpi_crtl0, *dw_wpi_buf; + u32 data_len, data_len_dw, b_need_pkt = 0; + + *wake_reason = WAKE_REASON_NONE; + *packet_size = 0; + fxgmac_release_phy(pdata); + + /* try to check wake reason. GMAC reg 20c0 only tells Magic or remote-pattern + * read from MGMT_WOL_CTRL, 1530 instead. + */ + regval = (u32)readreg(pdata->pAdapter, pdata->base_mem + MGMT_WOL_CTRL); + DbgPrintF(MP_TRACE, "%s - 0x1530=%x.\n", __FUNCTION__, regval); + if (!regval) { + DbgPrintF(MP_TRACE, "%s - nothing for WPI pkt.\n", + __FUNCTION__); + return; + } + + if (regval & MGMT_WOL_CTRL_WPI_MGC_PKT) { + *wake_reason = WAKE_REASON_MAGIC; + b_need_pkt = 1; + } else if (regval & MGMT_WOL_CTRL_WPI_RWK_PKT) { + *wake_reason = WAKE_REASON_PATTERNMATCH; + b_need_pkt = 1; + *wake_pattern_number = 0; + + /* + * wake_pattern_number, HW should tell, tbd + */ + for (i = 0; i < MAX_PATTERN_COUNT; i++) { + if (regval & (MGMT_WOL_CTRL_WPI_RWK_PKT_NUMBER << i)) { + *wake_pattern_number = i; + break; + } + } + + } else if (regval & MGMT_WOL_CTRL_WPI_LINK_CHG) { + *wake_reason = WAKE_REASON_LINK; + } + + if (!b_need_pkt) { + DbgPrintF(MP_TRACE, "%s - wake by link and no WPI pkt.\n", + __FUNCTION__); + return; + } + + /* get wake packet information */ + val_wpi_crtl0 = + (u32)readreg(pdata->pAdapter, pdata->base_mem + MGMT_WPI_CTRL0); + + if (val_wpi_crtl0 & MGMT_WPI_CTRL0_WPI_FAIL) { + *wake_reason = WAKE_REASON_HW_ERR; + DbgPrintF(MP_TRACE, "%s - WPI pkt fail from hw.\n", + __FUNCTION__); + return; + } + + *packet_size = FXGMAC_GET_REG_BITS(val_wpi_crtl0, + MGMT_WPI_CTRL0_WPI_PKT_LEN_POS, + 
MGMT_WPI_CTRL0_WPI_PKT_LEN_LEN); + + if (0 == *packet_size) { + *wake_reason = WAKE_REASON_HW_ERR; + DbgPrintF(MP_TRACE, "%s - WPI pkt len is 0 from hw.\n", + __FUNCTION__); + return; + } + + DbgPrintF(MP_TRACE, "%s - WPI pkt len from hw, *packet_size=%u.\n", + __FUNCTION__, *packet_size); + + if (buf_size < *packet_size) { + DbgPrintF(MP_WARN, + "%s - too small buf_size=%u, WPI pkt len is %u.\n", + __FUNCTION__, buf_size, *packet_size); + data_len = buf_size; + } else { + data_len = *packet_size; + } + + /* prepare to read packet data by write wpi_mode to 2 */ + val_wpi_crtl0 = FXGMAC_SET_REG_BITS(val_wpi_crtl0, + MGMT_WPI_CTRL0_WPI_MODE_POS, + MGMT_WPI_CTRL0_WPI_MODE_LEN, + MGMT_WPI_CTRL0_WPI_MODE_RD); + writereg(pdata->pAdapter, val_wpi_crtl0, + pdata->base_mem + MGMT_WPI_CTRL0); + + dw_wpi_buf = (u32 *)wpi_buf; + data_len_dw = (data_len + 3) / 4; + + i = 0; + DbgPrintF( + MP_TRACE, + "%s - before retrieve, len=%d, len_dw=%d, reg_wpi_ctrl0=%08x.\n", + __FUNCTION__, data_len, data_len_dw, val_wpi_crtl0); + while ((0 == (val_wpi_crtl0 & MGMT_WPI_CTRL0_WPI_OP_DONE))) { + if (i < data_len_dw) { + regval = (u32)readreg(pdata->pAdapter, + pdata->base_mem + + MGMT_WPI_CTRL1_DATA); + /*dw_wpi_buf[i] = SWAP_BYTES_32(regval);*/ + dw_wpi_buf[i] = regval; + } else { + break; + } + + val_wpi_crtl0 = (u32)readreg(pdata->pAdapter, + pdata->base_mem + MGMT_WPI_CTRL0); + i++; + } + if (*packet_size <= MAC_CRC_LENGTH) { + DbgPrintF(MP_TRACE, + "%s - Warning, WPI pkt len is less 4 from hw.\n", + __FUNCTION__); + return; + } + *packet_size -= MAC_CRC_LENGTH; + + /* once read data complete and write wpi_mode to 0, normal */ + val_wpi_crtl0 = FXGMAC_SET_REG_BITS(val_wpi_crtl0, + MGMT_WPI_CTRL0_WPI_MODE_POS, + MGMT_WPI_CTRL0_WPI_MODE_LEN, + MGMT_WPI_CTRL0_WPI_MODE_NORMAL); + writereg(pdata->pAdapter, val_wpi_crtl0, + pdata->base_mem + MGMT_WPI_CTRL0); + + DbgPrintF( + MP_TRACE, + "%s - WPI done and back to normal mode, reg=%08x, read data=%dB.\n", + __FUNCTION__, val_wpi_crtl0, i * 
4); + + return; +} +#endif /* FUXI_PM_WPI_READ_FEATURE_EN */ + +static int fxgmac_enable_wake_link_change(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->base_mem + WOL_CTL); + regval = FXGMAC_SET_REG_BITS(regval, WOL_LINKCHG_EN_POS, + WOL_LINKCHG_EN_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL); + return 0; +} +static int fxgmac_disable_wake_link_change(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->base_mem + WOL_CTL); + regval = FXGMAC_SET_REG_BITS(regval, WOL_LINKCHG_EN_POS, + WOL_LINKCHG_EN_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL); + return 0; +} + +static void fxgmac_config_wol(struct fxgmac_pdata *pdata, int en) +{ + /* enable or disable WOL. this function only set wake-up type, and power related configure + * will be in other place, see power management. + */ + if (!pdata->hw_feat.rwk) { + netdev_err(pdata->netdev, + "error configuring WOL - not supported.\n"); + return; + } + + fxgmac_disable_wake_magic_pattern(pdata); + fxgmac_disable_wake_pattern(pdata); + fxgmac_disable_wake_link_change(pdata); + + if (en) { + /* config mac address for rx of magic or ucast */ + fxgmac_set_mac_address(pdata, (u8 *)(pdata->netdev->dev_addr)); + + /* Enable Magic packet */ + if (pdata->expansion.wol & WAKE_MAGIC) { + fxgmac_enable_wake_magic_pattern(pdata); + } + + /* Enable global unicast packet */ + if (pdata->expansion.wol & WAKE_UCAST || + pdata->expansion.wol & WAKE_MCAST || + pdata->expansion.wol & WAKE_BCAST || + pdata->expansion.wol & WAKE_ARP) { + fxgmac_enable_wake_pattern(pdata); + } + + /* Enable ephy link change */ + if ((FXGMAC_WOL_UPON_EPHY_LINK) && + (pdata->expansion.wol & WAKE_PHY)) { + fxgmac_enable_wake_link_change(pdata); + } + } + device_set_wakeup_enable(/*pci_dev_to_dev*/ (pdata->dev), en); + + DPRINTK("config_wol callout\n"); +} + +static int fxgmac_get_ephy_state(struct fxgmac_pdata *pdata) +{ + u32 value; + 
value = readreg(pdata->pAdapter, pdata->base_mem + MGMT_EPHY_CTRL); + return value; +} + +static void fxgmac_enable_dma_interrupts(struct fxgmac_pdata *pdata) +{ +#ifndef DPDK + unsigned int dma_ch_isr, dma_ch_ier; + struct fxgmac_channel *channel; + unsigned int i; + +#ifdef NIC_NET_ADAPETERCX + u32 regval; + /* config interrupt to level signal */ + regval = (u32)readreg(pdata->pAdapter, pdata->mac_regs + DMA_MR); + regval = FXGMAC_SET_REG_BITS(regval, DMA_MR_INTM_POS, DMA_MR_INTM_LEN, + 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_MR_QUREAD_POS, + DMA_MR_QUREAD_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + DMA_MR); +#endif + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + /* Clear all the interrupts which are set */ + dma_ch_isr = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + writereg(pdata->pAdapter, dma_ch_isr, + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + + /* Clear all interrupt enable bits */ + dma_ch_ier = 0; + + /* Enable following interrupts + * NIE - Normal Interrupt Summary Enable + * AIE - Abnormal Interrupt Summary Enable + * FBEE - Fatal Bus Error Enable + */ + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_NIE_POS, + DMA_CH_IER_NIE_LEN, 1); + /* dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + * DMA_CH_IER_AIE_POS, DMA_CH_IER_AIE_LEN, 1); + */ + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_FBEE_POS, + DMA_CH_IER_FBEE_LEN, 1); + + if (channel->tx_ring) { + /* Enable the following Tx interrupts + * TIE - Transmit Interrupt Enable (unless using + * per channel interrupts) + */ + if (!pdata->per_channel_irq) + dma_ch_ier = FXGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_TIE_POS, + DMA_CH_IER_TIE_LEN, 1); + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + if (pdata->per_channel_irq) { + dma_ch_ier = FXGMAC_SET_REG_BITS( + dma_ch_ier, DMA_CH_IER_TIE_POS, + DMA_CH_IER_TIE_LEN, 1); + } + } + } + if (channel->rx_ring) { + /* Enable following Rx interrupts + * RBUE - 
Receive Buffer Unavailable Enable + * RIE - Receive Interrupt Enable (unless using + * per channel interrupts) + */ + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_RBUE_POS, + DMA_CH_IER_RBUE_LEN, + 1); + dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, + DMA_CH_IER_RIE_POS, + DMA_CH_IER_RIE_LEN, 1); + } + + writereg(pdata->pAdapter, dma_ch_ier, + FXGMAC_DMA_REG(channel, DMA_CH_IER)); + } +#else + struct fxgmac_tx_queue *txq; + unsigned int dma_ch_isr, dma_ch_ier; + unsigned int i; + + for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { + txq = pdata->expansion.eth_dev->data->tx_queues[i]; + + /* Clear all the interrupts which are set */ + dma_ch_isr = FXGMAC_DMA_IOREAD(txq, DMA_CH_SR); + FXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr); + + /* Clear all interrupt enable bits */ + dma_ch_ier = 0; + + /* Enable following interrupts + * NIE - Normal Interrupt Summary Enable + * AIE - Abnormal Interrupt Summary Enable + * FBEE - Fatal Bus Error Enable + */ + FXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1); /* 0 fx 1 */ + FXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1); + FXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); + + /* Enable following Rx interrupts + * RBUE - Receive Buffer Unavailable Enable + * RIE - Receive Interrupt Enable (unless using + * per channel interrupts in edge triggered + * mode) + */ + FXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1); /* 0 fx 1 */ + FXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0); /* 0 fx 1 */ + + FXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier); + } +#endif +} + +static void fxgmac_enable_mtl_interrupts(struct fxgmac_pdata *pdata) +{ + unsigned int q_count, i; + unsigned int mtl_q_isr; + + q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); + for (i = 0; i < q_count; i++) { + /* Clear all the interrupts which are set */ + mtl_q_isr = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); + writereg(pdata->pAdapter, mtl_q_isr, + FXGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); + + /* No MTL 
interrupts to be enabled */ + writereg(pdata->pAdapter, 0, + FXGMAC_MTL_REG(pdata, i, MTL_Q_IER)); + } +} + +static void fxgmac_enable_mac_interrupts(struct fxgmac_pdata *pdata) +{ + unsigned int mac_ier = 0; + u32 regval; + + /* Enable Timestamp interrupt */ + mac_ier = FXGMAC_SET_REG_BITS(mac_ier, MAC_IER_TSIE_POS, + MAC_IER_TSIE_LEN, 1); + + writereg(pdata->pAdapter, mac_ier, pdata->mac_regs + MAC_IER); + + /* Enable all counter interrupts */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_RIER); + regval = FXGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS, + MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_RIER); + regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_TIER); + regval = FXGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS, + MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_TIER); +} + +static int fxgmac_set_fxgmii_2500_speed(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_PS_POS, MAC_CR_PS_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_FES_POS, MAC_CR_FES_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_DM_POS, MAC_CR_DM_LEN, + pdata->phy_duplex); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + return 0; +} + +static int fxgmac_set_fxgmii_1000_speed(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_PS_POS, MAC_CR_PS_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_FES_POS, MAC_CR_FES_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_DM_POS, MAC_CR_DM_LEN, + pdata->phy_duplex); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + return 0; +} + +static int fxgmac_set_fxgmii_100_speed(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = 
readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_PS_POS, MAC_CR_PS_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_FES_POS, MAC_CR_FES_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_DM_POS, MAC_CR_DM_LEN, + pdata->phy_duplex); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + return 0; +} + +static int fxgmac_set_fxgmii_10_speed(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_PS_POS, MAC_CR_PS_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_FES_POS, MAC_CR_FES_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_DM_POS, MAC_CR_DM_LEN, + pdata->phy_duplex); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + + return 0; +} + +/** + * fxgmac_check_phy_link - Get link/speed status + * @pdata: pointer to gmac structure + * @speed: pointer to link speed + * @link_up: true is link is up, false otherwise + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static int fxgmac_check_phy_link(struct fxgmac_pdata *pdata, u32 *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u16 link_reg = 0; + + struct net_device *netdev = pdata->netdev; + if (netdev->base_addr) { + link_reg = + (u16)(*((u32 *)(netdev->base_addr + MGMT_EPHY_CTRL))); + + /* + * check register address 0x1004 + * b[6:5] ephy_pause + * b[4:3] ephy_speed 0b10 1000m 0b01 100m + * b[2] ephy_duplex + * b[1] ephy_link + * b[0] ephy_reset. should be set to 1 before use phy. + */ + *link_up = false; + if (link_reg & MGMT_EPHY_CTRL_STA_EPHY_RELEASE) { + if (link_up) { + *link_up = (link_reg & + MGMT_EPHY_CTRL_STA_EPHY_LINKUP) ? 
+ true : + false; + } + if (speed) + *speed = (link_reg & + MGMT_EPHY_CTRL_STA_SPEED_MASK) >> + MGMT_EPHY_CTRL_STA_SPEED_POS; + } else { + DPRINTK("fxgmac_check_phy_link ethernet PHY not released.\n"); + return -1; + } + } else { + DPRINTK("fxgmac_check_phy_link null base addr err\n"); + return -1; + } + + return 0; +} + +static int fxgmac_config_mac_speed(struct fxgmac_pdata *pdata) +{ + switch (pdata->phy_speed) { + case SPEED_2500: + fxgmac_set_fxgmii_2500_speed(pdata); + break; + case SPEED_1000: + fxgmac_set_fxgmii_1000_speed(pdata); + break; + case SPEED_100: + fxgmac_set_fxgmii_100_speed(pdata); + break; + case SPEED_10: + fxgmac_set_fxgmii_10_speed(pdata); + break; + } + return 0; +} + +static int fxgmac_write_ephy_reg(struct fxgmac_pdata *pdata, u32 reg_id, + u32 data) +{ + u32 regval; + u32 mdioctrl = reg_id * 0x10000 + 0x8000205; + int busy = 15; + + writereg(pdata->pAdapter, data, pdata->mac_regs + MAC_MDIO_DATA); + writereg(pdata->pAdapter, mdioctrl, pdata->mac_regs + MAC_MDIO_ADDRESS); + do { + regval = readreg(pdata->pAdapter, + pdata->mac_regs + MAC_MDIO_ADDRESS); + busy--; + } while ((regval & MAC_MDIO_ADDRESS_BUSY) && (busy)); + + DPRINTK("fxgmac_write_ephy_reg id %d %s, ctrl=0x%08x, data=0x%08x\n", + reg_id, (regval & 0x1) ? "err" : "ok", regval, data); + + return (regval & MAC_MDIO_ADDRESS_BUSY) ? 
-1 : 0; /* -1 indicates err */ +} + +static int fxgmac_read_ephy_reg(struct fxgmac_pdata *pdata, u32 reg_id, + u32 *data) +{ + u32 regval = 0, regret; + u32 mdioctrl = reg_id * 0x10000 + 0x800020d; + int busy = 15; + + writereg(pdata->pAdapter, mdioctrl, pdata->mac_regs + MAC_MDIO_ADDRESS); + do { + regval = readreg(pdata->pAdapter, + pdata->mac_regs + MAC_MDIO_ADDRESS); + busy--; + } while ((regval & MAC_MDIO_ADDRESS_BUSY) && (busy)); + + if (0 == (regval & MAC_MDIO_ADDRESS_BUSY)) { + regret = readreg(pdata->pAdapter, + pdata->mac_regs + MAC_MDIO_DATA); + if (data) + *data = regret; + return regret; + } + + DPRINTK("fxgmac_read_ephy_reg id=0x%02x err, busy=%d, ctrl=0x%08x.\n", + reg_id, busy, regval); + return -1; +} + +static int fxgmac_write_ephy_mmd_reg(struct fxgmac_pdata *pdata, u32 reg_id, + u32 mmd, u32 data) +{ + u32 regval; + u32 mdioctrl = (mmd << 16) + 0x8000207; + u32 regdata = (reg_id << 16) + data; + /* for phy mmd reg r/w operation, set more delay time than phy mii reg r/w */ + int busy = 60; + + writereg(pdata->pAdapter, regdata, pdata->mac_regs + MAC_MDIO_DATA); + writereg(pdata->pAdapter, mdioctrl, pdata->mac_regs + MAC_MDIO_ADDRESS); + do { + regval = readreg(pdata->pAdapter, + pdata->mac_regs + MAC_MDIO_ADDRESS); + busy--; + } while ((regval & MAC_MDIO_ADDRESS_BUSY) && (busy)); + + DPRINTK("fxgmac_write_ephy_mmd_reg id %d mmd %d %s, ctrl=0x%08x, data=0x%08x\n", + reg_id, mmd, (regval & 0x1) ? "err" : "ok", regval, data); + + return (regval & MAC_MDIO_ADDRESS_BUSY) ? 
-1 : 0; /* -1 indicates err */ +} + +static void fxgmac_config_flow_control(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + + fxgmac_config_tx_flow_control(pdata); + fxgmac_config_rx_flow_control(pdata); + + fxgmac_read_ephy_reg(pdata, REG_MII_ADVERTISE, ®val); + /* set auto negotiation advertisement pause ability */ + if (pdata->tx_pause || pdata->rx_pause) { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_PAUSE_POS, + PHY_MII_ADVERTISE_PAUSE_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_ASYPAUSE_POS, + PHY_MII_ADVERTISE_ASYPAUSE_LEN, 1); + } else { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_PAUSE_POS, + PHY_MII_ADVERTISE_PAUSE_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_ASYPAUSE_POS, + PHY_MII_ADVERTISE_ASYPAUSE_LEN, 0); + } + fxgmac_write_ephy_reg(pdata, REG_MII_ADVERTISE, regval); + /* after change the auto negotiation advertisement need to soft reset */ + fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, + 1); + fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); +} + +static int fxgmac_set_ephy_autoneg_advertise(struct fxgmac_pdata *pdata, + struct fxphy_ag_adv phy_ag_adv) +{ + u32 regval = 0, ret = 0; + + if (phy_ag_adv.auto_neg_en) { + fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS, + PHY_CR_AUTOENG_LEN, 1); + ret |= fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); + } else { + fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS, + PHY_CR_AUTOENG_LEN, 0); + ret |= fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); + } + + fxgmac_read_ephy_reg(pdata, REG_MII_CTRL1000, ®val); + if (phy_ag_adv.full_1000m) { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_CTRL1000_1000FULL_POS, + PHY_MII_CTRL1000_1000FULL_LEN, 1); + } else { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_CTRL1000_1000FULL_POS, 
+ PHY_MII_CTRL1000_1000FULL_LEN, 0); + } + if (phy_ag_adv.half_1000m) { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_CTRL1000_1000HALF_POS, + PHY_MII_CTRL1000_1000HALF_LEN, 1); + } else { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_CTRL1000_1000HALF_POS, + PHY_MII_CTRL1000_1000HALF_LEN, 0); + } + ret |= fxgmac_write_ephy_reg(pdata, REG_MII_CTRL1000, regval); + + fxgmac_read_ephy_reg(pdata, REG_MII_ADVERTISE, ®val); + + if (phy_ag_adv.full_100m) { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_100FULL_POS, + PHY_MII_ADVERTISE_100FULL_LEN, 1); + } else { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_100FULL_POS, + PHY_MII_ADVERTISE_100FULL_LEN, 0); + } + if (phy_ag_adv.half_100m) { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_100HALF_POS, + PHY_MII_ADVERTISE_100HALF_LEN, 1); + } else { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_100HALF_POS, + PHY_MII_ADVERTISE_100HALF_LEN, 0); + } + if (phy_ag_adv.full_10m) { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_10FULL_POS, + PHY_MII_ADVERTISE_10FULL_LEN, 1); + } else { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_10FULL_POS, + PHY_MII_ADVERTISE_10FULL_LEN, 0); + } + if (phy_ag_adv.half_10m) { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_10HALF_POS, + PHY_MII_ADVERTISE_10HALF_LEN, 1); + } else { + regval = FXGMAC_SET_REG_BITS(regval, + PHY_MII_ADVERTISE_10HALF_POS, + PHY_MII_ADVERTISE_10HALF_LEN, 0); + } + + ret |= fxgmac_write_ephy_reg(pdata, REG_MII_ADVERTISE, regval); + /* after change the auto negotiation advertisement need to soft reset */ + fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, + 1); + ret |= fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); + + return ret; +} + +static int fxgmac_phy_config(struct fxgmac_pdata *pdata) +{ + struct fxphy_ag_adv phy_ag_adv; + + if (pdata->phy_autoeng) { + phy_ag_adv.auto_neg_en = 1; + } else { + 
phy_ag_adv.auto_neg_en = 0; + } + switch (pdata->phy_speed) { + case SPEED_1000: + phy_ag_adv.full_1000m = 1, phy_ag_adv.half_1000m = 0, + phy_ag_adv.full_100m = 1, phy_ag_adv.half_100m = 1, + phy_ag_adv.full_10m = 1, phy_ag_adv.half_10m = 1; + break; + + case SPEED_100: + phy_ag_adv.full_1000m = 0, phy_ag_adv.half_1000m = 0; + if (pdata->phy_duplex) { + phy_ag_adv.full_100m = 1; + } else { + phy_ag_adv.full_100m = 0; + } + phy_ag_adv.half_100m = 1, phy_ag_adv.full_10m = 1, + phy_ag_adv.half_10m = 1; + break; + + case SPEED_10: + phy_ag_adv.full_1000m = 0, phy_ag_adv.half_1000m = 0; + phy_ag_adv.full_100m = 0, phy_ag_adv.half_100m = 0; + if (pdata->phy_duplex) { + phy_ag_adv.full_10m = 1; + } else { + phy_ag_adv.full_10m = 0; + } + phy_ag_adv.half_10m = 1; + break; + + default: + break; + } + return fxgmac_set_ephy_autoneg_advertise(pdata, phy_ag_adv); +} + +static void fxgmac_phy_green_ethernet(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + /* GREEN */ + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_REG_PMA_DBG0_ADC); + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ENABLE_GIGA_POWER_SAVING_FOR_SHORT_CABLE); + + /* CLD */ + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_REG_CLD_REG0); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ENABLE_CLD_NP_WP); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_REG_CLD_REG1); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ENABLE_CLD_GT_HT_BT); + + /* after change green ethernet & CLD need to soft reset */ + fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, + 1); + fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); +} + +static void fxgmac_phy_eee_feature(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + DMA_SBMR); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_EN_LPI_POS, + DMA_SBMR_EN_LPI_LEN, 1); + 
regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_LPI_XIT_PKT_POS, + DMA_SBMR_LPI_XIT_PKT_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_AALE_POS, + DMA_SBMR_AALE_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + DMA_SBMR); + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_LPI_STA); + regval = FXGMAC_SET_REG_BITS(regval, MAC_LPIATE_POS, MAC_LPIATE_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_LPITXA_POS, MAC_LPITXA_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_PLS_POS, MAC_PLS_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_LPIEN_POS, MAC_LPIEN_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_LPI_STA); + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_LPI_TIMER); + regval = FXGMAC_SET_REG_BITS(regval, MAC_LPIET_POS, MAC_LPIET_LEN, + MAC_LPI_ENTRY_TIMER); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_LPI_TIMER); + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_LPI_CONTROL); + regval = FXGMAC_SET_REG_BITS(regval, MAC_TWT_POS, MAC_TWT_LEN, + MAC_TWT_TIMER); + regval = FXGMAC_SET_REG_BITS(regval, MAC_LST_POS, MAC_LST_LEN, + MAC_LST_TIMER); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_LPI_CONTROL); + + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_MS_TIC_COUNTER); + regval = FXGMAC_SET_REG_BITS(regval, MAC_MS_TIC_POS, MAC_MS_TIC_LEN, + MAC_MS_TIC); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_MS_TIC_COUNTER); + + fxgmac_write_ephy_mmd_reg(pdata, REG_MMD_EEE_ABILITY_REG, 0x07, + REG_MMD_EEE_ABILITY_VALUE); + + /* after change EEE need to soft reset */ + fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, + 1); + fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); +} + +static void fxgmac_reset_phy(struct fxgmac_pdata *pdata) +{ + u32 value = 0; + + value = FXGMAC_SET_REG_BITS(value, MGMT_EPHY_CTRL_RESET_POS, + MGMT_EPHY_CTRL_RESET_LEN, + MGMT_EPHY_CTRL_STA_EPHY_RESET); + 
writereg(pdata->pAdapter, value, pdata->base_mem + MGMT_EPHY_CTRL); + usleep_range_ex(pdata->pAdapter, 1500, 1500); +} + +void fxgmac_release_phy(struct fxgmac_pdata *pdata) +{ + u32 value = 0; + + value = FXGMAC_SET_REG_BITS(value, MGMT_EPHY_CTRL_RESET_POS, + MGMT_EPHY_CTRL_RESET_LEN, + MGMT_EPHY_CTRL_STA_EPHY_RELEASE); + writereg(pdata->pAdapter, value, pdata->base_mem + MGMT_EPHY_CTRL); + usleep_range_ex(pdata->pAdapter, 100, 150); + value = readreg(pdata->pAdapter, pdata->base_mem + MGMT_EPHY_CTRL); + DBGPRINT(MP_LOUD, ("0x1004: 0x%x\n", value)); +#ifdef AISC_MODE + fxgmac_read_ephy_reg(pdata, REG_MII_SPEC_CTRL, + &value); /* read phy specific control */ + value = FXGMAC_SET_REG_BITS(value, PHY_MII_SPEC_CTRL_CRS_ON_POS, + PHY_MII_SPEC_CTRL_CRS_ON_LEN, + 1); /* set on crs on */ + fxgmac_write_ephy_reg(pdata, REG_MII_SPEC_CTRL, + value); /* phy specific control set on crs on */ + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_ANALOG_CFG3); + fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &value); + /* VGA bandwidth, default is 2 after reset. Set to 0 to mitigate unstable issue in 130m. 
*/ + value = FXGMAC_SET_REG_BITS(value, + MII_EXT_ANALOG_CFG3_ADC_START_CFG_POS, + MII_EXT_ANALOG_CFG3_ADC_START_CFG_LEN, + MII_EXT_ANALOG_CFG3_ADC_START_CFG_DEFAULT); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, value); + + fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, &value); + /* led index use bit0~bit5 */ + value = FXGMAC_GET_REG_BITS(value, EFUSE_LED_POS, EFUSE_LED_LEN); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_ANALOG_CFG2); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ANALOG_CFG2_LED_VALUE); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_ANALOG_CFG8); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ANALOG_CFG8_LED_VALUE); + + if (EFUSE_LED_COMMON_SOLUTION != value) { + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED0_CFG); + switch (value) { + case EFUSE_LED_SOLUTION1: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION1); + break; + case EFUSE_LED_SOLUTION2: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION2); + break; + case EFUSE_LED_SOLUTION3: + case EFUSE_LED_SOLUTION4: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION3); + break; + default: + /* default solution */ + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION0); + break; + } + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED1_CFG); + switch (value) { + case EFUSE_LED_SOLUTION1: + case EFUSE_LED_SOLUTION4: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION1); + break; + case EFUSE_LED_SOLUTION2: + case EFUSE_LED_SOLUTION3: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION2); + break; + default: + /* default solution */ + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + 
REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION0); + break; + } + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED2_CFG); + switch (value) { + case EFUSE_LED_SOLUTION1: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION0); + break; + case EFUSE_LED_SOLUTION2: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION2); + break; + case EFUSE_LED_SOLUTION3: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION3); + break; + case EFUSE_LED_SOLUTION4: + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION4); + break; + default: + /* default solution */ + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION0); + break; + } + + if (EFUSE_LED_SOLUTION2 == value) { + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED_BLINK_CFG); + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED_BLINK_CFG_SOLUTION2); + } + } +#endif +} + +static void fxgmac_enable_phy_check(struct fxgmac_pdata *pdata) +{ + u32 value = 0; + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_PKG_CFG0); + fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &value); + value = FXGMAC_SET_REG_BITS(value, REG_MII_EXT_PKG_CHECK_POS, + REG_MII_EXT_PKG_CHECK_LEN, + REG_MII_EXT_PKG_ENABLE_CHECK); + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_PKG_CFG0); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, value); +} + +static void fxgmac_disable_phy_check(struct fxgmac_pdata *pdata) +{ + u32 value = 0; + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_PKG_CFG0); + fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &value); + value = FXGMAC_SET_REG_BITS(value, REG_MII_EXT_PKG_CHECK_POS, + REG_MII_EXT_PKG_CHECK_LEN, + REG_MII_EXT_PKG_DISABLE_CHECK); + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_PKG_CFG0); 
+ fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, value); +} + +static void fxgmac_setup_cable_loopback(struct fxgmac_pdata *pdata) +{ + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_SLEEP_CONTROL_REG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_SLEEP_REG_ENABLE_LOOPBACK); + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_LPBK_REG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_LPBK_REG_ENABLE_LOOPBACK); + + fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, + REG_MII_BMCR_ENABLE_LOOPBACK); +} + +static void fxgmac_clean_cable_loopback(struct fxgmac_pdata *pdata) +{ + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_SLEEP_CONTROL_REG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_SLEEP_REG_CLEAN_LOOPBACK); + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_LPBK_REG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_LPBK_REG_CLEAN_LOOPBACK); + + fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, + REG_MII_BMCR_DISABLE_LOOPBACK); +} + +static void fxgmac_disable_phy_sleep(struct fxgmac_pdata *pdata) +{ + u32 value = 0; + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_SLEEP_CONTROL_REG); + fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &value); + + value = FXGMAC_SET_REG_BITS(value, MII_EXT_SLEEP_CONTROL1_EN_POS, + MII_EXT_SLEEP_CONTROL1_EN_LEN, 0); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_SLEEP_CONTROL_REG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, value); +} + +static void fxgmac_enable_phy_sleep(struct fxgmac_pdata *pdata) +{ + u32 value = 0; + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_SLEEP_CONTROL_REG); + fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &value); + + value = FXGMAC_SET_REG_BITS(value, MII_EXT_SLEEP_CONTROL1_EN_POS, + MII_EXT_SLEEP_CONTROL1_EN_LEN, 1); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_SLEEP_CONTROL_REG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 
			value);
}

/* Turn off PHY LED0/LED1/LED2 by clearing their configuration registers
 * in the PHY extended register space (select the register through
 * REG_MII_EXT_ADDR, then write the value through REG_MII_EXT_DATA).
 */
static void fxgmac_close_phy_led(struct fxgmac_pdata *pdata)
{
	fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
			      REG_MII_EXT_COMMON_LED0_CFG);
	fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00);

	fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
			      REG_MII_EXT_COMMON_LED1_CFG);
	fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00);

	fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
			      REG_MII_EXT_COMMON_LED2_CFG);
	fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00);
}

/* Program the S0 (active) LED settings from pdata->led, but only when
 * the efuse LED index selects the "common LED solution".
 * NOTE(review): the function name carries the original "fxmgac" typo;
 * kept unchanged to preserve linkage with callers elsewhere.
 */
static void fxmgac_config_led_under_active(struct fxgmac_pdata *pdata)
{
	u32 regval = 0;

	fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, &regval);
	/* led index use bit0~bit5 */
	regval = FXGMAC_GET_REG_BITS(regval, EFUSE_LED_POS, EFUSE_LED_LEN);
	if (EFUSE_LED_COMMON_SOLUTION == regval) {
		DbgPrintF(MP_TRACE, "%s >>>", __FUNCTION__);
		/* five writes: common cfg, LED0..LED2 cfg, blink cfg */
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s0_led_setting[0]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED0_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s0_led_setting[1]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED1_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s0_led_setting[2]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED2_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s0_led_setting[3]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED_BLINK_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s0_led_setting[4]);
	}
}

/* Same register sequence as the S0 variant above, but programs the
 * S3 (sleep) LED settings.
 */
static void fxgmac_config_led_under_sleep(struct fxgmac_pdata *pdata)
{
	u32 regval = 0;

	fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, &regval);
	/* led index use bit0~bit5 */
	regval = FXGMAC_GET_REG_BITS(regval, EFUSE_LED_POS, EFUSE_LED_LEN);
	if (EFUSE_LED_COMMON_SOLUTION == regval) {
		DbgPrintF(MP_TRACE, "%s >>>", __FUNCTION__);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s3_led_setting[0]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED0_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s3_led_setting[1]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED1_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s3_led_setting[2]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED2_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s3_led_setting[3]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED_BLINK_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s3_led_setting[4]);
	}
}

/* S5 (shutdown) LED settings; same register sequence as above. */
static void fxgmac_config_led_under_shutdown(struct fxgmac_pdata *pdata)
{
	u32 regval = 0;

	fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, &regval);
	/* led index use bit0~bit5 */
	regval = FXGMAC_GET_REG_BITS(regval, EFUSE_LED_POS, EFUSE_LED_LEN);
	if (EFUSE_LED_COMMON_SOLUTION == regval) {
		DbgPrintF(MP_TRACE, "%s >>>", __FUNCTION__);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s5_led_setting[0]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED0_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s5_led_setting[1]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED1_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s5_led_setting[2]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED2_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s5_led_setting[3]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED_BLINK_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.s5_led_setting[4]);
	}
}

/* LED settings for the "disabled" state.  For the common LED solution
 * the disable settings are programmed; for every other solution the PHY
 * is reset, which closes the LEDs.
 */
static void fxgmac_config_led_under_disable(struct fxgmac_pdata *pdata)
{
	u32 regval = 0;

	fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, &regval);
	/* led index use bit0~bit5 */
	regval = FXGMAC_GET_REG_BITS(regval, EFUSE_LED_POS, EFUSE_LED_LEN);
	if (EFUSE_LED_COMMON_SOLUTION == regval) {
		DbgPrintF(MP_TRACE, "%s >>>", __FUNCTION__);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.disable_led_setting[0]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED0_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.disable_led_setting[1]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED1_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.disable_led_setting[2]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED2_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.disable_led_setting[3]);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
				      REG_MII_EXT_COMMON_LED_BLINK_CFG);
		fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA,
				      pdata->led.disable_led_setting[4]);
	} else {
		/* http://redmine.motor-comm.com/issues/4101 */
		/* for disable case, reset phy to close LED */
		fxgmac_reset_phy(pdata);
	}
}

extern void fxgmac_diag_get_rx_info(struct fxgmac_channel *channel);

/* Parse the Rx descriptor at ring->cur for this channel.
 * Returns 1 when the descriptor is still owned by hardware (nothing to
 * read), 0 otherwise.  Results are reported through ring->pkt_info
 * (attributes/errors/vlan_ctag) and desc_data->rx (len/hdr_len).
 * NOTE(review): cnt_incomplete is function-static, so it is shared by
 * all channels; it is debug-print bookkeeping only.
 */
static int fxgmac_dev_read(struct fxgmac_channel *channel)
{
	struct fxgmac_pdata *pdata = channel->pdata;
	struct fxgmac_ring *ring = channel->rx_ring;
	struct net_device *netdev = pdata->netdev;
	struct fxgmac_desc_data *desc_data;
	struct fxgmac_dma_desc *dma_desc;
	struct fxgmac_pkt_info *pkt_info;
	unsigned int err, etlt, l34t;

	static unsigned int cnt_incomplete;

	desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur);
	dma_desc = desc_data->dma_desc;
	pkt_info = &ring->pkt_info;

	/* Check for data availability */
	if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_OWN_POS,
				   RX_NORMAL_DESC3_OWN_LEN)) {
		return 1;
	}

	/* Make sure descriptor fields are read after reading the OWN bit */
	dma_rmb();

	if (netif_msg_rx_status(pdata))
		fxgmac_dump_rx_desc(pdata, ring, ring->cur);

	if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_CTXT_POS,
				   RX_NORMAL_DESC3_CTXT_LEN)) {
		/* Timestamp Context Descriptor */
		fxgmac_get_rx_tstamp(pkt_info, dma_desc);

		pkt_info->attributes = FXGMAC_SET_REG_BITS(
			pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS,
			RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 1);
		pkt_info->attributes = FXGMAC_SET_REG_BITS(
			pkt_info->attributes,
			RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
			RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN, 0);
		if (netif_msg_rx_status(pdata))
			DPRINTK("dev_read context desc, ch=%s\n", channel->name);
		return 0;
	}

	/* Normal Descriptor, be sure Context Descriptor bit is off */
	pkt_info->attributes = FXGMAC_SET_REG_BITS(
		pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS,
		RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 0);

	/* Get the header length */
	if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_FD_POS,
				   RX_NORMAL_DESC3_FD_LEN)) {
		desc_data->rx.hdr_len = FXGMAC_GET_REG_BITS_LE(
			dma_desc->desc2, RX_NORMAL_DESC2_HL_POS,
			RX_NORMAL_DESC2_HL_LEN);
		if (desc_data->rx.hdr_len)
			pdata->stats.rx_split_header_packets++;
	}
	/* NOTE(review): l34t is zeroed and never used below; dead local */
	l34t = 0;

	/* Get the pkt_info length */
	desc_data->rx.len = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3,
						   RX_NORMAL_DESC3_PL_POS,
						   RX_NORMAL_DESC3_PL_LEN);

	if (!FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_LD_POS,
				    RX_NORMAL_DESC3_LD_LEN)) {
		/* Not all the data has been transferred for this pkt_info */
		pkt_info->attributes = FXGMAC_SET_REG_BITS(
			pkt_info->attributes,
			RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
			RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 1);
		cnt_incomplete++;
		if ((cnt_incomplete < 2) && netif_msg_rx_status(pdata))
			DPRINTK("dev_read NOT last desc, pkt incomplete yet,%u\n",
				cnt_incomplete);

		return 0;
	}
	if ((cnt_incomplete) && netif_msg_rx_status(pdata))
		DPRINTK("dev_read rx back to normal and incomplete cnt=%u\n",
			cnt_incomplete);
	cnt_incomplete = 0; /* when back to normal, reset cnt */

	/* This is the last of the data for this pkt_info */
	pkt_info->attributes = FXGMAC_SET_REG_BITS(
		pkt_info->attributes, RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
		RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 0);

	/* Set checksum done indicator as appropriate */
	if (netdev->features & NETIF_F_RXCSUM)
		pkt_info->attributes = FXGMAC_SET_REG_BITS(
			pkt_info->attributes,
			RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
			RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, 1);

	/* Check for errors (only valid in last descriptor) */
	err = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ES_POS,
				     RX_NORMAL_DESC3_ES_LEN);
	etlt = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ETLT_POS,
				      RX_NORMAL_DESC3_ETLT_LEN);
	if ((err) && netif_msg_rx_status(pdata)) {
		DPRINTK("dev_read:head_len=%u, pkt_len=%u, err=%u, etlt=%#x, desc2=0x%08x, desc3=0x%08x\n",
			desc_data->rx.hdr_len, desc_data->rx.len, err, etlt,
			dma_desc->desc2, dma_desc->desc3);
	}

	if (!err || !etlt) {
		/* No error if err is 0 or etlt is 0 */
		if ((etlt == 0x4 /*yzhang changed to 0x4, 0x09*/) &&
		    (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
			/* etlt 0x4: VLAN-tagged frame; extract outer VLAN tag */
			pkt_info->attributes = FXGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
				RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN, 1);
			pkt_info->vlan_ctag = FXGMAC_GET_REG_BITS_LE(
				dma_desc->desc0, RX_NORMAL_DESC0_OVT_POS,
				RX_NORMAL_DESC0_OVT_LEN);
			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
				  pkt_info->vlan_ctag);
		}
	} else {
		/* etlt 0x05/0x06: checksum error -> clear CSUM_DONE so the
		 * stack re-verifies; anything else is a frame error.
		 */
		if (etlt == 0x05 || etlt == 0x06)
			pkt_info->attributes = FXGMAC_SET_REG_BITS(
				pkt_info->attributes,
				RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
				RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, 0);
		else
			pkt_info->errors = FXGMAC_SET_REG_BITS(
				pkt_info->errors, RX_PACKET_ERRORS_FRAME_POS,
				RX_PACKET_ERRORS_FRAME_LEN, 1);
	}

	return 0;
}

/* Enable one interrupt source (or restore all saved sources for
 * FXGMAC_INT_DMA_ALL) in the channel's DMA_CH_IER register.
 * Returns 0 on success, -1 for an unknown int_id.
 */
static int fxgmac_enable_int(struct fxgmac_channel *channel,
			     enum fxgmac_int int_id)
{
	unsigned int dma_ch_ier;

	dma_ch_ier = readreg(channel->pdata->pAdapter,
			     FXGMAC_DMA_REG(channel, DMA_CH_IER));

	switch (int_id) {
	case FXGMAC_INT_DMA_CH_SR_TI:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_TIE_POS,
						 DMA_CH_IER_TIE_LEN, 1);
		break;
	case FXGMAC_INT_DMA_CH_SR_TPS:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier,
						 DMA_CH_IER_TXSE_POS,
						 DMA_CH_IER_TXSE_LEN, 1);
		break;
	case FXGMAC_INT_DMA_CH_SR_TBU:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier,
						 DMA_CH_IER_TBUE_POS,
						 DMA_CH_IER_TBUE_LEN, 1);
		break;
	case FXGMAC_INT_DMA_CH_SR_RI:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_RIE_POS,
						 DMA_CH_IER_RIE_LEN, 1);
		break;
	case FXGMAC_INT_DMA_CH_SR_RBU:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier,
						 DMA_CH_IER_RBUE_POS,
						 DMA_CH_IER_RBUE_LEN, 1);
		break;
	case FXGMAC_INT_DMA_CH_SR_RPS:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_RSE_POS,
						 DMA_CH_IER_RSE_LEN, 1);
		break;
	case FXGMAC_INT_DMA_CH_SR_TI_RI:
		/* Tx + Rx completion plus the normal-interrupt summary bit */
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_TIE_POS,
						 DMA_CH_IER_TIE_LEN, 1);
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_RIE_POS,
						 DMA_CH_IER_RIE_LEN, 1);
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_NIE_POS,
						 DMA_CH_IER_NIE_LEN, 1);
		break;
	case FXGMAC_INT_DMA_CH_SR_FBE:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier,
						 DMA_CH_IER_FBEE_POS,
						 DMA_CH_IER_FBEE_LEN, 1);
		break;
	case FXGMAC_INT_DMA_ALL:
		/* restore the mask saved by fxgmac_disable_int() */
		dma_ch_ier |= channel->saved_ier;
		break;
	default:
		return -1;
	}

	writereg(channel->pdata->pAdapter, dma_ch_ier,
		 FXGMAC_DMA_REG(channel, DMA_CH_IER));

	return 0;
}

/* Disable one interrupt source in DMA_CH_IER; FXGMAC_INT_DMA_ALL saves
 * the currently enabled sources into channel->saved_ier so
 * fxgmac_enable_int() can restore them later.
 * Returns 0 on success, -1 for an unknown int_id.
 */
static int fxgmac_disable_int(struct fxgmac_channel *channel,
			      enum fxgmac_int int_id)
{
	unsigned int dma_ch_ier;

	dma_ch_ier = readreg(channel->pdata->pAdapter,
			     FXGMAC_DMA_REG(channel, DMA_CH_IER));

	switch (int_id) {
	case FXGMAC_INT_DMA_CH_SR_TI:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_TIE_POS,
						 DMA_CH_IER_TIE_LEN, 0);
		break;
	case FXGMAC_INT_DMA_CH_SR_TPS:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier,
						 DMA_CH_IER_TXSE_POS,
						 DMA_CH_IER_TXSE_LEN, 0);
		break;
	case FXGMAC_INT_DMA_CH_SR_TBU:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier,
						 DMA_CH_IER_TBUE_POS,
						 DMA_CH_IER_TBUE_LEN, 0);
		break;
	case FXGMAC_INT_DMA_CH_SR_RI:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_RIE_POS,
						 DMA_CH_IER_RIE_LEN, 0);
		break;
	case FXGMAC_INT_DMA_CH_SR_RBU:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier,
						 DMA_CH_IER_RBUE_POS,
						 DMA_CH_IER_RBUE_LEN, 0);
		break;
	case FXGMAC_INT_DMA_CH_SR_RPS:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_RSE_POS,
						 DMA_CH_IER_RSE_LEN, 0);
		break;
	case FXGMAC_INT_DMA_CH_SR_TI_RI:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_TIE_POS,
						 DMA_CH_IER_TIE_LEN, 0);
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_RIE_POS,
						 DMA_CH_IER_RIE_LEN, 0);
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier, DMA_CH_IER_NIE_POS,
						 DMA_CH_IER_NIE_LEN, 0);
		break;
	case FXGMAC_INT_DMA_CH_SR_FBE:
		dma_ch_ier = FXGMAC_SET_REG_BITS(dma_ch_ier,
						 DMA_CH_IER_FBEE_POS,
						 DMA_CH_IER_FBEE_LEN, 0);
		break;
	case FXGMAC_INT_DMA_ALL:
		channel->saved_ier = dma_ch_ier & FXGMAC_DMA_INTERRUPT_MASK;
		dma_ch_ier &= ~FXGMAC_DMA_INTERRUPT_MASK;
		break;
	default:
		return -1;
	}

	writereg(channel->pdata->pAdapter, dma_ch_ier,
		 FXGMAC_DMA_REG(channel, DMA_CH_IER));

	return 0;
}

/* Acknowledge all pending DMA channel interrupts by writing the status
 * register value back (write-1-to-clear).  int_id is unused; the
 * self-assignment only silences the unused-parameter warning.
 */
static int fxgmac_dismiss_DMA_int(struct fxgmac_channel *channel, int int_id)
{
	unsigned int dma_ch_ier;

	int_id = int_id;
	dma_ch_ier = readreg(channel->pdata->pAdapter,
			     FXGMAC_DMA_REG(channel, DMA_CH_SR /*1160*/));
	writereg(channel->pdata->pAdapter, dma_ch_ier,
		 FXGMAC_DMA_REG(channel, DMA_CH_SR));

	return 0;
}

static void
fxgmac_dismiss_MTL_Q_int(struct fxgmac_pdata *pdata) +{ + unsigned int q_count, i; + unsigned int mtl_q_isr; + + q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); + for (i = 0; i < q_count; i++) { + /* Clear all the interrupts which are set */ + mtl_q_isr = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); + writereg(pdata->pAdapter, mtl_q_isr, + FXGMAC_MTL_REG(pdata, i, MTL_Q_ISR)); + } +} + +static int fxgmac_dismiss_MAC_int(struct fxgmac_pdata *pdata) +{ + u32 regval, regErrVal; + + /* all MAC interrupts in 0xb0 */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_ISR); + /* MAC tx/rx error interrupts in 0xb8 */ + regErrVal = readreg(pdata->pAdapter, pdata->mac_regs + MAC_TX_RX_STA); + return 0; +} + +static int fxgmac_dismiss_MAC_PMT_int(struct fxgmac_pdata *pdata) +{ + u32 regval; + + /* MAC PMT interrupts in 0xc0 */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA); + return 0; +} + +static int fxgmac_dismiss_MAC_LPI_int(struct fxgmac_pdata *pdata) +{ + u32 regval; + + /* MAC PMT interrupts in 0xc0 */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_LPI_STA); + + return 0; +} + +static int fxgmac_dismiss_MAC_DBG_int(struct fxgmac_pdata *pdata) +{ + u32 regval; + + /* MAC PMT interrupts in 0xc0 */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_DBG_STA); + + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_DBG_STA); + + return 0; +} + +int fxgmac_dismiss_all_int(struct fxgmac_pdata *pdata) +{ + struct fxgmac_channel *channel; + unsigned int i, regval; + struct net_device *netdev = pdata->netdev; + + if (netif_msg_drv(pdata)) { + DPRINTK("fxgmac_dismiss_all_int callin\n"); + } + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + fxgmac_dismiss_DMA_int(channel, 0); + } + fxgmac_dismiss_MTL_Q_int(pdata); + fxgmac_dismiss_MAC_int(pdata); + fxgmac_dismiss_MAC_PMT_int(pdata); + fxgmac_dismiss_MAC_LPI_int(pdata); + 
fxgmac_dismiss_MAC_DBG_int(pdata); + + /* control module int to PCIe slot */ + if (netdev->base_addr) { + regval = (unsigned int)(*( + (u32 *)(netdev->base_addr + MGMT_INT_CTRL0))); + } + return 0; +} + +static void fxgmac_set_interrupt_moderation(struct fxgmac_pdata *pdata) +{ + u32 value = 0, time; + + pdata->intr_mod_timer = INT_MOD_IN_US; + + time = (pdata->intr_mod) ? pdata->intr_mod_timer : 0; + time = (pdata->intr_mod) ? pdata->tx_usecs : 0; + value = FXGMAC_SET_REG_BITS(value, INT_MOD_TX_POS, INT_MOD_TX_LEN, + time); + time = (pdata->intr_mod) ? pdata->rx_usecs : 0; + value = FXGMAC_SET_REG_BITS(value, INT_MOD_RX_POS, INT_MOD_RX_LEN, + time); + writereg(pdata->pAdapter, value, pdata->base_mem + INT_MOD); +} +static void fxgmac_enable_msix_rxtxinterrupt(struct fxgmac_pdata *pdata) +{ + u32 intid; + + for (intid = 0; intid < MSIX_TBL_RXTX_NUM; intid++) { + writereg(pdata->pAdapter, 0, + pdata->base_mem + MSIX_TBL_BASE_ADDR + + MSIX_TBL_MASK_OFFSET + intid * 16); + } +} +static void fxgmac_disable_msix_interrupt(struct fxgmac_pdata *pdata) +{ + u32 intid; + + for (intid = 0; intid < MSIX_TBL_MAX_NUM; intid++) { + writereg(pdata->pAdapter, 0x1, + pdata->base_mem + MSIX_TBL_BASE_ADDR + + MSIX_TBL_MASK_OFFSET + intid * 16); + } +} +static void fxgmac_enable_msix_rxtxphyinterrupt(struct fxgmac_pdata *pdata) +{ + u32 intid, regval = 0; +#if !(FUXI_EPHY_INTERRUPT_D0_OFF) + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; +#endif + + for (intid = 0; intid < MSIX_TBL_RXTX_NUM; intid++) { + writereg(pdata->pAdapter, 0, + pdata->base_mem + MSIX_TBL_BASE_ADDR + + MSIX_TBL_MASK_OFFSET + intid * 16); + } + writereg(pdata->pAdapter, 0, + pdata->base_mem + MSIX_TBL_BASE_ADDR + MSIX_TBL_MASK_OFFSET + + MSI_ID_PHY_OTHER * 16); +#if !(FUXI_EPHY_INTERRUPT_D0_OFF) + hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, + NULL); /* clear phy interrupt */ + regval = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS, + PHY_INT_MASK_LINK_UP_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, 
PHY_INT_MASK_LINK_DOWN_POS, + PHY_INT_MASK_LINK_DOWN_LEN, 1); + hw_ops->write_ephy_reg( + pdata, REG_MII_INT_MASK, + regval); /* enable phy interrupt ASIC bit10 linkup bit11 linkdown */ +#endif +} +static void fxgmac_enable_msix_one_interrupt(struct fxgmac_pdata *pdata, + u32 intid) +{ + writereg(pdata->pAdapter, 0, + pdata->base_mem + MSIX_TBL_BASE_ADDR + MSIX_TBL_MASK_OFFSET + + intid * 16); +} + +static void fxgmac_disable_msix_one_interrupt(struct fxgmac_pdata *pdata, + u32 intid) +{ + writereg(pdata->pAdapter, 0x01, + pdata->base_mem + MSIX_TBL_BASE_ADDR + MSIX_TBL_MASK_OFFSET + + intid * 16); +} + +static bool fxgmac_enable_mgm_interrupt(struct fxgmac_pdata *pdata) +{ + writereg(pdata->pAdapter, 0xf0000000, pdata->base_mem + MGMT_INT_CTRL0); + return true; +} + +static bool fxgmac_disable_mgm_interrupt(struct fxgmac_pdata *pdata) +{ + writereg(pdata->pAdapter, 0xffff0000, pdata->base_mem + MGMT_INT_CTRL0); + return true; +} + +static int fxgmac_flush_tx_queues(struct fxgmac_pdata *pdata) +{ + unsigned int i, count; + u32 regval; + + for (i = 0; i < pdata->tx_q_count; i++) { + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_FTQ_POS, + MTL_Q_TQOMR_FTQ_LEN, 1); + writereg(pdata->pAdapter, regval, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + DPRINTK("fxgmac_flush_tx_queues, reg=0x%p, val=0x%08x\n", + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR), regval); + } + + for (i = 0; i < pdata->tx_q_count; i++) { + count = 2000; + do { + usleep_range_ex(pdata->pAdapter, 40, 50); + regval = readreg(pdata->pAdapter, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + regval = FXGMAC_GET_REG_BITS(regval, + MTL_Q_TQOMR_FTQ_POS, + MTL_Q_TQOMR_FTQ_LEN); + + } while (--count && regval); + DPRINTK("fxgmac_flush_tx_queues wait... 
reg=0x%p, val=0x%08x\n", + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR), regval); + if (regval) { /*(!count)*/ + return -EBUSY; + } + } + + return 0; +} + +static void fxgmac_config_dma_bus(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->mac_regs + DMA_SBMR); + /* Set enhanced addressing mode */ + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS, + DMA_SBMR_EAME_LEN, 1); + /* Set the System Bus mode */ + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_FB_POS, DMA_SBMR_FB_LEN, + 0); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_4_POS, + DMA_SBMR_BLEN_4_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_8_POS, + DMA_SBMR_BLEN_8_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_16_POS, + DMA_SBMR_BLEN_16_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_BLEN_32_POS, + DMA_SBMR_BLEN_32_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + DMA_SBMR); +} + +static void fxgmac_legacy_link_speed_setting(struct fxgmac_pdata *pdata) +{ + unsigned int i = 0, regval = 0; + + fxgmac_phy_config(pdata); + for (i = 0, regval = fxgmac_get_ephy_state(pdata); + (!(regval & MGMT_EPHY_CTRL_STA_EPHY_RELEASE) || + !(regval & MGMT_EPHY_CTRL_STA_EPHY_LINKUP)) && + (i < PHY_LINK_TIMEOUT); + regval = fxgmac_get_ephy_state(pdata), i++) { + usleep_range_ex(pdata->pAdapter, 2000, 2000); + } + fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS, + NULL); /* clear phy interrupt. */ +} + +static void fxgmac_pre_powerdown(struct fxgmac_pdata *pdata, bool phyloopback) +{ + unsigned int regval = 0; + + fxgmac_disable_rx(pdata); + + /* HERE, WE NEED TO CONSIDER PHY CONFIG...TBD */ + DPRINTK("fxgmac_config_powerdown, phy and mac status update\n"); + /* for phy cable loopback, it can't configure phy speed, it will cause os resume again by link change although it has finished speed setting, */ + if (!phyloopback) { + /* When the Linux platform enters the s4 state, it goes through + * the suspend->resume->suspend process. 
The process of + * suspending again after resume is fast, and PHY + * auto-negotiation is not yet complete, so the + * auto-negotiation of PHY must be carried out again. When the + * Linux platform enters the s4 state, force speed to 10M. + */ + pdata->phy_speed = SPEED_10; + fxgmac_legacy_link_speed_setting(pdata); + } + + fxgmac_config_mac_speed(pdata); + + /* After enable OOB_WOL from efuse, mac will loopcheck phy status, and + * lead to panic sometimes. So we should disable it from powerup, + * enable it from power down. + */ + regval = (u32)readreg(pdata->pAdapter, pdata->base_mem + OOB_WOL_CTRL); + regval = FXGMAC_SET_REG_BITS(regval, OOB_WOL_CTRL_DIS_POS, + OOB_WOL_CTRL_DIS_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->base_mem + OOB_WOL_CTRL); + usleep_range_ex(pdata->pAdapter, 2000, 2000); + + /* after enable OOB_WOL, recofigure mac addr again */ + fxgmac_set_mac_address(pdata, pdata->mac_addr); +} + +/* only supports four patterns, and patterns will be cleared on every call */ +static void fxgmac_set_pattern_data(struct fxgmac_pdata *pdata) +{ + u32 ip_addr, i = 0; + u8 type_offset, op_offset, tip_offset; + struct pattern_packet packet; + struct wol_bitmap_pattern + pattern[4]; /* for WAKE_UCAST, WAKE_BCAST, WAKE_MCAST, WAKE_ARP. 
*/ + + memset(pattern, 0, sizeof(struct wol_bitmap_pattern) * 4); + + /* config ucast */ + if (pdata->expansion.wol & WAKE_UCAST) { + pattern[i].mask_info[0] = 0x3F; + pattern[i].mask_size = sizeof(pattern[0].mask_info); + memcpy(pattern[i].pattern_info, pdata->mac_addr, ETH_ALEN); + pattern[i].pattern_offset = 0; + i++; + } + + /* config bcast */ + if (pdata->expansion.wol & WAKE_BCAST) { + pattern[i].mask_info[0] = 0x3F; + pattern[i].mask_size = sizeof(pattern[0].mask_info); + memset(pattern[i].pattern_info, 0xFF, ETH_ALEN); + pattern[i].pattern_offset = 0; + i++; + } + + /* config mcast */ + if (pdata->expansion.wol & WAKE_MCAST) { + pattern[i].mask_info[0] = 0x7; + pattern[i].mask_size = sizeof(pattern[0].mask_info); + pattern[i].pattern_info[0] = 0x1; + pattern[i].pattern_info[1] = 0x0; + pattern[i].pattern_info[2] = 0x5E; + pattern[i].pattern_offset = 0; + i++; + } + + /* config arp */ + if (pdata->expansion.wol & WAKE_ARP) { + memset(pattern[i].mask_info, 0, sizeof(pattern[0].mask_info)); + type_offset = offsetof(struct pattern_packet, ar_pro); + pattern[i].mask_info[type_offset / 8] |= 1 << type_offset % 8; + type_offset++; + pattern[i].mask_info[type_offset / 8] |= 1 << type_offset % 8; + op_offset = offsetof(struct pattern_packet, ar_op); + pattern[i].mask_info[op_offset / 8] |= 1 << op_offset % 8; + op_offset++; + pattern[i].mask_info[op_offset / 8] |= 1 << op_offset % 8; + tip_offset = offsetof(struct pattern_packet, ar_tip); + pattern[i].mask_info[tip_offset / 8] |= 1 << tip_offset % 8; + tip_offset++; + pattern[i].mask_info[tip_offset / 8] |= 1 << type_offset % 8; + tip_offset++; + pattern[i].mask_info[tip_offset / 8] |= 1 << type_offset % 8; + tip_offset++; + pattern[i].mask_info[tip_offset / 8] |= 1 << type_offset % 8; + + packet.ar_pro = + 0x0 << 8 | + 0x08; /* arp type is 0x0800, notice that ar_pro and ar_op is big endian */ + packet.ar_op = + 0x1 + << 8; /* 1 is arp request,2 is arp replay, 3 is rarp request, 4 is rarp replay */ + ip_addr = 
fxgmac_get_netdev_ip4addr(pdata); + packet.ar_tip[0] = ip_addr & 0xFF; + packet.ar_tip[1] = (ip_addr >> 8) & 0xFF; + packet.ar_tip[2] = (ip_addr >> 16) & 0xFF; + packet.ar_tip[3] = (ip_addr >> 24) & 0xFF; + memcpy(pattern[i].pattern_info, &packet, MAX_PATTERN_SIZE); + pattern[i].mask_size = sizeof(pattern[0].mask_info); + pattern[i].pattern_offset = 0; + i++; + } + + fxgmac_set_wake_pattern(pdata, pattern, i); +} + +static void fxgmac_config_powerdown(struct fxgmac_pdata *pdata, + unsigned int wol) +{ + u32 regval = 0; + + fxgmac_disable_tx(pdata); + fxgmac_disable_rx(pdata); + + /* performs fxgmac power down sequence + * 1. set led + * 2. check wol. + * 3. check arp offloading + * 4. disable gmac rx + * 5. set gmac power down + */ + + /* Close LED when entering the S3, S4, S5 except solution3 */ + fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, ®val); + /* led index use bit0~bit5 */ + regval = FXGMAC_GET_REG_BITS(regval, EFUSE_LED_POS, EFUSE_LED_LEN); + if (EFUSE_LED_COMMON_SOLUTION != regval) { + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED0_CFG); + if (EFUSE_LED_SOLUTION3 == regval) { + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_COMMON_LED0_CFG_VALUE_SLEEP_SOLUTION3); + } else { + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00); + } + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED1_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00); + + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_COMMON_LED2_CFG); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00); + } + + if (!test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) { + netdev_err( + pdata->netdev, + "fxgmac powerstate is %lu when config power to down.\n", + pdata->expansion.powerstate); + } + +#if FXGMAC_WOL_FEATURE_ENABLED + fxgmac_config_wol(pdata, wol); +#endif +#if FXGMAC_AOE_FEATURE_ENABLED + /* use default arp offloading feature */ + fxgmac_update_aoe_ipv4addr(pdata, (u8 *)NULL); + 
fxgmac_enable_arp_offload(pdata); +#endif + +#if FXGMAC_NS_OFFLOAD_ENABLED + /* pls do not change the seq below */ + fxgmac_update_ns_offload_ipv6addr(pdata, FXGMAC_NS_IFA_GLOBAL_UNICAST); + fxgmac_update_ns_offload_ipv6addr(pdata, FXGMAC_NS_IFA_LOCAL_LINK); + fxgmac_enable_ns_offload(pdata); +#endif + + /* Enable MAC Rx TX */ + if (1) { + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, + MAC_CR_RE_LEN, 1); + if (pdata->hw_feat.aoe) { + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_TE_POS, + MAC_CR_TE_LEN, 1); + } + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); + } + + regval = readreg(pdata->pAdapter, pdata->base_mem + LPW_CTRL); + regval = FXGMAC_SET_REG_BITS(regval, LPW_CTRL_ASPM_LPW_EN_POS, + LPW_CTRL_ASPM_LPW_EN_LEN, + 1); /* Enable PCIE PM_L23. */ + + writereg(pdata->pAdapter, regval, pdata->base_mem + LPW_CTRL); + + /* set gmac power down */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA); + regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_PWRDWN_POS, + MAC_PMT_STA_PWRDWN_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA); + + /* adjust sigdet threshold + * redmine.motor-comm.com/issues/5093 + * fix issue can not wake up os on some FT-D2000 platform, y + * this modification is only temporarif it is 55mv, wol maybe failed. 
+ */ + + regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_SIGDET); + regval = FXGMAC_SET_REG_BITS(regval, MGMT_SIGDET_POS, MGMT_SIGDET_LEN, + MGMT_SIGDET_40MV); + writereg(pdata->pAdapter, regval, pdata->base_mem + MGMT_SIGDET); + DPRINTK("fxgmac_config_powerdown callout, reg=0x%08x\n", regval); +} + +static void fxgmac_config_powerup(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + + if (test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) { + netdev_err( + pdata->netdev, + "fxgmac powerstate is %lu when config power to up.\n", + pdata->expansion.powerstate); + } + + /* After enable OOB_WOL from efuse, mac will loopcheck phy status, and lead to panic sometimes. + * So we should disable it from powerup, enable it from power down. + */ + regval = (u32)readreg(pdata->pAdapter, pdata->base_mem + OOB_WOL_CTRL); + regval = FXGMAC_SET_REG_BITS(regval, OOB_WOL_CTRL_DIS_POS, + OOB_WOL_CTRL_DIS_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->base_mem + OOB_WOL_CTRL); + + /* clear wpi mode whether or not waked by WOL, write reset value */ + regval = + (u32)readreg(pdata->pAdapter, pdata->base_mem + MGMT_WPI_CTRL0); + + regval = FXGMAC_SET_REG_BITS(regval, MGMT_WPI_CTRL0_WPI_MODE_POS, + MGMT_WPI_CTRL0_WPI_MODE_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->base_mem + MGMT_WPI_CTRL0); + /* read pmt_status register to De-assert the pmt_intr_o */ + regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA); + /* wether or not waked up by WOL, write reset value */ + regval = FXGMAC_SET_REG_BITS(regval, MAC_PMT_STA_PWRDWN_POS, + MAC_PMT_STA_PWRDWN_LEN, 0); + /* write register to synchronized always-on block */ + writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PMT_STA); + + /* Disable fast link mode*/ + cfg_r32(pdata, REG_POWER_EIOS, ®val); + regval = FXGMAC_SET_REG_BITS(regval, POWER_EIOS_POS, POWER_EIOS_LEN, 0); + cfg_w32(pdata, REG_POWER_EIOS, regval); + + fxgmac_pwr_clock_gate(pdata); +} + +#if FXGMAC_SANITY_CHECK_ENABLED +/* + * 
fxgmac_diag_sanity_check
 * check if there is any error like tx q hang
 * return: 0 normal and other fatal error
 */
static int fxgmac_diag_sanity_check(struct fxgmac_pdata *pdata)
{
	u32 reg_q_val, reg_tail_val;
	/* function-static history: previous tail pointer and the number
	 * of consecutive polls it stayed unchanged while the queue was
	 * non-empty
	 */
	static u32 reg_tail_pre;
	static int cnt;

	reg_q_val = readreg(pdata->pAdapter,
			    FXGMAC_MTL_REG(pdata, 0 /* tx channe 0 */,
					   0x8 /* 0x2d08 */));
	if (!(reg_q_val & 0x10)) { /* tx q is empty */
		return 0;
	}
	reg_tail_val =
		readreg(pdata->pAdapter,
			FXGMAC_DMA_REG(pdata->channel_head, DMA_CH_TDTR_LO));
	if (reg_tail_pre != reg_tail_val) {
		reg_tail_pre = reg_tail_val;
		cnt = 0;
	} else {
		cnt++;
	}

	/* tail unchanged for >10 polls with a non-empty queue: suspected
	 * tx queue hang; re-read the queue status to double check
	 */
	if (cnt > 10) {
		reg_q_val = readreg(pdata->pAdapter,
				    FXGMAC_MTL_REG(pdata, 0 /* tx channe 0 */,
						   0x8 /* 0x2d08 */));
		if (reg_q_val & 0x10) { /* double check */
			DPRINTK("fxgmac, WARNing, tx Q status is 0x%x and tail keeps unchanged for %d times, 0x%x\n",
				reg_q_val, cnt, reg_tail_val);
			return 1;
		}
	}

	return 0;
}
#endif
/* Power saving for sleep: configure the PHY to turn its PLL off while
 * in sleep mode.
 */
static void fxgmac_pwr_clock_gate(struct fxgmac_pdata *pdata)
{
	u32 regval = 0;

	fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
			      REG_MII_EXT_SLEEP_CONTROL1);
	fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &regval);
	/* close pll in sleep mode */
	regval = FXGMAC_SET_REG_BITS(regval,
				     MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_POS,
				     MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_LEN,
				     0);
	fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, regval);
}
/* Inverse of fxgmac_pwr_clock_gate(): keep the PHY PLL on in sleep
 * mode (bit set to 1).
 */
static void fxgmac_pwr_clock_ungate(struct fxgmac_pdata *pdata)
{
	u32 regval = 0;

	fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR,
			      REG_MII_EXT_SLEEP_CONTROL1);
	fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &regval);
	/* keep pll on in sleep mode */
	regval = FXGMAC_SET_REG_BITS(regval,
				     MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_POS,
				     MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_LEN,
				     1);
	fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, regval);
}

/* context - pointer to struct nic_pdata.
 * Suspend-time interrupt setup: mask everything except the PMT wake
 * vector and, per build config, arm the PHY link-change interrupt so a
 * cable event can wake the system.  Always returns true.
 */
static unsigned char fxgmac_suspend_int(void *context)
{
	/* ULONG_PTR addr; */
	u32 intid;
#if FUXI_EPHY_INTERRUPT_D0_OFF
	u32 regval = 0;
#endif
	u32 val_mgmt_intcrtl0;
	struct fxgmac_pdata *pdata = (struct fxgmac_pdata *)context;

	val_mgmt_intcrtl0 =
		(u32)readreg(pdata->pAdapter, pdata->base_mem + MGMT_INT_CTRL0);
	/* disable management interrupts. enable only pmt interrupts. */
	val_mgmt_intcrtl0 = FXGMAC_SET_REG_BITS(val_mgmt_intcrtl0,
						MGMT_INT_CTRL0_INT_MASK_POS,
						MGMT_INT_CTRL0_INT_MASK_LEN,
						MGMT_INT_CTRL0_INT_MASK_EX_PMT);
	writereg(pdata->pAdapter, val_mgmt_intcrtl0,
		 pdata->base_mem + MGMT_INT_CTRL0);

	for (intid = 0; intid < MSIX_TBL_MAX_NUM;
	     intid++) { /* disable all msix */
		writereg(pdata->pAdapter, 0x1,
			 pdata->base_mem + MSIX_TBL_BASE_ADDR +
				 MSIX_TBL_MASK_OFFSET + intid * 16);
	}

	/* enable pmt msix */
	writereg(pdata->pAdapter, 0x0,
		 pdata->base_mem + MSIX_TBL_BASE_ADDR + MSIX_TBL_MASK_OFFSET +
			 MSI_ID_PHY_OTHER * 16);
	readreg(pdata->pAdapter,
		pdata->base_mem +
			MGMT_WOL_CTRL); /* read clear wake up reason */
	/* since Msix interrupt masked now, enable EPHY interrupt for case of link change wakeup */
	fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS,
			     NULL); /* clear phy interrupt */
#if FUXI_EPHY_INTERRUPT_D0_OFF
	regval = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS,
				     PHY_INT_MASK_LINK_UP_LEN, 1);
	regval = FXGMAC_SET_REG_BITS(regval, PHY_INT_MASK_LINK_DOWN_POS,
				     PHY_INT_MASK_LINK_DOWN_LEN, 1);
	fxgmac_write_ephy_reg(pdata, REG_MII_INT_MASK,
			      regval); /* enable phy interrupt */
#endif

	return true;
}
/* Quiesce the datapath for suspend: stop all Tx DMA channels, wait for
 * the MAC transmitter and MTL Tx queues to drain, disable MAC Tx/Rx,
 * then stop all Rx DMA channels.  Returns 0 on success, -1 on timeout.
 * NOTE(review): the "busy" poll budget (15) is shared across the MAC
 * busy-wait and all per-queue waits without being reset, and the
 * timeout error path re-reads via pdata->mac_regs + MTL_TXQ_DEG rather
 * than FXGMAC_MTL_REG(pdata, i, ...) — both look unintended; confirm
 * against vendor history before changing.
 */
static int fxgmac_suspend_txrx(struct fxgmac_pdata *pdata)
{
	struct fxgmac_channel *channel;
	unsigned int i;
	u32 regval;
	int busy = 15;
	/* Prepare for Tx DMA channel stop */
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring) {
			break;
		}
		fxgmac_prepare_tx_stop(pdata, channel);
	}

	/* Disable each Tx DMA channel */
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring) {
			break;
		}

		regval = readreg(pdata->pAdapter,
				 FXGMAC_DMA_REG(channel, DMA_CH_TCR));
		regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS,
					     DMA_CH_TCR_ST_LEN, 0);
		writereg(pdata->pAdapter, regval,
			 FXGMAC_DMA_REG(channel, DMA_CH_TCR));
		DBGPRINT(MP_TRACE, (" %s disable tx dma", __FUNCTION__));
	}

	do {
		regval =
			readreg(pdata->pAdapter, pdata->mac_regs + MAC_DBG_STA);
		busy--;
	} while ((regval & MAC_DBG_STA_TX_BUSY) && (busy));

	if (0 != (regval & MAC_DBG_STA_TX_BUSY)) {
		regval =
			readreg(pdata->pAdapter, pdata->mac_regs + MAC_DBG_STA);
		DbgPrintF(MP_WARN,
			  "warning !!!timed out waiting for Tx MAC to stop\n");
		return -1;
	}
	/* wait empty Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++) {
		do {
			regval = readreg(pdata->pAdapter,
					 FXGMAC_MTL_REG(pdata, i, MTL_TXQ_DEG));
			busy--;
		} while ((regval & MTL_TXQ_DEG_TX_BUSY) && (busy));
		if (0 != (regval & MTL_TXQ_DEG_TX_BUSY)) {
			regval = readreg(pdata->pAdapter,
					 pdata->mac_regs + MTL_TXQ_DEG);
			DbgPrintF(
				MP_WARN,
				"warning !!!timed out waiting for tx queue %u to empty\n",
				i);
			return -1;
		}
	}

	/* Disable MAC TxRx */
	regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR);
	regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_TE_POS, MAC_CR_TE_LEN, 0);
	regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, MAC_CR_RE_LEN, 0);
	writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < pdata->rx_q_count; i++) {
		fxgmac_prepare_rx_stop(pdata, i);
	}
	/* Disable each Rx DMA channel */
	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->rx_ring) {
			break;
		}

		regval = readreg(pdata->pAdapter,
				 FXGMAC_DMA_REG(channel, DMA_CH_RCR));
		regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS,
					     DMA_CH_RCR_SR_LEN, 0);
		writereg(pdata->pAdapter, regval,
			 FXGMAC_DMA_REG(channel, DMA_CH_RCR));
		DBGPRINT(MP_TRACE, (" %s disable rx dma", __FUNCTION__));
	}
	return 0;
}
/* Resume-time interrupt setup: clear the management interrupt mask and
 * unmask every MSI-X vector; PHY link interrupts are enabled or
 * disabled depending on FUXI_EPHY_INTERRUPT_D0_OFF.
 */
static void fxgmac_resume_int(struct fxgmac_pdata *pdata)
{
	u32 intid, regval = 0;
	u32 val_mgmt_intcrtl0;

	val_mgmt_intcrtl0 =
		(u32)readreg(pdata->pAdapter, pdata->base_mem + MGMT_INT_CTRL0);
	/* clear the management interrupt mask (re-enable all sources) */
	val_mgmt_intcrtl0 = FXGMAC_SET_REG_BITS(
		val_mgmt_intcrtl0, MGMT_INT_CTRL0_INT_MASK_POS,
		MGMT_INT_CTRL0_INT_MASK_LEN, MGMT_INT_CTRL0_INT_MASK_DISABLE);
	writereg(pdata->pAdapter, val_mgmt_intcrtl0,
		 pdata->base_mem + MGMT_INT_CTRL0);

	for (intid = 0; intid < MSIX_TBL_RXTX_NUM; intid++) {
		writereg(pdata->pAdapter, 0,
			 pdata->base_mem + MSIX_TBL_BASE_ADDR +
				 MSIX_TBL_MASK_OFFSET + intid * 16);
	}

	for (intid = MSIX_TBL_RXTX_NUM; intid < MSIX_TBL_MAX_NUM;
	     intid++) { /* unmask the remaining msix vectors */
		writereg(pdata->pAdapter, 0,
			 pdata->base_mem + MSIX_TBL_BASE_ADDR +
				 MSIX_TBL_MASK_OFFSET + intid * 16);
	}

#if FUXI_EPHY_INTERRUPT_D0_OFF
	fxgmac_write_ephy_reg(pdata, REG_MII_INT_MASK,
			      0x0); /* disable phy interrupt */
	fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS,
			     NULL); /* clear phy interrupt */
#else
	regval = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS,
				     PHY_INT_MASK_LINK_UP_LEN, 1);
	regval = FXGMAC_SET_REG_BITS(regval, PHY_INT_MASK_LINK_DOWN_POS,
				     PHY_INT_MASK_LINK_DOWN_LEN, 1);
	fxgmac_write_ephy_reg(pdata, REG_MII_INT_MASK,
			      regval); /* enable phy interrupt */
#endif
}

static int fxgmac_hw_init(struct fxgmac_pdata *pdata)
{
	struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops;
	int ret;
	u32 regval = 0;

	if (netif_msg_drv(pdata)) {
		DPRINTK("fxgmac hw init call in\n");
	}

	/* Flush Tx queues */
	ret = fxgmac_flush_tx_queues(pdata);
	if (ret) {
		if (netif_msg_drv(pdata)) {
			DPRINTK("fxgmac_hw_init call flush tx queue err.\n");
		}
		return ret;
	}

	/* Initialize DMA related features */
+ fxgmac_config_dma_bus(pdata); + fxgmac_config_osp_mode(pdata); + fxgmac_config_pblx8(pdata); + fxgmac_config_tx_pbl_val(pdata); + fxgmac_config_rx_pbl_val(pdata); + fxgmac_config_rx_coalesce(pdata); + fxgmac_config_tx_coalesce(pdata); + fxgmac_config_rx_buffer_size(pdata); + fxgmac_config_tso_mode(pdata); + fxgmac_config_sph_mode(pdata); + fxgmac_config_rss(pdata); + fxgmac_config_wol(pdata, pdata->expansion.wol); + + desc_ops->tx_desc_init(pdata); + desc_ops->rx_desc_init(pdata); + fxgmac_enable_dma_interrupts(pdata); + + /* Initialize MTL related features */ + fxgmac_config_mtl_mode(pdata); + fxgmac_config_queue_mapping(pdata); + fxgmac_config_tsf_mode(pdata, pdata->tx_sf_mode); + fxgmac_config_rsf_mode(pdata, pdata->rx_sf_mode); + fxgmac_config_tx_threshold(pdata, pdata->tx_threshold); + fxgmac_config_rx_threshold(pdata, pdata->rx_threshold); + fxgmac_config_tx_fifo_size(pdata); + fxgmac_config_rx_fifo_size(pdata); + fxgmac_config_flow_control_threshold(pdata); + fxgmac_config_rx_fep_disable(pdata); + fxgmac_config_rx_fup_enable(pdata); + fxgmac_enable_mtl_interrupts(pdata); + + /* Initialize MAC related features */ + fxgmac_config_mac_address(pdata); + fxgmac_config_crc_check(pdata); + fxgmac_config_rx_mode(pdata); + fxgmac_config_jumbo(pdata); + fxgmac_config_flow_control(pdata); + fxgmac_config_mac_speed(pdata); + fxgmac_config_checksum_offload(pdata); + fxgmac_config_vlan_support(pdata); + fxgmac_config_mmc(pdata); + fxgmac_enable_mac_interrupts(pdata); + + /* enable EPhy link change interrupt */ + fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS, + NULL); /* clear phy interrupt */ + regval = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS, + PHY_INT_MASK_LINK_UP_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, PHY_INT_MASK_LINK_DOWN_POS, + PHY_INT_MASK_LINK_DOWN_LEN, 1); + fxgmac_write_ephy_reg(pdata, REG_MII_INT_MASK, + regval); /* enable phy interrupt */ + + if (netif_msg_drv(pdata)) { + DPRINTK("fxgmac hw init callout\n"); + } + return 0; +} + +static 
void fxgmac_save_nonstick_reg(struct fxgmac_pdata *pdata)
+{
+	u32 i;
+	for (i = REG_PCIE_TRIGGER; i < MSI_PBA_REG; i += 4) {
+		pdata->reg_nonstick[(i - REG_PCIE_TRIGGER) >> 2] =
+			readreg(pdata->pAdapter, pdata->base_mem + i);
+	}
+}
+
+static void fxgmac_restore_nonstick_reg(struct fxgmac_pdata *pdata)
+{
+	u32 i;
+	for (i = REG_PCIE_TRIGGER; i < MSI_PBA_REG; i += 4) {
+		writereg(pdata->pAdapter,
+			 pdata->reg_nonstick[(i - REG_PCIE_TRIGGER) >> 2],
+			 pdata->base_mem + i);
+	}
+}
+
+static void fxgmac_esd_restore_pcie_cfg(struct fxgmac_pdata *pdata)
+{
+	cfg_w32(pdata, REG_PCI_COMMAND, pdata->expansion.cfg_pci_cmd);
+	cfg_w32(pdata, REG_CACHE_LINE_SIZE,
+		pdata->expansion.cfg_cache_line_size);
+	cfg_w32(pdata, REG_MEM_BASE, pdata->expansion.cfg_mem_base);
+	cfg_w32(pdata, REG_MEM_BASE_HI, pdata->expansion.cfg_mem_base_hi);
+	cfg_w32(pdata, REG_IO_BASE, pdata->expansion.cfg_io_base);
+	cfg_w32(pdata, REG_INT_LINE, pdata->expansion.cfg_int_line);
+	cfg_w32(pdata, REG_DEVICE_CTRL1, pdata->expansion.cfg_device_ctrl1);
+	cfg_w32(pdata, REG_PCI_LINK_CTRL, pdata->expansion.cfg_pci_link_ctrl);
+	cfg_w32(pdata, REG_DEVICE_CTRL2, pdata->expansion.cfg_device_ctrl2);
+	cfg_w32(pdata, REG_MSIX_CAPABILITY,
+		pdata->expansion.cfg_msix_capability);
+}
+
+static int fxgmac_hw_exit(struct fxgmac_pdata *pdata)
+{
+	u32 regval;
+	u32 value = 0;
+
+	cfg_r32(pdata, REG_PCI_LINK_CTRL, &regval);
+	pdata->pcie_link_status =
+		FXGMAC_GET_REG_BITS(regval, PCI_LINK_CTRL_ASPM_CONTROL_POS,
+				    PCI_LINK_CTRL_ASPM_CONTROL_LEN);
+	if (PCI_LINK_CTRL_L1_STATUS == (pdata->pcie_link_status & 0x02)) {
+		regval = FXGMAC_SET_REG_BITS(regval,
+					     PCI_LINK_CTRL_ASPM_CONTROL_POS,
+					     PCI_LINK_CTRL_ASPM_CONTROL_LEN, 0);
+		cfg_w32(pdata, REG_PCI_LINK_CTRL, regval);
+	}
+
+	/* Issue a CHIP reset */
+	regval = readreg(pdata->pAdapter, pdata->base_mem + SYS_RESET_REG);
+	DPRINTK("CHIP_RESET 0x%x\n", regval);
+	/* reg152c bit31 1->reset, self-clear, if read it again, it still set 1.
*/
+	regval = FXGMAC_SET_REG_BITS(regval, SYS_RESET_POS, SYS_RESET_LEN, 1);
+	writereg(pdata->pAdapter, regval, pdata->base_mem + SYS_RESET_REG);
+
+	usleep_range_ex(pdata->pAdapter, 9000, 10000);
+
+	/* reg152c reset will reset trigger circuit and reload efuse patch 0x1004=0x16, need to release ephy reset again */
+	value = FXGMAC_SET_REG_BITS(value, MGMT_EPHY_CTRL_RESET_POS,
+				    MGMT_EPHY_CTRL_RESET_LEN,
+				    MGMT_EPHY_CTRL_STA_EPHY_RELEASE);
+	writereg(pdata->pAdapter, value, pdata->base_mem + MGMT_EPHY_CTRL);
+	usleep_range_ex(pdata->pAdapter, 100, 150);
+
+	fxgmac_restore_nonstick_reg(
+		pdata); /* reset will clear nonstick registers. */
+
+	return 0;
+}
+
+static int fxgmac_set_gmac_register(struct fxgmac_pdata *pdata, u8 *address,
+				    unsigned int data)
+{
+	if (address < (u8 *)(pdata->base_mem)) {
+		return -1;
+	}
+	writereg(pdata->pAdapter, data, address);
+	return 0;
+}
+
+static u32 fxgmac_get_gmac_register(struct fxgmac_pdata *pdata, u8 *address)
+{
+	u32 regval = 0;
+
+	if (address > (u8 *)(pdata->base_mem)) {
+		regval = readreg(pdata->pAdapter, address);
+	}
+	return regval;
+}
+
+static int fxgmac_pcie_init(struct fxgmac_pdata *pdata, bool ltr_en,
+			    bool aspm_l1ss_en, bool aspm_l1_en,
+			    bool aspm_l0s_en)
+{
+	u32 regval = 0;
+	u32 deviceid = 0;
+
+	cfg_r32(pdata, REG_PCI_LINK_CTRL, &regval);
+	if (PCI_LINK_CTRL_L1_STATUS == (pdata->pcie_link_status & 0x02) &&
+	    0x00 == FXGMAC_GET_REG_BITS(regval, PCI_LINK_CTRL_ASPM_CONTROL_POS,
+					PCI_LINK_CTRL_ASPM_CONTROL_LEN)) {
+		regval = FXGMAC_SET_REG_BITS(regval,
+					     PCI_LINK_CTRL_ASPM_CONTROL_POS,
+					     PCI_LINK_CTRL_ASPM_CONTROL_LEN,
+					     pdata->pcie_link_status);
+		cfg_w32(pdata, REG_PCI_LINK_CTRL, regval);
+	}
+
+	regval = FXGMAC_SET_REG_BITS(0, LTR_IDLE_ENTER_REQUIRE_POS,
+				     LTR_IDLE_ENTER_REQUIRE_LEN,
+				     LTR_IDLE_ENTER_REQUIRE);
+	regval = FXGMAC_SET_REG_BITS(regval, LTR_IDLE_ENTER_SCALE_POS,
+				     LTR_IDLE_ENTER_SCALE_LEN,
+				     LTR_IDLE_ENTER_SCALE);
+	regval = FXGMAC_SET_REG_BITS(regval, LTR_IDLE_ENTER_POS,
+				     LTR_IDLE_ENTER_LEN,
LTR_IDLE_ENTER_USVAL); + regval = (regval << 16) + regval; /* snoopy + non-snoopy */ + writereg(pdata->pAdapter, regval, pdata->base_mem + LTR_IDLE_ENTER); + + regval = 0; + regval = FXGMAC_SET_REG_BITS(0, LTR_IDLE_EXIT_REQUIRE_POS, + LTR_IDLE_EXIT_REQUIRE_LEN, + LTR_IDLE_EXIT_REQUIRE); + regval = FXGMAC_SET_REG_BITS(regval, LTR_IDLE_EXIT_SCALE_POS, + LTR_IDLE_EXIT_SCALE_LEN, + LTR_IDLE_EXIT_SCALE); + regval = FXGMAC_SET_REG_BITS(regval, LTR_IDLE_EXIT_POS, + LTR_IDLE_EXIT_LEN, LTR_IDLE_EXIT_USVAL); + regval = (regval << 16) + regval; /* snoopy + non-snoopy */ + writereg(pdata->pAdapter, regval, pdata->base_mem + LTR_IDLE_EXIT); + + regval = readreg(pdata->pAdapter, pdata->base_mem + LTR_CTRL); + if (ltr_en) { + regval = FXGMAC_SET_REG_BITS(regval, LTR_CTRL_EN_POS, + LTR_CTRL_EN_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, + LTR_CTRL_IDLE_THRE_TIMER_POS, + LTR_CTRL_IDLE_THRE_TIMER_LEN, + LTR_CTRL_IDLE_THRE_TIMER_VAL); + } else { + regval = FXGMAC_SET_REG_BITS(regval, LTR_CTRL_EN_POS, + LTR_CTRL_EN_LEN, 0); + } + writereg(pdata->pAdapter, regval, pdata->base_mem + LTR_CTRL); + + regval = readreg(pdata->pAdapter, pdata->base_mem + LPW_CTRL); + regval = FXGMAC_SET_REG_BITS(regval, LPW_CTRL_ASPM_L0S_EN_POS, + LPW_CTRL_ASPM_L0S_EN_LEN, + aspm_l0s_en ? 1 : 0); + regval = FXGMAC_SET_REG_BITS(regval, LPW_CTRL_ASPM_L1_EN_POS, + LPW_CTRL_ASPM_L1_EN_LEN, + aspm_l1_en ? 1 : 0); + regval = FXGMAC_SET_REG_BITS(regval, LPW_CTRL_L1SS_EN_POS, + LPW_CTRL_L1SS_EN_LEN, + aspm_l1ss_en ? 
1 : 0);
+	writereg(pdata->pAdapter, regval, pdata->base_mem + LPW_CTRL);
+
+	cfg_r32(pdata, REG_ASPM_CONTROL, &regval);
+	regval = FXGMAC_SET_REG_BITS(regval, ASPM_L1_IDLE_THRESHOLD_POS,
+				     ASPM_L1_IDLE_THRESHOLD_LEN,
+				     ASPM_L1_IDLE_THRESHOLD_1US);
+	cfg_w32(pdata, REG_ASPM_CONTROL, regval);
+
+	regval = 0;
+	regval = FXGMAC_SET_REG_BITS(regval, PCIE_SERDES_PLL_AUTOOFF_POS,
+				     PCIE_SERDES_PLL_AUTOOFF_LEN, 1);
+	writereg(pdata->pAdapter, regval,
+		 pdata->base_mem + REG_PCIE_SERDES_PLL);
+
+	/*fuxi nto adjust sigdet threshold*/
+	cfg_r8(pdata, REG_PCI_REVID, &regval);
+	cfg_r16(pdata, REG_PCI_DEVICE_ID, &deviceid);
+	if (FUXI_REV_01 == regval && PCI_DEVICE_ID_FUXI == deviceid) {
+		regval =
+			readreg(pdata->pAdapter, pdata->base_mem + MGMT_SIGDET);
+		regval = FXGMAC_SET_REG_BITS(regval, MGMT_SIGDET_POS,
+					     MGMT_SIGDET_LEN, MGMT_SIGDET_55MV);
+		writereg(pdata->pAdapter, regval,
+			 pdata->base_mem + MGMT_SIGDET);
+	}
+
+	return 0;
+}
+
+static void fxgmac_trigger_pcie(struct fxgmac_pdata *pdata, u32 code)
+{
+	writereg(pdata->pAdapter, code, pdata->base_mem + REG_PCIE_TRIGGER);
+}
+
+void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops)
+{
+	hw_ops->init = fxgmac_hw_init;
+	hw_ops->exit = fxgmac_hw_exit;
+	hw_ops->save_nonstick_reg = fxgmac_save_nonstick_reg;
+	hw_ops->restore_nonstick_reg = fxgmac_restore_nonstick_reg;
+	hw_ops->esd_restore_pcie_cfg = fxgmac_esd_restore_pcie_cfg;
+
+	hw_ops->set_gmac_register = fxgmac_set_gmac_register;
+	hw_ops->get_gmac_register = fxgmac_get_gmac_register;
+
+	hw_ops->tx_complete = fxgmac_tx_complete;
+	hw_ops->enable_tx = fxgmac_enable_tx;
+	hw_ops->disable_tx = fxgmac_disable_tx;
+	hw_ops->enable_rx = fxgmac_enable_rx;
+	hw_ops->disable_rx = fxgmac_disable_rx;
+	hw_ops->enable_channel_rx = fxgmac_enable_channel_rx;
+	hw_ops->dev_xmit = fxgmac_dev_xmit;
+	hw_ops->dev_read = fxgmac_dev_read;
+	hw_ops->config_tso = fxgmac_config_tso_mode;
+	hw_ops->enable_int = fxgmac_enable_int;
+	hw_ops->disable_int = fxgmac_disable_int;
+
hw_ops->set_interrupt_moderation = fxgmac_set_interrupt_moderation; + hw_ops->enable_msix_rxtxinterrupt = fxgmac_enable_msix_rxtxinterrupt; + hw_ops->disable_msix_interrupt = fxgmac_disable_msix_interrupt; + hw_ops->enable_msix_rxtxphyinterrupt = + fxgmac_enable_msix_rxtxphyinterrupt; + hw_ops->enable_msix_one_interrupt = fxgmac_enable_msix_one_interrupt; + hw_ops->disable_msix_one_interrupt = fxgmac_disable_msix_one_interrupt; + hw_ops->enable_mgm_interrupt = fxgmac_enable_mgm_interrupt; + hw_ops->disable_mgm_interrupt = fxgmac_disable_mgm_interrupt; + + hw_ops->set_mac_address = fxgmac_set_mac_address; + hw_ops->set_mac_hash = fxgmac_add_mac_addresses; + hw_ops->config_rx_mode = fxgmac_config_rx_mode; + hw_ops->enable_rx_csum = fxgmac_enable_rx_csum; + hw_ops->disable_rx_csum = fxgmac_disable_rx_csum; + + /* For MII speed configuration */ + hw_ops->config_mac_speed = fxgmac_config_mac_speed; + hw_ops->get_xlgmii_phy_status = fxgmac_check_phy_link; + + /* For descriptor related operation */ + hw_ops->tx_desc_init = fxgmac_tx_desc_init; + hw_ops->rx_desc_init = fxgmac_rx_desc_init; + hw_ops->tx_desc_reset = fxgmac_tx_desc_reset; + hw_ops->rx_desc_reset = fxgmac_rx_desc_reset; + hw_ops->is_last_desc = fxgmac_is_last_desc; + hw_ops->is_context_desc = fxgmac_is_context_desc; + hw_ops->tx_start_xmit = fxgmac_tx_start_xmit; + hw_ops->set_pattern_data = fxgmac_set_pattern_data; + hw_ops->config_wol = fxgmac_config_wol; + hw_ops->get_rss_hash_key = fxgmac_read_rss_hash_key; + hw_ops->write_rss_lookup_table = fxgmac_write_rss_lookup_table; +#if FXGMAC_SANITY_CHECK_ENABLED + hw_ops->diag_sanity_check = fxgmac_diag_sanity_check; +#endif + + /* For Flow Control */ + hw_ops->config_tx_flow_control = fxgmac_config_tx_flow_control; + hw_ops->config_rx_flow_control = fxgmac_config_rx_flow_control; + + /*For Jumbo Frames*/ + hw_ops->enable_jumbo = fxgmac_config_jumbo; + + /* For Vlan related config */ + hw_ops->enable_tx_vlan = fxgmac_enable_tx_vlan; + hw_ops->disable_tx_vlan = 
fxgmac_disable_tx_vlan; + hw_ops->enable_rx_vlan_stripping = fxgmac_enable_rx_vlan_stripping; + hw_ops->disable_rx_vlan_stripping = fxgmac_disable_rx_vlan_stripping; + hw_ops->enable_rx_vlan_filtering = fxgmac_enable_rx_vlan_filtering; + hw_ops->disable_rx_vlan_filtering = fxgmac_disable_rx_vlan_filtering; + hw_ops->update_vlan_hash_table = fxgmac_update_vlan_hash_table; + + /* For RX coalescing */ + hw_ops->config_rx_coalesce = fxgmac_config_rx_coalesce; + hw_ops->config_tx_coalesce = fxgmac_config_tx_coalesce; + hw_ops->usec_to_riwt = fxgmac_usec_to_riwt; + hw_ops->riwt_to_usec = fxgmac_riwt_to_usec; + + /* For RX and TX threshold config */ + hw_ops->config_rx_threshold = fxgmac_config_rx_threshold; + hw_ops->config_tx_threshold = fxgmac_config_tx_threshold; + + /* For RX and TX Store and Forward Mode config */ + hw_ops->config_rsf_mode = fxgmac_config_rsf_mode; + hw_ops->config_tsf_mode = fxgmac_config_tsf_mode; + + /* For TX DMA Operating on Second Frame config */ + hw_ops->config_osp_mode = fxgmac_config_osp_mode; + + /* For RX and TX PBL config */ + hw_ops->config_rx_pbl_val = fxgmac_config_rx_pbl_val; + hw_ops->get_rx_pbl_val = fxgmac_get_rx_pbl_val; + hw_ops->config_tx_pbl_val = fxgmac_config_tx_pbl_val; + hw_ops->get_tx_pbl_val = fxgmac_get_tx_pbl_val; + hw_ops->config_pblx8 = fxgmac_config_pblx8; + + /* For MMC statistics support */ + hw_ops->tx_mmc_int = fxgmac_tx_mmc_int; + hw_ops->rx_mmc_int = fxgmac_rx_mmc_int; + hw_ops->read_mmc_stats = fxgmac_read_mmc_stats; + + /* For Receive Side Scaling */ + hw_ops->enable_rss = fxgmac_enable_rss; + hw_ops->disable_rss = fxgmac_disable_rss; + hw_ops->get_rss_options = fxgmac_read_rss_options; + hw_ops->set_rss_options = fxgmac_write_rss_options; + hw_ops->set_rss_hash_key = fxgmac_set_rss_hash_key; + hw_ops->set_rss_lookup_table = fxgmac_set_rss_lookup_table; + + /*For Offload*/ + hw_ops->set_arp_offload = fxgmac_update_aoe_ipv4addr; + hw_ops->enable_arp_offload = fxgmac_enable_arp_offload; + 
hw_ops->disable_arp_offload = fxgmac_disable_arp_offload; + + hw_ops->set_ns_offload = fxgmac_set_ns_offload; + hw_ops->enable_ns_offload = fxgmac_enable_ns_offload; + hw_ops->disable_ns_offload = fxgmac_disable_ns_offload; + + hw_ops->enable_wake_magic_pattern = fxgmac_enable_wake_magic_pattern; + hw_ops->disable_wake_magic_pattern = fxgmac_disable_wake_magic_pattern; + + hw_ops->enable_wake_link_change = fxgmac_enable_wake_link_change; + hw_ops->disable_wake_link_change = fxgmac_disable_wake_link_change; + + hw_ops->check_wake_pattern_fifo_pointer = + fxgmac_check_wake_pattern_fifo_pointer; + hw_ops->set_wake_pattern = fxgmac_set_wake_pattern; + hw_ops->enable_wake_pattern = fxgmac_enable_wake_pattern; + hw_ops->disable_wake_pattern = fxgmac_disable_wake_pattern; + hw_ops->set_wake_pattern_mask = fxgmac_set_wake_pattern_mask; +#if defined(FUXI_PM_WPI_READ_FEATURE_EN) && FUXI_PM_WPI_READ_FEATURE_EN + hw_ops->enable_wake_packet_indication = + fxgmac_enable_wake_packet_indication; + hw_ops->get_wake_packet_indication = fxgmac_get_wake_packet_indication; +#endif + + /*For phy write /read*/ + hw_ops->reset_phy = fxgmac_reset_phy; + hw_ops->release_phy = fxgmac_release_phy; + hw_ops->get_ephy_state = fxgmac_get_ephy_state; + hw_ops->write_ephy_reg = fxgmac_write_ephy_reg; + hw_ops->read_ephy_reg = fxgmac_read_ephy_reg; + hw_ops->set_ephy_autoneg_advertise = fxgmac_set_ephy_autoneg_advertise; + hw_ops->phy_config = fxgmac_phy_config; + hw_ops->close_phy_led = fxgmac_close_phy_led; + hw_ops->led_under_active = fxmgac_config_led_under_active; + hw_ops->led_under_sleep = fxgmac_config_led_under_sleep; + hw_ops->led_under_shutdown = fxgmac_config_led_under_shutdown; + hw_ops->led_under_disable = fxgmac_config_led_under_disable; + hw_ops->enable_phy_check = fxgmac_enable_phy_check; + hw_ops->disable_phy_check = fxgmac_disable_phy_check; + hw_ops->setup_cable_loopback = fxgmac_setup_cable_loopback; + hw_ops->clean_cable_loopback = fxgmac_clean_cable_loopback; + 
hw_ops->disable_phy_sleep = fxgmac_disable_phy_sleep; + hw_ops->enable_phy_sleep = fxgmac_enable_phy_sleep; + hw_ops->phy_green_ethernet = fxgmac_phy_green_ethernet; + hw_ops->phy_eee_feature = fxgmac_phy_eee_feature; + + /* For power management */ + hw_ops->pre_power_down = fxgmac_pre_powerdown; + hw_ops->config_power_down = fxgmac_config_powerdown; + hw_ops->config_power_up = fxgmac_config_powerup; + hw_ops->set_suspend_int = fxgmac_suspend_int; + hw_ops->set_resume_int = fxgmac_resume_int; + hw_ops->set_suspend_txrx = fxgmac_suspend_txrx; + hw_ops->set_pwr_clock_gate = fxgmac_pwr_clock_gate; + hw_ops->set_pwr_clock_ungate = fxgmac_pwr_clock_ungate; + + hw_ops->set_all_multicast_mode = fxgmac_set_all_multicast_mode; + hw_ops->config_multicast_mac_hash_table = + fxgmac_config_multicast_mac_hash_table; + hw_ops->set_promiscuous_mode = fxgmac_set_promiscuous_mode; + hw_ops->enable_rx_broadcast = fxgmac_enable_rx_broadcast; + + /* efuse relevant operation. */ + hw_ops->read_patch_from_efuse = + fxgmac_read_patch_from_efuse; /* read patch per register. */ + hw_ops->read_patch_from_efuse_per_index = + fxgmac_read_patch_from_efuse_per_index; /* read patch per index. 
*/
+	hw_ops->write_patch_to_efuse = fxgmac_write_patch_to_efuse;
+	hw_ops->write_patch_to_efuse_per_index =
+		fxgmac_write_patch_to_efuse_per_index;
+	hw_ops->read_mac_subsys_from_efuse = fxgmac_read_mac_subsys_from_efuse;
+	hw_ops->write_mac_subsys_to_efuse = fxgmac_write_mac_subsys_to_efuse;
+	hw_ops->efuse_load = fxgmac_efuse_load;
+	hw_ops->read_efuse_data = fxgmac_efuse_read_data;
+	hw_ops->write_oob = fxgmac_efuse_write_oob;
+	hw_ops->write_led = fxgmac_efuse_write_led;
+	hw_ops->write_led_config = fxgmac_write_led_setting_to_efuse;
+	hw_ops->read_led_config = fxgmac_read_led_setting_from_efuse;
+
+	/* */
+	hw_ops->pcie_init = fxgmac_pcie_init;
+	hw_ops->trigger_pcie = fxgmac_trigger_pcie;
+}
diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c
new file mode 100644
index 000000000000..b8734efb3642
--- /dev/null
+++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c
@@ -0,0 +1,2329 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Motorcomm Corporation. */
+
+#include <linux/netdevice.h>
+#include <linux/tcp.h>
+#include <linux/interrupt.h>
+#include <linux/inetdevice.h>
+#include <linux/inet.h>
+#include <linux/if_vlan.h>
+
+#include "fuxi-os.h"
+#include "fuxi-gmac.h"
+#include "fuxi-gmac-reg.h"
+
+static int fxgmac_one_poll_rx(struct napi_struct *, int);
+static int fxgmac_one_poll_tx(struct napi_struct *, int);
+static int fxgmac_all_poll(struct napi_struct *, int);
+
+unsigned int fxgmac_get_netdev_ip4addr(struct fxgmac_pdata *pdata)
+{
+	struct net_device *netdev = pdata->netdev;
+	struct in_ifaddr *ifa;
+	unsigned int ipval =
+		0xc0a801ca; /* here just hard code to 192.168.1.202 */
+
+	rcu_read_lock();
+	/* we only get the first IPv4 addr.
*/ + ifa = rcu_dereference(netdev->ip_ptr->ifa_list); + if (ifa) { + /* binary ipv4 addr with __be */ + ipval = (unsigned int)ifa->ifa_address; + + DPRINTK("%s, netdev %s IPv4 address %pI4, mask: %pI4\n", + __FUNCTION__, ifa->ifa_label, &ifa->ifa_address, + &ifa->ifa_mask); + } + rcu_read_unlock(); + + return ipval; +} + +unsigned char *fxgmac_get_netdev_ip6addr(struct fxgmac_pdata *pdata, + unsigned char *ipval, + unsigned char *ip6addr_solicited, + unsigned int ifa_flag) +{ + struct net_device *netdev = pdata->netdev; + struct inet6_dev *i6dev; + struct inet6_ifaddr *ifp; + unsigned char local_ipval[16] = { 0 }; + unsigned char solicited_ipval[16] = { 0 }; + struct in6_addr *addr_ip6 = (struct in6_addr *)local_ipval; + struct in6_addr *addr_ip6_solicited = + (struct in6_addr *)solicited_ipval; + int err = -EADDRNOTAVAIL; + unsigned char *ret; + + if (ipval) { + addr_ip6 = (struct in6_addr *)ipval; + } + + if (ip6addr_solicited) { + addr_ip6_solicited = (struct in6_addr *)ip6addr_solicited; + } + + in6_pton("fe80::4808:8ffb:d93e:d753", -1, (u8 *)addr_ip6, -1, + NULL); /* here just hard code for default */ + + if (ifa_flag & FXGMAC_NS_IFA_GLOBAL_UNICAST) + DPRINTK("%s FXGMAC_NS_IFA_GLOBAL_UNICAST is set, %x\n", + __FUNCTION__, ifa_flag); + + if (ifa_flag & FXGMAC_NS_IFA_LOCAL_LINK) + DPRINTK("%s FXGMAC_NS_IFA_LOCAL_LINK is set, %x\n", + __FUNCTION__, ifa_flag); + + rcu_read_lock(); + i6dev = __in6_dev_get(netdev); + if (i6dev != NULL) { + read_lock_bh(&i6dev->lock); + list_for_each_entry(ifp, &i6dev->addr_list, if_list) { + /* here we need only the ll addr, use scope to filter out it. 
*/ + if (((ifa_flag & FXGMAC_NS_IFA_GLOBAL_UNICAST) && (ifp->scope != IFA_LINK)) || ((ifa_flag & FXGMAC_NS_IFA_LOCAL_LINK) && (ifp->scope == IFA_LINK)/* && + !(ifp->flags & IFA_F_TENTATIVE)*/)) { + memcpy(addr_ip6, &ifp->addr, 16); + addrconf_addr_solict_mult(addr_ip6, + addr_ip6_solicited); + err = 0; + + break; + } + } + read_unlock_bh(&i6dev->lock); + } + rcu_read_unlock(); + + if (err) + DPRINTK("%s get ipv6 addr failed, use default.\n", + __FUNCTION__); + + ret = (err ? NULL : ipval); + + return ret; +} + +inline unsigned int fxgmac_tx_avail_desc(struct fxgmac_ring *ring) +{ + unsigned int avail; + + if (ring->dirty > ring->cur) + avail = ring->dirty - ring->cur; + else + avail = ring->dma_desc_count - ring->cur + ring->dirty; + + return avail; +} + +inline unsigned int fxgmac_rx_dirty_desc(struct fxgmac_ring *ring) +{ + unsigned int dirty; + + if (ring->dirty <= ring->cur) + dirty = ring->cur - ring->dirty; + else + dirty = ring->dma_desc_count - ring->dirty + ring->cur; + + return dirty; +} + +static int fxgmac_maybe_stop_tx_queue(struct fxgmac_channel *channel, + struct fxgmac_ring *ring, + unsigned int count) +{ + struct fxgmac_pdata *pdata = channel->pdata; + + if (count > fxgmac_tx_avail_desc(ring)) { + netif_info( + pdata, drv, pdata->netdev, + "Tx queue stopped, not enough descriptors available\n"); + netif_stop_subqueue(pdata->netdev, channel->queue_index); + ring->tx.queue_stopped = 1; + + /* If we haven't notified the hardware because of xmit_more + * support, tell it now + */ + if (ring->tx.xmit_more) + pdata->hw_ops.tx_start_xmit(channel, ring); + if (netif_msg_tx_done(pdata)) + DPRINTK("about stop tx q, ret BUSY\n"); + + return NETDEV_TX_BUSY; + } + + return 0; +} + +static void fxgmac_prep_vlan(struct sk_buff *skb, + struct fxgmac_pkt_info *pkt_info) +{ + if (skb_vlan_tag_present(skb)) + pkt_info->vlan_ctag = skb_vlan_tag_get(skb); +} + +static int fxgmac_prep_tso(struct fxgmac_pdata *pdata, struct sk_buff *skb, + struct fxgmac_pkt_info 
*pkt_info) +{ + int ret; + + if (!FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN)) + return 0; + + ret = skb_cow_head(skb, 0); + if (ret) + return ret; + + pkt_info->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + pkt_info->tcp_header_len = tcp_hdrlen(skb); + pkt_info->tcp_payload_len = skb->len - pkt_info->header_len; + pkt_info->mss = skb_shinfo(skb)->gso_size; + + if (netif_msg_tx_done(pdata)) { + DPRINTK("header_len=%u\n", pkt_info->header_len); + DPRINTK("tcp_header_len=%u, tcp_payload_len=%u\n", + pkt_info->tcp_header_len, pkt_info->tcp_payload_len); + DPRINTK("mss=%u\n", pkt_info->mss); + } + /* Update the number of packets that will ultimately be transmitted + * along with the extra bytes for each extra packet + */ + pkt_info->tx_packets = skb_shinfo(skb)->gso_segs; + pkt_info->tx_bytes += (pkt_info->tx_packets - 1) * pkt_info->header_len; + + return 0; +} + +static int fxgmac_is_tso(struct sk_buff *skb) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; + + return 1; +} + +static void fxgmac_prep_tx_pkt(struct fxgmac_pdata *pdata, + struct fxgmac_ring *ring, struct sk_buff *skb, + struct fxgmac_pkt_info *pkt_info) +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) + skb_frag_t *frag; +#else + struct skb_frag_struct *frag; +#endif + unsigned int context_desc; + unsigned int len; + unsigned int i; + + pkt_info->skb = skb; + + context_desc = 0; + pkt_info->desc_count = 0; + + pkt_info->tx_packets = 1; + pkt_info->tx_bytes = skb->len; + if (netif_msg_tx_done(pdata)) + DPRINTK("fxgmac_prep_tx_pkt callin, pkt desc cnt=%d, skb len=%d, skbheadlen=%d\n", + pkt_info->desc_count, skb->len, skb_headlen(skb)); + + if (fxgmac_is_tso(skb)) { + /* TSO requires an extra descriptor if mss is different */ + if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) { + context_desc = 1; + pkt_info->desc_count++; + } + if (netif_msg_tx_done(pdata)) + 
DPRINTK("fxgmac_is_tso=%d, ip_summed=%d, skb gso=%d\n", + ((skb->ip_summed == CHECKSUM_PARTIAL) && + (skb_is_gso(skb))) ? + 1 : + 0, + skb->ip_summed, skb_is_gso(skb) ? 1 : 0); + + /* TSO requires an extra descriptor for TSO header */ + pkt_info->desc_count++; + + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN, 1); + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN, 1); + if (netif_msg_tx_done(pdata)) + DPRINTK("fxgmac_prep_tx_pkt, tso, pkt desc cnt=%d\n", + pkt_info->desc_count); + } else if (skb->ip_summed == CHECKSUM_PARTIAL) + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN, 1); + + if (skb_vlan_tag_present(skb)) { + /* VLAN requires an extra descriptor if tag is different */ + if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) + /* We can share with the TSO context descriptor */ + if (!context_desc) { + context_desc = 1; + pkt_info->desc_count++; + } + + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN, 1); + if (netif_msg_tx_done(pdata)) + DPRINTK("fxgmac_prep_tx_pkt, VLAN, pkt desc cnt=%d, vlan=0x%04x\n", + pkt_info->desc_count, skb_vlan_tag_get(skb)); + } + + for (len = skb_headlen(skb); len;) { + pkt_info->desc_count++; + len -= min_t(unsigned int, len, FXGMAC_TX_MAX_BUF_SIZE); + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + for (len = skb_frag_size(frag); len;) { + pkt_info->desc_count++; + len -= min_t(unsigned int, len, FXGMAC_TX_MAX_BUF_SIZE); + } + } + if (netif_msg_tx_done(pdata)) + DPRINTK("fxgmac_prep_tx_pkt callout, pkt desc cnt=%d, skb len=%d, skbheadlen=%d, frags=%d\n", + pkt_info->desc_count, skb->len, 
skb_headlen(skb), + skb_shinfo(skb)->nr_frags); +} + +static int fxgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) +{ + unsigned int rx_buf_size; + + if (mtu > FXGMAC_JUMBO_PACKET_MTU) { + netdev_alert(netdev, "MTU exceeds maximum supported value\n"); + return -EINVAL; + } + + rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + rx_buf_size = + clamp_val(rx_buf_size, FXGMAC_RX_MIN_BUF_SIZE, + PAGE_SIZE * 4 /* follow yonggang's suggestion */); + + rx_buf_size = (rx_buf_size + FXGMAC_RX_BUF_ALIGN - 1) & + ~(FXGMAC_RX_BUF_ALIGN - 1); + + return rx_buf_size; +} + +static void fxgmac_enable_rx_tx_ints(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct fxgmac_channel *channel; + enum fxgmac_int int_id; + unsigned int i; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (channel->tx_ring && channel->rx_ring) + int_id = FXGMAC_INT_DMA_CH_SR_TI_RI; + else if (channel->tx_ring) + int_id = FXGMAC_INT_DMA_CH_SR_TI; + else if (channel->rx_ring) + int_id = FXGMAC_INT_DMA_CH_SR_RI; + else + continue; + + hw_ops->enable_int(channel, int_id); + } +} + +static void fxgmac_phy_process(struct fxgmac_pdata *pdata) +{ + int cur_link = 0; + int regval = 0; + int cur_speed = 0; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + regval = hw_ops->get_ephy_state(pdata); + + /* We should make sure that PHY is done with the reset */ + if (regval & MGMT_EPHY_CTRL_STA_EPHY_RESET) { + pdata->expansion.phy_link = false; + return; + } + + cur_link = FXGMAC_GET_REG_BITS(regval, + MGMT_EPHY_CTRL_STA_EPHY_LINKUP_POS, + MGMT_EPHY_CTRL_STA_EPHY_LINKUP_LEN); + if (pdata->expansion.phy_link != cur_link) { + pdata->expansion.phy_link = cur_link; + if (pdata->expansion.phy_link) { + cur_speed = FXGMAC_GET_REG_BITS( + regval, MGMT_EPHY_CTRL_STA_SPEED_POS, + MGMT_EPHY_CTRL_STA_SPEED_LEN); + pdata->phy_speed = (cur_speed == 2) ? SPEED_1000 : + (cur_speed == 1) ? 
SPEED_100 : + SPEED_10; + pdata->phy_duplex = FXGMAC_GET_REG_BITS( + regval, MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_POS, + MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_LEN); + hw_ops->config_mac_speed(pdata); + + hw_ops->enable_rx(pdata); + hw_ops->enable_tx(pdata); + netif_carrier_on(pdata->netdev); + if (netif_running(pdata->netdev)) { + netif_tx_wake_all_queues(pdata->netdev); + DPRINTK("%s now is link up, mac_speed=%d.\n", + FXGMAC_DRV_NAME, pdata->phy_speed); + } + } else { + netif_carrier_off(pdata->netdev); + netif_tx_stop_all_queues(pdata->netdev); + pdata->phy_speed = SPEED_UNKNOWN; + pdata->phy_duplex = DUPLEX_UNKNOWN; + hw_ops->disable_rx(pdata); + hw_ops->disable_tx(pdata); + DPRINTK("%s now is link down\n", FXGMAC_DRV_NAME); + } + } +} + +static int fxgmac_phy_poll(struct napi_struct *napi, int budget) +{ + struct fxgmac_pdata *pdata = + container_of(napi, struct fxgmac_pdata, expansion.napi_phy); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + fxgmac_phy_process(pdata); + if (napi_complete_done(napi, 0)) + hw_ops->enable_msix_one_interrupt(pdata, MSI_ID_PHY_OTHER); + + return 0; +} + +static irqreturn_t fxgmac_phy_isr(int irq, void *data) +{ + struct fxgmac_pdata *pdata = data; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_INT_CTRL0); + if (!(regval & MGMT_INT_CTRL0_INT_STATUS_PHY)) + return IRQ_HANDLED; + + hw_ops->disable_msix_one_interrupt(pdata, MSI_ID_PHY_OTHER); + hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); + if (napi_schedule_prep(&pdata->expansion.napi_phy)) { + __napi_schedule_irqoff(&pdata->expansion.napi_phy); + } + + return IRQ_HANDLED; +} + +static irqreturn_t fxgmac_isr(int irq, void *data) +{ + unsigned int dma_isr, dma_ch_isr, mac_isr; + struct fxgmac_pdata *pdata = data; + struct fxgmac_channel *channel; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + unsigned int i, ti, ri; + u32 val; + + dma_isr = readreg(pdata->pAdapter, pdata->mac_regs + DMA_ISR); + + val 
= readreg(pdata->pAdapter, pdata->base_mem + MGMT_INT_CTRL0); + if (!(val & MGMT_INT_CTRL0_INT_STATUS_RXTXPHY_MASK)) + return IRQ_HANDLED; + + hw_ops->disable_mgm_interrupt(pdata); + pdata->expansion.mgm_intctrl_val = val; + + pdata->stats.mgmt_int_isr++; + + for (i = 0; i < pdata->channel_count; i++) { + channel = pdata->channel_head + i; + + dma_ch_isr = readl(FXGMAC_DMA_REG(channel, DMA_CH_SR)); + netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n", + i, dma_ch_isr); + + /* The TI or RI interrupt bits may still be set even if using + * per channel DMA interrupts. Check to be sure those are not + * enabled before using the private data napi structure. + */ + ti = FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS, + DMA_CH_SR_TI_LEN); + ri = FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS, + DMA_CH_SR_RI_LEN); + if (!pdata->per_channel_irq && (ti || ri)) { + if (napi_schedule_prep(&pdata->expansion.napi)) { + pdata->stats.napi_poll_isr++; + /* Turn on polling */ + __napi_schedule_irqoff(&pdata->expansion.napi); + } + } + + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS, + DMA_CH_SR_TPS_LEN)) + pdata->stats.tx_process_stopped++; + + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS, + DMA_CH_SR_RPS_LEN)) + pdata->stats.rx_process_stopped++; + + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS, + DMA_CH_SR_TBU_LEN)) + pdata->stats.tx_buffer_unavailable++; + + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS, + DMA_CH_SR_RBU_LEN)) + pdata->stats.rx_buffer_unavailable++; + + /* Restart the device on a Fatal Bus Error */ + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS, + DMA_CH_SR_FBE_LEN)) { + pdata->stats.fatal_bus_error++; + schedule_work(&pdata->expansion.restart_work); + } + + /* Clear all interrupt signals */ + writel(dma_ch_isr, FXGMAC_DMA_REG(channel, DMA_CH_SR)); + } + + if (FXGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS, + DMA_ISR_MACIS_LEN)) { + mac_isr = readl(pdata->mac_regs + MAC_ISR); + + if 
(FXGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS, + MAC_ISR_MMCTXIS_LEN)) + hw_ops->tx_mmc_int(pdata); + + if (FXGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS, + MAC_ISR_MMCRXIS_LEN)) + hw_ops->rx_mmc_int(pdata); + + /* Clear all interrupt signals */ + writel(mac_isr, (pdata->mac_regs + MAC_ISR)); + } + + if (pdata->expansion.mgm_intctrl_val & MGMT_INT_CTRL0_INT_STATUS_PHY) { + hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, &val); + if (napi_schedule_prep(&pdata->expansion.napi)) { + pdata->stats.napi_poll_isr++; + /* Turn on polling */ + __napi_schedule_irqoff(&pdata->expansion.napi); + } + } + + return IRQ_HANDLED; +} + +static irqreturn_t fxgmac_dma_isr(int irq, void *data) +{ + struct fxgmac_channel *channel = data; + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 regval; + int message_id; + + if (irq == channel->expansion.dma_irq_tx) { + message_id = MSI_ID_TXQ0; + hw_ops->disable_msix_one_interrupt(pdata, message_id); + regval = 0; + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_SR_TI_POS, + DMA_CH_SR_TI_LEN, 1); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + if (napi_schedule_prep(&channel->expansion.napi_tx)) { + __napi_schedule_irqoff(&channel->expansion.napi_tx); + } + } else { + message_id = channel->queue_index; + hw_ops->disable_msix_one_interrupt(pdata, message_id); + regval = 0; + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_SR_RI_POS, + DMA_CH_SR_RI_LEN, 1); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + if (napi_schedule_prep(&channel->expansion.napi_rx)) { + __napi_schedule_irqoff(&channel->expansion.napi_rx); + } + } + + return IRQ_HANDLED; +} + +#if FXGMAC_TX_HANG_TIMER_EN +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) +static void fxgmac_tx_hang_timer_handler(struct timer_list *t) +#else +static void fxgmac_tx_hang_timer_handler(unsigned long data) 
+#endif +{ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) + struct fxgmac_channel *channel = + from_timer(channel, t, expansion.tx_hang_timer); +#else + struct fxgmac_channel *channel = (struct fxgmac_channel *)data; +#endif + +#if FXGMAC_TX_HANG_CHECH_DIRTY + struct fxgmac_ring *ring = channel->tx_ring; +#endif + struct fxgmac_pdata *pdata = channel->pdata; + struct net_device *netdev = pdata->netdev; + unsigned int hw_reg_cur; + unsigned int regval; + +#if FXGMAC_TX_HANG_CHECH_DIRTY + hw_reg_cur = ring->dirty; +#else + hw_reg_cur = readl( + FXGMAC_DMA_REG(channel, 0x44 /* tx desc curr pointer reg */)); +#endif + if (hw_reg_cur == channel->expansion.tx_hang_hw_cur) { + /* hw current desc still stucked */ + if (!pdata->tx_hang_restart_queuing) { + pdata->tx_hang_restart_queuing = 1; + DPRINTK("tx_hang_timer_handler: restart scheduled, at desc %u, queuing=%u.\n", + channel->expansion.tx_hang_hw_cur, + pdata->tx_hang_restart_queuing); + + netif_tx_stop_all_queues(netdev); + + /* Disable MAC Rx */ + regval = readl(pdata->mac_regs + MAC_CR); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_CST_POS, + MAC_CR_CST_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_ACS_POS, + MAC_CR_ACS_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, + MAC_CR_RE_LEN, 0); + writel(regval, pdata->mac_regs + MAC_CR); + + schedule_work(&pdata->expansion.restart_work); + } + } + + channel->expansion.tx_hang_timer_active = 0; +} + +static void fxgmac_tx_hang_timer_start(struct fxgmac_channel *channel) +{ + struct fxgmac_pdata *pdata = channel->pdata; + + /* Start the Tx hang timer */ + if (1 && !channel->expansion.tx_hang_timer_active) { + channel->expansion.tx_hang_timer_active = 1; + + /* FXGMAC_INIT_DMA_TX_USECS is desc3 polling period, we give 2 more checking period */ + mod_timer(&channel->expansion.tx_hang_timer, + jiffies + usecs_to_jiffies(FXGMAC_INIT_DMA_TX_USECS * + 10)); + } +} +#endif + +static void fxgmac_napi_enable(struct fxgmac_pdata *pdata, unsigned int 
add) +{ + struct fxgmac_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (add) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + netif_napi_add_weight( + pdata->netdev, + &channel->expansion.napi_rx, + fxgmac_one_poll_rx, NAPI_POLL_WEIGHT); +#else + netif_napi_add(pdata->netdev, + &channel->expansion.napi_rx, + fxgmac_one_poll_rx, + NAPI_POLL_WEIGHT); +#endif + } + napi_enable(&channel->expansion.napi_rx); + + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + netif_napi_add_weight( + pdata->netdev, + &channel->expansion.napi_tx, + fxgmac_one_poll_tx, NAPI_POLL_WEIGHT); +#else + netif_napi_add(pdata->netdev, + &channel->expansion.napi_tx, + fxgmac_one_poll_tx, + NAPI_POLL_WEIGHT); +#endif + napi_enable(&channel->expansion.napi_tx); + } + if (netif_msg_drv(pdata)) + DPRINTK("napi_enable, msix ch%d napi enabled done, add=%d\n", + i, add); + } + + /* for phy */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + netif_napi_add_weight(pdata->netdev, &pdata->expansion.napi_phy, + fxgmac_phy_poll, NAPI_POLL_WEIGHT); +#else + netif_napi_add(pdata->netdev, &pdata->expansion.napi_phy, + fxgmac_phy_poll, NAPI_POLL_WEIGHT); +#endif + napi_enable(&pdata->expansion.napi_phy); + } else { + i = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_NAPI_FREE_POS, + FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN); + if (!i) { + if (add) { +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + netif_napi_add_weight(pdata->netdev, + &pdata->expansion.napi, + fxgmac_all_poll, + NAPI_POLL_WEIGHT); +#else + netif_napi_add(pdata->netdev, + &pdata->expansion.napi, + fxgmac_all_poll, + NAPI_POLL_WEIGHT); +#endif + } + + napi_enable(&pdata->expansion.napi); + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_NAPI_FREE_POS, + FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN, 1); + } + } 
+} + +static void fxgmac_napi_disable(struct fxgmac_pdata *pdata, unsigned int del) +{ + struct fxgmac_channel *channel; + unsigned int i; + + if (pdata->per_channel_irq) { + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + napi_disable(&channel->expansion.napi_rx); + + if (del) { + netif_napi_del( + &channel->expansion.napi_rx); + } + + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + napi_disable( + &channel->expansion.napi_tx); + netif_napi_del( + &channel->expansion.napi_tx); + } + if (netif_msg_drv(pdata)) + DPRINTK("napi_disable, msix ch%d napi disabled done, del=%d\n", + i, del); + } + + napi_disable(&pdata->expansion.napi_phy); + netif_napi_del(&pdata->expansion.napi_phy); + } + } else { + i = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_NAPI_FREE_POS, + FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN); + if (i) { + napi_disable(&pdata->expansion.napi); + + if (del) + netif_napi_del(&pdata->expansion.napi); + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_NAPI_FREE_POS, + FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN, 0); + } + } +} + +static int fxgmac_request_irqs(struct fxgmac_pdata *pdata) +{ + struct net_device *netdev = pdata->netdev; + struct fxgmac_channel *channel; + unsigned int i; + int ret; + u32 msi, msix, need_free; + + msi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MSI_POS, FXGMAC_FLAG_MSI_LEN); + + msix = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MSIX_POS, FXGMAC_FLAG_MSIX_LEN); + + need_free = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, + FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN); + + if (!msix) { + if (!need_free) { + ret = devm_request_irq(pdata->dev, pdata->dev_irq, + fxgmac_isr, + msi ? 
0 : IRQF_SHARED, + netdev->name, pdata); + if (ret) { + netdev_alert( + netdev, + "error requesting irq %d, ret = %d\n", + pdata->dev_irq, ret); + return ret; + } + + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, + FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN, 1); + } + } + + if (!pdata->per_channel_irq) + return 0; + + ret = devm_request_irq(pdata->dev, pdata->expansion.phy_irq, + fxgmac_phy_isr, 0, netdev->name, pdata); + if (ret) { + netdev_alert(netdev, "error requesting phy irq %d, ret = %d\n", + pdata->expansion.phy_irq, ret); + return ret; + } + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + snprintf(channel->expansion.dma_irq_name, + sizeof(channel->expansion.dma_irq_name) - 1, + "%s-ch%d-Rx-%u", netdev_name(netdev), i, + channel->queue_index); + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + snprintf(channel->expansion.dma_irq_name_tx, + sizeof(channel->expansion.dma_irq_name_tx) - 1, + "%s-ch%d-Tx-%u", netdev_name(netdev), i, + channel->queue_index); + + ret = devm_request_irq( + pdata->dev, channel->expansion.dma_irq_tx, + fxgmac_dma_isr, 0, + channel->expansion.dma_irq_name_tx, channel); + + if (ret) { + DPRINTK("fxgmac_req_irqs, err with MSIx irq request for ch %d tx, ret=%d\n", + i, ret); + /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ + devm_free_irq(pdata->dev, + channel->expansion.dma_irq_tx, + channel); + return ret; + } + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_req_irqs, MSIx irq_tx request ok, ch=%d, irq=%d,%s\n", + i, channel->expansion.dma_irq_tx, + channel->expansion.dma_irq_name_tx); + } + ret = devm_request_irq(pdata->dev, channel->dma_irq, + fxgmac_dma_isr, 0, + channel->expansion.dma_irq_name, + channel); + if (ret) { + netdev_alert(netdev, "error requesting irq %d\n", + channel->dma_irq); + goto err_irq; + } + } + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_req_irqs, MSIx irq request ok, total=%d,%d~%d\n", + i, 
(pdata->channel_head)[0].dma_irq, + (pdata->channel_head)[i - 1].dma_irq); + return 0; + +err_irq: + DPRINTK("fxgmac_req_irqs, err with MSIx irq request at %d, ret=%d\n", i, + ret); + + if (pdata->per_channel_irq) { + for (i--, channel--; i < pdata->channel_count; i--, channel--) { + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + devm_free_irq(pdata->dev, + channel->expansion.dma_irq_tx, + channel); + } + devm_free_irq(pdata->dev, channel->dma_irq, channel); + } + + devm_free_irq(pdata->dev, pdata->expansion.phy_irq, pdata); + } + return ret; +} + +static void fxgmac_free_irqs(struct fxgmac_pdata *pdata) +{ + struct fxgmac_channel *channel; + unsigned int i = 0; + u32 need_free, msix; + + msix = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MSIX_POS, FXGMAC_FLAG_MSIX_LEN); + + need_free = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, + FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN); + + if (!msix) { + if (need_free) { + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, + FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN, 0); + } + } + + if (!pdata->per_channel_irq) + return; + + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + devm_free_irq(pdata->dev, + channel->expansion.dma_irq_tx, + channel); + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_free_irqs, MSIx irq_tx clear done, ch=%d\n", + i); + } + devm_free_irq(pdata->dev, channel->dma_irq, channel); + } + + devm_free_irq(pdata->dev, pdata->expansion.phy_irq, pdata); + } + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_free_irqs, MSIx rx irq clear done, total=%d\n", + i); +} + +void fxgmac_free_tx_data(struct fxgmac_pdata *pdata) +{ + struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops; + struct fxgmac_desc_data *desc_data; + struct fxgmac_channel *channel; + struct fxgmac_ring 
*ring; + unsigned int i, j; + + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->tx_ring; + if (!ring) + break; + + for (j = 0; j < ring->dma_desc_count; j++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, j); + desc_ops->unmap_desc_data(pdata, desc_data); + } + } + } +} + +void fxgmac_free_rx_data(struct fxgmac_pdata *pdata) +{ + struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops; + struct fxgmac_desc_data *desc_data; + struct fxgmac_channel *channel; + struct fxgmac_ring *ring; + unsigned int i, j; + + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->rx_ring; + if (!ring) + break; + + for (j = 0; j < ring->dma_desc_count; j++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, j); + desc_ops->unmap_desc_data(pdata, desc_data); + } + } + } +} + +/* + * since kernel does not clear the MSI mask bits and + * this function clear MSI mask bits when MSI is enabled. + */ +static int fxgmac_disable_pci_msi_config(struct pci_dev *pdev) +{ + u16 pcie_cap_offset; + u32 pcie_msi_mask_bits; + int ret = 0; + + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_MSI); + if (pcie_cap_offset) { + ret = pci_read_config_dword(pdev, pcie_cap_offset, + &pcie_msi_mask_bits); + if (ret) { + printk(KERN_ERR + "read pci config space MSI cap. 
failed, %d\n", + ret); + ret = -EFAULT; + } + } + + pcie_msi_mask_bits = FXGMAC_SET_REG_BITS(pcie_msi_mask_bits, + PCI_CAP_ID_MSI_ENABLE_POS, + PCI_CAP_ID_MSI_ENABLE_LEN, 0); + ret = pci_write_config_dword(pdev, pcie_cap_offset, pcie_msi_mask_bits); + if (ret) { + printk(KERN_ERR "write pci config space MSI mask failed, %d\n", + ret); + ret = -EFAULT; + } + + return ret; +} + +static int fxgmac_disable_pci_msix_config(struct pci_dev *pdev) +{ + u16 pcie_cap_offset; + u32 pcie_msi_mask_bits; + int ret = 0; + + pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (pcie_cap_offset) { + ret = pci_read_config_dword(pdev, pcie_cap_offset, + &pcie_msi_mask_bits); + if (ret) { + printk(KERN_ERR + "read pci config space MSIX cap. failed, %d\n", + ret); + ret = -EFAULT; + } + } + + pcie_msi_mask_bits = FXGMAC_SET_REG_BITS(pcie_msi_mask_bits, + PCI_CAP_ID_MSIX_ENABLE_POS, + PCI_CAP_ID_MSIX_ENABLE_LEN, 0); + ret = pci_write_config_dword(pdev, pcie_cap_offset, pcie_msi_mask_bits); + if (ret) { + printk(KERN_ERR "write pci config space MSIX mask failed, %d\n", + ret); + ret = -EFAULT; + } + + return ret; +} + +int fxgmac_start(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct net_device *netdev = pdata->netdev; + int ret; + unsigned int pcie_low_power = 0; + u32 regval; + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac start callin here.\n"); + + /* must reset software again here, to avoid flushing tx queue error caused by the system only run probe + * when installing driver on the arm platform. + */ + hw_ops->exit(pdata); + + if (FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_LEGACY_POS, + FXGMAC_FLAG_LEGACY_LEN)) { + /* + * we should disable msi and msix here when we use legacy interrupt, for two reasons: + * 1. Exit will restore msi and msix config regisiter, that may enable them. + * 2. 
When the driver that uses the msix interrupt by default is compiled + * into the OS, uninstall the driver through rmmod, and then install the + * driver that uses the legacy interrupt, at which time the msix enable + * will be turned on again by default after waking up from S4 on some platform. + * such as UOS platform. + */ + ret = fxgmac_disable_pci_msi_config(pdata->pdev); + ret |= fxgmac_disable_pci_msix_config(pdata->pdev); + if (ret) + return ret; + } + + hw_ops->reset_phy(pdata); + hw_ops->release_phy(pdata); + hw_ops->pcie_init(pdata, pcie_low_power & PCIE_LP_ASPM_LTR, + pcie_low_power & PCIE_LP_ASPM_L1SS, + pcie_low_power & PCIE_LP_ASPM_L1, + pcie_low_power & PCIE_LP_ASPM_L0S); + hw_ops->config_power_up(pdata); + + fxgmac_dismiss_all_int(pdata); + + ret = hw_ops->init(pdata); + if (ret) { + printk("fxgmac hw init error.\n"); + return ret; + } + fxgmac_napi_enable(pdata, 1); + + ret = fxgmac_request_irqs(pdata); + if (ret) + goto err_napi; + + hw_ops->enable_tx(pdata); + hw_ops->enable_rx(pdata); + + /* config interrupt to level signal */ + regval = (u32)readl((const volatile void *)(pdata->mac_regs + DMA_MR)); + regval = FXGMAC_SET_REG_BITS(regval, DMA_MR_INTM_POS, DMA_MR_INTM_LEN, + 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_MR_QUREAD_POS, + DMA_MR_QUREAD_LEN, 1); + writel(regval, pdata->mac_regs + DMA_MR); + + writel(0xF0000000, + (volatile void *)(netdev->base_addr + MGMT_INT_CTRL0)); + + hw_ops->set_interrupt_moderation(pdata); + + if (pdata->per_channel_irq) + hw_ops->enable_msix_rxtxphyinterrupt(pdata); + + fxgmac_enable_rx_tx_ints(pdata); + + hw_ops->led_under_active(pdata); + + return 0; + +err_napi: + fxgmac_napi_disable(pdata, 1); + hw_ops->exit(pdata); + DPRINTK("fxgmac start callout with irq err.\n"); + return ret; +} + +void fxgmac_stop(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct net_device *netdev = pdata->netdev; + struct fxgmac_channel *channel; + struct netdev_queue *txq; + unsigned int i; + 
+ if (pdata->per_channel_irq) { + hw_ops->disable_msix_interrupt(pdata); + } else { + hw_ops->disable_mgm_interrupt(pdata); + } + + pdata->expansion.phy_link = false; + + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + hw_ops->disable_tx(pdata); + hw_ops->disable_rx(pdata); + fxgmac_free_irqs(pdata); + fxgmac_napi_disable(pdata, 1); + + channel = pdata->channel_head; + if (channel != NULL) { + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + continue; + + txq = netdev_get_tx_queue(netdev, channel->queue_index); + netdev_tx_reset_queue(txq); + } + } + + switch (pdata->expansion.current_state) { + case CURRENT_STATE_SUSPEND: + hw_ops->led_under_sleep(pdata); + break; + case CURRENT_STATE_SHUTDOWN: + case CURRENT_STATE_RESTART: + hw_ops->led_under_shutdown(pdata); + break; + case CURRENT_STATE_CLOSE: + break; + default: + break; + } +} + +void fxgmac_restart_dev(struct fxgmac_pdata *pdata) +{ + int ret; + + /* If not running, "restart" will happen on open */ + if (!netif_running(pdata->netdev)) + return; + + pdata->expansion.current_state = CURRENT_STATE_RESTART; + fxgmac_stop(pdata); + + fxgmac_free_tx_data(pdata); + fxgmac_free_rx_data(pdata); + + ret = fxgmac_start(pdata); + if (ret) { + printk("fxgmac_restart_dev: fxgmac_start failed.\n"); + } +} + +static void fxgmac_restart(struct work_struct *work) +{ + struct fxgmac_pdata *pdata = + container_of(work, struct fxgmac_pdata, expansion.restart_work); + + rtnl_lock(); + + fxgmac_restart_dev(pdata); + + rtnl_unlock(); +} + +void fxgmac_net_powerup(struct fxgmac_pdata *pdata) +{ + int ret; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_net_powerup callin\n"); + + /* signal that we are up now */ + pdata->expansion.powerstate = 0; /* clear all bits as normal now */ + if (__test_and_set_bit(FXGMAC_POWER_STATE_UP, + &pdata->expansion.powerstate)) { + return; /* do nothing if already up */ + } + + ret = 
fxgmac_start(pdata); + if (ret) { + printk("fxgmac_net_powerup: fxgmac_start error\n"); + return; + } + + /* must call it after fxgmac_start, because it will be enable in fxgmac_start */ + hw_ops->disable_arp_offload(pdata); + + if (netif_msg_drv(pdata)) { + DPRINTK("fxgmac_net_powerup callout, powerstate=%ld.\n", + pdata->expansion.powerstate); + } +} + +void fxgmac_net_powerdown(struct fxgmac_pdata *pdata, unsigned int wol) +{ + struct net_device *netdev = pdata->netdev; + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_net_powerdown callin here.\n"); + + /* signal that we are down to the interrupt handler */ + if (__test_and_set_bit(FXGMAC_POWER_STATE_DOWN, + &pdata->expansion.powerstate)) + return; /* do nothing if already down */ + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_net_powerdown continue with down process.\n"); + /* phy polling timer should detect the state of fxgmac and stop link status polling accordingly */ + + __clear_bit(FXGMAC_POWER_STATE_UP, &pdata->expansion.powerstate); + + /* Shut off incoming Tx traffic */ + netif_tx_stop_all_queues(netdev); + + /* call carrier off first to avoid false dev_watchdog timeouts */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + /* Disable Rx */ + hw_ops->disable_rx(pdata); + + /* synchronize_rcu() needed for pending XDP buffers to drain */ + synchronize_rcu(); + + fxgmac_stop(pdata); /* some works are redundent in this call */ + + /* must call it after software reset */ + hw_ops->pre_power_down(pdata, false); + + /* set mac to lowpower mode and enable wol accordingly */ + hw_ops->config_power_down(pdata, wol); + + /* handle vfs if it is envolved */ + + /* similar work as in restart() for that, we do need a resume laterly */ + fxgmac_free_tx_data(pdata); + fxgmac_free_rx_data(pdata); + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_net_powerdown callout, powerstate=%ld.\n", + pdata->expansion.powerstate); +} + +static int fxgmac_open(struct net_device 
*netdev) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_desc_ops *desc_ops; + int ret; + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_open callin\n"); + + desc_ops = &pdata->desc_ops; + + /* TODO: Initialize the phy */ + + /* Calculate the Rx buffer size before allocating rings */ + ret = fxgmac_calc_rx_buf_size(netdev, netdev->mtu); + if (ret < 0) + return ret; + pdata->rx_buf_size = ret; + + /* Allocate the channels and rings */ + ret = desc_ops->alloc_channles_and_rings(pdata); + if (ret) + return ret; + + INIT_WORK(&pdata->expansion.restart_work, fxgmac_restart); + + ret = fxgmac_start(pdata); + if (ret) + goto err_channels_and_rings; + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_open callout\n"); + + return 0; + +err_channels_and_rings: + desc_ops->free_channels_and_rings(pdata); + DPRINTK("fxgmac_open callout with channel alloc err\n"); + return ret; +} + +static int fxgmac_close(struct net_device *netdev) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_desc_ops *desc_ops; + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_close callin\n"); + + desc_ops = &pdata->desc_ops; + + pdata->expansion.current_state = + (pdata->expansion.current_state == CURRENT_STATE_SHUTDOWN) ? 
+ pdata->expansion.current_state : + CURRENT_STATE_CLOSE; + + /* Stop the device */ + fxgmac_stop(pdata); + + /* Free the channels and rings */ + desc_ops->free_channels_and_rings(pdata); + + pdata->hw_ops.reset_phy(pdata); + + if (netif_msg_drv(pdata)) + DPRINTK("fxgmac_close callout\n"); + + return 0; +} + +#if ((LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0))) +static void fxgmac_tx_timeout(struct net_device *netdev) +#else +static void fxgmac_tx_timeout(struct net_device *netdev, unsigned int unused) +#endif +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + + netdev_warn(netdev, "tx timeout, device restarting\n"); +#if FXGMAC_TX_HANG_TIMER_EN + if (!pdata->tx_hang_restart_queuing) + schedule_work(&pdata->expansion.restart_work); +#else + schedule_work(&pdata->expansion.restart_work); +#endif +} + +static int fxgmac_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_pkt_info *tx_pkt_info; + struct fxgmac_desc_ops *desc_ops; + struct fxgmac_channel *channel; + struct fxgmac_hw_ops *hw_ops; + struct netdev_queue *txq; + struct fxgmac_ring *ring; + int ret; + + desc_ops = &pdata->desc_ops; + hw_ops = &pdata->hw_ops; + + if (netif_msg_tx_done(pdata)) + DPRINTK("xmit callin, skb->len=%d, q=%d\n", skb->len, + skb->queue_mapping); + + channel = pdata->channel_head + skb->queue_mapping; + txq = netdev_get_tx_queue(netdev, channel->queue_index); + ring = channel->tx_ring; + tx_pkt_info = &ring->pkt_info; + + if (skb->len == 0) { + netif_err(pdata, tx_err, netdev, + "empty skb received from stack\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* Prepare preliminary packet info for TX */ + memset(tx_pkt_info, 0, sizeof(*tx_pkt_info)); + fxgmac_prep_tx_pkt(pdata, ring, skb, tx_pkt_info); + + /* Check that there are enough descriptors available */ + ret = fxgmac_maybe_stop_tx_queue(channel, ring, + tx_pkt_info->desc_count); + if 
(ret) {
+		return ret;
+	}
+
+	/* Build TSO context info; drop the skb and abort on failure. */
+	ret = fxgmac_prep_tso(pdata, skb, tx_pkt_info);
+	if (ret) {
+		netif_err(pdata, tx_err, netdev,
+			  "error processing TSO packet\n");
+		DPRINTK("dev_xmit, tx err for TSO\n");
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+	fxgmac_prep_vlan(skb, tx_pkt_info);
+
+	/* DMA-map the skb; on failure free it but still return NETDEV_TX_OK
+	 * so the stack does not requeue the same skb.
+	 */
+	if (!desc_ops->map_tx_skb(channel, skb)) {
+		dev_kfree_skb_any(skb);
+		DPRINTK("xmit, map tx skb err\n");
+		return NETDEV_TX_OK;
+	}
+
+	/* Report on the actual number of bytes (to be) sent */
+	netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes);
+	if (netif_msg_tx_done(pdata))
+		DPRINTK("xmit, before hw_xmit, byte len=%d\n",
+			tx_pkt_info->tx_bytes);
+
+	/* Configure required descriptor fields for transmission */
+	hw_ops->dev_xmit(channel);
+#if FXGMAC_DUMMY_TX_DEBUG
+	DPRINTK("tx hw_ops->dev_xmit ok\n");
+#endif
+	if (netif_msg_pktdata(pdata))
+		fxgmac_dbg_pkt(netdev, skb, true);
+
+	/* Stop the queue in advance if there may not be enough descriptors */
+	fxgmac_maybe_stop_tx_queue(channel, ring, FXGMAC_TX_MAX_DESC_NR);
+
+	return NETDEV_TX_OK;
+}
+
+/* ndo_get_stats64: fill @s from the hardware MMC counters.  Before
+ * kernel 4.12 this ndo returned the stats pointer, hence the two
+ * prototypes.
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+static void fxgmac_get_stats64(struct net_device *netdev,
+			       struct rtnl_link_stats64 *s)
+#else
+static struct rtnl_link_stats64 *fxgmac_get_stats64(struct net_device *netdev,
+						    struct rtnl_link_stats64 *s)
+#endif
+{
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+	struct fxgmac_stats *pstats = &pdata->stats;
+
+#if FXGMAC_PM_FEATURE_ENABLED
+	/* 20210709 for net power down */
+	/* Do not touch MMC registers while the NIC is powered down. */
+	if (!test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate))
+#endif
+	{
+		pdata->hw_ops.read_mmc_stats(pdata);
+	}
+	s->rx_packets = pstats->rxframecount_gb;
+	s->rx_bytes = pstats->rxoctetcount_gb;
+	/* rx_errors = all received frames minus the "good" bcast/mcast/ucast. */
+	s->rx_errors = pstats->rxframecount_gb - pstats->rxbroadcastframes_g -
+		       pstats->rxmulticastframes_g - pstats->rxunicastframes_g;
+	s->multicast = pstats->rxmulticastframes_g;
+	s->rx_length_errors = pstats->rxlengtherror;
+	s->rx_crc_errors = pstats->rxcrcerror;
+	s->rx_fifo_errors = pstats->rxfifooverflow;
+
+	s->tx_packets = pstats->txframecount_gb;
+	s->tx_bytes = pstats->txoctetcount_gb;
+	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
+	s->tx_dropped = netdev->stats.tx_dropped;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+	return s;
+#endif
+}
+
+/* ndo_set_mac_address: validate and program a new unicast MAC address
+ * into the netdev, the driver copy and the hardware filters.
+ */
+static int fxgmac_set_mac_address(struct net_device *netdev, void *addr)
+{
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	struct sockaddr *saddr = addr;
+
+	if (!is_valid_ether_addr(saddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0))
+	/* dev_addr is const since 5.17; must use the accessor. */
+	eth_hw_addr_set(netdev, saddr->sa_data);
+#else
+	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+#endif
+	memcpy(pdata->mac_addr, saddr->sa_data, netdev->addr_len);
+
+	hw_ops->set_mac_address(pdata, saddr->sa_data);
+	hw_ops->set_mac_hash(pdata);
+
+	DPRINTK("fxgmac, set mac addr to %02x:%02x:%02x:%02x:%02x:%02x\n",
+		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
+		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
+	return 0;
+}
+
+/* cmd = [0x89F0, 0x89FF] */
+/* Private ioctl entry: forwards FXGMAC_DEV_CMD to the debugfs ioctl
+ * handler through a stack-local struct file that carries only
+ * private_data (the handler presumably reads nothing else - TODO confirm).
+ */
+static int fxgmac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	struct file f;
+	int ret = FXGMAC_SUCCESS;
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+
+	if (!netif_running(netdev))
+		return -ENODEV;
+
+	f.private_data = pdata;
+
+	switch (cmd) {
+	case FXGMAC_DEV_CMD:
+		ret = fxgmac_dbg_netdev_ops_ioctl(
+			&f, FXGMAC_IOCTL_DFS_COMMAND,
+			(unsigned long)(ifr->ifr_data));
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+/* Since 5.15 private ioctls arrive via ndo_siocdevprivate; delegate to
+ * the legacy handler above.
+ */
+static int fxgmac_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
+				 void __user *data, int cmd)
+{
+	return fxgmac_ioctl(dev, ifr, cmd);
+}
+#endif
+
+/* ndo_change_mtu: the device is stopped and all Tx/Rx DMA buffers are
+ * released before the Rx buffer size (derived from the MTU) changes.
+ */
+static int fxgmac_change_mtu(struct net_device *netdev, int mtu)
+{
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+	int ret;
+#ifdef FXGMAC_DEBUG
+	/* NOTE(review): old_mtu is referenced by the DPRINTK below even when
+	 * FXGMAC_DEBUG is undefined - presumably DPRINTK compiles out its
+	 * arguments in that configuration; confirm.
+	 */
+	int old_mtu = netdev->mtu;
+#endif
+
+	fxgmac_stop(pdata);
+	fxgmac_free_tx_data(pdata);
+
+	/* We must unmap rx desc's dma before we change rx_buf_size. */
+	/* Because the size of the unmapped DMA is set according to rx_buf_size */
+	fxgmac_free_rx_data(pdata);
+
+	pdata->jumbo = mtu > ETH_DATA_LEN ? 1 : 0;
+
+	/* NOTE(review): on this error return the device stays stopped with
+	 * its buffers freed - confirm that is the intended failure mode.
+	 */
+	ret = fxgmac_calc_rx_buf_size(netdev, mtu);
+	if (ret < 0)
+		return ret;
+
+	pdata->rx_buf_size = ret;
+	netdev->mtu = mtu;
+
+	if (netif_running(netdev))
+		fxgmac_start(pdata);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	DPRINTK("fxgmac, set MTU from %d to %d. min, max=(%d,%d)\n", old_mtu,
+		netdev->mtu, netdev->min_mtu, netdev->max_mtu);
+#else
+	DPRINTK("fxgmac, set MTU from %d to %d.\n", old_mtu, netdev->mtu);
+#endif
+
+	return 0;
+}
+
+/* ndo_vlan_rx_add_vid: record the VLAN id and reprogram the Rx filter. */
+static int fxgmac_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
+				  u16 vid)
+{
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+	set_bit(vid, pdata->active_vlans);
+#if FXGMAC_FILTER_SINGLE_VLAN_ENABLED
+	/* Single-VLAN filter mode: the most recently added vid wins. */
+	pdata->vlan = vid;
+	hw_ops->enable_rx_vlan_filtering(pdata);
+#else
+	hw_ops->update_vlan_hash_table(pdata);
+#endif
+	DPRINTK("fxgmac, add rx vlan %d\n", vid);
+	return 0;
+}
+
+/* ndo_vlan_rx_kill_vid: forget the VLAN id and reprogram the Rx filter. */
+static int fxgmac_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
+				   u16 vid)
+{
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+	clear_bit(vid, pdata->active_vlans);
+#if FXGMAC_FILTER_SINGLE_VLAN_ENABLED
+	pdata->vlan = 0;
+	hw_ops->disable_rx_vlan_filtering(pdata);
+#else
+	hw_ops->update_vlan_hash_table(pdata);
+#endif
+
+	DPRINTK("fxgmac, del rx vlan %d\n", vid);
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* netpoll hook: service the interrupt handlers directly, used by
+ * netconsole and friends when normal interrupts are unavailable.
+ */
+static void fxgmac_poll_controller(struct net_device *netdev)
+{
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+	struct fxgmac_channel *channel;
+	unsigned int i;
+
+	if (pdata->per_channel_irq) {
+		channel = pdata->channel_head;
+		for (i = 0; i < pdata->channel_count; i++,
channel++)
+			fxgmac_dma_isr(channel->dma_irq, channel);
+	} else {
+		disable_irq(pdata->dev_irq);
+		fxgmac_isr(pdata->dev_irq, pdata);
+		enable_irq(pdata->dev_irq);
+	}
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+/* ndo_set_features: compare the requested feature set against the
+ * currently-programmed one and reconfigure only the hardware blocks
+ * whose bits changed (TSO, RSS, Rx checksum, VLAN strip/filter).
+ */
+static int fxgmac_set_features(struct net_device *netdev,
+			       netdev_features_t features)
+{
+	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter, tso;
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	int ret = 0;
+
+	/* Snapshot of the features currently programmed into hardware. */
+	rxhash = pdata->expansion.netdev_features & NETIF_F_RXHASH;
+	rxcsum = pdata->expansion.netdev_features & NETIF_F_RXCSUM;
+	rxvlan = pdata->expansion.netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
+	rxvlan_filter = pdata->expansion.netdev_features &
+			NETIF_F_HW_VLAN_CTAG_FILTER;
+	tso = pdata->expansion.netdev_features & (NETIF_F_TSO | NETIF_F_TSO6);
+
+	/* NOTE(review): printk without a KERN_* level below. */
+	if ((features & (NETIF_F_TSO | NETIF_F_TSO6)) && !tso) {
+		printk("enable tso.\n");
+		pdata->hw_feat.tso = 1;
+		hw_ops->config_tso(pdata);
+	} else if (!(features & (NETIF_F_TSO | NETIF_F_TSO6)) && tso) {
+		printk("disable tso.\n");
+		pdata->hw_feat.tso = 0;
+		hw_ops->config_tso(pdata);
+	}
+
+	if ((features & NETIF_F_RXHASH) && !rxhash)
+		ret = hw_ops->enable_rss(pdata);
+	else if (!(features & NETIF_F_RXHASH) && rxhash)
+		ret = hw_ops->disable_rss(pdata);
+	if (ret)
+		return ret;
+
+	if ((features & NETIF_F_RXCSUM) && !rxcsum)
+		hw_ops->enable_rx_csum(pdata);
+	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+		hw_ops->disable_rx_csum(pdata);
+
+	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
+		hw_ops->enable_rx_vlan_stripping(pdata);
+	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
+		hw_ops->disable_rx_vlan_stripping(pdata);
+
+	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
+		hw_ops->enable_rx_vlan_filtering(pdata);
+	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
+		hw_ops->disable_rx_vlan_filtering(pdata);
+
+	pdata->expansion.netdev_features = features;
+
+	DPRINTK("fxgmac, set features done,%llx\n", (u64)features);
+	return 0;
+}
+
+/* ndo_set_rx_mode: push promiscuous/allmulti/multicast filtering to the MAC. */
+static void fxgmac_set_rx_mode(struct net_device *netdev)
+{
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+	hw_ops->config_rx_mode(pdata);
+}
+
+static const struct net_device_ops fxgmac_netdev_ops = {
+	.ndo_open = fxgmac_open,
+	.ndo_stop = fxgmac_close,
+	.ndo_start_xmit = fxgmac_xmit,
+	.ndo_tx_timeout = fxgmac_tx_timeout,
+	.ndo_get_stats64 = fxgmac_get_stats64,
+	.ndo_change_mtu = fxgmac_change_mtu,
+	.ndo_set_mac_address = fxgmac_set_mac_address,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_do_ioctl = fxgmac_ioctl,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
+	.ndo_siocdevprivate = fxgmac_siocdevprivate,
+#endif
+	.ndo_vlan_rx_add_vid = fxgmac_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = fxgmac_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = fxgmac_poll_controller,
+#endif
+	.ndo_set_features = fxgmac_set_features,
+	.ndo_set_rx_mode = fxgmac_set_rx_mode,
+};
+
+/* Accessor used by the init code to install the netdev ops table. */
+const struct net_device_ops *fxgmac_get_netdev_ops(void)
+{
+	return &fxgmac_netdev_ops;
+}
+
+/* Re-arm all consumed Rx descriptors (dirty..cur) and advance the Rx
+ * tail pointer register so the DMA engine can reuse them.
+ */
+static void fxgmac_rx_refresh(struct fxgmac_channel *channel)
+{
+	struct fxgmac_pdata *pdata = channel->pdata;
+	struct fxgmac_ring *ring = channel->rx_ring;
+	struct fxgmac_desc_data *desc_data;
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+	while (ring->dirty != ring->cur) {
+		desc_data = FXGMAC_GET_DESC_DATA(ring, ring->dirty);
+		hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty);
+		ring->dirty =
+			FXGMAC_GET_ENTRY(ring->dirty, ring->dma_desc_count);
+	}
+
+	/* Make sure everything is written before the register write */
+	wmb();
+
+	/* Update the Rx Tail Pointer Register with address of
+	 * the last cleaned entry
+	 */
+	desc_data = FXGMAC_GET_DESC_DATA(
+		ring, (ring->dirty - 1) & (ring->dma_desc_count - 1));
+	writel(lower_32_bits(desc_data->dma_desc_addr),
+	       FXGMAC_DMA_REG(channel, DMA_CH_RDTR_LO));
+}
+
+static struct sk_buff
*fxgmac_create_skb(struct fxgmac_pdata *pdata,
+				   struct napi_struct *napi,
+				   struct fxgmac_desc_data *desc_data,
+				   unsigned int len)
+{
+	/* Copy-based Rx: allocate a fresh skb and memcpy @len bytes out of
+	 * the descriptor's DMA buffer, which is then handed back to hardware.
+	 */
+	struct sk_buff *skb;
+	skb = __netdev_alloc_skb_ip_align(pdata->netdev, len, GFP_ATOMIC);
+	if (!skb) {
+		netdev_err(pdata->netdev, "%s: Rx init fails; skb is NULL\n",
+			   __func__);
+		return NULL;
+	}
+
+	/* Hand the buffer to the CPU only for the duration of the copy. */
+	dma_sync_single_for_cpu(pdata->dev, desc_data->rx.buf.dma_base, len,
+				DMA_FROM_DEVICE);
+	skb_copy_to_linear_data(skb, desc_data->skb->data, len);
+	skb_put(skb, len);
+	dma_sync_single_for_device(pdata->dev, desc_data->rx.buf.dma_base, len,
+				   DMA_FROM_DEVICE);
+
+	return skb;
+}
+
+/* Reclaim completed Tx descriptors for @channel, free their skbs, update
+ * BQL accounting and wake the queue when enough descriptors are free.
+ * Returns the number of descriptors processed.
+ */
+static int fxgmac_tx_poll(struct fxgmac_channel *channel)
+{
+	struct fxgmac_pdata *pdata = channel->pdata;
+	struct fxgmac_ring *ring = channel->tx_ring;
+	struct net_device *netdev = pdata->netdev;
+	unsigned int tx_packets = 0, tx_bytes = 0;
+	struct fxgmac_desc_data *desc_data;
+	struct fxgmac_dma_desc *dma_desc;
+	struct fxgmac_desc_ops *desc_ops;
+	struct fxgmac_hw_ops *hw_ops;
+	struct netdev_queue *txq;
+	int processed = 0;
+	unsigned int cur;
+
+	/* NOTE(review): these function-static variables are shared across
+	 * ALL channels and ALL adapter instances; the hang-detection state
+	 * would interfere between devices - confirm single-queue/single-NIC
+	 * assumption.
+	 */
+	static int fxgmac_restart_need;
+	static u32 change_cnt;
+	static u32 reg_cur_pre = 0xffffffff;
+
+#if FXGMAC_TX_HANG_TIMER_EN
+	static u32 reg_cur;
+#endif
+
+	desc_ops = &pdata->desc_ops;
+	hw_ops = &pdata->hw_ops;
+
+	/* Nothing to do if there isn't a Tx ring for this channel */
+	if (!ring) {
+		if (netif_msg_tx_done(pdata) &&
+		    (channel->queue_index < pdata->tx_q_count))
+			DPRINTK("tx_poll, null point to ring %d\n",
+				channel->queue_index);
+		return 0;
+	}
+	if ((ring->cur != ring->dirty) && (netif_msg_tx_done(pdata)))
+		DPRINTK("tx_poll callin, ring_cur=%d, ring_dirty=%d, qIdx=%d\n",
+			ring->cur, ring->dirty, channel->queue_index);
+
+	cur = ring->cur;
+
+	/* Be sure we get ring->cur before accessing descriptor data */
+	smp_rmb();
+
+	txq = netdev_get_tx_queue(netdev, channel->queue_index);
+
+	while (ring->dirty != cur) {
+		desc_data = FXGMAC_GET_DESC_DATA(ring, ring->dirty);
+		dma_desc = desc_data->dma_desc;
+
+		/* Descriptor still owned by hardware: run the optional Tx
+		 * hang detection before giving up on this pass.
+		 */
+		if (!hw_ops->tx_complete(dma_desc)) {
+#if FXGMAC_TRIGGER_TX_HANG
+			struct net_device *netdev = pdata->netdev;
+#define FXGMAC_HANG_THRESHOLD 1
+			reg_cur = readl(FXGMAC_DMA_REG(
+				channel, 0x44 /* tx desc curr pointer reg */));
+
+			/* Count consecutive polls with a stalled HW pointer. */
+			if (reg_cur != reg_cur_pre) {
+				reg_cur_pre = reg_cur;
+				change_cnt = 0;
+			} else {
+				change_cnt++;
+			}
+
+			if (change_cnt > 2) {
+				DPRINTK("after complete check, cur=%d, dirty=%d, qIdx=%d, hw desc cur=%#x, pre=%#x\n",
+					ring->cur, ring->dirty,
+					channel->queue_index, reg_cur,
+					reg_cur_pre);
+
+				if ((ring->cur > ring->dirty) &&
+				    ((ring->cur - ring->dirty) >
+				     FXGMAC_HANG_THRESHOLD)) {
+					DPRINTK("after complete check warning..., too many TBD occupied by HW, 0xdbbb, %d.\n",
+						(ring->cur - ring->dirty));
+					/* Debug write of a marker value into
+					 * the mapped register space.
+					 */
+					(*((u32 *)(netdev->base_addr +
+						   0x1000))) = 0xdbbb;
+
+					if (!fxgmac_restart_need) {
+						schedule_work(
+							&pdata->expansion
+								 .restart_work);
+						fxgmac_restart_need = 1;
+						change_cnt = 0;
+					}
+				} else if ((ring->cur < ring->dirty) &&
+					   ((ring->cur + (ring->dma_desc_count -
+							  ring->dirty)) >
+					    FXGMAC_HANG_THRESHOLD)) {
+					DPRINTK("after complete check warning..., too many TBD occupied by HW, 0xdb00, %d.\n",
+						(ring->cur +
+						 (ring->dma_desc_count -
+						  ring->dirty)));
+					(*((u32 *)(netdev->base_addr +
+						   0x1000))) = 0xdb00;
+
+					if (!fxgmac_restart_need) {
+						schedule_work(
+							&pdata->expansion
+								 .restart_work);
+						fxgmac_restart_need = 1;
+						change_cnt = 0;
+					}
+				}
+			}
+#endif
+#if FXGMAC_TX_HANG_TIMER_EN
+			/* Alternative hang handling: arm a timer once the
+			 * dirty index stops advancing for several polls.
+			 */
+			if ((!pdata->tx_hang_restart_queuing) &&
+			    (!channel->expansion.tx_hang_timer_active)) {
+				reg_cur = ring->dirty;
+				if (reg_cur_pre != reg_cur) {
+					reg_cur_pre = reg_cur;
+					change_cnt = 0;
+				} else {
+					change_cnt++;
+				}
+
+				if (change_cnt > 4) {
+#if FXGMAC_TX_HANG_CHECH_DIRTY
+					channel->expansion.tx_hang_hw_cur =
+						ring->dirty;
+#else
+					channel->expansion
+						.tx_hang_hw_cur = readl(FXGMAC_DMA_REG(
+						channel,
+						0x44 /* tx desc curr pointer reg */));
+#endif
+					/* double check for race conditions */
+					if ((!pdata->tx_hang_restart_queuing) &&
+					    (!channel->expansion
+						      .tx_hang_timer_active)) {
+						DPRINTK("tx_hang polling: start timer at desc %u, timer act=%u, queuing=%u, qidx=%u.\n",
+							reg_cur,
+							channel->expansion
+								.tx_hang_timer_active,
+							pdata->tx_hang_restart_queuing,
+							channel->queue_index);
+						fxgmac_tx_hang_timer_start(
+							channel);
+					}
+				}
+			}
+#endif
+
+			break;
+		}
+
+		/* Progress was made: reset the hang-detection state. */
+		reg_cur_pre = 0xffffffff;
+		fxgmac_restart_need = 0;
+		change_cnt = 0;
+
+		/* Make sure descriptor fields are read after reading
+		 * the OWN bit
+		 */
+		dma_rmb();
+
+		if (netif_msg_tx_done(pdata))
+			fxgmac_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
+
+		/* Packet/byte counters live on the last descriptor of a frame. */
+		if (hw_ops->is_last_desc(dma_desc)) {
+			tx_packets += desc_data->tx.packets;
+			tx_bytes += desc_data->tx.bytes;
+		}
+
+		/* Free the SKB and reset the descriptor for re-use */
+		desc_ops->unmap_desc_data(pdata, desc_data);
+		hw_ops->tx_desc_reset(desc_data);
+
+		processed++;
+		ring->dirty =
+			FXGMAC_GET_ENTRY(ring->dirty, ring->dma_desc_count);
+	}
+
+	if (!processed)
+		return 0;
+
+	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
+
+	/* Wake the queue once enough descriptors are free again. */
+	if ((ring->tx.queue_stopped == 1) &&
+	    (fxgmac_tx_avail_desc(ring) > FXGMAC_TX_DESC_MIN_FREE)) {
+		ring->tx.queue_stopped = 0;
+		netif_tx_wake_queue(txq);
+	}
+
+	if (netif_msg_tx_done(pdata)) {
+		DPRINTK("tx_poll callout, processed=%d\n", processed);
+	}
+
+	return processed;
+}
+
+/* NAPI Rx handler for @channel: consume up to @budget received packets
+ * and push them into the stack.  Returns the number of packets handled.
+ */
+static int fxgmac_rx_poll(struct fxgmac_channel *channel, int budget)
+{
+	struct fxgmac_pdata *pdata = channel->pdata;
+	struct fxgmac_ring *ring = channel->rx_ring;
+	struct net_device *netdev = pdata->netdev;
+	unsigned int len;
+	unsigned int context_next, context;
+	struct fxgmac_desc_data *desc_data;
+	struct fxgmac_pkt_info *pkt_info;
+	unsigned int incomplete;
+	struct fxgmac_hw_ops *hw_ops;
+	struct napi_struct *napi;
+	struct sk_buff *skb;
+	int packet_count = 0;
+	u32 ipce, iphe;
+
+	hw_ops = &pdata->hw_ops;
+
+	/* Nothing to do if there isn't a Rx ring for this channel */
+	if (!ring)
+		return 0;
+
+	incomplete = 0;
context_next = 0;
+
+	/* Per-channel NAPI in MSI-X mode, the shared one otherwise. */
+	napi = (pdata->per_channel_irq) ? &channel->expansion.napi_rx :
+					  &pdata->expansion.napi;
+
+	desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur);
+	pkt_info = &ring->pkt_info;
+
+	while (packet_count < budget) {
+		memset(pkt_info, 0, sizeof(*pkt_info));
+		skb = NULL;
+		len = 0;
+
+read_again:
+		desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur);
+
+		/* Re-arm descriptors for hardware before they run out. */
+		if (fxgmac_rx_dirty_desc(ring) > FXGMAC_RX_DESC_MAX_DIRTY)
+			fxgmac_rx_refresh(channel);
+
+		if (hw_ops->dev_read(channel))
+			break;
+
+		ring->cur = FXGMAC_GET_ENTRY(ring->cur, ring->dma_desc_count);
+
+		incomplete = FXGMAC_GET_REG_BITS(
+			pkt_info->attributes,
+			RX_PACKET_ATTRIBUTES_INCOMPLETE_POS,
+			RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN);
+		context_next = FXGMAC_GET_REG_BITS(
+			pkt_info->attributes,
+			RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS,
+			RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN);
+		context = FXGMAC_GET_REG_BITS(pkt_info->attributes,
+					      RX_PACKET_ATTRIBUTES_CONTEXT_POS,
+					      RX_PACKET_ATTRIBUTES_CONTEXT_LEN);
+
+		/* Keep reading until the full frame/context has arrived. */
+		if (incomplete || context_next)
+			goto read_again;
+
+		/* NOTE(review): skb is always NULL here (assigned only
+		 * below), so this dev_kfree_skb() is a no-op.
+		 */
+		if (pkt_info->errors) {
+			netif_err(pdata, rx_err, netdev,
+				  "error in received packet\n");
+			dev_kfree_skb(skb);
+			goto next_packet;
+		}
+
+		if (!context) {
+			len = desc_data->rx.len;
+			if (len > pdata->rx_buf_size) {
+				if (net_ratelimit())
+					netdev_err(
+						pdata->netdev,
+						"len %d larger than size (%d)\n",
+						len, pdata->rx_buf_size);
+				pdata->netdev->stats.rx_dropped++;
+				goto next_packet;
+			}
+
+			if (len == 0) {
+				if (net_ratelimit())
+					netdev_err(
+						pdata->netdev,
+						"A packet of length 0 was received\n");
+				pdata->netdev->stats.rx_length_errors++;
+				goto next_packet;
+			}
+
+			if (len && !skb) {
+				skb = fxgmac_create_skb(pdata, napi, desc_data,
+							len);
+				if (unlikely(!skb)) {
+					if (net_ratelimit())
+						netdev_warn(
+							pdata->netdev,
+							"create skb failed\n");
+					goto next_packet;
+				}
+			}
+		}
+
+		if (!skb)
+			goto next_packet;
+
+		if (netif_msg_pktdata(pdata))
+			fxgmac_print_pkt(netdev, skb, false);
+
+		skb_checksum_none_assert(skb);
+		if (netdev->features & NETIF_F_RXCSUM) {
+			/* IP payload / IP header checksum error bits from the
+			 * write-back descriptor.
+			 */
+			ipce = FXGMAC_GET_REG_BITS_LE(
+				desc_data->dma_desc->desc1,
+				RX_NORMAL_DESC1_WB_IPCE_POS,
+				RX_NORMAL_DESC1_WB_IPCE_LEN);
+			iphe = FXGMAC_GET_REG_BITS_LE(
+				desc_data->dma_desc->desc1,
+				RX_NORMAL_DESC1_WB_IPHE_POS,
+				RX_NORMAL_DESC1_WB_IPHE_LEN);
+			/* if csum error, let the stack verify checksum errors.otherwise don't verify */
+			if (!ipce && !iphe &&
+			    FXGMAC_GET_REG_BITS(
+				    pkt_info->attributes,
+				    RX_PACKET_ATTRIBUTES_CSUM_DONE_POS,
+				    RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN))
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+		}
+
+		if (FXGMAC_GET_REG_BITS(pkt_info->attributes,
+					RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS,
+					RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN)) {
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+					       pkt_info->vlan_ctag);
+			pdata->stats.rx_vlan_packets++;
+		}
+
+		if (FXGMAC_GET_REG_BITS(pkt_info->attributes,
+					RX_PACKET_ATTRIBUTES_RSS_HASH_POS,
+					RX_PACKET_ATTRIBUTES_RSS_HASH_LEN))
+			skb_set_hash(skb, pkt_info->rss_hash,
+				     pkt_info->rss_hash_type);
+
+		skb->dev = netdev;
+		skb->protocol = eth_type_trans(skb, netdev);
+		skb_record_rx_queue(skb, channel->queue_index);
+
+		/* Self-test support: when a loopback/TSO/xsum test is active,
+		 * matching frames are copied into a ring buffer for the
+		 * debugfs test harness to compare.
+		 * NOTE(review): skb_copy() results below are passed to
+		 * skb_push() without a NULL check - confirm GFP_ATOMIC
+		 * failure here is acceptable in a test-only path.
+		 */
+		if (pdata->expansion.fxgmac_test_tso_flag) {
+			/* tso test */
+			if (pdata->expansion.fxgmac_test_tso_seg_num == 1) {
+				/* last segment */
+				if (pdata->expansion.fxgmac_test_last_tso_len ==
+				    skb->len + FXGMAC_TEST_MAC_HEAD_LEN) {
+					/* receive last segment, reset flag */
+					pdata->expansion.fxgmac_test_tso_flag =
+						false;
+					pdata->expansion
+						.fxgmac_test_tso_seg_num = 0;
+					pdata->expansion.fxgmac_test_packet_len =
+						0;
+					pdata->expansion
+						.fxgmac_test_last_tso_len = 0;
+
+					/* process packet */
+					if ((pdata->expansion
+						     .fxgmac_test_skb_arr_in_index +
+					     1) % FXGMAC_MAX_DBG_TEST_PKT !=
+					    pdata->expansion
+						    .fxgmac_test_skb_arr_out_index) {
+						struct sk_buff *tmpskb =
+							skb_copy(skb,
+								 GFP_ATOMIC);
+						skb_push(
+							tmpskb,
+							FXGMAC_TEST_MAC_HEAD_LEN);
+
+						pdata->expansion.fxgmac_test_skb_array
+							[pdata->expansion
+								 .fxgmac_test_skb_arr_in_index] =
+							tmpskb;
+						pdata->expansion
+							.fxgmac_test_skb_arr_in_index =
+							(pdata->expansion
+								 .fxgmac_test_skb_arr_in_index +
+							 1) %
+							FXGMAC_MAX_DBG_TEST_PKT;
+					} else {
+						DPRINTK("loopback test buffer is full.");
+					}
+				}
+			} else { /* non last segment */
+				if (pdata->expansion.fxgmac_test_packet_len ==
+				    skb->len + FXGMAC_TEST_MAC_HEAD_LEN) {
+					/* receive a segment */
+					pdata->expansion
+						.fxgmac_test_tso_seg_num--;
+
+					/* process packet */
+					if ((pdata->expansion
+						     .fxgmac_test_skb_arr_in_index +
+					     1) % FXGMAC_MAX_DBG_TEST_PKT !=
+					    pdata->expansion
+						    .fxgmac_test_skb_arr_out_index) {
+						struct sk_buff *tmpskb =
+							skb_copy(skb,
+								 GFP_ATOMIC);
+						skb_push(
+							tmpskb,
+							FXGMAC_TEST_MAC_HEAD_LEN);
+
+						pdata->expansion.fxgmac_test_skb_array
+							[pdata->expansion
+								 .fxgmac_test_skb_arr_in_index] =
+							tmpskb;
+						pdata->expansion
+							.fxgmac_test_skb_arr_in_index =
+							(pdata->expansion
+								 .fxgmac_test_skb_arr_in_index +
+							 1) %
+							FXGMAC_MAX_DBG_TEST_PKT;
+					} else {
+						DPRINTK("loopback test buffer is full.");
+					}
+				}
+			}
+		} else if (pdata->expansion.fxgmac_test_packet_len != 0) {
+			/* xsum and phy loopback test */
+			if (pdata->expansion.fxgmac_test_packet_len ==
+			    skb->len + FXGMAC_TEST_MAC_HEAD_LEN) {
+				/* reset fxg_packet_len */
+				pdata->expansion.fxgmac_test_packet_len = 0;
+
+				if ((pdata->expansion
+					     .fxgmac_test_skb_arr_in_index +
+				     1) % FXGMAC_MAX_DBG_TEST_PKT !=
+				    pdata->expansion
+					    .fxgmac_test_skb_arr_out_index) {
+					struct sk_buff *tmpskb =
+						skb_copy(skb, GFP_ATOMIC);
+					skb_push(tmpskb,
+						 FXGMAC_TEST_MAC_HEAD_LEN);
+
+					pdata->expansion.fxgmac_test_skb_array
+						[pdata->expansion
+							 .fxgmac_test_skb_arr_in_index] =
+						tmpskb;
+					pdata->expansion
+						.fxgmac_test_skb_arr_in_index =
+						(pdata->expansion
+							 .fxgmac_test_skb_arr_in_index +
+						 1) %
+						FXGMAC_MAX_DBG_TEST_PKT;
+				} else {
+					DPRINTK("loopback test buffer is full.");
+				}
+			}
+		}
+		napi_gro_receive(napi, skb);
+
+next_packet:
+		packet_count++;
+
+		/* Software-maintained totals (MMC counters are read separately). */
+		pdata->netdev->stats.rx_packets++;
+		pdata->netdev->stats.rx_bytes += len;
+	}
+
fxgmac_rx_refresh(channel);
+
+	return packet_count;
+}
+
+/* NAPI poll for the dedicated Tx MSI-X vector: reclaim descriptors and
+ * re-enable the Tx interrupt.
+ * NOTE(review): always completes with 0 work reported and returns 0
+ * regardless of @budget - confirm this matches the intended
+ * tx-only NAPI usage.
+ */
+static int fxgmac_one_poll_tx(struct napi_struct *napi, int budget)
+{
+	struct fxgmac_channel *channel =
+		container_of(napi, struct fxgmac_channel, expansion.napi_tx);
+	int ret = 0;
+	struct fxgmac_pdata *pdata = channel->pdata;
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+	ret = fxgmac_tx_poll(channel);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+	if (napi_complete_done(napi, 0)) {
+		hw_ops->enable_msix_one_interrupt(pdata, MSI_ID_TXQ0);
+	}
+#else
+	napi_complete(napi);
+	hw_ops->enable_msix_one_interrupt(pdata, MSI_ID_TXQ0);
+#endif
+	return 0;
+}
+
+/* NAPI poll for a per-channel Rx MSI-X vector: process up to @budget
+ * packets and re-enable that channel's interrupt when done.
+ */
+static int fxgmac_one_poll_rx(struct napi_struct *napi, int budget)
+{
+	struct fxgmac_channel *channel =
+		container_of(napi, struct fxgmac_channel, expansion.napi_rx);
+	int processed = 0;
+	struct fxgmac_pdata *pdata = channel->pdata;
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+
+	processed = fxgmac_rx_poll(channel, budget);
+	if (processed < budget) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+		/* if there no interrupt occurred when this interrupt running, struct napi's state is NAPIF_STATE_SCHED,
+		 * napi_complete_done return true and we can enable irq, it will not cause unbalanced irq issue.
+		 * if there more interrupt occurred when this interrupt running, struct napi's state is NAPIF_STATE_SCHED | NAPIF_STATE_MISSED
+		 * because napi_schedule_prep will make it. At this time napi_complete_done will return false and
+		 * schedule poll again because of NAPIF_STATE_MISSED, it will cause unbalanced irq issue.
+		 */
+		if (napi_complete_done(napi, processed)) {
+			hw_ops->enable_msix_one_interrupt(pdata,
+							  channel->queue_index);
+		}
+#else
+		napi_complete(napi);
+		hw_ops->enable_msix_one_interrupt(pdata, channel->queue_index);
+#endif
+	}
+
+	return processed;
+}
+
+/* Shared NAPI poll used in legacy/MSI single-vector mode: services the
+ * single Tx queue, every Rx channel, and pending PHY events, then
+ * re-enables the management interrupt when under budget.
+ */
+static int fxgmac_all_poll(struct napi_struct *napi, int budget)
+{
+	struct fxgmac_pdata *pdata =
+		container_of(napi, struct fxgmac_pdata, expansion.napi);
+	struct fxgmac_channel *channel;
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	int processed;
+	unsigned int i;
+
+	if (netif_msg_rx_status(pdata)) {
+		DPRINTK("rx all_poll callin budget=%d\n", budget);
+	}
+
+	processed = 0;
+	do {
+		channel = pdata->channel_head;
+		/* Cleanup Tx ring first */
+		/*since only 1 tx channel supported in this version, poll ch 0 always. */
+		fxgmac_tx_poll(pdata->channel_head + 0);
+		for (i = 0; i < pdata->channel_count; i++, channel++) {
+			processed += fxgmac_rx_poll(channel, budget);
+		}
+	} while (false);
+
+	/* for phy, we needn't to process any packet, so processed will be 0 */
+	if (pdata->expansion.mgm_intctrl_val & MGMT_INT_CTRL0_INT_STATUS_PHY) {
+		fxgmac_phy_process(pdata);
+		pdata->expansion.mgm_intctrl_val &=
+			~MGMT_INT_CTRL0_INT_STATUS_PHY;
+	}
+
+	/* If we processed everything, we are done */
+	if (processed < budget) {
+		/* Turn off polling */
+		if (napi_complete_done(napi, processed))
+			hw_ops->enable_mgm_interrupt(pdata);
+	}
+
+	if ((processed) && (netif_msg_rx_status(pdata))) {
+		DPRINTK("rx all_poll callout received = %d\n", processed);
+	}
+
+	return processed;
+}
diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c
new file mode 100644
index 000000000000..f6f8f4f6a5e9
--- /dev/null
+++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Motorcomm Corporation.
*/
+
+/* NOTE(review): the header names of these #include lines appear to have
+ * been stripped by text extraction (angle-bracket contents lost) -
+ * restore from the original patch.
+ */
+#include
+#include
+#include
+
+/* for file operation */
+#include
+
+#include "fuxi-gmac.h"
+#include "fuxi-gmac-reg.h"
+
+#define FXGMAC_DBG 0
+
+/* declarations */
+static void fxgmac_shutdown(struct pci_dev *pdev);
+
+/* PCI probe: enable the device, iomap the first populated BAR, and hand
+ * the resources to the platform-independent driver core.
+ */
+static int fxgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
+{
+	struct device *dev = &pcidev->dev;
+	struct fxgmac_resources res;
+	int i, ret;
+
+	ret = pcim_enable_device(pcidev);
+	if (ret) {
+		dev_err(dev, "ERROR: fxgmac_probe failed to enable device\n");
+		return ret;
+	}
+
+	/* Map only the first BAR that has a non-zero length. */
+	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+		if (pci_resource_len(pcidev, i) == 0)
+			continue;
+		ret = pcim_iomap_regions(pcidev, BIT(i), FXGMAC_DRV_NAME);
+		if (ret)
+			goto err_disable_device;
+		break;
+	}
+
+	pci_set_master(pcidev);
+
+	memset(&res, 0, sizeof(res));
+	res.irq = pcidev->irq;
+	res.addr = pcim_iomap_table(pcidev)[i];
+
+	ret = fxgmac_drv_probe(&pcidev->dev, &res);
+	if (ret)
+		goto err_disable_device;
+
+	return ret;
+
+err_disable_device:
+	pci_disable_device(pcidev);
+	return ret;
+}
+
+/* PCI remove: tear down the driver core, release MSI-X resources and
+ * the debugfs entries.
+ */
+static void fxgmac_remove(struct pci_dev *pcidev)
+{
+	struct net_device *netdev = dev_get_drvdata(&pcidev->dev);
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+
+#ifdef CONFIG_PCI_MSI
+	u32 msix = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags,
+				       FXGMAC_FLAG_MSIX_POS,
+				       FXGMAC_FLAG_MSIX_LEN);
+#endif
+
+	fxgmac_drv_remove(&pcidev->dev);
+#ifdef CONFIG_PCI_MSI
+	if (msix) {
+		pci_disable_msix(pcidev);
+		kfree(pdata->expansion.msix_entries);
+		pdata->expansion.msix_entries = NULL;
+	}
+#endif
+
+#ifdef HAVE_FXGMAC_DEBUG_FS
+	fxgmac_dbg_exit(pdata);
+#endif /* HAVE_FXGMAC_DEBUG_FS */
+}
+
+/* for Power management, 20210628 */
+/* Common power-down path shared by shutdown and suspend: quiesce the
+ * NIC (optionally arming Wake-on-LAN), save PCI state and report via
+ * @enable_wake whether wake should be armed.
+ */
+static int __fxgmac_shutdown(struct pci_dev *pdev, bool *enable_wake)
+{
+	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+	u32 wufc = pdata->expansion.wol;
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	DPRINTK("fxpm,_fxgmac_shutdown, callin\n");
+
+	rtnl_lock();
+
+	/* for linux shutdown, we just treat it as power off wol can be ignored
+	 * for suspend, we do need recovery by wol
+	 */
+	fxgmac_net_powerdown(pdata, (unsigned int)!!wufc);
+	netif_device_detach(netdev);
+	rtnl_unlock();
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval) {
+		DPRINTK("fxpm,_fxgmac_shutdown, save pci state failed.\n");
+		return retval;
+	}
+#endif
+
+	DPRINTK("fxpm,_fxgmac_shutdown, save pci state done.\n");
+
+	pci_wake_from_d3(pdev, !!wufc);
+	*enable_wake = !!wufc;
+
+	pci_disable_device(pdev);
+
+	DPRINTK("fxpm,_fxgmac_shutdown callout, enable wake=%d.\n",
+		*enable_wake);
+
+	return 0;
+}
+
+/* PCI shutdown hook: power the NIC down and, on real power-off, move it
+ * to D3hot with wake configured.
+ */
+static void fxgmac_shutdown(struct pci_dev *pdev)
+{
+	bool wake;
+	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+
+	DPRINTK("fxpm, fxgmac_shutdown callin\n");
+
+	pdata->expansion.current_state = CURRENT_STATE_SHUTDOWN;
+	__fxgmac_shutdown(pdev, &wake);
+
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, wake);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+	DPRINTK("fxpm, fxgmac_shutdown callout, system power off=%d\n",
+		(system_state == SYSTEM_POWER_OFF) ? 1 : 0);
+}
+
+#ifdef CONFIG_PM
+/* yzhang, 20210628 for PM */
+/* Legacy PM suspend: power down (arming WoL if configured) and enter a
+ * low-power state.
+ */
+static int fxgmac_suspend(struct pci_dev *pdev,
+			  pm_message_t __always_unused state)
+{
+	int retval;
+	bool wake;
+	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+	struct fxgmac_pdata *pdata = netdev_priv(netdev);
+
+	DPRINTK("fxpm, fxgmac_suspend callin\n");
+
+	pdata->expansion.current_state = CURRENT_STATE_SUSPEND;
+
+	if (netif_running(netdev)) {
+		retval = __fxgmac_shutdown(pdev, &wake);
+		if (retval)
+			return retval;
+	} else {
+		wake = !!(pdata->expansion.wol);
+	}
+
+	if (wake) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	DPRINTK("fxpm, fxgmac_suspend callout to %s\n",
+		wake ? "sleep" : "D3hot");
+
+	return 0;
+}
+
+/* Legacy PM resume: restore PCI state, re-enable the device and bring
+ * the network interface back up.
+ */
+static int fxgmac_resume(struct pci_dev *pdev)
+{
+	struct fxgmac_pdata *pdata;
+	struct net_device *netdev;
+	u32 err;
+
+	DPRINTK("fxpm, fxgmac_resume callin\n");
+
+	netdev = dev_get_drvdata(&pdev->dev);
+	pdata = netdev_priv(netdev);
+
+	pdata->expansion.current_state = CURRENT_STATE_RESUME;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	/*
+	 * pci_restore_state clears dev->state_saved so call
+	 * pci_save_state to restore it.
+	 */
+	pci_save_state(pdev);
+
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(pdata->dev,
+			"fxgmac_resume, failed to enable PCI device from suspend\n");
+		return err;
+	}
+	smp_mb__before_atomic();
+	__clear_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate);
+	pci_set_master(pdev);
+
+	pci_wake_from_d3(pdev, false);
+
+	rtnl_lock();
+	err = 0;
+	if (!err && netif_running(netdev))
+		fxgmac_net_powerup(pdata);
+
+	if (!err)
+		netif_device_attach(netdev);
+
+	rtnl_unlock();
+
+	DPRINTK("fxpm, fxgmac_resume callout\n");
+
+	return err;
+}
+#endif
+
+static const struct pci_device_id fxgmac_pci_tbl[] = { { PCI_DEVICE(0x1f0a,
+								     0x6801) },
+						       { 0 } };
+MODULE_DEVICE_TABLE(pci, fxgmac_pci_tbl);
+
+static struct pci_driver fxgmac_pci_driver = {
+	.name = FXGMAC_DRV_NAME,
+	.id_table = fxgmac_pci_tbl,
+	.probe = fxgmac_probe,
+	.remove = fxgmac_remove,
+#ifdef CONFIG_PM
+	/* currently, we only use USE_LEGACY_PM_SUPPORT */
+	.suspend = fxgmac_suspend,
+	.resume = fxgmac_resume,
+#endif
+	.shutdown = fxgmac_shutdown,
+};
+
+module_pci_driver(fxgmac_pci_driver);
+
+MODULE_DESCRIPTION(FXGMAC_DRV_DESC);
+MODULE_VERSION(FXGMAC_DRV_VERSION);
+/* NOTE(review): the author e-mail appears stripped by text extraction. */
+MODULE_AUTHOR("Frank ");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c
new file mode 100644
index 000000000000..88066a110f41
--- /dev/null
+++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c
@@ -0,0 +1,256 @@
+/*
SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021 Motorcomm Corporation. */
+
+/* NOTE(review): the header names of these #include lines appear to have
+ * been stripped by text extraction - restore from the original patch.
+ */
+#include
+#include
+
+#include "fuxi-gmac.h"
+#include "fuxi-gmac-reg.h"
+
+/* Force the PHY to a fixed @speed by disabling autonegotiation and
+ * setting the BMCR speed-select bit pair (H/L).
+ */
+void fxgmac_phy_force_speed(struct fxgmac_pdata *pdata, int speed)
+{
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	u32 regval = 0;
+	unsigned int high_bit = 0, low_bit = 0;
+
+	switch (speed) {
+	case SPEED_1000:
+		high_bit = 1, low_bit = 0;
+		break;
+	case SPEED_100:
+		high_bit = 0, low_bit = 1;
+		break;
+	case SPEED_10:
+		high_bit = 0, low_bit = 0;
+		break;
+	default:
+		break;
+	}
+
+	/* disable autoneg */
+	hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &regval);
+	regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS,
+				     PHY_CR_AUTOENG_LEN, 0);
+	regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_SPEED_SEL_H_POS,
+				     PHY_CR_SPEED_SEL_H_LEN, high_bit);
+	regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_SPEED_SEL_L_POS,
+				     PHY_CR_SPEED_SEL_L_LEN, low_bit);
+	hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval);
+}
+
+/* Force full (non-zero @duplex) or half duplex via the BMCR duplex bit. */
+void fxgmac_phy_force_duplex(struct fxgmac_pdata *pdata, int duplex)
+{
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	u32 regval = 0;
+	hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &regval);
+	regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_DUPLEX_POS,
+				     PHY_CR_DUPLEX_LEN, (duplex ? 1 : 0));
+	hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval);
+}
+
+/* Enable (non-zero @autoneg) or disable autonegotiation in BMCR. */
+void fxgmac_phy_force_autoneg(struct fxgmac_pdata *pdata, int autoneg)
+{
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	u32 regval = 0;
+	hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &regval);
+	regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS,
+				     PHY_CR_AUTOENG_LEN, (autoneg ? 1 : 0));
+	hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval);
+}
+
+/*
+ * input: lport
+ * output:
+ * cap_mask, bit definitions:
+ * pause capability and 100/10 capabilities follow the definition of mii reg4.
+ * for 1000M capability, bit0=1000M half; bit1=1000M full, refer to mii reg9.[9:8].
+ */
+/* Read the advertised autoneg abilities (MII ADVERTISE + CTRL1000) and
+ * mirror each capability bit into *cap_mask.  Returns 0 on success,
+ * -1 when the ephy register accessors are missing or an access fails.
+ */
+int fxgmac_ephy_autoneg_ability_get(struct fxgmac_pdata *pdata,
+				    unsigned int *cap_mask)
+{
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	unsigned int val;
+	unsigned int reg;
+
+	if ((!hw_ops->read_ephy_reg) || (!hw_ops->write_ephy_reg))
+		return -1;
+
+	reg = REG_MII_ADVERTISE;
+	if (hw_ops->read_ephy_reg(pdata, reg, &val) < 0)
+		goto busy_exit;
+
+	if (FXGMAC_ADVERTISE_10HALF & val) {
+		*cap_mask |= FXGMAC_ADVERTISE_10HALF;
+	} else {
+		*cap_mask &= ~FXGMAC_ADVERTISE_10HALF;
+	}
+
+	if (FXGMAC_ADVERTISE_10FULL & val) {
+		*cap_mask |= FXGMAC_ADVERTISE_10FULL;
+	} else {
+		*cap_mask &= ~FXGMAC_ADVERTISE_10FULL;
+	}
+
+	if (FXGMAC_ADVERTISE_100HALF & val) {
+		*cap_mask |= FXGMAC_ADVERTISE_100HALF;
+	} else {
+		*cap_mask &= ~FXGMAC_ADVERTISE_100HALF;
+	}
+
+	if (FXGMAC_ADVERTISE_100FULL & val) {
+		*cap_mask |= FXGMAC_ADVERTISE_100FULL;
+	} else {
+		*cap_mask &= ~FXGMAC_ADVERTISE_100FULL;
+	}
+
+	if (FXGMAC_ADVERTISE_PAUSE_CAP & val) {
+		*cap_mask |= FXGMAC_ADVERTISE_PAUSE_CAP;
+	} else {
+		*cap_mask &= ~FXGMAC_ADVERTISE_PAUSE_CAP;
+	}
+
+	if (FXGMAC_ADVERTISE_PAUSE_ASYM & val) {
+		*cap_mask |= FXGMAC_ADVERTISE_PAUSE_ASYM;
+	} else {
+		*cap_mask &= ~FXGMAC_ADVERTISE_PAUSE_ASYM;
+	}
+
+	reg = REG_MII_CTRL1000;
+	if (hw_ops->read_ephy_reg(pdata, reg, &val) < 0)
+		goto busy_exit;
+
+	if (REG_BIT_ADVERTISE_1000HALF & val) {
+		*cap_mask |= FXGMAC_ADVERTISE_1000HALF;
+	} else {
+		*cap_mask &= ~FXGMAC_ADVERTISE_1000HALF;
+	}
+
+	if (REG_BIT_ADVERTISE_1000FULL & val) {
+		*cap_mask |= FXGMAC_ADVERTISE_1000FULL;
+	} else {
+		*cap_mask &= ~FXGMAC_ADVERTISE_1000FULL;
+	}
+
+	return 0;
+
+busy_exit:
+	DPRINTK("fxgmac_ephy_autoneg_ability_get exit due to ephy reg access fail.\n");
+
+	return -1;
+}
+
+/* Soft-reset the embedded PHY by setting BMCR bit 15 and polling (up to
+ * 15 reads) for the self-clearing bit.  Returns 0 on success, -EBUSY on
+ * timeout, or the negative accessor error.
+ */
+int fxgmac_ephy_soft_reset(struct fxgmac_pdata *pdata)
+{
+	struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops;
+	int ret;
+	volatile unsigned int val;
+	int busy = 15;
+
+	ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, (unsigned int *)&val);
+	if (0 > ret)
+		goto busy_exit;
+
+	ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, (val | 0x8000));
+	if (0 > ret)
+		goto busy_exit;
+
+	do {
+		ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR,
+					    (unsigned int *)&val);
+		busy--;
+	} while ((ret >= 0) && (0 != (val & 0x8000)) && (busy));
+
+	if (0 == (val & 0x8000))
+		return 0;
+
+	DPRINTK("fxgmac_ephy_soft_reset, timeout, busy=%d.\n", busy);
+	return -EBUSY;
+
+busy_exit:
+	DPRINTK("fxgmac_ephy_soft_reset exit due to ephy reg access fail.\n");
+
+	return ret;
+}
+
+/* this function used to double check the speed. for fiber, to correct there is no 10M */
+static int fxgmac_ephy_adjust_status(u32 lport, int val, int is_utp, int *speed,
+				     int *duplex)
+{
+	/* Decode duplex and speed-mode fields from the PHY specific-status
+	 * value @val; 10M is only reported for UTP media.
+	 */
+	int speed_mode;
+
+	*speed = -1;
+	*duplex = (val & BIT(FUXI_EPHY_DUPLEX_BIT)) >> FUXI_EPHY_DUPLEX_BIT;
+	speed_mode = (val & FUXI_EPHY_SPEED_MODE) >> FUXI_EPHY_SPEED_MODE_BIT;
+	switch (speed_mode) {
+	case 0:
+		if (is_utp)
+			*speed = SPEED_10M;
+		break;
+	case 1:
+		*speed = SPEED_100M;
+		break;
+	case 2:
+		*speed = SPEED_1000M;
+		break;
+	case 3:
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * this function for polling to get status of ephy link.
+ * output:
+ * speed: SPEED_10M, SPEED_100M, SPEED_1000M or -1;
+ * duplex: 0 or 1, see reg 0x11, bit YT8614_DUPLEX_BIT.
+ * ret_link: 0 or 1, link down or up.
+ * media: only valid when ret_link=1, (YT8614_SMI_SEL_SDS_SGMII + 1) for fiber; (YT8614_SMI_SEL_PHY + 1) for utp. -1 for link down.
+ */ +int fxgmac_ephy_status_get(struct fxgmac_pdata *pdata, int *speed, int *duplex, + int *ret_link, int *media) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + int ret; + u16 reg; + volatile unsigned int val; + volatile int link; + int link_utp = 0, link_fiber = 0; + + reg = REG_MII_SPEC_STATUS; + ret = hw_ops->read_ephy_reg(pdata, reg, (unsigned int *)&val); + if (0 > ret) + goto busy_exit; + + link = val & (BIT(FUXI_EPHY_LINK_STATUS_BIT)); + if (link) { + link_utp = 1; + fxgmac_ephy_adjust_status(0, val, 1, speed, duplex); + } else { + link_utp = 0; + } + + if (link_utp || link_fiber) { + /* case of fiber of priority */ + if (link_utp) + *media = (FUXI_EPHY_SMI_SEL_PHY + 1); + if (link_fiber) + *media = (FUXI_EPHY_SMI_SEL_SDS_SGMII + 1); + + *ret_link = 1; + } else { + *ret_link = 0; + *media = -1; + *speed = -1; + *duplex = -1; + } + + return 0; + +busy_exit: + DPRINTK("fxgmac_ephy_status_get exit due to ephy reg access fail.\n"); + + return ret; +} diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h new file mode 100644 index 000000000000..65d6288e6869 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h @@ -0,0 +1,1894 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. */ + +#ifndef __FUXI_GMAC_REG_H__ +#define __FUXI_GMAC_REG_H__ + +#define AISC_MODE + +#define FUXI_REV_01 0x01 /* The first NTO version. */ +#define FUXI_REV_03 0x03 /* ECO back on 07/2023. 
*/ + +/* MAC register offsets */ +#define MAC_OFFSET 0x2000 +#define MAC_CR 0x0000 /* The MAC Configuration Register */ +#define MAC_ECR 0x0004 +#define MAC_PFR 0x0008 +#define MAC_HTR0 0x0010 +#define MAC_VLANTR 0x0050 +#define MAC_VLANHTR 0x0058 +#define MAC_VLANIR 0x0060 +#define MAC_Q0TFCR 0x0070 +#define MAC_RFCR 0x0090 +#define MAC_RQC0R 0x00a0 +#define MAC_RQC1R 0x00a4 +#define MAC_RQC2R 0x00a8 +#define MAC_RQC3R 0x00ac +#define MAC_ISR 0x00b0 +#define MAC_IER 0x00b4 +#define MAC_TX_RX_STA 0x00b8 +#define MAC_PMT_STA 0x00c0 +/* This is the FIFO address, the pointer will be increased + * automatically after writing. + */ +#define MAC_RWK_PAC 0x00c4 +#define MAC_LPI_STA 0x00d0 +#define MAC_LPI_CONTROL 0x00d4 +#define MAC_LPI_TIMER 0x00d8 +#define MAC_MS_TIC_COUNTER 0x00dc +#define MAC_AN_SR 0x00E4 +#define MAC_PHYIF_STA 0x00F8 +#define MAC_VR 0x0110 +#define MAC_DBG_STA 0x0114 +#define MAC_HWF0R 0x011c +#define MAC_HWF1R 0x0120 +#define MAC_HWF2R 0x0124 +#define MAC_HWF3R 0x0128 +#define MAC_MDIO_ADDRESS 0x0200 +#define MAC_MDIO_DATA 0x0204 +#define MAC_GPIO_SR 0x020c +#define MAC_ARP_PROTO_ADDR 0x0210 +#define MAC_CSR_SW_CTRL 0x0230 + +/* mac[5]->bit15:8, mac[4]->bit7:0 */ +#define MAC_MACA0HR 0x0300 +/* mac[0]->bit7:0, mac[1]->bit15:8, mac[2]->bit23:16, mac[3]->bit31:24 */ +#define MAC_MACA0LR 0x0304 + +#define MAC_MACA1HR 0x0308 +#define MAC_MACA1HR_AE_POS 31 +#define MAC_MACA1HR_AE_LEN 1 + +#define MAC_MACA1LR 0x030c + + +#define MAC_RSSCR 0x3c80 +#define MAC_RSSAR 0x3c88 +#define MAC_RSSDR 0x3c8c + + + +#define MAC_QTFCR_INC 4 +#define MAC_MACA_INC 4 +#define MAC_HTR_INC 4 +#define MAC_RQC2_INC 4 +#define MAC_RQC2_Q_PER_REG 4 + +/* MAC register entry bit positions and sizes */ +#define MAC_HWF0R_ADDMACADRSEL_POS 18 +#define MAC_HWF0R_ADDMACADRSEL_LEN 5 +#define MAC_HWF0R_ARPOFFSEL_POS 9 +#define MAC_HWF0R_ARPOFFSEL_LEN 1 +#define MAC_HWF0R_EEESEL_POS 13 +#define MAC_HWF0R_EEESEL_LEN 1 +#define MAC_HWF0R_ACTPHYIFSEL_POS 28 +#define 
MAC_HWF0R_ACTPHYIFSEL_LEN 3 +#define MAC_HWF0R_MGKSEL_POS 7 +#define MAC_HWF0R_MGKSEL_LEN 1 +#define MAC_HWF0R_MMCSEL_POS 8 +#define MAC_HWF0R_MMCSEL_LEN 1 +#define MAC_HWF0R_RWKSEL_POS 6 +#define MAC_HWF0R_RWKSEL_LEN 1 +#define MAC_HWF0R_RXCOESEL_POS 16 +#define MAC_HWF0R_RXCOESEL_LEN 1 +#define MAC_HWF0R_SAVLANINS_POS 27 +#define MAC_HWF0R_SAVLANINS_LEN 1 +#define MAC_HWF0R_SMASEL_POS 5 +#define MAC_HWF0R_SMASEL_LEN 1 +#define MAC_HWF0R_TSSEL_POS 12 +#define MAC_HWF0R_TSSEL_LEN 1 +#define MAC_HWF0R_TSSTSSEL_POS 25 +#define MAC_HWF0R_TSSTSSEL_LEN 2 +#define MAC_HWF0R_TXCOESEL_POS 14 +#define MAC_HWF0R_TXCOESEL_LEN 1 +#define MAC_HWF0R_VLHASH_POS 4 +#define MAC_HWF0R_VLHASH_LEN 1 +#define MAC_HWF1R_ADDR64_POS 14 +#define MAC_HWF1R_ADDR64_LEN 2 +#define MAC_HWF1R_ADVTHWORD_POS 13 +#define MAC_HWF1R_ADVTHWORD_LEN 1 +#define MAC_HWF1R_DBGMEMA_POS 19 +#define MAC_HWF1R_DBGMEMA_LEN 1 +#define MAC_HWF1R_DCBEN_POS 16 +#define MAC_HWF1R_DCBEN_LEN 1 +#define MAC_HWF1R_HASHTBLSZ_POS 24 +#define MAC_HWF1R_HASHTBLSZ_LEN 2 +#define MAC_HWF1R_L3L4FNUM_POS 27 +#define MAC_HWF1R_L3L4FNUM_LEN 4 +#define MAC_HWF1R_RAVSEL_POS 21 +#define MAC_HWF1R_RAVSEL_LEN 1 +#define MAC_HWF1R_AVSEL_POS 20 +#define MAC_HWF1R_AVSEL_LEN 1 +#define MAC_HWF1R_RXFIFOSIZE_POS 0 +#define MAC_HWF1R_RXFIFOSIZE_LEN 5 +#define MAC_HWF1R_SPHEN_POS 17 +#define MAC_HWF1R_SPHEN_LEN 1 +#define MAC_HWF1R_TSOEN_POS 18 +#define MAC_HWF1R_TSOEN_LEN 1 +#define MAC_HWF1R_TXFIFOSIZE_POS 6 +#define MAC_HWF1R_TXFIFOSIZE_LEN 5 +#define MAC_HWF2R_AUXSNAPNUM_POS 28 +#define MAC_HWF2R_AUXSNAPNUM_LEN 3 +#define MAC_HWF2R_PPSOUTNUM_POS 24 +#define MAC_HWF2R_PPSOUTNUM_LEN 3 +#define MAC_HWF2R_RXCHCNT_POS 12 +#define MAC_HWF2R_RXCHCNT_LEN 4 +#define MAC_HWF2R_RXQCNT_POS 0 +#define MAC_HWF2R_RXQCNT_LEN 4 +#define MAC_HWF2R_TXCHCNT_POS 18 +#define MAC_HWF2R_TXCHCNT_LEN 4 +#define MAC_HWF2R_TXQCNT_POS 6 +#define MAC_HWF2R_TXQCNT_LEN 4 +#define MAC_IER_TSIE_POS 12 +#define MAC_IER_TSIE_LEN 1 +#define MAC_ISR_MMCRXIS_POS 9 +#define 
MAC_ISR_MMCRXIS_LEN 1 +#define MAC_ISR_MMCTXIS_POS 10 +#define MAC_ISR_MMCTXIS_LEN 1 +#define MAC_ISR_PMTIS_POS 4 +#define MAC_ISR_PMTIS_LEN 1 +#define MAC_ISR_TSIS_POS 12 +#define MAC_ISR_TSIS_LEN 1 +#define MAC_MACA1HR_AE_POS 31 +#define MAC_MACA1HR_AE_LEN 1 +#define MAC_PFR_HMC_POS 2 +#define MAC_PFR_HMC_LEN 1 +#define MAC_PFR_HPF_POS 10 +#define MAC_PFR_HPF_LEN 1 +#define MAC_PFR_PM_POS 4 /* Pass all Multicast. */ +#define MAC_PFR_PM_LEN 1 +#define MAC_PFR_DBF_POS 5 /* Disable Broadcast Packets. */ +#define MAC_PFR_DBF_LEN 1 +/* Hash Unicast. 0x0 (DISABLE). compares the DA field with + * the values programmed in DA registers. + */ +#define MAC_PFR_HUC_POS 1 +#define MAC_PFR_HUC_LEN 1 +#define MAC_PFR_PR_POS 0 /* Enable Promiscuous Mode. */ +#define MAC_PFR_PR_LEN 1 +#define MAC_PFR_VTFE_POS 16 +#define MAC_PFR_VTFE_LEN 1 +#define MAC_Q0TFCR_PT_POS 16 +#define MAC_Q0TFCR_PT_LEN 16 +#define MAC_Q0TFCR_TFE_POS 1 +#define MAC_Q0TFCR_TFE_LEN 1 +#define MAC_CR_ARPEN_POS 31 +#define MAC_CR_ARPEN_LEN 1 +#define MAC_CR_ACS_POS 20 +#define MAC_CR_ACS_LEN 1 +#define MAC_CR_CST_POS 21 +#define MAC_CR_CST_LEN 1 +#define MAC_CR_IPC_POS 27 +#define MAC_CR_IPC_LEN 1 +#define MAC_CR_JE_POS 16 +#define MAC_CR_JE_LEN 1 +#define MAC_CR_LM_POS 12 +#define MAC_CR_LM_LEN 1 +#define MAC_CR_RE_POS 0 +#define MAC_CR_RE_LEN 1 +#define MAC_CR_PS_POS 15 +#define MAC_CR_PS_LEN 1 +#define MAC_CR_FES_POS 14 +#define MAC_CR_FES_LEN 1 +#define MAC_CR_DM_POS 13 +#define MAC_CR_DM_LEN 1 +#define MAC_CR_TE_POS 1 +#define MAC_CR_TE_LEN 1 +#define MAC_ECR_DCRCC_POS 16 +#define MAC_ECR_DCRCC_LEN 1 +#define MAC_ECR_HDSMS_POS 20 +#define MAC_ECR_HDSMS_LEN 3 +#define MAC_RFCR_PFCE_POS 8 +#define MAC_RFCR_PFCE_LEN 1 +#define MAC_RFCR_RFE_POS 0 +#define MAC_RFCR_RFE_LEN 1 +#define MAC_RFCR_UP_POS 1 +#define MAC_RFCR_UP_LEN 1 +#define MAC_RQC0R_RXQ0EN_POS 0 +#define MAC_RQC0R_RXQ0EN_LEN 2 +#define MAC_LPIIE_POS 5 +#define MAC_LPIIE_LEN 1 +#define MAC_LPIATE_POS 20 +#define MAC_LPIATE_LEN 1 +#define 
MAC_LPITXA_POS 19 +#define MAC_LPITXA_LEN 1 +#define MAC_PLS_POS 17 +#define MAC_PLS_LEN 1 +#define MAC_LPIEN_POS 16 +#define MAC_LPIEN_LEN 1 +#define MAC_LPI_ENTRY_TIMER 8 +#define MAC_LPIET_POS 3 +#define MAC_LPIET_LEN 17 +#define MAC_TWT_TIMER 0x10 +#define MAC_TWT_POS 0 +#define MAC_TWT_LEN 16 +#define MAC_LST_TIMER 2 +#define MAC_LST_POS 16 +#define MAC_LST_LEN 10 +#define MAC_MS_TIC 24 +#define MAC_MS_TIC_POS 0 +#define MAC_MS_TIC_LEN 12 + +/* RSS table */ +#define MAC_RSSAR_ADDRT_POS 2 +#define MAC_RSSAR_ADDRT_LEN 1 +#define MAC_RSSAR_CT_POS 1 +#define MAC_RSSAR_CT_LEN 1 +#define MAC_RSSAR_OB_POS 0 +#define MAC_RSSAR_OB_LEN 1 +#define MAC_RSSAR_RSSIA_POS 8 +#define MAC_RSSAR_RSSIA_LEN 8 +/* RSS control and options */ +/* note, below options definitions are used only for pdata->options, + * not for register, so the position is not consistent with register. + * [0] ipv4 + * [1] tcpv4 + * [2] udpv4 + * [3] ipv6 + * [4] tcpv6 + * [5] udpv6 + */ +#define MAC_RSSCR_IP4TE_POS 0 +#define MAC_RSSCR_IP4TE_LEN 1 +#define MAC_RSSCR_IP6TE_POS 3 +#define MAC_RSSCR_IP6TE_LEN 1 +#define MAC_RSSCR_TCP4TE_POS 1 +#define MAC_RSSCR_TCP4TE_LEN 1 +#define MAC_RSSCR_UDP4TE_POS 2 +#define MAC_RSSCR_UDP4TE_LEN 1 +#define MAC_RSSCR_TCP6TE_POS 4 +#define MAC_RSSCR_TCP6TE_LEN 1 +#define MAC_RSSCR_UDP6TE_POS 5 +#define MAC_RSSCR_UDP6TE_LEN 1 + +/* RSS indirection table */ +#define MAC_RSSDR_DMCH_POS 0 +#define MAC_RSSDR_DMCH_LEN 2 + +#define MAC_VLANHTR_VLHT_POS 0 +#define MAC_VLANHTR_VLHT_LEN 16 +#define MAC_VLANIR_VLTI_POS 20 +#define MAC_VLANIR_VLTI_LEN 1 +#define MAC_VLANIR_CSVL_POS 19 +#define MAC_VLANIR_CSVL_LEN 1 +#define MAC_VLANIR_VLP_POS 18 +#define MAC_VLANIR_VLP_LEN 1 +#define MAC_VLANIR_VLC_POS 16 +#define MAC_VLANIR_VLC_LEN 2 +#define MAC_VLANIR_VLT_POS 0 +#define MAC_VLANIR_VLT_LEN 16 +#define MAC_VLANTR_DOVLTC_POS 20 +#define MAC_VLANTR_DOVLTC_LEN 1 +#define MAC_VLANTR_ERSVLM_POS 19 +#define MAC_VLANTR_ERSVLM_LEN 1 +#define MAC_VLANTR_ESVL_POS 18 +#define 
MAC_VLANTR_ESVL_LEN 1 +#define MAC_VLANTR_ETV_POS 16 +#define MAC_VLANTR_ETV_LEN 1 +#define MAC_VLANTR_EVLS_POS 21 +#define MAC_VLANTR_EVLS_LEN 2 +#define MAC_VLANTR_EVLRXS_POS 24 +#define MAC_VLANTR_EVLRXS_LEN 1 +#define MAC_VLANTR_VL_POS 0 +#define MAC_VLANTR_VL_LEN 16 +#define MAC_VLANTR_VTHM_POS 25 +#define MAC_VLANTR_VTHM_LEN 1 +#define MAC_VLANTR_VTIM_POS 17 +#define MAC_VLANTR_VTIM_LEN 1 +#define MAC_VR_DEVID_POS 16 +#define MAC_VR_DEVID_LEN 16 +#define MAC_VR_SVER_POS 0 +#define MAC_VR_SVER_LEN 8 +#define MAC_VR_USERVER_POS 8 +#define MAC_VR_USERVER_LEN 8 + +#define MAC_DBG_STA_TX_BUSY 0x70000 +#define MTL_TXQ_DEG_TX_BUSY 0x10 + +#define MAC_MDIO_ADDRESS_BUSY 1 /* bit 0 */ + +#define MAC_MDIO_ADDR_GOC_POS 2 +#define MAC_MDIO_ADDR_GOC_LEN 2 +#define MAC_MDIO_ADDR_GB_POS 0 +#define MAC_MDIO_ADDR_GB_LEN 1 + +#define MAC_MDIO_DATA_RA_POS 16 +#define MAC_MDIO_DATA_RA_LEN 16 +#define MAC_MDIO_DATA_GD_POS 0 +#define MAC_MDIO_DATA_GD_LEN 16 + +/* bit definitions for PMT and WOL, 20210622 */ +#define MAC_PMT_STA_PWRDWN_POS 0 +#define MAC_PMT_STA_PWRDWN_LEN 1 +#define MAC_PMT_STA_MGKPKTEN_POS 1 +#define MAC_PMT_STA_MGKPKTEN_LEN 1 +#define MAC_PMT_STA_RWKPKTEN_POS 2 +#define MAC_PMT_STA_RWKPKTEN_LEN 1 +#define MAC_PMT_STA_MGKPRCVD_POS 5 +#define MAC_PMT_STA_MGKPRCVD_LEN 1 +#define MAC_PMT_STA_RWKPRCVD_POS 6 +#define MAC_PMT_STA_RWKPRCVD_LEN 1 +#define MAC_PMT_STA_GLBLUCAST_POS 9 +#define MAC_PMT_STA_GLBLUCAST_LEN 1 +#define MAC_PMT_STA_RWKPTR_POS 24 +#define MAC_PMT_STA_RWKPTR_LEN 4 +#define MAC_PMT_STA_RWKFILTERST_POS 31 +#define MAC_PMT_STA_RWKFILTERST_LEN 1 +/* MMC register offsets */ +#define MMC_CR 0x0700 +#define MMC_RISR 0x0704 +#define MMC_TISR 0x0708 +#define MMC_RIER 0x070c +#define MMC_TIER 0x0710 +#define MMC_TXOCTETCOUNT_GB_LO 0x0714 +#define MMC_TXFRAMECOUNT_GB_LO 0x0718 +#define MMC_TXBROADCASTFRAMES_G_LO 0x071c +#define MMC_TXMULTICASTFRAMES_G_LO 0x0720 +#define MMC_TX64OCTETS_GB_LO 0x0724 +#define MMC_TX65TO127OCTETS_GB_LO 0x0728 +#define 
MMC_TX128TO255OCTETS_GB_LO 0x072c +#define MMC_TX256TO511OCTETS_GB_LO 0x0730 +#define MMC_TX512TO1023OCTETS_GB_LO 0x0734 +#define MMC_TX1024TOMAXOCTETS_GB_LO 0x0738 +#define MMC_TXUNICASTFRAMES_GB_LO 0x073c +#define MMC_TXMULTICASTFRAMES_GB_LO 0x0740 +#define MMC_TXBROADCASTFRAMES_GB_LO 0x0744 +#define MMC_TXUNDERFLOWERROR_LO 0x0748 +#define MMC_TXSINGLECOLLISION_G 0x074c +#define MMC_TXMULTIPLECOLLISION_G 0x0750 +#define MMC_TXDEFERREDFRAMES 0x0754 +#define MMC_TXLATECOLLISIONFRAMES 0x0758 +#define MMC_TXEXCESSIVECOLLSIONFRAMES 0x075c +#define MMC_TXCARRIERERRORFRAMES 0x0760 +#define MMC_TXOCTETCOUNT_G_LO 0x0764 +#define MMC_TXFRAMECOUNT_G_LO 0x0768 +#define MMC_TXEXCESSIVEDEFERRALERROR 0x076c +#define MMC_TXPAUSEFRAMES_LO 0x0770 +#define MMC_TXVLANFRAMES_G_LO 0x0774 +#define MMC_TXOVERSIZEFRAMES 0x0778 +#define MMC_RXFRAMECOUNT_GB_LO 0x0780 +#define MMC_RXOCTETCOUNT_GB_LO 0x0784 +#define MMC_RXOCTETCOUNT_G_LO 0x0788 +#define MMC_RXBROADCASTFRAMES_G_LO 0x078c +#define MMC_RXMULTICASTFRAMES_G_LO 0x0790 +#define MMC_RXCRCERROR_LO 0x0794 +#define MMC_RXALIGNERROR 0x0798 +#define MMC_RXRUNTERROR 0x079c +#define MMC_RXJABBERERROR 0x07a0 +#define MMC_RXUNDERSIZE_G 0x07a4 +#define MMC_RXOVERSIZE_G 0x07a8 +#define MMC_RX64OCTETS_GB_LO 0x07ac +#define MMC_RX65TO127OCTETS_GB_LO 0x07b0 +#define MMC_RX128TO255OCTETS_GB_LO 0x07b4 +#define MMC_RX256TO511OCTETS_GB_LO 0x07b8 +#define MMC_RX512TO1023OCTETS_GB_LO 0x07bc +#define MMC_RX1024TOMAXOCTETS_GB_LO 0x07c0 +#define MMC_RXUNICASTFRAMES_G_LO 0x07c4 +#define MMC_RXLENGTHERROR_LO 0x07c8 +#define MMC_RXOUTOFRANGETYPE_LO 0x07cc +#define MMC_RXPAUSEFRAMES_LO 0x07d0 +#define MMC_RXFIFOOVERFLOW_LO 0x07d4 +#define MMC_RXVLANFRAMES_GB_LO 0x07d8 +#define MMC_RXWATCHDOGERROR 0x07dc +#define MMC_RXRECEIVEERRORFRAME 0x07e0 +#define MMC_RXCONTROLFRAME_G 0x07e4 + +#define MMC_IPCRXINTMASK 0x800 +#define MMC_IPCRXINT 0x808 + +/* MMC register entry bit positions and sizes */ +#define MMC_CR_CR_POS 0 +#define MMC_CR_CR_LEN 1 +#define 
MMC_CR_CSR_POS 1 +#define MMC_CR_CSR_LEN 1 +#define MMC_CR_ROR_POS 2 +#define MMC_CR_ROR_LEN 1 +#define MMC_CR_MCF_POS 3 +#define MMC_CR_MCF_LEN 1 +#define MMC_RIER_ALL_INTERRUPTS_POS 0 +#define MMC_RIER_ALL_INTERRUPTS_LEN 26 +#define MMC_RISR_RXFRAMECOUNT_GB_POS 0 +#define MMC_RISR_RXFRAMECOUNT_GB_LEN 1 +#define MMC_RISR_RXOCTETCOUNT_GB_POS 1 +#define MMC_RISR_RXOCTETCOUNT_GB_LEN 1 +#define MMC_RISR_RXOCTETCOUNT_G_POS 2 +#define MMC_RISR_RXOCTETCOUNT_G_LEN 1 +#define MMC_RISR_RXBROADCASTFRAMES_G_POS 3 +#define MMC_RISR_RXBROADCASTFRAMES_G_LEN 1 +#define MMC_RISR_RXMULTICASTFRAMES_G_POS 4 +#define MMC_RISR_RXMULTICASTFRAMES_G_LEN 1 +#define MMC_RISR_RXCRCERROR_POS 5 +#define MMC_RISR_RXCRCERROR_LEN 1 +#define MMC_RISR_RXALIGNERROR_POS 6 +#define MMC_RISR_RXALIGNERROR_LEN 1 +#define MMC_RISR_RXRUNTERROR_POS 7 +#define MMC_RISR_RXRUNTERROR_LEN 1 +#define MMC_RISR_RXJABBERERROR_POS 8 +#define MMC_RISR_RXJABBERERROR_LEN 1 +#define MMC_RISR_RXUNDERSIZE_G_POS 9 +#define MMC_RISR_RXUNDERSIZE_G_LEN 1 +#define MMC_RISR_RXOVERSIZE_G_POS 10 +#define MMC_RISR_RXOVERSIZE_G_LEN 1 +#define MMC_RISR_RX64OCTETS_GB_POS 11 +#define MMC_RISR_RX64OCTETS_GB_LEN 1 +#define MMC_RISR_RX65TO127OCTETS_GB_POS 12 +#define MMC_RISR_RX65TO127OCTETS_GB_LEN 1 +#define MMC_RISR_RX128TO255OCTETS_GB_POS 13 +#define MMC_RISR_RX128TO255OCTETS_GB_LEN 1 +#define MMC_RISR_RX256TO511OCTETS_GB_POS 14 +#define MMC_RISR_RX256TO511OCTETS_GB_LEN 1 +#define MMC_RISR_RX512TO1023OCTETS_GB_POS 15 +#define MMC_RISR_RX512TO1023OCTETS_GB_LEN 1 +#define MMC_RISR_RX1024TOMAXOCTETS_GB_POS 16 +#define MMC_RISR_RX1024TOMAXOCTETS_GB_LEN 1 +#define MMC_RISR_RXUNICASTFRAMES_G_POS 17 +#define MMC_RISR_RXUNICASTFRAMES_G_LEN 1 +#define MMC_RISR_RXLENGTHERROR_POS 18 +#define MMC_RISR_RXLENGTHERROR_LEN 1 +#define MMC_RISR_RXOUTOFRANGETYPE_POS 19 +#define MMC_RISR_RXOUTOFRANGETYPE_LEN 1 +#define MMC_RISR_RXPAUSEFRAMES_POS 20 +#define MMC_RISR_RXPAUSEFRAMES_LEN 1 +#define MMC_RISR_RXFIFOOVERFLOW_POS 21 +#define 
MMC_RISR_RXFIFOOVERFLOW_LEN 1 +#define MMC_RISR_RXVLANFRAMES_GB_POS 22 +#define MMC_RISR_RXVLANFRAMES_GB_LEN 1 +#define MMC_RISR_RXWATCHDOGERROR_POS 23 +#define MMC_RISR_RXWATCHDOGERROR_LEN 1 +#define MMC_RISR_RXERRORFRAMES_POS 24 +#define MMC_RISR_RXERRORFRAMES_LEN 1 +#define MMC_RISR_RXERRORCONTROLFRAMES_POS 25 +#define MMC_RISR_RXERRORCONTROLFRAMES_LEN 1 +#define MMC_RISR_RXLPIMICROSECOND_POS 26 /* no counter register */ +#define MMC_RISR_RXLPIMICROSECOND_LEN 1 +#define MMC_RISR_RXLPITRANSITION_POS 27 /* no counter register */ +#define MMC_RISR_RXLPITRANSITION_LEN 1 + +#define MMC_TIER_ALL_INTERRUPTS_POS 0 +#define MMC_TIER_ALL_INTERRUPTS_LEN 26 +#define MMC_TISR_TXOCTETCOUNT_GB_POS 0 +#define MMC_TISR_TXOCTETCOUNT_GB_LEN 1 +#define MMC_TISR_TXFRAMECOUNT_GB_POS 1 +#define MMC_TISR_TXFRAMECOUNT_GB_LEN 1 +#define MMC_TISR_TXBROADCASTFRAMES_G_POS 2 +#define MMC_TISR_TXBROADCASTFRAMES_G_LEN 1 +#define MMC_TISR_TXMULTICASTFRAMES_G_POS 3 +#define MMC_TISR_TXMULTICASTFRAMES_G_LEN 1 +#define MMC_TISR_TX64OCTETS_GB_POS 4 +#define MMC_TISR_TX64OCTETS_GB_LEN 1 +#define MMC_TISR_TX65TO127OCTETS_GB_POS 5 +#define MMC_TISR_TX65TO127OCTETS_GB_LEN 1 +#define MMC_TISR_TX128TO255OCTETS_GB_POS 6 +#define MMC_TISR_TX128TO255OCTETS_GB_LEN 1 +#define MMC_TISR_TX256TO511OCTETS_GB_POS 7 +#define MMC_TISR_TX256TO511OCTETS_GB_LEN 1 +#define MMC_TISR_TX512TO1023OCTETS_GB_POS 8 +#define MMC_TISR_TX512TO1023OCTETS_GB_LEN 1 +#define MMC_TISR_TX1024TOMAXOCTETS_GB_POS 9 +#define MMC_TISR_TX1024TOMAXOCTETS_GB_LEN 1 +#define MMC_TISR_TXUNICASTFRAMES_GB_POS 10 +#define MMC_TISR_TXUNICASTFRAMES_GB_LEN 1 +#define MMC_TISR_TXMULTICASTFRAMES_GB_POS 11 +#define MMC_TISR_TXMULTICASTFRAMES_GB_LEN 1 +#define MMC_TISR_TXBROADCASTFRAMES_GB_POS 12 +#define MMC_TISR_TXBROADCASTFRAMES_GB_LEN 1 +#define MMC_TISR_TXUNDERFLOWERROR_POS 13 +#define MMC_TISR_TXUNDERFLOWERROR_LEN 1 +#define MMC_TISR_TXSINGLECOLLISION_G_POS 14 +#define MMC_TISR_TXSINGLECOLLISION_G_LEN 1 +#define MMC_TISR_TXMULTIPLECOLLISION_G_POS 15 
+#define MMC_TISR_TXMULTIPLECOLLISION_G_LEN 1 +#define MMC_TISR_TXDEFERREDFRAMES_POS 16 +#define MMC_TISR_TXDEFERREDFRAMES_LEN 1 +#define MMC_TISR_TXLATECOLLISIONFRAMES_POS 17 +#define MMC_TISR_TXLATECOLLISIONFRAMES_LEN 1 +#define MMC_TISR_TXEXCESSIVECOLLISIONFRAMES_POS 18 +#define MMC_TISR_TXEXCESSIVECOLLISIONFRAMES_LEN 1 +#define MMC_TISR_TXCARRIERERRORFRAMES_POS 19 +#define MMC_TISR_TXCARRIERERRORFRAMES_LEN 1 +#define MMC_TISR_TXOCTETCOUNT_G_POS 20 +#define MMC_TISR_TXOCTETCOUNT_G_LEN 1 +#define MMC_TISR_TXFRAMECOUNT_G_POS 21 +#define MMC_TISR_TXFRAMECOUNT_G_LEN 1 +#define MMC_TISR_TXEXCESSIVEDEFERRALFRAMES_POS 22 +#define MMC_TISR_TXEXCESSIVEDEFERRALFRAMES_LEN 1 +#define MMC_TISR_TXPAUSEFRAMES_POS 23 +#define MMC_TISR_TXPAUSEFRAMES_LEN 1 +#define MMC_TISR_TXVLANFRAMES_G_POS 24 +#define MMC_TISR_TXVLANFRAMES_G_LEN 1 +#define MMC_TISR_TXOVERSIZE_G_POS 25 +#define MMC_TISR_TXOVERSIZE_G_LEN 1 +#define MMC_TISR_TXLPIMICROSECOND_POS 26 /* no counter register */ +#define MMC_TISR_TXLPIMICROSECOND_LEN 1 +#define MMC_TISR_TXLPITRANSITION_POS 27 /* no counter register */ +#define MMC_TISR_TXLPITRANSITION_LEN 1 + +/* MTL register offsets */ +#define MTL_OMR 0x0c00 +#define MTL_FDDR 0x0c10 +#define MTL_INT_SR 0x0c20 +#define MTL_RQDCM0R 0x0c30 +#define MTL_ECC_INT_SR 0x0ccc + +#define MTL_RQDCM_INC 4 +#define MTL_RQDCM_Q_PER_REG 4 + +/* MTL register entry bit positions and sizes */ +#define MTL_OMR_ETSALG_POS 5 +#define MTL_OMR_ETSALG_LEN 2 +#define MTL_OMR_RAA_POS 2 +#define MTL_OMR_RAA_LEN 1 + +/* MTL queue register offsets + * Multiple queues can be active. The first queue has registers + * that begin at 0x0d00. Each subsequent queue has registers that + * are accessed using an offset of 0x40 from the previous queue. 
+ */ +#define MTL_Q_BASE 0x0d00 +#define MTL_Q_INC 0x40 +#define MTL_Q_INT_CTL_SR 0x0d2c + +#define MTL_Q_TQOMR 0x00 +#define MTL_Q_RQOMR 0x30 +#define MTL_Q_RQDR 0x38 +#define MTL_Q_IER 0x2c +#define MTL_Q_ISR 0x2c /* no isr register */ +#define MTL_TXQ_DEG 0x08 /* transmit debug */ + +/* MTL queue register entry bit positions and sizes */ +#define MTL_Q_RQDR_PRXQ_POS 16 +#define MTL_Q_RQDR_PRXQ_LEN 14 +#define MTL_Q_RQDR_RXQSTS_POS 4 +#define MTL_Q_RQDR_RXQSTS_LEN 2 +#define MTL_Q_RQOMR_RFA_POS 8 +#define MTL_Q_RQOMR_RFA_LEN 6 +#define MTL_Q_RQOMR_RFD_POS 14 +#define MTL_Q_RQOMR_RFD_LEN 6 +#define MTL_Q_RQOMR_EHFC_POS 7 +#define MTL_Q_RQOMR_EHFC_LEN 1 +#define MTL_Q_RQOMR_RQS_POS 20 +#define MTL_Q_RQOMR_RQS_LEN 9 +#define MTL_Q_RQOMR_RSF_POS 5 +#define MTL_Q_RQOMR_RSF_LEN 1 +#define MTL_Q_RQOMR_FEP_POS 4 +#define MTL_Q_RQOMR_FEP_LEN 1 +#define MTL_Q_RQOMR_FUP_POS 3 +#define MTL_Q_RQOMR_FUP_LEN 1 +#define MTL_Q_RQOMR_RTC_POS 0 +#define MTL_Q_RQOMR_RTC_LEN 2 +#define MTL_Q_TQOMR_FTQ_POS 0 +#define MTL_Q_TQOMR_FTQ_LEN 1 +#define MTL_Q_TQOMR_TQS_POS 16 +#define MTL_Q_TQOMR_TQS_LEN 7 +#define MTL_Q_TQOMR_TSF_POS 1 +#define MTL_Q_TQOMR_TSF_LEN 1 +#define MTL_Q_TQOMR_TTC_POS 4 +#define MTL_Q_TQOMR_TTC_LEN 3 +#define MTL_Q_TQOMR_TXQEN_POS 2 +#define MTL_Q_TQOMR_TXQEN_LEN 2 + +/* MTL queue register value */ +#define MTL_RSF_DISABLE 0x00 +#define MTL_RSF_ENABLE 0x01 +#define MTL_TSF_DISABLE 0x00 +#define MTL_TSF_ENABLE 0x01 +#define MTL_FEP_DISABLE 0x00 +#define MTL_FEP_ENABLE 0x01 + +#define MTL_RX_THRESHOLD_64 0x00 +#define MTL_RX_THRESHOLD_32 0x01 +#define MTL_RX_THRESHOLD_96 0x02 +#define MTL_RX_THRESHOLD_128 0x03 +#define MTL_TX_THRESHOLD_32 0x00 +#define MTL_TX_THRESHOLD_64 0x01 +#define MTL_TX_THRESHOLD_96 0x02 +#define MTL_TX_THRESHOLD_128 0x03 +#define MTL_TX_THRESHOLD_192 0x04 +#define MTL_TX_THRESHOLD_256 0x05 +#define MTL_TX_THRESHOLD_384 0x06 +#define MTL_TX_THRESHOLD_512 0x07 + +#define MTL_ETSALG_WRR 0x00 +#define MTL_ETSALG_WFQ 0x01 +#define MTL_ETSALG_DWRR 
0x02 +#define MTL_ETSALG_SP 0x03 + +#define MTL_RAA_SP 0x00 +#define MTL_RAA_WSP 0x01 + +#define MTL_Q_DISABLED 0x00 +#define MTL_Q_EN_IF_AV 0x01 +#define MTL_Q_ENABLED 0x02 + +#define MTL_RQDCM0R_Q0MDMACH 0x0 +#define MTL_RQDCM0R_Q1MDMACH 0x00000100 +#define MTL_RQDCM0R_Q2MDMACH 0x00020000 +#define MTL_RQDCM0R_Q3MDMACH 0x03000000 +#define MTL_RQDCM1R_Q4MDMACH 0x00000004 +#define MTL_RQDCM1R_Q5MDMACH 0x00000500 +#define MTL_RQDCM1R_Q6MDMACH 0x00060000 +#define MTL_RQDCM1R_Q7MDMACH 0x07000000 +#define MTL_RQDCM2R_Q8MDMACH 0x00000008 +#define MTL_RQDCM2R_Q9MDMACH 0x00000900 +#define MTL_RQDCM2R_Q10MDMACH 0x000A0000 +#define MTL_RQDCM2R_Q11MDMACH 0x0B000000 + +#define MTL_RQDCM0R_Q0DDMACH 0x10 +#define MTL_RQDCM0R_Q1DDMACH 0x00001000 +#define MTL_RQDCM0R_Q2DDMACH 0x00100000 +#define MTL_RQDCM0R_Q3DDMACH 0x10000000 +#define MTL_RQDCM1R_Q4DDMACH 0x00000010 +#define MTL_RQDCM1R_Q5DDMACH 0x00001000 +#define MTL_RQDCM1R_Q6DDMACH 0x00100000 +#define MTL_RQDCM1R_Q7DDMACH 0x10000000 + + +/* MTL traffic class register offsets + * Multiple traffic classes can be active. The first class has registers + * that begin at 0x1100. Each subsequent queue has registers that + * are accessed using an offset of 0x80 from the previous queue. 
+ */ +/* NO TRAFFIC CLASS REGISTER DESCRIPTION */ +#define MTL_TC_BASE MTL_Q_BASE +#define MTL_TC_INC MTL_Q_INC + +#define MTL_TC_ETSCR 0x10 +#define MTL_TC_ETSSR 0x14 +#define MTL_TC_QWR 0x18 + +/* MTL traffic class register entry bit positions and sizes */ +#define MTL_TC_ETSCR_TSA_POS 0 +#define MTL_TC_ETSCR_TSA_LEN 2 +#define MTL_TC_QWR_QW_POS 0 +#define MTL_TC_QWR_QW_LEN 21 + +/* MTL traffic class register value */ +#define MTL_TSA_SP 0x00 +#define MTL_TSA_ETS 0x02 + +/* DMA register offsets */ +#define DMA_MR 0x1000 +#define DMA_SBMR 0x1004 +#define DMA_ISR 0x1008 +#define DMA_DSR0 0x100c +#define DMA_DSR1 0x1010 +#define DMA_DSR2 0x1014 +#define DMA_ECC_INT_SR 0x1088 + +/* DMA register entry bit positions and sizes */ +#define DMA_ISR_MACIS_POS 17 +#define DMA_ISR_MACIS_LEN 1 +#define DMA_ISR_MTLIS_POS 16 +#define DMA_ISR_MTLIS_LEN 1 +#define DMA_MR_SWR_POS 0 +#define DMA_MR_SWR_LEN 1 +#define DMA_MR_INTM_POS 16 +#define DMA_MR_INTM_LEN 2 +#define DMA_MR_QUREAD_POS 19 +#define DMA_MR_QUREAD_LEN 1 + +#define DMA_SBMR_EN_LPI_POS 31 +#define DMA_SBMR_EN_LPI_LEN 1 +#define DMA_SBMR_LPI_XIT_PKT_POS 30 +#define DMA_SBMR_LPI_XIT_PKT_LEN 1 +#define DMA_SBMR_WR_OSR_LMT_POS 24 +#define DMA_SBMR_WR_OSR_LMT_LEN 6 +#define DMA_SBMR_RD_OSR_LMT_POS 16 +#define DMA_SBMR_RD_OSR_LMT_LEN 8 +#define DMA_SBMR_EAME_POS 11 +#define DMA_SBMR_EAME_LEN 1 +#define DMA_SBMR_AALE_POS 10 +#define DMA_SBMR_AALE_LEN 1 +#define DMA_SBMR_BLEN_4_POS 1 +#define DMA_SBMR_BLEN_4_LEN 1 +#define DMA_SBMR_BLEN_8_POS 2 +#define DMA_SBMR_BLEN_8_LEN 1 +#define DMA_SBMR_BLEN_16_POS 3 +#define DMA_SBMR_BLEN_16_LEN 1 +#define DMA_SBMR_BLEN_32_POS 4 +#define DMA_SBMR_BLEN_32_LEN 1 +#define DMA_SBMR_BLEN_64_POS 5 +#define DMA_SBMR_BLEN_64_LEN 1 +#define DMA_SBMR_BLEN_128_POS 6 +#define DMA_SBMR_BLEN_128_LEN 1 +#define DMA_SBMR_BLEN_256_POS 7 +#define DMA_SBMR_BLEN_256_LEN 1 +#define DMA_SBMR_FB_POS 0 +#define DMA_SBMR_FB_LEN 1 + +/* DMA register values */ +#define DMA_DSR_RPS_LEN 4 +#define DMA_DSR_TPS_LEN 
4 +#define DMA_DSR_Q_LEN (DMA_DSR_RPS_LEN + DMA_DSR_TPS_LEN) +#define DMA_DSR0_TPS_START 12 +#define DMA_DSRX_FIRST_QUEUE 3 +#define DMA_DSRX_INC 4 +#define DMA_DSRX_QPR 4 /* no definition */ +#define DMA_DSRX_TPS_START 4 +#define DMA_TPS_STOPPED 0x00 +#define DMA_TPS_SUSPENDED 0x06 + +/* DMA channel register offsets + * Multiple channels can be active. The first channel has registers + * that begin at 0x1100. Each subsequent channel has registers that + * are accessed using an offset of 0x80 from the previous channel. + */ +#define DMA_CH_BASE 0x1100 +#define DMA_CH_INC 0x80 + +#define DMA_CH_CR 0x00 +#define DMA_CH_TCR 0x04 +#define DMA_CH_RCR 0x08 +#define DMA_CH_TDLR_HI 0x10 +#define DMA_CH_TDLR_LO 0x14 +#define DMA_CH_RDLR_HI 0x18 +#define DMA_CH_RDLR_LO 0x1c +#define DMA_CH_TDTR_LO 0x20 +#define DMA_CH_RDTR_LO 0x28 +#define DMA_CH_TDRLR 0x2c +#define DMA_CH_RDRLR 0x30 +#define DMA_CH_IER 0x34 +#define DMA_CH_RIWT 0x38 +#define DMA_CH_SR 0x60 + +/* DMA channel register entry bit positions and sizes */ +#define DMA_CH_CR_PBLX8_POS 16 +#define DMA_CH_CR_PBLX8_LEN 1 +#define DMA_CH_CR_SPH_POS 24 +#define DMA_CH_CR_SPH_LEN 1 +#define DMA_CH_IER_AIE_POS 14 +#define DMA_CH_IER_AIE_LEN 1 +#define DMA_CH_IER_FBEE_POS 12 +#define DMA_CH_IER_FBEE_LEN 1 +#define DMA_CH_IER_NIE_POS 15 +#define DMA_CH_IER_NIE_LEN 1 +#define DMA_CH_IER_RBUE_POS 7 +#define DMA_CH_IER_RBUE_LEN 1 +#define DMA_CH_IER_RIE_POS 6 +#define DMA_CH_IER_RIE_LEN 1 +#define DMA_CH_IER_RSE_POS 8 +#define DMA_CH_IER_RSE_LEN 1 +#define DMA_CH_IER_TBUE_POS 2 +#define DMA_CH_IER_TBUE_LEN 1 +#define DMA_CH_IER_TIE_POS 0 +#define DMA_CH_IER_TIE_LEN 1 +#define DMA_CH_IER_TXSE_POS 1 +#define DMA_CH_IER_TXSE_LEN 1 +#define DMA_CH_RCR_PBL_POS 16 +#define DMA_CH_RCR_PBL_LEN 6 +#define DMA_CH_RCR_RBSZ_POS 1 +#define DMA_CH_RCR_RBSZ_LEN 14 +#define DMA_CH_RCR_SR_POS 0 +#define DMA_CH_RCR_SR_LEN 1 +#define DMA_CH_RIWT_RWT_POS 0 +#define DMA_CH_RIWT_RWT_LEN 8 +#define DMA_CH_SR_FBE_POS 12 +#define DMA_CH_SR_FBE_LEN 1 
+#define DMA_CH_SR_RBU_POS 7 +#define DMA_CH_SR_RBU_LEN 1 +#define DMA_CH_SR_RI_POS 6 +#define DMA_CH_SR_RI_LEN 1 +#define DMA_CH_SR_RPS_POS 8 +#define DMA_CH_SR_RPS_LEN 1 +#define DMA_CH_SR_TBU_POS 2 +#define DMA_CH_SR_TBU_LEN 1 +#define DMA_CH_SR_TI_POS 0 +#define DMA_CH_SR_TI_LEN 1 +#define DMA_CH_SR_TPS_POS 1 +#define DMA_CH_SR_TPS_LEN 1 +#define DMA_CH_TCR_OSP_POS 4 +#define DMA_CH_TCR_OSP_LEN 1 +#define DMA_CH_TCR_PBL_POS 16 +#define DMA_CH_TCR_PBL_LEN 6 +#define DMA_CH_TCR_ST_POS 0 +#define DMA_CH_TCR_ST_LEN 1 +#define DMA_CH_TCR_TSE_POS 12 +#define DMA_CH_TCR_TSE_LEN 1 + +/* DMA channel register values */ +#define DMA_OSP_DISABLE 0x00 +#define DMA_OSP_ENABLE 0x01 +#define DMA_PBL_1 1 +#define DMA_PBL_2 2 +#define DMA_PBL_4 4 +#define DMA_PBL_8 8 +#define DMA_PBL_16 16 +#define DMA_PBL_32 32 +#define DMA_PBL_64 64 +#define DMA_PBL_128 128 +#define DMA_PBL_256 256 +#define DMA_PBL_X8_DISABLE 0x00 +#define DMA_PBL_X8_ENABLE 0x01 + +/* Descriptor/Packet entry bit positions and sizes */ +#define RX_PACKET_ERRORS_CRC_POS 2 +#define RX_PACKET_ERRORS_CRC_LEN 1 +#define RX_PACKET_ERRORS_FRAME_POS 3 +#define RX_PACKET_ERRORS_FRAME_LEN 1 +#define RX_PACKET_ERRORS_LENGTH_POS 0 +#define RX_PACKET_ERRORS_LENGTH_LEN 1 +#define RX_PACKET_ERRORS_OVERRUN_POS 1 +#define RX_PACKET_ERRORS_OVERRUN_LEN 1 + +#define RX_PACKET_ATTRIBUTES_CSUM_DONE_POS 0 +#define RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN 1 +#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS 1 +#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN 1 +#define RX_PACKET_ATTRIBUTES_INCOMPLETE_POS 2 +#define RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS 3 +#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN 1 +#define RX_PACKET_ATTRIBUTES_CONTEXT_POS 4 +#define RX_PACKET_ATTRIBUTES_CONTEXT_LEN 1 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS 5 +#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN 1 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_POS 6 +#define RX_PACKET_ATTRIBUTES_RSS_HASH_LEN 1 + +#define RX_NORMAL_DESC0_OVT_POS 0 
+#define RX_NORMAL_DESC0_OVT_LEN 16 +#define RX_NORMAL_DESC2_HL_POS 0 +#define RX_NORMAL_DESC2_HL_LEN 10 +#define RX_NORMAL_DESC3_CDA_LEN 1 +#define RX_NORMAL_DESC3_CTXT_POS 30 +#define RX_NORMAL_DESC3_CTXT_LEN 1 +#define RX_NORMAL_DESC3_ES_POS 15 +#define RX_NORMAL_DESC3_ES_LEN 1 +#define RX_NORMAL_DESC3_ETLT_POS 16 +#define RX_NORMAL_DESC3_ETLT_LEN 3 +#define RX_NORMAL_DESC3_FD_POS 29 +#define RX_NORMAL_DESC3_FD_LEN 1 +#define RX_NORMAL_DESC3_INTE_POS 30 +#define RX_NORMAL_DESC3_INTE_LEN 1 +#define RX_NORMAL_DESC3_L34T_LEN 4 +#define RX_NORMAL_DESC3_LD_POS 28 +#define RX_NORMAL_DESC3_LD_LEN 1 +#define RX_NORMAL_DESC3_OWN_POS 31 +#define RX_NORMAL_DESC3_OWN_LEN 1 +#define RX_NORMAL_DESC3_BUF2V_POS 25 +#define RX_NORMAL_DESC3_BUF2V_LEN 1 +#define RX_NORMAL_DESC3_BUF1V_POS 24 +#define RX_NORMAL_DESC3_BUF1V_LEN 1 +#define RX_NORMAL_DESC3_PL_POS 0 +#define RX_NORMAL_DESC3_PL_LEN 15 +#define RX_NORMAL_DESC3_RSV_LEN 1 + +/* Inner VLAN Tag. Valid only when Double VLAN tag processing + * and VLAN tag stripping are enabled. + */ +#define RX_NORMAL_DESC0_WB_IVT_POS 16 +#define RX_NORMAL_DESC0_WB_IVT_LEN 16 +#define RX_NORMAL_DESC0_WB_OVT_POS 0 /* Outer VLAN Tag. */ +#define RX_NORMAL_DESC0_WB_OVT_LEN 16 +#define RX_NORMAL_DESC0_WB_OVT_VLANID_POS 0 /* Outer VLAN ID. */ +#define RX_NORMAL_DESC0_WB_OVT_VLANID_LEN 12 +#define RX_NORMAL_DESC0_WB_OVT_CFI_POS 12 /* Outer VLAN CFI. */ +#define RX_NORMAL_DESC0_WB_OVT_CFI_LEN 1 +#define RX_NORMAL_DESC0_WB_OVT_PRIO_POS 13 /* Outer VLAN Priority. */ +#define RX_NORMAL_DESC0_WB_OVT_PRIO_LEN 3 + +#define RX_NORMAL_DESC1_WB_IPCE_POS 7 /* IP Payload Error. */ +#define RX_NORMAL_DESC1_WB_IPCE_LEN 1 +#define RX_NORMAL_DESC1_WB_IPV6_POS 5 /* IPV6 Header Present. */ +#define RX_NORMAL_DESC1_WB_IPV6_LEN 1 +#define RX_NORMAL_DESC1_WB_IPV4_POS 4 /* IPV4 Header Present. */ +#define RX_NORMAL_DESC1_WB_IPV4_LEN 1 +#define RX_NORMAL_DESC1_WB_IPHE_POS 3 /* IP Header Error. 
*/ +#define RX_NORMAL_DESC1_WB_IPHE_LEN 1 +#define RX_NORMAL_DESC1_WB_PT_POS 0 +#define RX_NORMAL_DESC1_WB_PT_LEN 3 + +/* Hash Filter Status. When this bit is set, it indicates + * that the packet passed the MAC address hash filter. + */ +#define RX_NORMAL_DESC2_WB_HF_POS 18 +#define RX_NORMAL_DESC2_WB_HF_LEN 1 +/* Destination Address Filter Fail. When Flexible RX Parser + * is disabled, and this bit is set, it indicates that the packet + * failed the DA Filter in the MAC. + */ +#define RX_NORMAL_DESC2_WB_DAF_POS 17 +#define RX_NORMAL_DESC2_WB_DAF_LEN 1 + +#define RX_NORMAL_DESC3_WB_LD_POS 28 +#define RX_NORMAL_DESC3_WB_LD_LEN 1 +/* When this bit is set, it indicates that the status in + * RDES0 is valid and it is written by the DMA. + */ +#define RX_NORMAL_DESC3_WB_RS0V_POS 25 +#define RX_NORMAL_DESC3_WB_RS0V_LEN 1 +/* When this bit is set, it indicates that a Cyclic Redundancy + * Check (CRC) Error occurred on the received packet. This field + * is valid only when the LD bit of RDES3 is set. 
+ */ +#define RX_NORMAL_DESC3_WB_CE_POS 24 +#define RX_NORMAL_DESC3_WB_CE_LEN 1 + +#define RX_DESC3_L34T_IPV4_TCP 1 +#define RX_DESC3_L34T_IPV4_UDP 2 +#define RX_DESC3_L34T_IPV4_ICMP 3 +#define RX_DESC3_L34T_IPV6_TCP 9 +#define RX_DESC3_L34T_IPV6_UDP 10 +#define RX_DESC3_L34T_IPV6_ICMP 11 + +#define RX_DESC1_PT_UDP 1 +#define RX_DESC1_PT_TCP 2 +#define RX_DESC1_PT_ICMP 3 +#define RX_DESC1_PT_AV_TAG_DATA 6 +#define RX_DESC1_PT_AV_TAG_CTRL 7 +#define RX_DESC1_PT_AV_NOTAG_CTRL 5 + +#define RX_CONTEXT_DESC3_TSA_LEN 1 +#define RX_CONTEXT_DESC3_TSD_LEN 1 + +#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS 0 +#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN 1 +#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS 1 +#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN 1 +#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS 2 +#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN 1 +#define TX_PACKET_ATTRIBUTES_PTP_POS 3 +#define TX_PACKET_ATTRIBUTES_PTP_LEN 1 + +#define TX_CONTEXT_DESC2_MSS_POS 0 +#define TX_CONTEXT_DESC2_MSS_LEN 14 +#define TX_CONTEXT_DESC2_IVLTV_POS 16 /* Inner VLAN Tag. */ +#define TX_CONTEXT_DESC2_IVLTV_LEN 16 + +#define TX_CONTEXT_DESC3_CTXT_POS 30 +#define TX_CONTEXT_DESC3_CTXT_LEN 1 +#define TX_CONTEXT_DESC3_TCMSSV_POS 26 +#define TX_CONTEXT_DESC3_TCMSSV_LEN 1 +#define TX_CONTEXT_DESC3_IVTIR_POS 18 +#define TX_CONTEXT_DESC3_IVTIR_LEN 2 +/* Insert an inner VLAN tag with the tag value programmed + * in the MAC_Inner_VLAN_Incl register or context + * descriptor. + */ +#define TX_CONTEXT_DESC3_IVTIR_INSERT 2 +/* Indicates that the Inner VLAN TAG, IVLTV field of context TDES2 is valid. */ +#define TX_CONTEXT_DESC3_IVLTV_POS 17 +#define TX_CONTEXT_DESC3_IVLTV_LEN 1 +/* Indicates that the VT field of context TDES3 is valid. */ +#define TX_CONTEXT_DESC3_VLTV_POS 16 +#define TX_CONTEXT_DESC3_VLTV_LEN 1 +#define TX_CONTEXT_DESC3_VT_POS 0 +#define TX_CONTEXT_DESC3_VT_LEN 16 + +/* Header Length or Buffer 1 Length. 
*/ +#define TX_NORMAL_DESC2_HL_B1L_POS 0 +#define TX_NORMAL_DESC2_HL_B1L_LEN 14 +/* Interrupt on Completion. */ +#define TX_NORMAL_DESC2_IC_POS 31 +#define TX_NORMAL_DESC2_IC_LEN 1 +/* Transmit Timestamp Enable or External TSO Memory Write Enable. */ +#define TX_NORMAL_DESC2_TTSE_POS 30 +#define TX_NORMAL_DESC2_TTSE_LEN 1 +/* LAN Tag Insertion or Replacement. */ +#define TX_NORMAL_DESC2_VTIR_POS 14 +#define TX_NORMAL_DESC2_VTIR_LEN 2 +#define TX_NORMAL_DESC2_VLAN_INSERT 0x2 + +#define TX_NORMAL_DESC3_TCPPL_POS 0 +#define TX_NORMAL_DESC3_TCPPL_LEN 18 +/* Frame Length or TCP Payload Length. */ +#define TX_NORMAL_DESC3_FL_POS 0 +#define TX_NORMAL_DESC3_FL_LEN 15 +/* Checksum Insertion Control or TCP Payload Length. + * 2'b00: Checksum Insertion Disabled. + * 2'b01: Only IP header checksum calculation and insertion are enabled. + * 2'b10: IP header checksum and payload checksum calculation and insertion are + * enabled, but pseudo-header checksum is not calculated in hardware. + * 2'b11: IP Header checksum and payload checksum calculation and insertion are + * enabled, and pseudo - header checksum is calculated in hardware. */ +#define TX_NORMAL_DESC3_CIC_POS 16 +#define TX_NORMAL_DESC3_CIC_LEN 2 +/* TCP Segmentation Enable. */ +#define TX_NORMAL_DESC3_TSE_POS 18 +#define TX_NORMAL_DESC3_TSE_LEN 1 +/* THL: TCP/UDP Header Length.If the TSE bit is set, this field contains + * the length of the TCP / UDP header.The minimum value of this field must + * be 5 for TCP header.The value must be equal to 2 for UDP header. This + * field is valid only for the first descriptor. + */ +#define TX_NORMAL_DESC3_TCPHDRLEN_POS 19 +#define TX_NORMAL_DESC3_TCPHDRLEN_LEN 4 +#define TX_NORMAL_DESC3_CPC_POS 26 /* CRC Pad Control. */ +#define TX_NORMAL_DESC3_CPC_LEN 2 +#define TX_NORMAL_DESC3_LD_POS 28 /* Last Descriptor. */ +#define TX_NORMAL_DESC3_LD_LEN 1 +#define TX_NORMAL_DESC3_FD_POS 29 /* First Descriptor. 
*/ +#define TX_NORMAL_DESC3_FD_LEN 1 +/* Context Type.This bit should be set to 1'b0 for normal descriptor. */ +#define TX_NORMAL_DESC3_CTXT_POS 30 +#define TX_NORMAL_DESC3_CTXT_LEN 1 +#define TX_NORMAL_DESC3_OWN_POS 31 /* Own Bit. */ +#define TX_NORMAL_DESC3_OWN_LEN 1 + +/* for ephy generic register definitions */ + +#define FXGMAC_EPHY_REGS_LEN 32 /* 32 ethernet phy registers under spec */ +#define REG_MII_BMCR 0x00 /* Basic mode control register */ +#define PHY_CR_RESET_POS 15 +#define PHY_CR_RESET_LEN 1 +#define PHY_CR_SPEED_SEL_H_POS 6 +#define PHY_CR_SPEED_SEL_H_LEN 1 +#define PHY_CR_SPEED_SEL_L_POS 13 +#define PHY_CR_SPEED_SEL_L_LEN 1 +#define PHY_CR_AUTOENG_POS 12 +#define PHY_CR_AUTOENG_LEN 1 +#define PHY_CR_RE_AUTOENG_POS 9 +#define PHY_CR_RE_AUTOENG_LEN 1 +#define PHY_CR_DUPLEX_POS 8 +#define PHY_CR_DUPLEX_LEN 1 +#define REG_MII_BMCR_ENABLE_LOOPBACK 0x8140 +#define REG_MII_BMCR_DISABLE_LOOPBACK 0x9140 +#define REG_MII_BMSR 0x01 /* Basic mode status register */ +#define REG_MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define REG_MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define REG_MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define PHY_MII_ADVERTISE_ASYPAUSE_POS 11 +#define PHY_MII_ADVERTISE_ASYPAUSE_LEN 1 +#define PHY_MII_ADVERTISE_PAUSE_POS 10 +#define PHY_MII_ADVERTISE_PAUSE_LEN 1 +#define PHY_MII_ADVERTISE_100FULL_POS 8 +#define PHY_MII_ADVERTISE_100FULL_LEN 1 +#define PHY_MII_ADVERTISE_100HALF_POS 7 +#define PHY_MII_ADVERTISE_100HALF_LEN 1 +#define PHY_MII_ADVERTISE_10FULL_POS 6 +#define PHY_MII_ADVERTISE_10FULL_LEN 1 +#define PHY_MII_ADVERTISE_10HALF_POS 5 +#define PHY_MII_ADVERTISE_10HALF_LEN 1 +#define REG_MII_LPA 0x05 /* Link partner ability reg */ +#define REG_MII_EXPANSION 0x06 /* Expansion register */ +#define REG_MII_NEXT_PAGE 0x07 /* Next page register */ +#define REG_MII_LPR_NEXT_PAGE 0x08 /* LPR next page register */ +#define REG_MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define PHY_MII_CTRL1000_1000FULL_POS 9 +#define 
PHY_MII_CTRL1000_1000FULL_LEN 1 +#define PHY_MII_CTRL1000_1000HALF_POS 8 +#define PHY_MII_CTRL1000_1000HALF_LEN 1 +#define REG_MII_STAT1000 0x0A /* 1000BASE-T status */ +#define PHY_MII_STAT1000_CFG_ERROR_POS 15 +#define PHY_MII_STAT1000_CFG_ERROR_LEN 1 + +#define REG_MII_MMD_CTRL 0x0D /* MMD access control register */ +#define REG_MII_MMD_DATA 0x0E /* MMD access data register */ + +#define REG_MII_ESTATUS 0x0F /* Extended Status */ + +#define REG_MII_SPEC_CTRL 0x10 /* PHY specific func control */ +#define PHY_MII_SPEC_CTRL_CRS_ON_POS 3 +#define PHY_MII_SPEC_CTRL_CRS_ON_LEN 1 +#define REG_MII_SPEC_STATUS 0x11 /* PHY specific status */ +#define PHY_MII_SPEC_DUPLEX_POS 13 +#define PHY_MII_SPEC_DUPLEX_LEN 1 +#define REG_MII_INT_MASK 0x12 /* Interrupt mask register */ + +#ifdef AISC_MODE +#define PHY_INT_MASK_LINK_UP_POS 10 +#define PHY_INT_MASK_LINK_UP_LEN 1 +#define PHY_INT_MASK_LINK_DOWN_POS 11 +#define PHY_INT_MASK_LINK_DOWN_LEN 1 +#else /* FPGA_MODE */ +#define PHY_INT_MASK_LINK_UP_POS 1 +#define PHY_INT_MASK_LINK_UP_LEN 1 +#define PHY_INT_MASK_LINK_DOWN_POS 0 +#define PHY_INT_MASK_LINK_DOWN_LEN 1 +#endif +#define REG_MII_INT_STATUS 0x13 /* Interrupt status register */ +#define PHY_INT_STAT_LINK_UP_POS 1 +#define PHY_INT_STAT_LINK_UP_LEN 1 +#define PHY_INT_STAT_LINK_DOWN_POS 0 +#define PHY_INT_STAT_LINK_DOWN_LEN 1 +#define REG_MII_DOWNG_CTRL 0x14 /* Speed auto downgrade control*/ +#define REG_MII_RERRCOUNTER 0x15 /* Receive error counter */ + +#define REG_MII_EXT_ADDR 0x1E /* Extended reg's address */ +#define REG_MII_EXT_DATA 0x1F /* Extended reg's data */ + +#define FXGMAC_EPHY_ID_MASK 0x0000ffff + +/* for ephy link capability + * Advertisement control register(0x04) + */ + /* Advertisement control register(0x04) */ +#define FXGMAC_ADVERTISE_SLCT 0x001f /* Selector bits */ +#define FXGMAC_ADVERTISE_CSMA 0x0001 /* Only selector supported */ +#define FXGMAC_ADVERTISE_1000FULL 0x0004 /* try for 1000BASE-T full duplex */ +#define FXGMAC_ADVERTISE_1000HALF 0x0008 /* 
try for 1000BASE-T half duplex */ +#define FXGMAC_ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ +#define FXGMAC_ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ +#define FXGMAC_ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ +#define FXGMAC_ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ +#define FXGMAC_ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */ +#define FXGMAC_ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define FXGMAC_ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +#define FXGMAC_ADVERTISE_RESV 0x1000 /* Unused... */ +#define FXGMAC_ADVERTISE_RFAULT 0x2000 /* Say we can detect faults */ +#define FXGMAC_ADVERTISE_LPACK 0x4000 /* Ack link partners response */ +#define FXGMAC_ADVERTISE_NPAGE 0x8000 /* Next page bit */ + +/* 1000BASE-T Control register(0x09) */ +#define REG_BIT_ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define REG_BIT_ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ + +#define REG_BIT_ADVERTISE_1000_CAP (REG_BIT_ADVERTISE_1000FULL | REG_BIT_ADVERTISE_1000HALF) +#define REG_BIT_ADVERTISE_100_10_CAP (FXGMAC_ADVERTISE_100FULL | FXGMAC_ADVERTISE_100HALF | FXGMAC_ADVERTISE_10FULL | FXGMAC_ADVERTISE_10HALF) + +#ifndef SPEED_1000M +#define SPEED_1000M 1000 +#endif +#ifndef SPEED_100M +#define SPEED_100M 100 +#endif +#ifndef SPEED_10M +#define SPEED_10M 10 +#endif + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN 0xffff +#endif + +#ifndef DUPLEX_FULL +#define DUPLEX_FULL 1 +#endif +#ifndef DUPLEX_HALF +#define DUPLEX_HALF 0 +#endif + +#ifndef BIT +#define BIT(n) (0x1<<(n)) +#endif + +#ifndef FUXI_EPHY_SPEED_MODE_BIT +#define FUXI_EPHY_SPEED_MODE 0xc000 +#define FUXI_EPHY_DUPLEX 0x2000 +#define FUXI_EPHY_SPEED_MODE_BIT 14 +#define FUXI_EPHY_DUPLEX_BIT 13 +#define FUXI_EPHY_LINK_STATUS_BIT 10 + +#endif + +#define FUXI_EPHY_SMI_SEL_PHY 0x0 +#define FUXI_EPHY_SMI_SEL_SDS_QSGMII 0x02 +#define FUXI_EPHY_SMI_SEL_SDS_SGMII 0x03 + +#define 
REG_MII_EXT_ANALOG_CFG3 0x52 +#define MII_EXT_ANALOG_CFG3_ADC_START_CFG_POS 14 +#define MII_EXT_ANALOG_CFG3_ADC_START_CFG_LEN 2 +/* VGA bandwidth, default is 2 after reset. Set to 0 to mitigate + * unstable issue in 130m. + */ +#define MII_EXT_ANALOG_CFG3_ADC_START_CFG_DEFAULT 0x0 +#define MII_EXT_ANALOG_CFG3_ON_TIME_CFG_POS 12 +#define MII_EXT_ANALOG_CFG3_ON_TIME_CFG_LEN 2 +#define MII_EXT_ANALOG_CFG3_VGA_AMP_GAIN_CFG_POS 8 +#define MII_EXT_ANALOG_CFG3_VGA_AMP_GAIN_CFG_LEN 4 +#define MII_EXT_ANALOG_CFG3_VGA_IBIAS_CFG_POS 4 +#define MII_EXT_ANALOG_CFG3_VGA_IBIAS_CFG_LEN 3 +#define MII_EXT_ANALOG_CFG3_OCP_CFG_POS 2 +#define MII_EXT_ANALOG_CFG3_OCP_CFG_LEN 2 +#define MII_EXT_ANALOG_CFG3_VGA_LPF_CFG_POS 0 +#define MII_EXT_ANALOG_CFG3_VGA_LPF_CFG_LEN 2 + +#define REG_MII_EXT_PMA_DEBUG_KCOEF 0x78 +#define MII_EXT_PMA_DEBUG_KCOEF_IPR_KCOEF_GE_LNG_POS 8 +#define MII_EXT_PMA_DEBUG_KCOEF_IPR_KCOEF_GE_LNG_LEN 6 +/* After reset, it's 0x10. We need change it to 0x20 to make it + * easier to linkup in gigabit mode with long cable. 
+ */ +#define MII_EXT_PMA_DEBUG_KCOEF_IPR_KCOEF_GE_LNG_DEFAULT 0x20 +#define MII_EXT_PMA_DEBUG_KCOEF_IPR_KCOEF_DEFAULT_POS 0 +#define MII_EXT_PMA_DEBUG_KCOEF_IPR_KCOEF_DEFAULT_LEN 6 + +#define REG_MII_EXT_LPBK_REG 0x0a +#define REG_MII_EXT_LPBK_REG_ENABLE_LOOPBACK 0x3a18 +#define REG_MII_EXT_LPBK_REG_CLEAN_LOOPBACK 0x3a08 +#define REG_MII_EXT_SLEEP_CONTROL_REG 0x27 +#define REG_MII_EXT_SLEEP_REG_ENABLE_LOOPBACK 0x6812 +#define REG_MII_EXT_SLEEP_REG_CLEAN_LOOPBACK 0xe812 + +#define REG_MII_EXT_ANALOG_CFG2 0x51 +#define REG_MII_EXT_ANALOG_CFG2_LED_VALUE 0x4a9 +#define REG_MII_EXT_ANALOG_CFG8 0x57 +#define REG_MII_EXT_ANALOG_CFG8_LED_VALUE 0x274c + +#define REG_MII_EXT_COMMON_LED_CFG 0xA00B +#define REG_MII_EXT_COMMON_LED0_CFG 0xA00C +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION0 0x2600 +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION1 0x00 +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION2 0x20 +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION3 0x2600 +#define REG_MII_EXT_COMMON_LED1_CFG 0xA00D +#define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION0 0x1800 +#define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION1 0x00 +#define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION2 0x40 +#define REG_MII_EXT_COMMON_LED2_CFG 0xA00E +#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION0 0x00 +#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION2 0x07 +#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION3 0x20 +#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION4 0x1800 +#define REG_MII_EXT_COMMON_LED_BLINK_CFG 0xA00F +#define REG_MII_EXT_COMMON_LED_BLINK_CFG_SOLUTION2 0x0F + +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SLEEP_SOLUTION3 0x2600 + +#define REG_MII_EXT_PKG_CFG0 0xA0 +#define REG_MII_EXT_PKG_CHECK_POS 14 +#define REG_MII_EXT_PKG_CHECK_LEN 2 +#define REG_MII_EXT_PKG_ENABLE_CHECK 0x2 +#define REG_MII_EXT_PKG_DISABLE_CHECK 0x1 +#define REG_MII_EXT_SLEEP_CONTROL1 0x27 +#define MII_EXT_SLEEP_CONTROL1_EN_POS 15 +#define MII_EXT_SLEEP_CONTROL1_EN_LEN 1 +#define 
MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_POS 14 +#define MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_LEN 1 +#define REG_MII_EXT_PKG_RX_VALID0 0xA3 +#define REG_MII_EXT_REG_RX_VALID1 0xA4 +#define REG_MII_EXT_REG_RX_OS0 0xA5 +#define REG_MII_EXT_REG_RX_OS1 0xA6 +#define REG_MII_EXT_REG_RX_US0 0xA7 +#define REG_MII_EXT_REG_RX_US1 0xA8 +#define REG_MII_EXT_REG_RX_ERR 0xA9 +#define REG_MII_EXT_REG_RX_0S_BAD 0xAA +#define REG_MII_EXT_REG_RX_FRAGMENT 0xAB +#define REG_MII_EXT_REG_RX_NOSFD 0xAC +#define REG_MII_EXT_REG_TX_VALID0 0xAD +#define REG_MII_EXT_REG_TX_VALID1 0xAE +#define REG_MII_EXT_REG_TX_OS0 0xAF +#define REG_MII_EXT_REG_TX_OS1 0xB0 +#define REG_MII_EXT_REG_TX_US0 0xB1 +#define REG_MII_EXT_REG_TX_US1 0xB2 +#define REG_MII_EXT_REG_TX_ERR 0xB3 +#define REG_MII_EXT_REG_TX_OS_BAD 0xB4 +#define REG_MII_EXT_REG_TX_FRAGMENT 0xB5 +#define REG_MII_EXT_REG_TX_NOSFD 0xB6 +#define REG_MII_EXT_REG_PMA_DBG0_ADC 0x13 +#define REG_MII_EXT_ENABLE_GIGA_POWER_SAVING_FOR_SHORT_CABLE 0x3538 +#define REG_MII_EXT_REG_CLD_REG0 0x3A0 +#define REG_MII_EXT_ENABLE_CLD_NP_WP 0xEB24 +#define REG_MII_EXT_REG_CLD_REG1 0x3CC +#define REG_MII_EXT_ENABLE_CLD_GT_HT_BT 0x7001 +#define REG_MMD_EEE_ABILITY_REG 0x3C +#define REG_MMD_EEE_ABILITY_VALUE 0x06 + +/* Below registers don't belong to GMAC, it has zero offset, not 0x2000 offset. mem_base + REG_XXX. */ +/***When issue happens, driver write this register to trigger pcie sniffer. ***/ +#define REG_PCIE_TRIGGER 0x1000 +#define PCIE_TRIGGER_CODE_TX_HANG 0x00000002 +#define PCIE_TRIGGER_CODE_LINKDOWN 0x00000003 + + +#define MGMT_EPHY_CTRL 0x1004 +/* check register address 0x1004 +* b[6:5] ephy_pause +* b[4:3] ephy_speed 0b10 1000m 0b01 100m +* b[2] ephy_duplex +* b[1] ephy_link +* b[0] ephy_reset.0-reset, 1-unreset. Should be set to 1 before use phy. +*/ +#define MGMT_EPHY_CTRL_RESET_POS 0 +#define MGMT_EPHY_CTRL_RESET_LEN 1 +#define MGMT_EPHY_CTRL_STA_EPHY_RESET 0 /* 0: reset state. */ +#define MGMT_EPHY_CTRL_STA_EPHY_RELEASE 1 /* 1: release state. 
*/ +#define MGMT_EPHY_CTRL_STA_EPHY_LINKUP 2 /* 1: link up; 0: link down. */ +#define MGMT_EPHY_CTRL_STA_EPHY_LINKUP_POS 1 +#define MGMT_EPHY_CTRL_STA_EPHY_LINKUP_LEN 1 +#define MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_POS 2 /* ephy duplex */ +#define MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_LEN 1 + +#define MGMT_EPHY_CTRL_STA_SPEED_POS 3 +#define MGMT_EPHY_CTRL_STA_SPEED_LEN 2 +#define MGMT_EPHY_CTRL_STA_SPEED_MASK 0x18 + +#define MGMT_EPHY_CTRL_ERROR_VAULE 0xFFFFFFFF + +#define MGMT_PCIE_EP_CTRL 0x1008 + +#define MGMT_PCIE_EP_CTRL_DBI_CS_EN_POS 0 +#define MGMT_PCIE_EP_CTRL_DBI_CS_EN_LEN 1 + +#define MGMT_PCIE_CFG_CTRL 0x8BC +#define PCIE_CFG_CTRL_DEFAULT_VAL 0x7ff40 + +#define MGMT_PCIE_CFG_CTRL_CS_EN_POS 0 +#define MGMT_PCIE_CFG_CTRL_CS_EN_LEN 1 + +/***power management ***/ +#define WOL_CTL 0x100C +/* set means magic and remote packet wakeup enable */ +#define WOL_PKT_EN_POS 1 +#define WOL_PKT_EN_LEN 1 +/* set means link change wakeup enable */ +#define WOL_LINKCHG_EN_POS 0 +#define WOL_LINKCHG_EN_LEN 1 + +#define OOB_WOL_CTRL 0x1010 +#define OOB_WOL_CTRL_DIS_POS 0 +#define OOB_WOL_CTRL_DIS_LEN 1 + +/* b3:0 per rx ch interrupt + * b7:4 per tx ch interrupt + * b8 Safety interrupt signal for un-correctable error + * b9 Safety interrupt signal for correctable error + * b10 Interrupt signal to host system + * b11 Magic Packet Received or Remote Wake-up Packet Received + * b12 ethernet phy interrupt + */ +#define MGMT_INT_CTRL0 0x1100 + +/* MAC management registers bit positions and sizes */ +#define MGMT_INT_CTRL0_INT_MASK_POS 16 +#define MGMT_INT_CTRL0_INT_MASK_LEN 16 +#define MGMT_INT_CTRL0_INT_MASK_MASK 0xFFFF +#define MGMT_INT_CTRL0_INT_MASK_RXCH 0xF +#define MGMT_INT_CTRL0_INT_MASK_TXCH 0x10 +#define MGMT_INT_CTRL0_INT_MASK_EX_PMT 0xF7FF +#define MGMT_INT_CTRL0_INT_MASK_DISABLE 0xF000 + +#define MGMT_INT_CTRL0_INT_STATUS_POS 0 +#define MGMT_INT_CTRL0_INT_STATUS_LEN 16 +#define MGMT_INT_CTRL0_INT_STATUS_MASK 0xFFFF +#define MGMT_INT_CTRL0_INT_STATUS_RX 0x0001 +#define 
MGMT_INT_CTRL0_INT_STATUS_TX 0x0010 +#define MGMT_INI_CTRL0_INT_STATUS_TX_INVERSE 0xFFEF +#define MGMG_INT_CTRL0_INT_STATUS_PHY_INVERSE 0xFFDF +#define MGMT_INT_CTRL0_INT_STATUS_PHY 0x0020 + +#define MGMT_INT_CTRL0_INT_MASK_RXCH_POS 16 +#define MGMT_INT_CTRL0_INT_STATUS_RXCH_POS 0 +#define MGMT_INT_CTRL0_INT_STATUS_RXCH_LEN 4 +#define MGMT_INT_CTRL0_INT_STATUS_RXCH_MASK 0xF +#define MGMT_INT_CTRL0_INT_STATUS_RXTX_LEN 5 +#define MGMT_INT_CTRL0_INT_STATUS_RXTX_MASK 0x1F +#define MGMT_INT_CTRL0_INT_STATUS_RXTXPHY_MASK 0x3F + +#define MGMT_INT_CTRL0_INT_MASK_TXCH_POS 20 +#define MGMT_INT_CTRL0_INT_STATUS_TXCH_POS 4 +#define MGMT_INT_CTRL0_INT_STATUS_TXCH_LEN 1 +#define MGMT_INT_CTRL0_INT_STATUS_TXCH_MASK 0x1 + + +/* Interrupt Ctrl1 */ +#define INT_CTRL1 0x1104 +#define INT_CTRL1_TMR_CNT_CFG_MAX_POS 0 /* Timer counter cfg max. Default 0x19, 1us. */ +#define INT_CTRL1_TMR_CNT_CFG_MAX_LEN 10 +#define INT_CTRL1_TMR_CNT_CFG_DEF_VAL 0x19 +#define INT_CTRL1_MSI_AIO_EN_POS 16 +#define INT_CTRL1_MSI_AIO_EN_LEN 1 + +/* Interrupt Moderation */ +#define INT_MOD 0x1108 +#define INT_MOD_TX_POS 16 +#define INT_MOD_TX_LEN 12 +#define INT_MOD_RX_POS 0 +#define INT_MOD_RX_LEN 12 +#define INT_MOD_IN_US 200 /*in us*/ + +/* PCIE LTR 2 working modes: +Two working mode: +1. SW trigger +LTR idle threshold timer set as 0, enable LTR enable will trigger one LTR message +Note: PCIe cfg enable should set in initilization before enable LTR. +2. HW auto trigger +LTR idle threshold timer set as one non-zero value, HW monitor system status, +when system idle timer over threshold, HW send out LTR message +system exit idle state, send out one LTR exit message. +*/ +#define LTR_CTRL 0x1130 +#define LTR_CTRL_IDLE_THRE_TIMER_POS 16 +#define LTR_CTRL_IDLE_THRE_TIMER_LEN 14 /* in 8ns units*/ +#define LTR_CTRL_IDLE_THRE_TIMER_VAL 0x3FFF +#define LTR_CTRL_EN_POS 0 +#define LTR_CTRL_EN_LEN 1 + +#define LTR_CTRL1 0x1134 /* LTR latency message, only for SW enable. 
*/ +#define LTR_CTRL1_LTR_MSG_POS 0 +#define LTR_CTRL1_LTR_MSG_LEN 32 + +#define LTR_CTRL2 0x1138 +#define LTR_CTRL2_DBG_DATA_POS 0 +#define LTR_CTRL2_DBG_DATA_LEN 32 + +#define LTR_IDLE_ENTER 0x113C /* LTR_CTRL3, LTR latency message, only for System IDLE Start. */ +#define LTR_IDLE_ENTER_POS 0 +#define LTR_IDLE_ENTER_LEN 10 +#define LTR_IDLE_ENTER_USVAL 900 +#define LTR_IDLE_ENTER_SCALE_POS 10 +#define LTR_IDLE_ENTER_SCALE_LEN 5 +#define LTR_IDLE_ENTER_SCALE 2 /* 0-1ns, 1-32ns, 2-1024ns, 3-32,768ns, 4-1,048,576ns, 5-33,554,432ns, 110-111-Not Permitted.*/ +#define LTR_IDLE_ENTER_REQUIRE_POS 15 +#define LTR_IDLE_ENTER_REQUIRE_LEN 1 +#define LTR_IDLE_ENTER_REQUIRE 1 + +#define LTR_IDLE_EXIT 0x1140 /* LTR_CTRL4, LTR latency message, only for System IDLE End. */ +#define LTR_IDLE_EXIT_POS 0 +#define LTR_IDLE_EXIT_LEN 10 +#define LTR_IDLE_EXIT_USVAL 2 +#define LTR_IDLE_EXIT_SCALE_POS 10 +#define LTR_IDLE_EXIT_SCALE_LEN 5 +#define LTR_IDLE_EXIT_SCALE 2 +#define LTR_IDLE_EXIT_REQUIRE_POS 15 +#define LTR_IDLE_EXIT_REQUIRE_LEN 1 +#define LTR_IDLE_EXIT_REQUIRE 1 + +#define LPW_CTRL 0x1188 +#define LPW_CTRL_L1SS_EN_POS 22 +#define LPW_CTRL_L1SS_EN_LEN 1 +#define LPW_CTRL_L1SS_SEL_POS 21 /* 0 - up to both CFG0x158 and reg1188 L1ss setting. 1 - up to CFG0x158 L1ss setting. */ +#define LPW_CTRL_L1SS_SEL_LEN 1 +#define LPW_CTRL_L1SS_SEL_CFG 1 +#define LPW_CTRL_ASPM_L1_CPM_POS 19 /*L1.CPM mode enable bit. Default 0, set as 1 enable this mode. clkreq pin need to connect RC*/ +#define LPW_CTRL_ASPM_L1_CPM_LEN 1 +#define LPW_CTRL_ASPM_L0S_EN_POS 17 +#define LPW_CTRL_ASPM_L0S_EN_LEN 1 +#define LPW_CTRL_ASPM_L1_EN_POS 16 +#define LPW_CTRL_ASPM_L1_EN_LEN 1 +#define LPW_CTRL_ASPM_LPW_EN_POS 9 /* application ready to enter L23. */ +#define LPW_CTRL_ASPM_LPW_EN_LEN 1 +#define LPW_CTRL_SYS_CLK_125_SEL_POS 8 /* system 125M select: 125M or 62.5MHz. Default: 125MHz.*/ +#define LPW_CTRL_SYS_CLK_125_SEL_LEN 1 +#define LPW_CTRL_PCIE_RADM_CG_EN_POS 5 /* clock gating enable bit of PCIe Radm clock. 
Default 1; set as 1, enable gating.*/ +#define LPW_CTRL_PCIE_RADM_CG_EN_LEN 1 +#define LPW_CTRL_PCIE_CORE_CG_EN_POS 4 /* clock gating enable bit of PCIe Core clock. Default 1; set as 1, enable gating.*/ +#define LPW_CTRL_PCIE_CORE_CG_EN_LEN 1 +#define LPW_CTRL_PCIE_AXI_CG_EN_POS 3 /* clock gating enable bit of PCIe AXI clock.Default 1; set as 1, enable gating.*/ +#define LPW_CTRL_PCIE_AXI_CG_EN_LEN 1 +#define LPW_CTRL_GMAC_AXI_CG_EN_POS 2 /* clock gating enable bit of GMAC AXI clock. Default 1; set as 1, enable gating.*/ +#define LPW_CTRL_GMAC_AXI_CG_EN_LEN 1 +#define LPW_CTRL_MDIO2APB_CG_EN_POS 1 /* clock gating enable bit of MDIO2APB, default 1. Set as 1, enable clock gating feature. */ +#define LPW_CTRL_MDIO2APB_CG_EN_LEN 1 +#define LPW_CTRL_OTP_CLK_ON_POS 0 /* Turn on before SW OTP operation, default 1. */ +#define LPW_CTRL_OTP_CLK_ON_LEN 1 + +#define MSI_PBA_REG 0x1300 +#define SYS_RESET_REG 0x152C +#define SYS_RESET_POS 31 +#define SYS_RESET_LEN 1 + +#define REG_PCIE_PSM_STATE 0x1994 /* PCIe PHY power state. 
*/ +#define PCIE_PSM_STATE_POS 0 +#define PCIE_PSM_STATE_LEN 4 +#define PCIE_PSM_STATE_P0 2 +#define PCIE_PSM_STATE_P0s 3 +#define PCIE_PSM_STATE_P1 4 +#define PCIE_PSM_STATE_P1_CPM 5 +#define PCIE_PSM_STATE_P1_1 6 +#define PCIE_PSM_STATE_P1_2 7 +#define PCIE_PSM_STATE_P2 8 + +#define REG_PCIE_SERDES_STATUS 0x1998 +#define PCIE_SERDES_STATUS_DRV_ON_POS 11 +#define PCIE_SERDES_STATUS_DRV_ON_LEN 1 +#define PCIE_SERDES_STATUS_RX_PD_POS 10 +#define PCIE_SERDES_STATUS_RX_PD_LEN 1 +#define PCIE_SERDES_STATUS_PI_PD_POS 9 +#define PCIE_SERDES_STATUS_PI_PD_LEN 1 +#define PCIE_SERDES_STATUS_SIGDET_ON_POS 8 +#define PCIE_SERDES_STATUS_SIGDET_ON_LEN 1 +#define PCIE_SERDES_STATUS_TX_VCM_POS 7 +#define PCIE_SERDES_STATUS_TX_VCM_LEN 1 +#define PCIE_SERDES_STATUS_RX_RT50_POS 6 +#define PCIE_SERDES_STATUS_RX_RT50_LEN 1 +#define PCIE_SERDES_STATUS_BEACON_ON_POS 5 +#define PCIE_SERDES_STATUS_BEACON_ON_LEN 1 +#define PCIE_SERDES_STATUS_PLL_ON_POS 4 +#define PCIE_SERDES_STATUS_PLL_ON_LEN 1 +#define PCIE_SERDES_STATUS_REFCLK_ON_POS 3 +#define PCIE_SERDES_STATUS_REFCLK_ON_LEN 1 +#define PCIE_SERDES_STATUS_LDO_ON_POS 2 +#define PCIE_SERDES_STATUS_LDO_ON_LEN 1 +#define PCIE_SERDES_STATUS_HW_EN_SDS_BIAS_POS 1 +#define PCIE_SERDES_STATUS_HW_EN_SDS_BIAS_LEN 1 +#define PCIE_SERDES_STATUS_HW_BIAS_ON_POS 0 +#define PCIE_SERDES_STATUS_HW_BIAS_ON_LEN 1 + +#define REG_PCIE_SERDES_PLL 0x199C +#define PCIE_SERDES_PLL_AUTOOFF_POS 0 +#define PCIE_SERDES_PLL_AUTOOFF_LEN 1 + +#define NS_OF_GLB_CTL 0x1B00 +#define NS_TPID_PRO 0x1B04 +#define NS_LUT_ROMOTE0 0x1B08 +#define NS_LUT_ROMOTE1 0X1B0C +#define NS_LUT_ROMOTE2 0X1B10 +#define NS_LUT_ROMOTE3 0X1B14 +#define NS_LUT_TARGET0 0X1B18 +#define NS_LUT_TARGET1 0X1B1C +#define NS_LUT_TARGET2 0X1B20 +#define NS_LUT_TARGET3 0X1B24 +#define NS_LUT_SOLICITED0 0X1B28 +#define NS_LUT_SOLICITED1 0X1B2C +#define NS_LUT_SOLICITED2 0X1B30 +#define NS_LUT_SOLICITED3 0X1B34 +#define NS_LUT_MAC_ADDR 0X1B38 +#define NS_LUT_MAC_ADDR_CTL 0X1B3C +#define NS_LUT_TARGET4 
0X1B78 +#define NS_LUT_TARGET5 0X1B7c +#define NS_LUT_TARGET6 0X1B80 +#define NS_LUT_TARGET7 0X1B84 + +#define NS_OF_GLB_CTL_TX_CLK_EN_POS 2 +#define NS_OF_GLB_CTL_TX_CLK_EN_LEN 1 +#define NS_OF_GLB_CTL_RX_CLK_EN_POS 1 +#define NS_OF_GLB_CTL_RX_CLK_EN_LEN 1 +#define NS_OF_GLB_CTL_EN_POS 0 +#define NS_OF_GLB_CTL_EN_ELN 1 +#define NS_TPID_PRO_STPID_POS 16 +#define NS_TPID_PRO_STPID_LEN 16 +#define NS_TPID_PRO_CTPID_POS 0 +#define NS_TPID_PRO_CTPID_LEN 16 +#define NS_LUT_DST_CMP_TYPE_POS 19 +#define NS_LUT_DST_CMP_TYPE_LEN 1 +#define NS_LUT_DST_IGNORED_POS 18 +#define NS_LUT_DST_IGNORED_LEN 1 +#define NS_LUT_REMOTE_AWARED_POS 17 +#define NS_LUT_REMOTE_AWARED_LEN 1 +#define NS_LUT_TARGET_ISANY_POS 16 +#define NS_LUT_TARGET_ISANY_LEN 1 +#define NS_LUT_MAC_ADDR_LOW_POS 0 +#define NS_LUT_MAC_ADDR_LOW_LEN 16 + +/* RSS implementation registers, 20210817 */ + +/* 10 RSS key registers */ +#define MGMT_RSS_KEY0 0x1020 +#define MGMT_RSS_KEY9 0x1044 +#define MGMT_RSS_KEY_REG_INC 0x4 + +/* RSS control register */ +#define MGMT_RSS_CTRL 0x1048 +/* b31 enable + * b12:10 indirection table size. 2^(val+1) + * b9:8 default Queue NO. + * b7:0 hash type or options + */ + +/* RSS ctrl register bit definitions. 
+ * [0] ipv4 + * [1] tcpv4 + * [2] udpv4 + * [3] ipv6 + * [4] tcpv6 + * [5] udpv6 +* [6] only ipv4 udp check IP hash +* [7] only ipv6 udp check IP hash + */ +#define MGMT_RSS_CTRL_OPT_POS 0 +#define MGMT_RSS_CTRL_OPT_LEN 8 +#define MGMT_RSS_CTRL_OPT_MASK 0xFF +#define MGMT_RSS_CTRL_IPV4_EN 0x01 +#define MGMT_RSS_CTRL_TCPV4_EN 0x02 +#define MGMT_RSS_CTRL_UDPV4_EN 0x04 +#define MGMT_RSS_CTRL_IPV6_EN 0x08 +#define MGMT_RSS_CTRL_TCPV6_EN 0x10 +#define MGMT_RSS_CTRL_UDPV6_EN 0x20 +#define MGMT_RSS_CTRL_IPV4 0x0 +#define MGMT_RSS_CTRL_IPV4 0x0 + +#define MGMT_RSS_CTRL_DEFAULT_Q_POS 8 +#define MGMT_RSS_CTRL_DEFAULT_Q_LEN 2 +#define MGMT_RSS_CTRL_DEFAULT_Q_MASK 0x3 + +#define MGMT_RSS_CTRL_TBL_SIZE_POS 10 +#define MGMT_RSS_CTRL_TBL_SIZE_LEN 3 +#define MGMT_RSS_CTRL_TBL_SIZE_MASK 0x7 + +#define MAC_RSSCR_RSSE_POS 31 +#define MAC_RSSCR_RSSE_LEN 1 + +/* rss indirection table (IDT) */ +#define MGMT_RSS_IDT 0x1050 +/* b0:1 entry0 + * b2:3 entry1 + * ... + */ +#define MGMT_RSS_IDT_REG_INC 4 +#define MGMT_RSS_IDT_ENTRY_PER_REG 16 +#define MGMT_RSS_IDT_ENTRY_MASK 0x3 +#define MAC_CRC_LENGTH 4 + + /* osc_ctrl */ +#define MGMT_XST_OSC_CTRL 0x1158 +#define MGMT_XST_OSC_CTRL_XST_OSC_SEL_POS 2 +#define MGMT_XST_OSC_CTRL_XST_OSC_SEL_LEN 1 +#define MGMT_XST_OSC_CTRL_EN_OSC_POS 1 +#define MGMT_XST_OSC_CTRL_EN_OSC_LEN 1 +#define MGMT_XST_OSC_CTRL_EN_XST_POS 0 +#define MGMT_XST_OSC_CTRL_EN_XST_LEN 1 + +/* for WPI, yzhang, 20210826 */ +#define MGMT_WPI_CTRL0 0x1160 + /* b1:0 wpi_mode "2b00: normal working mode; 2b01: WPI write mode, work in sleep mode; 2b10: WPI read mode, work after sleep before normal working mode;" + * b2 ram_op_done Each row ram read done, SW can start read after done; + * b3 wpi_op_done WPI read done for the total packet; + * b17:4 wpi_pkt_len WOL packet length, unit byte; + * b31 wpi_fail Error status in Sleep mode; + */ +#define MGMT_WPI_CTRL0_WPI_MODE_POS 0 +#define MGMT_WPI_CTRL0_WPI_MODE_LEN 2 +#define MGMT_WPI_CTRL0_WPI_MODE_NORMAL 0x00 /* normal working mode. 
*/ +/* WPI write mode, work in sleep mode. */ +#define MGMT_WPI_CTRL0_WPI_MODE_WR 0x01 +/* WPI read mode, work after sleep before normal working mode. */ +#define MGMT_WPI_CTRL0_WPI_MODE_RD 0x02 +#define MGMT_WPI_CTRL0_RAM_OP_DONE 0x4 +#define MGMT_WPI_CTRL0_WPI_OP_DONE 0x8 +#define MGMT_WPI_CTRL0_WPI_PKT_LEN_POS 4 +#define MGMT_WPI_CTRL0_WPI_PKT_LEN_LEN 14 +#define MGMT_WPI_CTRL0_WPI_FAIL 0x80000000 + +#define MGMT_WPI_CTRL1_DATA 0x1164 + +#define MGMT_WOL_CTRL 0x1530 + /* b0 link_chg_status 1: waken by link-change + * b1 mgk_pkt_status 1: waken by magic-packet + * b2 rwk_pkt_status 1: waken by remote patten packet + */ +#define MGMT_WOL_CTRL_WPI_LINK_CHG 1 +#define MGMT_WOL_CTRL_WPI_MGC_PKT 2 +#define MGMT_WOL_CTRL_WPI_RWK_PKT 4 +#define MGMT_WOL_CTRL_WPI_RWK_PKT_NUMBER 0x010000 + +#define MGMT_RMK_CTRL 0x1400 + +#define MGMT_SIGDET 0x17F8 +#define MGMT_SIGDET_POS 13 +#define MGMT_SIGDET_LEN 3 +#define MGMT_SIGDET_55MV 7 +#define MGMT_SIGDET_50MV 6 +#define MGMT_SIGDET_45MV 5 /* default value */ +#define MGMT_SIGDET_40MV 4 +#define MGMT_SIGDET_35MV 3 +#define MGMT_SIGDET_30MV 2 +#define MGMT_SIGDET_25MV 1 +#define MGMT_SIGDET_20MV 0 + +#define FXGMAC_MTL_REG(pdata, n, reg) \ + ((pdata)->mac_regs + MTL_Q_BASE + ((n) * MTL_Q_INC) + (reg)) + +#define FXGMAC_DMA_REG(channel, reg) ((channel)->dma_regs + (reg)) + +#define MSI_ID_RXQ0 0 +#define MSI_ID_RXQ1 1 +#define MSI_ID_RXQ2 2 +#define MSI_ID_RXQ3 3 +#define MSI_ID_TXQ0 4 + +#if 1/* msi table modify to 6 0~3 rx 4 tx 5 phy/other */ +#define MSI_ID_PHY_OTHER 5 + +#define MSIX_TBL_MAX_NUM 6 +#define MSIX_TBL_RXTX_NUM 5 + +#else +#define MSI_ID_TXQ1 5 +#define MSI_ID_TXQ2 6 +#define MSI_ID_TXQ3 7 +#define MSI_ID_SFTUE 8 +#define MSI_ID_SFTCE 9 +#define MSI_ID_SBD 10 +#define MSI_ID_PMT 11 +#define MSI_ID_PHY 12 + +#define MSIX_TBL_MAX_NUM 16 +#define MSIX_TBL_RXTX_NUM 8 +#endif +#define MSIX_TBL_BASE_ADDR 0x1200 +#define MSIX_TBL_MASK_OFFSET 0xC +#define MSIX_TBL_DATA_OFFSET 0x8 +#define MSIX_TBL_ADDR_OFFSET 0x0 + 
+/******************************************************************* + efuse entry. val31:0 -> offset15:0 + offset7:0 + offset15:8 + val7:0 + val15:8 + val23:16 + val31:24 +*******************************************************************/ +#define EFUSE_OP_CTRL_0 0x1500 +#define EFUSE_OP_WR_DATA_POS 16 +#define EFUSE_OP_WR_DATA_LEN 8 +#define EFUSE_OP_ADDR_POS 8 +#define EFUSE_OP_ADDR_LEN 8 +#define EFUSE_OP_START_POS 2 +#define EFUSE_OP_START_LEN 1 +#define EFUSE_OP_MODE_POS 0 +#define EFUSE_OP_MODE_LEN 2 +#define EFUSE_OP_MODE_ROW_WRITE 0x0 +#define EFUSE_OP_MODE_ROW_READ 0x1 +#define EFUSE_OP_MODE_AUTO_LOAD 0x2 +#define EFUSE_OP_MODE_READ_BLANK 0x3 + +#define EFUSE_OP_CTRL_1 0x1504 +#define EFUSE_OP_RD_DATA_POS 24 +#define EFUSE_OP_RD_DATA_LEN 8 +#define EFUSE_OP_BIST_ERR_ADDR_POS 16 +#define EFUSE_OP_BIST_ERR_ADDR_LEN 8 +#define EFUSE_OP_BIST_ERR_CNT_POS 8 +#define EFUSE_OP_BIST_ERR_CNT_LEN 8 +#define EFUSE_OP_PGM_PASS_POS 2 +#define EFUSE_OP_PGM_PASS_LEN 1 +#define EFUSE_OP_DONE_POS 1 +#define EFUSE_OP_DONE_LEN 1 + +/* efuse layout refer to http://redmine.motor-comm.com/issues/3856 */ +#define EFUSE_FISRT_UPDATE_ADDR 255 +#define EFUSE_SECOND_UPDATE_ADDR 209 +#define FUXI_EFUSE_MAX_ENTRY 39 +#define FUXI_EFUSE_MAX_ENTRY_UNDER_LED_COMMON 24 +#define EFUSE_PATCH_ADDR_START_BYTE 0 +#define EFUSE_PATCH_DATA_START_BYTE 2 +#define EFUSE_REGION_A_B_LENGTH 18 +#define EFUSE_EACH_PATH_SIZE 6 + +#define EFUSE_REVID_REGISTER 0x0008 +#define EFUSE_SUBSYS_REGISTER 0x002C +/* mac[5]->bit7:0, mac[4]->bit15:8, mac[3]->bit23:16, mac[2]->bit31:24. */ +#define MACA0LR_FROM_EFUSE 0x1520 +/* mac[1]->bit7:0, mac[0]->bit15:8. mac[6] = + * {00, 01, 02, 03, 04, 05} 00-01-02-03-04-05. 
+ */ +#define MACA0HR_FROM_EFUSE 0x1524 + +#define EFUSE_LED_ADDR 0x00 +#define EFUSE_LED_POS 0 +#define EFUSE_LED_LEN 5 +#define EFUSE_OOB_ADDR 0x07 +#define EFUSE_OOB_POS 2 +#define EFUSE_OOB_LEN 1 +#define EFUSE_LED_SOLUTION0 0 +#define EFUSE_LED_SOLUTION1 1 +#define EFUSE_LED_SOLUTION2 2 +#define EFUSE_LED_SOLUTION3 3 +#define EFUSE_LED_SOLUTION4 4 +#define EFUSE_LED_COMMON_SOLUTION 0x1F + +/******************** Below for pcie configuration register. *********************/ +#define REG_PCI_VENDOR_ID 0x0 /* WORD reg */ +#define REG_PCI_DEVICE_ID 0x2 /* WORD reg */ +#define PCI_DEVICE_ID_FUXI 0x6801 + +#define REG_PCI_COMMAND 0x4 +#define PCI_COMMAND_IO_SPACE_POS 0 +#define PCI_COMMAND_IO_SPACE_LEN 1 +#define PCI_COMAMND_MEM_SPACE_POS 1 +#define PCI_COMAMND_MEM_SPACE_LEN 1 +#define PCI_COMMAND_MASTER_POS 2 +#define PCI_COMMAND_MASTER_LEN 1 +#define PCI_COMMAND_DIS_INT_POS 10 +#define PCI_COMMAND_DIS_INT_LEN 1 +#define PCI_COMMAND_INTX_STATUS_POS 19 +#define PCI_COMMAND_INTX_STATUS_LEN 1 + +#define REG_PCI_REVID 0x8 /* BYTE reg */ +#define REG_PCI_PROGRAM_INTF 0x9 /* BYTE reg */ /* PCI Class Program Interface */ +#define REG_PCI_SUB_CLASS 0xA /* BYTE reg */ +#define REG_PCI_BASE_CLASS 0xB /* BYTE reg */ +#define REG_CACHE_LINE_SIZE 0xC + + +#define REG_MEM_BASE 0x10 /* DWORD or QWORD reg */ +#define REG_MEM_BASE_HI 0x14 /* DWORD or QWORD reg */ + +#define REG_IO_BASE 0x20 /* DWORD reg */ + +#define REG_PCI_SUB_VENDOR_ID 0x2C /* WORD reg */ +#define REG_PCI_SUB_DEVICE_ID 0x2E /* WORD reg */ + +#define REG_INT_LINE 0x3C /* BYTE reg */ + +#define REG_PM_STATCTRL 0x44 /* WORD reg */ +#define PM_STATCTRL_PWR_STAT_POS 0 +#define PM_STATCTRL_PWR_STAT_LEN 2 +#define PM_STATCTRL_PWR_STAT_D3 3 +#define PM_STATCTRL_PWR_STAT_D0 0 +#define PM_CTRLSTAT_PME_EN_POS 8 +#define PM_CTRLSTAT_PME_EN_LEN 1 +#define PM_CTRLSTAT_DATA_SEL_POS 9 +#define PM_CTRLSTAT_DATA_SEL_LEN 4 +#define PM_CTRLSTAT_DATA_SCAL_POS 13 +#define PM_CTRLSTAT_DATA_SCAL_LEN 2 +#define PM_CTRLSTAT_PME_STAT_POS 
15 +#define PM_CTRLSTAT_PME_STAT_LEN 1 + +#define REG_DEVICE_CTRL1 0x78 +#define DEVICE_CTRL1_CONTROL_POS 0 +#define DEVICE_CTRL1_CONTROL_LEN 16 +#define DEVICE_CTRL1_STATUS_POS 16 +#define DEVICE_CTRL1_STATUS_LEN 16 + +#define REG_PCI_LINK_CTRL 0x80 +#define PCI_LINK_CTRL_CONTROL_POS 0 +#define PCI_LINK_CTRL_CONTROL_LEN 16 +#define PCI_LINK_CTRL_ASPM_CONTROL_POS 0 +#define PCI_LINK_CTRL_ASPM_CONTROL_LEN 2 +#define PCI_LINK_CTRL_L1_STATUS 2 +#define PCI_LINK_CTRL_CONTROL_CPM_POS 8 /*L1.CPM mode enable bit. Default 0, set as 1 enable this mode. clkreq pin need to connect RC*/ +#define PCI_LINK_CTRL_CONTROL_CPM_LEN 1 +#define PCI_LINK_CTRL_STATUS_POS 16 +#define PCI_LINK_CTRL_STATUS_LEN 16 + +#define REG_DEVICE_CTRL2 0x98 /* WORD reg */ +#define DEVICE_CTRL2_LTR_EN_POS 10 /* Enable from BIOS side. */ +#define DEVICE_CTRL2_LTR_EN_LEN 1 + +#define REG_MSIX_CAPABILITY 0xB0 + +/* ASPM L1ss PM Substates */ +#define REG_ASPM_L1SS_CAP 0x154 /* Capabilities Register */ +#define ASPM_L1SS_CAP_PCIPM_L1_2_POS 0 /* PCI-PM L1.2 Supported */ +#define ASPM_L1SS_CAP_PCIPM_L1_2_LEN 1 +#define ASPM_L1SS_CAP_PCIPM_L1_1_POS 1 /* PCI-PM L1.1 Supported */ +#define ASPM_L1SS_CAP_PCIPM_L1_1_LEN 1 +#define ASPM_L1SS_CAP_ASPM_L1_2_POS 2 /* ASPM L1.2 Supported */ +#define ASPM_L1SS_CAP_ASPM_L1_2_LEN 1 +#define ASPM_L1SS_CAP_ASPM_L1_1_POS 3 /* ASPM L1.1 Supported */ +#define ASPM_L1SS_CAP_ASPM_L1_1_LEN 1 +#define ASPM_L1SS_CAP_L1_PM_SS_POS 4 /* L1 PM Substates Supported */ +#define ASPM_L1SS_CAP_L1_PM_SS_LEN 1 +#define ASPM_L1SS_CAP_CM_RESTORE_TIME_POS 8 /* Port Common_Mode_Restore_Time */ +#define ASPM_L1SS_CAP_CM_RESTORE_TIME_LEN 8 +#define ASPM_L1SS_CAP_P_PWR_ON_SCALE_POS 16 /* Port T_POWER_ON scale */ +#define ASPM_L1SS_CAP_P_PWR_ON_SCALE_LEN 2 +#define ASPM_L1SS_CAP_P_PWR_ON_VALUE_POS 19 /* Port T_POWER_ON value */ +#define ASPM_L1SS_CAP_P_PWR_ON_VALUE_LEN 5 + +#define REG_ASPM_L1SS_CTRL1 0x158 +#define REG_ASPM_L1SS_CTRL1_VALUE 0x405e000f +#define ASPM_L1SS_CTRL1_L12_PCIPM_EN_POS 0 /* 
L1.2 in D3 state. */ +#define ASPM_L1SS_CTRL1_L12_PCIPM_EN_LEN 1 +#define ASPM_L1SS_CTRL1_L11_PCIPM_EN_POS 1 /* L1.1 in D3 state. */ +#define ASPM_L1SS_CTRL1_L11_PCIPM_EN_LEN 1 +#define ASPM_L1SS_CTRL1_L12_EN_POS 2 +#define ASPM_L1SS_CTRL1_L12_EN_LEN 1 +#define ASPM_L1SS_CTRL1_L11_EN_POS 3 +#define ASPM_L1SS_CTRL1_L11_EN_LEN 1 +#define ASPM_L1SS_CTRL1_CM_RESTORE_TIME_POS 8 /* Common_Mode_Restore_Time */ +#define ASPM_L1SS_CTRL1_CM_RESTORE_TIME_LEN 8 +#define ASPM_L1SS_CTRL1_LTR_L12_TH_VALUE_POS 16 /* LTR_L1.2_THRESHOLD_Value */ +#define ASPM_L1SS_CTRL1_LTR_L12_TH_VALUE_LEN 10 +#define ASPM_L1SS_CTRL1_L12_TH_SCALE_POS 29 /* LTR_L1.2_THRESHOLD_Scale */ +#define ASPM_L1SS_CTRL1_L12_TH_SCALE_LEN 3 + +#define REG_ASPM_L1SS_CTL2 0x15c /* Control 2 Register */ + +#define REG_ASPM_CONTROL 0x70C +#define ASPM_L1_IDLE_THRESHOLD_POS 27 +#define ASPM_L1_IDLE_THRESHOLD_LEN 3 +#define ASPM_L1_IDLE_THRESHOLD_1US 0 +#define ASPM_L1_IDLE_THRESHOLD_2US 1 +#define ASPM_L1_IDLE_THRESHOLD_4US 2 +#define ASPM_L1_IDLE_THRESHOLD_8US 3 /* default value after reset. */ +#define ASPM_L1_IDLE_THRESHOLD_16US 4 +#define ASPM_L1_IDLE_THRESHOLD_32US 5 +#define ASPM_L1_IDLE_THRESHOLD_64US 6 + +#define REG_POWER_EIOS 0x710 +#define POWER_EIOS_POS 7 +#define POWER_EIOS_LEN 1 + +#endif /* __FUXI_GMAC_REG_H__ */ diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h new file mode 100644 index 000000000000..ea01ebdadc4e --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h @@ -0,0 +1,934 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. 
*/ + +#ifndef __FUXI_GMAC_H__ +#define __FUXI_GMAC_H__ + +#include "fuxi-os.h" + +/* For fpga before 20210507 */ +#define FXGMAC_FPGA_VER_B4_0507 0 +#define FXGMAC_FPGA_VER_20210507 1 + +#define FXGMAC_DRV_NAME "yt6801" + +#define FXGMAC_DRV_DESC "Motorcomm FUXI GMAC Driver" + +#define FUXI_MAC_REGS_OFFSET 0x2000 + +/* 1: in normal D0 state, turn off ephy link change interrupt. */ +#define FUXI_EPHY_INTERRUPT_D0_OFF 0 +/* 1:when rec buffer is not enough, to create rbd and rec buffer, + * but the rdb need to be continuous with the initialized rdb, so + * close the feature + */ +#define FUXI_ALLOC_NEW_RECBUFFER 0 + +#define RESUME_MAX_TIME 3000000 +#define PHY_LINK_TIMEOUT 3000 +#define ESD_RESET_MAXIMUM 0 + +#define REGWR_RETRY_MAXIMUM 2600 +#define PCIE_LINKDOWN_VALUE 0xFFFFFFFF + +#define FXGMAC_MSIX_Q_VECTORS 4 + +#define FXGMAC_IS_CHANNEL_WITH_TX_IRQ(chId) (0 == (chId) ? 1 : 0) + +/* flags for ipv6 NS offload address, local link or Global unicast */ +#define FXGMAC_NS_IFA_LOCAL_LINK 1 +#define FXGMAC_NS_IFA_GLOBAL_UNICAST 2 + +#define FXGMAX_ASPM_WAR_EN +/* Descriptor related parameters */ +#if FXGMAC_TX_HANG_TIMER_EN +#define FXGMAC_TX_DESC_CNT 1024 +#else +/* 256 to make sure the tx ring is in the 4k range when + * FXGMAC_TX_HANG_TIMER_EN is 0 + */ +#define FXGMAC_TX_DESC_CNT 256 +#endif +#define FXGMAC_TX_DESC_MIN_FREE (FXGMAC_TX_DESC_CNT >> 3) +#define FXGMAC_TX_DESC_MAX_PROC (FXGMAC_TX_DESC_CNT >> 1) +#define FXGMAC_RX_DESC_CNT 1024 +#define FXGMAC_RX_DESC_MAX_DIRTY (FXGMAC_RX_DESC_CNT >> 3) + +/* Descriptors required for maximum contiguous TSO/GSO packet */ +#define FXGMAC_TX_MAX_SPLIT ((GSO_MAX_SIZE / FXGMAC_TX_MAX_BUF_SIZE) + 1) + +/* Maximum possible descriptors needed for a SKB */ +#define FXGMAC_TX_MAX_DESC_NR (MAX_SKB_FRAGS + FXGMAC_TX_MAX_SPLIT + 2) + +#define FXGMAC_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) +#define FXGMAC_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) +#define FXGMAC_RX_BUF_ALIGN 64 + +/* Maximum Size for Splitting the Header 
Data + * Keep in sync with SKB_ALLOC_SIZE + * 3'b000: 64 bytes, 3'b001: 128 bytes + * 3'b010: 256 bytes, 3'b011: 512 bytes + * 3'b100: 1023 bytes , 3'b101'3'b111: Reserved + */ +#define FXGMAC_SPH_HDSMS_SIZE 3 +#define FXGMAC_SKB_ALLOC_SIZE 512 + +/* In Linux Driver, it set MAX_FIFO size 131072, here it uses + * the same value as windows driver + */ +#define FXGMAC_MAX_FIFO 81920 + +#define FXGMAC_MAX_DMA_CHANNELS FXGMAC_MSIX_Q_VECTORS +#define FXGMAC_DMA_STOP_TIMEOUT 5 +#define FXGMAC_DMA_INTERRUPT_MASK 0x31c7 +#define FXGMAC_MAX_DMA_CHANNELS_PLUS_1TX (FXGMAC_MAX_DMA_CHANNELS + 1) + +/* Default coalescing parameters */ +#define FXGMAC_INIT_DMA_TX_USECS INT_MOD_IN_US +#define FXGMAC_INIT_DMA_TX_FRAMES 25 +#define FXGMAC_INIT_DMA_RX_USECS INT_MOD_IN_US +#define FXGMAC_INIT_DMA_RX_FRAMES 25 +#define FXGMAC_MAX_DMA_RIWT 0xff +#define FXGMAC_MIN_DMA_RIWT 0x01 + +/* Flow control queue count */ +#define FXGMAC_MAX_FLOW_CONTROL_QUEUES 8 + +/* System clock is 125 MHz */ +#define FXGMAC_SYSCLOCK 125000000 + +/* Maximum MAC address hash table size (256 bits = 8 bytes) */ +#define FXGMAC_MAC_HASH_TABLE_SIZE 8 + +/* wol pattern settings */ +#define MAX_PATTERN_SIZE 128 /* PATTERN length */ +#define MAX_PATTERN_COUNT 16 /* pattern count */ +#define MAX_LPP_ARP_OFFLOAD_COUNT 1 +#define MAX_LPP_NS_OFFLOAD_COUNT 2 + +#define MAX_WPI_LENGTH_SIZE 1536 /* WPI packet. */ +#define PM_WAKE_PKT_ALIGN 8 /* try use 64 bit boundary... */ + +/* Receive Side Scaling */ +#define FXGMAC_RSS_HASH_KEY_SIZE 40 +#define FXGMAC_RSS_MAX_TABLE_SIZE 128 +#define FXGMAC_RSS_LOOKUP_TABLE_TYPE 0 +#define FXGMAC_RSS_HASH_KEY_TYPE 1 +#define MAX_MSI_COUNT 16 /* Max Msi/Msix supported. 
*/ + +#define FXGMAC_STD_PACKET_MTU 1500 +#define FXGMAC_JUMBO_PACKET_MTU 9014 + +#define NIC_MAX_TCP_OFFLOAD_SIZE 7300 +#define NIC_MIN_LSO_SEGMENT_COUNT 2 + +/* power management */ +#define FXGMAC_POWER_STATE_DOWN 0 +#define FXGMAC_POWER_STATE_UP 1 + +struct wol_bitmap_pattern { + u32 flags; + u32 pattern_size; + u32 mask_size; + u8 mask_info[MAX_PATTERN_SIZE / 8]; + u8 pattern_info[MAX_PATTERN_SIZE]; + u8 pattern_offset; + u16 pattern_crc; +}; + +struct led_setting { + u32 s0_led_setting[5]; + u32 s3_led_setting[5]; + u32 s5_led_setting[5]; + u32 disable_led_setting[5]; +}; + +typedef struct led_setting LED_SETTING; +typedef struct wol_bitmap_pattern WOL_BITMAP_PATTERN; + +/* note, maybe we should refer to NDIS_PM_WAKE_REASON_TYPE + * to avoid duplication definition.... + */ +typedef enum { + WAKE_REASON_NONE = 0, + WAKE_REASON_MAGIC, + WAKE_REASON_PATTERNMATCH, + WAKE_REASON_LINK, + WAKE_REASON_TCPSYNV4, + WAKE_REASON_TCPSYNV6, + /* for wake up method like Link-change, for that, + * GMAC cannot identify and need more checking. 
+ */ + WAKE_REASON_TBD, + WAKE_REASON_HW_ERR, +} WAKE_REASON; + +/* Helper macro for descriptor handling + * Always use FXGMAC_GET_DESC_DATA to access the descriptor data + */ +#define FXGMAC_GET_DESC_DATA(ring, idx) ((ring)->desc_data_head + (idx)) +#define FXGMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1)) + +struct fxgmac_pdata; + +enum fxgmac_int { + FXGMAC_INT_DMA_CH_SR_TI, + FXGMAC_INT_DMA_CH_SR_TPS, + FXGMAC_INT_DMA_CH_SR_TBU, + FXGMAC_INT_DMA_CH_SR_RI, + FXGMAC_INT_DMA_CH_SR_RBU, + FXGMAC_INT_DMA_CH_SR_RPS, + FXGMAC_INT_DMA_CH_SR_TI_RI, + FXGMAC_INT_DMA_CH_SR_FBE, + FXGMAC_INT_DMA_ALL, +}; + +struct fxgmac_stats { + /* MMC TX counters */ + u64 txoctetcount_gb; + u64 txframecount_gb; + u64 txbroadcastframes_g; + u64 txmulticastframes_g; + u64 tx64octets_gb; + u64 tx65to127octets_gb; + u64 tx128to255octets_gb; + u64 tx256to511octets_gb; + u64 tx512to1023octets_gb; + u64 tx1024tomaxoctets_gb; + u64 txunicastframes_gb; + u64 txmulticastframes_gb; + u64 txbroadcastframes_gb; + u64 txunderflowerror; + u64 txsinglecollision_g; + u64 txmultiplecollision_g; + u64 txdeferredframes; + u64 txlatecollisionframes; + u64 txexcessivecollisionframes; + u64 txcarriererrorframes; + u64 txoctetcount_g; + u64 txframecount_g; + u64 txexcessivedeferralerror; + u64 txpauseframes; + u64 txvlanframes_g; + u64 txoversize_g; + + /* MMC RX counters */ + u64 rxframecount_gb; + u64 rxoctetcount_gb; + u64 rxoctetcount_g; + u64 rxbroadcastframes_g; + u64 rxmulticastframes_g; + u64 rxcrcerror; + u64 rxalignerror; + u64 rxrunterror; + u64 rxjabbererror; + u64 rxundersize_g; + u64 rxoversize_g; + u64 rx64octets_gb; + u64 rx65to127octets_gb; + u64 rx128to255octets_gb; + u64 rx256to511octets_gb; + u64 rx512to1023octets_gb; + u64 rx1024tomaxoctets_gb; + u64 rxunicastframes_g; + u64 rxlengtherror; + u64 rxoutofrangetype; + u64 rxpauseframes; + u64 rxfifooverflow; + u64 rxvlanframes_gb; + u64 rxwatchdogerror; + u64 rxreceiveerrorframe; + u64 rxcontrolframe_g; + + /* Extra counters */ + u64 
tx_tso_packets; + u64 rx_split_header_packets; + u64 tx_process_stopped; + u64 rx_process_stopped; + u64 tx_buffer_unavailable; + u64 rx_buffer_unavailable; + u64 fatal_bus_error; + u64 tx_vlan_packets; + u64 rx_vlan_packets; + u64 napi_poll_isr; + u64 napi_poll_txtimer; + u64 cnt_alive_txtimer; + + u64 ephy_poll_timer_cnt; + u64 mgmt_int_isr; +}; + +struct fxgmac_ring_buf { + struct sk_buff *skb; + DMA_ADDR_T skb_dma; + unsigned int skb_len; +}; + +/* Common Tx and Rx DMA hardware descriptor */ +struct fxgmac_dma_desc { + __le32 desc0; + __le32 desc1; + __le32 desc2; + __le32 desc3; +}; + +/* Page allocation related values */ +struct fxgmac_page_alloc { + struct page *pages; + unsigned int pages_len; + unsigned int pages_offset; + DMA_ADDR_T pages_dma; +}; + +/* Ring entry buffer data */ +struct fxgmac_buffer_data { + struct fxgmac_page_alloc pa; + struct fxgmac_page_alloc pa_unmap; + + DMA_ADDR_T dma_base; + unsigned long dma_off; + unsigned int dma_len; +}; + +/* Tx-related desc data */ +struct fxgmac_tx_desc_data { + unsigned int packets; /* BQL packet count */ + unsigned int bytes; /* BQL byte count */ +}; + +/* Rx-related desc data */ +struct fxgmac_rx_desc_data { + struct fxgmac_buffer_data hdr; /* Header locations */ + struct fxgmac_buffer_data buf; /* Payload locations */ + + unsigned short hdr_len; /* Length of received header */ + unsigned short len; /* Length of received packet */ +}; + +struct fxgmac_pkt_info { + struct sk_buff *skb; + + unsigned int attributes; + + unsigned int errors; + + /* descriptors needed for this packet */ + unsigned int desc_count; + unsigned int length; + + unsigned int tx_packets; + unsigned int tx_bytes; + + unsigned int header_len; + unsigned int tcp_header_len; + unsigned int tcp_payload_len; + unsigned short mss; + + unsigned short vlan_ctag; + + u64 rx_tstamp; + + u32 rss_hash; + RSS_HASH_TYPE rss_hash_type; +}; + +struct fxgmac_desc_data { + /* dma_desc: Virtual address of descriptor + * dma_desc_addr: DMA address of 
descriptor + */ + struct fxgmac_dma_desc *dma_desc; + DMA_ADDR_T dma_desc_addr; + + /* skb: Virtual address of SKB + * skb_dma: DMA address of SKB data + * skb_dma_len: Length of SKB DMA area + */ + struct sk_buff *skb; + DMA_ADDR_T skb_dma; + unsigned int skb_dma_len; + + /* Tx/Rx -related data */ + struct fxgmac_tx_desc_data tx; + struct fxgmac_rx_desc_data rx; + + unsigned int mapped_as_page; + + /* Incomplete receive save location. If the budget is exhausted + * or the last descriptor (last normal descriptor or a following + * context descriptor) has not been DMA'd yet the current state + * of the receive processing needs to be saved. + */ + unsigned int state_saved; + struct { + struct sk_buff *skb; + unsigned int len; + unsigned int error; + } state; +}; + +struct fxgmac_ring { + /* Per packet related information */ + struct fxgmac_pkt_info pkt_info; + + /* Virtual/DMA addresses of DMA descriptor list and the total count */ + struct fxgmac_dma_desc *dma_desc_head; + DMA_ADDR_T dma_desc_head_addr; + unsigned int dma_desc_count; + + /* Array of descriptor data corresponding the DMA descriptor + * (always use the FXGMAC_GET_DESC_DATA macro to access this data) + */ + struct fxgmac_desc_data *desc_data_head; + + /* Page allocation for RX buffers */ + struct fxgmac_page_alloc rx_hdr_pa; + struct fxgmac_page_alloc rx_buf_pa; + + /* Ring index values + * cur - Tx: index of descriptor to be used for current transfer + * Rx: index of descriptor to check for packet availability + * dirty - Tx: index of descriptor to check for transfer complete + * Rx: index of descriptor to check for buffer reallocation + */ + unsigned int cur; + unsigned int dirty; + + /* Coalesce frame count used for interrupt bit setting */ + unsigned int coalesce_count; + + struct { + unsigned int xmit_more; + unsigned int queue_stopped; + unsigned short cur_mss; + unsigned short cur_vlan_ctag; + } tx; +} ____cacheline_aligned; + +struct fxgmac_channel { + char name[16]; + + /* Address of private 
data area for device */ + struct fxgmac_pdata *pdata; + + /* Queue index and base address of queue's DMA registers */ + unsigned int queue_index; + + IOMEM dma_regs; + + /* Per channel interrupt irq number */ + u32 dma_irq; + FXGMAC_CHANNEL_OF_PLATFORM expansion; + + unsigned int saved_ier; + + unsigned int tx_timer_active; + + struct fxgmac_ring *tx_ring; + struct fxgmac_ring *rx_ring; +} ____cacheline_aligned; + +struct fxphy_ag_adv { + u8 auto_neg_en : 1; + u8 full_1000m : 1; + u8 half_1000m : 1; + u8 full_100m : 1; + u8 half_100m : 1; + u8 full_10m : 1; + u8 half_10m : 1; +}; + +struct fxgmac_desc_ops { + int (*alloc_channles_and_rings)(struct fxgmac_pdata *pdata); + void (*free_channels_and_rings)(struct fxgmac_pdata *pdata); + int (*map_tx_skb)(struct fxgmac_channel *channel, struct sk_buff *skb); + int (*map_rx_buffer)(struct fxgmac_pdata *pdata, + struct fxgmac_ring *ring, + struct fxgmac_desc_data *desc_data); + void (*unmap_desc_data)(struct fxgmac_pdata *pdata, + struct fxgmac_desc_data *desc_data); + void (*tx_desc_init)(struct fxgmac_pdata *pdata); + void (*rx_desc_init)(struct fxgmac_pdata *pdata); +}; + +struct fxgmac_hw_ops { + int (*init)(struct fxgmac_pdata *pdata); + int (*exit)(struct fxgmac_pdata *pdata); + void (*save_nonstick_reg)(struct fxgmac_pdata *pdata); + void (*restore_nonstick_reg)(struct fxgmac_pdata *pdata); + int (*set_gmac_register)(struct fxgmac_pdata *pdata, u8 *address, + unsigned int data); + u32 (*get_gmac_register)(struct fxgmac_pdata *pdata, u8 *address); + void (*esd_restore_pcie_cfg)(struct fxgmac_pdata *pdata); + + int (*tx_complete)(struct fxgmac_dma_desc *dma_desc); + + void (*enable_tx)(struct fxgmac_pdata *pdata); + void (*disable_tx)(struct fxgmac_pdata *pdata); + void (*enable_rx)(struct fxgmac_pdata *pdata); + void (*disable_rx)(struct fxgmac_pdata *pdata); + void (*enable_channel_rx)(struct fxgmac_pdata *pdata, + unsigned int queue); + + int (*enable_int)(struct fxgmac_channel *channel, + enum fxgmac_int int_id); 
+ int (*disable_int)(struct fxgmac_channel *channel, + enum fxgmac_int int_id); + void (*set_interrupt_moderation)(struct fxgmac_pdata *pdata); + void (*enable_msix_rxtxinterrupt)(struct fxgmac_pdata *pdata); + void (*disable_msix_interrupt)(struct fxgmac_pdata *pdata); + void (*enable_msix_rxtxphyinterrupt)(struct fxgmac_pdata *pdata); + void (*enable_msix_one_interrupt)(struct fxgmac_pdata *pdata, + u32 intid); + void (*disable_msix_one_interrupt)(struct fxgmac_pdata *pdata, + u32 intid); + bool (*enable_mgm_interrupt)(struct fxgmac_pdata *pdata); + bool (*disable_mgm_interrupt)(struct fxgmac_pdata *pdata); + + void (*dev_xmit)(struct fxgmac_channel *channel); + int (*dev_read)(struct fxgmac_channel *channel); + + int (*set_mac_address)(struct fxgmac_pdata *pdata, u8 *addr); + int (*set_mac_hash)(struct fxgmac_pdata *pdata); + int (*config_rx_mode)(struct fxgmac_pdata *pdata); + int (*enable_rx_csum)(struct fxgmac_pdata *pdata); + int (*disable_rx_csum)(struct fxgmac_pdata *pdata); + void (*config_tso)(struct fxgmac_pdata *pdata); + + /* For MII speed configuration */ + int (*config_mac_speed)(struct fxgmac_pdata *pdata); + int (*set_xlgmii_2500_speed)(struct fxgmac_pdata *pdata); + int (*set_xlgmii_1000_speed)(struct fxgmac_pdata *pdata); + int (*set_xlgmii_100_speed)(struct fxgmac_pdata *pdata); + int (*get_xlgmii_phy_status)(struct fxgmac_pdata *pdata, u32 *speed, + bool *link_up, + bool link_up_wait_to_complete); + + /* For descriptor related operation */ + void (*tx_desc_init)(struct fxgmac_channel *channel); + void (*rx_desc_init)(struct fxgmac_channel *channel); + void (*tx_desc_reset)(struct fxgmac_desc_data *desc_data); + void (*rx_desc_reset)(struct fxgmac_pdata *pdata, + struct fxgmac_desc_data *desc_data, + unsigned int index); + int (*is_last_desc)(struct fxgmac_dma_desc *dma_desc); + int (*is_context_desc)(struct fxgmac_dma_desc *dma_desc); + void (*tx_start_xmit)(struct fxgmac_channel *channel, + struct fxgmac_ring *ring); + void 
(*set_pattern_data)(struct fxgmac_pdata *pdata); + void (*config_wol)(struct fxgmac_pdata *pdata, int en); + + /* For Flow Control */ + int (*config_tx_flow_control)(struct fxgmac_pdata *pdata); + int (*config_rx_flow_control)(struct fxgmac_pdata *pdata); + + /* For Jumbo Frames */ + int (*config_mtu)(struct fxgmac_pdata *pdata); + int (*enable_jumbo)(struct fxgmac_pdata *pdata); + + /* For Vlan related config */ + int (*enable_tx_vlan)(struct fxgmac_pdata *pdata); + int (*disable_tx_vlan)(struct fxgmac_pdata *pdata); + int (*enable_rx_vlan_stripping)(struct fxgmac_pdata *pdata); + int (*disable_rx_vlan_stripping)(struct fxgmac_pdata *pdata); + int (*enable_rx_vlan_filtering)(struct fxgmac_pdata *pdata); + int (*disable_rx_vlan_filtering)(struct fxgmac_pdata *pdata); + int (*update_vlan_hash_table)(struct fxgmac_pdata *pdata); + + /* For RX coalescing */ + int (*config_rx_coalesce)(struct fxgmac_pdata *pdata); + int (*config_tx_coalesce)(struct fxgmac_pdata *pdata); + unsigned int (*usec_to_riwt)(struct fxgmac_pdata *pdata, + unsigned int usec); + unsigned int (*riwt_to_usec)(struct fxgmac_pdata *pdata, + unsigned int riwt); + + /* For RX and TX threshold config */ + int (*config_rx_threshold)(struct fxgmac_pdata *pdata, + unsigned int val); + int (*config_tx_threshold)(struct fxgmac_pdata *pdata, + unsigned int val); + + /* For RX and TX Store and Forward Mode config */ + int (*config_rsf_mode)(struct fxgmac_pdata *pdata, unsigned int val); + int (*config_tsf_mode)(struct fxgmac_pdata *pdata, unsigned int val); + + /* For TX DMA Operate on Second Frame config */ + int (*config_osp_mode)(struct fxgmac_pdata *pdata); + + /* For RX and TX PBL config */ + int (*config_rx_pbl_val)(struct fxgmac_pdata *pdata); + int (*get_rx_pbl_val)(struct fxgmac_pdata *pdata); + int (*config_tx_pbl_val)(struct fxgmac_pdata *pdata); + int (*get_tx_pbl_val)(struct fxgmac_pdata *pdata); + int (*config_pblx8)(struct fxgmac_pdata *pdata); + + /* For MMC statistics */ + void 
(*rx_mmc_int)(struct fxgmac_pdata *pdata); + void (*tx_mmc_int)(struct fxgmac_pdata *pdata); + void (*read_mmc_stats)(struct fxgmac_pdata *pdata); + bool (*update_stats_counters)(struct fxgmac_pdata *pdata, + bool ephy_check_en); + + /* For Receive Side Scaling */ + int (*enable_rss)(struct fxgmac_pdata *pdata); + int (*disable_rss)(struct fxgmac_pdata *pdata); + u32 (*get_rss_options)(struct fxgmac_pdata *pdata); + int (*set_rss_options)(struct fxgmac_pdata *pdata); + int (*set_rss_hash_key)(struct fxgmac_pdata *pdata, const u8 *key); + int (*set_rss_lookup_table)(struct fxgmac_pdata *pdata, + const u32 *table); + + /*For Offload*/ + void (*set_arp_offload)(struct fxgmac_pdata *pdata, + unsigned char *ip_addr); + int (*enable_arp_offload)(struct fxgmac_pdata *pdata); + int (*disable_arp_offload)(struct fxgmac_pdata *pdata); + + /*NS offload*/ + int (*set_ns_offload)(struct fxgmac_pdata *pdata, unsigned int index, + unsigned char *remote_addr, + unsigned char *solicited_addr, + unsigned char *target_addr1, + unsigned char *target_addr2, + unsigned char *mac_addr); + int (*enable_ns_offload)(struct fxgmac_pdata *pdata); + int (*disable_ns_offload)(struct fxgmac_pdata *pdata); + + int (*enable_wake_magic_pattern)(struct fxgmac_pdata *pdata); + int (*disable_wake_magic_pattern)(struct fxgmac_pdata *pdata); + + int (*enable_wake_link_change)(struct fxgmac_pdata *pdata); + int (*disable_wake_link_change)(struct fxgmac_pdata *pdata); + + int (*check_wake_pattern_fifo_pointer)(struct fxgmac_pdata *pdata); + int (*set_wake_pattern)(struct fxgmac_pdata *pdata, + struct wol_bitmap_pattern *wol_pattern, + u32 pattern_cnt); + int (*enable_wake_pattern)(struct fxgmac_pdata *pdata); + int (*disable_wake_pattern)(struct fxgmac_pdata *pdata); + int (*set_wake_pattern_mask)(struct fxgmac_pdata *pdata, + u32 filter_index, u8 register_index, + u32 Data); +#if defined(FUXI_PM_WPI_READ_FEATURE_EN) && FUXI_PM_WPI_READ_FEATURE_EN + void (*get_wake_packet_indication)(struct fxgmac_pdata 
*pdata, + int *wake_reason, + u32 *wake_pattern_number, + u8 *wpi_buf, u32 buf_size, + u32 *packet_size); + void (*enable_wake_packet_indication)(struct fxgmac_pdata *pdata, + int en); +#endif + + void (*reset_phy)(struct fxgmac_pdata *pdata); + /*for release phy, phy write and read, and provide clock to GMAC. */ + void (*release_phy)(struct fxgmac_pdata *pdata); + void (*enable_phy_check)(struct fxgmac_pdata *pdata); + void (*disable_phy_check)(struct fxgmac_pdata *pdata); + void (*setup_cable_loopback)(struct fxgmac_pdata *pdata); + void (*clean_cable_loopback)(struct fxgmac_pdata *pdata); + void (*disable_phy_sleep)(struct fxgmac_pdata *pdata); + void (*enable_phy_sleep)(struct fxgmac_pdata *pdata); + void (*phy_green_ethernet)(struct fxgmac_pdata *pdata); + void (*phy_eee_feature)(struct fxgmac_pdata *pdata); + int (*get_ephy_state)(struct fxgmac_pdata *pdata); + int (*write_ephy_reg)(struct fxgmac_pdata *pdata, u32 val, u32 data); + int (*read_ephy_reg)(struct fxgmac_pdata *pdata, u32 val, u32 *data); + int (*set_ephy_autoneg_advertise)(struct fxgmac_pdata *pdata, + struct fxphy_ag_adv phy_ag_adv); + int (*phy_config)(struct fxgmac_pdata *pdata); + void (*close_phy_led)(struct fxgmac_pdata *pdata); + void (*led_under_active)(struct fxgmac_pdata *pdata); + void (*led_under_sleep)(struct fxgmac_pdata *pdata); + void (*led_under_shutdown)(struct fxgmac_pdata *pdata); + void (*led_under_disable)(struct fxgmac_pdata *pdata); + + /* For power management */ + void (*pre_power_down)(struct fxgmac_pdata *pdata, bool phyloopback); + int (*diag_sanity_check)(struct fxgmac_pdata *pdata); + int (*write_rss_lookup_table)(struct fxgmac_pdata *pdata); + int (*get_rss_hash_key)(struct fxgmac_pdata *pdata, u8 *key_buf); + void (*config_power_down)(struct fxgmac_pdata *pdata, unsigned int wol); + void (*config_power_up)(struct fxgmac_pdata *pdata); + unsigned char (*set_suspend_int)(void *pdata); + void (*set_resume_int)(struct fxgmac_pdata *pdata); + int 
(*set_suspend_txrx)(struct fxgmac_pdata *pdata); + void (*set_pwr_clock_gate)(struct fxgmac_pdata *pdata); + void (*set_pwr_clock_ungate)(struct fxgmac_pdata *pdata); + + /* for multicast address list */ + int (*set_all_multicast_mode)(struct fxgmac_pdata *pdata, + unsigned int enable); + void (*config_multicast_mac_hash_table)(struct fxgmac_pdata *pdata, + unsigned char *pmc_mac, + int b_add); + + /* for packet filter-promiscuous and broadcast */ + int (*set_promiscuous_mode)(struct fxgmac_pdata *pdata, + unsigned int enable); + int (*enable_rx_broadcast)(struct fxgmac_pdata *pdata, + unsigned int enable); + + /* efuse relevant operation. */ + bool (*read_patch_from_efuse)(struct fxgmac_pdata *pdata, u32 offset, + u32 *value); /* read patch per index. */ + bool (*read_patch_from_efuse_per_index)( + struct fxgmac_pdata *pdata, u8 index, u32 *offset, + u32 *value); /* read patch per index. */ + bool (*write_patch_to_efuse)(struct fxgmac_pdata *pdata, u32 offset, + u32 value); + bool (*write_patch_to_efuse_per_index)(struct fxgmac_pdata *pdata, + u8 index, u32 offset, u32 value); + bool (*read_mac_subsys_from_efuse)(struct fxgmac_pdata *pdata, + u8 *mac_addr, u32 *subsys, + u32 *revid); + bool (*write_mac_subsys_to_efuse)(struct fxgmac_pdata *pdata, + u8 *mac_addr, u32 *subsys, + u32 *revid); + bool (*read_mac_addr_from_efuse)(struct fxgmac_pdata *pdata, + u8 *mac_addr); + bool (*write_mac_addr_to_efuse)(struct fxgmac_pdata *pdata, + u8 *mac_addr); + bool (*efuse_load)(struct fxgmac_pdata *pdata); + bool (*read_efuse_data)(struct fxgmac_pdata *pdata, u32 offset, + u32 *value); + bool (*write_oob)(struct fxgmac_pdata *pdata); + bool (*write_led)(struct fxgmac_pdata *pdata, u32 value); + bool (*read_led_config)(struct fxgmac_pdata *pdata); + bool (*write_led_config)(struct fxgmac_pdata *pdata); + + int (*pcie_init)(struct fxgmac_pdata *pdata, bool ltr_en, + bool aspm_l1ss_en, bool aspm_l1_en, bool aspm_l0s_en); + void (*trigger_pcie)( + struct fxgmac_pdata *pdata, + 
u32 code); /* To trigger pcie sniffer for analysis. */ +#ifdef DPDK + int (*phy_init)(struct fxgmac_pdata *); + int (*phy_start)(struct fxgmac_pdata *); + void (*phy_stop)(struct fxgmac_pdata *); + void (*phy_status)(struct fxgmac_pdata *); + void (*an_isr)( + struct fxgmac_pdata + *); /* phy_if->an_isr For single interrupt support */ +#endif +}; + +/* This structure contains flags that indicate what hardware features + * or configurations are present in the device. + */ +struct fxgmac_hw_features { + /* HW Version */ + unsigned int version; + + /* HW Feature Register0 */ + unsigned int phyifsel; /* PHY interface support */ + unsigned int vlhash; /* VLAN Hash Filter */ + unsigned int sma; /* SMA(MDIO) Interface */ + unsigned int rwk; /* PMT remote wake-up packet */ + unsigned int mgk; /* PMT magic packet */ + unsigned int mmc; /* RMON module */ + unsigned int aoe; /* ARP Offload */ + unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */ + unsigned int eee; /* Energy Efficient Ethernet */ + unsigned int tx_coe; /* Tx Checksum Offload */ + unsigned int rx_coe; /* Rx Checksum Offload */ + unsigned int addn_mac; /* Additional MAC Addresses */ + unsigned int ts_src; /* Timestamp Source */ + unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */ + + /* HW Feature Register1 */ + unsigned int rx_fifo_size; /* MTL Receive FIFO Size */ + unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */ + unsigned int adv_ts_hi; /* Advance Timestamping High Word */ + unsigned int dma_width; /* DMA width */ + unsigned int dcb; /* DCB Feature */ + unsigned int sph; /* Split Header Feature */ + unsigned int tso; /* TCP Segmentation Offload */ + unsigned int dma_debug; /* DMA Debug Registers */ + unsigned int rss; /* Receive Side Scaling */ + unsigned int tc_cnt; /* Number of Traffic Classes */ + unsigned int avsel; /* AV Feature Enable */ + unsigned int ravsel; /* Rx Side Only AV Feature Enable */ + unsigned int hash_table_size; /* Hash Table Size */ + unsigned int 
l3l4_filter_num; /* Number of L3-L4 Filters */ + + /* HW Feature Register2 */ + unsigned int rx_q_cnt; /* Number of MTL Receive Queues */ + unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */ + unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */ + unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */ + unsigned int pps_out_num; /* Number of PPS outputs */ + unsigned int aux_snap_num; /* Number of Aux snapshot inputs */ + + /* HW Feature Register3 */ + u32 hwfr3; +}; + +struct fxgmac_resources { + IOMEM addr; + int irq; +}; + +struct fxgmac_pdata { + struct net_device *netdev; + struct device *dev; + PCI_DEV *pdev; + void *pAdapter; + + struct fxgmac_hw_ops hw_ops; + struct fxgmac_desc_ops desc_ops; + + /* Device statistics */ + struct fxgmac_stats stats; + + u32 msg_enable; + u32 reg_nonstick[0x300 >> 2]; + + /* MAC registers base */ + IOMEM mac_regs; + IOMEM base_mem; + + /* Hardware features of the device */ + struct fxgmac_hw_features hw_feat; + + /* Rings for Tx/Rx on a DMA channel */ + struct fxgmac_channel *channel_head; + unsigned int channel_count; + unsigned int tx_ring_count; + unsigned int rx_ring_count; + unsigned int tx_desc_count; + unsigned int rx_desc_count; + unsigned int tx_q_count; + unsigned int rx_q_count; + + /* Tx/Rx common settings */ + unsigned int pblx8; + + /* Tx settings */ + unsigned int tx_sf_mode; + unsigned int tx_threshold; + unsigned int tx_pbl; + unsigned int tx_osp_mode; +#if FXGMAC_TX_HANG_TIMER_EN + /* for tx hang checking. 
20211227 */ + unsigned int tx_hang_restart_queuing; +#endif + + /* Rx settings */ + unsigned int rx_sf_mode; + unsigned int rx_threshold; + unsigned int rx_pbl; + + /* Tx coalescing settings */ + unsigned int tx_usecs; + unsigned int tx_frames; + + /* Rx coalescing settings */ + unsigned int rx_riwt; + unsigned int rx_usecs; + unsigned int rx_frames; + + /* Current Rx buffer size */ + unsigned int rx_buf_size; + + /* Flow control settings */ + unsigned int tx_pause; + unsigned int rx_pause; + + /* Jumbo frames */ + unsigned int mtu; + unsigned int jumbo; + + /* CRC checking */ + unsigned int crc_check; + + /* MSIX */ + unsigned int msix; + + /* RSS */ + unsigned int rss; + + /* VlanID */ + unsigned int vlan; + unsigned int vlan_exist; + unsigned int vlan_filter; + unsigned int vlan_strip; + + /* Interrupt Moderation */ + unsigned int intr_mod; + unsigned int intr_mod_timer; + + /* Device interrupt number */ + int dev_irq; + unsigned int per_channel_irq; + /* change type from int to u32 to match MSIx, p_msix_entry.vector; */ + u32 channel_irq[FXGMAC_MAX_DMA_CHANNELS_PLUS_1TX]; + + /* Netdev related settings */ + unsigned char mac_addr[ETH_ALEN]; + + /* Filtering support */ +#if FXGMAC_FILTER_MULTIPLE_VLAN_ENABLED + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif + + /* Device clocks */ + unsigned long sysclk_rate; + + /* Receive Side Scaling settings */ + u8 rss_key[FXGMAC_RSS_HASH_KEY_SIZE]; + u32 rss_table[FXGMAC_RSS_MAX_TABLE_SIZE]; + u32 rss_options; + + int phy_speed; + int phy_duplex; + int phy_autoeng; + + char drv_name[32]; + char drv_ver[32]; + + struct wol_bitmap_pattern pattern[MAX_PATTERN_COUNT]; + + struct led_setting led; + struct led_setting ledconfig; + + FXGMAC_PDATA_OF_PLATFORM expansion; + + u32 pcie_link_status; +}; + +#define FXGMAC_FLAG_MSI_CAPABLE ((u32)(1 << 0)) /* bit0 */ +#define FXGMAC_FLAG_MSI_ENABLED ((u32)(1 << 1)) /* bit1 */ +#define FXGMAC_FLAG_MSIX_CAPABLE ((u32)(1 << 2)) /* bit2 */ +#define FXGMAC_FLAG_MSIX_ENABLED 
((u32)(1 << 3)) /* bit3 */ +#define FXGMAC_FLAG_LEGACY_ENABLED ((u32)(1 << 4)) /* bit4 */ + +#define FXGMAC_FLAG_INTERRUPT_POS 0 +#define FXGMAC_FLAG_INTERRUPT_LEN 5 + +#define FXGMAC_FLAG_MSI_POS 1 +#define FXGMAC_FLAG_MSI_LEN 1 +#define FXGMAC_FLAG_MSIX_POS 3 +#define FXGMAC_FLAG_MSIX_LEN 1 +#define FXGMAC_FLAG_LEGACY_POS 4 +#define FXGMAC_FLAG_LEGACY_LEN 1 +#define FXGMAC_FLAG_LEGACY_IRQ_FREE_POS 31 /* bit31 */ +#define FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN 1 +#define FXGMAC_FLAG_LEGACY_NAPI_FREE_POS 30 /* bit30 */ +#define FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN 1 + +void fxgmac_init_desc_ops(struct fxgmac_desc_ops *desc_ops); +void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops); +const struct net_device_ops *fxgmac_get_netdev_ops(void); +const struct ethtool_ops *fxgmac_get_ethtool_ops(void); +void fxgmac_dump_tx_desc(struct fxgmac_pdata *pdata, struct fxgmac_ring *ring, + unsigned int idx, unsigned int count, + unsigned int flag); +void fxgmac_dump_rx_desc(struct fxgmac_pdata *pdata, struct fxgmac_ring *ring, + unsigned int idx); +void fxgmac_dbg_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx); +void fxgmac_get_all_hw_features(struct fxgmac_pdata *pdata); +void fxgmac_print_all_hw_features(struct fxgmac_pdata *pdata); +int fxgmac_drv_probe(struct device *dev, struct fxgmac_resources *res); +int fxgmac_drv_remove(struct device *dev); + +#endif /* __FUXI_GMAC_H__ */ diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h new file mode 100644 index 000000000000..1a40267e1fa2 --- /dev/null +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h @@ -0,0 +1,515 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2021 Motorcomm Corporation. 
*/ + + +#ifndef __FUXI_OS_H__ +#define __FUXI_OS_H__ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_PCI_MSI +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "fuxi-dbg.h" + +struct fxgmac_ring; +struct fxgmac_pdata; + +#define FXGMAC_DRV_VERSION "1.0.27" + +#define PCIE_LP_ASPM_L0S 1 +#define PCIE_LP_ASPM_L1 2 +#define PCIE_LP_ASPM_L1SS 4 +#define PCIE_LP_ASPM_LTR 8 + +#define FXGMAC_FAIL -1 +#define FXGMAC_SUCCESS 0 +#define FXGMAC_DEV_CMD (SIOCDEVPRIVATE + 1) +#define FXGMAC_IOCTL_DFS_COMMAND _IOWR('M', 0x80, struct ext_ioctl_data) + +#define FXGMAC_MAX_DBG_TEST_PKT 150 +#define FXGMAC_MAX_DBG_BUF_LEN 64000 +#define FXGMAC_MAX_DBG_RX_DATA 1600 +#define FXGMAC_NETDEV_OPS_BUF_LEN 256 + +#define FXGMAC_TEST_MAC_HEAD_LEN 14 + +#define FUXI_PM_WPI_READ_FEATURE_EN 1 + +#define RSS_Q_COUNT 4 + +#define FXGMAC_TX_HANG_TIMER_EN 0 +/* only for debug. for normal run, pls keep them both 0 + * 0: use default tx q; other: specify txq-1: 1 txq; + */ +#define FXGMAC_NUM_OF_TX_Q_USED 0 +/* 1 to enable a dummy tx, ie, no tail for gmac; */ +#define FXGMAC_DUMMY_TX_DEBUG 0 +/* 1 to trigger(write reg 0x1000) for sniffer stop */ +#define FXGMAC_TRIGGER_TX_HANG 0 + +/* driver feature configuration */ +#if FXGMAC_TX_HANG_TIMER_EN +/* 0: check hw current desc; 1: check software dirty */ +#define FXGMAC_TX_HANG_CHECH_DIRTY 0 +#endif + +/* 1:poll tx of 4 channels; 0: since only 1 tx channel supported in this + * version, poll ch 0 always. + */ + +#define FXGMAC_FULL_TX_CHANNEL 0 + +#ifdef CONFIG_ARM64 +/* when you want to run this driver on 64bit arm, you should open this, + * otherwise dma's mask cannot be set successfully. 
+ */ +#define FUXI_DMA_BIT_MASK 64 +#endif + +#ifdef CONFIG_PCI_MSI +/* should be same as FXGMAC_MAX_DMA_CHANNELS + 1 tx_irq */ +#define FXGMAC_MAX_MSIX_Q_VECTORS (FXGMAC_MSIX_Q_VECTORS + 1) +#define FXGMAC_MSIX_CH0RXDIS_EN 0 /* set to 1 for ch0 unbalance fix; */ +#define FXGMAC_MSIX_INTCTRL_EN 1 + +#define FXGMAC_PHY_INT_NUM 1 +#define FXGMAC_MSIX_INT_NUMS (FXGMAC_MAX_MSIX_Q_VECTORS + FXGMAC_PHY_INT_NUM) +#else /* for case of no CONFIG_PCI_MSI */ +/* NO modification needed! for non-MSI, set to 0 always */ +#define FXGMAC_MSIX_CH0RXDIS_EN 0 +#define FXGMAC_MSIX_INTCTRL_EN 0 +#endif + +/*RSS features*/ +#ifdef FXGMAC_ONE_CHANNEL +#define FXGMAC_RSS_FEATURE_ENABLED 0 /* 1:enable rss ; 0: rss not included. */ +#else +#define FXGMAC_RSS_FEATURE_ENABLED 1 /* 1:enable rss ; 0: rss not included. */ +#endif +#define FXGMAC_RSS_HASH_KEY_LINUX 1 /* 0:hard to default rss key ;1: normal hash key process from Linux. */ + +/*WOL features*/ +#define FXGMAC_WOL_FEATURE_ENABLED 1 /* 1:enable wol ; 0: wol not included. */ +/*since wol upon link will cause issue, disabled it always. */ +#define FXGMAC_WOL_UPON_EPHY_LINK 1 /* 1:enable ephy link change wol ; 0: ephy link change wol is not supported. */ + +/*Pause features*/ +#define FXGMAC_PAUSE_FEATURE_ENABLED 1 /* 1:enable flow control/pause framce ; 0: flow control/pause frame not included. */ + +/*ARP offload engine (AOE)*/ +#define FXGMAC_AOE_FEATURE_ENABLED 1 /* 1:enable arp offload engine ; 0: aoe is not included. */ + +/*NS offload engine*/ +#define FXGMAC_NS_OFFLOAD_ENABLED 1 /* 1:enable NS offload for IPv6 ; 0: NS is not included. */ + +/*for fpga ver after, which needs release phy before set of MAC tx/rx */ +#define FXGMAC_TXRX_EN_AFTER_PHY_RELEASE 1 /* 1:release ephy before mac tx/rx bits are set. */ + +/*power management features*/ +#define FXGMAC_PM_FEATURE_ENABLED 1 /* 1:enable PM ; 0: PM not included. 
*/ + +/*sanity check*/ +#define FXGMAC_SANITY_CHECK_ENABLED 0 /* 1:enable health checking; */ + +/*vlan id filter*/ +#define FXGMAC_FILTER_SINGLE_VLAN_ENABLED 1 /* 1:enable health checking; */ +#define FXGMAC_FILTER_MULTIPLE_VLAN_ENABLED 1 +#define FUXI_MAC_HASH_TABLE 1 +#define FXGMAC_FILTER_MULTIPLE_MAC_ADDR_ENABLED 1 +#define FUXI_MISC_INT_HANDLE_FEATURE_EN 1 + +#define HAVE_FXGMAC_DEBUG_FS + +#ifndef offsetof +#define offsetof(TYPE, MEMBER) ((size_t) &(((TYPE *)0)->MEMBER)) +#endif + +#define ETH_IS_ZEROADDRESS(Address) \ + ((((u8 *)(Address))[0] == ((u8)0x00)) \ + && (((u8 *)(Address))[1] == ((u8)0x00)) \ + && (((u8 *)(Address))[2] == ((u8)0x00)) \ + && (((u8 *)(Address))[3] == ((u8)0x00)) \ + && (((u8 *)(Address))[4] == ((u8)0x00)) \ + && (((u8 *)(Address))[5] == ((u8)0x00))) + + /* read from 8bit register via pci config space */ +#define cfg_r8(_pdata, reg, pdat) pci_read_config_byte((_pdata)->pdev, (reg), (u8 *)(pdat)) + + /* read from 16bit register via pci config space */ +#define cfg_r16(_pdata, reg, pdat) pci_read_config_word((_pdata)->pdev, (reg), (u16 *)(pdat)) + + /* read from 32bit register via pci config space */ +#define cfg_r32(_pdata, reg, pdat) pci_read_config_dword((_pdata)->pdev, (reg), (u32 *)(pdat)) + +/* write to 8bit register via pci config space */ +#define cfg_w8(_pdata, reg, val) pci_write_config_byte((_pdata)->pdev, (reg), (u8)(val)) + +/* write to 16bit register via pci config space */ +#define cfg_w16(_pdata, reg, val) pci_write_config_word((_pdata)->pdev, (reg), (u16)(val)) + +/* write to 32bit register via pci config space */ +#define cfg_w32(_pdata, reg, val) pci_write_config_dword((_pdata)->pdev, (reg), (u32)(val)) + +#define readreg(pAdapter, addr) (readl(addr)) +#define writereg(pAdapter, val, addr) (writel(val, addr)) +#define usleep_range_ex(pAdapter, a, b) (usleep_range(a, b)) +#define _CR(Record, TYPE, Field) ((TYPE *) ((char *) (Record) - (char *) &(((TYPE *) 0)->Field))) + +#define FXGMAC_GET_REG_BITS(var, pos, len) ({ \ 
+ typeof(pos) _pos = (pos); \ + typeof(len) _len = (len); \ + ((var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos); \ +}) + +#define FXGMAC_GET_REG_BITS_LE(var, pos, len) ({ \ + typeof(pos) _pos = (pos); \ + typeof(len) _len = (len); \ + typeof(var) _var = le32_to_cpu((var)); \ + ((_var) & GENMASK(_pos + _len - 1, _pos)) >> (_pos); \ +}) + +#define FXGMAC_SET_REG_BITS(var, pos, len, val) ({ \ + typeof(var) _var = (var); \ + typeof(pos) _pos = (pos); \ + typeof(len) _len = (len); \ + typeof(val) _val = (val); \ + _val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos); \ + _var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val; \ +}) + +#define FXGMAC_SET_REG_BITS_LE(var, pos, len, val) ({ \ + typeof(var) _var = (var); \ + typeof(pos) _pos = (pos); \ + typeof(len) _len = (len); \ + typeof(val) _val = (val); \ + _val = (_val << _pos) & GENMASK(_pos + _len - 1, _pos); \ + _var = (_var & ~GENMASK(_pos + _len - 1, _pos)) | _val; \ + cpu_to_le32(_var); \ +}) + +#define STR_FORMAT "%s" + +#define DbgPrintF(level, fmt, ...) +#define DBGPRINT(Level, Fmt) +#define DBGPRINT_RAW(Level, Fmt) +#define DBGPRINT_S(Status, Fmt) +#define DBGPRINT_UNICODE(Level, UString) +#define Dump(p, cb, fAddress, ulGroup) + +#undef ASSERT +#define ASSERT(x) + +#define DbgPrintOidName(_Oid) +#define DbgPrintAddress(_pAddress) + +#define fxgmac_dump_buffer(_skb, _len, _tx_rx) +#define DumpLine(_p, _cbLine, _fAddress, _ulGroup) + +#ifndef FXGMAC_DEBUG +#define FXGMAC_DEBUG +#endif + +/* For debug prints */ +#ifdef FXGMAC_DEBUG +#define FXGMAC_PR(fmt, args...) \ + pr_alert("[%s,%d]:" fmt, __func__, __LINE__, ## args) + +#define DPRINTK printk +#else +#define FXGMAC_PR(x...) do { } while (0) +#define DPRINTK(x...) 
+#endif + +#define IOC_MAGIC 'M' +#define IOC_MAXNR (0x80 + 5) + +#define FUXI_DFS_IOCTL_DEVICE_INACTIVE 0x10001 +#define FUXI_DFS_IOCTL_DEVICE_RESET 0x10002 +#define FUXI_DFS_IOCTL_DIAG_BEGIN 0x10003 +#define FUXI_DFS_IOCTL_DIAG_END 0x10004 +#define FUXI_DFS_IOCTL_DIAG_TX_PKT 0x10005 +#define FUXI_DFS_IOCTL_DIAG_RX_PKT 0x10006 + +#define FXGMAC_EFUSE_UPDATE_LED_CFG 0x10007 +#define FXGMAC_EFUSE_WRITE_LED 0x10008 +#define FXGMAC_EFUSE_WRITE_PATCH_REG 0x10009 +#define FXGMAC_EFUSE_WRITE_PATCH_PER_INDEX 0x1000A +#define FXGMAC_EFUSE_WRITE_OOB 0x1000B +#define FXGMAC_EFUSE_LOAD 0x1000C +#define FXGMAC_EFUSE_READ_REGIONABC 0x1000D +#define FXGMAC_EFUSE_READ_PATCH_REG 0x1000E +#define FXGMAC_EFUSE_READ_PATCH_PER_INDEX 0x1000F +#define FXGMAC_EFUSE_LED_TEST 0x10010 + +#define FXGMAC_GET_MAC_DATA 0x10011 +#define FXGMAC_SET_MAC_DATA 0x10012 +#define FXGMAC_GET_SUBSYS_ID 0x10013 +#define FXGMAC_SET_SUBSYS_ID 0x10014 +#define FXGMAC_GET_GMAC_REG 0x10015 +#define FXGMAC_SET_GMAC_REG 0x10016 +#define FXGMAC_GET_PHY_REG 0x10017 +#define FXGMAC_SET_PHY_REG 0x10018 +#define FXGMAC_EPHYSTATISTICS 0x10019 +#define FXGMAC_GET_STATISTICS 0x1001A +#define FXGMAC_GET_PCIE_LOCATION 0x1001B + +#define FXGMAC_GET_GSO_SIZE 0x1001C +#define FXGMAC_SET_GSO_SIZE 0x1001D +#define FXGMAC_SET_RX_MODERATION 0x1001E +#define FXGMAC_SET_TX_MODERATION 0x1001F +#define FXGMAC_GET_TXRX_MODERATION 0x10020 + +#define MAX_PKT_BUF 1 +#define FXGAMC_MAX_DATA_SIZE (1024 * 4 + 16) + +#ifndef PCI_CAP_ID_MSI +#define PCI_CAP_ID_MSI 0x05 /* Message Signalled Interrupts */ +#endif + +#ifndef PCI_CAP_ID_MSIX +#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */ +#endif + +#define PCI_CAP_ID_MSI_ENABLE_POS 0x10 +#define PCI_CAP_ID_MSI_ENABLE_LEN 0x1 +#define PCI_CAP_ID_MSIX_ENABLE_POS 0x1F +#define PCI_CAP_ID_MSIX_ENABLE_LEN 0x1 + +#ifndef fallthrough +#if __has_attribute(__fallthrough__) +# define fallthrough __attribute__((__fallthrough__)) +#else +# define fallthrough do {} while (0) /* fallthrough */ +#endif +#endif + + 
+#pragma pack(1) +/* it's better to make this struct's size to 128byte. */ +struct pattern_packet{ + u8 ether_daddr[ETH_ALEN]; + u8 ether_saddr[ETH_ALEN]; + u16 ether_type; + + __be16 ar_hrd; /* format of hardware address */ + __be16 ar_pro; /* format of protocol */ + unsigned char ar_hln; /* length of hardware address */ + unsigned char ar_pln; /* length of protocol address */ + __be16 ar_op; /* ARP opcode (command) */ + unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */ + unsigned char ar_sip[4]; /* sender IP address */ + unsigned char ar_tha[ETH_ALEN]; /* target hardware address */ + unsigned char ar_tip[4]; /* target IP address */ + + u8 reverse[86]; +}; +#pragma pack() + +typedef enum { + CURRENT_STATE_SHUTDOWN = 0, + CURRENT_STATE_RESUME = 1, + CURRENT_STATE_INIT = 2, + CURRENT_STATE_SUSPEND = 3, + CURRENT_STATE_CLOSE = 4, + CURRENT_STATE_OPEN = 5, + CURRENT_STATE_RESTART = 6, + CURRENT_STATE_REMOVE = 7, +} CURRENT_STATE; + +typedef dma_addr_t DMA_ADDR_T; +typedef enum pkt_hash_types RSS_HASH_TYPE; +typedef void __iomem *IOMEM; +typedef struct pci_dev PCI_DEV; + +struct ext_command_buf { + void *buf; + u32 size_in; + u32 size_out; +}; + +struct ext_command_mac { + u32 num; + union { + u32 val32; + u16 val16; + u8 val8; + }; +}; + +struct ext_command_mii { + u16 dev; + u16 num; + u16 val; +}; + +struct ext_ioctl_data { + u32 cmd_type; + struct ext_command_buf cmd_buf; +}; + +typedef struct _fxgmac_test_buf { + u8 *addr; + u32 offset; + u32 length; +} fxgmac_test_buf, *pfxgmac_test_buf; + +typedef struct _fxgmac_test_packet { + struct _fxgmac_test_packet *next; + u32 length; /* total length of the packet(buffers) */ + u32 type; /* packet type, vlan, ip checksum, TSO, etc. 
*/ + + fxgmac_test_buf buf[MAX_PKT_BUF]; + fxgmac_test_buf sGList[MAX_PKT_BUF]; + u16 vlanID; + u16 mss; + u32 hash; + u16 cpuNum; + u16 xsum; /* rx, ip-payload checksum */ + u16 csumStart; /* custom checksum offset to the mac-header */ + u16 csumPos; /* custom checksom position (to the mac_header) */ + void *upLevelReserved[4]; + void *lowLevelReserved[4]; +} fxgmac_test_packet, *pfxgmac_test_packet; + +typedef struct fxgmac_channel_of_platform { + char dma_irq_name[IFNAMSIZ + 32]; + + /* for MSIx to match the type of struct msix_entry.vector */ + u32 dma_irq_tx; + char dma_irq_name_tx[IFNAMSIZ + 32]; + + /* Netdev related settings */ + struct napi_struct napi_tx; + + /* Netdev related settings */ + struct napi_struct napi_rx; + struct timer_list tx_timer; + +#if FXGMAC_TX_HANG_TIMER_EN + unsigned int tx_hang_timer_active; + struct timer_list tx_hang_timer; + unsigned int tx_hang_hw_cur; +#endif +} FXGMAC_CHANNEL_OF_PLATFORM; + +typedef struct per_regisiter_info { + unsigned int size; + unsigned int address; + unsigned int value; + unsigned char data[FXGAMC_MAX_DATA_SIZE]; +} PER_REG_INFO; + +/* for FXGMAC_EFUSE_WRITE_PATCH_PER_INDEX, val0 is index, val1 is offset, + * val2 is value. + */ +typedef struct ext_command_data { + u32 val0; + u32 val1; + u32 val2; +} CMD_DATA; + +typedef struct fxgmac_pdata_of_platform { + u32 cfg_pci_cmd; + u32 cfg_cache_line_size; + u32 cfg_mem_base; + u32 cfg_mem_base_hi; + u32 cfg_io_base; + u32 cfg_int_line; + u32 cfg_device_ctrl1; + u32 cfg_pci_link_ctrl; + u32 cfg_device_ctrl2; + u32 cfg_msix_capability; + + struct work_struct restart_work; + u32 int_flags; /* legacy, msi or msix */ + int phy_irq; +#ifdef CONFIG_PCI_MSI + struct msix_entry *msix_entries; +#endif + + /* power management and wol*/ + u32 wol; /* wol options */ + unsigned long powerstate; /* power state */ + unsigned int ns_offload_tab_idx; /* for ns-offload table. 2 entries supported. 
*/ + CURRENT_STATE current_state; + netdev_features_t netdev_features; + struct napi_struct napi; + struct napi_struct napi_phy; + u32 mgm_intctrl_val; + bool phy_link; + bool fxgmac_test_tso_flag; + u32 fxgmac_test_tso_seg_num; + u32 fxgmac_test_last_tso_len; + u32 fxgmac_test_packet_len; + volatile u32 fxgmac_test_skb_arr_in_index; + volatile u32 fxgmac_test_skb_arr_out_index; + struct sk_buff *fxgmac_test_skb_array[FXGMAC_MAX_DBG_TEST_PKT]; +#ifdef HAVE_FXGMAC_DEBUG_FS + struct dentry *dbg_adapter; + struct dentry *fxgmac_dbg_root; + char fxgmac_dbg_netdev_ops_buf[FXGMAC_NETDEV_OPS_BUF_LEN]; +#endif +} FXGMAC_PDATA_OF_PLATFORM; + +void fxgmac_print_pkt(struct net_device *netdev, struct sk_buff *skb, + bool tx_rx); +int fxgmac_dismiss_all_int(struct fxgmac_pdata *pdata); + +#ifdef HAVE_FXGMAC_DEBUG_FS +void fxgmac_dbg_adapter_init(struct fxgmac_pdata *pdata); +void fxgmac_dbg_adapter_exit(struct fxgmac_pdata *pdata); +void fxgmac_dbg_init(struct fxgmac_pdata *pdata); +void fxgmac_dbg_exit(struct fxgmac_pdata *pdata); +#endif /* HAVE_FXGMAC_DEBUG_FS */ + +void fxgmac_restart_dev(struct fxgmac_pdata *pdata); +long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); + +int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg); +/* for phy interface */ +int fxgmac_ephy_autoneg_ability_get(struct fxgmac_pdata *pdata, + unsigned int *cap_mask); +int fxgmac_ephy_status_get(struct fxgmac_pdata *pdata, int *speed, + int *duplex, int *ret_link, int *media); +int fxgmac_ephy_soft_reset(struct fxgmac_pdata *pdata); +void fxgmac_phy_force_speed(struct fxgmac_pdata *pdata, int speed); +void fxgmac_phy_force_duplex(struct fxgmac_pdata *pdata, int duplex); +void fxgmac_phy_force_autoneg(struct fxgmac_pdata *pdata, int autoneg); + +unsigned int fxgmac_get_netdev_ip4addr(struct fxgmac_pdata *pdata); +unsigned char *fxgmac_get_netdev_ip6addr(struct fxgmac_pdata *pdata, + unsigned char *ipval, + unsigned char *ip6addr_solicited, + unsigned 
int ifa_flag); + +#if FXGMAC_PM_FEATURE_ENABLED +void fxgmac_net_powerdown(struct fxgmac_pdata *pdata, unsigned int wol); +void fxgmac_net_powerup(struct fxgmac_pdata *pdata); +#endif + +inline unsigned int fxgmac_tx_avail_desc(struct fxgmac_ring *ring); +inline unsigned int fxgmac_rx_dirty_desc(struct fxgmac_ring *ring); +int fxgmac_start(struct fxgmac_pdata *pdata); +void fxgmac_stop(struct fxgmac_pdata *pdata); +void fxgmac_free_rx_data(struct fxgmac_pdata *pdata); +void fxgmac_free_tx_data(struct fxgmac_pdata *pdata); + +#endif /* __FUXI_OS_H__ */ -- Gitee From fde38a0be100a3708c4eaf20d0ef54c8456471d9 Mon Sep 17 00:00:00 2001 From: Zhao Qunqin Date: Wed, 4 Dec 2024 17:39:26 +0800 Subject: [PATCH 1930/2138] anolis: pci/quirks: Fix pm transition of devices under pcie port ANBZ: #12295 The CPU cannot access the PCI header of the device if it's in low-power mode on the LS7A2000 bridge chip. Signed-off-by: Zhao Qunqin Signed-off-by: Juxin Gao Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/4212 --- drivers/pci/quirks.c | 30 ++++++++++++++++++++++++++++++ include/linux/pci.h | 1 + 2 files changed, 31 insertions(+) diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 82cc68844f30..7a6db6e83200 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -410,6 +410,36 @@ static void loongson_pcie_msi_quirk(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, 0x7a59, loongson_pcie_msi_quirk); +#define DEV_PCIE_PORT_4 0x7a39 +#define DEV_PCIE_PORT_5 0x7a49 +#define DEV_PCIE_PORT_6 0x7a59 +#define DEV_PCIE_PORT_7 0x7a69 +static void loongson_d3_and_link_quirk(struct pci_dev *dev) +{ + struct pci_bus *bus = dev->bus; + struct pci_dev *bridge; + static const struct pci_device_id bridge_devids[] = { + { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_4) }, + { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_5) }, + { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_6) }, + { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_7) }, + { 0, }, + }; + + /* look for the 
matching bridge */ + while (!pci_is_root_bus(bus)) { + bridge = bus->self; + bus = bus->parent; + if (bridge && pci_match_id(bridge_devids, bridge)) { + dev->dev_flags |= (PCI_DEV_FLAGS_NO_D3 | + PCI_DEV_FLAGS_NO_LINK_SPEED_CHANGE); + dev->no_d1d2 = 1; + break; + } + } +} +DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_d3_and_link_quirk); + /* Chipsets where PCI->PCI transfers vanish or hang */ static void quirk_nopcipci(struct pci_dev *dev) { diff --git a/include/linux/pci.h b/include/linux/pci.h index 379ac091df36..5f41f7e2f4e4 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -245,6 +245,7 @@ enum pci_dev_flags { PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11), /* Device does honor MSI masking despite saying otherwise */ PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12), + PCI_DEV_FLAGS_NO_LINK_SPEED_CHANGE = (__force pci_dev_flags_t) (1 << 15), }; enum pci_irq_reroute_variant { -- Gitee From 2c86a43dfa737aa47f4754943450e59af02a97f4 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Sat, 26 Mar 2022 18:03:57 +0800 Subject: [PATCH 1931/2138] anolis: mm: add ability to restrict swapout in global reclaim ANBZ: #12338 ANBZ: #726 Even vm_swappiness is set to 0, swapout can happen in global reclaim when there is tiny page cache, which is undesirable in some situations. This introduces a debugfs interface named strict_swappiness. When vm_swappiness is 0, global swapout can be completely disabled with strict_swappiness set to 1, as follows. $ echo 1 > /sys/kernel/debug/strict_swappiness The default value of strict_swappiness is 0, and the default behavior is not changed. 
Suggested-by: zhaoyan.liao Signed-off-by: Xu Yu Acked-by: Gang Deng Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4219 --- mm/vmscan.c | 38 +++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 72d78c595597..d8db6b1ffcaf 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -191,6 +191,42 @@ struct scan_control { */ int vm_swappiness = 60; +/* + * Even vm_swappiness is set to 0, swapout can happen in + * global reclaim when there is few page cache. When + * strict_swappiness is set, such global swapout can be + * completely disabled. + */ +static int strict_swappiness; + +#ifdef CONFIG_DEBUG_FS +static int strict_swappiness_get(void *data, u64 *val) +{ + *val = strict_swappiness; + return 0; +} + +static int strict_swappiness_set(void *data, u64 val) +{ + if (val > 1) + return -EINVAL; + + strict_swappiness = val; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(strict_swappiness_fops, + strict_swappiness_get, strict_swappiness_set, "%llu\n"); + +static int __init strict_swappiness_debugfs(void) +{ + debugfs_create_file_unsafe("strict_swappiness", 0644, NULL, NULL, + &strict_swappiness_fops); + return 0; +} +late_initcall(strict_swappiness_debugfs); +#endif + LIST_HEAD(shrinker_list); DECLARE_RWSEM(shrinker_rwsem); @@ -2995,7 +3031,7 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) * thrashing file LRU becomes infinitely more attractive than * anon pages. Try to detect this based on file LRU size. 
*/ - if (!cgroup_reclaim(sc)) { + if (!cgroup_reclaim(sc) && !strict_swappiness) { unsigned long total_high_wmark = 0; unsigned long free, anon; int z; -- Gitee From 2cc4b5ed8ac8eeb4d2398ee3b997955ffd7a99b9 Mon Sep 17 00:00:00 2001 From: zhongjiang-ali Date: Fri, 8 Apr 2022 00:24:13 +0800 Subject: [PATCH 1932/2138] anolis: mm: introduce ability to reserve page cache on system wide ANBZ: #12338 ANBZ: #849 The kernel generally prefers to reclaim page cache over anonymous pages, and only reclaims page cache when there is no swap configured. However, in some extreme scenario, where the application allocates a large amount of anonymous memory, the page cache is almost completely exhausted, while the OOM killer barely fires. The system can suffer from heavy IO, and the application performance can be significantly affected, or even the application may become unresponsive, since the page cache, including the application program instruction, is thrashing. In such scenario, some users do want OOM instead of half-dead. This provides user the ability to reserve page cache on system wide. With appropriate amount of page cache reserved, OOM killer can be triggered in time, and some key processes can make progress (cooperate with oom_score_adj, for example). 
Enable the feature with: echo XXX > /proc/sys/vm/min_cache_kbytes disable the feature with: echo 0 > /proc/sys/vm/min_cache_kbytes Signed-off-by: zhongjiang-ali Acked-by: Gang Deng Suggested-by: yinbinbin Reviewed-by: Xu Yu Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4219 --- include/linux/mmzone.h | 1 + include/linux/swap.h | 1 + mm/page_alloc.c | 53 ++++++++++++++++++++++++++++++++++++++++++ mm/vmscan.c | 25 ++++++++++++++++++-- 4 files changed, 78 insertions(+), 2 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 1a6fdfc16b8f..569df4b25e2e 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1344,6 +1344,7 @@ typedef struct pglist_data { */ unsigned long totalreserve_pages; + unsigned long min_cache_pages; #ifdef CONFIG_NUMA /* * node reclaim becomes active if more unmapped pages exist. diff --git a/include/linux/swap.h b/include/linux/swap.h index 5762d794ebab..5eb47d3554f0 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -384,6 +384,7 @@ extern struct list_lru shadow_nodes; /* linux/mm/page_alloc.c */ extern unsigned long totalreserve_pages; +extern unsigned long sysctl_min_cache_kbytes; /* Definition of global_zone_page_state not available yet */ #define nr_free_pages() global_zone_page_state(NR_FREE_PAGES) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index dd0cf7492de6..b232a53dabc8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5869,6 +5869,51 @@ static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int wr return 0; } +static void setup_min_cache_kbytes(void) +{ + pg_data_t *pgdat; + struct zone *zone; + unsigned long lowmem_pages = 0; + unsigned long min_cache_pages = sysctl_min_cache_kbytes >> (PAGE_SHIFT - 10); + + for_each_online_pgdat(pgdat) + pgdat->min_cache_pages = 0; + + for_each_zone(zone) { + if (!is_highmem(zone)) + lowmem_pages += zone_managed_pages(zone); + } + + for_each_zone(zone) { + u64 tmp; + 
+ /* + * Make sure that lowmem zone reserve a mount of file pages + * to avoid thrashing. highmem zone is allowed to eat up + * memory as soon as possible. + */ + if (!is_highmem(zone)) { + tmp = zone_managed_pages(zone) * min_cache_pages; + do_div(tmp, lowmem_pages); + zone->zone_pgdat->min_cache_pages += tmp; + } + } +} + +static int sysctl_min_cache_kbytes_sysctl_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *length, loff_t *ppos) +{ + int rc; + + rc = proc_doulongvec_minmax(table, write, buffer, length, ppos); + if (rc) + return rc; + + setup_min_cache_kbytes(); + + return 0; +} + #ifdef CONFIG_NUMA static void setup_min_unmapped_ratio(void) { @@ -6030,6 +6075,14 @@ static struct ctl_table page_alloc_sysctl_table[] = { .mode = 0644, .proc_handler = lowmem_reserve_ratio_sysctl_handler, }, + { + .procname = "min_cache_kbytes", + .data = &sysctl_min_cache_kbytes, + .maxlen = sizeof(sysctl_min_cache_kbytes), + .mode = 0644, + .proc_handler = sysctl_min_cache_kbytes_sysctl_handler, + .extra1 = SYSCTL_LONG_ZERO, + }, #ifdef CONFIG_NUMA { .procname = "numa_zonelist_order", diff --git a/mm/vmscan.c b/mm/vmscan.c index d8db6b1ffcaf..6218b64fb7cb 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -140,6 +140,9 @@ struct scan_control { /* Always discard instead of demoting to lower tier memory */ unsigned int no_demotion:1; + /* The file pages on the current node are not allowed to reclaim */ + unsigned int file_is_reserved:1; + /* Allocation order */ s8 order; @@ -190,6 +193,8 @@ struct scan_control { * From 0 .. 200. Higher means more swappy. */ int vm_swappiness = 60; +/* The min page cache should be reserved in the system */ +unsigned long sysctl_min_cache_kbytes; /* * Even vm_swappiness is set to 0, swapout can happen in @@ -3031,9 +3036,11 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) * thrashing file LRU becomes infinitely more attractive than * anon pages. Try to detect this based on file LRU size. 
*/ - if (!cgroup_reclaim(sc) && !strict_swappiness) { + if (!cgroup_reclaim(sc)) { unsigned long total_high_wmark = 0; + unsigned long total_min_wmark = 0; unsigned long free, anon; + unsigned long min_cache_kbytes; int z; free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); @@ -3047,6 +3054,7 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) continue; total_high_wmark += high_wmark_pages(zone); + total_min_wmark += min_wmark_pages(zone); } /* @@ -3060,6 +3068,17 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) file + free <= total_high_wmark && !(sc->may_deactivate & DEACTIVATE_ANON) && anon >> sc->priority; + + /* + * Reserve a specified amount of page caches in case of thrashing. + * OOM killer is preferred when the system page cache is below the + * given watermark. + */ + min_cache_kbytes = READ_ONCE(sysctl_min_cache_kbytes); + if (min_cache_kbytes) { + sc->file_is_reserved = (sc->may_deactivate & DEACTIVATE_FILE) && + file <= min(total_min_wmark, pgdat->min_cache_pages); + } } } @@ -3114,7 +3133,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, /* * If the system is almost out of file pages, force-scan anon. */ - if (sc->file_is_tiny) { + if (sc->file_is_tiny && !strict_swappiness) { scan_balance = SCAN_ANON; goto out; } @@ -3257,6 +3276,8 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, /* Scan one type exclusively */ if ((scan_balance == SCAN_FILE) != file) scan = 0; + else if (sc->file_is_reserved && file) + scan = 0; break; default: /* Look ma, no brain */ -- Gitee From 59ae61bfee07d7ad42ff40e2283d1d519110e4f7 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Sat, 22 Jul 2023 12:25:08 +0800 Subject: [PATCH 1933/2138] anolis: mm: make min_cache_kbytes behave explicitly ANBZ: #12338 ANBZ: #6025 The page cache reserve feature, i.e., min_cache_kbytes, does not take effect in some scenarios. 
This is because that min_cache_kbytes is subject to sc->may_deactivate and min_wmark. This makes min_cache_kbytes behave explicitly, i.e., eliminating additional restrictions such as sc->may_deactivate and min_wmark. Fixes: 1f86172dbe16 ("anolis: mm: introduce ability to reserve page cache on system wide") Signed-off-by: Xu Yu Reviewed-by: zhongjiang-ali Link: https://gitee.com/anolis/cloud-kernel/pulls/1968 Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4219 --- mm/vmscan.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 6218b64fb7cb..f12e2df60001 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3038,7 +3038,6 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) */ if (!cgroup_reclaim(sc)) { unsigned long total_high_wmark = 0; - unsigned long total_min_wmark = 0; unsigned long free, anon; unsigned long min_cache_kbytes; int z; @@ -3054,7 +3053,6 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) continue; total_high_wmark += high_wmark_pages(zone); - total_min_wmark += min_wmark_pages(zone); } /* @@ -3075,10 +3073,8 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) * given watermark. */ min_cache_kbytes = READ_ONCE(sysctl_min_cache_kbytes); - if (min_cache_kbytes) { - sc->file_is_reserved = (sc->may_deactivate & DEACTIVATE_FILE) && - file <= min(total_min_wmark, pgdat->min_cache_pages); - } + if (min_cache_kbytes) + sc->file_is_reserved = file <= pgdat->min_cache_pages; } } -- Gitee From 0890767a5b633fe8f69089770dded961080ec041 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Sat, 22 Jul 2023 15:04:38 +0800 Subject: [PATCH 1934/2138] anolis: mm: limit min_cache_kbytes ANBZ: #12338 ANBZ: #6025 This limits min_cache_kbytes to half of total system memory at most. 
Signed-off-by: Xu Yu Reviewed-by: zhongjiang-ali Link: https://gitee.com/anolis/cloud-kernel/pulls/1968 Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4219 --- mm/page_alloc.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b232a53dabc8..434264ea1690 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5904,12 +5904,22 @@ static int sysctl_min_cache_kbytes_sysctl_handler(struct ctl_table *table, int w void __user *buffer, size_t *length, loff_t *ppos) { int rc; + unsigned long min_cache_pages; + unsigned long old_min_cache_kbytes = sysctl_min_cache_kbytes; rc = proc_doulongvec_minmax(table, write, buffer, length, ppos); if (rc) return rc; - setup_min_cache_kbytes(); + if (write) { + min_cache_pages = sysctl_min_cache_kbytes >> (PAGE_SHIFT - 10); + if (min_cache_pages > totalram_pages() / 2) { + sysctl_min_cache_kbytes = old_min_cache_kbytes; + return -EINVAL; + } + + setup_min_cache_kbytes(); + } return 0; } -- Gitee From 16ef6d714669f0ee9f33afa60e9c795d3af1f2cc Mon Sep 17 00:00:00 2001 From: Kaihao Bai Date: Mon, 11 Dec 2023 17:33:15 +0800 Subject: [PATCH 1935/2138] anolis: mm: fix the judgement of min_pagecache_bytes ANBZ: #12338 ANBZ: #7710 The min_pagecache feature is only added with a condition check in SCAN_FILE/SCAN_ANON. In this way, other cases will still reclaim file pages. 
Signed-off-by: Kaihao Bai Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2499 Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4219 --- mm/vmscan.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index f12e2df60001..f330d82b28e8 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3272,14 +3272,15 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, /* Scan one type exclusively */ if ((scan_balance == SCAN_FILE) != file) scan = 0; - else if (sc->file_is_reserved && file) - scan = 0; break; default: /* Look ma, no brain */ BUG(); } + if (sc->file_is_reserved && file) + scan = 0; + nr[lru] = scan; } } -- Gitee From 826b260fd92447ae8485d4daf20620ab7f65543b Mon Sep 17 00:00:00 2001 From: Yuanhe Shu Date: Tue, 29 Oct 2024 15:00:11 +0800 Subject: [PATCH 1936/2138] anolis: mm: set default value for min_cache_kbytes ANBZ: #12338 ANBZ: #11569 Set the default value for min_cache_kbytes to resolve the issue of not triggering OOM. 
min_cache_kbytes would be set to: 150M, when total memory is ( 0, 4G] 300M, when total memory is ( 4G, 8G] 400M, when total memory is ( 8G, 16G] 500M, when total memory is (16G, 128G] 1024M, when total memory is above 128G Signed-off-by: Yuanhe Shu Reviewed-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4044 Signed-off-by: Weilin Tong Link: https://gitee.com/anolis/cloud-kernel/pulls/4219 --- include/linux/mm.h | 1 + mm/memory_hotplug.c | 2 ++ mm/page_alloc.c | 35 +++++++++++++++++++++++++++++++++++ 3 files changed, 38 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index a61842b79ae3..cdf70d8b0648 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3201,6 +3201,7 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn); #endif extern void set_dma_reserve(unsigned long new_dma_reserve); +extern int __meminit init_min_cache_kbytes(void); extern void mem_init(void); extern void __init mmap_init(void); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9beed7c71a8e..6384603b61c1 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1205,6 +1205,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, /* reinitialise watermarks and update pcp limits */ init_per_zone_wmark_min(); + init_min_cache_kbytes(); kswapd_run(nid); kcompactd_run(nid); @@ -2014,6 +2015,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages, /* reinitialise watermarks and update pcp limits */ init_per_zone_wmark_min(); + init_min_cache_kbytes(); if (!populated_zone(zone)) { zone_pcp_reset(zone); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 434264ea1690..4331ed98a5fe 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5900,6 +5900,41 @@ static void setup_min_cache_kbytes(void) } } +/* + * Initialise min_cache_kbytes. 
+ * + * 0 < total memory <= 4G, min_cache_kbytes: 150M + * 4G < total memory <= 8G, min_cache_kbytes: 300M + * 8G < total memory <= 16G, min_cache_kbytes: 400M + * 16G < total memory <= 128G, min_cache_kbytes: 500M + * total memory > 128G, min_cache_kbytes: 1024M + */ + +int __meminit init_min_cache_kbytes(void) +{ + unsigned long total_ram_bytes = totalram_pages() << PAGE_SHIFT; + + if (total_ram_bytes <= 4UL * SZ_1G) + /* limit min_cache_kbytes to 1/2 of total memory at most */ + if (total_ram_bytes / 2 < 150 * SZ_1M) + sysctl_min_cache_kbytes = total_ram_bytes / 2 / SZ_1K; + else + sysctl_min_cache_kbytes = 150 * SZ_1K; + else if (total_ram_bytes <= 8UL * SZ_1G) + sysctl_min_cache_kbytes = 300 * SZ_1K; + else if (total_ram_bytes <= 16UL * SZ_1G) + sysctl_min_cache_kbytes = 400 * SZ_1K; + else if (total_ram_bytes <= 128UL * SZ_1G) + sysctl_min_cache_kbytes = 500 * SZ_1K; + else + sysctl_min_cache_kbytes = 1024 * SZ_1K; + + setup_min_cache_kbytes(); + + return 0; +} +postcore_initcall(init_min_cache_kbytes) + static int sysctl_min_cache_kbytes_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { -- Gitee From 52689964748e7880430a8bbd5711e04fc50c089f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 3 Apr 2024 23:59:41 +0800 Subject: [PATCH 1937/2138] cpumask: add cpumask_any_and_but() ANBZ: #12339 commit 897fa2c38c076c801bd1f1238af0af927e339c8f upstream. In some cases, it's useful to be able to select a random cpu from the intersection of two masks, excluding a particular CPU. For example, in some systems an uncore PMU is shared by a subset of CPUs, and management of this PMU is assigned to some arbitrary CPU in this set. Whenever the management CPU is hotplugged out, we wish to migrate responsibility to another arbitrary CPU which is both in this set and online. Today we can use cpumask_any_and() to select an arbitrary CPU in the intersection of two masks. 
We can also use cpumask_any_but() to select any arbitrary cpu in a mask excluding a particular CPU. To do both, we either need to use a temporary cpumask, which is wasteful, or use some lower-level cpumask helpers, which can be unclear. This patch adds a new cpumask_any_and_but() to cater for these cases. Signed-off-by: Mark Rutland Cc: Thomas Gleixner Cc: Andrew Morton Cc: Peter Zijlstra Cc: Rusty Russell Cc: linux-kernel@vger.kernel.org Signed-off-by: Dawei Li Acked-by: Yury Norov Link: https://lore.kernel.org/r/20240403155950.2068109-2-dawei.li@shingroup.cn Signed-off-by: Will Deacon Conflicts: include/linux/cpumask.h Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4218 --- include/linux/cpumask.h | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index dbdbf1451cad..a354aa5dd57d 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -388,7 +388,30 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) } /** - * cpumask_nth - get the first cpu in a cpumask + * cpumask_any_and_but - pick a "random" cpu from *mask1 & *mask2, but not this one. + * @mask1: the first input cpumask + * @mask2: the second input cpumask + * @cpu: the cpu to ignore + * + * Returns >= nr_cpu_ids if no cpus set. 
+ */ +static inline +unsigned int cpumask_any_and_but(const struct cpumask *mask1, + const struct cpumask *mask2, + unsigned int cpu) +{ + unsigned int i; + + cpumask_check(cpu); + i = cpumask_first_and(mask1, mask2); + if (i != cpu) + return i; + + return cpumask_next_and(cpu, mask1, mask2); +} + +/** + * cpumask_nth - get the Nth cpu in a cpumask * @srcp: the cpumask pointer * @cpu: the N'th cpu to find, starting from 0 * -- Gitee From 91687da294ec9891087f4c8e3debe15a226055b1 Mon Sep 17 00:00:00 2001 From: Dawei Li Date: Wed, 3 Apr 2024 23:59:46 +0800 Subject: [PATCH 1938/2138] perf/dwc_pcie: Avoid placing cpumask on the stack ANBZ: #12339 commit cf276ee46bc44aa188d6a9ea36f83118f48bac67 upstream. In general it's preferable to avoid placing cpumasks on the stack, as for large values of NR_CPUS these can consume significant amounts of stack space and make stack overflows more likely. Use cpumask_any_and_but() to avoid the need for a temporary cpumask on the stack. Suggested-by: Mark Rutland Reviewed-by: Mark Rutland Signed-off-by: Dawei Li Reviewed-by: Shuai Xue Link: https://lore.kernel.org/r/20240403155950.2068109-7-dawei.li@shingroup.cn Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Link: https://gitee.com/anolis/cloud-kernel/pulls/4218 --- drivers/perf/dwc_pcie_pmu.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c index 957058ad0099..c5e328f23841 100644 --- a/drivers/perf/dwc_pcie_pmu.c +++ b/drivers/perf/dwc_pcie_pmu.c @@ -690,9 +690,8 @@ static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_n { struct dwc_pcie_pmu *pcie_pmu; struct pci_dev *pdev; - int node; - cpumask_t mask; unsigned int target; + int node; pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node); /* Nothing to do if this CPU doesn't own the PMU */ @@ -702,10 +701,9 @@ static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_n 
pcie_pmu->on_cpu = -1; pdev = pcie_pmu->pdev; node = dev_to_node(&pdev->dev); - if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) && - cpumask_andnot(&mask, &mask, cpumask_of(cpu))) - target = cpumask_any(&mask); - else + + target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu); + if (target >= nr_cpu_ids) target = cpumask_any_but(cpu_online_mask, cpu); if (target >= nr_cpu_ids) { -- Gitee From 2deadacb6dabf5e84190bb5630bc6df59e8c7d44 Mon Sep 17 00:00:00 2001 From: Krishna chaitanya chundru Date: Fri, 16 Aug 2024 20:47:20 +0530 Subject: [PATCH 1939/2138] perf/dwc_pcie: Fix registration issue in multi PCIe controller instances ANBZ: #12339 commit e669388537c472142804eb5a0449cc23d5409694 upstream. When there are multiple of instances of PCIe controllers, registration to perf driver fails with this error. sysfs: cannot create duplicate filename '/devices/platform/dwc_pcie_pmu.0' CPU: 0 PID: 166 Comm: modprobe Not tainted 6.10.0-rc2-next-20240607-dirty Hardware name: Qualcomm SA8775P Ride (DT) Call trace: dump_backtrace.part.8+0x98/0xf0 show_stack+0x14/0x1c dump_stack_lvl+0x74/0x88 dump_stack+0x14/0x1c sysfs_warn_dup+0x60/0x78 sysfs_create_dir_ns+0xe8/0x100 kobject_add_internal+0x94/0x224 kobject_add+0xa8/0x118 device_add+0x298/0x7b4 platform_device_add+0x1a0/0x228 platform_device_register_full+0x11c/0x148 dwc_pcie_register_dev+0x74/0xf0 [dwc_pcie_pmu] dwc_pcie_pmu_init+0x7c/0x1000 [dwc_pcie_pmu] do_one_initcall+0x58/0x1c0 do_init_module+0x58/0x208 load_module+0x1804/0x188c __do_sys_init_module+0x18c/0x1f0 __arm64_sys_init_module+0x14/0x1c invoke_syscall+0x40/0xf8 el0_svc_common.constprop.1+0x70/0xf4 do_el0_svc+0x18/0x20 el0_svc+0x28/0xb0 el0t_64_sync_handler+0x9c/0xc0 el0t_64_sync+0x160/0x164 kobject: kobject_add_internal failed for dwc_pcie_pmu.0 with -EEXIST, don't try to register things with the same name in the same directory. This is because of having same bdf value for devices under two different controllers. 
Update the logic to use sbdf which is a unique number in case of multi instance also. Fixes: af9597adc2f1 ("drivers/perf: add DesignWare PCIe PMU driver") Signed-off-by: Krishna chaitanya chundru Reviewed-by: Yicong Yang Link: https://lore.kernel.org/r/20240816-dwc_pmu_fix-v2-1-198b8ab1077c@quicinc.com Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4218 --- drivers/perf/dwc_pcie_pmu.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c index c5e328f23841..85a5155d6018 100644 --- a/drivers/perf/dwc_pcie_pmu.c +++ b/drivers/perf/dwc_pcie_pmu.c @@ -556,10 +556,10 @@ static int dwc_pcie_register_dev(struct pci_dev *pdev) { struct platform_device *plat_dev; struct dwc_pcie_dev_info *dev_info; - u32 bdf; + u32 sbdf; - bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); - plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", bdf, + sbdf = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn); + plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", sbdf, pdev, sizeof(*pdev)); if (IS_ERR(plat_dev)) @@ -611,15 +611,15 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev) struct pci_dev *pdev = plat_dev->dev.platform_data; struct dwc_pcie_pmu *pcie_pmu; char *name; - u32 bdf, val; + u32 sbdf, val; u16 vsec; int ret; vsec = pci_find_vsec_capability(pdev, pdev->vendor, DWC_PCIE_VSEC_RAS_DES_ID); pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); - bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); - name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", bdf); + sbdf = plat_dev->id; + name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf); if (!name) return -ENOMEM; @@ -650,7 +650,7 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev) ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state, &pcie_pmu->cpuhp_node); 
if (ret) { - pci_err(pdev, "Error %d registering hotplug @%x\n", ret, bdf); + pci_err(pdev, "Error %d registering hotplug @%x\n", ret, sbdf); return ret; } @@ -663,7 +663,7 @@ static int dwc_pcie_pmu_probe(struct platform_device *plat_dev) ret = perf_pmu_register(&pcie_pmu->pmu, name, -1); if (ret) { - pci_err(pdev, "Error %d registering PMU @%x\n", ret, bdf); + pci_err(pdev, "Error %d registering PMU @%x\n", ret, sbdf); return ret; } ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu, -- Gitee From 0e9d13278f97a96d5def320fcd2252dde3a9c142 Mon Sep 17 00:00:00 2001 From: Krishna chaitanya chundru Date: Fri, 16 Aug 2024 20:47:22 +0530 Subject: [PATCH 1940/2138] perf/dwc_pcie: Always register for PCIe bus notifier ANBZ: #12339 commit b94b05478fb6a09033bf70c6edd03f8930a0fe24 upstream. When the PCIe devices are discovered late, the driver can't find the PCIe devices and returns in the init without registering with the bus notifier. Due to that the devices which are discovered late the driver can't register for this. Register for bus notifier & driver even if the device is not found as part of init. 
Fixes: af9597adc2f1 ("drivers/perf: add DesignWare PCIe PMU driver") Signed-off-by: Krishna chaitanya chundru Reviewed-by: Yicong Yang Link: https://lore.kernel.org/r/20240816-dwc_pmu_fix-v2-3-198b8ab1077c@quicinc.com Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4218 --- drivers/perf/dwc_pcie_pmu.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c index 85a5155d6018..f205ecad2e4c 100644 --- a/drivers/perf/dwc_pcie_pmu.c +++ b/drivers/perf/dwc_pcie_pmu.c @@ -726,7 +726,6 @@ static struct platform_driver dwc_pcie_pmu_driver = { static int __init dwc_pcie_pmu_init(void) { struct pci_dev *pdev = NULL; - bool found = false; int ret; for_each_pci_dev(pdev) { @@ -738,11 +737,7 @@ static int __init dwc_pcie_pmu_init(void) pci_dev_put(pdev); return ret; } - - found = true; } - if (!found) - return -ENODEV; ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/dwc_pcie_pmu:online", -- Gitee From 9024859881fcf0818b8ba2b190e5689c509261f9 Mon Sep 17 00:00:00 2001 From: Krishna chaitanya chundru Date: Fri, 16 Aug 2024 20:47:23 +0530 Subject: [PATCH 1941/2138] perf/dwc_pcie: Add support for QCOM vendor devices ANBZ: #12339 commit db9e7a83d30821ba50a84e9726099946900abde8 upstream. Update the vendor table with QCOM PCIe vendorid. 
Signed-off-by: Krishna chaitanya chundru Reviewed-by: Yicong Yang Link: https://lore.kernel.org/r/20240816-dwc_pmu_fix-v2-4-198b8ab1077c@quicinc.com Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4218 --- drivers/perf/dwc_pcie_pmu.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c index f205ecad2e4c..4ca50f9b6dfe 100644 --- a/drivers/perf/dwc_pcie_pmu.c +++ b/drivers/perf/dwc_pcie_pmu.c @@ -107,6 +107,7 @@ struct dwc_pcie_vendor_id { static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = { {.vendor_id = PCI_VENDOR_ID_ALIBABA }, + {.vendor_id = PCI_VENDOR_ID_QCOM }, {} /* terminator */ }; -- Gitee From 6fb6e196adf2dd9b4b84d00701fefc39abb64f48 Mon Sep 17 00:00:00 2001 From: Yunhui Cui Date: Thu, 19 Sep 2024 11:46:01 +0800 Subject: [PATCH 1942/2138] drivers perf: remove unused field pmu_node ANBZ: #12339 commit cc847678998305c72c9850efa3fd040b274a8180 upstream. The driver does not use the pmu_node field, so remove it. 
Signed-off-by: Yunhui Cui Reviewed-by: Shuai Xue Link: https://lore.kernel.org/r/20240919034601.2453-1-cuiyunhui@bytedance.com Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Link: https://gitee.com/anolis/cloud-kernel/pulls/4218 --- drivers/perf/dwc_pcie_pmu.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c index 4ca50f9b6dfe..59526a48499f 100644 --- a/drivers/perf/dwc_pcie_pmu.c +++ b/drivers/perf/dwc_pcie_pmu.c @@ -82,7 +82,6 @@ struct dwc_pcie_pmu { u16 ras_des_offset; u32 nr_lanes; - struct list_head pmu_node; struct hlist_node cpuhp_node; struct perf_event *event[DWC_PCIE_EVENT_TYPE_MAX]; int on_cpu; -- Gitee From b443484673a9d234f3b9eeb11bd3d7176dbfdbca Mon Sep 17 00:00:00 2001 From: Ilkka Koskinen Date: Wed, 16 Oct 2024 14:01:36 -0700 Subject: [PATCH 1943/2138] perf/dwc_pcie: Convert the events with mixed case to lowercase ANBZ: #12339 commit 759b5fc6cc3e3c5841f6a3e4638b39534b0fc716 upstream. Group #1 events had both upper and lower case characters in their names. Trying to count such events with perf tool results in an error: $ perf stat -e dwc_rootport_10008/Tx_PCIe_TLP_Data_Payload/ sleep 1 event syntax error: 'dwc_rootport_10008/Tx_PCIe_TLP_Data_Payload/' \___ Bad event or PMU Unable to find PMU or event on a PMU of 'dwc_rootport_10008' event syntax error: '..port_10008/Tx_PCIe_TLP_Data_Payload/' \___ unknown term 'Tx_PCIe_TLP_Data_Payload' for pmu 'dwc_rootport_10008' valid terms: eventid,type,lane,config,config1,config2,config3,name,period,percore,metric-id Run 'perf list' for a list of valid events Usage: perf stat [] [] -e, --event event selector. use 'perf list' to list available events Perf tool assumes the event names are either in lower or upper case. 
This is also mentioned in Documentation/ABI/testing/sysfs-bus-event_source-devices-events "As performance monitoring event names are case insensitive in the perf tool, the perf tool only looks for lower or upper case event names in sysfs to avoid scanning the directory. It is therefore required the name of the event here is either lower or upper case." Change the Group #1 events names to lower case. Signed-off-by: Ilkka Koskinen Link: https://lore.kernel.org/r/20241016210136.65452-1-ilkka@os.amperecomputing.com Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4218 --- drivers/perf/dwc_pcie_pmu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c index 59526a48499f..126d2c3516ad 100644 --- a/drivers/perf/dwc_pcie_pmu.c +++ b/drivers/perf/dwc_pcie_pmu.c @@ -202,10 +202,10 @@ static struct attribute *dwc_pcie_pmu_time_event_attrs[] = { DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09), /* Group #1 */ - DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_PCIe_TLP_Data_Payload, 0x20), - DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_PCIe_TLP_Data_Payload, 0x21), - DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_CCIX_TLP_Data_Payload, 0x22), - DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_CCIX_TLP_Data_Payload, 0x23), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_pcie_tlp_data_payload, 0x20), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(rx_pcie_tlp_data_payload, 0x21), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_ccix_tlp_data_payload, 0x22), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(rx_ccix_tlp_data_payload, 0x23), /* * Leave it to the user to specify the lane ID to avoid generating -- Gitee From 6f70de7f936c139cb100853b2fef8ec7aa5ed914 Mon Sep 17 00:00:00 2001 From: Ilkka Koskinen Date: Tue, 8 Oct 2024 23:18:22 +0000 Subject: [PATCH 1944/2138] perf/dwc_pcie: Add support for Ampere SoCs ANBZ: #12339 commit 83d511c3ca0cb70a55d8b0ae3e753448fb00272b upstream. 
Add support for Ampere SoCs by adding Ampere's vendor ID to the vendor list. Signed-off-by: Ilkka Koskinen Link: https://lore.kernel.org/r/20241008231824.5102-2-ilkka@os.amperecomputing.com Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4218 --- drivers/perf/dwc_pcie_pmu.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c index 126d2c3516ad..edb5a809c928 100644 --- a/drivers/perf/dwc_pcie_pmu.c +++ b/drivers/perf/dwc_pcie_pmu.c @@ -106,6 +106,7 @@ struct dwc_pcie_vendor_id { static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = { {.vendor_id = PCI_VENDOR_ID_ALIBABA }, + {.vendor_id = PCI_VENDOR_ID_AMPERE }, {.vendor_id = PCI_VENDOR_ID_QCOM }, {} /* terminator */ }; -- Gitee From e8b61483a8ed6df907551e15707963d0b078c590 Mon Sep 17 00:00:00 2001 From: Ilkka Koskinen Date: Tue, 8 Oct 2024 23:18:24 +0000 Subject: [PATCH 1945/2138] perf/dwc_pcie: Fix typos in event names ANBZ: #12339 commit 94b3ad10c2e1d0e761756a844b78a21101dd1810 upstream. 
Fix a few typos in event names Signed-off-by: Ilkka Koskinen Reviewed-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://lore.kernel.org/r/20241008231824.5102-4-ilkka@os.amperecomputing.com Signed-off-by: Will Deacon Signed-off-by: Jing Zhang Link: https://gitee.com/anolis/cloud-kernel/pulls/4218 --- drivers/perf/dwc_pcie_pmu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c index edb5a809c928..9cbea9675e21 100644 --- a/drivers/perf/dwc_pcie_pmu.c +++ b/drivers/perf/dwc_pcie_pmu.c @@ -216,9 +216,9 @@ static struct attribute *dwc_pcie_pmu_time_event_attrs[] = { DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_update_fc_dllp, 0x601), DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ack_dllp, 0x602), DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_update_fc_dllp, 0x603), - DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nulified_tlp, 0x604), - DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nulified_tlp, 0x605), - DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tl, 0x606), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nullified_tlp, 0x604), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nullified_tlp, 0x605), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tlp, 0x606), DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_write, 0x700), DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_read, 0x701), DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_write, 0x702), -- Gitee From 93c75b9b935efe35f5216eb498327d4e9fcfc419 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Tue, 6 Feb 2024 21:56:15 -0500 Subject: [PATCH 1946/2138] fs: super_set_uuid() ANBZ: #11101 commit a4af51ce229b1e1eab003966dbfebf9d80093a77 upstream. Some weird old filesytems have UUID-like things that we wish to expose as UUIDs, but are smaller; add a length field so that the new FS_IOC_(GET|SET)UUID ioctls can handle them in generic code. And add a helper super_set_uuid(), for setting nonstandard length uuids. Helper is now required for the new FS_IOC_GETUUID ioctl; if super_set_uuid() hasn't been called, the ioctl won't be supported. 
Reviewed-by: Dave Chinner Signed-off-by: Kent Overstreet Link: https://lore.kernel.org/r/20240207025624.1019754-2-kent.overstreet@linux.dev Signed-off-by: Christian Brauner Signed-off-by: Hongzhen Luo Reviewed-by: Gao Xiang Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4230 --- fs/ext4/super.c | 2 +- fs/f2fs/super.c | 2 +- fs/gfs2/ops_fstype.c | 2 +- fs/kernfs/mount.c | 4 +++- fs/ocfs2/super.c | 4 ++-- fs/ubifs/super.c | 2 +- fs/xfs/xfs_mount.c | 2 +- include/linux/fs.h | 9 +++++++++ mm/shmem.c | 4 +++- 9 files changed, 22 insertions(+), 9 deletions(-) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 58b3e5a9c832..32ed37fe40b8 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -5362,7 +5362,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) sb->s_qcop = &ext4_qctl_operations; sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; #endif - memcpy(&sb->s_uuid, es->s_uuid, sizeof(es->s_uuid)); + super_set_uuid(sb, es->s_uuid, sizeof(es->s_uuid)); INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ mutex_init(&sbi->s_orphan_lock); diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index b72fa103b963..0b780d67b466 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -4448,7 +4448,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) sb->s_time_gran = 1; sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | (test_opt(sbi, POSIX_ACL) ? 
SB_POSIXACL : 0); - memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid)); + super_set_uuid(sb, (void *) raw_super->uuid, sizeof(raw_super->uuid)); sb->s_iflags |= SB_I_CGROUPWB; /* init f2fs-specific super block info */ diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index f4c066aa24b9..65da3ae6d48e 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -215,7 +215,7 @@ static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf) memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN); memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN); - memcpy(&s->s_uuid, str->sb_uuid, 16); + super_set_uuid(s, str->sb_uuid, 16); } /** diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index c4bf26142eec..433b10d06669 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -360,7 +360,9 @@ int kernfs_get_tree(struct fs_context *fc) } sb->s_flags |= SB_ACTIVE; - uuid_gen(&sb->s_uuid); + uuid_t uuid; + uuid_gen(&uuid); + super_set_uuid(sb, uuid.b, sizeof(uuid)); down_write(&root->kernfs_supers_rwsem); list_add(&info->node, &info->root->supers); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 9f6bbb4a0844..fb6b8bf3c0a8 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -2029,8 +2029,8 @@ static int ocfs2_initialize_super(struct super_block *sb, cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits); bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits); sb->s_maxbytes = ocfs2_max_file_offset(bbits, cbits); - memcpy(&sb->s_uuid, di->id2.i_super.s_uuid, - sizeof(di->id2.i_super.s_uuid)); + super_set_uuid(sb, di->id2.i_super.s_uuid, + sizeof(di->id2.i_super.s_uuid)); osb->osb_dx_mask = (1 << (cbits - bbits)) - 1; diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 3409488d39ba..78f31bddff74 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -2246,7 +2246,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) goto out_umount; } - import_uuid(&sb->s_uuid, c->uuid); + super_set_uuid(sb, c->uuid, 
sizeof(c->uuid)); mutex_unlock(&c->umount_mutex); return 0; diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 0a0fd19573d8..27141289c9d6 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -62,7 +62,7 @@ xfs_uuid_mount( int hole, i; /* Publish UUID in struct super_block */ - uuid_copy(&mp->m_super->s_uuid, uuid); + super_set_uuid(mp->m_super, uuid->b, sizeof(*uuid)); if (xfs_has_nouuid(mp)) return 0; diff --git a/include/linux/fs.h b/include/linux/fs.h index 45991a7a9a2e..4a2b04d2faa9 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1266,6 +1266,7 @@ struct super_block { char s_id[32]; /* Informational name */ uuid_t s_uuid; /* UUID */ + u8 s_uuid_len; /* Default 16, possibly smaller for weird filesystems */ unsigned int s_max_links; @@ -2492,6 +2493,14 @@ extern __printf(2, 3) int super_setup_bdi_name(struct super_block *sb, char *fmt, ...); extern int super_setup_bdi(struct super_block *sb); +static inline void super_set_uuid(struct super_block *sb, const u8 *uuid, unsigned len) +{ + if (WARN_ON(len > sizeof(sb->s_uuid))) + len = sizeof(sb->s_uuid); + sb->s_uuid_len = len; + memcpy(&sb->s_uuid, uuid, len); +} + extern int current_umask(void); extern void ihold(struct inode * inode); diff --git a/mm/shmem.c b/mm/shmem.c index a5413999b52e..43c169779b56 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -4661,7 +4661,9 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) #ifdef CONFIG_TMPFS_POSIX_ACL sb->s_flags |= SB_POSIXACL; #endif - uuid_gen(&sb->s_uuid); + uuid_t uuid; + uuid_gen(&uuid); + super_set_uuid(sb, uuid.b, sizeof(uuid)); #ifdef CONFIG_TMPFS_QUOTA if (ctx->seen & SHMEM_SEEN_QUOTA) { -- Gitee From 338d31c09fdeaccc38d59143bf4b2601281cc776 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Tue, 6 Feb 2024 21:56:17 -0500 Subject: [PATCH 1947/2138] fs: FS_IOC_GETUUID ANBZ: #11101 commit 41bcbe59c3b3fa7171dd2e3a365e6d5154198f30 upstream. Add a new generic ioctls for querying the filesystem UUID. 
These are lifted versions of the ext4 ioctls, with one change: we're not using a flexible array member, because UUIDs will never be more than 16 bytes. This patch adds a generic implementation of FS_IOC_GETFSUUID, which reads from super_block->s_uuid. We're not lifting SETFSUUID from ext4 - that can be done on offline filesystems by the people who need it, trying to do it online is just asking for too much trouble. Cc: Christian Brauner Cc: Jan Kara Cc: Dave Chinner Cc: Darrick J. Wong Cc: Theodore Ts'o Cc: linux-fsdevel@vger.kernel.org Signed-off-by: Kent Overstreet Link: https://lore.kernel.org/r/20240207025624.1019754-4-kent.overstreet@linux.dev Signed-off-by: Christian Brauner Signed-off-by: Hongzhen Luo Reviewed-by: Gao Xiang Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4230 --- .../userspace-api/ioctl/ioctl-number.rst | 3 ++- fs/ioctl.c | 16 ++++++++++++++++ include/uapi/linux/fs.h | 15 +++++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst index 4ea5b837399a..63f7ed8d0796 100644 --- a/Documentation/userspace-api/ioctl/ioctl-number.rst +++ b/Documentation/userspace-api/ioctl/ioctl-number.rst @@ -82,8 +82,9 @@ Code Seq# Include File Comments 0x10 00-0F drivers/char/s390/vmcp.h 0x10 10-1F arch/s390/include/uapi/sclp_ctl.h 0x10 20-2F arch/s390/include/uapi/asm/hypfs.h -0x12 all linux/fs.h +0x12 all linux/fs.h BLK* ioctls linux/blkpg.h +0x15 all linux/fs.h FS_IOC_* ioctls 0x1b all InfiniBand Subsystem 0x20 all drivers/cdrom/cm206.h diff --git a/fs/ioctl.c b/fs/ioctl.c index 76cf22ac97d7..74eab9549383 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -763,6 +763,19 @@ static int ioctl_fssetxattr(struct file *file, void __user *argp) return err; } +static int ioctl_getfsuuid(struct file *file, void __user *argp) +{ + struct super_block *sb = file_inode(file)->i_sb; + struct fsuuid2 u = { .len = sb->s_uuid_len, }; + + 
if (!sb->s_uuid_len) + return -ENOIOCTLCMD; + + memcpy(&u.uuid[0], &sb->s_uuid, sb->s_uuid_len); + + return copy_to_user(argp, &u, sizeof(u)) ? -EFAULT : 0; +} + /* * do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d. * It's just a simple helper for sys_ioctl and compat_sys_ioctl. @@ -845,6 +858,9 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd, case FS_IOC_FSSETXATTR: return ioctl_fssetxattr(filp, argp); + case FS_IOC_GETFSUUID: + return ioctl_getfsuuid(filp, argp); + default: if (S_ISREG(inode->i_mode)) return file_ioctl(filp, cmd, argp); diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index b7b56871029c..ecfb32466e4f 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -64,6 +64,19 @@ struct fstrim_range { __u64 minlen; }; +/* + * We include a length field because some filesystems (vfat) have an identifier + * that we do want to expose as a UUID, but doesn't have the standard length. + * + * We use a fixed size buffer beacuse this interface will, by fiat, never + * support "UUIDs" longer than 16 bytes; we don't want to force all downstream + * users to have to deal with that. 
+ */ +struct fsuuid2 { + __u8 len; + __u8 uuid[16]; +}; + /* extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions */ #define FILE_DEDUPE_RANGE_SAME 0 #define FILE_DEDUPE_RANGE_DIFFERS 1 @@ -215,6 +228,8 @@ struct fsxattr { #define FS_IOC_FSSETXATTR _IOW('X', 32, struct fsxattr) #define FS_IOC_GETFSLABEL _IOR(0x94, 49, char[FSLABEL_MAX]) #define FS_IOC_SETFSLABEL _IOW(0x94, 50, char[FSLABEL_MAX]) +/* Returns the external filesystem UUID, the same one blkid returns */ +#define FS_IOC_GETFSUUID _IOR(0x15, 0, struct fsuuid2) /* * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS) -- Gitee From 55afdb2f657c8b44170ebaf4c240577825946353 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Tue, 6 Feb 2024 21:56:19 -0500 Subject: [PATCH 1948/2138] fs: add FS_IOC_GETFSSYSFSPATH ANBZ: #11101 commit ae8c511757304e0c393661b5ed2ad7073e2a351d upstream. Add a new ioctl for getting the sysfs name of a filesystem - the path under /sys/fs. This is going to let us standardize exporting data from sysfs across filesystems, e.g. time stats. The returned path will always be of the form "$FSTYP/$SYSFS_IDENTIFIER", where the sysfs identifier may be a UUID (for bcachefs) or a device name (xfs). Cc: Christian Brauner Cc: Jan Kara Cc: Dave Chinner Cc: Darrick J. 
Wong Cc: Theodore Ts'o Cc: Josef Bacik Signed-off-by: Kent Overstreet Link: https://lore.kernel.org/r/20240207025624.1019754-6-kent.overstreet@linux.dev Signed-off-by: Christian Brauner Signed-off-by: Hongzhen Luo Reviewed-by: Gao Xiang Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4230 --- fs/ioctl.c | 17 ++++++++++++++++ include/linux/fs.h | 43 +++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/fs.h | 10 ++++++++++ 3 files changed, 70 insertions(+) diff --git a/fs/ioctl.c b/fs/ioctl.c index 74eab9549383..1d5abfdf0f22 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -776,6 +776,20 @@ static int ioctl_getfsuuid(struct file *file, void __user *argp) return copy_to_user(argp, &u, sizeof(u)) ? -EFAULT : 0; } +static int ioctl_get_fs_sysfs_path(struct file *file, void __user *argp) +{ + struct super_block *sb = file_inode(file)->i_sb; + + if (!strlen(sb->s_sysfs_name)) + return -ENOIOCTLCMD; + + struct fs_sysfs_path u = {}; + + u.len = scnprintf(u.name, sizeof(u.name), "%s/%s", sb->s_type->name, sb->s_sysfs_name); + + return copy_to_user(argp, &u, sizeof(u)) ? -EFAULT : 0; +} + /* * do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d. * It's just a simple helper for sys_ioctl and compat_sys_ioctl. @@ -861,6 +875,9 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd, case FS_IOC_GETFSUUID: return ioctl_getfsuuid(filp, argp); + case FS_IOC_GETFSSYSFSPATH: + return ioctl_get_fs_sysfs_path(filp, argp); + default: if (S_ISREG(inode->i_mode)) return file_ioctl(filp, cmd, argp); diff --git a/include/linux/fs.h b/include/linux/fs.h index 4a2b04d2faa9..4cdaabab293f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1264,10 +1264,23 @@ struct super_block { struct fsnotify_mark_connector __rcu *s_fsnotify_marks; #endif + /* + * q: why are s_id and s_sysfs_name not the same? 
both are human + * readable strings that identify the filesystem + * a: s_id is allowed to change at runtime; it's used in log messages, + * and we want to when a device starts out as single device (s_id is dev + * name) but then a device is hot added and we have to switch to + * identifying it by UUID + * but s_sysfs_name is a handle for programmatic access, and can't + * change at runtime + */ char s_id[32]; /* Informational name */ uuid_t s_uuid; /* UUID */ u8 s_uuid_len; /* Default 16, possibly smaller for weird filesystems */ + /* if set, fs shows up under sysfs at /sys/fs/$FSTYP/s_sysfs_name */ + char s_sysfs_name[UUID_STRING_LEN + 1]; + unsigned int s_max_links; /* @@ -2501,6 +2514,36 @@ static inline void super_set_uuid(struct super_block *sb, const u8 *uuid, unsign memcpy(&sb->s_uuid, uuid, len); } +/* set sb sysfs name based on sb->s_bdev */ +static inline void super_set_sysfs_name_bdev(struct super_block *sb) +{ + snprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), "%pg", sb->s_bdev); +} + +/* set sb sysfs name based on sb->s_uuid */ +static inline void super_set_sysfs_name_uuid(struct super_block *sb) +{ + WARN_ON(sb->s_uuid_len != sizeof(sb->s_uuid)); + snprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), "%pU", sb->s_uuid.b); +} + +/* set sb sysfs name based on sb->s_id */ +static inline void super_set_sysfs_name_id(struct super_block *sb) +{ + strscpy(sb->s_sysfs_name, sb->s_id, sizeof(sb->s_sysfs_name)); +} + +/* try to use something standard before you use this */ +__printf(2, 3) +static inline void super_set_sysfs_name_generic(struct super_block *sb, const char *fmt, ...) 
+{ + va_list args; + + va_start(args, fmt); + vsnprintf(sb->s_sysfs_name, sizeof(sb->s_sysfs_name), fmt, args); + va_end(args); +} + extern int current_umask(void); extern void ihold(struct inode * inode); diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index ecfb32466e4f..95ae28284997 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -77,6 +77,11 @@ struct fsuuid2 { __u8 uuid[16]; }; +struct fs_sysfs_path { + __u8 len; + __u8 name[128]; +}; + /* extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions */ #define FILE_DEDUPE_RANGE_SAME 0 #define FILE_DEDUPE_RANGE_DIFFERS 1 @@ -230,6 +235,11 @@ struct fsxattr { #define FS_IOC_SETFSLABEL _IOW(0x94, 50, char[FSLABEL_MAX]) /* Returns the external filesystem UUID, the same one blkid returns */ #define FS_IOC_GETFSUUID _IOR(0x15, 0, struct fsuuid2) +/* + * Returns the path component under /sys/fs/ that refers to this filesystem; + * also /sys/kernel/debug/ for filesystems with debugfs exports + */ +#define FS_IOC_GETFSSYSFSPATH _IOR(0x15, 1, struct fs_sysfs_path) /* * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS) -- Gitee From b4f4dcf6ccdf510d56260bfe52ee6bb8e6671d3e Mon Sep 17 00:00:00 2001 From: Huang Xiaojia Date: Mon, 24 Jun 2024 14:37:04 +0800 Subject: [PATCH 1949/2138] erofs: convert to use super_set_uuid to support for FS_IOC_GETFSUUID ANBZ: #11101 commit cc69a681b2573e8865e29758f1a5b284328efb2d upstream. FS_IOC_GETFSUUID ioctl exposes the uuid of a filesystem. To support the ioctl, init sb->s_uuid with super_set_uuid(). 
Signed-off-by: Huang Xiaojia Reviewed-by: Gao Xiang Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20240624063704.2476070-1-huangxiaojia2@huawei.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 23d049ddeefe..ae8c778d6c6b 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -329,7 +329,7 @@ static int erofs_read_superblock(struct super_block *sb) sbi->build_time = le64_to_cpu(dsb->build_time); sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec); - memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid)); + super_set_uuid(sb, (void *)dsb->uuid, sizeof(dsb->uuid)); ret = strscpy(sbi->volume_name, dsb->volume_name, sizeof(dsb->volume_name)); -- Gitee From 4d740cdc111ae9880f16a2e20b61a4f065463d29 Mon Sep 17 00:00:00 2001 From: Huang Xiaojia Date: Sat, 20 Jul 2024 16:23:35 +0800 Subject: [PATCH 1950/2138] erofs: add support for FS_IOC_GETFSSYSFSPATH ANBZ: #11101 commit 684b290abc774202ff88897648f24520f40c916b upstream. FS_IOC_GETFSSYSFSPATH ioctl exposes /sys/fs path of a given filesystem, potentially standarizing sysfs reporting. This patch add support for FS_IOC_GETFSSYSFSPATH for erofs, "erofs/" will be outputted for bdev cases, "erofs/[domain_id,]" will be outputted for fscache cases. 
Signed-off-by: Huang Xiaojia Link: https://lore.kernel.org/r/20240720082335.441563-1-huangxiaojia2@huawei.com Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/super.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/fs/erofs/super.c b/fs/erofs/super.c index ae8c778d6c6b..a69c15f355e8 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -562,6 +562,21 @@ static const struct export_operations erofs_export_ops = { .get_parent = erofs_get_parent, }; +static void erofs_set_sysfs_name(struct super_block *sb) +{ + struct erofs_sb_info *sbi = EROFS_SB(sb); + + if (erofs_is_fscache_mode(sb)) { + if (sbi->domain_id) + super_set_sysfs_name_generic(sb, "%s,%s",sbi->domain_id, + sbi->fsid); + else + super_set_sysfs_name_generic(sb, "%s", sbi->fsid); + return; + } + super_set_sysfs_name_id(sb); +} + static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) { struct inode *inode; @@ -629,6 +644,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_flags |= SB_POSIXACL; else sb->s_flags &= ~SB_POSIXACL; + erofs_set_sysfs_name(sb); #ifdef CONFIG_EROFS_FS_ZIP xa_init(&sbi->managed_pslots); -- Gitee From a122953471aedcb92992636d4436a4c99eac01f5 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 28 Aug 2024 17:52:32 +0800 Subject: [PATCH 1951/2138] erofs: clean up erofs_register_sysfs() ANBZ: #11101 commit 59aadaa7ebafbc57e642d772cfc02c2b907e5b89 upstream. After commit 684b290abc77 ("erofs: add support for FS_IOC_GETFSSYSFSPATH"), `sb->s_sysfs_name` is now valid. Just use it to get rid of duplicated logic. 
Reviewed-by: Sandeep Dhavale Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240828095232.571946-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/super.c | 2 +- fs/erofs/sysfs.c | 30 ++++++------------------------ 2 files changed, 7 insertions(+), 25 deletions(-) diff --git a/fs/erofs/super.c b/fs/erofs/super.c index a69c15f355e8..ad028595e291 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -644,7 +644,6 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_flags |= SB_POSIXACL; else sb->s_flags &= ~SB_POSIXACL; - erofs_set_sysfs_name(sb); #ifdef CONFIG_EROFS_FS_ZIP xa_init(&sbi->managed_pslots); @@ -682,6 +681,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) if (err) return err; + erofs_set_sysfs_name(sb); err = erofs_register_sysfs(sb); if (err) return err; diff --git a/fs/erofs/sysfs.c b/fs/erofs/sysfs.c index 435e515c0792..63cffd0fd261 100644 --- a/fs/erofs/sysfs.c +++ b/fs/erofs/sysfs.c @@ -205,34 +205,16 @@ static struct kobject erofs_feat = { int erofs_register_sysfs(struct super_block *sb) { struct erofs_sb_info *sbi = EROFS_SB(sb); - char *name; - char *str = NULL; int err; - if (erofs_is_fscache_mode(sb)) { - if (sbi->domain_id) { - str = kasprintf(GFP_KERNEL, "%s,%s", sbi->domain_id, - sbi->fsid); - if (!str) - return -ENOMEM; - name = str; - } else { - name = sbi->fsid; - } - } else { - name = sb->s_id; - } sbi->s_kobj.kset = &erofs_root; init_completion(&sbi->s_kobj_unregister); - err = kobject_init_and_add(&sbi->s_kobj, &erofs_sb_ktype, NULL, "%s", name); - kfree(str); - if (err) - goto put_sb_kobj; - return 0; - -put_sb_kobj: - kobject_put(&sbi->s_kobj); - wait_for_completion(&sbi->s_kobj_unregister); + err = kobject_init_and_add(&sbi->s_kobj, &erofs_sb_ktype, NULL, "%s", + sb->s_sysfs_name); + if (err) { + kobject_put(&sbi->s_kobj); + 
wait_for_completion(&sbi->s_kobj_unregister); + } return err; } -- Gitee From 815efd672fc0a882e7d2d9a868b0a4ceaa625dc3 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 10 Sep 2024 15:08:47 +0800 Subject: [PATCH 1952/2138] erofs: handle overlapped pclusters out of crafted images properly ANBZ: #11101 commit 9e2f9d34dd12e6e5b244ec488bcebd0c2d566c50 upstream. syzbot reported a task hang issue due to a deadlock case where it is waiting for the folio lock of a cached folio that will be used for cache I/Os. After looking into the crafted fuzzed image, I found it's formed with several overlapped big pclusters as below: Ext: logical offset | length : physical offset | length 0: 0.. 16384 | 16384 : 151552.. 167936 | 16384 1: 16384.. 32768 | 16384 : 155648.. 172032 | 16384 2: 32768.. 49152 | 16384 : 537223168.. 537239552 | 16384 ... Here, extent 0/1 are physically overlapped although it's entirely _impossible_ for normal filesystem images generated by mkfs. First, managed folios containing compressed data will be marked as up-to-date and then unlocked immediately (unlike in-place folios) when compressed I/Os are complete. If physical blocks are not submitted in the incremental order, there should be separate BIOs to avoid dependency issues. However, the current code mis-arranges z_erofs_fill_bio_vec() and BIO submission which causes unexpected BIO waits. Second, managed folios will be connected to their own pclusters for efficient inter-queries. However, this is somewhat hard to implement easily if overlapped big pclusters exist. Again, these only appear in fuzzed images so let's simply fall back to temporary short-lived pages for correctness. Additionally, it justifies that referenced managed folios cannot be truncated for now and reverts part of commit 2080ca1ed3e4 ("erofs: tidy up `struct z_erofs_bvec`") for simplicity although it shouldn't be any difference. 
Reported-by: syzbot+4fc98ed414ae63d1ada2@syzkaller.appspotmail.com Reported-by: syzbot+de04e06b28cfecf2281c@syzkaller.appspotmail.com Reported-by: syzbot+c8c8238b394be4a1087d@syzkaller.appspotmail.com Tested-by: syzbot+4fc98ed414ae63d1ada2@syzkaller.appspotmail.com Closes: https://lore.kernel.org/r/0000000000002fda01061e334873@google.com Fixes: 8e6c8fa9f2e9 ("erofs: enable big pcluster feature") Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240910070847.3356592-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/zdata.c | 71 ++++++++++++++++++++++++++---------------------- 1 file changed, 38 insertions(+), 33 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 424f656cd765..a0bae499c5ff 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1428,6 +1428,7 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, struct z_erofs_bvec zbv; struct address_space *mapping; struct folio *folio; + struct page *page; int bs = i_blocksize(f->inode); /* Except for inplace folios, the entire folio can be used for I/Os */ @@ -1450,7 +1451,6 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, * file-backed folios will be used instead. */ if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) { - folio->private = 0; tocache = true; goto out_tocache; } @@ -1468,7 +1468,7 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, } folio_lock(folio); - if (folio->mapping == mc) { + if (likely(folio->mapping == mc)) { /* * The cached folio is still in managed cache but without * a valid `->private` pcluster hint. Let's reconnect them. 
@@ -1478,41 +1478,44 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, /* compressed_bvecs[] already takes a ref before */ folio_put(folio); } - - /* no need to submit if it is already up-to-date */ - if (folio_test_uptodate(folio)) { - folio_unlock(folio); - bvec->bv_page = NULL; + if (likely(folio->private == pcl)) { + /* don't submit cache I/Os again if already uptodate */ + if (folio_test_uptodate(folio)) { + folio_unlock(folio); + bvec->bv_page = NULL; + } + return; } - return; + /* + * Already linked with another pcluster, which only appears in + * crafted images by fuzzers for now. But handle this anyway. + */ + tocache = false; /* use temporary short-lived pages */ + } else { + DBG_BUGON(1); /* referenced managed folios can't be truncated */ + tocache = true; } - - /* - * It has been truncated, so it's unsafe to reuse this one. Let's - * allocate a new page for compressed data. - */ - DBG_BUGON(folio->mapping); - tocache = true; folio_unlock(folio); folio_put(folio); out_allocfolio: - zbv.page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); + page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); spin_lock(&pcl->obj.lockref.lock); - if (pcl->compressed_bvecs[nr].page) { - erofs_pagepool_add(&f->pagepool, zbv.page); + if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) { + erofs_pagepool_add(&f->pagepool, page); spin_unlock(&pcl->obj.lockref.lock); cond_resched(); goto repeat; } - bvec->bv_page = pcl->compressed_bvecs[nr].page = zbv.page; - folio = page_folio(zbv.page); - /* first mark it as a temporary shortlived folio (now 1 ref) */ - folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE; + bvec->bv_page = pcl->compressed_bvecs[nr].page = page; + folio = page_folio(page); spin_unlock(&pcl->obj.lockref.lock); out_tocache: if (!tocache || bs != PAGE_SIZE || - filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) + filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) { + /* turn into a temporary shortlived folio (1 ref) */ + 
folio->private = (void *)Z_EROFS_SHORTLIVED_PAGE; return; + } folio_attach_private(folio, pcl); /* drop a refcount added by allocpage (then 2 refs in total here) */ folio_put(folio); @@ -1647,13 +1650,10 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, cur = mdev.m_pa; end = cur + pcl->pclustersize; do { - z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc); - if (!bvec.bv_page) - continue; - + bvec.bv_page = NULL; if (bio && (cur != last_pa || bio->bi_bdev != mdev.m_bdev)) { -io_retry: +drain_io: if (!erofs_is_fscache_mode(sb)) submit_bio(bio); else @@ -1666,6 +1666,15 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, bio = NULL; } + if (!bvec.bv_page) { + z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc); + if (!bvec.bv_page) + continue; + if (cur + bvec.bv_len > end) + bvec.bv_len = end - cur; + DBG_BUGON(bvec.bv_len < sb->s_blocksize); + } + if (unlikely(PageWorkingset(bvec.bv_page)) && !memstall) { psi_memstall_enter(&pflags); @@ -1685,13 +1694,9 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, ++nr_bios; } - if (cur + bvec.bv_len > end) - bvec.bv_len = end - cur; - DBG_BUGON(bvec.bv_len < sb->s_blocksize); if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len, bvec.bv_offset)) - goto io_retry; - + goto drain_io; last_pa = cur + bvec.bv_len; bypass = false; } while ((cur += bvec.bv_len) < end); -- Gitee From 7c04d786377d975857a7c08137f36e2a28d96da4 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 30 Aug 2024 11:28:37 +0800 Subject: [PATCH 1953/2138] erofs: add file-backed mount support ANBZ: #11101 commit fb176750266a3d7f42ebdcf28e8ba40350b27847 upstream. It actually has been around for years: For containers and other sandbox use cases, there will be thousands (and even more) of authenticated (sub)images running on the same host, unlike OS images. 
Of course, all scenarios can use the same EROFS on-disk format, but bdev-backed mounts just work well for OS images since golden data is dumped into real block devices. However, it's somewhat hard for container runtimes to manage and isolate so many unnecessary virtual block devices safely and efficiently [1]: they just look like a burden to orchestrators and file-backed mounts are preferred indeed. There were already enough attempts such as Incremental FS, the original ComposeFS and PuzzleFS acting in the same way for immutable fses. As for current EROFS users, ComposeFS, containerd and Android APEXs will be directly benefited from it. On the other hand, previous experimental feature "erofs over fscache" was once also intended to provide a similar solution (inspired by Incremental FS discussion [2]), but the following facts show file-backed mounts will be a better approach: - Fscache infrastructure has recently been moved into new Netfslib which is an unexpected dependency to EROFS really, although it originally claims "it could be used for caching other things such as ISO9660 filesystems too." [3] - It takes an unexpectedly long time to upstream Fscache/Cachefiles enhancements. For example, the failover feature took more than one year, and the deamonless feature is still far behind now; - Ongoing HSM "fanotify pre-content hooks" [4] together with this will perfectly supersede "erofs over fscache" in a simpler way since developers (mainly containerd folks) could leverage their existing caching mechanism entirely in userspace instead of strictly following the predefined in-kernel caching tree hierarchy. After "fanotify pre-content hooks" lands upstream to provide the same functionality, "erofs over fscache" will be removed then (as an EROFS internal improvement and EROFS will not have to bother with on-demand fetching and/or caching improvements anymore.) 
[1] https://github.com/containers/storage/pull/2039 [2] https://lore.kernel.org/r/CAOQ4uxjbVxnubaPjVaGYiSwoGDTdpWbB=w_AeM6YM=zVixsUfQ@mail.gmail.com [3] https://docs.kernel.org/filesystems/caching/fscache.html [4] https://lore.kernel.org/r/cover.1723670362.git.josef@toxicpanda.com Closes: https://github.com/containers/composefs/issues/144 Reviewed-by: Sandeep Dhavale Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240830032840.3783206-1-hsiangkao@linux.alibaba.com Conflicts: 1. fs/erofs/data.c 1) erofs_fill_from_devinfo() 2. fs/erofs/super.c 1) erofs_init_device() Resolution: 1. fs/erofs/data.c 1) erofs_fill_from_devinfo(): Reimplement it according to the intent of this upstream patch, as this upstream patch depends on other third-party patches (e.g., file_bdev(), etc.). 2. fs/erofs/super.c 1) erofs_init_device(): Reimplement it according to the intent of this upstream patch, as the upstream patch depends on other third-party patches (e.g., file_bdev(), etc.). Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/Kconfig | 17 ++++++++++ fs/erofs/data.c | 38 ++++++++++++++--------- fs/erofs/inode.c | 5 ++- fs/erofs/internal.h | 10 +++++- fs/erofs/super.c | 76 +++++++++++++++++++++++++++++++-------------- 5 files changed, 106 insertions(+), 40 deletions(-) diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig index 5cedeb5b957a..cf99d819b63d 100644 --- a/fs/erofs/Kconfig +++ b/fs/erofs/Kconfig @@ -74,6 +74,23 @@ config EROFS_FS_SECURITY If you are not using a security module, say N. +config EROFS_FS_BACKED_BY_FILE + bool "File-backed EROFS filesystem support" + depends on EROFS_FS + default y + help + This allows EROFS to use filesystem image files directly, without + the intercession of loopback block devices or likewise. It is + particularly useful for container images with numerous blobs and + other sandboxes, where loop devices behave intricately. 
It can also + be used to simplify error-prone lifetime management of unnecessary + virtual block devices. + + Note that this feature, along with ongoing fanotify pre-content + hooks, will eventually replace "EROFS over fscache." + + If you don't want to enable this feature, say N. + config EROFS_FS_ZIP bool "EROFS Data Compression Support" depends on EROFS_FS diff --git a/fs/erofs/data.c b/fs/erofs/data.c index b2fe7744949d..43d40c3211a0 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -59,8 +59,12 @@ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb) { - if (erofs_is_fscache_mode(sb)) - buf->mapping = EROFS_SB(sb)->s_fscache->inode->i_mapping; + struct erofs_sb_info *sbi = EROFS_SB(sb); + + if (erofs_is_fileio_mode(sbi)) + buf->mapping = file_inode(sbi->fdev)->i_mapping; + else if (erofs_is_fscache_mode(sb)) + buf->mapping = sbi->s_fscache->inode->i_mapping; else buf->mapping = sb->s_bdev->bd_inode->i_mapping; } @@ -189,10 +193,25 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map) return err; } +static void erofs_fill_from_devinfo(struct super_block *sb, + struct erofs_map_dev *map, + struct erofs_device_info *dif) +{ + map->m_bdev = dif->bdev_handle ? dif->bdev_handle->bdev : NULL; + if (!dif->file && !map->m_bdev) { + erofs_err(sb, "invalid device handle for path %s", dif->path); + DBG_BUGON(1); + } + map->m_daxdev = dif->dax_dev; + map->m_dax_part_off = dif->dax_part_off; + map->m_fscache = dif->fscache; +} + int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map) { struct erofs_dev_context *devs = EROFS_SB(sb)->devs; struct erofs_device_info *dif; + erofs_off_t startoff, length; int id; map->m_bdev = sb->s_bdev; @@ -212,29 +231,20 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map) up_read(&devs->rwsem); return 0; } - map->m_bdev = dif->bdev_handle ? 
dif->bdev_handle->bdev : NULL; - map->m_daxdev = dif->dax_dev; - map->m_dax_part_off = dif->dax_part_off; - map->m_fscache = dif->fscache; + erofs_fill_from_devinfo(sb, map, dif); up_read(&devs->rwsem); } else if (devs->extra_devices && !devs->flatdev) { down_read(&devs->rwsem); idr_for_each_entry(&devs->tree, dif, id) { - erofs_off_t startoff, length; - if (!dif->mapped_blkaddr) continue; + startoff = erofs_pos(sb, dif->mapped_blkaddr); length = erofs_pos(sb, dif->blocks); - if (map->m_pa >= startoff && map->m_pa < startoff + length) { map->m_pa -= startoff; - map->m_bdev = dif->bdev_handle ? - dif->bdev_handle->bdev : NULL; - map->m_daxdev = dif->dax_dev; - map->m_dax_part_off = dif->dax_part_off; - map->m_fscache = dif->fscache; + erofs_fill_from_devinfo(sb, map, dif); break; } } diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 7a63af980c10..5d39c00f1709 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -249,7 +249,10 @@ static int erofs_fill_inode(struct inode *inode) } mapping_set_large_folios(inode->i_mapping); - if (erofs_inode_is_data_compressed(vi->datalayout)) { + if (erofs_is_fileio_mode(EROFS_SB(inode->i_sb))) { + /* XXX: data I/Os will be implemented in the following patches */ + err = -EOPNOTSUPP; + } else if (erofs_inode_is_data_compressed(vi->datalayout)) { #ifdef CONFIG_EROFS_FS_ZIP DO_ONCE_LITE_IF(inode->i_blkbits != PAGE_SHIFT, erofs_info, inode->i_sb, diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 5bd27052fd73..3c3ccdc7364d 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -50,6 +50,7 @@ struct erofs_device_info { char *path; struct erofs_fscache *fscache; struct bdev_handle *bdev_handle; + struct file *file; struct dax_device *dax_dev; u64 dax_part_off; @@ -130,6 +131,7 @@ struct erofs_sb_info { struct erofs_sb_lz4_info lz4; #endif /* CONFIG_EROFS_FS_ZIP */ + struct file *fdev; struct inode *packed_inode; struct erofs_dev_context *devs; struct dax_device *dax_dev; @@ -190,9 +192,15 @@ struct erofs_sb_info { 
#define set_opt(opt, option) ((opt)->mount_opt |= EROFS_MOUNT_##option) #define test_opt(opt, option) ((opt)->mount_opt & EROFS_MOUNT_##option) +static inline bool erofs_is_fileio_mode(struct erofs_sb_info *sbi) +{ + return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->fdev; +} + static inline bool erofs_is_fscache_mode(struct super_block *sb) { - return IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && !sb->s_bdev; + return IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && + !erofs_is_fileio_mode(EROFS_SB(sb)) && !sb->s_bdev; } enum { diff --git a/fs/erofs/super.c b/fs/erofs/super.c index ad028595e291..780217286ac2 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "xattr.h" #define CREATE_TRACE_POINTS @@ -162,6 +163,7 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, struct erofs_fscache *fscache; struct erofs_deviceslot *dis; struct bdev_handle *bdev_handle; + struct file *file; dis = erofs_read_metabuf(buf, sb, *pos, EROFS_KMAP); if (IS_ERR(dis)) @@ -183,13 +185,20 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, return PTR_ERR(fscache); dif->fscache = fscache; } else if (!sbi->devs->flatdev) { - bdev_handle = bdev_open_by_path(dif->path, BLK_OPEN_READ, - sb->s_type, NULL); - if (IS_ERR(bdev_handle)) - return PTR_ERR(bdev_handle); - dif->bdev_handle = bdev_handle; - dif->dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev, - &dif->dax_part_off, NULL, NULL); + if (erofs_is_fileio_mode(sbi)) { + file = filp_open(dif->path, O_RDONLY | O_LARGEFILE, 0); + if (IS_ERR(file)) + return PTR_ERR(file); + dif->file = file; + } else { + bdev_handle = bdev_open_by_path(dif->path, BLK_OPEN_READ, + sb->s_type, NULL); + if (IS_ERR(bdev_handle)) + return PTR_ERR(bdev_handle); + dif->bdev_handle = bdev_handle; + dif->dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev, + &dif->dax_part_off, NULL, NULL); + } } dif->blocks = le32_to_cpu(dis->blocks); @@ -566,15 +575,16 @@ static void 
erofs_set_sysfs_name(struct super_block *sb) { struct erofs_sb_info *sbi = EROFS_SB(sb); - if (erofs_is_fscache_mode(sb)) { - if (sbi->domain_id) - super_set_sysfs_name_generic(sb, "%s,%s",sbi->domain_id, - sbi->fsid); - else - super_set_sysfs_name_generic(sb, "%s", sbi->fsid); - return; - } - super_set_sysfs_name_id(sb); + if (sbi->domain_id) + super_set_sysfs_name_generic(sb, "%s,%s", sbi->domain_id, + sbi->fsid); + else if (sbi->fsid) + super_set_sysfs_name_generic(sb, "%s", sbi->fsid); + else if (erofs_is_fileio_mode(sbi)) + super_set_sysfs_name_generic(sb, "%s", + bdi_dev_name(sb->s_bdi)); + else + super_set_sysfs_name_id(sb); } static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) @@ -589,14 +599,15 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_op = &erofs_sops; sbi->blkszbits = PAGE_SHIFT; - if (erofs_is_fscache_mode(sb)) { + if (!sb->s_bdev) { sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; - err = erofs_fscache_register_fs(sb); - if (err) - return err; - + if (erofs_is_fscache_mode(sb)) { + err = erofs_fscache_register_fs(sb); + if (err) + return err; + } err = super_setup_bdi(sb); if (err) return err; @@ -693,11 +704,24 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) static int erofs_fc_get_tree(struct fs_context *fc) { struct erofs_sb_info *sbi = fc->s_fs_info; + int ret; if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) return get_tree_nodev(fc, erofs_fc_fill_super); - return get_tree_bdev(fc, erofs_fc_fill_super); + ret = get_tree_bdev(fc, erofs_fc_fill_super); +#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE + if (ret == -ENOTBLK) { + if (!fc->source) + return invalf(fc, "No source specified"); + sbi->fdev = filp_open(fc->source, O_RDONLY | O_LARGEFILE, 0); + if (IS_ERR(sbi->fdev)) + return PTR_ERR(sbi->fdev); + + return get_tree_nodev(fc, erofs_fc_fill_super); + } +#endif + return ret; } static int erofs_fc_reconfigure(struct fs_context *fc) 
@@ -729,6 +753,8 @@ static int erofs_release_device_info(int id, void *ptr, void *data) fs_put_dax(dif->dax_dev, NULL); if (dif->bdev_handle) bdev_release(dif->bdev_handle); + if (dif->file) + fput(dif->file); erofs_fscache_unregister_cookie(dif->fscache); dif->fscache = NULL; kfree(dif->path); @@ -791,7 +817,7 @@ static void erofs_kill_sb(struct super_block *sb) { struct erofs_sb_info *sbi = EROFS_SB(sb); - if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) + if ((IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) || sbi->fdev) kill_anon_super(sb); else kill_block_super(sb); @@ -801,6 +827,8 @@ static void erofs_kill_sb(struct super_block *sb) erofs_fscache_unregister_fs(sb); kfree(sbi->fsid); kfree(sbi->domain_id); + if (sbi->fdev) + fput(sbi->fdev); kfree(sbi); sb->s_fs_info = NULL; } @@ -903,7 +931,7 @@ static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_namelen = EROFS_NAME_LEN; if (uuid_is_null(&sb->s_uuid)) - buf->f_fsid = u64_to_fsid(erofs_is_fscache_mode(sb) ? 0 : + buf->f_fsid = u64_to_fsid(!sb->s_bdev ? 0 : huge_encode_dev(sb->s_bdev->bd_dev)); else buf->f_fsid = uuid_to_fsid(sb->s_uuid.b); -- Gitee From 368c29fa61abb45b52f7b015b6364c5c82573580 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 5 Sep 2024 17:30:31 +0800 Subject: [PATCH 1954/2138] erofs: support unencoded inodes for fileio ANBZ: #11101 commit ce63cb62d794c98c7631c2296fa845f2a8d0a4a1 upstream. Since EROFS only needs to handle read requests in simple contexts, Just directly use vfs_iocb_iter_read() for data I/Os. Reviewed-by: Sandeep Dhavale Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240905093031.2745929-1-hsiangkao@linux.alibaba.com Conflicts: 1.fs/erofs/data.c 1) erofs_fill_from_devinfo(): Reimplement it according to the intent of the upstream patch, as the upstream depends on (file_bdev(), etc.). 
Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/Makefile | 1 + fs/erofs/data.c | 46 +++++++++++- fs/erofs/fileio.c | 178 ++++++++++++++++++++++++++++++++++++++++++++ fs/erofs/inode.c | 17 +++-- fs/erofs/internal.h | 7 +- fs/erofs/zdata.c | 46 ++---------- 6 files changed, 244 insertions(+), 51 deletions(-) create mode 100644 fs/erofs/fileio.c diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile index 097d672e6b14..4331d53c7109 100644 --- a/fs/erofs/Makefile +++ b/fs/erofs/Makefile @@ -7,4 +7,5 @@ erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o zutil.o erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o erofs-$(CONFIG_EROFS_FS_ZIP_DEFLATE) += decompressor_deflate.o erofs-$(CONFIG_EROFS_FS_ZIP_ZSTD) += decompressor_zstd.o +erofs-$(CONFIG_EROFS_FS_BACKED_BY_FILE) += fileio.o erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 43d40c3211a0..b50eca31bd54 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -132,7 +132,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map) if (map->m_la >= inode->i_size) { /* leave out-of-bound access unmapped */ map->m_flags = 0; - map->m_plen = 0; + map->m_plen = map->m_llen; goto out; } @@ -198,8 +198,9 @@ static void erofs_fill_from_devinfo(struct super_block *sb, struct erofs_device_info *dif) { map->m_bdev = dif->bdev_handle ? 
dif->bdev_handle->bdev : NULL; - if (!dif->file && !map->m_bdev) { - erofs_err(sb, "invalid device handle for path %s", dif->path); + map->m_fp = dif->file; + if (!map->m_bdev && !map->m_fp) { + erofs_err(sb, "invalid device handle and file for path %s", dif->path); DBG_BUGON(1); } map->m_daxdev = dif->dax_dev; @@ -218,6 +219,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map) map->m_daxdev = EROFS_SB(sb)->dax_dev; map->m_dax_part_off = EROFS_SB(sb)->dax_part_off; map->m_fscache = EROFS_SB(sb)->s_fscache; + map->m_fp = EROFS_SB(sb)->fdev; if (map->m_deviceid) { down_read(&devs->rwsem); @@ -253,6 +255,42 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map) return 0; } +/* + * bit 30: I/O error occurred on this folio + * bit 0 - 29: remaining parts to complete this folio + */ +#define EROFS_ONLINEFOLIO_EIO (1 << 30) + +void erofs_onlinefolio_init(struct folio *folio) +{ + union { + atomic_t o; + void *v; + } u = { .o = ATOMIC_INIT(1) }; + + folio->private = u.v; /* valid only if file-backed folio is locked */ +} + +void erofs_onlinefolio_split(struct folio *folio) +{ + atomic_inc((atomic_t *)&folio->private); +} + +void erofs_onlinefolio_end(struct folio *folio, int err) +{ + int orig, v; + + do { + orig = atomic_read((atomic_t *)&folio->private); + v = (orig - 1) | (err ? 
EROFS_ONLINEFOLIO_EIO : 0); + } while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig); + + if (v & ~EROFS_ONLINEFOLIO_EIO) + return; + folio->private = 0; + folio_end_read(folio, !(v & EROFS_ONLINEFOLIO_EIO)); +} + static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, unsigned int flags, struct iomap *iomap, struct iomap *srcmap) { @@ -402,7 +440,7 @@ static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) } /* for uncompressed (aligned) files and raw access for other files */ -const struct address_space_operations erofs_raw_access_aops = { +const struct address_space_operations erofs_aops = { .read_folio = erofs_read_folio, .readahead = erofs_readahead, .bmap = erofs_bmap, diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c new file mode 100644 index 000000000000..42b346593bf5 --- /dev/null +++ b/fs/erofs/fileio.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2024, Alibaba Cloud + */ +#include "internal.h" +#include + +struct erofs_fileio_rq { + struct bio_vec bvecs[BIO_MAX_VECS]; + struct bio bio; + struct kiocb iocb; +}; + +struct erofs_fileio { + struct erofs_map_blocks map; + struct erofs_map_dev dev; + struct erofs_fileio_rq *rq; +}; + +static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret) +{ + struct erofs_fileio_rq *rq = + container_of(iocb, struct erofs_fileio_rq, iocb); + struct folio_iter fi; + + DBG_BUGON(rq->bio.bi_end_io); + if (ret > 0) { + if (ret != rq->bio.bi_iter.bi_size) { + bio_advance(&rq->bio, ret); + zero_fill_bio(&rq->bio); + } + ret = 0; + } + bio_for_each_folio_all(fi, &rq->bio) { + DBG_BUGON(folio_test_uptodate(fi.folio)); + erofs_onlinefolio_end(fi.folio, ret); + } + bio_uninit(&rq->bio); + kfree(rq); +} + +static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq) +{ + struct iov_iter iter; + int ret; + + if (!rq) + return; + rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT; + rq->iocb.ki_ioprio = 
get_current_ioprio(); + rq->iocb.ki_complete = erofs_fileio_ki_complete; + rq->iocb.ki_flags = (rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT) ? + IOCB_DIRECT : 0; + iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt, + rq->bio.bi_iter.bi_size); + ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter); + if (ret != -EIOCBQUEUED) + erofs_fileio_ki_complete(&rq->iocb, ret); +} + +static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev) +{ + struct erofs_fileio_rq *rq = kzalloc(sizeof(*rq), + GFP_KERNEL | __GFP_NOFAIL); + + bio_init(&rq->bio, NULL, rq->bvecs, BIO_MAX_VECS, REQ_OP_READ); + rq->iocb.ki_filp = mdev->m_fp; + return rq; +} + +static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio) +{ + struct inode *inode = folio_inode(folio); + struct erofs_map_blocks *map = &io->map; + unsigned int cur = 0, end = folio_size(folio), len, attached = 0; + loff_t pos = folio_pos(folio), ofs; + struct iov_iter iter; + struct bio_vec bv; + int err = 0; + + erofs_onlinefolio_init(folio); + while (cur < end) { + if (!in_range(pos + cur, map->m_la, map->m_llen)) { + map->m_la = pos + cur; + map->m_llen = end - cur; + err = erofs_map_blocks(inode, map); + if (err) + break; + } + + ofs = folio_pos(folio) + cur - map->m_la; + len = min_t(loff_t, map->m_llen - ofs, end - cur); + if (map->m_flags & EROFS_MAP_META) { + struct erofs_buf buf = __EROFS_BUF_INITIALIZER; + void *src; + + src = erofs_read_metabuf(&buf, inode->i_sb, + map->m_pa + ofs, EROFS_KMAP); + if (IS_ERR(src)) { + err = PTR_ERR(src); + break; + } + bvec_set_folio(&bv, folio, len, cur); + iov_iter_bvec(&iter, ITER_DEST, &bv, 1, len); + if (copy_to_iter(src, len, &iter) != len) { + erofs_put_metabuf(&buf); + err = -EIO; + break; + } + erofs_put_metabuf(&buf); + } else if (!(map->m_flags & EROFS_MAP_MAPPED)) { + folio_zero_segment(folio, cur, cur + len); + attached = 0; + } else { + if (io->rq && (map->m_pa + ofs != io->dev.m_pa || + map->m_deviceid != 
io->dev.m_deviceid)) { +io_retry: + erofs_fileio_rq_submit(io->rq); + io->rq = NULL; + } + + if (!io->rq) { + io->dev = (struct erofs_map_dev) { + .m_pa = io->map.m_pa + ofs, + .m_deviceid = io->map.m_deviceid, + }; + err = erofs_map_dev(inode->i_sb, &io->dev); + if (err) + break; + io->rq = erofs_fileio_rq_alloc(&io->dev); + io->rq->bio.bi_iter.bi_sector = io->dev.m_pa >> 9; + attached = 0; + } + if (!attached++) + erofs_onlinefolio_split(folio); + if (!bio_add_folio(&io->rq->bio, folio, len, cur)) + goto io_retry; + io->dev.m_pa += len; + } + cur += len; + } + erofs_onlinefolio_end(folio, err); + return err; +} + +static int erofs_fileio_read_folio(struct file *file, struct folio *folio) +{ + struct erofs_fileio io = {}; + int err; + + trace_erofs_read_folio(folio, true); + err = erofs_fileio_scan_folio(&io, folio); + erofs_fileio_rq_submit(io.rq); + return err; +} + +static void erofs_fileio_readahead(struct readahead_control *rac) +{ + struct inode *inode = rac->mapping->host; + struct erofs_fileio io = {}; + struct folio *folio; + int err; + + trace_erofs_readpages(inode, readahead_index(rac), + readahead_count(rac), true); + while ((folio = readahead_folio(rac))) { + err = erofs_fileio_scan_folio(&io, folio); + if (err && err != -EINTR) + erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu", + folio->index, EROFS_I(inode)->nid); + } + erofs_fileio_rq_submit(io.rq); +} + +const struct address_space_operations erofs_fileio_aops = { + .read_folio = erofs_fileio_read_folio, + .readahead = erofs_fileio_readahead, +}; diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 5d39c00f1709..d9a48b1244e5 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -249,11 +249,14 @@ static int erofs_fill_inode(struct inode *inode) } mapping_set_large_folios(inode->i_mapping); - if (erofs_is_fileio_mode(EROFS_SB(inode->i_sb))) { - /* XXX: data I/Os will be implemented in the following patches */ - err = -EOPNOTSUPP; - } else if 
(erofs_inode_is_data_compressed(vi->datalayout)) { + if (erofs_inode_is_data_compressed(vi->datalayout)) { #ifdef CONFIG_EROFS_FS_ZIP +#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE + if (erofs_is_fileio_mode(EROFS_SB(inode->i_sb))) { + err = -EOPNOTSUPP; + goto out_unlock; + } +#endif DO_ONCE_LITE_IF(inode->i_blkbits != PAGE_SHIFT, erofs_info, inode->i_sb, "EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!"); @@ -262,10 +265,14 @@ static int erofs_fill_inode(struct inode *inode) err = -EOPNOTSUPP; #endif } else { - inode->i_mapping->a_ops = &erofs_raw_access_aops; + inode->i_mapping->a_ops = &erofs_aops; #ifdef CONFIG_EROFS_FS_ONDEMAND if (erofs_is_fscache_mode(inode->i_sb)) inode->i_mapping->a_ops = &erofs_fscache_access_aops; +#endif +#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE + if (erofs_is_fileio_mode(EROFS_SB(inode->i_sb))) + inode->i_mapping->a_ops = &erofs_fileio_aops; #endif } out_unlock: diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 3c3ccdc7364d..1e0103551a46 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -373,6 +373,7 @@ struct erofs_map_dev { struct erofs_fscache *m_fscache; struct block_device *m_bdev; struct dax_device *m_daxdev; + struct file *m_fp; u64 m_dax_part_off; erofs_off_t m_pa; @@ -381,7 +382,8 @@ struct erofs_map_dev { extern const struct super_operations erofs_sops; -extern const struct address_space_operations erofs_raw_access_aops; +extern const struct address_space_operations erofs_aops; +extern const struct address_space_operations erofs_fileio_aops; extern const struct address_space_operations z_erofs_aops; extern const struct address_space_operations erofs_fscache_access_aops; @@ -412,6 +414,9 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev); int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len); int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map); +void erofs_onlinefolio_init(struct folio *folio); +void 
erofs_onlinefolio_split(struct folio *folio); +void erofs_onlinefolio_end(struct folio *folio, int err); struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid); int erofs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index a0bae499c5ff..3371dcb549dc 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -122,42 +122,6 @@ static bool erofs_folio_is_managed(struct erofs_sb_info *sbi, struct folio *fo) return fo->mapping == MNGD_MAPPING(sbi); } -/* - * bit 30: I/O error occurred on this folio - * bit 0 - 29: remaining parts to complete this folio - */ -#define Z_EROFS_FOLIO_EIO (1 << 30) - -static void z_erofs_onlinefolio_init(struct folio *folio) -{ - union { - atomic_t o; - void *v; - } u = { .o = ATOMIC_INIT(1) }; - - folio->private = u.v; /* valid only if file-backed folio is locked */ -} - -static void z_erofs_onlinefolio_split(struct folio *folio) -{ - atomic_inc((atomic_t *)&folio->private); -} - -static void z_erofs_onlinefolio_end(struct folio *folio, int err) -{ - int orig, v; - - do { - orig = atomic_read((atomic_t *)&folio->private); - v = (orig - 1) | (err ? 
Z_EROFS_FOLIO_EIO : 0); - } while (atomic_cmpxchg((atomic_t *)&folio->private, orig, v) != orig); - - if (v & ~Z_EROFS_FOLIO_EIO) - return; - folio->private = 0; - folio_end_read(folio, !(v & Z_EROFS_FOLIO_EIO)); -} - #define Z_EROFS_ONSTACK_PAGES 32 /* @@ -965,7 +929,7 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f, int err = 0; tight = (bs == PAGE_SIZE); - z_erofs_onlinefolio_init(folio); + erofs_onlinefolio_init(folio); do { if (offset + end - 1 < map->m_la || offset + end - 1 >= map->m_la + map->m_llen) { @@ -1024,7 +988,7 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f, if (err) break; - z_erofs_onlinefolio_split(folio); + erofs_onlinefolio_split(folio); if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK)) f->pcl->multibases = true; if (f->pcl->length < offset + end - map->m_la) { @@ -1044,7 +1008,7 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f, tight = (bs == PAGE_SIZE); } } while ((end = cur) > 0); - z_erofs_onlinefolio_end(folio, err); + erofs_onlinefolio_end(folio, err); return err; } @@ -1147,7 +1111,7 @@ static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be, cur += len; } kunmap_local(dst); - z_erofs_onlinefolio_end(page_folio(bvi->bvec.page), err); + erofs_onlinefolio_end(page_folio(bvi->bvec.page), err); list_del(p); kfree(bvi); } @@ -1302,7 +1266,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, DBG_BUGON(z_erofs_page_is_invalidated(page)); if (!z_erofs_is_shortlived_page(page)) { - z_erofs_onlinefolio_end(page_folio(page), err); + erofs_onlinefolio_end(page_folio(page), err); continue; } if (pcl->algorithmformat != Z_EROFS_COMPRESSION_LZ4) { -- Gitee From 0ae92f2dcd72ca9fe11bc8b2d996b07601ad0f88 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 30 Aug 2024 11:28:39 +0800 Subject: [PATCH 1955/2138] erofs: support compressed inodes for fileio ANBZ: #11101 commit 283213718f5d618dfe88d16a3c63a077a12f15ec upstream. 
Use pseudo bios just like the previous fscache approach since merged bio_vecs can be filled properly with unique interfaces. Reviewed-by: Sandeep Dhavale Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240830032840.3783206-3-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/fileio.c | 22 ++++++++++++++++++---- fs/erofs/inode.c | 6 ------ fs/erofs/internal.h | 8 ++++++++ fs/erofs/zdata.c | 27 +++++++++++++++++---------- 4 files changed, 43 insertions(+), 20 deletions(-) diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c index 42b346593bf5..3af96b1e2c2a 100644 --- a/fs/erofs/fileio.c +++ b/fs/erofs/fileio.c @@ -23,7 +23,6 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret) container_of(iocb, struct erofs_fileio_rq, iocb); struct folio_iter fi; - DBG_BUGON(rq->bio.bi_end_io); if (ret > 0) { if (ret != rq->bio.bi_iter.bi_size) { bio_advance(&rq->bio, ret); @@ -31,9 +30,13 @@ static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret) } ret = 0; } - bio_for_each_folio_all(fi, &rq->bio) { - DBG_BUGON(folio_test_uptodate(fi.folio)); - erofs_onlinefolio_end(fi.folio, ret); + if (rq->bio.bi_end_io) { + rq->bio.bi_end_io(&rq->bio); + } else { + bio_for_each_folio_all(fi, &rq->bio) { + DBG_BUGON(folio_test_uptodate(fi.folio)); + erofs_onlinefolio_end(fi.folio, ret); + } } bio_uninit(&rq->bio); kfree(rq); @@ -68,6 +71,17 @@ static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev) return rq; } +struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev) +{ + return &erofs_fileio_rq_alloc(mdev)->bio; +} + +void erofs_fileio_submit_bio(struct bio *bio) +{ + return erofs_fileio_rq_submit(container_of(bio, struct erofs_fileio_rq, + bio)); +} + static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio) { struct inode *inode = folio_inode(folio); diff --git a/fs/erofs/inode.c 
b/fs/erofs/inode.c index d9a48b1244e5..67cd8a59c00c 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -251,12 +251,6 @@ static int erofs_fill_inode(struct inode *inode) mapping_set_large_folios(inode->i_mapping); if (erofs_inode_is_data_compressed(vi->datalayout)) { #ifdef CONFIG_EROFS_FS_ZIP -#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE - if (erofs_is_fileio_mode(EROFS_SB(inode->i_sb))) { - err = -EOPNOTSUPP; - goto out_unlock; - } -#endif DO_ONCE_LITE_IF(inode->i_blkbits != PAGE_SHIFT, erofs_info, inode->i_sb, "EXPERIMENTAL EROFS subpage compressed block support in use. Use at your own risk!"); diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 1e0103551a46..ea1f9afe88db 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -490,6 +490,14 @@ static inline void z_erofs_exit_subsystem(void) {} static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; } #endif /* !CONFIG_EROFS_FS_ZIP */ +#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE +struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev); +void erofs_fileio_submit_bio(struct bio *bio); +#else +static inline struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev) { return NULL; } +static inline void erofs_fileio_submit_bio(struct bio *bio) {} +#endif + #ifdef CONFIG_EROFS_FS_ONDEMAND int erofs_fscache_register_fs(struct super_block *sb); void erofs_fscache_unregister_fs(struct super_block *sb); diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 3371dcb549dc..dca11ab0ab75 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1618,10 +1618,12 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, if (bio && (cur != last_pa || bio->bi_bdev != mdev.m_bdev)) { drain_io: - if (!erofs_is_fscache_mode(sb)) - submit_bio(bio); - else + if (erofs_is_fileio_mode(EROFS_SB(sb))) + erofs_fileio_submit_bio(bio); + else if (erofs_is_fscache_mode(sb)) erofs_fscache_submit_bio(bio); + else + submit_bio(bio); if (memstall) { psi_memstall_leave(&pflags); @@ -1646,10 
+1648,13 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, } if (!bio) { - bio = erofs_is_fscache_mode(sb) ? - erofs_fscache_bio_alloc(&mdev) : - bio_alloc(mdev.m_bdev, BIO_MAX_VECS, - REQ_OP_READ, GFP_NOIO); + if (erofs_is_fileio_mode(EROFS_SB(sb))) + bio = erofs_fileio_bio_alloc(&mdev); + else if (erofs_is_fscache_mode(sb)) + bio = erofs_fscache_bio_alloc(&mdev); + else + bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS, + REQ_OP_READ, GFP_NOIO); bio->bi_end_io = z_erofs_endio; bio->bi_iter.bi_sector = cur >> 9; bio->bi_private = q[JQ_SUBMIT]; @@ -1672,10 +1677,12 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, } while (owned_head != Z_EROFS_PCLUSTER_TAIL); if (bio) { - if (!erofs_is_fscache_mode(sb)) - submit_bio(bio); - else + if (erofs_is_fileio_mode(EROFS_SB(sb))) + erofs_fileio_submit_bio(bio); + else if (erofs_is_fscache_mode(sb)) erofs_fscache_submit_bio(bio); + else + submit_bio(bio); if (memstall) psi_memstall_leave(&pflags); } -- Gitee From ca74ff4d952311acac7506491cd72aa2aa7845b5 Mon Sep 17 00:00:00 2001 From: Hongzhen Luo Date: Tue, 15 Oct 2024 18:38:36 +0800 Subject: [PATCH 1956/2138] erofs: fix blksize < PAGE_SIZE for file-backed mounts ANBZ: #11101 commit bae0854160939a64a092516ff1b2f221402b843b upstream. Adjust sb->s_blocksize{,_bits} directly for file-backed mounts when the fs block size is smaller than PAGE_SIZE. Previously, EROFS used sb_set_blocksize(), which caused a panic if bdev-backed mounts is not used. 
Fixes: fb176750266a ("erofs: add file-backed mount support") Signed-off-by: Hongzhen Luo Link: https://lore.kernel.org/r/20241015103836.3757438-1-hongzhen@linux.alibaba.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/super.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 780217286ac2..22f342b335f7 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -631,7 +631,11 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) errorfc(fc, "unsupported blksize for fscache mode"); return -EINVAL; } - if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) { + + if (erofs_is_fileio_mode(sbi)) { + sb->s_blocksize = 1 << sbi->blkszbits; + sb->s_blocksize_bits = sbi->blkszbits; + } else if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) { errorfc(fc, "failed to set erofs blksize"); return -EINVAL; } -- Gitee From 7b447d6c605905c82f9f8dc37e4014766413f300 Mon Sep 17 00:00:00 2001 From: Yiyang Wu Date: Mon, 2 Sep 2024 16:31:46 +0800 Subject: [PATCH 1957/2138] erofs: use kmemdup_nul in erofs_fill_symlink ANBZ: #11101 commit b1bbb9a637a329873e14b596b7e6fa2fd44b87b4 upstream. Remove open coding in erofs_fill_symlink. 
Suggested-by: Al Viro Link: https://lore.kernel.org/all/20240425222847.GN2118490@ZenIV Signed-off-by: Yiyang Wu Link: https://lore.kernel.org/r/20240902083147.450558-2-toolmanp@tlmp.cc Reviewed-by: Gao Xiang Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/inode.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 67cd8a59c00c..7d6cad63f931 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -178,7 +178,6 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr, { struct erofs_inode *vi = EROFS_I(inode); loff_t off; - char *lnk; m_pofs += vi->xattr_isize; /* check if it cannot be handled with fast symlink scheme */ @@ -189,14 +188,9 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr, return 0; } - lnk = kmalloc(inode->i_size + 1, GFP_KERNEL); - if (!lnk) + inode->i_link = kmemdup_nul(kaddr + m_pofs, inode->i_size, GFP_KERNEL); + if (!inode->i_link) return -ENOMEM; - - memcpy(lnk, kaddr + m_pofs, inode->i_size); - lnk[inode->i_size] = '\0'; - - inode->i_link = lnk; inode->i_op = &erofs_fast_symlink_iops; return 0; } -- Gitee From 9b54399675f2cbceff5834f336a07a2963361b87 Mon Sep 17 00:00:00 2001 From: Yiyang Wu Date: Mon, 2 Sep 2024 17:34:12 +0800 Subject: [PATCH 1958/2138] erofs: refactor read_inode calling convention ANBZ: #11101 commit 53d514b970106976fd64f593ea13b55ebf26b3ff upstream. Refactor out the iop binding behavior out of the erofs_fill_symlink and move erofs_buf into the erofs_read_inode, so that erofs_fill_inode can only deal with inode operation bindings and can be decoupled from metabuf operations. This results in better calling conventions. Note that after this patch, we do not need erofs_buf and ofs as parameters any more when calling erofs_read_inode as all the data operations are now included in itself. 
Suggested-by: Al Viro Link: https://lore.kernel.org/all/20240425222847.GN2118490@ZenIV/ Signed-off-by: Yiyang Wu Reviewed-by: Gao Xiang Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20240902093412.509083-1-toolmanp@tlmp.cc Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/inode.c | 111 ++++++++++++++++++++++------------------------- 1 file changed, 52 insertions(+), 59 deletions(-) diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 7d6cad63f931..f4a9c3b300a3 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -8,8 +8,24 @@ #include -static void *erofs_read_inode(struct erofs_buf *buf, - struct inode *inode, unsigned int *ofs) +static int erofs_fill_symlink(struct inode *inode, void *kaddr, + unsigned int m_pofs) +{ + struct erofs_inode *vi = EROFS_I(inode); + loff_t off; + + m_pofs += vi->xattr_isize; + /* check if it cannot be handled with fast symlink scheme */ + if (vi->datalayout != EROFS_INODE_FLAT_INLINE || inode->i_size < 0 || + check_add_overflow(m_pofs, inode->i_size, &off) || + off > i_blocksize(inode)) + return 0; + + inode->i_link = kmemdup_nul(kaddr + m_pofs, inode->i_size, GFP_KERNEL); + return inode->i_link ? 
0 : -ENOMEM; +} + +static int erofs_read_inode(struct inode *inode) { struct super_block *sb = inode->i_sb; struct erofs_sb_info *sbi = EROFS_SB(sb); @@ -20,20 +36,21 @@ static void *erofs_read_inode(struct erofs_buf *buf, struct erofs_inode_compact *dic; struct erofs_inode_extended *die, *copied = NULL; union erofs_inode_i_u iu; - unsigned int ifmt; - int err; + struct erofs_buf buf = __EROFS_BUF_INITIALIZER; + unsigned int ifmt, ofs; + int err = 0; blkaddr = erofs_blknr(sb, inode_loc); - *ofs = erofs_blkoff(sb, inode_loc); + ofs = erofs_blkoff(sb, inode_loc); - kaddr = erofs_read_metabuf(buf, sb, erofs_pos(sb, blkaddr), EROFS_KMAP); + kaddr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr), EROFS_KMAP); if (IS_ERR(kaddr)) { erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld", vi->nid, PTR_ERR(kaddr)); - return kaddr; + return PTR_ERR(kaddr); } - dic = kaddr + *ofs; + dic = kaddr + ofs; ifmt = le16_to_cpu(dic->i_format); if (ifmt & ~EROFS_I_ALL) { erofs_err(sb, "unsupported i_format %u of nid %llu", @@ -54,11 +71,11 @@ static void *erofs_read_inode(struct erofs_buf *buf, case EROFS_INODE_LAYOUT_EXTENDED: vi->inode_isize = sizeof(struct erofs_inode_extended); /* check if the extended inode acrosses block boundary */ - if (*ofs + vi->inode_isize <= sb->s_blocksize) { - *ofs += vi->inode_isize; + if (ofs + vi->inode_isize <= sb->s_blocksize) { + ofs += vi->inode_isize; die = (struct erofs_inode_extended *)dic; } else { - const unsigned int gotten = sb->s_blocksize - *ofs; + const unsigned int gotten = sb->s_blocksize - ofs; copied = kmalloc(vi->inode_isize, GFP_KERNEL); if (!copied) { @@ -66,16 +83,16 @@ static void *erofs_read_inode(struct erofs_buf *buf, goto err_out; } memcpy(copied, dic, gotten); - kaddr = erofs_read_metabuf(buf, sb, erofs_pos(sb, blkaddr + 1), + kaddr = erofs_read_metabuf(&buf, sb, erofs_pos(sb, blkaddr + 1), EROFS_KMAP); if (IS_ERR(kaddr)) { erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld", vi->nid, 
PTR_ERR(kaddr)); kfree(copied); - return kaddr; + return PTR_ERR(kaddr); } - *ofs = vi->inode_isize - gotten; - memcpy((u8 *)copied + gotten, kaddr, *ofs); + ofs = vi->inode_isize - gotten; + memcpy((u8 *)copied + gotten, kaddr, ofs); die = copied; } vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount); @@ -91,11 +108,10 @@ static void *erofs_read_inode(struct erofs_buf *buf, inode->i_size = le64_to_cpu(die->i_size); kfree(copied); - copied = NULL; break; case EROFS_INODE_LAYOUT_COMPACT: vi->inode_isize = sizeof(struct erofs_inode_compact); - *ofs += vi->inode_isize; + ofs += vi->inode_isize; vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount); inode->i_mode = le16_to_cpu(dic->i_mode); @@ -120,6 +136,11 @@ static void *erofs_read_inode(struct erofs_buf *buf, case S_IFDIR: case S_IFLNK: vi->raw_blkaddr = le32_to_cpu(iu.raw_blkaddr); + if(S_ISLNK(inode->i_mode)) { + err = erofs_fill_symlink(inode, kaddr, ofs); + if (err) + goto err_out; + } break; case S_IFCHR: case S_IFBLK: @@ -164,51 +185,24 @@ static void *erofs_read_inode(struct erofs_buf *buf, inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9; else inode->i_blocks = nblks << (sb->s_blocksize_bits - 9); - return kaddr; err_out: - DBG_BUGON(1); - kfree(copied); - erofs_put_metabuf(buf); - return ERR_PTR(err); -} - -static int erofs_fill_symlink(struct inode *inode, void *kaddr, - unsigned int m_pofs) -{ - struct erofs_inode *vi = EROFS_I(inode); - loff_t off; - - m_pofs += vi->xattr_isize; - /* check if it cannot be handled with fast symlink scheme */ - if (vi->datalayout != EROFS_INODE_FLAT_INLINE || inode->i_size < 0 || - check_add_overflow(m_pofs, inode->i_size, &off) || - off > i_blocksize(inode)) { - inode->i_op = &erofs_symlink_iops; - return 0; - } - - inode->i_link = kmemdup_nul(kaddr + m_pofs, inode->i_size, GFP_KERNEL); - if (!inode->i_link) - return -ENOMEM; - inode->i_op = &erofs_fast_symlink_iops; - return 0; + DBG_BUGON(err); + erofs_put_metabuf(&buf); + return err; } 
static int erofs_fill_inode(struct inode *inode) { struct erofs_inode *vi = EROFS_I(inode); - struct erofs_buf buf = __EROFS_BUF_INITIALIZER; - void *kaddr; - unsigned int ofs; - int err = 0; + int err; trace_erofs_fill_inode(inode); /* read inode base data from disk */ - kaddr = erofs_read_inode(&buf, inode, &ofs); - if (IS_ERR(kaddr)) - return PTR_ERR(kaddr); + err = erofs_read_inode(inode); + if (err) + return err; /* setup the new inode */ switch (inode->i_mode & S_IFMT) { @@ -225,9 +219,10 @@ static int erofs_fill_inode(struct inode *inode) inode_nohighmem(inode); break; case S_IFLNK: - err = erofs_fill_symlink(inode, kaddr, ofs); - if (err) - goto out_unlock; + if (inode->i_link) + inode->i_op = &erofs_fast_symlink_iops; + else + inode->i_op = &erofs_symlink_iops; inode_nohighmem(inode); break; case S_IFCHR: @@ -236,10 +231,9 @@ static int erofs_fill_inode(struct inode *inode) case S_IFSOCK: inode->i_op = &erofs_generic_iops; init_special_inode(inode, inode->i_mode, inode->i_rdev); - goto out_unlock; + return 0; default: - err = -EFSCORRUPTED; - goto out_unlock; + return -EFSCORRUPTED; } mapping_set_large_folios(inode->i_mapping); @@ -263,8 +257,7 @@ static int erofs_fill_inode(struct inode *inode) inode->i_mapping->a_ops = &erofs_fileio_aops; #endif } -out_unlock: - erofs_put_metabuf(&buf); + return err; } -- Gitee From 551c6b87abb1e86df316f57536f707e3d6089c71 Mon Sep 17 00:00:00 2001 From: Hongzhen Luo Date: Thu, 5 Sep 2024 11:03:39 +0800 Subject: [PATCH 1959/2138] erofs: simplify erofs_map_blocks_flatmode() ANBZ: #11101 commit 8bdb6a8393dc32e3ab2cf89081e5b0f95cb7221b upstream. Get rid of redundant variables (nblocks, offset) and a dead branch (!tailendpacking). 
Signed-off-by: Hongzhen Luo Reviewed-by: Gao Xiang Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20240905030339.1474396-1-hongzhen@linux.alibaba.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/data.c | 28 +++++++++------------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index b50eca31bd54..79bdd5abe30d 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -79,38 +79,28 @@ void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb, static int erofs_map_blocks_flatmode(struct inode *inode, struct erofs_map_blocks *map) { - erofs_blk_t nblocks, lastblk; - u64 offset = map->m_la; struct erofs_inode *vi = EROFS_I(inode); struct super_block *sb = inode->i_sb; bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE); + erofs_blk_t lastblk = erofs_iblks(inode) - tailendpacking; - nblocks = erofs_iblks(inode); - lastblk = nblocks - tailendpacking; - - /* there is no hole in flatmode */ - map->m_flags = EROFS_MAP_MAPPED; - if (offset < erofs_pos(sb, lastblk)) { + map->m_flags = EROFS_MAP_MAPPED; /* no hole in flat inodes */ + if (map->m_la < erofs_pos(sb, lastblk)) { map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la; - map->m_plen = erofs_pos(sb, lastblk) - offset; - } else if (tailendpacking) { + map->m_plen = erofs_pos(sb, lastblk) - map->m_la; + } else { + DBG_BUGON(!tailendpacking); map->m_pa = erofs_iloc(inode) + vi->inode_isize + - vi->xattr_isize + erofs_blkoff(sb, offset); - map->m_plen = inode->i_size - offset; + vi->xattr_isize + erofs_blkoff(sb, map->m_la); + map->m_plen = inode->i_size - map->m_la; /* inline data should be located in the same meta block */ if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) { - erofs_err(sb, "inline data cross block boundary @ nid %llu", - vi->nid); + erofs_err(sb, "inline data across blocks @ nid %llu", vi->nid); DBG_BUGON(1); 
return -EFSCORRUPTED; } map->m_flags |= EROFS_MAP_META; - } else { - erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx", - vi->nid, inode->i_size, map->m_la); - DBG_BUGON(1); - return -EIO; } return 0; } -- Gitee From 02208e6bfb71b7610aa09d2b9272a372d2e8b191 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 5 Sep 2024 16:47:32 +0800 Subject: [PATCH 1960/2138] erofs: sunset unneeded NOFAILs ANBZ: #11101 commit 2349d2fa02db19ebc5e9033ddc3ed09e22c4abb5 upstream. With iterative development, our codebase can now deal with compressed buffer misses properly if both in-place I/O and compressed buffer allocation fail. Note that if readahead fails (with non-uptodate folios), the original request will then fall back to synchronous read, and `.read_folio()` should return appropriate errnos; otherwise -EIO will be passed to user space, which is unexpected. To simplify rarely encountered failure paths, a mimic decompression will be just used. Before that, failure reasons are recorded in compressed_bvecs[] and they also act as placeholders to avoid in-place pages. They will be parsed just before decompression and then pass back to `.read_folio()`. 
Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240905084732.2684515-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/zdata.c | 57 ++++++++++++++++++++++++++---------------------- 1 file changed, 31 insertions(+), 26 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index dca11ab0ab75..b720dba14edd 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1154,9 +1154,10 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be, struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i]; struct page *page = bvec->page; - /* compressed data ought to be valid before decompressing */ - if (!page) { - err = -EIO; + /* compressed data ought to be valid when decompressing */ + if (IS_ERR(page) || !page) { + bvec->page = NULL; /* clear the failure reason */ + err = page ? PTR_ERR(page) : -EIO; continue; } be->compressed_pages[i] = page; @@ -1232,8 +1233,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, .inplace_io = overlapped, .partial_decoding = pcl->partial, .fillgaps = pcl->multibases, - .gfp = pcl->besteffort ? - GFP_KERNEL | __GFP_NOFAIL : + .gfp = pcl->besteffort ? GFP_KERNEL : GFP_NOWAIT | __GFP_NORETRY }, be->pagepool); @@ -1297,8 +1297,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, return err; } -static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, - struct page **pagepool) +static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, + struct page **pagepool) { struct z_erofs_decompress_backend be = { .sb = io->sb, @@ -1307,6 +1307,7 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, LIST_HEAD_INIT(be.decompressed_secondary_bvecs), }; z_erofs_next_pcluster_t owned = io->head; + int err = io->eio ? 
-EIO : 0; while (owned != Z_EROFS_PCLUSTER_TAIL) { DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL); @@ -1314,12 +1315,13 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, be.pcl = container_of(owned, struct z_erofs_pcluster, next); owned = READ_ONCE(be.pcl->next); - z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0); + err = z_erofs_decompress_pcluster(&be, err) ?: err; if (z_erofs_is_inline_pcluster(be.pcl)) z_erofs_free_pcluster(be.pcl); else erofs_workgroup_put(&be.pcl->obj); } + return err; } static void z_erofs_decompressqueue_work(struct work_struct *work) @@ -1462,17 +1464,21 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, folio_unlock(folio); folio_put(folio); out_allocfolio: - page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); + page = erofs_allocpage(&f->pagepool, gfp); spin_lock(&pcl->obj.lockref.lock); if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) { - erofs_pagepool_add(&f->pagepool, page); + if (page) + erofs_pagepool_add(&f->pagepool, page); spin_unlock(&pcl->obj.lockref.lock); cond_resched(); goto repeat; } - bvec->bv_page = pcl->compressed_bvecs[nr].page = page; - folio = page_folio(page); + pcl->compressed_bvecs[nr].page = page ? 
page : ERR_PTR(-ENOMEM); spin_unlock(&pcl->obj.lockref.lock); + bvec->bv_page = page; + if (!page) + return; + folio = page_folio(page); out_tocache: if (!tocache || bs != PAGE_SIZE || filemap_add_folio(mc, folio, pcl->obj.index + nr, gfp)) { @@ -1698,26 +1704,28 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios); } -static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f, - bool force_fg, bool ra) +static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f, + unsigned int ra_folios) { struct z_erofs_decompressqueue io[NR_JOBQUEUES]; + struct erofs_sb_info *sbi = EROFS_I_SB(f->inode); + bool force_fg = z_erofs_is_sync_decompress(sbi, ra_folios); + int err; if (f->owned_head == Z_EROFS_PCLUSTER_TAIL) - return; - z_erofs_submit_queue(f, io, &force_fg, ra); + return 0; + z_erofs_submit_queue(f, io, &force_fg, !!ra_folios); /* handle bypass queue (no i/o pclusters) immediately */ - z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool); - + err = z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool); if (!force_fg) - return; + return err; /* wait until all bios are completed */ wait_for_completion_io(&io[JQ_SUBMIT].u.done); /* handle synchronous decompress queue in the caller context */ - z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool); + return z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool) ?: err; } /* @@ -1779,7 +1787,6 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, static int z_erofs_read_folio(struct file *file, struct folio *folio) { struct inode *const inode = folio->mapping->host; - struct erofs_sb_info *const sbi = EROFS_I_SB(inode); struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); int err; @@ -1791,9 +1798,8 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio) z_erofs_pcluster_readmore(&f, NULL, false); z_erofs_pcluster_end(&f); - /* if some compressed cluster ready, need submit 
them anyway */ - z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false); - + /* if some pclusters are ready, need submit them anyway */ + err = z_erofs_runqueue(&f, 0) ?: err; if (err && err != -EINTR) erofs_err(inode->i_sb, "read error %d @ %lu of nid %llu", err, folio->index, EROFS_I(inode)->nid); @@ -1806,7 +1812,6 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio) static void z_erofs_readahead(struct readahead_control *rac) { struct inode *const inode = rac->mapping->host; - struct erofs_sb_info *const sbi = EROFS_I_SB(inode); struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); struct folio *head = NULL, *folio; unsigned int nr_folios; @@ -1836,7 +1841,7 @@ static void z_erofs_readahead(struct readahead_control *rac) z_erofs_pcluster_readmore(&f, rac, false); z_erofs_pcluster_end(&f); - z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_folios), true); + (void)z_erofs_runqueue(&f, nr_folios); erofs_put_metabuf(&f.map.buf); erofs_release_pages(&f.pagepool); } -- Gitee From eb422efec67f760ddb757df28bc3282db88729a3 Mon Sep 17 00:00:00 2001 From: Chunhai Guo Date: Fri, 6 Sep 2024 06:11:10 -0600 Subject: [PATCH 1961/2138] erofs: allocate more short-lived pages from reserved pool first ANBZ: #11101 commit 79f504a2cd3c0b7d953d0015618a2a41559a2cfd upstream. This patch aims to allocate bvpages and short-lived compressed pages from the reserved pool first. After applying this patch, there are three benefits. 1. It reduces the page allocation time. The bvpages and short-lived compressed pages account for about 4% of the pages allocated from the system in the multi-app launch benchmarks [1]. It reduces the page allocation time accordingly and lowers the likelihood of blockage by page allocation in low memory scenarios. 2. The pages in the reserved pool will be allocated on demand. 
Currently, bvpages and short-lived compressed pages are short-lived pages allocated from the system, and the pages in the reserved pool all originate from short-lived pages. Consequently, the number of reserved pool pages will increase to z_erofs_rsv_nrpages over time. With this patch, all short-lived pages are allocated from the reserved pool first, so the number of reserved pool pages will only increase when there are not enough pages. Thus, even if z_erofs_rsv_nrpages is set to a large number for specific reasons, the actual number of reserved pool pages may remain low as per demand. In the multi-app launch benchmarks [1], z_erofs_rsv_nrpages is set at 256, while the number of reserved pool pages remains below 64. 3. When erofs cache decompression is disabled (EROFS_ZIP_CACHE_DISABLED), all pages will *only* be allocated from the reserved pool for erofs. This will significantly reduce the memory pressure from erofs. [1] For additional details on the multi-app launch benchmarks, please refer to commit 0f6273ab4637 ("erofs: add a reserved buffer pool for lz4 decompression"). 
Signed-off-by: Chunhai Guo Reviewed-by: Gao Xiang Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20240906121110.3701889-1-guochunhai@vivo.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/zdata.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index b720dba14edd..8936790618c6 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -196,7 +196,8 @@ static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter, struct page *nextpage = *candidate_bvpage; if (!nextpage) { - nextpage = erofs_allocpage(pagepool, GFP_KERNEL); + nextpage = __erofs_allocpage(pagepool, GFP_KERNEL, + true); if (!nextpage) return -ENOMEM; set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE); @@ -1464,7 +1465,7 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec, folio_unlock(folio); folio_put(folio); out_allocfolio: - page = erofs_allocpage(&f->pagepool, gfp); + page = __erofs_allocpage(&f->pagepool, gfp, true); spin_lock(&pcl->obj.lockref.lock); if (unlikely(pcl->compressed_bvecs[nr].page != zbv.page)) { if (page) -- Gitee From 82a978efa84b3f5d323bea5642f020e88ba8bfd6 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 12 Sep 2024 15:41:56 +0800 Subject: [PATCH 1962/2138] erofs: restrict pcluster size limitations ANBZ: #11101 commit 7c3ca1838a7831855cbf2e6927a10e0e4723edf6 upstream. Error out if {en,de}encoded size of a pcluster is unsupported: Maximum supported encoded size (of a pcluster): 1 MiB Maximum supported decoded size (of a pcluster): 12 MiB Users can still choose to use supported large configurations (e.g., for archival purposes), but there may be performance penalties in low-memory scenarios compared to smaller pclusters. 
Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240912074156.2925394-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/erofs_fs.h | 5 ++++- fs/erofs/zmap.c | 42 ++++++++++++++++++++---------------------- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index 6c0c270c42e1..c8f2ae845bd2 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -288,9 +288,12 @@ struct erofs_dirent { #define EROFS_NAME_LEN 255 -/* maximum supported size of a physical compression cluster */ +/* maximum supported encoded size of a physical compressed cluster */ #define Z_EROFS_PCLUSTER_MAX_SIZE (1024 * 1024) +/* maximum supported decoded size of a physical compressed cluster */ +#define Z_EROFS_PCLUSTER_MAX_DSIZE (12 * 1024 * 1024) + /* available compression algorithm types (for h_algorithmtype) */ enum { Z_EROFS_COMPRESSION_LZ4 = 0, diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 8d28cfc6a4b8..4704241341ef 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -688,32 +688,30 @@ int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, int err = 0; trace_erofs_map_blocks_enter(inode, map, flags); - - /* when trying to read beyond EOF, leave it unmapped */ - if (map->m_la >= inode->i_size) { + if (map->m_la >= inode->i_size) { /* post-EOF unmapped extent */ map->m_llen = map->m_la + 1 - inode->i_size; map->m_la = inode->i_size; map->m_flags = 0; - goto out; - } - - err = z_erofs_fill_inode_lazy(inode); - if (err) - goto out; - - if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) && - !vi->z_tailextent_headlcn) { - map->m_la = 0; - map->m_llen = inode->i_size; - map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_FULL_MAPPED | - EROFS_MAP_FRAGMENT; - goto out; + } else { + err = z_erofs_fill_inode_lazy(inode); + if (!err) { + if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) && 
+ !vi->z_tailextent_headlcn) { + map->m_la = 0; + map->m_llen = inode->i_size; + map->m_flags = EROFS_MAP_MAPPED | + EROFS_MAP_FULL_MAPPED | EROFS_MAP_FRAGMENT; + } else { + err = z_erofs_do_map_blocks(inode, map, flags); + } + } + if (!err && (map->m_flags & EROFS_MAP_ENCODED) && + unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE || + map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE)) + err = -EOPNOTSUPP; + if (err) + map->m_llen = 0; } - - err = z_erofs_do_map_blocks(inode, map, flags); -out: - if (err) - map->m_llen = 0; trace_erofs_map_blocks_exit(inode, map, flags, err); return err; } -- Gitee From 46fd5cc7c9b9da25ded0040a89327e157d8ed319 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 12 Sep 2024 16:35:38 +0800 Subject: [PATCH 1963/2138] erofs: reject inodes with negative i_size ANBZ: #11101 commit 025497e1d176a9e063d1e60699527e2f3a871935 upstream. Negative i_size is never supported, although crafted images with inodes having negative i_size will NOT lead to security issues in our current codebase: The following image can verify this (gzip+base64 encoded): H4sICCmk4mYAA3Rlc3QuaW1nAGNgGAWjYBSMVPDo4dcH3jP2aTED2TwMKgxMUHHNJY/SQDQX LxcDIw3tZwXit44MDNpQ/n8gQJZ/vxjijosPuSyZ0DUDgQqcZoKzVYFsDShbHeh6PT29ktTi Eqz2g/y2pBFiLxDMh4lhs5+W4TAKRsEoGAWjYBSMglEwCkYBPQAAS2DbowAQAAA= Mark as bad inodes for such corrupted inodes explicitly. 
Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240912083538.3011860-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/inode.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index f4a9c3b300a3..a0e1b6ad8c50 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -5,7 +5,6 @@ * Copyright (C) 2021, Alibaba Cloud */ #include "xattr.h" - #include static int erofs_fill_symlink(struct inode *inode, void *kaddr, @@ -16,7 +15,7 @@ static int erofs_fill_symlink(struct inode *inode, void *kaddr, m_pofs += vi->xattr_isize; /* check if it cannot be handled with fast symlink scheme */ - if (vi->datalayout != EROFS_INODE_FLAT_INLINE || inode->i_size < 0 || + if (vi->datalayout != EROFS_INODE_FLAT_INLINE || check_add_overflow(m_pofs, inode->i_size, &off) || off > i_blocksize(inode)) return 0; @@ -131,6 +130,11 @@ static int erofs_read_inode(struct inode *inode) goto err_out; } + if (unlikely(inode->i_size < 0)) { + erofs_err(sb, "negative i_size @ nid %llu", vi->nid); + err = -EFSCORRUPTED; + goto err_out; + } switch (inode->i_mode & S_IFMT) { case S_IFREG: case S_IFDIR: @@ -185,7 +189,6 @@ static int erofs_read_inode(struct inode *inode) inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9; else inode->i_blocks = nblks << (sb->s_blocksize_bits - 9); - err_out: DBG_BUGON(err); erofs_put_metabuf(&buf); -- Gitee From 1a7c63aed498194aece0403d82a87884e2c2c7da Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Tue, 17 Sep 2024 21:08:03 +0800 Subject: [PATCH 1964/2138] erofs: ensure regular inodes for file-backed mounts ANBZ: #11101 commit 416a8b2c02fe2a5a9fbdf2a35ea294b78d939f84 upstream. Only regular inodes are allowed for file-backed mounts, not directories (as seen in the original syzbot case) or special inodes. 
Also ensure that .read_folio() is implemented on the underlying fs for the primary device. Fixes: fb176750266a ("erofs: add file-backed mount support") Reported-by: syzbot+001306cd9c92ce0df23f@syzkaller.appspotmail.com Closes: https://lore.kernel.org/r/00000000000011bdde0622498ee3@google.com Tested-by: syzbot+001306cd9c92ce0df23f@syzkaller.appspotmail.com Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20240917130803.32418-1-hsiangkao@linux.alibaba.com Conflicts: 1. fs/erofs/super.c 1) erofs_init_device() Resolution: 1. fs/erofs/super.c 1) erofs_init_device(): Reimplement according to the intent of the upstream patch. Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/super.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 22f342b335f7..1be0bd8f3c09 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -189,6 +189,10 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, file = filp_open(dif->path, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(file)) return PTR_ERR(file); + if (!S_ISREG(file_inode(file)->i_mode)) { + fput(file); + return -EINVAL; + } dif->file = file; } else { bdev_handle = bdev_open_by_path(dif->path, BLK_OPEN_READ, @@ -722,7 +726,10 @@ static int erofs_fc_get_tree(struct fs_context *fc) if (IS_ERR(sbi->fdev)) return PTR_ERR(sbi->fdev); - return get_tree_nodev(fc, erofs_fc_fill_super); + if (S_ISREG(file_inode(sbi->fdev)->i_mode) && + sbi->fdev->f_mapping->a_ops->read_folio) + return get_tree_nodev(fc, erofs_fc_fill_super); + fput(sbi->fdev); } #endif return ret; -- Gitee From e4975563932ddce8c27397d8cc9e5da684c3623d Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 10 Oct 2024 17:04:19 +0800 Subject: [PATCH 1965/2138] erofs: get rid of z_erofs_try_to_claim_pcluster() ANBZ: #11101 commit 2402082e5332a2d27be82b4a2bb42490f9c5134b upstream. 
Just fold it into the caller for simplicity. Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20241010090420.405871-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/zdata.c | 29 +++++++++-------------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 8936790618c6..a569ff9dfd04 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -710,24 +710,6 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe, return ret; } -static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f) -{ - struct z_erofs_pcluster *pcl = f->pcl; - z_erofs_next_pcluster_t *owned_head = &f->owned_head; - - /* type 1, nil pcluster (this pcluster doesn't belong to any chain.) */ - if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL, - *owned_head) == Z_EROFS_PCLUSTER_NIL) { - *owned_head = &pcl->next; - /* so we can attach this pcluster to our submission chain. */ - f->mode = Z_EROFS_PCLUSTER_FOLLOWED; - return; - } - - /* type 2, it belongs to an ongoing chain */ - f->mode = Z_EROFS_PCLUSTER_INFLIGHT; -} - static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) { struct erofs_map_blocks *map = &fe->map; @@ -803,7 +785,6 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) int ret; DBG_BUGON(fe->pcl); - /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */ DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL); @@ -823,7 +804,15 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) if (ret == -EEXIST) { mutex_lock(&fe->pcl->lock); - z_erofs_try_to_claim_pcluster(fe); + /* check if this pcluster hasn't been linked into any chain. */ + if (cmpxchg(&fe->pcl->next, Z_EROFS_PCLUSTER_NIL, + fe->owned_head) == Z_EROFS_PCLUSTER_NIL) { + /* .. 
so it can be attached to our submission chain */ + fe->owned_head = &fe->pcl->next; + fe->mode = Z_EROFS_PCLUSTER_FOLLOWED; + } else { /* otherwise, it belongs to an inflight chain */ + fe->mode = Z_EROFS_PCLUSTER_INFLIGHT; + } } else if (ret) { return ret; } -- Gitee From c081310b72cb37cb0a8ea9772728e3533de685ba Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 11 Oct 2024 07:58:30 +0800 Subject: [PATCH 1966/2138] erofs: get rid of kaddr in `struct z_erofs_maprecorder` ANBZ: #11101 commit ae54567eaa87fd863ab61084a3828e1c36b0ffb0 upstream. `kaddr` becomes useless after switching to metabuf. Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20241010235830.1535616-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/zmap.c | 32 ++++++++++++-------------------- 1 file changed, 12 insertions(+), 20 deletions(-) diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 4704241341ef..60414b8028e4 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -10,8 +10,6 @@ struct z_erofs_maprecorder { struct inode *inode; struct erofs_map_blocks *map; - void *kaddr; - unsigned long lcn; /* compression extent information gathered */ u8 type, headtype; @@ -33,14 +31,11 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m, struct z_erofs_lcluster_index *di; unsigned int advise; - m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb, - pos, EROFS_KMAP); - if (IS_ERR(m->kaddr)) - return PTR_ERR(m->kaddr); - - m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index); + di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, EROFS_KMAP); + if (IS_ERR(di)) + return PTR_ERR(di); m->lcn = lcn; - di = m->kaddr; + m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index); advise = le16_to_cpu(di->di_advise); m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK; @@ -53,8 +48,7 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m, 
DBG_BUGON(1); return -EFSCORRUPTED; } - m->compressedblks = m->delta[0] & - ~Z_EROFS_LI_D0_CBLKCNT; + m->compressedblks = m->delta[0] & ~Z_EROFS_LI_D0_CBLKCNT; m->delta[0] = 1; } m->delta[1] = le16_to_cpu(di->di_u.delta[1]); @@ -110,9 +104,9 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, struct erofs_inode *const vi = EROFS_I(m->inode); const unsigned int lclusterbits = vi->z_logical_clusterbits; unsigned int vcnt, lo, lobits, encodebits, nblk, bytes; - int i; - u8 *in, type; bool big_pcluster; + u8 *in, type; + int i; if (1 << amortizedshift == 4 && lclusterbits <= 14) vcnt = 2; @@ -121,6 +115,10 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, else return -EOPNOTSUPP; + in = erofs_read_metabuf(&m->map->buf, m->inode->i_sb, pos, EROFS_KMAP); + if (IS_ERR(in)) + return PTR_ERR(in); + /* it doesn't equal to round_up(..) */ m->nextpackoff = round_down(pos, vcnt << amortizedshift) + (vcnt << amortizedshift); @@ -128,9 +126,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U); encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt; bytes = pos & ((vcnt << amortizedshift) - 1); - - in = m->kaddr - bytes; - + in -= bytes; i = bytes >> amortizedshift; lo = decode_compactedbits(lobits, in, encodebits * i, &type); @@ -255,10 +251,6 @@ static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m, amortizedshift = 2; out: pos += lcn * (1 << amortizedshift); - m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb, - pos, EROFS_KMAP); - if (IS_ERR(m->kaddr)) - return PTR_ERR(m->kaddr); return unpack_compacted_index(m, amortizedshift, pos, lookahead); } -- Gitee From 1c2c26608c79a5c3a6d617303d2c22694452a0f7 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 9 Oct 2024 11:31:50 +0800 Subject: [PATCH 1967/2138] fs/super.c: introduce get_tree_bdev_flags() ANBZ: #11101 commit 4021e685139d567b3fc862f54101ae9dbb15d8b5 upstream. 
As Allison reported [1], currently get_tree_bdev() will store "Can't lookup blockdev" error message. Although it makes sense for pure bdev-based fses, this message may mislead users who try to use EROFS file-backed mounts since get_tree_nodev() is used as a fallback then. Add get_tree_bdev_flags() to specify extensible flags [2] and GET_TREE_BDEV_QUIET_LOOKUP to silence "Can't lookup blockdev" message since it's misleading to EROFS file-backed mounts now. [1] https://lore.kernel.org/r/CAOYeF9VQ8jKVmpy5Zy9DNhO6xmWSKMB-DO8yvBB0XvBE7=3Ugg@mail.gmail.com [2] https://lore.kernel.org/r/ZwUkJEtwIpUA4qMz@infradead.org Suggested-by: Christoph Hellwig Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20241009033151.2334888-1-hsiangkao@linux.alibaba.com Reviewed-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Christian Brauner Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/super.c | 26 ++++++++++++++++++++------ include/linux/fs_context.h | 6 ++++++ 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/fs/super.c b/fs/super.c index b142e71eb8df..f2cea05a0052 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1544,13 +1544,14 @@ int setup_bdev_super(struct super_block *sb, int sb_flags, EXPORT_SYMBOL_GPL(setup_bdev_super); /** - * get_tree_bdev - Get a superblock based on a single block device + * get_tree_bdev_flags - Get a superblock based on a single block device * @fc: The filesystem context holding the parameters * @fill_super: Helper to initialise a new superblock + * @flags: GET_TREE_BDEV_* flags */ -int get_tree_bdev(struct fs_context *fc, - int (*fill_super)(struct super_block *, - struct fs_context *)) +int get_tree_bdev_flags(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc), unsigned int flags) { struct super_block *s; int error = 0; @@ -1561,10 +1562,10 @@ int get_tree_bdev(struct fs_context *fc, error = lookup_bdev(fc->source, &dev); if 
(error) { - errorf(fc, "%s: Can't lookup blockdev", fc->source); + if (!(flags & GET_TREE_BDEV_QUIET_LOOKUP)) + errorf(fc, "%s: Can't lookup blockdev", fc->source); return error; } - fc->sb_flags |= SB_NOSEC; s = sget_dev(fc, dev); if (IS_ERR(s)) @@ -1600,6 +1601,19 @@ int get_tree_bdev(struct fs_context *fc, fc->root = dget(s->s_root); return 0; } +EXPORT_SYMBOL_GPL(get_tree_bdev_flags); + +/** + * get_tree_bdev - Get a superblock based on a single block device + * @fc: The filesystem context holding the parameters + * @fill_super: Helper to initialise a new superblock + */ +int get_tree_bdev(struct fs_context *fc, + int (*fill_super)(struct super_block *, + struct fs_context *)) +{ + return get_tree_bdev_flags(fc, fill_super, 0); +} EXPORT_SYMBOL(get_tree_bdev); static int test_bdev_super(struct super_block *s, void *data) diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index c13e99cbbf81..4b4bfef6f053 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -160,6 +160,12 @@ extern int get_tree_keyed(struct fs_context *fc, int setup_bdev_super(struct super_block *sb, int sb_flags, struct fs_context *fc); + +#define GET_TREE_BDEV_QUIET_LOOKUP 0x0001 +int get_tree_bdev_flags(struct fs_context *fc, + int (*fill_super)(struct super_block *sb, + struct fs_context *fc), unsigned int flags); + extern int get_tree_bdev(struct fs_context *fc, int (*fill_super)(struct super_block *sb, struct fs_context *fc)); -- Gitee From 96670680f839303cdebc074278a4226cee7d42ec Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Wed, 9 Oct 2024 11:31:51 +0800 Subject: [PATCH 1968/2138] erofs: use get_tree_bdev_flags() to avoid misleading messages ANBZ: #11101 commit 14c2d97265ea5989000c428dbb7321cbd4a85f9b upstream. Users can pass in an arbitrary source path for the proper type of a mount then without "Can't lookup blockdev" error message. 
Reported-by: Allison Karlitskaya Closes: https://lore.kernel.org/r/CAOYeF9VQ8jKVmpy5Zy9DNhO6xmWSKMB-DO8yvBB0XvBE7=3Ugg@mail.gmail.com Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20241009033151.2334888-2-hsiangkao@linux.alibaba.com Signed-off-by: Christian Brauner Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/super.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 1be0bd8f3c09..1d3e5ca82fc2 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -717,7 +717,9 @@ static int erofs_fc_get_tree(struct fs_context *fc) if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) return get_tree_nodev(fc, erofs_fc_fill_super); - ret = get_tree_bdev(fc, erofs_fc_fill_super); + ret = get_tree_bdev_flags(fc, erofs_fc_fill_super, + IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) ? + GET_TREE_BDEV_QUIET_LOOKUP : 0); #ifdef CONFIG_EROFS_FS_BACKED_BY_FILE if (ret == -ENOTBLK) { if (!fc->source) -- Gitee From 9ec47bd6ca1c34f0b18458b361d3f99c5dae366a Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 11 Oct 2024 14:51:28 +0800 Subject: [PATCH 1969/2138] erofs: add SEEK_{DATA,HOLE} support ANBZ: #11101 commit 83a8836fa19a3930da72aaef553cbecd36a2966a upstream. Many userspace programs (including erofs-utils itself) use SEEK_DATA / SEEK_HOLE to parse hole extents in addition to FIEMAP. 
Reviewed-by: Chao Yu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20241011065128.2097377-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/data.c | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 79bdd5abe30d..4daab86138ed 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -472,8 +472,32 @@ static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma) #define erofs_file_mmap generic_file_readonly_mmap #endif +static loff_t erofs_file_llseek(struct file *file, loff_t offset, int whence) +{ + struct inode *inode = file->f_mapping->host; + const struct iomap_ops *ops = &erofs_iomap_ops; + + if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) +#ifdef CONFIG_EROFS_FS_ZIP + ops = &z_erofs_iomap_report_ops; +#else + return generic_file_llseek(file, offset, whence); +#endif + + if (whence == SEEK_HOLE) + offset = iomap_seek_hole(inode, offset, ops); + else if (whence == SEEK_DATA) + offset = iomap_seek_data(inode, offset, ops); + else + return generic_file_llseek(file, offset, whence); + + if (offset < 0) + return offset; + return vfs_setpos(file, offset, inode->i_sb->s_maxbytes); +} + const struct file_operations erofs_file_fops = { - .llseek = generic_file_llseek, + .llseek = erofs_file_llseek, .read_iter = erofs_file_read_iter, .mmap = erofs_file_mmap, .get_unmapped_area = thp_get_unmapped_area, -- Gitee From 72f8074be034be0ed548f37586b461c4dc74b39f Mon Sep 17 00:00:00 2001 From: Gou Hao Date: Thu, 14 Nov 2024 09:32:47 +0800 Subject: [PATCH 1970/2138] erofs: simplify definition of the log functions ANBZ: #11101 commit 90655ee279b299c33dc51be87787b54222d3d2fb upstream. Use printk instead of pr_info/err to reduce redundant code. 
Signed-off-by: Gou Hao Reviewed-by: Gao Xiang Link: https://lore.kernel.org/r/20241114013247.30821-1-gouhao@uniontech.com Signed-off-by: Gao Xiang Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/internal.h | 14 ++++---------- fs/erofs/super.c | 29 +++++++---------------------- 2 files changed, 11 insertions(+), 32 deletions(-) diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index ea1f9afe88db..d43b1d3e6c94 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -20,18 +20,12 @@ #include #include "erofs_fs.h" -/* redefine pr_fmt "erofs: " */ -#undef pr_fmt -#define pr_fmt(fmt) "erofs: " fmt - -__printf(3, 4) void _erofs_err(struct super_block *sb, - const char *function, const char *fmt, ...); +__printf(2, 3) void _erofs_printk(struct super_block *sb, const char *fmt, ...); #define erofs_err(sb, fmt, ...) \ - _erofs_err(sb, __func__, fmt "\n", ##__VA_ARGS__) -__printf(3, 4) void _erofs_info(struct super_block *sb, - const char *function, const char *fmt, ...); + _erofs_printk(sb, KERN_ERR fmt "\n", ##__VA_ARGS__) #define erofs_info(sb, fmt, ...) \ - _erofs_info(sb, __func__, fmt "\n", ##__VA_ARGS__) + _erofs_printk(sb, KERN_INFO fmt "\n", ##__VA_ARGS__) + #ifdef CONFIG_EROFS_FS_DEBUG #define DBG_BUGON BUG_ON #else diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 1d3e5ca82fc2..96c77f5e0971 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -18,37 +18,22 @@ static struct kmem_cache *erofs_inode_cachep __read_mostly; -void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...) +void _erofs_printk(struct super_block *sb, const char *fmt, ...) 
{ struct va_format vaf; va_list args; + int level; va_start(args, fmt); - vaf.fmt = fmt; + level = printk_get_level(fmt); + vaf.fmt = printk_skip_level(fmt); vaf.va = &args; - - if (sb) - pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf); - else - pr_err("%s: %pV", func, &vaf); - va_end(args); -} - -void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...) -{ - struct va_format vaf; - va_list args; - - va_start(args, fmt); - - vaf.fmt = fmt; - vaf.va = &args; - if (sb) - pr_info("(device %s): %pV", sb->s_id, &vaf); + printk("%c%cerofs (device %s): %pV", + KERN_SOH_ASCII, level, sb->s_id, &vaf); else - pr_info("%pV", &vaf); + printk("%c%cerofs: %pV", KERN_SOH_ASCII, level, &vaf); va_end(args); } -- Gitee From 19aede442cf6092b8007cb699994168026b16302 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 15 Nov 2024 07:49:05 +0800 Subject: [PATCH 1971/2138] erofs: fix file-backed mounts over FUSE ANBZ: #11101 commit 3a23787ca8756920d65fda39f41353a4be1d1642 upstream. syzbot reported a null-ptr-deref in fuse_read_args_fill: fuse_read_folio+0xb0/0x100 fs/fuse/file.c:905 filemap_read_folio+0xc6/0x2a0 mm/filemap.c:2367 do_read_cache_folio+0x263/0x5c0 mm/filemap.c:3825 read_mapping_folio include/linux/pagemap.h:1011 [inline] erofs_bread+0x34d/0x7e0 fs/erofs/data.c:41 erofs_read_superblock fs/erofs/super.c:281 [inline] erofs_fc_fill_super+0x2b9/0x2500 fs/erofs/super.c:625 Unlike most filesystems, some network filesystems and FUSE need unavoidable valid `file` pointers for their read I/Os [1]. Anyway, those use cases need to be supported too. 
[1] https://docs.kernel.org/filesystems/vfs.html Reported-by: syzbot+0b1279812c46e48bb0c1@syzkaller.appspotmail.com Closes: https://lore.kernel.org/r/6727bbdf.050a0220.3c8d68.0a7e.GAE@google.com Fixes: fb176750266a ("erofs: add file-backed mount support") Tested-by: syzbot+0b1279812c46e48bb0c1@syzkaller.appspotmail.com Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20241114234905.1873723-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/data.c | 10 ++++++---- fs/erofs/internal.h | 1 + 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 4daab86138ed..4cb42d698f52 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -38,7 +38,7 @@ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, } if (!folio || !folio_contains(folio, index)) { erofs_put_metabuf(buf); - folio = read_mapping_folio(buf->mapping, index, NULL); + folio = read_mapping_folio(buf->mapping, index, buf->file); if (IS_ERR(folio)) return folio; } @@ -61,9 +61,11 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb) { struct erofs_sb_info *sbi = EROFS_SB(sb); - if (erofs_is_fileio_mode(sbi)) - buf->mapping = file_inode(sbi->fdev)->i_mapping; - else if (erofs_is_fscache_mode(sb)) + buf->file = NULL; + if (erofs_is_fileio_mode(sbi)) { + buf->file = sbi->fdev; /* some fs like FUSE needs it */ + buf->mapping = buf->file->f_mapping; + } else if (erofs_is_fscache_mode(sb)) buf->mapping = sbi->s_fscache->inode->i_mapping; else buf->mapping = sb->s_bdev->bd_inode->i_mapping; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index d43b1d3e6c94..39dddbf55eee 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -216,6 +216,7 @@ enum erofs_kmap_type { struct erofs_buf { struct address_space *mapping; + struct file *file; struct page *page; void *base; enum erofs_kmap_type kmap_type; -- Gitee From 
43f278ff2a9560cb62b5d992f0edf34863564634 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 14 Nov 2024 17:58:13 +0800 Subject: [PATCH 1972/2138] erofs: get rid of `buf->kmap_type` ANBZ: #11101 commit ec4f59d1a99de86e5c14cf97946e94d5cef98ab0 upstream. After commit 927e5010ff5b ("erofs: use kmap_local_page() only for erofs_bread()"), `buf->kmap_type` actually has no use at all. Let's get rid of `buf->kmap_type` now. Suggested-by: Al Viro Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20241114095813.839866-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/data.c | 18 +++++------------- fs/erofs/internal.h | 1 - 2 files changed, 5 insertions(+), 14 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 4cb42d698f52..85710a566a03 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -10,10 +10,10 @@ void erofs_unmap_metabuf(struct erofs_buf *buf) { - if (buf->kmap_type == EROFS_KMAP) - kunmap_local(buf->base); + if (!buf->base) + return; + kunmap_local(buf->base); buf->base = NULL; - buf->kmap_type = EROFS_NO_KMAP; } void erofs_put_metabuf(struct erofs_buf *buf) @@ -43,15 +43,8 @@ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset, return folio; } buf->page = folio_file_page(folio, index); - - if (buf->kmap_type == EROFS_NO_KMAP) { - if (type == EROFS_KMAP) - buf->base = kmap_local_page(buf->page); - buf->kmap_type = type; - } else if (buf->kmap_type != type) { - DBG_BUGON(1); - return ERR_PTR(-EFAULT); - } + if (!buf->base && type == EROFS_KMAP) + buf->base = kmap_local_page(buf->page); if (type == EROFS_NO_KMAP) return NULL; return buf->base + (offset & ~PAGE_MASK); @@ -351,7 +344,6 @@ static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length, struct erofs_buf buf = { .page = kmap_to_page(ptr), .base = ptr, - .kmap_type = EROFS_KMAP, }; DBG_BUGON(iomap->type != IOMAP_INLINE); diff --git a/fs/erofs/internal.h 
b/fs/erofs/internal.h index 39dddbf55eee..12ee38400aa2 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -219,7 +219,6 @@ struct erofs_buf { struct file *file; struct page *page; void *base; - enum erofs_kmap_type kmap_type; }; #define __EROFS_BUF_INITIALIZER ((struct erofs_buf){ .page = NULL }) -- Gitee From 1f2ab833834dadfef7a968f781eee8ca6eba734f Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 15 Nov 2024 15:46:25 +0800 Subject: [PATCH 1973/2138] erofs: clarify direct I/O support ANBZ: #11101 commit b49c0215b176e9c2e0998e7929eeb9261c9a7919 upstream. Currently, only filesystems backed by block devices support direct I/O. Also remove the unnecessary strict checks that can be supported with iomap. Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20241115074625.2520728-1-hsiangkao@linux.alibaba.com Signed-off-by: Hongzhen Luo Acked-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4224 --- fs/erofs/data.c | 15 +-------------- fs/erofs/inode.c | 12 ++++++------ 2 files changed, 7 insertions(+), 20 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 85710a566a03..a286a57ee9d9 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -404,22 +404,9 @@ static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) if (IS_DAX(inode)) return dax_iomap_rw(iocb, to, &erofs_iomap_ops); #endif - if (iocb->ki_flags & IOCB_DIRECT) { - struct block_device *bdev = inode->i_sb->s_bdev; - unsigned int blksize_mask; - - if (bdev) - blksize_mask = bdev_logical_block_size(bdev) - 1; - else - blksize_mask = i_blocksize(inode) - 1; - - if ((iocb->ki_pos | iov_iter_count(to) | - iov_iter_alignment(to)) & blksize_mask) - return -EINVAL; - + if ((iocb->ki_flags & IOCB_DIRECT) && inode->i_sb->s_bdev) return iomap_dio_rw(iocb, to, &erofs_iomap_ops, NULL, 0, NULL, 0); - } return filemap_read(iocb, to, 0); } diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index a0e1b6ad8c50..c8ef6ebf9208 100644 --- a/fs/erofs/inode.c +++ 
b/fs/erofs/inode.c @@ -317,6 +317,7 @@ int erofs_getattr(struct mnt_idmap *idmap, const struct path *path, unsigned int query_flags) { struct inode *const inode = d_inode(path->dentry); + struct block_device *bdev = inode->i_sb->s_bdev; bool compressed = erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout); @@ -329,15 +330,14 @@ int erofs_getattr(struct mnt_idmap *idmap, const struct path *path, /* * Return the DIO alignment restrictions if requested. * - * In EROFS, STATX_DIOALIGN is not supported in ondemand mode and - * compressed files, so in these cases we report no DIO support. + * In EROFS, STATX_DIOALIGN is only supported in bdev-based mode + * and uncompressed inodes, otherwise we report no DIO support. */ if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) { stat->result_mask |= STATX_DIOALIGN; - if (!erofs_is_fscache_mode(inode->i_sb) && !compressed) { - stat->dio_mem_align = - bdev_logical_block_size(inode->i_sb->s_bdev); - stat->dio_offset_align = stat->dio_mem_align; + if (bdev && !compressed) { + stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; + stat->dio_offset_align = bdev_logical_block_size(bdev); } } generic_fillattr(idmap, request_mask, inode, stat); -- Gitee From 377d600787c91cda4ad6c2c07ae55a4c8250cca3 Mon Sep 17 00:00:00 2001 From: haodongdong Date: Thu, 12 Dec 2024 14:00:31 +0800 Subject: [PATCH 1974/2138] anolis: scsi: leapioraid: supports LEAPIO RAID controller ANBZ: #12362 This commit is to support LeapIO LEAPIO RAID controllers. RAID controllers support RAID 0/1/5/6/10/50/60 modes; RAID support SAS/SATA HDD/SSD. 
Signed-off-by: haodongdong Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/4241 --- .../default/CONFIG_SCSI_LEAPIORAID | 1 + arch/loongarch/configs/anolis-debug_defconfig | 1 + arch/loongarch/configs/anolis_defconfig | 1 + arch/loongarch/configs/loongson3_defconfig | 1 + arch/sw_64/configs/anolis_xuelang_defconfig | 1 + arch/sw_64/configs/junzhang_defconfig | 1 + arch/sw_64/configs/xuelang_defconfig | 1 + drivers/scsi/Kconfig | 1 + drivers/scsi/Makefile | 1 + drivers/scsi/leapioraid/Kconfig | 13 + drivers/scsi/leapioraid/Makefile | 9 + drivers/scsi/leapioraid/leapioraid.h | 2026 ++++ drivers/scsi/leapioraid/leapioraid_app.c | 2226 ++++ drivers/scsi/leapioraid/leapioraid_func.c | 7074 ++++++++++++ drivers/scsi/leapioraid/leapioraid_func.h | 1258 +++ drivers/scsi/leapioraid/leapioraid_os.c | 9823 +++++++++++++++++ .../scsi/leapioraid/leapioraid_transport.c | 1926 ++++ 17 files changed, 24364 insertions(+) create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LEAPIORAID create mode 100644 drivers/scsi/leapioraid/Kconfig create mode 100644 drivers/scsi/leapioraid/Makefile create mode 100644 drivers/scsi/leapioraid/leapioraid.h create mode 100644 drivers/scsi/leapioraid/leapioraid_app.c create mode 100644 drivers/scsi/leapioraid/leapioraid_func.c create mode 100644 drivers/scsi/leapioraid/leapioraid_func.h create mode 100644 drivers/scsi/leapioraid/leapioraid_os.c create mode 100644 drivers/scsi/leapioraid/leapioraid_transport.c diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LEAPIORAID b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LEAPIORAID new file mode 100644 index 000000000000..55062a39cfe4 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_SCSI_LEAPIORAID @@ -0,0 +1 @@ +CONFIG_SCSI_LEAPIORAID=m diff --git a/arch/loongarch/configs/anolis-debug_defconfig b/arch/loongarch/configs/anolis-debug_defconfig index d750f96d973f..4765e24c2f06 100644 --- a/arch/loongarch/configs/anolis-debug_defconfig +++ 
b/arch/loongarch/configs/anolis-debug_defconfig @@ -2229,6 +2229,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 CONFIG_SCSI_MPT2SAS=m # CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_LEAPIORAID=m CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set # CONFIG_SCSI_BUSLOGIC is not set diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig index f2311d31442b..413e7279ffdc 100644 --- a/arch/loongarch/configs/anolis_defconfig +++ b/arch/loongarch/configs/anolis_defconfig @@ -715,6 +715,7 @@ CONFIG_MEGARAID_LEGACY=y CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT3SAS=y CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_LEAPIORAID=m CONFIG_SCSI_SMARTPQI=m CONFIG_LIBFC=m CONFIG_LIBFCOE=m diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index bbf18ed31761..4998e3f0d96c 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -713,6 +713,7 @@ CONFIG_MEGARAID_LEGACY=y CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT3SAS=y CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_LEAPIORAID=m CONFIG_SCSI_SMARTPQI=m CONFIG_LIBFC=m CONFIG_LIBFCOE=m diff --git a/arch/sw_64/configs/anolis_xuelang_defconfig b/arch/sw_64/configs/anolis_xuelang_defconfig index 8c9c4dda69ed..001dfb0187f6 100644 --- a/arch/sw_64/configs/anolis_xuelang_defconfig +++ b/arch/sw_64/configs/anolis_xuelang_defconfig @@ -581,6 +581,7 @@ CONFIG_SCSI_HPSA=m CONFIG_SCSI_AHA152X=m CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_LEAPIORAID=m CONFIG_SCSI_SMARTPQI=m CONFIG_LIBFC=m CONFIG_LIBFCOE=m diff --git a/arch/sw_64/configs/junzhang_defconfig b/arch/sw_64/configs/junzhang_defconfig index 4f25770ca193..e202359a2936 100644 --- a/arch/sw_64/configs/junzhang_defconfig +++ b/arch/sw_64/configs/junzhang_defconfig @@ -417,6 +417,7 @@ CONFIG_SCSI_CXGB4_ISCSI=m CONFIG_SCSI_BNX2_ISCSI=m CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_LEAPIORAID=m CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=y 
CONFIG_SCSI_DH_HP_SW=y diff --git a/arch/sw_64/configs/xuelang_defconfig b/arch/sw_64/configs/xuelang_defconfig index b1c0101d0089..9f2944cc3b4f 100644 --- a/arch/sw_64/configs/xuelang_defconfig +++ b/arch/sw_64/configs/xuelang_defconfig @@ -414,6 +414,7 @@ CONFIG_SCSI_CXGB4_ISCSI=m CONFIG_SCSI_BNX2_ISCSI=m CONFIG_MEGARAID_SAS=m CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_LEAPIORAID=m CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=y CONFIG_SCSI_DH_HP_SW=y diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 23bce8995a55..cd658370af46 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -489,6 +489,7 @@ source "drivers/scsi/esas2r/Kconfig" source "drivers/scsi/megaraid/Kconfig.megaraid" source "drivers/scsi/mpt3sas/Kconfig" source "drivers/scsi/mpi3mr/Kconfig" +source "drivers/scsi/leapioraid/Kconfig" source "drivers/scsi/smartpqi/Kconfig" config SCSI_HPTIOP diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index f055bfd54a68..ab9ce4a6bae5 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -100,6 +100,7 @@ obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ obj-$(CONFIG_MEGARAID_SAS) += megaraid/ obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/ obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr/ +obj-$(CONFIG_SCSI_LEAPIORAID) += leapioraid/ obj-$(CONFIG_SCSI_ACARD) += atp870u.o obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o obj-$(CONFIG_SCSI_INITIO) += initio.o diff --git a/drivers/scsi/leapioraid/Kconfig b/drivers/scsi/leapioraid/Kconfig new file mode 100644 index 000000000000..744f3b633c03 --- /dev/null +++ b/drivers/scsi/leapioraid/Kconfig @@ -0,0 +1,13 @@ +# +# Kernel configuration file for the LEAPIORAID +# + +config SCSI_LEAPIORAID + tristate "LeapIO RAID Adapter" + depends on PCI && SCSI + select SCSI_SAS_ATTRS + select RAID_ATTRS + select IRQ_POLL + help + This driver supports LEAPIO RAID controller, which supports PCI Express Gen4 interface + and supports SAS/SATA HDD/SSD. 
diff --git a/drivers/scsi/leapioraid/Makefile b/drivers/scsi/leapioraid/Makefile new file mode 100644 index 000000000000..1a3786a56cb7 --- /dev/null +++ b/drivers/scsi/leapioraid/Makefile @@ -0,0 +1,9 @@ +# +# Makefile for the LEAPIORAID drivers. +# + +obj-$(CONFIG_SCSI_LEAPIORAID) += leapioraid.o +leapioraid-objs += leapioraid_func.o \ + leapioraid_os.o \ + leapioraid_transport.o \ + leapioraid_app.o diff --git a/drivers/scsi/leapioraid/leapioraid.h b/drivers/scsi/leapioraid/leapioraid.h new file mode 100644 index 000000000000..30908fffe43b --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid.h @@ -0,0 +1,2026 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * Copyright 2000-2020 Broadcom Inc. All rights reserved. + * + * Copyright (C) 2024 LeapIO Tech Inc. + * + */ + +#ifndef LEAPIORAID_H +#define LEAPIORAID_H + +typedef u8 U8; +typedef __le16 U16; +typedef __le32 U32; +typedef __le64 U64 __aligned(4); + +#define LEAPIORAID_IOC_STATE_RESET (0x00000000) +#define LEAPIORAID_IOC_STATE_READY (0x10000000) +#define LEAPIORAID_IOC_STATE_OPERATIONAL (0x20000000) +#define LEAPIORAID_IOC_STATE_FAULT (0x40000000) +#define LEAPIORAID_IOC_STATE_COREDUMP (0x50000000) +#define LEAPIORAID_IOC_STATE_MASK (0xF0000000) + +struct LeapioraidSysInterfaceRegs_t { + U32 Doorbell; + U32 WriteSequence; + U32 HostDiagnostic; + U32 Reserved1; + U32 DiagRWData; + U32 DiagRWAddressLow; + U32 DiagRWAddressHigh; + U32 Reserved2[5]; + U32 HostInterruptStatus; + U32 HostInterruptMask; + U32 DCRData; + U32 DCRAddress; + U32 Reserved3[2]; + U32 ReplyFreeHostIndex; + U32 Reserved4[8]; + U32 ReplyPostHostIndex; + U32 Reserved5; + U32 HCBSize; + U32 HCBAddressLow; + U32 HCBAddressHigh; + U32 Reserved6[12]; + U32 Scratchpad[4]; + U32 RequestDescriptorPostLow; + U32 RequestDescriptorPostHigh; + U32 AtomicRequestDescriptorPost; + U32 IocLogBufPosition; + U32 HostLogBufPosition; + U32 Reserved7[11]; +}; + +#define LEAPIORAID_DOORBELL_USED (0x08000000) +#define LEAPIORAID_DOORBELL_DATA_MASK 
(0x0000FFFF) +#define LEAPIORAID_DOORBELL_FUNCTION_SHIFT (24) +#define LEAPIORAID_DOORBELL_ADD_DWORDS_SHIFT (16) + +#define LEAPIORAID_DIAG_RESET_ADAPTER (0x00000004) + +#define LEAPIORAID_HIS_SYS2IOC_DB_STATUS (0x80000000) +#define LEAPIORAID_HIS_IOC2SYS_DB_STATUS (0x00000001) + +#define LEAPIORAID_RPHI_MSIX_INDEX_SHIFT (24) + +#define LEAPIORAID_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) +#define LEAPIORAID_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) +#define LEAPIORAID_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08) +#define LEAPIORAID_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C) + +struct LEAPIORAID_DEFAULT_REQUEST_DESCRIPTOR { + U8 RequestFlags; + U8 MSIxIndex; + U16 SMID; + U16 LMID; + U16 DescriptorTypeDependent; +}; + +struct LEAPIORAID_HIGH_PRIORITY_REQUEST_DESCRIPTOR { + U8 RequestFlags; + U8 MSIxIndex; + U16 SMID; + U16 LMID; + U16 Reserved1; +}; + +struct LEAPIORAID_SCSI_IO_REQUEST_DESCRIPTOR { + U8 RequestFlags; + U8 MSIxIndex; + U16 SMID; + U16 LMID; + U16 DevHandle; +}; + +typedef +struct LEAPIORAID_SCSI_IO_REQUEST_DESCRIPTOR + LEAPIORAID_FP_SCSI_IO_REQUEST_DESCRIPTOR; + +union LeapioraidReqDescUnion_t { + struct LEAPIORAID_DEFAULT_REQUEST_DESCRIPTOR Default; + struct LEAPIORAID_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; + struct LEAPIORAID_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; + LEAPIORAID_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO; + U64 Words; +}; + +struct LeapioraidAtomicReqDesc_t { + U8 RequestFlags; + U8 MSIxIndex; + U16 SMID; +}; + +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01) +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06) +#define LEAPIORAID_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) + +struct LeapioraidDefaultRepDesc_t { + U8 ReplyFlags; + U8 MSIxIndex; + U16 DescriptorTypeDependent1; + U32 DescriptorTypeDependent2; +}; + +struct LEAPIORAID_ADDRESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; + U8 MSIxIndex; + U16 SMID; + 
U32 ReplyFrameAddress; +}; + +struct LEAPIORAID_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; + U8 MSIxIndex; + U16 SMID; + U16 TaskTag; + U16 Reserved1; +}; + +typedef +struct LEAPIORAID_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR + LEAPIORAID_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR; + +union LeapioraidRepDescUnion_t { + struct LeapioraidDefaultRepDesc_t Default; + struct LEAPIORAID_ADDRESS_REPLY_DESCRIPTOR AddressReply; + struct LEAPIORAID_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; + LEAPIORAID_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess; + U64 Words; +}; + +#define LEAPIORAID_FUNC_SCSI_IO_REQUEST (0x00) +#define LEAPIORAID_FUNC_SCSI_TASK_MGMT (0x01) +#define LEAPIORAID_FUNC_IOC_INIT (0x02) +#define LEAPIORAID_FUNC_IOC_FACTS (0x03) +#define LEAPIORAID_FUNC_CONFIG (0x04) +#define LEAPIORAID_FUNC_PORT_FACTS (0x05) +#define LEAPIORAID_FUNC_PORT_ENABLE (0x06) +#define LEAPIORAID_FUNC_EVENT_NOTIFICATION (0x07) +#define LEAPIORAID_FUNC_EVENT_ACK (0x08) +#define LEAPIORAID_FUNC_FW_DOWNLOAD (0x09) +#define LEAPIORAID_FUNC_FW_UPLOAD (0x12) +#define LEAPIORAID_FUNC_RAID_ACTION (0x15) +#define LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH (0x16) +#define LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR (0x18) +#define LEAPIORAID_FUNC_SMP_PASSTHROUGH (0x1A) +#define LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL (0x1B) +#define LEAPIORAID_FUNC_IO_UNIT_CONTROL (0x1B) +#define LEAPIORAID_FUNC_SATA_PASSTHROUGH (0x1C) +#define LEAPIORAID_FUNC_IOC_MESSAGE_UNIT_RESET (0x40) +#define LEAPIORAID_FUNC_HANDSHAKE (0x42) +#define LEAPIORAID_FUNC_LOG_INIT (0x57) + +#define LEAPIORAID_IOCSTATUS_MASK (0x7FFF) +#define LEAPIORAID_IOCSTATUS_SUCCESS (0x0000) +#define LEAPIORAID_IOCSTATUS_INVALID_FUNCTION (0x0001) +#define LEAPIORAID_IOCSTATUS_BUSY (0x0002) +#define LEAPIORAID_IOCSTATUS_INVALID_SGL (0x0003) +#define LEAPIORAID_IOCSTATUS_INTERNAL_ERROR (0x0004) +#define LEAPIORAID_IOCSTATUS_INVALID_VPID (0x0005) +#define LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006) +#define 
LEAPIORAID_IOCSTATUS_INVALID_FIELD (0x0007) +#define LEAPIORAID_IOCSTATUS_INVALID_STATE (0x0008) +#define LEAPIORAID_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009) +#define LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER (0x000A) + +#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020) +#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021) +#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022) +#define LEAPIORAID_IOCSTATUS_CONFIG_INVALID_DATA (0x0023) +#define LEAPIORAID_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024) +#define LEAPIORAID_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025) + +#define LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040) +#define LEAPIORAID_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042) +#define LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043) +#define LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044) +#define LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045) +#define LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046) +#define LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047) +#define LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048) +#define LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049) +#define LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A) +#define LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B) +#define LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C) + +#define LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR (0x004D) +#define LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E) +#define LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F) + +#define LEAPIORAID_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062) +#define LEAPIORAID_IOCSTATUS_TARGET_ABORTED (0x0063) +#define LEAPIORAID_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064) +#define LEAPIORAID_IOCSTATUS_TARGET_NO_CONNECTION (0x0065) +#define LEAPIORAID_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A) +#define LEAPIORAID_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D) +#define LEAPIORAID_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E) +#define LEAPIORAID_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F) +#define 
LEAPIORAID_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070) +#define LEAPIORAID_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071) + +#define LEAPIORAID_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090) +#define LEAPIORAID_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091) +#define LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000) + +struct LeapioraidReqHeader_t { + U16 FunctionDependent1; + U8 ChainOffset; + U8 Function; + U16 FunctionDependent2; + U8 FunctionDependent3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; +}; + +struct LeapioraidDefaultRep_t { + U16 FunctionDependent1; + U8 MsgLength; + U8 Function; + U16 FunctionDependent2; + U8 FunctionDependent3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U16 FunctionDependent5; + U16 IOCStatus; + U32 IOCLogInfo; +}; + +struct LEAPIORAID_VERSION_STRUCT { + U8 Dev; + U8 Unit; + U8 Minor; + U8 Major; +}; + +union LEAPIORAID_VERSION_UNION { + struct LEAPIORAID_VERSION_STRUCT Struct; + U32 Word; +}; + +struct LeapioSGESimple32_t { + U32 FlagsLength; + U32 Address; +}; + +struct LeapioSGESimple64_t { + U32 FlagsLength; + U64 Address; +}; + +struct LEAPIORAID_SGE_SIMPLE_UNION { + U32 FlagsLength; + union { + U32 Address32; + U64 Address64; + } u; +}; + +struct LEAPIORAID_SGE_CHAIN_UNION { + U16 Length; + U8 NextChainOffset; + U8 Flags; + union { + U32 Address32; + U64 Address64; + } u; +}; + +#define LEAPIORAID_SGE_FLAGS_LAST_ELEMENT (0x80) +#define LEAPIORAID_SGE_FLAGS_END_OF_BUFFER (0x40) +#define LEAPIORAID_SGE_FLAGS_END_OF_LIST (0x01) +#define LEAPIORAID_SGE_FLAGS_SHIFT (24) +#define LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT (0x10) +#define LEAPIORAID_SGE_FLAGS_SYSTEM_ADDRESS (0x00) +#define LEAPIORAID_SGE_FLAGS_HOST_TO_IOC (0x04) +#define LEAPIORAID_SGE_FLAGS_32_BIT_ADDRESSING (0x00) +#define LEAPIORAID_SGE_FLAGS_64_BIT_ADDRESSING (0x02) + +struct LEAPIORAID_IEEE_SGE_SIMPLE32 { + U32 Address; + U32 FlagsLength; +}; + +struct LEAPIORAID_IEEE_SGE_SIMPLE64 { + U64 Address; + U32 Length; + U16 Reserved1; + U8 Reserved2; + U8 Flags; +}; 
+ +union LEAPIORAID_IEEE_SGE_SIMPLE_UNION { + struct LEAPIORAID_IEEE_SGE_SIMPLE32 Simple32; + struct LEAPIORAID_IEEE_SGE_SIMPLE64 Simple64; +}; + +union LEAPIORAID_IEEE_SGE_CHAIN_UNION { + struct LEAPIORAID_IEEE_SGE_SIMPLE32 Chain32; + struct LEAPIORAID_IEEE_SGE_SIMPLE64 Chain64; +}; + +struct LEAPIORAID_IEEE_SGE_CHAIN64 { + U64 Address; + U32 Length; + U16 Reserved1; + U8 NextChainOffset; + U8 Flags; +}; + +union LEAPIORAID_IEEE_SGE_IO_UNION { + struct LEAPIORAID_IEEE_SGE_SIMPLE64 IeeeSimple; + struct LEAPIORAID_IEEE_SGE_CHAIN64 IeeeChain; +}; + +#define LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST (0x40) +#define LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00) +#define LEAPIORAID_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) +#define LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) + +union LEAPIORAID_SIMPLE_SGE_UNION { + struct LEAPIORAID_SGE_SIMPLE_UNION LeapioSimple; + union LEAPIORAID_IEEE_SGE_SIMPLE_UNION IeeeSimple; +}; + +union LEAPIORAID_SGE_IO_UNION { + struct LEAPIORAID_SGE_SIMPLE_UNION LeapioSimple; + struct LEAPIORAID_SGE_CHAIN_UNION LeapioChain; + union LEAPIORAID_IEEE_SGE_SIMPLE_UNION IeeeSimple; + union LEAPIORAID_IEEE_SGE_CHAIN_UNION IeeeChain; +}; + +struct LEAPIORAID_CONFIG_PAGE_HEADER { + U8 PageVersion; + U8 PageLength; + U8 PageNumber; + U8 PageType; +}; + +struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER { + U8 PageVersion; + U8 Reserved1; + U8 PageNumber; + U8 PageType; + U16 ExtPageLength; + U8 ExtPageType; + U8 Reserved2; +}; + +#define LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT (0x00) +#define LEAPIORAID_CONFIG_PAGETYPE_IOC (0x01) +#define LEAPIORAID_CONFIG_PAGETYPE_BIOS (0x02) +#define LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME (0x08) +#define LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING (0x09) +#define LEAPIORAID_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A) +#define LEAPIORAID_CONFIG_PAGETYPE_EXTENDED (0x0F) +#define LEAPIORAID_CONFIG_PAGETYPE_MASK (0x0F) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11) +#define 
LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY (0x13) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_LOG (0x14) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) +#define LEAPIORAID_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A) + +#define LEAPIORAID_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000) + +#define LEAPIORAID_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000) +#define LEAPIORAID_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000) + +#define LEAPIORAID_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000) +#define LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000) +#define LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000) +#define LEAPIORAID_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16) +#define LEAPIORAID_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000) +#define LEAPIORAID_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000) +#define LEAPIORAID_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define LEAPIORAID_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000) +#define LEAPIORAID_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000) + +struct LeapioraidCfgReq_t { + U8 Action; + U8 SGLFlags; + U8 ChainOffset; + U8 Function; + U16 ExtPageLength; + U8 ExtPageType; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U8 Reserved2; + U8 ProxyVF_ID; + U16 Reserved4; + U32 Reserved3; + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 PageAddress; + union LEAPIORAID_SGE_IO_UNION PageBufferSGE; +}; + +#define LEAPIORAID_CONFIG_ACTION_PAGE_HEADER (0x00) +#define LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT (0x01) +#define LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02) +#define LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04) + +struct LeapioraidCfgRep_t { + U8 
Action; + U8 SGLFlags; + U8 MsgLength; + U8 Function; + U16 ExtPageLength; + U8 ExtPageType; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U16 Reserved2; + U16 IOCStatus; + U32 IOCLogInfo; + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; +}; + +struct LeapioraidManP0_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U8 ChipName[16]; + U8 ChipRevision[8]; + U8 BoardName[16]; + U8 BoardAssembly[16]; + U8 BoardTracerNumber[16]; +}; + +struct LEAPIORAID_MANPAGE7_CONNECTOR_INFO { + U32 Pinout; + U8 Connector[16]; + U8 Location; + U8 ReceptacleID; + U16 Slot; + U16 Slotx2; + U16 Slotx4; +}; + +struct LeapioraidIOUnitP0_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U64 UniqueValue; + union LEAPIORAID_VERSION_UNION NvdataVersionDefault; + union LEAPIORAID_VERSION_UNION NvdataVersionPersistent; +}; + +struct LeapioraidIOUnitP1_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 Flags; +}; + +#define LEAPIORAID_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100) +#define LEAPIORAID_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020) + +struct LEAPIORAID_IOUNIT8_SENSOR { + U16 Flags; + U16 Reserved1; + U16 Threshold[4]; + U32 Reserved2; + U32 Reserved3; + U32 Reserved4; +}; + +struct LeapioraidIOUnitP8_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 Reserved1; + U32 Reserved2; + U8 NumSensors; + U8 PollingInterval; + U16 Reserved3; + struct LEAPIORAID_IOUNIT8_SENSOR Sensor[]; +}; + +struct LeapioraidIOCP1_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 Flags; + U32 CoalescingTimeout; + U8 CoalescingDepth; + U8 PCISlotNum; + U8 PCIBusNum; + U8 PCIDomainSegment; + U32 Reserved1; + U32 ProductSpecific; +}; + +struct LeapioraidIOCP8_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U8 NumDevsPerEnclosure; + U8 Reserved1; + U16 Reserved2; + U16 MaxPersistentEntries; + U16 MaxNumPhysicalMappedIDs; + U16 Flags; + U16 Reserved3; + U16 IRVolumeMappingFlags; + U16 Reserved4; + U32 Reserved5; +}; + +#define 
LEAPIORAID_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003) +#define LEAPIORAID_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000) + +struct LEAPIORAID_BOOT_DEVICE_ADAPTER_ORDER { + U32 Reserved1; + U32 Reserved2; + U32 Reserved3; + U32 Reserved4; + U32 Reserved5; + U32 Reserved6; +}; + +struct LEAPIORAID_BOOT_DEVICE_SAS_WWID { + U64 SASAddress; + U8 LUN[8]; + U32 Reserved1; + U32 Reserved2; +}; + +struct LEAPIORAID_BOOT_DEVICE_ENCLOSURE_SLOT { + U64 EnclosureLogicalID; + U32 Reserved1; + U32 Reserved2; + U16 SlotNumber; + U16 Reserved3; + U32 Reserved4; +}; + +struct LEAPIORAID_BOOT_DEVICE_DEVICE_NAME { + U64 DeviceName; + U8 LUN[8]; + U32 Reserved1; + U32 Reserved2; +}; + +union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE { + struct LEAPIORAID_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder; + struct LEAPIORAID_BOOT_DEVICE_SAS_WWID SasWwid; + struct LEAPIORAID_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot; + struct LEAPIORAID_BOOT_DEVICE_DEVICE_NAME DeviceName; +}; + +struct LeapioraidBiosP2_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 Reserved1; + U32 Reserved2; + U32 Reserved3; + U32 Reserved4; + U32 Reserved5; + U32 Reserved6; + U8 ReqBootDeviceForm; + U8 Reserved7; + U16 Reserved8; + union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; + U8 ReqAltBootDeviceForm; + U8 Reserved9; + U16 Reserved10; + union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; + U8 CurrentBootDeviceForm; + U8 Reserved11; + U16 Reserved12; + union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; +}; + +#define LEAPIORAID_BIOSPAGE2_FORM_MASK (0x0F) +#define LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00) +#define LEAPIORAID_BIOSPAGE2_FORM_SAS_WWID (0x05) +#define LEAPIORAID_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06) +#define LEAPIORAID_BIOSPAGE2_FORM_DEVICE_NAME (0x07) + +struct LEAPIORAID_ADAPTER_INFO { + U8 PciBusNumber; + U8 PciDeviceAndFunctionNumber; + U16 AdapterFlags; +}; + +struct LEAPIORAID_ADAPTER_ORDER_AUX { + U64 WWID; + U32 Reserved1; + U32 Reserved2; +}; + +struct 
LeapioraidBiosP3_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U32 GlobalFlags; + U32 BiosVersion; + struct LEAPIORAID_ADAPTER_INFO AdapterOrder[4]; + U32 Reserved1; + struct LEAPIORAID_ADAPTER_ORDER_AUX AdapterOrderAux[4]; +}; + +struct LEAPIORAID_RAIDVOL0_PHYS_DISK { + U8 RAIDSetNum; + U8 PhysDiskMap; + U8 PhysDiskNum; + U8 Reserved; +}; + +struct LEAPIORAID_RAIDVOL0_SETTINGS { + U16 Settings; + U8 HotSparePool; + U8 Reserved; +}; + +struct LeapioraidRaidVolP0_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U16 DevHandle; + U8 VolumeState; + U8 VolumeType; + U32 VolumeStatusFlags; + struct LEAPIORAID_RAIDVOL0_SETTINGS VolumeSettings; + U64 MaxLBA; + U32 StripeSize; + U16 BlockSize; + U16 Reserved1; + U8 SupportedPhysDisks; + U8 ResyncRate; + U16 DataScrubDuration; + U8 NumPhysDisks; + U8 Reserved2; + U8 Reserved3; + U8 InactiveStatus; + struct LEAPIORAID_RAIDVOL0_PHYS_DISK PhysDisk[]; +}; + +#define LEAPIORAID_RAID_VOL_STATE_MISSING (0x00) +#define LEAPIORAID_RAID_VOL_STATE_FAILED (0x01) +#define LEAPIORAID_RAID_VOL_STATE_INITIALIZING (0x02) +#define LEAPIORAID_RAID_VOL_STATE_ONLINE (0x03) +#define LEAPIORAID_RAID_VOL_STATE_DEGRADED (0x04) +#define LEAPIORAID_RAID_VOL_STATE_OPTIMAL (0x05) +#define LEAPIORAID_RAID_VOL_TYPE_RAID0 (0x00) +#define LEAPIORAID_RAID_VOL_TYPE_RAID1E (0x01) +#define LEAPIORAID_RAID_VOL_TYPE_RAID1 (0x02) +#define LEAPIORAID_RAID_VOL_TYPE_RAID10 (0x05) +#define LEAPIORAID_RAID_VOL_TYPE_UNKNOWN (0xFF) + +#define LEAPIORAID_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000) + +struct LeapioraidRaidVolP1_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U16 DevHandle; + U16 Reserved0; + U8 GUID[24]; + U8 Name[16]; + U64 WWID; + U32 Reserved1; + U32 Reserved2; +}; + +struct LEAPIORAID_RAIDPHYSDISK0_SETTINGS { + U16 Reserved1; + U8 HotSparePool; + U8 Reserved2; +}; + +struct LEAPIORAID_RAIDPHYSDISK0_INQUIRY_DATA { + U8 VendorID[8]; + U8 ProductID[16]; + U8 ProductRevLevel[4]; + U8 SerialNum[32]; +}; + +struct 
LeapioraidRaidPDP0_t { + struct LEAPIORAID_CONFIG_PAGE_HEADER Header; + U16 DevHandle; + U8 Reserved1; + U8 PhysDiskNum; + struct LEAPIORAID_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; + U32 Reserved2; + struct LEAPIORAID_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; + U32 Reserved3; + U8 PhysDiskState; + U8 OfflineReason; + U8 IncompatibleReason; + U8 PhysDiskAttributes; + U32 PhysDiskStatusFlags; + U64 DeviceMaxLBA; + U64 HostMaxLBA; + U64 CoercedMaxLBA; + U16 BlockSize; + U16 Reserved5; + U32 Reserved6; +}; + +#define LEAPIORAID_RAID_PD_STATE_NOT_CONFIGURED (0x00) +#define LEAPIORAID_RAID_PD_STATE_NOT_COMPATIBLE (0x01) +#define LEAPIORAID_RAID_PD_STATE_OFFLINE (0x02) +#define LEAPIORAID_RAID_PD_STATE_ONLINE (0x03) +#define LEAPIORAID_RAID_PD_STATE_HOT_SPARE (0x04) +#define LEAPIORAID_RAID_PD_STATE_DEGRADED (0x05) +#define LEAPIORAID_RAID_PD_STATE_REBUILDING (0x06) +#define LEAPIORAID_RAID_PD_STATE_OPTIMAL (0x07) + +#define LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) +#define LEAPIORAID_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00) +#define LEAPIORAID_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01) +#define LEAPIORAID_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02) +#define LEAPIORAID_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03) +#define LEAPIORAID_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04) +#define LEAPIORAID_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05) +#define LEAPIORAID_SAS_NEG_LINK_RATE_1_5 (0x08) +#define LEAPIORAID_SAS_NEG_LINK_RATE_3_0 (0x09) +#define LEAPIORAID_SAS_NEG_LINK_RATE_6_0 (0x0A) +#define LEAPIORAID_SAS_NEG_LINK_RATE_12_0 (0x0B) + +#define LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY (0x00001000) + +#define LEAPIORAID_SAS_PRATE_MIN_RATE_MASK (0x0F) +#define LEAPIORAID_SAS_HWRATE_MIN_RATE_MASK (0x0F) + +struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA { + U8 Port; + U8 PortFlags; + U8 PhyFlags; + U8 NegotiatedLinkRate; + U32 ControllerPhyDeviceInfo; + U16 AttachedDevHandle; + U16 ControllerDevHandle; + U32 DiscoveryStatus; + U32 Reserved; +}; + +struct LeapioraidSasIOUnitP0_t { + struct 
LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header; /* continuation of a SAS IO Unit config page struct opened above this chunk */
	U32 Reserved1;
	U8 NumPhys;		/* count of entries in the trailing PhyData[] */
	U8 Reserved2;
	U16 Reserved3;
	struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA PhyData[];	/* flexible array: one entry per phy */
};

/* SAS IO Unit Page 0 PortFlags / PhyFlags bits */
#define LEAPIORAID_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08)
#define LEAPIORAID_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
#define LEAPIORAID_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10)
#define LEAPIORAID_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)

/* Per-phy configuration entry carried by SAS IO Unit Page 1 */
struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA {
	U8 Port;
	U8 PortFlags;
	U8 PhyFlags;
	U8 MaxMinLinkRate;
	U32 ControllerPhyDeviceInfo;
	U16 MaxTargetPortConnectTime;
	U16 Reserved1;
};

/* SAS IO Unit Page 1: controller-wide SAS/SATA queueing and phy setup */
struct LeapioraidSasIOUnitP1_t {
	struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
	U16 ControlFlags;
	U16 SASNarrowMaxQueueDepth;
	U16 AdditionalControlFlags;
	U16 SASWideMaxQueueDepth;
	U8 NumPhys;		/* count of entries in the trailing PhyData[] */
	U8 SATAMaxQDepth;
	U8 ReportDeviceMissingDelay;
	U8 IODeviceMissingDelay;
	struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA PhyData[];
};

#define LEAPIORAID_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F)
#define LEAPIORAID_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80)
#define LEAPIORAID_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10)
#define LEAPIORAID_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)

/* SAS Expander Page 0: identity and status of one discovered expander */
struct LeapioraidExpanderP0_t {
	struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
	U8 PhysicalPort;
	U8 ReportGenLength;
	U16 EnclosureHandle;
	U64 SASAddress;
	U32 DiscoveryStatus;
	U16 DevHandle;
	U16 ParentDevHandle;
	U16 ExpanderChangeCount;
	U16 ExpanderRouteIndexes;
	U8 NumPhys;
	U8 SASLevel;
	U16 Flags;
	U16 STPBusInactivityTimeLimit;
	U16 STPMaxConnectTimeLimit;
	U16 STP_SMP_NexusLossTime;
	U16 MaxNumRoutedSasAddresses;
	U64 ActiveZoneManagerSASAddress;
	U16 ZoneLockInactivityLimit;
	U16 Reserved1;
	U8 TimeToReducedFunc;
	U8 InitialTimeToReducedFunc;
	U8 MaxReducedFuncTime;
	U8 Reserved2;
};

/* SAS Expander Page 1: per-phy detail for one expander phy */
struct LeapioraidExpanderP1_t {
	struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
	U8 PhysicalPort;
	U8 Reserved1;
	U16 Reserved2;
	U8 NumPhys;
	U8 Phy;			/* phy index this page describes */
	U16 NumTableEntriesProgrammed;
	U8 ProgrammedLinkRate;
	U8 HwLinkRate;
	U16 AttachedDevHandle;
	U32 PhyInfo;
	U32 AttachedDeviceInfo;
	U16 ExpanderDevHandle;
	U8 ChangeCount;
	U8 NegotiatedLinkRate;
	U8 PhyIdentifier;
	U8 AttachedPhyIdentifier;
	U8 Reserved3;
	U8 DiscoveryInfo;
	U32 AttachedPhyInfo;
	U8 ZoneGroup;
	U8 SelfConfigStatus;
	U16 Reserved4;
};

/* SAS Device Page 0: identity/status of one attached SAS/SATA device */
struct LeapioraidSasDevP0_t {
	struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
	U16 Slot;
	U16 EnclosureHandle;
	U64 SASAddress;
	U16 ParentDevHandle;
	U8 PhyNum;
	U8 AccessStatus;	/* one of the ASTATUS codes below */
	U16 DevHandle;
	U8 AttachedPhyIdentifier;
	U8 ZoneGroup;
	U32 DeviceInfo;
	U16 Flags;		/* LEAPIORAID_SAS_DEVICE0_FLAGS_* bits below */
	U8 PhysicalPort;
	U8 MaxPortConnections;
	U64 DeviceName;
	U8 PortGroups;
	U8 DmaGroup;
	U8 ControlGroup;
	U8 EnclosureLevel;
	U8 ConnectorName[4];
	U32 Reserved3;
};

/* SAS Device Page 0 AccessStatus codes (SIF_* presumably SATA init failure subcodes) */
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19)
#define LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F)
/* SAS Device Page 0 Flags bits */
#define LEAPIORAID_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000)
#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400)
#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200)
#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040)
#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020)
#define LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010)
#define LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002)
#define LEAPIORAID_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)

/* SAS Phy Page 0: static configuration/state of one controller phy */
struct LeapioraidSasPhyP0_t {
	struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
	U16 OwnerDevHandle;
	U16 Reserved1;
	U16 AttachedDevHandle;
	U8 AttachedPhyIdentifier;
	U8 Reserved2;
	U32 AttachedPhyInfo;
	U8 ProgrammedLinkRate;
	U8 HwLinkRate;
	U8 ChangeCount;
	U8 Flags;
	U32 PhyInfo;
	U8 NegotiatedLinkRate;
	U8 Reserved3;
	U16 Reserved4;
};

/* SAS Phy Page 1: phy-level link error counters */
struct LeapioraidSasPhyP1_t {
	struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
	U32 Reserved1;
	U32 InvalidDwordCount;
	U32 RunningDisparityErrorCount;
	U32 LossDwordSynchCount;
	U32 PhyResetProblemCount;
};

/* SAS Enclosure Page 0: one enclosure's identity and slot layout */
struct LeapioraidSasEncP0_t {
	struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
	U32 Reserved1;
	U64 EnclosureLogicalID;
	U16 Flags;
	U16 EnclosureHandle;
	U16 NumSlots;
	U16 StartSlot;
	U8 ChassisSlot;
	U8 EnclosureLevel;
	U16 SEPDevHandle;
	U8 OEMRD;
	U8 Reserved1a;
	U16 Reserved2;
	U32 Reserved3;
};

#define LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)

/* RAID Configuration Page 0: one element (volume, member disk, or spare) */
struct LEAPIORAID_RAIDCONFIG0_CONFIG_ELEMENT {
	U16 ElementFlags;	/* element type in low nibble, see EFLAGS below */
	U16 VolDevHandle;
	U8 HotSparePool;
	U8 PhysDiskNum;
	U16 PhysDiskDevHandle;
};

#define LEAPIORAID_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F)
#define LEAPIORAID_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001)
#define LEAPIORAID_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002)
#define LEAPIORAID_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003)

/* RAID Configuration Page 0: whole-config summary plus element list */
struct LeapioraidRaidCfgP0_t {
	struct LEAPIORAID_CONFIG_EXTENDED_PAGE_HEADER Header;
	U8 NumHotSpares;
	U8 NumPhysDisks;
	U8 NumVolumes;
	U8 ConfigNum;
	U32 Flags;
	U8 ConfigGUID[24];
	U32 Reserved1;
	U8 NumElements;		/* count of entries in the trailing ConfigElement[] */
	U8 Reserved2;
	U16 Reserved3;
	struct LEAPIORAID_RAIDCONFIG0_CONFIG_ELEMENT ConfigElement[];
};

/* Firmware image flash header (layout fixed by the firmware image format) */
struct LeapioraidFWImgHeader_t {
	U32 Signature;
	U32 Signature0;
	U32 Signature1;
	U32 Signature2;
	union LEAPIORAID_VERSION_UNION LEAPIOVersion;
	union LEAPIORAID_VERSION_UNION FWVersion;
	union LEAPIORAID_VERSION_UNION NVDATAVersion;
	union LEAPIORAID_VERSION_UNION PackageVersion;
	U16 VendorID;
	U16 ProductID;
	U16 ProtocolFlags;
	U16 Reserved26;
	U32 IOCCapabilities;
	U32 ImageSize;
	U32 NextImageHeaderOffset;
	U32 Checksum;
	U32 Reserved38;
	U32 Reserved3C;
	U32 Reserved40;
	U32 Reserved44;
	U32 Reserved48;
	U32 Reserved4C;
	U32 Reserved50;
	U32 Reserved54;
	U32 Reserved58;
	U32 Reserved5C;
	U32 BootFlags;
	U32 FirmwareVersionNameWhat;
	U8 FirmwareVersionName[32];
	U32 VendorNameWhat;
	U8 VendorName[32];
	U32 PackageNameWhat;
	U8 PackageName[32];
	U32 ReservedD0;
	U32 ReservedD4;
	U32 ReservedD8;
	U32 ReservedDC;
	U32 ReservedE0;
	U32 ReservedE4;
	U32 ReservedE8;
	U32 ReservedEC;
	U32 ReservedF0;
	U32 ReservedF4;
	U32 ReservedF8;
	U32 ReservedFC;
};

/* Byte range excluded from image hashing/validation */
struct LEAPIORAID_HASH_EXCLUSION_FORMAT {
	U32 Offset;
	U32 Size;
};

/* Component image header (per-component entry within a flash image) */
struct LeapioraidComptImgHeader_t {
	U32 Signature0;
	U32 LoadAddress;
	U32 DataSize;
	U32 StartAddress;
	U32 Signature1;
	U32 FlashOffset;
	U32 FlashSize;
	U32 VersionStringOffset;
	U32 BuildDateStringOffset;
	U32 BuildTimeStringOffset;
	U32 EnvironmentVariableOffset;
	U32 ApplicationSpecific;
	U32 Signature2;
	U32 HeaderSize;
	U32 Crc;
	U8 NotFlashImage;
	U8 Compressed;
	U16 Reserved3E;
	U32 SecondaryFlashOffset;
	U32 Reserved44;
	U32 Reserved48;
	union LEAPIORAID_VERSION_UNION
RMCInterfaceVersion; /* continues struct LeapioraidComptImgHeader_t from the previous chunk */
	union LEAPIORAID_VERSION_UNION Reserved50;
	union LEAPIORAID_VERSION_UNION FWVersion;
	union LEAPIORAID_VERSION_UNION NvdataVersion;
	struct LEAPIORAID_HASH_EXCLUSION_FORMAT HashExclusion[4];
	U32 NextImageHeaderOffset;
	U32 Reserved80[32];
};

/* 32-byte CDB variant carrying end-to-end data protection (EEDP) tags */
struct LEAPIORAID_SCSI_IO_CDB_EEDP32 {
	U8 CDB[20];
	__be32 PrimaryReferenceTag;
	U16 PrimaryApplicationTag;
	U16 PrimaryApplicationTagMask;
	U32 TransferLength;
};

union LEAPIO_SCSI_IO_CDB_UNION {
	U8 CDB32[32];
	struct LEAPIORAID_SCSI_IO_CDB_EEDP32 EEDP32;
	struct LEAPIORAID_SGE_SIMPLE_UNION SGE;
};

/* SCSI IO request (MPI SGE flavour) */
struct LeapioSCSIIOReq_t {
	U16 DevHandle;
	U8 ChainOffset;
	U8 Function;
	U16 Reserved1;
	U8 Reserved2;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved3;
	U32 SenseBufferLowAddress;
	U16 SGLFlags;
	U8 SenseBufferLength;
	U8 Reserved4;
	U8 SGLOffset0;
	U8 SGLOffset1;
	U8 SGLOffset2;
	U8 SGLOffset3;
	U32 SkipCount;
	U32 DataLength;
	U32 BidirectionalDataLength;
	U16 IoFlags;
	U16 EEDPFlags;
	U32 EEDPBlockSize;
	U32 SecondaryReferenceTag;
	U16 SecondaryApplicationTag;
	U16 ApplicationTagTranslationMask;
	U8 LUN[8];
	U32 Control;		/* direction/queueing bits, see SCSIIO_CONTROL_* */
	union LEAPIO_SCSI_IO_CDB_UNION CDB;
	union LEAPIORAID_SGE_IO_UNION SGL;
};

#define LEAPIORAID_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00)

/* SCSI IO Control field bits */
#define LEAPIORAID_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26)
#define LEAPIORAID_SCSIIO_CONTROL_NODATATRANSFER (0x00000000)
#define LEAPIORAID_SCSIIO_CONTROL_WRITE (0x01000000)
#define LEAPIORAID_SCSIIO_CONTROL_READ (0x02000000)
#define LEAPIORAID_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000)
#define LEAPIORAID_SCSIIO_CONTROL_CMDPRI_SHIFT (11)
#define LEAPIORAID_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
#define LEAPIORAID_SCSIIO_CONTROL_ORDEREDQ (0x00000200)
#define LEAPIORAID_SCSIIO_CONTROL_TLR_ON (0x00000040)

union LEAPIORAID_SCSI_IO_CDB_UNION {
	U8 CDB32[32];
	struct LEAPIORAID_SCSI_IO_CDB_EEDP32 EEDP32;
	struct LEAPIORAID_IEEE_SGE_SIMPLE64 SGE;
};

/* SCSI IO request (IEEE SGE flavour; note DMAFlags and 16-bit EEDPBlockSize
 * where the MPI-SGE variant above has SGLFlags and a 32-bit field) */
struct LeapioraidSCSIIOReq_t {
	U16 DevHandle;
	U8 ChainOffset;
	U8 Function;
	U16 Reserved1;
	U8 Reserved2;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved3;
	U32 SenseBufferLowAddress;
	U8 DMAFlags;
	U8 Reserved5;
	U8 SenseBufferLength;
	U8 Reserved4;
	U8 SGLOffset0;
	U8 SGLOffset1;
	U8 SGLOffset2;
	U8 SGLOffset3;
	U32 SkipCount;
	U32 DataLength;
	U32 BidirectionalDataLength;
	U16 IoFlags;
	U16 EEDPFlags;
	U16 EEDPBlockSize;
	U16 Reserved6;
	U32 SecondaryReferenceTag;
	U16 SecondaryApplicationTag;
	U16 ApplicationTagTranslationMask;
	U8 LUN[8];
	U32 Control;
	union LEAPIORAID_SCSI_IO_CDB_UNION CDB;
	union LEAPIORAID_IEEE_SGE_IO_UNION SGL;
};

/* SCSI IO reply */
struct LeapioraidSCSIIORep_t {
	U16 DevHandle;
	U8 MsgLength;
	U8 Function;
	U16 Reserved1;
	U8 Reserved2;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved3;
	U8 SCSIStatus;		/* LEAPIORAID_SCSI_STATUS_* */
	U8 SCSIState;		/* LEAPIORAID_SCSI_STATE_* bits */
	U16 IOCStatus;
	U32 IOCLogInfo;
	U32 TransferCount;
	U32 SenseCount;
	U32 ResponseInfo;
	U16 TaskTag;
	U16 SCSIStatusQualifier;
	U32 BidirectionalTransferCount;
	U32 EEDPErrorOffset;
	U16 EEDPObservedAppTag;
	U16 EEDPObservedGuard;
	U32 EEDPObservedRefTag;
};

/* SCSI status byte values (match SAM status codes) */
#define LEAPIORAID_SCSI_STATUS_GOOD (0x00)
#define LEAPIORAID_SCSI_STATUS_CHECK_CONDITION (0x02)
#define LEAPIORAID_SCSI_STATUS_CONDITION_MET (0x04)
#define LEAPIORAID_SCSI_STATUS_BUSY (0x08)
#define LEAPIORAID_SCSI_STATUS_INTERMEDIATE (0x10)
#define LEAPIORAID_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
#define LEAPIORAID_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
#define LEAPIORAID_SCSI_STATUS_COMMAND_TERMINATED (0x22)
#define LEAPIORAID_SCSI_STATUS_TASK_SET_FULL (0x28)
#define LEAPIORAID_SCSI_STATUS_ACA_ACTIVE (0x30)
#define LEAPIORAID_SCSI_STATUS_TASK_ABORTED (0x40)
/* SCSIState bits qualifying the reply */
#define LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID (0x10)
#define LEAPIORAID_SCSI_STATE_TERMINATED (0x08)
#define LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS (0x04)
#define LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED (0x02)
#define LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID (0x01)

/* SCSI task management request */
struct LeapioraidSCSITmgReq_t {
	U16 DevHandle;
	U8 ChainOffset;
	U8 Function;
	U8 Reserved1;
	U8 TaskType;		/* LEAPIORAID_SCSITASKMGMT_TASKTYPE_* */
	U8 Reserved2;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved3;
	U8 LUN[8];
	U32 Reserved4[7];
	U16 TaskMID;		/* SMID of the task targeted by ABORT/QUERY */
	U16 Reserved5;
};

#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
#define LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
#define LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00)

/* SCSI task management reply */
struct LeapioraidSCSITmgRep_t {
	U16 DevHandle;
	U8 MsgLength;
	U8 Function;
	U8 ResponseCode;	/* LEAPIORAID_SCSITASKMGMT_RSP_* */
	U8 TaskType;
	U8 Reserved1;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved2;
	U16 Reserved3;
	U16 IOCStatus;
	U32 IOCLogInfo;
	U32 TerminationCount;
	U32 ResponseInfo;
};

#define LEAPIORAID_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
#define LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
#define LEAPIORAID_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
#define LEAPIORAID_SCSITASKMGMT_RSP_TM_FAILED (0x05)
#define LEAPIORAID_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
#define LEAPIORAID_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
#define LEAPIORAID_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)

/* SCSI Enclosure Processor (SEP) request — e.g. drive fault LED control */
struct LeapioraidSepReq_t {
	U16 DevHandle;
	U8 ChainOffset;
	U8 Function;
	U8 Action;		/* LEAPIORAID_SEP_REQ_ACTION_* */
	U8 Flags;		/* addressing mode, LEAPIORAID_SEP_REQ_FLAGS_* */
	U8 Reserved1;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved2;
	U32 SlotStatus;
	U32 Reserved3;
	U32 Reserved4;
	U32 Reserved5;
	U16 Slot;
	U16 EnclosureHandle;
};

#define LEAPIORAID_SEP_REQ_ACTION_WRITE_STATUS (0x00)
#define LEAPIORAID_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00)
#define LEAPIORAID_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01)
#define LEAPIORAID_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040)

/* SEP reply */
struct LeapioraidSepRep_t {
	U16 DevHandle;
	U8 MsgLength;
	U8 Function;
	U8 Action;
	U8 Flags;
	U8 Reserved1;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved2;
	U16 Reserved3;
	U16 IOCStatus;
	U32 IOCLogInfo;
	U32 SlotStatus;
	U32 Reserved4;
	U16 Slot;
	U16 EnclosureHandle;
};

/* IOC Init request: hands queue memory layout to the controller */
struct LeapioraidIOCInitReq_t {
	U8 WhoInit;		/* LEAPIORAID_WHOINIT_HOST_DRIVER */
	U8 Reserved1;
	U8 ChainOffset;
	U8 Function;
	U16 Reserved2;
	U8 Reserved3;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved4;
	U16 MsgVersion;
	U16 HeaderVersion;
	U32 Reserved5;
	U16 ConfigurationFlags;
	U8 HostPageSize;
	U8 HostMSIxVectors;
	U16 Reserved8;
	U16 SystemRequestFrameSize;
	U16 ReplyDescriptorPostQueueDepth;
	U16 ReplyFreeQueueDepth;
	U32 SenseBufferAddressHigh;
	U32 SystemReplyAddressHigh;
	U64 SystemRequestFrameBaseAddress;
	U64 ReplyDescriptorPostQueueAddress;
	U64 ReplyFreeQueueAddress;
	U64 TimeStamp;
};

#define LEAPIORAID_WHOINIT_HOST_DRIVER (0x04)
#define LEAPIORAID_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01)

/* One reply-descriptor-post-queue base address in RDPQ array mode */
struct LeapioraidIOCInitRDPQArrayEntry {
	U64 RDPQBaseAddress;
	U32 Reserved1;
	U32 Reserved2;
};

/* IOC Init reply */
struct LeapioraidIOCInitRep_t {
	U8 WhoInit;
	U8 Reserved1;
	U8 MsgLength;
	U8 Function;
	U16 Reserved2;
	U8 Reserved3;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved4;
	U16 Reserved5;
	U16 IOCStatus;
	U32 IOCLogInfo;
};

/* IOC log-buffer registration request */
struct LeapioraidIOCLogReq_t {
	U16 Reserved1;
	U8 ChainOffset;
	U8 Function;
	U16 Reserved2;
	U8 Reserved3;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved4;
	U64 BufAddr;
	U32 BufSize;
};

struct LeapioraidIOCLogRep_t {
	U16 Reserved1;
	U8 MsgLength;
	U8 Function;
	U16 Reserved2;
	U8 Reserved3;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved4;
	U16 Reserved5;
	U16 IOCStatus;
	U32 IOCLogInfo;
};

/* IOC Facts request/reply: capability discovery */
struct LeapioraidIOCFactsReq_t {
	U16 Reserved1;
	U8 ChainOffset;
	U8 Function;
	U16 Reserved2;
	U8 Reserved3;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved4;
};

struct LeapioraidIOCFactsRep_t {
	U16 MsgVersion;
	U8 MsgLength;
	U8 Function;
	U16 HeaderVersion;
	U8 IOCNumber;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved1;
	U16 IOCExceptions;
	U16 IOCStatus;
	U32 IOCLogInfo;
	U8 MaxChainDepth;
	U8 WhoInit;
	U8 NumberOfPorts;
	U8 MaxMSIxVectors;
	U16 RequestCredit;
	U16 ProductID;
	U32 IOCCapabilities;	/* LEAPIORAID_IOCFACTS_CAPABILITY_* bits */
	union LEAPIORAID_VERSION_UNION FWVersion;
	U16 IOCRequestFrameSize;
	U16 IOCMaxChainSegmentSize;
	U16 MaxInitiators;
	U16 MaxTargets;
	U16 MaxSasExpanders;
	U16 MaxEnclosures;
	U16 ProtocolFlags;
	U16 HighPriorityCredit;
	U16 MaxReplyDescriptorPostQueueDepth;
	U8 ReplyFrameSize;
	U8 MaxVolumes;
	U16 MaxDevHandle;
	U16 MaxPersistentEntries;
	U16 MinDevHandle;
	U8 CurrentHostPageSize;
	U8 Reserved4;
	U8 SGEModifierMask;
	U8 SGEModifierValue;
	U8 SGEModifierShift;
	U8 Reserved5;
};

#define LEAPIORAID_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000)
#define LEAPIORAID_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
#define LEAPIORAID_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
#define LEAPIORAID_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000)
#define LEAPIORAID_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000)
#define LEAPIORAID_IOCFACTS_CAPABILITY_TLR (0x00000800)
#define LEAPIORAID_IOCFACTS_CAPABILITY_MULTICAST (0x00000100)
#define LEAPIORAID_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080)
#define LEAPIORAID_IOCFACTS_CAPABILITY_EEDP (0x00000040)
#define LEAPIORAID_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
#define LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
#define LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)

/* Port Facts request/reply */
struct LeapioraidPortFactsReq_t {
	U16 Reserved1;
	U8 ChainOffset;
	U8 Function;
	U16 Reserved2;
	U8 PortNumber;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved3;
};

struct LeapioraidPortFactsRep_t {
	U16 Reserved1;
	U8 MsgLength;
	U8 Function;
	U16 Reserved2;
	U8 PortNumber;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved3;
	U16 Reserved4;
	U16 IOCStatus;
	U32 IOCLogInfo;
	U8 Reserved5;
	U8 PortType;
	U16 Reserved6;
	U16 MaxPostedCmdBuffers;
	U16 Reserved7;
}; /* closes struct LeapioraidPortFactsRep_t begun in the previous chunk */

/* Port Enable request: starts discovery on the port */
struct LeapioraidPortEnableReq_t {
	U16 Reserved1;
	U8 ChainOffset;
	U8 Function;
	U8 Reserved2;
	U8 PortFlags;
	U8 Reserved3;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved4;
};

struct LeapioraidPortEnableRep_t {
	U16 Reserved1;
	U8 MsgLength;
	U8 Function;
	U8 Reserved2;
	U8 PortFlags;
	U8 Reserved3;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved4;
	U16 Reserved5;
	U16 IOCStatus;
	U32 IOCLogInfo;
};

#define LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS (4)
/* Event notification request: EventMasks bits select which events fire */
struct LeapioraidEventNotificationReq_t {
	U16 Reserved1;
	U8 ChainOffset;
	U8 Function;
	U16 Reserved2;
	U8 Reserved3;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved4;
	U32 Reserved5;
	U32 Reserved6;
	U32 EventMasks[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS];
	U16 SASBroadcastPrimitiveMasks;
	U16 SASNotifyPrimitiveMasks;
	U32 Reserved8;
};

/* Asynchronous event reply; EventData[] layout depends on Event code */
struct LeapioraidEventNotificationRep_t {
	U16 EventDataLength;
	U8 MsgLength;
	U8 Function;
	U16 Reserved1;
	U8 AckRequired;		/* nonzero -> host must send an EventAck */
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved2;
	U16 Reserved3;
	U16 IOCStatus;
	U32 IOCLogInfo;
	U16 Event;		/* LEAPIORAID_EVENT_* code below */
	U16 Reserved4;
	U32 EventContext;
	U32 EventData[];
};

#define LEAPIORAID_EVENT_NOTIFICATION_ACK_REQUIRED (0x01)
/* Event codes */
#define LEAPIORAID_EVENT_LOG_DATA (0x0001)
#define LEAPIORAID_EVENT_STATE_CHANGE (0x0002)
#define LEAPIORAID_EVENT_HARD_RESET_RECEIVED (0x0005)
#define LEAPIORAID_EVENT_EVENT_CHANGE (0x000A)
#define LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F)
#define LEAPIORAID_EVENT_IR_OPERATION_STATUS (0x0014)
#define LEAPIORAID_EVENT_SAS_DISCOVERY (0x0016)
#define LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017)
#define LEAPIORAID_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018)
#define LEAPIORAID_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019)
#define LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C)
#define LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D)
#define LEAPIORAID_EVENT_IR_VOLUME (0x001E)
#define LEAPIORAID_EVENT_IR_PHYSICAL_DISK (0x001F)
#define LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020)
#define LEAPIORAID_EVENT_LOG_ENTRY_ADDED (0x0021)
#define LEAPIORAID_EVENT_SAS_QUIESCE (0x0025)
#define LEAPIORAID_EVENT_TEMP_THRESHOLD (0x0027)
#define LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x0035)

/* Event data: SAS device status change */
struct LeapioraidEventDataSasDeviceStatusChange_t {
	U16 TaskTag;
	U8 ReasonCode;		/* LEAPIORAID_EVENT_SAS_DEV_STAT_RC_* */
	U8 PhysicalPort;
	U8 ASC;			/* additional sense code */
	U8 ASCQ;		/* additional sense code qualifier */
	U16 DevHandle;
	U32 Reserved2;
	U64 SASAddress;
	U8 LUN[8];
};

#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05)
#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07)
#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10)
#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11)
#define LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12)

/* Event data: integrated-RAID operation progress */
struct LeapioraidEventDataIrOpStatus_t {
	U16 VolDevHandle;
	U16 Reserved1;
	U8 RAIDOperation;	/* LEAPIORAID_EVENT_IR_RAIDOP_* */
	U8 PercentComplete;
	U16 Reserved2;
	U32 ElapsedSeconds;
};

#define LEAPIORAID_EVENT_IR_RAIDOP_RESYNC (0x00)
#define LEAPIORAID_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01)
#define LEAPIORAID_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02)
#define LEAPIORAID_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03)
#define LEAPIORAID_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04)

/* Event data: RAID volume state change */
struct LeapioraidEventDataIrVol_t {
	U16 VolDevHandle;
	U8 ReasonCode;
	U8 Reserved1;
	U32 NewValue;
	U32 PreviousValue;
};
+ +#define LEAPIORAID_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03) +struct LeapioraidEventDataIrPhyDisk_t { + U16 Reserved1; + U8 ReasonCode; + U8 PhysDiskNum; + U16 PhysDiskDevHandle; + U16 Reserved2; + U16 Slot; + U16 EnclosureHandle; + U32 NewValue; + U32 PreviousValue; +}; + +#define LEAPIORAID_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03) + +struct LeapioraidEventIrCfgEle_t { + U16 ElementFlags; + U16 VolDevHandle; + U8 ReasonCode; + U8 PhysDiskNum; + U16 PhysDiskDevHandle; +}; + +#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F) +#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000) +#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001) +#define LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED (0x01) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED (0x02) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE (0x04) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE (0x05) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_PD_CREATED (0x08) +#define LEAPIORAID_EVENT_IR_CHANGE_RC_PD_DELETED (0x09) + +struct LeapioraidEventDataIrCfgChangeList_t { + U8 NumElements; + U8 Reserved1; + U8 Reserved2; + U8 ConfigNum; + U32 Flags; + struct LeapioraidEventIrCfgEle_t ConfigElement[]; +}; + +#define LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001) +struct LeapioraidEventDataSasDiscovery_t { + U8 Flags; + U8 ReasonCode; + U8 PhysicalPort; + U8 Reserved1; + U32 DiscoveryStatus; +}; + +#define LEAPIORAID_EVENT_SAS_DISC_RC_STARTED (0x01) + +struct LeapioraidEventDataSasBroadcastPrimitive_t { + U8 PhyNum; + U8 Port; + U8 PortWidth; + U8 Primitive; +}; + +#define LEAPIORAID_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04) + +struct LEAPIORAID_EVENT_SAS_TOPO_PHY_ENTRY { + U16 AttachedDevHandle; + U8 LinkRate; + U8 PhyStatus; +}; + 
+struct LeapioraidEventDataSasTopoChangeList_t { + U16 EnclosureHandle; + U16 ExpanderDevHandle; + U8 NumPhys; + U8 Reserved1; + U16 Reserved2; + U8 NumEntries; + U8 StartPhyNum; + U8 ExpStatus; + U8 PhysicalPort; + struct LEAPIORAID_EVENT_SAS_TOPO_PHY_ENTRY PHY[]; +}; + +#define LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED (0x01) +#define LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02) +#define LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING (0x03) +#define LEAPIORAID_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04) +#define LEAPIORAID_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_MASK (0x0F) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04) +#define LEAPIORAID_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05) + +struct LeapioraidEventDataSasEnclDevStatusChange_t { + U16 EnclosureHandle; + U8 ReasonCode; + U8 PhysicalPort; + U64 EnclosureLogicalID; + U16 NumSlots; + U16 StartSlot; + U32 PhyBits; +}; + +#define LEAPIORAID_EVENT_SAS_ENCL_RC_ADDED (0x01) +#define LEAPIORAID_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02) + +struct LeapioraidEventDataSasDeviceDiscoveryError_t { + U16 DevHandle; + U8 ReasonCode; + U8 PhysicalPort; + U32 Reserved1[2]; + U64 SASAddress; + U32 Reserved2[2]; +}; + +#define LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_FAILED (0x01) +#define LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_TIMEOUT (0x02) + +struct LeapioraidEventAckReq_t { + U16 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U16 Event; + U16 Reserved5; + U32 EventContext; +}; + +struct LeapioraidFWUploadReq_t { + U8 ImageType; + U8 Reserved1; + U8 ChainOffset; + U8 Function; + U16 Reserved2; + U8 Reserved3; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved4; + U32 Reserved5; + U32 Reserved6; + U32 Reserved7; + U32 ImageOffset; + 
U32 ImageSize; /* continues struct LeapioraidFWUploadReq_t from the previous chunk */
	union LEAPIORAID_IEEE_SGE_IO_UNION SGL;
};

/* Firmware upload reply */
struct LeapioraidFWUploadRep_t {
	U8 ImageType;
	U8 Reserved1;
	U8 MsgLength;
	U8 Function;
	U16 Reserved2;
	U8 Reserved3;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved4;
	U16 Reserved5;
	U16 IOCStatus;
	U32 IOCLogInfo;
	U32 ActualImageSize;
};

/* IO Unit Control request (e.g. device removal) */
struct LeapioraidIoUnitControlReq_t {
	U8 Operation;		/* LEAPIORAID_CTRL_OP_* */
	U8 Reserved1;
	U8 ChainOffset;
	U8 Function;
	U16 DevHandle;
	U8 IOCParameter;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved3;
	U16 Reserved4;
	U8 PhyNum;
	U8 PrimFlags;
	U32 Primitive;
	U8 LookupMethod;
	U8 Reserved5;
	U16 SlotNumber;
	U64 LookupAddress;
	U32 IOCParameterValue;
	U32 IOCParameterValue2;
	U32 Reserved8;
};

#define LEAPIORAID_CTRL_OP_REMOVE_DEVICE (0x0D)

struct LeapioraidIoUnitControlRep_t {
	U8 Operation;
	U8 Reserved1;
	U8 MsgLength;
	U8 Function;
	U16 DevHandle;
	U8 IOCParameter;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved3;
	U16 Reserved4;
	U16 IOCStatus;
	U32 IOCLogInfo;
};

/* Per-action payloads for the RAID action request, selected by Action */
struct LEAPIORAID_RAID_ACTION_RATE_DATA {
	U8 RateToChange;
	U8 RateOrMode;
	U16 DataScrubDuration;
};

struct LEAPIORAID_RAID_ACTION_START_RAID_FUNCTION {
	U8 RAIDFunction;
	U8 Flags;
	U16 Reserved1;
};

struct LEAPIORAID_RAID_ACTION_STOP_RAID_FUNCTION {
	U8 RAIDFunction;
	U8 Flags;
	U16 Reserved1;
};

struct LEAPIORAID_RAID_ACTION_HOT_SPARE {
	U8 HotSparePool;
	U8 Reserved1;
	U16 DevHandle;
};

struct LEAPIORAID_RAID_ACTION_FW_UPDATE_MODE {
	U8 Flags;
	U8 DeviceFirmwareUpdateModeTimeout;
	U16 Reserved1;
};

union LEAPIORAID_RAID_ACTION_DATA {
	U32 Word;
	struct LEAPIORAID_RAID_ACTION_RATE_DATA Rates;
	struct LEAPIORAID_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction;
	struct LEAPIORAID_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction;
	struct LEAPIORAID_RAID_ACTION_HOT_SPARE HotSpare;
	struct LEAPIORAID_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode;
};

/* RAID action request */
struct LeapioraidRaidActionReq_t {
	U8 Action;
	U8 Reserved1;
	U8 ChainOffset;
	U8 Function;
	U16 VolDevHandle;
	U8 PhysDiskNum;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved2;
	U32 Reserved3;
	union LEAPIORAID_RAID_ACTION_DATA ActionDataWord;
	struct LEAPIORAID_SGE_SIMPLE_UNION ActionDataSGE;
};

/* Progress indicator returned for long-running volume operations */
struct LEAPIORAID_RAID_VOL_INDICATOR {
	U64 TotalBlocks;
	U64 BlocksRemaining;
	U32 Flags;
	U32 ElapsedSeconds;
};

struct LEAPIORAID_RAID_COMPATIBILITY_RESULT_STRUCT {
	U8 State;
	U8 Reserved1;
	U16 Reserved2;
	U32 GenericAttributes;
	U32 OEMSpecificAttributes;
	U32 Reserved3;
	U32 Reserved4;
};

/* Action-specific reply payload, interpreted per the Action in the reply */
union LEAPIORAID_RAID_ACTION_REPLY_DATA {
	U32 Word[6];
	struct LEAPIORAID_RAID_VOL_INDICATOR RaidVolumeIndicator;
	U16 VolDevHandle;
	U8 VolumeState;
	U8 PhysDiskNum;
	struct LEAPIORAID_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult;
};

/* RAID action reply */
struct LeapioraidRaidActionRep_t {
	U8 Action;
	U8 Reserved1;
	U8 MsgLength;
	U8 Function;
	U16 VolDevHandle;
	U8 PhysDiskNum;
	U8 MsgFlags;
	U8 VP_ID;
	U8 VF_ID;
	U16 Reserved2;
	U16 Reserved3;
	U16 IOCStatus;
	U32 IOCLogInfo;
	union LEAPIORAID_RAID_ACTION_REPLY_DATA ActionData;
};

/* DeviceInfo bits: protocol roles and device type (low 3 bits) */
#define LEAPIORAID_SAS_DEVICE_INFO_SEP (0x00004000)
#define LEAPIORAID_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000)
#define LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET (0x00000400)
#define LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET (0x00000200)
#define LEAPIORAID_SAS_DEVICE_INFO_SMP_TARGET (0x00000100)
#define LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080)
#define LEAPIORAID_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040)
#define LEAPIORAID_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020)
#define LEAPIORAID_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010)
#define LEAPIORAID_SAS_DEVICE_INFO_SATA_HOST (0x00000008)
#define LEAPIORAID_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007)
#define LEAPIORAID_SAS_DEVICE_INFO_NO_DEVICE (0x00000000)
#define LEAPIORAID_SAS_DEVICE_INFO_END_DEVICE (0x00000001)
#define LEAPIORAID_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002)
+#define LEAPIORAID_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003) + +struct LeapioraidSmpPassthroughReq_t { + U8 PassthroughFlags; + U8 PhysicalPort; + U8 ChainOffset; + U8 Function; + U16 RequestDataLength; + U8 SGLFlags; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U32 Reserved2; + U64 SASAddress; + U32 Reserved3; + U32 Reserved4; + union LEAPIORAID_SIMPLE_SGE_UNION SGL; +}; + +struct LeapioraidSmpPassthroughRep_t { + U8 PassthroughFlags; + U8 PhysicalPort; + U8 MsgLength; + U8 Function; + U16 ResponseDataLength; + U8 SGLFlags; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved1; + U8 Reserved2; + U8 SASStatus; + U16 IOCStatus; + U32 IOCLogInfo; + U32 Reserved3; + U8 ResponseData[4]; +}; + +struct LeapioraidSasIoUnitControlReq_t { + U8 Operation; + U8 Reserved1; + U8 ChainOffset; + U8 Function; + U16 DevHandle; + U8 IOCParameter; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U16 Reserved4; + U8 PhyNum; + U8 PrimFlags; + U32 Primitive; + U8 LookupMethod; + U8 Reserved5; + U16 SlotNumber; + U64 LookupAddress; + U32 IOCParameterValue; + U32 Reserved7; + U32 Reserved8; +}; + +#define LEAPIORAID_SAS_OP_PHY_LINK_RESET (0x06) +#define LEAPIORAID_SAS_OP_PHY_HARD_RESET (0x07) +#define LEAPIORAID_SAS_OP_REMOVE_DEVICE (0x0D) +struct LeapioraidSasIoUnitControlRep_t { + U8 Operation; + U8 Reserved1; + U8 MsgLength; + U8 Function; + U16 DevHandle; + U8 IOCParameter; + U8 MsgFlags; + U8 VP_ID; + U8 VF_ID; + U16 Reserved3; + U16 Reserved4; + U16 IOCStatus; + U32 IOCLogInfo; +}; +#endif diff --git a/drivers/scsi/leapioraid/leapioraid_app.c b/drivers/scsi/leapioraid/leapioraid_app.c new file mode 100644 index 000000000000..9d699721d1be --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid_app.c @@ -0,0 +1,2226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Management Module Support for MPT (Message Passing Technology) based + * controllers + * + * Copyright (C) 2013-2021 LSI Corporation + * Copyright (C) 2013-2021 Avago Technologies + * Copyright (C) 
2013-2021 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * Copyright (C) 2024 LeapIO Tech Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "leapioraid_func.h" + +#ifdef __KERNEL__ +#include +#endif +#include "leapioraid_func.h" + +#define LEAPIORAID_DEV_NAME "leapioraid_ctl" + +#define LEAPIORAID_MAGIC_NUMBER 'L' +#define LEAPIORAID_IOCTL_DEFAULT_TIMEOUT (10) + +#define LEAPIORAID_IOCINFO \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 17, struct leapio_ioctl_iocinfo) +#define LEAPIORAID_COMMAND \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 20, struct leapio_ioctl_command) +#ifdef CONFIG_COMPAT +#define LEAPIORAID_COMMAND32 \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 20, struct leapio_ioctl_command32) +#endif +#define LEAPIORAID_EVENTQUERY \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 21, struct leapio_ioctl_eventquery) +#define LEAPIORAID_EVENTENABLE \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 22, struct leapio_ioctl_eventenable) +#define LEAPIORAID_EVENTREPORT \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 23, struct leapio_ioctl_eventreport) +#define LEAPIORAID_HARDRESET \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 24, struct leapio_ioctl_diag_reset) +#define LEAPIORAID_BTDHMAPPING \ + _IOWR(LEAPIORAID_MAGIC_NUMBER, 31, struct leapio_ioctl_btdh_mapping) + +struct leapio_ioctl_header { + uint32_t ioc_number; + uint32_t port_number; + uint32_t max_data_size; +}; + +struct leapio_ioctl_diag_reset { + struct leapio_ioctl_header hdr; +}; + +struct leapio_ioctl_pci_info { + union { + struct { + uint32_t 
device:5; + uint32_t function:3; + uint32_t bus:24; + } bits; + uint32_t word; + } u; + uint32_t segment_id; +}; + +struct leapio_ioctl_iocinfo { + struct leapio_ioctl_header hdr; + uint32_t adapter_type; + uint32_t port_number; + uint32_t pci_id; + uint32_t hw_rev; + uint32_t subsystem_device; + uint32_t subsystem_vendor; + uint32_t rsvd0; + uint32_t firmware_version; + uint32_t bios_version; + uint8_t driver_version[32]; + uint8_t rsvd1; + uint8_t scsi_id; + uint16_t rsvd2; + struct leapio_ioctl_pci_info pci_information; +}; + +#define LEAPIORAID_CTL_EVENT_LOG_SIZE (200) +struct leapio_ioctl_eventquery { + struct leapio_ioctl_header hdr; + uint16_t event_entries; + uint16_t rsvd; + uint32_t event_types[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS]; +}; + +struct leapio_ioctl_eventenable { + struct leapio_ioctl_header hdr; + uint32_t event_types[4]; +}; + +#define LEAPIORAID_EVENT_DATA_SIZE (192) +struct LEAPIORAID_IOCTL_EVENTS { + uint32_t event; + uint32_t context; + uint8_t data[LEAPIORAID_EVENT_DATA_SIZE]; +}; + +struct leapio_ioctl_eventreport { + struct leapio_ioctl_header hdr; + struct LEAPIORAID_IOCTL_EVENTS event_data[]; +}; + +struct leapio_ioctl_command { + struct leapio_ioctl_header hdr; + uint32_t timeout; + void __user *reply_frame_buf_ptr; + void __user *data_in_buf_ptr; + void __user *data_out_buf_ptr; + void __user *sense_data_ptr; + uint32_t max_reply_bytes; + uint32_t data_in_size; + uint32_t data_out_size; + uint32_t max_sense_bytes; + uint32_t data_sge_offset; + uint8_t mf[]; +}; + +#ifdef CONFIG_COMPAT +struct leapio_ioctl_command32 { + struct leapio_ioctl_header hdr; + uint32_t timeout; + uint32_t reply_frame_buf_ptr; + uint32_t data_in_buf_ptr; + uint32_t data_out_buf_ptr; + uint32_t sense_data_ptr; + uint32_t max_reply_bytes; + uint32_t data_in_size; + uint32_t data_out_size; + uint32_t max_sense_bytes; + uint32_t data_sge_offset; + uint8_t mf[]; +}; +#endif + +struct leapio_ioctl_btdh_mapping { + struct leapio_ioctl_header hdr; + uint32_t id; 
+ uint32_t bus; + uint16_t handle; + uint16_t rsvd; +}; + +static struct fasync_struct *leapioraid_async_queue; +static DECLARE_WAIT_QUEUE_HEAD(leapioraid_ctl_poll_wait); + +enum leapioraid_block_state { + NON_BLOCKING, + BLOCKING, +}; + +static void +leapioraid_ctl_display_some_debug( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, + char *calling_function_name, + struct LeapioraidDefaultRep_t *mpi_reply) +{ + struct LeapioraidCfgReq_t *mpi_request; + char *desc = NULL; + + if (!(ioc->logging_level & LEAPIORAID_DEBUG_IOCTL)) + return; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + switch (mpi_request->Function) { + case LEAPIORAID_FUNC_SCSI_IO_REQUEST: + { + struct LeapioSCSIIOReq_t *scsi_request = + (struct LeapioSCSIIOReq_t *) mpi_request; + snprintf(ioc->tmp_string, LEAPIORAID_STRING_LENGTH, + "scsi_io, cmd(0x%02x), cdb_len(%d)", + scsi_request->CDB.CDB32[0], + le16_to_cpu(scsi_request->IoFlags) & 0xF); + desc = ioc->tmp_string; + break; + } + case LEAPIORAID_FUNC_SCSI_TASK_MGMT: + desc = "task_mgmt"; + break; + case LEAPIORAID_FUNC_IOC_INIT: + desc = "ioc_init"; + break; + case LEAPIORAID_FUNC_IOC_FACTS: + desc = "ioc_facts"; + break; + case LEAPIORAID_FUNC_CONFIG: + { + struct LeapioraidCfgReq_t *config_request = + (struct LeapioraidCfgReq_t *) mpi_request; + snprintf(ioc->tmp_string, LEAPIORAID_STRING_LENGTH, + "config, type(0x%02x), ext_type(0x%02x), number(%d)", + (config_request->Header.PageType & + LEAPIORAID_CONFIG_PAGETYPE_MASK), + config_request->ExtPageType, + config_request->Header.PageNumber); + desc = ioc->tmp_string; + break; + } + case LEAPIORAID_FUNC_PORT_FACTS: + desc = "port_facts"; + break; + case LEAPIORAID_FUNC_PORT_ENABLE: + desc = "port_enable"; + break; + case LEAPIORAID_FUNC_EVENT_NOTIFICATION: + desc = "event_notification"; + break; + case LEAPIORAID_FUNC_FW_DOWNLOAD: + desc = "fw_download"; + break; + case LEAPIORAID_FUNC_FW_UPLOAD: + desc = "fw_upload"; + break; + case LEAPIORAID_FUNC_RAID_ACTION: + desc = "raid_action"; + 
break; + case LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH: + { + struct LeapioSCSIIOReq_t *scsi_request = + (struct LeapioSCSIIOReq_t *) mpi_request; + snprintf(ioc->tmp_string, LEAPIORAID_STRING_LENGTH, + "raid_pass, cmd(0x%02x), cdb_len(%d)", + scsi_request->CDB.CDB32[0], + le16_to_cpu(scsi_request->IoFlags) & 0xF); + desc = ioc->tmp_string; + break; + } + case LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL: + desc = "sas_iounit_cntl"; + break; + case LEAPIORAID_FUNC_SATA_PASSTHROUGH: + desc = "sata_pass"; + break; + case LEAPIORAID_FUNC_SMP_PASSTHROUGH: + desc = "smp_passthrough"; + break; + } + if (!desc) + return; + pr_info("%s %s: %s, smid(%d)\n", + ioc->name, calling_function_name, desc, smid); + if (!mpi_reply) + return; + if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) + pr_info( + "%s \tiocstatus(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST || + mpi_request->Function == + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH) { + struct LeapioraidSCSIIORep_t *scsi_reply = + (struct LeapioraidSCSIIORep_t *) mpi_reply; + struct leapioraid_sas_device *sas_device = NULL; + + sas_device = leapioraid_get_sdev_by_handle(ioc, + le16_to_cpu(scsi_reply->DevHandle)); + if (sas_device) { + pr_info("%s \tsas_address(0x%016llx), phy(%d)\n", + ioc->name, (unsigned long long) + sas_device->sas_address, sas_device->phy); + if (sas_device->enclosure_handle != 0) + pr_info( + "%s \tenclosure_logical_id(0x%016llx), slot(%d)\n", + ioc->name, (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot); + leapioraid_sas_device_put(sas_device); + } + if (scsi_reply->SCSIState || scsi_reply->SCSIStatus) + pr_info( + "%s \tscsi_state(0x%02x), scsi_status (0x%02x)\n", + ioc->name, scsi_reply->SCSIState, scsi_reply->SCSIStatus); + } +} + +u8 +leapioraid_ctl_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct 
LeapioraidDefaultRep_t *mpi_reply; + struct LeapioraidSCSIIORep_t *scsiio_reply; + const void *sense_data; + u32 sz; + + if (ioc->ctl_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + if (ioc->ctl_cmds.smid != smid) + return 1; + ioc->ctl_cmds.status |= LEAPIORAID_CMD_COMPLETE; + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) { + memcpy(ioc->ctl_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + ioc->ctl_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + if (mpi_reply->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST || + mpi_reply->Function == + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH) { + scsiio_reply = (struct LeapioraidSCSIIORep_t *) mpi_reply; + if (scsiio_reply->SCSIState & + LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) { + sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, + le32_to_cpu(scsiio_reply->SenseCount)); + sense_data = + leapioraid_base_get_sense_buffer(ioc, smid); + memcpy(ioc->ctl_cmds.sense, sense_data, sz); + } + } + } + leapioraid_ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply); + ioc->ctl_cmds.status &= ~LEAPIORAID_CMD_PENDING; + complete(&ioc->ctl_cmds.done); + return 1; +} + +static int leapioraid_ctl_check_event_type( + struct LEAPIORAID_ADAPTER *ioc, u16 event) +{ + u16 i; + u32 desired_event; + + if (event >= 128 || !event || !ioc->event_log) + return 0; + desired_event = (1 << (event % 32)); + if (!desired_event) + desired_event = 1; + i = event / 32; + return desired_event & ioc->event_type[i]; +} + +void +leapioraid_ctl_add_to_event_log( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventNotificationRep_t *mpi_reply) +{ + struct LEAPIORAID_IOCTL_EVENTS *event_log; + u16 event; + int i; + u32 sz, event_data_sz; + u8 send_aen = 0; + + if (!ioc->event_log) + return; + event = le16_to_cpu(mpi_reply->Event); + if (leapioraid_ctl_check_event_type(ioc, event)) { + i = ioc->event_context % LEAPIORAID_CTL_EVENT_LOG_SIZE; + event_log = ioc->event_log; + event_log[i].event = event; + event_log[i].context = 
ioc->event_context++; + event_data_sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; + sz = min_t(u32, event_data_sz, LEAPIORAID_EVENT_DATA_SIZE); + memset(event_log[i].data, 0, LEAPIORAID_EVENT_DATA_SIZE); + memcpy(event_log[i].data, mpi_reply->EventData, sz); + send_aen = 1; + } + if (event == LEAPIORAID_EVENT_LOG_ENTRY_ADDED || + (send_aen && !ioc->aen_event_read_flag)) { + ioc->aen_event_read_flag = 1; + wake_up_interruptible(&leapioraid_ctl_poll_wait); + if (leapioraid_async_queue) + kill_fasync(&leapioraid_async_queue, SIGIO, POLL_IN); + } +} + +u8 +leapioraid_ctl_event_callback( + struct LEAPIORAID_ADAPTER *ioc, u8 msix_index, + u32 reply) +{ + struct LeapioraidEventNotificationRep_t *mpi_reply; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) + leapioraid_ctl_add_to_event_log(ioc, mpi_reply); + return 1; +} + +static int +leapioraid_ctl_verify_adapter( + int ioc_number, struct LEAPIORAID_ADAPTER **iocpp) +{ + struct LEAPIORAID_ADAPTER *ioc; + + spin_lock(&leapioraid_gioc_lock); + list_for_each_entry(ioc, &leapioraid_ioc_list, list) { + if (ioc->id != ioc_number) + continue; + spin_unlock(&leapioraid_gioc_lock); + *iocpp = ioc; + return ioc_number; + } + spin_unlock(&leapioraid_gioc_lock); + *iocpp = NULL; + return -1; +} + +void +leapioraid_ctl_clear_outstanding_ioctls(struct LEAPIORAID_ADAPTER *ioc) +{ + if (ioc->ctl_cmds.status & LEAPIORAID_CMD_PENDING) { + ioc->ctl_cmds.status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, ioc->ctl_cmds.smid); + complete(&ioc->ctl_cmds.done); + } +} + +void +leapioraid_ctl_reset_handler(struct LEAPIORAID_ADAPTER *ioc, int reset_phase) +{ + switch (reset_phase) { + case LEAPIORAID_IOC_PRE_RESET_PHASE: + dtmprintk(ioc, pr_info( + "%s %s: LEAPIORAID_IOC_PRE_RESET_PHASE\n", ioc->name, + __func__)); + break; + case LEAPIORAID_IOC_AFTER_RESET_PHASE: + dtmprintk(ioc, pr_info( + "%s %s: LEAPIORAID_IOC_AFTER_RESET_PHASE\n", ioc->name, + __func__)); + 
leapioraid_ctl_clear_outstanding_ioctls(ioc); + break; + case LEAPIORAID_IOC_DONE_RESET_PHASE: + dtmprintk(ioc, pr_info( + "%s %s: LEAPIORAID_IOC_DONE_RESET_PHASE\n", ioc->name, + __func__)); + break; + } +} + +static int +leapioraid_ctl_fasync(int fd, struct file *filep, int mode) +{ + return fasync_helper(fd, filep, mode, &leapioraid_async_queue); +} + +int +leapioraid_ctl_release(struct inode *inode, struct file *filep) +{ + return fasync_helper(-1, filep, 0, &leapioraid_async_queue); +} + +static unsigned int +leapioraid_ctl_poll(struct file *filep, poll_table *wait) +{ + struct LEAPIORAID_ADAPTER *ioc; + + poll_wait(filep, &leapioraid_ctl_poll_wait, wait); + spin_lock(&leapioraid_gioc_lock); + list_for_each_entry(ioc, &leapioraid_ioc_list, list) { + if (ioc->aen_event_read_flag) { + spin_unlock(&leapioraid_gioc_lock); + return POLLIN | POLLRDNORM; + } + } + spin_unlock(&leapioraid_gioc_lock); + return 0; +} + +static int +leapioraid_ctl_set_task_mid(struct LEAPIORAID_ADAPTER *ioc, + struct leapio_ioctl_command *karg, + struct LeapioraidSCSITmgReq_t *tm_request) +{ + u8 found = 0; + u16 smid; + u16 handle; + struct scsi_cmnd *scmd; + struct LEAPIORAID_DEVICE *priv_data; + struct LeapioraidSCSITmgRep_t *tm_reply; + u32 sz; + u32 lun; + char *desc = NULL; + struct leapioraid_scsiio_tracker *st = NULL; + + if (tm_request->TaskType == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + desc = "abort_task"; + else if (tm_request->TaskType == + LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK) + desc = "query_task"; + else + return 0; + lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); + handle = le16_to_cpu(tm_request->DevHandle); + for (smid = ioc->shost->can_queue; smid && !found; smid--) { + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (scmd == NULL || scmd->device == NULL || + scmd->device->hostdata == NULL) + continue; + if (lun != scmd->device->lun) + continue; + priv_data = scmd->device->hostdata; + if (priv_data->sas_target == NULL) + continue; 
+ if (priv_data->sas_target->handle != handle) + continue; + st = leapioraid_base_scsi_cmd_priv(scmd); + if ((!st) || (st->smid == 0)) + continue; + if (!tm_request->TaskMID || tm_request->TaskMID == st->smid) { + tm_request->TaskMID = cpu_to_le16(st->smid); + found = 1; + } + } + if (!found) { + dctlprintk(ioc, pr_info( + "%s %s: handle(0x%04x), lun(%d), no active mid!!\n", + ioc->name, desc, + le16_to_cpu(tm_request->DevHandle), + lun)); + tm_reply = ioc->ctl_cmds.reply; + tm_reply->DevHandle = tm_request->DevHandle; + tm_reply->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + tm_reply->TaskType = tm_request->TaskType; + tm_reply->MsgLength = + sizeof(struct LeapioraidSCSITmgRep_t) / 4; + tm_reply->VP_ID = tm_request->VP_ID; + tm_reply->VF_ID = tm_request->VF_ID; + sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz); + if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply, + sz)) + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + return 1; + } + dctlprintk(ioc, pr_info( + "%s %s: handle(0x%04x), lun(%d), task_mid(%d)\n", + ioc->name, desc, + le16_to_cpu(tm_request->DevHandle), lun, + le16_to_cpu(tm_request->TaskMID))); + return 0; +} + +static long +leapioraid_ctl_do_command(struct LEAPIORAID_ADAPTER *ioc, + struct leapio_ioctl_command karg, void __user *mf) +{ + struct LeapioraidReqHeader_t *mpi_request = NULL, *request; + struct LeapioraidDefaultRep_t *mpi_reply; + u16 smid; + unsigned long timeout; + u8 issue_reset; + u32 sz, sz_arg; + void *psge; + void *data_out = NULL; + dma_addr_t data_out_dma = 0; + size_t data_out_sz = 0; + void *data_in = NULL; + dma_addr_t data_in_dma = 0; + size_t data_in_sz = 0; + long ret; + u16 device_handle = LEAPIORAID_INVALID_DEVICE_HANDLE; + + issue_reset = 0; + if (ioc->ctl_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: ctl_cmd in use\n", + ioc->name, __func__); + ret = -EAGAIN; + goto out; + } + ret = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (ret) + goto out; + mpi_request 
= kzalloc(ioc->request_sz, GFP_KERNEL); + if (!mpi_request) { + ret = -ENOMEM; + goto out; + } + if (karg.data_sge_offset * 4 > ioc->request_sz || + karg.data_sge_offset > (UINT_MAX / 4)) { + ret = -EINVAL; + goto out; + } + if (copy_from_user(mpi_request, mf, karg.data_sge_offset * 4)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, + __func__); + ret = -EFAULT; + goto out; + } + if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT) { + smid = leapioraid_base_get_smid_hpr(ioc, ioc->ctl_cb_idx); + if (!smid) { + pr_err( + "%s %s: failed obtaining a smid\n", ioc->name, + __func__); + ret = -EAGAIN; + goto out; + } + } else { + smid = ioc->shost->can_queue + LEAPIORAID_INTERNAL_SCSIIO_FOR_IOCTL; + } + ret = 0; + ioc->ctl_cmds.status = LEAPIORAID_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + request = leapioraid_base_get_msg_frame(ioc, smid); + memset(request, 0, ioc->request_sz); + memcpy(request, mpi_request, karg.data_sge_offset * 4); + ioc->ctl_cmds.smid = smid; + data_out_sz = karg.data_out_size; + data_in_sz = karg.data_in_size; + if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST || + mpi_request->Function == LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH + || mpi_request->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT + || mpi_request->Function == LEAPIORAID_FUNC_SATA_PASSTHROUGH) { + device_handle = le16_to_cpu(mpi_request->FunctionDependent1); + if (!device_handle || (device_handle > ioc->facts.MaxDevHandle)) { + ret = -EINVAL; + leapioraid_base_free_smid(ioc, smid); + goto out; + } + } + if (data_out_sz) { + data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz, + &data_out_dma, GFP_ATOMIC); + if (!data_out) { + ret = -ENOMEM; + leapioraid_base_free_smid(ioc, smid); + goto out; + } + if (copy_from_user(data_out, karg.data_out_buf_ptr, + data_out_sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -EFAULT; + leapioraid_base_free_smid(ioc, smid); + goto out; + } + } + if (data_in_sz) { 
+ data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz, + &data_in_dma, GFP_ATOMIC); + if (!data_in) { + ret = -ENOMEM; + leapioraid_base_free_smid(ioc, smid); + goto out; + } + } + psge = (void *)request + (karg.data_sge_offset * 4); + leapioraid_ctl_display_some_debug(ioc, smid, "ctl_request", NULL); + init_completion(&ioc->ctl_cmds.done); + switch (mpi_request->Function) { + case LEAPIORAID_FUNC_SCSI_IO_REQUEST: + case LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH: + { + struct LeapioSCSIIOReq_t *scsiio_request = + (struct LeapioSCSIIOReq_t *) request; + scsiio_request->SenseBufferLength = + SCSI_SENSE_BUFFERSIZE; + scsiio_request->SenseBufferLowAddress = + leapioraid_base_get_sense_buffer_dma(ioc, smid); + memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE); + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + if (test_bit + (device_handle, ioc->device_remove_in_progress)) { + dtmprintk(ioc, + pr_info( + "%s handle(0x%04x) :ioctl failed due to device removal in progress\n", + ioc->name, device_handle)); + leapioraid_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + if (mpi_request->Function == + LEAPIORAID_FUNC_SCSI_IO_REQUEST) + ioc->put_smid_scsi_io(ioc, smid, device_handle); + else + ioc->put_smid_default(ioc, smid); + break; + } + case LEAPIORAID_FUNC_SCSI_TASK_MGMT: + { + struct LeapioraidSCSITmgReq_t *tm_request = + (struct LeapioraidSCSITmgReq_t *) request; + dtmprintk(ioc, + pr_info("%s TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n", + ioc->name, + le16_to_cpu(tm_request->DevHandle), + tm_request->TaskType)); + ioc->got_task_abort_from_ioctl = 1; + if (tm_request->TaskType == + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK || + tm_request->TaskType == + LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { + if (leapioraid_ctl_set_task_mid(ioc, &karg, tm_request)) { + leapioraid_base_free_smid(ioc, smid); + ioc->got_task_abort_from_ioctl = 0; + goto out; + } + } + ioc->got_task_abort_from_ioctl = 0; + if (test_bit + 
(device_handle, ioc->device_remove_in_progress)) { + dtmprintk(ioc, + pr_info( + "%s handle(0x%04x) :ioctl failed due to device removal in progress\n", + ioc->name, device_handle)); + leapioraid_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + leapioraid_scsihost_set_tm_flag(ioc, + le16_to_cpu(tm_request->DevHandle)); + ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + ioc->put_smid_hi_priority(ioc, smid, 0); + break; + } + case LEAPIORAID_FUNC_SMP_PASSTHROUGH: + { + struct LeapioraidSmpPassthroughReq_t *smp_request = + (struct LeapioraidSmpPassthroughReq_t *) mpi_request; + u8 *data; + + if (!ioc->multipath_on_hba) + smp_request->PhysicalPort = 0xFF; + if (smp_request->PassthroughFlags & + 0x80) + data = (u8 *) &smp_request->SGL; + else { + if (unlikely(data_out == NULL)) { + pr_err( + "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + leapioraid_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + data = data_out; + } + if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) { + ioc->ioc_link_reset_in_progress = 1; + ioc->ignore_loginfos = 1; + } + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + ioc->put_smid_default(ioc, smid); + break; + } + case LEAPIORAID_FUNC_SATA_PASSTHROUGH: + { + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + if (test_bit + (device_handle, ioc->device_remove_in_progress)) { + dtmprintk(ioc, + pr_info( + "%s handle(0x%04x) :ioctl failed due to device removal in progress\n", + ioc->name, device_handle)); + leapioraid_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + ioc->put_smid_default(ioc, smid); + break; + } + case LEAPIORAID_FUNC_FW_DOWNLOAD: + case LEAPIORAID_FUNC_FW_UPLOAD: + { + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + ioc->put_smid_default(ioc, smid); + break; + } + case LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL: + { + struct 
LeapioraidSasIoUnitControlReq_t *sasiounit_request = + (struct LeapioraidSasIoUnitControlReq_t *) mpi_request; + if (sasiounit_request->Operation == + LEAPIORAID_SAS_OP_PHY_HARD_RESET + || sasiounit_request->Operation == + LEAPIORAID_SAS_OP_PHY_LINK_RESET) { + ioc->ioc_link_reset_in_progress = 1; + ioc->ignore_loginfos = 1; + } + } + fallthrough; + default: + ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + ioc->put_smid_default(ioc, smid); + break; + } + timeout = karg.timeout; + if (timeout < LEAPIORAID_IOCTL_DEFAULT_TIMEOUT) + timeout = LEAPIORAID_IOCTL_DEFAULT_TIMEOUT; + wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout * HZ); + if (mpi_request->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT) { + struct LeapioraidSCSITmgReq_t *tm_request = + (struct LeapioraidSCSITmgReq_t *) mpi_request; + leapioraid_scsihost_clear_tm_flag(ioc, + le16_to_cpu(tm_request->DevHandle)); + } else if ((mpi_request->Function == LEAPIORAID_FUNC_SMP_PASSTHROUGH + || mpi_request->Function == + LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL) + && ioc->ioc_link_reset_in_progress) { + ioc->ioc_link_reset_in_progress = 0; + ioc->ignore_loginfos = 0; + } + if (!(ioc->ctl_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + karg.data_sge_offset, issue_reset); + goto issue_host_reset; + } + mpi_reply = ioc->ctl_cmds.reply; + if (mpi_reply->Function == LEAPIORAID_FUNC_SCSI_TASK_MGMT && + (ioc->logging_level & LEAPIORAID_DEBUG_TM)) { + struct LeapioraidSCSITmgRep_t *tm_reply = + (struct LeapioraidSCSITmgRep_t *) mpi_reply; + pr_info( + "%s TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n", + ioc->name, + le16_to_cpu(tm_reply->IOCStatus), + le32_to_cpu(tm_reply->IOCLogInfo), + le32_to_cpu(tm_reply->TerminationCount)); + } + if (data_in_sz) { + if (copy_to_user(karg.data_in_buf_ptr, data_in, data_in_sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + 
ret = -ENODATA; + goto out; + } + } + if (karg.max_reply_bytes) { + sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz); + if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply, + sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + if (karg.max_sense_bytes && (mpi_request->Function == + LEAPIORAID_FUNC_SCSI_IO_REQUEST + || mpi_request->Function == + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH)) { + if (karg.sense_data_ptr == NULL) { + pr_err( + "%s Response buffer provided by application is NULL; Response data will not be returned.\n", + ioc->name); + goto out; + } + sz_arg = SCSI_SENSE_BUFFERSIZE; + sz = min_t(u32, karg.max_sense_bytes, sz_arg); + if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, sz)) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } +issue_host_reset: + if (issue_reset) { + ret = -ENODATA; + if ((mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST + || mpi_request->Function == + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH + || mpi_request->Function == + LEAPIORAID_FUNC_SATA_PASSTHROUGH)) { + pr_err( + "%s issue target reset: handle = (0x%04x)\n", + ioc->name, + le16_to_cpu(mpi_request->FunctionDependent1)); + leapioraid_halt_firmware(ioc, 0); + leapioraid_scsihost_issue_locked_tm(ioc, + le16_to_cpu + (mpi_request->FunctionDependent1), + 0, 0, 0, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + smid, 30, + LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET); + } else + leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER); + } +out: + if (data_in) + dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in, + data_in_dma); + if (data_out) + dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out, + data_out_dma); + kfree(mpi_request); + ioc->ctl_cmds.status = LEAPIORAID_CMD_NOT_USED; + return ret; +} + +static long +leapioraid_ctl_getiocinfo( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct 
leapio_ioctl_iocinfo karg; + u8 revision; + + dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name, + __func__)); + memset(&karg, 0, sizeof(karg)); + if (ioc->pfacts) + karg.port_number = ioc->pfacts[0].PortNumber; + pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); + karg.hw_rev = revision; + karg.pci_id = ioc->pdev->device; + karg.subsystem_device = ioc->pdev->subsystem_device; + karg.subsystem_vendor = ioc->pdev->subsystem_vendor; + karg.pci_information.u.bits.bus = ioc->pdev->bus->number; + karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn); + karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn); + karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus); + karg.firmware_version = ioc->facts.FWVersion.Word; + strscpy(karg.driver_version, ioc->driver_name, sizeof(karg.driver_version)); + strcat(karg.driver_version, "-"); + karg.adapter_type = 0x06; + strcat(karg.driver_version, LEAPIORAID_DRIVER_VERSION); + karg.adapter_type = 0x07; + karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +static long +leapioraid_ctl_eventquery( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct leapio_ioctl_eventquery karg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name, + __func__)); + karg.event_entries = LEAPIORAID_CTL_EVENT_LOG_SIZE; + memcpy(karg.event_types, ioc->event_type, + LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +static long +leapioraid_ctl_eventenable( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct 
leapio_ioctl_eventenable karg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name, + __func__)); + memcpy(ioc->event_type, karg.event_types, + LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); + leapioraid_base_validate_event_type(ioc, ioc->event_type); + if (ioc->event_log) + return 0; + ioc->event_context = 0; + ioc->aen_event_read_flag = 0; + ioc->event_log = kcalloc(LEAPIORAID_CTL_EVENT_LOG_SIZE, + sizeof(struct LEAPIORAID_IOCTL_EVENTS), + GFP_KERNEL); + if (!ioc->event_log) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENOMEM; + } + return 0; +} + +static long +leapioraid_ctl_eventreport( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct leapio_ioctl_eventreport karg; + u32 number_bytes, max_events, max; + struct leapio_ioctl_eventreport __user *uarg = arg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name, + __func__)); + number_bytes = karg.hdr.max_data_size - + sizeof(struct leapio_ioctl_header); + max_events = number_bytes / sizeof(struct LEAPIORAID_IOCTL_EVENTS); + max = min_t(u32, LEAPIORAID_CTL_EVENT_LOG_SIZE, max_events); + if (!max || !ioc->event_log) + return -ENODATA; + number_bytes = max * sizeof(struct LEAPIORAID_IOCTL_EVENTS); + if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + ioc->aen_event_read_flag = 0; + return 0; +} + +static long +leapioraid_ctl_do_reset( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct leapio_ioctl_diag_reset karg; + int retval; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + 
return -EFAULT; + } + if (ioc->shost_recovery || + ioc->pci_error_recovery || ioc->is_driver_loading || + ioc->remove_host) + return -EAGAIN; + dctlprintk(ioc, pr_info("%s %s: enter\n", ioc->name, + __func__)); + ioc->reset_from_user = 1; + scsi_block_requests(ioc->shost); + retval = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + scsi_unblock_requests(ioc->shost); + pr_info("%s ioctl: host reset: %s\n", + ioc->name, ((!retval) ? "SUCCESS" : "FAILED")); + return 0; +} + +static int +leapioraid_ctl_btdh_search_sas_device(struct LEAPIORAID_ADAPTER *ioc, + struct leapio_ioctl_btdh_mapping *btdh) +{ + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->sas_device_list)) + return rc; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == sas_device->handle) { + btdh->bus = sas_device->channel; + btdh->id = sas_device->id; + rc = 1; + goto out; + } else if (btdh->bus == sas_device->channel && btdh->id == + sas_device->id && btdh->handle == 0xFFFF) { + btdh->handle = sas_device->handle; + rc = 1; + goto out; + } + } +out: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +static int +leapioraid_ctl_btdh_search_raid_device(struct LEAPIORAID_ADAPTER *ioc, + struct leapio_ioctl_btdh_mapping *btdh) +{ + struct leapioraid_raid_device *raid_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->raid_device_list)) + return rc; + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == raid_device->handle) { + btdh->bus = raid_device->channel; + btdh->id = raid_device->id; + rc = 1; + goto out; + } else if (btdh->bus == raid_device->channel && btdh->id == + raid_device->id && btdh->handle == 0xFFFF) { + 
btdh->handle = raid_device->handle; + rc = 1; + goto out; + } + } +out: + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return rc; +} + +static long +leapioraid_ctl_btdh_mapping( + struct LEAPIORAID_ADAPTER *ioc, void __user *arg) +{ + struct leapio_ioctl_btdh_mapping karg; + int rc; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + dctlprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + rc = leapioraid_ctl_btdh_search_sas_device(ioc, &karg); + if (!rc) + leapioraid_ctl_btdh_search_raid_device(ioc, &karg); + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +#ifdef CONFIG_COMPAT +static long +leapioraid_ctl_compat_command( + struct LEAPIORAID_ADAPTER *ioc, unsigned int cmd, + void __user *arg) +{ + struct leapio_ioctl_command32 karg32; + struct leapio_ioctl_command32 __user *uarg; + struct leapio_ioctl_command karg; + + if (_IOC_SIZE(cmd) != sizeof(struct leapio_ioctl_command32)) + return -EINVAL; + uarg = (struct leapio_ioctl_command32 __user *)arg; + if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + memset(&karg, 0, sizeof(struct leapio_ioctl_command)); + karg.hdr.ioc_number = karg32.hdr.ioc_number; + karg.hdr.port_number = karg32.hdr.port_number; + karg.hdr.max_data_size = karg32.hdr.max_data_size; + karg.timeout = karg32.timeout; + karg.max_reply_bytes = karg32.max_reply_bytes; + karg.data_in_size = karg32.data_in_size; + karg.data_out_size = karg32.data_out_size; + karg.max_sense_bytes = karg32.max_sense_bytes; + karg.data_sge_offset = karg32.data_sge_offset; + karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr); + karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr); + karg.data_out_buf_ptr = 
compat_ptr(karg32.data_out_buf_ptr); + karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr); + return leapioraid_ctl_do_command(ioc, karg, &uarg->mf); +} +#endif + +static long +leapioraid_ctl_ioctl_main( + struct file *file, unsigned int cmd, void __user *arg, + u8 compat) +{ + struct LEAPIORAID_ADAPTER *ioc; + struct leapio_ioctl_header ioctl_header; + enum leapioraid_block_state state; + long ret = -ENOIOCTLCMD; + + if (copy_from_user(&ioctl_header, (char __user *)arg, + sizeof(struct leapio_ioctl_header))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + if (leapioraid_ctl_verify_adapter(ioctl_header.ioc_number, + &ioc) == -1 || !ioc) + return -ENODEV; + mutex_lock(&ioc->pci_access_mutex); + if (ioc->shost_recovery || + ioc->pci_error_recovery || ioc->is_driver_loading || + ioc->remove_host) { + ret = -EAGAIN; + goto unlock_pci_access; + } + state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING; + if (state == NON_BLOCKING) { + if (!mutex_trylock(&ioc->ctl_cmds.mutex)) { + ret = -EAGAIN; + goto unlock_pci_access; + } + } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) { + ret = -ERESTARTSYS; + goto unlock_pci_access; + } + switch (cmd) { + case LEAPIORAID_IOCINFO: + if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_iocinfo)) + ret = leapioraid_ctl_getiocinfo(ioc, arg); + break; +#ifdef CONFIG_COMPAT + case LEAPIORAID_COMMAND32: +#endif + case LEAPIORAID_COMMAND: + { + struct leapio_ioctl_command __user *uarg; + struct leapio_ioctl_command karg; + +#ifdef CONFIG_COMPAT + if (compat) { + ret = + leapioraid_ctl_compat_command(ioc, cmd, arg); + break; + } +#endif + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + ret = -EFAULT; + break; + } + if (karg.hdr.ioc_number != ioctl_header.ioc_number) { + ret = -EINVAL; + break; + } + if (_IOC_SIZE(cmd) == + sizeof(struct leapio_ioctl_command)) { + uarg = arg; + ret = + 
leapioraid_ctl_do_command(ioc, karg, + &uarg->mf); + } + break; + } + case LEAPIORAID_EVENTQUERY: + if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_eventquery)) + ret = leapioraid_ctl_eventquery(ioc, arg); + break; + case LEAPIORAID_EVENTENABLE: + if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_eventenable)) + ret = leapioraid_ctl_eventenable(ioc, arg); + break; + case LEAPIORAID_EVENTREPORT: + ret = leapioraid_ctl_eventreport(ioc, arg); + break; + case LEAPIORAID_HARDRESET: + if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_diag_reset)) + ret = leapioraid_ctl_do_reset(ioc, arg); + break; + case LEAPIORAID_BTDHMAPPING: + if (_IOC_SIZE(cmd) == sizeof(struct leapio_ioctl_btdh_mapping)) + ret = leapioraid_ctl_btdh_mapping(ioc, arg); + break; + default: + dctlprintk(ioc, pr_err( + "%s unsupported ioctl opcode(0x%08x)\n", + ioc->name, cmd)); + break; + } + mutex_unlock(&ioc->ctl_cmds.mutex); +unlock_pci_access: + mutex_unlock(&ioc->pci_access_mutex); + return ret; +} + +static long +leapioraid_ctl_ioctl( + struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + + ret = leapioraid_ctl_ioctl_main(file, cmd, (void __user *)arg, 0); + return ret; +} + +#ifdef CONFIG_COMPAT +static long +leapioraid_ctl_ioctl_compat( + struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + + ret = leapioraid_ctl_ioctl_main(file, cmd, (void __user *)arg, 1); + return ret; +} +#endif + +static ssize_t +version_fw_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", + (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, + (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, + (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, + ioc->facts.FWVersion.Word & 0x000000FF); +} +static DEVICE_ATTR_RO(version_fw); + +static ssize_t +version_bios_show( + struct device *cdev, struct 
device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + + return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", + (version & 0xFF000000) >> 24, + (version & 0x00FF0000) >> 16, + (version & 0x0000FF00) >> 8, version & 0x000000FF); +} +static DEVICE_ATTR_RO(version_bios); + +static ssize_t +version_leapioraid_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%03x.%02x\n", + ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8); +} +static DEVICE_ATTR_RO(version_leapioraid); + +static ssize_t +version_product_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName); +} +static DEVICE_ATTR_RO(version_product); + +static ssize_t +version_nvdata_persistent_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%08xh\n", + le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word)); +} +static DEVICE_ATTR_RO(version_nvdata_persistent); + +static ssize_t +version_nvdata_default_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%08xh\n", + le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word)); +} +static DEVICE_ATTR_RO(version_nvdata_default); + +static ssize_t +board_name_show( + struct device 
*cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName); +} +static DEVICE_ATTR_RO(board_name); + +static ssize_t +board_assembly_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly); +} +static DEVICE_ATTR_RO(board_assembly); + +static ssize_t +board_tracer_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber); +} +static DEVICE_ATTR_RO(board_tracer); + +static ssize_t +io_delay_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay); +} +static DEVICE_ATTR_RO(io_delay); + +static ssize_t +device_delay_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay); +} +static DEVICE_ATTR_RO(device_delay); + +static ssize_t +fw_queue_depth_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit); +} +static DEVICE_ATTR_RO(fw_queue_depth); + +static ssize_t 
+host_sas_address_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "0x%016llx\n", + (unsigned long long)ioc->sas_hba.sas_address); +} +static DEVICE_ATTR_RO(host_sas_address); + +static ssize_t +logging_level_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level); +} + +static ssize_t +logging_level_store( + struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + int val = 0; + + if (kstrtoint(buf, 0, &val)) + return -EINVAL; + ioc->logging_level = val; + pr_info("%s logging_level=%08xh\n", ioc->name, + ioc->logging_level); + return strlen(buf); +} +static DEVICE_ATTR_RW(logging_level); + +static ssize_t +fwfault_debug_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug); +} + +static ssize_t +fwfault_debug_store( + struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + int val = 0; + + if (kstrtoint(buf, 0, &val)) + return -EINVAL; + ioc->fwfault_debug = val; + pr_info("%s fwfault_debug=%d\n", ioc->name, + ioc->fwfault_debug); + return strlen(buf); +} +static DEVICE_ATTR_RW(fwfault_debug); + +static +struct leapioraid_raid_device *leapioraid_ctl_raid_device_find_by_handle( + struct 
LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->handle != handle) + continue; + r = raid_device; + goto out; + } +out: + return r; +} + +u8 +leapioraid_ctl_tm_done( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + u8 rc; + unsigned long flags; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + u16 smid_task_abort; + u16 handle; + struct LeapioraidSCSITmgReq_t *mpi_request; + struct LeapioraidSCSITmgRep_t *mpi_reply = + leapioraid_base_get_reply_virt_addr(ioc, reply); + + rc = 1; + if (unlikely(!mpi_reply)) { + pr_err( + "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return rc; + } + handle = le16_to_cpu(mpi_reply->DevHandle); + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + smid_task_abort = 0; + if (mpi_reply->TaskType == + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + smid_task_abort = le16_to_cpu(mpi_request->TaskMID); + } + pr_info("\tcomplete: sas_addr(0x%016llx), handle(0x%04x), smid(%d), term(%d)\n", + (unsigned long long)sas_device->sas_address, handle, + (smid_task_abort ? 
smid_task_abort : smid), + le32_to_cpu(mpi_reply->TerminationCount)); + leapioraid_sas_device_put(sas_device); + } + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_ctl_raid_device_find_by_handle(ioc, handle); + if (raid_device) + pr_info("\tcomplete: wwid(0x%016llx), handle(0x%04x), smid(%d), term(%d)\n", + (unsigned long long)raid_device->wwid, handle, + smid, le32_to_cpu(mpi_reply->TerminationCount)); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + ioc->terminated_tm_count += le32_to_cpu(mpi_reply->TerminationCount); + if (ioc->out_of_frames) { + rc = 0; + leapioraid_base_free_smid(ioc, smid); + ioc->out_of_frames = 0; + wake_up(&ioc->no_frames_tm_wq); + } + ioc->pending_tm_count--; + if (!ioc->pending_tm_count) + wake_up(&ioc->pending_tm_wq); + return rc; +} + +static void +leapioraid_ctl_tm_sysfs(struct LEAPIORAID_ADAPTER *ioc, u8 task_type) +{ + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + struct LeapioraidSCSITmgReq_t *mpi_request; + u16 smid, handle, hpr_smid; + struct LEAPIORAID_DEVICE *device_priv_data; + struct LEAPIORAID_TARGET *target_priv_data; + struct scsi_cmnd *scmd; + struct scsi_device *sdev; + unsigned long flags; + int tm_count; + int lun; + u32 doorbell; + struct leapioraid_scsiio_tracker *st; + u8 tr_method = 0x00; + + if (list_empty(&ioc->sas_device_list)) + return; + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->shost_recovery || ioc->remove_host) { + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + pr_err( + "%s %s: busy : host reset in progress, try later\n", + ioc->name, __func__); + return; + } + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + scsi_block_requests(ioc->shost); + init_waitqueue_head(&ioc->pending_tm_wq); + ioc->ignore_loginfos = 1; + ioc->pending_tm_count = 0; + ioc->terminated_tm_count = 0; + ioc->out_of_frames = 0; + tm_count = 0; + switch (task_type) { + case 
LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + if (list_empty(&ioc->hpr_free_list)) { + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + } + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + st = leapioraid_base_scsi_cmd_priv(scmd); + if ((!st) || (st->cb_idx == 0xFF) || (st->smid == 0)) + continue; + lun = scmd->device->lun; + device_priv_data = scmd->device->hostdata; + if (!device_priv_data || !device_priv_data->sas_target) + continue; + target_priv_data = device_priv_data->sas_target; + if (!target_priv_data) + continue; + if (target_priv_data->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT || + target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) + continue; + handle = device_priv_data->sas_target->handle; + hpr_smid = leapioraid_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + pr_err( + "%s %s: out of hi-priority requests!!\n", + ioc->name, __func__); + goto out_of_frames; + } + mpi_request = + leapioraid_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK; + mpi_request->TaskMID = cpu_to_le16(st->smid); + int_to_scsilun(lun, + (struct scsi_lun *)mpi_request->LUN); + starget_printk(KERN_INFO, + device_priv_data->sas_target->starget, + "sending tm: sas_addr(0x%016llx), handle(0x%04x), smid(%d)\n", + (unsigned long long) + device_priv_data->sas_target->sas_address, handle, st->smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & + LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT + || (doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) + 
goto fault_in_progress; + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + break; + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->sas_device_lock, + flags); + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + } + if (!sas_device->starget) + continue; + if (test_bit(sas_device->handle, ioc->pd_handles)) + continue; + hpr_smid = leapioraid_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + pr_err( + "%s %s: out of hi-priority requests!!\n", + ioc->name, __func__); + spin_unlock_irqrestore(&ioc->sas_device_lock, + flags); + goto out_of_frames; + } + mpi_request = + leapioraid_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = + cpu_to_le16(sas_device->handle); + mpi_request->TaskType = + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + starget_printk(KERN_INFO, + sas_device->starget, + "sending tm: sas_addr(0x%016llx), handle(0x%04x), smid(%d)\n", + (unsigned long long)sas_device->sas_address, + sas_device->handle, + hpr_smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & + LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT + || (doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + spin_unlock_irqrestore(&ioc->sas_device_lock, + flags); + goto fault_in_progress; + } + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + 
if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + spin_lock_irqsave(&ioc->raid_device_lock, + flags); + } + if (!raid_device->starget) + continue; + hpr_smid = leapioraid_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + pr_err("%s %s: out of hi-priority requests!!\n", + ioc->name, __func__); + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + goto out_of_frames; + } + mpi_request = + leapioraid_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = + cpu_to_le16(raid_device->handle); + mpi_request->TaskType = + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + starget_printk(KERN_INFO, + raid_device->starget, + "sending tm: wwid(0x%016llx), handle(0x%04x), smid(%d)\n", + (unsigned long long)raid_device->wwid, + raid_device->handle, hpr_smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & + LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT + || (doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + goto fault_in_progress; + } + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + break; + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + shost_for_each_device(sdev, ioc->shost) { + if (list_empty(&ioc->hpr_free_list)) { + ioc->out_of_frames = 1; + init_waitqueue_head(&ioc->no_frames_tm_wq); + wait_event_timeout(ioc->no_frames_tm_wq, + !ioc->out_of_frames, HZ); + } + device_priv_data = sdev->hostdata; + if (!device_priv_data || 
!device_priv_data->sas_target) + continue; + target_priv_data = device_priv_data->sas_target; + if (!target_priv_data) + continue; + if (target_priv_data->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) + continue; + if ((target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) + && (task_type == + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) + continue; + handle = device_priv_data->sas_target->handle; + hpr_smid = leapioraid_base_get_smid_hpr(ioc, + ioc->ctl_tm_cb_idx); + if (!hpr_smid) { + pr_err("%s %s: out of hi-priority requests!!\n", + ioc->name, __func__); + scsi_device_put(sdev); + goto out_of_frames; + } + mpi_request = + leapioraid_base_get_msg_frame(ioc, hpr_smid); + memset(mpi_request, 0, + sizeof(struct LeapioraidSCSITmgReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = task_type; + mpi_request->MsgFlags = tr_method; + int_to_scsilun(sdev->lun, (struct scsi_lun *) + mpi_request->LUN); + sdev_printk(KERN_INFO, sdev, + "sending tm: sas_addr(0x%016llx), handle(0x%04x), smid(%d)\n", + (unsigned long long)target_priv_data->sas_address, + handle, hpr_smid); + ioc->pending_tm_count++; + tm_count++; + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & + LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT + || (doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + scsi_device_put(sdev); + goto fault_in_progress; + } + ioc->put_smid_hi_priority(ioc, hpr_smid, 0); + } + break; + } +out_of_frames: + if (ioc->pending_tm_count) + wait_event_timeout(ioc->pending_tm_wq, + !ioc->pending_tm_count, 30 * HZ); + pr_info("%s task management requests issued(%d)\n", + ioc->name, tm_count); + pr_info("%s number IO terminated(%d)\n", + ioc->name, ioc->terminated_tm_count); +fault_in_progress: + scsi_unblock_requests(ioc->shost); + ioc->ignore_loginfos = 0; +} + +static ssize_t +task_management_store( + struct device *cdev, struct 
device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + int opcode = 0; + + if (kstrtoint(buf, 0, &opcode)) + return -EINVAL; + switch (opcode) { + case 1: + ioc->reset_from_user = 1; + scsi_block_requests(ioc->shost); + pr_err("%s sysfs: diag reset issued: %s\n", ioc->name, + ((!leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER)) + ? "SUCCESS" : "FAILED")); + scsi_unblock_requests(ioc->shost); + break; + case 2: + ioc->reset_from_user = 1; + scsi_block_requests(ioc->shost); + pr_err("%s sysfs: message unit reset issued: %s\n", ioc->name, + ((!leapioraid_base_hard_reset_handler(ioc, + SOFT_RESET)) ? + "SUCCESS" : "FAILED")); + scsi_unblock_requests(ioc->shost); + break; + case 3: + pr_err("%s sysfs: TASKTYPE_ABORT_TASK :\n", ioc->name); + ioc->got_task_abort_from_sysfs = 1; + leapioraid_ctl_tm_sysfs(ioc, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK); + ioc->got_task_abort_from_sysfs = 0; + break; + case 4: + pr_err("%s sysfs: TASKTYPE_TARGET_RESET:\n", ioc->name); + leapioraid_ctl_tm_sysfs(ioc, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET); + break; + case 5: + pr_err("%s sysfs: TASKTYPE_LOGICAL_UNIT_RESET:\n", ioc->name); + leapioraid_ctl_tm_sysfs(ioc, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); + break; + case 6: + pr_info("%s sysfs: TASKTYPE_ABRT_TASK_SET\n", ioc->name); + leapioraid_ctl_tm_sysfs(ioc, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET); + break; + default: + pr_info("%s unsupported opcode(%d)\n", + ioc->name, opcode); + break; + }; + return strlen(buf); +} +static DEVICE_ATTR_WO(task_management); + +static ssize_t +ioc_reset_count_show( + struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count); +} +static 
DEVICE_ATTR_RO(ioc_reset_count); + +static ssize_t +reply_queue_count_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + u8 reply_queue_count; + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + if ((ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable) + reply_queue_count = ioc->reply_queue_count; + else + reply_queue_count = 1; + return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count); +} +static DEVICE_ATTR_RO(reply_queue_count); + +static ssize_t +drv_support_bitmap_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap); +} +static DEVICE_ATTR_RO(drv_support_bitmap); + +static ssize_t +enable_sdev_max_qd_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd); +} + +static ssize_t +enable_sdev_max_qd_store(struct device *cdev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target_priv_data; + int val = 0; + struct scsi_device *sdev; + struct leapioraid_raid_device *raid_device; + int qdepth; + + if (kstrtoint(buf, 0, &val)) + return -EINVAL; + switch (val) { + case 0: + ioc->enable_sdev_max_qd = 0; + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + sas_target_priv_data = sas_device_priv_data->sas_target; + if 
(!sas_target_priv_data) + continue; + if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + raid_device = + leapioraid_raid_device_find_by_handle(ioc, + sas_target_priv_data->handle); + switch (raid_device->volume_type) { + case LEAPIORAID_RAID_VOL_TYPE_RAID0: + if (raid_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) + qdepth = + LEAPIORAID_SAS_QUEUE_DEPTH; + else + qdepth = + LEAPIORAID_SATA_QUEUE_DEPTH; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID1E: + case LEAPIORAID_RAID_VOL_TYPE_RAID1: + case LEAPIORAID_RAID_VOL_TYPE_RAID10: + case LEAPIORAID_RAID_VOL_TYPE_UNKNOWN: + default: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + } + } else + qdepth = + (sas_target_priv_data->sas_dev->port_type > + 1) ? ioc->max_wideport_qd : ioc->max_narrowport_qd; + leapioraid__scsihost_change_queue_depth(sdev, qdepth); + } + break; + case 1: + ioc->enable_sdev_max_qd = 1; + shost_for_each_device(sdev, ioc->shost) { + leapioraid__scsihost_change_queue_depth(sdev, + shost->can_queue); + } + break; + default: + return -EINVAL; + } + return strlen(buf); +} +static DEVICE_ATTR_RW(enable_sdev_max_qd); + +static struct attribute *leapioraid_host_attrs[] = { + &dev_attr_version_fw.attr, + &dev_attr_version_bios.attr, + &dev_attr_version_leapioraid.attr, + &dev_attr_version_product.attr, + &dev_attr_version_nvdata_persistent.attr, + &dev_attr_version_nvdata_default.attr, + &dev_attr_board_name.attr, + &dev_attr_board_assembly.attr, + &dev_attr_board_tracer.attr, + &dev_attr_io_delay.attr, + &dev_attr_device_delay.attr, + &dev_attr_logging_level.attr, + &dev_attr_fwfault_debug.attr, + &dev_attr_fw_queue_depth.attr, + &dev_attr_host_sas_address.attr, + &dev_attr_task_management.attr, + &dev_attr_ioc_reset_count.attr, + &dev_attr_reply_queue_count.attr, + &dev_attr_drv_support_bitmap.attr, + &dev_attr_enable_sdev_max_qd.attr, + NULL, +}; + +static const struct attribute_group leapioraid_host_attr_group = { + .attrs = leapioraid_host_attrs +}; + +const struct 
attribute_group *leapioraid_host_groups[] = { + &leapioraid_host_attr_group, + NULL +}; + +static ssize_t +sas_address_show( + struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf( + buf, PAGE_SIZE, "0x%016llx\n", + (unsigned long long)sas_device_priv_data->sas_target->sas_address); +} +static DEVICE_ATTR_RO(sas_address); + +static ssize_t +sas_device_handle_show( + struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf(buf, PAGE_SIZE, "0x%04x\n", + sas_device_priv_data->sas_target->handle); +} +static DEVICE_ATTR_RO(sas_device_handle); + +static ssize_t +sas_ncq_prio_enable_show( + struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf(buf, PAGE_SIZE, "%d\n", + sas_device_priv_data->ncq_prio_enable); +} + +static ssize_t +sas_ncq_prio_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_DEVICE *sas_device_priv_data = sdev->hostdata; + int ncq_prio_enable = 0; + + if (kstrtoint(buf, 0, &ncq_prio_enable)) + return -EINVAL; + if (!leapioraid_scsihost_ncq_prio_supp(sdev)) + return -EINVAL; + sas_device_priv_data->ncq_prio_enable = ncq_prio_enable; + return strlen(buf); +} +static DEVICE_ATTR_RW(sas_ncq_prio_enable); + +static struct attribute *leapioraid_dev_attrs[] = { + &dev_attr_sas_address.attr, + &dev_attr_sas_device_handle.attr, + &dev_attr_sas_ncq_prio_enable.attr, + NULL, +}; +static const struct attribute_group leapioraid_dev_attr_group = { + .attrs = leapioraid_dev_attrs +}; +const struct 
attribute_group *leapioraid_dev_groups[] = { + &leapioraid_dev_attr_group, + NULL +}; + +static const struct +file_operations leapioraid_ctl_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = leapioraid_ctl_ioctl, + .poll = leapioraid_ctl_poll, + .fasync = leapioraid_ctl_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = leapioraid_ctl_ioctl_compat, +#endif +}; + +static struct miscdevice leapioraid_ctl_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = LEAPIORAID_DEV_NAME, + .fops = &leapioraid_ctl_fops, +}; + +void leapioraid_ctl_init(void) +{ + leapioraid_async_queue = NULL; + if (misc_register(&leapioraid_ctl_dev) < 0) + pr_err("%s can't register misc device\n", + LEAPIORAID_DRIVER_NAME); + init_waitqueue_head(&leapioraid_ctl_poll_wait); +} + +void leapioraid_ctl_exit(void) +{ + struct LEAPIORAID_ADAPTER *ioc; + + list_for_each_entry(ioc, &leapioraid_ioc_list, list) { + kfree(ioc->event_log); + } + misc_deregister(&leapioraid_ctl_dev); +} diff --git a/drivers/scsi/leapioraid/leapioraid_func.c b/drivers/scsi/leapioraid/leapioraid_func.c new file mode 100644 index 000000000000..2d80a86da007 --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid_func.c @@ -0,0 +1,7074 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This is the Fusion MPT base driver providing common API layer interface + * for access to MPT (Message Passing Technology) firmware. + * + * Copyright (C) 2013-2021 LSI Corporation + * Copyright (C) 2013-2021 Avago Technologies + * Copyright (C) 2013-2021 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * Copyright (C) 2024 LeapIO Tech Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "leapioraid_func.h" +#include +#include +#include + +static char *dest_ip = "127.0.0.1"; +module_param(dest_ip, charp, 0000); +MODULE_PARM_DESC(dest_ip, "Destination IP address"); + +static u16 port_no = 6666; +module_param(port_no, ushort, 0000); +MODULE_PARM_DESC(port_no, "Destination Port number"); +static struct 
sockaddr_in dest_addr; +static struct socket *sock; +static struct msghdr msg; + +#define LEAPIORAID_LOG_POLLING_INTERVAL 1 +static LEAPIORAID_CALLBACK leapioraid_callbacks[LEAPIORAID_MAX_CALLBACKS]; +#define LEAPIORAID_FAULT_POLLING_INTERVAL 1000 +#define LEAPIORAID_MAX_HBA_QUEUE_DEPTH 1024 + +static int smp_affinity_enable = 1; +module_param(smp_affinity_enable, int, 0444); +MODULE_PARM_DESC(smp_affinity_enable, + "SMP affinity feature enable/disable Default: enable(1)"); + +static int max_msix_vectors = -1; +module_param(max_msix_vectors, int, 0444); +MODULE_PARM_DESC(max_msix_vectors, " max msix vectors"); + +static int irqpoll_weight = -1; +module_param(irqpoll_weight, int, 0444); +MODULE_PARM_DESC(irqpoll_weight, + "irq poll weight (default= one fourth of HBA queue depth)"); + +static int leapioraid_fwfault_debug; + +static int perf_mode = -1; + +static int poll_queues; +module_param(poll_queues, int, 0444); +MODULE_PARM_DESC(poll_queues, + "Number of queues to be use for io_uring poll mode.\n\t\t" + "This parameter is effective only if host_tagset_enable=1. &\n\t\t" + "when poll_queues are enabled then &\n\t\t" + "perf_mode is set to latency mode. 
&\n\t\t"); + +enum leapioraid_perf_mode { + LEAPIORAID_PERF_MODE_DEFAULT = -1, + LEAPIORAID_PERF_MODE_BALANCED = 0, + LEAPIORAID_PERF_MODE_IOPS = 1, + LEAPIORAID_PERF_MODE_LATENCY = 2, +}; + +static void +leapioraid_base_clear_outstanding_leapioraid_commands( + struct LEAPIORAID_ADAPTER *ioc); +static +int leapioraid_base_wait_on_iocstate(struct LEAPIORAID_ADAPTER *ioc, + u32 ioc_state, int timeout); + +static int +leapioraid_scsihost_set_fwfault_debug( + const char *val, const struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + struct LEAPIORAID_ADAPTER *ioc; + + if (ret) + return ret; + pr_info("setting fwfault_debug(%d)\n", + leapioraid_fwfault_debug); + spin_lock(&leapioraid_gioc_lock); + list_for_each_entry(ioc, &leapioraid_ioc_list, list) + ioc->fwfault_debug = leapioraid_fwfault_debug; + spin_unlock(&leapioraid_gioc_lock); + return 0; +} + +module_param_call( + leapioraid_fwfault_debug, + leapioraid_scsihost_set_fwfault_debug, + param_get_int, &leapioraid_fwfault_debug, 0644); + +static inline u32 +leapioraid_base_readl_aero( + const void __iomem *addr, u8 retry_count) +{ + u32 i = 0, ret_val; + + do { + ret_val = readl(addr); + i++; + } while (ret_val == 0 && i < retry_count); + return ret_val; +} + +u8 +leapioraid_base_check_cmd_timeout( + struct LEAPIORAID_ADAPTER *ioc, + U8 status, void *mpi_request, int sz) +{ + u8 issue_reset = 0; + + if (!(status & LEAPIORAID_CMD_RESET)) + issue_reset = 1; + pr_err("%s Command %s\n", ioc->name, + ((issue_reset == + 0) ? 
"terminated due to Host Reset" : "Timeout"));
	leapioraid_debug_dump_mf(mpi_request, sz);
	return issue_reset;
}

/*
 * leapioraid_remove_dead_ioc_func - kthread body that detaches a dead IOC
 * @arg: opaque pointer, actually the struct LEAPIORAID_ADAPTER * to remove
 *
 * Removes the adapter's PCI device from the bus so an unresponsive
 * controller cannot wedge the rest of the system.
 *
 * Return: 0 on success, -1 when the adapter or its pci_dev is already gone.
 */
static int
leapioraid_remove_dead_ioc_func(void *arg)
{
	struct LEAPIORAID_ADAPTER *ioc = (struct LEAPIORAID_ADAPTER *)arg;
	struct pci_dev *pdev;

	if (ioc == NULL)
		return -1;
	pdev = ioc->pdev;
	if (pdev == NULL)
		return -1;
#if defined(DISABLE_RESET_SUPPORT)
	/* give in-flight teardown a moment before yanking the device */
	ssleep(2);
#endif

	pci_stop_and_remove_bus_device(pdev);
	return 0;
}

/*
 * leapioraid_base_pci_device_is_unplugged - probe config space for surprise
 * removal
 * @ioc: per-adapter object
 *
 * Reads the PCI vendor ID directly from config space.  A failed read, an
 * all-ones / all-zeroes / half-floating pattern indicates the device has
 * been hot-unplugged or has fallen off the bus.
 *
 * Return: 1 when the device appears unplugged, 0 when it still responds.
 */
u8
leapioraid_base_pci_device_is_unplugged(struct LEAPIORAID_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;
	struct pci_bus *bus = pdev->bus;
	int devfn = pdev->devfn;
	u32 vendor_id;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendor_id))
		return 1;
	if (vendor_id == 0xffffffff || vendor_id == 0x00000000 ||
	    vendor_id == 0x0000ffff || vendor_id == 0xffff0000)
		return 1;
	/*
	 * NOTE(review): vendor id 0x0001 is also treated as "gone" -
	 * presumably a bridge/abort placeholder value; confirm against the
	 * controller errata.
	 */
	if ((vendor_id & 0xffff) == 0x0001)
		return 1;
	return 0;
}

/*
 * leapioraid_base_pci_device_is_available - is the device usable for MMIO?
 * @ioc: per-adapter object
 *
 * Return: 0 while EEH/AER error recovery is in progress or the device is
 * physically unplugged, 1 otherwise.
 */
u8
leapioraid_base_pci_device_is_available(struct LEAPIORAID_ADAPTER *ioc)
{
	if (ioc->pci_error_recovery
	    || leapioraid_base_pci_device_is_unplugged(ioc))
		return 0;
	return 1;
}

/*
 * leapioraid_base_sync_drv_fw_timestamp - push host wall-clock time to FW
 * @ioc: per-adapter object
 *
 * Sends an IO Unit Control request carrying the current time in ms so that
 * firmware log timestamps line up with the host's.  Serialized on
 * scsih_cmds.mutex; bails out if another scsih command is in flight.
 */
static void
leapioraid_base_sync_drv_fw_timestamp(struct LEAPIORAID_ADAPTER *ioc)
{
	struct LeapioraidIoUnitControlReq_t *mpi_request;
	struct LeapioraidIoUnitControlRep_t *mpi_reply;
	u16 smid;
	ktime_t current_time;
	u64 TimeStamp = 0;
	u8 issue_reset = 0;

	mutex_lock(&ioc->scsih_cmds.mutex);
	if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) {
		pr_err("%s: scsih_cmd in use %s\n", ioc->name, __func__);
		goto out;
	}
	ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING;
	smid = leapioraid_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		pr_err("%s: failed obtaining a smid %s\n", ioc->name, __func__);
		ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
		goto out;
	}
	mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(struct LeapioraidIoUnitControlReq_t));
+ mpi_request->Function = LEAPIORAID_FUNC_IO_UNIT_CONTROL; + mpi_request->Operation = 0x0F; + mpi_request->IOCParameter = 0x81; + current_time = ktime_get_real(); + TimeStamp = ktime_to_ms(current_time); + mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF); + mpi_request->IOCParameterValue2 = cpu_to_le32(TimeStamp >> 32); + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + dinitprintk(ioc, pr_err( + "%s Io Unit Control Sync TimeStamp (sending), @time %lld ms\n", + ioc->name, TimeStamp)); + wait_for_completion_timeout(&ioc->scsih_cmds.done, + 10 * HZ); + if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, + mpi_request, + sizeof + (struct LeapioraidSasIoUnitControlReq_t) + / 4, issue_reset); + goto issue_host_reset; + } + if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + dinitprintk(ioc, pr_err( + "%s Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo))); + } +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED; +out: + mutex_unlock(&ioc->scsih_cmds.mutex); +} + +static int +leapioraid_udp_init(void) +{ + int ret; + u32 ip; + + if (sock) + return 0; + if (!in4_pton(dest_ip, -1, (u8 *) &ip, -1, NULL)) { + pr_err("Invalid IP address: %s, set to default: 127.0.0.1\n", + dest_ip); + dest_ip = "127.0.0.1"; + } + ret = + sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, IPPROTO_UDP, + &sock); + memset(&dest_addr, 0, sizeof(dest_addr)); + dest_addr.sin_family = AF_INET; + dest_addr.sin_addr.s_addr = ip; + dest_addr.sin_port = htons(port_no); + memset(&msg, 0, sizeof(msg)); + msg.msg_name = &dest_addr; + msg.msg_namelen = sizeof(struct sockaddr_in); + return ret; +} + +static void 
+leapioraid_udp_exit(void) +{ + if (sock) + sock_release(sock); +} + +static int +leapioraid_send_udp_pkg(void *buf, U32 datasize) +{ + int ret; + struct kvec vec; + + vec.iov_len = datasize; + vec.iov_base = buf; + ret = kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len); + if (ret <= 0) { + pr_err_ratelimited("Sending UDP packet failed: errorno = %d", + ret); + return 0; + } else { + return ret; + } +} + +static void +leapioraid_base_pcie_log_work(struct work_struct *work) +{ + struct LEAPIORAID_ADAPTER *ioc = + container_of(work, struct LEAPIORAID_ADAPTER, pcie_log_work.work); + unsigned long flags; + u32 host_logbuf_position, ioc_logbuf_position; + u32 datasize, offset, send_sz, actual_send_sz; + + while (true) { + host_logbuf_position = + ioc->base_readl(&ioc->chip->HostLogBufPosition, 0); + ioc_logbuf_position = + ioc->base_readl(&ioc->chip->IocLogBufPosition, 0); + datasize = ioc_logbuf_position - host_logbuf_position; + offset = host_logbuf_position % SYS_LOG_BUF_SIZE; + if (datasize == 0) { + goto rearm_timer; + } else if (datasize > SYS_LOG_BUF_SIZE) { + pr_err("log thread error:data size overflow\n"); + return; + } + + if (offset + datasize > SYS_LOG_BUF_SIZE) + send_sz = SYS_LOG_BUF_SIZE - offset; + else + send_sz = datasize; + + if (send_sz > MAX_UPD_PAYLOAD_SZ) + send_sz = MAX_UPD_PAYLOAD_SZ; + + actual_send_sz = + leapioraid_send_udp_pkg(ioc->log_buffer + offset, send_sz); + host_logbuf_position += actual_send_sz; + writel(host_logbuf_position, &ioc->chip->HostLogBufPosition); + } +rearm_timer: + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->pcie_log_work_q) + queue_delayed_work(ioc->pcie_log_work_q, + &ioc->pcie_log_work, + msecs_to_jiffies(LEAPIORAID_LOG_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +void +leapioraid_base_start_log_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->pcie_log_work_q) + return; + leapioraid_udp_init(); + 
INIT_DELAYED_WORK(&ioc->pcie_log_work, leapioraid_base_pcie_log_work); + snprintf(ioc->pcie_log_work_q_name, + sizeof(ioc->pcie_log_work_q_name), "poll_%s%u_status", + ioc->driver_name, ioc->id); + ioc->pcie_log_work_q = + create_singlethread_workqueue(ioc->pcie_log_work_q_name); + if (!ioc->pcie_log_work_q) { + pr_err("%s %s: failed (line=%d)\n", ioc->name, + __func__, __LINE__); + return; + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->pcie_log_work_q) + queue_delayed_work(ioc->pcie_log_work_q, + &ioc->pcie_log_work, + msecs_to_jiffies(LEAPIORAID_LOG_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +void +leapioraid_base_stop_log_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + wq = ioc->pcie_log_work_q; + ioc->pcie_log_work_q = NULL; + leapioraid_udp_exit(); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (wq) { + if (!cancel_delayed_work_sync(&ioc->pcie_log_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +static void +leapioraid_base_fault_reset_work(struct work_struct *work) +{ + struct LEAPIORAID_ADAPTER *ioc = + container_of(work, struct LEAPIORAID_ADAPTER, + fault_reset_work.work); + unsigned long flags; + u32 doorbell; + int rc; + struct task_struct *p; + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) || + ioc->pci_error_recovery || ioc->remove_host) + goto rearm_timer; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_MASK) { + pr_err( + "%s SAS host is non-operational !!!!\n", ioc->name); + if (ioc->non_operational_loop++ < 5) { + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, + flags); + goto rearm_timer; + } + 
ioc->remove_host = 1; + leapioraid_base_pause_mq_polling(ioc); + ioc->schedule_dead_ioc_flush_running_cmds(ioc); + p = kthread_run(leapioraid_remove_dead_ioc_func, ioc, + "%s_dead_ioc_%d", ioc->driver_name, ioc->id); + if (IS_ERR(p)) + pr_err( + "%s %s: Running leapioraid_dead_ioc thread failed !!!!\n", + ioc->name, __func__); + else + pr_err( + "%s %s: Running leapioraid_dead_ioc thread success !!!!\n", + ioc->name, __func__); + return; + } + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_COREDUMP) { + u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ? + ioc->manu_pg11.CoreDumpTOSec : + 15; + timeout /= (LEAPIORAID_FAULT_POLLING_INTERVAL / 1000); + if (ioc->ioc_coredump_loop == 0) { + leapioraid_base_coredump_info(ioc, doorbell & + LEAPIORAID_DOORBELL_DATA_MASK); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, + flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, + flags); + leapioraid_base_pause_mq_polling(ioc); + leapioraid_scsihost_clear_outstanding_scsi_tm_commands + (ioc); + leapioraid_base_mask_interrupts(ioc); + leapioraid_base_clear_outstanding_leapioraid_commands(ioc); + leapioraid_ctl_clear_outstanding_ioctls(ioc); + } + drsprintk(ioc, + pr_info("%s %s: CoreDump loop %d.", + ioc->name, __func__, ioc->ioc_coredump_loop)); + if (ioc->ioc_coredump_loop++ < timeout) { + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, + flags); + goto rearm_timer; + } + } + if (ioc->ioc_coredump_loop) { + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) != + LEAPIORAID_IOC_STATE_COREDUMP) + pr_err( + "%s %s: CoreDump completed. LoopCount: %d", + ioc->name, __func__, ioc->ioc_coredump_loop); + else + pr_err( + "%s %s: CoreDump Timed out. 
LoopCount: %d", + ioc->name, __func__, ioc->ioc_coredump_loop); + ioc->ioc_coredump_loop = 0xFF; + } + ioc->non_operational_loop = 0; + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) != + LEAPIORAID_IOC_STATE_OPERATIONAL) { + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + pr_warn("%s %s: hard reset: %s\n", ioc->name, + __func__, (rc == 0) ? "success" : "failed"); + doorbell = leapioraid_base_get_iocstate(ioc, 0); + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, + doorbell & + LEAPIORAID_DOORBELL_DATA_MASK); + } else if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) + leapioraid_base_coredump_info(ioc, + doorbell & + LEAPIORAID_DOORBELL_DATA_MASK); + if (rc + && (doorbell & LEAPIORAID_IOC_STATE_MASK) != + LEAPIORAID_IOC_STATE_OPERATIONAL) + return; + } + ioc->ioc_coredump_loop = 0; + if (ioc->time_sync_interval && + ++ioc->timestamp_update_count >= ioc->time_sync_interval) { + ioc->timestamp_update_count = 0; + leapioraid_base_sync_drv_fw_timestamp(ioc); + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); +rearm_timer: + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(LEAPIORAID_FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +static void +leapioraid_base_hba_hot_unplug_work(struct work_struct *work) +{ + struct LEAPIORAID_ADAPTER *ioc = + container_of(work, struct LEAPIORAID_ADAPTER, + hba_hot_unplug_work.work); + unsigned long flags; + + spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); + if (ioc->shost_recovery || ioc->pci_error_recovery) + goto rearm_timer; + if (leapioraid_base_pci_device_is_unplugged(ioc)) { + if (ioc->remove_host) { + pr_err("%s The host is removeing!!!\n", + ioc->name); + goto rearm_timer; + } + ioc->remove_host = 1; + leapioraid_base_clear_outstanding_leapioraid_commands(ioc); + 
leapioraid_base_pause_mq_polling(ioc); + leapioraid_scsihost_clear_outstanding_scsi_tm_commands(ioc); + leapioraid_ctl_clear_outstanding_ioctls(ioc); + } +rearm_timer: + if (ioc->hba_hot_unplug_work_q) + queue_delayed_work(ioc->hba_hot_unplug_work_q, + &ioc->hba_hot_unplug_work, + msecs_to_jiffies + (1000)); + spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); +} + +void +leapioraid_base_start_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->fault_reset_work_q) + return; + ioc->timestamp_update_count = 0; + INIT_DELAYED_WORK(&ioc->fault_reset_work, + leapioraid_base_fault_reset_work); + snprintf(ioc->fault_reset_work_q_name, + sizeof(ioc->fault_reset_work_q_name), "poll_%s%u_status", + ioc->driver_name, ioc->id); + ioc->fault_reset_work_q = + create_singlethread_workqueue(ioc->fault_reset_work_q_name); + if (!ioc->fault_reset_work_q) { + pr_err("%s %s: failed (line=%d)\n", + ioc->name, __func__, __LINE__); + return; + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(LEAPIORAID_FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->open_pcie_trace) + leapioraid_base_start_log_watchdog(ioc); +} + +void +leapioraid_base_stop_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + wq = ioc->fault_reset_work_q; + ioc->fault_reset_work_q = NULL; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (wq) { + if (!cancel_delayed_work_sync(&ioc->fault_reset_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } + if (ioc->open_pcie_trace) + leapioraid_base_stop_log_watchdog(ioc); +} + +void +leapioraid_base_start_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->hba_hot_unplug_work_q) + 
return; + INIT_DELAYED_WORK(&ioc->hba_hot_unplug_work, + leapioraid_base_hba_hot_unplug_work); + snprintf(ioc->hba_hot_unplug_work_q_name, + sizeof(ioc->hba_hot_unplug_work_q_name), + "poll_%s%u_hba_unplug", ioc->driver_name, ioc->id); + ioc->hba_hot_unplug_work_q = + create_singlethread_workqueue(ioc->hba_hot_unplug_work_q_name); + if (!ioc->hba_hot_unplug_work_q) { + pr_err("%s %s: failed (line=%d)\n", + ioc->name, __func__, __LINE__); + return; + } + spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); + if (ioc->hba_hot_unplug_work_q) + queue_delayed_work(ioc->hba_hot_unplug_work_q, + &ioc->hba_hot_unplug_work, + msecs_to_jiffies(LEAPIORAID_FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); +} + +void +leapioraid_base_stop_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); + wq = ioc->hba_hot_unplug_work_q; + ioc->hba_hot_unplug_work_q = NULL; + spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); + if (wq) { + if (!cancel_delayed_work_sync(&ioc->hba_hot_unplug_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +static void +leapioraid_base_stop_smart_polling(struct LEAPIORAID_ADAPTER *ioc) +{ + struct workqueue_struct *wq; + + wq = ioc->smart_poll_work_q; + ioc->smart_poll_work_q = NULL; + if (wq) { + if (!cancel_delayed_work(&ioc->smart_poll_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +void +leapioraid_base_fault_info(struct LEAPIORAID_ADAPTER *ioc, u16 fault_code) +{ + pr_err("%s fault_state(0x%04x)!\n", + ioc->name, fault_code); +} + +void +leapioraid_base_coredump_info(struct LEAPIORAID_ADAPTER *ioc, u16 fault_code) +{ + pr_err("%s coredump_state(0x%04x)!\n", + ioc->name, fault_code); +} + +int +leapioraid_base_wait_for_coredump_completion(struct LEAPIORAID_ADAPTER *ioc, + const char *caller) +{ + u8 timeout = + (ioc->manu_pg11.CoreDumpTOSec) ? 
ioc->manu_pg11.CoreDumpTOSec : 15; + int ioc_state = + leapioraid_base_wait_on_iocstate(ioc, LEAPIORAID_IOC_STATE_FAULT, + timeout); + + if (ioc_state) + pr_err("%s %s: CoreDump timed out. (ioc_state=0x%x)\n", + ioc->name, caller, ioc_state); + else + pr_info("%s %s: CoreDump completed. (ioc_state=0x%x)\n", + ioc->name, caller, ioc_state); + return ioc_state; +} + +void +leapioraid_halt_firmware(struct LEAPIORAID_ADAPTER *ioc, u8 set_fault) +{ + u32 doorbell; + + if ((!ioc->fwfault_debug) && (!set_fault)) + return; + if (!set_fault) + dump_stack(); + doorbell = + ioc->base_readl(&ioc->chip->Doorbell, + LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY); + if ((doorbell & LEAPIORAID_IOC_STATE_MASK) + == LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, doorbell); + } else if ((doorbell & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) + leapioraid_base_coredump_info(ioc, + doorbell & + LEAPIORAID_DOORBELL_DATA_MASK); + else { + writel(0xC0FFEE00, &ioc->chip->Doorbell); + if (!set_fault) + pr_err("%s Firmware is halted due to command timeout\n", + ioc->name); + } + if (set_fault) + return; + if (ioc->fwfault_debug == 2) { + for (;;) + ; + } else + panic("panic in %s\n", __func__); +} + +static void +leapioraid_base_group_cpus_on_irq(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_adapter_reply_queue *reply_q; + unsigned int i, cpu, group, nr_cpus, nr_msix, index = 0; + int iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; + int unmanaged_q_count = ioc->high_iops_queues + iopoll_q_count; + + cpu = cpumask_first(cpu_online_mask); + nr_msix = ioc->reply_queue_count - unmanaged_q_count; + nr_cpus = num_online_cpus(); + group = nr_cpus / nr_msix; + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (reply_q->msix_index < ioc->high_iops_queues || + reply_q->msix_index >= ioc->iopoll_q_start_index) + continue; + if (cpu >= nr_cpus) + break; + if (index < nr_cpus % nr_msix) + group++; + for (i = 0; i < group; i++) { 
+ ioc->cpu_msix_table[cpu] = reply_q->msix_index; + cpu = cpumask_next(cpu, cpu_online_mask); + } + index++; + } +} + +static void +leapioraid_base_sas_ioc_info(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidDefaultRep_t *mpi_reply, + struct LeapioraidReqHeader_t *request_hdr) +{ + u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + char *desc = NULL; + u16 frame_sz; + char *func_str = NULL; + + if (request_hdr->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST || + request_hdr->Function == LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH + || request_hdr->Function == LEAPIORAID_FUNC_EVENT_NOTIFICATION) + return; + if (ioc_status == LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE) + return; + switch (ioc_status) { + case LEAPIORAID_IOCSTATUS_INVALID_FUNCTION: + desc = "invalid function"; + break; + case LEAPIORAID_IOCSTATUS_BUSY: + desc = "busy"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_SGL: + desc = "invalid sgl"; + break; + case LEAPIORAID_IOCSTATUS_INTERNAL_ERROR: + desc = "internal error"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_VPID: + desc = "invalid vpid"; + break; + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES: + desc = "insufficient resources"; + break; + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER: + desc = "insufficient power"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_FIELD: + desc = "invalid field"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_STATE: + desc = "invalid state"; + break; + case LEAPIORAID_IOCSTATUS_OP_STATE_NOT_SUPPORTED: + desc = "op state not supported"; + break; + case LEAPIORAID_IOCSTATUS_CONFIG_INVALID_ACTION: + desc = "config invalid action"; + break; + case LEAPIORAID_IOCSTATUS_CONFIG_INVALID_TYPE: + desc = "config invalid type"; + break; + case LEAPIORAID_IOCSTATUS_CONFIG_INVALID_DATA: + desc = "config invalid data"; + break; + case LEAPIORAID_IOCSTATUS_CONFIG_NO_DEFAULTS: + desc = "config no defaults"; + break; + case LEAPIORAID_IOCSTATUS_CONFIG_CANT_COMMIT: + desc = "config can not commit"; + 
break; + case LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR: + case LEAPIORAID_IOCSTATUS_SCSI_INVALID_DEVHANDLE: + case LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + case LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN: + case LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN: + case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR: + case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR: + case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED: + case LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + case LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED: + case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED: + break; + case LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR: + if (!ioc->disable_eedp_support) + desc = "eedp guard error"; + break; + case LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR: + if (!ioc->disable_eedp_support) + desc = "eedp ref tag error"; + break; + case LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR: + if (!ioc->disable_eedp_support) + desc = "eedp app tag error"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_INVALID_IO_INDEX: + desc = "target invalid io index"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_ABORTED: + desc = "target aborted"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_NO_CONN_RETRYABLE: + desc = "target no conn retryable"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_NO_CONNECTION: + desc = "target no connection"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH: + desc = "target xfer count mismatch"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_DATA_OFFSET_ERROR: + desc = "target data offset error"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA: + desc = "target too much write data"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_IU_TOO_SHORT: + desc = "target iu too short"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT: + desc = "target ack nak timeout"; + break; + case LEAPIORAID_IOCSTATUS_TARGET_NAK_RECEIVED: + desc = "target nak received"; + break; + case LEAPIORAID_IOCSTATUS_SAS_SMP_REQUEST_FAILED: + desc = "smp 
request failed"; + break; + case LEAPIORAID_IOCSTATUS_SAS_SMP_DATA_OVERRUN: + desc = "smp data overrun"; + break; + default: + break; + } + if (!desc) + return; + switch (request_hdr->Function) { + case LEAPIORAID_FUNC_CONFIG: + frame_sz = sizeof(struct LeapioraidCfgReq_t) + ioc->sge_size; + func_str = "config_page"; + break; + case LEAPIORAID_FUNC_SCSI_TASK_MGMT: + frame_sz = sizeof(struct LeapioraidSCSITmgReq_t); + func_str = "task_mgmt"; + break; + case LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL: + frame_sz = sizeof(struct LeapioraidSasIoUnitControlReq_t); + func_str = "sas_iounit_ctl"; + break; + case LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR: + frame_sz = sizeof(struct LeapioraidSepReq_t); + func_str = "enclosure"; + break; + case LEAPIORAID_FUNC_IOC_INIT: + frame_sz = sizeof(struct LeapioraidIOCInitReq_t); + func_str = "ioc_init"; + break; + case LEAPIORAID_FUNC_PORT_ENABLE: + frame_sz = sizeof(struct LeapioraidPortEnableReq_t); + func_str = "port_enable"; + break; + case LEAPIORAID_FUNC_SMP_PASSTHROUGH: + frame_sz = + sizeof(struct LeapioraidSmpPassthroughReq_t) + ioc->sge_size; + func_str = "smp_passthru"; + break; + default: + frame_sz = 32; + func_str = "unknown"; + break; + } + pr_warn("%s ioc_status: %s(0x%04x), request(0x%p), (%s)\n", + ioc->name, desc, ioc_status, request_hdr, func_str); + leapioraid_debug_dump_mf(request_hdr, frame_sz / 4); +} + +static void +leapioraid_base_display_event_data(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventNotificationRep_t *mpi_reply) +{ + char *desc = NULL; + u16 event; + + if (!(ioc->logging_level & LEAPIORAID_DEBUG_EVENTS)) + return; + event = le16_to_cpu(mpi_reply->Event); + if (ioc->warpdrive_msg) { + switch (event) { + case LEAPIORAID_EVENT_IR_OPERATION_STATUS: + case LEAPIORAID_EVENT_IR_VOLUME: + case LEAPIORAID_EVENT_IR_PHYSICAL_DISK: + case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST: + case LEAPIORAID_EVENT_LOG_ENTRY_ADDED: + return; + } + } + switch (event) { + case LEAPIORAID_EVENT_LOG_DATA: + desc = 
"Log Data"; + break; + case LEAPIORAID_EVENT_STATE_CHANGE: + desc = "Status Change"; + break; + case LEAPIORAID_EVENT_HARD_RESET_RECEIVED: + desc = "Hard Reset Received"; + break; + case LEAPIORAID_EVENT_EVENT_CHANGE: + desc = "Event Change"; + break; + case LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE: + desc = "Device Status Change"; + break; + case LEAPIORAID_EVENT_IR_OPERATION_STATUS: + desc = "IR Operation Status"; + break; + case LEAPIORAID_EVENT_SAS_DISCOVERY: + { + struct LeapioraidEventDataSasDiscovery_t *event_data = + (struct LeapioraidEventDataSasDiscovery_t *) mpi_reply->EventData; + pr_info("%s SAS Discovery: (%s)", + ioc->name, + (event_data->ReasonCode == + LEAPIORAID_EVENT_SAS_DISC_RC_STARTED) ? "start" : + "stop"); + if (event_data->DiscoveryStatus) + pr_info("discovery_status(0x%08x)", + le32_to_cpu(event_data->DiscoveryStatus)); + pr_info("\n"); + return; + } + case LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE: + desc = "SAS Broadcast Primitive"; + break; + case LEAPIORAID_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: + desc = "SAS Init Device Status Change"; + break; + case LEAPIORAID_EVENT_SAS_INIT_TABLE_OVERFLOW: + desc = "SAS Init Table Overflow"; + break; + case LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + desc = "SAS Topology Change List"; + break; + case LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + desc = "SAS Enclosure Device Status Change"; + break; + case LEAPIORAID_EVENT_IR_VOLUME: + desc = "IR Volume"; + break; + case LEAPIORAID_EVENT_IR_PHYSICAL_DISK: + desc = "IR Physical Disk"; + break; + case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST: + desc = "IR Configuration Change List"; + break; + case LEAPIORAID_EVENT_LOG_ENTRY_ADDED: + desc = "Log Entry Added"; + break; + case LEAPIORAID_EVENT_TEMP_THRESHOLD: + desc = "Temperature Threshold"; + break; + case LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + desc = "SAS Device Discovery Error"; + break; + } + if (!desc) + return; + pr_info("%s %s\n", ioc->name, desc); +} + +static void 
+leapioraid_base_sas_log_info(struct LEAPIORAID_ADAPTER *ioc, u32 log_info) +{ + union loginfo_type { + u32 loginfo; + struct { + u32 subcode:16; + u32 code:8; + u32 originator:4; + u32 bus_type:4; + } dw; + }; + union loginfo_type sas_loginfo; + char *originator_str = NULL; + + sas_loginfo.loginfo = log_info; + if (sas_loginfo.dw.bus_type != 3) + return; + if (log_info == 0x31170000) + return; + if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info == + 0x31140000 || log_info == 0x31130000)) + return; + switch (sas_loginfo.dw.originator) { + case 0: + originator_str = "IOP"; + break; + case 1: + originator_str = "PL"; + break; + case 2: + if (ioc->warpdrive_msg) + originator_str = "WarpDrive"; + else + originator_str = "IR"; + break; + } + pr_warn("%s log_info(0x%08x):\n\t\t" + "originator(%s), code(0x%02x), sub_code(0x%04x)\n", + ioc->name, + log_info, + originator_str, + sas_loginfo.dw.code, + sas_loginfo.dw.subcode); +} + +static void +leapioraid_base_display_reply_info(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + u16 ioc_status; + u32 loginfo = 0; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (unlikely(!mpi_reply)) { + pr_err( + "%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + if ((ioc_status & LEAPIORAID_IOCSTATUS_MASK) && + (ioc->logging_level & LEAPIORAID_DEBUG_REPLY)) { + leapioraid_base_sas_ioc_info(ioc, mpi_reply, + leapioraid_base_get_msg_frame(ioc, + smid)); + } + if (ioc_status & LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { + loginfo = le32_to_cpu(mpi_reply->IOCLogInfo); + leapioraid_base_sas_log_info(ioc, loginfo); + } +} + +u8 +leapioraid_base_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if 
(mpi_reply && mpi_reply->Function == LEAPIORAID_FUNC_EVENT_ACK) + return leapioraid_check_for_pending_internal_cmds(ioc, smid); + if (ioc->base_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + ioc->base_cmds.status |= LEAPIORAID_CMD_COMPLETE; + if (mpi_reply) { + ioc->base_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + memcpy(ioc->base_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + } + ioc->base_cmds.status &= ~LEAPIORAID_CMD_PENDING; + complete(&ioc->base_cmds.done); + return 1; +} + +static u8 +leapioraid_base_async_event( + struct LEAPIORAID_ADAPTER *ioc, u8 msix_index, u32 reply) +{ + struct LeapioraidEventNotificationRep_t *mpi_reply; + struct LeapioraidEventAckReq_t *ack_request; + u16 smid; + struct leapioraid_event_ack_list *delayed_event_ack; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (!mpi_reply) + return 1; + if (mpi_reply->Function != LEAPIORAID_FUNC_EVENT_NOTIFICATION) + return 1; + leapioraid_base_display_event_data(ioc, mpi_reply); + if (!(mpi_reply->AckRequired & LEAPIORAID_EVENT_NOTIFICATION_ACK_REQUIRED)) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + delayed_event_ack = + kzalloc(sizeof(*delayed_event_ack), GFP_ATOMIC); + if (!delayed_event_ack) + goto out; + INIT_LIST_HEAD(&delayed_event_ack->list); + delayed_event_ack->Event = mpi_reply->Event; + delayed_event_ack->EventContext = mpi_reply->EventContext; + list_add_tail(&delayed_event_ack->list, + &ioc->delayed_event_ack_list); + dewtprintk(ioc, pr_err( + "%s DELAYED: EVENT ACK: event (0x%04x)\n", + ioc->name, + le16_to_cpu(mpi_reply->Event))); + goto out; + } + ack_request = leapioraid_base_get_msg_frame(ioc, smid); + memset(ack_request, 0, sizeof(struct LeapioraidEventAckReq_t)); + ack_request->Function = LEAPIORAID_FUNC_EVENT_ACK; + ack_request->Event = mpi_reply->Event; + ack_request->EventContext = mpi_reply->EventContext; + ack_request->VF_ID = 0; + ack_request->VP_ID = 0; + ioc->put_smid_default(ioc, smid); 
+out: + leapioraid_scsihost_event_callback(ioc, msix_index, reply); + leapioraid_ctl_event_callback(ioc, msix_index, reply); + return 1; +} + +inline +struct leapioraid_scsiio_tracker *leapioraid_base_scsi_cmd_priv( + struct scsi_cmnd *scmd) +{ + return scsi_cmd_priv(scmd); +} + +struct leapioraid_scsiio_tracker *leapioraid_get_st_from_smid( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + struct scsi_cmnd *cmd; + + if (WARN_ON(!smid) || WARN_ON(smid >= ioc->hi_priority_smid)) + return NULL; + cmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (cmd) + return leapioraid_base_scsi_cmd_priv(cmd); + return NULL; +} + +static u8 +leapioraid_base_get_cb_idx(struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + int i; + u16 ctl_smid = ioc->shost->can_queue + LEAPIORAID_INTERNAL_SCSIIO_FOR_IOCTL; + u16 discovery_smid = + ioc->shost->can_queue + LEAPIORAID_INTERNAL_SCSIIO_FOR_DISCOVERY; + u8 cb_idx = 0xFF; + + if (smid < ioc->hi_priority_smid) { + struct leapioraid_scsiio_tracker *st; + + if (smid < ctl_smid) { + st = leapioraid_get_st_from_smid(ioc, smid); + if (st) + cb_idx = st->cb_idx; + } else if (smid < discovery_smid) + cb_idx = ioc->ctl_cb_idx; + else + cb_idx = ioc->scsih_cb_idx; + } else if (smid < ioc->internal_smid) { + i = smid - ioc->hi_priority_smid; + cb_idx = ioc->hpr_lookup[i].cb_idx; + } else if (smid <= ioc->hba_queue_depth) { + i = smid - ioc->internal_smid; + cb_idx = ioc->internal_lookup[i].cb_idx; + } + return cb_idx; +} + +void +leapioraid_base_pause_mq_polling(struct LEAPIORAID_ADAPTER *ioc) +{ + int iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; + int qid; + + for (qid = 0; qid < iopoll_q_count; qid++) + atomic_set(&ioc->blk_mq_poll_queues[qid].pause, 1); + for (qid = 0; qid < iopoll_q_count; qid++) { + while (atomic_read(&ioc->blk_mq_poll_queues[qid].busy)) { + cpu_relax(); + udelay(500); + } + } +} + +void +leapioraid_base_resume_mq_polling(struct LEAPIORAID_ADAPTER *ioc) +{ + int iopoll_q_count = ioc->reply_queue_count - 
/*
 * NOTE(review): this chunk begins mid-function.  The lines below are the
 * tail of a helper that un-pauses the blk-mq poll queues (its signature and
 * the initializer of iopoll_q_count are outside this view).
 */
		ioc->iopoll_q_start_index;
	int qid;

	for (qid = 0; qid < iopoll_q_count; qid++)
		atomic_set(&ioc->blk_mq_poll_queues[qid].pause, 0);
}

/*
 * leapioraid_base_mask_interrupts - disable controller interrupt delivery.
 * @ioc: per-adapter object
 *
 * Sets the driver-side ->mask_interrupts flag first so the ISR bails out
 * immediately, then sets the mask bits in the HostInterruptMask register.
 * The trailing read flushes the posted MMIO write.
 */
void
leapioraid_base_mask_interrupts(struct LEAPIORAID_ADAPTER *ioc)
{
	u32 him_register;

	ioc->mask_interrupts = 1;
	him_register =
	    ioc->base_readl(&ioc->chip->HostInterruptMask,
			    LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
	/*
	 * NOTE(review): '+' on disjoint single-bit masks behaves like '|'
	 * here, but bitwise OR of named LEAPIORAID_HIM_* constants would be
	 * clearer and safer against future edits.
	 */
	him_register |=
	    0x00000001 + 0x00000008 + 0x40000000;
	writel(him_register, &ioc->chip->HostInterruptMask);
	ioc->base_readl(&ioc->chip->HostInterruptMask,
			LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
}

/*
 * leapioraid_base_unmask_interrupts - re-enable reply interrupt delivery.
 * @ioc: per-adapter object
 *
 * Clears only bit 0x8 (the reply-descriptor interrupt mask bit) and then
 * clears the driver-side ->mask_interrupts flag so the ISR resumes work.
 */
void
leapioraid_base_unmask_interrupts(struct LEAPIORAID_ADAPTER *ioc)
{
	u32 him_register;

	him_register =
	    ioc->base_readl(&ioc->chip->HostInterruptMask,
			    LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
	him_register &= ~0x00000008;
	writel(him_register, &ioc->chip->HostInterruptMask);
	ioc->mask_interrupts = 0;
}

/* Overlay to access one 64-bit reply post descriptor as two 32-bit words. */
union leapioraid_reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};

/*
 * leapioraid_base_process_reply_queue - drain one reply post queue.
 * @reply_q: the reply queue (MSI-X vector or blk-mq poll queue) to service
 *
 * Core completion loop shared by the hard-IRQ handler, the irq_poll
 * softirq path and the blk-mq poll path.  Walks the reply post free ring,
 * dispatches each descriptor to the registered callback (SCSI IO success
 * fast path, address reply, or async event), recycles reply frames to the
 * firmware via ReplyFreeHostIndex, and finally updates the host index
 * register.  Per-queue ->busy guarantees single-threaded servicing.
 *
 * Return: number of completed descriptors (0 if the queue was busy or
 * empty).  NOTE(review): returns 1 when the first descriptor is UNUSED,
 * i.e. "nothing to do" is reported as one completion -- presumably to make
 * the caller's interrupt-handled test succeed; confirm intent.
 */
static int
leapioraid_base_process_reply_queue(
	struct leapioraid_adapter_reply_queue *reply_q)
{
	union leapioraid_reply_descriptor rd;
	u64 completed_cmds;	/* NOTE(review): truncated to int on return */
	u8 request_descript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index = reply_q->msix_index;
	struct LEAPIORAID_ADAPTER *ioc = reply_q->ioc;
	union LeapioraidRepDescUnion_t *rpf;
	u8 rc;

	completed_cmds = 0;
	/* 0->1 transition only: if another context holds the queue, bail. */
	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return completed_cmds;
	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_descript_type = rpf->Default.ReplyFlags
	    & LEAPIORAID_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (request_descript_type == LEAPIORAID_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return 1;
	}
	cb_idx = 0xFF;
	do {
		rd.word = le64_to_cpu(rpf->Words);
		/* All-ones descriptor: ring not yet written by firmware. */
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		if (request_descript_type ==
		    LEAPIORAID_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
		    request_descript_type ==
		    LEAPIORAID_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
			/* Success fast path: no reply frame to fetch. */
			cb_idx = leapioraid_base_get_cb_idx(ioc, smid);
			if ((likely(cb_idx < LEAPIORAID_MAX_CALLBACKS)) &&
			    (likely(leapioraid_callbacks[cb_idx] != NULL))) {
				rc = leapioraid_callbacks[cb_idx] (ioc, smid,
								   msix_index, 0);
				if (rc)
					leapioraid_base_free_smid(ioc, smid);
			}
		} else if (request_descript_type ==
			   LEAPIORAID_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply =
			    le32_to_cpu(rpf->AddressReply.ReplyFrameAddress);
			/* Sanity-check the DMA address the firmware gave us. */
			if (reply > ioc->reply_dma_max_address
			    || reply < ioc->reply_dma_min_address)
				reply = 0;
			if (smid) {
				cb_idx = leapioraid_base_get_cb_idx(ioc, smid);
				if ((likely(cb_idx < LEAPIORAID_MAX_CALLBACKS)) &&
				    (likely(leapioraid_callbacks[cb_idx] != NULL))) {
					rc = leapioraid_callbacks[cb_idx] (ioc,
									   smid,
									   msix_index,
									   reply);
					if (reply)
						leapioraid_base_display_reply_info
						    (ioc, smid, msix_index,
						     reply);
					if (rc)
						leapioraid_base_free_smid(ioc,
									  smid);
				}
			} else {
				/* smid 0 means an unsolicited async event. */
				leapioraid_base_async_event(ioc, msix_index, reply);
			}
			if (reply) {
				/* Return the reply frame to the free ring. */
				ioc->reply_free_host_index =
				    (ioc->reply_free_host_index ==
				     (ioc->reply_free_queue_depth - 1)) ?
				    0 : ioc->reply_free_host_index + 1;
				ioc->reply_free[ioc->reply_free_host_index] =
				    cpu_to_le32(reply);
				wmb(); /* Make sure that all write ops are in order */
				writel(ioc->reply_free_host_index,
				       &ioc->chip->ReplyFreeHostIndex);
			}
		}
		/* Mark the slot consumed and advance (ring wrap-around). */
		rpf->Words = cpu_to_le64(ULLONG_MAX);
		reply_q->reply_post_host_index =
		    (reply_q->reply_post_host_index ==
		     (ioc->reply_post_queue_depth - 1)) ?
		    0 :
		    reply_q->reply_post_host_index + 1;
		request_descript_type =
		    reply_q->reply_post_free[reply_q->reply_post_host_index].Default.ReplyFlags
		    & LEAPIORAID_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;
		/* Budget reached: update HW index and defer to irq_poll. */
		if (completed_cmds >= ioc->thresh_hold) {
			if (ioc->combined_reply_queue) {
				writel(reply_q->reply_post_host_index |
				       ((msix_index & 7) <<
					LEAPIORAID_RPHI_MSIX_INDEX_SHIFT),
				       ioc->replyPostRegisterIndex[msix_index /
								   8]);
			} else {
				writel(reply_q->reply_post_host_index |
				       (msix_index <<
					LEAPIORAID_RPHI_MSIX_INDEX_SHIFT),
				       &ioc->chip->ReplyPostHostIndex);
			}
			if (!reply_q->is_blk_mq_poll_q &&
			    !reply_q->irq_poll_scheduled) {
				reply_q->irq_poll_scheduled = true;
				irq_poll_sched(&reply_q->irqpoll);
			}
			atomic_dec(&reply_q->busy);
			return completed_cmds;
		}
		if (request_descript_type == LEAPIORAID_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!reply_q->reply_post_host_index)
			rpf = reply_q->reply_post_free;
		else
			rpf++;
	} while (1);
out:
	if (!completed_cmds) {
		atomic_dec(&reply_q->busy);
		return completed_cmds;
	}
	wmb(); /* Make sure that all write ops are in order */
	if (ioc->combined_reply_queue) {
		writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
							 LEAPIORAID_RPHI_MSIX_INDEX_SHIFT),
		       ioc->replyPostRegisterIndex[msix_index / 8]);
	} else {
		writel(reply_q->reply_post_host_index | (msix_index <<
							 LEAPIORAID_RPHI_MSIX_INDEX_SHIFT),
		       &ioc->chip->ReplyPostHostIndex);
	}
	atomic_dec(&reply_q->busy);
	return completed_cmds;
}

/*
 * leapioraid_blk_mq_poll - blk-mq ->mq_poll entry for io_uring style polling.
 * @shost: SCSI host
 * @queue_num: hardware queue number; poll queues start at
 *             ioc->iopoll_q_start_index
 *
 * Return: number of completions reaped (0 if paused or already being
 * polled by another context).  (Function continues in the next chunk.)
 */
int leapioraid_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct LEAPIORAID_ADAPTER *ioc =
	    (struct LEAPIORAID_ADAPTER *)shost->hostdata;
	struct leapioraid_adapter_reply_queue *reply_q;
	int num_entries = 0;
	int qid = queue_num - ioc->iopoll_q_start_index;

	if (atomic_read(&ioc->blk_mq_poll_queues[qid].pause) ||
	    !atomic_add_unless(&ioc->blk_mq_poll_queues[qid].busy, 1, 1))
		return 0;
	reply_q = ioc->blk_mq_poll_queues[qid].reply_q;
	/* (tail of leapioraid_blk_mq_poll, begun in the previous chunk) */
	num_entries = leapioraid_base_process_reply_queue(reply_q);
	atomic_dec(&ioc->blk_mq_poll_queues[qid].busy);
	return num_entries;
}

/*
 * leapioraid_base_interrupt - hard-IRQ handler for one MSI-X vector.
 * @irq: irq number (unused)
 * @bus_id: the reply queue registered with request_irq()
 *
 * Claims the interrupt outright while irq_poll is scheduled so the softirq
 * path drains the queue instead.
 */
static irqreturn_t
leapioraid_base_interrupt(int irq, void *bus_id)
{
	struct leapioraid_adapter_reply_queue *reply_q = bus_id;
	struct LEAPIORAID_ADAPTER *ioc = reply_q->ioc;

	if (ioc->mask_interrupts)
		return IRQ_NONE;
	if (reply_q->irq_poll_scheduled)
		return IRQ_HANDLED;
	return ((leapioraid_base_process_reply_queue(reply_q) > 0) ?
		IRQ_HANDLED : IRQ_NONE);
}

/*
 * leapioraid_base_irqpoll - irq_poll (softirq) handler.
 * @irqpoll: poll context embedded in the reply queue
 * @budget: max completions to reap this round
 *
 * Masks the IRQ line while polling; when fewer than @budget completions
 * are found the queue is drained, so polling stops and the line is
 * re-enabled.
 */
static
int leapioraid_base_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct leapioraid_adapter_reply_queue *reply_q;
	int num_entries = 0;

	reply_q = container_of(irqpoll,
			       struct leapioraid_adapter_reply_queue, irqpoll);
	if (reply_q->irq_line_enable) {
		disable_irq_nosync(reply_q->os_irq);
		reply_q->irq_line_enable = false;
	}
	num_entries = leapioraid_base_process_reply_queue(reply_q);
	if (num_entries < budget) {
		irq_poll_complete(irqpoll);
		reply_q->irq_poll_scheduled = false;
		reply_q->irq_line_enable = true;
		enable_irq(reply_q->os_irq);
	}
	return num_entries;
}

/*
 * leapioraid_base_init_irqpolls - set up irq_poll for every IRQ-driven
 * reply queue (blk-mq poll queues are skipped; they never take interrupts).
 * The poll budget is the same thresh_hold used by the ISR loop.
 */
static void
leapioraid_base_init_irqpolls(struct LEAPIORAID_ADAPTER *ioc)
{
	struct leapioraid_adapter_reply_queue *reply_q, *next;

	if (list_empty(&ioc->reply_queue_list))
		return;
	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
		if (reply_q->is_blk_mq_poll_q)
			continue;
		irq_poll_init(&reply_q->irqpoll, ioc->thresh_hold,
			      leapioraid_base_irqpoll);
		reply_q->irq_poll_scheduled = false;
		reply_q->irq_line_enable = true;
		reply_q->os_irq = pci_irq_vector(ioc->pdev,
						 reply_q->msix_index);
	}
}

/* True when both the firmware reports MSI-X capability and the driver
 * actually enabled MSI-X.
 */
static inline int
leapioraid_base_is_controller_msix_enabled(struct LEAPIORAID_ADAPTER *ioc)
{
	return (ioc->facts.IOCCapabilities &
		LEAPIORAID_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
}

/*
 * leapioraid_base_sync_reply_irqs - quiesce all non-zero reply queues.
 * @ioc: per-adapter object
 * @poll: when non-zero, additionally drain each queue after syncing
 *
 * Used around reset paths: waits for in-flight hard IRQs, kicks any
 * irq_poll left scheduled (disable/enable forces a final run), and
 * re-enables IRQ lines that a pending poll would otherwise leave masked.
 * Queue 0 is skipped -- presumably it is handled by the caller; confirm.
 */
void
leapioraid_base_sync_reply_irqs(struct LEAPIORAID_ADAPTER *ioc, u8 poll)
{
	struct leapioraid_adapter_reply_queue *reply_q;

	if (!leapioraid_base_is_controller_msix_enabled(ioc))
		return;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->shost_recovery || ioc->remove_host ||
		    ioc->pci_error_recovery)
			return;
		if (reply_q->msix_index == 0)
			continue;
		if (reply_q->is_blk_mq_poll_q) {
			leapioraid_base_process_reply_queue(reply_q);
			continue;
		}
		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
		if (reply_q->irq_poll_scheduled) {
			irq_poll_disable(&reply_q->irqpoll);
			irq_poll_enable(&reply_q->irqpoll);
			if (reply_q->irq_poll_scheduled) {
				reply_q->irq_poll_scheduled = false;
				reply_q->irq_line_enable = true;
				enable_irq(reply_q->os_irq);
			}
		}
		if (poll)
			leapioraid_base_process_reply_queue(reply_q);
	}
}

/* Clear one slot in the module-global completion-callback table. */
void
leapioraid_base_release_callback_handler(u8 cb_idx)
{
	leapioraid_callbacks[cb_idx] = NULL;
}

/*
 * leapioraid_base_register_callback_handler - claim a callback slot.
 *
 * Scans from the top of the table downwards for a free slot.
 * NOTE(review): slot 0 is never matched by the loop condition (cb_idx != 0
 * terminates it), so if the table is full this silently overwrites slot 0
 * and returns 0 -- same quirk as the mpt3sas original; confirm acceptable.
 */
u8
leapioraid_base_register_callback_handler(LEAPIORAID_CALLBACK cb_func)
{
	u8 cb_idx;

	for (cb_idx = LEAPIORAID_MAX_CALLBACKS - 1; cb_idx; cb_idx--)
		if (leapioraid_callbacks[cb_idx] == NULL)
			break;
	leapioraid_callbacks[cb_idx] = cb_func;
	return cb_idx;
}

/* Reset the whole callback table to empty. */
void
leapioraid_base_initialize_callback_handler(void)
{
	u8 cb_idx;

	for (cb_idx = 0; cb_idx < LEAPIORAID_MAX_CALLBACKS; cb_idx++)
		leapioraid_base_release_callback_handler(cb_idx);
}

/*
 * Build an MPI SGE describing a zero-length transfer: simple element,
 * last/end-of-buffer/end-of-list flags, address -1.
 */
static void
leapioraid_base_build_zero_len_sge(
	struct LEAPIORAID_ADAPTER *ioc, void *paddr)
{
	u32 flags_length = (u32) ((LEAPIORAID_SGE_FLAGS_LAST_ELEMENT |
				   LEAPIORAID_SGE_FLAGS_END_OF_BUFFER |
				   LEAPIORAID_SGE_FLAGS_END_OF_LIST |
				   LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT) <<
				  LEAPIORAID_SGE_FLAGS_SHIFT);

	ioc->base_add_sg_single(paddr, flags_length, -1);
}

/*
 * Write one 32-bit-address MPI SGE at @paddr.  (Continues in the next
 * chunk.)
 */
static void
leapioraid_base_add_sg_single_32(void *paddr, u32 flags_length,
				 dma_addr_t dma_addr)
{
	struct LeapioSGESimple32_t *sgel = paddr;

	flags_length |= (LEAPIORAID_SGE_FLAGS_32_BIT_ADDRESSING |
			 LEAPIORAID_SGE_FLAGS_SYSTEM_ADDRESS)
	    /* (tail of leapioraid_base_add_sg_single_32) */
	    <<
	    LEAPIORAID_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le32(dma_addr);
}

/* Write one 64-bit-address MPI SGE at @paddr. */
static void
leapioraid_base_add_sg_single_64(void *paddr, u32 flags_length,
				 dma_addr_t dma_addr)
{
	struct LeapioSGESimple64_t *sgel = paddr;

	flags_length |= (LEAPIORAID_SGE_FLAGS_64_BIT_ADDRESSING |
			 LEAPIORAID_SGE_FLAGS_SYSTEM_ADDRESS) <<
	    LEAPIORAID_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le64(dma_addr);
}

/*
 * leapioraid_base_get_chain_buffer_tracker - grab the next chain buffer
 * pre-allocated for this command's smid.
 *
 * Return: tracker for the next unused chain buffer, or NULL when the
 * command has already consumed chains_needed_per_io buffers.
 */
static
struct leapioraid_chain_tracker *leapioraid_base_get_chain_buffer_tracker(
	struct LEAPIORAID_ADAPTER *ioc,
	struct scsi_cmnd *scmd)
{
	struct leapioraid_chain_tracker *chain_req;
	struct leapioraid_scsiio_tracker *st = leapioraid_base_scsi_cmd_priv(scmd);
	u16 smid = st->smid;
	u8 chain_offset =
	    atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);

	if (chain_offset == ioc->chains_needed_per_io)
		return NULL;
	chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
	atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
	return chain_req;
}

/*
 * leapioraid_base_build_sg - build a classic MPI SGL for a management
 * request with optional out and/or in buffers (at most one SGE each).
 * Flag sets mirror the MPI spec: HOST_TO_IOC on the out element,
 * LAST/EOB/EOL on the final element; zero-length transfers get the
 * dedicated zero-length SGE.
 */
static void
leapioraid_base_build_sg(struct LEAPIORAID_ADAPTER *ioc, void *psge,
			 dma_addr_t data_out_dma, size_t data_out_sz,
			 dma_addr_t data_in_dma, size_t data_in_sz)
{
	u32 sgl_flags;

	if (!data_out_sz && !data_in_sz) {
		leapioraid_base_build_zero_len_sge(ioc, psge);
		return;
	}
	if (data_out_sz && data_in_sz) {
		sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT |
			     LEAPIORAID_SGE_FLAGS_END_OF_BUFFER |
			     LEAPIORAID_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
					data_out_sz, data_out_dma);
		psge += ioc->sge_size;
		sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT |
			     LEAPIORAID_SGE_FLAGS_LAST_ELEMENT |
			     LEAPIORAID_SGE_FLAGS_END_OF_BUFFER |
			     LEAPIORAID_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
					data_in_sz,
					data_in_dma);
	} else if (data_out_sz) {
		sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT |
			     LEAPIORAID_SGE_FLAGS_LAST_ELEMENT |
			     LEAPIORAID_SGE_FLAGS_END_OF_BUFFER |
			     LEAPIORAID_SGE_FLAGS_END_OF_LIST |
			     LEAPIORAID_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
					data_out_sz, data_out_dma);
	} else if (data_in_sz) {
		sgl_flags = (LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT |
			     LEAPIORAID_SGE_FLAGS_LAST_ELEMENT |
			     LEAPIORAID_SGE_FLAGS_END_OF_BUFFER |
			     LEAPIORAID_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << LEAPIORAID_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
					data_in_sz, data_in_dma);
	}
}

/*
 * leapioraid_base_mod64 - 64-by-32 bit modulo usable on 32-bit kernels
 * (kernel C has no native u64 %% u32).  Returns 0 on a zero divisor
 * instead of faulting.
 */
u32
leapioraid_base_mod64(u64 dividend, u32 divisor)
{
	u32 remainder;

	if (!divisor) {
		pr_err("leapioraid : DIVISOR is zero, in div fn\n");
		return 0;
	}
	/* do_div() divides in place and returns the remainder. */
	remainder = do_div(dividend, divisor);
	return remainder;
}

/* Write one IEEE-format SGE/chain element at @paddr. */
static void
leapioraid_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset,
				   u32 length, dma_addr_t dma_addr)
{
	struct LEAPIORAID_IEEE_SGE_CHAIN64 *sgel = paddr;

	sgel->Flags = flags;
	sgel->NextChainOffset = chain_offset;
	sgel->Length = cpu_to_le32(length);
	sgel->Address = cpu_to_le64(dma_addr);
}

/* IEEE-format zero-length SGE: simple element, end-of-list, address -1. */
static void
leapioraid_base_build_zero_len_sge_ieee(struct LEAPIORAID_ADAPTER *ioc,
					void *paddr)
{
	u8 sgl_flags = (LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
			LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR |
			LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST);

	leapioraid_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
}

/*
 * leapioraid_base_build_sg_scmd_ieee - map a SCSI command's scatterlist
 * into the request frame's inline IEEE SGL plus chained segments.
 * (Declarations here; body continues in the next chunk.)
 */
static int
leapioraid_base_build_sg_scmd_ieee(struct LEAPIORAID_ADAPTER *ioc,
				   struct scsi_cmnd *scmd, u16 smid)
{
	struct LeapioraidSCSIIOReq_t *mpi_request;
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_offset;
	u32 chain_length;
	int sges_left;
	u32 sges_in_segment;
	u8 simple_sgl_flags;
	u8 simple_sgl_flags_last;
	u8 chain_sgl_flags;
	struct
	    /* (continuation of leapioraid_base_build_sg_scmd_ieee) */
	    leapioraid_chain_tracker *chain_req;

	mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
	simple_sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
	    LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR;
	simple_sgl_flags_last = simple_sgl_flags |
	    LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST;
	chain_sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR;

	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		pr_err_ratelimited
		    ("sd %s: scsi_dma_map failed: request for %d bytes!\n",
		     dev_name(&scmd->device->sdev_gendev), scsi_bufflen(scmd));
		return -ENOMEM;
	}
	sg_local = &mpi_request->SGL;
	/* How many SGEs fit inline in the request frame after the header. */
	sges_in_segment = (ioc->request_sz -
			   offsetof(struct LeapioraidSCSIIOReq_t,
				    SGL)) / ioc->sge_size_ieee;
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;
	/* ChainOffset is in SGE-size units from the start of the frame. */
	mpi_request->ChainOffset = (sges_in_segment - 1) +
	    (offsetof(struct LeapioraidSCSIIOReq_t, SGL) / ioc->sge_size_ieee);
	/* Fill all inline slots but the last, which becomes the chain SGE. */
	while (sges_in_segment > 1) {
		leapioraid_base_add_sg_single_ieee(sg_local, simple_sgl_flags,
						   0, sg_dma_len(sg_scmd),
						   sg_dma_address(sg_scmd));

		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size_ieee;
		sges_left--;
		sges_in_segment--;
	}
	chain_req = leapioraid_base_get_chain_buffer_tracker(ioc, scmd);
	if (!chain_req)
		return -1;
	chain = chain_req->chain_buffer;
	chain_dma = chain_req->chain_buffer_dma;
	do {
		sges_in_segment = (sges_left <=
				   ioc->max_sges_in_chain_message) ? sges_left :
		    ioc->max_sges_in_chain_message;
		/* Non-zero chain_offset => another chain segment follows. */
		chain_offset = (sges_left == sges_in_segment) ?
		    0 : sges_in_segment;
		chain_length = sges_in_segment * ioc->sge_size_ieee;
		if (chain_offset)
			chain_length += ioc->sge_size_ieee;
		leapioraid_base_add_sg_single_ieee(sg_local, chain_sgl_flags,
						   chain_offset, chain_length,
						   chain_dma);
		sg_local = chain;
		if (!chain_offset)
			goto fill_in_last_segment;
		while (sges_in_segment) {
			leapioraid_base_add_sg_single_ieee(sg_local,
							   simple_sgl_flags, 0,
							   sg_dma_len(sg_scmd),
							   sg_dma_address
							   (sg_scmd));

			sg_scmd = sg_next(sg_scmd);
			sg_local += ioc->sge_size_ieee;
			sges_left--;
			sges_in_segment--;
		}
		chain_req = leapioraid_base_get_chain_buffer_tracker(ioc, scmd);
		if (!chain_req)
			return -1;
		chain = chain_req->chain_buffer;
		chain_dma = chain_req->chain_buffer_dma;
	} while (1);
fill_in_last_segment:
	/* Remaining SGEs fit in the current segment; flag the last one EOL. */
	while (sges_left > 0) {
		if (sges_left == 1)
			leapioraid_base_add_sg_single_ieee(sg_local,
							   simple_sgl_flags_last,
							   0,
							   sg_dma_len(sg_scmd),
							   sg_dma_address
							   (sg_scmd));
		else
			leapioraid_base_add_sg_single_ieee(sg_local,
							   simple_sgl_flags, 0,
							   sg_dma_len(sg_scmd),
							   sg_dma_address
							   (sg_scmd));

		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size_ieee;
		sges_left--;
	}
	return 0;
}

/*
 * leapioraid_base_build_sg_ieee - IEEE-format counterpart of
 * leapioraid_base_build_sg(): at most one out and one in SGE for
 * management requests.
 */
static void
leapioraid_base_build_sg_ieee(struct LEAPIORAID_ADAPTER *ioc, void *psge,
			      dma_addr_t data_out_dma, size_t data_out_sz,
			      dma_addr_t data_in_dma, size_t data_in_sz)
{
	u8 sgl_flags;

	if (!data_out_sz && !data_in_sz) {
		leapioraid_base_build_zero_len_sge_ieee(ioc, psge);
		return;
	}
	if (data_out_sz && data_in_sz) {
		sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0,
						   data_out_sz, data_out_dma);
		psge += ioc->sge_size_ieee;
		sgl_flags |= LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST;
		leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0,
						   data_in_sz, data_in_dma);
	} else if (data_out_sz) {
		sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST |
		    LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0,
						   data_out_sz, data_out_dma);
	} else if (data_in_sz) {
		sgl_flags = LEAPIORAID_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    LEAPIORAID_IEEE_SGE_FLAGS_END_OF_LIST |
		    LEAPIORAID_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		leapioraid_base_add_sg_single_ieee(psge, sgl_flags, 0,
						   data_in_sz, data_in_dma);
	}
}

/* Convert a page count to kilobytes for the memory-info printout. */
#define leapioraid_convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
/*
 * leapioraid_base_config_dma_addressing - pick the DMA mask (63 or 32 bit)
 * and the matching SGE writer.
 *
 * NOTE(review): the "64"/DMA_BIT_MASK(64) initializers are dead -- they
 * are unconditionally overwritten with the 63-bit values right below
 * (mpt3sas does the same to dodge a HW addressing quirk; confirm).
 * NOTE(review): on the use_32bit_dma path that still succeeds with the
 * 63-bit streaming mask, the function returns 0 without the final
 * pr_info -- verify that silence is intended.
 */
static int
leapioraid_base_config_dma_addressing(struct LEAPIORAID_ADAPTER *ioc,
				      struct pci_dev *pdev)
{
	struct sysinfo s;
	char *desc = "64";
	u64 consistant_dma_mask = DMA_BIT_MASK(64);
	u64 dma_mask = DMA_BIT_MASK(64);

	consistant_dma_mask = DMA_BIT_MASK(63);
	dma_mask = DMA_BIT_MASK(63);
	desc = "63";
	ioc->dma_mask = 63;
	if (ioc->use_32bit_dma)
		consistant_dma_mask = DMA_BIT_MASK(32);
	if (sizeof(dma_addr_t) > 4) {
		if (!dma_set_mask(&pdev->dev, dma_mask) &&
		    !dma_set_coherent_mask(&pdev->dev, consistant_dma_mask)) {
			ioc->base_add_sg_single =
			    &leapioraid_base_add_sg_single_64;
			ioc->sge_size = sizeof(struct LeapioSGESimple64_t);
			if (!ioc->use_32bit_dma)
				goto out;
			return 0;
		}
	}
	/* Fall back to full 32-bit DMA. */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))
	    && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		ioc->base_add_sg_single = &leapioraid_base_add_sg_single_32;
		ioc->sge_size = sizeof(struct LeapioSGESimple32_t);
		desc = "32";
		ioc->dma_mask = 32;
	} else
		return -ENODEV;
out:
	si_meminfo(&s);
	pr_info("%s %s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
		ioc->name, desc, leapioraid_convert_to_kb(s.totalram));
	return 0;
}

/*
 * leapioraid_base_check_and_get_msix_vectors - read the MSI-X table size
 * straight from PCI config space (Message Control, offset +2 in the
 * capability; low 10 bits are table size minus one).
 *
 * Return: supported vector count, or -EINVAL when the device has no
 * MSI-X capability.  (Continues in the next chunk.)
 */
int
leapioraid_base_check_and_get_msix_vectors(struct pci_dev *pdev)
{
	int base;
	u16 message_control, msix_vector_count;

	base = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!base)
		return -EINVAL;
	pci_read_config_word(pdev, base + 2, &message_control);
	msix_vector_count =
	    /* (tail of leapioraid_base_check_and_get_msix_vectors) */
	    (message_control & 0x3FF) + 1;
	return msix_vector_count;
}

/* Encoded PCIe generation values as reported in the link-status field. */
enum leapioraid_pci_bus_speed {
	LEAPIORAID_PCIE_SPEED_2_5GT = 0x14,
	LEAPIORAID_PCIE_SPEED_5_0GT = 0x15,
	LEAPIORAID_PCIE_SPEED_8_0GT = 0x16,
	LEAPIORAID_PCIE_SPEED_16_0GT = 0x17,
	LEAPIORAID_PCI_SPEED_UNKNOWN = 0xff,
};

/* Index = PCI_EXP_LNKSTA_CLS value (1..4 known); rest unknown. */
const unsigned char leapioraid_pcie_link_speed[] = {
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCIE_SPEED_2_5GT,
	LEAPIORAID_PCIE_SPEED_5_0GT,
	LEAPIORAID_PCIE_SPEED_8_0GT,
	LEAPIORAID_PCIE_SPEED_16_0GT,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN,
	LEAPIORAID_PCI_SPEED_UNKNOWN
};

/*
 * leapioraid_base_check_and_enable_high_iops_queues - decide whether to
 * reserve dedicated high-IOPS reply queues.
 *
 * Enabled only in the default perf mode on a Gen4 (16GT/s) link with a
 * full MSI-X complement, enough online CPUs, no poll queues, no
 * reset_devices boot, and no user vector override.
 */
static void
leapioraid_base_check_and_enable_high_iops_queues(
	struct LEAPIORAID_ADAPTER *ioc,
	int hba_msix_vector_count,
	int iopoll_q_count)
{
	u16 lnksta;
	enum leapioraid_pci_bus_speed speed;

	if (perf_mode == LEAPIORAID_PERF_MODE_IOPS ||
	    perf_mode == LEAPIORAID_PERF_MODE_LATENCY || iopoll_q_count) {
		ioc->high_iops_queues = 0;
		return;
	}
	if (perf_mode == LEAPIORAID_PERF_MODE_DEFAULT) {
		pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
		speed = leapioraid_pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
		dev_info(&ioc->pdev->dev, "PCIe device speed is %s\n",
			 speed == LEAPIORAID_PCIE_SPEED_2_5GT ? "2.5GHz" :
			 speed == LEAPIORAID_PCIE_SPEED_5_0GT ? "5.0GHz" :
			 speed == LEAPIORAID_PCIE_SPEED_8_0GT ? "8.0GHz" :
			 speed == LEAPIORAID_PCIE_SPEED_16_0GT ?
			 "16.0GHz" :
			 "Unknown");
		if (speed < LEAPIORAID_PCIE_SPEED_16_0GT) {
			ioc->high_iops_queues = 0;
			return;
		}
	}
	if (!reset_devices &&
	    hba_msix_vector_count == LEAPIORAID_GEN35_MAX_MSIX_QUEUES &&
	    num_online_cpus() >= LEAPIORAID_HIGH_IOPS_REPLY_QUEUES &&
	    max_msix_vectors == -1)
		ioc->high_iops_queues = LEAPIORAID_HIGH_IOPS_REPLY_QUEUES;
	else
		ioc->high_iops_queues = 0;
}

/* Tear down MSI-X vectors and the poll-queue bookkeeping array. */
void
leapioraid_base_disable_msix(struct LEAPIORAID_ADAPTER *ioc)
{
	if (!ioc->msix_enable)
		return;
	pci_free_irq_vectors(ioc->pdev);
	kfree(ioc->blk_mq_poll_queues);
	ioc->msix_enable = 0;
}

/*
 * leapioraid_base_free_irq - unwind every reply queue: poll queues are
 * just freed; IRQ queues additionally get irq_poll disabled, the
 * affinity hint cleared and the IRQ released.
 */
void
leapioraid_base_free_irq(struct LEAPIORAID_ADAPTER *ioc)
{
	struct leapioraid_adapter_reply_queue *reply_q, *next;

	if (list_empty(&ioc->reply_queue_list))
		return;
	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
		list_del(&reply_q->list);
		if (reply_q->is_blk_mq_poll_q) {
			kfree(reply_q);
			continue;
		}
		irq_poll_disable(&reply_q->irqpoll);
		if (ioc->smp_affinity_enable)
			irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
							     reply_q->msix_index), NULL);
		free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
			 reply_q);
		kfree(reply_q);
	}
}

/*
 * leapioraid_base_request_irq - allocate and wire up one reply queue.
 * @index: MSI-X vector index; indices >= iopoll_q_start_index become
 *         interrupt-less blk-mq poll queues.
 *
 * Return: 0 on success, -ENOMEM/-EBUSY on failure.
 */
static int
leapioraid_base_request_irq(struct LEAPIORAID_ADAPTER *ioc, u8 index)
{
	struct leapioraid_adapter_reply_queue *reply_q;
	int r;
	u8 qid;

	reply_q = kzalloc(sizeof(struct leapioraid_adapter_reply_queue),
			  GFP_KERNEL);
	if (!reply_q)
		return -ENOMEM;

	reply_q->ioc = ioc;
	reply_q->msix_index = index;
	atomic_set(&reply_q->busy, 0);
	if (index >= ioc->iopoll_q_start_index) {
		/* Poll queue: no interrupt line is requested. */
		qid = index - ioc->iopoll_q_start_index;
		snprintf(reply_q->name, LEAPIORAID_NAME_LENGTH, "%s%u-mq-poll%u",
			 ioc->driver_name, ioc->id, qid);
		reply_q->is_blk_mq_poll_q = 1;
		ioc->blk_mq_poll_queues[qid].reply_q = reply_q;
		INIT_LIST_HEAD(&reply_q->list);
		list_add_tail(&reply_q->list, &ioc->reply_queue_list);
		return 0;
	}
	if (ioc->msix_enable)
		snprintf(reply_q->name, LEAPIORAID_NAME_LENGTH, "%s%u-msix%u",
			 ioc->driver_name, ioc->id, index);
	else
		snprintf(reply_q->name, LEAPIORAID_NAME_LENGTH, "%s%d",
			 ioc->driver_name, ioc->id);
	r = request_irq(pci_irq_vector(ioc->pdev, index), leapioraid_base_interrupt,
			IRQF_SHARED, reply_q->name, reply_q);
	if (r) {
		pr_err("%s unable to allocate interrupt %d!\n", reply_q->name,
		       pci_irq_vector(ioc->pdev, index));
		kfree(reply_q);
		return -EBUSY;
	}

	INIT_LIST_HEAD(&reply_q->list);
	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
	return 0;
}

/*
 * leapioraid_base_alloc_irq_vectors - allocate MSI-X vectors, keeping the
 * high-IOPS vectors outside the kernel's managed-affinity spreading
 * (pre_vectors).  Poll queues are excluded (nr = iopoll_q_start_index).
 *
 * Return: number of vectors allocated, or negative errno.
 */
static int leapioraid_base_alloc_irq_vectors(struct LEAPIORAID_ADAPTER *ioc)
{
	int i, irq_flags = PCI_IRQ_MSIX;
	struct irq_affinity desc = {.pre_vectors = ioc->high_iops_queues };
	struct irq_affinity *descp = &desc;
	int nr_msix_vectors = ioc->iopoll_q_start_index;

	if (ioc->smp_affinity_enable)
		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
	else
		descp = NULL;
	dinitprintk(ioc, pr_err(
		"%s high_iops_queues: %d,\n\t\t"
		"reply_queue_count: %d, nr_msix_vectors: %d\n",
		ioc->name,
		ioc->high_iops_queues,
		ioc->reply_queue_count,
		nr_msix_vectors));
	i = pci_alloc_irq_vectors_affinity(
		ioc->pdev,
		ioc->high_iops_queues,
		nr_msix_vectors, irq_flags, descp);
	return i;
}

/*
 * leapioraid_base_enable_msix - size and enable the MSI-X reply queues,
 * falling back to a single legacy/IO-APIC style queue on any failure.
 * (Continues in the next chunk.)
 */
static int
leapioraid_base_enable_msix(struct LEAPIORAID_ADAPTER *ioc)
{
	int r, i, msix_vector_count, local_max_msix_vectors;
	int iopoll_q_count = 0;

	ioc->msix_load_balance = false;
	msix_vector_count =
	    leapioraid_base_check_and_get_msix_vectors(ioc->pdev);
	if (msix_vector_count <= 0) {
		dfailprintk(ioc, pr_info("%s msix not supported\n", ioc->name));
		goto try_ioapic;
	}
	dinitprintk(ioc, pr_err(
		"%s MSI-X vectors supported: %d, no of cores: %d\n",
		ioc->name, msix_vector_count, ioc->cpu_count));
	ioc->reply_queue_count = min_t(int, ioc->cpu_count, msix_vector_count);
	if (!ioc->rdpq_array_enable && max_msix_vectors == -1) {
		if (reset_devices)
			local_max_msix_vectors = 1;
		else
			/* (continuation of leapioraid_base_enable_msix) */
			local_max_msix_vectors = 8;
	} else
		local_max_msix_vectors = max_msix_vectors;
	if (local_max_msix_vectors == 0)
		goto try_ioapic;
	if (!ioc->combined_reply_queue) {
		pr_err(
			"%s combined reply queue is off, so enabling msix load balance\n",
			ioc->name);
		ioc->msix_load_balance = true;
	}
	/* Load balancing picks queues round-robin, so CPU pinning is moot. */
	if (ioc->msix_load_balance)
		ioc->smp_affinity_enable = 0;
	if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1)
		ioc->shost->host_tagset = 0;
	if (ioc->shost->host_tagset)
		iopoll_q_count = poll_queues;
	if (iopoll_q_count) {
		ioc->blk_mq_poll_queues = kcalloc(iopoll_q_count,
						  sizeof(struct
							 leapioraid_blk_mq_poll_queue),
						  GFP_KERNEL);
		if (!ioc->blk_mq_poll_queues)
			iopoll_q_count = 0;
	}
	leapioraid_base_check_and_enable_high_iops_queues(ioc,
							  msix_vector_count,
							  iopoll_q_count);
	ioc->reply_queue_count =
	    min_t(int, ioc->reply_queue_count + ioc->high_iops_queues,
		  msix_vector_count);
	if (local_max_msix_vectors > 0)
		ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
					       ioc->reply_queue_count);
	if (iopoll_q_count) {
		/* Need at least one IRQ queue besides the poll queues. */
		if (ioc->reply_queue_count < (iopoll_q_count + 1))
			iopoll_q_count = 0;
		ioc->reply_queue_count =
		    min(ioc->reply_queue_count + iopoll_q_count,
			msix_vector_count);
	}
	ioc->iopoll_q_start_index = ioc->reply_queue_count - iopoll_q_count;
	r = leapioraid_base_alloc_irq_vectors(ioc);
	if (r < 0) {
		pr_warn(
			"%s pci_alloc_irq_vectors failed (r=%d) !!!\n",
			ioc->name, r);
		goto try_ioapic;
	}
	ioc->msix_enable = 1;
	for (i = 0; i < ioc->reply_queue_count; i++) {
		r = leapioraid_base_request_irq(ioc, i);
		if (r) {
			leapioraid_base_free_irq(ioc);
			leapioraid_base_disable_msix(ioc);
			goto try_ioapic;
		}
	}
	dinitprintk(ioc,
		    pr_info("%s High IOPs queues : %s\n",
			    ioc->name,
			    ioc->high_iops_queues ?
			    "enabled" : "disabled"));
	return 0;
try_ioapic:
	/* Fallback: one queue on the legacy interrupt line. */
	ioc->high_iops_queues = 0;
	dinitprintk(ioc, pr_err(
		"%s High IOPs queues : disabled\n", ioc->name));
	ioc->reply_queue_count = 1;
	/* NOTE(review): "- 0" is a no-op (no poll queues on this path). */
	ioc->iopoll_q_start_index = ioc->reply_queue_count - 0;
	r = leapioraid_base_request_irq(ioc, 0);
	return r;
}

/*
 * leapioraid_base_import_managed_irqs_affinity - fill cpu_msix_table from
 * the kernel's managed-IRQ affinity masks.
 *
 * High-IOPS vectors are hinted to the HBA's NUMA node; the remaining IRQ
 * vectors map each of their affine online CPUs to their MSI-X index.  If
 * any vector lacks an affinity mask (or SMP affinity is disabled), falls
 * back to a simple driver-computed CPU grouping.
 */
static void
leapioraid_base_import_managed_irqs_affinity(
	struct LEAPIORAID_ADAPTER *ioc)
{
	struct leapioraid_adapter_reply_queue *reply_q;
	unsigned int cpu, nr_msix;
	int local_numa_node;
	unsigned int index = 0;

	nr_msix = ioc->reply_queue_count;
	if (!nr_msix)
		return;
	if (ioc->smp_affinity_enable) {
		if (ioc->high_iops_queues) {
			local_numa_node = dev_to_node(&ioc->pdev->dev);
			for (index = 0; index < ioc->high_iops_queues; index++) {
				irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
								     index),
						      cpumask_of_node
						      (local_numa_node));
			}
		}
		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
			const cpumask_t *mask;

			if (reply_q->msix_index < ioc->high_iops_queues ||
			    reply_q->msix_index >= ioc->iopoll_q_start_index)
				continue;
			mask = pci_irq_get_affinity(ioc->pdev,
						    reply_q->msix_index);
			if (!mask) {
				dinitprintk(ioc, pr_warn(
					"%s no affinity for msi %x\n",
					ioc->name,
					reply_q->msix_index));
				goto fall_back;
			}
			for_each_cpu_and(cpu, mask, cpu_online_mask) {
				if (cpu >= ioc->cpu_msix_table_sz)
					break;
				ioc->cpu_msix_table[cpu] = reply_q->msix_index;
			}
		}
		return;
	}
fall_back:
	leapioraid_base_group_cpus_on_irq(ioc);
}

/*
 * leapioraid_base_assign_reply_queues - clamp the queue count to what the
 * firmware allows (re-distributing msix_index round-robin when clamped)
 * and rebuild the CPU-to-queue table.  (Continues in the next chunk.)
 */
static void
leapioraid_base_assign_reply_queues(struct LEAPIORAID_ADAPTER *ioc)
{
	struct leapioraid_adapter_reply_queue *reply_q;
	int reply_queue;

	if (!leapioraid_base_is_controller_msix_enabled(ioc))
		return;
	if (ioc->msix_load_balance)
		return;
	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
	if (ioc->reply_queue_count > ioc->facts.MaxMSIxVectors) {
		ioc->reply_queue_count = ioc->facts.MaxMSIxVectors;
		reply_queue = 0;
		list_for_each_entry(reply_q,
				    /* (tail of leapioraid_base_assign_reply_queues) */
				    &ioc->reply_queue_list, list) {
			reply_q->msix_index = reply_queue;
			if (++reply_queue == ioc->reply_queue_count)
				reply_queue = 0;
		}
	}
	leapioraid_base_import_managed_irqs_affinity(ioc);
}

/*
 * leapioraid_base_wait_for_doorbell_int - sleep-poll (1ms steps) for the
 * IOC-to-system doorbell interrupt status bit.
 * @timeout: seconds to wait.
 * Return: 0 when the bit is set, -EFAULT on timeout.
 */
static int
leapioraid_base_wait_for_doorbell_int(
	struct LEAPIORAID_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 int_status;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		int_status =
		    ioc->base_readl(&ioc->chip->HostInterruptStatus,
				    LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
		if (int_status & LEAPIORAID_HIS_IOC2SYS_DB_STATUS) {
			dhsprintk(ioc, pr_info(
				"%s %s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count,
				timeout));
			return 0;
		}
		usleep_range(1000, 1100);
		count++;
	} while (--cntdn);
	pr_err("%s %s: failed due to timeout count(%d), int_status(%x)!\n",
	       ioc->name, __func__, count, int_status);
	return -EFAULT;
}

/*
 * leapioraid_base_spin_on_doorbell_int - busy-wait (udelay) variant for
 * atomic contexts where sleeping is not allowed.
 */
static int
leapioraid_base_spin_on_doorbell_int(struct LEAPIORAID_ADAPTER *ioc,
				     int timeout)
{
	u32 cntdn, count;
	u32 int_status;

	count = 0;
	cntdn = 2000 * timeout;
	do {
		int_status =
		    ioc->base_readl(&ioc->chip->HostInterruptStatus,
				    LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
		if (int_status & LEAPIORAID_HIS_IOC2SYS_DB_STATUS) {
			dhsprintk(ioc, pr_info(
				"%s %s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count,
				timeout));
			return 0;
		}
		udelay(500);
		count++;
	} while (--cntdn);
	pr_err("%s %s: failed due to timeout count(%d), int_status(%x)!\n",
	       ioc->name, __func__, count, int_status);
	return -EFAULT;
}

/*
 * leapioraid_base_wait_for_doorbell_ack - wait for the IOC to consume a
 * system-to-IOC doorbell write.  Success is the SYS2IOC bit clearing;
 * while waiting, a FAULT or COREDUMP state in the Doorbell register is
 * reported and aborts; all-ones reads (surprise removal) abort too.
 */
static int
leapioraid_base_wait_for_doorbell_ack(struct LEAPIORAID_ADAPTER *ioc,
				      int timeout)
{
	u32 cntdn, count;
	u32 int_status;
	u32 doorbell;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		int_status =
		    ioc->base_readl(&ioc->chip->HostInterruptStatus,
				    LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
		if (!(int_status & LEAPIORAID_HIS_SYS2IOC_DB_STATUS)) {
			dhsprintk(ioc, pr_info(
				"%s %s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count,
				timeout));
			return 0;
		} else if (int_status & LEAPIORAID_HIS_IOC2SYS_DB_STATUS) {
			doorbell =
			    ioc->base_readl(&ioc->chip->Doorbell,
					    LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
			if ((doorbell & LEAPIORAID_IOC_STATE_MASK) ==
			    LEAPIORAID_IOC_STATE_FAULT) {
				leapioraid_print_fault_code(ioc, doorbell);
				return -EFAULT;
			}
			if ((doorbell & LEAPIORAID_IOC_STATE_MASK) ==
			    LEAPIORAID_IOC_STATE_COREDUMP) {
				leapioraid_base_coredump_info(ioc, doorbell);
				return -EFAULT;
			}
		} else if (int_status == 0xFFFFFFFF)
			goto out;
		usleep_range(1000, 1100);
		count++;
	} while (--cntdn);
out:
	pr_err("%s %s: failed due to timeout count(%d), int_status(%x)!\n",
	       ioc->name, __func__, count, int_status);
	return -EFAULT;
}

/* Wait for the Doorbell USED bit to clear (handshake fully finished). */
static int
leapioraid_base_wait_for_doorbell_not_used(struct LEAPIORAID_ADAPTER *ioc,
					   int timeout)
{
	u32 cntdn, count;
	u32 doorbell_reg;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		doorbell_reg =
		    ioc->base_readl(&ioc->chip->Doorbell,
				    LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
		if (!(doorbell_reg & LEAPIORAID_DOORBELL_USED)) {
			dhsprintk(ioc, pr_info(
				"%s %s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count,
				timeout));
			return 0;
		}
		usleep_range(1000, 1100);
		count++;
	} while (--cntdn);
	pr_err("%s %s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
	       ioc->name, __func__, count, doorbell_reg);
	return -EFAULT;
}

/*
 * leapioraid_base_handshake_req_reply_wait - classic MPI doorbell
 * handshake: send @request dword-by-dword through the Doorbell register,
 * then read the reply back 16 bits at a time.
 * @request_bytes/@reply_bytes: sizes of the two buffers
 * @timeout: seconds to wait for the reply to start arriving
 *
 * Reply length is taken from the reply header's MsgLength (in dwords, so
 * the loop runs to MsgLength * 2 u16 reads); excess words beyond
 * @reply_bytes are read and discarded to keep the handshake in sync.
 * Return: 0 on success, -EFAULT on any stage timing out.
 * (Continues in the next chunk.)
 */
static int
leapioraid_base_handshake_req_reply_wait(struct LEAPIORAID_ADAPTER *ioc,
					 int request_bytes, u32 *request,
					 int reply_bytes, u16 *reply,
					 int timeout)
{
	struct LeapioraidDefaultRep_t *default_reply
		= (struct LeapioraidDefaultRep_t *) reply;
	int i;
	u8 failed;
	__le32 *mfp;

	if ((ioc->base_readl(&ioc->chip->Doorbell,
			     LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY) & LEAPIORAID_DOORBELL_USED)) {
		pr_err("%s doorbell is in use (line=%d)\n", ioc->name, __LINE__);
		return -EFAULT;
	}
	/* Clear any stale IOC-to-system doorbell interrupt first. */
	if (ioc->base_readl(&ioc->chip->HostInterruptStatus,
			    LEAPIORAID_READL_RETRY_COUNT_OF_THREE) &
	    LEAPIORAID_HIS_IOC2SYS_DB_STATUS)
		writel(0, &ioc->chip->HostInterruptStatus);
	writel(((LEAPIORAID_FUNC_HANDSHAKE << LEAPIORAID_DOORBELL_FUNCTION_SHIFT)
		| ((request_bytes / 4) << LEAPIORAID_DOORBELL_ADD_DWORDS_SHIFT)),
	       &ioc->chip->Doorbell);
	if ((leapioraid_base_spin_on_doorbell_int(ioc, 5))) {
		pr_err("%s doorbell handshake int failed (line=%d)\n",
		       ioc->name, __LINE__);
		return -EFAULT;
	}
	writel(0, &ioc->chip->HostInterruptStatus);
	if ((leapioraid_base_wait_for_doorbell_ack(ioc, 5))) {
		pr_err("%s doorbell handshake ack failed (line=%d)\n",
		       ioc->name, __LINE__);
		return -EFAULT;
	}
	/* Push the request payload one dword at a time, ack'd per write. */
	for (i = 0, failed = 0; i < request_bytes / 4 && !failed; i++) {
		writel((u32) (request[i]), &ioc->chip->Doorbell);
		if ((leapioraid_base_wait_for_doorbell_ack(ioc, 5)))
			failed = 1;
	}
	if (failed) {
		pr_err("%s doorbell handshake sending request failed (line=%d)\n",
		       ioc->name, __LINE__);
		return -EFAULT;
	}
	if ((leapioraid_base_wait_for_doorbell_int(ioc, timeout))) {
		pr_err("%s doorbell handshake int failed (line=%d)\n",
		       ioc->name, __LINE__);
		return -EFAULT;
	}
	/* First two u16s contain the reply header incl. MsgLength. */
	reply[0] =
	    (u16) (ioc->base_readl(&ioc->chip->Doorbell,
				   LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY)
		   & LEAPIORAID_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);
	if ((leapioraid_base_wait_for_doorbell_int(ioc, 5))) {
		pr_err("%s doorbell handshake int failed (line=%d)\n",
		       ioc->name, __LINE__);
		return -EFAULT;
	}
	reply[1] =
	    (u16) (ioc->base_readl(&ioc->chip->Doorbell,
				   LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY)
		   & LEAPIORAID_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);
	for (i = 2; i < default_reply->MsgLength * 2; i++) {
		if ((leapioraid_base_wait_for_doorbell_int(ioc, 5))) {
			pr_err("%s doorbell handshake int failed (line=%d)\n",
			       ioc->name, __LINE__);
			return -EFAULT;
		}
		if (i >= reply_bytes / 2)
			/* (tail of leapioraid_base_handshake_req_reply_wait:
			 * overflow words are read and discarded)
			 */
			ioc->base_readl(&ioc->chip->Doorbell,
					LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
		else
			reply[i] =
			    (u16) (ioc->base_readl(&ioc->chip->Doorbell,
						   LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY)
				   & LEAPIORAID_DOORBELL_DATA_MASK);
		writel(0, &ioc->chip->HostInterruptStatus);
	}
	if (leapioraid_base_wait_for_doorbell_int(ioc, 5)) {
		pr_err("%s doorbell handshake int failed (line=%d)\n",
		       ioc->name, __LINE__);
		return -EFAULT;
	}
	if (leapioraid_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
		dhsprintk(ioc,
			  pr_info("%s doorbell is in use (line=%d)\n",
				  ioc->name, __LINE__));
	}
	writel(0, &ioc->chip->HostInterruptStatus);
	if (ioc->logging_level & LEAPIORAID_DEBUG_INIT) {
		mfp = (__le32 *) reply;
		pr_info("%s \toffset:data\n", ioc->name);
		for (i = 0; i < reply_bytes / 4; i++)
			pr_info("%s \t[0x%02x]:%08x\n",
				ioc->name, i * 4, le32_to_cpu(mfp[i]));
	}
	return 0;
}

/*
 * leapioraid_base_wait_on_iocstate - poll (1ms steps) until the IOC
 * reaches @ioc_state or @timeout seconds elapse.
 * Return: 0 on success; otherwise the last observed state (non-zero).
 * A FAULT state seen after the first iteration aborts the wait early.
 */
static int
leapioraid_base_wait_on_iocstate(
	struct LEAPIORAID_ADAPTER *ioc, u32 ioc_state,
	int timeout)
{
	u32 count, cntdn;
	u32 current_state;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		current_state = leapioraid_base_get_iocstate(ioc, 1);
		if (current_state == ioc_state)
			return 0;
		if (count && current_state == LEAPIORAID_IOC_STATE_FAULT)
			break;
		usleep_range(1000, 1100);
		count++;
	} while (--cntdn);
	return current_state;
}

/* Dump the first 256 bytes of the chip's register window for debugging. */
static inline void
leapioraid_base_dump_reg_set(struct LEAPIORAID_ADAPTER *ioc)
{
	unsigned int i, sz = 256;
	u32 __iomem *reg = (u32 __iomem *) ioc->chip;

	pr_info("%s System Register set:\n", ioc->name);
	for (i = 0; i < (sz / sizeof(u32)); i++)
		pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
}

/*
 * leapioraid_base_unlock_and_get_host_diagnostic - write the magic
 * unlock sequence to WriteSequence until the HostDiagnostic register
 * reports writes enabled (bit 0x80), then return its value.
 * Gives up after 20 attempts (~2s).
 * Return: 0 on success, -EFAULT when the unlock never takes.
 */
int
leapioraid_base_unlock_and_get_host_diagnostic(
	struct LEAPIORAID_ADAPTER *ioc,
	u32 *host_diagnostic)
{
	u32 count;

	*host_diagnostic = 0;
	count = 0;
	do {
		drsprintk(ioc, pr_info("%s write magic sequence\n", ioc->name));
		writel(0x0, &ioc->chip->WriteSequence);
		writel(0xF, &ioc->chip->WriteSequence);
		writel(0x4, &ioc->chip->WriteSequence);
		writel(0xB, &ioc->chip->WriteSequence);
		writel(0x2, &ioc->chip->WriteSequence);
		writel(0x7, &ioc->chip->WriteSequence);
		writel(0xD, &ioc->chip->WriteSequence);
		msleep(100);
		if (count++ > 20) {
			pr_err("%s Giving up writing magic sequence after 20 retries\n",
			       ioc->name);
			leapioraid_base_dump_reg_set(ioc);
			return -EFAULT;
		}
		*host_diagnostic =
		    ioc->base_readl(&ioc->chip->HostDiagnostic,
				    LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
		drsprintk(ioc, pr_info(
			"%s wrote magic sequence: cnt(%d), host_diagnostic(0x%08x)\n",
			ioc->name, count, *host_diagnostic));
	} while ((*host_diagnostic & 0x00000080) == 0);
	return 0;
}

/* Re-lock the diagnostic register (any write of 0 to WriteSequence). */
void
leapioraid_base_lock_host_diagnostic(struct LEAPIORAID_ADAPTER *ioc)
{
	drsprintk(ioc, pr_info("%s disable writes to the diagnostic register\n",
			       ioc->name));
	writel(0x0, &ioc->chip->WriteSequence);
}

/*
 * leapioraid_base_diag_reset - hard-reset the controller via the
 * HostDiagnostic register.
 *
 * Sequence: lock PCI config access, unlock the diag register, latch
 * HCBSize, set RESET_ADAPTER and poll for it to self-clear (~5 min
 * budget), optionally restart from the HCB image (bit 0x100), clear the
 * hold-reset bit, re-lock, then wait up to 20s for the READY state.
 * NOTE(review): magic constants 0x100/0x1800/0x800/0x2 mirror the MPI
 * diag-register bit layout -- named macros would document intent.
 * (Function is truncated at the end of this chunk.)
 */
static int
leapioraid_base_diag_reset(struct LEAPIORAID_ADAPTER *ioc)
{
	u32 host_diagnostic;
	u32 ioc_state;
	u32 count;
	u32 hcb_size;

	pr_info("%s sending diag reset !!\n", ioc->name);
	drsprintk(ioc,
		  pr_info("%s Locking pci cfg space access\n",
			  ioc->name));
	pci_cfg_access_lock(ioc->pdev);
	drsprintk(ioc, pr_info("%s clear interrupts\n",
			       ioc->name));
	mutex_lock(&ioc->hostdiag_unlock_mutex);
	if (leapioraid_base_unlock_and_get_host_diagnostic
	    (ioc, &host_diagnostic)) {
		mutex_unlock(&ioc->hostdiag_unlock_mutex);
		goto out;
	}
	hcb_size =
	    ioc->base_readl(&ioc->chip->HCBSize, LEAPIORAID_READL_RETRY_COUNT_OF_THREE);
	drsprintk(ioc,
		  pr_info("%s diag reset: issued\n",
			  ioc->name));
	writel(host_diagnostic | LEAPIORAID_DIAG_RESET_ADAPTER,
	       &ioc->chip->HostDiagnostic);
#if defined(DISABLE_RESET_SUPPORT)
	count = 0;
	do {
		msleep(50);
		host_diagnostic =
		    ioc->base_readl(&ioc->chip->HostDiagnostic,
				    LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
		if (host_diagnostic == 0xFFFFFFFF)
			goto out;
		else if (count++ >= 300)
			goto out;
		if (!(count % 20))
			pr_info("waiting on diag reset bit to clear, count = %d\n",
				(count / 20));
	} while (host_diagnostic & LEAPIORAID_DIAG_RESET_ADAPTER);
#else
	msleep(50);
	/* ~300s total: (300000/256) iterations of msleep(256). */
	for (count = 0; count < (300000 / 256); count++) {
		host_diagnostic =
		    ioc->base_readl(&ioc->chip->HostDiagnostic,
				    LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY);
		if (host_diagnostic == 0xFFFFFFFF) {
			pr_err("%s Invalid host diagnostic register value\n",
			       ioc->name);
			leapioraid_base_dump_reg_set(ioc);
			goto out;
		}
		if (!(host_diagnostic & LEAPIORAID_DIAG_RESET_ADAPTER))
			break;

		msleep(256);
	}
#endif
	if (host_diagnostic & 0x00000100) {
		drsprintk(ioc, pr_info(
			"%s restart IOC assuming HCB Address points to good F/W\n",
			ioc->name));
		host_diagnostic &= ~0x00001800;
		host_diagnostic |= 0x00000800;
		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
		drsprintk(ioc, pr_err(
			"%s re-enable the HCDW\n", ioc->name));
		writel(hcb_size | 0x00000001,
		       &ioc->chip->HCBSize);
	}
	drsprintk(ioc, pr_info("%s restart the adapter\n",
			       ioc->name));
	writel(host_diagnostic & ~0x00000002,
	       &ioc->chip->HostDiagnostic);
	leapioraid_base_lock_host_diagnostic(ioc);
	mutex_unlock(&ioc->hostdiag_unlock_mutex);
	drsprintk(ioc, pr_info("%s Wait for FW to go to the READY state\n",
			       ioc->name));
	ioc_state =
	    leapioraid_base_wait_on_iocstate(
		ioc, LEAPIORAID_IOC_STATE_READY, 20);
	if (ioc_state) {
		pr_err("%s %s: failed going to ready state (ioc_state=0x%x)\n",
		       ioc->name, __func__, ioc_state);
		leapioraid_base_dump_reg_set(ioc);
		goto out;
	}
	drsprintk(ioc, pr_err(
		"%s Unlocking pci cfg space access\n", ioc->name));
	pci_cfg_access_unlock(ioc->pdev);
	if (ioc->open_pcie_trace)
		leapioraid_base_trace_log_init(ioc);
	pr_info("%s diag reset: SUCCESS\n", ioc->name);
	return 0;
out:
	drsprintk(ioc, pr_err(
		"%s Unlocking pci cfg space access\n", ioc->name));
	pci_cfg_access_unlock(ioc->pdev);
	pr_err("%s diag reset: FAILED\n", ioc->name);
mutex_unlock(&ioc->hostdiag_unlock_mutex); + return -EFAULT; +} + +static int +leapioraid_base_wait_for_iocstate( + struct LEAPIORAID_ADAPTER *ioc, int timeout) +{ + u32 ioc_state; + int rc; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (!leapioraid_base_pci_device_is_available(ioc)) + return 0; + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + dhsprintk(ioc, pr_info("%s %s: ioc_state(0x%08x)\n", + ioc->name, __func__, ioc_state)); + if (((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_READY) || + (ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_OPERATIONAL) + return 0; + if (ioc_state & LEAPIORAID_DOORBELL_USED) { + dhsprintk(ioc, + pr_info("%s unexpected doorbell active!\n", ioc->name)); + goto issue_diag_reset; + } + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + goto issue_diag_reset; + } else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + pr_err("%s %s: Skipping the diag reset here. 
(ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + return -EFAULT; + } + ioc_state = + leapioraid_base_wait_on_iocstate(ioc, LEAPIORAID_IOC_STATE_READY, + timeout); + if (ioc_state) { + pr_err("%s %s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + return -EFAULT; + } +issue_diag_reset: + rc = leapioraid_base_diag_reset(ioc); + return rc; +} + +int +leapioraid_base_check_for_fault_and_issue_reset( + struct LEAPIORAID_ADAPTER *ioc) +{ + u32 ioc_state; + int rc = -EFAULT; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (!leapioraid_base_pci_device_is_available(ioc)) + return rc; + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + dhsprintk(ioc, pr_info("%s %s: ioc_state(0x%08x)\n", + ioc->name, __func__, ioc_state)); + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + leapioraid_base_mask_interrupts(ioc); + rc = leapioraid_base_diag_reset(ioc); + } else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + leapioraid_base_coredump_info(ioc, + ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + leapioraid_base_wait_for_coredump_completion(ioc, __func__); + leapioraid_base_mask_interrupts(ioc); + rc = leapioraid_base_diag_reset(ioc); + } + return rc; +} + +static int +leapioraid_base_get_ioc_facts(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidIOCFactsReq_t mpi_request; + struct LeapioraidIOCFactsRep_t mpi_reply; + struct leapioraid_facts *facts; + int mpi_reply_sz, mpi_request_sz, r; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + r = leapioraid_base_wait_for_iocstate(ioc, 10); + if (r) { + pr_err( + "%s %s: failed getting to correct state\n", ioc->name, + __func__); + return r; + } + mpi_reply_sz = sizeof(struct LeapioraidIOCFactsRep_t); + mpi_request_sz = sizeof(struct LeapioraidIOCFactsReq_t); + memset(&mpi_request, 0, mpi_request_sz); + 
mpi_request.Function = LEAPIORAID_FUNC_IOC_FACTS; + r = leapioraid_base_handshake_req_reply_wait(ioc, mpi_request_sz, + (u32 *) &mpi_request, + mpi_reply_sz, + (u16 *) &mpi_reply, 5); + if (r != 0) { + pr_err("%s %s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + facts = &ioc->facts; + memset(facts, 0, sizeof(struct leapioraid_facts)); + facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); + facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); + facts->IOCNumber = mpi_reply.IOCNumber; + pr_info("%s IOC Number : %d\n", ioc->name, facts->IOCNumber); + ioc->IOCNumber = facts->IOCNumber; + facts->VP_ID = mpi_reply.VP_ID; + facts->VF_ID = mpi_reply.VF_ID; + facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); + facts->MaxChainDepth = mpi_reply.MaxChainDepth; + facts->WhoInit = mpi_reply.WhoInit; + facts->NumberOfPorts = mpi_reply.NumberOfPorts; + facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; + if (ioc->msix_enable && (facts->MaxMSIxVectors <= 16)) + ioc->combined_reply_queue = 0; + facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); + facts->MaxReplyDescriptorPostQueueDepth = + le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); + facts->ProductID = le16_to_cpu(mpi_reply.ProductID); + facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); + if ((facts->IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) + ioc->ir_firmware = 1; + if ((facts->IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) + && (!reset_devices)) + ioc->rdpq_array_capable = 1; + else + ioc->rdpq_array_capable = 0; + if (facts->IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_ATOMIC_REQ) + ioc->atomic_desc_capable = 1; + else + ioc->atomic_desc_capable = 0; + + facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); + facts->IOCRequestFrameSize = le16_to_cpu(mpi_reply.IOCRequestFrameSize); + facts->IOCMaxChainSegmentSize = + le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize); + facts->MaxInitiators 
= le16_to_cpu(mpi_reply.MaxInitiators); + facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); + ioc->shost->max_id = -1; + facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); + facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); + facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags); + facts->HighPriorityCredit = le16_to_cpu(mpi_reply.HighPriorityCredit); + facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; + facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); + facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize; + ioc->page_size = 1 << facts->CurrentHostPageSize; + if (ioc->page_size == 1) { + pr_err( + "%s CurrentHostPageSize is 0: Setting host page to 4k\n", + ioc->name); + ioc->page_size = 1 << 12; + } + dinitprintk(ioc, + pr_info("%s CurrentHostPageSize(%d)\n", + ioc->name, facts->CurrentHostPageSize)); + dinitprintk(ioc, + pr_info("%s hba queue depth(%d), max chains per io(%d)\n", + ioc->name, facts->RequestCredit, facts->MaxChainDepth)); + dinitprintk(ioc, + pr_info("%s request frame size(%d), reply frame size(%d)\n", + ioc->name, + facts->IOCRequestFrameSize * 4, + facts->ReplyFrameSize * 4)); + return 0; +} + +static void +leapioraid_base_unmap_resources(struct LEAPIORAID_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + + pr_info("%s %s\n", ioc->name, __func__); + leapioraid_base_free_irq(ioc); + leapioraid_base_disable_msix(ioc); + kfree(ioc->replyPostRegisterIndex); + mutex_lock(&ioc->pci_access_mutex); + if (ioc->chip_phys) { + iounmap(ioc->chip); + ioc->chip_phys = 0; + } + + pci_release_selected_regions(ioc->pdev, ioc->bars); + pci_disable_device(pdev); + mutex_unlock(&ioc->pci_access_mutex); +} + +int +leapioraid_base_map_resources(struct LEAPIORAID_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + u32 memap_sz; + u32 pio_sz; + int i, r = 0, rc; + u64 pio_chip = 0; + phys_addr_t chip_phys = 0; + struct leapioraid_adapter_reply_queue *reply_q; + int iopoll_q_count = 0; + + dinitprintk(ioc, 
pr_info("%s %s\n", + ioc->name, __func__)); + + ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); + if (pci_enable_device_mem(pdev)) { + pr_warn("%s pci_enable_device_mem: failed\n", ioc->name); + return -ENODEV; + } + if (pci_request_selected_regions(pdev, ioc->bars, ioc->driver_name)) { + pr_warn("%s pci_request_selected_regions: failed\n", ioc->name); + r = -ENODEV; + goto out_fail; + } + + pci_set_master(pdev); + + if (leapioraid_base_config_dma_addressing(ioc, pdev) != 0) { + pr_warn("%s no suitable DMA mask for %s\n", + ioc->name, pci_name(pdev)); + r = -ENODEV; + goto out_fail; + } + for (i = 0, memap_sz = 0, pio_sz = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + if (pio_sz) + continue; + pio_chip = (u64) pci_resource_start(pdev, i); + pio_sz = pci_resource_len(pdev, i); + } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { + if (memap_sz) + continue; + ioc->chip_phys = pci_resource_start(pdev, i); + chip_phys = ioc->chip_phys; + memap_sz = pci_resource_len(pdev, i); + ioc->chip = ioremap(ioc->chip_phys, memap_sz); + if (ioc->chip == NULL) { + pr_err("%s unable to map adapter memory!\n", + ioc->name); + r = -EINVAL; + goto out_fail; + } + } + } + leapioraid_base_mask_interrupts(ioc); + r = leapioraid_base_get_ioc_facts(ioc); + if (r) { + rc = leapioraid_base_check_for_fault_and_issue_reset(ioc); + if (rc || (leapioraid_base_get_ioc_facts(ioc))) + goto out_fail; + } + if (!ioc->rdpq_array_enable_assigned) { + ioc->rdpq_array_enable = ioc->rdpq_array_capable; + ioc->rdpq_array_enable_assigned = 1; + } + r = leapioraid_base_enable_msix(ioc); + if (r) + goto out_fail; + iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; + for (i = 0; i < iopoll_q_count; i++) { + atomic_set(&ioc->blk_mq_poll_queues[i].busy, 0); + atomic_set(&ioc->blk_mq_poll_queues[i].pause, 0); + } + if (!ioc->is_driver_loading) + leapioraid_base_init_irqpolls(ioc); + if (ioc->combined_reply_queue) { + 
ioc->replyPostRegisterIndex = kcalloc(ioc->nc_reply_index_count, + sizeof(resource_size_t *), + GFP_KERNEL); + if (!ioc->replyPostRegisterIndex) { + pr_err("%s allocation for reply Post Register Index failed!!!\n", + ioc->name); + r = -ENOMEM; + goto out_fail; + } + + for (i = 0; i < ioc->nc_reply_index_count; i++) { + ioc->replyPostRegisterIndex[i] = (resource_size_t *) + ((u8 *) &ioc->chip->Doorbell + + 0x0000030C + + (i * 0x10)); + } + } + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (reply_q->msix_index >= ioc->iopoll_q_start_index) { + pr_info("%s enabled: index: %d\n", + reply_q->name, reply_q->msix_index); + continue; + } + pr_info("%s %s: IRQ %d\n", + reply_q->name, + ((ioc->msix_enable) ? "PCI-MSI-X enabled" : + "IO-APIC enabled"), pci_irq_vector(ioc->pdev, + reply_q->msix_index)); + } + pr_info("%s iomem(%pap), mapped(0x%p), size(%d)\n", + ioc->name, &chip_phys, ioc->chip, memap_sz); + pr_info("%s ioport(0x%016llx), size(%d)\n", + ioc->name, (unsigned long long)pio_chip, pio_sz); + + pci_save_state(pdev); + return 0; +out_fail: + leapioraid_base_unmap_resources(ioc); + return r; +} + +void *leapioraid_base_get_msg_frame( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + return (void *)(ioc->request + (smid * ioc->request_sz)); +} + +void *leapioraid_base_get_sense_buffer( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); +} + +__le32 +leapioraid_base_get_sense_buffer_dma( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + return cpu_to_le32(ioc->sense_dma + ((smid - 1) * + SCSI_SENSE_BUFFERSIZE)); +} + +__le64 +leapioraid_base_get_sense_buffer_dma_64(struct LEAPIORAID_ADAPTER *ioc, + u16 smid) +{ + return cpu_to_le64(ioc->sense_dma + ((smid - 1) * + SCSI_SENSE_BUFFERSIZE)); +} + +void *leapioraid_base_get_reply_virt_addr(struct LEAPIORAID_ADAPTER *ioc, + u32 phys_addr) +{ + if (!phys_addr) + return NULL; + return ioc->reply + (phys_addr - (u32) ioc->reply_dma); +} + 
+static inline u8 +leapioraid_base_get_msix_index( + struct LEAPIORAID_ADAPTER *ioc, struct scsi_cmnd *scmd) +{ + if (ioc->msix_load_balance) + return ioc->reply_queue_count ? + leapioraid_base_mod64(atomic64_add_return(1, &ioc->total_io_cnt), + ioc->reply_queue_count) : 0; + if (scmd && ioc->shost->nr_hw_queues > 1) { + u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + + return blk_mq_unique_tag_to_hwq(tag) + ioc->high_iops_queues; + } + return ioc->cpu_msix_table[raw_smp_processor_id()]; +} + +inline unsigned long +leapioraid_base_sdev_nr_inflight_request(struct LEAPIORAID_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + return scsi_device_busy(scmd->device); +} + +static inline u8 +leapioraid_base_get_high_iops_msix_index(struct LEAPIORAID_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + if (leapioraid_base_sdev_nr_inflight_request(ioc, scmd) > + LEAPIORAID_DEVICE_HIGH_IOPS_DEPTH) + return + leapioraid_base_mod64((atomic64_add_return + (1, + &ioc->high_iops_outstanding) / + LEAPIORAID_HIGH_IOPS_BATCH_COUNT), + LEAPIORAID_HIGH_IOPS_REPLY_QUEUES); + return leapioraid_base_get_msix_index(ioc, scmd); +} + +u16 +leapioraid_base_get_smid(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx) +{ + unsigned long flags; + struct leapioraid_request_tracker *request; + u16 smid; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->internal_free_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + pr_err("%s %s: smid not available\n", + ioc->name, __func__); + return 0; + } + request = list_entry(ioc->internal_free_list.next, + struct leapioraid_request_tracker, tracker_list); + request->cb_idx = cb_idx; + smid = request->smid; + list_del(&request->tracker_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +u16 +leapioraid_base_get_smid_scsiio(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx, + struct scsi_cmnd *scmd) +{ + struct leapioraid_scsiio_tracker *request; + u16 smid; + u32 tag = scsi_cmd_to_rq(scmd)->tag; + u32 
unique_tag; + + unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + tag = blk_mq_unique_tag_to_tag(unique_tag); + ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag); + request = leapioraid_base_scsi_cmd_priv(scmd); + smid = tag + 1; + request->cb_idx = cb_idx; + request->smid = smid; + request->scmd = scmd; + return smid; +} + +u16 +leapioraid_base_get_smid_hpr(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx) +{ + unsigned long flags; + struct leapioraid_request_tracker *request; + u16 smid; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return 0; + } + request = list_entry(ioc->hpr_free_list.next, + struct leapioraid_request_tracker, tracker_list); + request->cb_idx = cb_idx; + smid = request->smid; + list_del(&request->tracker_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +static void +leapioraid_base_recovery_check(struct LEAPIORAID_ADAPTER *ioc) +{ + if (ioc->shost_recovery && ioc->pending_io_count) { + if (ioc->pending_io_count == 1) + wake_up(&ioc->reset_wq); + ioc->pending_io_count--; + } +} + +void +leapioraid_base_clear_st(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_scsiio_tracker *st) +{ + if (!st) + return; + if (WARN_ON(st->smid == 0)) + return; + st->cb_idx = 0xFF; + st->direct_io = 0; + st->scmd = NULL; + atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0); +} + +void +leapioraid_base_free_smid(struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + unsigned long flags; + int i; + struct leapioraid_scsiio_tracker *st; + void *request; + + if (smid < ioc->hi_priority_smid) { + st = leapioraid_get_st_from_smid(ioc, smid); + if (!st) { + leapioraid_base_recovery_check(ioc); + return; + } + request = leapioraid_base_get_msg_frame(ioc, smid); + memset(request, 0, ioc->request_sz); + leapioraid_base_clear_st(ioc, st); + leapioraid_base_recovery_check(ioc); + ioc->io_queue_num[smid - 1] = 0xFFFF; + 
return; + } + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (smid < ioc->internal_smid) { + i = smid - ioc->hi_priority_smid; + ioc->hpr_lookup[i].cb_idx = 0xFF; + list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list); + } else if (smid <= ioc->hba_queue_depth) { + i = smid - ioc->internal_smid; + ioc->internal_lookup[i].cb_idx = 0xFF; + list_add(&ioc->internal_lookup[i].tracker_list, + &ioc->internal_free_list); + } + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); +} + +#if defined(writeq) && defined(CONFIG_64BIT) +static inline void +leapioraid_base_writeq( + __u64 b, void __iomem *addr, spinlock_t *writeq_lock) +{ + writeq(b, addr); +} +#else +static inline void +leapioraid_base_writeq( + __u64 b, void __iomem *addr, spinlock_t *writeq_lock) +{ + unsigned long flags; + __u64 data_out = b; + + spin_lock_irqsave(writeq_lock, flags); + writel((u32) (data_out), addr); + writel((u32) (data_out >> 32), (addr + 4)); + spin_unlock_irqrestore(writeq_lock, flags); +} +#endif + +static u8 +leapioraid_base_set_and_get_msix_index( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + struct leapioraid_scsiio_tracker *st; + + st = (smid < + ioc->hi_priority_smid) ? 
(leapioraid_get_st_from_smid(ioc, + smid)) + : (NULL); + if (st == NULL) + return leapioraid_base_get_msix_index(ioc, NULL); + st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd); + return st->msix_io; +} + +static void +leapioraid_base_put_smid_scsi_io(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u16 handle) +{ + union LeapioraidReqDescUnion_t descriptor; + u64 *request = (u64 *) &descriptor; + + descriptor.SCSIIO.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_SCSI_IO; + descriptor.SCSIIO.MSIxIndex + = leapioraid_base_set_and_get_msix_index(ioc, smid); + descriptor.SCSIIO.SMID = cpu_to_le16(smid); + descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); + descriptor.SCSIIO.LMID = 0; + leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +static void +leapioraid_base_put_smid_fast_path(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u16 handle) +{ + union LeapioraidReqDescUnion_t descriptor; + u64 *request = (u64 *) &descriptor; + + descriptor.SCSIIO.RequestFlags = + LEAPIORAID_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; + descriptor.SCSIIO.MSIxIndex + = leapioraid_base_set_and_get_msix_index(ioc, smid); + descriptor.SCSIIO.SMID = cpu_to_le16(smid); + descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); + descriptor.SCSIIO.LMID = 0; + leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +static void +leapioraid_base_put_smid_hi_priority(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u16 msix_task) +{ + union LeapioraidReqDescUnion_t descriptor; + u64 *request; + + request = (u64 *) &descriptor; + descriptor.HighPriority.RequestFlags = + LEAPIORAID_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; + descriptor.HighPriority.MSIxIndex = msix_task; + descriptor.HighPriority.SMID = cpu_to_le16(smid); + descriptor.HighPriority.LMID = 0; + descriptor.HighPriority.Reserved1 = 0; + leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +static void 
+leapioraid_base_put_smid_default(struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + union LeapioraidReqDescUnion_t descriptor; + u64 *request; + + request = (u64 *) &descriptor; + descriptor.Default.RequestFlags = + LEAPIORAID_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; + descriptor.Default.MSIxIndex + = leapioraid_base_set_and_get_msix_index(ioc, smid); + descriptor.Default.SMID = cpu_to_le16(smid); + descriptor.Default.LMID = 0; + descriptor.Default.DescriptorTypeDependent = 0; + leapioraid_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +static void +leapioraid_base_put_smid_scsi_io_atomic(struct LEAPIORAID_ADAPTER *ioc, + u16 smid, u16 handle) +{ + struct LeapioraidAtomicReqDesc_t descriptor; + u32 *request = (u32 *) &descriptor; + + descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_SCSI_IO; + descriptor.MSIxIndex = leapioraid_base_set_and_get_msix_index(ioc, smid); + descriptor.SMID = cpu_to_le16(smid); + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +static void +leapioraid_base_put_smid_fast_path_atomic(struct LEAPIORAID_ADAPTER *ioc, + u16 smid, u16 handle) +{ + struct LeapioraidAtomicReqDesc_t descriptor; + u32 *request = (u32 *) &descriptor; + + descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; + descriptor.MSIxIndex = leapioraid_base_set_and_get_msix_index(ioc, smid); + descriptor.SMID = cpu_to_le16(smid); + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +static void +leapioraid_base_put_smid_hi_priority_atomic(struct LEAPIORAID_ADAPTER *ioc, + u16 smid, u16 msix_task) +{ + struct LeapioraidAtomicReqDesc_t descriptor; + u32 *request = (u32 *) &descriptor; + + descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; + descriptor.MSIxIndex = msix_task; + descriptor.SMID = cpu_to_le16(smid); + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +static void 
+leapioraid_base_put_smid_default_atomic(struct LEAPIORAID_ADAPTER *ioc, + u16 smid) +{ + struct LeapioraidAtomicReqDesc_t descriptor; + u32 *request = (u32 *)(&descriptor); + + descriptor.RequestFlags = LEAPIORAID_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; + descriptor.MSIxIndex = leapioraid_base_set_and_get_msix_index(ioc, smid); + descriptor.SMID = cpu_to_le16(smid); + writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost); +} + +static int +leapioraid_base_display_fwpkg_version(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidFWImgHeader_t *fw_img_hdr; + struct LeapioraidComptImgHeader_t *cmp_img_hdr; + struct LeapioraidFWUploadReq_t *mpi_request; + struct LeapioraidFWUploadRep_t mpi_reply; + int r = 0, issue_diag_reset = 0; + u32 package_version = 0; + void *fwpkg_data = NULL; + dma_addr_t fwpkg_data_dma; + u16 smid, ioc_status; + size_t data_length; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (ioc->base_cmds.status & LEAPIORAID_CMD_PENDING) { + pr_err("%s %s: internal command already in use\n", ioc->name, + __func__); + return -EAGAIN; + } + data_length = sizeof(struct LeapioraidFWImgHeader_t); + fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, + &fwpkg_data_dma, GFP_ATOMIC); + if (!fwpkg_data) + return -ENOMEM; + + smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + r = -EAGAIN; + goto out; + } + ioc->base_cmds.status = LEAPIORAID_CMD_PENDING; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidFWUploadReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_FW_UPLOAD; + mpi_request->ImageType = 0x01; + mpi_request->ImageSize = data_length; + ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma, + data_length); + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, 
15 * HZ); + dinitprintk(ioc, pr_info("%s %s: complete\n", + ioc->name, __func__)); + if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidFWUploadReq_t) / 4); + issue_diag_reset = 1; + } else { + memset(&mpi_reply, 0, sizeof(struct LeapioraidFWUploadRep_t)); + if (ioc->base_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + memcpy(&mpi_reply, ioc->base_cmds.reply, + sizeof(struct LeapioraidFWUploadRep_t)); + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) { + fw_img_hdr = + (struct LeapioraidFWImgHeader_t *) fwpkg_data; + if (le32_to_cpu(fw_img_hdr->Signature) == + 0xEB000042) { + cmp_img_hdr = + (struct LeapioraidComptImgHeader_t + *) (fwpkg_data); + package_version = + le32_to_cpu(cmp_img_hdr->ApplicationSpecific); + } else + package_version = + le32_to_cpu(fw_img_hdr->PackageVersion.Word); + if (package_version) + pr_err( + "%s FW Package Version(%02d.%02d.%02d.%02d)\n", + ioc->name, + ((package_version) & 0xFF000000) + >> 24, + ((package_version) & 0x00FF0000) + >> 16, + ((package_version) & 0x0000FF00) + >> 8, + (package_version) & 0x000000FF); + } else { + leapioraid_debug_dump_mf(&mpi_reply, + sizeof(struct LeapioraidFWUploadRep_t) / + 4); + } + } + } + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; +out: + if (fwpkg_data) + dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data, + fwpkg_data_dma); + if (issue_diag_reset) { + if (ioc->drv_internal_flags & LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED) + return -EFAULT; + if (leapioraid_base_check_for_fault_and_issue_reset(ioc)) + return -EFAULT; + r = -EAGAIN; + } + return r; +} + +static void +leapioraid_base_display_ioc_capabilities(struct LEAPIORAID_ADAPTER *ioc) +{ + int i = 0; + char desc[17] = { 0 }; + u8 revision; + u32 iounit_pg1_flags; + + pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision); + 
strscpy(desc, ioc->manu_pg0.ChipName, sizeof(desc)); + pr_info("%s %s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n", + ioc->name, desc, + (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, + (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, + (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, + ioc->facts.FWVersion.Word & 0x000000FF, revision); + pr_info("%s Protocol=(", ioc->name); + if (ioc->facts.ProtocolFlags & LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { + pr_info("Initiator"); + i++; + } + if (ioc->facts.ProtocolFlags & LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_TARGET) { + pr_info("%sTarget", i ? "," : ""); + i++; + } + i = 0; + pr_info("), "); + pr_info("Capabilities=("); + if ((!ioc->warpdrive_msg) && (ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) { + pr_info("Raid"); + i++; + } + if (ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_TLR) { + pr_info("%sTLR", i ? "," : ""); + i++; + } + if (ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_MULTICAST) { + pr_info("%sMulticast", i ? "," : ""); + i++; + } + if (ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) { + pr_info("%sBIDI Target", i ? "," : ""); + i++; + } + if (ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_EEDP) { + pr_info("%sEEDP", i ? "," : ""); + i++; + } + if (ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { + pr_info("%sTask Set Full", i ? "," : ""); + i++; + } + iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); + if (!(iounit_pg1_flags & LEAPIORAID_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) { + pr_info("%sNCQ", i ? 
"," : ""); + i++; + } + pr_info(")\n"); +} + +static int +leapioraid_base_update_ioc_page1_inlinewith_perf_mode( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidIOCP1_t ioc_pg1; + struct LeapioraidCfgRep_t mpi_reply; + int rc; + + rc = leapioraid_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy); + if (rc) + return rc; + memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(struct LeapioraidIOCP1_t)); + switch (perf_mode) { + case LEAPIORAID_PERF_MODE_DEFAULT: + case LEAPIORAID_PERF_MODE_BALANCED: + if (ioc->high_iops_queues) { + pr_err( + "%s Enable int coalescing only for first %d reply queues\n", + ioc->name, LEAPIORAID_HIGH_IOPS_REPLY_QUEUES); + ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 | + ((1 << + LEAPIORAID_HIGH_IOPS_REPLY_QUEUES + / 8) - 1)); + rc = leapioraid_config_set_ioc_pg1(ioc, &mpi_reply, + &ioc_pg1); + if (rc) + return rc; + pr_err("%s performance mode: balanced\n", ioc->name); + return 0; + } + fallthrough; + case LEAPIORAID_PERF_MODE_LATENCY: + ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa); + ioc_pg1.Flags |= cpu_to_le32(0x00000001); + ioc_pg1.ProductSpecific = 0; + rc = leapioraid_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1); + if (rc) + return rc; + pr_err("%s performance mode: latency\n", ioc->name); + break; + case LEAPIORAID_PERF_MODE_IOPS: + pr_err( + "%s performance mode: iops with coalescing timeout: 0x%x\n", + ioc->name, le32_to_cpu(ioc_pg1.CoalescingTimeout)); + ioc_pg1.Flags |= cpu_to_le32(0x00000001); + ioc_pg1.ProductSpecific = 0; + rc = leapioraid_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1); + if (rc) + return rc; + break; + } + return 0; +} + +static int +leapioraid_base_assign_fw_reported_qd(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL; + int sz; + int rc = 0; + + ioc->max_wideport_qd = LEAPIORAID_SAS_QUEUE_DEPTH; + ioc->max_narrowport_qd = LEAPIORAID_SAS_QUEUE_DEPTH; + ioc->max_sata_qd = LEAPIORAID_SATA_QUEUE_DEPTH; + + sz = 
/*
 * leapioraid_base_static_config_pages - fetch the static configuration
 * pages from firmware and apply driver-side settings derived from them.
 * @ioc: per-adapter object
 *
 * Reads manufacturing pages 0/10/11, BIOS pages 2/3, IOC page 8 and
 * IO-unit pages 0/1/8, then writes IO-unit page 1 back with the
 * task-set-full handling bit adjusted to match the IOC capabilities.
 *
 * Return: 0 on success, otherwise the error code of the first failing
 * config-page request.
 */
static int
leapioraid_base_static_config_pages(struct LEAPIORAID_ADAPTER *ioc)
{
	struct LeapioraidCfgRep_t mpi_reply;
	u32 iounit_pg1_flags;
	int rc;

	rc = leapioraid_config_get_manufacturing_pg0(ioc, &mpi_reply,
						     &ioc->manu_pg0);
	if (rc)
		return rc;
	/* Manufacturing page 10 only exists when IR (RAID) firmware runs. */
	if (ioc->ir_firmware) {
		rc = leapioraid_config_get_manufacturing_pg10(ioc, &mpi_reply,
							      &ioc->manu_pg10);
		if (rc)
			return rc;
	}
	rc = leapioraid_config_get_manufacturing_pg11(ioc, &mpi_reply,
						      &ioc->manu_pg11);
	if (rc)
		return rc;

	/*
	 * TimeSyncInterval: low 7 bits carry the interval count, bit 7
	 * selects the unit (set = hours, clear = minutes).  Normalize to
	 * seconds for the driver's internal bookkeeping.
	 */
	ioc->time_sync_interval =
		ioc->manu_pg11.TimeSyncInterval & 0x7F;
	if (ioc->time_sync_interval) {
		if (ioc->manu_pg11.TimeSyncInterval & 0x80)
			ioc->time_sync_interval =
				ioc->time_sync_interval * 3600;
		else
			ioc->time_sync_interval =
				ioc->time_sync_interval * 60;
		dinitprintk(ioc, pr_info(
			"%s Driver-FW TimeSync interval is %d seconds.\n\t\t"
			"ManuPg11 TimeSync Unit is in %s's",
			ioc->name,
			ioc->time_sync_interval,
			((ioc->manu_pg11.TimeSyncInterval & 0x80)
			 ? "Hour" : "Minute")));
	}
	rc = leapioraid_base_assign_fw_reported_qd(ioc);
	if (rc)
		return rc;
	rc = leapioraid_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
	if (rc)
		return rc;
	rc = leapioraid_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
	if (rc)
		return rc;
	rc = leapioraid_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
	if (rc)
		return rc;
	rc = leapioraid_config_get_iounit_pg0(ioc, &mpi_reply,
					      &ioc->iounit_pg0);
	if (rc)
		return rc;
	rc = leapioraid_config_get_iounit_pg1(ioc, &mpi_reply,
					      &ioc->iounit_pg1);
	if (rc)
		return rc;
	rc = leapioraid_config_get_iounit_pg8(ioc, &mpi_reply,
					      &ioc->iounit_pg8);
	if (rc)
		return rc;
	leapioraid_base_display_ioc_capabilities(ioc);
	/*
	 * Mirror the IOC's task-set-full capability into IO-unit page 1:
	 * clear the "disable" bit when the IOC can handle task-set-full
	 * itself, set it otherwise, then push the page back to firmware.
	 */
	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if ((ioc->facts.IOCCapabilities &
	     LEAPIORAID_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
		iounit_pg1_flags &=
			~LEAPIORAID_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	else
		iounit_pg1_flags |=
			LEAPIORAID_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
	rc = leapioraid_config_set_iounit_pg1(ioc, &mpi_reply,
					      &ioc->iounit_pg1);
	if (rc)
		return rc;
	if (ioc->iounit_pg8.NumSensors)
		ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;

	rc = leapioraid_base_update_ioc_page1_inlinewith_perf_mode(ioc);
	if (rc)
		return rc;

	return 0;
}

/*
 * leapioraid_free_enclosure_list - release every cached enclosure node.
 * @ioc: per-adapter object
 *
 * Walks ioc->enclosure_list with the _safe iterator (entries are deleted
 * while iterating), unlinking and freeing each node.
 */
void
leapioraid_free_enclosure_list(struct LEAPIORAID_ADAPTER *ioc)
{
	struct leapioraid_enclosure_node *enclosure_dev, *enclosure_dev_next;

	list_for_each_entry_safe(enclosure_dev,
				 enclosure_dev_next, &ioc->enclosure_list,
				 list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}
ioc->rdpq_array_enable ? ioc->reply_queue_count : 1; + + dexitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (ioc->request) { + dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz, + ioc->request, ioc->request_dma); + dexitprintk(ioc, + pr_info("%s request_pool(0x%p): free\n", + ioc->name, ioc->request)); + ioc->request = NULL; + } + if (ioc->sense) { + dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); + dma_pool_destroy(ioc->sense_dma_pool); + dexitprintk(ioc, pr_info("%s sense_pool(0x%p): free\n", + ioc->name, ioc->sense)); + ioc->sense = NULL; + } + if (ioc->reply) { + dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); + dma_pool_destroy(ioc->reply_dma_pool); + dexitprintk(ioc, pr_info("%s reply_pool(0x%p): free\n", + ioc->name, ioc->reply)); + ioc->reply = NULL; + } + if (ioc->reply_free) { + dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free, + ioc->reply_free_dma); + dma_pool_destroy(ioc->reply_free_dma_pool); + dexitprintk(ioc, pr_info("%s reply_free_pool(0x%p): free\n", + ioc->name, ioc->reply_free)); + ioc->reply_free = NULL; + } + if (ioc->reply_post) { + dma_alloc_count = DIV_ROUND_UP(count, + LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK); + for (i = 0; i < count; i++) { + if (i % LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0 + && dma_alloc_count) { + if (ioc->reply_post[i].reply_post_free) { + dma_pool_free(ioc->reply_post_free_dma_pool, + ioc->reply_post[i].reply_post_free, + ioc->reply_post[i].reply_post_free_dma); + pr_err( + "%s reply_post_free_pool(0x%p): free\n", + ioc->name, + ioc->reply_post[i].reply_post_free); + ioc->reply_post[i].reply_post_free = + NULL; + } + --dma_alloc_count; + } + } + dma_pool_destroy(ioc->reply_post_free_dma_pool); + if (ioc->reply_post_free_array && ioc->rdpq_array_enable) { + dma_pool_free(ioc->reply_post_free_array_dma_pool, + ioc->reply_post_free_array, + ioc->reply_post_free_array_dma); + ioc->reply_post_free_array = NULL; + } + 
dma_pool_destroy(ioc->reply_post_free_array_dma_pool); + kfree(ioc->reply_post); + } + if (ioc->config_page) { + dexitprintk(ioc, pr_err( + "%s config_page(0x%p): free\n", ioc->name, + ioc->config_page)); + dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz, + ioc->config_page, ioc->config_page_dma); + } + kfree(ioc->hpr_lookup); + kfree(ioc->internal_lookup); + if (ioc->chain_lookup) { + for (i = 0; i < ioc->scsiio_depth; i++) { + for (j = ioc->chains_per_prp_buffer; + j < ioc->chains_needed_per_io; j++) { + ct = &ioc->chain_lookup[i].chains_per_smid[j]; + if (ct && ct->chain_buffer) + dma_pool_free(ioc->chain_dma_pool, + ct->chain_buffer, + ct->chain_buffer_dma); + } + kfree(ioc->chain_lookup[i].chains_per_smid); + } + dma_pool_destroy(ioc->chain_dma_pool); + kfree(ioc->chain_lookup); + ioc->chain_lookup = NULL; + } + kfree(ioc->io_queue_num); + ioc->io_queue_num = NULL; +} + +static int +leapioraid_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz) +{ + dma_addr_t end_address; + + end_address = start_address + pool_sz - 1; + if (upper_32_bits(start_address) == upper_32_bits(end_address)) + return 1; + else + return 0; +} + +static inline int +leapioraid_base_reduce_hba_queue_depth(struct LEAPIORAID_ADAPTER *ioc) +{ + int reduce_sz = 64; + + if ((ioc->hba_queue_depth - reduce_sz) > + (ioc->internal_depth + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) { + ioc->hba_queue_depth -= reduce_sz; + return 0; + } else + return -ENOMEM; +} + +static int +leapioraid_base_allocate_reply_post_free_array(struct LEAPIORAID_ADAPTER *ioc, + int reply_post_free_array_sz) +{ + ioc->reply_post_free_array_dma_pool = + dma_pool_create("reply_post_free_array pool", + &ioc->pdev->dev, reply_post_free_array_sz, 16, 0); + if (!ioc->reply_post_free_array_dma_pool) { + dinitprintk(ioc, + pr_err + ("reply_post_free_array pool: dma_pool_create failed\n")); + return -ENOMEM; + } + ioc->reply_post_free_array = + dma_pool_alloc(ioc->reply_post_free_array_dma_pool, + GFP_KERNEL, 
&ioc->reply_post_free_array_dma); + if (!ioc->reply_post_free_array) { + dinitprintk(ioc, + pr_err + ("reply_post_free_array pool: dma_pool_alloc failed\n")); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region(ioc->reply_post_free_array_dma, + reply_post_free_array_sz)) { + dinitprintk(ioc, pr_err( + "Bad Reply Free Pool! Reply Free (0x%p)\n\t\t" + "Reply Free dma = (0x%llx)\n", + ioc->reply_free, + (unsigned long long)ioc->reply_free_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + return 0; +} + +static int +base_alloc_rdpq_dma_pool(struct LEAPIORAID_ADAPTER *ioc, int sz) +{ + int i = 0; + u32 dma_alloc_count = 0; + int reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(struct LeapioraidDefaultRepDesc_t); + int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1; + + ioc->reply_post = + kcalloc(count, sizeof(struct leapioraid_reply_post_struct), GFP_KERNEL); + if (!ioc->reply_post) { + pr_err("%s reply_post_free pool: kcalloc failed\n", ioc->name); + return -ENOMEM; + } + dma_alloc_count = DIV_ROUND_UP( + count, LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK); + ioc->reply_post_free_dma_pool = + dma_pool_create("reply_post_free pool", &ioc->pdev->dev, sz, 16, 0); + if (!ioc->reply_post_free_dma_pool) { + pr_err("reply_post_free pool: dma_pool_create failed\n"); + return -ENOMEM; + } + for (i = 0; i < count; i++) { + if ((i % LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) { + ioc->reply_post[i].reply_post_free = + dma_pool_zalloc(ioc->reply_post_free_dma_pool, + GFP_KERNEL, + &ioc->reply_post[i].reply_post_free_dma); + if (!ioc->reply_post[i].reply_post_free) { + pr_err("reply_post_free pool: dma_pool_alloc failed\n"); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region + (ioc->reply_post[i].reply_post_free_dma, sz)) { + dinitprintk(ioc, pr_err( + "%s bad Replypost free pool(0x%p) dma = (0x%llx)\n", + ioc->name, + ioc->reply_post[i].reply_post_free, + (unsigned long long) + 
ioc->reply_post[i].reply_post_free_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + dma_alloc_count--; + } else { + ioc->reply_post[i].reply_post_free = + (union LeapioraidRepDescUnion_t *) + ((long)ioc->reply_post[i - 1].reply_post_free + + reply_post_free_sz); + ioc->reply_post[i].reply_post_free_dma = (dma_addr_t) + (ioc->reply_post[i - 1].reply_post_free_dma + + reply_post_free_sz); + } + } + return 0; +} + +static int +leapioraid_base_allocate_chain_dma_pool(struct LEAPIORAID_ADAPTER *ioc, int sz) +{ + int i = 0, j = 0; + struct leapioraid_chain_tracker *ctr; + + ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, + ioc->chain_segment_sz, 16, 0); + if (!ioc->chain_dma_pool) { + pr_err("%s chain_dma_pool: dma_pool_create failed\n", ioc->name); + return -ENOMEM; + } + for (i = 0; i < ioc->scsiio_depth; i++) { + for (j = ioc->chains_per_prp_buffer; + j < ioc->chains_needed_per_io; j++) { + ctr = &ioc->chain_lookup[i].chains_per_smid[j]; + ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool, + GFP_KERNEL, + &ctr->chain_buffer_dma); + if (!ctr->chain_buffer) + return -EAGAIN; + if (!leapioraid_check_same_4gb_region + (ctr->chain_buffer_dma, ioc->chain_segment_sz)) { + pr_err( + "%s buffers not in same 4G! 
buff=(0x%p) dma=(0x%llx)\n", + ioc->name, + ctr->chain_buffer, + (unsigned long long)ctr->chain_buffer_dma); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + } + } + dinitprintk(ioc, pr_info( + "%s chain_lookup depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->name, ioc->scsiio_depth, + ioc->chain_segment_sz, + ((ioc->scsiio_depth * + (ioc->chains_needed_per_io - + ioc->chains_per_prp_buffer) * + ioc->chain_segment_sz)) / 1024)); + return 0; +} + +static int +leapioraid_base_allocate_sense_dma_pool(struct LEAPIORAID_ADAPTER *ioc, int sz) +{ + ioc->sense_dma_pool = + dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0); + if (!ioc->sense_dma_pool) { + pr_err("%s sense pool: dma_pool_create failed\n", ioc->name); + return -ENOMEM; + } + ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, + GFP_KERNEL, &ioc->sense_dma); + if (!ioc->sense) { + pr_err("%s sense pool: dma_pool_alloc failed\n", ioc->name); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region(ioc->sense_dma, sz)) { + dinitprintk(ioc, + pr_err("Bad Sense Pool! 
sense (0x%p) sense_dma = (0x%llx)\n", + ioc->sense, + (unsigned long long)ioc->sense_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + pr_err( + "%s sense pool(0x%p) - dma(0x%llx): depth(%d),\n\t\t" + "element_size(%d), pool_size (%d kB)\n", + ioc->name, + ioc->sense, + (unsigned long long)ioc->sense_dma, + ioc->scsiio_depth, + SCSI_SENSE_BUFFERSIZE, sz / 1024); + return 0; +} + +static int +leapioraid_base_allocate_reply_free_dma_pool(struct LEAPIORAID_ADAPTER *ioc, + int sz) +{ + ioc->reply_free_dma_pool = + dma_pool_create("reply_free pool", &ioc->pdev->dev, sz, 16, 0); + if (!ioc->reply_free_dma_pool) { + pr_err("%s reply_free pool: dma_pool_create failed\n", ioc->name); + return -ENOMEM; + } + ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, + GFP_KERNEL, &ioc->reply_free_dma); + if (!ioc->reply_free) { + pr_err("%s reply_free pool: dma_pool_alloc failed\n", ioc->name); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region(ioc->reply_free_dma, sz)) { + dinitprintk(ioc, pr_err( + "Bad Reply Free Pool! 
Reply Free (0x%p)\n\t\t" + "Reply Free dma = (0x%llx)\n", + ioc->reply_free, + (unsigned long long)ioc->reply_free_dma)); + ioc->use_32bit_dma = 1; + return -EAGAIN; + } + memset(ioc->reply_free, 0, sz); + dinitprintk(ioc, pr_info( + "%s reply_free pool(0x%p): depth(%d),\n\t\t" + "element_size(%d), pool_size(%d kB)\n", + ioc->name, + ioc->reply_free, + ioc->reply_free_queue_depth, 4, + sz / 1024)); + dinitprintk(ioc, + pr_info("%s reply_free_dma (0x%llx)\n", + ioc->name, (unsigned long long)ioc->reply_free_dma)); + return 0; +} + +static int +leapioraid_base_allocate_reply_pool(struct LEAPIORAID_ADAPTER *ioc, int sz) +{ + ioc->reply_dma_pool = dma_pool_create("reply pool", + &ioc->pdev->dev, sz, 4, 0); + if (!ioc->reply_dma_pool) { + pr_err("%s reply pool: dma_pool_create failed\n", ioc->name); + return -ENOMEM; + } + ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL, + &ioc->reply_dma); + if (!ioc->reply) { + pr_err("%s reply pool: dma_pool_alloc failed\n", ioc->name); + return -EAGAIN; + } + if (!leapioraid_check_same_4gb_region(ioc->reply_dma, sz)) { + dinitprintk(ioc, + pr_err("Bad Reply Pool! 
/*
 * leapioraid_base_allocate_reply_pool - allocate the reply message frame
 * pool.
 * @ioc: per-adapter object
 * @sz: pool size in bytes (reply_free_queue_depth * reply_sz)
 *
 * Also records the [min, max) bus-address window of the pool, which the
 * reply handling path uses to validate reply addresses.
 *
 * Return: 0 on success, -ENOMEM on pool creation failure, -EAGAIN when
 * the allocation failed or crossed a 4GB boundary (caller retries).
 */
static int
leapioraid_base_allocate_reply_pool(struct LEAPIORAID_ADAPTER *ioc, int sz)
{
	ioc->reply_dma_pool = dma_pool_create("reply pool",
					      &ioc->pdev->dev, sz, 4, 0);
	if (!ioc->reply_dma_pool) {
		pr_err("%s reply pool: dma_pool_create failed\n", ioc->name);
		return -ENOMEM;
	}
	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
				    &ioc->reply_dma);
	if (!ioc->reply) {
		pr_err("%s reply pool: dma_pool_alloc failed\n", ioc->name);
		return -EAGAIN;
	}
	if (!leapioraid_check_same_4gb_region(ioc->reply_dma, sz)) {
		dinitprintk(ioc,
			    pr_err("Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
				   ioc->reply,
				   (unsigned long long)ioc->reply_dma));
		ioc->use_32bit_dma = 1;
		return -EAGAIN;
	}
	ioc->reply_dma_min_address = (u32) (ioc->reply_dma);
	ioc->reply_dma_max_address = (u32) (ioc->reply_dma) + sz;
	pr_err(
		"%s reply pool(0x%p) - dma(0x%llx): depth(%d)\n\t\t"
		"frame_size(%d), pool_size(%d kB)\n",
		ioc->name,
		ioc->reply,
		(unsigned long long)ioc->reply_dma,
		ioc->reply_free_queue_depth,
		ioc->reply_sz,
		sz / 1024);
	return 0;
}

/*
 * leapioraid_base_allocate_memory_pools - size and allocate every host
 * memory pool the adapter needs (request frames, chains, sense, reply,
 * reply-free, RDPQ, config page).
 * @ioc: per-adapter object
 *
 * Sizing starts from the IOC facts (credits, frame sizes, chain depth)
 * and is retried with progressively smaller queue depths (labels
 * "retry"/"retry_allocation") or a 32-bit DMA mask ("try_32bit_dma")
 * when an allocation fails or a buffer crosses a 4GB boundary.
 *
 * Return: 0 on success, -ENOMEM when memory cannot be obtained even at
 * minimum depth, -ENODEV when the 32-bit DMA fallback cannot be set.
 */
static int
leapioraid_base_allocate_memory_pools(struct LEAPIORAID_ADAPTER *ioc)
{
	struct leapioraid_facts *facts;
	u16 max_sge_elements;
	u16 chains_needed_per_io;
	u32 sz, total_sz, reply_post_free_sz, rc = 0;
	u32 retry_sz;
	u32 rdpq_sz = 0, sense_sz = 0, reply_post_free_array_sz = 0;
	u16 max_request_credit;
	unsigned short sg_tablesize;
	u16 sge_size;
	int i = 0;

	dinitprintk(ioc, pr_info("%s %s\n", ioc->name,
				 __func__));
	retry_sz = 0;
	facts = &ioc->facts;
	/* Clamp the scatter-gather table size into the supported window. */
	sg_tablesize = LEAPIORAID_SG_DEPTH;
	if (reset_devices)
		sg_tablesize = min_t(unsigned short, sg_tablesize,
				     LEAPIORAID_KDUMP_MIN_PHYS_SEGMENTS);
	if (sg_tablesize < LEAPIORAID_MIN_PHYS_SEGMENTS)
		sg_tablesize = LEAPIORAID_MIN_PHYS_SEGMENTS;
	else if (sg_tablesize > LEAPIORAID_MAX_PHYS_SEGMENTS) {
		sg_tablesize = min_t(unsigned short, sg_tablesize,
				     LEAPIORAID_MAX_SG_SEGMENTS);
		pr_warn(
			"%s sg_tablesize(%u) is bigger than kernel defined %s(%u)\n",
			ioc->name,
			sg_tablesize, LEAPIORAID_MAX_PHYS_SEGMENTS_STRING,
			LEAPIORAID_MAX_PHYS_SEGMENTS);
	}
	ioc->shost->sg_tablesize = sg_tablesize;
	/* Reserve internal/high-priority command slots out of the credits. */
	ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
				    (facts->RequestCredit / 4));
	if (ioc->internal_depth < LEAPIORAID_INTERNAL_CMDS_COUNT) {
		if (facts->RequestCredit <= (LEAPIORAID_INTERNAL_CMDS_COUNT +
					     LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) {
			pr_err(
				"%s RequestCredits not enough, it has %d credits\n",
				ioc->name,
				facts->RequestCredit);
			return -ENOMEM;
		}
		ioc->internal_depth = 10;
	}
	ioc->hi_priority_depth = ioc->internal_depth - (5);
	if (reset_devices)
		max_request_credit = min_t(u16, facts->RequestCredit,
					   (LEAPIORAID_KDUMP_SCSI_IO_DEPTH +
					    ioc->internal_depth));
	else
		max_request_credit = min_t(u16, facts->RequestCredit,
					   LEAPIORAID_MAX_HBA_QUEUE_DEPTH);
retry:
	/* Recompute all depths/frame sizes for the current credit budget. */
	ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
	ioc->request_sz = facts->IOCRequestFrameSize * 4;
	ioc->reply_sz = facts->ReplyFrameSize * 4;
	if (facts->IOCMaxChainSegmentSize)
		ioc->chain_segment_sz =
			facts->IOCMaxChainSegmentSize * LEAPIORAID_MAX_CHAIN_ELEMT_SZ;
	else
		ioc->chain_segment_sz =
			LEAPIORAID_DEFAULT_NUM_FWCHAIN_ELEMTS * LEAPIORAID_MAX_CHAIN_ELEMT_SZ;
	sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
retry_allocation:
	total_sz = 0;
	/* SGEs that fit in the main frame vs. in each chain segment. */
	max_sge_elements =
		ioc->request_sz -
		((sizeof(struct LeapioraidSCSIIOReq_t) -
		  sizeof(union LEAPIORAID_IEEE_SGE_IO_UNION)) + 2 * sge_size);
	ioc->max_sges_in_main_message = max_sge_elements / sge_size;
	max_sge_elements = ioc->chain_segment_sz - sge_size;
	ioc->max_sges_in_chain_message = max_sge_elements / sge_size;
	chains_needed_per_io = ((ioc->shost->sg_tablesize -
				 ioc->max_sges_in_main_message) /
				ioc->max_sges_in_chain_message)
		+ 1;
	if (chains_needed_per_io > facts->MaxChainDepth) {
		chains_needed_per_io = facts->MaxChainDepth;
		ioc->shost->sg_tablesize = min_t(u16,
						 ioc->max_sges_in_main_message +
						 (ioc->max_sges_in_chain_message *
						  chains_needed_per_io),
						 ioc->shost->sg_tablesize);
	}
	ioc->chains_needed_per_io = chains_needed_per_io;
	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
	ioc->reply_post_queue_depth = ioc->hba_queue_depth +
		ioc->reply_free_queue_depth + 1;
	/* Reply post queue depth must be a multiple of 16. */
	if (ioc->reply_post_queue_depth % 16)
		ioc->reply_post_queue_depth +=
			16 - (ioc->reply_post_queue_depth % 16);
	if (ioc->reply_post_queue_depth >
	    facts->MaxReplyDescriptorPostQueueDepth) {
		ioc->reply_post_queue_depth =
			facts->MaxReplyDescriptorPostQueueDepth -
			(facts->MaxReplyDescriptorPostQueueDepth % 16);
		ioc->hba_queue_depth =
			((ioc->reply_post_queue_depth - 64) / 2) - 1;
		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
	}
	pr_info(
		"%s scatter gather: sge_in_main_msg(%d),\n\t\t"
		"sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
		ioc->name,
		ioc->max_sges_in_main_message,
		ioc->max_sges_in_chain_message,
		ioc->shost->sg_tablesize,
		ioc->chains_needed_per_io);
	ioc->scsiio_depth = ioc->hba_queue_depth -
		ioc->hi_priority_depth - ioc->internal_depth;
	ioc->shost->can_queue =
		ioc->scsiio_depth - LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT;
	dinitprintk(ioc, pr_info("%s scsi host: can_queue depth (%d)\n", ioc->name,
				 ioc->shost->can_queue));
	/* Request frame region: SCSI IO (+1 guard) + hi-pri + internal. */
	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
	sz += (ioc->hi_priority_depth * ioc->request_sz);
	sz += (ioc->internal_depth * ioc->request_sz);
	ioc->request_dma_sz = sz;
	ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
					  &ioc->request_dma, GFP_KERNEL);
	if (!ioc->request) {
		if (ioc->scsiio_depth < LEAPIORAID_SAS_QUEUE_DEPTH) {
			rc = -ENOMEM;
			goto out;
		}
		retry_sz = 64;
		if ((ioc->hba_queue_depth - retry_sz) >
		    (ioc->internal_depth + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) {
			ioc->hba_queue_depth -= retry_sz;
			goto retry_allocation;
		} else {
			rc = -ENOMEM;
			goto out;
		}
	}
	memset(ioc->request, 0, sz);
	if (retry_sz)
		pr_err(
			"%s request pool: dma_alloc_consistent succeed:\n\t\t"
			"hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
			ioc->name,
			ioc->hba_queue_depth,
			ioc->chains_needed_per_io,
			ioc->request_sz,
			sz / 1024);
	/* Carve hi-priority and internal frames out of the same region. */
	ioc->hi_priority =
		ioc->request + ((ioc->scsiio_depth + 1) * ioc->request_sz);
	ioc->hi_priority_dma =
		ioc->request_dma + ((ioc->scsiio_depth + 1) * ioc->request_sz);
	ioc->internal =
		ioc->hi_priority + (ioc->hi_priority_depth * ioc->request_sz);
	ioc->internal_dma =
		ioc->hi_priority_dma + (ioc->hi_priority_depth *
					ioc->request_sz);
	pr_info(
		"%s request pool(0x%p) - dma(0x%llx):\n\t\t"
		"depth(%d), frame_size(%d), pool_size(%d kB)\n",
		ioc->name,
		ioc->request,
		(unsigned long long)ioc->request_dma,
		ioc->hba_queue_depth,
		ioc->request_sz,
		(ioc->hba_queue_depth * ioc->request_sz) / 1024);
	total_sz += sz;
	ioc->io_queue_num = kcalloc(ioc->scsiio_depth, sizeof(u16), GFP_KERNEL);
	if (!ioc->io_queue_num) {
		rc = -ENOMEM;
		goto out;
	}
	dinitprintk(ioc, pr_info("%s scsiio(0x%p): depth(%d)\n",
				 ioc->name, ioc->request, ioc->scsiio_depth));
	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
				  sizeof(struct leapioraid_request_tracker), GFP_KERNEL);
	if (!ioc->hpr_lookup) {
		rc = -ENOMEM;
		goto out;
	}
	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
	dinitprintk(ioc, pr_info(
		"%s hi_priority(0x%p): depth(%d), start smid(%d)\n",
		ioc->name, ioc->hi_priority, ioc->hi_priority_depth,
		ioc->hi_priority_smid));
	ioc->internal_lookup =
		kcalloc(ioc->internal_depth, sizeof(struct leapioraid_request_tracker),
			GFP_KERNEL);
	if (!ioc->internal_lookup) {
		pr_err("%s internal_lookup: kcalloc failed\n",
		       ioc->name);
		rc = -ENOMEM;
		goto out;
	}
	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
	dinitprintk(ioc, pr_info(
		"%s internal(0x%p): depth(%d), start smid(%d)\n",
		ioc->name, ioc->internal, ioc->internal_depth,
		ioc->internal_smid));
	sz = ioc->scsiio_depth * sizeof(struct leapioraid_chain_lookup);
	ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
	if (!ioc->chain_lookup) {
		/* Shrink credits and restart from "retry" if there is room. */
		if ((max_request_credit - 64) >
		    (ioc->internal_depth + LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) {
			max_request_credit -= 64;
			leapioraid_base_release_memory_pools(ioc);
			goto retry;
		} else {
			pr_err(
				"%s chain_lookup: __get_free_pages failed\n",
				ioc->name);
			rc = -ENOMEM;
			goto out;
		}
	}
	sz = ioc->chains_needed_per_io * sizeof(struct leapioraid_chain_tracker);
	for (i = 0; i < ioc->scsiio_depth; i++) {
		ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
		if (!ioc->chain_lookup[i].chains_per_smid) {
			if ((max_request_credit - 64) >
			    (ioc->internal_depth +
			     LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) {
				max_request_credit -= 64;
				leapioraid_base_release_memory_pools(ioc);
				goto retry;
			} else {
				pr_err("%s chain_lookup: kzalloc failed\n", ioc->name);
				rc = -ENOMEM;
				goto out;
			}
		}
	}
	ioc->chains_per_prp_buffer = 0;
	rc = leapioraid_base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
	if (rc == -ENOMEM)
		return -ENOMEM;
	else if (rc == -EAGAIN) {
		if (ioc->use_32bit_dma && ioc->dma_mask > 32)
			goto try_32bit_dma;
		else {
			if ((max_request_credit - 64) >
			    (ioc->internal_depth +
			     LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT)) {
				max_request_credit -= 64;
				leapioraid_base_release_memory_pools(ioc);
				goto retry_allocation;
			} else {
				pr_err("%s chain_lookup: dma_pool_alloc failed\n", ioc->name);
				return -ENOMEM;
			}
		}
	}
	total_sz += ioc->chain_segment_sz *
		((ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
		 ioc->scsiio_depth);
	sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
	rc = leapioraid_base_allocate_sense_dma_pool(ioc, sense_sz);
	if (rc == -ENOMEM)
		return -ENOMEM;
	else if (rc == -EAGAIN)
		goto try_32bit_dma;
	total_sz += sense_sz;
	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
	rc = leapioraid_base_allocate_reply_pool(ioc, sz);
	if (rc == -ENOMEM)
		return -ENOMEM;
	else if (rc == -EAGAIN)
		goto try_32bit_dma;
	total_sz += sz;
	sz = ioc->reply_free_queue_depth * 4;
	rc = leapioraid_base_allocate_reply_free_dma_pool(ioc, sz);
	if (rc == -ENOMEM)
		return -ENOMEM;
	else if (rc == -EAGAIN)
		goto try_32bit_dma;
	total_sz += sz;
	reply_post_free_sz = ioc->reply_post_queue_depth *
		sizeof(struct LeapioraidDefaultRepDesc_t);
	rdpq_sz = reply_post_free_sz * LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK;
	if ((leapioraid_base_is_controller_msix_enabled(ioc)
	     && !ioc->rdpq_array_enable)
	    || (ioc->reply_queue_count < LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK))
		rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
	rc = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
	if (rc == -ENOMEM)
		return -ENOMEM;
	else if (rc == -EAGAIN)
		goto try_32bit_dma;
	else {
		if (ioc->rdpq_array_enable && rc == 0) {
			reply_post_free_array_sz = ioc->reply_queue_count *
				sizeof(struct LeapioraidIOCInitRDPQArrayEntry);
			rc = leapioraid_base_allocate_reply_post_free_array(
				ioc, reply_post_free_array_sz);
			if (rc == -ENOMEM)
				return -ENOMEM;
			else if (rc == -EAGAIN)
				goto try_32bit_dma;
		}
	}
	total_sz += rdpq_sz;
	ioc->config_page_sz = 512;
	ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
					      ioc->config_page_sz,
					      &ioc->config_page_dma,
					      GFP_KERNEL);
	if (!ioc->config_page) {
		pr_err("%s config page: dma_pool_alloc failed\n", ioc->name);
		rc = -ENOMEM;
		goto out;
	}
	pr_err("%s config page(0x%p) - dma(0x%llx): size(%d)\n",
	       ioc->name, ioc->config_page,
	       (unsigned long long)ioc->config_page_dma,
	       ioc->config_page_sz);
	total_sz += ioc->config_page_sz;
	pr_info("%s Allocated physical memory: size(%d kB)\n",
		ioc->name, total_sz / 1024);
	pr_info(
		"%s Current IOC Queue Depth(%d), Max Queue Depth(%d)\n",
		ioc->name,
		ioc->shost->can_queue,
		facts->RequestCredit);
	return 0;
try_32bit_dma:
	/* Free everything, then retry with 32-bit DMA or reduced depth. */
	leapioraid_base_release_memory_pools(ioc);
	if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
		if (leapioraid_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
			pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
			       pci_name(ioc->pdev));
			return -ENODEV;
		}
	} else if (leapioraid_base_reduce_hba_queue_depth(ioc) != 0)
		return -ENOMEM;
	goto retry_allocation;
out:
	return rc;
}
+ leapioraid_base_pause_mq_polling(ioc); + leapioraid_scsihost_flush_running_cmds(ioc); + leapioraid_print_fault_code(ioc, fault_code); +} + +u32 +leapioraid_base_get_iocstate(struct LEAPIORAID_ADAPTER *ioc, int cooked) +{ + u32 s, sc; + + s = ioc->base_readl( + &ioc->chip->Doorbell, LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY); + sc = s & LEAPIORAID_IOC_STATE_MASK; + if (sc != LEAPIORAID_IOC_STATE_MASK) { + if ((sc == LEAPIORAID_IOC_STATE_FAULT) && + ((s & LEAPIORAID_DOORBELL_DATA_MASK) == + LEAPIORAID_IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED)) { + leapioraid_base_flush_ios_and_panic(ioc, + s & + LEAPIORAID_DOORBELL_DATA_MASK); + panic("TEMPERATURE FAULT: STOPPING; panic in %s\n", + __func__); + } + } + return cooked ? sc : s; +} + +static int +leapioraid_base_send_ioc_reset( + struct LEAPIORAID_ADAPTER *ioc, u8 reset_type, int timeout) +{ + u32 ioc_state; + int r = 0; + unsigned long flags; + + if (reset_type != LEAPIORAID_FUNC_IOC_MESSAGE_UNIT_RESET) { + pr_err("%s %s: unknown reset_type\n", + ioc->name, __func__); + return -EFAULT; + } + if (!(ioc->facts.IOCCapabilities & + LEAPIORAID_IOCFACTS_CAPABILITY_EVENT_REPLAY)) + return -EFAULT; + pr_info("%s sending message unit reset !!\n", + ioc->name); + writel(reset_type << LEAPIORAID_DOORBELL_FUNCTION_SHIFT, + &ioc->chip->Doorbell); + if ((leapioraid_base_wait_for_doorbell_ack(ioc, 15))) + r = -EFAULT; + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_COREDUMP + && (ioc->is_driver_loading == 1 + || ioc->fault_reset_work_q == NULL)) { + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + leapioraid_base_coredump_info(ioc, ioc_state); + leapioraid_base_wait_for_coredump_completion(ioc, __func__); + r = -EFAULT; + goto out; + } + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (r != 0) + goto out; + ioc_state = + leapioraid_base_wait_on_iocstate(ioc, 
LEAPIORAID_IOC_STATE_READY, + timeout); + if (ioc_state) { + pr_err("%s %s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + r = -EFAULT; + goto out; + } +out: + pr_info("%s message unit reset: %s\n", + ioc->name, ((r == 0) ? "SUCCESS" : "FAILED")); + return r; +} + +int +leapioraid_wait_for_ioc_to_operational(struct LEAPIORAID_ADAPTER *ioc, + int wait_count) +{ + int wait_state_count = 0; + u32 ioc_state; + + if (leapioraid_base_pci_device_is_unplugged(ioc)) + return -EFAULT; + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + while (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) { + if (leapioraid_base_pci_device_is_unplugged(ioc)) + return -EFAULT; + if (ioc->is_driver_loading) + return -ETIME; + if (wait_state_count++ == wait_count) { + pr_err( + "%s %s: failed due to ioc not operational\n", + ioc->name, __func__); + return -EFAULT; + } + ssleep(1); + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + pr_info("%s %s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info("%s %s: ioc is operational\n", + ioc->name, __func__); + return 0; +} + +int +leapioraid_base_sas_iounit_control(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSasIoUnitControlRep_t *mpi_reply, + struct LeapioraidSasIoUnitControlReq_t *mpi_request) +{ + u16 smid; + u8 issue_reset; + int rc; + void *request; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + mutex_lock(&ioc->base_cmds.mutex); + if (ioc->base_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: base_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = 0; + ioc->base_cmds.status = LEAPIORAID_CMD_PENDING; + request = 
leapioraid_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memcpy(request, mpi_request, sizeof(struct LeapioraidSasIoUnitControlReq_t)); + if (mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_HARD_RESET || + mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_LINK_RESET) + ioc->ioc_link_reset_in_progress = 1; + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, + msecs_to_jiffies(10000)); + if ((mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_HARD_RESET || + mpi_request->Operation == LEAPIORAID_SAS_OP_PHY_LINK_RESET) && + ioc->ioc_link_reset_in_progress) + ioc->ioc_link_reset_in_progress = 0; + if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->base_cmds.status, mpi_request, + sizeof + (struct LeapioraidSasIoUnitControlReq_t) + / 4, issue_reset); + goto issue_host_reset; + } + if (ioc->base_cmds.status & LEAPIORAID_CMD_REPLY_VALID) + memcpy(mpi_reply, ioc->base_cmds.reply, + sizeof(struct LeapioraidSasIoUnitControlRep_t)); + else + memset(mpi_reply, 0, sizeof(struct LeapioraidSasIoUnitControlRep_t)); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + goto out; +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + rc = -EFAULT; +out: + mutex_unlock(&ioc->base_cmds.mutex); + return rc; +} + +int +leapioraid_base_scsi_enclosure_processor(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSepRep_t *mpi_reply, + struct LeapioraidSepReq_t *mpi_request) +{ + u16 smid; + u8 issue_reset; + int rc; + void *request; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + mutex_lock(&ioc->base_cmds.mutex); + if (ioc->base_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: base_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + 
smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = 0; + ioc->base_cmds.status = LEAPIORAID_CMD_PENDING; + request = leapioraid_base_get_msg_frame(ioc, smid); + memset(request, 0, ioc->request_sz); + ioc->base_cmds.smid = smid; + memcpy(request, mpi_request, sizeof(struct LeapioraidSepReq_t)); + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, + msecs_to_jiffies(10000)); + if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->base_cmds.status, mpi_request, + sizeof(struct LeapioraidSepReq_t) / 4, + issue_reset); + goto issue_host_reset; + } + if (ioc->base_cmds.status & LEAPIORAID_CMD_REPLY_VALID) + memcpy(mpi_reply, ioc->base_cmds.reply, + sizeof(struct LeapioraidSepRep_t)); + else + memset(mpi_reply, 0, sizeof(struct LeapioraidSepRep_t)); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + goto out; +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + rc = -EFAULT; +out: + mutex_unlock(&ioc->base_cmds.mutex); + return rc; +} + +static int +leapioraid_base_get_port_facts(struct LEAPIORAID_ADAPTER *ioc, int port) +{ + struct LeapioraidPortFactsReq_t mpi_request; + struct LeapioraidPortFactsRep_t mpi_reply; + struct leapioraid_port_facts *pfacts; + int mpi_reply_sz, mpi_request_sz, r; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + mpi_reply_sz = sizeof(struct LeapioraidPortFactsRep_t); + mpi_request_sz = sizeof(struct LeapioraidPortFactsReq_t); + memset(&mpi_request, 0, mpi_request_sz); + mpi_request.Function = LEAPIORAID_FUNC_PORT_FACTS; + mpi_request.PortNumber = port; + r = leapioraid_base_handshake_req_reply_wait(ioc, mpi_request_sz, + (u32 *) &mpi_request, + mpi_reply_sz, + (u16 *) &mpi_reply, 
5); + if (r != 0) { + pr_err("%s %s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + pfacts = &ioc->pfacts[port]; + memset(pfacts, 0, sizeof(struct leapioraid_port_facts)); + pfacts->PortNumber = mpi_reply.PortNumber; + pfacts->VP_ID = mpi_reply.VP_ID; + pfacts->VF_ID = mpi_reply.VF_ID; + pfacts->MaxPostedCmdBuffers = + le16_to_cpu(mpi_reply.MaxPostedCmdBuffers); + return 0; +} + +static int +leapioraid_base_send_ioc_init(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidIOCInitReq_t mpi_request; + struct LeapioraidIOCInitRep_t mpi_reply; + int i, r = 0; + ktime_t current_time; + u16 ioc_status; + u32 reply_post_free_ary_sz; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + memset(&mpi_request, 0, sizeof(struct LeapioraidIOCInitReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_IOC_INIT; + mpi_request.WhoInit = LEAPIORAID_WHOINIT_HOST_DRIVER; + mpi_request.VF_ID = 0; + mpi_request.VP_ID = 0; + mpi_request.MsgVersion = cpu_to_le16(0x0206); + mpi_request.HeaderVersion = cpu_to_le16(0x3A00); + mpi_request.HostPageSize = 12; + if (leapioraid_base_is_controller_msix_enabled(ioc)) + mpi_request.HostMSIxVectors = ioc->reply_queue_count; + mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz / 4); + mpi_request.ReplyDescriptorPostQueueDepth = + cpu_to_le16(ioc->reply_post_queue_depth); + mpi_request.ReplyFreeQueueDepth = + cpu_to_le16(ioc->reply_free_queue_depth); + mpi_request.SenseBufferAddressHigh = + cpu_to_le32((u64) ioc->sense_dma >> 32); + mpi_request.SystemReplyAddressHigh = + cpu_to_le32((u64) ioc->reply_dma >> 32); + mpi_request.SystemRequestFrameBaseAddress = + cpu_to_le64((u64) ioc->request_dma); + mpi_request.ReplyFreeQueueAddress = + cpu_to_le64((u64) ioc->reply_free_dma); + if (ioc->rdpq_array_enable) { + reply_post_free_ary_sz = ioc->reply_queue_count * + sizeof(struct LeapioraidIOCInitRDPQArrayEntry); + memset(ioc->reply_post_free_array, 0, reply_post_free_ary_sz); + for (i = 0; i < 
ioc->reply_queue_count; i++) + ioc->reply_post_free_array[i].RDPQBaseAddress = + cpu_to_le64((u64) ioc->reply_post[i].reply_post_free_dma); + mpi_request.MsgFlags = LEAPIORAID_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64) ioc->reply_post_free_array_dma); + } else { + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64) ioc->reply_post[0].reply_post_free_dma); + } + mpi_request.ConfigurationFlags |= 0x0002; + current_time = ktime_get_real(); + mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time)); + if (ioc->logging_level & LEAPIORAID_DEBUG_INIT) { + __le32 *mfp; + int i; + + mfp = (__le32 *) &mpi_request; + pr_info("%s \toffset:data\n", ioc->name); + for (i = 0; i < sizeof(struct LeapioraidIOCInitReq_t) / 4; i++) + pr_info("%s \t[0x%02x]:%08x\n", + ioc->name, i * 4, le32_to_cpu(mfp[i])); + } + r = leapioraid_base_handshake_req_reply_wait(ioc, + sizeof + (struct LeapioraidIOCInitReq_t), + (u32 *) &mpi_request, + sizeof + (struct LeapioraidIOCInitRep_t), + (u16 *) &mpi_reply, 30); + if (r != 0) { + pr_err("%s %s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS || mpi_reply.IOCLogInfo) { + pr_err("%s %s: failed\n", ioc->name, + __func__); + r = -EIO; + } + ioc->timestamp_update_count = 0; + return r; +} + +int +leapioraid_base_trace_log_init(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidIOCLogReq_t mpi_request; + struct LeapioraidIOCLogRep_t mpi_reply; + u16 ioc_status; + u32 r; + + dinitprintk(ioc, + pr_info("%s %s\n", ioc->name, __func__)); + if (ioc->log_buffer == NULL) { + ioc->log_buffer = + dma_alloc_coherent(&ioc->pdev->dev, SYS_LOG_BUF_SIZE, + &ioc->log_buffer_dma, GFP_KERNEL); + } + memset(&mpi_request, 0, sizeof(struct LeapioraidIOCLogReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_LOG_INIT; + mpi_request.BufAddr = 
ioc->log_buffer_dma; + mpi_request.BufSize = SYS_LOG_BUF_SIZE; + r = leapioraid_base_handshake_req_reply_wait(ioc, + sizeof + (struct LeapioraidIOCLogReq_t), + (u32 *) &mpi_request, + sizeof + (struct LeapioraidIOCLogRep_t), + (u16 *) &mpi_reply, 30); + if (r != 0) { + pr_err("%s %s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS || mpi_reply.IOCLogInfo) { + pr_err("%s %s: failed\n", ioc->name, + __func__); + r = -EIO; + } + return r; +} + +static int +leapioraid_base_trace_log_exit(struct LEAPIORAID_ADAPTER *ioc) +{ + if (ioc->log_buffer) + dma_free_coherent(&ioc->pdev->dev, SYS_LOG_BUF_SIZE, + ioc->log_buffer, ioc->log_buffer_dma); + return 0; +} + +u8 +leapioraid_port_enable_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + u16 ioc_status; + + if (ioc->port_enable_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (!mpi_reply) + return 1; + if (mpi_reply->Function != LEAPIORAID_FUNC_PORT_ENABLE) + return 1; + ioc->port_enable_cmds.status &= ~LEAPIORAID_CMD_PENDING; + ioc->port_enable_cmds.status |= LEAPIORAID_CMD_COMPLETE; + ioc->port_enable_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + memcpy(ioc->port_enable_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + ioc->port_enable_failed = 1; + if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_COMPLETE_ASYNC) { + ioc->port_enable_cmds.status &= ~LEAPIORAID_CMD_COMPLETE_ASYNC; + if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) { + leapioraid_port_enable_complete(ioc); + return 1; + } + + ioc->start_scan_failed = ioc_status; + ioc->start_scan = 0; + return 1; + } + complete(&ioc->port_enable_cmds.done); 
+ return 1; +} + +static int +leapioraid_base_send_port_enable(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidPortEnableReq_t *mpi_request; + struct LeapioraidPortEnableRep_t *mpi_reply; + int r = 0; + u16 smid; + u16 ioc_status; + + pr_info("%s sending port enable !!\n", ioc->name); + if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_PENDING) { + pr_err( + "%s %s: internal command already in use\n", ioc->name, + __func__); + return -EAGAIN; + } + smid = leapioraid_base_get_smid(ioc, ioc->port_enable_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + return -EAGAIN; + } + ioc->port_enable_cmds.status = LEAPIORAID_CMD_PENDING; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->port_enable_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidPortEnableReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_PORT_ENABLE; + init_completion(&ioc->port_enable_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300 * HZ); + if (!(ioc->port_enable_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidPortEnableReq_t) / 4); + if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_RESET) + r = -EFAULT; + else + r = -ETIME; + goto out; + } + mpi_reply = ioc->port_enable_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s %s: failed with (ioc_status=0x%08x)\n", ioc->name, + __func__, ioc_status); + r = -EFAULT; + goto out; + } +out: + ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED; + pr_info("%s port enable: %s\n", ioc->name, ((r == 0) ? 
+ "SUCCESS" + : + "FAILED")); + return r; +} + +int +leapioraid_port_enable(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidPortEnableReq_t *mpi_request; + u16 smid; + + pr_info("%s sending port enable !!\n", ioc->name); + if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_PENDING) { + pr_err( + "%s %s: internal command already in use\n", ioc->name, + __func__); + return -EAGAIN; + } + smid = leapioraid_base_get_smid(ioc, ioc->port_enable_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + return -EAGAIN; + } + ioc->drv_internal_flags |= LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED; + ioc->port_enable_cmds.status = LEAPIORAID_CMD_PENDING; + ioc->port_enable_cmds.status |= LEAPIORAID_CMD_COMPLETE_ASYNC; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->port_enable_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidPortEnableReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_PORT_ENABLE; + ioc->put_smid_default(ioc, smid); + return 0; +} + +static int +leapioraid_base_determine_wait_on_discovery(struct LEAPIORAID_ADAPTER *ioc) +{ + if (ioc->ir_firmware) + return 1; + if (!ioc->bios_pg3.BiosVersion) + return 0; + if ((ioc->bios_pg2.CurrentBootDeviceForm & + LEAPIORAID_BIOSPAGE2_FORM_MASK) == + LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && + (ioc->bios_pg2.ReqBootDeviceForm & + LEAPIORAID_BIOSPAGE2_FORM_MASK) == + LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && + (ioc->bios_pg2.ReqAltBootDeviceForm & + LEAPIORAID_BIOSPAGE2_FORM_MASK) == + LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED) + return 0; + return 1; +} + +static void +leapioraid_base_unmask_events(struct LEAPIORAID_ADAPTER *ioc, u16 event) +{ + u32 desired_event; + + if (event >= 128) + return; + desired_event = (1 << (event % 32)); + if (event < 32) + ioc->event_masks[0] &= ~desired_event; + else if (event < 64) + ioc->event_masks[1] &= ~desired_event; + else if (event < 96) + ioc->event_masks[2] &= ~desired_event; + else if (event < 
128) + ioc->event_masks[3] &= ~desired_event; +} + +static int +leapioraid_base_event_notification(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidEventNotificationReq_t *mpi_request; + u16 smid; + int r = 0; + int i, issue_diag_reset = 0; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (ioc->base_cmds.status & LEAPIORAID_CMD_PENDING) { + pr_err( + "%s %s: internal command already in use\n", ioc->name, + __func__); + return -EAGAIN; + } + smid = leapioraid_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + return -EAGAIN; + } + ioc->base_cmds.status = LEAPIORAID_CMD_PENDING; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidEventNotificationReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_EVENT_NOTIFICATION; + mpi_request->VF_ID = 0; + mpi_request->VP_ID = 0; + for (i = 0; i < LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + mpi_request->EventMasks[i] = cpu_to_le32(ioc->event_masks[i]); + init_completion(&ioc->base_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->base_cmds.done, 30 * HZ); + if (!(ioc->base_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidEventNotificationReq_t) / 4); + if (ioc->base_cmds.status & LEAPIORAID_CMD_RESET) + r = -EFAULT; + else + issue_diag_reset = 1; + } else + dinitprintk(ioc, pr_info("%s %s: complete\n", + ioc->name, __func__)); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + if (issue_diag_reset) { + if (ioc->drv_internal_flags & LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED) + return -EFAULT; + if (leapioraid_base_check_for_fault_and_issue_reset(ioc)) + return -EFAULT; + r = -EAGAIN; + } + return r; +} + +void +leapioraid_base_validate_event_type(struct LEAPIORAID_ADAPTER *ioc, + u32 *event_type) +{ + 
int i, j; + u32 event_mask, desired_event; + u8 send_update_to_fw; + + for (i = 0, send_update_to_fw = 0; i < + LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS; i++) { + event_mask = ~event_type[i]; + desired_event = 1; + for (j = 0; j < 32; j++) { + if (!(event_mask & desired_event) && + (ioc->event_masks[i] & desired_event)) { + ioc->event_masks[i] &= ~desired_event; + send_update_to_fw = 1; + } + desired_event = (desired_event << 1); + } + } + if (!send_update_to_fw) + return; + mutex_lock(&ioc->base_cmds.mutex); + leapioraid_base_event_notification(ioc); + mutex_unlock(&ioc->base_cmds.mutex); +} + +int +leapioraid_base_make_ioc_ready(struct LEAPIORAID_ADAPTER *ioc, + enum reset_type type) +{ + u32 ioc_state; + int rc; + int count; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (!leapioraid_base_pci_device_is_available(ioc)) + return 0; + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + dhsprintk(ioc, pr_info("%s %s: ioc_state(0x%08x)\n", + ioc->name, __func__, ioc_state)); + count = 0; + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_RESET) { + while ((ioc_state & LEAPIORAID_IOC_STATE_MASK) != + LEAPIORAID_IOC_STATE_READY) { + if (count++ == 10) { + pr_err( + "%s %s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + return -EFAULT; + } + ssleep(1); + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + } + } + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_READY) + return 0; + if (ioc_state & LEAPIORAID_DOORBELL_USED) { + pr_info("%s unexpected doorbell active!\n", + ioc->name); + goto issue_diag_reset; + } + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + goto issue_diag_reset; + } + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_COREDUMP) { + if (ioc->ioc_coredump_loop != 0xFF) { + leapioraid_base_coredump_info(ioc, ioc_state & + 
LEAPIORAID_DOORBELL_DATA_MASK); + leapioraid_base_wait_for_coredump_completion(ioc, + __func__); + } + goto issue_diag_reset; + } + if (type == FORCE_BIG_HAMMER) + goto issue_diag_reset; + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_OPERATIONAL) + if (! + (leapioraid_base_send_ioc_reset + (ioc, LEAPIORAID_FUNC_IOC_MESSAGE_UNIT_RESET, 15))) { + return 0; + } +issue_diag_reset: + rc = leapioraid_base_diag_reset(ioc); + return rc; +} + +static int +leapioraid_base_make_ioc_operational(struct LEAPIORAID_ADAPTER *ioc) +{ + int r, rc, i, index; + unsigned long flags; + u32 reply_address; + u16 smid; + struct leapioraid_tr_list *delayed_tr, *delayed_tr_next; + struct leapioraid_sc_list *delayed_sc, *delayed_sc_next; + struct leapioraid_event_ack_list *delayed_event_ack, *delayed_event_ack_next; + struct leapioraid_adapter_reply_queue *reply_q; + union LeapioraidRepDescUnion_t *reply_post_free_contig; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_tr_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_tr_volume_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_internal_tm_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + list_for_each_entry_safe(delayed_sc, delayed_sc_next, + &ioc->delayed_sc_list, list) { + list_del(&delayed_sc->list); + kfree(delayed_sc); + } + list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next, + &ioc->delayed_event_ack_list, list) { + list_del(&delayed_event_ack->list); + kfree(delayed_event_ack); + } + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + INIT_LIST_HEAD(&ioc->hpr_free_list); + smid = ioc->hi_priority_smid; + for (i = 0; i < ioc->hi_priority_depth; i++, smid++) { + ioc->hpr_lookup[i].cb_idx = 0xFF; + 
ioc->hpr_lookup[i].smid = smid; + list_add_tail(&ioc->hpr_lookup[i].tracker_list, + &ioc->hpr_free_list); + } + INIT_LIST_HEAD(&ioc->internal_free_list); + smid = ioc->internal_smid; + for (i = 0; i < ioc->internal_depth; i++, smid++) { + ioc->internal_lookup[i].cb_idx = 0xFF; + ioc->internal_lookup[i].smid = smid; + list_add_tail(&ioc->internal_lookup[i].tracker_list, + &ioc->internal_free_list); + } + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + for (i = 0, reply_address = (u32) ioc->reply_dma; + i < ioc->reply_free_queue_depth; i++, reply_address += + ioc->reply_sz) { + ioc->reply_free[i] = cpu_to_le32(reply_address); + } + if (ioc->is_driver_loading) + leapioraid_base_assign_reply_queues(ioc); + index = 0; + reply_post_free_contig = ioc->reply_post[0].reply_post_free; + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (ioc->rdpq_array_enable) { + reply_q->reply_post_free = + ioc->reply_post[index++].reply_post_free; + } else { + reply_q->reply_post_free = reply_post_free_contig; + reply_post_free_contig += ioc->reply_post_queue_depth; + } + reply_q->reply_post_host_index = 0; + for (i = 0; i < ioc->reply_post_queue_depth; i++) + reply_q->reply_post_free[i].Words = + cpu_to_le64(ULLONG_MAX); + if (!leapioraid_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_free_queue; + } +skip_init_reply_post_free_queue: + r = leapioraid_base_send_ioc_init(ioc); + if (r) { + if (!ioc->is_driver_loading) + return r; + rc = leapioraid_base_check_for_fault_and_issue_reset(ioc); + if (rc || (leapioraid_base_send_ioc_init(ioc))) + return r; + } + ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; + writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (ioc->combined_reply_queue) { + for (i = 0; i < ioc->nc_reply_index_count; i++) + writel((reply_q->msix_index & 7) << + LEAPIORAID_RPHI_MSIX_INDEX_SHIFT, + ioc->replyPostRegisterIndex[i]); + } else 
{ + writel(reply_q->msix_index << LEAPIORAID_RPHI_MSIX_INDEX_SHIFT, + &ioc->chip->ReplyPostHostIndex); + } + if (!leapioraid_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_host_index; + } +skip_init_reply_post_host_index: + leapioraid_base_unmask_interrupts(ioc); + r = leapioraid_base_display_fwpkg_version(ioc); + if (r) + return r; + r = leapioraid_base_static_config_pages(ioc); + if (r) + return r; + r = leapioraid_base_event_notification(ioc); + if (r) + return r; + leapioraid_base_start_hba_unplug_watchdog(ioc); + if (!ioc->shost_recovery) { + ioc->wait_for_discovery_to_complete = + leapioraid_base_determine_wait_on_discovery(ioc); + return r; + } + r = leapioraid_base_send_port_enable(ioc); + if (r) + return r; + return r; +} + +void +leapioraid_base_free_resources(struct LEAPIORAID_ADAPTER *ioc) +{ + dexitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (!ioc->chip_phys) + return; + leapioraid_base_mask_interrupts(ioc); + ioc->shost_recovery = 1; + leapioraid_base_make_ioc_ready(ioc, SOFT_RESET); + ioc->shost_recovery = 0; + leapioraid_base_unmap_resources(ioc); +} + +int +leapioraid_base_attach(struct LEAPIORAID_ADAPTER *ioc) +{ + int r, rc, i; + int cpu_id, last_cpu_id = 0; + + dinitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + ioc->cpu_count = num_online_cpus(); + for_each_online_cpu(cpu_id) + last_cpu_id = cpu_id; + ioc->cpu_msix_table_sz = last_cpu_id + 1; + ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL); + ioc->reply_queue_count = 1; + if (!ioc->cpu_msix_table) { + r = -ENOMEM; + goto out_free_resources; + } + ioc->rdpq_array_enable_assigned = 0; + ioc->use_32bit_dma = 0; + ioc->dma_mask = 64; + ioc->base_readl = &leapioraid_base_readl_aero; + ioc->smp_affinity_enable = smp_affinity_enable; + r = leapioraid_base_map_resources(ioc); + if (r) + goto out_free_resources; + pci_set_drvdata(ioc->pdev, ioc->shost); + r = leapioraid_base_get_ioc_facts(ioc); + if (r) { + rc = 
leapioraid_base_check_for_fault_and_issue_reset(ioc); + if (rc || (leapioraid_base_get_ioc_facts(ioc))) + goto out_free_resources; + } + + ioc->build_sg_scmd = &leapioraid_base_build_sg_scmd_ieee; + ioc->build_sg = &leapioraid_base_build_sg_ieee; + ioc->build_zero_len_sge = + &leapioraid_base_build_zero_len_sge_ieee; + ioc->sge_size_ieee = sizeof(struct LEAPIORAID_IEEE_SGE_SIMPLE64); + if (ioc->high_iops_queues) + ioc->get_msix_index_for_smlio = + &leapioraid_base_get_high_iops_msix_index; + else + ioc->get_msix_index_for_smlio = &leapioraid_base_get_msix_index; + + if (ioc->atomic_desc_capable) { + ioc->put_smid_default = + &leapioraid_base_put_smid_default_atomic; + ioc->put_smid_scsi_io = + &leapioraid_base_put_smid_scsi_io_atomic; + ioc->put_smid_fast_path = + &leapioraid_base_put_smid_fast_path_atomic; + ioc->put_smid_hi_priority = + &leapioraid_base_put_smid_hi_priority_atomic; + } else { + ioc->put_smid_default = &leapioraid_base_put_smid_default; + ioc->put_smid_scsi_io = &leapioraid_base_put_smid_scsi_io; + ioc->put_smid_fast_path = &leapioraid_base_put_smid_fast_path; + ioc->put_smid_hi_priority = + &leapioraid_base_put_smid_hi_priority; + } + ioc->build_sg_mpi = &leapioraid_base_build_sg; + ioc->build_zero_len_sge_mpi = &leapioraid_base_build_zero_len_sge; + r = leapioraid_base_make_ioc_ready(ioc, SOFT_RESET); + if (r) + goto out_free_resources; + if (ioc->open_pcie_trace) { + r = leapioraid_base_trace_log_init(ioc); + if (r) { + pr_err("log init failed\n"); + goto out_free_resources; + } + } + ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts, + sizeof(struct leapioraid_port_facts), GFP_KERNEL); + if (!ioc->pfacts) { + r = -ENOMEM; + goto out_free_resources; + } + for (i = 0; i < ioc->facts.NumberOfPorts; i++) { + r = leapioraid_base_get_port_facts(ioc, i); + if (r) { + rc = leapioraid_base_check_for_fault_and_issue_reset + (ioc); + if (rc || (leapioraid_base_get_port_facts(ioc, i))) + goto out_free_resources; + } + } + r = 
leapioraid_base_allocate_memory_pools(ioc); + if (r) + goto out_free_resources; + if (irqpoll_weight > 0) + ioc->thresh_hold = irqpoll_weight; + else + ioc->thresh_hold = ioc->hba_queue_depth / 4; + leapioraid_base_init_irqpolls(ioc); + init_waitqueue_head(&ioc->reset_wq); + ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + ioc->pd_handles_sz++; + ioc->pd_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL); + if (!ioc->pd_handles) { + r = -ENOMEM; + goto out_free_resources; + } + ioc->blocking_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL); + if (!ioc->blocking_handles) { + r = -ENOMEM; + goto out_free_resources; + } + ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + ioc->pend_os_device_add_sz++; + ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz, + GFP_KERNEL); + if (!ioc->pend_os_device_add) + goto out_free_resources; + ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz; + ioc->device_remove_in_progress = + kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL); + if (!ioc->device_remove_in_progress) + goto out_free_resources; + ioc->tm_tr_retry_sz = ioc->facts.MaxDevHandle * sizeof(u8); + ioc->tm_tr_retry = kzalloc(ioc->tm_tr_retry_sz, GFP_KERNEL); + if (!ioc->tm_tr_retry) + goto out_free_resources; + ioc->fwfault_debug = leapioraid_fwfault_debug; + mutex_init(&ioc->base_cmds.mutex); + ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->base_cmds.status = LEAPIORAID_CMD_NOT_USED; + ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED; + ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_init(&ioc->transport_cmds.mutex); + ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_init(&ioc->scsih_cmds.mutex); + ioc->tm_cmds.reply = 
kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->tm_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_init(&ioc->tm_cmds.mutex); + ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->config_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_init(&ioc->config_cmds.mutex); + ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); + ioc->ctl_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_init(&ioc->ctl_cmds.mutex); + + if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply || + !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply || + !ioc->tm_cmds.reply || !ioc->config_cmds.reply || + !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) { + r = -ENOMEM; + goto out_free_resources; + } + for (i = 0; i < LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + ioc->event_masks[i] = -1; + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_SAS_DISCOVERY); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_IR_VOLUME); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_IR_PHYSICAL_DISK); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_IR_OPERATION_STATUS); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_LOG_ENTRY_ADDED); + leapioraid_base_unmask_events(ioc, LEAPIORAID_EVENT_TEMP_THRESHOLD); + leapioraid_base_unmask_events(ioc, + LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR); + r = leapioraid_base_make_ioc_operational(ioc); + if (r == -EAGAIN) + r = leapioraid_base_make_ioc_operational(ioc); + if (r) + goto out_free_resources; + memcpy(&ioc->prev_fw_facts, &ioc->facts, + 
sizeof(struct leapioraid_facts)); + ioc->non_operational_loop = 0; + ioc->ioc_coredump_loop = 0; + ioc->got_task_abort_from_ioctl = 0; + ioc->got_task_abort_from_sysfs = 0; + return 0; +out_free_resources: + ioc->remove_host = 1; + leapioraid_base_free_resources(ioc); + leapioraid_base_release_memory_pools(ioc); + pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + kfree(ioc->pd_handles); + kfree(ioc->blocking_handles); + kfree(ioc->tm_tr_retry); + kfree(ioc->device_remove_in_progress); + kfree(ioc->pend_os_device_add); + kfree(ioc->tm_cmds.reply); + kfree(ioc->transport_cmds.reply); + kfree(ioc->scsih_cmds.reply); + kfree(ioc->config_cmds.reply); + kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); + kfree(ioc->ctl_cmds.reply); + kfree(ioc->ctl_cmds.sense); + kfree(ioc->pfacts); + ioc->ctl_cmds.reply = NULL; + ioc->base_cmds.reply = NULL; + ioc->tm_cmds.reply = NULL; + ioc->scsih_cmds.reply = NULL; + ioc->transport_cmds.reply = NULL; + ioc->config_cmds.reply = NULL; + ioc->pfacts = NULL; + return r; +} + +void +leapioraid_base_detach(struct LEAPIORAID_ADAPTER *ioc) +{ + dexitprintk(ioc, pr_info("%s %s\n", ioc->name, + __func__)); + if (ioc->open_pcie_trace) + leapioraid_base_trace_log_exit(ioc); + leapioraid_base_stop_watchdog(ioc); + leapioraid_base_stop_hba_unplug_watchdog(ioc); + leapioraid_base_free_resources(ioc); + leapioraid_base_release_memory_pools(ioc); + leapioraid_free_enclosure_list(ioc); + pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + kfree(ioc->pd_handles); + kfree(ioc->blocking_handles); + kfree(ioc->tm_tr_retry); + kfree(ioc->device_remove_in_progress); + kfree(ioc->pend_os_device_add); + kfree(ioc->pfacts); + kfree(ioc->ctl_cmds.reply); + kfree(ioc->ctl_cmds.sense); + kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); + kfree(ioc->tm_cmds.reply); + kfree(ioc->transport_cmds.reply); + kfree(ioc->scsih_cmds.reply); + kfree(ioc->config_cmds.reply); +} + +static void 
+leapioraid_base_clear_outstanding_leapioraid_commands(struct LEAPIORAID_ADAPTER + *ioc) +{ + struct leapioraid_internal_qcmd *scsih_qcmd, *scsih_qcmd_next; + unsigned long flags; + + if (ioc->transport_cmds.status & LEAPIORAID_CMD_PENDING) { + ioc->transport_cmds.status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, ioc->transport_cmds.smid); + complete(&ioc->transport_cmds.done); + } + if (ioc->base_cmds.status & LEAPIORAID_CMD_PENDING) { + ioc->base_cmds.status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, ioc->base_cmds.smid); + complete(&ioc->base_cmds.done); + } + if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_PENDING) { + ioc->port_enable_failed = 1; + ioc->port_enable_cmds.status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, ioc->port_enable_cmds.smid); + if (ioc->is_driver_loading) { + ioc->start_scan_failed = + LEAPIORAID_IOCSTATUS_INTERNAL_ERROR; + ioc->start_scan = 0; + } else + complete(&ioc->port_enable_cmds.done); + } + if (ioc->config_cmds.status & LEAPIORAID_CMD_PENDING) { + ioc->config_cmds.status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, ioc->config_cmds.smid); + ioc->config_cmds.smid = USHORT_MAX; + complete(&ioc->config_cmds.done); + } + spin_lock_irqsave(&ioc->scsih_q_internal_lock, flags); + list_for_each_entry_safe(scsih_qcmd, scsih_qcmd_next, + &ioc->scsih_q_intenal_cmds, list) { + if ((scsih_qcmd->status) & LEAPIORAID_CMD_PENDING) { + scsih_qcmd->status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, scsih_qcmd->smid); + } + } + spin_unlock_irqrestore(&ioc->scsih_q_internal_lock, flags); +} + +static void +leapioraid_base_reset_handler(struct LEAPIORAID_ADAPTER *ioc, int reset_phase) +{ + leapioraid_scsihost_reset_handler(ioc, reset_phase); + leapioraid_ctl_reset_handler(ioc, reset_phase); + switch (reset_phase) { + case LEAPIORAID_IOC_PRE_RESET_PHASE: + dtmprintk(ioc, pr_info("%s %s: LEAPIORAID_IOC_PRE_RESET_PHASE\n", + ioc->name, __func__)); + break; + case 
LEAPIORAID_IOC_AFTER_RESET_PHASE: + dtmprintk(ioc, pr_info("%s %s: LEAPIORAID_IOC_AFTER_RESET_PHASE\n", + ioc->name, __func__)); + leapioraid_base_clear_outstanding_leapioraid_commands(ioc); + break; + case LEAPIORAID_IOC_DONE_RESET_PHASE: + dtmprintk(ioc, pr_info("%s %s: LEAPIORAID_IOC_DONE_RESET_PHASE\n", + ioc->name, __func__)); + break; + } +} + +void +leapioraid_wait_for_commands_to_complete(struct LEAPIORAID_ADAPTER *ioc) +{ + u32 ioc_state; + unsigned long flags; + u16 i; + struct leapioraid_scsiio_tracker *st; + + ioc->pending_io_count = 0; + if (!leapioraid_base_pci_device_is_available(ioc)) { + pr_err("%s %s: pci error recovery reset or pci device unplug occurred\n", + ioc->name, __func__); + return; + } + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) != + LEAPIORAID_IOC_STATE_OPERATIONAL) + return; + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + for (i = 1; i <= ioc->scsiio_depth; i++) { + st = leapioraid_get_st_from_smid(ioc, i); + if (st && st->smid != 0) { + if (st->cb_idx != 0xFF) + ioc->pending_io_count++; + } + } + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + if (!ioc->pending_io_count) + return; + wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ); +} + +static int +leapioraid_base_check_ioc_facts_changes(struct LEAPIORAID_ADAPTER *ioc) +{ + u16 pd_handles_sz, tm_tr_retry_sz; + void *pd_handles = NULL, *blocking_handles = NULL; + void *pend_os_device_add = NULL, *device_remove_in_progress = NULL; + u8 *tm_tr_retry = NULL; + struct leapioraid_facts *old_facts = &ioc->prev_fw_facts; + + if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) { + pd_handles_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + pd_handles_sz++; + pd_handles = krealloc(ioc->pd_handles, pd_handles_sz, + GFP_KERNEL); + if (!pd_handles) { + pr_err( + "%s Unable to allocate the memory for pd_handles of sz: %d\n", + ioc->name, pd_handles_sz); + return -ENOMEM; + } + 
memset(pd_handles + ioc->pd_handles_sz, 0, + (pd_handles_sz - ioc->pd_handles_sz)); + ioc->pd_handles = pd_handles; + blocking_handles = + krealloc(ioc->blocking_handles, pd_handles_sz, GFP_KERNEL); + if (!blocking_handles) { + pr_err( + "%s Unable to allocate the memory for blocking_handles of sz: %d\n", + ioc->name, pd_handles_sz); + return -ENOMEM; + } + memset(blocking_handles + ioc->pd_handles_sz, 0, + (pd_handles_sz - ioc->pd_handles_sz)); + ioc->blocking_handles = blocking_handles; + ioc->pd_handles_sz = pd_handles_sz; + pend_os_device_add = + krealloc(ioc->pend_os_device_add, pd_handles_sz, + GFP_KERNEL); + if (!pend_os_device_add) { + pr_err( + "%s Unable to allocate the memory for pend_os_device_add of sz: %d\n", + ioc->name, pd_handles_sz); + return -ENOMEM; + } + memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0, + (pd_handles_sz - ioc->pend_os_device_add_sz)); + ioc->pend_os_device_add = pend_os_device_add; + ioc->pend_os_device_add_sz = pd_handles_sz; + device_remove_in_progress = + krealloc(ioc->device_remove_in_progress, pd_handles_sz, + GFP_KERNEL); + if (!device_remove_in_progress) { + pr_err( + "%s Unable to allocate the memory for device_remove_in_progress of sz: %d\n", + ioc->name, pd_handles_sz); + return -ENOMEM; + } + memset(device_remove_in_progress + + ioc->device_remove_in_progress_sz, 0, + (pd_handles_sz - ioc->device_remove_in_progress_sz)); + ioc->device_remove_in_progress = device_remove_in_progress; + ioc->device_remove_in_progress_sz = pd_handles_sz; + tm_tr_retry_sz = ioc->facts.MaxDevHandle * sizeof(u8); + tm_tr_retry = krealloc(ioc->tm_tr_retry, tm_tr_retry_sz, + GFP_KERNEL); + if (!tm_tr_retry) { + pr_err( + "%s Unable to allocate the memory for tm_tr_retry of sz: %d\n", + ioc->name, tm_tr_retry_sz); + return -ENOMEM; + } + memset(tm_tr_retry + ioc->tm_tr_retry_sz, 0, + (tm_tr_retry_sz - ioc->tm_tr_retry_sz)); + ioc->tm_tr_retry = tm_tr_retry; + ioc->tm_tr_retry_sz = tm_tr_retry_sz; + } + memcpy(&ioc->prev_fw_facts, 
&ioc->facts, + sizeof(struct leapioraid_facts)); + return 0; +} + +int +leapioraid_base_hard_reset_handler( + struct LEAPIORAID_ADAPTER *ioc, + enum reset_type type) +{ + int r; + unsigned long flags; + + dtmprintk(ioc, pr_info("%s %s: enter\n", ioc->name, + __func__)); + if (!mutex_trylock(&ioc->reset_in_progress_mutex)) { + do { + ssleep(1); + } while (ioc->shost_recovery == 1); + dtmprintk(ioc, + pr_info("%s %s: exit\n", ioc->name, + __func__)); + return ioc->ioc_reset_status; + } + if (!leapioraid_base_pci_device_is_available(ioc)) { + pr_err( + "%s %s: pci error recovery reset or pci device unplug occurred\n", + ioc->name, __func__); + if (leapioraid_base_pci_device_is_unplugged(ioc)) { + leapioraid_base_pause_mq_polling(ioc); + ioc->schedule_dead_ioc_flush_running_cmds(ioc); + leapioraid_base_resume_mq_polling(ioc); + } + r = 0; + goto out_unlocked; + } + leapioraid_halt_firmware(ioc, 0); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + leapioraid_base_get_iocstate(ioc, 0); + leapioraid_base_reset_handler(ioc, LEAPIORAID_IOC_PRE_RESET_PHASE); + leapioraid_wait_for_commands_to_complete(ioc); + leapioraid_base_mask_interrupts(ioc); + leapioraid_base_pause_mq_polling(ioc); + r = leapioraid_base_make_ioc_ready(ioc, type); + if (r) + goto out; + leapioraid_base_reset_handler(ioc, LEAPIORAID_IOC_AFTER_RESET_PHASE); + if (ioc->is_driver_loading && ioc->port_enable_failed) { + ioc->remove_host = 1; + r = -EFAULT; + goto out; + } + r = leapioraid_base_get_ioc_facts(ioc); + if (r) + goto out; + r = leapioraid_base_check_ioc_facts_changes(ioc); + if (r) { + pr_err( + "%s Some of the parameters got changed in this\n\t\t" + "new firmware image and it requires system reboot\n", + ioc->name); + goto out; + } + if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable) + panic( + "%s: Issue occurred with flashing controller firmware.\n\t\t" + "Please reboot the system 
and ensure that the correct\n\t\t" + "firmware version is running\n", + ioc->name); + r = leapioraid_base_make_ioc_operational(ioc); + if (!r) + leapioraid_base_reset_handler(ioc, LEAPIORAID_IOC_DONE_RESET_PHASE); +out: + pr_info("%s %s: %s\n", + ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + ioc->ioc_reset_status = r; + ioc->shost_recovery = 0; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + ioc->ioc_reset_count++; + mutex_unlock(&ioc->reset_in_progress_mutex); +#if defined(DISABLE_RESET_SUPPORT) + if (r != 0) { + struct task_struct *p; + + ioc->remove_host = 1; + ioc->schedule_dead_ioc_flush_running_cmds(ioc); + p = kthread_run(leapioraid_remove_dead_ioc_func, ioc, + "leapioraid_dead_ioc_%d", ioc->id); + if (IS_ERR(p)) + pr_err( + "%s %s: Running leapioraid_dead_ioc thread failed !!!!\n", + ioc->name, __func__); + else + pr_err( + "%s %s: Running leapioraid_dead_ioc thread success !!!!\n", + ioc->name, __func__); + } +#else + if (r != 0) + ioc->schedule_dead_ioc_flush_running_cmds(ioc); +#endif + leapioraid_base_resume_mq_polling(ioc); +out_unlocked: + dtmprintk(ioc, pr_info("%s %s: exit\n", ioc->name, + __func__)); + return r; +} + +struct config_request { + u16 sz; + void *page; + dma_addr_t page_dma; +}; + +static void +leapioraid_config_display_some_debug(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + char *calling_function_name, + struct LeapioraidDefaultRep_t *mpi_reply) +{ + struct LeapioraidCfgReq_t *mpi_request; + char *desc = NULL; + + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + switch (mpi_request->Header.PageType & LEAPIORAID_CONFIG_PAGETYPE_MASK) { + case LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT: + desc = "io_unit"; + break; + case LEAPIORAID_CONFIG_PAGETYPE_IOC: + desc = "ioc"; + break; + case LEAPIORAID_CONFIG_PAGETYPE_BIOS: + desc = "bios"; + break; + case LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME: + desc = "raid_volume"; + break; + case 
LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING: + desc = "manufacturing"; + break; + case LEAPIORAID_CONFIG_PAGETYPE_RAID_PHYSDISK: + desc = "physdisk"; + break; + case LEAPIORAID_CONFIG_PAGETYPE_EXTENDED: + switch (mpi_request->ExtPageType) { + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT: + desc = "sas_io_unit"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER: + desc = "sas_expander"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_DEVICE: + desc = "sas_device"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY: + desc = "sas_phy"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_LOG: + desc = "log"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_ENCLOSURE: + desc = "enclosure"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_RAID_CONFIG: + desc = "raid_config"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_DRIVER_MAPPING: + desc = "driver_mapping"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PORT: + desc = "sas_port"; + break; + case LEAPIORAID_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING: + desc = "ext_manufacturing"; + break; + } + break; + } + if (!desc) + return; + pr_info("%s %s: %s(%d), action(%d), form(0x%08x), smid(%d)\n", + ioc->name, calling_function_name, desc, + mpi_request->Header.PageNumber, mpi_request->Action, + le32_to_cpu(mpi_request->PageAddress), smid); + if (!mpi_reply) + return; + if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) + pr_err( + "%s \tiocstatus(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); +} + +static int +leapioraid_config_alloc_config_dma_memory(struct LEAPIORAID_ADAPTER *ioc, + struct config_request *mem) +{ + int r = 0; + + if (mem->sz > ioc->config_page_sz) { + mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz, + &mem->page_dma, GFP_KERNEL); + if (!mem->page) + r = -ENOMEM; + } else { + mem->page = ioc->config_page; + mem->page_dma = ioc->config_page_dma; + } + ioc->config_vaddr = mem->page; + return r; +} + +static void 
+leapioraid_config_free_config_dma_memory(struct LEAPIORAID_ADAPTER *ioc, + struct config_request *mem) +{ + if (mem->sz > ioc->config_page_sz) + dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page, + mem->page_dma); +} + +u8 +leapioraid_config_done( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + + if (ioc->config_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + if (ioc->config_cmds.smid != smid) + return 1; + ioc->config_cmds.status |= LEAPIORAID_CMD_COMPLETE; + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) { + ioc->config_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + memcpy(ioc->config_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + } + ioc->config_cmds.status &= ~LEAPIORAID_CMD_PENDING; + if (ioc->logging_level & LEAPIORAID_DEBUG_CONFIG) + leapioraid_config_display_some_debug( + ioc, smid, "config_done", mpi_reply); + ioc->config_cmds.smid = USHORT_MAX; + complete(&ioc->config_cmds.done); + return 1; +} + +static int +leapioraid_config_request( + struct LEAPIORAID_ADAPTER *ioc, struct LeapioraidCfgReq_t *mpi_request, + struct LeapioraidCfgRep_t *mpi_reply, int timeout, + void *config_page, u16 config_page_sz) +{ + u16 smid; + struct LeapioraidCfgReq_t *config_request; + int r; + u8 retry_count, issue_host_reset = 0; + struct config_request mem; + u32 ioc_status = UINT_MAX; + u8 issue_reset; + + mutex_lock(&ioc->config_cmds.mutex); + if (ioc->config_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: config_cmd in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->config_cmds.mutex); + return -EAGAIN; + } + retry_count = 0; + memset(&mem, 0, sizeof(struct config_request)); + mpi_request->VF_ID = 0; + mpi_request->VP_ID = 0; + if (config_page) { + mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion; + mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber; + mpi_request->Header.PageType = mpi_reply->Header.PageType; + 
mpi_request->Header.PageLength = mpi_reply->Header.PageLength; + mpi_request->ExtPageLength = mpi_reply->ExtPageLength; + mpi_request->ExtPageType = mpi_reply->ExtPageType; + if (mpi_request->Header.PageLength) + mem.sz = mpi_request->Header.PageLength * 4; + else + mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; + r = leapioraid_config_alloc_config_dma_memory(ioc, &mem); + if (r != 0) + goto out; + if (mpi_request->Action == + LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT || + mpi_request->Action == + LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_NVRAM) { + ioc->base_add_sg_single(&mpi_request->PageBufferSGE, + LEAPIORAID_CONFIG_COMMON_WRITE_SGLFLAGS + | mem.sz, mem.page_dma); + memcpy(mem.page, config_page, + min_t(u16, mem.sz, config_page_sz)); + } else { + memset(config_page, 0, config_page_sz); + ioc->base_add_sg_single(&mpi_request->PageBufferSGE, + LEAPIORAID_CONFIG_COMMON_SGLFLAGS + | mem.sz, mem.page_dma); + memset(mem.page, 0, min_t(u16, mem.sz, config_page_sz)); + } + } +retry_config: + if (retry_count) { + if (retry_count > 2) { + r = -EFAULT; + goto free_mem; + } + pr_info("%s %s: attempting retry (%d)\n", + ioc->name, __func__, retry_count); + } + r = leapioraid_wait_for_ioc_to_operational(ioc, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT); + if (r) { + if (r == -ETIME) + issue_host_reset = 1; + goto free_mem; + } + smid = leapioraid_base_get_smid(ioc, ioc->config_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + ioc->config_cmds.status = LEAPIORAID_CMD_NOT_USED; + r = -EAGAIN; + goto free_mem; + } + r = 0; + memset(mpi_reply, 0, sizeof(struct LeapioraidCfgRep_t)); + memset(ioc->config_cmds.reply, 0, sizeof(struct LeapioraidCfgRep_t)); + ioc->config_cmds.status = LEAPIORAID_CMD_PENDING; + config_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->config_cmds.smid = smid; + memcpy(config_request, mpi_request, sizeof(struct LeapioraidCfgReq_t)); + if (ioc->logging_level & LEAPIORAID_DEBUG_CONFIG) + 
leapioraid_config_display_some_debug(ioc, smid, "config_request", NULL); + init_completion(&ioc->config_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->config_cmds.done, timeout * HZ); + if (!(ioc->config_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + if (!(ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request no reply", + NULL); + leapioraid_check_cmd_timeout(ioc, ioc->config_cmds.status, + mpi_request, + sizeof(struct LeapioraidCfgReq_t) / 4, + issue_reset); + pr_info("%s issue_reset=%d\n", __func__, issue_reset); + retry_count++; + if (ioc->config_cmds.smid == smid) + leapioraid_base_free_smid(ioc, smid); + if (ioc->config_cmds.status & LEAPIORAID_CMD_RESET) + goto retry_config; + if (ioc->shost_recovery || ioc->pci_error_recovery) { + issue_host_reset = 0; + r = -EFAULT; + } else + issue_host_reset = 1; + goto free_mem; + } + if (ioc->config_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + memcpy(mpi_reply, ioc->config_cmds.reply, + sizeof(struct LeapioraidCfgRep_t)); + if ((mpi_request->Header.PageType & 0xF) != + (mpi_reply->Header.PageType & 0xF)) { + if (!(ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request", + NULL); + leapioraid_debug_dump_mf(mpi_request, ioc->request_sz / 4); + leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4); + panic( + "%s %s: Firmware BUG: mpi_reply mismatch:\n\t\t" + "Requested PageType(0x%02x) Reply PageType(0x%02x)\n", + ioc->name, + __func__, + (mpi_request->Header.PageType & 0xF), + (mpi_reply->Header.PageType & 0xF)); + } + if (((mpi_request->Header.PageType & 0xF) == + LEAPIORAID_CONFIG_PAGETYPE_EXTENDED) && + mpi_request->ExtPageType != mpi_reply->ExtPageType) { + if (!(ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request", + NULL); + leapioraid_debug_dump_mf(mpi_request, ioc->request_sz / 4); + 
leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4); + panic( + "%s %s: Firmware BUG: mpi_reply mismatch:\n\t\t" + "Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n", + ioc->name, + __func__, + mpi_request->ExtPageType, + mpi_reply->ExtPageType); + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + } + if (retry_count) + pr_info("%s %s: retry (%d) completed!!\n", + ioc->name, __func__, retry_count); + if ((ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) && + config_page && mpi_request->Action == + LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT) { + u8 *p = (u8 *) mem.page; + + if (p) { + if ((mpi_request->Header.PageType & 0xF) != + (p[3] & 0xF)) { + if (! + (ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request", + NULL); + leapioraid_debug_dump_mf(mpi_request, + ioc->request_sz / 4); + leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4); + leapioraid_debug_dump_config(p, min_t(u16, mem.sz, + config_page_sz) / + 4); + panic( + "%s %s: Firmware BUG: config page mismatch:\n\t\t" + "Requested PageType(0x%02x) Reply PageType(0x%02x)\n", + ioc->name, + __func__, + (mpi_request->Header.PageType & 0xF), + (p[3] & 0xF)); + } + if (((mpi_request->Header.PageType & 0xF) == + LEAPIORAID_CONFIG_PAGETYPE_EXTENDED) && + (mpi_request->ExtPageType != p[6])) { + if (! 
+ (ioc->logging_level & LEAPIORAID_DEBUG_CONFIG)) + leapioraid_config_display_some_debug(ioc, smid, + "config_request", + NULL); + leapioraid_debug_dump_mf(mpi_request, + ioc->request_sz / 4); + leapioraid_debug_dump_reply(mpi_reply, ioc->reply_sz / 4); + leapioraid_debug_dump_config(p, min_t(u16, mem.sz, + config_page_sz) / + 4); + panic( + "%s %s: Firmware BUG: config page mismatch:\n\t\t" + "Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n", + ioc->name, + __func__, + mpi_request->ExtPageType, + p[6]); + } + } + memcpy(config_page, mem.page, min_t(u16, mem.sz, + config_page_sz)); + } +free_mem: + if (config_page) + leapioraid_config_free_config_dma_memory(ioc, &mem); +out: + ioc->config_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_unlock(&ioc->config_cmds.mutex); + if (issue_host_reset) { + if (ioc->drv_internal_flags & LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED) { + leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER); + r = -EFAULT; + } else { + if (leapioraid_base_check_for_fault_and_issue_reset + (ioc)) + return -EFAULT; + r = -EAGAIN; + } + } + return r; +} + +int +leapioraid_config_get_manufacturing_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManP0_t * + config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + 
LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_manufacturing_pg10(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP10_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 10; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP11_t + *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 11; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, 
config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_set_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP11_t + *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 11; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_bios_pg2(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidBiosP2_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 2; + mpi_request.Header.PageVersion = 0x04; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int 
+leapioraid_config_get_bios_pg3(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidBiosP3_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 3; + mpi_request.Header.PageVersion = 0x01; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP0_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x02; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct 
LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP1_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x04; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_set_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP1_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x04; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_iounit_pg8(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP8_t *config_page) +{ + 
struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 8; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP1_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_set_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP1_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct 
LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_ioc_pg8(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP8_t *config_page) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 8; + mpi_request.Header.PageVersion = 0x00; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_sas_device_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasDevP0_t *config_page, + u32 form, u32 handle) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = 
LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_DEVICE; + mpi_request.Header.PageVersion = 0x09; + mpi_request.Header.PageNumber = 0; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); +out: + return r; +} + +int +leapioraid_config_get_number_hba_phys(struct LEAPIORAID_ADAPTER *ioc, + u8 *num_phys) +{ + struct LeapioraidCfgReq_t mpi_request; + int r; + u16 ioc_status; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP0_t config_page; + + *num_phys = 0; + memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_CONFIG; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = 0x05; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT; + r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply, + LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, + sizeof(struct LeapioraidSasIOUnitP0_t)); + if (!r) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status == 
	    LEAPIORAID_IOCSTATUS_SUCCESS)
			*num_phys = config_page.NumPhys;
	}
out:
	return r;
}

/*
 * leapioraid_config_get_sas_iounit_pg0 - read SAS IO Unit page 0.
 * Two-step config transaction: fetch the page header first, then issue
 * READ_CURRENT to copy up to @sz bytes into the caller's @config_page.
 * Returns 0 on success, non-zero on failure.
 */
int
leapioraid_config_get_sas_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc,
				     struct LeapioraidCfgRep_t *mpi_reply,
				     struct LeapioraidSasIOUnitP0_t *config_page,
				     u16 sz)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = 0x05;
	/* zero-length SGE for the header-only request */
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
				      sz);
out:
	return r;
}

/*
 * leapioraid_config_get_sas_iounit_pg1 - read SAS IO Unit page 1
 * (header fetch then READ_CURRENT, up to @sz bytes).
 * Returns 0 on success, non-zero on failure.
 */
int
leapioraid_config_get_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc,
				     struct LeapioraidCfgRep_t *mpi_reply,
				     struct LeapioraidSasIOUnitP1_t *config_page,
				     u16 sz)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
	mpi_request.Header.PageNumber = 1;
	mpi_request.Header.PageVersion = 0x09;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
				      sz);
out:
	return r;
}

/*
 * leapioraid_config_set_sas_iounit_pg1 - write SAS IO Unit page 1.
 * Writes the CURRENT copy and then the NVRAM copy of the page.
 * NOTE(review): the status of the WRITE_CURRENT request is discarded;
 * only the WRITE_NVRAM result is returned — confirm this is intended.
 * Returns 0 on success, non-zero on failure.
 */
int
leapioraid_config_set_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc,
				     struct LeapioraidCfgRep_t *mpi_reply,
				     struct LeapioraidSasIOUnitP1_t *config_page,
				     u16 sz)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
	mpi_request.Header.PageNumber = 1;
	mpi_request.Header.PageVersion = 0x09;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				  LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_WRITE_NVRAM;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
				      sz);
out:
	return r;
}

/*
 * leapioraid_config_get_expander_pg0 - read SAS Expander page 0 for the
 * expander addressed by @form | @handle.
 * Returns 0 on success, non-zero on failure.
 */
int
leapioraid_config_get_expander_pg0(struct LEAPIORAID_ADAPTER *ioc,
				   struct LeapioraidCfgRep_t *mpi_reply,
				   struct LeapioraidExpanderP0_t *config_page,
				   u32 form, u32 handle)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = 0x06;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress = cpu_to_le32(form | handle);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
				      sizeof(*config_page));
out:
	return r;
}

/*
 * leapioraid_config_get_expander_pg1 - read SAS Expander page 1 for one
 * phy (@phy_number) of the expander identified by @handle.
 * Returns 0 on success, non-zero on failure.
 */
int
leapioraid_config_get_expander_pg1(struct LEAPIORAID_ADAPTER *ioc,
				   struct LeapioraidCfgRep_t *mpi_reply,
				   struct LeapioraidExpanderP1_t *config_page,
				   u32 phy_number, u16 handle)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
	mpi_request.Header.PageNumber = 1;
	mpi_request.Header.PageVersion = 0x02;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	/* page address encodes form, phy number and expander handle */
	mpi_request.PageAddress =
	    cpu_to_le32(LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM |
			(phy_number << LEAPIORAID_SAS_EXPAND_PGAD_PHYNUM_SHIFT) |
			handle);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
				      sizeof(*config_page));
out:
	return r;
}

/*
 * leapioraid_config_get_enclosure_pg0 - read SAS Enclosure page 0 for the
 * enclosure addressed by @form | @handle.
 * Returns 0 on success, non-zero on failure.
 */
int
leapioraid_config_get_enclosure_pg0(struct LEAPIORAID_ADAPTER *ioc,
				    struct LeapioraidCfgRep_t *mpi_reply,
				    struct LeapioraidSasEncP0_t *config_page,
				    u32 form, u32 handle)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_ENCLOSURE;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = 0x04;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress = cpu_to_le32(form | handle);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
				      sizeof(*config_page));
out:
	return r;
}

/*
 * leapioraid_config_get_phy_pg0 - read SAS PHY page 0 for @phy_number.
 * Returns 0 on success, non-zero on failure.
 */
int
leapioraid_config_get_phy_pg0(struct LEAPIORAID_ADAPTER *ioc,
			      struct LeapioraidCfgRep_t *mpi_reply,
			      struct LeapioraidSasPhyP0_t *config_page,
			      u32 phy_number)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = 0x03;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress =
	    cpu_to_le32(LEAPIORAID_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
				      sizeof(*config_page));
out:
	return r;
}

/*
 * leapioraid_config_get_phy_pg1 - read SAS PHY page 1 for @phy_number.
 * Returns 0 on success, non-zero on failure.
 */
int
leapioraid_config_get_phy_pg1(struct LEAPIORAID_ADAPTER *ioc,
			      struct LeapioraidCfgRep_t *mpi_reply,
			      struct LeapioraidSasPhyP1_t *config_page,
			      u32 phy_number)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_SAS_PHY;
	mpi_request.Header.PageNumber = 1;
	mpi_request.Header.PageVersion = 0x01;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress =
	    cpu_to_le32(LEAPIORAID_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
				      sizeof(*config_page));
out:
	return r;
}

/*
 * leapioraid_config_get_raid_volume_pg1 - read RAID Volume page 1 for the
 * volume addressed by @form | @handle.
 * Returns 0 on success, non-zero on failure.
 */
int
leapioraid_config_get_raid_volume_pg1(struct LEAPIORAID_ADAPTER *ioc,
				      struct LeapioraidCfgRep_t *mpi_reply,
				      struct LeapioraidRaidVolP1_t *config_page,
				      u32 form, u32 handle)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME;
	mpi_request.Header.PageNumber = 1;
	mpi_request.Header.PageVersion = 0x03;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress = cpu_to_le32(form | handle);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
				      sizeof(*config_page));
out:
	return r;
}

/*
 * leapioraid_config_get_number_pds - return the number of physical disks
 * backing the RAID volume @handle via RAID Volume page 0.
 * @num_pds is zeroed first and only set when the firmware reports success.
 * Returns 0 on success, non-zero on failure.
 */
int
leapioraid_config_get_number_pds(struct LEAPIORAID_ADAPTER *ioc,
				 u16 handle, u8 *num_pds)
{
	struct LeapioraidCfgReq_t mpi_request;
	struct LeapioraidRaidVolP0_t config_page;
	struct LeapioraidCfgRep_t mpi_reply;
	int r;
	u16 ioc_status;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	*num_pds = 0;
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = 0x0A;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress =
	    cpu_to_le32(LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE | handle);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
				      sizeof(struct LeapioraidRaidVolP0_t));
	if (!r) {
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS)
			*num_pds = config_page.NumPhysDisks;
	}
out:
	return r;
}

/*
 * leapioraid_config_get_raid_volume_pg0 - read RAID Volume page 0 for the
 * volume addressed by @form | @handle, copying up to @sz bytes.
 * Returns 0 on success, non-zero on failure.
 */
int
leapioraid_config_get_raid_volume_pg0(struct LEAPIORAID_ADAPTER *ioc,
				      struct LeapioraidCfgRep_t *mpi_reply,
				      struct LeapioraidRaidVolP0_t *config_page,
				      u32 form, u32 handle, u16 sz)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_VOLUME;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = 0x0A;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress = cpu_to_le32(form | handle);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
				      sz);
out:
	return r;
}

/*
 * leapioraid_config_get_phys_disk_pg0 - read RAID Physical Disk page 0
 * for the disk addressed by @form | @form_specific.
 * Returns 0 on success, non-zero on failure.
 */
int
leapioraid_config_get_phys_disk_pg0(struct LEAPIORAID_ADAPTER *ioc,
				    struct LeapioraidCfgRep_t *mpi_reply,
				    struct LeapioraidRaidPDP0_t *config_page,
				    u32 form, u32 form_specific)
{
	struct LeapioraidCfgReq_t mpi_request;
	int r;

	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_RAID_PHYSDISK;
	mpi_request.Header.PageNumber = 0;
	mpi_request.Header.PageVersion = 0x05;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.PageAddress = cpu_to_le32(form | form_specific);
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	r = leapioraid_config_request(ioc, &mpi_request, mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
				      sizeof(*config_page));
out:
	return r;
}

/*
 * leapioraid_config_get_volume_handle - map a physical-disk device handle
 * to its owning RAID volume handle.
 * Walks every RAID Config page 0 (GET_NEXT_CONFIGNUM) and scans the
 * config elements for @pd_handle; a hot-spare element yields handle 0.
 * Returns 0 on success (match found), -1 or a request error otherwise.
 */
int
leapioraid_config_get_volume_handle(struct LEAPIORAID_ADAPTER *ioc,
				    u16 pd_handle, u16 *volume_handle)
{
	struct LeapioraidRaidCfgP0_t *config_page = NULL;
	struct LeapioraidCfgReq_t mpi_request;
	struct LeapioraidCfgRep_t mpi_reply;
	int r, i, config_page_sz;
	u16 ioc_status;
	int config_num;
	u16 element_type;
	u16 phys_disk_dev_handle;

	*volume_handle = 0;
	memset(&mpi_request, 0, sizeof(struct LeapioraidCfgReq_t));
	mpi_request.Function = LEAPIORAID_FUNC_CONFIG;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_HEADER;
	mpi_request.Header.PageType = LEAPIORAID_CONFIG_PAGETYPE_EXTENDED;
	mpi_request.ExtPageType = LEAPIORAID_CONFIG_EXTPAGETYPE_RAID_CONFIG;
	mpi_request.Header.PageVersion = 0x00;
	mpi_request.Header.PageNumber = 0;
	ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
	r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply,
				      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
	if (r)
		goto out;
	mpi_request.Action = LEAPIORAID_CONFIG_ACTION_PAGE_READ_CURRENT;
	/* ExtPageLength is in 4-byte units */
	config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
	config_page = kmalloc(config_page_sz, GFP_KERNEL);
	if (!config_page) {
		r = -1;
		goto out;
	}
	config_num = 0xff;	/* 0xff: start iteration from the first config */
	while (1) {
		mpi_request.PageAddress = cpu_to_le32(config_num +
						      LEAPIORAID_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM);
		r = leapioraid_config_request(ioc, &mpi_request, &mpi_reply,
					      LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT,
					      config_page, config_page_sz);
		if (r)
			goto out;
		r = -1;
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    LEAPIORAID_IOCSTATUS_MASK;
		if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS)
			goto out;
		for (i = 0; i < config_page->NumElements; i++) {
			element_type =
			    le16_to_cpu(config_page->ConfigElement[i].ElementFlags) &
			    LEAPIORAID_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;
			if (element_type ==
			    LEAPIORAID_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT
			    || element_type ==
			    LEAPIORAID_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) {
				phys_disk_dev_handle =
				    le16_to_cpu(config_page->ConfigElement[i].PhysDiskDevHandle);
				if (phys_disk_dev_handle == pd_handle) {
					*volume_handle =
					    le16_to_cpu
					    (config_page->ConfigElement[i].VolDevHandle);
					r = 0;
					goto out;
				}
			} else if (element_type ==
				   LEAPIORAID_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) {
				/* hot spare: not owned by any volume */
				*volume_handle = 0;
				r = 0;
				goto out;
			}
		}
		config_num = config_page->ConfigNum;
	}
out:
	kfree(config_page);
	return r;
}

/*
 * leapioraid_config_get_volume_wwid - fetch the WWID of RAID volume
 * @volume_handle via RAID Volume page 1; @wwid is 0 on failure.
 * Returns 0 on success, -1 on failure.
 */
int
leapioraid_config_get_volume_wwid(struct LEAPIORAID_ADAPTER *ioc,
				  u16 volume_handle, u64 *wwid)
{
	struct LeapioraidCfgRep_t mpi_reply;
	struct LeapioraidRaidVolP1_t raid_vol_pg1;

	*wwid = 0;
	if (!(leapioraid_config_get_raid_volume_pg1(ioc, &mpi_reply,
						    &raid_vol_pg1,
						    LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE,
						    volume_handle))) {
		*wwid = le64_to_cpu(raid_vol_pg1.WWID);
		return 0;
	} else
		return -1;
}
diff --git a/drivers/scsi/leapioraid/leapioraid_func.h b/drivers/scsi/leapioraid/leapioraid_func.h
new file mode 100644
index 000000000000..9cf8206ccb3c
--- /dev/null
+++ b/drivers/scsi/leapioraid/leapioraid_func.h
@@ -0,0 +1,1258 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * Copyright (C) 2013-2021 LSI Corporation
 * Copyright (C) 2013-2021 Avago Technologies
 * Copyright (C) 2013-2021 Broadcom Inc.
 * (mailto:MPT-FusionLinux.pdl@broadcom.com)
 *
 * Copyright (C) 2024 LeapIO Tech Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.

 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 */

#ifndef LEAPIORAID_FUNC_H_INCLUDED
#define LEAPIORAID_FUNC_H_INCLUDED

#include "leapioraid.h"
/*
 * NOTE(review): the targets of the following #include directives were lost
 * in extraction (likely angle-bracketed <linux/...> / <scsi/...> headers
 * stripped as markup) — restore from the original patch before building.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/* Older kernels may not define fallthrough; make it a no-op there. */
#ifndef fallthrough
#define fallthrough
#endif

#define SYS_LOG_BUF_SIZE (0x20000)
#define MAX_UPD_PAYLOAD_SZ (0x4000)

/* Driver identity / version strings */
#define LEAPIORAID_DRIVER_NAME "LeapIoRaid"
#define LEAPIORAID_AUTHOR "LeapIO Inc."
#define LEAPIORAID_DESCRIPTION "LEAPIO RAID Driver"
#define LEAPIORAID_DRIVER_VERSION "1.00.00.00"
#define LEAPIORAID_MAJOR_VERSION (1)
#define LEAPIORAID_MINOR_VERSION (00)
#define LEAPIORAID_BUILD_VERSION (00)
#define LEAPIORAID_RELEASE_VERSION (00)

/* PCI IDs claimed by this driver */
#define LEAPIORAID_VENDOR_ID (0xD405)
#define LEAPIORAID_DEVICE_ID_1 (0x1000)
#define LEAPIORAID_DEVICE_ID_2 (0x1001)

/* Scatter-gather limits */
#define LEAPIORAID_MAX_PHYS_SEGMENTS SG_CHUNK_SIZE

#define LEAPIORAID_MIN_PHYS_SEGMENTS (16)
#define LEAPIORAID_KDUMP_MIN_PHYS_SEGMENTS (32)

#define LEAPIORAID_MAX_SG_SEGMENTS SG_MAX_SEGMENTS
#define LEAPIORAID_MAX_PHYS_SEGMENTS_STRING "SG_CHUNK_SIZE"

#define LEAPIORAID_SG_DEPTH LEAPIORAID_MAX_PHYS_SEGMENTS


/* Config-page request timeout (seconds) and common SGL flag sets */
#define LEAPIORAID_CONFIG_PAGE_DEFAULT_TIMEOUT 15
#define LEAPIORAID_CONFIG_COMMON_SGLFLAGS ((LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | \
	LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | LEAPIORAID_SGE_FLAGS_END_OF_BUFFER \
	| LEAPIORAID_SGE_FLAGS_END_OF_LIST) << LEAPIORAID_SGE_FLAGS_SHIFT)
#define LEAPIORAID_CONFIG_COMMON_WRITE_SGLFLAGS ((LEAPIORAID_SGE_FLAGS_SIMPLE_ELEMENT | \
	LEAPIORAID_SGE_FLAGS_LAST_ELEMENT | LEAPIORAID_SGE_FLAGS_END_OF_BUFFER \
	| LEAPIORAID_SGE_FLAGS_END_OF_LIST | LEAPIORAID_SGE_FLAGS_HOST_TO_IOC) \
	<< LEAPIORAID_SGE_FLAGS_SHIFT)

/* Per-device-class queue depths */
#define LEAPIORAID_SATA_QUEUE_DEPTH (32)
#define LEAPIORAID_SAS_QUEUE_DEPTH (64)
#define LEAPIORAID_RAID_QUEUE_DEPTH (64)
#define LEAPIORAID_KDUMP_SCSI_IO_DEPTH (64)
#define LEAPIORAID_RAID_MAX_SECTORS (128)

#define LEAPIORAID_NAME_LENGTH (32)
#define LEAPIORAID_DRIVER_NAME_LENGTH (24)
#define LEAPIORAID_STRING_LENGTH (64)

#define LEAPIORAID_FRAME_START_OFFSET (256)
#define LEAPIORAID_REPLY_FREE_POOL_SIZE (512)
#define LEAPIORAID_MAX_CALLBACKS (32)
#define LEAPIORAID_MAX_HBA_NUM_PHYS (16)

#define LEAPIORAID_INTERNAL_CMDS_COUNT (10)
#define LEAPIORAID_INTERNAL_SCSIIO_CMDS_COUNT (3)
#define LEAPIORAID_INTERNAL_SCSIIO_FOR_IOCTL (1)
#define LEAPIORAID_INTERNAL_SCSIIO_FOR_DISCOVERY (2)

#define LEAPIORAID_INVALID_DEVICE_HANDLE (0xFFFF)
#define LEAPIORAID_MAX_CHAIN_ELEMT_SZ (16)
#define LEAPIORAID_DEFAULT_NUM_FWCHAIN_ELEMTS (8)
#define LEAPIORAID_READL_RETRY_COUNT_OF_THIRTY (30)
#define LEAPIORAID_READL_RETRY_COUNT_OF_THREE (3)

/* IOC reset notification phases */
#define LEAPIORAID_IOC_PRE_RESET_PHASE (1)
#define LEAPIORAID_IOC_AFTER_RESET_PHASE (2)
#define LEAPIORAID_IOC_DONE_RESET_PHASE (3)

/* LEAPIORAID_TARGET.flags bits */
#define LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT (0x01)
#define LEAPIORAID_TARGET_FLAGS_VOLUME (0x02)
#define LEAPIORAID_TARGET_FASTPATH_IO (0x08)

/* High-IOPS queue tuning */
#define LEAPIORAID_DEVICE_HIGH_IOPS_DEPTH (8)
#define LEAPIORAID_HIGH_IOPS_REPLY_QUEUES (8)
#define LEAPIORAID_HIGH_IOPS_BATCH_COUNT (16)
#define LEAPIORAID_GEN35_MAX_MSIX_QUEUES (128)
#define LEAPIORAID_RDPQ_MAX_INDEX_IN_ONE_CHUNK (16)

#define LEAPIORAID_IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED (0x2810)

/* Compatibility fallbacks for older kernel headers */
#ifndef DID_TRANSPORT_DISRUPTED
#define DID_TRANSPORT_DISRUPTED DID_BUS_BUSY
#endif
#ifndef ULLONG_MAX
#define ULLONG_MAX (~0ULL)
#endif
#ifndef USHORT_MAX
#define USHORT_MAX ((u16)(~0U))
#endif
#ifndef UINT_MAX
#define UINT_MAX (~0U)
#endif

/* Return the driver-private area of a Scsi_Host. */
static inline void *leapioraid_shost_private(struct Scsi_Host *shost)
{
	return (void *)shost->hostdata;
}

/* Manufacturing page 10 — firmware-defined layout; do not reorder fields. */
struct LeapioraidManuP10_t {
	struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
	U8 OEMIdentifier;
	U8 Reserved1;
	U16 Reserved2;
	U32 Reserved3;
	U32 GenericFlags0;
	U32 GenericFlags1;
	U32 Reserved4;
	U32 OEMSpecificFlags0;
	U32 OEMSpecificFlags1;
	U32 Reserved5[18];
};

/* Manufacturing page 11 — firmware-defined layout; do not reorder fields. */
struct LeapioraidManuP11_t {
	struct LEAPIORAID_CONFIG_PAGE_HEADER Header;
	__le32 Reserved1;
	u8 Reserved2;
	u8 EEDPTagMode;
	u8 Reserved3;
	u8 Reserved4;
	__le32 Reserved5[8];
	u16 AddlFlags2;
	u8 AddlFlags3;
	u8 Reserved6;
	__le32 Reserved7[7];
	u8 AbortTO;
	u8 NumPerDevEvents;
	u8 HostTraceBufferDecrementSizeKB;
	u8 HostTraceBufferFlags;
	u16 HostTraceBufferMaxSizeKB;
	u16 HostTraceBufferMinSizeKB;
	u8 CoreDumpTOSec;
	u8 TimeSyncInterval;
	u16 Reserved9;
	__le32 Reserved10;
};

/* Per-scsi_target private data. */
struct LEAPIORAID_TARGET {
	struct scsi_target *starget;
	u64 sas_address;
	struct leapioraid_raid_device *raid_device;
	u16 handle;
	int num_luns;
	u32 flags;
	u8 deleted;
	u8 tm_busy;
	struct leapioraid_hba_port *port;
	struct leapioraid_sas_device *sas_dev;
};

/* LEAPIORAID_DEVICE.flags bits */
#define LEAPIORAID_DEVICE_FLAGS_INIT (0x01)
#define LEAPIORAID_DEVICE_TLR_ON (0x02)

/* Per-scsi_device (LUN) private data. */
struct LEAPIORAID_DEVICE {
	struct LEAPIORAID_TARGET *sas_target;
	unsigned int lun;
	u32 flags;
	u8 configured_lun;
	u8 block;
	u8 deleted;
	u8 tlr_snoop_check;
	u8 ignore_delay_remove;
	u8 ncq_prio_enable;
	unsigned long ata_command_pending;
};

#define LEAPIORAID_CMND_PENDING_BIT (0)
/* Internal command state bits (leapioraid_internal_cmd.status) */
#define LEAPIORAID_CMD_NOT_USED (0x8000)
#define LEAPIORAID_CMD_COMPLETE (0x0001)
#define LEAPIORAID_CMD_PENDING (0x0002)
#define LEAPIORAID_CMD_REPLY_VALID (0x0004)
#define LEAPIORAID_CMD_RESET (0x0008)
#define LEAPIORAID_CMD_COMPLETE_ASYNC (0x0010)

/* Synchronous internal (driver-originated) command tracking. */
struct leapioraid_internal_cmd {
	struct mutex mutex;
	struct completion done;
	void *reply;
	void *sense;
	u16 status;
	u16 smid;
};

/* Parameters and results of an internally issued SCSI IO. */
struct leapioraid_scsi_io_transfer {
	u16 handle;
	u8 is_raid;
	enum dma_data_direction dir;
	u32 data_length;
	dma_addr_t data_dma;
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	u32 lun;
	u8 cdb_length;
	u8 cdb[32];
	u8 timeout;
	u8 VF_ID;
	u8 VP_ID;
	u8 valid_reply;
	u32 sense_length;
	u16 ioc_status;
	u8 scsi_state;
	u8 scsi_status;
	u32 log_info;
	u32 transfer_length;
};

/* Queued internal command (list-based, asynchronous variant). */
struct leapioraid_internal_qcmd {
	struct list_head list;
	void *request;
	void *reply;
	void *sense;
	u16 status;
	u16 smid;
	struct leapioraid_scsi_io_transfer *transfer_packet;
};

#define LEAPIORAID_WIDE_PORT_API (1)
#define LEAPIORAID_WIDE_PORT_API_PLUS (1)

/* One attached SAS/SATA end device; refcounted via @refcount. */
struct leapioraid_sas_device {
	struct list_head list;
	struct scsi_target *starget;
	u64 sas_address;
	u64 device_name;
	u16 handle;
	u64 sas_address_parent;
	u16 enclosure_handle;
	u64 enclosure_logical_id;
	u16 volume_handle;
	u64 volume_wwid;
	u32 device_info;
	int id;
	int channel;
	u16 slot;
	u8 phy;
	u8 responding;
	u8 fast_path;
	u8 pfa_led_on;
	struct kref refcount;
	u8 *serial_number;
	u8 pend_sas_rphy_add;
	u8 enclosure_level;
	u8 chassis_slot;
	u8 is_chassis_slot_valid;
	u8 connector_name[5];
	u8 ssd_device;
	u8 supports_sata_smart;
	u8 port_type;
	struct leapioraid_hba_port *port;
	struct sas_rphy *rphy;
};

/* Take a reference on @s. */
static inline
void leapioraid_sas_device_get(struct leapioraid_sas_device *s)
{
	kref_get(&s->refcount);
}

/* kref release callback: frees the containing sas_device. */
static inline
void leapioraid_sas_device_free(struct kref *r)
{
	kfree(container_of(r, struct leapioraid_sas_device, refcount));
}

/* Drop a reference on @s; frees it when the count hits zero. */
static inline
void leapioraid_sas_device_put(struct leapioraid_sas_device *s)
{
	kref_put(&s->refcount, leapioraid_sas_device_free);
}

/* One firmware RAID volume exposed as a SCSI device. */
struct leapioraid_raid_device {
	struct list_head list;
	struct scsi_target *starget;
	struct scsi_device *sdev;
	u64 wwid;
	u16 handle;
	u16 block_sz;
	int id;
	int channel;
	u8 volume_type;
	u8 num_pds;
	u8 responding;
	u8 percent_complete;
	u8 direct_io_enabled;
	u8 stripe_exponent;
	u8 block_exponent;
	u64 max_lba;
	u32 stripe_sz;
	u32 device_info;
	u16 pd_handle[8];
};

/* BIOS-requested boot device reference. */
struct leapioraid_boot_device {
	int channel;
	void *device;
};

/* SAS transport-layer port object (possibly wide). */
struct leapioraid_sas_port {
	struct list_head port_list;
	u8 num_phys;
	struct leapioraid_hba_port *hba_port;
	struct sas_identify remote_identify;
	struct sas_rphy *rphy;
#if defined(LEAPIORAID_WIDE_PORT_API)
	struct sas_port *port;
#endif
	struct list_head phy_list;
};

/* One SAS phy and its attachment state. */
struct leapioraid_sas_phy {
	struct list_head port_siblings;
	struct sas_identify identify;
	struct sas_identify remote_identify;
	struct sas_phy *phy;
	u8 phy_id;
	u16 handle;
	u16 attached_handle;
	u8 phy_belongs_to_port;
	u8 hba_vphy;
	struct leapioraid_hba_port *port;
};

/* SAS topology node: the HBA itself or an expander. */
struct leapioraid_raid_sas_node {
	struct list_head list;
	struct device *parent_dev;
	u8 num_phys;
	u64 sas_address;
	u16 handle;
	u64 sas_address_parent;
	u16 enclosure_handle;
	u64 enclosure_logical_id;
	u8 responding;
	u8 nr_phys_allocated;
	struct leapioraid_hba_port *port;
	struct leapioraid_sas_phy *phy;
	struct list_head sas_port_list;
	struct sas_rphy *rphy;
};

/* Cached enclosure page 0, kept on ioc->enclosure_list. */
struct leapioraid_enclosure_node {
	struct list_head list;
	struct LeapioraidSasEncP0_t pg0;
};

/* Adapter reset flavors. */
enum reset_type {
	FORCE_BIG_HAMMER,
	SOFT_RESET,
};

/* One chain buffer and its DMA address. */
struct leapioraid_chain_tracker {
	void *chain_buffer;
	dma_addr_t chain_buffer_dma;
};

struct leapioraid_chain_lookup {
	struct leapioraid_chain_tracker *chains_per_smid;
	atomic_t chain_offset;
};

/* Per-smid SCSI IO tracking. */
struct leapioraid_scsiio_tracker {
	u16 smid;
	struct scsi_cmnd *scmd;
	u8 cb_idx;
	u8 direct_io;
	struct list_head chain_list;
	u16 msix_io;
};

/* Per-smid tracking for hi-priority/internal requests. */
struct leapioraid_request_tracker {
	u16 smid;
	u8 cb_idx;
	struct list_head tracker_list;
};

/* Deferred target-reset entry. */
struct leapioraid_tr_list {
	struct list_head list;
	u16 handle;
	u16 state;
};

/* Deferred SAS IO-unit-control entry. */
struct leapioraid_sc_list {
	struct list_head list;
	u16 handle;
};

/* Deferred event acknowledgment entry. */
struct leapioraid_event_ack_list {
	struct list_head list;
	U16 Event;
	U32 EventContext;
};

/* Per-MSI-X reply queue state. */
struct leapioraid_adapter_reply_queue {
	struct LEAPIORAID_ADAPTER *ioc;
	u8 msix_index;
	u32 reply_post_host_index;
	union LeapioraidRepDescUnion_t *reply_post_free;
	char name[LEAPIORAID_NAME_LENGTH];
	atomic_t busy;
	cpumask_var_t affinity_hint;
	u32 os_irq;
	struct irq_poll irqpoll;
	bool irq_poll_scheduled;
	bool irq_line_enable;
	bool is_blk_mq_poll_q;
	struct list_head list;
};

/* blk-mq poll (io_uring iopoll) queue wrapper. */
struct leapioraid_blk_mq_poll_queue {
	atomic_t busy;
	atomic_t pause;
	struct leapioraid_adapter_reply_queue *reply_q;
};

/* Firmware version, accessible as a struct or one 32-bit word. */
union leapioraid_version_union {
	struct LEAPIORAID_VERSION_STRUCT Struct;
	u32 Word;
};

/* Function-pointer types for the SGE-building / request-posting hooks. */
typedef void (*LEAPIORAID_ADD_SGE)(void *paddr, u32 flags_length,
				   dma_addr_t dma_addr);
typedef int (*LEAPIORAID_BUILD_SG_SCMD)(struct LEAPIORAID_ADAPTER *ioc,
					struct scsi_cmnd *scmd, u16 smid);
typedef void (*LEAPIORAID_BUILD_SG)(struct LEAPIORAID_ADAPTER *ioc, void *psge,
				    dma_addr_t data_out_dma, size_t data_out_sz,
				    dma_addr_t data_in_dma, size_t data_in_sz);
typedef void (*LEAPIORAID_BUILD_ZERO_LEN_SGE)(struct LEAPIORAID_ADAPTER *ioc,
					      void *paddr);
typedef void (*PUT_SMID_IO_FP_HIP_TA)(struct LEAPIORAID_ADAPTER *ioc, u16 smid,
				      u16 funcdep);
typedef void (*PUT_SMID_DEFAULT)(struct LEAPIORAID_ADAPTER *ioc, u16 smid);
typedef u32(*BASE_READ_REG) (const void __iomem *addr,
			     u8 retry_count);
typedef u8(*GET_MSIX_INDEX) (struct LEAPIORAID_ADAPTER *ioc,
			     struct scsi_cmnd *scmd);

/* Cached IOC Facts reply. */
struct leapioraid_facts {
	u16 MsgVersion;
	u16 HeaderVersion;
	u8 IOCNumber;
	u8 VP_ID;
	u8 VF_ID;
	u16 IOCExceptions;
	u16 IOCStatus;
	u32 IOCLogInfo;
	u8 MaxChainDepth;
	u8 WhoInit;
	u8 NumberOfPorts;
	u8 MaxMSIxVectors;
	u16 RequestCredit;
	u16 ProductID;
	u32 IOCCapabilities;
	union leapioraid_version_union FWVersion;
	u16 IOCRequestFrameSize;
	u16 IOCMaxChainSegmentSize;
	u16 MaxInitiators;
	u16 MaxTargets;
	u16 MaxSasExpanders;
	u16 MaxEnclosures;
	u16 ProtocolFlags;
	u16 HighPriorityCredit;
	u16 MaxReplyDescriptorPostQueueDepth;
	u8 ReplyFrameSize;
	u8 MaxVolumes;
	u16 MaxDevHandle;
	u16 MaxPersistentEntries;
	u16 MinDevHandle;
	u8 CurrentHostPageSize;
};

/* Cached Port Facts reply. */
struct leapioraid_port_facts {
	u8 PortNumber;
	u8 VP_ID;
	u8 VF_ID;
	u8 PortType;
	u16 MaxPostedCmdBuffers;
};

/* One reply-descriptor post queue and its DMA address. */
struct leapioraid_reply_post_struct {
	union LeapioraidRepDescUnion_t *reply_post_free;
	dma_addr_t reply_post_free_dma;
};

/* Virtual phy hanging off an hba_port. */
struct leapioraid_virtual_phy {
	struct list_head list;
	u64 sas_address;
	u32 phy_mask;
	u8 flags;
};

#define LEAPIORAID_VPHY_FLAG_DIRTY_PHY (0x01)
/* One host-side (narrow or wide) HBA port. */
struct leapioraid_hba_port {
	struct list_head list;
	u64 sas_address;
	u32 phy_mask;
	u8 port_id;
	u8 flags;
	u32 vphys_mask;
	struct list_head vphys_list;
};

#define LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT (0x01)
#define LEAPIORAID_HBA_PORT_FLAG_NEW_PORT (0x02)
#define LEAPIORAID_MULTIPATH_DISABLED_PORT_ID (0xFF)

typedef void (*LEAPIORAID_FLUSH_RUNNING_CMDS)(struct LEAPIORAID_ADAPTER *
					      ioc);

/*
 * struct LEAPIORAID_ADAPTER - per-controller (IOC) state.
 * Lives in the Scsi_Host private area; one instance per PCI function.
 */
struct LEAPIORAID_ADAPTER {
	struct list_head list;
	struct Scsi_Host *shost;
	u8 id;
	u8 IOCNumber;
	int cpu_count;
	char name[LEAPIORAID_NAME_LENGTH];
	char driver_name[LEAPIORAID_DRIVER_NAME_LENGTH];
	char tmp_string[LEAPIORAID_STRING_LENGTH];
	struct pci_dev *pdev;
	struct LeapioraidSysInterfaceRegs_t __iomem *chip;
	phys_addr_t chip_phys;
	int logging_level;
	int fwfault_debug;
	u8 ir_firmware;
	int bars;
	u8 mask_interrupts;
	struct mutex pci_access_mutex;
	/* fault-reset / hot-unplug / SMART-poll work queues */
	char fault_reset_work_q_name[48];
	char hba_hot_unplug_work_q_name[48];
	struct workqueue_struct *fault_reset_work_q;
	struct workqueue_struct *hba_hot_unplug_work_q;
	struct delayed_work fault_reset_work;
	struct delayed_work hba_hot_unplug_work;
	struct workqueue_struct *smart_poll_work_q;
	struct delayed_work smart_poll_work;
	u8 adapter_over_temp;
	/* firmware event processing */
	char firmware_event_name[48];
	struct workqueue_struct *firmware_event_thread;
	spinlock_t fw_event_lock;
	struct list_head fw_event_list;
	struct leapioraid_fw_event_work *current_event;
	u8 fw_events_cleanup;
	int aen_event_read_flag;
	u8 broadcast_aen_busy;
	u16 broadcast_aen_pending;
	/* reset / recovery state */
	u8 shost_recovery;
	u8 got_task_abort_from_ioctl;
	u8 got_task_abort_from_sysfs;
	struct mutex reset_in_progress_mutex;
	struct mutex hostdiag_unlock_mutex;
	spinlock_t ioc_reset_in_progress_lock;
	spinlock_t hba_hot_unplug_lock;
	u8 ioc_link_reset_in_progress;
	int ioc_reset_status;
	u8 ignore_loginfos;
	u8 remove_host;
	u8 pci_error_recovery;
	u8 wait_for_discovery_to_complete;
	u8 is_driver_loading;
	u8 port_enable_failed;
	u8 start_scan;
	u16 start_scan_failed;
	/* MSI-X / interrupt routing */
	u8 msix_enable;
	u8 *cpu_msix_table;
	resource_size_t **reply_post_host_index;
	u16 cpu_msix_table_sz;
	u32 ioc_reset_count;
	LEAPIORAID_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
	u32 non_operational_loop;
	u8 ioc_coredump_loop;
	u32 timestamp_update_count;
	u32 time_sync_interval;
	u8 multipath_on_hba;
	/* high-IOPS load balancing */
	atomic64_t total_io_cnt;
	atomic64_t high_iops_outstanding;
	bool msix_load_balance;
	u16 thresh_hold;
	u8 high_iops_queues;
	u8 iopoll_q_start_index;
	u32 drv_internal_flags;
	u32 drv_support_bitmap;
	u32 dma_mask;
	bool enable_sdev_max_qd;
	bool use_32bit_dma;
	struct leapioraid_blk_mq_poll_queue *blk_mq_poll_queues;
	/* callback indexes registered with the base driver */
	u8 scsi_io_cb_idx;
	u8 tm_cb_idx;
	u8 transport_cb_idx;
	u8 scsih_cb_idx;
	u8 ctl_cb_idx;
	u8 ctl_tm_cb_idx;
	u8 base_cb_idx;
	u8 port_enable_cb_idx;
	u8 config_cb_idx;
	u8 tm_tr_cb_idx;
	u8 tm_tr_volume_cb_idx;
	u8 tm_tr_internal_cb_idx;
	u8 tm_sas_control_cb_idx;
	/* internal command channels (one mutex/completion each) */
	struct leapioraid_internal_cmd base_cmds;
	struct leapioraid_internal_cmd port_enable_cmds;
	struct leapioraid_internal_cmd transport_cmds;
	struct leapioraid_internal_cmd scsih_cmds;
	struct leapioraid_internal_cmd tm_cmds;
	struct leapioraid_internal_cmd ctl_cmds;
	struct leapioraid_internal_cmd config_cmds;
	/* NOTE(review): "intenal" looks like a typo for "internal" in this
	 * identifier; it is kept as-is since renaming would touch all users. */
	struct list_head scsih_q_intenal_cmds;
	spinlock_t scsih_q_internal_lock;
	/* SGE construction hooks */
	LEAPIORAID_ADD_SGE base_add_sg_single;
	LEAPIORAID_BUILD_SG_SCMD build_sg_scmd;
	LEAPIORAID_BUILD_SG build_sg;
	LEAPIORAID_BUILD_ZERO_LEN_SGE build_zero_len_sge;
	u16 sge_size_ieee;
	LEAPIORAID_BUILD_SG build_sg_mpi;
	LEAPIORAID_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi;
	/* event notification masks and log */
	u32 event_type[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS];
	u32 event_context;
	void *event_log;
	u32 event_masks[LEAPIORAID_EVENT_NOTIFY_EVENTMASK_WORDS];
	u8 disable_eedp_support;
	u8 tm_custom_handling;
	u16 max_shutdown_latency;
	u16 max_wideport_qd;
	u16 max_narrowport_qd;
	u8 max_sata_qd;
	/* cached firmware facts and config pages */
	struct leapioraid_facts facts;
	struct leapioraid_facts prev_fw_facts;
	struct leapioraid_port_facts *pfacts;
	struct LeapioraidManP0_t manu_pg0;
	struct LeapioraidManuP10_t manu_pg10;
	struct LeapioraidManuP11_t manu_pg11;
	struct LeapioraidBiosP2_t bios_pg2;
	struct LeapioraidBiosP3_t bios_pg3;
	struct LeapioraidIOCP8_t ioc_pg8;
	struct LeapioraidIOUnitP0_t iounit_pg0;
	struct LeapioraidIOUnitP1_t iounit_pg1;
	struct LeapioraidIOUnitP8_t iounit_pg8;
	struct LeapioraidIOCP1_t ioc_pg1_copy;
	struct leapioraid_boot_device req_boot_device;
	struct leapioraid_boot_device req_alt_boot_device;
	struct leapioraid_boot_device current_boot_device;
	/* SAS topology: HBA node, expanders, enclosures, devices, volumes */
	struct leapioraid_raid_sas_node sas_hba;
	struct list_head sas_expander_list;
	struct list_head enclosure_list;
	spinlock_t sas_node_lock;
	struct list_head sas_device_list;
	struct list_head sas_device_init_list;
	spinlock_t sas_device_lock;
	struct list_head pcie_device_list;
	struct list_head pcie_device_init_list;
	spinlock_t pcie_device_lock;
	struct list_head raid_device_list;
	spinlock_t raid_device_lock;
	u8 io_missing_delay;
	u16 device_missing_delay;
	int sas_id;
	int pcie_target_id;
	void *blocking_handles;
	void *pd_handles;
	u16 pd_handles_sz;
	void *pend_os_device_add;
	u16 pend_os_device_add_sz;
	/* config-page DMA buffer */
	u16 config_page_sz;
	void *config_page;
	dma_addr_t config_page_dma;
	void *config_vaddr;
	/* request/reply message pools */
	u16 hba_queue_depth;
	u16 sge_size;
	u16 scsiio_depth;
	u16 request_sz;
	u8 *request;
	dma_addr_t request_dma;
	u32 request_dma_sz;
	spinlock_t scsi_lookup_lock;
	int pending_io_count;
	wait_queue_head_t reset_wq;
	int pending_tm_count;
	u32 terminated_tm_count;
	wait_queue_head_t pending_tm_wq;
	u8 out_of_frames;
	wait_queue_head_t no_frames_tm_wq;
	u16 *io_queue_num;
	u32 page_size;
	/* chain buffers for large scatter-gather lists */
	struct leapioraid_chain_lookup *chain_lookup;
	struct list_head free_chain_list;
	struct dma_pool *chain_dma_pool;
	u16 max_sges_in_main_message;
	u16 max_sges_in_chain_message;
	u16 chains_needed_per_io;
	u16 chain_segment_sz;
	u16 chains_per_prp_buffer;
	/* hi-priority request frames */
	u16 hi_priority_smid;
	u8 *hi_priority;
	dma_addr_t hi_priority_dma;
	u16 hi_priority_depth;
	struct leapioraid_request_tracker *hpr_lookup;
	struct list_head hpr_free_list;
	/* internal request frames */
	u16 internal_smid;
	u8 *internal;
	dma_addr_t internal_dma;
	u16 internal_depth;
	struct leapioraid_request_tracker *internal_lookup;
	struct list_head internal_free_list;
	/* sense buffer pool */
	u8 *sense;
	dma_addr_t sense_dma;
	struct dma_pool *sense_dma_pool;
	/* reply frames and reply-free queue */
	u16 reply_sz;
	u8 *reply;
	dma_addr_t reply_dma;
	u32 reply_dma_max_address;
	u32 reply_dma_min_address;
	struct dma_pool *reply_dma_pool;
	u16 reply_free_queue_depth;
	__le32 *reply_free;
	dma_addr_t reply_free_dma;
	struct dma_pool *reply_free_dma_pool;
	u32 reply_free_host_index;
	/* reply-descriptor post queues */
	u16 reply_post_queue_depth;
	struct leapioraid_reply_post_struct *reply_post;
	struct dma_pool *reply_post_free_dma_pool;
	struct dma_pool *reply_post_free_array_dma_pool;
	struct LeapioraidIOCInitRDPQArrayEntry *reply_post_free_array;
	dma_addr_t reply_post_free_array_dma;
	u8 reply_queue_count;
	struct list_head reply_queue_list;
	u8 rdpq_array_capable;
	u8 rdpq_array_enable;
	u8 rdpq_array_enable_assigned;
	u8 combined_reply_queue;
	u8 nc_reply_index_count;
	u8 smp_affinity_enable;
	resource_size_t **replyPostRegisterIndex;
	/* deferred-work lists drained after resource shortage/reset */
	struct list_head delayed_tr_list;
	struct list_head delayed_tr_volume_list;
	struct list_head delayed_internal_tm_list;
	struct list_head delayed_sc_list;
	struct list_head delayed_event_ack_list;
	u32 ring_buffer_offset;
	u32 ring_buffer_sz;
	u8 reset_from_user;
	u8 hide_ir_msg;
	u8 warpdrive_msg;
	u8 mfg_pg10_hide_flag;
	u8 hide_drives;
	u8 atomic_desc_capable;
	/* register access / request posting hooks */
	BASE_READ_REG base_readl;
	PUT_SMID_IO_FP_HIP_TA put_smid_scsi_io;
	PUT_SMID_IO_FP_HIP_TA put_smid_fast_path;
	PUT_SMID_IO_FP_HIP_TA put_smid_hi_priority;
	PUT_SMID_DEFAULT put_smid_default;
	GET_MSIX_INDEX get_msix_index_for_smlio;
	void *device_remove_in_progress;
	u16 device_remove_in_progress_sz;
	u8 *tm_tr_retry;
	u32 tm_tr_retry_sz;
	u8 temp_sensors_count;
	struct list_head port_table_list;
	/* PCIe trace log buffer and its poll work */
	u8 *log_buffer;
	dma_addr_t log_buffer_dma;
	char pcie_log_work_q_name[48];
	struct workqueue_struct *pcie_log_work_q;
	struct delayed_work pcie_log_work;
	u32 open_pcie_trace;
};

/* logging_level bit masks — enable classes of debug output */
#define LEAPIORAID_DEBUG (0x00000001)
#define LEAPIORAID_DEBUG_MSG_FRAME (0x00000002)
#define LEAPIORAID_DEBUG_SG (0x00000004)
#define LEAPIORAID_DEBUG_EVENTS (0x00000008)
#define LEAPIORAID_DEBUG_EVENT_WORK_TASK (0x00000010)
#define LEAPIORAID_DEBUG_INIT (0x00000020)
#define LEAPIORAID_DEBUG_EXIT (0x00000040)
#define LEAPIORAID_DEBUG_FAIL (0x00000080)
#define LEAPIORAID_DEBUG_TM (0x00000100)
#define LEAPIORAID_DEBUG_REPLY (0x00000200)
#define LEAPIORAID_DEBUG_HANDSHAKE (0x00000400)
#define LEAPIORAID_DEBUG_CONFIG (0x00000800)
#define LEAPIORAID_DEBUG_DL (0x00001000)
#define LEAPIORAID_DEBUG_RESET (0x00002000)
#define LEAPIORAID_DEBUG_SCSI (0x00004000)
#define LEAPIORAID_DEBUG_IOCTL (0x00008000)
#define LEAPIORAID_DEBUG_CSMISAS (0x00010000)
#define LEAPIORAID_DEBUG_SAS (0x00020000)
#define LEAPIORAID_DEBUG_TRANSPORT (0x00040000)
#define LEAPIORAID_DEBUG_TASK_SET_FULL (0x00080000)

/* Run CMD only when the matching debug BITS are set in ioc->logging_level. */
#define LEAPIORAID_CHECK_LOGGING(IOC, CMD, BITS) \
{ \
	if (IOC->logging_level & BITS) \
		CMD; \
}

/* Per-class conditional debug printers built on LEAPIORAID_CHECK_LOGGING. */
#define dprintk(IOC, CMD) \
	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG)
#define dsgprintk(IOC, CMD) \
	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SG)
#define devtprintk(IOC, CMD) \
	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_EVENTS)
#define dewtprintk(IOC, CMD) \
	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_EVENT_WORK_TASK)
#define dinitprintk(IOC, CMD) \
	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_INIT)
#define dexitprintk(IOC, CMD) \
	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_EXIT)
#define dfailprintk(IOC, CMD) \
	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_FAIL)
#define dtmprintk(IOC, CMD) \
	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_TM)
#define dreplyprintk(IOC, CMD) \
	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_REPLY)
#define dhsprintk(IOC, CMD) \
	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_HANDSHAKE)
+#define dcprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_CONFIG)
+#define ddlprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_DL)
+#define drsprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_RESET)
+#define dsprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SCSI)
+#define dctlprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_IOCTL)
+#define dcsmisasprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_CSMISAS)
+#define dsasprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SAS)
+/*
+ * NOTE(review): LEAPIORAID_DEBUG_SAS_WIDE is not among the
+ * LEAPIORAID_DEBUG_* flags defined above; any use of dsastransport()
+ * will fail to compile.  Confirm the intended flag (likely
+ * LEAPIORAID_DEBUG_SAS or LEAPIORAID_DEBUG_TRANSPORT) or define it.
+ */
+#define dsastransport(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_SAS_WIDE)
+#define dmfprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_MSG_FRAME)
+#define dtsfprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_TASK_SET_FULL)
+#define dtransportprintk(IOC, CMD) \
+	LEAPIORAID_CHECK_LOGGING(IOC, CMD, LEAPIORAID_DEBUG_TRANSPORT)
+
+/**
+ * leapioraid_debug_dump_words - hex-dump a buffer of 32-bit LE words
+ * @prefix: label printed before the dump ("mf", "reply" or "config")
+ * @mpi_request: buffer to dump (treated as an array of __le32)
+ * @sz: number of 32-bit words to print (not bytes)
+ *
+ * Common helper for the three dump wrappers below, which were
+ * previously three identical copies differing only in the label.
+ * Prints eight words per line, CPU-endian.
+ */
+static inline void
+leapioraid_debug_dump_words(const char *prefix, void *mpi_request, int sz)
+{
+	int i;
+	__le32 *mfp = (__le32 *) mpi_request;
+
+	pr_info("%s:\n\t", prefix);
+	for (i = 0; i < sz; i++) {
+		if (i && ((i % 8) == 0))
+			pr_info("\n\t");
+		pr_info("%08x ", le32_to_cpu(mfp[i]));
+	}
+	pr_info("\n");
+}
+
+/* Dump a message frame; @sz is a count of 32-bit words. */
+static inline void
+leapioraid_debug_dump_mf(void *mpi_request, int sz)
+{
+	leapioraid_debug_dump_words("mf", mpi_request, sz);
+}
+
+/* Dump a reply frame; @sz is a count of 32-bit words. */
+static inline void
+leapioraid_debug_dump_reply(void *mpi_request, int sz)
+{
+	leapioraid_debug_dump_words("reply", mpi_request, sz);
+}
+
+/* Dump a config request/page; @sz is a count of 32-bit words. */
+static inline void
+leapioraid_debug_dump_config(void *mpi_request, int sz)
+{
+	leapioraid_debug_dump_words("config", mpi_request, sz);
+}
+
+#define LEAPIORAID_DRV_INTERNAL_BITMAP_BLK_MQ	(0x00000001)
+#define LEAPIORAID_DRV_INERNAL_FIRST_PE_ISSUED (0x00000002) + +typedef u8(*LEAPIORAID_CALLBACK) (struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); + +#define SCSIH_MAP_QUEUE(shost) static void leapioraid_scsihost_map_queues(shost) + +extern struct list_head leapioraid_ioc_list; +extern spinlock_t leapioraid_gioc_lock; +void leapioraid_base_start_watchdog(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_stop_watchdog(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_start_log_watchdog(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_stop_log_watchdog(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_trace_log_init(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_attach(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_detach(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_map_resources(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_free_resources(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_free_enclosure_list(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_hard_reset_handler(struct LEAPIORAID_ADAPTER *ioc, + enum reset_type type); +void *leapioraid_base_get_msg_frame(struct LEAPIORAID_ADAPTER *ioc, u16 smid); +void *leapioraid_base_get_sense_buffer(struct LEAPIORAID_ADAPTER *ioc, + u16 smid); +__le32 leapioraid_base_get_sense_buffer_dma(struct LEAPIORAID_ADAPTER *ioc, + u16 smid); +__le64 leapioraid_base_get_sense_buffer_dma_64(struct LEAPIORAID_ADAPTER *ioc, + u16 smid); +void leapioraid_base_sync_reply_irqs(struct LEAPIORAID_ADAPTER *ioc, u8 poll); +u16 leapioraid_base_get_smid_hpr(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx); +u16 leapioraid_base_get_smid_scsiio(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx, + struct scsi_cmnd *scmd); +u16 leapioraid_base_get_smid(struct LEAPIORAID_ADAPTER *ioc, u8 cb_idx); +void leapioraid_base_free_smid(struct LEAPIORAID_ADAPTER *ioc, u16 smid); +void leapioraid_base_initialize_callback_handler(void); +u8 
leapioraid_base_register_callback_handler(LEAPIORAID_CALLBACK cb_func); +void leapioraid_base_release_callback_handler(u8 cb_idx); +u8 leapioraid_base_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +u8 leapioraid_port_enable_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); +void *leapioraid_base_get_reply_virt_addr(struct LEAPIORAID_ADAPTER *ioc, + u32 phys_addr); +u32 leapioraid_base_get_iocstate(struct LEAPIORAID_ADAPTER *ioc, int cooked); +int leapioraid_base_check_and_get_msix_vectors(struct pci_dev *pdev); +void leapioraid_base_fault_info(struct LEAPIORAID_ADAPTER *ioc, u16 fault_code); +#define leapioraid_print_fault_code(ioc, fault_code) \ + do { \ + pr_err("%s fault info from func: %s\n", ioc->name, __func__); \ + leapioraid_base_fault_info(ioc, fault_code); \ + } while (0) +void leapioraid_base_coredump_info(struct LEAPIORAID_ADAPTER *ioc, + u16 fault_code); +int leapioraid_base_wait_for_coredump_completion(struct LEAPIORAID_ADAPTER *ioc, + const char *caller); +int leapioraid_base_sas_iounit_control(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSasIoUnitControlRep_t * + mpi_reply, + struct LeapioraidSasIoUnitControlReq_t * + mpi_request); +int leapioraid_base_scsi_enclosure_processor(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSepRep_t *mpi_reply, + struct LeapioraidSepReq_t *mpi_request); +void leapioraid_base_validate_event_type(struct LEAPIORAID_ADAPTER *ioc, + u32 *event_type); +void leapioraid_halt_firmware(struct LEAPIORAID_ADAPTER *ioc, u8 set_fault); +struct leapioraid_scsiio_tracker *leapioraid_get_st_from_smid( + struct LEAPIORAID_ADAPTER *ioc, u16 smid); +void leapioraid_base_clear_st(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_scsiio_tracker *st); +struct leapioraid_scsiio_tracker *leapioraid_base_scsi_cmd_priv( + struct scsi_cmnd *scmd); +int +leapioraid_base_check_for_fault_and_issue_reset(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_port_enable(struct 
LEAPIORAID_ADAPTER *ioc); +u8 leapioraid_base_pci_device_is_unplugged(struct LEAPIORAID_ADAPTER *ioc); +u8 leapioraid_base_pci_device_is_available(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_free_irq(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_disable_msix(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_wait_for_commands_to_complete(struct LEAPIORAID_ADAPTER *ioc); +u8 leapioraid_base_check_cmd_timeout(struct LEAPIORAID_ADAPTER *ioc, + u8 status, void *mpi_request, int sz); +#define leapioraid_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \ + do { \ + pr_err("%s In func: %s\n", ioc->name, __func__); \ + issue_reset = leapioraid_base_check_cmd_timeout(ioc, status, mpi_request, sz); \ + } while (0) +int leapioraid_wait_for_ioc_to_operational(struct LEAPIORAID_ADAPTER *ioc, + int wait_count); +void leapioraid_base_start_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_stop_hba_unplug_watchdog(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_make_ioc_ready(struct LEAPIORAID_ADAPTER *ioc, + enum reset_type type); +void leapioraid_base_mask_interrupts(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_unmask_interrupts(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num); +void leapioraid_base_pause_mq_polling(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_base_resume_mq_polling(struct LEAPIORAID_ADAPTER *ioc); +int leapioraid_base_unlock_and_get_host_diagnostic(struct LEAPIORAID_ADAPTER + *ioc, u32 *host_diagnostic); +void leapioraid_base_lock_host_diagnostic(struct LEAPIORAID_ADAPTER *ioc); +extern char driver_name[LEAPIORAID_NAME_LENGTH]; +struct scsi_cmnd *leapioraid_scsihost_scsi_lookup_get(struct LEAPIORAID_ADAPTER + *ioc, u16 smid); +u8 leapioraid_scsihost_event_callback(struct LEAPIORAID_ADAPTER *ioc, + u8 msix_index, u32 reply); +void leapioraid_scsihost_reset_handler(struct LEAPIORAID_ADAPTER *ioc, + int reset_phase); +int 
leapioraid_scsihost_issue_tm(struct LEAPIORAID_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, + u16 smid_task, u8 timeout, u8 tr_method); +int leapioraid_scsihost_issue_locked_tm(struct LEAPIORAID_ADAPTER *ioc, + u16 handle, uint channel, uint id, + uint lun, u8 type, u16 smid_task, + u8 timeout, u8 tr_method); +void leapioraid_scsihost_set_tm_flag(struct LEAPIORAID_ADAPTER *ioc, + u16 handle); +void leapioraid_scsihost_clear_tm_flag(struct LEAPIORAID_ADAPTER *ioc, + u16 handle); +void leapioraid_expander_remove( + struct LEAPIORAID_ADAPTER *ioc, u64 sas_address, + struct leapioraid_hba_port *port); +void leapioraid_device_remove_by_sas_address(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port); +u8 leapioraid_check_for_pending_internal_cmds(struct LEAPIORAID_ADAPTER *ioc, + u16 smid); +struct leapioraid_hba_port *leapioraid_get_port_by_id( + struct LEAPIORAID_ADAPTER *ioc, u8 port, u8 skip_dirty_flag); +struct leapioraid_virtual_phy *leapioraid_get_vphy_by_phy( + struct LEAPIORAID_ADAPTER *ioc, struct leapioraid_hba_port *port, u32 phy); +struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle); +struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_sas_address( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port); +struct leapioraid_sas_device *__leapioraid_get_sdev_by_addr_and_rphy( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct sas_rphy *rphy); +struct leapioraid_sas_device *leapioraid_get_sdev_by_addr( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port); +struct leapioraid_sas_device *leapioraid_get_sdev_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle); +void leapioraid_scsihost_flush_running_cmds(struct LEAPIORAID_ADAPTER *ioc); +void leapioraid_port_enable_complete(struct LEAPIORAID_ADAPTER *ioc); +struct 
leapioraid_raid_device *leapioraid_raid_device_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle); +void leapioraid_scsihost_sas_device_remove(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device); +void leapioraid_scsihost_clear_outstanding_scsi_tm_commands( + struct LEAPIORAID_ADAPTER *ioc); +u32 leapioraid_base_mod64(u64 dividend, u32 divisor); +void +leapioraid__scsihost_change_queue_depth(struct scsi_device *sdev, int qdepth); +u8 leapioraid_scsihost_ncq_prio_supp(struct scsi_device *sdev); +u8 leapioraid_config_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); +int leapioraid_config_get_number_hba_phys(struct LEAPIORAID_ADAPTER *ioc, + u8 *num_phys); +int leapioraid_config_get_manufacturing_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManP0_t * + config_page); +int leapioraid_config_get_manufacturing_pg10(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP10_t + *config_page); +int leapioraid_config_get_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP11_t + *config_page); +int leapioraid_config_set_manufacturing_pg11(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidManuP11_t + *config_page); +int leapioraid_config_get_bios_pg2(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidBiosP2_t *config_page); +int leapioraid_config_get_bios_pg3(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidBiosP3_t *config_page); +int leapioraid_config_get_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP0_t *config_page); +int leapioraid_config_get_sas_device_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasDevP0_t *config_page, + u32 
form, u32 handle); +int leapioraid_config_get_sas_iounit_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasIOUnitP0_t *config_page, + u16 sz); +int leapioraid_config_get_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP1_t *config_page); +int leapioraid_config_set_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP1_t *config_page); +int leapioraid_config_get_iounit_pg8(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOUnitP8_t *config_page); +int leapioraid_config_get_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasIOUnitP1_t *config_page, + u16 sz); +int leapioraid_config_set_sas_iounit_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasIOUnitP1_t *config_page, + u16 sz); +int leapioraid_config_get_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP1_t *config_page); +int leapioraid_config_set_ioc_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP1_t *config_page); +int leapioraid_config_get_ioc_pg8(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidIOCP8_t *config_page); +int leapioraid_config_get_expander_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidExpanderP0_t *config_page, + u32 form, u32 handle); +int leapioraid_config_get_expander_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidExpanderP1_t *config_page, + u32 phy_number, u16 handle); +int leapioraid_config_get_enclosure_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasEncP0_t * + config_page, u32 form, u32 
handle); +int leapioraid_config_get_phy_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasPhyP0_t *config_page, + u32 phy_number); +int leapioraid_config_get_phy_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidSasPhyP1_t *config_page, + u32 phy_number); +int leapioraid_config_get_raid_volume_pg1(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidRaidVolP1_t *config_page, + u32 form, u32 handle); +int leapioraid_config_get_number_pds(struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 *num_pds); +int leapioraid_config_get_raid_volume_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidRaidVolP0_t *config_page, + u32 form, u32 handle, u16 sz); +int leapioraid_config_get_phys_disk_pg0(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidCfgRep_t *mpi_reply, + struct LeapioraidRaidPDP0_t * + config_page, u32 form, + u32 form_specific); +int leapioraid_config_get_volume_handle(struct LEAPIORAID_ADAPTER *ioc, + u16 pd_handle, u16 *volume_handle); +int leapioraid_config_get_volume_wwid(struct LEAPIORAID_ADAPTER *ioc, + u16 volume_handle, u64 *wwid); +extern const struct attribute_group *leapioraid_host_groups[]; +extern const struct attribute_group *leapioraid_dev_groups[]; +void leapioraid_ctl_init(void); +void leapioraid_ctl_exit(void); +u8 leapioraid_ctl_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +u8 leapioraid_ctl_tm_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); +void leapioraid_ctl_reset_handler(struct LEAPIORAID_ADAPTER *ioc, + int reset_phase); +u8 leapioraid_ctl_event_callback(struct LEAPIORAID_ADAPTER *ioc, u8 msix_index, + u32 reply); +void leapioraid_ctl_add_to_event_log(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventNotificationRep_t * + mpi_reply); +void leapioraid_ctl_clear_outstanding_ioctls(struct LEAPIORAID_ADAPTER 
*ioc); +int leapioraid_ctl_release(struct inode *inode, struct file *filep); +void ctl_init(void); +void ctl_exit(void); +u8 leapioraid_transport_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); +struct leapioraid_sas_port *leapioraid_transport_port_add( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u64 sas_address, + struct leapioraid_hba_port *port); +void leapioraid_transport_port_remove(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, u64 sas_address_parent, + struct leapioraid_hba_port *port); +int leapioraid_transport_add_host_phy( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_phy *leapioraid_phy, + struct LeapioraidSasPhyP0_t phy_pg0, + struct device *parent_dev); +int leapioraid_transport_add_expander_phy(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_phy *leapioraid_phy, + struct LeapioraidExpanderP1_t expander_pg1, + struct device *parent_dev); +void leapioraid_transport_update_links(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, u16 handle, + u8 phy_number, u8 link_rate, + struct leapioraid_hba_port *port); +extern struct sas_function_template leapioraid_transport_functions; +extern struct scsi_transport_template *leapioraid_transport_template; +void +leapioraid_transport_del_phy_from_an_existing_port(struct LEAPIORAID_ADAPTER + *ioc, + struct leapioraid_raid_sas_node *sas_node, + struct leapioraid_sas_phy + *leapioraid_phy); +#if defined(LEAPIORAID_WIDE_PORT_API) +void +leapioraid_transport_add_phy_to_an_existing_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_node, + struct leapioraid_sas_phy + *leapioraid_phy, + u64 sas_address, + struct leapioraid_hba_port *port); +#endif +#endif diff --git a/drivers/scsi/leapioraid/leapioraid_os.c b/drivers/scsi/leapioraid/leapioraid_os.c new file mode 100644 index 000000000000..368a3c859a04 --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid_os.c @@ -0,0 +1,9823 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Scsi Host Layer 
for MPT (Message Passing Technology) based controllers + * + * Copyright (C) 2013-2021 LSI Corporation + * Copyright (C) 2013-2021 Avago Technologies + * Copyright (C) 2013-2021 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * Copyright (C) 2024 LeapIO Tech Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "leapioraid_func.h" +#include + +#define RAID_CHANNEL 1 + +static void leapioraid_scsihost_expander_node_remove( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_expander); +static void leapioraid_firmware_event_work(struct work_struct *work); +static void leapioraid_firmware_event_work_delayed(struct work_struct *work); +static enum device_responsive_state +leapioraid_scsihost_inquiry_vpd_sn(struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 **serial_number); +static enum device_responsive_state +leapioraid_scsihost_inquiry_vpd_supported_pages(struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u32 lun, void *data, + u32 data_length); +static enum device_responsive_state leapioraid_scsihost_ata_pass_thru_idd( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, + u8 *is_ssd_device, + u8 tr_timeout, + u8 tr_method); +static enum device_responsive_state +leapioraid_scsihost_wait_for_target_to_become_ready( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u8 retry_count, u8 is_pd, + u8 tr_timeout, u8 tr_method); +static enum device_responsive_state +leapioraid_scsihost_wait_for_device_to_become_ready( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u8 retry_count, u8 is_pd, + int lun, u8 tr_timeout, u8 tr_method); +static void leapioraid_scsihost_remove_device( + struct 
LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device); +static int leapioraid_scsihost_add_device( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 retry_count, u8 is_pd); +static u8 leapioraid_scsihost_check_for_pending_tm( + struct LEAPIORAID_ADAPTER *ioc, u16 smid); +static void leapioraid_scsihost_send_event_to_turn_on_pfa_led( + struct LEAPIORAID_ADAPTER *ioc, u16 handle); +static void leapioraid_scsihost_complete_devices_scanning( + struct LEAPIORAID_ADAPTER *ioc); + +LIST_HEAD(leapioraid_ioc_list); +DEFINE_SPINLOCK(leapioraid_gioc_lock); + +MODULE_AUTHOR(LEAPIORAID_AUTHOR); +MODULE_DESCRIPTION(LEAPIORAID_DESCRIPTION); +MODULE_LICENSE("GPL"); +MODULE_VERSION(LEAPIORAID_DRIVER_VERSION); + +static u8 scsi_io_cb_idx = -1; +static u8 tm_cb_idx = -1; +static u8 ctl_cb_idx = -1; +static u8 ctl_tm_cb_idx = -1; +static u8 base_cb_idx = -1; +static u8 port_enable_cb_idx = -1; +static u8 transport_cb_idx = -1; +static u8 scsih_cb_idx = -1; +static u8 config_cb_idx = -1; +static int leapioraid_ids; +static u8 tm_tr_cb_idx = -1; +static u8 tm_tr_volume_cb_idx = -1; +static u8 tm_tr_internal_cb_idx = -1; +static u8 tm_sas_control_cb_idx = -1; +static u32 logging_level; + +MODULE_PARM_DESC(logging_level, + " bits for enabling additional logging info (default=0)"); + +static int open_pcie_trace; +module_param(open_pcie_trace, int, 0444); +MODULE_PARM_DESC(open_pcie_trace, "open_pcie_trace: open=1/default=0(close)"); + +static int disable_discovery = -1; +module_param(disable_discovery, int, 0444); +MODULE_PARM_DESC(disable_discovery, "disable discovery"); + +static struct raid_template *leapioraid_raid_template; + +enum device_responsive_state { + DEVICE_READY, + DEVICE_RETRY, + DEVICE_RETRY_UA, + DEVICE_START_UNIT, + DEVICE_STOP_UNIT, + DEVICE_ERROR, +}; + +struct sense_info { + u8 skey; + u8 asc; + u8 ascq; +}; + +#define LEAPIORAID_TURN_ON_PFA_LED (0xFFFC) +#define LEAPIORAID_PORT_ENABLE_COMPLETE (0xFFFD) +#define LEAPIORAID_REMOVE_UNRESPONDING_DEVICES 
(0xFFFF) + +struct leapioraid_fw_event_work { + struct list_head list; + struct work_struct work; + u8 cancel_pending_work; + struct delayed_work delayed_work; + u8 delayed_work_active; + struct LEAPIORAID_ADAPTER *ioc; + u16 device_handle; + u8 VF_ID; + u8 VP_ID; + u8 ignore; + u16 event; + struct kref refcount; + void *event_data; + u8 *retries; +}; + +static void +leapioraid_fw_event_work_free(struct kref *r) +{ + struct leapioraid_fw_event_work *fw_work; + + fw_work = container_of( + r, struct leapioraid_fw_event_work, refcount); + kfree(fw_work->event_data); + kfree(fw_work->retries); + kfree(fw_work); +} + +static void +leapioraid_fw_event_work_get( + struct leapioraid_fw_event_work *fw_work) +{ + kref_get(&fw_work->refcount); +} + +static void +leapioraid_fw_event_work_put(struct leapioraid_fw_event_work *fw_work) +{ + kref_put(&fw_work->refcount, leapioraid_fw_event_work_free); +} + +static +struct leapioraid_fw_event_work *leapioraid_alloc_fw_event_work(int len) +{ + struct leapioraid_fw_event_work *fw_event; + + fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC); + if (!fw_event) + return NULL; + kref_init(&fw_event->refcount); + return fw_event; +} + +static int +leapioraid_scsihost_set_debug_level( + const char *val, const struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + struct LEAPIORAID_ADAPTER *ioc; + + if (ret) + return ret; + pr_info("setting logging_level(0x%08x)\n", logging_level); + spin_lock(&leapioraid_gioc_lock); + list_for_each_entry(ioc, &leapioraid_ioc_list, list) + ioc->logging_level = logging_level; + spin_unlock(&leapioraid_gioc_lock); + return 0; +} + +module_param_call(logging_level, + leapioraid_scsihost_set_debug_level, param_get_int, + &logging_level, 0644); + +static inline int +leapioraid_scsihost_srch_boot_sas_address(u64 sas_address, + struct LEAPIORAID_BOOT_DEVICE_SAS_WWID *boot_device) +{ + return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 
1 : 0; +} + +static inline int +leapioraid_scsihost_srch_boot_device_name(u64 device_name, + struct LEAPIORAID_BOOT_DEVICE_DEVICE_NAME *boot_device) +{ + return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0; +} + +static inline int +leapioraid_scsihost_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, + struct LEAPIORAID_BOOT_DEVICE_ENCLOSURE_SLOT *boot_device) +{ + return (enclosure_logical_id == + le64_to_cpu(boot_device->EnclosureLogicalID) + && slot_number == le16_to_cpu(boot_device->SlotNumber)) ? 1 : 0; +} + +static void +leapioraid_scsihost_display_enclosure_chassis_info( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device, + struct scsi_device *sdev, + struct scsi_target *starget) +{ + if (sdev) { + if (sas_device->enclosure_handle != 0) + sdev_printk(KERN_INFO, sdev, + "enclosure logical id(0x%016llx), slot(%d)\n", + (unsigned long long)sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0] != '\0') + sdev_printk(KERN_INFO, sdev, + "enclosure level(0x%04x), connector name( %s)\n", + sas_device->enclosure_level, + sas_device->connector_name); + if (sas_device->is_chassis_slot_valid) + sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n", + sas_device->chassis_slot); + } else if (starget) { + if (sas_device->enclosure_handle != 0) + starget_printk(KERN_INFO, starget, + "enclosure logical id(0x%016llx), slot(%d)\n", + (unsigned long long)sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0] != '\0') + starget_printk(KERN_INFO, starget, + "enclosure level(0x%04x), connector name( %s)\n", + sas_device->enclosure_level, + sas_device->connector_name); + if (sas_device->is_chassis_slot_valid) + starget_printk(KERN_INFO, starget, + "chassis slot(0x%04x)\n", sas_device->chassis_slot); + } else { + if (sas_device->enclosure_handle != 0) + pr_info("%s enclosure logical id(0x%016llx), slot(%d)\n", + ioc->name, + (unsigned long 
long)sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0] != '\0') + pr_info("%s enclosure level(0x%04x),connector name( %s)\n", + ioc->name, + sas_device->enclosure_level, + sas_device->connector_name); + if (sas_device->is_chassis_slot_valid) + pr_info("%s chassis slot(0x%04x)\n", + ioc->name, sas_device->chassis_slot); + } +} + +struct leapioraid_hba_port *leapioraid_get_port_by_id( + struct LEAPIORAID_ADAPTER *ioc, + u8 port_id, u8 skip_dirty_flag) +{ + struct leapioraid_hba_port *port, *port_next; + + if (!ioc->multipath_on_hba) + port_id = LEAPIORAID_MULTIPATH_DISABLED_PORT_ID; + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if (port->port_id != port_id) + continue; + if (port->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT) + continue; + return port; + } + if (skip_dirty_flag) { + port = port_next = NULL; + list_for_each_entry_safe(port, port_next, + &ioc->port_table_list, list) { + if (port->port_id != port_id) + continue; + return port; + } + } + if (unlikely(!ioc->multipath_on_hba)) { + port = kzalloc(sizeof(struct leapioraid_hba_port), GFP_ATOMIC); + if (!port) + return NULL; + + port->port_id = LEAPIORAID_MULTIPATH_DISABLED_PORT_ID; + pr_err( + "%s hba_port entry: %p, port: %d is added to hba_port list\n", + ioc->name, port, port->port_id); + list_add_tail(&port->list, &ioc->port_table_list); + return port; + } + return NULL; +} + +struct leapioraid_virtual_phy *leapioraid_get_vphy_by_phy( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *port, u32 phy) +{ + struct leapioraid_virtual_phy *vphy, *vphy_next; + + if (!port->vphys_mask) + return NULL; + list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) { + if (vphy->phy_mask & (1 << phy)) + return vphy; + } + return NULL; +} + +static int +leapioraid_scsihost_is_boot_device(u64 sas_address, u64 device_name, + u64 enclosure_logical_id, u16 slot, u8 form, + union LEAPIORAID_BIOSPAGE2_BOOT_DEVICE *boot_device) +{ + 
int rc = 0; + + switch (form) { + case LEAPIORAID_BIOSPAGE2_FORM_SAS_WWID: + if (!sas_address) + break; + rc = leapioraid_scsihost_srch_boot_sas_address(sas_address, + &boot_device->SasWwid); + break; + case LEAPIORAID_BIOSPAGE2_FORM_ENCLOSURE_SLOT: + if (!enclosure_logical_id) + break; + rc = leapioraid_scsihost_srch_boot_encl_slot( + enclosure_logical_id, + slot, + &boot_device->EnclosureSlot); + break; + case LEAPIORAID_BIOSPAGE2_FORM_DEVICE_NAME: + if (!device_name) + break; + rc = leapioraid_scsihost_srch_boot_device_name(device_name, + &boot_device->DeviceName); + break; + case LEAPIORAID_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED: + break; + } + return rc; +} + +static int +leapioraid_scsihost_get_sas_address( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u64 *sas_address) +{ + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u32 ioc_status; + + *sas_address = 0; + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status == LEAPIORAID_IOCSTATUS_SUCCESS) { + if ((handle <= ioc->sas_hba.num_phys) && + (!(le32_to_cpu(sas_device_pg0.DeviceInfo) & + LEAPIORAID_SAS_DEVICE_INFO_SEP))) + *sas_address = ioc->sas_hba.sas_address; + else + *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + return 0; + } + if (ioc_status == LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE) + return -ENXIO; + pr_err("%s handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n", + ioc->name, handle, ioc_status, + __FILE__, __LINE__, __func__); + return -EIO; +} + +static void +leapioraid_scsihost_determine_boot_device( + struct LEAPIORAID_ADAPTER *ioc, void *device, + u32 channel) +{ + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + u64 
sas_address; + u64 device_name; + u64 enclosure_logical_id; + u16 slot; + + if (!ioc->is_driver_loading) + return; + if (!ioc->bios_pg3.BiosVersion) + return; + if (channel == RAID_CHANNEL) { + raid_device = device; + sas_address = raid_device->wwid; + device_name = 0; + enclosure_logical_id = 0; + slot = 0; + } else { + sas_device = device; + sas_address = sas_device->sas_address; + device_name = sas_device->device_name; + enclosure_logical_id = sas_device->enclosure_logical_id; + slot = sas_device->slot; + } + if (!ioc->req_boot_device.device) { + if (leapioraid_scsihost_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.ReqBootDeviceForm & + LEAPIORAID_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.RequestedBootDevice)) { + dinitprintk(ioc, + pr_err( + "%s %s: req_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->req_boot_device.device = device; + ioc->req_boot_device.channel = channel; + } + } + if (!ioc->req_alt_boot_device.device) { + if (leapioraid_scsihost_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.ReqAltBootDeviceForm & + LEAPIORAID_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.RequestedAltBootDevice)) { + dinitprintk(ioc, + pr_err( + "%s %s: req_alt_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->req_alt_boot_device.device = device; + ioc->req_alt_boot_device.channel = channel; + } + } + if (!ioc->current_boot_device.device) { + if (leapioraid_scsihost_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.CurrentBootDeviceForm & + LEAPIORAID_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.CurrentBootDevice)) { + dinitprintk(ioc, + pr_err( + "%s %s: current_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->current_boot_device.device = device; + ioc->current_boot_device.channel = channel; + } + } +} + +static +struct 
leapioraid_sas_device *__leapioraid_get_sdev_from_target( + struct LEAPIORAID_ADAPTER *ioc, + struct LEAPIORAID_TARGET *tgt_priv) +{ + struct leapioraid_sas_device *ret; + + assert_spin_locked(&ioc->sas_device_lock); + ret = tgt_priv->sas_dev; + if (ret) + leapioraid_sas_device_get(ret); + return ret; +} + +static +struct leapioraid_sas_device *leapioraid_get_sdev_from_target( + struct LEAPIORAID_ADAPTER *ioc, + struct LEAPIORAID_TARGET *tgt_priv) +{ + struct leapioraid_sas_device *ret; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + ret = __leapioraid_get_sdev_from_target(ioc, tgt_priv); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return ret; +} + +static +struct leapioraid_sas_device *__leapioraid_get_sdev_by_addr( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, struct leapioraid_hba_port *port) +{ + struct leapioraid_sas_device *sas_device; + + if (!port) + return NULL; + assert_spin_locked(&ioc->sas_device_lock); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) + if (sas_device->sas_address == sas_address && + sas_device->port == port) + goto found_device; + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) + if (sas_device->sas_address == sas_address && + sas_device->port == port) + goto found_device; + return NULL; +found_device: + leapioraid_sas_device_get(sas_device); + return sas_device; +} + +struct leapioraid_sas_device *__leapioraid_get_sdev_by_addr_and_rphy( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct sas_rphy *rphy) +{ + struct leapioraid_sas_device *sas_device; + + assert_spin_locked(&ioc->sas_device_lock); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) + if (sas_device->sas_address == sas_address && + (sas_device->rphy == rphy)) + goto found_device; + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) + if (sas_device->sas_address == sas_address && + (sas_device->rphy == rphy)) + goto found_device; + return NULL; 
+found_device: + leapioraid_sas_device_get(sas_device); + return sas_device; +} + +struct leapioraid_sas_device *leapioraid_get_sdev_by_addr( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port) +{ + struct leapioraid_sas_device *sas_device = NULL; + unsigned long flags; + + if (!port) + return sas_device; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr(ioc, sas_address, port); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return sas_device; +} + +static struct leapioraid_sas_device *__leapioraid_get_sdev_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_sas_device *sas_device; + + assert_spin_locked(&ioc->sas_device_lock); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) + if (sas_device->handle == handle) + goto found_device; + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) + if (sas_device->handle == handle) + goto found_device; + return NULL; +found_device: + leapioraid_sas_device_get(sas_device); + return sas_device; +} + +struct leapioraid_sas_device *leapioraid_get_sdev_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_sas_device *sas_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return sas_device; +} + +void +leapioraid_scsihost_sas_device_remove(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + unsigned long flags; + int was_on_sas_device_list = 0; + + if (!sas_device) + return; + pr_info("%s %s: removing handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address); + leapioraid_scsihost_display_enclosure_chassis_info( + ioc, sas_device, NULL, NULL); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + if 
(!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + was_on_sas_device_list = 1; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (was_on_sas_device_list) { + kfree(sas_device->serial_number); + leapioraid_sas_device_put(sas_device); + } +} + +static void +leapioraid_scsihost_device_remove_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int was_on_sas_device_list = 0; + + if (ioc->shost_recovery) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + was_on_sas_device_list = 1; + leapioraid_sas_device_put(sas_device); + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (was_on_sas_device_list) { + leapioraid_scsihost_remove_device(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + } +} + +void +leapioraid_device_remove_by_sas_address( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, struct leapioraid_hba_port *port) +{ + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int was_on_sas_device_list = 0; + + if (ioc->shost_recovery) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr(ioc, sas_address, port); + if (sas_device) { + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + was_on_sas_device_list = 1; + leapioraid_sas_device_put(sas_device); + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (was_on_sas_device_list) { + leapioraid_scsihost_remove_device(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + } +} + +static void +leapioraid_scsihost_sas_device_add( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + unsigned long flags; + + dewtprintk(ioc, pr_info("%s %s: handle(0x%04x), 
sas_addr(0x%016llx)\n", + ioc->name, + __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address)); + dewtprintk(ioc, + leapioraid_scsihost_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL)); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + leapioraid_sas_device_get(sas_device); + list_add_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (ioc->hide_drives) { + clear_bit(sas_device->handle, ioc->pend_os_device_add); + return; + } + if (!leapioraid_transport_port_add(ioc, sas_device->handle, + sas_device->sas_address_parent, + sas_device->port)) { + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + } else if (!sas_device->starget) { + if (!ioc->is_driver_loading) { + leapioraid_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + } + } else + clear_bit(sas_device->handle, ioc->pend_os_device_add); +} + +static void +leapioraid_scsihost_sas_device_init_add( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + unsigned long flags; + + dewtprintk(ioc, pr_info("%s %s: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, + __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address)); + dewtprintk(ioc, + leapioraid_scsihost_display_enclosure_chassis_info(ioc, sas_device, + NULL, NULL)); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + leapioraid_sas_device_get(sas_device); + list_add_tail(&sas_device->list, &ioc->sas_device_init_list); + leapioraid_scsihost_determine_boot_device(ioc, sas_device, 0); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +static +struct leapioraid_raid_device *leapioraid_scsihost_raid_device_find_by_id( + struct LEAPIORAID_ADAPTER *ioc, int id, int channel) +{ + struct leapioraid_raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, 
&ioc->raid_device_list, list) { + if (raid_device->id == id && raid_device->channel == channel) { + r = raid_device; + goto out; + } + } +out: + return r; +} + +struct leapioraid_raid_device *leapioraid_raid_device_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->handle != handle) + continue; + r = raid_device; + goto out; + } +out: + return r; +} + +static +struct leapioraid_raid_device *leapioraid_scsihost_raid_device_find_by_wwid( + struct LEAPIORAID_ADAPTER *ioc, u64 wwid) +{ + struct leapioraid_raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->wwid != wwid) + continue; + r = raid_device; + goto out; + } +out: + return r; +} + +static void +leapioraid_scsihost_raid_device_add(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_device *raid_device) +{ + unsigned long flags; + u8 protection_mask; + + dewtprintk(ioc, pr_info("%s %s: handle(0x%04x), wwid(0x%016llx)\n", + ioc->name, + __func__, raid_device->handle, + (unsigned long long)raid_device->wwid)); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_add_tail(&raid_device->list, &ioc->raid_device_list); + if (!ioc->disable_eedp_support) { + protection_mask = scsi_host_get_prot(ioc->shost); + if (protection_mask & SHOST_DIX_TYPE0_PROTECTION) { + scsi_host_set_prot(ioc->shost, protection_mask & 0x77); + pr_err( + "%s: Disabling DIX0 because of unsupport!\n", + ioc->name); + } + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +static void +leapioraid_scsihost_raid_device_remove(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_device *raid_device) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_del(&raid_device->list); + kfree(raid_device); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); 
+} + +struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_sas_node *sas_expander, *r; + + r = NULL; + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->handle != handle) + continue; + r = sas_expander; + goto out; + } +out: + return r; +} + +static +struct leapioraid_enclosure_node *leapioraid_scsihost_enclosure_find_by_handle( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle) +{ + struct leapioraid_enclosure_node *enclosure_dev, *r; + + r = NULL; + list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) { + if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle) + continue; + r = enclosure_dev; + goto out; + } +out: + return r; +} + +struct leapioraid_raid_sas_node *leapioraid_scsihost_expander_find_by_sas_address( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct leapioraid_hba_port *port) +{ + struct leapioraid_raid_sas_node *sas_expander, *r; + + r = NULL; + if (!port) + return r; + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->sas_address != sas_address || + sas_expander->port != port) + continue; + r = sas_expander; + goto out; + } +out: + return r; +} + +static void +leapioraid_scsihost_expander_node_add(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_expander) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_add_tail(&sas_expander->list, &ioc->sas_expander_list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +} + +static int +leapioraid_scsihost_is_sas_end_device(u32 device_info) +{ + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_END_DEVICE && + ((device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) | + (device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET) | + (device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE))) + return 1; + else + return 0; +} + +static u8 
+leapioraid_scsihost_scsi_lookup_find_by_target( + struct LEAPIORAID_ADAPTER *ioc, int id, + int channel) +{ + int smid; + struct scsi_cmnd *scmd; + + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + if (scmd->device->id == id && scmd->device->channel == channel) + return 1; + } + return 0; +} + +static u8 +leapioraid_scsihost_scsi_lookup_find_by_lun( + struct LEAPIORAID_ADAPTER *ioc, int id, + unsigned int lun, int channel) +{ + int smid; + struct scsi_cmnd *scmd; + + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + if (scmd->device->id == id && + scmd->device->channel == channel && + scmd->device->lun == lun) + return 1; + } + return 0; +} + +struct scsi_cmnd *leapioraid_scsihost_scsi_lookup_get( + struct LEAPIORAID_ADAPTER *ioc, u16 smid) +{ + struct scsi_cmnd *scmd = NULL; + struct leapioraid_scsiio_tracker *st; + struct LeapioraidSCSIIOReq_t *mpi_request; + u32 unique_tag = smid - 1; + + if (smid > 0 && smid <= ioc->shost->can_queue) { + unique_tag = + ioc->io_queue_num[smid - + 1] << BLK_MQ_UNIQUE_TAG_BITS | (smid - 1); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + if (!mpi_request->DevHandle) + return scmd; + scmd = scsi_host_find_tag(ioc->shost, unique_tag); + if (scmd) { + st = leapioraid_base_scsi_cmd_priv(scmd); + if ((!st) || (st->cb_idx == 0xFF) || (st->smid == 0)) + scmd = NULL; + } + } + return scmd; +} + +static void +leapioraid_scsihost_display_sdev_qd(struct scsi_device *sdev) +{ + if (sdev->inquiry_len <= 7) + return; + sdev_printk(KERN_INFO, sdev, + "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n", + sdev->queue_depth, sdev->tagged_supported, + sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1)); +} + +static int +leapioraid_scsihost_change_queue_depth( + struct scsi_device *sdev, int qdepth) +{ + struct Scsi_Host *shost = sdev->host; + int 
max_depth; + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct leapioraid_sas_device *sas_device; + unsigned long flags; + + max_depth = shost->can_queue; + + goto not_sata; + + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + goto not_sata; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + goto not_sata; + if ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME)) + goto not_sata; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = + __leapioraid_get_sdev_from_target(ioc, sas_target_priv_data); + if (sas_device) { + if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE) + max_depth = LEAPIORAID_SATA_QUEUE_DEPTH; + leapioraid_sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +not_sata: + if (!sdev->tagged_supported) + max_depth = 1; + if (qdepth > max_depth) + qdepth = max_depth; + scsi_change_queue_depth(sdev, qdepth); + leapioraid_scsihost_display_sdev_qd(sdev); + return sdev->queue_depth; +} + +void +leapioraid__scsihost_change_queue_depth( + struct scsi_device *sdev, int qdepth) +{ + struct Scsi_Host *shost = sdev->host; + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + + if (ioc->enable_sdev_max_qd) + qdepth = shost->can_queue; + leapioraid_scsihost_change_queue_depth(sdev, qdepth); +} + +static int +leapioraid_scsihost_target_alloc(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + unsigned long flags; + struct sas_rphy *rphy; + + sas_target_priv_data = + kzalloc(sizeof(struct LEAPIORAID_TARGET), GFP_KERNEL); + if (!sas_target_priv_data) + 
return -ENOMEM; + starget->hostdata = sas_target_priv_data; + sas_target_priv_data->starget = starget; + sas_target_priv_data->handle = LEAPIORAID_INVALID_DEVICE_HANDLE; + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_id( + ioc, starget->id, starget->channel); + if (raid_device) { + sas_target_priv_data->handle = raid_device->handle; + sas_target_priv_data->sas_address = raid_device->wwid; + sas_target_priv_data->flags |= + LEAPIORAID_TARGET_FLAGS_VOLUME; + raid_device->starget = starget; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return 0; + } + spin_lock_irqsave(&ioc->sas_device_lock, flags); + rphy = dev_to_rphy(starget->dev.parent); + sas_device = __leapioraid_get_sdev_by_addr_and_rphy(ioc, + rphy->identify.sas_address, rphy); + if (sas_device) { + sas_target_priv_data->handle = sas_device->handle; + sas_target_priv_data->sas_address = sas_device->sas_address; + sas_target_priv_data->port = sas_device->port; + sas_target_priv_data->sas_dev = sas_device; + sas_device->starget = starget; + sas_device->id = starget->id; + sas_device->channel = starget->channel; + if (test_bit(sas_device->handle, ioc->pd_handles)) + sas_target_priv_data->flags |= + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT; + if (sas_device->fast_path) + sas_target_priv_data->flags |= + LEAPIORAID_TARGET_FASTPATH_IO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return 0; +} + +static void +leapioraid_scsihost_target_destroy(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + unsigned long flags; + + sas_target_priv_data = starget->hostdata; + if (!sas_target_priv_data) + return; + if (starget->channel == RAID_CHANNEL) { + 
spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_id( + ioc, starget->id, starget->channel); + if (raid_device) { + raid_device->starget = NULL; + raid_device->sdev = NULL; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + goto out; + } + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = + __leapioraid_get_sdev_from_target(ioc, sas_target_priv_data); + if (sas_device && (sas_device->starget == starget) + && (sas_device->id == starget->id) + && (sas_device->channel == starget->channel)) + sas_device->starget = NULL; + if (sas_device) { + sas_target_priv_data->sas_dev = NULL; + leapioraid_sas_device_put(sas_device); + leapioraid_sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +out: + kfree(sas_target_priv_data); + starget->hostdata = NULL; +} + +static int +leapioraid_scsihost_slave_alloc(struct scsi_device *sdev) +{ + struct Scsi_Host *shost; + struct LEAPIORAID_ADAPTER *ioc; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_target *starget; + struct leapioraid_raid_device *raid_device; + struct leapioraid_sas_device *sas_device; + unsigned long flags; + + sas_device_priv_data = + kzalloc(sizeof(*sas_device_priv_data), GFP_KERNEL); + if (!sas_device_priv_data) + return -ENOMEM; + sas_device_priv_data->lun = sdev->lun; + sas_device_priv_data->flags = LEAPIORAID_DEVICE_FLAGS_INIT; + starget = scsi_target(sdev); + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->num_luns++; + sas_device_priv_data->sas_target = sas_target_priv_data; + sdev->hostdata = sas_device_priv_data; + if ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT)) + sdev->no_uld_attach = 1; + shost = dev_to_shost(&starget->dev); + ioc = leapioraid_shost_private(shost); + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = 
leapioraid_scsihost_raid_device_find_by_id(ioc, + starget->id, + starget->channel); + if (raid_device) + raid_device->sdev = sdev; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } + if (!(sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr(ioc, + sas_target_priv_data->sas_address, + sas_target_priv_data->port); + if (sas_device && (sas_device->starget == NULL)) { + sdev_printk(KERN_INFO, sdev, + "%s : sas_device->starget set to starget @ %d\n", + __func__, __LINE__); + sas_device->starget = starget; + } + if (sas_device) + leapioraid_sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + return 0; +} + +static void +leapioraid_scsihost_slave_destroy(struct scsi_device *sdev) +{ + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct scsi_target *starget; + struct Scsi_Host *shost; + struct LEAPIORAID_ADAPTER *ioc; + struct leapioraid_sas_device *sas_device; + unsigned long flags; + + if (!sdev->hostdata) + return; + starget = scsi_target(sdev); + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->num_luns--; + shost = dev_to_shost(&starget->dev); + ioc = leapioraid_shost_private(shost); + if (!(sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_from_target(ioc, + sas_target_priv_data); + if (sas_device && !sas_target_priv_data->num_luns) + sas_device->starget = NULL; + if (sas_device) + leapioraid_sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + kfree(sdev->hostdata); + sdev->hostdata = NULL; +} + +static void +leapioraid_scsihost_display_sata_capabilities( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, struct scsi_device *sdev) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasDevP0_t sas_device_pg0; + u32 ioc_status; + u16 flags; + u32 
device_info; + + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + flags = le16_to_cpu(sas_device_pg0.Flags); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + sdev_printk(KERN_INFO, sdev, + "atapi(%s), ncq(%s), asyn_notify(%s),\n\t\t" + "smart(%s), fua(%s), sw_preserve(%s)\n", + (device_info & LEAPIORAID_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : + "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" + : "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) + ? "y" : "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? + "y" : "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" + : "n", + (flags & LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : + "n"); +} + +static int +leapioraid_scsihost_is_raid(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return (sdev->channel == RAID_CHANNEL) ? 
1 : 0; +} + +static void +leapioraid_scsihost_get_resync(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(sdev->host); + static struct leapioraid_raid_device *raid_device; + unsigned long flags; + struct LeapioraidRaidVolP0_t vol_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u32 volume_status_flags; + u8 percent_complete; + u16 handle; + + percent_complete = 0; + handle = 0; + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_id( + ioc, sdev->id, sdev->channel); + if (raid_device) { + handle = raid_device->handle; + percent_complete = raid_device->percent_complete; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (!handle) + goto out; + if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, + handle, + sizeof + (struct LeapioraidRaidVolP0_t))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + percent_complete = 0; + goto out; + } + volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); + if (!(volume_status_flags & + LEAPIORAID_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)) + percent_complete = 0; +out: + raid_set_resync(leapioraid_raid_template, dev, percent_complete); +} + +static void +leapioraid_scsihost_get_state(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(sdev->host); + static struct leapioraid_raid_device *raid_device; + unsigned long flags; + struct LeapioraidRaidVolP0_t vol_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u32 volstate; + enum raid_state state = RAID_STATE_UNKNOWN; + u16 handle = 0; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_id( + ioc, sdev->id, sdev->channel); + if (raid_device) + handle = raid_device->handle; + 
spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (!raid_device) + goto out; + if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, + handle, + sizeof + (struct LeapioraidRaidVolP0_t))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags); + if (volstate & LEAPIORAID_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) { + state = RAID_STATE_RESYNCING; + goto out; + } + switch (vol_pg0.VolumeState) { + case LEAPIORAID_RAID_VOL_STATE_OPTIMAL: + case LEAPIORAID_RAID_VOL_STATE_ONLINE: + state = RAID_STATE_ACTIVE; + break; + case LEAPIORAID_RAID_VOL_STATE_DEGRADED: + state = RAID_STATE_DEGRADED; + break; + case LEAPIORAID_RAID_VOL_STATE_FAILED: + case LEAPIORAID_RAID_VOL_STATE_MISSING: + state = RAID_STATE_OFFLINE; + break; + } +out: + raid_set_state(leapioraid_raid_template, dev, state); +} + +static void +leapioraid_scsihost_set_level(struct LEAPIORAID_ADAPTER *ioc, + struct scsi_device *sdev, u8 volume_type) +{ + enum raid_level level = RAID_LEVEL_UNKNOWN; + + switch (volume_type) { + case LEAPIORAID_RAID_VOL_TYPE_RAID0: + level = RAID_LEVEL_0; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID10: + case LEAPIORAID_RAID_VOL_TYPE_RAID1E: + level = RAID_LEVEL_10; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID1: + level = RAID_LEVEL_1; + break; + } + raid_set_level(leapioraid_raid_template, &sdev->sdev_gendev, level); +} + +static int +leapioraid_scsihost_get_volume_capabilities( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_device *raid_device) +{ + struct LeapioraidRaidVolP0_t *vol_pg0; + struct LeapioraidRaidPDP0_t pd_pg0; + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u16 sz; + u8 num_pds; + + if ((leapioraid_config_get_number_pds(ioc, raid_device->handle, + &num_pds)) || !num_pds) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", ioc->name, + 
__FILE__, __LINE__, __func__)); + return 1; + } + raid_device->num_pds = num_pds; + sz = offsetof(struct LeapioraidRaidVolP0_t, PhysDisk) + (num_pds * + sizeof + (struct LEAPIORAID_RAIDVOL0_PHYS_DISK)); + vol_pg0 = kzalloc(sz, GFP_KERNEL); + if (!vol_pg0) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); + return 1; + } + if ((leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, + raid_device->handle, sz))) { + dfailprintk(ioc, + pr_warn( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); + kfree(vol_pg0); + return 1; + } + raid_device->volume_type = vol_pg0->VolumeType; + if (!(leapioraid_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, + LEAPIORAID_PHYSDISK_PGAD_FORM_PHYSDISKNUM, + vol_pg0->PhysDisk[0].PhysDiskNum))) { + if (! + (leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, + le16_to_cpu(pd_pg0.DevHandle)))) { + raid_device->device_info = + le32_to_cpu(sas_device_pg0.DeviceInfo); + } + } + kfree(vol_pg0); + return 0; +} + +static void +leapioraid_scsihost_enable_tlr( + struct LEAPIORAID_ADAPTER *ioc, struct scsi_device *sdev) +{ + u8 data[30]; + u8 page_len, ii; + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct leapioraid_sas_device *sas_device; + + if (sdev->type != TYPE_TAPE) + return; + if (!(ioc->facts.IOCCapabilities & LEAPIORAID_IOCFACTS_CAPABILITY_TLR)) + return; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + return; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + return; + if (leapioraid_scsihost_inquiry_vpd_supported_pages(ioc, + sas_target_priv_data->handle, + sdev->lun, data, + sizeof(data)) != + DEVICE_READY) { + sas_device = + leapioraid_get_sdev_by_addr(ioc, + sas_target_priv_data->sas_address, + 
sas_target_priv_data->port); + if (sas_device) { + sdev_printk(KERN_INFO, sdev, + "%s: DEVICE NOT READY: handle(0x%04x),\n\t\t" + "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", + __func__, + sas_device->handle, + (unsigned long long)sas_device->sas_address, + sas_device->phy, + (unsigned long long)sas_device->device_name); + leapioraid_scsihost_display_enclosure_chassis_info(NULL, + sas_device, + sdev, NULL); + leapioraid_sas_device_put(sas_device); + } + return; + } + page_len = data[3]; + for (ii = 4; ii < page_len + 4; ii++) { + if (data[ii] == 0x90) { + sas_device_priv_data->flags |= LEAPIORAID_DEVICE_TLR_ON; + return; + } + } +} + +static void +leapioraid_scsihost_enable_ssu_on_sata( + struct leapioraid_sas_device *sas_device, + struct scsi_device *sdev) +{ + if (!(sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE)) + return; + if (sas_device->ssd_device) { + sdev->manage_system_start_stop = 1; + sdev->manage_runtime_start_stop = 1; + } +} + +static int +leapioraid_scsihost_slave_configure(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct LEAPIORAID_ADAPTER *ioc = leapioraid_shost_private(shost); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + unsigned long flags; + int qdepth; + u8 ssp_target = 0; + char *ds = ""; + char *r_level = ""; + u16 handle, volume_handle = 0; + u64 volume_wwid = 0; + u8 *serial_number = NULL; + enum device_responsive_state retval; + u8 count = 0; + + qdepth = 1; + sas_device_priv_data = sdev->hostdata; + sas_device_priv_data->configured_lun = 1; + sas_device_priv_data->flags &= ~LEAPIORAID_DEVICE_FLAGS_INIT; + sas_target_priv_data = sas_device_priv_data->sas_target; + handle = sas_target_priv_data->handle; + if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device 
= + leapioraid_raid_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (!raid_device) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + if (leapioraid_scsihost_get_volume_capabilities(ioc, raid_device)) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + if (raid_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) { + qdepth = LEAPIORAID_SAS_QUEUE_DEPTH; + ds = "SSP"; + } else { + qdepth = LEAPIORAID_SATA_QUEUE_DEPTH; + if (raid_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "SATA"; + else + ds = "STP"; + } + switch (raid_device->volume_type) { + case LEAPIORAID_RAID_VOL_TYPE_RAID0: + r_level = "RAID0"; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID1E: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + if (ioc->manu_pg10.OEMIdentifier && + (le32_to_cpu(ioc->manu_pg10.GenericFlags0) & + 0x00000004) && + !(raid_device->num_pds % 2)) + r_level = "RAID10"; + else + r_level = "RAID1E"; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID1: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + r_level = "RAID1"; + break; + case LEAPIORAID_RAID_VOL_TYPE_RAID10: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + r_level = "RAID10"; + break; + case LEAPIORAID_RAID_VOL_TYPE_UNKNOWN: + default: + qdepth = LEAPIORAID_RAID_QUEUE_DEPTH; + r_level = "RAIDX"; + break; + } + if (!ioc->warpdrive_msg) + sdev_printk( + KERN_INFO, sdev, + "%s: handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n", + r_level, raid_device->handle, + (unsigned long long)raid_device->wwid, + raid_device->num_pds, ds); + if (shost->max_sectors > LEAPIORAID_RAID_MAX_SECTORS) { + blk_queue_max_hw_sectors(sdev->request_queue, + LEAPIORAID_RAID_MAX_SECTORS); + sdev_printk(KERN_INFO, sdev, + "Set queue's max_sector to: %u\n", + LEAPIORAID_RAID_MAX_SECTORS); + } + leapioraid__scsihost_change_queue_depth(sdev, 
qdepth); + leapioraid_scsihost_set_level(ioc, sdev, raid_device->volume_type); + return 0; + } + if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) { + if (leapioraid_config_get_volume_handle(ioc, handle, + &volume_handle)) { + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + if (volume_handle && leapioraid_config_get_volume_wwid(ioc, + volume_handle, + &volume_wwid)) { + dfailprintk(ioc, + pr_warn( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + } + leapioraid_scsihost_inquiry_vpd_sn(ioc, handle, &serial_number); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr(ioc, + sas_device_priv_data->sas_target->sas_address, + sas_device_priv_data->sas_target->port); + if (!sas_device) { + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + dfailprintk(ioc, pr_warn( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); + kfree(serial_number); + return 1; + } + sas_device->volume_handle = volume_handle; + sas_device->volume_wwid = volume_wwid; + sas_device->serial_number = serial_number; + if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) { + qdepth = (sas_device->port_type > 1) ? 
+ ioc->max_wideport_qd : ioc->max_narrowport_qd; + ssp_target = 1; + if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SEP) { + sdev_printk(KERN_WARNING, sdev, + "set ignore_delay_remove for handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->ignore_delay_remove = 1; + ds = "SES"; + } else + ds = "SSP"; + } else { + qdepth = ioc->max_sata_qd; + if (sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET) + ds = "STP"; + else if (sas_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "SATA"; + } + sdev_printk( + KERN_INFO, sdev, + "%s: handle(0x%04x), sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", + ds, handle, (unsigned long long)sas_device->sas_address, + sas_device->phy, + (unsigned long long)sas_device->device_name); + leapioraid_scsihost_display_enclosure_chassis_info( + NULL, sas_device, sdev, NULL); + leapioraid_sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!ssp_target) { + leapioraid_scsihost_display_sata_capabilities(ioc, handle, sdev); + do { + retval = leapioraid_scsihost_ata_pass_thru_idd(ioc, handle, + &sas_device->ssd_device, 30, 0); + } while ((retval == DEVICE_RETRY || retval == DEVICE_RETRY_UA) + && count++ < 3); + } + leapioraid_scsihost_enable_ssu_on_sata(sas_device, sdev); + if (serial_number) + sdev_printk(KERN_INFO, sdev, "serial_number(%s)\n", + serial_number); + leapioraid__scsihost_change_queue_depth(sdev, qdepth); + if (ssp_target) { + sas_read_port_mode_page(sdev); + leapioraid_scsihost_enable_tlr(ioc, sdev); + } + + return 0; +} + +static int +leapioraid_scsihost_bios_param( + struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int params[]) +{ + int heads; + int sectors; + sector_t cylinders; + ulong dummy; + + heads = 64; + sectors = 32; + dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + if ((ulong) capacity >= 0x200000) { + heads = 255; + sectors = 63; + 
dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + } + params[0] = heads; + params[1] = sectors; + params[2] = cylinders; + return 0; +} + +static void +leapioraid_scsihost_response_code( + struct LEAPIORAID_ADAPTER *ioc, u8 response_code) +{ + char *desc; + + switch (response_code) { + case LEAPIORAID_SCSITASKMGMT_RSP_TM_COMPLETE: + desc = "task management request completed"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME: + desc = "invalid frame"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: + desc = "task management request not supported"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_TM_FAILED: + desc = "task management request failed"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_TM_SUCCEEDED: + desc = "task management request succeeded"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_TM_INVALID_LUN: + desc = "invalid lun"; + break; + case 0xA: + desc = "overlapped tag attempted"; + break; + case LEAPIORAID_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: + desc = "task queued, however not sent to target"; + break; + default: + desc = "unknown"; + break; + } + pr_warn("%s response_code(0x%01x): %s\n", + ioc->name, response_code, desc); +} + +static u8 +leapioraid_scsihost_tm_done( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + + if (ioc->tm_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + if (ioc->tm_cmds.smid != smid) + return 1; + ioc->tm_cmds.status |= LEAPIORAID_CMD_COMPLETE; + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) { + memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength * 4); + ioc->tm_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + } + ioc->tm_cmds.status &= ~LEAPIORAID_CMD_PENDING; + complete(&ioc->tm_cmds.done); + return 1; +} + +void +leapioraid_scsihost_set_tm_flag( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; 
+ u8 skip = 0; + + shost_for_each_device(sdev, ioc->shost) { + if (skip) + continue; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->handle == handle) { + sas_device_priv_data->sas_target->tm_busy = 1; + skip = 1; + ioc->ignore_loginfos = 1; + } + } +} + +void +leapioraid_scsihost_clear_tm_flag( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + u8 skip = 0; + + shost_for_each_device(sdev, ioc->shost) { + if (skip) + continue; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->handle == handle) { + sas_device_priv_data->sas_target->tm_busy = 0; + skip = 1; + ioc->ignore_loginfos = 0; + } + } +} + +static int +leapioraid_scsihost_tm_cmd_map_status( + struct LEAPIORAID_ADAPTER *ioc, uint channel, + uint id, uint lun, u8 type, u16 smid_task) +{ + if (smid_task <= ioc->shost->can_queue) { + switch (type) { + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + if (! + (leapioraid_scsihost_scsi_lookup_find_by_target + (ioc, id, channel))) + return SUCCESS; + break; + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + if (! 
+ (leapioraid_scsihost_scsi_lookup_find_by_lun + (ioc, id, lun, channel))) + return SUCCESS; + break; + default: + return SUCCESS; + } + } else if (smid_task == ioc->scsih_cmds.smid) { + if ((ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE) || + (ioc->scsih_cmds.status & LEAPIORAID_CMD_NOT_USED)) + return SUCCESS; + } else if (smid_task == ioc->ctl_cmds.smid) { + if ((ioc->ctl_cmds.status & LEAPIORAID_CMD_COMPLETE) || + (ioc->ctl_cmds.status & LEAPIORAID_CMD_NOT_USED)) + return SUCCESS; + } + return FAILED; +} + +static int +leapioraid_scsihost_tm_post_processing(struct LEAPIORAID_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, + u16 smid_task) +{ + int rc; + + rc = leapioraid_scsihost_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task); + if (rc == SUCCESS) + return rc; + pr_err( + "%s Poll finish of smid(%d),task_type(0x%02x),handle(0x%04x)\n", + ioc->name, + smid_task, + type, + handle); + leapioraid_base_mask_interrupts(ioc); + leapioraid_base_sync_reply_irqs(ioc, 1); + leapioraid_base_unmask_interrupts(ioc); + return leapioraid_scsihost_tm_cmd_map_status( + ioc, channel, id, lun, type, smid_task); +} + +int +leapioraid_scsihost_issue_tm( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, + u16 smid_task, u8 timeout, u8 tr_method) +{ + struct LeapioraidSCSITmgReq_t *mpi_request; + struct LeapioraidSCSITmgRep_t *mpi_reply; + struct LeapioraidSCSIIOReq_t *request; + u16 smid = 0; + u32 ioc_state; + struct leapioraid_scsiio_tracker *scsi_lookup = NULL; + int rc; + u16 msix_task = 0; + u8 issue_reset = 0; + + lockdep_assert_held(&ioc->tm_cmds.mutex); + if (ioc->tm_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_info("%s %s: tm_cmd busy!!!\n", + __func__, ioc->name); + return FAILED; + } + if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return FAILED; + } + ioc_state = leapioraid_base_get_iocstate(ioc, 
0); + if (ioc_state & LEAPIORAID_DOORBELL_USED) { + pr_info("%s unexpected doorbell active!\n", + ioc->name); + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + leapioraid_base_coredump_info(ioc, + ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } + smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + return FAILED; + } + if (type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + scsi_lookup = leapioraid_get_st_from_smid(ioc, smid_task); + dtmprintk(ioc, pr_info( + "%s sending tm: handle(0x%04x),\n\t\t" + "task_type(0x%02x), timeout(%d) tr_method(0x%x) smid(%d)\n", + ioc->name, + handle, + type, + timeout, + tr_method, + smid_task)); + ioc->tm_cmds.status = LEAPIORAID_CMD_PENDING; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->tm_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t)); + memset(ioc->tm_cmds.reply, 0, sizeof(struct LeapioraidSCSITmgRep_t)); + mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = type; + mpi_request->MsgFlags = tr_method; + if (type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK || + type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK) + mpi_request->TaskMID = cpu_to_le16(smid_task); + int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); + leapioraid_scsihost_set_tm_flag(ioc, handle); + init_completion(&ioc->tm_cmds.done); 
+ if ((type == LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK) && + (scsi_lookup && (scsi_lookup->msix_io < ioc->reply_queue_count))) + msix_task = scsi_lookup->msix_io; + else + msix_task = 0; + ioc->put_smid_hi_priority(ioc, smid, msix_task); + wait_for_completion_timeout(&ioc->tm_cmds.done, timeout * HZ); + if (!(ioc->tm_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->tm_cmds.status, mpi_request, + sizeof + (struct LeapioraidSCSITmgReq_t) + / 4, issue_reset); + if (issue_reset) { + rc = leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER); + rc = (!rc) ? SUCCESS : FAILED; + goto out; + } + } + leapioraid_base_sync_reply_irqs(ioc, 0); + if (ioc->tm_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + mpi_reply = ioc->tm_cmds.reply; + dtmprintk(ioc, pr_info( + "%s complete tm: ioc_status(0x%04x),\n\t\t" + "loginfo(0x%08x), term_count(0x%08x)\n", + ioc->name, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + if (ioc->logging_level & LEAPIORAID_DEBUG_TM) { + leapioraid_scsihost_response_code( + ioc, mpi_reply->ResponseCode); + if (mpi_reply->IOCStatus) + leapioraid_debug_dump_mf( + mpi_request, + sizeof(struct LeapioraidSCSITmgReq_t) / 4); + } + } + switch (type) { + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + rc = SUCCESS; + request = leapioraid_base_get_msg_frame(ioc, smid_task); + if (le16_to_cpu(request->DevHandle) != handle) + break; + pr_err( + "%s Task abort tm failed:\n\t\t" + "handle(0x%04x), timeout(%d),\n\t\t" + "tr_method(0x%x), smid(%d), msix_index(%d)\n", + ioc->name, + handle, + timeout, + tr_method, + smid_task, + msix_task); + rc = FAILED; + break; + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + case LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + rc = leapioraid_scsihost_tm_post_processing( + ioc, handle, channel, id, lun, type, smid_task); + break; + case 
LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK: + rc = SUCCESS; + break; + default: + rc = FAILED; + break; + } +out: + leapioraid_scsihost_clear_tm_flag(ioc, handle); + ioc->tm_cmds.status = LEAPIORAID_CMD_NOT_USED; + return rc; +} + +int +leapioraid_scsihost_issue_locked_tm( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, + u16 smid_task, u8 timeout, u8 tr_method) +{ + int ret; + + mutex_lock(&ioc->tm_cmds.mutex); + ret = leapioraid_scsihost_issue_tm( + ioc, handle, channel, id, lun, type, + smid_task, timeout, tr_method); + mutex_unlock(&ioc->tm_cmds.mutex); + return ret; +} + +static void +leapioraid_scsihost_tm_display_info( + struct LEAPIORAID_ADAPTER *ioc, + struct scsi_cmnd *scmd) +{ + struct scsi_target *starget = scmd->device->sdev_target; + struct LEAPIORAID_TARGET *priv_target = starget->hostdata; + struct leapioraid_sas_device *sas_device = NULL; + unsigned long flags; + char *device_str = NULL; + + if (!priv_target) + return; + if (ioc->warpdrive_msg) + device_str = "WarpDrive"; + else + device_str = "volume"; + scsi_print_command(scmd); + if (priv_target->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + starget_printk( + KERN_INFO, starget, "%s handle(0x%04x), %s wwid(0x%016llx)\n", + device_str, + priv_target->handle, device_str, + (unsigned long long)priv_target->sas_address); + } else { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = + __leapioraid_get_sdev_from_target(ioc, priv_target); + if (sas_device) { + if (priv_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) { + starget_printk(KERN_INFO, starget, + "volume handle(0x%04x), volume wwid(0x%016llx)\n", + sas_device->volume_handle, + (unsigned long long)sas_device->volume_wwid); + } + starget_printk(KERN_INFO, starget, + "%s: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", + __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address, sas_device->phy); + leapioraid_scsihost_display_enclosure_chassis_info(NULL, + 
sas_device, + NULL, starget); + leapioraid_sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } +} + +static int +leapioraid_scsihost_abort(struct scsi_cmnd *scmd) +{ + struct LEAPIORAID_ADAPTER *ioc + = leapioraid_shost_private(scmd->device->host); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + u16 handle; + int r; + struct leapioraid_scsiio_tracker *st + = leapioraid_base_scsi_cmd_priv(scmd); + u8 timeout = 30; + + sdev_printk( + KERN_INFO, scmd->device, + "attempting task abort! scmd(0x%p), outstanding for %u ms & timeout %u ms\n", + scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc), + (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000); + leapioraid_scsihost_tm_display_info(ioc, scmd); + if (leapioraid_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n", + ((ioc->remove_host) ? ("shost is getting removed!") + : ("pci device been removed!")), scmd); + if (st && st->smid) + leapioraid_base_free_smid(ioc, st->smid); + scmd->result = DID_NO_CONNECT << 16; + r = FAILED; + goto out; + } + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! 
scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + if (st == NULL || st->cb_idx == 0xFF) { + sdev_printk(KERN_INFO, scmd->device, + "No ref at driver, assuming scmd(0x%p) might have completed\n", + scmd); + scmd->result = DID_RESET << 16; + r = SUCCESS; + goto out; + } + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT || + sas_device_priv_data->sas_target->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + leapioraid_halt_firmware(ioc, 0); + handle = sas_device_priv_data->sas_target->handle; + r = leapioraid_scsihost_issue_locked_tm( + ioc, handle, + scmd->device->channel, + scmd->device->id, + scmd->device->lun, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + st->smid, timeout, 0); +out: + sdev_printk( + KERN_INFO, scmd->device, + "task abort: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + return r; +} + +static int +leapioraid_scsihost_dev_reset(struct scsi_cmnd *scmd) +{ + struct LEAPIORAID_ADAPTER *ioc + = leapioraid_shost_private(scmd->device->host); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct leapioraid_sas_device *sas_device = NULL; + u16 handle; + u8 tr_method = 0; + u8 tr_timeout = 30; + int r; + struct scsi_target *starget = scmd->device->sdev_target; + struct LEAPIORAID_TARGET *target_priv_data = starget->hostdata; + + sdev_printk(KERN_INFO, scmd->device, + "attempting device reset! scmd(0x%p)\n", scmd); + leapioraid_scsihost_tm_display_info(ioc, scmd); + if (leapioraid_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n", + ((ioc->remove_host) ? 
("shost is getting removed!") + : ("pci device been removed!")), scmd); + scmd->result = DID_NO_CONNECT << 16; + r = FAILED; + goto out; + } + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + handle = 0; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) { + sas_device = leapioraid_get_sdev_from_target(ioc, + target_priv_data); + if (sas_device) + handle = sas_device->volume_handle; + } else + handle = sas_device_priv_data->sas_target->handle; + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + r = leapioraid_scsihost_issue_locked_tm(ioc, handle, + scmd->device->channel, + scmd->device->id, + scmd->device->lun, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, + 0, tr_timeout, tr_method); +out: + sdev_printk(KERN_INFO, scmd->device, + "device reset: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + if (sas_device) + leapioraid_sas_device_put(sas_device); + return r; +} + +static int +leapioraid_scsihost_target_reset(struct scsi_cmnd *scmd) +{ + struct LEAPIORAID_ADAPTER *ioc + = leapioraid_shost_private(scmd->device->host); + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct leapioraid_sas_device *sas_device = NULL; + u16 handle; + u8 tr_method = 0; + u8 tr_timeout = 30; + int r; + struct scsi_target *starget = scmd->device->sdev_target; + struct LEAPIORAID_TARGET *target_priv_data = starget->hostdata; + + starget_printk(KERN_INFO, starget, + "attempting target reset! 
scmd(0x%p)\n", scmd); + leapioraid_scsihost_tm_display_info(ioc, scmd); + if (leapioraid_base_pci_device_is_unplugged(ioc) || ioc->remove_host) { + sdev_printk(KERN_INFO, scmd->device, "%s scmd(0x%p)\n", + ((ioc->remove_host) ? ("shost is getting removed!") + : ("pci device been removed!")), scmd); + scmd->result = DID_NO_CONNECT << 16; + r = FAILED; + goto out; + } + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + starget_printk(KERN_INFO, starget, + "target been deleted! scmd(0x%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + r = SUCCESS; + goto out; + } + handle = 0; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) { + sas_device = leapioraid_get_sdev_from_target(ioc, + target_priv_data); + if (sas_device) + handle = sas_device->volume_handle; + } else + handle = sas_device_priv_data->sas_target->handle; + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + r = leapioraid_scsihost_issue_locked_tm(ioc, handle, + scmd->device->channel, + scmd->device->id, 0, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + 0, tr_timeout, tr_method); +out: + starget_printk(KERN_INFO, starget, + "target reset: %s scmd(0x%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + if (sas_device) + leapioraid_sas_device_put(sas_device); + return r; +} + +static int +leapioraid_scsihost_host_reset(struct scsi_cmnd *scmd) +{ + struct LEAPIORAID_ADAPTER *ioc + = leapioraid_shost_private(scmd->device->host); + int r, retval; + + pr_info("%s attempting host reset! scmd(0x%p)\n", + ioc->name, scmd); + scsi_print_command(scmd); + if (ioc->is_driver_loading || ioc->remove_host) { + pr_info("%s Blocking the host reset\n", + ioc->name); + r = FAILED; + goto out; + } + retval = leapioraid_base_hard_reset_handler( + ioc, FORCE_BIG_HAMMER); + r = (retval < 0) ? 
FAILED : SUCCESS; +out: + pr_info("%s host reset: %s scmd(0x%p)\n", + ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), + scmd); + return r; +} + +static void +leapioraid_scsihost_fw_event_add(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + unsigned long flags; + + if (ioc->firmware_event_thread == NULL) + return; + spin_lock_irqsave(&ioc->fw_event_lock, flags); + leapioraid_fw_event_work_get(fw_event); + INIT_LIST_HEAD(&fw_event->list); + list_add_tail(&fw_event->list, &ioc->fw_event_list); + INIT_WORK(&fw_event->work, leapioraid_firmware_event_work); + leapioraid_fw_event_work_get(fw_event); + queue_work(ioc->firmware_event_thread, &fw_event->work); + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +static void +leapioraid_scsihost_fw_event_del_from_list( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + if (!list_empty(&fw_event->list)) { + list_del_init(&fw_event->list); + leapioraid_fw_event_work_put(fw_event); + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +static void +leapioraid_scsihost_fw_event_requeue( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event, unsigned long delay) +{ + unsigned long flags; + + if (ioc->firmware_event_thread == NULL) + return; + spin_lock_irqsave(&ioc->fw_event_lock, flags); + leapioraid_fw_event_work_get(fw_event); + list_add_tail(&fw_event->list, &ioc->fw_event_list); + if (!fw_event->delayed_work_active) { + fw_event->delayed_work_active = 1; + INIT_DELAYED_WORK(&fw_event->delayed_work, + leapioraid_firmware_event_work_delayed); + } + queue_delayed_work(ioc->firmware_event_thread, &fw_event->delayed_work, + msecs_to_jiffies(delay)); + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +static void +leapioraid_scsihost_error_recovery_delete_devices( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_fw_event_work 
*fw_event; + + fw_event = leapioraid_alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = LEAPIORAID_REMOVE_UNRESPONDING_DEVICES; + fw_event->ioc = ioc; + leapioraid_scsihost_fw_event_add(ioc, fw_event); + leapioraid_fw_event_work_put(fw_event); +} + +void +leapioraid_port_enable_complete(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_fw_event_work *fw_event; + + fw_event = leapioraid_alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = LEAPIORAID_PORT_ENABLE_COMPLETE; + fw_event->ioc = ioc; + leapioraid_scsihost_fw_event_add(ioc, fw_event); + leapioraid_fw_event_work_put(fw_event); +} + +static struct leapioraid_fw_event_work *dequeue_next_fw_event( + struct LEAPIORAID_ADAPTER *ioc) +{ + unsigned long flags; + struct leapioraid_fw_event_work *fw_event = NULL; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + if (!list_empty(&ioc->fw_event_list)) { + fw_event = list_first_entry(&ioc->fw_event_list, + struct leapioraid_fw_event_work, list); + list_del_init(&fw_event->list); + leapioraid_fw_event_work_put(fw_event); + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + return fw_event; +} + +static void +leapioraid_scsihost_fw_event_cleanup_queue( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_fw_event_work *fw_event; + bool rc = false; + + if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) || + !ioc->firmware_event_thread || in_interrupt()) + return; + + ioc->fw_events_cleanup = 1; + if (ioc->shost_recovery && ioc->current_event) + ioc->current_event->ignore = 1; + while ((fw_event = dequeue_next_fw_event(ioc)) || + (fw_event = ioc->current_event)) { + if (fw_event == ioc->current_event && + ioc->current_event->event != + LEAPIORAID_REMOVE_UNRESPONDING_DEVICES) { + ioc->current_event = NULL; + continue; + } + if (fw_event->event == LEAPIORAID_PORT_ENABLE_COMPLETE) { + ioc->port_enable_cmds.status |= LEAPIORAID_CMD_RESET; + ioc->start_scan = 0; + } + if (fw_event->delayed_work_active) + rc = 
		    cancel_delayed_work_sync(&fw_event->delayed_work);
		else
			rc = cancel_work_sync(&fw_event->work);
		/* Drop the queue's reference only if a pending work was
		 * actually cancelled (cancel_*work_sync() returned true).
		 */
		if (rc)
			leapioraid_fw_event_work_put(fw_event);
	}
	ioc->fw_events_cleanup = 0;
}

/*
 * leapioraid_scsihost_internal_device_block - move one scsi_device into the
 * SCSI midlayer's blocked state (non-waiting variant) and record that fact
 * in the per-device private data.
 */
static void
leapioraid_scsihost_internal_device_block(
	struct scsi_device *sdev,
	struct LEAPIORAID_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
		    sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 1;
	r = scsi_internal_device_block_nowait(sdev);
	if (r == -EINVAL)
		sdev_printk(KERN_WARNING, sdev,
			    "device_block failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);
}

/*
 * leapioraid_scsihost_internal_device_unblock - return a blocked scsi_device
 * to SDEV_RUNNING.  If the midlayer rejects the transition (-EINVAL), retry
 * once with an explicit block followed by an unblock.
 */
static void
leapioraid_scsihost_internal_device_unblock(
	struct scsi_device *sdev,
	struct LEAPIORAID_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_WARNING, sdev,
		    "device_unblock and setting to running, handle(0x%04x)\n",
		    sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 0;
	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
	if (r == -EINVAL) {
		/* Transition was illegal from the current sdev state; force a
		 * block/unblock cycle to reach SDEV_RUNNING.
		 */
		sdev_printk(KERN_WARNING, sdev,
			    "device_unblock failed with return(%d)\n\t\t"
			    "for handle(0x%04x) performing a block followed by an unblock\n",
			    r,
			    sas_device_priv_data->sas_target->handle);
		sas_device_priv_data->block = 1;
		r = scsi_internal_device_block_nowait(sdev);
		if (r)
			sdev_printk(KERN_WARNING, sdev,
				    "retried device_block failed with return(%d)\n\t\t"
				    "for handle(0x%04x)\n",
				    r,
				    sas_device_priv_data->sas_target->handle);
		sas_device_priv_data->block = 0;
		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
		if (r)
			sdev_printk(KERN_WARNING, sdev,
				    "retried device_unblock failed\n\t\t"
				    "with return(%d) for handle(0x%04x)\n",
				    r,
				    sas_device_priv_data->sas_target->handle);
	}
}

/*
 * leapioraid_scsihost_ublock_io_all_device - unblock every blocked device on
 * the host.  When @no_turs is set the devices are unblocked immediately;
 * otherwise each device is polled (up to ~144 retries, 1s apart) until it
 * reports ready, and devices that never become ready are marked deleted and
 * set SDEV_OFFLINE.
 */
static void
leapioraid_scsihost_ublock_io_all_device(
	struct LEAPIORAID_ADAPTER *ioc, u8 no_turs)
{
	struct LEAPIORAID_DEVICE *sas_device_priv_data;
	struct LEAPIORAID_TARGET *sas_target;
	enum device_responsive_state rc;
	struct scsi_device *sdev;
	struct leapioraid_sas_device *sas_device = NULL;
	int count;
	u8 tr_timeout = 30;
	u8 tr_method = 0;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		sas_target = sas_device_priv_data->sas_target;
		if (!sas_target || sas_target->deleted)
			continue;
		if (!sas_device_priv_data->block)
			continue;
		count = 0;
		if (no_turs) {
			/* Caller asked to skip the TUR-style readiness poll. */
			sdev_printk(KERN_WARNING, sdev,
				    "device_unblocked, handle(0x%04x)\n",
				    sas_device_priv_data->sas_target->handle);
			leapioraid_scsihost_internal_device_unblock(sdev,
				sas_device_priv_data);
			continue;
		}
		do {
			rc = leapioraid_scsihost_wait_for_device_to_become_ready(
				ioc,
				sas_target->handle,
				0,
				(sas_target->flags
				 & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT),
				sdev->lun,
				tr_timeout,
				tr_method);
			if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT
			    || rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
				ssleep(1);
		} while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
			  rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
			 && count++ < 144);
		/* NOTE(review): bound is "< 144" here but "<= 144" in
		 * leapioraid_scsihost_ublock_io_device_wait() — confirm the
		 * off-by-one difference is intentional.
		 */
		sas_device_priv_data->block = 0;
		if (rc != DEVICE_READY)
			sas_device_priv_data->deleted = 1;
		leapioraid_scsihost_internal_device_unblock(
			sdev, sas_device_priv_data);
		if (rc != DEVICE_READY) {
			sdev_printk(KERN_WARNING, sdev,
				    "%s: device_offlined, handle(0x%04x)\n",
				    __func__,
				    sas_device_priv_data->sas_target->handle);
			scsi_device_set_state(sdev, SDEV_OFFLINE);
			sas_device = leapioraid_get_sdev_by_addr(ioc,
				sas_device_priv_data->sas_target->sas_address,
				sas_device_priv_data->sas_target->port);
			if (sas_device) {
				leapioraid_scsihost_display_enclosure_chassis_info(
					NULL,
					sas_device,
					sdev,
					NULL);
				leapioraid_sas_device_put(sas_device);
			}
		} else
			sdev_printk(KERN_WARNING, sdev,
				    "device_unblocked, handle(0x%04x)\n",
				    sas_device_priv_data->sas_target->handle);
	}
}

/*
 * leapioraid_scsihost_ublock_io_device_wait - for every sdev belonging to the
 * target identified by @sas_address/@port: first re-block any OFFLINE member
 * (pass 1), then poll it until it reports ready and unblock it (pass 2).
 * Members that never become ready are marked deleted and left SDEV_OFFLINE.
 */
static void
leapioraid_scsihost_ublock_io_device_wait(
	struct LEAPIORAID_ADAPTER *ioc, u64 sas_address,
	struct leapioraid_hba_port *port)
{
	struct LEAPIORAID_DEVICE *sas_device_priv_data;
	struct LEAPIORAID_TARGET *sas_target;
	enum device_responsive_state rc;
	struct scsi_device *sdev;
	int count, host_reset_completion_count;
	struct leapioraid_sas_device *sas_device;
	u8 tr_timeout = 30;
	u8 tr_method = 0;

	/* pass 1: pull OFFLINE members back to a blocked RUNNING state */
	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		sas_target = sas_device_priv_data->sas_target;
		if (!sas_target)
			continue;
		if (sas_target->sas_address != sas_address ||
		    sas_target->port != port)
			continue;
		if (sdev->sdev_state == SDEV_OFFLINE) {
			sas_device_priv_data->block = 1;
			sas_device_priv_data->deleted = 0;
			scsi_device_set_state(sdev, SDEV_RUNNING);
			scsi_internal_device_block_nowait(sdev);
		}
	}
	/* pass 2: poll blocked members until ready, then unblock */
	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		sas_target = sas_device_priv_data->sas_target;
		if (!sas_target)
			continue;
		if (sas_target->sas_address != sas_address ||
		    sas_target->port != port)
			continue;
		if (!sas_device_priv_data->block)
			continue;
		count = 0;
		do {
			host_reset_completion_count = 0;
			rc = leapioraid_scsihost_wait_for_device_to_become_ready(
				ioc,
				sas_target->handle,
				0,
				(sas_target->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT),
				sdev->lun,
				tr_timeout,
				tr_method);
			if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT
			    || rc == DEVICE_STOP_UNIT
			    || rc == DEVICE_RETRY_UA) {
				/* While a host reset is in flight, just wait
				 * it out in 500ms steps before re-polling.
				 */
				do {
					msleep(500);
					host_reset_completion_count++;
				} while (rc == DEVICE_RETRY &&
					 ioc->shost_recovery);
				if (host_reset_completion_count > 1) {
					rc = leapioraid_scsihost_wait_for_device_to_become_ready(
						ioc, sas_target->handle, 0,
						(sas_target->flags
						 & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT),
						sdev->lun, tr_timeout, tr_method);
					if (rc == DEVICE_RETRY
					    || rc == DEVICE_START_UNIT
					    || rc == DEVICE_STOP_UNIT
					    || rc == DEVICE_RETRY_UA)
						msleep(500);
				}
				continue;
			}
		} while ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT ||
			  rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA)
			 && count++ <= 144);
		sas_device_priv_data->block = 0;
		if (rc != DEVICE_READY)
			sas_device_priv_data->deleted = 1;

		scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);

		if (rc != DEVICE_READY) {
			sdev_printk(KERN_WARNING, sdev,
				    "%s: device_offlined, handle(0x%04x)\n",
				    __func__,
				    sas_device_priv_data->sas_target->handle);
			sas_device =
			    leapioraid_get_sdev_by_handle(ioc,
				sas_device_priv_data->sas_target->handle);
			if (sas_device) {
				leapioraid_scsihost_display_enclosure_chassis_info(NULL,
					sas_device,
					sdev,
					NULL);
				leapioraid_sas_device_put(sas_device);
			}
			scsi_device_set_state(sdev, SDEV_OFFLINE);
		} else {
			sdev_printk(KERN_WARNING, sdev,
				    "device_unblocked, handle(0x%04x)\n",
				    sas_device_priv_data->sas_target->handle);
		}
	}
}

/*
 * leapioraid_scsihost_ublock_io_device - unblock any blocked sdev belonging
 * to the target identified by @sas_address/@port.
 *
 * NOTE(review): every matching sdev is then set to SDEV_OFFLINE
 * unconditionally, including ones just unblocked — confirm this is the
 * intended "unblock then offline" semantic and not a leftover.
 */
static void
leapioraid_scsihost_ublock_io_device(
	struct LEAPIORAID_ADAPTER *ioc, u64 sas_address,
	struct leapioraid_hba_port *port)
{
	struct LEAPIORAID_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		if (sas_device_priv_data->sas_target->sas_address
		    != sas_address ||
		    sas_device_priv_data->sas_target->port != port)
			continue;
		if (sas_device_priv_data->block) {
			leapioraid_scsihost_internal_device_unblock(sdev,
				sas_device_priv_data);
		}
		scsi_device_set_state(sdev, SDEV_OFFLINE);
	}
}

/*
 * leapioraid_scsihost_block_io_all_device - block I/O to every device on the
 * host that is not already blocked (body continues in the next chunk; SES
 * devices flagged ignore_delay_remove are skipped there).
 */
static void leapioraid_scsihost_block_io_all_device(
	struct LEAPIORAID_ADAPTER *ioc)
{
	struct LEAPIORAID_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->block)
			continue;
		/* SES/enclosure devices are exempt from delayed-remove
		 * blocking.
		 */
		if (sas_device_priv_data->ignore_delay_remove) {
			sdev_printk(KERN_INFO, sdev,
				    "%s skip device_block for SES handle(0x%04x)\n",
				    __func__,
				    sas_device_priv_data->sas_target->handle);
			continue;
		}
		leapioraid_scsihost_internal_device_block(
			sdev, sas_device_priv_data);
	}
}

/*
 * leapioraid_scsihost_block_io_device - block I/O to every sdev whose target
 * matches the firmware device @handle, unless it is already blocked, is an
 * exempt SES device, or its rphy add is still pending.
 */
static void
leapioraid_scsihost_block_io_device(
	struct LEAPIORAID_ADAPTER *ioc, u16 handle)
{
	struct LEAPIORAID_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	struct leapioraid_sas_device *sas_device;

	sas_device = leapioraid_get_sdev_by_handle(ioc, handle);
	shost_for_each_device(sdev, ioc->shost) {
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle != handle)
			continue;
		if (sas_device_priv_data->block)
			continue;
		if (sas_device && sas_device->pend_sas_rphy_add)
			continue;
		if (sas_device_priv_data->ignore_delay_remove) {
			sdev_printk(KERN_INFO, sdev,
				    "%s skip device_block for SES handle(0x%04x)\n",
				    __func__,
				    sas_device_priv_data->sas_target->handle);
			continue;
		}
		leapioraid_scsihost_internal_device_block(
			sdev, sas_device_priv_data);
	}
	if (sas_device)
		leapioraid_sas_device_put(sas_device);
}

/*
 * leapioraid_scsihost_block_io_to_children_attached_to_ex - walk an
 * expander's port list: mark every attached end device's handle in
 * ioc->blocking_handles, then recurse into child expanders.
 */
static void
leapioraid_scsihost_block_io_to_children_attached_to_ex(
	struct LEAPIORAID_ADAPTER *ioc,
	struct leapioraid_raid_sas_node *sas_expander)
{
	struct leapioraid_sas_port *leapioraid_port;
	struct leapioraid_sas_device *sas_device;
	struct leapioraid_raid_sas_node *expander_sibling;
	unsigned long flags;

	if (!sas_expander)
		return;
	list_for_each_entry(leapioraid_port,
			    &sas_expander->sas_port_list, port_list) {
		if (leapioraid_port->remote_identify.device_type ==
		    SAS_END_DEVICE) {
			spin_lock_irqsave(&ioc->sas_device_lock, flags);
			sas_device =
			    __leapioraid_get_sdev_by_addr(ioc,
				leapioraid_port->remote_identify.sas_address,
				leapioraid_port->hba_port);
			if (sas_device) {
				set_bit(sas_device->handle,
					ioc->blocking_handles);
				leapioraid_sas_device_put(sas_device);
			}
			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		}
	}
	list_for_each_entry(leapioraid_port,
			    &sas_expander->sas_port_list, port_list) {
		if (leapioraid_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    leapioraid_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE) {
			expander_sibling =
			    leapioraid_scsihost_expander_find_by_sas_address
			    (ioc, leapioraid_port->remote_identify.sas_address,
			     leapioraid_port->hba_port);
			leapioraid_scsihost_block_io_to_children_attached_to_ex(
				ioc, expander_sibling);
		}
	}
}

/*
 * leapioraid_scsihost_block_io_to_children_attached_directly - for each PHY
 * entry in a topology-change event whose reason code is
 * DELAY_NOT_RESPONDING, block I/O to the attached device handle.
 */
static void
leapioraid_scsihost_block_io_to_children_attached_directly(
	struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidEventDataSasTopoChangeList_t *event_data)
{
	int i;
	u16 handle;
	u16 reason_code;

	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    LEAPIORAID_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code ==
		    LEAPIORAID_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
			leapioraid_scsihost_block_io_device(ioc, handle);
	}
}

/*
 * leapioraid_scsihost_tm_tr_send - issue a high-priority target-reset task
 * management request for @handle.  Marks the target deleted, unblocks its
 * sdevs, and, if no hi-priority smid is free, queues the request on
 * ioc->delayed_tr_list for later replay.  (Body continues in the next chunk.)
 */
static void
leapioraid_scsihost_tm_tr_send(
	struct LEAPIORAID_ADAPTER *ioc, u16 handle)
{
	struct LeapioraidSCSITmgReq_t *mpi_request;
	u16 smid;
	struct leapioraid_sas_device *sas_device = NULL;
	struct LEAPIORAID_TARGET *sas_target_priv_data = NULL;
	u64 sas_address = 0;
	unsigned long flags;
	struct leapioraid_tr_list *delayed_tr;
	u32 ioc_state;
	struct leapioraid_hba_port *port = NULL;
	u8 tr_method = 0;

	if (ioc->pci_error_recovery) {
		dewtprintk(ioc, pr_info(
			"%s %s: host in pci error recovery: handle(0x%04x)\n",
			__func__, ioc->name, handle));
		return;
	}
	ioc_state = leapioraid_base_get_iocstate(ioc, 1);
	if (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc, pr_info(
			"%s %s: host is not operational: handle(0x%04x)\n",
			__func__, ioc->name, handle));
		return;
	}
	/* Hidden RAID components are reset via the volume path, not here. */
	if (test_bit(handle, ioc->pd_handles))
		return;
	clear_bit(handle, ioc->pend_os_device_add);
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __leapioraid_get_sdev_by_handle(ioc, handle);
	if (sas_device && sas_device->starget && sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	/* Unknown device: ask firmware for a link reset alongside the TM. */
	if (!sas_device)
		tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	if (sas_target_priv_data) {
		dewtprintk(ioc, pr_err(
			"%s %s: setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
			ioc->name, __func__, handle,
			(unsigned long long)sas_address));
		if (sas_device) {
			dewtprintk(ioc,
				leapioraid_scsihost_display_enclosure_chassis_info(
					ioc,
					sas_device,
					NULL,
					NULL));
		}
		leapioraid_scsihost_ublock_io_device(ioc, sas_address, port);
		sas_target_priv_data->handle =
		    LEAPIORAID_INVALID_DEVICE_HANDLE;
	}
	smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
	if (!smid) {
		/* No free hi-priority frame: defer this TR. */
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			goto out;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
		dewtprintk(ioc, pr_err(
			"%s DELAYED:tr:handle(0x%04x), (open)\n",
			ioc->name, handle));
		goto out;
	}
	dewtprintk(ioc, pr_info(
		"%s tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
		ioc->name, handle,
		smid, ioc->tm_tr_cb_idx));
	mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t));
	mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	mpi_request->MsgFlags = tr_method;
	set_bit(handle, ioc->device_remove_in_progress);
	ioc->put_smid_hi_priority(ioc, smid, 0);
out:
	if (sas_device)
		leapioraid_sas_device_put(sas_device);
}

/*
 * leapioraid_scsihost_tm_tr_complete - completion callback for the target
 * reset issued by leapioraid_scsihost_tm_tr_send().  On success, follows up
 * with an IO_UNIT_CONTROL REMOVE_DEVICE request (deferred to
 * ioc->delayed_sc_list if no smid is free).  Returns 1 to free the smid via
 * the caller, 0 when the smid was reused/deferred.
 */
static u8
leapioraid_scsihost_tm_tr_complete(
	struct LEAPIORAID_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	u16 handle;
	struct LeapioraidSCSITmgReq_t *mpi_request_tm;
	struct LeapioraidSCSITmgRep_t *mpi_reply =
	    leapioraid_base_get_reply_virt_addr(ioc, reply);
	struct LeapioraidSasIoUnitControlReq_t *mpi_request;
	u16 smid_sas_ctrl;
	u32 ioc_state;
	struct leapioraid_sc_list *delayed_sc;

	if (ioc->pci_error_recovery) {
		dewtprintk(ioc, pr_info(
			"%s %s: host in pci error recovery\n", __func__,
			ioc->name));
		return 1;
	}
	ioc_state = leapioraid_base_get_iocstate(ioc, 1);
	if (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc, pr_info(
			"%s %s: host is not operational\n", __func__, ioc->name));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		pr_err(
			"%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name,
			__FILE__, __LINE__, __func__);
		return 1;
	}
	mpi_request_tm = leapioraid_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc, pr_err(
			"%s spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
			ioc->name, handle,
			le16_to_cpu(mpi_reply->DevHandle), smid));
		return 0;
	}
	dewtprintk(ioc, pr_err(
		"%s tr_complete: handle(0x%04x), (open) smid(%d),\n\t\t"
		"ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
		ioc->name,
		handle,
		smid,
		le16_to_cpu(mpi_reply->IOCStatus),
		le32_to_cpu(mpi_reply->IOCLogInfo),
		le32_to_cpu(mpi_reply->TerminationCount)));
	smid_sas_ctrl =
	    leapioraid_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
	if (!smid_sas_ctrl) {
		/* No free frame: defer the REMOVE_DEVICE follow-up. */
		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
		if (!delayed_sc)
			return leapioraid_scsihost_check_for_pending_tm(ioc, smid);
		INIT_LIST_HEAD(&delayed_sc->list);
		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
		dewtprintk(ioc, pr_err(
			"%s DELAYED:sc:handle(0x%04x), (open)\n",
			ioc->name, handle));
		return leapioraid_scsihost_check_for_pending_tm(ioc, smid);
	}
	dewtprintk(ioc, pr_info(
		"%s sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
		ioc->name, handle,
		smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
	mpi_request = leapioraid_base_get_msg_frame(ioc, smid_sas_ctrl);
	/* NOTE(review): memset size is sizeof(LeapioraidIoUnitControlReq_t)
	 * while mpi_request points at LeapioraidSasIoUnitControlReq_t —
	 * confirm the two structs have the same size.
	 */
	memset(mpi_request, 0, sizeof(struct LeapioraidIoUnitControlReq_t));
	mpi_request->Function = LEAPIORAID_FUNC_IO_UNIT_CONTROL;
	mpi_request->Operation = LEAPIORAID_CTRL_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = mpi_request_tm->DevHandle;
	ioc->put_smid_default(ioc, smid_sas_ctrl);
	return leapioraid_scsihost_check_for_pending_tm(ioc, smid);
}

/*
 * leapioraid_scsihost_allow_scmd_to_device - gate for queuing a SCSI command:
 * false during PCI error recovery or over-temperature; during host removal
 * only SYNCHRONIZE_CACHE and START_STOP are allowed (and nothing once the
 * PCI device is unplugged).
 */
inline bool
leapioraid_scsihost_allow_scmd_to_device(
	struct LEAPIORAID_ADAPTER *ioc,
	struct scsi_cmnd *scmd)
{
	if (ioc->pci_error_recovery)
		return false;
	if (ioc->adapter_over_temp)
		return false;
	if (ioc->remove_host) {
		if (leapioraid_base_pci_device_is_unplugged(ioc))
			return false;
		switch (scmd->cmnd[0]) {
		case SYNCHRONIZE_CACHE:
		case START_STOP:
			return true;
		default:
			return false;
		}
	}
	return true;
}

/*
 * leapioraid_scsihost_sas_control_complete - completion callback for the
 * IO_UNIT_CONTROL REMOVE_DEVICE request.  On success clears the
 * device_remove_in_progress bit; on failure retries tm_tr_send up to 3
 * times before giving up.
 */
static u8
leapioraid_scsihost_sas_control_complete(
	struct LEAPIORAID_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	struct LeapioraidDefaultRep_t *mpi_reply =
	    leapioraid_base_get_reply_virt_addr(ioc, reply);
	u16 dev_handle;

	if (likely(mpi_reply)) {
		/* NOTE(review): dev_handle is copied raw from the reply and
		 * converted with le16_to_cpu() at every use — so it holds a
		 * little-endian value despite the plain u16 type; confirm.
		 */
		dev_handle
		    = ((struct LeapioraidIoUnitControlRep_t *)mpi_reply)->DevHandle;
		dewtprintk(ioc, pr_err(
			"%s sc_complete:handle(0x%04x), (open) smid(%d),\n\t\t"
			"ioc_status(0x%04x), loginfo(0x%08x)\n",
			ioc->name,
			le16_to_cpu(dev_handle),
			smid,
			le16_to_cpu(mpi_reply->IOCStatus),
			le32_to_cpu(mpi_reply->IOCLogInfo)));
		if (le16_to_cpu(mpi_reply->IOCStatus) ==
		    LEAPIORAID_IOCSTATUS_SUCCESS) {
			clear_bit(le16_to_cpu(dev_handle),
				  ioc->device_remove_in_progress);
			ioc->tm_tr_retry[le16_to_cpu(dev_handle)] = 0;
		} else if (ioc->tm_tr_retry[le16_to_cpu(dev_handle)] < 3) {
			dewtprintk(ioc, pr_err(
				"%s re-initiating tm_tr_send:handle(0x%04x)\n",
				ioc->name,
				le16_to_cpu(dev_handle)));
			ioc->tm_tr_retry[le16_to_cpu(dev_handle)]++;
			leapioraid_scsihost_tm_tr_send(ioc, le16_to_cpu(dev_handle));
		} else {
			dewtprintk(ioc, pr_err(
				"%s Exiting out of tm_tr_send retries:handle(0x%04x)\n",
				ioc->name,
				le16_to_cpu(dev_handle)));
			ioc->tm_tr_retry[le16_to_cpu(dev_handle)] = 0;
			clear_bit(le16_to_cpu(dev_handle),
				  ioc->device_remove_in_progress);
		}
	} else {
		pr_err(
			"%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name,
			__FILE__, __LINE__, __func__);
	}
	return leapioraid_check_for_pending_internal_cmds(ioc, smid);
}

/*
 * leapioraid_scsihost_tm_tr_volume_send - issue a target reset for a RAID
 * volume handle; deferred to ioc->delayed_tr_volume_list if no hi-priority
 * smid is free.
 */
static void
leapioraid_scsihost_tm_tr_volume_send(
	struct LEAPIORAID_ADAPTER *ioc, u16 handle)
{
	struct LeapioraidSCSITmgReq_t *mpi_request;
	u16 smid;
	struct leapioraid_tr_list *delayed_tr;

	if (ioc->pci_error_recovery) {
		dewtprintk(ioc, pr_info(
			"%s %s: host reset in progress!\n", __func__, ioc->name));
		return;
	}
	smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
	if (!smid) {
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			return;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
		dewtprintk(ioc, pr_err(
			"%s DELAYED:tr:handle(0x%04x), (open)\n",
			ioc->name, handle));
		return;
	}
	dewtprintk(ioc, pr_info(
		"%s tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
		ioc->name, handle,
		smid, ioc->tm_tr_volume_cb_idx));
	mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t));
	mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	ioc->put_smid_hi_priority(ioc, smid, 0);
}

/*
 * leapioraid_scsihost_tm_volume_tr_complete - completion callback for the
 * volume target reset; validates the reply handle then replays any pending
 * deferred TM requests.  Returns 1 to free the smid, 0 when reused.
 */
static u8
leapioraid_scsihost_tm_volume_tr_complete(
	struct LEAPIORAID_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	u16 handle;
	struct LeapioraidSCSITmgReq_t *mpi_request_tm;
	struct LeapioraidSCSITmgRep_t *mpi_reply =
	    leapioraid_base_get_reply_virt_addr(ioc, reply);

	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		dewtprintk(ioc, pr_info(
			"%s %s: host reset in progress!\n", __func__, ioc->name));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		pr_err(
			"%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name,
			__FILE__, __LINE__, __func__);
		return 1;
	}
	mpi_request_tm = leapioraid_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc, pr_err(
			"%s spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
			ioc->name, handle,
			le16_to_cpu(mpi_reply->DevHandle), smid));
		return 0;
	}
	dewtprintk(ioc, pr_err(
		"%s tr_complete:handle(0x%04x), (open) smid(%d),\n\t\t"
		"ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
		ioc->name,
		handle,
		smid,
		le16_to_cpu(mpi_reply->IOCStatus),
		le32_to_cpu(mpi_reply->IOCLogInfo),
		le32_to_cpu(mpi_reply->TerminationCount)));
	return leapioraid_scsihost_check_for_pending_tm(ioc, smid);
}

/*
 * leapioraid_scsihost_tm_internal_tr_send - issue an internally generated
 * target reset (always with the LINK_RESET msg flag); deferred to
 * ioc->delayed_internal_tm_list if no hi-priority smid is free.
 * (Body continues in the next chunk.)
 */
static void
leapioraid_scsihost_tm_internal_tr_send(
	struct LEAPIORAID_ADAPTER *ioc, u16 handle)
{
	struct leapioraid_tr_list *delayed_tr;
	struct LeapioraidSCSITmgReq_t *mpi_request;
	u16 smid;
	u8 tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	smid = leapioraid_base_get_smid_hpr(ioc, ioc->tm_tr_internal_cb_idx);
	if (!smid) {
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			return;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list,
			      &ioc->delayed_internal_tm_list);
		dewtprintk(ioc,
			   pr_err(
				"%s DELAYED:tr:handle(0x%04x), (open)\n",
				ioc->name, handle));
		return;
	}
	dewtprintk(ioc, pr_info(
		"%s tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
		ioc->name, handle,
		smid, ioc->tm_tr_internal_cb_idx));
	mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(struct LeapioraidSCSITmgReq_t));
	mpi_request->Function = LEAPIORAID_FUNC_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	mpi_request->MsgFlags = tr_method;
	ioc->put_smid_hi_priority(ioc, smid, 0);
}

/*
 * leapioraid_scsihost_tm_internal_tr_complete - completion callback for the
 * internal target reset; logs the reply then replays any pending deferred
 * TM requests.  Returns 1 to free the smid, 0 when reused.
 */
static u8
leapioraid_scsihost_tm_internal_tr_complete(
	struct LEAPIORAID_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	struct LeapioraidSCSITmgRep_t *mpi_reply =
	    leapioraid_base_get_reply_virt_addr(ioc, reply);

	if (likely(mpi_reply)) {
		dewtprintk(ioc, pr_err(
			"%s tr_complete:handle(0x%04x),\n\t\t"
			"(open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
			ioc->name,
			le16_to_cpu(mpi_reply->DevHandle),
			smid,
			le16_to_cpu(mpi_reply->IOCStatus),
			le32_to_cpu(mpi_reply->IOCLogInfo)));
	} else {
		pr_err("%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name,
		       __FILE__, __LINE__, __func__);
		return 1;
	}
	return leapioraid_scsihost_check_for_pending_tm(ioc, smid);
}

/*
 * leapioraid_scsihost_issue_delayed_event_ack - replay a deferred EVENT_ACK
 * on a now-free internal @smid, retargeting its completion callback to
 * base_cb_idx.
 *
 * NOTE(review): @event is logged through le16_to_cpu(), implying it is
 * stored little-endian as received from firmware — confirm against the
 * LeapioraidEventAckReq_t definition.
 */
static void
leapioraid_scsihost_issue_delayed_event_ack(
	struct LEAPIORAID_ADAPTER *ioc, u16 smid,
	U16 event, U32 event_context)
{
	struct LeapioraidEventAckReq_t *ack_request;
	int i = smid - ioc->internal_smid;
	unsigned long flags;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	dewtprintk(ioc, pr_info(
		"%s EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
		ioc->name, le16_to_cpu(event),
		smid, ioc->base_cb_idx));
	ack_request = leapioraid_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(struct LeapioraidEventAckReq_t));
	ack_request->Function = LEAPIORAID_FUNC_EVENT_ACK;
	ack_request->Event = event;
	ack_request->EventContext = event_context;
	ack_request->VF_ID = 0;
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);
}

/*
 * leapioraid_scsihost_issue_delayed_sas_io_unit_ctrl - replay a deferred
 * IO_UNIT_CONTROL (remove device) request on internal @smid, retargeting the
 * completion callback to tm_sas_control_cb_idx.  Bails out if the host is
 * being removed, in PCI error recovery, or not operational.
 *
 * NOTE(review): Operation 0x0D is presumably REMOVE_DEVICE (matches
 * LEAPIORAID_CTRL_OP_REMOVE_DEVICE used elsewhere) — confirm and prefer the
 * named constant.  The memset size uses LeapioraidIoUnitControlReq_t while
 * the pointer type is LeapioraidSasIoUnitControlReq_t — confirm sizes match.
 */
static void
leapioraid_scsihost_issue_delayed_sas_io_unit_ctrl(
	struct LEAPIORAID_ADAPTER *ioc,
	u16 smid, u16 handle)
{
	struct LeapioraidSasIoUnitControlReq_t *mpi_request;
	u32 ioc_state;
	int i = smid - ioc->internal_smid;
	unsigned long flags;

	if (ioc->remove_host) {
		dewtprintk(ioc, pr_info(
			"%s %s: host has been removed\n", __func__, ioc->name));
		return;
	} else if (ioc->pci_error_recovery) {
		dewtprintk(ioc, pr_info(
			"%s %s: host in pci error recovery\n", __func__,
			ioc->name));
		return;
	}
	ioc_state = leapioraid_base_get_iocstate(ioc, 1);
	if (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc, pr_info(
			"%s %s: host is not operational\n", __func__, ioc->name));
		return;
	}
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	dewtprintk(ioc, pr_info(
		"%s sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
		ioc->name, handle,
		smid, ioc->tm_sas_control_cb_idx));
	mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(struct LeapioraidIoUnitControlReq_t));
	mpi_request->Function = LEAPIORAID_FUNC_IO_UNIT_CONTROL;
	mpi_request->Operation = 0x0D;
	mpi_request->DevHandle = cpu_to_le16(handle);
	ioc->put_smid_default(ioc, smid);
}

/*
 * leapioraid_check_for_pending_internal_cmds - reuse a freed internal @smid
 * for the oldest deferred event ack or deferred sas-io-unit-control request.
 * Returns 0 when the smid was consumed by a replay, 1 when nothing was
 * pending (caller frees the smid).
 */
u8
leapioraid_check_for_pending_internal_cmds(struct LEAPIORAID_ADAPTER *ioc,
					   u16 smid)
{
	struct leapioraid_sc_list *delayed_sc;
	struct leapioraid_event_ack_list *delayed_event_ack;

	if (!list_empty(&ioc->delayed_event_ack_list)) {
		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
			struct leapioraid_event_ack_list, list);
		leapioraid_scsihost_issue_delayed_event_ack(ioc, smid,
			delayed_event_ack->Event,
			delayed_event_ack->EventContext);
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
		return 0;
	}
	if (!list_empty(&ioc->delayed_sc_list)) {
		delayed_sc = list_entry(ioc->delayed_sc_list.next,
			struct leapioraid_sc_list, list);
		leapioraid_scsihost_issue_delayed_sas_io_unit_ctrl(ioc, smid,
			delayed_sc->handle);
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
		return 0;
	}
	return 1;
}

/*
 * leapioraid_scsihost_check_for_pending_tm - on TM completion, free @smid and
 * replay the oldest deferred target reset (volume, open, or internal lists,
 * in that priority order).  Returns 0 when a deferred TM was replayed, 1
 * when none was pending (caller frees the smid).
 */
static u8
leapioraid_scsihost_check_for_pending_tm(
	struct LEAPIORAID_ADAPTER *ioc, u16 smid)
{
	struct leapioraid_tr_list *delayed_tr;

	if (!list_empty(&ioc->delayed_tr_volume_list)) {
		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
			struct leapioraid_tr_list, list);
		leapioraid_base_free_smid(ioc, smid);
		leapioraid_scsihost_tm_tr_volume_send(ioc, delayed_tr->handle);
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
		return 0;
	}
	if (!list_empty(&ioc->delayed_tr_list)) {
		delayed_tr = list_entry(ioc->delayed_tr_list.next,
			struct leapioraid_tr_list, list);
		leapioraid_base_free_smid(ioc, smid);
		leapioraid_scsihost_tm_tr_send(ioc, delayed_tr->handle);
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
		return 0;
	}
	if (!list_empty(&ioc->delayed_internal_tm_list)) {
		delayed_tr = list_entry(ioc->delayed_internal_tm_list.next,
			struct leapioraid_tr_list, list);
		leapioraid_base_free_smid(ioc, smid);
		leapioraid_scsihost_tm_internal_tr_send(
			ioc, delayed_tr->handle);
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
		return 0;
	}
	return 1;
}

/*
 * leapioraid_scsihost_check_topo_delete_events - pre-process a SAS topology
 * change list event: fire target resets for not-responding devices, block
 * I/O under delay-not-responding expanders/PHYs, and mark queued duplicate
 * topology events as ignorable when the expander is going away.
 * (Body continues in the next chunk.)
 */
static void
leapioraid_scsihost_check_topo_delete_events(
	struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidEventDataSasTopoChangeList_t *event_data)
{
	struct leapioraid_fw_event_work *fw_event;
	struct LeapioraidEventDataSasTopoChangeList_t *local_event_data;
	u16 expander_handle;
	struct leapioraid_raid_sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	for (i = 0; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    LEAPIORAID_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code ==
		    LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			leapioraid_scsihost_tm_tr_send(ioc, handle);
	}
	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	/* Handle below num_phys => devices attached directly to the HBA. */
	if (expander_handle < ioc->sas_hba.num_phys) {
		leapioraid_scsihost_block_io_to_children_attached_directly(
			ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    LEAPIORAID_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* Collect every child handle into blocking_handles under the
		 * node lock, then block them outside the collection loop.
		 */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = leapioraid_scsihost_expander_find_by_handle(
			ioc, expander_handle);
		leapioraid_scsihost_block_io_to_children_attached_to_ex(
			ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		do {
			handle = find_first_bit(ioc->blocking_handles,
						ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				leapioraid_scsihost_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus ==
		   LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING)
		leapioraid_scsihost_block_io_to_children_attached_directly(
			ioc, event_data);
	if (event_data->ExpStatus != LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;
	/* Expander is gone: mark any queued ADDED/RESPONDING events for the
	 * same expander as ignorable.
	 */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = fw_event->event_data;
		if (local_event_data->ExpStatus ==
		    LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING)
		{
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc, pr_err(
					"%s setting ignoring flag\n",
					ioc->name));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}

/*
 * leapioraid_scsihost_set_volume_delete_flag - under raid_device_lock, mark
 * the target private data of the RAID volume @handle as deleted.
 */
static void
leapioraid_scsihost_set_volume_delete_flag(
	struct LEAPIORAID_ADAPTER *ioc, u16 handle)
{
	struct leapioraid_raid_device *raid_device;
	struct LEAPIORAID_TARGET *sas_target_priv_data;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = leapioraid_raid_device_find_by_handle(
		ioc, handle);
	if (raid_device && raid_device->starget &&
	    raid_device->starget->hostdata) {
		sas_target_priv_data = raid_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		dewtprintk(ioc, pr_err(
			"%s setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
			ioc->name, handle,
			(unsigned long long)raid_device->wwid));
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}

/*
 * leapioraid_scsihost_set_volume_handle_for_tr - record @handle into the
 * first free of the two volume-handle slots @a/@b (skipping 0 and
 * duplicates).  Used to dedupe volume target resets.
 */
static void
leapioraid_scsihost_set_volume_handle_for_tr(
	u16 handle, u16 *a, u16 *b)
{
	if (!handle || handle == *a || handle == *b)
		return;
	if (!*a)
		*a = handle;
	else if (!*b)
		*b = handle;
}

/*
 * leapioraid_scsihost_check_ir_config_unhide_events - pre-process an IR
 * configuration change list: flag deleted/removed volumes, issue volume
 * target resets (at most two, deduped via a/b), then reset each unhidden
 * physical disk — deferring the reset when its volume reset is in flight.
 */
static void
leapioraid_scsihost_check_ir_config_unhide_events(
	struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidEventDataIrCfgChangeList_t *event_data)
{
	struct LeapioraidEventIrCfgEle_t *element;
	int i;
	u16 handle, volume_handle, a, b;
	struct leapioraid_tr_list *delayed_tr;

	a = 0;
	b = 0;
	/* pass 1: volumes deleted or removed */
	element =
	    (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode ==
		    LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
		    element->ReasonCode == LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			leapioraid_scsihost_set_volume_delete_flag(ioc, volume_handle);
			leapioraid_scsihost_set_volume_handle_for_tr(
				volume_handle, &a, &b);
		}
	}
	/* pass 2: volumes whose components are being unhidden */
	element =
	    (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode == LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			leapioraid_scsihost_set_volume_handle_for_tr(
				volume_handle, &a, &b);
		}
	}
	if (a)
		leapioraid_scsihost_tm_tr_volume_send(ioc, a);
	if (b)
		leapioraid_scsihost_tm_tr_volume_send(ioc, b);
	/* pass 3: unhidden physical disks */
	element =
	    (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (element->ReasonCode != LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE)
			continue;
		handle = le16_to_cpu(element->PhysDiskDevHandle);
		volume_handle = le16_to_cpu(element->VolDevHandle);
		clear_bit(handle, ioc->pd_handles);
		if (!volume_handle)
			leapioraid_scsihost_tm_tr_send(ioc, handle);
		else if (volume_handle == a || volume_handle == b) {
			/* Volume reset in flight: defer the disk reset. */
			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
			BUG_ON(!delayed_tr);
			INIT_LIST_HEAD(&delayed_tr->list);
			delayed_tr->handle = handle;
			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
			dewtprintk(ioc, pr_err(
				"%s DELAYED:tr:handle(0x%04x), (open)\n",
				ioc->name, handle));
		} else
			leapioraid_scsihost_tm_tr_send(ioc, handle);
	}
}

/*
 * leapioraid_scsihost_check_volume_delete_events - on an IR volume state
 * change to MISSING or FAILED, flag the volume's target as deleted.
 */
static void
leapioraid_scsihost_check_volume_delete_events(
	struct LEAPIORAID_ADAPTER *ioc,
	struct LeapioraidEventDataIrVol_t *event_data)
{
	u32 state;

	if (event_data->ReasonCode != LEAPIORAID_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;
	state = le32_to_cpu(event_data->NewValue);
	if (state == LEAPIORAID_RAID_VOL_STATE_MISSING || state ==
	    LEAPIORAID_RAID_VOL_STATE_FAILED)
		leapioraid_scsihost_set_volume_delete_flag(
			ioc, le16_to_cpu(event_data->VolDevHandle));
}

/*
 * leapioraid_scsihost_set_satl_pending - track the single outstanding
 * ATA-passthrough (ATA_12/ATA_16) command per device.  When @pending, returns
 * nonzero if another SATL command is already outstanding (caller must retry);
 * otherwise clears the pending bit.  Non-ATA commands always return 0.
 */
static int
leapioraid_scsihost_set_satl_pending(
	struct scsi_cmnd *scmd, bool pending)
{
	struct LEAPIORAID_DEVICE *priv = scmd->device->hostdata;

	if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
		return 0;
	if (pending)
		return test_and_set_bit(LEAPIORAID_CMND_PENDING_BIT,
					&priv->ata_command_pending);
	clear_bit(LEAPIORAID_CMND_PENDING_BIT, &priv->ata_command_pending);
	return 0;
}

/*
 * leapioraid_scsihost_flush_running_cmds - complete every outstanding SCSI
 * command back to the midlayer, with DID_NO_CONNECT when the adapter is
 * gone/failed/over-temp/being removed and DID_RESET otherwise.  Used during
 * host reset / removal.
 */
void
leapioraid_scsihost_flush_running_cmds(
	struct LEAPIORAID_ADAPTER *ioc)
{
	struct scsi_cmnd *scmd;
	struct leapioraid_scsiio_tracker *st;
	u16 smid;
	u16 count = 0;

	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
		scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		count++;
		st = leapioraid_base_scsi_cmd_priv(scmd);
		/* smid 0 tracker => command not actually dispatched. */
		if (st && st->smid == 0)
			continue;
		leapioraid_scsihost_set_satl_pending(scmd, false);
		leapioraid_base_get_msg_frame(ioc, smid);
		scsi_dma_unmap(scmd);

		leapioraid_base_clear_st(ioc, st);
		if ((!leapioraid_base_pci_device_is_available(ioc)) ||
		    (ioc->ioc_reset_status != 0)
		    || ioc->adapter_over_temp || ioc->remove_host)
			scmd->result = DID_NO_CONNECT << 16;
		else
			scmd->result = DID_RESET << 16;
		scsi_done(scmd);
	}
	dtmprintk(ioc, pr_info("%s completing %d cmds\n",
			       ioc->name, count));
}

/*
 * scsih_is_io_belongs_to_RT_class - true when the request carrying @scmd has
 * real-time I/O priority (used to set the NCQ priority bit).
 */
static inline u8 scsih_is_io_belongs_to_RT_class(
	struct scsi_cmnd *scmd)
{
	struct request *rq = scsi_cmd_to_rq(scmd);

	return (IOPRIO_PRIO_CLASS(req_get_ioprio(rq)) == IOPRIO_CLASS_RT);
}

/*
 * leapioraid_scsihost_qcmd - .queuecommand entry point: validate device and
 * adapter state, build the SCSI_IO request frame, and hand it to firmware.
 * (Body continues in the next chunk.)
 */
static int
leapioraid_scsihost_qcmd(
	struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	struct LEAPIORAID_ADAPTER *ioc
	    = leapioraid_shost_private(scmd->device->host);
	struct LEAPIORAID_DEVICE *sas_device_priv_data;
	struct LEAPIORAID_TARGET *sas_target_priv_data;
	struct LeapioraidSCSIIOReq_t *mpi_request;
	u32 mpi_control;
	u16 smid;
	u16 handle;
	int rc = 0;

	/* continues in the next chunk */
	if
(ioc->logging_level & LEAPIORAID_DEBUG_SCSI) + scsi_print_command(scmd); + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + if (!(leapioraid_scsihost_allow_scmd_to_device(ioc, scmd))) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + sas_target_priv_data = sas_device_priv_data->sas_target; + handle = sas_target_priv_data->handle; + if (handle == LEAPIORAID_INVALID_DEVICE_HANDLE) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } + if (sas_device_priv_data->block && + scmd->device->host->shost_state == SHOST_RECOVERY && + scmd->cmnd[0] == TEST_UNIT_READY) { + scsi_build_sense(scmd, 0, UNIT_ATTENTION, + 0x29, 0x07); + scsi_done(scmd); + goto out; + } + if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } else if (sas_target_priv_data->deleted || + sas_device_priv_data->deleted) { + scmd->result = DID_NO_CONNECT << 16; + scsi_done(scmd); + goto out; + } else if (sas_target_priv_data->tm_busy || sas_device_priv_data->block) { + rc = SCSI_MLQUEUE_DEVICE_BUSY; + goto out; + } + do { + if (test_bit(LEAPIORAID_CMND_PENDING_BIT, + &sas_device_priv_data->ata_command_pending)) { + rc = SCSI_MLQUEUE_DEVICE_BUSY; + goto out; + } + } while (leapioraid_scsihost_set_satl_pending(scmd, true)); + if (scmd->sc_data_direction == DMA_FROM_DEVICE) + mpi_control = LEAPIORAID_SCSIIO_CONTROL_READ; + else if (scmd->sc_data_direction == DMA_TO_DEVICE) + mpi_control = LEAPIORAID_SCSIIO_CONTROL_WRITE; + else + mpi_control = LEAPIORAID_SCSIIO_CONTROL_NODATATRANSFER; + mpi_control |= LEAPIORAID_SCSIIO_CONTROL_SIMPLEQ; + if (sas_device_priv_data->ncq_prio_enable) { + if (scsih_is_io_belongs_to_RT_class(scmd)) + mpi_control |= 1 << LEAPIORAID_SCSIIO_CONTROL_CMDPRI_SHIFT; + } + if ((sas_device_priv_data->flags & LEAPIORAID_DEVICE_TLR_ON) && + 
scmd->cmd_len != 32) + mpi_control |= LEAPIORAID_SCSIIO_CONTROL_TLR_ON; + smid = leapioraid_base_get_smid_scsiio( + ioc, ioc->scsi_io_cb_idx, scmd); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = SCSI_MLQUEUE_HOST_BUSY; + leapioraid_scsihost_set_satl_pending(scmd, false); + goto out; + } + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + if (scmd->cmd_len == 32) + mpi_control |= 4 << LEAPIORAID_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; + mpi_request->Function = LEAPIORAID_FUNC_SCSI_IO_REQUEST; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) + mpi_request->Function = + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH; + else + mpi_request->Function = LEAPIORAID_FUNC_SCSI_IO_REQUEST; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); + mpi_request->Control = cpu_to_le32(mpi_control); + mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len); + mpi_request->MsgFlags = LEAPIORAID_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; + mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + mpi_request->SenseBufferLowAddress = + leapioraid_base_get_sense_buffer_dma(ioc, smid); + mpi_request->SGLOffset0 = offsetof(struct LeapioraidSCSIIOReq_t, SGL) / 4; + int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *) + mpi_request->LUN); + memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + if (mpi_request->DataLength) { + if (ioc->build_sg_scmd(ioc, scmd, smid)) { + leapioraid_base_free_smid(ioc, smid); + rc = SCSI_MLQUEUE_HOST_BUSY; + leapioraid_scsihost_set_satl_pending(scmd, false); + goto out; + } + } else + ioc->build_zero_len_sge(ioc, &mpi_request->SGL); + if (likely(mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST)) { + if (sas_target_priv_data->flags & LEAPIORAID_TARGET_FASTPATH_IO) { + mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | 0x4000); + ioc->put_smid_fast_path(ioc, smid, handle); + } else + ioc->put_smid_scsi_io(ioc, smid, + 
le16_to_cpu(mpi_request->DevHandle)); + } else + ioc->put_smid_default(ioc, smid); +out: + return rc; +} + +static void +leapioraid_scsihost_normalize_sense( + char *sense_buffer, struct sense_info *data) +{ + if ((sense_buffer[0] & 0x7F) >= 0x72) { + data->skey = sense_buffer[1] & 0x0F; + data->asc = sense_buffer[2]; + data->ascq = sense_buffer[3]; + } else { + data->skey = sense_buffer[2] & 0x0F; + data->asc = sense_buffer[12]; + data->ascq = sense_buffer[13]; + } +} + +static void +leapioraid_scsihost_scsi_ioc_info( + struct LEAPIORAID_ADAPTER *ioc, struct scsi_cmnd *scmd, + struct LeapioraidSCSIIORep_t *mpi_reply, u16 smid, + u8 scsi_status, u16 error_response_count) +{ + u32 response_info; + u8 *response_bytes; + u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + u8 scsi_state = mpi_reply->SCSIState; + char *desc_ioc_state = NULL; + char *desc_scsi_status = NULL; + char *desc_scsi_state = ioc->tmp_string; + u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + struct leapioraid_sas_device *sas_device = NULL; + struct scsi_target *starget = scmd->device->sdev_target; + struct LEAPIORAID_TARGET *priv_target = starget->hostdata; + char *device_str = NULL; + + if (!priv_target) + return; + if (ioc->warpdrive_msg) + device_str = "WarpDrive"; + else + device_str = "volume"; + if (log_info == 0x31170000) + return; + switch (ioc_status) { + case LEAPIORAID_IOCSTATUS_SUCCESS: + desc_ioc_state = "success"; + break; + case LEAPIORAID_IOCSTATUS_INVALID_FUNCTION: + desc_ioc_state = "invalid function"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR: + desc_ioc_state = "scsi recovered error"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_INVALID_DEVHANDLE: + desc_ioc_state = "scsi invalid dev handle"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + desc_ioc_state = "scsi device not there"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN: + desc_ioc_state = "scsi data overrun"; + break; + case 
LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN: + desc_ioc_state = "scsi data underrun"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR: + desc_ioc_state = "scsi io data error"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR: + desc_ioc_state = "scsi protocol error"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED: + desc_ioc_state = "scsi task terminated"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + desc_ioc_state = "scsi residual mismatch"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + desc_ioc_state = "scsi task mgmt failed"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED: + desc_ioc_state = "scsi ioc terminated"; + break; + case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED: + desc_ioc_state = "scsi ext terminated"; + break; + case LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR: + if (!ioc->disable_eedp_support) { + desc_ioc_state = "eedp guard error"; + break; + } + fallthrough; + case LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR: + if (!ioc->disable_eedp_support) { + desc_ioc_state = "eedp ref tag error"; + break; + } + fallthrough; + case LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR: + if (!ioc->disable_eedp_support) { + desc_ioc_state = "eedp app tag error"; + break; + } + fallthrough; + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER: + desc_ioc_state = "insufficient power"; + break; + default: + desc_ioc_state = "unknown"; + break; + } + switch (scsi_status) { + case LEAPIORAID_SCSI_STATUS_GOOD: + desc_scsi_status = "good"; + break; + case LEAPIORAID_SCSI_STATUS_CHECK_CONDITION: + desc_scsi_status = "check condition"; + break; + case LEAPIORAID_SCSI_STATUS_CONDITION_MET: + desc_scsi_status = "condition met"; + break; + case LEAPIORAID_SCSI_STATUS_BUSY: + desc_scsi_status = "busy"; + break; + case LEAPIORAID_SCSI_STATUS_INTERMEDIATE: + desc_scsi_status = "intermediate"; + break; + case LEAPIORAID_SCSI_STATUS_INTERMEDIATE_CONDMET: + desc_scsi_status = "intermediate condmet"; + break; + case 
LEAPIORAID_SCSI_STATUS_RESERVATION_CONFLICT: + desc_scsi_status = "reservation conflict"; + break; + case LEAPIORAID_SCSI_STATUS_COMMAND_TERMINATED: + desc_scsi_status = "command terminated"; + break; + case LEAPIORAID_SCSI_STATUS_TASK_SET_FULL: + desc_scsi_status = "task set full"; + break; + case LEAPIORAID_SCSI_STATUS_ACA_ACTIVE: + desc_scsi_status = "aca active"; + break; + case LEAPIORAID_SCSI_STATUS_TASK_ABORTED: + desc_scsi_status = "task aborted"; + break; + default: + desc_scsi_status = "unknown"; + break; + } + desc_scsi_state[0] = '\0'; + if (!scsi_state) + desc_scsi_state = " "; + if (scsi_state & LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID) + strcat(desc_scsi_state, "response info "); + if (scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED) + strcat(desc_scsi_state, "state terminated "); + if (scsi_state & LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS) + strcat(desc_scsi_state, "no status "); + if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED) + strcat(desc_scsi_state, "autosense failed "); + if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) + strcat(desc_scsi_state, "autosense valid "); + scsi_print_command(scmd); + if (priv_target->flags & LEAPIORAID_TARGET_FLAGS_VOLUME) { + pr_warn("%s \t%s wwid(0x%016llx)\n", + ioc->name, device_str, + (unsigned long long)priv_target->sas_address); + } else { + sas_device = leapioraid_get_sdev_from_target(ioc, priv_target); + if (sas_device) { + pr_warn( + "%s \t%s: sas_address(0x%016llx), phy(%d)\n", + ioc->name, __func__, (unsigned long long) + sas_device->sas_address, sas_device->phy); + leapioraid_scsihost_display_enclosure_chassis_info(ioc, + sas_device, + NULL, NULL); + leapioraid_sas_device_put(sas_device); + } + } + pr_warn( + "%s \thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n", + ioc->name, le16_to_cpu(mpi_reply->DevHandle), desc_ioc_state, + ioc_status, smid); + pr_warn("%s \trequest_len(%d), underflow(%d), resid(%d)\n", + ioc->name, scsi_bufflen(scmd), scmd->underflow, + scsi_get_resid(scmd)); + 
pr_warn("%s \ttag(%d), transfer_count(%d), sc->result(0x%08x)\n", + ioc->name, + le16_to_cpu(mpi_reply->TaskTag), + le32_to_cpu(mpi_reply->TransferCount), scmd->result); + pr_warn("%s \tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n", + ioc->name, desc_scsi_status, + scsi_status, desc_scsi_state, scsi_state); + if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) { + struct sense_info data; + + leapioraid_scsihost_normalize_sense(scmd->sense_buffer, &data); + pr_warn( + "%s \t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n", + ioc->name, + data.skey, data.asc, data.ascq, + le32_to_cpu(mpi_reply->SenseCount)); + } + if (scsi_state & LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID) { + response_info = le32_to_cpu(mpi_reply->ResponseInfo); + response_bytes = (u8 *) &response_info; + leapioraid_scsihost_response_code(ioc, response_bytes[0]); + } +} + +static void +leapioraid_scsihost_turn_on_pfa_led( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct LeapioraidSepRep_t mpi_reply; + struct LeapioraidSepReq_t mpi_request; + struct leapioraid_sas_device *sas_device; + + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + if (!sas_device) + return; + memset(&mpi_request, 0, sizeof(struct LeapioraidSepReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = LEAPIORAID_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = + cpu_to_le32(LEAPIORAID_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); + mpi_request.DevHandle = cpu_to_le16(handle); + mpi_request.Flags = LEAPIORAID_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS; + if ((leapioraid_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + sas_device->pfa_led_on = 1; + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, pr_info( + "%s enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply.IOCStatus), + 
le32_to_cpu(mpi_reply.IOCLogInfo))); + goto out; + } +out: + leapioraid_sas_device_put(sas_device); +} + +static void +leapioraid_scsihost_turn_off_pfa_led(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + struct LeapioraidSepRep_t mpi_reply; + struct LeapioraidSepReq_t mpi_request; + + memset(&mpi_request, 0, sizeof(struct LeapioraidSepReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = LEAPIORAID_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = 0; + mpi_request.Slot = cpu_to_le16(sas_device->slot); + mpi_request.DevHandle = 0; + mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle); + mpi_request.Flags = LEAPIORAID_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS; + if ((leapioraid_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, pr_info( + "%s enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + return; + } +} + +static void +leapioraid_scsihost_send_event_to_turn_on_pfa_led( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle) +{ + struct leapioraid_fw_event_work *fw_event; + + fw_event = leapioraid_alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = LEAPIORAID_TURN_ON_PFA_LED; + fw_event->device_handle = handle; + fw_event->ioc = ioc; + leapioraid_scsihost_fw_event_add(ioc, fw_event); + leapioraid_fw_event_work_put(fw_event); +} + +static void +leapioraid_scsihost_smart_predicted_fault( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 from_sata_smart_polling) +{ + struct scsi_target *starget; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct LeapioraidEventNotificationRep_t *event_reply; + struct LeapioraidEventDataSasDeviceStatusChange_t *event_data; + struct 
leapioraid_sas_device *sas_device; + ssize_t sz; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + if (!sas_device) + goto out_unlock; + + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + if ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) + || ((sas_target_priv_data->flags & LEAPIORAID_TARGET_FLAGS_VOLUME))) + goto out_unlock; + leapioraid_scsihost_display_enclosure_chassis_info(NULL, sas_device, NULL, + starget); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (from_sata_smart_polling) + leapioraid_scsihost_send_event_to_turn_on_pfa_led(ioc, handle); + sz = offsetof(struct LeapioraidEventNotificationRep_t, EventData) + + sizeof(struct LeapioraidEventDataSasDeviceStatusChange_t); + event_reply = kzalloc(sz, GFP_ATOMIC); + if (!event_reply) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + event_reply->Function = LEAPIORAID_FUNC_EVENT_NOTIFICATION; + event_reply->Event = + cpu_to_le16(LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE); + event_reply->MsgLength = sz / 4; + event_reply->EventDataLength = + cpu_to_le16(sizeof(struct LeapioraidEventDataSasDeviceStatusChange_t) / 4); + event_data = (struct LeapioraidEventDataSasDeviceStatusChange_t *) + event_reply->EventData; + event_data->ReasonCode = LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA; + event_data->ASC = 0x5D; + event_data->DevHandle = cpu_to_le16(handle); + event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address); + leapioraid_ctl_add_to_event_log(ioc, event_reply); + kfree(event_reply); +out: + if (sas_device) + leapioraid_sas_device_put(sas_device); + return; +out_unlock: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + goto out; +} + +static u8 +leapioraid_scsihost_io_done( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidSCSIIOReq_t 
*mpi_request; + struct LeapioraidSCSIIORep_t *mpi_reply; + struct scsi_cmnd *scmd; + u16 ioc_status, error_response_count = 0; + u32 xfer_cnt; + u8 scsi_state; + u8 scsi_status; + u32 log_info; + struct LEAPIORAID_DEVICE *sas_device_priv_data; + u32 response_code = 0; + struct leapioraid_scsiio_tracker *st; + + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (scmd == NULL) + return 1; + leapioraid_scsihost_set_satl_pending(scmd, false); + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply == NULL) { + scmd->result = DID_OK << 16; + goto out; + } + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + sas_device_priv_data->sas_target->deleted) { + scmd->result = DID_NO_CONNECT << 16; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + st = leapioraid_base_scsi_cmd_priv(scmd); + if (st->direct_io && ((ioc_status & LEAPIORAID_IOCSTATUS_MASK) + != LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED)) { + st->scmd = scmd; + st->direct_io = 0; + memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + mpi_request->DevHandle = + cpu_to_le16(sas_device_priv_data->sas_target->handle); + ioc->put_smid_scsi_io(ioc, smid, + sas_device_priv_data->sas_target->handle); + return 0; + } + scsi_state = mpi_reply->SCSIState; + if (scsi_state & LEAPIORAID_SCSI_STATE_RESPONSE_INFO_VALID) + response_code = le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; + if (!sas_device_priv_data->tlr_snoop_check) { + sas_device_priv_data->tlr_snoop_check++; + if ((sas_device_priv_data->flags & LEAPIORAID_DEVICE_TLR_ON) && + response_code == LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME) + sas_device_priv_data->flags &= ~LEAPIORAID_DEVICE_TLR_ON; + } + if (ioc_status & LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) + log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + else + log_info = 0; + ioc_status &= LEAPIORAID_IOCSTATUS_MASK; + scsi_status = 
mpi_reply->SCSIStatus; + xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); + scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); + if (ioc_status == LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN + && xfer_cnt == 0 + && (scsi_status == LEAPIORAID_SCSI_STATUS_BUSY + || scsi_status == LEAPIORAID_SCSI_STATUS_RESERVATION_CONFLICT + || scsi_status == LEAPIORAID_SCSI_STATUS_TASK_SET_FULL)) { + ioc_status = LEAPIORAID_IOCSTATUS_SUCCESS; + } + if (scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) { + struct sense_info data; + const void *sense_data = leapioraid_base_get_sense_buffer(ioc, + smid); + u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, + le32_to_cpu(mpi_reply->SenseCount)); + memcpy(scmd->sense_buffer, sense_data, sz); + leapioraid_scsihost_normalize_sense(scmd->sense_buffer, &data); + if (data.asc == 0x5D) + leapioraid_scsihost_smart_predicted_fault(ioc, + le16_to_cpu(mpi_reply->DevHandle), + 0); + } + switch (ioc_status) { + case LEAPIORAID_IOCSTATUS_BUSY: + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES: + scmd->result = SAM_STAT_BUSY; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + scmd->result = DID_NO_CONNECT << 16; + break; + case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED: + if (sas_device_priv_data->block) { + scmd->result = DID_TRANSPORT_DISRUPTED << 16; + goto out; + } + if (log_info == 0x31110630) { + if (scmd->retries > 2) { + scmd->result = DID_NO_CONNECT << 16; + scsi_device_set_state(scmd->device, + SDEV_OFFLINE); + } else { + scmd->result = DID_SOFT_ERROR << 16; + scmd->device->expecting_cc_ua = 1; + } + break; + } else if (log_info == 0x32010081) { + scmd->result = DID_RESET << 16; + break; + } else if ((scmd->device->channel == RAID_CHANNEL) && + (scsi_state == (LEAPIORAID_SCSI_STATE_TERMINATED | + LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS))) { + scmd->result = DID_RESET << 16; + break; + } + scmd->result = DID_SOFT_ERROR << 16; + break; + case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED: + case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED: + 
scmd->result = DID_RESET << 16; + break; + case LEAPIORAID_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt)) + scmd->result = DID_SOFT_ERROR << 16; + else + scmd->result = (DID_OK << 16) | scsi_status; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN: + scmd->result = (DID_OK << 16) | scsi_status; + if ((scsi_state & LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID)) + break; + if (xfer_cnt < scmd->underflow) { + if (scsi_status == SAM_STAT_BUSY) + scmd->result = SAM_STAT_BUSY; + else + scmd->result = DID_SOFT_ERROR << 16; + } else if (scsi_state & (LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED | + LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS)) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) { + mpi_reply->SCSIState = + LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID; + mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION; + scsi_build_sense(scmd, 0, + ILLEGAL_REQUEST, 0x20, + 0); + } + break; + case LEAPIORAID_IOCSTATUS_SCSI_DATA_OVERRUN: + scsi_set_resid(scmd, 0); + fallthrough; + case LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR: + case LEAPIORAID_IOCSTATUS_SUCCESS: + scmd->result = (DID_OK << 16) | scsi_status; + if (response_code == + LEAPIORAID_SCSITASKMGMT_RSP_INVALID_FRAME || + (scsi_state & (LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED | + LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS))) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + break; + case LEAPIORAID_IOCSTATUS_EEDP_GUARD_ERROR: + case LEAPIORAID_IOCSTATUS_EEDP_REF_TAG_ERROR: + fallthrough; + case LEAPIORAID_IOCSTATUS_EEDP_APP_TAG_ERROR: + fallthrough; + case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR: + case LEAPIORAID_IOCSTATUS_INVALID_FUNCTION: + case LEAPIORAID_IOCSTATUS_INVALID_SGL: + case LEAPIORAID_IOCSTATUS_INTERNAL_ERROR: + case LEAPIORAID_IOCSTATUS_INVALID_FIELD: + case 
LEAPIORAID_IOCSTATUS_INVALID_STATE: + case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR: + case LEAPIORAID_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER: + default: + scmd->result = DID_SOFT_ERROR << 16; + break; + } + if (scmd->result && (ioc->logging_level & LEAPIORAID_DEBUG_REPLY)) + leapioraid_scsihost_scsi_ioc_info( + ioc, scmd, mpi_reply, smid, scsi_status, + error_response_count); +out: + scsi_dma_unmap(scmd); + leapioraid_base_free_smid(ioc, smid); + scsi_done(scmd); + return 0; +} + +static void +leapioraid_scsihost_update_vphys_after_reset( + struct LEAPIORAID_ADAPTER *ioc) +{ + u16 sz, ioc_status; + int i; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u64 attached_sas_addr; + u8 found = 0, port_id; + struct LeapioraidSasPhyP0_t phy_pg0; + struct leapioraid_hba_port *port, *port_next, *mport; + struct leapioraid_virtual_phy *vphy, *vphy_next; + struct leapioraid_sas_device *sas_device; + + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, + list) { + vphy->flags |= LEAPIORAID_VPHY_FLAG_DIRTY_PHY; + } + } + sz = offsetof(struct LeapioraidSasIOUnitP0_t, PhyData) + + (ioc->sas_hba.num_phys + * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < + LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + continue; + if 
(!(le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) + & LEAPIORAID_SAS_DEVICE_INFO_SEP)) + continue; + if ((leapioraid_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + i))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + continue; + } + if (! + (le32_to_cpu(phy_pg0.PhyInfo) & + LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY)) + continue; + attached_handle = + le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle); + if (leapioraid_scsihost_get_sas_address + (ioc, attached_handle, &attached_sas_addr) + != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + continue; + } + found = 0; + port = port_next = NULL; + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, + list) { + if (!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + if (! + (vphy->flags & LEAPIORAID_VPHY_FLAG_DIRTY_PHY)) + continue; + if (vphy->sas_address != attached_sas_addr) + continue; + if (!(vphy->phy_mask & (1 << i))) + vphy->phy_mask = (1 << i); + port_id = sas_iounit_pg0->PhyData[i].Port; + mport = + leapioraid_get_port_by_id(ioc, port_id, 1); + if (!mport) { + mport = + kzalloc(sizeof(struct leapioraid_hba_port), + GFP_KERNEL); + if (!mport) { + pr_err( + "%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, + __LINE__, __func__); + break; + } + mport->port_id = port_id; + pr_err( + "%s %s: hba_port entry: %p, port: %d is added to hba_port list\n", + ioc->name, __func__, mport, + mport->port_id); + list_add_tail(&mport->list, + &ioc->port_table_list); + } + if (port != mport) { + if (!mport->vphys_mask) + INIT_LIST_HEAD(&mport->vphys_list); + mport->vphys_mask |= (1 << i); + port->vphys_mask &= ~(1 << i); + list_move(&vphy->list, + &mport->vphys_list); + sas_device = + leapioraid_get_sdev_by_addr(ioc, + attached_sas_addr, + port); + if (sas_device) + sas_device->port = mport; + } + if (mport->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT) { + 
mport->sas_address = 0; + mport->phy_mask = 0; + mport->flags &= + ~LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT; + } + vphy->flags &= ~LEAPIORAID_VPHY_FLAG_DIRTY_PHY; + found = 1; + break; + } + if (found) + break; + } + } +out: + kfree(sas_iounit_pg0); +} + +static u8 +leapioraid_scsihost_get_port_table_after_reset( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *port_table) +{ + u16 sz, ioc_status; + int i, j; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u64 attached_sas_addr; + u8 found = 0, port_count = 0, port_id; + + sz = offsetof(struct LeapioraidSasIOUnitP0_t, PhyData) + + (ioc->sas_hba.num_phys + * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return port_count; + } + if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + found = 0; + if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) < + LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + continue; + attached_handle = + le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle); + if (leapioraid_scsihost_get_sas_address + (ioc, attached_handle, &attached_sas_addr) + != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + continue; + } + for (j = 0; j < port_count; j++) { + port_id = sas_iounit_pg0->PhyData[i].Port; + if ((port_table[j].port_id == port_id) && + (port_table[j].sas_address == attached_sas_addr)) { + port_table[j].phy_mask |= (1 << i); + found = 1; + break; + } + } + if (found) + continue; + port_id = sas_iounit_pg0->PhyData[i].Port; + port_table[port_count].port_id = port_id; + 
port_table[port_count].phy_mask = (1 << i); + port_table[port_count].sas_address = attached_sas_addr; + port_count++; + } +out: + kfree(sas_iounit_pg0); + return port_count; +} + +enum hba_port_matched_codes { + NOT_MATCHED = 0, + MATCHED_WITH_ADDR_AND_PHYMASK, + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT, + MATCHED_WITH_ADDR_AND_SUBPHYMASK, + MATCHED_WITH_ADDR, +}; +static int +leapioraid_scsihost_look_and_get_matched_port_entry( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *port_entry, + struct leapioraid_hba_port **matched_port_entry, + int *count) +{ + struct leapioraid_hba_port *port_table_entry, *matched_port = NULL; + enum hba_port_matched_codes matched_code = NOT_MATCHED; + int lcount = 0; + + *matched_port_entry = NULL; + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { + if (!(port_table_entry->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT)) + continue; + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask == port_entry->phy_mask)) { + matched_code = MATCHED_WITH_ADDR_AND_PHYMASK; + matched_port = port_table_entry; + break; + } + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask & port_entry->phy_mask) + && (port_table_entry->port_id == port_entry->port_id)) { + matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT; + matched_port = port_table_entry; + continue; + } + if ((port_table_entry->sas_address == port_entry->sas_address) + && (port_table_entry->phy_mask & port_entry->phy_mask)) { + if (matched_code == + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) + continue; + matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK; + matched_port = port_table_entry; + continue; + } + if (port_table_entry->sas_address == port_entry->sas_address) { + if (matched_code == + MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT) + continue; + if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK) + continue; + matched_code = MATCHED_WITH_ADDR; + matched_port = port_table_entry; 
+ lcount++; + } + } + *matched_port_entry = matched_port; + if (matched_code == MATCHED_WITH_ADDR) + *count = lcount; + return matched_code; +} + +static void +leapioraid_scsihost_del_phy_part_of_anther_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *port_table, + int index, u8 port_count, int offset) +{ + struct leapioraid_raid_sas_node *sas_node = &ioc->sas_hba; + u32 i, found = 0; + + for (i = 0; i < port_count; i++) { + if (i == index) + continue; + if (port_table[i].phy_mask & (1 << offset)) { + leapioraid_transport_del_phy_from_an_existing_port( + ioc, + sas_node, + &sas_node->phy + [offset]); + found = 1; + break; + } + } + if (!found) + port_table[index].phy_mask |= (1 << offset); +} + +static void +leapioraid_scsihost_add_or_del_phys_from_existing_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_hba_port *hba_port_entry, + struct leapioraid_hba_port *port_table, + int index, u8 port_count) +{ + u32 phy_mask, offset = 0; + struct leapioraid_raid_sas_node *sas_node = &ioc->sas_hba; + + phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask; + for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) { + if (phy_mask & (1 << offset)) { + if (!(port_table[index].phy_mask & (1 << offset))) { + leapioraid_scsihost_del_phy_part_of_anther_port( + ioc, + port_table, + index, + port_count, + offset); + } else { +#if defined(LEAPIORAID_WIDE_PORT_API) + if (sas_node->phy[offset].phy_belongs_to_port) + leapioraid_transport_del_phy_from_an_existing_port + (ioc, sas_node, + &sas_node->phy[offset]); + leapioraid_transport_add_phy_to_an_existing_port + (ioc, sas_node, &sas_node->phy[offset], + hba_port_entry->sas_address, + hba_port_entry); +#endif + } + } + } +} + +static void +leapioraid_scsihost_del_dirty_vphy(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_hba_port *port, *port_next; + struct leapioraid_virtual_phy *vphy, *vphy_next; + + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if 
(!port->vphys_mask) + continue; + list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, + list) { + if (vphy->flags & LEAPIORAID_VPHY_FLAG_DIRTY_PHY) { + drsprintk(ioc, pr_err( + "%s Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n", + ioc->name, vphy, + port->port_id, + vphy->phy_mask)); + port->vphys_mask &= ~vphy->phy_mask; + list_del(&vphy->list); + kfree(vphy); + } + } + if (!port->vphys_mask && !port->sas_address) + port->flags |= LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT; + } +} + +static void +leapioraid_scsihost_del_dirty_port_entries( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_hba_port *port, *port_next; + + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if (!(port->flags & LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT) || + port->flags & LEAPIORAID_HBA_PORT_FLAG_NEW_PORT) + continue; + drsprintk(ioc, pr_err( + "%s Deleting port table entry %p having Port id: %d\t, Phy_mask 0x%08x\n", + ioc->name, port, port->port_id, + port->phy_mask)); + list_del(&port->list); + kfree(port); + } +} + +static void +leapioraid_scsihost_sas_port_refresh(struct LEAPIORAID_ADAPTER *ioc) +{ + u8 port_count = 0; + struct leapioraid_hba_port *port_table; + struct leapioraid_hba_port *port_table_entry; + struct leapioraid_hba_port *port_entry = NULL; + int i, j, ret, count = 0, lcount = 0; + u64 sas_addr; + u8 num_phys; + + drsprintk(ioc, pr_err( + "%s updating ports for sas_host(0x%016llx)\n", + ioc->name, + (unsigned long long)ioc->sas_hba.sas_address)); + leapioraid_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if (num_phys > ioc->sas_hba.nr_phys_allocated) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc->sas_hba.num_phys = num_phys; + port_table = kcalloc(ioc->sas_hba.num_phys, + sizeof(struct leapioraid_hba_port), GFP_KERNEL); + if (!port_table) + return; + 
port_count = leapioraid_scsihost_get_port_table_after_reset( + ioc, port_table); + if (!port_count) + return; + drsprintk(ioc, + pr_info("%s New Port table\n", ioc->name)); + for (j = 0; j < port_count; j++) + drsprintk(ioc, pr_err( + "%s Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", + ioc->name, port_table[j].port_id, + port_table[j].phy_mask, + port_table[j].sas_address)); + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { + port_table_entry->flags |= LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT; + } + drsprintk(ioc, + pr_info("%s Old Port table\n", ioc->name)); + port_table_entry = NULL; + list_for_each_entry(port_table_entry, &ioc->port_table_list, list) { + drsprintk(ioc, pr_err( + "%s Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n", + ioc->name, port_table_entry->port_id, + port_table_entry->phy_mask, + port_table_entry->sas_address)); + } + for (j = 0; j < port_count; j++) { + ret = leapioraid_scsihost_look_and_get_matched_port_entry(ioc, + &port_table[j], + &port_entry, + &count); + if (!port_entry) { + drsprintk(ioc, pr_err( + "%s No Matched entry for sas_addr(0x%16llx), Port:%d\n", + ioc->name, + port_table[j].sas_address, + port_table[j].port_id)); + continue; + } + switch (ret) { + case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT: + case MATCHED_WITH_ADDR_AND_SUBPHYMASK: + leapioraid_scsihost_add_or_del_phys_from_existing_port(ioc, + port_entry, + port_table, + j, + port_count); + break; + case MATCHED_WITH_ADDR: + sas_addr = port_table[j].sas_address; + for (i = 0; i < port_count; i++) { + if (port_table[i].sas_address == sas_addr) + lcount++; + } + if ((count > 1) || (lcount > 1)) + port_entry = NULL; + else + leapioraid_scsihost_add_or_del_phys_from_existing_port + (ioc, port_entry, port_table, j, + port_count); + } + if (!port_entry) + continue; + if (port_entry->port_id != port_table[j].port_id) + port_entry->port_id = port_table[j].port_id; + port_entry->flags &= ~LEAPIORAID_HBA_PORT_FLAG_DIRTY_PORT; + port_entry->phy_mask = 
port_table[j].phy_mask; + } + port_table_entry = NULL; +} + +static +struct leapioraid_virtual_phy *leapioraid_scsihost_alloc_vphy( + struct LEAPIORAID_ADAPTER *ioc, + u8 port_id, u8 phy_num) +{ + struct leapioraid_virtual_phy *vphy; + struct leapioraid_hba_port *port; + + port = leapioraid_get_port_by_id(ioc, port_id, 0); + if (!port) + return NULL; + vphy = leapioraid_get_vphy_by_phy(ioc, port, phy_num); + if (!vphy) { + vphy = kzalloc(sizeof(struct leapioraid_virtual_phy), GFP_KERNEL); + if (!vphy) + return NULL; + if (!port->vphys_mask) + INIT_LIST_HEAD(&port->vphys_list); + port->vphys_mask |= (1 << phy_num); + vphy->phy_mask |= (1 << phy_num); + list_add_tail(&vphy->list, &port->vphys_list); + pr_info( + "%s vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n", + ioc->name, vphy, port->port_id, phy_num); + } + return vphy; +} + +static void +leapioraid_scsihost_sas_host_refresh(struct LEAPIORAID_ADAPTER *ioc) +{ + u16 sz; + u16 ioc_status; + int i; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u8 link_rate, port_id; + struct leapioraid_hba_port *port; + struct LeapioraidSasPhyP0_t phy_pg0; + + dtmprintk(ioc, pr_err( + "%s updating handles for sas_host(0x%016llx)\n", + ioc->name, + (unsigned long long)ioc->sas_hba.sas_address)); + sz = offsetof(struct LeapioraidSasIOUnitP0_t, + PhyData) + + (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate 
>> 4; + if (i == 0) + ioc->sas_hba.handle = + le16_to_cpu(sas_iounit_pg0->PhyData[0].ControllerDevHandle); + port_id = sas_iounit_pg0->PhyData[i].Port; + if (!(leapioraid_get_port_by_id(ioc, port_id, 0))) { + port = kzalloc(sizeof(struct leapioraid_hba_port), GFP_KERNEL); + if (!port) + goto out; + + port->port_id = port_id; + pr_info( + "%s hba_port entry: %p, port: %d is added to hba_port list\n", + ioc->name, port, port->port_id); + if (ioc->shost_recovery) + port->flags = LEAPIORAID_HBA_PORT_FLAG_NEW_PORT; + list_add_tail(&port->list, &ioc->port_table_list); + } + if (le32_to_cpu + (sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) + & LEAPIORAID_SAS_DEVICE_INFO_SEP + && (link_rate >= LEAPIORAID_SAS_NEG_LINK_RATE_1_5)) { + if ((leapioraid_config_get_phy_pg0 + (ioc, &mpi_reply, &phy_pg0, i))) { + pr_err( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + continue; + } + if (! + (le32_to_cpu(phy_pg0.PhyInfo) & + LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY)) + continue; + if (!leapioraid_scsihost_alloc_vphy(ioc, port_id, i)) + goto out; + ioc->sas_hba.phy[i].hba_vphy = 1; + } + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; + attached_handle = + le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle); + if (attached_handle + && link_rate < LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + link_rate = LEAPIORAID_SAS_NEG_LINK_RATE_1_5; + ioc->sas_hba.phy[i].port = + leapioraid_get_port_by_id(ioc, port_id, 0); + if (!ioc->sas_hba.phy[i].phy) { + if ((leapioraid_config_get_phy_pg0 + (ioc, &mpi_reply, &phy_pg0, i))) { + pr_err( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + continue; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + continue; + } + ioc->sas_hba.phy[i].phy_id = i; + leapioraid_transport_add_host_phy(ioc, + &ioc->sas_hba.phy[i], + phy_pg0, 
+ ioc->sas_hba.parent_dev); + continue; + } + leapioraid_transport_update_links(ioc, ioc->sas_hba.sas_address, + attached_handle, i, link_rate, + ioc->sas_hba.phy[i].port); + } +out: + kfree(sas_iounit_pg0); +} + +static void +leapioraid_scsihost_sas_host_add(struct LEAPIORAID_ADAPTER *ioc) +{ + int i; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL; + struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL; + struct LeapioraidSasPhyP0_t phy_pg0; + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidSasEncP0_t enclosure_pg0; + u16 ioc_status; + u16 sz; + u8 device_missing_delay; + u8 num_phys, port_id; + struct leapioraid_hba_port *port; + + leapioraid_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc->sas_hba.nr_phys_allocated = + max_t(u8, LEAPIORAID_MAX_HBA_NUM_PHYS, num_phys); + ioc->sas_hba.phy = + kcalloc(ioc->sas_hba.nr_phys_allocated, + sizeof(struct leapioraid_sas_phy), + GFP_KERNEL); + if (!ioc->sas_hba.phy) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc->sas_hba.num_phys = num_phys; + sz = offsetof(struct LeapioraidSasIOUnitP0_t, + PhyData) + + (ioc->sas_hba.num_phys + * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + sz = offsetof(struct 
LeapioraidSasIOUnitP1_t, + PhyData) + + (ioc->sas_hba.num_phys + * sizeof(struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + if ((leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->io_missing_delay = sas_iounit_pg1->IODeviceMissingDelay; + device_missing_delay = sas_iounit_pg1->ReportDeviceMissingDelay; + if (device_missing_delay & LEAPIORAID_SASIOUNIT1_REPORT_MISSING_UNIT_16) + ioc->device_missing_delay = (device_missing_delay & + LEAPIORAID_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) + * 16; + else + ioc->device_missing_delay = device_missing_delay & + LEAPIORAID_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; + ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + if ((leapioraid_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + i))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + if (i == 0) + ioc->sas_hba.handle = + le16_to_cpu(sas_iounit_pg0->PhyData[0].ControllerDevHandle); + port_id = sas_iounit_pg0->PhyData[i].Port; + if (!(leapioraid_get_port_by_id(ioc, port_id, 0))) { + port = kzalloc(sizeof(struct leapioraid_hba_port), GFP_KERNEL); + if (!port) + goto out; + + port->port_id = port_id; + pr_info( + "%s hba_port entry: 
%p, port: %d is added to hba_port list\n", + ioc->name, port, port->port_id); + list_add_tail(&port->list, &ioc->port_table_list); + } + if ((le32_to_cpu(phy_pg0.PhyInfo) & + LEAPIORAID_SAS_PHYINFO_VIRTUAL_PHY) + && (phy_pg0.NegotiatedLinkRate >> 4) >= + LEAPIORAID_SAS_NEG_LINK_RATE_1_5) { + if (!leapioraid_scsihost_alloc_vphy(ioc, port_id, i)) + goto out; + ioc->sas_hba.phy[i].hba_vphy = 1; + } + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; + ioc->sas_hba.phy[i].phy_id = i; + ioc->sas_hba.phy[i].port = + leapioraid_get_port_by_id(ioc, port_id, 0); + leapioraid_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], + phy_pg0, + ioc->sas_hba.parent_dev); + } + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->sas_hba.enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + pr_info( + "%s host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n", + ioc->name, + ioc->sas_hba.handle, + (unsigned long long)ioc->sas_hba.sas_address, + ioc->sas_hba.num_phys); + if (ioc->sas_hba.enclosure_handle) { + if (!(leapioraid_config_get_enclosure_pg0(ioc, &mpi_reply, + &enclosure_pg0, + LEAPIORAID_SAS_ENCLOS_PGAD_FORM_HANDLE, + ioc->sas_hba.enclosure_handle))) + ioc->sas_hba.enclosure_logical_id = + le64_to_cpu(enclosure_pg0.EnclosureLogicalID); + } +out: + kfree(sas_iounit_pg1); + kfree(sas_iounit_pg0); +} + +static int +leapioraid_scsihost_expander_add( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_sas_node *sas_expander; + struct leapioraid_enclosure_node *enclosure_dev; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidExpanderP0_t expander_pg0; + struct LeapioraidExpanderP1_t expander_pg1; + u32 ioc_status; + u16 parent_handle; + u64 sas_address, 
sas_address_parent = 0; + int i; + unsigned long flags; + u8 port_id; + struct leapioraid_sas_port *leapioraid_port = NULL; + int rc = 0; + + if (!handle) + return -1; + if (ioc->shost_recovery || ioc->pci_error_recovery) + return -1; + if ((leapioraid_config_get_expander_pg0( + ioc, &mpi_reply, &expander_pg0, + LEAPIORAID_SAS_EXPAND_PGAD_FORM_HNDL, + handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); + if (leapioraid_scsihost_get_sas_address( + ioc, parent_handle, &sas_address_parent) + != 0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + port_id = expander_pg0.PhysicalPort; + if (sas_address_parent != ioc->sas_hba.sas_address) { + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = + leapioraid_scsihost_expander_find_by_sas_address( + ioc, + sas_address_parent, + leapioraid_get_port_by_id(ioc, port_id, 0)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (!sas_expander) { + rc = leapioraid_scsihost_expander_add(ioc, parent_handle); + if (rc != 0) + return rc; + } + } + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_address = le64_to_cpu(expander_pg0.SASAddress); + sas_expander = leapioraid_scsihost_expander_find_by_sas_address( + ioc, + sas_address, + leapioraid_get_port_by_id(ioc, port_id, 0)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (sas_expander) + return 0; + sas_expander = kzalloc(sizeof(struct leapioraid_raid_sas_node), + GFP_KERNEL); + if (!sas_expander) + return -1; + + sas_expander->handle = handle; + sas_expander->num_phys = expander_pg0.NumPhys; + sas_expander->sas_address_parent = 
sas_address_parent; + sas_expander->sas_address = sas_address; + sas_expander->port = leapioraid_get_port_by_id(ioc, port_id, 0); + if (!sas_expander->port) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + pr_info( + "%s expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", + ioc->name, + handle, parent_handle, + (unsigned long long)sas_expander->sas_address, + sas_expander->num_phys); + if (!sas_expander->num_phys) { + rc = -1; + goto out_fail; + } + sas_expander->phy = kcalloc(sas_expander->num_phys, + sizeof(struct leapioraid_sas_phy), GFP_KERNEL); + if (!sas_expander->phy) { + rc = -1; + goto out_fail; + } + INIT_LIST_HEAD(&sas_expander->sas_port_list); + leapioraid_port = leapioraid_transport_port_add( + ioc, handle, + sas_address_parent, + sas_expander->port); + if (!leapioraid_port) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + sas_expander->parent_dev = &leapioraid_port->rphy->dev; + sas_expander->rphy = leapioraid_port->rphy; + for (i = 0; i < sas_expander->num_phys; i++) { + if ((leapioraid_config_get_expander_pg1( + ioc, &mpi_reply, + &expander_pg1, i, + handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + sas_expander->phy[i].handle = handle; + sas_expander->phy[i].phy_id = i; + sas_expander->phy[i].port = + leapioraid_get_port_by_id(ioc, port_id, 0); + if ((leapioraid_transport_add_expander_phy + (ioc, &sas_expander->phy[i], expander_pg1, + sas_expander->parent_dev))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + } + if (sas_expander->enclosure_handle) { + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle( + ioc, + sas_expander->enclosure_handle); + if (enclosure_dev) + sas_expander->enclosure_logical_id = + 
le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + } + leapioraid_scsihost_expander_node_add(ioc, sas_expander); + return 0; +out_fail: + if (leapioraid_port) + leapioraid_transport_port_remove(ioc, + sas_expander->sas_address, + sas_address_parent, + sas_expander->port); + kfree(sas_expander); + return rc; +} + +void +leapioraid_expander_remove( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, struct leapioraid_hba_port *port) +{ + struct leapioraid_raid_sas_node *sas_expander; + unsigned long flags; + + if (ioc->shost_recovery) + return; + if (!port) + return; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = leapioraid_scsihost_expander_find_by_sas_address( + ioc, sas_address, port); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (sas_expander) + leapioraid_scsihost_expander_node_remove( + ioc, sas_expander); +} + +static u8 +leapioraid_scsihost_done( + struct LEAPIORAID_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (ioc->scsih_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + if (ioc->scsih_cmds.smid != smid) + return 1; + ioc->scsih_cmds.status |= LEAPIORAID_CMD_COMPLETE; + if (mpi_reply) { + memcpy(ioc->scsih_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + ioc->scsih_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + } + ioc->scsih_cmds.status &= ~LEAPIORAID_CMD_PENDING; + complete(&ioc->scsih_cmds.done); + return 1; +} + +static int +leapioraid_scsi_send_scsi_io( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_scsi_io_transfer *transfer_packet, + u8 tr_timeout, u8 tr_method) +{ + struct LeapioraidSCSIIORep_t *mpi_reply; + struct LeapioSCSIIOReq_t *mpi_request; + u16 smid; + u8 issue_reset = 0; + int rc; + void *priv_sense; + u32 mpi_control; + void *psge; + dma_addr_t data_out_dma = 0; + dma_addr_t data_in_dma = 0; + size_t data_in_sz = 0; + size_t data_out_sz = 0; + u16 handle; + u8 
retry_count = 0, host_reset_count = 0; + int tm_return_code; + + if (ioc->pci_error_recovery) { + pr_err("%s %s: pci error recovery in progress!\n", + ioc->name, __func__); + return -EFAULT; + } + if (ioc->shost_recovery) { + pr_info("%s %s: host recovery in progress!\n", + ioc->name, __func__); + return -EAGAIN; + } + handle = transfer_packet->handle; + if (handle == LEAPIORAID_INVALID_DEVICE_HANDLE) { + pr_info("%s %s: no device!\n", + __func__, ioc->name); + return -EFAULT; + } + mutex_lock(&ioc->scsih_cmds.mutex); + if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: scsih_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } +retry_loop: + if (test_bit(handle, ioc->device_remove_in_progress)) { + pr_info("%s %s: device removal in progress\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = ioc->shost->can_queue + + LEAPIORAID_INTERNAL_SCSIIO_FOR_DISCOVERY; + rc = 0; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioSCSIIOReq_t)); + if (transfer_packet->is_raid) + mpi_request->Function = + LEAPIORAID_FUNC_RAID_SCSI_IO_PASSTHROUGH; + else + mpi_request->Function = LEAPIORAID_FUNC_SCSI_IO_REQUEST; + mpi_request->DevHandle = cpu_to_le16(handle); + switch (transfer_packet->dir) { + case DMA_TO_DEVICE: + mpi_control = LEAPIORAID_SCSIIO_CONTROL_WRITE; + data_out_dma = transfer_packet->data_dma; + data_out_sz = transfer_packet->data_length; + break; + case DMA_FROM_DEVICE: + mpi_control = LEAPIORAID_SCSIIO_CONTROL_READ; + data_in_dma = transfer_packet->data_dma; + data_in_sz = transfer_packet->data_length; + break; + case DMA_BIDIRECTIONAL: + mpi_control = LEAPIORAID_SCSIIO_CONTROL_BIDIRECTIONAL; + BUG(); + break; + default: + case DMA_NONE: + mpi_control = LEAPIORAID_SCSIIO_CONTROL_NODATATRANSFER; + 
break; + } + psge = &mpi_request->SGL; + ioc->build_sg( + ioc, psge, data_out_dma, + data_out_sz, data_in_dma, + data_in_sz); + mpi_request->Control = cpu_to_le32(mpi_control | + LEAPIORAID_SCSIIO_CONTROL_SIMPLEQ); + mpi_request->DataLength = cpu_to_le32(transfer_packet->data_length); + mpi_request->MsgFlags = LEAPIORAID_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; + mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + mpi_request->SenseBufferLowAddress = + leapioraid_base_get_sense_buffer_dma(ioc, smid); + priv_sense = leapioraid_base_get_sense_buffer(ioc, smid); + mpi_request->SGLOffset0 = offsetof(struct LeapioSCSIIOReq_t, SGL) / 4; + mpi_request->IoFlags = cpu_to_le16(transfer_packet->cdb_length); + int_to_scsilun(transfer_packet->lun, (struct scsi_lun *) + mpi_request->LUN); + memcpy(mpi_request->CDB.CDB32, transfer_packet->cdb, + transfer_packet->cdb_length); + init_completion(&ioc->scsih_cmds.done); + if (likely(mpi_request->Function == LEAPIORAID_FUNC_SCSI_IO_REQUEST)) + ioc->put_smid_scsi_io(ioc, smid, handle); + else + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->scsih_cmds.done, + transfer_packet->timeout * HZ); + if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, + mpi_request, + sizeof(struct LeapioSCSIIOReq_t) / 4, + issue_reset); + goto issue_target_reset; + } + if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + transfer_packet->valid_reply = 1; + mpi_reply = ioc->scsih_cmds.reply; + transfer_packet->sense_length = + le32_to_cpu(mpi_reply->SenseCount); + if (transfer_packet->sense_length) + memcpy(transfer_packet->sense, priv_sense, + transfer_packet->sense_length); + transfer_packet->transfer_length = + le32_to_cpu(mpi_reply->TransferCount); + transfer_packet->ioc_status = + le16_to_cpu(mpi_reply->IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + transfer_packet->scsi_state = mpi_reply->SCSIState; + transfer_packet->scsi_status = mpi_reply->SCSIStatus; + 
transfer_packet->log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + } + goto out; +issue_target_reset: + if (issue_reset) { + pr_info("%s issue target reset: handle(0x%04x)\n", ioc->name, handle); + tm_return_code = + leapioraid_scsihost_issue_locked_tm(ioc, handle, + 0xFFFFFFFF, 0xFFFFFFFF, + 0, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + smid, tr_timeout, + tr_method); + if (tm_return_code == SUCCESS) { + pr_err( + "%s target reset completed: handle (0x%04x)\n", + ioc->name, handle); + if (((ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE) + && retry_count++ < 3) + || ((ioc->scsih_cmds.status & LEAPIORAID_CMD_RESET) + && host_reset_count++ == 0)) { + pr_info("%s issue retry: handle (0x%04x)\n", + ioc->name, handle); + goto retry_loop; + } + } else + pr_err("%s target reset didn't complete: handle(0x%04x)\n", + ioc->name, handle); + rc = -EFAULT; + } else + rc = -EAGAIN; +out: + ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_determine_disposition( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_scsi_io_transfer *transfer_packet) +{ + static enum device_responsive_state rc; + struct sense_info sense_info = { 0, 0, 0 }; + u8 check_sense = 0; + char *desc = NULL; + + if (!transfer_packet->valid_reply) + return DEVICE_READY; + switch (transfer_packet->ioc_status) { + case LEAPIORAID_IOCSTATUS_BUSY: + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_RESOURCES: + case LEAPIORAID_IOCSTATUS_SCSI_TASK_TERMINATED: + case LEAPIORAID_IOCSTATUS_SCSI_IO_DATA_ERROR: + case LEAPIORAID_IOCSTATUS_SCSI_EXT_TERMINATED: + rc = DEVICE_RETRY; + break; + case LEAPIORAID_IOCSTATUS_SCSI_IOC_TERMINATED: + if (transfer_packet->log_info == 0x31170000) { + rc = DEVICE_RETRY; + break; + } + if (transfer_packet->cdb[0] == REPORT_LUNS) + rc = DEVICE_READY; + else + rc = DEVICE_RETRY; + break; + case LEAPIORAID_IOCSTATUS_SCSI_DATA_UNDERRUN: + case 
LEAPIORAID_IOCSTATUS_SCSI_RECOVERED_ERROR: + case LEAPIORAID_IOCSTATUS_SUCCESS: + if (!transfer_packet->scsi_state && + !transfer_packet->scsi_status) { + rc = DEVICE_READY; + break; + } + if (transfer_packet->scsi_state & + LEAPIORAID_SCSI_STATE_AUTOSENSE_VALID) { + rc = DEVICE_ERROR; + check_sense = 1; + break; + } + if (transfer_packet->scsi_state & + (LEAPIORAID_SCSI_STATE_AUTOSENSE_FAILED | + LEAPIORAID_SCSI_STATE_NO_SCSI_STATUS | + LEAPIORAID_SCSI_STATE_TERMINATED)) { + rc = DEVICE_RETRY; + break; + } + if (transfer_packet->scsi_status >= LEAPIORAID_SCSI_STATUS_BUSY) { + rc = DEVICE_RETRY; + break; + } + rc = DEVICE_READY; + break; + case LEAPIORAID_IOCSTATUS_SCSI_PROTOCOL_ERROR: + if (transfer_packet->scsi_state & LEAPIORAID_SCSI_STATE_TERMINATED) + rc = DEVICE_RETRY; + else + rc = DEVICE_ERROR; + break; + case LEAPIORAID_IOCSTATUS_INSUFFICIENT_POWER: + default: + rc = DEVICE_ERROR; + break; + } + if (check_sense) { + leapioraid_scsihost_normalize_sense( + transfer_packet->sense, &sense_info); + if (sense_info.skey == UNIT_ATTENTION) + rc = DEVICE_RETRY_UA; + else if (sense_info.skey == NOT_READY) { + if (sense_info.asc == 0x3a) + rc = DEVICE_READY; + else if (sense_info.asc == 0x04) { + if (sense_info.ascq == 0x03 || + sense_info.ascq == 0x0b || + sense_info.ascq == 0x0c) { + rc = DEVICE_ERROR; + } else + rc = DEVICE_START_UNIT; + } else if (sense_info.asc == 0x3e && !sense_info.ascq) + rc = DEVICE_START_UNIT; + } else if (sense_info.skey == ILLEGAL_REQUEST && + transfer_packet->cdb[0] == REPORT_LUNS) { + rc = DEVICE_READY; + } else if (sense_info.skey == MEDIUM_ERROR) { + if (sense_info.asc == 0x31) + rc = DEVICE_READY; + } else if (sense_info.skey == HARDWARE_ERROR) { + if (sense_info.asc == 0x19) + rc = DEVICE_READY; + } + } + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) { + switch (rc) { + case DEVICE_READY: + desc = "ready"; + break; + case DEVICE_RETRY: + desc = "retry"; + break; + case DEVICE_RETRY_UA: + desc = "retry_ua"; + break; + 
case DEVICE_START_UNIT: + desc = "start_unit"; + break; + case DEVICE_STOP_UNIT: + desc = "stop_unit"; + break; + case DEVICE_ERROR: + desc = "error"; + break; + } + pr_info( + "%s \tioc_status(0x%04x), loginfo(0x%08x),\n\t\t" + "scsi_status(0x%02x), scsi_state(0x%02x), rc(%s)\n", + ioc->name, + transfer_packet->ioc_status, + transfer_packet->log_info, + transfer_packet->scsi_status, + transfer_packet->scsi_state, + desc); + if (check_sense) + pr_info("%s \t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x]\n", + ioc->name, + sense_info.skey, sense_info.asc, + sense_info.ascq); + } + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_inquiry_vpd_sn( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 **serial_number) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + u8 *inq_data; + int return_code; + u32 data_length; + u8 len; + u8 tr_timeout = 30; + u8 tr_method = 0; + + inq_data = NULL; + transfer_packet + = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), GFP_KERNEL); + if (!transfer_packet) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_RETRY; + goto out; + } + data_length = 252; + inq_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, + &transfer_packet->data_dma, GFP_ATOMIC); + if (!inq_data) { + rc = DEVICE_RETRY; + goto out; + } + + rc = DEVICE_READY; + memset(inq_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 6; + transfer_packet->cdb[0] = INQUIRY; + transfer_packet->cdb[1] = 1; + transfer_packet->cdb[2] = 0x80; + transfer_packet->cdb[4] = data_length; + transfer_packet->timeout = 30; + tr_method = LEAPIORAID_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + return_code = + leapioraid_scsi_send_scsi_io( + ioc, transfer_packet, tr_timeout, tr_method); + switch (return_code) { + case 0: + rc = 
leapioraid_scsihost_determine_disposition( + ioc, transfer_packet); + if (rc == DEVICE_READY) { + len = strlen(&inq_data[4]) + 1; + *serial_number = kmalloc(len, GFP_KERNEL); + if (*serial_number) + strscpy(*serial_number, &inq_data[4], sizeof(*serial_number)); + } + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + if (inq_data) + dma_free_coherent(&ioc->pdev->dev, data_length, inq_data, + transfer_packet->data_dma); + kfree(transfer_packet); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_inquiry_vpd_supported_pages( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u32 lun, void *data, + u32 data_length) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + void *inq_data; + int return_code; + + inq_data = NULL; + transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), + GFP_KERNEL); + if (!transfer_packet) { + rc = DEVICE_RETRY; + goto out; + } + inq_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, + &transfer_packet->data_dma, GFP_ATOMIC); + if (!inq_data) { + rc = DEVICE_RETRY; + goto out; + } + rc = DEVICE_READY; + memset(inq_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 6; + transfer_packet->lun = lun; + transfer_packet->cdb[0] = INQUIRY; + transfer_packet->cdb[1] = 1; + transfer_packet->cdb[4] = data_length; + transfer_packet->timeout = 30; + return_code = leapioraid_scsi_send_scsi_io( + ioc, transfer_packet, 30, 0); + switch (return_code) { + case 0: + rc = leapioraid_scsihost_determine_disposition( + ioc, transfer_packet); + if (rc == DEVICE_READY) + memcpy(data, inq_data, data_length); + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s 
failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + if (inq_data) + dma_free_coherent(&ioc->pdev->dev, data_length, inq_data, + transfer_packet->data_dma); + kfree(transfer_packet); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_report_luns( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, void *data, + u32 data_length, u8 retry_count, u8 is_pd, u8 tr_timeout, + u8 tr_method) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + void *lun_data; + int return_code; + int retries; + + lun_data = NULL; + transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), + GFP_KERNEL); + if (!transfer_packet) { + rc = DEVICE_RETRY; + goto out; + } + lun_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, + &transfer_packet->data_dma, GFP_ATOMIC); + if (!lun_data) { + rc = DEVICE_RETRY; + goto out; + } + for (retries = 0; retries < 4; retries++) { + rc = DEVICE_ERROR; + pr_info("%s REPORT_LUNS: handle(0x%04x), retries(%d)\n", + ioc->name, handle, retries); + memset(lun_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 12; + transfer_packet->cdb[0] = REPORT_LUNS; + transfer_packet->cdb[6] = (data_length >> 24) & 0xFF; + transfer_packet->cdb[7] = (data_length >> 16) & 0xFF; + transfer_packet->cdb[8] = (data_length >> 8) & 0xFF; + transfer_packet->cdb[9] = data_length & 0xFF; + transfer_packet->timeout = 30; + transfer_packet->is_raid = is_pd; + return_code = + leapioraid_scsi_send_scsi_io(ioc, transfer_packet, tr_timeout, + tr_method); + switch (return_code) { + case 0: + rc = leapioraid_scsihost_determine_disposition(ioc, + transfer_packet); + if (rc == DEVICE_READY) { + memcpy(data, lun_data, data_length); + goto out; + } else if (rc == DEVICE_ERROR) + goto out; + break; + case -EAGAIN: + rc = DEVICE_RETRY; + 
break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + } +out: + if (lun_data) + dma_free_coherent(&ioc->pdev->dev, data_length, lun_data, + transfer_packet->data_dma); + kfree(transfer_packet); + if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || + rc == DEVICE_RETRY_UA) && retry_count >= 144) + rc = DEVICE_ERROR; + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_start_unit( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, u32 lun, + u8 is_pd, u8 tr_timeout, u8 tr_method) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + int return_code; + + transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), + GFP_KERNEL); + if (!transfer_packet) { + rc = DEVICE_RETRY; + goto out; + } + + rc = DEVICE_READY; + transfer_packet->handle = handle; + transfer_packet->dir = DMA_NONE; + transfer_packet->lun = lun; + transfer_packet->cdb_length = 6; + transfer_packet->cdb[0] = START_STOP; + transfer_packet->cdb[1] = 1; + transfer_packet->cdb[4] = 1; + transfer_packet->timeout = 30; + transfer_packet->is_raid = is_pd; + pr_info("%s START_UNIT: handle(0x%04x), lun(%d)\n", + ioc->name, handle, lun); + return_code = + leapioraid_scsi_send_scsi_io( + ioc, transfer_packet, tr_timeout, tr_method); + switch (return_code) { + case 0: + rc = leapioraid_scsihost_determine_disposition( + ioc, transfer_packet); + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + kfree(transfer_packet); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_test_unit_ready( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, u32 lun, + u8 is_pd, u8 tr_timeout, u8 tr_method) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + int return_code; 
+ int sata_init_failure = 0; + + transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), + GFP_KERNEL); + if (!transfer_packet) { + rc = DEVICE_RETRY; + goto out; + } + rc = DEVICE_READY; + transfer_packet->handle = handle; + transfer_packet->dir = DMA_NONE; + transfer_packet->lun = lun; + transfer_packet->cdb_length = 6; + transfer_packet->cdb[0] = TEST_UNIT_READY; + transfer_packet->timeout = 30; + transfer_packet->is_raid = is_pd; +sata_init_retry: + pr_info("%s TEST_UNIT_READY: handle(0x%04x), lun(%d)\n", + ioc->name, handle, lun); + return_code = + leapioraid_scsi_send_scsi_io( + ioc, transfer_packet, tr_timeout, tr_method); + switch (return_code) { + case 0: + rc = leapioraid_scsihost_determine_disposition( + ioc, transfer_packet); + if (rc == DEVICE_RETRY && + transfer_packet->log_info == 0x31111000) { + if (!sata_init_failure++) { + pr_err( + "%s SATA Initialization Timeout,sending a retry\n", + ioc->name); + rc = DEVICE_READY; + goto sata_init_retry; + } else { + pr_err( + "%s SATA Initialization Failed\n", + ioc->name); + rc = DEVICE_ERROR; + } + } + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + kfree(transfer_packet); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_ata_pass_thru_idd( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 *is_ssd_device, u8 tr_timeout, u8 tr_method) +{ + struct leapioraid_scsi_io_transfer *transfer_packet; + enum device_responsive_state rc; + u16 *idd_data; + int return_code; + u32 data_length; + + idd_data = NULL; + transfer_packet = kzalloc(sizeof(struct leapioraid_scsi_io_transfer), + GFP_KERNEL); + if (!transfer_packet) { + rc = DEVICE_RETRY; + goto out; + } + data_length = 512; + idd_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, + &transfer_packet->data_dma, GFP_ATOMIC); + if (!idd_data) { + rc = DEVICE_RETRY; + 
goto out; + } + rc = DEVICE_READY; + memset(idd_data, 0, data_length); + transfer_packet->handle = handle; + transfer_packet->dir = DMA_FROM_DEVICE; + transfer_packet->data_length = data_length; + transfer_packet->cdb_length = 12; + transfer_packet->cdb[0] = ATA_12; + transfer_packet->cdb[1] = 0x8; + transfer_packet->cdb[2] = 0xd; + transfer_packet->cdb[3] = 0x1; + transfer_packet->cdb[9] = 0xec; + transfer_packet->timeout = 30; + return_code = leapioraid_scsi_send_scsi_io( + ioc, transfer_packet, 30, 0); + switch (return_code) { + case 0: + rc = leapioraid_scsihost_determine_disposition( + ioc, transfer_packet); + if (rc == DEVICE_READY) { + if (le16_to_cpu(idd_data[217]) == 1) + *is_ssd_device = 1; + } + break; + case -EAGAIN: + rc = DEVICE_RETRY; + break; + case -EFAULT: + default: + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = DEVICE_ERROR; + break; + } +out: + if (idd_data) { + dma_free_coherent(&ioc->pdev->dev, data_length, idd_data, + transfer_packet->data_dma); + } + kfree(transfer_packet); + return rc; +} + +static enum device_responsive_state +leapioraid_scsihost_wait_for_device_to_become_ready( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u8 retry_count, u8 is_pd, + int lun, u8 tr_timeout, u8 tr_method) +{ + enum device_responsive_state rc; + + if (ioc->pci_error_recovery) + return DEVICE_ERROR; + if (ioc->shost_recovery) + return DEVICE_RETRY; + rc = leapioraid_scsihost_test_unit_ready( + ioc, handle, lun, is_pd, tr_timeout, + tr_method); + if (rc == DEVICE_READY || rc == DEVICE_ERROR) + return rc; + else if (rc == DEVICE_START_UNIT) { + rc = leapioraid_scsihost_start_unit( + ioc, handle, lun, is_pd, tr_timeout, + tr_method); + if (rc == DEVICE_ERROR) + return rc; + rc = leapioraid_scsihost_test_unit_ready( + ioc, handle, lun, is_pd, + tr_timeout, tr_method); + } + if ((rc == DEVICE_RETRY || rc == DEVICE_START_UNIT || + rc == DEVICE_RETRY_UA) && retry_count >= 144) + rc = DEVICE_ERROR; + return rc; +} + 
+static enum device_responsive_state +leapioraid_scsihost_wait_for_target_to_become_ready( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u8 retry_count, u8 is_pd, + u8 tr_timeout, u8 tr_method) +{ + enum device_responsive_state rc; + struct scsi_lun *lun_data; + u32 length, num_luns; + u8 *data; + int lun; + struct scsi_lun *lunp; + + lun_data = + kcalloc(255, sizeof(struct scsi_lun), GFP_KERNEL); + if (!lun_data) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return DEVICE_RETRY; + } + rc = leapioraid_scsihost_report_luns(ioc, handle, lun_data, + 255 * sizeof(struct scsi_lun), + retry_count, is_pd, tr_timeout, tr_method); + if (rc != DEVICE_READY) + goto out; + data = (u8 *) lun_data; + length = ((data[0] << 24) | (data[1] << 16) | + (data[2] << 8) | (data[3] << 0)); + num_luns = (length / sizeof(struct scsi_lun)); + lunp = &lun_data[1]; + lun = (num_luns) ? scsilun_to_int(&lun_data[1]) : 0; + rc = leapioraid_scsihost_wait_for_device_to_become_ready( + ioc, handle, retry_count, + is_pd, lun, tr_timeout, + tr_method); + if (rc == DEVICE_ERROR) { + struct scsi_lun *lunq; + + for (lunq = lunp++; lunq <= &lun_data[num_luns]; lunq++) { + rc = leapioraid_scsihost_wait_for_device_to_become_ready(ioc, + handle, + retry_count, + is_pd, + scsilun_to_int + (lunq), + tr_timeout, + tr_method); + if (rc != DEVICE_ERROR) + goto out; + } + } +out: + kfree(lun_data); + return rc; +} + +static u8 +leapioraid_scsihost_check_access_status( + struct LEAPIORAID_ADAPTER *ioc, u64 sas_address, + u16 handle, u8 access_status) +{ + u8 rc = 1; + char *desc = NULL; + + switch (access_status) { + case LEAPIORAID_SAS_DEVICE0_ASTATUS_NO_ERRORS: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION: + rc = 0; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED: + desc = "sata capability failed"; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT: + desc = "sata affiliation conflict"; + break; + case 
LEAPIORAID_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE: + desc = "route not addressable"; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE: + desc = "smp error not addressable"; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED: + desc = "device blocked"; + break; + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_DIAG: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_PIO_SN: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE: + case LEAPIORAID_SAS_DEVICE0_ASTATUS_SIF_MAX: + desc = "sata initialization failed"; + break; + default: + desc = "unknown"; + break; + } + if (!rc) + return 0; + pr_err( + "%s discovery errors(%s): sas_address(0x%016llx),\n\t\t" + "handle(0x%04x)\n", + ioc->name, + desc, + (unsigned long long)sas_address, + handle); + return rc; +} + +static void +leapioraid_scsihost_check_device(struct LEAPIORAID_ADAPTER *ioc, + u64 parent_sas_address, u16 handle, u8 phy_number, + u8 link_rate) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasDevP0_t sas_device_pg0; + struct leapioraid_sas_device *sas_device = NULL; + struct leapioraid_enclosure_node *enclosure_dev = NULL; + u32 ioc_status; + unsigned long flags; + u64 sas_address; + struct scsi_target *starget; + struct LEAPIORAID_TARGET *sas_target_priv_data; + u32 device_info; + u8 *serial_number = NULL; + u8 *original_serial_number = NULL; + int rc; + struct leapioraid_hba_port *port; + + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) + 
return; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + return; + if (phy_number != sas_device_pg0.PhyNum) + return; + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if (!(leapioraid_scsihost_is_sas_end_device(device_info))) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + port = leapioraid_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0); + if (!port) + goto out_unlock; + sas_device = __leapioraid_get_sdev_by_addr(ioc, sas_address, port); + if (!sas_device) + goto out_unlock; + if (unlikely(sas_device->handle != handle)) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + starget_printk(KERN_INFO, starget, + "handle changed from(0x%04x) to (0x%04x)!!!\n", + sas_device->handle, handle); + sas_target_priv_data->handle = handle; + sas_device->handle = handle; + if (le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = + sas_device_pg0.EnclosureLevel; + memcpy(sas_device->connector_name, + sas_device_pg0.ConnectorName, 4); + sas_device->connector_name[4] = '\0'; + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + sas_device->enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + sas_device->is_chassis_slot_valid = 0; + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle(ioc, + sas_device->enclosure_handle); + if (enclosure_dev) { + sas_device->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + if (le16_to_cpu(enclosure_dev->pg0.Flags) & + LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { + sas_device->is_chassis_slot_valid = 1; + sas_device->chassis_slot = + enclosure_dev->pg0.ChassisSlot; + } + } + } + if (!(le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { + pr_err("%s device is not 
present handle(0x%04x), flags!!!\n", + ioc->name, handle); + goto out_unlock; + } + if (leapioraid_scsihost_check_access_status(ioc, sas_address, handle, + sas_device_pg0.AccessStatus)) + goto out_unlock; + original_serial_number = sas_device->serial_number; + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + leapioraid_scsihost_ublock_io_device_wait(ioc, sas_address, port); + if (!original_serial_number) + goto out; + if (leapioraid_scsihost_inquiry_vpd_sn(ioc, handle, &serial_number) == + DEVICE_READY && serial_number) { + rc = strcmp(original_serial_number, serial_number); + kfree(serial_number); + if (!rc) + goto out; + leapioraid_device_remove_by_sas_address(ioc, sas_address, port); + leapioraid_transport_update_links(ioc, parent_sas_address, + handle, phy_number, link_rate, + port); + leapioraid_scsihost_add_device(ioc, handle, 0, 0); + } + goto out; +out_unlock: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +out: + if (sas_device) + leapioraid_sas_device_put(sas_device); +} + +static int +leapioraid_scsihost_add_device( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, u8 retry_count, + u8 is_pd) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasDevP0_t sas_device_pg0; + struct leapioraid_sas_device *sas_device; + struct leapioraid_enclosure_node *enclosure_dev = NULL; + u32 ioc_status; + u64 sas_address; + u32 device_info; + enum device_responsive_state rc; + u8 connector_name[5], port_id; + + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 0; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 0; + } + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if 
(!(leapioraid_scsihost_is_sas_end_device(device_info)))
+		return 0;
+	set_bit(handle, ioc->pend_os_device_add);
+	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+	if (!(le16_to_cpu(sas_device_pg0.Flags) &
+	      LEAPIORAID_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+		pr_err("%s device is not present handle(0x%04x)!!!\n",
+			ioc->name, handle);
+		return 0;
+	}
+	if (leapioraid_scsihost_check_access_status(
+		ioc, sas_address, handle,
+		sas_device_pg0.AccessStatus))
+		return 0;
+	port_id = sas_device_pg0.PhysicalPort;
+	sas_device = leapioraid_get_sdev_by_addr(ioc,
+		sas_address,
+		leapioraid_get_port_by_id(ioc, port_id, 0));
+	if (sas_device) {
+		clear_bit(handle, ioc->pend_os_device_add);
+		leapioraid_sas_device_put(sas_device);
+		return 0;
+	}
+	if (le16_to_cpu(sas_device_pg0.EnclosureHandle)) {
+		enclosure_dev =
+		    leapioraid_scsihost_enclosure_find_by_handle(ioc,
+								 le16_to_cpu
+								 (sas_device_pg0.EnclosureHandle));
+		if (enclosure_dev == NULL)
+			pr_info(
+				"%s Enclosure handle(0x%04x)doesn't\n\t\t"
+				"match with enclosure device!\n",
+				ioc->name,
+				le16_to_cpu(sas_device_pg0.EnclosureHandle));
+	}
+	if (!ioc->wait_for_discovery_to_complete) {
+		pr_info(
+			"%s detecting: handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
+			ioc->name, handle,
+			(unsigned long long)sas_address,
+			sas_device_pg0.PhyNum);
+		rc = leapioraid_scsihost_wait_for_target_to_become_ready(
+			ioc, handle,
+			retry_count,
+			is_pd, 30, 0);
+		if (rc != DEVICE_READY) {
+			if (le16_to_cpu(sas_device_pg0.EnclosureHandle) != 0)
+				dewtprintk(ioc,
+					   pr_info("%s %s: device not ready: slot(%d)\n",
+						   ioc->name, __func__,
+						   le16_to_cpu(sas_device_pg0.Slot)));
+			if (le16_to_cpu(sas_device_pg0.Flags) &
+			    LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+				memcpy(connector_name,
+				       sas_device_pg0.ConnectorName, 4);
+				connector_name[4] = '\0';
+				dewtprintk(ioc,
+					   pr_info(
+						   "%s %s: device not ready: enclosure level(0x%04x), connector name( %s)\n",
+						   ioc->name, __func__,
+						   sas_device_pg0.EnclosureLevel,
+						   
connector_name)); + } + if ((enclosure_dev) + && (le16_to_cpu(enclosure_dev->pg0.Flags) & + LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID)) + pr_err( + "%s chassis slot(0x%04x)\n", ioc->name, + enclosure_dev->pg0.ChassisSlot); + if (rc == DEVICE_RETRY || rc == DEVICE_START_UNIT + || rc == DEVICE_STOP_UNIT || rc == DEVICE_RETRY_UA) + return 1; + else if (rc == DEVICE_ERROR) + return 0; + } + } + sas_device = kzalloc(sizeof(struct leapioraid_sas_device), + GFP_KERNEL); + if (!sas_device) + return 0; + + kref_init(&sas_device->refcount); + sas_device->handle = handle; + if (leapioraid_scsihost_get_sas_address(ioc, + le16_to_cpu(sas_device_pg0.ParentDevHandle), + &sas_device->sas_address_parent) != 0) + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_device->enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + if (sas_device->enclosure_handle != 0) + sas_device->slot = le16_to_cpu(sas_device_pg0.Slot); + sas_device->device_info = device_info; + sas_device->sas_address = sas_address; + sas_device->port = leapioraid_get_port_by_id(ioc, port_id, 0); + if (!sas_device->port) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + sas_device->phy = sas_device_pg0.PhyNum; + sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 
+ 1 : 0; + sas_device->supports_sata_smart = + (le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED); + if (le16_to_cpu(sas_device_pg0.Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = sas_device_pg0.EnclosureLevel; + memcpy(sas_device->connector_name, + sas_device_pg0.ConnectorName, 4); + sas_device->connector_name[4] = '\0'; + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + sas_device->is_chassis_slot_valid = 0; + if (enclosure_dev) { + sas_device->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + if (le16_to_cpu(enclosure_dev->pg0.Flags) & + LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { + sas_device->is_chassis_slot_valid = 1; + sas_device->chassis_slot = + enclosure_dev->pg0.ChassisSlot; + } + } + sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); + sas_device->port_type = sas_device_pg0.MaxPortConnections; + pr_err( + "%s handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n", + ioc->name, handle, sas_device->sas_address, + sas_device->port_type); + if (ioc->wait_for_discovery_to_complete) + leapioraid_scsihost_sas_device_init_add(ioc, sas_device); + else + leapioraid_scsihost_sas_device_add(ioc, sas_device); +out: + leapioraid_sas_device_put(sas_device); + return 0; +} + +static void +leapioraid_scsihost_remove_device(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + struct LEAPIORAID_TARGET *sas_target_priv_data; + + if (sas_device->pfa_led_on) { + leapioraid_scsihost_turn_off_pfa_led(ioc, sas_device); + sas_device->pfa_led_on = 0; + } + dewtprintk(ioc, pr_info( + "%s %s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address)); + dewtprintk(ioc, + leapioraid_scsihost_display_enclosure_chassis_info( + ioc, sas_device, NULL, NULL)); + if (sas_device->starget && 
sas_device->starget->hostdata) { + sas_target_priv_data = sas_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + leapioraid_scsihost_ublock_io_device( + ioc, sas_device->sas_address, + sas_device->port); + sas_target_priv_data->handle = + LEAPIORAID_INVALID_DEVICE_HANDLE; + } + if (!ioc->hide_drives) + leapioraid_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + pr_info("%s removing handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, sas_device->handle, + (unsigned long long)sas_device->sas_address); + leapioraid_scsihost_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL); + dewtprintk(ioc, pr_info( + "%s %s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, sas_device->handle, + (unsigned long long) + sas_device->sas_address)); + dewtprintk(ioc, + leapioraid_scsihost_display_enclosure_chassis_info( + ioc, sas_device, NULL, NULL)); + kfree(sas_device->serial_number); +} + +static void +leapioraid_scsihost_sas_topology_change_event_debug( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasTopoChangeList_t *event_data) +{ + int i; + u16 handle; + u16 reason_code; + u8 phy_number; + char *status_str = NULL; + u8 link_rate, prev_link_rate; + + switch (event_data->ExpStatus) { + case LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED: + status_str = "add"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING: + status_str = "remove"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_ES_RESPONDING: + case 0: + status_str = "responding"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: + status_str = "remove delay"; + break; + default: + status_str = "unknown status"; + break; + } + pr_info("%s sas topology change: (%s)\n", + ioc->name, status_str); + pr_info( + "\thandle(0x%04x), enclosure_handle(0x%04x)\n\t\t" + "start_phy(%02d), count(%d)\n", + le16_to_cpu(event_data->ExpanderDevHandle), + le16_to_cpu(event_data->EnclosureHandle), + 
event_data->StartPhyNum, + event_data->NumEntries); + for (i = 0; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + phy_number = event_data->StartPhyNum + i; + reason_code = event_data->PHY[i].PhyStatus & + LEAPIORAID_EVENT_SAS_TOPO_RC_MASK; + switch (reason_code) { + case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED: + status_str = "target add"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: + status_str = "target remove"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: + status_str = "delay target remove"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_PHY_CHANGED: + status_str = "link rate change"; + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_NO_CHANGE: + status_str = "target responding"; + break; + default: + status_str = "unknown"; + break; + } + link_rate = event_data->PHY[i].LinkRate >> 4; + prev_link_rate = event_data->PHY[i].LinkRate & 0xF; + pr_info( + "\tphy(%02d), attached_handle(0x%04x): %s:\n\t\t" + "link rate: new(0x%02x), old(0x%02x)\n", + phy_number, + handle, + status_str, + link_rate, + prev_link_rate); + } +} + +static int +leapioraid_scsihost_sas_topology_change_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + int i; + u16 parent_handle, handle; + u16 reason_code; + u8 phy_number, max_phys; + struct leapioraid_raid_sas_node *sas_expander; + struct leapioraid_sas_device *sas_device; + u64 sas_address; + unsigned long flags; + u8 link_rate, prev_link_rate; + int rc; + int requeue_event; + struct leapioraid_hba_port *port; + struct LeapioraidEventDataSasTopoChangeList_t *event_data = + fw_event->event_data; + + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + leapioraid_scsihost_sas_topology_change_event_debug( + ioc, event_data); + if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) + return 0; + if (!ioc->sas_hba.num_phys) + leapioraid_scsihost_sas_host_add(ioc); + 
else + leapioraid_scsihost_sas_host_refresh(ioc); + if (fw_event->ignore) { + dewtprintk(ioc, + pr_info("%s ignoring expander event\n", + ioc->name)); + return 0; + } + parent_handle = le16_to_cpu(event_data->ExpanderDevHandle); + port = leapioraid_get_port_by_id(ioc, event_data->PhysicalPort, 0); + if (event_data->ExpStatus == LEAPIORAID_EVENT_SAS_TOPO_ES_ADDED) + if (leapioraid_scsihost_expander_add(ioc, parent_handle) != 0) + return 0; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = leapioraid_scsihost_expander_find_by_handle( + ioc, parent_handle); + if (sas_expander) { + sas_address = sas_expander->sas_address; + max_phys = sas_expander->num_phys; + port = sas_expander->port; + } else if (parent_handle < ioc->sas_hba.num_phys) { + sas_address = ioc->sas_hba.sas_address; + max_phys = ioc->sas_hba.num_phys; + } else { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return 0; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + for (i = 0, requeue_event = 0; i < event_data->NumEntries; i++) { + if (fw_event->ignore) { + dewtprintk(ioc, pr_info( + "%s ignoring expander event\n", + ioc->name)); + return 0; + } + if (ioc->remove_host || ioc->pci_error_recovery) + return 0; + phy_number = event_data->StartPhyNum + i; + if (phy_number >= max_phys) + continue; + reason_code = event_data->PHY[i].PhyStatus & + LEAPIORAID_EVENT_SAS_TOPO_RC_MASK; + if ((event_data->PHY[i].PhyStatus & + LEAPIORAID_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code != + LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) + continue; + if (fw_event->delayed_work_active && (reason_code == + LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) { + dewtprintk(ioc, + pr_info( + "%s ignoring Targ not responding\n\t\t" + "event phy in re-queued event processing\n", + ioc->name)); + continue; + } + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + link_rate = event_data->PHY[i].LinkRate >> 4; + prev_link_rate = 
event_data->PHY[i].LinkRate & 0xF; + switch (reason_code) { + case LEAPIORAID_EVENT_SAS_TOPO_RC_PHY_CHANGED: + if (ioc->shost_recovery) + break; + if (link_rate == prev_link_rate) + break; + leapioraid_transport_update_links(ioc, sas_address, + handle, phy_number, + link_rate, port); + if (link_rate < LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + break; + leapioraid_scsihost_check_device(ioc, sas_address, handle, + phy_number, link_rate); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, + handle); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) { + leapioraid_sas_device_put(sas_device); + break; + } + if (!test_bit(handle, ioc->pend_os_device_add)) + break; + dewtprintk(ioc, pr_err( + "%s handle(0x%04x) device not found:\n\t\t" + "convert event to a device add\n", + ioc->name, handle)); + event_data->PHY[i].PhyStatus &= 0xF0; + event_data->PHY[i].PhyStatus |= + LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED; + fallthrough; + case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_ADDED: + if (ioc->shost_recovery) + break; + leapioraid_transport_update_links(ioc, sas_address, + handle, phy_number, + link_rate, port); + if (link_rate < LEAPIORAID_SAS_NEG_LINK_RATE_1_5) + break; + rc = leapioraid_scsihost_add_device(ioc, handle, + fw_event->retries[i], 0); + if (rc) { + fw_event->retries[i]++; + requeue_event = 1; + } else { + event_data->PHY[i].PhyStatus |= + LEAPIORAID_EVENT_SAS_TOPO_PHYSTATUS_VACANT; + } + break; + case LEAPIORAID_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: + leapioraid_scsihost_device_remove_by_handle(ioc, handle); + break; + } + } + if (event_data->ExpStatus == LEAPIORAID_EVENT_SAS_TOPO_ES_NOT_RESPONDING + && sas_expander) + leapioraid_expander_remove(ioc, sas_address, port); + return requeue_event; +} + +static void +leapioraid_scsihost_sas_device_status_change_event_debug( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasDeviceStatusChange_t *event_data) +{ + char *reason_str = NULL; + + 
switch (event_data->ReasonCode) { + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA: + reason_str = "smart data"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: + reason_str = "unsupported device discovered"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: + reason_str = "internal device reset"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: + reason_str = "internal task abort"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: + reason_str = "internal task abort set"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: + reason_str = "internal clear task set"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: + reason_str = "internal query task"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE: + reason_str = "sata init failure"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET: + reason_str = "internal device reset complete"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: + reason_str = "internal task abort complete"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: + reason_str = "internal async notification"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality"; + break; + case LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality complete"; + break; + default: + reason_str = "unknown reason"; + break; + } + pr_info("%s device status change: (%s)\n" + "\thandle(0x%04x), sas address(0x%016llx), tag(%d)", + ioc->name, reason_str, le16_to_cpu(event_data->DevHandle), + (unsigned long long)le64_to_cpu(event_data->SASAddress), + le16_to_cpu(event_data->TaskTag)); + if (event_data->ReasonCode == LEAPIORAID_EVENT_SAS_DEV_STAT_RC_SMART_DATA) + pr_info("%s , ASC(0x%x), ASCQ(0x%x)\n", + ioc->name, event_data->ASC, 
event_data->ASCQ); + pr_info("\n"); +} + +static void +leapioraid_scsihost_sas_device_status_change_event( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasDeviceStatusChange_t *event_data) +{ + struct LEAPIORAID_TARGET *target_priv_data; + struct leapioraid_sas_device *sas_device; + u64 sas_address; + unsigned long flags; + + if ((ioc->facts.HeaderVersion >> 8) < 0xC) + return; + if (event_data->ReasonCode != + LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && + event_data->ReasonCode != + LEAPIORAID_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_address = le64_to_cpu(event_data->SASAddress); + sas_device = __leapioraid_get_sdev_by_addr( + ioc, sas_address, + leapioraid_get_port_by_id(ioc, event_data->PhysicalPort, 0)); + if (!sas_device || !sas_device->starget) + goto out; + target_priv_data = sas_device->starget->hostdata; + if (!target_priv_data) + goto out; + if (event_data->ReasonCode == + LEAPIORAID_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) + target_priv_data->tm_busy = 1; + else + target_priv_data->tm_busy = 0; + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + pr_err( + "%s %s tm_busy flag for handle(0x%04x)\n", ioc->name, + (target_priv_data->tm_busy == 1) ? 
"Enable" : "Disable", + target_priv_data->handle); +out: + if (sas_device) + leapioraid_sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +static void +leapioraid_scsihost_sas_enclosure_dev_status_change_event_debug( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataSasEnclDevStatusChange_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->ReasonCode) { + case LEAPIORAID_EVENT_SAS_ENCL_RC_ADDED: + reason_str = "enclosure add"; + break; + case LEAPIORAID_EVENT_SAS_ENCL_RC_NOT_RESPONDING: + reason_str = "enclosure remove"; + break; + default: + reason_str = "unknown reason"; + break; + } + pr_info( + "%s enclosure status change: (%s)\n\thandle(0x%04x),\n\t\t" + "enclosure logical id(0x%016llx) number slots(%d)\n", + ioc->name, + reason_str, + le16_to_cpu(event_data->EnclosureHandle), + (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID), + le16_to_cpu(event_data->StartSlot)); +} + +static void +leapioraid_scsihost_sas_enclosure_dev_status_change_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct LeapioraidCfgRep_t mpi_reply; + struct leapioraid_enclosure_node *enclosure_dev = NULL; + struct LeapioraidEventDataSasEnclDevStatusChange_t *event_data = + fw_event->event_data; + int rc; + + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + leapioraid_scsihost_sas_enclosure_dev_status_change_event_debug( + ioc, fw_event->event_data); + if (ioc->shost_recovery) + return; + event_data->EnclosureHandle = le16_to_cpu(event_data->EnclosureHandle); + if (event_data->EnclosureHandle) + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle(ioc, + event_data->EnclosureHandle); + switch (event_data->ReasonCode) { + case LEAPIORAID_EVENT_SAS_ENCL_RC_ADDED: + if (!enclosure_dev) { + enclosure_dev = + kzalloc(sizeof(struct leapioraid_enclosure_node), GFP_KERNEL); + if (!enclosure_dev) { + pr_err("%s failure at %s:%d/%s()!\n", ioc->name, + 
__FILE__, __LINE__, __func__); + return; + } + rc = leapioraid_config_get_enclosure_pg0(ioc, + &mpi_reply, + &enclosure_dev->pg0, + LEAPIORAID_SAS_ENCLOS_PGAD_FORM_HANDLE, + event_data->EnclosureHandle); + if (rc + || (le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK)) { + kfree(enclosure_dev); + return; + } + list_add_tail(&enclosure_dev->list, + &ioc->enclosure_list); + } + break; + case LEAPIORAID_EVENT_SAS_ENCL_RC_NOT_RESPONDING: + if (enclosure_dev) { + list_del(&enclosure_dev->list); + kfree(enclosure_dev); + } + break; + default: + break; + } +} + +static void +leapioraid_scsihost_sas_broadcast_primitive_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct scsi_cmnd *scmd; + struct scsi_device *sdev; + u16 smid, handle; + u32 lun; + struct LEAPIORAID_DEVICE *sas_device_priv_data; + u32 termination_count; + u32 query_count; + struct LeapioraidSCSITmgRep_t *mpi_reply; + struct LeapioraidEventDataSasBroadcastPrimitive_t *event_data = + fw_event->event_data; + u16 ioc_status; + unsigned long flags; + int r; + u8 max_retries = 0; + u8 task_abort_retries; + struct leapioraid_scsiio_tracker *st; + + mutex_lock(&ioc->tm_cmds.mutex); + dewtprintk(ioc, + pr_info( + "%s %s: enter: phy number(%d), width(%d)\n", + ioc->name, __func__, + event_data->PhyNum, event_data->PortWidth)); + leapioraid_scsihost_block_io_all_device(ioc); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + mpi_reply = ioc->tm_cmds.reply; +broadcast_aen_retry: + if (max_retries++ == 5) { + dewtprintk(ioc, pr_info("%s %s: giving up\n", + ioc->name, __func__)); + goto out; + } else if (max_retries > 1) + dewtprintk(ioc, pr_info("%s %s: %d retry\n", + ioc->name, __func__, max_retries - 1)); + termination_count = 0; + query_count = 0; + for (smid = 1; smid <= ioc->shost->can_queue; smid++) { + if (ioc->shost_recovery) + goto out; + scmd = leapioraid_scsihost_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + st = 
leapioraid_base_scsi_cmd_priv(scmd); + if (!st || st->smid == 0) + continue; + sdev = scmd->device; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) + continue; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT) + continue; + if (sas_device_priv_data->sas_target->flags & + LEAPIORAID_TARGET_FLAGS_VOLUME) + continue; + handle = sas_device_priv_data->sas_target->handle; + lun = sas_device_priv_data->lun; + query_count++; + if (ioc->shost_recovery) + goto out; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + r = leapioraid_scsihost_issue_tm(ioc, handle, 0, 0, lun, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_QUERY_TASK, + st->smid, 30, 0); + if (r == FAILED) { + sdev_printk(KERN_WARNING, sdev, + "leapioraid_scsihost_issue_tm:\n\t\t" + "FAILED when sending QUERY_TASK: scmd(%p)\n", + scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) + & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + sdev_printk(KERN_WARNING, sdev, + "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n", + ioc_status, scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + if (mpi_reply->ResponseCode == + LEAPIORAID_SCSITASKMGMT_RSP_TM_SUCCEEDED || + mpi_reply->ResponseCode == + LEAPIORAID_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) { + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + continue; + } + task_abort_retries = 0; +tm_retry: + if (task_abort_retries++ == 60) { + dewtprintk(ioc, pr_err( + "%s %s: ABORT_TASK: giving up\n", + ioc->name, __func__)); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + if (ioc->shost_recovery) + goto out_no_lock; + r = leapioraid_scsihost_issue_tm(ioc, handle, sdev->channel, + sdev->id, sdev->lun, + LEAPIORAID_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + st->smid, 30, 0); + if (r == FAILED) { 
+ sdev_printk(KERN_WARNING, sdev, + "ABORT_TASK: FAILED : scmd(%p)\n", scmd); + goto tm_retry; + } + if (task_abort_retries > 1) + sdev_printk(KERN_WARNING, sdev, + "leapioraid_scsihost_issue_tm:\n\t\t" + "ABORT_TASK: RETRIES (%d): scmd(%p)\n", + task_abort_retries - 1, + scmd); + termination_count += le32_to_cpu(mpi_reply->TerminationCount); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + } + if (ioc->broadcast_aen_pending) { + dewtprintk(ioc, + pr_info("%s %s: loop back due to pending AEN\n", + ioc->name, __func__)); + ioc->broadcast_aen_pending = 0; + goto broadcast_aen_retry; + } +out: + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); +out_no_lock: + dewtprintk(ioc, pr_err( + "%s %s - exit, query_count = %d termination_count = %d\n", + ioc->name, __func__, query_count, + termination_count)); + ioc->broadcast_aen_busy = 0; + if (!ioc->shost_recovery) + leapioraid_scsihost_ublock_io_all_device(ioc, 1); + mutex_unlock(&ioc->tm_cmds.mutex); +} + +static void +leapioraid_scsihost_sas_discovery_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct LeapioraidEventDataSasDiscovery_t *event_data + = fw_event->event_data; + + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) { + pr_info("%s sas discovery event: (%s)", + ioc->name, + (event_data->ReasonCode == + LEAPIORAID_EVENT_SAS_DISC_RC_STARTED) ? 
"start" : "stop"); + if (event_data->DiscoveryStatus) + pr_info("discovery_status(0x%08x)", + le32_to_cpu(event_data->DiscoveryStatus)); + pr_info("\n"); + } + if (event_data->ReasonCode == LEAPIORAID_EVENT_SAS_DISC_RC_STARTED && + !ioc->sas_hba.num_phys) { + if (disable_discovery > 0 && ioc->shost_recovery) { + while (ioc->shost_recovery) + ssleep(1); + } + leapioraid_scsihost_sas_host_add(ioc); + } +} + +static void +leapioraid_scsihost_sas_device_discovery_error_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct LeapioraidEventDataSasDeviceDiscoveryError_t *event_data = + fw_event->event_data; + + switch (event_data->ReasonCode) { + case LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_FAILED: + pr_warn( + "%s SMP command sent to the expander(handle:0x%04x,\n\t\t" + "sas_address:0x%016llx,physical_port:0x%02x) has failed\n", + ioc->name, + le16_to_cpu(event_data->DevHandle), + (unsigned long long)le64_to_cpu(event_data->SASAddress), + event_data->PhysicalPort); + break; + case LEAPIORAID_EVENT_SAS_DISC_ERR_SMP_TIMEOUT: + pr_warn( + "%s SMP command sent to the expander(handle:0x%04x,\n\t\t" + "sas_address:0x%016llx,physical_port:0x%02x) has timed out\n", + ioc->name, + le16_to_cpu(event_data->DevHandle), + (unsigned long long)le64_to_cpu(event_data->SASAddress), + event_data->PhysicalPort); + break; + default: + break; + } +} + +static int +leapioraid_scsihost_ir_fastpath( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + u8 phys_disk_num) +{ + struct LeapioraidRaidActionReq_t *mpi_request; + struct LeapioraidRaidActionRep_t *mpi_reply; + u16 smid; + u8 issue_reset = 0; + int rc = 0; + u16 ioc_status; + u32 log_info; + + mutex_lock(&ioc->scsih_cmds.mutex); + if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: scsih_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING; + smid = leapioraid_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + 
pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED; + rc = -EAGAIN; + goto out; + } + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidRaidActionReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_RAID_ACTION; + mpi_request->Action = 0x24; + mpi_request->PhysDiskNum = phys_disk_num; + dewtprintk(ioc, pr_info( + "%s IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n", + ioc->name, handle, phys_disk_num)); + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->scsih_cmds.done, 10 * HZ); + if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + leapioraid_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, + mpi_request, + sizeof(struct LeapioraidRaidActionReq_t) + / 4, issue_reset); + rc = -EFAULT; + goto out; + } + if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + if (ioc_status & LEAPIORAID_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) + log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + else + log_info = 0; + ioc_status &= LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + dewtprintk(ioc, pr_err( + "%s IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n", + ioc->name, ioc_status, + log_info)); + rc = -EFAULT; + } else + dewtprintk(ioc, pr_err( + "%s IR RAID_ACTION: completed successfully\n", + ioc->name)); + } +out: + ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return rc; +} + +static void +leapioraid_scsihost_reprobe_lun( + struct scsi_device *sdev, void *no_uld_attach) +{ + int rc; + + sdev->no_uld_attach = no_uld_attach ? 
1 : 0; + sdev_printk(KERN_INFO, sdev, "%s raid component\n", + sdev->no_uld_attach ? "hiding" : "exposing"); + rc = scsi_device_reprobe(sdev); + pr_info("%s rc=%d\n", __func__, rc); +} + +static void +leapioraid_scsihost_sas_volume_add(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + struct leapioraid_raid_device *raid_device; + unsigned long flags; + u64 wwid; + u16 handle = le16_to_cpu(element->VolDevHandle); + int rc; + + leapioraid_config_get_volume_wwid(ioc, handle, &wwid); + if (!wwid) { + pr_err("%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_wwid( + ioc, wwid); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (raid_device) + return; + raid_device = kzalloc(sizeof(struct leapioraid_raid_device), + GFP_KERNEL); + if (!raid_device) + return; + + raid_device->id = ioc->sas_id++; + raid_device->channel = RAID_CHANNEL; + raid_device->handle = handle; + raid_device->wwid = wwid; + leapioraid_scsihost_raid_device_add(ioc, raid_device); + if (!ioc->wait_for_discovery_to_complete) { + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + leapioraid_scsihost_raid_device_remove(ioc, raid_device); + } else { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + leapioraid_scsihost_determine_boot_device( + ioc, raid_device, RAID_CHANNEL); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } +} + +static void +leapioraid_scsihost_sas_volume_delete( + struct LEAPIORAID_ADAPTER *ioc, u16 handle) +{ + struct leapioraid_raid_device *raid_device; + unsigned long flags; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct scsi_target *starget = NULL; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_raid_device_find_by_handle(ioc, handle); + if (raid_device) { + if (raid_device->starget) { + starget = 
raid_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->deleted = 1; + } + pr_info("%s removing handle(0x%04x), wwid(0x%016llx)\n", + ioc->name, raid_device->handle, + (unsigned long long)raid_device->wwid); + list_del(&raid_device->list); + kfree(raid_device); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (starget) + scsi_remove_target(&starget->dev); +} + +static void +leapioraid_scsihost_sas_pd_expose( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + struct leapioraid_sas_device *sas_device; + struct scsi_target *starget = NULL; + struct LEAPIORAID_TARGET *sas_target_priv_data; + unsigned long flags; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + sas_device->volume_handle = 0; + sas_device->volume_wwid = 0; + clear_bit(handle, ioc->pd_handles); + if (sas_device->starget && sas_device->starget->hostdata) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->flags &= + ~LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT; + sas_device->pfa_led_on = 0; + leapioraid_sas_device_put(sas_device); + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!sas_device) + return; + if (starget) + starget_for_each_device(starget, NULL, leapioraid_scsihost_reprobe_lun); +} + +static void +leapioraid_scsihost_sas_pd_hide( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + struct leapioraid_sas_device *sas_device; + struct scsi_target *starget = NULL; + struct LEAPIORAID_TARGET *sas_target_priv_data; + unsigned long flags; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + u16 volume_handle = 0; + u64 volume_wwid = 0; + + leapioraid_config_get_volume_handle(ioc, handle, &volume_handle); + if (volume_handle) + leapioraid_config_get_volume_wwid(ioc, 
volume_handle, + &volume_wwid); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + set_bit(handle, ioc->pd_handles); + if (sas_device->starget && sas_device->starget->hostdata) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->flags |= + LEAPIORAID_TARGET_FLAGS_RAID_COMPONENT; + sas_device->volume_handle = volume_handle; + sas_device->volume_wwid = volume_wwid; + leapioraid_sas_device_put(sas_device); + } + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (!sas_device) + return; + leapioraid_scsihost_ir_fastpath(ioc, handle, element->PhysDiskNum); + if (starget) + starget_for_each_device(starget, (void *)1, + leapioraid_scsihost_reprobe_lun); +} + +static void +leapioraid_scsihost_sas_pd_delete(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + + leapioraid_scsihost_device_remove_by_handle(ioc, handle); +} + +static void +leapioraid_scsihost_sas_pd_add(struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventIrCfgEle_t *element) +{ + struct leapioraid_sas_device *sas_device; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasDevP0_t sas_device_pg0; + u32 ioc_status; + u64 sas_address; + u16 parent_handle; + + set_bit(handle, ioc->pd_handles); + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + leapioraid_scsihost_ir_fastpath(ioc, handle, element->PhysDiskNum); + leapioraid_sas_device_put(sas_device); + return; + } + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != 
LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!leapioraid_scsihost_get_sas_address(ioc, parent_handle, &sas_address)) + leapioraid_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, + LEAPIORAID_SAS_NEG_LINK_RATE_1_5, + leapioraid_get_port_by_id(ioc, + sas_device_pg0.PhysicalPort, + 0)); + leapioraid_scsihost_ir_fastpath(ioc, handle, element->PhysDiskNum); + leapioraid_scsihost_add_device(ioc, handle, 0, 1); +} + +static void +leapioraid_scsihost_sas_ir_config_change_event_debug( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataIrCfgChangeList_t *event_data) +{ + struct LeapioraidEventIrCfgEle_t *element; + u8 element_type; + int i; + char *reason_str = NULL, *element_str = NULL; + + element = + (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0]; + pr_info("%s raid config change: (%s), elements(%d)\n", + ioc->name, + (le32_to_cpu(event_data->Flags) & + LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
"foreign" : + "native", event_data->NumElements); + for (i = 0; i < event_data->NumElements; i++, element++) { + switch (element->ReasonCode) { + case LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED: + reason_str = "add"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED: + reason_str = "remove"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_NO_CHANGE: + reason_str = "no change"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE: + reason_str = "hide"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE: + reason_str = "unhide"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_CREATED: + reason_str = "volume_created"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED: + reason_str = "volume_deleted"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_CREATED: + reason_str = "pd_created"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_DELETED: + reason_str = "pd_deleted"; + break; + default: + reason_str = "unknown reason"; + break; + } + element_type = le16_to_cpu(element->ElementFlags) & + LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK; + switch (element_type) { + case LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT: + element_str = "volume"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT: + element_str = "phys disk"; + break; + case LEAPIORAID_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT: + element_str = "hot spare"; + break; + default: + element_str = "unknown element"; + break; + } + pr_info( + "\t(%s:%s), vol handle(0x%04x), pd handle(0x%04x), pd num(0x%02x)\n", + element_str, + reason_str, le16_to_cpu(element->VolDevHandle), + le16_to_cpu(element->PhysDiskDevHandle), + element->PhysDiskNum); + } +} + +static void +leapioraid_scsihost_sas_ir_config_change_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct LeapioraidEventIrCfgEle_t *element; + int i; + u8 foreign_config; + struct LeapioraidEventDataIrCfgChangeList_t *event_data + = fw_event->event_data; + + if ((ioc->logging_level & 
LEAPIORAID_DEBUG_EVENT_WORK_TASK) + && !ioc->warpdrive_msg) + leapioraid_scsihost_sas_ir_config_change_event_debug(ioc, event_data); + foreign_config = (le32_to_cpu(event_data->Flags) & + LEAPIORAID_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0; + element = + (struct LeapioraidEventIrCfgEle_t *) &event_data->ConfigElement[0]; + if (ioc->shost_recovery) { + for (i = 0; i < event_data->NumElements; i++, element++) { + if (element->ReasonCode == + LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE) + leapioraid_scsihost_ir_fastpath(ioc, + le16_to_cpu(element->PhysDiskDevHandle), + element->PhysDiskNum); + } + return; + } + for (i = 0; i < event_data->NumElements; i++, element++) { + switch (element->ReasonCode) { + case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_CREATED: + case LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED: + if (!foreign_config) + leapioraid_scsihost_sas_volume_add(ioc, element); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_VOLUME_DELETED: + case LEAPIORAID_EVENT_IR_CHANGE_RC_REMOVED: + if (!foreign_config) + leapioraid_scsihost_sas_volume_delete(ioc, + le16_to_cpu + (element->VolDevHandle)); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_CREATED: + leapioraid_scsihost_sas_pd_hide(ioc, element); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_PD_DELETED: + leapioraid_scsihost_sas_pd_expose(ioc, element); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_HIDE: + leapioraid_scsihost_sas_pd_add(ioc, element); + break; + case LEAPIORAID_EVENT_IR_CHANGE_RC_UNHIDE: + leapioraid_scsihost_sas_pd_delete(ioc, element); + break; + } + } +} + +static void +leapioraid_scsihost_sas_ir_volume_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + u64 wwid; + unsigned long flags; + struct leapioraid_raid_device *raid_device; + u16 handle; + u32 state; + int rc; + struct LeapioraidEventDataIrVol_t *event_data + = fw_event->event_data; + + if (ioc->shost_recovery) + return; + if (event_data->ReasonCode != LEAPIORAID_EVENT_IR_VOLUME_RC_STATE_CHANGED) + return; + 
handle = le16_to_cpu(event_data->VolDevHandle); + state = le32_to_cpu(event_data->NewValue); + if (!ioc->warpdrive_msg) + dewtprintk(ioc, + pr_info("%s %s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", + ioc->name, + __func__, handle, + le32_to_cpu(event_data->PreviousValue), + state)); + switch (state) { + case LEAPIORAID_RAID_VOL_STATE_MISSING: + case LEAPIORAID_RAID_VOL_STATE_FAILED: + leapioraid_scsihost_sas_volume_delete(ioc, handle); + break; + case LEAPIORAID_RAID_VOL_STATE_ONLINE: + case LEAPIORAID_RAID_VOL_STATE_DEGRADED: + case LEAPIORAID_RAID_VOL_STATE_OPTIMAL: + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = + leapioraid_raid_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (raid_device) + break; + leapioraid_config_get_volume_wwid(ioc, handle, &wwid); + if (!wwid) { + pr_err( + "%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + break; + } + raid_device = kzalloc(sizeof(struct leapioraid_raid_device), + GFP_KERNEL); + if (!raid_device) + break; + + raid_device->id = ioc->sas_id++; + raid_device->channel = RAID_CHANNEL; + raid_device->handle = handle; + raid_device->wwid = wwid; + leapioraid_scsihost_raid_device_add(ioc, raid_device); + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + leapioraid_scsihost_raid_device_remove(ioc, raid_device); + break; + case LEAPIORAID_RAID_VOL_STATE_INITIALIZING: + default: + break; + } +} + +static void +leapioraid_scsihost_sas_ir_physical_disk_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + u16 handle, parent_handle; + u32 state; + struct leapioraid_sas_device *sas_device; + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasDevP0_t sas_device_pg0; + u32 ioc_status; + struct LeapioraidEventDataIrPhyDisk_t *event_data + = fw_event->event_data; + u64 sas_address; + + if (ioc->shost_recovery) + return; + if (event_data->ReasonCode != + 
LEAPIORAID_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) + return; + handle = le16_to_cpu(event_data->PhysDiskDevHandle); + state = le32_to_cpu(event_data->NewValue); + if (!ioc->warpdrive_msg) + dewtprintk(ioc, + pr_info("%s %s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", + ioc->name, + __func__, handle, + le32_to_cpu(event_data->PreviousValue), + state)); + switch (state) { + case LEAPIORAID_RAID_PD_STATE_ONLINE: + case LEAPIORAID_RAID_PD_STATE_DEGRADED: + case LEAPIORAID_RAID_PD_STATE_REBUILDING: + case LEAPIORAID_RAID_PD_STATE_OPTIMAL: + case LEAPIORAID_RAID_PD_STATE_HOT_SPARE: + set_bit(handle, ioc->pd_handles); + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + leapioraid_sas_device_put(sas_device); + return; + } + if ((leapioraid_config_get_sas_device_pg0( + ioc, &mpi_reply, + &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, + handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!leapioraid_scsihost_get_sas_address + (ioc, parent_handle, &sas_address)) + leapioraid_transport_update_links(ioc, sas_address, + handle, + sas_device_pg0.PhyNum, + LEAPIORAID_SAS_NEG_LINK_RATE_1_5, + leapioraid_get_port_by_id + (ioc, + sas_device_pg0.PhysicalPort, 0)); + leapioraid_scsihost_add_device(ioc, handle, 0, 1); + break; + case LEAPIORAID_RAID_PD_STATE_OFFLINE: + case LEAPIORAID_RAID_PD_STATE_NOT_CONFIGURED: + case LEAPIORAID_RAID_PD_STATE_NOT_COMPATIBLE: + default: + break; + } +} + +static void +leapioraid_scsihost_sas_ir_operation_status_event_debug( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidEventDataIrOpStatus_t *event_data) +{ + char *reason_str = NULL; + + switch 
(event_data->RAIDOperation) { + case LEAPIORAID_EVENT_IR_RAIDOP_RESYNC: + reason_str = "resync"; + break; + case LEAPIORAID_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION: + reason_str = "online capacity expansion"; + break; + case LEAPIORAID_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: + reason_str = "consistency check"; + break; + case LEAPIORAID_EVENT_IR_RAIDOP_BACKGROUND_INIT: + reason_str = "background init"; + break; + case LEAPIORAID_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: + reason_str = "make data consistent"; + break; + } + if (!reason_str) + return; + pr_info( + "%s raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n", + ioc->name, reason_str, + le16_to_cpu(event_data->VolDevHandle), + event_data->PercentComplete); +} + +static void +leapioraid_scsihost_sas_ir_operation_status_event( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + struct LeapioraidEventDataIrOpStatus_t *event_data + = fw_event->event_data; + static struct leapioraid_raid_device *raid_device; + unsigned long flags; + u16 handle; + + if ((ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + && !ioc->warpdrive_msg) + leapioraid_scsihost_sas_ir_operation_status_event_debug( + ioc, event_data); + if (event_data->RAIDOperation == LEAPIORAID_EVENT_IR_RAIDOP_RESYNC) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + handle = le16_to_cpu(event_data->VolDevHandle); + raid_device = + leapioraid_raid_device_find_by_handle(ioc, handle); + if (raid_device) + raid_device->percent_complete = + event_data->PercentComplete; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } +} + +static void +leapioraid_scsihost_prep_device_scan(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (sas_device_priv_data && sas_device_priv_data->sas_target) + sas_device_priv_data->sas_target->deleted = 1; + } +} + +static 
void +leapioraid_scsihost_update_device_qdepth(struct LEAPIORAID_ADAPTER *ioc) +{ + struct LEAPIORAID_DEVICE *sas_device_priv_data; + struct leapioraid_sas_device *sas_device; + struct scsi_device *sdev; + u16 qdepth; + + pr_info("%s Update Devices with FW Reported QD\n", + ioc->name); + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (sas_device_priv_data && sas_device_priv_data->sas_target) { + sas_device = sas_device_priv_data->sas_target->sas_dev; + if (sas_device && + sas_device->device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) + qdepth = + (sas_device->port_type > + 1) ? ioc->max_wideport_qd : ioc->max_narrowport_qd; + else if (sas_device + && sas_device->device_info & + LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE) + qdepth = ioc->max_sata_qd; + else + continue; + leapioraid__scsihost_change_queue_depth(sdev, qdepth); + } + } +} + +static void +leapioraid_scsihost_mark_responding_sas_device( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidSasDevP0_t *sas_device_pg0) +{ + struct LEAPIORAID_TARGET *sas_target_priv_data = NULL; + struct scsi_target *starget; + struct leapioraid_sas_device *sas_device; + struct leapioraid_enclosure_node *enclosure_dev = NULL; + unsigned long flags; + struct leapioraid_hba_port *port; + + port = leapioraid_get_port_by_id(ioc, sas_device_pg0->PhysicalPort, 0); + if (sas_device_pg0->EnclosureHandle) { + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle(ioc, + le16_to_cpu + (sas_device_pg0->EnclosureHandle)); + if (enclosure_dev == NULL) + pr_info( + "%s Enclosure handle(0x%04x)doesn't match with enclosure device!\n", + ioc->name, sas_device_pg0->EnclosureHandle); + } + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if ((sas_device->sas_address == + le64_to_cpu(sas_device_pg0->SASAddress)) + && (sas_device->slot == le16_to_cpu(sas_device_pg0->Slot)) + && (sas_device->port == port)) { + sas_device->responding = 
1; + starget = sas_device->starget; + if (starget && starget->hostdata) { + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->tm_busy = 0; + sas_target_priv_data->deleted = 0; + } else + sas_target_priv_data = NULL; + if (starget) { + starget_printk(KERN_INFO, starget, + "handle(0x%04x), sas_address(0x%016llx), port: %d\n", + sas_device->handle, + (unsigned long long)sas_device->sas_address, + sas_device->port->port_id); + if (sas_device->enclosure_handle != 0) + starget_printk(KERN_INFO, starget, + "enclosure logical id(0x%016llx), slot(%d)\n", + (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot); + } + if (le16_to_cpu(sas_device_pg0->Flags) & + LEAPIORAID_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = + sas_device_pg0->EnclosureLevel; + memcpy(sas_device->connector_name, + sas_device_pg0->ConnectorName, 4); + sas_device->connector_name[4] = '\0'; + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + sas_device->enclosure_handle = + le16_to_cpu(sas_device_pg0->EnclosureHandle); + sas_device->is_chassis_slot_valid = 0; + if (enclosure_dev) { + sas_device->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + if (le16_to_cpu(enclosure_dev->pg0.Flags) & + LEAPIORAID_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) { + sas_device->is_chassis_slot_valid = 1; + sas_device->chassis_slot = + enclosure_dev->pg0.ChassisSlot; + } + } + if (sas_device->handle == + le16_to_cpu(sas_device_pg0->DevHandle)) + goto out; + pr_info("\thandle changed from(0x%04x)!!!\n", + sas_device->handle); + sas_device->handle = + le16_to_cpu(sas_device_pg0->DevHandle); + if (sas_target_priv_data) + sas_target_priv_data->handle = + le16_to_cpu(sas_device_pg0->DevHandle); + goto out; + } + } +out: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +static void +leapioraid_scsihost_create_enclosure_list_after_reset( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct 
leapioraid_enclosure_node *enclosure_dev; + struct LeapioraidCfgRep_t mpi_reply; + u16 enclosure_handle; + int rc; + + leapioraid_free_enclosure_list(ioc); + enclosure_handle = 0xFFFF; + do { + enclosure_dev = + kzalloc(sizeof(struct leapioraid_enclosure_node), GFP_KERNEL); + if (!enclosure_dev) { + pr_err("%s failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + rc = leapioraid_config_get_enclosure_pg0(ioc, &mpi_reply, + &enclosure_dev->pg0, + LEAPIORAID_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE, + enclosure_handle); + if (rc || (le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK)) { + kfree(enclosure_dev); + return; + } + list_add_tail(&enclosure_dev->list, &ioc->enclosure_list); + enclosure_handle = + le16_to_cpu(enclosure_dev->pg0.EnclosureHandle); + } while (1); +} + +static void +leapioraid_scsihost_search_responding_sas_devices( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u16 ioc_status; + u16 handle; + u32 device_info; + + pr_info("%s search for end-devices: start\n", + ioc->name); + if (list_empty(&ioc->sas_device_list)) + goto out; + handle = 0xFFFF; + while (!(leapioraid_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_info( + "%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, __func__, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(sas_device_pg0.DevHandle); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if (!(leapioraid_scsihost_is_sas_end_device(device_info))) + continue; + leapioraid_scsihost_mark_responding_sas_device( + ioc, &sas_device_pg0); + } +out: + pr_info("%s search for end-devices: complete\n", + ioc->name); +} + +static void 
+leapioraid_scsihost_mark_responding_raid_device( + struct LEAPIORAID_ADAPTER *ioc, u64 wwid, u16 handle) +{ + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct scsi_target *starget; + struct leapioraid_raid_device *raid_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->wwid == wwid && raid_device->starget) { + starget = raid_device->starget; + if (starget && starget->hostdata) { + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->deleted = 0; + } else + sas_target_priv_data = NULL; + raid_device->responding = 1; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + starget_printk(KERN_INFO, raid_device->starget, + "handle(0x%04x), wwid(0x%016llx)\n", + handle, + (unsigned long long)raid_device->wwid); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + if (raid_device->handle == handle) { + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + return; + } + pr_info("\thandle changed from(0x%04x)!!!\n", + raid_device->handle); + raid_device->handle = handle; + if (sas_target_priv_data) + sas_target_priv_data->handle = handle; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return; + } + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +static void +leapioraid_scsihost_search_responding_raid_devices( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidRaidVolP1_t volume_pg1; + struct LeapioraidRaidVolP0_t volume_pg0; + struct LeapioraidRaidPDP0_t pd_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u16 ioc_status; + u16 handle; + u8 phys_disk_num; + + if (!ioc->ir_firmware) + return; + pr_info("%s search for raid volumes: start\n", + ioc->name); + if (list_empty(&ioc->raid_device_list)) + goto out; + handle = 0xFFFF; + while (!(leapioraid_config_get_raid_volume_pg1(ioc, &mpi_reply, + &volume_pg1, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = + 
le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_info("%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, __func__, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(volume_pg1.DevHandle); + if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, + &volume_pg0, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, + handle, + sizeof + (struct LeapioraidRaidVolP0_t))) + continue; + if (volume_pg0.VolumeState == LEAPIORAID_RAID_VOL_STATE_OPTIMAL || + volume_pg0.VolumeState == LEAPIORAID_RAID_VOL_STATE_ONLINE || + volume_pg0.VolumeState == LEAPIORAID_RAID_VOL_STATE_DEGRADED) + leapioraid_scsihost_mark_responding_raid_device(ioc, + le64_to_cpu + (volume_pg1.WWID), + handle); + } + phys_disk_num = 0xFF; + memset(ioc->pd_handles, 0, ioc->pd_handles_sz); + while (!(leapioraid_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, + LEAPIORAID_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, + phys_disk_num))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_info("%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, __func__, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + phys_disk_num = pd_pg0.PhysDiskNum; + handle = le16_to_cpu(pd_pg0.DevHandle); + set_bit(handle, ioc->pd_handles); + } +out: + pr_info("%s search for responding raid volumes: complete\n", + ioc->name); +} + +static void +leapioraid_scsihost_mark_responding_expander( + struct LEAPIORAID_ADAPTER *ioc, + struct LeapioraidExpanderP0_t *expander_pg0) +{ + struct leapioraid_raid_sas_node *sas_expander; + unsigned long flags; + int i; + u8 port_id = expander_pg0->PhysicalPort; + struct leapioraid_hba_port *port = leapioraid_get_port_by_id( + ioc, port_id, 0); + struct leapioraid_enclosure_node *enclosure_dev = NULL; + u16 handle = le16_to_cpu(expander_pg0->DevHandle); + u16 
enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle); + u64 sas_address = le64_to_cpu(expander_pg0->SASAddress); + + if (enclosure_handle) + enclosure_dev = + leapioraid_scsihost_enclosure_find_by_handle(ioc, + enclosure_handle); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->sas_address != sas_address || + (sas_expander->port != port)) + continue; + sas_expander->responding = 1; + if (enclosure_dev) { + sas_expander->enclosure_logical_id = + le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); + sas_expander->enclosure_handle = + le16_to_cpu(expander_pg0->EnclosureHandle); + } + if (sas_expander->handle == handle) + goto out; + pr_info( + "\texpander(0x%016llx): handle changed from(0x%04x) to (0x%04x)!!!\n", + (unsigned long long)sas_expander->sas_address, + sas_expander->handle, handle); + sas_expander->handle = handle; + for (i = 0; i < sas_expander->num_phys; i++) + sas_expander->phy[i].handle = handle; + goto out; + } +out: + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +} + +static void +leapioraid_scsihost_search_responding_expanders( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidExpanderP0_t expander_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u16 ioc_status; + u64 sas_address; + u16 handle; + u8 port; + + pr_info("%s search for expanders: start\n", + ioc->name); + if (list_empty(&ioc->sas_expander_list)) + goto out; + handle = 0xFFFF; + while (! 
+ (leapioraid_config_get_expander_pg0 + (ioc, &mpi_reply, &expander_pg0, + LEAPIORAID_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_info( + "%s \tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, __func__, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(expander_pg0.DevHandle); + sas_address = le64_to_cpu(expander_pg0.SASAddress); + port = expander_pg0.PhysicalPort; + pr_info( + "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n", + handle, + (unsigned long long)sas_address, + ((ioc->multipath_on_hba) ? + (port) : (LEAPIORAID_MULTIPATH_DISABLED_PORT_ID))); + leapioraid_scsihost_mark_responding_expander( + ioc, &expander_pg0); + } +out: + pr_info("%s search for expanders: complete\n", + ioc->name); +} + +static void +leapioraid_scsihost_remove_unresponding_devices( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_sas_device *sas_device, *sas_device_next; + struct leapioraid_raid_sas_node *sas_expander, *sas_expander_next; + struct leapioraid_raid_device *raid_device, *raid_device_next; + struct list_head tmp_list; + unsigned long flags; + LIST_HEAD(head); + + pr_info("%s removing unresponding devices: start\n", + ioc->name); + pr_err("%s removing unresponding devices: sas end-devices\n", + ioc->name); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry_safe(sas_device, sas_device_next, + &ioc->sas_device_init_list, list) { + list_del_init(&sas_device->list); + leapioraid_sas_device_put(sas_device); + } + list_for_each_entry_safe(sas_device, sas_device_next, + &ioc->sas_device_list, list) { + if (!sas_device->responding) + list_move_tail(&sas_device->list, &head); + else + sas_device->responding = 0; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + list_for_each_entry_safe(sas_device, sas_device_next, &head, list) { + 
leapioraid_scsihost_remove_device(ioc, sas_device); + list_del_init(&sas_device->list); + leapioraid_sas_device_put(sas_device); + } + if (ioc->ir_firmware) { + pr_info("%s removing unresponding devices: volumes\n", + ioc->name); + list_for_each_entry_safe(raid_device, raid_device_next, + &ioc->raid_device_list, list) { + if (!raid_device->responding) + leapioraid_scsihost_sas_volume_delete(ioc, + raid_device->handle); + else + raid_device->responding = 0; + } + } + pr_err("%s removing unresponding devices: expanders\n", + ioc->name); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + INIT_LIST_HEAD(&tmp_list); + list_for_each_entry_safe(sas_expander, sas_expander_next, + &ioc->sas_expander_list, list) { + if (!sas_expander->responding) + list_move_tail(&sas_expander->list, &tmp_list); + else + sas_expander->responding = 0; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + list_for_each_entry_safe( + sas_expander, sas_expander_next, &tmp_list, list) { + leapioraid_scsihost_expander_node_remove(ioc, sas_expander); + } + pr_err("%s removing unresponding devices: complete\n", ioc->name); + leapioraid_scsihost_ublock_io_all_device(ioc, 0); +} + +static void +leapioraid_scsihost_refresh_expander_links( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_expander, u16 handle) +{ + struct LeapioraidExpanderP1_t expander_pg1; + struct LeapioraidCfgRep_t mpi_reply; + int i; + + for (i = 0; i < sas_expander->num_phys; i++) { + if ((leapioraid_config_get_expander_pg1(ioc, &mpi_reply, + &expander_pg1, i, + handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + leapioraid_transport_update_links(ioc, + sas_expander->sas_address, + le16_to_cpu(expander_pg1.AttachedDevHandle), + i, + expander_pg1.NegotiatedLinkRate >> 4, + sas_expander->port); + } +} + +static void +leapioraid_scsihost_scan_for_devices_after_reset( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct LeapioraidExpanderP0_t 
expander_pg0; + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidRaidVolP1_t *volume_pg1; + struct LeapioraidRaidVolP0_t *volume_pg0; + struct LeapioraidRaidPDP0_t pd_pg0; + struct LeapioraidEventIrCfgEle_t element; + struct LeapioraidCfgRep_t mpi_reply; + u8 phys_disk_num, port_id; + u16 ioc_status; + u16 handle, parent_handle; + u64 sas_address; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_sas_node *expander_device; + static struct leapioraid_raid_device *raid_device; + u8 retry_count; + unsigned long flags; + + volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL); + if (!volume_pg0) + return; + + volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL); + if (!volume_pg1) { + kfree(volume_pg0); + return; + } + pr_info("%s scan devices: start\n", ioc->name); + leapioraid_scsihost_sas_host_refresh(ioc); + pr_info("%s \tscan devices: expanders start\n", + ioc->name); + handle = 0xFFFF; + while (! + (leapioraid_config_get_expander_pg0 + (ioc, &mpi_reply, &expander_pg0, + LEAPIORAID_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s \tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(expander_pg0.DevHandle); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + port_id = expander_pg0.PhysicalPort; + expander_device = + leapioraid_scsihost_expander_find_by_sas_address( + ioc, + le64_to_cpu + (expander_pg0.SASAddress), + leapioraid_get_port_by_id + (ioc, + port_id, + 0)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (expander_device) + leapioraid_scsihost_refresh_expander_links( + ioc, expander_device, handle); + else { + pr_err( + "%s \tBEFORE adding expander:\n\t\t" + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + 
le64_to_cpu(expander_pg0.SASAddress)); + leapioraid_scsihost_expander_add(ioc, handle); + pr_info( + "%s \tAFTER adding expander:\n\t\t" + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(expander_pg0.SASAddress)); + } + } + pr_info("%s \tscan devices: expanders complete\n", + ioc->name); + if (!ioc->ir_firmware) + goto skip_to_sas; + pr_info("%s \tscan devices: phys disk start\n", + ioc->name); + phys_disk_num = 0xFF; + while (!(leapioraid_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, + LEAPIORAID_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, + phys_disk_num))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s \tbreak from phys disk scan:\n\t\t" + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, + ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + phys_disk_num = pd_pg0.PhysDiskNum; + handle = le16_to_cpu(pd_pg0.DevHandle); + sas_device = leapioraid_get_sdev_by_handle(ioc, handle); + if (sas_device) { + leapioraid_sas_device_put(sas_device); + continue; + } + if (leapioraid_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, + handle) != 0) + continue; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s \tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!leapioraid_scsihost_get_sas_address(ioc, parent_handle, + &sas_address)) { + pr_err( + "%s \tBEFORE adding phys disk:\n\t\t" + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + port_id = sas_device_pg0.PhysicalPort; + leapioraid_transport_update_links(ioc, 
sas_address, + handle, + sas_device_pg0.PhyNum, + LEAPIORAID_SAS_NEG_LINK_RATE_1_5, + leapioraid_get_port_by_id + (ioc, port_id, 0)); + set_bit(handle, ioc->pd_handles); + retry_count = 0; + while (leapioraid_scsihost_add_device + (ioc, handle, retry_count++, 1)) { + ssleep(1); + } + pr_err( + "%s \tAFTER adding phys disk:\n\t\t" + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + } + } + pr_info("%s \tscan devices: phys disk complete\n", + ioc->name); + pr_info("%s \tscan devices: volumes start\n", + ioc->name); + handle = 0xFFFF; + while (!(leapioraid_config_get_raid_volume_pg1(ioc, &mpi_reply, + volume_pg1, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s \tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(volume_pg1->DevHandle); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = leapioraid_scsihost_raid_device_find_by_wwid( + ioc, le64_to_cpu(volume_pg1->WWID)); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (raid_device) + continue; + if (leapioraid_config_get_raid_volume_pg0(ioc, &mpi_reply, + volume_pg0, + LEAPIORAID_RAID_VOLUME_PGAD_FORM_HANDLE, + handle, + sizeof + (struct LeapioraidRaidVolP0_t))) + continue; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s \tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + if (volume_pg0->VolumeState == LEAPIORAID_RAID_VOL_STATE_OPTIMAL || + volume_pg0->VolumeState == LEAPIORAID_RAID_VOL_STATE_ONLINE || + volume_pg0->VolumeState == + 
LEAPIORAID_RAID_VOL_STATE_DEGRADED) { + memset(&element, 0, + sizeof(struct LeapioraidEventIrCfgEle_t)); + element.ReasonCode = LEAPIORAID_EVENT_IR_CHANGE_RC_ADDED; + element.VolDevHandle = volume_pg1->DevHandle; + pr_info("%s \tBEFORE adding volume: handle (0x%04x)\n", + ioc->name, volume_pg1->DevHandle); + leapioraid_scsihost_sas_volume_add(ioc, &element); + pr_info("%s \tAFTER adding volume: handle (0x%04x)\n", + ioc->name, volume_pg1->DevHandle); + } + } + pr_info("%s \tscan devices: volumes complete\n", + ioc->name); +skip_to_sas: + pr_info("%s \tscan devices: sas end devices start\n", + ioc->name); + handle = 0xFFFF; + while (!(leapioraid_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = + le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err( + "%s \tbreak from sas end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(sas_device_pg0.DevHandle); + if (! 
+ (leapioraid_scsihost_is_sas_end_device + (le32_to_cpu(sas_device_pg0.DeviceInfo)))) + continue; + port_id = sas_device_pg0.PhysicalPort; + sas_device = leapioraid_get_sdev_by_addr(ioc, + le64_to_cpu + (sas_device_pg0.SASAddress), + leapioraid_get_port_by_id + (ioc, port_id, 0)); + if (sas_device) { + leapioraid_sas_device_put(sas_device); + continue; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!leapioraid_scsihost_get_sas_address + (ioc, parent_handle, &sas_address)) { + pr_err( + "%s \tBEFORE adding sas end device:\n\t\t" + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + leapioraid_transport_update_links(ioc, sas_address, + handle, + sas_device_pg0.PhyNum, + LEAPIORAID_SAS_NEG_LINK_RATE_1_5, + leapioraid_get_port_by_id + (ioc, port_id, 0)); + retry_count = 0; + while (leapioraid_scsihost_add_device + (ioc, handle, retry_count++, 0)) { + ssleep(1); + } + pr_err( + "%s \tAFTER adding sas end device:\n\t\t" + "handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + } + } + pr_err("%s \tscan devices: sas end devices complete\n", ioc->name); + kfree(volume_pg0); + kfree(volume_pg1); + pr_info("%s scan devices: complete\n", ioc->name); +} + +void +leapioraid_scsihost_clear_outstanding_scsi_tm_commands( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_internal_qcmd *scsih_qcmd, *scsih_qcmd_next; + unsigned long flags; + + if (ioc->scsih_cmds.status & LEAPIORAID_CMD_PENDING) { + ioc->scsih_cmds.status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, ioc->scsih_cmds.smid); + complete(&ioc->scsih_cmds.done); + } + if (ioc->tm_cmds.status & LEAPIORAID_CMD_PENDING) { + ioc->tm_cmds.status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, ioc->tm_cmds.smid); + complete(&ioc->tm_cmds.done); + } + spin_lock_irqsave(&ioc->scsih_q_internal_lock, flags); + 
list_for_each_entry_safe(scsih_qcmd, scsih_qcmd_next, + &ioc->scsih_q_intenal_cmds, list) { + scsih_qcmd->status |= LEAPIORAID_CMD_RESET; + leapioraid_base_free_smid(ioc, scsih_qcmd->smid); + } + spin_unlock_irqrestore(&ioc->scsih_q_internal_lock, flags); + memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz); + memset(ioc->device_remove_in_progress, 0, + ioc->device_remove_in_progress_sz); + memset(ioc->tm_tr_retry, 0, ioc->tm_tr_retry_sz); + leapioraid_scsihost_fw_event_cleanup_queue(ioc); + leapioraid_scsihost_flush_running_cmds(ioc); +} + +void +leapioraid_scsihost_reset_handler(struct LEAPIORAID_ADAPTER *ioc, + int reset_phase) +{ + switch (reset_phase) { + case LEAPIORAID_IOC_PRE_RESET_PHASE: + dtmprintk(ioc, pr_info( + "%s %s: LEAPIORAID_IOC_PRE_RESET_PHASE\n", + ioc->name, __func__)); + break; + case LEAPIORAID_IOC_AFTER_RESET_PHASE: + dtmprintk(ioc, pr_info( + "%s %s: LEAPIORAID_IOC_AFTER_RESET_PHASE\n", + ioc->name, __func__)); + leapioraid_scsihost_clear_outstanding_scsi_tm_commands(ioc); + break; + case LEAPIORAID_IOC_DONE_RESET_PHASE: + dtmprintk(ioc, pr_info( + "%s %s: LEAPIORAID_IOC_DONE_RESET_PHASE\n", + ioc->name, __func__)); + if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) { + if (ioc->multipath_on_hba) { + leapioraid_scsihost_sas_port_refresh(ioc); + leapioraid_scsihost_update_vphys_after_reset(ioc); + } + leapioraid_scsihost_prep_device_scan(ioc); + leapioraid_scsihost_create_enclosure_list_after_reset(ioc); + leapioraid_scsihost_search_responding_sas_devices(ioc); + leapioraid_scsihost_search_responding_raid_devices(ioc); + leapioraid_scsihost_search_responding_expanders(ioc); + leapioraid_scsihost_error_recovery_delete_devices(ioc); + } + break; + } +} + +static void +leapioraid_fw_work(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_fw_event_work *fw_event) +{ + ioc->current_event = fw_event; + leapioraid_scsihost_fw_event_del_from_list(ioc, fw_event); + if (ioc->remove_host || ioc->pci_error_recovery) { + 
leapioraid_fw_event_work_put(fw_event); + ioc->current_event = NULL; + return; + } + switch (fw_event->event) { + case LEAPIORAID_REMOVE_UNRESPONDING_DEVICES: + while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery) { + if (ioc->remove_host || ioc->fw_events_cleanup) + goto out; + ssleep(1); + } + leapioraid_scsihost_remove_unresponding_devices(ioc); + leapioraid_scsihost_del_dirty_vphy(ioc); + leapioraid_scsihost_del_dirty_port_entries(ioc); + leapioraid_scsihost_update_device_qdepth(ioc); + leapioraid_scsihost_scan_for_devices_after_reset(ioc); + if (ioc->is_driver_loading) + leapioraid_scsihost_complete_devices_scanning(ioc); + break; + case LEAPIORAID_PORT_ENABLE_COMPLETE: + ioc->start_scan = 0; + dewtprintk(ioc, pr_info( + "%s port enable: complete from worker thread\n", + ioc->name)); + break; + case LEAPIORAID_TURN_ON_PFA_LED: + leapioraid_scsihost_turn_on_pfa_led(ioc, fw_event->device_handle); + break; + case LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + if (leapioraid_scsihost_sas_topology_change_event(ioc, fw_event)) { + leapioraid_scsihost_fw_event_requeue(ioc, fw_event, 1000); + ioc->current_event = NULL; + return; + } + break; + case LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE: + if (ioc->logging_level & LEAPIORAID_DEBUG_EVENT_WORK_TASK) + leapioraid_scsihost_sas_device_status_change_event_debug( + ioc, + (struct LeapioraidEventDataSasDeviceStatusChange_t *) + fw_event->event_data); + break; + case LEAPIORAID_EVENT_SAS_DISCOVERY: + leapioraid_scsihost_sas_discovery_event( + ioc, fw_event); + break; + case LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + leapioraid_scsihost_sas_device_discovery_error_event( + ioc, fw_event); + break; + case LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE: + leapioraid_scsihost_sas_broadcast_primitive_event( + ioc, fw_event); + break; + case LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + leapioraid_scsihost_sas_enclosure_dev_status_change_event( + ioc, fw_event); + break; + case 
LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+		leapioraid_scsihost_sas_ir_config_change_event(
+			ioc, fw_event);
+		break;
+	case LEAPIORAID_EVENT_IR_VOLUME:
+		leapioraid_scsihost_sas_ir_volume_event(
+			ioc, fw_event);
+		break;
+	case LEAPIORAID_EVENT_IR_PHYSICAL_DISK:
+		leapioraid_scsihost_sas_ir_physical_disk_event(
+			ioc, fw_event);
+		break;
+	case LEAPIORAID_EVENT_IR_OPERATION_STATUS:
+		leapioraid_scsihost_sas_ir_operation_status_event(
+			ioc, fw_event);
+		break;
+	default:
+		break;
+	}
+out:
+	leapioraid_fw_event_work_put(fw_event);
+	ioc->current_event = NULL;
+}
+
+/*
+ * leapioraid_firmware_event_work - firmware-event workqueue callback.
+ * @work: embedded work_struct inside a leapioraid_fw_event_work
+ *
+ * Recovers the containing leapioraid_fw_event_work via container_of()
+ * and forwards it to the common dispatcher leapioraid_fw_work().
+ */
+static void
+leapioraid_firmware_event_work(struct work_struct *work)
+{
+	struct leapioraid_fw_event_work *fw_event = container_of(work,
+		struct leapioraid_fw_event_work,
+		work);
+
+	leapioraid_fw_work(fw_event->ioc, fw_event);
+}
+
+/*
+ * leapioraid_firmware_event_work_delayed - delayed firmware-event callback.
+ * @work: work_struct embedded in the event's delayed_work member
+ *
+ * Same as leapioraid_firmware_event_work(), but for events that were
+ * queued with a delay (note the container_of() uses delayed_work.work).
+ */
+static void
+leapioraid_firmware_event_work_delayed(struct work_struct *work)
+{
+	struct leapioraid_fw_event_work *fw_event = container_of(work,
+		struct leapioraid_fw_event_work,
+		delayed_work.work);
+
+	leapioraid_fw_work(fw_event->ioc, fw_event);
+}
+
+/*
+ * leapioraid_scsihost_event_callback - firmware event notification handler.
+ * @ioc: per-adapter object
+ * @msix_index: MSIX table index supplied by the interrupt path
+ * @reply: reply message frame address
+ *
+ * Filters/pre-processes incoming firmware events, then copies the event
+ * data into a newly allocated leapioraid_fw_event_work and queues it for
+ * the firmware-event worker thread.  Allocations use GFP_ATOMIC, so this
+ * is presumably called from interrupt/atomic context (NOTE(review):
+ * confirm against the caller).  Returns 1 on every path.
+ */
+u8
+leapioraid_scsihost_event_callback(struct LEAPIORAID_ADAPTER *ioc,
+				   u8 msix_index, u32 reply)
+{
+	struct leapioraid_fw_event_work *fw_event;
+	struct LeapioraidEventNotificationRep_t *mpi_reply;
+	u16 event;
+	u16 sz;
+
+	/* Bail out early while PCI error recovery is in progress. */
+	if (ioc->pci_error_recovery)
+		return 1;
+
+	mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply);
+	if (unlikely(!mpi_reply)) {
+		pr_err("%s mpi_reply not valid at %s:%d/%s()!\n", ioc->name,
+		       __FILE__, __LINE__, __func__);
+		return 1;
+	}
+	event = le16_to_cpu(mpi_reply->Event);
+	switch (event) {
+	case LEAPIORAID_EVENT_SAS_BROADCAST_PRIMITIVE:
+		{
+			struct LeapioraidEventDataSasBroadcastPrimitive_t *baen_data =
+			    (struct LeapioraidEventDataSasBroadcastPrimitive_t *)
+			    mpi_reply->EventData;
+			/* Only asynchronous-event broadcast primitives are queued. */
+			if (baen_data->Primitive !=
+			    LEAPIORAID_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
+				return 1;
+			/*
+			 * If one broadcast AEN is already being handled, just
+			 * count the new one instead of queuing another work item.
+			 */
+			if (ioc->broadcast_aen_busy) {
+				ioc->broadcast_aen_pending++;
+				return 1;
+			}
+			
ioc->broadcast_aen_busy = 1; + break; + } + case LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + leapioraid_scsihost_check_topo_delete_events( + ioc, + (struct LeapioraidEventDataSasTopoChangeList_t *) + mpi_reply->EventData); + if (ioc->shost_recovery) + return 1; + break; + case LEAPIORAID_EVENT_IR_CONFIGURATION_CHANGE_LIST: + leapioraid_scsihost_check_ir_config_unhide_events( + ioc, + (struct LeapioraidEventDataIrCfgChangeList_t *) + mpi_reply->EventData); + break; + case LEAPIORAID_EVENT_IR_VOLUME: + leapioraid_scsihost_check_volume_delete_events( + ioc, + (struct LeapioraidEventDataIrVol_t *) + mpi_reply->EventData); + break; + case LEAPIORAID_EVENT_LOG_ENTRY_ADDED: + fallthrough; + case LEAPIORAID_EVENT_SAS_DEVICE_STATUS_CHANGE: + leapioraid_scsihost_sas_device_status_change_event( + ioc, + (struct LeapioraidEventDataSasDeviceStatusChange_t *) + mpi_reply->EventData); + break; + case LEAPIORAID_EVENT_IR_OPERATION_STATUS: + case LEAPIORAID_EVENT_SAS_DISCOVERY: + case LEAPIORAID_EVENT_SAS_DEVICE_DISCOVERY_ERROR: + case LEAPIORAID_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + case LEAPIORAID_EVENT_IR_PHYSICAL_DISK: + break; + default: + return 1; + } + fw_event = leapioraid_alloc_fw_event_work(0); + if (!fw_event) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 1; + } + sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; + fw_event->event_data = kzalloc(sz, GFP_ATOMIC); + if (!fw_event->event_data) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + leapioraid_fw_event_work_put(fw_event); + return 1; + } + if (event == LEAPIORAID_EVENT_SAS_TOPOLOGY_CHANGE_LIST) { + struct LeapioraidEventDataSasTopoChangeList_t *topo_event_data = + (struct LeapioraidEventDataSasTopoChangeList_t *) + mpi_reply->EventData; + fw_event->retries = kzalloc(topo_event_data->NumEntries, + GFP_ATOMIC); + if (!fw_event->retries) { + kfree(fw_event->event_data); + leapioraid_fw_event_work_put(fw_event); + return 1; + } + 
}
+	/* Copy event payload; fw_event now owns its own copy of the data. */
+	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
+	fw_event->ioc = ioc;
+	fw_event->VF_ID = mpi_reply->VF_ID;
+	fw_event->VP_ID = mpi_reply->VP_ID;
+	fw_event->event = event;
+	leapioraid_scsihost_fw_event_add(ioc, fw_event);
+	/* Drop the local reference; the event list/work keeps its own. */
+	leapioraid_fw_event_work_put(fw_event);
+	return 1;
+}
+
+/*
+ * leapioraid_scsihost_expander_node_remove - tear down one expander node.
+ * @ioc: per-adapter object
+ * @sas_expander: expander node to remove
+ *
+ * Removes every device/expander attached through the expander's port
+ * list, unregisters the expander's transport port, unlinks it from
+ * ioc->sas_expander_list (under sas_node_lock) and frees it.  Returns
+ * early, leaving the node partially removed, if host recovery starts
+ * mid-walk.
+ */
+static void
+leapioraid_scsihost_expander_node_remove(
+	struct LEAPIORAID_ADAPTER *ioc,
+	struct leapioraid_raid_sas_node *sas_expander)
+{
+	struct leapioraid_sas_port *leapioraid_port, *next;
+	unsigned long flags;
+	int port_id;
+
+	list_for_each_entry_safe(leapioraid_port, next,
+				 &sas_expander->sas_port_list, port_list) {
+		if (ioc->shost_recovery)
+			return;
+		if (leapioraid_port->remote_identify.device_type ==
+		    SAS_END_DEVICE)
+			leapioraid_device_remove_by_sas_address(ioc,
+				leapioraid_port->remote_identify.sas_address,
+				leapioraid_port->hba_port);
+		else if (leapioraid_port->remote_identify.device_type ==
+			 SAS_EDGE_EXPANDER_DEVICE
+			 || leapioraid_port->remote_identify.device_type ==
+			 SAS_FANOUT_EXPANDER_DEVICE)
+			leapioraid_expander_remove(ioc,
+				leapioraid_port->remote_identify.sas_address,
+				leapioraid_port->hba_port);
+	}
+	/* Cache port_id: the port object may be released by port_remove. */
+	port_id = sas_expander->port->port_id;
+	leapioraid_transport_port_remove(ioc, sas_expander->sas_address,
+					 sas_expander->sas_address_parent,
+					 sas_expander->port);
+	pr_info(
+		"%s expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
+		ioc->name,
+		sas_expander->handle,
+		(unsigned long long)sas_expander->sas_address,
+		port_id);
+	spin_lock_irqsave(&ioc->sas_node_lock, flags);
+	list_del(&sas_expander->list);
+	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+	kfree(sas_expander->phy);
+	kfree(sas_expander);
+}
+
+/*
+ * leapioraid_scsihost_ir_shutdown - notify IR firmware of shutdown.
+ * @ioc: per-adapter object
+ *
+ * Sends a RAID-action request (Action 0x20, logged as "IR shutdown")
+ * through the scsih_cmds slot and waits up to 10 s for completion.
+ * No-op when there is no IR firmware, no RAID volumes, or the PCI
+ * device has been hot-unplugged.
+ */
+static void
+leapioraid_scsihost_ir_shutdown(struct LEAPIORAID_ADAPTER *ioc)
+{
+	struct LeapioraidRaidActionReq_t *mpi_request;
+	struct LeapioraidRaidActionRep_t *mpi_reply;
+	u16 smid;
+
+	if (!ioc->ir_firmware)
+		return;
+
+	if (list_empty(&ioc->raid_device_list))
+		return;
+	if 
(leapioraid_base_pci_device_is_unplugged(ioc))
+		return;
+	mutex_lock(&ioc->scsih_cmds.mutex);
+	/* scsih_cmds is a single-slot command channel; fail if occupied. */
+	if (ioc->scsih_cmds.status != LEAPIORAID_CMD_NOT_USED) {
+		pr_err("%s %s: scsih_cmd in use\n",
+		       ioc->name, __func__);
+		goto out;
+	}
+	ioc->scsih_cmds.status = LEAPIORAID_CMD_PENDING;
+	smid = leapioraid_base_get_smid(ioc, ioc->scsih_cb_idx);
+	if (!smid) {
+		pr_err("%s %s: failed obtaining a smid\n",
+		       ioc->name, __func__);
+		ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
+		goto out;
+	}
+	mpi_request = leapioraid_base_get_msg_frame(ioc, smid);
+	ioc->scsih_cmds.smid = smid;
+	memset(mpi_request, 0, sizeof(struct LeapioraidRaidActionReq_t));
+	mpi_request->Function = LEAPIORAID_FUNC_RAID_ACTION;
+	/* 0x20 = IR shutdown action (see log messages below). */
+	mpi_request->Action = 0x20;
+	if (!ioc->warpdrive_msg)
+		pr_info("%s IR shutdown (sending)\n",
+			ioc->name);
+	init_completion(&ioc->scsih_cmds.done);
+	ioc->put_smid_default(ioc, smid);
+	/* Wait up to 10 seconds for the firmware to acknowledge. */
+	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10 * HZ);
+	if (!(ioc->scsih_cmds.status & LEAPIORAID_CMD_COMPLETE)) {
+		pr_err("%s %s: timeout\n",
+		       ioc->name, __func__);
+		goto out;
+	}
+	if (ioc->scsih_cmds.status & LEAPIORAID_CMD_REPLY_VALID) {
+		mpi_reply = ioc->scsih_cmds.reply;
+		if (!ioc->warpdrive_msg)
+			pr_info(
+				"%s IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+				ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+				le32_to_cpu(mpi_reply->IOCLogInfo));
+	}
+out:
+	ioc->scsih_cmds.status = LEAPIORAID_CMD_NOT_USED;
+	mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+/*
+ * leapioraid_scsihost_get_shost_and_ioc - resolve shost/ioc from a pci_dev.
+ * @pdev: PCI device
+ * @shost: out - Scsi_Host stored in the PCI drvdata
+ * @ioc: out - adapter object stored in the shost private data
+ *
+ * Returns 0 on success, -ENXIO if either pointer is missing (e.g. the
+ * device was never fully probed or is already torn down).
+ */
+static int
+leapioraid_scsihost_get_shost_and_ioc(struct pci_dev *pdev,
+				      struct Scsi_Host **shost,
+				      struct LEAPIORAID_ADAPTER **ioc)
+{
+	*shost = pci_get_drvdata(pdev);
+	if (*shost == NULL) {
+		dev_err(&pdev->dev, "pdev's driver data is null\n");
+		return -ENXIO;
+	}
+	*ioc = leapioraid_shost_private(*shost);
+	if (*ioc == NULL) {
+		dev_err(&pdev->dev, "shost's private data is null\n");
+		return -ENXIO;
+	}
+	return 0;
+}
+
+/*
+ * leapioraid_scsihost_remove - PCI driver .remove callback.
+ * @pdev: PCI device being removed
+ *
+ * Waits for driver load to finish, flushes firmware events and
+ * outstanding commands, destroys the event workqueue, shuts down IR,
+ * unregisters the SCSI host and frees all device/port bookkeeping.
+ */
+static void
+leapioraid_scsihost_remove(struct pci_dev *pdev)
+{
+	struct 
Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + struct leapioraid_sas_port *leapioraid_port, *next_port; + struct leapioraid_raid_device *raid_device, *next; + struct LEAPIORAID_TARGET *sas_target_priv_data; + struct workqueue_struct *wq; + unsigned long flags; + struct leapioraid_hba_port *port, *port_next; + struct leapioraid_virtual_phy *vphy, *vphy_next; + struct LeapioraidCfgRep_t mpi_reply; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to remove device\n"); + return; + } + + while (ioc->is_driver_loading) + ssleep(1); + + ioc->remove_host = 1; + leapioraid_wait_for_commands_to_complete(ioc); + spin_lock_irqsave(&ioc->hba_hot_unplug_lock, flags); + if (leapioraid_base_pci_device_is_unplugged(ioc)) { + leapioraid_base_pause_mq_polling(ioc); + leapioraid_scsihost_flush_running_cmds(ioc); + } + leapioraid_scsihost_fw_event_cleanup_queue(ioc); + spin_unlock_irqrestore(&ioc->hba_hot_unplug_lock, flags); + spin_lock_irqsave(&ioc->fw_event_lock, flags); + wq = ioc->firmware_event_thread; + ioc->firmware_event_thread = NULL; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + if (wq) + destroy_workqueue(wq); + leapioraid_config_set_ioc_pg1(ioc, &mpi_reply, + &ioc->ioc_pg1_copy); + leapioraid_scsihost_ir_shutdown(ioc); + sas_remove_host(shost); + scsi_remove_host(shost); + list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, + list) { + if (raid_device->starget) { + sas_target_priv_data = raid_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + scsi_remove_target(&raid_device->starget->dev); + } + pr_info("%s removing handle(0x%04x), wwid(0x%016llx)\n", + ioc->name, raid_device->handle, + (unsigned long long)raid_device->wwid); + leapioraid_scsihost_raid_device_remove(ioc, raid_device); + } + list_for_each_entry_safe(leapioraid_port, next_port, + &ioc->sas_hba.sas_port_list, port_list) { + if (leapioraid_port->remote_identify.device_type == + SAS_END_DEVICE) + 
leapioraid_device_remove_by_sas_address(ioc, + leapioraid_port->remote_identify.sas_address, + leapioraid_port->hba_port); + else if (leapioraid_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE + || leapioraid_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) + leapioraid_expander_remove(ioc, + leapioraid_port->remote_identify.sas_address, + leapioraid_port->hba_port); + } + list_for_each_entry_safe(port, port_next, &ioc->port_table_list, list) { + if (port->vphys_mask) { + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + list_del(&vphy->list); + kfree(vphy); + } + } + list_del(&port->list); + kfree(port); + } + if (ioc->sas_hba.num_phys) { + kfree(ioc->sas_hba.phy); + ioc->sas_hba.phy = NULL; + ioc->sas_hba.num_phys = 0; + } + leapioraid_base_detach(ioc); + spin_lock(&leapioraid_gioc_lock); + list_del(&ioc->list); + spin_unlock(&leapioraid_gioc_lock); + scsi_host_put(shost); +} + +static void +leapioraid_scsihost_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + struct workqueue_struct *wq; + unsigned long flags; + struct LeapioraidCfgRep_t mpi_reply; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to shutdown device\n"); + return; + } + ioc->remove_host = 1; + leapioraid_wait_for_commands_to_complete(ioc); + leapioraid_scsihost_fw_event_cleanup_queue(ioc); + spin_lock_irqsave(&ioc->fw_event_lock, flags); + wq = ioc->firmware_event_thread; + ioc->firmware_event_thread = NULL; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + if (wq) + destroy_workqueue(wq); + leapioraid_config_set_ioc_pg1(ioc, &mpi_reply, + &ioc->ioc_pg1_copy); + leapioraid_scsihost_ir_shutdown(ioc); + leapioraid_base_mask_interrupts(ioc); + ioc->shost_recovery = 1; + leapioraid_base_make_ioc_ready(ioc, SOFT_RESET); + ioc->shost_recovery = 0; + leapioraid_base_free_irq(ioc); + leapioraid_base_disable_msix(ioc); +} + +static 
void +leapioraid_scsihost_probe_boot_devices(struct LEAPIORAID_ADAPTER *ioc) +{ + u32 channel; + void *device; + struct leapioraid_sas_device *sas_device; + struct leapioraid_raid_device *raid_device; + u16 handle; + u64 sas_address_parent; + u64 sas_address; + unsigned long flags; + int rc; + struct leapioraid_hba_port *port; + u8 protection_mask; + + if (!ioc->bios_pg3.BiosVersion) + return; + + device = NULL; + if (ioc->req_boot_device.device) { + device = ioc->req_boot_device.device; + channel = ioc->req_boot_device.channel; + } else if (ioc->req_alt_boot_device.device) { + device = ioc->req_alt_boot_device.device; + channel = ioc->req_alt_boot_device.channel; + } else if (ioc->current_boot_device.device) { + device = ioc->current_boot_device.device; + channel = ioc->current_boot_device.channel; + } + if (!device) + return; + if (channel == RAID_CHANNEL) { + raid_device = device; + if (raid_device->starget) + return; + if (!ioc->disable_eedp_support) { + protection_mask = scsi_host_get_prot(ioc->shost); + if (protection_mask & SHOST_DIX_TYPE0_PROTECTION) { + scsi_host_set_prot(ioc->shost, + protection_mask & 0x77); + pr_err( + "%s: Disabling DIX0 because of unsupport!\n", + ioc->name); + } + } + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + leapioraid_scsihost_raid_device_remove(ioc, raid_device); + } else { + sas_device = device; + if (sas_device->starget) + return; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + handle = sas_device->handle; + sas_address_parent = sas_device->sas_address_parent; + sas_address = sas_device->sas_address; + port = sas_device->port; + list_move_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (!port) + return; + + if (ioc->hide_drives) + return; + + if (!leapioraid_transport_port_add(ioc, handle, + sas_address_parent, port)) { + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + } else if (!sas_device->starget) { + if 
(!ioc->is_driver_loading) { + leapioraid_transport_port_remove(ioc, + sas_address, + sas_address_parent, + port); + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + } + } + } +} + +static void +leapioraid_scsihost_probe_raid(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_raid_device *raid_device, *raid_next; + int rc; + + list_for_each_entry_safe(raid_device, raid_next, + &ioc->raid_device_list, list) { + if (raid_device->starget) + continue; + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + leapioraid_scsihost_raid_device_remove(ioc, raid_device); + } +} + +static +struct leapioraid_sas_device *leapioraid_get_next_sas_device( + struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_sas_device *sas_device = NULL; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + if (!list_empty(&ioc->sas_device_init_list)) { + sas_device = list_first_entry(&ioc->sas_device_init_list, + struct leapioraid_sas_device, list); + leapioraid_sas_device_get(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return sas_device; +} + +static void +leapioraid_sas_device_make_active(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_device *sas_device) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + leapioraid_sas_device_put(sas_device); + } + leapioraid_sas_device_get(sas_device); + list_add_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +static void +leapioraid_scsihost_probe_sas(struct LEAPIORAID_ADAPTER *ioc) +{ + struct leapioraid_sas_device *sas_device; + + while ((sas_device = leapioraid_get_next_sas_device(ioc))) { + if (ioc->hide_drives) { + leapioraid_sas_device_make_active(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + continue; + } + if (!leapioraid_transport_port_add(ioc, sas_device->handle, + 
sas_device->sas_address_parent, + sas_device->port)) { + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + continue; + } else if (!sas_device->starget) { + if (!ioc->is_driver_loading) { + leapioraid_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent, + sas_device->port); + leapioraid_scsihost_sas_device_remove(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + continue; + } + } + leapioraid_sas_device_make_active(ioc, sas_device); + leapioraid_sas_device_put(sas_device); + } +} + +static void +leapioraid_scsihost_probe_devices(struct LEAPIORAID_ADAPTER *ioc) +{ + u16 volume_mapping_flags; + + if (!(ioc->facts.ProtocolFlags + & LEAPIORAID_IOCFACTS_PROTOCOL_SCSI_INITIATOR)) + return; + leapioraid_scsihost_probe_boot_devices(ioc); + + if (ioc->ir_firmware) { + volume_mapping_flags = + le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) & + LEAPIORAID_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; + if (volume_mapping_flags == + LEAPIORAID_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { + leapioraid_scsihost_probe_raid(ioc); + leapioraid_scsihost_probe_sas(ioc); + } else { + leapioraid_scsihost_probe_sas(ioc); + leapioraid_scsihost_probe_raid(ioc); + } + } else { + leapioraid_scsihost_probe_sas(ioc); + } +} + +static void +leapioraid_scsihost_scan_start(struct Scsi_Host *shost) +{ + struct LEAPIORAID_ADAPTER *ioc = shost_priv(shost); + int rc; + + if (disable_discovery > 0) + return; + ioc->start_scan = 1; + rc = leapioraid_port_enable(ioc); + if (rc != 0) + pr_info("%s port enable: FAILED\n", + ioc->name); +} + +void +leapioraid_scsihost_complete_devices_scanning(struct LEAPIORAID_ADAPTER *ioc) +{ + if (ioc->wait_for_discovery_to_complete) { + ioc->wait_for_discovery_to_complete = 0; + leapioraid_scsihost_probe_devices(ioc); + } + leapioraid_base_start_watchdog(ioc); + ioc->is_driver_loading = 0; +} + +static int +leapioraid_scsihost_scan_finished( + struct Scsi_Host *shost, unsigned long 
time) +{ + struct LEAPIORAID_ADAPTER *ioc = shost_priv(shost); + u32 ioc_state; + int issue_hard_reset = 0; + + if (disable_discovery > 0) { + ioc->is_driver_loading = 0; + ioc->wait_for_discovery_to_complete = 0; + goto out; + } + if (time >= (300 * HZ)) { + ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED; + pr_info("%s port enable: FAILED with timeout (timeout=300s)\n", + ioc->name); + ioc->is_driver_loading = 0; + goto out; + } + if (ioc->start_scan) { + ioc_state = leapioraid_base_get_iocstate(ioc, 0); + if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_FAULT) { + leapioraid_print_fault_code(ioc, + ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + issue_hard_reset = 1; + goto out; + } else if ((ioc_state & LEAPIORAID_IOC_STATE_MASK) == + LEAPIORAID_IOC_STATE_COREDUMP) { + leapioraid_base_coredump_info(ioc, + ioc_state & + LEAPIORAID_DOORBELL_DATA_MASK); + leapioraid_base_wait_for_coredump_completion(ioc, + __func__); + issue_hard_reset = 1; + goto out; + } + return 0; + } + if (ioc->port_enable_cmds.status & LEAPIORAID_CMD_RESET) { + pr_err("%s port enable: aborted due to diag reset\n", + ioc->name); + ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED; + goto out; + } + if (ioc->start_scan_failed) { + pr_info("%s port enable: FAILED with (ioc_status=0x%08x)\n", + ioc->name, ioc->start_scan_failed); + ioc->is_driver_loading = 0; + ioc->wait_for_discovery_to_complete = 0; + ioc->remove_host = 1; + goto out; + } + pr_info("%s port enable: SUCCESS\n", ioc->name); + ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED; + leapioraid_scsihost_complete_devices_scanning(ioc); +out: + if (issue_hard_reset) { + ioc->port_enable_cmds.status = LEAPIORAID_CMD_NOT_USED; + if (leapioraid_base_hard_reset_handler(ioc, SOFT_RESET)) + ioc->is_driver_loading = 0; + } + return 1; +} + +SCSIH_MAP_QUEUE(struct Scsi_Host *shost) +{ + struct LEAPIORAID_ADAPTER *ioc = + (struct LEAPIORAID_ADAPTER *)shost->hostdata; + struct blk_mq_queue_map *map; + int i, 
qoff, offset; + int nr_msix_vectors = ioc->iopoll_q_start_index; + int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors; + + if (shost->nr_hw_queues == 1) + return; + for (i = 0, qoff = 0; i < shost->nr_maps; i++) { + map = &shost->tag_set.map[i]; + map->nr_queues = 0; + offset = 0; + if (i == HCTX_TYPE_DEFAULT) { + map->nr_queues = + nr_msix_vectors - ioc->high_iops_queues; + offset = ioc->high_iops_queues; + } else if (i == HCTX_TYPE_POLL) + map->nr_queues = iopoll_q_count; + if (!map->nr_queues) + BUG_ON(i == HCTX_TYPE_DEFAULT); + map->queue_offset = qoff; + if (i != HCTX_TYPE_POLL) + blk_mq_pci_map_queues(map, ioc->pdev, offset); + else + blk_mq_map_queues(map); + qoff += map->nr_queues; + } +} + +static struct scsi_host_template leapioraid_driver_template = { + .module = THIS_MODULE, + .name = "LEAPIO RAID Host", + .proc_name = LEAPIORAID_DRIVER_NAME, + .queuecommand = leapioraid_scsihost_qcmd, + .target_alloc = leapioraid_scsihost_target_alloc, + .slave_alloc = leapioraid_scsihost_slave_alloc, + .slave_configure = leapioraid_scsihost_slave_configure, + .target_destroy = leapioraid_scsihost_target_destroy, + .slave_destroy = leapioraid_scsihost_slave_destroy, + .scan_finished = leapioraid_scsihost_scan_finished, + .scan_start = leapioraid_scsihost_scan_start, + .change_queue_depth = leapioraid_scsihost_change_queue_depth, + .eh_abort_handler = leapioraid_scsihost_abort, + .eh_device_reset_handler = leapioraid_scsihost_dev_reset, + .eh_target_reset_handler = leapioraid_scsihost_target_reset, + .eh_host_reset_handler = leapioraid_scsihost_host_reset, + .bios_param = leapioraid_scsihost_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = LEAPIORAID_SG_DEPTH, + .max_sectors = 128, + .max_segment_size = 0xffffffff, + .cmd_per_lun = 128, + .shost_groups = leapioraid_host_groups, + .sdev_groups = leapioraid_dev_groups, + .track_queue_depth = 1, + .cmd_size = sizeof(struct leapioraid_scsiio_tracker), + .map_queues = 
leapioraid_scsihost_map_queues, + .mq_poll = leapioraid_blk_mq_poll, +}; + +static struct raid_function_template leapioraid_raid_functions = { + .cookie = &leapioraid_driver_template, + .is_raid = leapioraid_scsihost_is_raid, + .get_resync = leapioraid_scsihost_get_resync, + .get_state = leapioraid_scsihost_get_state, +}; + +static int +leapioraid_scsihost_probe( + struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct LEAPIORAID_ADAPTER *ioc; + struct Scsi_Host *shost = NULL; + int rv; + + shost = scsi_host_alloc(&leapioraid_driver_template, + sizeof(struct LEAPIORAID_ADAPTER)); + if (!shost) + return -ENODEV; + ioc = shost_priv(shost); + memset(ioc, 0, sizeof(struct LEAPIORAID_ADAPTER)); + ioc->id = leapioraid_ids++; + sprintf(ioc->driver_name, "%s", LEAPIORAID_DRIVER_NAME); + + ioc->combined_reply_queue = 1; + ioc->nc_reply_index_count = 16; + ioc->multipath_on_hba = 1; + + ioc = leapioraid_shost_private(shost); + INIT_LIST_HEAD(&ioc->list); + spin_lock(&leapioraid_gioc_lock); + list_add_tail(&ioc->list, &leapioraid_ioc_list); + spin_unlock(&leapioraid_gioc_lock); + ioc->shost = shost; + ioc->pdev = pdev; + + ioc->scsi_io_cb_idx = scsi_io_cb_idx; + ioc->tm_cb_idx = tm_cb_idx; + ioc->ctl_cb_idx = ctl_cb_idx; + ioc->ctl_tm_cb_idx = ctl_tm_cb_idx; + ioc->base_cb_idx = base_cb_idx; + ioc->port_enable_cb_idx = port_enable_cb_idx; + ioc->transport_cb_idx = transport_cb_idx; + ioc->scsih_cb_idx = scsih_cb_idx; + ioc->config_cb_idx = config_cb_idx; + ioc->tm_tr_cb_idx = tm_tr_cb_idx; + ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; + ioc->tm_tr_internal_cb_idx = tm_tr_internal_cb_idx; + ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; + + ioc->logging_level = logging_level; + ioc->schedule_dead_ioc_flush_running_cmds = + &leapioraid_scsihost_flush_running_cmds; + ioc->open_pcie_trace = open_pcie_trace; + ioc->enable_sdev_max_qd = 0; + ioc->max_shutdown_latency = 6; + ioc->drv_support_bitmap |= 0x00000001; + ioc->drv_support_bitmap |= 0x00000002; + + 
mutex_init(&ioc->reset_in_progress_mutex); + mutex_init(&ioc->hostdiag_unlock_mutex); + mutex_init(&ioc->pci_access_mutex); + spin_lock_init(&ioc->ioc_reset_in_progress_lock); + spin_lock_init(&ioc->scsi_lookup_lock); + spin_lock_init(&ioc->sas_device_lock); + spin_lock_init(&ioc->sas_node_lock); + spin_lock_init(&ioc->fw_event_lock); + spin_lock_init(&ioc->raid_device_lock); + spin_lock_init(&ioc->scsih_q_internal_lock); + spin_lock_init(&ioc->hba_hot_unplug_lock); + INIT_LIST_HEAD(&ioc->sas_device_list); + INIT_LIST_HEAD(&ioc->port_table_list); + INIT_LIST_HEAD(&ioc->sas_device_init_list); + INIT_LIST_HEAD(&ioc->sas_expander_list); + INIT_LIST_HEAD(&ioc->enclosure_list); + INIT_LIST_HEAD(&ioc->fw_event_list); + INIT_LIST_HEAD(&ioc->raid_device_list); + INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); + INIT_LIST_HEAD(&ioc->delayed_tr_list); + INIT_LIST_HEAD(&ioc->delayed_sc_list); + INIT_LIST_HEAD(&ioc->delayed_event_ack_list); + INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); + INIT_LIST_HEAD(&ioc->delayed_internal_tm_list); + INIT_LIST_HEAD(&ioc->scsih_q_intenal_cmds); + INIT_LIST_HEAD(&ioc->reply_queue_list); + sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id); + + shost->max_cmd_len = 32; + shost->max_lun = 8; + shost->transportt = leapioraid_transport_template; + shost->unique_id = ioc->id; + + ioc->drv_internal_flags |= LEAPIORAID_DRV_INTERNAL_BITMAP_BLK_MQ; + + ioc->disable_eedp_support = 1; + snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), + "fw_event_%s%u", ioc->driver_name, ioc->id); + ioc->firmware_event_thread = + alloc_ordered_workqueue(ioc->firmware_event_name, 0); + if (!ioc->firmware_event_thread) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rv = -ENODEV; + goto out_thread_fail; + } + + shost->host_tagset = 0; + ioc->is_driver_loading = 1; + if ((leapioraid_base_attach(ioc))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rv = -ENODEV; + goto 
out_attach_fail; + } + ioc->hide_drives = 0; + + shost->nr_hw_queues = 1; + rv = scsi_add_host(shost, &pdev->dev); + if (rv) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + spin_lock(&leapioraid_gioc_lock); + list_del(&ioc->list); + spin_unlock(&leapioraid_gioc_lock); + goto out_add_shost_fail; + } + + scsi_scan_host(shost); + + return 0; +out_add_shost_fail: + leapioraid_base_detach(ioc); +out_attach_fail: + destroy_workqueue(ioc->firmware_event_thread); +out_thread_fail: + spin_lock(&leapioraid_gioc_lock); + list_del(&ioc->list); + spin_unlock(&leapioraid_gioc_lock); + scsi_host_put(shost); + return rv; +} + +#ifdef CONFIG_PM +static int +leapioraid_scsihost_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + pci_power_t device_state; + int rc; + + rc = leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc); + if (rc) { + dev_err(&pdev->dev, "unable to suspend device\n"); + return rc; + } + leapioraid_base_stop_watchdog(ioc); + leapioraid_base_stop_hba_unplug_watchdog(ioc); + scsi_block_requests(shost); + device_state = pci_choose_state(pdev, state); + leapioraid_scsihost_ir_shutdown(ioc); + pr_info("%s pdev=0x%p, slot=%s, entering operating state [D%d]\n", + ioc->name, pdev, + pci_name(pdev), device_state); + pci_save_state(pdev); + leapioraid_base_free_resources(ioc); + pci_set_power_state(pdev, device_state); + return 0; +} + +static int +leapioraid_scsihost_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + pci_power_t device_state = pdev->current_state; + int r; + + r = leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc); + if (r) { + dev_err(&pdev->dev, "unable to resume device\n"); + return r; + } + pr_info("%s pdev=0x%p, slot=%s, previous operating state [D%d]\n", + ioc->name, pdev, + pci_name(pdev), device_state); + pci_set_power_state(pdev, PCI_D0); + 
pci_enable_wake(pdev, PCI_D0, 0); + pci_restore_state(pdev); + ioc->pdev = pdev; + r = leapioraid_base_map_resources(ioc); + if (r) + return r; + pr_err("%s issuing hard reset as part of OS resume\n", + ioc->name); + leapioraid_base_hard_reset_handler(ioc, SOFT_RESET); + scsi_unblock_requests(shost); + leapioraid_base_start_watchdog(ioc); + leapioraid_base_start_hba_unplug_watchdog(ioc); + return 0; +} +#endif + +static pci_ers_result_t +leapioraid_scsihost_pci_error_detected( + struct pci_dev *pdev, pci_channel_state_t state) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "device unavailable\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pr_err("%s PCI error: detected callback, state(%d)!!\n", + ioc->name, state); + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + ioc->pci_error_recovery = 1; + scsi_block_requests(ioc->shost); + leapioraid_base_stop_watchdog(ioc); + leapioraid_base_stop_hba_unplug_watchdog(ioc); + leapioraid_base_free_resources(ioc); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + ioc->pci_error_recovery = 1; + leapioraid_base_stop_watchdog(ioc); + leapioraid_base_stop_hba_unplug_watchdog(ioc); + leapioraid_base_pause_mq_polling(ioc); + leapioraid_scsihost_flush_running_cmds(ioc); + return PCI_ERS_RESULT_DISCONNECT; + } + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t +leapioraid_scsihost_pci_slot_reset(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + int rc; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to perform slot reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pr_err("%s PCI error: slot reset callback!!\n", + ioc->name); + ioc->pci_error_recovery = 0; + ioc->pdev = pdev; + pci_restore_state(pdev); + rc = 
leapioraid_base_map_resources(ioc); + if (rc) + return PCI_ERS_RESULT_DISCONNECT; + pr_info("%s issuing hard reset as part of PCI slot reset\n", + ioc->name); + rc = leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + pr_info("%s hard reset: %s\n", + ioc->name, (rc == 0) ? "success" : "failed"); + if (!rc) + return PCI_ERS_RESULT_RECOVERED; + else + return PCI_ERS_RESULT_DISCONNECT; +} + +static void +leapioraid_scsihost_pci_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to resume device\n"); + return; + } + pr_err("%s PCI error: resume callback!!\n", + ioc->name); + + pci_aer_clear_nonfatal_status(pdev); + + leapioraid_base_start_watchdog(ioc); + leapioraid_base_start_hba_unplug_watchdog(ioc); + scsi_unblock_requests(ioc->shost); +} + +static pci_ers_result_t +leapioraid_scsihost_pci_mmio_enabled(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = NULL; + struct LEAPIORAID_ADAPTER *ioc = NULL; + + if (leapioraid_scsihost_get_shost_and_ioc(pdev, &shost, &ioc)) { + dev_err(&pdev->dev, "unable to enable mmio\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pr_err("%s: PCI error: mmio enabled callback!!!\n", + ioc->name); + return PCI_ERS_RESULT_RECOVERED; +} + +u8 leapioraid_scsihost_ncq_prio_supp(struct scsi_device *sdev) +{ + u8 ncq_prio_supp = 0; + + struct scsi_vpd *vpd; + + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + if (!vpd || vpd->len < 214) + goto out; + ncq_prio_supp = (vpd->data[213] >> 4) & 1; +out: + rcu_read_unlock(); + return ncq_prio_supp; +} + +static const struct pci_device_id leapioraid_pci_table[] = { + { 0x1556, 0x1111, PCI_ANY_ID, PCI_ANY_ID }, + { LEAPIORAID_VENDOR_ID, LEAPIORAID_DEVICE_ID_1, PCI_ANY_ID, PCI_ANY_ID }, + { LEAPIORAID_VENDOR_ID, LEAPIORAID_DEVICE_ID_2, PCI_ANY_ID, PCI_ANY_ID }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, leapioraid_pci_table); +static 
struct pci_error_handlers leapioraid_err_handler = { + .error_detected = leapioraid_scsihost_pci_error_detected, + .mmio_enabled = leapioraid_scsihost_pci_mmio_enabled, + .slot_reset = leapioraid_scsihost_pci_slot_reset, + .resume = leapioraid_scsihost_pci_resume, +}; + +static struct pci_driver leapioraid_driver = { + .name = LEAPIORAID_DRIVER_NAME, + .id_table = leapioraid_pci_table, + .probe = leapioraid_scsihost_probe, + .remove = leapioraid_scsihost_remove, + .shutdown = leapioraid_scsihost_shutdown, + .err_handler = &leapioraid_err_handler, +#ifdef CONFIG_PM + .suspend = leapioraid_scsihost_suspend, + .resume = leapioraid_scsihost_resume, +#endif +}; + +static int +leapioraid_scsihost_init(void) +{ + leapioraid_ids = 0; + leapioraid_base_initialize_callback_handler(); + + scsi_io_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_scsihost_io_done); + tm_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_scsihost_tm_done); + base_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_base_done); + port_enable_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_port_enable_done); + transport_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_transport_done); + scsih_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_scsihost_done); + config_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_config_done); + ctl_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_ctl_done); + ctl_tm_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_ctl_tm_done); + tm_tr_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_scsihost_tm_tr_complete); + tm_tr_volume_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_scsihost_tm_volume_tr_complete); + tm_tr_internal_cb_idx = + leapioraid_base_register_callback_handler( + leapioraid_scsihost_tm_internal_tr_complete); + tm_sas_control_cb_idx = + 
leapioraid_base_register_callback_handler( + leapioraid_scsihost_sas_control_complete); + + return 0; +} + +static void +leapioraid_scsihost_exit(void) +{ + leapioraid_base_release_callback_handler(scsi_io_cb_idx); + leapioraid_base_release_callback_handler(tm_cb_idx); + leapioraid_base_release_callback_handler(base_cb_idx); + leapioraid_base_release_callback_handler(port_enable_cb_idx); + leapioraid_base_release_callback_handler(transport_cb_idx); + leapioraid_base_release_callback_handler(scsih_cb_idx); + leapioraid_base_release_callback_handler(config_cb_idx); + leapioraid_base_release_callback_handler(ctl_cb_idx); + leapioraid_base_release_callback_handler(ctl_tm_cb_idx); + leapioraid_base_release_callback_handler(tm_tr_cb_idx); + leapioraid_base_release_callback_handler(tm_tr_volume_cb_idx); + leapioraid_base_release_callback_handler(tm_tr_internal_cb_idx); + leapioraid_base_release_callback_handler(tm_sas_control_cb_idx); + + raid_class_release(leapioraid_raid_template); + sas_release_transport(leapioraid_transport_template); +} + +static int __init leapioraid_init(void) +{ + int error; + + pr_info("%s version %s loaded\n", LEAPIORAID_DRIVER_NAME, + LEAPIORAID_DRIVER_VERSION); + leapioraid_transport_template = + sas_attach_transport(&leapioraid_transport_functions); + + if (!leapioraid_transport_template) + return -ENODEV; + + leapioraid_raid_template = + raid_class_attach(&leapioraid_raid_functions); + if (!leapioraid_raid_template) { + sas_release_transport(leapioraid_transport_template); + return -ENODEV; + } + + error = leapioraid_scsihost_init(); + if (error) { + leapioraid_scsihost_exit(); + return error; + } + leapioraid_ctl_init(); + error = pci_register_driver(&leapioraid_driver); + if (error) + leapioraid_scsihost_exit(); + return error; +} + +static void __exit leapioraid_exit(void) +{ + pr_info("leapioraid_ids version %s unloading\n", + LEAPIORAID_DRIVER_VERSION); + leapioraid_ctl_exit(); + pci_unregister_driver(&leapioraid_driver); + 
leapioraid_scsihost_exit(); +} + +module_init(leapioraid_init); +module_exit(leapioraid_exit); diff --git a/drivers/scsi/leapioraid/leapioraid_transport.c b/drivers/scsi/leapioraid/leapioraid_transport.c new file mode 100644 index 000000000000..e7ff263a8b6e --- /dev/null +++ b/drivers/scsi/leapioraid/leapioraid_transport.c @@ -0,0 +1,1926 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SAS Transport Layer for MPT (Message Passing Technology) based controllers + * + * Copyright (C) 2013-2018 LSI Corporation + * Copyright (C) 2013-2018 Avago Technologies + * Copyright (C) 2013-2018 Broadcom Inc. + * (mailto:MPT-FusionLinux.pdl@broadcom.com) + * + * Copyright (C) 2024 LeapIO Tech Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "leapioraid_func.h" + +static +struct leapioraid_raid_sas_node *leapioraid_transport_sas_node_find_by_sas_address( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, struct leapioraid_hba_port *port) +{ + if (ioc->sas_hba.sas_address == sas_address) + return &ioc->sas_hba; + else + return leapioraid_scsihost_expander_find_by_sas_address(ioc, + sas_address, + port); +} + +static inline u8 +leapioraid_transport_get_port_id_by_sas_phy(struct sas_phy *phy) +{ + u8 port_id = 0xFF; + struct leapioraid_hba_port *port = phy->hostdata; + + if (port) + port_id = port->port_id; + else + BUG(); + return port_id; +} + +static int +leapioraid_transport_find_parent_node( + struct LEAPIORAID_ADAPTER *ioc, struct sas_phy *phy) +{ + unsigned long flags; + struct leapioraid_hba_port *port = phy->hostdata; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if (leapioraid_transport_sas_node_find_by_sas_address(ioc, + phy->identify.sas_address, + port) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return 0; +} + +static u8 +leapioraid_transport_get_port_id_by_rphy(struct LEAPIORAID_ADAPTER *ioc, + struct sas_rphy *rphy) +{ + struct leapioraid_raid_sas_node *sas_expander; + struct 
leapioraid_sas_device *sas_device; + unsigned long flags; + u8 port_id = 0xFF; + + if (!rphy) + return port_id; + if (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE || + rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE) { + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->rphy == rphy) { + port_id = sas_expander->port->port_id; + break; + } + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + } else if (rphy->identify.device_type == SAS_END_DEVICE) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr_and_rphy( + ioc, rphy->identify.sas_address, rphy); + if (sas_device) { + port_id = sas_device->port->port_id; + leapioraid_sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + return port_id; +} + +static enum sas_linkrate +leapioraid_transport_convert_phy_link_rate(u8 link_rate) +{ + enum sas_linkrate rc; + + switch (link_rate) { + case LEAPIORAID_SAS_NEG_LINK_RATE_1_5: + rc = SAS_LINK_RATE_1_5_GBPS; + break; + case LEAPIORAID_SAS_NEG_LINK_RATE_3_0: + rc = SAS_LINK_RATE_3_0_GBPS; + break; + case LEAPIORAID_SAS_NEG_LINK_RATE_6_0: + rc = SAS_LINK_RATE_6_0_GBPS; + break; + case LEAPIORAID_SAS_NEG_LINK_RATE_12_0: + rc = SAS_LINK_RATE_12_0_GBPS; + break; + case LEAPIORAID_SAS_NEG_LINK_RATE_PHY_DISABLED: + rc = SAS_PHY_DISABLED; + break; + case LEAPIORAID_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED: + rc = SAS_LINK_RATE_FAILED; + break; + case LEAPIORAID_SAS_NEG_LINK_RATE_PORT_SELECTOR: + rc = SAS_SATA_PORT_SELECTOR; + break; + case LEAPIORAID_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS: + default: + case LEAPIORAID_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE: + case LEAPIORAID_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE: + rc = SAS_LINK_RATE_UNKNOWN; + break; + } + return rc; +} + +static int +leapioraid_transport_set_identify( + struct LEAPIORAID_ADAPTER *ioc, u16 handle, + struct sas_identify *identify) 
+{ + struct LeapioraidSasDevP0_t sas_device_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u32 device_info; + u32 ioc_status; + + if ((ioc->shost_recovery && !ioc->is_driver_loading) + || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + if ((leapioraid_config_get_sas_device_pg0 + (ioc, &mpi_reply, &sas_device_pg0, + LEAPIORAID_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n", + ioc->name, handle, + ioc_status, __FILE__, __LINE__, __func__); + return -EIO; + } + memset(identify, 0, sizeof(struct sas_identify)); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + identify->phy_identifier = sas_device_pg0.PhyNum; + switch (device_info & LEAPIORAID_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) { + case LEAPIORAID_SAS_DEVICE_INFO_NO_DEVICE: + identify->device_type = SAS_PHY_UNUSED; + break; + case LEAPIORAID_SAS_DEVICE_INFO_END_DEVICE: + identify->device_type = SAS_END_DEVICE; + break; + case LEAPIORAID_SAS_DEVICE_INFO_EDGE_EXPANDER: + identify->device_type = SAS_EDGE_EXPANDER_DEVICE; + break; + case LEAPIORAID_SAS_DEVICE_INFO_FANOUT_EXPANDER: + identify->device_type = SAS_FANOUT_EXPANDER_DEVICE; + break; + } + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SSP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_STP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SMP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SMP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_HOST) + 
identify->initiator_port_protocols |= SAS_PROTOCOL_SATA; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SSP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SSP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_STP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_STP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SMP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SMP; + if (device_info & LEAPIORAID_SAS_DEVICE_INFO_SATA_DEVICE) + identify->target_port_protocols |= SAS_PROTOCOL_SATA; + return 0; +} + +u8 +leapioraid_transport_done(struct LEAPIORAID_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + struct LeapioraidDefaultRep_t *mpi_reply; + + mpi_reply = leapioraid_base_get_reply_virt_addr(ioc, reply); + if (ioc->transport_cmds.status == LEAPIORAID_CMD_NOT_USED) + return 1; + if (ioc->transport_cmds.smid != smid) + return 1; + ioc->transport_cmds.status |= LEAPIORAID_CMD_COMPLETE; + if (mpi_reply) { + memcpy(ioc->transport_cmds.reply, mpi_reply, + mpi_reply->MsgLength * 4); + ioc->transport_cmds.status |= LEAPIORAID_CMD_REPLY_VALID; + } + ioc->transport_cmds.status &= ~LEAPIORAID_CMD_PENDING; + complete(&ioc->transport_cmds.done); + return 1; +} + +#if defined(LEAPIORAID_WIDE_PORT_API) +struct leapioraid_rep_manu_request { + u8 smp_frame_type; + u8 function; + u8 reserved; + u8 request_length; +}; + +struct leapioraid_rep_manu_reply { + u8 smp_frame_type; + u8 function; + u8 function_result; + u8 response_length; + u16 expander_change_count; + u8 reserved0[2]; + u8 sas_format; + u8 reserved2[3]; + u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; + u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; + u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN]; + u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN]; + u16 component_id; + u8 component_revision_id; + u8 reserved3; + u8 vendor_specific[8]; +}; + +static int +leapioraid_transport_expander_report_manufacture( + struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, + struct sas_expander_device 
*edev, + u8 port_id) +{ + struct LeapioraidSmpPassthroughReq_t *mpi_request; + struct LeapioraidSmpPassthroughRep_t *mpi_reply; + struct leapioraid_rep_manu_reply *manufacture_reply; + struct leapioraid_rep_manu_request *manufacture_request; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + dma_addr_t data_in_dma; + size_t data_in_sz; + size_t data_out_sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + mutex_lock(&ioc->transport_cmds.mutex); + if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + return -EAGAIN; + } + ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + rc = 0; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + data_out_sz = sizeof(struct leapioraid_rep_manu_request); + data_in_sz = sizeof(struct leapioraid_rep_manu_reply); + data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz, + &data_out_dma, GFP_ATOMIC); + if (!data_out) { + rc = -ENOMEM; + leapioraid_base_free_smid(ioc, smid); + goto out; + } + data_in_dma = data_out_dma + sizeof(struct leapioraid_rep_manu_request); + manufacture_request = data_out; + manufacture_request->smp_frame_type = 0x40; + manufacture_request->function = 1; + manufacture_request->reserved = 0; + manufacture_request->request_length = 0; + memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = port_id; + 
mpi_request->SASAddress = cpu_to_le64(sas_address); + mpi_request->RequestDataLength = cpu_to_le16(data_out_sz); + psge = &mpi_request->SGL; + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + dtransportprintk(ioc, + pr_info("%s report_manufacture - send to sas_addr(0x%016llx)\n", + ioc->name, + (unsigned long long)sas_address)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidSmpPassthroughReq_t) / 4); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + dtransportprintk(ioc, + pr_info("%s report_manufacture - complete\n", ioc->name)); + if (ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + u8 *tmp; + + mpi_reply = ioc->transport_cmds.reply; + dtransportprintk(ioc, pr_err( + "%s report_manufacture - reply data transfer size(%d)\n", + ioc->name, + le16_to_cpu(mpi_reply->ResponseDataLength))); + if (le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct leapioraid_rep_manu_reply)) + goto out; + manufacture_reply = data_out + sizeof(struct leapioraid_rep_manu_request); + strscpy(edev->vendor_id, manufacture_reply->vendor_id, + sizeof(edev->vendor_id)); + strscpy(edev->product_id, manufacture_reply->product_id, + sizeof(edev->product_id)); + strscpy(edev->product_rev, manufacture_reply->product_rev, + sizeof(edev->product_rev)); + edev->level = manufacture_reply->sas_format & 1; + if (edev->level) { + strscpy(edev->component_vendor_id, + manufacture_reply->component_vendor_id, + sizeof(edev->component_vendor_id)); + tmp = (u8 *) &manufacture_reply->component_id; + edev->component_id = tmp[0] << 8 | tmp[1]; + edev->component_revision_id = + manufacture_reply->component_revision_id; + 
} + } else + dtransportprintk(ioc, pr_err( + "%s report_manufacture - no reply\n", + ioc->name)); +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); +out: + ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED; + if (data_out) + dma_free_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz, + data_out, data_out_dma); + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} +#endif + +static void +leapioraid_transport_delete_port(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_port *leapioraid_port) +{ + u64 sas_address = leapioraid_port->remote_identify.sas_address; + struct leapioraid_hba_port *port = leapioraid_port->hba_port; + enum sas_device_type device_type = + leapioraid_port->remote_identify.device_type; + +#if defined(LEAPIORAID_WIDE_PORT_API) + dev_info(&leapioraid_port->port->dev, + "remove: sas_addr(0x%016llx)\n", + (unsigned long long)sas_address); +#endif + ioc->logging_level |= LEAPIORAID_DEBUG_TRANSPORT; + if (device_type == SAS_END_DEVICE) + leapioraid_device_remove_by_sas_address(ioc, sas_address, port); + else if (device_type == SAS_EDGE_EXPANDER_DEVICE || + device_type == SAS_FANOUT_EXPANDER_DEVICE) + leapioraid_expander_remove(ioc, sas_address, port); + ioc->logging_level &= ~LEAPIORAID_DEBUG_TRANSPORT; +} + +#if defined(LEAPIORAID_WIDE_PORT_API) +static void +leapioraid_transport_delete_phy(struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_port *leapioraid_port, + struct leapioraid_sas_phy *leapioraid_phy) +{ + u64 sas_address = leapioraid_port->remote_identify.sas_address; + + dev_info(&leapioraid_phy->phy->dev, + "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long)sas_address, leapioraid_phy->phy_id); + list_del(&leapioraid_phy->port_siblings); + leapioraid_port->num_phys--; + sas_port_delete_phy(leapioraid_port->port, leapioraid_phy->phy); + leapioraid_phy->phy_belongs_to_port = 0; +} + +static void +leapioraid_transport_add_phy(struct LEAPIORAID_ADAPTER *ioc, + 
struct leapioraid_sas_port *leapioraid_port, + struct leapioraid_sas_phy *leapioraid_phy) +{ + u64 sas_address = leapioraid_port->remote_identify.sas_address; + + dev_info(&leapioraid_phy->phy->dev, + "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long) + sas_address, leapioraid_phy->phy_id); + list_add_tail(&leapioraid_phy->port_siblings, + &leapioraid_port->phy_list); + leapioraid_port->num_phys++; + sas_port_add_phy(leapioraid_port->port, leapioraid_phy->phy); + leapioraid_phy->phy_belongs_to_port = 1; +} + +void +leapioraid_transport_add_phy_to_an_existing_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_node, + struct leapioraid_sas_phy *leapioraid_phy, + u64 sas_address, + struct leapioraid_hba_port *port) +{ + struct leapioraid_sas_port *leapioraid_port; + struct leapioraid_sas_phy *phy_srch; + + if (leapioraid_phy->phy_belongs_to_port == 1) + return; + if (!port) + return; + list_for_each_entry(leapioraid_port, &sas_node->sas_port_list, + port_list) { + if (leapioraid_port->remote_identify.sas_address != sas_address) + continue; + if (leapioraid_port->hba_port != port) + continue; + list_for_each_entry(phy_srch, &leapioraid_port->phy_list, + port_siblings) { + if (phy_srch == leapioraid_phy) + return; + } + leapioraid_transport_add_phy(ioc, leapioraid_port, leapioraid_phy); + return; + } +} +#endif + +void +leapioraid_transport_del_phy_from_an_existing_port( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_node, + struct leapioraid_sas_phy *leapioraid_phy) +{ + struct leapioraid_sas_port *leapioraid_port, *next; + struct leapioraid_sas_phy *phy_srch; + + if (leapioraid_phy->phy_belongs_to_port == 0) + return; + list_for_each_entry_safe(leapioraid_port, next, + &sas_node->sas_port_list, port_list) { + list_for_each_entry(phy_srch, &leapioraid_port->phy_list, + port_siblings) { + if (phy_srch != leapioraid_phy) + continue; +#if defined(LEAPIORAID_WIDE_PORT_API) + if (leapioraid_port->num_phys == 1 + 
&& !ioc->shost_recovery) + leapioraid_transport_delete_port(ioc, leapioraid_port); + else + leapioraid_transport_delete_phy(ioc, leapioraid_port, + leapioraid_phy); +#else + leapioraid_transport_delete_port(ioc, leapioraid_port); +#endif + return; + } + } +} + +static void +leapioraid_transport_sanity_check( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_raid_sas_node *sas_node, u64 sas_address, + struct leapioraid_hba_port *port) +{ + int i; + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address != sas_address + || sas_node->phy[i].port != port) + continue; + if (sas_node->phy[i].phy_belongs_to_port == 1) + leapioraid_transport_del_phy_from_an_existing_port(ioc, + sas_node, + &sas_node->phy + [i]); + } +} + +struct leapioraid_sas_port *leapioraid_transport_port_add( + struct LEAPIORAID_ADAPTER *ioc, + u16 handle, u64 sas_address, + struct leapioraid_hba_port *hba_port) +{ + struct leapioraid_sas_phy *leapioraid_phy, *next; + struct leapioraid_sas_port *leapioraid_port; + unsigned long flags; + struct leapioraid_raid_sas_node *sas_node; + struct sas_rphy *rphy; + struct leapioraid_sas_device *sas_device = NULL; + int i; +#if defined(LEAPIORAID_WIDE_PORT_API) + struct sas_port *port; +#endif + struct leapioraid_virtual_phy *vphy = NULL; + + if (!hba_port) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return NULL; + } + leapioraid_port = kzalloc(sizeof(struct leapioraid_sas_port), GFP_KERNEL); + if (!leapioraid_port) + return NULL; + INIT_LIST_HEAD(&leapioraid_port->port_list); + INIT_LIST_HEAD(&leapioraid_port->phy_list); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = leapioraid_transport_sas_node_find_by_sas_address( + ioc, + sas_address, + hba_port); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (!sas_node) { + pr_err("%s %s: Could not find parent sas_address(0x%016llx)!\n", + ioc->name, + __func__, (unsigned long long)sas_address); + goto 
out_fail; + } + if ((leapioraid_transport_set_identify(ioc, handle, + &leapioraid_port->remote_identify))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + if (leapioraid_port->remote_identify.device_type == SAS_PHY_UNUSED) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + leapioraid_port->hba_port = hba_port; + leapioraid_transport_sanity_check(ioc, sas_node, + leapioraid_port->remote_identify.sas_address, + hba_port); + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address != + leapioraid_port->remote_identify.sas_address || + sas_node->phy[i].port != hba_port) + continue; + list_add_tail(&sas_node->phy[i].port_siblings, + &leapioraid_port->phy_list); + leapioraid_port->num_phys++; + if (sas_node->handle <= ioc->sas_hba.num_phys) { + if (!sas_node->phy[i].hba_vphy) { + hba_port->phy_mask |= (1 << i); + continue; + } + vphy = leapioraid_get_vphy_by_phy(ioc, hba_port, i); + if (!vphy) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + } + } + if (!leapioraid_port->num_phys) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) { + sas_device = leapioraid_get_sdev_by_addr(ioc, + leapioraid_port->remote_identify.sas_address, + leapioraid_port->hba_port); + if (!sas_device) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + sas_device->pend_sas_rphy_add = 1; + } +#if defined(LEAPIORAID_WIDE_PORT_API) + if (!sas_node->parent_dev) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + port = sas_port_alloc_num(sas_node->parent_dev); + if ((sas_port_add(port))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, 
__LINE__, __func__); + goto out_fail; + } + list_for_each_entry(leapioraid_phy, &leapioraid_port->phy_list, + port_siblings) { + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&port->dev, + "add: handle(0x%04x), sas_addr(0x%016llx), phy(%d)\n", + handle, + (unsigned long long) + leapioraid_port->remote_identify.sas_address, + leapioraid_phy->phy_id); + sas_port_add_phy(port, leapioraid_phy->phy); + leapioraid_phy->phy_belongs_to_port = 1; + leapioraid_phy->port = hba_port; + } + leapioraid_port->port = port; + if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) { + rphy = sas_end_device_alloc(port); + sas_device->rphy = rphy; + if (sas_node->handle <= ioc->sas_hba.num_phys) { + if (!vphy) + hba_port->sas_address = sas_device->sas_address; + else + vphy->sas_address = sas_device->sas_address; + } + } else { + rphy = sas_expander_alloc(port, + leapioraid_port->remote_identify.device_type); + if (sas_node->handle <= ioc->sas_hba.num_phys) + hba_port->sas_address = + leapioraid_port->remote_identify.sas_address; + } +#else + leapioraid_phy = + list_entry(leapioraid_port->phy_list.next, struct leapioraid_sas_phy, + port_siblings); + if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) { + rphy = sas_end_device_alloc(leapioraid_phy->phy); + sas_device->rphy = rphy; + } else + rphy = sas_expander_alloc(leapioraid_phy->phy, + leapioraid_port->remote_identify.device_type); +#endif + rphy->identify = leapioraid_port->remote_identify; + if ((sas_rphy_add(rphy))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + } + if (leapioraid_port->remote_identify.device_type == SAS_END_DEVICE) { + sas_device->pend_sas_rphy_add = 0; + leapioraid_sas_device_put(sas_device); + } + dev_info(&rphy->dev, + "%s: added: handle(0x%04x), sas_addr(0x%016llx)\n", + __func__, handle, (unsigned long long) + leapioraid_port->remote_identify.sas_address); + leapioraid_port->rphy = rphy; + 
spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_add_tail(&leapioraid_port->port_list, &sas_node->sas_port_list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +#if defined(LEAPIORAID_WIDE_PORT_API) + if (leapioraid_port->remote_identify.device_type == + LEAPIORAID_SAS_DEVICE_INFO_EDGE_EXPANDER || + leapioraid_port->remote_identify.device_type == + LEAPIORAID_SAS_DEVICE_INFO_FANOUT_EXPANDER) + leapioraid_transport_expander_report_manufacture(ioc, + leapioraid_port->remote_identify.sas_address, + rphy_to_expander_device + (rphy), + hba_port->port_id); +#endif + return leapioraid_port; +out_fail: + list_for_each_entry_safe(leapioraid_phy, next, + &leapioraid_port->phy_list, port_siblings) + list_del(&leapioraid_phy->port_siblings); + kfree(leapioraid_port); + return NULL; +} + +void +leapioraid_transport_port_remove(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, u64 sas_address_parent, + struct leapioraid_hba_port *port) +{ + int i; + unsigned long flags; + struct leapioraid_sas_port *leapioraid_port, *next; + struct leapioraid_raid_sas_node *sas_node; + u8 found = 0; +#if defined(LEAPIORAID_WIDE_PORT_API) + struct leapioraid_sas_phy *leapioraid_phy, *next_phy; +#endif + struct leapioraid_hba_port *hba_port, *hba_port_next = NULL; + struct leapioraid_virtual_phy *vphy, *vphy_next = NULL; + + if (!port) + return; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = leapioraid_transport_sas_node_find_by_sas_address( + ioc, + sas_address_parent, + port); + if (!sas_node) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + list_for_each_entry_safe(leapioraid_port, next, + &sas_node->sas_port_list, port_list) { + if (leapioraid_port->remote_identify.sas_address != sas_address) + continue; + if (leapioraid_port->hba_port != port) + continue; + found = 1; + list_del(&leapioraid_port->port_list); + goto out; + } +out: + if (!found) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + if ((sas_node->handle <= 
ioc->sas_hba.num_phys) && + (ioc->multipath_on_hba)) { + if (port->vphys_mask) { + list_for_each_entry_safe(vphy, vphy_next, + &port->vphys_list, list) { + if (vphy->sas_address != sas_address) + continue; + pr_err( + "%s remove vphy entry: %p of port:%p,\n\t\t" + "from %d port's vphys list\n", + ioc->name, + vphy, + port, + port->port_id); + port->vphys_mask &= ~vphy->phy_mask; + list_del(&vphy->list); + kfree(vphy); + } + if (!port->vphys_mask && !port->sas_address) { + pr_err( + "%s remove hba_port entry: %p port: %d\n\t\t" + "from hba_port list\n", + ioc->name, + port, + port->port_id); + list_del(&port->list); + kfree(port); + } + } + list_for_each_entry_safe(hba_port, hba_port_next, + &ioc->port_table_list, list) { + if (hba_port != port) + continue; + if (hba_port->sas_address != sas_address) + continue; + if (!port->vphys_mask) { + pr_err( + "%s remove hba_port entry: %p port: %d\n\t\t" + "from hba_port list\n", + ioc->name, + hba_port, + hba_port->port_id); + list_del(&hba_port->list); + kfree(hba_port); + } else { + pr_err( + "%s clearing sas_address from hba_port entry: %p\n\t\t" + "port: %d from hba_port list\n", + ioc->name, + hba_port, + hba_port->port_id); + port->sas_address = 0; + } + break; + } + } + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address == sas_address) { + memset(&sas_node->phy[i].remote_identify, 0, + sizeof(struct sas_identify)); + sas_node->phy[i].hba_vphy = 0; + } + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +#if defined(LEAPIORAID_WIDE_PORT_API) + list_for_each_entry_safe(leapioraid_phy, next_phy, + &leapioraid_port->phy_list, port_siblings) { + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&leapioraid_port->port->dev, + "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long) + leapioraid_port->remote_identify.sas_address, + leapioraid_phy->phy_id); + leapioraid_phy->phy_belongs_to_port = 0; + if (!ioc->remove_host) + 
sas_port_delete_phy(leapioraid_port->port, + leapioraid_phy->phy); + list_del(&leapioraid_phy->port_siblings); + } + if (!ioc->remove_host) + sas_port_delete(leapioraid_port->port); + pr_info("%s %s: removed: sas_addr(0x%016llx)\n", + ioc->name, __func__, (unsigned long long)sas_address); +#else + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&leapioraid_port->rphy->dev, + "remove: sas_addr(0x%016llx)\n", + (unsigned long long)sas_address); + if (!ioc->remove_host) + sas_rphy_delete(leapioraid_port->rphy); + pr_info("%s %s: removed: sas_addr(0x%016llx)\n", + ioc->name, __func__, (unsigned long long)sas_address); +#endif + kfree(leapioraid_port); +} + +int +leapioraid_transport_add_host_phy( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_phy *leapioraid_phy, + struct LeapioraidSasPhyP0_t phy_pg0, + struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = leapioraid_phy->phy_id; + + INIT_LIST_HEAD(&leapioraid_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + if ((leapioraid_transport_set_identify(ioc, leapioraid_phy->handle, + &leapioraid_phy->identify))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + phy->identify = leapioraid_phy->identify; + leapioraid_phy->attached_handle = + le16_to_cpu(phy_pg0.AttachedDevHandle); + if (leapioraid_phy->attached_handle) + leapioraid_transport_set_identify( + ioc, leapioraid_phy->attached_handle, + &leapioraid_phy->remote_identify); + phy->identify.phy_identifier = leapioraid_phy->phy_id; + phy->negotiated_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.NegotiatedLinkRate & + LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + phy->minimum_linkrate_hw = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.HwLinkRate & + LEAPIORAID_SAS_HWRATE_MIN_RATE_MASK); + 
phy->maximum_linkrate_hw = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.HwLinkRate >> 4); + phy->minimum_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate & + LEAPIORAID_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate >> 4); + phy->hostdata = leapioraid_phy->port; +#if !defined(LEAPIORAID_WIDE_PORT_API_PLUS) + phy->local_attached = 1; +#endif +#if !defined(LEAPIORAID_WIDE_PORT_API) + phy->port_identifier = phy_index; +#endif + if ((sas_phy_add(phy))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&phy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + leapioraid_phy->handle, (unsigned long long) + leapioraid_phy->identify.sas_address, + leapioraid_phy->attached_handle, (unsigned long long) + leapioraid_phy->remote_identify.sas_address); + leapioraid_phy->phy = phy; + return 0; +} + +int +leapioraid_transport_add_expander_phy( + struct LEAPIORAID_ADAPTER *ioc, + struct leapioraid_sas_phy *leapioraid_phy, + struct LeapioraidExpanderP1_t expander_pg1, + struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = leapioraid_phy->phy_id; + + INIT_LIST_HEAD(&leapioraid_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + if ((leapioraid_transport_set_identify(ioc, leapioraid_phy->handle, + &leapioraid_phy->identify))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + phy->identify = leapioraid_phy->identify; + leapioraid_phy->attached_handle = + le16_to_cpu(expander_pg1.AttachedDevHandle); + if (leapioraid_phy->attached_handle) + 
leapioraid_transport_set_identify( + ioc, leapioraid_phy->attached_handle, + &leapioraid_phy->remote_identify); + phy->identify.phy_identifier = leapioraid_phy->phy_id; + phy->negotiated_linkrate = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.NegotiatedLinkRate & + LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + phy->minimum_linkrate_hw = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.HwLinkRate & + LEAPIORAID_SAS_HWRATE_MIN_RATE_MASK); + phy->maximum_linkrate_hw = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.HwLinkRate >> 4); + phy->minimum_linkrate = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.ProgrammedLinkRate & + LEAPIORAID_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = + leapioraid_transport_convert_phy_link_rate( + expander_pg1.ProgrammedLinkRate >> 4); + phy->hostdata = leapioraid_phy->port; +#if !defined(LEAPIORAID_WIDE_PORT_API) + phy->port_identifier = phy_index; +#endif + if ((sas_phy_add(phy))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&phy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + leapioraid_phy->handle, (unsigned long long) + leapioraid_phy->identify.sas_address, + leapioraid_phy->attached_handle, (unsigned long long) + leapioraid_phy->remote_identify.sas_address); + leapioraid_phy->phy = phy; + return 0; +} + +void +leapioraid_transport_update_links(struct LEAPIORAID_ADAPTER *ioc, + u64 sas_address, u16 handle, u8 phy_number, + u8 link_rate, struct leapioraid_hba_port *port) +{ + unsigned long flags; + struct leapioraid_raid_sas_node *sas_node; + struct leapioraid_sas_phy *leapioraid_phy; + struct leapioraid_hba_port *hba_port = NULL; + + if (ioc->shost_recovery || ioc->pci_error_recovery) + return; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = 
leapioraid_transport_sas_node_find_by_sas_address(ioc, + sas_address, port); + if (!sas_node) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + leapioraid_phy = &sas_node->phy[phy_number]; + leapioraid_phy->attached_handle = handle; + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (handle && (link_rate >= LEAPIORAID_SAS_NEG_LINK_RATE_1_5)) { + leapioraid_transport_set_identify(ioc, handle, + &leapioraid_phy->remote_identify); +#if defined(LEAPIORAID_WIDE_PORT_API) + if ((sas_node->handle <= ioc->sas_hba.num_phys) && + (ioc->multipath_on_hba)) { + list_for_each_entry(hba_port, + &ioc->port_table_list, list) { + if (hba_port->sas_address == sas_address && + hba_port == port) + hba_port->phy_mask |= + (1 << leapioraid_phy->phy_id); + } + } + leapioraid_transport_add_phy_to_an_existing_port(ioc, sas_node, + leapioraid_phy, + leapioraid_phy->remote_identify.sas_address, + port); +#endif + } else + memset(&leapioraid_phy->remote_identify, 0, sizeof(struct + sas_identify)); + if (leapioraid_phy->phy) + leapioraid_phy->phy->negotiated_linkrate = + leapioraid_transport_convert_phy_link_rate(link_rate); + if ((ioc->logging_level & LEAPIORAID_DEBUG_TRANSPORT)) + dev_info(&leapioraid_phy->phy->dev, + "refresh: parent sas_addr(0x%016llx),\n" + "\tlink_rate(0x%02x), phy(%d)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + (unsigned long long)sas_address, + link_rate, phy_number, handle, (unsigned long long) + leapioraid_phy->remote_identify.sas_address); +} + +static inline void *phy_to_ioc(struct sas_phy *phy) +{ + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + + return leapioraid_shost_private(shost); +} + +static inline void *rphy_to_ioc(struct sas_rphy *rphy) +{ + struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); + + return leapioraid_shost_private(shost); +} + +struct leapioraid_phy_error_log_request { + u8 smp_frame_type; + u8 function; + u8 allocated_response_length; + u8 request_length; + u8 
reserved_1[5]; + u8 phy_identifier; + u8 reserved_2[2]; +}; + +struct leapioraid_phy_error_log_reply { + u8 smp_frame_type; + u8 function; + u8 function_result; + u8 response_length; + __be16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 reserved_2[2]; + __be32 invalid_dword; + __be32 running_disparity_error; + __be32 loss_of_dword_sync; + __be32 phy_reset_problem; +}; + +static int +leapioraid_transport_get_expander_phy_error_log( + struct LEAPIORAID_ADAPTER *ioc, struct sas_phy *phy) +{ + struct LeapioraidSmpPassthroughReq_t *mpi_request; + struct LeapioraidSmpPassthroughRep_t *mpi_reply; + struct leapioraid_phy_error_log_request *phy_error_log_request; + struct leapioraid_phy_error_log_reply *phy_error_log_reply; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + u32 sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + mutex_lock(&ioc->transport_cmds.mutex); + if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + return -EAGAIN; + } + ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + sz = sizeof(struct leapioraid_phy_error_log_request) + + sizeof(struct leapioraid_phy_error_log_reply); + data_out = + dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma, + GFP_ATOMIC); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + leapioraid_base_free_smid(ioc, smid); + 
goto out; + } + rc = -EINVAL; + memset(data_out, 0, sz); + phy_error_log_request = data_out; + phy_error_log_request->smp_frame_type = 0x40; + phy_error_log_request->function = 0x11; + phy_error_log_request->request_length = 2; + phy_error_log_request->allocated_response_length = 0; + phy_error_log_request->phy_identifier = phy->number; + memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = leapioraid_transport_get_port_id_by_sas_phy(phy); + mpi_request->VF_ID = 0; + mpi_request->VP_ID = 0; + mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); + mpi_request->RequestDataLength = + cpu_to_le16(sizeof(struct leapioraid_phy_error_log_request)); + psge = &mpi_request->SGL; + ioc->build_sg(ioc, psge, data_out_dma, + sizeof(struct leapioraid_phy_error_log_request), + data_out_dma + sizeof(struct leapioraid_phy_error_log_request), + sizeof(struct leapioraid_phy_error_log_reply)); + dtransportprintk(ioc, pr_info( + "%s phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n", + ioc->name, + (unsigned long long)phy->identify.sas_address, + phy->number)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidSmpPassthroughReq_t) / 4); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + dtransportprintk(ioc, pr_info("%s phy_error_log - complete\n", ioc->name)); + if (ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + mpi_reply = ioc->transport_cmds.reply; + dtransportprintk(ioc, pr_err( + "%s phy_error_log - reply data transfer size(%d)\n", + ioc->name, + le16_to_cpu(mpi_reply->ResponseDataLength))); + if 
(le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct leapioraid_phy_error_log_reply)) + goto out; + phy_error_log_reply = data_out + + sizeof(struct leapioraid_phy_error_log_request); + dtransportprintk(ioc, pr_err( + "%s phy_error_log - function_result(%d)\n", + ioc->name, + phy_error_log_reply->function_result)); + phy->invalid_dword_count = + be32_to_cpu(phy_error_log_reply->invalid_dword); + phy->running_disparity_error_count = + be32_to_cpu(phy_error_log_reply->running_disparity_error); + phy->loss_of_dword_sync_count = + be32_to_cpu(phy_error_log_reply->loss_of_dword_sync); + phy->phy_reset_problem_count = + be32_to_cpu(phy_error_log_reply->phy_reset_problem); + rc = 0; + } else + dtransportprintk(ioc, pr_err( + "%s phy_error_log - no reply\n", + ioc->name)); +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); +out: + ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED; + if (data_out) + dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma); + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + +static int +leapioraid_transport_get_linkerrors(struct sas_phy *phy) +{ + struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy); + struct LeapioraidCfgRep_t mpi_reply; + struct LeapioraidSasPhyP1_t phy_pg1; + int rc = 0; + + rc = leapioraid_transport_find_parent_node(ioc, phy); + if (rc) + return rc; + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return leapioraid_transport_get_expander_phy_error_log(ioc, phy); + if ((leapioraid_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1, + phy->number))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) + pr_info("%s phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, + phy->number, + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); + phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount); + 
phy->running_disparity_error_count = + le32_to_cpu(phy_pg1.RunningDisparityErrorCount); + phy->loss_of_dword_sync_count = + le32_to_cpu(phy_pg1.LossDwordSynchCount); + phy->phy_reset_problem_count = + le32_to_cpu(phy_pg1.PhyResetProblemCount); + return 0; +} + +static int +leapioraid_transport_get_enclosure_identifier( + struct sas_rphy *rphy, u64 *identifier) +{ + struct LEAPIORAID_ADAPTER *ioc = rphy_to_ioc(rphy); + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr_and_rphy(ioc, + rphy->identify.sas_address, rphy); + if (sas_device) { + *identifier = sas_device->enclosure_logical_id; + rc = 0; + leapioraid_sas_device_put(sas_device); + } else { + *identifier = 0; + rc = -ENXIO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +static int +leapioraid_transport_get_bay_identifier(struct sas_rphy *rphy) +{ + struct LEAPIORAID_ADAPTER *ioc = rphy_to_ioc(rphy); + struct leapioraid_sas_device *sas_device; + unsigned long flags; + int rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __leapioraid_get_sdev_by_addr_and_rphy(ioc, + rphy->identify.sas_address, rphy); + if (sas_device) { + rc = sas_device->slot; + leapioraid_sas_device_put(sas_device); + } else { + rc = -ENXIO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +struct leapioraid_phy_control_request { + u8 smp_frame_type; + u8 function; + u8 allocated_response_length; + u8 request_length; + u16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 phy_operation; + u8 reserved_2[13]; + u64 attached_device_name; + u8 programmed_min_physical_link_rate; + u8 programmed_max_physical_link_rate; + u8 reserved_3[6]; +}; + +struct leapioraid_phy_control_reply { + u8 smp_frame_type; + u8 function; + u8 function_result; + u8 response_length; +}; + +#define LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET (0x01) 
+#define LEAPIORAID_SMP_PHY_CONTROL_HARD_RESET (0x02) +#define LEAPIORAID_SMP_PHY_CONTROL_DISABLE (0x03) +static int +leapioraid_transport_expander_phy_control( + struct LEAPIORAID_ADAPTER *ioc, + struct sas_phy *phy, u8 phy_operation) +{ + struct LeapioraidSmpPassthroughReq_t *mpi_request; + struct LeapioraidSmpPassthroughRep_t *mpi_reply; + struct leapioraid_phy_control_request *phy_control_request; + struct leapioraid_phy_control_reply *phy_control_reply; + int rc; + u16 smid; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + u32 sz; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + mutex_lock(&ioc->transport_cmds.mutex); + if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + return -EAGAIN; + } + ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_wait_for_ioc_to_operational(ioc, 10); + if (rc) + goto out; + smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + sz = sizeof(struct leapioraid_phy_control_request) + + sizeof(struct leapioraid_phy_control_reply); + data_out = + dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma, + GFP_ATOMIC); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + leapioraid_base_free_smid(ioc, smid); + goto out; + } + rc = -EINVAL; + memset(data_out, 0, sz); + phy_control_request = data_out; + phy_control_request->smp_frame_type = 0x40; + phy_control_request->function = 0x91; + phy_control_request->request_length = 9; + phy_control_request->allocated_response_length = 0; + 
phy_control_request->phy_identifier = phy->number; + phy_control_request->phy_operation = phy_operation; + phy_control_request->programmed_min_physical_link_rate = + phy->minimum_linkrate << 4; + phy_control_request->programmed_max_physical_link_rate = + phy->maximum_linkrate << 4; + memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = leapioraid_transport_get_port_id_by_sas_phy(phy); + mpi_request->VF_ID = 0; + mpi_request->VP_ID = 0; + mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); + mpi_request->RequestDataLength = + cpu_to_le16(sizeof(struct leapioraid_phy_error_log_request)); + psge = &mpi_request->SGL; + ioc->build_sg(ioc, psge, data_out_dma, + sizeof(struct leapioraid_phy_control_request), + data_out_dma + sizeof(struct leapioraid_phy_control_request), + sizeof(struct leapioraid_phy_control_reply)); + dtransportprintk(ioc, pr_info( + "%s phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", + ioc->name, + (unsigned long long)phy->identify.sas_address, + phy->number, phy_operation)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s: timeout\n", + ioc->name, __func__); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidSmpPassthroughReq_t) / 4); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + dtransportprintk(ioc, pr_info( + "%s phy_control - complete\n", ioc->name)); + if (ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID) { + mpi_reply = ioc->transport_cmds.reply; + dtransportprintk(ioc, pr_err( + "%s phy_control - reply data transfer size(%d)\n", + ioc->name, + le16_to_cpu(mpi_reply->ResponseDataLength))); + if (le16_to_cpu(mpi_reply->ResponseDataLength) != + 
sizeof(struct leapioraid_phy_control_reply)) + goto out; + phy_control_reply = data_out + + sizeof(struct leapioraid_phy_control_request); + dtransportprintk(ioc, pr_err( + "%s phy_control - function_result(%d)\n", + ioc->name, + phy_control_reply->function_result)); + rc = 0; + } else + dtransportprintk(ioc, pr_err( + "%s phy_control - no reply\n", + ioc->name)); +issue_host_reset: + if (issue_reset) + leapioraid_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); +out: + ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED; + if (data_out) + dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma); + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + +static int +leapioraid_transport_phy_reset(struct sas_phy *phy, int hard_reset) +{ + struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy); + struct LeapioraidSasIoUnitControlRep_t mpi_reply; + struct LeapioraidSasIoUnitControlReq_t mpi_request; + int rc = 0; + + rc = leapioraid_transport_find_parent_node(ioc, phy); + if (rc) + return rc; + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return leapioraid_transport_expander_phy_control(ioc, phy, + (hard_reset == + 1) ? + LEAPIORAID_SMP_PHY_CONTROL_HARD_RESET + : + LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET); + memset(&mpi_request, 0, sizeof(struct LeapioraidSasIoUnitControlReq_t)); + mpi_request.Function = LEAPIORAID_FUNC_SAS_IO_UNIT_CONTROL; + mpi_request.Operation = hard_reset ? 
+ LEAPIORAID_SAS_OP_PHY_HARD_RESET : LEAPIORAID_SAS_OP_PHY_LINK_RESET; + mpi_request.PhyNum = phy->number; + if ((leapioraid_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) + pr_info("%s phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, + phy->number, + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); + return 0; +} + +static int +leapioraid_transport_phy_enable(struct sas_phy *phy, int enable) +{ + struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy); + struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL; + struct LeapioraidSasIOUnitP0_t *sas_iounit_pg0 = NULL; + struct LeapioraidCfgRep_t mpi_reply; + u16 ioc_status; + u16 sz; + int rc = 0; + int i, discovery_active; + + rc = leapioraid_transport_find_parent_node(ioc, phy); + if (rc) + return rc; + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return leapioraid_transport_expander_phy_control(ioc, phy, + (enable == + 1) ? 
+ LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET + : + LEAPIORAID_SMP_PHY_CONTROL_DISABLE); + sz = offsetof(struct LeapioraidSasIOUnitP0_t, + PhyData) + + (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT0_PHY_DATA)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((leapioraid_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys; i++) { + if (sas_iounit_pg0->PhyData[i].PortFlags & + LEAPIORAID_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) { + pr_err( + "%s discovery is active on port = %d, phy = %d:\n\t\t" + "unable to enable/disable phys, try again later!\n", + ioc->name, + sas_iounit_pg0->PhyData[i].Port, + i); + discovery_active = 1; + } + } + if (discovery_active) { + rc = -EAGAIN; + goto out; + } + sz = offsetof(struct LeapioraidSasIOUnitP1_t, + PhyData) + + (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, 
__FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + sas_iounit_pg1->PhyData[i].Port = + sas_iounit_pg0->PhyData[i].Port; + sas_iounit_pg1->PhyData[i].PortFlags = + (sas_iounit_pg0->PhyData[i].PortFlags & + LEAPIORAID_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG); + sas_iounit_pg1->PhyData[i].PhyFlags = + (sas_iounit_pg0->PhyData[i].PhyFlags & + (LEAPIORAID_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED + + LEAPIORAID_SASIOUNIT0_PHYFLAGS_PHY_DISABLED)); + } + if (enable) + sas_iounit_pg1->PhyData[phy->number].PhyFlags + &= ~LEAPIORAID_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + else + sas_iounit_pg1->PhyData[phy->number].PhyFlags + |= LEAPIORAID_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + leapioraid_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, + sz); + if (enable) + leapioraid_transport_phy_reset(phy, 0); +out: + kfree(sas_iounit_pg1); + kfree(sas_iounit_pg0); + return rc; +} + +static int +leapioraid_transport_phy_speed( + struct sas_phy *phy, struct sas_phy_linkrates *rates) +{ + struct LEAPIORAID_ADAPTER *ioc = phy_to_ioc(phy); + struct LeapioraidSasIOUnitP1_t *sas_iounit_pg1 = NULL; + struct LeapioraidSasPhyP0_t phy_pg0; + struct LeapioraidCfgRep_t mpi_reply; + u16 ioc_status; + u16 sz; + int i; + int rc = 0; + + rc = leapioraid_transport_find_parent_node(ioc, phy); + if (rc) + return rc; + if (!rates->minimum_linkrate) + rates->minimum_linkrate = phy->minimum_linkrate; + else if (rates->minimum_linkrate < phy->minimum_linkrate_hw) + rates->minimum_linkrate = phy->minimum_linkrate_hw; + if (!rates->maximum_linkrate) + rates->maximum_linkrate = phy->maximum_linkrate; + else if (rates->maximum_linkrate > phy->maximum_linkrate_hw) + rates->maximum_linkrate = phy->maximum_linkrate_hw; + if (phy->identify.sas_address != ioc->sas_hba.sas_address) { + phy->minimum_linkrate = rates->minimum_linkrate; + phy->maximum_linkrate = rates->maximum_linkrate; + return leapioraid_transport_expander_phy_control(ioc, phy, + 
LEAPIORAID_SMP_PHY_CONTROL_LINK_RESET); + } + sz = offsetof(struct LeapioraidSasIOUnitP1_t, + PhyData) + + (ioc->sas_hba.num_phys * sizeof(struct LEAPIORAID_SAS_IO_UNIT1_PHY_DATA)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((leapioraid_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; + if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + if (phy->number != i) { + sas_iounit_pg1->PhyData[i].MaxMinLinkRate = + (ioc->sas_hba.phy[i].phy->minimum_linkrate + + (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4)); + } else { + sas_iounit_pg1->PhyData[i].MaxMinLinkRate = + (rates->minimum_linkrate + + (rates->maximum_linkrate << 4)); + } + } + if (leapioraid_config_set_sas_iounit_pg1 + (ioc, &mpi_reply, sas_iounit_pg1, sz)) { + pr_err("%s failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + leapioraid_transport_phy_reset(phy, 0); + if (!leapioraid_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + phy->number)) { + phy->minimum_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate & + LEAPIORAID_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate >> 4); + phy->negotiated_linkrate = + leapioraid_transport_convert_phy_link_rate( + phy_pg0.NegotiatedLinkRate & + LEAPIORAID_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + } +out: + kfree(sas_iounit_pg1); + return rc; +} + +static int +leapioraid_transport_map_smp_buffer( + struct 
device *dev, struct bsg_buffer *buf, + dma_addr_t *dma_addr, size_t *dma_len, void **p) +{ + if (buf->sg_cnt > 1) { + *p = dma_alloc_coherent(dev, buf->payload_len, dma_addr, + GFP_KERNEL); + if (!*p) + return -ENOMEM; + *dma_len = buf->payload_len; + } else { + if (!dma_map_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL)) + return -ENOMEM; + *dma_addr = sg_dma_address(buf->sg_list); + *dma_len = sg_dma_len(buf->sg_list); + *p = NULL; + } + return 0; +} + +static void +leapioraid_transport_unmap_smp_buffer( + struct device *dev, struct bsg_buffer *buf, + dma_addr_t dma_addr, void *p) +{ + if (p) + dma_free_coherent(dev, buf->payload_len, p, dma_addr); + else + dma_unmap_sg(dev, buf->sg_list, 1, DMA_BIDIRECTIONAL); +} + +static void +leapioraid_transport_smp_handler( + struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy) +{ + struct LEAPIORAID_ADAPTER *ioc = shost_priv(shost); + struct LeapioraidSmpPassthroughReq_t *mpi_request; + struct LeapioraidSmpPassthroughRep_t *mpi_reply; + int rc; + u16 smid; + u32 ioc_state; + void *psge; + dma_addr_t dma_addr_in; + dma_addr_t dma_addr_out; + void *addr_in = NULL; + void *addr_out = NULL; + size_t dma_len_in; + size_t dma_len_out; + u16 wait_state_count; + unsigned int reslen = 0; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info("%s %s: host reset in progress!\n", + __func__, ioc->name); + rc = -EFAULT; + goto job_done; + } + rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); + if (rc) + goto job_done; + if (ioc->transport_cmds.status != LEAPIORAID_CMD_NOT_USED) { + pr_err("%s %s: transport_cmds in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->transport_cmds.mutex); + rc = -EAGAIN; + goto job_done; + } + ioc->transport_cmds.status = LEAPIORAID_CMD_PENDING; + rc = leapioraid_transport_map_smp_buffer( + &ioc->pdev->dev, &job->request_payload, + &dma_addr_out, &dma_len_out, &addr_out); + if (rc) + goto out; + if (addr_out) { + sg_copy_to_buffer(job->request_payload.sg_list, + 
job->request_payload.sg_cnt, addr_out, + job->request_payload.payload_len); + } + rc = leapioraid_transport_map_smp_buffer( + &ioc->pdev->dev, &job->reply_payload, + &dma_addr_in, &dma_len_in, &addr_in); + if (rc) + goto unmap_out; + wait_state_count = 0; + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + while (ioc_state != LEAPIORAID_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err( + "%s %s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EFAULT; + goto unmap_in; + } + ssleep(1); + ioc_state = leapioraid_base_get_iocstate(ioc, 1); + pr_info( + "%s %s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info("%s %s: ioc is operational\n", + ioc->name, __func__); + smid = leapioraid_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err("%s %s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto unmap_in; + } + rc = 0; + mpi_request = leapioraid_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + memset(mpi_request, 0, sizeof(struct LeapioraidSmpPassthroughReq_t)); + mpi_request->Function = LEAPIORAID_FUNC_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = leapioraid_transport_get_port_id_by_rphy( + ioc, rphy); + mpi_request->SASAddress = (rphy) ? 
+ cpu_to_le64(rphy->identify.sas_address) : + cpu_to_le64(ioc->sas_hba.sas_address); + mpi_request->RequestDataLength = cpu_to_le16(dma_len_out - 4); + psge = &mpi_request->SGL; + ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in, + dma_len_in - 4); + dtransportprintk(ioc, pr_info( + "%s %s - sending smp request\n", ioc->name, + __func__)); + init_completion(&ioc->transport_cmds.done); + ioc->put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->transport_cmds.done, 10 * HZ); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_COMPLETE)) { + pr_err("%s %s : timeout\n", __func__, ioc->name); + leapioraid_debug_dump_mf(mpi_request, + sizeof(struct LeapioraidSmpPassthroughReq_t) / 4); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_RESET)) { + leapioraid_base_hard_reset_handler(ioc, + FORCE_BIG_HAMMER); + rc = -ETIMEDOUT; + goto unmap_in; + } + } + dtransportprintk(ioc, pr_info( + "%s %s - complete\n", ioc->name, __func__)); + if (!(ioc->transport_cmds.status & LEAPIORAID_CMD_REPLY_VALID)) { + dtransportprintk(ioc, pr_info( + "%s %s - no reply\n", ioc->name, + __func__)); + rc = -ENXIO; + goto unmap_in; + } + mpi_reply = ioc->transport_cmds.reply; + dtransportprintk(ioc, + pr_info( + "%s %s - reply data transfer size(%d)\n", + ioc->name, __func__, + le16_to_cpu(mpi_reply->ResponseDataLength))); + memcpy(job->reply, mpi_reply, sizeof(*mpi_reply)); + job->reply_len = sizeof(*mpi_reply); + reslen = le16_to_cpu(mpi_reply->ResponseDataLength); + if (addr_in) { + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, addr_in, + job->reply_payload.payload_len); + } + rc = 0; +unmap_in: + leapioraid_transport_unmap_smp_buffer( + &ioc->pdev->dev, &job->reply_payload, + dma_addr_in, addr_in); +unmap_out: + leapioraid_transport_unmap_smp_buffer( + &ioc->pdev->dev, &job->request_payload, + dma_addr_out, addr_out); +out: + ioc->transport_cmds.status = LEAPIORAID_CMD_NOT_USED; + mutex_unlock(&ioc->transport_cmds.mutex); 
+job_done: + bsg_job_done(job, rc, reslen); +} + +struct sas_function_template leapioraid_transport_functions = { + .get_linkerrors = leapioraid_transport_get_linkerrors, + .get_enclosure_identifier = leapioraid_transport_get_enclosure_identifier, + .get_bay_identifier = leapioraid_transport_get_bay_identifier, + .phy_reset = leapioraid_transport_phy_reset, + .phy_enable = leapioraid_transport_phy_enable, + .set_phy_speed = leapioraid_transport_phy_speed, + .smp_handler = leapioraid_transport_smp_handler, +}; + +struct scsi_transport_template *leapioraid_transport_template; -- Gitee From 191469ee06fcf778e0c6d805acfc3f6f5b39663b Mon Sep 17 00:00:00 2001 From: Weilin Tong Date: Wed, 18 Dec 2024 11:08:49 +0800 Subject: [PATCH 1975/2138] anolis: mm: modify default CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE ANBZ: #12438 In systems with large memory environments, kmemleak has been failing to allocate sufficient memory from its pool, leading to memory leak detection issues. To address this, the default value of CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE has been increased from its previous setting to 32000. 
Signed-off-by: Weilin Tong Reviewed-by: Baolin Wang Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4245 --- anolis/configs/custom-overrides/debug/default.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/anolis/configs/custom-overrides/debug/default.config b/anolis/configs/custom-overrides/debug/default.config index f0c9f36c8058..d45df92ffa62 100644 --- a/anolis/configs/custom-overrides/debug/default.config +++ b/anolis/configs/custom-overrides/debug/default.config @@ -64,7 +64,7 @@ CONFIG_CONSTRUCTORS=y CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y -CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000 +CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=32000 # CONFIG_DEBUG_KMEMLEAK_TEST is not set # CONFIG_DEBUG_KOBJECT_RELEASE is not set CONFIG_DEBUG_LOCK_ALLOC=y -- Gitee From 52c4b0ecb59a2be45da6aeb614901bf0c5258877 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Wed, 18 Dec 2024 17:27:08 +0800 Subject: [PATCH 1976/2138] anolis: arch/loongarch/kvm: Fix extioi restart issue ANBZ: #12451 When the virtual machine is restarted, the data in extioi is not zeroed, and there is a residual set interrupt bit, resulting in a hang Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4248 Reviewed-by: Juxin Gao --- arch/loongarch/include/asm/kvm_extioi.h | 1 + arch/loongarch/kvm/intc/extioi.c | 21 +++++++++++++++++++++ arch/loongarch/kvm/vcpu.c | 2 ++ 3 files changed, 24 insertions(+) diff --git a/arch/loongarch/include/asm/kvm_extioi.h b/arch/loongarch/include/asm/kvm_extioi.h index d2af039a7d6f..c2bd295d0edc 100644 --- a/arch/loongarch/include/asm/kvm_extioi.h +++ b/arch/loongarch/include/asm/kvm_extioi.h @@ -92,4 +92,5 @@ struct loongarch_extioi { void extioi_set_irq(struct loongarch_extioi *s, int irq, int level); int kvm_loongarch_register_extioi_device(void); +int kvm_loongarch_reset_extioi(struct kvm *kvm); #endif /* LOONGARCH_EXTIOI_H */ diff --git 
a/arch/loongarch/kvm/intc/extioi.c b/arch/loongarch/kvm/intc/extioi.c index 48141823aaa3..5327066f16ae 100644 --- a/arch/loongarch/kvm/intc/extioi.c +++ b/arch/loongarch/kvm/intc/extioi.c @@ -781,3 +781,24 @@ int kvm_loongarch_register_extioi_device(void) return kvm_register_device_ops(&kvm_loongarch_extioi_dev_ops, KVM_DEV_TYPE_LA_EXTIOI); } + +int kvm_loongarch_reset_extioi(struct kvm *kvm) +{ + struct loongarch_extioi *extioi = kvm->arch.extioi; + unsigned long flags; + u8 offset, size; + u8 *pstart; + + if (!extioi) + return -EINVAL; + + pstart = (char *)&extioi->nodetype; + offset = (char *)&extioi->nodetype - (char *)extioi; + size = sizeof(struct loongarch_extioi) - offset; + + loongarch_ext_irq_lock(extioi, flags); + memset(pstart, 0, size); + loongarch_ext_irq_unlock(extioi, flags); + + return 0; +} diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index e9b397543fdf..50bd40d36eb9 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -870,6 +870,8 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu, break; case KVM_REG_LOONGARCH_VCPU_RESET: vcpu->arch.st.guest_addr = 0; + if (vcpu->vcpu_id == 0) + kvm_loongarch_reset_extioi(vcpu->kvm); memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); break; -- Gitee From bad54c72a457ae3a0a3f0343f47f2c84969680c1 Mon Sep 17 00:00:00 2001 From: qiuzhiteng Date: Fri, 13 Dec 2024 13:55:07 +0800 Subject: [PATCH 1977/2138] anolis: x86/mm: Enhanced Hygon processor's processing capabilities for large memory copying. ANBZ: #12183 The following methods are used to improve the large memory copy performance of the Hygon processor between kernel and user mode. Prefetch is a technique for reading blocks of data from main memory at very high data rates, then operating on them within the cache. Results are then written out to memory, all with high efficiency. The code can employ a very special instruction: NT. 
This is a streaming store instruction for writing data to memory. This instruction bypasses the on-chip cache and sends data directly into a write-combining buffer. Because NT allows the CPU to avoid reading the old data from the memory destination address, NT can effectively improve the total write bandwidth. There are similar optimizations for reading data from memory. Interruptions may occur when copying large memory, which may trigger thread switching. You need to save the current MMX register context and continue copying when switching back to the thread next time. Signed-off-by: zhuchao Signed-off-by: qiuzhiteng Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4223 --- arch/x86/Kconfig | 1 + arch/x86/Kconfig.fpu | 62 ++++++ arch/x86/include/asm/fpu/api.h | 37 ++++ arch/x86/include/asm/fpu/sched.h | 58 +++++ arch/x86/include/asm/thread_info.h | 1 + arch/x86/include/asm/uaccess_64.h | 84 ++++++++ arch/x86/kernel/cpu/common.c | 15 ++ arch/x86/kernel/cpu/hygon.c | 89 ++++++++ arch/x86/kernel/fpu/core.c | 108 ++++++++++ arch/x86/kernel/fpu/init.c | 11 + arch/x86/kernel/fpu/xstate.c | 9 + arch/x86/kernel/process_64.c | 8 + arch/x86/lib/Makefile | 2 + arch/x86/lib/copy_user_avx2.S | 335 +++++++++++++++++++++++++++++ arch/x86/lib/copy_user_sse2.S | 255 ++++++++++++++++++++++ 15 files changed, 1075 insertions(+) create mode 100644 arch/x86/Kconfig.fpu create mode 100644 arch/x86/lib/copy_user_avx2.S create mode 100644 arch/x86/lib/copy_user_sse2.S diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1936bbf7d2d1..17099e117c55 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -900,6 +900,7 @@ config INTEL_TDX_GUEST endif # HYPERVISOR_GUEST source "arch/x86/Kconfig.cpu" +source "arch/x86/Kconfig.fpu" config HPET_TIMER def_bool X86_64 diff --git a/arch/x86/Kconfig.fpu b/arch/x86/Kconfig.fpu new file mode 100644 index 000000000000..04a235105186 --- /dev/null +++ b/arch/x86/Kconfig.fpu @@ -0,0 +1,62 @@ +# 
SPDX-License-Identifier: GPL-2.0 + +menuconfig USING_FPU_IN_KERNEL_NONATOMIC + bool "Hygon large memory copy support" + help + This option enables support for optimized large memory copy operations + on Hygon processors in the kernel space using SSE2 or AVX2 non-temporal (NT) + copy instructions. NT instructions are streaming store instructions that bypass + the on-chip cache and send data directly to a write-combining buffer. + + When this option is enabled, you can choose the specific instruction set to use + for large memory copy: SSE2 or AVX2. Using these instruction sets can improve data + throughput and reduce the number of cache misses during memory copy operations. + +if USING_FPU_IN_KERNEL_NONATOMIC + +choice + prompt "X86_HYGON_LMC" + depends on X86_64 && CPU_SUP_HYGON + default X86_HYGON_LMC_SSE2_ON + help + Select the type of non-temporal (NT) copy instructions to use for + large memory copy operations between kernel and user mode. You can + choose between SSE2 or AVX2 instructions based on the processor + capabilities and the size of the memory being copied. + + To use this feature, you also need to configure the data copy size. + The file is in `/sys/c86_features/hygon_c86/nt_cpy_mini_len`. Please + refer to configuration 4096 and above. + +config X86_HYGON_LMC_SSE2_ON + bool "Using sse2 nt copy for large memory copy" + help + When this feature is enabled, the kernel will use the + copy_user_sse2_opt_string function for large memory copy operations. + + SSE2 (Streaming SIMD Extensions 2) instructions support non-temporal + (NT) stores that bypass the CPU cache and write data directly to + memory. This can improve performance for large memory copies by reducing + cache pollution and taking advantage of the write-combining buffer. + + However, using SSE2 NT copy may require saving and restoring MMX and + SSE2 register contexts during thread switching if an interruption occurs. 
+ +config X86_HYGON_LMC_AVX2_ON + bool "Using avx2 nt copy for large memory copy" + help + When this feature is enabled, the kernel will use the + copy_user_avx2_pf64_nt_string function for large memory copy operations. + + AVX2 (Advanced Vector Extensions 2) instructions provide enhanced + vector processing capabilities and support for non-temporal (NT) stores, + which can significantly improve memory copy performance for large blocks + of data. By bypassing the cache and writing data directly to memory, + AVX2 NT copy can achieve higher throughput than SSE2 NT copy. + + Similar to SSE2, using AVX2 NT copy may require saving and restoring + AVX2 register contexts if an interruption occurs during large memory + copying, to ensure the process continues smoothly after resuming. + +endchoice +endif diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h index a2be3aefff9f..c7c3074f383b 100644 --- a/arch/x86/include/asm/fpu/api.h +++ b/arch/x86/include/asm/fpu/api.h @@ -49,6 +49,43 @@ static inline void kernel_fpu_begin(void) #endif } +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) +extern int kernel_fpu_begin_nonatomic_mask(unsigned int kfpu_mask); +extern void kernel_fpu_end_nonatomic(void); + +/* Code that is unaware of kernel_fpu_begin_nonatomic_mask() can use this */ +static inline int kernel_fpu_begin_nonatomic(void) +{ +#ifdef CONFIG_X86_64 + /* + * Any 64-bit code that uses 387 instructions must explicitly request + * KFPU_387. + */ + return kernel_fpu_begin_nonatomic_mask(KFPU_MXCSR); +#else + /* + * 32-bit kernel code may use 387 operations as well as SSE2, etc, + * as long as it checks that the CPU has the required capability. 
+ */ + return kernel_fpu_begin_nonatomic_mask(KFPU_387 | KFPU_MXCSR); +#endif +} + +/* + * It means we call kernel_fpu_end after kernel_fpu_begin_nonatomic + * func, but before kernel_fpu_end_nonatomic + */ +static inline void check_using_kernel_fpu(void) +{ + WARN_ON_ONCE(test_thread_flag(TIF_USING_FPU_NONATOMIC)); +} + +#else +static inline void check_using_kernel_fpu(void) { } + +#endif + /* * Use fpregs_lock() while editing CPU's FPU registers or fpu->fpstate. * A context switch will (and softirq might) save CPU's FPU registers to diff --git a/arch/x86/include/asm/fpu/sched.h b/arch/x86/include/asm/fpu/sched.h index ca6e5e5f16b2..0c02a89b9f88 100644 --- a/arch/x86/include/asm/fpu/sched.h +++ b/arch/x86/include/asm/fpu/sched.h @@ -66,4 +66,62 @@ static inline void switch_fpu_finish(void) set_thread_flag(TIF_NEED_FPU_LOAD); } +/* + * Kernel FPU state switching for scheduling. + * + * This is a two-stage process: + * + * - switch_kernel_fpu_prepare() saves the old kernel fpu state. + * This is done within the context of the old process. + * + * - switch_kernel_fpu_finish() restore new kernel fpu state. + * + * The kernel FPU context is only stored/restored for a user task in kernel + * mode and PF_KTHREAD is used to distinguish between kernel and user threads. 
+ */ +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) +extern void save_fpregs_to_fpkernelstate(struct fpu *kfpu); +extern unsigned long get_fpu_registers_pos(struct fpu *fpu, unsigned int off); +static inline void switch_kernel_fpu_prepare(struct task_struct *prev, int cpu) +{ + struct fpu *old_fpu = &prev->thread.fpu; + + if (!test_thread_flag(TIF_USING_FPU_NONATOMIC)) + return; + + if (static_cpu_has(X86_FEATURE_FPU) && !(prev->flags & PF_KTHREAD)) + save_fpregs_to_fpkernelstate(old_fpu); +} + +/* Internal helper for switch_kernel_fpu_finish() and signal frame setup */ +static inline void fpregs_restore_kernelregs(struct fpu *kfpu) +{ + kernel_fpu_states_restore(NULL, (void *)get_fpu_registers_pos(kfpu, MAX_FPU_CTX_SIZE), + MAX_FPU_CTX_SIZE); +} + +/* Loading of the complete FPU state immediately. */ +static inline void switch_kernel_fpu_finish(struct task_struct *next) +{ + struct fpu *new_fpu = &next->thread.fpu; + + if (next->flags & PF_KTHREAD) + return; + + if (cpu_feature_enabled(X86_FEATURE_FPU) && + test_ti_thread_flag((struct thread_info *)next, + TIF_USING_FPU_NONATOMIC)) + fpregs_restore_kernelregs(new_fpu); +} +#else +static inline void switch_kernel_fpu_prepare(struct task_struct *prev, int cpu) +{ +} +static inline void switch_kernel_fpu_finish(struct task_struct *next) +{ +} + +#endif + #endif /* _ASM_X86_FPU_SCHED_H */ diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 2c6554052ace..1c62f52a6ae8 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -102,6 +102,7 @@ struct thread_info { #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */ #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */ +#define TIF_USING_FPU_NONATOMIC 30 /* using fpu in kernel non-atomic context */ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SIGPENDING 
(1 << TIF_SIGPENDING) diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index f2c02e4469cc..e42507ec4b1d 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -11,6 +11,12 @@ #include #include #include +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) +#include +#endif + +extern struct static_key_false hygon_lmc_key; #ifdef CONFIG_ADDRESS_MASKING /* @@ -97,6 +103,74 @@ static inline bool __access_ok(const void __user *ptr, unsigned long size) * Copy To/From Userspace */ +#ifdef CONFIG_X86_HYGON_LMC_SSE2_ON +void fpu_save_xmm0_3(void *to, const void *from, unsigned long len); +void fpu_restore_xmm0_3(void *to, const void *from, unsigned long len); + +#define kernel_fpu_states_save fpu_save_xmm0_3 +#define kernel_fpu_states_restore fpu_restore_xmm0_3 + +__must_check unsigned long copy_user_sse2_opt_string(void *to, const void *from, + unsigned long len); + +#define MAX_FPU_CTX_SIZE 64 +#define KERNEL_FPU_NONATOMIC_SIZE (2 * (MAX_FPU_CTX_SIZE)) + +#define copy_user_large_memory_generic_string copy_user_sse2_opt_string + +#endif + +#ifdef CONFIG_X86_HYGON_LMC_AVX2_ON +void fpu_save_ymm0_7(void *to, const void *from, unsigned long len); +void fpu_restore_ymm0_7(void *to, const void *from, unsigned long len); + +#define kernel_fpu_states_save fpu_save_ymm0_7 +#define kernel_fpu_states_restore fpu_restore_ymm0_7 + +__must_check unsigned long +copy_user_avx2_pf64_nt_string(void *to, const void *from, unsigned long len); + +#define MAX_FPU_CTX_SIZE 256 +#define KERNEL_FPU_NONATOMIC_SIZE (2 * (MAX_FPU_CTX_SIZE)) + +#define copy_user_large_memory_generic_string copy_user_avx2_pf64_nt_string +#endif + +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) +unsigned int get_nt_block_copy_mini_len(void); +static inline bool Hygon_LMC_check(unsigned long len) +{ + unsigned int nt_blk_cpy_mini_len = get_nt_block_copy_mini_len(); + + if 
(((nt_blk_cpy_mini_len) && (nt_blk_cpy_mini_len <= len) && + (system_state == SYSTEM_RUNNING) && + (!kernel_fpu_begin_nonatomic()))) + return true; + else + return false; +} +static inline unsigned long +copy_large_memory_generic_string(void *to, const void *from, unsigned long len) +{ + unsigned long ret; + + ret = copy_user_large_memory_generic_string(to, from, len); + kernel_fpu_end_nonatomic(); + return ret; +} +#else +static inline bool Hygon_LMC_check(unsigned long len) +{ + return false; +} +static inline unsigned long +copy_large_memory_generic_string(void *to, const void *from, unsigned long len) +{ + return 0; +} +#endif + /* Handles exceptions in both to and from, but doesn't do access_ok */ __must_check unsigned long rep_movs_alternative(void *to, const void *from, unsigned len); @@ -104,6 +178,16 @@ rep_movs_alternative(void *to, const void *from, unsigned len); static __always_inline __must_check unsigned long copy_user_generic(void *to, const void *from, unsigned long len) { + /* Check if Hygon large memory copy support enabled. */ + if (static_branch_unlikely(&hygon_lmc_key)) { + if (Hygon_LMC_check(len)) { + unsigned long ret; + + ret = copy_large_memory_generic_string(to, from, len); + return ret; + } + } + stac(); /* * If CPU has FSRM feature, use 'rep movs'. 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index a844110691f9..8bb0d2bd8f2c 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -86,6 +86,9 @@ EXPORT_SYMBOL_GPL(get_llc_id); /* L2 cache ID of each logical CPU */ DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id) = BAD_APICID; +DEFINE_STATIC_KEY_FALSE(hygon_lmc_key); +EXPORT_SYMBOL_GPL(hygon_lmc_key); + static struct ppin_info { int feature; int msr_ppin_ctl; @@ -2398,6 +2401,17 @@ void arch_smt_update(void) apic_smt_update(); } +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) +static inline void update_lmc_branch_cond(void) +{ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + static_branch_enable(&hygon_lmc_key); +} +#else +static inline void update_lmc_branch_cond(void) { } +#endif + void __init arch_cpu_finalize_init(void) { identify_boot_cpu(); @@ -2416,6 +2430,7 @@ void __init arch_cpu_finalize_init(void) cpu_select_mitigations(); arch_smt_update(); + update_lmc_branch_cond(); if (IS_ENABLED(CONFIG_X86_32)) { /* diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 4b90eeb5110d..b3b26ded3bc9 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -15,6 +15,9 @@ #include #include #include +#include +#include +#include #include #include "cpu.h" @@ -464,3 +467,89 @@ static const struct cpu_dev hygon_cpu_dev = { }; cpu_dev_register(hygon_cpu_dev); + +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) +struct hygon_c86_info { + unsigned int nt_cpy_mini_len; +}; + +static struct hygon_c86_info hygon_c86_data = { .nt_cpy_mini_len = 0 }; + +void set_c86_features_para_invalid(void) +{ + memset((void *)&hygon_c86_data, 0, sizeof(struct hygon_c86_info)); +} + +unsigned int get_nt_block_copy_mini_len(void) +{ + unsigned int mini_len = hygon_c86_data.nt_cpy_mini_len; + + return mini_len; +} +EXPORT_SYMBOL_GPL(get_nt_block_copy_mini_len); + +static 
ssize_t show_nt_cpy_mini_len(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return snprintf(buf, 40, "%d\n", hygon_c86_data.nt_cpy_mini_len); +} + +static ssize_t store_nt_cpy_mini_len(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + unsigned long val; + ssize_t ret; + + ret = kstrtoul(buf, 0, &val); + if (ret) + return ret; + + hygon_c86_data.nt_cpy_mini_len = val; + + return count; +} + +static struct kobj_attribute nt_cpy_mini_len_attribute = __ATTR( + nt_cpy_mini_len, 0600, show_nt_cpy_mini_len, store_nt_cpy_mini_len); + +static struct attribute *c86_default_attrs[] = { + &nt_cpy_mini_len_attribute.attr, NULL +}; + +const struct attribute_group hygon_c86_attr_group = { + .attrs = c86_default_attrs, + .name = "hygon_c86", +}; + +static struct kobject *c86_features_kobj; +static int __init kobject_hygon_c86_init(void) +{ + int ret; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + goto err_out; + + c86_features_kobj = kobject_create_and_add("c86_features", NULL); + + if (c86_features_kobj) { + ret = sysfs_create_group(c86_features_kobj, + &hygon_c86_attr_group); + if (ret) + goto err_out; + } + + return 0; +err_out: + set_c86_features_para_invalid(); + if (c86_features_kobj) { + sysfs_remove_group(c86_features_kobj, &hygon_c86_attr_group); + kobject_del(c86_features_kobj); + } + + return -1; +} +subsys_initcall(kobject_hygon_c86_init); + +#endif diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 4b414b0ab069..510570b569e9 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -33,6 +33,8 @@ DEFINE_STATIC_KEY_FALSE(__fpu_state_size_dynamic); DEFINE_PER_CPU(u64, xfd_state); #endif +extern struct static_key_false hygon_lmc_key; + /* The FPU state configuration data for kernel and user space */ struct fpu_state_config fpu_kernel_cfg __ro_after_init; struct fpu_state_config fpu_user_cfg __ro_after_init; @@ -421,6 +423,8 @@ 
EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate); void kernel_fpu_begin_mask(unsigned int kfpu_mask) { preempt_disable(); + if (static_branch_unlikely(&hygon_lmc_key)) + check_using_kernel_fpu(); WARN_ON_FPU(!irq_fpu_usable()); WARN_ON_FPU(this_cpu_read(in_kernel_fpu)); @@ -445,6 +449,9 @@ EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask); void kernel_fpu_end(void) { + if (static_branch_unlikely(&hygon_lmc_key)) + check_using_kernel_fpu(); + WARN_ON_FPU(!this_cpu_read(in_kernel_fpu)); this_cpu_write(in_kernel_fpu, false); @@ -452,6 +459,107 @@ void kernel_fpu_end(void) } EXPORT_SYMBOL_GPL(kernel_fpu_end); +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) + +extern unsigned int fpu_kernel_nonatomic_xstate_size; +unsigned int get_fpustate_free_space(struct fpu *fpu) +{ + if ((fpu_kernel_cfg.default_size + fpu_kernel_nonatomic_xstate_size) > + sizeof(fpu->fpstate->regs)) + return 0; + return fpu_kernel_nonatomic_xstate_size; +} + +unsigned long get_fpu_registers_pos(struct fpu *fpu, unsigned int off) +{ + unsigned long addr = 0; + + if (fpu && (fpu_kernel_nonatomic_xstate_size > off)) { + addr = (unsigned long)&fpu->__fpstate.regs.__padding[0]; + addr += fpu_kernel_cfg.default_size + off; + } + return addr; +} + +/* + * We can call kernel_fpu_begin_nonatomic in non-atomic task context. + */ +int kernel_fpu_begin_nonatomic_mask(unsigned int kfpu_mask) +{ + preempt_disable(); + /* we not support Nested call */ + if (test_thread_flag(TIF_USING_FPU_NONATOMIC)) + goto err; + + if (KERNEL_FPU_NONATOMIC_SIZE > + get_fpustate_free_space(¤t->thread.fpu)) + goto err; + + /* + * This means we call kernel_fpu_begin_nonatomic after kernel_fpu_begin, + * but before kernel_fpu_end. 
+ */ + if (this_cpu_read(in_kernel_fpu)) + goto err; + + if (in_interrupt()) + goto err; + + if (current->flags & PF_KTHREAD) + goto err; + + if (!test_thread_flag(TIF_NEED_FPU_LOAD)) { + set_thread_flag(TIF_NEED_FPU_LOAD); + save_fpregs_to_fpstate(¤t->thread.fpu); + } + /* Set thread flag: TIC_USING_FPU_NONATOMIC */ + set_thread_flag(TIF_USING_FPU_NONATOMIC); + + __cpu_invalidate_fpregs_state(); + + /* Put sane initial values into the control registers. */ + if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM)) + ldmxcsr(MXCSR_DEFAULT); + + if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU)) + asm volatile ("fninit"); + + preempt_enable(); + + return 0; + +err: + preempt_enable(); + + return -1; +} +EXPORT_SYMBOL_GPL(kernel_fpu_begin_nonatomic_mask); + +void kernel_fpu_end_nonatomic(void) +{ + preempt_disable(); + /* + * This means we call kernel_fpu_end_nonatomic after kernel_fpu_begin, + * but before kernel_fpu_end. + */ + WARN_ON_FPU(this_cpu_read(in_kernel_fpu)); + + WARN_ON_FPU(!test_thread_flag(TIF_USING_FPU_NONATOMIC)); + + clear_thread_flag(TIF_USING_FPU_NONATOMIC); + preempt_enable(); +} +EXPORT_SYMBOL_GPL(kernel_fpu_end_nonatomic); + +void save_fpregs_to_fpkernelstate(struct fpu *kfpu) +{ + kernel_fpu_states_save((void *)get_fpu_registers_pos(kfpu, + MAX_FPU_CTX_SIZE), + NULL, MAX_FPU_CTX_SIZE); +} +#endif + /* * Sync the FPU register state to current's memory register state when the * current task owns the FPU. The hardware register state is preserved. 
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 998a08f17e33..e55abf20aa18 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -133,6 +133,12 @@ static void __init fpu__init_system_generic(void) fpu__init_system_mxcsr(); } +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) +unsigned int fpu_kernel_nonatomic_xstate_size; +EXPORT_SYMBOL_GPL(fpu_kernel_nonatomic_xstate_size); +#endif + /* * Enforce that 'MEMBER' is the last field of 'TYPE'. * @@ -161,6 +167,11 @@ static void __init fpu__init_task_struct_size(void) * size. */ task_size += fpu_kernel_cfg.default_size; +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + task_size += fpu_kernel_nonatomic_xstate_size; +#endif /* * We dynamically size 'struct fpu', so we require that diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 255ff8f6c527..c1ebfe4b4a8d 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -677,6 +677,11 @@ static unsigned int __init get_xsave_size_user(void) static int __init init_xstate_size(void) { +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) + extern unsigned int fpu_kernel_nonatomic_xstate_size; +#endif + /* Recompute the context size for enabled features: */ unsigned int user_size, kernel_size, kernel_default_size; bool compacted = cpu_feature_enabled(X86_FEATURE_XCOMPACTED); @@ -710,6 +715,10 @@ static int __init init_xstate_size(void) fpu_user_cfg.default_size = xstate_calculate_size(fpu_user_cfg.default_features, false); +#if defined(CONFIG_X86_HYGON_LMC_SSE2_ON) || \ + defined(CONFIG_X86_HYGON_LMC_AVX2_ON) + fpu_kernel_nonatomic_xstate_size = KERNEL_FPU_NONATOMIC_SIZE; +#endif return 0; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index fa04091e2f4e..fbadfa84a02d 100644 --- 
a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -63,6 +63,8 @@ #include "process.h" +extern struct static_key_false hygon_lmc_key; + /* Prints also some state that isn't saved in the pt_regs */ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode, const char *log_lvl) @@ -571,6 +573,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) if (!test_thread_flag(TIF_NEED_FPU_LOAD)) switch_fpu_prepare(prev_fpu, cpu); + if (static_branch_unlikely(&hygon_lmc_key)) + switch_kernel_fpu_prepare(prev_p, cpu); + /* We must save %fs and %gs before load_TLS() because * %fs and %gs may be cleared by load_TLS(). * @@ -625,6 +630,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) switch_fpu_finish(); + if (static_branch_unlikely(&hygon_lmc_key)) + switch_kernel_fpu_finish(next_p); + /* Reload sp0. */ update_task_stack(next_p); diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index f0dae4fb6d07..b6a951dd3439 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -60,5 +60,7 @@ endif lib-y += clear_page_64.o copy_page_64.o lib-y += memmove_64.o memset_64.o lib-y += copy_user_64.o copy_user_uncached_64.o + lib-$(CONFIG_X86_HYGON_LMC_SSE2_ON) += copy_user_sse2.o + lib-$(CONFIG_X86_HYGON_LMC_AVX2_ON) += copy_user_avx2.o lib-y += cmpxchg16b_emu.o endif diff --git a/arch/x86/lib/copy_user_avx2.S b/arch/x86/lib/copy_user_avx2.S new file mode 100644 index 000000000000..a2a785aaccb2 --- /dev/null +++ b/arch/x86/lib/copy_user_avx2.S @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright © 2011 Siarhei Siamashka + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit 
persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PREFETCH_DISTANCE 64 + +#define PREFETCH(addr) prefetchnta addr + +.macro ALIGN_DESTINATION_32 + /* check for bad alignment of destination, there is 32Bytes, for we will use vmovntdq */ + /* if <32Bytes, jb .Lcopy_user_string */ + cmpq $32, %rdx + jb .Lcopy_user_string + + /* + * Adjust unaligned destination addresses, for 32-bit aligned ones, + * only the lower 32 bits need to be checked. 
+ */ + movl %edi, %ecx + andl $31, %ecx + jz .Lcopy_user_string /* already aligned */ + + subl $32, %ecx + negl %ecx + subl %ecx, %edx + +300: + movb (%rsi), %al +301: + movb %al, (%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz 300b + jmp .Lcopy_user_string + +303: + addl %ecx,%edx/* ecx is zerorest also */ + jmp Lavx2_copy_user_handle_tail + + _ASM_EXTABLE_UA(300b, 303b) + _ASM_EXTABLE_UA(301b, 303b) + +.Lcopy_user_string: +.endm + +/* + * large block copy, use avx2 nt & prefetchnta + */ +SYM_FUNC_START(copy_user_avx2_pf64_nt_string) + ASM_STAC + ALIGN_DESTINATION_32 + + /* if len < 256 jmp to Lless_than_256_bytes_cpy */ + cmpq $256, %rdx + jb Lless_than_256_bytes_cpy + + /* + * Check if src is aligned, for 32-bit aligned ones, + * only the lower 32 bits need to be checked. + */ + movl %esi, %ecx /* check if src is aligned */ + andl $31, %ecx + jnz large_block_nt_unaligned_cpy + +large_block_nt_aligned_cpy: + PREFETCH(PREFETCH_DISTANCE(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 64)(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 128)(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 192)(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 256)(%rsi)) + +32: + vmovdqa 0(%rsi), %ymm0 +33: + vmovdqa 32(%rsi), %ymm1 +34: + vmovdqa 64(%rsi), %ymm2 +35: + vmovdqa 96(%rsi), %ymm3 +36: + vmovdqa 128(%rsi), %ymm4 +37: + vmovdqa 160(%rsi), %ymm5 +38: + vmovdqa 192(%rsi), %ymm6 +39: + vmovdqa 224(%rsi), %ymm7 + +40: + vmovntdq %ymm0, 0(%rdi) +41: + vmovntdq %ymm1, 32(%rdi) +42: + vmovntdq %ymm2, 64(%rdi) +43: + vmovntdq %ymm3, 96(%rdi) +44: + vmovntdq %ymm4, 128(%rdi) +45: + vmovntdq %ymm5, 160(%rdi) +46: + vmovntdq %ymm6, 192(%rdi) +47: + vmovntdq %ymm7, 224(%rdi) + + add $256, %rsi + add $256, %rdi + subq $256, %rdx + cmpq $256, %rdx + jg large_block_nt_aligned_cpy + + vzeroupper + sfence + jmp Lless_than_256_bytes_cpy + +large_block_nt_unaligned_cpy: + PREFETCH(PREFETCH_DISTANCE(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 64)(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 128)(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 
192)(%rsi)) + PREFETCH((PREFETCH_DISTANCE + 256)(%rsi)) + +48: + vmovdqu 0(%rsi), %ymm0 +49: + vmovdqu 32(%rsi), %ymm1 +50: + vmovdqu 64(%rsi), %ymm2 +51: + vmovdqu 96(%rsi), %ymm3 +52: + vmovdqu 128(%rsi), %ymm4 +53: + vmovdqu 160(%rsi), %ymm5 +54: + vmovdqu 192(%rsi), %ymm6 +55: + vmovdqu 224(%rsi), %ymm7 + +56: + vmovntdq %ymm0, 0(%rdi) +57: + vmovntdq %ymm1, 32(%rdi) +58: + vmovntdq %ymm2, 64(%rdi) +59: + vmovntdq %ymm3, 96(%rdi) +60: + vmovntdq %ymm4, 128(%rdi) +61: + vmovntdq %ymm5, 160(%rdi) +62: + vmovntdq %ymm6, 192(%rdi) +63: + vmovntdq %ymm7, 224(%rdi) + + add $256, %rsi + add $256, %rdi + subq $256, %rdx + cmpq $256, %rdx + jg large_block_nt_unaligned_cpy + + vzeroupper + sfence + jmp Lless_than_256_bytes_cpy + +88: + vzeroupper + jmp Lavx2_copy_user_handle_tail + + _ASM_EXTABLE_UA(32b, 88b) + _ASM_EXTABLE_UA(33b, 88b) + _ASM_EXTABLE_UA(34b, 88b) + _ASM_EXTABLE_UA(35b, 88b) + _ASM_EXTABLE_UA(36b, 88b) + _ASM_EXTABLE_UA(37b, 88b) + _ASM_EXTABLE_UA(38b, 88b) + _ASM_EXTABLE_UA(39b, 88b) + + _ASM_EXTABLE_UA(40b, 88b) + _ASM_EXTABLE_UA(41b, 88b) + _ASM_EXTABLE_UA(42b, 88b) + _ASM_EXTABLE_UA(43b, 88b) + _ASM_EXTABLE_UA(44b, 88b) + _ASM_EXTABLE_UA(45b, 88b) + _ASM_EXTABLE_UA(46b, 88b) + _ASM_EXTABLE_UA(47b, 88b) + _ASM_EXTABLE_UA(48b, 88b) + _ASM_EXTABLE_UA(49b, 88b) + + _ASM_EXTABLE_UA(50b, 88b) + _ASM_EXTABLE_UA(51b, 88b) + _ASM_EXTABLE_UA(52b, 88b) + _ASM_EXTABLE_UA(53b, 88b) + _ASM_EXTABLE_UA(54b, 88b) + _ASM_EXTABLE_UA(55b, 88b) + _ASM_EXTABLE_UA(56b, 88b) + _ASM_EXTABLE_UA(57b, 88b) + _ASM_EXTABLE_UA(58b, 88b) + _ASM_EXTABLE_UA(59b, 88b) + + _ASM_EXTABLE_UA(60b, 88b) + _ASM_EXTABLE_UA(61b, 88b) + _ASM_EXTABLE_UA(62b, 88b) + _ASM_EXTABLE_UA(63b, 88b) +SYM_FUNC_END(copy_user_avx2_pf64_nt_string) +EXPORT_SYMBOL(copy_user_avx2_pf64_nt_string) + +/* + * If len < 256 bytes, then we use rep mov directly. + * + * Input: + * rdi destination + * rsi source + * edx len + * + * Output: + * eax uncopied bytes or 0 if successful. 
+ */ +SYM_CODE_START_LOCAL(Lless_than_256_bytes_cpy) + movl %edx, %ecx +90: + rep movsb + + xorl %eax,%eax + ASM_CLAC + RET + +99: + mov %ecx,%eax + + ASM_CLAC + RET + + _ASM_EXTABLE_UA(90b, 99b) +SYM_CODE_END(Lless_than_256_bytes_cpy) + +/* + * Try to copy last bytes and clear the rest if needed. + * Since protection fault in copy_from/to_user is not a normal situation, + * it is not necessary to optimize tail handling. + * Don't try to copy the tail if machine check happened + * + * Input: + * rdi destination + * rsi source + * rdx count + * + * Output: + * eax uncopied bytes or 0 if successful. + */ + +SYM_CODE_START_LOCAL(Lavx2_copy_user_handle_tail) + movq %rdx,%rcx + cmp $X86_TRAP_MC,%eax /* check if X86_TRAP_MC */ + je 3f + +1: rep movsb +2: mov %rcx,%rax + + ASM_CLAC + RET + +3: xorl %eax,%eax + ASM_CLAC + RET + + _ASM_EXTABLE_UA(1b, 2b) +SYM_CODE_END(Lavx2_copy_user_handle_tail) + +/* + * Called when task schedule. we call fpu_save_%ymm0_7 to save old + * task's fpu states and we call fpu_restore_%ymm0_7 to restore new + * task's fpu states. 
+ */ +SYM_FUNC_START(fpu_restore_ymm0_7) + vmovdqu 0(%rsi), %ymm0 + vmovdqu 32(%rsi), %ymm1 + vmovdqu 64(%rsi), %ymm2 + vmovdqu 96(%rsi), %ymm3 + vmovdqu 128(%rsi), %ymm4 + vmovdqu 160(%rsi), %ymm5 + vmovdqu 192(%rsi), %ymm6 + vmovdqu 224(%rsi), %ymm7 + + xorl %eax,%eax + RET//ret +SYM_FUNC_END(fpu_restore_ymm0_7) +EXPORT_SYMBOL(fpu_restore_ymm0_7) + +SYM_FUNC_START(fpu_save_ymm0_7) + vmovdqu %ymm0, 0(%rdi) + vmovdqu %ymm1, 32(%rdi) + vmovdqu %ymm2, 64(%rdi) + vmovdqu %ymm3, 96(%rdi) + vmovdqu %ymm4, 128(%rdi) + vmovdqu %ymm5, 160(%rdi) + vmovdqu %ymm6, 192(%rdi) + vmovdqu %ymm7, 224(%rdi) + + xorl %eax,%eax + RET +SYM_FUNC_END(fpu_save_ymm0_7) +EXPORT_SYMBOL(fpu_save_ymm0_7) diff --git a/arch/x86/lib/copy_user_sse2.S b/arch/x86/lib/copy_user_sse2.S new file mode 100644 index 000000000000..5422ff03ce2e --- /dev/null +++ b/arch/x86/lib/copy_user_sse2.S @@ -0,0 +1,255 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright © 2011 Siarhei Siamashka + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PREFETCH_DISTANCE 256 + +.macro ALIGN_DESTINATION_16 + /* check for bad alignment of destination, there is 16Bytes, for we will use movdqa */ + /* if len<16Bytes, jb .Lcopy_user_string */ + cmpq $16,%rdx + jb .Lcopy_user_string + + /* + * Adjust unaligned destination addresses, for 16-bit aligned ones, + * only the lower 32 bits need to be checked. + */ + movl %edi,%ecx + andl $15,%ecx + jz .Lcopy_user_string /* already aligned */ + + subl $16,%ecx + negl %ecx + subl %ecx,%edx + +200: + movb (%rsi),%al +201: + movb %al,(%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz 200b + jmp .Lcopy_user_string + +203: + addl %ecx,%edx/* ecx is zerorest also */ + jmp Lsse2_copy_user_handle_tail + + _ASM_EXTABLE_UA(200b, 203b) + _ASM_EXTABLE_UA(201b, 203b) + +.Lcopy_user_string: +.endm +/*****************************************************************************/ +SYM_FUNC_START(copy_user_sse2_opt_string) + ASM_STAC + ALIGN_DESTINATION_16 + + cmpq $64,%rdx + jb 70f /* less then 64 bytes, avoid the costly 'rep' */ + + /* + * Check if src is aligned, for 16-bit aligned ones, + * only the lower 32 bits need to be checked. 
+ */ + movl %esi,%ecx + andl $15,%ecx + jnz 20f + +10: + prefetchnta PREFETCH_DISTANCE(%rsi) +11: + prefetchnta (PREFETCH_DISTANCE + 32)(%rsi) +12: + movdqa (%rsi),%xmm0 +13: + movdqa 16(%rsi),%xmm1 +14: + movdqa 32(%rsi),%xmm2 +15: + movdqa 48(%rsi),%xmm3 +16: + movntdq %xmm0,0(%rdi) +17: + movntdq %xmm1,16(%rdi) +18: + movntdq %xmm2,32(%rdi) +19: + movntdq %xmm3,48(%rdi) + add $64,%rsi + add $64,%rdi + subq $64,%rdx + cmpq $64,%rdx + jg 10b + sfence + jmp 70f + +20: + prefetchnta PREFETCH_DISTANCE(%rsi) +21: + prefetchnta (PREFETCH_DISTANCE + 32)(%rsi) +22: + movdqu (%rsi),%xmm0 +23: + movdqu 16(%rsi),%xmm1 +24: + movdqu 32(%rsi),%xmm2 +25: + movdqu 48(%rsi),%xmm3 +26: + movntdq %xmm0,0(%rdi) +27: + movntdq %xmm1,16(%rdi) +28: + movntdq %xmm2,32(%rdi) +29: + movntdq %xmm3,48(%rdi) + add $64,%rsi + add $64,%rdi + subq $64,%rdx + cmpq $64,%rdx + jg 20b + sfence + +70: + movl %edx,%ecx +80: + rep + movsb + + xorl %eax,%eax + ASM_CLAC + RET//ret + +99: + movl %ecx,%edx /* ecx is zerorest also */ +100: + sfence + jmp Lsse2_copy_user_handle_tail + + _ASM_EXTABLE_UA(10b, 100b) + _ASM_EXTABLE_UA(11b, 100b) + _ASM_EXTABLE_UA(12b, 100b) + _ASM_EXTABLE_UA(13b, 100b) + _ASM_EXTABLE_UA(14b, 100b) + _ASM_EXTABLE_UA(15b, 100b) + _ASM_EXTABLE_UA(16b, 100b) + _ASM_EXTABLE_UA(17b, 100b) + _ASM_EXTABLE_UA(18b, 100b) + _ASM_EXTABLE_UA(19b, 100b) + + _ASM_EXTABLE_UA(20b, 100b) + _ASM_EXTABLE_UA(21b, 100b) + _ASM_EXTABLE_UA(22b, 100b) + _ASM_EXTABLE_UA(23b, 100b) + _ASM_EXTABLE_UA(24b, 100b) + _ASM_EXTABLE_UA(25b, 100b) + _ASM_EXTABLE_UA(26b, 100b) + _ASM_EXTABLE_UA(27b, 100b) + _ASM_EXTABLE_UA(28b, 100b) + _ASM_EXTABLE_UA(29b, 100b) + + _ASM_EXTABLE_UA(80b, 99b) +SYM_FUNC_END(copy_user_sse2_opt_string) +EXPORT_SYMBOL(copy_user_sse2_opt_string) + +SYM_FUNC_START(fpu_restore_xmm0_3) + ASM_STAC + movdqu (%rsi),%xmm0 + movdqu 16(%rsi),%xmm1 + movdqu 32(%rsi),%xmm2 + movdqu 48(%rsi),%xmm3 + + xorl %eax,%eax + ASM_CLAC + RET//ret +SYM_FUNC_END(fpu_restore_xmm0_3) 
+EXPORT_SYMBOL(fpu_restore_xmm0_3) + +SYM_FUNC_START(fpu_save_xmm0_3) + ASM_STAC + + movdqu %xmm0,(%rdi) + movdqu %xmm1,16(%rdi) + movdqu %xmm2,32(%rdi) + movdqu %xmm3,48(%rdi) + + xorl %eax,%eax + ASM_CLAC + RET//ret +SYM_FUNC_END(fpu_save_xmm0_3) +EXPORT_SYMBOL(fpu_save_xmm0_3) + +/* + * Try to copy last bytes and clear the rest if needed. + * Since protection fault in copy_from/to_user is not a normal situation, + * it is not necessary to optimize tail handling. + * Don't try to copy the tail if machine check happened + * + * Input: + * rdi destination + * rsi source + * rdx count + * + * Output: + * eax uncopied bytes or 0 if successful. + */ +SYM_CODE_START_LOCAL(Lsse2_copy_user_handle_tail) + movq %rdx,%rcx + /* + * The trap number and error code are both 32 bits. + */ + cmp $X86_TRAP_MC,%eax /* check if X86_TRAP_MC */ + je 3f +1: rep movsb +2: mov %rcx,%rax + ASM_CLAC + RET + + /* + * Return zero to pretend that this copy succeeded. This + * is counter-intuitive, but needed to prevent the code + * in lib/iov_iter.c from retrying and running back into + * the poison cache line again. The machine check handler + * will ensure that a SIGBUS is sent to the task. + */ +3: xorl %eax,%eax + ASM_CLAC + RET + + _ASM_EXTABLE_UA(1b, 2b) +SYM_CODE_END(Lsse2_copy_user_handle_tail) + +/*****************************************************************************/ -- Gitee From d322033d05cb0879b04e846f63d863be7a915bda Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Wed, 14 Aug 2024 14:02:47 +1200 Subject: [PATCH 1978/2138] mm: override mTHP "enabled" defaults at kernel cmdline ANBZ: #9728 commit dd4d30d1cdbe826e6569b44453c2d9bb9424d234 upstream Add thp_anon= cmdline parameter to allow specifying the default enablement of each supported anon THP size. 
The parameter accepts the following format and can be provided multiple times to configure each size: thp_anon=,[KMG]:;-[KMG]: An example: thp_anon=16K-64K:always;128K,512K:inherit;256K:madvise;1M-2M:never See Documentation/admin-guide/mm/transhuge.rst for more details. Configuring the defaults at boot time is useful to allow early user space to take advantage of mTHP before its been configured through sysfs. [v-songbaohua@oppo.com: use get_oder() and check size is is_power_of_2] Link: https://lkml.kernel.org/r/20240814224635.43272-1-21cnbao@gmail.com [ryan.roberts@arm.com: some minor cleanup according to David's comments] Link: https://lkml.kernel.org/r/20240820105244.62703-1-21cnbao@gmail.com Link: https://lkml.kernel.org/r/20240814020247.67297-1-21cnbao@gmail.com Signed-off-by: Ryan Roberts Co-developed-by: Barry Song Signed-off-by: Barry Song Reviewed-by: Baolin Wang Tested-by: Baolin Wang Acked-by: David Hildenbrand Cc: Jonathan Corbet Cc: Lance Yang Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4360 --- .../admin-guide/kernel-parameters.txt | 9 ++ Documentation/admin-guide/mm/transhuge.rst | 38 +++++-- mm/huge_memory.c | 98 ++++++++++++++++++- 3 files changed, 137 insertions(+), 8 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index d2629f0c8288..dfefaf652114 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -6427,6 +6427,15 @@ : poll all this frequency 0: no polling (default) + thp_anon= [KNL] + Format: ,[KMG]:;-[KMG]: + state is one of "always", "madvise", "never" or "inherit". + Control the default behavior of the system with respect + to anonymous transparent hugepages. + Can be used multiple times for multiple anon THP sizes. + See Documentation/admin-guide/mm/transhuge.rst for more + details. 
+ threadirqs [KNL] Force threading of all interrupt handlers except those marked explicitly IRQF_NO_THREAD. diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 4ece123bc5e6..407cd34d4479 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -283,13 +283,37 @@ processes. Exceeding the number would block the collapse:: A higher value may increase memory footprint for some workloads. -Boot parameter -============== - -You can change the sysfs boot time defaults of Transparent Hugepage -Support by passing the parameter ``transparent_hugepage=always`` or -``transparent_hugepage=madvise`` or ``transparent_hugepage=never`` -to the kernel command line. +Boot parameters +=============== + +You can change the sysfs boot time default for the top-level "enabled" +control by passing the parameter ``transparent_hugepage=always`` or +``transparent_hugepage=madvise`` or ``transparent_hugepage=never`` to the +kernel command line. + +Alternatively, each supported anonymous THP size can be controlled by +passing ``thp_anon=,[KMG]:;-[KMG]:``, +where ```` is the THP size (must be a power of 2 of PAGE_SIZE and +supported anonymous THP) and ```` is one of ``always``, ``madvise``, +``never`` or ``inherit``. + +For example, the following will set 16K, 32K, 64K THP to ``always``, +set 128K, 512K to ``inherit``, set 256K to ``madvise`` and 1M, 2M +to ``never``:: + + thp_anon=16K-64K:always;128K,512K:inherit;256K:madvise;1M-2M:never + +``thp_anon=`` may be specified multiple times to configure all THP sizes as +required. If ``thp_anon=`` is specified at least once, any anon THP sizes +not explicitly configured on the command line are implicitly set to +``never``. + +``transparent_hugepage`` setting only affects the global toggle. If +``thp_anon`` is not specified, PMD_ORDER THP will default to ``inherit``. 
+However, if a valid ``thp_anon`` setting is provided by the user, the +PMD_ORDER THP policy will be overridden. If the policy for PMD_ORDER +is not defined within a valid ``thp_anon``, its policy will default to +``never``. Hugepages in tmpfs/shmem ======================== diff --git a/mm/huge_memory.c b/mm/huge_memory.c index bf29e70bb5e4..c783c15088b4 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -75,6 +75,7 @@ unsigned long huge_zero_pfn __read_mostly = ~0UL; unsigned long huge_anon_orders_always __read_mostly; unsigned long huge_anon_orders_madvise __read_mostly; unsigned long huge_anon_orders_inherit __read_mostly; +static bool anon_orders_configured __initdata; unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, unsigned long vm_flags, bool smaps, @@ -721,7 +722,8 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) * disable all other sizes. powerpc's PMD_ORDER isn't a compile-time * constant so we have to do this here. */ - huge_anon_orders_inherit = BIT(PMD_ORDER); + if (!anon_orders_configured) + huge_anon_orders_inherit = BIT(PMD_ORDER); *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); if (unlikely(!*hugepage_kobj)) { @@ -884,6 +886,100 @@ static int __init setup_transparent_hugepage(char *str) } __setup("transparent_hugepage=", setup_transparent_hugepage); +static inline int get_order_from_str(const char *size_str) +{ + unsigned long size; + char *endptr; + int order; + + size = memparse(size_str, &endptr); + + if (!is_power_of_2(size)) + goto err; + order = get_order(size); + if (BIT(order) & ~THP_ORDERS_ALL_ANON) + goto err; + + return order; +err: + pr_err("invalid size %s in thp_anon boot parameter\n", size_str); + return -EINVAL; +} + +static char str_dup[PAGE_SIZE] __initdata; +static int __init setup_thp_anon(char *str) +{ + char *token, *range, *policy, *subtoken; + unsigned long always, inherit, madvise; + char *start_size, *end_size; + int start, end, nr; + char *p; + + 
if (!str || strlen(str) + 1 > PAGE_SIZE) + goto err; + strcpy(str_dup, str); + + always = huge_anon_orders_always; + madvise = huge_anon_orders_madvise; + inherit = huge_anon_orders_inherit; + p = str_dup; + while ((token = strsep(&p, ";")) != NULL) { + range = strsep(&token, ":"); + policy = token; + + if (!policy) + goto err; + + while ((subtoken = strsep(&range, ",")) != NULL) { + if (strchr(subtoken, '-')) { + start_size = strsep(&subtoken, "-"); + end_size = subtoken; + + start = get_order_from_str(start_size); + end = get_order_from_str(end_size); + } else { + start = end = get_order_from_str(subtoken); + } + + if (start < 0 || end < 0 || start > end) + goto err; + + nr = end - start + 1; + if (!strcmp(policy, "always")) { + bitmap_set(&always, start, nr); + bitmap_clear(&inherit, start, nr); + bitmap_clear(&madvise, start, nr); + } else if (!strcmp(policy, "madvise")) { + bitmap_set(&madvise, start, nr); + bitmap_clear(&inherit, start, nr); + bitmap_clear(&always, start, nr); + } else if (!strcmp(policy, "inherit")) { + bitmap_set(&inherit, start, nr); + bitmap_clear(&madvise, start, nr); + bitmap_clear(&always, start, nr); + } else if (!strcmp(policy, "never")) { + bitmap_clear(&inherit, start, nr); + bitmap_clear(&madvise, start, nr); + bitmap_clear(&always, start, nr); + } else { + pr_err("invalid policy %s in thp_anon boot parameter\n", policy); + goto err; + } + } + } + + huge_anon_orders_always = always; + huge_anon_orders_madvise = madvise; + huge_anon_orders_inherit = inherit; + anon_orders_configured = true; + return 1; + +err: + pr_warn("thp_anon=%s: error parsing string, ignoring setting\n", str); + return 0; +} +__setup("thp_anon=", setup_thp_anon); + pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) { if (likely(vma->vm_flags & VM_WRITE)) -- Gitee From 9cc2c81b954b3e095a44b8552f66a20941051251 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Fri, 1 Nov 2024 13:54:05 -0300 Subject: [PATCH 1979/2138] mm: fix docs for 
the kernel parameter ``thp_anon=`` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #9728 commit 652e1a51465f2e8e75590bc3dd1e3a3b61020568 upstream If we add ``thp_anon=32,64K:always`` to the kernel command line, we will see the following error: [ 0.000000] huge_memory: thp_anon=32,64K:always: error parsing string, ignoring setting This happens because the correct format isn't ``thp_anon=,[KMG]:```, as [KMG] must follow each number to especify its unit. So, the correct format is ``thp_anon=[KMG],[KMG]:```. Therefore, adjust the documentation to reflect the correct format of the parameter ``thp_anon=``. Link: https://lkml.kernel.org/r/20241101165719.1074234-3-mcanal@igalia.com Fixes: dd4d30d1cdbe ("mm: override mTHP "enabled" defaults at kernel cmdline") Signed-off-by: Maíra Canal Acked-by: Barry Song Acked-by: David Hildenbrand Cc: Baolin Wang Cc: Hugh Dickins Cc: Jonathan Corbet Cc: Lance Yang Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4360 --- Documentation/admin-guide/kernel-parameters.txt | 2 +- Documentation/admin-guide/mm/transhuge.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index dfefaf652114..71ff55c6928d 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -6428,7 +6428,7 @@ 0: no polling (default) thp_anon= [KNL] - Format: ,[KMG]:;-[KMG]: + Format: [KMG],[KMG]:;[KMG]-[KMG]: state is one of "always", "madvise", "never" or "inherit". Control the default behavior of the system with respect to anonymous transparent hugepages. 
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 407cd34d4479..c0fbd4290d3a 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -292,7 +292,7 @@ control by passing the parameter ``transparent_hugepage=always`` or kernel command line. Alternatively, each supported anonymous THP size can be controlled by -passing ``thp_anon=,[KMG]:;-[KMG]:``, +passing ``thp_anon=[KMG],[KMG]:;[KMG]-[KMG]:``, where ```` is the THP size (must be a power of 2 of PAGE_SIZE and supported anonymous THP) and ```` is one of ``always``, ``madvise``, ``never`` or ``inherit``. -- Gitee From d4b3822116ab413b976d90a6b01116cdcf614c95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Fri, 1 Nov 2024 13:54:06 -0300 Subject: [PATCH 1980/2138] mm: shmem: control THP support through the kernel command line MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #9728 commit 949042811117d2f437ef6b529a69d45e2ee2d429 upstream Patch series "mm: add more kernel parameters to control mTHP", v5. This series introduces four patches related to the kernel parameters controlling mTHP and a fifth patch replacing `strcpy()` for `strscpy()` in the file `mm/huge_memory.c`. The first patch is a straightforward documentation update, correcting the format of the kernel parameter ``thp_anon=``. The second, third, and fourth patches focus on controlling THP support for shmem via the kernel command line. The second patch introduces a parameter to control the global default huge page allocation policy for the internal shmem mount. The third patch moves a piece of code to a shared header to ease the implementation of the fourth patch. Finally, the fourth patch implements a parameter similar to ``thp_anon=``, but for shmem. The goal of these changes is to simplify the configuration of systems that rely on mTHP support for shmem. 
For instance, a platform with a GPU that benefits from huge pages may want to enable huge pages for shmem. Having these kernel parameters streamlines the configuration process and ensures consistency across setups. This patch (of 4): Add a new kernel command line to control the hugepage allocation policy for the internal shmem mount, ``transparent_hugepage_shmem``. The parameter is similar to ``transparent_hugepage`` and has the following format: transparent_hugepage_shmem= where ```` is one of the seven valid policies available for shmem. Configuring the default huge page allocation policy for the internal shmem mount can be beneficial for DRM GPU drivers. Just as CPU architectures, GPUs can also take advantage of huge pages, but this is possible only if DRM GEM objects are backed by huge pages. Since GEM uses shmem to allocate anonymous pageable memory, having control over the default huge page allocation policy allows for the exploration of huge pages use on GPUs that rely on GEM objects backed by shmem. 
Link: https://lkml.kernel.org/r/20241101165719.1074234-2-mcanal@igalia.com Link: https://lkml.kernel.org/r/20241101165719.1074234-4-mcanal@igalia.com Signed-off-by: Maíra Canal Reviewed-by: Baolin Wang Acked-by: David Hildenbrand Cc: Barry Song Cc: dri-devel@lists.freedesktop.org Cc: Hugh Dickins Cc: Jonathan Corbet Cc: kernel-dev@igalia.com Cc: Lance Yang Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4360 --- .../admin-guide/kernel-parameters.txt | 7 ++ Documentation/admin-guide/mm/transhuge.rst | 6 ++ mm/shmem.c | 72 +++++++++++++------ 3 files changed, 62 insertions(+), 23 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 71ff55c6928d..f21bf03b9b83 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -6629,6 +6629,13 @@ See Documentation/admin-guide/mm/transhuge.rst for more details. + transparent_hugepage_shmem= [KNL] + Format: [always|within_size|advise|never|deny|force] + Can be used to control the hugepage allocation policy for + the internal shmem mount. + See Documentation/admin-guide/mm/transhuge.rst + for more details. + trusted.source= [KEYS] Format: This parameter identifies the trust source as a backend diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index c0fbd4290d3a..0421e5cab65f 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -315,6 +315,12 @@ PMD_ORDER THP policy will be overridden. If the policy for PMD_ORDER is not defined within a valid ``thp_anon``, its policy will default to ``never``. 
+Similarly to ``transparent_hugepage``, you can control the hugepage +allocation policy for the internal shmem mount by using the kernel parameter +``transparent_hugepage_shmem=``, where ```` is one of the +seven valid policies for shmem (``always``, ``within_size``, ``advise``, +``never``, ``deny``, and ``force``). + Hugepages in tmpfs/shmem ======================== diff --git a/mm/shmem.c b/mm/shmem.c index 43c169779b56..69102aa4d679 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -586,24 +586,39 @@ static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, vma, vm_flags); } -#if defined(CONFIG_SYSFS) static int shmem_parse_huge(const char *str) { + int huge; + + if (!str) + return -EINVAL; + if (!strcmp(str, "never")) - return SHMEM_HUGE_NEVER; - if (!strcmp(str, "always")) - return SHMEM_HUGE_ALWAYS; - if (!strcmp(str, "within_size")) - return SHMEM_HUGE_WITHIN_SIZE; - if (!strcmp(str, "advise")) - return SHMEM_HUGE_ADVISE; - if (!strcmp(str, "deny")) - return SHMEM_HUGE_DENY; - if (!strcmp(str, "force")) - return SHMEM_HUGE_FORCE; - return -EINVAL; + huge = SHMEM_HUGE_NEVER; + else if (!strcmp(str, "always")) + huge = SHMEM_HUGE_ALWAYS; + else if (!strcmp(str, "within_size")) + huge = SHMEM_HUGE_WITHIN_SIZE; + else if (!strcmp(str, "advise")) + huge = SHMEM_HUGE_ADVISE; + else if (!strcmp(str, "deny")) + huge = SHMEM_HUGE_DENY; + else if (!strcmp(str, "force")) + huge = SHMEM_HUGE_FORCE; + else + return -EINVAL; + + if (!has_transparent_hugepage() && + huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) + return -EINVAL; + + /* Do not override huge allocation policy with non-PMD sized mTHP */ + if (huge == SHMEM_HUGE_FORCE && + huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER)) + return -EINVAL; + + return huge; } -#endif #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS) static const char *shmem_format_huge(int huge) @@ -5013,15 +5028,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, huge = shmem_parse_huge(tmp); if (huge == -EINVAL) - 
return -EINVAL; - if (!has_transparent_hugepage() && - huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) - return -EINVAL; - - /* Do not override huge allocation policy with non-PMD sized mTHP */ - if (huge == SHMEM_HUGE_FORCE && - huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER)) - return -EINVAL; + return huge; shmem_huge = huge; if (shmem_huge > SHMEM_HUGE_DENY) @@ -5118,6 +5125,25 @@ struct kobj_attribute thpsize_shmem_enabled_attr = __ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store); #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) + +static int __init setup_transparent_hugepage_shmem(char *str) +{ + int huge; + + huge = shmem_parse_huge(str); + if (huge == -EINVAL) { + pr_warn("transparent_hugepage_shmem= cannot parse, ignored\n"); + return huge; + } + + shmem_huge = huge; + return 1; +} +__setup("transparent_hugepage_shmem=", setup_transparent_hugepage_shmem); + +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + #else /* !CONFIG_SHMEM */ /* -- Gitee From 6ce5466b8fdb469e2098f967c9d2197e81feb550 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Fri, 1 Nov 2024 13:54:07 -0300 Subject: [PATCH 1981/2138] mm: move ``get_order_from_str()`` to internal.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #9728 commit 1c8d48497525d77acfb7bdaaa246a887e754f379 upstream In order to implement a kernel parameter similar to ``thp_anon=`` for shmem, we'll need the function ``get_order_from_str()``. Instead of duplicating the function, move the function to a shared header, in which both mm/shmem.c and mm/huge_memory.c will be able to use it. 
Link: https://lkml.kernel.org/r/20241101165719.1074234-5-mcanal@igalia.com Signed-off-by: Maíra Canal Reviewed-by: Baolin Wang Cc: Barry Song Cc: David Hildenbrand Cc: Hugh Dickins Cc: Jonathan Corbet Cc: Lance Yang Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4360 --- mm/huge_memory.c | 38 +++++++++++++++----------------------- mm/internal.h | 22 ++++++++++++++++++++++ 2 files changed, 37 insertions(+), 23 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c783c15088b4..61f18d588806 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -886,26 +886,6 @@ static int __init setup_transparent_hugepage(char *str) } __setup("transparent_hugepage=", setup_transparent_hugepage); -static inline int get_order_from_str(const char *size_str) -{ - unsigned long size; - char *endptr; - int order; - - size = memparse(size_str, &endptr); - - if (!is_power_of_2(size)) - goto err; - order = get_order(size); - if (BIT(order) & ~THP_ORDERS_ALL_ANON) - goto err; - - return order; -err: - pr_err("invalid size %s in thp_anon boot parameter\n", size_str); - return -EINVAL; -} - static char str_dup[PAGE_SIZE] __initdata; static int __init setup_thp_anon(char *str) { @@ -935,10 +915,22 @@ static int __init setup_thp_anon(char *str) start_size = strsep(&subtoken, "-"); end_size = subtoken; - start = get_order_from_str(start_size); - end = get_order_from_str(end_size); + start = get_order_from_str(start_size, THP_ORDERS_ALL_ANON); + end = get_order_from_str(end_size, THP_ORDERS_ALL_ANON); } else { - start = end = get_order_from_str(subtoken); + start_size = end_size = subtoken; + start = end = get_order_from_str(subtoken, + THP_ORDERS_ALL_ANON); + } + + if (start == -EINVAL) { + pr_err("invalid size %s in thp_anon boot parameter\n", start_size); + goto err; + } + + if (end == -EINVAL) { + pr_err("invalid size %s in thp_anon boot parameter\n", end_size); + goto err; } if (start 
< 0 || end < 0 || start > end) diff --git a/mm/internal.h b/mm/internal.h index 5bc0370185cd..c11dd833b58a 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1258,6 +1258,28 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags); +/* + * Parses a string with mem suffixes into its order. Useful to parse kernel + * parameters. + */ +static inline int get_order_from_str(const char *size_str, + unsigned long valid_orders) +{ + unsigned long size; + char *endptr; + int order; + + size = memparse(size_str, &endptr); + + if (!is_power_of_2(size)) + return -EINVAL; + order = get_order(size); + if (BIT(order) & ~valid_orders) + return -EINVAL; + + return order; +} + enum { /* mark page accessed */ FOLL_TOUCH = 1 << 16, -- Gitee From d4e676bef28843368463b264e3326efd3caa03de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Fri, 1 Nov 2024 13:54:08 -0300 Subject: [PATCH 1982/2138] mm: shmem: override mTHP shmem default with a kernel parameter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #9728 commit 24f9cd195fbc9382ae0ed8b332e6302d1722d8e0 upstream Add the ``thp_shmem=`` kernel command line to allow specifying the default policy of each supported shmem hugepage size. The kernel parameter accepts the following format: thp_shmem=[KMG],[KMG]:;[KMG]-[KMG]: For example, thp_shmem=16K-64K:always;128K,512K:inherit;256K:advise;1M-2M:never;4M-8M:within_size Some GPUs may benefit from using huge pages. Since DRM GEM uses shmem to allocate anonymous pageable memory, it's essential to control the huge page allocation policy for the internal shmem mount. This control can be achieved through the ``transparent_hugepage_shmem=`` parameter. Beyond just setting the allocation policy, it's crucial to have granular control over the size of huge pages that can be allocated. 
The GPU may support only specific huge page sizes, and allocating pages larger/smaller than those sizes would be ineffective. Link: https://lkml.kernel.org/r/20241101165719.1074234-6-mcanal@igalia.com Signed-off-by: Maíra Canal Reviewed-by: Baolin Wang Cc: Barry Song Cc: David Hildenbrand Cc: Hugh Dickins Cc: Jonathan Corbet Cc: Lance Yang Cc: Ryan Roberts Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4360 --- .../admin-guide/kernel-parameters.txt | 10 ++ Documentation/admin-guide/mm/transhuge.rst | 17 +++ mm/shmem.c | 105 +++++++++++++++++- 3 files changed, 131 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index f21bf03b9b83..977b201f52b0 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -6440,6 +6440,16 @@ Force threading of all interrupt handlers except those marked explicitly IRQF_NO_THREAD. + thp_shmem= [KNL] + Format: [KMG],[KMG]:;[KMG]-[KMG]: + Control the default policy of each hugepage size for the + internal shmem mount. is one of policies available + for the shmem mount ("always", "inherit", "never", "within_size", + and "advise"). + It can be used multiple times for multiple shmem THP sizes. + See Documentation/admin-guide/mm/transhuge.rst for more + details. + topology= [S390] Format: {off | on} Specify if the kernel should make use of the cpu diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 0421e5cab65f..c05cbfe1318c 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -321,6 +321,23 @@ allocation policy for the internal shmem mount by using the kernel parameter seven valid policies for shmem (``always``, ``within_size``, ``advise``, ``never``, ``deny``, and ``force``). 
+In the same manner as ``thp_anon`` controls each supported anonymous THP +size, ``thp_shmem`` controls each supported shmem THP size. ``thp_shmem`` +has the same format as ``thp_anon``, but also supports the policy +``within_size``. + +``thp_shmem=`` may be specified multiple times to configure all THP sizes +as required. If ``thp_shmem=`` is specified at least once, any shmem THP +sizes not explicitly configured on the command line are implicitly set to +``never``. + +``transparent_hugepage_shmem`` setting only affects the global toggle. If +``thp_shmem`` is not specified, PMD_ORDER hugepage will default to +``inherit``. However, if a valid ``thp_shmem`` setting is provided by the +user, the PMD_ORDER hugepage policy will be overridden. If the policy for +PMD_ORDER is not defined within a valid ``thp_shmem``, its policy will +default to ``never``. + Hugepages in tmpfs/shmem ======================== diff --git a/mm/shmem.c b/mm/shmem.c index 69102aa4d679..fd2d03fc381f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -135,6 +135,7 @@ static unsigned long huge_shmem_orders_always __read_mostly; static unsigned long huge_shmem_orders_madvise __read_mostly; static unsigned long huge_shmem_orders_inherit __read_mostly; static unsigned long huge_shmem_orders_within_size __read_mostly; +static bool shmem_orders_configured __initdata; #endif #ifdef CONFIG_TMPFS @@ -4973,7 +4974,8 @@ void __init shmem_init(void) * Default to setting PMD-sized THP to inherit the global setting and * disable all other multi-size THPs. 
*/ - huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER); + if (!shmem_orders_configured) + huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER); #endif return; @@ -5142,6 +5144,107 @@ static int __init setup_transparent_hugepage_shmem(char *str) } __setup("transparent_hugepage_shmem=", setup_transparent_hugepage_shmem); +static char str_dup[PAGE_SIZE] __initdata; +static int __init setup_thp_shmem(char *str) +{ + char *token, *range, *policy, *subtoken; + unsigned long always, inherit, madvise, within_size; + char *start_size, *end_size; + int start, end, nr; + char *p; + + if (!str || strlen(str) + 1 > PAGE_SIZE) + goto err; + strcpy(str_dup, str); + + always = huge_shmem_orders_always; + inherit = huge_shmem_orders_inherit; + madvise = huge_shmem_orders_madvise; + within_size = huge_shmem_orders_within_size; + p = str_dup; + while ((token = strsep(&p, ";")) != NULL) { + range = strsep(&token, ":"); + policy = token; + + if (!policy) + goto err; + + while ((subtoken = strsep(&range, ",")) != NULL) { + if (strchr(subtoken, '-')) { + start_size = strsep(&subtoken, "-"); + end_size = subtoken; + + start = get_order_from_str(start_size, + THP_ORDERS_ALL_FILE_DEFAULT); + end = get_order_from_str(end_size, + THP_ORDERS_ALL_FILE_DEFAULT); + } else { + start_size = end_size = subtoken; + start = end = get_order_from_str(subtoken, + THP_ORDERS_ALL_FILE_DEFAULT); + } + + if (start == -EINVAL) { + pr_err("invalid size %s in thp_shmem boot parameter\n", + start_size); + goto err; + } + + if (end == -EINVAL) { + pr_err("invalid size %s in thp_shmem boot parameter\n", + end_size); + goto err; + } + + if (start < 0 || end < 0 || start > end) + goto err; + + nr = end - start + 1; + if (!strcmp(policy, "always")) { + bitmap_set(&always, start, nr); + bitmap_clear(&inherit, start, nr); + bitmap_clear(&madvise, start, nr); + bitmap_clear(&within_size, start, nr); + } else if (!strcmp(policy, "advise")) { + bitmap_set(&madvise, start, nr); + bitmap_clear(&inherit, start, nr); + 
bitmap_clear(&always, start, nr); + bitmap_clear(&within_size, start, nr); + } else if (!strcmp(policy, "inherit")) { + bitmap_set(&inherit, start, nr); + bitmap_clear(&madvise, start, nr); + bitmap_clear(&always, start, nr); + bitmap_clear(&within_size, start, nr); + } else if (!strcmp(policy, "within_size")) { + bitmap_set(&within_size, start, nr); + bitmap_clear(&inherit, start, nr); + bitmap_clear(&madvise, start, nr); + bitmap_clear(&always, start, nr); + } else if (!strcmp(policy, "never")) { + bitmap_clear(&inherit, start, nr); + bitmap_clear(&madvise, start, nr); + bitmap_clear(&always, start, nr); + bitmap_clear(&within_size, start, nr); + } else { + pr_err("invalid policy %s in thp_shmem boot parameter\n", policy); + goto err; + } + } + } + + huge_shmem_orders_always = always; + huge_shmem_orders_madvise = madvise; + huge_shmem_orders_inherit = inherit; + huge_shmem_orders_within_size = within_size; + shmem_orders_configured = true; + return 1; + +err: + pr_warn("thp_shmem=%s: error parsing string, ignoring setting\n", str); + return 0; +} +__setup("thp_shmem=", setup_thp_shmem); + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #else /* !CONFIG_SHMEM */ -- Gitee From da43191d77928b418cc4390d26574969250fada1 Mon Sep 17 00:00:00 2001 From: haodongdong Date: Fri, 27 Dec 2024 11:21:11 +0800 Subject: [PATCH 1983/2138] anolis: scsi: leapioraid: fix hiding ugood disk problem ANBZ: #13049 fix hiding ugood disk problem. 
fix "The address of local variable 'mpi_request' might be accessed at non-zero index in function leapioraid_base_send_ioc_init" Signed-off-by: haodongdong Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/4401 --- drivers/scsi/leapioraid/leapioraid_app.c | 1 - drivers/scsi/leapioraid/leapioraid_func.c | 15 ++++++++------- drivers/scsi/leapioraid/leapioraid_func.h | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/scsi/leapioraid/leapioraid_app.c b/drivers/scsi/leapioraid/leapioraid_app.c index 9d699721d1be..6e7f6bf87778 100644 --- a/drivers/scsi/leapioraid/leapioraid_app.c +++ b/drivers/scsi/leapioraid/leapioraid_app.c @@ -53,7 +53,6 @@ #include #include #include -#include "leapioraid_func.h" #ifdef __KERNEL__ #include diff --git a/drivers/scsi/leapioraid/leapioraid_func.c b/drivers/scsi/leapioraid/leapioraid_func.c index 2d80a86da007..97e0f893ab4c 100644 --- a/drivers/scsi/leapioraid/leapioraid_func.c +++ b/drivers/scsi/leapioraid/leapioraid_func.c @@ -4746,14 +4746,11 @@ leapioraid_base_send_ioc_init(struct LEAPIORAID_ADAPTER *ioc) current_time = ktime_get_real(); mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time)); if (ioc->logging_level & LEAPIORAID_DEBUG_INIT) { - __le32 *mfp; - int i; - mfp = (__le32 *) &mpi_request; pr_info("%s \toffset:data\n", ioc->name); - for (i = 0; i < sizeof(struct LeapioraidIOCInitReq_t) / 4; i++) - pr_info("%s \t[0x%02x]:%08x\n", - ioc->name, i * 4, le32_to_cpu(mfp[i])); + leapioraid_debug_dump_mf(&mpi_request, + sizeof(struct LeapioraidIOCInitReq_t) / 4); + } r = leapioraid_base_handshake_req_reply_wait(ioc, sizeof @@ -7022,7 +7019,11 @@ leapioraid_config_get_volume_handle(struct LEAPIORAID_ADAPTER *ioc, r = -1; ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & LEAPIORAID_IOCSTATUS_MASK; - if (ioc_status != LEAPIORAID_IOCSTATUS_SUCCESS) + if (ioc_status == LEAPIORAID_IOCSTATUS_CONFIG_INVALID_PAGE) { + *volume_handle = 0; + r = 0; + goto out; + } else if (ioc_status != 
LEAPIORAID_IOCSTATUS_SUCCESS) goto out; for (i = 0; i < config_page->NumElements; i++) { element_type = diff --git a/drivers/scsi/leapioraid/leapioraid_func.h b/drivers/scsi/leapioraid/leapioraid_func.h index 9cf8206ccb3c..8ca8fc0a6f26 100644 --- a/drivers/scsi/leapioraid/leapioraid_func.h +++ b/drivers/scsi/leapioraid/leapioraid_func.h @@ -103,7 +103,7 @@ #define LEAPIORAID_KDUMP_SCSI_IO_DEPTH (64) #define LEAPIORAID_RAID_MAX_SECTORS (128) -#define LEAPIORAID_NAME_LENGTH (32) +#define LEAPIORAID_NAME_LENGTH (48) #define LEAPIORAID_DRIVER_NAME_LENGTH (24) #define LEAPIORAID_STRING_LENGTH (64) -- Gitee From f83106faeaccaeb7a3d7c9518f178e64daf951e5 Mon Sep 17 00:00:00 2001 From: Pankaj Raghav Date: Mon, 15 Jan 2024 11:25:22 +0100 Subject: [PATCH 1984/2138] readahead: use ilog2 instead of a while loop in page_cache_ra_order() ANBZ: #9728 commit e03c16fb4af1dfc615a4e1f51be0d5fe5840b904 upstream A while loop is used to adjust the new_order to be lower than the ra->size. ilog2 could be used to do the same instead of using a loop. ilog2 typically resolves to a bit scan reverse instruction. This is particularly useful when ra->size is smaller than the 2^new_order as it resolves in one instruction instead of looping to find the new_order. No functional changes. 
Link: https://lkml.kernel.org/r/20240115102523.2336742-1-kernel@pankajraghav.com Signed-off-by: Pankaj Raghav Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4378 --- mm/readahead.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mm/readahead.c b/mm/readahead.c index 41500002fbc4..deccc49a57c7 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -505,10 +505,8 @@ void page_cache_ra_order(struct readahead_control *ractl, if (new_order < MAX_PAGECACHE_ORDER) new_order += 2; - if (new_order > MAX_PAGECACHE_ORDER) - new_order = MAX_PAGECACHE_ORDER; - while ((1 << new_order) > ra->size) - new_order--; + new_order = min_t(unsigned int, MAX_PAGECACHE_ORDER, new_order); + new_order = min_t(unsigned int, new_order, ilog2(ra->size)); /* See comment in page_cache_ra_unbounded() */ nofs = memalloc_nofs_save(); -- Gitee From c5f71a6717386cd9aada2436c2f96b05d2583e91 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Wed, 25 Dec 2024 15:05:46 +0800 Subject: [PATCH 1985/2138] mm: mTHP user controls to configure pagecache large folio sizes ANBZ: #9728 cherry-picked from: https://lore.kernel.org/lkml/20240717071257.4141363-1-ryan.roberts@arm.com/T/#m25b51aa890b123202cda93fa0e67340b3e4b26b6 Add mTHP controls to sysfs to allow user space to configure the folio sizes that can be considered for allocation of file-backed memory: /sys/kernel/mm/transparent_hugepage/hugepages-*kB/file_enable For now, the control can be set to either `always` or `never` to enable or disable that size. More options may be added in future. By default, at boot, all folio sizes are enabled, and the algorithm used to select a folio size remains conceptually unchanged; increase by 2 enabled orders each time a readahead marker is hit then reduce to the closest enabled order to fit within bounds of ra size, index alignment and EOF. 
So when all folio sizes are enabled, behavior should be unchanged. When folio sizes are disabled, the algorithm will never select them. Systems such as Android are always under extreme memory pressure and as a result fragmentation often causes attempts to allocate large folios to fail and fallback to smaller folios. By fixing the pagecache to one large folio size (e.g. 64K) plus fallback to small folios, a large source of this fragmentation can be removed and 64K mTHP allocations succeed more often, allowing the system to benefit from improved performance on arm64 and other arches that support "contpte". Signed-off-by: Ryan Roberts Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4378 --- Documentation/admin-guide/mm/transhuge.rst | 21 +++++++++++ include/linux/huge_mm.h | 42 ++++++++++++--------- mm/filemap.c | 15 +++++--- mm/huge_memory.c | 43 ++++++++++++++++++++++ mm/readahead.c | 43 ++++++++++++++++++---- 5 files changed, 134 insertions(+), 30 deletions(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index c05cbfe1318c..f6b3aa8e2df1 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -283,6 +283,27 @@ processes. Exceeding the number would block the collapse:: A higher value may increase memory footprint for some workloads. +File-Backed Hugepages +--------------------- + +The kernel will automatically select an appropriate THP size for file-backed +memory from a set of allowed sizes. By default all THP sizes that the page cache +supports are allowed, but this set can be modified with one of:: + + echo always >/sys/kernel/mm/transparent_hugepage/hugepages-kB/file_enabled + echo never >/sys/kernel/mm/transparent_hugepage/hugepages-kB/file_enabled + +where is the hugepage size being addressed, the available sizes for which +vary by system. 
``always`` adds the hugepage size to the set of allowed sizes, +and ``never`` removes the hugepage size from the set of allowed sizes. + +In some situations, constraining the allowed sizes can reduce memory +fragmentation, resulting in fewer allocation fallbacks and improved system +performance. + +Note that any changes to the allowed set of sizes only applies to future +file-backed THP allocations. + Boot parameters =============== diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index f6c139d2edf9..11eaca1a5d90 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -95,6 +95,24 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr; #define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \ (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order))) +static inline int lowest_order(unsigned long orders) +{ + if (orders) + return __ffs(orders); + return -1; +} + +static inline int highest_order(unsigned long orders) +{ + return fls_long(orders) - 1; +} + +static inline int next_order(unsigned long *orders, int prev) +{ + *orders &= ~BIT(prev); + return highest_order(*orders); +} + enum mthp_stat_item { MTHP_STAT_ANON_FAULT_ALLOC, MTHP_STAT_ANON_FAULT_FALLBACK, @@ -155,6 +173,12 @@ extern unsigned long transparent_hugepage_flags; extern unsigned long huge_anon_orders_always; extern unsigned long huge_anon_orders_madvise; extern unsigned long huge_anon_orders_inherit; +extern unsigned long huge_file_orders_always; + +static inline unsigned long file_orders_always(void) +{ + return READ_ONCE(huge_file_orders_always); +} static inline bool hugepage_global_enabled(void) { @@ -169,17 +193,6 @@ static inline bool hugepage_global_always(void) (1< MAX_PAGECACHE_ORDER) - order = MAX_PAGECACHE_ORDER; + + orders = file_orders_always() | BIT(0); + orders &= BIT(order + 1) - 1; /* If we're not aligned, allocate a smaller folio */ if (index & ((1UL << order) - 1)) - order = __ffs(index); + orders 
&= BIT(__ffs(index) + 1) - 1; + order = highest_order(orders); - do { + while (orders) { gfp_t alloc_gfp = gfp; err = -ENOMEM; @@ -2000,7 +2003,9 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, break; folio_put(folio); folio = NULL; - } while (order-- > 0); + + order = next_order(&orders, order); + }; if (err == -EEXIST) goto repeat; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 61f18d588806..902e33499660 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -75,6 +75,7 @@ unsigned long huge_zero_pfn __read_mostly = ~0UL; unsigned long huge_anon_orders_always __read_mostly; unsigned long huge_anon_orders_madvise __read_mostly; unsigned long huge_anon_orders_inherit __read_mostly; +unsigned long huge_file_orders_always __read_mostly; static bool anon_orders_configured __initdata; unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, @@ -506,6 +507,37 @@ static ssize_t anon_enabled_store(struct kobject *kobj, return ret; } +static ssize_t file_enabled_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int order = to_thpsize(kobj)->order; + const char *output; + + if (test_bit(order, &huge_file_orders_always)) + output = "[always] never"; + else + output = "always [never]"; + + return sysfs_emit(buf, "%s\n", output); +} + +static ssize_t file_enabled_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int order = to_thpsize(kobj)->order; + ssize_t ret = count; + + if (sysfs_streq(buf, "always")) + set_bit(order, &huge_file_orders_always); + else if (sysfs_streq(buf, "never")) + clear_bit(order, &huge_file_orders_always); + else + ret = -EINVAL; + + return ret; +} + static struct kobj_attribute anon_enabled_attr = __ATTR(enabled, 0644, anon_enabled_show, anon_enabled_store); @@ -518,7 +550,11 @@ static const struct attribute_group anon_ctrl_attr_grp = { .attrs = anon_ctrl_attrs, }; +static struct kobj_attribute file_enabled_attr = + 
__ATTR(file_enabled, 0644, file_enabled_show, file_enabled_store); + static struct attribute *file_ctrl_attrs[] = { + &file_enabled_attr.attr, #ifdef CONFIG_SHMEM &thpsize_shmem_enabled_attr.attr, #endif @@ -725,6 +761,13 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) if (!anon_orders_configured) huge_anon_orders_inherit = BIT(PMD_ORDER); + /* + * For pagecache, default to enabling all orders. powerpc's PMD_ORDER + * (and therefore THP_ORDERS_ALL_FILE_DEFAULT) isn't a compile-time + * constant so we have to do this here. + */ + huge_file_orders_always = THP_ORDERS_ALL_FILE_DEFAULT; + *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); if (unlikely(!*hugepage_kobj)) { pr_err("failed to create transparent hugepage kobject\n"); diff --git a/mm/readahead.c b/mm/readahead.c index deccc49a57c7..8eaabf7c91f4 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -486,6 +486,34 @@ static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index, return 0; } +static int select_new_order(int old_order, int max_order, unsigned long orders) +{ + unsigned long hi_orders, lo_orders; + + /* + * Select the next order to use from the set in `orders`, while ensuring + * we don't go above max_order. Prefer the next + 1 highest allowed + * order after old_order, unless there isn't one, in which case return + * the closest allowed order, which is either the next highest allowed + * order or less than or equal to old_order. The "next + 1" skip + * behaviour is intended to allow ramping up to large folios quickly. 
+ */ + + orders &= BIT(max_order + 1) - 1; + VM_WARN_ON(!orders); + hi_orders = orders & ~(BIT(old_order + 1) - 1); + + if (hi_orders) { + old_order = lowest_order(hi_orders); + hi_orders &= ~BIT(old_order); + if (hi_orders) + return lowest_order(hi_orders); + } + + lo_orders = orders & (BIT(old_order + 1) - 1); + return highest_order(lo_orders); +} + void page_cache_ra_order(struct readahead_control *ractl, struct file_ra_state *ra, unsigned int new_order) { @@ -496,17 +524,15 @@ void page_cache_ra_order(struct readahead_control *ractl, unsigned int nofs; int err = 0; gfp_t gfp = readahead_gfp_mask(mapping); + unsigned long orders; - if (!mapping_large_folio_support(mapping) || ra->size < 4) + if (!mapping_large_folio_support(mapping)) goto fallback; limit = min(limit, index + ra->size - 1); - if (new_order < MAX_PAGECACHE_ORDER) - new_order += 2; - - new_order = min_t(unsigned int, MAX_PAGECACHE_ORDER, new_order); - new_order = min_t(unsigned int, new_order, ilog2(ra->size)); + orders = file_orders_always() | BIT(0); + new_order = select_new_order(new_order, ilog2(ra->size), orders); /* See comment in page_cache_ra_unbounded() */ nofs = memalloc_nofs_save(); @@ -516,9 +542,10 @@ void page_cache_ra_order(struct readahead_control *ractl, /* Align with smaller pages if needed */ if (index & ((1UL << order) - 1)) - order = __ffs(index); + order = select_new_order(order, __ffs(index), orders); /* Don't allocate pages past EOF */ - while (index + (1UL << order) - 1 > limit) + while (index + (1UL << order) - 1 > limit && + (BIT(order) & orders) == 0) order--; err = ra_alloc_folio(ractl, index, mark, order, gfp); if (err) -- Gitee From 168f8ea9cec754e5310822bc83e2134a77bea78a Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Wed, 25 Dec 2024 15:56:21 +0800 Subject: [PATCH 1986/2138] mm: Introduce "always+exec" for mTHP file_enabled control ANBZ: #9728 cherry-picked from: 
https://lore.kernel.org/lkml/20240717071257.4141363-1-ryan.roberts@arm.com/T/#md06a4a7a606cb90824f322fec868ee0d7620a876 In addition to `always` and `never`, add `always+exec` as an option for: /sys/kernel/mm/transparent_hugepage/hugepages-*kB/file_enabled `always+exec` acts like `always` but additionally marks the hugepage size as the preferred hugepage size for sections of any file mapped with execute permission. A maximum of one hugepage size can be marked as `exec` at a time, so applying it to a new size implicitly removes it from any size it was previously set for. Change readahead to use this flagged exec size; when a request is made for an executable mapping, do a synchronous read of the size in a naturally aligned manner. On arm64 if memory is physically contiguous and naturally aligned to the "contpte" size, we can use contpte mappings, which improves utilization of the TLB. When paired with the "multi-size THP" changes, this works well to reduce dTLB pressure. However iTLB pressure is still high due to executable mappings having a low likelihood of being in the required folio size and mapping alignment, even when the filesystem supports readahead into large folios (e.g. XFS). The reason for the low likelihood is that the current readahead algorithm starts with an order-2 folio and increases the folio order by 2 every time the readahead mark is hit. But most executable memory is faulted in fairly randomly and so the readahead mark is rarely hit and most executable folios remain order-2. This is observed empirically and confirmed from discussion with a GNU linker expert; in general, the linker does nothing to group temporally accessed text together spatially. Additionally, with the current read-around approach there are no alignment guarantees between the file and folio. This is insufficient for arm64's contpte mapping requirement (order-4 for 4K base pages). So it seems reasonable to special-case the read(ahead) logic for executable mappings. 
The trade-off is performance improvement (due to more efficient storage of the translations in iTLB) vs potential read amplification (due to reading too much data around the fault which won't be used), and the latter is independent of base page size. Of course if no hugepage size is marked as `always+exec` the old behaviour is maintained. Performance Benchmarking ------------------------ The below shows kernel compilation and speedometer javascript benchmarks on Ampere Altra arm64 system. When the patch is applied, `always+exec` is set for 64K folios. First, confirmation that this patch causes more memory to be contained in 64K folios (this is for all file-backed memory so includes non-executable too): | File-backed folios | Speedometer | Kernel Compile | | by size as percentage |-----------------|-----------------| | of all mapped file mem | before | after | before | after | |=========================|========|========|========|========| |file-thp-aligned-16kB | 45% | 9% | 46% | 7% | |file-thp-aligned-32kB | 2% | 0% | 3% | 1% | |file-thp-aligned-64kB | 3% | 63% | 5% | 80% | |file-thp-aligned-128kB | 11% | 11% | 0% | 0% | |file-thp-unaligned-16kB | 1% | 0% | 3% | 1% | |file-thp-unaligned-128kB | 1% | 0% | 0% | 0% | |file-thp-partial | 0% | 0% | 0% | 0% | |-------------------------|--------|--------|--------|--------| |file-cont-aligned-64kB | 16% | 75% | 5% | 80% | The above shows that for both use cases, the amount of file memory backed by 16K folios reduces and the amount backed by 64K folios increases significantly. And the amount of memory that is contpte-mapped significantly increases (last line). 
And this is reflected in performance improvement: Kernel Compilation (smaller is faster): | kernel | real-time | kern-time | user-time | peak memory | |----------|-------------|-------------|-------------|---------------| | before | 0.0% | 0.0% | 0.0% | 0.0% | | after | -1.6% | -2.1% | -1.7% | 0.0% | Speedometer (bigger is faster): | kernel | runs_per_min | peak memory | |----------|----------------|---------------| | before | 0.0% | 0.0% | | after | 1.3% | 1.0% | Both benchmarks show a ~1.5% improvement once the patch is applied. Signed-off-by: Ryan Roberts Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4378 --- Documentation/admin-guide/mm/transhuge.rst | 6 +++++ include/linux/huge_mm.h | 11 ++++++++ mm/filemap.c | 11 ++++++++ mm/huge_memory.c | 31 +++++++++++++++++----- 4 files changed, 52 insertions(+), 7 deletions(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index f6b3aa8e2df1..25021a6ec9d5 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -291,12 +291,18 @@ memory from a set of allowed sizes. By default all THP sizes that the page cache supports are allowed, but this set can be modified with one of:: echo always >/sys/kernel/mm/transparent_hugepage/hugepages-kB/file_enabled + echo always+exec >/sys/kernel/mm/transparent_hugepage/hugepages-kB/file_enabled echo never >/sys/kernel/mm/transparent_hugepage/hugepages-kB/file_enabled where is the hugepage size being addressed, the available sizes for which vary by system. ``always`` adds the hugepage size to the set of allowed sizes, and ``never`` removes the hugepage size from the set of allowed sizes. +``always+exec`` acts like ``always`` but additionally marks the hugepage size as +the preferred hugepage size for sections of any file mapped executable. 
A +maximum of one hugepage size can be marked as ``exec`` at a time, so applying it +to a new size implicitly removes it from any size it was previously set for. + In some situations, constraining the allowed sizes can reduce memory fragmentation, resulting in fewer allocation fallbacks and improved system performance. diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 11eaca1a5d90..9633047656e9 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -174,12 +174,18 @@ extern unsigned long huge_anon_orders_always; extern unsigned long huge_anon_orders_madvise; extern unsigned long huge_anon_orders_inherit; extern unsigned long huge_file_orders_always; +extern int huge_file_exec_order; static inline unsigned long file_orders_always(void) { return READ_ONCE(huge_file_orders_always); } +static inline int file_exec_order(void) +{ + return READ_ONCE(huge_file_exec_order); +} + static inline bool hugepage_global_enabled(void) { return transparent_hugepage_flags & @@ -605,6 +611,11 @@ static inline unsigned long file_orders_always(void) { return 0; } + +static inline int file_exec_order(void) +{ + return -1; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int split_folio_to_list(struct folio *folio, diff --git a/mm/filemap.c b/mm/filemap.c index e89c36f2ca06..c4b6715b1bd0 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3214,6 +3214,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) struct file *fpin = NULL; unsigned long vm_flags = vmf->vma->vm_flags; unsigned int mmap_miss; + int exec_order = file_exec_order(); #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* Use the readahead code, even if readahead is disabled */ @@ -3233,6 +3234,16 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) } #endif + /* If explicit order is set for exec mappings, use it. 
*/ + if ((vm_flags & VM_EXEC) && exec_order >= 0) { + fpin = maybe_unlock_mmap_for_io(vmf, fpin); + ra->size = 1UL << exec_order; + ra->async_size = 0; + ractl._index &= ~((unsigned long)ra->size - 1); + page_cache_ra_order(&ractl, ra, exec_order); + return fpin; + } + /* If we don't want any read-ahead, don't bother */ if (vm_flags & VM_RAND_READ) return fpin; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 902e33499660..cdfa964bfbbd 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -76,6 +76,7 @@ unsigned long huge_anon_orders_always __read_mostly; unsigned long huge_anon_orders_madvise __read_mostly; unsigned long huge_anon_orders_inherit __read_mostly; unsigned long huge_file_orders_always __read_mostly; +int huge_file_exec_order __read_mostly = -1; static bool anon_orders_configured __initdata; unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, @@ -443,6 +444,7 @@ static const struct attribute_group hugepage_attr_group = { static void hugepage_exit_sysfs(struct kobject *hugepage_kobj); static void thpsize_release(struct kobject *kobj); static DEFINE_SPINLOCK(huge_anon_orders_lock); +static DEFINE_SPINLOCK(huge_file_orders_lock); static LIST_HEAD(thpsize_list); static ssize_t anon_enabled_show(struct kobject *kobj, @@ -512,11 +514,15 @@ static ssize_t file_enabled_show(struct kobject *kobj, { int order = to_thpsize(kobj)->order; const char *output; + bool exec; - if (test_bit(order, &huge_file_orders_always)) - output = "[always] never"; - else - output = "always [never]"; + if (test_bit(order, &huge_file_orders_always)) { + exec = READ_ONCE(huge_file_exec_order) == order; + output = exec ? 
"always [always+exec] never" : + "[always] always+exec never"; + } else { + output = "always always+exec [never]"; + } return sysfs_emit(buf, "%s\n", output); } @@ -528,13 +534,24 @@ static ssize_t file_enabled_store(struct kobject *kobj, int order = to_thpsize(kobj)->order; ssize_t ret = count; - if (sysfs_streq(buf, "always")) + spin_lock(&huge_file_orders_lock); + + if (sysfs_streq(buf, "always")) { set_bit(order, &huge_file_orders_always); - else if (sysfs_streq(buf, "never")) + if (huge_file_exec_order == order) + huge_file_exec_order = -1; + } else if (sysfs_streq(buf, "always+exec")) { + set_bit(order, &huge_file_orders_always); + huge_file_exec_order = order; + } else if (sysfs_streq(buf, "never")) { clear_bit(order, &huge_file_orders_always); - else + if (huge_file_exec_order == order) + huge_file_exec_order = -1; + } else { ret = -EINVAL; + } + spin_unlock(&huge_file_orders_lock); return ret; } -- Gitee From debff58db18ed18e69830067b4fe223ceca09591 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Wed, 25 Dec 2024 16:55:37 +0800 Subject: [PATCH 1987/2138] mm: Override mTHP "file_enabled" defaults at kernel cmdline ANBZ: #9728 cherry-picked from: https://lore.kernel.org/lkml/20240717071257.4141363-1-ryan.roberts@arm.com/T/#mb70537979115e89c8398c6f2b3d3e70ec438c8d0 Add thp_file= cmdline parameter to allow specifying the default enablement of each supported file-backed THP size. The parameter accepts the following format and can be provided multiple times to configure each size: thp_file=[KMG]: See Documentation/admin-guide/mm/transhuge.rst for more details. Configuring the defaults at boot time is often necessary because its not always possible to drop active executable pages from the page cache, especially if they are well used like libc. The command line parameter allows configuring the values before the first page is installed in the page cache. 
Signed-off-by: Ryan Roberts Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4378 --- .../admin-guide/kernel-parameters.txt | 8 ++++ Documentation/admin-guide/mm/transhuge.rst | 13 ++++++ mm/huge_memory.c | 45 ++++++++++++++++++- 3 files changed, 65 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 977b201f52b0..503a55b1d9a7 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -6436,6 +6436,14 @@ See Documentation/admin-guide/mm/transhuge.rst for more details. + thp_file= [KNL] + Format: [KMG]:always|always+exec|never + Can be used to control the default behavior of the + system with respect to file-backed transparent hugepages. + Can be used multiple times for multiple file-backed THP + sizes. See Documentation/admin-guide/mm/transhuge.rst + for more details. + threadirqs [KNL] Force threading of all interrupt handlers except those marked explicitly IRQF_NO_THREAD. diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 25021a6ec9d5..9e4375981d91 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -365,6 +365,19 @@ user, the PMD_ORDER hugepage policy will be overridden. If the policy for PMD_ORDER is not defined within a valid ``thp_shmem``, its policy will default to ``never``. +Each supported file-backed THP size can be controlled by passing +``thp_file=[KMG]:``, where ```` is the THP size and +```` is one of ``always``, ``always+exec`` or ``never``. + +For example, the following will set 64K THP to ``always+exec``:: + + thp_file=64K:always+exec + +``thp_file=`` may be specified multiple times to configure all THP sizes as +required. 
If ``thp_file=`` is specified at least once, any file-backed THP +sizes not explicitly configured on the command line are implicitly set to +``never``. + Hugepages in tmpfs/shmem ======================== diff --git a/mm/huge_memory.c b/mm/huge_memory.c index cdfa964bfbbd..32d6b68c87ec 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -78,6 +78,7 @@ unsigned long huge_anon_orders_inherit __read_mostly; unsigned long huge_file_orders_always __read_mostly; int huge_file_exec_order __read_mostly = -1; static bool anon_orders_configured __initdata; +static bool file_orders_configured; unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, unsigned long vm_flags, bool smaps, @@ -783,7 +784,10 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) * (and therefore THP_ORDERS_ALL_FILE_DEFAULT) isn't a compile-time * constant so we have to do this here. */ - huge_file_orders_always = THP_ORDERS_ALL_FILE_DEFAULT; + if (!file_orders_configured) { + huge_file_orders_always = THP_ORDERS_ALL_FILE_DEFAULT; + file_orders_configured = true; + } *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj); if (unlikely(!*hugepage_kobj)) { @@ -1032,6 +1036,45 @@ static int __init setup_thp_anon(char *str) } __setup("thp_anon=", setup_thp_anon); +static int __init setup_thp_file(char *str) +{ + unsigned long size; + char *state; + int order; + int ret = 0; + + if (!str) + goto out; + + size = (unsigned long)memparse(str, &state); + order = ilog2(size >> PAGE_SHIFT); + if (*state != ':' || !is_power_of_2(size) || size <= PAGE_SIZE || + !(BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT)) + goto out; + + state++; + + if (!strcmp(state, "always")) { + set_bit(order, &huge_file_orders_always); + ret = 1; + } else if (!strcmp(state, "always+exec")) { + set_bit(order, &huge_file_orders_always); + huge_file_exec_order = order; + ret = 1; + } else if (!strcmp(state, "never")) { + clear_bit(order, &huge_file_orders_always); + ret = 1; + } + + if (ret) 
+ file_orders_configured = true; +out: + if (!ret) + pr_warn("thp_file=%s: cannot parse, ignored\n", str); + return ret; +} +__setup("thp_file=", setup_thp_file); + pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) { if (likely(vma->vm_flags & VM_WRITE)) -- Gitee From 89c5434aa80e861cba28633ede7669dfd7fc58f5 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 26 Dec 2024 15:49:48 +0800 Subject: [PATCH 1988/2138] anolis: mm: optimize the 'thp_file' cmdline format MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #9728 Similar to the ‘thp_anon’ parameter, change the 'thp_file' to support the setting of policies with multiple sizes. Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4378 --- mm/huge_memory.c | 98 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 70 insertions(+), 28 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 32d6b68c87ec..bf770b231664 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1038,40 +1038,82 @@ __setup("thp_anon=", setup_thp_anon); static int __init setup_thp_file(char *str) { - unsigned long size; - char *state; - int order; - int ret = 0; + char *token, *range, *policy, *subtoken; + unsigned long always; + char *start_size, *end_size; + int start, end, nr, exec; + char *p; - if (!str) - goto out; + if (!str || strlen(str) + 1 > PAGE_SIZE) + goto err; + strcpy(str_dup, str); - size = (unsigned long)memparse(str, &state); - order = ilog2(size >> PAGE_SHIFT); - if (*state != ':' || !is_power_of_2(size) || size <= PAGE_SIZE || - !(BIT(order) & THP_ORDERS_ALL_FILE_DEFAULT)) - goto out; + always = huge_file_orders_always; + exec = huge_file_exec_order; + p = str_dup; + while ((token = strsep(&p, ";")) != NULL) { + range = strsep(&token, ":"); + policy = token; + + if (!policy) + goto err; - state++; + while ((subtoken = strsep(&range, ",")) != NULL) { + if (strchr(subtoken, '-')) { + start_size = 
strsep(&subtoken, "-"); + end_size = subtoken; - if (!strcmp(state, "always")) { - set_bit(order, &huge_file_orders_always); - ret = 1; - } else if (!strcmp(state, "always+exec")) { - set_bit(order, &huge_file_orders_always); - huge_file_exec_order = order; - ret = 1; - } else if (!strcmp(state, "never")) { - clear_bit(order, &huge_file_orders_always); - ret = 1; + start = get_order_from_str(start_size, + THP_ORDERS_ALL_FILE_DEFAULT); + end = get_order_from_str(end_size, + THP_ORDERS_ALL_FILE_DEFAULT); + } else { + start_size = end_size = subtoken; + start = end = get_order_from_str(subtoken, + THP_ORDERS_ALL_FILE_DEFAULT); + } + + if (start == -EINVAL) { + pr_err("invalid size %s in thp_shmem boot parameter\n", + start_size); + goto err; + } + + if (end == -EINVAL) { + pr_err("invalid size %s in thp_shmem boot parameter\n", + end_size); + goto err; + } + + if (start < 0 || end < 0 || start > end) + goto err; + + nr = end - start + 1; + if (!strcmp(policy, "always")) { + bitmap_set(&always, start, nr); + } else if (!strcmp(policy, "always+exec")) { + if (nr != 1) + goto err; + bitmap_set(&always, start, nr); + exec = start; + } else if (!strcmp(policy, "never")) { + bitmap_clear(&always, start, nr); + if (exec != -1 && !test_bit(exec, &always)) + exec = -1; + } else { + pr_err("invalid policy %s in thp_file boot parameter\n", policy); + goto err; + } + } } - if (ret) - file_orders_configured = true; -out: - if (!ret) - pr_warn("thp_file=%s: cannot parse, ignored\n", str); - return ret; + huge_file_orders_always = always; + huge_file_exec_order = exec; + file_orders_configured = true; + return 1; +err: + pr_warn("thp_file=%s: cannot parse, ignored\n", str); + return 0; } __setup("thp_file=", setup_thp_file); -- Gitee From 5fd57332c9d9f4faad33eda57c56f7d513a76171 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Wed, 25 Dec 2024 17:13:51 +0800 Subject: [PATCH 1989/2138] anolis: mm: add mTHP counters for file folios ANBZ: #9728 Add mTHP counters for file folios. 
Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4378 --- Documentation/admin-guide/mm/transhuge.rst | 4 ++++ include/linux/huge_mm.h | 1 + mm/filemap.c | 8 +++++++- mm/huge_memory.c | 2 ++ 4 files changed, 14 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 9e4375981d91..a44131c4765e 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -625,6 +625,10 @@ nr_anon_partially_mapped an anonymous THP as "partially mapped" and count it here, even though it is not actually partially mapped anymore. +file_alloc + is incremented every time a file huge page is successfully + allocated. + As the system ages, allocating huge pages may be expensive as the system uses memory compaction to copy data around memory to free a huge page for use. There are some counters in ``/proc/vmstat`` to help diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 9633047656e9..5a7100db2956 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -127,6 +127,7 @@ enum mthp_stat_item { MTHP_STAT_SPLIT_DEFERRED, MTHP_STAT_NR_ANON, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, + MTHP_STAT_FILE_ALLOC, __MTHP_STAT_COUNT }; diff --git a/mm/filemap.c b/mm/filemap.c index c4b6715b1bd0..f53e5732083b 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1003,9 +1003,15 @@ struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order) folio = __folio_alloc_node(gfp, order, n); } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie)); + if (folio) + count_mthp_stat(order, MTHP_STAT_FILE_ALLOC); return folio; } - return folio_alloc(gfp, order); + + folio = folio_alloc(gfp, order); + if (folio) + count_mthp_stat(order, MTHP_STAT_FILE_ALLOC); + return folio; } EXPORT_SYMBOL(filemap_alloc_folio); #endif diff --git a/mm/huge_memory.c b/mm/huge_memory.c index bf770b231664..7611126e04a1 100644 --- 
a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -637,6 +637,7 @@ DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED); DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED); DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON); DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED); +DEFINE_MTHP_STAT_ATTR(file_alloc, MTHP_STAT_FILE_ALLOC); static struct attribute *anon_stats_attrs[] = { &anon_fault_alloc_attr.attr, @@ -658,6 +659,7 @@ static struct attribute_group anon_stats_attr_grp = { }; static struct attribute *file_stats_attrs[] = { + &file_alloc_attr.attr, #ifdef CONFIG_SHMEM &shmem_alloc_attr.attr, &shmem_fallback_attr.attr, -- Gitee From 63336da01b3325d4ef6b0d540c9e0c667f8447d6 Mon Sep 17 00:00:00 2001 From: Rongwei Wang Date: Mon, 30 Dec 2024 14:21:39 +0800 Subject: [PATCH 1990/2138] anolis: mm, thp: hugetext: make PIC binary mapping address THP align ANBZ: #9728 The patch mainly to make mmap address of PIC binary is aligned with HPAGE_PMD_SIZE. If not so, the ELF binary that is generated with -fPIC compile option can not use hugepages, because of the mapping address is randomly selected by kernel. Note: Baolin Wang changed the code to make it suitable for the file mTHP. 
Signed-off-by: Rongwei Wang Signed-off-by: Baolin Wang Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/4378 --- fs/binfmt_elf.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index fb2c8d14327a..9016f46f98ab 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1037,6 +1037,7 @@ static int load_elf_binary(struct linux_binprm *bprm) unsigned long k, vaddr; unsigned long total_size = 0; unsigned long alignment; + int exec_order = file_exec_order(); if (elf_ppnt->p_type != PT_LOAD) continue; @@ -1161,6 +1162,10 @@ static int load_elf_binary(struct linux_binprm *bprm) retval = -EINVAL; goto out_free_dentry; } + + if (exec_order > 0 && interpreter && + total_size >= (PAGE_SIZE << exec_order)) + load_bias &= ~((PAGE_SIZE << exec_order) - 1); } error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, -- Gitee From 1d2ae5eb5f2c3e320481e595e3e46973e8b0990e Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Fri, 12 Apr 2024 14:47:50 +0800 Subject: [PATCH 1991/2138] mm: move mm counter updating out of set_pte_range() ANBZ: #9728 commit 1f2d8b4421bd0da2c97fb8bad5cc85fc929fef64 upstream Patch series "mm: batch mm counter updating in filemap_map_pages()", v3. Let's batch mm counter updating to accelerate filemap_map_pages(). This patch (of 2): In order to support batch mm counter updating in filemap_map_pages(), move mm counter updating out of set_pte_range(), the folios are file from filemap, and distinguish folios by vmf->flags and vma->vm_flags from another caller finish_fault(). 
Link: https://lkml.kernel.org/r/20240412064751.119015-1-wangkefeng.wang@huawei.com Link: https://lkml.kernel.org/r/20240412064751.119015-2-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4420 --- mm/filemap.c | 4 ++++ mm/memory.c | 10 ++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index f53e5732083b..20d569b822d1 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3588,6 +3588,8 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, skip: if (count) { set_pte_range(vmf, folio, page, count, addr); + add_mm_counter(vmf->vma->vm_mm, mm_counter_file(page), + count); folio_ref_add(folio, count); if (in_range(vmf->address, addr, count * PAGE_SIZE)) ret = VM_FAULT_NOPAGE; @@ -3602,6 +3604,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, if (count) { set_pte_range(vmf, folio, page, count, addr); + add_mm_counter(vmf->vma->vm_mm, mm_counter_file(page), count); folio_ref_add(folio, count); if (in_range(vmf->address, addr, count * PAGE_SIZE)) ret = VM_FAULT_NOPAGE; @@ -3636,6 +3639,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf, ret = VM_FAULT_NOPAGE; set_pte_range(vmf, folio, page, 1, addr); + add_mm_counter(vmf->vma->vm_mm, mm_counter_file(page), 1); folio_ref_inc(folio); return ret; diff --git a/mm/memory.c b/mm/memory.c index 1e8f547eaa62..2bded21e4d25 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5071,12 +5071,10 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio, entry = pte_mkuffd_wp(entry); /* copy-on-write page */ if (write && !(vma->vm_flags & VM_SHARED)) { - add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr); VM_BUG_ON_FOLIO(nr != 1, folio); folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); } else { - add_mm_counter(vma->vm_mm, mm_counter_file(page), nr); 
folio_add_file_rmap_ptes(folio, page, nr, vma); } set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr); @@ -5114,11 +5112,13 @@ vm_fault_t finish_fault(struct vm_fault *vmf) struct page *page; struct folio *folio; vm_fault_t ret; - int nr_pages; + int type, nr_pages; unsigned long addr = vmf->address; + bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) && + !(vma->vm_flags & VM_SHARED); /* Did we COW the page? */ - if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) + if (is_cow) page = vmf->cow_page; else page = vmf->page; @@ -5197,6 +5197,8 @@ vm_fault_t finish_fault(struct vm_fault *vmf) folio_ref_add(folio, nr_pages - 1); set_pte_range(vmf, folio, page, nr_pages, addr); + type = is_cow ? MM_ANONPAGES : mm_counter_file(page); + add_mm_counter(vma->vm_mm, type, nr_pages); ret = 0; unlock: -- Gitee From e62ad460e79272ed5479216c95b4faadf6c3fb07 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Fri, 12 Apr 2024 14:47:51 +0800 Subject: [PATCH 1992/2138] mm: filemap: batch mm counter updating in filemap_map_pages() ANBZ: #9728 commit ceca44991f3dd5a67b4e0ded6379c5e93e84cb31 upstream Like copy_pte_range()/zap_pte_range(), make mm counter batch updating in filemap_map_pages(), since folios type are same(MM_SHMEMPAGES or MM_FILEPAGES) in filemap_map_pages(), only check the first folio type is enough, the 'lat_pagefault -P 1 file' test from lmbench shows 12% improvement, and the percpu_counter_add_batch() is gone from perf flame graph. 
Link: https://lkml.kernel.org/r/20240412064751.119015-3-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4420 --- mm/filemap.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index 20d569b822d1..92c9cbb8823b 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3562,7 +3562,7 @@ static struct folio *next_uptodate_folio(struct xa_state *xas, static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, struct folio *folio, unsigned long start, unsigned long addr, unsigned int nr_pages, - unsigned int *mmap_miss) + unsigned long *rss, unsigned int *mmap_miss) { vm_fault_t ret = 0; struct page *page = folio_page(folio, start); @@ -3588,8 +3588,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, skip: if (count) { set_pte_range(vmf, folio, page, count, addr); - add_mm_counter(vmf->vma->vm_mm, mm_counter_file(page), - count); + *rss += count; folio_ref_add(folio, count); if (in_range(vmf->address, addr, count * PAGE_SIZE)) ret = VM_FAULT_NOPAGE; @@ -3604,7 +3603,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, if (count) { set_pte_range(vmf, folio, page, count, addr); - add_mm_counter(vmf->vma->vm_mm, mm_counter_file(page), count); + *rss += count; folio_ref_add(folio, count); if (in_range(vmf->address, addr, count * PAGE_SIZE)) ret = VM_FAULT_NOPAGE; @@ -3617,7 +3616,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf, static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf, struct folio *folio, unsigned long addr, - unsigned int *mmap_miss) + unsigned long *rss, unsigned int *mmap_miss) { vm_fault_t ret = 0; struct page *page = &folio->page; @@ -3639,7 +3638,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf, ret = VM_FAULT_NOPAGE; set_pte_range(vmf, folio, page, 1, addr); - 
add_mm_counter(vmf->vma->vm_mm, mm_counter_file(page), 1); + (*rss)++; folio_ref_inc(folio); return ret; @@ -3656,7 +3655,8 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf, XA_STATE(xas, &mapping->i_pages, start_pgoff); struct folio *folio; vm_fault_t ret = 0; - unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved; + unsigned long rss = 0; + unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved, folio_type; rcu_read_lock(); folio = next_uptodate_folio(&xas, mapping, end_pgoff); @@ -3675,6 +3675,8 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf, folio_put(folio); goto out; } + + folio_type = mm_counter_file(&folio->page); do { unsigned long end; @@ -3686,15 +3688,16 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf, if (!folio_test_large(folio)) ret |= filemap_map_order0_folio(vmf, - folio, addr, &mmap_miss); + folio, addr, &rss, &mmap_miss); else ret |= filemap_map_folio_range(vmf, folio, xas.xa_index - folio->index, addr, - nr_pages, &mmap_miss); + nr_pages, &rss, &mmap_miss); folio_unlock(folio); folio_put(folio); } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL); + add_mm_counter(vma->vm_mm, folio_type, rss); pte_unmap_unlock(vmf->pte, vmf->ptl); out: rcu_read_unlock(); -- Gitee From 739e125b6c74c94c5a3f2e274e061efd7daad02c Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:44 +0200 Subject: [PATCH 1993/2138] perf tests: Skip record test if test_loop symbol is missing ANBZ: #13058 commit 3c489dbe69c155c86c4460491d11520cf8ec3637 upstream. perf record test depends on finding symbol test_loop in perf, and fails if perf has been stripped and no debug object is available. In that case, skip the test instead. Example: Note, building with perl support adds option -Wl,-E which causes the linker to add all (global) symbols to the dynamic symbol table. 
So the test_loop symbol, being global, does not get stripped unless NO_LIBPERL=1 Before: $ make NO_LIBPERL=1 -C tools/perf >/dev/null 2>&1 $ strip tools/perf/perf $ tools/perf/perf buildid-cache -p `realpath tools/perf/perf` $ tools/perf/perf test -v 'record tests' 91: perf record tests : --- start --- test child forked, pid 118750 Basic --per-thread mode test Per-thread record [Failed missing output] Register capture test Register capture test [Success] Basic --system-wide mode test System-wide record [Skipped not supported] Basic target workload test Workload record [Failed missing output] test child finished with -1 ---- end ---- perf record tests: FAILED! After: $ tools/perf/perf test -v 'record tests' 91: perf record tests : --- start --- test child forked, pid 120025 perf does not have symbol 'test_loop' perf is missing symbols - skipping test test child finished with -2 ---- end ---- perf record tests: Skip Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-5-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo Reviewed-by: shuai xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4409 --- tools/perf/tests/shell/record.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh index 4fbc74805d52..3988b10b3f59 100755 --- a/tools/perf/tests/shell/record.sh +++ b/tools/perf/tests/shell/record.sh @@ -7,10 +7,15 @@ set -e shelldir=$(dirname "$0") . "${shelldir}"/lib/waiting.sh +# shellcheck source=lib/perf_has_symbol.sh +. 
"${shelldir}"/lib/perf_has_symbol.sh +testsym="test_loop" + +skip_test_missing_symbol ${testsym} + err=0 perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX) testprog="perf test -w thloop" -testsym="test_loop" cleanup() { rm -rf "${perfdata}" -- Gitee From c4e9aa91b2d0d2a0d19c4e5ec6cda75ffe6a4238 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:45 +0200 Subject: [PATCH 1994/2138] perf tests: Skip Arm64 callgraphs test if leafloop symbol is missing ANBZ: #13058 commit fc1de29a8b8ad46b590b2d389b53b4ecf9758273 upstream. The test "Check Arm64 callgraphs are complete in fp mode" depends on finding symbol leafloop in perf, and fails if perf has been stripped and no debug object is available. In that case, skip the test instead. Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-6-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo Reviewed-by: shuai xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4409 --- tools/perf/tests/shell/test_arm_callgraph_fp.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh index 60cd35c73e47..730526c632ce 100755 --- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh +++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh @@ -2,8 +2,14 @@ # Check Arm64 callgraphs are complete in fp mode # SPDX-License-Identifier: GPL-2.0 +shelldir=$(dirname "$0") +# shellcheck source=lib/perf_has_symbol.sh +. 
"${shelldir}"/lib/perf_has_symbol.sh + lscpu | grep -q "aarch64" || exit 2 +skip_test_missing_symbol leafloop + PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX) TEST_PROGRAM="perf test -w leafloop" -- Gitee From 6e0e91bb9f26830510985c7b09a6544e4e26fbd5 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:46 +0200 Subject: [PATCH 1995/2138] perf tests: Skip branch stack sampling test if brstack_bench symbol is missing ANBZ: #13058 commit fcfb5a6189f55669c931dce9fec85280655c515f upstream. The test "Check branch stack sampling" depends on finding symbol brstack_bench (and several others) in perf, and fails if perf has been stripped and no debug object is available. In that case, skip the test instead. Example: Before: $ strip tools/perf/perf $ tools/perf/perf buildid-cache -p `realpath tools/perf/perf` $ tools/perf/perf test -v 'branch stack sampling' 112: Check branch stack sampling : --- start --- test child forked, pid 123741 Testing user branch stack sampling + grep -E -m1 ^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/IND_CALL/.*$ /tmp/__perf_test.program.5Dz1U/perf.script + cleanup + rm -rf /tmp/__perf_test.program.5Dz1U test child finished with -1 ---- end ---- Check branch stack sampling: FAILED! 
After: $ tools/perf/perf test -v 'branch stack sampling' 112: Check branch stack sampling : --- start --- test child forked, pid 125157 perf does not have symbol 'brstack_bench' perf is missing symbols - skipping test test child finished with -2 ---- end ---- Check branch stack sampling: Skip Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-7-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo Reviewed-by: shuai xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4409 --- tools/perf/tests/shell/test_brstack.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh index 09908d71c994..5f14d0cb013f 100755 --- a/tools/perf/tests/shell/test_brstack.sh +++ b/tools/perf/tests/shell/test_brstack.sh @@ -4,6 +4,10 @@ # SPDX-License-Identifier: GPL-2.0 # German Gomez , 2022 +shelldir=$(dirname "$0") +# shellcheck source=lib/perf_has_symbol.sh +. "${shelldir}"/lib/perf_has_symbol.sh + # skip the test if the hardware doesn't support branch stack sampling # and if the architecture doesn't support filter types: any,save_type,u if ! perf record -o- --no-buildid --branch-filter any,save_type,u -- true > /dev/null 2>&1 ; then @@ -11,6 +15,8 @@ if ! perf record -o- --no-buildid --branch-filter any,save_type,u -- true > /dev exit 2 fi +skip_test_missing_symbol brstack_bench + TMPDIR=$(mktemp -d /tmp/__perf_test.program.XXXXX) TESTPROG="perf test -w brstack" -- Gitee From 76ab338627f8c3a22fe02f5468e6b83fe835793e Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:47 +0200 Subject: [PATCH 1996/2138] perf tests: Make data symbol test wait for perf to start ANBZ: #13058 commit 3b24b15cf6fb2dbe1d009a52c9ddcb7721503d8f upstream. 
The perf data symbol test waits 1 second for perf to run and collect data, which may be too little if perf takes a long time to start up, which has been noticed on systems with many CPUs. Use existing wait_for_perf_to_start helper to wait for perf to start. Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-8-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo Reviewed-by: shuai xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4409 --- tools/perf/tests/shell/test_data_symbol.sh | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh index 69bb6fe86c50..e50e54e94f6f 100755 --- a/tools/perf/tests/shell/test_data_symbol.sh +++ b/tools/perf/tests/shell/test_data_symbol.sh @@ -4,6 +4,10 @@ # SPDX-License-Identifier: GPL-2.0 # Leo Yan , 2022 +shelldir=$(dirname "$0") +# shellcheck source=lib/waiting.sh +. "${shelldir}"/lib/waiting.sh + skip_if_no_mem_event() { perf mem record -e list 2>&1 | grep -E -q 'available' && return 0 return 2 @@ -13,6 +17,7 @@ skip_if_no_mem_event || exit 2 TEST_PROGRAM="perf test -w datasym" PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX) +ERR_FILE=$(mktemp /tmp/__perf_test.stderr.XXXXX) check_result() { # The memory report format is as below: @@ -50,13 +55,15 @@ echo "Recording workload..." # specific CPU and test in per-CPU mode. is_amd=$(grep -E -c 'vendor_id.*AuthenticAMD' /proc/cpuinfo) if (($is_amd >= 1)); then - perf mem record -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM & + perf mem record -vvv -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM 2>"${ERR_FILE}" & else - perf mem record --all-user -o ${PERF_DATA} -- $TEST_PROGRAM & + perf mem record -vvv --all-user -o ${PERF_DATA} -- $TEST_PROGRAM 2>"${ERR_FILE}" & fi PERFPID=$! 
+wait_for_perf_to_start ${PERFPID} "${ERR_FILE}" + sleep 1 kill $PERFPID -- Gitee From fbdf33783829200e4e9879a74875af415a443057 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:48 +0200 Subject: [PATCH 1997/2138] perf tests: Skip data symbol test if buf1 symbol is missing ANBZ: #13058 commit 124bf6360ad8fe9267017d10b5dd465d4af73247 upstream. perf data symbol test depends on finding symbol buf1 in perf, and fails if perf has been stripped and no debug object is available. In that case, skip the test instead. Example: Before: $ strip tools/perf/perf $ tools/perf/perf buildid-cache -p `realpath tools/perf/perf` $ tools/perf/perf test -v 'data symbol' 113: Test data symbol : --- start --- test child forked, pid 125646 Recording workload... [ perf record: Woken up 3 times to write data ] [ perf record: Captured and wrote 0.577 MB /tmp/__perf_test.perf.data.Jhbdp (7794 samples) ] Cleaning up files... test child finished with -1 ---- end ---- Test data symbol: FAILED! After: $ tools/perf/perf test -v 'data symbol' 113: Test data symbol : --- start --- test child forked, pid 125747 perf does not have symbol 'buf1' perf is missing symbols - skipping test test child finished with -2 ---- end ---- Test data symbol: Skip Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-9-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo Reviewed-by: shuai xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4409 --- tools/perf/tests/shell/test_data_symbol.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh index e50e54e94f6f..3dfa91832aa8 100755 --- a/tools/perf/tests/shell/test_data_symbol.sh +++ b/tools/perf/tests/shell/test_data_symbol.sh @@ -8,6 +8,9 @@ shelldir=$(dirname "$0") # shellcheck source=lib/waiting.sh . 
"${shelldir}"/lib/waiting.sh +# shellcheck source=lib/perf_has_symbol.sh +. "${shelldir}"/lib/perf_has_symbol.sh + skip_if_no_mem_event() { perf mem record -e list 2>&1 | grep -E -q 'available' && return 0 return 2 @@ -15,6 +18,8 @@ skip_if_no_mem_event() { skip_if_no_mem_event || exit 2 +skip_test_missing_symbol buf1 + TEST_PROGRAM="perf test -w datasym" PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX) ERR_FILE=$(mktemp /tmp/__perf_test.stderr.XXXXX) -- Gitee From 5ea7716a31e95dc588a9b020b893f02e3a7a55d0 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Mon, 30 Dec 2024 14:44:11 +0800 Subject: [PATCH 1998/2138] perf/x86/intel/uncore: Fix the lack of ch_mask format for SPR ANBZ: #13058 cherry-picked from https://lore.kernel.org/all/1735542672-113067-1-git-send-email-renyu.zj@linux.alibaba.com/ perf stat errors out with UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC_LOCAL event because of lack of ch_mask format in drivers, and perf test "104: perf all PMU test (exclusive)" failed. $perf stat -e perf stat -e UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC_LOCAL sleep 1 Initial error: event syntax error: 'UNC_CHA_TOR_INSERTS.IA_HIT_CXL_ACC_LOCAL' \___ unknown term 'ch_mask' for pmu 'uncore_cha_0' 104: perf all PMU test (exclusive) : FAILED! Add ch_mask format for SPR to fix it. 
Fixes: 949b11381f81 ("perf/x86/intel/uncore: Add Sapphire Rapids server CHA support") Signed-off-by: Jing Zhang Reviewed-by: shuai xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4409 --- arch/x86/events/intel/uncore_snbep.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index dcfabf678807..23632761b341 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -5963,6 +5963,7 @@ static struct attribute *spr_uncore_cha_formats_attr[] = { &format_attr_inv.attr, &format_attr_thresh8.attr, &format_attr_filter_tid5.attr, + &format_attr_ch_mask.attr, NULL, }; static const struct attribute_group spr_uncore_chabox_format_group = { -- Gitee From 9b0bd7be803ac147ba0dc15c75b2a2650cac5dff Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 24 Sep 2024 15:16:39 +0800 Subject: [PATCH 1999/2138] anolis: KVM: SVM: CSV: Fix the vm_size even if CSV3 feature is unsupported on Hygon CPUs ANBZ: #13043 Assume the userspace request CSV3's KVM ioctl interface on Hygon CPUs before C86-4G, the vm_size if as value sizeof(struct kvm_svm), and functions for the CSV3's KVM ioctl interface try to check whether the guest is a CSV3 guest by access the structure as below: struct kvm_csv_info { ...... bool csv3_active; ...... }; struct kvm_svm_csv { struct kvm_svm kvm_svm; struct kvm_csv_info csv_info; }; But the csv_info field of struct kvm_svm_csv will not be allocated, the functions for the CSV3's KVM ioctl interface will not get value of csv3_active field of struct kvm_csv_info. Always fix the vm_size in csv_init() to address the above issue. 
Fixes: 431fa90c376f ("anolis: KVM: SVM: CSV: Add KVM_CSV3_INIT command") Fixes: b47f55b6965b ("anolis: KVM: SVM: CSV: Add KVM_CSV3_LAUNCH_ENCRYPT_DATA command") Fixes: c2b61109d02e ("anolis: KVM: SVM: CSV: Add KVM_CSV3_LAUNCH_ENCRYPT_VMCB command") Fixes: 2f5ed4963843 ("anolis: KVM: SVM: CSV: Manage CSV3 guest's nested page table") Fixes: ee5068f27f93 ("anolis: KVM: SVM: CSV: Add KVM_CSV3_SEND_ENCRYPT_DATA command") Fixes: 7810f4f82863 ("anolis: KVM: SVM: CSV: Add KVM_CSV3_SEND_ENCRYPT_CONTEXT command") Fixes: bdba0423f9c6 ("anolis: KVM: SVM: CSV: Add KVM_CSV3_RECEIVE_ENCRYPT_DATA command") Fixes: dd12cebeebd5 ("anolis: KVM: SVM: CSV: Add KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT command") Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4387 --- arch/x86/kvm/svm/csv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index f6e8a97678af..fbc8251648d2 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -2623,6 +2623,7 @@ void __init csv_init(struct kvm_x86_ops *ops) memcpy(&csv_x86_ops, ops, sizeof(struct kvm_x86_ops)); + ops->vm_size = sizeof(struct kvm_svm_csv); ops->mem_enc_ioctl = csv_mem_enc_ioctl; ops->vm_attestation = csv_vm_attestation; ops->control_pre_system_reset = csv_control_pre_system_reset; @@ -2630,7 +2631,6 @@ void __init csv_init(struct kvm_x86_ops *ops) if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) { ops->vm_destroy = csv_vm_destroy; - ops->vm_size = sizeof(struct kvm_svm_csv); ops->handle_exit = csv_handle_exit; ops->guest_memory_reclaimed = csv_guest_memory_reclaimed; } -- Gitee From 8b9ebcae8e763882c9d101b0ac5a40ac50240aa2 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 7 Oct 2024 18:16:23 +0800 Subject: [PATCH 2000/2138] anolis: crypto: ccp: Get api version again when update Hygon CSV firmware at runtime ANBZ: #13043 The commit af688a93cb58 ("anolis: crypto: ccp: 
Implement CSV_DOWNLOAD_FIRMWARE ioctl command") support update Hygon CSV firmware at runtime, but it don't update API version info in the driver after issues the DOWNLOAD_FIRMWARE command. When we want use the new features in the updated firmware, the version check in this driver will fail. To address this problem, we should regain the api version when DOWNLOAD_FIRMWARE command returns. Fixes: af688a93cb58 ("anolis: crypto: ccp: Implement CSV_DOWNLOAD_FIRMWARE ioctl command") Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4387 --- drivers/crypto/ccp/hygon/csv-dev.c | 53 ++++++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 4e6b9403f9a6..8da1d2cef19c 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -129,6 +129,45 @@ static int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp) return ret; } +static int csv_get_api_version(void) +{ + struct sev_device *sev; + struct sev_user_data_status *status; + int error = 0, ret; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (!psp_master || !psp_master->sev_data) + return -ENODEV; + + sev = psp_master->sev_data; + + status = kzalloc(sizeof(*status), GFP_KERNEL); + if (!status) + return -ENOMEM; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, + status, &error); + if (ret) { + dev_err(sev->dev, + "CSV: failed to get status. 
Error: %#x\n", error); + goto e_free_status; + } + + sev->api_major = status->api_major; + sev->api_minor = status->api_minor; + sev->build = status->build; + sev->state = status->state; + + csv_update_api_version(status); + + ret = 0; +e_free_status: + kfree(status); + return ret; +} + static int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) { struct sev_data_download_firmware *data = NULL; @@ -186,10 +225,20 @@ static int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) ret = hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_DOWNLOAD_FIRMWARE, data, &argp->error); - if (ret) + if (ret) { pr_err("Failed to update CSV firmware: %#x\n", argp->error); - else + goto err_free_page; + } else { pr_info("CSV firmware update successful\n"); + } + + /* + * Synchronize API version status. The return value of csv_get_api_version + * will inform the user of any error encountered when attempting to + * communicate with the Hygon PSP after the DOWNLOAD_FIRMWARE API completes + * successfully. + */ + ret = csv_get_api_version(); err_free_page: __free_pages(p, order); -- Gitee From 4771c927b60d0a0148898d039f43be948a862b2a Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sat, 2 Nov 2024 18:56:25 +0800 Subject: [PATCH 2001/2138] anolis: KVM: SVM: CSV: Ensure all the GPRs and some non-GPRs are synced before LAUNCH_ENCRYPT_VMCB ANBZ: #13043 Even though most of the GPRs is zero at reset state, we should explicitly set these before LAUNCH_ENCRYPT_VMCB. The DR6 register is not zero at reset state, we should explicitly set DR6 before LAUNCH_ENCRYPT_VMCB. The PKRU currently is unsupported on Hygon CPUs, this register is zero at reset state, nevertheless explicitly set PKRU before LAUNCH_ENCRYPT_VMCB. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4387 --- arch/x86/kvm/svm/csv.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index fbc8251648d2..9ce191e7c7c6 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -1208,14 +1208,43 @@ static int csv3_sync_vmsa(struct vcpu_svm *svm) if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) return -EINVAL; + /* + * CSV3 will use a VMSA that is pointed to by the VMCB, not + * the traditional VMSA that is part of the VMCB. Copy the + * traditional VMSA as it has been built so far (in prep + * for LAUNCH_ENCRYPT_VMCB) to be the initial CSV3 state. + */ memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); /* Sync registgers per spec. */ save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; + save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX]; + save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX]; save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; + save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP]; + save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP]; + save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI]; + save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI]; +#ifdef CONFIG_X86_64 + save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8]; + save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9]; + save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10]; + save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11]; + save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12]; + save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13]; + save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14]; + save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15]; +#endif save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; + + /* Sync some non-GPR registers before encrypting */ save->xcr0 = svm->vcpu.arch.xcr0; + save->pkru = svm->vcpu.arch.pkru; save->xss = svm->vcpu.arch.ia32_xss; + save->dr6 = svm->vcpu.arch.dr6; + + pr_debug("Virtual Machine Save Area (VMSA):\n"); + 
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false); return 0; } -- Gitee From 596096b2126ef4048bf436355521292454a7a511 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Wed, 25 Sep 2024 21:18:41 +0800 Subject: [PATCH 2002/2138] anolis: crypto: ccp: Provide csv_get_extension_info() to present extensions of newer CSV firmware ANBZ: #13043 As more and more confidential computing features are provided, the hypervisor and userspace VMM should recognize the extended features. Provide csv_get_extension_info() to present the extended confidential computing features of the newer CSV firmware so that the hypervisor can utilize the extended features when launch and running a confidential guest. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4387 --- drivers/crypto/ccp/hygon/csv-dev.c | 32 ++++++++++++++++++++++++++++++ include/linux/psp-hygon.h | 21 ++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 8da1d2cef19c..62e8835e9b0e 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -665,6 +665,38 @@ int csv_check_stat_queue_status(int *psp_ret) } EXPORT_SYMBOL_GPL(csv_check_stat_queue_status); +int csv_get_extension_info(void *buf, size_t *size) +{ + /* If @hygon_csv_build is 0, this means CSV firmware doesn't exist or + * the psp device doesn't exist. + */ + if (hygon_csv_build == 0) + return -ENODEV; + + /* The caller must provide valid @buf and the @buf must >= 4 bytes in + * size. + */ + if (!buf || !size || *size < sizeof(uint32_t)) { + if (size) + *size = sizeof(uint32_t); + + return -EINVAL; + } + + /* Since firmware with build id 2200, support: + * a. issue LAUNCH_ENCRYPT_DATA command more than once for a + * CSV3 guest. + * b. inject secret to a CSV3 guest. 
+ */ + if (csv_version_greater_or_equal(2200)) { + *(uint32_t *)buf |= CSV_EXT_CSV3_MULT_LUP_DATA; + *(uint32_t *)buf |= CSV_EXT_CSV3_INJ_SECRET; + } + + return 0; +} +EXPORT_SYMBOL_GPL(csv_get_extension_info); + #ifdef CONFIG_HYGON_CSV int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 39c1a149e658..acd849f9ee9e 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -19,6 +19,11 @@ #define CSV_FW_MAX_SIZE 0x80000 /* 512KB */ +#define CSV_EXT_CSV3_MULT_LUP_DATA_BIT 0 +#define CSV_EXT_CSV3_MULT_LUP_DATA (1 << CSV_EXT_CSV3_MULT_LUP_DATA_BIT) +#define CSV_EXT_CSV3_INJ_SECRET_BIT 1 +#define CSV_EXT_CSV3_INJ_SECRET (1 << CSV_EXT_CSV3_INJ_SECRET_BIT) + /** * Guest/platform management commands for CSV */ @@ -507,6 +512,20 @@ int kvm_pv_psp_copy_forward_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, g int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, gpa_t data_gpa, uint32_t psp_ret); + +/** + * csv_get_extension_info - collect extension set of the firmware + * + * @buf: The buffer to save extension set + * @size: The size of the @buf + * + * Returns: + * 0 if @buf is filled with extension bitflags + * -%ENODEV if the CSV device is not available + * -%EINVAL if @buf is NULL or @size is too smaller + */ +int csv_get_extension_info(void *buf, size_t *size); + #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; } @@ -542,6 +561,8 @@ static inline int kvm_pv_psp_forward_op(struct kvm_vpsp *vpsp, uint32_t cmd, gpa_t data_gpa, uint32_t psp_ret) { return -ENODEV; } +static inline int csv_get_extension_info(void *buf, size_t *size) { return -ENODEV; } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); -- Gitee From a311941e4e34c170723e3b828604259b0d5f024b Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 26 Sep 2024 14:59:25 
+0800 Subject: [PATCH 2003/2138] anolis: KVM: SVM: CSV: Provide KVM_CAP_HYGON_COCO_EXT interface ANBZ: #13043 The CSV1/2/3 firmware will provide more confidential features, it's recommended that the user space VMM (e.g. Qemu) inquiry about which features are supported by the system and decide to utilise some of these supported features. Provide KVM_CAP_HYGON_COCO_EXT ioctl interface so that the user space VMM, KVM, and firmware can negotiate how to interoperate with each other. The KVM_CAP_HYGON_COCO_EXT interface will address many compatibility issues when any one of the user space VMM, KVM, or firmware is not up-to-date. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4387 --- arch/x86/include/asm/kvm-x86-ops.h | 2 + arch/x86/include/asm/kvm_host.h | 2 + arch/x86/kvm/svm/csv.c | 83 ++++++++++++++++++++++++++++++ arch/x86/kvm/x86.c | 23 +++++++++ include/uapi/linux/kvm.h | 7 +++ 5 files changed, 117 insertions(+) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 348b78389406..7c702a0faa32 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -140,6 +140,8 @@ KVM_X86_OP_OPTIONAL(vm_attestation) KVM_X86_OP_OPTIONAL(arch_hypercall) KVM_X86_OP_OPTIONAL(control_pre_system_reset) KVM_X86_OP_OPTIONAL(control_post_system_reset) +KVM_X86_OP_OPTIONAL(get_hygon_coco_extension) +KVM_X86_OP_OPTIONAL(enable_hygon_coco_extension) #undef KVM_X86_OP #undef KVM_X86_OP_OPTIONAL diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 97fc08a49c91..afd6dee38a89 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1763,6 +1763,8 @@ struct kvm_x86_ops { int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len); int (*control_pre_system_reset)(struct kvm *kvm); int (*control_post_system_reset)(struct kvm *kvm); + int (*get_hygon_coco_extension)(struct kvm 
*kvm); + int (*enable_hygon_coco_extension)(struct kvm *kvm, u32 arg); int (*arch_hypercall)(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 a2, u64 a3); }; diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 9ce191e7c7c6..e1e9f0ee7c56 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -871,6 +871,13 @@ struct kvm_csv_info { struct list_head smr_list; /* List of guest secure memory regions */ unsigned long nodemask; /* Nodemask where CSV3 guest's memory resides */ + + /* The following 5 fields record the extension status for current VM */ + bool fw_ext_valid; /* if @fw_ext field is valid */ + u32 fw_ext; /* extensions supported by current platform */ + bool kvm_ext_valid; /* if @kvm_ext field is valid */ + u32 kvm_ext; /* extensions supported by KVM */ + u32 inuse_ext; /* extensions inused by current VM */ }; struct kvm_svm_csv { @@ -2604,6 +2611,80 @@ static void csv_free_asid_userid_array(void) #endif /* CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ +/** + * When userspace recognizes these extensions, it is suggested that the userspace + * enables these extensions through KVM_ENABLE_CAP, so that both the userspace + * and KVM can utilize these extensions. + */ +static int csv_get_hygon_coco_extension(struct kvm *kvm) +{ + struct kvm_csv_info *csv; + size_t len = sizeof(uint32_t); + int ret = 0; + + if (!kvm) + return 0; + + csv = &to_kvm_svm_csv(kvm)->csv_info; + + if (csv->fw_ext_valid == false) { + ret = csv_get_extension_info(&csv->fw_ext, &len); + + if (ret == -ENODEV) { + pr_err("Unable to interact with CSV firmware!\n"); + return 0; + } else if (ret == -EINVAL) { + pr_err("Need %ld bytes to record fw extension!\n", len); + return 0; + } + + csv->fw_ext_valid = true; + } + + /* The kvm_ext field of kvm_csv_info is filled in only if the fw_ext + * field of kvm_csv_info is valid. + */ + if (csv->kvm_ext_valid == false) { + /* Currently, KVM doesn't support any extensions, we don't need + * to fill in kvm_ext field of kvm_csv_info here. 
+ */ + csv->kvm_ext_valid = true; + } + + /* Return extension info only if both fw_ext and kvm_ext fields of + * kvm_csv_info are valid. + */ + pr_debug("%s: fw_ext=%#x kvm_ext=%#x\n", + __func__, csv->fw_ext, csv->kvm_ext); + return (int)csv->kvm_ext; +} + +/** + * Return 0 means KVM accept the negotiation from userspace. Both the + * userspace and KVM should not utilise extensions if failed to negotiate. + */ +static int csv_enable_hygon_coco_extension(struct kvm *kvm, u32 arg) +{ + struct kvm_csv_info *csv; + + if (!kvm) + return -EINVAL; + + csv = &to_kvm_svm_csv(kvm)->csv_info; + + /* Negotiation is accepted only if both the fw_ext and kvm_ext fields + * of kvm_csv_info are valid and the virtual machine is a CSV3 guest. + */ + if (csv->fw_ext_valid && csv->kvm_ext_valid && csv3_guest(kvm)) { + csv->inuse_ext = csv->kvm_ext & arg; + pr_debug("%s: inuse_ext=%#x\n", __func__, csv->inuse_ext); + return csv->inuse_ext; + } + + /* Userspace should not utilise the extensions */ + return -EINVAL; +} + void __init csv_hardware_setup(unsigned int max_csv_asid) { unsigned int nr_asids = max_csv_asid + 1; @@ -2657,6 +2738,8 @@ void __init csv_init(struct kvm_x86_ops *ops) ops->vm_attestation = csv_vm_attestation; ops->control_pre_system_reset = csv_control_pre_system_reset; ops->control_post_system_reset = csv_control_post_system_reset; + ops->get_hygon_coco_extension = csv_get_hygon_coco_extension; + ops->enable_hygon_coco_extension = csv_enable_hygon_coco_extension; if (boot_cpu_has(X86_FEATURE_SEV_ES) && boot_cpu_has(X86_FEATURE_CSV3)) { ops->vm_destroy = csv_vm_destroy; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d65a54c97924..a1c763bb697f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4646,6 +4646,18 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_AMD64_SEV_ES_GHCB); break; + case KVM_CAP_HYGON_COCO_EXT: + r = 0; + + /* + * Before running a Hygon confidential guest, the 
userspace + * should find the advanced extensions of the Hygon CSV + * technology. If the userspace recognize the extensions, it's + * suggested that the userspace to utilise extensions. + */ + if (is_x86_vendor_hygon() && kvm_x86_ops.get_hygon_coco_extension) + r = static_call(kvm_x86_get_hygon_coco_extension)(kvm); + break; default: break; } @@ -6523,6 +6535,17 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, } mutex_unlock(&kvm->lock); break; + case KVM_CAP_HYGON_COCO_EXT: + r = -EINVAL; + + /* + * The userspace negotiate with KVM to utilise extensions of + * Hygon CSV technology. + */ + if (is_x86_vendor_hygon() && kvm_x86_ops.enable_hygon_coco_extension) + r = static_call(kvm_x86_enable_hygon_coco_extension)(kvm, + (u32)cap->args[0]); + break; default: r = -EINVAL; break; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index b4b5a6982086..75c77eff45ea 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1202,6 +1202,13 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 #define KVM_CAP_SEV_ES_GHCB 500 +#define KVM_CAP_HYGON_COCO_EXT 501 +/* support userspace to request firmware to build CSV3 guest's memory space */ +#define KVM_CAP_HYGON_COCO_EXT_CSV3_SET_PRIV_MEM (1 << 0) +/* support request to update CSV3 guest's memory region multiple times */ +#define KVM_CAP_HYGON_COCO_EXT_CSV3_MULT_LUP_DATA (1 << 1) +/* support request to inject secret to CSV3 guest */ +#define KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET (1 << 2) #ifdef KVM_CAP_IRQ_ROUTING -- Gitee From 954d8f2d298f8ef777efc7c5ce1a8ea17c14336c Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 26 Sep 2024 16:49:28 +0800 Subject: [PATCH 2004/2138] anolis: KVM: SVM: CSV: Provide KVM_CSV3_SET_GUEST_PRIVATE_MEMORY ioctl interface ANBZ: #13043 For newer CSV1/2/3 firmware, multiple LAUNCH_ENCRYPT_DATA commands are allowed to be issued. However, SET_GUEST_PRIVATE_MEMORY command can only be issued once. 
Provide a separate ioctl interface KVM_CSV3_SET_GUEST_PRIVATE_MEMORY here. The user space VMM can negotiate with KVM on whether to enable the capability KVM_CAP_HYGON_COCO_EXT_CSV3_SET_PRIV_MEM. When this capability is enabled, the user space VMM should explicitly request the KVM_CSV3_SET_GUEST_PRIVATE_MEMORY ioctl interface, and the KVM_CSV3_LAUNCH_ENCRYPT_DATA ioctl handler will skip the process of issuing the SET_GUEST_PRIVATE_MEMORY command, in addition, the user space VMM will have the chance to request KVM_CSV3_LAUNCH_ENCRYPT_DATA ioctl interface more than once. When this capability is disabled, the user space will not request the KVM_CSV3_SET_GUEST_PRIVATE_MEMORY ioctl interface, and the KVM_CSV3_LAUNCH_ENCRYPT_DATA ioctl handler will still issue the SET_GUEST_PRIVATE_MEMORY command. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4387 --- arch/x86/kvm/svm/csv.c | 35 ++++++++++++++++++++++++----------- include/uapi/linux/kvm.h | 2 ++ 2 files changed, 26 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index e1e9f0ee7c56..38f0d13f6934 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -965,7 +965,7 @@ static bool csv3_is_mmio_pfn(kvm_pfn_t pfn) E820_TYPE_RAM); } -static int csv3_set_guest_private_memory(struct kvm *kvm) +static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_memslots *slots = kvm_memslots(kvm); struct kvm_memory_slot *memslot; @@ -980,7 +980,7 @@ static int csv3_set_guest_private_memory(struct kvm *kvm) LIST_HEAD(tmp_list); struct list_head *pos, *q; u32 i = 0, count = 0, remainder; - int ret = 0, error; + int ret = 0; u64 size = 0, nr_smr = 0, nr_pages = 0; u32 smr_entry_shift; int bkt; @@ -992,6 +992,10 @@ static int csv3_set_guest_private_memory(struct kvm *kvm) if (!csv3_guest(kvm)) return -ENOTTY; + /* The smr_list should be initialized only once */ + if 
(!list_empty(&csv->smr_list)) + return -EFAULT; + nodes_clear(nodemask); for_each_set_bit(i, &csv->nodemask, BITS_PER_LONG) if (i < MAX_NUMNODES) @@ -1074,7 +1078,7 @@ static int csv3_set_guest_private_memory(struct kvm *kvm) /* set secury memory region for launch enrypt data */ ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_SET_GUEST_PRIVATE_MEMORY, - set_guest_private_memory, &error); + set_guest_private_memory, &argp->error); if (ret) goto e_free_smr; @@ -1134,10 +1138,17 @@ static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) goto exit; } - /* Allocate all the guest memory from CMA */ - ret = csv3_set_guest_private_memory(kvm); - if (ret) - goto exit; + /* + * If userspace request to invoke CSV3_CMD_SET_GUEST_PRIVATE_MEMORY + * explicitly, we should not calls to csv3_set_guest_private_memory() + * here. + */ + if (!(csv->inuse_ext & KVM_CAP_HYGON_COCO_EXT_CSV3_SET_PRIV_MEM)) { + /* Allocate all the guest memory from CMA */ + ret = csv3_set_guest_private_memory(kvm, argp); + if (ret) + goto exit; + } num_entries = params.len / PAGE_SIZE; num_entries_in_block = ARRAY_SIZE(blocks->entry); @@ -1607,7 +1618,7 @@ static int csv3_receive_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) if (unlikely(list_empty(&csv->smr_list))) { /* Allocate all the guest memory from CMA */ - ret = csv3_set_guest_private_memory(kvm); + ret = csv3_set_guest_private_memory(kvm, argp); if (ret) goto exit; } @@ -2339,6 +2350,9 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT: r = csv3_receive_encrypt_context(kvm, &sev_cmd); break; + case KVM_CSV3_SET_GUEST_PRIVATE_MEMORY: + r = csv3_set_guest_private_memory(kvm, &sev_cmd); + break; default: /* * If the command is compatible between CSV and SEV, the @@ -2645,9 +2659,8 @@ static int csv_get_hygon_coco_extension(struct kvm *kvm) * field of kvm_csv_info is valid. 
*/ if (csv->kvm_ext_valid == false) { - /* Currently, KVM doesn't support any extensions, we don't need - * to fill in kvm_ext field of kvm_csv_info here. - */ + if (csv3_guest(kvm)) + csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_SET_PRIV_MEM; csv->kvm_ext_valid = true; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 75c77eff45ea..07ea54b36f1d 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -2342,6 +2342,8 @@ enum csv3_cmd_id { KVM_CSV3_RECEIVE_ENCRYPT_DATA, KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT, + KVM_CSV3_SET_GUEST_PRIVATE_MEMORY = 0xc8, + KVM_CSV3_NR_MAX, }; -- Gitee From dce588782eb450ea465d1f42dc066f7209215baa Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 27 Sep 2024 15:12:20 +0800 Subject: [PATCH 2005/2138] anolis: KVM: SVM: CSV: Support issue non-4K aligned CSV3_CMD_LAUNCH_ENCRYPT_DATA and more than once ANBZ: #13043 So far, the KVM_CSV3_LAUNCH_ENCRYPT_DATA handler only process 4K aligned data, this is insufficient because we need encrypt Non-4K aligned data to CSV3 guest's private memory in some cases. To address this, we provide new function csv3_launch_encrypt_data_alt_2 to process Non-4K aligned data. The new function will be called only when the cap KVM_CAP_HYGON_COCO_EXT_CSV3_MULT_LUP_DATA is enabled for current CSV3 guest. In addition, to simplify the KVM_CSV3_LAUNCH_ENCRYPT_DATA request from the user space, the function csv3_launch_encrypt_data_alt_2 allows issue CSV3_CMD_LAUNCH_ENCRYPT_DATA more than once if necessary. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4387 --- arch/x86/kvm/svm/csv.c | 234 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 229 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 38f0d13f6934..f2f63992bb95 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -1110,7 +1110,12 @@ static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *ar return ret; } -static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +/** + * csv3_launch_encrypt_data_alt_1 - The legacy handler to encrypt CSV3 + * guest's memory before VMRUN. + */ +static int csv3_launch_encrypt_data_alt_1(struct kvm *kvm, + struct kvm_sev_cmd *argp) { struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; struct kvm_csv3_launch_encrypt_data params; @@ -1124,9 +1129,6 @@ static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) unsigned long pfn, pfn_sme_mask; int ret = 0; - if (!csv3_guest(kvm)) - return -ENOTTY; - if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) { ret = -EFAULT; @@ -1218,6 +1220,225 @@ static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +#define MAX_ENTRIES_PER_BLOCK \ + (sizeof(((struct encrypt_data_block *)0)->entry) / \ + sizeof(((struct encrypt_data_block *)0)->entry[0])) +#define MAX_BLOCKS_PER_CSV3_LUP_DATA \ + (sizeof(((struct csv3_data_launch_encrypt_data *)0)->data_blocks) / \ + sizeof(((struct csv3_data_launch_encrypt_data *)0)->data_blocks[0])) +#define MAX_ENTRIES_PER_CSV3_LUP_DATA \ + (MAX_BLOCKS_PER_CSV3_LUP_DATA * MAX_ENTRIES_PER_BLOCK) + +/** + * __csv3_launch_encrypt_data - The helper for handler + * csv3_launch_encrypt_data_alt_2. 
+ */ +static int __csv3_launch_encrypt_data(struct kvm *kvm, + struct kvm_sev_cmd *argp, + struct kvm_csv3_launch_encrypt_data *params, + void *src_buf, + unsigned int start_pgoff, + unsigned int end_pgoff) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_launch_encrypt_data *data = NULL; + struct encrypt_data_block *block = NULL; + struct page **pages = NULL; + unsigned long len, remain_len; + unsigned long pfn, pfn_sme_mask, last_pfn; + unsigned int pgoff = start_pgoff; + int i, j; + int ret = -ENOMEM; + + /* Alloc command buffer for CSV3_CMD_LAUNCH_ENCRYPT_DATA command */ + data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT); + if (!data) + return -ENOMEM; + + /* Alloc pages for data_blocks[] in the command buffer */ + len = ARRAY_SIZE(data->data_blocks) * sizeof(struct page *); + pages = kzalloc(len, GFP_KERNEL_ACCOUNT); + if (!pages) + goto e_free_data; + + for (i = 0; i < ARRAY_SIZE(data->data_blocks); i++) { + pages[i] = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (!pages[i]) + goto e_free_pages; + } + + i = 0; + while (i < ARRAY_SIZE(data->data_blocks) && pgoff < end_pgoff) { + block = (struct encrypt_data_block *)page_to_virt(pages[i]); + + j = 0; + last_pfn = 0; + while (j < ARRAY_SIZE(block->entry) && pgoff < end_pgoff) { + pfn = vmalloc_to_pfn(src_buf + (pgoff << PAGE_SHIFT)); + pfn_sme_mask = __sme_set(pfn << PAGE_SHIFT) >> PAGE_SHIFT; + + /* + * One entry can record a number of contiguous physical + * pages. If the current page is not adjacent to the + * previous physical page, we should record the page to + * the next entry. If entries of current block is used + * up, we should try the next block. 
+ */ + if (last_pfn && (last_pfn + 1 == pfn)) { + block->entry[j].npages++; + } else if (j < (ARRAY_SIZE(block->entry) - 1)) { + /* @last_pfn == 0 means fill in entry[0] */ + if (likely(last_pfn != 0)) + j++; + block->entry[j].pfn = pfn_sme_mask; + block->entry[j].npages = 1; + } else { + break; + } + + /* + * Succeed to record one page, increase the page offset. + * We also record the pfn of current page so that we can + * record the contiguous physical pages into one entry. + */ + last_pfn = pfn; + pgoff++; + } + + i++; + } + + if (pgoff < end_pgoff) { + pr_err("CSV3: Fail to fill in LAUNCH_ENCRYPT_DATA command!\n"); + goto e_free_pages; + } + + len = (end_pgoff - start_pgoff) << PAGE_SHIFT; + clflush_cache_range(src_buf + (start_pgoff << PAGE_SHIFT), len); + + /* Fill in command buffer */ + data->handle = csv->sev->handle; + + if (start_pgoff == 0) { + data->gpa = params->gpa; + len -= params->gpa & ~PAGE_MASK; + } else { + data->gpa = (params->gpa & PAGE_MASK) + (start_pgoff << PAGE_SHIFT); + } + remain_len = params->len - (data->gpa - params->gpa); + + data->length = (len <= remain_len) ? len : remain_len; + + for (j = 0; j < i; j++) + data->data_blocks[j] = __sme_set(page_to_phys(pages[j])); + + /* Issue command */ + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_DATA, + data, &argp->error); + +e_free_pages: + for (i = 0; i < ARRAY_SIZE(data->data_blocks); i++) { + if (pages[i]) + __free_page(pages[i]); + } + kfree(pages); +e_free_data: + kfree(data); + + return ret; +} + +/** + * csv3_launch_encrypt_data_alt_2 - The handler to support encrypt CSV3 + * guest's memory before VMRUN. This handler support issue API command + * multiple times, both the GPA and length of the memory region are not + * required to be 4K-aligned. 
+ */ +static int csv3_launch_encrypt_data_alt_2(struct kvm *kvm, + struct kvm_sev_cmd *argp) +{ + struct kvm_csv3_launch_encrypt_data params; + void *buffer = NULL; + unsigned long len; + unsigned int total_pages, start_pgoff, next_pgoff; + int ret = 0; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) { + return -EFAULT; + } + + /* Both the GPA and length must be 16 Bytes aligned at least */ + if (!params.len || + !params.uaddr || + !IS_ALIGNED(params.len, 16) || + !IS_ALIGNED(params.gpa, 16)) { + return -EINVAL; + } + + /* + * Alloc buffer to save source data. When we copy source data from + * userspace to the buffer, the data in the first page of the buffer + * should keep the offset as params.gpa. + */ + len = PAGE_ALIGN((params.gpa & ~PAGE_MASK) + params.len); + total_pages = len >> PAGE_SHIFT; + next_pgoff = 0; + + buffer = vzalloc(len); + if (!buffer) + return -ENOMEM; + + if (copy_from_user(buffer + (params.gpa & ~PAGE_MASK), + (void __user *)params.uaddr, params.len)) { + ret = -EFAULT; + goto e_free_buffer; + } + + /* + * If the source data is too large, we should issue command more than + * once. The LAUNCH_ENCRYPT_DATA API updates not only the measurement + * of the data, but also the measurement of the metadata correspond to + * the data. The guest owner is obligated to verify the launch + * measurement, so guest owner must be aware of the launch measurement + * of each LAUNCH_ENCRYPT_DATA API command. If we processing pages more + * than MAX_ENTRIES_PER_CSV3_LUP_DATA in each API command, the guest + * owner could not able to calculate the correct measurement and fail + * to verify the launch measurement. For this reason, we limit the + * maximum number of pages processed by each API command to + * MAX_ENTRIES_PER_CSV3_LUP_DATA. 
+ */ + while (next_pgoff < total_pages) { + start_pgoff = next_pgoff; + next_pgoff += MAX_ENTRIES_PER_CSV3_LUP_DATA; + + if (next_pgoff > total_pages) + next_pgoff = total_pages; + + ret = __csv3_launch_encrypt_data(kvm, argp, ¶ms, + buffer, start_pgoff, next_pgoff); + if (ret) + goto e_free_buffer; + } + +e_free_buffer: + vfree(buffer); + return ret; +} + +static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (!(csv->inuse_ext & KVM_CAP_HYGON_COCO_EXT_CSV3_MULT_LUP_DATA)) + return csv3_launch_encrypt_data_alt_1(kvm, argp); + + return csv3_launch_encrypt_data_alt_2(kvm, argp); +} + static int csv3_sync_vmsa(struct vcpu_svm *svm) { struct sev_es_save_area *save = svm->sev_es.vmsa; @@ -2659,8 +2880,11 @@ static int csv_get_hygon_coco_extension(struct kvm *kvm) * field of kvm_csv_info is valid. */ if (csv->kvm_ext_valid == false) { - if (csv3_guest(kvm)) + if (csv3_guest(kvm)) { csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_SET_PRIV_MEM; + if (csv->fw_ext & CSV_EXT_CSV3_MULT_LUP_DATA) + csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_MULT_LUP_DATA; + } csv->kvm_ext_valid = true; } -- Gitee From 2c1416193f6b61175c6dd1b6fba4d1258beb1d55 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 27 Sep 2024 21:07:06 +0800 Subject: [PATCH 2006/2138] anolis: KVM: SVM: CSV: Support inject secret to Hygon CSV3 guest ANBZ: #13043 We should provide GPA in LAUNCH_SECRET API command buffer for CSV3 guest. We introduce a appropriate function csv_launch_secret to process user space KVM_SEV_LAUNCH_SECRET ioctl request, irrespective of whether it is a CSV, CSV2 or CSV3 guest. For CSV3 guest, the member guest_uaddr of the structure kvm_sev_launch_secret should be the value of GPA. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4387 --- arch/x86/kvm/svm/csv.c | 95 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index f2f63992bb95..147a43e38254 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -2505,6 +2506,95 @@ static void csv_guest_memory_reclaimed(struct kvm *kvm) } } +static int csv_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct sev_data_launch_secret data; + struct kvm_sev_launch_secret params; + struct page **pages; + void *blob, *hdr; + unsigned long n, i; + int ret, offset; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, sizeof(params))) + return -EFAULT; + + memset(&data, 0, sizeof(data)); + + if (!csv3_guest(kvm) || + !(csv->inuse_ext & KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET)) { + pages = hygon_kvm_hooks.sev_pin_memory(kvm, params.guest_uaddr, + params.guest_len, &n, 1); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + /* + * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts + * pages in place; the cache may contain the data that was + * written unencrypted. + */ + hygon_kvm_hooks.sev_clflush_pages(pages, n); + + /* + * The secret must be copied into contiguous memory region, + * lets verify that userspace memory pages are contiguous + * before we issue command. 
+ */ + if (hygon_kvm_hooks.get_num_contig_pages(0, pages, n) != n) { + ret = -EINVAL; + goto e_unpin_memory; + } + + offset = params.guest_uaddr & (PAGE_SIZE - 1); + data.guest_address = __sme_page_pa(pages[0]) + offset; + } else { + /* It's gpa for CSV3 guest */ + data.guest_address = params.guest_uaddr; + } + data.guest_len = params.guest_len; + + blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len); + if (IS_ERR(blob)) { + ret = PTR_ERR(blob); + goto e_unpin_memory; + } + + data.trans_address = __psp_pa(blob); + data.trans_len = params.trans_len; + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) { + ret = PTR_ERR(hdr); + goto e_free_blob; + } + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + + data.handle = sev->handle; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, + &data, &argp->error); + + kfree(hdr); + +e_free_blob: + kfree(blob); +e_unpin_memory: + if (!csv3_guest(kvm) || + !(csv->inuse_ext & KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET)) { + /* content of memory is updated, mark pages dirty */ + for (i = 0; i < n; i++) { + set_page_dirty_lock(pages[i]); + mark_page_accessed(pages[i]); + } + hygon_kvm_hooks.sev_unpin_memory(kvm, pages, n); + } + return ret; +} + static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -2528,6 +2618,9 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) r = csv_command_batch(kvm, &sev_cmd); mutex_unlock(&csv_cmd_batch_mutex); break; + case KVM_SEV_LAUNCH_SECRET: + r = csv_launch_secret(kvm, &sev_cmd); + break; case KVM_SEV_SEND_UPDATE_VMSA: /* * Hygon implement the specific interface, although @@ -2884,6 +2977,8 @@ static int csv_get_hygon_coco_extension(struct kvm *kvm) csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_SET_PRIV_MEM; if (csv->fw_ext & CSV_EXT_CSV3_MULT_LUP_DATA) csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_MULT_LUP_DATA; + if (csv->fw_ext & CSV_EXT_CSV3_INJ_SECRET) + 
csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET; } csv->kvm_ext_valid = true; } -- Gitee From 6bc5c009156402f996070df01cfee975da7a0cf9 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Wed, 21 Aug 2024 11:40:14 +0800 Subject: [PATCH 2007/2138] anolis: x86/csv: Define ATTESTATION secure call command ANBZ: #13042 Add definition of ATTESTATION secure call command and error codes of the secure call. Add struct csv_guest_user_data_attestation to support communicate between user-space and kernel-space. Add struct csv3_data_attestation_report to support communicate between X86 and PSP. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Jia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4388 --- arch/x86/kernel/csv-shared.c | 23 ++++++++++++++++++++++- include/linux/psp-hygon.h | 22 ++++++++++++++++++++++ include/uapi/linux/psp-hygon.h | 13 +++++++++++++ 3 files changed, 57 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/csv-shared.c b/arch/x86/kernel/csv-shared.c index 0763195764da..9383f5d0a476 100644 --- a/arch/x86/kernel/csv-shared.c +++ b/arch/x86/kernel/csv-shared.c @@ -76,12 +76,33 @@ * CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE: * CSV3 guest wants to change the secure call pages. * The secure processor re-init the secure call context. + * + * CSV3_SECURE_CMD_REQ_REPORT: + * CSV3 guest wants to request attestation report. + * The secure processor will update the request message buffer and respond + * buffer to indicate the result of this request. 
*/ enum csv3_secure_command_type { - CSV3_SECURE_CMD_ENC = 1, + /* The secure call request should below CSV3_SECURE_CMD_ACK */ + CSV3_SECURE_CMD_ENC = 0x1, CSV3_SECURE_CMD_DEC, CSV3_SECURE_CMD_RESET, CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE, + CSV3_SECURE_CMD_REQ_REPORT = 0x7, + + /* SECURE_CMD_ACK indicates secure call request can be handled */ + CSV3_SECURE_CMD_ACK = 0x6b, + + /* + * The following values are the error code of the secure call + * when firmware can't handling the specific secure call command + * as expected. + */ + CSV3_SECURE_CMD_ERROR_INTERNAL = 0x6c, + CSV3_SECURE_CMD_ERROR_INVALID_COMMAND = 0x6d, + CSV3_SECURE_CMD_ERROR_INVALID_PARAM = 0x6e, + CSV3_SECURE_CMD_ERROR_INVALID_ADDRESS = 0x6f, + CSV3_SECURE_CMD_ERROR_INVALID_LENGTH = 0x70, }; /* diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index acd849f9ee9e..f69c1110b571 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -310,6 +310,28 @@ struct csv3_data_dbg_read_mem { u32 size; /* In */ } __packed; +/** + * struct csv3_data_attestation_report - ATTESTATION secure call command parameters + * + * @handle: handle of the VM to process + * @resp_gpa: guest physical address to save the generated report + * @resp_length: length of the generated report + * @req_gpa: guest physical address of the input for the report + * @req_length: length of the input for the report + * @fw_error_code: firmware status code when generating the report + */ +struct csv3_data_attestation_report { + u32 handle; /* Out */ + u32 reserved1; + u64 resp_gpa; /* In */ + u8 reserved2[16]; + u32 resp_len; /* In/Out */ + u32 reserved3; + u64 req_gpa; /* In */ + u32 req_len; /* In,Out */ + u32 fw_error_code; /* Out */ +} __packed; + /** * struct csv3_data_send_encrypt_data - SEND_ENCRYPT_DATA command parameters * diff --git a/include/uapi/linux/psp-hygon.h b/include/uapi/linux/psp-hygon.h index 0e65afbeea3c..7bb6a4f8b6e0 100644 --- a/include/uapi/linux/psp-hygon.h +++ 
b/include/uapi/linux/psp-hygon.h @@ -55,4 +55,17 @@ struct csv_user_data_download_firmware { __u32 length; /* In */ } __packed; +/** + * struct csv_guest_user_data_attestation - ATTESTATION command parameters + * + * @user_data: user specified data for the attestation report + * @mnonce: user's random nonce + * @hash: sm3 hash of the @user_data and @mnonce + */ +struct csv_guest_user_data_attestation { + __u8 user_data[64]; /* In */ + __u8 monce[16]; /* In */ + __u8 hash[32]; /* In */ +} __packed; + #endif /* __PSP_HYGON_USER_H__ */ -- Gitee From 0118fb5953829d05b83ba3d491b9f59227184e22 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Wed, 21 Aug 2024 12:01:23 +0800 Subject: [PATCH 2008/2138] anolis: x86/csv: Add support for CSV3 ATTESTATION secure call ANBZ: #13042 Expose the function to modules so that user can get attestation report of CSV3 guest through ioctl interface. It's suggested that the user make use of the ioctl interface of the module csv-guest. Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Jia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4388 --- arch/x86/include/asm/csv.h | 2 + arch/x86/kernel/csv.c | 73 +++++++++++++++++++++++++++++++++ arch/x86/mm/mem_encrypt_hygon.c | 1 + 3 files changed, 76 insertions(+) diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index e2fcaf4ded5f..18ddf881a6fc 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -66,6 +66,7 @@ void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc); void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc); +int csv3_issue_request_report(phys_addr_t paddr, size_t size); #else /* !CONFIG_HYGON_CSV */ @@ -79,6 +80,7 @@ static inline void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc) { } static inline void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) { } +static inline int 
csv3_issue_request_report(phys_addr_t paddr, size_t size) { return -EIO; } #endif /* CONFIG_HYGON_CSV */ diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c index 4f80c97798de..84a76ae3c062 100644 --- a/arch/x86/kernel/csv.c +++ b/arch/x86/kernel/csv.c @@ -285,3 +285,76 @@ void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) __csv3_memory_enc_dec(csv3_secure_call, vaddr & PAGE_MASK, pages, enc); } + +static void print_secure_call_error(enum csv3_secure_command_type code) +{ + switch (code) { + case CSV3_SECURE_CMD_ACK: + pr_debug("secure call: handled\n"); + break; + case CSV3_SECURE_CMD_ERROR_INTERNAL: + pr_err("secure call: internal error\n"); + break; + case CSV3_SECURE_CMD_ERROR_INVALID_COMMAND: + pr_err("secure call: unsupported cmd\n"); + break; + case CSV3_SECURE_CMD_ERROR_INVALID_PARAM: + pr_err("secure call: invalid param\n"); + break; + case CSV3_SECURE_CMD_ERROR_INVALID_ADDRESS: + pr_err("secure call: invalid address\n"); + break; + case CSV3_SECURE_CMD_ERROR_INVALID_LENGTH: + pr_err("secure call: invalid length\n"); + break; + default: + pr_err("secure call: shouldn't reach here\n"); + break; + } +} + +int csv3_issue_request_report(phys_addr_t paddr, size_t size) +{ + struct secure_call_pages *sc_page_info; + struct csv3_secure_call_cmd *sc_wr, *sc_rd; + unsigned long flags; + int sc_page_idx; + enum csv3_secure_command_type sc_return_code; + + /* + * secure call pages needs to access with IRQs disabled because it is + * using a per-CPU data. + */ + local_irq_save(flags); + + sc_page_info = this_cpu_read(secure_call_data); + sc_page_idx = this_cpu_read(secure_call_page_idx); + + sc_wr = sc_page_idx ? &sc_page_info->page_a : &sc_page_info->page_b; + sc_rd = sc_page_idx ? 
&sc_page_info->page_b : &sc_page_info->page_a; + + sc_wr->cmd_type = CSV3_SECURE_CMD_REQ_REPORT; + sc_wr->nums = 1; + sc_wr->unused = 0; + sc_wr->entry[0].base_address = (u64)paddr; + sc_wr->entry[0].size = size; + + /* + * Write command in sc_wr must be done before retrieve status code + * from sc_rd, and it's ensured by the smp_mb below. + */ + smp_mb(); + + sc_return_code = sc_rd->cmd_type; + + this_cpu_write(secure_call_page_idx, sc_page_idx ^ 1); + + /* Leave per-CPU data access */ + local_irq_restore(flags); + + /* Print return code of the secure call */ + print_secure_call_error(sc_return_code); + + return sc_return_code == CSV3_SECURE_CMD_ACK ? 0 : -EIO; +} +EXPORT_SYMBOL_GPL(csv3_issue_request_report); diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c index 1871850cbb60..52ec3fa041fe 100644 --- a/arch/x86/mm/mem_encrypt_hygon.c +++ b/arch/x86/mm/mem_encrypt_hygon.c @@ -130,6 +130,7 @@ bool noinstr csv3_active(void) else return false; } +EXPORT_SYMBOL_GPL(csv3_active); /******************************************************************************/ /**************************** CSV3 CMA interfaces *****************************/ -- Gitee From a2f798c84123952e5a1f4e987ad614e4c92e1bc0 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Wed, 21 Aug 2024 14:55:06 +0800 Subject: [PATCH 2009/2138] anolis: virt/csv-guest: Provide interface for request of CSV3 attestation report ANBZ: #13042 This change allows user in the CSV3 guest to get attestation report. Currently, the input from user-space for CSV3 attestation report is same as that for CSV attestation report. 
Signed-off-by: hanliyang Reviewed-by: Xingrui Yi Reviewed-by: Jia Zhang Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4388 --- drivers/virt/coco/csv-guest/csv-guest.c | 127 +++++++++++++++++++++++- 1 file changed, 122 insertions(+), 5 deletions(-) diff --git a/drivers/virt/coco/csv-guest/csv-guest.c b/drivers/virt/coco/csv-guest/csv-guest.c index 7db8177637ce..6a77c68b19b4 100644 --- a/drivers/virt/coco/csv-guest/csv-guest.c +++ b/drivers/virt/coco/csv-guest/csv-guest.c @@ -12,21 +12,28 @@ #include #include #include +#include #include +#include + #include "csv-guest.h" -static long csv_get_report(void __user *argp) +/* Mutex to serialize the command handling. */ +static DEFINE_MUTEX(csv_cmd_mutex); + +static int csv_get_report(unsigned long arg) { u8 *csv_report; long ret; struct csv_report_req req; - if (copy_from_user(&req, argp, sizeof(struct csv_report_req))) + if (copy_from_user(&req, (void __user *)arg, + sizeof(struct csv_report_req))) return -EFAULT; - if (req.len < CSV_REPORT_INPUT_DATA_LEN) + if (req.len < CSV_REPORT_INPUT_DATA_LEN || !req.report_data) return -EINVAL; csv_report = kzalloc(req.len, GFP_KERNEL); @@ -54,14 +61,124 @@ static long csv_get_report(void __user *argp) return ret; } +static int csv3_get_report(unsigned long arg) +{ + struct csv_report_req input; + struct page *page = NULL; + struct csv3_data_attestation_report *cmd_buff = NULL; + void *req_buff = NULL; + void *resp_buff = NULL; + int ret; + + if (copy_from_user(&input, (void __user *)arg, sizeof(input))) + return -EFAULT; + + if (!input.len || !input.report_data) + return -EINVAL; + + /* Use alloc_page for alignment */ + page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (!page) + return -ENOMEM; + cmd_buff = (struct csv3_data_attestation_report *)page_address(page); + + /* + * Query the firmware to get minimum length of request buffer and + * respond buffer. 
+ */ + ret = csv3_issue_request_report(__pa(cmd_buff), sizeof(*cmd_buff)); + + /* + * The input.len must be the maxinum length of the req and resp buffer + * at least, otherwise return with error. + */ + if (input.len < max(cmd_buff->req_len, cmd_buff->resp_len)) { + ret = -EINVAL; + goto err; + } + + /* Use alloc_page for alignment */ + page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (!page) { + ret = -ENOMEM; + goto err; + } + req_buff = page_address(page); + + /* Use alloc_page for alignment */ + page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (!page) { + ret = -ENOMEM; + goto err; + } + resp_buff = page_address(page); + + /* Copy user's input data */ + if (copy_from_user(req_buff, input.report_data, cmd_buff->req_len)) { + ret = -EFAULT; + goto err; + } + + /* + * The req_len and resp_len fields has already been filled by firmware + * when we query the lengths from firmware. + */ + cmd_buff->req_gpa = __pa(req_buff); + cmd_buff->resp_gpa = __pa(resp_buff); + + ret = csv3_issue_request_report(__pa(cmd_buff), sizeof(*cmd_buff)); + if (ret || (!ret && cmd_buff->fw_error_code)) { + pr_err("%s: fail to generate report, fw_error:%#x ret:%d\n", + __func__, cmd_buff->fw_error_code, ret); + ret = -EIO; + goto err; + } + + /* Copy attestation report to user */ + if (copy_to_user(input.report_data, resp_buff, cmd_buff->resp_len)) + ret = -EFAULT; + +err: + if (resp_buff) + free_page((unsigned long)resp_buff); + if (req_buff) + free_page((unsigned long)req_buff); + if (cmd_buff) + free_page((unsigned long)cmd_buff); + + return ret; +} + +static int get_report(unsigned long arg) +{ + int ret = -ENOTTY; + + lockdep_assert_held(&csv_cmd_mutex); + + if (csv3_active()) + ret = csv3_get_report(arg); + else if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) + ret = csv_get_report(arg); + return ret; +} + static long csv_guest_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { + int ret = -ENOTTY; + + mutex_lock(&csv_cmd_mutex); + switch (cmd) { case 
CSV_CMD_GET_REPORT: - return csv_get_report((void __user *)arg); + ret = get_report(arg); + break; default: - return -ENOTTY; + break; } + + mutex_unlock(&csv_cmd_mutex); + + return ret; } static const struct file_operations csv_guest_fops = { -- Gitee From 05fa00db6e5fde9c0102232a691a2ae3f6996b3f Mon Sep 17 00:00:00 2001 From: Juxin Gao Date: Sat, 4 Jan 2025 17:09:59 +0800 Subject: [PATCH 2010/2138] anolis: drivers/gpu: Driver of drm will not be used on LA7A2000 by default ANBZ: #13113 Signed-off-by: TianRui Zhao Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4441 --- arch/loongarch/configs/anolis_defconfig | 2 +- arch/loongarch/configs/loongson3_defconfig | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig index 413e7279ffdc..274612ac7042 100644 --- a/arch/loongarch/configs/anolis_defconfig +++ b/arch/loongarch/configs/anolis_defconfig @@ -1485,7 +1485,7 @@ CONFIG_DRM_AST_LOONGSON=y CONFIG_DRM_MGAG200=m CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m -CONFIG_DRM_LOONGSON=y +CONFIG_DRM_LOONGSON=m CONFIG_DRM_BOCHS=m CONFIG_DRM_CIRRUS_QEMU=m CONFIG_DRM_INSPUR=m diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 4998e3f0d96c..1a51e214097f 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -1483,7 +1483,7 @@ CONFIG_DRM_AST_LOONGSON=y CONFIG_DRM_MGAG200=m CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m -CONFIG_DRM_LOONGSON=y +CONFIG_DRM_LOONGSON=m CONFIG_DRM_BOCHS=m CONFIG_DRM_CIRRUS_QEMU=m CONFIG_DRM_INSPUR=m -- Gitee From 04b3131b028060ef7eba10ea0353022bfc03bbff Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Mon, 2 Dec 2024 11:11:39 +0800 Subject: [PATCH 2011/2138] LoongArch: Use accessors to page table entries instead of direct dereference ANBZ: #13113 commit 4574815abf43e2bf05643e1b3f7a2e5d6df894f0 upstream As very well explained in commit 
20a004e7b017cce282 ("arm64: mm: Use READ_ONCE/WRITE_ONCE when accessing page tables"), an architecture whose page table walker can modify the PTE in parallel must use READ_ONCE()/ WRITE_ONCE() macro to avoid any compiler transformation. So apply that to LoongArch which is such an architecture, in order to avoid potential problems. Similar to commit edf955647269422e ("riscv: Use accessors to page table entries instead of direct dereference"). Signed-off-by: Huacai Chen Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4441 --- arch/loongarch/include/asm/hugetlb.h | 4 +-- arch/loongarch/include/asm/kfence.h | 6 ++-- arch/loongarch/include/asm/pgtable.h | 48 +++++++++++++++++----------- arch/loongarch/kvm/mmu.c | 8 ++--- arch/loongarch/mm/hugetlbpage.c | 6 ++-- arch/loongarch/mm/init.c | 10 +++--- arch/loongarch/mm/kasan_init.c | 10 +++--- arch/loongarch/mm/pgtable.c | 2 +- 8 files changed, 52 insertions(+), 42 deletions(-) diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h index 427b487fbfd6..376c0708e297 100644 --- a/arch/loongarch/include/asm/hugetlb.h +++ b/arch/loongarch/include/asm/hugetlb.h @@ -44,7 +44,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { pte_t clear; - pte_t pte = *ptep; + pte_t pte = ptep_get(ptep); pte_val(clear) = (unsigned long)invalid_pte_table; set_pte_at(mm, addr, ptep, clear); @@ -75,7 +75,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, pte_t pte, int dirty) { - int changed = !pte_same(*ptep, pte); + int changed = !pte_same(ptep_get(ptep), pte); if (changed) { set_pte_at(vma->vm_mm, addr, ptep, pte); diff --git a/arch/loongarch/include/asm/kfence.h b/arch/loongarch/include/asm/kfence.h index 6c82aea1c993..2835b41d2a84 100644 --- a/arch/loongarch/include/asm/kfence.h +++ b/arch/loongarch/include/asm/kfence.h @@ -43,13 +43,13 @@ static inline bool kfence_protect_page(unsigned long 
addr, bool protect) { pte_t *pte = virt_to_kpte(addr); - if (WARN_ON(!pte) || pte_none(*pte)) + if (WARN_ON(!pte) || pte_none(ptep_get(pte))) return false; if (protect) - set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT))); + set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~(_PAGE_VALID | _PAGE_PRESENT))); else - set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT))); + set_pte(pte, __pte(pte_val(ptep_get(pte)) | (_PAGE_VALID | _PAGE_PRESENT))); preempt_disable(); local_flush_tlb_one(addr); diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index f5300b66a39d..6776f147ffde 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -106,6 +106,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; #define KFENCE_AREA_START (VMEMMAP_END + 1) #define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1) +#define ptep_get(ptep) READ_ONCE(*(ptep)) +#define pmdp_get(pmdp) READ_ONCE(*(pmdp)) + #define pte_ERROR(e) \ pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) #ifndef __PAGETABLE_PMD_FOLDED @@ -147,11 +150,6 @@ static inline int p4d_present(p4d_t p4d) return p4d_val(p4d) != (unsigned long)invalid_pud_table; } -static inline void p4d_clear(p4d_t *p4dp) -{ - p4d_val(*p4dp) = (unsigned long)invalid_pud_table; -} - static inline pud_t *p4d_pgtable(p4d_t p4d) { return (pud_t *)p4d_val(p4d); @@ -159,7 +157,12 @@ static inline pud_t *p4d_pgtable(p4d_t p4d) static inline void set_p4d(p4d_t *p4d, p4d_t p4dval) { - *p4d = p4dval; + WRITE_ONCE(*p4d, p4dval); +} + +static inline void p4d_clear(p4d_t *p4dp) +{ + set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table)); } #define p4d_phys(p4d) PHYSADDR(p4d_val(p4d)) @@ -193,17 +196,20 @@ static inline int pud_present(pud_t pud) return pud_val(pud) != (unsigned long)invalid_pmd_table; } -static inline void pud_clear(pud_t *pudp) +static inline pmd_t *pud_pgtable(pud_t pud) { - pud_val(*pudp) 
= ((unsigned long)invalid_pmd_table); + return (pmd_t *)pud_val(pud); } -static inline pmd_t *pud_pgtable(pud_t pud) +static inline void set_pud(pud_t *pud, pud_t pudval) { - return (pmd_t *)pud_val(pud); + WRITE_ONCE(*pud, pudval); } -#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0) +static inline void pud_clear(pud_t *pudp) +{ + set_pud(pudp, __pud((unsigned long)invalid_pmd_table)); +} #define pud_phys(pud) PHYSADDR(pud_val(pud)) #define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT)) @@ -231,12 +237,15 @@ static inline int pmd_present(pmd_t pmd) return pmd_val(pmd) != (unsigned long)invalid_pte_table; } -static inline void pmd_clear(pmd_t *pmdp) +static inline void set_pmd(pmd_t *pmd, pmd_t pmdval) { - pmd_val(*pmdp) = ((unsigned long)invalid_pte_table); + WRITE_ONCE(*pmd, pmdval); } -#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0) +static inline void pmd_clear(pmd_t *pmdp) +{ + set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table)); +} #define pmd_phys(pmd) PHYSADDR(pmd_val(pmd)) @@ -314,7 +323,8 @@ extern void paging_init(void); static inline void set_pte(pte_t *ptep, pte_t pteval) { - *ptep = pteval; + WRITE_ONCE(*ptep, pteval); + if (pte_val(pteval) & _PAGE_GLOBAL) { pte_t *buddy = ptep_buddy(ptep); /* @@ -341,8 +351,8 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) : [global] "r" (page_global)); #else /* !CONFIG_SMP */ - if (pte_none(*buddy)) - pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; + if (pte_none(ptep_get(buddy))) + WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL)); #endif /* CONFIG_SMP */ } } @@ -350,7 +360,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { /* Preserve global status for the pair */ - if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) + if (pte_val(ptep_get(ptep_buddy(ptep))) & _PAGE_GLOBAL) set_pte(ptep, 
__pte(_PAGE_GLOBAL)); else set_pte(ptep, __pte(0)); @@ -591,7 +601,7 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd) static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { - pmd_t old = *pmdp; + pmd_t old = pmdp_get(pmdp); pmd_clear(pmdp); diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index d312921f3ab9..cb701649f56f 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -746,19 +746,19 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, * value) and then p*d_offset() walks into the target huge page instead * of the old page table (sees the new value). */ - pgd = READ_ONCE(*pgd_offset(kvm->mm, hva)); + pgd = pgdp_get(pgd_offset(kvm->mm, hva)); if (pgd_none(pgd)) goto out; - p4d = READ_ONCE(*p4d_offset(&pgd, hva)); + p4d = p4dp_get(p4d_offset(&pgd, hva)); if (p4d_none(p4d) || !p4d_present(p4d)) goto out; - pud = READ_ONCE(*pud_offset(&p4d, hva)); + pud = pudp_get(pud_offset(&p4d, hva)); if (pud_none(pud) || !pud_present(pud)) goto out; - pmd = READ_ONCE(*pmd_offset(&pud, hva)); + pmd = pmdp_get(pmd_offset(&pud, hva)); if (pmd_none(pmd) || !pmd_present(pmd)) goto out; diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c index 1e76fcb83093..62ddcea0aa14 100644 --- a/arch/loongarch/mm/hugetlbpage.c +++ b/arch/loongarch/mm/hugetlbpage.c @@ -39,11 +39,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, pmd_t *pmd = NULL; pgd = pgd_offset(mm, addr); - if (pgd_present(*pgd)) { + if (pgd_present(pgdp_get(pgd))) { p4d = p4d_offset(pgd, addr); - if (p4d_present(*p4d)) { + if (p4d_present(p4dp_get(p4d))) { pud = pud_offset(p4d, addr); - if (pud_present(*pud)) + if (pud_present(pudp_get(pud))) pmd = pmd_offset(pud, addr); } } diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index 4dd53427f657..71fa4b773eb3 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -140,7 +140,7 @@ void __meminit 
vmemmap_set_pmd(pmd_t *pmd, void *p, int node, int __meminit vmemmap_check_pmd(pmd_t *pmd, int node, unsigned long addr, unsigned long next) { - int huge = pmd_val(*pmd) & _PAGE_HUGE; + int huge = pmd_val(pmdp_get(pmd)) & _PAGE_HUGE; if (huge) vmemmap_verify((pte_t *)pmd, node, addr, next); @@ -172,7 +172,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr) pud_t *pud; pmd_t *pmd; - if (p4d_none(*p4d)) { + if (p4d_none(p4dp_get(p4d))) { pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!pud) panic("%s: Failed to allocate memory\n", __func__); @@ -183,7 +183,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr) } pud = pud_offset(p4d, addr); - if (pud_none(*pud)) { + if (pud_none(pudp_get(pud))) { pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!pmd) panic("%s: Failed to allocate memory\n", __func__); @@ -194,7 +194,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr) } pmd = pmd_offset(pud, addr); - if (!pmd_present(*pmd)) { + if (!pmd_present(pmdp_get(pmd))) { pte_t *pte; pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); @@ -215,7 +215,7 @@ void __init __set_fixmap(enum fixed_addresses idx, BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); ptep = populate_kernel_pte(addr); - if (!pte_none(*ptep)) { + if (!pte_none(ptep_get(ptep))) { pte_ERROR(*ptep); return; } diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c index 082cb2a6f1ef..7277b7583e1b 100644 --- a/arch/loongarch/mm/kasan_init.c +++ b/arch/loongarch/mm/kasan_init.c @@ -112,7 +112,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node) static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early) { - if (__pmd_none(early, READ_ONCE(*pmdp))) { + if (__pmd_none(early, pmdp_get(pmdp))) { phys_addr_t pte_phys = early ? 
__pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node); if (!early) @@ -125,7 +125,7 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early) { - if (__pud_none(early, READ_ONCE(*pudp))) { + if (__pud_none(early, pudp_get(pudp))) { phys_addr_t pmd_phys = early ? __pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node); if (!early) @@ -138,7 +138,7 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early) { - if (__p4d_none(early, READ_ONCE(*p4dp))) { + if (__p4d_none(early, p4dp_get(p4dp))) { phys_addr_t pud_phys = early ? __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node); if (!early) @@ -174,7 +174,7 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, : kasan_alloc_zeroed_page(node); next = addr + PAGE_SIZE; set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); - } while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep))); + } while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep))); } static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, @@ -186,7 +186,7 @@ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, do { next = pmd_addr_end(addr, end); kasan_pte_populate(pmdp, addr, next, node, early); - } while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp))); + } while (pmdp++, addr = next, addr != end && __pmd_none(early, pmdp_get(pmdp))); } static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr, diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c index 2aae72e63871..6a038a27373a 100644 --- a/arch/loongarch/mm/pgtable.c +++ b/arch/loongarch/mm/pgtable.c @@ -128,7 +128,7 @@ pmd_t mk_pmd(struct page *page, pgprot_t prot) void 
set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) { - *pmdp = pmd; + WRITE_ONCE(*pmdp, pmd); flush_tlb_all(); } -- Gitee From e36b87c4b6d41848afd1feb84ab157cc02ac768c Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Mon, 2 Dec 2024 11:11:40 +0800 Subject: [PATCH 2012/2138] LoongArch: Improve hardware page table walker ANBZ: #13113 commit f93f67d06b1023313ef1662eac490e29c025c030 upstream LoongArch has similar problems explained in commit 7f0b1bf04511348995d6 ("arm64: Fix barriers used for page table modifications"), when hardware page table walker (PTW) enabled, speculative accesses may cause spurious page fault in kernel space. Theoretically, in order to completely avoid spurious page fault we need a "dbar + ibar" pair between the page table modifications and the subsequent memory accesses using the corresponding virtual address. But "ibar" is too heavy for performance, so we only use a "dbar 0b11000" in set_pte(). And let spurious_fault() filter the rest rare spurious page faults which should be avoided by "ibar". Besides, we replace the llsc loop with amo in set_pte() which has better performance, and refactor mmu_context.h to 1) avoid any load/store/branch instructions between the writing of CSR.ASID & CSR.PGDL, 2) ensure flush tlb operation is after updating ASID.
Signed-off-by: Huacai Chen Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4441 --- arch/loongarch/include/asm/atomic.h | 2 ++ arch/loongarch/include/asm/mmu_context.h | 35 +++++++++++++++----- arch/loongarch/include/asm/pgtable.h | 32 ++++++++---------- arch/loongarch/mm/fault.c | 41 ++++++++++++++++++++++++ 4 files changed, 83 insertions(+), 27 deletions(-) diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h index e27f0c72d324..2143202cb380 100644 --- a/arch/loongarch/include/asm/atomic.h +++ b/arch/loongarch/include/asm/atomic.h @@ -15,6 +15,7 @@ #define __LL "ll.w " #define __SC "sc.w " #define __AMADD "amadd.w " +#define __AMOR "amor.w " #define __AMAND_DB "amand_db.w " #define __AMOR_DB "amor_db.w " #define __AMXOR_DB "amxor_db.w " @@ -22,6 +23,7 @@ #define __LL "ll.d " #define __SC "sc.d " #define __AMADD "amadd.d " +#define __AMOR "amor.d " #define __AMAND_DB "amand_db.d " #define __AMOR_DB "amor_db.d " #define __AMXOR_DB "amxor_db.d " diff --git a/arch/loongarch/include/asm/mmu_context.h b/arch/loongarch/include/asm/mmu_context.h index 9f97c3453b9c..304363bd3935 100644 --- a/arch/loongarch/include/asm/mmu_context.h +++ b/arch/loongarch/include/asm/mmu_context.h @@ -49,12 +49,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) /* Normal, classic get_new_mmu_context */ static inline void -get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) +get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, bool *need_flush) { u64 asid = asid_cache(cpu); if (!((++asid) & cpu_asid_mask(&cpu_data[cpu]))) - local_flush_tlb_user(); /* start new asid cycle */ + *need_flush = true; /* start new asid cycle */ cpu_context(cpu, mm) = asid_cache(cpu) = asid; } @@ -74,21 +74,34 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) return 0; } +static inline void atomic_update_pgd_asid(unsigned long asid, unsigned long pgdl) +{ + __asm__ __volatile__( + "csrwr 
%[pgdl_val], %[pgdl_reg] \n\t" + "csrwr %[asid_val], %[asid_reg] \n\t" + : [asid_val] "+r" (asid), [pgdl_val] "+r" (pgdl) + : [asid_reg] "i" (LOONGARCH_CSR_ASID), [pgdl_reg] "i" (LOONGARCH_CSR_PGDL) + : "memory" + ); +} + static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { + bool need_flush = false; unsigned int cpu = smp_processor_id(); /* Check if our ASID is of an older version and thus invalid */ if (!asid_valid(next, cpu)) - get_new_mmu_context(next, cpu); - - write_csr_asid(cpu_asid(cpu, next)); + get_new_mmu_context(next, cpu, &need_flush); if (next != &init_mm) - csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL); + atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)next->pgd); else - csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL); + atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)invalid_pg_dir); + + if (need_flush) + local_flush_tlb_user(); /* Flush tlb after update ASID */ /* * Mark current->active_mm as not "active" anymore. 
@@ -135,9 +148,15 @@ drop_mmu_context(struct mm_struct *mm, unsigned int cpu) asid = read_csr_asid() & cpu_asid_mask(¤t_cpu_data); if (asid == cpu_asid(cpu, mm)) { + bool need_flush = false; + if (!current->mm || (current->mm == mm)) { - get_new_mmu_context(mm, cpu); + get_new_mmu_context(mm, cpu, &need_flush); + write_csr_asid(cpu_asid(cpu, mm)); + if (need_flush) + local_flush_tlb_user(); /* Flush tlb after update ASID */ + goto out; } } diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index 6776f147ffde..5db17c367b1e 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -331,29 +331,23 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) * Make sure the buddy is global too (if it's !none, * it better already be global) */ + if (pte_none(ptep_get(buddy))) { #ifdef CONFIG_SMP - /* - * For SMP, multiple CPUs can race, so we need to do - * this atomically. - */ - unsigned long page_global = _PAGE_GLOBAL; - unsigned long tmp; - - __asm__ __volatile__ ( - "1:" __LL "%[tmp], %[buddy] \n" - " bnez %[tmp], 2f \n" - " or %[tmp], %[tmp], %[global] \n" - __SC "%[tmp], %[buddy] \n" - " beqz %[tmp], 1b \n" - " nop \n" - "2: \n" - __WEAK_LLSC_MB - : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) - : [global] "r" (page_global)); + /* + * For SMP, multiple CPUs can race, so we need + * to do this atomically. 
+ */ + __asm__ __volatile__( + __AMOR "$zero, %[global], %[buddy] \n" + : [buddy] "+ZB" (buddy->pte) + : [global] "r" (_PAGE_GLOBAL) + : "memory"); + + DBAR(0b11000); /* o_wrw = 0b11000 */ #else /* !CONFIG_SMP */ - if (pte_none(ptep_get(buddy))) WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL)); #endif /* CONFIG_SMP */ + } } } diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c index 97b40defde06..deefd9617d00 100644 --- a/arch/loongarch/mm/fault.c +++ b/arch/loongarch/mm/fault.c @@ -31,11 +31,52 @@ int show_unhandled_signals = 1; +static int __kprobes spurious_fault(unsigned long write, unsigned long address) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + if (!(address & __UA_LIMIT)) + return 0; + + pgd = pgd_offset_k(address); + if (!pgd_present(pgdp_get(pgd))) + return 0; + + p4d = p4d_offset(pgd, address); + if (!p4d_present(p4dp_get(p4d))) + return 0; + + pud = pud_offset(p4d, address); + if (!pud_present(pudp_get(pud))) + return 0; + + pmd = pmd_offset(pud, address); + if (!pmd_present(pmdp_get(pmd))) + return 0; + + if (pmd_leaf(*pmd)) { + return write ? pmd_write(pmdp_get(pmd)) : 1; + } else { + pte = pte_offset_kernel(pmd, address); + if (!pte_present(ptep_get(pte))) + return 0; + + return write ? pte_write(ptep_get(pte)) : 1; + } +} + static void __kprobes no_context(struct pt_regs *regs, unsigned long write, unsigned long address) { const int field = sizeof(unsigned long) * 2; + if (spurious_fault(write, address)) + return; + /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) return; -- Gitee From 7a200e5ef367ae5370a23b6006ac475faf000b1a Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Mon, 2 Dec 2024 11:11:38 +0800 Subject: [PATCH 2013/2138] LoongArch: Remove superfluous flush_dcache_page() definition ANBZ: #13113 commit 82bf60a6fed806d57e284a1fb40dbc1ad5097611 upstream LoongArch doesn't have cache aliases, so flush_dcache_page() is a no-op. 
There is a generic implementation for this case in include/asm-generic/ cacheflush.h. So remove the superfluous flush_dcache_page() definition, which also silences such build warnings: In file included from crypto/scompress.c:12: include/crypto/scatterwalk.h: In function 'scatterwalk_pagedone': include/crypto/scatterwalk.h:76:30: warning: variable 'page' set but not used [-Wunused-but-set-variable] 76 | struct page *page; | ^~~~ crypto/scompress.c: In function 'scomp_acomp_comp_decomp': >> crypto/scompress.c:174:38: warning: unused variable 'dst_page' [-Wunused-variable] 174 | struct page *dst_page = sg_page(req->dst); | Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202403091614.NeUw5zcv-lkp@intel.com/ Suggested-by: Barry Song Acked-by: Barry Song Signed-off-by: Huacai Chen Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4441 --- arch/loongarch/include/asm/cacheflush.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/loongarch/include/asm/cacheflush.h b/arch/loongarch/include/asm/cacheflush.h index 80bd74106985..f8754d08a31a 100644 --- a/arch/loongarch/include/asm/cacheflush.h +++ b/arch/loongarch/include/asm/cacheflush.h @@ -37,8 +37,6 @@ void local_flush_icache_range(unsigned long start, unsigned long end); #define flush_icache_range local_flush_icache_range #define flush_icache_user_range local_flush_icache_range -#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 - #define flush_cache_all() do { } while (0) #define flush_cache_mm(mm) do { } while (0) #define flush_cache_dup_mm(mm) do { } while (0) @@ -47,7 +45,6 @@ void local_flush_icache_range(unsigned long start, unsigned long end); #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) #define flush_icache_user_page(vma, page, addr, len) do { } while (0) -#define flush_dcache_page(page) do { } while (0) #define flush_dcache_mmap_lock(mapping) do { } while (0) #define 
flush_dcache_mmap_unlock(mapping) do { } while (0) -- Gitee From af905bd91892119514af54216cbf3dd9b7a78d22 Mon Sep 17 00:00:00 2001 From: Bibo Mao Date: Mon, 2 Dec 2024 11:11:41 +0800 Subject: [PATCH 2014/2138] LoongArch: Set initial pte entry with PAGE_GLOBAL for kernel space ANBZ: #13113 commit d2f8671045b41871053dedaf3035a06ad53d2736 upstream There are two pages in one TLB entry on LoongArch system. For kernel space, it requires both two pte entries (buddies) with PAGE_GLOBAL bit set, otherwise HW treats it as non-global tlb, there will be potential problems if tlb entry for kernel space is not global. Such as fail to flush kernel tlb with the function local_flush_tlb_kernel_range() which supposed only flush tlb with global bit. Kernel address space areas include percpu, vmalloc, vmemmap, fixmap and kasan areas. For these areas both two consecutive page table entries should be enabled with PAGE_GLOBAL bit. So with function set_pte() and pte_clear(), pte buddy entry is checked and set besides its own pte entry. However it is not atomic operation to set both two pte entries, there is problem with test_vmalloc test case. So function kernel_pte_init() is added to init a pte table when it is created for kernel address space, and the default initial pte value is PAGE_GLOBAL rather than zero at beginning. Then only its own pte entry need update with function set_pte() and pte_clear(), nothing to do with the pte buddy entry. 
Signed-off-by: Bibo Mao Signed-off-by: Huacai Chen Signed-off-by: Juxin Gao Link: https://gitee.com/anolis/cloud-kernel/pulls/4441 --- arch/loongarch/include/asm/pgalloc.h | 11 +++++++++ arch/loongarch/include/asm/pgtable.h | 35 ++++++---------------------- arch/loongarch/mm/init.c | 2 ++ arch/loongarch/mm/pgtable.c | 20 ++++++++++++++++ include/linux/mm.h | 3 ++- mm/kasan/init.c | 8 ++++++- mm/sparse-vmemmap.c | 5 ++++ 7 files changed, 54 insertions(+), 30 deletions(-) diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h index 79470f0b4f1d..c9f9895f237d 100644 --- a/arch/loongarch/include/asm/pgalloc.h +++ b/arch/loongarch/include/asm/pgalloc.h @@ -10,6 +10,7 @@ #define __HAVE_ARCH_PMD_ALLOC_ONE #define __HAVE_ARCH_PUD_ALLOC_ONE +#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL #include static inline void pmd_populate_kernel(struct mm_struct *mm, @@ -44,6 +45,16 @@ extern void pagetable_init(void); extern pgd_t *pgd_alloc(struct mm_struct *mm); +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) +{ + pte_t *pte = __pte_alloc_one_kernel(mm); + + if (pte) + kernel_pte_init(pte); + + return pte; +} + #define __pte_free_tlb(tlb, pte, address) \ do { \ pagetable_pte_dtor(page_ptdesc(pte)); \ diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h index 5db17c367b1e..4f4498ce2255 100644 --- a/arch/loongarch/include/asm/pgtable.h +++ b/arch/loongarch/include/asm/pgtable.h @@ -269,6 +269,7 @@ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pm extern void pgd_init(void *addr); extern void pud_init(void *addr); extern void pmd_init(void *addr); +extern void kernel_pte_init(void *addr); /* * Encode/decode swap entries and swap PTEs. 
Swap PTEs are all PTEs that @@ -325,39 +326,17 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) { WRITE_ONCE(*ptep, pteval); - if (pte_val(pteval) & _PAGE_GLOBAL) { - pte_t *buddy = ptep_buddy(ptep); - /* - * Make sure the buddy is global too (if it's !none, - * it better already be global) - */ - if (pte_none(ptep_get(buddy))) { #ifdef CONFIG_SMP - /* - * For SMP, multiple CPUs can race, so we need - * to do this atomically. - */ - __asm__ __volatile__( - __AMOR "$zero, %[global], %[buddy] \n" - : [buddy] "+ZB" (buddy->pte) - : [global] "r" (_PAGE_GLOBAL) - : "memory"); - - DBAR(0b11000); /* o_wrw = 0b11000 */ -#else /* !CONFIG_SMP */ - WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL)); -#endif /* CONFIG_SMP */ - } - } + if (pte_val(pteval) & _PAGE_GLOBAL) + DBAR(0b11000); /* o_wrw = 0b11000 */ +#endif } static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - /* Preserve global status for the pair */ - if (pte_val(ptep_get(ptep_buddy(ptep))) & _PAGE_GLOBAL) - set_pte(ptep, __pte(_PAGE_GLOBAL)); - else - set_pte(ptep, __pte(0)); + pte_t pte = ptep_get(ptep); + pte_val(pte) &= _PAGE_GLOBAL; + set_pte(ptep, pte); } #define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index 71fa4b773eb3..14f74a55baae 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -200,7 +200,9 @@ pte_t * __init populate_kernel_pte(unsigned long addr) pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!pte) panic("%s: Failed to allocate memory\n", __func__); + pmd_populate_kernel(&init_mm, pmd, pte); + kernel_pte_init(pte); } return pte_offset_kernel(pmd, addr); diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c index 6a038a27373a..3b2fbe74d7d9 100644 --- a/arch/loongarch/mm/pgtable.c +++ b/arch/loongarch/mm/pgtable.c @@ -116,6 +116,26 @@ void pud_init(void *addr) EXPORT_SYMBOL_GPL(pud_init); #endif +void kernel_pte_init(void *addr) +{ + 
unsigned long *p, *end; + + p = (unsigned long *)addr; + end = p + PTRS_PER_PTE; + + do { + p[0] = _PAGE_GLOBAL; + p[1] = _PAGE_GLOBAL; + p[2] = _PAGE_GLOBAL; + p[3] = _PAGE_GLOBAL; + p[4] = _PAGE_GLOBAL; + p += 8; + p[-3] = _PAGE_GLOBAL; + p[-2] = _PAGE_GLOBAL; + p[-1] = _PAGE_GLOBAL; + } while (p != end); +} + pmd_t mk_pmd(struct page *page, pgprot_t prot) { pmd_t pmd; diff --git a/include/linux/mm.h b/include/linux/mm.h index cdf70d8b0648..787d56b35b22 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3807,8 +3807,9 @@ void *sparse_buffer_alloc(unsigned long size); struct page * __populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap, struct dev_pagemap *pgmap); -void pmd_init(void *addr); void pud_init(void *addr); +void pmd_init(void *addr); +void kernel_pte_init(void *addr); pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); diff --git a/mm/kasan/init.c b/mm/kasan/init.c index 89895f38f722..ac607c306292 100644 --- a/mm/kasan/init.c +++ b/mm/kasan/init.c @@ -106,6 +106,10 @@ static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr, } } +void __weak __meminit kernel_pte_init(void *addr) +{ +} + static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr, unsigned long end) { @@ -126,8 +130,10 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr, if (slab_is_available()) p = pte_alloc_one_kernel(&init_mm); - else + else { p = early_alloc(PAGE_SIZE, NUMA_NO_NODE); + kernel_pte_init(p); + } if (!p) return -ENOMEM; diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index a2cbe44c48e1..2628fc02be08 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -184,6 +184,10 @@ static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node) return p; } +void __weak __meminit kernel_pte_init(void *addr) +{ +} + 
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node) { pmd_t *pmd = pmd_offset(pud, addr); @@ -191,6 +195,7 @@ pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node) void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node); if (!p) return NULL; + kernel_pte_init(p); pmd_populate_kernel(&init_mm, pmd, p); } return pmd; -- Gitee From 2304f59bb8b5998d8ae4c86ec5fe01c540c11362 Mon Sep 17 00:00:00 2001 From: gaojuxin Date: Mon, 6 Jan 2025 16:28:05 +0800 Subject: [PATCH 2015/2138] anolis: LoongArch: Fixup crashkernel cmdline ANBZ: #13118 Signed-off-by: gaojuxin Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4446 --- anolis/cmdline/loongarch64 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/anolis/cmdline/loongarch64 b/anolis/cmdline/loongarch64 index 1125741dc591..a38a393148b9 100644 --- a/anolis/cmdline/loongarch64 +++ b/anolis/cmdline/loongarch64 @@ -1,3 +1,3 @@ systemd.unified_cgroup_hierarchy=0 cgroup.memory=nokmem -crashkernel=0M-2G:0M,2G-8G:192M,8G-:256M +crashkernel=1024M -- Gitee From 9d4a5e2e54be99dab878e8a10824b707594d9d5a Mon Sep 17 00:00:00 2001 From: Yinan Liu Date: Mon, 6 Jan 2025 14:04:15 +0800 Subject: [PATCH 2016/2138] anolis: cputime: add cmdline suppprt for disable irqtime_account ANBZ: #9312 When CONFIG_IRQ_TIME_ACCOUNTING is enable, the system will count the the time spent on interrupts when hi/si enters and exits. When interrupts occur frequently, clock source access is slow on some architectures, which will cause considerable performance overhead. Therefore, a unified cmdline is added to provide each platform with the ability to turn off IRQ_TIME_ACCOUNTING. 
Signed-off-by: Yinan Liu Reviewed-by: Tianchen Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4445 --- kernel/sched/cputime.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 8685c8e019f8..e3e3bf3c39d4 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -24,14 +24,28 @@ DEFINE_PER_CPU(struct irqtime, cpu_irqtime); static int sched_clock_irqtime; +static int no_sched_clock_irqtime; + +static int __init irqtime_account_setup(char *str) +{ + if (!strcmp(str, "off")) { + no_sched_clock_irqtime = 1; + pr_info("The irqtime account is currently disabled!"); + } + return 1; +} +__setup("irqtime_account=", irqtime_account_setup); + void enable_sched_clock_irqtime(void) { - sched_clock_irqtime = 1; + if (!no_sched_clock_irqtime) + sched_clock_irqtime = 1; } void disable_sched_clock_irqtime(void) { - sched_clock_irqtime = 0; + if (!no_sched_clock_irqtime) + sched_clock_irqtime = 0; } static void irqtime_account_delta(struct irqtime *irqtime, u64 delta, -- Gitee From 66753f84097fab01b6bf2ae846b3070e8943dbf6 Mon Sep 17 00:00:00 2001 From: Yinan Liu Date: Tue, 31 Dec 2024 13:58:19 +0800 Subject: [PATCH 2017/2138] anolis: selftests: Revert "selftests/rseq: Fix mm_cid test failure" ANBZ: #13087 This reverts commit 73a4f5a704a2022484e9c42ea06ef05ac42a8621. This patch adapts to glibc 2.40 and later, while ali6000 uses glibc 2.32. It is temporarily reverted. 
Signed-off-by: Yinan Liu Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/4422 --- tools/testing/selftests/rseq/rseq.c | 110 +++++++++------------------- tools/testing/selftests/rseq/rseq.h | 10 ++- 2 files changed, 43 insertions(+), 77 deletions(-) diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c index 5b9772cdf265..96e812bdf8a4 100644 --- a/tools/testing/selftests/rseq/rseq.c +++ b/tools/testing/selftests/rseq/rseq.c @@ -60,6 +60,12 @@ unsigned int rseq_size = -1U; /* Flags used during rseq registration. */ unsigned int rseq_flags; +/* + * rseq feature size supported by the kernel. 0 if the registration was + * unsuccessful. + */ +unsigned int rseq_feature_size = -1U; + static int rseq_ownership; static int rseq_reg_success; /* At least one rseq registration has succeded. */ @@ -105,43 +111,6 @@ int rseq_available(void) } } -/* The rseq areas need to be at least 32 bytes. */ -static -unsigned int get_rseq_min_alloc_size(void) -{ - unsigned int alloc_size = rseq_size; - - if (alloc_size < ORIG_RSEQ_ALLOC_SIZE) - alloc_size = ORIG_RSEQ_ALLOC_SIZE; - return alloc_size; -} - -/* - * Return the feature size supported by the kernel. - * - * Depending on the value returned by getauxval(AT_RSEQ_FEATURE_SIZE): - * - * 0: Return ORIG_RSEQ_FEATURE_SIZE (20) - * > 0: Return the value from getauxval(AT_RSEQ_FEATURE_SIZE). - * - * It should never return a value below ORIG_RSEQ_FEATURE_SIZE. 
- */ -static -unsigned int get_rseq_kernel_feature_size(void) -{ - unsigned long auxv_rseq_feature_size, auxv_rseq_align; - - auxv_rseq_align = getauxval(AT_RSEQ_ALIGN); - assert(!auxv_rseq_align || auxv_rseq_align <= RSEQ_THREAD_AREA_ALLOC_SIZE); - - auxv_rseq_feature_size = getauxval(AT_RSEQ_FEATURE_SIZE); - assert(!auxv_rseq_feature_size || auxv_rseq_feature_size <= RSEQ_THREAD_AREA_ALLOC_SIZE); - if (auxv_rseq_feature_size) - return auxv_rseq_feature_size; - else - return ORIG_RSEQ_FEATURE_SIZE; -} - int rseq_register_current_thread(void) { int rc; @@ -150,7 +119,7 @@ int rseq_register_current_thread(void) /* Treat libc's ownership as a successful registration. */ return 0; } - rc = sys_rseq(&__rseq_abi, get_rseq_min_alloc_size(), 0, RSEQ_SIG); + rc = sys_rseq(&__rseq_abi, rseq_size, 0, RSEQ_SIG); if (rc) { if (RSEQ_READ_ONCE(rseq_reg_success)) { /* Incoherent success/failure within process. */ @@ -171,12 +140,28 @@ int rseq_unregister_current_thread(void) /* Treat libc's ownership as a successful unregistration. 
*/ return 0; } - rc = sys_rseq(&__rseq_abi, get_rseq_min_alloc_size(), RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG); + rc = sys_rseq(&__rseq_abi, rseq_size, RSEQ_ABI_FLAG_UNREGISTER, RSEQ_SIG); if (rc) return -1; return 0; } +static +unsigned int get_rseq_feature_size(void) +{ + unsigned long auxv_rseq_feature_size, auxv_rseq_align; + + auxv_rseq_align = getauxval(AT_RSEQ_ALIGN); + assert(!auxv_rseq_align || auxv_rseq_align <= RSEQ_THREAD_AREA_ALLOC_SIZE); + + auxv_rseq_feature_size = getauxval(AT_RSEQ_FEATURE_SIZE); + assert(!auxv_rseq_feature_size || auxv_rseq_feature_size <= RSEQ_THREAD_AREA_ALLOC_SIZE); + if (auxv_rseq_feature_size) + return auxv_rseq_feature_size; + else + return ORIG_RSEQ_FEATURE_SIZE; +} + static __attribute__((constructor)) void rseq_init(void) { @@ -193,54 +178,28 @@ void rseq_init(void) } if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p && *libc_rseq_size_p != 0) { - unsigned int libc_rseq_size; - /* rseq registration owned by glibc */ rseq_offset = *libc_rseq_offset_p; - libc_rseq_size = *libc_rseq_size_p; + rseq_size = *libc_rseq_size_p; rseq_flags = *libc_rseq_flags_p; - - /* - * Previous versions of glibc expose the value - * 32 even though the kernel only supported 20 - * bytes initially. Therefore treat 32 as a - * special-case. glibc 2.40 exposes a 20 bytes - * __rseq_size without using getauxval(3) to - * query the supported size, while still allocating a 32 - * bytes area. Also treat 20 as a special-case. 
- * - * Special-cases are handled by using the following - * value as active feature set size: - * - * rseq_size = min(32, get_rseq_kernel_feature_size()) - */ - switch (libc_rseq_size) { - case ORIG_RSEQ_FEATURE_SIZE: - fallthrough; - case ORIG_RSEQ_ALLOC_SIZE: - { - unsigned int rseq_kernel_feature_size = get_rseq_kernel_feature_size(); - - if (rseq_kernel_feature_size < ORIG_RSEQ_ALLOC_SIZE) - rseq_size = rseq_kernel_feature_size; - else - rseq_size = ORIG_RSEQ_ALLOC_SIZE; - break; - } - default: - /* Otherwise just use the __rseq_size from libc as rseq_size. */ - rseq_size = libc_rseq_size; - break; - } + rseq_feature_size = get_rseq_feature_size(); + if (rseq_feature_size > rseq_size) + rseq_feature_size = rseq_size; return; } rseq_ownership = 1; if (!rseq_available()) { rseq_size = 0; + rseq_feature_size = 0; return; } rseq_offset = (void *)&__rseq_abi - rseq_thread_pointer(); rseq_flags = 0; + rseq_feature_size = get_rseq_feature_size(); + if (rseq_feature_size == ORIG_RSEQ_FEATURE_SIZE) + rseq_size = ORIG_RSEQ_ALLOC_SIZE; + else + rseq_size = RSEQ_THREAD_AREA_ALLOC_SIZE; } static __attribute__((destructor)) @@ -250,6 +209,7 @@ void rseq_exit(void) return; rseq_offset = 0; rseq_size = -1U; + rseq_feature_size = -1U; rseq_ownership = 0; } diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h index 4e217b620e0c..d7364ea4d201 100644 --- a/tools/testing/selftests/rseq/rseq.h +++ b/tools/testing/selftests/rseq/rseq.h @@ -68,6 +68,12 @@ extern unsigned int rseq_size; /* Flags used during rseq registration. */ extern unsigned int rseq_flags; +/* + * rseq feature size supported by the kernel. 0 if the registration was + * unsuccessful. 
+ */ +extern unsigned int rseq_feature_size; + enum rseq_mo { RSEQ_MO_RELAXED = 0, RSEQ_MO_CONSUME = 1, /* Unused */ @@ -187,7 +193,7 @@ static inline uint32_t rseq_current_cpu(void) static inline bool rseq_node_id_available(void) { - return (int) rseq_size >= rseq_offsetofend(struct rseq_abi, node_id); + return (int) rseq_feature_size >= rseq_offsetofend(struct rseq_abi, node_id); } /* @@ -201,7 +207,7 @@ static inline uint32_t rseq_current_node_id(void) static inline bool rseq_mm_cid_available(void) { - return (int) rseq_size >= rseq_offsetofend(struct rseq_abi, mm_cid); + return (int) rseq_feature_size >= rseq_offsetofend(struct rseq_abi, mm_cid); } static inline uint32_t rseq_current_mm_cid(void) -- Gitee From fb327a8302daf4bbfb70ca58fc408cfbfd572500 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 7 Jan 2025 10:06:06 +0800 Subject: [PATCH 2018/2138] anolis: Revert "perf/x86/intel/uncore: Fix the lack of ch_mask format for SPR" ANBZ: #13058 This reverts commit b10335e85425ee93c4c497671945d23c779281d1. There is no ch_mask on SPR. It should be Extended Umask. The Converter should avoid generating the PortMask and FCMask for SPR/EMR/GNR. Maybe the event description is inaccurate. So revert it. 
Signed-off-by: Jing Zhang Reviewed-by: Shuai Xue Link: https://gitee.com/anolis/cloud-kernel/pulls/4451 --- arch/x86/events/intel/uncore_snbep.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 23632761b341..dcfabf678807 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -5963,7 +5963,6 @@ static struct attribute *spr_uncore_cha_formats_attr[] = { &format_attr_inv.attr, &format_attr_thresh8.attr, &format_attr_filter_tid5.attr, - &format_attr_ch_mask.attr, NULL, }; static const struct attribute_group spr_uncore_chabox_format_group = { -- Gitee From d6b296c3fbd78bae24c0efa1c0eb0bc4d3ee6136 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Thu, 7 Nov 2024 11:39:59 -0800 Subject: [PATCH 2019/2138] KVM: selftests: Don't bother deleting memslots in KVM when freeing VMs ANBZ: #13121 commit 5afe18dfa47daead88517b095b6e0ce012f031f8 upstream When freeing a VM, don't call into KVM to manually remove each memslot, simply cleanup and free any userspace assets associated with the memory region. KVM is ultimately responsible for ensuring kernel resources are freed when the VM is destroyed, deleting memslots one-by-one is unnecessarily slow, and unless a test is already leaking the VM fd, the VM will be destroyed when kvm_vm_release() is called. Not deleting KVM's memslot also allows cleaning up dead VMs without having to care whether or not the to-be-freed VM is dead or alive. 
Reported-by: Eric Auger Reviewed-by: Eric Auger Tested-by: Eric Auger Reported-by: Mark Brown Signed-off-by: Sean Christopherson Link: https://lore.kernel.org/kvmarm/Zy0bcM0m-N18gAZz@google.com/ Signed-off-by: Oliver Upton [Ruidong: fix conflict with c0d1a] Signed-off-by: Ruidong Tian Reviewed-by: Bin Guo Link: https://gitee.com/anolis/cloud-kernel/pulls/4453 --- tools/testing/selftests/kvm/lib/kvm_util.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 7a8af1821f5d..bc1ef536b640 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -685,9 +685,6 @@ static void __vm_mem_region_delete(struct kvm_vm *vm, hash_del(®ion->slot_node); } - region->region.memory_size = 0; - vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, ®ion->region); - sparsebit_free(®ion->unused_phy_pages); ret = munmap(region->mmap_start, region->mmap_size); TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); @@ -1178,7 +1175,12 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) */ void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) { - __vm_mem_region_delete(vm, memslot2region(vm, slot), true); + struct userspace_mem_region *region = memslot2region(vm, slot); + + region->region.memory_size = 0; + vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, ®ion->region); + + __vm_mem_region_delete(vm, region, true); } /* Returns the size of a vCPU's kvm_run structure. */ -- Gitee From 30000a0cc5a4f06283dd61384127603f0ebb4a89 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 3 Apr 2024 16:37:59 +0800 Subject: [PATCH 2020/2138] arm64: mm: cleanup __do_page_fault() ANBZ: #13134 commit 6ea02ee489799317c6640ac014c49b1d1b7124c5 upstream Patch series "arch/mm/fault: accelerate pagefault when badaccess", v2. 
After VMA lock-based page fault handling enabled, if bad access met under per-vma lock, it will fallback to mmap_lock-based handling, so it leads to unnessary mmap lock and vma find again. A test from lmbench shows 34% improve after this changes on arm64, lat_sig -P 1 prot lat_sig 0.29194 -> 0.19198 This patch (of 7): The __do_page_fault() only calls handle_mm_fault() after vm_flags checked, and it is only called by do_page_fault(), let's squash it into do_page_fault() to cleanup code. Link: https://lkml.kernel.org/r/20240403083805.1818160-1-wangkefeng.wang@huawei.com Link: https://lkml.kernel.org/r/20240403083805.1818160-2-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Suren Baghdasaryan Reviewed-by: Catalin Marinas Cc: Albert Ou Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Christophe Leroy Cc: Dave Hansen Cc: Gerald Schaefer Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Peter Zijlstra Cc: Russell King Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4458 --- arch/arm64/mm/fault.c | 27 +++++++-------------------- 1 file changed, 7 insertions(+), 20 deletions(-) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 0f3983c4769c..e0914f21696d 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -494,25 +494,6 @@ static void do_bad_area(unsigned long far, unsigned long esr, } } -#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000) -#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000) - -static vm_fault_t __do_page_fault(struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long addr, - unsigned int mm_flags, unsigned long vm_flags, - struct pt_regs *regs) -{ - /* - * Ok, we have a good vm_area for this memory access, so we can handle - * it. - * Check that the permissions on the VMA allow for the fault which - * occurred. 
- */ - if (!(vma->vm_flags & vm_flags)) - return VM_FAULT_BADACCESS; - return handle_mm_fault(vma, addr, mm_flags, regs); -} - static bool is_el0_instruction_abort(unsigned long esr) { return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW; @@ -527,6 +508,9 @@ static bool is_write_abort(unsigned long esr) return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM); } +#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000) +#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000) + static int __kprobes do_page_fault(unsigned long far, unsigned long esr, struct pt_regs *regs) { @@ -623,7 +607,10 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, goto done; } - fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs); + if (!(vma->vm_flags & vm_flags)) + fault = VM_FAULT_BADACCESS; + else + fault = handle_mm_fault(vma, addr, mm_flags, regs); /* Quick path to respond to signals */ if (fault_signal_pending(fault, regs)) { -- Gitee From 159f43f7a8b562788f91fa2c31bb77eac4ac4d2a Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 3 Apr 2024 16:38:00 +0800 Subject: [PATCH 2021/2138] arm64: mm: accelerate pagefault when VM_FAULT_BADACCESS ANBZ: #13134 commit faab3d0f250aba863b19bb2d72daea0ae90a1d5d upstream The vm_flags of vma already checked under per-VMA lock, if it is a bad access, directly set fault to VM_FAULT_BADACCESS and handle error, no need to retry with mmap_lock again, the latency time reduces 34% in 'lat_sig -P 1 prot lat_sig' from lmbench testcase. Since the page fault is handled under per-VMA lock, count it as a vma lock event with VMA_LOCK_SUCCESS. 
Link: https://lkml.kernel.org/r/20240403083805.1818160-3-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Suren Baghdasaryan Reviewed-by: Catalin Marinas Cc: Albert Ou Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Christophe Leroy Cc: Dave Hansen Cc: Gerald Schaefer Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Peter Zijlstra Cc: Russell King Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4458 --- arch/arm64/mm/fault.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index e0914f21696d..f8b16fc10b3f 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -580,7 +580,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, if (!(vma->vm_flags & vm_flags)) { vma_end_read(vma); - goto lock_mmap; + fault = VM_FAULT_BADACCESS; + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + goto done; } fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs); if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) -- Gitee From e36b5c059b39ef3abddf945e863668409a88669b Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 3 Apr 2024 16:38:02 +0800 Subject: [PATCH 2022/2138] powerpc: mm: accelerate pagefault when badaccess ANBZ: #13134 commit 0cec9541dcc550ce4a710c9a789f5f24e7c1d66d upstream The access_[pkey]_error() of vma already checked under per-VMA lock, if it is a bad access, directly handle error, no need to retry with mmap_lock again. In order to release the correct lock, pass the mm_struct into bad_access_pkey()/bad_access(), if mm is NULL, release vma lock, or release mmap_lock. Since the page faut is handled under per-VMA lock, count it as a vma lock event with VMA_LOCK_SUCCESS. 
Link: https://lkml.kernel.org/r/20240403083805.1818160-5-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Acked-by: Michael Ellerman (powerpc) Cc: Albert Ou Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Christophe Leroy Cc: Dave Hansen Cc: Gerald Schaefer Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Peter Zijlstra Cc: Russell King Cc: Suren Baghdasaryan Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4458 --- arch/powerpc/mm/fault.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index d3e0f5b3ecc7..554547c4bbad 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -71,23 +71,26 @@ static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long add return __bad_area_nosemaphore(regs, address, SEGV_MAPERR); } -static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code) +static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code, + struct mm_struct *mm, struct vm_area_struct *vma) { - struct mm_struct *mm = current->mm; /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. 
*/ - mmap_read_unlock(mm); + if (mm) + mmap_read_unlock(mm); + else + vma_end_read(vma); return __bad_area_nosemaphore(regs, address, si_code); } static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, + struct mm_struct *mm, struct vm_area_struct *vma) { - struct mm_struct *mm = current->mm; int pkey; /* @@ -109,7 +112,10 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, */ pkey = vma_pkey(vma); - mmap_read_unlock(mm); + if (mm) + mmap_read_unlock(mm); + else + vma_end_read(vma); /* * If we are in kernel mode, bail out with a SEGV, this will @@ -124,9 +130,10 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, return 0; } -static noinline int bad_access(struct pt_regs *regs, unsigned long address) +static noinline int bad_access(struct pt_regs *regs, unsigned long address, + struct mm_struct *mm, struct vm_area_struct *vma) { - return __bad_area(regs, address, SEGV_ACCERR); + return __bad_area(regs, address, SEGV_ACCERR, mm, vma); } static int do_sigbus(struct pt_regs *regs, unsigned long address, @@ -484,13 +491,13 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, if (unlikely(access_pkey_error(is_write, is_exec, (error_code & DSISR_KEYFAULT), vma))) { - vma_end_read(vma); - goto lock_mmap; + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + return bad_access_pkey(regs, address, NULL, vma); } if (unlikely(access_error(is_write, is_exec, vma))) { - vma_end_read(vma); - goto lock_mmap; + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + return bad_access(regs, address, NULL, vma); } fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); @@ -524,10 +531,10 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, if (unlikely(access_pkey_error(is_write, is_exec, (error_code & DSISR_KEYFAULT), vma))) - return bad_access_pkey(regs, address, vma); + return bad_access_pkey(regs, address, mm, vma); if 
(unlikely(access_error(is_write, is_exec, vma))) - return bad_access(regs, address); + return bad_access(regs, address, mm, vma); /* * If for any reason at all we couldn't handle the fault, -- Gitee From bd822e0d37fd0da39802e4218a19ccac7ff0ab1d Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 3 Apr 2024 16:38:04 +0800 Subject: [PATCH 2023/2138] s390: mm: accelerate pagefault when badaccess ANBZ: #13134 commit 82b7a618397c80736506c05d9add4aaea91297e8 upstream The vm_flags of vma already checked under per-VMA lock, if it is a bad access, directly handle error, no need to retry with mmap_lock again. Since the page faut is handled under per-VMA lock, count it as a vma lock event with VMA_LOCK_SUCCESS. Link: https://lkml.kernel.org/r/20240403083805.1818160-7-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Heiko Carstens Cc: Albert Ou Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Christophe Leroy Cc: Dave Hansen Cc: Gerald Schaefer Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Peter Zijlstra Cc: Russell King Cc: Suren Baghdasaryan Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4458 --- arch/s390/mm/fault.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 1a231181a413..17b483c52815 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -414,7 +414,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) goto lock_mmap; if (!(vma->vm_flags & access)) { vma_end_read(vma); - goto lock_mmap; + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + return handle_fault_error_nolock(regs, SEGV_ACCERR); } fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) -- Gitee From 1095111c0ef1b9e734ca774809cac5a8407524a3 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 3 
Apr 2024 16:38:05 +0800 Subject: [PATCH 2024/2138] x86: mm: accelerate pagefault when badaccess ANBZ: #13134 commit bc7996c864bf58102f640474e04ec5ab04911ac1 upstream The access_error() of vma is already checked under per-VMA lock, if it is a bad access, directly handle error, no need to retry with mmap_lock again. In order to release the correct lock, pass the mm_struct into bad_area_access_error(). If mm is NULL, release vma lock, or release mmap_lock. Since the page faut is handled under per-VMA lock, count it as a vma lock event with VMA_LOCK_SUCCESS. Link: https://lkml.kernel.org/r/20240403083805.1818160-8-wangkefeng.wang@huawei.com Reviewed-by: Suren Baghdasaryan Signed-off-by: Kefeng Wang Cc: Albert Ou Cc: Alexander Gordeev Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Christophe Leroy Cc: Dave Hansen Cc: Gerald Schaefer Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Peter Zijlstra Cc: Russell King Cc: Will Deacon Signed-off-by: Andrew Morton Signed-off-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4458 --- arch/x86/mm/fault.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 6529b3e2cff3..7add6b93a7b4 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -828,14 +828,17 @@ bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, static void __bad_area(struct pt_regs *regs, unsigned long error_code, - unsigned long address, u32 pkey, int si_code) + unsigned long address, struct mm_struct *mm, + struct vm_area_struct *vma, u32 pkey, int si_code) { - struct mm_struct *mm = current->mm; /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. 
*/ - mmap_read_unlock(mm); + if (mm) + mmap_read_unlock(mm); + else + vma_end_read(vma); __bad_area_nosemaphore(regs, error_code, address, pkey, si_code); } @@ -859,7 +862,8 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code, static noinline void bad_area_access_error(struct pt_regs *regs, unsigned long error_code, - unsigned long address, struct vm_area_struct *vma) + unsigned long address, struct mm_struct *mm, + struct vm_area_struct *vma) { /* * This OSPKE check is not strictly necessary at runtime. @@ -889,9 +893,9 @@ bad_area_access_error(struct pt_regs *regs, unsigned long error_code, */ u32 pkey = vma_pkey(vma); - __bad_area(regs, error_code, address, pkey, SEGV_PKUERR); + __bad_area(regs, error_code, address, mm, vma, pkey, SEGV_PKUERR); } else { - __bad_area(regs, error_code, address, 0, SEGV_ACCERR); + __bad_area(regs, error_code, address, mm, vma, 0, SEGV_ACCERR); } } @@ -1318,8 +1322,9 @@ void do_user_addr_fault(struct pt_regs *regs, goto lock_mmap; if (unlikely(access_error(error_code, vma))) { - vma_end_read(vma); - goto lock_mmap; + bad_area_access_error(regs, error_code, address, NULL, vma); + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + return; } fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) @@ -1353,7 +1358,7 @@ void do_user_addr_fault(struct pt_regs *regs, * we can handle it.. 
*/ if (unlikely(access_error(error_code, vma))) { - bad_area_access_error(regs, error_code, address, vma); + bad_area_access_error(regs, error_code, address, mm, vma); return; } -- Gitee From d333ce4bc8c01bd28924fbf41697330703ac44eb Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Fri, 10 Jan 2025 09:19:16 +0800 Subject: [PATCH 2025/2138] anolis: LoongArch: fix compile error when enable CONFIG_PARAVIRT ANBZ: #13368 Delete duplicate function definitions Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4471 Reviewed-by: Juxin Gao --- arch/loongarch/kernel/paravirt.c | 327 +++---------------------------- 1 file changed, 30 insertions(+), 297 deletions(-) diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c index 92e37d0e6b22..9cc27c3feb69 100644 --- a/arch/loongarch/kernel/paravirt.c +++ b/arch/loongarch/kernel/paravirt.c @@ -4,9 +4,9 @@ #include #include #include -#include #include #include +#include static int has_steal_clock; struct static_key paravirt_steal_enabled; @@ -21,85 +21,6 @@ static u64 native_steal_clock(int cpu) DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); -static bool steal_acc = true; -static int __init parse_no_stealacc(char *arg) -{ - steal_acc = false; - return 0; -} -early_param("no-steal-acc", parse_no_stealacc); - -static u64 para_steal_clock(int cpu) -{ - u64 steal; - struct kvm_steal_time *src; - int version; - - src = &per_cpu(steal_time, cpu); - do { - - version = src->version; - /* Make sure that the version is read before the steal */ - virt_rmb(); - steal = src->steal; - /* Make sure that the steal is read before the next version */ - virt_rmb(); - - } while ((version & 1) || (version != src->version)); - return steal; -} - -static int pv_register_steal_time(void) -{ - int cpu = smp_processor_id(); - struct kvm_steal_time *st; - unsigned long addr; - - if (!has_steal_clock) - return -EPERM; - - st = &per_cpu(steal_time, cpu); - addr = per_cpu_ptr_to_phys(st); - - /* The 
whole structure kvm_steal_time should be one page */ - if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) { - pr_warn("Illegal PV steal time addr %lx\n", addr); - return -EFAULT; - } - - addr |= KVM_STEAL_PHYS_VALID; - kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr); - return 0; -} - -static bool steal_acc = true; - -static int __init parse_no_stealacc(char *arg) -{ - steal_acc = false; - return 0; -} -early_param("no-steal-acc", parse_no_stealacc); - -static u64 paravt_steal_clock(int cpu) -{ - int version; - u64 steal; - struct kvm_steal_time *src; - - src = &per_cpu(steal_time, cpu); - do { - - version = src->version; - virt_rmb(); /* Make sure that the version is read before the steal */ - steal = src->steal; - virt_rmb(); /* Make sure that the steal is read before the next version */ - - } while ((version & 1) || (version != src->version)); - - return steal; -} - static bool steal_acc = true; static int __init parse_no_stealacc(char *arg) @@ -131,7 +52,7 @@ static u64 paravt_steal_clock(int cpu) #ifdef CONFIG_SMP static void pv_send_ipi_single(int cpu, unsigned int action) { - unsigned int min, old; + int min, old; irq_cpustat_t *info = &per_cpu(irq_stat, cpu); old = atomic_fetch_or(BIT(action), &info->message); @@ -139,13 +60,14 @@ static void pv_send_ipi_single(int cpu, unsigned int action) return; min = cpu_logical_map(cpu); - kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, 1, 0, min); + kvm_hypercall3(KVM_HCALL_FUNC_IPI, 1, 0, min); } -#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG) +#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG) + static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action) { - unsigned int cpu, i, min = 0, max = 0, old; + int i, cpu, min = 0, max = 0, old; __uint128_t bitmap = 0; irq_cpustat_t *info; @@ -162,20 +84,20 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action) cpu = cpu_logical_map(i); if (!bitmap) { min = max = cpu; - } else if (cpu > min && cpu < min + 
KVM_IPI_CLUSTER_SIZE) { - max = cpu > max ? cpu : max; - } else if (cpu < min && (max - cpu) < KVM_IPI_CLUSTER_SIZE) { + } else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) { + /* cpu < min, and bitmap still enough */ bitmap <<= min - cpu; min = cpu; + } else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) { + /* cpu > min, and bitmap still enough */ + max = cpu > max ? cpu : max; } else { /* - * Physical cpuid is sorted in ascending order ascend - * for the next mask calculation, send IPI here - * directly and skip the remainding cpus + * With cpu, bitmap will exceed KVM_IPI_CLUSTER_SIZE, + * send IPI here directly and skip the remaining CPUs. */ - kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, - (unsigned long)bitmap, - (unsigned long)(bitmap >> BITS_PER_LONG), min); + kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap, + (unsigned long)(bitmap >> BITS_PER_LONG), min); min = max = cpu; bitmap = 0; } @@ -183,76 +105,51 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action) } if (bitmap) - kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, (unsigned long)bitmap, + kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap, (unsigned long)(bitmap >> BITS_PER_LONG), min); } -static irqreturn_t loongson_do_swi(int irq, void *dev) +static irqreturn_t pv_ipi_interrupt(int irq, void *dev) { + u32 action; irq_cpustat_t *info; - long action; - /* Clear swi interrupt */ + /* Clear SWI interrupt */ clear_csr_estat(1 << INT_SWI0); info = this_cpu_ptr(&irq_stat); action = atomic_xchg(&info->message, 0); - if (action & SMP_CALL_FUNCTION) { - generic_smp_call_function_interrupt(); - info->ipi_irqs[IPI_CALL_FUNCTION]++; - } if (action & SMP_RESCHEDULE) { scheduler_ipi(); info->ipi_irqs[IPI_RESCHEDULE]++; } + if (action & SMP_CALL_FUNCTION) { + generic_smp_call_function_interrupt(); + info->ipi_irqs[IPI_CALL_FUNCTION]++; + } + return IRQ_HANDLED; } static void pv_init_ipi(void) { - int r, swi0; + int r, swi; - swi0 = get_percpu_irq(INT_SWI0); - if (swi0 < 0) + 
swi = get_percpu_irq(INT_SWI0); + if (swi < 0) panic("SWI0 IRQ mapping failed\n"); - irq_set_percpu_devid(swi0); - r = request_percpu_irq(swi0, loongson_do_swi, "SWI0", &irq_stat); + irq_set_percpu_devid(swi); + r = request_percpu_irq(swi, pv_ipi_interrupt, "SWI0-IPI", &irq_stat); if (r < 0) panic("SWI0 IRQ request failed\n"); } - -static void pv_disable_steal_time(void) -{ - if (has_steal_clock) - kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0); -} - -static int pv_cpu_online(unsigned int cpu) -{ - unsigned long flags; - - local_irq_save(flags); - pv_register_steal_time(); - local_irq_restore(flags); - return 0; -} - -static int pv_cpu_down_prepare(unsigned int cpu) -{ - unsigned long flags; - - local_irq_save(flags); - pv_disable_steal_time(); - local_irq_restore(flags); - return 0; -} #endif bool kvm_para_available(void) { - static int hypervisor_type; int config; + static int hypervisor_type; if (!cpu_has_hypervisor) return false; @@ -293,56 +190,6 @@ int __init pv_ipi_init(void) return 0; } -static void pv_cpu_reboot(void *unused) -{ - pv_disable_steal_time(); -} - -static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, - void *unused) -{ - on_each_cpu(pv_cpu_reboot, NULL, 1); - return NOTIFY_DONE; -} - -static struct notifier_block pv_reboot_nb = { - .notifier_call = pv_reboot_notify, -}; - -int __init pv_time_init(void) -{ - int feature; - - if (!cpu_has_hypervisor) - return 0; - if (!kvm_para_available()) - return 0; - - feature = read_cpucfg(CPUCFG_KVM_FEATURE); - if (!(feature & KVM_FEATURE_STEAL_TIME)) - return 0; - - has_steal_clock = 1; - if (pv_register_steal_time()) { - has_steal_clock = 0; - return 0; - } - - register_reboot_notifier(&pv_reboot_nb); - static_call_update(pv_steal_clock, para_steal_clock); - static_key_slow_inc(¶virt_steal_enabled); - if (steal_acc) - static_key_slow_inc(¶virt_steal_rq_enabled); - -#ifdef CONFIG_SMP - if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "loongarch/pv:online", - 
pv_cpu_online, pv_cpu_down_prepare) < 0) - pr_err("Failed to install cpu hotplug callbacks\n"); -#endif - pr_info("Using stolen time PV\n"); - return 0; -} - static int pv_enable_steal_time(void) { int cpu = smp_processor_id(); @@ -451,120 +298,6 @@ int __init pv_time_init(void) return 0; } -static int pv_enable_steal_time(void) -{ - int cpu = smp_processor_id(); - unsigned long addr; - struct kvm_steal_time *st; - - if (!has_steal_clock) - return -EPERM; - - st = &per_cpu(steal_time, cpu); - addr = per_cpu_ptr_to_phys(st); - - /* The whole structure kvm_steal_time should be in one page */ - if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) { - pr_warn("Illegal PV steal time addr %lx\n", addr); - return -EFAULT; - } - - addr |= KVM_STEAL_PHYS_VALID; - kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr); - - return 0; -} - -static void pv_disable_steal_time(void) -{ - if (has_steal_clock) - kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0); -} - -#ifdef CONFIG_SMP -static int pv_time_cpu_online(unsigned int cpu) -{ - unsigned long flags; - - local_irq_save(flags); - pv_enable_steal_time(); - local_irq_restore(flags); - - return 0; -} - -static int pv_time_cpu_down_prepare(unsigned int cpu) -{ - unsigned long flags; - - local_irq_save(flags); - pv_disable_steal_time(); - local_irq_restore(flags); - - return 0; -} -#endif - -static void pv_cpu_reboot(void *unused) -{ - pv_disable_steal_time(); -} - -static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused) -{ - on_each_cpu(pv_cpu_reboot, NULL, 1); - return NOTIFY_DONE; -} - -static struct notifier_block pv_reboot_nb = { - .notifier_call = pv_reboot_notify, -}; - -int __init pv_time_init(void) -{ - int r, feature; - - if (!cpu_has_hypervisor) - return 0; - if (!kvm_para_available()) - return 0; - - feature = read_cpucfg(CPUCFG_KVM_FEATURE); - if (!(feature & KVM_FEATURE_STEAL_TIME)) - return 0; - - has_steal_clock = 1; - r = pv_enable_steal_time(); - if (r 
< 0) { - has_steal_clock = 0; - return 0; - } - register_reboot_notifier(&pv_reboot_nb); - -#ifdef CONFIG_SMP - r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, - "loongarch/pv_time:online", - pv_time_cpu_online, pv_time_cpu_down_prepare); - if (r < 0) { - has_steal_clock = 0; - pr_err("Failed to install cpu hotplug callbacks\n"); - return r; - } -#endif - - static_call_update(pv_steal_clock, paravt_steal_clock); - - static_key_slow_inc(¶virt_steal_enabled); -#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING - if (steal_acc) - static_key_slow_inc(¶virt_steal_rq_enabled); -#endif - - pr_info("Using paravirt steal-time\n"); - - return 0; -} - int __init pv_spinlock_init(void) { if (!cpu_has_hypervisor) -- Gitee From c4c41ba482b557d36d10bf051367dcf40e9a2f4c Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Thu, 9 Jan 2025 16:58:04 +0800 Subject: [PATCH 2026/2138] anolis: LoongArch: KVM: enable ptw for kvm ANBZ: #13344 Add the ptw feature bit to cpucfg Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4463 Reviewed-by: Juxin Gao --- arch/loongarch/kvm/exit.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index 19cb22da35de..5579ee8afedc 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -44,6 +44,8 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) switch (index) { case 0 ... (KVM_MAX_CPUCFG_REGS - 1): vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index]; + if (cpu_has_ptw && (index == LOONGARCH_CPUCFG2)) + vcpu->arch.gprs[rd] |= CPUCFG2_PTW; break; case CPUCFG_KVM_SIG: /* CPUCFG emulation between 0x40000000 -- 0x400000ff */ -- Gitee From c5cadf75e5d56de40faa44e86c4c4afbcb30b3b4 Mon Sep 17 00:00:00 2001 From: Xianglai Li Date: Fri, 27 Dec 2024 18:57:42 +0800 Subject: [PATCH 2027/2138] anolis: driver/iommu: Fixed multiple vfio devices not working properly ANBZ: #13051 Fixed 16 vfio devices cannot be pass-through to VMS. 
Signed-off-by: Xianglai Li Link: https://gitee.com/anolis/cloud-kernel/pulls/4404 Reviewed-by: Juxin Gao --- drivers/iommu/loongarch_iommu.c | 9 ++++++++- drivers/iommu/loongarch_iommu.h | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/loongarch_iommu.c b/drivers/iommu/loongarch_iommu.c index b158467918ba..490d68f4a913 100644 --- a/drivers/iommu/loongarch_iommu.c +++ b/drivers/iommu/loongarch_iommu.c @@ -441,8 +441,10 @@ static int domain_id_alloc(struct loongarch_iommu *iommu) if (id < MAX_DOMAIN_ID) __set_bit(id, iommu->domain_bitmap); spin_unlock(&iommu->domain_bitmap_lock); - if (id >= MAX_DOMAIN_ID) + if (id >= MAX_DOMAIN_ID) { + id = -1; pr_err("LA-IOMMU: Alloc domain id over max domain id\n"); + } return id; } @@ -593,6 +595,7 @@ static struct iommu_domain *la_iommu_domain_alloc(unsigned int type) struct dom_info *info; switch (type) { + case IOMMU_DOMAIN_BLOCKED: case IOMMU_DOMAIN_UNMANAGED: info = alloc_dom_info(); if (info == NULL) @@ -830,6 +833,9 @@ static int la_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) struct iommu_info *info; unsigned short bdf; + if (domain != NULL && domain->type == IOMMU_DOMAIN_BLOCKED) + return 0; + la_iommu_detach_dev(dev); if (domain == NULL) @@ -914,6 +920,7 @@ static void la_iommu_detach_dev(struct device *dev) spin_lock(&iommu_entry->devlock); do_detach(dev_data); spin_unlock(&iommu_entry->devlock); + dev_data->domain = NULL; pci_info(pdev, "%s iommu devid %x sigment %x\n", __func__, iommu->devid, iommu->segment); diff --git a/drivers/iommu/loongarch_iommu.h b/drivers/iommu/loongarch_iommu.h index cf5640d95900..a411d2b34d01 100644 --- a/drivers/iommu/loongarch_iommu.h +++ b/drivers/iommu/loongarch_iommu.h @@ -169,7 +169,7 @@ struct dom_entry { struct la_iommu_dev_data { struct list_head list; /* for iommu_entry->dev_list */ struct loongarch_iommu *iommu; - struct iommu_info *iommu_entry; + struct iommu_info *iommu_entry; struct iommu_domain *domain; struct device 
*dev; unsigned short bdf; -- Gitee From 99357d9e4e02754c6949440ac58ea68964403ffd Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Fri, 10 Jan 2025 13:42:04 +0800 Subject: [PATCH 2028/2138] anolis: configs: add the kconfig specifications in markdown format ANBZ: #8598 This specifications has been split and stored in anolis/configs/examination/ according to their the architectures, this patch imports a whole one in markdown format. Signed-off-by: Qiao Ma Reviewed-by: Jacob Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4473 --- .../specification/KCONFIG_specification.md | 641 ++++++++++++++++++ 1 file changed, 641 insertions(+) create mode 100644 anolis/configs/specification/KCONFIG_specification.md diff --git a/anolis/configs/specification/KCONFIG_specification.md b/anolis/configs/specification/KCONFIG_specification.md new file mode 100644 index 000000000000..8082348d6c05 --- /dev/null +++ b/anolis/configs/specification/KCONFIG_specification.md @@ -0,0 +1,641 @@ +# Anolis 23 KCONFIG规范列表 + +【说明】 + +- 应:该选项属于强制要求 +- 宜:该选项属于建议 +- N/A:该选项不做要求 + +| 编号 | kconfig名称 | x86 | arm | loongarch | sw64 | x86 | arm | loongarch | sw64 | 备注 | +| ---- | --------------------------------------- | ------ | ------ | -------------- | ------ | ----------------------------------- | ----------------------------------- | ------------------------------------------------------ | -------------------------------------------- | -------------------------------- | +| 1. | CONFIG_CPU_ISOLATION | 应 | 应 | 应 | 应 | y | y | y | y | | +| 2. | CONFIG_MEMCG | 应 | 应 | 应 | 应 | y | y | y | y | | +| 3. | CONFIG_FAIR_GROUP_SCHED | 应 | 应 | 应 | 应 | y | y | y | y | | +| 4. | CONFIG_CFS_BANDWIDTH | 应 | 应 | 应 | 应 | y | y | y | y | | +| 5. | CONFIG_CPUSETS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 6. | CONFIG_UTS_NS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 7. | CONFIG_IPC_NS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 8. | CONFIG_PID_NS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 9. 
| CONFIG_NET_NS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 10. | CONFIG_BLK_DEV_INITRD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 11. | CONFIG_SYSCTL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 12. | CONFIG_MULTIUSER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 13. | CONFIG_POSIX_TIMERS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 14. | CONFIG_PRINTK | 应 | 应 | 应 | 应 | y | y | y | y | | +| 15. | CONFIG_BUG | 应 | 应 | 应 | 应 | y | y | y | y | | +| 16. | CONFIG_ELF_CORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 17. | CONFIG_FUTEX | 应 | 应 | 应 | 应 | y | y | y | y | | +| 18. | CONFIG_EPOLL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 19. | CONFIG_SHMEM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 20. | CONFIG_AIO | 应 | 应 | 应 | 应 | y | y | y | y | | +| 21. | CONFIG_IO_URING | 应 | 应 | 应 | 应 | y | y | y | y | | +| 22. | CONFIG_KALLSYMS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 23. | CONFIG_KALLSYMS_ALL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 24. | CONFIG_RCU_STALL_COMMON | 应 | 应 | 应 | 应 | y | y | y | y | | +| 25. | CONFIG_NUMA_BALANCING | 应 | 应 | 应 | 应 | y | y | y | y | | +| 26. | CONFIG_SYSFS_SYSCALL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 27. | CONFIG_FHANDLE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 28. | CONFIG_SIGNALFD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 29. | CONFIG_EVENTFD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 30. | CONFIG_TREE_RCU | 应 | 应 | 应 | 应 | y | y | y | y | | +| 31. | CONFIG_ADVISE_SYSCALLS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 32. | CONFIG_PSI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 33. | CONFIG_USER_NS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 34. | CONFIG_TIMERFD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 35. | CONFIG_PAGE_COUNTER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 36. | CONFIG_MEMBARRIER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 37. | CONFIG_NR_CPUS | 应 | 应 | 应 | 应 | [1024,8192] | [1024,8192] | 256 | 512 | | +| 38. | CONFIG_64BIT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 39. | CONFIG_MMU | 应 | 应 | 应 | 应 | y | y | y | y | | +| 40. | CONFIG_SMP | 应 | 应 | 应 | 应 | y | y | y | y | | +| 41. 
| CONFIG_PARAVIRT | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 42. | CONFIG_DMI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 43. | CONFIG_SCHED_MC | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 44. | CONFIG_NUMA | 应 | 应 | 应 | 应 | y | y | y | y | | +| 45. | CONFIG_RELOCATABLE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 46. | CONFIG_SCHED_SMT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 47. | CONFIG_GENERIC_BUG | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 48. | CONFIG_PERF_EVENTS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 49. | CONFIG_TRACEPOINTS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 50. | CONFIG_KEXEC | 应 | 应 | 应 | 应 | y | y | y | y | | +| 51. | CONFIG_KEXEC_FILE | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 52. | CONFIG_CRASH_DUMP | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 53. | CONFIG_DEBUG_KERNEL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 54. | CONFIG_MAGIC_SYSRQ | 应 | 应 | 应 | 应 | y | y | y | y | | +| 55. | CONFIG_DEBUG_FS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 56. | CONFIG_PANIC_ON_OOPS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 57. | CONFIG_LOCKUP_DETECTOR | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 58. | CONFIG_SOFTLOCKUP_DETECTOR | 应 | 应 | 应 | 应 | y | y | y | y | | +| 59. | CONFIG_HARDLOCKUP_DETECTOR | 应 | 应 | 应 | 应 | y | y | y | y | | +| 60. | CONFIG_DETECT_HUNG_TASK | 应 | 应 | 应 | 应 | y | y | y | y | | +| 61. | CONFIG_STACKTRACE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 62. | CONFIG_FTRACE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 63. | CONFIG_DYNAMIC_FTRACE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 64. | CONFIG_FTRACE_SYSCALLS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 65. | CONFIG_BLK_DEV_IO_TRACE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 66. | CONFIG_DEBUG_INFO | 应 | 应 | 应 | 应 | y | y | y | y | | +| 67. | CONFIG_DEBUG_INFO_BTF | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 68. | CONFIG_CRASH_CORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 69. | CONFIG_KEXEC_CORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 70. | CONFIG_PVPANIC | 应 | 应 | 应 | 应 | y | y | y | y | | +| 71. 
| CONFIG_BPF | 应 | 应 | 应 | 应 | y | y | y | y | | +| 72. | CONFIG_BPF_SYSCALL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 73. | CONFIG_BPF_JIT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 75. | CONFIG_LSM | 应 | 应 | 应 | 应 | lockdown,yama,integrity,selinux,bpf | lockdown,yama,integrity,selinux,bpf | landlock,lockdown,yama,loadpin,safesetid,integrity,bpf | landlock,lockdown,yama,loadpin,safesetid,bpf | | +| 76. | CONFIG_CRYPTO_SM4 | 应 | 应 | 应 | 应 | m | m | y | y | | +| 77. | CONFIG_CRYPTO_SM4_GENERIC | 应 | 应 | 应 | 应 | m | m | y | y | | +| 78. | CONFIG_SECURITY | 应 | 应 | 应 | 应 | y | y | y | y | | +| 79. | CONFIG_SECURITYFS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 80. | CONFIG_SECURITY_NETWORK | 应 | 应 | 应 | 应 | y | y | y | y | | +| 81. | CONFIG_SECURITY_PATH | 应 | 应 | 应 | 应 | y | y | y | y | | +| 82. | CONFIG_SECURITY_SELINUX | 应 | 应 | 应 | 应 | y | y | y | y | | +| 83. | CONFIG_INTEGRITY | 应 | 应 | 应 | 应 | y | y | y | y | | +| 84. | CONFIG_IMA | 应 | 应 | 应 | 应 | y | y | y | y | | +| 85. | CONFIG_EVM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 86. | CONFIG_CRYPTO | 应 | 应 | 应 | 应 | y | y | y | y | | +| 87. | CONFIG_CRYPTO_ALGAPI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 88. | CONFIG_CRYPTO_ALGAPI2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 89. | CONFIG_CRYPTO_AEAD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 90. | CONFIG_CRYPTO_AEAD2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 91. | CONFIG_CRYPTO_SKCIPHER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 92. | CONFIG_CRYPTO_SKCIPHER2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 93. | CONFIG_CRYPTO_HASH | 应 | 应 | 应 | 应 | y | y | y | y | | +| 94. | CONFIG_CRYPTO_HASH2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 95. | CONFIG_CRYPTO_RNG | 应 | 应 | 应 | 应 | y | y | y | y | | +| 96. | CONFIG_CRYPTO_RNG2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 97. | CONFIG_CRYPTO_AKCIPHER2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 98. | CONFIG_CRYPTO_AKCIPHER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 99. | CONFIG_CRYPTO_MANAGER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 100. 
| CONFIG_CRYPTO_MANAGER2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 101. | CONFIG_CRYPTO_RSA | 应 | 应 | 应 | 应 | y | y | y | y | | +| 102. | CONFIG_CRYPTO_SM2 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 103. | CONFIG_CRYPTO_AES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 104. | CONFIG_CRYPTO_GCM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 105. | CONFIG_CRYPTO_GHASH | 应 | 应 | 应 | 应 | y | y | y | y | | +| 106. | CONFIG_CRYPTO_SHA256 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 107. | CONFIG_CRYPTO_SM3 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 108. | CONFIG_CRYPTO_SM3_GENERIC | 应 | 应 | 应 | 应 | y | y | y | y | | +| 109. | CONFIG_ASYMMETRIC_KEY_TYPE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 110. | CONFIG_CRYPTO_LIB_AES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 111. | CONFIG_CRYPTO_LIB_SHA256 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 112. | CONFIG_KEYS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 113. | CONFIG_SYSTEM_TRUSTED_KEYRING | 应 | 应 | 应 | 应 | y | y | y | y | | +| 114. | CONFIG_TRUSTED_KEYS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 115. | CONFIG_BLOCK | 应 | 应 | 应 | 应 | y | y | y | y | | +| 116. | CONFIG_IOSCHED_BFQ | 应 | 应 | 应 | 应 | y | y | y | y | | +| 117. | CONFIG_BLK_MQ_PCI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 118. | CONFIG_BLK_MQ_VIRTIO | 应 | 应 | 应 | 应 | y | y | y | y | | +| 119. | CONFIG_MQ_IOSCHED_DEADLINE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 120. | CONFIG_FREEZER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 121. | CONFIG_ACPI_IPMI | 应 | 应 | 应 | 应 | m | m | m | m | | +| 122. | CONFIG_PM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 123. | CONFIG_ACPI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 124. | CONFIG_ACPI_PROCESSOR | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 125. | CONFIG_ACPI_NUMA | 应 | 应 | 应 | 应 | y | y | y | y | | +| 126. | CONFIG_CPU_FREQ | 应 | 应 | 应 | 应 | y | y | y | y | | +| 127. | CONFIG_CPU_IDLE | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 128. | CONFIG_ACPI_APEI_PCIEAER | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 130. | CONFIG_ACPI_PCI_SLOT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 131. 
| CONFIG_SYSVIPC | 应 | 应 | 应 | 应 | y | y | y | y | | +| 132. | CONFIG_AUDIT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 133. | CONFIG_HOTPLUG_CPU | 应 | 应 | 应 | 应 | y | y | y | y | | +| 134. | CONFIG_KPROBES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 135. | CONFIG_SECCOMP | 应 | 应 | 应 | 应 | y | y | y | y | | +| 136. | CONFIG_MODULES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 137. | CONFIG_MODULE_UNLOAD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 138. | CONFIG_MODULE_SIG | 应 | 应 | 应 | 应 | y | y | y | y | | +| 139. | CONFIG_BINFMT_ELF | 应 | 应 | 应 | 应 | y | y | y | y | | +| 140. | CONFIG_BINFMT_SCRIPT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 141. | CONFIG_COREDUMP | 应 | 应 | 应 | 应 | y | y | y | y | | +| 142. | CONFIG_SYSVIPC_SYSCTL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 143. | CONFIG_UPROBES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 144. | CONFIG_KRETPROBES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 145. | CONFIG_STACKPROTECTOR | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 146. | CONFIG_ELFCORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 147. | CONFIG_POSIX_MQUEUE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 148. | CONFIG_POSIX_MQUEUE_SYSCTL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 149. | CONFIG_THREAD_INFO_IN_TASK | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 150. | CONFIG_VMAP_STACK | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 151. | CONFIG_MODVERSIONS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 152. | CONFIG_SWAP | 应 | 应 | 应 | 应 | y | y | y | y | | +| 153. | CONFIG_SLUB | 应 | 应 | 应 | 应 | y | y | y | y | | +| 154. | CONFIG_SPARSEMEM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 155. | CONFIG_MEMORY_HOTPLUG | 应 | 应 | 应 | 应 | y | y | y | y | | +| 156. | CONFIG_COMPACTION | 应 | 应 | 应 | 应 | y | y | y | y | | +| 157. | CONFIG_MIGRATION | 应 | 应 | 应 | 应 | y | y | y | y | | +| 158. | CONFIG_TRANSPARENT_HUGEPAGE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 159. | CONFIG_ZONE_DMA | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 160. | CONFIG_ZONE_DMA32 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 161. 
| CONFIG_USERFAULTFD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 162. | CONFIG_MEMFD_CREATE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 163. | CONFIG_VM_EVENT_COUNTERS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 164. | CONFIG_EFI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 165. | CONFIG_EFI_STUB | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 166. | CONFIG_TLS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 167. | CONFIG_SMC | 应 | 应 | 应 | 应 | m | m | m | m | | +| 168. | CONFIG_INET_DIAG | 应 | 应 | 应 | 应 | m | m | m | m | | +| 169. | CONFIG_NF_CONNTRACK | 应 | 应 | 应 | 应 | m | m | m | m | | +| 170. | CONFIG_NF_NAT | 应 | 应 | 应 | 应 | m | m | m | m | | +| 171. | CONFIG_NF_TABLES | 应 | 应 | 应 | 应 | m | m | m | m | | +| 172. | CONFIG_IP_SET | 应 | 应 | 应 | 应 | m | m | m | m | | +| 173. | CONFIG_IP_VS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 174. | CONFIG_IP_NF_RAW | 应 | 应 | 应 | 应 | m | m | m | m | | +| 175. | CONFIG_IP_NF_SECURITY | 应 | 应 | 应 | 应 | m | m | m | m | | +| 176. | CONFIG_IP_NF_ARPTABLES | 应 | 应 | 应 | 应 | m | m | m | m | | +| 177. | CONFIG_BRIDGE | 应 | 应 | 应 | 应 | m | m | m | m | | +| 178. | CONFIG_NET_SCH_INGRESS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 179. | CONFIG_DNS_RESOLVER | 应 | 应 | 应 | 应 | m | m | y | m | | +| 180. | CONFIG_VSOCKETS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 181. | CONFIG_NETLINK_DIAG | 应 | 应 | 应 | 应 | m | m | m | m | | +| 182. | CONFIG_VLAN_8021Q | 应 | 应 | 应 | 应 | m | m | m | m | | +| 183. | CONFIG_INET_TCP_DIAG | 应 | 应 | 应 | 应 | m | m | m | m | | +| 184. | CONFIG_INET_UDP_DIAG | 应 | 应 | 应 | 应 | m | m | m | m | | +| 185. | CONFIG_PACKET_DIAG | 应 | 应 | 应 | 应 | m | m | m | m | | +| 186. | CONFIG_NETFILTER_XT_MARK | 应 | 应 | 应 | 应 | m | m | m | m | | +| 187. | CONFIG_IP_NF_IPTABLES | 应 | 应 | 应 | 应 | m | m | m | m | | +| 188. | CONFIG_NET | 应 | 应 | 应 | 应 | y | y | y | y | | +| 189. | CONFIG_PACKET | 应 | 应 | 应 | 应 | y | y | y | y | | +| 190. | CONFIG_UNIX | 应 | 应 | 应 | 应 | y | y | y | y | | +| 191. | CONFIG_XFRM_USER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 192. 
| CONFIG_XDP_SOCKETS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 193. | CONFIG_INET | 应 | 应 | 应 | 应 | y | y | y | y | | +| 194. | CONFIG_SYN_COOKIES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 195. | CONFIG_NETFILTER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 196. | CONFIG_NETFILTER_INGRESS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 197. | CONFIG_NF_TABLES_INET | 应 | 应 | 应 | 应 | y | y | y | y | | +| 198. | CONFIG_NET_SCHED | 应 | 应 | 应 | 应 | y | y | y | y | | +| 199. | CONFIG_NET_CLS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 200. | CONFIG_NET_CLS_ACT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 201. | CONFIG_RPS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 202. | CONFIG_XPS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 203. | CONFIG_XFRM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 204. | CONFIG_TCP_CONG_ADVANCED | 应 | 应 | 应 | 应 | y | y | y | y | | +| 205. | CONFIG_TCP_CONG_CUBIC | 应 | 应 | 应 | 应 | y | y | m | m | | +| 206. | CONFIG_IPV6 | 应 | 应 | 应 | 应 | y | y | m | m | | +| 207. | CONFIG_NETFILTER_ADVANCED | 应 | 应 | 应 | 应 | y | y | y | y | | +| 208. | CONFIG_NF_TABLES_IPV4 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 209. | CONFIG_NF_TABLES_IPV6 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 210. | CONFIG_NF_TABLES_ARP | 应 | 应 | 应 | 应 | y | y | y | y | | +| 211. | CONFIG_NET_SCH_FQ_CODEL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 212. | CONFIG_CGROUPS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 213. | CONFIG_BLK_CGROUP | 应 | 应 | 应 | 应 | y | y | y | y | | +| 214. | CONFIG_CGROUP_SCHED | 应 | 应 | 应 | 应 | y | y | y | y | | +| 215. | CONFIG_CGROUP_PIDS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 216. | CONFIG_CGROUP_RDMA | 应 | 应 | 应 | 应 | y | y | y | y | | +| 217. | CONFIG_CGROUP_HUGETLB | 应 | 应 | 应 | 应 | y | y | y | y | | +| 218. | CONFIG_CGROUP_DEVICE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 219. | CONFIG_CGROUP_CPUACCT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 220. | CONFIG_CGROUP_PERF | 应 | 应 | 应 | 应 | y | y | y | y | | +| 221. | CONFIG_CGROUP_BPF | 应 | 应 | 应 | 应 | y | y | y | y | | +| 222. 
| CONFIG_NAMESPACES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 223. | CONFIG_CGROUP_FREEZER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 224. | CONFIG_ZRAM | 应 | 应 | 应 | 应 | m | m | m | m | | +| 225. | CONFIG_NVME_CORE | 应 | 应 | 应 | 应 | m | m | m | y | | +| 226. | CONFIG_BLK_DEV_NVME | 应 | 应 | 应 | 应 | m | m | m | m | | +| 227. | CONFIG_BONDING | 应 | 应 | 应 | 应 | m | m | m | m | | +| 228. | CONFIG_TUN | 应 | 应 | 应 | 应 | m | m | m | y | | +| 229. | CONFIG_TAP | 应 | 应 | 应 | 应 | m | m | m | m | | +| 230. | CONFIG_VETH | 应 | 应 | 应 | 应 | m | m | m | m | | +| 231. | CONFIG_VIRTIO_NET | 应 | 应 | 应 | 应 | m | m | m | m | | +| 232. | CONFIG_IPMI_HANDLER | 应 | 应 | 应 | 应 | m | m | m | m | | +| 233. | CONFIG_INFINIBAND | 应 | 应 | 应 | 应 | m | m | m | m | | +| 234. | CONFIG_UIO | 应 | 应 | 应 | 应 | m | m | m | m | | +| 235. | CONFIG_VFIO | 应 | 应 | 应 | 应 | m | m | m | m | | +| 236. | CONFIG_VFIO_PCI | 应 | 应 | 应 | 应 | m | m | m | m | | +| 237. | CONFIG_VHOST_NET | 应 | 应 | 应 | 应 | m | m | m | m | | +| 238. | CONFIG_VHOST_VSOCK | 应 | 应 | 应 | 应 | m | m | m | m | | +| 239. | CONFIG_VXLAN | 应 | 应 | 应 | 应 | m | m | m | m | | +| 240. | CONFIG_IPMI_SI | 应 | 应 | 应 | 应 | m | m | m | m | | +| 241. | CONFIG_SOFT_WATCHDOG | 应 | 应 | 应 | 应 | m | m | m | m | | +| 242. | CONFIG_VHOST | 应 | 应 | 应 | 应 | m | m | m | m | | +| 243. | CONFIG_NVME_TARGET | 应 | 应 | 应 | 应 | m | m | m | y | | +| 244. | CONFIG_BLK_DEV_DM | 应 | 应 | 应 | 应 | m | m | m | m | | +| 245. | CONFIG_NET_TEAM | 应 | 应 | 应 | 应 | m | m | m | m | | +| 246. | CONFIG_SATA_AHCI | 应 | 应 | 应 | 应 | m | m | y | y | | +| 247. | CONFIG_I2C | 应 | 应 | 应 | 应 | y | y | y | y | | +| 248. | CONFIG_VIRTIO_MEM | 应 | 应 | N/A | N/A | m | m | N/A | N/A | | +| 249. | CONFIG_ATA | 应 | 应 | 应 | 应 | m | m | y | y | | +| 250. | CONFIG_ETHTOOL_NETLINK | 应 | 应 | 应 | 应 | y | y | y | y | | +| 251. | CONFIG_BLK_DEV | 应 | 应 | 应 | 应 | y | y | y | y | | +| 252. | CONFIG_SCSI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 253. | CONFIG_MD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 254. 
| CONFIG_NETDEVICES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 255. | CONFIG_NET_CORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 256. | CONFIG_INPUT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 257. | CONFIG_INPUT_KEYBOARD | 应 | 应 | 应 | 应 | y | y | y | y | | +| 258. | CONFIG_INPUT_MOUSE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 259. | CONFIG_SERIO | 应 | 应 | 应 | 应 | y | y | y | y | | +| 260. | CONFIG_TTY | 应 | 应 | 应 | 应 | y | y | y | y | | +| 261. | CONFIG_HW_RANDOM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 262. | CONFIG_HWMON | 应 | 应 | 应 | 应 | y | y | y | y | | +| 263. | CONFIG_THERMAL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 264. | CONFIG_FB | 应 | 应 | 应 | 应 | y | y | y | y | | +| 265. | CONFIG_HDMI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 266. | CONFIG_FRAMEBUFFER_CONSOLE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 267. | CONFIG_USB_SUPPORT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 268. | CONFIG_USB | 应 | 应 | 应 | 应 | y | y | y | y | | +| 269. | CONFIG_EDAC | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 270. | CONFIG_RTC_CLASS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 271. | CONFIG_DMADEVICES | 应 | 应 | 应 | 应 | y | y | y | y | | +| 272. | CONFIG_VIRTIO_MENU | 应 | 应 | 应 | 应 | y | y | y | y | | +| 273. | CONFIG_VIRTIO_MMIO | 宜 | 宜 | 宜 | 宜 | m | m | m | y | | +| 274. | CONFIG_COMMON_CLK | 应 | 应 | 应 | 应 | y | y | y | y | | +| 275. | CONFIG_IOMMU_SUPPORT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 276. | CONFIG_RAS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 277. | CONFIG_SCSI_SAS_ATA | 应 | 应 | 应 | 应 | y | y | y | y | | +| 278. | CONFIG_SERIAL_CORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 279. | CONFIG_DEVMEM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 280. | CONFIG_WATCHDOG | 应 | 应 | 应 | 应 | y | y | y | y | | +| 281. | CONFIG_DMA_ENGINE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 282. | CONFIG_VIRTIO_PCI_LEGACY | 应 | 应 | 应 | 应 | y | y | y | y | | +| 283. | CONFIG_SERIAL_8250 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 284. | CONFIG_FB_EFI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 285. 
| CONFIG_VT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 286. | CONFIG_VT_CONSOLE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 287. | CONFIG_VIRTIO | 应 | 应 | 应 | 应 | y | m | y | y | | +| 288. | CONFIG_VIRTIO_PCI | 应 | 应 | 应 | 应 | y | m | y | y | | +| 289. | CONFIG_SPI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 290. | CONFIG_DEVTMPFS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 291. | CONFIG_DEVTMPFS_MOUNT | 应 | 应 | 应 | 应 | y | y | y | y | | +| 292. | CONFIG_AUXILIARY_BUS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 293. | CONFIG_PCI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 294. | CONFIG_PCIEPORTBUS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 295. | CONFIG_HOTPLUG_PCI_PCIE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 296. | CONFIG_PCI_MSI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 297. | CONFIG_HOTPLUG_PCI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 298. | CONFIG_PCIEAER | 应 | 应 | 应 | 应 | y | y | y | y | | +| 299. | CONFIG_PCI_ATS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 300. | CONFIG_PCI_IOV | 应 | 应 | 应 | 应 | y | y | y | y | | +| 301. | CONFIG_PCIEASPM | 应 | 应 | 应 | 应 | y | y | y | y | | +| 302. | CONFIG_HIGH_RES_TIMERS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 303. | CONFIG_NO_HZ_COMMON | 应 | 应 | 应 | 应 | y | y | y | y | | +| 304. | CONFIG_NO_HZ | 应 | 应 | 应 | 应 | y | y | y | y | | +| 305. | CONFIG_IP_VS_IPV6 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 306. | CONFIG_XFS_FS | 应 | 应 | 应 | 应 | m | m | y | y | | +| 307. | CONFIG_FUSE_FS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 308. | CONFIG_VIRTIO_FS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 309. | CONFIG_OVERLAY_FS | 应 | 应 | 应 | 应 | m | m | y | m | | +| 310. | CONFIG_ISO9660_FS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 311. | CONFIG_SQUASHFS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 312. | CONFIG_EROFS_FS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 313. | CONFIG_NFS_FS | 应 | 应 | 应 | 应 | m | m | y | m | | +| 314. | CONFIG_NFS_V4 | 应 | 应 | 应 | 应 | m | m | m | m | | +| 315. | CONFIG_SUNRPC | 应 | 应 | 应 | 应 | m | m | y | m | | +| 316. | CONFIG_NLS_UTF8 | 应 | 应 | 应 | 应 | m | m | y | m | | +| 317. 
| CONFIG_FAT_FS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 318. | CONFIG_VFAT_FS | 应 | 应 | 应 | 应 | m | m | m | m | | +| 319. | CONFIG_NFSD | 应 | 应 | 应 | 应 | m | m | y | m | | +| 320. | CONFIG_LOCKD | 应 | 应 | 应 | 应 | m | m | y | m | | +| 321. | CONFIG_DAX | 应 | 应 | 应 | 应 | y | y | y | y | | +| 322. | CONFIG_FS_DAX | 应 | 应 | N/A | N/A | y | y | y | y | | +| 323. | CONFIG_FILE_LOCKING | 应 | 应 | 应 | 应 | y | y | y | y | | +| 324. | CONFIG_FSNOTIFY | 应 | 应 | 应 | 应 | y | y | y | y | | +| 325. | CONFIG_DNOTIFY | 应 | 应 | 应 | 应 | y | y | y | y | | +| 326. | CONFIG_FANOTIFY | 应 | 应 | 应 | 应 | y | y | y | y | | +| 327. | CONFIG_QUOTA | 应 | 应 | 应 | 应 | y | y | y | y | | +| 328. | CONFIG_PROC_FS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 329. | CONFIG_PROC_KCORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 330. | CONFIG_PROC_VMCORE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 331. | CONFIG_PROC_SYSCTL | 应 | 应 | 应 | 应 | y | y | y | y | | +| 332. | CONFIG_KERNFS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 333. | CONFIG_SYSFS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 334. | CONFIG_TMPFS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 335. | CONFIG_HUGETLBFS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 336. | CONFIG_HUGETLB_PAGE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 337. | CONFIG_MISC_FILESYSTEMS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 338. | CONFIG_NETWORK_FILESYSTEMS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 339. | CONFIG_NFS_FSCACHE | 应 | 应 | 应 | 应 | y | y | y | y | | +| 340. | CONFIG_NFSD_V4 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 341. | CONFIG_LOCKD_V4 | 应 | 应 | 应 | 应 | y | y | y | y | | +| 342. | CONFIG_NFS_COMMON | 应 | 应 | 应 | 应 | y | y | y | y | | +| 343. | CONFIG_NLS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 344. | CONFIG_NLS_ASCII | 应 | 应 | 应 | 应 | y | y | y | y | | +| 345. | CONFIG_PSTORE | 应 | 应 | 应 | 应 | y | y | m | m | | +| 346. | CONFIG_AUTOFS_FS | 应 | 应 | 应 | 应 | y | y | y | y | | +| 347. | CONFIG_KVM | 应 | 应 | 应 | 应 | m | y | y | y | | +| 348. | CONFIG_VIRTUALIZATION | 应 | 应 | 应 | 应 | y | y | y | y | | +| 349. 
| CONFIG_KVM_MMIO | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 350. | CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK | 应 | 应 | 应 | 应 | y | y | y | y | | +| 351. | CONFIG_GENERIC_MSI_IRQ | 应 | 应 | 应 | 应 | y | y | y | y | | +| 352. | CONFIG_IRQ_MSI_IOMMU | 应 | 应 | N/A | 应 | y | y | N/A | y | | +| 353. | CONFIG_NODES_SHIFT | 应 | 应 | 应 | 应 | 6/8/10 | 6/8/10 | 6 | 7 | | +| 354. | CONFIG_NTFS3_FS | 应 | 应 | 应 | N/A | m | m | m | N/A | | +| 355. | CONFIG_BLK_DEV_SD | 应 | 应 | 应 | 应 | m | m | m | y | | +| 356. | CONFIG_ACPI_THERMAL | 应 | 应 | 应 | N/A | y | y | y | N/A | | +| 357. | CONFIG_TRACING | 应 | 应 | 应 | 应 | y | y | y | y | | +| 358. | CONFIG_GPIO_ACPI | 应 | 应 | 应 | 应 | y | y | y | y | | +| 359. | CONFIG_MEMORY_FAILURE | 应 | 应 | N/A | N/A | y | y | N/A | N/A | | +| 360. | CONFIG_LIVEPATCH | 应 | 宜 | 宜 | 宜 | y | y | y | y | | +| 361. | CONFIG_PCIE_EDR | 应 | 应 | N/A | 应 | y | y | N/A | y | | +| 362. | CONFIG_RANDOMIZE_BASE | 应 | 应 | 宜 | 宜 | y | y | N/A | N/A | | +| 363. | CONFIG_X86_64 | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 364. | CONFIG_INSTRUCTION_DECODER | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 365. | CONFIG_KVM_GUEST | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 366. | CONFIG_X86_TSC | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 367. | CONFIG_IA32_FEAT_CTL | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 368. | CONFIG_CPU_SUP_INTEL | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 369. | CONFIG_CPU_SUP_AMD | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 370. | CONFIG_CPU_SUP_HYGON | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 371. | CONFIG_CPU_SUP_ZHAOXIN | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 372. | CONFIG_X86_SGX | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 373. | CONFIG_X86 | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 374. | CONFIG_X86_X2APIC | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 375. | CONFIG_HYPERVISOR_GUEST | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 376. 
| CONFIG_HPET_TIMER | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 377. | CONFIG_X86_LOCAL_APIC | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 378. | CONFIG_X86_IO_APIC | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 379. | CONFIG_ARCH_CPUIDLE_HALTPOLL | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 380. | CONFIG_PARAVIRT_CLOCK | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 381. | CONFIG_X86_64_SMP | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 382. | CONFIG_X86_CPUID | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 383. | CONFIG_X86_MSR | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 384. | CONFIG_CRYPTO_SIMD | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 385. | CONFIG_CPU_MITIGATIONS | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | 同CONFIG_SPECULATION_MITIGATIONS | +| 386. | CONFIG_VGA_CONSOLE | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 387. | CONFIG_IRQ_REMAP | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 388. | CONFIG_KVM_INTEL | 应 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 389. | CONFIG_KVM_AMD | 应 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 390. | CONFIG_CPU_SUP_CENTAUR | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 391. | CONFIG_INTEL_IOMMU | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 392. | CONFIG_X86_MCE | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 393. | CONFIG_X86_MCE_INTEL | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 394. | CONFIG_MICROCODE | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 395. | CONFIG_AMD_MEM_ENCRYPT | 应 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 396. | CONFIG_ARM_SMMU | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 397. | CONFIG_ARM64 | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 398. | CONFIG_ARM64_HW_AFDBM | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 399. | CONFIG_ARM64_PAN | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 400. | CONFIG_ARM64_RAS_EXTN | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 401. 
| CONFIG_ARM64_CNP | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 402. | CONFIG_ARM64_SVE | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 403. | CONFIG_ARM64_PSEUDO_NMI | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 404. | CONFIG_ARM_SMMU_V3 | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 405. | CONFIG_ARM_GIC | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 406. | CONFIG_ARM_GIC_V2M | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 407. | CONFIG_ARM_GIC_V3 | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 408. | CONFIG_ARM_GIC_V3_ITS | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 409. | CONFIG_ARM_GIC_V3_ITS_PCI | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 410. | CONFIG_ARM_PMU | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 411. | CONFIG_CPU_LITTLE_ENDIAN | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 412. | CONFIG_ARCH_HISI | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 413. | CONFIG_ARM64_E0PD | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 414. | CONFIG_ARM64_EPAN | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 415. | CONFIG_ARM_CCN | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 416. | CONFIG_ARM_SPE_PMU | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 417. | CONFIG_HISI_PMU | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 418. | CONFIG_CPU_PM | N/A | 应 | 应 | N/A | N/A | y | y | N/A | | +| 419. | CONFIG_HISILICON_LPC | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 420. | CONFIG_PCI_HOST_GENERIC | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 421. | CONFIG_PCI_HISI | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 422. | CONFIG_GENERIC_IRQ_IPI | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 423. | CONFIG_GPIO_HISI | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 424. | CONFIG_HISI_PCIE_PMU | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 425. | CONFIG_ARM_SMMU_V3_PMU | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 426. 
| CONFIG_SCSI_HISI_SAS | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 427. | CONFIG_SCSI_HISI_SAS_PCI | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 428. | CONFIG_DRM_HISI_HIBMC | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 429. | CONFIG_ARM64_SME | N/A | 应 | N/A | N/A | N/A | y | N/A | N/A | | +| 430. | CONFIG_SPI_HISI_KUNPENG | N/A | 应 | N/A | N/A | N/A | m | N/A | N/A | | +| 431. | CONFIG_LOONGARCH | N/A | N/A | 应 | 应 | N/A | N/A | y | N/A | | +| 432. | CONFIG_UNWINDER_PROLOGUE | N/A | N/A | 应 | N/A | N/A | N/A | y | N/A | | +| 433. | CONFIG SW64 | N/A | N/A | N/A | 应 | N/A | N/A | N/A | y | | +| 434. | CONFIG_NONCACHE_PAGE | N/A | N/A | N/A | 应 | N/A | N/A | N/A | y | | +| 435. | CONFIG_SW64_CHIP3 | N/A | N/A | N/A | 应 | N/A | N/A | N/A | y | | +| 436. | CONFIG_SW64_CPUFREQ | N/A | N/A | N/A | 应 | N/A | N/A | N/A | y | | +| 437. | CONFIG_SW64_CPUAUTOPLUG | N/A | N/A | N/A | 应 | N/A | N/A | N/A | y | | +| 438. | CONFIG_DEEP_MEMCPY | N/A | N/A | N/A | 应 | N/A | N/A | N/A | y | | +| 439. | CONFIG_DEEP_MEMSET | N/A | N/A | N/A | 应 | N/A | N/A | N/A | y | | +| 440. | CONFIG_RSEQ | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 441. | CONFIG_MEMCG_KMEM | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 442. | CONFIG_CHECKPOINT_RESTORE | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 443. | CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 444. | CONFIG_BASE_FULL | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 445. | CONFIG_TASKSTATS | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 446. | CONFIG_PROC_PID_CPUSET | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 447. | CONFIG_SCHED_AUTOGROUP | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 448. | CONFIG_PGTABLE_LEVELS | 宜 | 宜 | 宜 | 宜 | 5 | 4 | 3 | 4 | | +| 449. | CONFIG_PROFILING | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 450. | CONFIG_CRYPTO_CRC32 | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 451. | CONFIG_SECURITY_INFINIBAND | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 452. | CONFIG_SECURITY_NETWORK_XFRM | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 453. 
| CONFIG_CRYPTO_LZO | 宜 | 宜 | 宜 | 宜 | y | y | m | m | | +| 454. | CONFIG_CUSE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 455. | CONFIG_CRYPTO_FIPS | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 456. | CONFIG_RATIONAL | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 457. | CONFIG_SYSTEM_TRUSTED_KEYS | 宜 | 宜 | 宜 | 宜 | N/A | N/A | 0 | 0 | 需要配置一个值但不做限定 | +| 458. | CONFIG_MQ_IOSCHED_KYBER | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 459. | CONFIG_BLK_PM | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 460. | CONFIG_CRC16 | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 461. | CONFIG_BUILD_SALT | 宜 | 宜 | 宜 | 宜 | 0 | N/A | 0 | 0 | 需要配置一个值但不做限定 | +| 462. | CONFIG_HZ | 宜 | 宜 | 宜 | 宜 | 100/250/1000 | 100/250/1000 | 250 | 250 | | +| 463. | CONFIG_ZSMALLOC | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 464. | CONFIG_KSM | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 465. | CONFIG_IP_VS_PROTO_TCP | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 466. | CONFIG_IP_VS_RR | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 467. | CONFIG_TCP_CONG_BBR | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 468. | CONFIG_MPTCP | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 469. | CONFIG_NET_ACT_POLICE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 470. | CONFIG_NET_ACT_GACT | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 471. | CONFIG_NETFILTER_XTABLES | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 472. | CONFIG_CGROUP_WRITEBACK | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 473. | CONFIG_USB_ACM | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 474. | CONFIG_RTC_INTF_DEV | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 475. | CONFIG_NVME_FABRICS | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 476. | CONFIG_NVME_RDMA | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 477. | CONFIG_NVME_TCP | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 478. | CONFIG_MACVLAN | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 479. | CONFIG_USB_XHCI_HCD | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 480. | CONFIG_USB_EHCI_HCD | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 481. | CONFIG_USB_EHCI_PCI | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 482. | CONFIG_MEGARAID_SAS | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 483. 
| CONFIG_SCSI_MPT3SAS | 宜 | 宜 | 宜 | 宜 | m | m | y | m | | +| 484. | CONFIG_BNX2 | 宜 | 宜 | 宜 | 宜 | m | m | y | m | | +| 485. | CONFIG_BNX2X | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 486. | CONFIG_BNXT | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 487. | CONFIG_CHELSIO_T4 | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 488. | CONFIG_IGB | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 489. | CONFIG_IXGBE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 490. | CONFIG_IXGBEVF | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 491. | CONFIG_I40E | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 492. | CONFIG_I40EVF | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 493. | CONFIG_ICE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 494. | CONFIG_MLX4_EN | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 495. | CONFIG_MLX4_CORE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 496. | CONFIG_MLX5_CORE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 497. | CONFIG_MLX5_CORE_EN | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 498. | CONFIG_NGBE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 499. | CONFIG_TXGBE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 500. | CONFIG_MTD | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 501. | CONFIG_USB_STORAGE | 宜 | 宜 | 宜 | 宜 | m | m | m | y | | +| 502. | CONFIG_VIRTIO_CONSOLE | 宜 | 宜 | 宜 | 宜 | m | m | y | m | | +| 503. | CONFIG_DRM | 宜 | 宜 | 宜 | 宜 | m | m | y | y | | +| 504. | CONFIG_SCSI_MPT2SAS | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 505. | CONFIG_FCOE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 506. | CONFIG_E1000 | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 507. | CONFIG_FSCACHE | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 508. | CONFIG_NFS_V3 | 宜 | 宜 | 宜 | 宜 | m | m | m | m | 推荐 | +| 509. | CONFIG_NFS_V4_1 | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 510. | CONFIG_NFS_V4_2 | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 511. | CONFIG_NFSD_V3_ACL | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 512. | CONFIG_NFS_ACL_SUPPORT | 宜 | 宜 | 宜 | 宜 | m | m | y | y | | +| 513. | CONFIG_CONFIGFS_FS | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 514. | CONFIG_CIFS | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 515. 
| CONFIG_BTRFS_FS | 宜 | 宜 | 宜 | 宜 | m | m | y | y | | +| 516. | CONFIG_SPARSEMEM_VMEMMAP | 宜 | 宜 | 宜 | 宜 | y | y | y | y | | +| 517. | CONFIG_VIRTIO_BLK | 宜 | 宜 | 宜 | 宜 | m | m | m | m | | +| 518. | CONFIG_EXT3_FS | 宜 | 宜 | 宜 | 宜 | m | m | y | y | | +| 519. | CONFIG_EXT4_FS | 宜 | 宜 | 宜 | 宜 | m | m | y | y | | +| 520. | CONFIG_JBD2 | 宜 | 宜 | 宜 | 宜 | m | m | y | y | | +| 521. | CONFIG_LOG_BUF_SHIFT | 宜 | 宜 | 宜 | 宜 | 20 | 20 | 18 | 18 | | +| 522. | CONFIG_LOG_CPU_MAX_BUF_SHIFT | 宜 | 宜 | 宜 | 宜 | 12 | 12 | 12 | 12 | | +| 523. | CONFIG_RTC_SYSTOHC | 宜 | 宜 | 应 | 应 | y | y | y | y | | +| 524. | CONFIG_ILLEGAL_POINTER_VALUE | 宜 | 宜 | N/A | N/A | 0xdead000000000000 | 0xdead000000000000 | N/A | N/A | | +| 525. | CONFIG_DAMON | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 526. | CONFIG_CXL_BUS | 宜 | 宜 | N/A | N/A | m | m | N/A | N/A | | +| 527. | CONFIG_CXL_PCI | 宜 | 宜 | N/A | N/A | m | m | N/A | N/A | | +| 528. | CONFIG_NO_HZ_FULL | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 529. | CONFIG_NTB | 宜 | 宜 | N/A | N/A | m | m | N/A | N/A | | +| 530. | CONFIG_UACCE | 宜 | 宜 | N/A | N/A | m | m | N/A | N/A | | +| 531. | CONFIG_VIRT_CPU_ACCOUNTING | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 532. | CONFIG_INET_MPTCP_DIAG | 宜 | 宜 | N/A | N/A | m | m | m | m | | +| 533. | CONFIG_VIRT_CPU_ACCOUNTING_GEN | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 534. | CONFIG_PVPANIC_MMIO | 宜 | 宜 | N/A | N/A | m | m | N/A | N/A | | +| 535. | CONFIG_HINIC | 宜 | 宜 | N/A | N/A | m | m | N/A | N/A | | +| 536. | CONFIG_SCHED_CLUSTER | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 537. | CONFIG_ACPI_HMAT | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 538. | CONFIG_ACPI_APEI | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 539. | CONFIG_ACPI_APEI_GHES | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 540. | CONFIG_ACPI_APEI_MEMORY_FAILURE | 宜 | 宜 | N/A | N/A | y | y | N/A | N/A | | +| 541. | CONFIG_STACKPROTECTOR_STRONG | 宜 | 宜 | 宜 | N/A | y | y | y | N/A | | +| 542. 
| CONFIG_SCHED_MC_PRIO | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 543. | CONFIG_X86_CMPXCHG64 | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 544. | CONFIG_X86_CMOV | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 545. | CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 546. | CONFIG_X86_VSYSCALL_EMULATION | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 547. | CONFIG_X86_IOPL_IOPERM | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 548. | CONFIG_X86_DIRECT_GBPAGES | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 549. | CONFIG_X86_MPPARSE | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 550. | CONFIG_OUTPUT_FORMAT | 宜 | N/A | N/A | N/A | elf64-x86-64 | N/A | N/A | N/A | | +| 551. | CONFIG_PARAVIRT_SPINLOCKS | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 552. | CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64 | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 553. | CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64 | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 554. | CONFIG_CRYPTO_SM3_AVX_X86_64 | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 555. | CONFIG_INTEL_IDXD_BUS | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 556. | CONFIG_INTEL_PMT_CLASS | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 557. | CONFIG_INTEL_TPMI | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 558. | CONFIG_UNWINDER_ORC | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 559. | CONFIG_VFIO_MDEV | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 560. | CONFIG_VFIO_IOMMU_TYPE1 | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 561. | CONFIG_X86_INTEL_PSTATE | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 562. | CONFIG_INTEL_IDLE | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 563. | CONFIG_COMPAT | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 564. | CONFIG_INTEL_PMC_CORE | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 565. | CONFIG_INTEL_IFS | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 566. 
| CONFIG_SATA_ZHAOXIN | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 567. | CONFIG_HW_RANDOM_ZHAOXIN | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 568. | CONFIG_CRYPTO_DEV_ZHAOXIN | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 569. | CONFIG_CRYPTO_DEV_ZHAOXIN_AES | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 570. | CONFIG_CRYPTO_DEV_ZHAOXIN_SHA | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 571. | CONFIG_SENSORS_ZHAOXIN_CPUTEMP | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 572. | CONFIG_I2C_ZHAOXIN | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 573. | CONFIG_CRYPTO_SM3_ZHAOXIN_GMI | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 574. | CONFIG_CRYPTO_SM4_ZHAOXIN_GMI | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 575. | CONFIG_PINCTRL_ZHAOXIN | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 576. | CONFIG_PINCTRL_KX7000 | 宜 | N/A | N/A | N/A | m | N/A | N/A | N/A | | +| 577. | CONFIG_CRYPTO_DEV_CCP | 宜 | N/A | N/A | N/A | y | N/A | N/A | N/A | | +| 578. | CONFIG_ARM64_PMEM | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | 同类推荐 | +| 579. | CONFIG_ARM64_4K_PAGES | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 580. | CONFIG_CRYPTO_SM3_ARM64_CE | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 581. | CONFIG_SCSI_HISI_SAS | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 582. | CONFIG_HNS | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 583. | CONFIG_HNS3 | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 584. | CONFIG_RESET_HISI | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 585. | CONFIG_I2C_HISI | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 586. | CONFIG_CRYPTO_DEV_HISI_SEC | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 587. | CONFIG_CRYPTO_DEV_HISI_HPRE | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 588. | CONFIG_CRYPTO_DEV_HISI_TRNG | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 589. | CONFIG_ARM64_AMU_EXTN | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 590. 
| CONFIG_HISI_THERMAL | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 591. | CONFIG_GENERIC_PHY | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 592. | CONFIG_KUNPENG_HCCS | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 593. | CONFIG_ARM64_64K_PAGES | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 594. | CONFIG_ARM64_VA_BITS_48 | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 595. | CONFIG_ARM64_PA_BITS_48 | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 596. | CONFIG_ARM64_LSE_ATOMICS | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 597. | CONFIG_ARCH_PHYTIUM | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 598. | CONFIG_ARM_GIC_PHYTIUM_2500 | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 599. | CONFIG_CRYPTO_DEV_HISI_QM | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 600. | CONFIG_CRYPTO_DEV_HISI_SEC2 | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 601. | CONFIG_CRYPTO_DEV_HISI_ZIP | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 602. | CONFIG_INFINIBAND_HNS | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 603. | CONFIG_INFINIBAND_HNS_HIP08 | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 604. | CONFIG_CORESIGHT | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 605. | CONFIG_SPI_HISI_SFC_V3XX | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 606. | CONFIG_SPI_MASTER | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 607. | CONFIG_ARM_SMMU_V3_PMU | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 608. | CONFIG_ARM_SMMU_V3_SVA | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 609. | CONFIG_DRM_PHYTIUM | N/A | 宜 | N/A | N/A | N/A | m | m | N/A | | +| 610. | CONFIG_ACPI_CPPC_CPUFREQ | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 611. | CONFIG_ACPI_APEI_SEA | N/A | 宜 | N/A | N/A | N/A | y | N/A | N/A | | +| 612. | CONFIG_VFIO_PLATFORM | N/A | 宜 | N/A | N/A | N/A | m | N/A | N/A | | +| 613. | CONFIG_ARCH_STRICT_ALIGN | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 614. 
| CONFIG_ARCH_FORCE_MAX_ORDER | N/A | N/A | 宜 | N/A | N/A | N/A | 11 | N/A | | +| 615. | CONFIG_ARCH_IOREMAP | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 616. | CONFIG_16KB_3LEVEL | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | 同类推荐 | +| 617. | CONFIG_16KB_2LEVEL | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 618. | CONFIG_64KB_3LEVEL | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 619. | CONFIG_64KB_2LEVEL | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 620. | CONFIG_4KB_3LEVEL | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 621. | CONFIG_4KB_4LEVEL | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 622. | CONFIG_ARCH_WRITECOMBINE | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 623. | CONFIG_CPU_HAS_LSX | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 624. | CONFIG_CPU_HAS_LASX | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 625. | CONFIG_CPU_HAS_LBT | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 626. | CONFIG_I2C_LS2X | N/A | N/A | 宜 | N/A | N/A | N/A | m | N/A | | +| 627. | CONFIG_SPI_LOONGSON_PCI | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 628. | CONFIG_DWMAC_LOONGSON | N/A | N/A | 宜 | N/A | N/A | N/A | m | N/A | | +| 629. | CONFIG_DRM_LOONGSON | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 630. | CONFIG_FB_LS2K500 | N/A | N/A | 宜 | N/A | N/A | N/A | m | N/A | | +| 631. | CONFIG_GPIO_LOONGSON_64BIT | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 632. | CONFIG_RTC_DRV_LOONGSON | N/A | N/A | 宜 | N/A | N/A | N/A | y | N/A | | +| 633. 
| CONFIG_CRYPTO_CRC32_LOONGARCH | N/A | N/A | 宜 | N/A | N/A | N/A | m | N/A | | \ No newline at end of file -- Gitee From 2cf57e0b0f826c48a1250dd41c98ddb74d82a2b2 Mon Sep 17 00:00:00 2001 From: Wardenjohn Date: Mon, 23 Dec 2024 16:26:48 +0800 Subject: [PATCH 2029/2138] livepatch: Rename KLP_* to KLP_TRANSITION_* bugzilla: https://bugzilla.openanolis.cn/show_bug.cgi?id=13020 ANBZ: #13020 commit d927752f287fe10965612541593468ffcfa9231f upstream livepatch: Rename KLP_* to KLP_TRANSITION_* The original macros of KLP_* is about the state of the transition. Rename macros of KLP_* to KLP_TRANSITION_* to fix the confusing description of klp transition state. Signed-off-by: Wardenjohn Reviewed-by: Petr Mladek Tested-by: Petr Mladek Acked-by: Josh Poimboeuf Acked-by: Miroslav Benes Link: https://lore.kernel.org/r/20240507050111.38195-2-zhangwarden@gmail.com Signed-off-by: Petr Mladek Signed-off-by: zhangyongde.zyd Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/4349 --- include/linux/livepatch.h | 6 ++-- init/init_task.c | 2 +- kernel/livepatch/core.c | 4 +-- kernel/livepatch/patch.c | 4 +-- kernel/livepatch/transition.c | 54 +++++++++++++++++------------------ 5 files changed, 35 insertions(+), 35 deletions(-) diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 9b9b38e89563..51a258c24ff5 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -18,9 +18,9 @@ #if IS_ENABLED(CONFIG_LIVEPATCH) /* task patch states */ -#define KLP_UNDEFINED -1 -#define KLP_UNPATCHED 0 -#define KLP_PATCHED 1 +#define KLP_TRANSITION_IDLE -1 +#define KLP_TRANSITION_UNPATCHED 0 +#define KLP_TRANSITION_PATCHED 1 /** * struct klp_func - function structure for live patching diff --git a/init/init_task.c b/init/init_task.c index fd9e27185e23..ea99f5d9d076 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -203,7 +203,7 @@ struct task_struct init_task .trace_recursion = 0, #endif #ifdef CONFIG_LIVEPATCH - .patch_state = KLP_UNDEFINED, 
+ .patch_state = KLP_TRANSITION_IDLE, #endif #ifdef CONFIG_SECURITY .security = NULL, diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index ecbc9b6aba3a..52426665eecc 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -973,7 +973,7 @@ static int __klp_disable_patch(struct klp_patch *patch) if (klp_transition_patch) return -EBUSY; - klp_init_transition(patch, KLP_UNPATCHED); + klp_init_transition(patch, KLP_TRANSITION_UNPATCHED); klp_for_each_object(patch, obj) if (obj->patched) @@ -1008,7 +1008,7 @@ static int __klp_enable_patch(struct klp_patch *patch) pr_notice("enabling patch '%s'\n", patch->mod->name); - klp_init_transition(patch, KLP_PATCHED); + klp_init_transition(patch, KLP_TRANSITION_PATCHED); /* * Enforce the order of the func->transition writes in diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c index 4152c71507e2..90408500e5a3 100644 --- a/kernel/livepatch/patch.c +++ b/kernel/livepatch/patch.c @@ -95,9 +95,9 @@ static void notrace klp_ftrace_handler(unsigned long ip, patch_state = current->patch_state; - WARN_ON_ONCE(patch_state == KLP_UNDEFINED); + WARN_ON_ONCE(patch_state == KLP_TRANSITION_IDLE); - if (patch_state == KLP_UNPATCHED) { + if (patch_state == KLP_TRANSITION_UNPATCHED) { /* * Use the previously patched version of the function. 
* If no previous patches exist, continue with the diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c index e54c3d60a904..ba069459c101 100644 --- a/kernel/livepatch/transition.c +++ b/kernel/livepatch/transition.c @@ -23,7 +23,7 @@ static DEFINE_PER_CPU(unsigned long[MAX_STACK_ENTRIES], klp_stack_entries); struct klp_patch *klp_transition_patch; -static int klp_target_state = KLP_UNDEFINED; +static int klp_target_state = KLP_TRANSITION_IDLE; static unsigned int klp_signals_cnt; @@ -96,16 +96,16 @@ static void klp_complete_transition(void) pr_debug("'%s': completing %s transition\n", klp_transition_patch->mod->name, - klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); + klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching"); - if (klp_transition_patch->replace && klp_target_state == KLP_PATCHED) { + if (klp_transition_patch->replace && klp_target_state == KLP_TRANSITION_PATCHED) { klp_unpatch_replaced_patches(klp_transition_patch); klp_discard_nops(klp_transition_patch); } - if (klp_target_state == KLP_UNPATCHED) { + if (klp_target_state == KLP_TRANSITION_UNPATCHED) { /* - * All tasks have transitioned to KLP_UNPATCHED so we can now + * All tasks have transitioned to KLP_TRANSITION_UNPATCHED so we can now * remove the new functions from the func_stack. 
*/ klp_unpatch_objects(klp_transition_patch); @@ -123,36 +123,36 @@ static void klp_complete_transition(void) klp_for_each_func(obj, func) func->transition = false; - /* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */ - if (klp_target_state == KLP_PATCHED) + /* Prevent klp_ftrace_handler() from seeing KLP_TRANSITION_IDLE state */ + if (klp_target_state == KLP_TRANSITION_PATCHED) klp_synchronize_transition(); read_lock(&tasklist_lock); for_each_process_thread(g, task) { WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING)); - task->patch_state = KLP_UNDEFINED; + task->patch_state = KLP_TRANSITION_IDLE; } read_unlock(&tasklist_lock); for_each_possible_cpu(cpu) { task = idle_task(cpu); WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING)); - task->patch_state = KLP_UNDEFINED; + task->patch_state = KLP_TRANSITION_IDLE; } klp_for_each_object(klp_transition_patch, obj) { if (!klp_is_object_loaded(obj)) continue; - if (klp_target_state == KLP_PATCHED) + if (klp_target_state == KLP_TRANSITION_PATCHED) klp_post_patch_callback(obj); - else if (klp_target_state == KLP_UNPATCHED) + else if (klp_target_state == KLP_TRANSITION_UNPATCHED) klp_post_unpatch_callback(obj); } pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name, - klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); + klp_target_state == KLP_TRANSITION_PATCHED ? 
"patching" : "unpatching"); - klp_target_state = KLP_UNDEFINED; + klp_target_state = KLP_TRANSITION_IDLE; klp_transition_patch = NULL; } @@ -164,13 +164,13 @@ static void klp_complete_transition(void) */ void klp_cancel_transition(void) { - if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED)) + if (WARN_ON_ONCE(klp_target_state != KLP_TRANSITION_PATCHED)) return; pr_debug("'%s': canceling patching transition, going to unpatch\n", klp_transition_patch->mod->name); - klp_target_state = KLP_UNPATCHED; + klp_target_state = KLP_TRANSITION_UNPATCHED; klp_complete_transition(); } @@ -218,7 +218,7 @@ static int klp_check_stack_func(struct klp_func *func, unsigned long *entries, struct klp_ops *ops; int i; - if (klp_target_state == KLP_UNPATCHED) { + if (klp_target_state == KLP_TRANSITION_UNPATCHED) { /* * Check for the to-be-unpatched function * (the func itself). @@ -455,7 +455,7 @@ void klp_try_complete_transition(void) struct klp_patch *patch; bool complete = true; - WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED); + WARN_ON_ONCE(klp_target_state == KLP_TRANSITION_IDLE); /* * Try to switch the tasks to the target patch state by walking their @@ -532,11 +532,11 @@ void klp_start_transition(void) struct task_struct *g, *task; unsigned int cpu; - WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED); + WARN_ON_ONCE(klp_target_state == KLP_TRANSITION_IDLE); pr_notice("'%s': starting %s transition\n", klp_transition_patch->mod->name, - klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); + klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching"); /* * Mark all normal tasks as needing a patch state update. 
They'll @@ -578,7 +578,7 @@ void klp_init_transition(struct klp_patch *patch, int state) struct klp_func *func; int initial_state = !state; - WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED); + WARN_ON_ONCE(klp_target_state != KLP_TRANSITION_IDLE); klp_transition_patch = patch; @@ -589,7 +589,7 @@ void klp_init_transition(struct klp_patch *patch, int state) klp_target_state = state; pr_debug("'%s': initializing %s transition\n", patch->mod->name, - klp_target_state == KLP_PATCHED ? "patching" : "unpatching"); + klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching"); /* * Initialize all tasks to the initial patch state to prepare them for @@ -597,7 +597,7 @@ void klp_init_transition(struct klp_patch *patch, int state) */ read_lock(&tasklist_lock); for_each_process_thread(g, task) { - WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED); + WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE); task->patch_state = initial_state; } read_unlock(&tasklist_lock); @@ -607,19 +607,19 @@ void klp_init_transition(struct klp_patch *patch, int state) */ for_each_possible_cpu(cpu) { task = idle_task(cpu); - WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED); + WARN_ON_ONCE(task->patch_state != KLP_TRANSITION_IDLE); task->patch_state = initial_state; } /* * Enforce the order of the task->patch_state initializations and the * func->transition updates to ensure that klp_ftrace_handler() doesn't - * see a func in transition with a task->patch_state of KLP_UNDEFINED. + * see a func in transition with a task->patch_state of KLP_TRANSITION_IDLE. * * Also enforce the order of the klp_target_state write and future * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() and * __klp_sched_try_switch() don't set a task->patch_state to - * KLP_UNDEFINED. + * KLP_TRANSITION_IDLE. */ smp_wmb(); @@ -652,7 +652,7 @@ void klp_reverse_transition(void) pr_debug("'%s': reversing transition from %s\n", klp_transition_patch->mod->name, - klp_target_state == KLP_PATCHED ? 
"patching to unpatching" : + klp_target_state == KLP_TRANSITION_PATCHED ? "patching to unpatching" : "unpatching to patching"); /* @@ -741,7 +741,7 @@ void klp_force_transition(void) klp_update_patch_state(idle_task(cpu)); /* Set forced flag for patches being removed. */ - if (klp_target_state == KLP_UNPATCHED) + if (klp_target_state == KLP_TRANSITION_UNPATCHED) klp_transition_patch->forced = true; else if (klp_transition_patch->replace) { klp_for_each_patch(patch) { -- Gitee From ee404ab7bf8cc6545b79f9a63ef52ec0dc1833be Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Wed, 1 Jan 2025 14:00:37 +0100 Subject: [PATCH 2030/2138] fuse: respect FOPEN_KEEP_CACHE on opendir ANBZ: #13391 commit 03f275adb8fbd7b4ebe96a1ad5044d8e602692dc upstream. The re-factoring of fuse_dir_open() missed the need to invalidate directory inode page cache with open flag FOPEN_KEEP_CACHE. Fixes: 7de64d521bf92 ("fuse: break up fuse_open_common()") Reported-by: Prince Kumar Closes: https://lore.kernel.org/linux-fsdevel/CAEW=TRr7CYb4LtsvQPLj-zx5Y+EYBmGfM24SuzwyDoGVNoKm7w@mail.gmail.com/ Signed-off-by: Amir Goldstein Link: https://lore.kernel.org/r/20250101130037.96680-1-amir73il@gmail.com Reviewed-by: Bernd Schubert Signed-off-by: Christian Brauner Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4475 --- fs/fuse/dir.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index dddcd8c630e0..b6c0ad4308e0 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1658,6 +1658,8 @@ static int fuse_dir_open(struct inode *inode, struct file *file) */ if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE)) nonseekable_open(inode, file); + if (!(ff->open_flags & FOPEN_KEEP_CACHE)) + invalidate_inode_pages2(inode->i_mapping); } return err; -- Gitee From 85e7766dda8d4712c970c86f4122de303b1abf55 Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Sun, 6 Oct 2024 21:43:24 +0300 Subject: [PATCH 2031/2138] virtio_fs: add informative log 
for new tag discovery ANBZ: #13391 commit 22d984f1b90f960f9aa92e31b0d310c5f90be8a6 upstream. Enhance the device probing process by adding a log message when a new virtio-fs tag is successfully discovered. This improvement provides better visibility into the initialization of virtio-fs devices. Signed-off-by: Max Gurtovoy Message-Id: <20241006184324.8497-1-mgurtovoy@nvidia.com> Signed-off-by: Michael S. Tsirkin Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4475 --- fs/fuse/virtio_fs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index da036795b5fb..6b59600f5fbd 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -521,6 +521,7 @@ static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs) return -EINVAL; } + dev_info(&vdev->dev, "discovered new tag: %s\n", fs->tag); return 0; } -- Gitee From dff8448c6c012ef9b441cfc81f644c9ef31aa1d0 Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Sun, 6 Oct 2024 21:43:41 +0300 Subject: [PATCH 2032/2138] virtio_fs: store actual queue index in mq_map ANBZ: #13391 commit df28040c7f24559ffb23d5323eaa2f023a107dbe upstream. This will eliminate the need for index recalculation during the fast path. Signed-off-by: Max Gurtovoy Message-Id: <20241006184341.9081-1-mgurtovoy@nvidia.com> Signed-off-by: Michael S. 
Tsirkin Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4475 --- fs/fuse/virtio_fs.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 6b59600f5fbd..00c5ac948aba 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -242,7 +242,7 @@ static ssize_t cpu_list_show(struct kobject *kobj, qid = fsvq->vq->index; for (cpu = 0; cpu < nr_cpu_ids; cpu++) { - if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid - VQ_REQUEST)) { + if (qid < VQ_REQUEST || (fs->mq_map[cpu] == qid)) { if (first) ret = snprintf(buf + pos, size - pos, "%u", cpu); else @@ -875,23 +875,23 @@ static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *f goto fallback; for_each_cpu(cpu, mask) - fs->mq_map[cpu] = q; + fs->mq_map[cpu] = q + VQ_REQUEST; } return; fallback: /* Attempt to map evenly in groups over the CPUs */ masks = group_cpus_evenly(fs->num_request_queues); - /* If even this fails we default to all CPUs use queue zero */ + /* If even this fails we default to all CPUs use first request queue */ if (!masks) { for_each_possible_cpu(cpu) - fs->mq_map[cpu] = 0; + fs->mq_map[cpu] = VQ_REQUEST; return; } for (q = 0; q < fs->num_request_queues; q++) { for_each_cpu(cpu, &masks[q]) - fs->mq_map[cpu] = q; + fs->mq_map[cpu] = q + VQ_REQUEST; } kfree(masks); } @@ -1486,7 +1486,7 @@ static void virtio_fs_send_req(struct fuse_iqueue *fiq, struct fuse_req *req) clear_bit(FR_PENDING, &req->flags); fs = fiq->priv; - queue_id = VQ_REQUEST + fs->mq_map[raw_smp_processor_id()]; + queue_id = fs->mq_map[raw_smp_processor_id()]; pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u queue_id %u\n", __func__, req->in.h.opcode, req->in.h.unique, -- Gitee From 453905ffcf70cd0db301425c081cc8bf06fab085 Mon Sep 17 00:00:00 2001 From: Alex Kogan Date: Fri, 14 May 2021 16:07:38 -0400 Subject: [PATCH 2033/2138] locking/qspinlock: Rename mcs lock/unlock macros 
and make them more generic ANBZ: #13056 cherry-picked from https://lore.kernel.org/all/20210514200743.3026725-2-alex.kogan@oracle.com/ The mcs unlock macro (arch_mcs_lock_handoff) should accept the value to be stored into the lock argument as another argument. This allows using the same macro in cases where the value to be stored when passing the lock is different from 1. Signed-off-by: Alex Kogan Reviewed-by: Steve Sistare Reviewed-by: Waiman Long Signed-off-by: Kong Yingqiao Reviewed-by: Cruz Zhao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4406 --- arch/arm/include/asm/mcs_spinlock.h | 6 +++--- include/asm-generic/mcs_spinlock.h | 4 ++-- kernel/locking/mcs_spinlock.h | 18 +++++++++--------- kernel/locking/qspinlock.c | 4 ++-- kernel/locking/qspinlock_paravirt.h | 2 +- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/arch/arm/include/asm/mcs_spinlock.h b/arch/arm/include/asm/mcs_spinlock.h index 529d2cf4d06f..1eb4d733459c 100644 --- a/arch/arm/include/asm/mcs_spinlock.h +++ b/arch/arm/include/asm/mcs_spinlock.h @@ -6,7 +6,7 @@ #include /* MCS spin-locking. */ -#define arch_mcs_spin_lock_contended(lock) \ +#define arch_mcs_spin_wait(lock) \ do { \ /* Ensure prior stores are observed before we enter wfe. */ \ smp_mb(); \ @@ -14,9 +14,9 @@ do { \ wfe(); \ } while (0) \ -#define arch_mcs_spin_unlock_contended(lock) \ +#define arch_mcs_lock_handoff(lock, val) \ do { \ - smp_store_release(lock, 1); \ + smp_store_release((lock), (val)); \ dsb_sev(); \ } while (0) diff --git a/include/asm-generic/mcs_spinlock.h b/include/asm-generic/mcs_spinlock.h index 10cd4ffc6ba2..f933d99c63e0 100644 --- a/include/asm-generic/mcs_spinlock.h +++ b/include/asm-generic/mcs_spinlock.h @@ -4,8 +4,8 @@ /* * Architectures can define their own: * - * arch_mcs_spin_lock_contended(l) - * arch_mcs_spin_unlock_contended(l) + * arch_mcs_spin_wait(l) + * arch_mcs_lock_handoff(l, val) * * See kernel/locking/mcs_spinlock.c. 
*/ diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h index 85251d8771d9..e794babc519a 100644 --- a/kernel/locking/mcs_spinlock.h +++ b/kernel/locking/mcs_spinlock.h @@ -21,7 +21,7 @@ struct mcs_spinlock { int count; /* nesting count, see qspinlock.c */ }; -#ifndef arch_mcs_spin_lock_contended +#ifndef arch_mcs_spin_wait /* * Using smp_cond_load_acquire() provides the acquire semantics * required so that subsequent operations happen after the @@ -29,20 +29,20 @@ struct mcs_spinlock { * ARM64 would like to do spin-waiting instead of purely * spinning, and smp_cond_load_acquire() provides that behavior. */ -#define arch_mcs_spin_lock_contended(l) \ -do { \ - smp_cond_load_acquire(l, VAL); \ +#define arch_mcs_spin_wait(l) \ +do { \ + smp_cond_load_acquire(l, VAL); \ } while (0) #endif -#ifndef arch_mcs_spin_unlock_contended +#ifndef arch_mcs_lock_handoff /* * smp_store_release() provides a memory barrier to ensure all * operations in the critical section has been completed before * unlocking. */ -#define arch_mcs_spin_unlock_contended(l) \ - smp_store_release((l), 1) +#define arch_mcs_lock_handoff(l, val) \ + smp_store_release((l), (val)) #endif /* @@ -91,7 +91,7 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node) WRITE_ONCE(prev->next, node); /* Wait until the lock holder passes the lock down. */ - arch_mcs_spin_lock_contended(&node->locked); + arch_mcs_spin_wait(&node->locked); } /* @@ -115,7 +115,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node) } /* Pass lock to next waiter. 
*/ - arch_mcs_spin_unlock_contended(&next->locked); + arch_mcs_lock_handoff(&next->locked, 1); } #endif /* __LINUX_MCS_SPINLOCK_H */ diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index ebe6b8ec7cb3..07c396408e84 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -474,7 +474,7 @@ void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) WRITE_ONCE(prev->next, node); pv_wait_node(node, prev); - arch_mcs_spin_lock_contended(&node->locked); + arch_mcs_spin_wait(&node->locked); /* * While waiting for the MCS lock, the next pointer may have @@ -553,7 +553,7 @@ void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) if (!next) next = smp_cond_load_relaxed(&node->next, (VAL)); - arch_mcs_spin_unlock_contended(&next->locked); + arch_mcs_lock_handoff(&next->locked, 1); pv_kick_node(lock, next); release: diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h index 6a0184e9c234..b280d4d1f586 100644 --- a/kernel/locking/qspinlock_paravirt.h +++ b/kernel/locking/qspinlock_paravirt.h @@ -368,7 +368,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node) * * Matches with smp_store_mb() and cmpxchg() in pv_wait_node() * - * The write to next->locked in arch_mcs_spin_unlock_contended() + * The write to next->locked in arch_mcs_lock_handoff() * must be ordered before the read of pn->state in the cmpxchg() * below for the code to work correctly. To guarantee full ordering * irrespective of the success or failure of the cmpxchg(), -- Gitee From d06037c93bdaba190a8fdf95a6c3be17916c70c7 Mon Sep 17 00:00:00 2001 From: Alex Kogan Date: Fri, 14 May 2021 16:07:39 -0400 Subject: [PATCH 2034/2138] locking/qspinlock: Refactor the qspinlock slow path ANBZ: #13056 cherry-picked from https://lore.kernel.org/all/20210514200743.3026725-3-alex.kogan@oracle.com/ Move some of the code manipulating the spin lock into separate functions. 
This would allow easier integration of alternative ways to manipulate that lock. Signed-off-by: Alex Kogan Reviewed-by: Steve Sistare Reviewed-by: Waiman Long Signed-off-by: Kong Yingqiao Reviewed-by: Cruz Zhao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4406 --- kernel/locking/qspinlock.c | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 07c396408e84..49c3b4c0dce8 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -290,6 +290,34 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock, #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath #endif +/* + * __try_clear_tail - try to clear tail by setting the lock value to + * _Q_LOCKED_VAL. + * @lock: Pointer to the queued spinlock structure + * @val: Current value of the lock + * @node: Pointer to the MCS node of the lock holder + */ +static __always_inline bool __try_clear_tail(struct qspinlock *lock, + u32 val, + struct mcs_spinlock *node) +{ + return atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL); +} + +/* + * __mcs_lock_handoff - pass the MCS lock to the next waiter + * @node: Pointer to the MCS node of the lock holder + * @next: Pointer to the MCS node of the first waiter in the MCS queue + */ +static __always_inline void __mcs_lock_handoff(struct mcs_spinlock *node, + struct mcs_spinlock *next) +{ + arch_mcs_lock_handoff(&next->locked, 1); +} + +#define try_clear_tail __try_clear_tail +#define mcs_lock_handoff __mcs_lock_handoff + #endif /* _GEN_PV_LOCK_SLOWPATH */ /** @@ -536,7 +564,7 @@ void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) * PENDING will make the uncontended transition fail. 
*/ if ((val & _Q_TAIL_MASK) == tail) { - if (atomic_try_cmpxchg_relaxed(&lock->val, &val, _Q_LOCKED_VAL)) + if (try_clear_tail(lock, val, node)) goto release; /* No contention */ } @@ -553,7 +581,7 @@ void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) if (!next) next = smp_cond_load_relaxed(&node->next, (VAL)); - arch_mcs_lock_handoff(&next->locked, 1); + mcs_lock_handoff(node, next); pv_kick_node(lock, next); release: @@ -580,6 +608,12 @@ EXPORT_SYMBOL(queued_spin_lock_slowpath); #undef pv_kick_node #undef pv_wait_head_or_lock +#undef try_clear_tail +#define try_clear_tail __try_clear_tail + +#undef mcs_lock_handoff +#define mcs_lock_handoff __mcs_lock_handoff + #undef queued_spin_lock_slowpath #define queued_spin_lock_slowpath __pv_queued_spin_lock_slowpath -- Gitee From 56fba3d69200a5f94ef9d63f891fd0810cb68c3d Mon Sep 17 00:00:00 2001 From: Alex Kogan Date: Fri, 14 May 2021 16:07:40 -0400 Subject: [PATCH 2035/2138] locking/qspinlock: Introduce CNA into the slow path of qspinlock ANBZ: #13056 cherry-picked from https://lore.kernel.org/all/20210514200743.3026725-3-alex.kogan@oracle.com/ In CNA, spinning threads are organized in two queues, a primary queue for threads running on the same node as the current lock holder, and a secondary queue for threads running on other nodes. After acquiring the MCS lock and before acquiring the spinlock, the MCS lock holder checks whether the next waiter in the primary queue (if exists) is running on the same NUMA node. If it is not, that waiter is detached from the main queue and moved into the tail of the secondary queue. This way, we gradually filter the primary queue, leaving only waiters running on the same preferred NUMA node. For more details, see https://arxiv.org/abs/1810.05600. Note that this variant of CNA may introduce starvation by continuously passing the lock between waiters in the main queue. This issue will be addressed later in the series. 
Enabling CNA is controlled via a new configuration option (NUMA_AWARE_SPINLOCKS). By default, the CNA variant is patched in at the boot time only if we run on a multi-node machine in native environment and the new config is enabled. (For the time being, the patching requires CONFIG_PARAVIRT_SPINLOCKS to be enabled as well. However, this should be resolved once static_call() is available.) This default behavior can be overridden with the new kernel boot command-line option "numa_spinlock=on/off" (default is "auto"). Signed-off-by: Alex Kogan Reviewed-by: Steve Sistare Reviewed-by: Waiman Long Signed-off-by: Kong Yingqiao Reviewed-by: Cruz Zhao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4406 --- .../admin-guide/kernel-parameters.txt | 10 + arch/x86/Kconfig | 20 ++ arch/x86/include/asm/qspinlock.h | 4 + arch/x86/kernel/alternative.c | 4 + kernel/locking/mcs_spinlock.h | 2 +- kernel/locking/qspinlock.c | 42 ++- kernel/locking/qspinlock_cna.h | 325 ++++++++++++++++++ 7 files changed, 402 insertions(+), 5 deletions(-) create mode 100644 kernel/locking/qspinlock_cna.h diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 503a55b1d9a7..84d58b9d0980 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4041,6 +4041,16 @@ NUMA balancing. Allowed values are enable and disable + numa_spinlock= [NUMA, PV_OPS] Select the NUMA-aware variant + of spinlock. The options are: + auto - Enable this variant if running on a multi-node + machine in native environment. + on - Unconditionally enable this variant. + off - Unconditionally disable this variant. + + Not specifying this option is equivalent to + numa_spinlock=auto. + numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA. 'node', 'default' can be specified This can be set from sysctl after boot. 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 17099e117c55..6018b6900a68 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1567,6 +1567,26 @@ config NUMA Otherwise, you should say N. +config NUMA_AWARE_SPINLOCKS + bool "Numa-aware spinlocks" + depends on NUMA + depends on QUEUED_SPINLOCKS + depends on 64BIT + # For now, we depend on PARAVIRT_SPINLOCKS to make the patching work. + # This is awkward, but hopefully would be resolved once static_call() + # is available. + depends on PARAVIRT_SPINLOCKS + default y + help + Introduce NUMA (Non Uniform Memory Access) awareness into + the slow path of spinlocks. + + In this variant of qspinlock, the kernel will try to keep the lock + on the same node, thus reducing the number of remote cache misses, + while trading some of the short term fairness for better performance. + + Say N if you want absolute first come first serve fairness. + config AMD_NUMA def_bool y prompt "Old style AMD Opteron NUMA detection" diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index e897046c5d2c..b0ebee446563 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h @@ -27,6 +27,10 @@ static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lo return val; } +#ifdef CONFIG_NUMA_AWARE_SPINLOCKS +extern void cna_configure_spin_lock_slowpath(void); +#endif + #ifdef CONFIG_PARAVIRT_SPINLOCKS extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); extern void __pv_init_lock_hash(void); diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 183d42302243..f3cb4309a8b3 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -1683,6 +1683,10 @@ void __init alternative_instructions(void) */ paravirt_set_cap(); +#if defined(CONFIG_NUMA_AWARE_SPINLOCKS) + cna_configure_spin_lock_slowpath(); +#endif + /* * First patch paravirt functions, such that we overwrite the indirect * call with the 
direct call. diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h index e794babc519a..3926aad129ed 100644 --- a/kernel/locking/mcs_spinlock.h +++ b/kernel/locking/mcs_spinlock.h @@ -17,7 +17,7 @@ struct mcs_spinlock { struct mcs_spinlock *next; - int locked; /* 1 if lock acquired */ + unsigned int locked; /* 1 if lock acquired */ int count; /* nesting count, see qspinlock.c */ }; diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index 49c3b4c0dce8..d3f99060b60f 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c @@ -11,7 +11,7 @@ * Peter Zijlstra */ -#ifndef _GEN_PV_LOCK_SLOWPATH +#if !defined(_GEN_PV_LOCK_SLOWPATH) && !defined(_GEN_CNA_LOCK_SLOWPATH) #include #include @@ -72,7 +72,8 @@ /* * On 64-bit architectures, the mcs_spinlock structure will be 16 bytes in * size and four of them will fit nicely in one 64-byte cacheline. For - * pvqspinlock, however, we need more space for extra data. To accommodate + * pvqspinlock, however, we need more space for extra data. The same also + * applies for the NUMA-aware variant of spinlocks (CNA). To accommodate * that, we insert two more long words to pad it up to 32 bytes. IOW, only * two of them can fit in a cacheline in this case. That is OK as it is rare * to have more than 2 levels of slowpath nesting in actual use. We don't @@ -81,7 +82,7 @@ */ struct qnode { struct mcs_spinlock mcs; -#ifdef CONFIG_PARAVIRT_SPINLOCKS +#if defined(CONFIG_PARAVIRT_SPINLOCKS) || defined(CONFIG_NUMA_AWARE_SPINLOCKS) long reserved[2]; #endif }; @@ -105,6 +106,8 @@ struct qnode { * Exactly fits one 64-byte cacheline on a 64-bit architecture. * * PV doubles the storage and uses the second cacheline for PV state. + * CNA also doubles the storage and uses the second cacheline for + * CNA-specific state. 
*/ static DEFINE_PER_CPU_ALIGNED(struct qnode, qnodes[MAX_NODES]); @@ -318,7 +321,7 @@ static __always_inline void __mcs_lock_handoff(struct mcs_spinlock *node, #define try_clear_tail __try_clear_tail #define mcs_lock_handoff __mcs_lock_handoff -#endif /* _GEN_PV_LOCK_SLOWPATH */ +#endif /* _GEN_PV_LOCK_SLOWPATH && _GEN_CNA_LOCK_SLOWPATH */ /** * queued_spin_lock_slowpath - acquire the queued spinlock @@ -594,6 +597,37 @@ void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val) } EXPORT_SYMBOL(queued_spin_lock_slowpath); +/* + * Generate the code for NUMA-aware spinlocks + */ +#if !defined(_GEN_CNA_LOCK_SLOWPATH) && defined(CONFIG_NUMA_AWARE_SPINLOCKS) +#define _GEN_CNA_LOCK_SLOWPATH + +#undef pv_init_node +#define pv_init_node cna_init_node + +#undef pv_wait_head_or_lock +#define pv_wait_head_or_lock cna_wait_head_or_lock + +#undef try_clear_tail +#define try_clear_tail cna_try_clear_tail + +#undef mcs_lock_handoff +#define mcs_lock_handoff cna_lock_handoff + +#undef queued_spin_lock_slowpath +/* + * defer defining queued_spin_lock_slowpath until after the include to + * avoid a name clash with the identically named field in pv_ops.lock + * (see cna_configure_spin_lock_slowpath()) + */ +#include "qspinlock_cna.h" +#define queued_spin_lock_slowpath __cna_queued_spin_lock_slowpath + +#include "qspinlock.c" + +#endif + /* * Generate the paravirt code for queued_spin_unlock_slowpath(). */ diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h new file mode 100644 index 000000000000..ca564e64e5de --- /dev/null +++ b/kernel/locking/qspinlock_cna.h @@ -0,0 +1,325 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _GEN_CNA_LOCK_SLOWPATH +#error "do not include this file" +#endif + +#include + +/* + * Implement a NUMA-aware version of MCS (aka CNA, or compact NUMA-aware lock). 
+ * + * In CNA, spinning threads are organized in two queues, a primary queue for + * threads running on the same NUMA node as the current lock holder, and a + * secondary queue for threads running on other nodes. Schematically, it + * looks like this: + * + * cna_node + * +----------+ +--------+ +--------+ + * |mcs:next | --> |mcs:next| --> ... |mcs:next| --> NULL [Primary queue] + * |mcs:locked| -. +--------+ +--------+ + * +----------+ | + * `----------------------. + * v + * +--------+ +--------+ + * |mcs:next| --> ... |mcs:next| [Secondary queue] + * +--------+ +--------+ + * ^ | + * `--------------------' + * + * N.B. locked := 1 if secondary queue is absent. Otherwise, it contains the + * encoded pointer to the tail of the secondary queue, which is organized as a + * circular list. + * + * After acquiring the MCS lock and before acquiring the spinlock, the MCS lock + * holder checks whether the next waiter in the primary queue (if exists) is + * running on the same NUMA node. If it is not, that waiter is detached from the + * main queue and moved into the tail of the secondary queue. This way, we + * gradually filter the primary queue, leaving only waiters running on the same + * preferred NUMA node. + * + * For more details, see https://arxiv.org/abs/1810.05600. 
+ * + * Authors: Alex Kogan + * Dave Dice + */ + +struct cna_node { + struct mcs_spinlock mcs; + u16 numa_node; + u16 real_numa_node; + u32 encoded_tail; /* self */ +}; + +static void __init cna_init_nodes_per_cpu(unsigned int cpu) +{ + struct mcs_spinlock *base = per_cpu_ptr(&qnodes[0].mcs, cpu); + int numa_node = cpu_to_node(cpu); + int i; + + for (i = 0; i < MAX_NODES; i++) { + struct cna_node *cn = (struct cna_node *)grab_mcs_node(base, i); + + cn->real_numa_node = numa_node; + cn->encoded_tail = encode_tail(cpu, i); + /* + * make sure @encoded_tail is not confused with other valid + * values for @locked (0 or 1) + */ + WARN_ON(cn->encoded_tail <= 1); + } +} + +static int __init cna_init_nodes(void) +{ + unsigned int cpu; + + /* + * this will break on 32bit architectures, so we restrict + * the use of CNA to 64bit only (see arch/x86/Kconfig) + */ + BUILD_BUG_ON(sizeof(struct cna_node) > sizeof(struct qnode)); + /* we store an encoded tail word in the node's @locked field */ + BUILD_BUG_ON(sizeof(u32) > sizeof(unsigned int)); + + for_each_possible_cpu(cpu) + cna_init_nodes_per_cpu(cpu); + + return 0; +} + +static __always_inline void cna_init_node(struct mcs_spinlock *node) +{ + struct cna_node *cn = (struct cna_node *)node; + + cn->numa_node = cn->real_numa_node; +} + +/* + * cna_splice_head -- splice the entire secondary queue onto the head of the + * primary queue. + * + * Returns the new primary head node or NULL on failure. + */ +static struct mcs_spinlock * +cna_splice_head(struct qspinlock *lock, u32 val, + struct mcs_spinlock *node, struct mcs_spinlock *next) +{ + struct mcs_spinlock *head_2nd, *tail_2nd; + u32 new; + + tail_2nd = decode_tail(node->locked); + head_2nd = tail_2nd->next; + + if (next) { + /* + * If the primary queue is not empty, the primary tail doesn't + * need to change and we can simply link the secondary tail to + * the old primary head. 
+ */ + tail_2nd->next = next; + } else { + /* + * When the primary queue is empty, the secondary tail becomes + * the primary tail. + */ + + /* + * Speculatively break the secondary queue's circular link such + * that when the secondary tail becomes the primary tail it all + * works out. + */ + tail_2nd->next = NULL; + + /* + * tail_2nd->next = NULL; old = xchg_tail(lock, tail); + * prev = decode_tail(old); + * try_cmpxchg_release(...); WRITE_ONCE(prev->next, node); + * + * If the following cmpxchg() succeeds, our stores will not + * collide. + */ + new = ((struct cna_node *)tail_2nd)->encoded_tail | + _Q_LOCKED_VAL; + if (!atomic_try_cmpxchg_release(&lock->val, &val, new)) { + /* Restore the secondary queue's circular link. */ + tail_2nd->next = head_2nd; + return NULL; + } + } + + /* The primary queue head now is what was the secondary queue head. */ + return head_2nd; +} + +static inline bool cna_try_clear_tail(struct qspinlock *lock, u32 val, + struct mcs_spinlock *node) +{ + /* + * We're here because the primary queue is empty; check the secondary + * queue for remote waiters. + */ + if (node->locked > 1) { + struct mcs_spinlock *next; + + /* + * When there are waiters on the secondary queue, try to move + * them back onto the primary queue and let them rip. + */ + next = cna_splice_head(lock, val, node, NULL); + if (next) { + arch_mcs_lock_handoff(&next->locked, 1); + return true; + } + + return false; + } + + /* Both queues are empty. Do what MCS does. */ + return __try_clear_tail(lock, val, node); +} + +/* + * cna_splice_next -- splice the next node from the primary queue onto + * the secondary queue. 
+ */ +static void cna_splice_next(struct mcs_spinlock *node, + struct mcs_spinlock *next, + struct mcs_spinlock *nnext) +{ + /* remove 'next' from the main queue */ + node->next = nnext; + + /* stick `next` on the secondary queue tail */ + if (node->locked <= 1) { /* if secondary queue is empty */ + /* create secondary queue */ + next->next = next; + } else { + /* add to the tail of the secondary queue */ + struct mcs_spinlock *tail_2nd = decode_tail(node->locked); + struct mcs_spinlock *head_2nd = tail_2nd->next; + + tail_2nd->next = next; + next->next = head_2nd; + } + + node->locked = ((struct cna_node *)next)->encoded_tail; +} + +/* + * cna_order_queue - check whether the next waiter in the main queue is on + * the same NUMA node as the lock holder; if not, and it has a waiter behind + * it in the main queue, move the former onto the secondary queue. + * Returns 1 if the next waiter runs on the same NUMA node; 0 otherwise. + */ +static int cna_order_queue(struct mcs_spinlock *node) +{ + struct mcs_spinlock *next = READ_ONCE(node->next); + struct cna_node *cn = (struct cna_node *)node; + int numa_node, next_numa_node; + + if (!next) + return 0; + + numa_node = cn->numa_node; + next_numa_node = ((struct cna_node *)next)->numa_node; + + if (next_numa_node != numa_node) { + struct mcs_spinlock *nnext = READ_ONCE(next->next); + + if (nnext) + cna_splice_next(node, next, nnext); + + return 0; + } + return 1; +} + +#define LOCK_IS_BUSY(lock) (atomic_read(&(lock)->val) & _Q_LOCKED_PENDING_MASK) + +/* Abuse the pv_wait_head_or_lock() hook to get some work done */ +static __always_inline u32 cna_wait_head_or_lock(struct qspinlock *lock, + struct mcs_spinlock *node) +{ + /* + * Try and put the time otherwise spent spin waiting on + * _Q_LOCKED_PENDING_MASK to use by sorting our lists. 
+ */ + while (LOCK_IS_BUSY(lock) && !cna_order_queue(node)) + cpu_relax(); + + return 0; /* we lied; we didn't wait, go do so now */ +} + +static inline void cna_lock_handoff(struct mcs_spinlock *node, + struct mcs_spinlock *next) +{ + u32 val = 1; + + if (node->locked > 1) { + struct cna_node *cn = (struct cna_node *)node; + + val = node->locked; /* preseve secondary queue */ + + /* + * We have a local waiter, either real or fake one; + * reload @next in case it was changed by cna_order_queue(). + */ + next = node->next; + + /* + * Pass over NUMA node id of primary queue, to maintain the + * preference even if the next waiter is on a different node. + */ + ((struct cna_node *)next)->numa_node = cn->numa_node; + } + + arch_mcs_lock_handoff(&next->locked, val); +} + +/* + * Constant (boot-param configurable) flag selecting the NUMA-aware variant + * of spinlock. Possible values: -1 (off) / 0 (auto, default) / 1 (on). + */ +static int numa_spinlock_flag; + +static int __init numa_spinlock_setup(char *str) +{ + if (!strcmp(str, "auto")) { + numa_spinlock_flag = 0; + return 1; + } else if (!strcmp(str, "on")) { + numa_spinlock_flag = 1; + return 1; + } else if (!strcmp(str, "off")) { + numa_spinlock_flag = -1; + return 1; + } + + return 0; +} +__setup("numa_spinlock=", numa_spinlock_setup); + +void __cna_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val); + +/* + * Switch to the NUMA-friendly slow path for spinlocks when we have + * multiple NUMA nodes in native environment, unless the user has + * overridden this default behavior by setting the numa_spinlock flag. 
+ */ +void __init cna_configure_spin_lock_slowpath(void) +{ + + if (numa_spinlock_flag < 0) + return; + + if (numa_spinlock_flag == 0 && (nr_node_ids < 2 || + pv_ops.lock.queued_spin_lock_slowpath != + native_queued_spin_lock_slowpath)) + return; + + cna_init_nodes(); + + pv_ops.lock.queued_spin_lock_slowpath = __cna_queued_spin_lock_slowpath; + + pr_info("Enabling CNA spinlock\n"); +} -- Gitee From e8260db064472d462c5d4a69dc18ce9d05fbf7ac Mon Sep 17 00:00:00 2001 From: Alex Kogan Date: Fri, 14 May 2021 16:07:41 -0400 Subject: [PATCH 2036/2138] locking/qspinlock: Introduce starvation avoidance into CNA ANBZ: #13056 cherry-picked from https://lore.kernel.org/all/20210514200743.3026725-3-alex.kogan@oracle.com/ Keep track of the time the thread at the head of the secondary queue has been waiting, and force inter-node handoff once this time passes a preset threshold. The default value for the threshold (1ms) can be overridden with the new kernel boot command-line option "qspinlock.numa_spinlock_threshold_ns". Signed-off-by: Alex Kogan Reviewed-by: Steve Sistare Reviewed-by: Waiman Long Signed-off-by: Kong Yingqiao Reviewed-by: Cruz Zhao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4406 --- .../admin-guide/kernel-parameters.txt | 8 ++ kernel/locking/qspinlock_cna.h | 81 +++++++++++++++---- 2 files changed, 73 insertions(+), 16 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 84d58b9d0980..181971c1d4e0 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4739,6 +4739,14 @@ [KNL] Number of legacy pty's. Overwrites compiled-in default number. + qspinlock.numa_spinlock_threshold_ns= [NUMA, PV_OPS] + Set the time threshold in nanoseconds for the + number of intra-node lock hand-offs before the + NUMA-aware spinlock is forced to be passed to + a thread on another NUMA node. 
Smaller values + result in a more fair, but less performant spinlock, + and vice versa. The default value is 1000000 (=1ms). + quiet [KNL] Disable most log messages r128= [HW,DRM] diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h index ca564e64e5de..0b991c340fb1 100644 --- a/kernel/locking/qspinlock_cna.h +++ b/kernel/locking/qspinlock_cna.h @@ -4,6 +4,8 @@ #endif #include +#include +#include /* * Implement a NUMA-aware version of MCS (aka CNA, or compact NUMA-aware lock). @@ -37,19 +39,39 @@ * gradually filter the primary queue, leaving only waiters running on the same * preferred NUMA node. * + * We change the NUMA node preference after a waiter at the head of the + * secondary queue spins for a certain amount of time (1ms, by default). + * We do that by flushing the secondary queue into the head of the primary queue, + * effectively changing the preference to the NUMA node of the waiter at the head + * of the secondary queue at the time of the flush. + * * For more details, see https://arxiv.org/abs/1810.05600. 
* * Authors: Alex Kogan * Dave Dice */ +#define FLUSH_SECONDARY_QUEUE 1 + struct cna_node { struct mcs_spinlock mcs; u16 numa_node; u16 real_numa_node; u32 encoded_tail; /* self */ + u64 start_time; }; +static ulong numa_spinlock_threshold_ns = 1000000; /* 1ms, by default */ +module_param(numa_spinlock_threshold_ns, ulong, 0644); + +static inline bool intra_node_threshold_reached(struct cna_node *cn) +{ + u64 current_time = local_clock(); + u64 threshold = cn->start_time + numa_spinlock_threshold_ns; + + return current_time > threshold; +} + static void __init cna_init_nodes_per_cpu(unsigned int cpu) { struct mcs_spinlock *base = per_cpu_ptr(&qnodes[0].mcs, cpu); @@ -92,6 +114,7 @@ static __always_inline void cna_init_node(struct mcs_spinlock *node) struct cna_node *cn = (struct cna_node *)node; cn->numa_node = cn->real_numa_node; + cn->start_time = 0; } /* @@ -191,8 +214,14 @@ static void cna_splice_next(struct mcs_spinlock *node, /* stick `next` on the secondary queue tail */ if (node->locked <= 1) { /* if secondary queue is empty */ + struct cna_node *cn = (struct cna_node *)node; + /* create secondary queue */ next->next = next; + + cn->start_time = local_clock(); + /* secondary queue is not empty iff start_time != 0 */ + WARN_ON(!cn->start_time); } else { /* add to the tail of the secondary queue */ struct mcs_spinlock *tail_2nd = decode_tail(node->locked); @@ -240,12 +269,18 @@ static int cna_order_queue(struct mcs_spinlock *node) static __always_inline u32 cna_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node) { - /* - * Try and put the time otherwise spent spin waiting on - * _Q_LOCKED_PENDING_MASK to use by sorting our lists. - */ - while (LOCK_IS_BUSY(lock) && !cna_order_queue(node)) - cpu_relax(); + struct cna_node *cn = (struct cna_node *)node; + + if (!cn->start_time || !intra_node_threshold_reached(cn)) { + /* + * Try and put the time otherwise spent spin waiting on + * _Q_LOCKED_PENDING_MASK to use by sorting our lists. 
+ */ + while (LOCK_IS_BUSY(lock) && !cna_order_queue(node)) + cpu_relax(); + } else { + cn->start_time = FLUSH_SECONDARY_QUEUE; + } return 0; /* we lied; we didn't wait, go do so now */ } @@ -253,24 +288,38 @@ static __always_inline u32 cna_wait_head_or_lock(struct qspinlock *lock, static inline void cna_lock_handoff(struct mcs_spinlock *node, struct mcs_spinlock *next) { + struct cna_node *cn = (struct cna_node *)node; u32 val = 1; - if (node->locked > 1) { - struct cna_node *cn = (struct cna_node *)node; + if (cn->start_time != FLUSH_SECONDARY_QUEUE) { + if (node->locked > 1) { + val = node->locked; /* preseve secondary queue */ + + /* + * We have a local waiter, either real or fake one; + * reload @next in case it was changed by cna_order_queue(). + */ + next = node->next; - val = node->locked; /* preseve secondary queue */ + /* + * Pass over NUMA node id of primary queue, to maintain the + * preference even if the next waiter is on a different node. + */ + ((struct cna_node *)next)->numa_node = cn->numa_node; + ((struct cna_node *)next)->start_time = cn->start_time; + } + } else { /* - * We have a local waiter, either real or fake one; - * reload @next in case it was changed by cna_order_queue(). + * We decided to flush the secondary queue; + * this can only happen if that queue is not empty. */ - next = node->next; - + WARN_ON(node->locked <= 1); /* - * Pass over NUMA node id of primary queue, to maintain the - * preference even if the next waiter is on a different node. + * Splice the secondary queue onto the primary queue and pass the lock + * to the longest waiting remote waiter. 
*/ - ((struct cna_node *)next)->numa_node = cn->numa_node; + next = cna_splice_head(NULL, 0, node, next); } arch_mcs_lock_handoff(&next->locked, val); -- Gitee From 376f64e4e8bf1171b7bc0c29cfba168090aed3f0 Mon Sep 17 00:00:00 2001 From: Alex Kogan Date: Fri, 14 May 2021 16:07:42 -0400 Subject: [PATCH 2037/2138] locking/qspinlock: Avoid moving certain threads between waiting queues in CNA ANBZ: #13056 cherry-picked from https://lore.kernel.org/all/20210514200743.3026725-3-alex.kogan@oracle.com/ Prohibit moving certain threads (e.g., in irq and nmi contexts) to the secondary queue. Those prioritized threads will always stay in the primary queue, and so will have a shorter wait time for the lock. Signed-off-by: Alex Kogan Reviewed-by: Steve Sistare Reviewed-by: Waiman Long Signed-off-by: Kong Yingqiao Reviewed-by: Cruz Zhao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4406 --- kernel/locking/qspinlock_cna.h | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h index 0b991c340fb1..ffc5c3301f0f 100644 --- a/kernel/locking/qspinlock_cna.h +++ b/kernel/locking/qspinlock_cna.h @@ -6,6 +6,7 @@ #include #include #include +#include /* * Implement a NUMA-aware version of MCS (aka CNA, or compact NUMA-aware lock). @@ -37,7 +38,8 @@ * running on the same NUMA node. If it is not, that waiter is detached from the * main queue and moved into the tail of the secondary queue. This way, we * gradually filter the primary queue, leaving only waiters running on the same - * preferred NUMA node. + * preferred NUMA node. Note that certain priortized waiters (e.g., in + * irq and nmi contexts) are excluded from being moved to the secondary queue. * * We change the NUMA node preference after a waiter at the head of the * secondary queue spins for a certain amount of time (1ms, by default). 
@@ -53,6 +55,8 @@ #define FLUSH_SECONDARY_QUEUE 1 +#define CNA_PRIORITY_NODE 0xffff + struct cna_node { struct mcs_spinlock mcs; u16 numa_node; @@ -111,9 +115,10 @@ static int __init cna_init_nodes(void) static __always_inline void cna_init_node(struct mcs_spinlock *node) { + bool priority = !in_task() || irqs_disabled() || rt_task(current); struct cna_node *cn = (struct cna_node *)node; - cn->numa_node = cn->real_numa_node; + cn->numa_node = priority ? CNA_PRIORITY_NODE : cn->real_numa_node; cn->start_time = 0; } @@ -252,7 +257,7 @@ static int cna_order_queue(struct mcs_spinlock *node) numa_node = cn->numa_node; next_numa_node = ((struct cna_node *)next)->numa_node; - if (next_numa_node != numa_node) { + if (next_numa_node != numa_node && next_numa_node != CNA_PRIORITY_NODE) { struct mcs_spinlock *nnext = READ_ONCE(next->next); if (nnext) @@ -272,6 +277,13 @@ static __always_inline u32 cna_wait_head_or_lock(struct qspinlock *lock, struct cna_node *cn = (struct cna_node *)node; if (!cn->start_time || !intra_node_threshold_reached(cn)) { + /* + * We are at the head of the wait queue, no need to use + * the fake NUMA node ID. + */ + if (cn->numa_node == CNA_PRIORITY_NODE) + cn->numa_node = cn->real_numa_node; + /* * Try and put the time otherwise spent spin waiting on * _Q_LOCKED_PENDING_MASK to use by sorting our lists. -- Gitee From dbfeb71463709e332f082187609d4c75287a3e23 Mon Sep 17 00:00:00 2001 From: Alex Kogan Date: Fri, 14 May 2021 16:07:43 -0400 Subject: [PATCH 2038/2138] locking/qspinlock: Introduce the shuffle reduction optimization into CNA ANBZ: #13056 cherry-picked from https://lore.kernel.org/all/20210514200743.3026725-3-alex.kogan@oracle.com/ This performance optimization chooses probabilistically to avoid moving threads from the main queue into the secondary one when the secondary queue is empty. It is helpful when the lock is only lightly contended. 
In particular, it makes CNA less eager to create a secondary queue, but does not introduce any extra delays for threads waiting in that queue once it is created. Signed-off-by: Alex Kogan Reviewed-by: Steve Sistare Reviewed-by: Waiman Long Signed-off-by: Kong Yingqiao Reviewed-by: Cruz Zhao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4406 --- kernel/locking/qspinlock_cna.h | 39 ++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h index ffc5c3301f0f..17d56c739e57 100644 --- a/kernel/locking/qspinlock_cna.h +++ b/kernel/locking/qspinlock_cna.h @@ -7,6 +7,7 @@ #include #include #include +#include /* * Implement a NUMA-aware version of MCS (aka CNA, or compact NUMA-aware lock). @@ -76,6 +77,34 @@ static inline bool intra_node_threshold_reached(struct cna_node *cn) return current_time > threshold; } +/* + * Controls the probability for enabling the ordering of the main queue + * when the secondary queue is empty. The chosen value reduces the amount + * of unnecessary shuffling of threads between the two waiting queues + * when the contention is low, while responding fast enough and enabling + * the shuffling when the contention is high. + */ +#define SHUFFLE_REDUCTION_PROB_ARG (7) + +/* Per-CPU pseudo-random number seed */ +static DEFINE_PER_CPU(u32, seed); + +/* + * Return false with probability 1 / 2^@num_bits. + * Intuitively, the larger @num_bits the less likely false is to be returned. + * @num_bits must be a number between 0 and 31. 
+ */ +static bool probably(unsigned int num_bits) +{ + u32 s; + + s = this_cpu_read(seed); + s = next_pseudo_random32(s); + this_cpu_write(seed, s); + + return s & ((1 << num_bits) - 1); +} + static void __init cna_init_nodes_per_cpu(unsigned int cpu) { struct mcs_spinlock *base = per_cpu_ptr(&qnodes[0].mcs, cpu); @@ -276,6 +305,16 @@ static __always_inline u32 cna_wait_head_or_lock(struct qspinlock *lock, { struct cna_node *cn = (struct cna_node *)node; + if (node->locked <= 1 && probably(SHUFFLE_REDUCTION_PROB_ARG)) { + /* + * When the secondary queue is empty, skip the calls to + * cna_order_queue() below with high probability. This optimization + * reduces the overhead of unnecessary shuffling of threads + * between waiting queues when the lock is only lightly contended. + */ + return 0; + } + if (!cn->start_time || !intra_node_threshold_reached(cn)) { /* * We are at the head of the wait queue, no need to use -- Gitee From e4e097c9d3f83b3631aacad1f775d91c38cde341 Mon Sep 17 00:00:00 2001 From: Kong Yingqiao Date: Fri, 3 Jan 2025 13:55:10 +0800 Subject: [PATCH 2039/2138] anolis: qspinlock: Disable CNA by default ANBZ: #13056 Disable CNA by default, this default behavior can be overridden with the kernel boot command-line option "numa_spinlock=on/off/auto". Signed-off-by: Kong Yingqiao Reviewed-by: Cruz Zhao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4406 --- Documentation/admin-guide/kernel-parameters.txt | 6 ++++-- kernel/locking/qspinlock_cna.h | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 181971c1d4e0..11b6b92dcd5f 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -4044,12 +4044,14 @@ numa_spinlock= [NUMA, PV_OPS] Select the NUMA-aware variant of spinlock. 
The options are: auto - Enable this variant if running on a multi-node - machine in native environment. + machine in native environment. (Under this option, if + paravirt spinlock is already enabled, this variant will + not be enabled.) on - Unconditionally enable this variant. off - Unconditionally disable this variant. Not specifying this option is equivalent to - numa_spinlock=auto. + numa_spinlock=off. numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA. 'node', 'default' can be specified diff --git a/kernel/locking/qspinlock_cna.h b/kernel/locking/qspinlock_cna.h index 17d56c739e57..2d834bc8d7dd 100644 --- a/kernel/locking/qspinlock_cna.h +++ b/kernel/locking/qspinlock_cna.h @@ -380,7 +380,7 @@ static inline void cna_lock_handoff(struct mcs_spinlock *node, * Constant (boot-param configurable) flag selecting the NUMA-aware variant * of spinlock. Possible values: -1 (off) / 0 (auto, default) / 1 (on). */ -static int numa_spinlock_flag; +static int numa_spinlock_flag = -1; static int __init numa_spinlock_setup(char *str) { -- Gitee From c8d3305e6c86165812e9e4e2a624d466b5c726d3 Mon Sep 17 00:00:00 2001 From: Kong Yingqiao Date: Fri, 3 Jan 2025 14:34:32 +0800 Subject: [PATCH 2040/2138] anolis: configs: Enable CONFIG_NUMA_AWARE_SPINLOCKS on x86 ANBZ: #13056 Enable CONFIG_NUMA_AWARE_SPINLOCKS on x86. The spinlock has numa-aware ability if configure numa_spinlock=on/auto at the same time. 
Signed-off-by: Kong Yingqiao Reviewed-by: Cruz Zhao Reviewed-by: Artie Ding Link: https://gitee.com/anolis/cloud-kernel/pulls/4406 --- anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_AWARE_SPINLOCKS | 1 + 1 file changed, 1 insertion(+) create mode 100644 anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_AWARE_SPINLOCKS diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_AWARE_SPINLOCKS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_AWARE_SPINLOCKS new file mode 100644 index 000000000000..cec88812022a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/x86/CONFIG_NUMA_AWARE_SPINLOCKS @@ -0,0 +1 @@ +CONFIG_NUMA_AWARE_SPINLOCKS=y -- Gitee From ddd445494a688609124e1c73cc894c47255baae4 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Tue, 27 Aug 2019 13:54:48 +0800 Subject: [PATCH 2041/2138] anolis: memcg: Introduce memory.wmark_min_adj ANBZ: #12209 OpenAnolis Bug Tracker: 0000482 In co-location environment, there are more or less some memory overcommitment, then BATCH tasks may break the shared global min watermark resulting in all types of applications falling into the direct reclaim slow path hurting the RT of LS tasks. (NOTE: BATCH tasks tolerate big latency spike even in seconds as long as doesn't hurt its overal throughput. While LS tasks are very Latency-Sensitive, they may time out or fail in case of sudden latency spike lasts like hundreds of ms typically.) Actually BATCH tasks are not sensitive to memory latency, they can be assigned a strict min watermark which is different from that of LS tasks(which can be aissgned a lenient min watermark accordingly), thus isolating each other in case of global memory allocation. This is kind of like the idea behind ALLOC_HARDER for rt_task(), see gfp_to_alloc_flags(). 
memory.wmark_min_adj stands for the memcg global WMARK_MIN adjustment; it is used to realize the separate min watermarks mentioned above for memcgs. Its valid value is within [-25, 50], specifically: a negative value means to be relative to [0, WMARK_MIN], a positive value means to be relative to [WMARK_MIN, WMARK_LOW]. For example, -25 means "WMARK_MIN + (WMARK_MIN - 0) * (-25%)" 50 means "WMARK_MIN + (WMARK_LOW - WMARK_MIN) * 50%" Note that the minimum -25 is what ALLOC_HARDER uses, which is safe for us to adopt, and the maximum 50 is an empirically chosen value. A negative memory.wmark_min_adj means high QoS requirements: it can allocate below the global WMARK_MIN, which is kind of like the idea behind ALLOC_HARDER, see gfp_to_alloc_flags(). A positive memory.wmark_min_adj means low QoS requirements; thus, when an allocation breaks the memcg min watermark, it would traditionally trigger direct reclaim, and we trigger throttling instead to further prevent such tasks from disturbing others. With this interface, we can assign positive values to BATCH memcgs and negative values to LS memcgs. memory.wmark_min_adj defaults to 0 and is inherited from its parent. Note that the final effective wmark_min_adj considers all the hierarchical values; its value is the maximal (most conservative) wmark_min_adj along the hierarchy, excluding intermediate default values (zero).
Reviewed-by: Yang Shi Reviewed-by: Gavin Shan Signed-off-by: Xunlei Pang Signed-off-by: Gang Deng Reviewed-by: Xu Yu Acked-by: Xunlei Pang Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4192 --- include/linux/memcontrol.h | 24 +++++ include/linux/resume_user_mode.h | 1 + include/linux/sched.h | 1 + mm/memcontrol.c | 171 +++++++++++++++++++++++++++++++ mm/page_alloc.c | 19 ++++ 5 files changed, 216 insertions(+) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 34a205b1b776..9346db133fd7 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -74,6 +74,8 @@ struct mem_cgroup_reclaim_cookie { unsigned int generation; }; +struct alloc_context; + #ifdef CONFIG_MEMCG #define MEM_CGROUP_ID_SHIFT 16 @@ -328,6 +330,9 @@ struct mem_cgroup { /* memory.exstat */ struct mem_cgroup_exstat_cpu __percpu *exstat_cpu; + int wmark_min_adj; /* user-set value */ + int wmark_min_eadj; /* value in effect */ + unsigned int wmark_ratio; struct work_struct wmark_work; unsigned int wmark_scale_factor; @@ -995,6 +1000,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, } void mem_cgroup_handle_over_high(gfp_t gfp_mask); +void mem_cgroup_wmark_min_throttle(void); unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg); @@ -1246,6 +1252,10 @@ static inline bool is_wmark_ok(struct mem_cgroup *memcg, bool high) return page_counter_read(&memcg->memory) < memcg->memory.wmark_low; } +int memcg_get_wmark_min_adj(struct task_struct *curr); +void memcg_check_wmark_min_adj(struct task_struct *curr, + struct alloc_context *ac); + #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 @@ -1564,6 +1574,10 @@ static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask) { } +static inline void mem_cgroup_wmark_min_throttle(void) +{ +} + static inline void mem_cgroup_enter_user_fault(void) { } @@ -1699,6 +1713,16 @@ static inline bool is_wmark_ok(struct mem_cgroup *memcg, bool low) { 
return false; } + +static inline int memcg_get_wmark_min_adj(struct task_struct *curr) +{ + return 0; +} + +static inline void memcg_check_wmark_min_adj(struct task_struct *curr, + struct alloc_context *ac) +{ +} #endif /* CONFIG_MEMCG */ #ifdef CONFIG_ASYNC_FORK diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h index f8f3e958e9cf..4a63bc7b4bfe 100644 --- a/include/linux/resume_user_mode.h +++ b/include/linux/resume_user_mode.h @@ -56,6 +56,7 @@ static inline void resume_user_mode_work(struct pt_regs *regs) #endif mem_cgroup_handle_over_high(GFP_KERNEL); + mem_cgroup_wmark_min_throttle(); blkcg_maybe_throttle_current(); rseq_handle_notify_resume(NULL, regs); diff --git a/include/linux/sched.h b/include/linux/sched.h index 5a0298975a06..4b92e943de0c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1512,6 +1512,7 @@ struct task_struct { /* Number of pages to reclaim on returning to userland: */ unsigned int memcg_nr_pages_over_high; + unsigned int wmark_min_throttle_ms; /* Used by memcontrol for targeted memcg charge: */ struct mem_cgroup *active_memcg; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 5e6c25f9ed38..ff12dce604ff 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -4630,6 +4631,168 @@ static ssize_t memory_wmark_scale_factor_write(struct kernfs_open_file *of, return nbytes; } +/* + * Figure out the maximal(most conservative) @wmark_min_adj along + * the hierarchy but excluding intermediate default zero, as the + * effective one. 
Example: + * root + * / \ + * A D + * / \ + * B C + * / \ + * E F + * + * wmark_min_adj: A -10, B -25, C 0, D 50, E -25, F 50 + * wmark_min_eadj: A -10, B -10, C 0, D 50, E -10, F 50 + */ +static void memcg_update_wmark_min_adj(struct mem_cgroup *memcg, int val) +{ + struct mem_cgroup *p; + struct mem_cgroup *iter; + + mutex_lock(&cgroup_mutex); + memcg->wmark_min_adj = val; + /* update hierarchical wmark_min_eadj, pre-order iteration */ + for_each_mem_cgroup_tree(iter, memcg) { + if (!mem_cgroup_online(iter)) + continue; + val = iter->wmark_min_adj; + p = parent_mem_cgroup(iter); + if (p && p->wmark_min_eadj && p->wmark_min_eadj > val) + val = p->wmark_min_eadj; + iter->wmark_min_eadj = val; + } + mutex_unlock(&cgroup_mutex); +} + +static int memory_wmark_min_adj_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + + /* show the final effective value */ + seq_printf(m, "%d\n", memcg->wmark_min_eadj); + + return 0; +} + +static ssize_t memory_wmark_min_adj_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); + int ret, wmark_min_adj; + + buf = strstrip(buf); + ret = kstrtoint(buf, 0, &wmark_min_adj); + if (ret) + return ret; + + if (wmark_min_adj < -25 || wmark_min_adj > 50) + return -EINVAL; + + memcg_update_wmark_min_adj(memcg, wmark_min_adj); + + return nbytes; +} + +int memcg_get_wmark_min_adj(struct task_struct *curr) +{ + struct mem_cgroup *memcg; + int val; + + if (mem_cgroup_disabled()) + return 0; + + rcu_read_lock(); + memcg = mem_cgroup_from_css(task_css(curr, memory_cgrp_id)); + if (mem_cgroup_is_root(memcg)) + val = 0; + else + val = memcg->wmark_min_eadj; + rcu_read_unlock(); + + return val; +} + +/* + * Scheduled by global page allocation to be executed from the userland + * return path and throttle when free is under memcg's global WMARK_MIN. 
+ */ +void mem_cgroup_wmark_min_throttle(void) +{ + unsigned int msec = current->wmark_min_throttle_ms; + unsigned long pflags; + + if (likely(!msec)) + return; + psi_memstall_enter(&pflags); + msleep_interruptible(msec); + psi_memstall_leave(&pflags); + current->wmark_min_throttle_ms = 0; +} + +#define WMARK_MIN_THROTTLE_MS 100UL +/* + * Tasks in memcg having positive memory.wmark_min_adj has its + * own global min watermark higher than the global WMARK_MIN: + * "WMARK_MIN + (WMARK_LOW - WMARK_MIN) * memory.wmark_min_adj" + * + * Positive memory.wmark_min_adj means low QoS requirements. When + * allocation broke memcg min watermark, it should trigger direct + * reclaim traditionally, here trigger throttle instead to further + * prevent them from disturbing others. + * + * The throttle time is simply linearly proportional to the pages + * consumed below memcg's min watermark. + * + * The base throttle time is WMARK_MIN_THROTTLE_MS, and the maximal + * throttle time is ten times WMARK_MIN_THROTTLE_MS. + * + * The actual throttling will be executed from the userland return + * path, see mem_cgroup_wmark_min_throttle(). 
+ */ +void memcg_check_wmark_min_adj(struct task_struct *curr, + struct alloc_context *ac) +{ + struct zoneref *z; + struct zone *zone; + unsigned long wmark_min, wmark, min_low_gap, free_pages; + int wmark_min_adj = memcg_get_wmark_min_adj(curr); + + if (wmark_min_adj <= 0) + return; + + if (curr->wmark_min_throttle_ms) + return; + + z = first_zones_zonelist(ac->zonelist, ac->highest_zoneidx, ac->nodemask); + for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, + ac->nodemask) { + if (cpusets_enabled() && + !__cpuset_zone_allowed(zone, __GFP_HARDWALL)) + continue; + + wmark_min = min_wmark_pages(zone); + min_low_gap = low_wmark_pages(zone) - wmark_min; + free_pages = zone_page_state(zone, NR_FREE_PAGES); + wmark = wmark_min + min_low_gap * wmark_min_adj / 100; + if (free_pages < wmark && wmark > wmark_min) { + unsigned long msec; + + /* + * The throttle time is simply linearly proportional + * to the pages consumed below memcg's min watermark. + */ + msec = (wmark - free_pages) * WMARK_MIN_THROTTLE_MS / + (wmark - wmark_min); + msec = clamp(msec, 1UL, 10 * WMARK_MIN_THROTTLE_MS); + curr->wmark_min_throttle_ms = msec; + set_notify_resume(curr); + break; + } + } +} + static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) { struct mem_cgroup_threshold_ary *t; @@ -5647,6 +5810,12 @@ static struct cftype mem_cgroup_legacy_files[] = { .seq_show = memory_wmark_scale_factor_show, .write = memory_wmark_scale_factor_write, }, + { + .name = "wmark_min_adj", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_min_adj_show, + .write = memory_wmark_min_adj_write, + }, { .name = "force_empty", .write = mem_cgroup_force_empty_write, @@ -6042,6 +6211,8 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); WRITE_ONCE(memcg->wmark_ratio, READ_ONCE(parent->wmark_ratio)); + WRITE_ONCE(memcg->wmark_min_adj, 
READ_ONCE(parent->wmark_min_adj)); + WRITE_ONCE(memcg->wmark_min_eadj, READ_ONCE(parent->wmark_min_eadj)); memcg->reap_background = parent->reap_background; /* Default gap is 0.5% max limit */ memcg->wmark_scale_factor = parent->wmark_scale_factor ? diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4331ed98a5fe..7955e5c22f70 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2888,6 +2888,14 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, long min = mark; int o; + /* apply negative memory.wmark_min_adj */ + if ((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) { + int min_adj = memcg_get_wmark_min_adj(current); + + if (min_adj < 0) + min -= mark * (-min_adj) / 100; + } + /* free_pages may go negative - that's OK */ free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); @@ -2920,6 +2928,13 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, min -= min / 2; } + /* + * Only happens due to memory.wmark_min_adj. + * Guarantee safe min after memory.wmark_min_adj? + */ + if (min < mark / 4) + min = mark / 4; + /* * Check watermarks for an order-0 allocation request. If these * are not met, then a high-order request also cannot go ahead @@ -4218,6 +4233,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, warn_alloc(gfp_mask, ac->nodemask, "page allocation failure: order:%u", order); got_pg: + + if (ac->migratetype == MIGRATE_MOVABLE) + memcg_check_wmark_min_adj(current, ac); + return page; } -- Gitee From c7b0bf1a201239e0f951153ff4dc156323d5ff84 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Sun, 1 Sep 2019 19:10:49 +0800 Subject: [PATCH 2042/2138] anolis: memcg: Account throttled time due to memory.wmark_min_adj ANBZ: #12209 OpenAnolis Bug Tracker: 0000482 Add wmark_min_throttled_ms in the memory.exstat interface, show it on the first line for compatibility. 
Reviewed-by: Yang Shi Reviewed-by: Gavin Shan Signed-off-by: Xunlei Pang Signed-off-by: Gang Deng Reviewed-by: Xu Yu Acked-by: Xunlei Pang Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4192 --- include/linux/memcontrol.h | 1 + mm/memcontrol.c | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 9346db133fd7..c8d14d42c4b6 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -44,6 +44,7 @@ enum memcg_stat_item { }; enum memcg_exstat_item { + MEMCG_WMARK_MIN, MEMCG_WMARK_RECLAIM, #ifdef CONFIG_PAGECACHE_LIMIT MEMCG_PGCACHE_RECLAIM, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ff12dce604ff..85b8e4cbf221 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4484,6 +4484,8 @@ static int memcg_exstat_show(struct seq_file *m, void *v) { struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + seq_printf(m, "wmark_min_throttled_ms %llu\n", + memcg_exstat_gather(memcg, MEMCG_WMARK_MIN)); seq_printf(m, "wmark_reclaim_work_ms %llu\n", memcg_exstat_gather(memcg, MEMCG_WMARK_RECLAIM) >> 20); @@ -4722,6 +4724,7 @@ void mem_cgroup_wmark_min_throttle(void) { unsigned int msec = current->wmark_min_throttle_ms; unsigned long pflags; + struct mem_cgroup *memcg, *iter; if (likely(!msec)) return; @@ -4729,6 +4732,12 @@ void mem_cgroup_wmark_min_throttle(void) msleep_interruptible(msec); psi_memstall_leave(&pflags); current->wmark_min_throttle_ms = 0; + + /* Account throttled time hierarchically, ignore premature sleep */ + memcg = get_mem_cgroup_from_mm(current->mm); + for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) + __this_cpu_add(iter->exstat_cpu->item[MEMCG_WMARK_MIN], msec); + css_put(&memcg->css); } #define WMARK_MIN_THROTTLE_MS 100UL -- Gitee From 834687756d183674803b27af19b64029e95dd064 Mon Sep 17 00:00:00 2001 From: Yi Tao Date: Fri, 11 Nov 2022 12:01:57 +0800 Subject: [PATCH 2043/2138] anolis: memcg: Support 
memory.wmark_min_adj in cgroup v2 ANBZ: #12209 Some cloud native components are supporting cgroup v2 and depend on memory.wmark_min_adj, so support memory.wmark_min_adj in cgroup v2. Signed-off-by: Yi Tao Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/861 Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4192 --- mm/memcontrol.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 85b8e4cbf221..e965e428ac99 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -7676,6 +7676,12 @@ static struct cftype memory_files[] = { .seq_show = memory_max_show, .write = memory_max_write, }, + { + .name = "wmark_min_adj", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = memory_wmark_min_adj_show, + .write = memory_wmark_min_adj_write, + }, { .name = "priority", .flags = CFTYPE_NOT_ON_ROOT, -- Gitee From 84eaa344576e5d3693ef0228b31877842aea7033 Mon Sep 17 00:00:00 2001 From: Issac Hai Date: Fri, 30 Apr 2021 10:26:13 +0800 Subject: [PATCH 2044/2138] anolis: meminfo: reduce active and inactive from memcg usage ANBZ: #13547 When a rich container is empty, the usage might be negative because of miscalculated cached. So we reduce active and inactive from memcg usage. 
Signed-off-by: Issac Hai Acked-by: Xunlei Pang Signed-off-by: Xunlei Pang Signed-off-by: zhongjiang-ali Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/3003 Link: https://gitee.com/anolis/cloud-kernel/pulls/3018 Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4496 --- mm/memcontrol.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e965e428ac99..762bff3ccac9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -8967,7 +8967,6 @@ void memcg_meminfo(struct mem_cgroup *memcg, unsigned long pagecache, memcg_wmark, swap_size; int i; - ext->cached = memcg_page_state(memcg, NR_FILE_PAGES); ext->file_dirty = memcg_page_state(memcg, NR_FILE_DIRTY); ext->writeback = memcg_page_state(memcg, NR_WRITEBACK); ext->anon_mapped = memcg_page_state(memcg, NR_ANON_MAPPED); @@ -9032,6 +9031,8 @@ void memcg_meminfo(struct mem_cgroup *memcg, ext->available = info->freeram + pagecache; ext->available += ext->slab_reclaimable - min(ext->slab_reclaimable / 2, memcg_wmark); + ext->cached = usage - ext->lrupages[LRU_INACTIVE_ANON] - + ext->lrupages[LRU_ACTIVE_ANON]; } #endif /* CONFIG_SWAP */ -- Gitee From 789a59abcd616f2e08b901c5772aa1424152c719 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Tue, 10 May 2022 13:23:48 +0800 Subject: [PATCH 2045/2138] anolis: mm: correct slab meminfo in rich container ANBZ: #13547 The slab stat, i.e., NR_SLAB_RECLAIMABLE_B and NR_SLAB_UNRECLAIMABLE_B, are stored in bytes in memcg vmstats. On the other hand, the slab stat are stored in pages in global vm_node_stat. The slab stat in pages are needed by meminfo_proc_show(). This makes the slab stat obtained from the memcg of rich container represented in pages for meminfo_proc_show(). 
Fixes: 2f5c25e0dad0 ("ck: meminfo: Add meminfo support for rich container") Signed-off-by: Xu Yu Reviewed-by: Gang Deng Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4496 --- mm/memcontrol.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 762bff3ccac9..c1ef72b4be74 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -8971,9 +8971,9 @@ void memcg_meminfo(struct mem_cgroup *memcg, ext->writeback = memcg_page_state(memcg, NR_WRITEBACK); ext->anon_mapped = memcg_page_state(memcg, NR_ANON_MAPPED); ext->file_mapped = memcg_page_state(memcg, NR_FILE_MAPPED); - ext->slab_reclaimable = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B); + ext->slab_reclaimable = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) >> PAGE_SHIFT; ext->slab_unreclaimable = - memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B); + memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B) >> PAGE_SHIFT; ext->kernel_stack_kb = memcg_page_state(memcg, NR_KERNEL_STACK_KB); ext->writeback_temp = 0; #ifdef CONFIG_TRANSPARENT_HUGEPAGE -- Gitee From f29265866480cfeaac0f70435f993d84928bed64 Mon Sep 17 00:00:00 2001 From: Xiang Zheng Date: Wed, 25 Aug 2021 15:10:52 +0800 Subject: [PATCH 2046/2138] anolis: meminfo: Clean warnings in meminfo_proc_show() ANBZ: #11817 fix #36380966 If CONFIG_DEBUG_VM is enabled, calling global_node_page_state() with NR_SLAB_RECLAIMABLE_B or NR_SLAB_UNRECLAIMABLE_B will cause the following warning log: [ 35.563010] WARNING: CPU: 75 PID: 950 at include/linux/vmstat.h:226 meminfo_proc_show+0xbe/0xad0 [ 35.571785] Modules linked in: [ 35.592472] RIP: 0010:meminfo_proc_show+0xbe/0xad0 ... 
[ 35.616008] RSP: 0018:ffffc9000323bc38 EFLAGS: 00010286 [ 35.621225] RAX: 000000000019800a RBX: 0000000000000000 RCX: 0000000000000000 [ 35.628352] RDX: 000000000019800a RSI: 0000000000000000 RDI: 00000000001a1161 [ 35.635484] RBP: ffff88810394f000 R08: 0000000000000000 R09: 0000000000000040 [ 35.642614] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000445 [ 35.649741] R13: 0000000000000076 R14: 0000000000000000 R15: 000000000019800a [ 35.656872] FS: 00007f76497e8740(0000) GS:ffff888179cc0000(0000) knlGS:0000000000000000 [ 35.664948] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 35.670686] CR2: 00007fff89a86d78 CR3: 0000000109cc6003 CR4: 0000000000770ee0 [ 35.677809] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 35.684932] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 35.692058] PKRU: 55555554 [ 35.694772] Call Trace: [ 35.697227] ? kmem_cache_alloc+0xe4/0x1f0 [ 35.701323] ? browse_rb+0x125/0x160 [ 35.704902] ? validate_mm+0xb4/0xf0 [ 35.708481] ? __vma_adjust+0x721/0xc30 [ 35.716323] ? seq_read_iter+0x351/0x440 [ 35.720241] seq_read_iter+0xff/0x440 [ 35.723898] ? mmap_region+0x22f/0x710 [ 35.727643] proc_reg_read_iter+0x3f/0x70 [ 35.731657] new_sync_read+0x10f/0x190 [ 35.735408] vfs_read+0xf1/0x180 [ 35.738631] ksys_read+0x5f/0xe0 [ 35.741858] do_syscall_64+0x2d/0x40 [ 35.745437] entry_SYSCALL_64_after_hwframe+0x44/0xa9 [ 35.750490] RIP: 0033:0x7f7648ec09a0 ... 
[ 35.772813] RSP: 002b:00007fff89a86c68 EFLAGS: 00000202 ORIG_RAX: 0000000000000000 [ 35.780378] RAX: ffffffffffffffda RBX: 00000000024b38e0 RCX: 00007f7648ec09a0 [ 35.787502] RDX: 0000000000000400 RSI: 00007f76497ec000 RDI: 0000000000000003 [ 35.794625] RBP: 00007f7649195380 R08: ffffffffffffffff R09: 0000000000000000 [ 35.801750] R10: 0000000000000022 R11: 0000000000000202 R12: 00007f7649194838 [ 35.808874] R13: 00007f7649194838 R14: 0000000000000d70 R15: 00007f76491955a8 [ 35.815997] ---[ end trace 3961a3972055d1e6 ]- Use global_node_page_state_pages() instead. Fixes: 258ca1c67ee6 ("ck: meminfo: Add meminfo support for rich container") Signed-off-by: Xiang Zheng Acked-by: Xunlei Pang Reviewed-by: Joseph Qi Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4100 --- fs/proc/meminfo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index fc14e63d0c15..fa51e15ea094 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -69,9 +69,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) ext.anon_mapped = global_node_page_state(NR_ANON_MAPPED); ext.file_mapped = global_node_page_state(NR_FILE_MAPPED); ext.slab_reclaimable = - global_node_page_state(NR_SLAB_RECLAIMABLE_B); + global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B); ext.slab_unreclaimable = - global_node_page_state(NR_SLAB_UNRECLAIMABLE_B); + global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B); ext.kernel_stack_kb = global_node_page_state(NR_KERNEL_STACK_KB); ext.writeback_temp = global_node_page_state(NR_WRITEBACK_TEMP); -- Gitee From ad0b582b5dd8f95edcf1a5545431eddf2db80781 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 14 Aug 2024 16:14:21 +0100 Subject: [PATCH 2047/2138] iov_iter: Provide copy_folio_from_iter() ANBZ: #12255 commit 197a3de607d92b3d72e69edf5470e0a8fae548cc upstream. Provide a copy_folio_from_iter() wrapper. 
Signed-off-by: David Howells cc: Alexander Viro cc: Christian Brauner cc: Matthew Wilcox cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org cc: linux-mm@kvack.org Link: https://lore.kernel.org/r/20240814203850.2240469-14-dhowells@redhat.com/ # v2 Signed-off-by: Christian Brauner Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- include/linux/uio.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/linux/uio.h b/include/linux/uio.h index 42bce38a8e87..6ff9a0728486 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -185,6 +185,12 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset, return copy_page_to_iter(&folio->page, offset, bytes, i); } +static inline size_t copy_folio_from_iter(struct folio *folio, size_t offset, + size_t bytes, struct iov_iter *i) +{ + return copy_page_from_iter(&folio->page, offset, bytes, i); +} + static inline size_t copy_folio_from_iter_atomic(struct folio *folio, size_t offset, size_t bytes, struct iov_iter *i) { -- Gitee From 9b69564ffd49247f465d535087fdcf429e0b435d Mon Sep 17 00:00:00 2001 From: Vivek Kasireddy Date: Sun, 23 Jun 2024 23:36:09 -0700 Subject: [PATCH 2048/2138] mm/gup: introduce unpin_folio/unpin_folios helpers ANBZ: #12255 commit 6cc040542ba7b2c60e5119cd04d841fcf048c872 upstream. Patch series "mm/gup: Introduce memfd_pin_folios() for pinning memfd folios", v16. Currently, some drivers (e.g, Udmabuf) that want to longterm-pin the pages/folios associated with a memfd, do so by simply taking a reference on them. This is not desirable because the pages/folios may reside in Movable zone or CMA block. Therefore, having drivers use memfd_pin_folios() API ensures that the folios are appropriately pinned via FOLL_PIN for longterm DMA. This patchset also introduces a few helpers and converts the Udmabuf driver to use folios and memfd_pin_folios() API to longterm-pin the folios for DMA. 
Two new Udmabuf selftests are also included to test the driver and the new API. This patch (of 9): These helpers are the folio versions of unpin_user_page/unpin_user_pages. They are currently only useful for unpinning folios pinned by memfd_pin_folios() or other associated routines. However, they could find new uses in the future, when more and more folio-only helpers are added to GUP. We should probably sanity check the folio as part of unpin similar to how it is done in unpin_user_page/unpin_user_pages but we cannot cleanly do that at the moment without also checking the subpage. Therefore, sanity checking needs to be added to these routines once we have a way to determine if any given folio is anon-exclusive (via a per folio AnonExclusive flag). Link: https://lkml.kernel.org/r/20240624063952.1572359-1-vivek.kasireddy@intel.com Link: https://lkml.kernel.org/r/20240624063952.1572359-2-vivek.kasireddy@intel.com Signed-off-by: Vivek Kasireddy Suggested-by: David Hildenbrand Reviewed-by: David Hildenbrand Acked-by: Dave Airlie Acked-by: Gerd Hoffmann Cc: Matthew Wilcox Cc: Christoph Hellwig Cc: Jason Gunthorpe Cc: Peter Xu Cc: Christoph Hellwig Cc: Daniel Vetter Cc: Dongwon Kim Cc: Hugh Dickins Cc: Junxiao Chang Cc: Oscar Salvador Cc: Arnd Bergmann Cc: Christoph Hellwig Cc: Mike Kravetz Cc: Shuah Khan Signed-off-by: Andrew Morton Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- include/linux/mm.h | 2 ++ mm/gup.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index 787d56b35b22..2078d68167c1 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1591,11 +1591,13 @@ static inline void put_page(struct page *page) #define GUP_PIN_COUNTING_BIAS (1U << 10) void unpin_user_page(struct page *page); +void unpin_folio(struct folio *folio); void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, bool 
make_dirty); void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, bool make_dirty); void unpin_user_pages(struct page **pages, unsigned long npages); +void unpin_folios(struct folio **folios, unsigned long nfolios); static inline bool is_cow_mapping(vm_flags_t flags) { diff --git a/mm/gup.c b/mm/gup.c index ce217ce2a584..0e2feae1ed35 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -188,6 +188,19 @@ void unpin_user_page(struct page *page) } EXPORT_SYMBOL(unpin_user_page); +/** + * unpin_folio() - release a dma-pinned folio + * @folio: pointer to folio to be released + * + * Folios that were pinned via memfd_pin_folios() or other similar routines + * must be released either using unpin_folio() or unpin_folios(). + */ +void unpin_folio(struct folio *folio) +{ + gup_put_folio(folio, 1, FOLL_PIN); +} +EXPORT_SYMBOL_GPL(unpin_folio); + /** * folio_add_pin - Try to get an additional pin on a pinned folio * @folio: The folio to be pinned @@ -400,6 +413,40 @@ void unpin_user_pages(struct page **pages, unsigned long npages) } EXPORT_SYMBOL(unpin_user_pages); +/** + * unpin_folios() - release an array of gup-pinned folios. + * @folios: array of folios to be marked dirty and released. + * @nfolios: number of folios in the @folios array. + * + * For each folio in the @folios array, release the folio using gup_put_folio. + * + * Please see the unpin_folio() documentation for details. + */ +void unpin_folios(struct folio **folios, unsigned long nfolios) +{ + unsigned long i = 0, j; + + /* + * If this WARN_ON() fires, then the system *might* be leaking folios + * (by leaving them pinned), but probably not. More likely, gup/pup + * returned a hard -ERRNO error to the caller, who erroneously passed + * it here. 
+ */ + if (WARN_ON(IS_ERR_VALUE(nfolios))) + return; + + while (i < nfolios) { + for (j = i + 1; j < nfolios; j++) + if (folios[i] != folios[j]) + break; + + if (folios[i]) + gup_put_folio(folios[i], j - i, FOLL_PIN); + i = j; + } +} +EXPORT_SYMBOL_GPL(unpin_folios); + /* * Set the MMF_HAS_PINNED if not set yet; after set it'll be there for the mm's * lifecycle. Avoid setting the bit unless necessary, or it might cause write -- Gitee From cc3c6002aa8172259e637c45f5c2e0cc3b5f0d77 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 24 Oct 2024 10:17:57 -0700 Subject: [PATCH 2049/2138] fuse: support folios in struct fuse_args_pages and fuse_copy_pages() ANBZ: #12255 commit a669c2df36db5fa7a2674ec5ae10548760702f99 upstream. This adds support in struct fuse_args_pages and fuse_copy_pages() for using folios instead of pages for transferring data. Both folios and pages must be supported right now in struct fuse_args_pages and fuse_copy_pages() until all request types have been converted to use folios. Once all have been converted, then struct fuse_args_pages and fuse_copy_pages() will only support folios. Right now in fuse, all folios are one page (large folios are not yet supported). As such, copying folio->page is sufficient for copying the entire folio in fuse_copy_pages(). No functional changes. 
Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- fs/fuse/dev.c | 40 ++++++++++++++++++++++++++++++++-------- fs/fuse/fuse_i.h | 22 +++++++++++++++++++--- 2 files changed, 51 insertions(+), 11 deletions(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 0edce7ecfce4..fecf22b1f37e 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1001,17 +1001,41 @@ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes, struct fuse_req *req = cs->req; struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args); + if (ap->uses_folios) { + for (i = 0; i < ap->num_folios && (nbytes || zeroing); i++) { + int err; + unsigned int offset = ap->folio_descs[i].offset; + unsigned int count = min(nbytes, ap->folio_descs[i].length); + struct page *orig, *pagep; - for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) { - int err; - unsigned int offset = ap->descs[i].offset; - unsigned int count = min(nbytes, ap->descs[i].length); + orig = pagep = &ap->folios[i]->page; - err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing); - if (err) - return err; + err = fuse_copy_page(cs, &pagep, offset, count, zeroing); + if (err) + return err; + + nbytes -= count; + + /* + * fuse_copy_page may have moved a page from a pipe + * instead of copying into our given page, so update + * the folios if it was replaced. 
+ */ + if (pagep != orig) + ap->folios[i] = page_folio(pagep); + } + } else { + for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) { + int err; + unsigned int offset = ap->descs[i].offset; + unsigned int count = min(nbytes, ap->descs[i].length); - nbytes -= count; + err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing); + if (err) + return err; + + nbytes -= count; + } } return 0; } diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index a46d69ba0110..43da1bbb4e55 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -272,6 +272,12 @@ struct fuse_page_desc { unsigned int offset; }; +/** FUSE folio descriptor */ +struct fuse_folio_desc { + unsigned int length; + unsigned int offset; +}; + struct fuse_args { uint64_t nodeid; uint32_t opcode; @@ -300,9 +306,19 @@ struct fuse_args { struct fuse_args_pages { struct fuse_args args; - struct page **pages; - struct fuse_page_desc *descs; - unsigned int num_pages; + union { + struct { + struct page **pages; + struct fuse_page_desc *descs; + unsigned int num_pages; + }; + struct { + struct folio **folios; + struct fuse_folio_desc *folio_descs; + unsigned int num_folios; + }; + }; + bool uses_folios; }; #define FUSE_ARGS(args) struct fuse_args args = {} -- Gitee From 5a3e7acd1c469220ff101c75eeeea0a0b73648dd Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 24 Oct 2024 10:17:58 -0700 Subject: [PATCH 2050/2138] fuse: add support in virtio for requests using folios ANBZ: #12255 commit 29279e1d4284a29cdd4af11e9a19800b8fda2962 upstream. Until all requests have been converted to use folios instead of pages, virtio will need to support both types. Once all requests have been converted, then virtio will support just folios. No functional changes. 
Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- fs/fuse/virtio_fs.c | 87 +++++++++++++++++++++++++++++---------------- 1 file changed, 56 insertions(+), 31 deletions(-) diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 00c5ac948aba..4d2ddc831e7a 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -766,6 +766,7 @@ static void virtio_fs_request_complete(struct fuse_req *req, struct fuse_args_pages *ap; unsigned int len, i, thislen; struct page *page; + struct folio *folio; /* * TODO verify that server properly follows FUSE protocol @@ -777,15 +778,29 @@ static void virtio_fs_request_complete(struct fuse_req *req, if (args->out_pages && args->page_zeroing) { len = args->out_args[args->out_numargs - 1].size; ap = container_of(args, typeof(*ap), args); - for (i = 0; i < ap->num_pages; i++) { - thislen = ap->descs[i].length; - if (len < thislen) { - WARN_ON(ap->descs[i].offset); - page = ap->pages[i]; - zero_user_segment(page, len, thislen); - len = 0; - } else { - len -= thislen; + if (ap->uses_folios) { + for (i = 0; i < ap->num_folios; i++) { + thislen = ap->folio_descs[i].length; + if (len < thislen) { + WARN_ON(ap->folio_descs[i].offset); + folio = ap->folios[i]; + folio_zero_segment(folio, len, thislen); + len = 0; + } else { + len -= thislen; + } + } + } else { + for (i = 0; i < ap->num_pages; i++) { + thislen = ap->descs[i].length; + if (len < thislen) { + WARN_ON(ap->descs[i].offset); + page = ap->pages[i]; + zero_user_segment(page, len, thislen); + len = 0; + } else { + len -= thislen; + } } } } @@ -1271,16 +1286,22 @@ static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *r } /* Count number of scatter-gather elements required */ -static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs, - unsigned int num_pages, - unsigned int total_len) +static unsigned int 
sg_count_fuse_pages(struct fuse_args_pages *ap, + unsigned int total_len) { unsigned int i; unsigned int this_len; - for (i = 0; i < num_pages && total_len; i++) { - this_len = min(page_descs[i].length, total_len); - total_len -= this_len; + if (ap->uses_folios) { + for (i = 0; i < ap->num_folios && total_len; i++) { + this_len = min(ap->folio_descs[i].length, total_len); + total_len -= this_len; + } + } else { + for (i = 0; i < ap->num_pages && total_len; i++) { + this_len = min(ap->descs[i].length, total_len); + total_len -= this_len; + } } return i; @@ -1298,8 +1319,7 @@ static unsigned int sg_count_fuse_req(struct fuse_req *req) if (args->in_pages) { size = args->in_args[args->in_numargs - 1].size; - total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages, - size); + total_sgs += sg_count_fuse_pages(ap, size); } if (!test_bit(FR_ISREPLY, &req->flags)) @@ -1312,28 +1332,35 @@ static unsigned int sg_count_fuse_req(struct fuse_req *req) if (args->out_pages) { size = args->out_args[args->out_numargs - 1].size; - total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages, - size); + total_sgs += sg_count_fuse_pages(ap, size); } return total_sgs; } -/* Add pages to scatter-gather list and return number of elements used */ +/* Add pages/folios to scatter-gather list and return number of elements used */ static unsigned int sg_init_fuse_pages(struct scatterlist *sg, - struct page **pages, - struct fuse_page_desc *page_descs, - unsigned int num_pages, + struct fuse_args_pages *ap, unsigned int total_len) { unsigned int i; unsigned int this_len; - for (i = 0; i < num_pages && total_len; i++) { - sg_init_table(&sg[i], 1); - this_len = min(page_descs[i].length, total_len); - sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset); - total_len -= this_len; + if (ap->uses_folios) { + for (i = 0; i < ap->num_folios && total_len; i++) { + sg_init_table(&sg[i], 1); + this_len = min(ap->folio_descs[i].length, total_len); + sg_set_folio(&sg[i], ap->folios[i], this_len, + 
ap->folio_descs[i].offset); + total_len -= this_len; + } + } else { + for (i = 0; i < ap->num_pages && total_len; i++) { + sg_init_table(&sg[i], 1); + this_len = min(ap->descs[i].length, total_len); + sg_set_page(&sg[i], ap->pages[i], this_len, ap->descs[i].offset); + total_len -= this_len; + } } return i; @@ -1357,9 +1384,7 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg, sg_init_one(&sg[total_sgs++], argbuf, len); if (argpages) - total_sgs += sg_init_fuse_pages(&sg[total_sgs], - ap->pages, ap->descs, - ap->num_pages, + total_sgs += sg_init_fuse_pages(&sg[total_sgs], ap, args[numargs - 1].size); if (len_used) -- Gitee From 4571273285f2fa442d3925c7e074e56c5a2dc1c8 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 24 Oct 2024 10:17:59 -0700 Subject: [PATCH 2051/2138] fuse: convert cuse to use folios ANBZ: #12255 commit ee80369a8aa850a992e93127bd16023fe1425010 upstream. Convert cuse requests to use a folio instead of a page. No functional changes. Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- fs/fuse/cuse.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c index b6cad106c37e..6018af98dd08 100644 --- a/fs/fuse/cuse.c +++ b/fs/fuse/cuse.c @@ -303,8 +303,8 @@ struct cuse_init_args { struct fuse_args_pages ap; struct cuse_init_in in; struct cuse_init_out out; - struct page *page; - struct fuse_page_desc desc; + struct folio *folio; + struct fuse_folio_desc desc; }; /** @@ -322,7 +322,7 @@ static void cuse_process_init_reply(struct fuse_mount *fm, struct fuse_args_pages *ap = &ia->ap; struct cuse_conn *cc = fc_to_cc(fc), *pos; struct cuse_init_out *arg = &ia->out; - struct page *page = ap->pages[0]; + struct folio *folio = ap->folios[0]; struct cuse_devinfo devinfo = { }; struct device *dev; struct cdev *cdev; @@ -339,7 +339,7 
@@ static void cuse_process_init_reply(struct fuse_mount *fm, /* parse init reply */ cc->unrestricted_ioctl = arg->flags & CUSE_UNRESTRICTED_IOCTL; - rc = cuse_parse_devinfo(page_address(page), ap->args.out_args[1].size, + rc = cuse_parse_devinfo(folio_address(folio), ap->args.out_args[1].size, &devinfo); if (rc) goto err; @@ -407,7 +407,7 @@ static void cuse_process_init_reply(struct fuse_mount *fm, kobject_uevent(&dev->kobj, KOBJ_ADD); out: kfree(ia); - __free_page(page); + folio_put(folio); return; err_cdev: @@ -425,7 +425,7 @@ static void cuse_process_init_reply(struct fuse_mount *fm, static int cuse_send_init(struct cuse_conn *cc) { int rc; - struct page *page; + struct folio *folio; struct fuse_mount *fm = &cc->fm; struct cuse_init_args *ia; struct fuse_args_pages *ap; @@ -433,13 +433,14 @@ static int cuse_send_init(struct cuse_conn *cc) BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE); rc = -ENOMEM; - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) + + folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 0); + if (!folio) goto err; ia = kzalloc(sizeof(*ia), GFP_KERNEL); if (!ia) - goto err_free_page; + goto err_free_folio; ap = &ia->ap; ia->in.major = FUSE_KERNEL_VERSION; @@ -455,18 +456,19 @@ static int cuse_send_init(struct cuse_conn *cc) ap->args.out_args[1].size = CUSE_INIT_INFO_MAX; ap->args.out_argvar = true; ap->args.out_pages = true; - ap->num_pages = 1; - ap->pages = &ia->page; - ap->descs = &ia->desc; - ia->page = page; + ap->uses_folios = true; + ap->num_folios = 1; + ap->folios = &ia->folio; + ap->folio_descs = &ia->desc; + ia->folio = folio; ia->desc.length = ap->args.out_args[1].size; ap->args.end = cuse_process_init_reply; rc = fuse_simple_background(fm, &ap->args, GFP_KERNEL); if (rc) { kfree(ia); -err_free_page: - __free_page(page); +err_free_folio: + folio_put(folio); } err: return rc; -- Gitee From 447796add78ace8fdac6b45c332b05bd63818ff2 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 24 Oct 2024 10:18:00 -0700 Subject: [PATCH 
2052/2138] fuse: convert readlink to use folios ANBZ: #12255 commit c1e4862b135954dd59596fbd454321ca4109b67e upstream. Convert readlink requests to use a folio instead of a page. No functional changes. Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- fs/fuse/dir.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index b6c0ad4308e0..2b8c73d66885 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1563,14 +1563,15 @@ static int fuse_permission(struct mnt_idmap *idmap, return err; } -static int fuse_readlink_page(struct inode *inode, struct page *page) +static int fuse_readlink_page(struct inode *inode, struct folio *folio) { struct fuse_mount *fm = get_fuse_mount(inode); - struct fuse_page_desc desc = { .length = PAGE_SIZE - 1 }; + struct fuse_folio_desc desc = { .length = PAGE_SIZE - 1 }; struct fuse_args_pages ap = { - .num_pages = 1, - .pages = &page, - .descs = &desc, + .uses_folios = true, + .num_folios = 1, + .folios = &folio, + .folio_descs = &desc, }; char *link; ssize_t res; @@ -1592,7 +1593,7 @@ static int fuse_readlink_page(struct inode *inode, struct page *page) if (WARN_ON(res >= PAGE_SIZE)) return -EIO; - link = page_address(page); + link = folio_address(folio); link[res] = '\0'; return 0; @@ -1602,7 +1603,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *callback) { struct fuse_conn *fc = get_fuse_conn(inode); - struct page *page; + struct folio *folio; int err; err = -EIO; @@ -1616,20 +1617,20 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, if (!dentry) goto out_err; - page = alloc_page(GFP_KERNEL); + folio = folio_alloc(GFP_KERNEL, 0); err = -ENOMEM; - if (!page) + if (!folio) goto out_err; - err = fuse_readlink_page(inode, page); + err = 
fuse_readlink_page(inode, folio); if (err) { - __free_page(page); + folio_put(folio); goto out_err; } - set_delayed_call(callback, page_put_link, page); + set_delayed_call(callback, page_put_link, &folio->page); - return page_address(page); + return folio_address(folio); out_err: return ERR_PTR(err); @@ -2199,7 +2200,7 @@ void fuse_init_dir(struct inode *inode) static int fuse_symlink_read_folio(struct file *null, struct folio *folio) { - int err = fuse_readlink_page(folio->mapping->host, &folio->page); + int err = fuse_readlink_page(folio->mapping->host, folio); if (!err) folio_mark_uptodate(folio); -- Gitee From 125916f46fb77ae803e753290416307f479609e6 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 24 Oct 2024 10:18:01 -0700 Subject: [PATCH 2053/2138] fuse: convert readdir to use folios ANBZ: #12255 commit 02b78c7a7a0c72aee6f600a167e6adee9417ac0e upstream. Convert readdir requests to use a folio instead of a page. No functional changes. Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- fs/fuse/readdir.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c index d3f685104d48..b6eea5e6b69f 100644 --- a/fs/fuse/readdir.c +++ b/fs/fuse/readdir.c @@ -331,24 +331,25 @@ static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx) { int plus; ssize_t res; - struct page *page; + struct folio *folio; struct inode *inode = file_inode(file); struct fuse_mount *fm = get_fuse_mount(inode); struct fuse_io_args ia = {}; struct fuse_args_pages *ap = &ia.ap; - struct fuse_page_desc desc = { .length = PAGE_SIZE }; + struct fuse_folio_desc desc = { .length = PAGE_SIZE }; u64 attr_version = 0; bool locked; - page = alloc_page(GFP_KERNEL); - if (!page) + folio = folio_alloc(GFP_KERNEL, 0); + if (!folio) return -ENOMEM; plus = 
fuse_use_readdirplus(inode, ctx); ap->args.out_pages = true; - ap->num_pages = 1; - ap->pages = &page; - ap->descs = &desc; + ap->uses_folios = true; + ap->num_folios = 1; + ap->folios = &folio; + ap->folio_descs = &desc; if (plus) { attr_version = fuse_get_attr_version(fm->fc); fuse_read_args_fill(&ia, file, ctx->pos, PAGE_SIZE, @@ -367,15 +368,15 @@ static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx) if (ff->open_flags & FOPEN_CACHE_DIR) fuse_readdir_cache_end(file, ctx->pos); } else if (plus) { - res = parse_dirplusfile(page_address(page), res, + res = parse_dirplusfile(folio_address(folio), res, file, ctx, attr_version); } else { - res = parse_dirfile(page_address(page), res, file, + res = parse_dirfile(folio_address(folio), res, file, ctx); } } - __free_page(page); + folio_put(folio); fuse_invalidate_atime(inode); return res; } -- Gitee From e62d8851ab0fe42285410085445ef530bcb5c4c6 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 24 Oct 2024 10:18:02 -0700 Subject: [PATCH 2054/2138] fuse: convert reads to use folios ANBZ: #12255 commit 51b025301824f16d51243aa505709d678f2e059e upstream. Convert read requests to use folios instead of pages. No functional changes. 
Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- fs/fuse/file.c | 67 ++++++++++++++++++++++++++++++++---------------- fs/fuse/fuse_i.h | 12 +++++++++ 2 files changed, 57 insertions(+), 22 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 97f7b3254360..39627ff8c77b 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -760,12 +760,37 @@ static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io, return ia; } +static struct fuse_io_args *fuse_io_folios_alloc(struct fuse_io_priv *io, + unsigned int nfolios) +{ + struct fuse_io_args *ia; + + ia = kzalloc(sizeof(*ia), GFP_KERNEL); + if (ia) { + ia->io = io; + ia->ap.uses_folios = true; + ia->ap.folios = fuse_folios_alloc(nfolios, GFP_KERNEL, + &ia->ap.folio_descs); + if (!ia->ap.folios) { + kfree(ia); + ia = NULL; + } + } + return ia; +} + static void fuse_io_free(struct fuse_io_args *ia) { kfree(ia->ap.pages); kfree(ia); } +static void fuse_io_folios_free(struct fuse_io_args *ia) +{ + kfree(ia->ap.folios); + kfree(ia); +} + static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args, int err) { @@ -865,7 +890,7 @@ static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read, * reached the client fs yet. So the hole is not present there. 
*/ if (!fc->writeback_cache) { - loff_t pos = page_offset(ap->pages[0]) + num_read; + loff_t pos = folio_pos(ap->folios[0]) + num_read; fuse_read_update_size(inode, pos, attr_ver); } } @@ -875,14 +900,14 @@ static int fuse_do_readfolio(struct file *file, struct folio *folio) struct inode *inode = folio->mapping->host; struct fuse_mount *fm = get_fuse_mount(inode); loff_t pos = folio_pos(folio); - struct fuse_page_desc desc = { .length = PAGE_SIZE }; - struct page *page = &folio->page; + struct fuse_folio_desc desc = { .length = PAGE_SIZE }; struct fuse_io_args ia = { .ap.args.page_zeroing = true, .ap.args.out_pages = true, - .ap.num_pages = 1, - .ap.pages = &page, - .ap.descs = &desc, + .ap.uses_folios = true, + .ap.num_folios = 1, + .ap.folios = &folio, + .ap.folio_descs = &desc, }; ssize_t res; u64 attr_ver; @@ -941,8 +966,8 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, size_t num_read = args->out_args[0].size; struct address_space *mapping = NULL; - for (i = 0; mapping == NULL && i < ap->num_pages; i++) - mapping = ap->pages[i]->mapping; + for (i = 0; mapping == NULL && i < ap->num_folios; i++) + mapping = ap->folios[i]->mapping; if (mapping) { struct inode *inode = mapping->host; @@ -956,15 +981,12 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, fuse_invalidate_atime(inode); } - for (i = 0; i < ap->num_pages; i++) { - struct folio *folio = page_folio(ap->pages[i]); - - folio_end_read(folio, !err); - } + for (i = 0; i < ap->num_folios; i++) + folio_end_read(ap->folios[i], !err); if (ia->ff) fuse_file_put(ia->ff, false); - fuse_io_free(ia); + fuse_io_folios_free(ia); } static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) @@ -972,8 +994,9 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) struct fuse_file *ff = file->private_data; struct fuse_mount *fm = ff->fm; struct fuse_args_pages *ap = &ia->ap; - loff_t pos = page_offset(ap->pages[0]); - 
size_t count = ap->num_pages << PAGE_SHIFT; + loff_t pos = folio_pos(ap->folios[0]); + /* Currently, all folios in FUSE are one page */ + size_t count = ap->num_folios << PAGE_SHIFT; ssize_t res; int err; @@ -984,7 +1007,7 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) /* Don't overflow end offset */ if (pos + (count - 1) == LLONG_MAX) { count--; - ap->descs[ap->num_pages - 1].length--; + ap->folio_descs[ap->num_folios - 1].length--; } WARN_ON((loff_t) (pos + count) < 0); @@ -1045,16 +1068,16 @@ static void fuse_readahead(struct readahead_control *rac) */ break; - ia = fuse_io_alloc(NULL, cur_pages); + ia = fuse_io_folios_alloc(NULL, cur_pages); if (!ia) return; ap = &ia->ap; - while (ap->num_pages < cur_pages) { + while (ap->num_folios < cur_pages) { folio = readahead_folio(rac); - ap->pages[ap->num_pages] = &folio->page; - ap->descs[ap->num_pages].length = folio_size(folio); - ap->num_pages++; + ap->folios[ap->num_folios] = folio; + ap->folio_descs[ap->num_folios].length = folio_size(folio); + ap->num_folios++; } fuse_send_readpages(ia, rac->file); nr_pages -= cur_pages; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 43da1bbb4e55..ea26b3c67d9d 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -986,6 +986,18 @@ static inline struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags, return pages; } +static inline struct folio **fuse_folios_alloc(unsigned int nfolios, gfp_t flags, + struct fuse_folio_desc **desc) +{ + struct folio **folios; + + folios = kzalloc(nfolios * (sizeof(struct folio *) + + sizeof(struct fuse_folio_desc)), flags); + *desc = (void *) (folios + nfolios); + + return folios; +} + static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs, unsigned int index, unsigned int nr_pages) -- Gitee From c53a4d2985d4c7d282199cc98729ec6d4be9ffda Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 24 Oct 2024 10:18:03 -0700 Subject: [PATCH 2055/2138] fuse: convert writes 
(non-writeback) to use folios ANBZ: #12255 commit f2ef459bab7326f4800ec2098cf073fbda2185af upstream. Convert non-writeback write requests to use folios instead of pages. No functional changes. Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- fs/fuse/file.c | 33 ++++++++++++++++++--------------- fs/fuse/fuse_i.h | 2 +- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 39627ff8c77b..1f36560d3bb7 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1197,8 +1197,8 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, bool short_write; int err; - for (i = 0; i < ap->num_pages; i++) - fuse_wait_on_page_writeback(inode, ap->pages[i]->index); + for (i = 0; i < ap->num_folios; i++) + fuse_wait_on_folio_writeback(inode, ap->folios[i]); fuse_write_args_fill(ia, ff, pos, count); ia->write.in.flags = fuse_write_flags(iocb); @@ -1210,10 +1210,10 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, err = -EIO; short_write = ia->write.out.size < count; - offset = ap->descs[0].offset; + offset = ap->folio_descs[0].offset; count = ia->write.out.size; - for (i = 0; i < ap->num_pages; i++) { - struct folio *folio = page_folio(ap->pages[i]); + for (i = 0; i < ap->num_folios; i++) { + struct folio *folio = ap->folios[i]; if (err) { folio_clear_uptodate(folio); @@ -1227,7 +1227,7 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, } offset = 0; } - if (ia->write.page_locked && (i == ap->num_pages - 1)) + if (ia->write.folio_locked && (i == ap->num_folios - 1)) folio_unlock(folio); folio_put(folio); } @@ -1243,11 +1243,12 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, struct fuse_args_pages *ap = &ia->ap; struct fuse_conn *fc = get_fuse_conn(mapping->host); unsigned offset = pos & (PAGE_SIZE - 1); + unsigned int nr_pages = 0; size_t count = 0; 
int err; ap->args.in_pages = true; - ap->descs[0].offset = offset; + ap->folio_descs[0].offset = offset; do { size_t tmp; @@ -1283,9 +1284,10 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, } err = 0; - ap->pages[ap->num_pages] = &folio->page; - ap->descs[ap->num_pages].length = tmp; - ap->num_pages++; + ap->folios[ap->num_folios] = folio; + ap->folio_descs[ap->num_folios].length = tmp; + ap->num_folios++; + nr_pages++; count += tmp; pos += tmp; @@ -1300,13 +1302,13 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, if (folio_test_uptodate(folio)) { folio_unlock(folio); } else { - ia->write.page_locked = true; + ia->write.folio_locked = true; break; } if (!fc->big_writes) break; } while (iov_iter_count(ii) && count < fc->max_write && - ap->num_pages < max_pages && offset == 0); + nr_pages < max_pages && offset == 0); return count > 0 ? count : err; } @@ -1340,8 +1342,9 @@ static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii) unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii), fc->max_pages); - ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs); - if (!ap->pages) { + ap->uses_folios = true; + ap->folios = fuse_folios_alloc(nr_pages, GFP_KERNEL, &ap->folio_descs); + if (!ap->folios) { err = -ENOMEM; break; } @@ -1363,7 +1366,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii) err = -EIO; } } - kfree(ap->pages); + kfree(ap->folios); } while (!err && iov_iter_count(ii)); fuse_write_update_attr(inode, pos, res); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index ea26b3c67d9d..476b64e553ff 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -1053,7 +1053,7 @@ struct fuse_io_args { struct { struct fuse_write_in in; struct fuse_write_out out; - bool page_locked; + bool folio_locked; } write; }; struct fuse_args_pages ap; -- Gitee From 99d6dc0623930fdce30a25bbf9bc79e329028e0b Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 24 Oct 2024 10:18:04 -0700 Subject: 
[PATCH 2056/2138] fuse: convert ioctls to use folios ANBZ: #12255 commit ac1cf6e3bbe3dd371bd61a423437c1f67bba8b2a upstream. Convert ioctl requests to use folios instead of pages. No functional changes. Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- fs/fuse/fuse_i.h | 10 ++++++++++ fs/fuse/ioctl.c | 32 ++++++++++++++++---------------- 2 files changed, 26 insertions(+), 16 deletions(-) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 476b64e553ff..0e9671c4b350 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -1008,6 +1008,16 @@ static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs, descs[i].length = PAGE_SIZE - descs[i].offset; } +static inline void fuse_folio_descs_length_init(struct fuse_folio_desc *descs, + unsigned int index, + unsigned int nr_folios) +{ + int i; + + for (i = index; i < index + nr_folios; i++) + descs[i].length = PAGE_SIZE - descs[i].offset; +} + static inline void fuse_sync_bucket_dec(struct fuse_sync_bucket *bucket) { /* Need RCU protection to prevent use after free after the decrement */ diff --git a/fs/fuse/ioctl.c b/fs/fuse/ioctl.c index 726640fa439e..dc3e7c8ff97b 100644 --- a/fs/fuse/ioctl.c +++ b/fs/fuse/ioctl.c @@ -201,12 +201,12 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); err = -ENOMEM; - ap.pages = fuse_pages_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.descs); + ap.folios = fuse_folios_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.folio_descs); iov_page = (struct iovec *) __get_free_page(GFP_KERNEL); - if (!ap.pages || !iov_page) + if (!ap.folios || !iov_page) goto out; - fuse_page_descs_length_init(ap.descs, 0, fm->fc->max_pages); + fuse_folio_descs_length_init(ap.folio_descs, 0, fm->fc->max_pages); /* * If restricted, initialize IO parameters as encoded in 
@cmd. @@ -244,14 +244,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, err = -ENOMEM; if (max_pages > fm->fc->max_pages) goto out; - while (ap.num_pages < max_pages) { - ap.pages[ap.num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); - if (!ap.pages[ap.num_pages]) + ap.uses_folios = true; + while (ap.num_folios < max_pages) { + ap.folios[ap.num_folios] = folio_alloc(GFP_KERNEL | __GFP_HIGHMEM, 0); + if (!ap.folios[ap.num_folios]) goto out; - ap.num_pages++; + ap.num_folios++; } - /* okay, let's send it to the client */ ap.args.opcode = FUSE_IOCTL; ap.args.nodeid = ff->nodeid; @@ -265,8 +265,8 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, err = -EFAULT; iov_iter_init(&ii, ITER_SOURCE, in_iov, in_iovs, in_size); - for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) { - c = copy_page_from_iter(ap.pages[i], 0, PAGE_SIZE, &ii); + for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_folios); i++) { + c = copy_folio_from_iter(ap.folios[i], 0, PAGE_SIZE, &ii); if (c != PAGE_SIZE && iov_iter_count(&ii)) goto out; } @@ -304,7 +304,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) goto out; - vaddr = kmap_local_page(ap.pages[0]); + vaddr = kmap_local_folio(ap.folios[0], 0); err = fuse_copy_ioctl_iovec(fm->fc, iov_page, vaddr, transferred, in_iovs + out_iovs, (flags & FUSE_IOCTL_COMPAT) != 0); @@ -332,17 +332,17 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, err = -EFAULT; iov_iter_init(&ii, ITER_DEST, out_iov, out_iovs, transferred); - for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) { - c = copy_page_to_iter(ap.pages[i], 0, PAGE_SIZE, &ii); + for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_folios); i++) { + c = copy_folio_to_iter(ap.folios[i], 0, PAGE_SIZE, &ii); if (c != PAGE_SIZE && iov_iter_count(&ii)) goto out; } err = 0; out: free_page((unsigned long) 
iov_page); - while (ap.num_pages) - __free_page(ap.pages[--ap.num_pages]); - kfree(ap.pages); + while (ap.num_folios) + folio_put(ap.folios[--ap.num_folios]); + kfree(ap.folios); return err ? err : outarg.result; } -- Gitee From 03034efd22d1901cb021bf16fd3c36a24a7c3b32 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 24 Oct 2024 10:18:05 -0700 Subject: [PATCH 2057/2138] fuse: convert retrieves to use folios ANBZ: #12255 commit 448895df0366041366a84861350ce471446bf560 upstream. Convert retrieve requests to use folios instead of pages. No functional changes. Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- fs/fuse/dev.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index fecf22b1f37e..27933408d6a4 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1701,7 +1701,7 @@ static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args, struct fuse_retrieve_args *ra = container_of(args, typeof(*ra), ap.args); - release_pages(ra->ap.pages, ra->ap.num_pages); + release_pages(ra->ap.folios, ra->ap.num_folios); kfree(ra); } @@ -1715,7 +1715,7 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, unsigned int num; unsigned int offset; size_t total_len = 0; - unsigned int num_pages; + unsigned int num_pages, cur_pages = 0; struct fuse_conn *fc = fm->fc; struct fuse_retrieve_args *ra; size_t args_size = sizeof(*ra); @@ -1734,15 +1734,16 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; num_pages = min(num_pages, fc->max_pages); - args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0])); + args_size += num_pages * (sizeof(ap->folios[0]) + sizeof(ap->folio_descs[0])); ra = kzalloc(args_size, GFP_KERNEL); if (!ra) return -ENOMEM; ap = &ra->ap; - 
ap->pages = (void *) (ra + 1); - ap->descs = (void *) (ap->pages + num_pages); + ap->folios = (void *) (ra + 1); + ap->folio_descs = (void *) (ap->folios + num_pages); + ap->uses_folios = true; args = &ap->args; args->nodeid = outarg->nodeid; @@ -1753,7 +1754,7 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, index = outarg->offset >> PAGE_SHIFT; - while (num && ap->num_pages < num_pages) { + while (num && cur_pages < num_pages) { struct folio *folio; unsigned int this_num; @@ -1762,10 +1763,11 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, break; this_num = min_t(unsigned, num, PAGE_SIZE - offset); - ap->pages[ap->num_pages] = &folio->page; - ap->descs[ap->num_pages].offset = offset; - ap->descs[ap->num_pages].length = this_num; - ap->num_pages++; + ap->folios[ap->num_folios] = folio; + ap->folio_descs[ap->num_folios].offset = offset; + ap->folio_descs[ap->num_folios].length = this_num; + ap->num_folios++; + cur_pages++; offset = 0; num -= this_num; -- Gitee From 6ee8bdad18c546933e23fdb02a94500e23535378 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 24 Oct 2024 10:18:06 -0700 Subject: [PATCH 2058/2138] fuse: convert writebacks to use folios ANBZ: #12255 commit cbe9c115b7441dd790540436118eee4626ec9979 upstream. Convert writeback requests to use folios instead of pages. No functional changes. 
Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- fs/fuse/file.c | 126 +++++++++++++++++++++++++------------------------ 1 file changed, 64 insertions(+), 62 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 1f36560d3bb7..c111a33e5583 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -436,7 +436,7 @@ static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi, wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry); WARN_ON(get_fuse_inode(wpa->inode) != fi); curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT; - if (idx_from >= curr_index + wpa->ia.ap.num_pages) + if (idx_from >= curr_index + wpa->ia.ap.num_folios) n = n->rb_right; else if (idx_to < curr_index) n = n->rb_left; @@ -1805,12 +1805,12 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa) if (wpa->bucket) fuse_sync_bucket_dec(wpa->bucket); - for (i = 0; i < ap->num_pages; i++) - __free_page(ap->pages[i]); + for (i = 0; i < ap->num_folios; i++) + folio_put(ap->folios[i]); fuse_file_put(wpa->ia.ff, false); - kfree(ap->pages); + kfree(ap->folios); kfree(wpa); } @@ -1830,8 +1830,8 @@ static void fuse_writepage_finish(struct fuse_writepage_args *wpa) struct fuse_inode *fi = get_fuse_inode(inode); int i; - for (i = 0; i < ap->num_pages; i++) - fuse_writepage_finish_stat(inode, page_folio(ap->pages[i])); + for (i = 0; i < ap->num_folios; i++) + fuse_writepage_finish_stat(inode, ap->folios[i]); wake_up(&fi->page_waitq); } @@ -1846,7 +1846,8 @@ __acquires(fi->lock) struct fuse_inode *fi = get_fuse_inode(wpa->inode); struct fuse_write_in *inarg = &wpa->ia.write.in; struct fuse_args *args = &wpa->ia.ap.args; - __u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE; + /* Currently, all folios in FUSE are one page */ + __u64 data_size = wpa->ia.ap.num_folios * PAGE_SIZE; int err; fi->writectr++; @@ -1887,7 +1888,7 @@ 
__acquires(fi->lock) next = aux->next; aux->next = NULL; fuse_writepage_finish_stat(aux->inode, - page_folio(aux->ia.ap.pages[0])); + aux->ia.ap.folios[0]); fuse_writepage_free(aux); } @@ -1922,11 +1923,11 @@ static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root, struct fuse_writepage_args *wpa) { pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT; - pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1; + pgoff_t idx_to = idx_from + wpa->ia.ap.num_folios - 1; struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; - WARN_ON(!wpa->ia.ap.num_pages); + WARN_ON(!wpa->ia.ap.num_folios); while (*p) { struct fuse_writepage_args *curr; pgoff_t curr_index; @@ -1937,7 +1938,7 @@ static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root, WARN_ON(curr->inode != wpa->inode); curr_index = curr->ia.write.in.offset >> PAGE_SHIFT; - if (idx_from >= curr_index + curr->ia.ap.num_pages) + if (idx_from >= curr_index + curr->ia.ap.num_folios) p = &(*p)->rb_right; else if (idx_to < curr_index) p = &(*p)->rb_left; @@ -2069,9 +2070,10 @@ static struct fuse_writepage_args *fuse_writepage_args_alloc(void) wpa = kzalloc(sizeof(*wpa), GFP_NOFS); if (wpa) { ap = &wpa->ia.ap; - ap->num_pages = 0; - ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs); - if (!ap->pages) { + ap->num_folios = 0; + ap->uses_folios = true; + ap->folios = fuse_folios_alloc(1, GFP_NOFS, &ap->folio_descs); + if (!ap->folios) { kfree(wpa); wpa = NULL; } @@ -2095,16 +2097,16 @@ static void fuse_writepage_add_to_bucket(struct fuse_conn *fc, } static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio, - struct folio *tmp_folio, uint32_t page_index) + struct folio *tmp_folio, uint32_t folio_index) { struct inode *inode = folio->mapping->host; struct fuse_args_pages *ap = &wpa->ia.ap; folio_copy(tmp_folio, folio); - ap->pages[page_index] = &tmp_folio->page; - ap->descs[page_index].offset = 0; - ap->descs[page_index].length = 
PAGE_SIZE; + ap->folios[folio_index] = tmp_folio; + ap->folio_descs[folio_index].offset = 0; + ap->folio_descs[folio_index].length = PAGE_SIZE; inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP); @@ -2161,7 +2163,7 @@ static int fuse_writepage_locked(struct folio *folio) goto err_writepage_args; ap = &wpa->ia.ap; - ap->num_pages = 1; + ap->num_folios = 1; folio_start_writeback(folio); fuse_writepage_args_page_fill(wpa, folio, tmp_folio, 0); @@ -2189,32 +2191,32 @@ struct fuse_fill_wb_data { struct fuse_writepage_args *wpa; struct fuse_file *ff; struct inode *inode; - struct page **orig_pages; - unsigned int max_pages; + struct folio **orig_folios; + unsigned int max_folios; }; static bool fuse_pages_realloc(struct fuse_fill_wb_data *data) { struct fuse_args_pages *ap = &data->wpa->ia.ap; struct fuse_conn *fc = get_fuse_conn(data->inode); - struct page **pages; - struct fuse_page_desc *descs; - unsigned int npages = min_t(unsigned int, - max_t(unsigned int, data->max_pages * 2, - FUSE_DEFAULT_MAX_PAGES_PER_REQ), + struct folio **folios; + struct fuse_folio_desc *descs; + unsigned int nfolios = min_t(unsigned int, + max_t(unsigned int, data->max_folios * 2, + FUSE_DEFAULT_MAX_PAGES_PER_REQ), fc->max_pages); - WARN_ON(npages <= data->max_pages); + WARN_ON(nfolios <= data->max_folios); - pages = fuse_pages_alloc(npages, GFP_NOFS, &descs); - if (!pages) + folios = fuse_folios_alloc(nfolios, GFP_NOFS, &descs); + if (!folios) return false; - memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages); - memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages); - kfree(ap->pages); - ap->pages = pages; - ap->descs = descs; - data->max_pages = npages; + memcpy(folios, ap->folios, sizeof(struct folio *) * ap->num_folios); + memcpy(descs, ap->folio_descs, sizeof(struct fuse_folio_desc) * ap->num_folios); + kfree(ap->folios); + ap->folios = folios; + ap->folio_descs = descs; + data->max_folios = nfolios; 
return true; } @@ -2224,7 +2226,7 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data) struct fuse_writepage_args *wpa = data->wpa; struct inode *inode = data->inode; struct fuse_inode *fi = get_fuse_inode(inode); - int num_pages = wpa->ia.ap.num_pages; + int num_folios = wpa->ia.ap.num_folios; int i; spin_lock(&fi->lock); @@ -2232,8 +2234,8 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data) fuse_flush_writepages(inode); spin_unlock(&fi->lock); - for (i = 0; i < num_pages; i++) - end_page_writeback(data->orig_pages[i]); + for (i = 0; i < num_folios; i++) + folio_end_writeback(data->orig_folios[i]); } /* @@ -2244,15 +2246,15 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data) * swapping the new temp page with the old one. */ static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa, - struct page *page) + struct folio *folio) { struct fuse_inode *fi = get_fuse_inode(new_wpa->inode); struct fuse_writepage_args *tmp; struct fuse_writepage_args *old_wpa; struct fuse_args_pages *new_ap = &new_wpa->ia.ap; - WARN_ON(new_ap->num_pages != 0); - new_ap->num_pages = 1; + WARN_ON(new_ap->num_folios != 0); + new_ap->num_folios = 1; spin_lock(&fi->lock); old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa); @@ -2266,9 +2268,9 @@ static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa, WARN_ON(tmp->inode != new_wpa->inode); curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT; - if (curr_index == page->index) { - WARN_ON(tmp->ia.ap.num_pages != 1); - swap(tmp->ia.ap.pages[0], new_ap->pages[0]); + if (curr_index == folio->index) { + WARN_ON(tmp->ia.ap.num_folios != 1); + swap(tmp->ia.ap.folios[0], new_ap->folios[0]); break; } } @@ -2282,7 +2284,7 @@ static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa, if (tmp) { fuse_writepage_finish_stat(new_wpa->inode, - page_folio(new_ap->pages[0])); + folio); fuse_writepage_free(new_wpa); } @@ -2293,7 +2295,7 @@ static bool fuse_writepage_need_send(struct 
fuse_conn *fc, struct folio *folio, struct fuse_args_pages *ap, struct fuse_fill_wb_data *data) { - WARN_ON(!ap->num_pages); + WARN_ON(!ap->num_folios); /* * Being under writeback is unlikely but possible. For example direct @@ -2305,19 +2307,19 @@ static bool fuse_writepage_need_send(struct fuse_conn *fc, struct folio *folio, return true; /* Reached max pages */ - if (ap->num_pages == fc->max_pages) + if (ap->num_folios == fc->max_pages) return true; /* Reached max write bytes */ - if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write) + if ((ap->num_folios + 1) * PAGE_SIZE > fc->max_write) return true; /* Discontinuity */ - if (data->orig_pages[ap->num_pages - 1]->index + 1 != folio_index(folio)) + if (data->orig_folios[ap->num_folios - 1]->index + 1 != folio_index(folio)) return true; /* Need to grow the pages array? If so, did the expansion fail? */ - if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data)) + if (ap->num_folios == data->max_folios && !fuse_pages_realloc(data)) return true; return false; @@ -2361,7 +2363,7 @@ static int fuse_writepages_fill(struct folio *folio, * This is ensured by holding the page lock in page_mkwrite() while * checking fuse_page_is_writeback(). We already hold the page lock * since clear_page_dirty_for_io() and keep it held until we add the - * request to the fi->writepages list and increment ap->num_pages. + * request to the fi->writepages list and increment ap->num_folios. * After this fuse_page_is_writeback() will indicate that the page is * under writeback, so we can release the page lock. 
*/ @@ -2373,13 +2375,13 @@ static int fuse_writepages_fill(struct folio *folio, goto out_unlock; } fuse_file_get(wpa->ia.ff); - data->max_pages = 1; + data->max_folios = 1; ap = &wpa->ia.ap; } folio_start_writeback(folio); - fuse_writepage_args_page_fill(wpa, folio, tmp_folio, ap->num_pages); - data->orig_pages[ap->num_pages] = &folio->page; + fuse_writepage_args_page_fill(wpa, folio, tmp_folio, ap->num_folios); + data->orig_folios[ap->num_folios] = folio; err = 0; if (data->wpa) { @@ -2388,9 +2390,9 @@ static int fuse_writepages_fill(struct folio *folio, * fuse_page_is_writeback(). */ spin_lock(&fi->lock); - ap->num_pages++; + ap->num_folios++; spin_unlock(&fi->lock); - } else if (fuse_writepage_add(wpa, &folio->page)) { + } else if (fuse_writepage_add(wpa, folio)) { data->wpa = wpa; } else { folio_end_writeback(folio); @@ -2422,21 +2424,21 @@ static int fuse_writepages(struct address_space *mapping, data.ff = NULL; err = -ENOMEM; - data.orig_pages = kcalloc(fc->max_pages, - sizeof(struct page *), - GFP_NOFS); - if (!data.orig_pages) + data.orig_folios = kcalloc(fc->max_pages, + sizeof(struct folio *), + GFP_NOFS); + if (!data.orig_folios) goto out; err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data); if (data.wpa) { - WARN_ON(!data.wpa->ia.ap.num_pages); + WARN_ON(!data.wpa->ia.ap.num_folios); fuse_writepages_send(&data); } if (data.ff) fuse_file_put(data.ff, false); - kfree(data.orig_pages); + kfree(data.orig_folios); out: return err; } -- Gitee From 8fe4cb80e77825563d2f6a2e9337ecd0909e6ce4 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 24 Oct 2024 10:18:07 -0700 Subject: [PATCH 2059/2138] mm/writeback: add folio_mark_dirty_lock() ANBZ: #12255 commit 7fce207af5ec074a9a50e90eb866b17ca4a90f06 upstream. Add a new convenience helper folio_mark_dirty_lock() that grabs the folio lock before calling folio_mark_dirty(). Refactor set_page_dirty_lock() to directly use folio_mark_dirty_lock(). 
Signed-off-by: Joanne Koong Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- include/linux/mm.h | 1 + mm/folio-compat.c | 6 ++++++ mm/page-writeback.c | 22 +++++++++++----------- 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 2078d68167c1..d9ae9afdb51b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2519,6 +2519,7 @@ struct kvec; struct page *get_dump_page(unsigned long addr); bool folio_mark_dirty(struct folio *folio); +bool folio_mark_dirty_lock(struct folio *folio); bool set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); diff --git a/mm/folio-compat.c b/mm/folio-compat.c index a546271db69b..cde4a40f6645 100644 --- a/mm/folio-compat.c +++ b/mm/folio-compat.c @@ -64,6 +64,12 @@ int __set_page_dirty_nobuffers(struct page *page) } EXPORT_SYMBOL(__set_page_dirty_nobuffers); +int set_page_dirty_lock(struct page *page) +{ + return folio_mark_dirty_lock(page_folio(page)); +} +EXPORT_SYMBOL(set_page_dirty_lock); + bool clear_page_dirty_for_io(struct page *page) { return folio_clear_dirty_for_io(page_folio(page)); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index e632ec9b6421..ed27e185d95f 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2808,25 +2808,25 @@ bool folio_mark_dirty(struct folio *folio) EXPORT_SYMBOL(folio_mark_dirty); /* - * set_page_dirty() is racy if the caller has no reference against - * page->mapping->host, and if the page is unlocked. This is because another - * CPU could truncate the page off the mapping and then free the mapping. + * folio_mark_dirty() is racy if the caller has no reference against + * folio->mapping->host, and if the folio is unlocked. This is because another + * CPU could truncate the folio off the mapping and then free the mapping. 
* - * Usually, the page _is_ locked, or the caller is a user-space process which + * Usually, the folio _is_ locked, or the caller is a user-space process which * holds a reference on the inode by having an open file. * - * In other cases, the page should be locked before running set_page_dirty(). + * In other cases, the folio should be locked before running folio_mark_dirty(). */ -int set_page_dirty_lock(struct page *page) +bool folio_mark_dirty_lock(struct folio *folio) { - int ret; + bool ret; - lock_page(page); - ret = set_page_dirty(page); - unlock_page(page); + folio_lock(folio); + ret = folio_mark_dirty(folio); + folio_unlock(folio); return ret; } -EXPORT_SYMBOL(set_page_dirty_lock); +EXPORT_SYMBOL(folio_mark_dirty_lock); /* * This cancels just the dirty bit on the kernel page itself, it does NOT -- Gitee From eab782e74fef8276d0148e605157f59329b5d9d6 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 24 Oct 2024 10:18:08 -0700 Subject: [PATCH 2060/2138] fuse: convert direct io to use folios ANBZ: #12255 commit 3b97c3652d9128ab7f8c9b8adec6108611fdb153 upstream. Convert direct io requests to use folios instead of pages. No functional changes. 
Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- fs/fuse/file.c | 80 +++++++++++++++++++++--------------------------- fs/fuse/fuse_i.h | 22 ------------- 2 files changed, 35 insertions(+), 67 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index c111a33e5583..dd370355fd6e 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -665,11 +665,11 @@ static void fuse_release_user_pages(struct fuse_args_pages *ap, ssize_t nres, { unsigned int i; - for (i = 0; i < ap->num_pages; i++) { + for (i = 0; i < ap->num_folios; i++) { if (should_dirty) - set_page_dirty_lock(ap->pages[i]); + folio_mark_dirty_lock(ap->folios[i]); if (ap->args.is_pinned) - unpin_user_page(ap->pages[i]); + unpin_folio(ap->folios[i]); } if (nres > 0 && ap->args.invalidate_vmap) @@ -742,24 +742,6 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos) kref_put(&io->refcnt, fuse_io_release); } -static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io, - unsigned int npages) -{ - struct fuse_io_args *ia; - - ia = kzalloc(sizeof(*ia), GFP_KERNEL); - if (ia) { - ia->io = io; - ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL, - &ia->ap.descs); - if (!ia->ap.pages) { - kfree(ia); - ia = NULL; - } - } - return ia; -} - static struct fuse_io_args *fuse_io_folios_alloc(struct fuse_io_priv *io, unsigned int nfolios) { @@ -779,12 +761,6 @@ static struct fuse_io_args *fuse_io_folios_alloc(struct fuse_io_priv *io, return ia; } -static void fuse_io_free(struct fuse_io_args *ia) -{ - kfree(ia->ap.pages); - kfree(ia); -} - static void fuse_io_folios_free(struct fuse_io_args *ia) { kfree(ia->ap.folios); @@ -821,7 +797,7 @@ static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args, fuse_release_user_pages(&ia->ap, err ?: nres, io->should_dirty); fuse_aio_complete(io, err, pos); - fuse_io_free(ia); + 
fuse_io_folios_free(ia); } static ssize_t fuse_async_req_send(struct fuse_mount *fm, @@ -1530,6 +1506,7 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, bool use_pages_for_kvec_io) { bool flush_or_invalidate = false; + unsigned int nr_pages = 0; size_t nbytes = 0; /* # bytes already packed in req */ ssize_t ret = 0; @@ -1559,15 +1536,23 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, } } - while (nbytes < *nbytesp && ap->num_pages < max_pages) { - unsigned npages; + /* + * Until there is support for iov_iter_extract_folios(), we have to + * manually extract pages using iov_iter_extract_pages() and then + * copy that to a folios array. + */ + struct page **pages = kzalloc(max_pages * sizeof(struct page *), + GFP_KERNEL); + if (!pages) + return -ENOMEM; + + while (nbytes < *nbytesp && nr_pages < max_pages) { + unsigned nfolios, i; size_t start; - struct page **pt_pages; - pt_pages = &ap->pages[ap->num_pages]; - ret = iov_iter_extract_pages(ii, &pt_pages, + ret = iov_iter_extract_pages(ii, &pages, *nbytesp - nbytes, - max_pages - ap->num_pages, + max_pages - nr_pages, 0, &start); if (ret < 0) break; @@ -1575,15 +1560,20 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, nbytes += ret; ret += start; - npages = DIV_ROUND_UP(ret, PAGE_SIZE); + /* Currently, all folios in FUSE are one page */ + nfolios = DIV_ROUND_UP(ret, PAGE_SIZE); - ap->descs[ap->num_pages].offset = start; - fuse_page_descs_length_init(ap->descs, ap->num_pages, npages); + ap->folio_descs[ap->num_folios].offset = start; + fuse_folio_descs_length_init(ap->folio_descs, ap->num_folios, nfolios); + for (i = 0; i < nfolios; i++) + ap->folios[i + ap->num_folios] = page_folio(pages[i]); - ap->num_pages += npages; - ap->descs[ap->num_pages - 1].length -= + ap->num_folios += nfolios; + ap->folio_descs[ap->num_folios - 1].length -= (PAGE_SIZE - ret) & (PAGE_SIZE - 1); + nr_pages += nfolios; } + kfree(pages); if 
(write && flush_or_invalidate) flush_kernel_vmap_range(ap->args.vmap_base, nbytes); @@ -1623,14 +1613,14 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO; max_pages = iov_iter_npages(iter, fc->max_pages); - ia = fuse_io_alloc(io, max_pages); + ia = fuse_io_folios_alloc(io, max_pages); if (!ia) return -ENOMEM; if (fopen_direct_io && fc->direct_io_allow_mmap) { res = filemap_write_and_wait_range(mapping, pos, pos + count - 1); if (res) { - fuse_io_free(ia); + fuse_io_folios_free(ia); return res; } } @@ -1645,7 +1635,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, if (fopen_direct_io && write) { res = invalidate_inode_pages2_range(mapping, idx_from, idx_to); if (res) { - fuse_io_free(ia); + fuse_io_folios_free(ia); return res; } } @@ -1672,7 +1662,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, if (!io->async || nres < 0) { fuse_release_user_pages(&ia->ap, nres, io->should_dirty); - fuse_io_free(ia); + fuse_io_folios_free(ia); } ia = NULL; if (nres < 0) { @@ -1691,13 +1681,13 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, } if (count) { max_pages = iov_iter_npages(iter, fc->max_pages); - ia = fuse_io_alloc(io, max_pages); + ia = fuse_io_folios_alloc(io, max_pages); if (!ia) break; } } if (ia) - fuse_io_free(ia); + fuse_io_folios_free(ia); if (res > 0) *ppos = pos; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 0e9671c4b350..c1ee272e0534 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -974,18 +974,6 @@ static inline bool fuse_is_bad(struct inode *inode) return unlikely(test_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state)); } -static inline struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags, - struct fuse_page_desc **desc) -{ - struct page **pages; - - pages = kzalloc(npages * (sizeof(struct page *) + - sizeof(struct fuse_page_desc)), flags); - *desc = (void *) (pages + npages); - 
- return pages; -} - static inline struct folio **fuse_folios_alloc(unsigned int nfolios, gfp_t flags, struct fuse_folio_desc **desc) { @@ -998,16 +986,6 @@ static inline struct folio **fuse_folios_alloc(unsigned int nfolios, gfp_t flags return folios; } -static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs, - unsigned int index, - unsigned int nr_pages) -{ - int i; - - for (i = index; i < index + nr_pages; i++) - descs[i].length = PAGE_SIZE - descs[i].offset; -} - static inline void fuse_folio_descs_length_init(struct fuse_folio_desc *descs, unsigned int index, unsigned int nr_folios) -- Gitee From 06d0fd1a1f8753db7b95705abee5ba8a3f67d349 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Thu, 24 Oct 2024 10:18:09 -0700 Subject: [PATCH 2061/2138] fuse: remove pages for requests and exclusively use folios ANBZ: #12255 commit 68bfb7eb7f7de355d5b3812c25a2a36e9eead97b upstream. All fuse requests use folios instead of pages for transferring data. Remove pages from the requests and exclusively use folios. No functional changes. [SzM: rename back folio_descs -> descs, etc.] 
Signed-off-by: Joanne Koong Reviewed-by: Josef Bacik Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- fs/fuse/cuse.c | 3 +- fs/fuse/dev.c | 57 ++++++++++----------------- fs/fuse/dir.c | 3 +- fs/fuse/file.c | 58 +++++++++++++--------------- fs/fuse/fuse_i.h | 22 ++--------- fs/fuse/ioctl.c | 5 +-- fs/fuse/readdir.c | 3 +- fs/fuse/virtio_fs.c | 93 +++++++++++++++++---------------------------- 8 files changed, 90 insertions(+), 154 deletions(-) diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c index 6018af98dd08..40f3e640e055 100644 --- a/fs/fuse/cuse.c +++ b/fs/fuse/cuse.c @@ -456,10 +456,9 @@ static int cuse_send_init(struct cuse_conn *cc) ap->args.out_args[1].size = CUSE_INIT_INFO_MAX; ap->args.out_argvar = true; ap->args.out_pages = true; - ap->uses_folios = true; ap->num_folios = 1; ap->folios = &ia->folio; - ap->folio_descs = &ia->desc; + ap->descs = &ia->desc; ia->folio = folio; ia->desc.length = ap->args.out_args[1].size; ap->args.end = cuse_process_init_reply; diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 27933408d6a4..52b65377c0c3 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1001,41 +1001,27 @@ static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes, struct fuse_req *req = cs->req; struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args); - if (ap->uses_folios) { - for (i = 0; i < ap->num_folios && (nbytes || zeroing); i++) { - int err; - unsigned int offset = ap->folio_descs[i].offset; - unsigned int count = min(nbytes, ap->folio_descs[i].length); - struct page *orig, *pagep; + for (i = 0; i < ap->num_folios && (nbytes || zeroing); i++) { + int err; + unsigned int offset = ap->descs[i].offset; + unsigned int count = min(nbytes, ap->descs[i].length); + struct page *orig, *pagep; - orig = pagep = &ap->folios[i]->page; + orig = pagep = &ap->folios[i]->page; - err = fuse_copy_page(cs, &pagep, offset, count, zeroing); - if (err) - return 
err; - - nbytes -= count; - - /* - * fuse_copy_page may have moved a page from a pipe - * instead of copying into our given page, so update - * the folios if it was replaced. - */ - if (pagep != orig) - ap->folios[i] = page_folio(pagep); - } - } else { - for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) { - int err; - unsigned int offset = ap->descs[i].offset; - unsigned int count = min(nbytes, ap->descs[i].length); + err = fuse_copy_page(cs, &pagep, offset, count, zeroing); + if (err) + return err; - err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing); - if (err) - return err; + nbytes -= count; - nbytes -= count; - } + /* + * fuse_copy_page may have moved a page from a pipe instead of + * copying into our given page, so update the folios if it was + * replaced. + */ + if (pagep != orig) + ap->folios[i] = page_folio(pagep); } return 0; } @@ -1734,7 +1720,7 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; num_pages = min(num_pages, fc->max_pages); - args_size += num_pages * (sizeof(ap->folios[0]) + sizeof(ap->folio_descs[0])); + args_size += num_pages * (sizeof(ap->folios[0]) + sizeof(ap->descs[0])); ra = kzalloc(args_size, GFP_KERNEL); if (!ra) @@ -1742,8 +1728,7 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, ap = &ra->ap; ap->folios = (void *) (ra + 1); - ap->folio_descs = (void *) (ap->folios + num_pages); - ap->uses_folios = true; + ap->descs = (void *) (ap->folios + num_pages); args = &ap->args; args->nodeid = outarg->nodeid; @@ -1764,8 +1749,8 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, this_num = min_t(unsigned, num, PAGE_SIZE - offset); ap->folios[ap->num_folios] = folio; - ap->folio_descs[ap->num_folios].offset = offset; - ap->folio_descs[ap->num_folios].length = this_num; + ap->descs[ap->num_folios].offset = offset; + ap->descs[ap->num_folios].length = this_num; ap->num_folios++; cur_pages++; diff --git 
a/fs/fuse/dir.c b/fs/fuse/dir.c index 2b8c73d66885..ccf086473595 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1568,10 +1568,9 @@ static int fuse_readlink_page(struct inode *inode, struct folio *folio) struct fuse_mount *fm = get_fuse_mount(inode); struct fuse_folio_desc desc = { .length = PAGE_SIZE - 1 }; struct fuse_args_pages ap = { - .uses_folios = true, .num_folios = 1, .folios = &folio, - .folio_descs = &desc, + .descs = &desc, }; char *link; ssize_t res; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index dd370355fd6e..5be240680b6f 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -742,7 +742,7 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos) kref_put(&io->refcnt, fuse_io_release); } -static struct fuse_io_args *fuse_io_folios_alloc(struct fuse_io_priv *io, +static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io, unsigned int nfolios) { struct fuse_io_args *ia; @@ -750,9 +750,8 @@ static struct fuse_io_args *fuse_io_folios_alloc(struct fuse_io_priv *io, ia = kzalloc(sizeof(*ia), GFP_KERNEL); if (ia) { ia->io = io; - ia->ap.uses_folios = true; ia->ap.folios = fuse_folios_alloc(nfolios, GFP_KERNEL, - &ia->ap.folio_descs); + &ia->ap.descs); if (!ia->ap.folios) { kfree(ia); ia = NULL; @@ -761,7 +760,7 @@ static struct fuse_io_args *fuse_io_folios_alloc(struct fuse_io_priv *io, return ia; } -static void fuse_io_folios_free(struct fuse_io_args *ia) +static void fuse_io_free(struct fuse_io_args *ia) { kfree(ia->ap.folios); kfree(ia); @@ -797,7 +796,7 @@ static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args, fuse_release_user_pages(&ia->ap, err ?: nres, io->should_dirty); fuse_aio_complete(io, err, pos); - fuse_io_folios_free(ia); + fuse_io_free(ia); } static ssize_t fuse_async_req_send(struct fuse_mount *fm, @@ -880,10 +879,9 @@ static int fuse_do_readfolio(struct file *file, struct folio *folio) struct fuse_io_args ia = { .ap.args.page_zeroing = true, .ap.args.out_pages = true, - 
.ap.uses_folios = true, .ap.num_folios = 1, .ap.folios = &folio, - .ap.folio_descs = &desc, + .ap.descs = &desc, }; ssize_t res; u64 attr_ver; @@ -962,7 +960,7 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, if (ia->ff) fuse_file_put(ia->ff, false); - fuse_io_folios_free(ia); + fuse_io_free(ia); } static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) @@ -983,7 +981,7 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) /* Don't overflow end offset */ if (pos + (count - 1) == LLONG_MAX) { count--; - ap->folio_descs[ap->num_folios - 1].length--; + ap->descs[ap->num_folios - 1].length--; } WARN_ON((loff_t) (pos + count) < 0); @@ -1044,7 +1042,7 @@ static void fuse_readahead(struct readahead_control *rac) */ break; - ia = fuse_io_folios_alloc(NULL, cur_pages); + ia = fuse_io_alloc(NULL, cur_pages); if (!ia) return; ap = &ia->ap; @@ -1052,7 +1050,7 @@ static void fuse_readahead(struct readahead_control *rac) while (ap->num_folios < cur_pages) { folio = readahead_folio(rac); ap->folios[ap->num_folios] = folio; - ap->folio_descs[ap->num_folios].length = folio_size(folio); + ap->descs[ap->num_folios].length = folio_size(folio); ap->num_folios++; } fuse_send_readpages(ia, rac->file); @@ -1186,7 +1184,7 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, err = -EIO; short_write = ia->write.out.size < count; - offset = ap->folio_descs[0].offset; + offset = ap->descs[0].offset; count = ia->write.out.size; for (i = 0; i < ap->num_folios; i++) { struct folio *folio = ap->folios[i]; @@ -1224,7 +1222,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, int err; ap->args.in_pages = true; - ap->folio_descs[0].offset = offset; + ap->descs[0].offset = offset; do { size_t tmp; @@ -1261,7 +1259,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, err = 0; ap->folios[ap->num_folios] = folio; - ap->folio_descs[ap->num_folios].length = tmp; + 
ap->descs[ap->num_folios].length = tmp; ap->num_folios++; nr_pages++; @@ -1318,8 +1316,7 @@ static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii) unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii), fc->max_pages); - ap->uses_folios = true; - ap->folios = fuse_folios_alloc(nr_pages, GFP_KERNEL, &ap->folio_descs); + ap->folios = fuse_folios_alloc(nr_pages, GFP_KERNEL, &ap->descs); if (!ap->folios) { err = -ENOMEM; break; @@ -1563,13 +1560,13 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, /* Currently, all folios in FUSE are one page */ nfolios = DIV_ROUND_UP(ret, PAGE_SIZE); - ap->folio_descs[ap->num_folios].offset = start; - fuse_folio_descs_length_init(ap->folio_descs, ap->num_folios, nfolios); + ap->descs[ap->num_folios].offset = start; + fuse_folio_descs_length_init(ap->descs, ap->num_folios, nfolios); for (i = 0; i < nfolios; i++) ap->folios[i + ap->num_folios] = page_folio(pages[i]); ap->num_folios += nfolios; - ap->folio_descs[ap->num_folios - 1].length -= + ap->descs[ap->num_folios - 1].length -= (PAGE_SIZE - ret) & (PAGE_SIZE - 1); nr_pages += nfolios; } @@ -1613,14 +1610,14 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO; max_pages = iov_iter_npages(iter, fc->max_pages); - ia = fuse_io_folios_alloc(io, max_pages); + ia = fuse_io_alloc(io, max_pages); if (!ia) return -ENOMEM; if (fopen_direct_io && fc->direct_io_allow_mmap) { res = filemap_write_and_wait_range(mapping, pos, pos + count - 1); if (res) { - fuse_io_folios_free(ia); + fuse_io_free(ia); return res; } } @@ -1635,7 +1632,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, if (fopen_direct_io && write) { res = invalidate_inode_pages2_range(mapping, idx_from, idx_to); if (res) { - fuse_io_folios_free(ia); + fuse_io_free(ia); return res; } } @@ -1662,7 +1659,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter 
*iter, if (!io->async || nres < 0) { fuse_release_user_pages(&ia->ap, nres, io->should_dirty); - fuse_io_folios_free(ia); + fuse_io_free(ia); } ia = NULL; if (nres < 0) { @@ -1681,13 +1678,13 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, } if (count) { max_pages = iov_iter_npages(iter, fc->max_pages); - ia = fuse_io_folios_alloc(io, max_pages); + ia = fuse_io_alloc(io, max_pages); if (!ia) break; } } if (ia) - fuse_io_folios_free(ia); + fuse_io_free(ia); if (res > 0) *ppos = pos; @@ -2061,8 +2058,7 @@ static struct fuse_writepage_args *fuse_writepage_args_alloc(void) if (wpa) { ap = &wpa->ia.ap; ap->num_folios = 0; - ap->uses_folios = true; - ap->folios = fuse_folios_alloc(1, GFP_NOFS, &ap->folio_descs); + ap->folios = fuse_folios_alloc(1, GFP_NOFS, &ap->descs); if (!ap->folios) { kfree(wpa); wpa = NULL; @@ -2095,8 +2091,8 @@ static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struc folio_copy(tmp_folio, folio); ap->folios[folio_index] = tmp_folio; - ap->folio_descs[folio_index].offset = 0; - ap->folio_descs[folio_index].length = PAGE_SIZE; + ap->descs[folio_index].offset = 0; + ap->descs[folio_index].length = PAGE_SIZE; inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP); @@ -2202,10 +2198,10 @@ static bool fuse_pages_realloc(struct fuse_fill_wb_data *data) return false; memcpy(folios, ap->folios, sizeof(struct folio *) * ap->num_folios); - memcpy(descs, ap->folio_descs, sizeof(struct fuse_folio_desc) * ap->num_folios); + memcpy(descs, ap->descs, sizeof(struct fuse_folio_desc) * ap->num_folios); kfree(ap->folios); ap->folios = folios; - ap->folio_descs = descs; + ap->descs = descs; data->max_folios = nfolios; return true; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index c1ee272e0534..f4a47da5575b 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -266,12 +266,6 @@ struct fuse_arg { void *value; }; -/** FUSE page descriptor */ -struct fuse_page_desc { - 
unsigned int length; - unsigned int offset; -}; - /** FUSE folio descriptor */ struct fuse_folio_desc { unsigned int length; @@ -306,19 +300,9 @@ struct fuse_args { struct fuse_args_pages { struct fuse_args args; - union { - struct { - struct page **pages; - struct fuse_page_desc *descs; - unsigned int num_pages; - }; - struct { - struct folio **folios; - struct fuse_folio_desc *folio_descs; - unsigned int num_folios; - }; - }; - bool uses_folios; + struct folio **folios; + struct fuse_folio_desc *descs; + unsigned int num_folios; }; #define FUSE_ARGS(args) struct fuse_args args = {} diff --git a/fs/fuse/ioctl.c b/fs/fuse/ioctl.c index dc3e7c8ff97b..27115c618e94 100644 --- a/fs/fuse/ioctl.c +++ b/fs/fuse/ioctl.c @@ -201,12 +201,12 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); err = -ENOMEM; - ap.folios = fuse_folios_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.folio_descs); + ap.folios = fuse_folios_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.descs); iov_page = (struct iovec *) __get_free_page(GFP_KERNEL); if (!ap.folios || !iov_page) goto out; - fuse_folio_descs_length_init(ap.folio_descs, 0, fm->fc->max_pages); + fuse_folio_descs_length_init(ap.descs, 0, fm->fc->max_pages); /* * If restricted, initialize IO parameters as encoded in @cmd. 
@@ -244,7 +244,6 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, err = -ENOMEM; if (max_pages > fm->fc->max_pages) goto out; - ap.uses_folios = true; while (ap.num_folios < max_pages) { ap.folios[ap.num_folios] = folio_alloc(GFP_KERNEL | __GFP_HIGHMEM, 0); if (!ap.folios[ap.num_folios]) diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c index b6eea5e6b69f..7ebdba80fa52 100644 --- a/fs/fuse/readdir.c +++ b/fs/fuse/readdir.c @@ -346,10 +346,9 @@ static int fuse_readdir_uncached(struct file *file, struct dir_context *ctx) plus = fuse_use_readdirplus(inode, ctx); ap->args.out_pages = true; - ap->uses_folios = true; ap->num_folios = 1; ap->folios = &folio; - ap->folio_descs = &desc; + ap->descs = &desc; if (plus) { attr_version = fuse_get_attr_version(fm->fc); fuse_read_args_fill(&ia, file, ctx->pos, PAGE_SIZE, diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 4d2ddc831e7a..6706b7be8652 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -765,7 +765,6 @@ static void virtio_fs_request_complete(struct fuse_req *req, struct fuse_args *args; struct fuse_args_pages *ap; unsigned int len, i, thislen; - struct page *page; struct folio *folio; /* @@ -778,29 +777,15 @@ static void virtio_fs_request_complete(struct fuse_req *req, if (args->out_pages && args->page_zeroing) { len = args->out_args[args->out_numargs - 1].size; ap = container_of(args, typeof(*ap), args); - if (ap->uses_folios) { - for (i = 0; i < ap->num_folios; i++) { - thislen = ap->folio_descs[i].length; - if (len < thislen) { - WARN_ON(ap->folio_descs[i].offset); - folio = ap->folios[i]; - folio_zero_segment(folio, len, thislen); - len = 0; - } else { - len -= thislen; - } - } - } else { - for (i = 0; i < ap->num_pages; i++) { - thislen = ap->descs[i].length; - if (len < thislen) { - WARN_ON(ap->descs[i].offset); - page = ap->pages[i]; - zero_user_segment(page, len, thislen); - len = 0; - } else { - len -= thislen; - } + for (i = 0; i < ap->num_folios; i++) { + 
thislen = ap->descs[i].length; + if (len < thislen) { + WARN_ON(ap->descs[i].offset); + folio = ap->folios[i]; + folio_zero_segment(folio, len, thislen); + len = 0; + } else { + len -= thislen; } } } @@ -1286,22 +1271,16 @@ static void virtio_fs_send_interrupt(struct fuse_iqueue *fiq, struct fuse_req *r } /* Count number of scatter-gather elements required */ -static unsigned int sg_count_fuse_pages(struct fuse_args_pages *ap, - unsigned int total_len) +static unsigned int sg_count_fuse_folios(struct fuse_folio_desc *folio_descs, + unsigned int num_folios, + unsigned int total_len) { unsigned int i; unsigned int this_len; - if (ap->uses_folios) { - for (i = 0; i < ap->num_folios && total_len; i++) { - this_len = min(ap->folio_descs[i].length, total_len); - total_len -= this_len; - } - } else { - for (i = 0; i < ap->num_pages && total_len; i++) { - this_len = min(ap->descs[i].length, total_len); - total_len -= this_len; - } + for (i = 0; i < num_folios && total_len; i++) { + this_len = min(folio_descs[i].length, total_len); + total_len -= this_len; } return i; @@ -1319,7 +1298,8 @@ static unsigned int sg_count_fuse_req(struct fuse_req *req) if (args->in_pages) { size = args->in_args[args->in_numargs - 1].size; - total_sgs += sg_count_fuse_pages(ap, size); + total_sgs += sg_count_fuse_folios(ap->descs, ap->num_folios, + size); } if (!test_bit(FR_ISREPLY, &req->flags)) @@ -1332,35 +1312,28 @@ static unsigned int sg_count_fuse_req(struct fuse_req *req) if (args->out_pages) { size = args->out_args[args->out_numargs - 1].size; - total_sgs += sg_count_fuse_pages(ap, size); + total_sgs += sg_count_fuse_folios(ap->descs, ap->num_folios, + size); } return total_sgs; } -/* Add pages/folios to scatter-gather list and return number of elements used */ -static unsigned int sg_init_fuse_pages(struct scatterlist *sg, - struct fuse_args_pages *ap, - unsigned int total_len) +/* Add folios to scatter-gather list and return number of elements used */ +static unsigned int 
sg_init_fuse_folios(struct scatterlist *sg, + struct folio **folios, + struct fuse_folio_desc *folio_descs, + unsigned int num_folios, + unsigned int total_len) { unsigned int i; unsigned int this_len; - if (ap->uses_folios) { - for (i = 0; i < ap->num_folios && total_len; i++) { - sg_init_table(&sg[i], 1); - this_len = min(ap->folio_descs[i].length, total_len); - sg_set_folio(&sg[i], ap->folios[i], this_len, - ap->folio_descs[i].offset); - total_len -= this_len; - } - } else { - for (i = 0; i < ap->num_pages && total_len; i++) { - sg_init_table(&sg[i], 1); - this_len = min(ap->descs[i].length, total_len); - sg_set_page(&sg[i], ap->pages[i], this_len, ap->descs[i].offset); - total_len -= this_len; - } + for (i = 0; i < num_folios && total_len; i++) { + sg_init_table(&sg[i], 1); + this_len = min(folio_descs[i].length, total_len); + sg_set_folio(&sg[i], folios[i], this_len, folio_descs[i].offset); + total_len -= this_len; } return i; @@ -1384,8 +1357,10 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg, sg_init_one(&sg[total_sgs++], argbuf, len); if (argpages) - total_sgs += sg_init_fuse_pages(&sg[total_sgs], ap, - args[numargs - 1].size); + total_sgs += sg_init_fuse_folios(&sg[total_sgs], + ap->folios, ap->descs, + ap->num_folios, + args[numargs - 1].size); if (len_used) *len_used = len; -- Gitee From 3a12236a3513df8f133cf958d786590fc1e142cb Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Wed, 11 Dec 2024 12:55:56 -0800 Subject: [PATCH 2062/2138] fuse: fix direct io folio offset and length calculation ANBZ: #12255 commit 7a4f541873734f41f9645ec147cfae72ef3ffd00 upstream. For the direct io case, the pages from userspace may be part of a huge folio, even if all folios in the page cache for fuse are small. Fix the logic for calculating the offset and length of the folio for the direct io case, which currently incorrectly assumes that all folios encountered are one page size. 
Fixes: 3b97c3652d91 ("fuse: convert direct io to use folios") Signed-off-by: Joanne Koong Reviewed-by: Jingbo Xu Reviewed-by: Bernd Schubert Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- fs/fuse/file.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 5be240680b6f..8e557a9ca744 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1556,18 +1556,22 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, nbytes += ret; - ret += start; - /* Currently, all folios in FUSE are one page */ - nfolios = DIV_ROUND_UP(ret, PAGE_SIZE); - - ap->descs[ap->num_folios].offset = start; - fuse_folio_descs_length_init(ap->descs, ap->num_folios, nfolios); - for (i = 0; i < nfolios; i++) - ap->folios[i + ap->num_folios] = page_folio(pages[i]); - - ap->num_folios += nfolios; - ap->descs[ap->num_folios - 1].length -= - (PAGE_SIZE - ret) & (PAGE_SIZE - 1); + nfolios = DIV_ROUND_UP(ret + start, PAGE_SIZE); + + for (i = 0; i < nfolios; i++) { + struct folio *folio = page_folio(pages[i]); + unsigned int offset = start + + (folio_page_idx(folio, pages[i]) << PAGE_SHIFT); + unsigned int len = min_t(unsigned int, ret, PAGE_SIZE - start); + + ap->descs[ap->num_folios].offset = offset; + ap->descs[ap->num_folios].length = len; + ap->folios[ap->num_folios] = folio; + start = 0; + ret -= len; + ap->num_folios++; + } + nr_pages += nfolios; } kfree(pages); -- Gitee From d822d0e5459ebdf0ded08bfecf369022898de2e5 Mon Sep 17 00:00:00 2001 From: Bernd Schubert Date: Tue, 3 Dec 2024 00:01:10 +0100 Subject: [PATCH 2063/2138] fuse: Set *nbytesp=0 in fuse_get_user_pages on allocation failure ANBZ: #12255 commit 78f2560fc9fa5ccaaf23ac78edb732c08bad7a92 upstream. In fuse_get_user_pages(), set *nbytesp to 0 when struct page **pages allocation fails. 
This prevents the caller (fuse_direct_io) from making incorrect assumptions that could lead to NULL pointer dereferences when processing the request reply. Previously, *nbytesp was left unmodified on allocation failure, which could cause issues if the caller assumed pages had been added to ap->descs[] when they hadn't. Reported-by: syzbot+87b8e6ed25dbc41759f7@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=87b8e6ed25dbc41759f7 Fixes: 3b97c3652d91 ("fuse: convert direct io to use folios") Signed-off-by: Bernd Schubert Reviewed-by: Joanne Koong Tested-by: Dmitry Antipov Tested-by: David Howells Signed-off-by: Miklos Szeredi Signed-off-by: Jingbo Xu Reviewed-by: Joseph Qi Link: https://gitee.com/anolis/cloud-kernel/pulls/4491 --- fs/fuse/file.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 8e557a9ca744..ccc91256c4d6 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1540,8 +1540,10 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, */ struct page **pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL); - if (!pages) - return -ENOMEM; + if (!pages) { + ret = -ENOMEM; + goto out; + } while (nbytes < *nbytesp && nr_pages < max_pages) { unsigned nfolios, i; @@ -1587,6 +1589,7 @@ static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, else ap->args.out_pages = true; +out: *nbytesp = nbytes; return ret < 0 ? ret : 0; -- Gitee From d0773d25401890bfab28e1143264a773392b50fe Mon Sep 17 00:00:00 2001 From: Qinyun Tan Date: Wed, 15 Jan 2025 11:26:33 +0800 Subject: [PATCH 2064/2138] anolis: spec: remove local modules sign ANBZ: #13369 ANCK do not need local modules sign now, just remove relative support from kernel spec. 
Signed-off-by: Qinyun Tan Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4472 --- anolis/rpm/kernel.spec.template | 43 +-------------------------------- 1 file changed, 1 insertion(+), 42 deletions(-) diff --git a/anolis/rpm/kernel.spec.template b/anolis/rpm/kernel.spec.template index ea0675613d0d..e5c978e59695 100644 --- a/anolis/rpm/kernel.spec.template +++ b/anolis/rpm/kernel.spec.template @@ -8,9 +8,6 @@ # We have to override the new %%install behavior because, well... the kernel is special. %global __spec_install_pre %{___build_pre} -# Sign modules on all arches -%global signmodules 0 - # define buildid .local %global dist %%DIST%% @@ -262,8 +259,6 @@ BuildRequires: asciidoc Source0: linux-%{kernelversion}-%{pkg_release}.tar.xz -%define modsign_cmd %{SOURCE18} - Source20: kernel-%{version}-aarch64.config Source21: kernel-%{version}-aarch64-debug.config Source39: kernel-%{version}-x86_64.config @@ -759,11 +754,6 @@ BuildKernel() { %{make} -s %{?_smp_mflags} mrproper cp configs/$Config .config - %if %{signmodules} - cp %{SOURCE11} certs/. - cp %{SOURCE12} certs/. - %endif - Arch=`head -1 .config | cut -b 3-` echo USING ARCH=$Arch @@ -1020,14 +1010,6 @@ BuildKernel() { rm System.map popd -%if %{signmodules} - if [ $DoModules -eq 1 ]; then - # Save the signing keys so we can sign the modules in __modsign_install_post - cp certs/signing_key.pem certs/signing_key.pem.sign${Flav} - cp certs/signing_key.x509 certs/signing_key.x509.sign${Flav} - fi -%endif - # Move the devel headers out of the root file system mkdir -p $RPM_BUILD_ROOT/usr/src/kernels mv $RPM_BUILD_ROOT/lib/modules/$KernelVer/build $RPM_BUILD_ROOT/$DevelDir @@ -1146,28 +1128,6 @@ chmod -R a=rX Documentation find Documentation -type d | xargs chmod u+w %endif -# In the modsign case, we do 3 things. 1) We check the "flavour" and hard -# code the value in the following invocations. 
This is somewhat sub-optimal -# but we're doing this inside of an RPM macro and it isn't as easy as it -# could be because of that. 2) We restore the .tmp_versions/ directory from -# the one we saved off in BuildKernel above. This is to make sure we're -# signing the modules we actually built/installed in that flavour. 3) We -# grab the arch and invoke mod-sign.sh command to actually sign the modules. -# -# We have to do all of those things _after_ find-debuginfo runs, otherwise -# that will strip the signature off of the modules. - -%define __modsign_install_post \ - if [ "%{signmodules}" -eq "1" ]; then \ - if [ "%{with_debug}" -ne "0" ]; then \ - %{modsign_cmd} certs/signing_key.pem.sign+debug certs/signing_key.x509.sign+debug $RPM_BUILD_ROOT/lib/modules/%{KVERREL}+debug/ \ - fi \ - if [ "%{with_up}" -ne "0" ]; then \ - %{modsign_cmd} certs/signing_key.pem.sign certs/signing_key.x509.sign $RPM_BUILD_ROOT/lib/modules/%{KVERREL}/ \ - fi \ - fi \ -%{nil} - ### ### Special hacks for debuginfo subpackages. ### @@ -1193,8 +1153,7 @@ find Documentation -type d | xargs chmod u+w %define __spec_install_post \ %{?__debug_package:%{__debug_install_post}}\ %{__arch_install_post}\ - %{__os_install_post}\ - %{__modsign_install_post} + %{__os_install_post} ### ### install -- Gitee From 3b307230116a6a1003889c4a8378188efffb0964 Mon Sep 17 00:00:00 2001 From: Qiao Ma Date: Wed, 15 Jan 2025 15:01:55 +0800 Subject: [PATCH 2065/2138] anolis: configs: adjust examination rules to adapt for specification ANBZ: #8598 The kconfig specification which was imported recently has some differences comparing to the examination rules, align them. 
Signed-off-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4502 --- anolis/configs/examination/L0-MANDATORY/arm64.config | 2 -- anolis/configs/examination/L0-MANDATORY/loongarch.config | 2 -- anolis/configs/examination/L0-MANDATORY/x86.config | 2 -- anolis/configs/examination/L1-RECOMMEND/loongarch.config | 2 +- 4 files changed, 1 insertion(+), 7 deletions(-) diff --git a/anolis/configs/examination/L0-MANDATORY/arm64.config b/anolis/configs/examination/L0-MANDATORY/arm64.config index 58414684fe3d..0f5c593d2b4d 100644 --- a/anolis/configs/examination/L0-MANDATORY/arm64.config +++ b/anolis/configs/examination/L0-MANDATORY/arm64.config @@ -1,11 +1,9 @@ # UNLIMITED CONFIG_LSM -CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" # CHOICE CONFIG_NODES_SHIFT 6/8/10 # RANGE CONFIG_NR_CPUS 1024,8192 CONFIG_64BIT=y CONFIG_ACPI=y CONFIG_ACPI_APEI_PCIEAER=y -CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_IPMI=m CONFIG_ACPI_NUMA=y CONFIG_ACPI_PCI_SLOT=y diff --git a/anolis/configs/examination/L0-MANDATORY/loongarch.config b/anolis/configs/examination/L0-MANDATORY/loongarch.config index 7ffa35318f42..11899dd44cad 100644 --- a/anolis/configs/examination/L0-MANDATORY/loongarch.config +++ b/anolis/configs/examination/L0-MANDATORY/loongarch.config @@ -1,11 +1,9 @@ # UNLIMITED CONFIG_LSM ## CONFIG_NFS_FSCACHE=y -CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" CONFIG_NODES_SHIFT=6 CONFIG_NR_CPUS=256 CONFIG_64BIT=y CONFIG_ACPI=y -CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_IPMI=m CONFIG_ACPI_NUMA=y CONFIG_ACPI_PCI_SLOT=y diff --git a/anolis/configs/examination/L0-MANDATORY/x86.config b/anolis/configs/examination/L0-MANDATORY/x86.config index 2025290f6549..19ca9ca965b1 100644 --- a/anolis/configs/examination/L0-MANDATORY/x86.config +++ b/anolis/configs/examination/L0-MANDATORY/x86.config @@ -1,5 +1,4 @@ # UNLIMITED CONFIG_LSM -CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" # CHOICE CONFIG_NODES_SHIFT 6/8/10 # RANGE CONFIG_NR_CPUS 1024,8192 @@ -10,7 +9,6 @@ 
CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" CONFIG_64BIT=y CONFIG_ACPI=y CONFIG_ACPI_APEI_PCIEAER=y -CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_IPMI=m CONFIG_ACPI_NUMA=y CONFIG_ACPI_PCI_SLOT=y diff --git a/anolis/configs/examination/L1-RECOMMEND/loongarch.config b/anolis/configs/examination/L1-RECOMMEND/loongarch.config index d842bed7dca5..09acb6eaaf43 100644 --- a/anolis/configs/examination/L1-RECOMMEND/loongarch.config +++ b/anolis/configs/examination/L1-RECOMMEND/loongarch.config @@ -1,6 +1,6 @@ CONFIG_ARCH_FORCE_MAX_ORDER=11 # UNLIMITED CONFIG_BUILD_SALT -# UNLIMITED CONFIG_EXT3_FS +CONFIG_EXT3_FS=y CONFIG_HZ=250 CONFIG_LOG_BUF_SHIFT=18 CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 -- Gitee From 2def03fa8590c9d80fe69848461d4d4ca8e43f85 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sat, 11 Jan 2025 15:52:39 +0800 Subject: [PATCH 2066/2138] anolis: Revert "anolis: mm/page_alloc: don't use PCP list for THP-sized allocations when using PF_MEMALLOC_PIN" ANBZ: #13479 The commit debb128a1015 ("anolis: mm/page_alloc: don't use PCP list for THP-sized allocations when using PF_MEMALLOC_PIN") is a workaround version of the commit bf14ed81f571 ("mm/page_alloc: Separate THP PCP into movable and non-movable categories") on the Linux mainline. Now, the commit bf14ed81f571 ("mm/page_alloc: Separate THP PCP into movable and non-movable categories") has been backported to the branch devel-6.6 of cloud-kernel. Revert the workaround version. 
Fixes: debb128a1015 ("anolis: mm/page_alloc: don't use PCP list for THP-sized allocations when using PF_MEMALLOC_PIN") Signed-off-by: hanliyang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4483 --- mm/page_alloc.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7955e5c22f70..cb5651bbad3b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2816,20 +2816,10 @@ struct page *rmqueue(struct zone *preferred_zone, WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); if (likely(pcp_allowed_order(order))) { -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA || - order != pageblock_order) { - page = rmqueue_pcplist(preferred_zone, zone, order, - migratetype, alloc_flags); - if (likely(page)) - goto out; - } -#else page = rmqueue_pcplist(preferred_zone, zone, order, migratetype, alloc_flags); if (likely(page)) goto out; -#endif } page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, -- Gitee From 597bb6b60ed912f1fc7720c12c4a16544691b01d Mon Sep 17 00:00:00 2001 From: yangge Date: Wed, 3 Jul 2024 20:02:33 +0800 Subject: [PATCH 2067/2138] mm/gup: clear the LRU flag of a page before adding to LRU batch ANBZ: #13479 commit 33dfe9204f29b415bbc0abb1a50642d1ba94f5e9 upstream. If a large number of CMA memory are configured in system (for example, the CMA memory accounts for 50% of the system memory), starting a virtual virtual machine with device passthrough, it will call pin_user_pages_remote(..., FOLL_LONGTERM, ...) to pin memory. Normally if a page is present and in CMA area, pin_user_pages_remote() will migrate the page from CMA area to non-CMA area because of FOLL_LONGTERM flag. But the current code will cause the migration failure due to unexpected page refcounts, and eventually cause the virtual machine fail to start. If a page is added in LRU batch, its refcount increases one, remove the page from LRU batch decreases one. 
Page migration requires the page is not referenced by others except page mapping. Before migrating a page, we should try to drain the page from LRU batch in case the page is in it, however, folio_test_lru() is not sufficient to tell whether the page is in LRU batch or not, if the page is in LRU batch, the migration will fail. To solve the problem above, we modify the logic of adding to LRU batch. Before adding a page to LRU batch, we clear the LRU flag of the page so that we can check whether the page is in LRU batch by folio_test_lru(page). It's quite valuable, because likely we don't want to blindly drain the LRU batch simply because there is some unexpected reference on a page, as described above. This change makes the LRU flag of a page invisible for longer, which may impact some programs. For example, as long as a page is on a LRU batch, we cannot isolate it, and we cannot check if it's an LRU page. Further, a page can now only be on exactly one LRU batch. This doesn't seem to matter much, because a new page is allocated from buddy and added to the lru batch, or be isolated, it's LRU flag may also be invisible for a long time. 
Link: https://lkml.kernel.org/r/1720075944-27201-1-git-send-email-yangge1116@126.com Link: https://lkml.kernel.org/r/1720008153-16035-1-git-send-email-yangge1116@126.com Fixes: 9a4e9f3b2d73 ("mm: update get_user_pages_longterm to migrate pages allocated from CMA region") Signed-off-by: yangge Cc: Aneesh Kumar K.V Cc: Baolin Wang Cc: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Hugh Dickins Signed-off-by: Andrew Morton Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4483 --- mm/swap.c | 43 +++++++++++++++++++++++++++++++------------ 1 file changed, 31 insertions(+), 12 deletions(-) diff --git a/mm/swap.c b/mm/swap.c index 104da8994f27..ddc765b4288f 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -201,10 +201,6 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn) for (i = 0; i < folio_batch_count(fbatch); i++) { struct folio *folio = fbatch->folios[i]; - /* block memcg migration while the folio moves between lru */ - if (move_fn != lru_add_fn && !folio_test_clear_lru(folio)) - continue; - lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags); move_fn(lruvec, folio); @@ -246,11 +242,16 @@ static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio) void folio_rotate_reclaimable(struct folio *folio) { if (!folio_test_locked(folio) && !folio_test_dirty(folio) && - !folio_test_unevictable(folio) && folio_test_lru(folio)) { + !folio_test_unevictable(folio)) { struct folio_batch *fbatch; unsigned long flags; folio_get(folio); + if (!folio_test_clear_lru(folio)) { + folio_put(folio); + return; + } + local_lock_irqsave(&lru_rotate.lock, flags); fbatch = this_cpu_ptr(&lru_rotate.fbatch); folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn); @@ -343,11 +344,15 @@ static void folio_activate_drain(int cpu) void folio_activate(struct folio *folio) { - if (folio_test_lru(folio) && !folio_test_active(folio) && - !folio_test_unevictable(folio)) { + if (!folio_test_active(folio) && 
!folio_test_unevictable(folio)) { struct folio_batch *fbatch; folio_get(folio); + if (!folio_test_clear_lru(folio)) { + folio_put(folio); + return; + } + local_lock(&cpu_fbatches.lock); fbatch = this_cpu_ptr(&cpu_fbatches.activate); folio_batch_add_and_move(fbatch, folio, folio_activate_fn); @@ -688,6 +693,11 @@ void deactivate_file_folio(struct folio *folio) return; folio_get(folio); + if (!folio_test_clear_lru(folio)) { + folio_put(folio); + return; + } + local_lock(&cpu_fbatches.lock); fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file); folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn); @@ -704,11 +714,16 @@ void deactivate_file_folio(struct folio *folio) */ void folio_deactivate(struct folio *folio) { - if (folio_test_lru(folio) && !folio_test_unevictable(folio) && - (folio_test_active(folio) || lru_gen_enabled())) { + if (!folio_test_unevictable(folio) && (folio_test_active(folio) || + lru_gen_enabled())) { struct folio_batch *fbatch; folio_get(folio); + if (!folio_test_clear_lru(folio)) { + folio_put(folio); + return; + } + local_lock(&cpu_fbatches.lock); fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate); folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn); @@ -725,12 +740,16 @@ void folio_deactivate(struct folio *folio) */ void folio_mark_lazyfree(struct folio *folio) { - if (folio_test_lru(folio) && folio_test_anon(folio) && - folio_test_swapbacked(folio) && !folio_test_swapcache(folio) && - !folio_test_unevictable(folio)) { + if (folio_test_anon(folio) && folio_test_swapbacked(folio) && + !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) { struct folio_batch *fbatch; folio_get(folio); + if (!folio_test_clear_lru(folio)) { + folio_put(folio); + return; + } + local_lock(&cpu_fbatches.lock); fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree); folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn); -- Gitee From b0d313b628cb248a248ad0b44df41079c8586ea6 Mon Sep 17 00:00:00 2001 From: Chris Li Date: Thu, 5 Sep 2024 
01:08:17 -0700 Subject: [PATCH 2068/2138] mm: vmscan.c: fix OOM on swap stress test ANBZ: #13479 commit 0885ef4705607936fc36a38fd74356e1c465b023 upstream. I found a regression on mm-unstable during my swap stress test, using tmpfs to compile linux. The test OOM very soon after the make spawns many cc processes. It bisects down to this change: 33dfe9204f29b415bbc0abb1a50642d1ba94f5e9 (mm/gup: clear the LRU flag of a page before adding to LRU batch) Yu Zhao propose the fix: "I think this is one of the potential side effects -- Huge mentioned earlier about isolate_lru_folios():" I test that with it the swap stress test no longer OOM. Link: https://lore.kernel.org/r/CAOUHufYi9h0kz5uW3LHHS3ZrVwEq-kKp8S6N-MZUmErNAXoXmw@mail.gmail.com/ Link: https://lkml.kernel.org/r/20240905-lru-flag-v2-1-8a2d9046c594@kernel.org Fixes: 33dfe9204f29 ("mm/gup: clear the LRU flag of a page before adding to LRU batch") Signed-off-by: Chris Li Suggested-by: Yu Zhao Suggested-by: Hugh Dickins Closes: https://lore.kernel.org/all/CAF8kJuNP5iTj2p07QgHSGOJsiUfYpJ2f4R1Q5-3BN9JiD9W_KA@mail.gmail.com/ Signed-off-by: Andrew Morton Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4483 --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index f330d82b28e8..3f4adabd8b8d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -5037,7 +5037,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c } /* ineligible */ - if (zone > sc->reclaim_idx) { + if (!folio_test_lru(folio) || zone > sc->reclaim_idx) { gen = folio_inc_gen(lruvec, folio, false); list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); return true; -- Gitee From c4b20580947b8c39fb624aa26acc1560353f2393 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sun, 12 Jan 2025 17:21:08 +0800 Subject: [PATCH 2069/2138] anolis: Revert "anolis: mm/gup: don't check if a page is in lru before draining it" ANBZ: #13479 The commit 7e3fc896ad4d ("anolis: 
mm/gup: don't check if a page is in lru before draining it") is a workaround version of the commit 33dfe9204f29 ("mm/gup: clear the LRU flag of a page before adding to LRU batch") on the Linux mainline. We have backported the Linux mainline's version to the branch devel-6.6 of cloud-kernel. Revert the workaround version. Fixes: 7e3fc896ad4d ("anolis: mm/gup: don't check if a page is in lru before draining it") Signed-off-by: hanliyang Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4483 --- mm/gup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/gup.c b/mm/gup.c index 0e2feae1ed35..fb07b00460d7 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2028,7 +2028,7 @@ static unsigned long collect_longterm_unpinnable_pages( continue; } - if (drain_allow) { + if (!folio_test_lru(folio) && drain_allow) { lru_add_drain_all(); drain_allow = false; } -- Gitee From cd81939809fa4618e09b4f35f09d75c3a2cd20fc Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Fri, 29 Jan 2021 15:29:57 +0800 Subject: [PATCH 2070/2138] anolis: mm, memcg: record memory stall latency in every memcg ANBZ: #13556 to #32655321 This probes and calculates the latency of global direct reclaim, memcg direct reclaim, direct compact, global direct swapout, memcg direct swapout, and direct swapin, respectively, and then group into the latency histogram in the corresponding memcg. Besides, the total latency is accumulated each time the histogram is updated, and the latency in each memcg is aggregated from all child memcgs, respectively. We name this feature as memsli, and sample usage of global direct reclaim latency is shown below: $ cat memory.direct_reclaim_global_latency 0-1ms: 228 1-5ms: 283 5-10ms: 0 10-100ms: 0 100-500ms: 0 500-1000ms: 0 >=1000ms: 0 total(ms): 539 Each line indicates the count of global direct reclaim within the appropriate latency range. 
To clear the latency histogram: $ echo 0 > memory.direct_reclaim_global_latency $ cat memory.direct_reclaim_global_latency 0-1ms: 0 1-5ms: 0 5-10ms: 0 10-100ms: 0 100-500ms: 0 500-1000ms: 0 >=1000ms: 0 total(ms): 0 Since this feature also records latency histogram of swapout and swapin, which are NOT always in the slow memory path, the overhead introduced could be non trivial in some extreme scenarios. This therefore provides a procfs interface to enable or disable this feature. The feature is enabled by default, and you can disable it by: $ echo 0 > /proc/memsli/enabled Apparently, you can check current switch status by: $ cat /proc/memsli/enabled Note that disabling memsli at runtime will NOT clear the existing latency histogram. You still need to manually reset the specified latency histograms by echo 0 into the corresponding cgroup control files. Signed-off-by: Xu Yu Reviewed-by: Xunlei Pang Reviewed-by: Yang Shi Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4499 --- include/linux/memcontrol.h | 44 +++++++ mm/Kconfig | 9 ++ mm/memcontrol.c | 254 +++++++++++++++++++++++++++++++++++++ mm/memory.c | 11 +- mm/page_alloc.c | 6 + mm/shmem.c | 3 + mm/vmscan.c | 8 ++ 7 files changed, 333 insertions(+), 2 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index c8d14d42c4b6..45ffa4fbede9 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -77,6 +77,33 @@ struct mem_cgroup_reclaim_cookie { struct alloc_context; +enum mem_lat_stat_item { + MEM_LAT_GLOBAL_DIRECT_RECLAIM, /* global direct reclaim latency */ + MEM_LAT_MEMCG_DIRECT_RECLAIM, /* memcg direct reclaim latency */ + MEM_LAT_DIRECT_COMPACT, /* direct compact latency */ + MEM_LAT_GLOBAL_DIRECT_SWAPOUT, /* global direct swapout latency */ + MEM_LAT_MEMCG_DIRECT_SWAPOUT, /* memcg direct swapout latency */ + MEM_LAT_DIRECT_SWAPIN, /* direct swapin latency */ + MEM_LAT_NR_STAT, +}; + +/* Memory latency histogram 
distribution, in milliseconds */ +enum mem_lat_count_t { + MEM_LAT_0_1, + MEM_LAT_1_5, + MEM_LAT_5_10, + MEM_LAT_10_100, + MEM_LAT_100_500, + MEM_LAT_500_1000, + MEM_LAT_1000_INF, + MEM_LAT_TOTAL, + MEM_LAT_NR_COUNT, +}; + +struct mem_cgroup_lat_stat_cpu { + unsigned long item[MEM_LAT_NR_STAT][MEM_LAT_NR_COUNT]; +}; + #ifdef CONFIG_MEMCG #define MEM_CGROUP_ID_SHIFT 16 @@ -369,6 +396,10 @@ struct mem_cgroup { struct deferred_split deferred_split_queue; #endif +#ifdef CONFIG_MEMSLI + struct mem_cgroup_lat_stat_cpu __percpu *lat_stat_cpu; +#endif + #ifdef CONFIG_PAGECACHE_LIMIT bool allow_pgcache_limit; unsigned long pgcache_limit_size; @@ -1726,6 +1757,19 @@ static inline void memcg_check_wmark_min_adj(struct task_struct *curr, } #endif /* CONFIG_MEMCG */ +#ifdef CONFIG_MEMSLI +extern void memcg_lat_stat_start(u64 *start); +extern void memcg_lat_stat_end(enum mem_lat_stat_item sidx, u64 start); +#else +static inline void memcg_lat_stat_start(u64 *start) +{ +} + +static inline void memcg_lat_stat_end(enum mem_lat_stat_item sidx, u64 start) +{ +} +#endif /* CONFIG_MEMSLI */ + #ifdef CONFIG_ASYNC_FORK static inline unsigned long task_async_fork(struct task_struct *p) { diff --git a/mm/Kconfig b/mm/Kconfig index b6338333d792..4d1b98f6bb5e 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -1164,6 +1164,15 @@ config DMAPOOL_TEST config ARCH_HAS_PTE_SPECIAL bool +config MEMSLI + bool "Support memory latency histogram, a.k.a, memsli" + depends on MEMCG + default y + help + This probes and calculates various kinds of memory latency, and then + groups into the corresponding latency histogram in every memory + cgroup, respectively. + # # Some architectures require a special hugepage directory format that is # required to support multiple hugepage sizes. 
For example a4fe3ce76 diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c1ef72b4be74..5287c47b4bf2 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -66,6 +66,7 @@ #include #include #include +#include #include "internal.h" #include #include @@ -94,6 +95,11 @@ static bool cgroup_memory_nosocket __ro_after_init; /* Kernel memory accounting disabled? */ static bool cgroup_memory_nokmem __ro_after_init; +#ifdef CONFIG_MEMSLI +/* Cgroup memory SLI disabled? */ +static DEFINE_STATIC_KEY_FALSE(cgroup_memory_nosli); +#endif /* CONFIG_MEMSLI */ + /* BPF memory accounting disabled? */ static bool cgroup_memory_nobpf __ro_after_init; @@ -2792,12 +2798,14 @@ void mem_cgroup_handle_over_high(gfp_t gfp_mask) int nr_retries = MAX_RECLAIM_RETRIES; struct mem_cgroup *memcg; bool in_retry = false; + u64 start; if (likely(!nr_pages)) return; memcg = get_mem_cgroup_from_mm(current->mm); current->memcg_nr_pages_over_high = 0; + memcg_lat_stat_start(&start); retry_reclaim: /* @@ -2859,6 +2867,7 @@ void mem_cgroup_handle_over_high(gfp_t gfp_mask) psi_memstall_leave(&pflags); out: + memcg_lat_stat_end(MEM_LAT_MEMCG_DIRECT_RECLAIM, start); css_put(&memcg->css); } @@ -2875,6 +2884,7 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, bool drained = false; bool raised_max_event = false; unsigned long pflags; + u64 start; retry: if (consume_stock(memcg, nr_pages)) @@ -2915,10 +2925,12 @@ static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, memcg_memory_event(mem_over_limit, MEMCG_MAX); raised_max_event = true; + memcg_lat_stat_start(&start); psi_memstall_enter(&pflags); nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, gfp_mask, reclaim_options); psi_memstall_leave(&pflags); + memcg_lat_stat_end(MEM_LAT_MEMCG_DIRECT_RECLAIM, start); if (mem_cgroup_margin(mem_over_limit) >= nr_pages) goto retry; @@ -4520,6 +4532,144 @@ static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, return 0; } +#ifdef CONFIG_MEMSLI 
+#define MEMCG_LAT_STAT_SMP_WRITE(name, sidx) \ +static void smp_write_##name(void *info) \ +{ \ + struct mem_cgroup *memcg = (struct mem_cgroup *)info; \ + int i; \ + \ + for (i = MEM_LAT_0_1; i < MEM_LAT_NR_COUNT; i++) \ + this_cpu_write(memcg->lat_stat_cpu->item[sidx][i], 0); \ +} + +MEMCG_LAT_STAT_SMP_WRITE(global_direct_reclaim, MEM_LAT_GLOBAL_DIRECT_RECLAIM) +MEMCG_LAT_STAT_SMP_WRITE(memcg_direct_reclaim, MEM_LAT_MEMCG_DIRECT_RECLAIM) +MEMCG_LAT_STAT_SMP_WRITE(direct_compact, MEM_LAT_DIRECT_COMPACT) +MEMCG_LAT_STAT_SMP_WRITE(global_direct_swapout, MEM_LAT_GLOBAL_DIRECT_SWAPOUT) +MEMCG_LAT_STAT_SMP_WRITE(memcg_direct_swapout, MEM_LAT_MEMCG_DIRECT_SWAPOUT) +MEMCG_LAT_STAT_SMP_WRITE(direct_swapin, MEM_LAT_DIRECT_SWAPIN) + +smp_call_func_t smp_memcg_lat_write_funcs[] = { + smp_write_global_direct_reclaim, + smp_write_memcg_direct_reclaim, + smp_write_direct_compact, + smp_write_global_direct_swapout, + smp_write_memcg_direct_swapout, + smp_write_direct_swapin, +}; + +static int memcg_lat_stat_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + enum mem_lat_stat_item idx = cft->private; + smp_call_func_t func = smp_memcg_lat_write_funcs[idx]; + + if (val != 0) + return -EINVAL; + + func((void *)memcg); + smp_call_function(func, (void *)memcg, 1); + + return 0; +} + +static u64 memcg_lat_stat_gather(struct mem_cgroup *memcg, + enum mem_lat_stat_item sidx, + enum mem_lat_count_t cidx) +{ + u64 sum = 0; + int cpu; + + for_each_possible_cpu(cpu) + sum += per_cpu_ptr(memcg->lat_stat_cpu, cpu)->item[sidx][cidx]; + + return sum; +} + +static int memcg_lat_stat_show(struct seq_file *m, void *v) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + enum mem_lat_stat_item idx = seq_cft(m)->private; + + seq_printf(m, "0-1ms: \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_0_1)); + seq_printf(m, "1-5ms: \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_1_5)); + seq_printf(m, 
"5-10ms: \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_5_10)); + seq_printf(m, "10-100ms: \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_10_100)); + seq_printf(m, "100-500ms: \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_100_500)); + seq_printf(m, "500-1000ms: \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_500_1000)); + seq_printf(m, ">=1000ms: \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_1000_INF)); + seq_printf(m, "total(ms): \t%llu\n", + memcg_lat_stat_gather(memcg, idx, MEM_LAT_TOTAL) >> 20); + + return 0; +} + +static enum mem_lat_count_t get_mem_lat_count_idx(u64 duration) +{ + enum mem_lat_count_t idx; + + duration = duration >> 20; + if (duration < 1) + idx = MEM_LAT_0_1; + else if (duration < 5) + idx = MEM_LAT_1_5; + else if (duration < 10) + idx = MEM_LAT_5_10; + else if (duration < 100) + idx = MEM_LAT_10_100; + else if (duration < 500) + idx = MEM_LAT_100_500; + else if (duration < 1000) + idx = MEM_LAT_500_1000; + else + idx = MEM_LAT_1000_INF; + + return idx; +} + +void memcg_lat_stat_start(u64 *start) +{ + if (!static_branch_unlikely(&cgroup_memory_nosli) && + !mem_cgroup_disabled()) + *start = ktime_get_ns(); + else + *start = 0; +} + +void memcg_lat_stat_end(enum mem_lat_stat_item sidx, u64 start) +{ + struct mem_cgroup *memcg, *iter; + enum mem_lat_count_t cidx; + u64 duration; + + if (static_branch_unlikely(&cgroup_memory_nosli) || + mem_cgroup_disabled()) + return; + + if (start == 0) + return; + + duration = ktime_get_ns() - start; + cidx = get_mem_lat_count_idx(duration); + memcg = get_mem_cgroup_from_mm(current->mm); + for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) { + this_cpu_inc(iter->lat_stat_cpu->item[sidx][cidx]); + this_cpu_add(iter->lat_stat_cpu->item[sidx][MEM_LAT_TOTAL], + duration); + } + css_put(&memcg->css); +} +#endif /* CONFIG_MEMSLI */ + static u64 mem_cgroup_priority_read(struct cgroup_subsys_state *css, struct cftype *cft) { @@ -5791,6 +5941,44 @@ static struct cftype 
mem_cgroup_legacy_files[] = { .name = "stat", .seq_show = memory_stat_show, }, +#ifdef CONFIG_MEMSLI + { + .name = "direct_reclaim_global_latency", + .private = MEM_LAT_GLOBAL_DIRECT_RECLAIM, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_reclaim_memcg_latency", + .private = MEM_LAT_MEMCG_DIRECT_RECLAIM, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_compact_latency", + .private = MEM_LAT_DIRECT_COMPACT, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_swapout_global_latency", + .private = MEM_LAT_GLOBAL_DIRECT_SWAPOUT, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_swapout_memcg_latency", + .private = MEM_LAT_MEMCG_DIRECT_SWAPOUT, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_swapin_latency", + .private = MEM_LAT_DIRECT_SWAPIN, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, +#endif /* CONFIG_MEMSLI */ { .name = "exstat", .seq_show = memcg_exstat_show, @@ -6116,6 +6304,9 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) kfree(memcg->vmstats); free_percpu(memcg->vmstats_percpu); free_percpu(memcg->exstat_cpu); +#ifdef CONFIG_MEMSLI + free_percpu(memcg->lat_stat_cpu); +#endif kfree(memcg); } @@ -6152,6 +6343,13 @@ static struct mem_cgroup *mem_cgroup_alloc(void) if (!memcg->vmstats_percpu) goto fail; +#ifdef CONFIG_MEMSLI + memcg->lat_stat_cpu = alloc_percpu_gfp(struct mem_cgroup_lat_stat_cpu, + GFP_KERNEL_ACCOUNT); + if (!memcg->lat_stat_cpu) + goto fail; +#endif + memcg->exstat_cpu = alloc_percpu(struct mem_cgroup_exstat_cpu); if (!memcg->exstat_cpu) goto fail; @@ -8361,6 +8559,58 @@ static int __init enable_cgroup_writeback_v1(char *s) __setup("cgwb_v1", enable_cgroup_writeback_v1); #endif +#ifdef CONFIG_MEMSLI +static int memsli_enabled_show(struct seq_file *m, void *v) 
+{ + seq_printf(m, "%d\n", !static_key_enabled(&cgroup_memory_nosli)); + return 0; +} + +static int memsli_enabled_open(struct inode *inode, struct file *file) +{ + return single_open(file, memsli_enabled_show, NULL); +} + +static ssize_t memsli_enabled_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + char val = -1; + int ret = count; + + if (count < 1 || *ppos) { + ret = -EINVAL; + goto out; + } + + if (copy_from_user(&val, ubuf, 1)) { + ret = -EFAULT; + goto out; + } + + switch (val) { + case '0': + static_branch_enable(&cgroup_memory_nosli); + break; + case '1': + static_branch_disable(&cgroup_memory_nosli); + break; + default: + ret = -EINVAL; + } + +out: + return ret; +} + +static const struct proc_ops memsli_enabled_proc_ops = { + .proc_open = memsli_enabled_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_write = memsli_enabled_write, + .proc_release = single_release, +}; +#endif /* CONFIG_MEMSLI */ + /* * subsys_initcall() for memory controller. 
* @@ -8372,6 +8622,10 @@ __setup("cgwb_v1", enable_cgroup_writeback_v1); static int __init mem_cgroup_init(void) { int cpu, node; +#ifdef CONFIG_MEMSLI + proc_mkdir("memsli", NULL); + proc_create("memsli/enabled", 0600, NULL, &memsli_enabled_proc_ops); +#endif /* CONFIG_MEMSLI */ /* * Currently s32 type (can refer to struct batched_lruvec_stat) is diff --git a/mm/memory.c b/mm/memory.c index 2bded21e4d25..5b935ff35ca2 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5791,8 +5791,15 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) if (!vmf->pte) return do_pte_missing(vmf); - if (!pte_present(vmf->orig_pte)) - return do_swap_page(vmf); + if (!pte_present(vmf->orig_pte)) { + vm_fault_t retval; + u64 start; + + memcg_lat_stat_start(&start); + retval = do_swap_page(vmf); + memcg_lat_stat_end(MEM_LAT_DIRECT_SWAPIN, start); + return retval; + } if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) return do_numa_page(vmf); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cb5651bbad3b..fccb6908192a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3424,10 +3424,12 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct page *page = NULL; unsigned long pflags; unsigned int noreclaim_flag; + u64 start; if (!order) return NULL; + memcg_lat_stat_start(&start); psi_memstall_enter(&pflags); delayacct_compact_start(); noreclaim_flag = memalloc_noreclaim_save(); @@ -3437,6 +3439,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, memalloc_noreclaim_restore(noreclaim_flag); psi_memstall_leave(&pflags); + memcg_lat_stat_end(MEM_LAT_DIRECT_COMPACT, start); delayacct_compact_end(); if (*compact_result == COMPACT_SKIPPED) @@ -3668,11 +3671,13 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, { unsigned int noreclaim_flag; unsigned long progress; + u64 start; cond_resched(); /* We now go into synchronous reclaim */ cpuset_memory_pressure_bump(); + memcg_lat_stat_start(&start); fs_reclaim_acquire(gfp_mask); 
noreclaim_flag = memalloc_noreclaim_save(); @@ -3681,6 +3686,7 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, memalloc_noreclaim_restore(noreclaim_flag); fs_reclaim_release(gfp_mask); + memcg_lat_stat_end(MEM_LAT_GLOBAL_DIRECT_RECLAIM, start); cond_resched(); diff --git a/mm/shmem.c b/mm/shmem.c index fd2d03fc381f..fbf6ff38a0a3 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2153,6 +2153,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, struct folio *folio = NULL; swp_entry_t swap; int error, nr_pages; + u64 start; VM_BUG_ON(!*foliop || !xa_is_value(*foliop)); swap = radix_to_swp_entry(*foliop); @@ -2204,7 +2205,9 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, } /* Here we actually start the io */ + memcg_lat_stat_start(&start); folio = shmem_swapin(swap, gfp, info, index); + memcg_lat_stat_end(MEM_LAT_DIRECT_SWAPIN, start); if (!folio) { error = -ENOMEM; goto failed; diff --git a/mm/vmscan.c b/mm/vmscan.c index 3f4adabd8b8d..f49a349fe3a2 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1770,6 +1770,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, LIST_HEAD(demote_folios); unsigned int nr_reclaimed = 0; unsigned int pgactivate = 0; + u64 start = 0; bool do_demote_pass; struct swap_iocb *plug = NULL; struct lruvec *target_lruvec; @@ -2072,6 +2073,8 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, * starts and then write it out here. */ try_to_unmap_flush_dirty(); + if (!current_is_kswapd()) + memcg_lat_stat_start(&start); switch (pageout(folio, mapping, &plug, folio_list)) { case PAGE_KEEP: goto keep_locked; @@ -2087,6 +2090,11 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, } goto activate_locked; case PAGE_SUCCESS: + if (!current_is_kswapd()) + memcg_lat_stat_end(cgroup_reclaim(sc) ? 
+ MEM_LAT_MEMCG_DIRECT_SWAPOUT : + MEM_LAT_GLOBAL_DIRECT_SWAPOUT, + start); if (nr_pages > 1 && !folio_test_large(folio)) { sc->nr_scanned -= (nr_pages - 1); nr_pages = 1; -- Gitee From 50082c2f471565c915376be70fe7a82fcfd29c51 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Fri, 29 Jan 2021 15:31:26 +0800 Subject: [PATCH 2071/2138] anolis: configs: enable CONFIG_MEMSLI ANBZ: #13556 to #32655321 This enables CONFIG_MEMSLI by default, for both x86_64 and aarch64. Signed-off-by: Xu Yu Reviewed-by: Xunlei Pang Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4499 --- anolis/configs/L1-RECOMMEND/default/CONFIG_MEMSLI | 1 + 1 file changed, 1 insertion(+) create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_MEMSLI diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMSLI b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMSLI new file mode 100644 index 000000000000..ae1f861749f0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_MEMSLI @@ -0,0 +1 @@ +CONFIG_MEMSLI=y -- Gitee From c4d76205fffabce9ac834568c7539bfae1a23f51 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Fri, 29 Jan 2021 16:03:36 +0800 Subject: [PATCH 2072/2138] anolis: mm, memcg: introduce memsli monitor ANBZ: #13556 to #32655321 Memsli monitor aims to provide a memory pressure detection mechanism, which notifies user whenever the specified memory latency of interest increases, taking advantage of cgroup.event_control with eventfd. The concepts of monitor window and threshold, namely duration and frequency of memory stall, are not implemented for the moment. Effective detection of occurrence of memory stall in slow path is sufficient in most scenarios. By the way, multiple types of memory stall can be monitored concurrently in the same memcg. 
Supposing that we want to monitor "memcg direct reclaim" latency in the "test" memcg, one workable way to do it is: 1) create an eventfd as @efd 2) open the memory latency histogram file as @cfd, i.e., /sys/fs/cgroup/memory/test/memory.direct_reclaim_memcg_latency 3) write "@efd @cfd" to cgroup.event_control to register an event listener, i.e., /sys/fs/cgroup/memory/test/cgroup.event_control 4) call read(@efd, ...) or poll(...) to wait for an event to occur 5) close @efd to unregister the event listener. Signed-off-by: Xu Yu Reviewed-by: Xunlei Pang Reviewed-by: Wei Yang Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4499 --- include/linux/memcontrol.h | 2 + mm/memcontrol.c | 102 ++++++++++++++++++++++++++++++++++++- 2 files changed, 103 insertions(+), 1 deletion(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 45ffa4fbede9..f78f00829dcf 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -398,6 +398,8 @@ struct mem_cgroup { #ifdef CONFIG_MEMSLI struct mem_cgroup_lat_stat_cpu __percpu *lat_stat_cpu; + struct list_head lat_stat_notify[MEM_LAT_NR_STAT]; + struct mutex lat_stat_notify_lock; #endif #ifdef CONFIG_PAGECACHE_LIMIT diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 5287c47b4bf2..27b90d49c2e0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -135,7 +135,7 @@ struct mem_cgroup_tree { static struct mem_cgroup_tree soft_limit_tree __read_mostly; -/* for OOM */ +/* for OOM and MEMSLI */ struct mem_cgroup_eventfd_list { struct list_head list; struct eventfd_ctx *eventfd; @@ -4613,6 +4613,82 @@ static int memcg_lat_stat_show(struct seq_file *m, void *v) return 0; } +static int __memcg_lat_stat_register_event(struct mem_cgroup *memcg, + struct eventfd_ctx *eventfd, const char *args, + enum mem_lat_stat_item sidx) +{ + struct mem_cgroup_eventfd_list *evt; + + evt = kmalloc(sizeof(*evt), GFP_KERNEL); + if (!evt) + return -ENOMEM; + + 
mutex_lock(&memcg->lat_stat_notify_lock); + + evt->eventfd = eventfd; + list_add(&evt->list, &memcg->lat_stat_notify[sidx]); + + mutex_unlock(&memcg->lat_stat_notify_lock); + + return 0; +} + +static void __memcg_lat_stat_unregister_event(struct mem_cgroup *memcg, + struct eventfd_ctx *eventfd, enum mem_lat_stat_item sidx) +{ + struct mem_cgroup_eventfd_list *evt, *tmp; + + mutex_lock(&memcg->lat_stat_notify_lock); + + list_for_each_entry_safe(evt, tmp, &memcg->lat_stat_notify[sidx], + list) { + if (evt->eventfd == eventfd) { + list_del(&evt->list); + kfree(evt); + } + } + + mutex_unlock(&memcg->lat_stat_notify_lock); +} + +#define MEMCG_LAT_STAT_REGISTER_EVENT(name, sidx) \ +static int register_event_##name(struct mem_cgroup *memcg, \ + struct eventfd_ctx *eventfd, const char *args) \ +{ \ + return __memcg_lat_stat_register_event(memcg, eventfd, args, sidx); \ +} \ +static void unregister_event_##name(struct mem_cgroup *memcg, \ + struct eventfd_ctx *eventfd) \ +{ \ + return __memcg_lat_stat_unregister_event(memcg, eventfd, sidx); \ +} + +MEMCG_LAT_STAT_REGISTER_EVENT(global_direct_reclaim, + MEM_LAT_GLOBAL_DIRECT_RECLAIM) +MEMCG_LAT_STAT_REGISTER_EVENT(memcg_direct_reclaim, + MEM_LAT_MEMCG_DIRECT_RECLAIM) +MEMCG_LAT_STAT_REGISTER_EVENT(direct_compact, + MEM_LAT_DIRECT_COMPACT) +MEMCG_LAT_STAT_REGISTER_EVENT(global_direct_swapout, + MEM_LAT_GLOBAL_DIRECT_SWAPOUT) +MEMCG_LAT_STAT_REGISTER_EVENT(memcg_direct_swapout, + MEM_LAT_MEMCG_DIRECT_SWAPOUT) +MEMCG_LAT_STAT_REGISTER_EVENT(direct_swapin, + MEM_LAT_DIRECT_SWAPIN) + +static void memcg_lat_stat_notify_event(struct mem_cgroup *memcg, + enum mem_lat_stat_item sidx) +{ + struct mem_cgroup_eventfd_list *evt; + + mutex_lock(&memcg->lat_stat_notify_lock); + + list_for_each_entry(evt, &memcg->lat_stat_notify[sidx], list) + eventfd_signal(evt->eventfd, 1); + + mutex_unlock(&memcg->lat_stat_notify_lock); +} + static enum mem_lat_count_t get_mem_lat_count_idx(u64 duration) { enum mem_lat_count_t idx; @@ -4665,6 +4741,7 @@ 
void memcg_lat_stat_end(enum mem_lat_stat_item sidx, u64 start) this_cpu_inc(iter->lat_stat_cpu->item[sidx][cidx]); this_cpu_add(iter->lat_stat_cpu->item[sidx][MEM_LAT_TOTAL], duration); + memcg_lat_stat_notify_event(iter, sidx); } css_put(&memcg->css); } @@ -5739,6 +5816,26 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { event->register_event = memsw_cgroup_usage_register_event; event->unregister_event = memsw_cgroup_usage_unregister_event; +#ifdef CONFIG_MEMSLI + } else if (!strcmp(name, "memory.direct_reclaim_global_latency")) { + event->register_event = register_event_global_direct_reclaim; + event->unregister_event = unregister_event_global_direct_reclaim; + } else if (!strcmp(name, "memory.direct_reclaim_memcg_latency")) { + event->register_event = register_event_memcg_direct_reclaim; + event->unregister_event = unregister_event_memcg_direct_reclaim; + } else if (!strcmp(name, "memory.direct_compact_latency")) { + event->register_event = register_event_direct_compact; + event->unregister_event = unregister_event_direct_compact; + } else if (!strcmp(name, "memory.direct_swapout_global_latency")) { + event->register_event = register_event_global_direct_swapout; + event->unregister_event = unregister_event_global_direct_swapout; + } else if (!strcmp(name, "memory.direct_swapout_memcg_latency")) { + event->register_event = register_event_memcg_direct_swapout; + event->unregister_event = unregister_event_memcg_direct_swapout; + } else if (!strcmp(name, "memory.direct_swapin_latency")) { + event->register_event = register_event_direct_swapin; + event->unregister_event = unregister_event_direct_swapin; +#endif } else { ret = -EINVAL; goto out_put_cfile; @@ -6348,6 +6445,9 @@ static struct mem_cgroup *mem_cgroup_alloc(void) GFP_KERNEL_ACCOUNT); if (!memcg->lat_stat_cpu) goto fail; + for (i = 0; i < MEM_LAT_NR_STAT; i++) + INIT_LIST_HEAD(&memcg->lat_stat_notify[i]); + 
mutex_init(&memcg->lat_stat_notify_lock); #endif memcg->exstat_cpu = alloc_percpu(struct mem_cgroup_exstat_cpu); -- Gitee From 608af7496de783907098896c4b3be545538cdcfb Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Sat, 10 Oct 2020 15:12:13 +0800 Subject: [PATCH 2073/2138] anolis: mm, memcg: optimize eventfds with rculist ANBZ: #13556 to #32655321 With rculist, lat_stat_notify_lock is not required when performing event notification. Event registration and unregistration, however, should still be done with lat_stat_notify_lock held. Signed-off-by: Xu Yu Reviewed-by: Xunlei Pang Reviewed-by: Wei Yang Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4499 --- mm/memcontrol.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 27b90d49c2e0..96bff47e5f04 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -138,6 +138,7 @@ static struct mem_cgroup_tree soft_limit_tree __read_mostly; /* for OOM and MEMSLI */ struct mem_cgroup_eventfd_list { struct list_head list; + struct rcu_head rcu; struct eventfd_ctx *eventfd; }; @@ -4626,7 +4627,7 @@ static int __memcg_lat_stat_register_event(struct mem_cgroup *memcg, mutex_lock(&memcg->lat_stat_notify_lock); evt->eventfd = eventfd; - list_add(&evt->list, &memcg->lat_stat_notify[sidx]); + list_add_rcu(&evt->list, &memcg->lat_stat_notify[sidx]); mutex_unlock(&memcg->lat_stat_notify_lock); @@ -4643,8 +4644,8 @@ static void __memcg_lat_stat_unregister_event(struct mem_cgroup *memcg, list_for_each_entry_safe(evt, tmp, &memcg->lat_stat_notify[sidx], list) { if (evt->eventfd == eventfd) { - list_del(&evt->list); - kfree(evt); + list_del_rcu(&evt->list); + kfree_rcu(evt, rcu); } } @@ -4681,12 +4682,12 @@ static void memcg_lat_stat_notify_event(struct mem_cgroup *memcg, { struct mem_cgroup_eventfd_list *evt; - mutex_lock(&memcg->lat_stat_notify_lock); + rcu_read_lock(); - list_for_each_entry(evt, &memcg->lat_stat_notify[sidx], 
list) + list_for_each_entry_rcu(evt, &memcg->lat_stat_notify[sidx], list) eventfd_signal(evt->eventfd, 1); - mutex_unlock(&memcg->lat_stat_notify_lock); + rcu_read_unlock(); } static enum mem_lat_count_t get_mem_lat_count_idx(u64 duration) -- Gitee From 2a7cf42acb97393c79fbff055768ed5598caf358 Mon Sep 17 00:00:00 2001 From: Xu Yu Date: Mon, 1 Feb 2021 17:30:31 +0800 Subject: [PATCH 2074/2138] anolis: mm, memcg: make direct swapin latency more accurate ANBZ: #13556 to #32655321 The handling of migration_entry in do_swap_page is non-neglectible sometimes, and should not be included in the direct swapin latency. This distinguishes actual page swapin by VM_FAULT_MAJOR and VM_FAULT_OOM, making the direct swapin latency in memsli more accurate. Signed-off-by: Xu Yu Reviewed-by: Xunlei Pang Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4499 --- mm/memory.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index 5b935ff35ca2..3a32fa031a0b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5797,7 +5797,8 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) memcg_lat_stat_start(&start); retval = do_swap_page(vmf); - memcg_lat_stat_end(MEM_LAT_DIRECT_SWAPIN, start); + if (retval & (VM_FAULT_MAJOR | VM_FAULT_OOM)) + memcg_lat_stat_end(MEM_LAT_DIRECT_SWAPIN, start); return retval; } -- Gitee From 95198dc92c3861202829d5a889eda8f89c4dcb34 Mon Sep 17 00:00:00 2001 From: Bo Liu Date: Mon, 18 Sep 2023 14:46:11 +0800 Subject: [PATCH 2075/2138] anolis: mm, memcg: increase memsli interface for cgroup v2 ANBZ: #13556 Patch "ck: mm, memcg: record memory stall latency in every memcg" introduces memsli to cgroup v1, this patch increase memsli interface for cgroup v2. 
Signed-off-by: Bo Liu Reviewed-by: Kaihao Bai Reviewed-by: Xu Yu Link: https://gitee.com/anolis/cloud-kernel/pulls/2196 Signed-off-by: Kun(llfl) Reviewed-by: Baolin Wang Link: https://gitee.com/anolis/cloud-kernel/pulls/4499 --- mm/memcontrol.c | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 96bff47e5f04..28dbcb0625d8 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -8014,6 +8014,48 @@ static struct cftype memory_files[] = { .seq_show = memory_wmark_scale_factor_show, .write = memory_wmark_scale_factor_write, }, +#ifdef CONFIG_MEMSLI + { + .name = "direct_reclaim_global_latency", + .private = MEM_LAT_GLOBAL_DIRECT_RECLAIM, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_reclaim_memcg_latency", + .private = MEM_LAT_MEMCG_DIRECT_RECLAIM, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_compact_latency", + .private = MEM_LAT_DIRECT_COMPACT, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_swapout_global_latency", + .private = MEM_LAT_GLOBAL_DIRECT_SWAPOUT, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_swapout_memcg_latency", + .private = MEM_LAT_MEMCG_DIRECT_SWAPOUT, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, + { + .name = "direct_swapin_latency", + .private = MEM_LAT_DIRECT_SWAPIN, + .write_u64 = memcg_lat_stat_write, + .seq_show = memcg_lat_stat_show, + }, +#endif /* CONFIG_MEMSLI */ + { + .name = "exstat", + .seq_show = memcg_exstat_show, + }, { .name = "events", .flags = CFTYPE_NOT_ON_ROOT, -- Gitee From 6db277b8eeae0a65f8e1d21c6aa7f58c422fee5a Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Sun, 28 Jan 2024 18:24:06 -0700 Subject: [PATCH 2076/2138] bpf: btf: Support flags for BTF_SET8 sets ANBZ: #13548 commit 
79b47344bbc5a693a92ed6b2b09dac59254bfac8 upstream. This commit adds support for flags on BTF_SET8s. struct btf_id_set8 already supported 32 bits worth of flags, but was only used for alignment purposes before. We now use these bits to encode flags. The first use case is tagging kfunc sets with a flag so that pahole can recognize which BTF_ID_FLAGS(func, ..) are actual kfuncs. Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/7bb152ec76d6c2c930daec88e995bf18484a5ebb.1706491398.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- include/linux/btf_ids.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index a9cb10b0e2e9..dca09b7f21dc 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -21,6 +21,7 @@ struct btf_id_set8 { #include /* for __PASTE */ #include /* for __maybe_unused */ +#include /* * Following macros help to define lists of BTF IDs placed @@ -183,17 +184,18 @@ extern struct btf_id_set name; * .word (1 << 3) | (1 << 1) | (1 << 2) * */ -#define __BTF_SET8_START(name, scope) \ +#define __BTF_SET8_START(name, scope, flags) \ +__BTF_ID_LIST(name, local) \ asm( \ ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ "." #scope " __BTF_ID__set8__" #name "; \n" \ "__BTF_ID__set8__" #name ":; \n" \ -".zero 8 \n" \ +".zero 4 \n" \ +".long " __stringify(flags) "\n" \ ".popsection; \n"); #define BTF_SET8_START(name) \ -__BTF_ID_LIST(name, local) \ -__BTF_SET8_START(name, local) +__BTF_SET8_START(name, local, 0) #define BTF_SET8_END(name) \ asm( \ -- Gitee From fee4253505554f997f87d0d0853c7bfa5aee0796 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Sun, 28 Jan 2024 18:24:07 -0700 Subject: [PATCH 2077/2138] bpf: btf: Add BTF_KFUNCS_START/END macro pair ANBZ: #13548 commit a05e90427ef6706f59188b379ad6366b9d298bc5 upstream. 
This macro pair is functionally equivalent to BTF_SET8_START/END, except with BTF_SET8_KFUNCS flag set in the btf_id_set8 flags field. The next commit will codemod all kfunc set8s to this new variant such that all kfuncs are tagged as such in .BTF_ids section. Signed-off-by: Daniel Xu Link: https://lore.kernel.org/r/d536c57c7c2af428686853cc7396b7a44faa53b7.1706491398.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- include/linux/btf_ids.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index dca09b7f21dc..e24aabfe8ecc 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -8,6 +8,9 @@ struct btf_id_set { u32 ids[]; }; +/* This flag implies BTF_SET8 holds kfunc(s) */ +#define BTF_SET8_KFUNCS (1 << 0) + struct btf_id_set8 { u32 cnt; u32 flags; @@ -204,6 +207,12 @@ asm( \ ".popsection; \n"); \ extern struct btf_id_set8 name; +#define BTF_KFUNCS_START(name) \ +__BTF_SET8_START(name, local, BTF_SET8_KFUNCS) + +#define BTF_KFUNCS_END(name) \ +BTF_SET8_END(name) + #else #define BTF_ID_LIST(name) static u32 __maybe_unused name[64]; @@ -218,6 +227,8 @@ extern struct btf_id_set8 name; #define BTF_SET_END(name) #define BTF_SET8_START(name) static struct btf_id_set8 __maybe_unused name = { 0 }; #define BTF_SET8_END(name) +#define BTF_KFUNCS_START(name) static struct btf_id_set8 __maybe_unused name = { .flags = BTF_SET8_KFUNCS }; +#define BTF_KFUNCS_END(name) #endif /* CONFIG_DEBUG_INFO_BTF */ -- Gitee From 37085bc974d2cc2dbca1704f2521d208eaf318e6 Mon Sep 17 00:00:00 2001 From: Daniel Xu Date: Sun, 28 Jan 2024 18:24:08 -0700 Subject: [PATCH 2078/2138] bpf: treewide: Annotate BPF kfuncs in BTF ANBZ: #13548 commit 6f3189f38a3e995232e028a4c341164c4aca1b20 upstream. This commit marks kfuncs as such inside the .BTF_ids section. 
The upshot of these annotations is that we'll be able to automatically generate kfunc prototypes for downstream users. The process is as follows: 1. In source, use BTF_KFUNCS_START/END macro pair to mark kfuncs 2. During build, pahole injects into BTF a "bpf_kfunc" BTF_DECL_TAG for each function inside BTF_KFUNCS sets 3. At runtime, vmlinux or module BTF is made available in sysfs 4. At runtime, bpftool (or similar) can look at provided BTF and generate appropriate prototypes for functions with "bpf_kfunc" tag To ensure future kfunc are similarly tagged, we now also return error inside kfunc registration for untagged kfuncs. For vmlinux kfuncs, we also WARN(), as initcall machinery does not handle errors. Signed-off-by: Daniel Xu Acked-by: Benjamin Tissoires Link: https://lore.kernel.org/r/e55150ceecbf0a5d961e608941165c0bee7bc943.1706491398.git.dxu@dxuuu.xyz Signed-off-by: Alexei Starovoitov [dtcccc: fixup for bpf_relay] Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- Documentation/bpf/kfuncs.rst | 8 ++++---- drivers/hid/bpf/hid_bpf_dispatch.c | 8 ++++---- kernel/bpf/bpf_relay.c | 4 ++-- kernel/bpf/btf.c | 8 ++++++++ kernel/bpf/cpumask.c | 4 ++-- kernel/bpf/helpers.c | 8 ++++---- kernel/bpf/map_iter.c | 4 ++-- kernel/cgroup/rstat.c | 4 ++-- kernel/trace/bpf_trace.c | 4 ++-- net/bpf/test_run.c | 8 ++++---- net/core/filter.c | 16 ++++++++-------- net/core/xdp.c | 4 ++-- net/ipv4/bpf_tcp_ca.c | 4 ++-- net/ipv4/fou_bpf.c | 4 ++-- net/ipv4/tcp_bbr.c | 4 ++-- net/ipv4/tcp_cubic.c | 4 ++-- net/ipv4/tcp_dctcp.c | 4 ++-- net/netfilter/nf_conntrack_bpf.c | 4 ++-- net/netfilter/nf_nat_bpf.c | 4 ++-- net/xfrm/xfrm_interface_bpf.c | 4 ++-- .../selftests/bpf/bpf_testmod/bpf_testmod.c | 8 ++++---- 21 files changed, 64 insertions(+), 56 deletions(-) diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst index 723408e399ab..18920610ab7c 100644 --- a/Documentation/bpf/kfuncs.rst +++ 
b/Documentation/bpf/kfuncs.rst @@ -153,10 +153,10 @@ In addition to kfuncs' arguments, verifier may need more information about the type of kfunc(s) being registered with the BPF subsystem. To do so, we define flags on a set of kfuncs as follows:: - BTF_SET8_START(bpf_task_set) + BTF_KFUNCS_START(bpf_task_set) BTF_ID_FLAGS(func, bpf_get_task_pid, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_put_pid, KF_RELEASE) - BTF_SET8_END(bpf_task_set) + BTF_KFUNCS_END(bpf_task_set) This set encodes the BTF ID of each kfunc listed above, and encodes the flags along with it. Ofcourse, it is also allowed to specify no flags. @@ -323,10 +323,10 @@ Once the kfunc is prepared for use, the final step to making it visible is registering it with the BPF subsystem. Registration is done per BPF program type. An example is shown below:: - BTF_SET8_START(bpf_task_set) + BTF_KFUNCS_START(bpf_task_set) BTF_ID_FLAGS(func, bpf_get_task_pid, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_put_pid, KF_RELEASE) - BTF_SET8_END(bpf_task_set) + BTF_KFUNCS_END(bpf_task_set) static const struct btf_kfunc_id_set bpf_task_kfunc_set = { .owner = THIS_MODULE, diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c index 7903c8638e81..c2ffa01585ed 100644 --- a/drivers/hid/bpf/hid_bpf_dispatch.c +++ b/drivers/hid/bpf/hid_bpf_dispatch.c @@ -172,9 +172,9 @@ hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr * The following set contains all functions we agree BPF programs * can use. 
*/ -BTF_SET8_START(hid_bpf_kfunc_ids) +BTF_KFUNCS_START(hid_bpf_kfunc_ids) BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL) -BTF_SET8_END(hid_bpf_kfunc_ids) +BTF_KFUNCS_END(hid_bpf_kfunc_ids) static const struct btf_kfunc_id_set hid_bpf_kfunc_set = { .owner = THIS_MODULE, @@ -479,12 +479,12 @@ static const struct btf_kfunc_id_set hid_bpf_fmodret_set = { }; /* for syscall HID-BPF */ -BTF_SET8_START(hid_bpf_syscall_kfunc_ids) +BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids) BTF_ID_FLAGS(func, hid_bpf_attach_prog) BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE) BTF_ID_FLAGS(func, hid_bpf_hw_request) -BTF_SET8_END(hid_bpf_syscall_kfunc_ids) +BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids) static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = { .owner = THIS_MODULE, diff --git a/kernel/bpf/bpf_relay.c b/kernel/bpf/bpf_relay.c index f041e6e479d2..6f0004c7541b 100644 --- a/kernel/bpf/bpf_relay.c +++ b/kernel/bpf/bpf_relay.c @@ -457,9 +457,9 @@ __bpf_kfunc int bpf_anolis_relay_write(void *data, size_t size__sz, int index) __bpf_kfunc_end_defs(); -BTF_SET8_START(bpf_relay_kfunc_ids) +BTF_KFUNCS_START(bpf_relay_kfunc_ids) BTF_ID_FLAGS(func, bpf_anolis_relay_write, KF_TRUSTED_ARGS) -BTF_SET8_END(bpf_relay_kfunc_ids) +BTF_KFUNCS_END(bpf_relay_kfunc_ids) static const struct btf_kfunc_id_set bpf_relay_kfunc_set = { .owner = THIS_MODULE, diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index eb473839d721..e36ed7653f6f 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -7929,6 +7929,14 @@ int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, { enum btf_kfunc_hook hook; + /* All kfuncs need to be tagged as such in BTF. + * WARN() for initcall registrations that do not check errors. 
+ */ + if (!(kset->set->flags & BTF_SET8_KFUNCS)) { + WARN_ON(!kset->owner); + return -EINVAL; + } + hook = bpf_prog_type_to_kfunc_hook(prog_type); return __register_btf_kfunc_id_set(hook, kset); } diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c index 6acecc8ebd61..317f74895370 100644 --- a/kernel/bpf/cpumask.c +++ b/kernel/bpf/cpumask.c @@ -413,7 +413,7 @@ __bpf_kfunc u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, __bpf_kfunc_end_defs(); -BTF_SET8_START(cpumask_kfunc_btf_ids) +BTF_KFUNCS_START(cpumask_kfunc_btf_ids) BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS) @@ -438,7 +438,7 @@ BTF_ID_FLAGS(func, bpf_cpumask_full, KF_RCU) BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU) BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU) BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU) -BTF_SET8_END(cpumask_kfunc_btf_ids) +BTF_KFUNCS_END(cpumask_kfunc_btf_ids) static const struct btf_kfunc_id_set cpumask_kfunc_set = { .owner = THIS_MODULE, diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index e4b00ac1abce..ce958e21a7de 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2541,7 +2541,7 @@ __bpf_kfunc void bpf_rcu_read_unlock(void) __bpf_kfunc_end_defs(); -BTF_SET8_START(generic_btf_ids) +BTF_KFUNCS_START(generic_btf_ids) #ifdef CONFIG_KEXEC_CORE BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE) #endif @@ -2566,7 +2566,7 @@ BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU) #endif BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL) -BTF_SET8_END(generic_btf_ids) +BTF_KFUNCS_END(generic_btf_ids) static const struct btf_kfunc_id_set generic_kfunc_set = { .owner = THIS_MODULE, @@ -2582,7 +2582,7 @@ BTF_ID(struct, cgroup) BTF_ID(func, bpf_cgroup_release_dtor) #endif -BTF_SET8_START(common_btf_ids) 
+BTF_KFUNCS_START(common_btf_ids) BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx) BTF_ID_FLAGS(func, bpf_rdonly_cast) BTF_ID_FLAGS(func, bpf_rcu_read_lock) @@ -2597,7 +2597,7 @@ BTF_ID_FLAGS(func, bpf_dynptr_is_null) BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly) BTF_ID_FLAGS(func, bpf_dynptr_size) BTF_ID_FLAGS(func, bpf_dynptr_clone) -BTF_SET8_END(common_btf_ids) +BTF_KFUNCS_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { .owner = THIS_MODULE, diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index 6abd7c5df4b3..9575314f40a6 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -213,9 +213,9 @@ __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map) __bpf_kfunc_end_defs(); -BTF_SET8_START(bpf_map_iter_kfunc_ids) +BTF_KFUNCS_START(bpf_map_iter_kfunc_ids) BTF_ID_FLAGS(func, bpf_map_sum_elem_count, KF_TRUSTED_ARGS) -BTF_SET8_END(bpf_map_iter_kfunc_ids) +BTF_KFUNCS_END(bpf_map_iter_kfunc_ids) static const struct btf_kfunc_id_set bpf_map_iter_kfunc_set = { .owner = THIS_MODULE, diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index 3f5c19916951..d426513994bb 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -571,10 +571,10 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) } /* Add bpf kfuncs for cgroup_rstat_updated() and cgroup_rstat_flush() */ -BTF_SET8_START(bpf_rstat_kfunc_ids) +BTF_KFUNCS_START(bpf_rstat_kfunc_ids) BTF_ID_FLAGS(func, cgroup_rstat_updated) BTF_ID_FLAGS(func, cgroup_rstat_flush, KF_SLEEPABLE) -BTF_SET8_END(bpf_rstat_kfunc_ids) +BTF_KFUNCS_END(bpf_rstat_kfunc_ids) static const struct btf_kfunc_id_set bpf_rstat_kfunc_set = { .owner = THIS_MODULE, diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 087bc60963db..d031aae4b66c 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1406,14 +1406,14 @@ __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr, __bpf_kfunc_end_defs(); -BTF_SET8_START(key_sig_kfunc_set) 
+BTF_KFUNCS_START(key_sig_kfunc_set) BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_lookup_system_key, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_key_put, KF_RELEASE) #ifdef CONFIG_SYSTEM_DATA_VERIFICATION BTF_ID_FLAGS(func, bpf_verify_pkcs7_signature, KF_SLEEPABLE) #endif -BTF_SET8_END(key_sig_kfunc_set) +BTF_KFUNCS_END(key_sig_kfunc_set) static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index ca397f454eec..07589fdc6445 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -618,21 +618,21 @@ CFI_NOSEAL(bpf_kfunc_call_memb_release_dtor); __bpf_kfunc_end_defs(); -BTF_SET8_START(bpf_test_modify_return_ids) +BTF_KFUNCS_START(bpf_test_modify_return_ids) BTF_ID_FLAGS(func, bpf_modify_return_test) BTF_ID_FLAGS(func, bpf_modify_return_test2) BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE) -BTF_SET8_END(bpf_test_modify_return_ids) +BTF_KFUNCS_END(bpf_test_modify_return_ids) static const struct btf_kfunc_id_set bpf_test_modify_return_set = { .owner = THIS_MODULE, .set = &bpf_test_modify_return_ids, }; -BTF_SET8_START(test_sk_check_kfunc_ids) +BTF_KFUNCS_START(test_sk_check_kfunc_ids) BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE) -BTF_SET8_END(test_sk_check_kfunc_ids) +BTF_KFUNCS_END(test_sk_check_kfunc_ids) static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size, u32 size, u32 headroom, u32 tailroom) diff --git a/net/core/filter.c b/net/core/filter.c index efa1e93c3b2c..a9c8582ede94 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -11907,17 +11907,17 @@ int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, return 0; } -BTF_SET8_START(bpf_kfunc_check_set_skb) +BTF_KFUNCS_START(bpf_kfunc_check_set_skb) BTF_ID_FLAGS(func, bpf_dynptr_from_skb, KF_TRUSTED_ARGS) -BTF_SET8_END(bpf_kfunc_check_set_skb) 
+BTF_KFUNCS_END(bpf_kfunc_check_set_skb) -BTF_SET8_START(bpf_kfunc_check_set_xdp) +BTF_KFUNCS_START(bpf_kfunc_check_set_xdp) BTF_ID_FLAGS(func, bpf_dynptr_from_xdp) -BTF_SET8_END(bpf_kfunc_check_set_xdp) +BTF_KFUNCS_END(bpf_kfunc_check_set_xdp) -BTF_SET8_START(bpf_kfunc_check_set_sock_addr) +BTF_KFUNCS_START(bpf_kfunc_check_set_sock_addr) BTF_ID_FLAGS(func, bpf_sock_addr_set_sun_path) -BTF_SET8_END(bpf_kfunc_check_set_sock_addr) +BTF_KFUNCS_END(bpf_kfunc_check_set_sock_addr) static const struct btf_kfunc_id_set bpf_kfunc_set_skb = { .owner = THIS_MODULE, @@ -11991,9 +11991,9 @@ __bpf_kfunc int bpf_sock_destroy(struct sock_common *sock) __bpf_kfunc_end_defs(); -BTF_SET8_START(bpf_sk_iter_kfunc_ids) +BTF_KFUNCS_START(bpf_sk_iter_kfunc_ids) BTF_ID_FLAGS(func, bpf_sock_destroy, KF_TRUSTED_ARGS) -BTF_SET8_END(bpf_sk_iter_kfunc_ids) +BTF_KFUNCS_END(bpf_sk_iter_kfunc_ids) static int tracing_iter_filter(const struct bpf_prog *prog, u32 kfunc_id) { diff --git a/net/core/xdp.c b/net/core/xdp.c index 1642222e350b..39738e00e732 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -734,11 +734,11 @@ __bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash, __bpf_kfunc_end_defs(); -BTF_SET8_START(xdp_metadata_kfunc_ids) +BTF_KFUNCS_START(xdp_metadata_kfunc_ids) #define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS) XDP_METADATA_KFUNC_xxx #undef XDP_METADATA_KFUNC -BTF_SET8_END(xdp_metadata_kfunc_ids) +BTF_KFUNCS_END(xdp_metadata_kfunc_ids) static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index ae8b15e6896f..edecdf8229df 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -195,13 +195,13 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id, } } -BTF_SET8_START(bpf_tcp_ca_check_kfunc_ids) +BTF_KFUNCS_START(bpf_tcp_ca_check_kfunc_ids) BTF_ID_FLAGS(func, tcp_reno_ssthresh) BTF_ID_FLAGS(func, tcp_reno_cong_avoid) BTF_ID_FLAGS(func, 
tcp_reno_undo_cwnd) BTF_ID_FLAGS(func, tcp_slow_start) BTF_ID_FLAGS(func, tcp_cong_avoid_ai) -BTF_SET8_END(bpf_tcp_ca_check_kfunc_ids) +BTF_KFUNCS_END(bpf_tcp_ca_check_kfunc_ids) static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/ipv4/fou_bpf.c b/net/ipv4/fou_bpf.c index 4da03bf45c9b..06e5572f296f 100644 --- a/net/ipv4/fou_bpf.c +++ b/net/ipv4/fou_bpf.c @@ -100,10 +100,10 @@ __bpf_kfunc int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx, __bpf_kfunc_end_defs(); -BTF_SET8_START(fou_kfunc_set) +BTF_KFUNCS_START(fou_kfunc_set) BTF_ID_FLAGS(func, bpf_skb_set_fou_encap) BTF_ID_FLAGS(func, bpf_skb_get_fou_encap) -BTF_SET8_END(fou_kfunc_set) +BTF_KFUNCS_END(fou_kfunc_set) static const struct btf_kfunc_id_set fou_bpf_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c index 146792cd26fe..56bb7a9621ab 100644 --- a/net/ipv4/tcp_bbr.c +++ b/net/ipv4/tcp_bbr.c @@ -1154,7 +1154,7 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = { .set_state = bbr_set_state, }; -BTF_SET8_START(tcp_bbr_check_kfunc_ids) +BTF_KFUNCS_START(tcp_bbr_check_kfunc_ids) #ifdef CONFIG_X86 #ifdef CONFIG_DYNAMIC_FTRACE BTF_ID_FLAGS(func, bbr_init) @@ -1167,7 +1167,7 @@ BTF_ID_FLAGS(func, bbr_min_tso_segs) BTF_ID_FLAGS(func, bbr_set_state) #endif #endif -BTF_SET8_END(tcp_bbr_check_kfunc_ids) +BTF_KFUNCS_END(tcp_bbr_check_kfunc_ids) static const struct btf_kfunc_id_set tcp_bbr_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 0fd78ecb67e7..44869ea089e3 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -485,7 +485,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = { .name = "cubic", }; -BTF_SET8_START(tcp_cubic_check_kfunc_ids) +BTF_KFUNCS_START(tcp_cubic_check_kfunc_ids) #ifdef CONFIG_X86 #ifdef CONFIG_DYNAMIC_FTRACE BTF_ID_FLAGS(func, cubictcp_init) @@ -496,7 +496,7 @@ BTF_ID_FLAGS(func, cubictcp_cwnd_event) BTF_ID_FLAGS(func, 
cubictcp_acked) #endif #endif -BTF_SET8_END(tcp_cubic_check_kfunc_ids) +BTF_KFUNCS_END(tcp_cubic_check_kfunc_ids) static const struct btf_kfunc_id_set tcp_cubic_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 8ad62713b0ba..b004280855f8 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -271,7 +271,7 @@ static struct tcp_congestion_ops dctcp_reno __read_mostly = { .name = "dctcp-reno", }; -BTF_SET8_START(tcp_dctcp_check_kfunc_ids) +BTF_KFUNCS_START(tcp_dctcp_check_kfunc_ids) #ifdef CONFIG_X86 #ifdef CONFIG_DYNAMIC_FTRACE BTF_ID_FLAGS(func, dctcp_init) @@ -282,7 +282,7 @@ BTF_ID_FLAGS(func, dctcp_cwnd_undo) BTF_ID_FLAGS(func, dctcp_state) #endif #endif -BTF_SET8_END(tcp_dctcp_check_kfunc_ids) +BTF_KFUNCS_END(tcp_dctcp_check_kfunc_ids) static const struct btf_kfunc_id_set tcp_dctcp_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c index 475358ec8212..d2492d050fe6 100644 --- a/net/netfilter/nf_conntrack_bpf.c +++ b/net/netfilter/nf_conntrack_bpf.c @@ -467,7 +467,7 @@ __bpf_kfunc int bpf_ct_change_status(struct nf_conn *nfct, u32 status) __bpf_kfunc_end_defs(); -BTF_SET8_START(nf_ct_kfunc_set) +BTF_KFUNCS_START(nf_ct_kfunc_set) BTF_ID_FLAGS(func, bpf_xdp_ct_alloc, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_xdp_ct_lookup, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_skb_ct_alloc, KF_ACQUIRE | KF_RET_NULL) @@ -478,7 +478,7 @@ BTF_ID_FLAGS(func, bpf_ct_set_timeout, KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_ct_change_timeout, KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_ct_set_status, KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_ct_change_status, KF_TRUSTED_ARGS) -BTF_SET8_END(nf_ct_kfunc_set) +BTF_KFUNCS_END(nf_ct_kfunc_set) static const struct btf_kfunc_id_set nf_conntrack_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/netfilter/nf_nat_bpf.c b/net/netfilter/nf_nat_bpf.c index 6e3b2f58855f..481be15609b1 100644 --- 
a/net/netfilter/nf_nat_bpf.c +++ b/net/netfilter/nf_nat_bpf.c @@ -54,9 +54,9 @@ __bpf_kfunc int bpf_ct_set_nat_info(struct nf_conn___init *nfct, __bpf_kfunc_end_defs(); -BTF_SET8_START(nf_nat_kfunc_set) +BTF_KFUNCS_START(nf_nat_kfunc_set) BTF_ID_FLAGS(func, bpf_ct_set_nat_info, KF_TRUSTED_ARGS) -BTF_SET8_END(nf_nat_kfunc_set) +BTF_KFUNCS_END(nf_nat_kfunc_set) static const struct btf_kfunc_id_set nf_bpf_nat_kfunc_set = { .owner = THIS_MODULE, diff --git a/net/xfrm/xfrm_interface_bpf.c b/net/xfrm/xfrm_interface_bpf.c index 7d5e920141e9..5ea15037ebd1 100644 --- a/net/xfrm/xfrm_interface_bpf.c +++ b/net/xfrm/xfrm_interface_bpf.c @@ -93,10 +93,10 @@ __bpf_kfunc int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, const struct bp __bpf_kfunc_end_defs(); -BTF_SET8_START(xfrm_ifc_kfunc_set) +BTF_KFUNCS_START(xfrm_ifc_kfunc_set) BTF_ID_FLAGS(func, bpf_skb_get_xfrm_info) BTF_ID_FLAGS(func, bpf_skb_set_xfrm_info) -BTF_SET8_END(xfrm_ifc_kfunc_set) +BTF_KFUNCS_END(xfrm_ifc_kfunc_set) static const struct btf_kfunc_id_set xfrm_interface_kfunc_set = { .owner = THIS_MODULE, diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 139c36fa3635..4006583b8bd9 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -340,11 +340,11 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { .write = bpf_testmod_test_write, }; -BTF_SET8_START(bpf_testmod_common_kfunc_ids) +BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY) -BTF_SET8_END(bpf_testmod_common_kfunc_ids) +BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids) static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = { .owner = THIS_MODULE, @@ -490,7 +490,7 @@ __bpf_kfunc 
static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused return arg; } -BTF_SET8_START(bpf_testmod_check_kfunc_ids) +BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids) BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc) BTF_ID_FLAGS(func, bpf_kfunc_call_test1) BTF_ID_FLAGS(func, bpf_kfunc_call_test2) @@ -516,7 +516,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU) BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE) BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg) BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset) -BTF_SET8_END(bpf_testmod_check_kfunc_ids) +BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids) static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = { .owner = THIS_MODULE, -- Gitee From bf1cda82f41ec05f3dbf6aeaf77f9d5ec0e65242 Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Wed, 21 Feb 2024 17:25:18 +0100 Subject: [PATCH 2079/2138] bpf: introduce in_sleepable() helper ANBZ: #13548 commit dfe6625df48ec54c6dc9b86d361f26962d09de88 upstream. No code change, but it'll allow to have only one place to change everything when we add in_sleepable in cur_state. 
Signed-off-by: Benjamin Tissoires Link: https://lore.kernel.org/r/20240221-hid-bpf-sleepable-v3-2-1fb378ca6301@kernel.org Signed-off-by: Alexei Starovoitov Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- kernel/bpf/verifier.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9a2cdddb5beb..b63235293b25 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5316,6 +5316,11 @@ static int map_kptr_match_type(struct bpf_verifier_env *env, return -EINVAL; } +static bool in_sleepable(struct bpf_verifier_env *env) +{ + return env->prog->aux->sleepable; +} + /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock() * can dereference RCU protected pointers and result is PTR_TRUSTED. */ @@ -5323,7 +5328,7 @@ static bool in_rcu_cs(struct bpf_verifier_env *env) { return env->cur_state->active_rcu_lock || env->cur_state->active_lock.ptr || - !env->prog->aux->sleepable; + !in_sleepable(env); } /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */ @@ -9983,7 +9988,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn return -EINVAL; } - if (!env->prog->aux->sleepable && fn->might_sleep) { + if (!in_sleepable(env) && fn->might_sleep) { verbose(env, "helper call might sleep in a non-sleepable prog\n"); return -EINVAL; } @@ -10013,7 +10018,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn return -EINVAL; } - if (env->prog->aux->sleepable && is_storage_get_function(func_id)) + if (in_sleepable(env) && is_storage_get_function(func_id)) env->insn_aux_data[insn_idx].storage_get_func_atomic = true; } @@ -11731,7 +11736,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, } sleepable = is_kfunc_sleepable(&meta); - if (sleepable && !env->prog->aux->sleepable) { + if 
(sleepable && !in_sleepable(env)) { verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name); return -EACCES; } @@ -19132,7 +19137,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) } if (is_storage_get_function(insn->imm)) { - if (!env->prog->aux->sleepable || + if (!in_sleepable(env) || env->insn_aux_data[i + delta].storage_get_func_atomic) insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC); else -- Gitee From 1891e7a509266099a74d0664f5c3df8876c380f1 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 8 Mar 2024 16:47:39 -0800 Subject: [PATCH 2080/2138] bpf: move sleepable flag from bpf_prog_aux to bpf_prog ANBZ: #13548 commit 66c8473135c62f478301a0e5b3012f203562dfa6 upstream. prog->aux->sleepable is checked very frequently as part of (some) BPF program run hot paths. So this extra aux indirection seems wasteful and on busy systems might cause unnecessary memory cache misses. Let's move sleepable flag into prog itself to eliminate unnecessary pointer dereference. Signed-off-by: Andrii Nakryiko Acked-by: Jiri Olsa Message-ID: <20240309004739.2961431-1-andrii@kernel.org> Signed-off-by: Alexei Starovoitov Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- include/linux/bpf.h | 8 ++++---- kernel/bpf/bpf_iter.c | 4 ++-- kernel/bpf/core.c | 2 +- kernel/bpf/syscall.c | 8 ++++---- kernel/bpf/trampoline.c | 4 ++-- kernel/bpf/verifier.c | 12 ++++++------ kernel/events/core.c | 2 +- kernel/trace/bpf_trace.c | 2 +- net/bpf/bpf_dummy_struct_ops.c | 2 +- 9 files changed, 22 insertions(+), 22 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 72409d7e926f..43c1d870e27b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1466,7 +1466,6 @@ struct bpf_prog_aux { bool offload_requested; /* Program is bound and offloaded to the netdev. 
*/ bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ bool func_proto_unreliable; - bool sleepable; bool tail_call_reachable; bool xdp_has_frags; /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ @@ -1552,7 +1551,8 @@ struct bpf_prog { enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ call_get_func_ip:1, /* Do we call get_func_ip() */ - tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */ + tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */ + sleepable:1; /* BPF program is sleepable */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ @@ -2066,14 +2066,14 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array *array, old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); item = &array->items[0]; while ((prog = READ_ONCE(item->prog))) { - if (!prog->aux->sleepable) + if (!prog->sleepable) rcu_read_lock(); run_ctx.bpf_cookie = item->bpf_cookie; ret &= run_prog(prog, ctx); item++; - if (!prog->aux->sleepable) + if (!prog->sleepable) rcu_read_unlock(); } bpf_reset_run_ctx(old_run_ctx); diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 3ea907fa71e1..ec7118517fa9 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -548,7 +548,7 @@ int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, return -ENOENT; /* Only allow sleepable program for resched-able iterator */ - if (prog->aux->sleepable && !bpf_iter_target_support_resched(tinfo)) + if (prog->sleepable && !bpf_iter_target_support_resched(tinfo)) return -EINVAL; link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN); @@ -697,7 +697,7 @@ int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx) struct bpf_run_ctx run_ctx, *old_run_ctx; int ret; - if (prog->aux->sleepable) { + if (prog->sleepable) { rcu_read_lock_trace(); 
migrate_disable(); might_fault(); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index a94e90ce0b2f..49e6430c1fec 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2722,7 +2722,7 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux, bool sleepable; u32 i; - sleepable = aux->sleepable; + sleepable = aux->prog->sleepable; for (i = 0; i < len; i++) { map = used_maps[i]; if (map->ops->map_poke_untrack) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index ba38c08a9a05..bfdede97786b 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2156,7 +2156,7 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) btf_put(prog->aux->attach_btf); if (deferred) { - if (prog->aux->sleepable) + if (prog->sleepable) call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); else call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); @@ -2681,11 +2681,11 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) } prog->expected_attach_type = attr->expected_attach_type; + prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE); prog->aux->attach_btf = attach_btf; prog->aux->attach_btf_id = attr->attach_btf_id; prog->aux->dst_prog = dst_prog; prog->aux->dev_bound = !!attr->prog_ifindex; - prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; err = security_bpf_prog_alloc(prog->aux); @@ -2906,7 +2906,7 @@ static void bpf_link_free(struct bpf_link *link) bpf_link_free_id(link->id); if (link->prog) { - sleepable = link->prog->aux->sleepable; + sleepable = link->prog->sleepable; /* detach BPF program, clean up used resources */ ops->release(link); } @@ -5402,7 +5402,7 @@ static int bpf_prog_bind_map(union bpf_attr *attr) /* The bpf program will not access the bpf map, but for the sake of * simplicity, increase sleepable_refcnt for sleepable program as well. 
*/ - if (prog->aux->sleepable) + if (prog->sleepable) atomic64_inc(&map->sleepable_refcnt); memcpy(used_maps_new, used_maps_old, sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index e97aeda3a86b..45477c604a1a 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -1003,7 +1003,7 @@ void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr) bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog) { - bool sleepable = prog->aux->sleepable; + bool sleepable = prog->sleepable; if (bpf_prog_check_recur(prog)) return sleepable ? __bpf_prog_enter_sleepable_recur : @@ -1018,7 +1018,7 @@ bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog) bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog) { - bool sleepable = prog->aux->sleepable; + bool sleepable = prog->sleepable; if (bpf_prog_check_recur(prog)) return sleepable ? __bpf_prog_exit_sleepable_recur : diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index b63235293b25..2d8cebd545a4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5318,7 +5318,7 @@ static int map_kptr_match_type(struct bpf_verifier_env *env, static bool in_sleepable(struct bpf_verifier_env *env) { - return env->prog->aux->sleepable; + return env->prog->sleepable; } /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock() @@ -17543,7 +17543,7 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, return -EINVAL; } - if (prog->aux->sleepable) + if (prog->sleepable) switch (map->map_type) { case BPF_MAP_TYPE_HASH: case BPF_MAP_TYPE_LRU_HASH: @@ -17727,7 +17727,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) return -E2BIG; } - if (env->prog->aux->sleepable) + if (env->prog->sleepable) atomic64_inc(&map->sleepable_refcnt); /* hold the map. 
If the program is rejected by verifier, * the map will be released by release_maps() or it @@ -20039,7 +20039,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, } } - if (prog->aux->sleepable) { + if (prog->sleepable) { ret = -EINVAL; switch (prog->type) { case BPF_PROG_TYPE_TRACING: @@ -20150,14 +20150,14 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) u64 key; if (prog->type == BPF_PROG_TYPE_SYSCALL) { - if (prog->aux->sleepable) + if (prog->sleepable) /* attach_btf_id checked to be zero already */ return 0; verbose(env, "Syscall programs can only be sleepable\n"); return -EINVAL; } - if (prog->aux->sleepable && !can_be_sleepable(prog)) { + if (prog->sleepable && !can_be_sleepable(prog)) { verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n"); return -EINVAL; } diff --git a/kernel/events/core.c b/kernel/events/core.c index b6d70669c918..a1deaf7e4205 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -10598,7 +10598,7 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT)) return -EINVAL; - if (prog->type == BPF_PROG_TYPE_KPROBE && prog->aux->sleepable && !is_uprobe) + if (prog->type == BPF_PROG_TYPE_KPROBE && prog->sleepable && !is_uprobe) /* only uprobe programs are allowed to be sleepable */ return -EINVAL; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index d031aae4b66c..c4dd9325861e 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -3105,7 +3105,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe, .uprobe = uprobe, }; struct bpf_prog *prog = link->link.prog; - bool sleepable = prog->aux->sleepable; + bool sleepable = prog->sleepable; struct bpf_run_ctx *old_run_ctx; if (link->task && current->mm != link->task->mm) diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c index c639bdafe6b0..8e96189838fc 100644 --- 
a/net/bpf/bpf_dummy_struct_ops.c +++ b/net/bpf/bpf_dummy_struct_ops.c @@ -170,7 +170,7 @@ static int bpf_dummy_ops_check_member(const struct btf_type *t, case offsetof(struct bpf_dummy_ops, test_sleepable): break; default: - if (prog->aux->sleepable) + if (prog->sleepable) return -EINVAL; } -- Gitee From 9e52a6548fc5054558a9706ee76cbd3213e8085f Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 14 Sep 2023 15:25:42 -0700 Subject: [PATCH 2081/2138] bpf: Charge modmem for struct_ops trampoline ANBZ: #13548 commit 5c04433daf9ed8b28d4900112be1fd19e1786b25 upstream. Current code charges modmem for regular trampoline, but not for struct_ops trampoline. Add bpf_jit_[charge|uncharge]_modmem() to struct_ops so the trampoline is charged in both cases. Signed-off-by: Song Liu Link: https://lore.kernel.org/r/20230914222542.2986059-1-song@kernel.org Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- kernel/bpf/bpf_struct_ops.c | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 81eb5f4370ee..deffc612b232 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -616,7 +616,10 @@ static void __bpf_struct_ops_map_free(struct bpf_map *map) if (st_map->links) bpf_struct_ops_map_put_progs(st_map); bpf_map_area_free(st_map->links); - bpf_jit_free_exec(st_map->image); + if (st_map->image) { + bpf_jit_free_exec(st_map->image); + bpf_jit_uncharge_modmem(PAGE_SIZE); + } bpf_map_area_free(st_map->uvalue); bpf_map_area_free(st_map); } @@ -658,6 +661,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) struct bpf_struct_ops_map *st_map; const struct btf_type *t, *vt; struct bpf_map *map; + int ret; st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id); if (!st_ops) @@ -682,12 +686,27 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union 
bpf_attr *attr) st_map->st_ops = st_ops; map = &st_map->map; + ret = bpf_jit_charge_modmem(PAGE_SIZE); + if (ret) { + __bpf_struct_ops_map_free(map); + return ERR_PTR(ret); + } + + st_map->image = bpf_jit_alloc_exec(PAGE_SIZE); + if (!st_map->image) { + /* __bpf_struct_ops_map_free() uses st_map->image as flag + * for "charged or not". In this case, we need to unchange + * here. + */ + bpf_jit_uncharge_modmem(PAGE_SIZE); + __bpf_struct_ops_map_free(map); + return ERR_PTR(-ENOMEM); + } st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE); st_map->links = bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_links *), NUMA_NO_NODE); - st_map->image = bpf_jit_alloc_exec(PAGE_SIZE); - if (!st_map->uvalue || !st_map->links || !st_map->image) { + if (!st_map->uvalue || !st_map->links) { __bpf_struct_ops_map_free(map); return ERR_PTR(-ENOMEM); } @@ -908,4 +927,3 @@ int bpf_struct_ops_link_create(union bpf_attr *attr) kfree(link); return err; } - -- Gitee From 8c1c2a65c2836b817c75c32a1d7cec7f4cd83428 Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Fri, 19 Jan 2024 14:49:52 -0800 Subject: [PATCH 2082/2138] bpf: refactory struct_ops type initialization to a function. ANBZ: #13548 commit 3b1f89e747cd4b24244f2798a35d28815b744303 upstream. Move the majority of the code to bpf_struct_ops_init_one(), which can then be utilized for the initialization of newly registered dynamically allocated struct_ops types in the following patches. 
Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240119225005.668602-2-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- include/linux/btf.h | 1 + kernel/bpf/bpf_struct_ops.c | 157 +++++++++++++++++++----------------- kernel/bpf/btf.c | 5 ++ 3 files changed, 89 insertions(+), 74 deletions(-) diff --git a/include/linux/btf.h b/include/linux/btf.h index f6180d83c266..4480df4e9fbf 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -136,6 +136,7 @@ struct btf_struct_metas { extern const struct file_operations btf_fops; +const char *btf_get_name(const struct btf *btf); void btf_get(struct btf *btf); void btf_put(struct btf *btf); int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz); diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index deffc612b232..cfe905bdcf96 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -110,102 +110,111 @@ const struct bpf_prog_ops bpf_struct_ops_prog_ops = { static const struct btf_type *module_type; -void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log) +static void bpf_struct_ops_init_one(struct bpf_struct_ops *st_ops, + struct btf *btf, + struct bpf_verifier_log *log) { - s32 type_id, value_id, module_id; const struct btf_member *member; - struct bpf_struct_ops *st_ops; const struct btf_type *t; + s32 type_id, value_id; char value_name[128]; const char *mname; - u32 i, j; + int i; - /* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */ -#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name); -#include "bpf_struct_ops_types.h" -#undef BPF_STRUCT_OPS_TYPE + if (strlen(st_ops->name) + VALUE_PREFIX_LEN >= + sizeof(value_name)) { + pr_warn("struct_ops name %s is too long\n", + st_ops->name); + return; + } + sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name); - module_id = 
btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT); - if (module_id < 0) { - pr_warn("Cannot find struct module in btf_vmlinux\n"); + value_id = btf_find_by_name_kind(btf, value_name, + BTF_KIND_STRUCT); + if (value_id < 0) { + pr_warn("Cannot find struct %s in %s\n", + value_name, btf_get_name(btf)); return; } - module_type = btf_type_by_id(btf, module_id); - for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { - st_ops = bpf_struct_ops[i]; + type_id = btf_find_by_name_kind(btf, st_ops->name, + BTF_KIND_STRUCT); + if (type_id < 0) { + pr_warn("Cannot find struct %s in %s\n", + st_ops->name, btf_get_name(btf)); + return; + } + t = btf_type_by_id(btf, type_id); + if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) { + pr_warn("Cannot support #%u members in struct %s\n", + btf_type_vlen(t), st_ops->name); + return; + } - if (strlen(st_ops->name) + VALUE_PREFIX_LEN >= - sizeof(value_name)) { - pr_warn("struct_ops name %s is too long\n", + for_each_member(i, t, member) { + const struct btf_type *func_proto; + + mname = btf_name_by_offset(btf, member->name_off); + if (!*mname) { + pr_warn("anon member in struct %s is not supported\n", st_ops->name); - continue; + break; } - sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name); - value_id = btf_find_by_name_kind(btf, value_name, - BTF_KIND_STRUCT); - if (value_id < 0) { - pr_warn("Cannot find struct %s in btf_vmlinux\n", - value_name); - continue; + if (__btf_member_bitfield_size(t, member)) { + pr_warn("bit field member %s in struct %s is not supported\n", + mname, st_ops->name); + break; } - type_id = btf_find_by_name_kind(btf, st_ops->name, - BTF_KIND_STRUCT); - if (type_id < 0) { - pr_warn("Cannot find struct %s in btf_vmlinux\n", - st_ops->name); - continue; - } - t = btf_type_by_id(btf, type_id); - if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) { - pr_warn("Cannot support #%u members in struct %s\n", - btf_type_vlen(t), st_ops->name); - continue; + func_proto = btf_type_resolve_func_ptr(btf, + 
member->type, + NULL); + if (func_proto && + btf_distill_func_proto(log, btf, + func_proto, mname, + &st_ops->func_models[i])) { + pr_warn("Error in parsing func ptr %s in struct %s\n", + mname, st_ops->name); + break; } + } - for_each_member(j, t, member) { - const struct btf_type *func_proto; + if (i == btf_type_vlen(t)) { + if (st_ops->init(btf)) { + pr_warn("Error in init bpf_struct_ops %s\n", + st_ops->name); + } else { + st_ops->type_id = type_id; + st_ops->type = t; + st_ops->value_id = value_id; + st_ops->value_type = btf_type_by_id(btf, + value_id); + } + } +} - mname = btf_name_by_offset(btf, member->name_off); - if (!*mname) { - pr_warn("anon member in struct %s is not supported\n", - st_ops->name); - break; - } +void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log) +{ + struct bpf_struct_ops *st_ops; + s32 module_id; + u32 i; - if (__btf_member_bitfield_size(t, member)) { - pr_warn("bit field member %s in struct %s is not supported\n", - mname, st_ops->name); - break; - } + /* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */ +#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name); +#include "bpf_struct_ops_types.h" +#undef BPF_STRUCT_OPS_TYPE - func_proto = btf_type_resolve_func_ptr(btf, - member->type, - NULL); - if (func_proto && - btf_distill_func_proto(log, btf, - func_proto, mname, - &st_ops->func_models[j])) { - pr_warn("Error in parsing func ptr %s in struct %s\n", - mname, st_ops->name); - break; - } - } + module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT); + if (module_id < 0) { + pr_warn("Cannot find struct module in %s\n", btf_get_name(btf)); + return; + } + module_type = btf_type_by_id(btf, module_id); - if (j == btf_type_vlen(t)) { - if (st_ops->init(btf)) { - pr_warn("Error in init bpf_struct_ops %s\n", - st_ops->name); - } else { - st_ops->type_id = type_id; - st_ops->type = t; - st_ops->value_id = value_id; - st_ops->value_type = btf_type_by_id(btf, - value_id); - 
} - } + for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { + st_ops = bpf_struct_ops[i]; + bpf_struct_ops_init_one(st_ops, btf, log); } } diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index e36ed7653f6f..43d1a45d286c 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -1707,6 +1707,11 @@ static void btf_free_rcu(struct rcu_head *rcu) btf_free(btf); } +const char *btf_get_name(const struct btf *btf) +{ + return btf->name; +} + void btf_get(struct btf *btf) { refcount_inc(&btf->refcnt); -- Gitee From 70ca2583f37a422a17e6f0b32ca6556f9603480c Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Fri, 19 Jan 2024 14:49:53 -0800 Subject: [PATCH 2083/2138] bpf: get type information with BTF_ID_LIST ANBZ: #13548 commit 95678395386d45fa0a075d2e7a6866326a469d76 upstream. Get ready to remove bpf_struct_ops_init() in the future. By using BTF_ID_LIST, it is possible to gather type information while building instead of runtime. Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240119225005.668602-3-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- kernel/bpf/bpf_struct_ops.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index cfe905bdcf96..2bdc53a1bfcc 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -108,7 +108,12 @@ const struct bpf_prog_ops bpf_struct_ops_prog_ops = { #endif }; -static const struct btf_type *module_type; +BTF_ID_LIST(st_ops_ids) +BTF_ID(struct, module) + +enum { + IDX_MODULE_ID, +}; static void bpf_struct_ops_init_one(struct bpf_struct_ops *st_ops, struct btf *btf, @@ -197,7 +202,6 @@ static void bpf_struct_ops_init_one(struct bpf_struct_ops *st_ops, void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log) { struct bpf_struct_ops *st_ops; - s32 module_id; u32 i; /* Ensure BTF type is emitted 
for "struct bpf_struct_ops_##_name" */ @@ -205,13 +209,6 @@ void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log) #include "bpf_struct_ops_types.h" #undef BPF_STRUCT_OPS_TYPE - module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT); - if (module_id < 0) { - pr_warn("Cannot find struct module in %s\n", btf_get_name(btf)); - return; - } - module_type = btf_type_by_id(btf, module_id); - for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { st_ops = bpf_struct_ops[i]; bpf_struct_ops_init_one(st_ops, btf, log); @@ -381,6 +378,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; const struct bpf_struct_ops *st_ops = st_map->st_ops; struct bpf_struct_ops_value *uvalue, *kvalue; + const struct btf_type *module_type; const struct btf_member *member; const struct btf_type *t = st_ops->type; struct bpf_tramp_links *tlinks; @@ -428,6 +426,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, image = st_map->image; image_end = st_map->image + PAGE_SIZE; + module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]); for_each_member(i, t, member) { const struct btf_type *mtype, *ptype; struct bpf_prog *prog; -- Gitee From 25d385eb5add36b35f97491f250cd35d150d70b9 Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Fri, 19 Jan 2024 14:49:54 -0800 Subject: [PATCH 2084/2138] bpf, net: introduce bpf_struct_ops_desc. ANBZ: #13548 commit 4c5763ed996a61b51d721d0968d0df957826ea49 upstream. Move some of members of bpf_struct_ops to bpf_struct_ops_desc. type_id is unavailabe in bpf_struct_ops anymore. Modules should get it from the btf received by kmod's init function. 
Cc: netdev@vger.kernel.org Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240119225005.668602-4-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- include/linux/bpf.h | 15 ++++--- kernel/bpf/bpf_struct_ops.c | 80 +++++++++++++++++----------------- kernel/bpf/verifier.c | 8 ++-- net/bpf/bpf_dummy_struct_ops.c | 11 ++++- net/ipv4/bpf_tcp_ca.c | 8 +++- 5 files changed, 73 insertions(+), 49 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 43c1d870e27b..df640520c890 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1711,18 +1711,23 @@ struct bpf_struct_ops { void (*unreg)(void *kdata); int (*update)(void *kdata, void *old_kdata); int (*validate)(void *kdata); - const struct btf_type *type; - const struct btf_type *value_type; + void *cfi_stubs; const char *name; struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; +}; + +struct bpf_struct_ops_desc { + struct bpf_struct_ops *st_ops; + + const struct btf_type *type; + const struct btf_type *value_type; u32 type_id; u32 value_id; - void *cfi_stubs; }; #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) -const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id); +const struct bpf_struct_ops_desc *bpf_struct_ops_find(u32 type_id); void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log); bool bpf_struct_ops_get(const void *kdata); void bpf_struct_ops_put(const void *kdata); @@ -1766,7 +1771,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); #endif #else -static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) +static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(u32 type_id) { return NULL; } diff --git a/kernel/bpf/bpf_struct_ops.c 
b/kernel/bpf/bpf_struct_ops.c index 2bdc53a1bfcc..53c8437824d4 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -32,7 +32,7 @@ struct bpf_struct_ops_value { struct bpf_struct_ops_map { struct bpf_map map; struct rcu_head rcu; - const struct bpf_struct_ops *st_ops; + const struct bpf_struct_ops_desc *st_ops_desc; /* protect map_update */ struct mutex lock; /* link has all the bpf_links that is populated @@ -92,9 +92,9 @@ enum { __NR_BPF_STRUCT_OPS_TYPE, }; -static struct bpf_struct_ops * const bpf_struct_ops[] = { +static struct bpf_struct_ops_desc bpf_struct_ops[] = { #define BPF_STRUCT_OPS_TYPE(_name) \ - [BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name, + [BPF_STRUCT_OPS_TYPE_##_name] = { .st_ops = &bpf_##_name }, #include "bpf_struct_ops_types.h" #undef BPF_STRUCT_OPS_TYPE }; @@ -115,10 +115,11 @@ enum { IDX_MODULE_ID, }; -static void bpf_struct_ops_init_one(struct bpf_struct_ops *st_ops, - struct btf *btf, - struct bpf_verifier_log *log) +static void bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, + struct btf *btf, + struct bpf_verifier_log *log) { + struct bpf_struct_ops *st_ops = st_ops_desc->st_ops; const struct btf_member *member; const struct btf_type *t; s32 type_id, value_id; @@ -190,18 +191,18 @@ static void bpf_struct_ops_init_one(struct bpf_struct_ops *st_ops, pr_warn("Error in init bpf_struct_ops %s\n", st_ops->name); } else { - st_ops->type_id = type_id; - st_ops->type = t; - st_ops->value_id = value_id; - st_ops->value_type = btf_type_by_id(btf, - value_id); + st_ops_desc->type_id = type_id; + st_ops_desc->type = t; + st_ops_desc->value_id = value_id; + st_ops_desc->value_type = btf_type_by_id(btf, + value_id); } } } void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log) { - struct bpf_struct_ops *st_ops; + struct bpf_struct_ops_desc *st_ops_desc; u32 i; /* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */ @@ -210,14 +211,14 @@ void bpf_struct_ops_init(struct btf *btf, 
struct bpf_verifier_log *log) #undef BPF_STRUCT_OPS_TYPE for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { - st_ops = bpf_struct_ops[i]; - bpf_struct_ops_init_one(st_ops, btf, log); + st_ops_desc = &bpf_struct_ops[i]; + bpf_struct_ops_desc_init(st_ops_desc, btf, log); } } extern struct btf *btf_vmlinux; -static const struct bpf_struct_ops * +static const struct bpf_struct_ops_desc * bpf_struct_ops_find_value(u32 value_id) { unsigned int i; @@ -226,14 +227,14 @@ bpf_struct_ops_find_value(u32 value_id) return NULL; for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { - if (bpf_struct_ops[i]->value_id == value_id) - return bpf_struct_ops[i]; + if (bpf_struct_ops[i].value_id == value_id) + return &bpf_struct_ops[i]; } return NULL; } -const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) +const struct bpf_struct_ops_desc *bpf_struct_ops_find(u32 type_id) { unsigned int i; @@ -241,8 +242,8 @@ const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) return NULL; for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { - if (bpf_struct_ops[i]->type_id == type_id) - return bpf_struct_ops[i]; + if (bpf_struct_ops[i].type_id == type_id) + return &bpf_struct_ops[i]; } return NULL; @@ -302,7 +303,7 @@ static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key) static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map) { - const struct btf_type *t = st_map->st_ops->type; + const struct btf_type *t = st_map->st_ops_desc->type; u32 i; for (i = 0; i < btf_type_vlen(t); i++) { @@ -376,11 +377,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, void *value, u64 flags) { struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; - const struct bpf_struct_ops *st_ops = st_map->st_ops; + const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc; + const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops; struct bpf_struct_ops_value *uvalue, *kvalue; const struct btf_type *module_type; const struct 
btf_member *member; - const struct btf_type *t = st_ops->type; + const struct btf_type *t = st_ops_desc->type; struct bpf_tramp_links *tlinks; void *udata, *kdata; int prog_fd, err; @@ -393,7 +395,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, if (*(u32 *)key != 0) return -E2BIG; - err = check_zero_holes(st_ops->value_type, value); + err = check_zero_holes(st_ops_desc->value_type, value); if (err) return err; @@ -486,7 +488,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, } if (prog->type != BPF_PROG_TYPE_STRUCT_OPS || - prog->aux->attach_btf_id != st_ops->type_id || + prog->aux->attach_btf_id != st_ops_desc->type_id || prog->expected_attach_type != i) { bpf_prog_put(prog); err = -EINVAL; @@ -583,7 +585,7 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key) BPF_STRUCT_OPS_STATE_TOBEFREE); switch (prev_state) { case BPF_STRUCT_OPS_STATE_INUSE: - st_map->st_ops->unreg(&st_map->kvalue.data); + st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data); bpf_map_put(map); return 0; case BPF_STRUCT_OPS_STATE_TOBEFREE: @@ -664,22 +666,22 @@ static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr) static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) { - const struct bpf_struct_ops *st_ops; + const struct bpf_struct_ops_desc *st_ops_desc; size_t st_map_size; struct bpf_struct_ops_map *st_map; const struct btf_type *t, *vt; struct bpf_map *map; int ret; - st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id); - if (!st_ops) + st_ops_desc = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id); + if (!st_ops_desc) return ERR_PTR(-ENOTSUPP); - vt = st_ops->value_type; + vt = st_ops_desc->value_type; if (attr->value_size != vt->size) return ERR_PTR(-EINVAL); - t = st_ops->type; + t = st_ops_desc->type; st_map_size = sizeof(*st_map) + /* kvalue stores the @@ -691,7 +693,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) if 
(!st_map) return ERR_PTR(-ENOMEM); - st_map->st_ops = st_ops; + st_map->st_ops_desc = st_ops_desc; map = &st_map->map; ret = bpf_jit_charge_modmem(PAGE_SIZE); @@ -729,8 +731,8 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map) { struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; - const struct bpf_struct_ops *st_ops = st_map->st_ops; - const struct btf_type *vt = st_ops->value_type; + const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc; + const struct btf_type *vt = st_ops_desc->value_type; u64 usage; usage = sizeof(*st_map) + @@ -804,7 +806,7 @@ static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link) /* st_link->map can be NULL if * bpf_struct_ops_link_create() fails to register. */ - st_map->st_ops->unreg(&st_map->kvalue.data); + st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data); bpf_map_put(&st_map->map); } kfree(st_link); @@ -851,7 +853,7 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map if (!bpf_struct_ops_valid_to_reg(new_map)) return -EINVAL; - if (!st_map->st_ops->update) + if (!st_map->st_ops_desc->st_ops->update) return -EOPNOTSUPP; mutex_lock(&update_mutex); @@ -864,12 +866,12 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map old_st_map = container_of(old_map, struct bpf_struct_ops_map, map); /* The new and old struct_ops must be the same type. 
*/ - if (st_map->st_ops != old_st_map->st_ops) { + if (st_map->st_ops_desc != old_st_map->st_ops_desc) { err = -EINVAL; goto err_out; } - err = st_map->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data); + err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data); if (err) goto err_out; @@ -920,7 +922,7 @@ int bpf_struct_ops_link_create(union bpf_attr *attr) if (err) goto err_out; - err = st_map->st_ops->reg(st_map->kvalue.data); + err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data); if (err) { bpf_link_cleanup(&link_primer); link = NULL; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 2d8cebd545a4..0b2fcdf0f583 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -19726,6 +19726,7 @@ static void print_verification_stats(struct bpf_verifier_env *env) static int check_struct_ops_btf_id(struct bpf_verifier_env *env) { const struct btf_type *t, *func_proto; + const struct bpf_struct_ops_desc *st_ops_desc; const struct bpf_struct_ops *st_ops; const struct btf_member *member; struct bpf_prog *prog = env->prog; @@ -19738,14 +19739,15 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env) } btf_id = prog->aux->attach_btf_id; - st_ops = bpf_struct_ops_find(btf_id); - if (!st_ops) { + st_ops_desc = bpf_struct_ops_find(btf_id); + if (!st_ops_desc) { verbose(env, "attach_btf_id %u is not a supported struct\n", btf_id); return -ENOTSUPP; } + st_ops = st_ops_desc->st_ops; - t = st_ops->type; + t = st_ops_desc->type; member_idx = prog->expected_attach_type; if (member_idx >= btf_type_vlen(t)) { verbose(env, "attach to invalid member idx %u of struct %s\n", diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c index 8e96189838fc..9b13ca02be4d 100644 --- a/net/bpf/bpf_dummy_struct_ops.c +++ b/net/bpf/bpf_dummy_struct_ops.c @@ -22,6 +22,8 @@ struct bpf_dummy_ops_test_args { struct bpf_dummy_ops_state state; }; +static struct btf *bpf_dummy_ops_btf; + static struct 
bpf_dummy_ops_test_args * dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr) { @@ -90,9 +92,15 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, void *image = NULL; unsigned int op_idx; int prog_ret; + s32 type_id; int err; - if (prog->aux->attach_btf_id != st_ops->type_id) + type_id = btf_find_by_name_kind(bpf_dummy_ops_btf, + bpf_bpf_dummy_ops.name, + BTF_KIND_STRUCT); + if (type_id < 0) + return -EINVAL; + if (prog->aux->attach_btf_id != type_id) return -EOPNOTSUPP; func_proto = prog->aux->attach_func_proto; @@ -149,6 +157,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, static int bpf_dummy_init(struct btf *btf) { + bpf_dummy_ops_btf = btf; return 0; } diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index edecdf8229df..cf97f4997d5e 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -20,6 +20,7 @@ static u32 unsupported_ops[] = { static const struct btf_type *tcp_sock_type; static u32 tcp_sock_id, sock_id; +static const struct btf_type *tcp_congestion_ops_type; static int bpf_tcp_ca_init(struct btf *btf) { @@ -36,6 +37,11 @@ static int bpf_tcp_ca_init(struct btf *btf) tcp_sock_id = type_id; tcp_sock_type = btf_type_by_id(btf, tcp_sock_id); + type_id = btf_find_by_name_kind(btf, "tcp_congestion_ops", BTF_KIND_STRUCT); + if (type_id < 0) + return -EINVAL; + tcp_congestion_ops_type = btf_type_by_id(btf, type_id); + return 0; } @@ -149,7 +155,7 @@ static u32 prog_ops_moff(const struct bpf_prog *prog) u32 midx; midx = prog->expected_attach_type; - t = bpf_tcp_congestion_ops.type; + t = tcp_congestion_ops_type; m = &btf_type_member(t)[midx]; return __btf_member_bit_offset(t, m) / 8; -- Gitee From 03c5a992415c62f22517af61ac5922b5d07c97aa Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Fri, 19 Jan 2024 14:49:55 -0800 Subject: [PATCH 2085/2138] bpf: add struct_ops_tab to btf. ANBZ: #13548 commit e61995111a76633376419d1bccede8696e94e6e5 upstream. 
Maintain a registry of registered struct_ops types in the per-btf (module) struct_ops_tab. This registry allows for easy lookup of struct_ops types that are registered by a specific module. It is a preparation work for supporting kernel module struct_ops in a latter patch. Each struct_ops will be registered under its own kernel module btf and will be stored in the newly added btf->struct_ops_tab. The bpf verifier and bpf syscall (e.g. prog and map cmd) can find the struct_ops and its btf type/size/id... information from btf->struct_ops_tab. Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240119225005.668602-5-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- kernel/bpf/btf.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 43d1a45d286c..5401d2a0da82 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -241,6 +241,12 @@ struct btf_id_dtor_kfunc_tab { struct btf_id_dtor_kfunc dtors[]; }; +struct btf_struct_ops_tab { + u32 cnt; + u32 capacity; + struct bpf_struct_ops_desc ops[]; +}; + struct btf { void *data; struct btf_type **types; @@ -258,6 +264,7 @@ struct btf { struct btf_kfunc_set_tab *kfunc_set_tab; struct btf_id_dtor_kfunc_tab *dtor_kfunc_tab; struct btf_struct_metas *struct_meta_tab; + struct btf_struct_ops_tab *struct_ops_tab; /* split BTF support */ struct btf *base_btf; @@ -1688,11 +1695,20 @@ static void btf_free_struct_meta_tab(struct btf *btf) btf->struct_meta_tab = NULL; } +static void btf_free_struct_ops_tab(struct btf *btf) +{ + struct btf_struct_ops_tab *tab = btf->struct_ops_tab; + + kfree(tab); + btf->struct_ops_tab = NULL; +} + static void btf_free(struct btf *btf) { btf_free_struct_meta_tab(btf); btf_free_dtor_kfunc_tab(btf); btf_free_kfunc_set_tab(btf); + btf_free_struct_ops_tab(btf); kvfree(btf->types); 
kvfree(btf->resolved_sizes); kvfree(btf->resolved_ids); @@ -8616,6 +8632,45 @@ bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log, return !strncmp(reg_name, arg_name, cmp_len); } +static int +btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops) +{ + struct btf_struct_ops_tab *tab, *new_tab; + int i; + + tab = btf->struct_ops_tab; + if (!tab) { + tab = kzalloc(offsetof(struct btf_struct_ops_tab, ops[4]), + GFP_KERNEL); + if (!tab) + return -ENOMEM; + tab->capacity = 4; + btf->struct_ops_tab = tab; + } + + for (i = 0; i < tab->cnt; i++) + if (tab->ops[i].st_ops == st_ops) + return -EEXIST; + + if (tab->cnt == tab->capacity) { + new_tab = krealloc(tab, + offsetof(struct btf_struct_ops_tab, + ops[tab->capacity * 2]), + GFP_KERNEL); + if (!new_tab) + return -ENOMEM; + tab = new_tab; + tab->capacity *= 2; + btf->struct_ops_tab = tab; + } + + tab->ops[btf->struct_ops_tab->cnt].st_ops = st_ops; + + btf->struct_ops_tab->cnt++; + + return 0; +} + bool btf_param_match_suffix(const struct btf *btf, const struct btf_param *arg, const char *suffix) -- Gitee From a05e5cec5d243c147a5ec561358228c2cd09ef08 Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Fri, 19 Jan 2024 14:49:56 -0800 Subject: [PATCH 2086/2138] bpf: make struct_ops_map support btfs other than btf_vmlinux. ANBZ: #13548 commit 47f4f657acd5d04c78c5c5ac7022cba9ce3b4a7d upstream. Once new struct_ops can be registered from modules, btf_vmlinux is no longer the only btf that struct_ops_map would face. st_map should remember what btf it should use to get type information. 
Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240119225005.668602-6-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- kernel/bpf/bpf_struct_ops.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 53c8437824d4..12d7ed6327d3 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -46,6 +46,8 @@ struct bpf_struct_ops_map { * "links[]". */ void *image; + /* The owner moduler's btf. */ + struct btf *btf; /* uvalue->data stores the kernel struct * (e.g. tcp_congestion_ops) that is more useful * to userspace than the kvalue. For example, @@ -314,7 +316,7 @@ static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map) } } -static int check_zero_holes(const struct btf_type *t, void *data) +static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data) { const struct btf_member *member; u32 i, moff, msize, prev_mend = 0; @@ -326,8 +328,8 @@ static int check_zero_holes(const struct btf_type *t, void *data) memchr_inv(data + prev_mend, 0, moff - prev_mend)) return -EINVAL; - mtype = btf_type_by_id(btf_vmlinux, member->type); - mtype = btf_resolve_size(btf_vmlinux, mtype, &msize); + mtype = btf_type_by_id(btf, member->type); + mtype = btf_resolve_size(btf, mtype, &msize); if (IS_ERR(mtype)) return PTR_ERR(mtype); prev_mend = moff + msize; @@ -395,12 +397,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, if (*(u32 *)key != 0) return -E2BIG; - err = check_zero_holes(st_ops_desc->value_type, value); + err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value); if (err) return err; uvalue = value; - err = check_zero_holes(t, uvalue->data); + err = check_zero_holes(st_map->btf, t, uvalue->data); if (err) return err; @@ -436,7 +438,7 @@ static long 
bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, u32 moff; moff = __btf_member_bit_offset(t, member) / 8; - ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL); + ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL); if (ptype == module_type) { if (*(void **)(udata + moff)) goto reset_unlock; @@ -461,8 +463,8 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, if (!ptype || !btf_type_is_func_proto(ptype)) { u32 msize; - mtype = btf_type_by_id(btf_vmlinux, member->type); - mtype = btf_resolve_size(btf_vmlinux, mtype, &msize); + mtype = btf_type_by_id(st_map->btf, member->type); + mtype = btf_resolve_size(st_map->btf, mtype, &msize); if (IS_ERR(mtype)) { err = PTR_ERR(mtype); goto reset_unlock; @@ -602,6 +604,7 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key) static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key, struct seq_file *m) { + struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; void *value; int err; @@ -611,7 +614,8 @@ static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key, err = bpf_struct_ops_map_sys_lookup_elem(map, key, value); if (!err) { - btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id, + btf_type_seq_show(st_map->btf, + map->btf_vmlinux_value_type_id, value, m); seq_puts(m, "\n"); } @@ -721,6 +725,8 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) return ERR_PTR(-ENOMEM); } + st_map->btf = btf_vmlinux; + mutex_init(&st_map->lock); set_vm_flush_reset_perms(st_map->image); bpf_map_init_from_attr(map, attr); -- Gitee From 393caf7e6b6e6be079bc087338417bf7e79ee3eb Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Fri, 19 Jan 2024 14:49:57 -0800 Subject: [PATCH 2087/2138] bpf: pass btf object id in bpf_map_info. ANBZ: #13548 commit 1338b93346587a2a6ac79bbcf55ef5b357745573 upstream. 
Include btf object id (btf_obj_id) in bpf_map_info so that tools (ex: bpftools struct_ops dump) know the correct btf from the kernel to look up type information of struct_ops types. Since struct_ops types can be defined and registered in a module. The type information of a struct_ops type are defined in the btf of the module defining it. The userspace tools need to know which btf is for the module defining a struct_ops type. Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240119225005.668602-7-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- include/linux/bpf.h | 4 ++++ include/uapi/linux/bpf.h | 2 +- kernel/bpf/bpf_struct_ops.c | 7 +++++++ kernel/bpf/syscall.c | 2 ++ tools/include/uapi/linux/bpf.h | 2 +- 5 files changed, 15 insertions(+), 2 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index df640520c890..cc22cb0614f0 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1770,6 +1770,7 @@ struct bpf_dummy_ops { int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); #endif +void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map); #else static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(u32 type_id) { @@ -1797,6 +1798,9 @@ static inline int bpf_struct_ops_link_create(union bpf_attr *attr) { return -EOPNOTSUPP; } +static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map) +{ +} #endif diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 431bc700bcfb..d27cf57e37b0 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -6472,7 +6472,7 @@ struct bpf_map_info { __u32 btf_id; __u32 btf_key_type_id; __u32 btf_value_type_id; - __u32 :32; /* alignment pad */ + __u32 btf_vmlinux_id; __u64 map_extra; } __attribute__((aligned(8))); diff --git 
a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 12d7ed6327d3..c729d195034c 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -943,3 +943,10 @@ int bpf_struct_ops_link_create(union bpf_attr *attr) kfree(link); return err; } + +void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map) +{ + struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; + + info->btf_vmlinux_id = btf_obj_id(st_map->btf); +} diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index bfdede97786b..b5f52b687f83 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -4701,6 +4701,8 @@ static int bpf_map_get_info_by_fd(struct file *file, info.btf_value_type_id = map->btf_value_type_id; } info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; + if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) + bpf_map_struct_ops_info_fill(&info, map); if (bpf_map_is_offloaded(map)) { err = bpf_map_offload_info_fill(&info, map); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 977ec094bc2a..3120effa7100 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -6475,7 +6475,7 @@ struct bpf_map_info { __u32 btf_id; __u32 btf_key_type_id; __u32 btf_value_type_id; - __u32 :32; /* alignment pad */ + __u32 btf_vmlinux_id; __u64 map_extra; } __attribute__((aligned(8))); -- Gitee From a6ae6147c601f4516cda87d942e7fc82892388ca Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Fri, 19 Jan 2024 14:49:58 -0800 Subject: [PATCH 2088/2138] bpf: lookup struct_ops types from a given module BTF. ANBZ: #13548 commit 689423db3bda2244c24db8a64de4cdb37be1de41 upstream. This is a preparation for searching for struct_ops types from a specified module. BTF is always btf_vmlinux now. This patch passes a pointer of BTF to bpf_struct_ops_find_value() and bpf_struct_ops_find(). 
Once the new registration API of struct_ops types is used, other BTFs besides btf_vmlinux can also be passed to them. Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240119225005.668602-8-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- include/linux/bpf.h | 4 ++-- kernel/bpf/bpf_struct_ops.c | 11 ++++++----- kernel/bpf/verifier.c | 2 +- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index cc22cb0614f0..f17a568522c7 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1727,7 +1727,7 @@ struct bpf_struct_ops_desc { #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) -const struct bpf_struct_ops_desc *bpf_struct_ops_find(u32 type_id); +const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id); void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log); bool bpf_struct_ops_get(const void *kdata); void bpf_struct_ops_put(const void *kdata); @@ -1772,7 +1772,7 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, #endif void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map); #else -static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(u32 type_id) +static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id) { return NULL; } diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index c729d195034c..70cf80e8b2e8 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -221,11 +221,11 @@ void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log) extern struct btf *btf_vmlinux; static const struct bpf_struct_ops_desc * -bpf_struct_ops_find_value(u32 value_id) +bpf_struct_ops_find_value(struct btf *btf, u32 
value_id) { unsigned int i; - if (!value_id || !btf_vmlinux) + if (!value_id || !btf) return NULL; for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { @@ -236,11 +236,12 @@ bpf_struct_ops_find_value(u32 value_id) return NULL; } -const struct bpf_struct_ops_desc *bpf_struct_ops_find(u32 type_id) +const struct bpf_struct_ops_desc * +bpf_struct_ops_find(struct btf *btf, u32 type_id) { unsigned int i; - if (!type_id || !btf_vmlinux) + if (!type_id || !btf) return NULL; for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { @@ -677,7 +678,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) struct bpf_map *map; int ret; - st_ops_desc = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id); + st_ops_desc = bpf_struct_ops_find_value(btf_vmlinux, attr->btf_vmlinux_value_type_id); if (!st_ops_desc) return ERR_PTR(-ENOTSUPP); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 0b2fcdf0f583..4423ffc56665 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -19739,7 +19739,7 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env) } btf_id = prog->aux->attach_btf_id; - st_ops_desc = bpf_struct_ops_find(btf_id); + st_ops_desc = bpf_struct_ops_find(btf_vmlinux, btf_id); if (!st_ops_desc) { verbose(env, "attach_btf_id %u is not a supported struct\n", btf_id); -- Gitee From 3fa7b4a5fab87c2bc27b9131c89f3b3f51180c15 Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Fri, 19 Jan 2024 14:49:59 -0800 Subject: [PATCH 2089/2138] bpf: pass attached BTF to the bpf_struct_ops subsystem ANBZ: #13548 commit fcc2c1fb0651477c8ed78a3a293c175ccd70697a upstream. Pass the fd of a btf from the userspace to the bpf() syscall, and then convert the fd into a btf. The btf is generated from the module that defines the target BPF struct_ops type. In order to inform the kernel about the module that defines the target struct_ops type, the userspace program needs to provide a btf fd for the respective module's btf. 
This btf contains essential information on the types defined within the module, including the target struct_ops type. A btf fd must be provided to the kernel for struct_ops maps and for the bpf programs attached to those maps. In the case of the bpf programs, the attach_btf_obj_fd parameter is passed as part of the bpf_attr and is converted into a btf. This btf is then stored in the prog->aux->attach_btf field. Here, it just let the verifier access attach_btf directly. In the case of struct_ops maps, a btf fd is passed as value_type_btf_obj_fd of bpf_attr. The bpf_struct_ops_map_alloc() function converts the fd to a btf and stores it as st_map->btf. A flag BPF_F_VTYPE_BTF_OBJ_FD is added for map_flags to indicate that the value of value_type_btf_obj_fd is set. Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240119225005.668602-9-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- include/uapi/linux/bpf.h | 8 +++++ kernel/bpf/bpf_struct_ops.c | 65 ++++++++++++++++++++++++---------- kernel/bpf/syscall.c | 2 +- kernel/bpf/verifier.c | 9 +++-- tools/include/uapi/linux/bpf.h | 8 +++++ 5 files changed, 70 insertions(+), 22 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d27cf57e37b0..806c7535537c 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1327,6 +1327,9 @@ enum { /* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */ BPF_F_PATH_FD = (1U << 14), + +/* Flag for value_type_btf_obj_fd, the fd is available */ + BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15), }; /* Flags for BPF_PROG_QUERY. */ @@ -1400,6 +1403,11 @@ union bpf_attr { * to using 5 hash functions). */ __u64 map_extra; + + __s32 value_type_btf_obj_fd; /* fd pointing to a BTF + * type data for + * btf_vmlinux_value_type_id. 
+ */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 70cf80e8b2e8..aa06aee724f9 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -636,6 +636,7 @@ static void __bpf_struct_ops_map_free(struct bpf_map *map) bpf_jit_uncharge_modmem(PAGE_SIZE); } bpf_map_area_free(st_map->uvalue); + btf_put(st_map->btf); bpf_map_area_free(st_map); } @@ -664,7 +665,8 @@ static void bpf_struct_ops_map_free(struct bpf_map *map) static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr) { if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 || - (attr->map_flags & ~BPF_F_LINK) || !attr->btf_vmlinux_value_type_id) + (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) || + !attr->btf_vmlinux_value_type_id) return -EINVAL; return 0; } @@ -676,15 +678,36 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) struct bpf_struct_ops_map *st_map; const struct btf_type *t, *vt; struct bpf_map *map; + struct btf *btf; int ret; - st_ops_desc = bpf_struct_ops_find_value(btf_vmlinux, attr->btf_vmlinux_value_type_id); - if (!st_ops_desc) - return ERR_PTR(-ENOTSUPP); + if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) { + /* The map holds btf for its whole life time. 
*/ + btf = btf_get_by_fd(attr->value_type_btf_obj_fd); + if (IS_ERR(btf)) + return ERR_CAST(btf); + if (!btf_is_module(btf)) { + btf_put(btf); + return ERR_PTR(-EINVAL); + } + } else { + btf = bpf_get_btf_vmlinux(); + if (IS_ERR(btf)) + return ERR_CAST(btf); + btf_get(btf); + } + + st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id); + if (!st_ops_desc) { + ret = -ENOTSUPP; + goto errout; + } vt = st_ops_desc->value_type; - if (attr->value_size != vt->size) - return ERR_PTR(-EINVAL); + if (attr->value_size != vt->size) { + ret = -EINVAL; + goto errout; + } t = st_ops_desc->type; @@ -695,17 +718,17 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) (vt->size - sizeof(struct bpf_struct_ops_value)); st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE); - if (!st_map) - return ERR_PTR(-ENOMEM); + if (!st_map) { + ret = -ENOMEM; + goto errout; + } st_map->st_ops_desc = st_ops_desc; map = &st_map->map; ret = bpf_jit_charge_modmem(PAGE_SIZE); - if (ret) { - __bpf_struct_ops_map_free(map); - return ERR_PTR(ret); - } + if (ret) + goto errout_free; st_map->image = bpf_jit_alloc_exec(PAGE_SIZE); if (!st_map->image) { @@ -714,25 +737,31 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) * here. 
*/ bpf_jit_uncharge_modmem(PAGE_SIZE); - __bpf_struct_ops_map_free(map); - return ERR_PTR(-ENOMEM); + ret = -ENOMEM; + goto errout_free; } st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE); st_map->links = bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_links *), NUMA_NO_NODE); if (!st_map->uvalue || !st_map->links) { - __bpf_struct_ops_map_free(map); - return ERR_PTR(-ENOMEM); + ret = -ENOMEM; + goto errout_free; } - - st_map->btf = btf_vmlinux; + st_map->btf = btf; mutex_init(&st_map->lock); set_vm_flush_reset_perms(st_map->image); bpf_map_init_from_attr(map, attr); return map; + +errout_free: + __bpf_struct_ops_map_free(map); +errout: + btf_put(btf); + + return ERR_PTR(ret); } static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index b5f52b687f83..5a2e8cdce19e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1122,7 +1122,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf, return ret; } -#define BPF_MAP_CREATE_LAST_FIELD map_extra +#define BPF_MAP_CREATE_LAST_FIELD value_type_btf_obj_fd /* called via syscall */ static int map_create(union bpf_attr *attr) { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 4423ffc56665..c878d9d3e9d4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -19731,6 +19731,7 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env) const struct btf_member *member; struct bpf_prog *prog = env->prog; u32 btf_id, member_idx; + struct btf *btf; const char *mname; if (!prog->gpl_compatible) { @@ -19738,8 +19739,10 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env) return -EINVAL; } + btf = prog->aux->attach_btf ?: bpf_get_btf_vmlinux(); + btf_id = prog->aux->attach_btf_id; - st_ops_desc = bpf_struct_ops_find(btf_vmlinux, btf_id); + st_ops_desc = bpf_struct_ops_find(btf, btf_id); if (!st_ops_desc) { verbose(env, "attach_btf_id %u is not a supported struct\n", 
btf_id); @@ -19756,8 +19759,8 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env) } member = &btf_type_member(t)[member_idx]; - mname = btf_name_by_offset(btf_vmlinux, member->name_off); - func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, + mname = btf_name_by_offset(btf, member->name_off); + func_proto = btf_type_resolve_func_ptr(btf, member->type, NULL); if (!func_proto) { verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 3120effa7100..e3c9d5c7d9b8 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1327,6 +1327,9 @@ enum { /* Get path from provided FD in BPF_OBJ_PIN/BPF_OBJ_GET commands */ BPF_F_PATH_FD = (1U << 14), + +/* Flag for value_type_btf_obj_fd, the fd is available */ + BPF_F_VTYPE_BTF_OBJ_FD = (1U << 15), }; /* Flags for BPF_PROG_QUERY. */ @@ -1400,6 +1403,11 @@ union bpf_attr { * to using 5 hash functions). */ __u64 map_extra; + + __s32 value_type_btf_obj_fd; /* fd pointing to a BTF + * type data for + * btf_vmlinux_value_type_id. + */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ -- Gitee From 58197ed590f5604e07921fcfd17e999edbf216fe Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Fri, 19 Jan 2024 14:50:00 -0800 Subject: [PATCH 2090/2138] bpf: hold module refcnt in bpf_struct_ops map creation and prog verification. ANBZ: #13548 commit e3f87fdfed7b770dd7066b02262b12747881e76d upstream. To ensure that a module remains accessible whenever a struct_ops object of a struct_ops type provided by the module is still in use. struct bpf_struct_ops_map doesn't hold a refcnt to btf anymore since a module will hold a refcnt to it's btf already. But, struct_ops programs are different. They hold their associated btf, not the module since they need only btf to assure their types (signatures). 
However, verifier holds the refcnt of the associated module of a struct_ops type temporarily when verifying a struct_ops prog. Verifier needs help from the verifier operators (struct bpf_verifier_ops) provided by the owner module to verify data access of a prog, provide information, and generate code. This patch also adds a count of links (links_cnt) to bpf_struct_ops_map. It avoids bpf_struct_ops_map_put_progs() from accessing btf after calling module_put() in bpf_struct_ops_map_free(). Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240119225005.668602-10-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- include/linux/bpf.h | 1 + include/linux/bpf_verifier.h | 1 + kernel/bpf/bpf_struct_ops.c | 29 +++++++++++++++++++++++------ kernel/bpf/verifier.c | 11 +++++++++++ 4 files changed, 36 insertions(+), 6 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f17a568522c7..102cc1662cb3 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1712,6 +1712,7 @@ struct bpf_struct_ops { int (*update)(void *kdata, void *old_kdata); int (*validate)(void *kdata); void *cfi_stubs; + struct module *owner; const char *name; struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; }; diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index d7d586015a66..c7d44c1ee9cf 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -612,6 +612,7 @@ struct bpf_verifier_env { u32 prev_insn_idx; struct bpf_prog *prog; /* eBPF program being verified */ const struct bpf_verifier_ops *ops; + struct module *attach_btf_mod; /* The owner module of prog->aux->attach_btf */ struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ int stack_size; /* number of states to be processed */ bool strict_alignment; /* perform strict pointer alignment checks */ diff --git 
a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index aa06aee724f9..5826f134bc1d 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -40,6 +40,7 @@ struct bpf_struct_ops_map { * (in kvalue.data). */ struct bpf_link **links; + u32 links_cnt; /* image is a page that has all the trampolines * that stores the func args before calling the bpf_prog. * A PAGE_SIZE "image" is enough to store all trampoline for @@ -306,10 +307,9 @@ static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key) static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map) { - const struct btf_type *t = st_map->st_ops_desc->type; u32 i; - for (i = 0; i < btf_type_vlen(t); i++) { + for (i = 0; i < st_map->links_cnt; i++) { if (st_map->links[i]) { bpf_link_put(st_map->links[i]); st_map->links[i] = NULL; @@ -636,12 +636,20 @@ static void __bpf_struct_ops_map_free(struct bpf_map *map) bpf_jit_uncharge_modmem(PAGE_SIZE); } bpf_map_area_free(st_map->uvalue); - btf_put(st_map->btf); bpf_map_area_free(st_map); } static void bpf_struct_ops_map_free(struct bpf_map *map) { + struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; + + /* st_ops->owner was acquired during map_alloc to implicitly holds + * the btf's refcnt. The acquire was only done when btf_is_module() + * st_map->btf cannot be NULL here. + */ + if (btf_is_module(st_map->btf)) + module_put(st_map->st_ops_desc->st_ops->owner); + /* The struct_ops's function may switch to another struct_ops. 
* * For example, bpf_tcp_cc_x->init() may switch to @@ -677,6 +685,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) size_t st_map_size; struct bpf_struct_ops_map *st_map; const struct btf_type *t, *vt; + struct module *mod = NULL; struct bpf_map *map; struct btf *btf; int ret; @@ -690,11 +699,18 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) btf_put(btf); return ERR_PTR(-EINVAL); } + + mod = btf_try_get_module(btf); + /* mod holds a refcnt to btf. We don't need an extra refcnt + * here. + */ + btf_put(btf); + if (!mod) + return ERR_PTR(-EINVAL); } else { btf = bpf_get_btf_vmlinux(); if (IS_ERR(btf)) return ERR_CAST(btf); - btf_get(btf); } st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id); @@ -741,8 +757,9 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) goto errout_free; } st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE); + st_map->links_cnt = btf_type_vlen(t); st_map->links = - bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_links *), + bpf_map_area_alloc(st_map->links_cnt * sizeof(struct bpf_links *), NUMA_NO_NODE); if (!st_map->uvalue || !st_map->links) { ret = -ENOMEM; @@ -759,7 +776,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) errout_free: __bpf_struct_ops_map_free(map); errout: - btf_put(btf); + module_put(mod); return ERR_PTR(ret); } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c878d9d3e9d4..c9e8919f6abd 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -19740,6 +19740,15 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env) } btf = prog->aux->attach_btf ?: bpf_get_btf_vmlinux(); + if (btf_is_module(btf)) { + /* Make sure st_ops is valid through the lifetime of env */ + env->attach_btf_mod = btf_try_get_module(btf); + if (!env->attach_btf_mod) { + verbose(env, "struct_ops module %s is not found\n", + btf_get_name(btf)); + return -ENOTSUPP; + } + } btf_id = 
prog->aux->attach_btf_id; st_ops_desc = bpf_struct_ops_find(btf, btf_id); @@ -20469,6 +20478,8 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3 env->prog->expected_attach_type = 0; *prog = env->prog; + + module_put(env->attach_btf_mod); err_unlock: if (!is_priv) mutex_unlock(&bpf_verifier_lock); -- Gitee From 7733c7c672318841ea8c25c457e5f075624ea256 Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Fri, 19 Jan 2024 14:50:01 -0800 Subject: [PATCH 2091/2138] bpf: validate value_type ANBZ: #13548 commit 612d087d4ba54cef47946e22e5dabad762dd7ed5 upstream. A value_type should consist of three components: refcnt, state, and data. refcnt and state have been moved to struct bpf_struct_ops_common_value to make it easier to check the value type. Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240119225005.668602-11-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- include/linux/bpf.h | 12 +++++ kernel/bpf/bpf_struct_ops.c | 93 ++++++++++++++++++++++++------------- 2 files changed, 72 insertions(+), 33 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 102cc1662cb3..302bef3accd2 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1726,6 +1726,18 @@ struct bpf_struct_ops_desc { u32 value_id; }; +enum bpf_struct_ops_state { + BPF_STRUCT_OPS_STATE_INIT, + BPF_STRUCT_OPS_STATE_INUSE, + BPF_STRUCT_OPS_STATE_TOBEFREE, + BPF_STRUCT_OPS_STATE_READY, +}; + +struct bpf_struct_ops_common_value { + refcount_t refcnt; + enum bpf_struct_ops_state state; +}; + #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id); diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 5826f134bc1d..2fa673d3f8f5 100644 --- 
a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -13,19 +13,8 @@ #include #include -enum bpf_struct_ops_state { - BPF_STRUCT_OPS_STATE_INIT, - BPF_STRUCT_OPS_STATE_INUSE, - BPF_STRUCT_OPS_STATE_TOBEFREE, - BPF_STRUCT_OPS_STATE_READY, -}; - -#define BPF_STRUCT_OPS_COMMON_VALUE \ - refcount_t refcnt; \ - enum bpf_struct_ops_state state - struct bpf_struct_ops_value { - BPF_STRUCT_OPS_COMMON_VALUE; + struct bpf_struct_ops_common_value common; char data[] ____cacheline_aligned_in_smp; }; @@ -81,8 +70,8 @@ static DEFINE_MUTEX(update_mutex); #define BPF_STRUCT_OPS_TYPE(_name) \ extern struct bpf_struct_ops bpf_##_name; \ \ -struct bpf_struct_ops_##_name { \ - BPF_STRUCT_OPS_COMMON_VALUE; \ +struct bpf_struct_ops_##_name { \ + struct bpf_struct_ops_common_value common; \ struct _name data ____cacheline_aligned_in_smp; \ }; #include "bpf_struct_ops_types.h" @@ -113,11 +102,49 @@ const struct bpf_prog_ops bpf_struct_ops_prog_ops = { BTF_ID_LIST(st_ops_ids) BTF_ID(struct, module) +BTF_ID(struct, bpf_struct_ops_common_value) enum { IDX_MODULE_ID, + IDX_ST_OPS_COMMON_VALUE_ID, }; +extern struct btf *btf_vmlinux; + +static bool is_valid_value_type(struct btf *btf, s32 value_id, + const struct btf_type *type, + const char *value_name) +{ + const struct btf_type *common_value_type; + const struct btf_member *member; + const struct btf_type *vt, *mt; + + vt = btf_type_by_id(btf, value_id); + if (btf_vlen(vt) != 2) { + pr_warn("The number of %s's members should be 2, but we get %d\n", + value_name, btf_vlen(vt)); + return false; + } + member = btf_type_member(vt); + mt = btf_type_by_id(btf, member->type); + common_value_type = btf_type_by_id(btf_vmlinux, + st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]); + if (mt != common_value_type) { + pr_warn("The first member of %s should be bpf_struct_ops_common_value\n", + value_name); + return false; + } + member++; + mt = btf_type_by_id(btf, member->type); + if (mt != type) { + pr_warn("The second member of %s should be %s\n", + 
value_name, btf_name_by_offset(btf, type->name_off)); + return false; + } + + return true; +} + static void bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, struct btf *btf, struct bpf_verifier_log *log) @@ -138,14 +165,6 @@ static void bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, } sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name); - value_id = btf_find_by_name_kind(btf, value_name, - BTF_KIND_STRUCT); - if (value_id < 0) { - pr_warn("Cannot find struct %s in %s\n", - value_name, btf_get_name(btf)); - return; - } - type_id = btf_find_by_name_kind(btf, st_ops->name, BTF_KIND_STRUCT); if (type_id < 0) { @@ -160,6 +179,16 @@ static void bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, return; } + value_id = btf_find_by_name_kind(btf, value_name, + BTF_KIND_STRUCT); + if (value_id < 0) { + pr_warn("Cannot find struct %s in %s\n", + value_name, btf_get_name(btf)); + return; + } + if (!is_valid_value_type(btf, value_id, t, value_name)) + return; + for_each_member(i, t, member) { const struct btf_type *func_proto; @@ -219,8 +248,6 @@ void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log) } } -extern struct btf *btf_vmlinux; - static const struct bpf_struct_ops_desc * bpf_struct_ops_find_value(struct btf *btf, u32 value_id) { @@ -276,7 +303,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, kvalue = &st_map->kvalue; /* Pair with smp_store_release() during map_update */ - state = smp_load_acquire(&kvalue->state); + state = smp_load_acquire(&kvalue->common.state); if (state == BPF_STRUCT_OPS_STATE_INIT) { memset(value, 0, map->value_size); return 0; @@ -287,7 +314,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, */ uvalue = value; memcpy(uvalue, st_map->uvalue, map->value_size); - uvalue->state = state; + uvalue->common.state = state; /* This value offers the user space a general estimate of how * many sockets are still utilizing this struct_ops 
for TCP @@ -295,7 +322,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, * should sufficiently meet our present goals. */ refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt); - refcount_set(&uvalue->refcnt, max_t(s64, refcnt, 0)); + refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0)); return 0; } @@ -407,7 +434,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, if (err) return err; - if (uvalue->state || refcount_read(&uvalue->refcnt)) + if (uvalue->common.state || refcount_read(&uvalue->common.refcnt)) return -EINVAL; tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL); @@ -419,7 +446,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, mutex_lock(&st_map->lock); - if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) { + if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) { err = -EBUSY; goto unlock; } @@ -534,7 +561,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, * * Pair with smp_load_acquire() during lookup_elem(). */ - smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_READY); + smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY); goto unlock; } @@ -552,7 +579,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, * It ensures the above udata updates (e.g. prog->aux->id) * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set. 
*/ - smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE); + smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE); goto unlock; } @@ -583,7 +610,7 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key) if (st_map->map.map_flags & BPF_F_LINK) return -EOPNOTSUPP; - prev_state = cmpxchg(&st_map->kvalue.state, + prev_state = cmpxchg(&st_map->kvalue.common.state, BPF_STRUCT_OPS_STATE_INUSE, BPF_STRUCT_OPS_STATE_TOBEFREE); switch (prev_state) { @@ -844,7 +871,7 @@ static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map) return map->map_type == BPF_MAP_TYPE_STRUCT_OPS && map->map_flags & BPF_F_LINK && /* Pair with smp_store_release() during map_update */ - smp_load_acquire(&st_map->kvalue.state) == BPF_STRUCT_OPS_STATE_READY; + smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY; } static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link) -- Gitee From 22cfcf269b1f6bb864d8046fb65508f63ae98dae Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Fri, 19 Jan 2024 14:50:02 -0800 Subject: [PATCH 2092/2138] bpf, net: switch to dynamic registration ANBZ: #13548 commit f6be98d19985411ca1f3d53413d94d5b7f41c200 upstream. Replace the static list of struct_ops types with per-btf struct_ops_tab to enable dynamic registration. Both bpf_dummy_ops and bpf_tcp_ca now utilize the registration function instead of being listed in bpf_struct_ops_types.h. 
Cc: netdev@vger.kernel.org Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240119225005.668602-12-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- include/linux/bpf.h | 27 +++++--- include/linux/btf.h | 12 ++++ kernel/bpf/bpf_struct_ops.c | 100 ++++-------------------------- kernel/bpf/bpf_struct_ops_types.h | 12 ---- kernel/bpf/btf.c | 86 +++++++++++++++++++++++-- net/bpf/bpf_dummy_struct_ops.c | 11 +++- net/ipv4/bpf_tcp_ca.c | 12 +++- 7 files changed, 142 insertions(+), 118 deletions(-) delete mode 100644 kernel/bpf/bpf_struct_ops_types.h diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 302bef3accd2..66b263c91685 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1739,9 +1739,20 @@ struct bpf_struct_ops_common_value { }; #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) +/* This macro helps developer to register a struct_ops type and generate + * type information correctly. Developers should use this macro to register + * a struct_ops type instead of calling __register_bpf_struct_ops() directly. 
+ */ +#define register_bpf_struct_ops(st_ops, type) \ + ({ \ + struct bpf_struct_ops_##type { \ + struct bpf_struct_ops_common_value common; \ + struct type data ____cacheline_aligned_in_smp; \ + }; \ + BTF_TYPE_EMIT(struct bpf_struct_ops_##type); \ + __register_bpf_struct_ops(st_ops); \ + }) #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) -const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id); -void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log); bool bpf_struct_ops_get(const void *kdata); void bpf_struct_ops_put(const void *kdata); int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, @@ -1783,16 +1794,12 @@ struct bpf_dummy_ops { int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); #endif +int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, + struct btf *btf, + struct bpf_verifier_log *log); void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map); #else -static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id) -{ - return NULL; -} -static inline void bpf_struct_ops_init(struct btf *btf, - struct bpf_verifier_log *log) -{ -} +#define register_bpf_struct_ops(st_ops, type) ({ (void *)(st_ops); 0; }) static inline bool bpf_try_module_get(const void *data, struct module *owner) { return try_module_get(owner); diff --git a/include/linux/btf.h b/include/linux/btf.h index 4480df4e9fbf..ca4c482efded 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -500,6 +500,18 @@ bool btf_param_match_suffix(const struct btf *btf, struct bpf_verifier_log; +#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) +struct bpf_struct_ops; +int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops); +const struct bpf_struct_ops_desc *bpf_struct_ops_find_value(struct btf *btf, u32 value_id); +const struct bpf_struct_ops_desc 
*bpf_struct_ops_find(struct btf *btf, u32 type_id); +#else +static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf *btf, u32 type_id) +{ + return NULL; +} +#endif + #ifdef CONFIG_BPF_SYSCALL const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); const char *btf_name_by_offset(const struct btf *btf, u32 offset); diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 2fa673d3f8f5..d2e43f2a244e 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -62,35 +62,6 @@ static DEFINE_MUTEX(update_mutex); #define VALUE_PREFIX "bpf_struct_ops_" #define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1) -/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is - * the map's value exposed to the userspace and its btf-type-id is - * stored at the map->btf_vmlinux_value_type_id. - * - */ -#define BPF_STRUCT_OPS_TYPE(_name) \ -extern struct bpf_struct_ops bpf_##_name; \ - \ -struct bpf_struct_ops_##_name { \ - struct bpf_struct_ops_common_value common; \ - struct _name data ____cacheline_aligned_in_smp; \ -}; -#include "bpf_struct_ops_types.h" -#undef BPF_STRUCT_OPS_TYPE - -enum { -#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name, -#include "bpf_struct_ops_types.h" -#undef BPF_STRUCT_OPS_TYPE - __NR_BPF_STRUCT_OPS_TYPE, -}; - -static struct bpf_struct_ops_desc bpf_struct_ops[] = { -#define BPF_STRUCT_OPS_TYPE(_name) \ - [BPF_STRUCT_OPS_TYPE_##_name] = { .st_ops = &bpf_##_name }, -#include "bpf_struct_ops_types.h" -#undef BPF_STRUCT_OPS_TYPE -}; - const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = { }; @@ -145,9 +116,9 @@ static bool is_valid_value_type(struct btf *btf, s32 value_id, return true; } -static void bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, - struct btf *btf, - struct bpf_verifier_log *log) +int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, + struct btf *btf, + struct bpf_verifier_log *log) { struct 
bpf_struct_ops *st_ops = st_ops_desc->st_ops; const struct btf_member *member; @@ -161,7 +132,7 @@ static void bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, sizeof(value_name)) { pr_warn("struct_ops name %s is too long\n", st_ops->name); - return; + return -EINVAL; } sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name); @@ -170,13 +141,13 @@ static void bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, if (type_id < 0) { pr_warn("Cannot find struct %s in %s\n", st_ops->name, btf_get_name(btf)); - return; + return -EINVAL; } t = btf_type_by_id(btf, type_id); if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) { pr_warn("Cannot support #%u members in struct %s\n", btf_type_vlen(t), st_ops->name); - return; + return -EINVAL; } value_id = btf_find_by_name_kind(btf, value_name, @@ -184,10 +155,10 @@ static void bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, if (value_id < 0) { pr_warn("Cannot find struct %s in %s\n", value_name, btf_get_name(btf)); - return; + return -EINVAL; } if (!is_valid_value_type(btf, value_id, t, value_name)) - return; + return -EINVAL; for_each_member(i, t, member) { const struct btf_type *func_proto; @@ -196,13 +167,13 @@ static void bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, if (!*mname) { pr_warn("anon member in struct %s is not supported\n", st_ops->name); - break; + return -EOPNOTSUPP; } if (__btf_member_bitfield_size(t, member)) { pr_warn("bit field member %s in struct %s is not supported\n", mname, st_ops->name); - break; + return -EOPNOTSUPP; } func_proto = btf_type_resolve_func_ptr(btf, @@ -214,7 +185,7 @@ static void bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, &st_ops->func_models[i])) { pr_warn("Error in parsing func ptr %s in struct %s\n", mname, st_ops->name); - break; + return -EINVAL; } } @@ -222,6 +193,7 @@ static void bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, if (st_ops->init(btf)) { pr_warn("Error in 
init bpf_struct_ops %s\n", st_ops->name); + return -EINVAL; } else { st_ops_desc->type_id = type_id; st_ops_desc->type = t; @@ -230,54 +202,8 @@ static void bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, value_id); } } -} -void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log) -{ - struct bpf_struct_ops_desc *st_ops_desc; - u32 i; - - /* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */ -#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name); -#include "bpf_struct_ops_types.h" -#undef BPF_STRUCT_OPS_TYPE - - for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { - st_ops_desc = &bpf_struct_ops[i]; - bpf_struct_ops_desc_init(st_ops_desc, btf, log); - } -} - -static const struct bpf_struct_ops_desc * -bpf_struct_ops_find_value(struct btf *btf, u32 value_id) -{ - unsigned int i; - - if (!value_id || !btf) - return NULL; - - for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { - if (bpf_struct_ops[i].value_id == value_id) - return &bpf_struct_ops[i]; - } - - return NULL; -} - -const struct bpf_struct_ops_desc * -bpf_struct_ops_find(struct btf *btf, u32 type_id) -{ - unsigned int i; - - if (!type_id || !btf) - return NULL; - - for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) { - if (bpf_struct_ops[i].type_id == type_id) - return &bpf_struct_ops[i]; - } - - return NULL; + return 0; } static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key, diff --git a/kernel/bpf/bpf_struct_ops_types.h b/kernel/bpf/bpf_struct_ops_types.h deleted file mode 100644 index 5678a9ddf817..000000000000 --- a/kernel/bpf/bpf_struct_ops_types.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* internal file - do not include directly */ - -#ifdef CONFIG_BPF_JIT -#ifdef CONFIG_NET -BPF_STRUCT_OPS_TYPE(bpf_dummy_ops) -#endif -#ifdef CONFIG_INET -#include -BPF_STRUCT_OPS_TYPE(tcp_congestion_ops) -#endif -#endif diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 
5401d2a0da82..d9b3f65cc6ea 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -5785,8 +5786,6 @@ struct btf *btf_parse_vmlinux(void) /* btf_parse_vmlinux() runs under bpf_verifier_lock */ bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); - bpf_struct_ops_init(btf, log); - refcount_set(&btf->refcnt, 1); err = btf_alloc_id(btf); @@ -8632,11 +8631,13 @@ bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log, return !strncmp(reg_name, arg_name, cmp_len); } +#ifdef CONFIG_BPF_JIT static int -btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops) +btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops, + struct bpf_verifier_log *log) { struct btf_struct_ops_tab *tab, *new_tab; - int i; + int i, err; tab = btf->struct_ops_tab; if (!tab) { @@ -8666,11 +8667,88 @@ btf_add_struct_ops(struct btf *btf, struct bpf_struct_ops *st_ops) tab->ops[btf->struct_ops_tab->cnt].st_ops = st_ops; + err = bpf_struct_ops_desc_init(&tab->ops[btf->struct_ops_tab->cnt], btf, log); + if (err) + return err; + btf->struct_ops_tab->cnt++; return 0; } +const struct bpf_struct_ops_desc * +bpf_struct_ops_find_value(struct btf *btf, u32 value_id) +{ + const struct bpf_struct_ops_desc *st_ops_list; + unsigned int i; + u32 cnt; + + if (!value_id) + return NULL; + if (!btf->struct_ops_tab) + return NULL; + + cnt = btf->struct_ops_tab->cnt; + st_ops_list = btf->struct_ops_tab->ops; + for (i = 0; i < cnt; i++) { + if (st_ops_list[i].value_id == value_id) + return &st_ops_list[i]; + } + + return NULL; +} + +const struct bpf_struct_ops_desc * +bpf_struct_ops_find(struct btf *btf, u32 type_id) +{ + const struct bpf_struct_ops_desc *st_ops_list; + unsigned int i; + u32 cnt; + + if (!type_id) + return NULL; + if (!btf->struct_ops_tab) + return NULL; + + cnt = btf->struct_ops_tab->cnt; + st_ops_list = btf->struct_ops_tab->ops; + for (i = 0; i < cnt; i++) { + if 
(st_ops_list[i].type_id == type_id) + return &st_ops_list[i]; + } + + return NULL; +} + +int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops) +{ + struct bpf_verifier_log *log; + struct btf *btf; + int err = 0; + + btf = btf_get_module_btf(st_ops->owner); + if (!btf) + return -EINVAL; + + log = kzalloc(sizeof(*log), GFP_KERNEL | __GFP_NOWARN); + if (!log) { + err = -ENOMEM; + goto errout; + } + + log->level = BPF_LOG_KERNEL; + + err = btf_add_struct_ops(btf, st_ops, log); + +errout: + kfree(log); + btf_put(btf); + + return err; +} +EXPORT_SYMBOL_GPL(__register_bpf_struct_ops); +#endif + bool btf_param_match_suffix(const struct btf *btf, const struct btf_param *arg, const char *suffix) diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c index 9b13ca02be4d..c931905ca4d5 100644 --- a/net/bpf/bpf_dummy_struct_ops.c +++ b/net/bpf/bpf_dummy_struct_ops.c @@ -7,7 +7,7 @@ #include #include -extern struct bpf_struct_ops bpf_bpf_dummy_ops; +static struct bpf_struct_ops bpf_bpf_dummy_ops; /* A common type for test_N with return value in bpf_dummy_ops */ typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...); @@ -257,7 +257,7 @@ static struct bpf_dummy_ops __bpf_bpf_dummy_ops = { .test_sleepable = bpf_dummy_test_sleepable, }; -struct bpf_struct_ops bpf_bpf_dummy_ops = { +static struct bpf_struct_ops bpf_bpf_dummy_ops = { .verifier_ops = &bpf_dummy_verifier_ops, .init = bpf_dummy_init, .check_member = bpf_dummy_ops_check_member, @@ -266,4 +266,11 @@ struct bpf_struct_ops bpf_bpf_dummy_ops = { .unreg = bpf_dummy_unreg, .name = "bpf_dummy_ops", .cfi_stubs = &__bpf_bpf_dummy_ops, + .owner = THIS_MODULE, }; + +static int __init bpf_dummy_struct_ops_init(void) +{ + return register_bpf_struct_ops(&bpf_bpf_dummy_ops, bpf_dummy_ops); +} +late_initcall(bpf_dummy_struct_ops_init); diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index cf97f4997d5e..5d83d7b058fa 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c 
@@ -12,7 +12,7 @@ #include /* "extern" is to avoid sparse warning. It is only used in bpf_struct_ops.c. */ -extern struct bpf_struct_ops bpf_tcp_congestion_ops; +static struct bpf_struct_ops bpf_tcp_congestion_ops; static u32 unsupported_ops[] = { offsetof(struct tcp_congestion_ops, get_info), @@ -345,7 +345,7 @@ static struct tcp_congestion_ops __bpf_ops_tcp_congestion_ops = { .release = __bpf_tcp_ca_release, }; -struct bpf_struct_ops bpf_tcp_congestion_ops = { +static struct bpf_struct_ops bpf_tcp_congestion_ops = { .verifier_ops = &bpf_tcp_ca_verifier_ops, .reg = bpf_tcp_ca_reg, .unreg = bpf_tcp_ca_unreg, @@ -356,10 +356,16 @@ struct bpf_struct_ops bpf_tcp_congestion_ops = { .validate = bpf_tcp_ca_validate, .name = "tcp_congestion_ops", .cfi_stubs = &__bpf_ops_tcp_congestion_ops, + .owner = THIS_MODULE, }; static int __init bpf_tcp_ca_kfunc_init(void) { - return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set); + int ret; + + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set); + ret = ret ?: register_bpf_struct_ops(&bpf_tcp_congestion_ops, tcp_congestion_ops); + + return ret; } late_initcall(bpf_tcp_ca_kfunc_init); -- Gitee From d6d6c50b2078b5849268002afa178e4051c22e78 Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Fri, 19 Jan 2024 14:50:03 -0800 Subject: [PATCH 2093/2138] libbpf: Find correct module BTFs for struct_ops maps and progs. ANBZ: #13548 commit 9e926acda0c2e21bca431a1818665ddcd6939755 upstream. Locate the module BTFs for struct_ops maps and progs and pass them to the kernel. This ensures that the kernel correctly resolves type IDs from the appropriate module BTFs. For the map of a struct_ops object, the FD of the module BTF is set to bpf_map to keep a reference to the module BTF. The FD is passed to the kernel as value_type_btf_obj_fd when the struct_ops object is loaded. For a bpf_struct_ops prog, attach_btf_obj_fd of bpf_prog is the FD of a module BTF in the kernel. 
Signed-off-by: Kui-Feng Lee Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20240119225005.668602-13-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- tools/lib/bpf/bpf.c | 4 +++- tools/lib/bpf/bpf.h | 4 +++- tools/lib/bpf/libbpf.c | 41 ++++++++++++++++++++++++++--------- tools/lib/bpf/libbpf_probes.c | 1 + 4 files changed, 38 insertions(+), 12 deletions(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index b0f1913763a3..ee18aea4a7b5 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -169,7 +169,8 @@ int bpf_map_create(enum bpf_map_type map_type, __u32 max_entries, const struct bpf_map_create_opts *opts) { - const size_t attr_sz = offsetofend(union bpf_attr, map_extra); + const size_t attr_sz = offsetofend(union bpf_attr, + value_type_btf_obj_fd); union bpf_attr attr; int fd; @@ -191,6 +192,7 @@ int bpf_map_create(enum bpf_map_type map_type, attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0); attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0); attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0); + attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, -1); attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0); attr.map_flags = OPTS_GET(opts, map_flags, 0); diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 107fef748868..db0ff8ade19a 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -51,8 +51,10 @@ struct bpf_map_create_opts { __u32 numa_node; __u32 map_ifindex; + __s32 value_type_btf_obj_fd; + size_t:0; }; -#define bpf_map_create_opts__last_field map_ifindex +#define bpf_map_create_opts__last_field value_type_btf_obj_fd LIBBPF_API int bpf_map_create(enum bpf_map_type map_type, const char *map_name, diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 2fad178949ef..7f78edee5a43 100644 --- a/tools/lib/bpf/libbpf.c +++ 
b/tools/lib/bpf/libbpf.c @@ -518,6 +518,7 @@ struct bpf_map { struct bpf_map_def def; __u32 numa_node; __u32 btf_var_idx; + int mod_btf_fd; __u32 btf_key_type_id; __u32 btf_value_type_id; __u32 btf_vmlinux_value_type_id; @@ -918,22 +919,29 @@ find_member_by_name(const struct btf *btf, const struct btf_type *t, return NULL; } +static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, + __u16 kind, struct btf **res_btf, + struct module_btf **res_mod_btf); + #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_" static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, const char *name, __u32 kind); static int -find_struct_ops_kern_types(const struct btf *btf, const char *tname, +find_struct_ops_kern_types(struct bpf_object *obj, const char *tname, + struct module_btf **mod_btf, const struct btf_type **type, __u32 *type_id, const struct btf_type **vtype, __u32 *vtype_id, const struct btf_member **data_member) { const struct btf_type *kern_type, *kern_vtype; const struct btf_member *kern_data_member; + struct btf *btf; __s32 kern_vtype_id, kern_type_id; __u32 i; - kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT); + kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT, + &btf, mod_btf); if (kern_type_id < 0) { pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n", tname); @@ -987,14 +995,16 @@ static bool bpf_map__is_struct_ops(const struct bpf_map *map) } /* Init the map's fields that depend on kern_btf */ -static int bpf_map__init_kern_struct_ops(struct bpf_map *map, - const struct btf *btf, - const struct btf *kern_btf) +static int bpf_map__init_kern_struct_ops(struct bpf_map *map) { const struct btf_member *member, *kern_member, *kern_data_member; const struct btf_type *type, *kern_type, *kern_vtype; __u32 i, kern_type_id, kern_vtype_id, kern_data_off; + struct bpf_object *obj = map->obj; + const struct btf *btf = obj->btf; struct bpf_struct_ops *st_ops; + const struct btf *kern_btf; + struct 
module_btf *mod_btf; void *data, *kern_data; const char *tname; int err; @@ -1002,16 +1012,19 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map, st_ops = map->st_ops; type = st_ops->type; tname = st_ops->tname; - err = find_struct_ops_kern_types(kern_btf, tname, + err = find_struct_ops_kern_types(obj, tname, &mod_btf, &kern_type, &kern_type_id, &kern_vtype, &kern_vtype_id, &kern_data_member); if (err) return err; + kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux; + pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n", map->name, st_ops->type_id, kern_type_id, kern_vtype_id); + map->mod_btf_fd = mod_btf ? mod_btf->fd : -1; map->def.value_size = kern_vtype->size; map->btf_vmlinux_value_type_id = kern_vtype_id; @@ -1087,6 +1100,8 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map, return -ENOTSUP; } + if (mod_btf) + prog->attach_btf_obj_fd = mod_btf->fd; prog->attach_btf_id = kern_type_id; prog->expected_attach_type = kern_member_idx; @@ -1129,8 +1144,7 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj) if (!bpf_map__is_struct_ops(map)) continue; - err = bpf_map__init_kern_struct_ops(map, obj->btf, - obj->btf_vmlinux); + err = bpf_map__init_kern_struct_ops(map); if (err) return err; } @@ -5111,8 +5125,13 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b create_attr.numa_node = map->numa_node; create_attr.map_extra = map->map_extra; - if (bpf_map__is_struct_ops(map)) + if (bpf_map__is_struct_ops(map)) { create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; + if (map->mod_btf_fd >= 0) { + create_attr.value_type_btf_obj_fd = map->mod_btf_fd; + create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD; + } + } if (obj->btf && btf__fd(obj->btf) >= 0) { create_attr.btf_fd = btf__fd(obj->btf); @@ -9423,7 +9442,9 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac *btf_obj_fd = 0; *btf_type_id = 1; } else { - err = 
find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id); + err = find_kernel_btf_id(prog->obj, attach_name, + attach_type, btf_obj_fd, + btf_type_id); } if (err) { pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n", diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index 9c4db90b92b6..98373d126d9d 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -326,6 +326,7 @@ static int probe_map_create(enum bpf_map_type map_type) case BPF_MAP_TYPE_STRUCT_OPS: /* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */ opts.btf_vmlinux_value_type_id = 1; + opts.value_type_btf_obj_fd = -1; exp_err = -524; /* -ENOTSUPP */ break; case BPF_MAP_TYPE_BLOOM_FILTER: -- Gitee From 2a1ba64d519de1875f22a380899a8fc2684b30e4 Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Fri, 19 Jan 2024 14:50:04 -0800 Subject: [PATCH 2094/2138] bpf: export btf_ctx_access to modules. ANBZ: #13548 commit 7c81c2490c73e614c6d48e4f339f4f224140b565 upstream. The module requires the use of btf_ctx_access() to invoke bpf_tracing_btf_ctx_access() from a module. This function is valuable for implementing validation functions that ensure proper access to ctx. 
Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240119225005.668602-14-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- kernel/bpf/btf.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index d9b3f65cc6ea..a2c56951a747 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6144,6 +6144,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, __btf_name_by_offset(btf, t->name_off)); return true; } +EXPORT_SYMBOL_GPL(btf_ctx_access); enum bpf_struct_walk_result { /* < 0 error */ -- Gitee From 42df967f462f1ef69435e8901274cc7266c3e0d9 Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Fri, 19 Jan 2024 14:50:05 -0800 Subject: [PATCH 2095/2138] selftests/bpf: test case for register_bpf_struct_ops(). ANBZ: #13548 commit 0253e0590e2dc46996534371d56b5297099aed4e upstream. Create a new struct_ops type called bpf_testmod_ops within the bpf_testmod module. When a struct_ops object is registered, the bpf_testmod module will invoke test_2 from the module. 
Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240119225005.668602-15-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- .../selftests/bpf/bpf_testmod/bpf_testmod.c | 66 +++++++++++++++++ .../selftests/bpf/bpf_testmod/bpf_testmod.h | 5 ++ .../bpf/prog_tests/test_struct_ops_module.c | 74 +++++++++++++++++++ .../selftests/bpf/progs/struct_ops_module.c | 29 ++++++++ 4 files changed, 174 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c create mode 100644 tools/testing/selftests/bpf/progs/struct_ops_module.c diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 4006583b8bd9..77527d866b9f 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ +#include #include #include #include @@ -518,11 +519,75 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg) BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset) BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids) +static int bpf_testmod_ops_init(struct btf *btf) +{ + return 0; +} + +static bool bpf_testmod_ops_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + return bpf_tracing_btf_ctx_access(off, size, type, prog, info); +} + +static int bpf_testmod_ops_init_member(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata) +{ + return 0; +} + static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = { .owner = THIS_MODULE, .set = &bpf_testmod_check_kfunc_ids, }; +static const struct bpf_verifier_ops bpf_testmod_verifier_ops = { + .is_valid_access = bpf_testmod_ops_is_valid_access, +}; + +static int 
bpf_dummy_reg(void *kdata) +{ + struct bpf_testmod_ops *ops = kdata; + int r; + + r = ops->test_2(4, 3); + + return 0; +} + +static void bpf_dummy_unreg(void *kdata) +{ +} + +static int bpf_testmod_test_1(void) +{ + return 0; +} + +static int bpf_testmod_test_2(int a, int b) +{ + return 0; +} + +static struct bpf_testmod_ops __bpf_testmod_ops = { + .test_1 = bpf_testmod_test_1, + .test_2 = bpf_testmod_test_2, +}; + +struct bpf_struct_ops bpf_bpf_testmod_ops = { + .verifier_ops = &bpf_testmod_verifier_ops, + .init = bpf_testmod_ops_init, + .init_member = bpf_testmod_ops_init_member, + .reg = bpf_dummy_reg, + .unreg = bpf_dummy_unreg, + .cfi_stubs = &__bpf_testmod_ops, + .name = "bpf_testmod_ops", + .owner = THIS_MODULE, +}; + extern int bpf_fentry_test1(int a); static int bpf_testmod_init(void) @@ -533,6 +598,7 @@ static int bpf_testmod_init(void) ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set); + ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops); if (ret < 0) return ret; if (bpf_fentry_test1(0) < 0) diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h index f32793efe095..ca5435751c79 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h @@ -28,4 +28,9 @@ struct bpf_iter_testmod_seq { int cnt; }; +struct bpf_testmod_ops { + int (*test_1)(void); + int (*test_2)(int a, int b); +}; + #endif /* _BPF_TESTMOD_H */ diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c new file mode 100644 index 000000000000..ae98a48775ec --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c @@ 
-0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include +#include + +#include "struct_ops_module.skel.h" + +static void check_map_info(struct bpf_map_info *info) +{ + struct bpf_btf_info btf_info; + char btf_name[256]; + u32 btf_info_len = sizeof(btf_info); + int err, fd; + + fd = bpf_btf_get_fd_by_id(info->btf_vmlinux_id); + if (!ASSERT_GE(fd, 0, "get_value_type_btf_obj_fd")) + return; + + memset(&btf_info, 0, sizeof(btf_info)); + btf_info.name = ptr_to_u64(btf_name); + btf_info.name_len = sizeof(btf_name); + err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_info_len); + if (!ASSERT_OK(err, "get_value_type_btf_obj_info")) + goto cleanup; + + if (!ASSERT_EQ(strcmp(btf_name, "bpf_testmod"), 0, "get_value_type_btf_obj_name")) + goto cleanup; + +cleanup: + close(fd); +} + +static void test_struct_ops_load(void) +{ + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts); + struct struct_ops_module *skel; + struct bpf_map_info info = {}; + struct bpf_link *link; + int err; + u32 len; + + skel = struct_ops_module__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "struct_ops_module_open")) + return; + + err = struct_ops_module__load(skel); + if (!ASSERT_OK(err, "struct_ops_module_load")) + goto cleanup; + + len = sizeof(info); + err = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.testmod_1), &info, + &len); + if (!ASSERT_OK(err, "bpf_map_get_info_by_fd")) + goto cleanup; + + link = bpf_map__attach_struct_ops(skel->maps.testmod_1); + ASSERT_OK_PTR(link, "attach_test_mod_1"); + + /* test_2() will be called from bpf_dummy_reg() in bpf_testmod.c */ + ASSERT_EQ(skel->bss->test_2_result, 7, "test_2_result"); + + bpf_link__destroy(link); + + check_map_info(&info); + +cleanup: + struct_ops_module__destroy(skel); +} + +void serial_test_struct_ops_module(void) +{ + if (test__start_subtest("test_struct_ops_load")) + test_struct_ops_load(); +} diff --git a/tools/testing/selftests/bpf/progs/struct_ops_module.c 
b/tools/testing/selftests/bpf/progs/struct_ops_module.c new file mode 100644 index 000000000000..5a411db986cd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_module.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include +#include +#include +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +int test_2_result = 0; + +SEC("struct_ops/test_1") +int BPF_PROG(test_1) +{ + return 0xdeadbeef; +} + +SEC("struct_ops/test_2") +int BPF_PROG(test_2, int a, int b) +{ + test_2_result = a + b; + return a + b; +} + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_1 = { + .test_1 = (void *)test_1, + .test_2 = (void *)test_2, +}; -- Gitee From ec9bc6f5054b14653ef65ce4fbca9da3553f4134 Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Thu, 25 Jan 2024 18:31:13 -0800 Subject: [PATCH 2096/2138] bpf: Fix error checks against bpf_get_btf_vmlinux(). ANBZ: #13548 commit e6be8cd5d3cf54ccd0ae66027d6f4697b15f4c3e upstream. In bpf_struct_ops_map_alloc, it needs to check for NULL in the returned pointer of bpf_get_btf_vmlinux() when CONFIG_DEBUG_INFO_BTF is not set. ENOTSUPP is used to preserve the same behavior before the struct_ops kmod support. In the function check_struct_ops_btf_id(), instead of redoing the bpf_get_btf_vmlinux() that has already been done in syscall.c, the fix here is to check for prog->aux->attach_btf_id. BPF_PROG_TYPE_STRUCT_OPS must require attach_btf_id and syscall.c guarantees a valid attach_btf as long as attach_btf_id is set. When attach_btf_id is not set, this patch returns -ENOTSUPP because it is what the selftest in test_libbpf_probe_prog_types() and libbpf_probes.c are expecting for feature probing purpose. 
Changes from v1: - Remove an unnecessary NULL check in check_struct_ops_btf_id() Reported-by: syzbot+88f0aafe5f950d7489d7@syzkaller.appspotmail.com Closes: https://lore.kernel.org/bpf/00000000000040d68a060fc8db8c@google.com/ Reported-by: syzbot+1336f3d4b10bcda75b89@syzkaller.appspotmail.com Closes: https://lore.kernel.org/bpf/00000000000026353b060fc21c07@google.com/ Fixes: fcc2c1fb0651 ("bpf: pass attached BTF to the bpf_struct_ops subsystem") Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20240126023113.1379504-1-thinker.li@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- kernel/bpf/bpf_struct_ops.c | 2 ++ kernel/bpf/verifier.c | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index d2e43f2a244e..b354c3883e75 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -664,6 +664,8 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) btf = bpf_get_btf_vmlinux(); if (IS_ERR(btf)) return ERR_CAST(btf); + if (!btf) + return ERR_PTR(-ENOTSUPP); } st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c9e8919f6abd..4948ef414d67 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -19739,7 +19739,10 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env) return -EINVAL; } - btf = prog->aux->attach_btf ?: bpf_get_btf_vmlinux(); + if (!prog->aux->attach_btf_id) + return -ENOTSUPP; + + btf = prog->aux->attach_btf; if (btf_is_module(btf)) { /* Make sure st_ops is valid through the lifetime of env */ env->attach_btf_mod = btf_try_get_module(btf); -- Gitee From 18bc341ed0dca992b9f101a92d4def5bc53758ab Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Thu, 8 Feb 2024 14:24:21 +0800 Subject: [PATCH 2097/2138] bpf, btf: Fix return value 
of register_btf_id_dtor_kfuncs ANBZ: #13548 commit b9a395f0f7af66fe8224450481b99d4f83b57207 upstream. The same as __register_btf_kfunc_id_set(), to let the modules with stripped btf section loaded, this patch changes the return value of register_btf_id_dtor_kfuncs() too from -ENOENT to 0 when btf is NULL. Signed-off-by: Geliang Tang Link: https://lore.kernel.org/r/eab65586d7fb0e72f2707d3747c7d4a5d60c823f.1707373307.git.tanggeliang@kylinos.cn Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- kernel/bpf/btf.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index a2c56951a747..a99c8c321f27 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -8039,10 +8039,8 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c pr_err("missing vmlinux BTF, cannot register dtor kfuncs\n"); return -ENOENT; } - if (owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) { - pr_err("missing module BTF, cannot register dtor kfuncs\n"); - return -ENOENT; - } + if (owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) + pr_warn("missing module BTF, cannot register dtor kfuncs\n"); return 0; } if (IS_ERR(btf)) -- Gitee From c633115ded4439f2409c1a128935ebf46d656121 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Thu, 8 Feb 2024 14:24:22 +0800 Subject: [PATCH 2098/2138] bpf, btf: Add check_btf_kconfigs helper ANBZ: #13548 commit 9e60b0e02550aaf5f2301e49353641a5e3701674 upstream. This patch extracts duplicate code on error path when btf_get_module_btf() returns NULL from the functions __register_btf_kfunc_id_set() and register_btf_id_dtor_kfuncs() into a new helper named check_btf_kconfigs() to check CONFIG_DEBUG_INFO_BTF and CONFIG_DEBUG_INFO_BTF_MODULES in it. 
Signed-off-by: Geliang Tang Acked-by: Jiri Olsa Link: https://lore.kernel.org/r/fa5537fc55f1e4d0bfd686598c81b7ab9dbd82b7.1707373307.git.tanggeliang@kylinos.cn Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- kernel/bpf/btf.c | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index a99c8c321f27..1189ace95b94 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -7558,6 +7558,17 @@ static struct btf *btf_get_module_btf(const struct module *module) return btf; } +static int check_btf_kconfigs(const struct module *module, const char *feature) +{ + if (!module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { + pr_err("missing vmlinux BTF, cannot register %s\n", feature); + return -ENOENT; + } + if (module && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) + pr_warn("missing module BTF, cannot register %s\n", feature); + return 0; +} + BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags) { struct btf *btf = NULL; @@ -7918,15 +7929,8 @@ static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook, int ret, i; btf = btf_get_module_btf(kset->owner); - if (!btf) { - if (!kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { - pr_err("missing vmlinux BTF, cannot register kfuncs\n"); - return -ENOENT; - } - if (kset->owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) - pr_warn("missing module BTF, cannot register kfuncs\n"); - return 0; - } + if (!btf) + return check_btf_kconfigs(kset->owner, "kfunc"); if (IS_ERR(btf)) return PTR_ERR(btf); @@ -8034,15 +8038,8 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c int ret; btf = btf_get_module_btf(owner); - if (!btf) { - if (!owner && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { - pr_err("missing vmlinux BTF, cannot register dtor kfuncs\n"); - return -ENOENT; - } - if (owner && 
IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) - pr_warn("missing module BTF, cannot register dtor kfuncs\n"); - return 0; - } + if (!btf) + return check_btf_kconfigs(owner, "dtor kfuncs"); if (IS_ERR(btf)) return PTR_ERR(btf); -- Gitee From 4992b41f9e12f54271c9de760449e3d362a80d31 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Thu, 8 Feb 2024 14:24:23 +0800 Subject: [PATCH 2099/2138] bpf, btf: Check btf for register_bpf_struct_ops ANBZ: #13548 commit 947e56f82fd783a1ec1c9359b20b5699d09cae14 upstream. Similar to the handling in the functions __register_btf_kfunc_id_set() and register_btf_id_dtor_kfuncs(), this patch uses the newly added helper check_btf_kconfigs() to handle module with its btf section stripped. While at it, the patch also adds the missed IS_ERR() check to fix the commit f6be98d19985 ("bpf, net: switch to dynamic registration") Fixes: f6be98d19985 ("bpf, net: switch to dynamic registration") Signed-off-by: Geliang Tang Link: https://lore.kernel.org/r/69082b9835463fe36f9e354bddf2d0a97df39c2b.1707373307.git.tanggeliang@kylinos.cn Signed-off-by: Martin KaFai Lau Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- kernel/bpf/btf.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 1189ace95b94..0f80f38cd4f7 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -8724,7 +8724,9 @@ int __register_bpf_struct_ops(struct bpf_struct_ops *st_ops) btf = btf_get_module_btf(st_ops->owner); if (!btf) - return -EINVAL; + return check_btf_kconfigs(st_ops->owner, "struct_ops"); + if (IS_ERR(btf)) + return PTR_ERR(btf); log = kzalloc(sizeof(*log), GFP_KERNEL | __GFP_NOWARN); if (!log) { -- Gitee From 9a7f0c31558243196e181f0ce60ee6b6673e9af1 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 24 Jan 2024 14:44:18 -0800 Subject: [PATCH 2100/2138] libbpf: Ensure undefined bpf_attr field stays 0 ANBZ: #13548 commit 
c9f115564561af63db662791e9a35fcf1dfefd2a upstream. The commit 9e926acda0c2 ("libbpf: Find correct module BTFs for struct_ops maps and progs.") sets a newly added field (value_type_btf_obj_fd) to -1 in libbpf when the caller of the libbpf's bpf_map_create did not define this field by passing a NULL "opts" or passing in a "opts" that does not cover this new field. OPT_HAS(opts, field) is used to decide if the field is defined or not: ((opts) && opts->sz >= offsetofend(typeof(*(opts)), field)) Once OPTS_HAS decided the field is not defined, that field should be set to 0. For this particular new field (value_type_btf_obj_fd), its corresponding map_flags "BPF_F_VTYPE_BTF_OBJ_FD" is not set. Thus, the kernel does not treat it as an fd field. Fixes: 9e926acda0c2 ("libbpf: Find correct module BTFs for struct_ops maps and progs.") Reported-by: Andrii Nakryiko Signed-off-by: Martin KaFai Lau Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20240124224418.2905133-1-martin.lau@linux.dev Signed-off-by: Alexei Starovoitov Signed-off-by: Tianchen Ding Reviewed-by: Yuanhe Shu Link: https://gitee.com/anolis/cloud-kernel/pulls/4504 --- tools/lib/bpf/bpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index ee18aea4a7b5..af46488e4ea9 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -192,7 +192,7 @@ int bpf_map_create(enum bpf_map_type map_type, attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0); attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0); attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0); - attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, -1); + attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0); attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0); attr.map_flags = OPTS_GET(opts, map_flags, 0); -- Gitee From 50ca52808c8f05bed278775dd82c7cea83d9cb72 Mon Sep 17 00:00:00 2001 From: Ankit Agrawal Date: Sat, 
24 Feb 2024 20:35:43 +0530 Subject: [PATCH 2101/2138] KVM: arm64: Introduce new flag for non-cacheable IO memory ANBZ: #13559 commit c034ec84e8795cf379bd47cc8871445f070a0110 upstream. Currently, KVM for ARM64 maps at stage 2 memory that is considered device (i.e. it is not RAM) with DEVICE_nGnRE memory attributes; this setting overrides (as per the ARM architecture [1]) any device MMIO mapping present at stage 1, resulting in a set-up whereby a guest operating system cannot determine device MMIO mapping memory attributes on its own but it is always overridden by the KVM stage 2 default. This set-up does not allow guest operating systems to select device memory attributes independently from KVM stage-2 mappings (refer to [1], "Combining stage 1 and stage 2 memory type attributes"), which turns out to be an issue in that guest operating systems (e.g. Linux) may request to map devices MMIO regions with memory attributes that guarantee better performance (e.g. gathering attribute - that for some devices can generate larger PCIe memory writes TLPs) and specific operations (e.g. unaligned transactions) such as the NormalNC memory type. The default device stage 2 mapping was chosen in KVM for ARM64 since it was considered safer (i.e. it would not allow guests to trigger uncontained failures ultimately crashing the machine) but this turned out to be asynchronous (SError) defeating the purpose. Failures containability is a property of the platform and is independent from the memory type used for MMIO device memory mappings. Actually, DEVICE_nGnRE memory type is even more problematic than Normal-NC memory type in terms of faults containability in that e.g. aborts triggered on DEVICE_nGnRE loads cannot be made, architecturally, synchronous (i.e. that would imply that the processor should issue at most 1 load transaction at a time - it cannot pipeline them - otherwise the synchronous abort semantics would break the no-speculation attribute attached to DEVICE_XXX memory). 
This means that regardless of the combined stage1+stage2 mappings a platform is safe if and only if device transactions cannot trigger uncontained failures and that in turn relies on platform capabilities and the device type being assigned (i.e. PCIe AER/DPC error containment and RAS architecture[3]); therefore the default KVM device stage 2 memory attributes play no role in making device assignment safer for a given platform (if the platform design adheres to design guidelines outlined in [3]) and therefore can be relaxed. For all these reasons, relax the KVM stage 2 device memory attributes from DEVICE_nGnRE to Normal-NC. The NormalNC was chosen over a different Normal memory type default at stage-2 (e.g. Normal Write-through) to avoid cache allocation/snooping. Relaxing S2 KVM device MMIO mappings to Normal-NC is not expected to trigger any issue on guest device reclaim use cases either (i.e. device MMIO unmap followed by a device reset) at least for PCIe devices, in that in PCIe a device reset is architected and carried out through PCI config space transactions that are naturally ordered with respect to MMIO transactions according to the PCI ordering rules. Having Normal-NC S2 default puts guests in control (thanks to stage1+stage2 combined memory attributes rules [1]) of device MMIO regions memory mappings, according to the rules described in [1] and summarized here ([(S1) - stage1], [(S2) - stage 2]): S1 | S2 | Result NORMAL-WB | NORMAL-NC | NORMAL-NC NORMAL-WT | NORMAL-NC | NORMAL-NC NORMAL-NC | NORMAL-NC | NORMAL-NC DEVICE | NORMAL-NC | DEVICE It is worth noting that currently, to map devices MMIO space to user space in a device pass-through use case the VFIO framework applies memory attributes derived from pgprot_noncached() settings applied to VMAs, which result in device-nGnRnE memory attributes for the stage-1 VMM mappings. 
This means that a userspace mapping for device MMIO space carried out with the current VFIO framework and a guest OS mapping for the same MMIO space may result in a mismatched alias as described in [2]. Defaulting KVM device stage-2 mappings to Normal-NC attributes does not change anything in this respect, in that the mismatched aliases would only affect (refer to [2] for a detailed explanation) ordering between the userspace and GuestOS mappings resulting stream of transactions (i.e. it does not cause loss of property for either stream of transactions on its own), which is harmless given that the userspace and GuestOS access to the device is carried out through independent transactions streams. A Normal-NC flag is not present today. So add a new kvm_pgtable_prot (KVM_PGTABLE_PROT_NORMAL_NC) flag for it, along with its corresponding PTE value 0x5 (0b101) determined from [1]. Lastly, adapt the stage2 PTE property setter function (stage2_set_prot_attr) to handle the NormalNC attribute. The entire discussion leading to this patch series may be followed through the following links. 
Link: https://lore.kernel.org/all/20230907181459.18145-3-ankita@nvidia.com Link: https://lore.kernel.org/r/20231205033015.10044-1-ankita@nvidia.com [1] section D8.5.5 - DDI0487J_a_a-profile_architecture_reference_manual.pdf [2] section B2.8 - DDI0487J_a_a-profile_architecture_reference_manual.pdf [3] sections 1.7.7.3/1.8.5.2/appendix C - DEN0029H_SBSA_7.1.pdf Suggested-by: Jason Gunthorpe Acked-by: Catalin Marinas Acked-by: Will Deacon Reviewed-by: Marc Zyngier Signed-off-by: Ankit Agrawal Link: https://lore.kernel.org/r/20240224150546.368-2-ankita@nvidia.com Signed-off-by: Oliver Upton Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Link: https://gitee.com/anolis/cloud-kernel/pulls/4503 --- arch/arm64/include/asm/kvm_pgtable.h | 2 ++ arch/arm64/include/asm/memory.h | 2 ++ arch/arm64/kvm/hyp/pgtable.c | 24 +++++++++++++++++++----- 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h index d3e354bb8351..de0b8845df7a 100644 --- a/arch/arm64/include/asm/kvm_pgtable.h +++ b/arch/arm64/include/asm/kvm_pgtable.h @@ -169,6 +169,7 @@ enum kvm_pgtable_stage2_flags { * @KVM_PGTABLE_PROT_W: Write permission. * @KVM_PGTABLE_PROT_R: Read permission. * @KVM_PGTABLE_PROT_DEVICE: Device attributes. + * @KVM_PGTABLE_PROT_NORMAL_NC: Normal noncacheable attributes. * @KVM_PGTABLE_PROT_SW0: Software bit 0. * @KVM_PGTABLE_PROT_SW1: Software bit 1. * @KVM_PGTABLE_PROT_SW2: Software bit 2. 
@@ -180,6 +181,7 @@ enum kvm_pgtable_prot { KVM_PGTABLE_PROT_R = BIT(2), KVM_PGTABLE_PROT_DEVICE = BIT(3), + KVM_PGTABLE_PROT_NORMAL_NC = BIT(4), KVM_PGTABLE_PROT_SW0 = BIT(55), KVM_PGTABLE_PROT_SW1 = BIT(56), diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index fde4186cc387..c247e5f29d5a 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -147,6 +147,7 @@ * Memory types for Stage-2 translation */ #define MT_S2_NORMAL 0xf +#define MT_S2_NORMAL_NC 0x5 #define MT_S2_DEVICE_nGnRE 0x1 /* @@ -154,6 +155,7 @@ * Stage-2 enforces Normal-WB and Device-nGnRE */ #define MT_S2_FWB_NORMAL 6 +#define MT_S2_FWB_NORMAL_NC 5 #define MT_S2_FWB_DEVICE_nGnRE 1 #ifdef CONFIG_ARM64_4K_PAGES diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index ca0bf0b92ca0..b0586d79d2e0 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -695,15 +695,29 @@ void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot, kvm_pte_t *ptep) { - bool device = prot & KVM_PGTABLE_PROT_DEVICE; - kvm_pte_t attr = device ? 
KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) : - KVM_S2_MEMATTR(pgt, NORMAL); + kvm_pte_t attr; u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS; + switch (prot & (KVM_PGTABLE_PROT_DEVICE | + KVM_PGTABLE_PROT_NORMAL_NC)) { + case KVM_PGTABLE_PROT_DEVICE | KVM_PGTABLE_PROT_NORMAL_NC: + return -EINVAL; + case KVM_PGTABLE_PROT_DEVICE: + if (prot & KVM_PGTABLE_PROT_X) + return -EINVAL; + attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE); + break; + case KVM_PGTABLE_PROT_NORMAL_NC: + if (prot & KVM_PGTABLE_PROT_X) + return -EINVAL; + attr = KVM_S2_MEMATTR(pgt, NORMAL_NC); + break; + default: + attr = KVM_S2_MEMATTR(pgt, NORMAL); + } + if (!(prot & KVM_PGTABLE_PROT_X)) attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN; - else if (device) - return -EINVAL; if (prot & KVM_PGTABLE_PROT_R) attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R; -- Gitee From eda32d95484e59925e83d90ea43e6d418d57c592 Mon Sep 17 00:00:00 2001 From: Ankit Agrawal Date: Sat, 24 Feb 2024 20:35:44 +0530 Subject: [PATCH 2102/2138] mm: Introduce new flag to indicate wc safe ANBZ: #13559 commit 5c656fcdd6c60f71fccb07fe7b9d8d7e6c9811ff upstream. The VM_ALLOW_ANY_UNCACHED flag is implemented for ARM64, allowing KVM stage 2 device mapping attributes to use NormalNC rather than DEVICE_nGnRE, which allows guest mappings supporting write-combining attributes (WC). ARM does not architecturally guarantee this is safe, and indeed some MMIO regions like the GICv2 VCPU interface can trigger uncontained faults if NormalNC is used. Even worse, the expectation is that there are platforms where even DEVICE_nGnRE can allow uncontained faults in corner cases. Unfortunately existing ARM IP requires platform integration to take responsibility to prevent this. To safely use VFIO in KVM the platform must guarantee full safety in the guest where no action taken against a MMIO mapping can trigger an uncontained failure. The assumption is that most VFIO PCI platforms support this for both mapping types, at least in common flows, based on some expectations of how PCI IP is integrated. 
This can be enabled more broadly, for instance into vfio-platform drivers, but only after the platform vendor completes auditing for safety. The VMA flag VM_ALLOW_ANY_UNCACHED was found to be the simplest and cleanest way to communicate the information from VFIO to KVM that mapping the region in S2 as NormalNC is safe. KVM consumes it to activate the code that does the S2 mapping as NormalNC. Suggested-by: Catalin Marinas Reviewed-by: Jason Gunthorpe Reviewed-by: Marc Zyngier Acked-by: David Hildenbrand Signed-off-by: Ankit Agrawal Link: https://lore.kernel.org/r/20240224150546.368-3-ankita@nvidia.com Signed-off-by: Oliver Upton Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Link: https://gitee.com/anolis/cloud-kernel/pulls/4503 --- include/linux/mm.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index d9ae9afdb51b..6b911df2b638 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -397,6 +397,20 @@ extern unsigned int kobjsize(const void *objp); # define VM_UFFD_MINOR VM_NONE #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ +/* + * This flag is used to connect VFIO to arch specific KVM code. It + * indicates that the memory under this VMA is safe for use with any + * non-cachable memory type inside KVM. Some VFIO devices, on some + * platforms, are thought to be unsafe and can cause machine crashes + * if KVM does not lock down the memory type. 
+ */ +#ifdef CONFIG_64BIT +#define VM_ALLOW_ANY_UNCACHED_BIT 39 +#define VM_ALLOW_ANY_UNCACHED BIT(VM_ALLOW_ANY_UNCACHED_BIT) +#else +#define VM_ALLOW_ANY_UNCACHED VM_NONE +#endif + /* Bits set in the VMA until the stack is in its final location */ #define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY) -- Gitee From cc2146732405f2591296319590c13cb148eece8c Mon Sep 17 00:00:00 2001 From: Ankit Agrawal Date: Sat, 24 Feb 2024 20:35:45 +0530 Subject: [PATCH 2103/2138] KVM: arm64: Set io memory s2 pte as normalnc for vfio pci device ANBZ: #13559 commit 8c47ce3e1d2c285349edf426b98e8460ce3e2f33 upstream. To provide VM with the ability to get device IO memory with NormalNC property, map device MMIO in KVM for ARM64 at stage2 as NormalNC. Having NormalNC S2 default puts guests in control (based on [1], "Combining stage 1 and stage 2 memory type attributes") of device MMIO regions memory mappings. The rules are summarized below: ([(S1) - stage1], [(S2) - stage 2]) S1 | S2 | Result NORMAL-WB | NORMAL-NC | NORMAL-NC NORMAL-WT | NORMAL-NC | NORMAL-NC NORMAL-NC | NORMAL-NC | NORMAL-NC DEVICE | NORMAL-NC | DEVICE Still this cannot be generalized to non PCI devices such as GICv2. There is insufficient information and uncertainty in the behavior of non PCI driver. A driver must indicate support using the new flag VM_ALLOW_ANY_UNCACHED. Adapt KVM to make use of the flag VM_ALLOW_ANY_UNCACHED as an indicator to activate the S2 setting to NormalNC.
[1] section D8.5.5 of DDI0487J_a_a-profile_architecture_reference_manual.pdf Suggested-by: Catalin Marinas Acked-by: Jason Gunthorpe Reviewed-by: Catalin Marinas Reviewed-by: Marc Zyngier Signed-off-by: Ankit Agrawal Link: https://lore.kernel.org/r/20240224150546.368-4-ankita@nvidia.com Signed-off-by: Oliver Upton Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Link: https://gitee.com/anolis/cloud-kernel/pulls/4503 --- arch/arm64/kvm/mmu.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 482280fe22d7..68f225d354d2 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1398,7 +1398,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, int ret = 0; bool write_fault, writable, force_pte = false; bool exec_fault, mte_allowed; - bool device = false; + bool device = false, vfio_allow_any_uc = false; unsigned long mmu_seq; struct kvm *kvm = vcpu->kvm; struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; @@ -1490,6 +1490,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, gfn = fault_ipa >> PAGE_SHIFT; mte_allowed = kvm_vma_mte_allowed(vma); + vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED; + /* Don't use the VMA after the unlock -- it may have vanished */ vma = NULL; @@ -1576,10 +1578,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (exec_fault) prot |= KVM_PGTABLE_PROT_X; - if (device) - prot |= KVM_PGTABLE_PROT_DEVICE; - else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) + if (device) { + if (vfio_allow_any_uc) + prot |= KVM_PGTABLE_PROT_NORMAL_NC; + else + prot |= KVM_PGTABLE_PROT_DEVICE; + } else if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) { prot |= KVM_PGTABLE_PROT_X; + } /* * Under the premise of getting a FSC_PERM fault, we just need to relax -- Gitee From a644b4a20c0910f2db02f4b3c57d36dd3f0f9282 Mon Sep 17 00:00:00 2001 From: Ankit Agrawal Date: Sat, 24 Feb 2024 
20:35:46 +0530 Subject: [PATCH 2104/2138] vfio: Convey kvm that the vfio-pci device is wc safe ANBZ: #13559 commit a39d3a966a090989b89c0287a67cd98c85ae2f52 upstream. The VM_ALLOW_ANY_UNCACHED flag is implemented for ARM64, allowing KVM stage 2 device mapping attributes to use Normal-NC rather than DEVICE_nGnRE, which allows guest mappings supporting write-combining attributes (WC). ARM does not architecturally guarantee this is safe, and indeed some MMIO regions like the GICv2 VCPU interface can trigger uncontained faults if Normal-NC is used. To safely use VFIO in KVM the platform must guarantee full safety in the guest where no action taken against a MMIO mapping can trigger an uncontained failure. The expectation is that most VFIO PCI platforms support this for both mapping types, at least in common flows, based on some expectations of how PCI IP is integrated. So make vfio-pci set the VM_ALLOW_ANY_UNCACHED flag. Suggested-by: Catalin Marinas Acked-by: Jason Gunthorpe Acked-by: Catalin Marinas Acked-by: Alex Williamson Reviewed-by: David Hildenbrand Reviewed-by: Marc Zyngier Signed-off-by: Ankit Agrawal Link: https://lore.kernel.org/r/20240224150546.368-5-ankita@nvidia.com Signed-off-by: Oliver Upton Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Link: https://gitee.com/anolis/cloud-kernel/pulls/4503 --- drivers/vfio/pci/vfio_pci_core.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index a8f259bc2f4d..2df176edc642 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -1882,8 +1882,25 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma /* * See remap_pfn_range(), called from vfio_pci_fault() but we can't * change vm_flags within the fault handler. Set them now. 
+ * + * VM_ALLOW_ANY_UNCACHED: The VMA flag is implemented for ARM64, + * allowing KVM stage 2 device mapping attributes to use Normal-NC + * rather than DEVICE_nGnRE, which allows guest mappings + * supporting write-combining attributes (WC). ARM does not + * architecturally guarantee this is safe, and indeed some MMIO + * regions like the GICv2 VCPU interface can trigger uncontained + * faults if Normal-NC is used. + * + * To safely use VFIO in KVM the platform must guarantee full + * safety in the guest where no action taken against a MMIO + * mapping can trigger an uncontained failure. The assumption is + * that most VFIO PCI platforms support this for both mapping types, + * at least in common flows, based on some expectations of how + * PCI IP is integrated. Hence VM_ALLOW_ANY_UNCACHED is set in + * the VMA flags. */ - vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP); + vm_flags_set(vma, VM_ALLOW_ANY_UNCACHED | VM_IO | VM_PFNMAP | + VM_DONTEXPAND | VM_DONTDUMP); vma->vm_ops = &vfio_pci_mmap_ops; return 0; -- Gitee From 2a479724b169284030f5ef99e4d86c15ced27c1f Mon Sep 17 00:00:00 2001 From: Ankit Agrawal Date: Tue, 20 Feb 2024 17:20:53 +0530 Subject: [PATCH 2105/2138] vfio/pci: rename and export do_io_rw() ANBZ: #13559 commit 4de676d494cd8fb2b4c65e58c19ebbdb36673957 upstream. do_io_rw() is used to read/write to the device MMIO. The Grace Hopper VFIO PCI variant driver requires this functionality to read/write to its memory. Rename this as vfio_pci_core functions and export as GPL.
Reviewed-by: Kevin Tian Reviewed-by: Yishai Hadas Signed-off-by: Ankit Agrawal Link: https://lore.kernel.org/r/20240220115055.23546-2-ankita@nvidia.com Signed-off-by: Alex Williamson Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Link: https://gitee.com/anolis/cloud-kernel/pulls/4503 --- drivers/vfio/pci/vfio_pci_rdwr.c | 16 +++++++++------- include/linux/vfio_pci_core.h | 5 ++++- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index e27de61ac9fe..15484e27b26f 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -94,10 +94,10 @@ VFIO_IOREAD(32) * reads with -1. This is intended for handling MSI-X vector tables and * leftover space for ROM BARs. */ -static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, - void __iomem *io, char __user *buf, - loff_t off, size_t count, size_t x_start, - size_t x_end, bool iswrite) +ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, + void __iomem *io, char __user *buf, + loff_t off, size_t count, size_t x_start, + size_t x_end, bool iswrite) { ssize_t done = 0; int ret; @@ -199,6 +199,7 @@ static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, return done; } +EXPORT_SYMBOL_GPL(vfio_pci_core_do_io_rw); static int vfio_pci_setup_barmap(struct vfio_pci_core_device *vdev, int bar) { @@ -276,8 +277,8 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, x_end = vdev->msix_offset + vdev->msix_size; } - done = do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos, - count, x_start, x_end, iswrite); + done = vfio_pci_core_do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos, + count, x_start, x_end, iswrite); if (done >= 0) *ppos += done; @@ -345,7 +346,8 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, * probing, so we don't currently worry about access in relation * to the memory enable 
bit in the command register. */ - done = do_io_rw(vdev, false, iomem, buf, off, count, 0, 0, iswrite); + done = vfio_pci_core_do_io_rw(vdev, false, iomem, buf, off, count, + 0, 0, iswrite); vga_put(vdev->pdev, rsrc); diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index 562e8754869d..d478e6f1be02 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -129,5 +129,8 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev); void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev); pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev, pci_channel_state_t state); - +ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, + void __iomem *io, char __user *buf, + loff_t off, size_t count, size_t x_start, + size_t x_end, bool iswrite); #endif /* VFIO_PCI_CORE_H */ -- Gitee From 32e231403e01ec25c6ec3d07c2b9e0ec2b6ec5a2 Mon Sep 17 00:00:00 2001 From: Ankit Agrawal Date: Tue, 20 Feb 2024 17:20:54 +0530 Subject: [PATCH 2106/2138] vfio/pci: rename and export range_intersect_range ANBZ: #13559 commit 30e920e1debb437e5aea7a4ccdab61634354297a upstream. range_intersect_range determines an overlap between two ranges. If there is an overlap, the helper function returns the overlapping offset and size. The VFIO PCI variant driver emulates the PCI config space BAR offset registers. These offsets may be accessed for read/write with a variety of lengths including sub-word sizes from sub-word offsets. The driver makes use of this helper function to read/write the targeted part of the emulated register. Make this a vfio_pci_core function, rename and export as GPL. Also update references in virtio driver.
Reviewed-by: Kevin Tian Reviewed-by: Yishai Hadas Signed-off-by: Ankit Agrawal Link: https://lore.kernel.org/r/20240220115055.23546-3-ankita@nvidia.com Signed-off-by: Alex Williamson Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Link: https://gitee.com/anolis/cloud-kernel/pulls/4503 --- drivers/vfio/pci/vfio_pci_config.c | 42 ++++++++++++++++++++++++++++++ include/linux/vfio_pci_core.h | 5 ++++ 2 files changed, 47 insertions(+) diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index a2ad4f7c716b..ea2745c1ac5e 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -1978,3 +1978,45 @@ ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, char __user *buf, return done; } + +/** + * vfio_pci_core_range_intersect_range() - Determine overlap between a buffer + * and register offset ranges. + * @buf_start: start offset of the buffer + * @buf_cnt: number of buffer bytes + * @reg_start: start register offset + * @reg_cnt: number of register bytes + * @buf_offset: start offset of overlap in the buffer + * @intersect_count: number of overlapping bytes + * @register_offset: start offset of overlap in register + * + * Returns: true if there is overlap, false if not. + * The overlap start and size is returned through function args. 
+ */ +bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt, + loff_t reg_start, size_t reg_cnt, + loff_t *buf_offset, + size_t *intersect_count, + size_t *register_offset) +{ + if (buf_start <= reg_start && + buf_start + buf_cnt > reg_start) { + *buf_offset = reg_start - buf_start; + *intersect_count = min_t(size_t, reg_cnt, + buf_start + buf_cnt - reg_start); + *register_offset = 0; + return true; + } + + if (buf_start > reg_start && + buf_start < reg_start + reg_cnt) { + *buf_offset = 0; + *intersect_count = min_t(size_t, buf_cnt, + reg_start + reg_cnt - buf_start); + *register_offset = buf_start - reg_start; + return true; + } + + return false; +} +EXPORT_SYMBOL_GPL(vfio_pci_core_range_intersect_range); diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index d478e6f1be02..783d414ce788 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -133,4 +133,9 @@ ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem, void __iomem *io, char __user *buf, loff_t off, size_t count, size_t x_start, size_t x_end, bool iswrite); +bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt, + loff_t reg_start, size_t reg_cnt, + loff_t *buf_offset, + size_t *intersect_count, + size_t *register_offset); #endif /* VFIO_PCI_CORE_H */ -- Gitee From bcf9d2de066d5a5099f3067efee194d2a18155a1 Mon Sep 17 00:00:00 2001 From: Ankit Agrawal Date: Tue, 20 Feb 2024 17:20:55 +0530 Subject: [PATCH 2107/2138] vfio/nvgrace-gpu: Add vfio pci variant module for grace hopper ANBZ: #13559 commit 701ab935859fcfd4a8c8a97f3ee4fb5294a9d481 upstream. NVIDIA's upcoming Grace Hopper Superchip provides a PCI-like device for the on-chip GPU that is the logical OS representation of the internal proprietary chip-to-chip cache coherent interconnect. 
The device is peculiar compared to a real PCI device in that whilst there is a real 64b PCI BAR1 (comprising region 2 & region 3) on the device, it is not used to access device memory once the faster chip-to-chip interconnect is initialized (occurs at the time of host system boot). The device memory is accessed instead using the chip-to-chip interconnect that is exposed as a contiguous physically addressable region on the host. This device memory aperture can be obtained from host ACPI table using device_property_read_u64(), according to the FW specification. Since the device memory is cache coherent with the CPU, it can be mmap into the user VMA with a cacheable mapping using remap_pfn_range() and used like a regular RAM. The device memory is not added to the host kernel, but mapped directly as this reduces memory wastage due to struct pages. There is also a requirement of a minimum reserved 1G uncached region (termed as resmem) to support the Multi-Instance GPU (MIG) feature [1]. This is to work around a HW defect. Based on [2], the requisite properties (uncached, unaligned access) can be achieved through a VM mapping (S1) of NORMAL_NC and host (S2) mapping with MemAttr[2:0]=0b101. To provide a different non-cached property to the reserved 1G region, it needs to be carved out from the device memory and mapped as a separate region in Qemu VMA with pgprot_writecombine(). pgprot_writecombine() sets the Qemu VMA page properties (pgprot) as NORMAL_NC. Provide a VFIO PCI variant driver that adapts the unique device memory representation into a more standard PCI representation facing userspace. The variant driver exposes these two regions - the non-cached reserved (resmem) and the cached rest of the device memory (termed as usemem) as separate VFIO 64b BAR regions. This is divergent from the baremetal approach, where the device memory is exposed as a device memory region. 
The decision for a different approach was taken in view of the fact that it would necessitate additional code in Qemu to discover and insert those regions in the VM IPA, along with the additional VM ACPI DSDT changes to communicate the device memory region IPA to the VM workloads. Moreover, this behavior would have to be added to a variety of emulators (beyond top of tree Qemu) out there desiring grace hopper support. Since the device implements 64-bit BAR0, the VFIO PCI variant driver maps the uncached carved out region to the next available PCI BAR (i.e. comprising of region 2 and 3). The cached device memory aperture is assigned BAR region 4 and 5. Qemu will then naturally generate a PCI device in the VM with the uncached aperture reported as BAR2 region, the cacheable as BAR4. The variant driver provides emulation for these fake BARs' PCI config space offset registers. The hardware ensures that the system does not crash when the memory is accessed with the memory enable turned off. It synthesizes ~0 reads and dropped writes on such access. So there is no need to support the disablement/enablement of BAR through PCI_COMMAND config space register. The memory layout on the host looks like the following: devmem (memlength) |--------------------------------------------------| |-------------cached------------------------|--NC--| | | usemem.memphys resmem.memphys PCI BARs need to be aligned to the power-of-2, but the actual memory on the device may not. A read or write access to the physical address from the last device PFN up to the next power-of-2 aligned physical address results in reading ~0 and dropped writes. Note that the GPU device driver [6] is capable of knowing the exact device memory size through separate means. The device memory size is primarily kept in the system ACPI tables for use by the VFIO PCI variant module. Note that the usemem memory is added by the VM Nvidia device driver [5] to the VM kernel as memblocks.
Hence make the usable memory size memblock (MEMBLK_SIZE) aligned. This is a hardwired ABI value between the GPU FW and VFIO driver. The VM device driver make use of the same value for its calculation to determine USEMEM size. Currently there is no provision in KVM for a S2 mapping with MemAttr[2:0]=0b101, but there is an ongoing effort to provide the same [3]. As previously mentioned, resmem is mapped pgprot_writecombine(), that sets the Qemu VMA page properties (pgprot) as NORMAL_NC. Using the proposed changes in [3] and [4], KVM marks the region with MemAttr[2:0]=0b101 in S2. If the device memory properties are not present, the driver registers the vfio-pci-core function pointers. Since there are no ACPI memory properties generated for the VM, the variant driver inside the VM will only use the vfio-pci-core ops and hence try to map the BARs as non cached. This is not a problem as the CPUs have FWB enabled which blocks the VM mapping's ability to override the cacheability set by the host mapping. This goes along with a qemu series [6] to provides the necessary implementation of the Grace Hopper Superchip firmware specification so that the guest operating system can see the correct ACPI modeling for the coherent GPU device. Verified with the CUDA workload in the VM. 
[1] https://www.nvidia.com/en-in/technologies/multi-instance-gpu/ [2] section D8.5.5 of https://developer.arm.com/documentation/ddi0487/latest/ [3] https://lore.kernel.org/all/20240211174705.31992-1-ankita@nvidia.com/ [4] https://lore.kernel.org/all/20230907181459.18145-2-ankita@nvidia.com/ [5] https://github.com/NVIDIA/open-gpu-kernel-modules [6] https://lore.kernel.org/all/20231203060245.31593-1-ankita@nvidia.com/ Reviewed-by: Kevin Tian Reviewed-by: Yishai Hadas Reviewed-by: Zhi Wang Signed-off-by: Aniket Agashe Signed-off-by: Ankit Agrawal Link: https://lore.kernel.org/r/20240220115055.23546-4-ankita@nvidia.com Signed-off-by: Alex Williamson Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Link: https://gitee.com/anolis/cloud-kernel/pulls/4503 --- MAINTAINERS | 6 + drivers/vfio/pci/Kconfig | 2 + drivers/vfio/pci/Makefile | 2 + drivers/vfio/pci/nvgrace-gpu/Kconfig | 10 + drivers/vfio/pci/nvgrace-gpu/Makefile | 3 + drivers/vfio/pci/nvgrace-gpu/main.c | 879 ++++++++++++++++++++++++++ 6 files changed, 902 insertions(+) create mode 100644 drivers/vfio/pci/nvgrace-gpu/Kconfig create mode 100644 drivers/vfio/pci/nvgrace-gpu/Makefile create mode 100644 drivers/vfio/pci/nvgrace-gpu/main.c diff --git a/MAINTAINERS b/MAINTAINERS index 0166f496d16b..5a3cab8bbe28 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -22659,6 +22659,12 @@ L: kvm@vger.kernel.org S: Maintained F: drivers/vfio/pci/mlx5/ +VFIO NVIDIA GRACE GPU DRIVER +M: Ankit Agrawal +L: kvm@vger.kernel.org +S: Supported +F: drivers/vfio/pci/nvgrace-gpu/ + VFIO PCI DEVICE SPECIFIC DRIVERS R: Jason Gunthorpe R: Yishai Hadas diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig index 04ac975432a3..1b5ca334e225 100644 --- a/drivers/vfio/pci/Kconfig +++ b/drivers/vfio/pci/Kconfig @@ -67,4 +67,6 @@ source "drivers/vfio/pci/pds/Kconfig" source "drivers/vfio/pci/qat/Kconfig" +source "drivers/vfio/pci/nvgrace-gpu/Kconfig" + endmenu diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile index 
52aa7423e6df..a84c7f823b37 100644 --- a/drivers/vfio/pci/Makefile +++ b/drivers/vfio/pci/Makefile @@ -14,4 +14,6 @@ obj-$(CONFIG_HISI_ACC_VFIO_PCI) += hisilicon/ obj-$(CONFIG_PDS_VFIO_PCI) += pds/ +obj-$(CONFIG_NVGRACE_GPU_VFIO_PCI) += nvgrace-gpu/ + obj-$(CONFIG_QAT_VFIO_PCI) += qat/ diff --git a/drivers/vfio/pci/nvgrace-gpu/Kconfig b/drivers/vfio/pci/nvgrace-gpu/Kconfig new file mode 100644 index 000000000000..a7f624b37e41 --- /dev/null +++ b/drivers/vfio/pci/nvgrace-gpu/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +config NVGRACE_GPU_VFIO_PCI + tristate "VFIO support for the GPU in the NVIDIA Grace Hopper Superchip" + depends on ARM64 || (COMPILE_TEST && 64BIT) + select VFIO_PCI_CORE + help + VFIO support for the GPU in the NVIDIA Grace Hopper Superchip is + required to assign the GPU device to userspace using KVM/qemu/etc. + + If you don't know what to do here, say N. diff --git a/drivers/vfio/pci/nvgrace-gpu/Makefile b/drivers/vfio/pci/nvgrace-gpu/Makefile new file mode 100644 index 000000000000..3ca8c187897a --- /dev/null +++ b/drivers/vfio/pci/nvgrace-gpu/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_NVGRACE_GPU_VFIO_PCI) += nvgrace-gpu-vfio-pci.o +nvgrace-gpu-vfio-pci-y := main.o diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c new file mode 100644 index 000000000000..25814006352d --- /dev/null +++ b/drivers/vfio/pci/nvgrace-gpu/main.c @@ -0,0 +1,879 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved + */ + +#include +#include + +/* + * The device memory usable to the workloads running in the VM is cached + * and showcased as a 64b device BAR (comprising of BAR4 and BAR5 region) + * to the VM and is represented as usemem. + * Moreover, the VM GPU device driver needs a non-cacheable region to + * support the MIG feature. 
This region is also exposed as a 64b BAR + * (comprising of BAR2 and BAR3 region) and represented as resmem. + */ +#define RESMEM_REGION_INDEX VFIO_PCI_BAR2_REGION_INDEX +#define USEMEM_REGION_INDEX VFIO_PCI_BAR4_REGION_INDEX + +/* Memory size expected as non cached and reserved by the VM driver */ +#define RESMEM_SIZE SZ_1G + +/* A hardwired and constant ABI value between the GPU FW and VFIO driver. */ +#define MEMBLK_SIZE SZ_512M + +/* + * The state of the two device memory region - resmem and usemem - is + * saved as struct mem_region. + */ +struct mem_region { + phys_addr_t memphys; /* Base physical address of the region */ + size_t memlength; /* Region size */ + size_t bar_size; /* Reported region BAR size */ + __le64 bar_val; /* Emulated BAR offset registers */ + union { + void *memaddr; + void __iomem *ioaddr; + }; /* Base virtual address of the region */ +}; + +struct nvgrace_gpu_pci_core_device { + struct vfio_pci_core_device core_device; + /* Cached and usable memory for the VM. */ + struct mem_region usemem; + /* Non cached memory carved out from the end of device memory */ + struct mem_region resmem; + /* Lock to control device memory kernel mapping */ + struct mutex remap_lock; +}; + +static void nvgrace_gpu_init_fake_bar_emu_regs(struct vfio_device *core_vdev) +{ + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + + nvdev->resmem.bar_val = 0; + nvdev->usemem.bar_val = 0; +} + +/* Choose the structure corresponding to the fake BAR with a given index. 
*/ +static struct mem_region * +nvgrace_gpu_memregion(int index, + struct nvgrace_gpu_pci_core_device *nvdev) +{ + if (index == USEMEM_REGION_INDEX) + return &nvdev->usemem; + + if (index == RESMEM_REGION_INDEX) + return &nvdev->resmem; + + return NULL; +} + +static int nvgrace_gpu_open_device(struct vfio_device *core_vdev) +{ + struct vfio_pci_core_device *vdev = + container_of(core_vdev, struct vfio_pci_core_device, vdev); + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + int ret; + + ret = vfio_pci_core_enable(vdev); + if (ret) + return ret; + + if (nvdev->usemem.memlength) { + nvgrace_gpu_init_fake_bar_emu_regs(core_vdev); + mutex_init(&nvdev->remap_lock); + } + + vfio_pci_core_finish_enable(vdev); + + return 0; +} + +static void nvgrace_gpu_close_device(struct vfio_device *core_vdev) +{ + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + + /* Unmap the mapping to the device memory cached region */ + if (nvdev->usemem.memaddr) { + memunmap(nvdev->usemem.memaddr); + nvdev->usemem.memaddr = NULL; + } + + /* Unmap the mapping to the device memory non-cached region */ + if (nvdev->resmem.ioaddr) { + iounmap(nvdev->resmem.ioaddr); + nvdev->resmem.ioaddr = NULL; + } + + mutex_destroy(&nvdev->remap_lock); + + vfio_pci_core_close_device(core_vdev); +} + +static int nvgrace_gpu_mmap(struct vfio_device *core_vdev, + struct vm_area_struct *vma) +{ + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + struct mem_region *memregion; + unsigned long start_pfn; + u64 req_len, pgoff, end; + unsigned int index; + int ret = 0; + + index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT); + + memregion = nvgrace_gpu_memregion(index, nvdev); + if (!memregion) + return vfio_pci_core_mmap(core_vdev, vma); + + /* + * Request to mmap the BAR. 
Map to the CPU accessible memory on the + * GPU using the memory information gathered from the system ACPI + * tables. + */ + pgoff = vma->vm_pgoff & + ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1); + + if (check_sub_overflow(vma->vm_end, vma->vm_start, &req_len) || + check_add_overflow(PHYS_PFN(memregion->memphys), pgoff, &start_pfn) || + check_add_overflow(PFN_PHYS(pgoff), req_len, &end)) + return -EOVERFLOW; + + /* + * Check that the mapping request does not go beyond available device + * memory size + */ + if (end > memregion->memlength) + return -EINVAL; + + /* + * The carved out region of the device memory needs the NORMAL_NC + * property. Communicate as such to the hypervisor. + */ + if (index == RESMEM_REGION_INDEX) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + /* + * Perform a PFN map to the memory and back the device BAR by the + * GPU memory. + * + * The available GPU memory size may not be power-of-2 aligned. The + * remainder is only backed by vfio_device_ops read/write handlers. + * + * During device reset, the GPU is safely disconnected to the CPU + * and access to the BAR will be immediately returned preventing + * machine check. 
+ */ + ret = remap_pfn_range(vma, vma->vm_start, start_pfn, + req_len, vma->vm_page_prot); + if (ret) + return ret; + + vma->vm_pgoff = start_pfn; + + return 0; +} + +static long +nvgrace_gpu_ioctl_get_region_info(struct vfio_device *core_vdev, + unsigned long arg) +{ + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + unsigned long minsz = offsetofend(struct vfio_region_info, offset); + struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; + struct vfio_region_info_cap_sparse_mmap *sparse; + struct vfio_region_info info; + struct mem_region *memregion; + u32 size; + int ret; + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + /* + * Request to determine the BAR region information. Send the + * GPU memory information. + */ + memregion = nvgrace_gpu_memregion(info.index, nvdev); + if (!memregion) + return vfio_pci_core_ioctl(core_vdev, + VFIO_DEVICE_GET_REGION_INFO, arg); + + size = struct_size(sparse, areas, 1); + + /* + * Setup for sparse mapping for the device memory. Only the + * available device memory on the hardware is shown as a + * mappable region. + */ + sparse = kzalloc(size, GFP_KERNEL); + if (!sparse) + return -ENOMEM; + + sparse->nr_areas = 1; + sparse->areas[0].offset = 0; + sparse->areas[0].size = memregion->memlength; + sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP; + sparse->header.version = 1; + + ret = vfio_info_add_capability(&caps, &sparse->header, size); + kfree(sparse); + if (ret) + return ret; + + info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index); + /* + * The region memory size may not be power-of-2 aligned. + * Given that the memory as a BAR and may not be + * aligned, roundup to the next power-of-2. 
+ */ + info.size = memregion->bar_size; + info.flags = VFIO_REGION_INFO_FLAG_READ | + VFIO_REGION_INFO_FLAG_WRITE | + VFIO_REGION_INFO_FLAG_MMAP; + + if (caps.size) { + info.flags |= VFIO_REGION_INFO_FLAG_CAPS; + if (info.argsz < sizeof(info) + caps.size) { + info.argsz = sizeof(info) + caps.size; + info.cap_offset = 0; + } else { + vfio_info_cap_shift(&caps, sizeof(info)); + if (copy_to_user((void __user *)arg + + sizeof(info), caps.buf, + caps.size)) { + kfree(caps.buf); + return -EFAULT; + } + info.cap_offset = sizeof(info); + } + kfree(caps.buf); + } + return copy_to_user((void __user *)arg, &info, minsz) ? + -EFAULT : 0; +} + +static long nvgrace_gpu_ioctl(struct vfio_device *core_vdev, + unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case VFIO_DEVICE_GET_REGION_INFO: + return nvgrace_gpu_ioctl_get_region_info(core_vdev, arg); + case VFIO_DEVICE_IOEVENTFD: + return -ENOTTY; + case VFIO_DEVICE_RESET: + nvgrace_gpu_init_fake_bar_emu_regs(core_vdev); + fallthrough; + default: + return vfio_pci_core_ioctl(core_vdev, cmd, arg); + } +} + +static __le64 +nvgrace_gpu_get_read_value(size_t bar_size, u64 flags, __le64 val64) +{ + u64 tmp_val; + + tmp_val = le64_to_cpu(val64); + tmp_val &= ~(bar_size - 1); + tmp_val |= flags; + + return cpu_to_le64(tmp_val); +} + +/* + * Both the usable (usemem) and the reserved (resmem) device memory region + * are exposed as a 64b fake device BARs in the VM. These fake BARs must + * respond to the accesses on their respective PCI config space offsets. + * + * resmem BAR owns PCI_BASE_ADDRESS_2 & PCI_BASE_ADDRESS_3. + * usemem BAR owns PCI_BASE_ADDRESS_4 & PCI_BASE_ADDRESS_5. 
+ */ +static ssize_t +nvgrace_gpu_read_config_emu(struct vfio_device *core_vdev, + char __user *buf, size_t count, loff_t *ppos) +{ + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; + struct mem_region *memregion = NULL; + __le64 val64; + size_t register_offset; + loff_t copy_offset; + size_t copy_count; + int ret; + + ret = vfio_pci_core_read(core_vdev, buf, count, ppos); + if (ret < 0) + return ret; + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_2, + sizeof(val64), + ©_offset, ©_count, + ®ister_offset)) + memregion = nvgrace_gpu_memregion(RESMEM_REGION_INDEX, nvdev); + else if (vfio_pci_core_range_intersect_range(pos, count, + PCI_BASE_ADDRESS_4, + sizeof(val64), + ©_offset, ©_count, + ®ister_offset)) + memregion = nvgrace_gpu_memregion(USEMEM_REGION_INDEX, nvdev); + + if (memregion) { + val64 = nvgrace_gpu_get_read_value(memregion->bar_size, + PCI_BASE_ADDRESS_MEM_TYPE_64 | + PCI_BASE_ADDRESS_MEM_PREFETCH, + memregion->bar_val); + if (copy_to_user(buf + copy_offset, + (void *)&val64 + register_offset, copy_count)) { + /* + * The position has been incremented in + * vfio_pci_core_read. Reset the offset back to the + * starting position. 
+ */ + *ppos -= count; + return -EFAULT; + } + } + + return count; +} + +static ssize_t +nvgrace_gpu_write_config_emu(struct vfio_device *core_vdev, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + u64 pos = *ppos & VFIO_PCI_OFFSET_MASK; + struct mem_region *memregion = NULL; + size_t register_offset; + loff_t copy_offset; + size_t copy_count; + + if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_2, + sizeof(u64), ©_offset, + ©_count, ®ister_offset)) + memregion = nvgrace_gpu_memregion(RESMEM_REGION_INDEX, nvdev); + else if (vfio_pci_core_range_intersect_range(pos, count, PCI_BASE_ADDRESS_4, + sizeof(u64), ©_offset, + ©_count, ®ister_offset)) + memregion = nvgrace_gpu_memregion(USEMEM_REGION_INDEX, nvdev); + + if (memregion) { + if (copy_from_user((void *)&memregion->bar_val + register_offset, + buf + copy_offset, copy_count)) + return -EFAULT; + *ppos += copy_count; + return copy_count; + } + + return vfio_pci_core_write(core_vdev, buf, count, ppos); +} + +/* + * Ad hoc map the device memory in the module kernel VA space. Primarily needed + * as vfio does not require the userspace driver to only perform accesses through + * mmaps of the vfio-pci BAR regions and such accesses should be supported using + * vfio_device_ops read/write implementations. + * + * The usemem region is cacheable memory and hence is memremaped. + * The resmem region is non-cached and is mapped using ioremap_wc (NORMAL_NC). 
+ */ +static int +nvgrace_gpu_map_device_mem(int index, + struct nvgrace_gpu_pci_core_device *nvdev) +{ + struct mem_region *memregion; + int ret = 0; + + memregion = nvgrace_gpu_memregion(index, nvdev); + if (!memregion) + return -EINVAL; + + mutex_lock(&nvdev->remap_lock); + + if (memregion->memaddr) + goto unlock; + + if (index == USEMEM_REGION_INDEX) + memregion->memaddr = memremap(memregion->memphys, + memregion->memlength, + MEMREMAP_WB); + else + memregion->ioaddr = ioremap_wc(memregion->memphys, + memregion->memlength); + + if (!memregion->memaddr) + ret = -ENOMEM; + +unlock: + mutex_unlock(&nvdev->remap_lock); + + return ret; +} + +/* + * Read the data from the device memory (mapped either through ioremap + * or memremap) into the user buffer. + */ +static int +nvgrace_gpu_map_and_read(struct nvgrace_gpu_pci_core_device *nvdev, + char __user *buf, size_t mem_count, loff_t *ppos) +{ + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + u64 offset = *ppos & VFIO_PCI_OFFSET_MASK; + int ret; + + if (!mem_count) + return 0; + + /* + * Handle read on the BAR regions. Map to the target device memory + * physical address and copy to the request read buffer. + */ + ret = nvgrace_gpu_map_device_mem(index, nvdev); + if (ret) + return ret; + + if (index == USEMEM_REGION_INDEX) { + if (copy_to_user(buf, + (u8 *)nvdev->usemem.memaddr + offset, + mem_count)) + ret = -EFAULT; + } else { + /* + * The hardware ensures that the system does not crash when + * the device memory is accessed with the memory enable + * turned off. It synthesizes ~0 on such read. So there is + * no need to check or support the disablement/enablement of + * BAR through PCI_COMMAND config space register. Pass + * test_mem flag as false. + */ + ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false, + nvdev->resmem.ioaddr, + buf, offset, mem_count, + 0, 0, false); + } + + return ret; +} + +/* + * Read count bytes from the device memory at an offset. 
The actual device + * memory size (available) may not be a power-of-2. So the driver fakes + * the size to a power-of-2 (reported) when exposing to a user space driver. + * + * Reads starting beyond the reported size generate -EINVAL; reads extending + * beyond the actual device size is filled with ~0; reads extending beyond + * the reported size are truncated. + */ +static ssize_t +nvgrace_gpu_read_mem(struct nvgrace_gpu_pci_core_device *nvdev, + char __user *buf, size_t count, loff_t *ppos) +{ + u64 offset = *ppos & VFIO_PCI_OFFSET_MASK; + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + struct mem_region *memregion; + size_t mem_count, i; + u8 val = 0xFF; + int ret; + + /* No need to do NULL check as caller does. */ + memregion = nvgrace_gpu_memregion(index, nvdev); + + if (offset >= memregion->bar_size) + return -EINVAL; + + /* Clip short the read request beyond reported BAR size */ + count = min(count, memregion->bar_size - (size_t)offset); + + /* + * Determine how many bytes to be actually read from the device memory. + * Read request beyond the actual device memory size is filled with ~0, + * while those beyond the actual reported size is skipped. + */ + if (offset >= memregion->memlength) + mem_count = 0; + else + mem_count = min(count, memregion->memlength - (size_t)offset); + + ret = nvgrace_gpu_map_and_read(nvdev, buf, mem_count, ppos); + if (ret) + return ret; + + /* + * Only the device memory present on the hardware is mapped, which may + * not be power-of-2 aligned. A read to an offset beyond the device memory + * size is filled with ~0. 
+ */ + for (i = mem_count; i < count; i++) { + ret = put_user(val, (unsigned char __user *)(buf + i)); + if (ret) + return ret; + } + + *ppos += count; + return count; +} + +static ssize_t +nvgrace_gpu_read(struct vfio_device *core_vdev, + char __user *buf, size_t count, loff_t *ppos) +{ + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + + if (nvgrace_gpu_memregion(index, nvdev)) + return nvgrace_gpu_read_mem(nvdev, buf, count, ppos); + + if (index == VFIO_PCI_CONFIG_REGION_INDEX) + return nvgrace_gpu_read_config_emu(core_vdev, buf, count, ppos); + + return vfio_pci_core_read(core_vdev, buf, count, ppos); +} + +/* + * Write the data to the device memory (mapped either through ioremap + * or memremap) from the user buffer. + */ +static int +nvgrace_gpu_map_and_write(struct nvgrace_gpu_pci_core_device *nvdev, + const char __user *buf, size_t mem_count, + loff_t *ppos) +{ + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK; + int ret; + + if (!mem_count) + return 0; + + ret = nvgrace_gpu_map_device_mem(index, nvdev); + if (ret) + return ret; + + if (index == USEMEM_REGION_INDEX) { + if (copy_from_user((u8 *)nvdev->usemem.memaddr + pos, + buf, mem_count)) + return -EFAULT; + } else { + /* + * The hardware ensures that the system does not crash when + * the device memory is accessed with the memory enable + * turned off. It drops such writes. So there is no need to + * check or support the disablement/enablement of BAR + * through PCI_COMMAND config space register. Pass test_mem + * flag as false. + */ + ret = vfio_pci_core_do_io_rw(&nvdev->core_device, false, + nvdev->resmem.ioaddr, + (char __user *)buf, pos, mem_count, + 0, 0, true); + } + + return ret; +} + +/* + * Write count bytes to the device memory at a given offset. 
The actual device + * memory size (available) may not be a power-of-2. So the driver fakes the + * size to a power-of-2 (reported) when exposing to a user space driver. + * + * Writes extending beyond the reported size are truncated; writes starting + * beyond the reported size generate -EINVAL. + */ +static ssize_t +nvgrace_gpu_write_mem(struct nvgrace_gpu_pci_core_device *nvdev, + size_t count, loff_t *ppos, const char __user *buf) +{ + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + u64 offset = *ppos & VFIO_PCI_OFFSET_MASK; + struct mem_region *memregion; + size_t mem_count; + int ret = 0; + + /* No need to do NULL check as caller does. */ + memregion = nvgrace_gpu_memregion(index, nvdev); + + if (offset >= memregion->bar_size) + return -EINVAL; + + /* Clip short the write request beyond reported BAR size */ + count = min(count, memregion->bar_size - (size_t)offset); + + /* + * Determine how many bytes to be actually written to the device memory. + * Do not write to the offset beyond available size. + */ + if (offset >= memregion->memlength) + goto exitfn; + + /* + * Only the device memory present on the hardware is mapped, which may + * not be power-of-2 aligned. Drop access outside the available device + * memory on the hardware. 
+ */ + mem_count = min(count, memregion->memlength - (size_t)offset); + + ret = nvgrace_gpu_map_and_write(nvdev, buf, mem_count, ppos); + if (ret) + return ret; + +exitfn: + *ppos += count; + return count; +} + +static ssize_t +nvgrace_gpu_write(struct vfio_device *core_vdev, + const char __user *buf, size_t count, loff_t *ppos) +{ + struct nvgrace_gpu_pci_core_device *nvdev = + container_of(core_vdev, struct nvgrace_gpu_pci_core_device, + core_device.vdev); + unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos); + + if (nvgrace_gpu_memregion(index, nvdev)) + return nvgrace_gpu_write_mem(nvdev, count, ppos, buf); + + if (index == VFIO_PCI_CONFIG_REGION_INDEX) + return nvgrace_gpu_write_config_emu(core_vdev, buf, count, ppos); + + return vfio_pci_core_write(core_vdev, buf, count, ppos); +} + +static const struct vfio_device_ops nvgrace_gpu_pci_ops = { + .name = "nvgrace-gpu-vfio-pci", + .init = vfio_pci_core_init_dev, + .release = vfio_pci_core_release_dev, + .open_device = nvgrace_gpu_open_device, + .close_device = nvgrace_gpu_close_device, + .ioctl = nvgrace_gpu_ioctl, + .device_feature = vfio_pci_core_ioctl_feature, + .read = nvgrace_gpu_read, + .write = nvgrace_gpu_write, + .mmap = nvgrace_gpu_mmap, + .request = vfio_pci_core_request, + .match = vfio_pci_core_match, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, + .detach_ioas = vfio_iommufd_physical_detach_ioas, +}; + +static const struct vfio_device_ops nvgrace_gpu_pci_core_ops = { + .name = "nvgrace-gpu-vfio-pci-core", + .init = vfio_pci_core_init_dev, + .release = vfio_pci_core_release_dev, + .open_device = nvgrace_gpu_open_device, + .close_device = vfio_pci_core_close_device, + .ioctl = vfio_pci_core_ioctl, + .device_feature = vfio_pci_core_ioctl_feature, + .read = vfio_pci_core_read, + .write = vfio_pci_core_write, + .mmap = vfio_pci_core_mmap, + .request = vfio_pci_core_request, + .match = 
vfio_pci_core_match, + .bind_iommufd = vfio_iommufd_physical_bind, + .unbind_iommufd = vfio_iommufd_physical_unbind, + .attach_ioas = vfio_iommufd_physical_attach_ioas, + .detach_ioas = vfio_iommufd_physical_detach_ioas, +}; + +static int +nvgrace_gpu_fetch_memory_property(struct pci_dev *pdev, + u64 *pmemphys, u64 *pmemlength) +{ + int ret; + + /* + * The memory information is present in the system ACPI tables as DSD + * properties nvidia,gpu-mem-base-pa and nvidia,gpu-mem-size. + */ + ret = device_property_read_u64(&pdev->dev, "nvidia,gpu-mem-base-pa", + pmemphys); + if (ret) + return ret; + + if (*pmemphys > type_max(phys_addr_t)) + return -EOVERFLOW; + + ret = device_property_read_u64(&pdev->dev, "nvidia,gpu-mem-size", + pmemlength); + if (ret) + return ret; + + if (*pmemlength > type_max(size_t)) + return -EOVERFLOW; + + /* + * If the C2C link is not up due to an error, the coherent device + * memory size is returned as 0. Fail in such case. + */ + if (*pmemlength == 0) + return -ENOMEM; + + return ret; +} + +static int +nvgrace_gpu_init_nvdev_struct(struct pci_dev *pdev, + struct nvgrace_gpu_pci_core_device *nvdev, + u64 memphys, u64 memlength) +{ + int ret = 0; + + /* + * The VM GPU device driver needs a non-cacheable region to support + * the MIG feature. Since the device memory is mapped as NORMAL cached, + * carve out a region from the end with a different NORMAL_NC + * property (called as reserved memory and represented as resmem). This + * region then is exposed as a 64b BAR (region 2 and 3) to the VM, while + * exposing the rest (termed as usable memory and represented using usemem) + * as cacheable 64b BAR (region 4 and 5). + * + * devmem (memlength) + * |-------------------------------------------------| + * | | + * usemem.memphys resmem.memphys + */ + nvdev->usemem.memphys = memphys; + + /* + * The device memory exposed to the VM is added to the kernel by the + * VM driver module in chunks of memory block size. 
Only the usable + * memory (usemem) is added to the kernel for usage by the VM + * workloads. Make the usable memory size memblock aligned. + */ + if (check_sub_overflow(memlength, RESMEM_SIZE, + &nvdev->usemem.memlength)) { + ret = -EOVERFLOW; + goto done; + } + + /* + * The USEMEM part of the device memory has to be MEMBLK_SIZE + * aligned. This is a hardwired ABI value between the GPU FW and + * VFIO driver. The VM device driver is also aware of it and make + * use of the value for its calculation to determine USEMEM size. + */ + nvdev->usemem.memlength = round_down(nvdev->usemem.memlength, + MEMBLK_SIZE); + if (nvdev->usemem.memlength == 0) { + ret = -EINVAL; + goto done; + } + + if ((check_add_overflow(nvdev->usemem.memphys, + nvdev->usemem.memlength, + &nvdev->resmem.memphys)) || + (check_sub_overflow(memlength, nvdev->usemem.memlength, + &nvdev->resmem.memlength))) { + ret = -EOVERFLOW; + goto done; + } + + /* + * The memory regions are exposed as BARs. Calculate and save + * the BAR size for them. + */ + nvdev->usemem.bar_size = roundup_pow_of_two(nvdev->usemem.memlength); + nvdev->resmem.bar_size = roundup_pow_of_two(nvdev->resmem.memlength); +done: + return ret; +} + +static int nvgrace_gpu_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + const struct vfio_device_ops *ops = &nvgrace_gpu_pci_core_ops; + struct nvgrace_gpu_pci_core_device *nvdev; + u64 memphys, memlength; + int ret; + + ret = nvgrace_gpu_fetch_memory_property(pdev, &memphys, &memlength); + if (!ret) + ops = &nvgrace_gpu_pci_ops; + + nvdev = vfio_alloc_device(nvgrace_gpu_pci_core_device, core_device.vdev, + &pdev->dev, ops); + if (IS_ERR(nvdev)) + return PTR_ERR(nvdev); + + dev_set_drvdata(&pdev->dev, &nvdev->core_device); + + if (ops == &nvgrace_gpu_pci_ops) { + /* + * Device memory properties are identified in the host ACPI + * table. Set the nvgrace_gpu_pci_core_device structure. 
+ */ + ret = nvgrace_gpu_init_nvdev_struct(pdev, nvdev, + memphys, memlength); + if (ret) + goto out_put_vdev; + } + + ret = vfio_pci_core_register_device(&nvdev->core_device); + if (ret) + goto out_put_vdev; + + return ret; + +out_put_vdev: + vfio_put_device(&nvdev->core_device.vdev); + return ret; +} + +static void nvgrace_gpu_remove(struct pci_dev *pdev) +{ + struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev); + + vfio_pci_core_unregister_device(core_device); + vfio_put_device(&core_device->vdev); +} + +static const struct pci_device_id nvgrace_gpu_vfio_pci_table[] = { + /* GH200 120GB */ + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2342) }, + /* GH200 480GB */ + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2345) }, + {} +}; + +MODULE_DEVICE_TABLE(pci, nvgrace_gpu_vfio_pci_table); + +static struct pci_driver nvgrace_gpu_vfio_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = nvgrace_gpu_vfio_pci_table, + .probe = nvgrace_gpu_probe, + .remove = nvgrace_gpu_remove, + .err_handler = &vfio_pci_core_err_handlers, + .driver_managed_dma = true, +}; + +module_pci_driver(nvgrace_gpu_vfio_pci_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Ankit Agrawal "); +MODULE_AUTHOR("Aniket Agashe "); +MODULE_DESCRIPTION("VFIO NVGRACE GPU PF - User Level driver for NVIDIA devices with CPU coherently accessible device memory"); -- Gitee From 2372cb979909b5876ffdc361871dfb2f41df489f Mon Sep 17 00:00:00 2001 From: Ankit Agrawal Date: Thu, 29 Feb 2024 19:39:34 +0000 Subject: [PATCH 2108/2138] vfio/nvgrace-gpu: Convey kvm to map device memory region as noncached ANBZ: #13559 commit 81617c17bf58f008a57da74b97e60a0bf8e971fd upstream. The NVIDIA Grace Hopper GPUs have device memory that is supposed to be used as a regular RAM. It is accessible through CPU-GPU chip-to-chip cache coherent interconnect and is present in the system physical address space. 
The device memory is split into two regions - termed as usemem and resmem - in the system physical address space, with each region mapped and exposed to the VM as a separate fake device BAR [1]. Owing to a hardware defect for Multi-Instance GPU (MIG) feature [2], there is a requirement - as a workaround - for the resmem BAR to display uncached memory characteristics. Based on [3], on system with FWB enabled such as Grace Hopper, the requisite properties (uncached, unaligned access) can be achieved through a VM mapping (S1) of NORMAL_NC and host mapping (S2) of MT_S2_FWB_NORMAL_NC. KVM currently maps the MMIO region in S2 as MT_S2_FWB_DEVICE_nGnRE by default. The fake device BARs thus displays DEVICE_nGnRE behavior in the VM. The following table summarizes the behavior for the various S1 and S2 mapping combinations for systems with FWB enabled [3]. S1 | S2 | Result NORMAL_NC | NORMAL_NC | NORMAL_NC NORMAL_NC | DEVICE_nGnRE | DEVICE_nGnRE Recently a change was added that modifies this default behavior and make KVM map MMIO as MT_S2_FWB_NORMAL_NC when a VMA flag VM_ALLOW_ANY_UNCACHED is set [4]. Setting S2 as MT_S2_FWB_NORMAL_NC provides the desired behavior (uncached, unaligned access) for resmem. To use VM_ALLOW_ANY_UNCACHED flag, the platform must guarantee that no action taken on the MMIO mapping can trigger an uncontained failure. The Grace Hopper satisfies this requirement. So set the VM_ALLOW_ANY_UNCACHED flag in the VMA. Applied over next-20240227. 
base-commit: 22ba90670a51 Link: https://lore.kernel.org/all/20240220115055.23546-4-ankita@nvidia.com/ [1] Link: https://www.nvidia.com/en-in/technologies/multi-instance-gpu/ [2] Link: https://developer.arm.com/documentation/ddi0487/latest/ section D8.5.5 [3] Link: https://lore.kernel.org/all/20240224150546.368-1-ankita@nvidia.com/ [4] Cc: Alex Williamson Cc: Kevin Tian Cc: Jason Gunthorpe Cc: Vikram Sethi Cc: Zhi Wang Signed-off-by: Ankit Agrawal Reviewed-by: Jason Gunthorpe Link: https://lore.kernel.org/r/20240229193934.2417-1-ankita@nvidia.com Signed-off-by: Alex Williamson Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Link: https://gitee.com/anolis/cloud-kernel/pulls/4503 --- drivers/vfio/pci/nvgrace-gpu/main.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c index 25814006352d..a7fd018aa548 100644 --- a/drivers/vfio/pci/nvgrace-gpu/main.c +++ b/drivers/vfio/pci/nvgrace-gpu/main.c @@ -160,8 +160,17 @@ static int nvgrace_gpu_mmap(struct vfio_device *core_vdev, * The carved out region of the device memory needs the NORMAL_NC * property. Communicate as such to the hypervisor. */ - if (index == RESMEM_REGION_INDEX) + if (index == RESMEM_REGION_INDEX) { + /* + * The nvgrace-gpu module has no issues with uncontained + * failures on NORMAL_NC accesses. VM_ALLOW_ANY_UNCACHED is + * set to communicate to the KVM to S2 map as NORMAL_NC. + * This opens up guest usage of NORMAL_NC for this mapping. + */ + vm_flags_set(vma, VM_ALLOW_ANY_UNCACHED); + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + } /* * Perform a PFN map to the memory and back the device BAR by the -- Gitee From 4a85de17da0c62a4b3def5472586135c3788ebf3 Mon Sep 17 00:00:00 2001 From: Ankit Agrawal Date: Sun, 13 Oct 2024 07:52:16 +0000 Subject: [PATCH 2109/2138] vfio/nvgrace-gpu: Add a new GH200 SKU to the devid table ANBZ: #13559 commit 12cd88a9116acf79416a39adcd8bb1337ae7cee1 upstream. 
NVIDIA is planning to productize a new Grace Hopper superchip SKU with device ID 0x2348. Add the SKU devid to nvgrace_gpu_vfio_pci_table. Signed-off-by: Ankit Agrawal Link: https://lore.kernel.org/r/20241013075216.19229-1-ankita@nvidia.com Signed-off-by: Alex Williamson Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Link: https://gitee.com/anolis/cloud-kernel/pulls/4503 --- drivers/vfio/pci/nvgrace-gpu/main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/vfio/pci/nvgrace-gpu/main.c b/drivers/vfio/pci/nvgrace-gpu/main.c index a7fd018aa548..a467085038f0 100644 --- a/drivers/vfio/pci/nvgrace-gpu/main.c +++ b/drivers/vfio/pci/nvgrace-gpu/main.c @@ -866,6 +866,8 @@ static const struct pci_device_id nvgrace_gpu_vfio_pci_table[] = { { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2342) }, /* GH200 480GB */ { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2345) }, + /* GH200 SKU */ + { PCI_DRIVER_OVERRIDE_DEVICE_VFIO(PCI_VENDOR_ID_NVIDIA, 0x2348) }, {} }; -- Gitee From f8b9c53b58cb96ab9f4ab87f14e04eb298ac5873 Mon Sep 17 00:00:00 2001 From: Kartik Date: Tue, 17 Oct 2023 10:53:15 +0530 Subject: [PATCH 2110/2138] mm/util: Introduce kmemdup_array() ANBZ: #13487 commit 7092e9b3bed1252c7d3f5812b9fb9d82375b73a6 upstream. Introduce kmemdup_array() API to duplicate `n` number of elements from a given array. This internally uses kmemdup to allocate and duplicate the `src` array. 
Signed-off-by: Kartik Acked-by: Kees Cook Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- include/linux/string.h | 1 + mm/util.c | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/include/linux/string.h b/include/linux/string.h index 5077776e995e..361294697f8c 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -219,6 +219,7 @@ extern char *kstrndup(const char *s, size_t len, gfp_t gfp); extern void *kmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2); extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2); extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp); +extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp); extern char **argv_split(gfp_t gfp, const char *str, int *argcp); extern void argv_free(char **argv); diff --git a/mm/util.c b/mm/util.c index 2f5c912cc0a2..7e3d8adde0b2 100644 --- a/mm/util.c +++ b/mm/util.c @@ -135,6 +135,23 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp) } EXPORT_SYMBOL(kmemdup); +/** + * kmemdup_array - duplicate a given array. + * + * @src: array to duplicate. + * @element_size: size of each element of array. + * @count: number of elements to duplicate from array. + * @gfp: GFP mask to use. + * + * Return: duplicated array of @src or %NULL in case of error, + * result is physically contiguous. Use kfree() to free. 
+ */ +void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp) +{ + return kmemdup(src, size_mul(element_size, count), gfp); +} +EXPORT_SYMBOL(kmemdup_array); + /** * kvmemdup - duplicate region of memory * -- Gitee From 1e21012757290f318f4a2a023cadd756a6cef4b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Mon, 25 Sep 2023 11:55:23 +0200 Subject: [PATCH 2111/2138] soc/tegra: cbb: tegra194-cbb: Convert to platform remove callback returning void MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #13487 commit d3f785d74b3d066b17da57d25290b41471b4ebeb upstream. The .remove() callback for a platform driver returns an int which makes many driver authors wrongly assume it's possible to do error handling by returning an error code. However the value returned is ignored (apart from emitting a warning) and this typically results in resource leaks. To improve here there is a quest to make the remove callback return void. In the first step of this quest all drivers are converted to .remove_new() which already returns void. Eventually after all drivers are converted, .remove_new() will be renamed to .remove(). Trivially convert this driver from always returning zero in the remove callback to the void returning variant. 
Signed-off-by: Uwe Kleine-König Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/cbb/tegra194-cbb.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/soc/tegra/cbb/tegra194-cbb.c b/drivers/soc/tegra/cbb/tegra194-cbb.c index cf6886f362d3..9cbc562ae7d3 100644 --- a/drivers/soc/tegra/cbb/tegra194-cbb.c +++ b/drivers/soc/tegra/cbb/tegra194-cbb.c @@ -2293,7 +2293,7 @@ static int tegra194_cbb_probe(struct platform_device *pdev) return tegra_cbb_register(&cbb->base); } -static int tegra194_cbb_remove(struct platform_device *pdev) +static void tegra194_cbb_remove(struct platform_device *pdev) { struct tegra194_cbb *cbb = platform_get_drvdata(pdev); struct tegra_cbb *noc, *tmp; @@ -2311,8 +2311,6 @@ static int tegra194_cbb_remove(struct platform_device *pdev) } spin_unlock_irqrestore(&cbb_lock, flags); - - return 0; } static int __maybe_unused tegra194_cbb_resume_noirq(struct device *dev) @@ -2332,7 +2330,7 @@ static const struct dev_pm_ops tegra194_cbb_pm = { static struct platform_driver tegra194_cbb_driver = { .probe = tegra194_cbb_probe, - .remove = tegra194_cbb_remove, + .remove_new = tegra194_cbb_remove, .driver = { .name = "tegra194-cbb", .of_match_table = of_match_ptr(tegra194_cbb_match), -- Gitee From 6998fb8a2623ff51f48ea6a72fd27af86d3fd0e4 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Thu, 12 Oct 2023 17:35:36 +0200 Subject: [PATCH 2112/2138] soc/tegra: pmc: Drop the ->opp_to_performance_state() callback ANBZ: #13487 commit cda263907a6f88c75fb97cf7adecffaafb6237ec upstream. Since commit 7c41cdcd3bbe ("OPP: Simplify the over-designed pstate <-> level dance"), there is no longer any need for genpd providers to assign the ->opp_to_performance_state(), hence let's drop it. 
Cc: Thierry Reding Cc: Jonathan Hunter Cc: linux-tegra@vger.kernel.org Signed-off-by: Ulf Hansson Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/pmc.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 162f52456f65..f432aa022ace 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -1393,13 +1393,6 @@ tegra_pmc_core_pd_set_performance_state(struct generic_pm_domain *genpd, return 0; } -static unsigned int -tegra_pmc_core_pd_opp_to_performance_state(struct generic_pm_domain *genpd, - struct dev_pm_opp *opp) -{ - return dev_pm_opp_get_level(opp); -} - static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np) { struct generic_pm_domain *genpd; @@ -1412,7 +1405,6 @@ static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np) genpd->name = "core"; genpd->set_performance_state = tegra_pmc_core_pd_set_performance_state; - genpd->opp_to_performance_state = tegra_pmc_core_pd_opp_to_performance_state; err = devm_pm_opp_set_regulators(pmc->dev, rname); if (err) -- Gitee From 5c11a4402bd240803bcd2b76f807c3488466e198 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 12 Nov 2023 08:04:14 +0100 Subject: [PATCH 2113/2138] soc/tegra: pmc: Remove some old and deprecated functions and constants ANBZ: #13487 commit 9863084dd9939e53eb67a689f13503e8025434ac upstream. These TEGRA_IO_RAIL_... functions and constants have been deprecated in commit 21b499105178 ("soc/tegra: pmc: Add I/O pad voltage support") in 2016-11. There seems to be no users since kernel 4.16. Remove them now. 
Signed-off-by: Christophe JAILLET Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/pmc.c | 24 ------------------------ include/soc/tegra/pmc.h | 18 ------------------ 2 files changed, 42 deletions(-) diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index f432aa022ace..6dfcc7f50ece 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -1777,30 +1777,6 @@ static int tegra_io_pad_get_voltage(struct tegra_pmc *pmc, enum tegra_io_pad id) return TEGRA_IO_PAD_VOLTAGE_3V3; } -/** - * tegra_io_rail_power_on() - enable power to I/O rail - * @id: Tegra I/O pad ID for which to enable power - * - * See also: tegra_io_pad_power_enable() - */ -int tegra_io_rail_power_on(unsigned int id) -{ - return tegra_io_pad_power_enable(id); -} -EXPORT_SYMBOL(tegra_io_rail_power_on); - -/** - * tegra_io_rail_power_off() - disable power to I/O rail - * @id: Tegra I/O pad ID for which to disable power - * - * See also: tegra_io_pad_power_disable() - */ -int tegra_io_rail_power_off(unsigned int id) -{ - return tegra_io_pad_power_disable(id); -} -EXPORT_SYMBOL(tegra_io_rail_power_off); - #ifdef CONFIG_PM_SLEEP enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void) { diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h index aadb845d281d..c545875d0ff1 100644 --- a/include/soc/tegra/pmc.h +++ b/include/soc/tegra/pmc.h @@ -148,10 +148,6 @@ enum tegra_io_pad { TEGRA_IO_PAD_AO_HV, }; -/* deprecated, use TEGRA_IO_PAD_{HDMI,LVDS} instead */ -#define TEGRA_IO_RAIL_HDMI TEGRA_IO_PAD_HDMI -#define TEGRA_IO_RAIL_LVDS TEGRA_IO_PAD_LVDS - #ifdef CONFIG_SOC_TEGRA_PMC int tegra_powergate_power_on(unsigned int id); int tegra_powergate_power_off(unsigned int id); @@ -164,10 +160,6 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk, int tegra_io_pad_power_enable(enum tegra_io_pad id); int tegra_io_pad_power_disable(enum 
tegra_io_pad id); -/* deprecated, use tegra_io_pad_power_{enable,disable}() instead */ -int tegra_io_rail_power_on(unsigned int id); -int tegra_io_rail_power_off(unsigned int id); - void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode); void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode); @@ -211,16 +203,6 @@ static inline int tegra_io_pad_get_voltage(enum tegra_io_pad id) return -ENOSYS; } -static inline int tegra_io_rail_power_on(unsigned int id) -{ - return -ENOSYS; -} - -static inline int tegra_io_rail_power_off(unsigned int id) -{ - return -ENOSYS; -} - static inline void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode) { } -- Gitee From 798c19cbf6083df4c3a0581f0457818036c844a5 Mon Sep 17 00:00:00 2001 From: Kartik Date: Tue, 17 Oct 2023 10:53:16 +0530 Subject: [PATCH 2114/2138] soc/tegra: fuse: Use dev_err_probe for probe failures ANBZ: #13487 commit 4569e604b5abc2eacc30dd6ac7e3e0fbaa87bc42 upstream. Currently, in tegra_fuse_probe() if clock/reset get fails, then the driver prints an error if the error is not caused by -EPROBE_DEFER. This can be improved by using dev_err_probe() instead. So, return dev_err_probe() if clock/reset get fails. 
Signed-off-by: Kartik Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/fuse/fuse-tegra.c | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index a2c28f493a75..98805885158e 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -131,13 +131,8 @@ static int tegra_fuse_probe(struct platform_device *pdev) fuse->phys = res->start; fuse->clk = devm_clk_get(&pdev->dev, "fuse"); - if (IS_ERR(fuse->clk)) { - if (PTR_ERR(fuse->clk) != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to get FUSE clock: %ld", - PTR_ERR(fuse->clk)); - - return PTR_ERR(fuse->clk); - } + if (IS_ERR(fuse->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(fuse->clk), "failed to get FUSE clock\n"); platform_set_drvdata(pdev, fuse); fuse->dev = &pdev->dev; @@ -179,12 +174,8 @@ static int tegra_fuse_probe(struct platform_device *pdev) } fuse->rst = devm_reset_control_get_optional(&pdev->dev, "fuse"); - if (IS_ERR(fuse->rst)) { - err = PTR_ERR(fuse->rst); - dev_err(&pdev->dev, "failed to get FUSE reset: %pe\n", - fuse->rst); - return err; - } + if (IS_ERR(fuse->rst)) + return dev_err_probe(&pdev->dev, PTR_ERR(fuse->rst), "failed to get FUSE reset\n"); /* * FUSE clock is enabled at a boot time, hence this resume/suspend -- Gitee From 79053e93817e62a94b41ba5b374ba684552d4443 Mon Sep 17 00:00:00 2001 From: Kartik Date: Tue, 17 Oct 2023 10:53:17 +0530 Subject: [PATCH 2115/2138] soc/tegra: fuse: Refactor resource mapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ANBZ: #13487 commit f0139d666685ef339fe5b588696db754e0ac8159 upstream. 
To prepare for adding ACPI support to the tegra-apbmisc driver, relocate the code responsible for mapping memory resources from the function ‘tegra_init_apbmisc’ to the function ‘tegra_init_apbmisc_resources.’ This adjustment will allow the code to be shared between ‘tegra_init_apbmisc’ and the upcoming ‘tegra_acpi_init_apbmisc’ function. Signed-off-by: Kartik Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/fuse/tegra-apbmisc.c | 37 +++++++++++++++----------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c index da970f3dbf35..06c1b3a2c7ec 100644 --- a/drivers/soc/tegra/fuse/tegra-apbmisc.c +++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c @@ -160,9 +160,28 @@ void __init tegra_init_revision(void) tegra_sku_info.platform = tegra_get_platform(); } -void __init tegra_init_apbmisc(void) +static void tegra_init_apbmisc_resources(struct resource *apbmisc, + struct resource *straps) { void __iomem *strapping_base; + + apbmisc_base = ioremap(apbmisc->start, resource_size(apbmisc)); + if (apbmisc_base) + chipid = readl_relaxed(apbmisc_base + 4); + else + pr_err("failed to map APBMISC registers\n"); + + strapping_base = ioremap(straps->start, resource_size(straps)); + if (strapping_base) { + strapping = readl_relaxed(strapping_base); + iounmap(strapping_base); + } else { + pr_err("failed to map strapping options registers\n"); + } +} + +void __init tegra_init_apbmisc(void) +{ struct resource apbmisc, straps; struct device_node *np; @@ -219,21 +238,7 @@ void __init tegra_init_apbmisc(void) } } - apbmisc_base = ioremap(apbmisc.start, resource_size(&apbmisc)); - if (!apbmisc_base) { - pr_err("failed to map APBMISC registers\n"); - } else { - chipid = readl_relaxed(apbmisc_base + 4); - } - - strapping_base = ioremap(straps.start, resource_size(&straps)); - if 
(!strapping_base) { - pr_err("failed to map strapping options registers\n"); - } else { - strapping = readl_relaxed(strapping_base); - iounmap(strapping_base); - } - + tegra_init_apbmisc_resources(&apbmisc, &straps); long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code"); put: -- Gitee From 11e9065d75dadf2155d5e814e223edbc576ac225 Mon Sep 17 00:00:00 2001 From: Kartik Date: Tue, 17 Oct 2023 10:53:18 +0530 Subject: [PATCH 2116/2138] soc/tegra: fuse: Add tegra_acpi_init_apbmisc() ANBZ: #13487 commit 7b0c505eb3414619feef0bd8acc455858f17c1c6 upstream. In preparation to ACPI support in Tegra fuse driver add function tegra_acpi_init_apbmisc() to initialize tegra-apbmisc driver. Also, document the reason of calling tegra_init_apbmisc() at early init. Note that function tegra_acpi_init_apbmisc() is not placed in the __init section, because it will be called during probe. Signed-off-by: Kartik Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/fuse/fuse.h | 1 + drivers/soc/tegra/fuse/tegra-apbmisc.c | 72 ++++++++++++++++++++++++++ 2 files changed, 73 insertions(+) diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h index 90f23be73894..a41e9f85281a 100644 --- a/drivers/soc/tegra/fuse/fuse.h +++ b/drivers/soc/tegra/fuse/fuse.h @@ -69,6 +69,7 @@ struct tegra_fuse { void tegra_init_revision(void); void tegra_init_apbmisc(void); +void tegra_acpi_init_apbmisc(void); u32 __init tegra_fuse_read_spare(unsigned int spare); u32 __init tegra_fuse_read_early(unsigned int offset); diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c index 06c1b3a2c7ec..6457f80821bb 100644 --- a/drivers/soc/tegra/fuse/tegra-apbmisc.c +++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c @@ -3,9 +3,11 @@ * Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved. 
*/ +#include #include #include #include +#include #include #include @@ -180,6 +182,12 @@ static void tegra_init_apbmisc_resources(struct resource *apbmisc, } } +/** + * tegra_init_apbmisc - Initializes Tegra APBMISC and Strapping registers. + * + * This is called during early init as some of the old 32-bit ARM code needs + * information from the APBMISC registers very early during boot. + */ void __init tegra_init_apbmisc(void) { struct resource apbmisc, straps; @@ -244,3 +252,67 @@ void __init tegra_init_apbmisc(void) put: of_node_put(np); } + +#ifdef CONFIG_ACPI +static const struct acpi_device_id apbmisc_acpi_match[] = { + { "NVDA2010" }, + { /* sentinel */ } +}; + +void tegra_acpi_init_apbmisc(void) +{ + struct resource *resources[2] = { NULL }; + struct resource_entry *rentry; + struct acpi_device *adev = NULL; + struct list_head resource_list; + int rcount = 0; + int ret; + + adev = acpi_dev_get_first_match_dev(apbmisc_acpi_match[0].id, NULL, -1); + if (!adev) + return; + + INIT_LIST_HEAD(&resource_list); + + ret = acpi_dev_get_memory_resources(adev, &resource_list); + if (ret < 0) { + pr_err("failed to get APBMISC memory resources"); + goto out_put_acpi_dev; + } + + /* + * Get required memory resources. + * + * resources[0]: apbmisc. + * resources[1]: straps. 
+ */ + resource_list_for_each_entry(rentry, &resource_list) { + if (rcount >= ARRAY_SIZE(resources)) + break; + + resources[rcount++] = rentry->res; + } + + if (!resources[0]) { + pr_err("failed to get APBMISC registers\n"); + goto out_free_resource_list; + } + + if (!resources[1]) { + pr_err("failed to get strapping options registers\n"); + goto out_free_resource_list; + } + + tegra_init_apbmisc_resources(resources[0], resources[1]); + +out_free_resource_list: + acpi_dev_free_resource_list(&resource_list); + +out_put_acpi_dev: + acpi_dev_put(adev); +} +#else +void tegra_acpi_init_apbmisc(void) +{ +} +#endif -- Gitee From f929b7a3bf7be38a89de2aa6d9700ae0a05a98e4 Mon Sep 17 00:00:00 2001 From: Kartik Date: Tue, 17 Oct 2023 10:53:19 +0530 Subject: [PATCH 2117/2138] soc/tegra: fuse: Add function to add lookups ANBZ: #13487 commit 71661c1c8c34d100be03b229dfc7cd7d2db7f62e upstream. Add helper function tegra_fuse_add_lookups() to register Tegra fuse nvmem lookups. So, this can be shared between tegra_fuse_init() and ACPI probe, which is to be introduced later. Use kmemdup_array to duplicate fuse->soc->lookups. 
Signed-off-by: Kartik Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/fuse/fuse-tegra.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index 98805885158e..4ebb5597a77b 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -113,6 +113,18 @@ static void tegra_fuse_restore(void *base) fuse->clk = NULL; } +static int tegra_fuse_add_lookups(struct tegra_fuse *fuse) +{ + fuse->lookups = kmemdup_array(fuse->soc->lookups, sizeof(*fuse->lookups), + fuse->soc->num_lookups, GFP_KERNEL); + if (!fuse->lookups) + return -ENOMEM; + + nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups); + + return 0; +} + static int tegra_fuse_probe(struct platform_device *pdev) { void __iomem *base = fuse->base; @@ -398,6 +410,7 @@ static int __init tegra_init_fuse(void) const struct of_device_id *match; struct device_node *np; struct resource regs; + int err; tegra_init_apbmisc(); @@ -495,15 +508,11 @@ static int __init tegra_init_fuse(void) pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n", tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id); - if (fuse->soc->lookups) { - size_t size = sizeof(*fuse->lookups) * fuse->soc->num_lookups; - - fuse->lookups = kmemdup(fuse->soc->lookups, size, GFP_KERNEL); - if (fuse->lookups) - nvmem_add_cell_lookups(fuse->lookups, fuse->soc->num_lookups); - } + err = tegra_fuse_add_lookups(fuse); + if (err) + pr_err("failed to add FUSE lookups\n"); - return 0; + return err; } early_initcall(tegra_init_fuse); -- Gitee From 79130df559953273000983f18600abb0a4993449 Mon Sep 17 00:00:00 2001 From: Kartik Date: Tue, 17 Oct 2023 10:53:20 +0530 Subject: [PATCH 2118/2138] soc/tegra: fuse: Add function to print SKU info ANBZ: #13487 commit 
13a69354147e0aaf39695bfb9062738916e924a0 upstream. Add helper function tegra_fuse_print_sku_info() to print Tegra SKU information. So, it can be shared between tegra_fuse_init() and ACPI probe which is to be introduced later. Signed-off-by: Kartik Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/fuse/fuse-tegra.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index 4ebb5597a77b..7a93c6512f7b 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -113,6 +113,16 @@ static void tegra_fuse_restore(void *base) fuse->clk = NULL; } +static void tegra_fuse_print_sku_info(struct tegra_sku_info *tegra_sku_info) +{ + pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n", + tegra_revision_name[tegra_sku_info->revision], + tegra_sku_info->sku_id, tegra_sku_info->cpu_process_id, + tegra_sku_info->soc_process_id); + pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n", + tegra_sku_info->cpu_speedo_id, tegra_sku_info->soc_speedo_id); +} + static int tegra_fuse_add_lookups(struct tegra_fuse *fuse) { fuse->lookups = kmemdup_array(fuse->soc->lookups, sizeof(*fuse->lookups), @@ -501,12 +511,7 @@ static int __init tegra_init_fuse(void) fuse->soc->init(fuse); - pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n", - tegra_revision_name[tegra_sku_info.revision], - tegra_sku_info.sku_id, tegra_sku_info.cpu_process_id, - tegra_sku_info.soc_process_id); - pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n", - tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id); + tegra_fuse_print_sku_info(&tegra_sku_info); err = tegra_fuse_add_lookups(fuse); if (err) -- Gitee From ec9e182f3585337d1065ac79089ef43dc356cf03 Mon Sep 17 00:00:00 2001 From: Kartik Date: Tue, 17 Oct 2023 
10:53:21 +0530 Subject: [PATCH 2119/2138] soc/tegra: fuse: Add ACPI support for Tegra194 and Tegra234 ANBZ: #13487 commit 972167c690801ddf60e88da50493b4ffe103c7f2 upstream. Add ACPI support for Tegra194 & Tegra243 SoC's. This requires following modifications to the probe when ACPI boot is used: - Initialize soc data. - Add nvmem lookups. - Register soc device. - use devm_clk_get_optional() instead of devm_clk_get() to get fuse->clk, as fuse clocks are not required when using ACPI boot. Also, drop '__init' keyword for tegra_soc_device_register() as this is also used by tegra_fuse_probe() and use dev_err_probe() wherever applicable. Signed-off-by: Kartik Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/fuse/fuse-tegra.c | 52 +++++++++++++++++++++++++++-- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index 7a93c6512f7b..1c758f121f91 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -3,11 +3,13 @@ * Copyright (c) 2013-2023, NVIDIA CORPORATION. All rights reserved. */ +#include #include #include #include #include #include +#include #include #include #include @@ -152,7 +154,38 @@ static int tegra_fuse_probe(struct platform_device *pdev) return PTR_ERR(fuse->base); fuse->phys = res->start; - fuse->clk = devm_clk_get(&pdev->dev, "fuse"); + /* Initialize the soc data and lookups if using ACPI boot. 
*/ + if (is_acpi_node(dev_fwnode(&pdev->dev)) && !fuse->soc) { + u8 chip; + + tegra_acpi_init_apbmisc(); + + chip = tegra_get_chip_id(); + switch (chip) { +#if defined(CONFIG_ARCH_TEGRA_194_SOC) + case TEGRA194: + fuse->soc = &tegra194_fuse_soc; + break; +#endif +#if defined(CONFIG_ARCH_TEGRA_234_SOC) + case TEGRA234: + fuse->soc = &tegra234_fuse_soc; + break; +#endif + default: + return dev_err_probe(&pdev->dev, -EINVAL, "Unsupported SoC: %02x\n", chip); + } + + fuse->soc->init(fuse); + tegra_fuse_print_sku_info(&tegra_sku_info); + tegra_soc_device_register(); + + err = tegra_fuse_add_lookups(fuse); + if (err) + return dev_err_probe(&pdev->dev, err, "failed to add FUSE lookups\n"); + } + + fuse->clk = devm_clk_get_optional(&pdev->dev, "fuse"); if (IS_ERR(fuse->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(fuse->clk), "failed to get FUSE clock\n"); @@ -275,10 +308,17 @@ static const struct dev_pm_ops tegra_fuse_pm = { SET_SYSTEM_SLEEP_PM_OPS(tegra_fuse_suspend, tegra_fuse_resume) }; +static const struct acpi_device_id tegra_fuse_acpi_match[] = { + { "NVDA200F" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(acpi, tegra_fuse_acpi_match); + static struct platform_driver tegra_fuse_driver = { .driver = { .name = "tegra-fuse", .of_match_table = tegra_fuse_match, + .acpi_match_table = tegra_fuse_acpi_match, .pm = &tegra_fuse_pm, .suppress_bind_attrs = true, }, @@ -300,7 +340,13 @@ u32 __init tegra_fuse_read_early(unsigned int offset) int tegra_fuse_readl(unsigned long offset, u32 *value) { - if (!fuse->read || !fuse->clk) + /* + * Wait for fuse->clk to be initialized if device-tree boot is used. 
+ */ + if (is_of_node(dev_fwnode(fuse->dev)) && !fuse->clk) + return -EPROBE_DEFER; + + if (!fuse->read) return -EPROBE_DEFER; if (IS_ERR(fuse->clk)) @@ -383,7 +429,7 @@ const struct attribute_group tegra194_soc_attr_group = { }; #endif -struct device * __init tegra_soc_device_register(void) +struct device *tegra_soc_device_register(void) { struct soc_device_attribute *attr; struct soc_device *dev; -- Gitee From db6fdd3f922dd049a7a682dab41a6f146d0b320b Mon Sep 17 00:00:00 2001 From: Kartik Date: Tue, 17 Oct 2023 10:53:22 +0530 Subject: [PATCH 2120/2138] soc/tegra: fuse: Add support for Tegra241 ANBZ: #13487 commit 8402074f30238ee1bdc70b843932cd7350830ab6 upstream. Add support for Tegra241 which use ACPI boot. Signed-off-by: Kartik Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/Kconfig | 5 +++++ drivers/soc/tegra/fuse/fuse-tegra.c | 5 +++++ drivers/soc/tegra/fuse/fuse-tegra30.c | 20 ++++++++++++++++++++ drivers/soc/tegra/fuse/fuse.h | 4 ++++ drivers/soc/tegra/fuse/tegra-apbmisc.c | 1 + include/soc/tegra/fuse.h | 1 + 6 files changed, 36 insertions(+) diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig index 6f3098822969..5f5d9d663fef 100644 --- a/drivers/soc/tegra/Kconfig +++ b/drivers/soc/tegra/Kconfig @@ -133,6 +133,11 @@ config ARCH_TEGRA_234_SOC help Enable support for the NVIDIA Tegra234 SoC. +config ARCH_TEGRA_241_SOC + bool "NVIDIA Tegra241 SoC" + help + Enable support for the NVIDIA Tegra241 SoC. 
+ endif endif diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index 1c758f121f91..233b8e7bb41b 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -171,6 +171,11 @@ static int tegra_fuse_probe(struct platform_device *pdev) case TEGRA234: fuse->soc = &tegra234_fuse_soc; break; +#endif +#if defined(CONFIG_ARCH_TEGRA_241_SOC) + case TEGRA241: + fuse->soc = &tegra241_fuse_soc; + break; #endif default: return dev_err_probe(&pdev->dev, -EINVAL, "Unsupported SoC: %02x\n", chip); diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c index e94d46372a63..2070d36c510d 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra30.c +++ b/drivers/soc/tegra/fuse/fuse-tegra30.c @@ -678,3 +678,23 @@ const struct tegra_fuse_soc tegra234_fuse_soc = { .clk_suspend_on = false, }; #endif + +#if defined(CONFIG_ARCH_TEGRA_241_SOC) +static const struct tegra_fuse_info tegra241_fuse_info = { + .read = tegra30_fuse_read, + .size = 0x16008, + .spare = 0xcf0, +}; + +static const struct nvmem_keepout tegra241_fuse_keepouts[] = { + { .start = 0xc, .end = 0x1600c } +}; + +const struct tegra_fuse_soc tegra241_fuse_soc = { + .init = tegra30_fuse_init, + .info = &tegra241_fuse_info, + .keepouts = tegra241_fuse_keepouts, + .num_keepouts = ARRAY_SIZE(tegra241_fuse_keepouts), + .soc_attr_group = &tegra194_soc_attr_group, +}; +#endif diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h index a41e9f85281a..f3b705327c20 100644 --- a/drivers/soc/tegra/fuse/fuse.h +++ b/drivers/soc/tegra/fuse/fuse.h @@ -136,4 +136,8 @@ extern const struct tegra_fuse_soc tegra194_fuse_soc; extern const struct tegra_fuse_soc tegra234_fuse_soc; #endif +#ifdef CONFIG_ARCH_TEGRA_241_SOC +extern const struct tegra_fuse_soc tegra241_fuse_soc; +#endif + #endif diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c index 6457f80821bb..e2ca5d55fd31 100644 --- 
a/drivers/soc/tegra/fuse/tegra-apbmisc.c +++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c @@ -64,6 +64,7 @@ bool tegra_is_silicon(void) switch (tegra_get_chip_id()) { case TEGRA194: case TEGRA234: + case TEGRA241: case TEGRA264: if (tegra_get_platform() == 0) return true; diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h index 3a513be50243..8f421b9f7585 100644 --- a/include/soc/tegra/fuse.h +++ b/include/soc/tegra/fuse.h @@ -17,6 +17,7 @@ #define TEGRA186 0x18 #define TEGRA194 0x19 #define TEGRA234 0x23 +#define TEGRA241 0x24 #define TEGRA264 0x26 #define TEGRA_FUSE_SKU_CALIB_0 0xf0 -- Gitee From cdbe2ea5861097a6fc316d886d777f2b5d410ab8 Mon Sep 17 00:00:00 2001 From: Kartik Date: Wed, 20 Dec 2023 11:40:13 +0530 Subject: [PATCH 2121/2138] soc/tegra: fuse: Define tegra194_soc_attr_group for Tegra241 ANBZ: #13487 commit 7a849d0b757c8afa642e112a676767687e46d3a5 upstream. Tegra241 SoC data uses tegra194_soc_attr_group, which is only defined if config CONFIG_ARCH_TEGRA_194_SOC or CONFIG_ARCH_TEGRA_234_SOC or both are enabled. This causes a build failure if both of these configs are disabled and CONFIG_ARCH_TEGRA_241_SOC is enabled. Define tegra194_soc_attr_group if CONFIG_ARCH_TEGRA_241_SOC is enabled. 
Signed-off-by: Kartik Acked-by: Randy Dunlap Tested-by: Randy Dunlap # build-tested Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/fuse/fuse-tegra.c | 3 ++- drivers/soc/tegra/fuse/fuse.h | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index 233b8e7bb41b..c34efa5bf44c 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -407,7 +407,8 @@ const struct attribute_group tegra_soc_attr_group = { }; #if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \ - IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) + IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \ + IS_ENABLED(CONFIG_ARCH_TEGRA_241_SOC) static ssize_t platform_show(struct device *dev, struct device_attribute *attr, char *buf) { diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h index f3b705327c20..9fee6ad6ad9e 100644 --- a/drivers/soc/tegra/fuse/fuse.h +++ b/drivers/soc/tegra/fuse/fuse.h @@ -124,7 +124,8 @@ extern const struct tegra_fuse_soc tegra186_fuse_soc; #endif #if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \ - IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) + IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC) || \ + IS_ENABLED(CONFIG_ARCH_TEGRA_241_SOC) extern const struct attribute_group tegra194_soc_attr_group; #endif -- Gitee From ccf76ac46f7e49bc9e5a7df91e1bc0b6efa26c82 Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Mon, 29 Jan 2024 13:46:59 +0000 Subject: [PATCH 2122/2138] soc/tegra: fuse: Fix crash in tegra_fuse_readl() ANBZ: #13487 commit 81b3f0efbbced8dbf4ef4a4c0008a7ada427b38d upstream. Commit c5b2d43e67bb ("soc/tegra: fuse: Add ACPI support for Tegra194 and Tegra234") updated the Tegra fuse driver to add ACPI support and added a test to the tegra_fuse_readl() function to check if the device is booting with device-tree. 
This test passes 'fuse->dev' variable to dev_fwnode() but does not check first is 'fuse->dev' is valid. This is causing a crash to occur in Tegra XUSB PHY driver that calls the tegra_fuse_readl() function before 'fuse->dev' variable has been initialised ... Unable to handle kernel NULL pointer dereference at virtual address 0000000000000290 Mem abort info: ESR = 0x0000000096000004 EC = 0x25: DABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 FSC = 0x04: level 0 translation fault Data abort info: ISV = 0, ISS = 0x00000004, ISS2 = 0x00000000 CM = 0, WnR = 0, TnD = 0, TagAccess = 0 GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0 [0000000000000290] user address but active_mm is swapper Internal error: Oops: 0000000096000004 [#1] PREEMPT SMP Modules linked in: CPU: 7 PID: 70 Comm: kworker/u16:4 Not tainted 6.8.0-rc1-next-20240129-02825-g596764183be8 #1 Hardware name: NVIDIA Jetson AGX Xavier Developer Kit (DT) Workqueue: events_unbound deferred_probe_work_func pstate: 60400009 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) pc : __dev_fwnode+0x0/0x18 lr : tegra_fuse_readl+0x24/0x98 sp : ffff80008393ba10 x29: ffff80008393ba10 x28: 0000000000000000 x27: ffff800081233c10 x26: 00000000000001c8 x25: ffff000080b7bc10 x24: ffff000082df3b00 x23: fffffffffffffff4 x22: 0000000000000004 x21: ffff80008393ba84 x20: 00000000000000f0 x19: ffff800082f1e000 x18: ffff800081d72000 x17: 0000000000000001 x16: 0000000000000001 x15: ffff800082fcdfff x14: 0000000000000000 x13: 0000000003541000 x12: 0000000000000020 x11: 0140000000000000 x10: ffff800080000000 x9 : 0000000000000000 x8 : ffff000082df3b40 x7 : 0000000000000000 x6 : 000000000000003f x5 : 00000000ffffffff x4 : 0000000000000dc0 x3 : 00000000000000c0 x2 : 0000000000000001 x1 : ffff80008393ba84 x0 : 0000000000000000 Call trace: __dev_fwnode+0x0/0x18 tegra186_xusb_padctl_probe+0xb0/0x1a8 tegra_xusb_padctl_probe+0x7c/0xebc platform_probe+0x90/0xd8 really_probe+0x13c/0x29c __driver_probe_device+0x7c/0x124 
driver_probe_device+0x38/0x11c __device_attach_driver+0x90/0xdc bus_for_each_drv+0x78/0xdc __device_attach+0xfc/0x188 device_initial_probe+0x10/0x18 bus_probe_device+0xa4/0xa8 deferred_probe_work_func+0x80/0xb4 process_scheduled_works+0x178/0x3e0 worker_thread+0x164/0x2e8 kthread+0xfc/0x11c ret_from_fork+0x10/0x20 Code: a8c27bfd d65f03c0 128002a0 d65f03c0 (f9414801) ---[ end trace 0000000000000000 ]--- Fix this by verifying that 'fuse->dev' is valid before passing to dev_fwnode(). Fixes: c5b2d43e67bb ("soc/tegra: fuse: Add ACPI support for Tegra194 and Tegra234") Signed-off-by: Jon Hunter Reviewed-by: Kartik Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/fuse/fuse-tegra.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c index c34efa5bf44c..b6bfd6729df3 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra.c +++ b/drivers/soc/tegra/fuse/fuse-tegra.c @@ -345,6 +345,9 @@ u32 __init tegra_fuse_read_early(unsigned int offset) int tegra_fuse_readl(unsigned long offset, u32 *value) { + if (!fuse->dev) + return -EPROBE_DEFER; + /* * Wait for fuse->clk to be initialized if device-tree boot is used. */ -- Gitee From 87bcae69e14d8addf7d16501299f9d92266cd84f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 3 Jan 2024 11:26:49 +0100 Subject: [PATCH 2123/2138] soc/tegra: Fix build failure on Tegra241 ANBZ: #13487 commit d820100a1bdec5fd671310de902dc8baea317a3a upstream. If all the other SoCs are disabled, the driver fails to build: drivers/soc/tegra/fuse/fuse-tegra30.c:684:17: error: 'tegra30_fuse_read' undeclared here (not in a function); did you mean 'tegra_fuse_readl'? 
684 | .read = tegra30_fuse_read, | ^~~~~~~~~~~~~~~~~ | tegra_fuse_readl drivers/soc/tegra/fuse/fuse-tegra30.c:694:17: error: 'tegra30_fuse_init' undeclared here (not in a function); did you mean 'tegra_fuse_info'? 694 | .init = tegra30_fuse_init, | ^~~~~~~~~~~~~~~~~ Fix the list of SoCs using this function to include the newly added one. Fixes: dee509eb9cd5 ("soc/tegra: fuse: Add support for Tegra241") Signed-off-by: Arnd Bergmann Reviewed-by: Jon Hunter Reviewed-by: Kartik Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/fuse/fuse-tegra30.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c index 2070d36c510d..eb14e5ff5a0a 100644 --- a/drivers/soc/tegra/fuse/fuse-tegra30.c +++ b/drivers/soc/tegra/fuse/fuse-tegra30.c @@ -38,7 +38,8 @@ defined(CONFIG_ARCH_TEGRA_210_SOC) || \ defined(CONFIG_ARCH_TEGRA_186_SOC) || \ defined(CONFIG_ARCH_TEGRA_194_SOC) || \ - defined(CONFIG_ARCH_TEGRA_234_SOC) + defined(CONFIG_ARCH_TEGRA_234_SOC) || \ + defined(CONFIG_ARCH_TEGRA_241_SOC) static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset) { if (WARN_ON(!fuse->base)) -- Gitee From 62d9a9033b2fb78217561bd277aaaa0fd432c03d Mon Sep 17 00:00:00 2001 From: Petlozu Pravareshwar Date: Sun, 11 Feb 2024 17:17:25 +0000 Subject: [PATCH 2124/2138] soc/tegra: pmc: Update address mapping sequence for PMC apertures ANBZ: #13487 commit 6f4429e21a7fef60df80c567eed0af189e2c02c7 upstream. On Tegra SoCs prior to Tegra186, PMC has single address range only. Starting from and after Tegra186, PMC has additional address ranges apart from base address range. Currently in PMC driver, we try to map these additional address ranges on all SoCs and if we fail then we assume that the range is not valid for an SoC. 
This change makes it more explicit on which address ranges are expected to be present on which SoCs and maps the additional address ranges only on SoCs from and after Tegra186. Signed-off-by: Petlozu Pravareshwar Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/pmc.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 6dfcc7f50ece..0bc983f6b088 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -384,6 +384,7 @@ struct tegra_pmc_soc { bool has_blink_output; bool has_usb_sleepwalk; bool supports_core_domain; + bool has_single_mmio_aperture; }; /** @@ -2885,31 +2886,28 @@ static int tegra_pmc_probe(struct platform_device *pdev) if (IS_ERR(base)) return PTR_ERR(base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wake"); - if (res) { + if (pmc->soc->has_single_mmio_aperture) { + pmc->wake = base; + pmc->aotag = base; + pmc->scratch = base; + } else { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "wake"); pmc->wake = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pmc->wake)) return PTR_ERR(pmc->wake); - } else { - pmc->wake = base; - } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aotag"); - if (res) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "aotag"); pmc->aotag = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pmc->aotag)) return PTR_ERR(pmc->aotag); - } else { - pmc->aotag = base; - } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scratch"); - if (res) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "scratch"); pmc->scratch = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pmc->scratch)) return PTR_ERR(pmc->scratch); - } else { - pmc->scratch = base; } pmc->clk = devm_clk_get_optional(&pdev->dev, "pclk"); @@ 
-3300,6 +3298,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = { .num_pmc_clks = 0, .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const char * const tegra30_powergates[] = { @@ -3361,6 +3360,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = { .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data), .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const char * const tegra114_powergates[] = { @@ -3418,6 +3418,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = { .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data), .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const char * const tegra124_powergates[] = { @@ -3562,6 +3563,7 @@ static const struct tegra_pmc_soc tegra124_pmc_soc = { .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data), .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const char * const tegra210_powergates[] = { @@ -3725,6 +3727,7 @@ static const struct tegra_pmc_soc tegra210_pmc_soc = { .num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data), .has_blink_output = true, .has_usb_sleepwalk = true, + .has_single_mmio_aperture = true, }; static const struct tegra_io_pad_soc tegra186_io_pads[] = { @@ -3922,6 +3925,7 @@ static const struct tegra_pmc_soc tegra186_pmc_soc = { .num_pmc_clks = 0, .has_blink_output = false, .has_usb_sleepwalk = false, + .has_single_mmio_aperture = false, }; static const struct tegra_io_pad_soc tegra194_io_pads[] = { @@ -4107,6 +4111,7 @@ static const struct tegra_pmc_soc tegra194_pmc_soc = { .num_pmc_clks = 0, .has_blink_output = false, .has_usb_sleepwalk = false, + .has_single_mmio_aperture = false, }; static const struct tegra_io_pad_soc tegra234_io_pads[] = { @@ -4235,6 +4240,7 @@ static const struct tegra_pmc_soc tegra234_pmc_soc = { .pmc_clks_data = NULL, .num_pmc_clks = 0, .has_blink_output = false, + .has_single_mmio_aperture = 
false, }; static const struct of_device_id tegra_pmc_match[] = { -- Gitee From 0e79aa4ba31ad6a1a5e43c6cc187d9fb85b41719 Mon Sep 17 00:00:00 2001 From: Petlozu Pravareshwar Date: Sun, 11 Feb 2024 17:17:27 +0000 Subject: [PATCH 2125/2138] soc/tegra: pmc: Update scratch as an optional aperture ANBZ: #13487 commit ccd8e76fdb8d4219097b09660cfc41385e055906 upstream. Scratch address space register is used to store reboot reason. For some Tegra234 systems, the scratch space is not available to store the reboot reason. This is because scratch region on these systems is not accessible by the kernel as restricted by the Hypervisor. Such systems would delist scratch aperture from PMC DT node. Hence this change makes scratch as optional aperture and also avoids registering reboot notifier if scratch address space isn't mapped. Signed-off-by: Petlozu Pravareshwar Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/pmc.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 0bc983f6b088..6948f78c7a4a 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -2903,11 +2903,16 @@ static int tegra_pmc_probe(struct platform_device *pdev) if (IS_ERR(pmc->aotag)) return PTR_ERR(pmc->aotag); + /* "scratch" is an optional aperture */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scratch"); - pmc->scratch = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(pmc->scratch)) - return PTR_ERR(pmc->scratch); + if (res) { + pmc->scratch = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pmc->scratch)) + return PTR_ERR(pmc->scratch); + } else { + pmc->scratch = NULL; + } } pmc->clk = devm_clk_get_optional(&pdev->dev, "pclk"); @@ -2919,12 +2924,15 @@ static int tegra_pmc_probe(struct platform_device *pdev) * PMC should be last resort for 
restarting since it soft-resets * CPU without resetting everything else. */ - err = devm_register_reboot_notifier(&pdev->dev, - &tegra_pmc_reboot_notifier); - if (err) { - dev_err(&pdev->dev, "unable to register reboot notifier, %d\n", - err); - return err; + if (pmc->scratch) { + err = devm_register_reboot_notifier(&pdev->dev, + &tegra_pmc_reboot_notifier); + if (err) { + dev_err(&pdev->dev, + "unable to register reboot notifier, %d\n", + err); + return err; + } } err = devm_register_sys_off_handler(&pdev->dev, -- Gitee From b1613c567ee758f50eabb83916e356c9428966cc Mon Sep 17 00:00:00 2001 From: Prathamesh Shete Date: Fri, 16 Feb 2024 08:04:50 +0000 Subject: [PATCH 2126/2138] soc/tegra: pmc: Add SD wake event for Tegra234 ANBZ: #13487 commit ae7d2d9b8ebe9f107c500808d5bcd68397645720 upstream. Add SD wake event for Tegra234 so that system can be woken up from suspend when SD card hot-plug/unplug event is detected. Signed-off-by: Prathamesh Shete Signed-off-by: Petlozu Pravareshwar Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/pmc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 6948f78c7a4a..d6bfcea5ee65 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -3,7 +3,7 @@ * drivers/soc/tegra/pmc.c * * Copyright (c) 2010 Google, Inc - * Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2018-2024, NVIDIA CORPORATION. All rights reserved. 
* * Author: * Colin Cross @@ -4209,6 +4209,7 @@ static const char * const tegra234_reset_sources[] = { }; static const struct tegra_wake_event tegra234_wake_events[] = { + TEGRA_WAKE_GPIO("sd-wake", 8, 0, TEGRA234_MAIN_GPIO(G, 7)), TEGRA_WAKE_IRQ("pmu", 24, 209), TEGRA_WAKE_GPIO("power", 29, 1, TEGRA234_AON_GPIO(EE, 4)), TEGRA_WAKE_GPIO("mgbe", 56, 0, TEGRA234_MAIN_GPIO(Y, 3)), -- Gitee From 0e1b845e252cb14dbe3caab726efa37c161718e5 Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Wed, 3 Apr 2024 12:42:08 +0100 Subject: [PATCH 2127/2138] soc/tegra: pmc: Add EQOS wake event for Tegra194 and Tegra234 ANBZ: #13487 commit de024f63cea3ec833bc8a55be2753879e2750db9 upstream. Add the wake event for the EQOS ethernet controller on Tegra194 and Tegra234 devices, so that system can be woken up by an event from this ethernet controller. Signed-off-by: Jon Hunter Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/pmc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index d6bfcea5ee65..91d0ad6ddefc 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -4074,6 +4074,7 @@ static const char * const tegra194_reset_sources[] = { }; static const struct tegra_wake_event tegra194_wake_events[] = { + TEGRA_WAKE_GPIO("eqos", 20, 0, TEGRA194_MAIN_GPIO(G, 4)), TEGRA_WAKE_IRQ("pmu", 24, 209), TEGRA_WAKE_GPIO("power", 29, 1, TEGRA194_AON_GPIO(EE, 4)), TEGRA_WAKE_IRQ("rtc", 73, 10), @@ -4210,6 +4211,7 @@ static const char * const tegra234_reset_sources[] = { static const struct tegra_wake_event tegra234_wake_events[] = { TEGRA_WAKE_GPIO("sd-wake", 8, 0, TEGRA234_MAIN_GPIO(G, 7)), + TEGRA_WAKE_GPIO("eqos", 20, 0, TEGRA234_MAIN_GPIO(G, 4)), TEGRA_WAKE_IRQ("pmu", 24, 209), TEGRA_WAKE_GPIO("power", 29, 1, TEGRA234_AON_GPIO(EE, 4)), TEGRA_WAKE_GPIO("mgbe", 56, 0, TEGRA234_MAIN_GPIO(Y, 3)), -- Gitee From 
3d4a0c93792f1abec3f0e909b748b84f25ad9775 Mon Sep 17 00:00:00 2001 From: Jon Hunter Date: Tue, 11 Jun 2024 08:56:03 +0100 Subject: [PATCH 2128/2138] soc/tegra: pmc: Simplify resource lookup ANBZ: #13487 commit cfcd6c46fec46cac3bf6658838d3ea329aff37aa upstream. Commit 6f4429e21a7f ("soc/tegra: pmc: Update address mapping sequence for PMC apertures") updated the resource lookup code in the PMC driver. Instead of calling platform_get_resource_byname() and devm_ioremap_resource() simplify the code by simply calling devm_platform_ioremap_resource_byname(). Signed-off-by: Jon Hunter Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/pmc.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 91d0ad6ddefc..6c37d6eb8b49 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -2891,15 +2891,11 @@ static int tegra_pmc_probe(struct platform_device *pdev) pmc->aotag = base; pmc->scratch = base; } else { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "wake"); - pmc->wake = devm_ioremap_resource(&pdev->dev, res); + pmc->wake = devm_platform_ioremap_resource_byname(pdev, "wake"); if (IS_ERR(pmc->wake)) return PTR_ERR(pmc->wake); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "aotag"); - pmc->aotag = devm_ioremap_resource(&pdev->dev, res); + pmc->aotag = devm_platform_ioremap_resource_byname(pdev, "aotag"); if (IS_ERR(pmc->aotag)) return PTR_ERR(pmc->aotag); -- Gitee From f775fe8c8990135cf14e2e1249e1b61d74b80a38 Mon Sep 17 00:00:00 2001 From: Jinjie Ruan Date: Tue, 27 Aug 2024 19:46:04 +0800 Subject: [PATCH 2129/2138] soc/tegra: pmc: Simplify with scoped for each OF child loop ANBZ: #13487 commit 4d57a840560c3ff04fed07a06b3aec7cbac4bff0 upstream. 
Use scoped for_each_child_of_node_scoped() when iterating over device nodes to make code a bit simpler. Signed-off-by: Jinjie Ruan Signed-off-by: Thierry Reding Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- drivers/soc/tegra/pmc.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 6c37d6eb8b49..a08c377933c5 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -1438,7 +1438,7 @@ static int tegra_powergate_init(struct tegra_pmc *pmc, struct device_node *parent) { struct of_phandle_args child_args, parent_args; - struct device_node *np, *child; + struct device_node *np; int err = 0; /* @@ -1457,12 +1457,10 @@ static int tegra_powergate_init(struct tegra_pmc *pmc, if (!np) return 0; - for_each_child_of_node(np, child) { + for_each_child_of_node_scoped(np, child) { err = tegra_powergate_add(pmc, child); - if (err < 0) { - of_node_put(child); + if (err < 0) break; - } if (of_parse_phandle_with_args(child, "power-domains", "#power-domain-cells", @@ -1474,10 +1472,8 @@ static int tegra_powergate_init(struct tegra_pmc *pmc, err = of_genpd_add_subdomain(&parent_args, &child_args); of_node_put(parent_args.np); - if (err) { - of_node_put(child); + if (err) break; - } } of_node_put(np); -- Gitee From d35aee0199d04f6744e09810e47bc81f7ad6b700 Mon Sep 17 00:00:00 2001 From: Qinyun Tan Date: Mon, 13 Jan 2025 19:08:41 +0800 Subject: [PATCH 2130/2138] anolis: configs: refresh kconfigs ANBZ: #13487 Refresh Kconfigs and put relative unknown level kconfigs to L1-RECOMMEND or L2-OPTIONAL level. No Functional Changes! 
Refresh kconfigs by follow command: > make -C anolis/ dist-configs-update Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SME | 1 - .../configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_BACKED_BY_FILE | 1 + anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_MEM_ALWAYS_FORCE | 1 + anolis/configs/L1-RECOMMEND/default/CONFIG_Z3FOLD | 1 - .../L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD | 1 - anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_ZIP_ZSTD | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2200 | 1 + .../configs/L2-OPTIONAL/{x86 => default}/CONFIG_MICROSOFT_MANA | 0 anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MOTORCOMM | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_FORCE_PTRACE | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_NO_FORCE | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_YT6801 | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_Z3FOLD_DEPRECATED | 1 + .../default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ADDRESS_MASKING | 1 - .../configs/L2-OPTIONAL/x86/CONFIG_USING_FPU_IN_KERNEL_NONATOMIC | 1 + 16 files changed, 11 insertions(+), 4 deletions(-) delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SME create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_BACKED_BY_FILE create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_MEM_ALWAYS_FORCE delete mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_Z3FOLD delete mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_ZIP_ZSTD create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2200 rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_MICROSOFT_MANA (100%) create mode 100644 
anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MOTORCOMM create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_FORCE_PTRACE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_NO_FORCE create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_YT6801 create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_Z3FOLD_DEPRECATED create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED delete mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ADDRESS_MASKING create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_USING_FPU_IN_KERNEL_NONATOMIC diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SME b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SME deleted file mode 100644 index 701e39d85196..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARM64_SME +++ /dev/null @@ -1 +0,0 @@ -CONFIG_ARM64_SME=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_BACKED_BY_FILE b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_BACKED_BY_FILE new file mode 100644 index 000000000000..88e0babd0932 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_EROFS_FS_BACKED_BY_FILE @@ -0,0 +1 @@ +CONFIG_EROFS_FS_BACKED_BY_FILE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_MEM_ALWAYS_FORCE b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_MEM_ALWAYS_FORCE new file mode 100644 index 000000000000..44da3b23f1d1 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_PROC_MEM_ALWAYS_FORCE @@ -0,0 +1 @@ +CONFIG_PROC_MEM_ALWAYS_FORCE=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_Z3FOLD b/anolis/configs/L1-RECOMMEND/default/CONFIG_Z3FOLD deleted file mode 100644 index 6a3fff219f3a..000000000000 --- a/anolis/configs/L1-RECOMMEND/default/CONFIG_Z3FOLD +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_Z3FOLD is not set diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD 
b/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD deleted file mode 100644 index c963eaebfbd7..000000000000 --- a/anolis/configs/L1-RECOMMEND/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_ZIP_ZSTD b/anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_ZIP_ZSTD new file mode 100644 index 000000000000..fdfca6dcc262 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_EROFS_FS_ZIP_ZSTD @@ -0,0 +1 @@ +# CONFIG_EROFS_FS_ZIP_ZSTD is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2200 b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2200 new file mode 100644 index 000000000000..cc0d5e686b07 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_HID_MCP2200 @@ -0,0 +1 @@ +# CONFIG_HID_MCP2200 is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_MICROSOFT_MANA b/anolis/configs/L2-OPTIONAL/default/CONFIG_MICROSOFT_MANA similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_MICROSOFT_MANA rename to anolis/configs/L2-OPTIONAL/default/CONFIG_MICROSOFT_MANA diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MOTORCOMM b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MOTORCOMM new file mode 100644 index 000000000000..3256085a6394 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_NET_VENDOR_MOTORCOMM @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_MOTORCOMM=y diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_FORCE_PTRACE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_FORCE_PTRACE new file mode 100644 index 000000000000..bd7b76112fa4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_FORCE_PTRACE @@ -0,0 +1 @@ +# CONFIG_PROC_MEM_FORCE_PTRACE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_NO_FORCE b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_NO_FORCE new file mode 100644 
index 000000000000..bbdc2449cd55 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_PROC_MEM_NO_FORCE @@ -0,0 +1 @@ +# CONFIG_PROC_MEM_NO_FORCE is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_YT6801 b/anolis/configs/L2-OPTIONAL/default/CONFIG_YT6801 new file mode 100644 index 000000000000..ded35b1a9428 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_YT6801 @@ -0,0 +1 @@ +# CONFIG_YT6801 is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_Z3FOLD_DEPRECATED b/anolis/configs/L2-OPTIONAL/default/CONFIG_Z3FOLD_DEPRECATED new file mode 100644 index 000000000000..e759a23add4a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_Z3FOLD_DEPRECATED @@ -0,0 +1 @@ +# CONFIG_Z3FOLD_DEPRECATED is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED new file mode 100644 index 000000000000..a9c94a578e9c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED @@ -0,0 +1 @@ +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADDRESS_MASKING b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADDRESS_MASKING deleted file mode 100644 index c918428df741..000000000000 --- a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ADDRESS_MASKING +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_ADDRESS_MASKING is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_USING_FPU_IN_KERNEL_NONATOMIC b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USING_FPU_IN_KERNEL_NONATOMIC new file mode 100644 index 000000000000..4b9bc743b271 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_USING_FPU_IN_KERNEL_NONATOMIC @@ -0,0 +1 @@ +# CONFIG_USING_FPU_IN_KERNEL_NONATOMIC is not set -- Gitee From 53e8890c23248dde6e926e98e4645409bfb76727 Mon Sep 17 00:00:00 2001 From: Qinyun Tan Date: Mon, 13 Jan 2025 20:10:49 +0800 Subject: [PATCH 2131/2138] 
anolis: Kconfig: open some arm64 soc Kconfigs by default. ANBZ: #13487 Adjust some arm64 soc Kconfigs to accommodate certain potential scenarios: CONFIG_DEVICE_PRIVATE=y CONFIG_SPI_TEGRA210_QUAD=y CONFIG_ARCH_TEGRA=y CONFIG_TCG_TIS_SPI=y CONFIG_MTD_SPI_NOR=y CONFIG_SENSORS_ACPI_POWER=m CONFIG_ARCH_TEGRA_241_SOC=y CONFIG_GPIO_TEGRA186=y CONFIG_DMABUF_HEAPS=y CONFIG_DMABUF_HEAPS_SYSTEM=y CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y Signed-off-by: Qinyun Tan Reviewed-by: Guanghui Feng Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4493 --- anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_TEGRA | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA_241_SOC | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_GOV_SCHEDUTIL | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVICE_PRIVATE | 1 - anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS_SYSTEM | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_TEGRA186 | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD_SPI_NOR | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_TEGRA210_QUAD | 1 + anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_TIS_SPI | 1 + .../L1-RECOMMEND/{x86 => default}/CONFIG_CPU_FREQ_GOV_SCHEDUTIL | 0 .../configs/L1-RECOMMEND/{x86 => default}/CONFIG_DEVICE_PRIVATE | 0 anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_UCLAMP_TASK | 0 .../default => L1-RECOMMEND/x86}/CONFIG_DMABUF_HEAPS | 0 .../configs/{L2-OPTIONAL/default => L1-RECOMMEND/x86}/CONFIG_MTD | 0 .../{L2-OPTIONAL/default => L1-RECOMMEND/x86}/CONFIG_MTD_SPI_NOR | 0 .../configs/{L2-OPTIONAL => L1-RECOMMEND}/x86/CONFIG_TCG_TIS_SPI | 0 .../configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RESET_CONTROLLER | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA | 1 - anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_132_SOC | 1 + 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_186_SOC | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_194_SOC | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_210_SOC | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_234_SOC | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_PM | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TEGRA186_CPUFREQ | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_TEGRA_BPMP | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMABUF_HEAPS_CMA | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NOUVEAU_SVM | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TEGRA | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_TEGRA | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_HSA_AMD_SVM | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA_BPMP | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_TEGRA | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NVEC | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_TEGRA | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE | 1 + .../L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_KEEP | 1 + .../configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_USE_4K_SECTORS | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOUVEAU_PLATFORM_DRIVER | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_TEGRA | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TEGRA_XUSB | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_TEGRA_XUSB | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_TEGRA | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_TEGRA_BPMP | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_TEGRA | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_TEGRA | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_TEGRA_TCU | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_FUSE | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_PMC | 1 + 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_POWERGATE_BPMP | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_XSPI | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_MEM | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_SN_F_OSPI | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_TEGRA20_SFLASH | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_ZYNQMP_GQSPI | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI | 1 - anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_GPC_DMA | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_TIMER | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA20_APB_DMA | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA210_ADMA | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_ACONNECT | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_AHB | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP_THERMAL | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_GMI | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HOST1X | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HSP_MBOX | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_IVC | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_SOCTHERM | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_WATCHDOG | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_TEGRA | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_TEGRA_PHY | 1 + anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_PM_OPP | 0 anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_TEST_HMM | 0 anolis/configs/L2-OPTIONAL/{default => x86}/CONFIG_SPI_MEM | 0 81 files changed, 67 insertions(+), 4 deletions(-) create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_TEGRA create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA_241_SOC delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_GOV_SCHEDUTIL 
delete mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVICE_PRIVATE create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS_SYSTEM create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_TEGRA186 create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD_SPI_NOR create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_TEGRA210_QUAD create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_TIS_SPI rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_CPU_FREQ_GOV_SCHEDUTIL (100%) rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_DEVICE_PRIVATE (100%) rename anolis/configs/L1-RECOMMEND/{x86 => default}/CONFIG_UCLAMP_TASK (100%) rename anolis/configs/{L2-OPTIONAL/default => L1-RECOMMEND/x86}/CONFIG_DMABUF_HEAPS (100%) rename anolis/configs/{L2-OPTIONAL/default => L1-RECOMMEND/x86}/CONFIG_MTD (100%) rename anolis/configs/{L2-OPTIONAL/default => L1-RECOMMEND/x86}/CONFIG_MTD_SPI_NOR (100%) rename anolis/configs/{L2-OPTIONAL => L1-RECOMMEND}/x86/CONFIG_TCG_TIS_SPI (100%) create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RESET_CONTROLLER delete mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_132_SOC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_186_SOC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_194_SOC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_210_SOC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_234_SOC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_PM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TEGRA186_CPUFREQ create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_TEGRA_BPMP create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMABUF_HEAPS_CMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NOUVEAU_SVM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TEGRA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_TEGRA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_HSA_AMD_SVM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA_BPMP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_TEGRA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NVEC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_TEGRA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_KEEP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_USE_4K_SECTORS create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOUVEAU_PLATFORM_DRIVER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_TEGRA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TEGRA_XUSB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_TEGRA_XUSB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_TEGRA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_TEGRA_BPMP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_TEGRA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_TEGRA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_TEGRA_TCU create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_FUSE create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_PMC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_POWERGATE_BPMP create mode 100644 
anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_XSPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_MEM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_SN_F_OSPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_TEGRA20_SFLASH create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_ZYNQMP_GQSPI delete mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_GPC_DMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_TIMER create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA20_APB_DMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA210_ADMA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_ACONNECT create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_AHB create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP_THERMAL create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_GMI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HOST1X create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HSP_MBOX create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_IVC create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_SOCTHERM create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_WATCHDOG create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_TEGRA create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_TEGRA_PHY rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_PM_OPP (100%) rename anolis/configs/L2-OPTIONAL/{x86 => default}/CONFIG_TEST_HMM (100%) rename anolis/configs/L2-OPTIONAL/{default => x86}/CONFIG_SPI_MEM (100%) diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_TEGRA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_TEGRA 
new file mode 100644 index 000000000000..30ac5b5e32c0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_AHCI_TEGRA @@ -0,0 +1 @@ +# CONFIG_AHCI_TEGRA is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA new file mode 100644 index 000000000000..3b74d56f4c11 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA @@ -0,0 +1 @@ +CONFIG_ARCH_TEGRA=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA_241_SOC b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA_241_SOC new file mode 100644 index 000000000000..253aee154ee7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_ARCH_TEGRA_241_SOC @@ -0,0 +1 @@ +CONFIG_ARCH_TEGRA_241_SOC=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_GOV_SCHEDUTIL b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_GOV_SCHEDUTIL deleted file mode 100644 index f9ae389f5e8e..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_CPU_FREQ_GOV_SCHEDUTIL +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVICE_PRIVATE b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVICE_PRIVATE deleted file mode 100644 index 838736fb6da1..000000000000 --- a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DEVICE_PRIVATE +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_DEVICE_PRIVATE is not set diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS new file mode 100644 index 000000000000..f4f69ff9b0ca --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS @@ -0,0 +1 @@ +CONFIG_DMABUF_HEAPS=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS_SYSTEM b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS_SYSTEM new file mode 100644 index 000000000000..f89629641a9c --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_DMABUF_HEAPS_SYSTEM @@ -0,0 +1 @@ 
+CONFIG_DMABUF_HEAPS_SYSTEM=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_TEGRA186 b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_TEGRA186 new file mode 100644 index 000000000000..55ab719f65dd --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_GPIO_TEGRA186 @@ -0,0 +1 @@ +CONFIG_GPIO_TEGRA186=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD new file mode 100644 index 000000000000..eab98dd5c438 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD @@ -0,0 +1 @@ +CONFIG_MTD=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD_SPI_NOR b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD_SPI_NOR new file mode 100644 index 000000000000..5c6d557f1079 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_MTD_SPI_NOR @@ -0,0 +1 @@ +CONFIG_MTD_SPI_NOR=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_TEGRA210_QUAD b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_TEGRA210_QUAD new file mode 100644 index 000000000000..7e9fa5a882f3 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_SPI_TEGRA210_QUAD @@ -0,0 +1 @@ +CONFIG_SPI_TEGRA210_QUAD=y diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_TIS_SPI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_TIS_SPI new file mode 100644 index 000000000000..79fbc0cc6d94 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_TCG_TIS_SPI @@ -0,0 +1 @@ +CONFIG_TCG_TIS_SPI=y diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_FREQ_GOV_SCHEDUTIL b/anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_SCHEDUTIL similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_CPU_FREQ_GOV_SCHEDUTIL rename to anolis/configs/L1-RECOMMEND/default/CONFIG_CPU_FREQ_GOV_SCHEDUTIL diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_DEVICE_PRIVATE b/anolis/configs/L1-RECOMMEND/default/CONFIG_DEVICE_PRIVATE similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_DEVICE_PRIVATE rename to 
anolis/configs/L1-RECOMMEND/default/CONFIG_DEVICE_PRIVATE diff --git a/anolis/configs/L1-RECOMMEND/x86/CONFIG_UCLAMP_TASK b/anolis/configs/L1-RECOMMEND/default/CONFIG_UCLAMP_TASK similarity index 100% rename from anolis/configs/L1-RECOMMEND/x86/CONFIG_UCLAMP_TASK rename to anolis/configs/L1-RECOMMEND/default/CONFIG_UCLAMP_TASK diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_HEAPS b/anolis/configs/L1-RECOMMEND/x86/CONFIG_DMABUF_HEAPS similarity index 100% rename from anolis/configs/L2-OPTIONAL/default/CONFIG_DMABUF_HEAPS rename to anolis/configs/L1-RECOMMEND/x86/CONFIG_DMABUF_HEAPS diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTD similarity index 100% rename from anolis/configs/L2-OPTIONAL/default/CONFIG_MTD rename to anolis/configs/L1-RECOMMEND/x86/CONFIG_MTD diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SPI_NOR b/anolis/configs/L1-RECOMMEND/x86/CONFIG_MTD_SPI_NOR similarity index 100% rename from anolis/configs/L2-OPTIONAL/default/CONFIG_MTD_SPI_NOR rename to anolis/configs/L1-RECOMMEND/x86/CONFIG_MTD_SPI_NOR diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_SPI b/anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_TIS_SPI similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_TCG_TIS_SPI rename to anolis/configs/L1-RECOMMEND/x86/CONFIG_TCG_TIS_SPI diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RESET_CONTROLLER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RESET_CONTROLLER new file mode 100644 index 000000000000..bf8f541016ca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_HAS_RESET_CONTROLLER @@ -0,0 +1 @@ +CONFIG_ARCH_HAS_RESET_CONTROLLER=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA deleted file mode 100644 index 85b1b424972d..000000000000 --- a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_ARCH_TEGRA is not set 
diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_132_SOC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_132_SOC new file mode 100644 index 000000000000..16eca84f92af --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_132_SOC @@ -0,0 +1 @@ +# CONFIG_ARCH_TEGRA_132_SOC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_186_SOC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_186_SOC new file mode 100644 index 000000000000..1cafdb24fa7d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_186_SOC @@ -0,0 +1 @@ +CONFIG_ARCH_TEGRA_186_SOC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_194_SOC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_194_SOC new file mode 100644 index 000000000000..719c9f05fa66 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_194_SOC @@ -0,0 +1 @@ +# CONFIG_ARCH_TEGRA_194_SOC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_210_SOC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_210_SOC new file mode 100644 index 000000000000..76117199a477 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_210_SOC @@ -0,0 +1 @@ +# CONFIG_ARCH_TEGRA_210_SOC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_234_SOC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_234_SOC new file mode 100644 index 000000000000..4c94a03094e3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARCH_TEGRA_234_SOC @@ -0,0 +1 @@ +# CONFIG_ARCH_TEGRA_234_SOC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_PM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_PM new file mode 100644 index 000000000000..8c4e75c2f5b6 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_GIC_PM @@ -0,0 +1 @@ +CONFIG_ARM_GIC_PM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TEGRA186_CPUFREQ 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TEGRA186_CPUFREQ new file mode 100644 index 000000000000..9e4de8815d43 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ARM_TEGRA186_CPUFREQ @@ -0,0 +1 @@ +# CONFIG_ARM_TEGRA186_CPUFREQ is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_TEGRA_BPMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_TEGRA_BPMP new file mode 100644 index 000000000000..f6c6f3d59eff --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_CLK_TEGRA_BPMP @@ -0,0 +1 @@ +CONFIG_CLK_TEGRA_BPMP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMABUF_HEAPS_CMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMABUF_HEAPS_CMA new file mode 100644 index 000000000000..ce7b3dc64bc3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DMABUF_HEAPS_CMA @@ -0,0 +1 @@ +# CONFIG_DMABUF_HEAPS_CMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NOUVEAU_SVM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NOUVEAU_SVM new file mode 100644 index 000000000000..dbd2333b2894 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_NOUVEAU_SVM @@ -0,0 +1 @@ +# CONFIG_DRM_NOUVEAU_SVM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TEGRA new file mode 100644 index 000000000000..37ca811bfebc --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_DRM_TEGRA @@ -0,0 +1 @@ +# CONFIG_DRM_TEGRA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_TEGRA new file mode 100644 index 000000000000..72b59399c32c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_GPIO_TEGRA @@ -0,0 +1 @@ +CONFIG_GPIO_TEGRA=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HSA_AMD_SVM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HSA_AMD_SVM new file mode 100644 index 000000000000..59b1db6bcfbb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_HSA_AMD_SVM @@ -0,0 +1 @@ 
+CONFIG_HSA_AMD_SVM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA new file mode 100644 index 000000000000..eb085793b529 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA @@ -0,0 +1 @@ +# CONFIG_I2C_TEGRA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA_BPMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA_BPMP new file mode 100644 index 000000000000..1357b8f6b829 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_I2C_TEGRA_BPMP @@ -0,0 +1 @@ +CONFIG_I2C_TEGRA_BPMP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_TEGRA new file mode 100644 index 000000000000..e9a849277c1b --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_KEYBOARD_TEGRA @@ -0,0 +1 @@ +# CONFIG_KEYBOARD_TEGRA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NVEC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NVEC new file mode 100644 index 000000000000..a17523130a73 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MFD_NVEC @@ -0,0 +1 @@ +# CONFIG_MFD_NVEC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_TEGRA new file mode 100644 index 000000000000..9b576da7b61a --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MMC_SDHCI_TEGRA @@ -0,0 +1 @@ +# CONFIG_MMC_SDHCI_TEGRA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE new file mode 100644 index 000000000000..7c05500dc235 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE @@ -0,0 +1 @@ +# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE 
new file mode 100644 index 000000000000..baab16dac50c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE @@ -0,0 +1 @@ +CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_KEEP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_KEEP new file mode 100644 index 000000000000..598f82ddcb88 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_SWP_KEEP @@ -0,0 +1 @@ +# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_USE_4K_SECTORS b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_USE_4K_SECTORS new file mode 100644 index 000000000000..740f5c1f6b65 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_MTD_SPI_NOR_USE_4K_SECTORS @@ -0,0 +1 @@ +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOUVEAU_PLATFORM_DRIVER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOUVEAU_PLATFORM_DRIVER new file mode 100644 index 000000000000..5f06f4c60e8e --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_NOUVEAU_PLATFORM_DRIVER @@ -0,0 +1 @@ +CONFIG_NOUVEAU_PLATFORM_DRIVER=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_TEGRA new file mode 100644 index 000000000000..74e5837a4fb1 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PCI_TEGRA @@ -0,0 +1 @@ +# CONFIG_PCI_TEGRA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TEGRA_XUSB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TEGRA_XUSB new file mode 100644 index 000000000000..66baf7a802e2 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PHY_TEGRA_XUSB @@ -0,0 +1 @@ +# CONFIG_PHY_TEGRA_XUSB is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_TEGRA_XUSB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_TEGRA_XUSB new file mode 100644 index 000000000000..a707d08020c6 --- /dev/null 
+++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PINCTRL_TEGRA_XUSB @@ -0,0 +1 @@ +CONFIG_PINCTRL_TEGRA_XUSB=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_TEGRA new file mode 100644 index 000000000000..212beb44c7b7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_PWM_TEGRA @@ -0,0 +1 @@ +# CONFIG_PWM_TEGRA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_TEGRA_BPMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_TEGRA_BPMP new file mode 100644 index 000000000000..89fb5cfd7d5f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RESET_TEGRA_BPMP @@ -0,0 +1 @@ +CONFIG_RESET_TEGRA_BPMP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_TEGRA new file mode 100644 index 000000000000..6e2e438ff20d --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_RTC_DRV_TEGRA @@ -0,0 +1 @@ +# CONFIG_RTC_DRV_TEGRA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_TEGRA new file mode 100644 index 000000000000..b9ebe42687a3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_8250_TEGRA @@ -0,0 +1 @@ +CONFIG_SERIAL_8250_TEGRA=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_TEGRA_TCU b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_TEGRA_TCU new file mode 100644 index 000000000000..4999de05d2db --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SERIAL_TEGRA_TCU @@ -0,0 +1 @@ +# CONFIG_SERIAL_TEGRA_TCU is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_FUSE b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_FUSE new file mode 100644 index 000000000000..36d2d3822def --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_FUSE @@ -0,0 +1 @@ +CONFIG_SOC_TEGRA_FUSE=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_PMC 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_PMC new file mode 100644 index 000000000000..a6d72efba8fb --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_PMC @@ -0,0 +1 @@ +CONFIG_SOC_TEGRA_PMC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_POWERGATE_BPMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_POWERGATE_BPMP new file mode 100644 index 000000000000..81d94f8cc3bf --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SOC_TEGRA_POWERGATE_BPMP @@ -0,0 +1 @@ +CONFIG_SOC_TEGRA_POWERGATE_BPMP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_XSPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_XSPI new file mode 100644 index 000000000000..382ba298b7d3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_CADENCE_XSPI @@ -0,0 +1 @@ +# CONFIG_SPI_CADENCE_XSPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC new file mode 100644 index 000000000000..37a685730420 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_HISI_SFC @@ -0,0 +1 @@ +# CONFIG_SPI_HISI_SFC is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_MEM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_MEM new file mode 100644 index 000000000000..118458c27a84 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_MEM @@ -0,0 +1 @@ +CONFIG_SPI_MEM=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_SN_F_OSPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_SN_F_OSPI new file mode 100644 index 000000000000..6f2ee22f2c03 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_SN_F_OSPI @@ -0,0 +1 @@ +# CONFIG_SPI_SN_F_OSPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_TEGRA20_SFLASH b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_TEGRA20_SFLASH new file mode 100644 index 000000000000..446ce07282cd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_TEGRA20_SFLASH @@ 
-0,0 +1 @@ +# CONFIG_SPI_TEGRA20_SFLASH is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_ZYNQMP_GQSPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_ZYNQMP_GQSPI new file mode 100644 index 000000000000..48e520fd68c5 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_SPI_ZYNQMP_GQSPI @@ -0,0 +1 @@ +# CONFIG_SPI_ZYNQMP_GQSPI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI deleted file mode 100644 index bfd1ff673b66..000000000000 --- a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TCG_TIS_SPI +++ /dev/null @@ -1 +0,0 @@ -CONFIG_TCG_TIS_SPI=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_GPC_DMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_GPC_DMA new file mode 100644 index 000000000000..d83d1f1a0818 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_GPC_DMA @@ -0,0 +1 @@ +# CONFIG_TEGRA186_GPC_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_TIMER b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_TIMER new file mode 100644 index 000000000000..0013fe53336c --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA186_TIMER @@ -0,0 +1 @@ +# CONFIG_TEGRA186_TIMER is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA20_APB_DMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA20_APB_DMA new file mode 100644 index 000000000000..9b8efaf50411 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA20_APB_DMA @@ -0,0 +1 @@ +# CONFIG_TEGRA20_APB_DMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA210_ADMA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA210_ADMA new file mode 100644 index 000000000000..634ab1ad7fcd --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA210_ADMA @@ -0,0 +1 @@ +# CONFIG_TEGRA210_ADMA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_ACONNECT b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_ACONNECT 
new file mode 100644 index 000000000000..beeba6713612 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_ACONNECT @@ -0,0 +1 @@ +# CONFIG_TEGRA_ACONNECT is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_AHB b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_AHB new file mode 100644 index 000000000000..e11cec707a44 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_AHB @@ -0,0 +1 @@ +CONFIG_TEGRA_AHB=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP new file mode 100644 index 000000000000..348002b0bc68 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP @@ -0,0 +1 @@ +CONFIG_TEGRA_BPMP=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP_THERMAL b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP_THERMAL new file mode 100644 index 000000000000..e1e1b80f43b3 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_BPMP_THERMAL @@ -0,0 +1 @@ +# CONFIG_TEGRA_BPMP_THERMAL is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_GMI b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_GMI new file mode 100644 index 000000000000..c062dcb429c9 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_GMI @@ -0,0 +1 @@ +# CONFIG_TEGRA_GMI is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HOST1X b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HOST1X new file mode 100644 index 000000000000..3e42b2b35a00 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HOST1X @@ -0,0 +1 @@ +# CONFIG_TEGRA_HOST1X is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HSP_MBOX b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HSP_MBOX new file mode 100644 index 000000000000..3d46b7c93a15 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_HSP_MBOX @@ -0,0 +1 @@ +CONFIG_TEGRA_HSP_MBOX=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_IVC 
b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_IVC new file mode 100644 index 000000000000..cdcacbec25a4 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_IVC @@ -0,0 +1 @@ +CONFIG_TEGRA_IVC=y diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_SOCTHERM b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_SOCTHERM new file mode 100644 index 000000000000..b7444627edca --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_SOCTHERM @@ -0,0 +1 @@ +# CONFIG_TEGRA_SOCTHERM is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_WATCHDOG b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_WATCHDOG new file mode 100644 index 000000000000..024c0dc62e33 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_TEGRA_WATCHDOG @@ -0,0 +1 @@ +# CONFIG_TEGRA_WATCHDOG is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_TEGRA b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_TEGRA new file mode 100644 index 000000000000..43a204ef511f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_EHCI_TEGRA @@ -0,0 +1 @@ +# CONFIG_USB_EHCI_TEGRA is not set diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_TEGRA_PHY b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_TEGRA_PHY new file mode 100644 index 000000000000..084266b2f48f --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_USB_TEGRA_PHY @@ -0,0 +1 @@ +# CONFIG_USB_TEGRA_PHY is not set diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_OPP b/anolis/configs/L2-OPTIONAL/default/CONFIG_PM_OPP similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_PM_OPP rename to anolis/configs/L2-OPTIONAL/default/CONFIG_PM_OPP diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_HMM b/anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_HMM similarity index 100% rename from anolis/configs/L2-OPTIONAL/x86/CONFIG_TEST_HMM rename to anolis/configs/L2-OPTIONAL/default/CONFIG_TEST_HMM diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MEM 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_MEM similarity index 100% rename from anolis/configs/L2-OPTIONAL/default/CONFIG_SPI_MEM rename to anolis/configs/L2-OPTIONAL/x86/CONFIG_SPI_MEM -- Gitee From f3e27cb7b43f88a34f0800e18d91c22759ba706d Mon Sep 17 00:00:00 2001 From: Xingrui Yi Date: Fri, 17 Jan 2025 11:28:28 +0800 Subject: [PATCH 2132/2138] anolis: set prof_cpu_mask to NULL after free when cpumask offstack ANBZ: #13584 When profile_init failed if prof_buffer is not allocated, prof_cpu_mask will be kfree by free_cpumask_var but not set to NULL with CONFIG_CPUMASK_OFFSTACK, thus profile_tick will use prof_cpu_mask after free. Signed-off-by: Xingrui Yi Reviewed-by: Qiao Ma Reviewed-by: Xunlei Pang Link: https://gitee.com/anolis/cloud-kernel/pulls/4514 --- kernel/profile.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/profile.c b/kernel/profile.c index 984f819b701c..fe754a85635c 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -124,6 +124,9 @@ int __ref profile_init(void) return 0; free_cpumask_var(prof_cpu_mask); +#ifdef CONFIG_CPUMASK_OFFSTACK + prof_cpu_mask = NULL; +#endif return -ENOMEM; } -- Gitee From d3a93d8d336c4c4b0f5d4b62d65fb8a25e2babc1 Mon Sep 17 00:00:00 2001 From: haodongdong Date: Mon, 20 Jan 2025 11:09:02 +0800 Subject: [PATCH 2133/2138] anolis: scsi: leapioraid: update leapioraid driver version ANBZ: #13593 update leapioraid driver version Signed-off-by: haodongdong Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/4522 --- drivers/scsi/leapioraid/leapioraid_func.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/leapioraid/leapioraid_func.h b/drivers/scsi/leapioraid/leapioraid_func.h index 8ca8fc0a6f26..76babcb40766 100644 --- a/drivers/scsi/leapioraid/leapioraid_func.h +++ b/drivers/scsi/leapioraid/leapioraid_func.h @@ -67,10 +67,10 @@ #define LEAPIORAID_DRIVER_NAME "LeapIoRaid" #define LEAPIORAID_AUTHOR "LeapIO Inc." 
#define LEAPIORAID_DESCRIPTION "LEAPIO RAID Driver" -#define LEAPIORAID_DRIVER_VERSION "1.00.00.00" +#define LEAPIORAID_DRIVER_VERSION "1.02.02.00" #define LEAPIORAID_MAJOR_VERSION (1) -#define LEAPIORAID_MINOR_VERSION (00) -#define LEAPIORAID_BUILD_VERSION (00) +#define LEAPIORAID_MINOR_VERSION (02) +#define LEAPIORAID_BUILD_VERSION (02) #define LEAPIORAID_RELEASE_VERSION (00) #define LEAPIORAID_VENDOR_ID (0xD405) -- Gitee From 614d1e0e00096c5d6f87244c07dfc92ee07e4d0e Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Sat, 26 Dec 2020 11:40:13 +0800 Subject: [PATCH 2134/2138] anolis: arm64: mm: Add a new parameter to adjust the reserve percpu memory for modules ANBZ: #7770 For interface DEFINE_PER_CPU, it will apply reserved percpu memory from host. Currently, linux kernel only support 8192 bytes reserved percpu memory. In some scenario, it may not be enough for lots of kernel modules. For example, kvm on arm64 will consume about 3.5KB reserved percpu memory. So we can add a new command line parameter to adjust the reserved percpu memory to provide extension of the size of this area. 
Signed-off-by: Baolin Wang Signed-off-by: Xiang Zheng Acked-by: Xunlei Pang Signed-off-by: xuejun-xj Link: https://gitee.com/anolis/cloud-kernel/pulls/2522 Link: https://gitee.com/anolis/cloud-kernel/pulls/4513 --- drivers/base/arch_numa.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c index 5b59d133b6af..9cb049bd375b 100644 --- a/drivers/base/arch_numa.c +++ b/drivers/base/arch_numa.c @@ -141,6 +141,22 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid) } #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA +static size_t pcpu_mod_reserved = PERCPU_MODULE_RESERVE; + +#ifdef CONFIG_ARM64 +static __init int set_reserve_pcpu(char *str) +{ + if (!str) + return -EINVAL; + + pcpu_mod_reserved = (size_t)memparse(str, NULL); + pr_notice("Reserve module percpu memory to %zuKB\n", + pcpu_mod_reserved >> 10); + return 0; +} +early_param("pcpu_mod_reserve", set_reserve_pcpu); +#endif + unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); @@ -165,7 +181,7 @@ void __init setup_per_cpu_areas(void) * Always reserve area for module percpu variables. That's * what the legacy allocator did. */ - rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, + rc = pcpu_embed_first_chunk(pcpu_mod_reserved, PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, pcpu_cpu_distance, early_cpu_to_node); @@ -178,7 +194,7 @@ void __init setup_per_cpu_areas(void) #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK if (rc < 0) - rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, early_cpu_to_node); + rc = pcpu_page_first_chunk(pcpu_mod_reserved, early_cpu_to_node); #endif if (rc < 0) panic("Failed to initialize percpu areas (err=%d).", rc); -- Gitee From b66d4f1656cc8912cd936d33401960cde801c83b Mon Sep 17 00:00:00 2001 From: Qinyun Tan Date: Tue, 21 Jan 2025 13:58:32 +0800 Subject: [PATCH 2135/2138] anolis: configs: refresh kconfigs ANBZ: #13487 1. 
Refresh Kconfigs and put relative unknown level kconfigs to L1-RECOMMEND or L2-OPTIONAL level. 2. set CONFIG_NVGRACE_GPU_VFIO_PCI=m Refresh kconfigs by follow command: > make -C anolis/ dist-configs-update Signed-off-by: Qinyun Tan Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4531 --- anolis/configs/L1-RECOMMEND/arm64/CONFIG_NVGRACE_GPU_VFIO_PCI | 1 + anolis/configs/L2-OPTIONAL/arm64/CONFIG_ZRAM_TRACK_ENTRY_ACTIME | 1 + anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_COUNTED_BY | 1 + anolis/configs/L2-OPTIONAL/x86/CONFIG_ZRAM_TRACK_ENTRY_ACTIME | 1 + 4 files changed, 4 insertions(+) create mode 100644 anolis/configs/L1-RECOMMEND/arm64/CONFIG_NVGRACE_GPU_VFIO_PCI create mode 100644 anolis/configs/L2-OPTIONAL/arm64/CONFIG_ZRAM_TRACK_ENTRY_ACTIME create mode 100644 anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_COUNTED_BY create mode 100644 anolis/configs/L2-OPTIONAL/x86/CONFIG_ZRAM_TRACK_ENTRY_ACTIME diff --git a/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NVGRACE_GPU_VFIO_PCI b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NVGRACE_GPU_VFIO_PCI new file mode 100644 index 000000000000..06d7e70c3aab --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/arm64/CONFIG_NVGRACE_GPU_VFIO_PCI @@ -0,0 +1 @@ +CONFIG_NVGRACE_GPU_VFIO_PCI=m diff --git a/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ZRAM_TRACK_ENTRY_ACTIME b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ZRAM_TRACK_ENTRY_ACTIME new file mode 100644 index 000000000000..d986e38c4fad --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/arm64/CONFIG_ZRAM_TRACK_ENTRY_ACTIME @@ -0,0 +1 @@ +# CONFIG_ZRAM_TRACK_ENTRY_ACTIME is not set diff --git a/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_COUNTED_BY b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_COUNTED_BY new file mode 100644 index 000000000000..0305414244a7 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/default/CONFIG_CC_HAS_COUNTED_BY @@ -0,0 +1 @@ +CONFIG_CC_HAS_COUNTED_BY=y diff --git a/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZRAM_TRACK_ENTRY_ACTIME 
b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZRAM_TRACK_ENTRY_ACTIME new file mode 100644 index 000000000000..f7f4c4c57546 --- /dev/null +++ b/anolis/configs/L2-OPTIONAL/x86/CONFIG_ZRAM_TRACK_ENTRY_ACTIME @@ -0,0 +1 @@ +CONFIG_ZRAM_TRACK_ENTRY_ACTIME=y -- Gitee From c2bd5c93f9474fffa10b68e33f62379211fb7084 Mon Sep 17 00:00:00 2001 From: Qinyun Tan Date: Tue, 21 Jan 2025 14:00:46 +0800 Subject: [PATCH 2136/2138] anolis: configs: enable CONFIG_PREEMPT_NONE=y by default ANBZ: #13487 In the 6.6 kernel, support for dynamic preemption switch control has been introduced, allowing us to manage the kernel's preemption behavior via command line parameters. As a result, we can unify the default preemption Kconfig settings for both x86 and arm64 architectures. Signed-off-by: Qinyun Tan Reviewed-by: Qiao Ma Link: https://gitee.com/anolis/cloud-kernel/pulls/4531 --- anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_NONE | 1 - anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_VOLUNTARY | 1 - anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_PREEMPT_NONE | 0 .../L0-MANDATORY/{x86 => default}/CONFIG_PREEMPT_VOLUNTARY | 0 4 files changed, 2 deletions(-) delete mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_NONE delete mode 100644 anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_VOLUNTARY rename anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_PREEMPT_NONE (100%) rename anolis/configs/L0-MANDATORY/{x86 => default}/CONFIG_PREEMPT_VOLUNTARY (100%) diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_NONE b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_NONE deleted file mode 100644 index 45e3146818c9..000000000000 --- a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_NONE +++ /dev/null @@ -1 +0,0 @@ -# CONFIG_PREEMPT_NONE is not set diff --git a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_VOLUNTARY b/anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_VOLUNTARY deleted file mode 100644 index 4762d5ecdb30..000000000000 --- 
a/anolis/configs/L0-MANDATORY/arm64/CONFIG_PREEMPT_VOLUNTARY +++ /dev/null @@ -1 +0,0 @@ -CONFIG_PREEMPT_VOLUNTARY=y diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_PREEMPT_NONE b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_NONE similarity index 100% rename from anolis/configs/L0-MANDATORY/x86/CONFIG_PREEMPT_NONE rename to anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_NONE diff --git a/anolis/configs/L0-MANDATORY/x86/CONFIG_PREEMPT_VOLUNTARY b/anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_VOLUNTARY similarity index 100% rename from anolis/configs/L0-MANDATORY/x86/CONFIG_PREEMPT_VOLUNTARY rename to anolis/configs/L0-MANDATORY/default/CONFIG_PREEMPT_VOLUNTARY -- Gitee From 93acb20ca2d2bef45655ec703d9550b697b58f47 Mon Sep 17 00:00:00 2001 From: Liqiang Date: Tue, 21 Jan 2025 17:31:18 +0800 Subject: [PATCH 2137/2138] anolis: drivers: support for xsc drivers from Yunsilicon Technology ANBZ: #13616 Yunsilicon XSC drivers provide both Ethernet and RDMA features for metaScale SmartNICs. 
Signed-off-by: Liqiang Reviewed-by: Tianxin Reviewed-by: Weihonggang Reviewed-by: Wanrenyong Reviewed-by: Jacky Reviewed-by: Guixin Liu Link: https://gitee.com/anolis/cloud-kernel/pulls/4532 --- MAINTAINERS | 6 + .../default/CONFIG_INFINIBAND_XSC | 1 + .../default/CONFIG_NET_VENDOR_YUNSILICON | 1 + .../default/CONFIG_YUNSILICON_XSC_ETH | 1 + .../default/CONFIG_YUNSILICON_XSC_PCI | 1 + drivers/infiniband/Kconfig | 1 + drivers/infiniband/hw/Makefile | 1 + drivers/infiniband/hw/xsc/Kconfig | 15 + drivers/infiniband/hw/xsc/Makefile | 17 + drivers/infiniband/hw/xsc/ah.c | 127 + drivers/infiniband/hw/xsc/counters.c | 538 +++ drivers/infiniband/hw/xsc/counters.h | 53 + drivers/infiniband/hw/xsc/cq.c | 690 ++++ drivers/infiniband/hw/xsc/devx.c | 81 + drivers/infiniband/hw/xsc/ib_peer_mem.h | 68 + drivers/infiniband/hw/xsc/ib_umem_ex.c | 211 + drivers/infiniband/hw/xsc/ib_umem_ex.h | 53 + drivers/infiniband/hw/xsc/main.c | 1201 ++++++ drivers/infiniband/hw/xsc/mem.c | 343 ++ drivers/infiniband/hw/xsc/mr.c | 500 +++ drivers/infiniband/hw/xsc/peer_mem.c | 317 ++ drivers/infiniband/hw/xsc/peer_mem.h | 228 ++ drivers/infiniband/hw/xsc/private_dev.c | 1031 +++++ drivers/infiniband/hw/xsc/qp.c | 1939 ++++++++++ drivers/infiniband/hw/xsc/rtt.c | 412 ++ drivers/infiniband/hw/xsc/user.h | 277 ++ drivers/infiniband/hw/xsc/xsc_ib.h | 627 +++ drivers/infiniband/hw/xsc/xsc_ib_compat.h | 62 + drivers/infiniband/hw/xsc/xsc_ib_sysfs.c | 64 + drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c | 715 ++++ drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h | 13 + drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/yunsilicon/Kconfig | 26 + drivers/net/ethernet/yunsilicon/Makefile | 8 + .../net/ethernet/yunsilicon/xsc/common/cq.h | 90 + .../ethernet/yunsilicon/xsc/common/device.h | 140 + .../ethernet/yunsilicon/xsc/common/doorbell.h | 49 + .../ethernet/yunsilicon/xsc/common/driver.h | 341 ++ .../net/ethernet/yunsilicon/xsc/common/port.h | 40 + 
.../net/ethernet/yunsilicon/xsc/common/qp.h | 198 + .../net/ethernet/yunsilicon/xsc/common/qpts.h | 72 + .../ethernet/yunsilicon/xsc/common/res_obj.h | 122 + .../ethernet/yunsilicon/xsc/common/version.h | 10 + .../ethernet/yunsilicon/xsc/common/vport.h | 127 + .../yunsilicon/xsc/common/xsc_auto_hw.h | 97 + .../ethernet/yunsilicon/xsc/common/xsc_cmd.h | 2513 ++++++++++++ .../ethernet/yunsilicon/xsc/common/xsc_core.h | 1315 +++++++ .../yunsilicon/xsc/common/xsc_eswitch.h | 39 + .../ethernet/yunsilicon/xsc/common/xsc_fs.h | 54 + .../ethernet/yunsilicon/xsc/common/xsc_hsi.h | 373 ++ .../yunsilicon/xsc/common/xsc_ioctl.h | 317 ++ .../ethernet/yunsilicon/xsc/common/xsc_lag.h | 136 + .../yunsilicon/xsc/common/xsc_macro.h | 21 + .../yunsilicon/xsc/common/xsc_port_ctrl.h | 30 + .../ethernet/yunsilicon/xsc/common/xsc_pp.h | 46 + .../ethernet/yunsilicon/xsc/common/xsc_pph.h | 175 + .../ethernet/yunsilicon/xsc/common/xsc_reg.h | 35 + .../net/ethernet/yunsilicon/xsc/net/Kconfig | 16 + .../net/ethernet/yunsilicon/xsc/net/Makefile | 11 + .../net/ethernet/yunsilicon/xsc/net/main.c | 3397 +++++++++++++++++ .../net/ethernet/yunsilicon/xsc/net/ut_main.c | 124 + .../ethernet/yunsilicon/xsc/net/xsc_accel.h | 43 + .../ethernet/yunsilicon/xsc/net/xsc_dcbnl.c | 1482 +++++++ .../net/ethernet/yunsilicon/xsc/net/xsc_eth.h | 230 ++ .../yunsilicon/xsc/net/xsc_eth_common.h | 296 ++ .../yunsilicon/xsc/net/xsc_eth_compat.h | 11 + .../yunsilicon/xsc/net/xsc_eth_ctrl.c | 654 ++++ .../yunsilicon/xsc/net/xsc_eth_ctrl.h | 12 + .../yunsilicon/xsc/net/xsc_eth_debug.h | 112 + .../ethernet/yunsilicon/xsc/net/xsc_eth_dim.c | 119 + .../ethernet/yunsilicon/xsc/net/xsc_eth_dim.h | 47 + .../yunsilicon/xsc/net/xsc_eth_ethtool.c | 1279 +++++++ .../yunsilicon/xsc/net/xsc_eth_ethtool.h | 19 + .../ethernet/yunsilicon/xsc/net/xsc_eth_rx.c | 804 ++++ .../yunsilicon/xsc/net/xsc_eth_stats.c | 651 ++++ .../yunsilicon/xsc/net/xsc_eth_stats.h | 183 + .../yunsilicon/xsc/net/xsc_eth_sysfs.c | 373 ++ 
.../ethernet/yunsilicon/xsc/net/xsc_eth_tx.c | 564 +++ .../yunsilicon/xsc/net/xsc_eth_txrx.c | 173 + .../yunsilicon/xsc/net/xsc_eth_txrx.h | 82 + .../net/ethernet/yunsilicon/xsc/net/xsc_fs.c | 133 + .../ethernet/yunsilicon/xsc/net/xsc_hw_comm.c | 199 + .../ethernet/yunsilicon/xsc/net/xsc_hw_comm.h | 11 + .../ethernet/yunsilicon/xsc/net/xsc_queue.h | 280 ++ .../net/ethernet/yunsilicon/xsc/pci/Kconfig | 17 + .../net/ethernet/yunsilicon/xsc/pci/Makefile | 16 + .../net/ethernet/yunsilicon/xsc/pci/alloc.c | 338 ++ .../net/ethernet/yunsilicon/xsc/pci/cmd2.c | 2148 +++++++++++ drivers/net/ethernet/yunsilicon/xsc/pci/cq.c | 159 + .../net/ethernet/yunsilicon/xsc/pci/debugfs.c | 866 +++++ .../net/ethernet/yunsilicon/xsc/pci/devlink.c | 36 + .../net/ethernet/yunsilicon/xsc/pci/devlink.h | 16 + drivers/net/ethernet/yunsilicon/xsc/pci/eq.c | 364 ++ .../net/ethernet/yunsilicon/xsc/pci/eswitch.c | 812 ++++ .../net/ethernet/yunsilicon/xsc/pci/eswitch.h | 170 + drivers/net/ethernet/yunsilicon/xsc/pci/fw.c | 316 ++ .../ethernet/yunsilicon/xsc/pci/fw/bitops.h | 43 + .../net/ethernet/yunsilicon/xsc/pci/fw/cmd.c | 277 ++ .../net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h | 55 + .../ethernet/yunsilicon/xsc/pci/fw/osdep.c | 46 + .../ethernet/yunsilicon/xsc/pci/fw/osdep.h | 31 + .../yunsilicon/xsc/pci/fw/xsc_counters.h | 40 + .../ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c | 196 + .../ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h | 66 + .../ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h | 67 + .../ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c | 16 + .../yunsilicon/xsc/pci/fw/xsc_reg_struct.h | 22 + .../ethernet/yunsilicon/xsc/pci/fw/xsc_res.c | 325 ++ .../net/ethernet/yunsilicon/xsc/pci/intf.c | 268 ++ .../net/ethernet/yunsilicon/xsc/pci/main.c | 937 +++++ drivers/net/ethernet/yunsilicon/xsc/pci/mr.c | 246 ++ .../net/ethernet/yunsilicon/xsc/pci/pci_irq.c | 515 +++ drivers/net/ethernet/yunsilicon/xsc/pci/pd.c | 50 + .../net/ethernet/yunsilicon/xsc/pci/port.c | 277 ++ drivers/net/ethernet/yunsilicon/xsc/pci/qp.c 
| 478 +++ .../net/ethernet/yunsilicon/xsc/pci/qpts.c | 212 + .../net/ethernet/yunsilicon/xsc/pci/res_obj.c | 450 +++ .../net/ethernet/yunsilicon/xsc/pci/sriov.c | 288 ++ .../ethernet/yunsilicon/xsc/pci/sriov_sysfs.c | 1063 ++++++ .../yunsilicon/xsc/pci/tmp_cmdq_defines.h | 18 + .../net/ethernet/yunsilicon/xsc/pci/vport.c | 954 +++++ drivers/net/ethernet/yunsilicon/xsc/pci/wq.c | 98 + drivers/net/ethernet/yunsilicon/xsc/pci/wq.h | 170 + .../net/ethernet/yunsilicon/xsc/pci/xsc_lag.c | 1418 +++++++ .../yunsilicon/xsc/pci/xsc_pci_ctrl.c | 909 +++++ .../yunsilicon/xsc/pci/xsc_pci_ctrl.h | 51 + .../yunsilicon/xsc/pci/xsc_port_ctrl.c | 512 +++ 128 files changed, 42631 insertions(+) create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_XSC create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_YUNSILICON create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_ETH create mode 100644 anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_PCI create mode 100644 drivers/infiniband/hw/xsc/Kconfig create mode 100644 drivers/infiniband/hw/xsc/Makefile create mode 100644 drivers/infiniband/hw/xsc/ah.c create mode 100644 drivers/infiniband/hw/xsc/counters.c create mode 100644 drivers/infiniband/hw/xsc/counters.h create mode 100644 drivers/infiniband/hw/xsc/cq.c create mode 100644 drivers/infiniband/hw/xsc/devx.c create mode 100644 drivers/infiniband/hw/xsc/ib_peer_mem.h create mode 100644 drivers/infiniband/hw/xsc/ib_umem_ex.c create mode 100644 drivers/infiniband/hw/xsc/ib_umem_ex.h create mode 100644 drivers/infiniband/hw/xsc/main.c create mode 100644 drivers/infiniband/hw/xsc/mem.c create mode 100644 drivers/infiniband/hw/xsc/mr.c create mode 100644 drivers/infiniband/hw/xsc/peer_mem.c create mode 100644 drivers/infiniband/hw/xsc/peer_mem.h create mode 100644 drivers/infiniband/hw/xsc/private_dev.c create mode 100644 drivers/infiniband/hw/xsc/qp.c create mode 100644 drivers/infiniband/hw/xsc/rtt.c create mode 100644 
drivers/infiniband/hw/xsc/user.h create mode 100644 drivers/infiniband/hw/xsc/xsc_ib.h create mode 100644 drivers/infiniband/hw/xsc/xsc_ib_compat.h create mode 100644 drivers/infiniband/hw/xsc/xsc_ib_sysfs.c create mode 100644 drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c create mode 100644 drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h create mode 100644 drivers/net/ethernet/yunsilicon/Kconfig create mode 100644 drivers/net/ethernet/yunsilicon/Makefile create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/cq.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/device.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/doorbell.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/driver.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/port.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/qp.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/qpts.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/version.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/vport.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/xsc_eswitch.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/xsc_macro.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/xsc_pp.h create mode 100644 
drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/Kconfig create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/Makefile create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/main.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.c create mode 100644 
drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/Makefile create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/cmd2.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/cq.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/devlink.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/eq.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/fw.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_counters.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_reg_struct.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/intf.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/main.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/mr.c create 
mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/pd.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/port.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/qp.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/sriov_sysfs.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/vport.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/wq.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/wq.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h create mode 100644 drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c diff --git a/MAINTAINERS b/MAINTAINERS index 5a3cab8bbe28..f6c91f5b2ad2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23863,6 +23863,12 @@ S: Maintained F: Documentation/input/devices/yealink.rst F: drivers/input/misc/yealink.* +YUNSILICON XSC DRIVERS +M: Weihonggang +S: Maintained +F: drivers/infiniband/hw/xsc +F: drivers/net/ethernet/yunsilicon/xsc + Z3FOLD COMPRESSED PAGE ALLOCATOR M: Vitaly Wool R: Miaohe Lin diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_XSC b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_XSC new file mode 100644 index 000000000000..734ca6c9dfe0 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_INFINIBAND_XSC @@ -0,0 +1 @@ +CONFIG_INFINIBAND_XSC=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_YUNSILICON b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_YUNSILICON new file mode 100644 index 
000000000000..f6aca2a290f7 --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_NET_VENDOR_YUNSILICON @@ -0,0 +1 @@ +CONFIG_NET_VENDOR_YUNSILICON=y diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_ETH b/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_ETH new file mode 100644 index 000000000000..343284c7c0de --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_ETH @@ -0,0 +1 @@ +CONFIG_YUNSILICON_XSC_ETH=m diff --git a/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_PCI b/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_PCI new file mode 100644 index 000000000000..3a3fbc36325a --- /dev/null +++ b/anolis/configs/L1-RECOMMEND/default/CONFIG_YUNSILICON_XSC_PCI @@ -0,0 +1 @@ +CONFIG_YUNSILICON_XSC_PCI=m diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index a5827d11e934..9d6a7cbab0ae 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -95,6 +95,7 @@ source "drivers/infiniband/hw/qedr/Kconfig" source "drivers/infiniband/hw/qib/Kconfig" source "drivers/infiniband/hw/usnic/Kconfig" source "drivers/infiniband/hw/vmw_pvrdma/Kconfig" +source "drivers/infiniband/hw/xsc/Kconfig" source "drivers/infiniband/sw/rdmavt/Kconfig" endif # !UML source "drivers/infiniband/sw/rxe/Kconfig" diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile index 1211f4317a9f..b8fc3871dd18 100644 --- a/drivers/infiniband/hw/Makefile +++ b/drivers/infiniband/hw/Makefile @@ -15,3 +15,4 @@ obj-$(CONFIG_INFINIBAND_HNS) += hns/ obj-$(CONFIG_INFINIBAND_QEDR) += qedr/ obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/ obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/ +obj-$(CONFIG_INFINIBAND_XSC) += xsc/ diff --git a/drivers/infiniband/hw/xsc/Kconfig b/drivers/infiniband/hw/xsc/Kconfig new file mode 100644 index 000000000000..6c3d4b7b330e --- /dev/null +++ b/drivers/infiniband/hw/xsc/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, 
Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. + +config INFINIBAND_XSC + tristate "Yunsilicon XSC RDMA driver" + default n + depends on NETDEVICES && ETHERNET && PCI && INET + depends on YUNSILICON_XSC_PCI && YUNSILICON_XSC_ETH + help + This driver provides RDMA support for + Yunsilicon XSC devices. + + To compile this driver as a module, choose M here. The module + will be called xsc_ib. diff --git a/drivers/infiniband/hw/xsc/Makefile b/drivers/infiniband/hw/xsc/Makefile new file mode 100644 index 000000000000..b4fa5748bbad --- /dev/null +++ b/drivers/infiniband/hw/xsc/Makefile @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. + +ccflags-y := -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc +ccflags-y += -Wno-implicit-fallthrough +ifeq ($(USE_INTERNAL_IB_CORE), 1) + ccflags-y += -include /usr/src/ofa_kernel/include/rdma/ib_umem.h +endif + +obj-$(CONFIG_INFINIBAND_XSC) += xsc_ib.o + +xsc_ib-y := main.o xsc_rdma_ctrl.o cq.o qp.o mem.o mr.o ah.o \ + counters.o devx.o private_dev.o ib_umem_ex.o\ + rtt.o xsc_ib_sysfs.o + +xsc_ib-$(CONFIG_XSC_PEER_SUPPORT) += peer_mem.o diff --git a/drivers/infiniband/hw/xsc/ah.c b/drivers/infiniband/hw/xsc/ah.c new file mode 100644 index 000000000000..39da2861897d --- /dev/null +++ b/drivers/infiniband/hw/xsc/ah.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include "xsc_ib.h" +#include "user.h" + +static u32 xsc_calc_roce_udp_flow_label(void) +{ + u32 factor = 0; + u32 hash = 0; + u32 flow_label = 0; + + /*This function will generate a 20 bit flow_label*/ + factor = (IB_GRH_FLOWLABEL_MASK - IB_ROCE_UDP_ENCAP_VALID_PORT_MIN + 1); + hash = get_random_u32() % factor; + flow_label = hash & IB_GRH_FLOWLABEL_MASK; + + return flow_label; +} + +static u16 xsc_ah_get_udp_sport(const struct xsc_ib_dev *dev, + struct rdma_ah_attr *ah_attr) +{ + enum ib_gid_type gid_type = ah_attr->grh.sgid_attr->gid_type; + u16 sport = 0; + u32 fl = 0; + + if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP && + (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) && + (ah_attr->grh.flow_label & IB_GRH_FLOWLABEL_MASK)) { + fl = ah_attr->grh.flow_label; + } else { + /*generate a 20bit flow_label and output to user layer*/ + fl = xsc_calc_roce_udp_flow_label(); + ah_attr->grh.flow_label = fl; + } + + sport = xsc_flow_label_to_udp_sport(fl); + xsc_ib_dbg(dev, "fl=0x%x,sport=0x%x\n", fl, sport); + return sport; +} + +static struct ib_ah *create_ib_ah(struct xsc_ib_dev *dev, + struct xsc_ib_ah *ah, + struct rdma_ah_attr *ah_attr) +{ + enum ib_gid_type gid_type; + + if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) { + const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); + + memcpy(ah->av.rgid, &grh->dgid, 16); + ah->av.grh_gid_fl = cpu_to_be32(grh->flow_label | + (1 << 30) | + grh->sgid_index << 20); + ah->av.hop_limit = grh->hop_limit; + ah->av.tclass = grh->traffic_class; + } + + ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4); + + if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { + gid_type = ah_attr->grh.sgid_attr->gid_type; + + memcpy(ah->av.rmac, ah_attr->roce.dmac, + sizeof(ah_attr->roce.dmac)); + + ah->av.udp_sport = xsc_ah_get_udp_sport(dev, ah_attr); + ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0x7) << 1; + if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) +#define XSC_ECN_ENABLED BIT(1) + 
ah->av.tclass |= XSC_ECN_ENABLED; + } else { + ah->av.rlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr)); + ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f; + ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf); + } + + return &ah->ibah; +} + +xsc_ib_create_ah_def() +{ + struct xsc_ib_ah *ah = to_mah(ibah); + struct xsc_ib_dev *dev = to_mdev(ibah->device); + struct rdma_ah_attr *ah_attr = init_attr->ah_attr; + enum rdma_ah_attr_type ah_type = ah_attr->type; + + if (ah_type == RDMA_AH_ATTR_TYPE_ROCE && + !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) + return RET_VALUE(-EINVAL); + + if (ah_type == RDMA_AH_ATTR_TYPE_ROCE && udata) { + int err; + struct xsc_ib_create_ah_resp resp = {}; + u32 min_resp_len = offsetof(typeof(resp), dmac) + + sizeof(resp.dmac); + + if (udata->outlen < min_resp_len) + return RET_VALUE(-EINVAL); + + resp.response_length = min_resp_len; + memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN); + err = ib_copy_to_udata(udata, &resp, resp.response_length); + if (err) + return RET_VALUE(err); + } + + create_ib_ah(dev, ah, ah_attr); /* never fails */ + return 0; +} + +int xsc_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) +{ + return 0; +} + +xsc_ib_destroy_ah_def() +{ + return 0; +} diff --git a/drivers/infiniband/hw/xsc/counters.c b/drivers/infiniband/hw/xsc/counters.c new file mode 100644 index 000000000000..971ecf4ff1af --- /dev/null +++ b/drivers/infiniband/hw/xsc/counters.c @@ -0,0 +1,538 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/driver.h" +#include "common/xsc_lag.h" +#include "common/xsc_cmd.h" +#include "counters.h" + +#define COUNTERS_FILE_NAME "counters" +#define COUNTERS_NAMES_FILE_NAME "counters_names" +#define COUNTERS_VALUE_FILE_NAME "counters_value" +#define COUNTERS_ATTER_GROUP_NAME "counters" +#define GLOBAL_COUNTERS_GROUP_NAME "global_counters" +#define GLOBAL_COUNTERS_FILE_NAME "counters" + +static const struct counter_desc hw_rdma_stats_pf_desc[] = { + /*by mac port*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_tx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_tx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_rx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_rx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, np_cnp_sent) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rp_cnp_handled) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, np_ecn_marked_roce_packets) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rp_cnp_ignored) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, read_rsp_out_of_seq) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, implied_nak_seq_err) }, + /*by function*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, out_of_sequence) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, packet_seq_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, out_of_buffer) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rnr_nak_retry_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, local_ack_timeout_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rx_read_requests) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rx_write_requests) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, duplicate_requests) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_tx_pkts_func) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, 
rdma_tx_payload_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_rx_pkts_func) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_rx_payload_bytes) }, + /*global*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_loopback_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_pf, rdma_loopback_bytes) }, +}; + +static const struct counter_desc hw_rdma_stats_vf_desc[] = { + /*by function*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rdma_tx_pkts_func) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rdma_tx_payload_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rdma_rx_pkts_func) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rdma_rx_payload_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, out_of_sequence) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, packet_seq_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, out_of_buffer) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rnr_nak_retry_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, local_ack_timeout_err) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rx_read_requests) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, rx_write_requests) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_rdma_vf, duplicate_requests) }, +}; + +static const struct counter_desc hw_global_rdma_stats_desc[] = { + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, rdma_loopback_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, rdma_loopback_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, rx_icrc_encapsulated) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, req_cqe_error) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, resp_cqe_error) }, + { XSC_DECLARE_STAT(struct xsc_hw_global_stats_rdma, cqe_msg_code_error) }, +}; + +static int get_hw_stats_rdma(struct xsc_core_device *dev, struct xsc_hw_stats_rdma *stats_rdma) +{ + int i = 0; + int ret; + int inlen; + struct xsc_lag *lag; + struct 
xsc_hw_stats_mbox_in *in; + struct xsc_hw_stats_rdma_mbox_out out; + struct xsc_core_device *xdev_tmp; + + memset(stats_rdma, 0, sizeof(*stats_rdma)); + + if (!dev) + return -1; + + inlen = sizeof(struct xsc_hw_stats_mbox_in) + XSC_MAX_PORTS; + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + xsc_board_lag_lock(dev); + if (xsc_lag_is_roce(dev)) { + lag = xsc_get_lag(dev); + in->lag_member_num = lag->xsc_member_cnt; + list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) + in->member_port[i++] = xdev_tmp->mac_port; + in->is_lag = 1; + } else { + in->is_lag = 0; + in->mac_port = dev->mac_port; + } + xsc_board_lag_unlock(dev); + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HW_STATS_RDMA); + memset(&out, 0, sizeof(out)); + ret = xsc_cmd_exec(dev, (void *)in, inlen, (void *)&out, sizeof(out)); + if (ret || out.hdr.status) { + kfree(in); + return -1; + } + + memcpy(stats_rdma, &out.hw_stats, sizeof(*stats_rdma)); + kfree(in); + return 0; +} + +static ssize_t counters_names_show(struct kobject *kobjs, + struct attribute *attr, char *buf) +{ + int i; + int desc_size; + ssize_t count = 0; + const struct counter_desc *desc; + struct xsc_counters_attribute *xsc_counters_name_attr; + + xsc_counters_name_attr = container_of(attr, + struct xsc_counters_attribute, + attr); + + if (is_support_hw_pf_stats(xsc_counters_name_attr->dev)) { + desc = &hw_rdma_stats_pf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_pf_desc); + } else { + desc = &hw_rdma_stats_vf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_vf_desc); + } + + for (i = 0; i < desc_size; ++i) + count += sprintf(&buf[count], "%s\n", desc[i].format); + + return count; +} + +static ssize_t counters_show(struct kobject *kobjs, + struct attribute *attr, char *buf) +{ + int i; + int ret; + u8 *stats; + u64 value; + int desc_size; + ssize_t count = 0; + const struct counter_desc *desc; + struct xsc_hw_stats_rdma stats_rdma; + struct xsc_counters_attribute *xsc_counters_attr; + + xsc_counters_attr = 
container_of(attr, + struct xsc_counters_attribute, + attr); + + ret = get_hw_stats_rdma(xsc_counters_attr->dev, &stats_rdma); + if (ret || is_support_hw_pf_stats(xsc_counters_attr->dev) != stats_rdma.is_pf) + return 0; + + if (is_support_hw_pf_stats(xsc_counters_attr->dev)) { + desc = &hw_rdma_stats_pf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_pf_desc); + stats = (u8 *)&stats_rdma.stats.pf_stats; + } else { + desc = &hw_rdma_stats_vf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_vf_desc); + stats = (u8 *)&stats_rdma.stats.vf_stats; + } + + for (i = 0 ; i < desc_size; i++) { + value = *(u64 *)(stats + desc[i].offset); + value = be64_to_cpu(value); + count += sprintf(&buf[count], "%-26s %-20llu\n", + desc[i].format, value); + } + + return count; +} + +static ssize_t counters_value_read(struct file *file, + struct kobject *kob, + struct bin_attribute *bin_attr, + char *buf, loff_t loff, size_t size) +{ + int i; + int ret; + u8 *stats; + int bin_size; + int desc_size; + u64 *tmp_value; + struct xsc_core_device *xdev; + const struct counter_desc *desc; + struct xsc_hw_stats_rdma stats_rdma; + struct xsc_counters_bin_attribute *xsc_counters_bin_attr; + + xsc_counters_bin_attr = container_of(&bin_attr->attr, + struct xsc_counters_bin_attribute, + attr); + + if (xsc_counters_bin_attr->size > size || xsc_counters_bin_attr->size == 0) + return 0; + + xdev = (struct xsc_core_device *)xsc_counters_bin_attr->private; + ret = get_hw_stats_rdma(xdev, &stats_rdma); + if (ret || is_support_hw_pf_stats(xdev) != stats_rdma.is_pf) + return 0; + + if (is_support_hw_pf_stats(xdev)) { + desc = &hw_rdma_stats_pf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_pf_desc); + stats = (u8 *)&stats_rdma.stats.pf_stats; + } else { + desc = &hw_rdma_stats_vf_desc[0]; + desc_size = ARRAY_SIZE(hw_rdma_stats_vf_desc); + stats = (u8 *)&stats_rdma.stats.vf_stats; + } + + bin_size = desc_size * sizeof(u64); + if (xsc_counters_bin_attr->size < bin_size) + return 0; + + tmp_value = 
kzalloc(xsc_counters_bin_attr->size, GFP_KERNEL); + if (!tmp_value) + return 0; + + for (i = 0; i < desc_size; i++) { + tmp_value[i] = *(u64 *)(stats + desc[i].offset); + tmp_value[i] = be64_to_cpu(tmp_value[i]); + } + + memcpy(buf, tmp_value, xsc_counters_bin_attr->size); + + kfree(tmp_value); + return xsc_counters_bin_attr->size; +} + +static int counters_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + struct xsc_counters_attribute *xsc_counters_name, *xsc_counters; + struct xsc_counters_bin_attribute *xsc_counters_bin; + struct attribute_group *counters_attr_g; + struct bin_attribute **counters_bin_attrs; + struct attribute **counters_attrs; + int ret = -ENOMEM; + + xsc_counters_name = kzalloc(sizeof(*xsc_counters_name), GFP_KERNEL); + if (!xsc_counters_name) + return -ENOMEM; + + xsc_counters = kzalloc(sizeof(*xsc_counters), GFP_KERNEL); + if (!xsc_counters) + goto err_xsc_counters; + + xsc_counters_bin = kzalloc(sizeof(*xsc_counters_bin), GFP_KERNEL); + if (!xsc_counters_bin) + goto err_xsc_counters_bin; + + counters_bin_attrs = kzalloc(sizeof(*counters_bin_attrs) * 2, GFP_KERNEL); + if (!counters_bin_attrs) + goto err_counters_bin_attrs; + + counters_attrs = kzalloc(sizeof(*counters_attrs) * 3, GFP_KERNEL); + if (!counters_attrs) + goto err_counters_attrs; + + counters_attr_g = kzalloc(sizeof(*counters_attr_g), GFP_KERNEL); + if (!counters_attr_g) + goto err_counters_attr_g; + + sysfs_attr_init(&xsc_counters_name->attr); + xsc_counters_name->attr.name = COUNTERS_NAMES_FILE_NAME; + xsc_counters_name->attr.mode = 0444; + xsc_counters_name->show = counters_names_show; + xsc_counters_name->dev = dev; + + sysfs_attr_init(&xsc_counters->attr); + xsc_counters->attr.name = COUNTERS_FILE_NAME; + xsc_counters->attr.mode = 0444; + xsc_counters->show = counters_show; + xsc_counters->dev = dev; + + sysfs_attr_init(&xsc_counters_bin->attr); + xsc_counters_bin->attr.name = COUNTERS_VALUE_FILE_NAME; + xsc_counters_bin->attr.mode = 0444; + 
xsc_counters_bin->read = counters_value_read; + xsc_counters_bin->private = dev; + xsc_counters_bin->size = sizeof(struct xsc_hw_stats_rdma); + + counters_bin_attrs[0] = (struct bin_attribute *)xsc_counters_bin; + counters_attrs[0] = (struct attribute *)xsc_counters_name; + counters_attrs[1] = (struct attribute *)xsc_counters; + + counters_attr_g->name = COUNTERS_ATTER_GROUP_NAME; + counters_attr_g->attrs = counters_attrs; + counters_attr_g->bin_attrs = counters_bin_attrs; + + dev->counters_priv = counters_attr_g; + + ret = sysfs_create_group(&ib_dev->dev.kobj, counters_attr_g); + if (ret) + goto err_counters_create_group; + + return 0; + +err_counters_create_group: + kfree(counters_attr_g); + counters_attr_g = NULL; + +err_counters_attr_g: + kfree(counters_attrs); + counters_attrs = NULL; + +err_counters_attrs: + kfree(counters_bin_attrs); + counters_bin_attrs = NULL; + +err_counters_bin_attrs: + kfree(xsc_counters_bin); + xsc_counters_bin = NULL; + +err_xsc_counters_bin: + kfree(xsc_counters); + xsc_counters = NULL; + +err_xsc_counters: + kfree(xsc_counters_name); + xsc_counters_name = NULL; + + return ret; +} + +static void counters_sysfs_fini(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + struct xsc_counters_attribute *xsc_counters_name, *xsc_counters; + struct xsc_counters_bin_attribute *xsc_counters_bin; + struct bin_attribute **counters_bin_attrs; + struct attribute **counters_attrs; + struct attribute_group *counters_attr_g; + + counters_attr_g = dev->counters_priv; + counters_attrs = counters_attr_g->attrs; + counters_bin_attrs = counters_attr_g->bin_attrs; + + xsc_counters_bin = (struct xsc_counters_bin_attribute *)counters_bin_attrs[0]; + xsc_counters_name = (struct xsc_counters_attribute *)counters_attrs[0]; + xsc_counters = (struct xsc_counters_attribute *)counters_attrs[1]; + + if (counters_attr_g) { + sysfs_remove_group(&ib_dev->dev.kobj, counters_attr_g); + kfree(counters_attr_g); + counters_attr_g = NULL; + } + + kfree(counters_attrs); 
+ counters_attrs = NULL; + + kfree(counters_bin_attrs); + counters_bin_attrs = NULL; + + kfree(xsc_counters_bin); + xsc_counters_bin = NULL; + + kfree(xsc_counters_name); + xsc_counters_name = NULL; + + kfree(xsc_counters); + xsc_counters = NULL; +} + +static ssize_t global_cnt_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct xsc_global_cnt_attributes *a = + container_of(attr, struct xsc_global_cnt_attributes, attr); + struct xsc_global_cnt_interface *g = + container_of(kobj, struct xsc_global_cnt_interface, kobj); + + if (!a->show) + return -EIO; + + return a->show(g, a, buf); +} + +static ssize_t global_cnt_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t size) +{ + struct xsc_global_cnt_attributes *a = + container_of(attr, struct xsc_global_cnt_attributes, attr); + struct xsc_global_cnt_interface *g = + container_of(kobj, struct xsc_global_cnt_interface, kobj); + + if (!a->store) + return -EIO; + + return a->store(g, a, buf, size); +} + +static ssize_t global_counters_show(struct xsc_global_cnt_interface *g, + struct xsc_global_cnt_attributes *a, char *buf) +{ + int i; + int ret; + u8 *stats; + u64 value; + int desc_size; + ssize_t count = 0; + const struct counter_desc *desc; + struct xsc_hw_global_stats_mbox_in in; + struct xsc_hw_global_stats_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HW_GLOBAL_STATS); + ret = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(in), + (void *)&out, sizeof(out)); + if (ret || out.hdr.status) + return 0; + + desc = &hw_global_rdma_stats_desc[0]; + desc_size = ARRAY_SIZE(hw_global_rdma_stats_desc); + stats = (u8 *)&out.hw_stats; + + for (i = 0 ; i < desc_size; i++) { + value = *(u64 *)(stats + desc[i].offset); + value = be64_to_cpu(value); + count += sprintf(&buf[count], "%-26s %-20llu\n", + desc[i].format, value); + } + + return count; +} + +static ssize_t global_counters_store(struct 
xsc_global_cnt_interface *g,
				     struct xsc_global_cnt_attributes *a,
				     const char *buf, size_t count)
{
	/* Global counters are read-only; any write is rejected. */
	return -EOPNOTSUPP;
}

/* Declare a 0444 global-counter attribute wired to global_<name>_show/store. */
#define GLOBAL_CNT_ATTR(_name) struct xsc_global_cnt_attributes xsc_global_cnt_attr_##_name = \
	__ATTR(_name, 0444, global_##_name##_show, global_##_name##_store)

GLOBAL_CNT_ATTR(counters);

static const struct sysfs_ops global_cnt_sysfs_ops = {
	.show = global_cnt_attr_show,
	.store = global_cnt_attr_store,
};

static struct attribute *global_cnt_attrs[] = {
	&xsc_global_cnt_attr_counters.attr,
	NULL
};

ATTRIBUTE_GROUPS(global_cnt);

/*
 * NOTE(review): this ktype has no .release callback; kobject_put() on such
 * a kobject triggers a kernel WARN and the object is instead freed manually
 * with kfree() in init/fini below. Confirm this is intentional — the
 * conventional pattern is a release callback that performs the kfree.
 */
static const struct kobj_type global_cnt_ktype = {
	.sysfs_ops = &global_cnt_sysfs_ops,
	.default_groups = global_cnt_groups,
};

/* Singleton: the global-counters kobject exists once, created on PF0 only. */
static struct xsc_global_cnt_interface *g_global_cnt_interface;

/*
 * Create the global-counters sysfs directory under the IB device kobject.
 * Silently succeeds (no-op) on anything other than physical function 0.
 */
static int global_cnt_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev)
{
	struct xsc_global_cnt_interface *tmp;
	int err;

	if (!xdev || !xsc_core_is_pf(xdev) || xdev->pf_id != 0)
		return 0;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	err = kobject_init_and_add(&tmp->kobj, &global_cnt_ktype,
				   &ib_dev->dev.kobj, GLOBAL_COUNTERS_GROUP_NAME);
	if (err)
		goto error_return;

	/* Publish only after the kobject is fully set up. */
	g_global_cnt_interface = tmp;
	tmp->xdev = xdev;
	return 0;

error_return:
	kobject_put(&tmp->kobj);
	kfree(tmp);
	return err;
}

/* Tear down the PF0 global-counters directory, if it was ever created. */
static void global_cnt_sysfs_fini(struct xsc_core_device *xdev)
{
	if (!g_global_cnt_interface || !xdev || !xsc_core_is_pf(xdev) || xdev->pf_id != 0)
		return;

	kobject_put(&g_global_cnt_interface->kobj);
	kfree(g_global_cnt_interface);
	g_global_cnt_interface = NULL;
}

/*
 * Register all counter sysfs entries for this device: the per-device
 * counters group, plus (on PF0 only) the global-counters directory.
 * On partial failure the already-created group is rolled back.
 */
int xsc_counters_init(struct ib_device *ib_dev, struct xsc_core_device *dev)
{
	int ret;

	ret = counters_sysfs_init(ib_dev, dev);
	if (ret)
		goto error_return;

	ret = global_cnt_sysfs_init(ib_dev, dev);
	if (ret)
		goto error_global_cnt;

	return 0;

error_global_cnt:
	counters_sysfs_fini(ib_dev, dev);
error_return:
	return
ret; +} + +void xsc_counters_fini(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + counters_sysfs_fini(ib_dev, dev); + global_cnt_sysfs_fini(dev); +} diff --git a/drivers/infiniband/hw/xsc/counters.h b/drivers/infiniband/hw/xsc/counters.h new file mode 100644 index 000000000000..001a57b8372d --- /dev/null +++ b/drivers/infiniband/hw/xsc/counters.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __COUNTERS_H__ +#define __COUNTERS_H__ + +#define STRING_LEN 32 +#define XSC_DECLARE_STAT(type, fld) ""#fld, offsetof(type, fld) + +struct counter_desc { + char format[STRING_LEN]; + size_t offset; /* Byte offset */ +}; + +struct xsc_counters_attribute { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); + ssize_t (*store)(struct kobject *kobj, + struct attribute *attr, const char *buf, + size_t count); + struct xsc_core_device *dev; +}; + +struct xsc_counters_bin_attribute { + struct attribute attr; + size_t size; + void *private; + ssize_t (*read)(struct file *f, struct kobject *k, struct bin_attribute *bin_attr, + char *buf, loff_t l, size_t s); + ssize_t (*write)(struct file *f, struct kobject *k, struct bin_attribute *bin_attr, + char *buf, loff_t l, size_t s); + int (*mmap)(struct file *f, struct kobject *k, struct bin_attribute *bin_attr, + struct vm_area_struct *vma); +}; + +struct xsc_global_cnt_interface { + struct xsc_core_device *xdev; + struct kobject kobj; +}; + +struct xsc_global_cnt_attributes { + struct attribute attr; + ssize_t (*show)(struct xsc_global_cnt_interface *g, struct xsc_global_cnt_attributes *a, + char *buf); + ssize_t (*store)(struct xsc_global_cnt_interface *g, struct xsc_global_cnt_attributes *a, + const char *buf, size_t count); +}; + +#endif diff --git a/drivers/infiniband/hw/xsc/cq.c b/drivers/infiniband/hw/xsc/cq.c new file mode 100644 index 
000000000000..102902410b86 --- /dev/null +++ b/drivers/infiniband/hw/xsc/cq.c @@ -0,0 +1,690 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd.
 * All rights reserved.
 */

/*
 * NOTE(review): the targets of the angle-bracket includes were lost when
 * this patch was extracted (likely <linux/...> / <rdma/...> headers) —
 * restore them from the original submission before building.
 */
#include
#include
#include "xsc_ib.h"
#include "user.h"
#include "common/xsc_hsi.h"
#include

/* Internal CQ-poll result codes. */
enum {
	CQ_OK = 0,
	CQ_EMPTY = -1,
	CQ_POLL_ERR = -2
};

enum {
	XSC_CQE_APP_TAG_MATCHING = 1,
};

/* Tag-matching application opcodes carried in application CQEs. */
enum {
	XSC_CQE_APP_OP_TM_CONSUMED = 0x1,
	XSC_CQE_APP_OP_TM_EXPECTED = 0x2,
	XSC_CQE_APP_OP_TM_UNEXPECTED = 0x3,
	XSC_CQE_APP_OP_TM_NO_TAG = 0x4,
	XSC_CQE_APP_OP_TM_APPEND = 0x5,
	XSC_CQE_APP_OP_TM_REMOVE = 0x6,
	XSC_CQE_APP_OP_TM_NOOP = 0x7,
	XSC_CQE_APP_OP_TM_CONSUMED_SW_RDNV = 0x9,
	XSC_CQE_APP_OP_TM_CONSUMED_MSG = 0xA,
	XSC_CQE_APP_OP_TM_CONSUMED_MSG_SW_RDNV = 0xB,
	XSC_CQE_APP_OP_TM_MSG_COMPLETION_CANCELED = 0xC,
};

/*
 * Map (msg_opcode, requester/responder, with/without immediate) from a raw
 * hardware CQE to the driver's logical opcode. Combinations the hardware
 * should never produce map to XSC_OPCODE_RDMA_CQE_ERROR.
 */
static const u32 xsc_msg_opcode[][2][2] = {
	[XSC_MSG_OPCODE_SEND][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_SEND,
	[XSC_MSG_OPCODE_SEND][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_REQ_SEND_IMMDT,
	[XSC_MSG_OPCODE_SEND][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_RSP_RECV,
	[XSC_MSG_OPCODE_SEND][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_RSP_RECV_IMMDT,
	[XSC_MSG_OPCODE_RDMA_WRITE][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_WRITE,
	[XSC_MSG_OPCODE_RDMA_WRITE][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_REQ_WRITE_IMMDT,
	[XSC_MSG_OPCODE_RDMA_WRITE][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR,
	[XSC_MSG_OPCODE_RDMA_WRITE][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_RSP_WRITE_IMMDT,
	[XSC_MSG_OPCODE_RDMA_READ][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_REQ_READ,
	[XSC_MSG_OPCODE_RDMA_READ][XSC_REQ][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR,
	[XSC_MSG_OPCODE_RDMA_READ][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR,
	[XSC_MSG_OPCODE_RDMA_READ][XSC_RSP][XSC_WITH_IMMDT] = XSC_OPCODE_RDMA_CQE_ERROR,
	[XSC_MSG_OPCODE_MAD][XSC_REQ][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_MAD_REQ_SEND,
	[XSC_MSG_OPCODE_MAD][XSC_RSP][XSC_WITHOUT_IMMDT] = XSC_OPCODE_RDMA_MAD_RSP_RECV,
};

/* Logical opcode -> ib_wc opcode reported to the consumer. */
static const u32 xsc_cqe_opcode[] = {
	[XSC_OPCODE_RDMA_REQ_SEND] = IB_WC_SEND,
	[XSC_OPCODE_RDMA_REQ_SEND_IMMDT] = IB_WC_SEND,
	[XSC_OPCODE_RDMA_RSP_RECV] = IB_WC_RECV,
	[XSC_OPCODE_RDMA_RSP_RECV_IMMDT] = IB_WC_RECV,
	[XSC_OPCODE_RDMA_REQ_WRITE] = IB_WC_RDMA_WRITE,
	[XSC_OPCODE_RDMA_REQ_WRITE_IMMDT] = IB_WC_RDMA_WRITE,
	[XSC_OPCODE_RDMA_RSP_WRITE_IMMDT] = IB_WC_RECV_RDMA_WITH_IMM,
	[XSC_OPCODE_RDMA_REQ_READ] = IB_WC_RDMA_READ,
	[XSC_OPCODE_RDMA_MAD_REQ_SEND] = IB_WC_SEND,
	[XSC_OPCODE_RDMA_MAD_RSP_RECV] = IB_WC_RECV,
};

/* CQ-poll stall/backoff tunables (module-scope knobs). */
int xsc_stall_num_loop = 60;
int xsc_stall_cq_poll_min = 60;
int xsc_stall_cq_poll_max = 100000;
int xsc_stall_cq_inc_step = 100;
int xsc_stall_cq_dec_step = 10;

/*
 * Decode a raw CQE into the driver's logical opcode, folding the error and
 * out-of-range msg_opcode cases in first.
 */
static inline u8 xsc_get_cqe_opcode(struct xsc_cqe *cqe)
{
	if (cqe->is_error)
		return cqe->type ? XSC_OPCODE_RDMA_RSP_ERROR : XSC_OPCODE_RDMA_REQ_ERROR;
	if (cqe->msg_opcode > XSC_MSG_OPCODE_MAD)
		return XSC_OPCODE_RDMA_CQE_ERROR;
	return xsc_msg_opcode[cqe->msg_opcode][cqe->type][cqe->with_immdt];
}

/* Completion callback: forward to the consumer's comp_handler. */
static void xsc_ib_cq_comp(struct xsc_core_cq *cq)
{
	struct ib_cq *ibcq = &to_xibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

/* Async event callback: translate CQ errors into IB_EVENT_CQ_ERR. */
static void xsc_ib_cq_event(struct xsc_core_cq *xcq, enum xsc_event type)
{
	struct xsc_ib_cq *cq = container_of(xcq, struct xsc_ib_cq, xcq);
	struct xsc_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != XSC_EVENT_TYPE_CQ_ERROR) {
		xsc_ib_err(dev, "Unexpected event type %d on CQ %06x\n",
			   type, xcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = &dev->ib_dev;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

/* Address of the n-th CQE in a CQ buffer whose entries are 'size' bytes. */
static void *get_cqe_from_buf(struct xsc_ib_cq_buf *buf, int n, int size)
{
	return xsc_buf_offset(&buf->buf, n * size);
}

/* n-th CQE of this CQ, using the CQ's configured CQE size. */
static void *get_cqe(struct xsc_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n, cq->xcq.cqe_sz);
}

/*
 * Return the CQE at index n if it is currently owned by software, else
 * NULL. The owner bit toggles on every wrap of the power-of-two ring,
 * which is what the XOR against (n & cq->ibcq.cqe) checks.
 */
static void *get_sw_cqe(struct xsc_ib_cq *cq, int n)
{
	struct xsc_cqe *cqe;

	cqe = (struct xsc_cqe *)get_cqe(cq, n & (cq->ibcq.cqe - 1));

	return ((cqe->owner & XSC_CQE_OWNER_MASK) ^
		!!(n & cq->ibcq.cqe)) ? NULL : cqe;
}

/* Fill a work completion for a successful requester-side CQE. */
static inline void handle_good_req(struct ib_wc *wc,
				   struct xsc_cqe *cqe,
				   u8 opcode)
{
	wc->opcode = xsc_cqe_opcode[opcode];
	/* Only RDMA READ reports a byte count on the requester side. */
	if (opcode == XSC_OPCODE_RDMA_REQ_READ)
		wc->byte_len = RD_LE_32(cqe->msg_len);
	wc->status = IB_WC_SUCCESS;
}

/*
 * Fill a work completion for a responder-side CQE and consume one RQ WQE
 * (advances wq->tail).
 */
static void handle_responder(struct ib_wc *wc, struct xsc_cqe *cqe,
			     struct xsc_ib_qp *qp, u8 opcode)
{
	struct xsc_ib_wq *wq = &qp->rq;
	u16 idx;

	wc->byte_len = RD_LE_32(cqe->msg_len);
	wc->opcode = xsc_cqe_opcode[opcode];
	wc->status = IB_WC_SUCCESS;

	idx = wq->tail & (wq->wqe_cnt - 1);
	wc->wr_id = wq->wrid[idx];
	++wq->tail;
}

/* Byte offset into the QP buffer. */
static void *get_wqe(struct xsc_ib_qp *qp, int offset)
{
	return xsc_buf_offset(&qp->buf, offset);
}

/* n-th receive WQE within the QP buffer. */
static void *get_recv_wqe(struct xsc_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

/* n-th segment inside a WQE (segments are XSC_BASE_WQE_SHIFT-sized). */
static void *get_seg_wqe(void *first, int n)
{
	return first + (n << XSC_BASE_WQE_SHIFT);
}

/*
 * Handle a received MAD on the responder side: parse the raw Ethernet/
 * IP/UDP/BTH/DETH frame in the receive buffer and rewrite it in place as
 * the GRH + MAD layout the IB MAD layer expects (continued below).
 */
static void xsc_handle_rdma_mad_resp_recv(struct xsc_ib_cq *cq,
					  struct xsc_ib_qp **cur_qp,
					  struct ib_wc *wc,
					  struct xsc_cqe *cqe,
					  u8 opcode)
{
	struct xsc_ib_dev *dev = to_mdev(cq->ibcq.device);
	void *recv;
	struct xsc_wqe_data_seg *data_seg;
	struct iphdr *ip4h = NULL;
	struct ipv6hdr *ip6h;
	struct udphdr *udph;
	struct ib_unpacked_eth *eth;
	struct ib_unpacked_vlan *vlan;
	struct ib_grh *grh;
	struct ib_mad *mad;
	struct rxe_bth *bth;
	struct rxe_deth *deth;
	unsigned int pading_sz = 0;	/* [sic] "padding" */
	struct xsc_ib_wq *wq;
	int idx;
	u16 eth_type;
	void *l3_start;

	wq = &(*cur_qp)->rq;
	idx = wq->tail & (wq->wqe_cnt - 1);

	handle_responder(wc, cqe, *cur_qp, opcode);

	data_seg = get_seg_wqe(get_recv_wqe(*cur_qp, idx), 0);
	recv = xsc_ib_recv_mad_sg_virt_addr(&dev->ib_dev, wc, data_seg->va);

	/* L2: plain or 802.1Q-tagged Ethernet. */
	eth = (struct ib_unpacked_eth *)recv;
	grh = (struct ib_grh *)recv;
	if (eth->type == htons(ETH_P_8021Q)) {
		vlan = (struct ib_unpacked_vlan *)(eth + 1);
		eth_type = ntohs(vlan->type);
		l3_start = vlan + 1;

		wc->vlan_id = ntohs(vlan->tag) & 0x0fff;
		wc->sl = (ntohs(vlan->tag) >> 13) & 0x7;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		eth_type = ntohs(eth->type);
		l3_start = eth + 1;
	}

	/* L3/L4: IPv4 or IPv6, then UDP -> BTH -> DETH -> MAD payload. */
	if (eth_type == ETH_P_IP) {
		ip4h = (struct iphdr *)l3_start;
		udph = (struct udphdr *)(ip4h + 1);
	} else {
		ip6h = (struct ipv6hdr *)l3_start;
		udph = (struct udphdr *)(ip6h + 1);
	}
	bth = (struct rxe_bth *)(udph + 1);
	deth = (struct rxe_deth *)(bth + 1);
	mad = (struct ib_mad *)(deth + 1);

	/*
	 * Rewrite the buffer in place as GRH + MAD: an IPv4 header is
	 * front-padded with zeros up to GRH size, IPv6 maps directly.
	 */
	if (eth_type == ETH_P_IP) {
		pading_sz = sizeof(*grh) - sizeof(*ip4h);
		memmove((u8 *)(grh + 1) - sizeof(*ip4h), ip4h, sizeof(*ip4h));
		memset(grh, 0, pading_sz);
	} else {
		memmove(grh, ip6h, sizeof(*ip6h));
	}
	memmove(grh + 1, mad, sizeof(*mad));

	wc->wc_flags |= IB_WC_GRH;

	xsc_ib_dbg(dev, "recv cqe idx:%u, len:%u\n", wq->tail, wc->byte_len);
	xsc_ib_info(dev, "qp[%d] recv MAD packet, msg_len=%d\n", (*cur_qp)->xqp.qpn, wc->byte_len);
	wc->status = IB_WC_SUCCESS;
}

/*
 * Consume one CQE and translate it into an ib_wc.
 *
 * *cur_qp caches the QP of the previous CQE so consecutive completions on
 * the same QP skip the lookup.
 *
 * Returns 0 on a consumed completion, -EAGAIN when the CQ is empty, or
 * -EINVAL when the CQE references an unknown QPN. Advances cons_index
 * even on the -EINVAL path.
 */
static int xsc_poll_one(struct xsc_ib_cq *cq,
			struct xsc_ib_qp **cur_qp,
			struct ib_wc *wc)
{
	struct xsc_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct xsc_core_qp *xqp;
	struct xsc_ib_wq *wq;
	u8 opcode;
	u32 qpn;
	int idx;
	struct xsc_cqe *cqe;
	u32 *p = NULL;

	cqe = get_sw_cqe(cq, cq->xcq.cons_index);
	if (!cqe)
		return -EAGAIN;

	++cq->xcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	p = (u32 *)cqe;

	qpn = cqe->qp_id;
	qpn = le32_to_cpu(qpn);
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		xqp = __xsc_qp_lookup(dev->xdev, qpn);
		if (unlikely(!xqp)) {
			xsc_ib_warn(dev, "CQE@CQ %d for unknown QPN %d\n",
				    cq->xcq.cqn, qpn);
			return -EINVAL;
		}

		*cur_qp = to_xibqp(xqp);
	}

	memset(wc, 0, sizeof(*wc));
	wc->qp = &(*cur_qp)->ibqp;
	opcode = xsc_get_cqe_opcode(cqe);
	switch (opcode) {
	case XSC_OPCODE_RDMA_REQ_SEND:
	case XSC_OPCODE_RDMA_REQ_WRITE:
	case XSC_OPCODE_RDMA_REQ_READ:
	case XSC_OPCODE_RDMA_MAD_REQ_SEND:
		/* Requester completion: map the CQE's wqe_id to an SQ slot. */
		wq = &(*cur_qp)->sq;
		idx = cqe->wqe_id >> (wq->wqe_shift - XSC_BASE_WQE_SHIFT);
		idx &= (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe, opcode);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		xsc_ib_dbg(dev, "wqeid:%u, wq tail:%u qpn:%u\n", idx, wq->tail, qpn);
		wc->status = IB_WC_SUCCESS;
		break;
	case XSC_OPCODE_RDMA_RSP_RECV:
		wq = &(*cur_qp)->rq;
		handle_responder(wc, cqe, *cur_qp, opcode);
		xsc_ib_dbg(dev, "recv cqe idx:%u, len:%u, qpn:%u\n", wq->tail, wc->byte_len, qpn);
		wc->status = IB_WC_SUCCESS;
		break;

	case XSC_OPCODE_RDMA_MAD_RSP_RECV:
		xsc_ib_dbg(dev, "recv MAD, qpn:%u\n", qpn);
		xsc_handle_rdma_mad_resp_recv(cq, cur_qp, wc, cqe, opcode);
		break;

	default:
		/*
		 * NOTE(review): this dump prints six words but skips p[4]
		 * and includes p[6] — confirm against the CQE layout; a
		 * contiguous p[0]..p[5] dump looks more likely intended.
		 */
		xsc_ib_err(dev, "completion error\n%08x %08x %08x %08x %08x %08x\n",
			   p[0], p[1], p[2], p[3], p[5], p[6]);
		wc->status = IB_WC_GENERAL_ERR;
		wc->wr_id = 0;
		break;
	}

	return 0;
}

/*
 * ib_poll_cq entry point: drain up to num_entries completions under the
 * CQ lock, then ring the consumer-index doorbell only if anything was
 * actually consumed (continued below).
 */
int xsc_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct xsc_ib_cq *cq = to_xcq(ibcq);
	struct xsc_core_cq *xcq = &cq->xcq;
	struct xsc_ib_qp *cur_qp = NULL;
	int npolled = 0;
	int err = 0;
	unsigned long flags;
	u32 next_cid;

	spin_lock_irqsave(&cq->lock, flags);
	next_cid = xcq->cons_index;

	for (npolled = 0; npolled <
num_entries; npolled++) { + err = xsc_poll_one(cq, &cur_qp, wc + npolled); + if (err) + break; + } + + /* make sure cqe read out before update ci */ + rmb(); + + if (next_cid != xcq->cons_index) + xsc_cq_set_ci(xcq); + + spin_unlock_irqrestore(&cq->lock, flags); + + return npolled; +} + +int xsc_cqe_is_empty(struct xsc_ib_cq *cq) +{ + struct xsc_cqe *cqe = get_sw_cqe(cq, cq->xcq.cons_index); + + if (!cqe) + return 1; + + return 0; +} + +int xsc_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) +{ +#ifdef MSIX_SUPPORT + union xsc_cq_doorbell db; + struct xsc_ib_cq *xcq = to_xcq(ibcq); + struct xsc_core_cq *cq = &xcq->xcq; + int ret = 0; + unsigned long irq_flags; + + spin_lock_irqsave(&xcq->lock, irq_flags); + db.val = 0; + db.cq_next_cid = cq->cons_index; + db.cq_id = cq->cqn; + if (flags & IB_CQ_NEXT_COMP) + db.arm = 0; + else if (flags & IB_CQ_SOLICITED) + db.arm = 1;/* arm next:0 arm solicited:1 */ + + if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && (!xsc_cqe_is_empty(xcq))) { + ret = 1; + goto out; + } + + /* make sure val write to memory done */ + wmb(); + writel(db.val, REG_ADDR(cq->dev, cq->arm_db)); +out: + spin_unlock_irqrestore(&xcq->lock, irq_flags); + return ret; +#else + if ((flags & IB_CQ_REPORT_MISSED_EVENTS)) + return 1; + return 0; +#endif +} + +static int alloc_cq_buf(struct xsc_ib_dev *dev, struct xsc_ib_cq_buf *buf, + int nent, int cqe_size) +{ + int err; + + err = xsc_buf_alloc(dev->xdev, nent * cqe_size, + PAGE_SIZE, &buf->buf); + if (err) + return err; + + buf->cqe_size = cqe_size; + + return 0; +} + +static void free_cq_buf(struct xsc_ib_dev *dev, struct xsc_ib_cq_buf *buf) +{ + xsc_buf_free(dev->xdev, &buf->buf); +} + +static int create_cq_user(struct xsc_ib_dev *dev, struct ib_udata *udata, + struct ib_ucontext *context, struct xsc_ib_cq *cq, + int entries, struct xsc_create_cq_mbox_in **cqb, + int *cqe_size, int *index, int *inlen) +{ + struct xsc_ib_create_cq ucmd; + int page_shift; + int npages; + int ncont; + int err; + int 
log_cq_sz; + int hw_npages; + + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) + return -EFAULT; + + *cqe_size = ucmd.cqe_size; + + cq->buf.umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr, + entries * ucmd.cqe_size, + IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(cq->buf.umem)) { + err = PTR_ERR(cq->buf.umem); + return err; + } + + xsc_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, + &ncont, NULL); + if (ncont != npages) { + xsc_ib_dbg(dev, "bad page_shift:%d, ncont:%d\n", page_shift, ncont); + /* amber doesn't support compound pages */ + page_shift = PAGE_SHIFT; + ncont = npages; + xsc_ib_dbg(dev, "overwrite to page_shift:%d, ncont:%d\n", page_shift, ncont); + } + log_cq_sz = ilog2(entries); + hw_npages = DIV_ROUND_UP((1 << log_cq_sz) * sizeof(struct xsc_cqe), PAGE_SIZE_4K); + xsc_ib_info(dev, "addr 0x%llx, entries %d, size %u, npages %d, page_shift %d, ncont %d, hw_npages %d\n", + ucmd.buf_addr, entries, ucmd.cqe_size, npages, page_shift, ncont, hw_npages); + + *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * hw_npages; + *cqb = xsc_vzalloc(*inlen); + if (!*cqb) { + err = -ENOMEM; + goto err_umem; + } + xsc_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, hw_npages, true); + (*cqb)->ctx.pa_num = cpu_to_be16(hw_npages); + + return 0; + +err_umem: + ib_umem_release(cq->buf.umem); + return err; +} + +static void destroy_cq_user(struct xsc_ib_cq *cq, struct ib_udata *udata) +{ + ib_umem_release(cq->buf.umem); +} + +static int create_cq_kernel(struct xsc_ib_dev *dev, struct xsc_ib_cq *cq, + int entries, int cqe_size, + struct xsc_create_cq_mbox_in **cqb, + int *index, int *inlen) +{ + int err; + int i = 0; + struct xsc_cqe *cqe = NULL; + int hw_npages; + + cq->xcq.cqe_sz = cqe_size; + + err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size); + if (err) + return err; + + for (i = 0; i < entries; i++) { + cqe = (struct xsc_cqe *)get_cqe(cq, i); + cqe->owner = 1; + } + + hw_npages = DIV_ROUND_UP(entries * cqe_size, PAGE_SIZE_4K); + *inlen = 
sizeof(**cqb) + sizeof(*(*cqb)->pas) * hw_npages; + *cqb = xsc_vzalloc(*inlen); + if (!*cqb) { + err = -ENOMEM; + goto err_buf; + } + xsc_fill_page_array(&cq->buf.buf, (*cqb)->pas, hw_npages); + (*cqb)->ctx.pa_num = cpu_to_be16(hw_npages); + + return 0; + +err_buf: + free_cq_buf(dev, &cq->buf); + return err; +} + +static void destroy_cq_kernel(struct xsc_ib_dev *dev, struct xsc_ib_cq *cq) +{ + free_cq_buf(dev, &cq->buf); +} + +xsc_ib_create_cq_def() +{ + struct ib_device *ibdev = ibcq->device; + int entries = attr->cqe; + int vector = attr->comp_vector; + struct xsc_create_cq_mbox_in *cqb = NULL; + struct xsc_ib_dev *dev = to_mdev(ibdev); + struct xsc_ib_cq *cq; + int index; + int inlen; + int cqe_size; + int irqn; + int err; + unsigned int eqn; + + entries = roundup_pow_of_two(entries); + + xsc_ib_info(dev, "entries:%d, vector:%d, max_cqes:%d\n", entries, vector, + dev->xdev->caps.max_cqes); + + if (entries > dev->xdev->caps.max_cqes) + entries = dev->xdev->caps.max_cqes; + cq = to_xcq(ibcq); + cq->ibcq.cqe = entries; + mutex_init(&cq->resize_mutex); + spin_lock_init(&cq->lock); + cq->resize_buf = NULL; + cq->resize_umem = NULL; + + if (udata) { + err = create_cq_user(dev, udata, NULL, cq, entries, + &cqb, &cqe_size, &index, &inlen); + if (err) + goto err_create; + } else { + cqe_size = sizeof(struct xsc_cqe); + err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, &index, &inlen); + if (err) + goto err_create; + } + + cq->cqe_size = cqe_size; + cqb->ctx.log_cq_sz = ilog2(entries); + cqb->ctx.glb_func_id = cpu_to_be16(dev->xdev->glb_func_id); + + err = xsc_vector2eqn(dev->xdev, vector, &eqn, &irqn); + if (err) + goto err_cqb; + + cqb->ctx.eqn = eqn; + cqb->ctx.eqn = cpu_to_be16(cqb->ctx.eqn); + + err = xsc_core_create_cq(dev->xdev, &cq->xcq, cqb, inlen); + if (err) + goto err_cqb; + + xsc_ib_info(dev, "succeeded to create cqn %d, vector=%d, cq_sz=%d, eqn=%d\n", + cq->xcq.cqn, vector, entries, eqn); + cq->xcq.irqn = irqn; + cq->xcq.comp = xsc_ib_cq_comp; + 
cq->xcq.event = xsc_ib_cq_event; + + if (udata) { + if (ib_copy_to_udata(udata, &cq->xcq.cqn, sizeof(__u32))) { + err = -EFAULT; + goto err_cmd; + } + } + + xsc_vfree(cqb); + + return 0; + +err_cmd: + xsc_core_destroy_cq(dev->xdev, &cq->xcq); + +err_cqb: + xsc_vfree(cqb); + if (udata) + destroy_cq_user(cq, udata); + else + destroy_cq_kernel(dev, cq); + +err_create: + return RET_VALUE(err); +} + +xsc_ib_destroy_cq_def() +{ + struct xsc_ib_dev *dev = to_mdev(cq->device); + struct xsc_ib_cq *xcq = to_xcq(cq); + + xsc_core_destroy_cq(dev->xdev, &xcq->xcq); + if (udata) + destroy_cq_user(xcq, udata); + else + destroy_cq_kernel(dev, xcq); + + return 0; +} + +static int is_equal_rsn(struct xsc_cqe *cqe, u32 rsn) +{ + u32 qpn = le32_to_cpu(cqe->qp_id); + return rsn == qpn; +} + +void __xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 rsn) +{ + struct xsc_cqe *cqe, *dest; + u32 prod_index; + int nfreed = 0; + u8 owner_bit; + + if (!cq) + return; + + /* First we need to find the current producer index, so we + * know where to start cleaning from. It doesn't matter if HW + * adds new entries after this loop -- the QP we're worried + * about is already in RESET, so the new entries won't come + * from our QP and therefore don't need to be checked. + */ + for (prod_index = cq->xcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++) + if (prod_index == cq->xcq.cons_index + cq->ibcq.cqe) + break; + + /* Now sweep backwards through the CQ, removing CQ entries + * that match our QP by copying older entries on top of them. 
+ */ + while ((int)(--prod_index) - (int)cq->xcq.cons_index >= 0) { + cqe = (struct xsc_cqe *)get_cqe(cq, prod_index & (cq->ibcq.cqe - 1)); + if (is_equal_rsn(cqe, rsn)) { + ++nfreed; + } else if (nfreed) { + dest = (struct xsc_cqe *)get_cqe(cq, (prod_index + nfreed) & + (cq->ibcq.cqe - 1)); + owner_bit = dest->owner & XSC_CQE_OWNER_MASK; + memcpy(dest, cqe, cq->xcq.cqe_sz); + dest->owner = owner_bit | + (dest->owner & ~XSC_CQE_OWNER_MASK); + } + } + + if (nfreed) { + cq->xcq.cons_index += nfreed; + /* Make sure update of buffer contents is done before + * updating consumer index. + */ + wmb(); + xsc_cq_set_ci(&cq->xcq); + } +} + +void xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 qpn) +{ + if (!cq) + return; + + spin_lock_irq(&cq->lock); + __xsc_ib_cq_clean(cq, qpn); + spin_unlock_irq(&cq->lock); +} diff --git a/drivers/infiniband/hw/xsc/devx.c b/drivers/infiniband/hw/xsc/devx.c new file mode 100644 index 000000000000..fca43076bae1 --- /dev/null +++ b/drivers/infiniband/hw/xsc/devx.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ +#include +#include +#include +#include +#include +#include "common/driver.h" +#include "xsc_ib.h" +#define UVERBS_MODULE_NAME xsc_ib +#include +#include "user.h" + +static struct xsc_ib_ucontext *devx_uattrs2uctx(struct uverbs_attr_bundle *attrs) +{ + return to_xucontext(ib_uverbs_get_ucontext(attrs)); +} + +static bool devx_is_general_cmd(void *in) +{ + struct xsc_inbox_hdr *hdr = + (struct xsc_inbox_hdr *)in; + u16 opcode = be16_to_cpu(hdr->opcode); + + switch (opcode) { + case XSC_CMD_OP_QUERY_HCA_CAP: + return true; + default: + return false; + } +} + +static int UVERBS_HANDLER(XSC_IB_METHOD_DEVX_OTHER)(struct uverbs_attr_bundle *attrs) +{ + struct xsc_ib_ucontext *c; + struct xsc_ib_dev *dev; + void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_IN); + int cmd_out_len = uverbs_attr_get_len(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_OUT); + void *cmd_out; + int err; + + c = devx_uattrs2uctx(attrs); + if (IS_ERR(c)) + return PTR_ERR(c); + dev = to_mdev(c->ibucontext.device); + + if (!devx_is_general_cmd(cmd_in)) + return -EINVAL; + + cmd_out = uverbs_zalloc(attrs, cmd_out_len); + if (IS_ERR(cmd_out)) + return PTR_ERR(cmd_out); + + err = xsc_cmd_exec(dev->xdev, cmd_in, + uverbs_attr_get_len(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_IN), + cmd_out, cmd_out_len); + if (err) + return err; + + return uverbs_copy_to(attrs, XSC_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out, cmd_out_len); +} + +DECLARE_UVERBS_NAMED_METHOD(XSC_IB_METHOD_DEVX_OTHER, + UVERBS_ATTR_PTR_IN(XSC_IB_ATTR_DEVX_OTHER_CMD_IN, + UVERBS_ATTR_MIN_SIZE(sizeof(struct xsc_inbox_hdr)), + UA_MANDATORY, + UA_ALLOC_AND_COPY), + UVERBS_ATTR_PTR_OUT(XSC_IB_ATTR_DEVX_OTHER_CMD_OUT, + UVERBS_ATTR_MIN_SIZE(sizeof(struct xsc_outbox_hdr)), + UA_MANDATORY)); + +DECLARE_UVERBS_GLOBAL_METHODS(XSC_IB_OBJECT_DEVX, + &UVERBS_METHOD(XSC_IB_METHOD_DEVX_OTHER)); + +const struct uverbs_object_tree_def *xsc_ib_get_devx_tree(void) +{ + return NULL; +} diff --git a/drivers/infiniband/hw/xsc/ib_peer_mem.h 
b/drivers/infiniband/hw/xsc/ib_peer_mem.h new file mode 100644 index 000000000000..b955ac53bfde --- /dev/null +++ b/drivers/infiniband/hw/xsc/ib_peer_mem.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#if !defined(IB_PEER_MEM_H) +#define IB_PEER_MEM_H + +#include "peer_mem.h" + +struct ib_peer_memory_statistics { + atomic64_t num_alloc_mrs; + atomic64_t num_dealloc_mrs; + atomic64_t num_reg_pages; + atomic64_t num_dereg_pages; + atomic64_t num_reg_bytes; + atomic64_t num_dereg_bytes; + unsigned long num_free_callbacks; +}; + +struct ib_ucontext; +struct ib_umem_ex; +struct invalidation_ctx; + +struct ib_peer_memory_client { + const struct peer_memory_client *peer_mem; + struct list_head core_peer_list; + int invalidation_required; + struct kref ref; + struct completion unload_comp; + /* lock is used via the invalidation flow */ + struct mutex lock; + struct list_head core_ticket_list; + u64 last_ticket; + struct ib_peer_memory_statistics stats; +}; + +enum ib_peer_mem_flags { + IB_PEER_MEM_ALLOW = 1, + IB_PEER_MEM_INVAL_SUPP = (1 << 1), +}; + +struct core_ticket { + unsigned long key; + void *context; + struct list_head ticket_list; +}; + +struct ib_peer_memory_client *ib_get_peer_client(struct ib_ucontext *context, unsigned long addr, + size_t size, unsigned long peer_mem_flags, + void **peer_client_context); + +void ib_put_peer_client(struct ib_peer_memory_client *ib_peer_client, + void *peer_client_context); + +int ib_peer_create_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem, + struct ib_umem_ex *umem, + struct invalidation_ctx **invalidation_ctx); + +void ib_peer_destroy_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem, + struct invalidation_ctx *invalidation_ctx); + +int ib_get_peer_private_data(struct ib_ucontext *context, __u64 peer_id, + char *peer_name); +void ib_put_peer_private_data(struct ib_ucontext *context); 
+ +#endif diff --git a/drivers/infiniband/hw/xsc/ib_umem_ex.c b/drivers/infiniband/hw/xsc/ib_umem_ex.c new file mode 100644 index 000000000000..b2d57a885b65 --- /dev/null +++ b/drivers/infiniband/hw/xsc/ib_umem_ex.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ +#include +#ifndef CONFIG_INFINIBAND_PEER_MEMORY +#include "ib_peer_mem.h" +#endif + +#include +#include "ib_umem_ex.h" + +#if defined(IB_CORE_UMEM_EX_V1) +#define get_mm(umem_ctx) ((umem_ctx)->mm) +#elif defined(IB_CORE_UMEM_EX_V2) +#define get_mm(umem_ctx) ((umem_ctx)->owning_mm) +#endif + +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) +static struct ib_umem_ex *peer_umem_get(struct ib_peer_memory_client *ib_peer_mem, + struct ib_umem_ex *umem_ex, unsigned long addr, + int dmasync, unsigned long peer_mem_flags) +{ + int ret; + const struct peer_memory_client *peer_mem = ib_peer_mem->peer_mem; + struct invalidation_ctx *invalidation_ctx = NULL; + struct ib_umem *umem = (struct ib_umem *)umem_ex; + + umem_ex->ib_peer_mem = ib_peer_mem; + if (peer_mem_flags & IB_PEER_MEM_INVAL_SUPP) { + ret = ib_peer_create_invalidation_ctx(ib_peer_mem, umem_ex, &invalidation_ctx); + if (ret) + goto end; + } + + /* + * We always request write permissions to the pages, to force breaking of any CoW + * during the registration of the MR. For read-only MRs we use the "force" flag to + * indicate that CoW breaking is required but the registration should not fail if + * referencing read-only areas. + */ + ret = peer_mem->get_pages(addr, umem->length, + 1, !umem->writable, + &umem->sg_head, + umem_ex->peer_mem_client_context, + invalidation_ctx ? 
+ invalidation_ctx->context_ticket : 0); + if (ret) + goto out; + + ret = peer_mem->dma_map(&umem->sg_head, + umem_ex->peer_mem_client_context, + umem->context->device->dma_device, + dmasync, + &umem->nmap); + if (ret) + goto put_pages; + + atomic64_add(umem->nmap, &ib_peer_mem->stats.num_reg_pages); + atomic64_add(umem->nmap * BIT(PAGE_SHIFT), &ib_peer_mem->stats.num_reg_bytes); + atomic64_inc(&ib_peer_mem->stats.num_alloc_mrs); + return umem_ex; + +put_pages: + peer_mem->put_pages(&umem->sg_head, umem_ex->peer_mem_client_context); +out: + if (invalidation_ctx) + ib_peer_destroy_invalidation_ctx(ib_peer_mem, invalidation_ctx); +end: + ib_put_peer_client(ib_peer_mem, umem_ex->peer_mem_client_context); + // renamed in different kernel + mmdrop(get_mm(umem)); + kfree(umem_ex); + return ERR_PTR(ret); +} +#endif + +struct ib_umem_ex *ib_umem_ex(struct ib_umem *umem) +{ + struct ib_umem_ex *ret_umem; + + if (!umem) + return ERR_PTR(-EINVAL); + +#ifdef CONFIG_INFINIBAND_PEER_MEMORY + ret_umem = (struct ib_umem_ex *)umem; +#else + ret_umem = kzalloc(sizeof(*ret_umem), GFP_KERNEL); + if (!ret_umem) + return ERR_PTR(-ENOMEM); + + ret_umem->umem = *umem; + kfree(umem); +#endif + return ret_umem; +} + +struct ib_umem_ex *ib_client_umem_get(struct ib_ucontext *context, + unsigned long addr, + size_t size, int access, + int dmasync, u8 *peer_exists) +{ +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) + struct ib_peer_memory_client *peer_mem_client; + struct ib_umem_ex *umem_ex; + struct ib_umem *umem; + + /* + * If the combination of the addr and size requested for this memory + * region causes an integer overflow, return error. 
+ */ + if (((addr + size) < addr) || + PAGE_ALIGN(addr + size) < (addr + size)) + return ERR_PTR(-EINVAL); + + if (!can_do_mlock()) + return ERR_PTR(-EPERM); + + umem_ex = kzalloc(sizeof(*umem_ex), GFP_KERNEL); + if (!umem_ex) + return ERR_PTR(-ENOMEM); + umem = &umem_ex->umem; + + umem->context = context; + umem->length = size; + umem->address = addr; + umem->writable = ib_access_writable(access); + get_mm(umem) = current->mm; + +#if defined(IB_CORE_UMEM_EX_V1) + umem->odp_data = NULL; +#endif + + mmgrab(get_mm(umem)); + + peer_mem_client = ib_get_peer_client(context, addr, size, + IB_PEER_MEM_ALLOW | IB_PEER_MEM_INVAL_SUPP, + &umem_ex->peer_mem_client_context); + if (peer_mem_client) { + *peer_exists = 1; + umem->hugetlb = 0; + return peer_umem_get(peer_mem_client, umem_ex, addr, dmasync, + IB_PEER_MEM_ALLOW | IB_PEER_MEM_INVAL_SUPP); + } + + return ERR_PTR(-ENOMEM); +#else + return NULL; +#endif +} + +void ib_umem_ex_release(struct ib_umem_ex *umem_ex) +{ + struct ib_umem *umem = (struct ib_umem *)umem_ex; +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) + struct ib_peer_memory_client *ib_peer_mem = umem_ex->ib_peer_mem; + const struct peer_memory_client *peer_mem; + struct invalidation_ctx *invalidation_ctx; + + if (ib_peer_mem) { + peer_mem = ib_peer_mem->peer_mem; + invalidation_ctx = umem_ex->invalidation_ctx; + + if (invalidation_ctx) + ib_peer_destroy_invalidation_ctx(ib_peer_mem, invalidation_ctx); + + peer_mem->dma_unmap(&umem->sg_head, + umem_ex->peer_mem_client_context, + umem->context->device->dma_device); + peer_mem->put_pages(&umem->sg_head, + umem_ex->peer_mem_client_context); + atomic64_add(umem->nmap, &ib_peer_mem->stats.num_dereg_pages); + atomic64_add(umem->nmap * BIT(PAGE_SHIFT), + &ib_peer_mem->stats.num_dereg_bytes); + atomic64_inc(&ib_peer_mem->stats.num_dealloc_mrs); + ib_put_peer_client(ib_peer_mem, umem_ex->peer_mem_client_context); + kfree(umem_ex); + } else { + // kernel ib umem release + ib_umem_release(umem); + } 
+#else + ib_umem_release(umem); +#endif +} + +int ib_client_umem_activate_invalidation_notifier(struct ib_umem_ex *umem_ex, + umem_invalidate_func_t func, + void *cookie) +{ +#if defined(IB_CORE_UMEM_EX_V1) || defined(IB_CORE_UMEM_EX_V2) + struct invalidation_ctx *invalidation_ctx = umem_ex->invalidation_ctx; + int ret = 0; + + mutex_lock(&umem_ex->ib_peer_mem->lock); + if (invalidation_ctx->peer_invalidated) { + pr_err("ib_umem_activate_invalidation_notifier: pages were invalidated by peer\n"); + ret = -EINVAL; + goto end; + } + invalidation_ctx->func = func; + invalidation_ctx->cookie = cookie; + /* from that point any pending invalidations can be called */ +end: + mutex_unlock(&umem_ex->ib_peer_mem->lock); + return ret; +#else + return 0; +#endif +} diff --git a/drivers/infiniband/hw/xsc/ib_umem_ex.h b/drivers/infiniband/hw/xsc/ib_umem_ex.h new file mode 100644 index 000000000000..034d1c55e5aa --- /dev/null +++ b/drivers/infiniband/hw/xsc/ib_umem_ex.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_IB_UMEM_EX_H +#define XSC_IB_UMEM_EX_H + +#include + +struct ib_umem_ex; +struct invalidation_ctx; + +// ib umem ex ib_umem add peer memory support +struct ib_umem_ex { + struct ib_umem umem; +#ifndef CONFIG_INFINIBAND_PEER_MEMORY + struct ib_peer_memory_client *ib_peer_mem; + struct invalidation_ctx *invalidation_ctx; + void *peer_mem_client_context; +#endif +}; + +// expand ib_umem to ib_umem_ex by reallocate +struct ib_umem_ex *ib_umem_ex(struct ib_umem *umem); + +#ifndef CONFIG_INFINIBAND_PEER_MEMORY +typedef void (*umem_invalidate_func_t)(void *invalidation_cookie, + struct ib_umem_ex *umem_ex, unsigned long addr, size_t size); + +struct invalidation_ctx { + struct ib_umem_ex *umem_ex; + u64 context_ticket; + umem_invalidate_func_t func; + void *cookie; + int peer_callback; + int inflight_invalidation; + int peer_invalidated; + struct completion comp; +}; +#endif + +struct ib_umem_ex *ib_client_umem_get(struct ib_ucontext *context, + unsigned long addr, size_t size, int access, + int dmasync, u8 *peer_exists); + +void ib_umem_ex_release(struct ib_umem_ex *umem_ex); + +int ib_client_umem_activate_invalidation_notifier(struct ib_umem_ex *umem_ex, + umem_invalidate_func_t func, + void *cookie); +#endif diff --git a/drivers/infiniband/hw/xsc/main.c b/drivers/infiniband/hw/xsc/main.c new file mode 100644 index 000000000000..9381b9fc4266 --- /dev/null +++ b/drivers/infiniband/hw/xsc/main.c @@ -0,0 +1,1201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifdef HAVE_GENERIC_KMAP_TYPE +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/xsc_cmd.h" +#include "common/driver.h" +#include "common/xsc_lag.h" + +#include +#include +#include + +#include "user.h" +#include "xsc_ib.h" +#include "xsc_rdma_ctrl.h" + +#define DRIVER_NAME "xsc_ib" +#define DRIVER_VERSION "1.0" +#define DRIVER_RELDATE "Jan 2022" + +MODULE_DESCRIPTION("Yunsilicon Amber HCA IB driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRIVER_VERSION); + +static char xsc_version[] = + DRIVER_NAME ": Yunsilicon Infiniband driver" + DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; + +static int xsc_ib_query_device(struct ib_device *ibdev, + struct ib_device_attr *props, + struct ib_udata *udata) +{ + struct xsc_ib_dev *dev = to_mdev(ibdev); + int max_rq_sg; + int max_sq_sg; + u64 flags; + struct xsc_ib_query_device_resp resp; + size_t resp_len; + u64 max_tso; + int err = -ENOMEM; + union xsc_ib_fw_ver fw_ver; + + memset(&resp, 0, sizeof(resp)); + memset(props, 0, sizeof(*props)); + + resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length); + /*check param*/ + if (udata->outlen && udata->outlen < resp_len) + return -EINVAL; + + if (udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen)) + return -EINVAL; + + resp.response_length = resp_len; + + fw_ver.data = 0; + fw_ver.s.ver_major = dev->xdev->fw_version_major; + fw_ver.s.ver_minor = dev->xdev->fw_version_minor; + fw_ver.s.ver_patch = dev->xdev->fw_version_patch; + fw_ver.s.ver_tweak = dev->xdev->fw_version_tweak; + props->fw_ver = fw_ver.data; + + props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | + IB_DEVICE_PORT_ACTIVE_EVENT | + IB_DEVICE_SYS_IMAGE_GUID | + IB_DEVICE_RC_RNR_NAK_GEN; + props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK; + props->kernel_cap_flags |= IBK_LOCAL_DMA_LKEY; + + flags = dev->xdev->caps.flags; + if (flags & 
XSC_DEV_CAP_FLAG_BAD_PKEY_CNTR) + props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; + if (flags & XSC_DEV_CAP_FLAG_BAD_QKEY_CNTR) + props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; + if (flags & XSC_DEV_CAP_FLAG_APM) + props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; + if (flags & XSC_DEV_CAP_FLAG_XRC) + props->device_cap_flags |= IB_DEVICE_XRC; + props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; + + props->page_size_cap = dev->xdev->caps.min_page_sz; + props->max_mr_size = (1 << dev->xdev->caps.log_max_mtt) * PAGE_SIZE; + props->max_qp = 1 << dev->xdev->caps.log_max_qp; + props->max_qp_wr = (32 * 1024); /* hack for GPFS */ + max_rq_sg = dev->xdev->caps.max_rq_desc_sz / sizeof(struct xsc_wqe_data_seg); + max_sq_sg = (dev->xdev->caps.max_sq_desc_sz - sizeof(struct xsc_wqe_ctrl_seg_2)) / + sizeof(struct xsc_wqe_data_seg_2); + + props->max_send_sge = dev->xdev->caps.send_ds_num - XSC_CTRL_SEG_NUM - + XSC_RADDR_SEG_NUM; + props->max_recv_sge = dev->xdev->caps.recv_ds_num; + props->max_sge_rd = 1;/*max sge per read wqe*/ + props->max_cq = 1 << dev->xdev->caps.log_max_cq; + props->max_cqe = dev->xdev->caps.max_cqes - 1; + props->max_mr = 1 << dev->xdev->caps.log_max_mkey; + props->max_pd = 1 << dev->xdev->caps.log_max_pd; + props->max_qp_rd_atom = dev->xdev->caps.max_ra_req_qp; + props->max_qp_init_rd_atom = dev->xdev->caps.max_ra_res_qp; + props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; + props->max_srq = + dev->xdev->caps.log_max_srq ? (1 << dev->xdev->caps.log_max_srq) : 0; + props->max_srq_wr = dev->xdev->caps.max_srq_wqes - 1; + props->max_srq_sge = dev->xdev->caps.log_max_srq ? (max_rq_sg - 1) : 0; + props->max_fast_reg_page_list_len = (unsigned int)-1; + props->local_ca_ack_delay = dev->xdev->caps.local_ca_ack_delay; + props->atomic_cap = dev->xdev->caps.flags & XSC_DEV_CAP_FLAG_ATOMIC ? + IB_ATOMIC_HCA : IB_ATOMIC_NONE; + props->masked_atomic_cap = IB_ATOMIC_HCA; + props->max_mcast_grp = + dev->xdev->caps.log_max_mcg ? 
(1 << dev->xdev->caps.log_max_mcg) : 0; + props->max_mcast_qp_attach = dev->xdev->caps.max_qp_mcg; + props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * + props->max_mcast_grp; + + props->sys_image_guid = dev->xdev->board_info->guid; + props->vendor_id = dev->xdev->pdev->vendor; + props->vendor_part_id = dev->xdev->pdev->device; + props->hw_ver = ((dev->xdev->chip_ver_l & 0xffff) << 16) | + (dev->xdev->hotfix_num & 0xffff); + props->max_pkeys = 0x80; + props->max_wq_type_rq = 1 << dev->xdev->caps.log_max_qp; + + props->hca_core_clock = dev->xdev->caps.hca_core_clock * 1000;//KHz + props->rss_caps.max_rwq_indirection_tables = + dev->xdev->caps.max_rwq_indirection_tables; + props->rss_caps.max_rwq_indirection_table_size = + dev->xdev->caps.max_rwq_indirection_table_size; + props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET; + + /*response tso_caps extend param*/ + if (field_avail(typeof(resp), tso_caps, udata->outlen)) { + max_tso = dev->xdev->caps.log_max_tso ? (1 << dev->xdev->caps.log_max_tso) : 0; + if (max_tso) { + resp.tso_caps.max_tso = max_tso; + resp.tso_caps.supported_qpts |= 1 << IB_QPT_RAW_PACKET; + resp.response_length += sizeof(resp.tso_caps); + } + } + + /*response rss_caps extend param*/ + if (field_avail(typeof(resp), rss_caps, udata->outlen)) { + resp.rss_caps.rx_hash_function = XSC_RX_HASH_FUNC_TOEPLITZ; + resp.rss_caps.rx_hash_fields_mask = + XSC_RX_HASH_SRC_IPV4 | + XSC_RX_HASH_DST_IPV4 | + XSC_RX_HASH_SRC_IPV6 | + XSC_RX_HASH_DST_IPV6 | + XSC_RX_HASH_SRC_PORT_TCP | + XSC_RX_HASH_DST_PORT_TCP | + XSC_RX_HASH_SRC_PORT_UDP | + XSC_RX_HASH_DST_PORT_UDP | + XSC_RX_HASH_INNER; + resp.response_length += sizeof(resp.rss_caps); + } + + /*response packet pacing caps*/ + if (field_avail(typeof(resp), packet_pacing_caps, udata->outlen)) { + resp.packet_pacing_caps.qp_rate_limit_max = + dev->xdev->caps.qp_rate_limit_max; + resp.packet_pacing_caps.qp_rate_limit_min = + dev->xdev->caps.qp_rate_limit_min; + 
resp.packet_pacing_caps.supported_qpts |= 1 << IB_QPT_RAW_PACKET; + + resp.response_length += sizeof(resp.packet_pacing_caps); + } + + /*copy response data to user*/ + if (udata->outlen) { + err = ib_copy_to_udata(udata, &resp, resp.response_length); + if (err) { + xsc_ib_err(dev, "copy response info to udata fail,err=%d\n", err); + return err; + } + } + + return 0; +} + +void xsc_calc_link_info(struct xsc_core_device *xdev, + struct ib_port_attr *props) +{ + switch (xsc_get_link_speed(xdev)) { + case MODULE_SPEED_10G: + props->active_speed = XSC_RDMA_LINK_SPEED_10GB; + props->active_width = 1; + break; + case MODULE_SPEED_25G: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 1; + break; + case MODULE_SPEED_40G_R4: + props->active_speed = XSC_RDMA_LINK_SPEED_10GB; + props->active_width = 2; + break; + case MODULE_SPEED_50G_R: + props->active_speed = XSC_RDMA_LINK_SPEED_50GB; + props->active_width = 1; + break; + case MODULE_SPEED_50G_R2: + props->active_speed = XSC_RDMA_LINK_SPEED_50GB; + props->active_width = 1; + break; + case MODULE_SPEED_100G_R2: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 2; + break; + case MODULE_SPEED_100G_R4: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 2; + break; + case MODULE_SPEED_200G_R4: + props->active_speed = XSC_RDMA_LINK_SPEED_50GB; + props->active_width = 2; + break; + case MODULE_SPEED_200G_R8: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 4; + break; + case MODULE_SPEED_400G_R8: + props->active_speed = XSC_RDMA_LINK_SPEED_50GB; + props->active_width = 4; + break; + default: + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + props->active_width = 1; + break; + } +} + +static enum rdma_link_layer xsc_ib_port_link_layer(struct ib_device *ibdev, u32 port) +{ + return IB_LINK_LAYER_ETHERNET; +} + +int xsc_ib_query_port(struct ib_device *ibdev, u32 port, + struct ib_port_attr *props) +{ + struct xsc_ib_dev *dev = 
to_mdev(ibdev); + struct net_device *ndev = dev->netdev; + struct xsc_core_device *xdev = dev->xdev; + + if (port < 1 || port > xdev->caps.num_ports) { + xsc_ib_warn(dev, "invalid port number %d\n", port); + return -EINVAL; + } + + memset(props, 0, sizeof(*props)); + + props->state = IB_PORT_ACTIVE; + props->max_mtu = IB_MTU_4096; + props->active_mtu = min(props->max_mtu, xsc_net_to_ib_mtu(ndev->mtu)); + props->gid_tbl_len = 256; + props->port_cap_flags = 0x4010000; + props->max_msg_sz = 0x40000000; + props->bad_pkey_cntr = 0; + props->qkey_viol_cntr = 0; + props->pkey_tbl_len = 1; + props->lid = 0; + props->sm_lid = 0; + props->lmc = 0; + props->max_vl_num = 0; + props->sm_sl = 0; + props->subnet_timeout = 0; + props->init_type_reply = 0; + if (!is_support_rdma(xdev)) { + props->active_width = 1; + props->active_speed = XSC_RDMA_LINK_SPEED_25GB; + } else { + xsc_calc_link_info(xdev, props); + } + + props->phys_state = netif_carrier_ok(ndev) ? XSC_RDMA_PHY_STATE_LINK_UP : + XSC_RDMA_PHY_STATE_DISABLED; + return 0; +} + +const struct xsc_gid xsc_gid_zero; + +static int xsc_ib_query_gid(struct ib_device *ibdev, u32 port_num, + int index, union ib_gid *gid) +{ + struct xsc_ib_dev *dev = to_mdev(ibdev); + struct xsc_sgid_tbl *sgid_tbl = &dev->ib_res.sgid_tbl; + + /* Ignore port_num */ + memset(gid, 0, sizeof(*gid)); + if (index >= sgid_tbl->max) + return -EINVAL; + + memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid)); + + return 0; +} + +static int xsc_ib_del_gid(const struct ib_gid_attr *attr, void **context) +{ + int index = 0; + struct xsc_ib_dev *dev = to_mdev(attr->device); + struct xsc_gid *gid_raw = (struct xsc_gid *)&attr->gid; + struct xsc_sgid_tbl *sgid_tbl = &dev->ib_res.sgid_tbl; + + if (!sgid_tbl) + return -EINVAL; + + if (!sgid_tbl->count) + return -ENOMEM; + + for (index = 0; index < sgid_tbl->max; index++) { + if (!memcmp(&sgid_tbl->tbl[index], gid_raw, sizeof(*gid_raw))) + break; + } + + if (index == sgid_tbl->max) + return 0; + + 
memcpy(&sgid_tbl->tbl[index], &xsc_gid_zero, sizeof(xsc_gid_zero)); + sgid_tbl->count--; + xsc_ib_info(dev, "Del gid from index:%u, count:%u\n", index, sgid_tbl->count); + + return 0; +} + +int xsc_ib_add_gid(const struct ib_gid_attr *attr, void **context) +{ + int i = 0; + u32 free_idx = 0; + struct xsc_ib_dev *dev = to_mdev(attr->device); + struct xsc_gid *gid_raw = (struct xsc_gid *)&attr->gid; + struct xsc_sgid_tbl *sgid_tbl = &dev->ib_res.sgid_tbl; + + if (!sgid_tbl) + return -EINVAL; + + if (sgid_tbl->count == sgid_tbl->max) + return -ENOMEM; + + free_idx = sgid_tbl->max; + for (i = 0; i < sgid_tbl->max; i++) { + if (!memcmp(&sgid_tbl->tbl[i], gid_raw, sizeof(*gid_raw))) { + return 0; + } else if (!memcmp(&sgid_tbl->tbl[i], &xsc_gid_zero, sizeof(xsc_gid_zero)) && + free_idx == sgid_tbl->max) { + free_idx = i; + } + } + + if (free_idx == sgid_tbl->max) + return -ENOMEM; + + memcpy(&sgid_tbl->tbl[free_idx], gid_raw, sizeof(*gid_raw)); + sgid_tbl->count++; + xsc_ib_info(dev, "Add gid to index:%u, count:%u, max:%u\n", free_idx, sgid_tbl->count, + sgid_tbl->max); + + return 0; +} + +static int xsc_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, + u16 *pkey) +{ + *pkey = 0xffff; + return 0; +} + +struct xsc_reg_node_desc { + u8 desc[64]; +}; + +static int xsc_ib_modify_device(struct ib_device *ibdev, int mask, + struct ib_device_modify *props) +{ + struct xsc_ib_dev *dev = to_mdev(ibdev); + struct xsc_reg_node_desc in; + struct xsc_reg_node_desc out; + int err; + + return 0; + + if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) + return -EOPNOTSUPP; + + if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) + return 0; + + /* + * If possible, pass node desc to FW, so it can generate + * a 144 trap. If cmd fails, just ignore. 
+ */ + memcpy(&in, props->node_desc, 64); + err = xsc_core_access_reg(dev->xdev, &in, sizeof(in), &out, + sizeof(out), XSC_REG_NODE_DESC, 0, 1); + if (err) + return err; + + memcpy(ibdev->node_desc, props->node_desc, 64); + + return err; +} + +static int xsc_ib_modify_port(struct ib_device *ibdev, u32 port, int mask, + struct ib_port_modify *props) +{ + struct xsc_ib_dev *dev = to_mdev(ibdev); + struct ib_port_attr attr; + u32 tmp; + int err; + + return 0; + + mutex_lock(&dev->cap_mask_mutex); + + err = xsc_ib_query_port(ibdev, port, &attr); + if (err) + goto out; + + tmp = (attr.port_cap_flags | props->set_port_cap_mask) & + ~props->clr_port_cap_mask; + + err = xsc_set_port_caps(dev->xdev, port, tmp); + +out: + mutex_unlock(&dev->cap_mask_mutex); + return err; +} + +xsc_ib_alloc_ucontext_def() +{ + struct ib_device *ibdev = uctx->device; + struct xsc_ib_dev *dev = to_mdev(ibdev); + struct xsc_ib_alloc_ucontext_req req; + struct xsc_ib_alloc_ucontext_resp resp; + struct xsc_ib_ucontext *context; + int err; + + if (!dev->ib_active) + return RET_VALUE(-EAGAIN); + + err = ib_copy_from_udata(&req, udata, sizeof(req)); + if (err) + return RET_VALUE(err); + + resp.qp_tab_size = 1 << dev->xdev->caps.log_max_qp; + resp.cache_line_size = L1_CACHE_BYTES; + resp.max_sq_desc_sz = dev->xdev->caps.max_sq_desc_sz; + resp.max_rq_desc_sz = dev->xdev->caps.max_rq_desc_sz; + resp.max_send_wqebb = dev->xdev->caps.max_wqes; + resp.max_recv_wr = dev->xdev->caps.max_wqes; + resp.qpm_tx_db = dev->xdev->regs.tx_db; + resp.qpm_rx_db = dev->xdev->regs.rx_db; + resp.cqm_next_cid_reg = dev->xdev->regs.complete_reg; + resp.cqm_armdb = dev->xdev->regs.complete_db; + resp.send_ds_num = dev->xdev->caps.send_ds_num; + resp.recv_ds_num = dev->xdev->caps.recv_ds_num; + resp.cmds_supp_uhw |= XSC_USER_CMDS_SUPP_UHW_QUERY_DEVICE; + + context = to_xucontext(uctx); + + INIT_LIST_HEAD(&context->db_page_list); + mutex_init(&context->db_page_mutex); + + resp.num_ports = dev->xdev->caps.num_ports; + err = 
ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (err) + goto out_ctx; + + return 0; + +out_ctx: + return RET_VALUE(err); +} + +xsc_ib_dealloc_ucontext_def() +{ +} + +static int xsc_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) +{ + struct xsc_ib_dev *dev = to_mdev(ibcontext->device); + struct xsc_core_device *xdev = dev->xdev; + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + resource_size_t reg_base; + resource_size_t reg_size = vma->vm_end - vma->vm_start; + + xsc_core_dbg(xdev, "offset:0x%lx", offset); + + if (offset == (xdev->regs.tx_db & PAGE_MASK)) + reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + + (xdev->regs.tx_db & PAGE_MASK); + else if (offset == (xdev->regs.rx_db & PAGE_MASK)) + reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + + (xdev->regs.rx_db & PAGE_MASK); + else if (offset == (xdev->regs.complete_reg & PAGE_MASK)) + reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + + (xdev->regs.complete_reg & PAGE_MASK); + else if (offset == (xdev->regs.complete_db & PAGE_MASK)) + reg_base = pci_resource_start(xdev->pdev, xdev->bar_num) + + (xdev->regs.complete_db & PAGE_MASK); + else + return -EINVAL; + + xsc_core_dbg(xdev, "regbase:0x%llx", reg_base); + + reg_base = xsc_core_is_pf(xdev) ? 
reg_base - 0xA0000000 : reg_base; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + return remap_pfn_range(vma, vma->vm_start, reg_base >> PAGE_SHIFT, + reg_size, vma->vm_page_prot); + + return 0; +} + +xsc_ib_alloc_pd_def() +{ + struct ib_device *ibdev = ibpd->device; + struct xsc_ib_alloc_pd_resp resp; + struct xsc_ib_pd *pd; + int err; + + pd = to_mpd(ibpd); + + err = xsc_core_alloc_pd(to_mdev(ibdev)->xdev, &pd->pdn); + if (err) { + kfree(pd); + return RET_VALUE(err); + } + + if (udata) { + resp.pdn = pd->pdn; + if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { + xsc_core_dealloc_pd(to_mdev(ibdev)->xdev, pd->pdn); + + return RET_VALUE(-EFAULT); + } + } else { + pd->pa_lkey = 0; + } + + return 0; +} + +xsc_ib_dealloc_pd_def() +{ + struct xsc_ib_dev *mdev = to_mdev(pd->device); + struct xsc_ib_pd *mpd = to_mpd(pd); + + xsc_core_dealloc_pd(mdev->xdev, mpd->pdn); + + return 0; +} + +static int xsc_port_immutable(struct ib_device *ibdev, u32 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE | + RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; + immutable->max_mad_size = IB_MGMT_MAD_SIZE * 2; + + return 0; +} + +static void _xsc_get_netdev(struct xsc_ib_dev *dev) +{ + struct net_device *netdev = (struct net_device *)(dev->xdev->netdev); + + dev->netdev = netdev; +} + +static struct net_device *xsc_get_netdev(struct ib_device *ibdev, u32 port_num) +{ + struct xsc_ib_dev *xsc_ib_dev = to_mdev(ibdev); + struct net_device *dev = xsc_ib_dev->netdev; + struct xsc_core_device *xdev = xsc_ib_dev->xdev; + + if (dev) { + xsc_board_lag_lock(xdev); + if (xsc_lag_is_roce(xdev)) { + struct net_device *upper = NULL; + + rcu_read_lock(); + upper = netdev_master_upper_dev_get_rcu(dev); + if (upper) { + struct net_device 
*active; + + active = bond_option_active_slave_get_rcu(netdev_priv(upper)); + if (active) + dev = active; + } + rcu_read_unlock(); + } + dev_hold(dev); + xsc_board_lag_unlock(xdev); + } + + return dev; +} + +void xsc_get_guid(const u8 *dev_addr, u8 *guid) +{ + u8 mac[ETH_ALEN]; + + /* MAC-48 to EUI-64 mapping */ + memcpy(mac, dev_addr, ETH_ALEN); + guid[0] = mac[0] ^ 2; + guid[1] = mac[1]; + guid[2] = mac[2]; + guid[3] = 0xff; + guid[4] = 0xfe; + guid[5] = mac[3]; + guid[6] = mac[4]; + guid[7] = mac[5]; +} + +static int init_node_data(struct xsc_ib_dev *dev) +{ + int err = -ENOMEM; + + strscpy(dev->ib_dev.node_desc, "xsc_node_desc", sizeof(dev->ib_dev.node_desc)); + + if (unlikely(!dev->netdev->dev_addr)) + _xsc_get_netdev(dev); + xsc_get_guid(dev->netdev->dev_addr, (u8 *)&dev->ib_dev.node_guid); + err = 0; + return err; +} + +void xsc_core_event(struct xsc_core_device *xdev, enum xsc_dev_event event, + unsigned long param) +{ + struct xsc_priv *priv = &xdev->priv; + struct xsc_device_context *dev_ctx; + unsigned long flags; + + spin_lock_irqsave(&priv->ctx_lock, flags); + + /* After xsc_detach_device, the dev_ctx->intf is still set and dev_ctx is + * still in priv->ctx_list. In this case, only notify the dev_ctx if its + * ADDED or ATTACHED bit are set. 
+ */ + list_for_each_entry(dev_ctx, &priv->ctx_list, list) { + if (dev_ctx->intf->event) + dev_ctx->intf->event(xdev, dev_ctx->context, 0, param); + } + spin_unlock_irqrestore(&priv->ctx_lock, flags); +} + +static void xsc_ib_event(struct xsc_core_device *dev, void *context, + enum xsc_dev_event event, unsigned long data) +{ + struct xsc_ib_dev *ibdev = (struct xsc_ib_dev *)context; + struct ib_event ibev; + u8 port = 0; + + switch (event) { + case XSC_DEV_EVENT_SYS_ERROR: + ibdev->ib_active = false; + ibev.event = IB_EVENT_DEVICE_FATAL; + break; + + case XSC_DEV_EVENT_PORT_UP: + ibev.event = IB_EVENT_PORT_ACTIVE; + port = *(u8 *)data; + break; + + case XSC_DEV_EVENT_PORT_DOWN: + ibev.event = IB_EVENT_PORT_ERR; + port = *(u8 *)data; + break; + + case XSC_DEV_EVENT_PORT_INITIALIZED: + /* not used by ULPs */ + return; + + case XSC_DEV_EVENT_LID_CHANGE: + ibev.event = IB_EVENT_LID_CHANGE; + port = *(u8 *)data; + break; + + case XSC_DEV_EVENT_PKEY_CHANGE: + ibev.event = IB_EVENT_PKEY_CHANGE; + port = *(u8 *)data; + break; + + case XSC_DEV_EVENT_GUID_CHANGE: + ibev.event = IB_EVENT_GID_CHANGE; + port = *(u8 *)data; + break; + + case XSC_DEV_EVENT_CLIENT_REREG: + ibev.event = IB_EVENT_CLIENT_REREGISTER; + port = *(u8 *)data; + break; + } + + ibev.device = &ibdev->ib_dev; + ibev.element.port_num = port; + + if (ibdev->ib_active) + ib_dispatch_event(&ibev); +} + +static int get_port_caps(struct xsc_ib_dev *dev) +{ + struct ib_device_attr *dprops = NULL; + struct ib_port_attr *pprops = NULL; + int err = -ENOMEM; + u32 port; + /*used to prevent coredump when insmod xsc*/ + struct ib_udata uhw = {.inlen = 0, .outlen = 0}; + + pprops = kmalloc(sizeof(*pprops), GFP_KERNEL); + if (!pprops) + goto out; + + dprops = kmalloc(sizeof(*dprops), GFP_KERNEL); + if (!dprops) + goto out; + + err = xsc_ib_query_device(&dev->ib_dev, dprops, &uhw); + if (err) { + xsc_ib_warn(dev, "query_device failed %d\n", err); + goto out; + } + + for (port = 1; port <= dev->xdev->caps.num_ports; port++) { 
+ err = xsc_ib_query_port(&dev->ib_dev, port, pprops); + if (err) { + xsc_ib_warn(dev, "query_port %d failed %d\n", port, err); + break; + } + dev->xdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys; + dev->xdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len; + xsc_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", + dprops->max_pkeys, pprops->gid_tbl_len); + } + +out: + kfree(pprops); + kfree(dprops); + + return err; +} + +static int xsc_create_dev_res(struct xsc_ib_res *ib_res) +{ + struct xsc_ib_dev *dev; + + dev = container_of(ib_res, struct xsc_ib_dev, ib_res); + ib_res->sgid_tbl.max = dev->xdev->caps.port[0].gid_table_len; + + ib_res->sgid_tbl.tbl = kcalloc(ib_res->sgid_tbl.max, sizeof(struct xsc_gid), + GFP_KERNEL); + + if (!ib_res->sgid_tbl.tbl) + return -ENOMEM; + + return 0; +} + +static void xsc_destroy_dev_res(struct xsc_ib_res *ib_res) +{ + kfree(ib_res->sgid_tbl.tbl); +} + +static int populate_specs_root(struct xsc_ib_dev *dev) +{ + const struct uverbs_object_tree_def **trees = + (const struct uverbs_object_tree_def **)dev->driver_trees; + size_t num_trees = 0; + + trees[num_trees++] = xsc_ib_get_devx_tree(); + WARN_ON(num_trees >= ARRAY_SIZE(dev->driver_trees)); + trees[num_trees] = NULL; + + return 0; +} + +static void crc_table_init(struct xsc_ib_dev *dev) +{ + u32 c, i, j; + + for (i = 0; i < 256; i++) { + c = i; + for (j = 0; j < 8; j++) { + if (c & 1) + c = 0xedb88320L ^ (c >> 1); + else + c = c >> 1; + } + dev->crc_32_table[i] = c; + } +} + +static void xsc_ib_get_dev_fw_str(struct ib_device *ibdev, char *str) +{ + struct xsc_core_device *dev = to_mdev(ibdev)->xdev; + u8 ver_major = dev->fw_version_major; + u8 ver_minor = dev->fw_version_minor; + u16 ver_patch = dev->fw_version_patch; + u32 ver_tweak = dev->fw_version_tweak; + + if (ver_tweak == 0) { + snprintf(str, IB_FW_VERSION_NAME_MAX, "v%u.%u.%u", + ver_major, ver_minor, ver_patch); + } else { + snprintf(str, IB_FW_VERSION_NAME_MAX, "v%u.%u.%u+%u", + ver_major, 
ver_minor, ver_patch, ver_tweak); + } +} + +static void xsc_ib_dev_setting(struct xsc_ib_dev *dev) +{ + dev->ib_dev.ops.owner = THIS_MODULE; + dev->ib_dev.ops.uverbs_abi_ver = XSC_IB_UVERBS_ABI_VERSION; + dev->ib_dev.ops.driver_id = (enum rdma_driver_id)RDMA_DRIVER_XSC5; + dev->ib_dev.ops.uverbs_no_driver_id_binding = 1; + dev->ib_dev.ops.query_device = xsc_ib_query_device; + dev->ib_dev.ops.query_port = xsc_ib_query_port; + dev->ib_dev.ops.query_gid = xsc_ib_query_gid; + dev->ib_dev.ops.add_gid = xsc_ib_add_gid; + dev->ib_dev.ops.del_gid = xsc_ib_del_gid; + dev->ib_dev.ops.query_pkey = xsc_ib_query_pkey; + + dev->ib_dev.ops.modify_device = xsc_ib_modify_device; + dev->ib_dev.ops.modify_port = xsc_ib_modify_port; + dev->ib_dev.ops.alloc_ucontext = xsc_ib_alloc_ucontext; + dev->ib_dev.ops.dealloc_ucontext = xsc_ib_dealloc_ucontext; + dev->ib_dev.ops.mmap = xsc_ib_mmap; + + dev->ib_dev.ops.alloc_pd = xsc_ib_alloc_pd; + dev->ib_dev.ops.dealloc_pd = xsc_ib_dealloc_pd; + dev->ib_dev.ops.create_ah = xsc_ib_create_ah; + dev->ib_dev.ops.query_ah = xsc_ib_query_ah; + dev->ib_dev.ops.destroy_ah = xsc_ib_destroy_ah; + + dev->ib_dev.ops.get_link_layer = xsc_ib_port_link_layer; + dev->ib_dev.ops.get_netdev = xsc_get_netdev; + + dev->ib_dev.ops.create_qp = xsc_ib_create_qp; + dev->ib_dev.ops.modify_qp = xsc_ib_modify_qp; + dev->ib_dev.ops.query_qp = xsc_ib_query_qp; + dev->ib_dev.ops.destroy_qp = xsc_ib_destroy_qp; + dev->ib_dev.ops.post_send = xsc_ib_post_send; + dev->ib_dev.ops.post_recv = xsc_ib_post_recv; + dev->ib_dev.ops.create_cq = xsc_ib_create_cq; + dev->ib_dev.ops.destroy_cq = xsc_ib_destroy_cq; + dev->ib_dev.ops.poll_cq = xsc_ib_poll_cq; + dev->ib_dev.ops.req_notify_cq = xsc_ib_arm_cq; + dev->ib_dev.ops.get_dma_mr = xsc_ib_get_dma_mr; + dev->ib_dev.ops.reg_user_mr = xsc_ib_reg_user_mr;//optional + dev->ib_dev.ops.dereg_mr = xsc_ib_dereg_mr; + dev->ib_dev.ops.alloc_mr = xsc_ib_alloc_mr; + dev->ib_dev.ops.map_mr_sg = xsc_ib_map_mr_sg; + + 
dev->ib_dev.ops.get_port_immutable = xsc_port_immutable; + + dev->ib_dev.ops.drain_sq = xsc_ib_drain_sq; + dev->ib_dev.ops.drain_rq = xsc_ib_drain_rq; + dev->ib_dev.ops.get_dev_fw_str = xsc_ib_get_dev_fw_str; + + dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_ah, xsc_ib_ah, ibah); + dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_cq, xsc_ib_cq, ibcq); + dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_pd, xsc_ib_pd, ibpd); + dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_ucontext, xsc_ib_ucontext, ibucontext); + dev->ib_dev.ops INIT_RDMA_OBJ_SIZE(ib_qp, xsc_ib_qp, ibqp); +} + +static void xsc_get_port_state(struct net_device *ndev, enum xsc_dev_event *ev) +{ + *ev = XSC_DEV_EVENT_PORT_DOWN; + if (netif_running(ndev) && netif_carrier_ok(ndev)) + *ev = XSC_DEV_EVENT_PORT_UP; +} + +static int xsc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + struct xsc_ib_dev *ibdev = container_of(this, struct xsc_ib_dev, nb); + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + enum xsc_dev_event ev; + u8 port = 1; + + if (ndev != ibdev->netdev) + goto done; + + xsc_ib_info(ibdev, "netdev notfiy event:%ld\n", event); + switch (event) { + case NETDEV_CHANGE: + case NETDEV_UP: + case NETDEV_DOWN: + xsc_get_port_state(ibdev->netdev, &ev); + xsc_ib_event(ibdev->xdev, ibdev, ev, (unsigned long)&port); + break; + default: + break; + } +done: + return NOTIFY_DONE; +} + +static int xsc_register_netdev_notifier(struct xsc_ib_dev *ibdev) +{ + ibdev->nb.notifier_call = xsc_netdev_event; + return register_netdevice_notifier(&ibdev->nb); +} + +static int xsc_unregister_netdev_notifier(struct xsc_ib_dev *ibdev) +{ + return unregister_netdevice_notifier(&ibdev->nb); +} + +static int init_one(struct xsc_core_device *xdev, + struct xsc_ib_dev **m_ibdev) +{ + struct xsc_ib_dev *dev; + int err; + + pr_info_once("%s", xsc_version); + + dev = (struct xsc_ib_dev *)ib_alloc_device(xsc_ib_dev, ib_dev); + if (!dev) + return -ENOMEM; + + dev->xdev = xdev; + xdev->event = xsc_core_event; + 
_xsc_get_netdev(dev); + err = get_port_caps(dev); + if (err) + goto err_free; + if (!xdev->caps.msix_enable) + dev->num_comp_vectors = 1; + else + dev->num_comp_vectors = xdev->dev_res->eq_table.num_comp_vectors; + + if (xsc_lag_is_roce(xdev)) + strscpy(dev->ib_dev.name, "xscale_bond_%d", IB_DEVICE_NAME_MAX); + else + strscpy(dev->ib_dev.name, "xscale_%d", IB_DEVICE_NAME_MAX); + + dev->ib_dev.node_type = RDMA_NODE_IB_CA; + dev->ib_dev.local_dma_lkey = 0xFF; + dev->num_ports = xdev->caps.num_ports; + dev->ib_dev.phys_port_cnt = dev->num_ports; + dev->ib_dev.num_comp_vectors = dev->num_comp_vectors; + dev->ib_dev.dev.parent = &xdev->pdev->dev; + xsc_ib_dev_setting(dev); + dev->cm_dscp = DSCP_PCP_UNSET; + dev->cm_pcp = DSCP_PCP_UNSET; + dev->force_pcp = DSCP_PCP_UNSET; + dev->force_dscp = DSCP_PCP_UNSET; + + dev->ib_dev.uverbs_cmd_mask = + (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | + (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | + (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_CREATE_AH) | + (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | + (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_REREG_MR) | + (1ull << IB_USER_VERBS_CMD_DEREG_MR) | + (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_QP) | + (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | + (1ull << IB_USER_VERBS_CMD_QUERY_QP) | + (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | + (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | + (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | + (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | + (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | + (1ull << IB_USER_VERBS_CMD_OPEN_QP); + + init_node_data(dev); + + 
mutex_init(&dev->cap_mask_mutex); + spin_lock_init(&dev->mr_lock); + + err = xsc_create_dev_res(&dev->ib_res); + if (err) + goto err_free; + + crc_table_init(dev); + + populate_specs_root(dev); + + xsc_reg_local_dma_mr(xdev); + + if (ib_register_device(&dev->ib_dev, dev->ib_dev.name, dev->xdev->device)) + goto err_rsrc; + + rdma_roce_rescan_device(&dev->ib_dev); + dev->ib_active = true; + *m_ibdev = dev; + + xdev->xsc_ib_dev = dev; + + xsc_register_netdev_notifier(dev); + + xsc_counters_init(&dev->ib_dev, xdev); + + xsc_priv_dev_init(&dev->ib_dev, xdev); + + xsc_rtt_sysfs_init(&dev->ib_dev, xdev); + + xsc_ib_sysfs_init(&dev->ib_dev, xdev); + + return 0; + +err_rsrc: + xsc_destroy_dev_res(&dev->ib_res); + +err_free: + ib_dealloc_device((struct ib_device *)dev); + + return err; +} + +static void remove_one(struct xsc_core_device *xdev, void *intf_ctx) +{ + struct xsc_ib_dev *dev = (struct xsc_ib_dev *)intf_ctx; + + xsc_rtt_sysfs_fini(xdev); + xsc_ib_sysfs_fini(&dev->ib_dev, xdev); + xsc_priv_dev_fini(&dev->ib_dev, xdev); + xsc_counters_fini(&dev->ib_dev, xdev); + xsc_unregister_netdev_notifier(dev); + ib_unregister_device(&dev->ib_dev); + ib_dealloc_device(&dev->ib_dev); +} + +static void init_iommu_state(struct xsc_ib_dev *xdev) +{ + if (xdev) { + struct iommu_domain *domain; + + xdev->iommu_state = XSC_IB_IOMMU_MAP_DISABLE; + domain = iommu_get_domain_for_dev(xdev->ib_dev.dma_device); + if (domain) { + if (domain->type & __IOMMU_DOMAIN_DMA_API) + xdev->iommu_state = XSC_IB_IOMMU_MAP_NORMAL; + } else { + /* try to allocate dma memory, if dma address is not equal to phys address, + * the iommu map is enabled, but iommu domain is unknown. 
+ */ + dma_addr_t dma_addr; + + void *tmp = dma_alloc_coherent(xdev->ib_dev.dma_device, PAGE_SIZE, + &dma_addr, GFP_KERNEL); + if (tmp) { + if (virt_to_phys(tmp) != dma_addr) + xdev->iommu_state = XSC_IB_IOMMU_MAP_UNKNOWN_DOMAIN; + dma_free_coherent(xdev->ib_dev.dma_device, PAGE_SIZE, + tmp, dma_addr); + } + } + + if (xdev->iommu_state) + xsc_ib_dbg(xdev, "ibdev supports iommu dma map, state=%d\n", + xdev->iommu_state); + else + xsc_ib_dbg(xdev, "ibdev does not support iommu dma map\n"); + } +} + +static bool xsc_need_create_ib_device(struct xsc_core_device *dev) +{ + if (xsc_get_roce_lag_xdev(dev) == dev) + return true; + + return false; +} + +static void *xsc_add(struct xsc_core_device *xpdev) +{ + struct xsc_ib_dev *m_ibdev = NULL; + int ret = -1; + + if (!xsc_need_create_ib_device(xpdev)) + return NULL; + + pr_info("add rdma driver\n"); + + ret = init_one(xpdev, &m_ibdev); + if (ret) { + pr_err("xsc ib dev add fail, ret = %d\n", ret); + return NULL; + } + + init_iommu_state(m_ibdev); + + return m_ibdev; +} + +static void xsc_remove(struct xsc_core_device *xpdev, void *context) +{ + pr_info("remove rdma driver\n"); + remove_one(xpdev, context); +} + +static struct xsc_interface xsc_interface = { + .add = xsc_add, + .remove = xsc_remove, + .event = xsc_ib_event, + .protocol = XSC_INTERFACE_PROTOCOL_IB, +}; + +int xsc_ib_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data) +{ + pr_info("xsc ib driver recv %lu event\n", action); + + if (exist_incomplete_qp_flush()) { + xsc_set_exit_flag(); + return NOTIFY_OK; + } + + xsc_remove_rdma_driver(); + + return NOTIFY_OK; +} + +struct notifier_block xsc_ib_nb = { + .notifier_call = xsc_ib_reboot_event_handler, + .next = NULL, + .priority = 2, +}; + +void xsc_remove_rdma_driver(void) +{ + xsc_rdma_ctrl_fini(); + xsc_unregister_interface(&xsc_interface); + xsc_priv_unregister_chrdev_region(); +} + +static int __init xsc_ib_init(void) +{ + int ret; + + ret = xsc_priv_alloc_chrdev_region(); + if 
(ret) + goto out; + + ret = xsc_register_interface(&xsc_interface); + if (ret) { + xsc_priv_unregister_chrdev_region(); + goto out; + } + + ret = xsc_rdma_ctrl_init(); + if (ret != 0) { + pr_err("failed to register port control node\n"); + xsc_unregister_interface(&xsc_interface); + xsc_priv_unregister_chrdev_region(); + goto out; + } + + register_reboot_notifier(&xsc_ib_nb); + + return 0; +out: + return ret; +} + +static void __exit xsc_ib_cleanup(void) +{ + unregister_reboot_notifier(&xsc_ib_nb); + xsc_remove_rdma_driver(); +} + +module_init(xsc_ib_init); +module_exit(xsc_ib_cleanup); diff --git a/drivers/infiniband/hw/xsc/mem.c b/drivers/infiniband/hw/xsc/mem.c new file mode 100644 index 000000000000..cf258aa8ea51 --- /dev/null +++ b/drivers/infiniband/hw/xsc/mem.c @@ -0,0 +1,343 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "xsc_ib.h" + +static inline int xsc_count_trailing_zeros(unsigned long x) +{ +#define COUNT_TRAILING_ZEROS_0 (-1) + + if (sizeof(x) == 4) + return ffs(x); + else + return (x != 0) ? 
__ffs(x) : COUNT_TRAILING_ZEROS_0; +} + +int xsc_find_chunk_cont_0(struct xsc_pa_chunk *chunk, + int is_first, + int is_last) +{ + static const int max_count = sizeof(int) << 3; + dma_addr_t pa, end_pa; + u64 va, end_va; + size_t length; + int start_count, end_count; + int va_start_count, va_end_count; + + pa = chunk->pa; + va = chunk->va; + length = chunk->length; + end_pa = pa + length; + end_va = va + length; + start_count = max_count; + end_count = max_count; + + if (!is_first) { + start_count = xsc_count_trailing_zeros((unsigned long)pa); + va_start_count = xsc_count_trailing_zeros(va); + start_count = min_t(int, start_count, va_start_count); + } + + if (!is_last) { + end_count = xsc_count_trailing_zeros((unsigned long)end_pa); + va_end_count = xsc_count_trailing_zeros(end_va); + end_count = min_t(int, end_count, va_end_count); + } + + return start_count > end_count ? end_count : start_count; +} + +int xsc_find_best_pgsz(struct ib_umem *umem, + unsigned long pgsz_bitmap, + unsigned long virt, + int *npages, + int *shift, + u64 **pas) +{ + struct scatterlist *sg; + unsigned long va; + dma_addr_t pa; + struct xsc_pa_chunk *chunk, *tmp; + struct list_head chunk_list; + int i; + int chunk_cnt; + int min_count_0 = sizeof(int) << 3; + int count_0; + int is_first = 0, is_end = 0; + size_t pgsz; + u64 mask; + int err = 0; + int pa_index; + u64 chunk_pa; + int chunk_npages; + unsigned long page_shift = PAGE_SHIFT; + + pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, 0); + + va = (virt >> page_shift) << page_shift; + + INIT_LIST_HEAD(&chunk_list); + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) { + err = -ENOMEM; + goto err_alloc; + } + list_add_tail(&chunk->list, &chunk_list); + + chunk_cnt = 1; + for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) { + pa = sg_dma_address(sg); + if (i == 0) { + chunk->va = va; + chunk->pa = pa; + chunk->length = sg_dma_len(sg); + va += chunk->length; + continue; + } + + if (pa == chunk->pa + chunk->length) { + chunk->length += 
sg_dma_len(sg); + va += sg_dma_len(sg); + } else { + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) { + err = -ENOMEM; + goto err_alloc; + } + chunk->va = va; + chunk->pa = pa; + chunk->length = sg_dma_len(sg); + va += chunk->length; + list_add_tail(&chunk->list, &chunk_list); + chunk_cnt++; + } + } + + i = 0; + list_for_each_entry(chunk, &chunk_list, list) { + is_first = (i == 0 ? 1 : 0); + is_end = (i == chunk_cnt - 1 ? 1 : 0); + count_0 = xsc_find_chunk_cont_0(chunk, is_first, is_end); + if (count_0 < min_count_0) + min_count_0 = count_0; + i++; + } + + pgsz_bitmap &= GENMASK(min_count_0, 0); + pgsz = rounddown_pow_of_two(pgsz_bitmap); + *shift = ilog2(pgsz); + *npages = 0; + + if (chunk_cnt == 1) { + list_for_each_entry(chunk, &chunk_list, list) { + mask = GENMASK(*shift - 1, min_t(int, page_shift, *shift)); + *npages += DIV_ROUND_UP(chunk->length + (virt & mask), pgsz); + *pas = vmalloc(*npages * sizeof(u64)); + if (!*pas) { + err = -ENOMEM; + goto err_alloc; + } + + chunk_pa = chunk->pa - (virt & mask); + for (i = 0; i < *npages; i++) + (*pas)[i] = chunk_pa + i * pgsz; + } + } else { + list_for_each_entry(chunk, &chunk_list, list) { + *npages += DIV_ROUND_UP(chunk->length, pgsz); + } + + *pas = vmalloc(*npages * sizeof(u64)); + if (!*pas) { + err = -ENOMEM; + goto err_alloc; + } + + pa_index = 0; + list_for_each_entry(chunk, &chunk_list, list) { + chunk_npages = DIV_ROUND_UP(chunk->length, pgsz); + chunk_pa = chunk->pa; + for (i = 0; i < chunk_npages; i++) { + if (pa_index == 0) { + mask = GENMASK(*shift - 1, + min_t(int, page_shift, *shift)); + chunk_pa -= (virt & mask); + } + (*pas)[pa_index] = chunk_pa + i * pgsz; + + pa_index++; + } + } + } + +err_alloc: + list_for_each_entry_safe(chunk, tmp, &chunk_list, list) { + list_del(&chunk->list); + kfree(chunk); + } + return err; +} + +/* @umem: umem object to scan + * @addr: ib virtual address requested by the user + * @count: number of PAGE_SIZE pages covered by umem + * @shift: page shift for the 
compound pages found in the region + * @ncont: number of compund pages + * @order: log2 of the number of compound pages + */ +void __xsc_ib_cont_pages(struct ib_umem *umem, u64 addr, + unsigned long max_page_shift, + int *count, int *shift, + int *ncont, int *order) +{ + unsigned long tmp; + unsigned long m; + u64 base = ~0, p = 0; + u64 len, pfn; + int i = 0; + struct scatterlist *sg; + int entry; + unsigned long page_shift = PAGE_SHIFT; + + addr = addr >> page_shift; + tmp = (unsigned long)addr; + m = find_first_bit(&tmp, BITS_PER_LONG); + if (max_page_shift) + m = min_t(unsigned long, max_page_shift - page_shift, m); + for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, entry) { + len = sg_dma_len(sg) >> page_shift; + pfn = sg_dma_address(sg) >> page_shift; + if (base + p != pfn) { + /* If either the offset or the new + * base are unaligned update m + */ + tmp = (unsigned long)(pfn | p); + if (!IS_ALIGNED(tmp, 1 << m)) + m = find_first_bit(&tmp, BITS_PER_LONG); + + base = pfn; + p = 0; + } + + p += len; + i += len; + } + + if (i) { + m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m); + + if (order) + *order = ilog2(roundup_pow_of_two(i) >> m); + + *ncont = DIV_ROUND_UP(i, (1 << m)); + } else { + m = 0; + + if (order) + *order = 0; + + *ncont = 0; + } + *shift = page_shift + m; + *count = i; +} + +void xsc_ib_cont_pages(struct ib_umem *umem, u64 addr, + int *count, int *shift, + int *ncont, int *order) +{ + // no limit for page_shift + __xsc_ib_cont_pages(umem, addr, 0, count, shift, ncont, order); +} + +void __xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem, + int page_shift, size_t offset, size_t num_pages, + __be64 *pas, int access_flags, bool need_to_devide) +{ + unsigned long umem_page_shift = PAGE_SHIFT; + int shift = page_shift - umem_page_shift; + int mask = (1 << shift) - 1; + int i = 0; + int k, idx; + u64 cur = 0; + u64 base; + int len; + struct scatterlist *sg; + int entry; + + for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, 
entry) { + len = sg_dma_len(sg) >> umem_page_shift; + if (need_to_devide) + len = sg_dma_len(sg) >> PAGE_SHIFT_4K; + else + len = sg_dma_len(sg) >> umem_page_shift; + base = sg_dma_address(sg); + + /* Skip elements below offset */ + if (i + len < offset << shift) { + i += len; + continue; + } + + /* Skip pages below offset */ + if (i < offset << shift) { + k = (offset << shift) - i; + i = offset << shift; + } else { + k = 0; + } + + for (; k < len; k++) { + if (!(i & mask)) { + if (need_to_devide) + cur = base + (k << PAGE_SHIFT_4K); + else + cur = base + (k << umem_page_shift); + cur |= access_flags; + idx = (i >> shift) - offset; + + pas[idx] = cpu_to_be64(cur); + xsc_ib_dbg(dev, "pas[%d] 0x%llx\n", + i >> shift, be64_to_cpu(pas[idx])); + } + i++; + + /* Stop after num_pages reached */ + if (i >> shift >= offset + num_pages) + return; + } + } +} + +void xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem, + int page_shift, __be64 *pas, int npages, bool need_to_devide) +{ + return __xsc_ib_populate_pas(dev, umem, page_shift, 0, + npages, pas, 0, need_to_devide); +} + +int xsc_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset) +{ + u64 page_size; + u64 page_mask; + u64 off_size; + u64 off_mask; + u64 buf_off; + + page_size = 1 << page_shift; + page_mask = page_size - 1; + buf_off = addr & page_mask; + off_size = page_size >> 6; + off_mask = off_size - 1; + + if (buf_off & off_mask) + return -EINVAL; + + *offset = buf_off >> ilog2(off_size); + return 0; +} diff --git a/drivers/infiniband/hw/xsc/mr.c b/drivers/infiniband/hw/xsc/mr.c new file mode 100644 index 000000000000..2dddd3b6f716 --- /dev/null +++ b/drivers/infiniband/hw/xsc/mr.c @@ -0,0 +1,500 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include "common/xsc_cmd.h" +#include +#include "ib_umem_ex.h" +#include "xsc_ib.h" + +#ifndef CONFIG_INFINIBAND_PEER_MEMORY +static void xsc_invalidate_umem(void *invalidation_cookie, + struct ib_umem_ex *umem, + unsigned long addr, size_t size); +#endif + +enum { + DEF_CACHE_SIZE = 10, +}; + +struct ib_mr *xsc_ib_get_dma_mr(struct ib_pd *pd, int acc) +{ + struct xsc_ib_dev *dev = to_mdev(pd->device); + struct xsc_core_device *xdev = dev->xdev; + struct xsc_register_mr_mbox_in *in; + struct xsc_register_mr_request *req; + struct xsc_ib_mr *mr; + int err; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_free; + } + + req = &in->req; + req->acc = convert_access(acc); + req->va_base = 0; + req->map_en = !(XSC_MPT_MAP_EN); + + err = xsc_core_create_mkey(xdev, &mr->mmr); + if (err) + goto err_in; + req->mkey = cpu_to_be32(mr->mmr.key); + err = xsc_core_register_mr(xdev, &mr->mmr, in, sizeof(*in)); + if (err) + goto err_reg_mr; + kfree(in); + mr->ibmr.lkey = mr->mmr.key; + mr->ibmr.rkey = mr->mmr.key; + mr->umem = NULL; + + return &mr->ibmr; +err_reg_mr: + xsc_core_destroy_mkey(xdev, &mr->mmr); +err_in: + kfree(in); + +err_free: + kfree(mr); + + return ERR_PTR(err); +} + +void xsc_fill_pas(int npages, u64 *pas, __be64 *req_pas) +{ + int i; + + for (i = 0; i < npages; i++) + req_pas[i] = cpu_to_be64(pas[i]); +} + +static struct xsc_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, + u64 length, struct ib_umem *umem, + int npages, u64 *pas, int page_shift, + int access_flags) +{ + struct xsc_ib_dev *dev = to_mdev(pd->device); + struct xsc_register_mr_mbox_in *in; + struct xsc_ib_mr *mr; + int inlen; + int err; + + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) { + err = -ENOMEM; + goto err_0; + } + + inlen = sizeof(*in) + sizeof(*in->req.pas) * npages; + in = xsc_vzalloc(inlen); + if (!in) { + err 
= -ENOMEM; + goto err_1; + } + err = xsc_core_create_mkey(dev->xdev, &mr->mmr); + if (err) { + xsc_ib_warn(dev, "create mkey failed\n"); + goto err_2; + } + + xsc_fill_pas(npages, pas, in->req.pas); + + in->req.acc = convert_access(access_flags); + in->req.pa_num = cpu_to_be32(npages); + in->req.pdn = cpu_to_be32(to_mpd(pd)->pdn); + in->req.va_base = cpu_to_be64(virt_addr); + in->req.map_en = XSC_MPT_MAP_EN; + in->req.len = cpu_to_be32((u32)length); + in->req.page_mode = (page_shift == XSC_PAGE_SHIFT_4K ? XSC_PAGE_MODE_4K : + (page_shift == XSC_PAGE_SHIFT_64K ? XSC_PAGE_MODE_64K : + (page_shift == XSC_PAGE_SHIFT_2M ? XSC_PAGE_MODE_2M : XSC_PAGE_MODE_1G))); + in->req.mkey = cpu_to_be32(mr->mmr.key); + err = xsc_core_register_mr(dev->xdev, &mr->mmr, in, inlen); + if (err) { + xsc_ib_warn(dev, "register mr failed, err = %d\n", err); + goto err_reg_mr; + } + mr->umem = umem; + xsc_vfree(in); + vfree(pas); + + xsc_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key); + + return mr; +err_reg_mr: + xsc_core_destroy_mkey(dev->xdev, &mr->mmr); +err_2: + xsc_vfree(in); +err_1: + kfree(mr); +err_0: + vfree(pas); + + return ERR_PTR(err); +} + +struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata) +{ + struct xsc_ib_dev *dev = to_mdev(pd->device); + struct xsc_ib_mr *mr = NULL; + struct ib_umem_ex *umem_ex; + struct ib_umem *umem; + int page_shift; + int npages; + u64 *pas; + int err; + struct ib_peer_memory_client *ib_peer_mem = NULL; + struct xsc_ib_peer_id *xsc_ib_peer_id = NULL; + + xsc_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx\n", + start, virt_addr, length); +#ifdef CONFIG_INFINIBAND_PEER_MEMORY + umem = ib_umem_get_peer(&dev->ib_dev, start, length, + access_flags, IB_PEER_MEM_INVAL_SUPP); +#else + umem = ib_umem_get(&dev->ib_dev, start, length, access_flags); +#endif + if (IS_ERR(umem)) { + // check client peer memory +#ifdef CONFIG_INFINIBAND_PEER_MEMORY + xsc_ib_warn(dev, "umem get 
failed\n"); + return (void *)umem; +#else + u8 peer_exists = 0; + + umem_ex = ib_client_umem_get(pd->uobject->context, + start, length, access_flags, 0, &peer_exists); + if (!peer_exists) { + xsc_ib_dbg(dev, "umem get failed\n"); + return (void *)umem; + } + ib_peer_mem = umem_ex->ib_peer_mem; + xsc_ib_peer_id = kzalloc(sizeof(*xsc_ib_peer_id), GFP_KERNEL); + if (!xsc_ib_peer_id) { + err = -ENOMEM; + goto error; + } + init_completion(&xsc_ib_peer_id->comp); + err = ib_client_umem_activate_invalidation_notifier(umem_ex, + xsc_invalidate_umem, + xsc_ib_peer_id); + if (err) + goto error; +#endif + } else { + umem_ex = ib_umem_ex(umem); + if (IS_ERR(umem_ex)) { + err = -ENOMEM; + goto error; + } + } + umem = &umem_ex->umem; + + err = xsc_find_best_pgsz(umem, 0x40211000, start, &npages, &page_shift, &pas); + if (err) { + vfree(pas); + pas = NULL; + xsc_ib_warn(dev, "find best page size failed\n"); + goto error; + } + if (!npages) { + xsc_ib_warn(dev, "avoid zero region\n"); + err = -EINVAL; + goto error; + } + + xsc_ib_dbg(dev, "npages %d, page_shift %d\n", npages, page_shift); + + mr = reg_create(pd, virt_addr, length, umem, npages, pas, page_shift, access_flags); + if (IS_ERR(mr)) { + err = PTR_ERR(mr); + goto error; + } + + xsc_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key); + + mr->umem = umem; + mr->npages = npages; + spin_lock(&dev->mr_lock); + dev->xdev->dev_res->reg_pages += npages; + spin_unlock(&dev->mr_lock); + mr->ibmr.lkey = mr->mmr.key; + mr->ibmr.rkey = mr->mmr.key; + mr->ibmr.length = length; + atomic_set(&mr->invalidated, 0); + if (ib_peer_mem) { + init_completion(&mr->invalidation_comp); + xsc_ib_peer_id->mr = mr; + mr->peer_id = xsc_ib_peer_id; + complete(&xsc_ib_peer_id->comp); + } + + return &mr->ibmr; + +error: + if (xsc_ib_peer_id) { + complete(&xsc_ib_peer_id->comp); + kfree(xsc_ib_peer_id); + xsc_ib_peer_id = NULL; + } + + ib_umem_ex_release(umem_ex); + return ERR_PTR(err); +} + +xsc_ib_dereg_mr_def() +{ + struct xsc_ib_dev *dev = 
to_mdev(ibmr->device); + struct xsc_ib_mr *mr = to_mmr(ibmr); + struct ib_umem *umem = mr->umem; + struct ib_umem_ex *umem_ex = (struct ib_umem_ex *)umem; + int npages = mr->npages; + int err; + + xsc_ib_dbg(dev, "dereg mkey = 0x%x\n", mr->mmr.key); + + if (atomic_inc_return(&mr->invalidated) > 1) { + /* In case there is inflight invalidation call pending for its termination */ + wait_for_completion(&mr->invalidation_comp); + kfree(mr); + return 0; + } + + if (mr->npages) { + err = xsc_core_dereg_mr(dev->xdev, &mr->mmr); + if (err) { + xsc_ib_warn(dev, "failed to dereg mr 0x%x (%d)\n", + mr->mmr.key, err); + atomic_set(&mr->invalidated, 0); + return err; + } + } + err = xsc_core_destroy_mkey(dev->xdev, &mr->mmr); + if (err) { + xsc_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", + mr->mmr.key, err); + atomic_set(&mr->invalidated, 0); + return err; + } + + if (umem_ex) { + ib_umem_ex_release(umem_ex); + spin_lock(&dev->mr_lock); + dev->xdev->dev_res->reg_pages -= npages; + spin_unlock(&dev->mr_lock); + } + + kfree(mr->pas); + kfree(mr); + + return 0; +} + +#ifndef CONFIG_INFINIBAND_PEER_MEMORY +static void xsc_invalidate_umem(void *invalidation_cookie, + struct ib_umem_ex *umem, + unsigned long addr, + size_t size) +{ + struct xsc_ib_mr *mr; + struct xsc_ib_dev *dev; + struct xsc_ib_peer_id *peer_id = (struct xsc_ib_peer_id *)invalidation_cookie; + + wait_for_completion(&peer_id->comp); + if (!peer_id->mr) + return; + + mr = peer_id->mr; + /* This function is called under client peer lock so its resources are race protected */ + if (atomic_inc_return(&mr->invalidated) > 1) { + umem->invalidation_ctx->inflight_invalidation = 1; + return; + } + + umem->invalidation_ctx->peer_callback = 1; + dev = to_mdev(mr->ibmr.device); + xsc_core_destroy_mkey(dev->xdev, &mr->mmr); + xsc_core_dereg_mr(dev->xdev, &mr->mmr); + complete(&mr->invalidation_comp); +} +#endif + +xsc_ib_alloc_mr_def() +{ + struct xsc_ib_dev *dev = to_mdev(pd->device); + struct xsc_ib_mr *mr; + int err; + 
+	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+	if (!mr)
+		return ERR_PTR(-ENOMEM);
+
+	mr->npages = 0;
+	mr->mmr.pd = to_mpd(pd)->pdn;
+	mr->pas = kcalloc(max_num_sg, sizeof(__be64), GFP_KERNEL);	/* page list filled by xsc_set_page() */
+	if (!mr->pas) {
+		err = -ENOMEM;
+		goto err_alloc;
+	}
+
+	err = xsc_core_create_mkey(dev->xdev, &mr->mmr);
+	if (err)
+		goto err_create_mkey;
+	mr->ibmr.lkey = mr->mmr.key;
+	mr->ibmr.rkey = mr->mmr.key;
+	mr->ibmr.device = &dev->ib_dev;
+
+	return &mr->ibmr;
+err_create_mkey:
+	kfree(mr->pas);
+err_alloc:
+	kfree(mr);
+	return ERR_PTR(err);
+}
+
+static int xsc_set_page(struct ib_mr *ibmr, u64 pa)	/* ib_sg_to_pages() per-page callback */
+{
+	struct xsc_ib_mr *mmr = to_mmr(ibmr);
+
+	mmr->pas[mmr->npages] = pa;	/* pas[] sized to max_num_sg in alloc_mr */
+	mmr->npages++;
+	return 0;
+}
+
+u8 xsc_get_mr_page_mode(struct xsc_core_device *xdev, u32 page_shift)	/* map page shift -> HW page mode */
+{
+	u8 page_mode = 0;
+
+	page_mode = (page_shift == XSC_PAGE_SHIFT_4K ? XSC_PAGE_MODE_4K :
+		(page_shift == XSC_PAGE_SHIFT_64K ? XSC_PAGE_MODE_64K :
+		(page_shift == XSC_PAGE_SHIFT_2M ? XSC_PAGE_MODE_2M : XSC_PAGE_MODE_1G)));
+
+	return page_mode;
+}
+
+int xsc_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+		     int sg_nents, unsigned int *sg_offset)
+{
+	struct xsc_ib_mr *mmr = to_mmr(ibmr);
+
+	mmr->npages = 0;	/* restart page collection before walking the sg list */
+	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, xsc_set_page);
+}
+
+int xsc_wr_reg_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr)	/* post a REG_MR work request to FW */
+{
+	const struct ib_reg_wr *reg_wr = container_of(wr, struct ib_reg_wr, wr);
+	struct ib_mr *ibmr = reg_wr->mr;
+	struct xsc_ib_mr *mmr = to_mmr(ibmr);
+	struct xsc_register_mr_mbox_in *in;
+	int inlen;
+	int i;
+	int err;
+	__be64 *pas;
+
+	inlen = sizeof(*in) + sizeof(__be64) * mmr->npages;	/* header + trailing pas[] */
+	in = kzalloc(inlen, GFP_ATOMIC);
+	if (!in)
+		return -ENOMEM;
+
+	in->req.pdn = cpu_to_be32(mmr->mmr.pd);
+	in->req.mkey = cpu_to_be32(ibmr->rkey);
+	in->req.acc = convert_access(reg_wr->access);
+	in->req.page_mode = 0;
+	in->req.map_en = XSC_MPT_MAP_EN;
+
+	if (xsc_ib_iommu_dma_map(ibmr->device)) {
+		static u32 support_page_shift[] = {12, 16, 21, 30};
+		u64 va_base;
+		u64 pa_base;
+		int len;
+		int i;
+		u32 page_shift;
+
+		for (i = 0; i < ARRAY_SIZE(support_page_shift); i++) {
+			page_shift = support_page_shift[i];
+			va_base = ALIGN_DOWN(ibmr->iova, 1 << page_shift);
+			len = ibmr->iova + ibmr->length - va_base;
+			if (len <= (1 << page_shift)) {
+				/* whole MR fits in one page of this size */
+				pa_base = ALIGN_DOWN(mmr->pas[0], (1 << page_shift));
+				in->req.page_mode = xsc_get_mr_page_mode(dev->xdev, page_shift);
+				in->req.pa_num = cpu_to_be32(1);
+				in->req.len = cpu_to_be32(len);
+				in->req.va_base = cpu_to_be64(va_base);
+				in->req.pas[0] = cpu_to_be64(pa_base);
+				goto out;
+			}
+		}
+
+		xsc_ib_warn(dev, "Not found suitable page mode for iommu dma map, using 4k mode");
+	}
+
+	in->req.page_mode = xsc_get_mr_page_mode(dev->xdev, PAGE_SHIFT_4K);
+	in->req.va_base = cpu_to_be64(ibmr->iova);
+	in->req.pa_num = cpu_to_be32(mmr->npages);
+	in->req.len = cpu_to_be32(ibmr->length);
+	pas = in->req.pas;
+	for (i = 0; i < mmr->npages; i++)
+		pas[i] = cpu_to_be64(mmr->pas[i]);
+
+out:
+	xsc_ib_dbg(dev, "iova=%llx, pas=%llx, req.page_mode=%u, req.va_base=%llx, req.pas=%llx, req.len=%d, req.pa_num=%d\n",
+		   ibmr->iova,
+		   mmr->pas[0],
+		   in->req.page_mode,
+		   be64_to_cpu(in->req.va_base),
+		   be64_to_cpu(in->req.pas[0]),
+		   be32_to_cpu(in->req.len),
+		   be32_to_cpu(in->req.pa_num));
+
+	err = xsc_core_register_mr(dev->xdev, &mmr->mmr, in, inlen);	/* was sizeof(*in): truncated the pas[] tail */
+
+	kfree(in);
+	return err;
+}
+
+int xsc_wr_invalidate_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr)	/* LOCAL_INV: dereg by rkey */
+{
+	struct xsc_core_mr mr;
+	int err = 0;
+
+	if (!wr)
+		return -1;
+	mr.key = wr->ex.invalidate_rkey;
+	err = xsc_core_dereg_mr(dev->xdev, &mr);
+	return err;
+}
+
+void xsc_reg_local_dma_mr(struct xsc_core_device *dev)	/* register the reserved local-dma lkey (0xFF) */
+{
+	struct xsc_register_mr_mbox_in in = {};	/* zero-init: not every req field is set below */
+	int err = 0;
+
+	in.req.pdn = 0;
+	in.req.pa_num = 0;
+	in.req.len = 0;
+	in.req.mkey = cpu_to_be32(0xFF);
+	in.req.acc = XSC_PERM_LOCAL_WRITE | XSC_PERM_LOCAL_READ;
+	in.req.page_mode = 0;
+	
in.req.map_en = !(XSC_MPT_MAP_EN); + in.req.va_base = 0; + + err = xsc_core_register_mr(dev, NULL, &in, sizeof(in)); + if (err) + xsc_core_err(dev, "\n"); +} diff --git a/drivers/infiniband/hw/xsc/peer_mem.c b/drivers/infiniband/hw/xsc/peer_mem.c new file mode 100644 index 000000000000..eba572973b39 --- /dev/null +++ b/drivers/infiniband/hw/xsc/peer_mem.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "ib_peer_mem.h" +#include +#include "ib_umem_ex.h" + +static DEFINE_MUTEX(peer_memory_mutex); +static LIST_HEAD(peer_memory_list); + +static void complete_peer(struct kref *kref); + +/* Caller should be holding the peer client lock, ib_peer_client->lock */ +static struct core_ticket *ib_peer_search_context(struct ib_peer_memory_client *ib_peer_client, + u64 key) +{ + struct core_ticket *core_ticket; + + list_for_each_entry(core_ticket, &ib_peer_client->core_ticket_list, + ticket_list) { + if (core_ticket->key == key) + return core_ticket; + } + + return NULL; +} + +static int ib_invalidate_peer_memory(void *reg_handle, u64 core_context) +{ + struct ib_peer_memory_client *ib_peer_client = reg_handle; + struct invalidation_ctx *invalidation_ctx; + struct core_ticket *core_ticket; + int need_unlock = 1; + + mutex_lock(&ib_peer_client->lock); + ib_peer_client->stats.num_free_callbacks += 1; + core_ticket = ib_peer_search_context(ib_peer_client, core_context); + if (!core_ticket) + goto out; + + invalidation_ctx = (struct invalidation_ctx *)core_ticket->context; + /* If context is not ready yet, mark it to be invalidated */ + if (!invalidation_ctx->func) { + invalidation_ctx->peer_invalidated = 1; + goto out; + } + invalidation_ctx->func(invalidation_ctx->cookie, + invalidation_ctx->umem_ex, 0, 0); + if (invalidation_ctx->inflight_invalidation) { + /* init the completion to wait on before letting other thread to run */ + 
init_completion(&invalidation_ctx->comp); + mutex_unlock(&ib_peer_client->lock); + need_unlock = 0; + wait_for_completion(&invalidation_ctx->comp); + } + + kfree(invalidation_ctx); +out: + if (need_unlock) + mutex_unlock(&ib_peer_client->lock); + + return 0; +} + +static int ib_peer_insert_context(struct ib_peer_memory_client *ib_peer_client, + void *context, + u64 *context_ticket) +{ + struct core_ticket *core_ticket = kzalloc(sizeof(*core_ticket), GFP_KERNEL); + + if (!core_ticket) + return -ENOMEM; + + mutex_lock(&ib_peer_client->lock); + core_ticket->key = ib_peer_client->last_ticket++; + core_ticket->context = context; + list_add_tail(&core_ticket->ticket_list, + &ib_peer_client->core_ticket_list); + *context_ticket = core_ticket->key; + mutex_unlock(&ib_peer_client->lock); + + return 0; +} + +/* + * Caller should be holding the peer client lock, specifically, + * the caller should hold ib_peer_client->lock + */ +static int ib_peer_remove_context(struct ib_peer_memory_client *ib_peer_client, + u64 key) +{ + struct core_ticket *core_ticket; + + list_for_each_entry(core_ticket, &ib_peer_client->core_ticket_list, + ticket_list) { + if (core_ticket->key == key) { + list_del(&core_ticket->ticket_list); + kfree(core_ticket); + return 0; + } + } + + return 1; +} + +/* + * ib_peer_create_invalidation_ctx - creates invalidation context for a given umem + * @ib_peer_mem: peer client to be used + * @umem: umem struct belongs to that context + * @invalidation_ctx: output context + */ +int ib_peer_create_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem, + struct ib_umem_ex *umem_ex, + struct invalidation_ctx **invalidation_ctx) +{ + int ret; + struct invalidation_ctx *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ret = ib_peer_insert_context(ib_peer_mem, ctx, + &ctx->context_ticket); + if (ret) { + kfree(ctx); + return ret; + } + + ctx->umem_ex = umem_ex; + umem_ex->invalidation_ctx = ctx; + *invalidation_ctx = ctx; + + 
return 0; +} + +/** + * ** ib_peer_destroy_invalidation_ctx - destroy a given invalidation context + * ** @ib_peer_mem: peer client to be used + * ** @invalidation_ctx: context to be invalidated + * **/ +void ib_peer_destroy_invalidation_ctx(struct ib_peer_memory_client *ib_peer_mem, + struct invalidation_ctx *invalidation_ctx) +{ + int peer_callback; + int inflight_invalidation; + + /* If we are under peer callback lock was already taken.*/ + if (!invalidation_ctx->peer_callback) + mutex_lock(&ib_peer_mem->lock); + ib_peer_remove_context(ib_peer_mem, invalidation_ctx->context_ticket); + /* make sure to check inflight flag after took the lock and remove from tree. + * in addition, from that point using local variables for peer_callback and + * inflight_invalidation as after the complete invalidation_ctx can't be accessed + * any more as it may be freed by the callback. + */ + peer_callback = invalidation_ctx->peer_callback; + inflight_invalidation = invalidation_ctx->inflight_invalidation; + if (inflight_invalidation) + complete(&invalidation_ctx->comp); + + /* On peer callback lock is handled externally */ + if (!peer_callback) + mutex_unlock(&ib_peer_mem->lock); + + /* in case under callback context or callback is pending + * let it free the invalidation context + */ + if (!peer_callback && !inflight_invalidation) + kfree(invalidation_ctx); +} + +static int ib_memory_peer_check_mandatory(const struct peer_memory_client + *peer_client) +{ +#define PEER_MEM_MANDATORY_FUNC(x) { offsetof(struct peer_memory_client, x), #x } + static const struct { + size_t offset; + char *name; + } mandatory_table[] = { + PEER_MEM_MANDATORY_FUNC(acquire), + PEER_MEM_MANDATORY_FUNC(get_pages), + PEER_MEM_MANDATORY_FUNC(put_pages), + PEER_MEM_MANDATORY_FUNC(get_page_size), + PEER_MEM_MANDATORY_FUNC(dma_map), + PEER_MEM_MANDATORY_FUNC(dma_unmap) + }; + int i; + + for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) { + if (!*(void **)((void *)peer_client + mandatory_table[i].offset)) { + 
pr_err("Peer memory %s is missing mandatory function %s\n", + peer_client->name, mandatory_table[i].name); + return -EINVAL; + } + } + + return 0; +} + +static void complete_peer(struct kref *kref) +{ + struct ib_peer_memory_client *ib_peer_client = + container_of(kref, struct ib_peer_memory_client, ref); + + complete(&ib_peer_client->unload_comp); +} + +void *ib_register_peer_memory_client(const struct peer_memory_client *peer_client, + invalidate_peer_memory *invalidate_callback) +{ + struct ib_peer_memory_client *ib_peer_client; + + if (ib_memory_peer_check_mandatory(peer_client)) + return NULL; + + ib_peer_client = kzalloc(sizeof(*ib_peer_client), GFP_KERNEL); + if (!ib_peer_client) + return NULL; + + INIT_LIST_HEAD(&ib_peer_client->core_ticket_list); + mutex_init(&ib_peer_client->lock); + init_completion(&ib_peer_client->unload_comp); + kref_init(&ib_peer_client->ref); + ib_peer_client->peer_mem = peer_client; + + /* Once peer supplied a non NULL callback it's an indication that + * invalidation support is required for any memory owning. 
+ */ + if (invalidate_callback) { + *invalidate_callback = ib_invalidate_peer_memory; + ib_peer_client->invalidation_required = 1; + } + ib_peer_client->last_ticket = 1; + + mutex_lock(&peer_memory_mutex); + list_add_tail(&ib_peer_client->core_peer_list, &peer_memory_list); + + mutex_unlock(&peer_memory_mutex); + return ib_peer_client; +} +EXPORT_SYMBOL(ib_register_peer_memory_client); + +void ib_unregister_peer_memory_client(void *reg_handle) +{ + struct ib_peer_memory_client *ib_peer_client = reg_handle; + + mutex_lock(&peer_memory_mutex); + list_del(&ib_peer_client->core_peer_list); + mutex_unlock(&peer_memory_mutex); + + kref_put(&ib_peer_client->ref, complete_peer); + wait_for_completion(&ib_peer_client->unload_comp); + kfree(ib_peer_client); +} +EXPORT_SYMBOL(ib_unregister_peer_memory_client); + +struct ib_peer_memory_client *ib_get_peer_client(struct ib_ucontext *context, unsigned long addr, + size_t size, unsigned long peer_mem_flags, + void **peer_client_context) +{ + struct ib_peer_memory_client *ib_peer_client = NULL; + + int ret = 0; + + mutex_lock(&peer_memory_mutex); + list_for_each_entry(ib_peer_client, &peer_memory_list, core_peer_list) { + /* In case peer requires invalidation it can't own + * memory which doesn't support it + */ + if ((ib_peer_client->invalidation_required && + (!(peer_mem_flags & IB_PEER_MEM_INVAL_SUPP)))) + continue; + ret = ib_peer_client->peer_mem->acquire(addr, size, NULL, NULL, + peer_client_context); + if (ret > 0) + goto found; + } + + ib_peer_client = NULL; + +found: + if (ib_peer_client) + kref_get(&ib_peer_client->ref); + + mutex_unlock(&peer_memory_mutex); + + return ib_peer_client; +} +EXPORT_SYMBOL(ib_get_peer_client); + +void ib_put_peer_client(struct ib_peer_memory_client *ib_peer_client, + void *peer_client_context) +{ + if (ib_peer_client->peer_mem->release) + ib_peer_client->peer_mem->release(peer_client_context); + + kref_put(&ib_peer_client->ref, complete_peer); +} +EXPORT_SYMBOL(ib_put_peer_client); + +int 
ib_get_peer_private_data(struct ib_ucontext *context, u64 peer_id, + char *peer_name) +{ + pr_warn("predefine peer mem is not supported by now"); + return -1; +} +EXPORT_SYMBOL(ib_get_peer_private_data); + +void ib_put_peer_private_data(struct ib_ucontext *context) +{ + pr_warn("predefine peer mem is not supported by now"); +} +EXPORT_SYMBOL(ib_put_peer_private_data); diff --git a/drivers/infiniband/hw/xsc/peer_mem.h b/drivers/infiniband/hw/xsc/peer_mem.h new file mode 100644 index 000000000000..7e3f803ac246 --- /dev/null +++ b/drivers/infiniband/hw/xsc/peer_mem.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#if !defined(PEER_MEM_H) +#define PEER_MEM_H + +#include +#include +#include +#include +#include +#include + +#define IB_PEER_MEMORY_NAME_MAX 64 +#define IB_PEER_MEMORY_VER_MAX 16 +#define PEER_MEM_U64_CORE_CONTEXT + +/** + * struct peer_memory_client - registration information for peer client. + * @name: peer client name + * @version: peer client version + * @acquire: callback function to be used by IB core to detect whether a + * virtual address in under the responsibility of a specific peer client. + * @get_pages: callback function to be used by IB core asking the peer client to pin + * the physical pages of the given address range and returns that information. + * It equivalents to the kernel API of get_user_pages(), but targets peer memory. + * @dma_map: callback function to be used by IB core asking the peer client to fill + * the dma address mapping for a given address range. + * @dma_unmap: callback function to be used by IB core asking the peer client to take + * relevant actions to unmap the memory. + * @put_pages: callback function to be used by IB core asking the peer client to remove the + * pinning from the given memory. + * It's the peer-direct equivalent of the kernel API put_page. 
+ * @get_page_size: callback function to be used by IB core to query the peer client for + * the page size for the given allocation. + * @release: callback function to be used by IB core asking peer client to release all + * resources associated with previous acquire call. The call will be performed + * only for contexts that have been successfully acquired (i.e. acquire returned a + * non-zero value). + * Additionally, IB core guarentees that there will be no pages pinned through this + * context when the callback is called. + * + * The subsections in this description contain detailed description + * of the callback arguments and expected return values for the + * callbacks defined in this struct. + * + * acquire: + * + * Callback function to be used by IB core to detect + * whether a virtual address in under the responsibility + * of a specific peer client. + * + * addr [IN] - virtual address to be checked whether belongs to peer. + * + * size [IN] - size of memory area starting at addr. + * + * peer_mem_private_data [IN] - The contents of ib_ucontext-> peer_mem_private_data. + * This parameter allows usage of the peer-direct + * API in implementations where it is impossible + * to detect if the memory belongs to the device + * based upon the virtual address alone. In such + * cases, the peer device can create a special + * ib_ucontext, which will be associated with the + * relevant peer memory. + * + * peer_mem_name [IN] - The contents of ib_ucontext-> peer_mem_name. + * Used to identify the peer memory client that + * initialized the ib_ucontext. + * This parameter is normally used along with + * peer_mem_private_data. + * client_context [OUT] - peer opaque data which holds a peer context for + * the acquired address range, will be provided + * back to the peer memory in subsequent + * calls for that given memory. 
+ * + * If peer takes responsibility on the given address range further calls for memory + * management will be directed to the callbacks of this peer client. + * + * Return - 1 in case peer client takes responsibility on that range otherwise 0. + * Any peer internal error should resulted in a zero answer, in case address + * range really belongs to the peer, no owner will be found and application + * will get an error + * from IB Core as expected. + * + * get_pages: + * + * Callback function to be used by IB core asking the + * peer client to pin the physical pages of the given + * address range and returns that information. It + * equivalents to the kernel API of get_user_pages(), but + * targets peer memory. + * + * addr [IN] - start virtual address of that given allocation. + * + * size [IN] - size of memory area starting at addr. + * + * write [IN] - indicates whether the pages will be written to by the caller. + * Same meaning as of kernel API get_user_pages, can be + * ignored if not relevant. + * + * force [IN] - indicates whether to force write access even if user + * mapping is read only. Same meaning as of kernel API + * get_user_pages, can be ignored if not relevant. + * + * sg_head [IN/OUT] - pointer to head of struct sg_table. + * The peer client should allocate a table big + * enough to store all of the required entries. This + * function should fill the table with physical addresses + * and sizes of the memory segments composing this + * memory mapping. + * The table allocation can be done using sg_alloc_table. + * Filling in the physical memory addresses and size can + * be done using sg_set_page. + * + * client_context [IN] - peer context for the given allocation, as received from + * the acquire call. + * + * core_context [IN] - IB core context. If the peer client wishes to + * invalidate any of the pages pinned through this API, + * it must provide this context as an argument to the + * invalidate callback. 
+ * + * Return - 0 success, otherwise errno error code. + * + * dma_map: + * + * Callback function to be used by IB core asking the peer client to fill + * the dma address mapping for a given address range. + * + * sg_head [IN/OUT] - pointer to head of struct sg_table. The peer memory + * should fill the dma_address & dma_length for + * each scatter gather entry in the table. + * + * client_context [IN] - peer context for the allocation mapped. + * + * dma_device [IN] - the RDMA capable device which requires access to the + * peer memory. + * + * dmasync [IN] - flush in-flight DMA when the memory region is written. + * Same meaning as with host memory mapping, can be ignored if + * not relevant. + * + * nmap [OUT] - number of mapped/set entries. + * + * Return - 0 success, otherwise errno error code. + * + * dma_unmap: + * + * Callback function to be used by IB core asking the peer client to take + * relevant actions to unmap the memory. + * + * sg_head [IN] - pointer to head of struct sg_table. The peer memory + * should fill the dma_address & dma_length for + * each scatter gather entry in the table. + * + * client_context [IN] - peer context for the allocation mapped. + * + * dma_device [IN] - the RDMA capable device which requires access to the + * peer memory. + * + * Return - 0 success, otherwise errno error code. + * + * put_pages: + * + * Callback function to be used by IB core asking the peer client to remove the + * pinning from the given memory. + * It's the peer-direct equivalent of the kernel API put_page. + * + * sg_head [IN] - pointer to head of struct sg_table. + * + * client_context [IN] - peer context for that given allocation. + * + * get_page_size: + * + * Callback function to be used by IB core to query the + * peer client for the page size for the given + * allocation. + * + * sg_head [IN] - pointer to head of struct sg_table. + * + * client_context [IN] - peer context for that given allocation. 
+ * + * Return - Page size in bytes + * + * release: + * + * Callback function to be used by IB core asking peer + * client to release all resources associated with + * previous acquire call. The call will be performed only + * for contexts that have been successfully acquired + * (i.e. acquire returned a non-zero value). + * Additionally, IB core guarentees that there will be no + * pages pinned through this context when the callback is + * called. + * + * client_context [IN] - peer context for the given allocation. + * + **/ +struct peer_memory_client { + char name[IB_PEER_MEMORY_NAME_MAX]; + char version[IB_PEER_MEMORY_VER_MAX]; + int (*acquire)(unsigned long addr, size_t size, void *peer_mem_private_data, + char *peer_mem_name, void **client_context); + int (*get_pages)(unsigned long addr, + size_t size, int write, int force, + struct sg_table *sg_head, + void *client_context, u64 core_context); + int (*dma_map)(struct sg_table *sg_head, void *client_context, + struct device *dma_device, int dmasync, int *nmap); + int (*dma_unmap)(struct sg_table *sg_head, void *client_context, + struct device *dma_device); + void (*put_pages)(struct sg_table *sg_head, void *client_context); + unsigned long (*get_page_size)(void *client_context); + void (*release)(void *client_context); + void* (*get_context_private_data)(u64 peer_id); + void (*put_context_private_data)(void *context); +}; + +typedef int (*invalidate_peer_memory)(void *reg_handle, u64 core_context); + +void *ib_register_peer_memory_client(const struct peer_memory_client *peer_client, + invalidate_peer_memory *invalidate_callback); +void ib_unregister_peer_memory_client(void *reg_handle); + +#endif diff --git a/drivers/infiniband/hw/xsc/private_dev.c b/drivers/infiniband/hw/xsc/private_dev.c new file mode 100644 index 000000000000..29fe98fd6b0c --- /dev/null +++ b/drivers/infiniband/hw/xsc/private_dev.c @@ -0,0 +1,1031 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon 
Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_lag.h" +#include "common/res_obj.h" +#include "xsc_ib.h" + +#define FEATURE_ONCHIP_FT_MASK BIT(4) +#define FEATURE_DMA_RW_TBL_MASK BIT(8) +#define FEATURE_PCT_EXP_MASK BIT(9) + +static int xsc_priv_dev_open(struct inode *inode, struct file *file) +{ + struct xsc_priv_device *priv_dev = + container_of(inode->i_cdev, struct xsc_priv_device, cdev); + struct xsc_core_device *xdev = + container_of(priv_dev, struct xsc_core_device, priv_device); + struct xsc_bdf_file *bdf_file; + + bdf_file = kzalloc(sizeof(*bdf_file), GFP_KERNEL); + if (!file) + return -ENOMEM; + + INIT_RADIX_TREE(&bdf_file->obj_tree, GFP_ATOMIC); + spin_lock_init(&bdf_file->obj_lock); + + bdf_file->xdev = xdev; + bdf_file->key = bdf_to_key(pci_domain_nr(xdev->pdev->bus), + xdev->pdev->bus->number, xdev->pdev->devfn); + bdf_file->restore_nic_fn = NULL; + + radix_tree_preload(GFP_KERNEL); + spin_lock(&priv_dev->bdf_lock); + radix_tree_insert(&priv_dev->bdf_tree, bdf_file->key, bdf_file); + spin_unlock(&priv_dev->bdf_lock); + radix_tree_preload_end(); + file->private_data = bdf_file; + + return 0; +} + +static int xsc_priv_dev_release(struct inode *inode, struct file *filp) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *xdev = bdf_file->xdev; + + xsc_close_bdf_file(bdf_file); + + if (bdf_file->restore_nic_fn) { + xsc_set_user_mode(xdev, false); + bdf_file->restore_nic_fn(xdev); + } + + spin_lock(&xdev->priv_device.bdf_lock); + radix_tree_delete(&xdev->priv_device.bdf_tree, bdf_file->key); + spin_unlock(&xdev->priv_device.bdf_lock); + + kfree(bdf_file); + + return 0; +} + +static long xsc_ioctl_mem_free(struct xsc_priv_device *priv_dev, struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +{ + struct 
xsc_ioctl_mem_info *minfo; + struct xsc_ioctl_data_tl *tl; + struct xsc_ioctl_mbox_in *in; + struct xsc_mem_entry *m_ent; + char tname[TASK_COMM_LEN]; + int in_size; + int err = 0; + u8 lfound = 0; + + in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->len = hdr->attr.length; + err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + if (in->len > sizeof(struct xsc_ioctl_data_tl)) { + tl = (struct xsc_ioctl_data_tl *)(in->data); + if (tl->length != sizeof(struct xsc_ioctl_mem_info)) { + kvfree(in); + return -EFAULT; + } + minfo = (struct xsc_ioctl_mem_info *)(tl + 1); + if (minfo->vir_addr && minfo->phy_addr) { + memset(tname, 0, sizeof(tname)); + get_task_comm(tname, current); + + spin_lock_irq(&priv_dev->mem_lock); + list_for_each_entry(m_ent, &priv_dev->mem_list, list) { + if ((!strcmp(m_ent->task_name, tname)) && + m_ent->mem_info.mem_num == minfo->mem_num && + m_ent->mem_info.size == minfo->size) { + if (m_ent->mem_info.phy_addr == minfo->phy_addr && + m_ent->mem_info.vir_addr == minfo->vir_addr) { + lfound = 1; + list_del(&m_ent->list); + } else { + err = -ENOMEM; + } + break; + } + } + spin_unlock_irq(&priv_dev->mem_lock); + + if (lfound) { + dma_free_coherent(&xdev->pdev->dev, + minfo->size, + (void *)minfo->vir_addr, + minfo->phy_addr); + } + } else { + kvfree(in); + return -EFAULT; + } + } + + hdr->attr.error = err; + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, in->data, in->len)) + err = -EFAULT; + + kvfree(in); + return err; +} + +static long xsc_ioctl_mem_alloc(struct xsc_priv_device *priv_dev, + struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_mem_info *minfo; + struct xsc_ioctl_data_tl *tl; + struct xsc_ioctl_mbox_in *in; + struct xsc_mem_entry *m_ent; 
+ char tname[TASK_COMM_LEN]; + u64 vaddr = 0; + u64 paddr = 0; + int in_size; + int err = 0; + u8 lfound = 0; + u8 needfree = 0; + + in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->len = hdr->attr.length; + err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + if (in->len > sizeof(struct xsc_ioctl_data_tl)) { + tl = (struct xsc_ioctl_data_tl *)(in->data); + if (tl->length != sizeof(struct xsc_ioctl_mem_info)) { + kvfree(in); + return -EFAULT; + } + minfo = (struct xsc_ioctl_mem_info *)(tl + 1); + memset(tname, 0, sizeof(tname)); + get_task_comm(tname, current); + + spin_lock_irq(&priv_dev->mem_lock); + list_for_each_entry(m_ent, &priv_dev->mem_list, list) { + if ((!strcmp(m_ent->task_name, tname)) && + m_ent->mem_info.mem_num == minfo->mem_num) { + if (m_ent->mem_info.size == minfo->size) { + minfo->phy_addr = m_ent->mem_info.phy_addr; + minfo->vir_addr = m_ent->mem_info.vir_addr; + lfound = 1; + } else { + needfree = 1; + list_del(&m_ent->list); + } + break; + } + } + spin_unlock_irq(&priv_dev->mem_lock); + + if (needfree) { + dma_free_coherent(&xdev->pdev->dev, + m_ent->mem_info.size, + (void *)m_ent->mem_info.vir_addr, + m_ent->mem_info.phy_addr); + } + + if (!lfound) { + vaddr = (u64)dma_alloc_coherent(&xdev->pdev->dev, + minfo->size, + (dma_addr_t *)&paddr, + GFP_KERNEL); + if (vaddr) { + memset((void *)vaddr, 0, minfo->size); + minfo->phy_addr = paddr; + minfo->vir_addr = vaddr; + m_ent = kzalloc(sizeof(*m_ent), GFP_KERNEL); + if (!m_ent) { + kvfree(in); + return -ENOMEM; + } + strscpy(m_ent->task_name, tname, sizeof(m_ent->task_name)); + m_ent->mem_info.mem_num = minfo->mem_num; + m_ent->mem_info.size = minfo->size; + m_ent->mem_info.phy_addr = paddr; + m_ent->mem_info.vir_addr = vaddr; + spin_lock_irq(&priv_dev->mem_lock); + list_add(&m_ent->list, &priv_dev->mem_list); + 
spin_unlock_irq(&priv_dev->mem_lock); + } else { + kvfree(in); + return -ENOMEM; + } + } + } + + hdr->attr.error = err; + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, in->data, in->len)) + err = -EFAULT; + + kvfree(in); + return err; +} + +static long xsc_priv_dev_ioctl_mem(struct file *filp, unsigned long arg) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *xdev = bdf_file->xdev; + struct xsc_priv_device *priv_dev = &xdev->priv_device; + struct xsc_ioctl_hdr __user *user_hdr = + (struct xsc_ioctl_hdr __user *)arg; + struct xsc_ioctl_hdr hdr; + int err; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + + /* check ioctl cmd */ + switch (hdr.attr.opcode) { + case XSC_IOCTL_MEM_ALLOC: + return xsc_ioctl_mem_alloc(priv_dev, xdev, user_hdr, &hdr); + case XSC_IOCTL_MEM_FREE: + return xsc_ioctl_mem_free(priv_dev, xdev, user_hdr, &hdr); + default: + return -EINVAL; + } +} + +static int xsc_priv_modify_qp(struct xsc_core_device *xdev, void *in, void *out) +{ + int ret = 0, i = 0; + struct xsc_ioctl_qp_range *resp; + struct xsc_ioctl_data_tl *tl; + int insize; + struct xsc_modify_qp_mbox_in *mailin; + struct xsc_modify_qp_mbox_out mailout; + u32 qpn; + + tl = (struct xsc_ioctl_data_tl *)out; + resp = (struct xsc_ioctl_qp_range *)(tl + 1); + xsc_core_dbg(xdev, "xsc_ioctl_qp_range: qpn:%d, num:%d, opcode:%d\n", + resp->qpn, resp->num, resp->opcode); + if (resp->num == 0) { + xsc_core_err(xdev, "xsc_ioctl_qp_range: resp->num == 0\n"); + return 0; + } + qpn = resp->qpn; + insize = sizeof(struct xsc_modify_qp_mbox_in); + mailin = kvzalloc(insize, GFP_KERNEL); + if (!mailin) + return -ENOMEM; + for (i = 0; i < resp->num; i++) { + mailin->hdr.opcode = cpu_to_be16(resp->opcode); + mailin->qpn = cpu_to_be32(qpn + i); + ret = xsc_cmd_exec(xdev, mailin, 
insize, &mailout, sizeof(mailout)); + xsc_core_dbg(xdev, "modify qp state qpn:%d\n", qpn + i); + } + kvfree(mailin); + + return ret; +} + +static int xsc_priv_dev_ioctl_get_phy(struct xsc_core_device *xdev, + void *in, void *out) +{ + int ret = 0; + struct xsc_eswitch *esw = xdev->priv.eswitch; + struct xsc_ioctl_data_tl *tl = (struct xsc_ioctl_data_tl *)out; + struct xsc_ioctl_get_phy_info_res *resp; + u16 lag_id = xsc_get_lag_id(xdev); + + switch (tl->opmod) { + case XSC_IOCTL_OP_GET_LOCAL: + resp = (struct xsc_ioctl_get_phy_info_res *)(tl + 1); + + resp->pcie_no = xdev->pcie_no; + resp->func_id = xdev->glb_func_id; + resp->pcie_host = xdev->caps.pcie_host; + resp->mac_phy_port = xdev->mac_port; + resp->funcid_to_logic_port_off = xdev->caps.funcid_to_logic_port; + resp->lag_id = lag_id; + resp->raw_qp_id_base = xdev->caps.raweth_qp_id_base; + resp->raw_rss_qp_id_base = xdev->caps.raweth_rss_qp_id_base; + resp->lag_port_start = xdev->caps.lag_logic_port_ofst; + resp->send_seg_num = xdev->caps.send_ds_num; + resp->recv_seg_num = xdev->caps.recv_ds_num; + resp->raw_tpe_qp_num = xdev->caps.raw_tpe_qp_num; + resp->chip_version = xdev->chip_ver_l; + resp->on_chip_tbl_vld = + (xdev->feature_flag & FEATURE_ONCHIP_FT_MASK) ? 1 : 0; + resp->dma_rw_tbl_vld = + (xdev->feature_flag & FEATURE_DMA_RW_TBL_MASK) ? 1 : 0; + resp->pct_compress_vld = + (xdev->feature_flag & FEATURE_PCT_EXP_MASK) ? 
1 : 0; + + xsc_core_dbg(xdev, "%d,%d,%d,%d,%d,%d\n", + resp->pcie_no, resp->func_id, resp->pcie_host, + resp->mac_phy_port, resp->lag_id, + resp->funcid_to_logic_port_off); + resp->pf0_vf_funcid_base = xdev->caps.pf0_vf_funcid_base; + resp->pf0_vf_funcid_top = xdev->caps.pf0_vf_funcid_top; + resp->pf1_vf_funcid_base = xdev->caps.pf1_vf_funcid_base; + resp->pf1_vf_funcid_top = xdev->caps.pf1_vf_funcid_top; + resp->pcie0_pf_funcid_base = xdev->caps.pcie0_pf_funcid_base; + resp->pcie0_pf_funcid_top = xdev->caps.pcie0_pf_funcid_top; + resp->pcie1_pf_funcid_base = xdev->caps.pcie1_pf_funcid_base; + resp->pcie1_pf_funcid_top = xdev->caps.pcie1_pf_funcid_top; + resp->hca_core_clock = xdev->caps.hca_core_clock; + resp->mac_bit = xdev->caps.mac_bit; + if (xsc_core_is_pf(xdev)) { + mutex_lock(&esw->mode_lock); + resp->esw_mode = esw->mode; + mutex_unlock(&esw->mode_lock); + } else { + resp->esw_mode = 0; + } + resp->board_id = xdev->board_info->board_id; + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int xsc_priv_dev_ioctl_get_force_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_pcp *resp = (struct xsc_ioctl_force_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->pcp = ib_dev->force_pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_get_force_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_dscp *resp = (struct xsc_ioctl_force_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->dscp = ib_dev->force_dscp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_force_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_pcp *req = (struct xsc_ioctl_force_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->pcp < 0 || (req->pcp > 
QOS_PCP_MAX && req->pcp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->force_pcp = req->pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_force_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_dscp *req = (struct xsc_ioctl_force_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->dscp < 0 || (req->dscp > QOS_DSCP_MAX && req->dscp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->force_dscp = req->dscp; + return 0; +} + +int xsc_priv_dev_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size) +{ + int opcode, ret = 0; + struct xsc_ioctl_attr *hdr; + + hdr = (struct xsc_ioctl_attr *)in; + opcode = hdr->opcode; + switch (opcode) { + case XSC_IOCTL_GET_PHY_INFO: + ret = xsc_priv_dev_ioctl_get_phy(xdev, in, out); + break; + case XSC_IOCTL_GET_FORCE_PCP: + xsc_core_dbg(xdev, "getting global pcp\n"); + ret = xsc_priv_dev_ioctl_get_force_pcp(xdev, in, out); + break; + case XSC_IOCTL_GET_FORCE_DSCP: + ret = xsc_priv_dev_ioctl_get_force_dscp(xdev, in, out); + break; + case XSC_IOCTL_SET_QP_STATUS: + xsc_core_dbg(xdev, "case XSC_IOCTL_SET_QP_STATUS:\n"); + ret = xsc_priv_modify_qp(xdev, in, out); + break; + case XSC_IOCTL_SET_FORCE_PCP: + xsc_core_dbg(xdev, "setting global pcp\n"); + ret = xsc_priv_dev_ioctl_set_force_pcp(xdev, in, out); + break; + case XSC_IOCTL_SET_FORCE_DSCP: + xsc_core_dbg(xdev, "setting global dscp\n"); + ret = xsc_priv_dev_ioctl_set_force_dscp(xdev, in, out); + break; + default: + ret = -EINVAL; + break; + } + + xsc_core_dbg(xdev, "xsc_priv_dev exec_ioctl.ret=%u\n", ret); + + return ret; +} + +static long xsc_priv_dev_ioctl_getinfo(struct file *filp, unsigned long arg) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *xdev = bdf_file->xdev; + struct xsc_ioctl_hdr __user *user_hdr = + (struct xsc_ioctl_hdr __user *)arg; + struct xsc_ioctl_hdr hdr; + struct xsc_ioctl_hdr 
*in; + int in_size; + int err; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + switch (hdr.attr.opcode) { + case XSC_IOCTL_GET_PHY_INFO: + case XSC_IOCTL_GET_FORCE_PCP: + case XSC_IOCTL_GET_FORCE_DSCP: + case XSC_IOCTL_SET_QP_STATUS: + case XSC_IOCTL_SET_FORCE_PCP: + case XSC_IOCTL_SET_FORCE_DSCP: + case XSC_IOCTL_GET_CONTEXT: + break; + default: + return -EINVAL; + } + in_size = sizeof(struct xsc_ioctl_hdr) + hdr.attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -EFAULT; + in->attr.opcode = hdr.attr.opcode; + in->attr.length = hdr.attr.length; + err = copy_from_user(in->attr.data, user_hdr->attr.data, hdr.attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + err = xsc_priv_dev_exec_ioctl(xdev, &in->attr, + (in_size - offsetof(struct xsc_ioctl_hdr, attr)), + in->attr.data, + hdr.attr.length); + in->attr.error = err; + if (copy_to_user((void *)arg, in, in_size)) + err = -EFAULT; + kvfree(in); + return err; +} + +static int xsc_ioctl_flow_add_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl, + char *data, unsigned int datalen) +{ + int err = 0; + struct xsc_flow_pct_v4_add *pct_v4; + struct xsc_flow_pct_v6_add *pct_v6; + + switch (tl->table) { + case XSC_FLOW_TBL_PCT_V4: + case XSC_FLOW_TBL_BM_PCT_V4: + pct_v4 = (struct xsc_flow_pct_v4_add *)(tl + 1); + err = xsc_alloc_pct_obj(file, pct_v4->priority, data, datalen); + break; + case XSC_FLOW_TBL_PCT_V6: + case XSC_FLOW_TBL_BM_PCT_V6: + pct_v6 = (struct xsc_flow_pct_v6_add *)(tl + 1); + err = xsc_alloc_pct_obj(file, pct_v6->priority, data, datalen); + break; + default: + break; + } + + return err; +} + +static void xsc_ioctl_flow_destroy_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl) +{ + struct xsc_flow_pct_v4_del *pct_v4; + struct xsc_flow_pct_v6_del *pct_v6; + + switch (tl->table) { + case XSC_FLOW_TBL_PCT_V4: + case XSC_FLOW_TBL_BM_PCT_V4: + pct_v4 = 
(struct xsc_flow_pct_v4_del *)(tl + 1); + xsc_destroy_pct_obj(file, pct_v4->priority); + break; + case XSC_FLOW_TBL_PCT_V6: + case XSC_FLOW_TBL_BM_PCT_V6: + pct_v6 = (struct xsc_flow_pct_v6_del *)(tl + 1); + xsc_destroy_pct_obj(file, pct_v6->priority); + break; + default: + break; + } +} + +static int xsc_ioctl_flow_cmdq_handle_res_obj(struct xsc_bdf_file *file, + char *data, unsigned int datalen) +{ + struct xsc_ioctl_data_tl *tl; + int err = 0; + + tl = (struct xsc_ioctl_data_tl *)data; + + switch (tl->opmod) { + case XSC_IOCTL_OP_ADD: + err = xsc_ioctl_flow_add_obj(file, tl, data, datalen); + break; + case XSC_IOCTL_OP_DEL: + xsc_ioctl_flow_destroy_obj(file, tl); + break; + default: + break; + } + + return err; +} + +static int xsc_ioctl_flow_cmdq(struct xsc_bdf_file *file, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_mbox_in *in; + struct xsc_ioctl_mbox_out *out; + int in_size; + int out_size; + int err; + + in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -EFAULT; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->len = __cpu_to_be16(hdr->attr.length); + err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + err = xsc_ioctl_flow_cmdq_handle_res_obj(file, in->data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + out_size = sizeof(struct xsc_ioctl_mbox_out) + hdr->attr.length; + out = kvzalloc(out_size, GFP_KERNEL); + if (!out) { + kvfree(in); + return -ENOMEM; + } + memcpy(out->data, in->data, hdr->attr.length); + out->len = in->len; + err = xsc_cmd_exec(file->xdev, in, in_size, out, out_size); + + hdr->attr.error = __be32_to_cpu(out->error); + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, out->data, hdr->attr.length)) + err = -EFAULT; + + kvfree(in); + kvfree(out); + 
return err; +} + +static int xsc_ioctl_modify_raw_qp(struct xsc_priv_device *priv_dev, + struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr) +{ + struct xsc_modify_raw_qp_mbox_in *in; + struct xsc_modify_raw_qp_mbox_out *out; + int err; + + if (hdr->attr.length != sizeof(struct xsc_modify_raw_qp_request)) + return -EINVAL; + + in = kvzalloc(sizeof(struct xsc_modify_raw_qp_mbox_in), GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(struct xsc_modify_raw_qp_mbox_out), GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->req, user_hdr->attr.data, + sizeof(struct xsc_modify_raw_qp_request)); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->pcie_no = xdev->pcie_no; + + err = xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), + out, sizeof(struct xsc_modify_raw_qp_mbox_out)); + + hdr->attr.error = __be32_to_cpu(out->hdr.status); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static void xsc_handle_multiqp_create(struct xsc_bdf_file *file, void *in, + unsigned int inlen, void *out) +{ + u16 qp_num = 0; + int i = 0; + struct xsc_create_qp_request *req = NULL; + void *ptr = NULL; + int len = 0; + u32 qpn_base = be32_to_cpu(((struct xsc_create_multiqp_mbox_out *)out)->qpn_base); + + qp_num = be16_to_cpu(((struct xsc_create_multiqp_mbox_in *)in)->qp_num); + ptr = ((struct xsc_create_multiqp_mbox_in *)in)->data; + for (i = 0; i < qp_num; i++) { + req = (struct xsc_create_qp_request *)ptr; + len = sizeof(struct xsc_create_qp_request) + + be16_to_cpu(req->pa_num) * sizeof(u64); + xsc_alloc_qp_obj(file, qpn_base + i, (char *)req, len); + ptr += len; + } +} + +static void xsc_pci_ctrl_cmdq_handle_res_obj(struct xsc_bdf_file *file, + void *in, unsigned int inlen, void *out, int opcode) +{ + unsigned int idx; + + 
switch (opcode) { + case XSC_CMD_OP_ALLOC_PD: + idx = be32_to_cpu(((struct xsc_alloc_pd_mbox_out *)out)->pdn); + xsc_alloc_pd_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DEALLOC_PD: + idx = be32_to_cpu(((struct xsc_dealloc_pd_mbox_in *)in)->pdn); + xsc_destroy_pd_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_MKEY: + idx = be32_to_cpu(((struct xsc_create_mkey_mbox_out *)out)->mkey); + xsc_alloc_mr_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DESTROY_MKEY: + idx = be32_to_cpu(((struct xsc_destroy_mkey_mbox_in *)in)->mkey); + xsc_destroy_mr_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_CQ: + idx = be32_to_cpu(((struct xsc_create_cq_mbox_out *)out)->cqn); + xsc_alloc_cq_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DESTROY_CQ: + idx = be32_to_cpu(((struct xsc_destroy_cq_mbox_in *)in)->cqn); + xsc_destroy_cq_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_QP: + idx = be32_to_cpu(((struct xsc_create_qp_mbox_out *)out)->qpn); + xsc_alloc_qp_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DESTROY_QP: + idx = be32_to_cpu(((struct xsc_destroy_qp_mbox_in *)in)->qpn); + xsc_destroy_qp_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_MULTI_QP: + xsc_handle_multiqp_create(file, in, inlen, out); + break; + default: + break; + } +} + +static long xsc_priv_dev_ioctl_cmdq(struct file *filp, unsigned long arg) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_priv_device *priv_dev = &bdf_file->xdev->priv_device; + struct xsc_core_device *xdev = bdf_file->xdev; + struct xsc_ioctl_hdr __user *user_hdr = + (struct xsc_ioctl_hdr __user *)arg; + struct xsc_ioctl_hdr hdr; + int err; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + + /* check ioctl cmd */ + switch (hdr.attr.opcode) { + case XSC_CMD_OP_IOCTL_FLOW: + return xsc_ioctl_flow_cmdq(bdf_file, user_hdr, &hdr); + case XSC_CMD_OP_MODIFY_RAW_QP: + 
return xsc_ioctl_modify_raw_qp(priv_dev, xdev, user_hdr, &hdr); + default: + return -EINVAL; + } +} + +static long xsc_priv_dev_ioctl_cmdq_raw(struct file *filp, unsigned long arg) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *xdev = bdf_file->xdev; + struct xsc_ioctl_hdr __user *user_hdr = + (struct xsc_ioctl_hdr __user *)arg; + struct xsc_ioctl_hdr hdr; + int err; + void *in; + void *out; + u16 out_len; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + + in = kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!in) + return -ENOMEM; + out_len = min_t(u16, hdr.attr.length, (u16)MAX_MBOX_OUT_LEN); + out = kvzalloc(out_len, GFP_KERNEL); + if (!out) { + kfree(in); + return -ENOMEM; + } + + err = copy_from_user(in, user_hdr->attr.data, hdr.attr.length); + if (err) { + err = -EFAULT; + goto err_exit; + } + + xsc_cmd_exec(xdev, in, hdr.attr.length, out, out_len); + xsc_pci_ctrl_cmdq_handle_res_obj(bdf_file, in, hdr.attr.length, out, hdr.attr.opcode); + + if (copy_to_user((void *)user_hdr, &hdr, sizeof(hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, out, out_len)) + err = -EFAULT; +err_exit: + kfree(in); + kfree(out); + return err; +} + +static int xsc_ioctl_user_mode(struct file *filp, unsigned long arg) +{ + struct xsc_bdf_file *bdf_file = filp->private_data; + struct xsc_core_device *dev = bdf_file->xdev; + struct xsc_ioctl_hdr __user *user_hdr = + (struct xsc_ioctl_hdr __user *)arg; + struct xsc_ioctl_hdr hdr; + struct xsc_ioctl_user_mode_attr *attr; + u8 *buf; + int err = 0; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) { + xsc_core_err(dev, "fail to copy from user user_hdr\n"); + return -EFAULT; + } + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) { + xsc_core_err(dev, "invalid check filed %u\n", hdr.check_filed); + return -EINVAL; + } + + buf = 
kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + err = copy_from_user(buf, user_hdr->attr.data, hdr.attr.length); + if (err) { + xsc_core_err(dev, "failed to copy ioctl user data.\n"); + kvfree(buf); + return -EFAULT; + } + + switch (hdr.attr.opcode) { + case XSC_IOCTL_OPCODE_ENABLE_USER_MODE: + attr = (struct xsc_ioctl_user_mode_attr *)buf; + xsc_set_user_mode(dev, (attr->enable ? true : false)); + if (attr->enable) + bdf_file->restore_nic_fn = xsc_eth_restore_nic_hca; + else + bdf_file->restore_nic_fn = NULL; + + break; + default: + err = -EOPNOTSUPP; + break; + } + + kvfree(buf); + return err; +} + +static long xsc_priv_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int err; + + switch (cmd) { + case XSC_IOCTL_CMDQ: + err = xsc_priv_dev_ioctl_cmdq(filp, arg); + break; + case XSC_IOCTL_DRV_GET: + case XSC_IOCTL_DRV_SET: + // TODO refactor to split driver get and set + err = xsc_priv_dev_ioctl_getinfo(filp, arg); + break; + case XSC_IOCTL_MEM: + err = xsc_priv_dev_ioctl_mem(filp, arg); + break; + case XSC_IOCTL_CMDQ_RAW: + err = xsc_priv_dev_ioctl_cmdq_raw(filp, arg); + break; + case XSC_IOCTL_USER_MODE: + err = xsc_ioctl_user_mode(filp, arg); + break; + default: + err = -EFAULT; + break; + } + return err; +} + +static const struct file_operations dev_fops = { + .owner = THIS_MODULE, + .open = xsc_priv_dev_open, + .unlocked_ioctl = xsc_priv_dev_ioctl, + .compat_ioctl = xsc_priv_dev_ioctl, + .release = xsc_priv_dev_release, +}; + +#define XSC_MAX_CDEV_NUM 1024 +static dev_t g_priv_cdev_no; +static int g_priv_cdev_cnt; +static char *g_priv_class_name = "xscale"; +static struct class *g_priv_class; +DECLARE_BITMAP(g_bitmap_cdev_id, XSC_MAX_CDEV_NUM); + +int xsc_priv_dev_init(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + int ret; + int dev_id = 0; + struct xsc_priv_device *priv_dev = &dev->priv_device; + + if (g_priv_cdev_cnt >= XSC_MAX_CDEV_NUM) { + xsc_core_err(dev, "too many xscale cdevice\n"); + 
priv_dev->devno = U32_MAX; + return -EBUSY; + } + + sprintf(priv_dev->device_name, "%s", ib_dev->name); + + xsc_core_dbg(dev, "device_name %s\n", priv_dev->device_name); + + cdev_init(&priv_dev->cdev, &dev_fops); + priv_dev->cdev.owner = THIS_MODULE; + dev_id = find_first_zero_bit(g_bitmap_cdev_id, XSC_MAX_CDEV_NUM); + priv_dev->devno = g_priv_cdev_no + dev_id; + + ret = cdev_add(&priv_dev->cdev, priv_dev->devno, 1); + if (ret) { + xsc_core_err(dev, "%s cdev_add error ret:%d major:%d\n", + priv_dev->device_name, ret, MAJOR(priv_dev->devno)); + return ret; + } + + device_create(g_priv_class, NULL, priv_dev->devno, + NULL, "%s!%s", g_priv_class_name, priv_dev->device_name); + g_priv_cdev_cnt++; + set_bit(dev_id, g_bitmap_cdev_id); + + INIT_LIST_HEAD(&priv_dev->mem_list); + spin_lock_init(&priv_dev->mem_lock); + + INIT_RADIX_TREE(&priv_dev->bdf_tree, GFP_ATOMIC); + spin_lock_init(&priv_dev->bdf_lock); + + xsc_core_dbg(dev, "init success\n"); + + return 0; +} + +void xsc_priv_dev_fini(struct ib_device *ib_dev, struct xsc_core_device *dev) +{ + struct xsc_priv_device *priv_dev; + struct cdev *char_dev; + struct xsc_bdf_file *bdf_file; + struct radix_tree_iter iter; + void **slot; + int dev_id = 0; + + if (!dev || !ib_dev) { + pr_err("[%s:%d] device is null pointer\n", __func__, __LINE__); + return; + } + + priv_dev = &dev->priv_device; + if (priv_dev->devno == U32_MAX) + return; + + char_dev = &priv_dev->cdev; + + dev_id = MINOR(priv_dev->devno); + spin_lock(&priv_dev->bdf_lock); + radix_tree_for_each_slot(slot, &priv_dev->bdf_tree, &iter, 0) { + bdf_file = (struct xsc_bdf_file *)(*slot); + xsc_close_bdf_file(bdf_file); + radix_tree_iter_delete(&priv_dev->bdf_tree, &iter, slot); + kfree(bdf_file); + } + spin_unlock(&priv_dev->bdf_lock); + device_destroy(g_priv_class, priv_dev->devno); + cdev_del(&priv_dev->cdev); + + clear_bit(dev_id, g_bitmap_cdev_id); + g_priv_cdev_cnt--; + xsc_core_dbg(dev, "fini success\n"); +} + +int xsc_priv_alloc_chrdev_region(void) +{ + int ret 
= 0; + char *device_name = "xscale"; + + ret = alloc_chrdev_region(&g_priv_cdev_no, 0, XSC_MAX_CDEV_NUM, device_name); + if (ret) { + pr_err("%s cant't get major %d\n", device_name, MAJOR(g_priv_cdev_no)); + return ret; + } + g_priv_class = class_create(g_priv_class_name); + g_priv_cdev_cnt = 0; + + return 0; +} + +void xsc_priv_unregister_chrdev_region(void) +{ + class_destroy(g_priv_class); + unregister_chrdev_region(g_priv_cdev_no, XSC_MAX_CDEV_NUM); +} diff --git a/drivers/infiniband/hw/xsc/qp.c b/drivers/infiniband/hw/xsc/qp.c new file mode 100644 index 000000000000..6df90c841af4 --- /dev/null +++ b/drivers/infiniband/hw/xsc/qp.c @@ -0,0 +1,1939 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "xsc_ib.h" +#include "user.h" +#include "common/xsc_hsi.h" +#include "common/xsc_lag.h" +#include +#include +#include + +/* not supported currently */ +static int wq_signature; + +#define MAD_QUEUE_DEPTH 128 + +enum { + XSC_IB_CACHE_LINE_SIZE = 64, +}; + +#define MAC_INVALID 0xff + +#define LAG_PORT_NUM_MASK_EN 0x80000000 +#define LAG_PORT_NUM_MASK_EN_OFFSET 31 +#define LAG_PORT_NUM_MASK 0x30000 +#define LAG_PORT_NUM_OFFSET 16 + +#define UDP_SPORT_MASK_EN 0x40000000 +#define UDP_SPORT_MASK_EN_OFFSET 30 +#define UDP_SPORT_MASK 0xffff +#define UDP_SPORT_OFFSET 0 + +static const u32 xsc_ib_opcode[] = { + [IB_WR_SEND] = XSC_MSG_OPCODE_SEND, + [IB_WR_SEND_WITH_IMM] = XSC_MSG_OPCODE_SEND, + [IB_WR_RDMA_WRITE] = XSC_MSG_OPCODE_RDMA_WRITE, + [IB_WR_RDMA_WRITE_WITH_IMM] = XSC_MSG_OPCODE_RDMA_WRITE, + [IB_WR_RDMA_READ] = XSC_MSG_OPCODE_RDMA_READ, + [IB_WR_LOCAL_INV] = XSC_MSG_OPCODE_SEND, + [IB_WR_REG_MR] = XSC_MSG_OPCODE_SEND, + [IB_WR_SEND_WITH_INV] = XSC_MSG_OPCODE_SEND, +}; + +static int is_qp0(enum ib_qp_type qp_type) +{ + return qp_type == IB_QPT_SMI; +} + +static int is_qp1(enum ib_qp_type qp_type) +{ + return qp_type == IB_QPT_GSI; +} + +static 
/* continuation of is_sqp() ('static' was emitted at the end of the previous
 * chunk): true for the special QP0/QP1 (SMI/GSI) queue pairs */
int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}

/* Kernel virtual address at byte 'offset' inside the QP work-queue buffer. */
static void *get_wqe(struct xsc_ib_qp *qp, int offset)
{
	return xsc_buf_offset(&qp->buf, offset);
}

/* Address of receive WQE n (rq.wqe_shift is log2 of the RQ WQE size). */
static void *get_recv_wqe(struct xsc_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

/* Address of the n-th basic (XSC_BASE_WQE_SHIFT-sized) segment of a WQE. */
static void *get_seg_wqe(void *first, int n)
{
	return first + (n << XSC_BASE_WQE_SHIFT);
}

/* Address of send WQE n. */
void *xsc_get_send_wqe(struct xsc_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
}

/*
 * Map an IP TOS value to a service level: prefer the (real, non-VLAN)
 * netdev's prio->tc mapping; fall back to the VLAN egress QoS map when the
 * device is a VLAN; otherwise 0.
 */
static int iboe_tos_to_sl(struct net_device *ndev, int tos)
{
	int prio;
	struct net_device *dev;

	prio = rt_tos2priority(tos);
	dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
	if (dev->num_tc)
		return netdev_get_prio_tc_map(dev, prio);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	if (is_vlan_dev(ndev))
		return (vlan_dev_get_egress_qos_mask(ndev, prio) &
			VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#endif
	return 0;
}

/* Fill a data segment describing the remote buffer of an RDMA operation.
 * WR_LE_* store little-endian as the hardware expects. */
static inline void set_remote_addr_seg(struct xsc_wqe_data_seg *remote_seg,
				       u32 msg_len, u64 remote_addr, u32 rkey)
{
	remote_seg->in_line = 0;
	WR_LE_32(remote_seg->seg_len, msg_len);
	WR_LE_32(remote_seg->mkey, rkey);
	WR_LE_64(remote_seg->va, remote_addr);
}

/* Fill a data segment from a local scatter/gather entry. */
static void set_local_data_seg(struct xsc_wqe_data_seg *data_seg, struct ib_sge *sg)
{
	data_seg->in_line = 0;
	WR_LE_32(data_seg->seg_len, sg->length);
	WR_LE_32(data_seg->mkey, sg->lkey);
	WR_LE_64(data_seg->va, sg->addr);
}

/*
 * Copy the SG list of a send WR inline into the WQE segments (head; tail
 * continues in the next chunk).
 * NOTE(review): each SGE is checked against max_inline_data individually,
 * not cumulatively - confirm the WQE has room for the sum of all inline
 * segments.
 */
static int set_data_inl_seg(struct xsc_ib_qp *qp, const struct ib_send_wr *wr, void *ctrl)
{
	struct xsc_wqe_data_seg *data_seg;
	unsigned int seg_index;
	void *addr;
	int len;
	int i;

	/* segment 0 is the control segment, data starts at index 1 */
	for (i = 0, seg_index = 1; i < wr->num_sge; ++i, ++seg_index) {
		if (likely(wr->sg_list[i].length)) {
			addr = (void *)wr->sg_list[i].addr;
			len = wr->sg_list[i].length;

			if (unlikely(len > qp->max_inline_data))
				return -ENOMEM;

			data_seg = get_seg_wqe(ctrl, seg_index);
			data_seg->in_line = 1;
			/* continuation from previous chunk: record the length
			 * and copy the payload into the inline segment */
			data_seg->len = len;
			memcpy(data_seg->in_line_data, addr, len);
		}
	}

	return 0;
}

/* Immediate data for the WQE: only *_WITH_IMM opcodes carry it. */
static __be32 send_ieth(const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;
	default:
		return 0;
	}
}

/* Translate a core-layer QP async event into an ib_event for the consumer. */
static void xsc_ib_qp_event(struct xsc_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_xibqp(qp)->ibqp;
	struct ib_event event;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case XSC_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case XSC_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("xsc_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

/*
 * Compute receive-queue geometry.  Kernel QPs derive it from device caps
 * (clamped to max_wqes); user QPs take it verbatim from the ucmd.
 * NOTE(review): ucmd->rq_wqe_count/rq_wqe_shift are user-supplied and are
 * not validated here - confirm bounds are enforced by the caller.
 */
static int set_rq_size(struct xsc_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct xsc_ib_qp *qp, struct xsc_ib_create_qp *ucmd)
{
	u32 wqe_cnt = roundup_pow_of_two(cap->max_recv_wr);

	/* Sanity check RQ size before proceeding */
	if (wqe_cnt > dev->xdev->caps.max_wqes) {
		xsc_ib_warn(dev, "max_recv_wr:%d exceed max rq depth\n", cap->max_recv_wr);
		wqe_cnt = dev->xdev->caps.max_wqes;
	}

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			qp->rq.max_gs = 1;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			qp->rq.wqe_cnt = wqe_cnt;
			qp->rq.wqe_shift = dev->xdev->caps.recv_wqe_shift;
			qp->rq.max_gs = dev->xdev->caps.recv_ds_num;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}

/* Compute send-queue geometry; returns the SQ byte size or -1 (head;
 * continues in the next chunk). */
static int calc_sq_size(struct xsc_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct xsc_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr) {
	xsc_ib_err(dev, "invalid max_send_wr:%d\n", attr->cap.max_send_wr);
		return -1;	/* caller maps this to -ENOMEM */
	}

	wqe_size = 1 << dev->xdev->caps.send_wqe_shift;
	/* inline capacity = all data segments minus ctrl/raddr segments */
	qp->max_inline_data = (dev->xdev->caps.send_ds_num - 2) * sizeof(struct xsc_wqe_data_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	qp->sq.wqe_cnt = roundup_pow_of_two(attr->cap.max_send_wr);
	qp->sq.wqe_cnt = min_t(int, qp->sq.wqe_cnt, (int)dev->xdev->caps.max_wqes);
	qp->sq.ds_cnt = qp->sq.wqe_cnt << (dev->xdev->caps.send_wqe_shift - XSC_BASE_WQE_SHIFT);
	wq_size = qp->sq.wqe_cnt * wqe_size;
	qp->sq.wqe_shift = ilog2(wqe_size);
	qp->sq.max_gs = dev->xdev->caps.send_ds_num - XSC_CTRL_SEG_NUM - XSC_RADDR_SEG_NUM;
	qp->sq.max_post = qp->sq.wqe_cnt;

	return wq_size;
}

/* Does this QP type/configuration own a receive queue? */
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}

/* ib_qp_state -> device state encoding.
 * NOTE(review): returns -1 (cast to the enum) for unknown states - callers
 * appear to pass only valid states; confirm. */
static enum xsc_qp_state to_xsc_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return XSC_QP_STATE_RST;
	case IB_QPS_INIT:	return XSC_QP_STATE_INIT;
	case IB_QPS_RTR:	return XSC_QP_STATE_RTR;
	case IB_QPS_RTS:	return XSC_QP_STATE_RTS;
	case IB_QPS_SQD:	return XSC_QP_STATE_SQD;
	case IB_QPS_SQE:	return XSC_QP_STATE_SQER;
	case IB_QPS_ERR:	return XSC_QP_STATE_ERR;
	default:		return -1;
	}
}

/* Human-readable QP state name for log messages. */
static char *qp_state_to_str(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return "RST";
	case IB_QPS_INIT:	return "INIT";
	case IB_QPS_RTR:	return "RTR";
	case IB_QPS_RTS:	return "RTS";
	case IB_QPS_SQD:	return "SQD";
	case IB_QPS_SQE:	return "SQE";
	case IB_QPS_ERR:	return "ERR";
	default:		return "UNKNOWN";
	}
}

/*
 * Create a user-space QP: pin the user buffer, build the firmware mailbox
 * and reply to user space (head; body continues in the next chunk).
 */
static int create_user_qp(struct xsc_ib_dev *dev, struct ib_pd *pd,
			  struct xsc_ib_qp *qp, struct ib_udata *udata,
			  struct xsc_create_qp_mbox_in **in,
			  struct xsc_ib_create_qp_resp *resp, int *inlen)
{
	struct xsc_ib_ucontext *context;
	struct xsc_ib_create_qp ucmd;
	int
page_shift; + int npages; + u32 offset; + int ncont; + int err; + int hw_npages; + + err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); + if (err) { + xsc_ib_err(dev, "failed to copy from udata, err=%d\n", err); + return err; + } + xsc_ib_info(dev, "buf_addr:0x%lx db_addr:0x%lx sq cnt:%u, rq cnt:%u, rq shift:%u\n", + (uintptr_t)ucmd.buf_addr, (uintptr_t)ucmd.db_addr, + ucmd.sq_wqe_count, ucmd.rq_wqe_count, ucmd.rq_wqe_shift); + + context = to_xucontext(pd->uobject->context); + + qp->sq.ds_cnt = ucmd.sq_wqe_count; + qp->sq.wqe_cnt = ucmd.sq_wqe_count; + qp->sq.wqe_shift = XSC_BASE_WQE_SHIFT; + qp->rq.ds_cnt = ucmd.rq_wqe_count; + qp->rq.wqe_cnt = ucmd.rq_wqe_count; + qp->rq.wqe_shift = XSC_BASE_WQE_SHIFT; + + qp->buf_size = (qp->sq.wqe_cnt << qp->sq.wqe_shift) + (qp->rq.wqe_cnt << qp->rq.wqe_shift); + qp->umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr, qp->buf_size, 0); + if (IS_ERR(qp->umem)) { + xsc_ib_err(dev, "umem_get failed\n"); + err = PTR_ERR(qp->umem); + goto err_uuar; + } + + xsc_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, + &ncont, NULL); + if (ncont != npages) { + page_shift = PAGE_SHIFT; + ncont = npages; + } + + hw_npages = DIV_ROUND_UP(qp->buf_size, PAGE_SIZE_4K); + err = xsc_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset); + if (err) { + xsc_ib_err(dev, "bad offset:%d\n", offset); + goto err_umem; + } + xsc_ib_info(dev, "npage:%d, page_shift:%d, ncont:%d, offset:%d, hw_npages %d\n", + npages, page_shift, ncont, offset, hw_npages); + + *inlen = sizeof(**in) + sizeof(*((*in)->req.pas)) * hw_npages; + *in = xsc_vzalloc(*inlen); + if (!*in) { + err = -ENOMEM; + goto err_umem; + } + xsc_ib_populate_pas(dev, qp->umem, page_shift, (*in)->req.pas, hw_npages, true); + (*in)->req.pa_num = cpu_to_be16(hw_npages); + + err = ib_copy_to_udata(udata, resp, sizeof(*resp)); + if (err) { + xsc_ib_err(dev, "failed to copy to udata, err=%d\n", err); + goto err_umem; + } + qp->create_type = XSC_QP_USER; + + return 0; + +err_umem: + 
ib_umem_release(qp->umem); + +err_uuar: + return err; +} + +static void destroy_qp_user(struct ib_pd *pd, struct xsc_ib_qp *qp) +{ + struct xsc_ib_ucontext *context; + + context = to_xucontext(pd->uobject->context); + ib_umem_release(qp->umem); +} + +#define MAX_QP1_SQ_HDR_SIZE_V2 512 +#define MAX_QP1_SQ_HDR_SIZE 86 + /* Ethernet header = 14 */ + /* ib_grh = 40 (provided by MAD) */ + /* ib_bth + ib_deth = 20 */ + /* MAD = 256 (provided by MAD) */ + /* iCRC = 4 */ +#define MAX_QP1_RQ_HDR_SIZE_V2 512 + +static int create_kernel_qp(struct xsc_ib_dev *dev, + struct ib_qp_init_attr *init_attr, + struct xsc_ib_qp *qp, + struct xsc_create_qp_mbox_in **in, int *inlen) +{ + int err; + int sq_size; + int hw_npages; + + sq_size = calc_sq_size(dev, init_attr, qp); + if (sq_size < 0) { + err = -ENOMEM; + xsc_ib_err(dev, "err %d\n", err); + return err; + } + + qp->rq.ds_cnt = qp->rq.wqe_cnt << (qp->rq.wqe_shift - XSC_BASE_WQE_SHIFT); + qp->rq.offset = 0; + qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; + qp->buf_size = qp->sq.offset + sq_size; + qp->send_psn = 0; + + err = xsc_buf_alloc(dev->xdev, qp->buf_size, PAGE_SIZE, &qp->buf); + if (err) { + xsc_ib_err(dev, "failed to alloc qp buffer,err=%d\n", err); + return err; + } + + qp->sq.qend = qp->buf.direct.buf + qp->sq.offset + sq_size; + hw_npages = DIV_ROUND_UP(qp->buf_size, PAGE_SIZE_4K); + *inlen = sizeof(**in) + sizeof(*(*in)->req.pas) * hw_npages; + *in = xsc_vzalloc(*inlen); + if (!*in) { + err = -ENOMEM; + goto err_buf; + } + + xsc_fill_page_array(&qp->buf, (*in)->req.pas, hw_npages); + (*in)->req.pa_num = cpu_to_be16(hw_npages); + + qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.wrid), GFP_KERNEL); + qp->sq.wr_data = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.wr_data), GFP_KERNEL); + qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(*qp->rq.wrid), GFP_KERNEL); + qp->sq.w_list = kmalloc_array(qp->sq.wqe_cnt, sizeof(*qp->sq.w_list), GFP_KERNEL); + qp->sq.wqe_head = kmalloc_array(qp->sq.wqe_cnt, 
sizeof(*qp->sq.wqe_head), GFP_KERNEL);	/* continues the kmalloc_array() call */

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = XSC_QP_KERNEL;

	if (init_attr->qp_type == IB_QPT_GSI) {
		/* GSI QPs need a coherent buffer for building MAD headers */
		qp->sq.mad_index = 0;
		qp->sq.mad_queue_depth = MAD_QUEUE_DEPTH;
		qp->sq.hdr_size = MAX_QP1_SQ_HDR_SIZE_V2 * MAD_QUEUE_DEPTH;
		qp->sq.hdr_buf = dma_alloc_coherent(dev->ib_dev.dma_device,
						    qp->sq.hdr_size,
						    &qp->sq.hdr_dma,
						    GFP_KERNEL);
		if (!qp->sq.hdr_buf) {
			err = -ENOMEM;
			xsc_ib_err(dev, "Failed to create sq_hdr_buf");
			goto err_wrid;
		}
	}

	return 0;

err_wrid:
	/* kfree(NULL) is a no-op, so partial allocation is handled here */
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);

err_buf:
	xsc_buf_free(dev->xdev, &qp->buf);
	return err;
}

/* Mirror of create_kernel_qp(): free MAD header buffer, arrays and WQ buf. */
static void destroy_qp_kernel(struct xsc_ib_dev *dev, struct xsc_ib_qp *qp)
{
	if (qp->sq.hdr_buf)
		dma_free_coherent(dev->ib_dev.dma_device, qp->sq.hdr_size,
				  qp->sq.hdr_buf, qp->sq.hdr_dma);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);
	xsc_buf_free(dev->xdev, &qp->buf);
}

/* ib_qp_type (+ create flags) -> device queue type; INVALID if unsupported. */
static u8 ib_to_xsc_qp_type(enum ib_qp_type qp_type, __u32 flags)
{
	if (qp_type == IB_QPT_RC) {
		return XSC_QUEUE_TYPE_RDMA_RC;
	} else if ((qp_type == IB_QPT_GSI) || (qp_type == IB_QPT_SMI)) {
		return XSC_QUEUE_TYPE_RDMA_MAD;
	} else if (qp_type == IB_QPT_RAW_PACKET) {
		if (flags & XSC_QP_FLAG_RAWPACKET_TSO)
			return XSC_QUEUE_TYPE_RAW_TSO;
		else if (flags & XSC_QP_FLAG_RAWPACKET_TX)
			return XSC_QUEUE_TYPE_RAW_TX;
		else
			return XSC_QUEUE_TYPE_RAW;
	} else {
		return XSC_QUEUE_TYPE_INVALID;
	}
}

/* Shared QP creation path for user and kernel QPs (head; body continues in
 * the next chunk). */
static int create_qp_common(struct xsc_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, struct xsc_ib_qp *qp)
{
	struct xsc_ib_resources *devr = &dev->devr;
	struct xsc_ib_create_qp_resp resp;
	struct xsc_create_qp_mbox_in *in = NULL;
struct xsc_ib_create_qp ucmd; + int inlen = sizeof(*in); + int err; + char buf[256]; + char *ptr = buf; + int ret = 0; + + mutex_init(&qp->mutex); + spin_lock_init(&qp->sq.lock); + spin_lock_init(&qp->rq.lock); + spin_lock_init(&qp->lock); + + if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) + qp->sq_signal_bits = XSC_WQE_CTRL_CQ_UPDATE; + + if (pd && pd->uobject) { + if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { + xsc_ib_err(dev, "failed to copy from udata\n"); + return -EFAULT; + } + + qp->wq_sig = !!(ucmd.flags & XSC_QP_FLAG_SIGNATURE); + qp->scat_cqe = !!(ucmd.flags & XSC_QP_FLAG_SCATTER_CQE); + } else { + qp->wq_sig = !!wq_signature; + } + + qp->has_rq = qp_has_rq(init_attr); + + err = set_rq_size(dev, &init_attr->cap, qp->has_rq, + qp, (pd && pd->uobject) ? &ucmd : NULL); + if (err) { + xsc_ib_err(dev, "failed to set rq size %d\n", err); + return err; + } + + if (pd) { + if (pd->uobject) { + err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); + if (err) + xsc_ib_err(dev, "failed to create user qp, err = %d\n", err); + } else { + err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); + if (err) + xsc_ib_err(dev, "failed to create kernel qp, err = %d\n", err); + else + qp->pa_lkey = to_mpd(pd)->pa_lkey; + } + + if (err) + return err; + } else { + in = xsc_vzalloc(sizeof(*in)); + if (!in) + return -ENOMEM; + + qp->create_type = XSC_QP_EMPTY; + } + + if (is_sqp(init_attr->qp_type)) + qp->port = init_attr->port_num; + + in->req.qp_type = init_attr->qp_type; + if (is_qp1(init_attr->qp_type)) + in->req.input_qpn = cpu_to_be16(1); + + if (init_attr->qp_type != XSC_IB_QPT_REG_UMR) + in->req.pdn = cpu_to_be32(to_mpd(pd ? 
pd : devr->p0)->pdn); + + if (qp->rq.ds_cnt) + in->req.log_rq_sz = ilog2(qp->rq.ds_cnt); + + if (qp->sq.ds_cnt) + in->req.log_sq_sz = ilog2(qp->sq.ds_cnt); + else + in->req.log_sq_sz = ilog2(0x80); + + if (init_attr->send_cq) { + qp->send_cq = init_attr->send_cq; + in->req.cqn_send = to_xcq(init_attr->send_cq)->xcq.cqn; + in->req.cqn_send = cpu_to_be16(in->req.cqn_send); +#ifndef MSIX_SUPPORT + init_attr->send_cq->comp_handler(init_attr->send_cq, + init_attr->send_cq->cq_context); +#endif + } + + if (init_attr->recv_cq) { + qp->recv_cq = init_attr->recv_cq; + in->req.cqn_recv = to_xcq(init_attr->recv_cq)->xcq.cqn; + in->req.cqn_recv = cpu_to_be16(in->req.cqn_recv); + } + + in->req.qp_type = ib_to_xsc_qp_type(init_attr->qp_type, ucmd.flags); + + if (in->req.qp_type == XSC_QUEUE_TYPE_INVALID) { + xsc_ib_err(dev, "invalid qp type:%d\n", init_attr->qp_type); + goto err_create; + } + in->req.glb_funcid = cpu_to_be16(dev->xdev->glb_func_id); + + qp->xqp.qp_type_internal = in->req.qp_type; + + err = xsc_core_create_qp(dev->xdev, &qp->xqp, in, inlen); + if (err) { + xsc_ib_err(dev, "create qp failed, err=%d\n", err); + goto err_create; + } + + qp->doorbell_qpn = qp->xqp.qpn; + + qp->xqp.event = xsc_ib_qp_event; + qp->xqp.qp_type = init_attr->qp_type; + ret += snprintf(ptr + ret, 256 - ret, "pdn=%d,", to_mpd(pd ? 
pd : devr->p0)->pdn); + ret += snprintf(ptr + ret, 256 - ret, "log_rq_sz=%d,", in->req.log_rq_sz); + ret += snprintf(ptr + ret, 256 - ret, "log_sq_sz=%d,", in->req.log_sq_sz); + ret += snprintf(ptr + ret, 256 - ret, "scqn=%d,", to_xcq(init_attr->send_cq)->xcq.cqn); + ret += snprintf(ptr + ret, 256 - ret, "rcqn=%d", to_xcq(init_attr->recv_cq)->xcq.cqn); + + xsc_ib_info(dev, "succeeded to create qp:%d, %s\n", qp->xqp.qpn, buf); + + xsc_vfree(in); + + return 0; + +err_create: + if (qp->create_type == XSC_QP_USER) + destroy_qp_user(pd, qp); + else if (qp->create_type == XSC_QP_KERNEL) + destroy_qp_kernel(dev, qp); + + xsc_vfree(in); + return err; +} + +static void xsc_ib_lock_cqs(struct xsc_ib_cq *send_cq, struct xsc_ib_cq *recv_cq) + __acquires(&send_cq->lock) __acquires(&recv_cq->lock) +{ + if (send_cq) { + if (recv_cq) { + if (send_cq->xcq.cqn < recv_cq->xcq.cqn) { + spin_lock_irq(&send_cq->lock); + spin_lock_nested(&recv_cq->lock, + SINGLE_DEPTH_NESTING); + } else if (send_cq->xcq.cqn == recv_cq->xcq.cqn) { + spin_lock_irq(&send_cq->lock); + __acquire(&recv_cq->lock); + } else { + spin_lock_irq(&recv_cq->lock); + spin_lock_nested(&send_cq->lock, + SINGLE_DEPTH_NESTING); + } + } else { + spin_lock_irq(&send_cq->lock); + } + } else if (recv_cq) { + spin_lock_irq(&recv_cq->lock); + } +} + +static void xsc_ib_unlock_cqs(struct xsc_ib_cq *send_cq, struct xsc_ib_cq *recv_cq) + __releases(&send_cq->lock) __releases(&recv_cq->lock) +{ + if (send_cq) { + if (recv_cq) { + if (send_cq->xcq.cqn < recv_cq->xcq.cqn) { + spin_unlock(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + } else if (send_cq->xcq.cqn == recv_cq->xcq.cqn) { + __release(&recv_cq->lock); + spin_unlock_irq(&send_cq->lock); + } else { + spin_unlock(&send_cq->lock); + spin_unlock_irq(&recv_cq->lock); + } + } else { + spin_unlock_irq(&send_cq->lock); + } + } else if (recv_cq) { + spin_unlock_irq(&recv_cq->lock); + } +} + +static struct xsc_ib_pd *get_pd(struct xsc_ib_qp *qp) +{ + return to_mpd(qp->ibqp.pd); 
}

/* Select the send/recv CQ pointers associated with a QP by transport type. */
static void get_cqs(struct xsc_ib_qp *qp,
		    struct xsc_ib_cq **send_cq, struct xsc_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	case XSC_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = to_xcq(qp->ibqp.send_cq);
		*recv_cq = NULL;
		break;

	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
		*send_cq = to_xcq(qp->ibqp.send_cq);
		*recv_cq = to_xcq(qp->ibqp.recv_cq);
		break;

	case IB_QPT_RAW_PACKET:
	case IB_QPT_MAX:
	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}

/*
 * Common teardown: force the QP to RESET, scrub its CQEs (kernel QPs),
 * destroy it in firmware and free its resources.
 * NOTE(review): if kzalloc() fails the function returns without destroying
 * the QP at all - the firmware object and buffers leak on OOM.
 * NOTE(review): the 5th argument to xsc_core_qp_modify() is sizeof(*in)
 * here but 'sqd_event' in __xsc_ib_modify_qp() - one of the two call sites
 * looks wrong; confirm the parameter's meaning.
 */
static void destroy_qp_common(struct xsc_ib_dev *dev, struct xsc_ib_qp *qp)
{
	struct xsc_ib_cq *send_cq, *recv_cq;
	struct xsc_modify_qp_mbox_in *in;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return;

	/* raw queues are reset unconditionally; others only if not already RESET */
	if (qp->xqp.qp_type_internal == XSC_QUEUE_TYPE_RAW ||
	    qp->xqp.qp_type_internal == XSC_QUEUE_TYPE_RAW_TSO ||
	    qp->xqp.qp_type_internal == XSC_QUEUE_TYPE_RAW_TX ||
	    qp->state != IB_QPS_RESET)
		if (xsc_core_qp_modify(dev->xdev, to_xsc_state(qp->state),
				       XSC_QP_STATE_RST, in, sizeof(*in), &qp->xqp))
			xsc_ib_warn(dev, "modify QP %06x to RESET failed\n", qp->xqp.qpn);

	get_cqs(qp, &send_cq, &recv_cq);

	if (qp->create_type == XSC_QP_KERNEL) {
		xsc_ib_lock_cqs(send_cq, recv_cq);
		__xsc_ib_cq_clean(recv_cq, qp->xqp.qpn);
		if (send_cq != recv_cq)
			__xsc_ib_cq_clean(send_cq, qp->xqp.qpn);
		xsc_ib_unlock_cqs(send_cq, recv_cq);
	}

	err = xsc_core_destroy_qp(dev->xdev, &qp->xqp);
	if (err)
		xsc_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->xqp.qpn);
	kfree(in);

	if (qp->create_type == XSC_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == XSC_QP_USER)
		destroy_qp_user(&get_pd(qp)->ibpd, qp);
}

/* QP type name for log messages (head; continues in the next chunk). */
static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";	/* continuation of ib_qp_type_str() */
	case IB_QPT_RC:
		return "IB_QPT_RC";
	case IB_QPT_UC:
		return "IB_QPT_UC";
	case IB_QPT_UD:
		return "IB_QPT_UD";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case XSC_IB_QPT_REG_UMR:
		return "XSC_IB_QPT_REG_UMR";
	case IB_QPT_MAX:
	default:
		return "Invalid QP type";
	}
}

/*
 * Verbs entry point: create a QP.  Validates the transport/PD combination,
 * checks RDMA capability, then delegates to create_qp_common() and assigns
 * the externally visible QP number (0/1 for the special QPs).
 */
int xsc_ib_create_qp(struct ib_qp *ibqp,
		     struct ib_qp_init_attr *init_attr,
		     struct ib_udata *udata)
{
	struct xsc_ib_dev *dev;
	struct xsc_ib_qp *qp;
	struct ib_pd *pd = ibqp->pd;
	int err;

	if (pd) {
		dev = to_mdev(pd->device);
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != XSC_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return RET_VALUE(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}

	/* non-raw QPs need RDMA support; GSI additionally needs RDMA CM */
	if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
		if (!is_support_rdma(dev->xdev) ||
		    (is_qp1(init_attr->qp_type) && !is_support_rdma_cm(dev->xdev))) {
			return RET_VALUE(-EPROTONOSUPPORT);
		}
	}

	qp = to_xqp(ibqp);

	qp->xqp.mac_id = MAC_INVALID;	/* no LAG port bound yet */

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_RAW_PACKET:
		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			xsc_ib_err(dev, "create_qp_common failed\n");
			return RET_VALUE(err);
		}

		if (is_qp0(init_attr->qp_type)) {
			qp->ibqp.qp_num = 0;
		} else if (is_qp1(init_attr->qp_type)) {
			qp->ibqp.qp_num = 1;
			dev->xdev->gsi_qpn = qp->xqp.qpn;
		} else {
			qp->ibqp.qp_num = qp->xqp.qpn;
		}

		break;

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_MAX:
	default:
		xsc_ib_err(dev, "unsupported qp type %d\n",
			   init_attr->qp_type);
		/*
 Don't support raw QPs */
		return RET_VALUE(-EINVAL);
	}

	return 0;
}

/* Verbs destroy: tear down the QP and drop its LAG port refcount. */
xsc_ib_destroy_qp_def()
{
	struct xsc_ib_dev *dev = to_mdev(qp->device);
	struct xsc_ib_qp *xqp = to_xqp(qp);
	struct xsc_core_device *xdev = dev->xdev;
	struct xsc_lag *lag;

	destroy_qp_common(dev, xqp);

	xsc_board_lag_lock(xdev);
	if (xqp->xqp.mac_id != MAC_INVALID && xsc_lag_is_roce(xdev)) {
		lag = xsc_get_lag(xdev);
		atomic_dec(&lag->qp_cnt[xqp->xqp.mac_id]);
	}
	xsc_board_lag_unlock(xdev);

	return 0;
}

/*
 * Derive a deterministic RoCEv2 UDP source port from the local/remote QPNs.
 * The top two bits are forced (| 0xC000) so the result lands in the dynamic
 * port range.  NOTE(review): reading 'ports' through a u16* makes the value
 * byte-order dependent; both bytes are QPN-derived so any stable value works,
 * but confirm both ends don't need to agree on the exact port.
 */
static inline u16 xsc_calc_udp_sport(u32 lqpn, u32 rqpn)
{
	unsigned char *p;
	u8 ports[2];
	u16 sport;
	u64 tqpn;

	tqpn = ((u64)(lqpn & 0xffffff)) * ((u64)(rqpn & 0xffffff));
	p = (unsigned char *)&tqpn;
	ports[0] = p[0] ^ p[2] ^ p[4];
	ports[1] = p[1] ^ p[3] ^ p[5];
	sport = *((u16 *)ports) | 0xC000;

	return sport;
}

/*
 * Choose the UDP source port for a path: caller-forced value encoded in
 * flow_label, flow-label hash, or the QPN-derived fallback above.
 */
static inline void xsc_path_set_udp_sport(struct xsc_qp_path *path,
					  const struct rdma_ah_attr *ah,
					  u32 lqpn, u32 rqpn)
{
	if ((ah->grh.flow_label & UDP_SPORT_MASK) != 0) {
		if ((ah->grh.flow_label & UDP_SPORT_MASK_EN) == 0)
			path->sport = cpu_to_be16(xsc_flow_label_to_udp_sport(ah->grh.flow_label));
		else
			path->sport = cpu_to_be16((ah->grh.flow_label & UDP_SPORT_MASK) >>
						  UDP_SPORT_OFFSET);
	} else {
		path->sport = cpu_to_be16(xsc_calc_udp_sport(lqpn, rqpn));
	}
}

/* Resolve an address-handle attribute into the device path structure
 * (head; body continues in the next chunk). */
static int xsc_set_path(struct xsc_ib_dev *dev, const struct rdma_ah_attr *ah,
			struct xsc_qp_path *path, u8 port, int attr_mask,
			u32 path_flags, const struct ib_qp_attr *attr, struct xsc_ib_qp *qp)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh((struct rdma_ah_attr *)ah);
	union ib_gid *dgid = &grh->dgid;
	const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
	union ib_gid *sgid = &((struct ib_gid_attr *)sgid_attr)->gid;
	union {
		struct sockaddr _sockaddr;
		struct sockaddr_in _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;
	int force_pcp, force_dscp;
	char buf[256] = {0};
	char *ptr = buf;
	int ret
= 0; + + if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) { + if (!(rdma_ah_get_ah_flags(ah) & IB_AH_GRH)) + return -EINVAL; + + if (qp->ibqp.qp_type == IB_QPT_RC || + qp->ibqp.qp_type == IB_QPT_UC || + qp->ibqp.qp_type == IB_QPT_XRC_INI || + qp->ibqp.qp_type == IB_QPT_XRC_TGT) + xsc_path_set_udp_sport(path, ah, qp->ibqp.qp_num, attr->dest_qp_num); + + if (sgid_attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) { + xsc_ib_err(dev, "gid type not ROCEv2\n"); + return -EINVAL; + } + + force_dscp = dev->force_dscp; + if (force_dscp == DSCP_PCP_UNSET) + path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f; + else + path->ecn_dscp = force_dscp; + path->hop_limit = grh->hop_limit; + + rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid); + rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid); + + if (sgid_addr._sockaddr.sa_family == AF_INET && + dgid_addr._sockaddr.sa_family == AF_INET) { + memcpy(path->sip, &sgid_addr._sockaddr_in.sin_addr.s_addr, + sizeof(struct in_addr)); + memcpy(path->dip, &dgid_addr._sockaddr_in.sin_addr.s_addr, + sizeof(struct in_addr)); + path->af_type = AF_INET; + ret += snprintf(ptr + ret, 256 - ret, "sip=%#x,", + be32_to_cpu(path->sip[0])); + ret += snprintf(ptr + ret, 256 - ret, "dip=%#x,", + be32_to_cpu(path->dip[0])); + } else if (sgid_addr._sockaddr.sa_family == AF_INET6 && + dgid_addr._sockaddr.sa_family == AF_INET6) { + memcpy(path->sip, &sgid_addr._sockaddr_in6.sin6_addr.s6_addr, + sizeof(path->sip)); + memcpy(path->dip, &dgid_addr._sockaddr_in6.sin6_addr.s6_addr, + sizeof(path->dip)); + path->af_type = AF_INET6; + ret += snprintf(ptr + ret, 256 - ret, "sip=%08x%08x%08x%08x,", + be32_to_cpu(path->sip[0]), be32_to_cpu(path->sip[1]), + be32_to_cpu(path->sip[2]), be32_to_cpu(path->sip[3])); + ret += snprintf(ptr + ret, 256 - ret, "dip=%08x%08x%08x%08x,", + be32_to_cpu(path->dip[0]), be32_to_cpu(path->dip[1]), + be32_to_cpu(path->dip[2]), be32_to_cpu(path->dip[3])); + } else { + return -EINVAL; + } + + ether_addr_copy(path->smac, dev->netdev->dev_addr); + + 
memcpy(path->dmac, ah->roce.dmac, sizeof(ah->roce.dmac)); + ret += snprintf(ptr + ret, 256 - ret, "smac=%02x%02x%02x%02x%02x%02x,", + path->smac[0], path->smac[1], path->smac[2], + path->smac[3], path->smac[4], path->smac[5]); + ret += snprintf(ptr + ret, 256 - ret, "dmac=%02x%02x%02x%02x%02x%02x", + path->dmac[0], path->dmac[1], path->dmac[2], + path->dmac[3], path->dmac[4], path->dmac[5]); + xsc_ib_info(dev, "ib path info:%s\n", buf); + + if (is_vlan_dev(sgid_attr->ndev)) { + path->vlan_valid = 1; + path->vlan_id = cpu_to_be16(vlan_dev_vlan_id(sgid_attr->ndev)); + + force_pcp = dev->force_pcp; + if (force_pcp == DSCP_PCP_UNSET) + path->dci_cfi_prio_sl = (ah->sl & 0x7); + else + path->dci_cfi_prio_sl = force_pcp; + } else { + path->vlan_valid = 0; + } + } + xsc_ib_info(dev, "path dscp %d pcp %d\n", path->ecn_dscp, path->dci_cfi_prio_sl); + return 0; +} + +static inline u8 __xsc_get_min_qp_cnt_mac(struct xsc_lag *lag) +{ + int array_size = lag->xsc_member_cnt; + int min = atomic_read(&lag->qp_cnt[0]); + u8 mac_index = 0, i; + + for (i = 0; i < array_size; i++) { + if (atomic_read(&lag->qp_cnt[i]) < min) { + min = atomic_read(&lag->qp_cnt[i]); + mac_index = i; + } + } + + return mac_index; +} +static int __xsc_ib_modify_qp(struct ib_qp *ibqp, + const struct ib_qp_attr *attr, int attr_mask, + enum ib_qp_state cur_state, enum ib_qp_state new_state) +{ + struct xsc_ib_dev *dev = to_mdev(ibqp->device); + struct xsc_ib_qp *qp = to_xqp(ibqp); + struct xsc_ib_cq *send_cq, *recv_cq; + struct xsc_qp_context *context; + struct xsc_modify_qp_mbox_in *in; + struct xsc_qp_path path; + int sqd_event; + int err; + struct xsc_lag *lag; + u8 lag_port_num; + char buf[256] = {0}; + char *ptr = buf; + int ret = 0; + struct xsc_core_device *xdev = dev->xdev; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + context = &qp->ctx; + + if (attr_mask & IB_QP_PATH_MTU) { + if (attr->path_mtu != IB_MTU_1024 && + attr->path_mtu != IB_MTU_4096) { + xsc_ib_warn(dev, 
"invalid mtu %d\n", attr->path_mtu); + } + + context->mtu_mode = (attr->path_mtu < IB_MTU_4096) ? 0 : 1; + ret = snprintf(ptr, 256, "path_mtu=%d,", attr->path_mtu); + } + + if (attr_mask & IB_QP_DEST_QPN) { + context->remote_qpn = cpu_to_be32(attr->dest_qp_num); + ret += snprintf(ptr + ret, 256 - ret, "dest_qp_num=%d,", attr->dest_qp_num); + } + + if (attr_mask & IB_QP_AV) { + err = xsc_set_path(dev, &attr->ah_attr, &path, + attr_mask & IB_QP_PORT ? attr->port_num : qp->port, + attr_mask, 0, attr, qp); + if (err) + goto out; + + context->src_udp_port = path.sport; + context->dscp = path.ecn_dscp; + context->hop_limit = path.hop_limit; + context->ip_type = (path.af_type == AF_INET ? 0 : 1); + context->ip_type = cpu_to_be16(context->ip_type); + memcpy(context->dip, path.dip, sizeof(context->dip)); + memcpy(context->sip, path.sip, sizeof(context->sip)); + memcpy(context->dmac, path.dmac, sizeof(path.dmac)); + memcpy(context->smac, path.smac, sizeof(path.smac)); + + context->vlan_valid = path.vlan_valid; + context->dci_cfi_prio_sl = path.dci_cfi_prio_sl; + context->vlan_id = path.vlan_id; + + xsc_board_lag_lock(xdev); + if (xsc_lag_is_roce(xdev)) { + lag = xsc_get_lag(xdev); + context->lag_id = cpu_to_be16(lag->lag_id); + context->lag_sel_en = 1; + lag_port_num = lag->xsc_member_cnt; + if ((attr->ah_attr.grh.flow_label & LAG_PORT_NUM_MASK_EN) != 0) { + context->lag_sel = ((attr->ah_attr.grh.flow_label & + LAG_PORT_NUM_MASK) >> + LAG_PORT_NUM_OFFSET) % + lag_port_num; + } else { + context->lag_sel = __xsc_get_min_qp_cnt_mac(lag); + } + + if (qp->xqp.mac_id != MAC_INVALID && + context->lag_sel != qp->xqp.mac_id) + atomic_dec(&lag->qp_cnt[qp->xqp.mac_id]); + + qp->xqp.mac_id = context->lag_sel; + atomic_inc(&lag->qp_cnt[qp->xqp.mac_id]); + } + xsc_board_lag_unlock(xdev); + } + + if (attr_mask & IB_QP_RNR_RETRY) { + context->rnr_retry = attr->rnr_retry; + ret += snprintf(ptr + ret, 256 - ret, "rnr_retry=%d,", attr->rnr_retry); + } + + if (attr_mask & IB_QP_RETRY_CNT) { + 
context->retry_cnt = attr->retry_cnt; + ret += snprintf(ptr + ret, 256 - ret, "retry_cnt=%d,", attr->retry_cnt); + } + + if (attr_mask & IB_QP_SQ_PSN) { + context->next_send_psn = cpu_to_be32(attr->sq_psn); + ret += snprintf(ptr + ret, 256 - ret, "sq_psn=%#x,", attr->sq_psn); + } + + if (attr_mask & IB_QP_RQ_PSN) { + context->next_recv_psn = cpu_to_be32(attr->rq_psn); + ret += snprintf(ptr + ret, 256 - ret, "rq_psn=%#x,", attr->rq_psn); + } + + if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && + attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) + sqd_event = 1; + else + sqd_event = 0; + + memcpy(&in->ctx, context, sizeof(*context)); + err = xsc_core_qp_modify(xdev, to_xsc_state(cur_state), + to_xsc_state(new_state), in, sqd_event, + &qp->xqp); + if (err) { + xsc_ib_err(dev, "failed to modify qp[%d] from %s to %s\n", + qp->xqp.qpn, qp_state_to_str(cur_state), qp_state_to_str(new_state)); + goto out; + } + + qp->state = new_state; + xsc_ib_info(dev, "succeeded to modify qp[%d] from %s to %s with attr_mask=%#x, %s\n", + qp->xqp.qpn, qp_state_to_str(cur_state), qp_state_to_str(new_state), + attr_mask, buf); + + if (attr_mask & IB_QP_ACCESS_FLAGS) + qp->atomic_rd_en = attr->qp_access_flags; + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) + qp->resp_depth = attr->max_dest_rd_atomic; + if (attr_mask & IB_QP_PORT) + qp->port = attr->port_num; + if (attr_mask & IB_QP_ALT_PATH) + qp->alt_port = attr->alt_port_num; + + /* + * If we moved a kernel QP to RESET, clean up all old CQ + * entries and reinitialize the QP. 
+ */ + if (new_state == IB_QPS_RESET && !ibqp->uobject) { + get_cqs(qp, &send_cq, &recv_cq); + xsc_ib_cq_clean(recv_cq, qp->xqp.qpn); + if (send_cq != recv_cq) + xsc_ib_cq_clean(send_cq, qp->xqp.qpn); + + qp->rq.head = 0; + qp->rq.tail = 0; + qp->sq.head = 0; + qp->sq.tail = 0; + qp->sq.cur_post = 0; + qp->sq.last_poll = 0; + } + +out: + kfree(in); + return err; +} + +int xsc_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct xsc_ib_dev *dev = to_mdev(ibqp->device); + struct xsc_ib_qp *qp = to_xqp(ibqp); + enum ib_qp_state cur_state, new_state; + int err = -EINVAL; + + mutex_lock(&qp->mutex); + + cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; + new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; + + if ((attr_mask & IB_QP_PORT) && + (attr->port_num == 0 || attr->port_num > dev->xdev->caps.num_ports)) { + xsc_ib_err(dev, "error port num\n"); + goto out; + } + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && + attr->max_rd_atomic > dev->xdev->caps.max_ra_res_qp) { + xsc_ib_err(dev, "rd atomic:%u exeeded", attr->max_rd_atomic); + goto out; + } + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && + attr->max_dest_rd_atomic > dev->xdev->caps.max_ra_req_qp) { + xsc_ib_err(dev, "dest rd atomic:%u exeeded", attr->max_dest_rd_atomic); + goto out; + } + + if (cur_state == new_state && cur_state == IB_QPS_RESET) { + err = 0; + goto out; + } + + err = __xsc_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); +out: + mutex_unlock(&qp->mutex); + return err; +} + +static int xsc_wq_overflow(struct xsc_ib_wq *wq, int nreq, struct xsc_ib_cq *cq) +{ + unsigned int cur; + + cur = wq->head - wq->tail; + if (likely(cur + nreq < wq->max_post)) + return 0; + + spin_lock(&cq->lock); + cur = wq->head - wq->tail; + spin_unlock(&cq->lock); + + return cur + nreq >= wq->max_post; +} + +static inline void xsc_post_send_db(struct xsc_ib_qp *qp, + struct xsc_core_device *xdev, + int nreq) +{ + u16 
next_pid; + union xsc_db_data db; + + if (unlikely(!nreq)) + return; + + qp->sq.head += nreq; + + next_pid = qp->sq.head << (qp->sq.wqe_shift - XSC_BASE_WQE_SHIFT); + db.sq_next_pid = next_pid; + db.sqn = qp->doorbell_qpn; + /* + * Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + writel(db.raw_data, REG_ADDR(xdev, xdev->regs.tx_db)); +} + +static inline u32 xsc_crc32(struct xsc_ib_dev *dev, u32 crc, u8 *buf, size_t len) +{ + u32 i; + + for (i = 0; i < len; i++) + crc = dev->crc_32_table[(crc ^ buf[i]) & 0xff] ^ (crc >> 8); + + return crc; +} + +#define BTH_QPN_MASK (0x00ffffff) +#define BTH_PSN_MASK (0x00ffffff) + +/* Compute a partial ICRC for all the IB transport headers. */ +u32 xsc_icrc_hdr(struct xsc_ib_dev *dev, void *pkt, u32 size, u32 *icrc) +{ + struct iphdr *ip4h = NULL; + struct ipv6hdr *ip6h = NULL; + struct udphdr *udph; + struct ib_unpacked_eth *eth; + struct rxe_bth *bth; + struct ib_unpacked_deth *deth; + struct ib_unpacked_vlan *vlan; + int crc; + int crc_field_len; + __be16 l3_type; + u8 *l3_start; + + int hdr_size; + + /* pseudo header buffer size is calculate using ipv6 header size since + * it is bigger than ipv4 + */ + u8 pshdr[sizeof(struct udphdr) + + sizeof(struct ipv6hdr) + + sizeof(*bth) + sizeof(*deth)]; + + eth = pkt; + + if (eth->type == htons(ETH_P_8021Q)) { + vlan = (struct ib_unpacked_vlan *)(eth + 1); + l3_type = vlan->type; + l3_start = (u8 *)(vlan + 1); + size -= 4; + } else { + l3_type = eth->type; + l3_start = (u8 *)(eth + 1); + } + + hdr_size = sizeof(struct udphdr) + + (l3_type == htons(ETH_P_IP) ? 
+ sizeof(struct iphdr) : sizeof(struct ipv6hdr)); + + crc_field_len = hdr_size + sizeof(*bth) + sizeof(*deth); + + if (crc_field_len != size) { + xsc_ib_err(dev, "Unmatched hdr: expect %d actual %d\n", + crc_field_len, size); + return -EINVAL; + } + + ip4h = (struct iphdr *)(l3_start); + ip6h = (struct ipv6hdr *)(l3_start); + udph = (struct udphdr *)(ip4h + 1); + bth = (struct rxe_bth *)(udph + 1); + + memcpy(pshdr, l3_start, crc_field_len); + + /* This seed is the result of computing a CRC with a seed of + * 0xfffffff and 8 bytes of 0xff representing a masked LRH. + */ + crc = 0xdebb20e3; + + if (l3_type == htons(ETH_P_IP)) { /* IPv4 */ + memcpy(pshdr, ip4h, hdr_size); + ip4h = (struct iphdr *)pshdr; + udph = (struct udphdr *)(ip4h + 1); + + ip4h->ttl = 0xff; + ip4h->check = CSUM_MANGLED_0; + ip4h->tos = 0xff; + } else { /* IPv6 */ + memcpy(pshdr, ip6h, hdr_size); + ip6h = (struct ipv6hdr *)pshdr; + udph = (struct udphdr *)(ip6h + 1); + + memset(ip6h->flow_lbl, 0xff, sizeof(ip6h->flow_lbl)); + ip6h->priority = 0xf; + ip6h->hop_limit = 0xff; + } + udph->check = CSUM_MANGLED_0; + + bth = (struct rxe_bth *)(udph + 1); + /* exclude bth.resv8a */ + bth->qpn |= cpu_to_be32(~BTH_QPN_MASK); + + *icrc = xsc_crc32(dev, crc, pshdr, crc_field_len); + + return 0; +} + +/* Routine for sending QP1 packets for RoCE V1 an V2 + */ + // TO BE DONE: sq hdr buf should be create dynamically for mult entry +int build_qp1_send_v2(struct xsc_ib_dev *dev, + struct xsc_ib_qp *qp, + const struct ib_send_wr *wr, + struct ib_sge *sge, + int payload_size, u32 *crc) +{ + struct xsc_ib_ah *ah = container_of(ud_wr((struct ib_send_wr *)wr)->ah, struct xsc_ib_ah, + ibah); + const struct ib_gid_attr *sgid_attr = ah->ibah.sgid_attr; + u16 ether_type; + union ib_gid dgid; + bool is_eth = false; + bool is_vlan = false; + bool is_grh = false; + bool is_udp = false; + u8 ip_version = 0; + u16 vlan_id = 0xFFFF; + int rc = 0; + int cm_pcp = 0; + void *hdr_buf; + + memset(&qp->qp1_hdr, 0, 
sizeof(qp->qp1_hdr)); + + if (!qp->sq.hdr_buf) { + xsc_ib_err(dev, "QP1 buffer is empty!"); + return -ENOMEM; + } + hdr_buf = (u8 *)qp->sq.hdr_buf + MAX_QP1_SQ_HDR_SIZE_V2 * qp->sq.mad_index; + + if (!sgid_attr || !sgid_attr->ndev) { + xsc_ib_err(dev, "sgid_addr or ndev is null\n"); + return -ENXIO; + } + + if (is_vlan_dev(sgid_attr->ndev)) + vlan_id = vlan_dev_vlan_id(sgid_attr->ndev); + + is_udp = sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP; + memcpy(&dgid.raw, &ah->av.rgid, 16); + if (is_udp) { + if (ipv6_addr_v4mapped((struct in6_addr *)&sgid_attr->gid)) { + ip_version = 4; + ether_type = ETH_P_IP; + } else { + ip_version = 6; + ether_type = ETH_P_IPV6; + } + is_grh = false; + } else { + ether_type = ETH_P_IBOE; + is_grh = true; + } + + is_eth = true; + is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false; + + ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh, + ip_version, is_udp, 0, &qp->qp1_hdr); + + /* ETH */ + ether_addr_copy(qp->qp1_hdr.eth.dmac_h, ah->av.rmac); + ether_addr_copy(qp->qp1_hdr.eth.smac_h, dev->netdev->dev_addr); + + /* For vlan, check the sgid for vlan existence */ + if (!is_vlan) { + qp->qp1_hdr.eth.type = cpu_to_be16(ether_type); + } else { + if (dev->cm_pcp != DSCP_PCP_UNSET) + cm_pcp = dev->cm_pcp << 13; + else + cm_pcp = (iboe_tos_to_sl(sgid_attr->ndev, ah->av.tclass) << 13); + qp->qp1_hdr.vlan.type = cpu_to_be16(ether_type); + qp->qp1_hdr.vlan.tag = cpu_to_be16(vlan_id | cm_pcp); + } + +#define ECN_CAPABLE_TRANSPORT 0x2 + if (is_grh || ip_version == 6) { + memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid_attr->gid.raw, + sizeof(sgid_attr->gid)); + memcpy(qp->qp1_hdr.grh.destination_gid.raw, ah->av.rgid, + sizeof(ah->av.rgid)); + qp->qp1_hdr.grh.hop_limit = ah->av.hop_limit; + + if (dev->cm_dscp != DSCP_PCP_UNSET) + qp->qp1_hdr.grh.traffic_class = (dev->cm_dscp << 2) | ECN_CAPABLE_TRANSPORT; + else + qp->qp1_hdr.grh.traffic_class = ECN_CAPABLE_TRANSPORT; + } + + if (ip_version == 4) { + if (dev->cm_dscp != 
DSCP_PCP_UNSET) + qp->qp1_hdr.ip4.tos = (dev->cm_dscp << 2) | ECN_CAPABLE_TRANSPORT; + else + qp->qp1_hdr.ip4.tos = ECN_CAPABLE_TRANSPORT; + qp->qp1_hdr.ip4.id = 0; + qp->qp1_hdr.ip4.frag_off = htons(IP_DF); + qp->qp1_hdr.ip4.ttl = ah->av.hop_limit; + + memcpy(&qp->qp1_hdr.ip4.saddr, sgid_attr->gid.raw + 12, 4); + memcpy(&qp->qp1_hdr.ip4.daddr, ah->av.rgid + 12, 4); + qp->qp1_hdr.ip4.check = ib_ud_ip4_csum(&qp->qp1_hdr); + } + + if (is_udp) { + qp->qp1_hdr.udp.dport = htons(ROCE_V2_UDP_DPORT); + qp->qp1_hdr.udp.sport = htons(ah->av.udp_sport); + qp->qp1_hdr.udp.csum = 0; + xsc_ib_dbg(dev, "CM packet used udp_sport=%d\n", ah->av.udp_sport); + } + + /* BTH */ + if (wr->opcode == IB_WR_SEND_WITH_IMM) { + qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; + qp->qp1_hdr.immediate_present = 1; + } else { + qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY; + } + if (wr->send_flags & IB_SEND_SOLICITED) + qp->qp1_hdr.bth.solicited_event = 1; + /* pad_count */ + qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3; + + /* P_key for QP1 is for all members */ + qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF); + qp->qp1_hdr.bth.destination_qpn = IB_QP1; + qp->qp1_hdr.bth.ack_req = 0; + qp->send_psn++; + qp->send_psn &= BTH_PSN_MASK; + qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn); + /* DETH */ + /* Use the priviledged Q_Key for QP1 */ + qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY); + qp->qp1_hdr.deth.source_qpn = IB_QP1; + + /* Pack the QP1 to the transmit buffer */ + sge->addr = (dma_addr_t)(qp->sq.hdr_dma + MAX_QP1_SQ_HDR_SIZE_V2 * qp->sq.mad_index); + sge->lkey = 0xFFFFFFFF; + sge->length = MAX_QP1_SQ_HDR_SIZE; + + ib_ud_header_pack(&qp->qp1_hdr, hdr_buf); + /* + * Max Header buf size for IPV6 RoCE V2 is 86, + * which is same as the QP1 SQ header buffer. + * Header buf size for IPV4 RoCE V2 can be 66. + * ETH(14) + VLAN(4)+ IP(20) + UDP (8) + BTH(20). 
+ * Subtract 20 bytes from QP1 SQ header buf size + */ + if (is_udp && ip_version == 4) + sge->length -= 20; + /* + * Max Header buf size for RoCE V1 is 78. + * ETH(14) + VLAN(4) + GRH(40) + BTH(20). + * Subtract 8 bytes from QP1 SQ header buf size + */ + if (!is_udp) + sge->length -= 8; + + /* Subtract 4 bytes for non vlan packets */ + if (!is_vlan) + sge->length -= 4; + + rc = xsc_icrc_hdr(dev, hdr_buf, sge->length - sizeof(struct ib_unpacked_eth), crc); + if (rc) { + xsc_ib_err(dev, "CRC error: hdr size %ld\n", + sge->length - sizeof(struct ib_unpacked_eth)); + } + return rc; +} + +static void zero_send_ds(struct xsc_ib_qp *qp, int idx) +{ + void *seg; + int i; + int ds_num; + u64 *p; + + ds_num = XSC_SEND_SEG_NUM << (qp->sq.wqe_shift - XSC_SEND_WQE_SHIFT); + seg = (void *)xsc_get_send_wqe(qp, idx); + for (i = 1; i < ds_num; i++) { + p = get_seg_wqe(seg, i); + p[0] = 0; + p[1] = 0; + } +} + +int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, + const struct ib_send_wr **bad_wr) +{ + struct xsc_ib_qp *qp = to_xqp(ibqp); + struct xsc_ib_dev *dev = to_mdev(ibqp->device); + void *seg; + struct xsc_send_wqe_ctrl_seg *ctrl; + struct xsc_wqe_data_seg *data_seg; + u32 crc; + int nreq; + int err = 0; + int i; + unsigned int idx; + unsigned long irqflag = 0; + struct ib_sge sg; + u8 *cur_p = NULL; + u8 *mad_send_base = NULL; + struct ib_wc wc; + void *vaddr; + int sig = 0; + + if (wr->opcode == IB_WR_LOCAL_INV) { + wc.status = IB_WC_SUCCESS; + wc.wr_cqe = wr->wr_cqe; + wc.qp = ibqp; + sig = qp->sq_signal_bits == XSC_WQE_CTRL_CQ_UPDATE ? + 1 : wr->send_flags & IB_SEND_SIGNALED; + if (xsc_wr_invalidate_mr(dev, wr)) + wc.status = IB_WC_GENERAL_ERR; + + if (wr->wr_cqe && wr->wr_cqe->done && sig) + wr->wr_cqe->done(qp->send_cq, &wc); + wr = wr->next; + if (!wr) + return 0; + } + + if (wr->opcode == IB_WR_REG_MR) { + wc.status = IB_WC_SUCCESS; + wc.qp = ibqp; + sig = qp->sq_signal_bits == XSC_WQE_CTRL_CQ_UPDATE ? 
+ 1 : wr->send_flags & IB_SEND_SIGNALED; + if (xsc_wr_reg_mr(dev, wr)) + wc.status = IB_WC_GENERAL_ERR; + if (wr->wr_cqe && wr->wr_cqe->done && sig) + wr->wr_cqe->done(qp->send_cq, &wc); + } + + spin_lock_irqsave(&qp->sq.lock, irqflag); + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + unsigned int seg_index = 1; + unsigned int msg_len = 0; + struct ib_sge *sgl = &wr->sg_list[0]; + int sg_n = wr->num_sge; + + if (unlikely(wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(xsc_ib_opcode))) { + xsc_ib_err(dev, "bad opcode %d\n", wr->opcode); + err = EINVAL; + *bad_wr = wr; + goto out; + } + + if (unlikely(xsc_wq_overflow(&qp->sq, nreq, + to_xcq(qp->ibqp.send_cq)))) { + xsc_ib_err(dev, "send work queue overflow\n"); + err = ENOMEM; + *bad_wr = wr; + goto out; + } + + if (unlikely(wr->num_sge > qp->sq.max_gs)) { + xsc_ib_err(dev, "max gs exceeded %d (max = %d)\n", + wr->num_sge, qp->sq.max_gs); + err = ENOMEM; + *bad_wr = wr; + goto out; + } + + if (unlikely(wr->opcode == IB_WR_RDMA_READ && wr->num_sge > 1)) { + xsc_ib_err(dev, "rdma read, max gs exceeded %d (max = 1)\n", + wr->num_sge); + err = ENOMEM; + *bad_wr = wr; + goto out; + } + + idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); + zero_send_ds(qp, idx); + seg = xsc_get_send_wqe(qp, idx); + ctrl = seg; + ctrl->wqe_id = cpu_to_le16(qp->sq.cur_post << + (qp->sq.wqe_shift - XSC_BASE_WQE_SHIFT)); + ctrl->ds_data_num = 0; + ctrl->se = wr->send_flags & IB_SEND_SOLICITED ? 1 : 0; + ctrl->ce = wr->send_flags & IB_SEND_SIGNALED ? 
1 : 0; + for (i = 0; i < wr->num_sge; ++i) { + if (likely(wr->sg_list[i].length)) + msg_len += wr->sg_list[i].length; + } + ctrl->msg_len = msg_len; + ctrl->with_immdt = 0; + + if (unlikely(wr->opcode == IB_WR_RDMA_READ && msg_len == 0)) { + xsc_ib_err(dev, "rdma read, msg len should not be 0\n"); + /* workaround, return success for posting zero-length read */ + err = 0; + goto out; + } + switch (ibqp->qp_type) { + case IB_QPT_RC: + ctrl->ds_data_num = wr->num_sge; + switch (wr->opcode) { + case IB_WR_SEND_WITH_INV: + case IB_WR_SEND: + break; + case IB_WR_SEND_WITH_IMM: + ctrl->with_immdt = 1; + ctrl->opcode_data = send_ieth(wr); + break; + case IB_WR_RDMA_WRITE_WITH_IMM: + ctrl->with_immdt = 1; + ctrl->opcode_data = send_ieth(wr); + case IB_WR_RDMA_READ: + case IB_WR_RDMA_WRITE: + ctrl->with_immdt = 0; + ctrl->ds_data_num++; + data_seg = get_seg_wqe(ctrl, seg_index); + set_remote_addr_seg(data_seg, + msg_len, + rdma_wr(wr)->remote_addr, + rdma_wr(wr)->rkey); + seg_index++; + break; + case IB_WR_REG_MR: + break; + default: + xsc_ib_err(dev, "debug: opcode:%u NOT supported\n", wr->opcode); + err = EPERM; + *bad_wr = wr; + goto out; + } + ctrl->msg_opcode = xsc_ib_opcode[wr->opcode]; + break; + case IB_QPT_UD: + case IB_QPT_GSI: + ctrl->msg_opcode = XSC_MSG_OPCODE_MAD; + ctrl->ds_data_num++; + data_seg = get_seg_wqe(ctrl, seg_index); + mad_send_base = (u8 *)qp->sq.hdr_buf + + MAX_QP1_SQ_HDR_SIZE_V2 * qp->sq.mad_index; + + err = build_qp1_send_v2(dev, qp, wr, &sg, msg_len, &crc); + if (err) { + *bad_wr = wr; + goto out; + } + + cur_p = mad_send_base + sg.length; + for (i = 0; i < wr->num_sge; ++i) { + if (likely(wr->sg_list[i].length)) { + vaddr = xsc_ib_send_mad_sg_virt_addr(&dev->ib_dev, wr, i); + memcpy(cur_p, vaddr, wr->sg_list[i].length); + } + cur_p += wr->sg_list[i].length; + } + crc = xsc_crc32(dev, crc, mad_send_base + sg.length, ctrl->msg_len); + ctrl->msg_len += sg.length; + seg_index++; + + *(u32 *)&mad_send_base[ctrl->msg_len] = ~crc; + ctrl->msg_len += 
sizeof(crc); + sg.length = ctrl->msg_len; + set_local_data_seg(data_seg, &sg); + xsc_ib_info(dev, "qp[%d] send MAD packet, msg_len:%d\n", + qp->xqp.qpn, ctrl->msg_len); + qp->sq.mad_index = (qp->sq.mad_index + 1) % MAD_QUEUE_DEPTH; + + sg_n = 0; + break; + default: + xsc_ib_err(dev, "qp type:%u NOT supported\n", ibqp->qp_type); + err = EPERM; + *bad_wr = wr; + goto out; + } + + if (wr->opcode == IB_WR_REG_MR) { + nreq--; + continue; + } + + if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { + err = set_data_inl_seg(qp, wr, ctrl); + if (unlikely(err)) { + *bad_wr = wr; + xsc_ib_err(dev, "inline layout failed, err %d\n", err); + goto out; + } + } else { + for (i = 0; i < sg_n; ++i, ++seg_index) { + if (likely(sgl[i].length)) { + data_seg = get_seg_wqe(ctrl, seg_index); + set_local_data_seg(data_seg, &sgl[i]); + } + } + } + qp->sq.wrid[idx] = wr->wr_id; + qp->sq.wqe_head[idx] = qp->sq.head + nreq; + qp->sq.cur_post += 1; + } +out: + xsc_ib_dbg(dev, "nreq:%d\n", nreq); + xsc_post_send_db(qp, dev->xdev, nreq); + spin_unlock_irqrestore(&qp->sq.lock, irqflag); + + return err; +} + +int xsc_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, + const struct ib_recv_wr **bad_wr) +{ + struct xsc_ib_qp *qp = to_xqp(ibqp); + struct xsc_ib_dev *dev = to_mdev(ibqp->device); + struct xsc_core_device *xdev = dev->xdev; + struct xsc_wqe_data_seg *recv_head; + struct xsc_wqe_data_seg *data_seg; + unsigned long flags; + int err = 0; + u16 next_pid = 0; + union xsc_db_data db; + int nreq; + u16 idx; + int i; + + spin_lock_irqsave(&qp->rq.lock, flags); + + idx = qp->rq.head & (qp->rq.wqe_cnt - 1); + + for (nreq = 0; wr; ++nreq, wr = wr->next) { + if (unlikely(xsc_wq_overflow(&qp->rq, nreq, to_xcq(qp->ibqp.recv_cq)))) { + xsc_ib_err(dev, "recv work queue overflow\n"); + err = ENOMEM; + *bad_wr = wr; + goto out; + } + + if (unlikely(wr->num_sge > qp->rq.max_gs)) { + xsc_ib_err(dev, "max gs exceeded %d (max = %d)\n", + wr->num_sge, qp->rq.max_gs); + err = EINVAL; + *bad_wr = 
wr; + goto out; + } + + recv_head = get_recv_wqe(qp, idx); + + for (i = 0; i < wr->num_sge; ++i) { + if (unlikely(!wr->sg_list[i].length)) + continue; + data_seg = get_seg_wqe(recv_head, i); + data_seg->in_line = 0; + WR_LE_64(data_seg->va, wr->sg_list[i].addr); + WR_LE_32(data_seg->mkey, wr->sg_list[i].lkey); + if (is_qp1(qp->xqp.qp_type)) + WR_LE_32(data_seg->seg_len, xdev->caps.rx_pkt_len_max); + else + WR_LE_32(data_seg->seg_len, wr->sg_list[i].length); + } + + qp->rq.wrid[idx] = wr->wr_id; + + idx = (idx + 1) & (qp->rq.wqe_cnt - 1); + } + +out: + if (likely(nreq)) { + qp->rq.head += nreq; + next_pid = qp->rq.head << (qp->rq.wqe_shift - XSC_BASE_WQE_SHIFT); + db.rq_next_pid = next_pid; + db.rqn = qp->doorbell_qpn; + + /* + * Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + + writel(db.raw_data, REG_ADDR(xdev, xdev->regs.rx_db)); + } + + spin_unlock_irqrestore(&qp->rq.lock, flags); + + return err; +} + +static inline enum ib_qp_state to_ib_qp_state(enum xsc_qp_state xsc_state) +{ + switch (xsc_state) { + case XSC_QP_STATE_RST: return IB_QPS_RESET; + case XSC_QP_STATE_INIT: return IB_QPS_INIT; + case XSC_QP_STATE_RTR: return IB_QPS_RTR; + case XSC_QP_STATE_RTS: return IB_QPS_RTS; + case XSC_QP_STATE_SQ_DRAINING: + case XSC_QP_STATE_SQD: return IB_QPS_SQD; + case XSC_QP_STATE_SQER: return IB_QPS_SQE; + case XSC_QP_STATE_ERR: return IB_QPS_ERR; + default: return -1; + } +} + +static inline enum ib_mig_state to_ib_mig_state(int xsc_mig_state) +{ + switch (xsc_mig_state) { + case XSC_QP_PM_ARMED: return IB_MIG_ARMED; + case XSC_QP_PM_REARM: return IB_MIG_REARM; + case XSC_QP_PM_MIGRATED: return IB_MIG_MIGRATED; + default: return -1; + } +} + +int xsc_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr) +{ + struct xsc_ib_dev *dev = to_mdev(ibqp->device); + struct xsc_ib_qp *qp = to_xqp(ibqp); + struct xsc_query_qp_mbox_out *outb; + struct xsc_qp_context *context; + int 
xsc_state;
	int err = 0;

	mutex_lock(&qp->mutex);
	outb = kzalloc(sizeof(*outb), GFP_KERNEL);
	if (!outb) {
		err = -ENOMEM;
		goto out;
	}
	context = &outb->ctx;
	/* Fetch the current hardware QP context. */
	err = xsc_core_qp_query(dev->xdev, &qp->xqp, outb, sizeof(*outb));
	if (err)
		goto out_free;

	/* Report the driver-cached state; the queried context supplies the rest. */
	qp_attr->qp_state = qp->state;
	qp_attr->path_mtu = context->mtu_mode ? IB_MTU_4096 : IB_MTU_1024;
	/* PSN and QPN are 24-bit fields, hence the 0xffffff masks. */
	qp_attr->rq_psn = be32_to_cpu(context->next_recv_psn) & 0xffffff;
	qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num = be32_to_cpu(context->remote_qpn) & 0xffffff;
	/*
	 * NOTE(review): xsc_state is never assigned before this read, so
	 * sq_draining is computed from an uninitialized variable (undefined
	 * behavior).  It presumably should be decoded from the queried
	 * hardware context -- confirm against the firmware interface and fix.
	 */
	qp_attr->sq_draining = xsc_state == XSC_QP_STATE_SQ_DRAINING;
	qp_attr->retry_cnt = context->retry_cnt;
	qp_attr->rnr_retry = context->rnr_retry;
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = qp->rq.max_gs;

	/*
	 * NOTE(review): both branches are identical.  Either user and kernel
	 * QPs genuinely report the same SQ caps (then the if/else can be
	 * collapsed) or the userspace branch was meant to differ -- verify.
	 */
	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	}

	/* We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & XSC_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & XSC_WQE_CTRL_CQ_UPDATE ?
+ IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; + +out_free: + kfree(outb); + +out: + mutex_unlock(&qp->mutex); + return err; +} + +void xsc_ib_drain_rq(struct ib_qp *qp __maybe_unused) +{ +} + +void xsc_ib_drain_sq(struct ib_qp *qp __maybe_unused) +{ +} diff --git a/drivers/infiniband/hw/xsc/rtt.c b/drivers/infiniband/hw/xsc/rtt.c new file mode 100644 index 000000000000..e7a68f1ab41a --- /dev/null +++ b/drivers/infiniband/hw/xsc/rtt.c @@ -0,0 +1,412 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/driver.h" +#include "common/xsc_cmd.h" + +struct xsc_rtt_interface { + struct xsc_core_device *xdev; + struct kobject kobj; +}; + +struct xsc_rtt_attributes { + struct attribute attr; + ssize_t (*show)(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + char *buf); + ssize_t (*store)(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + const char *buf, size_t count); +}; + +static ssize_t enable_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + char *buf) +{ + int err; + struct xsc_inbox_hdr in; + struct xsc_rtt_en_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_EN); + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), + (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to get rtt en, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return sprintf(buf, "%u\n", out.en); +} + +static ssize_t enable_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + const char *buf, size_t count) +{ + int err; + u16 rtt_enable; + struct xsc_rtt_en_mbox_in in; + struct xsc_rtt_en_mbox_out out; + + err = kstrtou16(buf, 0, &rtt_enable); + if (err != 0) + return -EINVAL; + + if 
(rtt_enable > 1) { + xsc_core_err(g->xdev, "Failed to set rtt en, rtt_enable(%u) out of range[0,1]\n", + rtt_enable); + return -EINVAL; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_SET_RTT_EN); + in.en = rtt_enable; + + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_en_mbox_in), + (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to set rtt en, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return count; +} + +static ssize_t qpn_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + char *buf) +{ + int err, i; + u32 count = 0; + struct xsc_inbox_hdr in; + struct xsc_get_rtt_qpn_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_QPN); + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), + (void *)&out, sizeof(struct xsc_get_rtt_qpn_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to get rtt qpn, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + for (i = 0; i < (XSC_RTT_CFG_QPN_MAX - 1); i++) + count += sprintf(&buf[count], "%hu,", __be32_to_cpu(out.qpn[i])); + + count += sprintf(&buf[count], "%hu\n", __be32_to_cpu(out.qpn[i])); + + return count; +} + +#define RTT_CFG_QPN_FORMAT "%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u," \ +"%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u" + +static ssize_t qpn_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + const char *buf, size_t count) +{ + int err, i, num; + struct xsc_rtt_qpn_mbox_in in; + struct xsc_rtt_qpn_mbox_out out; + u32 *ptr = in.qpn; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + num = sscanf(buf, RTT_CFG_QPN_FORMAT, &ptr[0], &ptr[1], &ptr[2], &ptr[3], &ptr[4], + &ptr[5], &ptr[6], &ptr[7], &ptr[8], &ptr[9], &ptr[10], &ptr[11], &ptr[12], + 
&ptr[13], &ptr[14], &ptr[15], &ptr[16], &ptr[17], &ptr[18], &ptr[19], + &ptr[20], &ptr[21], &ptr[22], &ptr[23], &ptr[24], &ptr[25], &ptr[26], + &ptr[27], &ptr[28], &ptr[29], &ptr[30], &ptr[31]); + if (num == 0) + return -EINVAL; + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_SET_RTT_QPN); + + for (i = 0 ; i < XSC_RTT_CFG_QPN_MAX; i++) + in.qpn[i] = __cpu_to_be32(ptr[i]); + + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_qpn_mbox_in), + (void *)&out, sizeof(struct xsc_rtt_qpn_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to set rtt qpn, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return count; +} + +static ssize_t period_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + char *buf) +{ + int err; + struct xsc_inbox_hdr in; + struct xsc_rtt_period_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_PERIOD); + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), + (void *)&out, sizeof(struct xsc_rtt_period_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to get rtt period, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return sprintf(buf, "%u\n", __be32_to_cpu(out.period)); +} + +#define RTT_CFG_PERIOD_MAX 10000 //ms, 10s +#define RTT_CFG_PERIOD_MIN 1000 //ms, 1s +static ssize_t period_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + const char *buf, size_t count) +{ + int err; + u32 rtt_period; + struct xsc_rtt_period_mbox_in in; + struct xsc_rtt_period_mbox_out out; + + err = kstrtouint(buf, 0, &rtt_period); + if (err != 0) + return -EINVAL; + + if (rtt_period > RTT_CFG_PERIOD_MAX || rtt_period < RTT_CFG_PERIOD_MIN) + return -EINVAL; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_SET_RTT_PERIOD); + in.period = __cpu_to_be32(rtt_period); + + err = 
xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_rtt_period_mbox_in), + (void *)&out, sizeof(struct xsc_rtt_period_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to set rtt period, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return count; +} + +static ssize_t result_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + char *buf) +{ + int i, err; + u32 count = 0; + struct xsc_inbox_hdr in; + struct xsc_rtt_result_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_RESULT); + + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), + (void *)&out, sizeof(struct xsc_rtt_result_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to get rtt result, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + for (i = 0; i < (XSC_RTT_CFG_QPN_MAX - 1); i++) + count += sprintf(&buf[count], "%lld,", __be64_to_cpu(out.result[i])); + + count += sprintf(&buf[count], "%lld\n", __be64_to_cpu(out.result[i])); + + return count; +} + +static ssize_t result_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +static ssize_t stats_show(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + char *buf) +{ + int err; + u32 count = 0; + struct xsc_inbox_hdr in; + struct xsc_rtt_stats_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.opcode = __cpu_to_be16(XSC_CMD_OP_GET_RTT_STATS); + + err = xsc_cmd_exec(g->xdev, (void *)&in, sizeof(struct xsc_inbox_hdr), + (void *)&out, sizeof(struct xsc_rtt_stats_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(g->xdev, "Failed to get rtt stats, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + count += sprintf(&buf[count], "rtt_succ_snd_req_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_succ_snd_req_cnt)); + 
count += sprintf(&buf[count], "rtt_succ_snd_rsp_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_succ_snd_rsp_cnt)); + count += sprintf(&buf[count], "rtt_fail_snd_req_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_fail_snd_req_cnt)); + count += sprintf(&buf[count], "rtt_fail_snd_rsp_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_fail_snd_rsp_cnt)); + count += sprintf(&buf[count], "rtt_rcv_req_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_rcv_req_cnt)); + count += sprintf(&buf[count], "rtt_rcv_rsp_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_rcv_rsp_cnt)); + count += sprintf(&buf[count], "rtt_rcv_unk_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_rcv_unk_cnt)); + count += sprintf(&buf[count], "rtt_grp_invalid_cnt %llu\n", + __be64_to_cpu(out.stats.rtt_grp_invalid_cnt)); + + return count; +} + +static ssize_t stats_store(struct xsc_rtt_interface *g, struct xsc_rtt_attributes *a, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +#define RTT_ATTR(_name) struct xsc_rtt_attributes xsc_rtt_attr_##_name = \ + __ATTR(rtt_probe_##_name, 0644, _name##_show, _name##_store) + +RTT_ATTR(enable); +RTT_ATTR(qpn); +RTT_ATTR(period); +RTT_ATTR(result); +RTT_ATTR(stats); + +static ssize_t rtt_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct xsc_rtt_attributes *ga = + container_of(attr, struct xsc_rtt_attributes, attr); + struct xsc_rtt_interface *g = container_of(kobj, struct xsc_rtt_interface, kobj); + + if (!ga->show) + return -EIO; + + return ga->show(g, ga, buf); +} + +static ssize_t rtt_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t size) +{ + struct xsc_rtt_attributes *ga = + container_of(attr, struct xsc_rtt_attributes, attr); + struct xsc_rtt_interface *g = container_of(kobj, struct xsc_rtt_interface, kobj); + + if (!ga->store) + return -EIO; + + return ga->store(g, ga, buf, size); +} + +static const struct sysfs_ops rtt_sysfs_ops = { + .show = rtt_attr_show, + .store = rtt_attr_store, +}; + +static struct attribute 
*rtt_attrs[] = { + &xsc_rtt_attr_enable.attr, + &xsc_rtt_attr_qpn.attr, + &xsc_rtt_attr_period.attr, + &xsc_rtt_attr_result.attr, + &xsc_rtt_attr_stats.attr, + NULL +}; + +ATTRIBUTE_GROUPS(rtt); + +static const struct kobj_type rtt_ktype = { + .sysfs_ops = &rtt_sysfs_ops, + .default_groups = rtt_groups, +}; + +int xsc_rtt_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev) +{ + struct xsc_rtt_interface *tmp; + int err; + + if (!xdev || !xsc_core_is_pf(xdev) || xdev->pf_id != 0) + return -EACCES; + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + err = kobject_init_and_add(&tmp->kobj, &rtt_ktype, + &ib_dev->dev.kobj, "rtt"); + if (err) + goto rtt_attr_err; + + xdev->rtt_priv = tmp; + tmp->xdev = xdev; + return 0; + +rtt_attr_err: + kobject_put(&tmp->kobj); + kfree(tmp); + return err; +} + +void xsc_rtt_sysfs_fini(struct xsc_core_device *xdev) +{ + int err; + struct xsc_rtt_en_mbox_in in; + struct xsc_rtt_en_mbox_out out; + struct xsc_rtt_period_mbox_in period_in; + struct xsc_rtt_period_mbox_out period_out; + struct xsc_rtt_interface *rtt; + + if (!xdev || !xdev->rtt_priv) + return; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_SET_RTT_EN); + in.en = 0; + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_rtt_en_mbox_in), + (void *)&out, sizeof(struct xsc_rtt_en_mbox_out)); + if (err || out.hdr.status) + xsc_core_err(xdev, "Failed to set rtt disable, err(%u), status(%u)\n", + err, out.hdr.status); + + memset(&period_in, 0, sizeof(period_in)); + memset(&period_out, 0, sizeof(period_out)); + + period_in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_SET_RTT_PERIOD); + period_in.period = __cpu_to_be32(RTT_CFG_PERIOD_MAX); + + err = xsc_cmd_exec(xdev, (void *)&period_in, sizeof(struct xsc_rtt_period_mbox_in), + (void *)&period_out, sizeof(struct xsc_rtt_period_mbox_out)); + if (err || period_out.hdr.status) + xsc_core_err(xdev, "Failed to set rtt period default, 
err(%u), status(%u)\n", + err, period_out.hdr.status); + + rtt = xdev->rtt_priv; + kobject_put(&rtt->kobj); + kfree(rtt); + xdev->rtt_priv = NULL; +} diff --git a/drivers/infiniband/hw/xsc/user.h b/drivers/infiniband/hw/xsc/user.h new file mode 100644 index 000000000000..6e2b6ff542ae --- /dev/null +++ b/drivers/infiniband/hw/xsc/user.h @@ -0,0 +1,277 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_IB_USER_H +#define XSC_IB_USER_H + +#include +#include /* For ETH_ALEN. */ +#include + +enum xsc_ib_devx_methods { + XSC_IB_METHOD_DEVX_OTHER = (1U << UVERBS_ID_NS_SHIFT), + XSC_IB_METHOD_DEVX_QUERY_UAR, + XSC_IB_METHOD_DEVX_QUERY_EQN, +}; + +enum xsc_ib_devx_other_attrs { + XSC_IB_ATTR_DEVX_OTHER_CMD_IN = (1U << UVERBS_ID_NS_SHIFT), + XSC_IB_ATTR_DEVX_OTHER_CMD_OUT, +}; + +enum xsc_ib_objects { + XSC_IB_OBJECT_DEVX = (1U << UVERBS_ID_NS_SHIFT), + XSC_IB_OBJECT_DEVX_OBJ, + XSC_IB_OBJECT_DEVX_UMEM, + XSC_IB_OBJECT_FLOW_MATCHER, +}; + +/* Increment this value if any changes that break userspace ABI + * compatibility are made. + */ +#define XSC_IB_UVERBS_ABI_VERSION 1 + +/* Make sure that all structs defined in this file remain laid out so + * that they pack the same way on 32-bit and 64-bit architectures (to + * avoid incompatibility between 32-bit userspace and 64-bit kernels). + * In particular do not use pointer types -- pass pointers in __u64 + * instead. 
+ */ + +enum { + XSC_QP_FLAG_SIGNATURE = 1 << 0, + XSC_QP_FLAG_SCATTER_CQE = 1 << 1, + XSC_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2, + XSC_QP_FLAG_BFREG_INDEX = 1 << 3, + XSC_QP_FLAG_TYPE_DCT = 1 << 4, + XSC_QP_FLAG_TYPE_DCI = 1 << 5, + XSC_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6, + XSC_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7, + XSC_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8, + XSC_QP_FLAG_RAWPACKET_TSO = 1 << 9, + XSC_QP_FLAG_RAWPACKET_TX = 1 << 10, +}; + +struct xsc_ib_alloc_ucontext_req { + __u32 rsvd0; + __u32 rsvd1; +}; + +enum xsc_user_cmds_supp_uhw { + XSC_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0, + XSC_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1, +}; + +struct xsc_ib_alloc_ucontext_resp { + __u32 qp_tab_size; + __u32 cache_line_size; + __u16 max_sq_desc_sz; + __u16 max_rq_desc_sz; + __u32 max_send_wqebb; + __u32 max_recv_wr; + __u16 num_ports; + __u16 reserved; + __u64 qpm_tx_db; + __u64 qpm_rx_db; + __u64 cqm_next_cid_reg; + __u64 cqm_armdb; + __u32 send_ds_num; + __u32 recv_ds_num; + __u32 cmds_supp_uhw; +}; + +struct xsc_ib_create_qp { + __u64 buf_addr; + __u64 db_addr; + __u32 sq_wqe_count; + __u32 rq_wqe_count; + __u32 rq_wqe_shift; + __u32 flags; +}; + +struct xsc_ib_create_qp_resp { + __u32 uuar_index; + __u32 reserved; +}; + +struct xsc_ib_create_cq { + __u64 buf_addr; + __u64 db_addr; + __u32 cqe_size; +}; + +struct xsc_ib_create_cq_resp { + __u32 cqn; + __u32 reserved; +}; + +struct xsc_ib_create_ah_resp { + __u32 response_length; + __u8 dmac[ETH_ALEN]; + __u8 reserved[6]; +}; + +struct xsc_ib_alloc_pd_resp { + __u32 pdn; +}; + +struct xsc_ib_tso_caps { + __u32 max_tso; /* Maximum tso payload size in bytes */ + + /* Corresponding bit will be set if qp type from + * 'enum ib_qp_type' is supported, e.g. 
+ * supported_qpts |= 1 << IB_QPT_UD + */ + __u32 supported_qpts; +}; + +/* RX Hash function flags */ +enum xsc_rx_hash_function_flags { + XSC_RX_HASH_FUNC_TOEPLITZ = 1 << 0, +}; + +enum xsc_rdma_link_speed { + XSC_RDMA_LINK_SPEED_2_5GB = 1 << 0, + XSC_RDMA_LINK_SPEED_5GB = 1 << 1, + XSC_RDMA_LINK_SPEED_10GB = 1 << 3, + XSC_RDMA_LINK_SPEED_14GB = 1 << 4, + XSC_RDMA_LINK_SPEED_25GB = 1 << 5, + XSC_RDMA_LINK_SPEED_50GB = 1 << 6, + XSC_RDMA_LINK_SPEED_100GB = 1 << 7, +}; + +enum xsc_rdma_phys_state { + XSC_RDMA_PHY_STATE_SLEEP = 1, + XSC_RDMA_PHY_STATE_POLLING, + XSC_RDMA_PHY_STATE_DISABLED, + XSC_RDMA_PHY_STATE_PORT_CONFIGURATION_TRAINNING, + XSC_RDMA_PHY_STATE_LINK_UP, + XSC_RDMA_PHY_STATE_LINK_ERROR_RECOVERY, + XSC_RDMA_PHY_STATE_PHY_TEST, +}; + +/* + * RX Hash flags, these flags allows to set which incoming packet's field should + * participates in RX Hash. Each flag represent certain packet's field, + * when the flag is set the field that is represented by the flag will + * participate in RX Hash calculation. + * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP + * and *TCP and *UDP flags can't be enabled together on the same QP. 
+ */ +enum xsc_rx_hash_fields { + XSC_RX_HASH_SRC_IPV4 = 1 << 0, + XSC_RX_HASH_DST_IPV4 = 1 << 1, + XSC_RX_HASH_SRC_IPV6 = 1 << 2, + XSC_RX_HASH_DST_IPV6 = 1 << 3, + XSC_RX_HASH_SRC_PORT_TCP = 1 << 4, + XSC_RX_HASH_DST_PORT_TCP = 1 << 5, + XSC_RX_HASH_SRC_PORT_UDP = 1 << 6, + XSC_RX_HASH_DST_PORT_UDP = 1 << 7, + XSC_RX_HASH_IPSEC_SPI = 1 << 8, + /* Save bits for future fields */ + XSC_RX_HASH_INNER = (1UL << 31), +}; + +struct xsc_ib_rss_caps { + __aligned_u64 rx_hash_fields_mask; /* enum xsc_rx_hash_fields */ + __u8 rx_hash_function; /* enum xsc_rx_hash_function_flags */ + __u8 reserved[7]; +}; + +enum xsc_ib_cqe_comp_res_format { + XSC_IB_CQE_RES_FORMAT_HASH = 1 << 0, + XSC_IB_CQE_RES_FORMAT_CSUM = 1 << 1, + XSC_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2, +}; + +struct xsc_ib_cqe_comp_caps { + __u32 max_num; + __u32 supported_format; /* enum xsc_ib_cqe_comp_res_format */ +}; + +enum xsc_ib_packet_pacing_cap_flags { + XSC_IB_PP_SUPPORT_BURST = 1 << 0, +}; + +struct xsc_packet_pacing_caps { + __u32 qp_rate_limit_min; + __u32 qp_rate_limit_max; /* In kpbs */ + + /* Corresponding bit will be set if qp type from + * 'enum ib_qp_type' is supported, e.g. + * supported_qpts |= 1 << IB_QPT_RAW_PACKET + */ + __u32 supported_qpts; + __u8 cap_flags; /* enum xsc_ib_packet_pacing_cap_flags */ + __u8 reserved[3]; +}; + +enum xsc_ib_mpw_caps { + MPW_RESERVED = 1 << 0, + XSC_IB_ALLOW_MPW = 1 << 1, + XSC_IB_SUPPORT_EMPW = 1 << 2, +}; + +enum xsc_ib_sw_parsing_offloads { + XSC_IB_SW_PARSING = 1 << 0, + XSC_IB_SW_PARSING_CSUM = 1 << 1, + XSC_IB_SW_PARSING_LSO = 1 << 2, +}; + +struct xsc_ib_sw_parsing_caps { + __u32 sw_parsing_offloads; /* enum xsc_ib_sw_parsing_offloads */ + + /* Corresponding bit will be set if qp type from + * 'enum ib_qp_type' is supported, e.g. 
+ * supported_qpts |= 1 << IB_QPT_RAW_PACKET + */ + __u32 supported_qpts; +}; + +struct xsc_ib_striding_rq_caps { + __u32 min_single_stride_log_num_of_bytes; + __u32 max_single_stride_log_num_of_bytes; + __u32 min_single_wqe_log_num_of_strides; + __u32 max_single_wqe_log_num_of_strides; + + /* Corresponding bit will be set if qp type from + * 'enum ib_qp_type' is supported, e.g. + * supported_qpts |= 1 << IB_QPT_RAW_PACKET + */ + __u32 supported_qpts; + __u32 reserved; +}; + +enum xsc_ib_query_dev_resp_flags { + /* Support 128B CQE compression */ + XSC_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0, + XSC_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1, +}; + +enum xsc_ib_tunnel_offloads { + XSC_IB_TUNNELED_OFFLOADS_VXLAN = 1 << 0, + XSC_IB_TUNNELED_OFFLOADS_GRE = 1 << 1, + XSC_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2, + XSC_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3, + XSC_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4, +}; + +struct xsc_ib_query_device_resp { + __u32 comp_mask; + __u32 response_length; + struct xsc_ib_tso_caps tso_caps; + struct xsc_ib_rss_caps rss_caps; + struct xsc_ib_cqe_comp_caps cqe_comp_caps; + struct xsc_packet_pacing_caps packet_pacing_caps; + __u32 xsc_ib_support_multi_pkt_send_wqes; + __u32 flags; /* Use enum xsc_ib_query_dev_resp_flags */ + struct xsc_ib_sw_parsing_caps sw_parsing_caps; + struct xsc_ib_striding_rq_caps striding_rq_caps; + __u32 tunnel_offloads_caps; /* enum xsc_ib_tunnel_offloads */ + __u32 reserved; +}; + +#endif /* XSC_IB_USER_H */ diff --git a/drivers/infiniband/hw/xsc/xsc_ib.h b/drivers/infiniband/hw/xsc/xsc_ib.h new file mode 100644 index 000000000000..0753b3ba1c32 --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_ib.h @@ -0,0 +1,627 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_IB_H +#define XSC_IB_H + +#include +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/driver.h" +#include "common/cq.h" +#include "common/qp.h" +#include +#include +#include +#include +#include + +#include "xsc_ib_compat.h" + +#define xsc_ib_dbg(dev, format, arg...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_DBG) \ + pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +#define xsc_ib_err(dev, format, arg...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_ERR) \ + pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +#define xsc_ib_warn(dev, format, arg...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_WARN) \ + pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +#define xsc_ib_info(dev, format, arg...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_INFO) \ + pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, \ + __func__, __LINE__, current->pid, ##arg); \ +} while (0) + +struct xsc_ib_ucontext { + struct ib_ucontext ibucontext; + struct list_head db_page_list; + + /* protect doorbell record alloc/free + */ + struct mutex db_page_mutex; +}; + +#define field_avail(type, fld, sz) (offsetof(type, fld) + \ + sizeof(((type *)0)->fld) <= (sz)) + +static inline struct xsc_ib_ucontext *to_xucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct xsc_ib_ucontext, ibucontext); +} + +struct xsc_ib_pd { + struct ib_pd ibpd; + u32 pdn; + u32 pa_lkey; +}; + +/* Use macros here so that don't have to duplicate + * enum ib_send_flags and enum ib_qp_type for low-level driver + */ + +#define XSC_IB_QPT_REG_UMR IB_QPT_RESERVED1 + +enum { + XSC_PAGE_SHIFT_4K = 12, + XSC_PAGE_SHIFT_64K = 16, + XSC_PAGE_SHIFT_2M = 21, + XSC_PAGE_SHIFT_1G = 30, +}; + +enum { + XSC_PAGE_MODE_4K = 0, 
+ XSC_PAGE_MODE_64K = 1, + XSC_PAGE_MODE_2M = 2, + XSC_PAGE_MODE_1G = 3, +}; + +struct wr_list { + u16 opcode; + u16 next; +}; + +struct xsc_ib_wq { + u64 *wrid; + u32 *wr_data; + struct wr_list *w_list; + unsigned long *wqe_head; + u16 unsig_count; + + /* serialize post to the work queue + */ + spinlock_t lock; + int wqe_cnt; + int ds_cnt; + int max_post; + int max_gs; + int offset; + int wqe_shift; + unsigned int head; + unsigned int tail; + u16 cur_post; + u16 last_poll; + void *qend; + void *hdr_buf; + u32 hdr_size; + dma_addr_t hdr_dma; + int mad_queue_depth; + int mad_index; +}; + +enum { + XSC_QP_USER, + XSC_QP_KERNEL, + XSC_QP_EMPTY +}; + +struct xsc_ib_qp { + struct ib_qp ibqp; + struct xsc_core_qp xqp; + struct xsc_buf buf; + + struct xsc_db db; + struct xsc_ib_wq rq; + + u32 doorbell_qpn; + u8 sq_signal_bits; + u8 fm_cache; + int sq_max_wqes_per_wr; + int sq_spare_wqes; + struct xsc_ib_wq sq; + + struct ib_umem *umem; + int buf_size; + + /* serialize qp state modifications + */ + struct mutex mutex; + u16 xrcdn; + u32 flags; + u8 port; + u8 alt_port; + u8 atomic_rd_en; + u8 resp_depth; + u8 state; + int xsc_type; + int wq_sig; + int scat_cqe; + int max_inline_data; + int has_rq; + + int create_type; + u32 pa_lkey; + /* For QP1 */ + struct ib_ud_header qp1_hdr; + u32 send_psn; + struct xsc_qp_context ctx; + struct ib_cq *send_cq; + struct ib_cq *recv_cq; + /* For qp resources */ + spinlock_t lock; +}; + +struct xsc_ib_cq_buf { + struct xsc_buf buf; + struct ib_umem *umem; + int cqe_size; +}; + +enum xsc_ib_qp_flags { + XSC_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 0, + XSC_IB_QP_SIGNATURE_HANDLING = 1 << 1, +}; + +struct xsc_shared_mr_info { + int mr_id; + struct ib_umem *umem; +}; + +struct xsc_ib_cq { + struct ib_cq ibcq; + struct xsc_core_cq xcq; + struct xsc_ib_cq_buf buf; + struct xsc_db db; + + /* serialize access to the CQ + */ + spinlock_t lock; + + /* protect resize cq + */ + struct mutex resize_mutex; + struct xsc_ib_cq_resize *resize_buf; + struct 
ib_umem *resize_umem; + int cqe_size; +}; + +struct xsc_ib_xrcd { + struct ib_xrcd ibxrcd; + u32 xrcdn; +}; + +struct xsc_ib_peer_id; + +struct xsc_ib_mr { + struct ib_mr ibmr; + struct xsc_core_mr mmr; + struct ib_umem *umem; + struct xsc_shared_mr_info *smr_info; + struct list_head list; + int order; + __be64 *pas; + dma_addr_t dma; + int npages; + struct completion done; + enum ib_wc_status status; + struct xsc_ib_peer_id *peer_id; + atomic_t invalidated; + struct completion invalidation_comp; +}; + +struct xsc_ib_peer_id { + struct completion comp; + struct xsc_ib_mr *mr; +}; + +struct xsc_cache_ent { + struct list_head head; + /* sync access to the cahce entry + */ + spinlock_t lock; + + struct dentry *dir; + char name[4]; + u32 order; + u32 size; + u32 cur; + u32 miss; + u32 limit; + + struct dentry *fsize; + struct dentry *fcur; + struct dentry *fmiss; + struct dentry *flimit; + + struct xsc_ib_dev *dev; + struct work_struct work; + struct delayed_work dwork; +}; + +struct xsc_mr_cache { + struct workqueue_struct *wq; + struct xsc_cache_ent ent[MAX_MR_CACHE_ENTRIES]; + int stopped; + struct dentry *root; + unsigned long last_add; +}; + +struct xsc_gid { + u8 data[16]; +}; + +struct xsc_sgid_tbl { + struct xsc_gid *tbl; + u32 max; + u32 count; +}; + +struct xsc_ib_res { + struct xsc_sgid_tbl sgid_tbl; +}; + +struct xsc_ib_resources { + struct ib_cq *c0; + struct ib_xrcd *x0; + struct ib_xrcd *x1; + struct ib_pd *p0; + struct ib_srq *s0; +}; + +struct xsc_ib_dev { + struct ib_device ib_dev; + struct uverbs_object_tree_def *driver_trees[6]; + struct net_device *netdev; + struct xsc_core_device *xdev; + XSC_DECLARE_DOORBELL_LOCK(uar_lock); + struct list_head eqs_list; + int num_ports; + int num_comp_vectors; + /* serialize update of capability mask + */ + struct mutex cap_mask_mutex; + u8 ib_active; + /* sync used page count stats + */ + spinlock_t mr_lock; + struct xsc_ib_res ib_res; + struct xsc_ib_resources devr; + struct xsc_mr_cache cache; + u32 
crc_32_table[256]; + int cm_pcp; + int cm_dscp; + int force_pcp; + int force_dscp; + int iommu_state; + struct notifier_block nb; +}; + +union xsc_ib_fw_ver { + u64 data; + struct { + u8 ver_major; + u8 ver_minor; + u16 ver_patch; + u32 ver_tweak; + } s; +}; + +struct xsc_pa_chunk { + struct list_head list; + u64 va; + dma_addr_t pa; + size_t length; +}; + +static inline struct xsc_ib_cq *to_xibcq(struct xsc_core_cq *xcq) +{ + return container_of(xcq, struct xsc_ib_cq, xcq); +} + +static inline struct xsc_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd) +{ + return container_of(ibxrcd, struct xsc_ib_xrcd, ibxrcd); +} + +static inline struct xsc_ib_dev *to_mdev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct xsc_ib_dev, ib_dev); +} + +static inline struct xsc_ib_cq *to_xcq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct xsc_ib_cq, ibcq); +} + +static inline struct xsc_ib_qp *to_xibqp(struct xsc_core_qp *xqp) +{ + return container_of(xqp, struct xsc_ib_qp, xqp); +} + +static inline struct xsc_ib_pd *to_mpd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct xsc_ib_pd, ibpd); +} + +static inline struct xsc_ib_qp *to_xqp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct xsc_ib_qp, ibqp); +} + +static inline struct xsc_ib_mr *to_mmr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct xsc_ib_mr, ibmr); +} + +struct xsc_ib_ah { + struct ib_ah ibah; + struct xsc_av av; +}; + +static inline struct xsc_ib_ah *to_mah(struct ib_ah *ibah) +{ + return container_of(ibah, struct xsc_ib_ah, ibah); +} + +static inline struct xsc_ib_dev *xdev2ibdev(struct xsc_core_device *xdev) +{ + return container_of((void *)xdev, struct xsc_ib_dev, xdev); +} + +int xsc_ib_query_port(struct ib_device *ibdev, u32 port, + struct ib_port_attr *props); + +int xsc_ib_create_qp(struct ib_qp *ibqp, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata); + +void __xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 qpn); +void xsc_ib_cq_clean(struct xsc_ib_cq *cq, u32 
qpn); + +int xsc_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr); +int xsc_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata); +int xsc_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr); + +int xsc_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, + const struct ib_send_wr **bad_wr); +int xsc_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, + const struct ib_recv_wr **bad_wr); + +void *xsc_get_send_wqe(struct xsc_ib_qp *qp, int n); +int xsc_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); +int xsc_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); +struct ib_mr *xsc_ib_get_dma_mr(struct ib_pd *pd, int acc); +struct ib_mr *xsc_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata); +int xsc_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset); +void xsc_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, + int *ncont, int *order); +void xsc_ib_populate_pas(struct xsc_ib_dev *dev, struct ib_umem *umem, + int page_shift, __be64 *pas, int npages, bool need_to_devide); +const struct uverbs_object_tree_def *xsc_ib_get_devx_tree(void); + +int xsc_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, + int sg_nents, unsigned int *sg_offset); +int xsc_wr_reg_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr); +int xsc_wr_invalidate_mr(struct xsc_ib_dev *dev, const struct ib_send_wr *wr); +int xsc_find_best_pgsz(struct ib_umem *umem, unsigned long pgsz_bitmap, + unsigned long addr, int *npage, int *shift, u64 **pas); + +void xsc_ib_drain_rq(struct ib_qp *qp); +void xsc_ib_drain_sq(struct ib_qp *qp); + +static inline void init_query_mad(struct ib_smp *mad) +{ + mad->base_version = 1; + mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; + mad->class_version = 1; + mad->method = IB_MGMT_METHOD_GET; +} + 
+static inline u8 convert_access(int acc) +{ + return (acc & IB_ACCESS_REMOTE_ATOMIC ? XSC_PERM_ATOMIC : 0) | + (acc & IB_ACCESS_REMOTE_WRITE ? XSC_PERM_REMOTE_WRITE : 0) | + (acc & IB_ACCESS_REMOTE_READ ? XSC_PERM_REMOTE_READ : 0) | + (acc & IB_ACCESS_LOCAL_WRITE ? XSC_PERM_LOCAL_WRITE : 0) | + XSC_PERM_LOCAL_READ; +} + +static inline enum ib_mtu xsc_net_to_ib_mtu(unsigned int mtu) +{ + mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES + + IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES + + IB_ICRC_BYTES); + + if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096)) + return IB_MTU_4096; + else if (mtu >= ib_mtu_enum_to_int(IB_MTU_1024)) + return IB_MTU_1024; + else + return 0; +} + +/** + * UDP source port selection must adhere IANA port allocation ranges. Thus + * we will be using IANA recommendation for Ephemeral port range of: + * 49152-65535, or in hex: 0xC000-0xFFFF. + */ +#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000) +#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF) +#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF) + +/** + * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based + * on the flow_label + * + * This function will convert the 20 bit flow_label input to a valid RoCE v2 + * UDP src port 14 bit value. All RoCE V2 drivers should use this same + * convention. 
+ */ +static inline u16 xsc_flow_label_to_udp_sport(u32 fl) +{ + u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000; + + fl_low ^= fl_high >> 14; + return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN); +} + +#define XSC_IB_IOMMU_MAP_DISABLE 0 +#define XSC_IB_IOMMU_MAP_UNKNOWN_DOMAIN 1 +#define XSC_IB_IOMMU_MAP_NORMAL 2 + +static inline int xsc_ib_iommu_dma_map(struct ib_device *ibdev) +{ + return to_mdev(ibdev)->iommu_state; +} + +static inline void *xsc_ib_iova_to_virt(struct ib_device *ibdev, dma_addr_t iova) +{ + phys_addr_t phyaddr; + struct iommu_domain *domain; + + domain = iommu_get_domain_for_dev(ibdev->dma_device); + if (likely(domain)) { + phyaddr = iommu_iova_to_phys(domain, iova); + phyaddr |= iova & (PAGE_SIZE - 1); + } else { + phyaddr = dma_to_phys(ibdev->dma_device, iova); + } + + return phys_to_virt(phyaddr); +} + +struct ib_mad_list_head { + struct list_head list; + struct ib_cqe cqe; + struct ib_mad_queue *mad_queue; +}; + +#define IB_MAD_SEND_REQ_MAX_SG 2 +struct ib_mad_send_wr_private { + struct ib_mad_list_head mad_list; + struct list_head agent_list; + struct ib_mad_agent_private *mad_agent_priv; + struct ib_mad_send_buf send_buf; + u64 header_mapping; + u64 payload_mapping; + struct ib_ud_wr send_wr; + struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; + __be64 tid; + unsigned long timeout; + int max_retries; + int retries_left; + int retry; + int refcount; + enum ib_wc_status status; + + /* RMPP control */ + struct list_head rmpp_list; + struct ib_rmpp_segment *last_ack_seg; + struct ib_rmpp_segment *cur_seg; + int last_ack; + int seg_num; + int newwin; + int pad; +}; + +struct ib_mad_private_header { + struct ib_mad_list_head mad_list; + struct ib_mad_recv_wc recv_wc; + struct ib_wc wc; + u64 mapping; +} __packed; + +struct ib_mad_private { + struct ib_mad_private_header header; + size_t mad_size; + struct ib_grh grh; + u8 mad[]; +} __packed; + +static inline void *xsc_ib_send_mad_sg_virt_addr(struct ib_device *ibdev, + const struct 
ib_send_wr *wr, + int sg) +{ + struct ib_mad_send_wr_private *mad_send_wr; + struct ib_mad_list_head *mad_list; + int iommu_state = xsc_ib_iommu_dma_map(ibdev); + + /* direct dma mapping */ + if (!iommu_state) + return phys_to_virt(dma_to_phys(ibdev->dma_device, wr->sg_list[sg].addr)); + + if (iommu_state == XSC_IB_IOMMU_MAP_NORMAL) + return xsc_ib_iova_to_virt(ibdev, wr->sg_list[sg].addr); + + mad_list = container_of(wr->wr_cqe, struct ib_mad_list_head, cqe); + mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private, + mad_list); + + /* sg_list[] */ + if (sg == 0) + return mad_send_wr->send_buf.mad; + + /* sg_list[1] */ + if (mad_send_wr->send_buf.seg_count) + return ib_get_rmpp_segment(&mad_send_wr->send_buf, + mad_send_wr->seg_num); + return mad_send_wr->send_buf.mad + mad_send_wr->send_buf.hdr_len; +} + +static inline void *xsc_ib_recv_mad_sg_virt_addr(struct ib_device *ibdev, + struct ib_wc *wc, + u64 sg_addr) +{ + struct ib_mad_private_header *mad_priv_hdr; + struct ib_mad_private *recv; + struct ib_mad_list_head *mad_list; + int iommu_state = xsc_ib_iommu_dma_map(ibdev); + + /* direct dma mapping */ + if (!iommu_state) + return phys_to_virt(dma_to_phys(ibdev->dma_device, sg_addr)); + + if (iommu_state == XSC_IB_IOMMU_MAP_NORMAL) + return xsc_ib_iova_to_virt(ibdev, sg_addr); + + mad_list = container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); + mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, mad_list); + recv = container_of(mad_priv_hdr, struct ib_mad_private, header); + return &recv->grh; +} + +#endif /* XSC_IB_H */ diff --git a/drivers/infiniband/hw/xsc/xsc_ib_compat.h b/drivers/infiniband/hw/xsc/xsc_ib_compat.h new file mode 100644 index 000000000000..9d43cfd5d41c --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_ib_compat.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_IB_COMPAT_H +#define XSC_IB_COMPAT_H + +/* + * adaptive to different ib_core versions + */ + +struct xsc_ib_ucontext; + +int xsc_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *ah_attr, + struct ib_udata *udata); +#define xsc_ib_create_ah_def() int xsc_ib_create_ah(\ + struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata) + +int xsc_ib_destroy_ah(struct ib_ah *ah, u32 flags); +#define xsc_ib_destroy_ah_def() int xsc_ib_destroy_ah(struct ib_ah *ah, u32 flags) +int xsc_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata); +int xsc_ib_create_cq(struct ib_cq *ibcq, + const struct ib_cq_init_attr *attr, + struct ib_udata *udata); +int xsc_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); + +// from main.c static functions +int xsc_ib_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); +void xsc_ib_dealloc_ucontext(struct ib_ucontext *ibcontext); +int xsc_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata); + +int xsc_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); +#define xsc_ib_dealloc_pd_def() int xsc_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) + +int xsc_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); +#define xsc_ib_destroy_cq_def() int xsc_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) + +#define xsc_ib_destroy_qp_def() int xsc_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) +#define xsc_ib_create_cq_def() int xsc_ib_create_cq(struct ib_cq *ibcq,\ + const struct ib_cq_init_attr *attr, struct ib_udata *udata) +#define xsc_ib_dereg_mr_def() int xsc_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) +#define xsc_ib_alloc_ucontext_def() int xsc_ib_alloc_ucontext(\ + struct ib_ucontext *uctx, struct ib_udata *udata) +#define xsc_ib_dealloc_ucontext_def() void xsc_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) +#define xsc_ib_alloc_pd_def() int xsc_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) + +#define RET_VALUE(x) (x) 
+ +#ifdef IB_ALLOC_MR_HAVE_UDATA +struct ib_mr *xsc_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg, struct ib_udata *udata); +#define xsc_ib_alloc_mr_def() struct ib_mr *xsc_ib_alloc_mr(\ + struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg, struct ib_udata *udata) +#else +struct ib_mr *xsc_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg); +#define xsc_ib_alloc_mr_def() struct ib_mr *xsc_ib_alloc_mr(\ + struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) +#endif + +#endif diff --git a/drivers/infiniband/hw/xsc/xsc_ib_sysfs.c b/drivers/infiniband/hw/xsc/xsc_ib_sysfs.c new file mode 100644 index 000000000000..f94f76394b2d --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_ib_sysfs.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/driver.h" +#include "common/xsc_cmd.h" +#include "xsc_ib.h" + +static ssize_t hca_type_show(struct device *device, struct device_attribute *attr, char *buf) +{ + struct ib_device *ib_dev = container_of(device, struct ib_device, dev); + struct xsc_core_device *dev = to_mdev(ib_dev)->xdev; + struct pci_dev *pdev = dev->pdev; + + return sprintf(buf, "%x\n", pdev->subsystem_device); +} + +static DEVICE_ATTR_RO(hca_type); + +static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr, char *buf) +{ + struct ib_device *ib_dev = container_of(device, struct ib_device, dev); + struct xsc_core_device *dev = to_mdev(ib_dev)->xdev; + u32 hw_ver = 0; + + hw_ver = ((dev->chip_ver_l & 0xffff) << 16) | + (dev->hotfix_num & 0xffff); + return sprintf(buf, "0x%x\n", hw_ver); +} + +static DEVICE_ATTR_RO(hw_rev); + +static struct device_attribute *xsc_ib_attributes[] = { + &dev_attr_hca_type, + &dev_attr_hw_rev, +}; + +void xsc_ib_sysfs_init(struct ib_device *ib_dev, 
struct xsc_core_device *xdev) +{ + int err = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(xsc_ib_attributes); i++) { + err = device_create_file(&ib_dev->dev, xsc_ib_attributes[i]); + if (err) + xsc_core_err(xdev, "Create sysfs file for %s failed.\n", + xsc_ib_attributes[i]->attr.name); + } +} + +void xsc_ib_sysfs_fini(struct ib_device *ib_dev, struct xsc_core_device *xdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(xsc_ib_attributes); i++) + device_remove_file(&ib_dev->dev, xsc_ib_attributes[i]); +} diff --git a/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c new file mode 100644 index 000000000000..dcf934b61e9b --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.c @@ -0,0 +1,715 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_port_ctrl.h" +#include "xsc_ib.h" + +#define XSC_RDMA_CTRL_NAME "rdma_ctrl" + +static void encode_cc_cmd_enable_rp(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_enable_rp *cc_cmd = (struct xsc_cc_cmd_enable_rp *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->enable = __cpu_to_be32(cc_cmd->enable); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_enable_np(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_enable_np *cc_cmd = (struct xsc_cc_cmd_enable_np *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->enable = __cpu_to_be32(cc_cmd->enable); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_init_alpha(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_init_alpha *cc_cmd = (struct xsc_cc_cmd_init_alpha *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = 
__cpu_to_be16(cc_cmd->len); + cc_cmd->alpha = __cpu_to_be32(cc_cmd->alpha); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_g(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_g *cc_cmd = (struct xsc_cc_cmd_g *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->g = __cpu_to_be32(cc_cmd->g); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_ai(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_ai *cc_cmd = (struct xsc_cc_cmd_ai *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->ai = __cpu_to_be32(cc_cmd->ai); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_hai(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_hai *cc_cmd = (struct xsc_cc_cmd_hai *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->hai = __cpu_to_be32(cc_cmd->hai); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_th(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_th *cc_cmd = (struct xsc_cc_cmd_th *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->threshold = __cpu_to_be32(cc_cmd->threshold); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_bc(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_bc *cc_cmd = (struct xsc_cc_cmd_bc *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->bytecount = __cpu_to_be32(cc_cmd->bytecount); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_cnp_opcode(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_opcode *cc_cmd = (struct xsc_cc_cmd_cnp_opcode *)data; + + cc_cmd->opcode = __cpu_to_be32(cc_cmd->opcode); +} + +static void encode_cc_cmd_cnp_bth_b(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_bth_b *cc_cmd = (struct 
xsc_cc_cmd_cnp_bth_b *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->bth_b = __cpu_to_be32(cc_cmd->bth_b); +} + +static void encode_cc_cmd_cnp_bth_f(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_bth_f *cc_cmd = (struct xsc_cc_cmd_cnp_bth_f *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->bth_f = __cpu_to_be32(cc_cmd->bth_f); +} + +static void encode_cc_cmd_cnp_ecn(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_ecn *cc_cmd = (struct xsc_cc_cmd_cnp_ecn *)data; + + cc_cmd->ecn = __cpu_to_be32(cc_cmd->ecn); +} + +static void encode_cc_cmd_data_ecn(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_data_ecn *cc_cmd = (struct xsc_cc_cmd_data_ecn *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->ecn = __cpu_to_be32(cc_cmd->ecn); +} + +static void encode_cc_cmd_cnp_tx_interval(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_tx_interval *cc_cmd = (struct xsc_cc_cmd_cnp_tx_interval *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->interval = __cpu_to_be32(cc_cmd->interval); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_evt_rsttime(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_evt_rsttime *cc_cmd = + (struct xsc_cc_cmd_evt_rsttime *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->period = __cpu_to_be32(cc_cmd->period); +} + +static void encode_cc_cmd_cnp_dscp(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_dscp *cc_cmd = (struct xsc_cc_cmd_cnp_dscp *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->dscp = __cpu_to_be32(cc_cmd->dscp); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_cnp_pcp(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_cnp_pcp 
*cc_cmd = (struct xsc_cc_cmd_cnp_pcp *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->pcp = __cpu_to_be32(cc_cmd->pcp); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_evt_period_alpha(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_evt_period_alpha *cc_cmd = (struct xsc_cc_cmd_evt_period_alpha *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->period = __cpu_to_be32(cc_cmd->period); +} + +static void encode_cc_cmd_clamp_tgt_rate(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_clamp_tgt_rate *cc_cmd = (struct xsc_cc_cmd_clamp_tgt_rate *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->clamp_tgt_rate = __cpu_to_be32(cc_cmd->clamp_tgt_rate); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_max_hai_factor(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_max_hai_factor *cc_cmd = (struct xsc_cc_cmd_max_hai_factor *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->max_hai_factor = __cpu_to_be32(cc_cmd->max_hai_factor); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_cmd_scale(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_scale *cc_cmd = (struct xsc_cc_cmd_scale *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->scale = __cpu_to_be32(cc_cmd->scale); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void encode_cc_get_cfg(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_get_cfg *cc_cmd = (struct xsc_cc_cmd_get_cfg *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void decode_cc_get_cfg(void *data) +{ + struct xsc_cc_cmd_get_cfg *cc_cmd = (struct xsc_cc_cmd_get_cfg *)data; + + cc_cmd->cmd = 
__be16_to_cpu(cc_cmd->cmd); + cc_cmd->len = __be16_to_cpu(cc_cmd->len); + cc_cmd->enable_rp = __be32_to_cpu(cc_cmd->enable_rp); + cc_cmd->enable_np = __be32_to_cpu(cc_cmd->enable_np); + cc_cmd->init_alpha = __be32_to_cpu(cc_cmd->init_alpha); + cc_cmd->g = __be32_to_cpu(cc_cmd->g); + cc_cmd->ai = __be32_to_cpu(cc_cmd->ai); + cc_cmd->hai = __be32_to_cpu(cc_cmd->hai); + cc_cmd->threshold = __be32_to_cpu(cc_cmd->threshold); + cc_cmd->bytecount = __be32_to_cpu(cc_cmd->bytecount); + cc_cmd->opcode = __be32_to_cpu(cc_cmd->opcode); + cc_cmd->bth_b = __be32_to_cpu(cc_cmd->bth_b); + cc_cmd->bth_f = __be32_to_cpu(cc_cmd->bth_f); + cc_cmd->cnp_ecn = __be32_to_cpu(cc_cmd->cnp_ecn); + cc_cmd->data_ecn = __be32_to_cpu(cc_cmd->data_ecn); + cc_cmd->cnp_tx_interval = __be32_to_cpu(cc_cmd->cnp_tx_interval); + cc_cmd->evt_period_rsttime = __be32_to_cpu(cc_cmd->evt_period_rsttime); + cc_cmd->cnp_dscp = __be32_to_cpu(cc_cmd->cnp_dscp); + cc_cmd->cnp_pcp = __be32_to_cpu(cc_cmd->cnp_pcp); + cc_cmd->evt_period_alpha = __be32_to_cpu(cc_cmd->evt_period_alpha); + cc_cmd->clamp_tgt_rate = __be32_to_cpu(cc_cmd->clamp_tgt_rate); + cc_cmd->max_hai_factor = __be32_to_cpu(cc_cmd->max_hai_factor); + cc_cmd->scale = __be32_to_cpu(cc_cmd->scale); + cc_cmd->section = __be32_to_cpu(cc_cmd->section); +} + +static void encode_cc_get_stat(void *data, u32 mac_port) +{ + struct xsc_cc_cmd_get_stat *cc_cmd = (struct xsc_cc_cmd_get_stat *)data; + + cc_cmd->cmd = __cpu_to_be16(cc_cmd->cmd); + cc_cmd->len = __cpu_to_be16(cc_cmd->len); + cc_cmd->section = __cpu_to_be32(mac_port); +} + +static void decode_cc_get_stat(void *data) +{ + struct xsc_cc_cmd_stat *cc_cmd = (struct xsc_cc_cmd_stat *)data; + + cc_cmd->cnp_handled = __be32_to_cpu(cc_cmd->cnp_handled); + cc_cmd->alpha_recovery = __be32_to_cpu(cc_cmd->alpha_recovery); + cc_cmd->reset_timeout = __be32_to_cpu(cc_cmd->reset_timeout); + cc_cmd->reset_bytecount = __be32_to_cpu(cc_cmd->reset_bytecount); +} + +static int xsc_priv_dev_ioctl_get_force_pcp(struct 
xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_pcp *resp = (struct xsc_ioctl_force_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->pcp = ib_dev->force_pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_get_force_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_dscp *resp = (struct xsc_ioctl_force_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->dscp = ib_dev->force_dscp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_force_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_pcp *req = (struct xsc_ioctl_force_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->pcp < 0 || (req->pcp > QOS_PCP_MAX && req->pcp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->force_pcp = req->pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_force_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_force_dscp *req = (struct xsc_ioctl_force_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->dscp < 0 || (req->dscp > QOS_DSCP_MAX && req->dscp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->force_dscp = req->dscp; + return 0; +} + +static int xsc_priv_dev_ioctl_get_cma_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_cma_pcp *resp = (struct xsc_ioctl_cma_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->pcp = ib_dev->cm_pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_get_cma_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_cma_dscp *resp = (struct xsc_ioctl_cma_dscp *)out; + + if 
(!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + resp->dscp = ib_dev->cm_dscp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_cma_pcp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_cma_pcp *req = (struct xsc_ioctl_cma_pcp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->pcp < 0 || (req->pcp > QOS_PCP_MAX && req->pcp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->cm_pcp = req->pcp; + return 0; +} + +static int xsc_priv_dev_ioctl_set_cma_dscp(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_ib_dev *ib_dev = xdev->xsc_ib_dev; + struct xsc_ioctl_cma_dscp *req = (struct xsc_ioctl_cma_dscp *)out; + + if (!xsc_core_is_pf(xdev)) + return -EOPNOTSUPP; + + if (req->dscp < 0 || (req->dscp > QOS_DSCP_MAX && req->dscp != DSCP_PCP_UNSET)) + return -EINVAL; + + ib_dev->cm_dscp = req->dscp; + return 0; +} + +static int _rdma_ctrl_ioctl_cc(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, u16 expect_resp_size, + void (*encode)(void *, u32), void (*decode)(void *)) +{ + struct xsc_cc_mbox_in *in; + struct xsc_cc_mbox_out *out; + u16 user_size; + int err; + + user_size = expect_req_size > expect_resp_size ? 
expect_req_size : expect_resp_size; + if (hdr->attr.length != user_size) + return -EINVAL; + + in = kvzalloc(sizeof(struct xsc_cc_mbox_in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(struct xsc_cc_mbox_out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + if (encode) + encode((void *)in->data, xdev->mac_port); + + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + + hdr->attr.error = __be32_to_cpu(out->hdr.status); + if (decode) + decode((void *)out->data); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +int _rdma_ctrl_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size) +{ + int opcode, ret = 0; + struct xsc_ioctl_attr *hdr; + + hdr = (struct xsc_ioctl_attr *)in; + opcode = hdr->opcode; + switch (opcode) { + case XSC_IOCTL_GET_FORCE_PCP: + ret = xsc_priv_dev_ioctl_get_force_pcp(xdev, in, out); + break; + case XSC_IOCTL_GET_FORCE_DSCP: + ret = xsc_priv_dev_ioctl_get_force_dscp(xdev, in, out); + break; + case XSC_IOCTL_GET_CMA_PCP: + ret = xsc_priv_dev_ioctl_get_cma_pcp(xdev, in, out); + break; + case XSC_IOCTL_GET_CMA_DSCP: + ret = xsc_priv_dev_ioctl_get_cma_dscp(xdev, in, out); + break; + case XSC_IOCTL_SET_FORCE_PCP: + xsc_core_dbg(xdev, "setting global pcp\n"); + ret = xsc_priv_dev_ioctl_set_force_pcp(xdev, in, out); + break; + case XSC_IOCTL_SET_FORCE_DSCP: + xsc_core_dbg(xdev, "setting global dscp\n"); + ret = xsc_priv_dev_ioctl_set_force_dscp(xdev, in, out); + break; + case 
XSC_IOCTL_SET_CMA_PCP: + ret = xsc_priv_dev_ioctl_set_cma_pcp(xdev, in, out); + break; + case XSC_IOCTL_SET_CMA_DSCP: + ret = xsc_priv_dev_ioctl_set_cma_dscp(xdev, in, out); + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static long _rdma_ctrl_ioctl_getinfo(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + struct xsc_ioctl_hdr *in; + int in_size; + int err; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + switch (hdr.attr.opcode) { + case XSC_IOCTL_GET_FORCE_PCP: + case XSC_IOCTL_GET_FORCE_DSCP: + case XSC_IOCTL_SET_FORCE_PCP: + case XSC_IOCTL_SET_FORCE_DSCP: + case XSC_IOCTL_GET_CMA_PCP: + case XSC_IOCTL_GET_CMA_DSCP: + case XSC_IOCTL_SET_CMA_PCP: + case XSC_IOCTL_SET_CMA_DSCP: + break; + default: + return -EINVAL; + } + in_size = sizeof(struct xsc_ioctl_hdr) + hdr.attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -EFAULT; + in->attr.opcode = hdr.attr.opcode; + in->attr.length = hdr.attr.length; + err = copy_from_user(in->attr.data, user_hdr->attr.data, hdr.attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + err = _rdma_ctrl_exec_ioctl(xdev, &in->attr, (in_size - sizeof(u32)), in->attr.data, + hdr.attr.length); + in->attr.error = err; + if (copy_to_user(user_hdr, in, in_size)) + err = -EFAULT; + kvfree(in); + return err; +} + +static long _rdma_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + int err; + void *in; + void *out; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + + /* check ioctl cmd */ + switch (hdr.attr.opcode) { + case XSC_CMD_OP_IOCTL_SET_ENABLE_RP: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_enable_rp), + 0, 
encode_cc_cmd_enable_rp, NULL); + case XSC_CMD_OP_IOCTL_SET_ENABLE_NP: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_enable_np), + 0, encode_cc_cmd_enable_np, NULL); + case XSC_CMD_OP_IOCTL_SET_INIT_ALPHA: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_init_alpha), + 0, encode_cc_cmd_init_alpha, NULL); + case XSC_CMD_OP_IOCTL_SET_G: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_g), + 0, encode_cc_cmd_g, NULL); + case XSC_CMD_OP_IOCTL_SET_AI: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_ai), + 0, encode_cc_cmd_ai, NULL); + case XSC_CMD_OP_IOCTL_SET_HAI: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_hai), + 0, encode_cc_cmd_hai, NULL); + case XSC_CMD_OP_IOCTL_SET_TH: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_th), + 0, encode_cc_cmd_th, NULL); + case XSC_CMD_OP_IOCTL_SET_BC_TH: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_bc), + 0, encode_cc_cmd_bc, NULL); + case XSC_CMD_OP_IOCTL_SET_CNP_OPCODE: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_opcode), + 0, encode_cc_cmd_cnp_opcode, NULL); + case XSC_CMD_OP_IOCTL_SET_CNP_BTH_B: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_bth_b), + 0, encode_cc_cmd_cnp_bth_b, NULL); + case XSC_CMD_OP_IOCTL_SET_CNP_BTH_F: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_bth_f), + 0, encode_cc_cmd_cnp_bth_f, NULL); + case XSC_CMD_OP_IOCTL_SET_CNP_ECN: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_cnp_ecn), + 0, encode_cc_cmd_cnp_ecn, NULL); + case XSC_CMD_OP_IOCTL_SET_DATA_ECN: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_data_ecn), + 0, encode_cc_cmd_data_ecn, NULL); + case XSC_CMD_OP_IOCTL_SET_CNP_TX_INTERVAL: + return _rdma_ctrl_ioctl_cc(xdev, 
user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_tx_interval), + 0, encode_cc_cmd_cnp_tx_interval, NULL); + case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_RSTTIME: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_evt_rsttime), + 0, encode_cc_cmd_evt_rsttime, NULL); + case XSC_CMD_OP_IOCTL_SET_CNP_DSCP: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_dscp), + 0, encode_cc_cmd_cnp_dscp, NULL); + case XSC_CMD_OP_IOCTL_SET_CNP_PCP: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_cnp_pcp), + 0, encode_cc_cmd_cnp_pcp, NULL); + case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_ALPHA: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_evt_period_alpha), + 0, encode_cc_cmd_evt_period_alpha, NULL); + case XSC_CMD_OP_IOCTL_SET_CLAMP_TGT_RATE: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_clamp_tgt_rate), + 0, encode_cc_cmd_clamp_tgt_rate, NULL); + case XSC_CMD_OP_IOCTL_SET_MAX_HAI_FACTOR: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_max_hai_factor), + 0, encode_cc_cmd_max_hai_factor, NULL); + case XSC_CMD_OP_IOCTL_SET_SCALE: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, + sizeof(struct xsc_cc_cmd_scale), + 0, encode_cc_cmd_scale, NULL); + case XSC_CMD_OP_IOCTL_GET_CC_CFG: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_get_cfg), + sizeof(struct xsc_cc_cmd_get_cfg), + encode_cc_get_cfg, decode_cc_get_cfg); + case XSC_CMD_OP_IOCTL_GET_CC_STAT: + return _rdma_ctrl_ioctl_cc(xdev, user_hdr, &hdr, sizeof(struct xsc_cc_cmd_get_stat), + sizeof(struct xsc_cc_cmd_stat), + encode_cc_get_stat, decode_cc_get_stat); + default: + return -EINVAL; + } + + in = kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!in) + return -ENOMEM; + out = kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!out) { + kfree(in); + return -ENOMEM; + } + + err = copy_from_user(in, user_hdr->attr.data, hdr.attr.length); + if (err) { + 
err = -EFAULT; + goto err_exit; + } + + xsc_cmd_exec(xdev, in, hdr.attr.length, out, hdr.attr.length); + + if (copy_to_user((void *)user_hdr, &hdr, sizeof(hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, out, hdr.attr.length)) + err = -EFAULT; +err_exit: + kfree(in); + kfree(out); + return err; +} + +static int _rdma_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd, + struct xsc_ioctl_hdr __user *user_hdr, void *data) +{ + struct xsc_core_device *xdev = file->xdev; + int err; + + switch (cmd) { + case XSC_IOCTL_CMDQ: + err = _rdma_ctrl_ioctl_cmdq(xdev, user_hdr); + break; + case XSC_IOCTL_DRV_GET: + case XSC_IOCTL_DRV_SET: + // TODO refactor to split driver get and set + err = _rdma_ctrl_ioctl_getinfo(xdev, user_hdr); + break; + default: + err = -EFAULT; + break; + } + + return err; +} + +static void _rdma_ctrl_reg_fini(void) +{ + xsc_port_ctrl_cb_dereg(XSC_RDMA_CTRL_NAME); +} + +static int _rdma_ctrl_reg_init(void) +{ + int ret; + + ret = xsc_port_ctrl_cb_reg(XSC_RDMA_CTRL_NAME, _rdma_ctrl_reg_cb, NULL); + if (ret != 0) + pr_err("failed to register port control node for %s\n", XSC_RDMA_CTRL_NAME); + + return ret; +} + +void xsc_rdma_ctrl_fini(void) +{ + _rdma_ctrl_reg_fini(); +} + +int xsc_rdma_ctrl_init(void) +{ + return _rdma_ctrl_reg_init(); +} diff --git a/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h new file mode 100644 index 000000000000..5049377101f9 --- /dev/null +++ b/drivers/infiniband/hw/xsc/xsc_rdma_ctrl.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_RDMA_CTRL_H +#define XSC_RDMA_CTRL_H + +void xsc_rdma_ctrl_fini(void); +int xsc_rdma_ctrl_init(void); + +#endif diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index d6753a9ba00f..268c84e49194 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -86,6 +86,7 @@ source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" source "drivers/net/ethernet/xscale/Kconfig" +source "drivers/net/ethernet/yunsilicon/Kconfig" config JME tristate "JMicron(R) PCI-Express Gigabit Ethernet support" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 5d715f4aff6b..423e9edd6777 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -51,6 +51,7 @@ obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ obj-$(CONFIG_NET_VENDOR_MICROSOFT) += microsoft/ obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ +obj-$(CONFIG_NET_VENDOR_YUNSILICON) += yunsilicon/ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o diff --git a/drivers/net/ethernet/yunsilicon/Kconfig b/drivers/net/ethernet/yunsilicon/Kconfig new file mode 100644 index 000000000000..a387a8ddeba4 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Yunsilicon driver configuration +# + +config NET_VENDOR_YUNSILICON + bool "Yunsilicon devices" + default y + depends on PCI || NET + depends on ARM64 || X86_64 + help + If you have a network (Ethernet or RDMA) device belonging to this + class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Yunsilicon devices. 
If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_YUNSILICON + +source "drivers/net/ethernet/yunsilicon/xsc/net/Kconfig" +source "drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig" + +endif # NET_VENDOR_YUNSILICON diff --git a/drivers/net/ethernet/yunsilicon/Makefile b/drivers/net/ethernet/yunsilicon/Makefile new file mode 100644 index 000000000000..0c603d2bf207 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Makefile for the Yunsilicon device drivers. +# + +obj-$(CONFIG_YUNSILICON_XSC_ETH) += xsc/net/ +obj-$(CONFIG_YUNSILICON_XSC_PCI) += xsc/pci/ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/cq.h b/drivers/net/ethernet/yunsilicon/xsc/common/cq.h new file mode 100644 index 000000000000..76f0c5064446 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/cq.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_CORE_CQ_H +#define XSC_CORE_CQ_H + +#include +#include "common/driver.h" +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" + +struct xsc_core_cq { + u32 cqn; + int cqe_sz; + u64 arm_db; + u64 ci_db; + struct xsc_core_device *dev; + atomic_t refcount; + struct completion free; + unsigned int vector; + int irqn; + u16 dim_us; + u16 dim_pkts; + void (*comp)(struct xsc_core_cq *cq); + void (*event)(struct xsc_core_cq *cq, enum xsc_event); + u32 cons_index; + unsigned int arm_sn; + struct xsc_rsc_debug *dbg; + int pid; + u32 reg_next_cid; + u32 reg_done_pid; + struct xsc_eq *eq; +}; + +enum { + XSC_CQE_OWNER_MASK = 1, +}; + +enum { + CQE_SIZE_64 = 0, + CQE_SIZE_128 = 1, +}; + +enum { + XSC_CQ_DB_REQ_NOT_SOL = 1, + XSC_CQ_DB_REQ_NOT = 0, +}; + +static inline void xsc_cq_arm(struct xsc_core_cq *cq, u8 solicited) +{ + union xsc_cq_doorbell db; + + db.val = 0; + db.cq_next_cid = cq->cons_index; + db.cq_id = cq->cqn; + db.arm = solicited; + + /* Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. 
+ */ + wmb(); + writel(db.val, REG_ADDR(cq->dev, cq->arm_db)); +} + +static inline void xsc_cq_set_ci(struct xsc_core_cq *cq) +{ + struct xsc_core_device *xdev = cq->dev; + union xsc_cq_doorbell db; + + db.cq_next_cid = cq->cons_index; + db.cq_id = cq->cqn; + /* ensure write val visable before doorbell */ + wmb(); + + writel(db.val, REG_ADDR(xdev, cq->ci_db)); +} + +int xsc_core_create_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, + struct xsc_create_cq_mbox_in *in, int inlen); +int xsc_core_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq); +int xsc_core_query_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, + struct xsc_query_cq_mbox_out *out); +int xsc_debug_cq_add(struct xsc_core_device *dev, struct xsc_core_cq *cq); +void xsc_debug_cq_remove(struct xsc_core_device *dev, struct xsc_core_cq *cq); + +void xsc_init_cq_table(struct xsc_core_device *dev); +void xsc_cleanup_cq_table(struct xsc_core_device *dev); +#endif /* XSC_CORE_CQ_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/device.h b/drivers/net/ethernet/yunsilicon/xsc/common/device.h new file mode 100644 index 000000000000..1d1b0be09379 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/device.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_DEVICE_H +#define XSC_DEVICE_H + +#include +#include + +enum { + XSC_MAX_COMMANDS = 32, + XSC_CMD_DATA_BLOCK_SIZE = 512, + XSC_PCI_CMD_XPORT = 7, +}; + +enum { + XSC_PERM_LOCAL_READ = 1 << 0, + XSC_PERM_LOCAL_WRITE = 1 << 1, + XSC_PERM_REMOTE_READ = 1 << 2, + XSC_PERM_REMOTE_WRITE = 1 << 3, + XSC_PERM_ATOMIC = 1 << 6, + XSC_PERM_UMR_EN = 1 << 7, +}; + +enum { + XSC_ACCESS_MODE_PA = 0, + XSC_ACCESS_MODE_MTT = 1, + XSC_ACCESS_MODE_KLM = 2 +}; + +enum { + XSC_MKEY_REMOTE_INVAL = 1 << 24, + XSC_MKEY_FLAG_SYNC_UMR = 1 << 29, + XSC_MKEY_BSF_EN = 1 << 30, + XSC_MKEY_LEN64 = 1 << 31, +}; + +enum { + XSC_BF_REGS_PER_PAGE = 4, + XSC_MAX_UAR_PAGES = 1 << 8, + XSC_MAX_UUARS = XSC_MAX_UAR_PAGES * XSC_BF_REGS_PER_PAGE, +}; + +enum { + XSC_DEV_CAP_FLAG_RC = 1LL << 0, + XSC_DEV_CAP_FLAG_UC = 1LL << 1, + XSC_DEV_CAP_FLAG_UD = 1LL << 2, + XSC_DEV_CAP_FLAG_XRC = 1LL << 3, + XSC_DEV_CAP_FLAG_SRQ = 1LL << 6, + XSC_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, + XSC_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, + XSC_DEV_CAP_FLAG_APM = 1LL << 17, + XSC_DEV_CAP_FLAG_ATOMIC = 1LL << 18, + XSC_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, + XSC_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, + XSC_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, + XSC_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, + XSC_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, + XSC_DEV_CAP_FLAG_DCT = 1LL << 41, + XSC_DEV_CAP_FLAG_CMDIF_CSUM = 1LL << 46, +}; + +enum xsc_event { + XSC_EVENT_TYPE_COMP = 0x0, + XSC_EVENT_TYPE_COMM_EST = 0x02,//mad + XSC_EVENT_TYPE_CQ_ERROR = 0x04, + XSC_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, + XSC_EVENT_TYPE_INTERNAL_ERROR = 0x08,//tpe私有err,无IB event对应 + XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,//IBV_EVENT_QP_REQ_ERR + XSC_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,//IBV_EVENT_QP_ACCESS_ERR +}; + +struct xsc_cmd_prot_block { + u8 data[XSC_CMD_DATA_BLOCK_SIZE]; + u8 rsvd0[48]; + __be64 next; + __be32 block_num; + u8 owner_status; //init to 0, dma user should change this val to 1 + u8 token; + u8 ctrl_sig; + u8 sig; +}; + +#define 
XSC_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) + +enum xsc_traffic_types { + XSC_TT_IPV4, + XSC_TT_IPV4_TCP, + XSC_TT_IPV4_UDP, + XSC_TT_IPV6, + XSC_TT_IPV6_TCP, + XSC_TT_IPV6_UDP, + XSC_TT_IPV4_IPSEC_AH, + XSC_TT_IPV6_IPSEC_AH, + XSC_TT_IPV4_IPSEC_ESP, + XSC_TT_IPV6_IPSEC_ESP, + XSC_TT_ANY, + XSC_NUM_TT, +}; + +#define XSC_NUM_INDIR_TIRS XSC_NUM_TT + +enum { + XSC_HASH_FUNC_XOR = 0, + XSC_HASH_FUNC_TOP = 1, + XSC_HASH_FUNC_TOP_SYM = 2, + XSC_HASH_FUNC_RSV = 3, +}; + +enum { + XSC_L3_PROT_TYPE_IPV4 = 1 << 0, + XSC_L3_PROT_TYPE_IPV6 = 1 << 1, +}; + +enum { + XSC_L4_PROT_TYPE_TCP = 1 << 0, + XSC_L4_PROT_TYPE_UDP = 1 << 1, +}; + +struct xsc_tirc_config { + u8 l3_prot_type; + u8 l4_prot_type; + u32 rx_hash_fields; +}; + +static inline u8 hash_func_type(u8 hash_func) +{ + switch (hash_func) { + case ETH_RSS_HASH_TOP: + return XSC_HASH_FUNC_TOP; + case ETH_RSS_HASH_XOR: + return XSC_HASH_FUNC_XOR; + default: + return XSC_HASH_FUNC_TOP; + } +} + +#endif /* XSC_DEVICE_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/doorbell.h b/drivers/net/ethernet/yunsilicon/xsc/common/doorbell.h new file mode 100644 index 000000000000..6b9fdfb738d8 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/doorbell.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_DOORBELL_H +#define XSC_DOORBELL_H + +#if BITS_PER_LONG == 64 +/* Assume that we can just write a 64-bit doorbell atomically. s390 + * actually doesn't have writeq() but S/390 systems don't even have + * PCI so we won't worry about it. 
+ */ + +#define XSC_DECLARE_DOORBELL_LOCK(name) +#define XSC_INIT_DOORBELL_LOCK(ptr) do { } while (0) +#define XSC_GET_DOORBELL_LOCK(ptr) (NULL) + +static inline void xsc_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + __raw_writeq(*(u64 *)val, dest); +} + +#else + +/* Just fall back to a spinlock to protect the doorbell if + * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit + * MMIO writes. + */ + +#define XSC_DECLARE_DOORBELL_LOCK(name) spinlock_t name +#define XSC_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr) +#define XSC_GET_DOORBELL_LOCK(ptr) (ptr) + +static inline void xsc_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + unsigned long flags; + + spin_lock_irqsave(doorbell_lock, flags); + __raw_writel((__force u32)val[0], dest); + __raw_writel((__force u32)val[1], dest + 4); + spin_unlock_irqrestore(doorbell_lock, flags); +} + +#endif + +#endif /* XSC_DOORBELL_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/driver.h b/drivers/net/ethernet/yunsilicon/xsc/common/driver.h new file mode 100644 index 000000000000..03705978a85a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/driver.h @@ -0,0 +1,341 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_DRIVER_H +#define XSC_DRIVER_H + +#include +#include +#include +#include +#include +#include +#include +#include "common/device.h" +#include "common/doorbell.h" +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" +#include "common/xsc_hsi.h" +#include "common/qpts.h" + +#define LS_64(val, field) (((u64)(val) << field ## _SHIFT) & (field ## _MASK)) +#define RS_64(val, field) ((u64)((val) & field ## _MASK) >> field ## _SHIFT) +#define LS_32(val, field) (((val) << field ## _SHIFT) & (field ## _MASK)) +#define RS_32(val, field) (((val) & field ## _MASK) >> field ## _SHIFT) + +enum { + CMD_OWNER_SW = 0x0, + CMD_OWNER_HW = 0x1, + CMD_STATUS_SUCCESS = 0, +}; + +enum { + XSC_MAX_FW_PORTS = 1, +}; + +enum { + XSC_MAX_IRQ_NAME = 32 +}; + +enum { + XSC_MAX_EQ_NAME = 20 +}; + +enum { + XSC_REG_PCAP = 0x5001, + XSC_REG_PMTU = 0x5003, + XSC_REG_PTYS = 0x5004, + XSC_REG_PAOS = 0x5006, + XSC_REG_PMAOS = 0x5012, + XSC_REG_PUDE = 0x5009, + XSC_REG_PMPE = 0x5010, + XSC_REG_PELC = 0x500e, + XSC_REG_PMLP = 0, /* TBD */ + XSC_REG_NODE_DESC = 0x6001, + XSC_REG_HOST_ENDIANNESS = 0x7004, + XSC_REG_MCIA = 0x9014, +}; + +enum dbg_rsc_type { + XSC_DBG_RSC_QP, + XSC_DBG_RSC_EQ, + XSC_DBG_RSC_CQ, +}; + +struct xsc_field_desc { + struct dentry *dent; + int i; +}; + +struct xsc_rsc_debug { + struct xsc_core_device *xdev; + void *object; + enum dbg_rsc_type type; + struct dentry *root; + struct xsc_field_desc fields[]; +}; + +struct xsc_buf_list { + void *buf; + dma_addr_t map; +}; + +struct xsc_buf { + struct xsc_buf_list direct; + struct xsc_buf_list *page_list; + int nbufs; + int npages; + int page_shift; + int size; +}; + +struct xsc_frag_buf { + struct xsc_buf_list *frags; + int npages; + int size; + u8 page_shift; +}; + +struct xsc_frag_buf_ctrl { + struct xsc_buf_list *frags; + u32 sz_m1; + u16 frag_sz_m1; + u16 strides_offset; + u8 log_sz; + u8 log_stride; + u8 log_frag_strides; +}; + +struct xsc_cq_table { + /* protect radix tree + */ + spinlock_t lock; + struct 
radix_tree_root tree; +}; + +struct xsc_eq { + struct xsc_core_device *dev; + struct xsc_cq_table cq_table; + u32 doorbell;//offset from bar0/2 space start + u32 cons_index; + struct xsc_buf buf; + int size; + unsigned int irqn; + u16 eqn; + int nent; + cpumask_var_t mask; + char name[XSC_MAX_EQ_NAME]; + struct list_head list; + int index; + struct xsc_rsc_debug *dbg; +}; + +struct xsc_core_mr { + u64 iova; + u64 size; + u32 key; + u32 pd; + u32 access; +}; + +struct xsc_eq_table { + void __iomem *update_ci; + void __iomem *update_arm_ci; + struct list_head comp_eqs_list; + struct xsc_eq pages_eq; + struct xsc_eq async_eq; + struct xsc_eq cmd_eq; + int num_comp_vectors; + int eq_vec_comp_base; + /* protect EQs list + */ + spinlock_t lock; +}; + +struct xsc_irq_info { + cpumask_var_t mask; + char name[XSC_MAX_IRQ_NAME]; +}; + +struct xsc_qp_table { + /* protect radix tree + */ + spinlock_t lock; + struct radix_tree_root tree; +}; + +struct counter_name_map { + int index; + const char *reg_name; +}; + +struct counter_reg_map { + int index; + int reg_addr; +}; + +struct xsc_dev_resource { + struct xsc_qp_table qp_table; + struct xsc_cq_table cq_table; + struct xsc_eq_table eq_table; + struct xsc_irq_info *irq_info; + spinlock_t mkey_lock; /* protect mkey */ + u8 mkey_key; + struct mutex alloc_mutex; /* protect buffer alocation according to numa node */ + int numa_node; + int fw_pages; + int reg_pages; + struct mutex pgdir_mutex; /* protect pgdir_list */ + struct list_head pgdir_list; + struct dentry *qp_debugfs; + struct dentry *eq_debugfs; + struct dentry *cq_debugfs; + struct dentry *cmdif_debugfs; + struct dentry *qptrace_debugfs; + struct dentry *dbg_root; +}; + +struct xsc_db { + __be32 *db; + union { + struct xsc_db_pgdir *pgdir; + struct xsc_ib_user_db_page *user_page; + } u; + dma_addr_t dma; + int index; +}; + +enum { + XSC_COMP_EQ_SIZE = 1024, +}; + +/*replace by struct define in ofed*/ +struct xsc_db_pgdir { + struct list_head list; + unsigned long *bitmap; 
+ __be32 *db_page; + dma_addr_t db_dma; +}; + +static inline void *xsc_buf_offset(struct xsc_buf *buf, int offset) +{ + if (likely(BITS_PER_LONG == 64 || buf->nbufs == 1)) + return buf->direct.buf + offset; + else + return buf->page_list[offset >> PAGE_SHIFT].buf + + (offset & (PAGE_SIZE - 1)); +} + +static inline struct xsc_core_device *pci2xdev(struct pci_dev *pdev) +{ + return pci_get_drvdata(pdev); +} + +extern struct dentry *xsc_debugfs_root; + +static inline void *xsc_vzalloc(unsigned long size) +{ + void *rtn; + + rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (!rtn) + rtn = vzalloc(size); + return rtn; +} + +static inline void xsc_vfree(const void *addr) +{ + if (addr && is_vmalloc_addr(addr)) + vfree(addr); + else + kfree(addr); +} + +int xsc_dev_init(struct xsc_core_device *xdev); +void xsc_dev_cleanup(struct xsc_core_device *xdev); +int xsc_cmd_init(struct xsc_core_device *xdev); +void xsc_cmd_cleanup(struct xsc_core_device *xdev); +void xsc_cmd_use_events(struct xsc_core_device *xdev); +void xsc_cmd_use_polling(struct xsc_core_device *xdev); +int xsc_cmd_err_handler(struct xsc_core_device *xdev); +void xsc_cmd_resp_handler(struct xsc_core_device *xdev); +int xsc_cmd_status_to_err(struct xsc_outbox_hdr *hdr); +int _xsc_cmd_exec(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size); +int xsc_buf_alloc(struct xsc_core_device *xdev, int size, int max_direct, + struct xsc_buf *buf); +void xsc_buf_free(struct xsc_core_device *dev, struct xsc_buf *buf); +int xsc_core_create_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr); +int xsc_core_destroy_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr); +int xsc_core_register_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr, + struct xsc_register_mr_mbox_in *in, int inlen); +int xsc_core_dereg_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr); +void xsc_reg_local_dma_mr(struct xsc_core_device *dev); +int xsc_core_alloc_pd(struct xsc_core_device *xdev, u32 
*pdn); +int xsc_core_dealloc_pd(struct xsc_core_device *xdev, u32 pdn); +void xsc_register_debugfs(void); +void xsc_unregister_debugfs(void); +int xsc_eq_init(struct xsc_core_device *dev); +void xsc_eq_cleanup(struct xsc_core_device *dev); +struct xsc_eq *xsc_eq_get(struct xsc_core_device *dev, int index); + +void xsc_fill_page_array(struct xsc_buf *buf, __be64 *pas, int npages); +void xsc_fill_page_frag_array(struct xsc_frag_buf *buf, __be64 *pas, int npages); +void xsc_qp_event(struct xsc_core_device *xdev, u32 qpn, int event_type); +int xsc_vector2eqn(struct xsc_core_device *dev, int vector, int *eqn, + unsigned int *irqn); +void xsc_cq_event(struct xsc_core_device *xdev, u32 cqn, int event_type); +int xsc_create_map_eq(struct xsc_core_device *dev, struct xsc_eq *eq, u8 vecidx, + int nent, const char *name); +int xsc_destroy_unmap_eq(struct xsc_core_device *dev, struct xsc_eq *eq); +int xsc_start_eqs(struct xsc_core_device *dev); +void xsc_stop_eqs(struct xsc_core_device *dev); + +int xsc_qp_debugfs_init(struct xsc_core_device *dev); +void xsc_qp_debugfs_cleanup(struct xsc_core_device *dev); +int xsc_core_access_reg(struct xsc_core_device *xdev, void *data_in, + int size_in, void *data_out, int size_out, + u16 reg_num, int arg, int write); +int xsc_set_port_caps(struct xsc_core_device *xdev, int port_num, u32 caps); + +int xsc_debug_eq_add(struct xsc_core_device *xdev, struct xsc_eq *eq); +void xsc_debug_eq_remove(struct xsc_core_device *xdev, struct xsc_eq *eq); +int xsc_core_eq_query(struct xsc_core_device *dev, struct xsc_eq *eq, + struct xsc_query_eq_mbox_out *out, int outlen); +int xsc_eq_debugfs_init(struct xsc_core_device *dev); +void xsc_eq_debugfs_cleanup(struct xsc_core_device *dev); +int xsc_cq_debugfs_init(struct xsc_core_device *dev); +void xsc_cq_debugfs_cleanup(struct xsc_core_device *dev); + +const char *xsc_command_str(int command); +int xsc_cmdif_debugfs_init(struct xsc_core_device *xdev); +void xsc_cmdif_debugfs_cleanup(struct xsc_core_device 
*xdev); + +int xsc_qptrace_debugfs_init(struct xsc_core_device *dev); +void xsc_qptrace_debugfs_cleanup(struct xsc_core_device *dev); + +int xsc_db_alloc_node(struct xsc_core_device *xdev, struct xsc_db *db, int node); +int xsc_frag_buf_alloc_node(struct xsc_core_device *xdev, int size, + struct xsc_frag_buf *buf, int node); +void xsc_db_free(struct xsc_core_device *xdev, struct xsc_db *db); +void xsc_frag_buf_free(struct xsc_core_device *xdev, struct xsc_frag_buf *buf); + +static inline u32 xsc_mkey_to_idx(u32 mkey) +{ + return mkey >> ((MMC_MPT_TBL_MEM_DEPTH == 32768) ? 17 : 18); +} + +static inline u32 xsc_idx_to_mkey(u32 mkey_idx) +{ + return mkey_idx << ((MMC_MPT_TBL_MEM_DEPTH == 32768) ? 17 : 18); +} + +enum { + XSC_PROF_MASK_QP_SIZE = (u64)1 << 0, + XSC_PROF_MASK_CMDIF_CSUM = (u64)1 << 1, + XSC_PROF_MASK_MR_CACHE = (u64)1 << 2, +}; + +#endif /* XSC_DRIVER_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/port.h b/drivers/net/ethernet/yunsilicon/xsc/common/port.h new file mode 100644 index 000000000000..a44af6c88c06 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/port.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __XSC_PORT_H__ +#define __XSC_PORT_H__ + +enum xsc_module_id { + XSC_MODULE_ID_SFP = 0x3, + XSC_MODULE_ID_QSFP = 0xC, + XSC_MODULE_ID_QSFP_PLUS = 0xD, + XSC_MODULE_ID_QSFP28 = 0x11, + XSC_MODULE_ID_QSFP_DD = 0x18, + XSC_MODULE_ID_DSFP = 0x1B, + XSC_MODULE_ID_QSFP_PLUS_CMIS = 0x1E, +}; + +#define XSC_EEPROM_MAX_BYTES 32 +#define XSC_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff +#define XSC_I2C_ADDR_LOW 0x50 +#define XSC_I2C_ADDR_HIGH 0x51 +#define XSC_EEPROM_PAGE_LENGTH 256 +#define XSC_EEPROM_HIGH_PAGE_LENGTH 128 + +struct xsc_module_eeprom_query_params { + u16 size; + u16 offset; + u16 i2c_address; + u32 page; + u32 bank; + u32 module_number; +}; + +int xsc_query_module_eeprom(struct xsc_core_device *dev, + u16 offset, u16 size, u8 *data); +int xsc_query_module_eeprom_by_page(struct xsc_core_device *dev, + struct xsc_module_eeprom_query_params *params, + u8 *data); +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/qp.h b/drivers/net/ethernet/yunsilicon/xsc/common/qp.h new file mode 100644 index 000000000000..fd3d6ee4a8df --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/qp.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_QP_H +#define XSC_QP_H + +#include "common/xsc_hsi.h" +#include "common/device.h" +#include "common/driver.h" + +enum { + XSC_QP_PM_MIGRATED = 0x3, + XSC_QP_PM_ARMED = 0x0, + XSC_QP_PM_REARM = 0x1 +}; + +enum { + XSC_WQE_CTRL_CQ_UPDATE = 2 << 2, + XSC_WQE_CTRL_SOLICITED = 1 << 1, +}; + +struct xsc_send_wqe_ctrl_seg { + __le32 msg_opcode:8; + __le32 with_immdt:1; + __le32 csum_en:2; + __le32 ds_data_num:5; + __le32 wqe_id:16; + __le32 msg_len; + union { + __le32 opcode_data; + struct { + u8 has_pph:1; + u8 so_type:1; + __le16 so_data_size:14; + u8:8; + u8 so_hdr_len:8; + }; + struct { + __le16 desc_id; + __le16 is_last_wqe:1; + __le16 dst_qp_id:15; + }; + }; + __le32 se:1; + __le32 ce:1; + __le32:30; +}; + +struct xsc_wqe_data_seg { + union { + __le32 in_line:1; + struct { + __le32:1; + __le32 seg_len:31; + __le32 mkey; + __le64 va; + }; + struct { + __le32:1; + __le32 len:7; + u8 in_line_data[15]; + }; + }; +}; + +struct xsc_wqe_ctrl_seg_2 { + __be32 opmod_idx_opcode; + __be32 qpn_ds; + u8 signature; + u8 rsvd[2]; + u8 fm_ce_se; + __be32 imm; +}; + +struct xsc_av { + union { + struct { + __be32 qkey; + __be32 reserved; + } qkey; + __be64 dc_key; + } key; + __be32 dqp_dct; + u8 stat_rate_sl; + u8 fl_mlid; + union { + __be16 rlid; + __be16 udp_sport; + }; + u8 reserved0[4]; + u8 rmac[6]; + u8 tclass; + u8 hop_limit; + __be32 grh_gid_fl; + u8 rgid[16]; +}; + +struct xsc_wqe_data_seg_2 { + __be32 byte_count; + __be32 lkey; + __be64 addr; +}; + +struct xsc_core_qp { + void (*event)(struct xsc_core_qp *qp, int type); + int qpn; + atomic_t refcount; + struct completion free; + struct xsc_rsc_debug *dbg; + int pid; + u16 qp_type; + u16 eth_queue_type; + struct dentry *trace; + struct xsc_qp_trace *trace_info; + u16 qp_type_internal; + u16 grp_id; + u8 mac_id; +}; + +struct xsc_qp_rsc { + struct list_head node; + u32 qpn; + struct completion delayed_release; + struct xsc_core_device *xdev; +}; + +struct xsc_qp_path { + u8 fl; + u8 rsvd3; + u8 free_ar; + 
u8 pkey_index; + u8 rsvd0; + u8 grh_mlid; + __be16 rlid; + u8 ackto_lt; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 tclass_flowlabel; + u8 rgid[16]; + u8 rsvd1[4]; + u8 sl; + u8 port; + u8 rsvd2[6]; + u8 dmac[6]; + u8 smac[6]; + __be16 af_type; + __be32 sip[4]; + __be32 dip[4]; + __be16 sport; + u8 ecn_dscp; + u8 vlan_valid; + __be16 vlan_id; + u8 dci_cfi_prio_sl; //not left moved yet. +}; + +static inline struct xsc_core_qp *__xsc_qp_lookup(struct xsc_core_device *xdev, u32 qpn) +{ + return radix_tree_lookup(&xdev->dev_res->qp_table.tree, qpn); +} + +int create_resource_common(struct xsc_core_device *xdev, + struct xsc_core_qp *qp); +void destroy_resource_common(struct xsc_core_device *xdev, + struct xsc_core_qp *qp); + +int xsc_core_create_qp(struct xsc_core_device *xdev, + struct xsc_core_qp *qp, + struct xsc_create_qp_mbox_in *in, + int inlen); +int xsc_core_qp_modify(struct xsc_core_device *xdev, enum xsc_qp_state cur_state, + enum xsc_qp_state new_state, + struct xsc_modify_qp_mbox_in *in, int sqd_event, + struct xsc_core_qp *qp); +int xsc_core_destroy_qp(struct xsc_core_device *xdev, + struct xsc_core_qp *qp); +int xsc_core_qp_query(struct xsc_core_device *xdev, struct xsc_core_qp *qp, + struct xsc_query_qp_mbox_out *out, int outlen); + +void xsc_init_qp_table(struct xsc_core_device *xdev); +void xsc_cleanup_qp_table(struct xsc_core_device *xdev); +int xsc_debug_qp_add(struct xsc_core_device *xdev, struct xsc_core_qp *qp); +void xsc_debug_qp_remove(struct xsc_core_device *xdev, struct xsc_core_qp *qp); + +int xsc_create_qptrace(struct xsc_core_device *xdev, struct xsc_core_qp *qp); +void xsc_remove_qptrace(struct xsc_core_device *xdev, struct xsc_core_qp *qp); + +void xsc_init_delayed_release(void); +void xsc_stop_delayed_release(void); + +int xsc_modify_qp(struct xsc_core_device *xdev, + struct xsc_modify_qp_mbox_in *in, + struct xsc_modify_qp_mbox_out *out, + u32 qpn, u16 status); + +#endif /* XSC_QP_H */ diff --git 
a/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h b/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h new file mode 100644 index 000000000000..57eb829f811b --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/qpts.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __QPTS_H__ +#define __QPTS_H__ + +struct __packed xsc_qp_trace { + u16 main_ver; + u16 sub_ver; + u32 pid; + u16 qp_type; + u16 af_type; + union { + u32 s_addr4; + u8 s_addr6[16]; + } s_addr; + union { + u32 d_addr4; + u8 d_addr6[16]; + } d_addr; + u16 s_port; + u16 d_port; + u32 affinity_idx; + u64 timestamp; + u32 lqpn; + u32 rqpn; +}; + +struct __packed qpt_update_affinity { + u32 aff_new; + u32 aff_old; +}; + +struct __packed qpt_update_sport { + u16 port_new; + u16 port_old; +}; + +struct __packed qpt_update_data { + u64 timestamp; + u32 qpn; + u32 bus; + u32 dev; + u32 fun; + union { + struct qpt_update_affinity affinity; + struct qpt_update_sport sport; + } update; +}; + +struct __packed xsc_qpt_update_msg { + u16 main_ver; + u16 sub_ver; + u32 type; //0:UPDATE_TYPE_SPORT; 1:UPDATE_TYPE_AFFINITY + struct qpt_update_data data; +}; + +enum { + YS_QPTRACE_UPDATE_TYPE_SPORT = 0, + YS_QPTRACE_UPDATE_TYPE_AFFINITY, +}; + +#define YS_QPTRACE_VER_MAJOR 2 +#define YS_QPTRACE_VER_MINOR 0 + +int qpts_init(void); +void qpts_fini(void); +int qpts_write_one_msg(struct xsc_qpt_update_msg *msg); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h b/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h new file mode 100644 index 000000000000..d259d69f2211 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/res_obj.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef RES_OBJ_H +#define RES_OBJ_H + +#include +#include +#include "common/xsc_core.h" + +struct xsc_res_obj { + struct list_head node; + struct xsc_bdf_file *file; + void (*release_method)(void *obj); + char *data; + unsigned int datalen; +}; + +struct xsc_pd_obj { + struct xsc_res_obj obj; + unsigned int pdn; +}; + +struct xsc_mr_obj { + struct xsc_res_obj obj; + unsigned int mkey; +}; + +struct xsc_cq_obj { + struct xsc_res_obj obj; + unsigned int cqn; +}; + +struct xsc_qp_obj { + struct xsc_res_obj obj; + unsigned int qpn; +}; + +struct xsc_pct_obj { + struct xsc_res_obj obj; + unsigned int pct_idx; +}; + +struct xsc_wct_obj { + struct xsc_res_obj obj; + unsigned int wct_idx; +}; + +struct xsc_em_obj { + struct xsc_res_obj obj; + unsigned int em_idx[54]; +}; + +struct xsc_flow_pct_v4_add { + char key[44]; + char mask[44]; + char ad[6]; + unsigned int priority; +}; + +struct xsc_flow_pct_v4_del { + char key[44]; + char mask[44]; + unsigned int priority; +}; + +struct xsc_flow_pct_v6_add { + char key[44]; + char mask[44]; + char ad[6]; + unsigned int priority; +}; + +struct xsc_flow_pct_v6_del { + char key[44]; + char mask[44]; + unsigned int priority; +}; + +enum RES_OBJ_TYPE { + RES_OBJ_PD, + RES_OBJ_MR, + RES_OBJ_CQ, + RES_OBJ_QP, + RES_OBJ_PCT, + RES_OBJ_WCT, + RES_OBJ_EM, + RES_OBJ_MAX +}; + +static inline unsigned long xsc_idx_to_key(unsigned int obj_type, unsigned int idx) +{ + return ((unsigned long)obj_type << 32) | idx; +} + +int xsc_alloc_pd_obj(struct xsc_bdf_file *file, unsigned int pdn, + char *data, unsigned int datalen); +void xsc_destroy_pd_obj(struct xsc_bdf_file *file, unsigned int pdn); + +int xsc_alloc_mr_obj(struct xsc_bdf_file *file, unsigned int mkey, + char *data, unsigned int datalen); +void xsc_destroy_mr_obj(struct xsc_bdf_file *file, unsigned int mkey); + +int xsc_alloc_cq_obj(struct xsc_bdf_file *file, unsigned int cqn, + char *data, unsigned int datalen); +void xsc_destroy_cq_obj(struct xsc_bdf_file *file, unsigned int 
cqn); + +int xsc_alloc_qp_obj(struct xsc_bdf_file *file, unsigned int qpn, + char *data, unsigned int datalen); +void xsc_destroy_qp_obj(struct xsc_bdf_file *file, unsigned int qpn); + +int xsc_alloc_pct_obj(struct xsc_bdf_file *file, unsigned int priority, + char *data, unsigned int datalen); +void xsc_destroy_pct_obj(struct xsc_bdf_file *file, unsigned int priority); + +void xsc_close_bdf_file(struct xsc_bdf_file *file); + +void xsc_send_cmd_2rst_qp(struct xsc_core_device *xdev, unsigned int qpn); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/version.h b/drivers/net/ethernet/yunsilicon/xsc/common/version.h new file mode 100644 index 000000000000..8c7c6e03f5a1 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/version.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#define BRANCH_VERSION 1 +#define MAJOR_VERSION 2 +#define MINOR_VERSION 0 +#define BUILD_VERSION 367 +#define HOTFIX_NUM 446 diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/vport.h b/drivers/net/ethernet/yunsilicon/xsc/common/vport.h new file mode 100644 index 000000000000..dad39f12e265 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/vport.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_VPORT_H +#define XSC_VPORT_H + +#include "common/xsc_core.h" +#include +#include "common/xsc_fs.h" + +#define XSC_VPORT_PF_PLACEHOLDER (1u) +#define XSC_VPORT_UPLINK_PLACEHOLDER (1u) +#define XSC_VPORT_ECPF_PLACEHOLDER(dev) (xsc_ecpf_vport_exists(dev) || \ + xsc_core_is_ecpf_esw_manager(dev)) + +#define XSC_SPECIAL_VPORTS(dev) (XSC_VPORT_PF_PLACEHOLDER + \ + XSC_VPORT_UPLINK_PLACEHOLDER + \ + XSC_VPORT_ECPF_PLACEHOLDER(dev)) + +#define XSC_VPORT_MANAGER(dev) (xsc_core_is_vport_manager(dev)) + +enum { + XSC_CAP_INLINE_MODE_L2, + XSC_CAP_INLINE_MODE_VPORT_CONTEXT, + XSC_CAP_INLINE_MODE_NOT_REQUIRED, +}; + +/* Vport number for each function must keep unchanged */ +enum { + XSC_VPORT_PF = 0x0, + XSC_VPORT_FIRST_VF = 0x1, + XSC_VPORT_ECPF = 0xfffe, + XSC_VPORT_UPLINK = 0xffff, +}; + +enum { + XSC_VPORT_ADMIN_STATE_DOWN = 0x0, + XSC_VPORT_ADMIN_STATE_UP = 0x1, + XSC_VPORT_ADMIN_STATE_AUTO = 0x2, +}; + +u8 xsc_query_vport_state(struct xsc_core_device *dev, u16 opmod, u16 vport); +int xsc_modify_vport_admin_state(struct xsc_core_device *dev, u16 opmod, + u16 vport, u8 other_vport, u8 state); +int xsc_query_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr); +int xsc_query_other_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr); +int xsc_query_nic_vport_min_inline(struct xsc_core_device *dev, + u16 vport, u8 *min_inline); +void xsc_query_min_inline(struct xsc_core_device *dev, u8 *min_inline); +int xsc_modify_nic_vport_min_inline(struct xsc_core_device *dev, + u16 vport, u8 min_inline); +int xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr, bool perm_mac); +int xsc_modify_other_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr, bool perm_mac); +int xsc_query_nic_vport_mtu(struct xsc_core_device *dev, u16 *mtu); +int xsc_modify_nic_vport_mtu(struct xsc_core_device *dev, u16 mtu); +int xsc_query_nic_vport_system_image_guid(struct xsc_core_device *dev, + 
u64 *system_image_guid); +int xsc_query_nic_vport_node_guid(struct xsc_core_device *dev, u32 vport, + u64 *node_guid); +int xsc_modify_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 node_guid); +int xsc_modify_other_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 node_guid); +int xsc_query_nic_vport_qkey_viol_cntr(struct xsc_core_device *dev, + u16 *qkey_viol_cntr); +int xsc_query_hca_vport_gid(struct xsc_core_device *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 gid_index, + union ib_gid *gid); +int xsc_query_hca_vport_pkey(struct xsc_core_device *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 pkey_index, + u16 *pkey); +int xsc_query_hca_vport_context(struct xsc_core_device *dev, + u8 other_vport, u8 port_num, + u16 vf_num, + struct xsc_hca_vport_context *rep); +int xsc_query_hca_vport_node_guid(struct xsc_core_device *dev, + u64 *node_guid); +int xsc_query_nic_vport_mac_list(struct xsc_core_device *dev, + u16 vport, + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int *list_size); +int xsc_modify_nic_vport_mac_list(struct xsc_core_device *dev, + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int list_size); +int xsc_query_nic_vport_promisc(struct xsc_core_device *dev, + u16 vport, + int *promisc, + int *allmcast); +int xsc_modify_nic_vport_promisc(struct xsc_core_device *dev, + bool allmcast_flag, bool promisc_flag, + int allmcast, int promisc); +int xsc_modify_nic_vport_spoofchk(struct xsc_core_device *dev, + u16 vport, int spoofchk); +int xsc_modify_nic_vport_trust(struct xsc_core_device *dev, + u16 vport, bool trust); +int xsc_query_nic_vport_vlans(struct xsc_core_device *dev, u32 vport, + unsigned long *vlans); +int xsc_modify_nic_vport_vlans(struct xsc_core_device *dev, + u16 vid, bool add); +int xsc_query_vport_down_stats(struct xsc_core_device *dev, u16 vport, + u8 other_vport, u64 *rx_discard_vport_down, + u64 *tx_discard_vport_down); +int xsc_query_vport_counter(struct 
xsc_core_device *dev, u8 other_vport, + int vf, u8 port_num, void *out, + size_t out_sz); +int xsc_modify_hca_vport_context(struct xsc_core_device *dev, + u8 other_vport, u8 port_num, + int vf, + struct xsc_hca_vport_context *req); +int xsc_modify_vport_max_rate(struct xsc_core_device *dev, + u16 vport, u32 rate); + +u16 xsc_eswitch_get_total_vports(const struct xsc_core_device *dev); +int xsc_modify_nic_vport_context(struct xsc_core_device *dev, void *in, + int inlen); +int __xsc_query_nic_vport_context(struct xsc_core_device *dev, + u16 vport, void *out, int outlen, + int force_other); +#endif /* XSC_VPORT_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h new file mode 100644 index 000000000000..4864cb747cde --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ +/* generated time: + * Thu Feb 29 15:33:50 CST 2024 + */ + +#ifndef XSC_HW_H +#define XSC_HW_H + +//hif_irq_csr_defines.h +#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_ADDR 0xa1100070 + +//hif_cpm_csr_defines.h +#define HIF_CPM_LOCK_GET_REG_ADDR 0xa0000104 +#define HIF_CPM_LOCK_PUT_REG_ADDR 0xa0000108 +#define HIF_CPM_LOCK_AVAIL_REG_ADDR 0xa000010c +#define HIF_CPM_IDA_DATA_MEM_ADDR 0xa0000800 +#define HIF_CPM_IDA_CMD_REG_ADDR 0xa0000020 +#define HIF_CPM_IDA_ADDR_REG_ADDR 0xa0000080 +#define HIF_CPM_IDA_BUSY_REG_ADDR 0xa0000100 +#define HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH 5 +#define HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH 4 +#define HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH 1 +#define HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT 5 +#define HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK 0x1f +#define HIF_CPM_IDA_ADDR_REG_STRIDE 0x4 +#define HIF_CPM_CHIP_VERSION_H_REG_ADDR 0xa0000010 + +//mmc_csr_defines.h +#define MMC_MPT_TBL_MEM_DEPTH 32768 +#define MMC_MTT_TBL_MEM_DEPTH 262144 +#define MMC_MPT_TBL_MEM_WIDTH 256 +#define MMC_MTT_TBL_MEM_WIDTH 64 +#define MMC_MPT_TBL_MEM_ADDR 0xa4100000 +#define MMC_MTT_TBL_MEM_ADDR 0xa4200000 + +//clsf_dma_csr_defines.h +#define CLSF_DMA_DMA_UL_BUSY_REG_ADDR 0xa6010048 +#define CLSF_DMA_DMA_DL_DONE_REG_ADDR 0xa60100d0 +#define CLSF_DMA_DMA_DL_SUCCESS_REG_ADDR 0xa60100c0 +#define CLSF_DMA_ERR_CODE_CLR_REG_ADDR 0xa60100d4 +#define CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_MASK 0x7f +#define CLSF_DMA_DMA_RD_TABLE_ID_REG_ADDR 0xa6010020 +#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_SHIFT 16 +#define CLSF_DMA_DMA_RD_ADDR_REG_ADDR 0xa6010024 +#define CLSF_DMA_INDRW_RD_START_REG_ADDR 0xa6010028 + +//hif_tbl_csr_defines.h +#define HIF_TBL_TBL_DL_BUSY_REG_ADDR 0xa1060030 +#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_SHIFT 12 +#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_SHIFT 11 +#define HIF_TBL_TBL_DL_REQ_REG_ADDR 0xa1060020 +#define HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_MASK 0xffffffff +#define HIF_TBL_TBL_DL_ADDR_L_REG_ADDR 0xa1060024 +#define 
HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_MASK 0xffffffff +#define HIF_TBL_TBL_DL_ADDR_H_REG_ADDR 0xa1060028 +#define HIF_TBL_TBL_DL_START_REG_ADDR 0xa106002c +#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_SHIFT 11 +#define HIF_TBL_TBL_UL_REQ_REG_ADDR 0xa106007c +#define HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_MASK 0xffffffff +#define HIF_TBL_TBL_UL_ADDR_L_REG_ADDR 0xa1060080 +#define HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_MASK 0xffffffff +#define HIF_TBL_TBL_UL_ADDR_H_REG_ADDR 0xa1060084 +#define HIF_TBL_TBL_UL_START_REG_ADDR 0xa1060088 +#define HIF_TBL_MSG_RDY_REG_ADDR 0xa1060044 + +//hif_cmdqm_csr_defines.h +#define HIF_CMDQM_HOST_REQ_PID_MEM_ADDR 0xa1026000 +#define HIF_CMDQM_HOST_REQ_CID_MEM_ADDR 0xa1028000 +#define HIF_CMDQM_HOST_RSP_PID_MEM_ADDR 0xa102e000 +#define HIF_CMDQM_HOST_RSP_CID_MEM_ADDR 0xa1030000 +#define HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR 0xa1022000 +#define HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR 0xa1024000 +#define HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR 0xa102a000 +#define HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR 0xa102c000 +#define HIF_CMDQM_VECTOR_ID_MEM_ADDR 0xa1034000 +#define HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR 0xa1020020 +#define HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR 0xa1020028 +#define HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR 0xa1032000 + +//PSV use +//hif_irq_csr_defines.h +#define HIF_IRQ_CONTROL_TBL_MEM_ADDR 0xa1102000 +#define HIF_IRQ_INT_DB_REG_ADDR 0xa11000b4 +#define HIF_IRQ_CFG_VECTOR_TABLE_BUSY_REG_ADDR 0xa1100114 +#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_ADDR 0xa11000f0 +#define HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_ADDR 0xa11000ec +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_ADDR 0xa11000f4 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_ADDR 0xa11000f8 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_ADDR 0xa11000fc +#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_ADDR 0xa1100100 +#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_ADDR 0xa11000e8 + +#endif /* XSC_HW_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h 
b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h new file mode 100644 index 000000000000..1d5d0e6c8c78 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h @@ -0,0 +1,2513 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_CMD_H +#define XSC_CMD_H + +#define CMDQ_VERSION 0x32 + +#define MAX_MBOX_OUT_LEN 2048 + +#define QOS_PRIO_MAX 7 +#define QOS_DSCP_MAX 63 +#define MAC_PORT_DSCP_SHIFT 6 +#define QOS_PCP_MAX 7 +#define DSCP_PCP_UNSET 255 +#define MAC_PORT_PCP_SHIFT 3 +#define XSC_MAX_MAC_NUM 8 +#define XSC_BOARD_SN_LEN 32 +#define MAX_PKT_LEN 9800 +#define XSC_RTT_CFG_QPN_MAX 32 + +#define XSC_PCIE_LAT_CFG_INTERVAL_MAX 8 +#define XSC_PCIE_LAT_CFG_HISTOGRAM_MAX 9 +#define XSC_PCIE_LAT_EN_DISABLE 0 +#define XSC_PCIE_LAT_EN_ENABLE 1 +#define XSC_PCIE_LAT_PERIOD_MIN 1 +#define XSC_PCIE_LAT_PERIOD_MAX 20 +#define DPU_PORT_WGHT_CFG_MAX 1 + +enum { + XSC_CMD_STAT_OK = 0x0, + XSC_CMD_STAT_INT_ERR = 0x1, + XSC_CMD_STAT_BAD_OP_ERR = 0x2, + XSC_CMD_STAT_BAD_PARAM_ERR = 0x3, + XSC_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, + XSC_CMD_STAT_BAD_RES_ERR = 0x5, + XSC_CMD_STAT_RES_BUSY = 0x6, + XSC_CMD_STAT_LIM_ERR = 0x8, + XSC_CMD_STAT_BAD_RES_STATE_ERR = 0x9, + XSC_CMD_STAT_IX_ERR = 0xa, + XSC_CMD_STAT_NO_RES_ERR = 0xf, + XSC_CMD_STAT_BAD_INP_LEN_ERR = 0x50, + XSC_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, + XSC_CMD_STAT_BAD_QP_STATE_ERR = 0x10, + XSC_CMD_STAT_BAD_PKT_ERR = 0x30, + XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, +}; + +enum { + DPU_PORT_WGHT_TARGET_HOST, + DPU_PORT_WGHT_TARGET_SOC, + DPU_PORT_WGHT_TARGET_NUM, +}; + +enum { + DPU_PRIO_WGHT_TARGET_HOST2SOC, + DPU_PRIO_WGHT_TARGET_SOC2HOST, + DPU_PRIO_WGHT_TARGET_HOSTSOC2LAG, + DPU_PRIO_WGHT_TARGET_NUM, +}; + +#define XSC_AP_FEAT_UDP_SPORT_MIN 1024 +#define XSC_AP_FEAT_UDP_SPORT_MAX 65535 + +enum { + XSC_CMD_OP_QUERY_HCA_CAP = 0x100, + XSC_CMD_OP_QUERY_ADAPTER = 0x101, + XSC_CMD_OP_INIT_HCA = 0x102, + 
XSC_CMD_OP_TEARDOWN_HCA = 0x103, + XSC_CMD_OP_ENABLE_HCA = 0x104, + XSC_CMD_OP_DISABLE_HCA = 0x105, + XSC_CMD_OP_MODIFY_HCA = 0x106, + XSC_CMD_OP_QUERY_PAGES = 0x107, + XSC_CMD_OP_MANAGE_PAGES = 0x108, + XSC_CMD_OP_SET_HCA_CAP = 0x109, + XSC_CMD_OP_QUERY_CMDQ_VERSION = 0x10a, + XSC_CMD_OP_QUERY_MSIX_TBL_INFO = 0x10b, + XSC_CMD_OP_FUNCTION_RESET = 0x10c, + XSC_CMD_OP_DUMMY = 0x10d, + XSC_CMD_OP_SET_DEBUG_INFO = 0x10e, + XSC_CMD_OP_QUERY_PSV_FUNCID = 0x10f, + XSC_CMD_OP_ALLOC_IA_LOCK = 0x110, + XSC_CMD_OP_RELEASE_IA_LOCK = 0x111, + XSC_CMD_OP_ENABLE_RELAXED_ORDER = 0x112, + XSC_CMD_OP_QUERY_GUID = 0x113, + XSC_CMD_OP_ACTIVATE_HW_CONFIG = 0x114, + + XSC_CMD_OP_CREATE_MKEY = 0x200, + XSC_CMD_OP_QUERY_MKEY = 0x201, + XSC_CMD_OP_DESTROY_MKEY = 0x202, + XSC_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, + XSC_CMD_OP_REG_MR = 0x204, + XSC_CMD_OP_DEREG_MR = 0x205, + XSC_CMD_OP_SET_MPT = 0x206, + XSC_CMD_OP_SET_MTT = 0x207, + + XSC_CMD_OP_CREATE_EQ = 0x301, + XSC_CMD_OP_DESTROY_EQ = 0x302, + XSC_CMD_OP_QUERY_EQ = 0x303, + + XSC_CMD_OP_CREATE_CQ = 0x400, + XSC_CMD_OP_DESTROY_CQ = 0x401, + XSC_CMD_OP_QUERY_CQ = 0x402, + XSC_CMD_OP_MODIFY_CQ = 0x403, + XSC_CMD_OP_ALLOC_MULTI_VIRTQ_CQ = 0x404, + XSC_CMD_OP_RELEASE_MULTI_VIRTQ_CQ = 0x405, + + XSC_CMD_OP_CREATE_QP = 0x500, + XSC_CMD_OP_DESTROY_QP = 0x501, + XSC_CMD_OP_RST2INIT_QP = 0x502, + XSC_CMD_OP_INIT2RTR_QP = 0x503, + XSC_CMD_OP_RTR2RTS_QP = 0x504, + XSC_CMD_OP_RTS2RTS_QP = 0x505, + XSC_CMD_OP_SQERR2RTS_QP = 0x506, + XSC_CMD_OP_2ERR_QP = 0x507, + XSC_CMD_OP_RTS2SQD_QP = 0x508, + XSC_CMD_OP_SQD2RTS_QP = 0x509, + XSC_CMD_OP_2RST_QP = 0x50a, + XSC_CMD_OP_QUERY_QP = 0x50b, + XSC_CMD_OP_CONF_SQP = 0x50c, + XSC_CMD_OP_MAD_IFC = 0x50d, + XSC_CMD_OP_INIT2INIT_QP = 0x50e, + XSC_CMD_OP_SUSPEND_QP = 0x50f, + XSC_CMD_OP_UNSUSPEND_QP = 0x510, + XSC_CMD_OP_SQD2SQD_QP = 0x511, + XSC_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512, + XSC_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513, + XSC_CMD_OP_QUERY_QP_COUNTER_SET = 0x514, + XSC_CMD_OP_CREATE_MULTI_QP = 0x515, + 
XSC_CMD_OP_ALLOC_MULTI_VIRTQ = 0x516, + XSC_CMD_OP_RELEASE_MULTI_VIRTQ = 0x517, + XSC_CMD_OP_QUERY_QP_FLUSH_STATUS = 0x518, + + XSC_CMD_OP_CREATE_PSV = 0x600, + XSC_CMD_OP_DESTROY_PSV = 0x601, + XSC_CMD_OP_QUERY_PSV = 0x602, + XSC_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603, + XSC_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604, + + XSC_CMD_OP_CREATE_SRQ = 0x700, + XSC_CMD_OP_DESTROY_SRQ = 0x701, + XSC_CMD_OP_QUERY_SRQ = 0x702, + XSC_CMD_OP_ARM_RQ = 0x703, + XSC_CMD_OP_RESIZE_SRQ = 0x704, + + XSC_CMD_OP_ALLOC_PD = 0x800, + XSC_CMD_OP_DEALLOC_PD = 0x801, + XSC_CMD_OP_ALLOC_UAR = 0x802, + XSC_CMD_OP_DEALLOC_UAR = 0x803, + + XSC_CMD_OP_ATTACH_TO_MCG = 0x806, + XSC_CMD_OP_DETACH_FROM_MCG = 0x807, + + XSC_CMD_OP_ALLOC_XRCD = 0x80e, + XSC_CMD_OP_DEALLOC_XRCD = 0x80f, + + XSC_CMD_OP_ACCESS_REG = 0x805, + + XSC_CMD_OP_MODIFY_RAW_QP = 0x81f, + + XSC_CMD_OP_ENABLE_NIC_HCA = 0x810, + XSC_CMD_OP_DISABLE_NIC_HCA = 0x811, + XSC_CMD_OP_MODIFY_NIC_HCA = 0x812, + + XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x820, + XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x821, + XSC_CMD_OP_QUERY_VPORT_STATE = 0x822, + XSC_CMD_OP_MODIFY_VPORT_STATE = 0x823, + XSC_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x824, + XSC_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x825, + XSC_CMD_OP_QUERY_HCA_VPORT_GID = 0x826, + XSC_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x827, + XSC_CMD_OP_QUERY_VPORT_COUNTER = 0x828, + XSC_CMD_OP_QUERY_PRIO_STATS = 0x829, + XSC_CMD_OP_QUERY_PHYPORT_STATE = 0x830, + XSC_CMD_OP_QUERY_EVENT_TYPE = 0x831, + XSC_CMD_OP_QUERY_LINK_INFO = 0x832, + XSC_CMD_OP_QUERY_PFC_PRIO_STATS = 0x833, + XSC_CMD_OP_MODIFY_LINK_INFO = 0x834, + XSC_CMD_OP_QUERY_FEC_PARAM = 0x835, + XSC_CMD_OP_MODIFY_FEC_PARAM = 0x836, + + XSC_CMD_OP_LAG_CREATE = 0x840, + XSC_CMD_OP_LAG_ADD_MEMBER = 0x841, + XSC_CMD_OP_LAG_REMOVE_MEMBER = 0x842, + XSC_CMD_OP_LAG_UPDATE_MEMBER_STATUS = 0x843, + XSC_CMD_OP_LAG_UPDATE_HASH_TYPE = 0x844, + XSC_CMD_OP_LAG_DESTROY = 0x845, + + XSC_CMD_OP_LAG_SET_QOS = 0x848, + XSC_CMD_OP_ENABLE_MSIX = 0x850, + + XSC_CMD_OP_IOCTL_FLOW = 0x900, + 
XSC_CMD_OP_IOCTL_OTHER = 0x901, + + XSC_CMD_OP_IOCTL_SET_DSCP_PMT = 0x1000, + XSC_CMD_OP_IOCTL_GET_DSCP_PMT = 0x1001, + XSC_CMD_OP_IOCTL_SET_TRUST_MODE = 0x1002, + XSC_CMD_OP_IOCTL_GET_TRUST_MODE = 0x1003, + XSC_CMD_OP_IOCTL_SET_PCP_PMT = 0x1004, + XSC_CMD_OP_IOCTL_GET_PCP_PMT = 0x1005, + XSC_CMD_OP_IOCTL_SET_DEFAULT_PRI = 0x1006, + XSC_CMD_OP_IOCTL_GET_DEFAULT_PRI = 0x1007, + XSC_CMD_OP_IOCTL_SET_PFC = 0x1008, + XSC_CMD_OP_IOCTL_GET_PFC = 0x1009, + XSC_CMD_OP_IOCTL_SET_RATE_LIMIT = 0x100a, + XSC_CMD_OP_IOCTL_GET_RATE_LIMIT = 0x100b, + XSC_CMD_OP_IOCTL_SET_SP = 0x100c, + XSC_CMD_OP_IOCTL_GET_SP = 0x100d, + XSC_CMD_OP_IOCTL_SET_WEIGHT = 0x100e, + XSC_CMD_OP_IOCTL_GET_WEIGHT = 0x100f, + XSC_CMD_OP_IOCTL_DPU_SET_PORT_WEIGHT = 0x1010, + XSC_CMD_OP_IOCTL_DPU_GET_PORT_WEIGHT = 0x1011, + XSC_CMD_OP_IOCTL_DPU_SET_PRIO_WEIGHT = 0x1012, + XSC_CMD_OP_IOCTL_DPU_GET_PRIO_WEIGHT = 0x1013, + XSC_CMD_OP_IOCTL_SET_WATCHDOG_EN = 0x1014, + XSC_CMD_OP_IOCTL_GET_WATCHDOG_EN = 0x1015, + XSC_CMD_OP_IOCTL_SET_WATCHDOG_PERIOD = 0x1016, + XSC_CMD_OP_IOCTL_GET_WATCHDOG_PERIOD = 0x1017, + XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH = 0x1018, + XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS = 0x1019, + + XSC_CMD_OP_IOCTL_SET_ENABLE_RP = 0x1030, + XSC_CMD_OP_IOCTL_SET_ENABLE_NP = 0x1031, + XSC_CMD_OP_IOCTL_SET_INIT_ALPHA = 0x1032, + XSC_CMD_OP_IOCTL_SET_G = 0x1033, + XSC_CMD_OP_IOCTL_SET_AI = 0x1034, + XSC_CMD_OP_IOCTL_SET_HAI = 0x1035, + XSC_CMD_OP_IOCTL_SET_TH = 0x1036, + XSC_CMD_OP_IOCTL_SET_BC_TH = 0x1037, + XSC_CMD_OP_IOCTL_SET_CNP_OPCODE = 0x1038, + XSC_CMD_OP_IOCTL_SET_CNP_BTH_B = 0x1039, + XSC_CMD_OP_IOCTL_SET_CNP_BTH_F = 0x103a, + XSC_CMD_OP_IOCTL_SET_CNP_ECN = 0x103b, + XSC_CMD_OP_IOCTL_SET_DATA_ECN = 0x103c, + XSC_CMD_OP_IOCTL_SET_CNP_TX_INTERVAL = 0x103d, + XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_RSTTIME = 0x103e, + XSC_CMD_OP_IOCTL_SET_CNP_DSCP = 0x103f, + XSC_CMD_OP_IOCTL_SET_CNP_PCP = 0x1040, + XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_ALPHA = 0x1041, + XSC_CMD_OP_IOCTL_GET_CC_CFG = 0x1042, + 
XSC_CMD_OP_IOCTL_GET_CC_STAT = 0x104b, + XSC_CMD_OP_IOCTL_SET_CLAMP_TGT_RATE = 0x1052, + XSC_CMD_OP_IOCTL_SET_MAX_HAI_FACTOR = 0x1053, + XSC_CMD_OP_IOCTL_SET_SCALE = 0x1054, + + XSC_CMD_OP_IOCTL_SET_HWC = 0x1060, + XSC_CMD_OP_IOCTL_GET_HWC = 0x1061, + + XSC_CMD_OP_SET_MTU = 0x1100, + XSC_CMD_OP_QUERY_ETH_MAC = 0X1101, + + XSC_CMD_OP_QUERY_HW_STATS = 0X1200, + XSC_CMD_OP_QUERY_PAUSE_CNT = 0X1201, + XSC_CMD_OP_IOCTL_QUERY_PFC_STALL_STATS = 0x1202, + XSC_CMD_OP_QUERY_HW_STATS_RDMA = 0X1203, + XSC_CMD_OP_QUERY_HW_STATS_ETH = 0X1204, + XSC_CMD_OP_QUERY_HW_GLOBAL_STATS = 0X1210, + + XSC_CMD_OP_SET_RTT_EN = 0X1220, + XSC_CMD_OP_GET_RTT_EN = 0X1221, + XSC_CMD_OP_SET_RTT_QPN = 0X1222, + XSC_CMD_OP_GET_RTT_QPN = 0X1223, + XSC_CMD_OP_SET_RTT_PERIOD = 0X1224, + XSC_CMD_OP_GET_RTT_PERIOD = 0X1225, + XSC_CMD_OP_GET_RTT_RESULT = 0X1226, + XSC_CMD_OP_GET_RTT_STATS = 0X1227, + + XSC_CMD_OP_SET_LED_STATUS = 0X1228, + + XSC_CMD_OP_AP_FEAT = 0x1400, + XSC_CMD_OP_PCIE_LAT_FEAT = 0x1401, + + XSC_CMD_OP_GET_LLDP_STATUS = 0x1500, + XSC_CMD_OP_SET_LLDP_STATUS = 0x1501, + + XSC_CMD_OP_SET_VPORT_RATE_LIMIT = 0x1600, + + XSC_CMD_OP_SET_PORT_ADMIN_STATUS = 0x1801, + XSC_CMD_OP_USER_EMU_CMD = 0x8000, + + XSC_CMD_OP_MAX +}; + +enum { + XSC_CMD_EVENT_RESP_CHANGE_LINK = 0x0001, + XSC_CMD_EVENT_RESP_TEMP_WARN = 0x0002, + XSC_CMD_EVENT_RESP_OVER_TEMP_PROTECTION = 0x0004, +}; + +enum xsc_eth_qp_num_sel { + XSC_ETH_QP_NUM_8K_SEL = 0, + XSC_ETH_QP_NUM_8K_8TC_SEL, + XSC_ETH_QP_NUM_SEL_MAX, +}; + +enum xsc_eth_vf_num_sel { + XSC_ETH_VF_NUM_SEL_8 = 0, + XSC_ETH_VF_NUM_SEL_16, + XSC_ETH_VF_NUM_SEL_32, + XSC_ETH_VF_NUM_SEL_64, + XSC_ETH_VF_NUM_SEL_128, + XSC_ETH_VF_NUM_SEL_256, + XSC_ETH_VF_NUM_SEL_512, + XSC_ETH_VF_NUM_SEL_1024, + XSC_ETH_VF_NUM_SEL_MAX +}; + +enum { + LINKSPEED_MODE_UNKNOWN = -1, + LINKSPEED_MODE_10G = 10000, + LINKSPEED_MODE_25G = 25000, + LINKSPEED_MODE_40G = 40000, + LINKSPEED_MODE_50G = 50000, + LINKSPEED_MODE_100G = 100000, + LINKSPEED_MODE_200G = 200000, + LINKSPEED_MODE_400G = 
400000, +}; + +enum { + MODULE_SPEED_UNKNOWN, + MODULE_SPEED_10G, + MODULE_SPEED_25G, + MODULE_SPEED_40G_R4, + MODULE_SPEED_50G_R, + MODULE_SPEED_50G_R2, + MODULE_SPEED_100G_R2, + MODULE_SPEED_100G_R4, + MODULE_SPEED_200G_R4, + MODULE_SPEED_200G_R8, + MODULE_SPEED_400G_R8, +}; + +enum xsc_dma_direct { + DMA_DIR_TO_MAC, + DMA_DIR_READ, + DMA_DIR_WRITE, + DMA_DIR_LOOPBACK, + DMA_DIR_MAX, +}; + +/* hw feature bitmap, 32bit */ +enum xsc_hw_feature_flag { + XSC_HW_RDMA_SUPPORT = 0x1, + XSC_HW_PFC_PRIO_STATISTIC_SUPPORT = 0x2, + XSC_HW_THIRD_FEATURE = 0x4, + XSC_HW_PFC_STALL_STATS_SUPPORT = 0x8, + XSC_HW_RDMA_CM_SUPPORT = 0x20, + + XSC_HW_LAST_FEATURE = 0x80000000, +}; + +enum xsc_lldp_dcbx_sub_cmd { + XSC_OS_HANDLE_LLDP_STATUS = 0x1, + XSC_DCBX_STATUS +}; + +struct xsc_inbox_hdr { + __be16 opcode; + u8 rsvd[4]; + __be16 ver; +}; + +struct xsc_outbox_hdr { + u8 status; + u8 rsvd[5]; + __be16 ver; +}; + +struct xsc_alloc_ia_lock_mbox_in { + struct xsc_inbox_hdr hdr; + u8 lock_num; + u8 rsvd[7]; +}; + +#define XSC_RES_NUM_IAE_GRP 16 + +struct xsc_alloc_ia_lock_mbox_out { + struct xsc_outbox_hdr hdr; + u8 lock_idx[XSC_RES_NUM_IAE_GRP]; +}; + +struct xsc_release_ia_lock_mbox_in { + struct xsc_inbox_hdr hdr; + u8 lock_idx[XSC_RES_NUM_IAE_GRP]; +}; + +struct xsc_release_ia_lock_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_pci_driver_init_params_in { + struct xsc_inbox_hdr hdr; + __be32 s_wqe_mode; + __be32 r_wqe_mode; + __be32 local_timeout_retrans; + u8 mac_lossless_prio[XSC_MAX_MAC_NUM]; + __be32 group_mod; +}; + +struct xsc_pci_driver_init_params_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/*CQ mbox*/ +struct xsc_cq_context { + __be16 eqn; + __be16 pa_num; + __be16 glb_func_id; + u8 log_cq_sz; + u8 cq_type; +}; + +struct xsc_create_cq_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_cq_context ctx; + __be64 pas[]; +}; + +struct xsc_create_cq_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 cqn; + u8 rsvd[4]; +}; + +struct 
xsc_destroy_cq_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 cqn; + u8 rsvd[4]; +}; + +struct xsc_destroy_cq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/*QP mbox*/ +struct xsc_create_qp_request { + __be16 input_qpn; + __be16 pa_num; + u8 qp_type; + u8 log_sq_sz; + u8 log_rq_sz; + u8 dma_direct;//0 for dma read, 1 for dma write + __be32 pdn; + __be16 cqn_send; + __be16 cqn_recv; + __be16 glb_funcid; + /*rsvd,rename logic_port used to transfer logical_port to fw*/ + u8 rsvd[2]; + __be64 pas[]; +}; + +struct xsc_create_qp_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_create_qp_request req; +}; + +struct xsc_create_qp_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 qpn; + u8 rsvd[4]; +}; + +struct xsc_destroy_qp_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; + u8 rsvd[4]; +}; + +struct xsc_destroy_qp_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_query_qp_flush_status_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; +}; + +struct xsc_query_qp_flush_status_mbox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_qp_context { + __be32 remote_qpn; + __be32 cqn_send; + __be32 cqn_recv; + __be32 next_send_psn; + __be32 next_recv_psn; + __be32 pdn; + __be16 src_udp_port; + __be16 path_id; + u8 mtu_mode; + u8 lag_sel; + u8 lag_sel_en; + u8 retry_cnt; + u8 rnr_retry; + u8 dscp; + u8 state; + u8 hop_limit; + u8 dmac[6]; + u8 smac[6]; + __be32 dip[4]; + __be32 sip[4]; + __be16 ip_type; + __be16 grp_id; + u8 vlan_valid; + u8 dci_cfi_prio_sl; + __be16 vlan_id; + u8 qp_out_port; + u8 pcie_no; + __be16 lag_id; + __be16 func_id; + __be16 rsvd; +}; + +struct xsc_query_qp_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; + u8 rsvd[4]; +}; + +struct xsc_query_qp_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_qp_context ctx; +}; + +struct xsc_modify_qp_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; + struct xsc_qp_context ctx; + u8 no_need_wait; +}; + +struct xsc_modify_qp_mbox_out { + struct xsc_outbox_hdr hdr; + u8 
rsvd[8]; +}; + +struct xsc_create_multiqp_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 qp_num; + u8 qp_type; + u8 rsvd; + __be32 req_len; + u8 data[]; +}; + +struct xsc_create_multiqp_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 qpn_base; +}; + +struct xsc_alloc_multi_virtq_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 qp_or_cq_num; + __be16 pa_num; + __be32 rsvd; + __be32 rsvd2; +}; + +struct xsc_alloc_multi_virtq_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 qnum_base; + __be32 pa_list_base; + __be32 rsvd; +}; + +struct xsc_release_multi_virtq_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 qp_or_cq_num; + __be16 pa_num; + __be32 qnum_base; + __be32 pa_list_base; +}; + +struct xsc_release_multi_virtq_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 rsvd; + __be32 rsvd2; + __be32 rsvd3; +}; + +/* MSIX TABLE mbox */ +struct xsc_msix_table_info_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 index; + u8 rsvd[6]; +}; + +struct xsc_msix_table_info_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 addr_lo; + __be32 addr_hi; + __be32 data; +}; + +/*EQ mbox*/ +struct xsc_eq_context { + __be16 vecidx; + __be16 pa_num; + u8 log_eq_sz; + __be16 glb_func_id; + u8 is_async_eq; + u8 rsvd; +}; + +struct xsc_create_eq_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_eq_context ctx; + __be64 pas[]; +}; + +struct xsc_create_eq_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 eqn; + u8 rsvd[4]; +}; + +struct xsc_destroy_eq_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 eqn; + u8 rsvd[4]; + +}; + +struct xsc_destroy_eq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/*PD mbox*/ +struct xsc_alloc_pd_request { + u8 rsvd[8]; +}; + +struct xsc_alloc_pd_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_alloc_pd_request req; +}; + +struct xsc_alloc_pd_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; +}; + +struct xsc_dealloc_pd_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 pdn; + u8 rsvd[4]; + +}; + +struct xsc_dealloc_pd_mbox_out { + struct 
xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/*MR mbox*/ +struct xsc_register_mr_request { + __be32 pdn; + __be32 pa_num; + __be32 len; + __be32 mkey; + u8 rsvd; + u8 acc; + u8 page_mode; + u8 map_en; + __be64 va_base; + __be64 pas[]; +}; + +struct xsc_register_mr_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_register_mr_request req; +}; + +struct xsc_register_mr_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 mkey; + u8 rsvd[4]; +}; + +struct xsc_unregister_mr_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 mkey; + u8 rsvd[4]; +}; + +struct xsc_unregister_mr_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_mpt_item { + __be32 pdn; + __be32 pa_num; + __be32 len; + __be32 mkey; + u8 rsvd[5]; + u8 acc; + u8 page_mode; + u8 map_en; + __be64 va_base; +}; + +struct xsc_set_mpt_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_mpt_item mpt_item; +}; + +struct xsc_set_mpt_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 mtt_base; + u8 rsvd[4]; +}; + +struct xsc_mtt_setting { + __be32 mtt_base; + __be32 pa_num; + __be64 pas[]; +}; + +struct xsc_set_mtt_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_mtt_setting mtt_setting; +}; + +struct xsc_set_mtt_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_create_mkey_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[4]; +}; + +struct xsc_create_mkey_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 mkey; +}; + +struct xsc_destroy_mkey_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 mkey; +}; + +struct xsc_destroy_mkey_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd; +}; + +struct xsc_access_reg_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd0[2]; + __be16 register_id; + __be32 arg; + __be32 data[]; +}; + +struct xsc_access_reg_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; + __be32 data[]; +}; + +struct xsc_mad_ifc_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 remote_lid; + u8 rsvd0; + u8 port; + u8 rsvd1[4]; + u8 data[256]; +}; + +struct xsc_mad_ifc_mbox_out { + struct 
xsc_outbox_hdr hdr; + u8 rsvd[8]; + u8 data[256]; +}; + +struct xsc_query_eq_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd0[3]; + u8 eqn; + u8 rsvd1[4]; +}; + +struct xsc_query_eq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; + struct xsc_eq_context ctx; +}; + +struct xsc_query_cq_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 cqn; + u8 rsvd0[4]; +}; + +struct xsc_query_cq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[8]; + struct xsc_cq_context ctx; + u8 rsvd6[16]; + __be64 pas[]; +}; + +struct xsc_cmd_query_cmdq_ver_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_query_cmdq_ver_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 cmdq_ver; + u8 rsvd[6]; +}; + +struct xsc_cmd_dummy_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_dummy_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_fw_version { + u8 fw_version_major; + u8 fw_version_minor; + __be16 fw_version_patch; + __be32 fw_version_tweak; + u8 fw_version_extra_flag; + u8 rsvd[7]; +}; + +struct xsc_hca_cap { + u8 rsvd1[12]; + u8 send_seg_num; + u8 send_wqe_shift; + u8 recv_seg_num; + u8 recv_wqe_shift; + u8 log_max_srq_sz; + u8 log_max_qp_sz; + u8 log_max_mtt; + u8 log_max_qp; + u8 log_max_strq_sz; + u8 log_max_srqs; + u8 rsvd4[2]; + u8 log_max_tso; + u8 log_max_cq_sz; + u8 rsvd6; + u8 log_max_cq; + u8 log_max_eq_sz; + u8 log_max_mkey; + u8 log_max_msix; + u8 log_max_eq; + u8 max_indirection; + u8 log_max_mrw_sz; + u8 log_max_bsf_list_sz; + u8 log_max_klm_list_sz; + u8 rsvd_8_0; + u8 log_max_ra_req_dc; + u8 rsvd_8_1; + u8 log_max_ra_res_dc; + u8 rsvd9; + u8 log_max_ra_req_qp; + u8 log_max_qp_depth; + u8 log_max_ra_res_qp; + __be16 max_vfs; + __be16 raweth_qp_id_end; + __be16 raw_tpe_qp_num; + __be16 max_qp_count; + __be16 raweth_qp_id_base; + u8 rsvd13; + u8 local_ca_ack_delay; + u8 max_num_eqs; + u8 num_ports; + u8 log_max_msg; + u8 mac_port; + __be16 raweth_rss_qp_id_base; + __be16 stat_rate_support; + u8 rsvd16[2]; + 
__be64 flags; + u8 rsvd17; + u8 uar_sz; + u8 rsvd18; + u8 log_pg_sz; + __be16 bf_log_bf_reg_size; + __be16 msix_base; + __be16 msix_num; + __be16 max_desc_sz_sq; + u8 rsvd20[2]; + __be16 max_desc_sz_rq; + u8 rsvd21[2]; + __be16 max_desc_sz_sq_dc; + u8 rsvd22[4]; + __be16 max_qp_mcg; + u8 rsvd23; + u8 log_max_mcg; + u8 rsvd24; + u8 log_max_pd; + u8 rsvd25; + u8 log_max_xrcd; + u8 rsvd26[40]; + __be32 uar_page_sz; + u8 rsvd27[8]; + __be32 hw_feature_flag;/*enum xsc_hw_feature_flag*/ + __be16 pf0_vf_funcid_base; + __be16 pf0_vf_funcid_top; + __be16 pf1_vf_funcid_base; + __be16 pf1_vf_funcid_top; + __be16 pcie0_pf_funcid_base; + __be16 pcie0_pf_funcid_top; + __be16 pcie1_pf_funcid_base; + __be16 pcie1_pf_funcid_top; + u8 log_msx_atomic_size_qp; + u8 pcie_host; + u8 rsvd28; + u8 log_msx_atomic_size_dc; + u8 board_sn[XSC_BOARD_SN_LEN]; + u8 max_tc; + u8 mac_bit; + __be16 funcid_to_logic_port; + u8 rsvd29[6]; + u8 nif_port_num; + u8 reg_mr_via_cmdq; + __be32 hca_core_clock; + __be32 max_rwq_indirection_tables;/*rss_caps*/ + __be32 max_rwq_indirection_table_size;/*rss_caps*/ + __be32 chip_ver_h; + __be32 chip_ver_m; + __be32 chip_ver_l; + __be32 hotfix_num; + __be32 feature_flag; + __be32 rx_pkt_len_max; + __be32 glb_func_id; + __be64 tx_db; + __be64 rx_db; + __be64 complete_db; + __be64 complete_reg; + __be64 event_db; + __be32 qp_rate_limit_min; + __be32 qp_rate_limit_max; + struct xsc_fw_version fw_ver; + u8 lag_logic_port_ofst; +}; + +struct xsc_cmd_query_hca_cap_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 cpu_num; + u8 rsvd[6]; +}; + +struct xsc_cmd_query_hca_cap_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[8]; + struct xsc_hca_cap hca_cap; +}; + +struct xsc_cmd_enable_hca_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 vf_num; + __be16 max_msix_vec; + __be16 cpu_num; + u8 pp_bypass; + u8 esw_mode; +}; + +struct xsc_cmd_enable_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + +struct xsc_cmd_disable_hca_mbox_in { + struct xsc_inbox_hdr hdr; + 
__be16 vf_num; + u8 pp_bypass; + u8 esw_mode; +}; + +struct xsc_cmd_disable_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + +struct xsc_cmd_modify_hca_mbox_in { + struct xsc_inbox_hdr hdr; + u8 pp_bypass; + u8 esw_mode; + u8 rsvd0[6]; +}; + +struct xsc_cmd_modify_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + +struct xsc_query_special_ctxs_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_query_special_ctxs_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 dump_fill_mkey; + __be32 reserved_lkey; +}; + +/* vport mbox */ +struct xsc_nic_vport_context { + __be32 min_wqe_inline_mode:3; + __be32 disable_mc_local_lb:1; + __be32 disable_uc_local_lb:1; + __be32 roce_en:1; + + __be32 arm_change_event:1; + __be32 event_on_mtu:1; + __be32 event_on_promisc_change:1; + __be32 event_on_vlan_change:1; + __be32 event_on_mc_address_change:1; + __be32 event_on_uc_address_change:1; + __be32 affiliation_criteria:4; + __be32 affiliated_vhca_id; + + __be16 mtu; + + __be64 system_image_guid; + __be64 port_guid; + __be64 node_guid; + + __be32 qkey_violation_counter; + + __be16 spoofchk:1; + __be16 trust:1; + __be16 promisc:1; + __be16 allmcast:1; + __be16 vlan_allowed:1; + __be16 allowed_list_type:3; + __be16 allowed_list_size:10; + + __be16 vlan_proto; + __be16 vlan; + u8 qos; + u8 permanent_address[6]; + u8 current_address[6]; + u8 current_uc_mac_address[0][2]; +}; + +enum { + XSC_HCA_VPORT_SEL_PORT_GUID = 1 << 0, + XSC_HCA_VPORT_SEL_NODE_GUID = 1 << 1, + XSC_HCA_VPORT_SEL_STATE_POLICY = 1 << 2, +}; + +struct xsc_hca_vport_context { + u32 field_select; + u32 port_physical_state:4; + u32 vport_state_policy:4; + u32 port_state:4; + u32 vport_state:4; + u32 rcvd0:16; + + u64 system_image_guid; + u64 port_guid; + u64 node_guid; + + u16 qkey_violation_counter; + u16 pkey_violation_counter; +}; + +struct xsc_query_nic_vport_context_out { + struct xsc_outbox_hdr hdr; + struct xsc_nic_vport_context nic_vport_ctx; +}; + +struct 
xsc_query_nic_vport_context_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 vport_number:16; + u32 allowed_list_type:3; + u32 rsvd:12; +}; + +struct xsc_modify_nic_vport_context_out { + struct xsc_outbox_hdr hdr; + __be16 outer_vlan_id; + u8 rsvd[2]; +}; + +struct xsc_modify_nic_vport_field_select { + __be32 affiliation:1; + __be32 disable_uc_local_lb:1; + __be32 disable_mc_local_lb:1; + __be32 node_guid:1; + __be32 port_guid:1; + __be32 min_inline:1; + __be32 mtu:1; + __be32 change_event:1; + __be32 promisc:1; + __be32 allmcast:1; + __be32 permanent_address:1; + __be32 current_address:1; + __be32 addresses_list:1; + __be32 roce_en:1; + __be32 spoofchk:1; + __be32 trust:1; + __be32 rsvd:16; +}; + +struct xsc_modify_nic_vport_context_in { + struct xsc_inbox_hdr hdr; + __be32 other_vport:1; + __be32 vport_number:16; + __be32 rsvd:15; + __be16 caps; + __be16 caps_mask; + __be16 lag_id; + + struct xsc_modify_nic_vport_field_select field_select; + struct xsc_nic_vport_context nic_vport_ctx; +}; + +struct xsc_query_hca_vport_context_out { + struct xsc_outbox_hdr hdr; + struct xsc_hca_vport_context hca_vport_ctx; +}; + +struct xsc_query_hca_vport_context_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; +}; + +struct xsc_modify_hca_vport_context_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[2]; +}; + +struct xsc_modify_hca_vport_context_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; + + struct xsc_hca_vport_context hca_vport_ctx; +}; + +struct xsc_array128 { + u8 array128[16]; +}; + +struct xsc_query_hca_vport_gid_out { + struct xsc_outbox_hdr hdr; + u16 gids_num; + struct xsc_array128 gid[]; +}; + +struct xsc_query_hca_vport_gid_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; + u16 gid_index; +}; + +struct xsc_pkey { + u16 pkey; +}; + +struct xsc_query_hca_vport_pkey_out { + 
struct xsc_outbox_hdr hdr; + struct xsc_pkey pkey[]; +}; + +struct xsc_query_hca_vport_pkey_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; + u16 pkey_index; +}; + +struct xsc_query_vport_state_out { + struct xsc_outbox_hdr hdr; + u8 admin_state:4; + u8 state:4; +}; + +struct xsc_query_vport_state_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 vport_number:16; + u32 rsvd0:15; +}; + +struct xsc_modify_vport_state_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_modify_vport_state_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 vport_number:16; + u32 rsvd0:15; + u8 admin_state:4; + u8 rsvd1:4; +}; + +struct xsc_traffic_counter { + u64 packets; + u64 bytes; +}; + +struct xsc_query_vport_counter_out { + struct xsc_outbox_hdr hdr; + struct xsc_traffic_counter received_errors; + struct xsc_traffic_counter transmit_errors; + struct xsc_traffic_counter received_ib_unicast; + struct xsc_traffic_counter transmitted_ib_unicast; + struct xsc_traffic_counter received_ib_multicast; + struct xsc_traffic_counter transmitted_ib_multicast; + struct xsc_traffic_counter received_eth_broadcast; + struct xsc_traffic_counter transmitted_eth_broadcast; + struct xsc_traffic_counter received_eth_unicast; + struct xsc_traffic_counter transmitted_eth_unicast; + struct xsc_traffic_counter received_eth_multicast; + struct xsc_traffic_counter transmitted_eth_multicast; +}; + +struct xsc_query_vport_counter_in { + struct xsc_inbox_hdr hdr; + u32 other_vport:1; + u32 port_num:4; + u32 vport_number:16; + u32 rsvd0:11; +}; + +/* ioctl mbox */ +struct xsc_ioctl_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 len; + __be16 rsvd; + u8 data[]; +}; + +struct xsc_ioctl_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 error; + __be16 len; + __be16 rsvd; + u8 data[]; +}; + +struct xsc_modify_raw_qp_request { + u16 qpn; + u16 lag_id; + u16 func_id; + u8 dma_direct; + u8 prio; + u8 qp_out_port; + u8 rsvd[7]; +}; + 
+struct xsc_modify_raw_qp_mbox_in { + struct xsc_inbox_hdr hdr; + u8 pcie_no; + u8 rsv[7]; + struct xsc_modify_raw_qp_request req; +}; + +struct xsc_modify_raw_qp_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +#define ETH_ALEN 6 + +struct xsc_create_lag_request { + __be16 lag_id; + u8 lag_type; + u8 lag_sel_mode; + u8 mac_idx; + u8 netdev_addr[ETH_ALEN]; + u8 bond_mode; + u8 slave_status; +}; + +struct xsc_add_lag_member_request { + __be16 lag_id; + u8 lag_type; + u8 lag_sel_mode; + u8 mac_idx; + u8 netdev_addr[ETH_ALEN]; + u8 bond_mode; + u8 slave_status; + u8 mad_mac_idx; +}; + +struct xsc_remove_lag_member_request { + __be16 lag_id; + u8 lag_type; + u8 mac_idx; + u8 mad_mac_idx; + u8 bond_mode; + u8 is_roce_lag_xdev; + u8 not_roce_lag_xdev_mask; +}; + +struct xsc_update_lag_member_status_request { + __be16 lag_id; + u8 lag_type; + u8 mac_idx; + u8 bond_mode; + u8 slave_status; + u8 rsvd; +}; + +struct xsc_update_lag_hash_type_request { + __be16 lag_id; + u8 lag_sel_mode; + u8 rsvd[5]; +}; + +struct xsc_destroy_lag_request { + __be16 lag_id; + u8 lag_type; + u8 mac_idx; + u8 bond_mode; + u8 slave_status; + u8 rsvd[3]; +}; + +struct xsc_set_lag_qos_request { + __be16 lag_id; + u8 member_idx; + u8 lag_op; + u8 resv[4]; +}; + +struct xsc_create_lag_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_create_lag_request req; +}; + +struct xsc_create_lag_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_add_lag_member_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_add_lag_member_request req; +}; + +struct xsc_add_lag_member_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_remove_lag_member_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_remove_lag_member_request req; +}; + +struct xsc_remove_lag_member_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_update_lag_member_status_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_update_lag_member_status_request req; +}; + +struct 
xsc_update_lag_member_status_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_update_lag_hash_type_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_update_lag_hash_type_request req; +}; + +struct xsc_update_lag_hash_type_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_destroy_lag_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_destroy_lag_request req; +}; + +struct xsc_destroy_lag_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_set_lag_qos_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_set_lag_qos_request req; +}; + +struct xsc_set_lag_qos_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/*ioctl qos*/ +struct xsc_qos_req_prfx { + u8 mac_port; + u8 rsvd[7]; +}; + +struct xsc_qos_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_qos_req_prfx req_prfx; + u8 data[]; +}; + +struct xsc_qos_mbox_out { + struct xsc_outbox_hdr hdr; + u8 data[]; +}; + +struct xsc_prio_stats { + u64 tx_bytes; + u64 rx_bytes; + u64 tx_pkts; + u64 rx_pkts; +}; + +struct xsc_prio_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 pport; +}; + +struct xsc_prio_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_prio_stats prio_stats[QOS_PRIO_MAX + 1]; +}; + +struct xsc_pfc_prio_stats { + u64 tx_pause; + u64 tx_pause_duration; + u64 rx_pause; + u64 rx_pause_duration; +}; + +struct xsc_pfc_prio_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 pport; +}; + +struct xsc_pfc_prio_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_pfc_prio_stats prio_stats[QOS_PRIO_MAX + 1]; +}; + +struct xsc_hw_stats_rdma_pf { + /*by mac port*/ + u64 rdma_tx_pkts; + u64 rdma_tx_bytes; + u64 rdma_rx_pkts; + u64 rdma_rx_bytes; + u64 np_cnp_sent; + u64 rp_cnp_handled; + u64 np_ecn_marked_roce_packets; + u64 rp_cnp_ignored; + u64 read_rsp_out_of_seq; + u64 implied_nak_seq_err; + /*by function*/ + u64 out_of_sequence; + u64 packet_seq_err; + u64 out_of_buffer; + u64 rnr_nak_retry_err; + u64 local_ack_timeout_err; + u64 
rx_read_requests; + u64 rx_write_requests; + u64 duplicate_requests; + u64 rdma_tx_pkts_func; + u64 rdma_tx_payload_bytes; + u64 rdma_rx_pkts_func; + u64 rdma_rx_payload_bytes; + /*global*/ + u64 rdma_loopback_pkts; + u64 rdma_loopback_bytes; +}; + +struct xsc_hw_stats_rdma_vf { + /*by function*/ + u64 rdma_tx_pkts_func; + u64 rdma_tx_payload_bytes; + u64 rdma_rx_pkts_func; + u64 rdma_rx_payload_bytes; + + u64 out_of_sequence; + u64 packet_seq_err; + u64 out_of_buffer; + u64 rnr_nak_retry_err; + u64 local_ack_timeout_err; + u64 rx_read_requests; + u64 rx_write_requests; + u64 duplicate_requests; +}; + +struct xsc_hw_stats_rdma { + u8 is_pf; + u8 rsv[3]; + union { + struct xsc_hw_stats_rdma_pf pf_stats; + struct xsc_hw_stats_rdma_vf vf_stats; + } stats; +}; + +struct xsc_hw_stats_eth_pf { + /*by mac port*/ + u64 rdma_tx_pkts; + u64 rdma_tx_bytes; + u64 rdma_rx_pkts; + u64 rdma_rx_bytes; + u64 tx_pause; + u64 rx_pause; + u64 rx_fcs_errors; + u64 rx_discards; + u64 tx_multicast_phy; + u64 tx_broadcast_phy; + u64 rx_multicast_phy; + u64 rx_broadcast_phy; + /*by global*/ + u64 rdma_loopback_pkts; + u64 rdma_loopback_bytes; +}; + +struct xsc_hw_stats_eth_vf { + /*by function*/ + u64 rdma_tx_pkts; + u64 rdma_tx_bytes; + u64 rdma_rx_pkts; + u64 rdma_rx_bytes; +}; + +struct xsc_hw_stats_eth { + u8 is_pf; + u8 rsv[3]; + union { + struct xsc_hw_stats_eth_pf pf_stats; + struct xsc_hw_stats_eth_vf vf_stats; + } stats; +}; + +struct xsc_hw_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 mac_port; + u8 is_lag; + u8 lag_member_num; + u8 member_port[]; +}; + +struct xsc_hw_stats_rdma_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_hw_stats_rdma hw_stats; +}; + +struct xsc_hw_stats_eth_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_hw_stats_eth hw_stats; +}; + +struct xsc_hw_global_stats_rdma { + /*by global*/ + u64 rdma_loopback_pkts; + u64 rdma_loopback_bytes; + u64 rx_icrc_encapsulated; + u64 req_cqe_error; + u64 resp_cqe_error; + u64 cqe_msg_code_error; +}; + +struct 
xsc_hw_global_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsv[4]; +}; + +struct xsc_hw_global_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_hw_global_stats_rdma hw_stats; +}; + +struct xsc_pfc_stall_stats { + /*by mac port*/ + u64 tx_pause_storm_triggered; +}; + +struct xsc_pfc_stall_stats_mbox_in { + struct xsc_inbox_hdr hdr; + u8 mac_port; +}; + +struct xsc_pfc_stall_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_pfc_stall_stats pfc_stall_stats; +}; + +struct xsc_dscp_pmt_set { + u8 dscp; + u8 priority; + u8 rsvd[6]; +}; + +struct xsc_dscp_pmt_get { + u8 prio_map[QOS_DSCP_MAX + 1]; + u8 max_prio; + u8 rsvd[7]; +}; + +struct xsc_trust_mode_set { + u8 is_pcp; + u8 rsvd[7]; +}; + +struct xsc_trust_mode_get { + u8 is_pcp; + u8 rsvd[7]; +}; + +struct xsc_pcp_pmt_set { + u8 pcp; + u8 priority; + u8 rsvd[6]; +}; + +struct xsc_pcp_pmt_get { + u8 prio_map[QOS_PCP_MAX + 1]; + u8 max_prio; + u8 rsvd[7]; +}; + +struct xsc_default_pri_set { + u8 priority; + u8 rsvd[7]; +}; + +struct xsc_default_pri_get { + u8 priority; + u8 rsvd[7]; +}; + +#define PFC_WATCHDOG_EN_OFF 0 +#define PFC_WATCHDOG_EN_ON 1 +struct xsc_watchdog_en_set { + u8 en; +}; + +struct xsc_watchdog_en_get { + u8 en; +}; + +#define PFC_WATCHDOG_PERIOD_MIN 1 +#define PFC_WATCHDOG_PERIOD_MAX 4000000 +struct xsc_watchdog_period_set { + u32 period; +}; + +struct xsc_watchdog_period_get { + u32 period; +}; + +struct xsc_event_resp { + u8 resp_cmd_type; /* bitmap:0x0001: link up/down */ +}; + +struct xsc_event_linkstatus_resp { + u8 linkstatus; /*0:down, 1:up*/ +}; + +struct xsc_event_linkinfo { + u8 linkstatus; /*0:down, 1:up*/ + u8 port; + u8 duplex; + u8 autoneg; + u32 linkspeed; + u64 supported; + u64 advertising; + u64 supported_fec; /* reserved, not support currently */ + u64 advertised_fec; /* reserved, not support currently */ + u64 supported_speed[2]; + u64 advertising_speed[2]; +}; + +struct xsc_lldp_status_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 os_handle_lldp; + u8 
sub_type; +}; + +struct xsc_lldp_status_mbox_out { + struct xsc_outbox_hdr hdr; + union { + __be32 os_handle_lldp; + __be32 dcbx_status; + } status; +}; + +struct xsc_vport_rate_limit_mobox_in { + struct xsc_inbox_hdr hdr; + u8 other_vport; + __be16 vport_number; + __be16 rsvd0; + __be32 rate; +}; + +struct xsc_vport_rate_limit_mobox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_event_query_type_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[2]; +}; + +struct xsc_event_query_type_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_event_resp ctx; +}; + +struct xsc_event_query_linkstatus_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[2]; +}; + +struct xsc_event_query_linkstatus_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_event_linkstatus_resp ctx; +}; + +struct xsc_event_query_linkinfo_mbox_in { + struct xsc_inbox_hdr hdr; +}; + +struct xsc_event_query_linkinfo_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_event_linkinfo ctx; +}; + +struct xsc_event_modify_linkinfo_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_event_linkinfo ctx; +}; + +struct xsc_event_modify_linkinfo_mbox_out { + struct xsc_outbox_hdr hdr; + u32 status; +}; + +struct xsc_event_set_port_admin_status_mbox_in { + struct xsc_inbox_hdr hdr; + u16 admin_status; + +}; + +struct xsc_event_set_port_admin_status_mbox_out { + struct xsc_outbox_hdr hdr; + u32 status; +}; + +struct xsc_event_set_led_status_mbox_in { + struct xsc_inbox_hdr hdr; + u8 port_id; +}; + +struct xsc_event_set_led_status_mbox_out { + struct xsc_outbox_hdr hdr; + u32 status; +}; + +struct xsc_event_modify_fecparam_mbox_in { + struct xsc_inbox_hdr hdr; + u32 fec; +}; + +struct xsc_event_modify_fecparam_mbox_out { + struct xsc_outbox_hdr hdr; + u32 status; +}; + +struct xsc_event_query_fecparam_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[2]; +}; + +struct xsc_event_query_fecparam_mbox_out { + struct xsc_outbox_hdr hdr; + u32 active_fec; + u32 fec_cfg; + u32 status; +}; + +#define PFC_ON_PG_PRFL_IDX 0 
+#define PFC_OFF_PG_PRFL_IDX 1 +#define PFC_ON_QMU_VALUE 0 +#define PFC_OFF_QMU_VALUE 1 + +#define NIF_PFC_EN_ON 1 +#define NIF_PFC_EN_OFF 0 + +#define PFC_CFG_CHECK_TIMEOUT_US 8000000 +#define PFC_CFG_CHECK_SLEEP_TIME_US 200 +#define PFC_CFG_CHECK_MAX_RETRY_TIMES \ + (PFC_CFG_CHECK_TIMEOUT_US / PFC_CFG_CHECK_SLEEP_TIME_US) +#define PFC_CFG_CHECK_VALID_CNT 3 + +enum { + PFC_OP_ENABLE = 0, + PFC_OP_DISABLE, + PFC_OP_MODIFY, + PFC_OP_TYPE_MAX, +}; + +enum { + DROP_TH_CLEAR = 0, + DROP_TH_RECOVER, + DROP_TH_RECOVER_LOSSY, + DROP_TH_RECOVER_LOSSLESS, +}; + +struct xsc_pfc_cfg { + u8 req_prio; + u8 req_pfc_en; + u8 curr_prio; + u8 curr_pfc_en; + u8 pfc_op; + u8 lossless_num; +}; + +#define LOSSLESS_NUM_INVAILD 9 +struct xsc_pfc_set { + u8 priority; + u8 pfc_on; + u8 type; + u8 src_prio; + u8 lossless_num; +}; + +#define PFC_PRIO_MAX 7 +struct xsc_pfc_get { + u8 pfc_on[PFC_PRIO_MAX + 1]; + u8 max_prio; +}; + +struct xsc_pfc_set_drop_th_mbox_in { + struct xsc_inbox_hdr hdr; + u8 prio; + u8 cfg_type; +}; + +struct xsc_pfc_set_drop_th_mbox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_pfc_get_cfg_status_mbox_in { + struct xsc_inbox_hdr hdr; + u8 prio; +}; + +struct xsc_pfc_get_cfg_status_mbox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_rate_limit_set { + u32 rate_cir; + u32 limit_id; + u8 limit_level; + u8 rsvd[7]; +}; + +struct xsc_rate_limit_get { + u32 rate_cir[QOS_PRIO_MAX + 1]; + u32 max_limit_id; + u8 limit_level; + u8 rsvd[3]; +}; + +struct xsc_sp_set { + u8 sp[QOS_PRIO_MAX + 1]; +}; + +struct xsc_sp_get { + u8 sp[QOS_PRIO_MAX + 1]; + u8 max_prio; + u8 rsvd[7]; +}; + +struct xsc_weight_set { + u8 weight[QOS_PRIO_MAX + 1]; +}; + +struct xsc_weight_get { + u8 weight[QOS_PRIO_MAX + 1]; + u8 max_prio; + u8 rsvd[7]; +}; + +struct xsc_dpu_port_weight_set { + u8 target; + u8 weight[DPU_PORT_WGHT_CFG_MAX + 1]; + u8 rsv[5]; +}; + +struct xsc_dpu_port_weight_get { + u8 weight[DPU_PORT_WGHT_TARGET_NUM][DPU_PORT_WGHT_CFG_MAX + 1]; + u8 rsvd[4]; +}; + +struct 
xsc_dpu_prio_weight_set { + u8 target; + u8 weight[QOS_PRIO_MAX + 1]; + u8 rsv[7]; +}; + +struct xsc_dpu_prio_weight_get { + u8 weight[DPU_PRIO_WGHT_TARGET_NUM][QOS_PRIO_MAX + 1]; +}; + +struct xsc_cc_mbox_in { + struct xsc_inbox_hdr hdr; + u8 data[]; +}; + +struct xsc_cc_mbox_out { + struct xsc_outbox_hdr hdr; + u8 data[]; +}; + +struct xsc_cc_ctrl_cmd { + u16 cmd; + u16 len; + u8 val[]; +}; + +struct xsc_cc_cmd_enable_rp { + u16 cmd; + u16 len; + u32 enable; + u32 section; +}; + +struct xsc_cc_cmd_enable_np { + u16 cmd; + u16 len; + u32 enable; + u32 section; +}; + +struct xsc_cc_cmd_init_alpha { + u16 cmd; + u16 len; + u32 alpha; + u32 section; +}; + +struct xsc_cc_cmd_g { + u16 cmd; + u16 len; + u32 g; + u32 section; +}; + +struct xsc_cc_cmd_ai { + u16 cmd; + u16 len; + u32 ai; + u32 section; +}; + +struct xsc_cc_cmd_hai { + u16 cmd; + u16 len; + u32 hai; + u32 section; +}; + +struct xsc_cc_cmd_th { + u16 cmd; + u16 len; + u32 threshold; + u32 section; +}; + +struct xsc_cc_cmd_bc { + u16 cmd; + u16 len; + u32 bytecount; + u32 section; +}; + +struct xsc_cc_cmd_cnp_opcode { + u16 cmd; + u16 len; + u32 opcode; +}; + +struct xsc_cc_cmd_cnp_bth_b { + u16 cmd; + u16 len; + u32 bth_b; +}; + +struct xsc_cc_cmd_cnp_bth_f { + u16 cmd; + u16 len; + u32 bth_f; +}; + +struct xsc_cc_cmd_cnp_ecn { + u16 cmd; + u16 len; + u32 ecn; +}; + +struct xsc_cc_cmd_data_ecn { + u16 cmd; + u16 len; + u32 ecn; +}; + +struct xsc_cc_cmd_cnp_tx_interval { + u16 cmd; + u16 len; + u32 interval; // us + u32 section; +}; + +struct xsc_cc_cmd_evt_rsttime { + u16 cmd; + u16 len; + u32 period; +}; + +struct xsc_cc_cmd_cnp_dscp { + u16 cmd; + u16 len; + u32 dscp; + u32 section; +}; + +struct xsc_cc_cmd_cnp_pcp { + u16 cmd; + u16 len; + u32 pcp; + u32 section; +}; + +struct xsc_cc_cmd_evt_period_alpha { + u16 cmd; + u16 len; + u32 period; +}; + +struct xsc_cc_cmd_clamp_tgt_rate { + u16 cmd; + u16 len; + u32 clamp_tgt_rate; + u32 section; +}; + +struct xsc_cc_cmd_max_hai_factor { + u16 cmd; + u16 len; 
+ u32 max_hai_factor; + u32 section; +}; + +struct xsc_cc_cmd_scale { + u16 cmd; + u16 len; + u32 scale; + u32 section; +}; + +struct xsc_cc_cmd_get_cfg { + u16 cmd; + u16 len; + u32 enable_rp; + u32 enable_np; + u32 init_alpha; + u32 g; + u32 ai; + u32 hai; + u32 threshold; + u32 bytecount; + u32 opcode; + u32 bth_b; + u32 bth_f; + u32 cnp_ecn; + u32 data_ecn; + u32 cnp_tx_interval; + u32 evt_period_rsttime; + u32 cnp_dscp; + u32 cnp_pcp; + u32 evt_period_alpha; + u32 clamp_tgt_rate; + u32 max_hai_factor; + u32 scale; + u32 section; +}; + +struct xsc_cc_cmd_get_stat { + u16 cmd; + u16 len; + u32 section; +}; + +struct xsc_cc_cmd_stat { + u32 cnp_handled; + u32 alpha_recovery; + u32 reset_timeout; + u32 reset_bytecount; +}; + +struct xsc_set_mtu_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 mtu; + __be16 rx_buf_sz_min; + u8 mac_port; + u8 rsvd; +}; + +struct xsc_hwc_mbox_in { + struct xsc_inbox_hdr hdr; + u8 data[]; +}; + +struct xsc_hwc_mbox_out { + struct xsc_outbox_hdr hdr; + u8 data[]; +}; + +struct hwc_set_t { + u8 type; + u8 s_wqe_mode; + u8 r_wqe_mode; + u8 ack_timeout; + u8 group_mode; + u8 lossless_prio[XSC_MAX_MAC_NUM]; + u8 lossless_prio_len; + u8 retry_cnt_th; + u8 adapt_to_other; + u8 alloc_qp_id_mode; + u16 vf_num_per_pf; + u16 max_vf_num_per_pf; + u8 eth_pkt_offset; + u8 rdma_pkt_offset; + u8 tso_eth_pkt_offset; + u8 tx_dedi_pref; + u8 reg_mr_via_cmdq; + u8 per_dst_grp_thr; + u8 per_dst_grp_cnt; + u8 dcbx_status[XSC_MAX_MAC_NUM]; + u8 dcbx_port_cnt; +}; + +struct hwc_get_t { + u8 cur_s_wqe_mode; + u8 next_s_wqe_mode; + u8 cur_r_wqe_mode; + u8 next_r_wqe_mode; + u8 cur_ack_timeout; + u8 next_ack_timeout; + u8 cur_group_mode; + u8 next_group_mode; + u8 cur_lossless_prio[XSC_MAX_MAC_NUM]; + u8 next_lossless_prio[XSC_MAX_MAC_NUM]; + u8 lossless_prio_len; + u8 cur_retry_cnt_th; + u8 next_retry_cnt_th; + u8 cur_adapt_to_other; + u8 next_adapt_to_other; + u16 cur_vf_num_per_pf; + u16 next_vf_num_per_pf; + u16 cur_max_vf_num_per_pf; + u16 
next_max_vf_num_per_pf; + u8 cur_eth_pkt_offset; + u8 next_eth_pkt_offset; + u8 cur_rdma_pkt_offset; + u8 next_rdma_pkt_offset; + u8 cur_tso_eth_pkt_offset; + u8 next_tso_eth_pkt_offset; + u8 cur_alloc_qp_id_mode; + u8 next_alloc_qp_id_mode; + u8 cur_tx_dedi_pref; + u8 next_tx_dedi_pref; + u8 cur_reg_mr_via_cmdq; + u8 next_reg_mr_via_cmdq; + u8 cur_per_dst_grp_thr; + u8 next_per_dst_grp_thr; + u8 cur_per_dst_grp_cnt; + u8 next_per_dst_grp_cnt; + u8 cur_dcbx_status[XSC_MAX_MAC_NUM]; + u8 next_dcbx_status[XSC_MAX_MAC_NUM]; + u8 dcbx_port_cnt; +}; + +struct xsc_set_mtu_mbox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_query_eth_mac_mbox_in { + struct xsc_inbox_hdr hdr; + u8 index; +}; + +struct xsc_query_eth_mac_mbox_out { + struct xsc_outbox_hdr hdr; + u8 mac[6]; +}; + +struct xsc_query_pause_cnt_mbox_in { + struct xsc_inbox_hdr hdr; + u16 mac_port; + u16 cnt_type; + u32 reg_addr; +}; + +struct xsc_query_pause_cnt_mbox_out { + struct xsc_outbox_hdr hdr; + u64 val; +}; + +enum { + XSC_TBM_CAP_HASH_PPH = 0, + XSC_TBM_CAP_RSS, + XSC_TBM_CAP_PP_BYPASS, + XSC_TBM_CAP_PCT_DROP_CONFIG, +}; + +struct xsc_nic_attr { + __be16 caps; + __be16 caps_mask; + u8 mac_addr[6]; +}; + +struct xsc_rss_attr { + u8 rss_en; + u8 hfunc; + __be16 rqn_base; + __be16 rqn_num; + __be32 hash_tmpl; +}; + +struct xsc_cmd_enable_nic_hca_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_nic_attr nic; + struct xsc_rss_attr rss; +}; + +struct xsc_cmd_enable_nic_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[2]; +}; + +struct xsc_nic_dis_attr { + __be16 caps; +}; + +struct xsc_cmd_disable_nic_hca_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_nic_dis_attr nic; +}; + +struct xsc_cmd_disable_nic_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + +enum { + XSC_RSS_HASH_KEY_UPDATE = 0, + XSC_RSS_HASH_TEMP_UPDATE, + XSC_RSS_HASH_FUNC_UPDATE, + XSC_RSS_RXQ_UPDATE, + XSC_RSS_RXQ_DROP, +}; + +struct xsc_rss_modify_attr { + u8 caps_mask; + u8 rss_en; + __be16 rqn_base; + __be16 
rqn_num; + u8 hfunc; + __be32 hash_tmpl; + u8 hash_key[52]; +}; + +struct xsc_cmd_modify_nic_hca_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_nic_attr nic; + struct xsc_rss_modify_attr rss; +}; + +struct xsc_cmd_modify_nic_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + +struct xsc_function_reset_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 glb_func_id; + u8 rsvd[6]; +}; + +struct xsc_function_reset_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +enum { + XSC_PCIE_LAT_FEAT_SET_EN = 0, + XSC_PCIE_LAT_FEAT_GET_EN, + XSC_PCIE_LAT_FEAT_SET_INTERVAL, + XSC_PCIE_LAT_FEAT_GET_INTERVAL, + XSC_PCIE_LAT_FEAT_GET_HISTOGRAM, + XSC_PCIE_LAT_FEAT_GET_PEAK, + XSC_PCIE_LAT_FEAT_HW, + XSC_PCIE_LAT_FEAT_HW_INIT, +}; + +struct xsc_pcie_lat { + u8 pcie_lat_enable; + u32 pcie_lat_interval[XSC_PCIE_LAT_CFG_INTERVAL_MAX]; + u32 pcie_lat_histogram[XSC_PCIE_LAT_CFG_HISTOGRAM_MAX]; + u32 pcie_lat_peak; +}; + +struct xsc_pcie_lat_feat_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 xsc_pcie_lat_feature_opcode; + struct xsc_pcie_lat pcie_lat; +}; + +struct xsc_pcie_lat_feat_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 xsc_pcie_lat_feature_opcode; + struct xsc_pcie_lat pcie_lat; +}; + +struct xsc_reg_mcia { + u8 module; + u8 status; + + u8 i2c_device_address; + u8 page_number; + u8 device_address; + + u8 size; + + u8 dword_0[0x20]; + u8 dword_1[0x20]; + u8 dword_2[0x20]; + u8 dword_3[0x20]; + u8 dword_4[0x20]; + u8 dword_5[0x20]; + u8 dword_6[0x20]; + u8 dword_7[0x20]; + u8 dword_8[0x20]; + u8 dword_9[0x20]; + u8 dword_10[0x20]; + u8 dword_11[0x20]; +}; + +struct xsc_rtt_en_mbox_in { + struct xsc_inbox_hdr hdr; + u8 en;//0-disable, 1-enable + u8 rsvd[7]; +}; + +struct xsc_rtt_en_mbox_out { + struct xsc_outbox_hdr hdr; + u8 en;//0-disable, 1-enable + u8 rsvd[7]; +}; + +struct xsc_rtt_qpn_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn[32]; +}; + +struct xsc_rtt_qpn_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct 
xsc_get_rtt_qpn_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 qpn[32]; +}; + +struct xsc_rtt_period_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 period; //ms +}; + +struct xsc_rtt_period_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 period; //ms + u8 rsvd[4]; +}; + +struct xsc_rtt_result_mbox_out { + struct xsc_outbox_hdr hdr; + __be64 result[32]; +}; + +struct rtt_stats { + u64 rtt_succ_snd_req_cnt; + u64 rtt_succ_snd_rsp_cnt; + u64 rtt_fail_snd_req_cnt; + u64 rtt_fail_snd_rsp_cnt; + u64 rtt_rcv_req_cnt; + u64 rtt_rcv_rsp_cnt; + u64 rtt_rcv_unk_cnt; + u64 rtt_grp_invalid_cnt; +}; + +struct xsc_rtt_stats_mbox_out { + struct xsc_outbox_hdr hdr; + struct rtt_stats stats; +}; + +enum { + XSC_AP_FEAT_SET_UDP_SPORT = 0, +}; + +struct xsc_ap_feat_set_udp_sport { + u32 qpn; + u32 udp_sport; +}; + +struct xsc_ap { + struct xsc_ap_feat_set_udp_sport set_udp_sport; +}; + +struct xsc_ap_feat_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 xsc_ap_feature_opcode; + struct xsc_ap ap; +}; + +struct xsc_ap_feat_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 xsc_ap_feature_opcode; + struct xsc_ap ap; +}; + +struct xsc_set_debug_info_mbox_in { + struct xsc_inbox_hdr hdr; + u8 set_field; + u8 log_level; + u8 cmd_verbose; + u8 rsvd[5]; +}; + +struct xsc_set_debug_info_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_enable_relaxed_order_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_enable_relaxed_order_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_query_guid_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_query_guid_mbox_out { + struct xsc_outbox_hdr hdr; + __be64 guid; +}; + +struct xsc_cmd_activate_hw_config_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_activate_hw_config_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +#endif /* XSC_CMD_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h 
b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h new file mode 100644 index 000000000000..122b06a87991 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h @@ -0,0 +1,1315 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_CORE_H +#define XSC_CORE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common/xsc_macro.h" +#include "common/xsc_cmd.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_auto_hw.h" +#include "common/driver.h" +#include "common/xsc_reg.h" +#include "common/xsc_eswitch.h" + +extern uint xsc_debug_mask; +extern unsigned int xsc_log_level; + +#ifndef mmiowb +#define mmiowb() +#endif + +#define XSC_PCI_VENDOR_ID 0x1f67 + +#define XSC_MC_PF_DEV_ID 0x1011 +#define XSC_MC_VF_DEV_ID 0x1012 + +#define XSC_MF_HOST_PF_DEV_ID 0x1051 +#define XSC_MF_HOST_VF_DEV_ID 0x1052 +#define XSC_MF_SOC_PF_DEV_ID 0x1053 + +#define XSC_MS_PF_DEV_ID 0x1111 +#define XSC_MS_VF_DEV_ID 0x1112 + +#define XSC_MV_HOST_PF_DEV_ID 0x1151 +#define XSC_MV_HOST_VF_DEV_ID 0x1152 +#define XSC_MV_SOC_PF_DEV_ID 0x1153 + +#define REG_ADDR(dev, offset) \ + (xsc_core_is_pf(dev) ? 
((dev->bar) + ((offset) - 0xA0000000)) : ((dev->bar) + (offset))) + +#define REG_WIDTH_TO_STRIDE(width) ((width) / 8) +#define QPM_PAM_TBL_NUM 4 +#define QPM_PAM_TBL_NUM_MASK 3 +#define QPM_PAM_TBL_INDEX_SHIFT 2 +#define QPM_PAM_PAGE_SHIFT 12 + +#define XSC_SUB_DEV_ID_MC_50 0xC050 +#define XSC_SUB_DEV_ID_MC_100 0xC100 +#define XSC_SUB_DEV_ID_MC_200 0xC200 +#define XSC_SUB_DEV_ID_MC_400S 0xC400 +#define XSC_SUB_DEV_ID_MF_50 0xF050 +#define XSC_SUB_DEV_ID_MF_200 0xF200 +#define XSC_SUB_DEV_ID_MS_50 0xA050 +#define XSC_SUB_DEV_ID_MS_100Q 0xA104 +#define XSC_SUB_DEV_ID_MS_200 0xA200 +#define XSC_SUB_DEV_ID_MS_200S 0xA201 +#define XSC_SUB_DEV_ID_MS_400M 0xA202 +#define XSC_SUB_DEV_ID_MS_200_OCP 0xA203 +#define XSC_SUB_DEV_ID_MV_100 0xD100 +#define XSC_SUB_DEV_ID_MV_200 0xD200 + +#define XSC_MAX_PRODUCT_NAME_LEN 32 + +enum { + XSC_LOG_LEVEL_DBG = 0, + XSC_LOG_LEVEL_INFO = 1, + XSC_LOG_LEVEL_WARN = 2, + XSC_LOG_LEVEL_ERR = 3, +}; + +enum { + XSC_CHIP_MC, + XSC_CHIP_MF, + XSC_CHIP_MS, + XSC_CHIP_MV, + XSC_CHIP_UNKNOWN, +}; + +#ifndef dev_fmt +#define dev_fmt(fmt) fmt +#endif + +#define xsc_dev_log(condition, level, dev, fmt, ...) \ +do { \ + if (condition) \ + dev_printk(level, dev, dev_fmt(fmt), ##__VA_ARGS__); \ +} while (0) + +#define xsc_core_dbg(__dev, format, ...) \ + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_DBG, KERN_DEBUG, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define xsc_core_dbg_once(__dev, format, ...) \ + dev_dbg_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define xsc_core_dbg_mask(__dev, mask, format, ...) \ +do { \ + if ((mask) & xsc_debug_mask) \ + xsc_core_dbg(__dev, format, ##__VA_ARGS__); \ +} while (0) + +#define xsc_core_err(__dev, format, ...) 
\ + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_ERR, KERN_ERR, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define xsc_core_err_rl(__dev, format, ...) \ + dev_err_ratelimited(&(__dev)->pdev->dev, \ + "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +#define xsc_core_warn(__dev, format, ...) \ + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_WARN, KERN_WARNING, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define xsc_core_info(__dev, format, ...) \ + xsc_dev_log(xsc_log_level <= XSC_LOG_LEVEL_INFO, KERN_INFO, \ + &(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, ##__VA_ARGS__) + +#define xsc_pr_debug(format, ...) \ +do { \ + if (xsc_log_level <= XSC_LOG_LEVEL_DBG) \ + pr_debug(format, ##__VA_ARGS__); \ +} while (0) + +#define assert(__dev, expr) \ +do { \ + if (!(expr)) { \ + dev_err(&(__dev)->pdev->dev, \ + "Assertion failed! %s, %s, %s, line %d\n", \ + #expr, __FILE__, __func__, __LINE__); \ + } \ +} while (0) + +#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) + +#define XSC_PCIE_NO_HOST 0x0 +#define XSC_PCIE_NO_SOC 0x1 +#define XSC_PCIE_NO_UNSET 0xFF + +enum xsc_driver_mode { + HOST_MODE, + SOC_MODE, +}; + +u8 xsc_get_driver_work_mode(void); + +enum xsc_dev_event { + XSC_DEV_EVENT_SYS_ERROR, + XSC_DEV_EVENT_PORT_UP, + XSC_DEV_EVENT_PORT_DOWN, + XSC_DEV_EVENT_PORT_INITIALIZED, + XSC_DEV_EVENT_LID_CHANGE, + XSC_DEV_EVENT_PKEY_CHANGE, + XSC_DEV_EVENT_GUID_CHANGE, + XSC_DEV_EVENT_CLIENT_REREG, +}; + +enum { + /* one minute for the sake of bringup. 
Generally, commands must always + * complete and we may need to increase this timeout value + */ + XSC_CMD_TIMEOUT_MSEC = 10 * 1000, + XSC_CMD_WQ_MAX_NAME = 32, +}; + +enum { + XSC_MAX_NAME_LEN = 32, +}; + +enum { + XSC_MAX_PORTS = 2, +}; + +enum { + MAX_MR_CACHE_ENTRIES = 16, +}; + +enum { + XSC_CMD_DATA, /* print command payload only */ + XSC_CMD_TIME, /* print command execution time */ +}; + +enum xsc_rdma_driver_id { + RDMA_DRIVER_XSC_UNKNOWN, + RDMA_DRIVER_XSC5, + RDMA_DRIVER_XSC4, +}; + +/* mutex for interface device list */ +extern struct mutex xsc_intf_mutex; + +#define GROUP_REFER_CNT_SIZE 1024 + +struct qp_group_refer { + spinlock_t lock; /* protect refer_cnt[] */ + u16 refer_cnt[GROUP_REFER_CNT_SIZE]; +}; + +struct xsc_priv_device { + char device_name[IB_DEVICE_NAME_MAX]; + dev_t devno; + struct cdev cdev; + struct list_head mem_list; + spinlock_t mem_lock; /* protect mem_list */ + struct radix_tree_root bdf_tree; + spinlock_t bdf_lock; /* protect bdf_tree */ +}; + +enum xsc_pci_status { + XSC_PCI_STATUS_DISABLED, + XSC_PCI_STATUS_ENABLED, +}; + +enum xsc_device_state { + XSC_DEVICE_STATE_UNINITIALIZED, + XSC_DEVICE_STATE_UP, + XSC_DEVICE_STATE_INTERNAL_ERROR, +}; + +enum xsc_interface_state { + XSC_INTERFACE_STATE_UP = BIT(0), + XSC_INTERFACE_STATE_TEARDOWN = BIT(1), +}; + +enum { + XSC_INTERFACE_PROTOCOL_IB = 0, + XSC_INTERFACE_PROTOCOL_ETH = 1, +}; + +enum { + XSC_INTERFACE_ADDED, + XSC_INTERFACE_ATTACHED, +}; + +#define CONFIG_XSC_SRIOV 1 + +enum xsc_coredev_type { + XSC_COREDEV_PF, + XSC_COREDEV_VF, + XSC_COREDEV_SF +}; + +enum { + XSC_PCI_DEV_IS_VF = 1 << 0, +}; + +enum port_state_policy { + XSC_POLICY_DOWN = 0, + XSC_POLICY_UP = 1, + XSC_POLICY_FOLLOW = 2, + XSC_POLICY_INVALID = 0xffffffff +}; + +enum { + XSC_CAP_PORT_TYPE_IB = 0x0, + XSC_CAP_PORT_TYPE_ETH = 0x1, +}; + +enum xsc_inline_modes { + XSC_INLINE_MODE_NONE, + XSC_INLINE_MODE_L2, + XSC_INLINE_MODE_IP, + XSC_INLINE_MODE_TCP_UDP, +}; + +struct xsc_core_device; + +struct xsc_vf_context { + 
int enabled; + u64 port_guid; + u64 node_guid; + enum port_state_policy policy; +}; + +struct xsc_sriov_vf { + struct xsc_core_device *dev; + struct kobject kobj; + int vf; +}; + +struct xsc_pci_sriov { + /* standard SRIOV capability fields, mostly for debug */ + int pos; /* capability position */ + int nres; /* number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total_vfs; /* total VFs of PF */ + u16 initial_vfs; /* initial VFs of PF */ + u16 num_vfs; /* number of VFs available */ + u16 offset; /* first VF Routing ID offset */ + u16 stride; /* following VF stride */ + u16 vf_device; /* VF device ID */ + u32 pgsz; /* page size for BAR alignment */ + u8 link; /* Function Dependency Link */ +}; + +struct xsc_core_sriov { + int num_vfs; + u16 max_vfs; + u16 vf_bdf_base; + u8 probe_vf; + struct xsc_vf_context *vfs_ctx; + struct kobject *config; + struct kobject *groups_config; + struct kobject node_guid_kobj; + struct xsc_sriov_vf *vfs; + struct xsc_pci_sriov pci_sriov; +}; + +struct xsc_vgroup { + struct xsc_core_device *dev; + u32 group_id; + u32 num_vports; + u32 tsar_ix; + u32 max_rate; + u32 min_rate; + u32 bw_share; + struct kobject kobj; + struct list_head list; +}; + +struct xsc_vport_info { + u8 mac[ETH_ALEN]; + u16 vlan; + u8 qos; + __be16 vlan_proto; + u64 node_guid; + int link_state; + u32 min_rate; + u32 max_rate; + u8 spoofchk; + u8 trusted; + u8 roce; + /* the admin approved vlan list */ + DECLARE_BITMAP(vlan_trunk_8021q_bitmap, VLAN_N_VID); + u32 group; +}; + +#define XSC_L2_ADDR_HASH_SIZE 8 + +enum xsc_eswitch_vport_event { + XSC_VPORT_UC_ADDR_CHANGE = BIT(0), + XSC_VPORT_MC_ADDR_CHANGE = BIT(1), + XSC_VPORT_PROMISC_CHANGE = BIT(2), + XSC_VPORT_VLAN_CHANGE = BIT(3), +}; + +struct xsc_vport { + struct xsc_core_device *dev; + u16 vport; + struct hlist_head uc_list[XSC_L2_ADDR_HASH_SIZE]; + struct hlist_head mc_list[XSC_L2_ADDR_HASH_SIZE]; + /* The requested vlan list from the vport side */ + 
DECLARE_BITMAP(req_vlan_bitmap, VLAN_N_VID); + /* Actual accepted vlans on the acl tables */ + DECLARE_BITMAP(acl_vlan_8021q_bitmap, VLAN_N_VID); + struct work_struct vport_change_handler; + + struct xsc_vport_info info; + + struct { + u8 enabled; + u32 esw_tsar_ix; + u32 bw_share; + u32 min_rate; + u32 max_rate; + } qos; + + u8 enabled; + enum xsc_eswitch_vport_event enabled_events; + u16 match_id; + u32 bond_metadata; + u16 vlan_id; + u8 vlan_qos; + __be16 vlan_proto; +}; + +struct xsc_eswitch { + struct xsc_core_device *dev; + u32 flags; + int total_vports; + int enabled_vports; + int num_vfs; + struct xsc_vport *vports; + struct workqueue_struct *work_queue; + + /* Synchronize between vport change events + * and async SRIOV admin state changes + */ + struct mutex state_lock; + + /* Protects eswitch mode changes occurring via sriov + * state change, devlink commands. + */ + struct mutex mode_lock; + int mode; + int nvports; + u16 manager_vport; + u16 first_host_vport; +}; + +struct xsc_core_health { + u8 sick; +}; + +struct xsc_priv { + char name[XSC_MAX_NAME_LEN]; + struct list_head dev_list; + struct list_head ctx_list; + spinlock_t ctx_lock; /* protect ctx_list */ + int numa_node; + struct xsc_core_sriov sriov; + struct xsc_eswitch *eswitch; + struct xsc_core_health health; +}; + +struct xsc_port_ctrl { + struct list_head node; + dev_t devid; + struct cdev cdev; + struct device *device; + struct list_head file_list; + spinlock_t file_lock; /* protect file_list */ +}; + +typedef int (*restore_func_t)(struct xsc_core_device *dev); + +struct xsc_bdf_file { + unsigned long key; + struct radix_tree_root obj_tree; /* protect obj_tree */ + spinlock_t obj_lock; + struct xsc_core_device *xdev; + restore_func_t restore_nic_fn; +}; + +struct xsc_port_ctrl_file { + struct list_head file_node; + struct radix_tree_root bdf_tree; + spinlock_t bdf_lock; /* protect bdf_tree */ + struct xsc_bdf_file *root_bdf; + struct xsc_port_ctrl *ctrl; +}; + +struct xsc_port_caps { + int 
gid_table_len; + int pkey_table_len; +}; + +struct xsc_caps { + u8 log_max_eq; + u8 log_max_cq; + u8 log_max_qp; + u8 log_max_mkey; + u8 log_max_pd; + u8 log_max_srq; + u8 log_max_msix; + u32 max_cqes; + u32 max_wqes; + u32 max_sq_desc_sz; + u32 max_rq_desc_sz; + u64 flags; + u16 stat_rate_support; + u32 log_max_msg; + u32 num_ports; + u32 max_ra_res_qp; + u32 max_ra_req_qp; + u32 max_srq_wqes; + u32 bf_reg_size; + u32 bf_regs_per_page; + struct xsc_port_caps port[XSC_MAX_PORTS]; + u8 ext_port_cap[XSC_MAX_PORTS]; + u32 reserved_lkey; + u8 local_ca_ack_delay; + u8 log_max_mcg; + u16 max_qp_mcg; + u32 min_page_sz; + u32 send_ds_num; + u32 send_wqe_shift; + u32 recv_ds_num; + u32 recv_wqe_shift; + u32 rx_pkt_len_max; + + u32 msix_enable:1; + u32 port_type:1; + u32 embedded_cpu:1; + u32 eswitch_manager:1; + u32 ecpf_vport_exists:1; + u32 vport_group_manager:1; + u32 sf:1; + u32 wqe_inline_mode:3; + u32 raweth_qp_id_base:15; + u32 rsvd0:7; + + u16 max_vfs; + u8 log_max_qp_depth; + u8 log_max_current_uc_list; + u8 log_max_current_mc_list; + u16 log_max_vlan_list; + u8 fdb_multi_path_to_table; + u8 log_esw_max_sched_depth; + + u8 max_num_sf_partitions; + u8 log_max_esw_sf; + u16 sf_base_id; + + u32 max_tc:8; + u32 ets:1; + u32 dcbx:1; + u32 dscp:1; + u32 sbcam_reg:1; + u32 qos:1; + u32 port_buf:1; + u32 rsvd1:2; + u32 raw_tpe_qp_num:16; + u32 max_num_eqs:8; + u32 mac_port:8; + u32 raweth_rss_qp_id_base:16; + u16 msix_base; + u16 msix_num; + u8 log_max_mtt; + u8 log_max_tso; + u32 hca_core_clock; + u32 max_rwq_indirection_tables;/*rss_caps*/ + u32 max_rwq_indirection_table_size;/*rss_caps*/ + u16 raweth_qp_id_end; + u32 qp_rate_limit_min; + u32 qp_rate_limit_max; + u32 hw_feature_flag; + u16 pf0_vf_funcid_base; + u16 pf0_vf_funcid_top; + u16 pf1_vf_funcid_base; + u16 pf1_vf_funcid_top; + u16 pcie0_pf_funcid_base; + u16 pcie0_pf_funcid_top; + u16 pcie1_pf_funcid_base; + u16 pcie1_pf_funcid_top; + u8 nif_port_num; + u8 pcie_host; + u8 mac_bit; + u16 funcid_to_logic_port; + 
u8 lag_logic_port_ofst; +}; + +struct cache_ent { + /* protect block chain allocations + */ + spinlock_t lock; + struct list_head head; +}; + +struct cmd_msg_cache { + struct cache_ent large; + struct cache_ent med; + +}; + +#define CMD_FIRST_SIZE 8 +struct xsc_cmd_first { + __be32 data[CMD_FIRST_SIZE]; +}; + +struct xsc_cmd_mailbox { + void *buf; + dma_addr_t dma; + struct xsc_cmd_mailbox *next; +}; + +struct xsc_cmd_msg { + struct list_head list; + struct cache_ent *cache; + u32 len; + struct xsc_cmd_first first; + struct xsc_cmd_mailbox *next; +}; + +#define RSP_FIRST_SIZE 14 +struct xsc_rsp_first { + __be32 data[RSP_FIRST_SIZE]; //can be larger, xsc_rsp_layout +}; + +struct xsc_rsp_msg { + struct list_head list; + struct cache_ent *cache; + u32 len; + struct xsc_rsp_first first; + struct xsc_cmd_mailbox *next; +}; + +typedef void (*xsc_cmd_cbk_t)(int status, void *context); + +//hw will use this for some records(e.g. vf_id) +struct cmdq_rsv { + u16 vf_id; + u8 rsv[2]; +}; + +//related with hw, won't change +#define CMDQ_ENTRY_SIZE 64 + +struct xsc_cmd_layout { + struct cmdq_rsv rsv0; + __be32 inlen; + __be64 in_ptr; + __be32 in[CMD_FIRST_SIZE]; + __be64 out_ptr; + __be32 outlen; + u8 token; + u8 sig; + u8 idx; + u8 type: 7; + u8 owner_bit: 1; //rsv for hw, arm will check this bit to make sure mem written +}; + +struct xsc_rsp_layout { + struct cmdq_rsv rsv0; + __be32 out[RSP_FIRST_SIZE]; + u8 token; + u8 sig; + u8 idx; + u8 type: 7; + u8 owner_bit: 1; //rsv for hw, driver will check this bit to make sure mem written +}; + +struct xsc_cmd_work_ent { + struct xsc_cmd_msg *in; + struct xsc_rsp_msg *out; + int idx; + struct completion done; + struct xsc_cmd *cmd; + struct work_struct work; + struct xsc_cmd_layout *lay; + struct xsc_rsp_layout *rsp_lay; + int ret; + u8 status; + u8 token; + struct timespec64 ts1; + struct timespec64 ts2; +}; + +struct xsc_cmd_debug { + struct dentry *dbg_root; + struct dentry *dbg_in; + struct dentry *dbg_out; + struct dentry 
*dbg_outlen; + struct dentry *dbg_status; + struct dentry *dbg_run; + void *in_msg; + void *out_msg; + u8 status; + u16 inlen; + u16 outlen; +}; + +struct xsc_cmd_stats { + u64 sum; + u64 n; + struct dentry *root; + struct dentry *avg; + struct dentry *count; + /* protect command average calculations */ + spinlock_t lock; +}; + +struct xsc_cmd_reg { + u32 req_pid_addr; + u32 req_cid_addr; + u32 rsp_pid_addr; + u32 rsp_cid_addr; + u32 req_buf_h_addr; + u32 req_buf_l_addr; + u32 rsp_buf_h_addr; + u32 rsp_buf_l_addr; + u32 msix_vec_addr; + u32 element_sz_addr; + u32 q_depth_addr; + u32 interrupt_stat_addr; +}; + +enum xsc_cmd_status { + XSC_CMD_STATUS_NORMAL, + XSC_CMD_STATUS_TIMEDOUT, +}; + +struct xsc_cmd { + struct xsc_cmd_reg reg; + void *cmd_buf; + void *cq_buf; + dma_addr_t dma; + dma_addr_t cq_dma; + u16 cmd_pid; + u16 cq_cid; + u8 owner_bit; + u8 cmdif_rev; + u8 log_sz; + u8 log_stride; + int max_reg_cmds; + int events; + u32 __iomem *vector; + + spinlock_t alloc_lock; /* protect command queue allocations */ + spinlock_t token_lock; /* protect token allocations */ + spinlock_t doorbell_lock; /* protect cmdq req pid doorbell */ + u8 token; + unsigned long bitmask; + char wq_name[XSC_CMD_WQ_MAX_NAME]; + struct workqueue_struct *wq; + struct task_struct *cq_task; + struct semaphore sem; + int mode; + struct xsc_cmd_work_ent *ent_arr[XSC_MAX_COMMANDS]; + struct dma_pool *pool; + struct xsc_cmd_debug dbg; + struct cmd_msg_cache cache; + int checksum_disabled; + struct xsc_cmd_stats stats[XSC_CMD_OP_MAX]; + unsigned int irqn; + u8 ownerbit_learned; + u8 cmd_status; +}; + +struct xsc_lock { + spinlock_t lock; /* xsc spin lock */ +}; + +struct xsc_reg_addr { + u64 tx_db; + u64 rx_db; + u64 complete_db; + u64 complete_reg; + u64 event_db; + u64 cpm_get_lock; + u64 cpm_put_lock; + u64 cpm_lock_avail; + u64 cpm_data_mem; + u64 cpm_cmd; + u64 cpm_addr; + u64 cpm_busy; +}; + +struct xsc_board_info { + u32 board_id; + char board_sn[XSC_BOARD_SN_LEN]; + __be64 guid; + u8 
guid_valid; + u8 hw_config_activated; +}; + +/* our core device */ +struct xsc_core_device { + struct pci_dev *pdev; + struct device *device; + struct xsc_priv priv; + struct xsc_dev_resource *dev_res; + void *xsc_ib_dev; + void *netdev; + void *eth_priv; + void *ovs_priv; + void __iomem *bar; + int bar_num; + + u8 mac_port; /* mac port */ + u8 pcie_no; /* pcie number */ + u8 pf_id; + u16 vf_id; + u16 glb_func_id; /* function id */ + + u16 gsi_qpn; /* logic qpn for gsi*/ + u16 msix_vec_base; + + struct mutex pci_status_mutex; /* protect pci_status */ + enum xsc_pci_status pci_status; + struct mutex intf_state_mutex; /* protect intf_state */ + unsigned long intf_state; + enum xsc_coredev_type coredev_type; + struct xsc_caps caps; + atomic_t num_qps; + struct xsc_cmd cmd; + struct xsc_lock reg_access_lock; + + void *counters_priv; + struct xsc_priv_device priv_device; + struct xsc_board_info *board_info; + void (*event)(struct xsc_core_device *dev, + enum xsc_dev_event event, unsigned long param); + + void (*event_handler)(void *adapter); + + struct xsc_reg_addr regs; + u32 chip_ver_h; + u32 chip_ver_m; + u32 chip_ver_l; + u32 hotfix_num; + u32 feature_flag; + u16 cmdq_ver; + u8 fw_version_major; + u8 fw_version_minor; + u16 fw_version_patch; + u32 fw_version_tweak; + u8 fw_version_extra_flag; + cpumask_var_t xps_cpumask; + + u8 reg_mr_via_cmdq; + u8 user_mode; + + struct xsc_port_ctrl port_ctrl; + + void *rtt_priv; + void *ap_priv; + void *pcie_lat; + + u8 bond_id; + struct list_head slave_node; +}; + +struct xsc_feature_flag { + u8 fpga_type:2; + u8 hps_ddr:2; + u8 onchip_ft:1; + u8 rdma_icrc:1; + u8 ma_xbar:1; + u8 anlt_fec:1; + u8 pp_tbl_dma:1; + u8 pct_exp:1; +}; + +struct xsc_interface { + struct list_head list; + int protocol; + + void *(*add)(struct xsc_core_device *dev); + void (*remove)(struct xsc_core_device *dev, void *context); + int (*attach)(struct xsc_core_device *dev, void *context); + void (*detach)(struct xsc_core_device *dev, void *context); + 
void (*event)(struct xsc_core_device *dev, void *context, + enum xsc_dev_event event, unsigned long param); + void *(*get_dev)(void *context); +}; + +struct xsc_device_context { + struct list_head list; + struct xsc_interface *intf; + void *context; + unsigned long state; +}; + +struct xsc_mem_entry { + struct list_head list; + char task_name[TASK_COMM_LEN]; + struct xsc_ioctl_mem_info mem_info; +}; + +struct xsc_device_product_info { + u16 vendor; + u16 device; + u16 subdevice; + char product_name[XSC_MAX_PRODUCT_NAME_LEN]; +}; + +#define XSC_DEVICE_PRODUCT_INFO(vend, dev, subdev, name) \ + .vendor = (vend), .device = (dev), \ + .subdevice = (subdev), .product_name = (name) + +static inline bool xsc_fw_is_available(struct xsc_core_device *dev) +{ + return dev->cmd.cmd_status == XSC_CMD_STATUS_NORMAL; +} + +int xsc_debugfs_init(struct xsc_core_device *dev); +void xsc_debugfs_fini(struct xsc_core_device *dev); +void xsc_register_debugfs(void); +void xsc_unregister_debugfs(void); + +bool xsc_device_registered(struct xsc_core_device *dev); +int xsc_register_device(struct xsc_core_device *dev); +void xsc_unregister_device(struct xsc_core_device *dev); +void xsc_attach_device(struct xsc_core_device *dev); +void xsc_detach_device(struct xsc_core_device *dev); +int xsc_register_interface(struct xsc_interface *intf); +void xsc_unregister_interface(struct xsc_interface *intf); +void xsc_reload_interface(struct xsc_core_device *dev, int protocol); +void xsc_reload_interfaces(struct xsc_core_device *dev, + int protocol1, int protocol2, + bool valid1, bool valid2); + +void xsc_remove_dev_by_protocol(struct xsc_core_device *dev, int protocol); +void xsc_add_dev_by_protocol(struct xsc_core_device *dev, int protocol); +void xsc_dev_list_lock(void); +void xsc_dev_list_unlock(void); +int xsc_dev_list_trylock(void); + +int xsc_cmd_write_reg_directly(struct xsc_core_device *dev, void *in, int in_size, void *out, + int out_size, int func_id); +int xsc_cmd_exec(struct xsc_core_device 
*dev, void *in, int in_size, + void *out, int out_size); +int xsc_create_mkey(struct xsc_core_device *xdev, void *in, void *out); +int xsc_destroy_mkey(struct xsc_core_device *xdev, void *in, void *out); +int xsc_reg_mr(struct xsc_core_device *dev, void *in, void *out); +int xsc_dereg_mr(struct xsc_core_device *dev, void *in, void *out); +int xsc_eth_reset(struct xsc_core_device *dev); +int xsc_tbm_init(struct xsc_core_device *dev); +int xsc_qos_init(struct xsc_core_device *xdev); + +bool xsc_chk_chip_ver(struct xsc_core_device *dev); + +int xsc_alloc_iae_idx(struct xsc_core_device *dev, int *iae_idx); +void xsc_release_iae_idx(struct xsc_core_device *dev, int *iae_idx); +int xsc_get_iae_idx(struct xsc_core_device *dev); + +int xsc_create_res(struct xsc_core_device *dev); +void xsc_destroy_res(struct xsc_core_device *dev); + +int xsc_counters_init(struct ib_device *ib_dev, + struct xsc_core_device *dev); +void xsc_counters_fini(struct ib_device *ib_dev, + struct xsc_core_device *dev); + +int xsc_priv_dev_init(struct ib_device *ib_dev, struct xsc_core_device *dev); +void xsc_priv_dev_fini(struct ib_device *ib_dev, struct xsc_core_device *dev); + +int xsc_priv_alloc_chrdev_region(void); +void xsc_priv_unregister_chrdev_region(void); + +int xsc_eth_sysfs_create(struct net_device *netdev, struct xsc_core_device *dev); +void xsc_eth_sysfs_remove(struct net_device *netdev, struct xsc_core_device *dev); +int xsc_rtt_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev); +void xsc_rtt_sysfs_fini(struct xsc_core_device *xdev); + +void xsc_ib_sysfs_init(struct ib_device *ib_dev, struct xsc_core_device *xdev); +void xsc_ib_sysfs_fini(struct ib_device *ib_dev, struct xsc_core_device *xdev); + +int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, + struct xsc_caps *caps); +int xsc_cmd_enable_hca(struct xsc_core_device *dev, u16 vf_num, u16 max_msix); +int xsc_cmd_disable_hca(struct xsc_core_device *dev, u16 vf_num); +int xsc_cmd_modify_hca(struct xsc_core_device 
*dev); +int xsc_query_guid(struct xsc_core_device *dev); +void xsc_free_board_info(void); + +int xsc_irq_eq_create(struct xsc_core_device *dev); +int xsc_irq_eq_destroy(struct xsc_core_device *dev); + +int xsc_sriov_init(struct xsc_core_device *dev); +void xsc_sriov_cleanup(struct xsc_core_device *dev); +int xsc_sriov_attach(struct xsc_core_device *dev); +void xsc_sriov_detach(struct xsc_core_device *dev); +int xsc_core_sriov_configure(struct pci_dev *dev, int num_vfs); +int xsc_sriov_sysfs_init(struct xsc_core_device *dev); +void xsc_sriov_sysfs_cleanup(struct xsc_core_device *dev); +int xsc_create_vfs_sysfs(struct xsc_core_device *dev, int num_vfs); +void xsc_destroy_vfs_sysfs(struct xsc_core_device *dev, int num_vfs); +int xsc_create_vf_group_sysfs(struct xsc_core_device *dev, + u32 group_id, struct kobject *group_kobj); +void xsc_destroy_vf_group_sysfs(struct xsc_core_device *dev, + struct kobject *group_kobj); +u32 xsc_eth_pcie_read32_by_mac_port(struct xsc_core_device *xdev, u32 mac_port, + u32 eth_ip_inter_addr); +void xsc_eth_pcie_write32_by_mac_port(struct xsc_core_device *xdev, u32 mac_port, + u32 eth_ip_inter_addr, u32 val); +struct cpumask *xsc_comp_irq_get_affinity_mask(struct xsc_core_device *dev, int vector); +void mask_cpu_by_node(int node, struct cpumask *dstp); +int xsc_get_link_speed(struct xsc_core_device *dev); +int xsc_chip_type(struct xsc_core_device *dev); +int xsc_eth_restore_nic_hca(struct xsc_core_device *dev); + +#define XSC_ESWITCH_MANAGER(dev) ((dev)->caps.eswitch_manager) + +static inline bool xsc_sriov_is_enabled(struct xsc_core_device *dev) +{ + return pci_num_vf(dev->pdev) ? 
true : false; +} + +static inline u16 xsc_core_max_vfs(const struct xsc_core_device *dev) +{ + return dev->priv.sriov.max_vfs; +} + +static inline int xsc_core_vfs_num(const struct xsc_core_device *dev) +{ + return dev->priv.sriov.num_vfs; +} + +static inline bool xsc_core_is_pf(const struct xsc_core_device *dev) +{ + return dev->coredev_type == XSC_COREDEV_PF; +} + +static inline bool xsc_core_is_sf(const struct xsc_core_device *dev) +{ + return dev->coredev_type == XSC_COREDEV_SF; +} + +static inline bool xsc_core_is_ecpf(struct xsc_core_device *dev) +{ + return dev->caps.embedded_cpu; +} + +#define XSC_ESWITCH_MANAGER(dev) ((dev)->caps.eswitch_manager) +#define ESW_ALLOWED(esw) ((esw) && XSC_ESWITCH_MANAGER((esw)->dev)) + +static inline bool +xsc_core_is_ecpf_esw_manager(const struct xsc_core_device *dev) +{ + return dev->caps.embedded_cpu && dev->caps.eswitch_manager; +} + +static inline bool +xsc_ecpf_vport_exists(const struct xsc_core_device *dev) +{ + return xsc_core_is_pf(dev) && dev->caps.ecpf_vport_exists; +} + +static inline bool +xsc_core_is_vport_manager(const struct xsc_core_device *dev) +{ + return dev->caps.vport_group_manager && xsc_core_is_pf(dev); +} + +static inline bool xsc_rl_is_supported(struct xsc_core_device *dev) +{ + return false; +} + +/* define in andes */ +#define HIF_CPM_IDA_DATA_MEM_STRIDE 0x40 + +#define CPM_IAE_CMD_READ 0 +#define CPM_IAE_CMD_WRITE 1 + +#define CPM_IAE_ADDR_REG_STRIDE HIF_CPM_IDA_ADDR_REG_STRIDE + +#define CPM_IAE_DATA_MEM_STRIDE HIF_CPM_IDA_DATA_MEM_STRIDE + +#define CPM_IAE_DATA_MEM_MAX_LEN 16 + +struct iae_cmd { + union { + struct { + u32 iae_idx:HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH; + u32 iae_len:HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH; + u32 iae_r0w1:HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH; + }; + unsigned int raw_data; + }; +}; + +static inline void acquire_ia_lock(struct xsc_core_device *xdev, int *iae_idx) +{ + int lock_val; + int lock_vld; + + lock_val = readl(REG_ADDR(xdev, xdev->regs.cpm_get_lock)); + lock_vld = 
lock_val >> HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT; + if (lock_vld) + *iae_idx = lock_val & HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK; + else + *iae_idx = -1; +} + +#define ACQUIRE_IA_LOCK(bp, iae_idx) \ + do { \ + int idx; \ + acquire_ia_lock(bp, &idx); \ + iae_idx = idx; \ + } while (0) + +static inline void release_ia_lock(struct xsc_core_device *xdev, int lock_idx) +{ + writel(lock_idx, REG_ADDR(xdev, xdev->regs.cpm_put_lock)); +} + +#define RELEASE_IA_LOCK(bp, iae_idx) release_ia_lock(bp, iae_idx) + +static inline void ia_write_data(struct xsc_core_device *xdev, u32 *ptr, int n, int iae_idx) +{ + int i; + int offset = xdev->regs.cpm_data_mem + (iae_idx) * CPM_IAE_DATA_MEM_STRIDE; + + for (i = 0; i < n; i++) { + writel(*(ptr++), REG_ADDR(xdev, offset)); + offset += sizeof(*ptr); + } +} + +static inline void ia_read_data(struct xsc_core_device *xdev, u32 *ptr, int n, int iae_idx) +{ + int i; + int offset = xdev->regs.cpm_data_mem + (iae_idx) * CPM_IAE_DATA_MEM_STRIDE; + u32 *pptr = ptr; + + for (i = 0; i < n; i++) { + *(pptr) = readl(REG_ADDR(xdev, offset)); + offset += sizeof(*ptr); + pptr = pptr + 1; + } +} + +static inline void ia_write_reg_addr(struct xsc_core_device *xdev, u32 reg, int iae_idx) +{ + int offset = xdev->regs.cpm_addr + (iae_idx) * CPM_IAE_ADDR_REG_STRIDE; + + writel(reg, REG_ADDR(xdev, offset)); +} + +static inline void initiate_ia_cmd(struct xsc_core_device *xdev, int iae_idx, int length, int r0w1) +{ + struct iae_cmd cmd; + int addr = xdev->regs.cpm_cmd; + + cmd.iae_r0w1 = r0w1; + cmd.iae_len = length - 1; + cmd.iae_idx = iae_idx; + writel(cmd.raw_data, REG_ADDR(xdev, addr)); +} + +static inline void initiate_ia_write_cmd(struct xsc_core_device *xdev, int iae_idx, int length) +{ + initiate_ia_cmd(xdev, iae_idx, length, CPM_IAE_CMD_WRITE); +} + +static inline void initiate_ia_read_cmd(struct xsc_core_device *xdev, int iae_idx, int length) +{ + initiate_ia_cmd(xdev, iae_idx, length, CPM_IAE_CMD_READ); +} + +static inline void wait_for_complete(struct 
xsc_core_device *xdev, int iae_idx) +{ + while ((readl(REG_ADDR(xdev, xdev->regs.cpm_busy)) & (1 << iae_idx))) + ; +} + +static inline void ia_write_reg_mr(struct xsc_core_device *xdev, u32 reg, + u32 *ptr, int n, int idx) +{ + ia_write_data(xdev, ptr, n, idx); + ia_write_reg_addr(xdev, reg, idx); + initiate_ia_write_cmd(xdev, idx, n); +} + +#define IA_WRITE_REG_MR(bp, reg, ptr, n, idx) ia_write_reg_mr(bp, reg, ptr, n, idx) + +static inline void ia_write(struct xsc_core_device *xdev, u32 reg, u32 *ptr, int n) +{ + int iae_idx; + + acquire_ia_lock(xdev, &iae_idx); + ia_write_data(xdev, ptr, n, iae_idx); + ia_write_reg_addr(xdev, reg, iae_idx); + initiate_ia_write_cmd(xdev, iae_idx, n); + release_ia_lock(xdev, iae_idx); +} + +#define IA_WRITE(bp, reg, ptr, n) ia_write(bp, reg, ptr, n) + +static inline void ia_read(struct xsc_core_device *xdev, u32 reg, u32 *ptr, int n) +{ + int iae_idx; + + acquire_ia_lock(xdev, &iae_idx); + ia_write_reg_addr(xdev, reg, iae_idx); + initiate_ia_read_cmd(xdev, iae_idx, n); + wait_for_complete(xdev, iae_idx); + ia_read_data(xdev, ptr, n, iae_idx); + release_ia_lock(xdev, iae_idx); +} + +#define IA_READ(bp, reg, ptr, n) ia_read(bp, reg, ptr, n) + +static inline u32 reg_read32(struct xsc_core_device *dev, u32 offset) +{ + u32 val = 0; + + if (xsc_core_is_pf(dev)) + val = readl(REG_ADDR(dev, offset)); + else + IA_READ(dev, offset, &val, 1); + + return val; +} + +static inline void reg_write32(struct xsc_core_device *dev, u32 offset, u32 val) +{ + u32 *ptr = &val; + + if (xsc_core_is_pf(dev)) + writel(val, REG_ADDR(dev, offset)); + else + IA_WRITE(dev, offset, ptr, 1); +} + +#define REG_RD32(dev, offset) reg_read32(dev, offset) +#define REG_WR32(dev, offset, val) reg_write32(dev, offset, val) + +static inline unsigned long bdf_to_key(unsigned int domain, unsigned int bus, unsigned int devfn) +{ + return ((unsigned long)domain << 32) | ((bus & 0xff) << 16) | (devfn & 0xff); +} + +static inline void +funcid_to_pf_vf_index(struct xsc_caps 
*caps, u16 func_id, u8 *pf_no, u8 *pf_id, u16 *vf_id) +{ + if (func_id >= caps->pf0_vf_funcid_base && func_id <= caps->pf0_vf_funcid_top) { + *pf_id = 0; + *pf_no = caps->pcie_host; + *vf_id = func_id - caps->pf0_vf_funcid_base; + } else if (func_id >= caps->pf1_vf_funcid_base && func_id <= caps->pf1_vf_funcid_top) { + *pf_id = 1; + *pf_no = caps->pcie_host; + *vf_id = func_id - caps->pf1_vf_funcid_base; + } else if (func_id >= caps->pcie0_pf_funcid_base && func_id <= caps->pcie0_pf_funcid_top) { + *pf_id = func_id - caps->pcie0_pf_funcid_base; + *pf_no = 0; + *vf_id = -1; + } else { + *pf_id = func_id - caps->pcie1_pf_funcid_base; + *pf_no = 1; + *vf_id = -1; + } +} + +static inline bool +is_support_rdma(struct xsc_core_device *dev) +{ + if (!dev) + return false; + + if (dev->caps.hw_feature_flag & XSC_HW_RDMA_SUPPORT) + return true; + + return false; +} + +static inline bool is_support_rdma_cm(struct xsc_core_device *dev) +{ + return dev->caps.hw_feature_flag & XSC_HW_RDMA_CM_SUPPORT; +} + +static inline bool +is_support_pfc_prio_statistic(struct xsc_core_device *dev) +{ + if (!dev) + return false; + + if (dev->caps.hw_feature_flag & XSC_HW_PFC_PRIO_STATISTIC_SUPPORT) + return true; + + return false; +} + +static inline bool +is_support_pfc_stall_stats(struct xsc_core_device *dev) +{ + if (!dev) + return false; + + if (dev->caps.hw_feature_flag & XSC_HW_PFC_STALL_STATS_SUPPORT) + return true; + + return false; +} + +static inline bool is_support_hw_pf_stats(struct xsc_core_device *dev) +{ + return xsc_core_is_pf(dev); +} + +static inline void xsc_set_user_mode(struct xsc_core_device *dev, u8 mode) +{ + dev->user_mode = mode; +} + +static inline u8 xsc_get_user_mode(struct xsc_core_device *dev) +{ + return dev->user_mode; +} + +void xsc_pci_exit(void); + +void xsc_remove_eth_driver(void); + +void xsc_remove_rdma_driver(void); + +void xsc_set_exit_flag(void); +bool xsc_get_exit_flag(void); +bool exist_incomplete_qp_flush(void); +#endif /* XSC_CORE_H */ diff --git 
a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_eswitch.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_eswitch.h new file mode 100644 index 000000000000..9da4396d66ee --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_eswitch.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ESWITCH_H +#define XSC_ESWITCH_H + +enum { + XSC_ESWITCH_NONE, + XSC_ESWITCH_LEGACY, + XSC_ESWITCH_OFFLOADS +}; + +enum { + REP_ETH, + REP_IB, + NUM_REP_TYPES, +}; + +enum { + REP_UNREGISTERED, + REP_REGISTERED, + REP_LOADED, +}; + +enum xsc_switchdev_event { + XSC_SWITCHDEV_EVENT_PAIR, + XSC_SWITCHDEV_EVENT_UNPAIR, +}; + +enum { + SET_VLAN_STRIP = BIT(0), + SET_VLAN_INSERT = BIT(1), + CLR_VLAN_STRIP = BIT(2), + CLR_VLAN_INSERT = BIT(3), +}; + +#endif /* XSC_ESWITCH_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h new file mode 100644 index 000000000000..97cbded4a2f2 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_fs.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_FS_H +#define XSC_FS_H + +#include +#include +#include + +enum xsc_list_type { + XSC_NVPRT_LIST_TYPE_UC = 0x0, + XSC_NVPRT_LIST_TYPE_MC = 0x1, + XSC_NVPRT_LIST_TYPE_VLAN = 0x2, + XSC_NVPRT_LIST_TYPE_VLAN_OFFLOAD = 0x03, +}; + +enum xsc_vlan_rule_type { + XSC_VLAN_RULE_TYPE_UNTAGGED, + XSC_VLAN_RULE_TYPE_ANY_CTAG_VID, + XSC_VLAN_RULE_TYPE_ANY_STAG_VID, + XSC_VLAN_RULE_TYPE_MATCH_CTAG_VID, + XSC_VLAN_RULE_TYPE_MATCH_STAG_VID, +}; + +struct xsc_vlan_table { + DECLARE_BITMAP(active_cvlans, VLAN_N_VID); + DECLARE_BITMAP(active_svlans, VLAN_N_VID); + DECLARE_BITMAP(active_outer_cvlans, VLAN_N_VID); + DECLARE_BITMAP(active_outer_svlans, VLAN_N_VID); + u8 cvlan_filter_disabled; +}; + +struct xsc_l2_table { + struct hlist_head netdev_uc[XSC_L2_ADDR_HASH_SIZE]; + struct hlist_head netdev_mc[XSC_L2_ADDR_HASH_SIZE]; + u8 broadcast_enabled; + u8 allmulti_enabled; + u8 promisc_enabled; +}; + +struct xsc_flow_steering { + struct xsc_vlan_table vlan; + struct xsc_l2_table l2; +}; + +int xsc_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid); +int xsc_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, + u16 vid); +void xsc_set_rx_mode_work(struct work_struct *work); +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h new file mode 100644 index 000000000000..d1fa8b207607 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_hsi.h @@ -0,0 +1,373 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_HSI_H +#define XSC_HSI_H + +#include + +#include +#include +#include "common/xsc_macro.h" + +#ifdef MSIX_SUPPORT +#else +#define NEED_CREATE_RX_THREAD +#endif + +#define PAGE_SHIFT_4K 12 +#define PAGE_SIZE_4K (_AC(1, UL) << PAGE_SHIFT_4K) +#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1)) + +#ifndef EQ_NUM_MAX +#define EQ_NUM_MAX 1024 +#endif +#ifndef EQ_SIZE_MAX +#define EQ_SIZE_MAX 1024 +#endif + +#define XSC_RSS_INDIR_TBL_S 256 +#define XSC_MAX_TSO_PAYLOAD 0x10000/*64kb*/ + +#define MAX_BOARD_NUM 32 + +#define DMA_LO_LE(x) __cpu_to_le32(lower_32_bits(x)) +#define DMA_HI_LE(x) __cpu_to_le32(upper_32_bits(x)) +#define DMA_REGPAIR_LE(x, val) do { \ + (x).hi = DMA_HI_LE((val)); \ + (x).lo = DMA_LO_LE((val)); \ + } while (0) + +#define WR_LE_16(x, val) (x = __cpu_to_le16(val)) +#define WR_LE_32(x, val) (x = __cpu_to_le32(val)) +#define WR_LE_64(x, val) (x = __cpu_to_le64(val)) +#define WR_LE_R64(x, val) (DMA_REGPAIR_LE(x, val)) +#define WR_BE_32(x, val) (x = __cpu_to_be32(val)) + +#define RD_LE_16(x) __le16_to_cpu(x) +#define RD_LE_32(x) __le32_to_cpu(x) +#define RD_BE_32(x) __be32_to_cpu(x) + +#define WR_REG(addr, val) mmio_write64_le(addr, val) +#define RD_REG(addr) mmio_read64_le(addr) + +#define XSC_MPT_MAP_EN 0 + +/* FIXME: 32-byte alignment for SW descriptors for Amber for now */ +#define XSC_DESC_ALIGNMENT 32 + +/* each ds holds one fragment in skb */ +#define XSC_MAX_RX_FRAGS 4 +#define XSC_RX_FRAG_SZ_ORDER 0 +#define XSC_RX_FRAG_SZ (PAGE_SIZE << XSC_RX_FRAG_SZ_ORDER) +#define DEFAULT_FRAG_SIZE (2048) + +/* message opcode */ +enum { + XSC_MSG_OPCODE_SEND = 0, + XSC_MSG_OPCODE_RDMA_WRITE = 1, + XSC_MSG_OPCODE_RDMA_READ = 2, + XSC_MSG_OPCODE_MAD = 3, + XSC_MSG_OPCODE_RDMA_ACK = 4, + XSC_MSG_OPCODE_RDMA_ACK_READ = 5, + XSC_MSG_OPCODE_RDMA_CNP = 6, + XSC_MSG_OPCODE_RAW = 7, + XSC_MSG_OPCODE_VIRTIO_NET = 8, + XSC_MSG_OPCODE_VIRTIO_BLK = 9, + XSC_MSG_OPCODE_RAW_TPE = 10, + XSC_MSG_OPCODE_INIT_QP_REQ = 11, + XSC_MSG_OPCODE_INIT_QP_RSP = 12, + 
XSC_MSG_OPCODE_INIT_PATH_REQ = 13, + XSC_MSG_OPCODE_INIT_PATH_RSP = 14, +}; + +/* TODO: sw cqe opcode*/ +enum { + XSC_OPCODE_RDMA_REQ_SEND = 0, + XSC_OPCODE_RDMA_REQ_SEND_IMMDT = 1, + XSC_OPCODE_RDMA_RSP_RECV = 2, + XSC_OPCODE_RDMA_RSP_RECV_IMMDT = 3, + XSC_OPCODE_RDMA_REQ_WRITE = 4, + XSC_OPCODE_RDMA_REQ_WRITE_IMMDT = 5, + XSC_OPCODE_RDMA_RSP_WRITE_IMMDT = 6, + XSC_OPCODE_RDMA_REQ_READ = 7, + XSC_OPCODE_RDMA_REQ_ERROR = 8, + XSC_OPCODE_RDMA_RSP_ERROR = 9, + XSC_OPCODE_RDMA_CQE_ERROR = 10, + XSC_OPCODE_RDMA_MAD_REQ_SEND, + XSC_OPCODE_RDMA_MAD_RSP_RECV, +}; + +enum { + XSC_REQ = 0, + XSC_RSP = 1, +}; + +enum { + XSC_WITHOUT_IMMDT = 0, + XSC_WITH_IMMDT = 1, +}; + +enum { + XSC_ERR_CODE_NAK_RETRY = 0x40, + XSC_ERR_CODE_NAK_OPCODE = 0x41, + XSC_ERR_CODE_NAK_MR = 0x42, + XSC_ERR_CODE_NAK_OPERATION = 0x43, + XSC_ERR_CODE_NAK_RNR = 0x44, + XSC_ERR_CODE_LOCAL_MR = 0x45, + XSC_ERR_CODE_LOCAL_LEN = 0x46, + XSC_ERR_CODE_LOCAL_OPCODE = 0x47, + XSC_ERR_CODE_CQ_OVER_FLOW = 0x48, + XSC_ERR_CODE_STRG_ACC_GEN_CQE = 0x4c, + XSC_ERR_CODE_CQE_ACC = 0x4d, + XSC_ERR_CODE_FLUSH = 0x4e, + XSC_ERR_CODE_MALF_WQE_HOST = 0x50, + XSC_ERR_CODE_MALF_WQE_INFO = 0x51, + XSC_ERR_CODE_MR_NON_NAK = 0x52, + XSC_ERR_CODE_OPCODE_GEN_CQE = 0x61, + XSC_ERR_CODE_MANY_READ = 0x62, + XSC_ERR_CODE_LEN_GEN_CQE = 0x63, + XSC_ERR_CODE_MR = 0x65, + XSC_ERR_CODE_MR_GEN_CQE = 0x66, + XSC_ERR_CODE_OPERATION = 0x67, + XSC_ERR_CODE_MALF_WQE_INFO_GEN_NAK = 0x68, +}; + +/* QP type */ +enum { + XSC_QUEUE_TYPE_RDMA_RC = 0, + XSC_QUEUE_TYPE_RDMA_MAD = 1, + XSC_QUEUE_TYPE_RAW = 2, + XSC_QUEUE_TYPE_VIRTIO_NET = 3, + XSC_QUEUE_TYPE_VIRTIO_BLK = 4, + XSC_QUEUE_TYPE_RAW_TPE = 5, + XSC_QUEUE_TYPE_RAW_TSO = 6, + XSC_QUEUE_TYPE_RAW_TX = 7, + XSC_QUEUE_TYPE_INVALID = 0xFF, +}; + +/* CQ type */ +enum { + XSC_CQ_TYPE_NORMAL = 0, + XSC_CQ_TYPE_VIRTIO = 1, +}; + +enum xsc_qp_state { + XSC_QP_STATE_RST = 0, + XSC_QP_STATE_INIT = 1, + XSC_QP_STATE_RTR = 2, + XSC_QP_STATE_RTS = 3, + XSC_QP_STATE_SQER = 4, + XSC_QP_STATE_SQD = 5, + 
XSC_QP_STATE_ERR = 6, + XSC_QP_STATE_SQ_DRAINING = 7, + XSC_QP_STATE_SUSPENDED = 9, + XSC_QP_NUM_STATE +}; + +enum { + XSC_SEND_SEG_MAX = 32, + XSC_BASE_WQE_SHIFT = 4, + XSC_SEND_SEG_NUM = 4, + XSC_SEND_WQE_SHIFT = 6, + XSC_CTRL_SEG_NUM = 1, + XSC_RADDR_SEG_NUM = 1, +}; + +enum { + XSC_RECV_SEG_MAX = 4, + XSC_RECV_SEG_NUM = 1, + XSC_RECV_WQE_SHIFT = 4, +}; + +enum { + XSC_INLINE_SIZE_MAX = 15, +}; + +/* Descriptors that are allocated by SW and accessed by HW, 32-byte aligned + * this is to keep descriptor structures packed + */ +struct regpair { + __le32 lo; + __le32 hi; +}; + +struct xsc_cqe { + union { + u8 msg_opcode; + struct { + u8 error_code:7; + u8 is_error:1; + }; + }; + __le32 qp_id:15; + u8 rsv1:1; + u8 se:1; + u8 has_pph:1; + u8 type:1; + u8 with_immdt:1; + u8 csum_err:4; + __le32 imm_data; + __le32 msg_len; + __le32 vni; + __le64 ts:48; + __le16 wqe_id; + __le16 rsv[3]; + __le16 rsv2:15; + u8 owner:1; +}; + +/* CQ doorbell */ +union xsc_cq_doorbell { + struct{ + u32 cq_next_cid:16; + u32 cq_id:15; + u32 arm:1; + }; + u32 val; +}; + +/* EQE TBD */ +struct xsc_eqe { + u8 type; + u8 sub_type; + __le16 queue_id:15; + u8 rsv1:1; + u8 err_code; + u8 rsvd[2]; + u8 rsv2:7; + u8 owner:1; +}; + +/* EQ doorbell */ +union xsc_eq_doorbell { + struct{ + u32 eq_next_cid : 11; + u32 eq_id : 11; + u32 arm : 1; + }; + u32 val; +}; + +/*for beryl tcam table .begin*/ +#define XSC_TBM_PCT_DW_SIZE_MAX 20 +#define XSC_TCAM_REG_ADDR_STRIDE 4 + +enum xsc_tbm_tcam_type { + XSC_TBM_TCAM_PCT = 0, + XSC_TBM_TCAM_PRS_STAGE0, + XSC_TBM_TCAM_PRS_STAGE1, + XSC_TBM_TCAM_PRS_STAGE2, +}; + +enum xsc_tbm_tcam_oper { + XSC_TCAM_OP_X_WRITE = 0, + XSC_TCAM_OP_Y_WRITE, + XSC_TCAM_OP_ACTION_WRITE, + XSC_TCAM_OP_X_READ, + XSC_TCAM_OP_Y_READ, + XSC_TCAM_OP_ACTION_READ, + XSC_TCAM_OP_TCAM_FLUSH, + XSC_TCAM_OP_ACTION_FLUSH, + XSC_TCAM_OP_CPU_SEARCH, + XSC_TCAM_OP_LONG_X_WRT, + XSC_TCAM_OP_LONG_Y_WRT +}; + +enum xsc_tbm_prs_stage_encode { + XSC_PRS_STAGE0_HDR_TYPE_NONE = 0x00, + 
XSC_PRS_STAGE0_HDR_TYPE_ETH0 = 0x01, + XSC_PRS_STAGE1_HDR_TYPE_NONE = 0x10, + XSC_PRS_STAGE1_HDR_TYPE_RSV = 0x11, + XSC_PRS_STAGE1_HDR_TYPE_IPV4 = 0x12, + XSC_PRS_STAGE1_HDR_TYPE_IPV6 = 0x13, + XSC_PRS_STAGE2_HDR_TYPE_NONE = 0x20, + XSC_PRS_STAGE2_HDR_TYPE_TCP = 0x21, + XSC_PRS_STAGE2_HDR_TYPE_UDP = 0x22, + XSC_PRS_STAGE2_HDR_TYPE_GRE = 0x23, + XSC_PRS_STAGE2_HDR_TYPE_RSV = 0x24, + XSC_PRS_STAGE2_HDR_TYPE_IFA_TCP = 0x25, + XSC_PRS_STAGE2_HDR_TYPE_IFA_UDP = 0x26, + XSC_PRS_STAGE2_HDR_TYPE_IFA_GRE = 0x27, + XSC_PRS_STAGE6_HDR_TYPE_ICMP = 0x63, + XSC_PRS_STAGEX_HDR_TYPE_PAYLOAD = 0xa0, + XSC_PRS_STAGEX_HDR_TYPE_BTH = 0xa1, +}; + +enum xsc_tbm_prs_eth_hdr_type_encode { + ETH_HDR_TYPE_MAC0 = 0x0, + ETH_HDR_TYPE_MAC0_VLANA = 0x2, + ETH_HDR_TYPE_MAC0_VLANA_VLANB = 0x3, +}; + +enum xsc_tbm_pct_pkttype { + XSC_PCT_RDMA_NORMAL = 0x0, + XSC_PCT_RDMA_CNP, + XSC_PCT_RDMA_MAD, + XSC_PCT_RAW, + XSC_PCT_RAW_TPE, + XSC_PCT_VIRTIO_NET_TO_HOST, + XSC_PCT_SOC_WITH_PPH, +}; + +enum xsc_tbm_pct_inport { + XSC_PCT_PORT_NIF0 = 0x0, + XSC_PCT_PORT_NIF1, + XSC_PCT_PORT_PCIE0_PF0, + XSC_PCT_PORT_PCIE0_PF1, + XSC_PCT_PORT_PCIE1_PF0, +}; + +/*for beryl tcam table .end*/ + +/* Size of WQE */ +#define XSC_SEND_WQE_SIZE BIT(XSC_SEND_WQE_SHIFT) +#define XSC_RECV_WQE_SIZE BIT(XSC_RECV_WQE_SHIFT) + +union xsc_db_data { + struct { + __le32 sq_next_pid:16; + __le32 sqn:15; + __le32:1; + }; + struct { + __le32 rq_next_pid:13; + __le32 rqn:15; + __le32:4; + }; + struct { + __le32 cq_next_cid:16; + __le32 cqn:15; + __le32 solicited:1; + + }; + __le32 raw_data; +}; + +#define XSC_BROADCASTID_MAX 2 +#define XSC_TBM_BOMT_DESTINFO_SHIFT (XSC_BROADCASTID_MAX / 2) + +enum { + XSC_EQ_VEC_ASYNC = 0, + XSC_VEC_CMD = 1, + XSC_VEC_CMD_EVENT = 2, + XSC_DMA_READ_DONE_VEC = 3, + XSC_EQ_VEC_COMP_BASE, +}; + +struct rxe_bth { + u8 opcode; + u8 flags; + __be16 pkey; + __be32 qpn; + __be32 apsn; +}; + +struct rxe_deth { + __be32 qkey; + __be32 sqp; +}; + +#endif /* XSC_HSI_H */ diff --git 
a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h new file mode 100644 index 000000000000..e2355cf91a02 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_ioctl.h @@ -0,0 +1,317 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_IOCTL_H +#define XSC_IOCTL_H + +#include +#include + +/* Documentation/ioctl/ioctl-number.txt */ +#define XSC_IOCTL_MAGIC (0x1b) /* TBD */ +#define XSC_IOCTL_CMDQ \ + _IOWR(XSC_IOCTL_MAGIC, 1, struct xsc_ioctl_hdr) +#define XSC_IOCTL_DRV_GET \ + _IOR(XSC_IOCTL_MAGIC, 2, struct xsc_ioctl_hdr) +#define XSC_IOCTL_DRV_SET \ + _IOWR(XSC_IOCTL_MAGIC, 3, struct xsc_ioctl_hdr) +#define XSC_IOCTL_MEM \ + _IOWR(XSC_IOCTL_MAGIC, 4, struct xsc_ioctl_hdr) +#define XSC_IOCTL_CMDQ_RAW \ + _IOWR(XSC_IOCTL_MAGIC, 5, struct xsc_ioctl_hdr) +#define XSC_IOCTL_USER_MODE \ + _IOWR(XSC_IOCTL_MAGIC, 8, struct xsc_ioctl_hdr) + +#define XSC_IOCTL_CHECK_FILED 0x01234567 +enum { + XSC_IOCTL_OP_GET_LOCAL, + XSC_IOCTL_OP_GET_VF_INFO, + XSC_IOCTL_OP_GET_CONTEXT, + XSC_IOCTL_OP_GET_INFO_BY_BDF, + XSC_IOCTL_OP_GET_MAX +}; + +enum { + XSC_IOCTL_GET_PHY_INFO = 0x100, + XSC_IOCTL_GET_FORCE_PCP = 0x101, + XSC_IOCTL_GET_FORCE_DSCP = 0x102, + XSC_IOCTL_GET_CMA_PCP = 0x103, + XSC_IOCTL_GET_CMA_DSCP = 0x104, + XSC_IOCTL_GET_CONTEXT = 0x105, + XSC_IOCTL_GAT_MAX +}; + +enum { + XSC_IOCTL_SET_QP_STATUS = 0x200, + XSC_IOCTL_SET_FORCE_PCP = 0x201, + XSC_IOCTL_SET_FORCE_DSCP = 0x202, + XSC_IOCTL_SET_CMA_PCP = 0x203, + XSC_IOCTL_SET_CMA_DSCP = 0x204, + XSC_IOCTL_SET_MAX +}; + +enum { + XSC_IOCTL_MEM_ALLOC = 0x300, + XSC_IOCTL_MEM_FREE, + XSC_IOCTL_MEM_MAX +}; + +enum { + XSC_IOCTL_GET_VECTOR_MATRIX = 0x400, + XSC_IOCTL_SET_LOG_LEVEL = 0x401, + XSC_IOCTL_SET_CMD_VERBOSE = 0x402, + XSC_IOCTL_DRIVER_MAX +}; + +enum { + XSC_IOCTL_OPCODE_ENABLE_USER_MODE = 0x600, +}; + +enum xsc_flow_tbl_id { + 
XSC_FLOW_TBL_IPAT, //IN_PORT_ATTR + XSC_FLOW_TBL_IPVLANMT, //IN_PORT_VLAN_MEMBER + XSC_FLOW_TBL_IN_VLAN_M, //IN_VLAN_MAPPING + XSC_FLOW_TBL_HOST_VLAN_M, //HOST_VLAN_MAPPING + XSC_FLOW_TBL_PCT_V4, //PACKET_CLASSIFIER_V4 + XSC_FLOW_TBL_PCT_V6, //PACKET_CLASSIFIER_V6 + XSC_FLOW_TBL_WCT_KP, //WCT_KEY_PROFILE + XSC_FLOW_TBL_WCT, //WILDCARD_TBL + XSC_FLOW_TBL_FKP, //FLOW_KEY_PROFILE + XSC_FLOW_TBL_EM, //EXACT_MATCH + XSC_FLOW_TBL_FAT, //FLOW_ACTION + XSC_FLOW_TBL_TNL_ECP, //TUNNEL_ENCAP + XSC_FLOW_TBL_ERP_HDR, //ERSPAN_HDR_INFO + XSC_FLOW_TBL_MIR_IDX, //MIRROR_INDEX + XSC_FLOW_TBL_MIR, //MIRROR_TBL + XSC_FLOW_TBL_MIR_HDR, //ENCAP_MIRROR_HDR + XSC_FLOW_TBL_VER, //VERSION_TBL + XSC_FLOW_TBL_LCMT, //LCMT_TBL + XSC_FLOW_TBL_CT, //CONN_TRACK + XSC_FLOW_TBL_EPAT, //EG_PORT_ATTR + XSC_FLOW_TBL_OPVLANMT, //OUT_PORT_VLAN_MEMBER + XSC_FLOW_TBL_RSS_HASH, //RSS_HASH + XSC_FLOW_TBL_MDF_MAC, //MODIFY_MAC + XSC_FLOW_TBL_MDF_IP, //MODIFY_IP + XSC_FLOW_TBL_MDF_TPID, //MODIFY_TPID + XSC_FLOW_TBL_ECP_HDR, //ENCAP_HDR + XSC_FLOW_TBL_ECP_MAC, //ENCAP_MAC + XSC_FLOW_TBL_ECP_IP, //ENCAP_IP + XSC_FLOW_TBL_ECP_TPID, //ENCAP_TPID + XSC_FLOW_TBL_ECP_TP_TNL, //ENCAP_TP_TUNNEL + XSC_FLOW_TBL_ECP_DPORT, //ENCAP_DPORT + XSC_FLOW_TBL_VFSO, //VF_START_OFST + XSC_FLOW_TBL_IACL, //INGRESS_ACL + XSC_FLOW_TBL_IACL_CNT, //INGRESS_ACL_COUNTER + XSC_FLOW_TBL_EACL, //EGRESS_ACL + XSC_FLOW_TBL_EACL_CNT, //EGRESS_ACL_COUNTER + XSC_FLOW_TBL_EM_EXT, //EXACT_MATCH_EXT + XSC_FLOW_TBL_EM_EXT_2M_HASH_ADR, //EM_EXT_2M_HASH_ADDR + XSC_FLOW_TBL_EM_EXT_1G_HASH_ADR, //EM_EXT_1G_HASH_ADDR + XSC_FLOW_TBL_EM_EXT_2M_KEY_ADR, //EM_EXT_2M_KEY_ADDR + XSC_FLOW_TBL_EM_EXT_1G_KEY_ADR, //EM_EXT_1G_KEY_ADDR + XSC_FLOW_TBL_PG_QP_SET_ID, //PG_QP_SET_ID + XSC_FLOW_DIR_REGISTER, //DIR_REGISTER + XSC_FLOW_INDIR_REGISTER, //INDIR_REGISTER + XSC_FLOW_TBL_BM_PCT_V4, //BIM MATCH PACKET_CLASSIFIER_V4 + XSC_FLOW_TBL_BM_PCT_V6, //BIM MATCH PACKET_CLASSIFIER_V6 + XSC_FLOW_TBL_BM_WCT, //BIM MATCH WILDCARD_TBL + XSC_FLOW_TBL_BM_IACL, //BIM MATCH 
INGRESS_ACL + XSC_FLOW_TBL_BMT, //BROADCAST MEMBER + XSC_FLOW_TBL_BOMT, //BROADCAST OUTPUT + XSC_FLOW_TBL_PST, //pst + XSC_FLOW_DMA_WR, //DMA WRITE + XSC_FLOW_DMA_RD, //DMA READ + XSC_FLOW_PARSER_TBL, //PARSER_TBL + XSC_FLOW_UDF_AWARE_TBL, //UDF_AWARE_TBL + XSC_FLOW_UDF_UNAWARE_TBL, //UDF_UNAWARE_TBL + XSC_FLOW_MTR_CTRL_TBL, //MTR_CTRL_TBL + XSC_FLOW_MTR_FLOW_PD, //MTR_FLOW_PD + XSC_FLOW_MTR_VPORT_PD, //MTR_VPORT_PD + XSC_FLOW_MTR_VPG_PD, //MTR_VPG_PD + XSC_FLOW_MTR_FLOW_SCAN, //MTR_FLOW_SCAN + XSC_FLOW_MTR_VPORT_SCAN, //MTR_VPORT_SCAN + XSC_FLOW_MTR_VPG_SCAN, //MTR_VPG_SCAN + XSC_FLOW_MTR_MAPPING, //MTR_MAPPING + XSC_FLOW_PRG_ACT_IDX, //PRG_ACT_INDEX + XSC_FLOW_PRG_ACT0, //PRG_ACT0 + XSC_FLOW_PRG_ACT1, //PRG_ACT1 + XSC_FLOW_PRG_ACT2, //PRG_ACT2 + XSC_FLOW_NIF_PRI_CNT, //NIF_PRI_CNT + XSC_FLOW_PRS2CLSF_SRC_PORT_CNT, //PRS2CLSF_SRC_PORT_CNT + XSC_FLOW_QUEUE_RX_CNT, //QUEUE_TX_CNT + XSC_FLOW_QUEUE_TX_CNT, //QUEUE_TX_CNT + XSC_FLOW_MAC_LAG_PORT_SEL, //MAC_LAG_PORT_SEL + XSC_FLOW_EXT_CT_CLR, //EXT_CT_CLR + XSC_FLOW_IP_TBL_CFG, //IP_TBL_CFG + XSC_FLOW_RSS_HASH_INIT_KEY_CFG, //SS_HASH_INIT_KEY_CFG + XSC_FLOW_QP_ID_BASE_CFG, //QP_ID_BASE_CFG + XSC_FLOW_PSS_INFO, //CLSF_CTRL_PSS_INFO + XSC_FLOW_SNAPSHOT, //SNAPSHOT + XSC_FLOW_PSS_MATCH_KEY, //PSS_MATCH_KEY + XSC_FLOW_PSS_CLR, //PSS_CLEAR + XSC_FLOW_PSS_START, //PSS_START + XSC_FLOW_PSS_DONE, //PSS_DONE + XSC_FLOW_MAC_PORT_MTU, //MAC_PORT_MTU + XSC_FLOW_ECP_PKT_LEN_INC, //ECP_PKT_LEN_INC + XSC_FLOW_TCP_FLAGS_CFG, //TCP_FLAGS_CFG + XSC_FLOW_DBG_CNT, //DBG_CNT + XSC_FLOW_PRS_REC_PORT_UDF_SEL, + XSC_FLOW_TBL_MAX +}; + +enum xsc_other_tbl_id { + XSC_OTHER_TBL_MAX +}; + +enum xsc_ioctl_op { + XSC_IOCTL_OP_ADD, + XSC_IOCTL_OP_DEL, + XSC_IOCTL_OP_GET, + XSC_IOCTL_OP_CLR, + XSC_IOCTL_OP_MOD, + XSC_IOCTL_OP_MAX +}; + +struct xsc_ioctl_mem_info { + u32 mem_num; + u32 size; + u64 vir_addr; + u64 phy_addr; +}; + +/* get phy info */ +struct xsc_ioctl_get_phy_info_attr { + u16 bdf; + u16 rsvd; +}; + +struct xsc_ioctl_qp_range { + u16 
opcode; + int num; + u32 qpn; +}; + +struct xsc_ioctl_get_phy_info_res { + u32 domain; + u32 bus; + u32 devfn; + u32 pcie_no; //pcie number + u32 func_id; //pf glb func id + u32 pcie_host; //host pcie number + u32 mac_phy_port; //mac port + u32 funcid_to_logic_port_off; + u16 lag_id; + u16 raw_qp_id_base; + u16 raw_rss_qp_id_base; + u16 pf0_vf_funcid_base; + u16 pf0_vf_funcid_top; + u16 pf1_vf_funcid_base; + u16 pf1_vf_funcid_top; + u16 pcie0_pf_funcid_base; + u16 pcie0_pf_funcid_top; + u16 pcie1_pf_funcid_base; + u16 pcie1_pf_funcid_top; + u16 lag_port_start; + u16 raw_tpe_qp_num; + int send_seg_num; + int recv_seg_num; + u8 on_chip_tbl_vld; + u8 dma_rw_tbl_vld; + u8 pct_compress_vld; + u32 chip_version; + u32 hca_core_clock; + u8 mac_bit; + u8 esw_mode; + u32 board_id; +}; + +struct xsc_ioctl_get_vf_info_res { + u16 vf_id; //start from 1, 0 is reserved for pf + u16 phy_port; //pcie0=0, pcie1=1 + u16 pf_id; //pf0=0, pf1=1 + u32 func_id; + u32 logic_port; +}; + +struct xsc_alloc_ucontext_req { + u32 domain; + u32 bus; + u32 devfn; +}; + +struct xsc_ioctl_force_pcp { + int pcp; +}; + +struct xsc_ioctl_force_dscp { + int dscp; +}; + +struct xsc_alloc_ucontext_resp { + int max_cq; + int max_qp; + u32 max_rwq_indirection_table_size; + u64 qpm_tx_db; + u64 qpm_rx_db; + u64 cqm_next_cid_reg; + u64 cqm_armdb; + u32 send_ds_num; + u32 recv_ds_num; + u32 send_ds_shift; + u32 recv_ds_shift; + u32 glb_func_id; + u32 max_wqes; +}; + +struct xsc_ioctl_cma_pcp { + int pcp; +}; + +struct xsc_ioctl_cma_dscp { + int dscp; +}; + +struct xsc_ioctl_set_debug_info { + unsigned int log_level; + unsigned int cmd_verbose; +}; + +struct xsc_ioctl_user_mode_attr { + u8 enable; +}; + +/* type-value */ +struct xsc_ioctl_data_tl { + u16 table; /* table id */ + u16 opmod; /* add/del/mod */ + u16 length; + u16 rsvd; +}; + +/* public header */ +struct xsc_ioctl_attr { + u16 opcode; /* ioctl cmd */ + u16 length; /* data length */ + u32 error; /* ioctl error info */ + u16 ver; + u16 rsvd; + u8 
data[]; /* specific table info */ +}; + +struct xsc_ioctl_emu_hdr { + u16 in_length; /* cmd req length */ + u16 out_length; /* cmd rsp length */ + u8 data[]; /* emu cmd content start from here */ +}; + +struct xsc_ioctl_hdr { + u32 check_filed; /* Validity verification fields */ + u32 domain; + u32 bus; + u32 devfn; + struct xsc_ioctl_attr attr; +}; + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h new file mode 100644 index 000000000000..24aa39a15e9d --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_lag.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_LAG_H +#define XSC_LAG_H + +#define XSC_BOARD_LAG_MAX XSC_MAX_PORTS + +enum lag_event_type { + XSC_LAG_CREATE, + XSC_LAG_ADD_MEMBER, + XSC_LAG_REMOVE_MEMBER, + XSC_LAG_UPDATE_MEMBER_STATUS, + XSC_LAG_UPDATE_HASH_TYPE, + XSC_LAG_DESTROY, + XSC_LAG_EVENT_MAX +}; + +enum lag_slave_status { + XSC_LAG_SLAVE_INACTIVE, + XSC_LAG_SLAVE_ACTIVE, + XSC_LAG_SLAVE_STATUS_MAX, +}; + +enum { + XSC_SLEEP, + XSC_WAKEUP, + XSC_EXIT, +}; + +enum { + XSC_LAG_FLAG_ROCE = 1 << 0, + XSC_LAG_FLAG_SRIOV = 1 << 1, + XSC_LAG_FLAG_KERNEL = 1 << 2, +}; + +enum xsc_lag_hash { + XSC_LAG_HASH_L23, + XSC_LAG_HASH_L34, + XSC_LAG_HASH_E23, + XSC_LAG_HASH_E34, +}; + +enum { + QOS_LAG_OP_CREATE = 0, + QOS_LAG_OP_ADD_MEMBER = 1, + QOS_LAG_OP_DEL_MEMBER = 2, + QOS_LAG_OP_DESTROY = 3, +}; + +#define BOND_ID_INVALID U8_MAX +#define BOARD_ID_INVALID U32_MAX +#define LAG_ID_INVALID U16_MAX + +#define XSC_LAG_MODE_FLAGS (XSC_LAG_FLAG_ROCE | XSC_LAG_FLAG_SRIOV | XSC_LAG_FLAG_KERNEL) + +struct xsc_lag { + struct net_device *bond_dev; + u8 bond_mode; + enum netdev_lag_tx_type tx_type; + enum netdev_lag_hash hash_type; + u8 lag_type; + u16 lag_id; + atomic_t qp_cnt[XSC_MAX_PORTS]; + struct list_head slave_list; + u8 xsc_member_cnt; + u32 
board_id; + int mode_changes_in_progress; + u8 not_roce_lag_xdev_mask; +}; + +struct xsc_lag_event { + struct list_head node; + enum lag_event_type event_type; + struct xsc_core_device *xdev; + struct xsc_core_device *roce_lag_xdev; + u8 bond_mode; + u8 lag_type; + u8 hash_type; + u8 lag_sel_mode; + u16 lag_id; + enum lag_slave_status slave_status; + u8 is_roce_lag_xdev; + u8 not_roce_lag_xdev_mask; +}; + +struct lag_event_list { + struct list_head head; + spinlock_t lock; /* protect lag_event_list */ + struct task_struct *bond_poll_task; + wait_queue_head_t wq; + int wait_flag; + u8 event_type; +}; + +struct xsc_board_lag { + struct xsc_lag xsc_lag[XSC_BOARD_LAG_MAX]; + u32 board_id; + struct kref ref; + u8 bond_valid_mask; + struct lag_event_list lag_event_list; + struct notifier_block nb; + struct mutex lock; /* protects board_lag */ +}; + +void xsc_lag_add_xdev(struct xsc_core_device *xdev); +void xsc_lag_remove_xdev(struct xsc_core_device *xdev); +void xsc_lag_add_netdev(struct net_device *ndev); +void xsc_lag_remove_netdev(struct net_device *ndev); +void xsc_lag_disable(struct xsc_core_device *xdev); +void xsc_lag_enable(struct xsc_core_device *xdev); +bool xsc_lag_is_roce(struct xsc_core_device *xdev); +struct xsc_lag *xsc_get_lag(struct xsc_core_device *xdev); +struct xsc_core_device *xsc_get_roce_lag_xdev(struct xsc_core_device *xdev); +u16 xsc_get_lag_id(struct xsc_core_device *xdev); +struct xsc_board_lag *xsc_board_lag_get(struct xsc_core_device *xdev); + +static inline void xsc_board_lag_lock(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (xsc_core_is_pf(xdev)) + mutex_lock(&board_lag->lock); +} + +static inline void xsc_board_lag_unlock(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (xsc_core_is_pf(xdev)) + mutex_unlock(&board_lag->lock); +} + +#endif /* XSC_LAG_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_macro.h 
b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_macro.h new file mode 100644 index 000000000000..db23b910f8e3 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_macro.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_MACRO_H +#define XSC_MACRO_H + +#ifndef NO_MSIX_SUPPORT +#define MSIX_SUPPORT +#endif + +#ifndef NO_RSS_SUPPORT +#define XSC_RSS_SUPPORT +#endif + +#ifndef NO_BQL_SUPPORT +#define XSC_BQL_SUPPORT +#endif + +#endif /*XSC_MACRO_H*/ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h new file mode 100644 index 000000000000..665103ac4dfa --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_port_ctrl.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_PORT_CTRL_H +#define XSC_PORT_CTRL_H + +/*mmap msg encode*/ +enum { + XSC_MMAP_MSG_SQDB = 0, + XSC_MMAP_MSG_RQDB = 1, + XSC_MMAP_MSG_CQDB = 2, + XSC_MMAP_MSG_ARM_CQDB = 3, +}; + +#define TRY_NEXT_CB 0x1a2b3c4d + +typedef int (*port_ctrl_cb)(struct xsc_bdf_file *file, unsigned int cmd, + struct xsc_ioctl_hdr __user *user_hdr, void *data); + +void xsc_port_ctrl_remove(struct xsc_core_device *dev); +int xsc_port_ctrl_probe(struct xsc_core_device *dev); +int xsc_port_ctrl_cb_reg(const char *name, port_ctrl_cb cb, void *data); +void xsc_port_ctrl_cb_dereg(const char *name); + +void xsc_port_ctrl_fini(void); +int xsc_port_ctrl_init(void); +struct xsc_core_device *xsc_pci_get_xdev_by_bus_and_slot(int domain, uint32_t bus, uint32_t devfn); +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pp.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pp.h new file mode 100644 index 000000000000..c200ba892897 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pp.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_PP_H +#define XSC_PP_H + +enum { + XSC_HASH_FIELD_SEL_SRC_IP = 1 << 0, + XSC_HASH_FIELD_SEL_PROTO = 1 << 1, + XSC_HASH_FIELD_SEL_DST_IP = 1 << 2, + XSC_HASH_FIELD_SEL_SPORT = 1 << 3, + XSC_HASH_FIELD_SEL_DPORT = 1 << 4, + XSC_HASH_FIELD_SEL_SRC_IPV6 = 1 << 5, + XSC_HASH_FIELD_SEL_DST_IPV6 = 1 << 6, + XSC_HASH_FIELD_SEL_SPORT_V6 = 1 << 7, + XSC_HASH_FIELD_SEL_DPORT_V6 = 1 << 8, +}; + +#define XSC_HASH_IP (XSC_HASH_FIELD_SEL_SRC_IP |\ + XSC_HASH_FIELD_SEL_DST_IP |\ + XSC_HASH_FIELD_SEL_PROTO) +#define XSC_HASH_IP_PORTS (XSC_HASH_FIELD_SEL_SRC_IP |\ + XSC_HASH_FIELD_SEL_DST_IP |\ + XSC_HASH_FIELD_SEL_SPORT |\ + XSC_HASH_FIELD_SEL_DPORT |\ + XSC_HASH_FIELD_SEL_PROTO) +#define XSC_HASH_IP6 (XSC_HASH_FIELD_SEL_SRC_IPV6 |\ + XSC_HASH_FIELD_SEL_DST_IPV6 |\ + XSC_HASH_FIELD_SEL_PROTO) +#define XSC_HASH_IP6_PORTS (XSC_HASH_FIELD_SEL_SRC_IPV6 |\ + XSC_HASH_FIELD_SEL_DST_IPV6 |\ + XSC_HASH_FIELD_SEL_SPORT_V6 |\ + XSC_HASH_FIELD_SEL_DPORT_V6 |\ + XSC_HASH_FIELD_SEL_PROTO) + +enum { + XSC_HASH_TMPL_IDX_IP_PORTS_IP6_PORTS = 0, + XSC_HASH_TMPL_IDX_IP_IP6, + XSC_HASH_TMPL_IDX_IP_PORTS_IP6, + XSC_HASH_TMPL_IDX_IP_IP6_PORTS, + XSC_HASH_TMPL_IDX_MAX, +}; + +#endif /* XSC_PP_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h new file mode 100644 index 000000000000..fec39d7137f5 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pph.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_PPH_H +#define XSC_PPH_H + +#define XSC_PPH_HEAD_LEN 64 + +enum { + L4_PROTO_NONE = 0, + L4_PROTO_TCP = 1, + L4_PROTO_UDP = 2, + L4_PROTO_ICMP = 3, + L4_PROTO_GRE = 4, +}; + +enum { + L3_PROTO_NONE = 0, + L3_PROTO_IP = 2, + L3_PROTO_IP6 = 3, +}; + +struct epp_pph { + u16 outer_eth_type; //2 bytes + u16 inner_eth_type; //4 bytes + + u16 rsv1:1; + u16 outer_vlan_flag:2; + u16 outer_ip_type:2; + u16 outer_ip_ofst:5; + u16 outer_ip_len:6; //6 bytes + + u16 rsv2:1; + u16 outer_tp_type:3; + u16 outer_tp_csum_flag:1; + u16 outer_tp_ofst:7; + u16 ext_tunnel_type:4; //8 bytes + + u8 tunnel_ofst; //9 bytes + u8 inner_mac_ofst; //10 bytes + + u32 rsv3:2; + u32 inner_mac_flag:1; + u32 inner_vlan_flag:2; + u32 inner_ip_type:2; + u32 inner_ip_ofst:8; + u32 inner_ip_len:6; + u32 inner_tp_type:2; + u32 inner_tp_csum_flag:1; + u32 inner_tp_ofst:8; //14 bytees + + u16 rsv4:1; + u16 payload_type:4; + u16 payload_ofst:8; + u16 pkt_type:3; //16 bytes + + u16 rsv5:2; + u16 pri:3; + u16 logical_in_port:11; + u16 vlan_info; + u8 error_bitmap:8; //21 bytes + + u8 rsv6:7; + u8 recirc_id_vld:1; + u16 recirc_id; //24 bytes + + u8 rsv7:7; + u8 recirc_data_vld:1; + u32 recirc_data; //29 bytes + + u8 rsv8:6; + u8 mark_tag_vld:2; + u16 mark_tag; //32 bytes + + u8 rsv9:4; + u8 upa_to_soc:1; + u8 upa_from_soc:1; + u8 upa_re_up_call:1; + u8 upa_pkt_drop:1; //33 bytes + + u8 ucdv; + u16 rsv10:2; + u16 pkt_len:14; //36 bytes + + u16 rsv11:2; + u16 pkt_hdr_ptr:14; //38 bytes + + u64 rsv12:5; + u64 csum_ofst:8; + u64 csum_val:29; + u64 csum_plen:14; + u64 rsv11_0:8; //46 bytes + + u64 rsv11_1; + u64 rsv11_2; + u16 rsv11_3; +}; + +#define OUTER_L3_BIT BIT(3) +#define OUTER_L4_BIT BIT(2) +#define INNER_L3_BIT BIT(1) +#define INNER_L4_BIT BIT(0) +#define OUTER_BIT (OUTER_L3_BIT | OUTER_L4_BIT) +#define INNER_BIT (INNER_L3_BIT | INNER_L4_BIT) +#define OUTER_AND_INNER (OUTER_BIT | INNER_BIT) + +#define PACKET_UNKNOWN BIT(4) + +#define EPP2SOC_PPH_EXT_TUNNEL_TYPE_OFFSET (6UL) +#define 
EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_MASK (0XF00) +#define EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_OFFSET (8) + +#define EPP2SOC_PPH_EXT_ERROR_BITMAP_OFFSET (20UL) +#define EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_MASK (0XFF) +#define EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_OFFSET (0) + +#define XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(PPH_BASE_ADDR) \ + ((*(u16 *)((u8 *)(PPH_BASE_ADDR) + EPP2SOC_PPH_EXT_TUNNEL_TYPE_OFFSET) & \ + EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_MASK) >> EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_OFFSET) + +#define XSC_GET_EPP2SOC_PPH_ERROR_BITMAP(PPH_BASE_ADDR) \ + ((*(u8 *)((u8 *)(PPH_BASE_ADDR) + EPP2SOC_PPH_EXT_ERROR_BITMAP_OFFSET) & \ + EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_MASK) >> EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_OFFSET) + +#define PPH_OUTER_IP_TYPE_OFF (4UL) +#define PPH_OUTER_IP_TYPE_MASK (0x3) +#define PPH_OUTER_IP_TYPE_SHIFT (11) +#define PPH_OUTER_IP_TYPE(base) \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_TYPE_OFF)) >> \ + PPH_OUTER_IP_TYPE_SHIFT) & PPH_OUTER_IP_TYPE_MASK) + +#define PPH_OUTER_IP_OFST_OFF (4UL) +#define PPH_OUTER_IP_OFST_MASK (0x1f) +#define PPH_OUTER_IP_OFST_SHIFT (6) +#define PPH_OUTER_IP_OFST(base) \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_OFST_OFF)) >> \ + PPH_OUTER_IP_OFST_SHIFT) & PPH_OUTER_IP_OFST_MASK) + +#define PPH_OUTER_IP_LEN_OFF (4UL) +#define PPH_OUTER_IP_LEN_MASK (0x3f) +#define PPH_OUTER_IP_LEN_SHIFT (0) +#define PPH_OUTER_IP_LEN(base) \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_LEN_OFF)) >> \ + PPH_OUTER_IP_LEN_SHIFT) & PPH_OUTER_IP_LEN_MASK) + +#define PPH_OUTER_TP_TYPE_OFF (6UL) +#define PPH_OUTER_TP_TYPE_MASK (0x7) +#define PPH_OUTER_TP_TYPE_SHIFT (12) +#define PPH_OUTER_TP_TYPE(base) \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_TP_TYPE_OFF)) >> \ + PPH_OUTER_TP_TYPE_SHIFT) & PPH_OUTER_TP_TYPE_MASK) + +#define PPH_PAYLOAD_OFST_OFF (14UL) +#define PPH_PAYLOAD_OFST_MASK (0xff) +#define PPH_PAYLOAD_OFST_SHIFT (3) +#define PPH_PAYLOAD_OFST(base) \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_PAYLOAD_OFST_OFF)) >> \ + PPH_PAYLOAD_OFST_SHIFT) & 
PPH_PAYLOAD_OFST_MASK) + +#define PPH_CSUM_OFST_OFF (38UL) +#define PPH_CSUM_OFST_MASK (0xff) +#define PPH_CSUM_OFST_SHIFT (51) +#define PPH_CSUM_OFST(base) \ + ((be64_to_cpu(*(u64 *)((u8 *)(base) + PPH_CSUM_OFST_OFF)) >> \ + PPH_CSUM_OFST_SHIFT) & PPH_CSUM_OFST_MASK) + +#define PPH_CSUM_VAL_OFF (38UL) +#define PPH_CSUM_VAL_MASK (0xeffffff) +#define PPH_CSUM_VAL_SHIFT (22) +#define PPH_CSUM_VAL(base) \ + ((be64_to_cpu(*(u64 *)((u8 *)(base) + PPH_CSUM_VAL_OFF)) >> \ + PPH_CSUM_VAL_SHIFT) & PPH_CSUM_VAL_MASK) +#endif /* XSC_PPH_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h new file mode 100644 index 000000000000..6b2c84017c18 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_reg.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_REG_H +#define XSC_REG_H +#define CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR 0x0 +#define CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR 0x4 +#define CMDQM_HOST_REQ_PID_MEM_ADDR 0x8 +#define CMDQM_HOST_REQ_CID_MEM_ADDR 0xc +#define CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR 0x10 +#define CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR 0x14 +#define CMDQM_HOST_RSP_PID_MEM_ADDR 0x18 +#define CMDQM_HOST_RSP_CID_MEM_ADDR 0x1c +#define CMDQM_HOST_VF_ERR_STS_MEM_ADDR 0x20 +#define CMDQM_VECTOR_ID_MEM_ADDR 0x24 +#define CMDQM_Q_ELEMENT_SZ_REG_ADDR 0x28 +#define CMDQM_HOST_Q_DEPTH_REG_ADDR 0x2c + +#define CPM_LOCK_GET_REG_ADDR 0x30 +#define CPM_LOCK_PUT_REG_ADDR 0x34 +#define CPM_LOCK_AVAIL_REG_ADDR 0x38 +#define CPM_IDA_DATA_MEM_ADDR 0x3c +#define CPM_IDA_CMD_REG_ADDR 0x83c +#define CPM_IDA_ADDR_REG_ADDR 0x840 +#define CPM_IDA_BUSY_REG_ADDR 0x8c0 + +#define DB_CQ_FUNC_MEM_ADDR 0x8c4 +#define DB_EQ_FUNC_MEM_ADDR 0x8c8 +#define DB_CQ_CID_DIRECT_MEM_ADDR 0x8cc +#define TX_DB_FUNC_MEM_ADDR 0x8d0 +#define RX_DB_FUNC_MEM_ADDR 0x8d4 + +#endif diff --git 
a/drivers/net/ethernet/yunsilicon/xsc/net/Kconfig b/drivers/net/ethernet/yunsilicon/xsc/net/Kconfig new file mode 100644 index 000000000000..30889caa9603 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/Kconfig @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Yunsilicon driver configuration +# + +config YUNSILICON_XSC_ETH + tristate "Yunsilicon XSC ethernet driver" + default n + depends on YUNSILICON_XSC_PCI + help + This driver provides ethernet support for + Yunsilicon XSC devices. + + To compile this driver as a module, choose M here. The module + will be called xsc_eth. diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/Makefile b/drivers/net/ethernet/yunsilicon/xsc/net/Makefile new file mode 100644 index 000000000000..a6b1a4a300aa --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. + +ccflags-y += -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc + +obj-$(CONFIG_YUNSILICON_XSC_ETH) += xsc_eth.o + +xsc_eth-y := main.o xsc_eth_ctrl.o xsc_eth_tx.o xsc_eth_rx.o xsc_eth_txrx.o \ + ut_main.o xsc_eth_ethtool.o xsc_eth_stats.o xsc_dcbnl.o xsc_hw_comm.o \ + xsc_eth_sysfs.o xsc_fs.o xsc_eth_dim.o diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/main.c b/drivers/net/ethernet/yunsilicon/xsc/net/main.c new file mode 100644 index 000000000000..3ed7be4e5d7d --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/main.c @@ -0,0 +1,3397 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_cmd.h" +#include "common/qp.h" +#include "common/xsc_lag.h" +#include "common/xsc_pp.h" + +#include "xsc_eth.h" +#include "xsc_eth_txrx.h" +#include "xsc_eth_ethtool.h" +#include "xsc_eth_common.h" +#include "xsc_eth_stats.h" +#include "xsc_accel.h" +#include "xsc_eth_ctrl.h" +#include "../pci/eswitch.h" + +#include "common/xsc_fs.h" +#include "common/vport.h" +#include "common/qp.h" +#include "xsc_eth_dim.h" + +MODULE_LICENSE("GPL"); + +#define MAX_VF_NUM_MINIDUMP 1024 + +static void xsc_eth_close_channel(struct xsc_channel *c, bool free_rq); +static void xsc_eth_remove(struct xsc_core_device *xdev, void *context); + +static int xsc_eth_open(struct net_device *netdev); +static int xsc_eth_close(struct net_device *netdev); +static void xsc_netdev_set_tcs(struct xsc_adapter *priv, u16 nch, u8 ntc); + +#ifdef NEED_CREATE_RX_THREAD +extern uint32_t xsc_eth_rx_thread_create(struct xsc_adapter *adapter); +#endif + +static inline void xsc_set_feature(netdev_features_t *features, + netdev_features_t feature, + bool enable) +{ + if (enable) + *features |= feature; + else + *features &= ~feature; +} + +typedef int (*xsc_feature_handler)(struct net_device *netdev, bool enable); + +static int xsc_eth_modify_qp_status(struct xsc_core_device *xdev, + u32 qpn, u16 status); + +static void xsc_eth_build_queue_param(struct xsc_adapter *adapter, + struct xsc_queue_attr *attr, u8 type) +{ + struct xsc_core_device *xdev = adapter->xdev; + + if (adapter->nic_param.sq_size == 0) + adapter->nic_param.sq_size = BIT(xdev->caps.log_max_qp_depth); + if (adapter->nic_param.rq_size == 0) + adapter->nic_param.rq_size = BIT(xdev->caps.log_max_qp_depth); + + if (type == XSC_QUEUE_TYPE_EQ) { + attr->q_type = XSC_QUEUE_TYPE_EQ; + attr->ele_num = 
XSC_EQ_ELE_NUM; + attr->ele_size = XSC_EQ_ELE_SZ; + attr->ele_log_size = order_base_2(XSC_EQ_ELE_SZ); + attr->q_log_size = order_base_2(XSC_EQ_ELE_NUM); + } else if (type == XSC_QUEUE_TYPE_RQCQ) { + attr->q_type = XSC_QUEUE_TYPE_RQCQ; + attr->ele_num = min_t(int, XSC_RQCQ_ELE_NUM, xdev->caps.max_cqes); + attr->ele_size = XSC_RQCQ_ELE_SZ; + attr->ele_log_size = order_base_2(XSC_RQCQ_ELE_SZ); + attr->q_log_size = order_base_2(attr->ele_num); + } else if (type == XSC_QUEUE_TYPE_SQCQ) { + attr->q_type = XSC_QUEUE_TYPE_SQCQ; + attr->ele_num = min_t(int, XSC_SQCQ_ELE_NUM, xdev->caps.max_cqes); + attr->ele_size = XSC_SQCQ_ELE_SZ; + attr->ele_log_size = order_base_2(XSC_SQCQ_ELE_SZ); + attr->q_log_size = order_base_2(attr->ele_num); + } else if (type == XSC_QUEUE_TYPE_RQ) { + attr->q_type = XSC_QUEUE_TYPE_RQ; + attr->ele_num = adapter->nic_param.rq_size; + attr->ele_size = xdev->caps.recv_ds_num * XSC_RECV_WQE_DS; + attr->ele_log_size = order_base_2(attr->ele_size); + attr->q_log_size = order_base_2(attr->ele_num); + } else if (type == XSC_QUEUE_TYPE_SQ) { + attr->q_type = XSC_QUEUE_TYPE_SQ; + attr->ele_num = adapter->nic_param.sq_size; + attr->ele_size = xdev->caps.send_ds_num * XSC_SEND_WQE_DS; + attr->ele_log_size = order_base_2(attr->ele_size); + attr->q_log_size = order_base_2(attr->ele_num); + } +} + +static void xsc_eth_init_frags_partition(struct xsc_rq *rq) +{ + struct xsc_wqe_frag_info next_frag = {}; + struct xsc_wqe_frag_info *prev; + int i; + + next_frag.di = &rq->wqe.di[0]; + next_frag.offset = 0; + prev = NULL; + + for (i = 0; i < xsc_wq_cyc_get_size(&rq->wqe.wq); i++) { + struct xsc_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct xsc_wqe_frag_info *frag = + &rq->wqe.frags[i << rq->wqe.info.log_num_frags]; + int f; + + for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) { + if (next_frag.offset + frag_info[f].frag_stride > + XSC_RX_FRAG_SZ) { + next_frag.di++; + next_frag.offset = 0; + if (prev) + prev->last_in_page = 1; + } + *frag = next_frag; + 
+ /* prepare next */ + next_frag.offset += frag_info[f].frag_stride; + prev = frag; + } + } + + if (prev) + prev->last_in_page = 1; +} + +static int xsc_eth_init_di_list(struct xsc_rq *rq, int wq_sz, int cpu) +{ + int len = wq_sz << rq->wqe.info.log_num_frags; + + rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), + GFP_KERNEL, cpu_to_node(cpu)); + if (!rq->wqe.di) + return -ENOMEM; + + xsc_eth_init_frags_partition(rq); + + return 0; +} + +static void xsc_eth_free_di_list(struct xsc_rq *rq) +{ + kvfree(rq->wqe.di); +} + +int xsc_rx_alloc_page_cache(struct xsc_rq *rq, int node, u8 log_init_sz) +{ + struct xsc_page_cache *cache = &rq->page_cache; + + cache->sz = 1 << log_init_sz; + cache->page_cache = kvzalloc_node(cache->sz * sizeof(*cache->page_cache), + GFP_KERNEL, node); + if (!cache->page_cache) + return -ENOMEM; + + return 0; +} + +void xsc_rx_free_page_cache(struct xsc_rq *rq) +{ + struct xsc_page_cache *cache = &rq->page_cache; + u32 i; + + for (i = cache->head; i != cache->tail; i = (i + 1) & (cache->sz - 1)) { + struct xsc_dma_info *dma_info = &cache->page_cache[i]; + + xsc_page_release_dynamic(rq, dma_info, false); + } + kvfree(cache->page_cache); +} + +int xsc_eth_reset(struct xsc_core_device *dev) +{ + return 0; +} + +void xsc_eth_cq_error_event(struct xsc_core_cq *xcq, enum xsc_event event) +{ + struct xsc_cq *xsc_cq = container_of(xcq, struct xsc_cq, xcq); + struct xsc_core_device *xdev = xsc_cq->xdev; + + if (event != XSC_EVENT_TYPE_CQ_ERROR) { + xsc_core_err(xdev, "Unexpected event type %d on CQ %06x\n", + event, xcq->cqn); + return; + } + + xsc_core_err(xdev, "Eth catch CQ ERROR:%x, cqn: %d\n", event, xcq->cqn); +} + +void xsc_eth_completion_event(struct xsc_core_cq *xcq) +{ + struct xsc_cq *cq = container_of(xcq, struct xsc_cq, xcq); + struct xsc_core_device *xdev = cq->xdev; + struct xsc_rq *rq = NULL; + + if (unlikely(!cq->channel)) { + xsc_core_warn(xdev, "cq%d->channel is null\n", xcq->cqn); + return; + } + + rq = 
&cq->channel->qp.rq[0]; + + set_bit(XSC_CHANNEL_NAPI_SCHED, &cq->channel->flags); + cq->channel->stats->poll = 0; + cq->channel->stats->poll_tx = 0; + + if (!test_bit(XSC_ETH_RQ_STATE_ENABLED, &rq->state)) + xsc_core_warn(xdev, "ch%d_cq%d, napi_flag=0x%lx\n", + cq->channel->chl_idx, xcq->cqn, cq->napi->state); + + napi_schedule(cq->napi); + cq->event_ctr++; + cq->channel->stats->events++; +} + +static inline int xsc_cmd_destroy_cq(struct xsc_core_device *dev, struct xsc_core_cq *xcq) +{ + struct xsc_destroy_cq_mbox_in in; + struct xsc_destroy_cq_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); + in.cqn = cpu_to_be32(xcq->cqn); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "failed to destroy cq, err=%d out.status=%u\n", + err, out.hdr.status); + return -ENOEXEC; + } + + xcq->cqn = 0; + return 0; +} + +int xsc_eth_create_cq(struct xsc_core_device *xdev, struct xsc_core_cq *xcq, + struct xsc_create_cq_mbox_in *in, int insize) +{ + int err, ret = -1; + struct xsc_cq_table *table = &xdev->dev_res->cq_table; + struct xsc_create_cq_mbox_out out; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_CQ); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to create cq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + xcq->cqn = be32_to_cpu(out.cqn) & 0xffffff; + xcq->cons_index = 0; + xcq->arm_sn = 0; + atomic_set(&xcq->refcount, 1); + init_completion(&xcq->free); + + spin_lock_irq(&table->lock); + ret = radix_tree_insert(&table->tree, xcq->cqn, xcq); + spin_unlock_irq(&table->lock); + if (ret) + goto err_insert_cq; + return 0; + +err_insert_cq: + err = xsc_cmd_destroy_cq(xdev, xcq); + if (err) + xsc_core_warn(xdev, "failed to destroy cqn=%d, err=%d\n", xcq->cqn, err); + return ret; +} + +int xsc_eth_destroy_cq(struct 
xsc_core_device *xdev, struct xsc_cq *cq) +{ + struct xsc_cq_table *table = &xdev->dev_res->cq_table; + struct xsc_core_cq *tmp; + int err; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, cq->xcq.cqn); + spin_unlock_irq(&table->lock); + if (!tmp) { + err = -ENOENT; + goto err_delete_cq; + } + + if (tmp != &cq->xcq) { + err = -EINVAL; + goto err_delete_cq; + } + + err = xsc_cmd_destroy_cq(xdev, &cq->xcq); + if (err) + goto err_destroy_cq; + + if (atomic_dec_and_test(&cq->xcq.refcount)) + complete(&cq->xcq.free); + wait_for_completion(&cq->xcq.free); + return 0; + +err_destroy_cq: + xsc_core_warn(xdev, "failed to destroy cqn=%d, err=%d\n", + cq->xcq.cqn, err); + return err; +err_delete_cq: + xsc_core_warn(xdev, "cqn=%d not found in tree, err=%d\n", + cq->xcq.cqn, err); + return err; +} + +void xsc_eth_free_cq(struct xsc_cq *cq) +{ + xsc_eth_wq_destroy(&cq->wq_ctrl); +} + +int xsc_eth_create_rss_qp_rqs(struct xsc_core_device *xdev, + struct xsc_create_multiqp_mbox_in *in, + int insize, + int *prqn_base) +{ + int ret; + struct xsc_create_multiqp_mbox_out out; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_MULTI_QP); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, + "failed to create rss rq, qp_num=%d, type=%d, err=%d out.status=%u\n", + in->qp_num, in->qp_type, ret, out.hdr.status); + return -ENOEXEC; + } + + *prqn_base = be32_to_cpu(out.qpn_base) & 0xffffff; + return 0; +} + +void xsc_eth_qp_event(struct xsc_core_qp *qp, int type) +{ + struct xsc_rq *rq; + struct xsc_sq *sq; + struct xsc_core_device *xdev; + + if (qp->eth_queue_type == XSC_RES_RQ) { + rq = container_of(qp, struct xsc_rq, cqp); + xdev = rq->cq.xdev; + } else if (qp->eth_queue_type == XSC_RES_SQ) { + sq = container_of(qp, struct xsc_sq, cqp); + xdev = sq->cq.xdev; + } else { + pr_err("%s:Unknown eth qp type %d\n", __func__, type); + return; + } + + switch (type) { + case XSC_EVENT_TYPE_WQ_CATAS_ERROR: + case 
XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case XSC_EVENT_TYPE_WQ_ACCESS_ERROR: + xsc_core_err(xdev, "%s:Async event %x on QP %d\n", __func__, type, qp->qpn); + break; + default: + xsc_core_err(xdev, "%s: Unexpected event type %d on QP %d\n", + __func__, type, qp->qpn); + return; + } +} + +int xsc_eth_create_qp_rq(struct xsc_core_device *xdev, struct xsc_rq *prq, + struct xsc_create_qp_mbox_in *in, int insize) +{ + int ret = -1; + struct xsc_create_qp_mbox_out out; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_QP); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to create rq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + prq->rqn = be32_to_cpu(out.qpn) & 0xffffff; + prq->cqp.event = xsc_eth_qp_event; + prq->cqp.eth_queue_type = XSC_RES_RQ; + + ret = create_resource_common(xdev, &prq->cqp); + if (ret) { + xsc_core_err(xdev, "%s:error qp:%d errno:%d\n", __func__, prq->rqn, ret); + return ret; + } + + return 0; +} + +int xsc_eth_destroy_qp_rq(struct xsc_core_device *xdev, struct xsc_rq *prq) +{ + struct xsc_destroy_qp_mbox_in in; + struct xsc_destroy_qp_mbox_out out; + int err; + + err = xsc_eth_modify_qp_status(xdev, prq->rqn, XSC_CMD_OP_2RST_QP); + if (err) { + xsc_core_warn(xdev, "failed to set rq%d status=rst, err=%d\n", prq->rqn, err); + return err; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(prq->rqn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed to destroy rq%d, err=%d out.status=%u\n", + prq->rqn, err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +static void xsc_eth_free_rx_wqe(struct xsc_rq *rq) +{ + u16 wqe_ix; + struct xsc_wq_cyc *wq = &rq->wqe.wq; + + while (!xsc_wq_cyc_is_empty(wq)) { + wqe_ix = xsc_wq_cyc_get_tail(wq); + rq->dealloc_wqe(rq, wqe_ix); + 
xsc_wq_cyc_pop(wq); + } +} + +static void xsc_free_qp_rq(struct xsc_rq *rq) +{ + if (rq->page_cache.page_cache) + xsc_rx_free_page_cache(rq); + + kvfree(rq->wqe.frags); + kvfree(rq->wqe.di); + + if (rq->page_pool) + page_pool_destroy(rq->page_pool); + + xsc_eth_wq_destroy(&rq->wq_ctrl); +} + +int xsc_eth_create_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq, + struct xsc_create_qp_mbox_in *in, int insize) +{ + struct xsc_create_qp_mbox_out out; + int ret; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_QP); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to create sq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + psq->sqn = be32_to_cpu(out.qpn) & 0xffffff; + + return 0; +} + +int xsc_eth_modify_qp_sq(struct xsc_core_device *xdev, struct xsc_modify_raw_qp_mbox_in *in) +{ + struct xsc_modify_raw_qp_mbox_out out; + int ret; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_RAW_QP); + + ret = xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), + &out, sizeof(struct xsc_modify_raw_qp_mbox_out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to modify sq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +int xsc_eth_destroy_qp_sq(struct xsc_core_device *xdev, struct xsc_sq *psq) +{ + struct xsc_destroy_qp_mbox_in in; + struct xsc_destroy_qp_mbox_out out; + int err; + + err = xsc_eth_modify_qp_status(xdev, psq->sqn, XSC_CMD_OP_2RST_QP); + if (err) { + xsc_core_warn(xdev, "failed to set sq%d status=rst, err=%d\n", psq->sqn, err); + return err; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(psq->sqn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed to destroy sq%d, err=%d out.status=%u\n", + psq->sqn, err, out.hdr.status); + 
return -ENOEXEC; + } + + return 0; +} + +static void xsc_free_qp_sq_db(struct xsc_sq *sq) +{ + kvfree(sq->db.wqe_info); + kvfree(sq->db.dma_fifo); +} + +static void xsc_free_qp_sq(struct xsc_sq *sq) +{ + xsc_free_qp_sq_db(sq); + xsc_eth_wq_destroy(&sq->wq_ctrl); +} + +static int xsc_eth_alloc_qp_sq_db(struct xsc_sq *sq, int numa) +{ + int wq_sz = xsc_wq_cyc_get_size(&sq->wq); + struct xsc_core_device *xdev = sq->cq.xdev; + int df_sz = wq_sz * xdev->caps.send_ds_num; + + sq->db.dma_fifo = kvzalloc_node(array_size(df_sz, sizeof(*sq->db.dma_fifo)), + GFP_KERNEL, numa); + sq->db.wqe_info = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.wqe_info)), + GFP_KERNEL, numa); + + if (!sq->db.dma_fifo || !sq->db.wqe_info) { + xsc_free_qp_sq_db(sq); + return -ENOMEM; + } + + sq->dma_fifo_mask = df_sz - 1; + + return 0; +} + +static int xsc_eth_alloc_cq(struct xsc_channel *c, struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) +{ + int ret; + struct xsc_core_device *xdev = c->adapter->xdev; + struct xsc_core_cq *core_cq = &pcq->xcq; + u32 i; + u8 q_log_size = pcq_param->cq_attr.q_log_size; + u8 ele_log_size = pcq_param->cq_attr.ele_log_size; + + pcq_param->wq.db_numa_node = cpu_to_node(c->cpu); + pcq_param->wq.buf_numa_node = cpu_to_node(c->cpu); + + ret = xsc_eth_cqwq_create(xdev, &pcq_param->wq, + q_log_size, ele_log_size, &pcq->wq, + &pcq->wq_ctrl); + if (ret) + return ret; + + core_cq->cqe_sz = pcq_param->cq_attr.ele_num; + core_cq->comp = xsc_eth_completion_event; + core_cq->event = xsc_eth_cq_error_event; + core_cq->vector = c->chl_idx; + + for (i = 0; i < xsc_cqwq_get_size(&pcq->wq); i++) { + struct xsc_cqe *cqe = xsc_cqwq_get_wqe(&pcq->wq, i); + + cqe->owner = 1; + } + pcq->xdev = xdev; + + return ret; +} + +#ifdef NEED_CREATE_RX_THREAD +static int xsc_eth_set_cq(struct xsc_channel *c, + struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) +{ + int ret = XSCALE_RET_SUCCESS; + struct xsc_create_cq_mbox_in *in; + int inlen; + int hw_npages; + + hw_npages = 
DIV_ROUND_UP(pcq->wq_ctrl.buf.size, PAGE_SIZE_4K); + /*mbox size + pas size*/ + inlen = sizeof(struct xsc_create_cq_mbox_in) + + sizeof(__be64) * hw_npages; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + /*construct param of in struct*/ + in->ctx.log_cq_sz = pcq_param->cq_attr.q_log_size; + in->ctx.pa_num = cpu_to_be16(hw_npages); + in->ctx.glb_func_id = cpu_to_be16(c->adapter->xdev->glb_func_id); + + xsc_fill_page_frag_array(&pcq->wq_ctrl.buf, + &in->pas[0], hw_npages); + + ret = xsc_eth_create_cq(c->adapter->xdev, &pcq->xcq, in, inlen); + + kvfree(in); /* in was kvzalloc'ed; kvfree handles kmalloc- or vmalloc-backed memory */ + xsc_core_info(c->adapter->xdev, "create cqn%d, func_id=%d, ret=%d\n", + pcq->xcq.cqn, c->adapter->xdev->glb_func_id, ret); + return ret; +} +#else +static int xsc_eth_set_cq(struct xsc_channel *c, + struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) +{ + int ret = XSCALE_RET_SUCCESS; + struct xsc_core_device *xdev = c->adapter->xdev; + struct xsc_create_cq_mbox_in *in; + int inlen; + int eqn, irqn; + int hw_npages; + + hw_npages = DIV_ROUND_UP(pcq->wq_ctrl.buf.size, PAGE_SIZE_4K); + /*mbox size + pas size*/ + inlen = sizeof(struct xsc_create_cq_mbox_in) + + sizeof(__be64) * hw_npages; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + /*construct param of in struct*/ + ret = xsc_vector2eqn(xdev, c->chl_idx, &eqn, &irqn); + if (ret) + goto err; + + in->ctx.eqn = eqn; + in->ctx.eqn = cpu_to_be16(in->ctx.eqn); + in->ctx.log_cq_sz = pcq_param->cq_attr.q_log_size; + in->ctx.pa_num = cpu_to_be16(hw_npages); + in->ctx.glb_func_id = cpu_to_be16(xdev->glb_func_id); + + xsc_fill_page_frag_array(&pcq->wq_ctrl.buf, &in->pas[0], hw_npages); + + ret = xsc_eth_create_cq(c->adapter->xdev, &pcq->xcq, in, inlen); + if (ret == 0) { + pcq->xcq.irqn = irqn; + pcq->xcq.eq = xsc_eq_get(xdev, pcq->xcq.vector); + } + +err: + kvfree(in); + xsc_core_info(c->adapter->xdev, "create ch%d cqn%d, eqn=%d, func_id=%d, ret=%d\n", + c->chl_idx, pcq->xcq.cqn, eqn, xdev->glb_func_id, ret); + return ret; 
+} +#endif + +static int xsc_eth_open_cq(struct xsc_channel *c, + struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) +{ + int ret; + + ret = xsc_eth_alloc_cq(c, pcq, pcq_param); + if (ret) + return ret; + + ret = xsc_eth_set_cq(c, pcq, pcq_param); + if (ret) + goto err_set_cq; + + xsc_cq_notify_hw_rearm(pcq); + + pcq->napi = &c->napi; + pcq->channel = c; + pcq->rx = (pcq_param->cq_attr.q_type == XSC_QUEUE_TYPE_RQCQ) ? 1 : 0; + + return 0; + +err_set_cq: + xsc_eth_free_cq(pcq); + return ret; +} + +static int xsc_eth_close_cq(struct xsc_channel *c, struct xsc_cq *pcq) +{ + int ret; + struct xsc_core_device *xdev = c->adapter->xdev; + + ret = xsc_eth_destroy_cq(xdev, pcq); + if (ret) { + xsc_core_warn(xdev, "failed to close ch%d cq%d, ret=%d\n", + c->chl_idx, pcq->xcq.cqn, ret); + return ret; + } + + xsc_eth_free_cq(pcq); + + return 0; +} + +static int xsc_eth_modify_qp_status(struct xsc_core_device *xdev, + u32 qpn, u16 status) +{ + struct xsc_modify_qp_mbox_in in; + struct xsc_modify_qp_mbox_out out; + + return xsc_modify_qp(xdev, &in, &out, qpn, status); +} + +int xsc_eth_set_hw_mtu(struct xsc_core_device *dev, u16 mtu, u16 rx_buf_sz) +{ + struct xsc_set_mtu_mbox_in in; + struct xsc_set_mtu_mbox_out out; + int ret; + + memset(&in, 0, sizeof(struct xsc_set_mtu_mbox_in)); + memset(&out, 0, sizeof(struct xsc_set_mtu_mbox_out)); + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_MTU); + in.mtu = cpu_to_be16(mtu); + in.rx_buf_sz_min = cpu_to_be16(rx_buf_sz); + in.mac_port = dev->mac_port; + + ret = xsc_cmd_exec(dev, &in, sizeof(struct xsc_set_mtu_mbox_in), &out, + sizeof(struct xsc_set_mtu_mbox_out)); + if (ret || out.hdr.status) { + xsc_core_err(dev, "failed to set hw_mtu=%u rx_buf_sz=%u, err=%d, status=%d\n", + mtu, rx_buf_sz, ret, out.hdr.status); + ret = -ENOEXEC; + } + + return ret; +} + +int xsc_eth_get_mac(struct xsc_core_device *dev, char *mac) +{ + struct xsc_query_eth_mac_mbox_out *out; + struct xsc_query_eth_mac_mbox_in in; + int err; + + out = 
kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_ETH_MAC); + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err || out->hdr.status) { + xsc_core_warn(dev, "get mac failed! err=%d, out.status=%u\n", err, out->hdr.status); + err = -ENOEXEC; + goto exit; + } + + memcpy(mac, out->mac, 6); + xsc_core_dbg(dev, "get mac %02x:%02x:%02x:%02x:%02x:%02x\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + +exit: + kfree(out); + + return err; +} + +int xsc_eth_modify_qps_channel(struct xsc_adapter *adapter, struct xsc_channel *c) +{ + int ret = 0; + int i; + + for (i = 0; i < c->qp.rq_num; i++) { + c->qp.rq[i].post_wqes(&c->qp.rq[i]); + ret = xsc_eth_modify_qp_status(adapter->xdev, c->qp.rq[i].rqn, + XSC_CMD_OP_RTR2RTS_QP); + if (ret) + return ret; + } + + for (i = 0; i < c->qp.sq_num; i++) { + ret = xsc_eth_modify_qp_status(adapter->xdev, c->qp.sq[i].sqn, + XSC_CMD_OP_RTR2RTS_QP); + if (ret) + return ret; + } + return 0; +} + +int xsc_eth_modify_qps(struct xsc_adapter *adapter, + struct xsc_eth_channels *chls) +{ + int ret; + int i; + + for (i = 0; i < chls->num_chl; i++) { + struct xsc_channel *c = &chls->c[i]; + + ret = xsc_eth_modify_qps_channel(adapter, c); + if (ret) + return ret; + } + + return 0; +} + +u32 xsc_rx_get_linear_frag_sz(u32 mtu) +{ + u32 byte_count = XSC_SW2HW_FRAG_SIZE(mtu); + + return XSC_SKB_FRAG_SZ(byte_count); +} + +bool xsc_rx_is_linear_skb(u32 mtu) +{ + u32 linear_frag_sz = xsc_rx_get_linear_frag_sz(mtu); + + return linear_frag_sz <= PAGE_SIZE; +} + +static int xsc_eth_alloc_rq(struct xsc_channel *c, + struct xsc_rq *prq, + struct xsc_rq_param *prq_param) +{ + struct xsc_adapter *adapter = c->adapter; + u8 q_log_size = prq_param->rq_attr.q_log_size; + struct page_pool_params pagepool_params = { 0 }; + u32 pool_size = 1 << q_log_size; + u8 ele_log_size = prq_param->rq_attr.ele_log_size; + struct xsc_stats *stats = c->adapter->stats; 
+ struct xsc_channel_stats *channel_stats = + &stats->channel_stats[c->chl_idx]; + int cache_init_sz = 0; + int wq_sz; + int i, f; + int ret = 0; + + prq->stats = &channel_stats->rq; + prq_param->wq.db_numa_node = cpu_to_node(c->cpu); + + ret = xsc_eth_wq_cyc_create(c->adapter->xdev, &prq_param->wq, + q_log_size, ele_log_size, &prq->wqe.wq, + &prq->wq_ctrl); + if (ret) + return ret; + + wq_sz = xsc_wq_cyc_get_size(&prq->wqe.wq); + + prq->wqe.info = prq_param->frags_info; + prq->wqe.frags = kvzalloc_node(array_size((wq_sz << prq->wqe.info.log_num_frags), + sizeof(*prq->wqe.frags)), + GFP_KERNEL, + cpu_to_node(c->cpu)); + if (!prq->wqe.frags) { + ret = -ENOMEM; + goto err_alloc_frags; + } + + ret = xsc_eth_init_di_list(prq, wq_sz, c->cpu); + if (ret) + goto err_init_di; + + prq->buff.map_dir = DMA_FROM_DEVICE; +#ifdef XSC_PAGE_CACHE + cache_init_sz = wq_sz << prq->wqe.info.log_num_frags; + ret = xsc_rx_alloc_page_cache(prq, cpu_to_node(c->cpu), ilog2(cache_init_sz)); + if (ret) + goto err_create_pool; +#endif + + /* Create a page_pool and register it with rxq */ + pool_size = wq_sz << prq->wqe.info.log_num_frags; + pagepool_params.order = XSC_RX_FRAG_SZ_ORDER; + pagepool_params.flags = 0; /* No-internal DMA mapping in page_pool */ + pagepool_params.pool_size = pool_size; + pagepool_params.nid = cpu_to_node(c->cpu); + pagepool_params.dev = c->adapter->dev; + pagepool_params.dma_dir = prq->buff.map_dir; + + prq->page_pool = page_pool_create(&pagepool_params); + if (IS_ERR(prq->page_pool)) { + ret = PTR_ERR(prq->page_pool); + prq->page_pool = NULL; + goto err_create_pool; + } + + if (c->chl_idx == 0) + xsc_core_dbg(adapter->xdev, + "page pool: size=%d, cpu=%d, pool_numa=%d, cache_size=%d, mtu=%d, wqe_numa=%d\n", + pool_size, c->cpu, pagepool_params.nid, + cache_init_sz, adapter->nic_param.mtu, + prq_param->wq.buf_numa_node); + + for (i = 0; i < wq_sz; i++) { + struct xsc_eth_rx_wqe_cyc *wqe = + xsc_wq_cyc_get_wqe(&prq->wqe.wq, i); + + for (f = 0; f < 
prq->wqe.info.num_frags; f++) { + u32 frag_size = prq->wqe.info.arr[f].frag_size; + + wqe->data[f].seg_len = cpu_to_le32(frag_size); + wqe->data[f].mkey = cpu_to_le32(XSC_INVALID_LKEY); + } + + for (; f < prq->wqe.info.frags_max_num; f++) { + wqe->data[f].seg_len = 0; + wqe->data[f].mkey = cpu_to_le32(XSC_INVALID_LKEY); + wqe->data[f].va = 0; + } + } + + prq->post_wqes = xsc_eth_post_rx_wqes; + prq->handle_rx_cqe = xsc_eth_handle_rx_cqe; + prq->dealloc_wqe = xsc_eth_dealloc_rx_wqe; + prq->wqe.skb_from_cqe = xsc_rx_is_linear_skb(adapter->nic_param.mtu) ? + xsc_skb_from_cqe_linear : + xsc_skb_from_cqe_nonlinear; + prq->ix = c->chl_idx; + prq->frags_sz = adapter->nic_param.rq_frags_size; + + if (adapter->nic_param.rx_dim_enabled) { + INIT_WORK(&prq->dim_obj.dim.work, xsc_rx_dim_work); + prq->dim_obj.dim.mode = + adapter->nic_param.rx_cq_moderation.cq_period_mode; + hrtimer_init(&prq->cq.cq_reduce.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + prq->cq.cq_reduce.timer.function = xsc_dim_reduce_timer_fn; + set_bit(XSC_ETH_RQ_STATE_AM, &prq->state); + } + + return 0; + +err_create_pool: + xsc_eth_free_di_list(prq); +err_init_di: + kvfree(prq->wqe.frags); +err_alloc_frags: + xsc_eth_wq_destroy(&prq->wq_ctrl); + return ret; +} + +#ifdef XSC_RSS_SUPPORT +static int xsc_eth_open_rss_qp_rqs(struct xsc_adapter *adapter, + struct xsc_rq_param *prq_param, + struct xsc_eth_channels *chls, + unsigned int num_chl) +{ + int ret = 0, err = 0; + struct xsc_create_multiqp_mbox_in *in; + struct xsc_create_qp_request *req; + u8 q_log_size = prq_param->rq_attr.q_log_size; + int paslen = 0; + struct xsc_rq *prq; + struct xsc_channel *c; + int rqn_base; + int inlen; + int entry_len; + int i, j, n; + int hw_npages; + + for (i = 0; i < num_chl; i++) { + c = &chls->c[i]; + + for (j = 0; j < c->qp.rq_num; j++) { + prq = &c->qp.rq[j]; + ret = xsc_eth_alloc_rq(c, prq, prq_param); + if (ret) + goto err_alloc_rqs; + + hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size, PAGE_SIZE_4K); + /*support different 
npages number smoothly*/ + entry_len = sizeof(struct xsc_create_qp_request) + + sizeof(__be64) * hw_npages; + + paslen += entry_len; + } + } + + inlen = sizeof(struct xsc_create_multiqp_mbox_in) + paslen; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + ret = -ENOMEM; + goto err_create_rss_rqs; + } + + in->qp_num = cpu_to_be16(num_chl); + in->qp_type = XSC_QUEUE_TYPE_RAW; + in->req_len = cpu_to_be32(inlen); + + req = (struct xsc_create_qp_request *)&in->data[0]; + n = 0; + for (i = 0; i < num_chl; i++) { + c = &chls->c[i]; + for (j = 0; j < c->qp.rq_num; j++) { + prq = &c->qp.rq[j]; + + hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size, PAGE_SIZE_4K); + /* no use for eth */ + req->input_qpn = cpu_to_be16(0); + req->qp_type = XSC_QUEUE_TYPE_RAW; + req->log_rq_sz = ilog2(adapter->xdev->caps.recv_ds_num) + + q_log_size; + req->pa_num = cpu_to_be16(hw_npages); + req->cqn_recv = cpu_to_be16(prq->cq.xcq.cqn); + req->cqn_send = req->cqn_recv; + req->glb_funcid = cpu_to_be16(adapter->xdev->glb_func_id); + + xsc_fill_page_frag_array(&prq->wq_ctrl.buf, &req->pas[0], hw_npages); + n++; + req = (struct xsc_create_qp_request *)(&in->data[0] + entry_len * n); + } + } + + ret = xsc_eth_create_rss_qp_rqs(adapter->xdev, in, inlen, &rqn_base); + kvfree(in); + if (ret) + goto err_create_rss_rqs; + + n = 0; + for (i = 0; i < num_chl; i++) { + c = &chls->c[i]; + for (j = 0; j < c->qp.rq_num; j++) { + prq = &c->qp.rq[j]; + prq->rqn = rqn_base + n; + prq->cqp.qpn = prq->rqn; + prq->cqp.event = xsc_eth_qp_event; + prq->cqp.eth_queue_type = XSC_RES_RQ; + ret = create_resource_common(adapter->xdev, &prq->cqp); + if (ret) { + err = ret; + xsc_core_err(adapter->xdev, + "create resource common error qp:%d errno:%d\n", + prq->rqn, ret); + continue; + } + + n++; + } + } + if (err) + return err; + + adapter->channels.rqn_base = rqn_base; + xsc_core_info(adapter->xdev, "rqn_base=%d, rq_num=%d, state=0x%lx\n", + rqn_base, num_chl, prq->state); + return 0; + +err_create_rss_rqs: + i = num_chl; 
+err_alloc_rqs: + for (--i; i >= 0; i--) { + c = &chls->c[i]; + for (j = 0; j < c->qp.rq_num; j++) { + prq = &c->qp.rq[j]; + xsc_free_qp_rq(prq); + } + } + return ret; +} + +#else +static int xsc_eth_open_qp_rq(struct xsc_channel *c, + struct xsc_rq *prq, + struct xsc_rq_param *prq_param, + u32 rq_idx) +{ + struct xsc_adapter *adapter = c->adapter; + struct xsc_core_device *xdev = adapter->xdev; + u8 q_log_size = prq_param->rq_attr.q_log_size; + struct xsc_create_qp_mbox_in *in; + int hw_npages; + int inlen; + int ret = 0; + + ret = xsc_eth_alloc_rq(c, prq, prq_param); + if (ret) + goto out; + + hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size, PAGE_SIZE_4K); + inlen = sizeof(struct xsc_create_qp_mbox_in) + + sizeof(__be64) * hw_npages; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + ret = -ENOMEM; + goto err_alloc_rq; + } + + in->req.input_qpn = cpu_to_be16(XSC_QPN_RQN_STUB); /*no use for eth*/ + in->req.qp_type = XSC_QUEUE_TYPE_RAW; + in->req.log_rq_sz = ilog2(xdev->caps.recv_ds_num) + q_log_size; + in->req.pa_num = cpu_to_be16(hw_npages); + in->req.cqn_recv = cpu_to_be16(prq->cq.xcq.cqn); + in->req.cqn_send = in->req.cqn_recv; + in->req.glb_funcid = cpu_to_be16(xdev->glb_func_id); + + xsc_fill_page_frag_array(&prq->wq_ctrl.buf, &in->req.pas[0], hw_npages); + + ret = xsc_eth_create_qp_rq(xdev, prq, in, inlen); + if (ret) + goto err_create_rq; + + prq->cqp.qpn = prq->rqn; + prq->cqp.event = xsc_eth_qp_event; + prq->cqp.eth_queue_type = XSC_RES_RQ; + + ret = create_resource_common(xdev, &prq->cqp); + if (ret) { + xsc_core_err(xdev, "failed to init rqn%d, err=%d\n", + prq->rqn, ret); + goto err_destroy_rq; + } + + xsc_core_info(c->adapter->xdev, "rqn=%d ch_num=%d state=0x%llx\n", + prq->rqn, c->chl_idx, prq->state); + + kvfree(in); + + return 0; + +err_destroy_rq: + xsc_eth_destroy_qp_rq(xdev, prq); +err_create_rq: + kvfree(in); +err_alloc_rq: + xsc_free_qp_rq(prq); +out: + return ret; +} +#endif + +static int xsc_eth_close_qp_rq(struct xsc_channel *c, struct 
xsc_rq *prq) +{ + int ret; + struct xsc_core_device *xdev = c->adapter->xdev; + + destroy_resource_common(xdev, &prq->cqp); + + ret = xsc_eth_destroy_qp_rq(xdev, prq); + if (ret) + return ret; + + xsc_eth_free_rx_wqe(prq); + xsc_free_qp_rq(prq); + + return 0; +} + +static int xsc_eth_open_qp_sq(struct xsc_channel *c, + struct xsc_sq *psq, + struct xsc_sq_param *psq_param, + u32 sq_idx) +{ + struct xsc_adapter *adapter = c->adapter; + struct xsc_core_device *xdev = adapter->xdev; + u8 q_log_size = psq_param->sq_attr.q_log_size; + u8 ele_log_size = psq_param->sq_attr.ele_log_size; + struct xsc_stats *stats = adapter->stats; + struct xsc_channel_stats *channel_stats = + &stats->channel_stats[c->chl_idx]; + struct xsc_create_qp_mbox_in *in; + struct xsc_modify_raw_qp_mbox_in *modify_in; + int hw_npages; + int inlen; + int ret; + + psq->stats = &channel_stats->sq[sq_idx]; + psq_param->wq.db_numa_node = cpu_to_node(c->cpu); + + ret = xsc_eth_wq_cyc_create(xdev, &psq_param->wq, + q_log_size, ele_log_size, &psq->wq, + &psq->wq_ctrl); + if (ret) + return ret; + + hw_npages = DIV_ROUND_UP(psq->wq_ctrl.buf.size, PAGE_SIZE_4K); + inlen = sizeof(struct xsc_create_qp_mbox_in) + + sizeof(__be64) * hw_npages; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + ret = -ENOMEM; + goto err_sq_wq_destroy; + } + in->req.input_qpn = cpu_to_be16(XSC_QPN_SQN_STUB); /*no use for eth*/ + in->req.qp_type = XSC_QUEUE_TYPE_RAW_TSO; /*default sq is tso qp*/ + in->req.log_sq_sz = ilog2(xdev->caps.send_ds_num) + q_log_size; + in->req.pa_num = cpu_to_be16(hw_npages); + in->req.cqn_send = cpu_to_be16(psq->cq.xcq.cqn); + in->req.cqn_recv = in->req.cqn_send; + in->req.glb_funcid = cpu_to_be16(xdev->glb_func_id); + + xsc_fill_page_frag_array(&psq->wq_ctrl.buf, + &in->req.pas[0], hw_npages); + + ret = xsc_eth_create_qp_sq(xdev, psq, in, inlen); + if (ret) + goto err_sq_in_destroy; + + psq->cqp.qpn = psq->sqn; + psq->cqp.event = xsc_eth_qp_event; + psq->cqp.eth_queue_type = XSC_RES_SQ; + + ret = 
create_resource_common(xdev, &psq->cqp); + if (ret) { + xsc_core_err(xdev, "%s:error qp:%d errno:%d\n", + __func__, psq->sqn, ret); + goto err_sq_destroy; + } + + psq->channel = c; + psq->ch_ix = c->chl_idx; + psq->txq_ix = psq->ch_ix + sq_idx * adapter->channels.num_chl; + + /*need to querify from hardware*/ + psq->hw_mtu = XSC_ETH_HW_MTU_SEND; + psq->stop_room = 1; + + ret = xsc_eth_alloc_qp_sq_db(psq, psq_param->wq.db_numa_node); + if (ret) + goto err_sq_common_destroy; + + inlen = sizeof(struct xsc_modify_raw_qp_mbox_in); + modify_in = kvzalloc(inlen, GFP_KERNEL); + if (!modify_in) { + ret = -ENOMEM; + goto err_sq_common_destroy; + } + + modify_in->req.qp_out_port = xdev->pf_id; + modify_in->pcie_no = xdev->pcie_no; + modify_in->req.qpn = cpu_to_be16((u16)(psq->sqn)); + modify_in->req.func_id = cpu_to_be16(xdev->glb_func_id); + modify_in->req.dma_direct = DMA_DIR_TO_MAC; + modify_in->req.prio = sq_idx; + ret = xsc_eth_modify_qp_sq(xdev, modify_in); + if (ret) + goto err_sq_modify_in_destroy; + + kvfree(modify_in); + kvfree(in); + + if (adapter->nic_param.tx_dim_enabled) { + INIT_WORK(&psq->dim_obj.dim.work, xsc_tx_dim_work); + psq->dim_obj.dim.mode = adapter->nic_param.tx_cq_moderation.cq_period_mode; + hrtimer_init(&psq->cq.cq_reduce.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + psq->cq.cq_reduce.timer.function = xsc_dim_reduce_timer_fn; + set_bit(XSC_ETH_SQ_STATE_AM, &psq->state); + } + + xsc_core_info(c->adapter->xdev, + "open sq ok, ch%d_sq%d_qpn=%d, state=0x%lx, db_numa=%d, buf_numa=%d\n", + c->chl_idx, sq_idx, psq->sqn, psq->state, + psq_param->wq.db_numa_node, psq_param->wq.buf_numa_node); + + return 0; + +err_sq_modify_in_destroy: + kvfree(modify_in); + +err_sq_common_destroy: + destroy_resource_common(xdev, &psq->cqp); + +err_sq_destroy: + xsc_eth_destroy_qp_sq(xdev, psq); + +err_sq_in_destroy: + kvfree(in); + +err_sq_wq_destroy: + xsc_eth_wq_destroy(&psq->wq_ctrl); + return ret; +} + +static int xsc_eth_close_qp_sq(struct xsc_channel *c, struct xsc_sq 
*psq) +{ + struct xsc_core_device *xdev = c->adapter->xdev; + int ret; + + destroy_resource_common(xdev, &psq->cqp); + + ret = xsc_eth_destroy_qp_sq(xdev, psq); + if (ret) + return ret; + + xsc_free_tx_wqe(c->adapter->dev, psq); + xsc_free_qp_sq(psq); + + return 0; +} + +int xsc_eth_open_channel(struct xsc_adapter *adapter, + int idx, + struct xsc_channel *c, + struct xsc_channel_param *chl_param) +{ + int ret = 0; + struct net_device *netdev = adapter->netdev; + struct xsc_stats *stats = adapter->stats; + struct xsc_core_device *xdev = adapter->xdev; + int i, j, eqn, irqn; + const struct cpumask *aff; + + c->adapter = adapter; + c->netdev = adapter->netdev; + c->chl_idx = idx; + c->num_tc = adapter->nic_param.num_tc; + c->stats = &stats->channel_stats[idx].ch; + + /*1rq per channel, and may have multi sqs per channel*/ + c->qp.rq_num = 1; + c->qp.sq_num = c->num_tc; + + if (xdev->caps.msix_enable) { + ret = xsc_vector2eqn(xdev, c->chl_idx, &eqn, &irqn); + if (ret) + goto err; + aff = irq_get_affinity_mask(irqn); + c->aff_mask = aff; + c->cpu = cpumask_first(aff); + } + + if (c->qp.sq_num > XSC_MAX_NUM_TC || c->qp.rq_num > XSC_MAX_NUM_TC) { + ret = -EINVAL; + goto err; + } + + for (i = 0; i < c->qp.rq_num; i++) { + ret = xsc_eth_open_cq(c, &c->qp.rq[i].cq, &chl_param->rqcq_param); + if (ret) { + j = i - 1; + goto err_open_rq_cq; + } + } + + for (i = 0; i < c->qp.sq_num; i++) { + ret = xsc_eth_open_cq(c, &c->qp.sq[i].cq, &chl_param->sqcq_param); + if (ret) { + j = i - 1; + goto err_open_sq_cq; + } + } + +#ifndef XSC_RSS_SUPPORT + for (i = 0; i < c->qp.rq_num; i++) { + ret = xsc_eth_open_qp_rq(c, &c->qp.rq[i], &chl_param->rq_param, i); + if (ret) { + j = i - 1; + goto err_open_rq; + } + } +#endif + + for (i = 0; i < c->qp.sq_num; i++) { + ret = xsc_eth_open_qp_sq(c, &c->qp.sq[i], &chl_param->sq_param, i); + if (ret) { + j = i - 1; + goto err_open_sq; + } + } + + netif_napi_add(netdev, &c->napi, xsc_eth_napi_poll); + + xsc_core_dbg(adapter->xdev, "open channel%d 
ok\n", idx); + return 0; + +err_open_sq: + for (; j >= 0; j--) + xsc_eth_close_qp_sq(c, &c->qp.sq[j]); + j = (c->qp.rq_num - 1); +#ifndef XSC_RSS_SUPPORT +err_open_rq: + for (; j >= 0; j--) + xsc_eth_close_qp_rq(c, &c->qp.rq[j]); + j = (c->qp.sq_num - 1); +#endif +err_open_sq_cq: + for (; j >= 0; j--) + xsc_eth_close_cq(c, &c->qp.sq[j].cq); + j = (c->qp.rq_num - 1); +err_open_rq_cq: + for (; j >= 0; j--) + xsc_eth_close_cq(c, &c->qp.rq[j].cq); +err: + xsc_core_warn(adapter->xdev, + "failed to open channel: ch%d, sq_num=%d, rq_num=%d, err=%d\n", + idx, c->qp.sq_num, c->qp.rq_num, ret); + return ret; +} + +static u32 xsc_get_rq_frag_info(struct xsc_rq_frags_info *frags_info, u32 mtu) +{ + u32 byte_count = XSC_SW2HW_FRAG_SIZE(mtu); + int frag_stride; + int i = 0; + + if (xsc_rx_is_linear_skb(mtu)) { + frag_stride = xsc_rx_get_linear_frag_sz(mtu); + frag_stride = roundup_pow_of_two(frag_stride); + + frags_info->arr[0].frag_size = byte_count; + frags_info->arr[0].frag_stride = frag_stride; + frags_info->num_frags = 1; + frags_info->wqe_bulk = PAGE_SIZE / frag_stride; + frags_info->wqe_bulk_min = frags_info->wqe_bulk; + goto out; + } + + if (byte_count <= DEFAULT_FRAG_SIZE) { + frags_info->arr[0].frag_size = DEFAULT_FRAG_SIZE; + frags_info->arr[0].frag_stride = DEFAULT_FRAG_SIZE; + frags_info->num_frags = 1; + } else if (byte_count <= PAGE_SIZE_4K) { + frags_info->arr[0].frag_size = PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = PAGE_SIZE_4K; + frags_info->num_frags = 1; + } else if (byte_count <= (PAGE_SIZE_4K + DEFAULT_FRAG_SIZE)) { + if (PAGE_SIZE < 2 * PAGE_SIZE_4K) { + frags_info->arr[0].frag_size = PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = PAGE_SIZE_4K; + frags_info->arr[1].frag_size = PAGE_SIZE_4K; + frags_info->arr[1].frag_stride = PAGE_SIZE_4K; + frags_info->num_frags = 2; + } else { + frags_info->arr[0].frag_size = 2 * PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = 2 * PAGE_SIZE_4K; + frags_info->num_frags = 1; + } + } else if (byte_count <= 2 * 
PAGE_SIZE_4K) { + if (PAGE_SIZE < 2 * PAGE_SIZE_4K) { + frags_info->arr[0].frag_size = PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = PAGE_SIZE_4K; + frags_info->arr[1].frag_size = PAGE_SIZE_4K; + frags_info->arr[1].frag_stride = PAGE_SIZE_4K; + frags_info->num_frags = 2; + } else { + frags_info->arr[0].frag_size = 2 * PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = 2 * PAGE_SIZE_4K; + frags_info->num_frags = 1; + } + } else { + if (PAGE_SIZE < 4 * PAGE_SIZE_4K) { + frags_info->num_frags = roundup(byte_count, PAGE_SIZE_4K) / PAGE_SIZE_4K; + for (i = 0; i < frags_info->num_frags; i++) { + frags_info->arr[i].frag_size = PAGE_SIZE_4K; + frags_info->arr[i].frag_stride = PAGE_SIZE_4K; + } + } else { + frags_info->arr[0].frag_size = 4 * PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = 4 * PAGE_SIZE_4K; + frags_info->num_frags = 1; + } + } + + if (PAGE_SIZE <= PAGE_SIZE_4K) { + frags_info->wqe_bulk_min = 4; + frags_info->wqe_bulk = max_t(u8, frags_info->wqe_bulk_min, 8); + } else if (PAGE_SIZE <= 2 * PAGE_SIZE_4K) { + frags_info->wqe_bulk = 2; + frags_info->wqe_bulk_min = frags_info->wqe_bulk; + } else { + frags_info->wqe_bulk = + PAGE_SIZE / (frags_info->num_frags * frags_info->arr[0].frag_size); + frags_info->wqe_bulk_min = frags_info->wqe_bulk; + } + +out: + frags_info->log_num_frags = order_base_2(frags_info->num_frags); + + return frags_info->num_frags * frags_info->arr[0].frag_size; +} + +static void xsc_build_rq_frags_info(struct xsc_queue_attr *attr, + struct xsc_rq_frags_info *frags_info, + struct xsc_eth_params *params) +{ + params->rq_frags_size = xsc_get_rq_frag_info(frags_info, params->mtu); + frags_info->frags_max_num = attr->ele_size / XSC_RECV_WQE_DS; +} + +static void xsc_eth_build_channel_param(struct xsc_adapter *adapter, + struct xsc_channel_param *chl_param) +{ + xsc_eth_build_queue_param(adapter, &chl_param->rqcq_param.cq_attr, + XSC_QUEUE_TYPE_RQCQ); + chl_param->rqcq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + 
xsc_eth_build_queue_param(adapter, &chl_param->sqcq_param.cq_attr, + XSC_QUEUE_TYPE_SQCQ); + chl_param->sqcq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_eth_build_queue_param(adapter, &chl_param->sq_param.sq_attr, + XSC_QUEUE_TYPE_SQ); + chl_param->sq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_eth_build_queue_param(adapter, &chl_param->rq_param.rq_attr, + XSC_QUEUE_TYPE_RQ); + chl_param->rq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_build_rq_frags_info(&chl_param->rq_param.rq_attr, + &chl_param->rq_param.frags_info, + &adapter->nic_param); +} + +int xsc_eth_open_channels(struct xsc_adapter *adapter) +{ + int ret = 0; + int i; + struct xsc_channel_param *chl_param; + struct xsc_eth_channels *chls = &adapter->channels; + struct xsc_core_device *xdev = adapter->xdev; + bool free_rq = false; + + chls->num_chl = adapter->nic_param.num_channels; + chls->c = kcalloc_node(chls->num_chl, sizeof(struct xsc_channel), + GFP_KERNEL, xdev->priv.numa_node); + if (!chls->c) { + ret = -ENOMEM; + goto err; + } + + chl_param = kvzalloc(sizeof(*chl_param), GFP_KERNEL); + if (!chl_param) { + ret = -ENOMEM; + goto err_free_ch; + } + + xsc_eth_build_channel_param(adapter, chl_param); + + for (i = 0; i < chls->num_chl; i++) { + ret = xsc_eth_open_channel(adapter, i, &chls->c[i], chl_param); + if (ret) + goto err_open_channel; +#ifndef XSC_RSS_SUPPORT + free_rq = true; +#endif + } + +#ifdef XSC_RSS_SUPPORT + ret = xsc_eth_open_rss_qp_rqs(adapter, &chl_param->rq_param, chls, chls->num_chl); + if (ret) + goto err_open_channel; + free_rq = true; +#endif + + for (i = 0; i < chls->num_chl; i++) + napi_enable(&chls->c[i].napi); + + /* flush cache to memory before interrupt and napi_poll running */ + smp_wmb(); + + ret = xsc_eth_modify_qps(adapter, chls); + if (ret) + goto err_modify_qps; + + kvfree(chl_param); + xsc_core_info(adapter->xdev, "open %d channels ok\n", chls->num_chl); + return 0; + +err_modify_qps: + i = chls->num_chl; 
+err_open_channel: + for (--i; i >= 0; i--) + xsc_eth_close_channel(&chls->c[i], free_rq); + + kvfree(chl_param); +err_free_ch: + kfree(chls->c); +err: + chls->num_chl = 0; + xsc_core_warn(adapter->xdev, "failed to open %d channels, err=%d\n", + chls->num_chl, ret); + return ret; +} + +static void xsc_eth_activate_txqsq(struct xsc_channel *c) +{ + int tc = c->num_tc; + struct xsc_sq *psq; + + for (tc = 0; tc < c->num_tc; tc++) { + psq = &c->qp.sq[tc]; + psq->txq = netdev_get_tx_queue(psq->channel->netdev, psq->txq_ix); + set_bit(XSC_ETH_SQ_STATE_ENABLED, &psq->state); + netdev_tx_reset_queue(psq->txq); + netif_tx_start_queue(psq->txq); + } +} + +static void xsc_eth_deactivate_txqsq(struct xsc_channel *c) +{ + int tc = c->num_tc; + struct xsc_sq *psq; + + for (tc = 0; tc < c->num_tc; tc++) { + psq = &c->qp.sq[tc]; + clear_bit(XSC_ETH_SQ_STATE_ENABLED, &psq->state); + } +} + +static void xsc_activate_rq(struct xsc_channel *c) +{ + int i; + + for (i = 0; i < c->qp.rq_num; i++) + set_bit(XSC_ETH_RQ_STATE_ENABLED, &c->qp.rq[i].state); +} + +static void xsc_deactivate_rq(struct xsc_channel *c) +{ + int i; + + for (i = 0; i < c->qp.rq_num; i++) + clear_bit(XSC_ETH_RQ_STATE_ENABLED, &c->qp.rq[i].state); +} + +void xsc_eth_activate_channel(struct xsc_channel *c) +{ + xsc_eth_activate_txqsq(c); + xsc_activate_rq(c); +} + +void xsc_eth_deactivate_channel(struct xsc_channel *c) +{ + xsc_deactivate_rq(c); + xsc_eth_deactivate_txqsq(c); +} + +static void xsc_eth_activate_channels(struct xsc_eth_channels *chs) +{ + int i; + + for (i = 0; i < chs->num_chl; i++) + xsc_eth_activate_channel(&chs->c[i]); +} + +static void xsc_eth_deactivate_channels(struct xsc_eth_channels *chs) +{ + int i; + + for (i = 0; i < chs->num_chl; i++) + xsc_eth_deactivate_channel(&chs->c[i]); + + /* Sync with all NAPIs to wait until they stop using queues. 
*/ + synchronize_net(); + + for (i = 0; i < chs->num_chl; i++) + /* last doorbell out */ + napi_disable(&chs->c[i].napi); +} + +static void xsc_eth_build_tx2sq_maps(struct xsc_adapter *adapter) +{ + struct xsc_channel *c; + struct xsc_sq *psq; + int i, tc; + + for (i = 0; i < adapter->channels.num_chl; i++) { + c = &adapter->channels.c[i]; + for (tc = 0; tc < c->num_tc; tc++) { + psq = &c->qp.sq[tc]; + adapter->txq2sq[psq->txq_ix] = psq; + adapter->channel_tc2realtxq[i][tc] = + i + tc * adapter->channels.num_chl; + } + } +} + +void xsc_eth_activate_priv_channels(struct xsc_adapter *adapter) +{ + int num_txqs; + struct net_device *netdev = adapter->netdev; + + num_txqs = adapter->channels.num_chl * adapter->nic_param.num_tc; + xsc_netdev_set_tcs(adapter, adapter->channels.num_chl, adapter->nic_param.num_tc); + netif_set_real_num_tx_queues(netdev, num_txqs); + netif_set_real_num_rx_queues(netdev, adapter->channels.num_chl); + + xsc_eth_build_tx2sq_maps(adapter); + xsc_eth_activate_channels(&adapter->channels); + netif_tx_start_all_queues(adapter->netdev); +} + +void xsc_eth_deactivate_priv_channels(struct xsc_adapter *adapter) +{ + netif_tx_disable(adapter->netdev); + xsc_eth_deactivate_channels(&adapter->channels); +} + +static int xsc_eth_sw_init(struct xsc_adapter *adapter) +{ + int ret; + + ret = xsc_eth_open_channels(adapter); + if (ret) + return ret; + + xsc_eth_activate_priv_channels(adapter); + + return 0; +} + +static void xsc_eth_close_channel(struct xsc_channel *c, bool free_rq) +{ + int i; + + for (i = 0; i < c->qp.rq_num; i++) { + if (free_rq) + xsc_eth_close_qp_rq(c, &c->qp.rq[i]); + xsc_eth_close_cq(c, &c->qp.rq[i].cq); + memset(&c->qp.rq[i], 0, sizeof(struct xsc_rq)); + } + + for (i = 0; i < c->qp.sq_num; i++) { + xsc_eth_close_qp_sq(c, &c->qp.sq[i]); + xsc_eth_close_cq(c, &c->qp.sq[i].cq); + } + + netif_napi_del(&c->napi); +} + +static void xsc_eth_close_channels(struct xsc_adapter *adapter) +{ + int i; + struct xsc_channel *c = NULL; + + for (i = 0; 
i < adapter->channels.num_chl; i++) { + c = &adapter->channels.c[i]; + xsc_core_dbg(adapter->xdev, "start to close channel%d\n", c->chl_idx); + + xsc_eth_close_channel(c, true); + } + + kfree(adapter->channels.c); + adapter->channels.num_chl = 0; +} + +static void xsc_eth_sw_deinit(struct xsc_adapter *adapter) +{ + xsc_eth_deactivate_priv_channels(adapter); + + return xsc_eth_close_channels(adapter); +} + +int xsc_eth_set_led_status(int id, struct xsc_adapter *adapter) +{ + int err; + + struct xsc_event_set_led_status_mbox_in in; + struct xsc_event_set_led_status_mbox_out out; + + /*query linkstatus cmd*/ + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_LED_STATUS); + in.port_id = id; + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.status) { + xsc_core_err(adapter->xdev, "failed to set led to %d, err=%d, status=%d\n", + id, err, out.status); + return -1; + } + + return 0; +} + +bool xsc_eth_get_link_status(struct xsc_adapter *adapter) +{ + bool link_up; + struct xsc_core_device *xdev = adapter->xdev; + u16 vport = xsc_core_is_pf(xdev) ? 0 : (xdev->vf_id + 1); + + link_up = xsc_query_vport_state(xdev, XSC_CMD_OP_QUERY_VPORT_STATE, vport); + + xsc_core_dbg(adapter->xdev, "link_status=%d\n", link_up); + + return link_up ? 
true : false; +} + +int xsc_eth_get_link_info(struct xsc_adapter *adapter, + struct xsc_event_linkinfo *plinkinfo) +{ + struct xsc_event_query_linkinfo_mbox_in in; + struct xsc_event_query_linkinfo_mbox_out out; + int i, err; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_LINK_INFO); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to get link info, err=%d, status=%d\n", + err, out.hdr.status); + return -ENOEXEC; + } + + memcpy(plinkinfo, &out.ctx, sizeof(*plinkinfo)); + + plinkinfo->linkspeed = be32_to_cpu(plinkinfo->linkspeed); + plinkinfo->supported = be64_to_cpu(plinkinfo->supported); + plinkinfo->advertising = be64_to_cpu(plinkinfo->advertising); + for (i = 0; i < ARRAY_SIZE(plinkinfo->supported_speed); i++) { + plinkinfo->supported_speed[i] = be64_to_cpu(plinkinfo->supported_speed[i]); + plinkinfo->advertising_speed[i] = be64_to_cpu(plinkinfo->advertising_speed[i]); + } + + return 0; +} + +int xsc_eth_set_link_info(struct xsc_adapter *adapter, + struct xsc_event_linkinfo *plinkinfo) +{ + struct xsc_event_modify_linkinfo_mbox_in in; + struct xsc_event_modify_linkinfo_mbox_out out; + int err = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_LINK_INFO); + memcpy(&in.ctx, plinkinfo, sizeof(*plinkinfo)); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to set link info, err=%d, status=%d\n", + err, out.hdr.status); + return -ENOEXEC; + } + + return err; +} + +int xsc_get_link_speed(struct xsc_core_device *dev) +{ + struct xsc_adapter *adapter = netdev_priv(dev->netdev); + struct xsc_event_linkinfo linkinfo; + + if (xsc_eth_get_link_info(adapter, &linkinfo)) { + xsc_core_err(adapter->xdev, "fail to get linkspeed, return 25G\n"); + return MODULE_SPEED_25G; + } + + return linkinfo.linkspeed; +} +EXPORT_SYMBOL(xsc_get_link_speed); + +#if defined(MSIX_SUPPORT) +int 
xsc_eth_change_link_status(struct xsc_adapter *adapter) +{ + bool link_up; + + link_up = xsc_eth_get_link_status(adapter); + + if (link_up && !netif_carrier_ok(adapter->netdev)) { + netdev_info(adapter->netdev, "Link up\n"); + netif_carrier_on(adapter->netdev); + } else if (!link_up && netif_carrier_ok(adapter->netdev)) { + netdev_info(adapter->netdev, "Link down\n"); + netif_carrier_off(adapter->netdev); + } + + return 0; +} + +static void xsc_eth_event_work(struct work_struct *work) +{ + int err; + struct xsc_event_query_type_mbox_in in; + struct xsc_event_query_type_mbox_out out; + struct xsc_adapter *adapter = container_of(work, struct xsc_adapter, event_work); + + if (adapter->status != XSCALE_ETH_DRIVER_OK) + return; + + /*query cmd_type cmd*/ + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_EVENT_TYPE); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to query event type, err=%d, stats=%d\n", + err, out.hdr.status); + goto failed; + } + + switch (out.ctx.resp_cmd_type) { + case XSC_CMD_EVENT_RESP_CHANGE_LINK: + err = xsc_eth_change_link_status(adapter); + if (err) { + xsc_core_err(adapter->xdev, "failed to change linkstatus, err=%d\n", err); + goto failed; + } + + xsc_core_dbg(adapter->xdev, "event cmdtype=%04x\n", out.ctx.resp_cmd_type); + break; + case XSC_CMD_EVENT_RESP_TEMP_WARN: + xsc_core_warn(adapter->xdev, "[Minor]nic chip temperature high warning\n"); + break; + case XSC_CMD_EVENT_RESP_OVER_TEMP_PROTECTION: + xsc_core_warn(adapter->xdev, "[Critical]nic chip was over-temperature\n"); + break; + default: + xsc_core_info(adapter->xdev, "unknown event cmdtype=%04x\n", + out.ctx.resp_cmd_type); + break; + } + +failed: + return; +} + +void xsc_eth_event_handler(void *arg) +{ + struct xsc_adapter *adapter = (struct xsc_adapter *)arg; + + queue_work(adapter->workq, &adapter->event_work); +} +#endif + +int xsc_eth_enable_nic_hca(struct xsc_adapter *adapter) +{ + 
struct xsc_core_device *xdev = adapter->xdev; + struct net_device *netdev = adapter->netdev; + struct xsc_cmd_enable_nic_hca_mbox_in in = {}; + struct xsc_cmd_enable_nic_hca_mbox_out out = {}; + u16 caps = 0; + u16 caps_mask = 0; + int err; + + if (xsc_get_user_mode(xdev)) + return 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_NIC_HCA); + +#ifdef XSC_RSS_SUPPORT + in.rss.rss_en = 1; + in.rss.rqn_base = cpu_to_be16(adapter->channels.rqn_base - + xdev->caps.raweth_rss_qp_id_base); + in.rss.rqn_num = cpu_to_be16(adapter->channels.num_chl); + in.rss.hash_tmpl = cpu_to_be32(adapter->rss_params.rss_hash_tmpl); + in.rss.hfunc = hash_func_type(adapter->rss_params.hfunc); +#else + in.rss.rss_en = 0; + if (adapter->channels.c) + in.rss.rqn_base = cpu_to_be16(adapter->channels.c[0].qp.rq[0].rqn - + xdev->caps.raweth_rss_qp_id_base); +#endif + caps_mask |= BIT(XSC_TBM_CAP_RSS); + + if (netdev->features & NETIF_F_RXCSUM) + caps |= BIT(XSC_TBM_CAP_HASH_PPH); + caps_mask |= BIT(XSC_TBM_CAP_HASH_PPH); + + if (xsc_get_pp_bypass_res(adapter->xdev, false)) + caps |= BIT(XSC_TBM_CAP_PP_BYPASS); + caps_mask |= BIT(XSC_TBM_CAP_PP_BYPASS); + + if (xsc_get_pct_drop_config(xdev) && !(netdev->flags & IFF_SLAVE)) + caps |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG); + caps_mask |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG); + + memcpy(in.nic.mac_addr, netdev->dev_addr, ETH_ALEN); + + in.nic.caps = cpu_to_be16(caps); + in.nic.caps_mask = cpu_to_be16(caps_mask); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed!! 
err=%d, status=%d\n", err, out.hdr.status); + return -ENOEXEC; + } + + xsc_core_info(xdev, "caps=0x%x, caps_mask=0x%x\n", caps, caps_mask); + + return 0; +} + +int xsc_eth_restore_nic_hca(struct xsc_core_device *dev) +{ + return xsc_eth_enable_nic_hca((struct xsc_adapter *)dev->eth_priv); +} +EXPORT_SYMBOL(xsc_eth_restore_nic_hca); + +int xsc_eth_disable_nic_hca(struct xsc_adapter *adapter) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct net_device *netdev = adapter->netdev; + struct xsc_cmd_disable_nic_hca_mbox_in in = {}; + struct xsc_cmd_disable_nic_hca_mbox_out out = {}; + int err; + u16 caps = 0; + + if (xsc_get_user_mode(xdev)) + return 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DISABLE_NIC_HCA); + + if (xsc_get_pp_bypass_res(adapter->xdev, false)) + caps |= BIT(XSC_TBM_CAP_PP_BYPASS); + + if (xsc_get_pct_drop_config(xdev) && !(netdev->priv_flags & IFF_BONDING)) + caps |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG); + + in.nic.caps = cpu_to_be16(caps); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed!! 
err=%d, status=%d\n", err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +void xsc_eth_rss_params_change(struct xsc_adapter *adapter, u32 change, void *modify) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_rss_params *rss = &adapter->rss_params; + struct xsc_eth_params *params = &adapter->nic_param; + struct xsc_cmd_modify_nic_hca_mbox_in *in = + (struct xsc_cmd_modify_nic_hca_mbox_in *)modify; + u32 hash_field = 0; + int key_len; + u8 rss_caps_mask = 0; + + if (xsc_get_user_mode(xdev)) + return; + + if (change & BIT(XSC_RSS_RXQ_DROP)) { + in->rss.rqn_base = cpu_to_be16(adapter->channels.rqn_base - + xdev->caps.raweth_rss_qp_id_base); + in->rss.rqn_num = 0; + rss_caps_mask |= BIT(XSC_RSS_RXQ_DROP); + goto rss_caps; + } + + if (change & BIT(XSC_RSS_RXQ_UPDATE)) { + in->rss.rqn_base = cpu_to_be16(adapter->channels.rqn_base - + xdev->caps.raweth_rss_qp_id_base); + in->rss.rqn_num = cpu_to_be16(params->num_channels); + rss_caps_mask |= BIT(XSC_RSS_RXQ_UPDATE); + } + + if (change & BIT(XSC_RSS_HASH_KEY_UPDATE)) { + key_len = min(sizeof(in->rss.hash_key), sizeof(rss->toeplitz_hash_key)); + memcpy(&in->rss.hash_key, rss->toeplitz_hash_key, key_len); + rss_caps_mask |= BIT(XSC_RSS_HASH_KEY_UPDATE); + } + + if (change & BIT(XSC_RSS_HASH_TEMP_UPDATE)) { + hash_field = rss->rx_hash_fields[XSC_TT_IPV4_TCP] | + rss->rx_hash_fields[XSC_TT_IPV6_TCP]; + in->rss.hash_tmpl = cpu_to_be32(hash_field); + rss_caps_mask |= BIT(XSC_RSS_HASH_TEMP_UPDATE); + } + + if (change & BIT(XSC_RSS_HASH_FUNC_UPDATE)) { + in->rss.hfunc = hash_func_type(rss->hfunc); + rss_caps_mask |= BIT(XSC_RSS_HASH_FUNC_UPDATE); + } + +rss_caps: + if (rss_caps_mask) { + in->rss.caps_mask = rss_caps_mask; + in->rss.rss_en = 1; + in->nic.caps_mask = cpu_to_be16(BIT(XSC_TBM_CAP_RSS)); + in->nic.caps = in->nic.caps_mask; + } +} + +int xsc_eth_modify_nic_hca(struct xsc_adapter *adapter, u32 flags) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_cmd_modify_nic_hca_mbox_in in 
= {}; + struct xsc_cmd_modify_nic_hca_mbox_out out = {}; + int err = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_HCA); + + xsc_eth_rss_params_change(adapter, flags, &in); + if (in.rss.caps_mask) { + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed!! err=%d, status=%u\n", + err, out.hdr.status); + return -ENOEXEC; + } + } + + return 0; +} + +static void xsc_set_default_xps_cpumasks(struct xsc_adapter *priv, + struct xsc_eth_params *params) +{ +#ifdef MSIX_SUPPORT + struct xsc_core_device *xdev = priv->xdev; + int num_comp_vectors, irq; + + num_comp_vectors = priv->nic_param.comp_vectors; + cpumask_clear(xdev->xps_cpumask); + + for (irq = 0; irq < num_comp_vectors; irq++) { + mask_cpu_by_node(xdev->priv.numa_node, xdev->xps_cpumask); + netif_set_xps_queue(priv->netdev, xdev->xps_cpumask, irq); + } +#endif +} + +static int xsc_set_port_admin_status(struct xsc_adapter *adapter, + enum xsc_port_status status) +{ + struct xsc_event_set_port_admin_status_mbox_in in; + struct xsc_event_set_port_admin_status_mbox_out out; + int ret = 0; + + if (!xsc_core_is_pf(adapter->xdev)) + return 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_PORT_ADMIN_STATUS); + in.admin_status = cpu_to_be16(status); + + ret = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to set port admin status, err=%d, status=%d\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return ret; +} + +int xsc_eth_open(struct net_device *netdev) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_core_device *xdev = adapter->xdev; + int ret = XSCALE_RET_SUCCESS; + + mutex_lock(&adapter->state_lock); + if (adapter->status == XSCALE_ETH_DRIVER_OK) { + xsc_core_warn(adapter->xdev, "unnormal ndo_open when status=%d\n", + adapter->status); + goto ret; + } + + spin_lock_init(&adapter->lock); + + ret = xsc_eth_sw_init(adapter); + if 
(ret) + goto ret; + + ret = xsc_eth_reset(xdev); + if (ret) + goto sw_deinit; + + ret = xsc_eth_enable_nic_hca(adapter); + if (ret) + goto sw_deinit; + +#ifdef NEED_CREATE_RX_THREAD + ret = xsc_eth_rx_thread_create(adapter); + if (ret) { + xsc_core_warn(xdev, "xsc_eth_rx_thread_create failed, err=%d\n", ret); + goto sw_deinit; + } +#endif + +#if defined(MSIX_SUPPORT) + /*INIT_WORK*/ + INIT_WORK(&adapter->event_work, xsc_eth_event_work); + xdev->event_handler = xsc_eth_event_handler; + + if (xsc_eth_get_link_status(adapter)) { + netdev_info(netdev, "Link up\n"); + netif_carrier_on(adapter->netdev); + } else { + netdev_info(netdev, "Link down\n"); + } +#else + netif_carrier_on(netdev); +#endif + + adapter->status = XSCALE_ETH_DRIVER_OK; + + xsc_set_default_xps_cpumasks(adapter, &adapter->nic_param); + + xsc_set_port_admin_status(adapter, XSC_PORT_UP); + + goto ret; + +sw_deinit: + xsc_eth_sw_deinit(adapter); + +ret: + mutex_unlock(&adapter->state_lock); + xsc_core_info(xdev, "open %s %s, ret=%d\n", + netdev->name, ret ? "failed" : "ok", ret); + if (ret) + return XSCALE_RET_ERROR; + else + return XSCALE_RET_SUCCESS; +} + +int xsc_eth_close(struct net_device *netdev) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + mutex_lock(&adapter->state_lock); + + if (!netif_device_present(netdev)) { + ret = -ENODEV; + goto ret; + } + + if (adapter->status != XSCALE_ETH_DRIVER_OK) + goto ret; + + adapter->status = XSCALE_ETH_DRIVER_CLOSE; + +#ifdef NEED_CREATE_RX_THREAD + if (adapter->task) + kthread_stop(adapter->task); +#endif + + netif_carrier_off(adapter->netdev); + + xsc_eth_sw_deinit(adapter); + + ret = xsc_eth_disable_nic_hca(adapter); + if (ret) + xsc_core_warn(adapter->xdev, "failed to disable nic hca, err=%d\n", ret); + + xsc_set_port_admin_status(adapter, XSC_PORT_DOWN); + +ret: + mutex_unlock(&adapter->state_lock); + xsc_core_info(adapter->xdev, "close device %s %s, ret=%d\n", + adapter->netdev->name, ret ? 
"failed" : "ok", ret); + + return ret; +} + +static int xsc_eth_set_mac(struct net_device *netdev, void *addr) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct sockaddr *saddr = addr; + struct xsc_core_device *xdev = adapter->xdev; + int ret; + u16 vport = xsc_core_is_pf(xdev) ? 0 : (xdev->vf_id + 1); + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + ret = xsc_modify_nic_vport_mac_address(xdev, vport, saddr->sa_data, false); + if (ret) + xsc_core_err(adapter->xdev, "%s: xsc set mac addr failed\n", __func__); + + netif_addr_lock_bh(netdev); + eth_hw_addr_set(netdev, saddr->sa_data); + netif_addr_unlock_bh(netdev); + + return 0; +} + +static void xsc_netdev_set_tcs(struct xsc_adapter *priv, u16 nch, u8 ntc) +{ + int tc; + + netdev_reset_tc(priv->netdev); + + if (ntc == 1) + return; + + netdev_set_num_tc(priv->netdev, ntc); + + /* Map netdev TCs to offset 0 + * We have our own UP to TXQ mapping for QoS + */ + for (tc = 0; tc < ntc; tc++) + netdev_set_tc_queue(priv->netdev, tc, nch, 0); +} + +static int xsc_update_netdev_queues(struct xsc_adapter *priv) +{ + struct net_device *netdev = priv->netdev; + int num_txqs, num_rxqs, nch, ntc; + int old_num_txqs, old_ntc; + int err; +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES + bool disabling; +#endif + + old_num_txqs = netdev->real_num_tx_queues; + old_ntc = netdev->num_tc ? 
: 1; + + nch = priv->nic_param.num_channels; + ntc = priv->nic_param.num_tc; + num_txqs = nch * ntc; + num_rxqs = nch;// * priv->profile->rq_groups; + +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES + disabling = num_txqs < netdev->real_num_tx_queues; +#endif + + xsc_netdev_set_tcs(priv, nch, ntc); + + err = netif_set_real_num_tx_queues(netdev, num_txqs); + if (err) { + netdev_warn(netdev, + "netif_set_real_num_tx_queues failed, txqs=%d->%d, tc=%d->%d, err=%d\n", + old_num_txqs, num_txqs, old_ntc, ntc, err); + goto err_tcs; + } + + err = netif_set_real_num_rx_queues(netdev, num_rxqs); + if (err) { + netdev_warn(netdev, "netif_set_real_num_rx_queues failed, rxqs=%d, err=%d\n", + num_rxqs, err); + goto err_txqs; + } + +#ifndef HAVE_NET_SYNCHRONIZE_IN_SET_REAL_NUM_TX_QUEUES + if (disabling) + synchronize_net(); +#endif + + return 0; + +err_txqs: + /* netif_set_real_num_rx_queues could fail only when nch increased. Only + * one of nch and ntc is changed in this function. That means, the call + * to netif_set_real_num_tx_queues below should not fail, because it + * decreases the number of TX queues. 
+ */ + WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs)); + +err_tcs: + xsc_netdev_set_tcs(priv, old_num_txqs / old_ntc, old_ntc); + return err; +} + +void xsc_build_default_indir_rqt(u32 *indirection_rqt, int len, + int num_channels) +{ + int i; + + for (i = 0; i < len; i++) + indirection_rqt[i] = i % num_channels; +} + +int xsc_eth_num_channels_changed(struct xsc_adapter *priv) +{ + struct net_device *netdev = priv->netdev; + u16 count = priv->nic_param.num_channels; + int err; + + err = xsc_update_netdev_queues(priv); + if (err) + goto err; + + if (!netif_is_rxfh_configured(priv->netdev)) + xsc_build_default_indir_rqt(priv->rss_params.indirection_rqt, + XSC_INDIR_RQT_SIZE, count); + + return 0; + +err: + netdev_err(netdev, "%s: failed to change rss rxq number %d, err=%d\n", + __func__, count, err); + return err; +} + +int xsc_safe_switch_channels(struct xsc_adapter *adapter, + xsc_eth_fp_preactivate preactivate, + xsc_eth_fp_postactivate postactivate) +{ + struct net_device *netdev = adapter->netdev; + int carrier_ok; + int ret = 0; + + adapter->status = XSCALE_ETH_DRIVER_CLOSE; + + carrier_ok = netif_carrier_ok(netdev); + netif_carrier_off(netdev); +#ifdef NEED_CREATE_RX_THREAD + if (adapter->task) + kthread_stop(adapter->task); +#endif + ret = xsc_eth_modify_nic_hca(adapter, BIT(XSC_RSS_RXQ_DROP)); + if (ret) + goto close_channels; + + xsc_eth_deactivate_priv_channels(adapter); + xsc_eth_close_channels(adapter); + + if (preactivate) { + ret = preactivate(adapter); + if (ret) + goto out; + } + + ret = xsc_eth_open_channels(adapter); + if (ret) + goto close_channels; + + if (postactivate) { + ret = postactivate(adapter); + if (ret) + goto close_channels; + } + + xsc_eth_activate_priv_channels(adapter); + ret = xsc_eth_modify_nic_hca(adapter, BIT(XSC_RSS_RXQ_UPDATE)); + if (ret) + goto close_channels; + +#ifdef NEED_CREATE_RX_THREAD + ret = xsc_eth_rx_thread_create(adapter); + if (ret) + goto close_channels; +#endif + + adapter->status = 
XSCALE_ETH_DRIVER_OK; + + goto out; + +close_channels: + xsc_eth_deactivate_priv_channels(adapter); + xsc_eth_close_channels(adapter); + +out: + if (carrier_ok) + netif_carrier_on(netdev); + xsc_core_dbg(adapter->xdev, "channels=%d, mtu=%d, err=%d\n", + adapter->nic_param.num_channels, + adapter->nic_param.mtu, ret); + return ret; +} + +int xsc_eth_nic_mtu_changed(struct xsc_adapter *priv) +{ + u32 new_mtu = priv->nic_param.mtu; + int ret; + + ret = xsc_eth_set_hw_mtu(priv->xdev, XSC_SW2HW_MTU(new_mtu), + XSC_SW2HW_RX_PKT_LEN(new_mtu)); + + return ret; +} + +static int xsc_eth_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int ret = 0; + int max_buf_len = 0; + + if (new_mtu > netdev->max_mtu || new_mtu < netdev->min_mtu) { + netdev_err(netdev, "%s: Bad MTU (%d), valid range is: [%d..%d]\n", + __func__, new_mtu, netdev->min_mtu, netdev->max_mtu); + return -EINVAL; + } + + if (!xsc_rx_is_linear_skb(new_mtu)) { + max_buf_len = adapter->xdev->caps.recv_ds_num * PAGE_SIZE; + if (new_mtu > max_buf_len) { + netdev_err(netdev, "Bad MTU (%d), max buf len is %d\n", + new_mtu, max_buf_len); + return -EINVAL; + } + } + mutex_lock(&adapter->state_lock); + adapter->nic_param.mtu = new_mtu; + if (adapter->status != XSCALE_ETH_DRIVER_OK) { + ret = xsc_eth_nic_mtu_changed(adapter); + if (ret) + adapter->nic_param.mtu = old_mtu; + else + netdev->mtu = adapter->nic_param.mtu; + goto out; + } + + ret = xsc_safe_switch_channels(adapter, xsc_eth_nic_mtu_changed, NULL); + if (ret) + goto out; + + netdev->mtu = adapter->nic_param.mtu; + +out: + mutex_unlock(&adapter->state_lock); + xsc_core_info(adapter->xdev, "mtu change from %d to %d, new_mtu=%d, err=%d\n", + old_mtu, netdev->mtu, new_mtu, ret); + return ret; +} + +static void xsc_get_stats(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + + xsc_fold_sw_stats64(adapter, stats); +} 
+ +static void xsc_set_rx_mode(struct net_device *dev) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + queue_work(priv->workq, &priv->set_rx_mode_work); +} + +int xsc_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_core_sriov *sriov = &adapter->xdev->priv.sriov; + struct xsc_core_device *xdev = adapter->xdev; + int ret; + + if (vf >= sriov->num_vfs) + return -EINVAL; + + ret = xsc_eswitch_set_vport_mac(xdev->priv.eswitch, vf + 1, mac); + if (ret) + xsc_core_err(xdev, "xsc set mac addr failed\n"); + + return ret; +} + +static int xsc_set_vf_trust(struct net_device *dev, int vf, bool setting) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + + return xsc_eswitch_set_vport_trust(xdev->priv.eswitch, vf + 1, setting); +} + +static int xsc_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + + return xsc_eswitch_set_vport_spoofchk(xdev->priv.eswitch, vf + 1, setting); +} + +static int xsc_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, + __be16 vlan_proto) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_vport *evport = xsc_eswitch_get_vport(xdev->priv.eswitch, vf + 1); + int err; + + if (!(dev->features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) { + xsc_core_err(xdev, "dev features not support STAG_RX %llu STAG_TX %llu\n", + dev->features & NETIF_F_HW_VLAN_STAG_RX, + dev->features & NETIF_F_HW_VLAN_STAG_TX); + return -EOPNOTSUPP; + } + + if (vlan_proto != htons(ETH_P_8021Q) && vlan_proto != htons(ETH_P_8021AD)) + return -EPROTONOSUPPORT; + + err = xsc_eswitch_set_vport_vlan(xdev->priv.eswitch, vf + 1, + vlan, qos, vlan_proto); + if (err) { + xsc_core_err(xdev, "fail to set vf %d vlan %u qos %u err=%d\n", + vf, vlan, qos, err); + return err; + } + + 
if (evport) { + evport->vlan_id = vlan; + evport->vlan_qos = qos; + evport->vlan_proto = vlan_proto; + } + + return 0; +} + +int xsc_get_vf_config(struct net_device *dev, + int vf, struct ifla_vf_info *ivi) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_eswitch *esw = xdev->priv.eswitch; + struct xsc_core_sriov *sriov = &xdev->priv.sriov; + int err; + + if (!netif_device_present(dev) || sriov->num_vfs > MAX_VF_NUM_MINIDUMP) + return -EOPNOTSUPP; + + err = xsc_eswitch_get_vport_config(esw, vf + 1, ivi); + + return err; +} + +int xsc_set_vf_link_state(struct net_device *dev, int vf, + int link_state) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_eswitch *esw = xdev->priv.eswitch; + + return xsc_eswitch_set_vport_state(esw, vf + 1, link_state); +} + +int set_feature_rxcsum(struct net_device *netdev, bool enable) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_cmd_modify_nic_hca_mbox_in in = {}; + struct xsc_cmd_modify_nic_hca_mbox_out out = {}; + int err; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_HCA); + in.nic.caps_mask = cpu_to_be16(BIT(XSC_TBM_CAP_HASH_PPH)); + in.nic.caps = cpu_to_be16(enable << XSC_TBM_CAP_HASH_PPH); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + netdev_err(netdev, "failed to change rxcsum=%d, err=%d, status=%d\n", + enable, err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +int set_feature_vlan_offload(struct net_device *netdev, bool enable) +{ + int err = 0, i; + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_vport *evport = NULL; + + if (!enable) { + for (i = 0; i < adapter->xdev->priv.eswitch->num_vfs; i++) { + evport = xsc_eswitch_get_vport(adapter->xdev->priv.eswitch, + i + 1); + if (evport && (evport->vlan_id || evport->vlan_qos)) { + 
evport->vlan_id = 0; + evport->vlan_qos = 0; + err = xsc_eswitch_set_vport_vlan(adapter->xdev->priv.eswitch, + i + 1, evport->vlan_id, + evport->vlan_qos, + evport->vlan_proto); + if (err) + xsc_core_err(adapter->xdev, "fail to clear vf vlan offload vf=%d err=%d\n", + i, err); + } + } + } + + return 0; +} + +static int xsc_handle_feature(struct net_device *netdev, + netdev_features_t *features, + netdev_features_t wanted_features, + netdev_features_t feature, + xsc_feature_handler feature_handler) +{ + netdev_features_t changes = wanted_features ^ netdev->features; + bool enable = !!(wanted_features & feature); + int err; + + if (!(changes & feature)) + return 0; + + err = feature_handler(netdev, enable); + if (err) { + netdev_err(netdev, "%s feature %pNF failed, err %d\n", + enable ? "Enable" : "Disable", &feature, err); + return err; + } + + xsc_set_feature(features, feature, enable); + + return 0; +} + +int xsc_eth_set_features(struct net_device *netdev, netdev_features_t features) +{ + netdev_features_t oper_features = netdev->features; + int err = 0; + +#define XSC_HANDLE_FEATURE(feature, handler) \ + xsc_handle_feature(netdev, &oper_features, features, feature, handler) + + err |= XSC_HANDLE_FEATURE(NETIF_F_RXCSUM, set_feature_rxcsum); + err |= XSC_HANDLE_FEATURE(NETIF_F_HW_VLAN_STAG_RX, set_feature_vlan_offload); + err |= XSC_HANDLE_FEATURE(NETIF_F_HW_VLAN_STAG_TX, set_feature_vlan_offload); + if (err) { + netdev->features = oper_features; + return -EINVAL; + } + + return 0; +} + +static netdev_features_t xsc_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_RX)) + features |= NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_RX; + return features; +} + +#ifdef HAVE_NETDEVICE_OPS_SELECT_QUEUE_FALLBACK +u16 xsc_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev, + select_queue_fallback_t fallback) +{ + int txq_ix = fallback(dev, skb, NULL); 
+ u16 num_channels; + int up = 0; + struct xsc_adapter *adapter = netdev_priv(dev); + + if (!adapter) { + pr_err("%s adapter is null\n", __func__); + return txq_ix; + } + + if (!netdev_get_num_tc(dev)) + return txq_ix; + + if (skb_vlan_tag_present(skb)) { + up = skb->vlan_tci >> VLAN_PRIO_SHIFT; + if (adapter->nic_param.num_tc > 1) + up = up % (adapter->nic_param.num_tc - 1) + 1; + else + up = 0; + } + + /* channel_ix can be larger than num_channels since + * dev->num_real_tx_queues = num_channels * num_tc + */ + num_channels = adapter->channels.num_chl; + if (txq_ix >= num_channels) + txq_ix = adapter->txq2sq[txq_ix]->ch_ix; + + return adapter->channel_tc2realtxq[txq_ix][up]; +} +#else +u16 xsc_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + int txq_ix, up = 0; + u16 num_channels; + struct xsc_adapter *adapter = netdev_priv(dev); + + if (!adapter) { + pr_err("%s adapter is null\n", __func__); + return txq_ix; + } + + txq_ix = netdev_pick_tx(dev, skb, NULL); + if (!netdev_get_num_tc(dev)) + return txq_ix; + + if (skb_vlan_tag_present(skb)) { + up = skb_vlan_tag_get_prio(skb); + if (adapter->nic_param.num_tc > 1) + up = up % (adapter->nic_param.num_tc - 1) + 1; + else + up = 0; + } + + /* channel_ix can be larger than num_channels since + * dev->num_real_tx_queues = num_channels * num_tc + */ + num_channels = adapter->channels.num_chl; + if (txq_ix >= num_channels) + txq_ix = adapter->txq2sq[txq_ix]->ch_ix; + + return adapter->channel_tc2realtxq[txq_ix][up]; +} +#endif + +static int xsc_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_core_device *pf_xdev; + struct net_device *pf_netdev; + struct pci_dev *pdev = xdev->pdev; + int ret = len; + + if (!pdev) + return -EOPNOTSUPP; + if (!xsc_core_is_pf(xdev)) { + if (!pdev->physfn) + return -EOPNOTSUPP; + pf_xdev = pci_get_drvdata(pdev->physfn); 
+ if (!pf_xdev || !pf_xdev->netdev) + return -EOPNOTSUPP; + pf_netdev = (struct net_device *)pf_xdev->netdev; + ret = snprintf(buf, len, "%s_%d", + pf_netdev->name, xdev->vf_id); + } else { + return -EOPNOTSUPP; + } + if (ret >= len) + return -EOPNOTSUPP; + + return 0; +} + +static int xsc_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_sriov *sriov = &adapter->xdev->priv.sriov; + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_eswitch *esw = xdev->priv.eswitch; + u16 vport; + int err = 0; + u32 rate = 0; + + if (vf >= sriov->num_vfs) + return -EINVAL; + + if (min_tx_rate > 0) + return -EOPNOTSUPP; + + vport = vf + 1; + xsc_core_dbg(xdev, "set vf rate %d Mbps\n", max_tx_rate); + + rate = (u32)max_tx_rate; + err = xsc_eswitch_set_vport_rate(esw, vport, rate, 0); + if (err) { + xsc_core_err(xdev, "set_vf_rate failed!! err=%d\n", err); + return -EINVAL; + } + + return 0; +} + +static const struct net_device_ops xsc_netdev_ops = { + .ndo_open = xsc_eth_open, + .ndo_stop = xsc_eth_close, + .ndo_start_xmit = xsc_eth_xmit_start, + + .ndo_set_rx_mode = xsc_set_rx_mode, + .ndo_validate_addr = NULL, + .ndo_set_mac_address = xsc_eth_set_mac, + .ndo_change_mtu = xsc_eth_change_mtu, + + .ndo_tx_timeout = NULL, + .ndo_set_tx_maxrate = NULL, + .ndo_vlan_rx_add_vid = xsc_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = xsc_vlan_rx_kill_vid, + .ndo_do_ioctl = NULL, + .ndo_set_vf_mac = xsc_set_vf_mac, + .ndo_set_vf_vlan = xsc_set_vf_vlan, + .ndo_set_vf_rate = xsc_set_vf_rate, + .ndo_set_vf_spoofchk = xsc_set_vf_spoofchk, + .ndo_set_vf_rss_query_en = NULL, + .ndo_set_vf_trust = xsc_set_vf_trust, + .ndo_get_vf_config = xsc_get_vf_config, + .ndo_set_vf_link_state = xsc_set_vf_link_state, + .ndo_get_stats64 = xsc_get_stats, + .ndo_setup_tc = NULL, + .ndo_set_features = xsc_eth_set_features, + .ndo_fix_features = xsc_fix_features, + .ndo_fdb_add = NULL, + .ndo_bridge_setlink = NULL, 
+ .ndo_bridge_getlink = NULL, + .ndo_dfwd_add_station = NULL, + .ndo_dfwd_del_station = NULL, + .ndo_get_phys_port_name = xsc_get_phys_port_name, + +#ifdef HAVE_NETDEVICE_OPS_UDP_TUNNEL + .ndo_udp_tunnel_add = NULL, + .ndo_udp_tunnel_del = NULL, +#endif + .ndo_features_check = NULL, + .ndo_select_queue = xsc_select_queue, +}; + +static int xsc_get_max_num_channels(struct xsc_core_device *xdev) +{ +#ifdef NEED_CREATE_RX_THREAD + return 8; +#else + return min_t(int, xdev->dev_res->eq_table.num_comp_vectors, + XSC_ETH_MAX_NUM_CHANNELS); +#endif +} + +static int xsc_eth_netdev_init(struct xsc_adapter *adapter) +{ + unsigned int node, tc, nch; + + tc = adapter->nic_param.num_tc; + nch = adapter->nic_param.max_num_ch; + node = dev_to_node(adapter->dev); + adapter->txq2sq = kcalloc_node(nch * tc, + sizeof(*adapter->txq2sq), GFP_KERNEL, node); + if (!adapter->txq2sq) + goto err_out; + + mutex_init(&adapter->state_lock); + + INIT_WORK(&adapter->set_rx_mode_work, xsc_set_rx_mode_work); + + adapter->workq = create_singlethread_workqueue("xsc_eth"); + if (!adapter->workq) + goto err_free_priv; + + netif_carrier_off(adapter->netdev); + + return 0; + +err_free_priv: + kfree(adapter->txq2sq); +err_out: + return -ENOMEM; +} + +static const struct xsc_tirc_config tirc_default_config[XSC_NUM_INDIR_TIRS] = { + [XSC_TT_IPV4] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV4, + .l4_prot_type = 0, + .rx_hash_fields = XSC_HASH_IP, + }, + [XSC_TT_IPV4_TCP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV4, + .l4_prot_type = XSC_L4_PROT_TYPE_TCP, + .rx_hash_fields = XSC_HASH_IP_PORTS, + }, + [XSC_TT_IPV4_UDP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV4, + .l4_prot_type = XSC_L4_PROT_TYPE_UDP, + .rx_hash_fields = XSC_HASH_IP_PORTS, + }, + [XSC_TT_IPV6] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV6, + .l4_prot_type = 0, + .rx_hash_fields = XSC_HASH_IP6, + }, + [XSC_TT_IPV6_TCP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV6, + .l4_prot_type = XSC_L4_PROT_TYPE_TCP, + .rx_hash_fields = XSC_HASH_IP6_PORTS, + }, 
+ [XSC_TT_IPV6_UDP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV6, + .l4_prot_type = XSC_L4_PROT_TYPE_UDP, + .rx_hash_fields = XSC_HASH_IP6_PORTS, + }, +}; + +struct xsc_tirc_config xsc_tirc_get_default_config(enum xsc_traffic_types tt) +{ + return tirc_default_config[tt]; +} + +void xsc_build_rss_params(struct xsc_rss_params *rss_params, u16 num_channels) +{ + enum xsc_traffic_types tt; + + rss_params->hfunc = ETH_RSS_HASH_TOP; + netdev_rss_key_fill(rss_params->toeplitz_hash_key, + sizeof(rss_params->toeplitz_hash_key)); + + xsc_build_default_indir_rqt(rss_params->indirection_rqt, + XSC_INDIR_RQT_SIZE, num_channels); + + for (tt = 0; tt < XSC_NUM_INDIR_TIRS; tt++) { + rss_params->rx_hash_fields[tt] = + tirc_default_config[tt].rx_hash_fields; + } + rss_params->rss_hash_tmpl = XSC_HASH_IP_PORTS | XSC_HASH_IP6_PORTS; +} + +void xsc_eth_build_nic_params(struct xsc_adapter *adapter, u32 ch_num, u32 tc_num) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_eth_params *params = &adapter->nic_param; + + params->mtu = SW_DEFAULT_MTU; + params->num_tc = tc_num; + + params->comp_vectors = xdev->dev_res->eq_table.num_comp_vectors; + params->max_num_ch = ch_num; + params->num_channels = ch_num; + + params->rq_max_size = BIT(xdev->caps.log_max_qp_depth); + params->sq_max_size = BIT(xdev->caps.log_max_qp_depth); + xsc_build_rss_params(&adapter->rss_params, adapter->nic_param.num_channels); + + if (params->num_channels > XSC_NET_DIM_ENABLE_THRESHOLD) { + params->rx_dim_enabled = 1; + params->tx_dim_enabled = 1; + xsc_set_rx_cq_mode_params(params, XSC_CQ_PERIOD_MODE_START_FROM_EQE); + xsc_set_tx_cq_mode_params(params, XSC_CQ_PERIOD_MODE_START_FROM_EQE); + } + + xsc_core_info(xdev, "mtu=%d, num_ch=%d(max=%d), num_tc=%d\n", + params->mtu, params->num_channels, + params->max_num_ch, params->num_tc); +} + +void xsc_eth_build_nic_netdev(struct xsc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct xsc_core_device *xdev = adapter->xdev; + + /* Set 
up network device as normal. */ + netdev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; + netdev->netdev_ops = &xsc_netdev_ops; + +#ifdef CONFIG_XSC_CORE_EN_DCB + netdev->dcbnl_ops = &xsc_dcbnl_ops; +#endif + eth_set_ethtool_ops(netdev); + + netdev->min_mtu = SW_MIN_MTU; + netdev->max_mtu = SW_MAX_MTU; + /*mtu - macheaderlen - ipheaderlen should be aligned in 8B*/ + netdev->mtu = SW_DEFAULT_MTU; + + netdev->vlan_features |= NETIF_F_SG; + netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;//NETIF_F_HW_CSUM; + netdev->vlan_features |= NETIF_F_GRO; + netdev->vlan_features |= NETIF_F_TSO;//NETIF_F_TSO_ECN + netdev->vlan_features |= NETIF_F_TSO6; + //todo: enable rx csum + netdev->vlan_features |= NETIF_F_RXCSUM; + netdev->vlan_features |= NETIF_F_RXHASH; + netdev->vlan_features |= NETIF_F_GSO_PARTIAL; + + netdev->hw_features = netdev->vlan_features; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; + + if (xsc_vxlan_allowed(xdev) || xsc_geneve_tx_allowed(xdev) || + xsc_any_tunnel_proto_supported(xdev)) { + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->hw_enc_features |= NETIF_F_TSO; //NETIF_F_TSO_ECN + netdev->hw_enc_features |= NETIF_F_TSO6; + netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL; + } + + netdev->features |= netdev->hw_features; + netdev->features |= NETIF_F_HIGHDMA; +} + +static int xsc_eth_nic_init(struct xsc_adapter *adapter, + void *rep_priv, u32 ch_num, u32 tc_num) +{ + int err = -1; + + xsc_eth_build_nic_params(adapter, ch_num, tc_num); + + err = xsc_eth_netdev_init(adapter); + if (err) + return err; + + xsc_eth_build_nic_netdev(adapter); + + return 0; +} + +static void xsc_eth_nic_cleanup(struct xsc_adapter *adapter) +{ + destroy_workqueue(adapter->workq); + kfree(adapter->txq2sq); +} + +/* create xdev resource,pd/domain/mkey */ +int xsc_eth_create_xdev_resources(struct xsc_core_device *xdev) +{ + return 0; +} + +static int 
xsc_eth_init_nic_tx(struct xsc_adapter *adapter) +{ + /*create tis table*/ +#ifdef CONFIG_XSC_CORE_EN_DCB + xsc_dcbnl_initialize(adapter); +#endif + + return 0; +} + +static int xsc_eth_cleanup_nic_tx(struct xsc_adapter *adapter) +{ + return 0; +} + +/* init tx: create hw resource, set register according to spec */ +int xsc_eth_init_nic_rx(struct xsc_adapter *adapter) +{ + /* create rqt and tir table + * tir table:base on traffic type like ip4_tcp/ipv6_tcp/ + * each rqt table for a traffic type + */ + + return 0; +} + +static int xsc_eth_cleanup_nic_rx(struct xsc_adapter *adapter) +{ + return 0; +} + +static void xsc_eth_l2_addr_init(struct xsc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + char mac[6] = {0}; + int ret = 0; + + ret = xsc_eth_get_mac(adapter->xdev, mac); + if (ret) { + xsc_core_warn(adapter->xdev, "get mac failed %d, generate random mac...", ret); + eth_random_addr(mac); + } + dev_addr_mod(netdev, 0, mac, 6); + + if (!is_valid_ether_addr(netdev->perm_addr)) + memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); +} + +static int xsc_eth_nic_enable(struct xsc_adapter *adapter) +{ + struct xsc_core_device *xdev = adapter->xdev; + + if (xsc_core_is_pf(xdev)) + xsc_lag_add_netdev(adapter->netdev); + xsc_eth_l2_addr_init(adapter); + + xsc_eth_set_hw_mtu(xdev, XSC_SW2HW_MTU(adapter->nic_param.mtu), + XSC_SW2HW_RX_PKT_LEN(adapter->nic_param.mtu)); + +#ifdef CONFIG_XSC_CORE_EN_DCB + xsc_dcbnl_init_app(adapter); +#endif + + rtnl_lock(); + netif_device_attach(adapter->netdev); + rtnl_unlock(); + + return 0; +} + +static void xsc_eth_nic_disable(struct xsc_adapter *adapter) +{ + rtnl_lock(); + if (netif_running(adapter->netdev)) + xsc_eth_close(adapter->netdev); + netif_device_detach(adapter->netdev); + rtnl_unlock(); + + if (xsc_core_is_pf(adapter->xdev)) + xsc_lag_remove_netdev(adapter->netdev); +} + +/* call init tx/rx, enable function about nic init */ +static int xsc_attach_netdev(struct xsc_adapter *adapter) +{ + int err = 
-1; + + err = xsc_eth_init_nic_tx(adapter); + if (err) + return err; + + err = xsc_eth_init_nic_rx(adapter); + if (err) + return err; + + err = xsc_eth_nic_enable(adapter); + if (err) + return err; + + xsc_core_info(adapter->xdev, "%s ok\n", __func__); + return 0; +} + +static void xsc_detach_netdev(struct xsc_adapter *adapter) +{ + xsc_eth_nic_disable(adapter); + + flush_workqueue(adapter->workq); + + xsc_eth_cleanup_nic_rx(adapter); + xsc_eth_cleanup_nic_tx(adapter); + adapter->status = XSCALE_ETH_DRIVER_DETACH; +} + +static int xsc_eth_attach(struct xsc_core_device *xdev, struct xsc_adapter *adapter) +{ + int err = -1; + + if (netif_device_present(adapter->netdev)) + return 0; + + err = xsc_eth_create_xdev_resources(xdev); + if (err) + return err; + + err = xsc_attach_netdev(adapter); + if (err) + return err; + + xsc_core_info(adapter->xdev, "%s ok\n", __func__); + return 0; +} + +static void xsc_eth_detach(struct xsc_core_device *xdev, struct xsc_adapter *adapter) +{ + if (!netif_device_present(adapter->netdev)) + return; + + xsc_detach_netdev(adapter); +} + +static void *xsc_eth_add(struct xsc_core_device *xdev) +{ + int err = -1; + int num_chl, num_tc; + struct net_device *netdev; + struct xsc_adapter *adapter = NULL; + void *rep_priv = NULL; + + num_chl = xsc_get_max_num_channels(xdev); + num_tc = xdev->caps.max_tc; + + /* Allocate ourselves a network device with room for our info */ + netdev = alloc_etherdev_mqs(sizeof(struct xsc_adapter), + num_chl * num_tc, num_chl); + if (unlikely(!netdev)) { + xsc_core_warn(xdev, "alloc_etherdev_mqs failed, txq=%d, rxq=%d\n", + (num_chl * num_tc), num_chl); + return NULL; + } + + /* Set up our device-specific information */ + netdev->dev.parent = &xdev->pdev->dev; + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = xdev->pdev; + adapter->dev = &adapter->pdev->dev; + adapter->xdev = (void *)xdev; + xdev->eth_priv = adapter; + + err = xsc_eth_nic_init(adapter, rep_priv, num_chl, num_tc); + if 
(err) { + xsc_core_warn(xdev, "xsc_nic_init failed, num_ch=%d, num_tc=%d, err=%d\n", + num_chl, num_tc, err); + goto err_free_netdev; + } + + err = xsc_eth_attach(xdev, adapter); + if (err) { + xsc_core_warn(xdev, "xsc_eth_attach failed, err=%d\n", err); + goto err_cleanup_netdev; + } + + adapter->stats = kvzalloc(sizeof(*adapter->stats), GFP_KERNEL); + if (unlikely(!adapter->stats)) + goto err_detach; + + err = register_netdev(netdev); + if (err) { + xsc_core_warn(xdev, "register_netdev failed, err=%d\n", err); + goto err_reg_netdev; + } + + err = xsc_eth_sysfs_create(netdev, xdev); + if (err) + goto err_sysfs_create; + + xdev->netdev = (void *)netdev; + adapter->status = XSCALE_ETH_DRIVER_INIT; + + return adapter; + +err_sysfs_create: + unregister_netdev(adapter->netdev); +err_reg_netdev: + kfree(adapter->stats); +err_detach: + xsc_eth_detach(xdev, adapter); +err_cleanup_netdev: + xsc_eth_nic_cleanup(adapter); +err_free_netdev: + free_netdev(netdev); + + return NULL; +} + +static void xsc_eth_remove(struct xsc_core_device *xdev, void *context) +{ + struct xsc_adapter *adapter = NULL; + + if (!xdev) + return; + + adapter = xdev->eth_priv; + if (!adapter) { + xsc_core_warn(xdev, "failed! 
adapter is null\n"); + return; + } + + xsc_core_info(adapter->xdev, "remove netdev %s entry\n", adapter->netdev->name); + + xsc_eth_sysfs_remove(adapter->netdev, xdev); + + unregister_netdev(adapter->netdev); + + kfree(adapter->stats); + + xsc_eth_detach(xdev, adapter); + xsc_eth_nic_cleanup(adapter); + + free_netdev(adapter->netdev); + + xdev->netdev = NULL; + xdev->eth_priv = NULL; +} + +static struct xsc_interface xsc_interface = { + .add = xsc_eth_add, + .remove = xsc_eth_remove, + .event = NULL, + .protocol = XSC_INTERFACE_PROTOCOL_ETH, +}; + +int xsc_net_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data) +{ + pr_info("xsc net driver recv %lu event\n", action); + if (xsc_get_exit_flag()) + return NOTIFY_OK; + xsc_remove_eth_driver(); + + return NOTIFY_OK; +} + +struct notifier_block xsc_net_nb = { + .notifier_call = xsc_net_reboot_event_handler, + .next = NULL, + .priority = 1, +}; + +void xsc_remove_eth_driver(void) +{ + pr_info("remove ethernet driver\n"); + xsc_eth_ctrl_fini(); + xsc_unregister_interface(&xsc_interface); +} + +static __init int xsc_net_driver_init(void) +{ + int ret; + + pr_info("add ethernet driver\n"); + ret = xsc_register_interface(&xsc_interface); + if (ret != 0) { + pr_err("failed to register interface\n"); + goto out; + } + + ret = xsc_eth_ctrl_init(); + if (ret != 0) { + pr_err("failed to register port control node\n"); + xsc_unregister_interface(&xsc_interface); + goto out; + } + + register_reboot_notifier(&xsc_net_nb); + return 0; +out: + return -1; +} + +static __exit void xsc_net_driver_exit(void) +{ + unregister_reboot_notifier(&xsc_net_nb); + xsc_remove_eth_driver(); +} + +module_init(xsc_net_driver_init); +module_exit(xsc_net_driver_exit); diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c b/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c new file mode 100644 index 000000000000..6c4afad1be8f --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/ut_main.c @@ -0,0 +1,124 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" + +#include "xsc_eth.h" +#include "xsc_accel.h" +#include +#include +#include "xsc_eth_txrx.h" +#include "xsc_eth_stats.h" +#include "xsc_eth_debug.h" + +#ifdef NEED_CREATE_RX_THREAD + +extern void xsc_cq_notify_hw(struct xsc_cq *cq); + +DEFINE_PER_CPU(bool, txcqe_get); +EXPORT_PER_CPU_SYMBOL(txcqe_get); + +u32 xsc_eth_process_napi(struct xsc_adapter *adapter) +{ + int work_done = 0; + bool err = false; + int budget = 1; + int i, chl; + int errtx = false; + struct xsc_channel *c; + struct xsc_rq *prq; + struct xsc_ch_stats *ch_stats; + + if (adapter->status == XSCALE_ETH_DRIVER_OK) { + for (chl = 0; chl < adapter->channels.num_chl; chl++) { + c = &adapter->channels.c[chl]; + prq = &c->qp.rq[0]; + ch_stats = c->stats; + ch_stats->poll++; + + for (i = 0; i < c->num_tc; i++) { + errtx |= xsc_poll_tx_cq(&c->qp.sq[i].cq, budget); + ETH_DEBUG_LOG("errtx=%u.\r\n", errtx); + if (likely(__this_cpu_read(txcqe_get))) { + xsc_cq_notify_hw(&c->qp.sq[i].cq); + __this_cpu_write(txcqe_get, false); + } + } + + work_done = xsc_poll_rx_cq(&prq->cq, budget); + + ETH_DEBUG_LOG("work_done=%d.\r\n", work_done); + + if (work_done != 0) { + xsc_cq_notify_hw(&prq->cq); + err |= prq->post_wqes(prq); + + ETH_DEBUG_LOG("err=%u.\r\n", err); + } else { + ETH_DEBUG_LOG("no-load.\r\n"); + } + + ch_stats->arm++; + } + } + + return XSCALE_RET_SUCCESS; +} + +int xsc_eth_rx_thread(void *arg) +{ + u32 ret = XSCALE_RET_SUCCESS; + struct xsc_adapter *adapter = (struct xsc_adapter *)arg; + + while (kthread_should_stop() == 0) { + if (need_resched()) + schedule(); + ret = xsc_eth_process_napi(adapter); + if (ret != XSCALE_RET_SUCCESS) + ETH_DEBUG_LOG("unexpected 
branch.\r\n"); + + ETH_DEBUG_LOG("adapter=%p\r\n", adapter); + } + ETH_DEBUG_LOG("do_exit.\r\n"); + + return XSCALE_RET_SUCCESS; +} + +u32 g_thread_count; +u32 xsc_eth_rx_thread_create(struct xsc_adapter *adapter) +{ + struct task_struct *task = NULL; + + task = kthread_create(xsc_eth_rx_thread, (void *)adapter, + "xsc_rx%i", g_thread_count); + if (!task) + return XSCALE_RET_ERROR; + + ETH_DEBUG_LOG("thread_count=%d\r\n", g_thread_count); + + kthread_bind(task, g_thread_count); + wake_up_process(task); + adapter->task = task; + + g_thread_count++; + + return XSCALE_RET_SUCCESS; +} +#endif /* NEED_CREATE_RX_THREAD */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h new file mode 100644 index 000000000000..1378be66b615 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_accel.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_ACCEL_H +#define XSC_ACCEL_H + +#include +#include +#include "common/xsc_core.h" + +static inline void xsc_udp_gso_handle_tx_skb(struct sk_buff *skb) +{ + int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr); + + udp_hdr(skb)->len = htons(payload_len); +} + +static inline struct sk_buff *xsc_accel_handle_tx(struct sk_buff *skb) +{ + /*no not consider tls and ipsec*/ + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + xsc_udp_gso_handle_tx_skb(skb); + return skb; +} + +static inline bool xsc_vxlan_allowed(struct xsc_core_device *dev) +{ + return false; +} + +static inline bool xsc_geneve_tx_allowed(struct xsc_core_device *dev) +{ + return false; +} + +static inline bool xsc_any_tunnel_proto_supported(struct xsc_core_device *dev) +{ + return false; +} + +#endif /* XSC_ACCEL_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c new file mode 100644 index 000000000000..36503b3113f7 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_dcbnl.c @@ -0,0 +1,1482 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" +#include "common/vport.h" +#include "xsc_eth.h" +#include "xsc_eth_debug.h" +#include "xsc_hw_comm.h" + +#ifndef IEEE_8021QAZ_APP_SEL_DSCP +#define IEEE_8021QAZ_APP_SEL_DSCP 5 +#endif + +#define XSC_100MB (100000) +#define XSC_1GB (1000000) +#define XSC_RATE_LIMIT_BASE (16000) +#define XSC_WRR_DIV_BASE 10 +#define XSC_WRR_DEFAULT_WEIGHT 10 +#define XSC_DCBX_WFQ_TOTAL_WEIGHT 100 +#define XSC_DCBX_MAX_TC 8 + +#define XSC_CEE_STATE_UP 1 +#define XSC_CEE_STATE_DOWN 0 + +/* Max supported cable length is 1000 meters */ +#define XSC_MAX_CABLE_LENGTH 1000 + +enum { + XSC_VENDOR_TC_GROUP_NUM = 7, + XSC_LOWEST_PRIO_GROUP = 0, +}; + +#ifdef CONFIG_XSC_CORE_EN_DCB +static int xsc_set_trust_state(struct xsc_adapter *priv, u8 trust_state); +static int xsc_set_dscp2prio(struct xsc_adapter *priv, u8 dscp, u8 prio); +static u8 xsc_dcbnl_setall(struct net_device *netdev); + +static int xsc_max_tc(struct xsc_core_device *dev) +{ + u8 num_tc = dev->caps.max_tc ? : 8; + + if (num_tc > XSC_DCBX_MAX_TC) + num_tc = XSC_DCBX_MAX_TC; + + return num_tc - 1; +} + +static void xsc_pfc_array2bitmap(u8 *pfcbitmap, u8 *array) +{ + u8 i; + + *pfcbitmap = 0; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (array[i]) + *pfcbitmap = *pfcbitmap | (1 << i); + } +} + +static void xsc_pfc_bitmap2array(u8 pfcbitmap, u8 *array) +{ + u8 i; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if ((pfcbitmap >> i) & 0x1) + array[i] = 1; + } +} + +static int xsc_query_port_prio_tc(struct xsc_core_device *xdev, int prio, u8 *tc) +{ + /* user priotity to tc 0:0; 1:1; 2:2; 3:3 ... 
7:7 */ + *tc = (u8)prio; + return 0; +} + +static int xsc_set_port_prio_tc(struct xsc_core_device *xdev, u8 *prio_tc) +{ + u8 i; + + for (i = 0; i <= xsc_max_tc(xdev); i++) + prio_tc[i] = i; + + return 0; +} + +static int xsc_wfq_to_wrr_adpat(struct xsc_core_device *xdev, u8 *dst_bw, + u8 *src_bw, u8 ets_cnt, u8 min_weight) +{ + u8 i, index; + u8 max_commom_div = 1; + u8 flag[XSC_DCBX_WFQ_TOTAL_WEIGHT] = {0}; + + if (min_weight >= XSC_DCBX_WFQ_TOTAL_WEIGHT || !ets_cnt) + return 0; + + for (index = 1; index <= min_weight; index++) { + for (i = 0; i < ets_cnt; i++) { + /*any ets bw can not div by whole,flag = 1*/ + if (src_bw[i] % index) { + flag[index] = 1; + break; + } + } + } + + for (index = 1; index <= min_weight; index++) { + if (flag[index] == 0) + max_commom_div = index; + } + + xsc_core_dbg(xdev, "max_commom_div = %d, min_weight = %d\n", max_commom_div, min_weight); + + for (i = 0; i < ets_cnt; i++) { + dst_bw[i] = src_bw[i] / max_commom_div; + xsc_core_dbg(xdev, "dst_bw[%d] = %d\n", i, dst_bw[i]); + } + + return 0; +} + +static int xsc_wrr_to_wfq_adpat(struct xsc_core_device *xdev, + struct xsc_weight_get *wrr, u8 *bandwidth) +{ + u8 i, wrr_cnt = 0, index; + u16 wrr_total_weight = 0, wfq_tatal_weight = 0; + u16 portion = 0; + u16 rmndr = 0; + u16 temp[IEEE_8021QAZ_MAX_TCS] = {0}; + + /*1 calc cur wrr weight total*/ + for (i = 0; i <= wrr->max_prio; i++) { + if (wrr->weight[i] > 0) { + wrr_total_weight += wrr->weight[i]; + wrr_cnt++; + } + } + + xsc_core_dbg(xdev, "%s: wrr_total_weight = %d max_prio = %d\n", + __func__, wrr_total_weight, wrr->max_prio); + + if (!wrr_total_weight || wrr_total_weight > XSC_DCBX_WFQ_TOTAL_WEIGHT) + return -EINVAL; + + portion = XSC_DCBX_WFQ_TOTAL_WEIGHT / wrr_total_weight; + rmndr = XSC_DCBX_WFQ_TOTAL_WEIGHT % wrr_total_weight; + + /*2 calc major wfq weight*/ + for (i = 0; i <= wrr->max_prio; i++) { + if (wrr->weight[i] > 0) { + temp[i] = wrr->weight[i] * portion; + wfq_tatal_weight += temp[i]; + } + } + + xsc_core_dbg(xdev, 
"portion = %d, rmndr = %d, wfq_tatal = %d\n", + portion, rmndr, wfq_tatal_weight); + + /*3 average remainder to every prio*/ + if (rmndr > 0) { + for (i = 0; i < rmndr; i++) { + index = i % wrr_cnt; + temp[index] = temp[index] + 1; + } + } + for (i = 0; i <= wrr->max_prio; i++) + bandwidth[i] = (u8)temp[i]; + + return 0; +} + +static int xsc_query_port_ets_rate_limit(struct xsc_core_device *xdev, u64 *ratelimit) +{ + u8 i; + int err = 0; + struct xsc_rate_limit_get req; + struct xsc_rate_limit_get rsp; + + memset(&req, 0, sizeof(struct xsc_rate_limit_get)); + memset(&rsp, 0, sizeof(struct xsc_rate_limit_get)); + /*0--port rate limit; 1--priority rate limit*/ + req.limit_level = 1; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_RATE_LIMIT, &req, &rsp); + if (err) + return err; + + for (i = 0; i <= xsc_max_tc(xdev); i++) + ratelimit[i] = (u64)(rsp.rate_cir[i]); + + return 0; +} + +static int xsc_modify_port_ets_rate_limit(struct xsc_core_device *xdev, u64 *ratelimit) +{ + u8 i; + struct xsc_rate_limit_set req; + + memset(&req, 0, sizeof(struct xsc_rate_limit_set)); + req.limit_level = 1; + + for (i = 0; i <= xsc_max_tc(xdev); i++) { + req.rate_cir = (u32)ratelimit[i]; + req.limit_id = i; + xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_RATE_LIMIT, &req, NULL); + } + + return 0; +} + +static int xsc_query_port_bw_config(struct xsc_core_device *xdev, u8 *bandwidth) +{ + u8 i; + u8 sp_cnt = 0; + int err = 0; + struct xsc_sp_get sp_rsp; + struct xsc_weight_get weight_rsp; + + memset(&sp_rsp, 0, sizeof(struct xsc_sp_get)); + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_SP, NULL, &sp_rsp); + if (err) + return err; + /*SP enable,bandwidth is 0*/ + for (i = 0; i <= sp_rsp.max_prio; i++) { + if (sp_rsp.sp[i]) { + sp_cnt++; + bandwidth[i] = 0; + } + } + + xsc_core_dbg(xdev, "sp_cnt = %d, max_prio = %d\n", sp_cnt, sp_rsp.max_prio); + + memset(&weight_rsp, 0, sizeof(struct xsc_weight_get)); + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_WEIGHT, NULL, 
&weight_rsp); + if (err) + return err; + + xsc_core_dbg(xdev, "weight_rsp.max_prio = %d\n", weight_rsp.max_prio); + for (i = 0; i <= weight_rsp.max_prio; i++) + xsc_core_dbg(xdev, "i = %d, weight = %d\n", i, weight_rsp.weight[i]); + + xsc_wrr_to_wfq_adpat(xdev, &weight_rsp, bandwidth); + + return 0; +} + +static int xsc_query_port_pfc(struct xsc_core_device *xdev, u8 *pfc_bitmap) +{ + int err = 0; + struct xsc_pfc_get rsp; + + memset(&rsp, 0, sizeof(struct xsc_pfc_get)); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_PFC, NULL, &rsp); + if (err) + return err; + + xsc_pfc_array2bitmap(pfc_bitmap, rsp.pfc_on); + + return 0; +} + +static int xsc_query_port_stats(struct xsc_core_device *xdev, struct ieee_pfc *pfc) +{ + u8 i; + int err = 0; + struct xsc_pfc_prio_stats_mbox_in req; + struct xsc_pfc_prio_stats_mbox_out rsp; + + memset(&req, 0, sizeof(struct xsc_pfc_prio_stats_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_pfc_prio_stats_mbox_out)); + + req.pport = xdev->mac_port; + req.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_QUERY_PFC_PRIO_STATS); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_QUERY_PFC_PRIO_STATS, &req, &rsp); + if (err == 0 && rsp.hdr.status == 0) { + for (i = 0; i <= xsc_max_tc(xdev); i++) { + pfc->requests[i] = rsp.prio_stats[i].tx_pause; + pfc->indications[i] = rsp.prio_stats[i].rx_pause; + } + } + + return 0; +} + +static int xsc_query_port_pfc_stats(struct xsc_core_device *xdev, struct ieee_pfc *pfc) +{ + xsc_query_port_stats(xdev, pfc); + + xsc_query_port_pfc(xdev, &pfc->pfc_en); + + return 0; +} + +static int xsc_set_port_pfc(struct xsc_core_device *xdev, u8 pfcbitmap) +{ + u8 i; + u8 pfc_en[IEEE_8021QAZ_MAX_TCS] = {0}; + struct xsc_pfc_set req; + struct xsc_pfc_set rsp; + + xsc_pfc_bitmap2array(pfcbitmap, pfc_en); + + memset(&req, 0, sizeof(struct xsc_pfc_set)); + for (i = 0; i <= xsc_max_tc(xdev); i++) { + req.pfc_on = pfc_en[i]; + req.priority = i; + xsc_core_dbg(xdev, "%s: prio %d, pfc %d\n", __func__, i, req.pfc_on); + 
xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_PFC, &req, &rsp); + } + return 0; +} + +static int xsc_cmd_set_dscp2prio(struct xsc_core_device *xdev, u8 dscp, u8 prio) +{ + int err = 0; + struct xsc_dscp_pmt_set req; + + memset(&req, 0, sizeof(struct xsc_dscp_pmt_set)); + req.dscp = dscp; + req.priority = prio; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_DSCP_PMT, &req, NULL); + if (err) + return err; + + xsc_core_dbg(xdev, "%s: dscp %d mapping to prio %d\n", __func__, dscp, prio); + + return 0; +} + +static int xsc_cmd_set_trust_state(struct xsc_core_device *xdev, u8 trust_state) +{ + int err = 0; + struct xsc_trust_mode_set req; + + memset(&req, 0, sizeof(struct xsc_trust_mode_set)); + + /*set trust state,0,DSCP mdoe; 1,PCP mode*/ + if (trust_state == XSC_QPTS_TRUST_PCP) + req.is_pcp = 1; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_TRUST_MODE, &req, NULL); + if (err) + return err; + + return 0; +} + +static int xsc_cmd_get_trust_state(struct xsc_core_device *xdev, u8 *trust_state) +{ + int err; + struct xsc_trust_mode_get rsp; + + memset(&rsp, 0, sizeof(struct xsc_trust_mode_get)); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_TRUST_MODE, NULL, &rsp); + if (err) + return err; + + if (rsp.is_pcp) + *trust_state = XSC_QPTS_TRUST_PCP; + else + *trust_state = XSC_QPTS_TRUST_DSCP; + + return 0; +} + +static int xsc_dcbnl_ieee_getets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + int err = 0; + int i; + + if (!priv->dcbx.enable || !xdev->caps.ets) + return -EOPNOTSUPP; + + memset(ets, 0, sizeof(*ets)); + ets->willing = 1; + ets->ets_cap = xsc_max_tc(priv->xdev) + 1; + for (i = 0; i < ets->ets_cap; i++) { + /*get prio->tc mapping*/ + xsc_query_port_prio_tc(xdev, i, &ets->prio_tc[i]); + } + + err = xsc_query_port_bw_config(xdev, ets->tc_tx_bw); + if (err) + return err; + + for (i = 0; i < ets->ets_cap; i++) { + if (!ets->tc_tx_bw[i]) + 
priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT; + else if (ets->tc_tx_bw[i] < XSC_MAX_BW_ALLOC) + priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; + + xsc_core_dbg(xdev, "%s: tc%d, bw=%d\n", + __func__, i, ets->tc_tx_bw[i]); + } + + memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa)); + + return err; +} + +static void xsc_build_tc_tx_bw_sch(struct xsc_core_device *xdev, + struct ieee_ets *ets, u8 *tc_tx_bw, + u8 *tc_sp_enable, int max_tc) +{ + u8 i; + u8 ets_cnt = 0; + u8 min_weight = 0xff; + + for (i = 0; i <= max_tc; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + tc_tx_bw[i] = 1; + tc_sp_enable[i] = i + 1; + break; + case IEEE_8021QAZ_TSA_ETS: + ets_cnt++; + if (ets->tc_tx_bw[i] <= min_weight) + min_weight = ets->tc_tx_bw[i]; + break; + } + } + xsc_wfq_to_wrr_adpat(xdev, tc_tx_bw, ets->tc_tx_bw, ets_cnt, min_weight); +} + +static int xsc_set_port_tx_bw_sch(struct xsc_core_device *xdev, u8 *tc_sp_enable, u8 *tc_tx_bw) +{ + u8 i; + int err = 0; + struct xsc_sp_set req_sch; + struct xsc_weight_set req_weight; + + memset(&req_sch, 0, sizeof(struct xsc_sp_set)); + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + req_sch.sp[i] = tc_sp_enable[i]; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_SP, &req_sch, NULL); + if (err) + return err; + + memset(&req_weight, 0, sizeof(struct xsc_weight_set)); + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + req_weight.weight[i] = tc_tx_bw[i]; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_WEIGHT, &req_weight, NULL); + if (err) + return err; + + return 0; +} + +int xsc_dcbnl_ieee_setets_core(struct xsc_adapter *priv, struct ieee_ets *ets) +{ + struct xsc_core_device *xdev = priv->xdev; + u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = {1}; + u8 tc_sp_enable[IEEE_8021QAZ_MAX_TCS]; + int max_tc = xsc_max_tc(xdev); + int err = 0; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + memset(tc_sp_enable, 0, IEEE_8021QAZ_MAX_TCS); + xsc_build_tc_tx_bw_sch(xdev, ets, tc_tx_bw, tc_sp_enable, max_tc); + 
xsc_set_port_prio_tc(xdev, ets->prio_tc); + + err = xsc_set_port_tx_bw_sch(xdev, tc_sp_enable, tc_tx_bw); + if (err) + return err; + + memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa)); + + return err; +} + +static int xsc_dbcnl_validate_ets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + bool have_ets_tc = false; + int bw_sum = 0; + int i; + + if (!priv->dcbx.enable) + return 0; + + /* Validate Priority */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] >= XSC_MAX_PRIORITY) { + netdev_err(netdev, + "Failed to validate ETS: priority value greater than max(%d)\n", + XSC_MAX_PRIORITY); + return -EINVAL; + } + } + + /* Validate Bandwidth Sum */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { + /* do not allow ets with 0 weight */ + have_ets_tc = true; + if (!ets->tc_tx_bw[i]) + return -EINVAL; + bw_sum += ets->tc_tx_bw[i]; + } + } + + xsc_core_dbg(xdev, "%s bw_sum = %d\n", __func__, bw_sum); + + if (have_ets_tc && bw_sum != 100) { + netdev_err(netdev, "Failed to validate ETS: BW sum is illegal\n"); + return -EINVAL; + } + return 0; +} + +static int xsc_dcbnl_ieee_setets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct xsc_adapter *priv = netdev_priv(dev); + int err; + + if (!priv->dcbx.enable) + return 0; + + if (!priv->xdev->caps.ets) + return -EOPNOTSUPP; + + err = xsc_dbcnl_validate_ets(dev, ets); + if (err) + return err; + + err = xsc_dcbnl_ieee_setets_core(priv, ets); + if (err) + return err; + + return 0; +} + +static int xsc_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + pfc->pfc_cap = xsc_max_tc(xdev) + 1; + pfc->pfc_en = 0; + if (xdev->caps.port_buf) + pfc->delay = priv->dcbx.cable_len; + 
xsc_query_port_pfc_stats(xdev, pfc); + + xsc_core_dbg(xdev, "%s: pfc_en=0x%x\n", __func__, pfc->pfc_en); + + return 0; +} + +static int xsc_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + u8 curr_pfc_en; + int ret = 0; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + /* pfc_en */ + xsc_query_port_pfc(xdev, &curr_pfc_en); + if (pfc->pfc_en != curr_pfc_en) { + ret = xsc_set_port_pfc(xdev, pfc->pfc_en); + if (ret) + return ret; + } + + xsc_core_dbg(xdev, "%s: new_pfc_en=0x%x, cur_pfc_en=0x%x\n", + __func__, pfc->pfc_en, curr_pfc_en); + return ret; +} + +static u8 xsc_dcbnl_getdcbx(struct net_device *dev) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + xsc_core_dbg(xdev, "%s: dcbx->cap=0x%x\n", __func__, priv->dcbx.cap); + return priv->dcbx.cap; +} + +static u8 xsc_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_dcbx *dcbx = &priv->dcbx; + struct ieee_ets ets = {0}; + struct ieee_pfc pfc = {0}; + struct xsc_lldp_status_mbox_in req; + struct xsc_lldp_status_mbox_out rsp; + int err = 0; + + memset(&req, 0, sizeof(struct xsc_lldp_status_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_lldp_status_mbox_out)); + + req.sub_type = XSC_OS_HANDLE_LLDP_STATUS; + req.os_handle_lldp = cpu_to_be32(1); + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_SET_LLDP_STATUS, &req, &rsp); + if (err) { + xsc_core_err(xdev, "set LLDP status fail,err %d\n", err); + return err; + } + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + xsc_core_dbg(xdev, "%s: mode=%d, dcbx->cap = %d\n", __func__, mode, dcbx->cap); + + /* no support for LLD_MANAGED modes or CEE+IEEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + 
!(mode & DCB_CAP_DCBX_HOST)) + return -EINVAL; + + if (mode == dcbx->cap) + return 0; + + /* ETS and PFC defaults */ + ets.ets_cap = 8; + pfc.pfc_cap = 8; + + /*mode switch, set base config*/ + if (mode & DCB_CAP_DCBX_VER_IEEE) { + xsc_dcbnl_ieee_setets(dev, &ets); + xsc_dcbnl_ieee_setpfc(dev, &pfc); + } else if (mode & DCB_CAP_DCBX_VER_CEE) { + xsc_dcbnl_setall(dev); + } + + dcbx->cap = mode; + + return 0; +} + +static int xsc_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct dcb_app temp; + bool is_new; + int err; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + if (!priv->xdev->caps.dscp) + return -EOPNOTSUPP; + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP || app->protocol >= XSC_MAX_DSCP) + return -EINVAL; + + /* Save the old entry info */ + temp.selector = IEEE_8021QAZ_APP_SEL_DSCP; + temp.protocol = app->protocol; + temp.priority = priv->dcbx_dp.dscp2prio[app->protocol]; + + /* Check if need to switch to dscp trust state */ + if (!priv->dcbx.dscp_app_cnt) { + err = xsc_set_trust_state(priv, XSC_QPTS_TRUST_DSCP); + if (err) + return err; + } + + /* Skip the fw command if new and old mapping are the same */ + if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) { + err = xsc_set_dscp2prio(priv, app->protocol, app->priority); + if (err) + goto fw_err; + } + + /* Delete the old entry if exists */ + is_new = false; + err = dcb_ieee_delapp(dev, &temp); + if (err) + is_new = true; + + /* Add new entry and update counter */ + err = dcb_ieee_setapp(dev, app); + if (err) + return err; + + if (is_new) + priv->dcbx.dscp_app_cnt++; + + return err; + +fw_err: + xsc_set_trust_state(priv, XSC_QPTS_TRUST_PCP); + return err; +} + +static int xsc_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) +{ + struct xsc_adapter *priv = netdev_priv(dev); + int err; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + if (!priv->xdev->caps.dscp) + return -EOPNOTSUPP; + + if 
(app->selector != IEEE_8021QAZ_APP_SEL_DSCP || app->protocol >= XSC_MAX_DSCP) + return -EINVAL; + + /* Skip if no dscp app entry */ + if (!priv->dcbx.dscp_app_cnt) + return -ENOENT; + + /* Check if the entry matches fw setting */ + if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) + return -ENOENT; + + /* Delete the app entry */ + err = dcb_ieee_delapp(dev, app); + if (err) + return err; + + /* Reset the priority mapping back to zero */ + err = xsc_set_dscp2prio(priv, app->protocol, 0); + if (err) + goto fw_err; + + priv->dcbx.dscp_app_cnt--; + + /* Check if need to switch to pcp trust state */ + if (!priv->dcbx.dscp_app_cnt) + err = xsc_set_trust_state(priv, XSC_QPTS_TRUST_PCP); + + return err; + +fw_err: + xsc_set_trust_state(priv, XSC_QPTS_TRUST_PCP); + return err; +} + +static int xsc_dcbnl_ieee_getmaxrate(struct net_device *netdev, + struct ieee_maxrate *maxrate) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + u64 max_bw_value[IEEE_8021QAZ_MAX_TCS] = {0}; + int i, err; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate)); + + err = xsc_query_port_ets_rate_limit(xdev, max_bw_value); + if (err) + return err; + + for (i = 0; i <= xsc_max_tc(xdev); i++) + maxrate->tc_maxrate[i] = max_bw_value[i] * XSC_RATE_LIMIT_BASE / XSC_1GB; + + return 0; +} + +static int xsc_dcbnl_ieee_setmaxrate(struct net_device *netdev, + struct ieee_maxrate *maxrate) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + u64 max_bw_value[IEEE_8021QAZ_MAX_TCS]; + int i; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + memset(max_bw_value, 0, sizeof(max_bw_value)); + + for (i = 0; i <= xsc_max_tc(xdev); i++) { + if (!maxrate->tc_maxrate[i]) + continue; + max_bw_value[i] = maxrate->tc_maxrate[i] * XSC_1GB / XSC_RATE_LIMIT_BASE; + xsc_core_dbg(xdev, "%s: tc_%d <=> max_bw %llu * 16kbps\n", + __func__, i, max_bw_value[i]); + 
} + + return xsc_modify_port_ets_rate_limit(xdev, max_bw_value); +} + +static u8 xsc_dcbnl_setall(struct net_device *netdev) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + struct xsc_core_device *xdev = priv->xdev; + struct ieee_ets ets; + struct ieee_pfc pfc; + int err = -EOPNOTSUPP; + int i; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + if (!xdev->caps.ets) + goto out; + + memset(&ets, 0, sizeof(ets)); + memset(&pfc, 0, sizeof(pfc)); + + ets.ets_cap = IEEE_8021QAZ_MAX_TCS; + for (i = 0; i < CEE_DCBX_MAX_PGS; i++) { + ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i]; + ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i]; + ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; + ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i]; + } + + err = xsc_dbcnl_validate_ets(netdev, &ets); + if (err) + goto out; + + err = xsc_dcbnl_ieee_setets_core(priv, &ets); + if (err) { + netdev_err(netdev, + "%s, Failed to set ETS: %d\n", __func__, err); + goto out; + } + + /* Set PFC */ + pfc.pfc_cap = xsc_max_tc(xdev) + 1; + if (!cee_cfg->pfc_enable) + pfc.pfc_en = 0; + else + for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) + pfc.pfc_en |= cee_cfg->pfc_setting[i] << i; + + err = xsc_dcbnl_ieee_setpfc(netdev, &pfc); + if (err) { + netdev_err(netdev, + "%s, Failed to set PFC: %d\n", __func__, err); + goto out; + } +out: + return err ? 
XSC_DCB_NO_CHG : XSC_DCB_CHG_RESET; +} + +static u8 xsc_dcbnl_getstate(struct net_device *netdev) +{ + return XSC_CEE_STATE_UP; +} + +static void xsc_dcbnl_getpermhwaddr(struct net_device *netdev, + u8 *perm_addr) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + + if (!priv->dcbx.enable || !perm_addr) + return; + + memset(perm_addr, 0xff, MAX_ADDR_LEN); + xsc_query_nic_vport_mac_address(priv->xdev, 0, perm_addr); +} + +static void xsc_dcbnl_setpgtccfgtx(struct net_device *netdev, + int priority, u8 prio_type, + u8 pgid, u8 bw_pct, u8 up_map) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "%s: prio=%d, type=%d, pgid=%d, bw_pct=%d, up_map=%d\n", + __func__, priority, prio_type, pgid, + bw_pct, up_map); + + if (priority >= CEE_DCBX_MAX_PRIO) { + netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + + if (pgid >= CEE_DCBX_MAX_PGS) { + netdev_err(netdev, + "%s, priority group is out of range\n", __func__); + return; + } + + cee_cfg->prio_to_pg_map[priority] = pgid; +} + +static void xsc_dcbnl_setpgtccfgrx(struct net_device *netdev, + int priority, u8 prio_type, + u8 pgid, u8 bw_pct, u8 up_map) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "Nothing to be done pgtccfg rx, not support\n"); +} + +static void xsc_dcbnl_setpgbwgcfgtx(struct net_device *netdev, + int pgid, u8 bw_pct) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "%s: pgid=%d, bw_pct=%d\n", + __func__, pgid, bw_pct); + if (pgid >= CEE_DCBX_MAX_PGS) { + netdev_err(netdev, + "%s, priority group is out of range\n", __func__); + return; + } 
+ + cee_cfg->pg_bw_pct[pgid] = bw_pct; +} + +static void xsc_dcbnl_setpgbwgcfgrx(struct net_device *netdev, + int pgid, u8 bw_pct) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "Nothing to be done pgbwgcfg rx, not support\n"); +} + +static void xsc_dcbnl_getpgtccfgtx(struct net_device *netdev, + int priority, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + if (!xdev->caps.ets) { + netdev_err(netdev, "%s, ets is not supported\n", __func__); + return; + } + + if (priority >= CEE_DCBX_MAX_PRIO) { + netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + xsc_query_port_prio_tc(xdev, priority, pgid); + + *up_map = *pgid; + *prio_type = 0; + *bw_pct = 100; + + xsc_core_dbg(xdev, "%s: prio=%d, pgid=%d, bw_pct=%d\n", + __func__, priority, *pgid, *bw_pct); +} + +static void xsc_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio, + u8 *prio_type, u8 *pgid, u8 *bw_pct, + u8 *up_map) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "pgtccfgrx Nothing to get; No RX support\n"); + + *prio_type = *pgid = *bw_pct = *up_map = 0; +} + +static void xsc_dcbnl_getpgbwgcfgtx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct ieee_ets ets; + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + if (pgid >= CEE_DCBX_MAX_PGS) { + netdev_err(netdev, + "%s, priority group is out of range\n", __func__); + return; + } + + xsc_dcbnl_ieee_getets(netdev, &ets); + *bw_pct = ets.tc_tx_bw[pgid]; + xsc_core_dbg(xdev, "%s: pgid=%d, bw_pct=%d\n", + __func__, pgid, *bw_pct); +} + +static void xsc_dcbnl_setpfccfg(struct 
net_device *netdev, + int priority, u8 setting) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "%s: prio=%d, setting=%d\n", + __func__, priority, setting); + if (priority >= CEE_DCBX_MAX_PRIO) { + netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + + if (setting > 1) + return; + + cee_cfg->pfc_setting[priority] = setting; +} + +static void xsc_dcbnl_getpgbwgcfgrx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return; + + xsc_core_dbg(xdev, "bwgcfgrx Nothing to get; No RX support\n"); + + *bw_pct = 0; +} + +static int xsc_dcbnl_get_priority_pfc(struct net_device *netdev, + int priority, u8 *setting) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct ieee_pfc pfc; + int err; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + err = xsc_dcbnl_ieee_getpfc(netdev, &pfc); + + if (err) + *setting = 0; + else + *setting = (pfc.pfc_en >> priority) & 0x01; + + xsc_core_dbg(xdev, "%s: prio=%d, setting=%d\n", + __func__, priority, *setting); + return err; +} + +static void xsc_dcbnl_getpfccfg(struct net_device *netdev, + int priority, u8 *setting) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + + if (!priv->dcbx.enable) + return; + + if (priority >= CEE_DCBX_MAX_PRIO) { + netdev_err(netdev, + "%s, priority is out of range\n", __func__); + return; + } + + if (!setting) + return; + + xsc_dcbnl_get_priority_pfc(netdev, priority, setting); +} + +static u8 xsc_dcbnl_getcap(struct net_device *netdev, + int capid, u8 *cap) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + u8 rval = 0; + + if (!priv->dcbx.enable) + return rval; + + switch (capid) { 
+ case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 1 << xsc_max_tc(xdev); + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 1 << xsc_max_tc(xdev); + break; + case DCB_CAP_ATTR_GSP: + *cap = false; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = priv->dcbx.cap | + DCB_CAP_DCBX_VER_CEE | + DCB_CAP_DCBX_VER_IEEE; + break; + default: + *cap = 0; + rval = 1; + break; + } + + xsc_core_dbg(xdev, "%s: capid=%d, cap=%d, ret=%d\n", + __func__, capid, *cap, rval); + return rval; +} + +static int xsc_dcbnl_getnumtcs(struct net_device *netdev, + int tcs_id, u8 *num) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + + if (!priv->dcbx.enable) + return -EOPNOTSUPP; + + switch (tcs_id) { + case DCB_NUMTCS_ATTR_PG: + case DCB_NUMTCS_ATTR_PFC: + *num = xsc_max_tc(xdev) + 1; + break; + default: + return -EINVAL; + } + + xsc_core_dbg(xdev, "%s: tcs_id=%d, tc_num=%d\n", + __func__, tcs_id, *num); + return 0; +} + +static u8 xsc_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct ieee_pfc pfc; + + if (!priv->dcbx.enable) + return XSC_CEE_STATE_DOWN; + + if (xsc_dcbnl_ieee_getpfc(netdev, &pfc)) + return XSC_CEE_STATE_DOWN; + + return pfc.pfc_en ? 
XSC_CEE_STATE_UP : XSC_CEE_STATE_DOWN;
+}
+
+static void xsc_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+	struct xsc_adapter *priv = netdev_priv(netdev);
+	struct xsc_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
+
+	if (!priv->dcbx.enable)
+		return;
+
+	if (state != XSC_CEE_STATE_UP && state != XSC_CEE_STATE_DOWN)
+		return;
+
+	cee_cfg->pfc_enable = state;
+}
+
+const struct dcbnl_rtnl_ops xsc_dcbnl_ops = {
+	.ieee_getets = xsc_dcbnl_ieee_getets,
+	.ieee_setets = xsc_dcbnl_ieee_setets,
+	.ieee_getmaxrate = xsc_dcbnl_ieee_getmaxrate,
+	.ieee_setmaxrate = xsc_dcbnl_ieee_setmaxrate,
+	.ieee_getpfc = xsc_dcbnl_ieee_getpfc,
+	.ieee_setpfc = xsc_dcbnl_ieee_setpfc,
+	.ieee_setapp = xsc_dcbnl_ieee_setapp,
+	.ieee_delapp = xsc_dcbnl_ieee_delapp,
+	.getdcbx = xsc_dcbnl_getdcbx,
+	.setdcbx = xsc_dcbnl_setdcbx,
+
+	/* CEE interfaces */
+	.setall = xsc_dcbnl_setall,
+	.getstate = xsc_dcbnl_getstate,
+	.getpermhwaddr = xsc_dcbnl_getpermhwaddr,
+
+	.setpgtccfgtx = xsc_dcbnl_setpgtccfgtx,
+	.setpgtccfgrx = xsc_dcbnl_setpgtccfgrx,
+	.setpgbwgcfgtx = xsc_dcbnl_setpgbwgcfgtx,
+	.setpgbwgcfgrx = xsc_dcbnl_setpgbwgcfgrx,
+
+	.getpgtccfgtx = xsc_dcbnl_getpgtccfgtx,
+	.getpgtccfgrx = xsc_dcbnl_getpgtccfgrx,
+	.getpgbwgcfgtx = xsc_dcbnl_getpgbwgcfgtx,
+	.getpgbwgcfgrx = xsc_dcbnl_getpgbwgcfgrx,
+
+	.setpfccfg = xsc_dcbnl_setpfccfg,
+	.getpfccfg = xsc_dcbnl_getpfccfg,
+	.getcap = xsc_dcbnl_getcap,
+	.getnumtcs = xsc_dcbnl_getnumtcs,
+	.getpfcstate = xsc_dcbnl_getpfcstate,
+	.setpfcstate = xsc_dcbnl_setpfcstate,
+};
+
+static void xsc_dcbnl_query_dcbx_mode(struct xsc_core_device *xdev,
+				      enum xsc_dcbx_oper_mode *mode)
+{
+	int err = 0;
+	struct xsc_lldp_status_mbox_in req;
+	struct xsc_lldp_status_mbox_out rsp;
+
+	*mode = XSC_DCBX_PARAM_VER_OPER_HOST;
+
+	memset(&req, 0, sizeof(struct xsc_lldp_status_mbox_in));
+	memset(&rsp, 0, sizeof(struct xsc_lldp_status_mbox_out));
+
+	req.sub_type = XSC_OS_HANDLE_LLDP_STATUS;
+	err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_GET_LLDP_STATUS,
&req, &rsp); + if (err) { + xsc_core_err(xdev, "get LLDP status fail,err %d\n", err); + return; + } + + rsp.status.os_handle_lldp = be32_to_cpu(rsp.status.os_handle_lldp); + xsc_core_dbg(xdev, "%s: lldp os handle = %u\n", __func__, rsp.status.os_handle_lldp); + if (rsp.status.os_handle_lldp != XSC_DCBX_PARAM_VER_OPER_HOST) + *mode = XSC_DCBX_PARAM_VER_OPER_AUTO; +} + +static void xsc_ets_init(struct xsc_adapter *priv) +{ + struct ieee_ets ets; + int err; + int i; + + if (!priv->xdev->caps.ets) + return; + memset(&ets, 0, sizeof(ets)); + ets.ets_cap = xsc_max_tc(priv->xdev) + 1; + for (i = 0; i < ets.ets_cap; i++) { + ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; + ets.prio_tc[i] = i; + ets.tc_tx_bw[i] = XSC_WRR_DEFAULT_WEIGHT; + } + + err = xsc_dcbnl_ieee_setets_core(priv, &ets); + if (err) + netdev_err(priv->netdev, + "%s, Failed to init ETS: %d\n", __func__, err); +} + +enum { + INIT, + DELETE, +}; + +static void xsc_dcbnl_dscp_app(struct xsc_adapter *priv, int action) +{ + struct dcb_app temp; + struct xsc_core_device *xdev = priv->xdev; + int i; + + xsc_core_dbg(xdev, "%s: action=%d\n", __func__, action); + if (!priv->xdev->caps.dscp) + return; + + /* No SEL_DSCP entry in non DSCP state */ + if (priv->dcbx_dp.trust_state != XSC_QPTS_TRUST_DSCP) + return; + + temp.selector = IEEE_8021QAZ_APP_SEL_DSCP; + for (i = 0; i < XSC_MAX_DSCP; i++) { + temp.protocol = i; + temp.priority = priv->dcbx_dp.dscp2prio[i]; + if (action == INIT) + dcb_ieee_setapp(priv->netdev, &temp); + else + dcb_ieee_delapp(priv->netdev, &temp); + } + + priv->dcbx.dscp_app_cnt = (action == INIT) ? 
XSC_MAX_DSCP : 0; +} + +void xsc_dcbnl_init_app(struct xsc_adapter *priv) +{ + xsc_dcbnl_dscp_app(priv, INIT); +} + +void xsc_dcbnl_delete_app(struct xsc_adapter *priv) +{ + xsc_dcbnl_dscp_app(priv, DELETE); +} + +static int xsc_query_trust_state(struct xsc_core_device *xdev, u8 *trust) +{ + int err = 0; + + err = xsc_cmd_get_trust_state(xdev, trust); + if (err) + return err; + + return 0; +} + +static int xsc_set_trust_state(struct xsc_adapter *priv, u8 trust_state) +{ + int err = 0; + + err = xsc_cmd_set_trust_state(priv->xdev, trust_state); + if (err) + return err; + + priv->dcbx_dp.trust_state = trust_state; + + return err; +} + +static int xsc_set_dscp2prio(struct xsc_adapter *priv, u8 dscp, u8 prio) +{ + int err = 0; + struct xsc_core_device *xdev = priv->xdev; + + xsc_core_dbg(xdev, "%s: dscp=%d, prio=%d\n", + __func__, dscp, prio); + + err = xsc_cmd_set_dscp2prio(priv->xdev, dscp, prio); + if (err) + return err; + + priv->dcbx_dp.dscp2prio[dscp] = prio; + return err; +} + +static int xsc_query_dscp2prio(struct xsc_core_device *xdev, u8 *dscp2prio) +{ + int err = 0; + struct xsc_dscp_pmt_get rsp; + + memset(&rsp, 0, sizeof(rsp)); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_DSCP_PMT, NULL, &rsp); + if (err) + return err; + + memcpy(dscp2prio, rsp.prio_map, sizeof(u8) * XSC_MAX_DSCP); + + return 0; +} + +static int xsc_trust_initialize(struct xsc_adapter *priv) +{ + struct xsc_core_device *xdev = priv->xdev; + int err; + + priv->dcbx_dp.trust_state = XSC_QPTS_TRUST_PCP; + + if (!xdev->caps.dscp) + return 0; + + err = xsc_query_trust_state(xdev, &priv->dcbx_dp.trust_state); + if (err) + return err; + + err = xsc_query_dscp2prio(xdev, priv->dcbx_dp.dscp2prio); + if (err) + return err; + + return 0; +} + +#define XSC_BUFFER_CELL_SHIFT 7 +static u16 xsc_query_port_buffers_cell_size(struct xsc_adapter *priv) +{ + return (1 << XSC_BUFFER_CELL_SHIFT); +} + +static void xsc_cee_init(struct xsc_adapter *priv) +{ + struct xsc_cee_config *cee_cfg = 
&priv->dcbx.cee_cfg;
+	struct xsc_core_device *xdev = priv->xdev;
+	int i, max_tc;
+	u8 pfc_bitmap;
+
+	memset(cee_cfg, 0, sizeof(*cee_cfg));
+
+	cee_cfg->pfc_enable = 1;
+
+	xsc_query_port_pfc(xdev, &pfc_bitmap);
+
+	xsc_pfc_bitmap2array(pfc_bitmap, cee_cfg->pfc_setting);
+
+	max_tc = xsc_max_tc(priv->xdev) + 1;
+	for (i = 0; i < max_tc; i++)
+		cee_cfg->prio_to_pg_map[i] = i % max_tc;
+}
+
+static u8 xsc_dcbnl_get_dcbx_status(struct xsc_core_device *xdev)
+{
+	u8 enable = 0;
+	int err;
+	struct xsc_lldp_status_mbox_in req;
+	struct xsc_lldp_status_mbox_out rsp;
+
+	memset(&req, 0, sizeof(struct xsc_lldp_status_mbox_in));
+	memset(&rsp, 0, sizeof(struct xsc_lldp_status_mbox_out));
+
+	req.sub_type = XSC_DCBX_STATUS;
+	err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_GET_LLDP_STATUS, &req, &rsp);
+	if (err)
+		return 0;
+
+	enable = (u8)be32_to_cpu(rsp.status.dcbx_status);
+
+	return enable;
+}
+
+void xsc_dcbnl_initialize(struct xsc_adapter *priv)
+{
+	struct xsc_dcbx *dcbx = &priv->dcbx;
+	struct xsc_core_device *xdev = priv->xdev;
+
+	xsc_trust_initialize(priv);
+
+	if (!priv->xdev->caps.qos)
+		return;
+
+	if (priv->xdev->caps.dcbx)
+		xsc_dcbnl_query_dcbx_mode(xdev, &dcbx->mode);
+
+	priv->dcbx.enable = xsc_dcbnl_get_dcbx_status(xdev);
+
+	if (priv->dcbx.enable) {
+		priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE;
+
+		if (priv->dcbx.mode == XSC_DCBX_PARAM_VER_OPER_HOST)
+			priv->dcbx.cap = priv->dcbx.cap | DCB_CAP_DCBX_HOST;
+
+		priv->dcbx.port_buff_cell_sz = xsc_query_port_buffers_cell_size(priv);
+		priv->dcbx.manual_buffer = 0;
+		priv->dcbx.cable_len = XSC_DEFAULT_CABLE_LEN;
+
+		xsc_cee_init(priv);
+		xsc_ets_init(priv);
+	}
+}
+#endif
diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h
new file mode 100644
index 000000000000..be7e6d89c9f6
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 - 2023, Shanghai
Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_H +#define XSC_ETH_H + +#include "common/qp.h" +#include "xsc_eth_common.h" +#include "xsc_eth_stats.h" +#include "common/version.h" +#include +#include "common/xsc_fs.h" + +#define XSC_INVALID_LKEY 0x100 + +#define XSCALE_ETH_PHYPORT_DOWN 0 +#define XSCALE_ETH_PHYPORT_UP 1 +#ifdef CONFIG_DCB +#define CONFIG_XSC_CORE_EN_DCB 1 +#endif +#define XSC_PAGE_CACHE 1 + +#define XSCALE_DRIVER_NAME "xsc_eth" +#define XSCALE_RET_SUCCESS 0 +#define XSCALE_RET_ERROR 1 + +enum { + XSCALE_ETH_DRIVER_INIT, + XSCALE_ETH_DRIVER_OK, + XSCALE_ETH_DRIVER_CLOSE, + XSCALE_ETH_DRIVER_DETACH, +}; + +#define XSCALE_ETH_QP_NUM_MAX 1 +#define XSCALE_RX_THREAD_MAX 128 + +enum { + XSC_BW_NO_LIMIT = 0, + XSC_100_MBPS_UNIT = 3, + XSC_GBPS_UNIT = 4, +}; + +struct xsc_cee_config { + /* bw pct for priority group */ + u8 pg_bw_pct[CEE_DCBX_MAX_PGS]; + u8 prio_to_pg_map[CEE_DCBX_MAX_PRIO]; + u8 pfc_setting[CEE_DCBX_MAX_PRIO]; + u8 pfc_enable; +}; + +enum { + XSC_DCB_CHG_RESET, + XSC_DCB_NO_CHG, + XSC_DCB_CHG_NO_RESET, +}; + +enum xsc_qpts_trust_state { + XSC_QPTS_TRUST_PCP = 1, + XSC_QPTS_TRUST_DSCP = 2, +}; + +enum xsc_dcbx_oper_mode { + XSC_DCBX_PARAM_VER_OPER_HOST = 0x0, + XSC_DCBX_PARAM_VER_OPER_AUTO = 0x3, +}; + +enum { + XSC_PORT_BUFFER_CABLE_LEN = BIT(0), + XSC_PORT_BUFFER_PFC = BIT(1), + XSC_PORT_BUFFER_PRIO2BUFFER = BIT(2), + XSC_PORT_BUFFER_SIZE = BIT(3), +}; + +struct xsc_dcbx { + u8 enable; + enum xsc_dcbx_oper_mode mode; + struct xsc_cee_config cee_cfg; /* pending configuration */ + u8 dscp_app_cnt; + + /* The only setting that cannot be read from FW */ + u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; + u8 cap; + + /* Buffer configuration */ + u8 manual_buffer; + u32 cable_len; + u32 xoff; + u16 port_buff_cell_sz; +}; + +struct xsc_bufferx_reg { + u8 lossy; + u8 epsb; + u32 size; + u32 xoff; + u32 xon; +}; + +struct xsc_port_buffer { + u32 port_buffer_size; + u32 spare_buffer_size; + struct xsc_bufferx_reg 
buffer[XSC_MAX_BUFFER]; +}; + +struct xsc_dcbx_dp { + u8 dscp2prio[XSC_MAX_DSCP]; + u8 trust_state; +}; + +struct xsc_rss_params { + u32 indirection_rqt[XSC_INDIR_RQT_SIZE]; + u32 rx_hash_fields[XSC_NUM_INDIR_TIRS]; + u8 toeplitz_hash_key[52]; + u8 hfunc; + u32 rss_hash_tmpl; +}; + +struct xsc_vlan_params { + DECLARE_BITMAP(active_cvlans, VLAN_N_VID); + DECLARE_BITMAP(active_svlans, VLAN_N_VID); +}; + +struct xsc_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + struct device *dev; + struct xsc_core_device *xdev; + + struct xsc_eth_params nic_param; + struct xsc_rss_params rss_params; + struct xsc_vlan_params vlan_params; + + struct xsc_flow_steering fs; + + struct workqueue_struct *workq; + struct work_struct update_carrier_work; + struct work_struct set_rx_mode_work; + struct work_struct event_work; + + struct xsc_eth_channels channels; + struct xsc_sq **txq2sq; + + u32 status; + spinlock_t lock; /* adapter lock */ + + struct mutex state_lock; /* Protects Interface state */ + struct xsc_stats *stats; + + struct xsc_dcbx dcbx; + struct xsc_dcbx_dp dcbx_dp; + + u32 msglevel; + + struct task_struct *task; + + int channel_tc2realtxq[XSC_ETH_MAX_NUM_CHANNELS][XSC_MAX_NUM_TC]; +}; + +struct xsc_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + u32 len; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + u32 page_offset; +#else + u16 page_offset; +#endif + u16 pagecnt_bias; +}; + +struct xsc_tx_buffer { + struct sk_buff *skb; + unsigned long *h_skb_data; + dma_addr_t dma; + u32 len; + struct page *page; +#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) + u32 page_offset; +#else + u16 page_offset; +#endif + u16 pagecnt_bias; +}; + +struct xsc_tx_wqe { + struct xsc_send_wqe_ctrl_seg ctrl; + struct xsc_wqe_data_seg data[]; +}; + +typedef int (*xsc_eth_fp_preactivate)(struct xsc_adapter *priv); +typedef int (*xsc_eth_fp_postactivate)(struct xsc_adapter *priv); + +int xsc_safe_switch_channels(struct xsc_adapter *adapter, + 
xsc_eth_fp_preactivate preactivate, + xsc_eth_fp_postactivate postactivate); +int xsc_eth_num_channels_changed(struct xsc_adapter *priv); +int xsc_eth_modify_nic_hca(struct xsc_adapter *adapter, u32 change); +bool xsc_eth_get_link_status(struct xsc_adapter *adapter); +int xsc_eth_get_link_info(struct xsc_adapter *adapter, + struct xsc_event_linkinfo *plinkinfo); +int xsc_eth_set_link_info(struct xsc_adapter *adapter, + struct xsc_event_linkinfo *plinkinfo); + +int xsc_eth_set_led_status(int id, struct xsc_adapter *adapter); + +/* Use this function to get max num channels after netdev was created */ +static inline int xsc_get_netdev_max_channels(struct xsc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + return min_t(unsigned int, netdev->num_rx_queues, + netdev->num_tx_queues); +} + +static inline int xsc_get_netdev_max_tc(struct xsc_adapter *adapter) +{ + return adapter->nic_param.num_tc; +} + +#ifdef CONFIG_XSC_CORE_EN_DCB +extern const struct dcbnl_rtnl_ops xsc_dcbnl_ops; +int xsc_dcbnl_ieee_setets_core(struct xsc_adapter *priv, struct ieee_ets *ets); +void xsc_dcbnl_initialize(struct xsc_adapter *priv); +void xsc_dcbnl_init_app(struct xsc_adapter *priv); +void xsc_dcbnl_delete_app(struct xsc_adapter *priv); +#endif +#endif /* XSC_ETH_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h new file mode 100644 index 000000000000..49550e1f87d2 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h @@ -0,0 +1,296 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_ETH_COMMON_H +#define XSC_ETH_COMMON_H + +#include "xsc_queue.h" +#include "xsc_eth_compat.h" +#include "common/xsc_pph.h" +#include "common/xsc_hsi.h" + +#define SW_MIN_MTU 64 +#define SW_DEFAULT_MTU 1500 +#define SW_MAX_MTU 9600 + +#define XSC_ETH_HW_MTU_SEND 9800 /*need to obtain from hardware*/ +#define XSC_ETH_HW_MTU_RECV 9800 /*need to obtain from hardware*/ +#define XSC_SW2HW_MTU(mtu) ((mtu) + 14 + 4) +#define XSC_SW2HW_FRAG_SIZE(mtu) ((mtu) + 14 + 8 + 4 + XSC_PPH_HEAD_LEN) +#define XSC_SW2HW_RX_PKT_LEN(mtu) ((mtu) + 14 + 256) + +#define XSC_RX_MAX_HEAD (256) +#define XSC_RX_HEADROOM NET_SKB_PAD + +#define XSC_QPN_SQN_STUB 1025 +#define XSC_QPN_RQN_STUB 1024 + +#define XSC_LOG_INDIR_RQT_SIZE 0x8 + +#define XSC_INDIR_RQT_SIZE BIT(XSC_LOG_INDIR_RQT_SIZE) +#ifdef XSC_RSS_SUPPORT +#define XSC_ETH_MIN_NUM_CHANNELS 2 +#else +#define XSC_ETH_MIN_NUM_CHANNELS 1 +#endif +#define XSC_ETH_MAX_NUM_CHANNELS XSC_INDIR_RQT_SIZE + +#define XSC_TX_NUM_TC 1 +#define XSC_MAX_NUM_TC 8 +#define XSC_ETH_MAX_TC_TOTAL (XSC_ETH_MAX_NUM_CHANNELS * XSC_MAX_NUM_TC) +#define XSC_ETH_MAX_QP_NUM_PER_CH (XSC_MAX_NUM_TC + 1) + +#define XSC_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define XSC_MIN_SKB_FRAG_SZ (XSC_SKB_FRAG_SZ(XSC_RX_HEADROOM)) +#define XSC_LOG_MAX_RX_WQE_BULK \ + (ilog2(PAGE_SIZE / roundup_pow_of_two(XSC_MIN_SKB_FRAG_SZ))) + +#define XSC_MIN_LOG_RQ_SZ (1 + XSC_LOG_MAX_RX_WQE_BULK) +#define XSC_DEF_LOG_RQ_SZ 0xa +#define XSC_MAX_LOG_RQ_SZ 0xd + +#define XSC_MIN_LOG_SQ_SZ 0x6 +#define XSC_DEF_LOG_SQ_SZ 0xa +#define XSC_MAX_LOG_SQ_SZ 0xd + +#define XSC_SQ_ELE_NUM_DEF BIT(XSC_DEF_LOG_SQ_SZ) +#define XSC_RQ_ELE_NUM_DEF BIT(XSC_DEF_LOG_RQ_SZ) + +#define XSC_LOG_RQCQ_SZ 0xb +#define XSC_LOG_SQCQ_SZ 0xa + +#define XSC_RQCQ_ELE_NUM BIT(XSC_LOG_RQCQ_SZ) +#define XSC_SQCQ_ELE_NUM BIT(XSC_LOG_SQCQ_SZ) +#define XSC_RQ_ELE_NUM XSC_RQ_ELE_NUM_DEF //ds number of a wqebb +#define XSC_SQ_ELE_NUM XSC_SQ_ELE_NUM_DEF //DS 
number +#define XSC_EQ_ELE_NUM XSC_SQ_ELE_NUM_DEF //number of eq entry??? + +#define XSC_RQCQ_ELE_SZ 32 //size of a rqcq entry +#define XSC_SQCQ_ELE_SZ 32 //size of a sqcq entry +#define XSC_RQ_ELE_SZ XSC_RECV_WQE_BB +#define XSC_SQ_ELE_SZ XSC_SEND_WQE_BB +#define XSC_EQ_ELE_SZ 8 //size of a eq entry + +#define XSC_CQ_POLL_BUDGET 64 +#define XSC_TX_POLL_BUDGET 128 + +#define XSC_NET_DIM_ENABLE_THRESHOLD 16 + +#define XSC_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ +#define XSC_MAX_PRIORITY 8 +#define XSC_MAX_DSCP 64 +#define XSC_MAX_BUFFER 8 +#define XSC_DEFAULT_CABLE_LEN 7 /* 7 meters */ + +enum xsc_port_status { + XSC_PORT_DOWN = 0, + XSC_PORT_UP = 1, +}; + +/*all attributes of queue, MAYBE no use for some special queue*/ + +enum xsc_queue_type { + XSC_QUEUE_TYPE_EQ = 0, + XSC_QUEUE_TYPE_RQCQ, + XSC_QUEUE_TYPE_SQCQ, + XSC_QUEUE_TYPE_RQ, + XSC_QUEUE_TYPE_SQ, + XSC_QUEUE_TYPE_MAX, +}; + +struct xsc_queue_attr { + u8 q_type; + u32 ele_num; + u32 ele_size; + u8 ele_log_size; + u8 q_log_size; +}; + +/*MUST set value before create queue*/ +struct xsc_eth_eq_attr { + struct xsc_queue_attr xsc_eq_attr; +}; + +struct xsc_eth_cq_attr { + struct xsc_queue_attr xsc_cq_attr; +}; + +struct xsc_eth_rq_attr { + struct xsc_queue_attr xsc_rq_attr; +}; + +struct xsc_eth_sq_attr { + struct xsc_queue_attr xsc_sq_attr; +}; + +struct xsc_eth_qp_attr { + struct xsc_queue_attr xsc_qp_attr; +}; + +struct xsc_eth_rx_wqe_cyc { +#ifdef DECLARE_FLEX_ARRAY + DECLARE_FLEX_ARRAY(struct xsc_wqe_data_seg, data); +#else + struct xsc_wqe_data_seg data[0]; +#endif +}; + +struct xsc_eq_param { + struct xsc_queue_attr eq_attr; +}; + +struct xsc_cq_param { + struct xsc_wq_param wq; + struct cq_cmd { + u8 abc[16]; + } cqc; + struct xsc_queue_attr cq_attr; +}; + +struct xsc_rq_param { + struct xsc_wq_param wq; + struct xsc_queue_attr rq_attr; + struct xsc_rq_frags_info frags_info; + +}; + +struct xsc_sq_param { +// struct xsc_rq_cmd_param sqc; + struct xsc_wq_param wq; + struct xsc_queue_attr 
sq_attr; +}; + +struct xsc_qp_param { +// struct xsc_qp_cmd_param qpc; + struct xsc_queue_attr qp_attr; +}; + +struct xsc_channel_param { + struct xsc_cq_param rqcq_param; + struct xsc_cq_param sqcq_param; + struct xsc_rq_param rq_param; + struct xsc_sq_param sq_param; + struct xsc_qp_param qp_param; +}; + +struct xsc_eth_qp { + u16 rq_num; + u16 sq_num; + struct xsc_rq rq[XSC_MAX_NUM_TC]; /*may be use one only*/ + struct xsc_sq sq[XSC_MAX_NUM_TC]; /*reserved to tc*/ +}; + +enum channel_flags { + XSC_CHANNEL_NAPI_SCHED = 1, +}; + +struct xsc_channel { + /* data path */ + struct xsc_eth_qp qp; + struct napi_struct napi; + u8 num_tc; + int chl_idx; + + /*relationship*/ + struct xsc_adapter *adapter; + struct net_device *netdev; + int cpu; + unsigned long flags; + + /* data path - accessed per napi poll */ + const struct cpumask *aff_mask; + struct irq_desc *irq_desc; + struct xsc_ch_stats *stats; +} ____cacheline_aligned_in_smp; + +enum xsc_eth_priv_flag { + XSC_PFLAG_RX_NO_CSUM_COMPLETE, + XSC_PFLAG_SNIFFER, + XSC_PFLAG_DROPLESS_RQ, + XSC_PFLAG_RX_COPY_BREAK, + XSC_PFLAG_RX_CQE_BASED_MODER, + XSC_PFLAG_TX_CQE_BASED_MODER, + XSC_NUM_PFLAGS, /* Keep last */ +}; + +#define XSC_SET_PFLAG(params, pflag, enable) \ + do { \ + if (enable) \ + (params)->pflags |= BIT(pflag); \ + else \ + (params)->pflags &= ~(BIT(pflag)); \ + } while (0) + +#define XSC_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag)))) + +struct xsc_eth_params { + u16 num_channels; + u16 max_num_ch; + u8 num_tc; + u32 mtu; + u32 hard_mtu; + u32 comp_vectors; + u32 sq_size; + u32 sq_max_size; + u8 rq_wq_type; + u32 rq_size; + u32 rq_max_size; + u32 rq_frags_size; + + u16 num_rl_txqs; + u8 rx_cqe_compress_def; + u8 tunneled_offload_en; + u8 lro_en; + u8 tx_min_inline_mode; + u8 vlan_strip_disable; + u8 scatter_fcs_en; + u8 rx_dim_enabled; + u8 tx_dim_enabled; + u32 rx_dim_usecs_low; + u32 rx_dim_frames_low; + u32 tx_dim_usecs_low; + u32 tx_dim_frames_low; + u32 lro_timeout; + u32 pflags; + + 
xsc_dim_cq_moder_t rx_cq_moderation; + xsc_dim_cq_moder_t tx_cq_moderation; +}; + +struct xsc_eth_channels { + struct xsc_channel *c; + unsigned int num_chl; + u32 rqn_base; +}; + +struct xsc_eth_redirect_rqt_param { + u8 is_rss; + union { + u32 rqn; /* Direct RQN (Non-RSS) */ + struct { + u8 hfunc; + struct xsc_eth_channels *channels; + } rss; /* RSS data */ + }; +}; + +union xsc_send_doorbell { + struct{ + s32 next_pid : 16; + u32 qp_num : 15; + }; + u32 send_data; +}; + +union xsc_recv_doorbell { + struct{ + s32 next_pid : 13; + u32 qp_num : 15; + }; + u32 recv_data; +}; + +#endif /* XSC_ETH_COMMON_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h new file mode 100644 index 000000000000..5e34982faa46 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_compat.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_COMPAT_H +#define XSC_ETH_COMPAT_H + +#define xsc_netdev_xmit_more(skb) netdev_xmit_more() + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c new file mode 100644 index 000000000000..ccf21b8c704b --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.c @@ -0,0 +1,654 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_port_ctrl.h" +#include "xsc_hw_comm.h" + +#define XSC_ETH_CTRL_NAME "eth_ctrl" + +struct mutex pfc_mutex; /* protect pfc operation */ + +static void encode_watchdog_set(void *data, u32 mac_port) +{ + struct xsc_watchdog_period_set *req = + (struct xsc_watchdog_period_set *)data; + + req->period = __cpu_to_be32(req->period); +} + +static void decode_watchdog_get(void *data) +{ + struct xsc_watchdog_period_get *resp = + (struct xsc_watchdog_period_get *)data; + + resp->period = __be32_to_cpu(resp->period); +} + +static void encode_rlimit_set(void *data, u32 mac_port) +{ + struct xsc_rate_limit_set *req = (struct xsc_rate_limit_set *)data; + + req->rate_cir = __cpu_to_be32(req->rate_cir); + req->limit_id = __cpu_to_be32(req->limit_id); +} + +static void decode_rlimit_get(void *data) +{ + struct xsc_rate_limit_get *resp = (struct xsc_rate_limit_get *)data; + int i; + + for (i = 0; i <= QOS_PRIO_MAX; i++) + resp->rate_cir[i] = __be32_to_cpu(resp->rate_cir[i]); + + resp->max_limit_id = __be32_to_cpu(resp->max_limit_id); +} + +static int xsc_get_port_pfc(struct xsc_core_device *xdev, u8 *pfc, u8 pfc_size) +{ + int err = 0; + struct xsc_pfc_get rsp; + + memset(&rsp, 0, sizeof(struct xsc_pfc_get)); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_PFC, NULL, &rsp); + if (err) { + xsc_core_err(xdev, "failed to get pfc, err: %d\n", err); + return err; + } + + memcpy(pfc, rsp.pfc_on, pfc_size); + + return 0; +} + +static int xsc_set_port_pfc_drop_th(struct xsc_core_device *xdev, u8 prio, u8 cfg_type) +{ + int err = 0; + struct xsc_pfc_set_drop_th_mbox_in req; + struct xsc_pfc_set_drop_th_mbox_out rsp; + + memset(&req, 0, sizeof(struct xsc_pfc_set_drop_th_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_pfc_set_drop_th_mbox_out)); + + req.prio = prio; + req.cfg_type = cfg_type; + req.hdr.opcode = 
__cpu_to_be16(XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH, &req, &rsp); + if (err) { + xsc_core_err(xdev, + "failed to set pfc drop th, err: %d, prio: %d, cfg_type: %d\n", + err, prio, cfg_type); + return err; + } + + return 0; +} + +static int xsc_set_drop_th(struct xsc_core_device *xdev, + const struct xsc_pfc_cfg *pfc_cfg, + u8 cfg_type) +{ + int err = 0; + + if (cfg_type == DROP_TH_CLEAR) { + err = xsc_set_port_pfc_drop_th(xdev, pfc_cfg->req_prio, cfg_type); + if (pfc_cfg->pfc_op == PFC_OP_MODIFY) + err |= xsc_set_port_pfc_drop_th(xdev, pfc_cfg->curr_prio, cfg_type); + } else if (cfg_type == DROP_TH_RECOVER) { + if (pfc_cfg->pfc_op == PFC_OP_DISABLE) { + err = xsc_set_port_pfc_drop_th(xdev, + pfc_cfg->req_prio, + DROP_TH_RECOVER_LOSSY); + } else if (pfc_cfg->pfc_op == PFC_OP_ENABLE) { + err = xsc_set_port_pfc_drop_th(xdev, + pfc_cfg->req_prio, + DROP_TH_RECOVER_LOSSLESS); + } else if (pfc_cfg->pfc_op == PFC_OP_MODIFY) { + err = xsc_set_port_pfc_drop_th(xdev, + pfc_cfg->req_prio, + DROP_TH_RECOVER_LOSSLESS); + err |= xsc_set_port_pfc_drop_th(xdev, + pfc_cfg->curr_prio, + DROP_TH_RECOVER_LOSSY); + } + } + + return err; +} + +static int xsc_get_port_pfc_cfg_status(struct xsc_core_device *xdev, u8 prio, int *status) +{ + int err = 0; + struct xsc_pfc_get_cfg_status_mbox_in req; + struct xsc_pfc_get_cfg_status_mbox_out rsp; + + memset(&req, 0, sizeof(struct xsc_pfc_get_cfg_status_mbox_in)); + memset(&rsp, 0, sizeof(struct xsc_pfc_get_cfg_status_mbox_out)); + + req.prio = prio; + req.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS); + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS, &req, &rsp); + if (err) { + xsc_core_err(xdev, "failed to get pfc cfg status, err: %d, prio: %d\n", err, prio); + return err; + } + + *status = rsp.hdr.status; + + return 0; +} + +static int xsc_get_cfg_status(struct xsc_core_device *xdev, + struct xsc_pfc_cfg *pfc_cfg, + int *status) +{ + int 
err = 0; + + err = xsc_get_port_pfc_cfg_status(xdev, pfc_cfg->req_prio, status); + if (pfc_cfg->pfc_op == PFC_OP_MODIFY) + err |= xsc_get_port_pfc_cfg_status(xdev, pfc_cfg->curr_prio, status); + + return err; +} + +static int xsc_wait_pfc_check_complete(struct xsc_core_device *xdev, + struct xsc_pfc_cfg *pfc_cfg) +{ + int err = 0; + int status = 0; + u32 valid_cnt = 0; + u32 retry_cnt = 0; + + while (retry_cnt < PFC_CFG_CHECK_MAX_RETRY_TIMES) { + err = xsc_get_cfg_status(xdev, pfc_cfg, &status); + + if (err || status) { + valid_cnt = 0; + } else { + valid_cnt++; + if (valid_cnt >= PFC_CFG_CHECK_VALID_CNT) + break; + } + + retry_cnt++; + usleep_range(PFC_CFG_CHECK_SLEEP_TIME_US, + PFC_CFG_CHECK_SLEEP_TIME_US + 1); + } + + if (retry_cnt >= PFC_CFG_CHECK_MAX_RETRY_TIMES) { + xsc_core_err(xdev, "pfc check timeout, req_prio: %d, curr_prio:%d\n", + pfc_cfg->req_prio, pfc_cfg->curr_prio); + err = -EFAULT; + } + + return err | status; +} + +static int xsc_set_port_pfc(struct xsc_core_device *xdev, u8 prio, + u8 pfc_on, u8 pfc_op, u8 *lossless_num) +{ + int err = 0; + struct xsc_pfc_set req; + struct xsc_pfc_set rsp; + + memset(&req, 0, sizeof(struct xsc_pfc_set)); + req.priority = prio; + req.pfc_on = pfc_on; + req.type = pfc_op; + + err = xsc_hw_kernel_call(xdev, XSC_CMD_OP_IOCTL_SET_PFC, &req, &rsp); + if (err) { + xsc_core_err(xdev, "failed to set pfc, err: %d, prio: %d, pfc_on: %d\n", + err, prio, pfc_on); + return err; + } + + *lossless_num = rsp.lossless_num; + + return 0; +} + +static int xsc_set_pfc(struct xsc_core_device *xdev, struct xsc_pfc_cfg *pfc_cfg) +{ + int err = 0; + u8 lossless_num = LOSSLESS_NUM_INVAILD; + + switch (pfc_cfg->pfc_op) { + case PFC_OP_DISABLE: + err = xsc_set_port_pfc(xdev, pfc_cfg->req_prio, NIF_PFC_EN_OFF, + pfc_cfg->pfc_op, &lossless_num); + break; + case PFC_OP_ENABLE: + err = xsc_set_port_pfc(xdev, pfc_cfg->req_prio, NIF_PFC_EN_ON, + pfc_cfg->pfc_op, &lossless_num); + break; + case PFC_OP_MODIFY: + err = xsc_set_port_pfc(xdev, 
pfc_cfg->curr_prio, NIF_PFC_EN_OFF, + pfc_cfg->pfc_op, &lossless_num); + err |= xsc_set_port_pfc(xdev, pfc_cfg->req_prio, NIF_PFC_EN_ON, + pfc_cfg->pfc_op, &lossless_num); + break; + default: + xsc_core_err(xdev, "unsupported pfc operation: %d\n", pfc_cfg->pfc_op); + err = -EINVAL; + } + + pfc_cfg->lossless_num = lossless_num; + return err; +} + +static int handle_pfc_cfg(struct xsc_core_device *xdev, + struct xsc_qos_mbox_in *in, int in_size, + struct xsc_qos_mbox_out *out, int out_size) +{ + const struct xsc_pfc_set *req = (struct xsc_pfc_set *)in->data; + struct xsc_pfc_set *rsp = (struct xsc_pfc_set *)out->data; + struct xsc_pfc_cfg pfc_cfg; + u8 curr_pfc[PFC_PRIO_MAX + 1] = {0}; + int idx; + int err = 0; + bool invalid_op = false; + + if (!mutex_trylock(&pfc_mutex)) { + xsc_core_err(xdev, "pfc is configuring by other user\n"); + return -EBUSY; + } + + memcpy(rsp, req, sizeof(struct xsc_pfc_set)); + memset(&pfc_cfg, 0, sizeof(struct xsc_pfc_cfg)); + + if (req->priority < 0 || req->priority > PFC_PRIO_MAX) { + xsc_core_err(xdev, "invalid req priority: %d\n", req->priority); + err = -EINVAL; + goto err_process; + } + + pfc_cfg.req_prio = req->priority; + pfc_cfg.req_pfc_en = req->pfc_on; + pfc_cfg.curr_pfc_en = 0; + pfc_cfg.pfc_op = PFC_OP_TYPE_MAX; + pfc_cfg.lossless_num = LOSSLESS_NUM_INVAILD; + + err = xsc_get_port_pfc(xdev, curr_pfc, sizeof(curr_pfc)); + if (err) + goto err_process; + + for (idx = 0; idx < PFC_PRIO_MAX + 1; idx++) { + if (curr_pfc[idx] == NIF_PFC_EN_ON) { + pfc_cfg.curr_prio = idx; + pfc_cfg.curr_pfc_en = 1; + break; + } + } + + if (pfc_cfg.curr_pfc_en && pfc_cfg.req_pfc_en) { + if (pfc_cfg.curr_prio != pfc_cfg.req_prio) + pfc_cfg.pfc_op = PFC_OP_MODIFY; + else + invalid_op = true; + } else if (pfc_cfg.curr_pfc_en && !pfc_cfg.req_pfc_en) { + if (pfc_cfg.curr_prio == pfc_cfg.req_prio) + pfc_cfg.pfc_op = PFC_OP_DISABLE; + else + invalid_op = true; + } else if (!pfc_cfg.curr_pfc_en && pfc_cfg.req_pfc_en) { + pfc_cfg.pfc_op = PFC_OP_ENABLE; + } 
else { + invalid_op = true; + } + + if (invalid_op) { + xsc_core_err(xdev, "invalid operation, req_pfc_cfg:%d,%d curr_pfc_cfg:%d,%d\n", + pfc_cfg.req_prio, pfc_cfg.req_pfc_en, + pfc_cfg.curr_prio, pfc_cfg.curr_pfc_en); + err = 0; + goto err_process; + } + + xsc_core_dbg(xdev, "req_pfc_cfg:%d, %d curr_pfc_cfg: %d,%d, pfc_op: %d\n", + pfc_cfg.req_prio, pfc_cfg.req_pfc_en, + pfc_cfg.curr_prio, pfc_cfg.curr_pfc_en, pfc_cfg.pfc_op); + + err = xsc_set_drop_th(xdev, &pfc_cfg, DROP_TH_CLEAR); + if (err) + goto err_process; + + err = xsc_wait_pfc_check_complete(xdev, &pfc_cfg); + if (!err) + err = xsc_set_pfc(xdev, &pfc_cfg); + + err |= xsc_set_drop_th(xdev, &pfc_cfg, DROP_TH_RECOVER); + +err_process: + mutex_unlock(&pfc_mutex); + + if (pfc_cfg.pfc_op == PFC_OP_MODIFY) + rsp->src_prio = pfc_cfg.curr_prio; + else + rsp->src_prio = pfc_cfg.req_prio; + + rsp->lossless_num = pfc_cfg.lossless_num; + rsp->type = pfc_cfg.pfc_op; + out->hdr.status = err; + xsc_core_dbg(xdev, "response lossless_num: %d, src_prio: %d, type: %d, hdr status: %d\n", + rsp->lossless_num, rsp->src_prio, rsp->type, out->hdr.status); + return err; +} + +static int _eth_ctrl_ioctl_qos(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, + u16 expect_resp_size, + void (*encode)(void *, u32), + void (*decode)(void *)) +{ + struct xsc_qos_mbox_in *in; + struct xsc_qos_mbox_out *out; + u16 user_size; + int err; + + user_size = expect_req_size > expect_resp_size ? 
expect_req_size : expect_resp_size; + if (hdr->attr.length != user_size) + return -EINVAL; + + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + in->req_prfx.mac_port = xdev->mac_port; + + if (encode) + encode((void *)in->data, xdev->mac_port); + + if (hdr->attr.opcode == XSC_CMD_OP_IOCTL_SET_PFC) + err = handle_pfc_cfg(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + else + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + + hdr->attr.error = out->hdr.status; + if (decode) + decode((void *)out->data); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static int _eth_ctrl_ioctl_hwconfig(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, + struct xsc_ioctl_hdr *hdr, + u16 expect_req_size, + u16 expect_resp_size, + void (*encode)(void *, u32), + void (*decode)(void *)) +{ + struct xsc_hwc_mbox_in *in; + struct xsc_hwc_mbox_out *out; + u16 user_size; + int err; + + user_size = expect_req_size > expect_resp_size ? 
expect_req_size : expect_resp_size; + if (hdr->attr.length != user_size) + return -EINVAL; + + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->data, user_hdr->attr.data, expect_req_size); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + if (encode) + encode((void *)in->data, xdev->mac_port); + + err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out, + sizeof(*out) + expect_resp_size); + + hdr->attr.error = out->hdr.status; + if (decode) + decode((void *)out->data); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + if (copy_to_user((void *)user_hdr->attr.data, &out->data, expect_resp_size)) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); +err_in: + return -EFAULT; +} + +static long _eth_ctrl_ioctl_cmdq(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + int err; + void *in; + void *out; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + + /* check ioctl cmd */ + switch (hdr.attr.opcode) { + case XSC_CMD_OP_IOCTL_SET_DSCP_PMT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_dscp_pmt_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_DSCP_PMT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_dscp_pmt_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_TRUST_MODE: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_trust_mode_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_TRUST_MODE: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_trust_mode_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_PCP_PMT: 
+ return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_pcp_pmt_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_PCP_PMT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_pcp_pmt_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_DEFAULT_PRI: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_default_pri_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_DEFAULT_PRI: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_default_pri_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_PFC: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_pfc_set), + sizeof(struct xsc_pfc_set), + NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_PFC: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_pfc_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_rate_limit_set), 0, + encode_rlimit_set, NULL); + case XSC_CMD_OP_IOCTL_GET_RATE_LIMIT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, sizeof(struct xsc_rate_limit_get), + sizeof(struct xsc_rate_limit_get), + NULL, decode_rlimit_get); + case XSC_CMD_OP_IOCTL_SET_SP: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_sp_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_SP: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_sp_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_weight_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_weight_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_DPU_SET_PORT_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_dpu_port_weight_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_DPU_GET_PORT_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_dpu_port_weight_get), NULL, 
NULL); + case XSC_CMD_OP_IOCTL_DPU_SET_PRIO_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_dpu_prio_weight_set), 0, NULL, NULL); + case XSC_CMD_OP_IOCTL_DPU_GET_PRIO_WEIGHT: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_dpu_prio_weight_get), NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_HWC: + return _eth_ctrl_ioctl_hwconfig(xdev, user_hdr, &hdr, + sizeof(struct hwc_set_t), sizeof(struct hwc_set_t), + NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_HWC: + return _eth_ctrl_ioctl_hwconfig(xdev, user_hdr, &hdr, sizeof(struct hwc_get_t), + sizeof(struct hwc_get_t), + NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_WATCHDOG_EN: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_watchdog_en_set), 0, + NULL, NULL); + case XSC_CMD_OP_IOCTL_GET_WATCHDOG_EN: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_watchdog_en_get), + NULL, NULL); + case XSC_CMD_OP_IOCTL_SET_WATCHDOG_PERIOD: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + sizeof(struct xsc_watchdog_period_set), 0, + encode_watchdog_set, NULL); + case XSC_CMD_OP_IOCTL_GET_WATCHDOG_PERIOD: + return _eth_ctrl_ioctl_qos(xdev, user_hdr, &hdr, + 0, sizeof(struct xsc_watchdog_period_get), + NULL, decode_watchdog_get); + default: + return TRY_NEXT_CB; + } + + in = kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!in) + return -ENOMEM; + out = kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!out) { + kfree(in); + return -ENOMEM; + } + + err = copy_from_user(in, user_hdr->attr.data, hdr.attr.length); + if (err) { + err = -EFAULT; + goto err_exit; + } + + xsc_cmd_exec(xdev, in, hdr.attr.length, out, hdr.attr.length); + + if (copy_to_user((void *)user_hdr, &hdr, sizeof(hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, out, hdr.attr.length)) + err = -EFAULT; +err_exit: + kfree(in); + kfree(out); + return err; +} + +static int _eth_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd, + struct xsc_ioctl_hdr __user 
*user_hdr, void *data) +{ + struct xsc_core_device *xdev = file->xdev; + int err; + + switch (cmd) { + case XSC_IOCTL_CMDQ: + err = _eth_ctrl_ioctl_cmdq(xdev, user_hdr); + break; + default: + err = TRY_NEXT_CB; + break; + } + + return err; +} + +static void _eth_ctrl_reg_fini(void) +{ + xsc_port_ctrl_cb_dereg(XSC_ETH_CTRL_NAME); +} + +static int _eth_ctrl_reg_init(void) +{ + int ret; + + ret = xsc_port_ctrl_cb_reg(XSC_ETH_CTRL_NAME, _eth_ctrl_reg_cb, NULL); + if (ret != 0) + pr_err("failed to register port control node for %s\n", XSC_ETH_CTRL_NAME); + + return ret; +} + +static void _pfc_global_res_init(void) +{ + mutex_init(&pfc_mutex); +} + +void xsc_eth_ctrl_fini(void) +{ + _eth_ctrl_reg_fini(); +} + +int xsc_eth_ctrl_init(void) +{ + _pfc_global_res_init(); + return _eth_ctrl_reg_init(); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h new file mode 100644 index 000000000000..d7e93f0afc41 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ctrl.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_CTRL_H +#define XSC_ETH_CTRL_H + +void xsc_eth_ctrl_fini(void); +int xsc_eth_ctrl_init(void); + +#endif /* XSC_ETH_CTRL_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h new file mode 100644 index 000000000000..61850c2ea9de --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_debug.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_DEBUG_H +#define XSC_ETH_DEBUG_H + +#include "common/xsc_core.h" +#include +#include "xsc_eth.h" + +static bool debug; +#define FUN_LINE_FMT "%s %d " + +#define ETH_DEBUG_LOG(fmt, ...) 
do { } while (0) + +#define XSC_MSG_LEVEL (NETIF_MSG_LINK) // | NETIF_MSG_HW) + +#define xsc_eth_dbg(mlevel, priv, format, ...) \ +do { \ + if (NETIF_MSG_##mlevel & (priv)->msglevel) \ + netdev_warn(priv->netdev, format, \ + ##__VA_ARGS__); \ +} while (0) + +#define WQE_CSEG_DUMP(seg_name, seg) \ + do { \ + ETH_DEBUG_LOG("dump %s:\n", seg_name); \ + ETH_DEBUG_LOG("cseg->has_pph: %d\n", (seg)->has_pph); \ + ETH_DEBUG_LOG("cseg->so_type: %d\n", (seg)->so_type); \ + ETH_DEBUG_LOG("cseg->so_hdr_len: %d\n", (seg)->so_hdr_len); \ + ETH_DEBUG_LOG("cseg->so_data_size: %d\n", (seg)->so_data_size); \ + ETH_DEBUG_LOG("cseg->msg_opcode: %d\n", (seg)->msg_opcode); \ + ETH_DEBUG_LOG("cseg->wqe_id: %d\n", (seg)->wqe_id); \ + ETH_DEBUG_LOG("cseg->ds_data_num: %d\n", (seg)->ds_data_num); \ + ETH_DEBUG_LOG("cseg->msg_len: %d\n", (seg)->msg_len); \ + } while (0) + +#define WQE_DSEG_DUMP(seg_name, seg) \ + do { \ + ETH_DEBUG_LOG("dump %s:\n", seg_name); \ + ETH_DEBUG_LOG("dseg->va: %#llx\n", (seg)->va); \ + ETH_DEBUG_LOG("dseg->in_line: %d\n", (seg)->in_line); \ + ETH_DEBUG_LOG("dseg->mkey: %d\n", (seg)->mkey); \ + ETH_DEBUG_LOG("dseg->seg_len: %d\n", (seg)->seg_len); \ + } while (0) + +static inline void skbdata_debug_dump(struct sk_buff *skb, u16 headlen, int direct) +{ + if (!debug) + return; + + netdev_info(skb->dev, "pkt[%s]: skb_len=%d, head_len=%d\n", + (direct ? 
"tx" : "rx"), skb->len, headlen); + + if (skb) { + char *buf = skb->data; + int i, j; + int pos; + + for (i = 0; i < headlen; i++) { + if (i % 16 == 0) + pr_info("%#4.4x ", i); + pr_info("%2.2x ", ((unsigned char *)buf)[i]); + } + + pr_info("\n"); + + pos = headlen; + for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; + int fsz = skb_frag_size(frag); + + buf = (char *)(page_address(frag->bv_page) + frag->bv_offset); + for (i = 0; i < fsz; i++) { + if (i % 16 == 0) + pr_info("%#4.4x ", i); + pr_info("%2.2x ", ((unsigned char *)buf)[i]); + } + + pos += frag->bv_len; + } + pr_info("\n"); + } +} + +#define ETH_SQ_STATE(sq) \ + do { \ + if (test_bit(__QUEUE_STATE_STACK_XOFF, &(sq)->txq->state)) \ + ETH_DEBUG_LOG("sq is __QUEUE_STATE_STACK_XOFF\n"); \ + else if (test_bit(__QUEUE_STATE_DRV_XOFF, &(sq)->txq->state)) \ + ETH_DEBUG_LOG("sq is __QUEUE_STATE_DRV_XOFF\n"); \ + else \ + ETH_DEBUG_LOG("sq is %ld\n", (sq)->txq->state); \ + } while (0) + +static inline void xsc_pkt_pph_dump(char *data, int len) +{ + int i; + + if (!debug) + return; + + for (i = 0; i < len; i++) { + if (i % 16 == 0) + pr_info("%#4.4x ", i); + pr_info("%2.2x ", ((unsigned char *)data)[i]); + } +} + +#endif /* XSC_ETH_DEBUG_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.c new file mode 100644 index 000000000000..3a29fa03e92a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "xsc_eth_dim.h" +#include "xsc_queue.h" +#include "xsc_eth_stats.h" + +xsc_dim_cq_moder_t xsc_get_def_tx_moderation(u8 cq_period_mode) +{ + xsc_dim_cq_moder_t moder; + + moder.cq_period_mode = cq_period_mode; + moder.pkts = XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; + moder.usec = XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; + if (cq_period_mode == XSC_CQ_PERIOD_MODE_START_FROM_CQE) + moder.usec = XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; + + return moder; +} + +xsc_dim_cq_moder_t xsc_get_def_rx_moderation(u8 cq_period_mode) +{ + xsc_dim_cq_moder_t moder; + + moder.cq_period_mode = cq_period_mode; + moder.pkts = XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + moder.usec = XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + + return moder; +} + +void xsc_set_tx_cq_mode_params(struct xsc_eth_params *params, u8 cq_period_mode) +{ + if (params->tx_dim_enabled) + params->tx_cq_moderation = net_dim_get_tx_moderation(cq_period_mode, + XSC_DEF_TX_DIM_PROFILE_IDX); + else + params->tx_cq_moderation = xsc_get_def_tx_moderation(cq_period_mode); + + XSC_SET_PFLAG(params, XSC_PFLAG_TX_CQE_BASED_MODER, + params->tx_cq_moderation.cq_period_mode == + XSC_CQ_PERIOD_MODE_START_FROM_CQE); +} + +void xsc_set_rx_cq_mode_params(struct xsc_eth_params *params, u8 cq_period_mode) +{ + if (params->rx_dim_enabled) { + params->rx_cq_moderation = net_dim_get_rx_moderation(cq_period_mode, + XSC_DEF_RX_DIM_PROFILE_IDX); + if (cq_period_mode == XSC_CQ_PERIOD_MODE_START_FROM_EQE) + params->rx_cq_moderation.usec = + XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_EQE; + } else { + params->rx_cq_moderation = xsc_get_def_rx_moderation(cq_period_mode); + } + + params->rx_dim_usecs_low = XSC_PARAMS_RX_DIM_USECS_LOW; + params->rx_dim_frames_low = XSC_PARAMS_RX_DIM_FRAMES_LOW; + + XSC_SET_PFLAG(params, XSC_PFLAG_RX_CQE_BASED_MODER, + params->rx_cq_moderation.cq_period_mode == + XSC_CQ_PERIOD_MODE_START_FROM_CQE); +} + +void xsc_handle_tx_dim(struct xsc_sq *sq) +{ + xsc_dim_sample_t *sample = 
&sq->dim_obj.sample; + + if (unlikely(!test_bit(XSC_ETH_SQ_STATE_AM, &sq->state))) + return; + + dim_update_sample(sq->cq.event_ctr, sample->pkt_ctr, sample->byte_ctr, sample); + net_dim(&sq->dim_obj.dim, *sample); +} + +void xsc_handle_rx_dim(struct xsc_rq *rq) +{ + xsc_dim_sample_t *sample = &rq->dim_obj.sample; + + if (unlikely(!test_bit(XSC_ETH_RQ_STATE_AM, &rq->state))) + return; + + dim_update_sample(rq->cq.event_ctr, sample->pkt_ctr, sample->byte_ctr, sample); + net_dim(&rq->dim_obj.dim, *sample); +} + +static void xsc_complete_dim_work(xsc_dim_t *dim, xsc_dim_cq_moder_t moder, + struct xsc_core_device *dev, struct xsc_core_cq *xcq) +{ + xcq->dim_us = moder.usec; + xcq->dim_pkts = moder.pkts; + dim->state = XSC_DIM_START_MEASURE; +} + +void xsc_rx_dim_work(struct work_struct *work) +{ + xsc_dim_t *dim = container_of(work, xsc_dim_t, work); + struct xsc_dim *dim_obj = container_of(dim, struct xsc_dim, dim); + struct xsc_rq *rq = container_of(dim_obj, struct xsc_rq, dim_obj); + xsc_dim_cq_moder_t cur_moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + + xsc_complete_dim_work(dim, cur_moder, rq->cq.xdev, &rq->cq.xcq); + rq->stats->dim_pkts = cur_moder.pkts; +} + +void xsc_tx_dim_work(struct work_struct *work) +{ + xsc_dim_t *dim = container_of(work, xsc_dim_t, work); + struct xsc_dim *dim_obj = container_of(dim, struct xsc_dim, dim); + struct xsc_sq *sq = container_of(dim_obj, struct xsc_sq, dim_obj); + xsc_dim_cq_moder_t cur_moder = + net_dim_get_tx_moderation(dim->mode, dim->profile_ix); + + xsc_complete_dim_work(dim, cur_moder, sq->cq.xdev, &sq->cq.xcq); + sq->stats->dim_pkts = cur_moder.pkts; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.h new file mode 100644 index 000000000000..1e3515db5eef --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_dim.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai 
Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_DIM_H +#define XSC_ETH_DIM_H + +#include "xsc_eth_common.h" + +#define XSC_DEF_RX_DIM_PROFILE_IDX 4 +#define XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x1 +#define XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x40 +#define XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x2 +#define XSC_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_EQE 0x40 + +#define XSC_PARAMS_RX_DIM_USECS_LOW 8 +#define XSC_PARAMS_RX_DIM_FRAMES_LOW 2 + +#define XSC_DEF_TX_DIM_PROFILE_IDX 4 +#define XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x1 +#define XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x2 +#define XSC_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x80 +#define XSC_MAX_COAL_TIME 512 +#define XSC_MAX_COAL_FRAMES 1024 + +#define XSC_DIM_START_MEASURE DIM_START_MEASURE + +enum { + XSC_CQ_PERIOD_MODE_START_FROM_EQE = DIM_CQ_PERIOD_MODE_START_FROM_EQE, + XSC_CQ_PERIOD_MODE_START_FROM_CQE = DIM_CQ_PERIOD_MODE_START_FROM_CQE, + XSC_CQ_PERIOD_NUM_MODES +}; + +xsc_dim_cq_moder_t xsc_get_def_tx_moderation(u8 cq_period_mode); +xsc_dim_cq_moder_t xsc_get_def_rx_moderation(u8 cq_period_mode); +u8 xsc_to_net_dim_cq_period_mode(u8 cq_period_mode); +void xsc_set_tx_cq_mode_params(struct xsc_eth_params *params, u8 cq_period_mode); +void xsc_set_rx_cq_mode_params(struct xsc_eth_params *params, u8 cq_period_mode); + +void xsc_tx_dim_work(struct work_struct *work); +void xsc_rx_dim_work(struct work_struct *work); + +void xsc_handle_tx_dim(struct xsc_sq *sq); +void xsc_handle_rx_dim(struct xsc_rq *rq); + +#endif /* XSC_ETH_DIM_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c new file mode 100644 index 000000000000..01c055372003 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.c @@ -0,0 +1,1279 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. 
+ * All rights reserved. + */ + +#include +#include +#include "xsc_eth_stats.h" +#include "xsc_eth_debug.h" +#include "xsc_eth_ethtool.h" +#include "xsc_eth.h" +#include "common/xsc_cmd.h" +#include "common/xsc_pp.h" +#include "common/port.h" +#include "xsc_eth_dim.h" + +typedef int (*xsc_pflag_handler)(struct net_device *dev, bool enable); + +struct pflag_desc { + char name[ETH_GSTRING_LEN]; + xsc_pflag_handler handler; +}; + +static const char * const fpga_type_name[] = {"S", "L"}; +static const char * const hps_ddr_name[] = {"1", "2", "4", "unknown"}; +static const char * const onchip_ft_name[] = {"N", "O" }; +static const char * const rdma_icrc_name[] = {"N", "C" }; +static const char * const ma_xbar_name[] = {"N", "X" }; +static const char * const anlt_fec_name[] = {"N", "A" }; +static const char * const pp_tbl_dma_name[] = {"N", "D" }; +static const char * const pct_exp_name[] = {"N", "E" }; + +enum { + XSC_ST_LINK_STATE, + XSC_ST_LINK_SPEED, + XSC_ST_HEALTH_INFO, +#ifdef CONFIG_INET + XSC_ST_LOOPBACK, +#endif + XSC_ST_NUM, +}; + +const char xsc_self_tests[XSC_ST_NUM][ETH_GSTRING_LEN] = { + "Link Test", + "Speed Test", + "Health Test", +#ifdef CONFIG_INET + "Loopback Test", +#endif +}; + +static int xsc_test_loopback(struct xsc_adapter *adapter) +{ + if (adapter->status != XSCALE_ETH_DRIVER_OK) { + netdev_err(adapter->netdev, + "\tCan't perform loopback test while device is down\n"); + return -ENODEV; + } + return 0; +} + +static int xsc_test_health_info(struct xsc_adapter *adapter) +{ + struct xsc_core_health *health = &adapter->xdev->priv.health; + + return health->sick ? 1 : 0; +} + +static int xsc_test_link_state(struct xsc_adapter *adapter) +{ + u8 port_state; + + if (!netif_carrier_ok(adapter->netdev)) + return 1; + + port_state = xsc_eth_get_link_status(adapter); + return port_state == 0 ? 
1 : 0; +} + +static int xsc_test_link_speed(struct xsc_adapter *adapter) +{ + struct xsc_event_linkinfo linkinfo; + + if (xsc_eth_get_link_info(adapter, &linkinfo)) + return 1; + + return 0; +} + +static int set_pflag_rx_no_csum_complete(struct net_device *dev, + bool enable) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + XSC_SET_PFLAG(&priv->nic_param, XSC_PFLAG_RX_NO_CSUM_COMPLETE, enable); + + return 0; +} + +static int set_pflag_sniffer(struct net_device *dev, bool enable) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + XSC_SET_PFLAG(&priv->nic_param, XSC_PFLAG_SNIFFER, enable); + + return 0; +} + +static int set_pflag_dropless_rq(struct net_device *dev, + bool enable) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + XSC_SET_PFLAG(&priv->nic_param, XSC_PFLAG_DROPLESS_RQ, enable); + + return 0; +} + +static int set_pflag_rx_copy_break(struct net_device *dev, + bool enable) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + XSC_SET_PFLAG(&priv->nic_param, XSC_PFLAG_RX_COPY_BREAK, enable); + + return 0; +} + +static int cqe_mode_to_period_mode(bool val) +{ + return val ? XSC_CQ_PERIOD_MODE_START_FROM_CQE : XSC_CQ_PERIOD_MODE_START_FROM_EQE; +} + +static int set_pflag_cqe_based_moder(struct net_device *dev, bool enable, + bool is_rx_cq) +{ + struct xsc_adapter *priv = netdev_priv(dev); + u8 cq_period_mode, current_cq_period_mode; + struct xsc_eth_params new_params; + int err; + + cq_period_mode = cqe_mode_to_period_mode(enable); + + current_cq_period_mode = is_rx_cq ? 
+ priv->nic_param.rx_cq_moderation.cq_period_mode : + priv->nic_param.tx_cq_moderation.cq_period_mode; + + if (cq_period_mode == current_cq_period_mode) + return 0; + + new_params = priv->nic_param; + if (is_rx_cq) + xsc_set_rx_cq_mode_params(&new_params, cq_period_mode); + else + xsc_set_tx_cq_mode_params(&new_params, cq_period_mode); + + priv->nic_param = new_params; + + err = xsc_safe_switch_channels(priv, NULL, NULL); + return err; +} + +static int set_pflag_rx_cqe_moder(struct net_device *dev, bool enable) +{ + return set_pflag_cqe_based_moder(dev, enable, true); +} + +static int set_pflag_tx_cqe_moder(struct net_device *dev, bool enable) +{ + return set_pflag_cqe_based_moder(dev, enable, false); +} + +static const struct pflag_desc xsc_priv_flags[XSC_NUM_PFLAGS] = { + { "rx_no_csum_complete", set_pflag_rx_no_csum_complete }, + { "sniffer", set_pflag_sniffer }, + { "dropless_rq", set_pflag_dropless_rq}, + { "rx_copy_break", set_pflag_rx_copy_break}, + { "rx_cqe_moder", set_pflag_rx_cqe_moder}, + { "tx_cqe_moder", set_pflag_tx_cqe_moder}, +}; + +int xsc_priv_flags_num(void) +{ + return ARRAY_SIZE(xsc_priv_flags); +} + +const char *xsc_priv_flags_name(int flag) +{ + return xsc_priv_flags[flag].name; +} + +static int xsc_handle_pflag(struct net_device *dev, + u32 wanted_flags, + enum xsc_eth_priv_flag flag) +{ + struct xsc_adapter *priv = netdev_priv(dev); + bool enable = !!(wanted_flags & BIT(flag)); + u32 changes = wanted_flags ^ priv->nic_param.pflags; + int err; + + if (!(changes & BIT(flag))) + return 0; + + err = xsc_priv_flags[flag].handler(dev, enable); + if (err) + netdev_err(dev, "%s private flag '%s' failed err %d\n", + enable ? 
"Enable" : "Disable", + xsc_priv_flags[flag].name, err); + + return err; +} + +int xsc_set_priv_flags(struct net_device *dev, u32 pflags) +{ + struct xsc_adapter *priv = netdev_priv(dev); + enum xsc_eth_priv_flag pflag; + int err; + + mutex_lock(&priv->state_lock); + + for (pflag = 0; pflag < XSC_NUM_PFLAGS; pflag++) { + err = xsc_handle_pflag(dev, pflags, pflag); + if (err) + break; + } + + mutex_unlock(&priv->state_lock); + + /* Need to fix some features.. */ + netdev_update_features(dev); + + return err; +} + +static int xsc_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + int size_read = 0; + u8 data[4] = {0}; + + size_read = xsc_query_module_eeprom(xdev, 0, 3, data); + if (size_read < 3) + return -EIO; + + /* data[0] = identifier byte */ + switch (data[0]) { + case XSC_MODULE_ID_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + break; + case XSC_MODULE_ID_QSFP_PLUS: + case XSC_MODULE_ID_QSFP28: + /* data[1] = revision id */ + if (data[0] == XSC_MODULE_ID_QSFP28 || data[1] >= 0x3) { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + } + break; + case XSC_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case XSC_MODULE_ID_QSFP_DD: + case XSC_MODULE_ID_DSFP: + case XSC_MODULE_ID_QSFP_PLUS_CMIS: + modinfo->type = ETH_MODULE_SFF_8636; + /* Verify if module EEPROM is a flat memory. In case of flat + * memory only page 00h (0-255 bytes) can be read. Otherwise + * upper pages 01h and 02h can also be read. Upper pages 10h + * and 11h are currently not supported by the driver. 
+ */ + if (data[2] & 0x80) + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + else + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + default: + netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n", + __func__, data[0]); + return -EINVAL; + } + + return 0; +} + +static int xsc_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + int offset = ee->offset; + int size_read; + int i = 0; + + if (!ee->len) + return -EINVAL; + + memset(data, 0, ee->len); + + while (i < ee->len) { + size_read = xsc_query_module_eeprom(xdev, offset, ee->len - i, data + i); + + if (!size_read) + /* Done reading */ + return 0; + + if (size_read < 0) { + netdev_err(priv->netdev, "%s: xsc_query_eeprom failed:0x%x\n", + __func__, size_read); + return 0; + } + + i += size_read; + offset += size_read; + } + + return 0; +} + +static int xsc_get_module_eeprom_by_page(struct net_device *netdev, + const struct ethtool_module_eeprom *page_data, + struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_core_device *xdev = priv->xdev; + struct xsc_module_eeprom_query_params query; + u8 *data = page_data->data; + int size_read; + int i = 0; + + if (!page_data->length) + return -EINVAL; + + memset(data, 0, page_data->length); + + query.offset = page_data->offset; + query.i2c_address = page_data->i2c_address; + query.bank = page_data->bank; + query.page = page_data->page; + while (i < page_data->length) { + query.size = page_data->length - i; + size_read = xsc_query_module_eeprom_by_page(xdev, &query, data + i); + + // Done reading, return how many bytes was read + if (!size_read) + return i; + + if (size_read == -EINVAL) + return -EINVAL; + if (size_read < 0) { + netdev_err(priv->netdev, "%s: xsc_query_module_eeprom_by_page failed:0x%x\n", + __func__, size_read); + return i; + } + + i += size_read; + query.offset 
+= size_read; + } + + return i; +} + +u32 xsc_get_priv_flags(struct net_device *dev) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + return priv->nic_param.pflags; +} + +static void xsc_set_drv_fw_version(struct ethtool_drvinfo *info, struct xsc_core_device *xdev) +{ + u8 fw_ver_major = xdev->fw_version_major; + u8 fw_ver_minor = xdev->fw_version_minor; + u16 fw_ver_patch = xdev->fw_version_patch; + u32 fw_ver_tweak = xdev->fw_version_tweak; + u8 fw_ver_extra_flag = xdev->fw_version_extra_flag; + + if (fw_ver_tweak == 0) { + if (fw_ver_extra_flag == 0) { + snprintf(info->fw_version, sizeof(info->fw_version), "v%u.%u.%u", + fw_ver_major, fw_ver_minor, fw_ver_patch); + } else { + snprintf(info->fw_version, sizeof(info->fw_version), "v%u.%u.%u-dirty", + fw_ver_major, fw_ver_minor, fw_ver_patch); + } + } else { + if (fw_ver_extra_flag == 0) { + snprintf(info->fw_version, sizeof(info->fw_version), "v%u.%u.%u+%u", + fw_ver_major, fw_ver_minor, fw_ver_patch, fw_ver_tweak); + } else { + snprintf(info->fw_version, sizeof(info->fw_version), "v%u.%u.%u+%u-dirty", + fw_ver_major, fw_ver_minor, fw_ver_patch, fw_ver_tweak); + } + } +} + +static void xsc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + snprintf(info->driver, sizeof(info->driver), "%s", XSCALE_DRIVER_NAME); + + if (HOTFIX_NUM == 0) + snprintf(info->version, sizeof(info->version), "%d.%d.%d.%d", + BRANCH_VERSION, MAJOR_VERSION, MINOR_VERSION, BUILD_VERSION); + else + snprintf(info->version, sizeof(info->version), "%d.%d.%d.%d.H%d", + BRANCH_VERSION, MAJOR_VERSION, MINOR_VERSION, BUILD_VERSION, HOTFIX_NUM); + + xsc_set_drv_fw_version(info, adapter->xdev); + strscpy(info->bus_info, pci_name(adapter->pdev), sizeof(info->bus_info)); +} + +static void xsc_fill_stats_strings(struct xsc_adapter *adapter, u8 *data) +{ + int i, idx = 0; + + for (i = 0; i < xsc_num_stats_grps; i++) + idx = xsc_stats_grps[i].fill_strings(adapter, data, idx); +} + 
+static int xsc_self_test_num(struct xsc_adapter *adapter) +{ + return ARRAY_SIZE(xsc_self_tests); +} + +static void xsc_ethtool_get_strings(struct xsc_adapter *adapter, u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + xsc_fill_stats_strings(adapter, data); + break; + + case ETH_SS_TEST: + for (i = 0; i < xsc_self_test_num(adapter); i++) + strscpy(data + i * ETH_GSTRING_LEN, + xsc_self_tests[i], + ETH_GSTRING_LEN); + break; + + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < XSC_NUM_PFLAGS; i++) + strscpy(data + i * ETH_GSTRING_LEN, + xsc_priv_flags[i].name, + ETH_GSTRING_LEN); + break; + + default: + ETH_DEBUG_LOG("wrong stringset\n"); + break; + } +} + +static void xsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + xsc_ethtool_get_strings(adapter, stringset, data); +} + +static int xsc_ethtool_get_sset_count(struct xsc_adapter *adapter, int sset) +{ + int i, num_stats = 0; + + switch (sset) { + case ETH_SS_STATS: + for (i = 0; i < xsc_num_stats_grps; i++) + num_stats += xsc_stats_grps[i].get_num_stats(adapter); + return num_stats; + case ETH_SS_PRIV_FLAGS: + return XSC_NUM_PFLAGS; + case ETH_SS_TEST: + return xsc_self_test_num(adapter); + default: + return -EOPNOTSUPP; + } +} + +static int xsc_get_sset_count(struct net_device *dev, int sset) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + return xsc_ethtool_get_sset_count(adapter, sset); +} + +static int (*xsc_st_func[XSC_ST_NUM])(struct xsc_adapter *) = { + xsc_test_link_state, + xsc_test_link_speed, + xsc_test_health_info, +#ifdef CONFIG_INET + xsc_test_loopback, +#endif +}; + +static void xsc_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf) +{ + struct xsc_adapter *priv = netdev_priv(ndev); + int i; + + memset(buf, 0, sizeof(u64) * XSC_ST_NUM); + + mutex_lock(&priv->state_lock); + netdev_info(ndev, "Self test begin..\n"); + + for (i = 0; i < XSC_ST_NUM; i++) { + 
netdev_info(ndev, "\t[%d] %s start..\n", + i, xsc_self_tests[i]); + buf[i] = xsc_st_func[i](priv); + netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", + i, xsc_self_tests[i], buf[i]); + } + + mutex_unlock(&priv->state_lock); + + for (i = 0; i < XSC_ST_NUM; i++) { + if (buf[i]) { + etest->flags |= ETH_TEST_FL_FAILED; + break; + } + } + netdev_info(ndev, "Self test out: status flags(0x%x)\n", + etest->flags); +} + +static void xsc_update_stats(struct xsc_adapter *adapter) +{ + int i; + + for (i = xsc_num_stats_grps - 1; i >= 0; i--) + if (xsc_stats_grps[i].update_stats) + xsc_stats_grps[i].update_stats(adapter); +} + +static void xsc_ethtool_get_ethtool_stats(struct xsc_adapter *adapter, + struct ethtool_stats *stats, u64 *data) +{ + int i, idx = 0; + + mutex_lock(&adapter->state_lock); + xsc_update_stats(adapter); + mutex_unlock(&adapter->state_lock); + + for (i = 0; i < xsc_num_stats_grps; i++) + idx = xsc_stats_grps[i].fill_stats(adapter, data, idx); +} + +static void xsc_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + xsc_ethtool_get_ethtool_stats(adapter, stats, data); +} + +static u32 xsc_get_msglevel(struct net_device *dev) +{ + return ((struct xsc_adapter *)netdev_priv(dev))->msglevel; +} + +static void xsc_set_msglevel(struct net_device *dev, u32 val) +{ + ((struct xsc_adapter *)netdev_priv(dev))->msglevel = val; +} + +static void xsc_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + param->rx_max_pending = 8192; //hack for H3C + param->rx_pending = priv->nic_param.rq_size; + param->tx_max_pending = 8192; //hack for H3C + param->tx_pending = priv->nic_param.sq_size; +} + +static int xsc_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + 
struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(dev); + u32 old_rq_size, old_sq_size; + int err = 0; + + if (param->rx_jumbo_pending) { + netdev_info(priv->netdev, "%s: rx_jumbo_pending not supported\n", + __func__); + return -EINVAL; + } + if (param->rx_mini_pending) { + netdev_info(priv->netdev, "%s: rx_mini_pending not supported\n", + __func__); + return -EINVAL; + } + + if (param->rx_pending < BIT(XSC_MIN_LOG_RQ_SZ)) { + netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%ld)\n", + __func__, param->rx_pending, BIT(XSC_MIN_LOG_RQ_SZ)); + return -EINVAL; + } + if (param->rx_pending > priv->nic_param.rq_max_size) { + netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n", + __func__, param->rx_pending, priv->nic_param.rq_max_size); + param->rx_pending = priv->nic_param.rq_max_size; + } + + if (param->tx_pending < BIT(XSC_MIN_LOG_SQ_SZ)) { + netdev_info(priv->netdev, "%s: tx_pending (%d) < min (%ld)\n", + __func__, param->tx_pending, BIT(XSC_MIN_LOG_SQ_SZ)); + return -EINVAL; + } + if (param->tx_pending > priv->nic_param.sq_max_size) { + netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n", + __func__, param->tx_pending, priv->nic_param.sq_max_size); + param->tx_pending = priv->nic_param.sq_max_size; + } + + if (param->rx_pending == priv->nic_param.rq_size && + param->tx_pending == priv->nic_param.sq_size) + return 0; + + mutex_lock(&priv->state_lock); + + if (priv->status != XSCALE_ETH_DRIVER_OK) + goto unlock; + + old_rq_size = priv->nic_param.rq_size; + old_sq_size = priv->nic_param.sq_size; + priv->nic_param.rq_size = param->rx_pending; + priv->nic_param.sq_size = param->tx_pending; + + netdev_info(priv->netdev, "%s: tx_pending(%d->%d), rx_pending(%d->%d)\n", + __func__, old_sq_size, param->tx_pending, + old_rq_size, priv->nic_param.rq_size); + err = xsc_safe_switch_channels(priv, NULL, NULL); + if (err) { + priv->nic_param.rq_size = old_rq_size; + priv->nic_param.sq_size = old_sq_size; + netdev_err(priv->netdev, 
"%s: set ringparams failed, err=%d\n", + __func__, err); + } + +unlock: + mutex_unlock(&priv->state_lock); + + return err; +} + +static void xsc_get_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + mutex_lock(&priv->state_lock); + + ch->max_combined = priv->nic_param.max_num_ch; + ch->combined_count = priv->nic_param.num_channels; + + mutex_unlock(&priv->state_lock); +} + +static int xsc_set_channels(struct net_device *dev, struct ethtool_channels *ch) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_eth_params *params = &priv->nic_param; + unsigned int ch_max = params->max_num_ch; + unsigned int ch_num_old = params->num_channels; + unsigned int count = ch->combined_count; + int err = 0; + + if (!count) { + netdev_info(priv->netdev, "%s: combined_count=0 not supported\n", __func__); + return -EINVAL; + } + + if (ch->rx_count || ch->tx_count) { + netdev_info(priv->netdev, "%s: separate rx/tx count not supported\n", __func__); + return -EINVAL; + } + + if (count > ch_max) { + netdev_info(priv->netdev, "%s: count (%d) > max (%d)\n", + __func__, count, ch_max); + return -EINVAL; + } + + if (ch_num_old == count) + return 0; + + mutex_lock(&priv->state_lock); + + params->num_channels = count; + + if (priv->status != XSCALE_ETH_DRIVER_OK) { + err = xsc_eth_num_channels_changed(priv); + if (err) + params->num_channels = ch_num_old; + goto out; + } + + /* Switch to new channels, set new parameters and close old ones */ + err = xsc_safe_switch_channels(priv, NULL, xsc_eth_num_channels_changed); + +out: + mutex_unlock(&priv->state_lock); + netdev_info(priv->netdev, "set combined_cnt=%d, err=%d\n", count, err); + + return err; +} + +static int flow_type_to_traffic_type(u32 flow_type) +{ + switch (flow_type) { + case IPV4_FLOW: + return XSC_TT_IPV4; + case TCP_V4_FLOW: + return XSC_TT_IPV4_TCP; + case UDP_V4_FLOW: + return XSC_TT_IPV4_TCP; + case IPV6_FLOW: + return XSC_TT_IPV6; + case TCP_V6_FLOW: 
+ return XSC_TT_IPV6_TCP; + case UDP_V6_FLOW: + return XSC_TT_IPV6_TCP; + case AH_V4_FLOW: + return XSC_TT_IPV4_IPSEC_AH; + case AH_V6_FLOW: + return XSC_TT_IPV6_IPSEC_AH; + case ESP_V4_FLOW: + return XSC_TT_IPV4_IPSEC_ESP; + case ESP_V6_FLOW: + return XSC_TT_IPV6_IPSEC_ESP; + default: + return -EINVAL; + } +} + +static int xsc_get_rss_hash_opt(struct xsc_adapter *priv, + struct ethtool_rxnfc *nfc) +{ + u32 hash_field = 0; + int tt; + + tt = flow_type_to_traffic_type(nfc->flow_type); + if (tt < 0) + return -EINVAL; + + hash_field = priv->rss_params.rx_hash_fields[tt]; + nfc->data = 0; + + if (hash_field & XSC_HASH_FIELD_SEL_PROTO) + nfc->data |= RXH_L3_PROTO; + if (tt == XSC_TT_IPV4_TCP) { + if (hash_field & XSC_HASH_FIELD_SEL_SRC_IP) + nfc->data |= RXH_IP_SRC; + if (hash_field & XSC_HASH_FIELD_SEL_DST_IP) + nfc->data |= RXH_IP_DST; + if (hash_field & XSC_HASH_FIELD_SEL_SPORT) + nfc->data |= RXH_L4_B_0_1; + if (hash_field & XSC_HASH_FIELD_SEL_DPORT) + nfc->data |= RXH_L4_B_2_3; + } else if (tt == XSC_TT_IPV6_TCP) { + if (hash_field & XSC_HASH_FIELD_SEL_SRC_IPV6) + nfc->data |= RXH_IP_SRC; + if (hash_field & XSC_HASH_FIELD_SEL_DST_IPV6) + nfc->data |= RXH_IP_DST; + if (hash_field & XSC_HASH_FIELD_SEL_SPORT_V6) + nfc->data |= RXH_L4_B_0_1; + if (hash_field & XSC_HASH_FIELD_SEL_DPORT_V6) + nfc->data |= RXH_L4_B_2_3; + } + + return 0; +} + +static int xsc_set_rss_hash_opt(struct xsc_adapter *priv, + struct ethtool_rxnfc *nfc) +{ + u32 rx_hash_field = XSC_HASH_FIELD_SEL_PROTO; + u32 change = 0; + int ret = 0; + int tt; + + tt = flow_type_to_traffic_type(nfc->flow_type); + if (tt < 0) + return -EINVAL; + + /* RSS does not support anything other than hashing to queues + * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest + * port. 
+ */ + if (nfc->flow_type != TCP_V4_FLOW && + nfc->flow_type != TCP_V6_FLOW && + nfc->flow_type != UDP_V4_FLOW && + nfc->flow_type != UDP_V6_FLOW) + return -EOPNOTSUPP; + + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EOPNOTSUPP; + + if (nfc->flow_type == TCP_V4_FLOW) { + if (nfc->data & RXH_IP_SRC) + rx_hash_field |= XSC_HASH_FIELD_SEL_SRC_IP; + if (nfc->data & RXH_IP_DST) + rx_hash_field |= XSC_HASH_FIELD_SEL_DST_IP; + if (nfc->data & RXH_L4_B_0_1) + rx_hash_field |= XSC_HASH_FIELD_SEL_SPORT; + if (nfc->data & RXH_L4_B_2_3) + rx_hash_field |= XSC_HASH_FIELD_SEL_DPORT; + } else if (nfc->flow_type == TCP_V6_FLOW) { + if (nfc->data & RXH_IP_SRC) + rx_hash_field |= XSC_HASH_FIELD_SEL_SRC_IPV6; + if (nfc->data & RXH_IP_DST) + rx_hash_field |= XSC_HASH_FIELD_SEL_DST_IPV6; + if (nfc->data & RXH_L4_B_0_1) + rx_hash_field |= XSC_HASH_FIELD_SEL_SPORT_V6; + if (nfc->data & RXH_L4_B_2_3) + rx_hash_field |= XSC_HASH_FIELD_SEL_DPORT_V6; + } else { + return 0; + } + + mutex_lock(&priv->state_lock); + if (rx_hash_field != priv->rss_params.rx_hash_fields[tt]) { + change |= BIT(XSC_RSS_HASH_TEMP_UPDATE); + priv->rss_params.rx_hash_fields[tt] = rx_hash_field; + } + + xsc_core_info(priv->xdev, "flow_type=%d, change=0x%x, hash_tmpl=0x%x\n", + nfc->flow_type, change, rx_hash_field); + if (change) + ret = xsc_eth_modify_nic_hca(priv, change); + + mutex_unlock(&priv->state_lock); + return ret; +} + +int xsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_eth_params *params = &priv->nic_param; + int err = 0; + + if (info->cmd == ETHTOOL_GRXRINGS) { + info->data = params->num_channels; + return 0; + } + + switch (info->cmd) { + case ETHTOOL_GRXFH: + err = xsc_get_rss_hash_opt(priv, info); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +int xsc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct xsc_adapter 
*priv = netdev_priv(dev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + err = xsc_set_rss_hash_opt(priv, cmd); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static u32 xsc_get_rxfh_key_size(struct net_device *dev) +{ + struct xsc_adapter *priv = netdev_priv(dev); + + return sizeof(priv->rss_params.toeplitz_hash_key); +} + +static u32 xsc_get_rxfh_indir_size(struct net_device *netdev) +{ + return XSC_INDIR_RQT_SIZE; +} + +int xsc_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + struct xsc_rss_params *rss = &priv->rss_params; + + if (indir) + memcpy(indir, rss->indirection_rqt, + sizeof(rss->indirection_rqt)); + + if (key) + memcpy(key, rss->toeplitz_hash_key, + sizeof(rss->toeplitz_hash_key)); + + if (hfunc) + *hfunc = rss->hfunc; + + return 0; +} + +int xsc_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) +{ + struct xsc_adapter *priv = netdev_priv(dev); + struct xsc_rss_params *rss = &priv->rss_params; + u32 refresh = 0; + int err = 0; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && + hfunc != ETH_RSS_HASH_XOR && + hfunc != ETH_RSS_HASH_TOP) + return -EINVAL; + + mutex_lock(&priv->state_lock); + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hfunc) { + rss->hfunc = hfunc; + refresh |= BIT(XSC_RSS_HASH_FUNC_UPDATE); + } + + if (key) { + memcpy(rss->toeplitz_hash_key, key, sizeof(rss->toeplitz_hash_key)); + if (rss->hfunc == ETH_RSS_HASH_TOP) + refresh |= BIT(XSC_RSS_HASH_KEY_UPDATE); + } + + if (refresh > 0 && priv->status == XSCALE_ETH_DRIVER_OK) + err = xsc_eth_modify_nic_hca(priv, refresh); + + mutex_unlock(&priv->state_lock); + + return err; +} + +static int xsc_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_event_linkinfo linkinfo; + + if (xsc_eth_get_link_info(adapter, &linkinfo)) + return -EINVAL; + 
+ cmd->base.port = linkinfo.port; + cmd->base.duplex = linkinfo.duplex; + cmd->base.autoneg = linkinfo.autoneg; + switch (linkinfo.linkspeed) { + case MODULE_SPEED_UNKNOWN: + cmd->base.speed = LINKSPEED_MODE_UNKNOWN; + break; + case MODULE_SPEED_10G: + cmd->base.speed = LINKSPEED_MODE_10G; + break; + case MODULE_SPEED_25G: + cmd->base.speed = LINKSPEED_MODE_25G; + break; + case MODULE_SPEED_40G_R4: + cmd->base.speed = LINKSPEED_MODE_40G; + break; + case MODULE_SPEED_50G_R: + case MODULE_SPEED_50G_R2: + cmd->base.speed = LINKSPEED_MODE_50G; + break; + case MODULE_SPEED_100G_R2: + case MODULE_SPEED_100G_R4: + cmd->base.speed = LINKSPEED_MODE_100G; + break; + case MODULE_SPEED_200G_R4: + case MODULE_SPEED_200G_R8: + cmd->base.speed = LINKSPEED_MODE_200G; + break; + case MODULE_SPEED_400G_R8: + cmd->base.speed = LINKSPEED_MODE_400G; + break; + default: + cmd->base.speed = LINKSPEED_MODE_25G; + break; + } + + //when link down, show speed && duplex as unknown + if (!linkinfo.linkstatus) { + cmd->base.duplex = DUPLEX_UNKNOWN; + cmd->base.speed = LINKSPEED_MODE_UNKNOWN; + } + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + bitmap_copy(cmd->link_modes.supported, (unsigned long *)linkinfo.supported_speed, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_copy(cmd->link_modes.advertising, (unsigned long *)linkinfo.advertising_speed, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + bitmap_or(cmd->link_modes.supported, cmd->link_modes.supported, + (unsigned long *)&linkinfo.supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_or(cmd->link_modes.advertising, cmd->link_modes.advertising, + (unsigned long *)&linkinfo.advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + + return 0; +} + +static int xsc_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_event_linkinfo linkinfo; + int err = 0, i; + + if (!adapter) { + 
pr_err("%s fail to find adapter\n", __func__); + return -EINVAL; + } + + memset(&linkinfo, 0, sizeof(struct xsc_event_linkinfo)); + + linkinfo.port = cmd->base.port; + linkinfo.duplex = cmd->base.duplex; + linkinfo.autoneg = cmd->base.autoneg; + linkinfo.linkspeed = cpu_to_be32(cmd->base.speed); + + bitmap_copy((unsigned long *)linkinfo.supported_speed, + cmd->link_modes.supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_copy((unsigned long *)linkinfo.advertising_speed, + cmd->link_modes.advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + + for (i = 0; i < ARRAY_SIZE(linkinfo.supported_speed); i++) { + linkinfo.supported_speed[i] = be64_to_cpu(linkinfo.supported_speed[i]); + linkinfo.advertising_speed[i] = be64_to_cpu(linkinfo.advertising_speed[i]); + } + + err = xsc_eth_set_link_info(adapter, &linkinfo); + if (err) + xsc_core_err(adapter->xdev, "fail to set link info err %d\n", err); + + return err; +} + +static int xsc_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + struct xsc_core_device *xdev = adapter->xdev; + int ret = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + xsc_eth_set_led_status(xdev->pf_id, adapter); + break; + case ETHTOOL_ID_INACTIVE: + xsc_eth_set_led_status(LED_ACT_ON_HW, adapter); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static int xsc_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_event_modify_fecparam_mbox_in in; + struct xsc_event_modify_fecparam_mbox_out out; + u32 new_fec = fec->fec; + int err = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_FEC_PARAM); + in.fec = cpu_to_be32(new_fec); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to set fec param, err=%d, status=%d\n", + err, out.hdr.status); + return -ENOEXEC; + } + + return err; +} + 
+static int xsc_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + struct xsc_event_query_fecparam_mbox_in in; + struct xsc_event_query_fecparam_mbox_out out; + int err = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_FEC_PARAM); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "failed to get fec param, err=%d, status=%d\n", + err, out.hdr.status); + return -ENOEXEC; + } + + fec->active_fec = be32_to_cpu(out.active_fec); + fec->fec = be32_to_cpu(out.fec_cfg); + + return err; +} + +static int xsc_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + xsc_dim_cq_moder_t *rx_moder, *tx_moder; + + rx_moder = &priv->nic_param.rx_cq_moderation; + coal->rx_coalesce_usecs = rx_moder->usec; + coal->rx_max_coalesced_frames = rx_moder->pkts; + coal->use_adaptive_rx_coalesce = priv->nic_param.rx_dim_enabled; + + tx_moder = &priv->nic_param.tx_cq_moderation; + coal->tx_coalesce_usecs = tx_moder->usec; + coal->tx_max_coalesced_frames = tx_moder->pkts; + coal->use_adaptive_tx_coalesce = priv->nic_param.tx_dim_enabled; + coal->rx_coalesce_usecs_low = priv->nic_param.rx_dim_usecs_low; + coal->rx_max_coalesced_frames_low = priv->nic_param.rx_dim_frames_low; + + kernel_coal->use_cqe_mode_rx = + XSC_GET_PFLAG(&priv->nic_param, XSC_PFLAG_RX_CQE_BASED_MODER); + kernel_coal->use_cqe_mode_tx = + XSC_GET_PFLAG(&priv->nic_param, XSC_PFLAG_TX_CQE_BASED_MODER); + + return 0; +} + +static int xsc_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct xsc_adapter *priv = netdev_priv(netdev); + xsc_dim_cq_moder_t *rx_moder, *tx_moder; + struct xsc_eth_params 
new_params = {}; + int err = 0; + bool reset_rx, reset_tx; + u8 mode; + + if (coal->tx_coalesce_usecs > XSC_MAX_COAL_TIME || + coal->rx_coalesce_usecs > XSC_MAX_COAL_TIME || + coal->rx_coalesce_usecs_low > XSC_MAX_COAL_TIME) { + netdev_info(priv->netdev, "%s: maximum coalesce time supported is %u usecs\n", + __func__, XSC_MAX_COAL_TIME); + return -ERANGE; + } + + if (coal->tx_max_coalesced_frames > XSC_MAX_COAL_FRAMES || + coal->rx_max_coalesced_frames > XSC_MAX_COAL_FRAMES || + coal->rx_max_coalesced_frames_low > XSC_MAX_COAL_FRAMES) { + netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %u\n", + __func__, XSC_MAX_COAL_FRAMES); + return -ERANGE; + } + + mutex_lock(&priv->state_lock); + new_params = priv->nic_param; + + rx_moder = &new_params.rx_cq_moderation; + rx_moder->usec = coal->rx_coalesce_usecs; + rx_moder->pkts = coal->rx_max_coalesced_frames; + new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; + new_params.rx_dim_usecs_low = coal->rx_coalesce_usecs_low; + new_params.rx_dim_frames_low = coal->rx_max_coalesced_frames_low; + + tx_moder = &new_params.tx_cq_moderation; + tx_moder->usec = coal->tx_coalesce_usecs; + tx_moder->pkts = coal->tx_max_coalesced_frames; + new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; + + if (priv->status != XSCALE_ETH_DRIVER_OK) { + priv->nic_param = new_params; + goto out; + } + + reset_rx = !!coal->use_adaptive_rx_coalesce != priv->nic_param.rx_dim_enabled; + reset_tx = !!coal->use_adaptive_tx_coalesce != priv->nic_param.tx_dim_enabled; + + if (rx_moder->cq_period_mode != kernel_coal->use_cqe_mode_rx) { + rx_moder->cq_period_mode = kernel_coal->use_cqe_mode_rx; + XSC_SET_PFLAG(&new_params, XSC_PFLAG_RX_CQE_BASED_MODER, + rx_moder->cq_period_mode == + XSC_CQ_PERIOD_MODE_START_FROM_CQE); + reset_rx = true; + } + if (tx_moder->cq_period_mode != kernel_coal->use_cqe_mode_tx) { + tx_moder->cq_period_mode = kernel_coal->use_cqe_mode_tx; + XSC_SET_PFLAG(&new_params, XSC_PFLAG_TX_CQE_BASED_MODER, 
+ tx_moder->cq_period_mode == + XSC_CQ_PERIOD_MODE_START_FROM_CQE); + reset_tx = true; + } + + if (reset_rx) { + mode = XSC_GET_PFLAG(&new_params, XSC_PFLAG_RX_CQE_BASED_MODER); + + xsc_set_rx_cq_mode_params(&new_params, mode); + } + if (reset_tx) { + mode = XSC_GET_PFLAG(&new_params, XSC_PFLAG_TX_CQE_BASED_MODER); + + xsc_set_tx_cq_mode_params(&new_params, mode); + } + + priv->nic_param = new_params; + if (!reset_rx && !reset_tx) + goto out; + + err = xsc_safe_switch_channels(priv, NULL, NULL); + +out: + mutex_unlock(&priv->state_lock); + return err; +} + +static const struct ethtool_ops xsc_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USECS_LOW_HIGH | + ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH | + ETHTOOL_COALESCE_USE_ADAPTIVE, + .get_drvinfo = xsc_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = xsc_get_strings, + .get_sset_count = xsc_get_sset_count, + .get_ethtool_stats = xsc_get_ethtool_stats, + .get_ringparam = xsc_get_ringparam, + .set_ringparam = xsc_set_ringparam, + .set_channels = xsc_set_channels, + .get_channels = xsc_get_channels, + .get_coalesce = xsc_get_coalesce, + .set_coalesce = xsc_set_coalesce, + .get_ts_info = NULL, + .get_link_ksettings = xsc_get_link_ksettings, + .set_link_ksettings = xsc_set_link_ksettings, + .get_rxfh_key_size = xsc_get_rxfh_key_size, + .get_rxfh_indir_size = xsc_get_rxfh_indir_size, + .get_rxfh = xsc_get_rxfh, + .set_rxfh = xsc_set_rxfh, + .get_rxnfc = xsc_get_rxnfc, + .set_rxnfc = xsc_set_rxnfc, + .get_module_info = xsc_get_module_info, + .get_module_eeprom = xsc_get_module_eeprom, + .get_module_eeprom_by_page = xsc_get_module_eeprom_by_page, + .get_priv_flags = xsc_get_priv_flags, + .set_priv_flags = xsc_set_priv_flags, + .get_msglevel = xsc_get_msglevel, + .set_msglevel = xsc_set_msglevel, + .self_test = xsc_self_test, + .set_phys_id = xsc_set_phys_id, + .get_fecparam = xsc_get_fecparam, + .set_fecparam = xsc_set_fecparam, +}; + 
+void eth_set_ethtool_ops(struct net_device *dev) +{ + dev->ethtool_ops = &xsc_ethtool_ops; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h new file mode 100644 index 000000000000..eb2eb3491c14 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_ethtool.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_ETH_ETHTOOL_H +#define XSC_ETH_ETHTOOL_H + +void eth_set_ethtool_ops(struct net_device *dev); + +/* EEPROM Standards for plug in modules */ +#ifndef ETH_MODULE_SFF_8436_MAX_LEN +#define ETH_MODULE_SFF_8636_MAX_LEN 640 +#define ETH_MODULE_SFF_8436_MAX_LEN 640 +#endif + +#define LED_ACT_ON_HW 0xff + +#endif /* XSC_ETH_ETHTOOL_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c new file mode 100644 index 000000000000..547556aa536b --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c @@ -0,0 +1,804 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include "xsc_eth.h" +#include "xsc_eth_txrx.h" +#include "xsc_eth_common.h" +#include "xsc_eth_stats.h" +#include +#include "common/xsc_pp.h" + +#define PAGE_REF_ELEV (U16_MAX) +/* Upper bound on number of packets that share a single page */ +#define PAGE_REF_THRSD (PAGE_SIZE / 64) + +static inline void xsc_rq_notify_hw(struct xsc_rq *rq) +{ + struct xsc_core_device *xdev = rq->cq.xdev; + struct xsc_wq_cyc *wq = &rq->wqe.wq; + union xsc_recv_doorbell doorbell_value; + u64 rqwqe_id = wq->wqe_ctr << (ilog2(xdev->caps.recv_ds_num)); + + ETH_DEBUG_LOG("rq%d_db_val=0x%x, recv_ds=%d\n", + rq->rqn, doorbell_value.recv_data, + xdev->caps.recv_ds_num); + /*reverse wqe index to ds index*/ + doorbell_value.next_pid = rqwqe_id; + doorbell_value.qp_num = rq->rqn; + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + writel(doorbell_value.recv_data, REG_ADDR(xdev, xdev->regs.rx_db)); +} + +static inline void xsc_skb_set_hash(struct xsc_adapter *adapter, + struct xsc_cqe *cqe, + struct sk_buff *skb) +{ + struct xsc_rss_params *rss = &adapter->rss_params; + u32 hash_field; + bool l3_hash = false; + bool l4_hash = false; + int ht = 0; + + if (adapter->netdev->features & NETIF_F_RXHASH) { + if (skb->protocol == htons(ETH_P_IP)) { + hash_field = rss->rx_hash_fields[XSC_TT_IPV4_TCP]; + if (hash_field & XSC_HASH_FIELD_SEL_SRC_IP || + hash_field & XSC_HASH_FIELD_SEL_DST_IP) + l3_hash = true; + + if (hash_field & XSC_HASH_FIELD_SEL_SPORT || + hash_field & XSC_HASH_FIELD_SEL_DPORT) + l4_hash = true; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hash_field = rss->rx_hash_fields[XSC_TT_IPV6_TCP]; + if (hash_field & XSC_HASH_FIELD_SEL_SRC_IPV6 || + hash_field & XSC_HASH_FIELD_SEL_DST_IPV6) + l3_hash = true; + + if (hash_field & XSC_HASH_FIELD_SEL_SPORT_V6 || + hash_field & XSC_HASH_FIELD_SEL_DPORT_V6) + l4_hash = true; + } + + if (l3_hash && l4_hash) + ht = PKT_HASH_TYPE_L4; + else if (l3_hash) + 
ht = PKT_HASH_TYPE_L3; + if (ht) + skb_set_hash(skb, be32_to_cpu(cqe->vni), ht); + } +} + +static inline unsigned short from32to16(unsigned int x) +{ + /* add up 16-bit and 16-bit for 16+c bit */ + x = (x & 0xffff) + (x >> 16); + /* add up carry.. */ + x = (x & 0xffff) + (x >> 16); + return x; +} + +static inline bool handle_udp_frag_csum(struct sk_buff *skb, struct epp_pph *pph) +{ +#ifdef XSC_UDP_FRAG_CSUM + char *head = (char *)pph; + struct iphdr *iph; + u8 l3_proto = PPH_OUTER_IP_TYPE(head); + u8 l4_proto = PPH_OUTER_TP_TYPE(head); + u16 csum_off = (u16)PPH_CSUM_OFST(head); + u16 csum_plen = (u16)PPH_CSUM_PLEN(head); + u8 payload_off = PPH_PAYLOAD_OFST(head); + u32 hw_csum = PPH_CSUM_VAL(head); + u16 udp_check = 0; + u16 udp_len = 0; + u32 off = 64; + __wsum csum1, csum2, csum3, csum; + +#ifdef CUM_SKB_DATA + head = (char *)skb->data; + off = 0; +#endif + + if (l4_proto != L4_PROTO_UDP && l4_proto != L4_PROTO_NONE) + return false; + + off += ETH_HLEN; + if (l3_proto == L3_PROTO_IP) { + iph = (struct iphdr *)(head + off); + if (!ip_is_fragment(iph)) + return false; + +#ifdef UDP_CSUM_DEBUG + netdev_dbg("ip_id=%d frag_off=0x%x l4_prt=%d l3_prt=%d iph_off=%d ip_len=%d csum_off=%d pload_off=%d\n", + ntohs(iph->id), ntohs(iph->frag_off), + l4_proto, l3_proto, PPH_OUTER_IP_OFST(head), PPH_OUTER_IP_LEN(pph), + csum_off, payload_off); +#endif + + off += iph->ihl * 4; + if (l4_proto == L4_PROTO_UDP) { + struct udphdr *uh = (struct udphdr *)(head + off); + + udp_check = uh->check; + udp_len = ntohs(uh->len); + } + + if (csum_off == 0) + csum_off = 256; + + netdev_dbg("%s: ip_id=%d frag_off=0x%x skb_len=%d data_len=%d csum_off=%d csum_plen=%d payload_off=%d udp_off=%d udp_len=%d udp_check=0x%x\n", + __func__, ntohs(iph->id), ntohs(iph->frag_off), + skb->len, skb->data_len, + csum_off, csum_plen, payload_off, off, udp_len, udp_check); +#ifdef CUM_RAW_DATA_DUMP + xsc_pkt_pph_dump((char *)head, 272); +#endif + + if (csum_off < off) { + csum1 = csum_partial((char *)(head + 
csum_off), (off - csum_off), 0); + csum2 = htons(from32to16(hw_csum)); + csum = csum_sub(csum2, csum1); + } else if (csum_off > off) { + csum2 = csum_partial((char *)(head + csum_off), csum_plen, 0); + csum1 = csum_partial((char *)(head + off), (csum_off - off), 0); + csum = htons(from32to16(hw_csum)); + csum = csum_partial((char *)(head + off), (csum_off - off), csum); + csum3 = csum_partial((char *)(head + off), (skb->len - off + 64), 0); + } else { + csum = htons(from32to16(hw_csum)); + } + skb->csum = csum_unfold(from32to16(csum)); + + ETH_DEBUG_LOG("%s: sw_cal_csum[%d:%d]=0x%x -> 0x%x\n", + __func__, off, csum_off, csum1, from32to16(csum1)); + ETH_DEBUG_LOG("%s: sw_cal_hw_csum[%d:%d]=0x%x -> 0x%x, hw_csum=0x%x -> 0x%x\n", + __func__, csum_off, csum_plen, csum2, from32to16(csum2), + hw_csum, from32to16(hw_csum)); + ETH_DEBUG_LOG("%s: sw_cal_tot_csum[%d:%d]=0x%x -> 0x%x, skb_csum=0x%x -> 0x%x\n", + __func__, off, skb->len, csum3, from32to16(csum3), csum, skb->csum); + + skb->ip_summed = CHECKSUM_COMPLETE; + + return true; + } +#endif + + return false; +} + +static inline void xsc_handle_csum(struct xsc_cqe *cqe, struct xsc_rq *rq, + struct sk_buff *skb, struct xsc_wqe_frag_info *wi) +{ + struct xsc_rq_stats *stats = rq->stats; + struct xsc_channel *c = rq->cq.channel; + struct net_device *netdev = c->adapter->netdev; + struct xsc_dma_info *dma_info = wi->di; + int offset_from = wi->offset; + struct epp_pph *hw_pph = page_address(dma_info->page) + offset_from; + + if (unlikely((netdev->features & NETIF_F_RXCSUM) == 0)) + goto csum_none; + + if (unlikely(XSC_GET_EPP2SOC_PPH_ERROR_BITMAP(hw_pph) & PACKET_UNKNOWN)) + goto csum_none; + + if (handle_udp_frag_csum(skb, hw_pph)) { + stats->csum_succ++; + goto out; + } + + if (XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && + (!(cqe->csum_err & OUTER_AND_INNER))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 1; + skb->encapsulation = 1; + + stats->csum_unnecessary++; + } else if 
(XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && + (!(cqe->csum_err & OUTER_BIT) && (cqe->csum_err & INNER_BIT))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 0; + skb->encapsulation = 1; + + stats->csum_unnecessary++; + } else if (!XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && + (!(cqe->csum_err & OUTER_BIT))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + + stats->csum_unnecessary++; + } else { + stats->csum_err++; + } + + goto out; + +csum_none: + skb->csum = 0; + skb->ip_summed = CHECKSUM_NONE; + stats->csum_none++; +out: + return; +} + +static inline void xsc_build_rx_skb(struct xsc_cqe *cqe, + u32 cqe_bcnt, + struct xsc_rq *rq, + struct sk_buff *skb, + struct xsc_wqe_frag_info *wi) +{ + struct xsc_channel *c = rq->cq.channel; + struct net_device *netdev = c->netdev; + struct xsc_adapter *adapter = c->adapter; + + skb->mac_len = ETH_HLEN; + + skb_record_rx_queue(skb, rq->ix); + xsc_handle_csum(cqe, rq, skb, wi); + + skb->protocol = eth_type_trans(skb, netdev); + xsc_skb_set_hash(adapter, cqe, skb); +} + +static inline void xsc_complete_rx_cqe(struct xsc_rq *rq, + struct xsc_cqe *cqe, + u32 cqe_bcnt, + struct sk_buff *skb, + struct xsc_wqe_frag_info *wi) +{ + struct xsc_rq_stats *stats = rq->stats; + + stats->packets++; + stats->bytes += cqe_bcnt; + xsc_build_rx_skb(cqe, cqe_bcnt, rq, skb, wi); + + rq->dim_obj.sample.pkt_ctr = rq->stats->packets; + rq->dim_obj.sample.byte_ctr = rq->stats->bytes; +} + +static inline void xsc_add_skb_frag(struct xsc_rq *rq, + struct sk_buff *skb, + struct xsc_dma_info *di, + u32 frag_offset, u32 len, + unsigned int truesize) +{ + struct xsc_channel *c = rq->cq.channel; + struct device *dev = c->adapter->dev; + + dma_sync_single_for_cpu(dev, di->addr + frag_offset, len, DMA_FROM_DEVICE); + page_ref_inc(di->page); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + di->page, frag_offset, len, truesize); +} + +static inline void xsc_copy_skb_header(struct device *dev, + struct sk_buff *skb, + struct xsc_dma_info 
*dma_info, + int offset_from, u32 headlen) +{ + void *from = page_address(dma_info->page) + offset_from; + /* Aligning len to sizeof(long) optimizes memcpy performance */ + unsigned int len = ALIGN(headlen, sizeof(long)); + + dma_sync_single_for_cpu(dev, dma_info->addr + offset_from, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, from, len); +} + +static inline struct sk_buff *xsc_build_linear_skb(struct xsc_rq *rq, void *va, + u32 frag_size, u16 headroom, + u32 cqe_bcnt) +{ + struct sk_buff *skb = build_skb(va, frag_size); + + if (unlikely(!skb)) { + rq->stats->buff_alloc_err++; + return NULL; + } + + skb_reserve(skb, headroom); + skb_put(skb, cqe_bcnt); + + return skb; +} + +struct sk_buff *xsc_skb_from_cqe_linear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph) +{ + struct xsc_dma_info *di = wi->di; + u16 rx_headroom = rq->buff.headroom; + int pph_len = has_pph ? XSC_PPH_HEAD_LEN : 0; + struct sk_buff *skb; + void *va, *data; + u32 frag_size; + + va = page_address(di->page) + wi->offset; + data = va + rx_headroom + pph_len; + frag_size = XSC_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); + + dma_sync_single_range_for_cpu(rq->cq.xdev->device, di->addr, wi->offset, + frag_size, DMA_FROM_DEVICE); + prefetchw(va); /* xdp_frame data area */ + prefetch(data); + + skb = xsc_build_linear_skb(rq, va, frag_size, (rx_headroom + pph_len), + (cqe_bcnt - pph_len)); + if (unlikely(!skb)) + return NULL; + + /* queue up for recycling/reuse */ + page_ref_inc(di->page); + + return skb; +} + +struct sk_buff *xsc_skb_from_cqe_nonlinear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph) +{ + struct xsc_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct xsc_wqe_frag_info *head_wi = wi; + struct xsc_wqe_frag_info *rx_wi = wi; + u16 headlen = min_t(u32, XSC_RX_MAX_HEAD, cqe_bcnt); + u16 frag_headlen = headlen; + u16 byte_cnt = cqe_bcnt - headlen; + struct sk_buff *skb; + struct xsc_channel *c = rq->cq.channel; + struct 
device *dev = c->adapter->dev; + struct net_device *netdev = c->adapter->netdev; + u8 fragcnt = 0; + u16 head_offset = head_wi->offset; + u16 frag_consumed_bytes = 0; + int i = 0; + +#ifndef NEED_CREATE_RX_THREAD + skb = napi_alloc_skb(rq->cq.napi, ALIGN(XSC_RX_MAX_HEAD, sizeof(long))); +#else + skb = netdev_alloc_skb(netdev, ALIGN(XSC_RX_MAX_HEAD, sizeof(long))); +#endif + if (unlikely(!skb)) { + rq->stats->buff_alloc_err++; + return NULL; + } + + prefetchw(skb->data); + + if (likely(has_pph)) { + headlen = min_t(u32, XSC_RX_MAX_HEAD, (cqe_bcnt - XSC_PPH_HEAD_LEN)); + frag_headlen = headlen + XSC_PPH_HEAD_LEN; + byte_cnt = cqe_bcnt - headlen - XSC_PPH_HEAD_LEN; + head_offset += XSC_PPH_HEAD_LEN; + } + + if (byte_cnt == 0 && (XSC_GET_PFLAG(&c->adapter->nic_param, XSC_PFLAG_RX_COPY_BREAK))) { + for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) + wi->is_available = 1; + goto ret; + } + + for (i = 0; i < rq->wqe.info.num_frags; i++, rx_wi++) + rx_wi->is_available = 0; + + while (byte_cnt) { + /*figure out whether the first fragment can be a page ?*/ + frag_consumed_bytes = + min_t(u16, frag_info->frag_size - frag_headlen, byte_cnt); + + xsc_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen, + frag_consumed_bytes, frag_info->frag_stride); + byte_cnt -= frag_consumed_bytes; + ETH_DEBUG_LOG("consumed=%d, frag_size=%d, byte_cnt=%d, cqe_bcnt=%d, addr=0x%llx\n", + frag_consumed_bytes, frag_info->frag_size, byte_cnt, + cqe_bcnt, (u64)wi->di->addr); + + /*to protect extend wqe read, drop exceed bytes*/ + frag_headlen = 0; + fragcnt++; + if (fragcnt == rq->wqe.info.num_frags) { + if (byte_cnt) { + rq->stats->oversize_pkts_sw_drop += byte_cnt; + netdev_warn(netdev, + "large packet reach the maximum rev-wqe num.\n"); + netdev_warn(netdev, + "%u bytes dropped: frag_num=%d, headlen=%d, cqe_cnt=%d, frag0_bytes=%d, frag_size=%d\n", + byte_cnt, fragcnt, headlen, cqe_bcnt, + frag_consumed_bytes, frag_info->frag_size); + } + break; + } + + frag_info++; + wi++; + } + +ret: + /* 
copy header */ + xsc_copy_skb_header(dev, skb, head_wi->di, head_offset, headlen); + + /* skb linear part was allocated with headlen and aligned to long */ + skb->tail += headlen; + skb->len += headlen; + + skbdata_debug_dump(skb, headlen, 0); + + return skb; +} + +static inline bool xsc_rx_cache_is_empty(struct xsc_page_cache *cache) +{ + return cache->head == cache->tail; +} + +static inline bool xsc_page_is_reserved(struct page *page) +{ + return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id(); +} + +static inline bool xsc_rx_cache_get(struct xsc_rq *rq, + struct xsc_dma_info *dma_info) +{ + struct xsc_page_cache *cache = &rq->page_cache; + struct xsc_rq_stats *stats = rq->stats; + struct xsc_core_device *xdev = rq->cq.xdev; + + if (unlikely(xsc_rx_cache_is_empty(cache))) { + stats->cache_empty++; + return false; + } + + if (page_ref_count(cache->page_cache[cache->head].page) != 1) { + stats->cache_busy++; + return false; + } + + stats->cache_reuse++; + *dma_info = cache->page_cache[cache->head]; + cache->head = (cache->head + 1) & (cache->sz - 1); + + dma_sync_single_for_device(&xdev->pdev->dev, dma_info->addr, + PAGE_SIZE, DMA_FROM_DEVICE); + + return true; +} + +static inline bool xsc_rx_cache_put(struct xsc_rq *rq, + struct xsc_dma_info *dma_info) +{ + struct xsc_page_cache *cache = &rq->page_cache; + struct xsc_rq_stats *stats = rq->stats; + u32 tail_next = (cache->tail + 1) & (cache->sz - 1); + + if (tail_next == cache->head) { + stats->cache_full++; + return false; + } + + if (unlikely(xsc_page_is_reserved(dma_info->page))) { + stats->cache_waive++; + return false; + } + + cache->page_cache[cache->tail] = *dma_info; + cache->tail = tail_next; + return true; +} + +void xsc_page_dma_unmap(struct xsc_rq *rq, struct xsc_dma_info *dma_info) +{ + struct xsc_channel *c = rq->cq.channel; + struct device *dev = c->adapter->dev; + + dma_unmap_page(dev, dma_info->addr, XSC_RX_FRAG_SZ, rq->buff.map_dir); +} + +static inline void xsc_put_page(struct 
xsc_dma_info *dma_info) +{ + put_page(dma_info->page); +} + +void xsc_page_release_dynamic(struct xsc_rq *rq, + struct xsc_dma_info *dma_info, bool recycle) +{ + if (likely(recycle)) { +#ifdef XSC_PAGE_CACHE + if (xsc_rx_cache_put(rq, dma_info)) + return; +#endif + + xsc_page_dma_unmap(rq, dma_info); + page_pool_recycle_direct(rq->page_pool, dma_info->page); + } else { + xsc_page_dma_unmap(rq, dma_info); + page_pool_put_defragged_page(rq->page_pool, + dma_info->page, + -1, true); + } +} + +static inline void xsc_put_rx_frag(struct xsc_rq *rq, + struct xsc_wqe_frag_info *frag, bool recycle) +{ + if (frag->last_in_page) + xsc_page_release_dynamic(rq, frag->di, recycle); +} + +static inline struct xsc_wqe_frag_info *get_frag(struct xsc_rq *rq, u16 ix) +{ + return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags]; +} + +static inline void xsc_free_rx_wqe(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, bool recycle) +{ + int i; + + for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) { + if (wi->is_available && recycle) + continue; + xsc_put_rx_frag(rq, wi, recycle); + } +} + +static void xsc_dump_error_rqcqe(struct xsc_rq *rq, + struct xsc_cqe *cqe) +{ + struct xsc_channel *c = rq->cq.channel; + struct net_device *netdev = c->adapter->netdev; + u32 ci = xsc_cqwq_get_ci(&rq->cq.wq); + + net_err_ratelimited("Error cqe on dev=%s, cqn=%d, ci=%d, rqn=%d, qpn=%d, error_code=0x%x\n", + netdev->name, rq->cq.xcq.cqn, ci, + rq->rqn, cqe->qp_id, get_cqe_opcode(cqe)); +} + +void xsc_eth_handle_rx_cqe(struct xsc_cqwq *cqwq, + struct xsc_rq *rq, struct xsc_cqe *cqe) +{ + struct xsc_wq_cyc *wq = &rq->wqe.wq; + struct xsc_channel *c = rq->cq.channel; + u8 cqe_opcode = get_cqe_opcode(cqe); + struct xsc_wqe_frag_info *wi; + struct sk_buff *skb; + u32 cqe_bcnt; + u16 ci; + + ci = xsc_wq_cyc_ctr2ix(wq, cqwq->cc); + wi = get_frag(rq, ci); + if (unlikely(cqe_opcode & BIT(7))) { + xsc_dump_error_rqcqe(rq, cqe); + rq->stats->cqe_err++; + goto free_wqe; + } + + cqe_bcnt = 
le32_to_cpu(cqe->msg_len); + if (cqe->has_pph && cqe_bcnt <= XSC_PPH_HEAD_LEN) { + rq->stats->wqe_err++; + goto free_wqe; + } + + if (unlikely(cqe_bcnt > rq->frags_sz)) { + if (!XSC_GET_PFLAG(&c->adapter->nic_param, XSC_PFLAG_DROPLESS_RQ)) { + rq->stats->oversize_pkts_sw_drop += cqe_bcnt; + goto free_wqe; + } else { + rq->stats->oversize_pkts_err++; + } + } + + cqe_bcnt = min_t(u32, cqe_bcnt, rq->frags_sz); + skb = rq->wqe.skb_from_cqe(rq, wi, cqe_bcnt, cqe->has_pph); + if (!skb) + goto free_wqe; + + xsc_complete_rx_cqe(rq, cqe, + cqe->has_pph == 1 ? cqe_bcnt - XSC_PPH_HEAD_LEN : cqe_bcnt, + skb, wi); + +#ifdef NEED_CREATE_RX_THREAD + netif_rx_ni(skb); +#else + napi_gro_receive(rq->cq.napi, skb); +#endif + +free_wqe: + xsc_free_rx_wqe(rq, wi, true); + xsc_wq_cyc_pop(wq); +} + +int xsc_poll_rx_cq(struct xsc_cq *cq, int budget) +{ + struct xsc_rq *rq = container_of(cq, struct xsc_rq, cq); + struct xsc_cqwq *cqwq = &cq->wq; + struct xsc_cqe *cqe; + int work_done = 0; + struct xsc_ch_stats *ch_stats = cq->channel->stats; + + if (!test_bit(XSC_ETH_RQ_STATE_ENABLED, &rq->state)) + return 0; + + while ((work_done < budget) && (cqe = xsc_cqwq_get_cqe(cqwq))) { + rq->stats->cqes++; + + rq->handle_rx_cqe(cqwq, rq, cqe); + ++work_done; + + xsc_cqwq_pop(cqwq); + } + + if (!work_done) + goto out; + + xsc_cq_notify_hw(cq); + /* ensure cq space is freed before enabling more cqes */ + wmb(); + +out: + ch_stats->poll += work_done; + if (work_done < budget) { + if (ch_stats->poll == 0) + ch_stats->poll_0++; + else if (ch_stats->poll < 64) + ch_stats->poll_1_63++; + else if (ch_stats->poll < 512) + ch_stats->poll_64_511++; + else if (ch_stats->poll < 1024) + ch_stats->poll_512_1023++; + else if (ch_stats->poll >= 1024) + cq->channel->stats->poll_1024++; + } + + return work_done; +} + +static inline int xsc_page_alloc_mapped(struct xsc_rq *rq, + struct xsc_dma_info *dma_info) +{ + struct xsc_channel *c = rq->cq.channel; + struct device *dev = c->adapter->dev; + +#ifdef XSC_PAGE_CACHE 
+ if (xsc_rx_cache_get(rq, dma_info)) + return 0; + + rq->stats->cache_alloc++; +#endif + + dma_info->page = page_pool_dev_alloc_pages(rq->page_pool); + if (unlikely(!dma_info->page)) + return -ENOMEM; + + dma_info->addr = dma_map_page(dev, dma_info->page, 0, + XSC_RX_FRAG_SZ, rq->buff.map_dir); + if (unlikely(dma_mapping_error(dev, dma_info->addr))) { + page_pool_recycle_direct(rq->page_pool, dma_info->page); + dma_info->page = NULL; + return -ENOMEM; + } + + return 0; +} + +static inline int xsc_get_rx_frag(struct xsc_rq *rq, + struct xsc_wqe_frag_info *frag) +{ + int err = 0; + + if (!frag->offset && !frag->is_available) + /* On first frag (offset == 0), replenish page (dma_info actually). + * Other frags that point to the same dma_info (with a different + * offset) should just use the new one without replenishing again + * by themselves. + */ + err = xsc_page_alloc_mapped(rq, frag->di); + + return err; +} + +static int xsc_alloc_rx_wqe(struct xsc_rq *rq, struct xsc_eth_rx_wqe_cyc *wqe, u16 ix) +{ + struct xsc_wqe_frag_info *frag = get_frag(rq, ix); + u64 addr; + int i; + int err; + + for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) { + err = xsc_get_rx_frag(rq, frag); + if (unlikely(err)) + goto free_frags; + + addr = cpu_to_le64(frag->di->addr + frag->offset + rq->buff.headroom); + wqe->data[i].va = addr; + if (frag->offset == 0) + ETH_DEBUG_LOG("rq%d_wqe%d_frag%d off=%d last=%d refcnt=%d addr=0x%llx\n", + rq->rqn, ix, i, frag->offset, frag->last_in_page, + page_ref_count(frag->di->page), addr); + } + + return 0; + +free_frags: + while (--i >= 0) + xsc_put_rx_frag(rq, --frag, true); + + return err; +} + +void xsc_eth_dealloc_rx_wqe(struct xsc_rq *rq, u16 ix) +{ + struct xsc_wqe_frag_info *wi = get_frag(rq, ix); + + xsc_free_rx_wqe(rq, wi, false); +} + +static int xsc_alloc_rx_wqes(struct xsc_rq *rq, u16 ix, u8 wqe_bulk) +{ + struct xsc_wq_cyc *wq = &rq->wqe.wq; + struct xsc_eth_rx_wqe_cyc *wqe; + int err; + int i; + int idx; + + for (i = 0; i < wqe_bulk; 
i++) { + idx = xsc_wq_cyc_ctr2ix(wq, (ix + i)); + wqe = xsc_wq_cyc_get_wqe(wq, idx); + + err = xsc_alloc_rx_wqe(rq, wqe, idx); + if (unlikely(err)) { + rq->stats->buff_alloc_err++; + goto free_wqes; + } + } + + return 0; + +free_wqes: + while (--i >= 0) + xsc_eth_dealloc_rx_wqe(rq, ix + i); + + return err; +} + +bool xsc_eth_post_rx_wqes(struct xsc_rq *rq) +{ + struct xsc_wq_cyc *wq = &rq->wqe.wq; + u8 wqe_bulk, wqe_bulk_min; + int alloc; + u16 head; + int err; + + wqe_bulk = rq->wqe.info.wqe_bulk; + wqe_bulk_min = rq->wqe.info.wqe_bulk_min; + if (xsc_wq_cyc_missing(wq) < wqe_bulk) + return false; + + do { + head = xsc_wq_cyc_get_head(wq); + + alloc = min_t(int, wqe_bulk, xsc_wq_cyc_missing(wq)); + if (alloc < wqe_bulk && alloc >= wqe_bulk_min) + alloc = alloc & 0xfffffffe; + + if (alloc > 0) { + err = xsc_alloc_rx_wqes(rq, head, alloc); + if (unlikely(err)) + break; + + xsc_wq_cyc_push_n(wq, alloc); + rq->stats->wqes += alloc; + } + } while (xsc_wq_cyc_missing(wq) >= wqe_bulk_min); + + dma_wmb(); + + /* ensure wqes are visible to device before updating doorbell record */ + xsc_rq_notify_hw(rq); + + return !!err; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c new file mode 100644 index 000000000000..8b75ce05afb1 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c @@ -0,0 +1,651 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include "common/xsc_cmd.h" +#include "common/xsc_core.h" + +#include "xsc_eth_stats.h" +#include "xsc_eth.h" + +static const struct counter_desc sw_stats_desc[] = { + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_packets) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_packets) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_tso_packets) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_tso_bytes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_tso_inner_packets) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_tso_inner_bytes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_csum_unnecessary) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_csum_none) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_csum_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_csum_succ) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_csum_partial) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_csum_partial_inner) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_queue_stopped) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_queue_dropped) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_xmit_more) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_cqes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_queue_wake) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_cqe_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_oversize_pkts_sw_drop) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_dim_us) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, tx_dim_pkts) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, txdone_skb_null) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, txdone_skb_refcnt_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cqes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cqe_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_wqes) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_wqe_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_dim_us) }, + { 
XSC_DECLARE_STAT(struct xsc_sw_stats, rx_dim_pkts) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_oversize_pkts_sw_drop) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_oversize_pkts_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_buff_alloc_err) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_reuse) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_full) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_empty) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_busy) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_alloc) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_waive) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_ext) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, rx_cache_rdc) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_events) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_0) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_1_63) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_64_511) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_512_1023) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_1024) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_poll_tx) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_arm) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_noarm) }, + { XSC_DECLARE_STAT(struct xsc_sw_stats, ch_aff_change) }, +}; + +#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) + +static int xsc_grp_sw_get_num_stats(struct xsc_adapter *adapter) +{ + return NUM_SW_COUNTERS; +} + +static int xsc_grp_sw_fill_strings(struct xsc_adapter *adapter, u8 *data, int idx) +{ + int i; + + for (i = 0; i < NUM_SW_COUNTERS; i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + sw_stats_desc[i].format, + ETH_GSTRING_LEN); + return idx; +} + +static int xsc_grp_sw_fill_stats(struct xsc_adapter *adapter, u64 *data, int idx) +{ + int i; + + for (i = 0; i < NUM_SW_COUNTERS; i++) + data[idx++] = XSC_READ_CTR64_CPU(&adapter->stats->sw, sw_stats_desc, i); + return 
idx; +} + +void xsc_grp_sw_update_stats(struct xsc_adapter *adapter) +{ + struct xsc_sw_stats *s = &adapter->stats->sw; + int max_tc = xsc_get_netdev_max_tc(adapter); + int i; + + memset(s, 0, sizeof(*s)); + + for (i = 0; i < xsc_get_netdev_max_channels(adapter); i++) { + struct xsc_channel_stats *channel_stats = + &adapter->stats->channel_stats[i]; + + struct xsc_rq_stats *rq_stats = &channel_stats->rq; + struct xsc_ch_stats *ch_stats = &channel_stats->ch; + int j; + + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; + s->rx_csum_unnecessary += rq_stats->csum_unnecessary; + s->rx_csum_none += rq_stats->csum_none; + s->rx_csum_err += rq_stats->csum_err; + s->rx_csum_succ += rq_stats->csum_succ; + s->rx_cqes += rq_stats->cqes; + s->rx_cqe_err += rq_stats->cqe_err; + s->rx_wqes += rq_stats->wqes; + s->rx_wqe_err += rq_stats->wqe_err; + s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop; + s->rx_oversize_pkts_err += rq_stats->oversize_pkts_err; + s->rx_buff_alloc_err += rq_stats->buff_alloc_err; + s->rx_cache_reuse += rq_stats->cache_reuse; + s->rx_cache_full += rq_stats->cache_full; + s->rx_cache_empty += rq_stats->cache_empty; + s->rx_cache_busy += rq_stats->cache_busy; + s->rx_cache_alloc += rq_stats->cache_alloc; + s->rx_cache_waive += rq_stats->cache_waive; + s->rx_cache_ext += rq_stats->cache_ext; + s->rx_cache_rdc += rq_stats->cache_rdc; + s->rx_dim_us += rq_stats->dim_us; + s->rx_dim_pkts += rq_stats->dim_pkts; + + s->ch_events += ch_stats->events; + s->ch_poll += ch_stats->poll; + s->ch_poll_0 += ch_stats->poll_0; + s->ch_poll_1_63 += ch_stats->poll_1_63; + s->ch_poll_64_511 += ch_stats->poll_64_511; + s->ch_poll_512_1023 += ch_stats->poll_512_1023; + s->ch_poll_1024 += ch_stats->poll_1024; + s->ch_poll_tx += ch_stats->poll_tx; + s->ch_arm += ch_stats->arm; + s->ch_noarm += ch_stats->noarm; + s->ch_aff_change += ch_stats->aff_change; + + for (j = 0; j < max_tc; j++) { + struct xsc_sq_stats *sq_stats = &channel_stats->sq[j]; + 
+ s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; + s->tx_tso_packets += sq_stats->tso_packets; + s->tx_tso_bytes += sq_stats->tso_bytes; + s->tx_tso_inner_packets += sq_stats->tso_inner_packets; + s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; + s->tx_csum_partial += sq_stats->csum_partial; + s->tx_csum_partial_inner += sq_stats->csum_partial_inner; + s->tx_csum_none += sq_stats->csum_none; + s->tx_queue_stopped += sq_stats->stopped; + s->tx_queue_dropped += sq_stats->dropped; + s->tx_xmit_more += sq_stats->xmit_more; + s->tx_cqes += sq_stats->cqes; + s->tx_queue_wake += sq_stats->wake; + s->tx_cqe_err += sq_stats->cqe_err; + s->tx_oversize_pkts_sw_drop += sq_stats->oversize_pkts_sw_drop; + s->txdone_skb_null += sq_stats->txdone_skb_null; + s->txdone_skb_refcnt_err += sq_stats->txdone_skb_refcnt_err; + s->skb_linear += sq_stats->skb_linear; + s->tx_dim_us += sq_stats->dim_us; + s->tx_dim_pkts += sq_stats->dim_pkts; + } + } +} + +static const struct counter_desc rq_stats_desc[] = { + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, packets) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, bytes) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, csum_unnecessary) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, csum_none) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, csum_err) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, csum_succ) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cqes) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, dim_us) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, dim_pkts) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, wqe_err) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, oversize_pkts_sw_drop) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, oversize_pkts_err) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, buff_alloc_err) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_reuse) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_full) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_empty) }, + { 
XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_busy) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_alloc) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_waive) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_ext) }, + { XSC_DECLARE_RX_STAT(struct xsc_rq_stats, cache_rdc) }, +}; + +static const struct counter_desc sq_stats_desc[] = { + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, packets) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, bytes) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, tso_packets) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, tso_bytes) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, tso_inner_packets) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, tso_inner_bytes) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, csum_partial) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, csum_partial_inner) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, csum_none) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, stopped) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, dropped) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, xmit_more) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, cqes) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, wake) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, dim_us) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, dim_pkts) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, cqe_err) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, oversize_pkts_sw_drop) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, txdone_skb_null) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, txdone_skb_refcnt_err) }, + { XSC_DECLARE_TX_STAT(struct xsc_sq_stats, skb_linear) }, +}; + +static const struct counter_desc ch_stats_desc[] = { + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, events) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_0) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_1_63) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_64_511) }, + { 
XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_512_1023) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_1024) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, poll_tx) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, arm) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, noarm) }, + { XSC_DECLARE_CH_STAT(struct xsc_ch_stats, aff_change) }, +}; + +#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) +#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) +#define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc) + +static int xsc_grp_channels_get_num_stats(struct xsc_adapter *adapter) +{ + int max_nch = xsc_get_netdev_max_channels(adapter); + int max_tc = xsc_get_netdev_max_tc(adapter); + + return (NUM_RQ_STATS * max_nch) + + (NUM_CH_STATS * max_nch) + + (NUM_SQ_STATS * max_nch * max_tc); +} + +static int xsc_grp_channels_fill_strings(struct xsc_adapter *adapter, u8 *data, + int idx) +{ + int max_nch = xsc_get_netdev_max_channels(adapter); + int max_tc = xsc_get_netdev_max_tc(adapter); + int i, j, tc; + + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_CH_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + ch_stats_desc[j].format, i); + + for (i = 0; i < max_nch; i++) { + for (j = 0; j < NUM_RQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + rq_stats_desc[j].format, i); + } + + for (tc = 0; tc < max_tc; tc++) + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_SQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + sq_stats_desc[j].format, + i + tc * max_nch); + + return idx; +} + +static int xsc_grp_channels_fill_stats(struct xsc_adapter *adapter, u64 *data, + int idx) +{ + int max_nch = xsc_get_netdev_max_channels(adapter); + int max_tc = xsc_get_netdev_max_tc(adapter); + int i, j, tc; + struct xsc_stats *stats = adapter->stats; + + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_CH_STATS; j++) + data[idx++] = + XSC_READ_CTR64_CPU(&stats->channel_stats[i].ch, + ch_stats_desc, j); + + for (i = 0; i < max_nch; i++) { + for (j = 0; j < NUM_RQ_STATS; j++) + 
data[idx++] = + XSC_READ_CTR64_CPU(&stats->channel_stats[i].rq, + rq_stats_desc, j); + } + + for (tc = 0; tc < max_tc; tc++) + for (i = 0; i < max_nch; i++) + for (j = 0; j < NUM_SQ_STATS; j++) + data[idx++] = + XSC_READ_CTR64_CPU(&stats->channel_stats[i].sq[tc], + sq_stats_desc, j); + + return idx; +} + +static const struct counter_desc hw_prio_stats_desc[] = { + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 0), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 1), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 2), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 3), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 4), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 5), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 6), + XSC_DECLARE_HW_PRIO_STAT(struct 
xsc_prio_stats, rx_bytes, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 6), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_bytes, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_bytes, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, tx_pkts, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_prio_stats, rx_pkts, 7), + +}; + +static const struct counter_desc hw_pfc_prio_stats_desc[] = { + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 0), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 0), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 1), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 1), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 2), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 2), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 3), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 3), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 4), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 4), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, 
tx_pause, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 5), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 5), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 6), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 6), + + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, tx_pause_duration, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause, 7), + XSC_DECLARE_HW_PRIO_STAT(struct xsc_pfc_prio_stats, rx_pause_duration, 7), +}; + +static const struct counter_desc hw_eth_stats_pf_desc[] = { + /*by mac port*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_tx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_tx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_rx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_rx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, tx_pause) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_pause) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_fcs_errors) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_discards) }, + + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, tx_multicast_phy) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, tx_broadcast_phy) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_multicast_phy) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rx_broadcast_phy) }, + + /*by global*/ + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_loopback_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_pf, rdma_loopback_bytes) }, +}; + +static const struct counter_desc hw_eth_stats_vf_desc[] = { + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_vf, 
rdma_tx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_vf, rdma_tx_bytes) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_vf, rdma_rx_pkts) }, + { XSC_DECLARE_STAT(struct xsc_hw_stats_eth_vf, rdma_rx_bytes) }, +}; + +static const struct counter_desc pfc_stall_stats_desc[] = { + /*by mac port*/ + { XSC_DECLARE_STAT(struct xsc_pfc_stall_stats, tx_pause_storm_triggered) }, +}; + +static int get_hw_stats_eth(struct xsc_core_device *dev, struct xsc_hw_stats_eth *stats_eth) +{ + int ret; + struct xsc_hw_stats_mbox_in in; + struct xsc_hw_stats_eth_mbox_out out; + + memset(stats_eth, 0, sizeof(*stats_eth)); + + if (!dev) + return -1; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HW_STATS_ETH); + in.mac_port = dev->mac_port; + + ret = xsc_cmd_exec(dev, (void *)&in, sizeof(in), (void *)&out, sizeof(out)); + if (ret || out.hdr.status) + return -1; + + memcpy(stats_eth, &out.hw_stats, sizeof(*stats_eth)); + return 0; +} + +static int xsc_hw_get_num_stats(struct xsc_adapter *adapter) +{ + int ret = 0; + + if (is_support_hw_pf_stats(adapter->xdev)) { + ret = ARRAY_SIZE(hw_prio_stats_desc) + ARRAY_SIZE(hw_eth_stats_pf_desc) + + (is_support_pfc_prio_statistic(adapter->xdev) ? + ARRAY_SIZE(hw_pfc_prio_stats_desc) : 0) + + (is_support_pfc_stall_stats(adapter->xdev) ? 
+ ARRAY_SIZE(pfc_stall_stats_desc) : 0); + } else { + ret = ARRAY_SIZE(hw_eth_stats_vf_desc); + } + + return ret; +} + +static int xsc_hw_fill_strings(struct xsc_adapter *adapter, u8 *data, int idx) +{ + int i; + struct xsc_core_device *xdev; + + xdev = adapter->xdev; + + if (is_support_hw_pf_stats(xdev)) { + for (i = 0; i < ARRAY_SIZE(hw_prio_stats_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + hw_prio_stats_desc[i].format, + ETH_GSTRING_LEN); + + if (is_support_pfc_prio_statistic(xdev)) + for (i = 0; i < ARRAY_SIZE(hw_pfc_prio_stats_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + hw_pfc_prio_stats_desc[i].format, + ETH_GSTRING_LEN); + + for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_pf_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + hw_eth_stats_pf_desc[i].format, + ETH_GSTRING_LEN); + + if (is_support_pfc_stall_stats(xdev)) + for (i = 0; i < ARRAY_SIZE(pfc_stall_stats_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + pfc_stall_stats_desc[i].format, + ETH_GSTRING_LEN); + } else { + for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_vf_desc); i++) + strscpy(data + (idx++) * ETH_GSTRING_LEN, + hw_eth_stats_vf_desc[i].format, + ETH_GSTRING_LEN); + } + + return idx; +} + +static int xsc_hw_fill_stats(struct xsc_adapter *adapter, u64 *data, int idx) +{ + struct xsc_prio_stats_mbox_in in; + struct xsc_prio_stats_mbox_out out; + struct xsc_pfc_prio_stats_mbox_in pfc_prio_in; + struct xsc_pfc_prio_stats_mbox_out pfc_prio_out; + struct xsc_pfc_stall_stats_mbox_in pfc_stall_in; + struct xsc_pfc_stall_stats_mbox_out pfc_stall_out; + struct xsc_core_device *xdev; + int ret; + u32 i; + u64 val; + u8 *stats; + struct xsc_hw_stats_eth stats_eth; + int ret_s; + + xdev = adapter->xdev; + ret_s = get_hw_stats_eth(xdev, &stats_eth); + + if (is_support_hw_pf_stats(xdev)) { + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_QUERY_PRIO_STATS); + in.pport = xdev->mac_port; + + ret = 
xsc_cmd_exec(adapter->xdev, (void *)&in, + sizeof(struct xsc_prio_stats_mbox_in), + (void *)&out, sizeof(struct xsc_prio_stats_mbox_out)); + if (ret == 0 && out.hdr.status == 0) { + for (i = 0; i < ARRAY_SIZE(hw_prio_stats_desc); i++) { + val = XSC_READ_CTR64_CPU(&out.prio_stats, + hw_prio_stats_desc, i); + data[idx++] = __be64_to_cpu(val); + } + } + + if (is_support_pfc_prio_statistic(xdev)) { + memset(&pfc_prio_in, 0, sizeof(pfc_prio_in)); + memset(&pfc_prio_out, 0, sizeof(pfc_prio_out)); + pfc_prio_in.hdr.opcode = + __cpu_to_be16(XSC_CMD_OP_QUERY_PFC_PRIO_STATS); + pfc_prio_in.pport = xdev->mac_port; + + ret = xsc_cmd_exec(adapter->xdev, (void *)&pfc_prio_in, + sizeof(struct xsc_pfc_prio_stats_mbox_in), + (void *)&pfc_prio_out, + sizeof(struct xsc_pfc_prio_stats_mbox_out)); + if (ret == 0 && pfc_prio_out.hdr.status == 0) { + for (i = 0; i < ARRAY_SIZE(hw_pfc_prio_stats_desc); i++) { + val = XSC_READ_CTR64_CPU(&pfc_prio_out.prio_stats, + hw_pfc_prio_stats_desc, + i); + data[idx++] = __be64_to_cpu(val); + } + } + } + + if (!ret_s && stats_eth.is_pf) { + stats = (u8 *)&stats_eth.stats.pf_stats; + for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_pf_desc); i++) { + val = XSC_READ_CTR64_CPU(stats, hw_eth_stats_pf_desc, i); + data[idx++] = __be64_to_cpu(val); + } + } + + if (is_support_pfc_stall_stats(xdev)) { + memset(&pfc_stall_in, 0, sizeof(pfc_stall_in)); + memset(&pfc_stall_out, 0, sizeof(pfc_stall_out)); + pfc_stall_in.hdr.opcode = + __cpu_to_be16(XSC_CMD_OP_IOCTL_QUERY_PFC_STALL_STATS); + pfc_stall_in.mac_port = xdev->mac_port; + + ret = xsc_cmd_exec(adapter->xdev, + (void *)&pfc_stall_in, + sizeof(struct xsc_pfc_stall_stats_mbox_in), + (void *)&pfc_stall_out, + sizeof(struct xsc_pfc_stall_stats_mbox_out)); + if (ret == 0 && pfc_stall_out.hdr.status == 0) { + for (i = 0; i < ARRAY_SIZE(pfc_stall_stats_desc); i++) { + val = XSC_READ_CTR64_CPU(&pfc_stall_out.pfc_stall_stats, + pfc_stall_stats_desc, i); + data[idx++] = __be64_to_cpu(val); + } + } + } + } else { + if 
(!ret_s && !stats_eth.is_pf) { + stats = (u8 *)&stats_eth.stats.vf_stats; + for (i = 0 ; i < ARRAY_SIZE(hw_eth_stats_vf_desc); i++) { + val = XSC_READ_CTR64_CPU(stats, hw_eth_stats_vf_desc, i); + data[idx++] = __be64_to_cpu(val); + } + } + } + + return idx; +} + +/* The stats groups order is opposite to the update_stats() order calls */ +const struct xsc_stats_grp xsc_stats_grps[] = { + { + .get_num_stats = xsc_grp_sw_get_num_stats, + .fill_strings = xsc_grp_sw_fill_strings, + .fill_stats = xsc_grp_sw_fill_stats, + .update_stats = xsc_grp_sw_update_stats, + }, + + { + .get_num_stats = xsc_grp_channels_get_num_stats, + .fill_strings = xsc_grp_channels_fill_strings, + .fill_stats = xsc_grp_channels_fill_stats, + }, + + { + .get_num_stats = xsc_hw_get_num_stats, + .fill_strings = xsc_hw_fill_strings, + .fill_stats = xsc_hw_fill_stats, + }, +}; + +const int xsc_num_stats_grps = ARRAY_SIZE(xsc_stats_grps); + +void xsc_fold_sw_stats64(struct xsc_adapter *adapter, struct rtnl_link_stats64 *s) +{ + int i, j; + + for (i = 0; i < xsc_get_netdev_max_channels(adapter); i++) { + struct xsc_channel_stats *channel_stats = &adapter->stats->channel_stats[i]; + struct xsc_rq_stats *rq_stats = &channel_stats->rq; + + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; + + for (j = 0; j < xsc_get_netdev_max_tc(adapter); j++) { + struct xsc_sq_stats *sq_stats = &channel_stats->sq[j]; + + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; + s->tx_dropped += sq_stats->dropped; + } + } +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h new file mode 100644 index 000000000000..069c5d8ad0db --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h @@ -0,0 +1,183 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_EN_STATS_H +#define XSC_EN_STATS_H + +#include "xsc_eth_common.h" + +#define XSC_READ_CTR64_CPU(ptr, dsc, i) \ + (*(u64 *)((char *)(ptr) + (dsc)[i].offset)) + +#define ETH_GSTRING_LEN 32 + +#define XSC_DECLARE_STAT(type, fld) ""#fld, offsetof(type, fld) +#define XSC_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) +#define XSC_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld) +#define XSC_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld) + +#define XSC_DECLARE_HW_PRIO_STAT_NAME(fld, prio) (#fld "_prio"#prio) +#define XSC_DECLARE_HW_PRIO_STAT_OFFSET(type, fld, prio) \ + (offsetof(type, fld) + (sizeof(type) * (prio))) +#define XSC_DECLARE_HW_PRIO_STAT(type, fld, prio) \ + {XSC_DECLARE_HW_PRIO_STAT_NAME(fld, prio), \ + XSC_DECLARE_HW_PRIO_STAT_OFFSET(type, fld, prio)} + +struct xsc_rq_stats { + u64 packets; + u64 bytes; + u64 csum_unnecessary; + u64 csum_none; + u64 csum_err; + u64 csum_succ; + u64 cqes; + u64 cqe_err; + u64 wqes; + u64 wqe_err; + u64 oversize_pkts_sw_drop; + u64 oversize_pkts_err; + u64 buff_alloc_err; + u64 cache_reuse; + u64 cache_full; + u64 cache_empty; + u64 cache_busy; + u64 cache_alloc; + u64 cache_waive; + u64 cache_ext; + u64 cache_rdc; + u64 dim_us; + u64 dim_pkts; +}; + +struct xsc_sq_stats { + /* commonly accessed in data path */ + u64 packets; + u64 bytes; + u64 tso_packets; + u64 tso_bytes; + u64 tso_inner_packets; + u64 tso_inner_bytes; + u64 csum_partial; + u64 csum_partial_inner; + /* less likely accessed in data path */ + u64 csum_none; + u64 stopped; + u64 dropped; + u64 xmit_more; + /* dirtied @completion */ + u64 cqes; + u64 wake; + u64 cqe_err; + u64 oversize_pkts_sw_drop; + u64 txdone_skb_null; + u64 txdone_skb_refcnt_err; + u64 skb_linear; + u64 dim_us; + u64 dim_pkts; +}; + +struct xsc_ch_stats { + u64 events; + u64 poll; + u64 poll_0; + u64 poll_1_63; + u64 poll_64_511; + u64 poll_512_1023; + u64 poll_1024; + u64 poll_tx; + u64 arm; + u64 noarm; + u64 aff_change; +} 
____cacheline_aligned_in_smp; + +struct xsc_adapter; +struct xsc_stats_grp { + u16 update_stats_mask; + int (*get_num_stats)(struct xsc_adapter *adapter); + int (*fill_strings)(struct xsc_adapter *adapter, u8 *data, int idx); + int (*fill_stats)(struct xsc_adapter *adapter, u64 *data, int idx); + void (*update_stats)(struct xsc_adapter *adapter); +}; + +struct counter_desc { + char format[ETH_GSTRING_LEN]; + size_t offset; /* Byte offset */ +}; + +struct xsc_sw_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 tx_tso_packets; + u64 tx_tso_bytes; + u64 tx_tso_inner_packets; + u64 tx_tso_inner_bytes; + u64 rx_csum_unnecessary; + u64 rx_csum_none; + u64 rx_csum_err; + u64 rx_csum_succ; + u64 tx_csum_none; + u64 tx_csum_partial; + u64 tx_csum_partial_inner; + u64 tx_queue_stopped; + u64 tx_queue_dropped; + u64 tx_xmit_more; + u64 tx_cqes; + u64 tx_queue_wake; + u64 tx_cqe_err; + u64 tx_oversize_pkts_sw_drop; + u64 tx_dim_us; + u64 tx_dim_pkts; + u64 txdone_skb_null; + u64 txdone_skb_refcnt_err; + u64 skb_linear; + u64 rx_cqes; + u64 rx_cqe_err; + u64 rx_wqes; + u64 rx_wqe_err; + u64 rx_oversize_pkts_sw_drop; + u64 rx_oversize_pkts_err; + u64 rx_buff_alloc_err; + u64 rx_cache_reuse; + u64 rx_cache_full; + u64 rx_cache_empty; + u64 rx_cache_busy; + u64 rx_cache_alloc; + u64 rx_cache_waive; + u64 rx_cache_ext; + u64 rx_cache_rdc; + u64 rx_dim_us; + u64 rx_dim_pkts; + u64 ch_events; + u64 ch_poll; + u64 ch_poll_0; + u64 ch_poll_1_63; + u64 ch_poll_64_511; + u64 ch_poll_512_1023; + u64 ch_poll_1024; + u64 ch_poll_tx; + u64 ch_arm; + u64 ch_noarm; + u64 ch_aff_change; +}; + +struct xsc_channel_stats { + struct xsc_ch_stats ch; + struct xsc_sq_stats sq[XSC_MAX_NUM_TC]; + struct xsc_rq_stats rq; +} ____cacheline_aligned_in_smp; + +struct xsc_stats { + struct xsc_sw_stats sw; + struct xsc_channel_stats channel_stats[XSC_ETH_MAX_NUM_CHANNELS]; +}; + +extern const struct xsc_stats_grp xsc_stats_grps[]; +extern const int xsc_num_stats_grps; + +void 
xsc_fold_sw_stats64(struct xsc_adapter *adapter, struct rtnl_link_stats64 *s); + +#endif /* XSC_EN_STATS_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c new file mode 100644 index 000000000000..8709b22c3b87 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_sysfs.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include + +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" + +#include "xsc_eth.h" + +static void pcie_lat_hw_work(struct work_struct *work) +{ + int err; + struct delayed_work *dwork = to_delayed_work(work); + struct xsc_pcie_lat_work *pcie_lat = container_of(dwork, struct xsc_pcie_lat_work, work); + struct xsc_core_device *xdev = pcie_lat->xdev; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_HW); + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "Failed to run pcie_lat hw, err(%u), status(%u)\n", + err, out.hdr.status); + } + schedule_delayed_work_on(smp_processor_id(), dwork, + msecs_to_jiffies(pcie_lat->period * 1000)); +} + +static void pcie_lat_hw_init(struct xsc_core_device *xdev) +{ + int err; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_HW_INIT); + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct 
xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "Failed to run pcie_lat hw, err(%u), status(%u)\n", + err, out.hdr.status); + } +} + +static ssize_t pcie_lat_enable_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_EN); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to get pcie_lat en, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return sprintf(buf, "%hhu\n", out.pcie_lat.pcie_lat_enable); +} + +static ssize_t pcie_lat_enable_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + struct xsc_pcie_lat_work *pcie_lat = adapter->xdev->pcie_lat; + int err; + u16 pcie_lat_enable; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + err = kstrtou16(buf, 0, &pcie_lat_enable); + if (err != 0) + return -EINVAL; + + if (pcie_lat_enable != XSC_PCIE_LAT_EN_DISABLE && + pcie_lat_enable != XSC_PCIE_LAT_EN_ENABLE) { + xsc_core_err(adapter->xdev, + "pcie_lat_enable should be set as %d or %d, cannot be %d\n", + XSC_PCIE_LAT_EN_DISABLE, XSC_PCIE_LAT_EN_ENABLE, + pcie_lat_enable); + return -EPERM; + } + + if (pcie_lat_enable == XSC_PCIE_LAT_EN_ENABLE && + pcie_lat->enable == XSC_PCIE_LAT_EN_DISABLE) { + pcie_lat_hw_init(adapter->xdev); + pcie_lat->adapter = adapter; + 
INIT_DELAYED_WORK(&pcie_lat->work, pcie_lat_hw_work); + schedule_delayed_work_on(smp_processor_id(), &pcie_lat->work, + msecs_to_jiffies(pcie_lat->period * 1000)); + } else if (pcie_lat_enable == XSC_PCIE_LAT_EN_DISABLE && + pcie_lat->enable == XSC_PCIE_LAT_EN_ENABLE) { + cancel_delayed_work_sync(&pcie_lat->work); + } + + pcie_lat->enable = pcie_lat_enable; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_SET_EN); + in.pcie_lat.pcie_lat_enable = pcie_lat_enable; + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to set pcie_lat en, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return count; +} + +static DEVICE_ATTR_RW(pcie_lat_enable); + +static ssize_t pcie_lat_interval_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err, i; + u32 count = 0; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_INTERVAL); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to get pcie_lat interval, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + for (i = 0; i < (XSC_PCIE_LAT_CFG_INTERVAL_MAX - 1); i++) + count += sprintf(&buf[count], "%u,", + __be32_to_cpu(out.pcie_lat.pcie_lat_interval[i])); + + count += sprintf(&buf[count], "%u\n", 
__be32_to_cpu(out.pcie_lat.pcie_lat_interval[i])); + + return count; +} + +static DEVICE_ATTR_RO(pcie_lat_interval); + +static ssize_t pcie_lat_period_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + struct xsc_pcie_lat_work *tmp = adapter->xdev->pcie_lat; + + return sprintf(buf, "%u\n", tmp->period); +} + +static ssize_t pcie_lat_period_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + struct xsc_pcie_lat_work *tmp = adapter->xdev->pcie_lat; + int err; + u32 pcie_lat_period; + + err = kstrtouint(buf, 0, &pcie_lat_period); + if (err != 0) + return -EINVAL; + + if (pcie_lat_period < XSC_PCIE_LAT_PERIOD_MIN || + pcie_lat_period > XSC_PCIE_LAT_PERIOD_MAX) { + xsc_core_err(adapter->xdev, "pcie_lat_period should be set between [%d-%d], cannot be %d\n", + XSC_PCIE_LAT_PERIOD_MIN, XSC_PCIE_LAT_PERIOD_MAX, + pcie_lat_period); + return -EPERM; + } + + tmp->period = pcie_lat_period; + + return count; +} + +static DEVICE_ATTR_RW(pcie_lat_period); + +static ssize_t pcie_lat_histogram_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int i, err; + u32 count = 0; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_HISTOGRAM); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, + "Failed to get pcie_lat histogram, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + for (i = 0; i < 
(XSC_PCIE_LAT_CFG_HISTOGRAM_MAX - 1); i++) + count += sprintf(&buf[count], "%u,", + __be32_to_cpu(out.pcie_lat.pcie_lat_histogram[i])); + + count += sprintf(&buf[count], "%u\n", __be32_to_cpu(out.pcie_lat.pcie_lat_histogram[i])); + + return count; +} + +static DEVICE_ATTR_RO(pcie_lat_histogram); + +static ssize_t pcie_lat_peak_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct xsc_adapter *adapter = netdev_priv(to_net_dev(device)); + int err; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_GET_PEAK); + + err = xsc_cmd_exec(adapter->xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) { + xsc_core_err(adapter->xdev, "Failed to get pcie_lat peak, err(%u), status(%u)\n", + err, out.hdr.status); + return -EINVAL; + } + + return sprintf(buf, "%u\n", __be32_to_cpu(out.pcie_lat.pcie_lat_peak)); +} + +static DEVICE_ATTR_RO(pcie_lat_peak); + +static struct attribute *pcie_lat_attrs[] = { + &dev_attr_pcie_lat_enable.attr, + &dev_attr_pcie_lat_interval.attr, + &dev_attr_pcie_lat_period.attr, + &dev_attr_pcie_lat_histogram.attr, + &dev_attr_pcie_lat_peak.attr, + NULL, +}; + +static struct attribute_group pcie_lat_group = { + .name = "pcie_lat", + .attrs = pcie_lat_attrs, +}; + +static int xsc_pcie_lat_sysfs_init(struct net_device *dev, struct xsc_core_device *xdev) +{ + int err = 0; + struct xsc_pcie_lat_work *tmp; + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + xdev->pcie_lat = tmp; + tmp->xdev = xdev; + + tmp->enable = XSC_PCIE_LAT_EN_DISABLE; + tmp->period = XSC_PCIE_LAT_PERIOD_MIN; + + err = sysfs_create_group(&dev->dev.kobj, &pcie_lat_group); + if (err) + goto remove_pcie_lat; + + return 0; + 
+remove_pcie_lat: + sysfs_remove_group(&dev->dev.kobj, &pcie_lat_group); + kfree(tmp); + + return err; +} + +static void xsc_pcie_lat_sysfs_fini(struct net_device *dev, struct xsc_core_device *xdev) +{ + int err; + struct xsc_pcie_lat_work *tmp; + struct xsc_pcie_lat_feat_mbox_in in; + struct xsc_pcie_lat_feat_mbox_out out; + + tmp = xdev->pcie_lat; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_PCIE_LAT_FEAT); + in.xsc_pcie_lat_feature_opcode = __cpu_to_be16(XSC_PCIE_LAT_FEAT_SET_EN); + in.pcie_lat.pcie_lat_enable = XSC_PCIE_LAT_EN_DISABLE; + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(struct xsc_pcie_lat_feat_mbox_in), + (void *)&out, sizeof(struct xsc_pcie_lat_feat_mbox_out)); + if (err || out.hdr.status) + xsc_core_err(xdev, "Failed to set pcie_lat disable, err(%u), status(%u)\n", + err, out.hdr.status); + + if (tmp->enable == XSC_PCIE_LAT_EN_ENABLE) + cancel_delayed_work_sync(&tmp->work); + + sysfs_remove_group(&dev->dev.kobj, &pcie_lat_group); + + if (!xdev->pcie_lat) + return; + + kfree(tmp); + xdev->pcie_lat = NULL; +} + +int xsc_eth_sysfs_create(struct net_device *dev, struct xsc_core_device *xdev) +{ + int err = 0; + + if (xsc_core_is_pf(xdev) && xdev->pf_id == 0) + err = xsc_pcie_lat_sysfs_init(dev, xdev); + + return err; +} + +void xsc_eth_sysfs_remove(struct net_device *dev, struct xsc_core_device *xdev) +{ + if (xsc_core_is_pf(xdev) && xdev->pf_id == 0) + xsc_pcie_lat_sysfs_fini(dev, xdev); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c new file mode 100644 index 000000000000..8f5b4ecd9ed9 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c @@ -0,0 +1,564 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include "xsc_eth_stats.h" +#include "xsc_eth_common.h" +#include "common/xsc_hsi.h" +#include "common/qp.h" +#include "xsc_eth.h" +#include "xsc_eth_txrx.h" + +#define XSC_OPCODE_RAW 0x7 + +static inline void *xsc_sq_fetch_wqe(struct xsc_sq *sq, size_t size, u16 *pi) +{ + struct xsc_wq_cyc *wq = &sq->wq; + void *wqe; + + /*caution, sp->pc is default to be zero*/ + *pi = xsc_wq_cyc_ctr2ix(wq, sq->pc); + wqe = xsc_wq_cyc_get_wqe(wq, *pi); + memset(wqe, 0, size); + + return wqe; +} + +u16 xsc_tx_get_gso_ihs(struct xsc_sq *sq, struct sk_buff *skb) +{ + struct xsc_sq_stats *stats = sq->stats; + u16 ihs; + + if (skb->encapsulation) { + ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); + stats->tso_inner_packets++; + stats->tso_inner_bytes += skb->len - ihs; + } else { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + ihs = skb_transport_offset(skb) + sizeof(struct udphdr); + else + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + stats->tso_packets++; + stats->tso_bytes += skb->len - ihs; + } + + return ihs; +} + +void xsc_txwqe_build_cseg_csum(struct xsc_sq *sq, + struct sk_buff *skb, + struct xsc_send_wqe_ctrl_seg *cseg) +{ + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + if (skb->encapsulation) { + cseg->csum_en = XSC_ETH_WQE_INNER_AND_OUTER_CSUM; + sq->stats->csum_partial_inner++; + } else { + cseg->csum_en = XSC_ETH_WQE_OUTER_CSUM; + sq->stats->csum_partial++; + } + } else { + cseg->csum_en = XSC_ETH_WQE_NONE_CSUM; + sq->stats->csum_none++; + } +} + +static inline struct xsc_sq_dma *xsc_dma_get(struct xsc_sq *sq, u32 i) +{ + return &sq->db.dma_fifo[i & sq->dma_fifo_mask]; +} + +static inline void xsc_dma_push(struct xsc_sq *sq, dma_addr_t addr, u32 size, + enum xsc_dma_map_type map_type) +{ + struct xsc_sq_dma *dma = xsc_dma_get(sq, sq->dma_fifo_pc++); + + dma->addr = addr; + dma->size = size; + dma->type = map_type; + ETH_DEBUG_LOG("dma = %p, dma->addr = %#llx\n", dma, dma->addr); +} + +static inline void 
xsc_tx_dma_unmap(struct device *dev, struct xsc_sq_dma *dma) +{ + switch (dma->type) { + case XSC_DMA_MAP_SINGLE: + dma_unmap_single(dev, dma->addr, dma->size, DMA_TO_DEVICE); + break; + case XSC_DMA_MAP_PAGE: + dma_unmap_page(dev, dma->addr, dma->size, DMA_TO_DEVICE); + break; + default: + ETH_DEBUG_LOG("%s\n", "xsc_tx_dma_unmap unknown DMA type!\n"); + } +} + +static void xsc_dma_unmap_wqe_err(struct xsc_sq *sq, u8 num_dma) +{ + struct xsc_adapter *adapter = sq->channel->adapter; + struct device *dev = adapter->dev; + + int i; + + for (i = 0; i < num_dma; i++) { + struct xsc_sq_dma *last_pushed_dma = xsc_dma_get(sq, --sq->dma_fifo_pc); + + xsc_tx_dma_unmap(dev, last_pushed_dma); + } +} + +static void xsc_txwqe_build_csegs(struct xsc_sq *sq, struct sk_buff *skb, + u16 mss, u16 ihs, u16 headlen, + u8 opcode, u16 ds_cnt, u32 num_bytes, + struct xsc_send_wqe_ctrl_seg *cseg) +{ + struct xsc_core_device *xdev = sq->cq.xdev; + int send_wqe_ds_num_log = ilog2(xdev->caps.send_ds_num); + + xsc_txwqe_build_cseg_csum(sq, skb, cseg); + + if (mss != 0) { + cseg->has_pph = 0; + cseg->so_type = 1; + cseg->so_hdr_len = ihs; + cseg->so_data_size = cpu_to_le16(mss); + } + + cseg->msg_opcode = opcode; + cseg->wqe_id = cpu_to_le16(sq->pc << send_wqe_ds_num_log); + cseg->ds_data_num = ds_cnt - XSC_SEND_WQEBB_CTRL_NUM_DS; + cseg->msg_len = cpu_to_le32(num_bytes); + + cseg->ce = 1; + + WQE_CSEG_DUMP("cseg", cseg); +} + +static int xsc_txwqe_build_dsegs(struct xsc_sq *sq, struct sk_buff *skb, + u16 ihs, u16 headlen, + struct xsc_wqe_data_seg *dseg) +{ + dma_addr_t dma_addr = 0; + u8 num_dma = 0; + int i; + struct xsc_adapter *adapter = sq->channel->adapter; + struct device *dev = adapter->dev; + + if (headlen) { + dma_addr = dma_map_single(dev, skb->data, headlen, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, dma_addr))) + goto dma_unmap_wqe_err; + + dseg->va = cpu_to_le64(dma_addr); + dseg->mkey = cpu_to_le32(sq->mkey_be); + dseg->seg_len = cpu_to_le32(headlen); + + 
WQE_DSEG_DUMP("dseg-headlen", dseg); + + xsc_dma_push(sq, dma_addr, headlen, XSC_DMA_MAP_SINGLE); + num_dma++; + dseg++; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + int fsz = skb_frag_size(frag); + + dma_addr = skb_frag_dma_map(dev, frag, 0, fsz, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, dma_addr))) + goto dma_unmap_wqe_err; + + dseg->va = cpu_to_le64(dma_addr); + dseg->mkey = cpu_to_le32(sq->mkey_be); + dseg->seg_len = cpu_to_le32(fsz); + + WQE_DSEG_DUMP("dseg-frag", dseg); + + xsc_dma_push(sq, dma_addr, fsz, XSC_DMA_MAP_PAGE); + num_dma++; + dseg++; + } + + return num_dma; + +dma_unmap_wqe_err: + xsc_dma_unmap_wqe_err(sq, num_dma); + return -ENOMEM; +} + +static inline bool xsc_wqc_has_room_for(struct xsc_wq_cyc *wq, + u16 cc, u16 pc, u16 n) +{ + return (xsc_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); +} + +static inline void xsc_sq_notify_hw(struct xsc_wq_cyc *wq, u16 pc, + struct xsc_sq *sq) +{ + struct xsc_adapter *adapter = sq->channel->adapter; + struct xsc_core_device *xdev = adapter->xdev; + union xsc_send_doorbell doorbell_value; + int send_ds_num_log = ilog2(xdev->caps.send_ds_num); + + /*reverse wqe index to ds index*/ + doorbell_value.next_pid = pc << send_ds_num_log; + doorbell_value.qp_num = sq->sqn; + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + ETH_DEBUG_LOG("pc = %d sqn = %d\n", pc, sq->sqn); + ETH_DEBUG_LOG("doorbell_value = %#x\n", doorbell_value.send_data); + writel(doorbell_value.send_data, REG_ADDR(xdev, xdev->regs.tx_db)); +} + +void xsc_txwqe_complete(struct xsc_sq *sq, struct sk_buff *skb, + u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma, + struct xsc_tx_wqe_info *wi) +{ + struct xsc_wq_cyc *wq = &sq->wq; + + wi->num_bytes = num_bytes; + wi->num_dma = num_dma; + wi->num_wqebbs = num_wqebbs; + wi->skb = skb; + +#ifdef XSC_BQL_SUPPORT + ETH_SQ_STATE(sq); + 
netdev_tx_sent_queue(sq->txq, num_bytes); + ETH_SQ_STATE(sq); +#endif + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + ETH_DEBUG_LOG("%s\n", "hw tstamp\n"); + } + + /*1*/ + sq->pc += wi->num_wqebbs; + ETH_DEBUG_LOG("%d\n", sq->pc); + + if (unlikely(!xsc_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) { + netif_tx_stop_queue(sq->txq); + sq->stats->stopped++; + ETH_DEBUG_LOG("%p %d %d %d\n", wq, sq->cc, sq->pc, sq->stop_room); + } + + ETH_DEBUG_LOG("%d %d\n", xsc_netdev_xmit_more(skb), netif_xmit_stopped(sq->txq)); + + if (!xsc_netdev_xmit_more(skb) || netif_xmit_stopped(sq->txq)) + xsc_sq_notify_hw(wq, sq->pc, sq); +} + +static void xsc_dump_error_sqcqe(struct xsc_sq *sq, + struct xsc_cqe *cqe) +{ + u32 ci = xsc_cqwq_get_ci(&sq->cq.wq); + struct net_device *netdev = sq->channel->netdev; + + net_err_ratelimited("Err cqe on dev %s cqn=0x%x ci=0x%x sqn=0x%x err_code=0x%x qpid=0x%x\n", + netdev->name, sq->cq.xcq.cqn, ci, + sq->sqn, get_cqe_opcode(cqe), cqe->qp_id); +} + +void xsc_free_tx_wqe(struct device *dev, struct xsc_sq *sq) +{ + struct xsc_tx_wqe_info *wi; + struct sk_buff *skb; + u16 ci, npkts = 0; + u32 nbytes = 0; + int i; + + while (sq->cc != sq->pc) { + ci = xsc_wq_cyc_ctr2ix(&sq->wq, sq->cc); + wi = &sq->db.wqe_info[ci]; + skb = wi->skb; + + if (!skb) { /* nop */ + sq->cc++; + continue; + } + + for (i = 0; i < wi->num_dma; i++) { + struct xsc_sq_dma *dma = + xsc_dma_get(sq, sq->dma_fifo_cc++); + + xsc_tx_dma_unmap(dev, dma); + } + + dev_kfree_skb_any(skb); + npkts++; + nbytes += wi->num_bytes; + sq->cc += wi->num_wqebbs; + } + +#ifdef XSC_BQL_SUPPORT + netdev_tx_completed_queue(sq->txq, npkts, nbytes); +#endif +} + +#ifdef NEED_CREATE_RX_THREAD + DECLARE_PER_CPU(bool, txcqe_get); +#endif + +bool xsc_poll_tx_cq(struct xsc_cq *cq, int napi_budget) +{ + struct xsc_adapter *adapter; + struct device *dev; + struct xsc_sq_stats *stats; + struct xsc_sq *sq; + struct xsc_cqe *cqe; + u32 
dma_fifo_cc; + u32 nbytes = 0; + u16 npkts = 0; + u16 sqcc; + int i = 0; + + sq = container_of(cq, struct xsc_sq, cq); + if (!test_bit(XSC_ETH_SQ_STATE_ENABLED, &sq->state)) + return false; + + adapter = sq->channel->adapter; + dev = adapter->dev; + + cqe = xsc_cqwq_get_cqe(&cq->wq); + if (!cqe) + goto out; + + stats = sq->stats; + + if (unlikely(get_cqe_opcode(cqe) & BIT(7))) { + xsc_dump_error_sqcqe(sq, cqe); + stats->cqe_err++; + return false; + } + +#ifdef NEED_CREATE_RX_THREAD + __this_cpu_write(txcqe_get, true); +#endif + + sqcc = sq->cc; + + /* avoid dirtying sq cache line every cqe */ + dma_fifo_cc = sq->dma_fifo_cc; + i = 0; + do { + struct xsc_tx_wqe_info *wi; + struct sk_buff *skb; + int j; + u16 ci; + + xsc_cqwq_pop(&cq->wq); + + ci = xsc_wq_cyc_ctr2ix(&sq->wq, sqcc); + wi = &sq->db.wqe_info[ci]; + skb = wi->skb; + + /*cqe may be overstanding in real test, not by nop in other*/ + if (unlikely(!skb)) { + stats->txdone_skb_null++; + continue; + } + + for (j = 0; j < wi->num_dma; j++) { + struct xsc_sq_dma *dma = xsc_dma_get(sq, dma_fifo_cc++); + + xsc_tx_dma_unmap(dev, dma); + } + +#ifndef NEED_CREATE_RX_THREAD + npkts++; + nbytes += wi->num_bytes; + sqcc += wi->num_wqebbs; + napi_consume_skb(skb, napi_budget); +#else + npkts++; + nbytes += wi->num_bytes; + sqcc += wi->num_wqebbs; + if (refcount_read(&skb->users) < 1) + stats->txdone_skb_refcnt_err++; + napi_consume_skb(skb, 0); +#endif + ETH_DEBUG_LOG("ci=%d, sqcc=%d, pkts=%d\n", ci, sqcc, npkts); + + } while ((++i <= napi_budget) && (cqe = xsc_cqwq_get_cqe(&cq->wq))); + + stats->cqes += i; + + xsc_cq_notify_hw(cq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + sq->dma_fifo_cc = dma_fifo_cc; + sq->cc = sqcc; + ETH_DEBUG_LOG("dma_fifo_cc=%d, sqcc=%d\n", dma_fifo_cc, sqcc); + +#ifdef XSC_BQL_SUPPORT + ETH_SQ_STATE(sq); + netdev_tx_completed_queue(sq->txq, npkts, nbytes); + ETH_SQ_STATE(sq); +#endif + + if (netif_tx_queue_stopped(sq->txq) && + xsc_wqc_has_room_for(&sq->wq, 
sq->cc, sq->pc, sq->stop_room)) {
+		netif_tx_wake_queue(sq->txq);
+		stats->wake++;
+	}
+
+out:
+	return (i == napi_budget);
+}
+
+/* Build one send WQE for @skb on @sq at producer slot @pi and post it.
+ * On any failure the skb is dropped (freed) and stats->dropped is bumped;
+ * the function always reports NETDEV_TX_OK so the stack never requeues.
+ */
+static uint32_t xsc_eth_xmit_frame(struct sk_buff *skb,
+				   struct xsc_sq *sq,
+				   struct xsc_tx_wqe *wqe,
+				   u16 pi)
+{
+	struct xsc_send_wqe_ctrl_seg *cseg;
+	struct xsc_wqe_data_seg *dseg;
+	struct xsc_tx_wqe_info *wi;
+	struct xsc_sq_stats *stats = sq->stats;
+	struct xsc_core_device *xdev = sq->cq.xdev;
+	u16 ds_cnt;
+	u16 mss, ihs, headlen;
+	u8 opcode;
+	u32 num_bytes;
+	/* num_dma must be signed: the "num_dma < 0" error check below can
+	 * never be true for a u32, so DMA-mapping failures reported by
+	 * xsc_txwqe_build_dsegs() as a negative value were silently
+	 * ignored.  Both counters are zero-initialized so the err_drop
+	 * trace does not read uninitialized values when we bail out
+	 * before they are computed (e.g. the oversize check).
+	 */
+	int num_dma = 0;
+	u8 num_wqebbs = 0;
+
+retry_send:
+	/* Calc ihs and ds cnt, no writes to wqe yet */
+	/* ctrl-ds, it would be reduce in ds_data_num */
+	ds_cnt = XSC_SEND_WQEBB_CTRL_NUM_DS;
+
+	/* in andes inline is bonding with gso */
+	if (skb_is_gso(skb)) {
+		opcode = XSC_OPCODE_RAW;
+		mss = skb_shinfo(skb)->gso_size;
+		ihs = xsc_tx_get_gso_ihs(sq, skb);
+		num_bytes = skb->len;
+		stats->packets += skb_shinfo(skb)->gso_segs;
+	} else {
+		opcode = XSC_OPCODE_RAW;
+		mss = 0;
+		ihs = 0;
+		num_bytes = skb->len;
+		stats->packets++;
+	}
+
+	/* linear data in skb */
+	headlen = skb->len - skb->data_len;
+	ds_cnt += !!headlen;
+	ds_cnt += skb_shinfo(skb)->nr_frags;
+	ETH_DEBUG_LOG("skb_len=%d data_len=%d nr_frags=%d mss=%d ihs=%d headlen=%d ds_cnt=%d\n",
+		      skb->len, skb->data_len, skb_shinfo(skb)->nr_frags,
+		      mss, ihs, headlen, ds_cnt);
+
+	/* to make the connection, only linear data is present */
+	skbdata_debug_dump(skb, headlen, 1);
+
+	/* Check packet size.
+	 */
+	if (unlikely(mss == 0 && num_bytes > sq->hw_mtu)) {
+		sq->stats->oversize_pkts_sw_drop++;
+		goto err_drop;
+	}
+
+	num_wqebbs = DIV_ROUND_UP(ds_cnt, xdev->caps.send_ds_num);
+	/* if ds_cnt exceed one wqe, linearize the skb and retry once the
+	 * frag list has been collapsed into the linear area
+	 */
+	if (num_wqebbs != 1) {
+		sq->stats->skb_linear++;
+		if (skb_linearize(skb))
+			goto err_drop;
+		goto retry_send;
+	}
+
+	/* fill wqe */
+	wi = (struct xsc_tx_wqe_info *)&sq->db.wqe_info[pi];
+	cseg = &wqe->ctrl;
+	dseg = &wqe->data[0];
+
+	if (unlikely(num_bytes == 0))
+		goto err_drop;
+
+	xsc_txwqe_build_csegs(sq, skb, mss, ihs, headlen,
+			      opcode, ds_cnt, num_bytes, cseg);
+
+	/* inline header is also use dma to transport */
+	num_dma = xsc_txwqe_build_dsegs(sq, skb, ihs, headlen, dseg);
+	if (unlikely(num_dma < 0))
+		goto err_drop;
+
+	xsc_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
+			   num_dma, wi);
+
+	stats->bytes += num_bytes;
+	stats->xmit_more += xsc_netdev_xmit_more(skb);
+
+	sq->dim_obj.sample.pkt_ctr = sq->stats->packets;
+	sq->dim_obj.sample.byte_ctr = sq->stats->bytes;
+
+	return NETDEV_TX_OK;
+
+err_drop:
+	ETH_DEBUG_LOG("%s: drop skb, ds_cnt=%d, num_wqebbs=%d, num_dma=%d\n",
+		      __func__, ds_cnt, num_wqebbs, num_dma);
+	stats->dropped++;
+	dev_kfree_skb_any(skb);
+
+	return NETDEV_TX_OK;
+}
+
+netdev_tx_t xsc_eth_xmit_start(struct sk_buff *skb, struct net_device *netdev)
+{
+	u32 ret;
+	u32 queue_id;
+	struct xsc_sq *sq;
+	struct xsc_tx_wqe *wqe;
+	u16 pi;
+	struct xsc_adapter *adapter = netdev_priv(netdev);
+	struct xsc_core_device *xdev = adapter->xdev;
+
+	if (!skb) {
+		ETH_DEBUG_LOG("skb == NULL\n");
+		return NETDEV_TX_OK;
+	}
+
+	if (!adapter) {
+		ETH_DEBUG_LOG("adapter == NULL\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	if (adapter->status != XSCALE_ETH_DRIVER_OK) {
+		ETH_DEBUG_LOG("adapter->status = %d\n", adapter->status);
+		return NETDEV_TX_BUSY;
+	}
+
+	queue_id = skb_get_queue_mapping(skb);
+	ETH_DEBUG_LOG("queue_id = %d\n", queue_id);
+	assert(adapter->xdev, queue_id < XSC_ETH_MAX_TC_TOTAL);
+
+	sq = 
adapter->txq2sq[queue_id]; + if (!sq) { + ETH_DEBUG_LOG("sq = NULL\n"); + return NETDEV_TX_BUSY; + } + ETH_DEBUG_LOG("sqn = %d\n", sq->sqn); + + wqe = xsc_sq_fetch_wqe(sq, xdev->caps.send_ds_num * XSC_SEND_WQE_DS, &pi); + ETH_DEBUG_LOG("wqe = %p pi = %d\n", wqe, pi); + assert(adapter->xdev, wqe); + +#ifndef ANDES_DRIVER + skb = xsc_accel_handle_tx(skb); +#endif + + ret = xsc_eth_xmit_frame(skb, sq, wqe, pi); + + ETH_DEBUG_LOG("ret = %d\n", ret); + + return ret; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c new file mode 100644 index 000000000000..13699c6dd0dc --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "xsc_eth_common.h" +#include "xsc_eth_stats.h" +#include "xsc_eth_txrx.h" +#include "xsc_eth_dim.h" + +void xsc_cq_notify_hw_rearm(struct xsc_cq *cq) +{ + union xsc_cq_doorbell db; + + ETH_DEBUG_LOG("cc = %d cqn = %d\n", cq->wq.cc, cq->xcq.cqn); + + db.val = 0; + db.cq_next_cid = cpu_to_le32(cq->wq.cc); + db.cq_id = cpu_to_le32(cq->xcq.cqn); + db.arm = 0; + + /* ensure doorbell record is visible to device before ringing the doorbell */ + wmb(); + writel(db.val, REG_ADDR(cq->xdev, cq->xdev->regs.complete_db)); + if (cq->channel && cq->channel->stats) + cq->channel->stats->arm++; +} + +void xsc_cq_notify_hw(struct xsc_cq *cq) +{ + struct xsc_core_device *xdev = cq->xdev; + union xsc_cq_doorbell db; + + ETH_DEBUG_LOG("cc = %d cqn = %d\n", cq->wq.cc, cq->xcq.cqn); + + dma_wmb(); + + db.val = 0; + db.cq_next_cid = cpu_to_le32(cq->wq.cc); + db.cq_id = cpu_to_le32(cq->xcq.cqn); + + writel(db.val, REG_ADDR(xdev, xdev->regs.complete_reg)); + if (cq->channel && cq->channel->stats) + cq->channel->stats->noarm++; +} + +static inline bool xsc_channel_no_affinity_change(struct xsc_channel *c) +{ + int 
current_cpu = smp_processor_id(); + + return cpumask_test_cpu(current_cpu, c->aff_mask); +} + +enum hrtimer_restart xsc_dim_reduce_timer_fn(struct hrtimer *timer) +{ + struct xsc_dim_reduce_work *reduce = (struct xsc_dim_reduce_work *)timer; + struct xsc_cq *cq = container_of(reduce, struct xsc_cq, cq_reduce); + + xsc_cq_notify_hw_rearm(cq); + + return HRTIMER_NORESTART; +} + +int xsc_eth_napi_poll(struct napi_struct *napi, int budget) +{ + struct xsc_channel *c = container_of(napi, struct xsc_channel, napi); + struct xsc_eth_params *params = &c->adapter->nic_param; + struct xsc_rq *rq = &c->qp.rq[0]; + struct xsc_sq *sq = NULL; + bool busy = false; + int work_done = 0; + int tx_budget = 0; + int i; + + rcu_read_lock(); + + clear_bit(XSC_CHANNEL_NAPI_SCHED, &c->flags); + + tx_budget = params->sq_size >> 2; + for (i = 0; i < c->num_tc; i++) + busy |= xsc_poll_tx_cq(&c->qp.sq[i].cq, tx_budget); + + /* budget=0 means: don't poll rx rings */ + if (likely(budget)) { + work_done = xsc_poll_rx_cq(&rq->cq, budget); + busy |= work_done == budget; + } + + busy |= rq->post_wqes(rq); + + if (busy) { + if (likely(xsc_channel_no_affinity_change(c))) { + rcu_read_unlock(); + return budget; + } + c->stats->aff_change++; + if (budget && work_done == budget) + work_done--; + } + +#ifdef NETDEV_NAPI_COMP_DONE_RETURN_VOID + napi_complete_done(napi, work_done); +#else + if (unlikely(!napi_complete_done(napi, work_done))) + goto out; +#endif + + for (i = 0; i < c->num_tc; i++) { + sq = &c->qp.sq[i]; + + if (test_bit(XSC_ETH_SQ_STATE_AM, &sq->state)) { + struct xsc_dim_reduce_work *reduce_sq = NULL; + u32 dim_us_tx = params->tx_cq_moderation.usec; + + xsc_handle_tx_dim(sq); + + reduce_sq = &sq->cq.cq_reduce; + if (hrtimer_is_queued(&reduce_sq->timer)) + continue; + + dim_us_tx = min_t(u32, sq->cq.xcq.dim_us, dim_us_tx); + sq->stats->dim_us = dim_us_tx; + if (dim_us_tx) { + hrtimer_start(&reduce_sq->timer, + ns_to_ktime(dim_us_tx * NSEC_PER_USEC), + HRTIMER_MODE_REL_PINNED); + continue; + 
} + } + xsc_cq_notify_hw_rearm(&sq->cq); + } + + if (test_bit(XSC_ETH_RQ_STATE_AM, &rq->state)) { + struct xsc_dim_reduce_work *reduce = &rq->cq.cq_reduce; + u32 dim_us = params->rx_cq_moderation.usec; + + xsc_handle_rx_dim(rq); + + if (c->stats->poll <= params->rx_dim_frames_low) { + dim_us = 0; + if (c->stats->poll == 0 && hrtimer_is_queued(&reduce->timer)) + goto out; + } else { + dim_us = min_t(u32, rq->cq.xcq.dim_us, dim_us); + } + rq->stats->dim_us = dim_us; + + if (dim_us) { + if (hrtimer_is_queued(&reduce->timer)) + goto out; + + reduce->dim_us = dim_us; + + if (dim_us <= params->rx_dim_usecs_low) { + udelay(dim_us); + xsc_cq_notify_hw_rearm(&rq->cq); + } else { + hrtimer_start(&reduce->timer, + ns_to_ktime(dim_us * NSEC_PER_USEC), + HRTIMER_MODE_REL_PINNED); + } + goto out; + } + } + + xsc_cq_notify_hw_rearm(&rq->cq); + +#ifndef NETDEV_NAPI_COMP_DONE_RETURN_VOID +out: +#endif + rcu_read_unlock(); + return work_done; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h new file mode 100644 index 000000000000..005f1ae4a55a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_RXTX_H +#define XSC_RXTX_H + +#include "xsc_eth.h" +#include "common/qp.h" +#include "xsc_eth_debug.h" + +enum { + XSC_ETH_WQE_NONE_CSUM, + XSC_ETH_WQE_INNER_CSUM, + XSC_ETH_WQE_OUTER_CSUM, + XSC_ETH_WQE_INNER_AND_OUTER_CSUM, +}; + +#define ANDES_DRIVER + +static inline u32 xsc_cqwq_get_size(struct xsc_cqwq *wq) +{ + return wq->fbc.sz_m1 + 1; +} + +static inline struct xsc_cqe *xsc_cqwq_get_wqe(struct xsc_cqwq *wq, u32 ix) +{ + struct xsc_cqe *cqe = xsc_frag_buf_get_wqe(&wq->fbc, ix); + + ETH_DEBUG_LOG("cqe = %p\n", cqe); + + return cqe; +} + +static inline struct xsc_cqe *xsc_cqwq_get_cqe(struct xsc_cqwq *wq) +{ + struct xsc_cqe *cqe; + u8 cqe_ownership_bit; + u8 sw_ownership_val; + u32 ci = xsc_cqwq_get_ci(wq); + + cqe = xsc_cqwq_get_wqe(wq, ci); + + cqe_ownership_bit = cqe->owner & XSC_CQE_OWNER_MASK; + sw_ownership_val = xsc_cqwq_get_wrap_cnt(wq) & 1; + ETH_DEBUG_LOG("ci=%d, cqe_owner=%d, sw_owner=%d\n", + ci, cqe_ownership_bit, sw_ownership_val); + + if (cqe_ownership_bit != sw_ownership_val) + return NULL; + + /* ensure cqe content is read after cqe ownership bit */ + dma_rmb(); + + return cqe; +} + +void xsc_free_tx_wqe(struct device *dev, struct xsc_sq *sq); +int xsc_eth_napi_poll(struct napi_struct *napi, int budget); +bool xsc_poll_tx_cq(struct xsc_cq *cq, int napi_budget); +int xsc_poll_rx_cq(struct xsc_cq *cq, int budget); +void xsc_eth_handle_rx_cqe(struct xsc_cqwq *cqwq, + struct xsc_rq *rq, struct xsc_cqe *cqe); +struct sk_buff *xsc_skb_from_cqe_linear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, u32 cqe_bcnt, u8 has_pph); +struct sk_buff *xsc_skb_from_cqe_nonlinear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph); +bool xsc_eth_post_rx_wqes(struct xsc_rq *rq); +void xsc_cq_notify_hw(struct xsc_cq *cq); +void xsc_cq_notify_hw_rearm(struct xsc_cq *cq); +void xsc_eth_dealloc_rx_wqe(struct xsc_rq *rq, u16 ix); +netdev_tx_t xsc_eth_xmit_start(struct sk_buff *skb, struct net_device *netdev); + +void 
xsc_page_release_dynamic(struct xsc_rq *rq, + struct xsc_dma_info *dma_info, + bool recycle); + +enum hrtimer_restart xsc_dim_reduce_timer_fn(struct hrtimer *timer); + +#endif /* XSC_RXTX_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c new file mode 100644 index 000000000000..7379574f1a7e --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_fs.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "xsc_eth.h" +#include "common/vport.h" +#include "common/xsc_fs.h" + +static int xsc_vport_context_update_vlans(struct xsc_adapter *adapter, + enum xsc_vlan_rule_type rule_type, + u16 vid, bool add) +{ + struct net_device *ndev = adapter->netdev; + struct xsc_core_device *xdev = adapter->xdev; + int err; + + err = xsc_modify_nic_vport_vlans(xdev, vid, add); + if (err) + netdev_err(ndev, "Failed to modify vport vid:%d rule_type:%d err:%d\n", + vid, rule_type, err); + return err; +} + +static int xsc_add_vlan_rule(struct xsc_adapter *adapter, + enum xsc_vlan_rule_type rule_type, u16 vid) +{ + return xsc_vport_context_update_vlans(adapter, rule_type, vid, true); +} + +static void xsc_del_vlan_rule(struct xsc_adapter *adapter, + enum xsc_vlan_rule_type rule_type, u16 vid) +{ + xsc_vport_context_update_vlans(adapter, rule_type, vid, false); +} + +static int xsc_vlan_rx_add_cvid(struct xsc_adapter *adapter, u16 vid) +{ + int err; + + set_bit(vid, adapter->fs.vlan.active_cvlans); + + err = xsc_add_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); + if (err) + clear_bit(vid, adapter->vlan_params.active_cvlans); + + return err; +} + +static int xsc_vlan_rx_add_svid(struct xsc_adapter *adapter, u16 vid) +{ + struct net_device *netdev = adapter->netdev; + int err; + + set_bit(vid, adapter->fs.vlan.active_svlans); + + err = xsc_add_vlan_rule(adapter, 
XSC_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); + if (err) { + clear_bit(vid, adapter->fs.vlan.active_svlans); + return err; + } + + /* Need to fix some features.. */ + netdev_update_features(netdev); + return err; +} + +int xsc_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + if (!vid) + return 0; + + if (be16_to_cpu(proto) == ETH_P_8021Q) + return xsc_vlan_rx_add_cvid(adapter, vid); + else if (be16_to_cpu(proto) == ETH_P_8021AD) + return xsc_vlan_rx_add_svid(adapter, vid); + + return -EOPNOTSUPP; +} + +int xsc_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct xsc_adapter *adapter = netdev_priv(dev); + + if (!vid) + return 0; + + if (be16_to_cpu(proto) == ETH_P_8021Q) { + clear_bit(vid, adapter->fs.vlan.active_cvlans); + xsc_del_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); + } else if (be16_to_cpu(proto) == ETH_P_8021AD) { + clear_bit(vid, adapter->fs.vlan.active_svlans); + xsc_del_vlan_rule(adapter, XSC_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); + netdev_update_features(dev); + } + + return 0; +} + +void xsc_set_rx_mode_work(struct work_struct *work) +{ + int err = 0; + struct xsc_adapter *adapter = container_of(work, struct xsc_adapter, + set_rx_mode_work); + struct net_device *dev = adapter->netdev; + struct xsc_l2_table *l2 = &adapter->fs.l2; + + bool rx_mode_enable = (adapter->status == XSCALE_ETH_DRIVER_OK); + bool promisc_enabled = rx_mode_enable && (dev->flags & IFF_PROMISC); + bool allmulti_enabled = rx_mode_enable && (dev->flags & IFF_ALLMULTI); + + bool enable_promisc = !l2->promisc_enabled && promisc_enabled; + bool disable_promisc = l2->promisc_enabled && !promisc_enabled; + bool enable_allmulti = !l2->allmulti_enabled && allmulti_enabled; + bool disable_allmulti = l2->allmulti_enabled && !allmulti_enabled; + bool change = enable_promisc | disable_promisc | enable_allmulti | disable_allmulti; + + if (change) + err = 
xsc_modify_nic_vport_promisc(adapter->xdev, + (enable_allmulti | disable_allmulti), + (enable_promisc | disable_promisc), + allmulti_enabled, promisc_enabled); + if (err) { + xsc_core_err(adapter->xdev, "failed to set rx mode, err = %d\n", err); + + return; + } + + l2->promisc_enabled = promisc_enabled; + l2->allmulti_enabled = allmulti_enabled; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.c new file mode 100644 index 000000000000..32eb74563e4b --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_port_ctrl.h" +#include "common/xsc_cmd.h" +#include "xsc_eth.h" +#include "xsc_eth_debug.h" + +static void precmd_rlimit_set(void *data, u32 mac_port) +{ + struct xsc_rate_limit_set *req = (struct xsc_rate_limit_set *)data; + + req->rate_cir = __cpu_to_be32(req->rate_cir); + req->limit_id = __cpu_to_be32(req->limit_id); +} + +static void postcmd_rlimit_get(void *data) +{ + struct xsc_rate_limit_get *resp = (struct xsc_rate_limit_get *)data; + int i; + + for (i = 0; i <= QOS_PRIO_MAX; i++) + resp->rate_cir[i] = __be32_to_cpu(resp->rate_cir[i]); + + resp->max_limit_id = __be32_to_cpu(resp->max_limit_id); +} + +static int xsc_dcbx_hw_qos_cmdq(struct xsc_core_device *xdev, u16 opcode, + void *inupt, + void *output, + u16 expect_req_size, + u16 expect_resp_size, + void (*precmdq)(void *, u32), + void (*postcmdq)(void *)) +{ + struct xsc_qos_mbox_in *in; + struct xsc_qos_mbox_out *out; + int err; + + in = kvzalloc(sizeof(*in) + expect_req_size, GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out) + expect_resp_size, GFP_KERNEL); + if (!out) + goto err_out; + + 
if (inupt)
+		memcpy(&in->data, inupt, expect_req_size);
+
+	in->hdr.opcode = __cpu_to_be16(opcode);
+	in->req_prfx.mac_port = xdev->mac_port;
+
+	if (precmdq)
+		precmdq((void *)in->data, xdev->mac_port);
+
+	err = xsc_cmd_exec(xdev, in, sizeof(*in) + expect_req_size, out,
+			   sizeof(*out) + expect_resp_size);
+	/* The old code ignored err and always returned 0, hiding mailbox
+	 * failures from the callers.  Propagate the error and skip the
+	 * response post-processing/copy-out on failure.
+	 */
+	if (err)
+		goto err_cmd;
+
+	if (postcmdq)
+		postcmdq((void *)out->data);
+
+	if (output)
+		memcpy(output, out->data, expect_resp_size);
+
+	kvfree(in);
+	kvfree(out);
+	return 0;
+
+err_cmd:
+	kvfree(in);
+	kvfree(out);
+	return err;
+
+err_out:
+	kvfree(in);
+err_in:
+	/* allocation failure: -ENOMEM, not the old misleading -EFAULT */
+	return -ENOMEM;
+}
+
+/* Wrapper for commands whose caller already provides a full mailbox
+ * in/out pair: it only stamps the opcode into the inbox header and
+ * executes the command, returning xsc_cmd_exec()'s result unchanged.
+ */
+static int xsc_dcbx_hw_common(struct xsc_core_device *xdev, u16 opcode,
+			      void *input,
+			      void *output,
+			      u16 expect_req_size,
+			      u16 expect_resp_size,
+			      void (*precmdq)(void *, u32),
+			      void (*postcmdq)(void *))
+{
+	int ret;
+	struct xsc_inbox_hdr *hdr;
+
+	hdr = (struct xsc_inbox_hdr *)input;
+	hdr->opcode = __cpu_to_be16(opcode);
+
+	ret = xsc_cmd_exec(xdev, (void *)input, expect_req_size,
+			   (void *)output, expect_resp_size);
+
+	return ret;
+}
+
+/* Dispatch one QoS/DCBX opcode to the matching mailbox helper with the
+ * request/response sizes that opcode expects.  Unknown opcodes are only
+ * logged and reported as success (0).
+ *
+ * Note: every case returns directly; the old code additionally placed a
+ * "fallthrough;" after each return, which is unreachable and a misuse of
+ * the pseudo-keyword (it must immediately precede the next case label on
+ * a reachable path), so those statements have been removed.
+ */
+int xsc_hw_kernel_call(struct xsc_core_device *xdev, u16 opcode, void *req, void *rsp)
+{
+	switch (opcode) {
+	case XSC_CMD_OP_IOCTL_GET_RATE_LIMIT:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    sizeof(struct xsc_rate_limit_get),
+					    sizeof(struct xsc_rate_limit_get),
+					    NULL, postcmd_rlimit_get);
+	case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    sizeof(struct xsc_rate_limit_set),
+					    0, precmd_rlimit_set, NULL);
+	case XSC_CMD_OP_IOCTL_GET_PFC:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    0, sizeof(struct xsc_pfc_get),
+					    NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_PFC:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    sizeof(struct xsc_pfc_set),
+					    sizeof(struct xsc_pfc_set),
+					    NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_TRUST_MODE:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp, 0,
+					    sizeof(struct xsc_trust_mode_get),
+					    NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_TRUST_MODE:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    sizeof(struct xsc_trust_mode_set), 0,
+					    NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_DSCP_PMT:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    0, sizeof(struct xsc_dscp_pmt_get),
+					    NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_DSCP_PMT:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    sizeof(struct xsc_dscp_pmt_set),
+					    0, NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_SP:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    0, sizeof(struct xsc_sp_get),
+					    NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_SP:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    sizeof(struct xsc_sp_set),
+					    0, NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_WEIGHT:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    0, sizeof(struct xsc_weight_get),
+					    NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_WEIGHT:
+		return xsc_dcbx_hw_qos_cmdq(xdev, opcode, req, rsp,
+					    sizeof(struct xsc_weight_set),
+					    0, NULL, NULL);
+	case XSC_CMD_OP_QUERY_PFC_PRIO_STATS:
+		return xsc_dcbx_hw_common(xdev, opcode, req, rsp,
+					  sizeof(struct xsc_pfc_prio_stats_mbox_in),
+					  sizeof(struct xsc_pfc_prio_stats_mbox_out),
+					  NULL, NULL);
+	case XSC_CMD_OP_GET_LLDP_STATUS:
+	case XSC_CMD_OP_SET_LLDP_STATUS:
+		return xsc_dcbx_hw_common(xdev, opcode, req, rsp,
+					  sizeof(struct xsc_lldp_status_mbox_in),
+					  sizeof(struct xsc_lldp_status_mbox_out),
+					  NULL, NULL);
+	case XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH:
+		return xsc_dcbx_hw_common(xdev, opcode, req, rsp,
+					  sizeof(struct xsc_pfc_set_drop_th_mbox_in),
+					  sizeof(struct xsc_pfc_set_drop_th_mbox_out),
+					  NULL, NULL);
+	case XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS:
+		return xsc_dcbx_hw_common(xdev, opcode, req, rsp,
+					  sizeof(struct xsc_pfc_get_cfg_status_mbox_in),
+					  sizeof(struct xsc_pfc_get_cfg_status_mbox_out),
+					  NULL, NULL);
+	default:
xsc_core_dbg(xdev, "unknown type=%d\n", opcode); + } + + return 0; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.h new file mode 100644 index 000000000000..a9043f85fa05 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_hw_comm.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_HW_COMMON_H +#define XSC_HW_COMMON_H + +int xsc_hw_kernel_call(struct xsc_core_device *xdev, u16 opcode, void *req, void *rsp); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h new file mode 100644 index 000000000000..bbf05a26c740 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h @@ -0,0 +1,280 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_QUEUE_H +#define XSC_QUEUE_H + +#include +#include + +#include + +#include "../pci/wq.h" + +enum { + XSC_SEND_WQE_DS = 16, + XSC_SEND_WQE_BB = 64, +}; + +enum { + XSC_RECV_WQE_DS = 16, + XSC_RECV_WQE_BB = 16, +}; + +#define XSC_SEND_WQEBB_NUM_DS (XSC_SEND_WQE_BB / XSC_SEND_WQE_DS) +#define XSC_LOG_SEND_WQEBB_NUM_DS ilog2(XSC_SEND_WQEBB_NUM_DS) + +#define XSC_RECV_WQEBB_NUM_DS (XSC_RECV_WQE_BB / XSC_RECV_WQE_DS) +#define XSC_LOG_RECV_WQEBB_NUM_DS ilog2(XSC_RECV_WQEBB_NUM_DS) + +#define XSC_SEND_WQEBB_CTRL_NUM_DS 1 + +enum { + XSC_ETH_RQ_STATE_ENABLED, + XSC_ETH_RQ_STATE_AM, + XSC_ETH_RQ_STATE_CACHE_REDUCE_PENDING, +}; + +enum { + XSC_ETH_SQ_STATE_ENABLED, + XSC_ETH_SQ_STATE_AM, +}; + +struct xsc_dma_info { + struct page *page; + dma_addr_t addr; +}; + +struct xsc_wqe_frag_info { + struct xsc_dma_info *di; + u32 offset; + u8 last_in_page; + u8 is_available; +}; + +struct xsc_rq_frag_info { + int frag_size; + int frag_stride; +}; + +struct xsc_rq_frags_info { + struct xsc_rq_frag_info arr[XSC_MAX_RX_FRAGS]; + u8 num_frags; + u8 log_num_frags; + u8 wqe_bulk; + u8 wqe_bulk_min; + u8 frags_max_num; +}; + +#define xsc_dim_t struct dim +#define xsc_dim_sample_t struct dim_sample +#define xsc_dim_cq_moder_t struct dim_cq_moder + +struct xsc_dim { + xsc_dim_t dim; + xsc_dim_sample_t sample; +}; + +struct xsc_dim_reduce_work { + struct hrtimer timer; + u32 dim_us; +}; + +struct xsc_cq { + /* data path - accessed per cqe */ + struct xsc_cqwq wq; + + /* data path - accessed per napi poll */ + u16 event_ctr; + struct napi_struct *napi; + struct xsc_core_cq xcq; + struct xsc_channel *channel; + + /* control */ + struct xsc_core_device *xdev; + struct xsc_wq_ctrl wq_ctrl; + u8 rx; + struct xsc_dim_reduce_work cq_reduce; +} ____cacheline_aligned_in_smp; + +struct xsc_pcie_lat_work { + struct xsc_core_device *xdev; + struct xsc_adapter *adapter; + struct delayed_work work; + u16 enable; + u32 period; +}; + +#define XSC_PAGE_CACHE_LOG_MAX_RQ_MULT 6 +#define 
XSC_PAGE_CACHE_REDUCE_WORK_INTERVAL 200 /* msecs */ +#define XSC_PAGE_CACHE_REDUCE_GRACE_PERIOD 1000 /* msecs */ +#define XSC_PAGE_CACHE_REDUCE_SUCCESS_CNT 4 + +struct xsc_page_cache_reduce { + struct delayed_work reduce_work; + u32 success; + unsigned long next_ts; + unsigned long grace_period; + unsigned long delay; + struct xsc_dma_info *pending; + u32 npages; +}; + +struct xsc_page_cache { + struct xsc_dma_info *page_cache; + u32 head; + u32 tail; + u32 sz; + u32 resv; +}; + +struct xsc_rq; +struct xsc_cqe; +typedef void (*xsc_fp_handle_rx_cqe)(struct xsc_cqwq *cqwq, struct xsc_rq *rq, + struct xsc_cqe *cqe); +typedef bool (*xsc_fp_post_rx_wqes)(struct xsc_rq *rq); +typedef void (*xsc_fp_dealloc_wqe)(struct xsc_rq *rq, u16 ix); +typedef struct sk_buff * (*xsc_fp_skb_from_cqe)(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, u32 cqe_bcnt, u8 has_pph); + +struct xsc_rq { + struct xsc_core_qp cqp; + struct { + struct xsc_wq_cyc wq; + struct xsc_wqe_frag_info *frags; + struct xsc_dma_info *di; + struct xsc_rq_frags_info info; + xsc_fp_skb_from_cqe skb_from_cqe; + } wqe; + + struct { + u16 headroom; + u8 map_dir; /* dma map direction */ + } buff; + + struct page_pool *page_pool; + struct xsc_wq_ctrl wq_ctrl; + struct xsc_cq cq; + u32 rqn; + int ix; + + unsigned long state; + struct work_struct recover_work; + struct xsc_rq_stats *stats; + struct xsc_dim dim_obj; + + u32 hw_mtu; + u32 frags_sz; + + xsc_fp_handle_rx_cqe handle_rx_cqe; + xsc_fp_post_rx_wqes post_wqes; + xsc_fp_dealloc_wqe dealloc_wqe; + struct xsc_page_cache page_cache; +} ____cacheline_aligned_in_smp; + +struct xsc_tx_wqe_info { + struct sk_buff *skb; + u32 num_bytes; + u8 num_wqebbs; + u8 num_dma; +}; + +enum xsc_dma_map_type { + XSC_DMA_MAP_SINGLE, + XSC_DMA_MAP_PAGE +}; + +struct xsc_sq_dma { + dma_addr_t addr; + u32 size; + enum xsc_dma_map_type type; +}; + +struct xsc_sq { + struct xsc_core_qp cqp; + /* dirtied @completion */ + u16 cc; + u32 dma_fifo_cc; + struct xsc_dim dim_obj; + + /* dirtied 
@xmit */ + u16 pc ____cacheline_aligned_in_smp; + u32 dma_fifo_pc; + + struct xsc_cq cq; + + /* read only */ + struct xsc_wq_cyc wq; + u32 dma_fifo_mask; + struct xsc_sq_stats *stats; + struct { + struct xsc_sq_dma *dma_fifo; + struct xsc_tx_wqe_info *wqe_info; + } db; + void __iomem *uar_map; + struct netdev_queue *txq; + u32 sqn; + u16 stop_room; + + __be32 mkey_be; + unsigned long state; + unsigned int hw_mtu; + + /* control path */ + struct xsc_wq_ctrl wq_ctrl; + struct xsc_channel *channel; + int ch_ix; + int txq_ix; + struct work_struct recover_work; +} ____cacheline_aligned_in_smp; + +struct rdma_opcode_data { + u32 immdt_value; +} __packed __aligned(4); + +struct raw_opcode_data { + u16 has_pph : 1; + u16 so_type : 1; + u16 so_data_size : 14; + u8 rsv; + u8 so_hdr_len; +} __packed __aligned(4); + +struct rawtype_opcode_data { + u16 desc_id; + u16 is_last_wqe : 1; + u16 dst_qp_id : 15; +} __packed __aligned(4); + +struct xsc_wqe_ctrl_seg { + u8 msg_opcode; + u8 with_immdt : 1; + u8 csum_en : 2; + u8 ds_data_num : 5; + u16 wqe_id; + u32 msg_len; + union { + struct rdma_opcode_data _rdma_opcode_data; + struct raw_opcode_data _raw_opcode_data; + struct rawtype_opcode_data _rawtype_opcode_data; + } opcode_data; + u32 se : 1; + u32 ce : 1; + u32 rsv : 30; +}; + +static inline u8 get_cqe_opcode(struct xsc_cqe *cqe) +{ + return cqe->msg_opcode; +} + +static inline void xsc_dump_err_cqe(struct xsc_core_device *dev, + struct xsc_cqe *cqe) +{ + print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, cqe, + sizeof(*cqe), false); +} + +#endif /* XSC_QUEUE_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig b/drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig new file mode 100644 index 000000000000..fafa69b8a478 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. 
+# Yunsilicon PCI configuration +# + +config YUNSILICON_XSC_PCI + tristate "Yunsilicon XSC PCI driver" + default n + select NET_DEVLINK + select PAGE_POOL + help + This driver is common for Yunsilicon XSC + ethernet and RDMA drivers. + + To compile this driver as a module, choose M here. The module + will be called xsc_pci. diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile b/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile new file mode 100644 index 000000000000..45a7d473cac7 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. + +ccflags-y += -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc + +obj-$(CONFIG_YUNSILICON_XSC_PCI) += xsc_pci.o + +xsc_pci-y := main.o eq.o intf.o debugfs.o alloc.o wq.o cq.o qp.o \ + cmd2.o fw.o port.o mr.o pd.o xsc_lag.o xsc_pci_ctrl.o\ + pci_irq.o vport.o sriov.o sriov_sysfs.o devlink.o eswitch.o xsc_port_ctrl.o res_obj.o qpts.o\ + fw/cmd.o \ + fw/xsc_flow.o \ + fw/xsc_res.o \ + fw/osdep.o \ + fw/xsc_mem.o diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c b/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c new file mode 100644 index 000000000000..cdef1b996fdf --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "common/driver.h" + +/* Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. 
+ */ + +int xsc_buf_alloc(struct xsc_core_device *xdev, int size, int max_direct, + struct xsc_buf *buf) +{ + dma_addr_t t; + + buf->size = size; + if (size <= max_direct) { + buf->nbufs = 1; + buf->npages = 1; + buf->page_shift = get_order(size) + PAGE_SHIFT; + buf->direct.buf = dma_alloc_coherent(&xdev->pdev->dev, + size, &t, GFP_KERNEL | __GFP_ZERO); + if (!buf->direct.buf) + return -ENOMEM; + + buf->direct.map = t; + + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } + } else { + int i; + + buf->direct.buf = NULL; + buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->npages = buf->nbufs; + buf->page_shift = PAGE_SHIFT; + buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), + GFP_KERNEL); + if (!buf->page_list) + return -ENOMEM; + + for (i = 0; i < buf->nbufs; i++) { + buf->page_list[i].buf = + dma_alloc_coherent(&xdev->pdev->dev, PAGE_SIZE, + &t, GFP_KERNEL | __GFP_ZERO); + if (!buf->page_list[i].buf) + goto err_free; + + buf->page_list[i].map = t; + } + + if (BITS_PER_LONG == 64) { + struct page **pages; + + pages = kmalloc_array(buf->nbufs, sizeof(*pages), GFP_KERNEL); + if (!pages) + goto err_free; + for (i = 0; i < buf->nbufs; i++) { + if (is_vmalloc_addr(buf->page_list[i].buf)) + pages[i] = vmalloc_to_page(buf->page_list[i].buf); + else + pages[i] = virt_to_page(buf->page_list[i].buf); + } + buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!buf->direct.buf) + goto err_free; + } + } + + return 0; + +err_free: + xsc_buf_free(xdev, buf); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(xsc_buf_alloc); + +void xsc_buf_free(struct xsc_core_device *xdev, struct xsc_buf *buf) +{ + int i; + + if (buf->nbufs == 1) { + dma_free_coherent(&xdev->pdev->dev, buf->size, buf->direct.buf, + buf->direct.map); + } else { + if (BITS_PER_LONG == 64 && buf->direct.buf) + vunmap(buf->direct.buf); + + for (i = 0; i < buf->nbufs; i++) + if (buf->page_list[i].buf) + dma_free_coherent(&xdev->pdev->dev, 
PAGE_SIZE, + buf->page_list[i].buf, + buf->page_list[i].map); + kfree(buf->page_list); + } +} +EXPORT_SYMBOL_GPL(xsc_buf_free); + +void xsc_fill_page_array(struct xsc_buf *buf, __be64 *pas, int npages) +{ + u64 addr; + int i; + int shift = PAGE_SHIFT - PAGE_SHIFT_4K; + int mask = (1 << shift) - 1; + + for (i = 0; i < npages; i++) { + if (buf->nbufs == 1) + addr = buf->direct.map + (i << PAGE_SHIFT_4K); + else + addr = buf->page_list[i >> shift].map + ((i & mask) << PAGE_SHIFT_4K); + + pas[i] = cpu_to_be64(addr); + } +} +EXPORT_SYMBOL_GPL(xsc_fill_page_array); + +void xsc_fill_page_frag_array(struct xsc_frag_buf *buf, __be64 *pas, int npages) +{ + int i; + dma_addr_t addr; + int shift = PAGE_SHIFT - PAGE_SHIFT_4K; + int mask = (1 << shift) - 1; + + for (i = 0; i < npages; i++) { + addr = buf->frags[i >> shift].map + ((i & mask) << PAGE_SHIFT_4K); + pas[i] = cpu_to_be64(addr); + } +} +EXPORT_SYMBOL_GPL(xsc_fill_page_frag_array); + +static void *xsc_dma_zalloc_coherent_node(struct xsc_core_device *xdev, + size_t size, dma_addr_t *dma_handle, + int node) +{ + struct xsc_dev_resource *dev_res = xdev->dev_res; + struct device *device = &xdev->pdev->dev; + int original_node; + void *cpu_handle; + + /* WA for kernels that don't use numa_mem_id in alloc_pages_node */ + if (node == NUMA_NO_NODE) +#ifdef HAVE_NUMA_MEM_ID + node = numa_mem_id(); +#else + node = first_memory_node; +#endif + + mutex_lock(&dev_res->alloc_mutex); + original_node = dev_to_node(device); + set_dev_node(device, node); + cpu_handle = dma_alloc_coherent(device, size, dma_handle, + GFP_KERNEL); + set_dev_node(device, original_node); + mutex_unlock(&dev_res->alloc_mutex); + return cpu_handle; +} + +int xsc_frag_buf_alloc_node(struct xsc_core_device *xdev, int size, + struct xsc_frag_buf *buf, int node) +{ + int i; + + buf->size = size; + buf->npages = DIV_ROUND_UP(size, PAGE_SIZE); + buf->page_shift = PAGE_SHIFT; + buf->frags = kcalloc(buf->npages, sizeof(struct xsc_buf_list), + GFP_KERNEL); + if 
(!buf->frags) + goto err_out; + + for (i = 0; i < buf->npages; i++) { + struct xsc_buf_list *frag = &buf->frags[i]; + int frag_sz = min_t(int, size, PAGE_SIZE); + + frag->buf = xsc_dma_zalloc_coherent_node(xdev, frag_sz, + &frag->map, node); + if (!frag->buf) + goto err_free_buf; + if (frag->map & ((1 << buf->page_shift) - 1)) { + dma_free_coherent(&xdev->pdev->dev, frag_sz, + buf->frags[i].buf, buf->frags[i].map); + xsc_core_warn(xdev, "unexpected map alignment: %pad, page_shift=%d\n", + &frag->map, buf->page_shift); + goto err_free_buf; + } + size -= frag_sz; + } + + return 0; + +err_free_buf: + while (i--) + dma_free_coherent(&xdev->pdev->dev, PAGE_SIZE, buf->frags[i].buf, + buf->frags[i].map); + kfree(buf->frags); +err_out: + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(xsc_frag_buf_alloc_node); + +void xsc_frag_buf_free(struct xsc_core_device *xdev, struct xsc_frag_buf *buf) +{ + int size = buf->size; + int i; + + for (i = 0; i < buf->npages; i++) { + int frag_sz = min_t(int, size, PAGE_SIZE); + + dma_free_coherent(&xdev->pdev->dev, frag_sz, buf->frags[i].buf, + buf->frags[i].map); + size -= frag_sz; + } + kfree(buf->frags); +} +EXPORT_SYMBOL_GPL(xsc_frag_buf_free); + +static struct xsc_db_pgdir *xsc_alloc_db_pgdir(struct xsc_core_device *xdev, + int node) +{ + u32 db_per_page = PAGE_SIZE / cache_line_size(); + struct xsc_db_pgdir *pgdir; + + pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); + if (!pgdir) + return NULL; + + pgdir->bitmap = bitmap_zalloc(db_per_page, GFP_KERNEL); + if (!pgdir->bitmap) { + kfree(pgdir); + return NULL; + } + + bitmap_fill(pgdir->bitmap, db_per_page); + + pgdir->db_page = xsc_dma_zalloc_coherent_node(xdev, PAGE_SIZE, + &pgdir->db_dma, node); + if (!pgdir->db_page) { + bitmap_free(pgdir->bitmap); + kfree(pgdir); + return NULL; + } + + return pgdir; +} + +static int xsc_alloc_db_from_pgdir(struct xsc_db_pgdir *pgdir, + struct xsc_db *db) +{ + u32 db_per_page = PAGE_SIZE / cache_line_size(); + int offset; + int i; + + i = 
/*
 * xsc_db_alloc_node() - allocate one doorbell record, preferring @node.
 * @xdev: owning core device
 * @db:   doorbell descriptor filled in on success
 * @node: preferred NUMA node for a newly allocated doorbell page
 *
 * Doorbells are carved out of shared pages (struct xsc_db_pgdir): the
 * existing pages are scanned for a free slot first, and a new page is
 * allocated only when all are full.  Serialized by dev_res->pgdir_mutex.
 *
 * Return: 0 on success, -ENOMEM if a new doorbell page cannot be allocated.
 */
int xsc_db_alloc_node(struct xsc_core_device *xdev, struct xsc_db *db, int node)
{
	struct xsc_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&xdev->dev_res->pgdir_mutex);

	/* Reuse a free slot from an already-allocated doorbell page, if any. */
	list_for_each_entry(pgdir, &xdev->dev_res->pgdir_list, list)
		if (!xsc_alloc_db_from_pgdir(pgdir, db))
			goto out;

	pgdir = xsc_alloc_db_pgdir(xdev, node);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &xdev->dev_res->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(xsc_alloc_db_from_pgdir(pgdir, db));

out:
	mutex_unlock(&xdev->dev_res->pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(xsc_db_alloc_node);
SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifdef HAVE_GENERIC_KMAP_TYPE +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "common/driver.h" +#include +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "tmp_cmdq_defines.h" + +enum { + CMD_IF_REV = 3, +}; + +enum { + CMD_MODE_POLLING, + CMD_MODE_EVENTS +}; + +enum { + NUM_LONG_LISTS = 2, + NUM_MED_LISTS = 64, + LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + + XSC_CMD_DATA_BLOCK_SIZE, + MED_LIST_SIZE = 16 + XSC_CMD_DATA_BLOCK_SIZE, +}; + +enum { + XSC_CMD_DELIVERY_STAT_OK = 0x0, + XSC_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, + XSC_CMD_DELIVERY_STAT_TOK_ERR = 0x2, + XSC_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, + XSC_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, + XSC_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, + XSC_CMD_DELIVERY_STAT_FW_ERR = 0x6, + XSC_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, + XSC_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, + XSC_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, + XSC_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, +}; + +static struct xsc_cmd_work_ent *alloc_cmd(struct xsc_cmd *cmd, + struct xsc_cmd_msg *in, + struct xsc_rsp_msg *out) +{ + struct xsc_cmd_work_ent *ent; + + ent = kzalloc(sizeof(*ent), GFP_KERNEL); + if (!ent) + return ERR_PTR(-ENOMEM); + + ent->in = in; + ent->out = out; + ent->cmd = cmd; + + return ent; +} + +static u8 alloc_token(struct xsc_cmd *cmd) +{ + u8 token; + + spin_lock(&cmd->token_lock); + token = cmd->token++ % 255 + 1; + spin_unlock(&cmd->token_lock); + + return token; +} + +static int alloc_ent(struct xsc_cmd *cmd) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); + if (ret < cmd->max_reg_cmds) + clear_bit(ret, &cmd->bitmask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); + + 
return ret < cmd->max_reg_cmds ? ret : -ENOMEM; +} + +static void free_ent(struct xsc_cmd *cmd, int idx) +{ + unsigned long flags; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + set_bit(idx, &cmd->bitmask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); +} + +static struct xsc_cmd_layout *get_inst(struct xsc_cmd *cmd, int idx) +{ + return cmd->cmd_buf + (idx << cmd->log_stride); +} + +static struct xsc_rsp_layout *get_cq_inst(struct xsc_cmd *cmd, int idx) +{ + return cmd->cq_buf + (idx << cmd->log_stride); +} + +static u8 xor8_buf(void *buf, int len) +{ + u8 *ptr = buf; + u8 sum = 0; + int i; + + for (i = 0; i < len; i++) + sum ^= ptr[i]; + + return sum; +} + +static int verify_block_sig(struct xsc_cmd_prot_block *block) +{ + if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) + return -EINVAL; + + if (xor8_buf(block, sizeof(*block)) != 0xff) + return -EINVAL; + + return 0; +} + +static void calc_block_sig(struct xsc_cmd_prot_block *block, u8 token) +{ + block->token = token; + block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2); + block->sig = ~xor8_buf(block, sizeof(*block) - 1); +} + +static void calc_chain_sig(struct xsc_cmd_mailbox *head, u8 token) +{ + struct xsc_cmd_mailbox *next = head; + + while (next) { + calc_block_sig(next->buf, token); + next = next->next; + } +} + +static void set_signature(struct xsc_cmd_work_ent *ent) +{ + ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); + calc_chain_sig(ent->in->next, ent->token); + calc_chain_sig(ent->out->next, ent->token); +} + +static void free_cmd(struct xsc_cmd_work_ent *ent) +{ + kfree(ent); +} + +static int verify_signature(struct xsc_cmd_work_ent *ent) +{ + struct xsc_cmd_mailbox *next = ent->out->next; + int err; + u8 sig; + + sig = xor8_buf(ent->rsp_lay, sizeof(*ent->rsp_lay)); + if (sig != 0xff) + return -EINVAL; + + while (next) { + err = verify_block_sig(next->buf); + if (err) + return err; + + next = next->next; + } + + return 0; 
+} + +static void dump_buf(void *buf, int size, int offset) +{ + __be32 *p = buf; + int i; + + for (i = 0; i < size; i += 16) { + xsc_pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), + be32_to_cpu(p[1]), be32_to_cpu(p[2]), be32_to_cpu(p[3])); + p += 4; + offset += 16; + } + xsc_pr_debug("\n"); +} + +const char *xsc_command_str(int command) +{ + switch (command) { + case XSC_CMD_OP_QUERY_HCA_CAP: + return "QUERY_HCA_CAP"; + + case XSC_CMD_OP_ENABLE_HCA: + return "ENABLE_HCA"; + + case XSC_CMD_OP_DISABLE_HCA: + return "DISABLE_HCA"; + + case XSC_CMD_OP_MODIFY_HCA: + return "MODIFY_HCA"; + + case XSC_CMD_OP_QUERY_CMDQ_VERSION: + return "QUERY_CMDQ_VERSION"; + + case XSC_CMD_OP_QUERY_MSIX_TBL_INFO: + return "QUERY_MSIX_TBL_INFO"; + + case XSC_CMD_OP_FUNCTION_RESET: + return "FUNCTION_RESET"; + + case XSC_CMD_OP_ALLOC_IA_LOCK: + return "ALLOC_IA_LOCK"; + + case XSC_CMD_OP_RELEASE_IA_LOCK: + return "RELEASE_IA_LOCK"; + + case XSC_CMD_OP_DUMMY: + return "DUMMY_CMD"; + + case XSC_CMD_OP_SET_DEBUG_INFO: + return "SET_DEBUG_INFO"; + + case XSC_CMD_OP_CREATE_MKEY: + return "CREATE_MKEY"; + + case XSC_CMD_OP_QUERY_MKEY: + return "QUERY_MKEY"; + + case XSC_CMD_OP_DESTROY_MKEY: + return "DESTROY_MKEY"; + + case XSC_CMD_OP_QUERY_SPECIAL_CONTEXTS: + return "QUERY_SPECIAL_CONTEXTS"; + + case XSC_CMD_OP_SET_MPT: + return "SET_MPT"; + + case XSC_CMD_OP_SET_MTT: + return "SET_MTT"; + + case XSC_CMD_OP_CREATE_EQ: + return "CREATE_EQ"; + + case XSC_CMD_OP_DESTROY_EQ: + return "DESTROY_EQ"; + + case XSC_CMD_OP_QUERY_EQ: + return "QUERY_EQ"; + + case XSC_CMD_OP_CREATE_CQ: + return "CREATE_CQ"; + + case XSC_CMD_OP_DESTROY_CQ: + return "DESTROY_CQ"; + + case XSC_CMD_OP_QUERY_CQ: + return "QUERY_CQ"; + + case XSC_CMD_OP_MODIFY_CQ: + return "MODIFY_CQ"; + + case XSC_CMD_OP_CREATE_QP: + return "CREATE_QP"; + + case XSC_CMD_OP_DESTROY_QP: + return "DESTROY_QP"; + + case XSC_CMD_OP_RST2INIT_QP: + return "RST2INIT_QP"; + + case XSC_CMD_OP_INIT2RTR_QP: + return "INIT2RTR_QP"; + + 
case XSC_CMD_OP_RTR2RTS_QP: + return "RTR2RTS_QP"; + + case XSC_CMD_OP_RTS2RTS_QP: + return "RTS2RTS_QP"; + + case XSC_CMD_OP_SQERR2RTS_QP: + return "SQERR2RTS_QP"; + + case XSC_CMD_OP_2ERR_QP: + return "2ERR_QP"; + + case XSC_CMD_OP_RTS2SQD_QP: + return "RTS2SQD_QP"; + + case XSC_CMD_OP_SQD2RTS_QP: + return "SQD2RTS_QP"; + + case XSC_CMD_OP_2RST_QP: + return "2RST_QP"; + + case XSC_CMD_OP_QUERY_QP: + return "QUERY_QP"; + + case XSC_CMD_OP_CONF_SQP: + return "CONF_SQP"; + + case XSC_CMD_OP_MAD_IFC: + return "MAD_IFC"; + + case XSC_CMD_OP_INIT2INIT_QP: + return "INIT2INIT_QP"; + + case XSC_CMD_OP_SQD2SQD_QP: + return "SQD2SQD_QP"; + + case XSC_CMD_OP_QUERY_QP_FLUSH_STATUS: + return "QUERY_QP_FLUSH_STATUS"; + + case XSC_CMD_OP_ALLOC_PD: + return "ALLOC_PD"; + + case XSC_CMD_OP_DEALLOC_PD: + return "DEALLOC_PD"; + + case XSC_CMD_OP_ACCESS_REG: + return "ACCESS_REG"; + + case XSC_CMD_OP_MODIFY_RAW_QP: + return "MODIFY_RAW_QP"; + + case XSC_CMD_OP_ENABLE_NIC_HCA: + return "ENABLE_NIC_HCA"; + + case XSC_CMD_OP_DISABLE_NIC_HCA: + return "DISABLE_NIC_HCA"; + + case XSC_CMD_OP_MODIFY_NIC_HCA: + return "MODIFY_NIC_HCA"; + + case XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT: + return "QUERY_NIC_VPORT_CONTEXT"; + + case XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: + return "MODIFY_NIC_VPORT_CONTEXT"; + + case XSC_CMD_OP_QUERY_VPORT_STATE: + return "QUERY_VPORT_STATE"; + + case XSC_CMD_OP_MODIFY_VPORT_STATE: + return "MODIFY_VPORT_STATE"; + + case XSC_CMD_OP_QUERY_HCA_VPORT_CONTEXT: + return "QUERY_HCA_VPORT_CONTEXT"; + + case XSC_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: + return "MODIFY_HCA_VPORT_CONTEXT"; + + case XSC_CMD_OP_QUERY_HCA_VPORT_GID: + return "QUERY_HCA_VPORT_GID"; + + case XSC_CMD_OP_QUERY_HCA_VPORT_PKEY: + return "QUERY_HCA_VPORT_PKEY"; + + case XSC_CMD_OP_QUERY_VPORT_COUNTER: + return "QUERY_VPORT_COUNTER"; + + case XSC_CMD_OP_QUERY_PRIO_STATS: + return "QUERY_PRIO_STATS"; + + case XSC_CMD_OP_QUERY_PHYPORT_STATE: + return "QUERY_PHYPORT_STATE"; + + case XSC_CMD_OP_QUERY_EVENT_TYPE: + 
return "QUERY_EVENT_TYPE"; + + case XSC_CMD_OP_QUERY_LINK_INFO: + return "QUERY_LINK_INFO"; + + case XSC_CMD_OP_MODIFY_LINK_INFO: + return "MODIFY_LINK_INFO"; + + case XSC_CMD_OP_MODIFY_FEC_PARAM: + return "MODIFY_FEC_PARAM"; + + case XSC_CMD_OP_QUERY_FEC_PARAM: + return "QUERY_FEC_PARAM"; + + case XSC_CMD_OP_LAG_CREATE: + return "LAG_CREATE"; + + case XSC_CMD_OP_LAG_ADD_MEMBER: + return "LAG ADD MEMBER"; + + case XSC_CMD_OP_LAG_REMOVE_MEMBER: + return "LAG REMOVE MEMBER"; + + case XSC_CMD_OP_LAG_UPDATE_MEMBER_STATUS: + return "LAG UPDATE MEMBER STATUS"; + + case XSC_CMD_OP_LAG_UPDATE_HASH_TYPE: + return "LAG UPDATE HASH TYPE"; + + case XSC_CMD_OP_LAG_DESTROY: + return "LAG_DESTROY"; + + case XSC_CMD_OP_LAG_SET_QOS: + return "LAG_SET_QOS"; + + case XSC_CMD_OP_ENABLE_MSIX: + return "ENABLE_MSIX"; + + case XSC_CMD_OP_IOCTL_FLOW: + return "CFG_FLOW_TABLE"; + + case XSC_CMD_OP_IOCTL_SET_DSCP_PMT: + return "SET_DSCP_PMT"; + + case XSC_CMD_OP_IOCTL_GET_DSCP_PMT: + return "GET_DSCP_PMT"; + + case XSC_CMD_OP_IOCTL_SET_TRUST_MODE: + return "SET_TRUST_MODE"; + + case XSC_CMD_OP_IOCTL_GET_TRUST_MODE: + return "GET_TRUST_MODE"; + + case XSC_CMD_OP_IOCTL_SET_PCP_PMT: + return "SET_PCP_PMT"; + + case XSC_CMD_OP_IOCTL_GET_PCP_PMT: + return "GET_PCP_PMT"; + + case XSC_CMD_OP_IOCTL_SET_DEFAULT_PRI: + return "SET_DEFAULT_PRI"; + + case XSC_CMD_OP_IOCTL_GET_DEFAULT_PRI: + return "GET_DEFAULT_PRI"; + + case XSC_CMD_OP_IOCTL_SET_PFC: + return "SET_PFC"; + + case XSC_CMD_OP_IOCTL_SET_PFC_DROP_TH: + return "SET_PFC_DROP_TH"; + + case XSC_CMD_OP_IOCTL_GET_PFC: + return "GET_PFC"; + + case XSC_CMD_OP_IOCTL_GET_PFC_CFG_STATUS: + return "GET_PFC_CFG_STATUS"; + + case XSC_CMD_OP_IOCTL_SET_RATE_LIMIT: + return "SET_RATE_LIMIT"; + + case XSC_CMD_OP_IOCTL_GET_RATE_LIMIT: + return "GET_RATE_LIMIT"; + + case XSC_CMD_OP_IOCTL_SET_SP: + return "SET_SP"; + + case XSC_CMD_OP_IOCTL_GET_SP: + return "GET_SP"; + + case XSC_CMD_OP_IOCTL_SET_WEIGHT: + return "SET_WEIGHT"; + + case 
XSC_CMD_OP_IOCTL_GET_WEIGHT: + return "GET_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_SET_PORT_WEIGHT: + return "DPU_SET_PORT_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_GET_PORT_WEIGHT: + return "DPU_GET_PORT_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_SET_PRIO_WEIGHT: + return "DPU_SET_PRIO_WEIGHT"; + + case XSC_CMD_OP_IOCTL_DPU_GET_PRIO_WEIGHT: + return "DPU_GET_PRIO_WEIGHT"; + + case XSC_CMD_OP_IOCTL_SET_WATCHDOG_EN: + return "SET_WATCHDOG_EN"; + + case XSC_CMD_OP_IOCTL_GET_WATCHDOG_EN: + return "GET_WATCHDOG_EN"; + + case XSC_CMD_OP_IOCTL_SET_WATCHDOG_PERIOD: + return "SET_WATCHDOG_PERIOD"; + + case XSC_CMD_OP_IOCTL_GET_WATCHDOG_PERIOD: + return "GET_WATCHDOG_PERIOD"; + + case XSC_CMD_OP_IOCTL_SET_ENABLE_RP: + return "ENABLE_RP"; + + case XSC_CMD_OP_IOCTL_SET_ENABLE_NP: + return "ENABLE_NP"; + + case XSC_CMD_OP_IOCTL_SET_INIT_ALPHA: + return "SET_INIT_ALPHA"; + + case XSC_CMD_OP_IOCTL_SET_G: + return "SET_G"; + + case XSC_CMD_OP_IOCTL_SET_AI: + return "SET_AI"; + + case XSC_CMD_OP_IOCTL_SET_HAI: + return "SET_HAI"; + + case XSC_CMD_OP_IOCTL_SET_TH: + return "SET_TH"; + + case XSC_CMD_OP_IOCTL_SET_BC_TH: + return "SET_BC_TH"; + + case XSC_CMD_OP_IOCTL_SET_CNP_OPCODE: + return "SET_CNP_OPCODE"; + + case XSC_CMD_OP_IOCTL_SET_CNP_BTH_B: + return "SET_CNP_BTH_B"; + + case XSC_CMD_OP_IOCTL_SET_CNP_BTH_F: + return "SET_CNP_BTH_F"; + + case XSC_CMD_OP_IOCTL_SET_CNP_ECN: + return "SET_CNP_ECN"; + + case XSC_CMD_OP_IOCTL_SET_DATA_ECN: + return "SET_DATA_ECN"; + + case XSC_CMD_OP_IOCTL_SET_CNP_TX_INTERVAL: + return "SET_CNP_TX_INTERVAL"; + + case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_RSTTIME: + return "SET_EVT_PERIOD_RSTTIME"; + + case XSC_CMD_OP_IOCTL_SET_CNP_DSCP: + return "SET_CNP_DSCP"; + + case XSC_CMD_OP_IOCTL_SET_CNP_PCP: + return "SET_CNP_PCP"; + + case XSC_CMD_OP_IOCTL_SET_EVT_PERIOD_ALPHA: + return "SET_EVT_PERIOD_ALPHA"; + + case XSC_CMD_OP_IOCTL_GET_CC_CFG: + return "GET_CC_CFG"; + + case XSC_CMD_OP_IOCTL_GET_CC_STAT: + return "GET_CC_STAT"; + + case 
XSC_CMD_OP_IOCTL_SET_CLAMP_TGT_RATE: + return "SET_CLAMP_TGT_RATE"; + + case XSC_CMD_OP_IOCTL_SET_MAX_HAI_FACTOR: + return "SET_MAX_HAI_FACTOR"; + + case XSC_CMD_OP_IOCTL_SET_HWC: + return "SET_HWCONFIG"; + + case XSC_CMD_OP_IOCTL_GET_HWC: + return "GET_HWCONFIG"; + + case XSC_CMD_OP_SET_MTU: + return "SET_MTU"; + + case XSC_CMD_OP_QUERY_ETH_MAC: + return "QUERY_ETH_MAC"; + + case XSC_CMD_OP_QUERY_HW_STATS: + return "QUERY_HW_STATS"; + + case XSC_CMD_OP_QUERY_PAUSE_CNT: + return "QUERY_PAUSE_CNT"; + + case XSC_CMD_OP_SET_RTT_EN: + return "SET_RTT_EN"; + + case XSC_CMD_OP_GET_RTT_EN: + return "GET_RTT_EN"; + + case XSC_CMD_OP_SET_RTT_QPN: + return "SET_RTT_QPN"; + + case XSC_CMD_OP_GET_RTT_QPN: + return "GET_RTT_QPN"; + + case XSC_CMD_OP_SET_RTT_PERIOD: + return "SET_RTT_PERIOD"; + + case XSC_CMD_OP_GET_RTT_PERIOD: + return "GET_RTT_PERIOD"; + + case XSC_CMD_OP_GET_RTT_RESULT: + return "GET_RTT_RESULT"; + + case XSC_CMD_OP_GET_RTT_STATS: + return "ET_RTT_STATS"; + + case XSC_CMD_OP_SET_LED_STATUS: + return "SET_LED_STATUS"; + + case XSC_CMD_OP_AP_FEAT: + return "AP_FEAT"; + + case XSC_CMD_OP_PCIE_LAT_FEAT: + return "PCIE_LAT_FEAT"; + + case XSC_CMD_OP_USER_EMU_CMD: + return "USER_EMU_CMD"; + + case XSC_CMD_OP_QUERY_PFC_PRIO_STATS: + return "QUERY_PFC_PRIO_STATS"; + + case XSC_CMD_OP_IOCTL_QUERY_PFC_STALL_STATS: + return "QUERY_PFC_STALL_STATS"; + + case XSC_CMD_OP_QUERY_HW_STATS_RDMA: + return "QUERY_HW_STATS_RDMA"; + + case XSC_CMD_OP_QUERY_HW_STATS_ETH: + return "QUERY_HW_STATS_ETH"; + + case XSC_CMD_OP_SET_VPORT_RATE_LIMIT: + return "SET_VPORT_RATE_LIMIT"; + + default: return "unknown command opcode"; + } +} + +static void dump_command(struct xsc_core_device *xdev, struct xsc_cmd_mailbox *next, + struct xsc_cmd_work_ent *ent, int input, int len) +{ + u16 op = be16_to_cpu(((struct xsc_inbox_hdr *)(ent->lay->in))->opcode); + int offset = 0; + + if (!(xsc_debug_mask & (1 << XSC_CMD_DATA))) + return; + + xsc_core_dbg(xdev, "dump command %s(0x%x) %s\n", 
/*
 * cmd_work_handler() - workqueue handler that posts one command to HW.
 *
 * Takes one command-slot credit (cmd->sem, released on the completion
 * path), claims a free entry index, fills the descriptor at the current
 * producer index, and rings the doorbell.  Descriptor write and doorbell
 * update are serialized by cmd->doorbell_lock.
 */
static void cmd_work_handler(struct work_struct *work)
{
	struct xsc_cmd_work_ent *ent = container_of(work, struct xsc_cmd_work_ent, work);
	struct xsc_cmd *cmd = ent->cmd;
	struct xsc_core_device *xdev = container_of(cmd, struct xsc_core_device, cmd);
	struct xsc_cmd_layout *lay;
	struct semaphore *sem;
	unsigned long flags;

	sem = &cmd->sem;
	down(sem);
	ent->idx = alloc_ent(cmd);
	if (ent->idx < 0) {
		/*
		 * NOTE(review): the entry is abandoned here without
		 * completing ent->done, so the waiter in wait_func() only
		 * recovers via its timeout -- confirm this is intended.
		 */
		xsc_core_err(xdev, "failed to allocate command entry\n");
		up(sem);
		return;
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;

	spin_lock_irqsave(&cmd->doorbell_lock, flags);
	/* Descriptor slot at the current producer index. */
	lay = get_inst(cmd, cmd->cmd_pid);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	/* Inline part of the request; overflow goes via the mailbox chain. */
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = XSC_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->idx = ent->idx;
	if (!cmd->checksum_disabled)
		set_signature(ent);
	else
		lay->sig = 0xff;
	dump_command(xdev, ent->in->next, ent, 1, ent->in->len);

	/* Timestamp for the firmware execution-time statistics. */
	ktime_get_ts64(&ent->ts1);

	/* ring doorbell after the descriptor is valid */
	wmb();

	cmd->cmd_pid = (cmd->cmd_pid + 1) % (1 << cmd->log_sz);
	writel(cmd->cmd_pid, REG_ADDR(xdev, cmd->reg.req_pid_addr));
	/* NOTE(review): mmiowb() was removed upstream in v5.2; presumably a
	 * compat wrapper here -- verify against the driver's compat layer. */
	mmiowb();
	spin_unlock_irqrestore(&cmd->doorbell_lock, flags);
}
/*
 * wait_func() - wait for a posted command entry to complete.
 * @xdev: owning core device
 * @ent:  entry previously queued via cmd_work_handler()
 *
 * Sleeps on ent->done for up to XSC_CMD_TIMEOUT_MSEC.  A timeout marks
 * the whole command interface XSC_CMD_STATUS_TIMEDOUT; otherwise the
 * result stored by the completion path (ent->ret) is propagated.
 *
 * Return: 0 on success, -ETIMEDOUT on timeout, or ent->ret on delivery
 * error.
 */
static int wait_func(struct xsc_core_device *xdev, struct xsc_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(XSC_CMD_TIMEOUT_MSEC);
	int err;
	struct xsc_cmd *cmd = &xdev->cmd;

	if (!wait_for_completion_timeout(&ent->done, timeout))
		err = -ETIMEDOUT;
	else
		err = ent->ret;

	if (err == -ETIMEDOUT) {
		cmd->cmd_status = XSC_CMD_STATUS_TIMEDOUT;
		xsc_core_warn(xdev, "wait for %s(0x%x) response timeout!\n",
			      xsc_command_str(msg_to_opcode(ent->in)),
			      msg_to_opcode(ent->in));
	} else if (err) {
		xsc_core_dbg(xdev, "err %d, delivery status %s(%d)\n", err,
			     deliv_status_to_str(ent->status), ent->status);
	}

	return err;
}
/*
 * xsc_cmd_invoke() - queue one command, wait for it, and record stats.
 * @xdev:   owning core device
 * @in:     prepared request message
 * @out:    prepared response message (filled by the completion path)
 * @status: out: firmware delivery status byte on success
 *
 * Return: 0 on success, -ETIMEDOUT on timeout, or a negative errno.
 */
static int xsc_cmd_invoke(struct xsc_core_device *xdev, struct xsc_cmd_msg *in,
			  struct xsc_rsp_msg *out, u8 *status)
{
	struct xsc_cmd *cmd = &xdev->cmd;
	struct xsc_cmd_work_ent *ent;
	ktime_t t1, t2, delta;
	struct xsc_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;
	struct semaphore *sem;

	ent = alloc_cmd(cmd, in, out);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	init_completion(&ent->done);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (!queue_work(cmd->wq, &ent->work)) {
		/* NOTE(review): queue_work() returning false means the work
		 * was already queued; -ENOMEM is a questionable errno here. */
		xsc_core_warn(xdev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	err = wait_func(xdev, ent);
	if (err == -ETIMEDOUT)
		goto out;
	/* Fold the firmware execution time into the per-opcode statistics. */
	t1 = timespec64_to_ktime(ent->ts1);
	t2 = timespec64_to_ktime(ent->ts2);
	delta = ktime_sub(t2, t1);
	ds = ktime_to_ns(delta);
	op = be16_to_cpu(((struct xsc_inbox_hdr *)in->first.data)->opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock(&stats->lock);
	}
	xsc_core_dbg_mask(xdev, 1 << XSC_CMD_TIME,
			  "fw exec time for %s is %lld nsec\n",
			  xsc_command_str(op), ds);
	*status = ent->status;
	free_cmd(ent);

	return err;

out:
	/*
	 * Timeout path: release the slot credit taken in cmd_work_handler()
	 * and fall through to free the entry.
	 * NOTE(review): firmware may still complete later and find
	 * cmd->ent_arr[idx] pointing at freed memory -- confirm the
	 * completion path guards against this.
	 */
	sem = &cmd->sem;
	up(sem);
out_free:
	free_cmd(ent);
	return err;
}
err : count; +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = dbg_write, +}; + +static int xsc_copy_to_cmd_msg(struct xsc_cmd_msg *to, void *from, int size) +{ + struct xsc_cmd_prot_block *block; + struct xsc_cmd_mailbox *next; + int copy; + + if (!to || !from) + return -ENOMEM; + + copy = min_t(int, size, sizeof(to->first.data)); + memcpy(to->first.data, from, copy); + size -= copy; + from += copy; + + next = to->next; + while (size) { + if (!next) { + /* this is a BUG */ + return -ENOMEM; + } + + copy = min_t(int, size, XSC_CMD_DATA_BLOCK_SIZE); + block = next->buf; + memcpy(block->data, from, copy); + block->owner_status = 0; + from += copy; + size -= copy; + next = next->next; + } + + return 0; +} + +static int xsc_copy_from_rsp_msg(void *to, struct xsc_rsp_msg *from, int size) +{ + struct xsc_cmd_prot_block *block; + struct xsc_cmd_mailbox *next; + int copy; + + if (!to || !from) + return -ENOMEM; + + copy = min_t(int, size, sizeof(from->first.data)); + memcpy(to, from->first.data, copy); + size -= copy; + to += copy; + + next = from->next; + while (size) { + if (!next) { + /* this is a BUG */ + return -ENOMEM; + } + + copy = min_t(int, size, XSC_CMD_DATA_BLOCK_SIZE); + block = next->buf; + if (!block->owner_status) + pr_err("block ownership check failed\n"); + + memcpy(to, block->data, copy); + to += copy; + size -= copy; + next = next->next; + } + + return 0; +} + +static struct xsc_cmd_mailbox *alloc_cmd_box(struct xsc_core_device *xdev, + gfp_t flags) +{ + struct xsc_cmd_mailbox *mailbox; + + mailbox = kmalloc(sizeof(*mailbox), flags); + if (!mailbox) + return ERR_PTR(-ENOMEM); + + mailbox->buf = dma_pool_alloc(xdev->cmd.pool, flags, + &mailbox->dma); + if (!mailbox->buf) { + xsc_core_dbg(xdev, "failed allocation\n"); + kfree(mailbox); + return ERR_PTR(-ENOMEM); + } + memset(mailbox->buf, 0, sizeof(struct xsc_cmd_prot_block)); + mailbox->next = NULL; + + return mailbox; +} + +static void 
/*
 * xsc_alloc_cmd_msg() - build a command message large enough for @size.
 * @xdev:  owning core device
 * @flags: allocation flags for the mailbox blocks
 * @size:  total payload size in bytes
 *
 * The first sizeof(msg->first.data) bytes travel inline in the command
 * descriptor; the remainder is split across XSC_CMD_DATA_BLOCK_SIZE
 * mailbox blocks.  Blocks are allocated in reverse so the final list
 * head is block 0, and each block's hardware 'next' pointer is the DMA
 * address of its successor (0 terminates the chain).
 *
 * Return: new message, or ERR_PTR(-ENOMEM); free with xsc_free_cmd_msg().
 */
static struct xsc_cmd_msg *xsc_alloc_cmd_msg(struct xsc_core_device *xdev,
					     gfp_t flags, int size)
{
	struct xsc_cmd_mailbox *tmp, *head = NULL;
	struct xsc_cmd_prot_block *block;
	struct xsc_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	/* Bytes that do not fit inline and thus need mailbox blocks. */
	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + XSC_CMD_DATA_BLOCK_SIZE - 1) / XSC_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(xdev, flags);
		if (IS_ERR(tmp)) {
			xsc_core_warn(xdev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		/* Prepend: the last block allocated (block_num 0) ends up
		 * at the list head. */
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(xdev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}
(IS_ERR(tmp)) { + xsc_core_warn(xdev, "failed allocating block\n"); + err = PTR_ERR(tmp); + goto err_alloc; + } + + block = tmp->buf; + tmp->next = head; + block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0); + block->block_num = cpu_to_be32(n - i - 1); + head = tmp; + } + msg->next = head; + msg->len = size; + return msg; + +err_alloc: + while (head) { + tmp = head->next; + free_cmd_box(xdev, head); + head = tmp; + } + kfree(msg); + + return ERR_PTR(err); +} + +static void xsc_free_rsp_msg(struct xsc_core_device *xdev, + struct xsc_rsp_msg *msg) +{ + struct xsc_cmd_mailbox *head = msg->next; + struct xsc_cmd_mailbox *next; + + while (head) { + next = head->next; + free_cmd_box(xdev, head); + head = next; + } + kfree(msg); +} + +static ssize_t data_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct xsc_core_device *xdev = filp->private_data; + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + void *ptr; + int err; + + if (*pos != 0) + return -EINVAL; + + kfree(dbg->in_msg); + dbg->in_msg = NULL; + dbg->inlen = 0; + + ptr = kzalloc(count, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + if (copy_from_user(ptr, buf, count)) { + err = -EPERM; + goto out; + } + dbg->in_msg = ptr; + dbg->inlen = count; + + *pos = count; + + return count; + +out: + kfree(ptr); + return err; +} + +static ssize_t data_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct xsc_core_device *xdev = filp->private_data; + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + int copy; + + if (*pos) + return 0; + + if (!dbg->out_msg) + return -ENOMEM; + + copy = min_t(int, count, dbg->outlen); + if (copy_to_user(buf, dbg->out_msg, copy)) + return -EPERM; + + *pos += copy; + + return copy; +} + +static const struct file_operations dfops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = data_write, + .read = data_read, +}; + +static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + 
struct xsc_core_device *xdev = filp->private_data; + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + char outlen[8]; + int err; + + if (*pos) + return 0; + + err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen); + if (err < 0) + return err; + + if (copy_to_user(buf, &outlen, err)) + return -EPERM; + + *pos += err; + + return err; +} + +static ssize_t outlen_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct xsc_core_device *xdev = filp->private_data; + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + char outlen_str[8]; + int outlen; + void *ptr; + int err; + + if (*pos != 0 || count > 6) + return -EINVAL; + + kfree(dbg->out_msg); + dbg->out_msg = NULL; + dbg->outlen = 0; + + if (copy_from_user(outlen_str, buf, count)) + return -EPERM; + + outlen_str[7] = 0; + + err = kstrtoint(outlen_str, 10, &outlen); + if (err < 0) + return err; + + ptr = kzalloc(outlen, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + dbg->out_msg = ptr; + dbg->outlen = outlen; + + *pos = count; + + return count; +} + +static const struct file_operations olfops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = outlen_write, + .read = outlen_read, +}; + +static void set_wqname(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + + snprintf(cmd->wq_name, sizeof(cmd->wq_name), "xsc_cmd_%s", + dev_name(&xdev->pdev->dev)); +} + +static void clean_debug_files(struct xsc_core_device *xdev) +{ + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + + if (!xsc_debugfs_root) + return; + + xsc_cmdif_debugfs_cleanup(xdev); + debugfs_remove_recursive(dbg->dbg_root); +} + +static int create_debugfs_files(struct xsc_core_device *xdev) +{ + struct xsc_cmd_debug *dbg = &xdev->cmd.dbg; + int err = -ENOMEM; + + if (!xsc_debugfs_root) + return 0; + + dbg->dbg_root = debugfs_create_dir("cmd", xdev->dev_res->dbg_root); + if (!dbg->dbg_root) + return err; + + dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root, + xdev, &dfops); + if (!dbg->dbg_in) + 
goto err_dbg; + + dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root, + xdev, &dfops); + if (!dbg->dbg_out) + goto err_dbg; + + dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root, + xdev, &olfops); + if (!dbg->dbg_outlen) + goto err_dbg; + + debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status); + + dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, xdev, &fops); + if (!dbg->dbg_run) + goto err_dbg; + + xsc_cmdif_debugfs_init(xdev); + + return 0; + +err_dbg: + clean_debug_files(xdev); + return err; +} + +void xsc_cmd_use_events(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + flush_workqueue(cmd->wq); + + cmd->mode = CMD_MODE_EVENTS; + + while (cmd->cmd_pid != cmd->cq_cid) + msleep(20); + kthread_stop(cmd->cq_task); + cmd->cq_task = NULL; + + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +static int cmd_cq_polling(void *data); +void xsc_cmd_use_polling(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + flush_workqueue(cmd->wq); + cmd->mode = CMD_MODE_POLLING; + cmd->cq_task = kthread_create(cmd_cq_polling, (void *)xdev, "xsc_cmd_cq_polling"); + if (cmd->cq_task) + wake_up_process(cmd->cq_task); + + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +static int status_to_err(u8 status) +{ + return status ? 
-1 : 0; /* TBD more meaningful codes */ +} + +static struct xsc_cmd_msg *alloc_msg(struct xsc_core_device *xdev, int in_size) +{ + struct xsc_cmd_msg *msg = ERR_PTR(-ENOMEM); + struct xsc_cmd *cmd = &xdev->cmd; + struct cache_ent *ent = NULL; + + if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) + ent = &cmd->cache.large; + else if (in_size > 16 && in_size <= MED_LIST_SIZE) + ent = &cmd->cache.med; + + if (ent) { + spin_lock(&ent->lock); + if (!list_empty(&ent->head)) { + msg = list_entry(ent->head.next, typeof(*msg), list); + /* For cached lists, we must explicitly state what is + * the real size + */ + msg->len = in_size; + list_del(&msg->list); + } + spin_unlock(&ent->lock); + } + + if (IS_ERR(msg)) + msg = xsc_alloc_cmd_msg(xdev, GFP_KERNEL, in_size); + + return msg; +} + +static void free_msg(struct xsc_core_device *xdev, struct xsc_cmd_msg *msg) +{ + if (msg->cache) { + spin_lock(&msg->cache->lock); + list_add_tail(&msg->list, &msg->cache->head); + spin_unlock(&msg->cache->lock); + } else { + xsc_free_cmd_msg(xdev, msg); + } +} + +static int dummy_work(struct xsc_core_device *xdev, struct xsc_cmd_msg *in, + struct xsc_rsp_msg *out, u16 dummy_cnt, u16 dummy_start_pid) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_work_ent **dummy_ent_arr; + struct xsc_cmd_layout *lay; + struct semaphore *sem; + int err = 0; + u16 i; + u16 free_cnt = 0; + u16 temp_pid = dummy_start_pid; + + sem = &cmd->sem; + + dummy_ent_arr = kcalloc(dummy_cnt, sizeof(struct xsc_cmd_work_ent *), GFP_KERNEL); + if (!dummy_ent_arr) { + err = -ENOMEM; + goto alloc_ent_arr_err; + } + + for (i = 0; i < dummy_cnt; i++) { + dummy_ent_arr[i] = alloc_cmd(cmd, in, out); + if (IS_ERR(dummy_ent_arr[i])) { + xsc_core_err(xdev, "failed to alloc cmd buffer\n"); + err = -ENOMEM; + free_cnt = i; + goto alloc_ent_err; + } + + down(sem); + + dummy_ent_arr[i]->idx = alloc_ent(cmd); + if (dummy_ent_arr[i]->idx < 0) { + xsc_core_err(xdev, "failed to allocate command entry\n"); + err = -1; + 
free_cnt = i; + goto get_cmd_ent_idx_err; + } + dummy_ent_arr[i]->token = alloc_token(cmd); + cmd->ent_arr[dummy_ent_arr[i]->idx] = dummy_ent_arr[i]; + init_completion(&dummy_ent_arr[i]->done); + + lay = get_inst(cmd, temp_pid); + dummy_ent_arr[i]->lay = lay; + memset(lay, 0, sizeof(*lay)); + memcpy(lay->in, dummy_ent_arr[i]->in->first.data, sizeof(dummy_ent_arr[i]->in)); + lay->inlen = cpu_to_be32(dummy_ent_arr[i]->in->len); + lay->outlen = cpu_to_be32(dummy_ent_arr[i]->out->len); + lay->type = XSC_PCI_CMD_XPORT; + lay->token = dummy_ent_arr[i]->token; + lay->idx = dummy_ent_arr[i]->idx; + if (!cmd->checksum_disabled) + set_signature(dummy_ent_arr[i]); + else + lay->sig = 0xff; + temp_pid = (temp_pid + 1) % (1 << cmd->log_sz); + } + + /* ring doorbell after the descriptor is valid */ + wmb(); + writel(cmd->cmd_pid, REG_ADDR(xdev, cmd->reg.req_pid_addr)); + if (readl(REG_ADDR(xdev, cmd->reg.interrupt_stat_addr)) != 0) + writel(0xF, REG_ADDR(xdev, cmd->reg.interrupt_stat_addr)); + + mmiowb(); + xsc_core_dbg(xdev, "write 0x%x to command doorbell, idx %u ~ %u\n", cmd->cmd_pid, + dummy_ent_arr[0]->idx, dummy_ent_arr[dummy_cnt - 1]->idx); + + if (wait_for_completion_timeout(&dummy_ent_arr[dummy_cnt - 1]->done, + msecs_to_jiffies(3000)) == 0) { + xsc_core_err(xdev, "dummy_cmd %d ent timeout, cmdq fail\n", dummy_cnt - 1); + err = -ETIMEDOUT; + } else { + xsc_core_dbg(xdev, "%d ent done\n", dummy_cnt); + } + + for (i = 0; i < dummy_cnt; i++) + free_cmd(dummy_ent_arr[i]); + + kfree(dummy_ent_arr); + return err; + +get_cmd_ent_idx_err: + free_cmd(dummy_ent_arr[free_cnt]); + up(sem); +alloc_ent_err: + for (i = 0; i < free_cnt; i++) { + free_ent(cmd, dummy_ent_arr[i]->idx); + up(sem); + free_cmd(dummy_ent_arr[i]); + } + kfree(dummy_ent_arr); +alloc_ent_arr_err: + return err; +} + +static int xsc_dummy_cmd_exec(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size, u16 dmmy_cnt, u16 dummy_start) +{ + struct xsc_cmd_msg *inb; + struct xsc_rsp_msg *outb; 
+ int err; + + inb = alloc_msg(xdev, in_size); + if (IS_ERR(inb)) { + err = PTR_ERR(inb); + return err; + } + + err = xsc_copy_to_cmd_msg(inb, in, in_size); + if (err) { + xsc_core_warn(xdev, "err %d\n", err); + goto out_in; + } + + outb = xsc_alloc_rsp_msg(xdev, GFP_KERNEL, out_size); + if (IS_ERR(outb)) { + err = PTR_ERR(outb); + goto out_in; + } + + err = dummy_work(xdev, inb, outb, dmmy_cnt, dummy_start); + + if (err) + goto out_out; + + err = xsc_copy_from_rsp_msg(out, outb, out_size); + +out_out: + xsc_free_rsp_msg(xdev, outb); + +out_in: + free_msg(xdev, inb); + return err; +} + +static int xsc_send_dummy_cmd(struct xsc_core_device *xdev, u16 gap, u16 dummy_start) +{ + struct xsc_cmd_dummy_mbox_out *out; + struct xsc_cmd_dummy_mbox_in in; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto no_mem_out; + } + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DUMMY); + + err = xsc_dummy_cmd_exec(xdev, &in, sizeof(in), out, sizeof(*out), gap, dummy_start); + if (err) + goto out_out; + + if (out->hdr.status) { + err = xsc_cmd_status_to_err(&out->hdr); + goto out_out; + } + +out_out: + kfree(out); +no_mem_out: + return err; +} + +static int request_pid_cid_mismatch_restore(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + u16 req_pid, req_cid; + u16 gap; + + int err; + + req_pid = readl(REG_ADDR(xdev, cmd->reg.req_pid_addr)); + req_cid = readl(REG_ADDR(xdev, cmd->reg.req_cid_addr)); + if (req_pid >= (1 << cmd->log_sz) || req_cid >= (1 << cmd->log_sz)) { + xsc_core_err(xdev, "req_pid %d, req_cid %d, out of normal range!!! max value is %d\n", + req_pid, req_cid, (1 << cmd->log_sz)); + return -1; + } + + if (req_pid == req_cid) + return 0; + + gap = (req_pid > req_cid) ? 
(req_pid - req_cid) : ((1 << cmd->log_sz) + req_pid - req_cid); + xsc_core_info(xdev, "Cmdq req_pid %d, req_cid %d, send %d dummy cmds\n", + req_pid, req_cid, gap); + + err = xsc_send_dummy_cmd(xdev, gap, req_cid); + if (err) { + xsc_core_err(xdev, "Send dummy cmd failed\n"); + goto send_dummy_fail; + } + +send_dummy_fail: + return err; +} + +int _xsc_cmd_exec(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size) +{ + struct xsc_cmd_msg *inb; + struct xsc_rsp_msg *outb; + int err; + u8 status = 0; + struct xsc_cmd *cmd = &xdev->cmd; + + if (cmd->cmd_status == XSC_CMD_STATUS_TIMEDOUT) + return -ETIMEDOUT; + + inb = alloc_msg(xdev, in_size); + if (IS_ERR(inb)) { + err = PTR_ERR(inb); + return err; + } + + err = xsc_copy_to_cmd_msg(inb, in, in_size); + if (err) { + xsc_core_warn(xdev, "err %d\n", err); + goto out_in; + } + + outb = xsc_alloc_rsp_msg(xdev, GFP_KERNEL, out_size); + if (IS_ERR(outb)) { + err = PTR_ERR(outb); + goto out_in; + } + + err = xsc_cmd_invoke(xdev, inb, outb, &status); + if (err) + goto out_out; + + if (status) { + xsc_core_err(xdev, "opcode:%#x, err %d, status %d\n", + msg_to_opcode(inb), err, status); + err = status_to_err(status); + goto out_out; + } + + err = xsc_copy_from_rsp_msg(out, outb, out_size); + +out_out: + xsc_free_rsp_msg(xdev, outb); + +out_in: + free_msg(xdev, inb); + return err; +} +EXPORT_SYMBOL(_xsc_cmd_exec); + +static void destroy_msg_cache(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_msg *msg; + struct xsc_cmd_msg *n; + + list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { + list_del(&msg->list); + xsc_free_cmd_msg(xdev, msg); + } + + list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { + list_del(&msg->list); + xsc_free_cmd_msg(xdev, msg); + } +} + +static int create_msg_cache(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_msg *msg; + int err; + int i; + + spin_lock_init(&cmd->cache.large.lock); 
+ INIT_LIST_HEAD(&cmd->cache.large.head); + spin_lock_init(&cmd->cache.med.lock); + INIT_LIST_HEAD(&cmd->cache.med.head); + + for (i = 0; i < NUM_LONG_LISTS; i++) { + msg = xsc_alloc_cmd_msg(xdev, GFP_KERNEL, LONG_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto ex_err; + } + msg->cache = &cmd->cache.large; + list_add_tail(&msg->list, &cmd->cache.large.head); + } + + for (i = 0; i < NUM_MED_LISTS; i++) { + msg = xsc_alloc_cmd_msg(xdev, GFP_KERNEL, MED_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto ex_err; + } + msg->cache = &cmd->cache.med; + list_add_tail(&msg->list, &cmd->cache.med.head); + } + + return 0; + +ex_err: + destroy_msg_cache(xdev); + return err; +} + +static void xsc_cmd_comp_handler(struct xsc_core_device *xdev, u8 idx, struct xsc_rsp_layout *rsp) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_work_ent *ent; + struct xsc_inbox_hdr *hdr; + + if (idx > cmd->max_reg_cmds || (cmd->bitmask & (1 << idx))) { + xsc_core_err(xdev, "idx[%d] exceed max cmds, or has no relative request.\n", idx); + return; + } + ent = cmd->ent_arr[idx]; + ent->rsp_lay = rsp; + ktime_get_ts64(&ent->ts2); + + memcpy(ent->out->first.data, ent->rsp_lay->out, sizeof(ent->rsp_lay->out)); + dump_command(xdev, ent->out->next, ent, 0, ent->out->len); + if (!cmd->checksum_disabled) + ent->ret = verify_signature(ent); + else + ent->ret = 0; + ent->status = 0; + + hdr = (struct xsc_inbox_hdr *)ent->in->first.data; + xsc_core_dbg(xdev, "delivery status:%s(%d), rsp status=%d, opcode %#x, idx:%d,%d, ret=%d\n", + deliv_status_to_str(ent->status), ent->status, + ((struct xsc_outbox_hdr *)ent->rsp_lay->out)->status, + __be16_to_cpu(hdr->opcode), idx, ent->lay->idx, ent->ret); + free_ent(cmd, ent->idx); + complete(&ent->done); + up(&cmd->sem); +} + +static int cmd_cq_polling(void *data) +{ + struct xsc_core_device *xdev = data; + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_rsp_layout *rsp; + u32 cq_pid; + + while (!kthread_should_stop()) { + if 
(need_resched()) + schedule(); + cq_pid = readl(REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); + if (cmd->cq_cid == cq_pid) { + mdelay(3); + continue; + } + + //get cqe + rsp = get_cq_inst(cmd, cmd->cq_cid); + if (!cmd->ownerbit_learned) { + cmd->ownerbit_learned = 1; + cmd->owner_bit = rsp->owner_bit; + } + if (cmd->owner_bit != rsp->owner_bit) { + //hw update cq doorbell but buf may not ready + xsc_core_err(xdev, "hw update cq doorbell but buf not ready %u %u\n", + cmd->cq_cid, cq_pid); + continue; + } + + xsc_cmd_comp_handler(xdev, rsp->idx, rsp); + + cmd->cq_cid = (cmd->cq_cid + 1) % (1 << cmd->log_sz); + + writel(cmd->cq_cid, REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + if (cmd->cq_cid == 0) + cmd->owner_bit = !cmd->owner_bit; + } + return 0; +} + +int xsc_cmd_err_handler(struct xsc_core_device *xdev) +{ + union interrupt_stat { + struct { + u32 hw_read_req_err:1; + u32 hw_write_req_err:1; + u32 req_pid_err:1; + u32 rsp_cid_err:1; + }; + u32 raw; + } stat; + int err = 0; + int retry = 0; + + stat.raw = readl(REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); + while (stat.raw != 0) { + err++; + if (stat.hw_read_req_err) { + retry = 1; + stat.hw_read_req_err = 0; + xsc_core_err(xdev, "hw report read req from host failed!\n"); + } else if (stat.hw_write_req_err) { + retry = 1; + stat.hw_write_req_err = 0; + xsc_core_err(xdev, "hw report write req to fw failed!\n"); + } else if (stat.req_pid_err) { + stat.req_pid_err = 0; + xsc_core_err(xdev, "hw report unexpected req pid!\n"); + } else if (stat.rsp_cid_err) { + stat.rsp_cid_err = 0; + xsc_core_err(xdev, "hw report unexpected rsp cid!\n"); + } else { + stat.raw = 0; + xsc_core_err(xdev, "ignore unknown interrupt!\n"); + } + } + + if (retry) + writel(xdev->cmd.cmd_pid, REG_ADDR(xdev, xdev->cmd.reg.req_pid_addr)); + + if (err) + writel(0xf, REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); + + return err; +} + +void xsc_cmd_resp_handler(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct 
xsc_rsp_layout *rsp; + u32 cq_pid; + const int budget = 32; + int count = 0; + + while (count < budget) { + cq_pid = readl(REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); + if (cq_pid == cmd->cq_cid) + return; + + rsp = get_cq_inst(cmd, cmd->cq_cid); + if (!cmd->ownerbit_learned) { + cmd->ownerbit_learned = 1; + cmd->owner_bit = rsp->owner_bit; + } + if (cmd->owner_bit != rsp->owner_bit) { + xsc_core_err(xdev, "hw update cq doorbell but buf not ready %u %u\n", + cmd->cq_cid, cq_pid); + return; + } + + xsc_cmd_comp_handler(xdev, rsp->idx, rsp); + + cmd->cq_cid = (cmd->cq_cid + 1) % (1 << cmd->log_sz); + writel(cmd->cq_cid, REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + if (cmd->cq_cid == 0) + cmd->owner_bit = !cmd->owner_bit; + + count++; + } +} + +static void xsc_cmd_handle_rsp_before_reload +(struct xsc_cmd *cmd, struct xsc_core_device *xdev) +{ + u32 rsp_pid, rsp_cid; + + rsp_pid = readl(REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); + rsp_cid = readl(REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + if (rsp_pid == rsp_cid) + return; + + cmd->cq_cid = rsp_pid; + + writel(cmd->cq_cid, REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); +} + +int xsc_cmd_init(struct xsc_core_device *xdev) +{ + int size = sizeof(struct xsc_cmd_prot_block); + int align = roundup_pow_of_two(size); + struct xsc_cmd *cmd = &xdev->cmd; + u32 cmd_h, cmd_l; + u32 err_stat; + int err; + int i; + + //sriov need adapt for this process. 
+ //now there is 544 cmdq resource, soc using from id 514 + if (xsc_core_is_pf(xdev)) { + cmd->reg.req_pid_addr = HIF_CMDQM_HOST_REQ_PID_MEM_ADDR; + cmd->reg.req_cid_addr = HIF_CMDQM_HOST_REQ_CID_MEM_ADDR; + cmd->reg.rsp_pid_addr = HIF_CMDQM_HOST_RSP_PID_MEM_ADDR; + cmd->reg.rsp_cid_addr = HIF_CMDQM_HOST_RSP_CID_MEM_ADDR; + cmd->reg.req_buf_h_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR; + cmd->reg.req_buf_l_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR; + cmd->reg.rsp_buf_h_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR; + cmd->reg.rsp_buf_l_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR; + cmd->reg.msix_vec_addr = HIF_CMDQM_VECTOR_ID_MEM_ADDR; + cmd->reg.element_sz_addr = HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR; + cmd->reg.q_depth_addr = HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR; + cmd->reg.interrupt_stat_addr = HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR; + } else { + cmd->reg.req_pid_addr = CMDQM_HOST_REQ_PID_MEM_ADDR; + cmd->reg.req_cid_addr = CMDQM_HOST_REQ_CID_MEM_ADDR; + cmd->reg.rsp_pid_addr = CMDQM_HOST_RSP_PID_MEM_ADDR; + cmd->reg.rsp_cid_addr = CMDQM_HOST_RSP_CID_MEM_ADDR; + cmd->reg.req_buf_h_addr = CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR; + cmd->reg.req_buf_l_addr = CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR; + cmd->reg.rsp_buf_h_addr = CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR; + cmd->reg.rsp_buf_l_addr = CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR; + cmd->reg.msix_vec_addr = CMDQM_VECTOR_ID_MEM_ADDR; + cmd->reg.element_sz_addr = CMDQM_Q_ELEMENT_SZ_REG_ADDR; + cmd->reg.q_depth_addr = CMDQM_HOST_Q_DEPTH_REG_ADDR; + cmd->reg.interrupt_stat_addr = CMDQM_HOST_VF_ERR_STS_MEM_ADDR; + } + + cmd->pool = dma_pool_create("xsc_cmd", &xdev->pdev->dev, size, align, 0); + if (!cmd->pool) + return -ENOMEM; + + cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0); + if (!cmd->cmd_buf) { + err = -ENOMEM; + goto err_free_pool; + } + cmd->cq_buf = (void *)__get_free_pages(GFP_ATOMIC, 0); + if (!cmd->cq_buf) { + err = -ENOMEM; + goto err_free_cmd; + } + + cmd->dma = 
dma_map_single(&xdev->pdev->dev, cmd->cmd_buf, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(&xdev->pdev->dev, cmd->dma)) { + err = -ENOMEM; + goto err_free; + } + + cmd->cq_dma = dma_map_single(&xdev->pdev->dev, cmd->cq_buf, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(&xdev->pdev->dev, cmd->cq_dma)) { + err = -ENOMEM; + goto err_map_cmd; + } + + cmd->cmd_pid = readl(REG_ADDR(xdev, cmd->reg.req_pid_addr)); + cmd->cq_cid = readl(REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + cmd->ownerbit_learned = 0; + + xsc_cmd_handle_rsp_before_reload(cmd, xdev); + +#define ELEMENT_SIZE_LOG 6 //64B +#define Q_DEPTH_LOG 5 //32 + + cmd->log_sz = Q_DEPTH_LOG; + cmd->log_stride = readl(REG_ADDR(xdev, cmd->reg.element_sz_addr)); + writel(1 << cmd->log_sz, REG_ADDR(xdev, cmd->reg.q_depth_addr)); + if (cmd->log_stride != ELEMENT_SIZE_LOG) { + dev_err(&xdev->pdev->dev, "firmware failed to init cmdq, log_stride=(%d, %d)\n", + cmd->log_stride, ELEMENT_SIZE_LOG); + err = -ENODEV; + goto err_map; + } + + if (1 << cmd->log_sz > XSC_MAX_COMMANDS) { + dev_err(&xdev->pdev->dev, "firmware reports too many outstanding commands %d\n", + 1 << cmd->log_sz); + err = -EINVAL; + goto err_map; + } + + if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) { + dev_err(&xdev->pdev->dev, "command queue size overflow\n"); + err = -EINVAL; + goto err_map; + } + + cmd->checksum_disabled = 1; + cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; + cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; + + spin_lock_init(&cmd->alloc_lock); + spin_lock_init(&cmd->token_lock); + spin_lock_init(&cmd->doorbell_lock); + for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) + spin_lock_init(&cmd->stats[i].lock); + + sema_init(&cmd->sem, cmd->max_reg_cmds); + + cmd_h = (u32)((u64)(cmd->dma) >> 32); + cmd_l = (u32)(cmd->dma); + if (cmd_l & 0xfff) { + dev_err(&xdev->pdev->dev, "invalid command queue address\n"); + err = -ENOMEM; + goto err_map; + } + + writel(cmd_h, REG_ADDR(xdev, cmd->reg.req_buf_h_addr)); + writel(cmd_l, 
REG_ADDR(xdev, cmd->reg.req_buf_l_addr)); + + cmd_h = (u32)((u64)(cmd->cq_dma) >> 32); + cmd_l = (u32)(cmd->cq_dma); + if (cmd_l & 0xfff) { + dev_err(&xdev->pdev->dev, "invalid command queue address\n"); + err = -ENOMEM; + goto err_map; + } + writel(cmd_h, REG_ADDR(xdev, cmd->reg.rsp_buf_h_addr)); + writel(cmd_l, REG_ADDR(xdev, cmd->reg.rsp_buf_l_addr)); + + /* Make sure firmware sees the complete address before we proceed */ + wmb(); + + xsc_core_dbg(xdev, "descriptor at dma 0x%llx 0x%llx\n", + (unsigned long long)(cmd->dma), (unsigned long long)(cmd->cq_dma)); + + cmd->mode = CMD_MODE_POLLING; + cmd->cmd_status = XSC_CMD_STATUS_NORMAL; + + err = create_msg_cache(xdev); + if (err) { + dev_err(&xdev->pdev->dev, "failed to create command cache\n"); + goto err_map; + } + + set_wqname(xdev); + cmd->wq = create_singlethread_workqueue(cmd->wq_name); + if (!cmd->wq) { + dev_err(&xdev->pdev->dev, "failed to create command workqueue\n"); + err = -ENOMEM; + goto err_cache; + } + + cmd->cq_task = kthread_create(cmd_cq_polling, (void *)xdev, "xsc_cmd_cq_polling"); + if (!cmd->cq_task) { + dev_err(&xdev->pdev->dev, "failed to create cq task\n"); + err = -ENOMEM; + goto err_wq; + } + wake_up_process(cmd->cq_task); + + err = create_debugfs_files(xdev); + if (err) { + err = -ENOMEM; + goto err_task; + } + + err = request_pid_cid_mismatch_restore(xdev); + if (err) { + dev_err(&xdev->pdev->dev, "request pid,cid wrong, restore failed\n"); + goto err_req_restore; + } + + // clear abnormal state to avoid the impact of previous error + err_stat = readl(REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); + if (err_stat) { + xsc_core_warn(xdev, "err_stat 0x%x when initializing, clear it\n", err_stat); + writel(0xf, REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); + } + + return 0; + +err_req_restore: +err_task: + kthread_stop(cmd->cq_task); + +err_wq: + destroy_workqueue(cmd->wq); + +err_cache: + destroy_msg_cache(xdev); + +err_map: + dma_unmap_single(&xdev->pdev->dev, cmd->cq_dma, 
PAGE_SIZE, + DMA_BIDIRECTIONAL); + +err_map_cmd: + dma_unmap_single(&xdev->pdev->dev, cmd->dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); +err_free: + free_pages((unsigned long)cmd->cq_buf, 0); + +err_free_cmd: + free_pages((unsigned long)cmd->cmd_buf, 0); + +err_free_pool: + dma_pool_destroy(cmd->pool); + + return err; +} +EXPORT_SYMBOL(xsc_cmd_init); + +void xsc_cmd_cleanup(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + + clean_debug_files(xdev); + destroy_workqueue(cmd->wq); + if (cmd->cq_task) + kthread_stop(cmd->cq_task); + destroy_msg_cache(xdev); + dma_unmap_single(&xdev->pdev->dev, cmd->dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)cmd->cq_buf, 0); + dma_unmap_single(&xdev->pdev->dev, cmd->cq_dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)cmd->cmd_buf, 0); + dma_pool_destroy(cmd->pool); +} +EXPORT_SYMBOL(xsc_cmd_cleanup); + +static const char *cmd_status_str(u8 status) +{ + switch (status) { + case XSC_CMD_STAT_OK: + return "OK"; + case XSC_CMD_STAT_INT_ERR: + return "internal error"; + case XSC_CMD_STAT_BAD_OP_ERR: + return "bad operation"; + case XSC_CMD_STAT_BAD_PARAM_ERR: + return "bad parameter"; + case XSC_CMD_STAT_BAD_SYS_STATE_ERR: + return "bad system state"; + case XSC_CMD_STAT_BAD_RES_ERR: + return "bad resource"; + case XSC_CMD_STAT_RES_BUSY: + return "resource busy"; + case XSC_CMD_STAT_LIM_ERR: + return "limits exceeded"; + case XSC_CMD_STAT_BAD_RES_STATE_ERR: + return "bad resource state"; + case XSC_CMD_STAT_IX_ERR: + return "bad index"; + case XSC_CMD_STAT_NO_RES_ERR: + return "no resources"; + case XSC_CMD_STAT_BAD_INP_LEN_ERR: + return "bad input length"; + case XSC_CMD_STAT_BAD_OUTP_LEN_ERR: + return "bad output length"; + case XSC_CMD_STAT_BAD_QP_STATE_ERR: + return "bad QP state"; + case XSC_CMD_STAT_BAD_PKT_ERR: + return "bad packet (discarded)"; + case XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: + return "bad size too many outstanding CQEs"; + default: + return "unknown status"; + } +} + 
+int xsc_cmd_status_to_err(struct xsc_outbox_hdr *hdr) +{ + if (!hdr->status) + return 0; + + pr_warn("command failed, status %s(0x%x)\n", + cmd_status_str(hdr->status), hdr->status); + + switch (hdr->status) { + case XSC_CMD_STAT_OK: return 0; + case XSC_CMD_STAT_INT_ERR: return -EIO; + case XSC_CMD_STAT_BAD_OP_ERR: return -EOPNOTSUPP; + case XSC_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; + case XSC_CMD_STAT_BAD_RES_ERR: return -EINVAL; + case XSC_CMD_STAT_RES_BUSY: return -EBUSY; + case XSC_CMD_STAT_LIM_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; + case XSC_CMD_STAT_IX_ERR: return -EINVAL; + case XSC_CMD_STAT_NO_RES_ERR: return -EAGAIN; + case XSC_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; + case XSC_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; + case XSC_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_PKT_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; + default: return -EIO; + } +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c new file mode 100644 index 000000000000..49a00f759b5f --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include "common/driver.h" +#include "common/cq.h" +#include + +void xsc_cq_event(struct xsc_core_device *xdev, u32 cqn, int event_type) +{ + struct xsc_cq_table *table = &xdev->dev_res->cq_table; + struct xsc_core_cq *cq; + + spin_lock(&table->lock); + + cq = radix_tree_lookup(&table->tree, cqn); + if (cq) + atomic_inc(&cq->refcount); + + spin_unlock(&table->lock); + + if (!cq) { + xsc_core_warn(xdev, "Async event for bogus CQ 0x%x\n", cqn); + return; + } + + cq->event(cq, event_type); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +int xsc_core_create_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, + struct xsc_create_cq_mbox_in *in, int inlen) +{ + int err; + struct xsc_cq_table *table = &dev->dev_res->cq_table; + struct xsc_create_cq_mbox_out out; + struct xsc_destroy_cq_mbox_in din; + struct xsc_destroy_cq_mbox_out dout; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_CQ); + memset(&out, 0, sizeof(out)); + err = xsc_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + + cq->cqn = be32_to_cpu(out.cqn); + cq->cons_index = 0; + cq->arm_sn = 0; + cq->arm_db = dev->regs.complete_db; + cq->ci_db = dev->regs.complete_reg; + cq->dev = dev; + atomic_set(&cq->refcount, 1); + init_completion(&cq->free); + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, cq->cqn, cq); + spin_unlock_irq(&table->lock); + if (err) + goto err_cmd; + + cq->pid = current->pid; + err = xsc_debug_cq_add(dev, cq); + if (err) + xsc_core_dbg(dev, "failed adding CP 0x%x to debug file system\n", cq->cqn); + + return 0; + +err_cmd: + memset(&din, 0, sizeof(din)); + memset(&dout, 0, sizeof(dout)); + din.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); + xsc_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout)); + return err; +} +EXPORT_SYMBOL(xsc_core_create_cq); + +int xsc_core_destroy_cq(struct xsc_core_device *dev, struct 
xsc_core_cq *cq) +{ + struct xsc_cq_table *table = &dev->dev_res->cq_table; + struct xsc_destroy_cq_mbox_in in; + struct xsc_destroy_cq_mbox_out out; + struct xsc_core_cq *tmp; + int err; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, cq->cqn); + spin_unlock_irq(&table->lock); + if (!tmp) { + xsc_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn); + return -EINVAL; + } + if (tmp != cq) { + xsc_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn); + return -EINVAL; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); + in.cqn = cpu_to_be32(cq->cqn); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + + xsc_debug_cq_remove(dev, cq); + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); + wait_for_completion(&cq->free); + + return 0; +} +EXPORT_SYMBOL(xsc_core_destroy_cq); + +int xsc_core_query_cq(struct xsc_core_device *dev, struct xsc_core_cq *cq, + struct xsc_query_cq_mbox_out *out) +{ + struct xsc_query_cq_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, sizeof(*out)); + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_CQ); + in.cqn = cpu_to_be32(cq->cqn); + err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + return err; + + if (out->hdr.status) + return xsc_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL(xsc_core_query_cq); + +void xsc_init_cq_table(struct xsc_core_device *dev) +{ + struct xsc_cq_table *table = &dev->dev_res->cq_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); + xsc_cq_debugfs_init(dev); +} + +void xsc_cleanup_cq_table(struct xsc_core_device *dev) +{ + xsc_cq_debugfs_cleanup(dev); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c b/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c new file mode 100644 index 
000000000000..5ea8d8a29107 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/debugfs.c @@ -0,0 +1,866 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_hsi.h" +#include "common/driver.h" +#include "common/qp.h" +#include "common/cq.h" + +enum { + QP_PID, + QP_STATE, + QP_XPORT, + QP_MTU, + QP_N_RECV, + QP_RECV_SZ, + QP_N_SEND, + QP_LOG_PG_SZ, + QP_RQPN, +}; + +static char *qp_fields[] = { + [QP_PID] = "pid", + [QP_STATE] = "state", + [QP_XPORT] = "transport", + [QP_MTU] = "mtu", + [QP_N_RECV] = "num_recv", + [QP_RECV_SZ] = "rcv_wqe_sz", + [QP_N_SEND] = "num_send", + [QP_LOG_PG_SZ] = "log2_page_sz", + [QP_RQPN] = "remote_qpn", +}; + +enum { + EQ_NUM_EQES, + EQ_INTR, + EQ_LOG_PG_SZ, +}; + +static char *eq_fields[] = { + [EQ_NUM_EQES] = "num_eqes", + [EQ_INTR] = "intr", + [EQ_LOG_PG_SZ] = "log_page_size", +}; + +enum { + CQ_PID, + CQ_NUM_CQES, + CQ_LOG_PG_SZ, +}; + +static char *cq_fields[] = { + [CQ_PID] = "pid", + [CQ_NUM_CQES] = "num_cqes", + [CQ_LOG_PG_SZ] = "log_page_size", +}; + +struct dentry *xsc_debugfs_root; +EXPORT_SYMBOL(xsc_debugfs_root); + +static ssize_t xsc_debugfs_reg_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + char *buf; + int len; + char xsc_debugfs_reg_buf[256] = ""; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + buf = kasprintf(GFP_KERNEL, "%s: %s\n", + "xsc debugfs", + xsc_debugfs_reg_buf); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + + kfree(buf); + + return len; +} + +static ssize_t xsc_debugfs_reg_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct xsc_core_device *xdev = filp->private_data; + u64 reg; + int cnt, len; + int num; + int offset; + 
char xsc_debugfs_reg_buf[256] = ""; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + + if (count >= sizeof(xsc_debugfs_reg_buf)) + return -ENOSPC; + + len = simple_write_to_buffer(xsc_debugfs_reg_buf, + sizeof(xsc_debugfs_reg_buf) - 1, + ppos, buffer, count); + if (len < 0) + return len; + + xsc_debugfs_reg_buf[len] = '\0'; + + if (strncmp(xsc_debugfs_reg_buf, "write", 5) == 0) { + cnt = sscanf(&xsc_debugfs_reg_buf[5], "%llx %n", + ®, &offset); + if (cnt == 1) { + int tmp; + int value; + int buf[8]; + int *ptr; + + offset += 5; + num = 0; + while (1) { + cnt = sscanf(&xsc_debugfs_reg_buf[offset], "%x %n", &value, &tmp); + if (cnt < 2) + break; + xsc_core_info(xdev, "write: 0x%llx = 0x%x\n", + (reg + sizeof(int) * num), value); + offset += tmp; + buf[num++] = value; + if (num == 8) + break; + } + if (num > 1) { + ptr = &buf[0]; + IA_WRITE(xdev, reg, ptr, num); + } else if (num == 1) { + REG_WR32(xdev, reg, buf[0]); + } + } else { + xsc_core_err(xdev, "write \n"); + } + } else if (strncmp(xsc_debugfs_reg_buf, "read", 4) == 0) { + cnt = sscanf(&xsc_debugfs_reg_buf[4], "%llx %d %n", ®, &num, &offset); + if (cnt == 2) { + int *buf; + int i; + int *ptr; + + buf = kcalloc(num, sizeof(int), GFP_KERNEL); + if (!buf) + return -ENOMEM; + ptr = buf; + IA_READ(xdev, reg, ptr, num); + xsc_core_info(xdev, "read: 0x%llx num:%d\n", reg, num); + for (i = 0; i < num; i++) + xsc_core_info(xdev, "read:0x%llx = %#x\n", + (reg + sizeof(int) * i), buf[i]); + } else if (cnt == 1) { + int value = REG_RD32(xdev, reg); + + xsc_core_info(xdev, "read: 0x%llx = %#x\n", reg, value); + } else { + xsc_core_err(xdev, "read \n"); + } + } else { + xsc_core_err(xdev, "Unknown command %s\n", xsc_debugfs_reg_buf); + xsc_core_err(xdev, "Available commands:\n"); + xsc_core_err(xdev, "read \n"); + xsc_core_err(xdev, "write \n"); + } + return count; +} + +static const struct file_operations xsc_debugfs_reg_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = 
xsc_debugfs_reg_read, + .write = xsc_debugfs_reg_write, +}; + +int xsc_debugfs_init(struct xsc_core_device *dev) +{ + const char *name = pci_name(dev->pdev); + struct dentry *pfile; + + if (!xsc_debugfs_root) + return -ENOMEM; + + dev->dev_res->dbg_root = debugfs_create_dir(name, xsc_debugfs_root); + if (dev->dev_res->dbg_root) { + pfile = debugfs_create_file("reg_ops", 0600, + dev->dev_res->dbg_root, dev, + &xsc_debugfs_reg_fops); + if (!pfile) + xsc_core_err(dev, "failed to create debugfs ops for %s\n", name); + } else { + xsc_core_err(dev, "failed to create debugfs dir for %s\n", name); + return -ENOMEM; + } + + return 0; +} + +void xsc_debugfs_fini(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove_recursive(dev->dev_res->dbg_root); +} + +void xsc_register_debugfs(void) +{ + xsc_debugfs_root = debugfs_create_dir("xsc_pci", NULL); +} + +void xsc_unregister_debugfs(void) +{ + debugfs_remove(xsc_debugfs_root); +} + +int xsc_qp_debugfs_init(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return 0; + + atomic_set(&dev->num_qps, 0); + + dev->dev_res->qp_debugfs = debugfs_create_dir("QPs", dev->dev_res->dbg_root); + if (!dev->dev_res->qp_debugfs) + return -ENOMEM; + + return 0; +} + +void xsc_qp_debugfs_cleanup(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove_recursive(dev->dev_res->qp_debugfs); +} + +int xsc_eq_debugfs_init(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return 0; + + dev->dev_res->eq_debugfs = debugfs_create_dir("EQs", dev->dev_res->dbg_root); + if (!dev->dev_res->eq_debugfs) + return -ENOMEM; + + return 0; +} + +void xsc_eq_debugfs_cleanup(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove_recursive(dev->dev_res->eq_debugfs); +} + +static ssize_t average_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct xsc_cmd_stats *stats; + u64 field = 0; + int ret; + int err; + char tbuf[22]; + + if 
(*pos) + return 0; + + stats = filp->private_data; + spin_lock(&stats->lock); + if (stats->n) + field = stats->sum / stats->n; + spin_unlock(&stats->lock); + ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); + if (ret > 0) { + err = copy_to_user(buf, tbuf, ret); + if (err) + return err; + } + + *pos += ret; + return ret; +} + +static ssize_t average_write(struct file *filp, const char __user *buf, + size_t count, loff_t *pos) +{ + struct xsc_cmd_stats *stats; + + stats = filp->private_data; + spin_lock(&stats->lock); + stats->sum = 0; + stats->n = 0; + spin_unlock(&stats->lock); + + *pos += count; + + return count; +} + +static const struct file_operations stats_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = average_read, + .write = average_write, +}; + +int xsc_cmdif_debugfs_init(struct xsc_core_device *xdev) +{ + struct xsc_cmd_stats *stats; + struct xsc_cmd *cmd; + struct dentry **cmdif_debugfs; + const char *namep; + int err; + int i; + + if (!xsc_debugfs_root) + return 0; + + cmd = &xdev->cmd; + cmdif_debugfs = &xdev->dev_res->cmdif_debugfs; + *cmdif_debugfs = debugfs_create_dir("commands", xdev->dev_res->dbg_root); + if (!*cmdif_debugfs) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) { + stats = &cmd->stats[i]; + namep = xsc_command_str(i); + if (strcmp(namep, "unknown command opcode")) { + stats->root = debugfs_create_dir(namep, *cmdif_debugfs); + if (!stats->root) { + xsc_core_warn(xdev, "failed adding command %d\n", i); + err = -ENOMEM; + goto out; + } + + stats->avg = debugfs_create_file("average", 0400, + stats->root, stats, + &stats_fops); + if (!stats->avg) { + xsc_core_warn(xdev, "failed creating debugfs file\n"); + err = -ENOMEM; + goto out; + } + + debugfs_create_u64("n", 0400, stats->root, &stats->n); + } + } + + return 0; +out: + debugfs_remove_recursive(xdev->dev_res->cmdif_debugfs); + return err; +} + +void xsc_cmdif_debugfs_cleanup(struct xsc_core_device *xdev) +{ + if (!xsc_debugfs_root) + return; + + 
debugfs_remove_recursive(xdev->dev_res->cmdif_debugfs); +} + +int xsc_cq_debugfs_init(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return 0; + + dev->dev_res->cq_debugfs = debugfs_create_dir("CQs", dev->dev_res->dbg_root); + if (!dev->dev_res->cq_debugfs) + return -ENOMEM; + + return 0; +} + +void xsc_cq_debugfs_cleanup(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove_recursive(dev->dev_res->cq_debugfs); +} + +int xsc_qptrace_debugfs_init(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return 0; + + dev->dev_res->qptrace_debugfs = + debugfs_create_dir("QPTrace", dev->dev_res->dbg_root); + if (!dev->dev_res->qptrace_debugfs) + return -ENOMEM; + + return 0; +} + +void xsc_qptrace_debugfs_cleanup(struct xsc_core_device *dev) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove_recursive(dev->dev_res->qptrace_debugfs); +} + +static u64 qp_read_field(struct xsc_core_device *dev, struct xsc_core_qp *qp, + int index) +{ + struct xsc_query_qp_mbox_out *out; + struct xsc_qp_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + err = xsc_core_qp_query(dev, qp, out, sizeof(*out)); + if (err) { + xsc_core_warn(dev, "failed to query qp\n"); + goto out; + } + + ctx = &out->ctx; + switch (index) { + case QP_PID: + param = qp->pid; + break; + case QP_MTU: + param = ctx->mtu_mode ? 
IB_MTU_1024 : IB_MTU_4096; + break; + case QP_RQPN: + param = cpu_to_be32(ctx->remote_qpn) & 0xffffff; + break; + } + +out: + kfree(out); + return param; +} + +static u64 eq_read_field(struct xsc_core_device *dev, struct xsc_eq *eq, + int index) +{ + struct xsc_query_eq_mbox_out *out; + struct xsc_eq_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + ctx = &out->ctx; + + err = xsc_core_eq_query(dev, eq, out, sizeof(*out)); + if (err) { + xsc_core_warn(dev, "failed to query eq\n"); + goto out; + } + + switch (index) { + case EQ_NUM_EQES: + break; + case EQ_INTR: + break; + case EQ_LOG_PG_SZ: + break; + } + +out: + kfree(out); + return param; +} + +static u64 cq_read_field(struct xsc_core_device *dev, struct xsc_core_cq *cq, + int index) +{ + struct xsc_query_cq_mbox_out *out; + struct xsc_cq_context *ctx; + u64 param = 0; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return param; + + ctx = &out->ctx; + + err = xsc_core_query_cq(dev, cq, out); + if (err) { + xsc_core_warn(dev, "failed to query cq\n"); + goto out; + } + + switch (index) { + case CQ_PID: + break; + case CQ_NUM_CQES: + break; + case CQ_LOG_PG_SZ: + break; + } + +out: + kfree(out); + return param; +} + +static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count, + loff_t *pos) +{ + struct xsc_field_desc *desc; + struct xsc_rsc_debug *d; + char tbuf[18]; + u64 field; + int ret; + int err; + + if (*pos) + return 0; + + desc = filp->private_data; + d = (void *)(desc - desc->i) - sizeof(*d); + switch (d->type) { + case XSC_DBG_RSC_QP: + field = qp_read_field(d->xdev, d->object, desc->i); + break; + + case XSC_DBG_RSC_EQ: + field = eq_read_field(d->xdev, d->object, desc->i); + break; + + case XSC_DBG_RSC_CQ: + field = cq_read_field(d->xdev, d->object, desc->i); + break; + + default: + xsc_core_warn(d->xdev, "invalid resource type %d\n", d->type); + return -EINVAL; + } + + ret = snprintf(tbuf, 
sizeof(tbuf), "0x%llx\n", field); + if (ret > 0) { + err = copy_to_user(buf, tbuf, ret); + if (err) + return err; + } + + *pos += ret; + return ret; +} + +static const struct file_operations fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = dbg_read, +}; + +static int add_res_tree(struct xsc_core_device *dev, enum dbg_rsc_type type, + struct dentry *root, struct xsc_rsc_debug **dbg, + int rsn, char **field, int nfile, void *data) +{ + struct xsc_rsc_debug *d; + char resn[32]; + int err; + int i; + + d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL); + if (!d) + return -ENOMEM; + + d->xdev = dev; + d->object = data; + d->type = type; + sprintf(resn, "0x%x", rsn); + d->root = debugfs_create_dir(resn, root); + if (!d->root) { + err = -ENOMEM; + goto out_free; + } + + for (i = 0; i < nfile; i++) { + d->fields[i].i = i; + d->fields[i].dent = debugfs_create_file(field[i], 0400, + d->root, &d->fields[i], + &fops); + if (!d->fields[i].dent) { + err = -ENOMEM; + goto out_rem; + } + } + *dbg = d; + + return 0; +out_rem: + debugfs_remove_recursive(d->root); + +out_free: + kfree(d); + return err; +} + +static void rem_res_tree(struct xsc_rsc_debug *d) +{ + debugfs_remove_recursive(d->root); + kfree(d); +} + +int xsc_debug_qp_add(struct xsc_core_device *dev, struct xsc_core_qp *qp) +{ + int err; + + if (!xsc_debugfs_root) + return 0; + + err = add_res_tree(dev, XSC_DBG_RSC_QP, dev->dev_res->qp_debugfs, + &qp->dbg, qp->qpn, qp_fields, + ARRAY_SIZE(qp_fields), qp); + if (err) + qp->dbg = NULL; + + return err; +} + +void xsc_debug_qp_remove(struct xsc_core_device *dev, struct xsc_core_qp *qp) +{ + if (!xsc_debugfs_root) + return; + + if (qp->dbg) + rem_res_tree(qp->dbg); +} + +static int set_udp_sport(u32 qpn, u32 sport, struct xsc_core_device *xdev, struct xsc_qp_trace *t) +{ + int err; + struct xsc_ap_feat_mbox_in in; + struct xsc_ap_feat_mbox_out out; + struct timespec64 ts; + struct xsc_qpt_update_msg msg; + + ktime_get_boottime_ts64(&ts); + + 
memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = __cpu_to_be16(XSC_CMD_OP_AP_FEAT); + in.xsc_ap_feature_opcode = __cpu_to_be16(XSC_AP_FEAT_SET_UDP_SPORT); + in.ap.set_udp_sport.qpn = __cpu_to_be32(qpn); + in.ap.set_udp_sport.udp_sport = __cpu_to_be32(sport); + + err = xsc_cmd_exec(xdev, (void *)&in, sizeof(in), (void *)&out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "Failed to set udp_sport, err(%u), status(%u)\n", err, + out.hdr.status); + return -EINVAL; + } + + msg.main_ver = YS_QPTRACE_VER_MAJOR; + msg.sub_ver = YS_QPTRACE_VER_MINOR; + msg.type = YS_QPTRACE_UPDATE_TYPE_SPORT; + msg.data.timestamp = (u64)(u32)ts.tv_sec * MSEC_PER_SEC + + ts.tv_nsec / NSEC_PER_MSEC; + msg.data.qpn = qpn; + msg.data.bus = xdev->pdev->bus->number; + msg.data.dev = PCI_SLOT(xdev->pdev->devfn); + msg.data.fun = PCI_FUNC(xdev->pdev->devfn); + msg.data.update.sport.port_old = t->s_port; + msg.data.update.sport.port_new = __cpu_to_be16(sport); + t->s_port = msg.data.update.sport.port_new; + + qpts_write_one_msg(&msg); + + xsc_core_info(xdev, "Set qpn(%u) udp_sport(%u)\n", qpn, sport); + + return 0; +} + +static ssize_t trace_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) +{ + struct xsc_core_qp *qp = filp->private_data; + struct xsc_qp_trace *trace_info; + int err; + int len; + + if (*pos) + return 0; + + if (!qp || !qp->trace_info) + return -EIO; + + trace_info = qp->trace_info; + + len = sizeof(struct xsc_qp_trace); + err = copy_to_user(buf, trace_info, len); + if (err) + return err; + + *pos += len; + return len; +} + +static ssize_t trace_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) +{ + struct xsc_core_qp *qp = filp->private_data; + struct xsc_qp_trace *trace_info; + struct xsc_core_device *xdev; + int ret = 0, len; + u32 sport; + char tmp_buf[256] = ""; + + ret = -EIO; + if (!qp || !qp->dbg || !qp->dbg->xdev || !qp->trace_info) { + pr_err("%s error null pointer!\n", 
__func__); + goto trace_write_out; + } + + trace_info = qp->trace_info; + xdev = qp->dbg->xdev; + + ret = 0; + /* don't allow partial writes */ + if (*pos != 0) { + xsc_core_err(xdev, "Don't allow partial writes!\n"); + goto trace_write_out; + } + + ret = -ENOSPC; + if (count >= sizeof(tmp_buf)) { + xsc_core_err(xdev, "Count out of size of buffer!\n"); + goto trace_write_out; + } + + len = simple_write_to_buffer(tmp_buf, sizeof(tmp_buf) - 1, + pos, buf, count); + ret = len; + if (len < 0) { + xsc_core_err(xdev, "Write to buffer error(%d)!\n", len); + goto trace_write_out; + } + + tmp_buf[len] = '\0'; + + // + // sport 10000 + if (strncmp(tmp_buf, "sport", 5) == 0) { + ret = kstrtouint(&tmp_buf[6], 0, &sport); + if (ret != 0) { + xsc_core_err(xdev, "error arguments: \n"); + ret = -EINVAL; + goto trace_write_out; + } + ret = set_udp_sport(trace_info->lqpn, sport, xdev, trace_info); + if (ret) { + ret = -EIO; + goto trace_write_out; + } + } else { + xsc_core_err(xdev, "invalid arguments: %s\n", tmp_buf); + ret = -EOPNOTSUPP; + goto trace_write_out; + } + + return count; + +trace_write_out: + return ret; +} + +static const struct file_operations fops_trace = { + .owner = THIS_MODULE, + .open = simple_open, + .read = trace_read, + .write = trace_write, +}; + +int xsc_create_qptrace(struct xsc_core_device *dev, struct xsc_core_qp *qp) +{ + char name[16]; + + if (!xsc_debugfs_root) + return 0; + + snprintf(name, sizeof(name), "%d", qp->qpn); + + qp->trace = debugfs_create_file(name, 0644, dev->dev_res->qptrace_debugfs, + (void *)qp, &fops_trace); + if (!qp->trace) + return -1; + + return 0; +} + +void xsc_remove_qptrace(struct xsc_core_device *dev, struct xsc_core_qp *qp) +{ + if (!xsc_debugfs_root) + return; + + debugfs_remove(qp->trace); +} + +int xsc_debug_eq_add(struct xsc_core_device *dev, struct xsc_eq *eq) +{ + int err; + + if (!xsc_debugfs_root) + return 0; + + err = add_res_tree(dev, XSC_DBG_RSC_EQ, dev->dev_res->eq_debugfs, + &eq->dbg, eq->eqn, eq_fields, + 
ARRAY_SIZE(eq_fields), eq); + if (err) + eq->dbg = NULL; + + return err; +} + +void xsc_debug_eq_remove(struct xsc_core_device *dev, struct xsc_eq *eq) +{ + if (!xsc_debugfs_root) + return; + + if (eq->dbg) + rem_res_tree(eq->dbg); +} + +int xsc_debug_cq_add(struct xsc_core_device *dev, struct xsc_core_cq *cq) +{ + int err; + + if (!xsc_debugfs_root) + return 0; + + err = add_res_tree(dev, XSC_DBG_RSC_CQ, dev->dev_res->cq_debugfs, + &cq->dbg, cq->cqn, cq_fields, + ARRAY_SIZE(cq_fields), cq); + if (err) + cq->dbg = NULL; + + return err; +} + +void xsc_debug_cq_remove(struct xsc_core_device *dev, struct xsc_core_cq *cq) +{ + if (!xsc_debugfs_root) + return; + + if (cq->dbg) + rem_res_tree(cq->dbg); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.c b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.c new file mode 100644 index 000000000000..7ea5e1c78230 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "common/xsc_core.h" +#include "devlink.h" +#include "eswitch.h" + +static const struct devlink_ops xsc_devlink_ops = { + .eswitch_mode_set = xsc_devlink_eswitch_mode_set, + .eswitch_mode_get = xsc_devlink_eswitch_mode_get, +}; + +struct devlink *xsc_devlink_alloc(struct device *dev) +{ + return devlink_alloc(&xsc_devlink_ops, sizeof(struct xsc_core_device), dev); +} + +void xsc_devlink_free(struct devlink *devlink) +{ + devlink_free(devlink); +} + +int xsc_devlink_register(struct devlink *devlink, struct device *dev) +{ + int err = 0; + + devlink_register(devlink); + return err; +} + +void xsc_devlink_unregister(struct devlink *devlink) +{ + devlink_unregister(devlink); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h new file mode 100644 index 000000000000..c08d04bfa989 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/devlink.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_DEVLINK_H +#define XSC_DEVLINK_H + +#include + +struct devlink *xsc_devlink_alloc(struct device *dev); +void xsc_devlink_free(struct devlink *devlink); +int xsc_devlink_register(struct devlink *devlink, struct device *dev); +void xsc_devlink_unregister(struct devlink *devlink); + +#endif /* XSC_DEVLINK_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c new file mode 100644 index 000000000000..1ce0123fcdd2 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ +#include +#include +#include "common/driver.h" +#include "common/cq.h" +#include "fw/xsc_fw.h" +#include "wq.h" +#include "common/xsc_core.h" + +enum { + XSC_EQE_SIZE = sizeof(struct xsc_eqe), + XSC_EQE_OWNER_INIT_VAL = 0x1, +}; + +enum { + XSC_NUM_SPARE_EQE = 0x80, + XSC_NUM_ASYNC_EQE = 0x100, +}; + +struct map_eq_in { + u64 mask; + u32 reserved; + u32 unmap_eqn; +}; + +struct cre_des_eq { + u8 reserved[15]; + u8 eqn; +}; + +static int xsc_cmd_destroy_eq(struct xsc_core_device *dev, u32 eqn) +{ + struct xsc_destroy_eq_mbox_in in; + struct xsc_destroy_eq_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_EQ); + in.eqn = cpu_to_be32(eqn); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (!err) + goto ex; + + if (out.hdr.status) + err = xsc_cmd_status_to_err(&out.hdr); + +ex: + return err; +} + +static struct xsc_eqe *get_eqe(struct xsc_eq *eq, u32 entry) +{ + return xsc_buf_offset(&eq->buf, entry * XSC_EQE_SIZE); +} + +static struct xsc_eqe *next_eqe_sw(struct xsc_eq *eq) +{ + struct xsc_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); + + return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? 
NULL : eqe; +} + +static void eq_update_ci(struct xsc_eq *eq, int arm) +{ + union xsc_eq_doorbell db; + + db.val = 0; + db.arm = !!arm; + db.eq_next_cid = eq->cons_index; + db.eq_id = eq->eqn; + writel(db.val, REG_ADDR(eq->dev, eq->doorbell)); + /* We still want ordering, just not swabbing, so add a barrier */ + mb(); +} + +void xsc_cq_completion(struct xsc_core_device *dev, u32 cqn) +{ + struct xsc_core_cq *cq; + struct xsc_cq_table *table = &dev->dev_res->cq_table; + + rcu_read_lock(); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + atomic_inc(&cq->refcount); + rcu_read_unlock(); + + if (!cq) { + xsc_core_err(dev, "Completion event for bogus CQ, cqn=%d\n", cqn); + return; + } + + ++cq->arm_sn; + + if (!cq->comp) + xsc_core_err(dev, "cq->comp is NULL\n"); + else + cq->comp(cq); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +void xsc_eq_cq_event(struct xsc_core_device *dev, u32 cqn, int event_type) +{ + struct xsc_core_cq *cq; + struct xsc_cq_table *table = &dev->dev_res->cq_table; + + spin_lock(&table->lock); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + atomic_inc(&cq->refcount); + spin_unlock(&table->lock); + + if (unlikely(!cq)) { + xsc_core_err(dev, "Async event for bogus CQ, cqn=%d\n", cqn); + return; + } + + cq->event(cq, event_type); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +static int xsc_eq_int(struct xsc_core_device *dev, struct xsc_eq *eq) +{ + struct xsc_eqe *eqe; + int eqes_found = 0; + int set_ci = 0; + u32 cqn, qpn, queue_id; + + while ((eqe = next_eqe_sw(eq))) { + /* Make sure we read EQ entry contents after we've + * checked the ownership bit. 
+ */ + rmb(); + switch (eqe->type) { + case XSC_EVENT_TYPE_COMP: + case XSC_EVENT_TYPE_INTERNAL_ERROR: + /* eqe is changing */ + queue_id = eqe->queue_id; + cqn = queue_id; + xsc_cq_completion(dev, cqn); + break; + + case XSC_EVENT_TYPE_CQ_ERROR: + queue_id = eqe->queue_id; + cqn = queue_id; + xsc_eq_cq_event(dev, cqn, eqe->type); + break; + case XSC_EVENT_TYPE_WQ_CATAS_ERROR: + case XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case XSC_EVENT_TYPE_WQ_ACCESS_ERROR: + queue_id = eqe->queue_id; + qpn = queue_id; + xsc_qp_event(dev, qpn, eqe->type); + break; + default: + xsc_core_warn(dev, "Unhandle event %d on EQ %d\n", eqe->type, eq->eqn); + break; + } + + ++eq->cons_index; + eqes_found = 1; + ++set_ci; + + /* The HCA will think the queue has overflowed if we + * don't tell it we've been processing events. We + * create our EQs with XSC_NUM_SPARE_EQE extra + * entries, so we must update our consumer index at + * least that often. + */ + if (unlikely(set_ci >= XSC_NUM_SPARE_EQE)) { + xsc_core_dbg(dev, "EQ%d eq_num=%d qpn=%d, db_noarm\n", + eq->eqn, set_ci, eqe->queue_id); + eq_update_ci(eq, 0); + set_ci = 0; + } + } + + eq_update_ci(eq, 1); + + return eqes_found; +} + +static irqreturn_t xsc_msix_handler(int irq, void *eq_ptr) +{ + struct xsc_eq *eq = eq_ptr; + struct xsc_core_device *dev = eq->dev; + + xsc_eq_int(dev, eq); + + /* MSI-X vectors always belong to us */ + return IRQ_HANDLED; +} + +static void init_eq_buf(struct xsc_eq *eq) +{ + struct xsc_eqe *eqe; + int i; + + for (i = 0; i < eq->nent; i++) { + eqe = get_eqe(eq, i); + eqe->owner = XSC_EQE_OWNER_INIT_VAL; + } +} + +int xsc_create_map_eq(struct xsc_core_device *dev, struct xsc_eq *eq, u8 vecidx, + int nent, const char *name) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + u16 msix_vec_offset = dev->msix_vec_base + vecidx; + struct xsc_create_eq_mbox_in *in; + struct xsc_create_eq_mbox_out out; + int err; + int inlen; + int hw_npages; + + eq->nent = roundup_pow_of_two(roundup(nent, XSC_NUM_SPARE_EQE)); + 
err = xsc_buf_alloc(dev, eq->nent * XSC_EQE_SIZE, PAGE_SIZE, &eq->buf); + if (err) + return err; + + init_eq_buf(eq); + + hw_npages = DIV_ROUND_UP(eq->nent * XSC_EQE_SIZE, PAGE_SIZE_4K); + inlen = sizeof(*in) + sizeof(in->pas[0]) * hw_npages; + in = xsc_vzalloc(inlen); + if (!in) { + err = -ENOMEM; + goto err_buf; + } + memset(&out, 0, sizeof(out)); + + xsc_fill_page_array(&eq->buf, in->pas, hw_npages); + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_EQ); + in->ctx.log_eq_sz = ilog2(eq->nent); + in->ctx.vecidx = cpu_to_be16(msix_vec_offset); + in->ctx.pa_num = cpu_to_be16(hw_npages); + in->ctx.glb_func_id = cpu_to_be16(dev->glb_func_id); + in->ctx.is_async_eq = (vecidx == XSC_EQ_VEC_ASYNC ? 1 : 0); + + err = xsc_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err) + goto err_in; + + if (out.hdr.status) { + err = -ENOSPC; + goto err_in; + } + + snprintf(dev_res->irq_info[vecidx].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", + name, pci_name(dev->pdev)); + + eq->eqn = be32_to_cpu(out.eqn); + eq->irqn = pci_irq_vector(dev->pdev, vecidx); + eq->dev = dev; + eq->doorbell = dev->regs.event_db; + eq->index = vecidx; + xsc_core_dbg(dev, "msix%d request vector%d eq%d irq%d\n", + vecidx, msix_vec_offset, eq->eqn, eq->irqn); + + err = request_irq(eq->irqn, xsc_msix_handler, 0, + dev_res->irq_info[vecidx].name, eq); + if (err) + goto err_eq; + + /* EQs are created in ARMED state + */ + eq_update_ci(eq, 1); + xsc_vfree(in); + return 0; + +err_eq: + xsc_cmd_destroy_eq(dev, eq->eqn); + +err_in: + xsc_vfree(in); + +err_buf: + xsc_buf_free(dev, &eq->buf); + return err; +} +EXPORT_SYMBOL_GPL(xsc_create_map_eq); + +int xsc_destroy_unmap_eq(struct xsc_core_device *dev, struct xsc_eq *eq) +{ + int err; + + if (!xsc_fw_is_available(dev)) + return 0; + + free_irq(eq->irqn, eq); + err = xsc_cmd_destroy_eq(dev, eq->eqn); + if (err) + xsc_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", + eq->eqn); + xsc_buf_free(dev, &eq->buf); + + return err; +} 
+EXPORT_SYMBOL_GPL(xsc_destroy_unmap_eq); + +int xsc_eq_init(struct xsc_core_device *dev) +{ + int err; + + spin_lock_init(&dev->dev_res->eq_table.lock); + + err = xsc_eq_debugfs_init(dev); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_eq_init); + +void xsc_eq_cleanup(struct xsc_core_device *dev) +{ + xsc_eq_debugfs_cleanup(dev); +} +EXPORT_SYMBOL_GPL(xsc_eq_cleanup); + +int xsc_start_eqs(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + int err; + + err = xsc_create_map_eq(dev, &table->async_eq, XSC_EQ_VEC_ASYNC, + XSC_NUM_ASYNC_EQE, "xsc_async_eq"); + if (err) + xsc_core_warn(dev, "failed to create async EQ %d\n", err); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_start_eqs); + +void xsc_stop_eqs(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + + xsc_destroy_unmap_eq(dev, &table->async_eq); +} + +int xsc_core_eq_query(struct xsc_core_device *dev, struct xsc_eq *eq, + struct xsc_query_eq_mbox_out *out, int outlen) +{ + struct xsc_query_eq_mbox_in in; + int err = 0; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_EQ); + in.eqn = eq->eqn; + + if (out->hdr.status) + err = xsc_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_core_eq_query); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c new file mode 100644 index 000000000000..005c8aa93d72 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.c @@ -0,0 +1,812 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include "common/vport.h" +#include "eswitch.h" +#include "common/xsc_lag.h" + +static int xsc_eswitch_check(const struct xsc_core_device *dev) +{ + if (!ESW_ALLOWED(dev->priv.eswitch)) + return -EPERM; + if (!dev->priv.eswitch->num_vfs) + return -EOPNOTSUPP; + + return 0; +} + +struct xsc_vport *__must_check +xsc_eswitch_get_vport(struct xsc_eswitch *esw, u16 vport_num) +{ + u16 idx; + + if (!esw || !xsc_core_is_vport_manager(esw->dev)) + return ERR_PTR(-EPERM); + + idx = xsc_eswitch_vport_num_to_index(esw, vport_num); + if (idx > esw->total_vports - 1) { + xsc_core_dbg(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n", + vport_num, idx); + return ERR_PTR(-EINVAL); + } + + return &esw->vports[idx]; +} +EXPORT_SYMBOL(xsc_eswitch_get_vport); + +static int eswitch_devlink_pf_support_check(const struct xsc_eswitch *esw) +{ + return 0; +} + +static int esw_mode_from_devlink(u16 mode, u16 *xsc_mode) +{ + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + *xsc_mode = XSC_ESWITCH_LEGACY; + break; + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + *xsc_mode = XSC_ESWITCH_OFFLOADS; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int esw_mode_to_devlink(u16 xsc_mode, u16 *mode) +{ + switch (xsc_mode) { + case XSC_ESWITCH_LEGACY: + *mode = DEVLINK_ESWITCH_MODE_LEGACY; + break; + case XSC_ESWITCH_OFFLOADS: + *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + break; + default: + return -EINVAL; + } + + return 0; +} + +int xsc_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack) +{ + struct xsc_core_device *dev = devlink_priv(devlink); + struct xsc_eswitch *esw = dev->priv.eswitch; + u16 cur_xsc_mode, xsc_mode = 0; + int err = 0; + + err = xsc_eswitch_check(dev); + if (err) + return err; + + if (esw_mode_from_devlink(mode, &xsc_mode)) + return -EINVAL; + + mutex_lock(&esw->mode_lock); + err = eswitch_devlink_pf_support_check(esw); + if (err) + goto done; + + cur_xsc_mode = esw->mode; + + if 
(cur_xsc_mode == xsc_mode) + goto done; + + if (xsc_host_is_dpu_mode(dev) || + (cur_xsc_mode != XSC_ESWITCH_LEGACY && xsc_mode == XSC_ESWITCH_OFFLOADS) || + (cur_xsc_mode == XSC_ESWITCH_OFFLOADS && xsc_mode == XSC_ESWITCH_LEGACY)) { + xsc_core_err(dev, "%s failed: do not set mode %d to mode %d\n", + __func__, cur_xsc_mode, xsc_mode); + mutex_unlock(&esw->mode_lock); + return -EOPNOTSUPP; + } + + xsc_lag_disable(dev); + + esw->mode = xsc_mode; + if (esw->mode == XSC_ESWITCH_OFFLOADS) + xsc_cmd_modify_hca(dev); + + xsc_lag_enable(dev); + +done: + mutex_unlock(&esw->mode_lock); + return err; +} + +int xsc_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct xsc_core_device *dev = devlink_priv(devlink); + struct xsc_eswitch *esw = dev->priv.eswitch; + int err = 0; + + err = xsc_eswitch_check(dev); + if (err) + return err; + + mutex_lock(&esw->mode_lock); + if (xsc_host_is_dpu_mode(dev)) + err = -EOPNOTSUPP; + else + err = esw_mode_to_devlink(esw->mode, mode); + mutex_unlock(&esw->mode_lock); + + return err; +} + +static void esw_vport_change_handle_locked(struct xsc_vport *vport) +{ + struct xsc_core_device *dev = vport->dev; + u8 mac[ETH_ALEN]; + + xsc_query_other_nic_vport_mac_address(dev, vport->vport, mac); +} + +static void esw_vport_change_handler(struct work_struct *work) +{ + struct xsc_vport *vport = + container_of(work, struct xsc_vport, vport_change_handler); + struct xsc_eswitch *esw = vport->dev->priv.eswitch; + + mutex_lock(&esw->state_lock); + esw_vport_change_handle_locked(vport); + mutex_unlock(&esw->state_lock); +} + +void xsc_eswitch_enable_vport(struct xsc_eswitch *esw, + struct xsc_vport *vport, + enum xsc_eswitch_vport_event enabled_events) +{ + mutex_lock(&esw->state_lock); + if (vport->enabled) + goto unlock_out; + + bitmap_zero(vport->req_vlan_bitmap, VLAN_N_VID); + bitmap_zero(vport->acl_vlan_8021q_bitmap, VLAN_N_VID); + bitmap_zero(vport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID); + + /* Sync with current vport context */ 
+ vport->enabled_events = enabled_events; + vport->enabled = true; + + esw->enabled_vports++; +unlock_out: + mutex_unlock(&esw->state_lock); +} + +void xsc_eswitch_disable_vport(struct xsc_eswitch *esw, + struct xsc_vport *vport) +{ + u16 vport_num = vport->vport; + + mutex_lock(&esw->state_lock); + if (!vport->enabled) + goto done; + + xsc_core_dbg(esw->dev, "Disabling vport(%d)\n", vport_num); + /* Mark this vport as disabled to discard new events */ + vport->enabled = false; + vport->enabled_events = 0; + esw->enabled_vports--; +done: + mutex_unlock(&esw->state_lock); +} + +void xsc_eswitch_enable_pf_vf_vports(struct xsc_eswitch *esw, + enum xsc_eswitch_vport_event enabled_events) +{ + struct xsc_vport *vport; + int i; + + vport = xsc_eswitch_get_vport(esw, XSC_VPORT_PF); + xsc_eswitch_enable_vport(esw, vport, enabled_events); + + xsc_esw_for_each_vf_vport(esw, i, vport, esw->num_vfs) + xsc_eswitch_enable_vport(esw, vport, enabled_events); +} + +#define XSC_LEGACY_SRIOV_VPORT_EVENTS (XSC_VPORT_UC_ADDR_CHANGE | \ + XSC_VPORT_MC_ADDR_CHANGE | \ + XSC_VPORT_PROMISC_CHANGE | \ + XSC_VPORT_VLAN_CHANGE) + +static int esw_legacy_enable(struct xsc_eswitch *esw) +{ + struct xsc_vport *vport; + unsigned long i; + + xsc_esw_for_each_vf_vport(esw, i, vport, esw->num_vfs) { + vport->info.link_state = XSC_VPORT_ADMIN_STATE_AUTO; + } + xsc_eswitch_enable_pf_vf_vports(esw, XSC_LEGACY_SRIOV_VPORT_EVENTS); + return 0; +} + +int xsc_eswitch_enable_locked(struct xsc_eswitch *esw, int mode, int num_vfs) +{ + int err; + + lockdep_assert_held(&esw->mode_lock); + + esw->num_vfs = num_vfs; + + if (esw->mode == XSC_ESWITCH_NONE) + err = esw_legacy_enable(esw); + else + err = -EOPNOTSUPP; + + if (err) + goto ret; + + esw->mode = mode; + + xsc_core_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n", + mode == XSC_ESWITCH_LEGACY ? 
"LEGACY" : "OFFLOADS", + num_vfs, esw->enabled_vports); + + return 0; + +ret: + return err; +} + +int xsc_eswitch_enable(struct xsc_eswitch *esw, int mode, int num_vfs) +{ + int ret; + + mutex_lock(&esw->mode_lock); + ret = xsc_eswitch_enable_locked(esw, mode, num_vfs); + mutex_unlock(&esw->mode_lock); + return ret; +} + +void xsc_eswitch_disable_locked(struct xsc_eswitch *esw, bool clear_vf) +{ + int old_mode; + + lockdep_assert_held(&esw->mode_lock); + + if (esw->mode == XSC_ESWITCH_NONE) + return; + + xsc_core_info(esw->dev, "Disable: mode(%s)\n", + esw->mode == XSC_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS"); + + old_mode = esw->mode; + esw->mode = XSC_ESWITCH_NONE; + + esw->num_vfs = 0; +} + +void xsc_eswitch_disable(struct xsc_eswitch *esw, bool clear_vf) +{ + if (!ESW_ALLOWED(esw)) + return; + + mutex_lock(&esw->mode_lock); + xsc_eswitch_disable_locked(esw, clear_vf); + mutex_unlock(&esw->mode_lock); +} + +int xsc_eswitch_init(struct xsc_core_device *dev) +{ + struct xsc_eswitch *esw; + struct xsc_vport *vport; + int i, total_vports, err; + + if (!XSC_VPORT_MANAGER(dev)) { + if (xsc_core_is_pf(dev)) + xsc_core_err(dev, "%s XSC_VPORT_MANAGER check fail\n", __func__); + return 0; + } + + total_vports = xsc_eswitch_get_total_vports(dev); + + xsc_core_info(dev, "Total vports %d\n", total_vports); + + esw = kzalloc(sizeof(*esw), GFP_KERNEL); + if (!esw) + return -ENOMEM; + + esw->dev = dev; + esw->manager_vport = xsc_eswitch_manager_vport(dev); + esw->first_host_vport = xsc_eswitch_first_host_vport_num(dev); + esw->work_queue = create_singlethread_workqueue("xsc_esw_wq"); + if (!esw->work_queue) { + err = -ENOMEM; + goto abort; + } + esw->vports = kcalloc(total_vports, sizeof(struct xsc_vport), + GFP_KERNEL); + if (!esw->vports) { + err = -ENOMEM; + goto abort; + } + esw->total_vports = total_vports; + + mutex_init(&esw->state_lock); + mutex_init(&esw->mode_lock); + + xsc_esw_for_all_vports(esw, i, vport) { + vport->vport = xsc_eswitch_index_to_vport_num(esw, i); + 
vport->info.link_state = XSC_VPORT_ADMIN_STATE_AUTO; + vport->info.vlan_proto = htons(ETH_P_8021Q); + vport->info.roce = true; + + vport->dev = dev; + INIT_WORK(&vport->vport_change_handler, + esw_vport_change_handler); + } + esw->enabled_vports = 0; + esw->mode = XSC_ESWITCH_NONE; + + dev->priv.eswitch = esw; + return 0; + +abort: + if (esw->work_queue) + destroy_workqueue(esw->work_queue); + kfree(esw->vports); + kfree(esw); + return 0; +} + +void xsc_eswitch_cleanup(struct xsc_core_device *dev) +{ + if (!dev->priv.eswitch || !XSC_VPORT_MANAGER(dev)) + return; + + xsc_core_dbg(dev, "cleanup\n"); + + destroy_workqueue(dev->priv.eswitch->work_queue); + kfree(dev->priv.eswitch->vports); + kfree(dev->priv.eswitch); +} + +#ifdef XSC_ESW_GUID_ENABLE +static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) +{ + ((u8 *)node_guid)[7] = mac[0]; + ((u8 *)node_guid)[6] = mac[1]; + ((u8 *)node_guid)[5] = mac[2]; + ((u8 *)node_guid)[4] = 0xff; + ((u8 *)node_guid)[3] = 0xfe; + ((u8 *)node_guid)[2] = mac[3]; + ((u8 *)node_guid)[1] = mac[4]; + ((u8 *)node_guid)[0] = mac[5]; +} +#endif + +int xsc_eswitch_set_vport_mac(struct xsc_eswitch *esw, + u16 vport, u8 mac[ETH_ALEN]) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + int err = 0; + +#ifdef XSC_ESW_GUID_ENABLE + u64 node_guid; +#endif + + if (IS_ERR(evport)) + return PTR_ERR(evport); + + if (is_multicast_ether_addr(mac)) + return -EINVAL; + + mutex_lock(&esw->state_lock); + + if (evport->info.spoofchk && !is_valid_ether_addr(mac)) + xsc_core_warn(esw->dev, + "Set invalid MAC while spoofchk is on, vport(%d)\n", + vport); + + err = xsc_modify_other_nic_vport_mac_address(esw->dev, vport, mac, false); + if (err) { + xsc_core_err(esw->dev, + "Failed to xsc_modify_nic_vport_mac vport(%d) err=(%d)\n", + vport, err); + goto unlock; + } + + ether_addr_copy(evport->info.mac, mac); + +#ifdef XSC_ESW_GUID_ENABLE + node_guid_gen_from_mac(&node_guid, mac); + err = 
xsc_modify_other_nic_vport_node_guid(esw->dev, vport, node_guid); + if (err) + xsc_core_err(esw->dev, + "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", + vport, err); + evport->info.node_guid = node_guid; +#endif + +#ifdef XSC_ESW_FDB_ENABLE + if (evport->enabled && esw->mode == XSC_ESWITCH_LEGACY) + err = esw_vport_ingress_config(esw, evport); +#endif + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} +EXPORT_SYMBOL(xsc_eswitch_set_vport_mac); + +int xsc_eswitch_get_vport_mac(struct xsc_eswitch *esw, + u16 vport, u8 *mac) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + ether_addr_copy(mac, evport->info.mac); + mutex_unlock(&esw->state_lock); + return 0; +} + +int __xsc_eswitch_set_vport_vlan(struct xsc_eswitch *esw, int vport, u16 vlan, + u8 qos, __be16 proto, u8 set_flags) +{ + struct xsc_modify_nic_vport_context_in *in; + int err, in_sz; + + in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + 2; + + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->field_select.addresses_list = 1; + if ((set_flags & SET_VLAN_STRIP) || (set_flags & SET_VLAN_INSERT)) + in->nic_vport_ctx.vlan_allowed = 1; + else + in->nic_vport_ctx.vlan_allowed = 0; + in->vport_number = cpu_to_be16(vport); + in->other_vport = 1; + in->nic_vport_ctx.allowed_list_type = XSC_NVPRT_LIST_TYPE_VLAN_OFFLOAD; + in->nic_vport_ctx.vlan_proto = cpu_to_be16(ntohs(proto)); + in->nic_vport_ctx.qos = qos; + in->nic_vport_ctx.vlan = cpu_to_be16(vlan); + + err = xsc_modify_nic_vport_context(esw->dev, in, in_sz); + + kfree(in); + return err; +} + +int xsc_eswitch_set_vport_vlan(struct xsc_eswitch *esw, int vport, + u16 vlan, u8 qos, __be16 vlan_proto) +{ + u8 set_flags = 0; + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + + if (vlan || qos) + set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT; + else + set_flags = 
CLR_VLAN_STRIP | CLR_VLAN_INSERT; + + mutex_lock(&esw->state_lock); + if (esw->mode != XSC_ESWITCH_LEGACY) { + if (!vlan) + goto unlock; /* compatibility with libvirt */ + + err = -EOPNOTSUPP; + goto unlock; + } + + err = __xsc_eswitch_set_vport_vlan(esw, vport, vlan, qos, vlan_proto, set_flags); + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} +EXPORT_SYMBOL_GPL(xsc_eswitch_set_vport_vlan); + +static int xsc_vport_link2ifla(u8 esw_link) +{ + switch (esw_link) { + case XSC_VPORT_ADMIN_STATE_DOWN: + return IFLA_VF_LINK_STATE_DISABLE; + case XSC_VPORT_ADMIN_STATE_UP: + return IFLA_VF_LINK_STATE_ENABLE; + } + return IFLA_VF_LINK_STATE_AUTO; +} + +static int xsc_ifla_link2vport(u8 ifla_link) +{ + switch (ifla_link) { + case IFLA_VF_LINK_STATE_DISABLE: + return XSC_VPORT_ADMIN_STATE_DOWN; + case IFLA_VF_LINK_STATE_ENABLE: + return XSC_VPORT_ADMIN_STATE_UP; + } + return XSC_VPORT_ADMIN_STATE_AUTO; +} + +int xsc_eswitch_set_vport_state(struct xsc_eswitch *esw, + u16 vport, int link_state) +{ + u8 xsc_link = xsc_ifla_link2vport((u8)link_state); + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + err = xsc_modify_vport_admin_state(esw->dev, XSC_CMD_OP_MODIFY_VPORT_STATE, + vport, 1, xsc_link); + if (err) { + xsc_core_warn(esw->dev, + "Failed to set vport %d link state %d, err = %d", + vport, xsc_link, err); + goto unlock; + } + + evport->info.link_state = xsc_link; + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} +EXPORT_SYMBOL(xsc_eswitch_set_vport_state); + +int xsc_eswitch_set_vport_spoofchk(struct xsc_eswitch *esw, + u16 vport, u8 spoofchk) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + bool pschk; + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + if (esw->mode != 
XSC_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto unlock; + } + + pschk = evport->info.spoofchk; + evport->info.spoofchk = spoofchk; + if (spoofchk && !is_valid_ether_addr(evport->info.mac)) + xsc_core_warn(esw->dev, "Spoofchk in set while MAC is invalid, vport(%d)\n", + evport->vport); + + if (pschk != spoofchk) { + err = xsc_modify_nic_vport_spoofchk(esw->dev, vport, spoofchk); + if (err) + evport->info.spoofchk = pschk; + } + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} +EXPORT_SYMBOL(xsc_eswitch_set_vport_spoofchk); + +static int xsc_eswitch_update_vport_trunk(struct xsc_eswitch *esw, + struct xsc_vport *evport, + unsigned long *old_trunk) +{ + DECLARE_BITMAP(diff_vlan_bm, VLAN_N_VID); + int err = 0; + + bitmap_xor(diff_vlan_bm, old_trunk, + evport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID); + if (!bitmap_weight(diff_vlan_bm, VLAN_N_VID)) + return err; + + if (err) + bitmap_copy(evport->info.vlan_trunk_8021q_bitmap, old_trunk, VLAN_N_VID); + + return err; +} + +int xsc_eswitch_add_vport_trunk_range(struct xsc_eswitch *esw, + int vport, u16 start_vlan, u16 end_vlan) +{ + DECLARE_BITMAP(prev_vport_bitmap, VLAN_N_VID); + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + if (end_vlan > VLAN_N_VID || start_vlan > end_vlan) + return -EINVAL; + + mutex_lock(&esw->state_lock); + + if (evport->info.vlan || evport->info.qos) { + err = -EPERM; + xsc_core_warn(esw->dev, + "VGT+ is not allowed when operating in VST mode vport(%d)\n", + vport); + goto unlock; + } + + bitmap_copy(prev_vport_bitmap, evport->info.vlan_trunk_8021q_bitmap, + VLAN_N_VID); + bitmap_set(evport->info.vlan_trunk_8021q_bitmap, start_vlan, + end_vlan - start_vlan + 1); + err = xsc_eswitch_update_vport_trunk(esw, evport, prev_vport_bitmap); + +unlock: + mutex_unlock(&esw->state_lock); + + return err; +} + +int xsc_eswitch_del_vport_trunk_range(struct xsc_eswitch *esw, 
+ int vport, u16 start_vlan, u16 end_vlan) +{ + DECLARE_BITMAP(prev_vport_bitmap, VLAN_N_VID); + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + if (end_vlan > VLAN_N_VID || start_vlan > end_vlan) + return -EINVAL; + + mutex_lock(&esw->state_lock); + bitmap_copy(prev_vport_bitmap, evport->info.vlan_trunk_8021q_bitmap, + VLAN_N_VID); + bitmap_clear(evport->info.vlan_trunk_8021q_bitmap, start_vlan, + end_vlan - start_vlan + 1); + err = xsc_eswitch_update_vport_trunk(esw, evport, prev_vport_bitmap); + mutex_unlock(&esw->state_lock); + + return err; +} + +int xsc_eswitch_set_vport_trust(struct xsc_eswitch *esw, + u16 vport_num, bool setting) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport_num); + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + if (esw->mode != XSC_ESWITCH_LEGACY) { + err = -EOPNOTSUPP; + goto unlock; + } + if (setting != evport->info.trusted) { + err = xsc_modify_nic_vport_trust(esw->dev, vport_num, setting); + if (err) + goto unlock; + + evport->info.trusted = setting; + } + +unlock: + mutex_unlock(&esw->state_lock); + return err; +} +EXPORT_SYMBOL(xsc_eswitch_set_vport_trust); + +int xsc_eswitch_set_vport_rate(struct xsc_eswitch *esw, u16 vport, + u32 max_rate, u32 min_rate) +{ + struct xsc_vport *evport = xsc_eswitch_get_vport(esw, vport); + int err = 0; + + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + err = xsc_modify_vport_max_rate(evport->dev, vport, max_rate); + if (!err) { + evport->info.max_rate = max_rate; + evport->info.min_rate = min_rate; + } + mutex_unlock(&esw->state_lock); + + return err; +} +EXPORT_SYMBOL(xsc_eswitch_set_vport_rate); + +int xsc_eswitch_get_vport_config(struct xsc_eswitch *esw, + u16 vport, struct ifla_vf_info *ivi) +{ + struct xsc_vport *evport = 
xsc_eswitch_get_vport(esw, vport); + + if (IS_ERR(evport)) + return PTR_ERR(evport); + + memset(ivi, 0, sizeof(*ivi)); + ivi->vf = vport - 1; + + mutex_lock(&esw->state_lock); + ether_addr_copy(ivi->mac, evport->info.mac); + + ivi->linkstate = xsc_vport_link2ifla(evport->info.link_state); + ivi->spoofchk = evport->info.spoofchk; + ivi->trusted = evport->info.trusted; + ivi->min_tx_rate = evport->info.min_rate; + ivi->max_tx_rate = evport->info.max_rate; + ivi->vlan = evport->vlan_id; + ivi->vlan_proto = evport->vlan_proto; + + mutex_unlock(&esw->state_lock); + + return 0; +} +EXPORT_SYMBOL(xsc_eswitch_get_vport_config); + +int xsc_eswitch_vport_update_group(struct xsc_eswitch *esw, int vport_num, + u32 group_id) +{ + return 0; +} + +int xsc_eswitch_set_vgroup_rate(struct xsc_eswitch *esw, int group_id, + u32 max_rate) +{ + return 0; +} + +int xsc_eswitch_set_vgroup_max_rate(struct xsc_eswitch *esw, int group_id, + u32 max_rate) +{ + return 0; +} + +int xsc_eswitch_set_vgroup_min_rate(struct xsc_eswitch *esw, int group_id, + u32 min_rate) +{ + return 0; +} + +int xsc_eswitch_modify_esw_vport_context(struct xsc_eswitch *esw, u16 vport, + bool other_vport, void *in, int inlen) +{ + return 0; +} + +int xsc_eswitch_query_esw_vport_context(struct xsc_eswitch *esw, u16 vport, + bool other_vport, void *out, int outlen) +{ + return 0; +} + +int xsc_eswitch_get_vport_stats(struct xsc_eswitch *esw, + u16 vport, struct ifla_vf_stats *vf_stats) +{ + return 0; +} + +int xsc_eswitch_query_vport_drop_stats(struct xsc_core_device *dev, + struct xsc_vport *vport, + struct xsc_vport_drop_stats *stats) +{ + return 0; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h new file mode 100644 index 000000000000..711e698cc0cc --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eswitch.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. 
+ * All rights reserved. + */ + +#ifndef ESWITCH_H +#define ESWITCH_H + +#include +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/vport.h" + +struct xsc_vport_drop_stats { + u64 rx_dropped; + u64 tx_dropped; +}; + +int xsc_eswitch_init(struct xsc_core_device *dev); +void xsc_eswitch_cleanup(struct xsc_core_device *dev); +int xsc_eswitch_enable_locked(struct xsc_eswitch *esw, int mode, int num_vfs); +int xsc_eswitch_enable(struct xsc_eswitch *esw, int mode, int num_vfs); +void xsc_eswitch_disable_locked(struct xsc_eswitch *esw, bool clear_vf); +void xsc_eswitch_disable(struct xsc_eswitch *esw, bool clear_vf); + +int xsc_devlink_eswitch_mode_set(struct devlink *devlink, u16 mod, struct netlink_ext_ack *extack); +int xsc_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode); + +struct xsc_vport *__must_check +xsc_eswitch_get_vport(struct xsc_eswitch *esw, u16 vport_num); +int xsc_eswitch_get_vport_config(struct xsc_eswitch *esw, + u16 vport, struct ifla_vf_info *ivi); +int xsc_eswitch_set_vport_mac(struct xsc_eswitch *esw, + u16 vport, u8 mac[ETH_ALEN]); +int xsc_eswitch_get_vport_mac(struct xsc_eswitch *esw, + u16 vport, u8 *mac); +int xsc_eswitch_set_vport_vlan(struct xsc_eswitch *esw, int vport, + u16 vlan, u8 qos, __be16 vlan_proto); +int xsc_eswitch_set_vport_state(struct xsc_eswitch *esw, + u16 vport, int link_state); +int xsc_eswitch_set_vport_spoofchk(struct xsc_eswitch *esw, + u16 vport, u8 spoofchk); +int xsc_eswitch_set_vport_trust(struct xsc_eswitch *esw, + u16 vport_num, bool setting); +int xsc_eswitch_set_vport_rate(struct xsc_eswitch *esw, u16 vport, + u32 max_rate, u32 min_rate); +int xsc_eswitch_vport_update_group(struct xsc_eswitch *esw, int vport_num, + u32 group_id); +int xsc_eswitch_set_vgroup_rate(struct xsc_eswitch *esw, int group_id, + u32 max_rate); +int xsc_eswitch_set_vgroup_max_rate(struct xsc_eswitch *esw, int group_id, + u32 max_rate); +int xsc_eswitch_set_vgroup_min_rate(struct 
xsc_eswitch *esw, int group_id, + u32 min_rate); +int xsc_eswitch_add_vport_trunk_range(struct xsc_eswitch *esw, + int vport, u16 start_vlan, u16 end_vlan); +int xsc_eswitch_del_vport_trunk_range(struct xsc_eswitch *esw, + int vport, u16 start_vlan, u16 end_vlan); +int xsc_eswitch_modify_esw_vport_context(struct xsc_eswitch *esw, u16 vport, + bool other_vport, + void *in, int inlen); +int xsc_eswitch_query_esw_vport_context(struct xsc_eswitch *esw, u16 vport, + bool other_vport, + void *out, int outlen); +int xsc_eswitch_get_vport_stats(struct xsc_eswitch *esw, + u16 vport, + struct ifla_vf_stats *vf_stats); +int xsc_eswitch_query_vport_drop_stats(struct xsc_core_device *dev, + struct xsc_vport *vport, + struct xsc_vport_drop_stats *stats); +int xsc_eswitch_set_vport_rate(struct xsc_eswitch *esw, u16 vport, + u32 max_rate, u32 min_rate); + +#define xsc_esw_for_all_vports(esw, i, vport) \ + for ((i) = XSC_VPORT_PF; \ + (vport) = &(esw)->vports[(i)], \ + (i) < (esw)->total_vports; (i)++) + +#define xsc_esw_for_each_vf_vport(esw, i, vport, nvfs) \ + for ((i) = XSC_VPORT_FIRST_VF; \ + (vport) = &(esw)->vports[(i)], \ + (i) <= (nvfs); (i)++) + +static inline int xsc_eswitch_uplink_idx(struct xsc_eswitch *esw) +{ + /* Uplink always locate at the last element of the array.*/ + return esw->total_vports - 1; +} + +static inline int xsc_eswitch_ecpf_idx(struct xsc_eswitch *esw) +{ + return esw->total_vports - 2; +} + +static inline int xsc_eswitch_vport_num_to_index(struct xsc_eswitch *esw, + u16 vport_num) +{ + if (vport_num == XSC_VPORT_ECPF) { + if (!xsc_ecpf_vport_exists(esw->dev) && + !xsc_core_is_ecpf_esw_manager(esw->dev)) + xsc_core_warn(esw->dev, "ECPF vport doesn't exist!\n"); + return xsc_eswitch_ecpf_idx(esw); + } + + if (vport_num == XSC_VPORT_UPLINK) + return xsc_eswitch_uplink_idx(esw); + + /* PF and VF vports start from 0 to max_vfs */ + return vport_num; +} + +static inline u16 xsc_eswitch_index_to_vport_num(struct xsc_eswitch *esw, + int index) +{ + if 
(index == xsc_eswitch_uplink_idx(esw)) + return XSC_VPORT_UPLINK; + return index; +} + +static inline u16 xsc_eswitch_manager_vport(struct xsc_core_device *dev) +{ + return xsc_core_is_ecpf_esw_manager(dev) ? + XSC_VPORT_ECPF : XSC_VPORT_PF; +} + +static inline u16 xsc_eswitch_first_host_vport_num(struct xsc_core_device *dev) +{ + return xsc_core_is_ecpf_esw_manager(dev) ? + XSC_VPORT_PF : XSC_VPORT_FIRST_VF; +} + +static inline u8 xsc_get_eswitch_mode(struct xsc_core_device *dev) +{ + struct xsc_eswitch *esw = dev->priv.eswitch; + + return ESW_ALLOWED(esw) ? esw->mode : XSC_ESWITCH_NONE; +} + +static inline bool xsc_host_is_dpu_mode(struct xsc_core_device *dev) +{ + return (dev->pdev->device == XSC_MF_HOST_PF_DEV_ID || + dev->pdev->device == XSC_MV_HOST_PF_DEV_ID); +} + +static inline bool xsc_pf_vf_is_dpu_mode(struct xsc_core_device *dev) +{ + return (dev->pdev->device == XSC_MF_HOST_PF_DEV_ID || + dev->pdev->device == XSC_MF_HOST_VF_DEV_ID || + dev->pdev->device == XSC_MV_HOST_PF_DEV_ID || + dev->pdev->device == XSC_MV_HOST_VF_DEV_ID); +} + +static inline bool xsc_get_pp_bypass_res(struct xsc_core_device *dev, bool esw_set) +{ + return esw_set || xsc_pf_vf_is_dpu_mode(dev); +} + +static inline bool xsc_get_pct_drop_config(struct xsc_core_device *dev) +{ + return (dev->pdev->device == XSC_MC_PF_DEV_ID) || + (dev->pdev->device == XSC_MF_SOC_PF_DEV_ID) || + (dev->pdev->device == XSC_MS_PF_DEV_ID) || + (dev->pdev->device == XSC_MV_SOC_PF_DEV_ID); +} + +#endif /* ESWITCH_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c new file mode 100644 index 000000000000..91827fd56b00 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw.c @@ -0,0 +1,316 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "common/driver.h" +#include +#include "eswitch.h" + +static struct xsc_board_info *board_info[MAX_BOARD_NUM]; + +static struct xsc_board_info *xsc_get_board_info(char *board_sn) +{ + int i; + + for (i = 0; i < MAX_BOARD_NUM; i++) { + if (!board_info[i]) + continue; + if (!strncmp(board_info[i]->board_sn, board_sn, XSC_BOARD_SN_LEN)) + return board_info[i]; + } + return NULL; +} + +static struct xsc_board_info *xsc_alloc_board_info(void) +{ + int i; + + for (i = 0; i < MAX_BOARD_NUM; i++) { + if (!board_info[i]) + break; + } + if (i == MAX_BOARD_NUM) + return NULL; + board_info[i] = vmalloc(sizeof(*board_info[i])); + if (!board_info[i]) + return NULL; + memset(board_info[i], 0, sizeof(*board_info[i])); + board_info[i]->board_id = i; + return board_info[i]; +} + +void xsc_free_board_info(void) +{ + int i; + + for (i = 0; i < MAX_BOARD_NUM; i++) + vfree(board_info[i]); +} + +int xsc_cmd_query_hca_cap(struct xsc_core_device *dev, + struct xsc_caps *caps) +{ + struct xsc_cmd_query_hca_cap_mbox_out *out; + struct xsc_cmd_query_hca_cap_mbox_in in; + int err; + u16 t16; + struct xsc_board_info *board_info = NULL; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HCA_CAP); + in.cpu_num = cpu_to_be16(num_online_cpus()); + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto out_out; + + if (out->hdr.status) { + err = xsc_cmd_status_to_err(&out->hdr); + goto out_out; + } + + dev->glb_func_id = be32_to_cpu(out->hca_cap.glb_func_id); + caps->pf0_vf_funcid_base = be16_to_cpu(out->hca_cap.pf0_vf_funcid_base); + caps->pf0_vf_funcid_top = be16_to_cpu(out->hca_cap.pf0_vf_funcid_top); + caps->pf1_vf_funcid_base = be16_to_cpu(out->hca_cap.pf1_vf_funcid_base); + caps->pf1_vf_funcid_top = be16_to_cpu(out->hca_cap.pf1_vf_funcid_top); + caps->pcie0_pf_funcid_base = be16_to_cpu(out->hca_cap.pcie0_pf_funcid_base); + caps->pcie0_pf_funcid_top = 
be16_to_cpu(out->hca_cap.pcie0_pf_funcid_top); + caps->pcie1_pf_funcid_base = be16_to_cpu(out->hca_cap.pcie1_pf_funcid_base); + caps->pcie1_pf_funcid_top = be16_to_cpu(out->hca_cap.pcie1_pf_funcid_top); + caps->funcid_to_logic_port = be16_to_cpu(out->hca_cap.funcid_to_logic_port); + if (xsc_core_is_pf(dev)) { + xsc_core_dbg(dev, "pf0_vf_range(%4u, %4u), pf1_vf_range(%4u, %4u)\n", + caps->pf0_vf_funcid_base, caps->pf0_vf_funcid_top, + caps->pf1_vf_funcid_base, caps->pf1_vf_funcid_top); + xsc_core_dbg(dev, "pcie0_pf_range=(%4u, %4u), pcie1_pf_range=(%4u, %4u)\n", + caps->pcie0_pf_funcid_base, caps->pcie0_pf_funcid_top, + caps->pcie1_pf_funcid_base, caps->pcie1_pf_funcid_top); + } + caps->pcie_host = out->hca_cap.pcie_host; + caps->nif_port_num = out->hca_cap.nif_port_num; + caps->hw_feature_flag = be32_to_cpu(out->hca_cap.hw_feature_flag); + + caps->raweth_qp_id_base = be16_to_cpu(out->hca_cap.raweth_qp_id_base); + caps->raweth_qp_id_end = be16_to_cpu(out->hca_cap.raweth_qp_id_end); + caps->raweth_rss_qp_id_base = be16_to_cpu(out->hca_cap.raweth_rss_qp_id_base); + caps->raw_tpe_qp_num = be16_to_cpu(out->hca_cap.raw_tpe_qp_num); + caps->max_cqes = 1 << out->hca_cap.log_max_cq_sz; + caps->max_wqes = 1 << out->hca_cap.log_max_qp_sz; + caps->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq); + caps->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq); + caps->flags = be64_to_cpu(out->hca_cap.flags); + caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support); + caps->log_max_msg = out->hca_cap.log_max_msg & 0x1f; + caps->num_ports = out->hca_cap.num_ports & 0xf; + caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f; + caps->log_max_eq = out->hca_cap.log_max_eq & 0xf; + caps->log_max_msix = out->hca_cap.log_max_msix & 0xf; + caps->mac_port = out->hca_cap.mac_port & 0xff; + dev->mac_port = caps->mac_port; + if (caps->num_ports > XSC_MAX_FW_PORTS) { + xsc_core_err(dev, "device has %d ports while the driver supports max %d ports\n", + caps->num_ports, 
XSC_MAX_FW_PORTS); + err = -EINVAL; + goto out_out; + } + caps->send_ds_num = out->hca_cap.send_seg_num; + caps->send_wqe_shift = out->hca_cap.send_wqe_shift; + caps->recv_ds_num = out->hca_cap.recv_seg_num; + caps->recv_wqe_shift = out->hca_cap.recv_wqe_shift; + + caps->embedded_cpu = 0; + caps->ecpf_vport_exists = 0; + caps->eswitch_manager = 1; + caps->vport_group_manager = 1; + caps->log_max_current_uc_list = 0; + caps->log_max_current_mc_list = 0; + caps->log_max_vlan_list = 8; + caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f; + caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f; + caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f; + caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; + caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; + caps->log_max_mcg = out->hca_cap.log_max_mcg; + caps->log_max_mtt = out->hca_cap.log_max_mtt; + caps->log_max_tso = out->hca_cap.log_max_tso; + caps->hca_core_clock = be32_to_cpu(out->hca_cap.hca_core_clock); + caps->max_rwq_indirection_tables = + be32_to_cpu(out->hca_cap.max_rwq_indirection_tables); + caps->max_rwq_indirection_table_size = + be32_to_cpu(out->hca_cap.max_rwq_indirection_table_size); + caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); + caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); + caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); + caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; + caps->rx_pkt_len_max = be32_to_cpu(out->hca_cap.rx_pkt_len_max); + caps->max_vfs = be16_to_cpu(out->hca_cap.max_vfs); + caps->qp_rate_limit_min = be32_to_cpu(out->hca_cap.qp_rate_limit_min); + caps->qp_rate_limit_max = be32_to_cpu(out->hca_cap.qp_rate_limit_max); + +#ifdef MSIX_SUPPORT + caps->msix_enable = 1; +#else + caps->msix_enable = 0; +#endif + + caps->msix_base = be16_to_cpu(out->hca_cap.msix_base); + caps->msix_num = be16_to_cpu(out->hca_cap.msix_num); + + t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size); + if (t16 & 0x8000) { + 
caps->bf_reg_size = 1 << (t16 & 0x1f); + caps->bf_regs_per_page = XSC_BF_REGS_PER_PAGE; + } else { + caps->bf_reg_size = 0; + caps->bf_regs_per_page = 0; + } + caps->min_page_sz = ~(u32)((1 << PAGE_SHIFT) - 1); + + caps->dcbx = 1; + caps->qos = 1; + caps->ets = 1; + caps->dscp = 1; + caps->max_tc = out->hca_cap.max_tc; + caps->log_max_qp_depth = out->hca_cap.log_max_qp_depth & 0xff; + caps->mac_bit = out->hca_cap.mac_bit; + caps->lag_logic_port_ofst = out->hca_cap.lag_logic_port_ofst; + + dev->chip_ver_h = be32_to_cpu(out->hca_cap.chip_ver_h); + dev->chip_ver_m = be32_to_cpu(out->hca_cap.chip_ver_m); + dev->chip_ver_l = be32_to_cpu(out->hca_cap.chip_ver_l); + dev->hotfix_num = be32_to_cpu(out->hca_cap.hotfix_num); + dev->feature_flag = be32_to_cpu(out->hca_cap.feature_flag); + + board_info = xsc_get_board_info(out->hca_cap.board_sn); + if (!board_info) { + board_info = xsc_alloc_board_info(); + if (!board_info) + return -ENOMEM; + + memcpy(board_info->board_sn, out->hca_cap.board_sn, sizeof(out->hca_cap.board_sn)); + } + dev->board_info = board_info; + + if (xsc_core_is_pf(dev)) { + dev->regs.tx_db = be64_to_cpu(out->hca_cap.tx_db); + dev->regs.rx_db = be64_to_cpu(out->hca_cap.rx_db); + dev->regs.complete_db = be64_to_cpu(out->hca_cap.complete_db); + dev->regs.complete_reg = be64_to_cpu(out->hca_cap.complete_reg); + dev->regs.event_db = be64_to_cpu(out->hca_cap.event_db); + } + + dev->fw_version_major = out->hca_cap.fw_ver.fw_version_major; + dev->fw_version_minor = out->hca_cap.fw_ver.fw_version_minor; + dev->fw_version_patch = be16_to_cpu(out->hca_cap.fw_ver.fw_version_patch); + dev->fw_version_tweak = be32_to_cpu(out->hca_cap.fw_ver.fw_version_tweak); + dev->fw_version_extra_flag = out->hca_cap.fw_ver.fw_version_extra_flag; + dev->reg_mr_via_cmdq = out->hca_cap.reg_mr_via_cmdq; +out_out: + kfree(out); + + return err; +} + +int xsc_cmd_enable_hca(struct xsc_core_device *dev, u16 vf_num, u16 max_msix) +{ + struct xsc_cmd_enable_hca_mbox_in in; + struct 
xsc_cmd_enable_hca_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_HCA); + + in.vf_num = cpu_to_be16(vf_num); + in.max_msix_vec = cpu_to_be16(max_msix); + in.cpu_num = cpu_to_be16(num_online_cpus()); + in.pp_bypass = xsc_get_pp_bypass_res(dev, false); + in.esw_mode = XSC_ESWITCH_LEGACY; + + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, + "cpu's msix vec(%u) not enough for all %u vfs, err=%d, status=%d\n", + max_msix, vf_num, err, out.hdr.status); + return -EINVAL; + } + + return err; +} + +int xsc_cmd_disable_hca(struct xsc_core_device *dev, u16 vf_num) +{ + struct xsc_cmd_disable_hca_mbox_in in; + struct xsc_cmd_disable_hca_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DISABLE_HCA); + in.vf_num = cpu_to_be16(vf_num); + in.pp_bypass = xsc_get_pp_bypass_res(dev, false); + in.esw_mode = XSC_ESWITCH_NONE; + + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "failed to disable hca, err=%d, status=%d\n", + err, out.hdr.status); + return -EINVAL; + } + + return err; +} + +int xsc_cmd_modify_hca(struct xsc_core_device *dev) +{ + struct xsc_cmd_modify_hca_mbox_in in; + struct xsc_cmd_modify_hca_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_HCA); + in.pp_bypass = xsc_get_pp_bypass_res(dev, true); + in.esw_mode = xsc_get_eswitch_mode(dev); + + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + err = xsc_cmd_status_to_err(&out.hdr); + + return err; +} + +static int xsc_cmd_query_guid(struct xsc_core_device *dev) +{ + struct xsc_cmd_query_guid_mbox_in in; + struct xsc_cmd_query_guid_mbox_out out; + int err; + + in.hdr.opcode = 
cpu_to_be16(XSC_CMD_OP_QUERY_GUID); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + dev->board_info->guid = out.guid; + dev->board_info->guid_valid = 1; + return 0; +} + +int xsc_query_guid(struct xsc_core_device *dev) +{ + if (dev->board_info->guid_valid) + return 0; + + return xsc_cmd_query_guid(dev); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h new file mode 100644 index 000000000000..94d843801030 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/bitops.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef BITOPS_H +#define BITOPS_H + +#include +#include + +#define __round_mask(x, y) ((__typeof__(x))((y) - 1)) +#define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1) +#define round_down(x, y) ((x) & ~__round_mask(x, y)) + +unsigned long find_next_bit(const unsigned long *addr, unsigned long size, + unsigned long offset); + +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) + +#define clear_bit(bit, bitmap) __clear_bit(bit, bitmap) + +static inline void xsc_clear_bit(int bit, long *bitmap) +{ + clear_bit(bit, bitmap); +} + +static inline int xsc_test_bit(int bit, long *bitmap) +{ + return test_bit(bit, bitmap); +} + +static inline int xsc_test_and_set_bit(int bit, long *bitmap) +{ + return test_and_set_bit(bit, bitmap); +} + +static inline void xsc_set_bit(int bit, long *bitmap) +{ + set_bit(bit, bitmap); +} + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c new file mode 100644 index 000000000000..ca5e889050b3 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmd.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 
2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_cmd.h" + +#include "xsc_reg_struct.h" +#include "xsc_fw.h" +#include "xsc_flow.h" + +#include + +static inline void xsc_iae_lock(struct xsc_core_device *dev, int grp) +{ + spin_lock_bh(&get_xsc_res(dev)->iae_lock[grp]); +} + +static inline void xsc_iae_unlock(struct xsc_core_device *dev, int grp) +{ + spin_unlock_bh(&get_xsc_res(dev)->iae_lock[grp]); +} + +static inline int xsc_iae_idx_get(struct xsc_core_device *dev, int grp) +{ + return get_xsc_res(dev)->iae_idx[grp]; +} + +static inline int xsc_iae_grp_get(struct xsc_core_device *dev) +{ + struct xsc_resources *xres = get_xsc_res(dev); + + return atomic_inc_return(&xres->iae_grp) & XSC_RES_IAE_GRP_MASK; +} + +static int xsc_cmd_exec_create_mkey(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_create_mkey_mbox_out *resp = out; + u32 mpt_idx = 0; + + if (alloc_mpt_entry(xdev, &mpt_idx)) + return -EINVAL; + + resp->mkey = cpu_to_be32(mpt_idx & 0xffffff); + resp->hdr.status = 0; + + return 0; +} + +int xsc_create_mkey(struct xsc_core_device *xdev, void *in, void *out) +{ + unsigned long flags; + struct xsc_resources *xres = get_xsc_res(xdev); + int ret = 0; + + xsc_acquire_lock(&xres->lock, &flags); + ret = xsc_cmd_exec_create_mkey(xdev, in, out); + xsc_release_lock(&xres->lock, flags); + return ret; +} + +static int xsc_cmd_exec_destroy_mkey(struct xsc_core_device *xdev, void *in, void *out) +{ + struct xsc_destroy_mkey_mbox_in *req = in; + struct xsc_destroy_mkey_mbox_out *resp = out; + u32 mkey = be32_to_cpu(req->mkey); + u32 mpt_idx = xsc_mkey_to_idx(mkey); + + dealloc_mpt_entry(xdev, &mpt_idx); + + resp->hdr.status = 0; + + return 0; +} + +int xsc_destroy_mkey(struct xsc_core_device *xdev, void *in, void *out) +{ + unsigned long flags; + struct xsc_resources *xres = get_xsc_res(xdev); + int ret = 0; + + 
xsc_acquire_lock(&xres->lock, &flags); + ret = xsc_cmd_exec_destroy_mkey(xdev, in, out); + xsc_release_lock(&xres->lock, flags); + return ret; +} + +static int xsc_cmd_exec_reg_mr(struct xsc_core_device *dev, void *in, void *out) +{ + struct xsc_register_mr_mbox_in *req = in; + struct xsc_register_mr_mbox_out *resp = out; + struct xsc_mpt_entry mpt_ent; + u32 mpt_idx = 0; + u32 mtt_base; + u64 va = be64_to_cpu(req->req.va_base); + u32 mem_size = be32_to_cpu(req->req.len); + u32 pdn = be32_to_cpu(req->req.pdn); + u32 key = be32_to_cpu(req->req.mkey); + int pa_num = be32_to_cpu(req->req.pa_num); + u32 *ptr; + u64 reg_addr; + int i; + int reg_stride; + int iae_idx, iae_grp; + + if (pa_num && alloc_mtt_entry(dev, pa_num, &mtt_base)) + return -EINVAL; + + mpt_idx = xsc_mkey_to_idx(key); + mpt_ent.va_l = va & 0xFFFFFFFF; + mpt_ent.va_h = va >> 32; + mpt_ent.mem_size = mem_size; + mpt_ent.pdn = pdn; + mpt_ent.key = key & 0xFF; + mpt_ent.mtt_base = mtt_base; + mpt_ent.acc = req->req.acc; + mpt_ent.page_mode = req->req.page_mode; + mpt_ent.mem_map_en = req->req.map_en; + mpt_ent.rsv = 0; + + get_xsc_res(dev)->mpt_entry[mpt_idx].va = va; + get_xsc_res(dev)->mpt_entry[mpt_idx].mtt_base = mtt_base; + get_xsc_res(dev)->mpt_entry[mpt_idx].page_num = pa_num; + + ptr = (u32 *)&mpt_ent; + reg_stride = REG_WIDTH_TO_STRIDE(MMC_MPT_TBL_MEM_WIDTH); + reg_addr = MMC_MPT_TBL_MEM_ADDR + + mpt_idx * roundup_pow_of_two(reg_stride); + + iae_grp = xsc_iae_grp_get(dev); + iae_idx = xsc_iae_idx_get(dev, iae_grp); + + xsc_iae_lock(dev, iae_grp); + + IA_WRITE_REG_MR(dev, reg_addr, ptr, sizeof(mpt_ent) / sizeof(u32), iae_idx); + + xsc_core_info(dev, "reg mr, write mpt[%u]: va=%llx, mem_size=%u, pdn=%u\n", + mpt_idx, va, mpt_ent.mem_size, mpt_ent.pdn); + xsc_core_info(dev, "key=%u, mtt_base=%u, acc=%u, page_mode=%u, mem_map_en=%u\n", + mpt_ent.key, mpt_ent.mtt_base, mpt_ent.acc, + mpt_ent.page_mode, mpt_ent.mem_map_en); + + for (i = 0; i < pa_num; i++) { + u64 pa = req->req.pas[i]; + + pa = 
be64_to_cpu(pa); + pa = pa >> PAGE_SHIFT_4K; + ptr = (u32 *)&pa; + reg_addr = MMC_MTT_TBL_MEM_ADDR + + (mtt_base + i) * REG_WIDTH_TO_STRIDE(MMC_MTT_TBL_MEM_WIDTH); + + IA_WRITE_REG_MR(dev, reg_addr, ptr, sizeof(pa) / sizeof(u32), iae_idx); + + xsc_core_info(dev, "reg mr, write mtt: pa[%u]=%llx\n", i, pa); + } + + xsc_iae_unlock(dev, iae_grp); + + resp->hdr.status = 0; + return 0; +} + +int xsc_reg_mr(struct xsc_core_device *xdev, void *in, void *out) +{ + return xsc_cmd_exec_reg_mr(xdev, in, out); +} + +static int xsc_cmd_exec_dereg_mr(struct xsc_core_device *dev, void *in, void *out) +{ + struct xsc_unregister_mr_mbox_in *req; + struct xsc_unregister_mr_mbox_out *resp; + u32 mpt_idx; + u32 mtt_base; + int pages_num; + + req = in; + resp = out; + resp->hdr.status = -EINVAL; + + mpt_idx = be32_to_cpu(req->mkey); + xsc_core_info(dev, "mpt idx:%u\n", mpt_idx); + + pages_num = get_xsc_res(dev)->mpt_entry[mpt_idx].page_num; + mtt_base = get_xsc_res(dev)->mpt_entry[mpt_idx].mtt_base; + if (pages_num > 0) { + dealloc_mtt_entry(dev, pages_num, mtt_base); + get_xsc_res(dev)->mpt_entry[mpt_idx].page_num = 0; + } else { + xsc_core_dbg(dev, "no mtt entries to be freed, mpt_idx=%d\n", mpt_idx); + } + + resp->hdr.status = 0; + return 0; +} + +int xsc_dereg_mr(struct xsc_core_device *xdev, void *in, void *out) +{ + return xsc_cmd_exec_dereg_mr(xdev, in, out); +} + +static int xsc_cmd_exec_ioctl_flow(struct xsc_core_device *dev, + void *in, void *out) +{ + struct xsc_ioctl_mbox_in *req; + struct xsc_ioctl_mbox_out *resp; + struct xsc_ioctl_data_tl *tl; + char *data; + u16 datalen; + u16 tllen = sizeof(struct xsc_ioctl_data_tl); + int opmod; + int table; + int length; + int ret = -EINVAL; + + req = in; + resp = out; + resp->hdr.status = -EINVAL; + + data = (char *)req->data; + datalen = be16_to_cpu(req->len); + + if (datalen < tllen) + goto out; + + tl = (struct xsc_ioctl_data_tl *)data; + opmod = tl->opmod; + table = tl->table; + length = tl->length; + + switch (opmod) { + case 
XSC_IOCTL_OP_ADD: + ret = xsc_flow_add(dev, table, length, tl + 1); + break; + default: + ret = -EINVAL; + break; + } + + xsc_core_dbg(dev, "table=%d, opcode=0x%x, ret=%d\n", table, opmod, ret); + +out: + resp->hdr.status = 0; + resp->error = cpu_to_be32(ret); + return ret; +} + +int xsc_cmd_write_reg_directly(struct xsc_core_device *dev, void *in, int in_size, void *out, + int out_size, int func_id) +{ + int opcode, ret = 0; + unsigned long flags; + struct xsc_inbox_hdr *hdr; + + hdr = (struct xsc_inbox_hdr *)in; + opcode = be16_to_cpu(hdr->opcode); + xsc_core_dbg(dev, "opcode: %x\n", opcode); + + xsc_acquire_lock(&dev->reg_access_lock, &flags); + switch (opcode) { + case XSC_CMD_OP_IOCTL_FLOW: + ret = xsc_cmd_exec_ioctl_flow(dev, in, out); + break; + default: + ret = -EINVAL; + break; + } + + /* ensure pci sequence */ + xsc_mmiowb(); + + xsc_release_lock(&dev->reg_access_lock, flags); + + return ret; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h new file mode 100644 index 000000000000..fbc6c7699f7f --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/cmdq.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef CMDQ_H +#define CMDQ_H + +//hw will use this for some records(e.g. 
vf_id) +struct cmdq_rsv { + u16 func_id; + u8 rsv[2]; +}; + +//related with hw, won't change +#define CMDQ_ENTRY_SIZE 64 +#define CMD_FIRST_SIZE 8 +#define RSP_FIRST_SIZE 14 + +struct xsc_cmd_layout { + struct cmdq_rsv rsv0; + __be32 inlen; + __be64 in_ptr; + __be32 in[CMD_FIRST_SIZE]; + __be64 out_ptr; + __be32 outlen; + u8 token; + u8 sig; + u8 idx; + u8 type: 7; + u8 owner_bit: 1; //rsv for hw, arm will check this bit to make sure mem written +}; + +struct xsc_rsp_layout { + struct cmdq_rsv rsv0; + __be32 out[RSP_FIRST_SIZE]; + u8 token; + u8 sig; + u8 idx; + u8 type: 7; + u8 owner_bit: 1; //rsv for hw, driver will check this bit to make sure mem written +}; + +struct xsc_cmd_prot_block { + u8 data[512]; + u8 rsvd0[48]; + __be64 next; + __be32 block_num; + u8 owner_status; //fw should change this val to 1 + u8 token; + u8 ctrl_sig; + u8 sig; +}; + +#endif // CMDQ_H diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c new file mode 100644 index 000000000000..9c63cdae414b --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved.
+ */ + +#include +#include "common/xsc_core.h" + +void xsc_lock_init(struct xsc_lock *lock) +{ + spin_lock_init(&lock->lock); +} + +void xsc_acquire_lock(struct xsc_lock *lock, unsigned long *oflags) +{ + unsigned long flags; + + spin_lock_irqsave(&lock->lock, flags); + *oflags = flags; +} + +void xsc_release_lock(struct xsc_lock *lock, unsigned long flags) +{ + spin_unlock_irqrestore(&lock->lock, flags); +} + +void xsc_mmiowb(void) +{ + mmiowb(); +} + +void xsc_wmb(void) +{ + /* mem barrier for xsc operation */ + wmb(); +} + +void xsc_msleep(int timeout) +{ + msleep(timeout); +} + +void xsc_udelay(int timeout) +{ + udelay(timeout); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h new file mode 100644 index 000000000000..9d8581759633 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/osdep.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef OSDEP_H +#define OSDEP_H + +#include "common/xsc_core.h" + +#define xsc_print printk + +void xsc_msleep(int timeout); + +void xsc_udelay(int timeout); + +void xsc_lock_init(struct xsc_lock *lock); + +void xsc_acquire_lock(struct xsc_lock *lock, unsigned long *flags); + +void xsc_release_lock(struct xsc_lock *lock, unsigned long flags); + +void xsc_mmiowb(void); + +void xsc_wmb(void); + +void *xsc_malloc(unsigned int size); + +void xsc_free(void *addr); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_counters.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_counters.h new file mode 100644 index 000000000000..44a1b7848902 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_counters.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __XSC_COUNTERS_H__ +#define __XSC_COUNTERS_H__ + +/* From E-tile Hard User Guide */ +#define NIF_ETH_TX_PFC_LOW 0x83c +#define NIF_ETH_TX_PFC_HIGH 0x83d +#define NIF_ETH_RX_PFC_LOW 0x93c +#define NIF_ETH_RX_PFC_HIGH 0x93d +#define NIF_ETH_TX_CNTR_CONFIG 0x845 +#define NIF_ETH_RX_CNTR_CONFIG 0x945 +#define NIF_ETH_RX_FCSERR_LOW 0x904 +#define NIF_ETH_RX_FCSERR_HIGH 0x905 + +#define XSC_CNT_WIDTH_32_BIT 32 +#define XSC_CNT_WIDTH_64_BIT 64 +#define XSC_CNT_MASK_32 0xffffffff +#define XSC_CNT_MASK_64 0xffffffffffffffff + +struct cnt_value_64 { + u32 va_l; + u32 va_h; +}; + +struct cnt_value_96 { + u32 va_l; + u32 va_m; + u32 va_h; +}; + +enum { + XSC_CNT_TYPE_TX_PAUSE = 0, + XSC_CNT_TYPE_RX_PAUSE, +}; + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c new file mode 100644 index 000000000000..0623b0f7d4ec --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" + +#include "xsc_flow.h" + +static DECLARE_COMPLETION(dma_read_done); + +static inline int xsc_dma_wr_isbusy(struct xsc_core_device *xdev) +{ + u32 busy = 0; + + do { + busy = REG_RD32(xdev, HIF_TBL_TBL_DL_BUSY_REG_ADDR); + } while (busy != 0x0); + + return busy; +} + +static inline int xsc_dma_rd_isbusy(struct xsc_core_device *xdev) +{ + u32 busy = 0; + + do { + busy = REG_RD32(xdev, CLSF_DMA_DMA_UL_BUSY_REG_ADDR); + } while (busy != 0x0); + + return busy; +} + +static inline int xsc_dma_done(struct xsc_core_device *xdev) +{ + u32 done = 0; + + do { + done = REG_RD32(xdev, CLSF_DMA_DMA_DL_DONE_REG_ADDR); + } while ((done & 0x1) != 0x1); + + return done; +} + +static inline void xsc_dma_wr_success_get(struct xsc_core_device *xdev, u32 *success, u32 size) +{ + u32 *ptr = NULL; + + ptr = success; + IA_READ(xdev, CLSF_DMA_DMA_DL_SUCCESS_REG_ADDR, ptr, (size / sizeof(u32))); +} + +int xsc_flow_table_dma_write_add(struct xsc_core_device *xdev, + const struct tdi_dma_write_key_bits *key, + const struct tdi_dma_write_action_bits *action) +{ + u32 i = 0; + u32 busy = 0; + u32 dma_wr_num = 0; + u32 value = 0; + u32 done = 0; + u64 success[2]; + u32 data_len = 0; + u64 dma_wr_addr = 0; + + if (!xdev || !key || !action) + return -1; + + if (!action->entry_num) + return -1; + + dma_wr_num = ((action->entry_num + (XSC_DMA_WR_MAX - 1)) / XSC_DMA_WR_MAX); + + for (i = 0; i < dma_wr_num; i++) { + if ((action->entry_num % XSC_DMA_WR_MAX) && (i == (dma_wr_num - 1))) + data_len = ((action->entry_num % XSC_DMA_WR_MAX) * XSC_DMA_LEN); + else + data_len = (XSC_DMA_WR_MAX * XSC_DMA_LEN); + + busy = xsc_dma_wr_isbusy(xdev); + if (busy) + return -1; + + REG_WR32(xdev, CLSF_DMA_ERR_CODE_CLR_REG_ADDR, 1); + + value = ((data_len << HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_SHIFT) | + (key->host_id << HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_SHIFT) | + key->func_id); + + REG_WR32(xdev, 
HIF_TBL_TBL_DL_REQ_REG_ADDR, value); + + dma_wr_addr = (action->data_addr + ((i * XSC_DMA_WR_MAX) * XSC_DMA_LEN)); + value = (dma_wr_addr & HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_MASK); + REG_WR32(xdev, HIF_TBL_TBL_DL_ADDR_L_REG_ADDR, value); + + value = ((dma_wr_addr >> 32) & HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_MASK); + REG_WR32(xdev, HIF_TBL_TBL_DL_ADDR_H_REG_ADDR, value); + + REG_WR32(xdev, HIF_TBL_TBL_DL_START_REG_ADDR, 1); + + done = xsc_dma_done(xdev); + if (done != XSC_DMA_WR_SUCCESS) { + memset(success, 0, sizeof(success)); + xsc_dma_wr_success_get(xdev, (u32 *)&success, sizeof(success)); + xsc_core_err(xdev, "DMA write time %d status 0x%lx%lx fail.\n", i, + (unsigned long)success[1], (unsigned long)success[0]); + return -1; + } + } + + return 0; +} + +void xsc_dma_read_done_complete(void) +{ + complete(&dma_read_done); +} + +int xsc_flow_table_dma_read_add(struct xsc_core_device *xdev, + const struct tdi_dma_read_key_bits *key, + const struct tdi_dma_read_action_bits *action) +{ + u32 busy = 0; + u32 value = 0; + + if (!xdev || !key || !action) + return -1; + + if (!action->burst_num) + return -1; + + busy = xsc_dma_rd_isbusy(xdev); + if (busy) + return -1; + + value = ((key->host_id << HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_SHIFT) | + key->func_id); + + REG_WR32(xdev, HIF_TBL_TBL_UL_REQ_REG_ADDR, value); + + value = (action->data_addr & HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_MASK); + REG_WR32(xdev, HIF_TBL_TBL_UL_ADDR_L_REG_ADDR, value); + + value = ((action->data_addr >> 32) & HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_MASK); + REG_WR32(xdev, HIF_TBL_TBL_UL_ADDR_H_REG_ADDR, value); + + REG_WR32(xdev, HIF_TBL_TBL_UL_START_REG_ADDR, 1); + + value = (key->tbl_id & CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_MASK); + REG_WR32(xdev, CLSF_DMA_DMA_RD_TABLE_ID_REG_ADDR, value); + + value = ((action->burst_num << CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_SHIFT) | + key->tbl_start_addr); + REG_WR32(xdev, CLSF_DMA_DMA_RD_ADDR_REG_ADDR, value); + + REG_WR32(xdev, 
CLSF_DMA_INDRW_RD_START_REG_ADDR, 1); + + /*wait msix interrupt */ + if (!wait_for_completion_timeout(&dma_read_done, msecs_to_jiffies(5000))) { + xsc_core_err(xdev, "wait for dma read done completion timeout.\n"); + return -ETIMEDOUT; + } + + REG_WR32(xdev, HIF_TBL_MSG_RDY_REG_ADDR, 1); + + return 0; +} + +int xsc_flow_add(struct xsc_core_device *xdev, + int table, int length, void *data) +{ + int ret = -EINVAL; + struct xsc_flow_dma_write_add *dma_wr; + struct xsc_flow_dma_read_add *dma_rd; + + switch (table) { + case XSC_FLOW_DMA_WR: + if (length == sizeof(struct xsc_flow_dma_write_add)) { + dma_wr = (struct xsc_flow_dma_write_add *)data; + ret = xsc_flow_table_dma_write_add(xdev, &dma_wr->key, &dma_wr->action); + } + break; + case XSC_FLOW_DMA_RD: + if (length == sizeof(struct xsc_flow_dma_read_add)) { + dma_rd = (struct xsc_flow_dma_read_add *)data; + ret = xsc_flow_table_dma_read_add(xdev, &dma_rd->key, &dma_rd->action); + } + break; + default: + ret = -EINVAL; + break; + } + return ret; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h new file mode 100644 index 000000000000..ec7c7a2c3959 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_flow.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_FLOW_H +#define XSC_FLOW_H + +#include "osdep.h" + +#define XSC_DMA_LEN 64 +#define XSC_DMA_WR_MAX 128 +#define XSC_DMA_WR_SUCCESS 0x3 + +/* key */ +struct tdi_dma_write_key_bits { + uint8_t host_id:1; + uint16_t func_id:11; +} __packed; + +struct tdi_dma_read_key_bits { + uint16_t tbl_start_addr:16; + uint8_t tbl_id:7; + uint8_t host_id:1; + uint16_t func_id:11; +} __packed; + +/* action */ +struct tdi_dma_write_action_bits { + uint32_t entry_num:32; + uint64_t data_addr:64; +} __packed; + +struct tdi_dma_read_action_bits { + uint16_t burst_num:16; + uint64_t data_addr:64; +} __packed; + +/* ioctl data - add */ +struct xsc_flow_dma_write_add { + struct tdi_dma_write_key_bits key; + struct tdi_dma_write_action_bits action; +}; + +struct xsc_flow_dma_read_add { + struct tdi_dma_read_key_bits key; + struct tdi_dma_read_action_bits action; +}; + +struct xsc_logic_in_port_cfg_reg { + u32 phy_port_offset:11; + u32 resv0:5; + u32 func_id_offset:11; + u32 resv1:5; + u32 aps_port_offset:11; + u32 resv2:1; + u32 aps_port_rec_flg:1; + u32 resv3:19; +}; + +int xsc_flow_add(struct xsc_core_device *xdev, + int table, int length, void *data); + +void xsc_dma_read_done_complete(void); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h new file mode 100644 index 000000000000..a949bb0f4a2c --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_fw.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef XSC_FW_H +#define XSC_FW_H + +#include "osdep.h" + +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" + +struct xsc_free_list { + struct list_head list; + int start; + int end; +}; + +struct xsc_free_list_wl { + struct xsc_free_list head; + struct xsc_lock lock; +}; + +struct xsc_mpt_info { + u64 va; + u32 mtt_base; + u32 page_num; +}; + +#define XSC_RES_IAE_GRP_MASK (XSC_RES_NUM_IAE_GRP - 1) +struct xsc_resources { + int refcnt; + atomic_t iae_grp; + int iae_idx[XSC_RES_NUM_IAE_GRP]; + spinlock_t iae_lock[XSC_RES_NUM_IAE_GRP]; /* iae group lock */ +#define XSC_MAX_MPT_NUM MMC_MPT_TBL_MEM_DEPTH + struct xsc_mpt_info mpt_entry[XSC_MAX_MPT_NUM]; + int max_mpt_num; + u64 mpt_tbl[XSC_MAX_MPT_NUM >> 6]; +#define XSC_MAX_MTT_NUM MMC_MTT_TBL_MEM_DEPTH + int max_mtt_num; + struct xsc_free_list_wl mtt_list; + struct xsc_lock lock; +}; + +struct xsc_resources *get_xsc_res(struct xsc_core_device *dev); + +int xsc_alloc_res(u32 *res, u64 *res_tbl, u32 max); + +int xsc_dealloc_res(u32 *res, u64 *res_tbl); + +int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc, + u32 base_align); + +int release_to_free_list(struct xsc_free_list_wl *list, u32 release, + u32 num_released); + +int alloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx); + +int dealloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx); + +int alloc_mtt_entry(struct xsc_core_device *dev, u32 pages_num, u32 *mtt_base); + +int dealloc_mtt_entry(struct xsc_core_device *dev, int pages_num, u32 mtt_base); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c new file mode 100644 index 000000000000..758b5c77a263 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_mem.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "common/xsc_core.h" + +void *xsc_malloc(unsigned int size) +{ + return kmalloc(size, GFP_ATOMIC); +} + +void xsc_free(void *addr) +{ + kfree(addr); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_reg_struct.h b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_reg_struct.h new file mode 100644 index 000000000000..8eab3e6803a3 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_reg_struct.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_REG_DEFINE_H +#define XSC_REG_DEFINE_H + +struct xsc_mpt_entry { + u32 va_l; + u32 va_h; + u32 mem_size; + u32 pdn:24; + u32 key:8; + u32 mtt_base:18; + u32 acc:4; + u32 page_mode:2; + u32 mem_map_en:1; + u32 rsv:7; +}; + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c new file mode 100644 index 000000000000..8bd6916e2103 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/fw/xsc_res.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "xsc_fw.h" + +struct xsc_resources *g_xres[MAX_BOARD_NUM]; + +static int xsc_alloc_free_list_res(struct xsc_free_list_wl *list, int max_num) +{ + struct xsc_free_list *free_node; + + xsc_lock_init(&list->lock); + INIT_LIST_HEAD(&list->head.list); + + free_node = xsc_malloc(sizeof(struct xsc_free_list)); + if (!free_node) + return -ENOMEM; + + free_node->start = 0; + free_node->end = free_node->start + max_num - 1; + list_add(&free_node->list, &list->head.list); + + return 0; +} + +static void xsc_destroy_free_list_res(struct xsc_free_list_wl *list) +{ + struct xsc_free_list *pos; + struct xsc_free_list *next; + + list_for_each_entry_safe(pos, next, &list->head.list, list) { + list_del(&pos->list); + xsc_free(pos); + } +} + +static int xsc_res_iae_init(struct xsc_core_device *dev) +{ + int i = 0; + int ret = 0; + struct xsc_resources *res = get_xsc_res(dev); + struct xsc_alloc_ia_lock_mbox_in in; + struct xsc_alloc_ia_lock_mbox_out out; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ALLOC_IA_LOCK); + in.lock_num = XSC_RES_NUM_IAE_GRP; + + ret = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(dev, "failed to alloc ia lock from fw, ret = %d\n", ret); + return -EINVAL; + } + + for (i = 0; i < XSC_RES_NUM_IAE_GRP; i++) { + res->iae_idx[i] = out.lock_idx[i]; + spin_lock_init(&res->iae_lock[i]); + } + + atomic_set(&res->iae_grp, 0); + + xsc_core_info(dev, "allocated %d iae groups", i); + + return 0; +} + +static void xsc_res_iae_release(struct xsc_core_device *dev) +{ + int ret = 0; + int i = 0; + struct xsc_resources *res = get_xsc_res(dev); + struct xsc_release_ia_lock_mbox_in in; + struct xsc_release_ia_lock_mbox_out out; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_RELEASE_IA_LOCK); + for (i = 0; i < XSC_RES_NUM_IAE_GRP; i++) + in.lock_idx[i] = res->iae_idx[i]; + + ret = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (ret) + xsc_core_err(dev, "failed to release ia lock, ret = %d\n", ret); +} 
+ +int xsc_create_res(struct xsc_core_device *dev) +{ + int ret = 0; + u32 board_id = dev->board_info->board_id; + struct xsc_resources *xres = get_xsc_res(dev); + + if (xres) { + xres->refcnt++; + if (xres->refcnt > 1) + return 0; + } else { + g_xres[board_id] = vmalloc(sizeof(*g_xres[board_id])); + if (!g_xres[board_id]) + return -ENOMEM; + xres = g_xres[board_id]; + xres->refcnt = 1; + } + + xsc_lock_init(&xres->lock); + xres->max_mpt_num = XSC_MAX_MPT_NUM; + memset(xres->mpt_tbl, 0xFF, XSC_MAX_MPT_NUM >> 3); + /* reserved for local dma lkey */ + clear_bit(0, (unsigned long *)xres->mpt_tbl); + + ret = xsc_res_iae_init(dev); + if (ret) { + vfree(g_xres[board_id]); + g_xres[board_id] = NULL; + return -EINVAL; + } + + xres->max_mtt_num = XSC_MAX_MTT_NUM; + ret = xsc_alloc_free_list_res(&xres->mtt_list, xres->max_mtt_num); + if (ret) + goto err_mtt; + + return ret; + +err_mtt: + xsc_res_iae_release(dev); + vfree(g_xres[board_id]); + g_xres[board_id] = NULL; + return ret; +} + +void xsc_destroy_res(struct xsc_core_device *dev) +{ + struct xsc_resources *xres = get_xsc_res(dev); + + if (xres) { + xres->refcnt--; + if (xres->refcnt) + return; + + xsc_destroy_free_list_res(&xres->mtt_list); + xsc_res_iae_release(dev); + vfree(g_xres[dev->board_info->board_id]); + g_xres[dev->board_info->board_id] = NULL; + } +} + +struct xsc_resources *get_xsc_res(struct xsc_core_device *dev) +{ + return g_xres[dev->board_info->board_id]; +} + +int xsc_alloc_res(u32 *res, u64 *res_tbl, u32 max) +{ + u32 bit_num; + + bit_num = find_first_bit((unsigned long *)res_tbl, max); + if (bit_num == max) + return -ENOMEM; + clear_bit(bit_num, (unsigned long *)res_tbl); + *res = bit_num; + return 0; +} + +int xsc_dealloc_res(u32 *res, u64 *res_tbl) +{ + if (test_and_set_bit(*res, (unsigned long *)res_tbl)) + return -EINVAL; + + *res = 0; + return 0; +} + +int alloc_from_free_list(struct xsc_free_list_wl *list, int required, u32 *alloc, + u32 base_align) +{ + struct xsc_free_list *free_node; + 
struct xsc_free_list *next; + struct xsc_free_list *new_node; + unsigned long flags; + + *alloc = -1; + xsc_acquire_lock(&list->lock, &flags); + list_for_each_entry_safe(free_node, next, &list->head.list, list) { + int start = round_up(free_node->start, base_align); + int avail_num = free_node->end - start + 1; + + if (required < avail_num) { + if (start > free_node->start) { + new_node = xsc_malloc(sizeof(struct xsc_free_list)); + if (!new_node) { + xsc_release_lock(&list->lock, flags); + return -ENOMEM; + } + new_node->start = free_node->start; + new_node->end = start - 1; + __list_add(&new_node->list, free_node->list.prev, + &free_node->list); + } + *alloc = start; + free_node->start = start + required; + break; + } else if (required == avail_num) { + *alloc = start; + if (start > free_node->start) { + free_node->end = start - 1; + } else { + list_del(&free_node->list); + xsc_free(free_node); + } + break; + } + } + xsc_release_lock(&list->lock, flags); + + if (*alloc == -1) + return -EINVAL; + + return 0; +} + +int release_to_free_list(struct xsc_free_list_wl *list, uint32_t release, + uint32_t num_released) +{ + struct xsc_free_list *free_node = NULL; + struct xsc_free_list *next, *prev; + struct xsc_free_list *new_node; + unsigned long flags; + bool new_flag = false; + bool end_merge = false; + int ret = 0; + + xsc_acquire_lock(&list->lock, &flags); + list_for_each_entry_safe(free_node, next, &list->head.list, list) { + if (release + num_released < free_node->start) { + new_flag = true; + } else if (release + num_released == free_node->start) { + /* backward merge */ + end_merge = true; + free_node->start = release; + } + + if (new_flag || end_merge) { + /* forward merge, and backward merge if possible */ + if (free_node->list.prev == &list->head.list) + goto create_node; + + prev = list_entry(free_node->list.prev, struct xsc_free_list, list); + if (release == prev->end + 1) { + if (end_merge) { + prev->end = free_node->end; + list_del(&free_node->list); + 
xsc_free(free_node); + free_node = NULL; + } else { + prev->end = release + num_released - 1; + new_flag = false; + } + } + + break; + } + } + + if (list_empty(&list->head.list)) { + new_flag = true; + free_node = &list->head; + } + +create_node: + if (new_flag && free_node) { + new_node = xsc_malloc(sizeof(struct xsc_free_list)); + if (!new_node) { + ret = -ENOMEM; + goto ret; + } + new_node->start = release; + new_node->end = release + num_released - 1; + __list_add(&new_node->list, free_node->list.prev, + &free_node->list); + } +ret: + xsc_release_lock(&list->lock, flags); + return ret; +} + +int alloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx) +{ + struct xsc_resources *xres = get_xsc_res(dev); + + if (xsc_alloc_res(mpt_idx, xres->mpt_tbl, xres->max_mpt_num)) + return -EINVAL; + + return 0; +} + +int dealloc_mpt_entry(struct xsc_core_device *dev, u32 *mpt_idx) +{ + struct xsc_resources *xres = get_xsc_res(dev); + + if (xsc_dealloc_res(mpt_idx, xres->mpt_tbl)) + return -EINVAL; + + return 0; +} + +int alloc_mtt_entry(struct xsc_core_device *dev, u32 pages_num, u32 *mtt_base) +{ + struct xsc_resources *xres = get_xsc_res(dev); + int ret = alloc_from_free_list(&xres->mtt_list, pages_num, mtt_base, 1); + + xsc_core_dbg(dev, "alloc mtt for %d pages start from %d\n", + pages_num, *mtt_base); + + return ret; +} + +int dealloc_mtt_entry(struct xsc_core_device *dev, int pages_num, u32 mtt_base) +{ + struct xsc_resources *xres = get_xsc_res(dev); + int ret = release_to_free_list(&xres->mtt_list, mtt_base, pages_num); + + xsc_core_dbg(dev, "mtt release %d pages start from %d\n", + pages_num, mtt_base); + + return ret; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c b/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c new file mode 100644 index 000000000000..da4761565f1a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/intf.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology 
Co., Ltd. + * All rights reserved. + */ + +#include "common/xsc_core.h" + +LIST_HEAD(intf_list); +LIST_HEAD(xsc_dev_list); +DEFINE_MUTEX(xsc_intf_mutex); // protect intf_list and xsc_dev_list + +static void xsc_add_device(struct xsc_interface *intf, struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + struct xsc_core_device *dev; + + dev = container_of(priv, struct xsc_core_device, priv); + dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL); + if (!dev_ctx) + return; + + dev_ctx->intf = intf; + + dev_ctx->context = intf->add(dev); + if (dev_ctx->context) { + set_bit(XSC_INTERFACE_ADDED, &dev_ctx->state); + if (intf->attach) + set_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state); + + spin_lock_irq(&priv->ctx_lock); + list_add_tail(&dev_ctx->list, &priv->ctx_list); + spin_unlock_irq(&priv->ctx_lock); + } else { + kfree(dev_ctx); + } +} + +static struct xsc_device_context *xsc_get_device(struct xsc_interface *intf, + struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + + /* caller of this function has mutex protection */ + list_for_each_entry(dev_ctx, &priv->ctx_list, list) + if (dev_ctx->intf == intf) + return dev_ctx; + + return NULL; +} + +static void xsc_remove_device(struct xsc_interface *intf, struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + struct xsc_core_device *dev = container_of(priv, struct xsc_core_device, priv); + + dev_ctx = xsc_get_device(intf, priv); + if (!dev_ctx) + return; + + spin_lock_irq(&priv->ctx_lock); + list_del(&dev_ctx->list); + spin_unlock_irq(&priv->ctx_lock); + + if (test_bit(XSC_INTERFACE_ADDED, &dev_ctx->state)) + intf->remove(dev, dev_ctx->context); + + kfree(dev_ctx); +} + +int xsc_register_interface(struct xsc_interface *intf) +{ + struct xsc_priv *priv; + + if (!intf->add || !intf->remove) + return -EINVAL; + + mutex_lock(&xsc_intf_mutex); + list_add_tail(&intf->list, &intf_list); + list_for_each_entry(priv, &xsc_dev_list, dev_list) { + xsc_add_device(intf, priv); + } + 
mutex_unlock(&xsc_intf_mutex); + + return 0; +} +EXPORT_SYMBOL(xsc_register_interface); + +void xsc_unregister_interface(struct xsc_interface *intf) +{ + struct xsc_priv *priv; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(priv, &xsc_dev_list, dev_list) + xsc_remove_device(intf, priv); + list_del(&intf->list); + mutex_unlock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_unregister_interface); + +static void xsc_attach_interface(struct xsc_interface *intf, + struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + struct xsc_core_device *dev = container_of(priv, struct xsc_core_device, priv); + + dev_ctx = xsc_get_device(intf, priv); + if (!dev_ctx) + return; + + if (intf->attach) { + if (test_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state)) + return; + if (intf->attach(dev, dev_ctx->context)) + return; + set_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state); + } else { + if (test_bit(XSC_INTERFACE_ADDED, &dev_ctx->state)) + return; + dev_ctx->context = intf->add(dev); + if (!dev_ctx->context) + return; + set_bit(XSC_INTERFACE_ADDED, &dev_ctx->state); + } +} + +static void xsc_detach_interface(struct xsc_interface *intf, + struct xsc_priv *priv) +{ + struct xsc_device_context *dev_ctx; + struct xsc_core_device *dev = container_of(priv, struct xsc_core_device, priv); + + dev_ctx = xsc_get_device(intf, priv); + if (!dev_ctx) + return; + + if (intf->detach) { + if (!test_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state)) + return; + intf->detach(dev, dev_ctx->context); + clear_bit(XSC_INTERFACE_ATTACHED, &dev_ctx->state); + } else { + if (!test_bit(XSC_INTERFACE_ADDED, &dev_ctx->state)) + return; + intf->remove(dev, dev_ctx->context); + clear_bit(XSC_INTERFACE_ADDED, &dev_ctx->state); + } +} + +void xsc_attach_device(struct xsc_core_device *dev) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(intf, &intf_list, list) { + xsc_attach_interface(intf, priv); + } + mutex_unlock(&xsc_intf_mutex); +} 
+EXPORT_SYMBOL(xsc_attach_device); + +void xsc_attach_device_by_protocol(struct xsc_core_device *dev, int protocol) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(intf, &intf_list, list) + if (intf->protocol == protocol) + xsc_attach_interface(intf, priv); + mutex_unlock(&xsc_intf_mutex); +} + +void xsc_detach_device(struct xsc_core_device *dev) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(intf, &intf_list, list) + xsc_detach_interface(intf, priv); + mutex_unlock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_detach_device); + +bool xsc_device_registered(struct xsc_core_device *dev) +{ + struct xsc_priv *priv; + bool found = false; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry(priv, &xsc_dev_list, dev_list) + if (priv == &dev->priv) + found = true; + mutex_unlock(&xsc_intf_mutex); + + return found; +} + +int xsc_register_device(struct xsc_core_device *dev) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_add_tail(&priv->dev_list, &xsc_dev_list); + list_for_each_entry(intf, &intf_list, list) + xsc_add_device(intf, priv); + mutex_unlock(&xsc_intf_mutex); + + return 0; +} +EXPORT_SYMBOL(xsc_register_device); + +void xsc_unregister_device(struct xsc_core_device *dev) +{ + struct xsc_priv *priv = &dev->priv; + struct xsc_interface *intf; + + mutex_lock(&xsc_intf_mutex); + list_for_each_entry_reverse(intf, &intf_list, list) + xsc_remove_device(intf, priv); + list_del(&priv->dev_list); + mutex_unlock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_unregister_device); + +void xsc_add_dev_by_protocol(struct xsc_core_device *dev, int protocol) +{ + struct xsc_interface *intf; + + list_for_each_entry(intf, &intf_list, list) + if (intf->protocol == protocol) { + xsc_add_device(intf, &dev->priv); + break; + } +} +EXPORT_SYMBOL(xsc_add_dev_by_protocol); + +void 
xsc_remove_dev_by_protocol(struct xsc_core_device *dev, int protocol) +{ + struct xsc_interface *intf; + + list_for_each_entry(intf, &intf_list, list) + if (intf->protocol == protocol) { + xsc_remove_device(intf, &dev->priv); + break; + } +} +EXPORT_SYMBOL(xsc_remove_dev_by_protocol); + +void xsc_dev_list_lock(void) +{ + mutex_lock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_dev_list_lock); + +void xsc_dev_list_unlock(void) +{ + mutex_unlock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_dev_list_unlock); + +int xsc_dev_list_trylock(void) +{ + return mutex_trylock(&xsc_intf_mutex); +} +EXPORT_SYMBOL(xsc_dev_list_trylock); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/main.c b/drivers/net/ethernet/yunsilicon/xsc/pci/main.c new file mode 100644 index 000000000000..0c9ba75b2d37 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/main.c @@ -0,0 +1,937 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/xsc_core.h" +#include "common/driver.h" +#include "common/cq.h" +#include "common/qp.h" +#include "common/xsc_lag.h" +#include "common/xsc_port_ctrl.h" +#include "devlink.h" +#include "eswitch.h" +#include "fw/xsc_counters.h" +#include "xsc_pci_ctrl.h" + +unsigned int xsc_debug_mask; +module_param_named(debug_mask, xsc_debug_mask, uint, 0644); +MODULE_PARM_DESC(debug_mask, + "debug mask: 1=dump cmd data, 2=dump cmd exec time, 3=both. Default=0"); + +unsigned int xsc_log_level = XSC_LOG_LEVEL_WARN; +module_param_named(log_level, xsc_log_level, uint, 0644); +MODULE_PARM_DESC(log_level, + "lowest log level to print: 0=debug, 1=info, 2=warning, 3=error. Default=1"); +EXPORT_SYMBOL(xsc_log_level); + +static bool probe_vf = 1; +module_param_named(probe_vf, probe_vf, bool, 0644); +MODULE_PARM_DESC(probe_vf, "probe VFs or not, 0 = not probe, 1 = probe. 
Default = 1"); + +static bool xsc_hw_reset; + +#define DRIVER_NAME "xsc_pci" +#define DRIVER_VERSION "0.1.0" +#define ETH_DRIVER_NAME "xsc_eth" + +static const struct pci_device_id xsc_pci_id_table[] = { + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MC_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_HOST_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_HOST_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_SOC_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MS_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MV_HOST_PF_DEV_ID) }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MV_HOST_VF_DEV_ID), + .driver_data = XSC_PCI_DEV_IS_VF }, + { PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MV_SOC_PF_DEV_ID) }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, xsc_pci_id_table); + +static const struct xsc_device_product_info xsc_product_list[] = { + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID, + XSC_SUB_DEV_ID_MC_50, "metaConnect-50")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID, + XSC_SUB_DEV_ID_MC_100, "metaConnect-100")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID, + XSC_SUB_DEV_ID_MC_200, "metaConnect-200")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID, + XSC_SUB_DEV_ID_MC_400S, "metaConnect-400S")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MF_HOST_PF_DEV_ID, + XSC_SUB_DEV_ID_MF_50, "metaFusion-50")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MF_HOST_PF_DEV_ID, + XSC_SUB_DEV_ID_MF_200, "metaFusion-200")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID, + XSC_SUB_DEV_ID_MS_50, "metaScale-50")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID, + XSC_SUB_DEV_ID_MS_100Q, "metaScale-100Q")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, 
XSC_MS_PF_DEV_ID, + XSC_SUB_DEV_ID_MS_200, "metaScale-200")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID, + XSC_SUB_DEV_ID_MS_200S, "metaScale-200S")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID, + XSC_SUB_DEV_ID_MS_400M, "metaScale-400M")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID, + XSC_SUB_DEV_ID_MS_200_OCP, "metaScale-200-OCP")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MV_HOST_PF_DEV_ID, + XSC_SUB_DEV_ID_MV_100, "metaVisor-100")}, + {XSC_DEVICE_PRODUCT_INFO(XSC_PCI_VENDOR_ID, XSC_MV_HOST_PF_DEV_ID, + XSC_SUB_DEV_ID_MV_200, "metaVisor-200")}, + {0} +}; + +#define IS_VIRT_FUNCTION(id) ((id)->driver_data == XSC_PCI_DEV_IS_VF) + +static bool need_write_reg_directly(void *in) +{ + struct xsc_inbox_hdr *hdr; + struct xsc_ioctl_mbox_in *req; + struct xsc_ioctl_data_tl *tl; + char *data; + + hdr = (struct xsc_inbox_hdr *)in; + if (unlikely(be16_to_cpu(hdr->opcode) == XSC_CMD_OP_IOCTL_FLOW)) { + req = (struct xsc_ioctl_mbox_in *)in; + data = (char *)req->data; + tl = (struct xsc_ioctl_data_tl *)data; + if (tl->opmod == XSC_IOCTL_OP_ADD) { + if (unlikely(tl->table == XSC_FLOW_DMA_WR || tl->table == XSC_FLOW_DMA_RD)) + return true; + } + } + return false; +} + +int xsc_cmd_exec(struct xsc_core_device *dev, void *in, int in_size, void *out, + int out_size) +{ + struct xsc_inbox_hdr *hdr = (struct xsc_inbox_hdr *)in; + + hdr->ver = 0; + if (hdr->ver != 0) { + xsc_core_warn(dev, "recv an unexpected cmd ver = %d, opcode = %d\n", + be16_to_cpu(hdr->ver), be16_to_cpu(hdr->opcode)); + WARN_ON(hdr->ver != 0); + } + + if (need_write_reg_directly(in)) + return xsc_cmd_write_reg_directly(dev, in, in_size, out, + out_size, dev->glb_func_id); + return _xsc_cmd_exec(dev, in, in_size, out, out_size); +} +EXPORT_SYMBOL(xsc_cmd_exec); + +static int set_dma_caps(struct pci_dev *pdev) +{ + int err = 0; + + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, 
DMA_BIT_MASK(32)); + else + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + + if (!err) + dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); + + return err; +} + +static int xsc_pci_enable_device(struct xsc_core_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + int err = 0; + + mutex_lock(&dev->pci_status_mutex); + if (dev->pci_status == XSC_PCI_STATUS_DISABLED) { + err = pci_enable_device(pdev); + if (!err) + dev->pci_status = XSC_PCI_STATUS_ENABLED; + } + mutex_unlock(&dev->pci_status_mutex); + + return err; +} + +static void xsc_pci_disable_device(struct xsc_core_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + + mutex_lock(&dev->pci_status_mutex); + if (dev->pci_status == XSC_PCI_STATUS_ENABLED) { + pci_disable_device(pdev); + dev->pci_status = XSC_PCI_STATUS_DISABLED; + } + mutex_unlock(&dev->pci_status_mutex); +} + +int xsc_priv_init(struct xsc_core_device *dev) +{ + struct xsc_priv *priv = &dev->priv; + + strscpy(priv->name, dev_name(&dev->pdev->dev), XSC_MAX_NAME_LEN); + priv->name[XSC_MAX_NAME_LEN - 1] = 0; + + INIT_LIST_HEAD(&priv->ctx_list); + spin_lock_init(&priv->ctx_lock); + mutex_init(&dev->intf_state_mutex); + + return 0; +} + +int xsc_dev_res_init(struct xsc_core_device *dev) +{ + struct xsc_dev_resource *dev_res = NULL; + + dev_res = kvzalloc(sizeof(*dev_res), GFP_KERNEL); + if (!dev_res) + return -ENOMEM; + + dev->dev_res = dev_res; + /* init access lock */ + spin_lock_init(&dev->reg_access_lock.lock); + mutex_init(&dev_res->alloc_mutex); + mutex_init(&dev_res->pgdir_mutex); + INIT_LIST_HEAD(&dev_res->pgdir_list); + spin_lock_init(&dev_res->mkey_lock); + + return 0; +} + +void xsc_dev_res_cleanup(struct xsc_core_device *dev) +{ + kfree(dev->dev_res); + dev->dev_res = NULL; +} + +void xsc_init_reg_addr(struct xsc_core_device *dev) +{ + if (xsc_core_is_pf(dev)) { + dev->regs.cpm_get_lock = HIF_CPM_LOCK_GET_REG_ADDR; + dev->regs.cpm_put_lock = HIF_CPM_LOCK_PUT_REG_ADDR; + dev->regs.cpm_lock_avail = 
HIF_CPM_LOCK_AVAIL_REG_ADDR; + dev->regs.cpm_data_mem = HIF_CPM_IDA_DATA_MEM_ADDR; + dev->regs.cpm_cmd = HIF_CPM_IDA_CMD_REG_ADDR; + dev->regs.cpm_addr = HIF_CPM_IDA_ADDR_REG_ADDR; + dev->regs.cpm_busy = HIF_CPM_IDA_BUSY_REG_ADDR; + } else { + dev->regs.tx_db = TX_DB_FUNC_MEM_ADDR; + dev->regs.rx_db = RX_DB_FUNC_MEM_ADDR; + dev->regs.complete_db = DB_CQ_FUNC_MEM_ADDR; + dev->regs.complete_reg = DB_CQ_CID_DIRECT_MEM_ADDR; + dev->regs.event_db = DB_EQ_FUNC_MEM_ADDR; + dev->regs.cpm_get_lock = CPM_LOCK_GET_REG_ADDR; + dev->regs.cpm_put_lock = CPM_LOCK_PUT_REG_ADDR; + dev->regs.cpm_lock_avail = CPM_LOCK_AVAIL_REG_ADDR; + dev->regs.cpm_data_mem = CPM_IDA_DATA_MEM_ADDR; + dev->regs.cpm_cmd = CPM_IDA_CMD_REG_ADDR; + dev->regs.cpm_addr = CPM_IDA_ADDR_REG_ADDR; + dev->regs.cpm_busy = CPM_IDA_BUSY_REG_ADDR; + } +} + +int xsc_dev_init(struct xsc_core_device *dev) +{ + int err = 0; + + xsc_priv_init(dev); + + err = xsc_dev_res_init(dev); + if (err) { + xsc_core_err(dev, "xsc dev res init failed %d\n", err); + goto err_res_init; + } + + /* create debugfs */ + err = xsc_debugfs_init(dev); + if (err) { + xsc_core_err(dev, "xsc_debugfs_init failed %d\n", err); + goto err_debugfs_init; + } + + return 0; + +err_debugfs_init: + xsc_dev_res_cleanup(dev); +err_res_init: + return err; +} + +void xsc_dev_cleanup(struct xsc_core_device *dev) +{ +// iounmap(dev->iseg); + xsc_debugfs_fini(dev); + xsc_dev_res_cleanup(dev); +} + +static void xsc_product_info(struct pci_dev *pdev) +{ + const struct xsc_device_product_info *p_info = xsc_product_list; + + while (p_info->vendor) { + if (pdev->device == p_info->device && pdev->subsystem_device == p_info->subdevice) { + pr_info("Product: %s, Vendor: Yunsilicon\n", p_info->product_name); + break; + } + p_info++; + } +} + +static int xsc_pci_init(struct xsc_core_device *dev, const struct pci_device_id *id) +{ + struct pci_dev *pdev = dev->pdev; + int err = 0; + int bar_num = 0; + void __iomem *bar_base = NULL; + + mutex_init(&dev->pci_status_mutex); 
+ dev->priv.numa_node = dev_to_node(&pdev->dev); + if (dev->priv.numa_node == -1) + dev->priv.numa_node = 0; + + /* enable the device */ + err = xsc_pci_enable_device(dev); + if (err) { + xsc_core_err(dev, "failed to enable PCI device: err=%d\n", err); + goto err_ret; + } + + err = pci_request_region(pdev, bar_num, KBUILD_MODNAME); + if (err) { + xsc_core_err(dev, "failed to request %s pci_region=%d: err=%d\n", + KBUILD_MODNAME, bar_num, err); + goto err_disable; + } + + pci_set_master(pdev); + + err = set_dma_caps(pdev); + if (err) { + xsc_core_err(dev, "failed to set DMA capabilities mask: err=%d\n", err); + goto err_clr_master; + } + + bar_base = pci_ioremap_bar(pdev, bar_num); + if (!bar_base) { + xsc_core_err(dev, "failed to ioremap %s bar%d\n", KBUILD_MODNAME, bar_num); + goto err_clr_master; + } + + err = pci_save_state(pdev); + if (err) { + xsc_core_err(dev, "pci_save_state failed: err=%d\n", err); + goto err_io_unmap; + } + + dev->bar_num = bar_num; + dev->bar = bar_base; + + xsc_init_reg_addr(dev); + + return 0; + +err_io_unmap: + pci_iounmap(pdev, bar_base); +err_clr_master: + pci_clear_master(pdev); + pci_release_region(pdev, bar_num); +err_disable: + xsc_pci_disable_device(dev); +err_ret: + return err; +} + +static void xsc_pci_fini(struct xsc_core_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + + if (dev->bar) + pci_iounmap(pdev, dev->bar); + pci_clear_master(pdev); + pci_release_region(pdev, dev->bar_num); + xsc_pci_disable_device(dev); +} + +static int xsc_check_cmdq_version(struct xsc_core_device *dev) +{ + struct xsc_cmd_query_cmdq_ver_mbox_out *out; + struct xsc_cmd_query_cmdq_ver_mbox_in in; + + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto no_mem_out; + } + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_CMDQ_VERSION); + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto out_out; + + if (out->hdr.status) { + err = 
xsc_cmd_status_to_err(&out->hdr); + goto out_out; + } + + if (be16_to_cpu(out->cmdq_ver) != CMDQ_VERSION) { + xsc_core_err(dev, "cmdq version check failed, expecting version %d, actual version %d\n", + CMDQ_VERSION, be16_to_cpu(out->cmdq_ver)); + err = -EINVAL; + goto out_out; + } + dev->cmdq_ver = CMDQ_VERSION; + +out_out: + kfree(out); +no_mem_out: + return err; +} + +int xsc_reset_function_resource(struct xsc_core_device *dev) +{ + struct xsc_function_reset_mbox_in in; + struct xsc_function_reset_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_FUNCTION_RESET); + in.glb_func_id = cpu_to_be16(dev->glb_func_id); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) + return -EINVAL; + + return 0; +} + +static int xsc_fpga_not_supported(struct xsc_core_device *dev) +{ +#define FPGA_VERSION_H 0x100 +#define ASIC_VERSION_H 0x20230423 + u32 ver_h; + + if (!xsc_core_is_pf(dev)) + return 0; + + ver_h = REG_RD32(dev, HIF_CPM_CHIP_VERSION_H_REG_ADDR); + if (ver_h != FPGA_VERSION_H && ver_h != ASIC_VERSION_H) { + xsc_core_err(dev, "fpga version 0x%x not supported\n", ver_h); + return 1; + } + + return 0; +} + +int xsc_chip_type(struct xsc_core_device *dev) +{ + switch (dev->pdev->device) { + case XSC_MC_PF_DEV_ID: + case XSC_MC_VF_DEV_ID: + return XSC_CHIP_MC; + case XSC_MF_HOST_PF_DEV_ID: + case XSC_MF_HOST_VF_DEV_ID: + case XSC_MF_SOC_PF_DEV_ID: + return XSC_CHIP_MF; + case XSC_MS_PF_DEV_ID: + case XSC_MS_VF_DEV_ID: + return XSC_CHIP_MS; + case XSC_MV_HOST_PF_DEV_ID: + case XSC_MV_HOST_VF_DEV_ID: + case XSC_MV_SOC_PF_DEV_ID: + return XSC_CHIP_MV; + default: + return XSC_CHIP_UNKNOWN; + } +} +EXPORT_SYMBOL(xsc_chip_type); + +#if defined(__sw_64__) +static void xsc_enable_relaxed_order(struct xsc_core_device *dev) +{ + struct xsc_cmd_enable_relaxed_order_in in; + struct xsc_cmd_enable_relaxed_order_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + 
in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_RELAXED_ORDER); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + goto err_out; + + if (out.hdr.status) { + err = xsc_cmd_status_to_err(&out.hdr); + goto err_out; + } + + return; +err_out: + xsc_core_warn(dev, "Failed to enable relaxed order %d\n", err); +} +#endif + +static int xsc_cmd_activate_hw_config(struct xsc_core_device *dev) +{ + struct xsc_cmd_activate_hw_config_mbox_in in; + struct xsc_cmd_activate_hw_config_mbox_out out; + int err = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ACTIVATE_HW_CONFIG); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + dev->board_info->hw_config_activated = 1; + return 0; +} + +static int xsc_activate_hw_config(struct xsc_core_device *dev) +{ + if (dev->board_info->hw_config_activated) + return 0; + + return xsc_cmd_activate_hw_config(dev); +} + +static int xsc_init_once(struct xsc_core_device *dev) +{ + int err; + + err = xsc_cmd_init(dev); + if (err) { + xsc_core_err(dev, "Failed initializing command interface, aborting\n"); + goto err_cmd_init; + } + + err = xsc_check_cmdq_version(dev); + if (err) { + xsc_core_err(dev, "Failed to check cmdq version\n"); + goto err_cmdq_ver_chk; + } + + err = xsc_cmd_query_hca_cap(dev, &dev->caps); + if (err) { + xsc_core_err(dev, "Failed to query hca, err=%d\n", err); + goto err_cmdq_ver_chk; + } + + err = xsc_query_guid(dev); + if (err) { + xsc_core_err(dev, "failed to query guid, err=%d\n", err); + goto err_cmdq_ver_chk; + } + + err = xsc_activate_hw_config(dev); + if (err) { + xsc_core_err(dev, "failed to activate hw config, err=%d\n", err); + goto err_cmdq_ver_chk; + } + + err = xsc_reset_function_resource(dev); + if (err) { + xsc_core_err(dev, "Failed to reset function resource\n"); + goto err_cmdq_ver_chk; + } + + funcid_to_pf_vf_index(&dev->caps, dev->glb_func_id, &dev->pcie_no, + &dev->pf_id, &dev->vf_id); + 
/* Tear down everything set up by xsc_init_once(), in reverse order of
 * initialization: eswitch/SR-IOV (when configured), then EQs, QP and CQ
 * tables, and finally the command interface.  Ordering is significant —
 * the command interface must outlive users that may still issue commands.
 */
static int xsc_cleanup_once(struct xsc_core_device *dev)
{
#ifdef CONFIG_XSC_SRIOV
	xsc_eswitch_cleanup(dev);
	xsc_sriov_cleanup(dev);
#endif
	xsc_eq_cleanup(dev);
	xsc_cleanup_qp_table(dev);
	xsc_cleanup_cq_table(dev);
	xsc_cmd_cleanup(dev);
	return 0;	/* unconditionally succeeds */
}
load\n"); + err = -ENODEV; + goto out; + } + + if (boot) { + err = xsc_init_once(dev); + if (err) { + xsc_core_err(dev, "xsc_init_once failed %d\n", err); + goto err_dev_init; + } + } + + err = xsc_load(dev); + if (err) { + xsc_core_err(dev, "xsc_load failed %d\n", err); + goto err_load; + } + + if (!dev->reg_mr_via_cmdq && (xsc_core_is_pf(dev) || !dev->pdev->physfn)) { + err = xsc_create_res(dev); + if (err) { + xsc_core_err(dev, "Failed to create resource, err=%d\n", err); + goto err_create_res; + } + } + + if (boot) { + err = xsc_devlink_register(priv_to_devlink(dev), dev->device); + if (err) + goto err_devlink_reg; + } + + if (xsc_core_is_pf(dev)) + xsc_lag_add_xdev(dev); + + if (xsc_device_registered(dev)) { + xsc_attach_device(dev); + } else { + err = xsc_register_device(dev); + if (err) { + xsc_core_err(dev, "register device failed %d\n", err); + goto err_reg_dev; + } + } + + err = xsc_port_ctrl_probe(dev); + if (err) { + xsc_core_err(dev, "failed to probe port control node\n"); + goto err_port_ctrl; + } + + set_bit(XSC_INTERFACE_STATE_UP, &dev->intf_state); + mutex_unlock(&dev->intf_state_mutex); + + return err; + +err_port_ctrl: + xsc_unregister_device(dev); +err_reg_dev: + if (xsc_core_is_pf(dev)) + xsc_lag_remove_xdev(dev); + if (boot) + xsc_devlink_unregister(priv_to_devlink(dev)); +err_devlink_reg: + if (!dev->reg_mr_via_cmdq && (xsc_core_is_pf(dev) || !dev->pdev->physfn)) + xsc_destroy_res(dev); + +err_create_res: + xsc_unload(dev); + +err_load: + if (boot) + xsc_cleanup_once(dev); +err_dev_init: +out: + mutex_unlock(&dev->intf_state_mutex); + return err; +} + +int xsc_unload_one(struct xsc_core_device *dev, bool cleanup) +{ + xsc_port_ctrl_remove(dev); + xsc_devlink_unregister(priv_to_devlink(dev)); + if (cleanup) + xsc_unregister_device(dev); + mutex_lock(&dev->intf_state_mutex); + if (!test_bit(XSC_INTERFACE_STATE_UP, &dev->intf_state)) { + xsc_core_warn(dev, "%s: interface is down, NOP\n", + __func__); + if (cleanup) + xsc_cleanup_once(dev); + 
goto out; + } + + clear_bit(XSC_INTERFACE_STATE_UP, &dev->intf_state); + if (xsc_device_registered(dev)) + xsc_detach_device(dev); + + if (xsc_core_is_pf(dev)) + xsc_lag_remove_xdev(dev); + + if (!dev->reg_mr_via_cmdq && (xsc_core_is_pf(dev) || !dev->pdev->physfn)) + xsc_destroy_res(dev); + + xsc_unload(dev); + + if (cleanup) + xsc_cleanup_once(dev); + +out: + mutex_unlock(&dev->intf_state_mutex); + + return 0; +} + +static int xsc_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + struct xsc_core_device *xdev; + struct xsc_priv *priv; + int err; + struct devlink *devlink; + + devlink = xsc_devlink_alloc(&pci_dev->dev); + if (!devlink) { + dev_err(&pci_dev->dev, "devlink alloc failed\n"); + return -ENOMEM; + } + xdev = devlink_priv(devlink); + + xsc_product_info(pci_dev); + xdev->pdev = pci_dev; + xdev->device = &pci_dev->dev; + priv = &xdev->priv; + xdev->coredev_type = (IS_VIRT_FUNCTION(id)) ? + XSC_COREDEV_VF : XSC_COREDEV_PF; + xsc_core_info(xdev, "dev_type=%d is_vf=%d\n", + xdev->coredev_type, pci_dev->is_virtfn); + +#ifdef CONFIG_XSC_SRIOV + priv->sriov.probe_vf = probe_vf; + if ((IS_VIRT_FUNCTION(id)) && !probe_vf) { + xsc_core_err(xdev, "VFs are not binded to xsc driver\n"); + return 0; + } +#endif + + /* init pcie device */ + pci_set_drvdata(pci_dev, xdev); + err = xsc_pci_init(xdev, id); + if (err) { + xsc_core_err(xdev, "xsc_pci_init failed %d\n", err); + goto err_pci_init; + } + + err = xsc_dev_init(xdev); + if (err) { + xsc_core_err(xdev, "xsc_dev_init failed %d\n", err); + goto err_dev_init; + } + + if (xsc_fpga_not_supported(xdev)) { + err = -EOPNOTSUPP; + goto err_version_check; + } + + err = xsc_load_one(xdev, true); + if (err) { + xsc_core_err(xdev, "xsc_load_one failed %d\n", err); + goto err_load; + } + + request_module_nowait(ETH_DRIVER_NAME); + + return 0; + +err_load: +err_version_check: + xsc_dev_cleanup(xdev); +err_dev_init: + xsc_pci_fini(xdev); +err_pci_init: + pci_set_drvdata(pci_dev, NULL); + 
xsc_devlink_free(devlink); + return err; +} + +static void xsc_pci_remove(struct pci_dev *pci_dev) +{ + struct xsc_core_device *xdev = pci_get_drvdata(pci_dev); + + set_bit(XSC_INTERFACE_STATE_TEARDOWN, &xdev->intf_state); + xsc_unload_one(xdev, true); + xsc_dev_cleanup(xdev); + + xsc_pci_fini(xdev); + pci_set_drvdata(pci_dev, NULL); + xsc_devlink_free(priv_to_devlink(xdev)); +} + +static struct pci_driver xsc_pci_driver = { + .name = "xsc-pci", + .id_table = xsc_pci_id_table, + .probe = xsc_pci_probe, + .remove = xsc_pci_remove, + +#ifdef CONFIG_XSC_SRIOV + .sriov_configure = xsc_core_sriov_configure, +#endif +}; + +int xsc_pci_reboot_event_handler(struct notifier_block *nb, unsigned long action, void *data) +{ + pr_info("xsc pci driver recv %lu event\n", action); + if (xsc_get_exit_flag()) + return NOTIFY_OK; + xsc_pci_exit(); + + return NOTIFY_OK; +} + +struct notifier_block xsc_pci_nb = { + .notifier_call = xsc_pci_reboot_event_handler, + .next = NULL, + .priority = 0, +}; + +void xsc_pci_exit(void) +{ + xsc_stop_delayed_release(); + pci_unregister_driver(&xsc_pci_driver); + xsc_pci_ctrl_fini(); + xsc_port_ctrl_fini(); + xsc_unregister_debugfs(); + qpts_fini(); + xsc_free_board_info(); +} + +static int __init xsc_init(void) +{ + int err; + + xsc_register_debugfs(); + + qpts_init(); + + err = xsc_port_ctrl_init(); + if (err) { + pr_err("failed to initialize port control\n"); + goto err_port_ctrl; + } + + err = xsc_pci_ctrl_init(); + if (err) { + pr_err("failed to initialize dpdk ctrl\n"); + goto err_pci_ctrl; + } + + xsc_hw_reset = false; + err = pci_register_driver(&xsc_pci_driver); + if (err) { + pr_err("failed to register pci driver\n"); + goto err_register; + } + + xsc_init_delayed_release(); + register_reboot_notifier(&xsc_pci_nb); + + return 0; + +err_register: + xsc_pci_ctrl_fini(); +err_pci_ctrl: + xsc_port_ctrl_fini(); +err_port_ctrl: + xsc_unregister_debugfs(); + qpts_fini(); + return err; +} + +static void __exit xsc_fini(void) +{ + 
unregister_reboot_notifier(&xsc_pci_nb); + xsc_pci_exit(); +} + +module_init(xsc_init); +module_exit(xsc_fini); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c b/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c new file mode 100644 index 000000000000..a834a09d23da --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/mr.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "common/driver.h" +#include "common/xsc_cmd.h" + +int xsc_core_create_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr) +{ + struct xsc_create_mkey_mbox_in in; + struct xsc_create_mkey_mbox_out out; + int err; + u8 key; + + memset(&out, 0, sizeof(out)); + spin_lock(&dev->dev_res->mkey_lock); + key = 0x80 + dev->dev_res->mkey_key++; + spin_unlock(&dev->dev_res->mkey_lock); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_MKEY); + if (dev->reg_mr_via_cmdq) + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + else + err = xsc_create_mkey(dev, &in, &out); + + if (err) { + xsc_core_err(dev, "cmd exec faile %d\n", err); + return err; + } + + if (out.hdr.status) { + xsc_core_err(dev, "status %d\n", out.hdr.status); + return xsc_cmd_status_to_err(&out.hdr); + } + + mr->key = xsc_idx_to_mkey(be32_to_cpu(out.mkey) & 0xffffff) | key; + xsc_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n", be32_to_cpu(out.mkey), key, mr->key); + + return err; +} +EXPORT_SYMBOL(xsc_core_create_mkey); + +int xsc_core_destroy_mkey(struct xsc_core_device *dev, struct xsc_core_mr *mr) +{ + struct xsc_destroy_mkey_mbox_in in; + struct xsc_destroy_mkey_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_MKEY); + in.mkey = cpu_to_be32(mr->key); + if (dev->reg_mr_via_cmdq) + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + else + err = 
xsc_destroy_mkey(dev, &in, &out); + + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(xsc_core_destroy_mkey); + +int xsc_set_mpt_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd, + u32 *mtt_base) +{ + struct xsc_set_mpt_mbox_in *in; + struct xsc_set_mpt_mbox_out out; + struct xsc_register_mr_request *req = &in_cmd->req; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + return err; + } + in->mpt_item.pdn = req->pdn; + in->mpt_item.pa_num = req->pa_num; + in->mpt_item.len = req->len; + in->mpt_item.mkey = req->mkey; + in->mpt_item.acc = req->acc; + in->mpt_item.page_mode = req->page_mode; + in->mpt_item.map_en = req->map_en; + in->mpt_item.va_base = req->va_base; + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_MPT); + memset(&out, 0, sizeof(out)); + err = xsc_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "set mpt failed\n"); + kfree(in); + return -EINVAL; + } + + *mtt_base = be32_to_cpu(out.mtt_base); + kfree(in); + return 0; +} + +int xsc_set_mtt_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd, + u32 mtt_base) +{ +#define PA_NUM_PER_CMD 1024 + struct xsc_set_mtt_mbox_in *seg_in; + struct xsc_set_mtt_mbox_out seg_out; + struct xsc_register_mr_request *req = &in_cmd->req; + int tot_pg_num = be32_to_cpu(req->pa_num); + int seg_idx, tot_seg_num, seg_pa_num; + int pa_idx_base = 0; + int i; + int in_len; + int err; + + tot_seg_num = (tot_pg_num & 0x7FF) ? ((tot_pg_num >> 10) + 1) : + (tot_pg_num >> 10); + for (seg_idx = 0; seg_idx < tot_seg_num; seg_idx++) { + seg_pa_num = (seg_idx != tot_seg_num - 1) ? 
PA_NUM_PER_CMD : + (tot_pg_num - ((tot_seg_num - 1) << 10)); + in_len = (seg_pa_num << 3) + sizeof(*seg_in); + seg_in = kzalloc(in_len, GFP_KERNEL); + if (!seg_in) { + err = -ENOMEM; + return err; + } + seg_in->mtt_setting.mtt_base = cpu_to_be32(mtt_base); + seg_in->mtt_setting.pa_num = cpu_to_be32(seg_pa_num); + for (i = 0; i < seg_pa_num; i++) + seg_in->mtt_setting.pas[i] = req->pas[pa_idx_base + i]; + seg_in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_MTT); + + memset(&seg_out, 0, sizeof(seg_out)); + xsc_core_dbg(dev, "set mtt seg %d, pa_num %d, pa_idx_base %d, tot_seg %d\n", + seg_idx, seg_pa_num, pa_idx_base, tot_seg_num); + err = xsc_cmd_exec(dev, seg_in, in_len, &seg_out, sizeof(seg_out)); + if (err || seg_out.hdr.status) { + xsc_core_err(dev, "set mtt seg %d failed\n", seg_idx); + kfree(seg_in); + return -EINVAL; + } + kfree(seg_in); + pa_idx_base += seg_pa_num; + mtt_base += seg_pa_num; + } + return 0; +} + +int xsc_dereg_mr_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in_cmd) +{ + struct xsc_unregister_mr_mbox_in in; + struct xsc_unregister_mr_mbox_out out; + int err; + + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEREG_MR); + in.mkey = in_cmd->req.mkey; + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "cmd exec failed %d\n", err); + return -EINVAL; + } + return 0; +} + +int xsc_reg_mr_via_cmdq(struct xsc_core_device *dev, struct xsc_register_mr_mbox_in *in) +{ + u32 mtt_base; + int err; + + err = xsc_set_mpt_via_cmdq(dev, in, &mtt_base); + if (err) { + xsc_core_err(dev, "set mpt via cmdq failed\n"); + return err; + } + + err = xsc_set_mtt_via_cmdq(dev, in, mtt_base); + if (err) { + xsc_core_err(dev, "set mtt via cmdq failed\n"); + goto set_mtt_err; + } + return 0; + +set_mtt_err: + err = xsc_dereg_mr_via_cmdq(dev, in); + if (err) + xsc_core_err(dev, "dereg error mr failed\n"); + return err; +} + +int xsc_core_register_mr(struct 
xsc_core_device *dev, struct xsc_core_mr *mr, + struct xsc_register_mr_mbox_in *in, int inlen) +{ + struct xsc_register_mr_mbox_out out; + int err; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_REG_MR); + if (dev->reg_mr_via_cmdq) + err = xsc_reg_mr_via_cmdq(dev, in); + else + err = xsc_reg_mr(dev, in, &out); + + if (err) { + xsc_core_err(dev, "cmd exec failed %d\n", err); + return err; + } + if (out.hdr.status) { + xsc_core_err(dev, "status %d\n", out.hdr.status); + return xsc_cmd_status_to_err(&out.hdr); + } + + return 0; +} +EXPORT_SYMBOL(xsc_core_register_mr); + +int xsc_core_dereg_mr(struct xsc_core_device *dev, struct xsc_core_mr *mr) +{ + struct xsc_unregister_mr_mbox_in in; + struct xsc_unregister_mr_mbox_out out; + int err; + + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEREG_MR); + in.mkey = cpu_to_be32(xsc_mkey_to_idx(mr->key)); + if (dev->reg_mr_via_cmdq) + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); + else + err = xsc_dereg_mr(dev, &in, &out); + + if (err) { + xsc_core_err(dev, "cmd exec failed %d\n", err); + return err; + } + if (out.hdr.status) { + xsc_core_err(dev, "status %d\n", out.hdr.status); + return xsc_cmd_status_to_err(&out.hdr); + } + + return 0; +} +EXPORT_SYMBOL(xsc_core_dereg_mr); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c new file mode 100644 index 000000000000..7138c281ed20 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c @@ -0,0 +1,515 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include "common/driver.h" +#include "common/xsc_hsi.h" +#include "common/xsc_core.h" +#ifdef CONFIG_RFS_ACCEL +#include +#endif +#include "fw/xsc_flow.h" +#include "fw/xsc_fw.h" + +enum xsc_eq_type { + XSC_EQ_TYPE_COMP, + XSC_EQ_TYPE_ASYNC, +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + XSC_EQ_TYPE_PF, +#endif +}; + +struct xsc_irq { + struct atomic_notifier_head nh; + cpumask_var_t mask; + char name[XSC_MAX_IRQ_NAME]; +}; + +struct xsc_irq_table { + struct xsc_irq *irq; + int nvec; +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rmap; +#endif +}; + +struct xsc_msix_resource *g_msix_xres; + +static irqreturn_t xsc_dma_read_msix_handler(int irq, void *dev_id) +{ + xsc_dma_read_done_complete(); + return IRQ_HANDLED; +} + +static int xsc_dma_read_msix_init(struct xsc_core_device *xdev) +{ + int err; + char *name = "xsc_dma_read_done"; + struct xsc_dev_resource *dev_res = xdev->dev_res; + int irqn; + u32 value = 0; + int vecid = 0; + + snprintf(dev_res->irq_info[XSC_DMA_READ_DONE_VEC].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", + name, pci_name(xdev->pdev)); + irqn = pci_irq_vector(xdev->pdev, XSC_DMA_READ_DONE_VEC); + err = request_irq(irqn, xsc_dma_read_msix_handler, 0, + dev_res->irq_info[XSC_DMA_READ_DONE_VEC].name, (void *)xdev); + + vecid = (xdev->msix_vec_base + XSC_DMA_READ_DONE_VEC); + value = ((1 << 12) | (vecid & 0xfff)); + REG_WR32(xdev, HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_ADDR, value); + + return err; +} + +static void xsc_free_irq(struct xsc_core_device *xdev, unsigned int vector) +{ + unsigned int irqn = 0; + + irqn = pci_irq_vector(xdev->pdev, vector); + disable_irq(irqn); + + if (xsc_fw_is_available(xdev)) + free_irq(irqn, xdev); +} + +static void xsc_dma_read_msix_fini(struct xsc_core_device *xdev) +{ + if (xdev->caps.msix_enable && xsc_core_is_pf(xdev)) + xsc_free_irq(xdev, XSC_DMA_READ_DONE_VEC); +} + +struct xsc_eq *xsc_eq_get(struct xsc_core_device *dev, int i) +{ + struct xsc_eq_table *table = 
&dev->dev_res->eq_table;
+	struct xsc_eq *eq, *n;
+	struct xsc_eq *eq_ret = NULL;
+
+	spin_lock(&table->lock);
+	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+		if (eq->index == i) {
+			eq_ret = eq;
+			break;
+		}
+	}
+	spin_unlock(&table->lock);
+
+	return eq_ret;
+}
+EXPORT_SYMBOL(xsc_eq_get);
+
+/* Set in @dstp every online CPU id that belongs to NUMA node @node. */
+void mask_cpu_by_node(int node, struct cpumask *dstp)
+{
+	int i;
+
+	for (i = 0; i < nr_cpu_ids; i++) {
+		if (node == cpu_to_node(i))
+			cpumask_set_cpu(i, dstp);
+	}
+}
+EXPORT_SYMBOL(mask_cpu_by_node);
+
+/* Allocate per-EQ and per-device cpumasks and set the affinity hint of
+ * completion vector @i to the CPUs of the device's NUMA node.
+ */
+static int set_comp_irq_affinity_hint(struct xsc_core_device *dev, int i)
+{
+	struct xsc_eq_table *table = &dev->dev_res->eq_table;
+	int vecidx = table->eq_vec_comp_base + i;
+	struct xsc_eq *eq = xsc_eq_get(dev, i);
+	unsigned int irqn;
+	int ret;
+
+	/* xsc_eq_get() returns NULL when no EQ has index @i; bail out
+	 * instead of dereferencing it below.
+	 */
+	if (!eq)
+		return -ENOENT;
+
+	irqn = pci_irq_vector(dev->pdev, vecidx);
+	if (!zalloc_cpumask_var(&eq->mask, GFP_KERNEL)) {
+		xsc_core_err(dev, "zalloc_cpumask_var rx cpumask failed");
+		return -ENOMEM;
+	}
+
+	if (!zalloc_cpumask_var(&dev->xps_cpumask, GFP_KERNEL)) {
+		xsc_core_err(dev, "zalloc_cpumask_var tx cpumask failed");
+		/* don't leak the rx mask allocated just above */
+		free_cpumask_var(eq->mask);
+		return -ENOMEM;
+	}
+
+	mask_cpu_by_node(dev->priv.numa_node, eq->mask);
+	ret = irq_set_affinity_hint(irqn, eq->mask);
+
+	return ret;
+}
+
+/* Undo set_comp_irq_affinity_hint() for completion vector @i. */
+static void clear_comp_irq_affinity_hint(struct xsc_core_device *dev, int i)
+{
+	struct xsc_eq_table *table = &dev->dev_res->eq_table;
+	int vecidx = table->eq_vec_comp_base + i;
+	struct xsc_eq *eq = xsc_eq_get(dev, i);
+	int irqn;
+
+	if (!eq)
+		return;
+
+	irqn = pci_irq_vector(dev->pdev, vecidx);
+	irq_set_affinity_hint(irqn, NULL);
+	free_cpumask_var(eq->mask);
+}
+
+static int set_comp_irq_affinity_hints(struct xsc_core_device *dev)
+{
+	struct xsc_eq_table *table = &dev->dev_res->eq_table;
+	int nvec = table->num_comp_vectors;
+	int err;
+	int i;
+
+	for (i = 0; i < nvec; i++) {
+		err = set_comp_irq_affinity_hint(dev, i);
+		if (err)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	for (i--; i >= 0; i--)
+		clear_comp_irq_affinity_hint(dev, i);
+	free_cpumask_var(dev->xps_cpumask);
+
+	
return err; +} + +static void clear_comp_irq_affinity_hints(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + int nvec = table->num_comp_vectors; + int i; + + for (i = 0; i < nvec; i++) + clear_comp_irq_affinity_hint(dev, i); + free_cpumask_var(dev->xps_cpumask); +} + +struct cpumask * +xsc_comp_irq_get_affinity_mask(struct xsc_core_device *dev, int vector) +{ + struct xsc_eq *eq = xsc_eq_get(dev, vector); + + if (unlikely(!eq)) + return NULL; + + return eq->mask; +} +EXPORT_SYMBOL(xsc_comp_irq_get_affinity_mask); + +static int xsc_alloc_irq_vectors(struct xsc_core_device *dev) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + struct xsc_eq_table *table = &dev_res->eq_table; + int nvec = dev->caps.msix_num; + int nvec_base; + int err; + + if (xsc_core_is_pf(dev)) + nvec_base = XSC_EQ_VEC_COMP_BASE; + else + /*VF device not need dma read done vector.*/ + nvec_base = (XSC_EQ_VEC_COMP_BASE - 1); + + if (nvec <= nvec_base) { + xsc_core_warn(dev, "failed to alloc irq vector(%d)\n", nvec); + return -ENOMEM; + } + + dev_res->irq_info = kcalloc(nvec, sizeof(*dev_res->irq_info), GFP_KERNEL); + if (!dev_res->irq_info) + return -ENOMEM; + + nvec = pci_alloc_irq_vectors(dev->pdev, nvec_base + 1, nvec, PCI_IRQ_MSIX); + if (nvec < 0) { + err = nvec; + goto err_free_irq_info; + } + + table->eq_vec_comp_base = nvec_base; + table->num_comp_vectors = nvec - nvec_base; + dev->msix_vec_base = dev->caps.msix_base; + xsc_core_info(dev, + "alloc msix_vec_num=%d, comp_num=%d, max_msix_num=%d, msix_vec_base=%d\n", + nvec, table->num_comp_vectors, dev->caps.msix_num, dev->msix_vec_base); + + return 0; + +err_free_irq_info: + pci_free_irq_vectors(dev->pdev); + kfree(dev_res->irq_info); + return err; +} + +static void xsc_free_irq_vectors(struct xsc_core_device *dev) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + + if (!xsc_fw_is_available(dev)) + return; + + pci_free_irq_vectors(dev->pdev); + kfree(dev_res->irq_info); +} + +int 
xsc_vector2eqn(struct xsc_core_device *dev, int vector, int *eqn, + unsigned int *irqn) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + struct xsc_eq *eq, *n; + int err = -ENOENT; + + if (!dev->caps.msix_enable) + return 0; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + if (eq->index == vector) { + *eqn = eq->eqn; + *irqn = eq->irqn; + err = 0; + break; + } + } + spin_unlock(&table->lock); + + return err; +} +EXPORT_SYMBOL(xsc_vector2eqn); + +static void free_comp_eqs(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + struct xsc_eq *eq, *n; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + list_del(&eq->list); + spin_unlock(&table->lock); + if (xsc_destroy_unmap_eq(dev, eq)) + xsc_core_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn); + kfree(eq); + spin_lock(&table->lock); + } + spin_unlock(&table->lock); +} + +static int alloc_comp_eqs(struct xsc_core_device *dev) +{ + struct xsc_eq_table *table = &dev->dev_res->eq_table; + char name[XSC_MAX_IRQ_NAME]; + struct xsc_eq *eq; + int ncomp_vec; + int nent; + int err; + int i; + + INIT_LIST_HEAD(&table->comp_eqs_list); + ncomp_vec = table->num_comp_vectors; + nent = XSC_COMP_EQ_SIZE; + + for (i = 0; i < ncomp_vec; i++) { + eq = kzalloc(sizeof(*eq), GFP_KERNEL); + if (!eq) { + err = -ENOMEM; + goto clean; + } + + snprintf(name, XSC_MAX_IRQ_NAME, "xsc_comp%d", i); + err = xsc_create_map_eq(dev, eq, + i + table->eq_vec_comp_base, nent, name); + if (err) { + kfree(eq); + goto clean; + } + + eq->index = i; + spin_lock(&table->lock); + list_add_tail(&eq->list, &table->comp_eqs_list); + spin_unlock(&table->lock); + } + + return 0; + +clean: + free_comp_eqs(dev); + return err; +} + +static irqreturn_t xsc_cmd_handler(int irq, void *arg) +{ + struct xsc_core_device *dev = (struct xsc_core_device *)arg; + int err; + + disable_irq_nosync(dev->cmd.irqn); + err = 
xsc_cmd_err_handler(dev); + if (!err) + xsc_cmd_resp_handler(dev); + enable_irq(dev->cmd.irqn); + + return IRQ_HANDLED; +} + +int xsc_request_irq_for_cmdq(struct xsc_core_device *dev, u8 vecidx) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + + writel(dev->msix_vec_base + vecidx, REG_ADDR(dev, dev->cmd.reg.msix_vec_addr)); + + snprintf(dev_res->irq_info[vecidx].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", + "xsc_cmd", pci_name(dev->pdev)); + dev->cmd.irqn = pci_irq_vector(dev->pdev, vecidx); + return request_irq(dev->cmd.irqn, xsc_cmd_handler, 0, + dev_res->irq_info[vecidx].name, dev); +} + +void xsc_free_irq_for_cmdq(struct xsc_core_device *dev) +{ + xsc_free_irq(dev, XSC_VEC_CMD); +} + +static irqreturn_t xsc_event_handler(int irq, void *arg) +{ + struct xsc_core_device *dev = (struct xsc_core_device *)arg; + + xsc_core_dbg(dev, "cmd event hint irq: %d\n", irq); + + if (!dev->eth_priv) + return IRQ_NONE; + + if (!dev->event_handler) + return IRQ_NONE; + + dev->event_handler(dev->eth_priv); + + return IRQ_HANDLED; +} + +int xsc_request_irq_for_event(struct xsc_core_device *dev) +{ + struct xsc_dev_resource *dev_res = dev->dev_res; + + snprintf(dev_res->irq_info[XSC_VEC_CMD_EVENT].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", + "xsc_eth_event", pci_name(dev->pdev)); + return request_irq(pci_irq_vector(dev->pdev, XSC_VEC_CMD_EVENT), xsc_event_handler, 0, + dev_res->irq_info[XSC_VEC_CMD_EVENT].name, dev); +} + +void xsc_free_irq_for_event(struct xsc_core_device *dev) +{ + xsc_free_irq(dev, XSC_VEC_CMD_EVENT); +} + +int xsc_cmd_enable_msix(struct xsc_core_device *xdev) +{ + struct xsc_msix_table_info_mbox_in in; + struct xsc_msix_table_info_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_MSIX); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) { + xsc_core_err(xdev, "xsc_cmd_exec enable msix failed %d\n", err); + return err; + } + + return 0; +} + +int 
xsc_irq_eq_create(struct xsc_core_device *dev) +{ + int err; + + if (dev->caps.msix_enable == 0) + return 0; + + err = xsc_alloc_irq_vectors(dev); + if (err) { + xsc_core_err(dev, "enable msix failed, err=%d\n", err); + goto err_alloc_irq; + } + + err = xsc_start_eqs(dev); + if (err) { + xsc_core_err(dev, "failed to start EQs, err=%d\n", err); + goto err_start_eqs; + } + + err = alloc_comp_eqs(dev); + if (err) { + xsc_core_err(dev, "failed to alloc comp EQs, err=%d\n", err); + goto err_alloc_comp_eqs; + } + + err = xsc_request_irq_for_cmdq(dev, XSC_VEC_CMD); + if (err) { + xsc_core_err(dev, "failed to request irq for cmdq, err=%d\n", err); + goto err_request_cmd_irq; + } + + err = xsc_request_irq_for_event(dev); + if (err) { + xsc_core_err(dev, "failed to request irq for event, err=%d\n", err); + goto err_request_event_irq; + } + + if (dev->caps.msix_enable && xsc_core_is_pf(dev)) { + err = xsc_dma_read_msix_init(dev); + if (err) { + xsc_core_err(dev, "dma read msix init failed %d.\n", err); + goto err_dma_read_msix; + } + } + + err = set_comp_irq_affinity_hints(dev); + if (err) { + xsc_core_err(dev, "failed to alloc affinity hint cpumask, err=%d\n", err); + goto err_set_affinity; + } + + xsc_cmd_use_events(dev); + err = xsc_cmd_enable_msix(dev); + if (err) { + xsc_core_err(dev, "xsc_cmd_enable_msix failed %d.\n", err); + xsc_cmd_use_polling(dev); + goto err_set_affinity; + } + return 0; + +err_set_affinity: + xsc_dma_read_msix_fini(dev); +err_dma_read_msix: + xsc_free_irq_for_event(dev); +err_request_event_irq: + xsc_free_irq_for_cmdq(dev); +err_request_cmd_irq: + free_comp_eqs(dev); +err_alloc_comp_eqs: + xsc_stop_eqs(dev); +err_start_eqs: + xsc_free_irq_vectors(dev); +err_alloc_irq: + return err; +} + +int xsc_irq_eq_destroy(struct xsc_core_device *dev) +{ + if (dev->caps.msix_enable == 0) + return 0; + + xsc_stop_eqs(dev); + clear_comp_irq_affinity_hints(dev); + free_comp_eqs(dev); + + xsc_dma_read_msix_fini(dev); + xsc_free_irq_for_event(dev); + 
xsc_free_irq_for_cmdq(dev); + xsc_free_irq_vectors(dev); + + return 0; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/pd.c b/drivers/net/ethernet/yunsilicon/xsc/pci/pd.c new file mode 100644 index 000000000000..37db01d1742f --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/pd.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include "common/driver.h" + +int xsc_core_alloc_pd(struct xsc_core_device *xdev, u32 *pdn) +{ + struct xsc_alloc_pd_mbox_in in; + struct xsc_alloc_pd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ALLOC_PD); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + + *pdn = be32_to_cpu(out.pdn) & 0xffffff; + return err; +} +EXPORT_SYMBOL(xsc_core_alloc_pd); + +int xsc_core_dealloc_pd(struct xsc_core_device *xdev, u32 pdn) +{ + struct xsc_dealloc_pd_mbox_in in; + struct xsc_dealloc_pd_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEALLOC_PD); + in.pdn = cpu_to_be32(pdn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + + return err; +} +EXPORT_SYMBOL(xsc_core_dealloc_pd); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/port.c b/drivers/net/ethernet/yunsilicon/xsc/pci/port.c new file mode 100644 index 000000000000..80414f3917d9 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/port.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */
+
+#include <linux/module.h>
+#include "common/driver.h"
+#include "common/port.h"
+
+/* Read (@write == 0) or write (@write == 1) a device register block via
+ * the ACCESS_REG mailbox command.  @data_in/@size_in is the request
+ * payload; on success @data_out/@size_out receives the reply payload.
+ * Returns 0 on success, a negative errno, or the translated mailbox
+ * status.
+ */
+int xsc_core_access_reg(struct xsc_core_device *xdev, void *data_in,
+			int size_in, void *data_out, int size_out,
+			u16 reg_num, int arg, int write)
+{
+	struct xsc_access_reg_mbox_in *in = NULL;
+	struct xsc_access_reg_mbox_out *out = NULL;
+	int err = -ENOMEM;
+
+	in = xsc_vzalloc(sizeof(*in) + size_in);
+	if (!in)
+		return -ENOMEM;
+
+	out = xsc_vzalloc(sizeof(*out) + size_out);
+	if (!out)
+		goto ex1;
+
+	memcpy(in->data, data_in, size_in);
+	in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_ACCESS_REG);
+	in->arg = cpu_to_be32(arg);
+	in->register_id = cpu_to_be16(reg_num);
+	err = xsc_cmd_exec(xdev, in, sizeof(*in) + size_in, out,
+			   sizeof(*out) + size_out);
+	if (err)
+		goto ex2;
+
+	if (out->hdr.status) {
+		/* previously returned here directly, leaking both mailbox
+		 * buffers; route the status through the cleanup path.
+		 */
+		err = xsc_cmd_status_to_err(&out->hdr);
+		goto ex2;
+	}
+
+	memcpy(data_out, out->data, size_out);
+
+ex2:
+	xsc_vfree(out);
+ex1:
+	xsc_vfree(in);
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_core_access_reg);
+
+struct xsc_reg_pcap {
+	u8 rsvd0;
+	u8 port_num;
+	u8 rsvd1[2];
+	__be32 caps_127_96;
+	__be32 caps_95_64;
+	__be32 caps_63_32;
+	__be32 caps_31_0;
+};
+
+int xsc_set_port_caps(struct xsc_core_device *xdev, int port_num, u32 caps)
+{
+	struct xsc_reg_pcap in;
+	struct xsc_reg_pcap out;
+	int err;
+
+	memset(&in, 0, sizeof(in));
+	in.caps_127_96 = cpu_to_be32(caps);
+	in.port_num = port_num;
+
+	err = xsc_core_access_reg(xdev, &in, sizeof(in), &out,
+				  sizeof(out), XSC_REG_PCAP, 0, 1);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_set_port_caps);
+
+static int xsc_query_module_num(struct xsc_core_device *dev, int *module_num)
+{
+	*module_num = dev->mac_port;
+	return 0;
+}
+
+static int xsc_query_module_id(struct xsc_core_device *dev, int module_num,
+			       u8 *module_id)
+{
+	struct xsc_reg_mcia in;
+	struct xsc_reg_mcia out;
+	int err, status;
+	u8 *ptr;
+
+	/* zero the whole request: only a few fields are set below, the
+	 * rest would otherwise be uninitialized stack bytes.
+	 */
+	memset(&in, 0, sizeof(in));
+	in.i2c_device_address = XSC_I2C_ADDR_LOW;
+	in.module = module_num;
+	in.device_address = 0;
+	in.page_number = 0;
+	in.size = 1;
+
+	err = 
xsc_core_access_reg(dev, &in, sizeof(in), &out, + sizeof(out), XSC_REG_MCIA, 0, 0); + if (err) + return err; + + status = out.status; + if (status) { + xsc_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", + status); + return -EIO; + } + ptr = out.dword_0; + + *module_id = ptr[0]; + + return 0; +} + +static int xsc_qsfp_eeprom_page(u16 offset) +{ + if (offset < XSC_EEPROM_PAGE_LENGTH) + /* Addresses between 0-255 - page 00 */ + return 0; + + /* Addresses between 256 - 639 belongs to pages 01, 02 and 03 + * For example, offset = 400 belongs to page 02: + * 1 + ((400 - 256)/128) = 2 + */ + return 1 + ((offset - XSC_EEPROM_PAGE_LENGTH) / + XSC_EEPROM_HIGH_PAGE_LENGTH); +} + +static int xsc_qsfp_eeprom_high_page_offset(int page_num) +{ + if (!page_num) /* Page 0 always start from low page */ + return 0; + + /* High page */ + return page_num * XSC_EEPROM_HIGH_PAGE_LENGTH; +} + +static void xsc_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = XSC_I2C_ADDR_LOW; + *page_num = xsc_qsfp_eeprom_page(*offset); + *offset -= xsc_qsfp_eeprom_high_page_offset(*page_num); +} + +static void xsc_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset) +{ + *i2c_addr = XSC_I2C_ADDR_LOW; + *page_num = 0; + + if (*offset < XSC_EEPROM_PAGE_LENGTH) + return; + + *i2c_addr = XSC_I2C_ADDR_HIGH; + *offset -= XSC_EEPROM_PAGE_LENGTH; +} + +static int xsc_query_mcia(struct xsc_core_device *dev, + struct xsc_module_eeprom_query_params *params, u8 *data) +{ + struct xsc_reg_mcia in; + struct xsc_reg_mcia out; + int status, err; + void *ptr; + u16 size; + + size = min_t(int, params->size, XSC_EEPROM_MAX_BYTES); + + in.i2c_device_address = params->i2c_address; + in.module = params->module_number; + in.device_address = params->offset; + in.page_number = params->page; + in.size = size; + + err = xsc_core_access_reg(dev, &in, sizeof(in), &out, + sizeof(out), XSC_REG_MCIA, 0, 0); + if (err) + return err; + + status = out.status; + if (status) { + 
xsc_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", + status); + return -EIO; + } + + ptr = out.dword_0; + memcpy(data, ptr, size); + + return size; +} + +int xsc_query_module_eeprom(struct xsc_core_device *dev, + u16 offset, u16 size, u8 *data) +{ + struct xsc_module_eeprom_query_params query = {0}; + u8 module_id; + int err; + + err = xsc_query_module_num(dev, &query.module_number); + if (err) + return err; + + err = xsc_query_module_id(dev, query.module_number, &module_id); + if (err) + return err; + + switch (module_id) { + case XSC_MODULE_ID_SFP: + xsc_sfp_eeprom_params_set(&query.i2c_address, &query.page, &offset); + break; + case XSC_MODULE_ID_QSFP: + case XSC_MODULE_ID_QSFP_PLUS: + case XSC_MODULE_ID_QSFP28: + case XSC_MODULE_ID_QSFP_DD: + case XSC_MODULE_ID_DSFP: + case XSC_MODULE_ID_QSFP_PLUS_CMIS: + xsc_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset); + break; + default: + xsc_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); + return -EINVAL; + } + + if (offset + size > XSC_EEPROM_PAGE_LENGTH) + /* Cross pages read, read until offset 256 in low page */ + size = XSC_EEPROM_PAGE_LENGTH - offset; + + query.size = size; + query.offset = offset; + + return xsc_query_mcia(dev, &query, data); +} +EXPORT_SYMBOL_GPL(xsc_query_module_eeprom); + +int xsc_query_module_eeprom_by_page(struct xsc_core_device *dev, + struct xsc_module_eeprom_query_params *params, + u8 *data) +{ + u8 module_id; + int err; + + err = xsc_query_module_num(dev, ¶ms->module_number); + if (err) + return err; + + err = xsc_query_module_id(dev, params->module_number, &module_id); + if (err) + return err; + + switch (module_id) { + case XSC_MODULE_ID_SFP: + if (params->page > 0) + return -EINVAL; + break; + case XSC_MODULE_ID_QSFP: + case XSC_MODULE_ID_QSFP28: + case XSC_MODULE_ID_QSFP_PLUS: + case XSC_MODULE_ID_QSFP_DD: + case XSC_MODULE_ID_QSFP_PLUS_CMIS: + if (params->page > 3) + return -EINVAL; + break; + case XSC_MODULE_ID_DSFP: + break; + default: + 
xsc_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); + return -EINVAL; + } + + if (params->i2c_address != XSC_I2C_ADDR_HIGH && + params->i2c_address != XSC_I2C_ADDR_LOW) { + xsc_core_err(dev, "I2C address not recognized: 0x%x\n", params->i2c_address); + return -EINVAL; + } + + return xsc_query_mcia(dev, params, data); +} +EXPORT_SYMBOL_GPL(xsc_query_module_eeprom_by_page); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c b/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c new file mode 100644 index 000000000000..0e5d365c0b23 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c @@ -0,0 +1,478 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include "common/qp.h" +#include "common/driver.h" +#include +#include "common/xsc_core.h" + +#define GROUP_DESTROY_FLAG_SHFIT 15 +#define GROUP_DESTROY_FLAG_MASK (1 << (GROUP_DESTROY_FLAG_SHFIT)) + +#define GROUP_OTHER_HASH_SIZE 16 +#define GROUP_CC_HASH_SIZE (1024 - GROUP_OTHER_HASH_SIZE) + +enum { + GROUP_MODE_PER_QP = 0, + GROUP_MODE_PER_DEST_IP, +}; + +struct { + struct list_head head; + spinlock_t lock; /* protect delayed_release_list */ + struct task_struct *poll_task; + wait_queue_head_t wq; + int wait_flag; +} delayed_release_list; + +enum { + SLEEP, + WAKEUP, + EXIT, +}; + +static bool exit_flag; + +void xsc_set_exit_flag(void) +{ + exit_flag = true; +} +EXPORT_SYMBOL_GPL(xsc_set_exit_flag); + +bool xsc_get_exit_flag(void) +{ + return exit_flag; +} +EXPORT_SYMBOL_GPL(xsc_get_exit_flag); + +bool exist_incomplete_qp_flush(void) +{ + return !list_empty(&delayed_release_list.head); +} +EXPORT_SYMBOL_GPL(exist_incomplete_qp_flush); + +static bool xsc_qp_flush_finished(struct xsc_core_device *xdev, u32 qpn) +{ + struct xsc_query_qp_flush_status_mbox_in in; + struct xsc_query_qp_flush_status_mbox_out out; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, 
sizeof(out));
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_QP_FLUSH_STATUS);
+	in.qpn = cpu_to_be32(qpn);
+	err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+	if ((!err && !out.hdr.status) || err == -ETIMEDOUT)
+		return true;
+
+	xsc_core_dbg(xdev, "qp[%d] flush incomplete.\n", qpn);
+	return false;
+}
+
+/* Kthread body: poll the delayed-release list, re-queueing QPs whose
+ * flush has not finished and completing those that are done.
+ */
+static int xsc_qp_flush_check(void *arg)
+{
+	struct xsc_qp_rsc *entry;
+
+	while (!kthread_should_stop()) {
+		if (need_resched())
+			schedule();
+
+		spin_lock(&delayed_release_list.lock);
+		entry = list_first_entry_or_null(&delayed_release_list.head,
+						 struct xsc_qp_rsc, node);
+		if (!entry) {
+			spin_unlock(&delayed_release_list.lock);
+			wait_event_interruptible(delayed_release_list.wq,
+						 delayed_release_list.wait_flag != SLEEP);
+			if (delayed_release_list.wait_flag == EXIT)
+				break;
+			delayed_release_list.wait_flag = SLEEP;
+			continue;
+		}
+		list_del(&entry->node);
+		spin_unlock(&delayed_release_list.lock);
+
+		if (!exit_flag && !xsc_qp_flush_finished(entry->xdev, entry->qpn)) {
+			spin_lock(&delayed_release_list.lock);
+			list_add_tail(&entry->node, &delayed_release_list.head);
+			spin_unlock(&delayed_release_list.lock);
+		} else {
+			complete(&entry->delayed_release);
+		}
+	}
+
+	return 0;
+}
+
+void xsc_init_delayed_release(void)
+{
+	INIT_LIST_HEAD(&delayed_release_list.head);
+	spin_lock_init(&delayed_release_list.lock);
+	init_waitqueue_head(&delayed_release_list.wq);
+	delayed_release_list.wait_flag = SLEEP;
+	delayed_release_list.poll_task = kthread_create(xsc_qp_flush_check, NULL, "qp flush check");
+	/* kthread_create() returns ERR_PTR() on failure, never NULL, so a
+	 * plain NULL test could wake an invalid pointer.  Normalize failure
+	 * to NULL so xsc_stop_delayed_release() stays safe as well.
+	 */
+	if (IS_ERR(delayed_release_list.poll_task))
+		delayed_release_list.poll_task = NULL;
+	if (delayed_release_list.poll_task)
+		wake_up_process(delayed_release_list.poll_task);
+}
+
+void xsc_stop_delayed_release(void)
+{
+	delayed_release_list.wait_flag = EXIT;
+	wake_up(&delayed_release_list.wq);
+	if (delayed_release_list.poll_task)
+		kthread_stop(delayed_release_list.poll_task);
+}
+
+static void xsc_wait_qp_flush_complete(struct xsc_core_device *xdev, u32 qpn)
+{
+	struct xsc_qp_rsc qp_rsc;
+	int err = 0;
+
+	if (exit_flag)
+		
return; + + init_completion(&qp_rsc.delayed_release); + qp_rsc.qpn = qpn; + qp_rsc.xdev = xdev; + spin_lock(&delayed_release_list.lock); + list_add_tail(&qp_rsc.node, &delayed_release_list.head); + spin_unlock(&delayed_release_list.lock); + delayed_release_list.wait_flag = WAKEUP; + wake_up(&delayed_release_list.wq); + + while ((err = wait_for_completion_interruptible(&qp_rsc.delayed_release)) + == -ERESTARTSYS) { + xsc_core_dbg(xdev, "qp %d wait for completion is interrupted, err = %d\n", + qpn, err); + if (need_resched()) + schedule(); + } +} + +int create_resource_common(struct xsc_core_device *xdev, + struct xsc_core_qp *qp) +{ + struct xsc_qp_table *table = &xdev->dev_res->qp_table; + int err; + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, qp->qpn, qp); + spin_unlock_irq(&table->lock); + if (err) + return err; + + atomic_set(&qp->refcount, 1); + init_completion(&qp->free); + qp->pid = current->pid; + + return 0; +} +EXPORT_SYMBOL_GPL(create_resource_common); + +void destroy_resource_common(struct xsc_core_device *xdev, + struct xsc_core_qp *qp) +{ + struct xsc_qp_table *table = &xdev->dev_res->qp_table; + unsigned long flags; + + spin_lock_irqsave(&table->lock, flags); + radix_tree_delete(&table->tree, qp->qpn); + spin_unlock_irqrestore(&table->lock, flags); + + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); + wait_for_completion(&qp->free); +} +EXPORT_SYMBOL_GPL(destroy_resource_common); + +void xsc_qp_event(struct xsc_core_device *xdev, u32 qpn, int event_type) +{ + struct xsc_qp_table *table = &xdev->dev_res->qp_table; + struct xsc_core_qp *qp; + + spin_lock(&table->lock); + + qp = radix_tree_lookup(&table->tree, qpn); + if (qp) + atomic_inc(&qp->refcount); + + spin_unlock(&table->lock); + + if (!qp) { + xsc_core_warn(xdev, "Async event for bogus QP 0x%x\n", qpn); + return; + } + + qp->event(qp, event_type); + + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); +} + +int xsc_core_create_qp(struct 
xsc_core_device *xdev,
+		       struct xsc_core_qp *qp,
+		       struct xsc_create_qp_mbox_in *in,
+		       int inlen)
+{
+	struct xsc_create_qp_mbox_out out;
+	struct xsc_destroy_qp_mbox_in din;
+	struct xsc_destroy_qp_mbox_out dout;
+	int err;
+	struct timespec64 ts;
+
+	ktime_get_boottime_ts64(&ts);
+
+	memset(&dout, 0, sizeof(dout));
+	in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_QP);
+
+	err = xsc_cmd_exec(xdev, in, inlen, &out, sizeof(out));
+	if (err) {
+		xsc_core_err(xdev, "ret %d", err);
+		return err;
+	}
+
+	if (out.hdr.status) {
+		xsc_core_err(xdev, "current num of QPs %u\n", atomic_read(&xdev->num_qps));
+		return xsc_cmd_status_to_err(&out.hdr);
+	}
+	qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
+	xsc_core_info(xdev, "qpn = %u\n", qp->qpn);
+
+	qp->trace_info = kzalloc(sizeof(*qp->trace_info), GFP_KERNEL);
+	if (!qp->trace_info) {
+		err = -ENOMEM;
+		goto err_cmd;
+	}
+	qp->trace_info->pid = current->pid;
+	qp->trace_info->timestamp = (u64)(u32)ts.tv_sec * MSEC_PER_SEC +
+				    ts.tv_nsec / NSEC_PER_MSEC;
+
+	err = create_resource_common(xdev, qp);
+	if (err) {
+		xsc_core_err(xdev, "err %d", err);
+		goto err_trace;
+	}
+
+	err = xsc_debug_qp_add(xdev, qp);
+	if (err)
+		xsc_core_err(xdev, "failed adding QP %u to debug file system\n",
+			     qp->qpn);
+
+	atomic_inc(&xdev->num_qps);
+	return 0;
+err_trace:
+	kfree(qp->trace_info);
+err_cmd:
+	memset(&din, 0, sizeof(din));
+	memset(&dout, 0, sizeof(dout));
+	din.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP);
+	din.qpn = cpu_to_be32(qp->qpn);
+	/* was passing &out (the CREATE reply buffer) with sizeof(dout);
+	 * the DESTROY reply must land in dout.
+	 */
+	xsc_cmd_exec(xdev, &din, sizeof(din), &dout, sizeof(dout));
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_core_create_qp);
+
+int xsc_core_destroy_qp(struct xsc_core_device *xdev,
+			struct xsc_core_qp *qp)
+{
+	struct xsc_destroy_qp_mbox_in in;
+	struct xsc_destroy_qp_mbox_out out;
+	int err;
+
+	xsc_debug_qp_remove(xdev, qp);
+	xsc_remove_qptrace(xdev, qp);
+	kfree(qp->trace_info);
+
+	destroy_resource_common(xdev, qp);
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.hdr.opcode = 
cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(qp->qpn); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + atomic_dec(&xdev->num_qps); + return 0; +} +EXPORT_SYMBOL_GPL(xsc_core_destroy_qp); + +int xsc_modify_qp(struct xsc_core_device *xdev, + struct xsc_modify_qp_mbox_in *in, + struct xsc_modify_qp_mbox_out *out, + u32 qpn, u16 status) +{ + int ret = 0; + + in->hdr.opcode = cpu_to_be16(status); + in->qpn = cpu_to_be32(qpn); + in->no_need_wait = 1; + + ret = xsc_cmd_exec(xdev, in, sizeof(*in), out, sizeof(*out)); + if ((status == XSC_CMD_OP_2RST_QP || status == XSC_CMD_OP_2ERR_QP) && + out->hdr.status) { + xsc_wait_qp_flush_complete(xdev, qpn); + out->hdr.status = 0; + } + if (ret || out->hdr.status != 0) { + xsc_core_err(xdev, "failed to modify qp %u status=%u, err=%d out.status %u\n", + qpn, status, ret, out->hdr.status); + ret = -ENOEXEC; + } + + return ret; +} +EXPORT_SYMBOL_GPL(xsc_modify_qp); + +int xsc_core_qp_modify(struct xsc_core_device *xdev, enum xsc_qp_state cur_state, + enum xsc_qp_state new_state, + struct xsc_modify_qp_mbox_in *in, int sqd_event, + struct xsc_core_qp *qp) +{ + static const u16 optab[XSC_QP_NUM_STATE][XSC_QP_NUM_STATE] = { + [XSC_QP_STATE_RST] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_INIT] = XSC_CMD_OP_RST2INIT_QP, + }, + [XSC_QP_STATE_INIT] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_INIT] = XSC_CMD_OP_INIT2INIT_QP, + [XSC_QP_STATE_RTR] = XSC_CMD_OP_INIT2RTR_QP, + }, + [XSC_QP_STATE_RTR] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_RTS] = XSC_CMD_OP_RTR2RTS_QP, + }, + [XSC_QP_STATE_RTS] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_RTS] = XSC_CMD_OP_RTS2RTS_QP, + 
[XSC_QP_STATE_SQD] = XSC_CMD_OP_RTS2SQD_QP, + }, + [XSC_QP_STATE_SQD] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_RTS] = XSC_CMD_OP_SQD2RTS_QP, + [XSC_QP_STATE_SQD] = XSC_CMD_OP_SQD2SQD_QP, + }, + [XSC_QP_STATE_SQER] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + [XSC_QP_STATE_RTS] = XSC_CMD_OP_SQERR2RTS_QP, + }, + [XSC_QP_STATE_ERR] = { + [XSC_QP_STATE_RST] = XSC_CMD_OP_2RST_QP, + [XSC_QP_STATE_ERR] = XSC_CMD_OP_2ERR_QP, + } + }; + + struct xsc_modify_qp_mbox_out out; + int err = 0; + u16 op; + + if (cur_state >= XSC_QP_NUM_STATE || new_state >= XSC_QP_NUM_STATE || + !optab[cur_state][new_state]) + return -EINVAL; + + memset(&out, 0, sizeof(out)); + op = optab[cur_state][new_state]; + + if (new_state == XSC_QP_STATE_RTR) { + if (qp->qp_type_internal == XSC_QUEUE_TYPE_RDMA_RC && + ((in->ctx.ip_type == 0 && in->ctx.dip[0] == in->ctx.sip[0]) || + (in->ctx.ip_type != 0 && + memcmp(in->ctx.dip, in->ctx.sip, sizeof(in->ctx.sip)) == 0))) + in->ctx.qp_out_port = xdev->caps.nif_port_num + xdev->pcie_no; + else if (in->ctx.lag_sel_en == 0) + in->ctx.qp_out_port = xdev->pf_id; + else + in->ctx.qp_out_port = in->ctx.lag_sel; + + in->ctx.pcie_no = xdev->pcie_no; + in->ctx.func_id = cpu_to_be16(xdev->glb_func_id); + } + + err = xsc_modify_qp(xdev, in, &out, qp->qpn, op); + if (err) + return err; + + if (new_state == XSC_QP_STATE_RTR) { + qp->trace_info->main_ver = YS_QPTRACE_VER_MAJOR; + qp->trace_info->sub_ver = YS_QPTRACE_VER_MINOR; + qp->trace_info->qp_type = qp->qp_type; + qp->trace_info->s_port = in->ctx.src_udp_port; + qp->trace_info->d_port = cpu_to_be16(4791); + qp->trace_info->lqpn = qp->qpn; + qp->trace_info->rqpn = be32_to_cpu(in->ctx.remote_qpn); + qp->trace_info->affinity_idx = (in->ctx.lag_sel_en == 0 ? 0 : in->ctx.lag_sel); + qp->trace_info->af_type = (in->ctx.ip_type == 0 ? 
AF_INET : AF_INET6); + + if (in->ctx.ip_type == 0) { + qp->trace_info->s_addr.s_addr4 = in->ctx.sip[0]; + qp->trace_info->d_addr.d_addr4 = in->ctx.dip[0]; + } else { + memcpy(qp->trace_info->s_addr.s_addr6, in->ctx.sip, + sizeof(qp->trace_info->s_addr.s_addr6)); + memcpy(qp->trace_info->d_addr.d_addr6, in->ctx.dip, + sizeof(qp->trace_info->d_addr.d_addr6)); + } + + err = xsc_create_qptrace(xdev, qp); + if (err) + return err; + } + + return xsc_cmd_status_to_err(&out.hdr); +} +EXPORT_SYMBOL_GPL(xsc_core_qp_modify); + +int xsc_core_qp_query(struct xsc_core_device *xdev, struct xsc_core_qp *qp, + struct xsc_query_qp_mbox_out *out, int outlen) +{ + struct xsc_query_qp_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(out, 0, outlen); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_QP); + in.qpn = cpu_to_be32(qp->qpn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), out, outlen); + if (err) + return err; + + if (out->hdr.status) + return xsc_cmd_status_to_err(&out->hdr); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_core_qp_query); + +void xsc_init_qp_table(struct xsc_core_device *xdev) +{ + struct xsc_qp_table *table = &xdev->dev_res->qp_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); + + xsc_qp_debugfs_init(xdev); + xsc_qptrace_debugfs_init(xdev); +} + +void xsc_cleanup_qp_table(struct xsc_core_device *xdev) +{ + xsc_qp_debugfs_cleanup(xdev); + xsc_qptrace_debugfs_cleanup(xdev); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c b/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c new file mode 100644 index 000000000000..59122a490eb8 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/qpts.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2021 - 2022, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common/driver.h" + +#define QPTS_ELEMENT_MAX_NUM 0x4000 //16384 = 16k + +static struct proc_dir_entry *g_entry; +static DECLARE_WAIT_QUEUE_HEAD(g_ring_buff_wait); +static struct xsc_qpt_update_msg *g_ring_buff; +static struct mutex g_ring_buff_lock; + +static DECLARE_WAIT_QUEUE_HEAD(g_remove_wait); +static u32 g_pid; + +static unsigned long R; +static unsigned long R_cur; +static unsigned long W; + +static void send_signal(int sig_no) +{ + int ret; + struct task_struct *task = NULL; + + if (g_pid < 2) { + pr_err("%s error, pid(%u) is invalid.\n", __func__, g_pid); + return; + } + + rcu_read_lock(); + task = pid_task(find_vpid(g_pid), PIDTYPE_PID); + rcu_read_unlock(); + + if (!task) { + pr_err("%s error, get pid_task failed, pid(%d).\n", __func__, g_pid); + return; + } + + ret = send_sig(sig_no, task, 0); + if (ret < 0) + pr_err("%s error, send signal(%d) failed.\n", __func__, sig_no); +} + +static int read_buff(struct xsc_qpt_update_msg *msg) +{ + mutex_lock(&g_ring_buff_lock); + if (R_cur == W) { + mutex_unlock(&g_ring_buff_lock); + return 0; + } + + *msg = g_ring_buff[R_cur]; + R_cur = (R_cur + 1) % QPTS_ELEMENT_MAX_NUM; + mutex_unlock(&g_ring_buff_lock); + + return 1; +} + +static void write_buff(struct xsc_qpt_update_msg *msg) +{ + mutex_lock(&g_ring_buff_lock); + g_ring_buff[W] = *msg; + W = (W + 1) % QPTS_ELEMENT_MAX_NUM; + if (R == W) + R = (R + 1) % QPTS_ELEMENT_MAX_NUM; + + if (R_cur == W) + R_cur = (R_cur + 1) % QPTS_ELEMENT_MAX_NUM; + + mutex_unlock(&g_ring_buff_lock); + + wake_up_interruptible(&g_ring_buff_wait); +} + +int qpts_write_one_msg(struct xsc_qpt_update_msg *msg) +{ + if (!msg) + return -1; + + write_buff(msg); + + return 0; +} +EXPORT_SYMBOL(qpts_write_one_msg); + +static int qpts_open(struct inode *inode, struct file *file) +{ + mutex_lock(&g_ring_buff_lock); + if (g_pid > 0) { + mutex_unlock(&g_ring_buff_lock); + goto end; 
+	}
+	g_pid = current->pid;
+	R_cur = R;
+	mutex_unlock(&g_ring_buff_lock);
+
+	return 0;
+end:
+	pr_err("%s failed, pid:%d.\n", __func__, g_pid);
+	return -1;
+}
+
+/* Detach the reader: clear the owner pid and wake anyone (qpts_fini)
+ * waiting for the reader to go away.
+ */
+static int qpts_release(struct inode *inode, struct file *file)
+{
+	mutex_lock(&g_ring_buff_lock);
+	g_pid = 0;
+	mutex_unlock(&g_ring_buff_lock);
+
+	wake_up_interruptible(&g_remove_wait);
+
+	return 0;
+}
+
+/* Drain queued QP-table update messages to user space.
+ * Returns the number of bytes copied, -EAGAIN for an empty non-blocking
+ * read, -EFAULT when the first copy faults, -EINVAL for an unusable
+ * buffer, or -ERESTARTSYS if the blocking wait is interrupted.
+ */
+static ssize_t qpts_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct xsc_qpt_update_msg qpt_msg = {0};
+	size_t copied = 0;
+	int error;
+
+	if (!buf || count < sizeof(qpt_msg)) {
+		pr_err("%s error, null buffer or count!\n", __func__);
+		return -EINVAL;
+	}
+
+	/* An empty ring on a non-blocking fd is -EAGAIN, not -EINVAL. */
+	if ((file->f_flags & O_NONBLOCK) && R_cur == W)
+		return -EAGAIN;
+
+	error = wait_event_interruptible(g_ring_buff_wait, (R_cur != W));
+	if (error)
+		return error;
+
+	/* Copy whole messages only: the old "i < count" bound could write
+	 * up to sizeof(qpt_msg) - 1 bytes past the user buffer.
+	 */
+	while (copied + sizeof(qpt_msg) <= count && read_buff(&qpt_msg)) {
+		/* copy_to_user() returns bytes NOT copied; the old code could
+		 * return that positive remainder as the read byte count.
+		 */
+		if (copy_to_user(buf + copied, &qpt_msg, sizeof(qpt_msg)))
+			return copied ? (ssize_t)copied : -EFAULT;
+		copied += sizeof(qpt_msg);
+	}
+
+	return copied;
+}
+
+/* Report readability: data is pending whenever the cursor trails the writer. */
+static __poll_t qpts_poll(struct file *file, poll_table *wait)
+{
+	poll_wait(file, &g_ring_buff_wait, wait);
+
+	if (R_cur != W)
+		return EPOLLIN | EPOLLRDNORM;
+
+	return 0;
+}
+
+const struct proc_ops qpts_ops = {
+	.proc_open = qpts_open,
+	.proc_read = qpts_read,
+	.proc_poll = qpts_poll,
+	.proc_release = qpts_release,
+};
+
+/* Allocate the message ring and publish /proc/qpts_kmsg.
+ * Returns 0, -ENOMEM, or -1 when the proc entry cannot be created.
+ */
+int qpts_init(void)
+{
+	g_ring_buff = kcalloc(QPTS_ELEMENT_MAX_NUM, sizeof(struct xsc_qpt_update_msg), GFP_KERNEL);
+	if (!g_ring_buff)
+		return -ENOMEM;
+
+	mutex_init(&g_ring_buff_lock);
+
+	g_entry = proc_create_data("qpts_kmsg", 0400, NULL, &qpts_ops, NULL);
+	if (!g_entry) {
+		pr_err("Could not create /proc/qpts_kmsg file!\n");
+		goto error_qpts_init;
+	}
+
+	return 0;
+
+error_qpts_init:
+	kfree(g_ring_buff);
+	g_ring_buff = NULL;
+	return -1;
+}
+
+/* Tear down the proc interface.  g_pid doubles as a state flag here:
+ * 0 -> no reader, set to 1 to block new opens; > 1 -> a live reader that
+ * must be killed and waited for before the ring is freed.
+ */
+void qpts_fini(void)
+{
+	mutex_lock(&g_ring_buff_lock);
+	if (!g_pid)
+		g_pid = 1;
+	mutex_unlock(&g_ring_buff_lock);
+
+	if (g_pid > 1) {
+		send_signal(SIGKILL);
+
wait_event_interruptible(g_remove_wait, (g_pid == 0)); + } + + remove_proc_entry("qpts_kmsg", NULL); + + kfree(g_ring_buff); + g_ring_buff = NULL; + g_entry = NULL; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c b/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c new file mode 100644 index 000000000000..7471367ce83f --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/res_obj.c @@ -0,0 +1,450 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/res_obj.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_cmd.h" +#include "common/qp.h" +#include "common/driver.h" + +static int xsc_alloc_obj(struct xsc_res_obj *obj, struct xsc_bdf_file *file, + void (*release_func)(void *), unsigned long key, + char *data, unsigned int datalen) +{ + obj->release_method = release_func; + obj->file = file; + obj->datalen = datalen; + if (datalen) { + obj->data = kmalloc(datalen, GFP_KERNEL); + if (!obj->data) + return -ENOMEM; + memcpy(obj->data, data, datalen); + } + + radix_tree_preload(GFP_KERNEL); + spin_lock(&file->obj_lock); + radix_tree_insert(&file->obj_tree, key, (void *)obj); + spin_unlock(&file->obj_lock); + radix_tree_preload_end(); + + return 0; +} + +static inline void xsc_free_obj(struct xsc_bdf_file *file, unsigned long key, + struct xsc_res_obj **obj) +{ + *obj = radix_tree_delete(&file->obj_tree, key); + if (!*obj) + return; + if ((*obj)->datalen) + kfree((*obj)->data); +} + +static void xsc_send_cmd_dealloc_pd(struct xsc_core_device *xdev, unsigned int pdn) +{ + struct xsc_dealloc_pd_mbox_in in; + struct xsc_dealloc_pd_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEALLOC_PD); + in.pdn = cpu_to_be32(pdn); + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to dealloc pd %d\n", pdn); +} + +static void 
xsc_free_pd_obj(void *obj) +{ + struct xsc_pd_obj *pd_obj = container_of(obj, struct xsc_pd_obj, obj); + struct xsc_bdf_file *file = pd_obj->obj.file; + unsigned long key; + struct xsc_res_obj *_obj; + + xsc_send_cmd_dealloc_pd(file->xdev, pd_obj->pdn); + key = xsc_idx_to_key(RES_OBJ_PD, pd_obj->pdn); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(pd_obj->obj.file->xdev, "free pd obj: %d\n", pd_obj->pdn); + kfree(pd_obj); +} + +int xsc_alloc_pd_obj(struct xsc_bdf_file *file, + unsigned int pdn, char *data, unsigned int datalen) +{ + struct xsc_pd_obj *pd_obj; + unsigned long key; + int ret; + + pd_obj = kzalloc(sizeof(*pd_obj), GFP_KERNEL); + if (!pd_obj) + return -ENOMEM; + + pd_obj->pdn = pdn; + key = xsc_idx_to_key(RES_OBJ_PD, pdn); + ret = xsc_alloc_obj(&pd_obj->obj, file, xsc_free_pd_obj, key, data, datalen); + if (ret) { + kfree(pd_obj); + return ret; + } + xsc_core_dbg(file->xdev, "alloc pd %d obj\n", pdn); + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_pd_obj); + +void xsc_destroy_pd_obj(struct xsc_bdf_file *file, unsigned int pdn) +{ + struct xsc_pd_obj *pd_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_PD, pdn); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + pd_obj = container_of(obj, struct xsc_pd_obj, obj); + kfree(pd_obj); + xsc_core_dbg(file->xdev, "destroy pd %d obj\n", pdn); +} +EXPORT_SYMBOL_GPL(xsc_destroy_pd_obj); + +static void xsc_send_cmd_destroy_mkey(struct xsc_core_device *xdev, unsigned int mkey) +{ + struct xsc_destroy_mkey_mbox_in in; + struct xsc_destroy_mkey_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_MKEY); + in.mkey = cpu_to_be32(mkey); + if (xdev->reg_mr_via_cmdq) + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + else + ret = xsc_destroy_mkey(xdev, &in, &out); + + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to destroy mkey %d\n", mkey); +} + +static void xsc_send_cmd_dereg_mr(struct 
xsc_core_device *xdev, unsigned int mkey) +{ + struct xsc_unregister_mr_mbox_in in; + struct xsc_unregister_mr_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DEREG_MR); + in.mkey = cpu_to_be32(mkey); + if (xdev->reg_mr_via_cmdq) + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + else + ret = xsc_dereg_mr(xdev, &in, &out); + + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to dereg mr %d\n", mkey); +} + +static void xsc_free_mr_obj(void *obj) +{ + struct xsc_mr_obj *mr_obj = container_of(obj, struct xsc_mr_obj, obj); + struct xsc_bdf_file *file = mr_obj->obj.file; + unsigned long key = xsc_idx_to_key(RES_OBJ_MR, mr_obj->mkey); + struct xsc_res_obj *_obj; + + xsc_send_cmd_destroy_mkey(file->xdev, mr_obj->mkey); + xsc_send_cmd_dereg_mr(file->xdev, mr_obj->mkey); + + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free mr obj: %d\n", mr_obj->mkey); + kfree(mr_obj); +} + +int xsc_alloc_mr_obj(struct xsc_bdf_file *file, + unsigned int mkey, char *data, unsigned int datalen) +{ + struct xsc_mr_obj *mr_obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_MR, mkey); + int ret; + + mr_obj = kzalloc(sizeof(*mr_obj), GFP_KERNEL); + if (!mr_obj) + return -ENOMEM; + + mr_obj->mkey = mkey; + ret = xsc_alloc_obj(&mr_obj->obj, file, xsc_free_mr_obj, key, data, datalen); + if (ret) { + kfree(mr_obj); + return ret; + } + + xsc_core_dbg(file->xdev, "alloc mr %d obj\n", mkey); + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_mr_obj); + +void xsc_destroy_mr_obj(struct xsc_bdf_file *file, unsigned int mkey) +{ + struct xsc_mr_obj *mr_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_MR, mkey); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + mr_obj = container_of(obj, struct xsc_mr_obj, obj); + kfree(mr_obj); + xsc_core_dbg(file->xdev, "destroy mr %d obj\n", mkey); +} +EXPORT_SYMBOL_GPL(xsc_destroy_mr_obj); + +static void xsc_send_cmd_destroy_cq(struct 
xsc_core_device *xdev, unsigned int cqn) +{ + struct xsc_destroy_cq_mbox_in in; + struct xsc_destroy_cq_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); + in.cqn = cpu_to_be32(cqn); + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to destroy cq %d\n", cqn); +} + +static void xsc_free_cq_obj(void *obj) +{ + struct xsc_cq_obj *cq_obj = container_of(obj, struct xsc_cq_obj, obj); + struct xsc_bdf_file *file = cq_obj->obj.file; + unsigned long key = xsc_idx_to_key(RES_OBJ_CQ, cq_obj->cqn); + struct xsc_res_obj *_obj; + + xsc_send_cmd_destroy_cq(file->xdev, cq_obj->cqn); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free cq obj: %d\n", cq_obj->cqn); + kfree(cq_obj); +} + +int xsc_alloc_cq_obj(struct xsc_bdf_file *file, unsigned int cqn, + char *data, unsigned int datalen) +{ + struct xsc_cq_obj *cq_obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_CQ, cqn); + int ret; + + cq_obj = kzalloc(sizeof(*cq_obj), GFP_KERNEL); + if (!cq_obj) + return -ENOMEM; + + cq_obj->cqn = cqn; + ret = xsc_alloc_obj(&cq_obj->obj, file, xsc_free_cq_obj, key, data, datalen); + if (ret) { + kfree(cq_obj); + return ret; + } + xsc_core_dbg(file->xdev, "alloc cq %d obj\n", cqn); + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_cq_obj); + +void xsc_destroy_cq_obj(struct xsc_bdf_file *file, unsigned int cqn) +{ + struct xsc_cq_obj *cq_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_CQ, cqn); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + cq_obj = container_of(obj, struct xsc_cq_obj, obj); + kfree(cq_obj); + xsc_core_dbg(file->xdev, "destroy cq %d obj\n", cqn); +} +EXPORT_SYMBOL_GPL(xsc_destroy_cq_obj); + +void xsc_send_cmd_2rst_qp(struct xsc_core_device *xdev, unsigned int qpn) +{ + struct xsc_modify_qp_mbox_in in; + struct xsc_modify_qp_mbox_out out; + int ret; + + ret = xsc_modify_qp(xdev, &in, 
&out, qpn, XSC_CMD_OP_2RST_QP); + if (ret) + xsc_core_err(xdev, "failed to reset qp %u\n", qpn); +} + +static void xsc_send_cmd_destroy_qp(struct xsc_core_device *xdev, unsigned int qpn) +{ + struct xsc_destroy_qp_mbox_in in; + struct xsc_destroy_qp_mbox_out out; + int ret; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(qpn); + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status != 0) + xsc_core_err(xdev, "failed to destroy qp %d\n", qpn); +} + +static void xsc_free_qp_obj(void *obj) +{ + struct xsc_qp_obj *qp_obj = container_of(obj, struct xsc_qp_obj, obj); + struct xsc_bdf_file *file = qp_obj->obj.file; + unsigned long key; + struct xsc_res_obj *_obj; + + xsc_send_cmd_2rst_qp(file->xdev, qp_obj->qpn); + xsc_send_cmd_destroy_qp(file->xdev, qp_obj->qpn); + + key = xsc_idx_to_key(RES_OBJ_QP, qp_obj->qpn); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free qp obj: %d\n", qp_obj->qpn); + kfree(qp_obj); +} + +int xsc_alloc_qp_obj(struct xsc_bdf_file *file, unsigned int qpn, + char *data, unsigned int datalen) +{ + struct xsc_qp_obj *qp_obj; + unsigned long key; + int ret; + + qp_obj = kzalloc(sizeof(*qp_obj), GFP_KERNEL); + if (!qp_obj) + return -ENOMEM; + + qp_obj->qpn = qpn; + key = xsc_idx_to_key(RES_OBJ_QP, qpn); + ret = xsc_alloc_obj(&qp_obj->obj, file, xsc_free_qp_obj, key, data, datalen); + if (ret) { + kfree(qp_obj); + return ret; + } + xsc_core_dbg(file->xdev, "alloc qp %d obj\n", qpn); + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_alloc_qp_obj); + +void xsc_destroy_qp_obj(struct xsc_bdf_file *file, unsigned int qpn) +{ + struct xsc_qp_obj *qp_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_QP, qpn); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + qp_obj = container_of(obj, struct xsc_qp_obj, obj); + kfree(qp_obj); + xsc_core_dbg(file->xdev, "destroy qp %d obj\n", qpn); +} 
+EXPORT_SYMBOL_GPL(xsc_destroy_qp_obj); + +static void xsc_send_cmd_del_pct(struct xsc_core_device *xdev, + unsigned int priority) +{ + struct xsc_ioctl_mbox_in *in; + struct xsc_ioctl_mbox_out *out; + struct xsc_ioctl_data_tl *tl; + struct xsc_flow_pct_v4_del *pct_v4; + unsigned int inlen; + unsigned int outlen; + int ret; + + inlen = sizeof(struct xsc_ioctl_mbox_in) + sizeof(struct xsc_ioctl_data_tl) + + sizeof(struct xsc_flow_pct_v4_del); + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return; + + outlen = sizeof(struct xsc_ioctl_mbox_out) + sizeof(struct xsc_ioctl_data_tl) + + sizeof(struct xsc_flow_pct_v4_del); + out = kzalloc(outlen, GFP_KERNEL); + if (!out) { + kfree(in); + return; + } + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_IOCTL_FLOW); + in->len = sizeof(struct xsc_ioctl_data_tl) + sizeof(struct xsc_flow_pct_v4_del); + in->len = cpu_to_be16(in->len); + tl = (struct xsc_ioctl_data_tl *)in->data; + tl->opmod = XSC_IOCTL_OP_DEL; + tl->table = XSC_FLOW_TBL_PCT_V4; + tl->length = sizeof(struct xsc_flow_pct_v4_del); + pct_v4 = (struct xsc_flow_pct_v4_del *)(tl + 1); + pct_v4->priority = priority; + out->len = in->len; + ret = xsc_cmd_exec(xdev, in, inlen, out, outlen); + if (ret || out->hdr.status != 0) + xsc_core_err(xdev, "failed to del pct %d\n", priority); + + kfree(in); + kfree(out); +} + +static void xsc_free_pct_obj(void *obj) +{ + struct xsc_pct_obj *pct_obj = container_of(obj, struct xsc_pct_obj, obj); + struct xsc_bdf_file *file = pct_obj->obj.file; + struct xsc_res_obj *_obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_PCT, pct_obj->pct_idx); + + xsc_send_cmd_del_pct(file->xdev, pct_obj->pct_idx); + xsc_free_obj(file, key, &_obj); + xsc_core_warn(file->xdev, "free pct obj, priority:%d\n", pct_obj->pct_idx); + kfree(pct_obj); +} + +/* both pct4 and pct6 are allocated in the same tcam table, so we can delete pct6 + * by pct4 method + */ +int xsc_alloc_pct_obj(struct xsc_bdf_file *file, unsigned int priority, + char *data, unsigned int datalen) +{ + 
struct xsc_pct_obj *pct_obj; + int ret; + unsigned long key = xsc_idx_to_key(RES_OBJ_PCT, priority); + + pct_obj = kzalloc(sizeof(*pct_obj), GFP_KERNEL); + if (!pct_obj) + return -ENOMEM; + + pct_obj->pct_idx = priority; + ret = xsc_alloc_obj(&pct_obj->obj, file, xsc_free_pct_obj, key, data, datalen); + if (ret) + kfree(pct_obj); + xsc_core_dbg(file->xdev, "alloc pct %d obj\n", priority); + return ret; +} +EXPORT_SYMBOL_GPL(xsc_alloc_pct_obj); + +void xsc_destroy_pct_obj(struct xsc_bdf_file *file, unsigned int priority) +{ + struct xsc_pct_obj *pct_obj; + struct xsc_res_obj *obj; + unsigned long key = xsc_idx_to_key(RES_OBJ_PCT, priority); + + spin_lock(&file->obj_lock); + xsc_free_obj(file, key, &obj); + spin_unlock(&file->obj_lock); + pct_obj = container_of(obj, struct xsc_pct_obj, obj); + kfree(pct_obj); + xsc_core_dbg(file->xdev, "destroy pct %d obj\n", priority); +} +EXPORT_SYMBOL_GPL(xsc_destroy_pct_obj); + +void xsc_close_bdf_file(struct xsc_bdf_file *file) +{ + struct radix_tree_iter iter; + void **slot; + struct xsc_res_obj *obj; + + xsc_core_warn(file->xdev, "release bdf file:%lx\n", file->key); + spin_lock(&file->obj_lock); + radix_tree_for_each_slot(slot, &file->obj_tree, &iter, 0) { + obj = (struct xsc_res_obj *)(*slot); + obj->release_method(obj); + } + spin_unlock(&file->obj_lock); +} +EXPORT_SYMBOL_GPL(xsc_close_bdf_file); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c new file mode 100644 index 000000000000..057be7df0f0f --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include "common/xsc_core.h" +#include "common/xsc_lag.h" +#include "common/vport.h" +#include "eswitch.h" +#include "xsc_pci_ctrl.h" + +static int xsc_device_enable_sriov(struct xsc_core_device *dev, int num_vfs) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + u16 vf; + u16 max_msix = 0; + int err; + + max_msix = xsc_get_irq_matrix_global_available(dev); + xsc_core_info(dev, "global_available=%u\n", max_msix); + err = xsc_cmd_enable_hca(dev, num_vfs, max_msix); + if (err) + return err; + + if (!XSC_ESWITCH_MANAGER(dev)) + goto enable_vfs; + + err = xsc_eswitch_enable(dev->priv.eswitch, XSC_ESWITCH_LEGACY, + num_vfs); + if (err) { + xsc_core_warn(dev, "failed to enable eswitch SRIOV (%d)\n", err); + return err; + } + +enable_vfs: + err = xsc_create_vfs_sysfs(dev, num_vfs); + if (err) { + xsc_core_warn(dev, "failed to create SRIOV sysfs (%d)\n", err); + if (XSC_ESWITCH_MANAGER(dev)) + xsc_eswitch_disable(dev->priv.eswitch, true); + + return err; + } + + for (vf = 0; vf < num_vfs; vf++) + sriov->vfs_ctx[vf].enabled = 1; + + return 0; +} + +static void xsc_device_disable_sriov(struct xsc_core_device *dev, + int num_vfs, bool clear_vf) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + int vf, err; + + err = xsc_cmd_disable_hca(dev, (u16)num_vfs); + if (err) { + xsc_core_warn(dev, "failed to disable hca, num_vfs=%d, err=%d\n", + num_vfs, err); + return; + } + + for (vf = num_vfs - 1; vf >= 0; vf--) { + if (!sriov->vfs_ctx[vf].enabled) + continue; + + sriov->vfs_ctx[vf].enabled = 0; + } + + if (XSC_ESWITCH_MANAGER(dev)) + xsc_eswitch_disable(dev->priv.eswitch, clear_vf); + + xsc_destroy_vfs_sysfs(dev, num_vfs); +} + +static int xsc_sriov_enable(struct pci_dev *pdev, int num_vfs) +{ + struct xsc_core_device *dev = pci_get_drvdata(pdev); + int err; + + if (num_vfs > dev->caps.max_vfs) { + xsc_core_warn(dev, + "invalid sriov param, num_vfs(%d) > total_vfs(%d)\n", + num_vfs, dev->caps.max_vfs); + return -EINVAL; + } + + if (num_vfs && 
pci_num_vf(dev->pdev)) { + if (num_vfs == pci_num_vf(dev->pdev)) + return 0; + + xsc_core_warn(dev, "VFs already enabled. Disable before enabling %d VFs\n", + num_vfs); + return -EBUSY; + } + + xsc_lag_disable(dev); + + xsc_core_info(dev, "enable %d VFs\n", num_vfs); + + err = xsc_device_enable_sriov(dev, num_vfs); + if (err) { + xsc_core_warn(dev, "xsc_device_enable_sriov failed, err=%d\n", err); + goto device_enable_sriov_err; + } + + err = pci_enable_sriov(pdev, num_vfs); + if (err) { + xsc_core_warn(dev, "pci_enable_sriov failed, err=%d\n", err); + goto pci_enable_sriov_err; + } + + xsc_lag_enable(dev); + + return err; + +pci_enable_sriov_err: + xsc_device_disable_sriov(dev, num_vfs, true); + +device_enable_sriov_err: + xsc_lag_enable(dev); + + return err; +} + +static void xsc_sriov_disable(struct pci_dev *pdev) +{ + struct xsc_core_device *dev = pci_get_drvdata(pdev); + int num_vfs = pci_num_vf(dev->pdev); + + xsc_lag_disable(dev); + + xsc_core_info(dev, "disable %d VFs\n", num_vfs); + pci_disable_sriov(pdev); + + xsc_device_disable_sriov(dev, num_vfs, true); + + xsc_lag_enable(dev); +} + +int xsc_core_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct xsc_core_device *dev = pci_get_drvdata(pdev); + struct xsc_core_sriov *sriov = &dev->priv.sriov; + int err = 0; + + if (num_vfs) + err = xsc_sriov_enable(pdev, num_vfs); + else + xsc_sriov_disable(pdev); + + if (!err) + sriov->num_vfs = num_vfs; + return err ? 
err : num_vfs; +} + +int xsc_sriov_attach(struct xsc_core_device *dev) +{ + struct pci_dev *pdev = dev->pdev; + struct xsc_core_device *pf_xdev; + struct xsc_core_sriov *sriov; + + if (!xsc_core_is_pf(dev)) { + if (!pdev->physfn) /*for vf passthrough vm*/ + return 0; + + pf_xdev = pci_get_drvdata(pdev->physfn); + sriov = &pf_xdev->priv.sriov; + + sriov->vfs[dev->vf_id].vf = dev->vf_id; + sriov->vfs[dev->vf_id].dev = dev; + return 0; + } + + if (!dev->priv.sriov.num_vfs) + return 0; + + /* If sriov VFs exist in PCI level, enable them in device level */ + return xsc_device_enable_sriov(dev, pci_num_vf(dev->pdev)); +} + +void xsc_sriov_detach(struct xsc_core_device *dev) +{ + if (!xsc_core_is_pf(dev) || !dev->priv.sriov.num_vfs) + return; + + xsc_device_disable_sriov(dev, pci_num_vf(dev->pdev), false); +} + +static u16 xsc_get_max_vfs(struct xsc_core_device *dev) +{ + /* In RH6.8 and lower pci_sriov_get_totalvfs might return -EINVAL + * return in that case 1 + */ + return (pci_sriov_get_totalvfs(dev->pdev) < 0) ? 
0 : + pci_sriov_get_totalvfs(dev->pdev); +} + +static int xsc_sriov_pci_cfg_info(struct xsc_core_device *dev, + struct xsc_pci_sriov *iov) +{ + int pos; + struct pci_dev *pdev = dev->pdev; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + xsc_core_err(dev, "%s: failed to find SRIOV capability in device\n", + __func__); + return -ENODEV; + } + + iov->pos = pos; + pci_read_config_dword(pdev, pos + PCI_SRIOV_CAP, &iov->cap); + pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl); + pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); + pci_read_config_word(pdev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs); + pci_read_config_word(pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride); + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &iov->vf_device); + pci_read_config_dword(pdev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); + pci_read_config_byte(pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); + + return 0; +} + +int xsc_sriov_init(struct xsc_core_device *dev) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct pci_dev *pdev = dev->pdev; + struct xsc_pci_sriov *iov = &sriov->pci_sriov; + int total_vfs; + u32 vf_bus, vf_devfn; + int err; + + if (!xsc_core_is_pf(dev)) + return 0; + + err = xsc_sriov_pci_cfg_info(dev, iov); + if (err) { + xsc_core_warn(dev, "%s: pci not support sriov, err=%d\n", + __func__, err); + return 0; + } + + total_vfs = pci_sriov_get_totalvfs(pdev); + if (unlikely(iov->total_vfs == 0)) { + xsc_core_warn(dev, "%s: pci not support sriov, total_vfs=%d, cur_vfs=%d\n", + __func__, iov->total_vfs, sriov->num_vfs); + return 0; + } + sriov->max_vfs = xsc_get_max_vfs(dev); + sriov->num_vfs = pci_num_vf(pdev); + + vf_bus = pdev->bus->number + ((pdev->devfn + iov->offset) >> 8); + vf_devfn = (pdev->devfn + iov->offset) & 0xff; + sriov->vf_bdf_base = (u16)((vf_bus 
<< 8) | vf_devfn); + + sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL); + if (!sriov->vfs_ctx) + return -ENOMEM; + + xsc_core_info(dev, "total_vfs=%d, cur_vfs=%d, vf_bdf_base=0x%02x\n", + total_vfs, sriov->num_vfs, sriov->vf_bdf_base); + xsc_core_info(dev, "vf_offset=%d, stride=%d, vf_device_id=0x%x\n", + iov->offset, iov->stride, iov->vf_device); + err = xsc_sriov_sysfs_init(dev); + if (err) { + xsc_core_warn(dev, "failed to init SRIOV sysfs, err=%d\n", err); + kfree(sriov->vfs_ctx); + return err; + } + + return 0; +} + +void xsc_sriov_cleanup(struct xsc_core_device *dev) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + + if (!xsc_core_is_pf(dev)) + return; + + xsc_sriov_sysfs_cleanup(dev); + kfree(sriov->vfs_ctx); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/sriov_sysfs.c b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov_sysfs.c new file mode 100644 index 000000000000..e5b07b0b5ecc --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/sriov_sysfs.c @@ -0,0 +1,1063 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include "common/xsc_core.h" +#include "common/vport.h" +#include "eswitch.h" + +struct vf_attributes { + struct attribute attr; + ssize_t (*show)(struct xsc_sriov_vf *vf, struct vf_attributes *attr, + char *buf); + ssize_t (*store)(struct xsc_sriov_vf *vf, struct vf_attributes *attr, + const char *buf, size_t count); +}; + +static ssize_t vf_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct vf_attributes *ga = + container_of(attr, struct vf_attributes, attr); + struct xsc_sriov_vf *g = container_of(kobj, struct xsc_sriov_vf, kobj); + + if (!ga->show) + return -EIO; + + return ga->show(g, ga, buf); +} + +static ssize_t vf_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t size) +{ + struct vf_attributes *ga = + container_of(attr, struct vf_attributes, attr); + struct xsc_sriov_vf *g = container_of(kobj, struct xsc_sriov_vf, kobj); + + if (!ga->store) + return -EIO; + + return ga->store(g, ga, buf, size); +} + +struct vf_group_attributes { + struct attribute attr; + ssize_t (*show)(struct xsc_vgroup *g, struct vf_group_attributes *attr, + char *buf); + ssize_t (*store)(struct xsc_vgroup *g, struct vf_group_attributes *attr, + const char *buf, size_t count); +}; + +static ssize_t vf_group_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct vf_group_attributes *ga = + container_of(attr, struct vf_group_attributes, attr); + struct xsc_vgroup *g = container_of(kobj, struct xsc_vgroup, kobj); + + if (!ga->show) + return -EIO; + + return ga->show(g, ga, buf); +} + +static ssize_t vf_group_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t size) +{ + struct vf_group_attributes *ga = + container_of(attr, struct vf_group_attributes, attr); + struct xsc_vgroup *g = container_of(kobj, struct xsc_vgroup, kobj); + + if (!ga->store) + return -EIO; + + return ga->store(g, ga, buf, size); +} + +static ssize_t 
port_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + union ib_gid gid; + int err; + u8 *p; + + err = xsc_query_hca_vport_gid(dev, 1, 1, g->vf, 0, &gid); + if (err) { + xsc_core_warn(dev, "failed to query gid at index 0 for vf %d\n", g->vf); + return err; + } + + p = &gid.raw[8]; + err = sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]); + return err; +} + +static ssize_t port_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_vf_context *vfs_ctx = dev->priv.sriov.vfs_ctx; + struct xsc_hca_vport_context *in; + u64 guid = 0; + int err; + int tmp[8]; + int i; + + err = sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + &tmp[0], &tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5], &tmp[6], &tmp[7]); + if (err != 8) + return -EINVAL; + + for (i = 0; i < 8; i++) + guid += ((u64)tmp[i] << ((7 - i) * 8)); + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->field_select = XSC_HCA_VPORT_SEL_PORT_GUID; + in->port_guid = guid; + err = xsc_modify_hca_vport_context(dev, 1, 1, g->vf + 1, in); + kfree(in); + if (err) + return err; + + vfs_ctx[g->vf].port_guid = guid; + + return count; +} + +static int show_nic_node_guid(struct xsc_core_device *dev, u16 vf, + __be64 *node_guid) +{ + int err; + + err = xsc_query_nic_vport_node_guid(dev, vf + 1, node_guid); + if (!err) + *node_guid = cpu_to_be64(*node_guid); + + return err; +} + +static ssize_t node_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + __be64 guid; + + int err; + u8 *p; + + err = show_nic_node_guid(dev, g->vf, &guid); + if (err) { + xsc_core_warn(dev, "failed to query node guid for vf %d (%d)\n", + g->vf, err); + return err; + } + + p = (u8 *)&guid; + err = sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + 
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]); + + return err; +} + +static int modify_nic_node_guid(struct xsc_core_device *dev, u16 vf, + u64 node_guid) +{ + return xsc_modify_other_nic_vport_node_guid(dev, vf + 1, node_guid); +} + +static ssize_t node_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + u64 guid = 0; + int err; + int tmp[8]; + int i; + + err = sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + &tmp[0], &tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5], &tmp[6], &tmp[7]); + if (err != 8) + return -EINVAL; + + for (i = 0; i < 8; i++) + guid += ((u64)tmp[i] << ((7 - i) * 8)); + + err = modify_nic_node_guid(dev, g->vf, guid); + if (err) { + xsc_core_warn(dev, "failed to modify node guid for vf %d (%d)\n", + g->vf, err); + return err; + } + + return count; +} + +static const char *policy_str(enum port_state_policy policy) +{ + switch (policy) { + case XSC_POLICY_DOWN: return "Down\n"; + case XSC_POLICY_UP: return "Up\n"; + case XSC_POLICY_FOLLOW: return "Follow\n"; + default: return "Invalid policy\n"; + } +} + +static ssize_t policy_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_hca_vport_context *rep; + const char *p = NULL; + int err; + + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (!rep) + return -ENOMEM; + + err = xsc_query_hca_vport_context(dev, 1, 1, g->vf, rep); + if (err) { + xsc_core_warn(dev, "failed to query port policy for vf %d (%d)\n", + g->vf, err); + goto free; + } + p = policy_str(rep->vport_state_policy); + if (p) + sprintf(buf, "%s", p); + +free: + kfree(rep); + return p ? 
strlen(p) : err; +} + +static int strpolicy(const char *buf, enum port_state_policy *policy) +{ + if (sysfs_streq(buf, "Down")) { + *policy = XSC_POLICY_DOWN; + return 0; + } + + if (sysfs_streq(buf, "Up")) { + *policy = XSC_POLICY_UP; + return 0; + } + + if (sysfs_streq(buf, "Follow")) { + *policy = XSC_POLICY_FOLLOW; + return 0; + } + return -EINVAL; +} + +static ssize_t policy_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_vf_context *vfs_ctx = dev->priv.sriov.vfs_ctx; + struct xsc_hca_vport_context *in; + enum port_state_policy policy; + int err; + + err = strpolicy(buf, &policy); + if (err) + return err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->vport_state_policy = policy; + in->field_select = XSC_HCA_VPORT_SEL_STATE_POLICY; + err = xsc_modify_hca_vport_context(dev, 1, 1, g->vf + 1, in); + kfree(in); + if (err) + return err; + + vfs_ctx[g->vf].policy = policy; + + return count; +} + +/* ETH SRIOV SYSFS */ +static ssize_t mac_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, + "usage: write to set VF Mac Address\n"); +} + +static ssize_t mac_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + u8 mac[ETH_ALEN]; + int err; + + err = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", + &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]); + if (err == 6) + goto set_mac; + + if (sysfs_streq(buf, "Random")) + eth_random_addr(mac); + else + return -EINVAL; + +set_mac: + err = xsc_eswitch_set_vport_mac(dev->priv.eswitch, g->vf + 1, mac); + return err ? 
err : count; +} + +static ssize_t vlan_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, ": set VF Vlan, Qos, Vlan Proto(default 802.1Q)\n"); +} + +static ssize_t vlan_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + char vproto_ext[5] = {'\0'}; + __be16 vlan_proto; + u16 vlan_id; + u8 qos; + int err; + + err = sscanf(buf, "%hu:%hhu:802.%4s", &vlan_id, &qos, vproto_ext); + if (err == 3) { + if ((strcmp(vproto_ext, "1AD") == 0) || + (strcmp(vproto_ext, "1ad") == 0)) + vlan_proto = htons(ETH_P_8021AD); + else if ((strcmp(vproto_ext, "1Q") == 0) || + (strcmp(vproto_ext, "1q") == 0)) + vlan_proto = htons(ETH_P_8021Q); + else + return -EINVAL; + } else { + err = sscanf(buf, "%hu:%hhu", &vlan_id, &qos); + if (err != 2) + return -EINVAL; + vlan_proto = htons(ETH_P_8021Q); + } + + err = xsc_eswitch_set_vport_vlan(dev->priv.eswitch, g->vf + 1, + vlan_id, qos, vlan_proto); + return err ? err : count; +} + +static const char *vlan_proto_str(u16 vlan, u8 qos, __be16 vlan_proto) +{ + if (!vlan && !qos) + return "N/A"; + + switch (vlan_proto) { + case htons(ETH_P_8021AD): return "802.1ad"; + case htons(ETH_P_8021Q): return "802.1Q"; + default: return "Invalid vlan protocol"; + } +} + +static ssize_t spoofcheck_show(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, + "usage: write to enable|disable VF SpoofCheck\n" + ); +} + +static ssize_t spoofcheck_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, + size_t count) +{ + struct xsc_core_device *dev = g->dev; + bool settings; + int err; + + if (sysfs_streq(buf, "ON")) + settings = true; + else if (sysfs_streq(buf, "OFF")) + settings = false; + else + return -EINVAL; + + err = xsc_eswitch_set_vport_spoofchk(dev->priv.eswitch, g->vf + 1, settings); + return err ? 
err : count;
+}
+
+/* sysfs 'trust' show: usage hint only, state is reported via 'config' */
+static ssize_t trust_show(struct xsc_sriov_vf *g,
+			  struct vf_attributes *oa,
+			  char *buf)
+{
+	return sprintf(buf,
+		       "usage: write to trust|untrust VF\n"
+		      );
+}
+
+/* sysfs 'trust' store: accepts "ON"/"OFF" and forwards to the eswitch */
+static ssize_t trust_store(struct xsc_sriov_vf *g,
+			   struct vf_attributes *oa,
+			   const char *buf,
+			   size_t count)
+{
+	struct xsc_core_device *dev = g->dev;
+	bool settings;
+	int err;
+
+	if (sysfs_streq(buf, "ON"))
+		settings = true;
+	else if (sysfs_streq(buf, "OFF"))
+		settings = false;
+	else
+		return -EINVAL;
+
+	/* vport 0 is the PF, so VF n maps to vport n + 1 */
+	err = xsc_eswitch_set_vport_trust(dev->priv.eswitch, g->vf + 1, settings);
+	return err ? err : count;
+}
+
+static ssize_t link_state_show(struct xsc_sriov_vf *g,
+			       struct vf_attributes *oa,
+			       char *buf)
+{
+	return sprintf(buf, "usage: write to set VF State\n");
+}
+
+/* sysfs 'link_state' store: parses Down/Up/Follow via strpolicy() */
+static ssize_t link_state_store(struct xsc_sriov_vf *g,
+				struct vf_attributes *oa,
+				const char *buf,
+				size_t count)
+{
+	struct xsc_core_device *dev = g->dev;
+	enum port_state_policy policy;
+	int err;
+
+	err = strpolicy(buf, &policy);
+	if (err)
+		return err;
+
+	err = xsc_eswitch_set_vport_state(dev->priv.eswitch, g->vf + 1, policy);
+	return err ? err : count;
+}
+
+static ssize_t max_tx_rate_show(struct xsc_sriov_vf *g,
+				struct vf_attributes *oa,
+				char *buf)
+{
+	return sprintf(buf,
+		       "usage: write to set VF max rate\n");
+}
+
+/* sysfs 'max_tx_rate' store: keeps the current min rate and updates max */
+static ssize_t max_tx_rate_store(struct xsc_sriov_vf *g,
+				 struct vf_attributes *oa,
+				 const char *buf, size_t count)
+{
+	struct xsc_core_device *dev = g->dev;
+	struct xsc_eswitch *esw = dev->priv.eswitch;
+	u32 max_tx_rate;
+	u32 min_tx_rate;
+	int err;
+
+	mutex_lock(&esw->state_lock);
+	min_tx_rate = esw->vports[g->vf + 1].info.min_rate;
+	mutex_unlock(&esw->state_lock);
+
+	/* kstrtouint() returns 0 on success, not an sscanf-style item count */
+	err = kstrtouint(buf, 10, &max_tx_rate);
+	if (err)
+		return -EINVAL;
+
+	err = xsc_eswitch_set_vport_rate(dev->priv.eswitch, g->vf + 1,
+					 max_tx_rate, min_tx_rate);
+	return err ?
err : count;
+}
+
+static ssize_t min_tx_rate_show(struct xsc_sriov_vf *g,
+				struct vf_attributes *oa,
+				char *buf)
+{
+	return sprintf(buf,
+		       "usage: write to set VF min rate\n");
+}
+
+/* sysfs 'min_tx_rate' store: keeps the current max rate and updates min */
+static ssize_t min_tx_rate_store(struct xsc_sriov_vf *g,
+				 struct vf_attributes *oa,
+				 const char *buf, size_t count)
+{
+	struct xsc_core_device *dev = g->dev;
+	struct xsc_eswitch *esw = dev->priv.eswitch;
+	u32 min_tx_rate;
+	u32 max_tx_rate;
+	int err;
+
+	mutex_lock(&esw->state_lock);
+	max_tx_rate = esw->vports[g->vf + 1].info.max_rate;
+	mutex_unlock(&esw->state_lock);
+
+	/* kstrtouint() returns 0 on success, not an sscanf-style item count */
+	err = kstrtouint(buf, 10, &min_tx_rate);
+	if (err)
+		return -EINVAL;
+
+	err = xsc_eswitch_set_vport_rate(dev->priv.eswitch, g->vf + 1,
+					 max_tx_rate, min_tx_rate);
+	return err ? err : count;
+}
+
+static ssize_t min_pf_tx_rate_show(struct xsc_sriov_vf *g,
+				   struct vf_attributes *oa,
+				   char *buf)
+{
+	return sprintf(buf, "usage: write to set PF min rate\n");
+}
+
+/* PF variant: g->vf holds the PF vport (0), so no +1 offset here */
+static ssize_t min_pf_tx_rate_store(struct xsc_sriov_vf *g,
+				    struct vf_attributes *oa,
+				    const char *buf, size_t count)
+{
+	struct xsc_core_device *dev = g->dev;
+	struct xsc_eswitch *esw = dev->priv.eswitch;
+	u32 min_tx_rate;
+	u32 max_tx_rate;
+	int err;
+
+	mutex_lock(&esw->state_lock);
+	max_tx_rate = esw->vports[g->vf].info.max_rate;
+	mutex_unlock(&esw->state_lock);
+
+	/* kstrtouint() returns 0 on success, not an sscanf-style item count */
+	err = kstrtouint(buf, 10, &min_tx_rate);
+	if (err)
+		return -EINVAL;
+
+	err = xsc_eswitch_set_vport_rate(dev->priv.eswitch, g->vf,
+					 max_tx_rate, min_tx_rate);
+	return err ?
err : count; +} + +static ssize_t group_show(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + char *buf) +{ + return sprintf(buf, + "usage: write to set VF vport group\n"); +} + +static ssize_t group_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + u32 group_id; + int err; + + err = kstrtouint(buf, 10, &group_id); + if (err != 1) + return -EINVAL; + + if (group_id > 255) + return -EINVAL; + + err = xsc_eswitch_vport_update_group(esw, g->vf + 1, group_id); + + return err ? err : count; +} + +static ssize_t max_tx_rate_group_show(struct xsc_vgroup *g, + struct vf_group_attributes *oa, + char *buf) +{ + return sprintf(buf, + "usage: write to set VF group max rate\n"); +} + +static ssize_t max_tx_rate_group_store(struct xsc_vgroup *g, + struct vf_group_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + u32 max_rate; + int err; + + err = kstrtouint(buf, 10, &max_rate); + if (err != 1) + return -EINVAL; + + err = xsc_eswitch_set_vgroup_max_rate(esw, g->group_id, max_rate); + + return err ? err : count; +} + +static ssize_t min_tx_rate_group_show(struct xsc_vgroup *g, + struct vf_group_attributes *oa, + char *buf) +{ + return sprintf(buf, + "usage: write to set VF group min rate\n"); +} + +static ssize_t min_tx_rate_group_store(struct xsc_vgroup *g, + struct vf_group_attributes *oa, + const char *buf, size_t count) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + u32 min_rate; + int err; + + err = kstrtouint(buf, 10, &min_rate); + if (err != 1) + return -EINVAL; + + err = xsc_eswitch_set_vgroup_min_rate(esw, g->group_id, min_rate); + + return err ? err : count; +} + +#define _sprintf(p, buf, format, arg...) \ + ((PAGE_SIZE - (int)((p) - (buf))) <= 0 ? 
0 : \ + scnprintf((p), PAGE_SIZE - (int)((p) - (buf)), format, ## arg)) + +static ssize_t trunk_show(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + struct xsc_vport *vport = &esw->vports[g->vf + 1]; + u16 vlan_id = 0; + char *ret = buf; + + mutex_lock(&esw->state_lock); + if (!!bitmap_weight(vport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID)) { + ret += _sprintf(ret, buf, "Allowed 802.1Q VLANs:"); + for_each_set_bit(vlan_id, vport->info.vlan_trunk_8021q_bitmap, VLAN_N_VID) + ret += _sprintf(ret, buf, " %d", vlan_id); + ret += _sprintf(ret, buf, "\n"); + } + mutex_unlock(&esw->state_lock); + + return (ssize_t)(ret - buf); +} + +static ssize_t trunk_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, + size_t count) +{ + struct xsc_core_device *dev = g->dev; + u16 start_vid, end_vid; + char op[5]; + int err; + + err = sscanf(buf, "%4s %hu %hu", op, &start_vid, &end_vid); + if (err != 3) + return -EINVAL; + + if (!strcmp(op, "add")) + err = xsc_eswitch_add_vport_trunk_range(dev->priv.eswitch, + g->vf + 1, + start_vid, end_vid); + else if (!strcmp(op, "rem")) + err = xsc_eswitch_del_vport_trunk_range(dev->priv.eswitch, + g->vf + 1, + start_vid, end_vid); + else + return -EINVAL; + + return err ? 
err : count;
+}
+
+/* sysfs 'config' show: dump the cached per-VF vport settings as text */
+static ssize_t config_show(struct xsc_sriov_vf *g, struct vf_attributes *oa,
+			   char *buf)
+{
+	struct xsc_core_device *dev = g->dev;
+	struct xsc_eswitch *esw = dev->priv.eswitch;
+	struct xsc_vport_info *ivi;
+	int vport = g->vf + 1;
+	char *p = buf;
+
+	if (!esw || !xsc_core_is_vport_manager(dev))
+		return -EPERM;
+	if (!(vport >= 0 && vport < esw->total_vports))
+		return -EINVAL;
+
+	mutex_lock(&esw->state_lock);
+	ivi = &esw->vports[vport].info;
+	p += _sprintf(p, buf, "VF : %d\n", g->vf);
+	p += _sprintf(p, buf, "MAC : %pM\n", ivi->mac);
+	p += _sprintf(p, buf, "VLAN : %d\n", ivi->vlan);
+	p += _sprintf(p, buf, "QoS : %d\n", ivi->qos);
+	p += _sprintf(p, buf, "VLAN Proto : %s\n",
+		      vlan_proto_str(ivi->vlan, ivi->qos, ivi->vlan_proto));
+	p += _sprintf(p, buf, "SpoofCheck : %s\n", ivi->spoofchk ? "ON" : "OFF");
+	p += _sprintf(p, buf, "Trust : %s\n", ivi->trusted ? "ON" : "OFF");
+	/* add the trailing newline every other row has, so LinkState does
+	 * not run into the MinTxRate line */
+	p += _sprintf(p, buf, "LinkState : %s\n", policy_str(ivi->link_state));
+	p += _sprintf(p, buf, "MinTxRate : %d\n", ivi->min_rate);
+	p += _sprintf(p, buf, "MaxTxRate : %d\n", ivi->max_rate);
+	p += _sprintf(p, buf, "VGT+ : %s\n",
+		      !!bitmap_weight(ivi->vlan_trunk_8021q_bitmap, VLAN_N_VID) ?
+ "ON" : "OFF"); + p += _sprintf(p, buf, "RateGroup : %d\n", ivi->group); + mutex_unlock(&esw->state_lock); + + return (ssize_t)(p - buf); +} + +static ssize_t config_store(struct xsc_sriov_vf *g, + struct vf_attributes *oa, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +static ssize_t config_group_show(struct xsc_vgroup *g, + struct vf_group_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_eswitch *esw = dev->priv.eswitch; + char *p = buf; + + if (!esw || !xsc_core_is_vport_manager(dev)) + return -EPERM; + + mutex_lock(&esw->state_lock); + p += _sprintf(p, buf, "Num VFs : %d\n", g->num_vports); + p += _sprintf(p, buf, "MaxRate : %d\n", g->max_rate); + p += _sprintf(p, buf, "MinRate : %d\n", g->min_rate); + p += _sprintf(p, buf, "BWShare(Indirect cfg) : %d\n", g->bw_share); + mutex_unlock(&esw->state_lock); + + return (ssize_t)(p - buf); +} + +static ssize_t config_group_store(struct xsc_vgroup *g, + struct vf_group_attributes *oa, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +static ssize_t stats_show(struct xsc_sriov_vf *g, struct vf_attributes *oa, + char *buf) +{ + struct xsc_core_device *dev = g->dev; + struct xsc_vport *vport = xsc_eswitch_get_vport(dev->priv.eswitch, g->vf + 1); + struct ifla_vf_stats ifi; + struct xsc_vport_drop_stats stats = {}; + int err; + char *p = buf; + + err = xsc_eswitch_get_vport_stats(dev->priv.eswitch, g->vf + 1, &ifi); + if (err) + return -EINVAL; + + err = xsc_eswitch_query_vport_drop_stats(dev, vport, &stats); + if (err) + return -EINVAL; + + p += _sprintf(p, buf, "tx_packets : %llu\n", ifi.tx_packets); + p += _sprintf(p, buf, "tx_bytes : %llu\n", ifi.tx_bytes); + p += _sprintf(p, buf, "tx_dropped : %llu\n", stats.tx_dropped); + p += _sprintf(p, buf, "rx_packets : %llu\n", ifi.rx_packets); + p += _sprintf(p, buf, "rx_bytes : %llu\n", ifi.rx_bytes); + p += _sprintf(p, buf, "rx_broadcast : %llu\n", ifi.broadcast); + p += _sprintf(p, buf, "rx_multicast : 
%llu\n", ifi.multicast); + p += _sprintf(p, buf, "rx_dropped : %llu\n", stats.rx_dropped); + + return (ssize_t)(p - buf); +} + +static ssize_t stats_store(struct xsc_sriov_vf *g, struct vf_attributes *oa, + const char *buf, size_t count) +{ + return -EOPNOTSUPP; +} + +static ssize_t num_vfs_store(struct device *device, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = container_of(device, struct pci_dev, dev); + int req_vfs; + int err; + + if (kstrtoint(buf, 0, &req_vfs) || req_vfs < 0 || + req_vfs > pci_sriov_get_totalvfs(pdev)) + return -EINVAL; + + err = xsc_core_sriov_configure(pdev, req_vfs); + if (err < 0) + return err; + + return count; +} + +static ssize_t num_vfs_show(struct device *device, struct device_attribute *attr, + char *buf) +{ + struct pci_dev *pdev = container_of(device, struct pci_dev, dev); + struct xsc_core_device *dev = pci_get_drvdata(pdev); + struct xsc_core_sriov *sriov = &dev->priv.sriov; + + return sprintf(buf, "%d\n", sriov->num_vfs); +} + +static DEVICE_ATTR_RW(num_vfs); + +static const struct sysfs_ops vf_sysfs_ops = { + .show = vf_attr_show, + .store = vf_attr_store, +}; + +static const struct sysfs_ops vf_group_sysfs_ops = { + .show = vf_group_attr_show, + .store = vf_group_attr_store, +}; + +#define VF_RATE_GROUP_ATTR(_name) struct vf_group_attributes vf_group_attr_##_name = \ + __ATTR(_name, 0644, _name##_group_show, _name##_group_store) +#define VF_ATTR(_name) struct vf_attributes vf_attr_##_name = \ + __ATTR(_name, 0644, _name##_show, _name##_store) + +VF_ATTR(node); +VF_ATTR(port); +VF_ATTR(policy); + +VF_ATTR(mac); +VF_ATTR(vlan); +VF_ATTR(link_state); +VF_ATTR(spoofcheck); +VF_ATTR(trust); +VF_ATTR(max_tx_rate); +VF_ATTR(min_tx_rate); +VF_ATTR(config); +VF_ATTR(trunk); +VF_ATTR(stats); +VF_ATTR(group); +VF_RATE_GROUP_ATTR(max_tx_rate); +VF_RATE_GROUP_ATTR(min_tx_rate); +VF_RATE_GROUP_ATTR(config); + +static struct attribute *vf_eth_attrs[] = { + &vf_attr_node.attr, + 
&vf_attr_mac.attr, + &vf_attr_vlan.attr, + &vf_attr_link_state.attr, + &vf_attr_spoofcheck.attr, + &vf_attr_trust.attr, + &vf_attr_max_tx_rate.attr, + &vf_attr_min_tx_rate.attr, + &vf_attr_config.attr, + &vf_attr_trunk.attr, + &vf_attr_stats.attr, + &vf_attr_group.attr, + NULL +}; +ATTRIBUTE_GROUPS(vf_eth); + +static struct attribute *vf_group_attrs[] = { + &vf_group_attr_max_tx_rate.attr, + &vf_group_attr_min_tx_rate.attr, + &vf_group_attr_config.attr, + NULL +}; +ATTRIBUTE_GROUPS(vf_group); + +static const struct kobj_type vf_type_eth = { + .sysfs_ops = &vf_sysfs_ops, + .default_groups = vf_eth_groups, +}; + +static const struct kobj_type vf_group = { + .sysfs_ops = &vf_group_sysfs_ops, + .default_groups = vf_group_groups, +}; + +static struct vf_attributes pf_attr_min_pf_tx_rate = + __ATTR(min_tx_rate, 0644, min_pf_tx_rate_show, min_pf_tx_rate_store); + +static struct attribute *pf_eth_attrs[] = { + &pf_attr_min_pf_tx_rate.attr, + NULL, +}; +ATTRIBUTE_GROUPS(pf_eth); + +static const struct kobj_type pf_type_eth = { + .sysfs_ops = &vf_sysfs_ops, + .default_groups = pf_eth_groups, +}; + +static struct attribute *vf_ib_attrs[] = { + &vf_attr_node.attr, + &vf_attr_port.attr, + &vf_attr_policy.attr, + NULL +}; +ATTRIBUTE_GROUPS(vf_ib); + +static const struct kobj_type vf_type_ib = { + .sysfs_ops = &vf_sysfs_ops, + .default_groups = vf_ib_groups, +}; + +static struct device_attribute *xsc_class_attributes[] = { + &dev_attr_num_vfs, +}; + +int xsc_sriov_sysfs_init(struct xsc_core_device *dev) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct device *device = &dev->pdev->dev; + int err; + int i; + + sriov->config = kobject_create_and_add("sriov", &device->kobj); + if (!sriov->config) + return -ENOMEM; + + if (dev->caps.log_esw_max_sched_depth) { + sriov->groups_config = kobject_create_and_add("groups", + sriov->config); + if (!sriov->groups_config) { + err = -ENOMEM; + goto err_groups; + } + } + + for (i = 0; i < ARRAY_SIZE(xsc_class_attributes); i++) { + 
err = device_create_file(device, xsc_class_attributes[i]); + if (err) + goto err_attr; + } + + return 0; + +err_attr: + if (sriov->groups_config) { + kobject_put(sriov->groups_config); + sriov->groups_config = NULL; + } + +err_groups: + kobject_put(sriov->config); + sriov->config = NULL; + return err; +} + +void xsc_sriov_sysfs_cleanup(struct xsc_core_device *dev) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct device *device = &dev->pdev->dev; + int i; + + for (i = 0; i < ARRAY_SIZE(xsc_class_attributes); i++) + device_remove_file(device, xsc_class_attributes[i]); + + if (dev->caps.log_esw_max_sched_depth) + kobject_put(sriov->groups_config); + kobject_put(sriov->config); + sriov->config = NULL; +} + +int xsc_create_vf_group_sysfs(struct xsc_core_device *dev, + u32 group_id, struct kobject *group_kobj) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + int err; + + err = kobject_init_and_add(group_kobj, &vf_group, sriov->groups_config, + "%d", group_id); + if (err) + return err; + + kobject_uevent(group_kobj, KOBJ_ADD); + + return 0; +} + +void xsc_destroy_vf_group_sysfs(struct xsc_core_device *dev, + struct kobject *group_kobj) +{ + kobject_put(group_kobj); +} + +int xsc_create_vfs_sysfs(struct xsc_core_device *dev, int num_vfs) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct xsc_sriov_vf *tmp; + static const struct kobj_type *sysfs; + int err; + int vf; + + sysfs = &vf_type_ib; + sysfs = &vf_type_eth; + + sriov->vfs = kcalloc(num_vfs + 1, sizeof(*sriov->vfs), GFP_KERNEL); + if (!sriov->vfs) + return -ENOMEM; + + for (vf = 0; vf < num_vfs; vf++) { + tmp = &sriov->vfs[vf]; + tmp->dev = dev; + tmp->vf = vf; + err = kobject_init_and_add(&tmp->kobj, sysfs, sriov->config, + "%d", vf); + if (err) + goto err_vf; + + kobject_uevent(&tmp->kobj, KOBJ_ADD); + } + + tmp = &sriov->vfs[vf]; + tmp->dev = dev; + tmp->vf = 0; + err = kobject_init_and_add(&tmp->kobj, &pf_type_eth, + sriov->config, "%s", "pf"); + if (err) { + --vf; + goto err_vf; 
+ } + + kobject_uevent(&tmp->kobj, KOBJ_ADD); + + return 0; + +err_vf: + for (; vf >= 0; vf--) { + tmp = &sriov->vfs[vf]; + kobject_put(&tmp->kobj); + } + + kfree(sriov->vfs); + sriov->vfs = NULL; + return err; +} + +void xsc_destroy_vfs_sysfs(struct xsc_core_device *dev, int num_vfs) +{ + struct xsc_core_sriov *sriov = &dev->priv.sriov; + struct xsc_sriov_vf *tmp; + int vf; + + if (num_vfs) { + tmp = &sriov->vfs[num_vfs]; + kobject_put(&tmp->kobj); + } + for (vf = 0; vf < num_vfs; vf++) { + tmp = &sriov->vfs[vf]; + kobject_put(&tmp->kobj); + } + + kfree(sriov->vfs); + sriov->vfs = NULL; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h b/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h new file mode 100644 index 000000000000..7f6561c1e005 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/tmp_cmdq_defines.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef CMDQ_DEFINE_H +#define CMDQ_DEFINE_H + +#define CMDQ_PA_REG_ADDR 0xFC00000 +#define CMDQ_PA_REG_WIDTH 64 + +#define CMDQ_LOG_SIZE_REG_ADDR 0xFC00008 +#define CMDQ_LOG_SIZE_WIDTH 4 + +#define CMDQ_DB_REG_ADDR 0xFC0000C +#define CMDQ_DB_REG_WIDTH 32 + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c b/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c new file mode 100644 index 000000000000..acbe7e83a9e2 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c @@ -0,0 +1,954 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" +#include "eswitch.h" +#include "common/xsc_fs.h" +#include "net/xsc_eth.h" +#include "common/xsc_lag.h" + +static int _xsc_query_vport_state(struct xsc_core_device *dev, u16 opmod, + u16 vport, void *out, int outlen) +{ + struct xsc_query_vport_state_in in; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_VPORT_STATE); + in.vport_number = cpu_to_be16(vport); + if (vport) + in.other_vport = 1; + + return xsc_cmd_exec(dev, &in, sizeof(in), out, outlen); +} + +u8 xsc_query_vport_state(struct xsc_core_device *dev, u16 opmod, u16 vport) +{ + struct xsc_query_vport_state_out out; + + memset(&out, 0, sizeof(out)); + _xsc_query_vport_state(dev, opmod, vport, &out, sizeof(out)); + + return out.state; +} +EXPORT_SYMBOL(xsc_query_vport_state); + +int xsc_modify_vport_admin_state(struct xsc_core_device *dev, u16 opmod, + u16 vport, u8 other_vport, u8 state) +{ + struct xsc_modify_vport_state_in in; + struct xsc_modify_vport_state_out out; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_VPORT_STATE); + in.vport_number = cpu_to_be16(vport); + in.other_vport = other_vport; + in.admin_state = state; + + return xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); +} + +int __xsc_query_nic_vport_context(struct xsc_core_device *dev, + u16 vport, void *out, int outlen, + int force_other) +{ + struct xsc_query_nic_vport_context_in in; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT); + in.vport_number = cpu_to_be16(vport); + if (vport || force_other) + in.other_vport = 1; + + return xsc_cmd_exec(dev, &in, sizeof(in), out, outlen); +} + +static int xsc_query_nic_vport_context(struct xsc_core_device *dev, u16 vport, + void *out, int outlen) +{ + return __xsc_query_nic_vport_context(dev, vport, out, outlen, 0); +} + +int xsc_modify_nic_vport_context(struct 
xsc_core_device *dev, void *in, + int inlen) +{ + struct xsc_modify_nic_vport_context_out out; + struct xsc_modify_nic_vport_context_in *tmp; + int err; + + memset(&out, 0, sizeof(out)); + tmp = (struct xsc_modify_nic_vport_context_in *)in; + tmp->hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + + err = xsc_cmd_exec(dev, in, inlen, &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(dev, "fail to modify nic vport err=%d status=%d\n", + err, out.hdr.status); + } + return err; +} + +int xsc_query_nic_vport_min_inline(struct xsc_core_device *dev, + u16 vport, u8 *min_inline) +{ + struct xsc_query_nic_vport_context_out out; + int err; + + memset(&out, 0, sizeof(out)); + err = xsc_query_nic_vport_context(dev, vport, &out, sizeof(out)); + if (!err) + *min_inline = out.nic_vport_ctx.min_wqe_inline_mode; + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_min_inline); + +void xsc_query_min_inline(struct xsc_core_device *dev, + u8 *min_inline_mode) +{ + switch (dev->caps.wqe_inline_mode) { + case XSC_CAP_INLINE_MODE_VPORT_CONTEXT: + if (!xsc_query_nic_vport_min_inline(dev, 0, min_inline_mode)) + break; + fallthrough; + case XSC_CAP_INLINE_MODE_L2: + *min_inline_mode = XSC_INLINE_MODE_L2; + break; + case XSC_CAP_INLINE_MODE_NOT_REQUIRED: + *min_inline_mode = XSC_INLINE_MODE_NONE; + break; + } +} +EXPORT_SYMBOL_GPL(xsc_query_min_inline); + +int xsc_modify_nic_vport_min_inline(struct xsc_core_device *dev, + u16 vport, u8 min_inline) +{ + struct xsc_modify_nic_vport_context_in in; + + memset(&in, 0, sizeof(in)); + in.field_select.min_inline = 1; + in.vport_number = vport; + in.other_vport = 1; + in.nic_vport_ctx.min_wqe_inline_mode = min_inline; + + return xsc_modify_nic_vport_context(dev, &in, sizeof(in)); +} + +static int __xsc_query_nic_vport_mac_address(struct xsc_core_device *dev, + u16 vport, u8 *addr, + int force_other) +{ + struct xsc_query_nic_vport_context_out out; + u8 *out_addr; + int err; + + memset(&out, 0, sizeof(out)); + 
out_addr = out.nic_vport_ctx.permanent_address;
+
+	err = __xsc_query_nic_vport_context(dev, vport, &out, sizeof(out),
+					    force_other);
+	if (!err)
+		ether_addr_copy(addr, out_addr);
+
+	return err;
+}
+
+int xsc_query_other_nic_vport_mac_address(struct xsc_core_device *dev,
+					  u16 vport, u8 *addr)
+{
+	return __xsc_query_nic_vport_mac_address(dev, vport, addr, 1);
+}
+EXPORT_SYMBOL_GPL(xsc_query_other_nic_vport_mac_address);
+
+int xsc_query_nic_vport_mac_address(struct xsc_core_device *dev,
+				    u16 vport, u8 *addr)
+{
+	return __xsc_query_nic_vport_mac_address(dev, vport, addr, 0);
+}
+EXPORT_SYMBOL_GPL(xsc_query_nic_vport_mac_address);
+
+/* Program a permanent or current MAC for @vport (force_other selects the
+ * other-vport form of the command) and refresh the allowed-VLAN list. */
+static int __xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev,
+					      u16 vport, u8 *addr, int force_other, bool perm_mac)
+{
+	struct xsc_modify_nic_vport_context_in *in;
+	struct xsc_modify_nic_vport_context_out out;
+	struct xsc_adapter *adapter = netdev_priv(dev->netdev);
+	struct xsc_vport *evport = NULL;
+	int err, in_sz, i;
+	u8 *mac_addr;
+	u16 caps = 0;
+	u16 caps_mask = 0;
+	u16 lag_id = xsc_get_lag_id(dev);
+
+	/* "out" is read (outer_vlan_id) further down but never written by
+	 * the command path, so it must not contain stack garbage */
+	memset(&out, 0, sizeof(out));
+
+	in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + 2;
+
+	in = kzalloc(in_sz, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	in->lag_id = cpu_to_be16(lag_id);
+
+	if (perm_mac) {
+		in->field_select.permanent_address = 1;
+		mac_addr = in->nic_vport_ctx.permanent_address;
+	} else {
+		in->field_select.current_address = 1;
+		mac_addr = in->nic_vport_ctx.current_address;
+	}
+
+	if (force_other) {
+		in->other_vport = 1;
+		in->vport_number = cpu_to_be16(vport);
+		/* was "i + 1" with i uninitialized (UB); use the same vport
+		 * number the command targets -- TODO confirm eswitch indexing */
+		evport = xsc_eswitch_get_vport(adapter->xdev->priv.eswitch, vport);
+	}
+
+	if (xsc_get_pp_bypass_res(dev, false))
+		caps |= BIT(XSC_TBM_CAP_PP_BYPASS);
+	caps_mask |= BIT(XSC_TBM_CAP_PP_BYPASS);
+	in->caps = cpu_to_be16(caps);
+	in->caps_mask = cpu_to_be16(caps_mask);
+
+	ether_addr_copy(mac_addr, addr);
+
+	in->field_select.addresses_list = 1;
+	if (evport)
+		in->nic_vport_ctx.vlan = cpu_to_be16(evport->vlan_id);
+
in->nic_vport_ctx.vlan_allowed = 1;
+
+	err = xsc_modify_nic_vport_context(dev, in, in_sz);
+	/* NOTE(review): "out" is a local that is never passed to
+	 * xsc_modify_nic_vport_context() (which uses its own output buffer),
+	 * so outer_vlan_id is read uninitialized here -- confirm intent */
+	if (be16_to_cpu(out.outer_vlan_id))
+		goto ret;
+
+	/* re-announce every active C-VLAN and S-VLAN for the new MAC */
+	for (i = 0; i < VLAN_N_VID; i++) {
+		if (test_bit(i, adapter->vlan_params.active_cvlans)) {
+			in->nic_vport_ctx.vlan = cpu_to_be16(i);
+			in->nic_vport_ctx.vlan_allowed = 1;
+			err |= xsc_modify_nic_vport_context(dev, in, in_sz);
+		}
+		if (test_bit(i, adapter->vlan_params.active_svlans)) {
+			in->nic_vport_ctx.vlan = cpu_to_be16(i);
+			in->nic_vport_ctx.vlan_allowed = 1;
+			err |= xsc_modify_nic_vport_context(dev, in, in_sz);
+		}
+	}
+
+ret:
+	kfree(in);
+	return err;
+}
+
+/* Issue the rate-limit command for @vport; non-zero vport targets another
+ * function via other_vport. Returns 0 or -EINVAL on command failure. */
+static int __xsc_modify_vport_max_rate(struct xsc_core_device *dev,
+				       u16 vport, u32 rate)
+{
+	struct xsc_vport_rate_limit_mobox_in in;
+	struct xsc_vport_rate_limit_mobox_out out;
+	int err = 0;
+
+	memset(&in, 0, sizeof(struct xsc_vport_rate_limit_mobox_in));
+	memset(&out, 0, sizeof(struct xsc_vport_rate_limit_mobox_out));
+
+	in.vport_number = cpu_to_be16(vport);
+	if (vport)
+		in.other_vport = 1;
+
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_VPORT_RATE_LIMIT);
+	in.rate = cpu_to_be32(rate);
+
+	err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+	if (err || out.hdr.status) {
+		xsc_core_err(dev, "modify_vport_max_rate failed!err=%d, status=%u\n",
+			     err, out.hdr.status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Exported wrapper: set the MAC of another function's vport. */
+int xsc_modify_other_nic_vport_mac_address(struct xsc_core_device *dev,
+					   u16 vport, u8 *addr, bool perm_mac)
+{
+	return __xsc_modify_nic_vport_mac_address(dev, vport, addr, 1, perm_mac);
+}
+EXPORT_SYMBOL(xsc_modify_other_nic_vport_mac_address);
+
+int xsc_modify_vport_max_rate(struct xsc_core_device *dev,
+			      u16 vport, u32 rate)
+{
+	return __xsc_modify_vport_max_rate(dev, vport, rate);
+}
+EXPORT_SYMBOL(xsc_modify_vport_max_rate);
+
+/* Exported wrapper: set the MAC of our own vport. */
+int xsc_modify_nic_vport_mac_address(struct xsc_core_device *dev,
+				     u16 vport, u8 *addr, bool perm_mac)
+{
+	return __xsc_modify_nic_vport_mac_address(dev, vport, addr, 0, perm_mac);
+}
+EXPORT_SYMBOL(xsc_modify_nic_vport_mac_address); + +int xsc_query_nic_vport_mtu(struct xsc_core_device *dev, u16 *mtu) +{ + struct xsc_query_nic_vport_context_out out; + int err; + + memset(&out, 0, sizeof(out)); + err = xsc_query_nic_vport_context(dev, 0, &out, sizeof(out)); + if (!err) + *mtu = out.nic_vport_ctx.mtu; + + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_mtu); + +int xsc_modify_nic_vport_mtu(struct xsc_core_device *dev, u16 mtu) +{ + struct xsc_modify_nic_vport_context_in in; + int err; + + memset(&in, 0, sizeof(in)); + in.field_select.mtu = 1; + in.nic_vport_ctx.mtu = mtu; + + err = xsc_modify_nic_vport_context(dev, &in, sizeof(in)); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_mtu); + +int xsc_query_nic_vport_mac_list(struct xsc_core_device *dev, + u16 vport, + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int *list_size) +{ + struct xsc_query_nic_vport_context_in in; + struct xsc_query_nic_vport_context_out *out; + int max_list_size; + int req_list_size; + int out_sz; + int err; + int i; + + req_list_size = *list_size; + + max_list_size = list_type == XSC_NVPRT_LIST_TYPE_UC ? 
+ 1 << dev->caps.log_max_current_uc_list : + 1 << dev->caps.log_max_current_mc_list; + + if (req_list_size > max_list_size) { + xsc_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n", + req_list_size, max_list_size); + req_list_size = max_list_size; + } + + out_sz = sizeof(struct xsc_query_nic_vport_context_out) + + req_list_size * 8; + + memset(&in, 0, sizeof(in)); + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + in.hdr.opcode = XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT; + in.allowed_list_type = list_type; + in.vport_number = vport; + in.other_vport = 1; + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, out_sz); + if (err) + goto out; + + req_list_size = out->nic_vport_ctx.allowed_list_size; + *list_size = req_list_size; + for (i = 0; i < req_list_size; i++) { + u8 *mac_addr = (u8 *)out->nic_vport_ctx.current_uc_mac_address[i]; + + ether_addr_copy(addr_list[i], mac_addr); + } +out: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_mac_list); + +int xsc_modify_nic_vport_mac_list(struct xsc_core_device *dev, + enum xsc_list_type list_type, + u8 addr_list[][ETH_ALEN], + int list_size) +{ + struct xsc_modify_nic_vport_context_out out; + struct xsc_modify_nic_vport_context_in *in; + int max_list_size; + int in_sz; + int err; + int i; + + max_list_size = list_type == XSC_NVPRT_LIST_TYPE_UC ? 
+ 1 << dev->caps.log_max_current_uc_list : + 1 << dev->caps.log_max_current_mc_list; + + if (list_size > max_list_size) + return -ENOSPC; + + in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + + list_size * 8; + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->hdr.opcode = XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT; + in->field_select.addresses_list = 1; + in->nic_vport_ctx.allowed_list_type = list_type; + in->nic_vport_ctx.allowed_list_size = list_size; + + for (i = 0; i < list_size; i++) { + u8 *curr_mac = + (u8 *)(in->nic_vport_ctx.current_uc_mac_address[i]); + ether_addr_copy(curr_mac, addr_list[i]); + } + + memset(&out, 0, sizeof(out)); + err = xsc_cmd_exec(dev, in, in_sz, &out, sizeof(out)); + kfree(in); + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_mac_list); + +int xsc_query_nic_vport_vlans(struct xsc_core_device *dev, u32 vport, + unsigned long *vlans) +{ + struct xsc_query_nic_vport_context_in in; + struct xsc_query_nic_vport_context_out *out; + int req_list_size; + int out_sz; + int err; + int i; + + req_list_size = 1 << dev->caps.log_max_vlan_list; + out_sz = sizeof(*out) + req_list_size * 8; + + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = XSC_CMD_OP_QUERY_NIC_VPORT_CONTEXT; + in.allowed_list_type = XSC_NVPRT_LIST_TYPE_VLAN; + in.vport_number = vport; + + if (vport) + in.other_vport = 1; + + err = xsc_cmd_exec(dev, &in, sizeof(in), out, out_sz); + if (err) + goto out; + + req_list_size = out->nic_vport_ctx.allowed_list_size; + + for (i = 0; i < req_list_size; i++) { + u16 *vlan_addr = (u16 *)&out->nic_vport_ctx.current_uc_mac_address[i]; + + bitmap_set(vlans, (*vlan_addr & 0xfff), 1); + } +out: + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_vlans); + +int xsc_modify_nic_vport_vlans(struct xsc_core_device *dev, + u16 vid, bool add) +{ + struct xsc_modify_nic_vport_context_out out; + struct xsc_modify_nic_vport_context_in *in; + int 
in_sz; + int err; + + in_sz = sizeof(struct xsc_modify_nic_vport_context_in) + 2; + + in = kzalloc(in_sz, GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); + in->field_select.addresses_list = 1; + + in->nic_vport_ctx.vlan_allowed = add; + in->nic_vport_ctx.allowed_list_type = XSC_NVPRT_LIST_TYPE_VLAN; + in->nic_vport_ctx.vlan = cpu_to_be16(vid); + + memset(&out, 0, sizeof(out)); + err = xsc_cmd_exec(dev, in, in_sz, &out, sizeof(out)); + kfree(in); + + if (err || out.hdr.status) { + xsc_core_err(dev, "Failed to modify vlan err=%d out.status=%u", + err, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_vlans); + +int xsc_query_nic_vport_system_image_guid(struct xsc_core_device *dev, + u64 *system_image_guid) +{ + struct xsc_query_nic_vport_context_out out; + + memset(&out, 0, sizeof(out)); + xsc_query_nic_vport_context(dev, 0, &out, sizeof(out)); + + *system_image_guid = out.nic_vport_ctx.system_image_guid; + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_system_image_guid); + +int xsc_query_nic_vport_node_guid(struct xsc_core_device *dev, u32 vport, + u64 *node_guid) +{ + struct xsc_query_nic_vport_context_out out; + + memset(&out, 0, sizeof(out)); + xsc_query_nic_vport_context(dev, vport, &out, sizeof(out)); + + *node_guid = out.nic_vport_ctx.node_guid; + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_node_guid); + +static int __xsc_modify_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 node_guid, + int force_other) +{ + struct xsc_modify_nic_vport_context_in in; + int err; + + /* vport = 0 only if ECPF modifying Host PF */ + if (!vport && !force_other) + return -EINVAL; + if (!dev->caps.vport_group_manager) + return -EACCES; + + memset(&in, 0, sizeof(in)); + in.field_select.node_guid = 1; + in.vport_number = vport; + if (vport || force_other) + in.other_vport = 1; + + in.nic_vport_ctx.node_guid = node_guid; + + err = 
xsc_modify_nic_vport_context(dev, &in, sizeof(in)); + + return err; +} + +int xsc_modify_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 node_guid) +{ + return __xsc_modify_nic_vport_node_guid(dev, vport, node_guid, 0); +} + +int xsc_modify_other_nic_vport_node_guid(struct xsc_core_device *dev, + u16 vport, u64 node_guid) +{ + return __xsc_modify_nic_vport_node_guid(dev, vport, node_guid, 1); +} + +int xsc_query_nic_vport_qkey_viol_cntr(struct xsc_core_device *dev, + u16 *qkey_viol_cntr) +{ + struct xsc_query_nic_vport_context_out out; + + memset(&out, 0, sizeof(out)); + xsc_query_nic_vport_context(dev, 0, &out, sizeof(out)); + + *qkey_viol_cntr = out.nic_vport_ctx.qkey_violation_counter; + + return 0; +} +EXPORT_SYMBOL_GPL(xsc_query_nic_vport_qkey_viol_cntr); + +int xsc_query_hca_vport_gid(struct xsc_core_device *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 gid_index, + union ib_gid *gid) +{ + int in_sz = sizeof(struct xsc_query_hca_vport_gid_in); + int out_sz = sizeof(struct xsc_query_hca_vport_gid_out); + struct xsc_query_hca_vport_gid_in *in; + struct xsc_query_hca_vport_gid_out *out; + int is_group_manager; + union ib_gid *tmp; + int tbsz; + int nout; + int err; + + is_group_manager = dev->caps.vport_group_manager; + tbsz = dev->caps.port[port_num].gid_table_len; + xsc_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n", + vf_num, gid_index, tbsz); + + if (gid_index > tbsz && gid_index != 0xffff) + return -EINVAL; + + if (gid_index == 0xffff) + nout = tbsz; + else + nout = 1; + + out_sz += nout * sizeof(*gid); + + in = kzalloc(in_sz, GFP_KERNEL); + out = kzalloc(out_sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + in->hdr.opcode = XSC_CMD_OP_QUERY_HCA_VPORT_GID; + if (other_vport) { + if (is_group_manager) { + in->vport_number = vf_num; + in->other_vport = 1; + } else { + err = -EPERM; + goto out; + } + } + + in->gid_index = gid_index; + in->port_num = port_num; + + err = xsc_cmd_exec(dev, in, in_sz, out, 
out_sz); + if (err) + goto out; + + tmp = (union ib_gid *)((void *)out + + sizeof(struct xsc_query_hca_vport_gid_out)); + gid->global.subnet_prefix = tmp->global.subnet_prefix; + gid->global.interface_id = tmp->global.interface_id; + +out: + kfree(in); + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_hca_vport_gid); + +int xsc_query_hca_vport_pkey(struct xsc_core_device *dev, u8 other_vport, + u8 port_num, u16 vf_num, u16 pkey_index, + u16 *pkey) +{ + int in_sz = sizeof(struct xsc_query_hca_vport_pkey_in); + int out_sz = sizeof(struct xsc_query_hca_vport_pkey_out); + struct xsc_query_hca_vport_pkey_in *in; + struct xsc_query_hca_vport_pkey_out *out; + int is_group_manager; + void *pkarr; + int nout; + int tbsz; + int err; + int i; + + is_group_manager = dev->caps.vport_group_manager; + + tbsz = dev->caps.port[port_num].pkey_table_len; + if (pkey_index > tbsz && pkey_index != 0xffff) + return -EINVAL; + + if (pkey_index == 0xffff) + nout = tbsz; + else + nout = 1; + + out_sz += nout * sizeof(*pkey); + + in = kzalloc(in_sz, GFP_KERNEL); + out = kzalloc(out_sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + in->hdr.opcode = XSC_CMD_OP_QUERY_HCA_VPORT_PKEY; + if (other_vport) { + if (is_group_manager) { + in->vport_number = vf_num; + in->other_vport = 1; + } else { + err = -EPERM; + goto out; + } + } + in->pkey_index = pkey_index; + + if (dev->caps.num_ports == 2) + in->port_num = port_num; + + err = xsc_cmd_exec(dev, in, in_sz, out, out_sz); + if (err) + goto out; + + pkarr = out->pkey; + for (i = 0; i < nout; i++, pkey++, pkarr += sizeof(*pkey)) + *pkey = *(u16 *)pkarr; + +out: + kfree(in); + kfree(out); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_hca_vport_pkey); + +int xsc_query_hca_vport_context(struct xsc_core_device *dev, + u8 other_vport, u8 port_num, + u16 vf_num, + struct xsc_hca_vport_context *rep) +{ + struct xsc_query_hca_vport_context_out *out = NULL; + struct xsc_query_hca_vport_context_in in; + int is_group_manager; 
+	void *ctx;
+	int err;
+
+	is_group_manager = dev->caps.vport_group_manager;
+
+	memset(&in, 0, sizeof(in));
+	in.hdr.opcode = XSC_CMD_OP_QUERY_HCA_VPORT_CONTEXT;
+
+	if (other_vport) {
+		if (is_group_manager) {
+			in.other_vport = 1;
+			in.vport_number = vf_num;
+		} else {
+			err = -EPERM;
+			goto ex;
+		}
+	}
+
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	if (dev->caps.num_ports == 2)
+		in.port_num = port_num;
+
+	err = xsc_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
+	if (err)
+		goto ex;
+
+	ctx = &out->hca_vport_ctx;
+	memcpy(rep, ctx, sizeof(struct xsc_hca_vport_context));
+
+ex:
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_query_hca_vport_context);
+
+int xsc_query_hca_vport_node_guid(struct xsc_core_device *dev,
+				  u64 *node_guid)
+{
+	struct xsc_hca_vport_context *rep;
+	int err;
+
+	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+	if (!rep)
+		return -ENOMEM;
+
+	err = xsc_query_hca_vport_context(dev, 0, 1, 0, rep);
+	if (!err)
+		*node_guid = rep->node_guid;
+
+	kfree(rep);
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_query_hca_vport_node_guid);
+
+/* Read the promisc/allmcast flags of a vport's NIC vport context. */
+int xsc_query_nic_vport_promisc(struct xsc_core_device *dev,
+				u16 vport,
+				int *promisc,
+				int *allmcast)
+{
+	struct xsc_query_nic_vport_context_out *out;
+	int err;
+
+	/* Allocate the full output struct: sizeof(*out), not sizeof(out)
+	 * (the latter is just pointer size and xsc_cmd_exec would write
+	 * sizeof(*out) bytes past the end of the allocation).
+	 */
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	err = xsc_query_nic_vport_context(dev, vport, out, sizeof(*out));
+	if (err)
+		goto out;
+
+	*promisc = out->nic_vport_ctx.promisc;
+	*allmcast = out->nic_vport_ctx.allmcast;
+
+out:
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(xsc_query_nic_vport_promisc);
+
+int xsc_modify_nic_vport_promisc(struct xsc_core_device *dev,
+				 bool allmulti_flag, bool promisc_flag,
+				 int allmulti, int promisc)
+{
+	struct xsc_modify_nic_vport_context_in *in;
+	int err;
+
+	in = kvzalloc(sizeof(*in), GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	in->field_select.allmcast = allmulti_flag;
+	in->nic_vport_ctx.allmcast = allmulti;
+
+	in->field_select.promisc
= promisc_flag; + in->nic_vport_ctx.promisc = promisc; + + err = xsc_modify_nic_vport_context(dev, in, sizeof(*in)); + + kvfree(in); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_promisc); + +int xsc_modify_nic_vport_spoofchk(struct xsc_core_device *dev, + u16 vport, int spoofchk) +{ + struct xsc_modify_nic_vport_context_in *in; + int err; + + in = kvzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->other_vport = 1; + in->vport_number = cpu_to_be16(vport); + in->field_select.spoofchk = 1; + in->nic_vport_ctx.spoofchk = spoofchk; + + err = xsc_modify_nic_vport_context(dev, in, sizeof(*in)); + + kvfree(in); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_spoofchk); + +int xsc_modify_nic_vport_trust(struct xsc_core_device *dev, + u16 vport, bool trust) +{ + struct xsc_modify_nic_vport_context_in *in; + int err; + + in = kvzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->other_vport = 1; + in->vport_number = cpu_to_be16(vport); + in->field_select.trust = 1; + in->nic_vport_ctx.trust = (trust ? 
1 : 0); + + err = xsc_modify_nic_vport_context(dev, in, sizeof(*in)); + + kvfree(in); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_nic_vport_trust); + +int xsc_query_vport_counter(struct xsc_core_device *dev, u8 other_vport, + int vf, u8 port_num, void *out, + size_t out_sz) +{ + struct xsc_query_vport_counter_in *in; + int is_group_manager; + int err; + + is_group_manager = dev->caps.vport_group_manager; + in = kvzalloc(sizeof(*in), GFP_KERNEL); + if (!in) { + err = -ENOMEM; + return err; + } + + in->hdr.opcode = XSC_CMD_OP_QUERY_VPORT_COUNTER; + if (other_vport) { + if (is_group_manager) { + in->other_vport = 1; + in->vport_number = (vf + 1); + } else { + err = -EPERM; + goto free; + } + } + + if (dev->caps.num_ports == 2) + in->port_num = port_num; + + err = xsc_cmd_exec(dev, in, sizeof(*in), out, out_sz); +free: + kvfree(in); + return err; +} +EXPORT_SYMBOL_GPL(xsc_query_vport_counter); + +int xsc_modify_hca_vport_context(struct xsc_core_device *dev, + u8 other_vport, u8 port_num, + int vf, + struct xsc_hca_vport_context *req) +{ + struct xsc_modify_hca_vport_context_in in; + struct xsc_modify_hca_vport_context_out out; + int is_group_manager; + int err; + + xsc_core_dbg(dev, "vf %d\n", vf); + is_group_manager = dev->caps.vport_group_manager; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = XSC_CMD_OP_MODIFY_HCA_VPORT_CONTEXT; + if (other_vport) { + if (is_group_manager) { + in.other_vport = 1; + in.vport_number = vf; + } else { + err = -EPERM; + goto err; + } + } + + if (dev->caps.num_ports > 1) + in.port_num = port_num; + memcpy(&in.hca_vport_ctx, req, sizeof(*req)); + err = xsc_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); +err: + return err; +} +EXPORT_SYMBOL_GPL(xsc_modify_hca_vport_context); + +/** + * xsc_eswitch_get_total_vports - Get total vports of the eswitch + * + * @dev: Pointer to core device + * + * xsc_eswitch_get_total_vports returns total number of vports for + * the eswitch. 
+ */ +u16 xsc_eswitch_get_total_vports(const struct xsc_core_device *dev) +{ + return XSC_SPECIAL_VPORTS(dev) + xsc_core_max_vfs(dev); +} +EXPORT_SYMBOL(xsc_eswitch_get_total_vports); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/wq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.c new file mode 100644 index 000000000000..5d0c96f204e2 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include "common/driver.h" +#include "common/device.h" +#include "common/xsc_core.h" +#include "wq.h" + +u32 xsc_wq_cyc_get_size(struct xsc_wq_cyc *wq) +{ + return (u32)wq->fbc.sz_m1 + 1; +} +EXPORT_SYMBOL_GPL(xsc_wq_cyc_get_size); + +static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride) +{ + return ((u32)1 << log_sz) << log_stride; +} + +int xsc_eth_cqwq_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, + u8 q_log_size, u8 ele_log_size, struct xsc_cqwq *wq, + struct xsc_wq_ctrl *wq_ctrl) +{ + u8 log_wq_stride = ele_log_size; + u8 log_wq_sz = q_log_size; + int err; + + err = xsc_db_alloc_node(xdev, &wq_ctrl->db, param->db_numa_node); + if (err) { + xsc_core_warn(xdev, "xsc_db_alloc_node() failed, %d\n", err); + return err; + } + + err = xsc_frag_buf_alloc_node(xdev, wq_get_byte_sz(log_wq_sz, log_wq_stride), + &wq_ctrl->buf, + param->buf_numa_node); + if (err) { + xsc_core_warn(xdev, "xsc_frag_buf_alloc_node() failed, %d\n", err); + goto err_db_free; + } + + xsc_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc); + + wq_ctrl->xdev = xdev; + + return 0; + +err_db_free: + xsc_db_free(xdev, &wq_ctrl->db); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_eth_cqwq_create); + +int xsc_eth_wq_cyc_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, + u8 q_log_size, u8 ele_log_size, struct xsc_wq_cyc *wq, + struct xsc_wq_ctrl *wq_ctrl) +{ + u8 log_wq_stride = ele_log_size; + u8 
log_wq_sz = q_log_size; + struct xsc_frag_buf_ctrl *fbc = &wq->fbc; + int err; + + err = xsc_db_alloc_node(xdev, &wq_ctrl->db, param->db_numa_node); + if (err) { + xsc_core_warn(xdev, "xsc_db_alloc_node() failed, %d\n", err); + return err; + } + + err = xsc_frag_buf_alloc_node(xdev, wq_get_byte_sz(log_wq_sz, log_wq_stride), + &wq_ctrl->buf, param->buf_numa_node); + if (err) { + xsc_core_warn(xdev, "xsc_frag_buf_alloc_node() failed, %d\n", err); + goto err_db_free; + } + + xsc_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc); + wq->sz = xsc_wq_cyc_get_size(wq); + + wq_ctrl->xdev = xdev; + + return 0; + +err_db_free: + xsc_db_free(xdev, &wq_ctrl->db); + + return err; +} +EXPORT_SYMBOL_GPL(xsc_eth_wq_cyc_create); + +void xsc_eth_wq_destroy(struct xsc_wq_ctrl *wq_ctrl) +{ + xsc_frag_buf_free(wq_ctrl->xdev, &wq_ctrl->buf); + xsc_db_free(wq_ctrl->xdev, &wq_ctrl->db); +} +EXPORT_SYMBOL_GPL(xsc_eth_wq_destroy); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h new file mode 100644 index 000000000000..8811ef1bf0f7 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/wq.h @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __XSC_WQ_H__ +#define __XSC_WQ_H__ + +#include "common/cq.h" +#include "common/qp.h" + +struct xsc_wq_param { + int buf_numa_node; + int db_numa_node; +}; + +struct xsc_wq_ctrl { + struct xsc_core_device *xdev; + struct xsc_frag_buf buf; + struct xsc_db db; +}; + +struct xsc_wq_cyc { + struct xsc_frag_buf_ctrl fbc; + u16 sz; + u16 wqe_ctr; + u16 cur_sz; +}; + +struct xsc_cqwq { + struct xsc_frag_buf_ctrl fbc; + __be32 *db; + u32 cc; /* consumer counter */ +}; + +enum xsc_res_type { + XSC_RES_UND = 0, + XSC_RES_RQ, + XSC_RES_SQ, + XSC_RES_MAX, +}; + +u32 xsc_wq_cyc_get_size(struct xsc_wq_cyc *wq); + +int xsc_buf_alloc_node(struct xsc_core_device *dev, int size, + struct xsc_frag_buf *buf, int node); + +/*api for eth driver*/ +int xsc_eth_cqwq_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, + u8 q_log_size, u8 ele_log_size, struct xsc_cqwq *wq, + struct xsc_wq_ctrl *wq_ctrl); + +int xsc_eth_wq_cyc_create(struct xsc_core_device *xdev, struct xsc_wq_param *param, + u8 q_log_size, u8 ele_log_size, struct xsc_wq_cyc *wq, + struct xsc_wq_ctrl *wq_ctrl); +void xsc_eth_wq_destroy(struct xsc_wq_ctrl *wq_ctrl); + +static inline void xsc_init_fbc_offset(struct xsc_buf_list *frags, + u8 log_stride, u8 log_sz, + u16 strides_offset, + struct xsc_frag_buf_ctrl *fbc) +{ + fbc->frags = frags; + fbc->log_stride = log_stride; + fbc->log_sz = log_sz; + fbc->sz_m1 = (1 << fbc->log_sz) - 1; + fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; + fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; + fbc->strides_offset = strides_offset; +} + +static inline void xsc_init_fbc(struct xsc_buf_list *frags, + u8 log_stride, u8 log_sz, + struct xsc_frag_buf_ctrl *fbc) +{ + xsc_init_fbc_offset(frags, log_stride, log_sz, 0, fbc); +} + +static inline void *xsc_frag_buf_get_wqe(struct xsc_frag_buf_ctrl *fbc, + u32 ix) +{ + unsigned int frag; + + ix += fbc->strides_offset; + frag = ix >> fbc->log_frag_strides; + + return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & 
ix) << fbc->log_stride); +} + +static inline u32 +xsc_frag_buf_get_idx_last_contig_stride(struct xsc_frag_buf_ctrl *fbc, u32 ix) +{ + u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1; + + return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); +} + +static inline int xsc_wq_cyc_missing(struct xsc_wq_cyc *wq) +{ + return wq->sz - wq->cur_sz; +} + +static inline int xsc_wq_cyc_is_empty(struct xsc_wq_cyc *wq) +{ + return !wq->cur_sz; +} + +static inline void xsc_wq_cyc_push(struct xsc_wq_cyc *wq) +{ + wq->wqe_ctr++; + wq->cur_sz++; +} + +static inline void xsc_wq_cyc_push_n(struct xsc_wq_cyc *wq, u8 n) +{ + wq->wqe_ctr += n; + wq->cur_sz += n; +} + +static inline void xsc_wq_cyc_pop(struct xsc_wq_cyc *wq) +{ + wq->cur_sz--; +} + +static inline u16 xsc_wq_cyc_ctr2ix(struct xsc_wq_cyc *wq, u16 ctr) +{ + return ctr & wq->fbc.sz_m1; +} + +static inline u16 xsc_wq_cyc_get_head(struct xsc_wq_cyc *wq) +{ + return xsc_wq_cyc_ctr2ix(wq, wq->wqe_ctr); +} + +static inline u16 xsc_wq_cyc_get_tail(struct xsc_wq_cyc *wq) +{ + return xsc_wq_cyc_ctr2ix(wq, wq->wqe_ctr - wq->cur_sz); +} + +static inline void *xsc_wq_cyc_get_wqe(struct xsc_wq_cyc *wq, u16 ix) +{ + return xsc_frag_buf_get_wqe(&wq->fbc, ix); +} + +static inline u32 xsc_cqwq_ctr2ix(struct xsc_cqwq *wq, u32 ctr) +{ + return ctr & wq->fbc.sz_m1; +} + +static inline u32 xsc_cqwq_get_ci(struct xsc_cqwq *wq) +{ + return xsc_cqwq_ctr2ix(wq, wq->cc); +} + +static inline u32 xsc_cqwq_get_ctr_wrap_cnt(struct xsc_cqwq *wq, u32 ctr) +{ + return ctr >> wq->fbc.log_sz; +} + +static inline u32 xsc_cqwq_get_wrap_cnt(struct xsc_cqwq *wq) +{ + return xsc_cqwq_get_ctr_wrap_cnt(wq, wq->cc); +} + +static inline void xsc_cqwq_pop(struct xsc_cqwq *wq) +{ + wq->cc++; +} + +#endif /* __XSC_WQ_H__ */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c new file mode 100644 index 000000000000..4d12ce7f0459 --- /dev/null +++ 
b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_lag.c @@ -0,0 +1,1418 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include "common/xsc_core.h" +#include "common/driver.h" +#include +#include "common/xsc_lag.h" +#include "common/xsc_hsi.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_cmd.h" +#include "net/xsc_eth.h" + +#include +#include +#include +#include + +static struct xsc_board_lag *board_lag_array[MAX_BOARD_NUM]; + +struct xsc_board_lag *xsc_board_lag_get(struct xsc_core_device *xdev) +{ + return board_lag_array[xdev->board_info->board_id]; +} +EXPORT_SYMBOL(xsc_board_lag_get); + +void xsc_board_lag_set(struct xsc_core_device *xdev, + void *board_lag) +{ + struct xsc_board_lag *board_lag_new = board_lag; + + board_lag_new->board_id = xdev->board_info->board_id; + board_lag_array[xdev->board_info->board_id] = board_lag_new; +} + +void xsc_board_lag_reset(u32 board_id) +{ + board_lag_array[board_id] = NULL; +} + +static u8 hash_type_map[] = { + [NETDEV_LAG_HASH_NONE] = XSC_LAG_HASH_L23, + [NETDEV_LAG_HASH_L2] = XSC_LAG_HASH_L23, + [NETDEV_LAG_HASH_L34] = XSC_LAG_HASH_L34, + [NETDEV_LAG_HASH_L23] = XSC_LAG_HASH_L23, + [NETDEV_LAG_HASH_E23] = XSC_LAG_HASH_E23, + [NETDEV_LAG_HASH_E34] = XSC_LAG_HASH_E34, + [NETDEV_LAG_HASH_UNKNOWN] = XSC_LAG_HASH_L23, +}; + +static inline u8 xsc_lag_hashtype_convert(enum netdev_lag_hash hash_type) +{ + return hash_type_map[hash_type]; +} + +static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond) +{ + switch (BOND_MODE(bond)) { + case BOND_MODE_ROUNDROBIN: + return NETDEV_LAG_TX_TYPE_ROUNDROBIN; + case BOND_MODE_ACTIVEBACKUP: + return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP; + case BOND_MODE_BROADCAST: + return NETDEV_LAG_TX_TYPE_BROADCAST; + case BOND_MODE_XOR: + case BOND_MODE_8023AD: + return NETDEV_LAG_TX_TYPE_HASH; + default: + return NETDEV_LAG_TX_TYPE_UNKNOWN; + } +} + +enum netdev_lag_hash 
bond_lag_hash_type(struct bonding *bond) +{ + switch (bond->params.xmit_policy) { + case BOND_XMIT_POLICY_LAYER2: + return NETDEV_LAG_HASH_L23; + case BOND_XMIT_POLICY_LAYER34: + return NETDEV_LAG_HASH_L34; + case BOND_XMIT_POLICY_LAYER23: + return NETDEV_LAG_HASH_L23; + case BOND_XMIT_POLICY_ENCAP23: + return NETDEV_LAG_HASH_E23; + case BOND_XMIT_POLICY_ENCAP34: + return NETDEV_LAG_HASH_E34; + default: + return NETDEV_LAG_HASH_UNKNOWN; + } +} + +static inline bool __xsc_lag_is_active(struct xsc_lag *lag) +{ + return !!(lag->lag_type & XSC_LAG_MODE_FLAGS); +} + +static inline bool __xsc_lag_is_roce(struct xsc_lag *lag) +{ + return !!(lag->lag_type & XSC_LAG_FLAG_ROCE); +} + +static inline bool __xsc_lag_is_kernel(struct xsc_lag *lag) +{ + return !!(lag->lag_type & XSC_LAG_FLAG_KERNEL); +} + +static inline struct xsc_lag *__xsc_get_lag(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag; + + if (!xdev) + return NULL; + + board_lag = xsc_board_lag_get(xdev); + if (!board_lag || xdev->bond_id == BOND_ID_INVALID) + return NULL; + + return &board_lag->xsc_lag[xdev->bond_id]; +} + +int xsc_cmd_create_lag(struct xsc_lag_event *entry) +{ + struct xsc_create_lag_mbox_in in = {}; + struct xsc_create_lag_mbox_out out = {}; + struct xsc_core_device *xdev = entry->xdev; + struct net_device *netdev = xdev->netdev; + int ret = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_CREATE); + + in.req.lag_id = cpu_to_be16(entry->lag_id); + in.req.lag_type = entry->lag_type; + in.req.lag_sel_mode = entry->lag_sel_mode; + in.req.mac_idx = xdev->pf_id; + in.req.bond_mode = entry->bond_mode; + in.req.slave_status = entry->slave_status; + + memcpy(in.req.netdev_addr, netdev->dev_addr, ETH_ALEN); + + xsc_core_info(xdev, "create LAG: lag_id = %d, lag_type = %d, lag_sel_mode = %d, bond_mode = %d\n", + entry->lag_id, entry->lag_type, entry->lag_sel_mode, entry->bond_mode); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + 
xsc_core_err(xdev, "failed to create LAG, err =%d out.status= %u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +int xsc_cmd_add_lag_member(struct xsc_lag_event *entry) +{ + struct xsc_add_lag_member_mbox_in in = {}; + struct xsc_add_lag_member_mbox_out out = {}; + struct xsc_core_device *xdev = entry->xdev; + struct net_device *netdev = xdev->netdev; + int ret = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_ADD_MEMBER); + + in.req.lag_id = cpu_to_be16(entry->lag_id); + in.req.lag_type = entry->lag_type; + in.req.lag_sel_mode = entry->lag_sel_mode; + in.req.mac_idx = xdev->pf_id; + in.req.bond_mode = entry->bond_mode; + in.req.slave_status = entry->slave_status; + in.req.mad_mac_idx = entry->roce_lag_xdev->pf_id; + + memcpy(in.req.netdev_addr, netdev->dev_addr, ETH_ALEN); + + xsc_core_info(xdev, "add LAG member: lag_id = %d, lag_type = %d, bond_mode = %d\n", + entry->lag_id, entry->lag_type, entry->bond_mode); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to add LAG member, err =%d out.status= %u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +int xsc_cmd_remove_lag_member(struct xsc_lag_event *entry) +{ + struct xsc_remove_lag_member_mbox_in in = {}; + struct xsc_remove_lag_member_mbox_out out = {}; + struct xsc_core_device *xdev = entry->xdev; + int ret = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_REMOVE_MEMBER); + + in.req.lag_id = cpu_to_be16(entry->lag_id); + in.req.lag_type = entry->lag_type; + in.req.mac_idx = xdev->pf_id; + in.req.bond_mode = entry->bond_mode; + if (entry->lag_type & XSC_LAG_FLAG_ROCE && entry->is_roce_lag_xdev) { + in.req.is_roce_lag_xdev = entry->is_roce_lag_xdev; + in.req.mad_mac_idx = entry->roce_lag_xdev->pf_id; + in.req.not_roce_lag_xdev_mask = entry->not_roce_lag_xdev_mask; + } + + xsc_core_info(xdev, "remove LAG member: lag_id = %d, lag_type = %d, bond_mode = %d\n", + entry->lag_id, 
entry->lag_type, entry->bond_mode);
+
+	ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+	if (ret || out.hdr.status) {
+		/* This is the remove path; log it as such (was a copy-paste
+		 * of the add-member message, which made failures misleading).
+		 */
+		xsc_core_err(xdev, "failed to remove LAG member, err =%d out.status= %u\n",
+			     ret, out.hdr.status);
+		return -ENOEXEC;
+	}
+
+	return 0;
+}
+
+/* Push one member's active/inactive status to firmware for an existing LAG. */
+int xsc_cmd_update_lag_member_status(struct xsc_lag_event *entry)
+{
+	struct xsc_update_lag_member_status_mbox_in in = {};
+	struct xsc_update_lag_member_status_mbox_out out = {};
+	struct xsc_core_device *xdev = entry->xdev;
+	int ret = 0;
+
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_UPDATE_MEMBER_STATUS);
+
+	in.req.lag_type = entry->lag_type;
+	in.req.bond_mode = entry->bond_mode;
+	in.req.lag_id = cpu_to_be16(entry->lag_id);
+	in.req.mac_idx = xdev->pf_id;
+	in.req.slave_status = entry->slave_status;
+
+	xsc_core_info(xdev, "update LAG member status: lag_id = %d, bond_mode = %d, lag_type = %d, slave_status = %d, mac_idx = %d\n",
+		      entry->lag_id, entry->bond_mode, entry->lag_type,
+		      entry->slave_status, xdev->pf_id);
+
+	ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+	if (ret || out.hdr.status) {
+		xsc_core_err(xdev, "failed to update LAG member status, err=%d out.status=%u\n",
+			     ret, out.hdr.status);
+		return -ENOEXEC;
+	}
+
+	return ret;
+}
+
+/* Push a new hash/selection mode to firmware for an existing LAG. */
+int xsc_cmd_update_lag_hash_type(struct xsc_lag_event *entry)
+{
+	struct xsc_update_lag_hash_type_mbox_in in = {};
+	struct xsc_update_lag_hash_type_mbox_out out = {};
+	struct xsc_core_device *xdev = entry->xdev;
+	int ret = 0;
+
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_UPDATE_HASH_TYPE);
+
+	in.req.lag_id = cpu_to_be16(entry->lag_id);
+	in.req.lag_sel_mode = entry->lag_sel_mode;
+
+	xsc_core_info(xdev, "update LAG hash type: lag_id = %d, lag_sel_mode = %d\n",
+		      entry->lag_id, in.req.lag_sel_mode);
+
+	ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+	if (ret || out.hdr.status) {
+		xsc_core_err(xdev, "failed to update LAG hash type, err=%d out.status=%u\n",
+			     ret, out.hdr.status);
+		return -ENOEXEC;
+	}
+
+
return ret; +} + +int xsc_cmd_destroy_lag(struct xsc_lag_event *entry) +{ + struct xsc_destroy_lag_mbox_in in = {}; + struct xsc_destroy_lag_mbox_out out = {}; + struct xsc_core_device *xdev = entry->xdev; + int ret = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_DESTROY); + + in.req.lag_id = cpu_to_be16(entry->lag_id); + in.req.lag_type = entry->lag_type; + in.req.mac_idx = xdev->pf_id; + in.req.bond_mode = entry->bond_mode; + + xsc_core_info(xdev, "destroy LAG: lag_id = %d\n", entry->lag_id); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + xsc_core_err(xdev, "failed to destroy LAG, err =%d out.status= %u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +static int xsc_lag_set_qos(struct xsc_core_device *xdev, u16 lag_id, u8 member_idx, u8 lag_op) +{ + struct xsc_set_lag_qos_mbox_in in; + struct xsc_set_lag_qos_mbox_out out; + struct xsc_set_lag_qos_request *req; + int ret; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + req = &in.req; + + req->lag_id = cpu_to_be16(lag_id); + req->member_idx = member_idx; + req->lag_op = lag_op; + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_LAG_SET_QOS); + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + return ret; +} + +void xsc_create_lag(struct xsc_lag_event *entry) +{ + int ret = 0; + bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE; + struct xsc_core_device *xdev = entry->xdev; + + if (roce_lag) + xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); + + ret = xsc_cmd_create_lag(entry); + if (ret) { + xsc_core_err(xdev, "failed to create LAG, err =%d\n", ret); + goto out; + } + + if (xsc_lag_set_qos(xdev, entry->lag_id, 0, QOS_LAG_OP_CREATE)) { + xsc_core_err(xdev, "failed to create QoS LAG %u\n", entry->lag_id); + goto out; + } + + if (entry->slave_status == XSC_LAG_SLAVE_ACTIVE) { + if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_ADD_MEMBER)) + xsc_core_err(xdev, "failed to add member %u for 
QoS LAG %u\n", + xdev->pf_id, entry->lag_id); + } + +out: + if (roce_lag) + xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); +} + +void xsc_add_lag_member(struct xsc_lag_event *entry) +{ + int ret = 0; + bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE; + struct xsc_core_device *xdev = entry->xdev; + + if (roce_lag) + xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); + + ret = xsc_cmd_add_lag_member(entry); + if (ret) { + xsc_core_err(xdev, "failed to create LAG, err =%d\n", ret); + goto out; + } + + if (entry->slave_status == XSC_LAG_SLAVE_ACTIVE) { + if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_ADD_MEMBER)) + xsc_core_err(xdev, "failed to add member %u for QoS LAG %u\n", + xdev->pf_id, entry->lag_id); + } + + return; + +out: + if (roce_lag) + xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); +} + +void xsc_remove_lag_member(struct xsc_lag_event *entry) +{ + int ret = 0; + bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE; + struct xsc_core_device *xdev = entry->xdev; + struct xsc_core_device *roce_lag_xdev = entry->roce_lag_xdev; + + if (roce_lag && entry->is_roce_lag_xdev) + xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); + + ret = xsc_cmd_remove_lag_member(entry); + if (ret) { + xsc_core_err(xdev, "failed to create LAG, err =%d\n", ret); + goto out; + } + + if (roce_lag && entry->is_roce_lag_xdev) { + xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); + xsc_add_dev_by_protocol(roce_lag_xdev, XSC_INTERFACE_PROTOCOL_IB); + } + + if (roce_lag && !entry->is_roce_lag_xdev) + xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); + + if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_DEL_MEMBER)) + xsc_core_err(xdev, "failed to del member %u for QoS LAG %u\n", + xdev->pf_id, entry->lag_id); + + return; + +out: + if (roce_lag && entry->is_roce_lag_xdev) + xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); +} + +void xsc_update_lag_member_status(struct xsc_lag_event *entry) 
+{ + int ret = 0; + struct xsc_core_device *xdev = entry->xdev; + + ret = xsc_cmd_update_lag_member_status(entry); + if (ret) + xsc_core_err(xdev, "failed to update LAG member status, err =%d\n", ret); + + if (entry->slave_status == XSC_LAG_SLAVE_ACTIVE) { + if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_ADD_MEMBER)) + xsc_core_err(xdev, "failed to add member %u for QoS LAG %u\n", + xdev->pf_id, entry->lag_id); + } else if (entry->slave_status == XSC_LAG_SLAVE_INACTIVE) { + if (xsc_lag_set_qos(xdev, entry->lag_id, xdev->pf_id, QOS_LAG_OP_DEL_MEMBER)) + xsc_core_err(xdev, "failed to del member %u for QoS LAG %u\n", + xdev->pf_id, entry->lag_id); + } +} + +void xsc_update_lag_hash_type(struct xsc_lag_event *entry) +{ + int ret = 0; + struct xsc_core_device *xdev = entry->xdev; + + ret = xsc_cmd_update_lag_hash_type(entry); + if (ret) + xsc_core_err(xdev, "failed to update LAG member status, err =%d\n", ret); +} + +void xsc_destroy_lag(struct xsc_lag_event *entry) +{ + int ret = 0; + bool roce_lag = entry->lag_type & XSC_LAG_FLAG_ROCE; + struct xsc_core_device *xdev = entry->xdev; + + if (roce_lag) + xsc_remove_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); + + ret = xsc_cmd_destroy_lag(entry); + if (ret) { + xsc_core_err(xdev, "failed to create LAG, err =%d\n", ret); + goto out; + } + + if (xsc_lag_set_qos(xdev, entry->lag_id, 0, QOS_LAG_OP_DESTROY)) + xsc_core_err(xdev, "failed to destroy QoS LAG %u\n", entry->lag_id); + +out: + if (roce_lag) + xsc_add_dev_by_protocol(xdev, XSC_INTERFACE_PROTOCOL_IB); +} + +static void (*handlers[XSC_LAG_EVENT_MAX])(struct xsc_lag_event *entry) = { + [XSC_LAG_CREATE] = xsc_create_lag, + [XSC_LAG_ADD_MEMBER] = xsc_add_lag_member, + [XSC_LAG_REMOVE_MEMBER] = xsc_remove_lag_member, + [XSC_LAG_UPDATE_MEMBER_STATUS] = xsc_update_lag_member_status, + [XSC_LAG_UPDATE_HASH_TYPE] = xsc_update_lag_hash_type, + [XSC_LAG_DESTROY] = xsc_destroy_lag, +}; + +static int xsc_do_bond_thread(void *arg) +{ + struct xsc_lag_event 
*entry;
+	struct xsc_board_lag *board_lag = arg;
+	struct lag_event_list *lag_event_list;
+	int status;
+
+	lag_event_list = &board_lag->lag_event_list;
+
+	/* Consumer thread: drains queued LAG events and dispatches each to
+	 * its handler, sleeping when the queue is empty until a producer
+	 * sets wait_flag to XSC_WAKEUP (or XSC_EXIT to stop).
+	 */
+	while (!kthread_should_stop()) {
+		if (need_resched())
+			schedule();
+
+		spin_lock(&lag_event_list->lock);
+		entry = list_first_entry_or_null(&lag_event_list->head,
+						 struct xsc_lag_event, node);
+		if (!entry) {
+			spin_unlock(&lag_event_list->lock);
+			/* Queue empty: sleep until a producer wakes us. */
+			wait_event_interruptible(lag_event_list->wq,
+						 lag_event_list->wait_flag != XSC_SLEEP);
+			if (lag_event_list->wait_flag == XSC_EXIT)
+				break;
+			lag_event_list->wait_flag = XSC_SLEEP;
+			continue;
+		}
+
+		/* NOTE(review): entry is left on the list here and only
+		 * list_del'd below, after the lock is dropped, while
+		 * producers add entries under this same lock — looks racy;
+		 * confirm whether the list needs the lock held for removal.
+		 */
+		spin_unlock(&lag_event_list->lock);
+
+		if (entry->event_type >= XSC_LAG_EVENT_MAX)
+			goto free_entry;
+
+		/* If the device list is busy, retry later; the entry stays
+		 * queued and will be picked up again on the next iteration.
+		 */
+		status = xsc_dev_list_trylock();
+		if (!status)
+			continue;
+
+		(*handlers[entry->event_type])(entry);
+		xsc_dev_list_unlock();
+
+free_entry:
+		list_del(&entry->node);
+		kfree(entry);
+	}
+
+	return 0;
+}
+
+/* ROCE LAG is only allowed when no slave has SR-IOV enabled and no slave
+ * has an open IB device (non-empty bdf_tree).
+ */
+static inline bool xsc_is_roce_lag_allowed(struct xsc_lag *lag)
+{
+	struct xsc_core_device *xdev;
+	bool roce_lag_support = true;
+
+	list_for_each_entry(xdev, &lag->slave_list, slave_node) {
+		roce_lag_support &= !xsc_sriov_is_enabled(xdev);
+		if (!roce_lag_support) {
+			xsc_core_info(xdev, "create ROCE LAG while sriov is open\n");
+			break;
+		}
+
+		roce_lag_support &= radix_tree_empty(&xdev->priv_device.bdf_tree);
+		if (!roce_lag_support) {
+			xsc_core_info(xdev, "create ROCE LAG while the ib device is open\n");
+			break;
+		}
+	}
+
+	return roce_lag_support;
+}
+
+/* SR-IOV LAG requires every slave's eswitch to be in switchdev
+ * (XSC_ESWITCH_OFFLOADS) mode.
+ */
+static bool xsc_is_sriov_lag_allowed(struct xsc_lag *lag)
+{
+	struct xsc_core_device *xdev;
+	bool sriov_lag_support = true;
+
+	list_for_each_entry(xdev, &lag->slave_list, slave_node) {
+		sriov_lag_support &= (xdev->priv.eswitch->mode == XSC_ESWITCH_OFFLOADS);
+		if (!sriov_lag_support)
+			xsc_core_info(xdev, "create SRIOV LAG while the switchdev is not open\n");
+	}
+
+	return sriov_lag_support;
+}
+
+static u8 xsc_get_lag_type(struct xsc_lag *lag)
+{
+	u8 lag_type;
+	bool roce_lag;
+	bool
sriov_lag; + u8 lag_mode_support; + + lag_mode_support = (lag->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP || + lag->tx_type == NETDEV_LAG_TX_TYPE_HASH); + roce_lag = lag_mode_support && xsc_is_roce_lag_allowed(lag); + sriov_lag = lag_mode_support && xsc_is_sriov_lag_allowed(lag); + lag_type = sriov_lag ? XSC_LAG_FLAG_SRIOV : + (roce_lag ? XSC_LAG_FLAG_ROCE : XSC_LAG_FLAG_KERNEL); + + return lag_type; +} + +static inline void pack_add_and_wake_wq(struct xsc_board_lag *board_lag, + struct xsc_lag_event *entry) +{ + spin_lock(&board_lag->lag_event_list.lock); + list_add_tail(&entry->node, &board_lag->lag_event_list.head); + spin_unlock(&board_lag->lag_event_list.lock); + board_lag->lag_event_list.wait_flag = XSC_WAKEUP; + wake_up(&board_lag->lag_event_list.wq); +} + +static inline enum lag_slave_status lag_slave_status_get(struct net_device *ndev) +{ + struct slave *slave = NULL; + enum lag_slave_status slave_status = XSC_LAG_SLAVE_STATUS_MAX; + + if (!netif_is_bond_slave(ndev)) + goto out; + + rcu_read_lock(); + slave = bond_slave_get_rtnl(ndev); + rcu_read_unlock(); + if (bond_slave_is_up(slave) && bond_slave_can_tx(slave)) + slave_status = XSC_LAG_SLAVE_ACTIVE; + else + slave_status = XSC_LAG_SLAVE_INACTIVE; + +out: + return slave_status; +} + +void pack_lag_create(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + struct net_device *ndev = xdev->netdev; + struct xsc_lag_event *entry; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (lag->mode_changes_in_progress) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + lag->lag_type = xsc_get_lag_type(lag); + + entry->event_type = XSC_LAG_CREATE; + entry->xdev = xdev; + entry->lag_sel_mode = lag->hash_type; + entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + entry->lag_type = lag->lag_type; + entry->slave_status = lag_slave_status_get(ndev); + + xsc_core_info(xdev, "lag_sel_mode = %d, slave_status = %d, lag_type = %d\n", + 
entry->lag_sel_mode, entry->slave_status, entry->lag_type); + + if (!no_wq) + pack_add_and_wake_wq(board_lag, entry); + else + xsc_create_lag(entry); +} + +void pack_lag_add_member(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + struct xsc_lag_event *entry; + struct net_device *ndev = xdev->netdev; + struct xsc_core_device *roce_lag_xdev = NULL; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (lag->mode_changes_in_progress) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + entry->lag_type = xsc_get_lag_type(lag); + if (entry->lag_type != lag->lag_type) { + xsc_core_err(xdev, "do not permit add slave to different type lag, xdev_lag_type = %d, lag_type = %d\n", + entry->lag_type, lag->lag_type); + + kfree(entry); + return; + } + + entry->event_type = XSC_LAG_ADD_MEMBER; + entry->xdev = xdev; + entry->lag_sel_mode = lag->hash_type; + entry->slave_status = lag_slave_status_get(ndev); + entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + + roce_lag_xdev = list_first_entry(&lag->slave_list, + struct xsc_core_device, slave_node); + entry->roce_lag_xdev = roce_lag_xdev; + entry->not_roce_lag_xdev_mask = lag->not_roce_lag_xdev_mask; + + xsc_core_info(xdev, "lag_sel_mode = %d, slave_status = %d, lag_type = %d\n", + entry->lag_sel_mode, entry->slave_status, entry->lag_type); + + if (!no_wq) + pack_add_and_wake_wq(board_lag, entry); + else + xsc_add_lag_member(entry); +} + +void pack_lag_remove_member(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + struct xsc_lag_event *entry; + struct xsc_core_device *roce_lag_xdev = NULL; + struct xsc_core_device *xdev_tmp = NULL; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + u8 cnt = 0; + u8 not_roce_lag_xdev_mask = 0; + + if (lag->mode_changes_in_progress) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + entry->event_type = XSC_LAG_REMOVE_MEMBER; + entry->xdev = xdev; + 
entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + entry->lag_type = lag->lag_type; + + if (entry->lag_type & XSC_LAG_FLAG_ROCE) { + roce_lag_xdev = list_first_entry(&lag->slave_list, + struct xsc_core_device, slave_node); + if (roce_lag_xdev == xdev) { + entry->is_roce_lag_xdev = 1; + + list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) { + cnt++; + if (cnt == 1) + continue; + + if (cnt == 2) { + roce_lag_xdev = xdev_tmp; + continue; + } + + not_roce_lag_xdev_mask |= BIT(xdev_tmp->pf_id); + } + entry->roce_lag_xdev = roce_lag_xdev; + entry->not_roce_lag_xdev_mask = not_roce_lag_xdev_mask; + } + } + + xsc_core_info(xdev, "lag_type = %d, is_roce_lag_xdev = %d, not_roce_lag_xdev_mask = %d\n", + entry->lag_type, entry->is_roce_lag_xdev, entry->not_roce_lag_xdev_mask); + + if (!no_wq) + pack_add_and_wake_wq(board_lag, entry); + else + xsc_remove_lag_member(entry); +} + +void pack_lag_update_member_status(struct xsc_lag *lag, + struct net_device *ndev, enum lag_slave_status slave_status) +{ + struct xsc_lag_event *entry; + struct xsc_adapter *adapter = netdev_priv(ndev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (lag->mode_changes_in_progress || lag->lag_type & XSC_LAG_FLAG_KERNEL) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + entry->event_type = XSC_LAG_UPDATE_MEMBER_STATUS; + entry->xdev = xdev; + entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + entry->lag_type = lag->lag_type; + entry->slave_status = slave_status; + + xsc_core_info(xdev, "lag_id = %d, slave_status = %d\n", + entry->lag_id, entry->slave_status); + + pack_add_and_wake_wq(board_lag, entry); +} + +void pack_lag_update_hash_type(struct xsc_lag *lag, + u8 bond_id, enum netdev_lag_hash hash_type) +{ + struct xsc_lag_event *entry; + struct xsc_core_device *xdev = NULL; + struct xsc_board_lag *board_lag; + + if (lag->mode_changes_in_progress || 
lag->lag_type & XSC_LAG_FLAG_KERNEL) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + xdev = list_first_entry(&lag->slave_list, + struct xsc_core_device, slave_node); + + board_lag = xsc_board_lag_get(xdev); + + entry->event_type = XSC_LAG_UPDATE_HASH_TYPE; + entry->xdev = xdev; + entry->lag_id = lag->lag_id; + entry->lag_sel_mode = lag->hash_type; + + xsc_core_info(xdev, "lag_id = %d, lag_sel_mode = %d\n", + entry->lag_id, entry->lag_sel_mode); + + pack_add_and_wake_wq(board_lag, entry); +} + +void pack_lag_destroy(struct xsc_lag *lag, struct xsc_core_device *xdev, bool no_wq) +{ + struct xsc_lag_event *entry; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (lag->mode_changes_in_progress) + return; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return; + + entry->event_type = XSC_LAG_DESTROY; + entry->xdev = xdev; + entry->lag_id = lag->lag_id; + entry->bond_mode = lag->bond_mode; + entry->lag_type = lag->lag_type; + + lag->lag_type = 0; + + xsc_core_info(xdev, "lag_id = %d, board_id = %d, lag_type = %d\n", + lag->lag_id, lag->board_id, entry->lag_type); + + if (!no_wq) + pack_add_and_wake_wq(board_lag, entry); + else + xsc_destroy_lag(entry); +} + +static u8 xsc_get_valid_bond_id(struct xsc_board_lag *board_lag) +{ + u8 bond_valid_mask = board_lag->bond_valid_mask; + u8 i; + + for (i = 0; i < XSC_BOARD_LAG_MAX; i++) { + if (!(bond_valid_mask & BIT(i))) { + board_lag->bond_valid_mask = (bond_valid_mask | BIT(i)); + return i; + } + } + return BOND_ID_INVALID; +} + +static void xsc_lag_setup(struct xsc_board_lag *board_lag, + struct net_device *upper, struct xsc_core_device *xdev, bool no_wq) +{ + struct bonding *bond = netdev_priv(upper); + struct xsc_lag *lag = NULL; + u8 bond_id; + + bond_id = xsc_get_valid_bond_id(board_lag); + + if (bond_id == BOND_ID_INVALID) + return; + + xdev->bond_id = bond_id; + lag = &board_lag->xsc_lag[xdev->bond_id]; + + INIT_LIST_HEAD(&lag->slave_list); + 
list_add(&xdev->slave_node, &lag->slave_list); + lag->xsc_member_cnt = 1; + lag->bond_dev = upper; + lag->bond_mode = BOND_MODE(bond); + lag->tx_type = bond_lag_tx_type(bond); + lag->hash_type = xsc_lag_hashtype_convert(bond_lag_hash_type(bond)); + lag->board_id = xdev->board_info->board_id; + lag->lag_id = xdev->caps.lag_logic_port_ofst + xdev->bond_id; + + xsc_core_info(xdev, "lag_id = %d, board_id = %d, bond_mode = %d\n", + lag->lag_id, lag->board_id, lag->bond_mode); + + pack_lag_create(lag, xdev, false); +} + +static bool xsc_is_ndev_xsc_pf(struct net_device *slave_ndev) +{ + struct device *dev = &slave_ndev->dev; + struct pci_dev *pdev = to_pci_dev(dev->parent); + + return (pdev->device == XSC_MS_PF_DEV_ID || + pdev->device == XSC_MV_SOC_PF_DEV_ID); +} + +static u8 xsc_get_bond_board_xsc_cnt(struct net_device *upper, + u32 board_id) +{ + struct xsc_adapter *adapter; + struct xsc_core_device *xdev; + struct net_device *ndev_tmp; + u8 slave_cnt = 0; + + rcu_read_lock(); + for_each_netdev_in_bond_rcu(upper, ndev_tmp) { + if (!ndev_tmp) + continue; + if (xsc_is_ndev_xsc_pf(ndev_tmp)) { + adapter = netdev_priv(ndev_tmp); + xdev = adapter->xdev; + if (xdev->board_info->board_id == board_id) + slave_cnt++; + } + } + rcu_read_unlock(); + + return slave_cnt; +} + +static void xsc_lag_member_add(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + list_add_tail(&xdev->slave_node, &lag->slave_list); + lag->xsc_member_cnt++; + lag->not_roce_lag_xdev_mask |= BIT(xdev->pf_id); + + xsc_core_dbg(xdev, "xsc_member_cnt = %d\n", + lag->xsc_member_cnt); + + pack_lag_add_member(lag, xdev, no_wq); +} + +static void xsc_lag_member_remove(struct xsc_lag *lag, + struct xsc_core_device *xdev, bool no_wq) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + u8 bond_valid_mask; + + lag->xsc_member_cnt--; + + xsc_core_info(xdev, "xsc_member_cnt = %d\n", + lag->xsc_member_cnt); + + if (lag->xsc_member_cnt > 0) { + pack_lag_remove_member(lag, xdev, no_wq); + } 
else { + pack_lag_destroy(lag, xdev, no_wq); + + lag->lag_id = LAG_ID_INVALID; + lag->board_id = BOARD_ID_INVALID; + + bond_valid_mask = board_lag->bond_valid_mask; + board_lag->bond_valid_mask = bond_valid_mask & ~BIT(xdev->bond_id); + } + + list_del(&xdev->slave_node); + xdev->bond_id = BOND_ID_INVALID; +} + +static void xsc_lag_update_member(struct xsc_lag *lag, + struct net_device *ndev, struct net_device *upper, u8 bond_id) +{ + struct xsc_adapter *adapter = netdev_priv(ndev); + struct xsc_core_device *xdev = adapter->xdev; + u8 xsc_slave_cnt = xsc_get_bond_board_xsc_cnt(upper, lag->board_id); + + xsc_core_dbg(xdev, "xsc_slave_cnt = %d, old_xsc_slave_cnt = %d\n", + xsc_slave_cnt, lag->xsc_member_cnt); + + if (xsc_slave_cnt > lag->xsc_member_cnt) + xsc_lag_member_add(lag, xdev, false); + + if (xsc_slave_cnt < lag->xsc_member_cnt) + xsc_lag_member_remove(lag, xdev, false); +} + +static u8 xsc_get_upper_bond_id(struct net_device *bond_ndev, + struct net_device *ndev, struct xsc_board_lag *board_lag, + bool hash_change) +{ + u8 i; + struct xsc_lag *lag; + u8 bond_valid_mask = board_lag->bond_valid_mask; + struct xsc_adapter *adapter; + struct xsc_core_device *xdev; + u8 bond_id = BOND_ID_INVALID; + + for (i = 0; i < XSC_BOARD_LAG_MAX; i++) { + if (bond_valid_mask & BIT(i)) { + lag = &board_lag->xsc_lag[i]; + if (!hash_change) { + adapter = netdev_priv(ndev); + xdev = adapter->xdev; + if (lag->bond_dev == bond_ndev && + lag->board_id == xdev->board_info->board_id) { + bond_id = i; + break; + } + } else { + if (lag->bond_dev == bond_ndev) { + bond_id = i; + break; + } + } + } + } + + return bond_id; +} + +static struct xsc_board_lag *xsc_board_lag_filter(struct xsc_board_lag *board_lag, + struct net_device *ndev) +{ + struct xsc_adapter *adapter; + struct xsc_core_device *xdev; + + if (xsc_is_ndev_xsc_pf(ndev)) { + adapter = netdev_priv(ndev); + xdev = adapter->xdev; + if (xdev->board_info->board_id == board_lag->board_id) + return board_lag; + } + + return NULL; +} 
+ +static void xsc_handle_changeupper_event(struct xsc_board_lag *board_lag, + struct net_device *ndev, + struct netdev_notifier_changeupper_info *info) +{ + struct xsc_adapter *adapter; + struct xsc_core_device *xdev; + struct net_device *upper = info->upper_dev; + u8 bond_id; + struct xsc_lag *lag; + + if (!netif_is_lag_master(upper) || !ndev) + return; + + mutex_lock(&board_lag->lock); + if (!xsc_board_lag_filter(board_lag, ndev)) { + mutex_unlock(&board_lag->lock); + return; + } + + adapter = netdev_priv(ndev); + xdev = adapter->xdev; + + bond_id = xsc_get_upper_bond_id(upper, ndev, board_lag, false); + xdev->bond_id = bond_id; + + xsc_core_dbg(xdev, "bond_id = %d\n", bond_id); + + if (bond_id != BOND_ID_INVALID) { + lag = &board_lag->xsc_lag[bond_id]; + xsc_lag_update_member(lag, ndev, upper, bond_id); + if (lag->xsc_member_cnt == 0) + memset(lag, 0, sizeof(*lag)); + } else { + xsc_lag_setup(board_lag, upper, xdev, false); + } + mutex_unlock(&board_lag->lock); +} + +static void xsc_handle_changelowerstate_event(struct xsc_board_lag *board_lag, + struct net_device *ndev, + struct netdev_notifier_changelowerstate_info *info) +{ + struct netdev_lag_lower_state_info *lag_lower_info; + struct net_device *bond_dev; + struct slave *slave; + struct xsc_lag *lag; + u8 bond_id; + enum lag_slave_status slave_status = XSC_LAG_SLAVE_INACTIVE; + + if (!netif_is_lag_port(ndev) || !info->lower_state_info) + return; + + rcu_read_lock(); + slave = bond_slave_get_rtnl(ndev); + rcu_read_unlock(); + if (!slave || !slave->bond || !slave->bond->dev) + return; + + bond_dev = slave->bond->dev; + + lag_lower_info = info->lower_state_info; + if (lag_lower_info->link_up && lag_lower_info->tx_enabled) + slave_status = XSC_LAG_SLAVE_ACTIVE; + + mutex_lock(&board_lag->lock); + if (!xsc_board_lag_filter(board_lag, ndev)) { + mutex_unlock(&board_lag->lock); + return; + } + + bond_id = xsc_get_upper_bond_id(bond_dev, ndev, board_lag, false); + if (bond_id == BOND_ID_INVALID) { + 
mutex_unlock(&board_lag->lock); + return; + } + + lag = &board_lag->xsc_lag[bond_id]; + pack_lag_update_member_status(lag, ndev, slave_status); + mutex_unlock(&board_lag->lock); +} + +static void xsc_handle_changehash_event(struct xsc_board_lag *board_lag, + struct net_device *ndev) +{ + struct bonding *bond; + enum netdev_lag_hash hash_type; + struct xsc_lag *lag; + u8 bond_id; + + if (!netif_is_lag_master(ndev)) + return; + + bond = netdev_priv(ndev); + if (!bond_mode_uses_xmit_hash(bond)) + return; + + mutex_lock(&board_lag->lock); + bond_id = xsc_get_upper_bond_id(ndev, NULL, board_lag, true); + if (bond_id == BOND_ID_INVALID) { + mutex_unlock(&board_lag->lock); + return; + } + + lag = &board_lag->xsc_lag[bond_id]; + hash_type = xsc_lag_hashtype_convert(bond_lag_hash_type(bond)); + + if (hash_type != lag->hash_type) { + lag->hash_type = hash_type; + pack_lag_update_hash_type(lag, bond_id, hash_type); + } + mutex_unlock(&board_lag->lock); +} + +static int xsc_lag_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct xsc_board_lag *board_lag; + + if (event != NETDEV_CHANGE && event != NETDEV_CHANGEUPPER && + event != NETDEV_CHANGELOWERSTATE) + return NOTIFY_DONE; + + board_lag = container_of(this, struct xsc_board_lag, nb); + if (!board_lag) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGEUPPER: + xsc_handle_changeupper_event(board_lag, ndev, ptr); + break; + case NETDEV_CHANGELOWERSTATE: + xsc_handle_changelowerstate_event(board_lag, ndev, ptr); + break; + case NETDEV_CHANGE: + xsc_handle_changehash_event(board_lag, ndev); + break; + } + + return NOTIFY_DONE; +} + +static struct xsc_board_lag *xsc_board_lag_dev_alloc(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag; + struct lag_event_list *lag_event_list; + int err; + + board_lag = kzalloc(sizeof(*board_lag), GFP_KERNEL); + if (!board_lag) + return NULL; + + lag_event_list = 
&board_lag->lag_event_list; + + INIT_LIST_HEAD(&lag_event_list->head); + spin_lock_init(&lag_event_list->lock); + init_waitqueue_head(&lag_event_list->wq); + lag_event_list->wait_flag = XSC_SLEEP; + lag_event_list->bond_poll_task = + kthread_create(xsc_do_bond_thread, (void *)board_lag, "xsc board lag"); + if (lag_event_list->bond_poll_task) + wake_up_process(lag_event_list->bond_poll_task); + + board_lag->nb.notifier_call = xsc_lag_netdev_event; + err = register_netdevice_notifier(&board_lag->nb); + if (err) + goto err_create_notifier; + + kref_init(&board_lag->ref); + mutex_init(&board_lag->lock); + board_lag->bond_valid_mask = 0; + + return board_lag; + +err_create_notifier: + xsc_core_err(xdev, "failed to register LAG netdev notifier\n"); + board_lag->nb.notifier_call = NULL; + kthread_stop(lag_event_list->bond_poll_task); + kfree(board_lag); + + return NULL; +} + +static int __xsc_lag_add_xdev(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + if (!board_lag) { + board_lag = xsc_board_lag_dev_alloc(xdev); + if (!board_lag) + return -EPIPE; + xsc_board_lag_set(xdev, board_lag); + } else { + kref_get(&board_lag->ref); + } + + xdev->bond_id = BOND_ID_INVALID; + + return 0; +} + +void xsc_lag_add_xdev(struct xsc_core_device *xdev) +{ + int err; + + xsc_dev_list_lock(); + err = __xsc_lag_add_xdev(xdev); + xsc_dev_list_unlock(); + + if (err) + xsc_core_dbg(xdev, "add xdev err=%d\n", err); +} +EXPORT_SYMBOL(xsc_lag_add_xdev); + +static void xsc_lag_dev_free(struct kref *ref) +{ + struct xsc_board_lag *board_lag = container_of(ref, struct xsc_board_lag, ref); + struct lag_event_list *lag_event_list = &board_lag->lag_event_list; + + if (board_lag->nb.notifier_call) + unregister_netdevice_notifier(&board_lag->nb); + + lag_event_list->wait_flag = XSC_EXIT; + wake_up(&lag_event_list->wq); + if (lag_event_list->bond_poll_task) + kthread_stop(lag_event_list->bond_poll_task); + + board_lag->nb.notifier_call = NULL; + 
mutex_destroy(&board_lag->lock); + + xsc_board_lag_reset(board_lag->board_id); + kfree(board_lag); +} + +void xsc_lag_remove_xdev(struct xsc_core_device *xdev) +{ + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + xsc_dev_list_lock(); + if (board_lag) + kref_put(&board_lag->ref, xsc_lag_dev_free); + xsc_dev_list_unlock(); +} +EXPORT_SYMBOL(xsc_lag_remove_xdev); + +void xsc_lag_disable(struct xsc_core_device *xdev) +{ + struct xsc_lag *lag; + struct xsc_core_device *xdev_tmp = NULL; + u8 cnt = 0; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + mutex_lock(&board_lag->lock); + lag = __xsc_get_lag(xdev); + if (!lag || !__xsc_lag_is_active(lag)) { + mutex_unlock(&board_lag->lock); + return; + } + + list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) { + cnt++; + if (cnt == lag->xsc_member_cnt) + pack_lag_destroy(lag, xdev_tmp, false); + else + pack_lag_remove_member(lag, xdev_tmp, false); + } + + lag->mode_changes_in_progress++; + mutex_unlock(&board_lag->lock); +} +EXPORT_SYMBOL(xsc_lag_disable); + +void xsc_lag_enable(struct xsc_core_device *xdev) +{ + struct xsc_lag *lag; + struct xsc_core_device *xdev_tmp = NULL; + u8 cnt = 0; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + + mutex_lock(&board_lag->lock); + lag = __xsc_get_lag(xdev); + if (!lag || __xsc_lag_is_active(lag)) { + mutex_unlock(&board_lag->lock); + return; + } + + lag->mode_changes_in_progress--; + list_for_each_entry(xdev_tmp, &lag->slave_list, slave_node) { + if (cnt == 0) + pack_lag_create(lag, xdev_tmp, false); + else + pack_lag_add_member(lag, xdev_tmp, false); + + cnt++; + } + mutex_unlock(&board_lag->lock); +} +EXPORT_SYMBOL(xsc_lag_enable); + +void xsc_lag_add_netdev(struct net_device *ndev) +{ + struct xsc_adapter *adapter = netdev_priv(ndev); + struct xsc_core_device *xdev = adapter->xdev; + struct bonding *bond = NULL; + struct net_device *upper = NULL; + struct slave *slave; + u8 bond_id = BOND_ID_INVALID; + struct xsc_board_lag *board_lag 
= xsc_board_lag_get(xdev); + struct xsc_lag *lag; + + if (!board_lag || ndev->reg_state != NETREG_REGISTERED || + !netif_is_bond_slave(ndev)) + return; + + rcu_read_lock(); + slave = bond_slave_get_rcu(ndev); + rcu_read_unlock(); + bond = bond_get_bond_by_slave(slave); + upper = bond->dev; + + mutex_lock(&board_lag->lock); + bond_id = xsc_get_upper_bond_id(upper, ndev, board_lag, false); + xdev->bond_id = bond_id; + lag = __xsc_get_lag(xdev); + + if (bond_id != BOND_ID_INVALID) + xsc_lag_member_add(lag, xdev, true); + else + xsc_lag_setup(board_lag, upper, xdev, true); + mutex_unlock(&board_lag->lock); +} +EXPORT_SYMBOL(xsc_lag_add_netdev); + +void xsc_lag_remove_netdev(struct net_device *ndev) +{ + struct xsc_adapter *adapter = netdev_priv(ndev); + struct xsc_core_device *xdev = adapter->xdev; + struct xsc_board_lag *board_lag = xsc_board_lag_get(xdev); + struct xsc_lag *lag; + + if (!board_lag) + return; + + mutex_lock(&board_lag->lock); + lag = __xsc_get_lag(xdev); + if (!lag) + goto out; + + if (__xsc_lag_is_active(lag)) { + xsc_lag_member_remove(lag, xdev, true); + if (lag->xsc_member_cnt == 0) + memset(lag, 0, sizeof(*lag)); + } + +out: + mutex_unlock(&board_lag->lock); +} +EXPORT_SYMBOL(xsc_lag_remove_netdev); + +bool xsc_lag_is_roce(struct xsc_core_device *xdev) +{ + struct xsc_lag *lag; + + lag = __xsc_get_lag(xdev); + if (!lag) + return false; + + return __xsc_lag_is_roce(lag); +} +EXPORT_SYMBOL(xsc_lag_is_roce); + +struct xsc_lag *xsc_get_lag(struct xsc_core_device *xdev) +{ + return __xsc_get_lag(xdev); +} +EXPORT_SYMBOL(xsc_get_lag); + +u16 xsc_get_lag_id(struct xsc_core_device *xdev) +{ + struct xsc_lag *lag; + u16 lag_id = LAG_ID_INVALID; + + xsc_board_lag_lock(xdev); + lag = __xsc_get_lag(xdev); + if (lag && __xsc_lag_is_active(lag) && !__xsc_lag_is_kernel(lag)) + lag_id = lag->lag_id; + xsc_board_lag_unlock(xdev); + + return lag_id; +} +EXPORT_SYMBOL(xsc_get_lag_id); + +struct xsc_core_device *xsc_get_roce_lag_xdev(struct xsc_core_device *xdev) +{ 
+ struct xsc_core_device *roce_lag_xdev; + struct xsc_lag *lag; + + xsc_board_lag_lock(xdev); + if (xsc_lag_is_roce(xdev)) { + lag = __xsc_get_lag(xdev); + roce_lag_xdev = list_first_entry(&lag->slave_list, + struct xsc_core_device, slave_node); + } else { + roce_lag_xdev = xdev; + } + xsc_board_lag_unlock(xdev); + + return roce_lag_xdev; +} +EXPORT_SYMBOL(xsc_get_roce_lag_xdev); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c new file mode 100644 index 000000000000..2e63e13bc97d --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.c @@ -0,0 +1,909 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/xsc_ioctl.h" +#include "common/xsc_hsi.h" +#include "common/xsc_lag.h" +#include "common/xsc_port_ctrl.h" +#include +#include +#include +#include +#include "xsc_pci_ctrl.h" +#include "common/res_obj.h" + +#define FEATURE_ONCHIP_FT_MASK BIT(4) +#define FEATURE_DMA_RW_TBL_MASK BIT(8) +#define FEATURE_PCT_EXP_MASK BIT(19) + +#define XSC_PCI_CTRL_NAME "pci_ctrl" + +static int xsc_pci_ctrl_modify_qp(struct xsc_core_device *xdev, void *in, void *out) +{ + int ret = 0, i = 0; + struct xsc_ioctl_qp_range *resp; + struct xsc_ioctl_data_tl *tl; + int insize; + struct xsc_modify_qp_mbox_in *mailin; + struct xsc_modify_qp_mbox_out mailout; + u32 qpn; + + tl = (struct xsc_ioctl_data_tl *)out; + resp = (struct xsc_ioctl_qp_range *)(tl + 1); + xsc_core_dbg(xdev, "xsc_ioctl_qp_range: qpn:%d, num:%d, opcode:%d\n", + resp->qpn, resp->num, resp->opcode); + if (resp->num == 0) { + xsc_core_dbg(xdev, "xsc_ioctl_qp_range: resp->num ==0\n"); + return 0; + } + qpn = resp->qpn; + insize = sizeof(struct xsc_modify_qp_mbox_in); + mailin = kvzalloc(insize, GFP_KERNEL); + if (!mailin) { + xsc_core_dbg(xdev, 
"xsc_ioctl_qp_range: enomem\n"); + return -ENOMEM; + } + for (i = 0; i < resp->num; i++) { + mailin->hdr.opcode = cpu_to_be16(resp->opcode); + mailin->qpn = cpu_to_be32(qpn + i); + ret = xsc_cmd_exec(xdev, mailin, insize, &mailout, sizeof(mailout)); + xsc_core_dbg(xdev, "modify qp state qpn:%d\n", qpn + i); + } + kvfree(mailin); + + return ret; +} + +static struct pci_dev *xsc_pci_get_pcidev_by_bus_and_slot(int domain, uint32_t bus, uint32_t devfn) +{ + return pci_get_domain_bus_and_slot(domain, bus, devfn); +} + +struct xsc_core_device *xsc_pci_get_xdev_by_bus_and_slot(int domain, uint32_t bus, uint32_t devfn) +{ + struct pci_dev *pdev = NULL; + struct xsc_core_device *xdev = NULL; + + pdev = xsc_pci_get_pcidev_by_bus_and_slot(domain, bus, devfn); + if (!pdev) + return NULL; + + xdev = pci_get_drvdata(pdev); + + return xdev; +} + +static int xsc_pci_ctrl_get_phy(struct xsc_core_device *xdev, + void *in, void *out) +{ + int ret = 0; + struct xsc_eswitch *esw = xdev->priv.eswitch; + struct xsc_ioctl_data_tl *tl = (struct xsc_ioctl_data_tl *)out; + struct xsc_ioctl_get_phy_info_res *resp; + u16 lag_id = xsc_get_lag_id(xdev); + struct xsc_core_device *rl_xdev; + + switch (tl->opmod) { + case XSC_IOCTL_OP_GET_LOCAL: + resp = (struct xsc_ioctl_get_phy_info_res *)(tl + 1); + + resp->pcie_no = xdev->pcie_no; + resp->func_id = xdev->glb_func_id; + resp->pcie_host = xdev->caps.pcie_host; + resp->mac_phy_port = xdev->mac_port; + resp->funcid_to_logic_port_off = xdev->caps.funcid_to_logic_port; + resp->lag_id = lag_id; + resp->raw_qp_id_base = xdev->caps.raweth_qp_id_base; + resp->raw_rss_qp_id_base = xdev->caps.raweth_rss_qp_id_base; + resp->lag_port_start = xdev->caps.lag_logic_port_ofst; + resp->send_seg_num = xdev->caps.send_ds_num; + resp->recv_seg_num = xdev->caps.recv_ds_num; + resp->raw_tpe_qp_num = xdev->caps.raw_tpe_qp_num; + resp->chip_version = xdev->chip_ver_l; + resp->on_chip_tbl_vld = + (xdev->feature_flag & FEATURE_ONCHIP_FT_MASK) ? 
1 : 0; + resp->dma_rw_tbl_vld = + (xdev->feature_flag & FEATURE_DMA_RW_TBL_MASK) ? 1 : 0; + resp->pct_compress_vld = + (xdev->feature_flag & FEATURE_PCT_EXP_MASK) ? 1 : 0; + + xsc_core_dbg(xdev, "%d,%d,%d,%d,%d,%d\n", + resp->pcie_no, resp->func_id, resp->pcie_host, + resp->mac_phy_port, resp->lag_id, + resp->funcid_to_logic_port_off); + resp->pf0_vf_funcid_base = xdev->caps.pf0_vf_funcid_base; + resp->pf0_vf_funcid_top = xdev->caps.pf0_vf_funcid_top; + resp->pf1_vf_funcid_base = xdev->caps.pf1_vf_funcid_base; + resp->pf1_vf_funcid_top = xdev->caps.pf1_vf_funcid_top; + resp->pcie0_pf_funcid_base = xdev->caps.pcie0_pf_funcid_base; + resp->pcie0_pf_funcid_top = xdev->caps.pcie0_pf_funcid_top; + resp->pcie1_pf_funcid_base = xdev->caps.pcie1_pf_funcid_base; + resp->pcie1_pf_funcid_top = xdev->caps.pcie1_pf_funcid_top; + resp->hca_core_clock = xdev->caps.hca_core_clock; + resp->mac_bit = xdev->caps.mac_bit; + if (xsc_core_is_pf(xdev)) { + mutex_lock(&esw->mode_lock); + resp->esw_mode = esw->mode; + mutex_unlock(&esw->mode_lock); + } else { + resp->esw_mode = 0; + } + resp->board_id = xdev->board_info->board_id; + break; + + case XSC_IOCTL_OP_GET_INFO_BY_BDF: + resp = (struct xsc_ioctl_get_phy_info_res *)(tl + 1); + + xsc_core_dbg(xdev, "ioctrl get_pcidev. 
domain=%u, bus=%u, devfn=%u\n", + resp->domain, resp->bus, resp->devfn); + + rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(resp->domain, resp->bus, resp->devfn); + if (!rl_xdev) + return -1; + + resp->pcie_no = rl_xdev->pcie_no; + resp->func_id = rl_xdev->glb_func_id; + resp->pcie_host = rl_xdev->caps.pcie_host; + resp->mac_phy_port = rl_xdev->mac_port; + resp->funcid_to_logic_port_off = rl_xdev->caps.funcid_to_logic_port; + resp->lag_id = lag_id; + resp->raw_qp_id_base = rl_xdev->caps.raweth_qp_id_base; + resp->raw_rss_qp_id_base = xdev->caps.raweth_rss_qp_id_base; + resp->lag_port_start = xdev->caps.lag_logic_port_ofst; + resp->send_seg_num = rl_xdev->caps.send_ds_num; + resp->recv_seg_num = rl_xdev->caps.recv_ds_num; + resp->raw_tpe_qp_num = rl_xdev->caps.raw_tpe_qp_num; + resp->chip_version = rl_xdev->chip_ver_l; + resp->on_chip_tbl_vld = + (rl_xdev->feature_flag & FEATURE_ONCHIP_FT_MASK) ? 1 : 0; + resp->dma_rw_tbl_vld = + (rl_xdev->feature_flag & FEATURE_DMA_RW_TBL_MASK) ? 1 : 0; + resp->pct_compress_vld = + (rl_xdev->feature_flag & FEATURE_PCT_EXP_MASK) ? 
1 : 0; + + xsc_core_dbg(xdev, "%d,%d,%d,%d,%d,%d\n", + resp->pcie_no, resp->func_id, resp->pcie_host, + resp->mac_phy_port, resp->lag_id, + resp->funcid_to_logic_port_off); + resp->pf0_vf_funcid_base = rl_xdev->caps.pf0_vf_funcid_base; + resp->pf0_vf_funcid_top = rl_xdev->caps.pf0_vf_funcid_top; + resp->pf1_vf_funcid_base = rl_xdev->caps.pf1_vf_funcid_base; + resp->pf1_vf_funcid_top = rl_xdev->caps.pf1_vf_funcid_top; + resp->pcie0_pf_funcid_base = rl_xdev->caps.pcie0_pf_funcid_base; + resp->pcie0_pf_funcid_top = rl_xdev->caps.pcie0_pf_funcid_top; + resp->pcie1_pf_funcid_base = rl_xdev->caps.pcie1_pf_funcid_base; + resp->pcie1_pf_funcid_top = rl_xdev->caps.pcie1_pf_funcid_top; + resp->board_id = xdev->board_info->board_id; + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int xsc_pci_ctrl_get_contextinfo(struct xsc_core_device *xdev, + void *in, void *out) +{ + int ret = 0; + struct xsc_ioctl_data_tl *tl = (struct xsc_ioctl_data_tl *)out; + struct xsc_alloc_ucontext_req *req; + struct xsc_alloc_ucontext_resp *resp; + struct xsc_core_device *rl_xdev = NULL; + + if (tl->opmod != XSC_IOCTL_OP_GET_CONTEXT) + return -EINVAL; + + req = (struct xsc_alloc_ucontext_req *)(tl + 1); + xsc_core_dbg(xdev, "xsc_tdi_alloc_context req:\n"); + xsc_core_dbg(xdev, "req->domain=%u\n", req->domain); + xsc_core_dbg(xdev, "req->bus=%u\n", req->bus); + xsc_core_dbg(xdev, "req->devfn=%u\n", req->devfn); + + rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(req->domain, req->bus, req->devfn); + if (!rl_xdev) + return -1; + + resp = (struct xsc_alloc_ucontext_resp *)(tl + 1); + + resp->max_cq = 1 << rl_xdev->caps.log_max_cq; + resp->max_qp = 1 << rl_xdev->caps.log_max_qp; + resp->max_rwq_indirection_table_size = rl_xdev->caps.max_rwq_indirection_table_size; + resp->qpm_tx_db = rl_xdev->regs.tx_db; + resp->qpm_rx_db = rl_xdev->regs.rx_db; + resp->cqm_next_cid_reg = rl_xdev->regs.complete_reg; + resp->cqm_armdb = rl_xdev->regs.complete_db; + resp->send_ds_num = 
rl_xdev->caps.send_ds_num;
+	resp->recv_ds_num = rl_xdev->caps.recv_ds_num;
+	resp->send_ds_shift = rl_xdev->caps.send_wqe_shift;
+	resp->recv_ds_shift = rl_xdev->caps.recv_wqe_shift;
+	resp->glb_func_id = rl_xdev->glb_func_id;
+
+	resp->max_wqes = rl_xdev->caps.max_wqes;
+
+	xsc_core_dbg(xdev, "xsc_tdi_alloc_context resp:\n");
+	xsc_core_dbg(xdev, "resp->max_cq=%u\n", resp->max_cq);
+	xsc_core_dbg(xdev, "resp->max_qp=%u\n", resp->max_qp);
+	xsc_core_dbg(xdev, "resp->qpm_tx_db=%llx\n", resp->qpm_tx_db);
+	xsc_core_dbg(xdev, "resp->qpm_rx_db=%llx\n", resp->qpm_rx_db);
+	xsc_core_dbg(xdev, "resp->cqm_next_cid_reg=%llx\n", resp->cqm_next_cid_reg);
+	xsc_core_dbg(xdev, "resp->cqm_armdb=%llx\n", resp->cqm_armdb);
+	xsc_core_dbg(xdev, "resp->send_ds_num=%u\n", resp->send_ds_num);
+	xsc_core_dbg(xdev, "resp->send_ds_shift=%u\n", resp->send_ds_shift);
+	xsc_core_dbg(xdev, "resp->:recv_ds_num=%u\n", resp->recv_ds_num);
+	xsc_core_dbg(xdev, "resp->recv_ds_shift=%u\n", resp->recv_ds_shift);
+	xsc_core_dbg(xdev, "resp->glb_func_id=%u\n", resp->glb_func_id);
+
+	return ret;
+}
+
+int noop_pre(struct kprobe *p, struct pt_regs *regs) { return 0; }
+
+static struct kprobe kp = {
+	.symbol_name = "kallsyms_lookup_name",
+};
+
+unsigned long (*kallsyms_lookup_name_func)(const char *name) = NULL;
+
+//use a kprobe to locate the address of kallsyms_lookup_name
+int find_kallsyms_lookup_name(void)
+{
+	int ret = -1;
+
+	kp.addr = 0;
+	kp.pre_handler = noop_pre;
+	ret = register_kprobe(&kp);
+	if (ret < 0)
+		return ret;
+
+	kallsyms_lookup_name_func = (void *)kp.addr;
+	unregister_kprobe(&kp);
+	return ret;
+}
+
+u16 xsc_get_irq_matrix_global_available(struct xsc_core_device *dev)
+{
+	struct db_irq_matrix *m;
+	static unsigned long addr;
+	static int flag;
+	char *name = "vector_matrix";
+	int ret;
+
+	if (flag == 0) {
+		ret = find_kallsyms_lookup_name();
+		if (ret < 0) {
+			xsc_core_err(dev, "find kallsyms_lookup_name failed\n");
+			return 0xffff;
+		}
+
+		addr = kallsyms_lookup_name_func(name);
+		
xsc_core_dbg(dev, "vector_matrix addr=0x%lx\n", addr); + if (addr == 0) { + xsc_core_err(dev, "not support, arch maybe not X86?\n"); + /* 返回0xffff,做到在不知道cpu vector剩余多少可用的情况 + * 下不影响fw用该值判断能否分配中断 + */ + return 0xffff; + } + flag = 1; + } + + m = (struct db_irq_matrix *)(*(long *)addr); + if (!m) { + xsc_core_err(dev, "vector_matrix is NULL\n"); + return 0xffff; + } + xsc_core_info(dev, "vector_matrix global_available=%u\n", m->global_available); + return m->global_available; +} + +int xsc_pci_ctrl_exec_ioctl(struct xsc_core_device *xdev, void *in, int in_size, void *out, + int out_size) +{ + int opcode, ret = 0; + struct xsc_ioctl_attr *hdr; + + hdr = (struct xsc_ioctl_attr *)in; + opcode = hdr->opcode; + switch (opcode) { + case XSC_IOCTL_GET_PHY_INFO: + ret = xsc_pci_ctrl_get_phy(xdev, in, out); + break; + case XSC_IOCTL_SET_QP_STATUS: + xsc_core_dbg(xdev, "case XSC_IOCTL_SET_QP_STATUS:\n"); + ret = xsc_pci_ctrl_modify_qp(xdev, in, out); + break; + case XSC_IOCTL_GET_CONTEXT: + xsc_core_dbg(xdev, "case XSC_IOCTL_GET_CONTEXT:\n"); + ret = xsc_pci_ctrl_get_contextinfo(xdev, in, out); + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static long xsc_pci_ctrl_setinfo(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + int err; + struct xsc_set_debug_info_mbox_in in; + struct xsc_set_debug_info_mbox_out out; + struct xsc_ioctl_set_debug_info info; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) { + xsc_core_err(xdev, "copy user_hdr from user failed, err = %d\n", err); + return -EFAULT; + } + + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) { + xsc_core_err(xdev, "incorrect check field, check field=%#x\n", hdr.check_filed); + return -EFAULT; + } + + if (hdr.attr.length != sizeof(info)) { + xsc_core_err(xdev, "unexpected length, length=%d\n", hdr.attr.length); + return -EFAULT; + } + + err = copy_from_user(&info, user_hdr->attr.data, hdr.attr.length); + if (err) { + 
xsc_core_err(xdev, "copy attr.data from user failed, err = %d\n", err); + return -EFAULT; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_DEBUG_INFO); + switch (hdr.attr.opcode) { + case XSC_IOCTL_SET_LOG_LEVEL: + in.set_field = 0; + in.log_level = info.log_level; + break; + case XSC_IOCTL_SET_CMD_VERBOSE: + in.set_field = 1; + in.cmd_verbose = info.cmd_verbose; + break; + default: + xsc_core_err(xdev, "invalid opcode %d\n", hdr.attr.opcode); + return -EINVAL; + } + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + xsc_core_err(xdev, "failed to set debug info to fw, err = %d, status = %d\n", + err, out.hdr.status); + return -EFAULT; + } + + return 0; +} + +static long xsc_pci_ctrl_getinfo(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + struct xsc_ioctl_hdr *in; + int in_size; + int err; + u16 global_available; + u16 totalvfs; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EFAULT; + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + switch (hdr.attr.opcode) { + case XSC_IOCTL_GET_PHY_INFO: + case XSC_IOCTL_SET_QP_STATUS: + case XSC_IOCTL_GET_CONTEXT: + case XSC_IOCTL_GET_VECTOR_MATRIX: + break; + default: + return TRY_NEXT_CB; + } + in_size = sizeof(struct xsc_ioctl_hdr) + hdr.attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -EFAULT; + in->attr.opcode = hdr.attr.opcode; + in->attr.length = hdr.attr.length; + + if (hdr.attr.opcode == XSC_IOCTL_GET_VECTOR_MATRIX) { + global_available = xsc_get_irq_matrix_global_available(xdev); + totalvfs = (pci_sriov_get_totalvfs(xdev->pdev) < 0) ? 
0 : + pci_sriov_get_totalvfs(xdev->pdev); + in->attr.error = err; + memcpy(in->attr.data, (void *)&global_available, sizeof(u16)); + memcpy(in->attr.data + sizeof(u16), (void *)&totalvfs, sizeof(u16)); + goto next; + } + + err = copy_from_user(in->attr.data, user_hdr->attr.data, hdr.attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + err = xsc_pci_ctrl_exec_ioctl(xdev, &in->attr, + (in_size - offsetof(struct xsc_ioctl_hdr, attr)), + in->attr.data, hdr.attr.length); + in->attr.error = err; +next: + if (copy_to_user((void *)user_hdr, in, in_size)) + err = -EFAULT; + kvfree(in); + return err; +} + +static int xsc_ioctl_flow_add_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl, + char *data, unsigned int datalen) +{ + int err = 0; + struct xsc_flow_pct_v4_add *pct_v4; + struct xsc_flow_pct_v6_add *pct_v6; + + switch (tl->table) { + case XSC_FLOW_TBL_PCT_V4: + case XSC_FLOW_TBL_BM_PCT_V4: + pct_v4 = (struct xsc_flow_pct_v4_add *)(tl + 1); + err = xsc_alloc_pct_obj(file, pct_v4->priority, data, datalen); + break; + case XSC_FLOW_TBL_PCT_V6: + case XSC_FLOW_TBL_BM_PCT_V6: + pct_v6 = (struct xsc_flow_pct_v6_add *)(tl + 1); + err = xsc_alloc_pct_obj(file, pct_v6->priority, data, datalen); + break; + default: + break; + } + + return err; +} + +static void xsc_ioctl_flow_destroy_obj(struct xsc_bdf_file *file, struct xsc_ioctl_data_tl *tl) +{ + struct xsc_flow_pct_v4_del *pct_v4; + struct xsc_flow_pct_v6_del *pct_v6; + + switch (tl->table) { + case XSC_FLOW_TBL_PCT_V4: + case XSC_FLOW_TBL_BM_PCT_V4: + pct_v4 = (struct xsc_flow_pct_v4_del *)(tl + 1); + xsc_destroy_pct_obj(file, pct_v4->priority); + break; + case XSC_FLOW_TBL_PCT_V6: + case XSC_FLOW_TBL_BM_PCT_V6: + pct_v6 = (struct xsc_flow_pct_v6_del *)(tl + 1); + xsc_destroy_pct_obj(file, pct_v6->priority); + break; + default: + break; + } +} + +static int xsc_ioctl_flow_cmdq_handle_res_obj(struct xsc_bdf_file *file, + char *data, unsigned int datalen) +{ + struct xsc_ioctl_data_tl *tl; + int err = 0; 
+ + tl = (struct xsc_ioctl_data_tl *)data; + + switch (tl->opmod) { + case XSC_IOCTL_OP_ADD: + err = xsc_ioctl_flow_add_obj(file, tl, data, datalen); + break; + case XSC_IOCTL_OP_DEL: + xsc_ioctl_flow_destroy_obj(file, tl); + break; + default: + break; + } + + return err; +} + +static int xsc_ioctl_flow_cmdq(struct xsc_bdf_file *file, + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_mbox_in *in; + struct xsc_ioctl_mbox_out *out; + int in_size; + int out_size; + int err; + + in_size = sizeof(struct xsc_ioctl_mbox_in) + hdr->attr.length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) + return -EFAULT; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + in->len = __cpu_to_be16(hdr->attr.length); + err = copy_from_user(in->data, user_hdr->attr.data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + err = xsc_ioctl_flow_cmdq_handle_res_obj(file, in->data, hdr->attr.length); + if (err) { + kvfree(in); + return -EFAULT; + } + + out_size = sizeof(struct xsc_ioctl_mbox_out) + hdr->attr.length; + out = kvzalloc(out_size, GFP_KERNEL); + if (!out) { + kvfree(in); + return -ENOMEM; + } + memcpy(out->data, in->data, hdr->attr.length); + out->len = in->len; + err = xsc_cmd_exec(file->xdev, in, in_size, out, out_size); + + hdr->attr.error = __be32_to_cpu(out->error); + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data, out->data, hdr->attr.length)) + err = -EFAULT; + + kvfree(in); + kvfree(out); + return err; +} + +static int xsc_ioctl_emu_cmd(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +{ + struct xsc_ioctl_mbox_in *in; + struct xsc_ioctl_mbox_out *out; + struct xsc_ioctl_emu_hdr *emu_hdr; + u8 *buffer; + int in_size; + int out_size; + int err; + + buffer = kvzalloc(hdr->attr.length, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + err = 
copy_from_user(buffer, user_hdr->attr.data, hdr->attr.length); + if (err) + goto err_copy_user_data; + + emu_hdr = (struct xsc_ioctl_emu_hdr *)buffer; + in_size = emu_hdr->in_length; + in = kvzalloc(in_size, GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_alloc_in_mem; + } + memcpy(in, emu_hdr->data, emu_hdr->in_length); + + out_size = emu_hdr->out_length; + out = kvzalloc(out_size, GFP_KERNEL); + if (!out) { + err = -ENOMEM; + goto err_alloc_out_mem; + } + + err = xsc_cmd_exec(xdev, in, in_size, out, out_size); + + hdr->attr.error = __be32_to_cpu(out->error); + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + err = -EFAULT; + if (copy_to_user((void *)user_hdr->attr.data + sizeof(struct xsc_ioctl_emu_hdr), + out, out_size)) + err = -EFAULT; + + kvfree(out); + kvfree(in); + kvfree(buffer); + return err; + +err_alloc_out_mem: + kvfree(in); +err_alloc_in_mem: +err_copy_user_data: + kvfree(buffer); + return err; +} + +static int xsc_ioctl_modify_raw_qp(struct xsc_core_device *xdev, + struct xsc_ioctl_hdr __user *user_hdr, struct xsc_ioctl_hdr *hdr) +{ + struct xsc_modify_raw_qp_mbox_in *in; + struct xsc_modify_raw_qp_mbox_out *out; + int err; + + if (hdr->attr.length != sizeof(struct xsc_modify_raw_qp_request)) + return -EINVAL; + + in = kvzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + goto err_in; + out = kvzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + goto err_out; + + err = copy_from_user(&in->req, user_hdr->attr.data, + sizeof(struct xsc_modify_raw_qp_request)); + if (err) + goto err; + + in->hdr.opcode = __cpu_to_be16(hdr->attr.opcode); + in->hdr.ver = cpu_to_be16(hdr->attr.ver); + in->pcie_no = xdev->pcie_no; + + err = xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), + out, sizeof(struct xsc_modify_raw_qp_mbox_out)); + + hdr->attr.error = __be32_to_cpu(out->hdr.status); + + if (copy_to_user((void *)user_hdr, hdr, sizeof(*hdr))) + goto err; + + kvfree(in); + kvfree(out); + return 0; + +err: + kvfree(out); +err_out: + kvfree(in); 
+err_in: + return -EFAULT; +} + +static void xsc_handle_multiqp_create(struct xsc_bdf_file *file, void *in, + unsigned int inlen, void *out) +{ + u16 qp_num = 0; + int i = 0; + struct xsc_create_qp_request *req = NULL; + void *ptr = NULL; + int len = 0; + u32 qpn_base = be32_to_cpu(((struct xsc_create_multiqp_mbox_out *)out)->qpn_base); + + qp_num = be16_to_cpu(((struct xsc_create_multiqp_mbox_in *)in)->qp_num); + ptr = ((struct xsc_create_multiqp_mbox_in *)in)->data; + for (i = 0; i < qp_num; i++) { + req = (struct xsc_create_qp_request *)ptr; + len = sizeof(struct xsc_create_qp_request) + + be16_to_cpu(req->pa_num) * sizeof(u64); + xsc_alloc_qp_obj(file, qpn_base + i, (char *)req, len); + ptr += len; + } +} + +static void xsc_pci_ctrl_cmdq_handle_res_obj(struct xsc_bdf_file *file, void *in, + unsigned int inlen, void *out, int opcode) +{ + unsigned int idx; + + switch (opcode) { + case XSC_CMD_OP_ALLOC_PD: + idx = be32_to_cpu(((struct xsc_alloc_pd_mbox_out *)out)->pdn); + xsc_alloc_pd_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DEALLOC_PD: + idx = be32_to_cpu(((struct xsc_dealloc_pd_mbox_in *)in)->pdn); + xsc_destroy_pd_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_MKEY: + idx = be32_to_cpu(((struct xsc_create_mkey_mbox_out *)out)->mkey); + xsc_alloc_mr_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_DESTROY_MKEY: + idx = be32_to_cpu(((struct xsc_destroy_mkey_mbox_in *)in)->mkey); + xsc_destroy_mr_obj(file, idx); + break; + case XSC_CMD_OP_DESTROY_CQ: + idx = be32_to_cpu(((struct xsc_destroy_cq_mbox_in *)in)->cqn); + xsc_destroy_cq_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_CQ: + idx = be32_to_cpu(((struct xsc_create_cq_mbox_out *)out)->cqn); + xsc_alloc_cq_obj(file, idx, in, inlen); + break; + case XSC_CMD_OP_CREATE_QP: + idx = be32_to_cpu(((struct xsc_create_qp_mbox_out *)out)->qpn); + xsc_alloc_qp_obj(file, idx, + (char *)&(((struct xsc_create_qp_mbox_in *)in)->req), + inlen); + break; + case XSC_CMD_OP_DESTROY_QP: + idx = 
be32_to_cpu(((struct xsc_destroy_qp_mbox_in *)in)->qpn); + xsc_destroy_qp_obj(file, idx); + break; + case XSC_CMD_OP_CREATE_MULTI_QP: + xsc_handle_multiqp_create(file, in, inlen, out); + break; + default: + break; + } +} + +static long xsc_pci_ctrl_cmdq(struct xsc_bdf_file *file, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_core_device *xdev = file->xdev; + struct xsc_ioctl_hdr hdr; + int err; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) + return -EINVAL; + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) + return -EINVAL; + + /* check ioctl cmd */ + switch (hdr.attr.opcode) { + case XSC_CMD_OP_IOCTL_FLOW: + return xsc_ioctl_flow_cmdq(file, user_hdr, &hdr); + case XSC_CMD_OP_MODIFY_RAW_QP: + return xsc_ioctl_modify_raw_qp(xdev, user_hdr, &hdr); + case XSC_CMD_OP_USER_EMU_CMD: + return xsc_ioctl_emu_cmd(xdev, user_hdr, &hdr); + default: + err = TRY_NEXT_CB; + break; + } + + return err; +} + +static long xsc_pci_ctrl_cmdq_raw(struct xsc_bdf_file *file, + struct xsc_ioctl_hdr __user *user_hdr) +{ + struct xsc_ioctl_hdr hdr; + int err; + void *in; + void *out; + int op; + struct xsc_core_device *dev = file->xdev; + struct xsc_create_mkey_mbox_out *resp; + struct xsc_unregister_mr_mbox_in *req; + u8 key; + u16 out_len; + int qpn = 0; + + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) { + xsc_core_err(dev, "fail to copy from user user_hdr\n"); + return -EFAULT; + } + + /* check valid */ + if (hdr.check_filed != XSC_IOCTL_CHECK_FILED) { + xsc_core_err(dev, "invalid check filed %u\n", hdr.check_filed); + return -EINVAL; + } + + in = kvzalloc(hdr.attr.length, GFP_KERNEL); + if (!in) + return -ENOMEM; + out_len = min_t(u16, hdr.attr.length, (u16)MAX_MBOX_OUT_LEN); + out = kvzalloc(out_len, GFP_KERNEL); + if (!out) { + kfree(in); + return -ENOMEM; + } + + err = copy_from_user(in, user_hdr->attr.data, hdr.attr.length); + if (err) { + err = -EFAULT; + xsc_core_err(dev, "fail to copy_from_user user hdr attr\n"); 
+ goto err_exit; + } + + op = be16_to_cpu(((struct xsc_inbox_hdr *)in)->opcode); + switch (op) { + case XSC_CMD_OP_CREATE_MKEY: + spin_lock(&dev->dev_res->mkey_lock); + key = 0x80 + dev->dev_res->mkey_key++; + spin_unlock(&dev->dev_res->mkey_lock); + if (dev->reg_mr_via_cmdq) + err = xsc_cmd_exec(dev, in, hdr.attr.length, out, hdr.attr.length); + else + err = xsc_create_mkey(dev, in, out); + + resp = (struct xsc_create_mkey_mbox_out *)out; + resp->mkey = xsc_idx_to_mkey(be32_to_cpu(resp->mkey) & 0xffffff) | key; + resp->mkey = cpu_to_be32(resp->mkey); + break; + case XSC_CMD_OP_DESTROY_MKEY: + if (!dev->reg_mr_via_cmdq) + err = xsc_destroy_mkey(dev, in, out); + break; + case XSC_CMD_OP_REG_MR: + if (!dev->reg_mr_via_cmdq) + err = xsc_reg_mr(dev, in, out); + break; + case XSC_CMD_OP_DEREG_MR: + req = (struct xsc_unregister_mr_mbox_in *)in; + req->mkey = be32_to_cpu(req->mkey); + req->mkey = cpu_to_be32(xsc_mkey_to_idx(req->mkey)); + if (dev->reg_mr_via_cmdq) + err = xsc_cmd_exec(dev, in, hdr.attr.length, out, hdr.attr.length); + else + err = xsc_dereg_mr(dev, in, out); + break; + case XSC_CMD_OP_DESTROY_QP: + qpn = be32_to_cpu(((struct xsc_destroy_qp_mbox_in *)in)->qpn); + xsc_send_cmd_2rst_qp(dev, qpn); + err = xsc_cmd_exec(dev, in, hdr.attr.length, out, out_len); + break; + default: + err = xsc_cmd_exec(dev, in, hdr.attr.length, out, out_len); + break; + } + xsc_pci_ctrl_cmdq_handle_res_obj(file, in, hdr.attr.length, out, hdr.attr.opcode); + + if (copy_to_user((void *)user_hdr->attr.data, out, out_len)) { + xsc_core_err(dev, "fail to copy_to_user user hdr attr\n"); + err = -EFAULT; + } +err_exit: + kfree(in); + kfree(out); + return err; +} + +static int xsc_pci_ctrl_reg_cb(struct xsc_bdf_file *file, unsigned int cmd, + struct xsc_ioctl_hdr __user *user_hdr, void *data) +{ + int err; + + switch (cmd) { + case XSC_IOCTL_CMDQ: + err = xsc_pci_ctrl_cmdq(file, user_hdr); + break; + case XSC_IOCTL_DRV_GET: + err = xsc_pci_ctrl_getinfo(file->xdev, user_hdr); + break; + 
case XSC_IOCTL_DRV_SET: + err = xsc_pci_ctrl_setinfo(file->xdev, user_hdr); + break; + case XSC_IOCTL_CMDQ_RAW: + err = xsc_pci_ctrl_cmdq_raw(file, user_hdr); + break; + default: + err = TRY_NEXT_CB; + break; + } + + return err; +} + +void xsc_pci_ctrl_fini(void) +{ + xsc_port_ctrl_cb_dereg(XSC_PCI_CTRL_NAME); +} + +int xsc_pci_ctrl_init(void) +{ + int ret; + + ret = xsc_port_ctrl_cb_reg(XSC_PCI_CTRL_NAME, xsc_pci_ctrl_reg_cb, NULL); + if (ret != 0) + pr_err("failed to register port control node for %s\n", XSC_PCI_CTRL_NAME); + + return ret; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h new file mode 100644 index 000000000000..c57caed380b7 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_pci_ctrl.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef XSC_PCI_CTRL_H +#define XSC_PCI_CTRL_H + +#include +#include +#include + +//for x86 +#ifndef NR_VECTORS +#define NR_VECTORS 256 +#endif +#define IRQ_MATRIX_BITS NR_VECTORS +#define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS)) + +struct db_cpumap { + unsigned int available; + unsigned int allocated; + unsigned int managed; + unsigned int managed_allocated; + u8 initialized; + u8 online; + unsigned long alloc_map[IRQ_MATRIX_SIZE]; + unsigned long managed_map[IRQ_MATRIX_SIZE]; +}; + +struct db_irq_matrix { + unsigned int matrix_bits; + unsigned int alloc_start; + unsigned int alloc_end; + unsigned int alloc_size; + unsigned int global_available; + unsigned int global_reserved; + unsigned int systembits_inalloc; + unsigned int total_allocated; + unsigned int online_maps; + struct db_cpumap __percpu *maps; + unsigned long scratch_map[IRQ_MATRIX_SIZE]; + unsigned long system_map[IRQ_MATRIX_SIZE]; +}; + +u16 xsc_get_irq_matrix_global_available(struct xsc_core_device *dev); + +int xsc_pci_ctrl_init(void); +void 
xsc_pci_ctrl_fini(void); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c new file mode 100644 index 000000000000..7e5c34ebe2dd --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/xsc_port_ctrl.c @@ -0,0 +1,512 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2023, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include "common/xsc_core.h" +#include "common/driver.h" +#include "common/xsc_port_ctrl.h" +#include "common/res_obj.h" + +#define XSC_PORT_CTRL_MAX 1024 +#define XSC_PORT_CTRL_NAME_PRE "yunsilicon" +#define XSC_PORT_CTRL_NAME "port_ctrl" +#define XSC_PORT_CTRL_CB_NAME_LEN 15 +DECLARE_BITMAP(g_bitmap_dev_id, XSC_PORT_CTRL_MAX); + +struct xsc_port_ctrl_reg { + struct list_head node; + char name[XSC_PORT_CTRL_CB_NAME_LEN + 1]; + port_ctrl_cb cb; + void *data; +}; + +static dev_t g_port_ctrl_root_dev; +static struct class *g_port_ctrl_class; +static int g_port_ctrl_dev_cnt; +static struct list_head g_port_ctrl_cbs = LIST_HEAD_INIT(g_port_ctrl_cbs); +struct mutex g_port_ctrl_cbs_lock; /* protect port ctrl node list */ + +static int _port_ctrl_open(struct inode *inode, struct file *filp) +{ + struct xsc_port_ctrl *ctrl = container_of(inode->i_cdev, struct xsc_port_ctrl, cdev); + struct xsc_port_ctrl_file *file; + + file = kzalloc(sizeof(*file), GFP_KERNEL); + if (!file) + return -ENOMEM; + + INIT_RADIX_TREE(&file->bdf_tree, GFP_ATOMIC); + spin_lock_init(&file->bdf_lock); + file->ctrl = ctrl; + + file->root_bdf = kzalloc(sizeof(*file->root_bdf), GFP_KERNEL); + if (!file->root_bdf) { + kfree(file); + return -ENOMEM; + } + INIT_RADIX_TREE(&file->root_bdf->obj_tree, GFP_ATOMIC); + spin_lock_init(&file->root_bdf->obj_lock); + file->root_bdf->xdev = container_of(ctrl, struct xsc_core_device, port_ctrl); + + spin_lock(&ctrl->file_lock); + 
list_add_tail(&file->file_node, &ctrl->file_list); + spin_unlock(&ctrl->file_lock); + filp->private_data = file; + + xsc_core_info(file->root_bdf->xdev, "process %d open port ctrl file\n", current->pid); + + return 0; +} + +static void xsc_release_port_ctrl_file(struct xsc_port_ctrl_file *file) +{ + struct xsc_bdf_file *bdf_file; + struct radix_tree_iter iter; + void **slot; + + xsc_close_bdf_file(file->root_bdf); + kfree(file->root_bdf); + spin_lock(&file->bdf_lock); + radix_tree_for_each_slot(slot, &file->bdf_tree, &iter, 0) { + bdf_file = (struct xsc_bdf_file *)(*slot); + xsc_close_bdf_file(bdf_file); + radix_tree_iter_delete(&file->bdf_tree, &iter, slot); + kfree(bdf_file); + } + spin_unlock(&file->bdf_lock); +} + +static int _port_ctrl_release(struct inode *inode, struct file *filp) +{ + struct xsc_port_ctrl_file *file = filp->private_data; + + xsc_release_port_ctrl_file(file); + spin_lock(&file->ctrl->file_lock); + list_del(&file->file_node); + spin_unlock(&file->ctrl->file_lock); + kfree(file); + + return 0; +} + +static bool is_db_ofst(struct xsc_core_device *xdev, unsigned long offset) +{ + if (offset == (xdev->regs.tx_db & PAGE_MASK)) + return true; + else if (offset == (xdev->regs.rx_db & PAGE_MASK)) + return true; + else if (offset == (xdev->regs.complete_db & PAGE_MASK)) + return true; + else if (offset == (xdev->regs.complete_reg & PAGE_MASK)) + return true; + return false; +} + +static int _port_ctrl_mmap(struct file *filp, struct vm_area_struct *vma) +{ + resource_size_t reg_base; + unsigned long start = (unsigned long)vma->vm_start; + unsigned long size = (unsigned long)(vma->vm_end - vma->vm_start); + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + u64 addr; + u32 db_type; + u32 domain = 0; + u32 bus; + u32 devfn; + struct xsc_port_ctrl_file *file; + struct xsc_core_device *xdev; + struct xsc_core_device *rl_xdev; + u32 bdf; + + file = filp->private_data; + xdev = container_of(file->ctrl, struct xsc_core_device, port_ctrl); + + 
xsc_core_dbg(xdev, "_port_ctrl_map:offset=%lx\n", offset); + + bdf = offset >> 32; + db_type = bdf & 0x0000000f; + devfn = (bdf >> 4) & 0x000000ff; + bus = (bdf >> 12) & 0x000000ff; + + xsc_core_dbg(xdev, "bus=%u,devfn=%u,db_type=%u\n", bus, devfn, db_type); + + if (bdf != 0) { + rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(domain, bus, devfn); + if (!rl_xdev) + return -1; + + if (db_type == XSC_MMAP_MSG_SQDB) { + addr = rl_xdev->regs.tx_db; + } else if (db_type == XSC_MMAP_MSG_RQDB) { + addr = rl_xdev->regs.rx_db; + } else if (db_type == XSC_MMAP_MSG_CQDB) { + addr = rl_xdev->regs.complete_db; + } else if (db_type == XSC_MMAP_MSG_ARM_CQDB) { + addr = rl_xdev->regs.complete_reg; + } else { + pr_err("[%s:%d] mmap err\n", __func__, __LINE__); + return -1; + } + } else { + rl_xdev = xdev; + if (is_db_ofst(xdev, offset) || !offset) + addr = offset; + else + return -EINVAL; + } + + xsc_core_dbg(xdev, "tx_db=%llx,rx_db=%llx,cq_db=%llx,cq_reg=%llx\n", + rl_xdev->regs.tx_db, rl_xdev->regs.rx_db, + rl_xdev->regs.complete_db, rl_xdev->regs.complete_reg); + + reg_base = (pci_resource_start(rl_xdev->pdev, rl_xdev->bar_num) + (addr & PAGE_MASK)); + + if (addr) { + if (xdev->chip_ver_h == 0x100) + reg_base = xsc_core_is_pf(rl_xdev) ? 
reg_base - 0xA0000000 : reg_base; + else + reg_base = reg_base - 0xA0000000; + } + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + if (remap_pfn_range(vma, start, (reg_base >> PAGE_SHIFT), size, vma->vm_page_prot)) { + pr_err("[%s:%d] remap_pfn_range err\n", __func__, __LINE__); + return -1; + } + + return 0; +} + +static inline struct xsc_bdf_file *get_bdf_file(struct xsc_port_ctrl_file *file, + struct xsc_ioctl_hdr *hdr) +{ + struct xsc_core_device *xdev; + struct xsc_bdf_file *bdf_file; + struct xsc_core_device *rl_xdev; + unsigned long key; + + xdev = container_of(file->ctrl, struct xsc_core_device, port_ctrl); + xsc_core_dbg(xdev, "domain=%x, bus=%x, devfn=%x\n", hdr->domain, hdr->bus, hdr->devfn); + if ((hdr->domain == 0 && hdr->bus == 0 && hdr->devfn == 0) || + (hdr->domain == pci_domain_nr(xdev->pdev->bus) && + hdr->bus == xdev->pdev->bus->number && + hdr->devfn == xdev->pdev->devfn)) + return file->root_bdf; + + key = bdf_to_key(hdr->domain, hdr->bus, hdr->devfn); + spin_lock(&file->bdf_lock); + bdf_file = radix_tree_lookup(&file->bdf_tree, key); + spin_unlock(&file->bdf_lock); + if (bdf_file) { + xsc_core_dbg(bdf_file->xdev, "find the bdf file: %lx\n", bdf_file->key); + return bdf_file; + } + + rl_xdev = xsc_pci_get_xdev_by_bus_and_slot(hdr->domain, hdr->bus, hdr->devfn); + if (!rl_xdev) { + xsc_core_err(bdf_file->xdev, "fail to get xdev:domain=%x, bus=%x, devfn=%x\n", + hdr->domain, hdr->bus, hdr->devfn); + return NULL; + } + + bdf_file = kzalloc(sizeof(*bdf_file), GFP_KERNEL); + if (!bdf_file) + return NULL; + + bdf_file->key = key; + INIT_RADIX_TREE(&bdf_file->obj_tree, GFP_ATOMIC); + spin_lock_init(&bdf_file->obj_lock); + bdf_file->xdev = rl_xdev; + radix_tree_preload(GFP_KERNEL); + spin_lock(&file->bdf_lock); + radix_tree_insert(&file->bdf_tree, key, bdf_file); + spin_unlock(&file->bdf_lock); + radix_tree_preload_end(); + xsc_core_dbg(rl_xdev, "bdf file not exist, create it and add to port ctrl file\n"); + + return bdf_file; +} + +static 
long _port_ctrl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct xsc_port_ctrl_reg *p; + struct xsc_port_ctrl_file *file; + struct xsc_ioctl_hdr __user *user_hdr; + struct xsc_bdf_file *bdf_file; + struct xsc_ioctl_hdr hdr; + int err; + + file = filp->private_data; + user_hdr = (struct xsc_ioctl_hdr __user *)arg; + err = copy_from_user(&hdr, user_hdr, sizeof(hdr)); + if (err) { + pr_err("%s: fail to copy from user hdr\n", __func__); + return err; + } + + bdf_file = get_bdf_file(file, &hdr); + if (!bdf_file) { + pr_err("%s: fail to find bdf file\n", __func__); + return -EFAULT; + } + + list_for_each_entry(p, &g_port_ctrl_cbs, node) { + if (p->cb) { + err = p->cb(bdf_file, cmd, user_hdr, p->data); + if (err != TRY_NEXT_CB) + break; + } + } + + return err; +} + +static const struct file_operations g_port_ctrl_fops = { + .owner = THIS_MODULE, + .open = _port_ctrl_open, + .mmap = _port_ctrl_mmap, + .unlocked_ioctl = _port_ctrl_ioctl, + .compat_ioctl = _port_ctrl_ioctl, + .release = _port_ctrl_release, +}; + +static struct xsc_port_ctrl_reg *_port_ctrl_cbs_get(const char *name) +{ + struct xsc_port_ctrl_reg *p, *found; + + found = NULL; + list_for_each_entry(p, &g_port_ctrl_cbs, node) { + if (strcmp(p->name, name) == 0) { + found = p; + break; + } + } + + return found; +} + +static void _port_ctrl_data_fini(void) +{ + class_destroy(g_port_ctrl_class); + unregister_chrdev_region(g_port_ctrl_root_dev, XSC_PORT_CTRL_MAX); +} + +static int _port_ctrl_data_init(void) +{ + int ret; + int major_devid; + + ret = alloc_chrdev_region(&g_port_ctrl_root_dev, 0, XSC_PORT_CTRL_MAX, + XSC_PORT_CTRL_NAME_PRE); + if (ret < 0) { + pr_err("%s cant't get major id\n", XSC_PORT_CTRL_NAME_PRE); + return -1; + } + + major_devid = MAJOR(g_port_ctrl_root_dev); + pr_info("requested major_devid %d\n", major_devid); + + g_port_ctrl_class = class_create(XSC_PORT_CTRL_NAME_PRE); + if (IS_ERR(g_port_ctrl_class)) { + pr_err("failed to call create class witch name %s\n", + 
XSC_PORT_CTRL_NAME_PRE); + unregister_chrdev_region(g_port_ctrl_root_dev, XSC_PORT_CTRL_MAX); + return -1; + } + + g_port_ctrl_dev_cnt = 0; + + return 0; +} + +static void _port_ctrl_dev_del(struct xsc_core_device *dev) +{ + struct xsc_port_ctrl *ctrl; + struct xsc_port_ctrl_file *file, *n; + int dev_id = 0; + + ctrl = &dev->port_ctrl; + if (!ctrl) + return; + + dev_id = MINOR(ctrl->devid); + spin_lock(&ctrl->file_lock); + list_for_each_entry_safe(file, n, &ctrl->file_list, file_node) { + xsc_release_port_ctrl_file(file); + list_del(&file->file_node); + kfree(file); + } + spin_unlock(&ctrl->file_lock); + + device_destroy(g_port_ctrl_class, ctrl->devid); + cdev_del(&ctrl->cdev); + + clear_bit(dev_id, g_bitmap_dev_id); + g_port_ctrl_dev_cnt--; +} + +static int _port_ctrl_dev_add(struct xsc_core_device *dev) +{ + struct xsc_port_ctrl *ctrl; + int ret; + int dev_id = 0; + + if (g_port_ctrl_dev_cnt >= XSC_PORT_CTRL_MAX) { + xsc_core_err(dev, "too many port control devices\n"); + return -ENOMEM; + } + + ctrl = &dev->port_ctrl; + dev_id = find_first_zero_bit(g_bitmap_dev_id, XSC_PORT_CTRL_MAX); + ctrl->devid = g_port_ctrl_root_dev + dev_id; + ctrl->cdev.owner = THIS_MODULE; + INIT_LIST_HEAD(&ctrl->file_list); + spin_lock_init(&ctrl->file_lock); + cdev_init(&ctrl->cdev, &g_port_ctrl_fops); + ret = cdev_add(&ctrl->cdev, ctrl->devid, 1); + if (ret != 0) { + xsc_core_err(dev, "failed to add cdev\n"); + kfree(ctrl); + return -ENOMEM; + } + + ctrl->device = device_create(g_port_ctrl_class, NULL, ctrl->devid, NULL, + "%s!%s_%04x:%02x:%02x.%x", XSC_PORT_CTRL_NAME_PRE, + XSC_PORT_CTRL_NAME, pci_domain_nr(dev->pdev->bus), + dev->pdev->bus->number, + PCI_SLOT(dev->pdev->devfn), + PCI_FUNC(dev->pdev->devfn)); + if (IS_ERR(ctrl->device)) { + xsc_core_err(dev, "failed to create port control device\n"); + cdev_del(&ctrl->cdev); + kfree(ctrl); + return -ENOMEM; + } + + g_port_ctrl_dev_cnt++; + set_bit(dev_id, g_bitmap_dev_id); + + return 0; +} + +static void _port_ctrl_cb_fini(void) +{ + 
struct xsc_port_ctrl_reg *p, *n; + + mutex_lock(&g_port_ctrl_cbs_lock); /* hold the lock across the whole walk, not per-iteration */ + list_for_each_entry_safe(p, n, &g_port_ctrl_cbs, node) { + list_del(&p->node); + kfree(p); + } + mutex_unlock(&g_port_ctrl_cbs_lock); +} + +static int _port_ctrl_cb_init(void) +{ + mutex_init(&g_port_ctrl_cbs_lock); + + return 0; +} + +static void _port_ctrl_dev_flush(void) +{ +} + +void xsc_port_ctrl_fini(void) +{ + _port_ctrl_dev_flush(); + _port_ctrl_data_fini(); + _port_ctrl_cb_fini(); +} + +int xsc_port_ctrl_init(void) +{ + int ret; + + ret = _port_ctrl_data_init(); + if (ret != 0) { + pr_err("failed to initialize port ctrl data\n"); + return -1; + } + + ret = _port_ctrl_cb_init(); + if (ret != 0) { + pr_err("failed to initialize port ctrl cb\n"); + _port_ctrl_data_fini(); + return -1; + } + + return 0; +} + +void xsc_port_ctrl_remove(struct xsc_core_device *dev) +{ + _port_ctrl_dev_del(dev); +} + +int xsc_port_ctrl_probe(struct xsc_core_device *dev) +{ + int ret = 0; + + ret = _port_ctrl_dev_add(dev); + if (ret != 0) + xsc_core_err(dev, "failed to add new port control device\n"); + + return ret; +} + +int xsc_port_ctrl_cb_reg(const char *name, port_ctrl_cb cb, void *data) /* register a named ioctl callback; returns 0 on success */ +{ + struct xsc_port_ctrl_reg *reg_node; + + if (strlen(name) > XSC_PORT_CTRL_CB_NAME_LEN) { + pr_err("the name is too long to register to port control\n"); + return -1; + } + + reg_node = _port_ctrl_cbs_get(name); + if (reg_node) { + pr_err("failed to register a duplicated node\n"); + return -1; + } + + reg_node = kmalloc(sizeof(*reg_node), GFP_KERNEL); + if (!reg_node) + return -1; + + strscpy(reg_node->name, name, sizeof(reg_node->name)); + reg_node->cb = cb; + reg_node->data = data; + INIT_LIST_HEAD(&reg_node->node); + + mutex_lock(&g_port_ctrl_cbs_lock); + list_add_tail(&reg_node->node, &g_port_ctrl_cbs); + mutex_unlock(&g_port_ctrl_cbs_lock); + + return 0; +} +EXPORT_SYMBOL(xsc_port_ctrl_cb_reg); + +void xsc_port_ctrl_cb_dereg(const char *name) +{ + struct xsc_port_ctrl_reg *p, *n; + + list_for_each_entry_safe(p, n, 
&g_port_ctrl_cbs, node) { + if (strcmp(p->name, name) == 0) { + mutex_lock(&g_port_ctrl_cbs_lock); + list_del(&p->node); + mutex_unlock(&g_port_ctrl_cbs_lock); + kfree(p); + break; + } + } +} +EXPORT_SYMBOL(xsc_port_ctrl_cb_dereg); -- Gitee From 884ce9fc035309ed2abf84ef6df53ad319b6f942 Mon Sep 17 00:00:00 2001 From: Joy Allen Date: Mon, 23 Dec 2024 16:10:04 +0800 Subject: [PATCH 2138/2138] anolis: enhance rich container for overcommit memory ANBZ: #12437 Workloads running on the same host may require conflicting configs of `overcommit_memory`. For example, redis prefers 1, and postgresql prefers 2. Add rich container extra data to support custom overcommit policy. Now, each container can set its own `/proc/sys/vm/overcommit*`. Note: We can expand more extra data in the future. Control this feature by `/proc/sys/kernel/rich_container_ext_enable`. Signed-off-by: Joy Allen --- fs/proc/meminfo.c | 17 +++- include/linux/mman.h | 16 ++++ include/linux/pid_namespace.h | 36 ++++++++ ipc/shm.c | 12 ++- kernel/pid_namespace.c | 41 +++++++++ kernel/sysctl.c | 10 +++ mm/mmap.c | 11 ++- mm/util.c | 163 ++++++++++++++++++++++++++++++++-- 8 files changed, 296 insertions(+), 10 deletions(-) diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index fa51e15ea094..4b090aeacfc2 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -41,11 +41,14 @@ static int meminfo_proc_show(struct seq_file *m, void *v) struct mem_cgroup *memcg = NULL; struct sysinfo_ext ext; + struct rich_container_ext *rich = NULL; + unsigned long commit_limit; #ifdef CONFIG_MEMCG rcu_read_lock(); if (in_rich_container(current)) { memcg = rich_container_get_memcg(); + rich = rich_container_get_ext(); } rcu_read_unlock(); #endif @@ -83,7 +86,17 @@ static int meminfo_proc_show(struct seq_file *m, void *v) memcg_meminfo(memcg, &i, &ext); } - committed = percpu_counter_read_positive(&vm_committed_as); + if (rich && memcg) { +#ifdef CONFIG_MEMCG + commit_limit = rich_container_vm_commit_limit(rich, memcg); +#else + 
commit_limit = vm_commit_limit(); +#endif + committed = percpu_counter_read_positive(&rich->vm_committed_as); + } else { + commit_limit = vm_commit_limit(); + committed = percpu_counter_read_positive(&vm_committed_as); + } sreclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B); sunreclaim = global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B); @@ -151,7 +164,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) show_val_kb(m, "Bounce: ", global_zone_page_state(NR_BOUNCE)); show_val_kb(m, "WritebackTmp: ", ext.writeback_temp); - show_val_kb(m, "CommitLimit: ", vm_commit_limit()); + show_val_kb(m, "CommitLimit: ", commit_limit); show_val_kb(m, "Committed_AS: ", committed); seq_printf(m, "VmallocTotal: %8lu kB\n", (unsigned long)VMALLOC_TOTAL >> 10); diff --git a/include/linux/mman.h b/include/linux/mman.h index 1199d73d56cb..4d962ec5b2fd 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -77,6 +78,16 @@ unsigned long vm_memory_committed(void); static inline void vm_acct_memory(long pages) { + struct rich_container_ext *ext = NULL; + + /* Account pages in current rich container */ + rcu_read_lock(); + if (in_rich_container(current)) + ext = rich_container_get_ext(); + rcu_read_unlock(); + if (ext) + percpu_counter_add_batch(&ext->vm_committed_as, pages, ext->as_batch); + percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch); } @@ -163,6 +174,11 @@ calc_vm_flag_bits(struct file *file, unsigned long flags) unsigned long vm_commit_limit(void); +#ifdef CONFIG_MEMCG +unsigned long rich_container_vm_commit_limit(struct rich_container_ext *ext, + struct mem_cgroup *memcg); +#endif + #ifndef arch_memory_deny_write_exec_supported static inline bool arch_memory_deny_write_exec_supported(void) { diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 9da7d0da722c..fb3a04dc1332 100644 --- a/include/linux/pid_namespace.h +++ 
b/include/linux/pid_namespace.h @@ -23,6 +23,15 @@ struct fs_pin; #define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED 2 /* same as 1, except MFD_EXEC rejected */ #endif +struct rich_container_ext { + /* overcommit */ + int overcommit_memory; + int overcommit_ratio; + unsigned long overcommit_kbytes; + struct percpu_counter vm_committed_as; + s32 as_batch; +}; + struct pid_namespace { struct idr idr; struct rcu_head rcu; @@ -41,6 +50,7 @@ struct pid_namespace { #if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE) int memfd_noexec_scope; #endif + struct rich_container_ext *ext; } __randomize_layout; extern struct pid_namespace init_pid_ns; @@ -128,6 +138,7 @@ extern int sysctl_rich_container_enable; extern int sysctl_rich_container_source; extern int sysctl_rich_container_cpuinfo_source; extern unsigned int sysctl_rich_container_cpuinfo_sharesbase; +extern int sysctl_rich_container_ext_enable; static inline struct task_struct *rich_container_get_scenario(void) { @@ -146,6 +157,17 @@ static inline bool in_rich_container(struct task_struct *tsk) } void rich_container_get_cpuset_cpus(struct cpumask *pmask); + +static inline struct rich_container_ext *rich_container_get_ext(void) +{ + if (sysctl_rich_container_ext_enable == 0) + return NULL; + + return task_active_pid_ns(current)->ext; +} + +struct rich_container_ext *create_rich_container_ext(void); +void destroy_rich_container_ext(struct rich_container_ext *ext); #else static inline bool in_rich_container(struct task_struct *tsk) { @@ -160,6 +182,20 @@ static inline struct task_struct *rich_container_get_scenario(void) { return NULL; } + +static inline struct rich_container_ext *create_rich_container_ext(void) +{ + return NULL; +} + +static inline void destroy_rich_container_ext(struct rich_container_ext *ext) +{ +} + +static inline struct rich_container_ext *rich_container_get_ext(void) +{ + return NULL; +} #endif #endif /* _LINUX_PID_NS_H */ diff --git a/ipc/shm.c b/ipc/shm.c index 576a543b7cff..c9b9bc8c4fac 100644 --- 
a/ipc/shm.c +++ b/ipc/shm.c @@ -757,11 +757,21 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) file = hugetlb_file_setup(name, hugesize, acctflag, HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); } else { + struct rich_container_ext *ext = NULL; + /* * Do not allow no accounting for OVERCOMMIT_NEVER, even * if it's asked for. */ - if ((shmflg & SHM_NORESERVE) && + rcu_read_lock(); + if (in_rich_container(current)) + ext = rich_container_get_ext(); + rcu_read_unlock(); + if (ext) { + if ((shmflg & SHM_NORESERVE) && + ext->overcommit_memory != OVERCOMMIT_NEVER) + acctflag = VM_NORESERVE; + } else if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) acctflag = VM_NORESERVE; file = shmem_kernel_file_setup(name, size, acctflag); diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 9d54ed62f36f..18cf494b9bfa 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -23,11 +23,15 @@ #include #include #include +#include #include "pid_sysctl.h" #ifdef CONFIG_RICH_CONTAINER int sysctl_rich_container_enable; int sysctl_rich_container_source; /* 0 - current; 1 - child_reaper */ +int sysctl_rich_container_ext_enable; + +static struct kmem_cache *ext_cachep; #endif static DEFINE_MUTEX(pid_caches_mutex); @@ -35,6 +39,38 @@ static struct kmem_cache *pid_ns_cachep; /* Write once array, filled from the beginning. 
*/ static struct kmem_cache *pid_cache[MAX_PID_NS_LEVEL]; +#ifdef CONFIG_RICH_CONTAINER +struct rich_container_ext *create_rich_container_ext(void) +{ + struct rich_container_ext *ext; + + ext = kmem_cache_zalloc(ext_cachep, GFP_KERNEL); + if (!ext) + return NULL; + + ext->overcommit_memory = sysctl_overcommit_memory; + ext->overcommit_ratio = sysctl_overcommit_ratio; + ext->overcommit_kbytes = sysctl_overcommit_kbytes; + if (percpu_counter_init(&ext->vm_committed_as, 0, GFP_KERNEL)) + goto out; + ext->as_batch = vm_committed_as_batch; + + return ext; + +out: + kmem_cache_free(ext_cachep, ext); + return NULL; +} + +void destroy_rich_container_ext(struct rich_container_ext *ext) +{ + if (!ext) + return; + percpu_counter_destroy(&ext->vm_committed_as); /* release the percpu allocation made at create time */ + kmem_cache_free(ext_cachep, ext); +} +#endif + /* * creates the kmem cache to allocate pids from. * @level: pid namespace level @@ -118,6 +154,7 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns #if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE) ns->memfd_noexec_scope = pidns_memfd_noexec_scope(parent_pid_ns); #endif + ns->ext = create_rich_container_ext(); return ns; out_free_idr: @@ -136,6 +173,7 @@ static void delayed_free_pidns(struct rcu_head *p) dec_pid_namespaces(ns->ucounts); put_user_ns(ns->user_ns); + destroy_rich_container_ext(ns->ext); kmem_cache_free(pid_ns_cachep, ns); } @@ -482,6 +520,9 @@ static __init int pid_namespaces_init(void) #endif register_pid_ns_sysctl_table_vm(); +#ifdef CONFIG_RICH_CONTAINER + ext_cachep = KMEM_CACHE(rich_container_ext, SLAB_PANIC | SLAB_ACCOUNT); +#endif return 0; } diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 777baff4d527..d46492e60ca2 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2116,6 +2116,16 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_douintvec_minmax, .extra1 = SYSCTL_TWO, }, + { + .procname = "rich_container_ext_enable", + .data = &sysctl_rich_container_ext_enable, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = 
proc_douintvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + #endif { } }; diff --git a/mm/mmap.c b/mm/mmap.c index 41f1ddf071b8..39ede193a189 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1379,8 +1379,17 @@ unsigned long do_mmap(struct file *file, unsigned long addr, * memory use of this mapping. */ if (flags & MAP_NORESERVE) { + struct rich_container_ext *ext = NULL; + /* We honor MAP_NORESERVE if allowed to overcommit */ - if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) + rcu_read_lock(); + if (in_rich_container(current)) + ext = rich_container_get_ext(); + rcu_read_unlock(); + if (ext) { + if (ext->overcommit_memory != OVERCOMMIT_NEVER) + vm_flags |= VM_NORESERVE; + } else if (sysctl_overcommit_memory != OVERCOMMIT_NEVER) vm_flags |= VM_NORESERVE; /* hugetlb applies strict overcommit unless MAP_NORESERVE */ diff --git a/mm/util.c b/mm/util.c index 7e3d8adde0b2..a0e1a2939ed7 100644 --- a/mm/util.c +++ b/mm/util.c @@ -23,6 +23,8 @@ #include #include #include +#include +#include #include @@ -839,10 +841,26 @@ int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; + struct ctl_table t; + struct rich_container_ext *ext = NULL; + + rcu_read_lock(); + if (in_rich_container(current)) + ext = rich_container_get_ext(); + rcu_read_unlock(); + if (ext) { + t = *table; + t.data = &ext->overcommit_ratio; + table = &t; + } ret = proc_dointvec(table, write, buffer, lenp, ppos); - if (ret == 0 && write) - sysctl_overcommit_kbytes = 0; + if (ret == 0 && write) { + if (ext) + ext->overcommit_kbytes = 0; + else + sysctl_overcommit_kbytes = 0; + } return ret; } @@ -851,12 +869,66 @@ static void sync_overcommit_as(struct work_struct *dummy) percpu_counter_sync(&vm_committed_as); } +#ifdef CONFIG_SMP +/* Sync overcommit as manually, since schedule_on_each_cpu + * cannot pass rich_container_ext directly + */ +static void rich_container_sync_overcommit_as(struct rich_container_ext *ext) +{ + struct 
percpu_counter *fbc = &ext->vm_committed_as; + unsigned long flags; + int cpu; + s32 *pcount; + s32 count; + + raw_spin_lock_irqsave(&fbc->lock, flags); + for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) { + pcount = per_cpu_ptr(fbc->counters, cpu); + count = *pcount; + fbc->count += count; + *pcount -= count; + } + raw_spin_unlock_irqrestore(&fbc->lock, flags); +} + +void rich_container_mm_compute_batch(struct rich_container_ext *ext, + int overcommit_policy) +{ + u64 memsized_batch; + s32 nr = num_present_cpus(); + s32 batch = max_t(s32, nr*2, 32); + unsigned long ram_pages = totalram_pages(); + + if (overcommit_policy == OVERCOMMIT_NEVER) + memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX); + else + memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX); + + ext->as_batch = max_t(s32, memsized_batch, batch); +} +#else +static void rich_container_sync_overcommit_as(struct rich_container_ext *ext) +{ +} + +void rich_container_mm_compute_batch(struct rich_container_ext *ext, + int overcommit_policy) +{ +} +#endif + int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table t; int new_policy = -1; int ret; + struct rich_container_ext *ext = NULL; + + rcu_read_lock(); + if (in_rich_container(current)) + ext = rich_container_get_ext(); + rcu_read_unlock(); /* * The deviation of sync_overcommit_as could be big with loose policy @@ -876,11 +948,23 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer, if (ret || new_policy == -1) return ret; + if (ext) { + rich_container_mm_compute_batch(ext, new_policy); + if (new_policy == OVERCOMMIT_NEVER) + rich_container_sync_overcommit_as(ext); + ext->overcommit_memory = new_policy; + return ret; + } mm_compute_batch(new_policy); if (new_policy == OVERCOMMIT_NEVER) schedule_on_each_cpu(sync_overcommit_as); sysctl_overcommit_memory = new_policy; } else { + if (ext) { + t = *table; + t.data = &ext->overcommit_memory; + table = &t; 
+ } ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); } @@ -891,10 +975,26 @@ int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; + struct ctl_table t; + struct rich_container_ext *ext = NULL; + + rcu_read_lock(); + if (in_rich_container(current)) + ext = rich_container_get_ext(); + rcu_read_unlock(); + if (ext) { + t = *table; + t.data = &ext->overcommit_kbytes; + table = &t; + } ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); - if (ret == 0 && write) - sysctl_overcommit_ratio = 0; + if (ret == 0 && write) { + if (ext) + ext->overcommit_ratio = 0; + else + sysctl_overcommit_ratio = 0; + } return ret; } @@ -915,6 +1015,28 @@ unsigned long vm_commit_limit(void) return allowed; } +#ifdef CONFIG_MEMCG +unsigned long rich_container_vm_commit_limit(struct rich_container_ext *ext, + struct mem_cgroup *memcg) +{ + unsigned long allowed; + struct mem_cgroup *iter; + unsigned long limit; + + if (ext->overcommit_kbytes) + allowed = ext->overcommit_kbytes >> (PAGE_SHIFT - 10); + else { + limit = totalram_pages() - hugetlb_total_pages(); + for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) + limit = min(limit, iter->memory.max); + allowed = (limit * ext->overcommit_ratio / 100); + } + allowed += min_t(unsigned long, total_swap_pages, memcg->swap.max); + + return allowed; +} +#endif + /* * Make sure vm_committed_as in one cacheline and not cacheline shared with * other variables. It can be updated by several CPUs frequently. 
@@ -936,6 +1058,14 @@ struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp; */ unsigned long vm_memory_committed(void) { + struct rich_container_ext *ext = NULL; + + rcu_read_lock(); + if (in_rich_container(current)) + ext = rich_container_get_ext(); + rcu_read_unlock(); + if (ext) + return percpu_counter_sum_positive(&ext->vm_committed_as); return percpu_counter_sum_positive(&vm_committed_as); } EXPORT_SYMBOL_GPL(vm_memory_committed); @@ -959,16 +1089,33 @@ EXPORT_SYMBOL_GPL(vm_memory_committed); int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) { long allowed; + int overcommit = sysctl_overcommit_memory; +#ifdef CONFIG_MEMCG + struct rich_container_ext *ext = NULL; + struct mem_cgroup *memcg = NULL; + long memcg_allowed; + + rcu_read_lock(); + if (in_rich_container(current)) { + ext = rich_container_get_ext(); + memcg = rich_container_get_memcg(); + } + rcu_read_unlock(); + if (ext) { + overcommit = ext->overcommit_memory; + memcg_allowed = rich_container_vm_commit_limit(ext, memcg); + } +#endif vm_acct_memory(pages); /* * Sometimes we want to use more memory than we have */ - if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS) + if (overcommit == OVERCOMMIT_ALWAYS) return 0; - if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { + if (overcommit == OVERCOMMIT_GUESS) { if (pages > totalram_pages() + total_swap_pages) goto error; return 0; @@ -990,6 +1137,10 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) allowed -= min_t(long, mm->total_vm / 32, reserve); } +#ifdef CONFIG_MEMCG + if (ext && percpu_counter_read_positive(&ext->vm_committed_as) < memcg_allowed) + return 0; +#endif if (percpu_counter_read_positive(&vm_committed_as) < allowed) return 0; error: -- Gitee